From 5a2e97a12d56f629fb05d166e3f9357034621293 Mon Sep 17 00:00:00 2001 From: Nobody Date: Thu, 11 Aug 2022 22:29:58 +0300 Subject: [PATCH] Linux 5.4.163 with MCST patches (2.23) --- Documentation/admin-guide/LSM/AltHa.rst | 101 + Documentation/admin-guide/LSM/index.rst | 1 + .../admin-guide/kernel-parameters.txt | 8 + .../devicetree/bindings/hwmon/emc2305.txt | 60 + Documentation/hwmon/emc2305 | 33 + MAINTAINERS | 25 + Makefile | 42 +- arch/Kconfig | 3 + arch/e2k/3p/Makefile | 3 + arch/e2k/3p/binfmt_elf32_3P.c | 38 + arch/e2k/3p/binfmt_elf64_3P.c | 18 + arch/e2k/3p/binfmt_elfe2kp.c | 1348 + arch/e2k/3p/global_sp.c | 1593 + arch/e2k/3p/umalloc.c | 1130 + arch/e2k/Kconfig | 1248 + arch/e2k/Kconfig.debug | 51 + arch/e2k/Kconfig.virt | 22 + arch/e2k/Makefile | 252 + arch/e2k/boot/.gitignore | 5 + arch/e2k/boot/Am85C30.c | 237 + arch/e2k/boot/Am85C30.h | 416 + arch/e2k/boot/Makefile | 321 + arch/e2k/boot/apic.c | 265 + arch/e2k/boot/aploader.S | 125 + arch/e2k/boot/aploader.lds | 99 + arch/e2k/boot/apstartup.S | 42 + arch/e2k/boot/bios/Makefile | 12 + arch/e2k/boot/bios/bios.c | 361 + arch/e2k/boot/bios/bios.h | 41 + arch/e2k/boot/bios/ide_config.h | 18 + arch/e2k/boot/bios/init_kbd.c | 134 + arch/e2k/boot/bios/init_kbd.h | 103 + arch/e2k/boot/bios/io.c | 623 + arch/e2k/boot/bios/linuxpci.c | 681 + arch/e2k/boot/bios/mc146818rtc.h | 194 + arch/e2k/boot/bios/mga.c | 558 + arch/e2k/boot/bios/mga.h | 283 + arch/e2k/boot/bios/mpspec.c | 371 + arch/e2k/boot/bios/mptable.c | 422 + arch/e2k/boot/bios/newpci.c | 1707 + arch/e2k/boot/bios/pci.h | 605 + arch/e2k/boot/bios/pci_isa_config.h | 83 + arch/e2k/boot/bios/pciconf.h | 7 + arch/e2k/boot/bios/printk.h | 43 + arch/e2k/boot/bios/southbridge.c | 362 + arch/e2k/boot/bios/southbridge.h | 158 + arch/e2k/boot/bios/superio.c | 197 + arch/e2k/boot/bios/video/Makefile | 9 + arch/e2k/boot/bios/video/helper_exec.c | 269 + arch/e2k/boot/bios/video/helper_mem.c | 326 + arch/e2k/boot/bios/video/init.c | 592 + 
arch/e2k/boot/bios/video/init.h | 89 + arch/e2k/boot/bios/video/int10.c | 489 + arch/e2k/boot/bios/video/int15.c | 19 + arch/e2k/boot/bios/video/int16.c | 12 + arch/e2k/boot/bios/video/int1a.c | 187 + arch/e2k/boot/bios/video/inte6.c | 23 + arch/e2k/boot/bios/video/pci-iface.c | 150 + arch/e2k/boot/bios/video/pci-iface.h | 32 + arch/e2k/boot/bios/video/x86emu/include/msr.h | 33 + .../boot/bios/video/x86emu/include/x86emu.h | 205 + .../video/x86emu/include/x86emu/fpu_regs.h | 115 + .../bios/video/x86emu/include/x86emu/regs.h | 338 + .../bios/video/x86emu/include/x86emu/types.h | 51 + .../boot/bios/video/x86emu/src/x86emu/LICENSE | 17 + .../bios/video/x86emu/src/x86emu/Makefile | 6 + .../boot/bios/video/x86emu/src/x86emu/debug.c | 439 + .../bios/video/x86emu/src/x86emu/decode.c | 1148 + .../boot/bios/video/x86emu/src/x86emu/fpu.c | 945 + .../bios/video/x86emu/src/x86emu/makefile | 63 + .../video/x86emu/src/x86emu/makefile.linux | 73 + .../boot/bios/video/x86emu/src/x86emu/ops.c | 5428 + .../boot/bios/video/x86emu/src/x86emu/ops2.c | 1764 + .../bios/video/x86emu/src/x86emu/prim_ops.c | 2450 + .../boot/bios/video/x86emu/src/x86emu/sys.c | 646 + .../bios/video/x86emu/src/x86emu/validate.c | 789 + .../video/x86emu/src/x86emu/x86emu/debug.h | 212 + .../video/x86emu/src/x86emu/x86emu/decode.h | 88 + .../bios/video/x86emu/src/x86emu/x86emu/fpu.h | 61 + .../bios/video/x86emu/src/x86emu/x86emu/ops.h | 45 + .../video/x86emu/src/x86emu/x86emu/prim_asm.h | 1991 + .../video/x86emu/src/x86emu/x86emu/prim_ops.h | 142 + .../video/x86emu/src/x86emu/x86emu/x86emui.h | 101 + arch/e2k/boot/boot.h | 38 + arch/e2k/boot/boot.lds | 19 + arch/e2k/boot/boot_io.h | 55 + arch/e2k/boot/bootblock.c | 18 + arch/e2k/boot/compressed.lds | 37 + arch/e2k/boot/compressed_guest.lds | 37 + arch/e2k/boot/console.c | 46 + arch/e2k/boot/decompress.c | 720 + arch/e2k/boot/defkeymap.c | 262 + arch/e2k/boot/dts/Makefile | 12 + arch/e2k/boot/dts/e1cp_e1cmt_tablet.dts | 212 + arch/e2k/boot/dts/e1cp_m2e-uvp.dts | 
141 + arch/e2k/boot/dts/e1cp_mbe1c-pc.dts | 124 + arch/e2k/boot/dts/e1cp_p2p8-sip-s1.dts | 184 + arch/e2k/boot/dts/e2c_mono-pc.dts | 87 + arch/e2k/boot/dts/e4c_apk-pc4c.dts | 459 + arch/e2k/boot/dts/e4c_el-2s-4.dts | 272 + arch/e2k/boot/dts/e4c_mbe2s-pc.dts | 86 + arch/e2k/boot/dts/e8c2_uatx_se.dts | 159 + arch/e2k/boot/dts/e8c_mbe8c-pc.dts | 64 + arch/e2k/boot/dts/e8c_swtx.dts | 256 + arch/e2k/boot/dts/include/dt-bindings | 1 + arch/e2k/boot/dumpsyms.c | 173 + arch/e2k/boot/e2k_sic.h | 76 + arch/e2k/boot/epic.c | 150 + arch/e2k/boot/epiggy.lds | 6 + arch/e2k/boot/flash.lds | 93 + arch/e2k/boot/info.c | 24 + arch/e2k/boot/jumpstart.c | 3264 + arch/e2k/boot/keyboard.c | 249 + arch/e2k/boot/machdep_fpic.c | 45 + arch/e2k/boot/malloc.c | 64 + arch/e2k/boot/pic.h | 211 + arch/e2k/boot/recovery_string.S | 7 + arch/e2k/boot/recovery_string_fpic.S | 7 + arch/e2k/boot/recovery_string_v5.S | 7 + arch/e2k/boot/recovery_string_v5_fpic.S | 7 + arch/e2k/boot/romloader.S | 282 + arch/e2k/boot/romloader.lds | 92 + arch/e2k/boot/romstartup.S | 51 + arch/e2k/boot/smp.c | 386 + arch/e2k/boot/stdio.c | 271 + arch/e2k/boot/string.c | 8 + arch/e2k/boot/string_fpic.c | 8 + arch/e2k/boot/string_guest_fpic.c | 8 + arch/e2k/boot/topology.h | 45 + arch/e2k/boot/vga.c | 221 + arch/e2k/boot/vga.h | 5807 ++ arch/e2k/boot/vmlinux.bin.lds | 8 + arch/e2k/boot/zip.c | 182 + arch/e2k/configs/build-config | 432 + arch/e2k/configs/defconfig | 755 + arch/e2k/configs/defconfig-guest-lms-pv | 2928 + arch/e2k/configs/e12c-lms-defconfig | 2651 + arch/e2k/configs/guest_lms_defconfig | 2895 + arch/e2k/configs/host_lms_defconfig | 2889 + arch/e2k/configs/mcst_rt.config | 1 + arch/e2k/fast_syscalls/Makefile | 17 + arch/e2k/fast_syscalls/clkr.c | 65 + arch/e2k/fast_syscalls/compat.c | 186 + arch/e2k/fast_syscalls/fast_clock_gettime.c | 69 + arch/e2k/fast_syscalls/fast_getcontext.c | 54 + arch/e2k/fast_syscalls/fast_getcpu.c | 36 + arch/e2k/fast_syscalls/fast_set_return.c | 52 + 
arch/e2k/fast_syscalls/fast_siggetmask.c | 14 + arch/e2k/fast_syscalls/fast_syscalls.c | 87 + arch/e2k/fast_syscalls/protected.c | 298 + arch/e2k/fast_syscalls/sclkr.c | 66 + arch/e2k/include/asm-l | 1 + arch/e2k/include/asm/3p.h | 113 + arch/e2k/include/asm/Kbuild | 16 + arch/e2k/include/asm/a.out.h | 28 + arch/e2k/include/asm/aau_context.h | 255 + arch/e2k/include/asm/aau_regs.h | 24 + arch/e2k/include/asm/aau_regs_access.h | 685 + arch/e2k/include/asm/aau_regs_types.h | 178 + arch/e2k/include/asm/acenv.h | 10 + arch/e2k/include/asm/acpi.h | 6 + arch/e2k/include/asm/alternative-asm.h | 193 + arch/e2k/include/asm/alternative.h | 265 + arch/e2k/include/asm/apic.h | 46 + arch/e2k/include/asm/apic_regs.h | 276 + arch/e2k/include/asm/apicdef.h | 9 + arch/e2k/include/asm/atomic.h | 380 + arch/e2k/include/asm/atomic_api.h | 892 + arch/e2k/include/asm/auxvec.h | 12 + arch/e2k/include/asm/barrier.h | 165 + arch/e2k/include/asm/bios_map.h | 116 + arch/e2k/include/asm/bitops.h | 63 + arch/e2k/include/asm/bitrev.h | 20 + arch/e2k/include/asm/bitsperlong.h | 8 + arch/e2k/include/asm/boot_flags.h | 123 + arch/e2k/include/asm/boot_profiling.h | 34 + arch/e2k/include/asm/boot_recovery.h | 42 + arch/e2k/include/asm/bootinfo.h | 10 + arch/e2k/include/asm/bug.h | 20 + arch/e2k/include/asm/byteorder.h | 10 + arch/e2k/include/asm/cache.h | 81 + arch/e2k/include/asm/cacheflush.h | 243 + arch/e2k/include/asm/checksum.h | 141 + arch/e2k/include/asm/clkr.h | 22 + arch/e2k/include/asm/clock_info.h | 109 + arch/e2k/include/asm/cmos.h | 37 + arch/e2k/include/asm/cmpxchg.h | 102 + arch/e2k/include/asm/cnt_point.h | 359 + arch/e2k/include/asm/compat.h | 215 + arch/e2k/include/asm/compiler.h | 77 + arch/e2k/include/asm/console.h | 47 + arch/e2k/include/asm/convert_array.h | 153 + arch/e2k/include/asm/copy-hw-stacks.h | 852 + arch/e2k/include/asm/coredump.h | 10 + arch/e2k/include/asm/cpu.h | 36 + arch/e2k/include/asm/cpu_features.h | 61 + arch/e2k/include/asm/cpu_regs.h | 3660 + 
arch/e2k/include/asm/cpu_regs_access.h | 555 + arch/e2k/include/asm/cpu_regs_types.h | 2511 + arch/e2k/include/asm/current.h | 12 + arch/e2k/include/asm/debug_print.h | 46 + arch/e2k/include/asm/delay.h | 7 + arch/e2k/include/asm/device.h | 27 + arch/e2k/include/asm/dma-direct.h | 6 + arch/e2k/include/asm/dma-mapping.h | 6 + arch/e2k/include/asm/dma.h | 297 + arch/e2k/include/asm/e12c.h | 57 + arch/e2k/include/asm/e16c.h | 57 + arch/e2k/include/asm/e1cp.h | 46 + arch/e2k/include/asm/e2c3.h | 55 + arch/e2k/include/asm/e2k-iommu.h | 11 + arch/e2k/include/asm/e2k.h | 424 + arch/e2k/include/asm/e2k_api.h | 6994 ++ arch/e2k/include/asm/e2k_debug.h | 841 + arch/e2k/include/asm/e2k_ptypes.h | 384 + arch/e2k/include/asm/e2k_sic.h | 130 + arch/e2k/include/asm/e2k_syswork.h | 102 + arch/e2k/include/asm/e2s.h | 57 + arch/e2k/include/asm/e8c.h | 59 + arch/e2k/include/asm/e8c2.h | 59 + arch/e2k/include/asm/el_posix.h | 59 + arch/e2k/include/asm/elf.h | 172 + arch/e2k/include/asm/epic.h | 97 + arch/e2k/include/asm/epic_regs.h | 4 + arch/e2k/include/asm/epicdef.h | 13 + arch/e2k/include/asm/errors_hndl.h | 68 + arch/e2k/include/asm/es2.h | 58 + arch/e2k/include/asm/exec.h | 3 + arch/e2k/include/asm/fast_syscalls.h | 337 + arch/e2k/include/asm/fb.h | 19 + arch/e2k/include/asm/fcntl.h | 1 + arch/e2k/include/asm/floppy.h | 272 + arch/e2k/include/asm/ftrace.h | 48 + arch/e2k/include/asm/futex.h | 77 + arch/e2k/include/asm/getsp_adj.h | 123 + arch/e2k/include/asm/glob_regs.h | 209 + arch/e2k/include/asm/gpio.h | 21 + arch/e2k/include/asm/gregs.h | 219 + arch/e2k/include/asm/hardirq.h | 40 + arch/e2k/include/asm/hb_regs.h | 437 + arch/e2k/include/asm/head.h | 270 + arch/e2k/include/asm/host_printk.h | 13 + arch/e2k/include/asm/hugetlb.h | 89 + arch/e2k/include/asm/hw_breakpoint.h | 47 + arch/e2k/include/asm/hw_irq.h | 8 + arch/e2k/include/asm/hw_stacks.h | 699 + arch/e2k/include/asm/io.h | 615 + arch/e2k/include/asm/io_apic.h | 12 + arch/e2k/include/asm/io_apic_regs.h | 81 + 
arch/e2k/include/asm/io_epic.h | 29 + arch/e2k/include/asm/io_epic_regs.h | 4 + arch/e2k/include/asm/ioctl.h | 1 + arch/e2k/include/asm/ioctls.h | 17 + arch/e2k/include/asm/iolinkmask.h | 6 + arch/e2k/include/asm/ipcbuf.h | 28 + arch/e2k/include/asm/irq.h | 22 + arch/e2k/include/asm/irq_vectors.h | 56 + arch/e2k/include/asm/irq_work.h | 1 + arch/e2k/include/asm/irqdomain.h | 6 + arch/e2k/include/asm/irqflags.h | 475 + arch/e2k/include/asm/kdebug.h | 22 + arch/e2k/include/asm/keyboard.h | 70 + arch/e2k/include/asm/kprobes.h | 83 + arch/e2k/include/asm/kvm/Kbuild | 8 + arch/e2k/include/asm/kvm/aau_regs_access.h | 667 + arch/e2k/include/asm/kvm/async_pf.h | 33 + arch/e2k/include/asm/kvm/boot.h | 50 + arch/e2k/include/asm/kvm/boot_spinlock.h | 38 + arch/e2k/include/asm/kvm/boot_spinlock_slow.h | 44 + arch/e2k/include/asm/kvm/copy-hw-stacks.h | 462 + arch/e2k/include/asm/kvm/cpu_hv_regs_access.h | 512 + arch/e2k/include/asm/kvm/cpu_hv_regs_types.h | 418 + arch/e2k/include/asm/kvm/cpu_regs_access.h | 1818 + arch/e2k/include/asm/kvm/csd_lock.h | 43 + arch/e2k/include/asm/kvm/debug.h | 122 + arch/e2k/include/asm/kvm/gmmu_context.h | 340 + arch/e2k/include/asm/kvm/gpid.h | 69 + arch/e2k/include/asm/kvm/gregs.h | 171 + arch/e2k/include/asm/kvm/guest.h | 357 + arch/e2k/include/asm/kvm/guest/Kbuild | 4 + arch/e2k/include/asm/kvm/guest/aau_context.h | 325 + arch/e2k/include/asm/kvm/guest/area_alloc.h | 13 + arch/e2k/include/asm/kvm/guest/atomic_api.h | 31 + arch/e2k/include/asm/kvm/guest/boot.h | 152 + arch/e2k/include/asm/kvm/guest/boot_flags.h | 43 + .../include/asm/kvm/guest/boot_mmu_context.h | 36 + .../e2k/include/asm/kvm/guest/boot_spinlock.h | 38 + arch/e2k/include/asm/kvm/guest/bootinfo.h | 44 + arch/e2k/include/asm/kvm/guest/cacheflush.h | 115 + arch/e2k/include/asm/kvm/guest/clkr.h | 17 + arch/e2k/include/asm/kvm/guest/console.h | 33 + .../include/asm/kvm/guest/copy-hw-stacks.h | 568 + arch/e2k/include/asm/kvm/guest/cpu.h | 31 + arch/e2k/include/asm/kvm/guest/debug.h 
| 78 + arch/e2k/include/asm/kvm/guest/e2k.h | 34 + arch/e2k/include/asm/kvm/guest/e2k_virt.h | 59 + .../e2k/include/asm/kvm/guest/fast_syscalls.h | 42 + arch/e2k/include/asm/kvm/guest/gregs.h | 90 + arch/e2k/include/asm/kvm/guest/host_printk.h | 31 + arch/e2k/include/asm/kvm/guest/hvc_l.h | 27 + arch/e2k/include/asm/kvm/guest/hw_stacks.h | 88 + arch/e2k/include/asm/kvm/guest/io.h | 426 + arch/e2k/include/asm/kvm/guest/irq.h | 85 + arch/e2k/include/asm/kvm/guest/machdep.h | 30 + arch/e2k/include/asm/kvm/guest/mm_hooks.h | 19 + arch/e2k/include/asm/kvm/guest/mmu.h | 162 + arch/e2k/include/asm/kvm/guest/mmu_context.h | 29 + arch/e2k/include/asm/kvm/guest/pgatomic.h | 134 + arch/e2k/include/asm/kvm/guest/process.h | 534 + arch/e2k/include/asm/kvm/guest/processor.h | 64 + arch/e2k/include/asm/kvm/guest/ptrace.h | 151 + arch/e2k/include/asm/kvm/guest/pv_info.h | 52 + arch/e2k/include/asm/kvm/guest/regs_state.h | 470 + .../include/asm/kvm/guest/secondary_space.h | 21 + arch/e2k/include/asm/kvm/guest/setup.h | 39 + arch/e2k/include/asm/kvm/guest/sge.h | 75 + arch/e2k/include/asm/kvm/guest/signal.h | 34 + arch/e2k/include/asm/kvm/guest/smp.h | 84 + arch/e2k/include/asm/kvm/guest/spinlock.h | 75 + arch/e2k/include/asm/kvm/guest/stacks.h | 33 + arch/e2k/include/asm/kvm/guest/string.h | 213 + arch/e2k/include/asm/kvm/guest/switch.h | 158 + arch/e2k/include/asm/kvm/guest/switch_to.h | 217 + .../include/asm/kvm/guest/sync_pg_tables.h | 19 + arch/e2k/include/asm/kvm/guest/system.h | 227 + arch/e2k/include/asm/kvm/guest/time.h | 29 + arch/e2k/include/asm/kvm/guest/timex.h | 26 + .../include/asm/kvm/guest/tlb_regs_types.h | 17 + arch/e2k/include/asm/kvm/guest/tlbflush.h | 183 + arch/e2k/include/asm/kvm/guest/trace-defs.h | 41 + .../include/asm/kvm/guest/trace-hw-stacks.h | 308 + arch/e2k/include/asm/kvm/guest/trap_table.S.h | 166 + arch/e2k/include/asm/kvm/guest/trap_table.h | 317 + arch/e2k/include/asm/kvm/guest/traps.h | 129 + arch/e2k/include/asm/kvm/guest/v2p.h | 133 + 
arch/e2k/include/asm/kvm/guest/vga.h | 52 + arch/e2k/include/asm/kvm/head.h | 111 + arch/e2k/include/asm/kvm/host_printk.h | 41 + arch/e2k/include/asm/kvm/hvc-console.h | 27 + arch/e2k/include/asm/kvm/hypercall.h | 1541 + arch/e2k/include/asm/kvm/hypervisor.h | 106 + arch/e2k/include/asm/kvm/irq.h | 44 + arch/e2k/include/asm/kvm/machdep.h | 69 + arch/e2k/include/asm/kvm/mm.h | 100 + arch/e2k/include/asm/kvm/mm_hooks.h | 31 + arch/e2k/include/asm/kvm/mmu.h | 439 + arch/e2k/include/asm/kvm/mmu_context.h | 37 + arch/e2k/include/asm/kvm/mmu_hv_regs_access.h | 318 + arch/e2k/include/asm/kvm/mmu_hv_regs_types.h | 253 + arch/e2k/include/asm/kvm/mmu_regs_access.h | 852 + arch/e2k/include/asm/kvm/nid.h | 89 + arch/e2k/include/asm/kvm/page.h | 60 + arch/e2k/include/asm/kvm/page_track.h | 85 + arch/e2k/include/asm/kvm/pgtable-tdp.h | 157 + arch/e2k/include/asm/kvm/pgtable-x86.h | 104 + arch/e2k/include/asm/kvm/pgtable.h | 77 + arch/e2k/include/asm/kvm/process.h | 495 + arch/e2k/include/asm/kvm/ptrace.h | 654 + arch/e2k/include/asm/kvm/pv-emul.h | 287 + arch/e2k/include/asm/kvm/regs_state.h | 458 + arch/e2k/include/asm/kvm/runstate.h | 479 + arch/e2k/include/asm/kvm/spinlock.h | 40 + arch/e2k/include/asm/kvm/spinlock_slow.h | 44 + arch/e2k/include/asm/kvm/stacks.h | 43 + arch/e2k/include/asm/kvm/string.h | 7 + arch/e2k/include/asm/kvm/switch.h | 1374 + arch/e2k/include/asm/kvm/thread_info.h | 555 + arch/e2k/include/asm/kvm/threads.h | 23 + arch/e2k/include/asm/kvm/tlbflush.h | 165 + arch/e2k/include/asm/kvm/trace-defs.h | 23 + arch/e2k/include/asm/kvm/trace-hw-stacks.h | 367 + arch/e2k/include/asm/kvm/trace_kvm.h | 1247 + arch/e2k/include/asm/kvm/trace_kvm_hv.h | 525 + arch/e2k/include/asm/kvm/trace_kvm_pv.h | 207 + arch/e2k/include/asm/kvm/trap_table.S.h | 467 + arch/e2k/include/asm/kvm/trap_table.h | 831 + arch/e2k/include/asm/kvm/ttable-help.h | 51 + arch/e2k/include/asm/kvm/uaccess.h | 211 + .../include/asm/kvm/vcpu-regs-debug-inline.h | 86 + 
arch/e2k/include/asm/kvm/vcpu-regs-debug.h | 130 + arch/e2k/include/asm/kvm_host.h | 1475 + arch/e2k/include/asm/l-iommu.h | 160 + arch/e2k/include/asm/l-mcmonitor.h | 87 + arch/e2k/include/asm/l_ide.h | 6 + arch/e2k/include/asm/l_pmc.h | 18 + arch/e2k/include/asm/l_spmc.h | 6 + arch/e2k/include/asm/l_timer.h | 14 + arch/e2k/include/asm/l_timer_regs.h | 107 + arch/e2k/include/asm/linkage.h | 7 + arch/e2k/include/asm/linux_logo.h | 48 + arch/e2k/include/asm/local.h | 175 + arch/e2k/include/asm/machdep.h | 721 + arch/e2k/include/asm/machdep_numa.h | 23 + arch/e2k/include/asm/mas.h | 156 + arch/e2k/include/asm/mc146818rtc.h | 12 + arch/e2k/include/asm/mlt.h | 85 + arch/e2k/include/asm/mm_hooks.h | 37 + arch/e2k/include/asm/mman.h | 132 + arch/e2k/include/asm/mmu-regs-types-v2.h | 156 + arch/e2k/include/asm/mmu-regs-types-v6.h | 180 + arch/e2k/include/asm/mmu.h | 226 + arch/e2k/include/asm/mmu_context.h | 484 + arch/e2k/include/asm/mmu_fault.h | 261 + arch/e2k/include/asm/mmu_regs.h | 863 + arch/e2k/include/asm/mmu_regs_access.h | 318 + arch/e2k/include/asm/mmu_regs_types.h | 932 + arch/e2k/include/asm/mmu_types.h | 873 + arch/e2k/include/asm/mmzone.h | 19 + arch/e2k/include/asm/module.h | 13 + arch/e2k/include/asm/monitors.h | 49 + arch/e2k/include/asm/mpspec.h | 35 + arch/e2k/include/asm/msgbuf.h | 27 + arch/e2k/include/asm/msidef.h | 6 + arch/e2k/include/asm/namei.h | 17 + arch/e2k/include/asm/native_aau_regs_access.h | 1487 + arch/e2k/include/asm/native_cpu_regs_access.h | 596 + .../include/asm/native_dcache_regs_access.h | 78 + arch/e2k/include/asm/native_mmu_regs_access.h | 338 + arch/e2k/include/asm/nbsr_v6_regs.h | 301 + arch/e2k/include/asm/nmi.h | 95 + arch/e2k/include/asm/numnodes.h | 124 + arch/e2k/include/asm/of_device.h | 8 + arch/e2k/include/asm/of_platform.h | 4 + arch/e2k/include/asm/openprom.h | 47 + arch/e2k/include/asm/oplib.h | 90 + arch/e2k/include/asm/ord_rwlock.h | 403 + arch/e2k/include/asm/ord_rwlock_types.h | 34 + 
arch/e2k/include/asm/override-lcc-warnings.h | 13 + arch/e2k/include/asm/p2v/boot_bitops.h | 71 + arch/e2k/include/asm/p2v/boot_cacheflush.h | 57 + arch/e2k/include/asm/p2v/boot_console.h | 67 + arch/e2k/include/asm/p2v/boot_head.h | 321 + arch/e2k/include/asm/p2v/boot_init.h | 372 + arch/e2k/include/asm/p2v/boot_map.h | 95 + arch/e2k/include/asm/p2v/boot_mmu_context.h | 91 + arch/e2k/include/asm/p2v/boot_param.h | 107 + arch/e2k/include/asm/p2v/boot_phys.h | 275 + arch/e2k/include/asm/p2v/boot_smp.h | 528 + arch/e2k/include/asm/p2v/boot_spinlock.h | 97 + .../e2k/include/asm/p2v/boot_spinlock_types.h | 15 + arch/e2k/include/asm/p2v/boot_v2p.h | 297 + arch/e2k/include/asm/p2v/io.h | 64 + arch/e2k/include/asm/page.h | 431 + arch/e2k/include/asm/page_io.h | 53 + arch/e2k/include/asm/paravirt.h | 8 + arch/e2k/include/asm/paravirt/aau_context.h | 255 + .../include/asm/paravirt/aau_regs_access.h | 511 + arch/e2k/include/asm/paravirt/apic.h | 56 + arch/e2k/include/asm/paravirt/area_alloc.h | 152 + arch/e2k/include/asm/paravirt/atomic_api.h | 38 + arch/e2k/include/asm/paravirt/boot.h | 201 + arch/e2k/include/asm/paravirt/boot_flags.h | 52 + arch/e2k/include/asm/paravirt/cacheflush.h | 88 + arch/e2k/include/asm/paravirt/clkr.h | 20 + arch/e2k/include/asm/paravirt/console.h | 39 + arch/e2k/include/asm/paravirt/cpu.h | 99 + .../include/asm/paravirt/cpu_regs_access.h | 1833 + arch/e2k/include/asm/paravirt/e2k.h | 53 + arch/e2k/include/asm/paravirt/epic.h | 77 + arch/e2k/include/asm/paravirt/fast_syscalls.h | 59 + arch/e2k/include/asm/paravirt/host_printk.h | 21 + arch/e2k/include/asm/paravirt/hw_stacks.h | 141 + arch/e2k/include/asm/paravirt/io.h | 294 + arch/e2k/include/asm/paravirt/mm_hooks.h | 14 + arch/e2k/include/asm/paravirt/mman.h | 203 + arch/e2k/include/asm/paravirt/mmu.h | 130 + arch/e2k/include/asm/paravirt/mmu_context.h | 37 + .../include/asm/paravirt/mmu_regs_access.h | 651 + arch/e2k/include/asm/paravirt/pgatomic.h | 109 + arch/e2k/include/asm/paravirt/pgtable.h 
| 212 + arch/e2k/include/asm/paravirt/process.h | 344 + arch/e2k/include/asm/paravirt/processor.h | 72 + arch/e2k/include/asm/paravirt/pv_info.h | 89 + arch/e2k/include/asm/paravirt/pv_ops.h | 731 + arch/e2k/include/asm/paravirt/regs_state.h | 87 + .../include/asm/paravirt/secondary_space.h | 35 + arch/e2k/include/asm/paravirt/setup.h | 21 + arch/e2k/include/asm/paravirt/sge.h | 46 + arch/e2k/include/asm/paravirt/smp.h | 121 + arch/e2k/include/asm/paravirt/spinlock.h | 151 + arch/e2k/include/asm/paravirt/string.h | 88 + arch/e2k/include/asm/paravirt/switch.h | 54 + arch/e2k/include/asm/paravirt/switch_to.h | 28 + arch/e2k/include/asm/paravirt/system.h | 69 + arch/e2k/include/asm/paravirt/time.h | 37 + arch/e2k/include/asm/paravirt/timex.h | 36 + arch/e2k/include/asm/paravirt/tlbflush.h | 85 + arch/e2k/include/asm/paravirt/trap_table.h | 106 + arch/e2k/include/asm/paravirt/traps.h | 87 + arch/e2k/include/asm/paravirt/v2p.h | 129 + arch/e2k/include/asm/paravirt/vga.h | 52 + arch/e2k/include/asm/parport.h | 18 + arch/e2k/include/asm/pci.h | 53 + arch/e2k/include/asm/percpu.h | 74 + arch/e2k/include/asm/perf_event.h | 101 + arch/e2k/include/asm/perf_event_types.h | 42 + arch/e2k/include/asm/perf_event_uncore.h | 116 + arch/e2k/include/asm/pgalloc.h | 598 + arch/e2k/include/asm/pgatomic.h | 131 + arch/e2k/include/asm/pgd.h | 293 + arch/e2k/include/asm/pgtable-v2.h | 412 + arch/e2k/include/asm/pgtable-v6.h | 339 + arch/e2k/include/asm/pgtable.h | 831 + arch/e2k/include/asm/pgtable_def.h | 1321 + arch/e2k/include/asm/pgtable_types.h | 194 + arch/e2k/include/asm/pic.h | 150 + arch/e2k/include/asm/poll.h | 1 + arch/e2k/include/asm/posix_types.h | 11 + arch/e2k/include/asm/preempt.h | 124 + arch/e2k/include/asm/process.h | 1065 + arch/e2k/include/asm/processor.h | 460 + arch/e2k/include/asm/prom.h | 17 + arch/e2k/include/asm/prot_loader.h | 176 + arch/e2k/include/asm/protected_syscalls.h | 257 + arch/e2k/include/asm/ptrace-abi.h | 78 + arch/e2k/include/asm/ptrace.h | 828 + 
arch/e2k/include/asm/pv_info.h | 120 + arch/e2k/include/asm/qspinlock.h | 92 + arch/e2k/include/asm/qspinlock_paravirt.h | 5 + arch/e2k/include/asm/regs_state.h | 1381 + arch/e2k/include/asm/rlimits.h | 13 + arch/e2k/include/asm/rtc.h | 6 + arch/e2k/include/asm/rwsem.h | 212 + arch/e2k/include/asm/sbus.h | 59 + arch/e2k/include/asm/sclkr.h | 105 + arch/e2k/include/asm/seccomp.h | 18 + arch/e2k/include/asm/secondary_space.h | 108 + arch/e2k/include/asm/sections.h | 54 + arch/e2k/include/asm/sembuf.h | 22 + arch/e2k/include/asm/serial.h | 2 + arch/e2k/include/asm/set_memory.h | 24 + arch/e2k/include/asm/setup.h | 55 + arch/e2k/include/asm/shmbuf.h | 13 + arch/e2k/include/asm/shmparam.h | 11 + arch/e2k/include/asm/sic_regs.h | 1405 + arch/e2k/include/asm/sic_regs_access.h | 95 + arch/e2k/include/asm/sigcontext.h | 22 + arch/e2k/include/asm/signal.h | 212 + arch/e2k/include/asm/simul.h | 60 + arch/e2k/include/asm/smp-boot.h | 52 + arch/e2k/include/asm/smp.h | 240 + arch/e2k/include/asm/socket.h | 1 + arch/e2k/include/asm/sockios.h | 1 + arch/e2k/include/asm/sparsemem.h | 10 + arch/e2k/include/asm/spinlock.h | 22 + arch/e2k/include/asm/spinlock_types.h | 10 + arch/e2k/include/asm/stacks.h | 177 + arch/e2k/include/asm/stacktrace.h | 4 + arch/e2k/include/asm/stat.h | 30 + arch/e2k/include/asm/statfs.h | 6 + arch/e2k/include/asm/string.h | 522 + arch/e2k/include/asm/swap_info.h | 16 + arch/e2k/include/asm/switch_to.h | 106 + arch/e2k/include/asm/sync_pg_tables.h | 18 + arch/e2k/include/asm/syscall.h | 62 + arch/e2k/include/asm/syscalls.h | 526 + arch/e2k/include/asm/system.h | 664 + arch/e2k/include/asm/tag_mem.h | 388 + arch/e2k/include/asm/tags.h | 60 + arch/e2k/include/asm/termbits.h | 6 + arch/e2k/include/asm/termios.h | 6 + arch/e2k/include/asm/thread_info.h | 442 + arch/e2k/include/asm/time.h | 42 + arch/e2k/include/asm/timer.h | 12 + arch/e2k/include/asm/timex.h | 77 + arch/e2k/include/asm/tlb.h | 29 + arch/e2k/include/asm/tlb_regs_access.h | 153 + 
arch/e2k/include/asm/tlb_regs_types.h | 428 + arch/e2k/include/asm/tlbflush.h | 131 + arch/e2k/include/asm/topology.h | 173 + arch/e2k/include/asm/trace-clock.h | 43 + arch/e2k/include/asm/trace-defs.h | 115 + arch/e2k/include/asm/trace-mmu-dtlb-v2.h | 37 + arch/e2k/include/asm/trace-mmu-dtlb-v6.h | 42 + arch/e2k/include/asm/trace.h | 364 + arch/e2k/include/asm/trace_clock.h | 12 + arch/e2k/include/asm/trace_pgtable-v2.h | 35 + arch/e2k/include/asm/trace_pgtable-v6.h | 45 + arch/e2k/include/asm/trap_def.h | 275 + arch/e2k/include/asm/trap_table.S.h | 167 + arch/e2k/include/asm/trap_table.h | 267 + arch/e2k/include/asm/traps.h | 244 + arch/e2k/include/asm/types.h | 11 + arch/e2k/include/asm/uaccess.h | 448 + arch/e2k/include/asm/ucontext.h | 48 + arch/e2k/include/asm/umalloc.h | 70 + arch/e2k/include/asm/unaligned.h | 25 + arch/e2k/include/asm/unistd.h | 60 + arch/e2k/include/asm/user.h | 225 + arch/e2k/include/asm/vga.h | 86 + arch/e2k/include/asm/vmlinux.lds.h | 14 + arch/e2k/include/asm/word-at-a-time.h | 50 + arch/e2k/include/uapi/asm/Kbuild | 1 + arch/e2k/include/uapi/asm/a.out.h | 35 + arch/e2k/include/uapi/asm/auxvec.h | 4 + arch/e2k/include/uapi/asm/bitsperlong.h | 8 + arch/e2k/include/uapi/asm/bootinfo.h | 211 + arch/e2k/include/uapi/asm/byteorder.h | 10 + arch/e2k/include/uapi/asm/e2k_api.h | 353 + arch/e2k/include/uapi/asm/e2k_syswork.h | 96 + arch/e2k/include/uapi/asm/errno.h | 6 + arch/e2k/include/uapi/asm/fcntl.h | 2 + arch/e2k/include/uapi/asm/ioctl.h | 1 + arch/e2k/include/uapi/asm/ioctls.h | 17 + arch/e2k/include/uapi/asm/ipcbuf.h | 27 + arch/e2k/include/uapi/asm/iset_ver.h | 55 + arch/e2k/include/uapi/asm/kexec.h | 26 + arch/e2k/include/uapi/asm/kvm.h | 488 + arch/e2k/include/uapi/asm/kvm_para.h | 57 + arch/e2k/include/uapi/asm/mas.h | 411 + arch/e2k/include/uapi/asm/mman.h | 101 + arch/e2k/include/uapi/asm/msgbuf.h | 27 + arch/e2k/include/uapi/asm/poll.h | 1 + arch/e2k/include/uapi/asm/posix_types.h | 11 + 
arch/e2k/include/uapi/asm/protected_mode.h | 79 + arch/e2k/include/uapi/asm/ptrace-abi.h | 78 + arch/e2k/include/uapi/asm/ptrace.h | 11 + arch/e2k/include/uapi/asm/resource.h | 12 + arch/e2k/include/uapi/asm/sembuf.h | 22 + arch/e2k/include/uapi/asm/setup.h | 6 + arch/e2k/include/uapi/asm/shmbuf.h | 38 + arch/e2k/include/uapi/asm/sigcontext.h | 72 + arch/e2k/include/uapi/asm/siginfo.h | 34 + arch/e2k/include/uapi/asm/signal.h | 97 + arch/e2k/include/uapi/asm/socket.h | 1 + arch/e2k/include/uapi/asm/sockios.h | 1 + arch/e2k/include/uapi/asm/stat.h | 46 + arch/e2k/include/uapi/asm/statfs.h | 6 + arch/e2k/include/uapi/asm/termbits.h | 6 + arch/e2k/include/uapi/asm/termios.h | 6 + arch/e2k/include/uapi/asm/types.h | 28 + arch/e2k/include/uapi/asm/ucontext.h | 17 + arch/e2k/include/uapi/asm/unistd.h | 470 + arch/e2k/kernel/.gitignore | 4 + arch/e2k/kernel/Makefile | 148 + arch/e2k/kernel/alternative.c | 96 + arch/e2k/kernel/asm-offsets.c | 238 + arch/e2k/kernel/backtrace.c | 282 + arch/e2k/kernel/clear_rf.S | 491 + arch/e2k/kernel/convert_array.c | 851 + arch/e2k/kernel/copy-hw-stacks.c | 34 + arch/e2k/kernel/cpu/Makefile | 55 + arch/e2k/kernel/cpu/cacheinfo.c | 216 + arch/e2k/kernel/cpu/e12c.c | 51 + arch/e2k/kernel/cpu/e16c.c | 51 + arch/e2k/kernel/cpu/e1cp.c | 53 + arch/e2k/kernel/cpu/e2c3.c | 49 + arch/e2k/kernel/cpu/e2s.c | 58 + arch/e2k/kernel/cpu/e8c.c | 56 + arch/e2k/kernel/cpu/e8c2.c | 56 + arch/e2k/kernel/cpu/es2.c | 59 + arch/e2k/kernel/cpu/iset_v2.c | 558 + arch/e2k/kernel/cpu/iset_v3.c | 183 + arch/e2k/kernel/cpu/iset_v5.c | 454 + arch/e2k/kernel/cpu/iset_v6.c | 688 + arch/e2k/kernel/cpu/recovery_string_v5.S | 468 + arch/e2k/kernel/devtree.c | 103 + arch/e2k/kernel/e2k-iommu.c | 1522 + arch/e2k/kernel/e2k.c | 438 + arch/e2k/kernel/e2k_sic.c | 674 + arch/e2k/kernel/e2k_syswork.c | 3808 + arch/e2k/kernel/elfcore.c | 150 + arch/e2k/kernel/entry_user.S | 127 + arch/e2k/kernel/fill_handler_entry.S | 1348 + arch/e2k/kernel/ftrace.c | 581 + 
arch/e2k/kernel/ftrace_graph_entry.S | 802 + arch/e2k/kernel/getsp.c | 292 + arch/e2k/kernel/hotplug-cpu.c | 61 + arch/e2k/kernel/hw_breakpoint.c | 436 + arch/e2k/kernel/io.c | 198 + arch/e2k/kernel/ioctl32.c | 30 + arch/e2k/kernel/kexec.c | 1038 + arch/e2k/kernel/kprobes.c | 488 + arch/e2k/kernel/ksyms.c | 7 + arch/e2k/kernel/libeprof/Makefile | 19 + arch/e2k/kernel/libeprof/libkeprof_24.c | 1781 + arch/e2k/kernel/libeprof/libkeprof_cur.c | 1736 + arch/e2k/kernel/mkclearwindow.c | 153 + arch/e2k/kernel/module.c | 130 + arch/e2k/kernel/monitors.c | 2527 + arch/e2k/kernel/nmi.c | 385 + arch/e2k/kernel/page_tables.S | 57 + arch/e2k/kernel/perf_event/Makefile | 14 + arch/e2k/kernel/perf_event/dimtp_trace.c | 567 + arch/e2k/kernel/perf_event/perf_event.c | 1377 + arch/e2k/kernel/perf_event/uncore.c | 311 + arch/e2k/kernel/perf_event/uncore_hc.c | 229 + arch/e2k/kernel/perf_event/uncore_hmu.c | 392 + arch/e2k/kernel/perf_event/uncore_iommu.c | 464 + arch/e2k/kernel/perf_event/uncore_mc.c | 309 + arch/e2k/kernel/perf_event/uncore_prepic.c | 231 + arch/e2k/kernel/perf_event/uncore_sic.c | 619 + arch/e2k/kernel/proc_context.c | 1856 + arch/e2k/kernel/proc_sclkr.c | 94 + arch/e2k/kernel/process.c | 2729 + arch/e2k/kernel/protected_mq_notify.c | 153 + arch/e2k/kernel/protected_syscalls.c | 4035 + arch/e2k/kernel/protected_timer_create.c | 301 + arch/e2k/kernel/ptrace.c | 1959 + arch/e2k/kernel/recovery.c | 165 + arch/e2k/kernel/rtc.c | 273 + arch/e2k/kernel/sclkr.c | 531 + arch/e2k/kernel/sec_space.c | 166 + arch/e2k/kernel/setup.c | 1205 + arch/e2k/kernel/signal.c | 2134 + arch/e2k/kernel/smp.c | 488 + arch/e2k/kernel/smpboot.c | 568 + arch/e2k/kernel/stacktrace.c | 124 + arch/e2k/kernel/sys_32.c | 93 + arch/e2k/kernel/sys_e2k.c | 132 + arch/e2k/kernel/systable.c | 3767 + arch/e2k/kernel/time.c | 101 + arch/e2k/kernel/topology.c | 189 + arch/e2k/kernel/trace.c | 2 + arch/e2k/kernel/trace_clock.c | 20 + arch/e2k/kernel/trace_stack.c | 641 + arch/e2k/kernel/trap_table.S | 
1139 + arch/e2k/kernel/traps.c | 1729 + arch/e2k/kernel/ttable-help.h | 125 + arch/e2k/kernel/ttable-inline.h | 859 + arch/e2k/kernel/ttable.c | 4805 + arch/e2k/kernel/ttable_tmp.c | 1 + arch/e2k/kernel/vmlinux.lds.S | 323 + arch/e2k/kvm/.gitignore | 3 + arch/e2k/kvm/Kconfig | 311 + arch/e2k/kvm/Makefile | 73 + arch/e2k/kvm/boot_spinlock.c | 789 + arch/e2k/kvm/cepic.c | 817 + arch/e2k/kvm/cepic.h | 140 + arch/e2k/kvm/complete.c | 126 + arch/e2k/kvm/complete.h | 12 + arch/e2k/kvm/cpu.c | 2415 + arch/e2k/kvm/cpu.h | 1627 + arch/e2k/kvm/cpu/Makefile | 13 + arch/e2k/kvm/cpu/iset_v2.c | 59 + arch/e2k/kvm/cpu/iset_v5.c | 59 + arch/e2k/kvm/cpu/iset_v6.c | 1292 + arch/e2k/kvm/cpu_defs.h | 1303 + arch/e2k/kvm/csd_lock.c | 436 + arch/e2k/kvm/debug.c | 192 + arch/e2k/kvm/gaccess.c | 995 + arch/e2k/kvm/gaccess.h | 280 + arch/e2k/kvm/gpid.c | 130 + arch/e2k/kvm/gpid.h | 54 + arch/e2k/kvm/gregs.h | 6 + arch/e2k/kvm/guest/Makefile | 98 + arch/e2k/kvm/guest/async_pf.c | 225 + arch/e2k/kvm/guest/boot.c | 851 + arch/e2k/kvm/guest/boot.h | 47 + arch/e2k/kvm/guest/boot_e2k_virt.c | 59 + arch/e2k/kvm/guest/boot_io.c | 122 + arch/e2k/kvm/guest/boot_spinlock.c | 89 + arch/e2k/kvm/guest/boot_string.c | 61 + arch/e2k/kvm/guest/boot_vram.c | 195 + arch/e2k/kvm/guest/cepic.c | 204 + arch/e2k/kvm/guest/cpu.h | 186 + arch/e2k/kvm/guest/e2k_virt.c | 424 + arch/e2k/kvm/guest/fast_syscalls.c | 89 + arch/e2k/kvm/guest/fast_syscalls.h | 17 + arch/e2k/kvm/guest/host_dump_stack.c | 918 + arch/e2k/kvm/guest/host_printk.c | 27 + arch/e2k/kvm/guest/host_time.c | 50 + arch/e2k/kvm/guest/io.c | 573 + arch/e2k/kvm/guest/io.h | 14 + arch/e2k/kvm/guest/irq.c | 371 + arch/e2k/kvm/guest/irq.h | 104 + arch/e2k/kvm/guest/lapic.c | 206 + arch/e2k/kvm/guest/mmu.c | 728 + arch/e2k/kvm/guest/paravirt.c | 1551 + arch/e2k/kvm/guest/paravirt.h | 84 + arch/e2k/kvm/guest/pic.h | 154 + arch/e2k/kvm/guest/process.c | 1292 + arch/e2k/kvm/guest/process.h | 38 + arch/e2k/kvm/guest/signal.c | 250 + arch/e2k/kvm/guest/smp.c | 
241 + arch/e2k/kvm/guest/spinlock.c | 141 + arch/e2k/kvm/guest/string.c | 152 + arch/e2k/kvm/guest/time.c | 756 + arch/e2k/kvm/guest/time.h | 19 + arch/e2k/kvm/guest/tlbflush.c | 152 + arch/e2k/kvm/guest/traps.c | 430 + arch/e2k/kvm/guest/traps.h | 42 + arch/e2k/kvm/guest/ttable.c | 933 + arch/e2k/kvm/hv_cpu.c | 1857 + arch/e2k/kvm/hv_mmu.c | 725 + arch/e2k/kvm/hv_mmu.h | 81 + arch/e2k/kvm/hypercalls.c | 1321 + arch/e2k/kvm/intercepts.c | 3037 + arch/e2k/kvm/intercepts.h | 543 + arch/e2k/kvm/io.c | 904 + arch/e2k/kvm/io.h | 71 + arch/e2k/kvm/ioapic.c | 444 + arch/e2k/kvm/ioapic.h | 109 + arch/e2k/kvm/ioepic.c | 569 + arch/e2k/kvm/ioepic.h | 65 + arch/e2k/kvm/irq.h | 268 + arch/e2k/kvm/irq_comm.c | 1021 + arch/e2k/kvm/kvm-e2k.c | 4674 + arch/e2k/kvm/kvm_timer.h | 57 + arch/e2k/kvm/lapic.c | 1707 + arch/e2k/kvm/lapic.h | 140 + arch/e2k/kvm/lt.c | 1351 + arch/e2k/kvm/lt.h | 85 + arch/e2k/kvm/lt_regs.h | 134 + arch/e2k/kvm/mm.c | 526 + arch/e2k/kvm/mman.h | 380 + arch/e2k/kvm/mmu-e2k.c | 10302 ++ arch/e2k/kvm/mmu-e2k.h | 51 + arch/e2k/kvm/mmu-pv-spt.c | 1512 + arch/e2k/kvm/mmu-pv.c | 1281 + arch/e2k/kvm/mmu-x86.h | 133 + arch/e2k/kvm/mmu.h | 1664 + arch/e2k/kvm/mmu_defs.h | 311 + arch/e2k/kvm/mmu_flush.c | 265 + arch/e2k/kvm/mmutrace-e2k.h | 337 + arch/e2k/kvm/nid.c | 218 + arch/e2k/kvm/page_track.c | 257 + arch/e2k/kvm/paging_tmpl.h | 2286 + arch/e2k/kvm/paravirt.c | 1880 + arch/e2k/kvm/pgtable-gp.h | 213 + arch/e2k/kvm/pic.h | 291 + arch/e2k/kvm/process.c | 2616 + arch/e2k/kvm/process.h | 837 + arch/e2k/kvm/pt-structs.c | 548 + arch/e2k/kvm/pv_mmu.h | 42 + arch/e2k/kvm/runstate.c | 81 + arch/e2k/kvm/sic-nbsr.c | 2802 + arch/e2k/kvm/sic-nbsr.h | 72 + arch/e2k/kvm/spinlock.c | 830 + arch/e2k/kvm/spmc.c | 1107 + arch/e2k/kvm/spmc.h | 150 + arch/e2k/kvm/spmc_regs.h | 117 + arch/e2k/kvm/string.h | 153 + arch/e2k/kvm/switch.c | 18 + arch/e2k/kvm/time.h | 114 + arch/e2k/kvm/timer.c | 375 + arch/e2k/kvm/trace_pgtable-gp.h | 45 + arch/e2k/kvm/trap_table.S | 495 + 
arch/e2k/kvm/ttable-inline.h | 909 + arch/e2k/kvm/ttable.c | 1101 + arch/e2k/kvm/user_area.c | 1424 + arch/e2k/kvm/user_area.h | 147 + arch/e2k/kvm/virq.c | 554 + arch/e2k/kvm/vmid.c | 72 + arch/e2k/kvm/vmid.h | 38 + arch/e2k/lib/Makefile | 22 + arch/e2k/lib/builtin.c | 67 + arch/e2k/lib/checksum.c | 176 + arch/e2k/lib/delay.c | 29 + arch/e2k/lib/recovery_string.S | 494 + arch/e2k/lib/string.c | 1047 + arch/e2k/lib/usercopy.c | 644 + arch/e2k/mm/Makefile | 13 + arch/e2k/mm/fault.c | 5092 + arch/e2k/mm/hugetlbpage.c | 182 + arch/e2k/mm/init.c | 902 + arch/e2k/mm/ioremap.c | 202 + arch/e2k/mm/memory.c | 710 + arch/e2k/mm/mmap.c | 173 + arch/e2k/mm/mmu.c | 994 + arch/e2k/mm/node_vmap.c | 419 + arch/e2k/mm/page_io.c | 1032 + arch/e2k/mm/pageattr.c | 774 + arch/e2k/mm/pgtable.c | 151 + arch/e2k/mm/tag_mem.c | 1200 + arch/e2k/p2v/Makefile | 34 + arch/e2k/p2v/boot_e2k_sic.c | 80 + arch/e2k/p2v/boot_find_bit.c | 48 + arch/e2k/p2v/boot_head.c | 952 + arch/e2k/p2v/boot_init.c | 4235 + arch/e2k/p2v/boot_map.c | 1611 + arch/e2k/p2v/boot_param.c | 343 + arch/e2k/p2v/boot_phys.c | 2617 + arch/e2k/p2v/boot_printk/Makefile | 5 + arch/e2k/p2v/boot_printk/am85c30.c | 137 + arch/e2k/p2v/boot_printk/boot_hvc_l.c | 92 + arch/e2k/p2v/boot_printk/console.c | 468 + arch/e2k/p2v/boot_profiling.c | 80 + arch/e2k/p2v/boot_recovery.c | 399 + arch/e2k/p2v/boot_smp.c | 242 + arch/e2k/p2v/boot_string.c | 334 + arch/e2k/p2v/boot_string.h | 22 + arch/e2k/p2v/cpu/Makefile | 41 + arch/e2k/p2v/cpu/boot_iset_v2.c | 17 + arch/e2k/p2v/cpu/boot_iset_v3.c | 22 + arch/e2k/p2v/cpu/boot_iset_v6.c | 92 + arch/e2k/p2v/cpu/e12c.c | 43 + arch/e2k/p2v/cpu/e16c.c | 43 + arch/e2k/p2v/cpu/e1cp.c | 39 + arch/e2k/p2v/cpu/e2c3.c | 43 + arch/e2k/p2v/cpu/e2s.c | 35 + arch/e2k/p2v/cpu/e8c.c | 35 + arch/e2k/p2v/cpu/e8c2.c | 35 + arch/e2k/p2v/cpu/es2.c | 35 + arch/e2k/p2v/machdep.c | 2 + arch/e2k/pci/Makefile | 2 + arch/e2k/pci/pci.c | 553 + arch/e2k/power/Makefile | 5 + arch/e2k/power/cpu.c | 27 + 
arch/e2k/power/hibernate.c | 423 + arch/l/Kconfig | 287 + arch/l/Kconfig.debug | 19 + arch/l/Makefile | 13 + arch/l/include/asm/acenv.h | 14 + arch/l/include/asm/acpi.h | 137 + arch/l/include/asm/apic.h | 776 + arch/l/include/asm/apicdef.h | 529 + arch/l/include/asm/boot_profiling.h | 35 + arch/l/include/asm/bootinfo.h | 315 + arch/l/include/asm/clk_rt.h | 22 + arch/l/include/asm/clkr.h | 6 + arch/l/include/asm/console.h | 59 + arch/l/include/asm/console_types.h | 31 + arch/l/include/asm/devtree.h | 13 + arch/l/include/asm/dma-direct.h | 29 + arch/l/include/asm/dma-mapping.h | 21 + arch/l/include/asm/epic.h | 117 + arch/l/include/asm/epic_regs.h | 669 + arch/l/include/asm/epicdef.h | 92 + arch/l/include/asm/gpio.h | 50 + arch/l/include/asm/hardirq.h | 55 + arch/l/include/asm/hw_irq.h | 141 + arch/l/include/asm/i2c-spi.h | 42 + arch/l/include/asm/idle.h | 7 + arch/l/include/asm/io_apic.h | 307 + arch/l/include/asm/io_epic.h | 77 + arch/l/include/asm/io_epic_regs.h | 147 + arch/l/include/asm/io_pic.h | 122 + arch/l/include/asm/iolinkmask.h | 606 + arch/l/include/asm/ipi.h | 164 + arch/l/include/asm/irq_numbers.h | 9 + arch/l/include/asm/irq_remapping.h | 6 + arch/l/include/asm/irq_vectors.h | 201 + arch/l/include/asm/irq_work.h | 16 + arch/l/include/asm/irqdomain.h | 63 + arch/l/include/asm/l-uncached.h | 11 + arch/l/include/asm/l_ide.h | 13 + arch/l/include/asm/l_pmc.h | 140 + arch/l/include/asm/l_spmc.h | 14 + arch/l/include/asm/l_timer.h | 104 + arch/l/include/asm/l_timer_regs.h | 119 + arch/l/include/asm/mpspec.h | 635 + arch/l/include/asm/msidef.h | 53 + arch/l/include/asm/nmi.h | 22 + arch/l/include/asm/of_device.h | 47 + arch/l/include/asm/pci.h | 170 + arch/l/include/asm/pci_l.h | 7 + arch/l/include/asm/pcie_fixup.h | 34 + arch/l/include/asm/percpu.h | 69 + arch/l/include/asm/pic.h | 370 + arch/l/include/asm/serial.h | 79 + arch/l/include/asm/setup.h | 12 + arch/l/include/asm/sic_regs.h | 332 + arch/l/include/asm/smp.h | 8 + arch/l/include/asm/swiotlb.h | 3 + 
arch/l/include/asm/tree_entry.h | 42 + arch/l/kernel/Makefile | 33 + arch/l/kernel/acpi/Makefile | 3 + arch/l/kernel/acpi/boot.c | 1692 + arch/l/kernel/acpi/cstate.c | 56 + arch/l/kernel/acpi/sleep.c | 61 + arch/l/kernel/acpi_tainted/Makefile | 1 + arch/l/kernel/acpi_tainted/l_spmc.c | 1035 + arch/l/kernel/am85c30.c | 175 + arch/l/kernel/apic/Makefile | 9 + arch/l/kernel/apic/apic.c | 2939 + arch/l/kernel/apic/apic_flat_64.c | 379 + arch/l/kernel/apic/io_apic.c | 4591 + arch/l/kernel/apic/ipi.c | 168 + arch/l/kernel/apic/irq.c | 236 + arch/l/kernel/apic/irq_work.c | 42 + arch/l/kernel/apic/msi.c | 365 + arch/l/kernel/apic/probe_64.c | 75 + arch/l/kernel/apic/smp.c | 131 + arch/l/kernel/boot_profiling.c | 191 + arch/l/kernel/clk_rt.c | 301 + arch/l/kernel/clkr.c | 293 + arch/l/kernel/console.c | 841 + arch/l/kernel/cpufreq.c | 89 + arch/l/kernel/devtree.c | 42 + arch/l/kernel/epic/Makefile | 7 + arch/l/kernel/epic/epic.c | 1051 + arch/l/kernel/epic/io_epic.c | 2394 + arch/l/kernel/epic/ipi.c | 125 + arch/l/kernel/epic/irq.c | 173 + arch/l/kernel/epic/irq_work.c | 26 + arch/l/kernel/epic/smp.c | 85 + arch/l/kernel/gpio.c | 662 + arch/l/kernel/i2c-spi/Makefile | 3 + arch/l/kernel/i2c-spi/core.c | 171 + arch/l/kernel/i2c-spi/i2c-devices.c | 283 + arch/l/kernel/i2c-spi/i2c.c | 784 + arch/l/kernel/i2c-spi/reset.c | 211 + arch/l/kernel/i2c-spi/spi-devices.c | 99 + arch/l/kernel/i2c-spi/spi.c | 779 + arch/l/kernel/l-i2c2.c | 428 + arch/l/kernel/l-iommu.c | 1142 + arch/l/kernel/l-mcmonitor.c | 187 + arch/l/kernel/l-uncached.c | 179 + arch/l/kernel/lt.c | 252 + arch/l/kernel/mpparse.c | 1410 + arch/l/kernel/panic2nvram.c | 247 + arch/l/kernel/pic_irq.c | 396 + arch/l/kernel/pmc/Makefile | 4 + arch/l/kernel/pmc/pmc.h | 196 + arch/l/kernel/pmc/pmc_drv.c | 684 + arch/l/kernel/pmc/pmc_e1cp.c | 487 + arch/l/kernel/pmc/pmc_hwmon.c | 905 + arch/l/kernel/pmc/pmc_s2.c | 199 + arch/l/kernel/procipcc2.c | 104 + arch/l/kernel/procmmpddiag.c | 101 + arch/l/kernel/procregs.c | 487 + 
arch/l/kernel/procshow.c | 760 + arch/l/kernel/setup.c | 303 + arch/l/pci/Kconfig | 28 + arch/l/pci/Makefile | 12 + arch/l/pci/acpi.c | 20 + arch/l/pci/common.c | 149 + arch/l/pci/direct.c | 18 + arch/l/pci/irq.c | 1157 + arch/l/pci/l_pci.c | 170 + arch/l/pci/numa.c | 374 + arch/l/pci/pci.c | 744 + arch/l/pci/pci.h | 93 + arch/sparc/Kbuild | 2 + arch/sparc/Kconfig | 135 +- arch/sparc/Makefile | 43 +- arch/sparc/boot/dts/Makefile | 2 + arch/sparc/boot/dts/include/dt-bindings | 1 + arch/sparc/boot/dts/r2000_m1r-uvp.dts | 183 + arch/sparc/configs/build-config | 131 + arch/sparc/configs/mcst_rt.config | 1 + arch/sparc/configs/sparc64_defconfig | 4125 +- arch/sparc/include/asm-l | 1 + arch/sparc/include/asm/acpi.h | 8 + arch/sparc/include/asm/apic.h | 40 + arch/sparc/include/asm/apic_regs.h | 276 + arch/sparc/include/asm/apicdef.h | 9 + arch/sparc/include/asm/atomic_64.h | 19 + arch/sparc/include/asm/barrier_64.h | 92 + arch/sparc/include/asm/bitops_64.h | 18 +- arch/sparc/include/asm/bootinfo.h | 279 + arch/sparc/include/asm/cache.h | 7 +- arch/sparc/include/asm/cacheflush_64.h | 16 +- arch/sparc/include/asm/cmpxchg_64.h | 35 +- arch/sparc/include/asm/console.h | 24 + arch/sparc/include/asm/device.h | 17 +- arch/sparc/include/asm/dma-mapping.h | 22 + arch/sparc/include/asm/e90s.h | 467 + arch/sparc/include/asm/el_posix.h | 10 + arch/sparc/include/asm/el_posix_64.h | 93 + arch/sparc/include/asm/elf_64.h | 16 +- arch/sparc/include/asm/epic.h | 41 + arch/sparc/include/asm/epic_regs.h | 4 + arch/sparc/include/asm/epicdef.h | 4 + arch/sparc/include/asm/fb.h | 5 +- arch/sparc/include/asm/futex_64.h | 34 + arch/sparc/include/asm/gpio.h | 6 + arch/sparc/include/asm/hardirq.h | 4 + arch/sparc/include/asm/head_64.h | 12 +- arch/sparc/include/asm/hw_irq.h | 4 +- arch/sparc/include/asm/io_64.h | 449 +- arch/sparc/include/asm/io_apic.h | 7 + arch/sparc/include/asm/io_apic_regs.h | 89 + arch/sparc/include/asm/io_epic.h | 24 + arch/sparc/include/asm/io_epic_regs.h | 4 + 
arch/sparc/include/asm/iolinkmask.h | 7 + arch/sparc/include/asm/iommu.h | 2 + arch/sparc/include/asm/irq_64.h | 18 +- arch/sparc/include/asm/irq_remapping.h | 6 + arch/sparc/include/asm/irq_vectors.h | 41 + arch/sparc/include/asm/irq_work.h | 10 + arch/sparc/include/asm/irqdomain.h | 6 + arch/sparc/include/asm/l-iommu.h | 74 + arch/sparc/include/asm/l-mcmonitor.h | 153 + arch/sparc/include/asm/l_ide.h | 15 + arch/sparc/include/asm/l_ide32.h | 354 + arch/sparc/include/asm/l_pmc.h | 26 + arch/sparc/include/asm/l_spmc.h | 8 + arch/sparc/include/asm/l_timer.h | 16 + arch/sparc/include/asm/l_timer_regs.h | 107 + arch/sparc/include/asm/machdep.h | 16 + arch/sparc/include/asm/mman.h | 9 +- arch/sparc/include/asm/mmu_64.h | 5 + arch/sparc/include/asm/mmu_context_64.h | 44 + arch/sparc/include/asm/mpspec.h | 16 + arch/sparc/include/asm/msidef.h | 6 + arch/sparc/include/asm/oplib_64.h | 5 + arch/sparc/include/asm/page.h | 1 + arch/sparc/include/asm/page_64.h | 16 +- arch/sparc/include/asm/pci.h | 7 +- arch/sparc/include/asm/pci_e90s.h | 66 + arch/sparc/include/asm/pcr.h | 80 + arch/sparc/include/asm/percpu_64.h | 5 + arch/sparc/include/asm/perf_event.h | 5 + arch/sparc/include/asm/pgtable_64.h | 109 +- arch/sparc/include/asm/pil.h | 3 + arch/sparc/include/asm/processor_32.h | 4 + arch/sparc/include/asm/processor_64.h | 51 + arch/sparc/include/asm/sections.h | 36 + arch/sparc/include/asm/serial.h | 10 + arch/sparc/include/asm/setup.h | 8 +- arch/sparc/include/asm/sic_regs.h | 247 + arch/sparc/include/asm/smp_64.h | 5 +- arch/sparc/include/asm/spitfire.h | 4 + arch/sparc/include/asm/switch_to_64.h | 35 +- arch/sparc/include/asm/thread_info_32.h | 3 + arch/sparc/include/asm/thread_info_64.h | 57 +- arch/sparc/include/asm/timer.h | 2 +- arch/sparc/include/asm/timex_64.h | 37 + arch/sparc/include/asm/tlbflush_64.h | 5 +- arch/sparc/include/asm/topology.h | 4 + arch/sparc/include/asm/topology_e90s.h | 137 + arch/sparc/include/asm/trap_block.h | 26 + arch/sparc/include/asm/tsb.h | 
18 +- arch/sparc/include/asm/uaccess_64.h | 35 +- arch/sparc/include/uapi/asm/asi.h | 5 + arch/sparc/include/uapi/asm/ioctls.h | 3 + arch/sparc/include/uapi/asm/mman.h | 2 + arch/sparc/include/uapi/asm/perfctr.h | 27 +- arch/sparc/include/uapi/asm/ptrace.h | 9 + arch/sparc/include/uapi/asm/unistd.h | 2 +- arch/sparc/kernel/Makefile | 45 +- arch/sparc/kernel/cpu.c | 29 + arch/sparc/kernel/devtree.c | 122 + arch/sparc/kernel/e90s.c | 166 + arch/sparc/kernel/e90s_sic.c | 272 + arch/sparc/kernel/etrap_64.S | 35 +- arch/sparc/kernel/head_64.S | 380 +- arch/sparc/kernel/helpers.S | 7 +- arch/sparc/kernel/irq_32.c | 4 + arch/sparc/kernel/irq_64.c | 4 + arch/sparc/kernel/irq_e90s.c | 176 + arch/sparc/kernel/ivec.S | 101 + arch/sparc/kernel/kprobes.c | 2 +- arch/sparc/kernel/ktlb.S | 15 +- arch/sparc/kernel/module.c | 3 + arch/sparc/kernel/of_device_64.c | 9 + arch/sparc/kernel/of_device_common.c | 2 + arch/sparc/kernel/pci_e90s.c | 361 + arch/sparc/kernel/pcr_e90s.c | 79 + arch/sparc/kernel/perf_event_e90s.c | 804 + arch/sparc/kernel/process_64.c | 131 +- arch/sparc/kernel/procpfreg_e90s.c | 246 + arch/sparc/kernel/ptrace_64.c | 4 + arch/sparc/kernel/rtrap_64.S | 39 + arch/sparc/kernel/setup_64.c | 162 +- arch/sparc/kernel/signal_64.c | 1 + arch/sparc/kernel/smp_64.c | 67 +- arch/sparc/kernel/smp_e90s.c | 868 + arch/sparc/kernel/sys32.S | 2 +- arch/sparc/kernel/sys_sparc32.c | 195 +- arch/sparc/kernel/sys_sparc_64.c | 111 +- arch/sparc/kernel/syscalls/syscall.tbl | 3 +- arch/sparc/kernel/time_64.c | 32 +- arch/sparc/kernel/time_e90s.c | 175 + arch/sparc/kernel/trampoline_64.S | 12 + arch/sparc/kernel/trampoline_e90s.S | 39 + arch/sparc/kernel/traps_64.c | 482 +- arch/sparc/kernel/tsb.S | 30 +- arch/sparc/kernel/ttable_64.S | 71 +- arch/sparc/kernel/unaligned_64.c | 536 +- arch/sparc/kernel/vmlinux.lds.S | 9 + arch/sparc/lib/GENpatch.S | 4 + arch/sparc/lib/M7patch.S | 4 + arch/sparc/lib/NG2patch.S | 4 + arch/sparc/lib/NG4clear_page.S | 2 + arch/sparc/lib/NG4patch.S | 4 + 
arch/sparc/lib/NGpatch.S | 4 + arch/sparc/lib/PeeCeeI.c | 188 + arch/sparc/lib/U1copy_to_user.S | 4 + arch/sparc/lib/U3memcpy.S | 7 + arch/sparc/lib/U3patch.S | 4 + arch/sparc/lib/atomic_64.S | 21 + arch/sparc/lib/bitops.S | 39 + arch/sparc/math-emu/math_64.c | 237 +- arch/sparc/mm/Makefile | 6 +- arch/sparc/mm/fault_64.c | 234 +- arch/sparc/mm/hugetlbpage.c | 16 + arch/sparc/mm/init_64.c | 34 +- arch/sparc/mm/init_64.h | 4 + arch/sparc/mm/init_e90s.c | 2153 + arch/sparc/mm/srmmu.c | 55 + arch/sparc/mm/tlb.c | 48 +- arch/sparc/mm/tsb.c | 57 +- arch/sparc/mm/ultra.S | 221 +- arch/sparc/prom/printf.c | 3 +- arch/x86/Kconfig | 5 + arch/x86/Makefile | 10 +- arch/x86/configs/mcst.config | 3202 + arch/x86/entry/syscalls/syscall_32.tbl | 2 + arch/x86/entry/syscalls/syscall_64.tbl | 3 + arch/x86/include/asm-l | 1 + arch/x86/include/asm/cpu.h | 1 + arch/x86/include/asm/gpio.h | 21 + arch/x86/include/asm/kvm_page_track.h | 4 +- arch/x86/include/asm/pci.h | 7 + arch/x86/include/asm/setup.h | 4 + arch/x86/include/asm/thread_info.h | 9 + arch/x86/include/asm/tsc.h | 39 + arch/x86/kernel/Makefile | 3 + arch/x86/kernel/apic/apic.c | 70 + arch/x86/kernel/irq.c | 4 + arch/x86/kernel/mcst_eth_mac.c | 50 + arch/x86/kernel/tsc.c | 4 + config-elbrus-def | 7869 ++ crypto/hmac.c | 48 + drivers/Kconfig | 2 + drivers/Makefile | 1 + drivers/ata/ahci.c | 63 + drivers/ata/libahci.c | 80 + drivers/ata/libata-core.c | 149 +- drivers/ata/libata-scsi.c | 107 +- drivers/base/component.c | 1 + drivers/base/platform.c | 2 +- drivers/base/regmap/regcache-rbtree.c | 4 + drivers/base/regmap/regmap.c | 4 + drivers/block/virtio_blk.c | 17 + drivers/char/Kconfig | 2 +- drivers/char/Makefile | 1 + drivers/char/hw_random/Kconfig | 3 +- drivers/char/kdumper.c | 102 + drivers/char/mem.c | 20 + drivers/char/random.c | 16 + drivers/char/rtc.c | 5 + drivers/char/virtio_console.c | 17 + drivers/cpufreq/Kconfig | 25 + drivers/cpufreq/Makefile | 2 + drivers/cpufreq/cpufreq.c | 7 + drivers/cpufreq/cpufreq_ondemand.c 
| 6 + drivers/cpufreq/cpufreq_pstates.c | 504 + drivers/cpufreq/e2k-pcs-cpufreq.c | 432 + drivers/cpuidle/Kconfig | 13 +- drivers/cpuidle/Kconfig.e2k | 10 + drivers/cpuidle/Kconfig.e90s | 11 + drivers/cpuidle/Makefile | 9 + drivers/cpuidle/cpuidle-e2k.c | 197 + drivers/cpuidle/cpuidle-e90s.c | 164 + drivers/cpuidle/sysfs.c | 36 +- drivers/edac/Kconfig | 7 + drivers/edac/Makefile | 1 + drivers/edac/e2k_edac.c | 510 + drivers/gpio/gpiolib.c | 7 + drivers/gpu/drm/Kconfig | 8 + drivers/gpu/drm/Makefile | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 11 + drivers/gpu/drm/ast/ast_drv.c | 5 + drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 549 +- drivers/gpu/drm/bridge/synopsys/dw-hdmi.h | 5 + drivers/gpu/drm/drm_atomic_helper.c | 8 +- drivers/gpu/drm/drm_bufs.c | 2 +- drivers/gpu/drm/drm_lock.c | 9 + drivers/gpu/drm/drm_vm.c | 2 +- drivers/gpu/drm/i2c/sil164_drv.c | 31 +- drivers/gpu/drm/imx/Kconfig | 1 - drivers/gpu/drm/imx/dw_hdmi-imx.c | 246 +- drivers/gpu/drm/radeon/cik.c | 10 +- drivers/gpu/drm/radeon/evergreen.c | 26 +- drivers/gpu/drm/radeon/r600.c | 2 +- drivers/gpu/drm/radeon/r600_dma.c | 6 + drivers/gpu/drm/radeon/radeon.h | 8 +- drivers/gpu/drm/radeon/radeon_bios.c | 5 + drivers/gpu/drm/radeon/radeon_device.c | 40 +- drivers/gpu/drm/radeon/radeon_drv.c | 11 +- drivers/gpu/drm/radeon/radeon_fb.c | 226 +- drivers/gpu/drm/radeon/radeon_fence.c | 10 +- drivers/gpu/drm/radeon/radeon_kms.c | 10 + drivers/gpu/drm/radeon/radeon_test.c | 41 +- drivers/gpu/drm/radeon/radeon_uvd.c | 34 +- drivers/gpu/drm/radeon/radeon_vce.c | 9 +- drivers/gpu/drm/radeon/si.c | 61 +- drivers/gpu/drm/radeon/vce_v1_0.c | 17 + drivers/gpu/drm/ttm/ttm_bo_util.c | 2 +- drivers/gpu/drm/ttm/ttm_page_alloc.c | 7 +- drivers/gpu/drm/udl/udl_modeset.c | 2 +- drivers/gpu/drm/vivante/Makefile | 29 + drivers/gpu/drm/vivante/vivante_drv.c | 148 + drivers/gpu/drm/vivante/vivante_drv.h | 38 + drivers/hwmon/Kconfig | 40 + drivers/hwmon/Makefile | 2 + drivers/hwmon/adt7475.c | 149 +- drivers/hwmon/l_p1mon.c | 
589 + drivers/hwmon/lm63.c | 197 +- drivers/hwmon/lm95231.c | 531 + drivers/hwmon/pmbus/Kconfig | 10 + drivers/hwmon/pmbus/Makefile | 1 + drivers/hwmon/pmbus/max20730.c | 527 + drivers/hwmon/pmbus/pmbus_core.c | 42 +- drivers/i2c/muxes/Kconfig | 11 + drivers/i2c/muxes/i2c-mux-ltc4306.c | 4 + drivers/ide/Kconfig | 8 + drivers/ide/Makefile | 1 + drivers/ide/ide-dma.c | 11 +- drivers/ide/ide-io.c | 3 + drivers/ide/ide-probe.c | 18 + drivers/ide/l_ide.c | 641 + drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c | 26 + drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c | 8 + drivers/infiniband/core/iwpm_util.c | 4 + drivers/infiniband/hw/mlx5/main.c | 12 + drivers/input/keyboard/atkbd.c | 2 +- drivers/input/misc/Kconfig | 30 +- drivers/input/misc/Makefile | 1 + drivers/input/misc/ltc2954.c | 367 + drivers/input/mousedev.c | 93 + drivers/input/serio/i8042-io.h | 87 +- drivers/input/serio/i8042-x86ia64io.h | 7 + drivers/input/serio/i8042.c | 18 + drivers/input/touchscreen/goodix.c | 29 +- drivers/input/touchscreen/usbtouchscreen.c | 211 +- drivers/iommu/dma-iommu.c | 199 +- drivers/mcst/BigEv2/Makefile | 1 + drivers/mcst/BigEv2/pcie_driver/Makefile | 3 + drivers/mcst/BigEv2/pcie_driver/bige.c | 1091 + drivers/mcst/BigEv2/pcie_driver/bige.h | 57 + drivers/mcst/BigEv2/pcie_driver/bige_ioctl.h | 58 + drivers/mcst/Kconfig | 198 + drivers/mcst/Makefile | 40 + drivers/mcst/apkpwr/Makefile | 6 + drivers/mcst/apkpwr/apkpwr.c | 1457 + drivers/mcst/ddi/Makefile | 15 + drivers/mcst/ddi/ddi.c | 494 + drivers/mcst/ddi/ddi_arch.c | 157 + drivers/mcst/ddi/ddi_cv.c | 175 + drivers/mcst/dmp_assist/Makefile | 2 + drivers/mcst/dmp_assist/dmp_assist.c | 234 + drivers/mcst/dmp_assist/dmp_assist.h | 20 + drivers/mcst/eldsp/Makefile | 2 + drivers/mcst/eldsp/eldsp.c | 3110 + drivers/mcst/eldsp/eldsp.h | 527 + drivers/mcst/emc/Makefile | 5 + drivers/mcst/emc/emc2305.c | 957 + drivers/mcst/gpu-imgtec/GPL-COPYING | 339 + drivers/mcst/gpu-imgtec/INSTALL | 58 + drivers/mcst/gpu-imgtec/Kconfig | 55 + 
drivers/mcst/gpu-imgtec/MIT-COPYING | 41 + drivers/mcst/gpu-imgtec/Makefile | 60 + drivers/mcst/gpu-imgtec/README | 32 + drivers/mcst/gpu-imgtec/build/linux/bits.mk | 126 + .../mcst/gpu-imgtec/build/linux/buildvars.mk | 391 + .../mcst/gpu-imgtec/build/linux/commands.mk | 555 + .../gpu-imgtec/build/linux/common/3rdparty.mk | 119 + .../build/linux/common/chromiumos_kernel.mk | 56 + .../mcst/gpu-imgtec/build/linux/common/lws.mk | 54 + .../gpu-imgtec/build/linux/common/testchip.mk | 126 + .../common/window_systems/lws-generic.mk | 45 + .../linux/common/window_systems/nulldrmws.mk | 65 + .../common/window_systems/surfaceless.mk | 50 + .../linux/common/window_systems/wayland.mk | 49 + .../build/linux/common/window_systems/xorg.mk | 51 + .../gpu-imgtec/build/linux/config/compiler.mk | 299 + .../config/compilers/aarch64-linux-gnu.mk | 11 + .../config/compilers/arm-linux-gnueabi.mk | 6 + .../config/compilers/arm-linux-gnueabihf.mk | 6 + .../linux/config/compilers/e2k-linux-gnu.mk | 2 + .../linux/config/compilers/i386-linux-gnu.mk | 11 + .../compilers/mips64el-buildroot-linux-gnu.mk | 5 + .../compilers/mipsel-buildroot-linux-gnu.mk | 2 + .../config/compilers/riscv64-linux-gnu.mk | 2 + .../config/compilers/x86_64-linux-gnu.mk | 34 + .../gpu-imgtec/build/linux/config/core.mk | 1977 + .../build/linux/config/core_volcanic.mk | 1892 + .../linux/config/default_window_system.mk | 43 + .../gpu-imgtec/build/linux/config/help.mk | 74 + .../build/linux/config/kernel-defs.mk | 143 + .../build/linux/config/kernel_version.mk | 103 + .../build/linux/config/preconfig.mk | 387 + .../build/linux/config/window_system.mk | 191 + drivers/mcst/gpu-imgtec/build/linux/defs.mk | 304 + .../gpu-imgtec/build/linux/e2c3_gpu/Makefile | 94 + .../build/linux/fpga_linux/Makefile | 44 + .../build/linux/kbuild/Makefile.template | 90 + .../gpu-imgtec/build/linux/kbuild/kbuild.mk | 116 + .../build/linux/moduledefs/host_i386.mk | 81 + .../build/linux/moduledefs/host_x86_64.mk | 87 + 
.../build/linux/moduledefs/target_aarch64.mk | 263 + .../build/linux/moduledefs/target_armel.mk | 86 + .../build/linux/moduledefs/target_armhf.mk | 86 + .../build/linux/moduledefs/target_e2k.mk | 24 + .../build/linux/moduledefs/target_i686.mk | 273 + .../linux/moduledefs/target_mips32r6el.mk | 200 + .../linux/moduledefs/target_mips64r6el.mk | 217 + .../build/linux/moduledefs/target_neutral.mk | 44 + .../build/linux/moduledefs/target_riscv64.mk | 82 + .../build/linux/moduledefs/target_x86_64.mk | 267 + .../build/linux/moduledefs_common.mk | 152 + .../gpu-imgtec/build/linux/moduledefs_libs.mk | 149 + .../mcst/gpu-imgtec/build/linux/modules.mk | 48 + .../build/linux/modules/kernel_module.mk | 92 + .../build/linux/mt8173_linux/Makefile | 83 + .../build/linux/nohw_linux/Makefile | 71 + .../mcst/gpu-imgtec/build/linux/packaging.mk | 130 + .../gpu-imgtec/build/linux/plato/Makefile | 184 + .../gpu-imgtec/build/linux/prepare_tree.mk | 56 + .../mcst/gpu-imgtec/build/linux/pvrversion.mk | 57 + .../mcst/gpu-imgtec/build/linux/scripts.mk | 326 + .../build/linux/scripts/install.sh.tpl | 795 + .../gpu-imgtec/build/linux/tc_linux/Makefile | 109 + .../gpu-imgtec/build/linux/this_makefile.mk | 68 + .../gpu-imgtec/build/linux/tools/cc-check.sh | 120 + .../mcst/gpu-imgtec/build/linux/toplevel.mk | 442 + drivers/mcst/gpu-imgtec/config_kernel.h | 149 + drivers/mcst/gpu-imgtec/config_kernel.mk | 35 + .../gpu-imgtec/copy-to-kernel-tc/apollo.mk | 4 + .../copy-to-kernel-tc/config_kernel.h | 160 + .../copy-to-kernel-tc/config_kernel.mk | 39 + .../copy-to-kernel-tc/copy_items.sh | 443 + .../gpu-imgtec/copy-to-kernel-tc/drm_pdp.mk | 13 + .../gpu-imgtec/copy-to-kernel-tc/pvrsrvkm.mk | 138 + drivers/mcst/gpu-imgtec/copy-to-kernel.sh | 45 + .../rogue/cache_bridge/client_cache_bridge.h | 101 + .../cache_bridge/client_cache_direct_bridge.c | 143 + .../rogue/cache_bridge/common_cache_bridge.h | 128 + .../rogue/cache_bridge/server_cache_bridge.c | 503 + .../rogue/cmm_bridge/common_cmm_bridge.h | 113 
+ .../rogue/cmm_bridge/server_cmm_bridge.c | 445 + .../client_devicememhistory_bridge.h | 132 + .../client_devicememhistory_direct_bridge.c | 221 + .../common_devicememhistory_bridge.h | 184 + .../server_devicememhistory_bridge.c | 885 + .../dmabuf_bridge/common_dmabuf_bridge.h | 126 + .../dmabuf_bridge/server_dmabuf_bridge.c | 545 + .../htbuffer_bridge/client_htbuffer_bridge.h | 75 + .../client_htbuffer_direct_bridge.c | 91 + .../htbuffer_bridge/common_htbuffer_bridge.h | 102 + .../htbuffer_bridge/server_htbuffer_bridge.c | 346 + .../rogue/mm_bridge/client_mm_bridge.h | 377 + .../rogue/mm_bridge/client_mm_direct_bridge.c | 887 + .../rogue/mm_bridge/common_mm_bridge.h | 782 + .../rogue/mm_bridge/server_mm_bridge.c | 3502 + .../rogue/pdump_bridge/client_pdump_bridge.h | 153 + .../pdump_bridge/client_pdump_direct_bridge.c | 228 + .../rogue/pdump_bridge/common_pdump_bridge.h | 180 + .../rogue/pdump_bridge/server_pdump_bridge.c | 758 + .../client_pdumpctrl_bridge.h | 82 + .../client_pdumpctrl_direct_bridge.c | 120 + .../common_pdumpctrl_bridge.h | 148 + .../server_pdumpctrl_bridge.c | 266 + .../pdumpmm_bridge/client_pdumpmm_bridge.h | 169 + .../client_pdumpmm_direct_bridge.c | 300 + .../pdumpmm_bridge/common_pdumpmm_bridge.h | 260 + .../pdumpmm_bridge/server_pdumpmm_bridge.c | 1029 + .../rogue/pvrtl_bridge/client_pvrtl_bridge.h | 113 + .../pvrtl_bridge/client_pvrtl_direct_bridge.c | 200 + .../rogue/pvrtl_bridge/common_pvrtl_bridge.h | 214 + .../rogue/pvrtl_bridge/server_pvrtl_bridge.c | 880 + .../common_rgxbreakpoint_bridge.h | 148 + .../server_rgxbreakpoint_bridge.c | 400 + .../rgxcmp_bridge/common_rgxcmp_bridge.h | 228 + .../rgxcmp_bridge/server_rgxcmp_bridge.c | 1201 + .../rgxfwdbg_bridge/common_rgxfwdbg_bridge.h | 182 + .../rgxfwdbg_bridge/server_rgxfwdbg_bridge.c | 316 + .../common_rgxhwperf_bridge.h | 152 + .../server_rgxhwperf_bridge.c | 538 + .../common_rgxkicksync_bridge.h | 143 + .../server_rgxkicksync_bridge.c | 626 + 
.../rgxpdump_bridge/client_rgxpdump_bridge.h | 72 + .../client_rgxpdump_direct_bridge.c | 99 + .../rgxpdump_bridge/common_rgxpdump_bridge.h | 109 + .../rgxpdump_bridge/server_rgxpdump_bridge.c | 176 + .../common_rgxregconfig_bridge.h | 145 + .../server_rgxregconfig_bridge.c | 251 + .../common_rgxsignals_bridge.h | 76 + .../server_rgxsignals_bridge.c | 174 + .../rgxta3d_bridge/common_rgxta3d_bridge.h | 425 + .../rgxta3d_bridge/server_rgxta3d_bridge.c | 2516 + .../rgxtq2_bridge/common_rgxtq2_bridge.h | 227 + .../rgxtq2_bridge/server_rgxtq2_bridge.c | 1333 + .../rogue/rgxtq_bridge/common_rgxtq_bridge.h | 175 + .../rogue/rgxtq_bridge/server_rgxtq_bridge.c | 1339 + .../rogue/ri_bridge/client_ri_bridge.h | 120 + .../rogue/ri_bridge/client_ri_direct_bridge.c | 217 + .../rogue/ri_bridge/common_ri_bridge.h | 224 + .../rogue/ri_bridge/server_ri_bridge.c | 808 + .../srvcore_bridge/common_srvcore_bridge.h | 370 + .../srvcore_bridge/server_srvcore_bridge.c | 1164 + .../rogue/sync_bridge/client_sync_bridge.h | 142 + .../sync_bridge/client_sync_direct_bridge.c | 314 + .../rogue/sync_bridge/common_sync_bridge.h | 253 + .../rogue/sync_bridge/server_sync_bridge.c | 818 + .../client_synctracking_bridge.h | 78 + .../client_synctracking_direct_bridge.c | 103 + .../common_synctracking_bridge.h | 96 + .../server_synctracking_bridge.c | 347 + .../cache_bridge/client_cache_bridge.h | 101 + .../cache_bridge/client_cache_direct_bridge.c | 143 + .../cache_bridge/common_cache_bridge.h | 128 + .../cache_bridge/server_cache_bridge.c | 503 + .../volcanic/cmm_bridge/common_cmm_bridge.h | 113 + .../volcanic/cmm_bridge/server_cmm_bridge.c | 445 + .../client_devicememhistory_bridge.h | 132 + .../client_devicememhistory_direct_bridge.c | 221 + .../common_devicememhistory_bridge.h | 184 + .../server_devicememhistory_bridge.c | 885 + .../dmabuf_bridge/common_dmabuf_bridge.h | 126 + .../dmabuf_bridge/server_dmabuf_bridge.c | 545 + .../htbuffer_bridge/client_htbuffer_bridge.h | 75 + 
.../client_htbuffer_direct_bridge.c | 91 + .../htbuffer_bridge/common_htbuffer_bridge.h | 102 + .../htbuffer_bridge/server_htbuffer_bridge.c | 346 + .../volcanic/mm_bridge/client_mm_bridge.h | 377 + .../mm_bridge/client_mm_direct_bridge.c | 887 + .../volcanic/mm_bridge/common_mm_bridge.h | 782 + .../volcanic/mm_bridge/server_mm_bridge.c | 3502 + .../pdump_bridge/client_pdump_bridge.h | 153 + .../pdump_bridge/client_pdump_direct_bridge.c | 228 + .../pdump_bridge/common_pdump_bridge.h | 180 + .../pdump_bridge/server_pdump_bridge.c | 758 + .../client_pdumpctrl_bridge.h | 82 + .../client_pdumpctrl_direct_bridge.c | 120 + .../common_pdumpctrl_bridge.h | 148 + .../server_pdumpctrl_bridge.c | 266 + .../pdumpmm_bridge/client_pdumpmm_bridge.h | 169 + .../client_pdumpmm_direct_bridge.c | 300 + .../pdumpmm_bridge/common_pdumpmm_bridge.h | 260 + .../pdumpmm_bridge/server_pdumpmm_bridge.c | 1029 + .../pvrtl_bridge/client_pvrtl_bridge.h | 113 + .../pvrtl_bridge/client_pvrtl_direct_bridge.c | 200 + .../pvrtl_bridge/common_pvrtl_bridge.h | 214 + .../pvrtl_bridge/server_pvrtl_bridge.c | 880 + .../rgxcmp_bridge/common_rgxcmp_bridge.h | 226 + .../rgxcmp_bridge/server_rgxcmp_bridge.c | 1038 + .../rgxfwdbg_bridge/common_rgxfwdbg_bridge.h | 182 + .../rgxfwdbg_bridge/server_rgxfwdbg_bridge.c | 316 + .../common_rgxhwperf_bridge.h | 134 + .../server_rgxhwperf_bridge.c | 409 + .../common_rgxkicksync_bridge.h | 143 + .../server_rgxkicksync_bridge.c | 626 + .../rgxpdump_bridge/client_rgxpdump_bridge.h | 72 + .../client_rgxpdump_direct_bridge.c | 99 + .../rgxpdump_bridge/common_rgxpdump_bridge.h | 109 + .../rgxpdump_bridge/server_rgxpdump_bridge.c | 176 + .../common_rgxregconfig_bridge.h | 145 + .../server_rgxregconfig_bridge.c | 251 + .../common_rgxsignals_bridge.h | 76 + .../server_rgxsignals_bridge.c | 174 + .../rgxta3d_bridge/common_rgxta3d_bridge.h | 416 + .../rgxta3d_bridge/server_rgxta3d_bridge.c | 2454 + .../rgxtq2_bridge/common_rgxtq2_bridge.h | 225 + 
.../rgxtq2_bridge/server_rgxtq2_bridge.c | 1115 + .../volcanic/ri_bridge/client_ri_bridge.h | 120 + .../ri_bridge/client_ri_direct_bridge.c | 217 + .../volcanic/ri_bridge/common_ri_bridge.h | 224 + .../volcanic/ri_bridge/server_ri_bridge.c | 808 + .../srvcore_bridge/common_srvcore_bridge.h | 370 + .../srvcore_bridge/server_srvcore_bridge.c | 1164 + .../volcanic/sync_bridge/client_sync_bridge.h | 142 + .../sync_bridge/client_sync_direct_bridge.c | 314 + .../volcanic/sync_bridge/common_sync_bridge.h | 253 + .../volcanic/sync_bridge/server_sync_bridge.c | 818 + .../client_synctracking_bridge.h | 78 + .../client_synctracking_direct_bridge.c | 103 + .../common_synctracking_bridge.h | 96 + .../server_synctracking_bridge.c | 347 + .../rogue/km/configs/rgxconfig_km_1.V.2.30.h | 75 + .../rogue/km/configs/rgxconfig_km_1.V.4.19.h | 75 + .../rogue/km/configs/rgxconfig_km_1.V.4.5.h | 74 + .../rogue/km/configs/rgxconfig_km_15.V.1.64.h | 78 + .../km/configs/rgxconfig_km_22.V.104.18.h | 81 + .../km/configs/rgxconfig_km_22.V.104.218.h | 83 + .../km/configs/rgxconfig_km_22.V.208.318.h | 83 + .../km/configs/rgxconfig_km_22.V.21.16.h | 81 + .../km/configs/rgxconfig_km_22.V.22.25.h | 81 + .../km/configs/rgxconfig_km_22.V.54.25.h | 81 + .../km/configs/rgxconfig_km_22.V.54.30.h | 81 + .../km/configs/rgxconfig_km_22.V.54.330.h | 83 + .../km/configs/rgxconfig_km_22.V.54.38.h | 81 + .../km/configs/rgxconfig_km_24.V.104.504.h | 86 + .../km/configs/rgxconfig_km_24.V.208.504.h | 86 + .../km/configs/rgxconfig_km_24.V.208.505.h | 86 + .../km/configs/rgxconfig_km_24.V.54.204.h | 86 + .../km/configs/rgxconfig_km_29.V.108.208.h | 89 + .../km/configs/rgxconfig_km_29.V.52.202.h | 89 + .../km/configs/rgxconfig_km_29.V.54.208.h | 89 + .../rogue/km/configs/rgxconfig_km_4.V.2.51.h | 82 + .../rogue/km/configs/rgxconfig_km_4.V.2.58.h | 84 + .../rogue/km/configs/rgxconfig_km_4.V.4.55.h | 81 + .../rogue/km/configs/rgxconfig_km_4.V.6.62.h | 85 + .../rogue/km/configs/rgxconfig_km_5.V.1.46.h | 76 + 
.../rogue/km/configs/rgxconfig_km_6.V.4.35.h | 82 + .../rogue/km/cores/rgxcore_km_1.39.4.19.h | 71 + .../rogue/km/cores/rgxcore_km_1.75.2.30.h | 70 + .../rogue/km/cores/rgxcore_km_1.82.4.5.h | 69 + .../rogue/km/cores/rgxcore_km_15.5.1.64.h | 72 + .../rogue/km/cores/rgxcore_km_22.102.54.38.h | 75 + .../km/cores/rgxcore_km_22.104.208.318.h | 76 + .../rogue/km/cores/rgxcore_km_22.30.54.25.h | 77 + .../rogue/km/cores/rgxcore_km_22.40.54.30.h | 77 + .../rogue/km/cores/rgxcore_km_22.44.22.25.h | 77 + .../rogue/km/cores/rgxcore_km_22.46.54.330.h | 78 + .../rogue/km/cores/rgxcore_km_22.49.21.16.h | 77 + .../rogue/km/cores/rgxcore_km_22.67.54.30.h | 77 + .../rogue/km/cores/rgxcore_km_22.68.54.30.h | 76 + .../rogue/km/cores/rgxcore_km_22.86.104.218.h | 75 + .../rogue/km/cores/rgxcore_km_22.87.104.18.h | 75 + .../rogue/km/cores/rgxcore_km_24.50.208.504.h | 76 + .../rogue/km/cores/rgxcore_km_24.56.208.505.h | 76 + .../rogue/km/cores/rgxcore_km_24.66.54.204.h | 76 + .../rogue/km/cores/rgxcore_km_24.67.104.504.h | 76 + .../rogue/km/cores/rgxcore_km_29.13.54.208.h | 78 + .../rogue/km/cores/rgxcore_km_29.14.108.208.h | 78 + .../rogue/km/cores/rgxcore_km_29.19.52.202.h | 78 + .../rogue/km/cores/rgxcore_km_4.29.2.51.h | 73 + .../rogue/km/cores/rgxcore_km_4.31.4.55.h | 73 + .../rogue/km/cores/rgxcore_km_4.40.2.51.h | 73 + .../rogue/km/cores/rgxcore_km_4.43.6.62.h | 73 + .../rogue/km/cores/rgxcore_km_4.45.2.58.h | 72 + .../rogue/km/cores/rgxcore_km_4.46.6.62.h | 73 + .../rogue/km/cores/rgxcore_km_5.11.1.46.h | 73 + .../rogue/km/cores/rgxcore_km_5.9.1.46.h | 72 + .../rogue/km/cores/rgxcore_km_6.34.4.35.h | 72 + .../hwdefs/rogue/km/rgx_bvnc_defs_km.h | 319 + .../hwdefs/rogue/km/rgx_bvnc_table_km.h | 439 + .../hwdefs/rogue/km/rgx_cr_defs_km.h | 6515 ++ .../gpu-imgtec/hwdefs/rogue/km/rgxdefs_km.h | 311 + .../gpu-imgtec/hwdefs/rogue/km/rgxmhdefs_km.h | 336 + .../hwdefs/rogue/km/rgxmmudefs_km.h | 350 + .../rogue/km/tmp_rgx_cr_defs_riscv_km.h | 208 + .../km/configs/rgxconfig_km_27.V.254.2.h 
| 98 + .../km/configs/rgxconfig_km_30.V.1632.1.h | 100 + .../km/configs/rgxconfig_km_30.V.408.101.h | 100 + .../volcanic/km/cores/rgxcore_km_27.5.254.2.h | 69 + .../km/cores/rgxcore_km_30.3.408.101.h | 70 + .../km/cores/rgxcore_km_30.4.1632.1.h | 70 + .../km/cores/rgxcore_km_30.5.1632.1.h | 70 + .../hwdefs/volcanic/km/rgx_bvnc_defs_km.h | 192 + .../hwdefs/volcanic/km/rgx_bvnc_table_km.h | 365 + .../hwdefs/volcanic/km/rgx_cr_defs_km.h | 5036 + .../hwdefs/volcanic/km/rgxdefs_km.h | 339 + .../hwdefs/volcanic/km/rgxmmudefs_km.h | 426 + .../volcanic/km/tmp_rgx_cr_defs_riscv_km.h | 164 + .../gpu-imgtec/hwdefs/volcanic/rgxpmdefs.h | 4645 + drivers/mcst/gpu-imgtec/include/cache_ops.h | 56 + .../gpu-imgtec/include/devicemem_typedefs.h | 143 + drivers/mcst/gpu-imgtec/include/dllist.h | 356 + drivers/mcst/gpu-imgtec/include/drm/netlink.h | 148 + .../gpu-imgtec/include/drm/nulldisp_drm.h | 106 + drivers/mcst/gpu-imgtec/include/drm/pdp_drm.h | 106 + drivers/mcst/gpu-imgtec/include/drm/pvr_drm.h | 84 + .../gpu-imgtec/include/drm/pvr_drm_core.h | 77 + drivers/mcst/gpu-imgtec/include/img_3dtypes.h | 248 + drivers/mcst/gpu-imgtec/include/img_defs.h | 560 + .../include/img_drm_fourcc_internal.h | 86 + drivers/mcst/gpu-imgtec/include/img_elf.h | 111 + drivers/mcst/gpu-imgtec/include/img_types.h | 291 + .../mcst/gpu-imgtec/include/kernel_types.h | 137 + .../mcst/gpu-imgtec/include/linux_sw_sync.h | 66 + drivers/mcst/gpu-imgtec/include/lock_types.h | 92 + drivers/mcst/gpu-imgtec/include/log2.h | 409 + .../mcst/gpu-imgtec/include/osfunc_common.h | 229 + drivers/mcst/gpu-imgtec/include/pdumpdefs.h | 246 + drivers/mcst/gpu-imgtec/include/pdumpdesc.h | 205 + .../include/public/powervr/buffer_attribs.h | 147 + .../include/public/powervr/img_drm_fourcc.h | 113 + .../include/public/powervr/mem_types.h | 64 + .../include/public/powervr/pvrsrv_sync_ext.h | 72 + .../include/pvr_buffer_sync_shared.h | 57 + drivers/mcst/gpu-imgtec/include/pvr_debug.h | 801 + 
.../gpu-imgtec/include/pvr_fd_sync_kernel.h | 77 + .../mcst/gpu-imgtec/include/pvr_intrinsics.h | 70 + drivers/mcst/gpu-imgtec/include/pvrmodule.h | 48 + .../gpu-imgtec/include/pvrsrv_device_types.h | 55 + .../mcst/gpu-imgtec/include/pvrsrv_devvar.h | 291 + .../mcst/gpu-imgtec/include/pvrsrv_error.h | 61 + .../mcst/gpu-imgtec/include/pvrsrv_errors.h | 406 + .../gpu-imgtec/include/pvrsrv_memallocflags.h | 947 + .../mcst/gpu-imgtec/include/pvrsrv_sync_km.h | 65 + .../mcst/gpu-imgtec/include/pvrsrv_tlcommon.h | 261 + .../gpu-imgtec/include/pvrsrv_tlstreams.h | 61 + drivers/mcst/gpu-imgtec/include/pvrversion.h | 68 + .../gpu-imgtec/include/rgx_heap_firmware.h | 131 + .../gpu-imgtec/include/rgx_memallocflags.h | 49 + drivers/mcst/gpu-imgtec/include/rgx_meta.h | 385 + drivers/mcst/gpu-imgtec/include/rgx_mips.h | 376 + drivers/mcst/gpu-imgtec/include/rgx_riscv.h | 106 + drivers/mcst/gpu-imgtec/include/ri_typedefs.h | 52 + .../gpu-imgtec/include/rogue/rgx_common.h | 225 + .../include/rogue/rgx_fwif_alignchecks.h | 192 + .../include/rogue/rgx_fwif_shared.h | 245 + .../mcst/gpu-imgtec/include/rogue/rgx_heaps.h | 165 + .../gpu-imgtec/include/rogue/rgx_hwperf.h | 1621 + .../gpu-imgtec/include/rogue/rgx_options.h | 261 + .../rogue/system/rgx_tc/apollo_clocks.h | 151 + drivers/mcst/gpu-imgtec/include/services_km.h | 168 + drivers/mcst/gpu-imgtec/include/servicesext.h | 172 + .../include/sync_checkpoint_external.h | 83 + .../gpu-imgtec/include/sync_prim_internal.h | 84 + .../include/system/rgx_tc/apollo_regs.h | 108 + .../include/system/rgx_tc/bonnie_tcf.h | 68 + .../include/system/rgx_tc/odin_defs.h | 307 + .../include/system/rgx_tc/odin_pdp_regs.h | 8540 ++ .../include/system/rgx_tc/odin_regs.h | 924 + .../include/system/rgx_tc/orion_defs.h | 183 + .../include/system/rgx_tc/orion_regs.h | 439 + .../include/system/rgx_tc/pdp_regs.h | 75 + .../include/system/rgx_tc/tcf_clk_ctrl.h | 1018 + .../include/system/rgx_tc/tcf_pll.h | 311 + .../include/system/rgx_tc/tcf_rgbpdp_regs.h | 
559 + .../gpu-imgtec/include/volcanic/rgx_common.h | 219 + .../include/volcanic/rgx_fwif_alignchecks.h | 192 + .../include/volcanic/rgx_fwif_shared.h | 259 + .../gpu-imgtec/include/volcanic/rgx_heaps.h | 175 + .../gpu-imgtec/include/volcanic/rgx_hwperf.h | 1534 + .../gpu-imgtec/include/volcanic/rgx_options.h | 227 + .../volcanic/system/rgx_tc/apollo_clocks.h | 96 + .../kernel/drivers/staging/imgtec/Kbuild.mk | 123 + .../kernel/drivers/staging/imgtec/Linux.mk | 73 + .../drivers/staging/imgtec/drm_netlink_gem.c | 144 + .../drivers/staging/imgtec/drm_netlink_gem.h | 62 + .../drivers/staging/imgtec/drm_nulldisp_drv.c | 2622 + .../drivers/staging/imgtec/drm_nulldisp_drv.h | 94 + .../drivers/staging/imgtec/drm_nulldisp_gem.c | 640 + .../drivers/staging/imgtec/drm_nulldisp_gem.h | 145 + .../staging/imgtec/drm_nulldisp_netlink.c | 656 + .../staging/imgtec/drm_nulldisp_netlink.h | 78 + .../staging/imgtec/e2c3_gpu/e2c3_gpu_drv.c | 426 + .../staging/imgtec/e2c3_gpu/e2c3_gpu_drv.h | 69 + .../imgtec/e2c3_gpu/e2c3_gpu_drv_internal.h | 107 + .../staging/imgtec/kernel_compatibility.h | 465 + .../imgtec/kernel_config_compatibility.h | 54 + .../drivers/staging/imgtec/kernel_nospec.h | 71 + .../drivers/staging/imgtec/plato/hdmi/hdmi.h | 198 + .../staging/imgtec/plato/hdmi/hdmi_core.c | 1018 + .../staging/imgtec/plato/hdmi/hdmi_i2c.c | 257 + .../staging/imgtec/plato/hdmi/hdmi_phy.c | 693 + .../staging/imgtec/plato/hdmi/hdmi_phy.h | 83 + .../staging/imgtec/plato/hdmi/hdmi_regs.h | 583 + .../staging/imgtec/plato/hdmi/hdmi_video.c | 592 + .../staging/imgtec/plato/hdmi/hdmi_video.h | 91 + .../staging/imgtec/plato/pdp2_mmu_regs.h | 764 + .../drivers/staging/imgtec/plato/pdp2_regs.h | 8565 ++ .../staging/imgtec/plato/plato_aon_regs.h | 606 + .../imgtec/plato/plato_ddr_ctrl_regs.h | 138 + .../imgtec/plato/plato_ddr_publ_regs.h | 121 + .../drivers/staging/imgtec/plato/plato_drv.c | 946 + .../drivers/staging/imgtec/plato/plato_drv.h | 415 + .../drivers/staging/imgtec/plato/plato_init.c | 1890 
+ .../staging/imgtec/plato/plato_top_regs.h | 306 + .../drivers/staging/imgtec/pvr_buffer_sync.c | 592 + .../drivers/staging/imgtec/pvr_buffer_sync.h | 142 + .../staging/imgtec/pvr_counting_timeline.c | 308 + .../staging/imgtec/pvr_counting_timeline.h | 70 + .../drivers/staging/imgtec/pvr_dma_resv.h | 71 + .../kernel/drivers/staging/imgtec/pvr_drm.c | 305 + .../kernel/drivers/staging/imgtec/pvr_drv.h | 99 + .../kernel/drivers/staging/imgtec/pvr_fence.c | 1173 + .../kernel/drivers/staging/imgtec/pvr_fence.h | 244 + .../drivers/staging/imgtec/pvr_fence_trace.h | 226 + .../drivers/staging/imgtec/pvr_linux_fence.h | 104 + .../drivers/staging/imgtec/pvr_platform_drv.c | 326 + .../drivers/staging/imgtec/pvr_sw_fence.c | 200 + .../drivers/staging/imgtec/pvr_sw_fence.h | 61 + .../kernel/drivers/staging/imgtec/pvr_sync.h | 101 + .../kernel/drivers/staging/imgtec/pvr_sync2.c | 2812 + .../drivers/staging/imgtec/pvr_sync_file.c | 1133 + .../staging/imgtec/services_kernel_client.h | 269 + .../drivers/staging/imgtec/tc/drm_pdp_crtc.c | 996 + .../staging/imgtec/tc/drm_pdp_debugfs.c | 179 + .../drivers/staging/imgtec/tc/drm_pdp_drv.c | 830 + .../drivers/staging/imgtec/tc/drm_pdp_drv.h | 221 + .../drivers/staging/imgtec/tc/drm_pdp_dvi.c | 309 + .../drivers/staging/imgtec/tc/drm_pdp_fb.c | 294 + .../drivers/staging/imgtec/tc/drm_pdp_gem.c | 725 + .../drivers/staging/imgtec/tc/drm_pdp_gem.h | 151 + .../staging/imgtec/tc/drm_pdp_modeset.c | 462 + .../drivers/staging/imgtec/tc/drm_pdp_plane.c | 278 + .../drivers/staging/imgtec/tc/drm_pdp_tmds.c | 145 + .../drivers/staging/imgtec/tc/pdp_apollo.c | 333 + .../drivers/staging/imgtec/tc/pdp_apollo.h | 89 + .../drivers/staging/imgtec/tc/pdp_common.h | 103 + .../drivers/staging/imgtec/tc/pdp_odin.c | 989 + .../drivers/staging/imgtec/tc/pdp_odin.h | 94 + .../drivers/staging/imgtec/tc/pdp_plato.c | 340 + .../drivers/staging/imgtec/tc/pdp_plato.h | 87 + .../drivers/staging/imgtec/tc/tc_apollo.c | 1411 + .../drivers/staging/imgtec/tc/tc_apollo.h 
| 78 + .../kernel/drivers/staging/imgtec/tc/tc_drv.c | 856 + .../kernel/drivers/staging/imgtec/tc/tc_drv.h | 162 + .../staging/imgtec/tc/tc_drv_internal.h | 183 + .../kernel/drivers/staging/imgtec/tc/tc_ion.h | 53 + .../drivers/staging/imgtec/tc/tc_odin.c | 1948 + .../drivers/staging/imgtec/tc/tc_odin.h | 76 + .../staging/imgtec/tc/tc_odin_common_regs.h | 107 + .../gpu-imgtec/services/include/htbuffer_sf.h | 237 + .../services/include/htbuffer_types.h | 118 + .../services/include/info_page_client.h | 89 + .../services/include/info_page_defs.h | 90 + .../services/include/km_apphint_defs_common.h | 269 + .../services/include/os_cpu_cache.h | 68 + .../mcst/gpu-imgtec/services/include/pdump.h | 232 + .../gpu-imgtec/services/include/physheap.h | 158 + .../gpu-imgtec/services/include/pvr_bridge.h | 425 + .../services/include/pvr_ricommon.h | 68 + .../gpu-imgtec/services/include/rgx_bridge.h | 220 + .../services/include/rgx_compat_bvnc.h | 140 + .../gpu-imgtec/services/include/rgx_fw_info.h | 135 + .../gpu-imgtec/services/include/rgx_fwif_sf.h | 830 + .../services/include/rgx_pdump_panics.h | 64 + .../services/include/rgx_tq_shared.h | 63 + .../services/include/rgxfw_log_helper.h | 79 + .../services/include/rgxtransfer_shader.h | 61 + .../services/include/rogue/km_apphint_defs.h | 144 + .../services/include/rogue/rgx_fwif_hwperf.h | 243 + .../services/include/rogue/rgx_fwif_km.h | 2029 + .../include/rogue/rgx_fwif_resetframework.h | 74 + .../services/include/rogue/rgxapi_km.h | 313 + .../services/include/rogue/rgxheapconfig.h | 192 + .../include/sync_checkpoint_internal.h | 270 + .../include/sync_checkpoint_internal_fw.h | 63 + .../include/volcanic/km_apphint_defs.h | 165 + .../include/volcanic/rgx_fwif_hwperf.h | 119 + .../services/include/volcanic/rgx_fwif_km.h | 2166 + .../services/include/volcanic/rgx_hw_errors.h | 57 + .../include/volcanic/rgx_hwperf_table.h | 473 + .../services/include/volcanic/rgxapi_km.h | 291 + .../services/include/volcanic/rgxheapconfig.h | 172 
+ .../services/server/common/cache_km.c | 2585 + .../server/common/connection_server.c | 466 + .../server/common/devicemem_heapcfg.c | 137 + .../server/common/devicemem_history_server.c | 1897 + .../services/server/common/devicemem_server.c | 1784 + .../services/server/common/di_server.c | 613 + .../services/server/common/handle.c | 2302 + .../services/server/common/htb_debug.c | 1239 + .../services/server/common/htb_debug.h | 64 + .../services/server/common/htbserver.c | 885 + .../services/server/common/info_page_km.c | 133 + .../gpu-imgtec/services/server/common/lists.c | 60 + .../services/server/common/mmu_common.c | 4349 + .../services/server/common/pdump_mmu.c | 898 + .../services/server/common/pdump_physmem.c | 620 + .../services/server/common/pdump_server.c | 5587 ++ .../services/server/common/physheap.c | 343 + .../services/server/common/physmem.c | 655 + .../server/common/physmem_fwdedicatedmem.c | 571 + .../services/server/common/physmem_hostmem.c | 150 + .../services/server/common/physmem_lma.c | 1510 + .../services/server/common/physmem_tdfwmem.c | 345 + .../gpu-imgtec/services/server/common/pmr.c | 3500 + .../gpu-imgtec/services/server/common/power.c | 1085 + .../services/server/common/process_stats.c | 3452 + .../services/server/common/pvr_notifier.c | 518 + .../services/server/common/pvrsrv.c | 3499 + .../server/common/pvrsrv_bridge_init.c | 453 + .../services/server/common/pvrsrv_pool.c | 260 + .../services/server/common/ri_server.c | 2111 + .../services/server/common/srvcore.c | 1397 + .../services/server/common/sync_checkpoint.c | 3006 + .../services/server/common/sync_server.c | 1227 + .../services/server/common/tlintern.c | 473 + .../services/server/common/tlserver.c | 751 + .../services/server/common/tlstream.c | 1624 + .../services/server/common/vmm_pvz_client.c | 143 + .../services/server/common/vmm_pvz_server.c | 245 + .../services/server/common/vz_vmm_pvz.c | 192 + .../services/server/common/vz_vmm_vm.c | 225 + 
.../services/server/devices/rgxbreakpoint.h | 141 + .../services/server/devices/rgxbvnc.h | 90 + .../services/server/devices/rgxfwdbg.c | 267 + .../services/server/devices/rgxfwdbg.h | 107 + .../server/devices/rgxfwtrace_strings.c | 56 + .../services/server/devices/rgxkicksync.h | 129 + .../services/server/devices/rgxmulticore.h | 66 + .../services/server/devices/rgxpdvfs.c | 279 + .../services/server/devices/rgxpdvfs.h | 68 + .../services/server/devices/rgxregconfig.h | 130 + .../services/server/devices/rgxshader.c | 304 + .../services/server/devices/rgxshader.h | 85 + .../services/server/devices/rgxsignals.h | 71 + .../services/server/devices/rgxstartstop.h | 84 + .../services/server/devices/rgxsyncutils.h | 86 + .../services/server/devices/rgxtimecorr.c | 645 + .../services/server/devices/rgxtimecorr.h | 269 + .../services/server/devices/rgxutils.h | 185 + .../services/server/devices/rgxworkest.c | 618 + .../services/server/devices/rgxworkest.h | 81 + .../server/devices/rogue/rgxbreakpoint.c | 295 + .../services/server/devices/rogue/rgxbvnc.c | 702 + .../services/server/devices/rogue/rgxccb.c | 2697 + .../services/server/devices/rogue/rgxccb.h | 333 + .../server/devices/rogue/rgxcompute.c | 1125 + .../server/devices/rogue/rgxcompute.h | 171 + .../services/server/devices/rogue/rgxdebug.c | 5497 + .../services/server/devices/rogue/rgxdebug.h | 260 + .../services/server/devices/rogue/rgxdevice.h | 750 + .../server/devices/rogue/rgxfwimageutils.c | 1059 + .../server/devices/rogue/rgxfwimageutils.h | 260 + .../server/devices/rogue/rgxfwutils.c | 5932 ++ .../server/devices/rogue/rgxfwutils.h | 1298 + .../services/server/devices/rogue/rgxhwperf.c | 3920 + .../services/server/devices/rogue/rgxhwperf.h | 502 + .../services/server/devices/rogue/rgxinit.c | 4945 + .../services/server/devices/rogue/rgxinit.h | 316 + .../server/devices/rogue/rgxkicksync.c | 789 + .../services/server/devices/rogue/rgxlayer.h | 817 + .../server/devices/rogue/rgxlayer_impl.c | 1265 + 
.../server/devices/rogue/rgxlayer_impl.h | 67 + .../services/server/devices/rogue/rgxmem.c | 764 + .../services/server/devices/rogue/rgxmem.h | 135 + .../server/devices/rogue/rgxmipsmmuinit.c | 991 + .../server/devices/rogue/rgxmipsmmuinit.h | 94 + .../server/devices/rogue/rgxmmuinit.c | 1079 + .../server/devices/rogue/rgxmmuinit.h | 60 + .../server/devices/rogue/rgxmulticore.c | 126 + .../services/server/devices/rogue/rgxpdump.c | 542 + .../services/server/devices/rogue/rgxpdump.h | 178 + .../services/server/devices/rogue/rgxpower.c | 1252 + .../services/server/devices/rogue/rgxpower.h | 245 + .../server/devices/rogue/rgxregconfig.c | 287 + .../server/devices/rogue/rgxsignals.c | 99 + .../server/devices/rogue/rgxsrvinit.c | 1455 + .../server/devices/rogue/rgxstartstop.c | 1248 + .../server/devices/rogue/rgxsyncutils.c | 175 + .../services/server/devices/rogue/rgxta3d.c | 5420 + .../services/server/devices/rogue/rgxta3d.h | 508 + .../server/devices/rogue/rgxtdmtransfer.c | 1297 + .../server/devices/rogue/rgxtdmtransfer.h | 132 + .../server/devices/rogue/rgxtransfer.c | 1647 + .../server/devices/rogue/rgxtransfer.h | 153 + .../services/server/devices/rogue/rgxutils.c | 221 + .../server/devices/volcanic/rgxbvnc.c | 658 + .../services/server/devices/volcanic/rgxccb.c | 2738 + .../services/server/devices/volcanic/rgxccb.h | 334 + .../server/devices/volcanic/rgxcompute.c | 1182 + .../server/devices/volcanic/rgxcompute.h | 177 + .../server/devices/volcanic/rgxdebug.c | 3527 + .../server/devices/volcanic/rgxdebug.h | 227 + .../server/devices/volcanic/rgxdevice.h | 784 + .../server/devices/volcanic/rgxfwimageutils.c | 993 + .../server/devices/volcanic/rgxfwimageutils.h | 251 + .../server/devices/volcanic/rgxfwutils.c | 6240 ++ .../server/devices/volcanic/rgxfwutils.h | 1280 + .../server/devices/volcanic/rgxhwperf.c | 3769 + .../server/devices/volcanic/rgxhwperf.h | 495 + .../server/devices/volcanic/rgxinit.c | 4502 + .../server/devices/volcanic/rgxinit.h | 340 + 
.../server/devices/volcanic/rgxkicksync.c | 788 + .../server/devices/volcanic/rgxlayer.h | 510 + .../server/devices/volcanic/rgxlayer_impl.c | 965 + .../server/devices/volcanic/rgxlayer_impl.h | 61 + .../services/server/devices/volcanic/rgxmem.c | 933 + .../services/server/devices/volcanic/rgxmem.h | 153 + .../server/devices/volcanic/rgxmmuinit.c | 1268 + .../server/devices/volcanic/rgxmmuinit.h | 61 + .../server/devices/volcanic/rgxmulticore.c | 129 + .../server/devices/volcanic/rgxpdump.c | 542 + .../server/devices/volcanic/rgxpdump.h | 178 + .../server/devices/volcanic/rgxpower.c | 1151 + .../server/devices/volcanic/rgxpower.h | 224 + .../server/devices/volcanic/rgxregconfig.c | 291 + .../server/devices/volcanic/rgxsignals.c | 100 + .../server/devices/volcanic/rgxsrvinit.c | 1478 + .../server/devices/volcanic/rgxstartstop.c | 771 + .../server/devices/volcanic/rgxsyncutils.c | 186 + .../server/devices/volcanic/rgxta3d.c | 5144 + .../server/devices/volcanic/rgxta3d.h | 440 + .../server/devices/volcanic/rgxtdmtransfer.c | 1278 + .../server/devices/volcanic/rgxtdmtransfer.h | 129 + .../server/devices/volcanic/rgxutils.c | 217 + .../services/server/env/linux/Kbuild.mk | 538 + .../services/server/env/linux/Linux.mk | 46 + .../services/server/env/linux/allocmem.c | 373 + .../server/env/linux/env_connection.h | 90 + .../services/server/env/linux/event.c | 514 + .../services/server/env/linux/event.h | 54 + .../services/server/env/linux/fwload.c | 303 + .../services/server/env/linux/handle_idr.c | 440 + .../services/server/env/linux/km_apphint.c | 1568 + .../services/server/env/linux/km_apphint.h | 99 + .../services/server/env/linux/linkage.h | 55 + .../services/server/env/linux/module_common.c | 513 + .../services/server/env/linux/module_common.h | 67 + .../server/env/linux/osconnection_server.c | 155 + .../services/server/env/linux/osfunc.c | 1760 + .../services/server/env/linux/osfunc_arm.c | 153 + .../services/server/env/linux/osfunc_arm64.c | 288 + 
.../services/server/env/linux/osfunc_e2k.c | 143 + .../services/server/env/linux/osfunc_mips.c | 113 + .../services/server/env/linux/osfunc_riscv.c | 168 + .../services/server/env/linux/osfunc_x86.c | 125 + .../services/server/env/linux/osmmap_stub.c | 146 + .../server/env/linux/physmem_dmabuf.c | 1257 + .../server/env/linux/physmem_osmem_linux.c | 4275 + .../server/env/linux/physmem_osmem_linux.h | 49 + .../services/server/env/linux/physmem_test.c | 710 + .../services/server/env/linux/physmem_test.h | 51 + .../services/server/env/linux/pmr_os.c | 640 + .../services/server/env/linux/private_data.h | 53 + .../services/server/env/linux/pvr_bridge_k.c | 588 + .../services/server/env/linux/pvr_bridge_k.h | 103 + .../services/server/env/linux/pvr_debug.c | 1860 + .../services/server/env/linux/pvr_debugfs.c | 609 + .../services/server/env/linux/pvr_debugfs.h | 50 + .../server/env/linux/pvr_dvfs_device.c | 644 + .../server/env/linux/pvr_dvfs_device.h | 58 + .../services/server/env/linux/pvr_gputrace.c | 1244 + .../services/server/env/linux/pvr_ion_stats.h | 80 + .../services/server/env/linux/pvr_procfs.c | 613 + .../services/server/env/linux/pvr_procfs.h | 50 + .../services/server/env/linux/pvr_uaccess.h | 99 + .../server/env/linux/rogue_trace_events.h | 543 + .../services/server/env/linux/trace_events.c | 265 + .../services/server/env/linux/trace_events.h | 198 + .../services/server/include/cache_km.h | 164 + .../server/include/connection_server.h | 128 + .../services/server/include/device.h | 556 + .../server/include/devicemem_heapcfg.h | 168 + .../server/include/devicemem_history_server.h | 152 + .../server/include/devicemem_server.h | 682 + .../server/include/devicemem_server_utils.h | 198 + .../services/server/include/di_common.h | 228 + .../services/server/include/di_server.h | 204 + .../services/server/include/fwload.h | 148 + .../services/server/include/fwtrace_string.h | 52 + .../services/server/include/handle.h | 201 + .../services/server/include/handle_impl.h | 
89 + .../services/server/include/handle_types.h | 83 + .../services/server/include/htbserver.h | 249 + .../services/server/include/info_page.h | 99 + .../services/server/include/lists.h | 355 + .../services/server/include/mmu_common.h | 774 + .../services/server/include/opaque_types.h | 56 + .../server/include/os_srvinit_param.h | 322 + .../server/include/osconnection_server.h | 121 + .../services/server/include/osdi_impl.h | 185 + .../services/server/include/osfunc.h | 1599 + .../services/server/include/oskm_apphint.h | 176 + .../services/server/include/ospvr_gputrace.h | 167 + .../services/server/include/pdump_km.h | 1144 + .../services/server/include/pdump_mmu.h | 171 + .../services/server/include/pdump_physmem.h | 243 + .../server/include/pdump_symbolicaddr.h | 55 + .../services/server/include/physmem.h | 234 + .../services/server/include/physmem_dmabuf.h | 114 + .../server/include/physmem_fwdedicatedmem.h | 68 + .../services/server/include/physmem_hostmem.h | 54 + .../services/server/include/physmem_lma.h | 78 + .../services/server/include/physmem_osmem.h | 128 + .../services/server/include/physmem_tdfwmem.h | 72 + .../gpu-imgtec/services/server/include/pmr.h | 1131 + .../services/server/include/pmr_impl.h | 553 + .../services/server/include/pmr_os.h | 62 + .../services/server/include/power.h | 128 + .../services/server/include/process_stats.h | 257 + .../services/server/include/pvr_dvfs.h | 136 + .../services/server/include/pvr_notifier.h | 250 + .../services/server/include/pvrsrv.h | 497 + .../services/server/include/pvrsrv_apphint.h | 71 + .../server/include/pvrsrv_bridge_init.h | 57 + .../services/server/include/pvrsrv_cleanup.h | 159 + .../services/server/include/pvrsrv_device.h | 364 + .../services/server/include/pvrsrv_pool.h | 135 + .../server/include/pvrsrv_sync_server.h | 277 + .../services/server/include/ri_server.h | 106 + .../services/server/include/sofunc_pvr.h | 94 + .../services/server/include/sofunc_rgx.h | 95 + 
.../services/server/include/srvcore.h | 216 + .../services/server/include/srvinit.h | 68 + .../services/server/include/srvkm.h | 145 + .../services/server/include/sync_checkpoint.h | 725 + .../server/include/sync_checkpoint_init.h | 82 + .../server/include/sync_fallback_server.h | 198 + .../services/server/include/sync_server.h | 266 + .../services/server/include/tlintern.h | 346 + .../services/server/include/tlserver.h | 97 + .../services/server/include/tlstream.h | 601 + .../services/server/include/tutils_km.h | 153 + .../services/server/include/vmm_impl.h | 187 + .../services/server/include/vmm_pvz_client.h | 77 + .../services/server/include/vmm_pvz_common.h | 65 + .../services/server/include/vmm_pvz_server.h | 121 + .../services/server/include/vz_vm.h | 61 + .../services/server/include/vz_vmm_pvz.h | 79 + .../services/shared/common/devicemem.c | 2980 + .../services/shared/common/devicemem_pdump.c | 403 + .../services/shared/common/devicemem_utils.c | 1181 + .../services/shared/common/devicememx_pdump.c | 80 + .../gpu-imgtec/services/shared/common/hash.c | 734 + .../services/shared/common/htbuffer.c | 185 + .../services/shared/common/mem_utils.c | 449 + .../services/shared/common/pvrsrv_error.c | 61 + .../gpu-imgtec/services/shared/common/ra.c | 1589 + .../gpu-imgtec/services/shared/common/sync.c | 871 + .../services/shared/common/tlclient.c | 486 + .../shared/common/uniq_key_splay_tree.c | 280 + .../shared/common/uniq_key_splay_tree.h | 88 + .../shared/devices/rogue/rgx_hwperf_table.c | 676 + .../shared/devices/rogue/rgx_hwperf_table.h | 116 + .../services/shared/include/allocmem.h | 181 + .../shared/include/device_connection.h | 117 + .../services/shared/include/devicemem.h | 729 + .../services/shared/include/devicemem_pdump.h | 363 + .../services/shared/include/devicemem_utils.h | 513 + .../services/shared/include/devicememx.h | 223 + .../shared/include/devicememx_pdump.h | 81 + .../gpu-imgtec/services/shared/include/hash.h | 246 + 
.../services/shared/include/htbuffer.h | 132 + .../services/shared/include/htbuffer_init.h | 114 + .../gpu-imgtec/services/shared/include/lock.h | 425 + .../services/shared/include/osmmap.h | 115 + .../services/shared/include/proc_stats.h | 135 + .../gpu-imgtec/services/shared/include/ra.h | 223 + .../gpu-imgtec/services/shared/include/sync.h | 316 + .../services/shared/include/sync_internal.h | 118 + .../services/shared/include/tlclient.h | 257 + .../services/shared/include/tutilsdefs.h | 230 + .../rogue/common/env/linux/dma_support.c | 573 + .../rogue/common/env/linux/pci_support.c | 726 + .../system/rogue/common/vmm_type_stub.c | 119 + .../services/system/rogue/e2c3_gpu/Kbuild.mk | 58 + .../system/rogue/e2c3_gpu/sysconfig.c | 487 + .../services/system/rogue/e2c3_gpu/sysinfo.h | 62 + .../system/rogue/include/dma_support.h | 116 + .../system/rogue/include/interrupt_support.h | 103 + .../system/rogue/include/pci_support.h | 99 + .../services/system/rogue/include/syscommon.h | 129 + .../system/rogue/include/sysvalidation.h | 62 + .../services/system/rogue/mt8173/Kbuild.mk | 63 + .../system/rogue/mt8173/mt8173_mfgsys.c | 330 + .../system/rogue/mt8173/mt8173_mfgsys.h | 66 + .../system/rogue/mt8173/mt8173_sysconfig.c | 545 + .../services/system/rogue/mt8173/sysinfo.h | 57 + .../system/rogue/rgx_linux_plato/Kbuild.mk | 57 + .../system/rogue/rgx_linux_plato/sysconfig.c | 778 + .../system/rogue/rgx_linux_plato/sysconfig.h | 116 + .../system/rogue/rgx_linux_plato/sysinfo.h | 64 + .../system/rogue/rgx_linux_tc/Kbuild.mk | 57 + .../system/rogue/rgx_linux_tc/sysconfig.c | 896 + .../system/rogue/rgx_linux_tc/sysinfo.h | 60 + .../services/system/rogue/rgx_nohw/Kbuild.mk | 53 + .../system/rogue/rgx_nohw/sysconfig.c | 350 + .../system/rogue/rgx_nohw/sysconfig.h | 58 + .../services/system/rogue/rgx_nohw/sysinfo.h | 57 + .../volcanic/common/env/linux/dma_support.c | 335 + .../common/env/linux/interrupt_support.c | 151 + .../volcanic/common/env/linux/pci_support.c | 726 + 
.../system/volcanic/common/vmm_type_stub.c | 119 + .../system/volcanic/include/dma_support.h | 112 + .../volcanic/include/interrupt_support.h | 103 + .../system/volcanic/include/pci_support.h | 99 + .../system/volcanic/include/syscommon.h | 129 + .../system/volcanic/include/sysvalidation.h | 63 + .../system/volcanic/rgx_linux_tc/Kbuild.mk | 59 + .../system/volcanic/rgx_linux_tc/sysconfig.c | 1040 + .../system/volcanic/rgx_linux_tc/sysinfo.h | 60 + .../system/volcanic/rgx_nohw/Kbuild.mk | 54 + .../system/volcanic/rgx_nohw/sysconfig.c | 346 + .../system/volcanic/rgx_nohw/sysconfig.h | 58 + .../system/volcanic/rgx_nohw/sysinfo.h | 57 + drivers/mcst/gpu-viv/Kbuild | 295 + drivers/mcst/gpu-viv/Kconfig | 12 + drivers/mcst/gpu-viv/config | 72 + .../hal/kernel/arch/gc_hal_kernel_context.c | 4872 + .../hal/kernel/arch/gc_hal_kernel_context.h | 194 + .../hal/kernel/arch/gc_hal_kernel_hardware.c | 18398 ++++ .../hal/kernel/arch/gc_hal_kernel_hardware.h | 368 + .../hal/kernel/arch/gc_hal_kernel_recorder.c | 728 + .../mcst/gpu-viv/hal/kernel/gc_hal_kernel.c | 6504 ++ .../mcst/gpu-viv/hal/kernel/gc_hal_kernel.h | 2062 + .../hal/kernel/gc_hal_kernel_async_command.c | 477 + .../hal/kernel/gc_hal_kernel_command.c | 3475 + .../gpu-viv/hal/kernel/gc_hal_kernel_db.c | 1894 + .../gpu-viv/hal/kernel/gc_hal_kernel_debug.c | 2866 + .../gpu-viv/hal/kernel/gc_hal_kernel_event.c | 3017 + .../gpu-viv/hal/kernel/gc_hal_kernel_heap.c | 892 + .../gpu-viv/hal/kernel/gc_hal_kernel_mmu.c | 2989 + .../gpu-viv/hal/kernel/gc_hal_kernel_power.c | 393 + .../hal/kernel/gc_hal_kernel_precomp.h | 63 + .../hal/kernel/gc_hal_kernel_security.c | 286 + .../hal/kernel/gc_hal_kernel_security_v1.c | 320 + .../hal/kernel/gc_hal_kernel_video_memory.c | 3331 + .../hal/kernel/inc/gc_feature_database.h | 82877 ++++++++++++++++ drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal.h | 2833 + .../mcst/gpu-viv/hal/kernel/inc/gc_hal_base.h | 5989 ++ .../gpu-viv/hal/kernel/inc/gc_hal_driver.h | 1321 + 
.../gpu-viv/hal/kernel/inc/gc_hal_driver_vg.h | 302 + .../mcst/gpu-viv/hal/kernel/inc/gc_hal_drm.h | 199 + .../mcst/gpu-viv/hal/kernel/inc/gc_hal_dump.h | 125 + .../hal/kernel/inc/gc_hal_eglplatform.h | 589 + .../hal/kernel/inc/gc_hal_eglplatform_type.h | 326 + .../gpu-viv/hal/kernel/inc/gc_hal_engine.h | 2969 + .../gpu-viv/hal/kernel/inc/gc_hal_engine_vg.h | 1320 + .../mcst/gpu-viv/hal/kernel/inc/gc_hal_enum.h | 2154 + .../hal/kernel/inc/gc_hal_kernel_buffer.h | 320 + .../mcst/gpu-viv/hal/kernel/inc/gc_hal_mem.h | 566 + .../gpu-viv/hal/kernel/inc/gc_hal_metadata.h | 118 + .../gpu-viv/hal/kernel/inc/gc_hal_options.h | 1408 + .../gpu-viv/hal/kernel/inc/gc_hal_profiler.h | 1175 + .../gpu-viv/hal/kernel/inc/gc_hal_raster.h | 1109 + .../gpu-viv/hal/kernel/inc/gc_hal_rename.h | 279 + .../gpu-viv/hal/kernel/inc/gc_hal_resource.h | 69 + .../kernel/inc/gc_hal_security_interface.h | 186 + .../hal/kernel/inc/gc_hal_statistics.h | 135 + .../gpu-viv/hal/kernel/inc/gc_hal_types.h | 1035 + .../gpu-viv/hal/kernel/inc/gc_hal_version.h | 71 + .../mcst/gpu-viv/hal/kernel/inc/gc_hal_vg.h | 76 + .../default/gc_hal_kernel_allocator_array.h | 119 + .../default/gc_hal_kernel_allocator_dma.c | 610 + .../default/gc_hal_kernel_allocator_dmabuf.c | 544 + .../default/gc_hal_kernel_allocator_gfp.c | 1149 + .../gc_hal_kernel_allocator_reserved_mem.c | 511 + .../gc_hal_kernel_allocator_user_memory.c | 842 + .../os/linux/kernel/gc_hal_kernel_allocator.c | 264 + .../os/linux/kernel/gc_hal_kernel_allocator.h | 576 + .../hal/os/linux/kernel/gc_hal_kernel_debug.h | 147 + .../os/linux/kernel/gc_hal_kernel_debugfs.c | 965 + .../os/linux/kernel/gc_hal_kernel_debugfs.h | 170 + .../os/linux/kernel/gc_hal_kernel_device.c | 2340 + .../os/linux/kernel/gc_hal_kernel_device.h | 272 + .../os/linux/kernel/gc_hal_kernel_driver.c | 1255 + .../hal/os/linux/kernel/gc_hal_kernel_drm.c | 809 + .../hal/os/linux/kernel/gc_hal_kernel_iommu.c | 250 + .../hal/os/linux/kernel/gc_hal_kernel_linux.c | 476 + 
.../hal/os/linux/kernel/gc_hal_kernel_linux.h | 409 + .../hal/os/linux/kernel/gc_hal_kernel_math.c | 66 + .../hal/os/linux/kernel/gc_hal_kernel_mutex.h | 89 + .../hal/os/linux/kernel/gc_hal_kernel_os.c | 7419 ++ .../hal/os/linux/kernel/gc_hal_kernel_os.h | 123 + .../os/linux/kernel/gc_hal_kernel_platform.h | 313 + .../kernel/gc_hal_kernel_security_channel.c | 426 + .../gc_hal_kernel_security_channel_emulator.c | 116 + .../hal/os/linux/kernel/gc_hal_kernel_sync.c | 373 + .../hal/os/linux/kernel/gc_hal_kernel_sync.h | 151 + .../default/gc_hal_kernel_platform_default.c | 146 + .../mcst/gc_hal_kernel_platform_mcst.c | 155 + .../mcst/gc_hal_kernel_platform_mcst.config | 1 + .../mcst/gpu-viv/hal/security_v1/gc_hal_ta.c | 348 + .../mcst/gpu-viv/hal/security_v1/gc_hal_ta.h | 373 + .../hal/security_v1/gc_hal_ta_hardware.c | 1070 + .../hal/security_v1/gc_hal_ta_hardware.h | 139 + .../gpu-viv/hal/security_v1/gc_hal_ta_mmu.c | 586 + .../os/emulator/gc_hal_ta_emulator.c | 324 + drivers/mcst/hantrodec/Makefile | 3 + drivers/mcst/hantrodec/README | 41 + drivers/mcst/hantrodec/driver_load.sh | 43 + drivers/mcst/hantrodec/dwl_defs.h | 44 + drivers/mcst/hantrodec/hantrodec.c | 1332 + drivers/mcst/hantrodec/hantrodec.h | 73 + drivers/mcst/i2c_spd/Makefile | 2 + drivers/mcst/i2c_spd/i2c_spd.c | 358 + drivers/mcst/lptouts/Makefile | 1 + drivers/mcst/lptouts/lptouts.c | 391 + drivers/mcst/lptouts/readme.txt | 18 + drivers/mcst/m2mlc/Makefile | 7 + drivers/mcst/m2mlc/m2mlc.h | 238 + drivers/mcst/m2mlc/m2mlc_dbg.h | 60 + drivers/mcst/m2mlc/m2mlc_dev.c | 1647 + drivers/mcst/m2mlc/m2mlc_hw.c | 824 + drivers/mcst/m2mlc/m2mlc_ksvv.c | 887 + drivers/mcst/m2mlc/m2mlc_ksvv.h | 340 + drivers/mcst/m2mlc/m2mlc_main.c | 963 + drivers/mcst/m2mlc/m2mlc_net.c | 605 + drivers/mcst/m2mlc/m2mlc_pci.c | 186 + drivers/mcst/m2mlc/m2mlc_regs.h | 149 + drivers/mcst/mem2alloc/Makefile | 1 + drivers/mcst/mem2alloc/mem2alloc.c | 215 + drivers/mcst/mem2alloc/mem2alloc.h | 36 + 
drivers/mcst/mem2alloc/mem2alloc_load.sh | 42 + drivers/mcst/mga2-gpio/Makefile | 2 + drivers/mcst/mga2-gpio/mga2-gpio.c | 164 + drivers/mcst/mga2-pwm/Makefile | 2 + drivers/mcst/mga2-pwm/mga2-pwm.c | 188 + drivers/mcst/mga2/Makefile | 14 + drivers/mcst/mga2/it6613/HDMI_COMMON.h | 11 + drivers/mcst/mga2/it6613/HDMI_TX/EDID.c | 195 + drivers/mcst/mga2/it6613/HDMI_TX/HDMI_TX.c | 311 + drivers/mcst/mga2/it6613/HDMI_TX/HDMI_TX.h | 27 + drivers/mcst/mga2/it6613/HDMI_TX/edid.h | 126 + drivers/mcst/mga2/it6613/HDMI_TX/hdmitx.h | 75 + drivers/mcst/mga2/it6613/HDMI_TX/it6613_drv.c | 3465 + drivers/mcst/mga2/it6613/HDMI_TX/it6613_drv.h | 883 + drivers/mcst/mga2/it6613/HDMI_TX/it6613_sys.c | 548 + drivers/mcst/mga2/it6613/HDMI_TX/it6613_sys.h | 63 + drivers/mcst/mga2/it6613/mcu.h | 57 + drivers/mcst/mga2/it6613/typedef.h | 335 + drivers/mcst/mga2/mga2_auc2.c | 310 + drivers/mcst/mga2/mga2_bctrl.c | 268 + drivers/mcst/mga2/mga2_drv.c | 267 + drivers/mcst/mga2/mga2_drv.h | 386 + drivers/mcst/mga2/mga2_dvi.c | 219 + drivers/mcst/mga2/mga2_fb.c | 316 + drivers/mcst/mga2/mga2_fbdev.c | 843 + drivers/mcst/mga2/mga2_hdmi_it6613.c | 493 + drivers/mcst/mga2/mga2_i2c.c | 73 + drivers/mcst/mga2/mga2_irq.c | 201 + drivers/mcst/mga2/mga2_layer.c | 703 + drivers/mcst/mga2/mga2_lvds.c | 244 + drivers/mcst/mga2/mga2_main.c | 1380 + drivers/mcst/mga2/mga2_mode.c | 1397 + drivers/mcst/mga2/mga2_pll.c | 734 + drivers/mcst/mga2/mga2_regs.h | 559 + drivers/mcst/mgpm/Makefile | 6 + drivers/mcst/mgpm/mgpm.c | 1422 + drivers/mcst/mmrm/Makefile | 8 + drivers/mcst/mmrm/mmrm.c | 999 + drivers/mcst/mmrm/mmrm.h | 239 + drivers/mcst/mokm/Makefile | 6 + drivers/mcst/mokm/mokm.c | 1620 + drivers/mcst/mokx/Makefile | 8 + drivers/mcst/mokx/mokx.c | 5179 + drivers/mcst/mokx/mokx_ext_mode.c | 1824 + drivers/mcst/mokx/mokx_get_event.c | 773 + drivers/mcst/mokx/mokx_get_stat.c | 373 + drivers/mcst/mokx/mokx_intrrupt.c | 907 + drivers/mcst/mokx/mokx_iocc.h | 527 + drivers/mcst/mokx/mokx_iocc_error.h | 43 + 
drivers/mcst/mokx/mokx_iocc_regs.h | 223 + drivers/mcst/mokx/mokx_mok.h | 81 + drivers/mcst/mokx/mokx_mok_error.h | 16 + drivers/mcst/mokx/mokx_mok_regs.h | 246 + drivers/mcst/mokx/mokx_read_buf.c | 132 + drivers/mcst/mokx/mokx_send_msg.c | 183 + drivers/mcst/mokx/mokx_write_buf.c | 233 + drivers/mcst/mpk/Makefile | 15 + drivers/mcst/mpk/README | 29 + drivers/mcst/mpk/mpk.c | 1969 + drivers/mcst/mpv/Makefile | 12 + drivers/mcst/mpv/mpv.c | 2833 + drivers/mcst/mpv/mpv.h | 196 + drivers/mcst/msps/Makefile | 2 + drivers/mcst/msps/msps.c | 2087 + drivers/mcst/msps/msps.h | 371 + drivers/mcst/msps/msps_test.c | 258 + drivers/mcst/mvp/Makefile | 12 + drivers/mcst/mvp/mvp.c | 1147 + drivers/mcst/mvp/mvpvar.h | 101 + drivers/mcst/pcs/Makefile | 6 + drivers/mcst/pcs/l_pcs.c | 361 + drivers/mcst/pcsm/Makefile | 7 + drivers/mcst/pcsm/pcsm.c | 1877 + drivers/mcst/pcsm/pcsm.h | 366 + drivers/mcst/pcsm/pcsm_drv.c | 1217 + drivers/mcst/prom/Makefile | 1 + drivers/mcst/prom/e90_prom.c | 359 + drivers/mcst/prom/sbus_proc_tree.c | 233 + drivers/mcst/prom/sbus_proc_tree.h | 56 + drivers/mcst/prom/sbus_tree.c | 966 + drivers/mcst/rdma/Makefile | 8 + drivers/mcst/rdma/rdma.c | 2242 + drivers/mcst/rdma/rdma.h | 354 + drivers/mcst/rdma/rdma_error.h | 31 + drivers/mcst/rdma/rdma_intr.c | 481 + drivers/mcst/rdma/rdma_read_buf.c | 510 + drivers/mcst/rdma/rdma_regs.h | 207 + drivers/mcst/rdma/rdma_send_msg.c | 150 + drivers/mcst/rdma/rdma_write_buf.c | 234 + drivers/mcst/rdma_m/Makefile | 8 + drivers/mcst/rdma_m/get_event_rdma.c | 768 + drivers/mcst/rdma_m/get_stat_rdma.c | 373 + drivers/mcst/rdma_m/rdma.h | 573 + drivers/mcst/rdma_m/rdma_error.h | 31 + drivers/mcst/rdma_m/rdma_intr.c | 735 + drivers/mcst/rdma_m/rdma_m.c | 3371 + drivers/mcst/rdma_m/rdma_read_buf.c | 136 + drivers/mcst/rdma_m/rdma_regs.h | 266 + drivers/mcst/rdma_m/rdma_send_msg.c | 181 + drivers/mcst/rdma_m/rdma_write_buf.c | 408 + drivers/mcst/rdma_sic/Makefile | 8 + drivers/mcst/rdma_sic/get_event_rdma.c | 701 + 
drivers/mcst/rdma_sic/get_stat_rdma.c | 373 + drivers/mcst/rdma_sic/rdma.h | 475 + drivers/mcst/rdma_sic/rdma_error.h | 31 + drivers/mcst/rdma_sic/rdma_intr.c | 619 + drivers/mcst/rdma_sic/rdma_read_buf.c | 598 + drivers/mcst/rdma_sic/rdma_regs.h | 254 + drivers/mcst/rdma_sic/rdma_send_msg.c | 177 + drivers/mcst/rdma_sic/rdma_sic.c | 2279 + drivers/mcst/rdma_sic/rdma_write_buf.c | 265 + drivers/mcst/smi-gpio/Makefile | 2 + drivers/mcst/smi-gpio/smi-gpio.c | 323 + drivers/mcst/smi-pwm/Makefile | 2 + drivers/mcst/smi-pwm/smi-pwm.c | 180 + drivers/mcst/smi/Kconfig | 29 + drivers/mcst/smi/Makefile | 26 + drivers/mcst/smi/ddk750/ddk750_2d.c | 2951 + drivers/mcst/smi/ddk750/ddk750_2d.h | 480 + drivers/mcst/smi/ddk750/ddk750_chip.c | 235 + drivers/mcst/smi/ddk750/ddk750_chip.h | 194 + drivers/mcst/smi/ddk750/ddk750_clock.c | 564 + drivers/mcst/smi/ddk750/ddk750_clock.h | 178 + drivers/mcst/smi/ddk750/ddk750_cursor.c | 104 + drivers/mcst/smi/ddk750/ddk750_cursor.h | 48 + drivers/mcst/smi/ddk750/ddk750_defs.h | 83 + drivers/mcst/smi/ddk750/ddk750_display.c | 1702 + drivers/mcst/smi/ddk750/ddk750_display.h | 226 + drivers/mcst/smi/ddk750/ddk750_edid.c | 2220 + drivers/mcst/smi/ddk750/ddk750_edid.h | 1024 + drivers/mcst/smi/ddk750/ddk750_hardware.h | 191 + drivers/mcst/smi/ddk750/ddk750_help.c | 18 + drivers/mcst/smi/ddk750/ddk750_help.h | 33 + drivers/mcst/smi/ddk750/ddk750_helper.c | 47 + drivers/mcst/smi/ddk750/ddk750_helper.h | 30 + drivers/mcst/smi/ddk750/ddk750_hwi2c.c | 378 + drivers/mcst/smi/ddk750/ddk750_hwi2c.h | 67 + drivers/mcst/smi/ddk750/ddk750_mode.c | 1181 + drivers/mcst/smi/ddk750/ddk750_mode.h | 565 + drivers/mcst/smi/ddk750/ddk750_power.c | 495 + drivers/mcst/smi/ddk750/ddk750_power.h | 121 + drivers/mcst/smi/ddk750/ddk750_regdc.h | 960 + drivers/mcst/smi/ddk750/ddk750_regde.h | 359 + drivers/mcst/smi/ddk750/ddk750_reggpio.h | 239 + drivers/mcst/smi/ddk750/ddk750_regi2c.h | 69 + drivers/mcst/smi/ddk750/ddk750_regsc.h | 900 + 
drivers/mcst/smi/ddk750/ddk750_sii9022.c | 61 + drivers/mcst/smi/ddk750/ddk750_sii9022.h | 3 + drivers/mcst/smi/ddk750/ddk750_sw2d.h | 200 + drivers/mcst/smi/ddk750/ddk750_swi2c.c | 460 + drivers/mcst/smi/ddk750/ddk750_swi2c.h | 92 + drivers/mcst/smi/ddk750/ddkdebug.h | 141 + drivers/mcst/smi/ddk750/siHdmiTx_902x_TPI.c | 3514 + drivers/mcst/smi/ddk750/siHdmiTx_902x_TPI.h | 1122 + drivers/mcst/smi/ddk750/vdif.h | 63 + drivers/mcst/smi/ddk768/ddk768.h | 23 + drivers/mcst/smi/ddk768/ddk768_2d.c | 1209 + drivers/mcst/smi/ddk768/ddk768_2d.h | 503 + drivers/mcst/smi/ddk768/ddk768_chip.c | 188 + drivers/mcst/smi/ddk768/ddk768_chip.h | 84 + drivers/mcst/smi/ddk768/ddk768_clock.c | 182 + drivers/mcst/smi/ddk768/ddk768_clock.h | 67 + drivers/mcst/smi/ddk768/ddk768_cursor.c | 100 + drivers/mcst/smi/ddk768/ddk768_cursor.h | 54 + drivers/mcst/smi/ddk768/ddk768_display.c | 677 + drivers/mcst/smi/ddk768/ddk768_display.h | 235 + drivers/mcst/smi/ddk768/ddk768_edid.c | 2115 + drivers/mcst/smi/ddk768/ddk768_edid.h | 1049 + drivers/mcst/smi/ddk768/ddk768_hdmi.c | 1367 + drivers/mcst/smi/ddk768/ddk768_hdmi.h | 459 + drivers/mcst/smi/ddk768/ddk768_help.c | 16 + drivers/mcst/smi/ddk768/ddk768_help.h | 67 + drivers/mcst/smi/ddk768/ddk768_helper.c | 84 + drivers/mcst/smi/ddk768/ddk768_helper.h | 44 + drivers/mcst/smi/ddk768/ddk768_hwi2c.c | 294 + drivers/mcst/smi/ddk768/ddk768_hwi2c.h | 69 + drivers/mcst/smi/ddk768/ddk768_iis.c | 254 + drivers/mcst/smi/ddk768/ddk768_iis.h | 109 + drivers/mcst/smi/ddk768/ddk768_intr.c | 62 + drivers/mcst/smi/ddk768/ddk768_intr.h | 116 + drivers/mcst/smi/ddk768/ddk768_mode.c | 927 + drivers/mcst/smi/ddk768/ddk768_mode.h | 380 + drivers/mcst/smi/ddk768/ddk768_power.c | 269 + drivers/mcst/smi/ddk768/ddk768_power.h | 58 + drivers/mcst/smi/ddk768/ddk768_reg.h | 2849 + drivers/mcst/smi/ddk768/ddk768_swi2c.c | 463 + drivers/mcst/smi/ddk768/ddk768_swi2c.h | 95 + drivers/mcst/smi/ddk768/ddk768_timer.c | 341 + drivers/mcst/smi/ddk768/ddk768_timer.h | 150 + 
drivers/mcst/smi/ddk768/ddk768_video.c | 1075 + drivers/mcst/smi/ddk768/ddk768_video.h | 564 + drivers/mcst/smi/ddk768/ddkdebug.c | 227 + drivers/mcst/smi/ddk768/ddkdebug.h | 141 + drivers/mcst/smi/ddk768/hdmiregs.h | 945 + drivers/mcst/smi/ddk768/l3.c | 65 + drivers/mcst/smi/ddk768/l3.h | 22 + drivers/mcst/smi/ddk768/uda1345.c | 368 + drivers/mcst/smi/ddk768/uda1345.h | 131 + drivers/mcst/smi/ddk768/vdif.h | 63 + drivers/mcst/smi/hw750.c | 292 + drivers/mcst/smi/hw750.h | 154 + drivers/mcst/smi/hw768.c | 255 + drivers/mcst/smi/hw768.h | 150 + drivers/mcst/smi/hw_com.h | 92 + drivers/mcst/smi/smi_drv.c | 509 + drivers/mcst/smi/smi_drv.h | 424 + drivers/mcst/smi/smi_fbdev.c | 404 + drivers/mcst/smi/smi_main.c | 645 + drivers/mcst/smi/smi_mode.c | 1308 + drivers/mcst/smi/smi_plane.c | 501 + drivers/mcst/smi/smi_prime.c | 107 + drivers/mcst/smi/smi_snd.c | 672 + drivers/mcst/smi/smi_snd.h | 90 + drivers/mcst/smi/smi_ttm.c | 561 + drivers/mcst/video-imgtec/GPLHEADER | 344 + drivers/mcst/video-imgtec/MIT_COPYING | 39 + drivers/mcst/video-imgtec/Makefile | 54 + .../mcst/video-imgtec/encoder/quartz/Makefile | 73 + .../quartz/driver/kernel/code/fwtrace.c | 51 + .../driver/kernel/code/memmgr/memmgr_km.c | 1320 + .../quartz/driver/kernel/code/proc_FwIF.c | 2673 + .../driver/kernel/code/quartz_device_km.c | 2064 + .../quartz/driver/kernel/code/quartz_mmu.c | 747 + .../quartz/driver/kernel/code/vxe_KM.c | 4202 + .../quartz/driver/kernel/code/vxekm_debug.c | 1034 + .../kernel/include/e5500_public_regdefs.h | 907 + .../quartz/driver/kernel/include/fwtrace.h | 56 + .../driver/kernel/include/memmgr_api_quartz.h | 218 + .../quartz/driver/kernel/include/memmgr_km.h | 195 + .../quartz/driver/kernel/include/proc_FwIF.h | 324 + .../driver/kernel/include/quartz_device_km.h | 86 + .../quartz/driver/kernel/include/quartz_mmu.h | 147 + .../quartz/driver/kernel/include/vxe_KM.h | 527 + .../driver/kernel/include/vxe_km_api_quartz.h | 385 + .../quartz/driver/kernel/include/vxe_sysctl.h | 235 
+ .../H264_H265_FW_ALL_pipes_1_bin.c | 22074 ++++ .../H264_H265_FW_ALL_pipes_3_bin.c | 23954 +++++ .../fw_binaries/include_all_fw_variants.h | 86 + .../quartz/include/VXE_Enc_GlobalDefs.h | 342 + .../encoder/quartz/include/coreflags.h | 237 + .../encoder/quartz/include/target_config.h | 311 + .../encoder/quartz/include/vxe_common.h | 618 + .../encoder/quartz/include/vxe_fw_if.h | 1384 + .../encoder/quartz/platform/sysdev.c | 619 + .../quartz/img_soc_dmac_regs.h | 1147 + .../quartz/img_video_bus4_mmu_regs_defines.h | 1598 + .../register_includes/quartz/ltp_regs.h | 637 + drivers/mcst/video-imgtec/imgpcidd/Makefile | 24 + drivers/mcst/video-imgtec/imgpcidd/imgpcidd.c | 1766 + drivers/mcst/video-imgtec/imgpcidd/imgpcidd.h | 113 + drivers/mcst/video-imgtec/imgvideo/Makefile | 71 + .../libraries/pixelapi/code/pixel_api.c | 1591 + .../pixelapi/code/pixel_api_internals.c | 4158 + .../pixelapi/code/pixel_api_internals.h | 129 + .../libraries/pixelapi/include/pixel_api.h | 844 + .../libraries/talmmu_api/code/addr_alloc1.c | 900 + .../imglib/libraries/talmmu_api/code/hash.c | 754 + .../imglib/libraries/talmmu_api/code/pool.c | 322 + .../imglib/libraries/talmmu_api/code/ra.c | 2126 + .../libraries/talmmu_api/code/talmmu_api.c | 4236 + .../imglib/libraries/talmmu_api/code/trace.c | 64 + .../talmmu_api/include/addr_alloc1.h | 474 + .../libraries/talmmu_api/include/hash.h | 183 + .../libraries/talmmu_api/include/pool.h | 146 + .../imglib/libraries/talmmu_api/include/ra.h | 247 + .../libraries/talmmu_api/include/talmmu_api.h | 1691 + .../libraries/talmmu_api/include/trace.h | 69 + .../video-imgtec/imgvideo/include/img_defs.h | 315 + .../imgvideo/include/img_errors.h | 91 + .../video-imgtec/imgvideo/include/img_mem.h | 82 + .../imgvideo/include/img_pixfmts.h | 326 + .../imgvideo/include/img_structs.h | 85 + .../video-imgtec/imgvideo/include/img_types.h | 205 + .../include/linux-kernel/img_sysdefs.h | 286 + .../include/linux-kernel/img_systypes.h | 90 + 
.../imgvideo/kernel_comp/include/wrap_utils.h | 212 + .../kernel_comp/include/wrap_utils_km.h | 82 + .../libraries/wrap_utils/code/wrap_utils.c | 329 + .../imgvideo/list_utils/include/dq.h | 107 + .../imgvideo/list_utils/include/lst.h | 106 + .../imgvideo/list_utils/include/tre.h | 135 + .../imgvideo/list_utils/src/dq/dq.c | 340 + .../imgvideo/list_utils/src/lst/lst.c | 220 + .../imgvideo/list_utils/src/trees/addchild.c | 97 + .../imgvideo/list_utils/src/trees/addsib.c | 77 + .../imgvideo/list_utils/src/trees/copysub.c | 161 + .../imgvideo/list_utils/src/trees/finalsib.c | 79 + .../list_utils/src/trees/firstchild.c | 83 + .../imgvideo/list_utils/src/trees/init.c | 66 + .../imgvideo/list_utils/src/trees/isinsub.c | 74 + .../imgvideo/list_utils/src/trees/nextsib.c | 74 + .../imgvideo/list_utils/src/trees/prevsib.c | 80 + .../imgvideo/list_utils/src/trees/remleaf.c | 84 + .../imgvideo/list_utils/src/trees/remlsub.c | 88 + .../imgvideo/list_utils/src/trees/remsub.c | 98 + .../imgvideo/list_utils/src/trees/subnext.c | 107 + .../imgvideo/port_fwrk/include/dbgevent_api.h | 107 + .../port_fwrk/include/dbgevent_api_km.h | 103 + .../imgvideo/port_fwrk/include/dbgopt_api.h | 332 + .../port_fwrk/include/dbgopt_api_km.h | 334 + .../port_fwrk/include/dbgopt_api_um.h | 405 + .../imgvideo/port_fwrk/include/dman_api.h | 249 + .../imgvideo/port_fwrk/include/dman_api_km.h | 1467 + .../imgvideo/port_fwrk/include/idgen_api.h | 213 + .../port_fwrk/include/linux/sysbrg_drv.h | 73 + .../imgvideo/port_fwrk/include/page_alloc.h | 441 + .../port_fwrk/include/page_alloc_km.h | 196 + .../imgvideo/port_fwrk/include/perflog_api.h | 142 + .../imgvideo/port_fwrk/include/pman_api.h | 223 + .../imgvideo/port_fwrk/include/pool_api.h | 446 + .../imgvideo/port_fwrk/include/report_api.h | 309 + .../port_fwrk/include/report_levels.h | 113 + .../port_fwrk/include/report_modules.h | 354 + .../imgvideo/port_fwrk/include/rman_api.h | 517 + .../imgvideo/port_fwrk/include/sysbrg_api.h | 159 + 
.../port_fwrk/include/sysbrg_api_km.h | 245 + .../imgvideo/port_fwrk/include/sysbrg_utils.h | 339 + .../imgvideo/port_fwrk/include/sysdev_utils.h | 815 + .../port_fwrk/include/sysenv_api_km.h | 104 + .../imgvideo/port_fwrk/include/sysenv_utils.h | 103 + .../imgvideo/port_fwrk/include/sysmem_utils.h | 648 + .../imgvideo/port_fwrk/include/sysos_api_km.h | 1150 + .../imgvideo/port_fwrk/kernel/dbgevent_api.c | 81 + .../port_fwrk/kernel/dbgevent_api_km.c | 82 + .../imgvideo/port_fwrk/kernel/dbgopt_api.c | 552 + .../imgvideo/port_fwrk/kernel/dbgopt_api_km.c | 755 + .../imgvideo/port_fwrk/kernel/dman_api.c | 273 + .../imgvideo/port_fwrk/kernel/dman_api_km.c | 1764 + .../imgvideo/port_fwrk/kernel/idgen_api.c | 636 + .../port_fwrk/kernel/linux/perflog_api.c | 600 + .../port_fwrk/kernel/linux/sysbrg_api.c | 140 + .../port_fwrk/kernel/linux/sysbrg_drv.c | 585 + .../port_fwrk/kernel/linux/sysbrg_pdump.c | 1510 + .../kernel/linux/sysmem_api_ashmem.c | 161 + .../kernel/linux/sysmem_api_carveout.c | 363 + .../kernel/linux/sysmem_api_coherent.c | 664 + .../kernel/linux/sysmem_api_dmabuf.c | 417 + .../port_fwrk/kernel/linux/sysmem_api_ion.c | 587 + .../kernel/linux/sysmem_api_unified.c | 589 + .../port_fwrk/kernel/linux/sysos_api.c | 1962 + .../imgvideo/port_fwrk/kernel/page_alloc.c | 984 + .../imgvideo/port_fwrk/kernel/pman_api.c | 448 + .../imgvideo/port_fwrk/kernel/pool_api.c | 1007 + .../imgvideo/port_fwrk/kernel/rman_api.c | 1002 + .../imgvideo/port_fwrk/kernel/sysbrg_utils.c | 493 + .../imgvideo/port_fwrk/kernel/sysdev_utils.c | 767 + .../imgvideo/port_fwrk/kernel/sysenv_api.c | 84 + .../imgvideo/port_fwrk/kernel/sysenv_utils.c | 291 + .../imgvideo/port_fwrk/kernel/sysmem_utils.c | 576 + .../rpc/sysbrg/src/dbgevent_api_rpc.h | 98 + .../rpc/sysbrg/src/dbgevent_api_server.c | 87 + .../imgvideo/rpc/sysbrg/src/dbgopt_api_rpc.h | 167 + .../rpc/sysbrg/src/dbgopt_api_server.c | 166 + .../imgvideo/rpc/sysbrg/src/dman_api_rpc.h | 150 + .../imgvideo/rpc/sysbrg/src/dman_api_server.c | 
135 + .../rpc/sysbrg/src/memmgr_api_quartz_rpc.h | 210 + .../rpc/sysbrg/src/memmgr_api_quartz_server.c | 183 + .../imgvideo/rpc/sysbrg/src/page_alloc_rpc.h | 141 + .../rpc/sysbrg/src/page_alloc_server.c | 118 + .../rpc/sysbrg/src/vxe_km_api_quartz_rpc.h | 215 + .../rpc/sysbrg/src/vxe_km_api_quartz_server.c | 187 + .../imgvideo/rpc/sysbrg/src/wrap_utils_rpc.h | 121 + .../rpc/sysbrg/src/wrap_utils_server.c | 111 + .../imgvideo/secure_media/tal/code/tal.c | 667 + .../imgvideo/secure_media/tal/include/tal.h | 210 + .../secure_media/target/code/target.c | 1473 + .../secure_media/target/include/target.h | 897 + .../imgvideo/system/include/api_common.h | 142 + .../imgvideo/system/include/system.h | 105 + drivers/mcst/video-imgtec/linux/Makefile | 75 + .../video-imgtec/linux/include/img_mem_man.h | 174 + .../linux/include/uapi/img_mem_man.h | 60 + .../video-imgtec/linux/include/uapi/vxd.h | 323 + .../linux/include/uapi/vxd_pvdec.h | 113 + .../mcst/video-imgtec/linux/mem_man/Makefile | 75 + .../linux/mem_man/img_mem_carveout.c | 260 + .../linux/mem_man/img_mem_dmabuf.c | 272 + .../video-imgtec/linux/mem_man/img_mem_ion.c | 259 + .../video-imgtec/linux/mem_man/img_mem_man.c | 1282 + .../linux/mem_man/img_mem_man_priv.h | 156 + .../linux/mem_man/img_mem_secure.c | 209 + .../linux/mem_man/img_mem_unified.c | 500 + .../linux/mem_man/imgmmu/imgmmu.c | 949 + .../linux/mem_man/imgmmu/imgold/img_defs.h | 266 + .../linux/mem_man/imgmmu/imgold/img_errors.h | 91 + .../linux/mem_man/imgmmu/imgold/img_sysdefs.h | 282 + .../mem_man/imgmmu/imgold/img_systypes.h | 90 + .../linux/mem_man/imgmmu/imgold/img_types.h | 118 + .../linux/mem_man/imgmmu/kernel_heap.c | 287 + .../linux/mem_man/imgmmu/mmu_defs.h | 135 + .../linux/mem_man/imgmmu/mmulib/heap.h | 166 + .../linux/mem_man/imgmmu/mmulib/mmu.h | 406 + drivers/mcst/video-imgtec/linux/vxd/Makefile | 50 + drivers/mcst/video-imgtec/linux/vxd/vxd_api.c | 692 + .../mcst/video-imgtec/linux/vxd/vxd_common.h | 282 + 
.../mcst/video-imgtec/linux/vxd/vxd_core.c | 2391 + .../mcst/video-imgtec/linux/vxd/vxd_debugfs.c | 1279 + .../mcst/video-imgtec/linux/vxd/vxd_debugfs.h | 115 + .../mcst/video-imgtec/linux/vxd/vxd_plat.h | 54 + .../mcst/video-imgtec/linux/vxd/vxd_plat_dt.c | 241 + .../mcst/video-imgtec/linux/vxd/vxd_plat_dt.h | 63 + .../video-imgtec/linux/vxd/vxd_plat_emu.c | 487 + .../video-imgtec/linux/vxd/vxd_plat_europa.c | 116 + .../video-imgtec/linux/vxd/vxd_plat_fpga.c | 682 + .../mcst/video-imgtec/linux/vxd/vxd_pvdec.c | 2262 + .../video-imgtec/linux/vxd/vxd_pvdec_priv.h | 167 + .../video-imgtec/linux/vxd/vxd_pvdec_regs.h | 923 + drivers/mcst/wrnvram/Makefile | 13 + drivers/mcst/wrnvram/wrnvram.c | 683 + drivers/md/dm.c | 12 + drivers/md/md.c | 4 + drivers/media/dvb-core/dvb_frontend.c | 17 + drivers/media/rc/rc-main.c | 4 + drivers/media/usb/uvc/uvc_driver.c | 11 + drivers/media/v4l2-core/v4l2-ioctl.c | 4 + drivers/misc/Kconfig | 52 +- drivers/misc/Makefile | 4 + drivers/misc/eeprom/Kconfig | 11 + drivers/misc/eeprom/Makefile | 1 + drivers/misc/i2c_p2pmc.c | 400 + drivers/misc/isl22317.c | 616 + drivers/misc/lkdtm/heap.c | 178 +- drivers/misc/ltc4306.c | 891 + drivers/misc/ucd9080.c | 437 + drivers/mmc/host/sdhci-pci-core.c | 23 + drivers/net/bonding/bond_3ad.c | 4 + drivers/net/bonding/bond_netlink.c | 3 + drivers/net/can/Kconfig | 2 + drivers/net/can/Makefile | 3 +- drivers/net/can/mcst/Kconfig | 11 + drivers/net/can/mcst/Makefile | 10 + drivers/net/can/mcst/can2.h | 130 + drivers/net/can/mcst/elcan.h | 170 + drivers/net/can/mcst/elcan_debugfs.c | 546 + drivers/net/can/mcst/elcan_debugfs.h | 18 + drivers/net/can/mcst/elcan_dev.c | 1051 + drivers/net/can/mcst/elcan_pci.c | 251 + drivers/net/ethercat/Kconfig | 36 + drivers/net/ethercat/Makefile | 71 + drivers/net/ethercat/cdev.c | 320 + drivers/net/ethercat/cdev.h | 61 + drivers/net/ethercat/coe_emerg_ring.c | 177 + drivers/net/ethercat/coe_emerg_ring.h | 78 + drivers/net/ethercat/datagram.c | 649 + 
drivers/net/ethercat/datagram.h | 148 + drivers/net/ethercat/datagram_pair.c | 199 + drivers/net/ethercat/datagram_pair.h | 69 + drivers/net/ethercat/debug.c | 266 + drivers/net/ethercat/debug.h | 66 + drivers/net/ethercat/device.c | 729 + drivers/net/ethercat/device.h | 158 + drivers/net/ethercat/domain.c | 706 + drivers/net/ethercat/domain.h | 90 + drivers/net/ethercat/ethernet.c | 868 + drivers/net/ethercat/ethernet.h | 125 + drivers/net/ethercat/fmmu_config.c | 99 + drivers/net/ethercat/fmmu_config.h | 66 + drivers/net/ethercat/foe.h | 62 + drivers/net/ethercat/foe_request.c | 238 + drivers/net/ethercat/foe_request.h | 86 + drivers/net/ethercat/fsm_change.c | 577 + drivers/net/ethercat/fsm_change.h | 92 + drivers/net/ethercat/fsm_coe.c | 2534 + drivers/net/ethercat/fsm_coe.h | 82 + drivers/net/ethercat/fsm_foe.c | 930 + drivers/net/ethercat/fsm_foe.h | 94 + drivers/net/ethercat/fsm_master.c | 1261 + drivers/net/ethercat/fsm_master.h | 113 + drivers/net/ethercat/fsm_pdo.c | 806 + drivers/net/ethercat/fsm_pdo.h | 84 + drivers/net/ethercat/fsm_pdo_entry.c | 541 + drivers/net/ethercat/fsm_pdo_entry.h | 83 + drivers/net/ethercat/fsm_sii.c | 490 + drivers/net/ethercat/fsm_sii.h | 90 + drivers/net/ethercat/fsm_slave.c | 582 + drivers/net/ethercat/fsm_slave.h | 83 + drivers/net/ethercat/fsm_slave_config.c | 1731 + drivers/net/ethercat/fsm_slave_config.h | 86 + drivers/net/ethercat/fsm_slave_scan.c | 1027 + drivers/net/ethercat/fsm_slave_scan.h | 83 + drivers/net/ethercat/fsm_soe.c | 844 + drivers/net/ethercat/fsm_soe.h | 75 + drivers/net/ethercat/globals.h | 317 + drivers/net/ethercat/ioctl.c | 4606 + drivers/net/ethercat/ioctl.h | 786 + drivers/net/ethercat/mailbox.c | 210 + drivers/net/ethercat/mailbox.h | 60 + drivers/net/ethercat/master.c | 3256 + drivers/net/ethercat/master.h | 390 + drivers/net/ethercat/module.c | 674 + drivers/net/ethercat/pdo.c | 317 + drivers/net/ethercat/pdo.h | 75 + drivers/net/ethercat/pdo_entry.c | 132 + drivers/net/ethercat/pdo_entry.h | 
66 + drivers/net/ethercat/pdo_list.c | 346 + drivers/net/ethercat/pdo_list.h | 79 + drivers/net/ethercat/reg_request.c | 130 + drivers/net/ethercat/reg_request.h | 66 + drivers/net/ethercat/sdo.c | 132 + drivers/net/ethercat/sdo.h | 69 + drivers/net/ethercat/sdo_entry.c | 77 + drivers/net/ethercat/sdo_entry.h | 72 + drivers/net/ethercat/sdo_request.c | 258 + drivers/net/ethercat/sdo_request.h | 83 + drivers/net/ethercat/slave.c | 1000 + drivers/net/ethercat/slave.h | 273 + drivers/net/ethercat/slave_config.c | 1268 + drivers/net/ethercat/slave_config.h | 185 + drivers/net/ethercat/soe_errors.c | 96 + drivers/net/ethercat/soe_request.c | 254 + drivers/net/ethercat/soe_request.h | 80 + drivers/net/ethercat/sync.c | 178 + drivers/net/ethercat/sync.h | 68 + drivers/net/ethercat/sync_config.c | 63 + drivers/net/ethercat/sync_config.h | 59 + drivers/net/ethercat/voe_handler.c | 560 + drivers/net/ethercat/voe_handler.h | 73 + drivers/net/ethernet/3com/3c59x.c | 11 +- drivers/net/ethernet/Kconfig | 1 + drivers/net/ethernet/Makefile | 1 + drivers/net/ethernet/dec/tulip/tulip_core.c | 2 + drivers/net/ethernet/dec/tulip/winbond-840.c | 2 +- drivers/net/ethernet/intel/e1000/e1000_hw.c | 4 +- drivers/net/ethernet/intel/e1000/e1000_hw.h | 2 +- drivers/net/ethernet/intel/e1000/e1000_main.c | 4 +- drivers/net/ethernet/intel/e1000e/e1000.h | 12 + drivers/net/ethernet/intel/e1000e/ethtool.c | 7 +- drivers/net/ethernet/intel/e1000e/hw.h | 16 + drivers/net/ethernet/intel/e1000e/ich8lan.h | 10 +- drivers/net/ethernet/intel/e1000e/mac.c | 33 +- drivers/net/ethernet/intel/e1000e/netdev.c | 46 +- drivers/net/ethernet/intel/e1000e/nvm.c | 63 + drivers/net/ethernet/intel/igb/e1000_82575.c | 3 + drivers/net/ethernet/intel/igb/e1000_hw.h | 3 + drivers/net/ethernet/intel/igb/e1000_i210.c | 6 +- drivers/net/ethernet/intel/igb/igb.h | 6 + drivers/net/ethernet/intel/igb/igb_ethtool.c | 26 +- drivers/net/ethernet/intel/igb/igb_main.c | 57 +- drivers/net/ethernet/intel/igbvf/netdev.c | 20 +- 
drivers/net/ethernet/intel/ixgb/ixgb_main.c | 6 +- .../net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 22 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 58 +- .../net/ethernet/intel/ixgbevf/ixgbevf_main.c | 13 +- drivers/net/ethernet/marvell/sky2.c | 3 + drivers/net/ethernet/mcst/Kconfig | 59 + drivers/net/ethernet/mcst/Makefile | 12 + drivers/net/ethernet/mcst/l_e1000.h | 245 + drivers/net/ethernet/mcst/l_e1000_nort.c | 4177 + drivers/net/ethernet/mcst/l_e1000_rt.c | 1990 + drivers/net/ethernet/mcst/mgb.c | 5410 + drivers/net/ethernet/mcst/mxgbe/Makefile | 20 + drivers/net/ethernet/mcst/mxgbe/kcompat.c | 3 + drivers/net/ethernet/mcst/mxgbe/kcompat.h | 13 + drivers/net/ethernet/mcst/mxgbe/mxgbe.h | 256 + drivers/net/ethernet/mcst/mxgbe/mxgbe_dbg.h | 96 + .../net/ethernet/mcst/mxgbe/mxgbe_debugfs.c | 1599 + .../net/ethernet/mcst/mxgbe/mxgbe_debugfs.h | 18 + .../net/ethernet/mcst/mxgbe/mxgbe_ethtool.c | 343 + drivers/net/ethernet/mcst/mxgbe/mxgbe_gpio.c | 189 + drivers/net/ethernet/mcst/mxgbe/mxgbe_gpio.h | 12 + drivers/net/ethernet/mcst/mxgbe/mxgbe_hw.c | 263 + drivers/net/ethernet/mcst/mxgbe/mxgbe_hw.h | 16 + drivers/net/ethernet/mcst/mxgbe/mxgbe_i2c.c | 508 + drivers/net/ethernet/mcst/mxgbe/mxgbe_i2c.h | 26 + drivers/net/ethernet/mcst/mxgbe/mxgbe_mac.c | 206 + drivers/net/ethernet/mcst/mxgbe/mxgbe_mac.h | 54 + drivers/net/ethernet/mcst/mxgbe/mxgbe_main.c | 376 + drivers/net/ethernet/mcst/mxgbe/mxgbe_msix.c | 298 + drivers/net/ethernet/mcst/mxgbe/mxgbe_msix.h | 11 + drivers/net/ethernet/mcst/mxgbe/mxgbe_net.c | 1227 + drivers/net/ethernet/mcst/mxgbe/mxgbe_pci.c | 197 + drivers/net/ethernet/mcst/mxgbe/mxgbe_phy.c | 527 + drivers/net/ethernet/mcst/mxgbe/mxgbe_phy.h | 11 + drivers/net/ethernet/mcst/mxgbe/mxgbe_regs.h | 384 + drivers/net/ethernet/mcst/mxgbe/mxgbe_rxq.c | 648 + drivers/net/ethernet/mcst/mxgbe/mxgbe_rxq.h | 37 + drivers/net/ethernet/mcst/mxgbe/mxgbe_txq.c | 482 + drivers/net/ethernet/mcst/mxgbe/mxgbe_txq.h | 35 + 
drivers/net/ethernet/mcst/mxgbe/xgbe_regs.h | 1301 + drivers/net/ethernet/mcst/pcc.c | 1349 + drivers/net/ethernet/mcst/pci_sunlance.c | 251 + drivers/net/ethernet/mcst/rdma_net/Makefile | 7 + drivers/net/ethernet/mcst/rdma_net/README.txt | 109 + .../ethernet/mcst/rdma_net/create_mknod_rdma | 8 + .../mcst/rdma_net/get_event_rdma_net.c | 1297 + .../mcst/rdma_net/get_stat_rdma_net.c | 291 + .../net/ethernet/mcst/rdma_net/lvnet_net.c | 2257 + .../net/ethernet/mcst/rdma_net/rdma_arp_net.c | 47 + .../ethernet/mcst/rdma_net/rdma_error_net.h | 37 + .../ethernet/mcst/rdma_net/rdma_intr_net.c | 1136 + .../mcst/rdma_net/rdma_proc_init_net.c | 150 + .../net/ethernet/mcst/rdma_net/rdma_reg_net.h | 1057 + .../mcst/rdma_net/rdma_send_msg_net.c | 174 + .../mcst/rdma_net/rdma_user_intf_gl_net.h | 50 + .../mcst/rdma_net/rdma_user_intf_net.h | 867 + drivers/net/ethernet/mcst/sunlance_body.h | 3002 + drivers/net/ethernet/mcst/sunlance_sbus.c | 258 + .../net/ethernet/mellanox/mlx5/core/alloc.c | 28 + .../net/ethernet/mellanox/mlx5/core/en_main.c | 8 + drivers/net/ethernet/realtek/8139cp.c | 13 +- drivers/net/phy/dp83867.c | 7 + drivers/net/phy/micrel.c | 8 +- drivers/net/tun.c | 9 + drivers/net/virtio_net.c | 8 + drivers/net/wireless/ath/ath9k/hif_usb.c | 4 + drivers/of/Kconfig | 6 +- drivers/parport/Kconfig | 24 + drivers/parport/Makefile | 15 + drivers/parport/ieee1284.c | 344 +- drivers/parport/parport_mcst.c | 3701 + drivers/parport/share.c | 48 +- drivers/pci/controller/Kconfig | 1 - drivers/pci/controller/vmd.c | 182 +- drivers/pci/msi.c | 142 +- drivers/pci/pci-sysfs.c | 12 + drivers/pci/pci.c | 9 +- drivers/pci/probe.c | 55 + drivers/pci/quirks.c | 144 + drivers/pps/Kconfig | 2 +- drivers/rtc/Kconfig | 23 +- drivers/rtc/Makefile | 2 + drivers/rtc/interface.c | 22 + drivers/rtc/rtc-cmos.c | 2 +- drivers/rtc/rtc-cy14b101p.c | 922 + drivers/rtc/rtc-ds1307.c | 86 + drivers/rtc/rtc-fm33256.c | 630 + drivers/scsi/megaraid/megaraid_sas_fusion.c | 5 + drivers/scsi/qla2xxx/qla_nx.c | 4 
+ drivers/scsi/qla2xxx/qla_os.c | 20 +- drivers/spi/spidev.c | 1 + drivers/staging/Kconfig | 2 + drivers/staging/Makefile | 1 + drivers/staging/crystalhd/DriverFwShare.h | 95 + drivers/staging/crystalhd/FleaDefs.h | 188 + drivers/staging/crystalhd/Kconfig | 6 + drivers/staging/crystalhd/Makefile | 9 + drivers/staging/crystalhd/bc_dts_defs.h | 639 + drivers/staging/crystalhd/bc_dts_glob_lnx.h | 331 + drivers/staging/crystalhd/bcm_70012_regs.h | 12298 +++ drivers/staging/crystalhd/bcm_70015_regs.h | 1376 + drivers/staging/crystalhd/crystalhd_cmds.c | 1221 + drivers/staging/crystalhd/crystalhd_cmds.h | 91 + .../staging/crystalhd/crystalhd_flea_ddr.c | 735 + .../staging/crystalhd/crystalhd_flea_ddr.h | 73 + .../staging/crystalhd/crystalhd_fleafuncs.c | 2960 + .../staging/crystalhd/crystalhd_fleafuncs.h | 62 + drivers/staging/crystalhd/crystalhd_fw_if.h | 388 + drivers/staging/crystalhd/crystalhd_hw.c | 1075 + drivers/staging/crystalhd/crystalhd_hw.h | 551 + .../staging/crystalhd/crystalhd_linkfuncs.c | 2061 + .../staging/crystalhd/crystalhd_linkfuncs.h | 228 + drivers/staging/crystalhd/crystalhd_lnx.c | 847 + drivers/staging/crystalhd/crystalhd_lnx.h | 93 + drivers/staging/crystalhd/crystalhd_misc.c | 957 + drivers/staging/crystalhd/crystalhd_misc.h | 182 + drivers/thermal/Kconfig | 1 - drivers/thermal/cpu_cooling.c | 2 + drivers/tty/hvc/Kconfig | 8 + drivers/tty/hvc/Makefile | 3 +- drivers/tty/hvc/hvc_l.c | 136 + drivers/tty/mxser.c | 31 +- drivers/tty/serial/8250/8250.h | 4 + drivers/tty/serial/8250/8250_core.c | 52 +- drivers/tty/serial/8250/8250_pci.c | 239 + drivers/tty/serial/8250/8250_port.c | 4 + drivers/tty/serial/8250/Kconfig | 2 + drivers/tty/serial/Kconfig | 21 +- drivers/tty/serial/Makefile | 2 + drivers/tty/serial/l_zilog.c | 2151 + drivers/tty/serial/l_zilog.h | 342 + drivers/tty/serial/lmscon.c | 842 + drivers/tty/serial/serial_core.c | 44 +- drivers/tty/sysrq.c | 21 + drivers/tty/tty_io.c | 4 +- drivers/tty/vt/keyboard.c | 2 + drivers/tty/vt/vt.c | 5 + 
drivers/usb/core/hcd.c | 20 +- drivers/usb/core/hub.c | 1 - drivers/usb/core/quirks.c | 11 + drivers/usb/host/ehci-hcd.c | 85 +- drivers/usb/host/ehci-mem.c | 14 +- drivers/usb/host/ehci-pci.c | 29 + drivers/usb/host/ehci-sched.c | 11 + drivers/usb/host/ehci.h | 15 +- drivers/usb/host/ohci-hcd.c | 59 +- drivers/usb/host/ohci-hub.c | 16 +- drivers/usb/host/ohci-pci.c | 24 +- drivers/usb/host/ohci.h | 7 +- drivers/usb/host/pci-quirks.c | 8 +- drivers/usb/host/xhci-pci.c | 7 + drivers/usb/serial/cp210x.c | 3 + drivers/usb/storage/usb.c | 8 + drivers/vfio/Kconfig | 2 +- drivers/vfio/pci/Kconfig | 2 +- drivers/vfio/pci/vfio_pci_config.c | 4 + drivers/video/Kconfig | 69 + drivers/video/console/vgacon.c | 6 + drivers/video/fbdev/Kconfig | 2 +- drivers/video/fbdev/Makefile | 4 + drivers/video/fbdev/aty/aty128fb.c | 5 + drivers/video/fbdev/aty/atyfb_base.c | 7 + drivers/video/fbdev/aty/radeon_base.c | 2 + drivers/video/fbdev/core/cfbcopyarea.c | 17 + drivers/video/fbdev/core/cfbfillrect.c | 17 + drivers/video/fbdev/core/cfbimgblt.c | 20 +- drivers/video/fbdev/core/fbmem.c | 7 + drivers/video/fbdev/lynxfb/Makefile | 63 + drivers/video/fbdev/lynxfb/ddk750.h | 31 + drivers/video/fbdev/lynxfb/ddk750_chip.c | 589 + drivers/video/fbdev/lynxfb/ddk750_chip.h | 95 + drivers/video/fbdev/lynxfb/ddk750_display.c | 340 + drivers/video/fbdev/lynxfb/ddk750_display.h | 123 + drivers/video/fbdev/lynxfb/ddk750_dvi.c | 114 + drivers/video/fbdev/lynxfb/ddk750_dvi.h | 79 + drivers/video/fbdev/lynxfb/ddk750_help.c | 36 + drivers/video/fbdev/lynxfb/ddk750_help.h | 70 + drivers/video/fbdev/lynxfb/ddk750_hwi2c.c | 295 + drivers/video/fbdev/lynxfb/ddk750_hwi2c.h | 30 + drivers/video/fbdev/lynxfb/ddk750_mode.c | 246 + drivers/video/fbdev/lynxfb/ddk750_mode.h | 57 + drivers/video/fbdev/lynxfb/ddk750_power.c | 242 + drivers/video/fbdev/lynxfb/ddk750_power.h | 84 + drivers/video/fbdev/lynxfb/ddk750_reg.h | 381 + drivers/video/fbdev/lynxfb/ddk750_sii164.c | 464 + 
drivers/video/fbdev/lynxfb/ddk750_sii164.h | 185 + drivers/video/fbdev/lynxfb/ddk750_swi2c.c | 500 + drivers/video/fbdev/lynxfb/ddk750_swi2c.h | 90 + drivers/video/fbdev/lynxfb/lynx_accel.c | 446 + drivers/video/fbdev/lynxfb/lynx_accel.h | 151 + drivers/video/fbdev/lynxfb/lynx_cursor.c | 227 + drivers/video/fbdev/lynxfb/lynx_cursor.h | 33 + drivers/video/fbdev/lynxfb/lynx_drv.c | 1749 + drivers/video/fbdev/lynxfb/lynx_drv.h | 278 + drivers/video/fbdev/lynxfb/lynx_help.h | 115 + drivers/video/fbdev/lynxfb/lynx_hw750.c | 685 + drivers/video/fbdev/lynxfb/lynx_hw750.h | 126 + drivers/video/fbdev/lynxfb/modedb.c | 357 + drivers/video/fbdev/lynxfb/ver.h | 37 + drivers/video/fbdev/mgam83/Makefile | 18 + drivers/video/fbdev/mgam83/mgam83fb.h | 289 + drivers/video/fbdev/mgam83/mgam83fb_base.c | 2284 + drivers/video/fbdev/mgam83/mgam83fb_ramdac.c | 779 + drivers/video/fbdev/mgam83/sbus_mgam83fb.h | 291 + .../video/fbdev/mgam83/sbus_mgam83fb_base.c | 1101 + drivers/video/fbdev/mgam83/sbus_ramdac.c | 495 + drivers/video/fbdev/vga16fb.c | 16 +- drivers/watchdog/Kconfig | 8 + drivers/watchdog/Makefile | 5 + drivers/watchdog/lwdt.c | 369 + e2k-minver-cpu-details | 32 + fs/Kconfig | 1 + fs/binfmt_elf.c | 48 +- fs/block_dev.c | 1 + fs/btrfs/ctree.c | 4 + fs/btrfs/inode-map.c | 4 + fs/buffer.c | 4 + fs/cifs/inode.c | 6 +- fs/compat_binfmt_elf.c | 2 + fs/compat_ioctl.c | 3 + fs/coredump.c | 6 + fs/dcache.c | 5 + fs/exec.c | 63 +- fs/ext4/ext4.h | 2 + fs/ext4/ext4_jbd2.h | 3 + fs/ext4/extents.c | 4 + fs/ext4/ialloc.c | 4 + fs/ext4/indirect.c | 1 + fs/ext4/inode.c | 62 +- fs/ext4/mballoc.c | 1 + fs/ext4/super.c | 2 +- fs/inode.c | 8 +- fs/kernfs/file.c | 13 +- fs/namei.c | 6 + fs/namespace.c | 4 + fs/nfs/dir.c | 2 +- fs/nfs/filelayout/filelayout.c | 3 + fs/nfs/getroot.c | 4 +- fs/nfs/internal.h | 2 +- fs/notify/fanotify/fanotify_user.c | 131 + fs/notify/fsnotify.c | 28 + fs/ntfs/dir.c | 4 + fs/posix_acl.c | 3 + fs/proc/Makefile | 3 + fs/proc/base.c | 122 + fs/proc/interrupts.c | 2 +- 
fs/proc/mcst_debug.c | 181 + fs/proc/meminfo.c | 3 + fs/proc/proc_console_ctrl.c | 202 + fs/proc/task_mmu.c | 20 + fs/quota/kqid.c | 15 + fs/splice.c | 5 +- gen_config-elbrus-1cp.awk | 21 + gen_config-elbrus-4c.awk | 26 + gen_config-elbrus-8c.awk | 20 + gen_config-elbrus-8c2.awk | 20 + include/asm-generic/percpu.h | 5 + include/asm-generic/pgtable.h | 3 + include/asm-generic/qrwlock.h | 16 + include/asm-generic/qspinlock.h | 4 + include/drm/bridge/dw_hdmi.h | 18 +- include/drm/drm_device.h | 4 + include/linux/audit.h | 8 + include/linux/blkdev.h | 4 + include/linux/ceph/osdmap.h | 4 + include/linux/clk.h | 4 + include/linux/clocksource.h | 4 + include/linux/compiler-gcc.h | 9 + include/linux/compiler.h | 49 + include/linux/cpufreq.h | 9 + include/linux/cpumask.h | 10 + include/linux/dmaengine.h | 3 + include/linux/el_posix.h | 95 + include/linux/elfcore.h | 6 +- include/linux/fb.h | 7 +- include/linux/fs.h | 9 + include/linux/fsnotify_backend.h | 9 +- include/linux/gfp.h | 4 + include/linux/gpio/driver.h | 2 + include/linux/highmem.h | 12 + include/linux/hrtimer.h | 4 + include/linux/i2c/pca953x.h | 30 + include/linux/interrupt.h | 4 + include/linux/iommu.h | 6 + include/linux/jiffies.h | 5 +- include/linux/kbuild.h | 12 +- include/linux/kernel.h | 4 + include/linux/kthread.h | 4 + include/linux/kvm_host.h | 10 +- include/linux/libata.h | 11 +- include/linux/lsm_hooks.h | 1 + include/linux/mac/mac_kernel.h | 13 + include/linux/mac/mac_types.h | 7 + include/linux/mcst/dsp_io.h | 7 + include/linux/mcst/gpio.h | 7 + include/linux/mcst/m2mlc_io.h | 7 + include/linux/mcst/mcst_selftest.h | 8 + include/linux/mcst/mgpm.h | 12 + include/linux/mcst/mgpm_io.h | 12 + include/linux/mcst/mmrm_io.h | 12 + include/linux/mcst/mokm.h | 10 + include/linux/mcst/mokx_user_intf.h | 8 + include/linux/mcst/mpk.h | 7 + include/linux/mcst/msps_io.h | 8 + include/linux/mcst/mvp_def.h | 12 + include/linux/mcst/p2ssbus.h | 213 + include/linux/mcst/rdma_user_intf.h | 7 + 
include/linux/mcst/user_intf.h | 8 + include/linux/mcst/wrnvram.h | 92 + include/linux/mcst/wrnvram_io.h | 20 + include/linux/mcst_net_rt.h | 8 + include/linux/mcst_rt.h | 11 + include/linux/mm.h | 16 + include/linux/mm_types.h | 33 + include/linux/mman.h | 3 + include/linux/moduleparam.h | 4 + include/linux/mutex.h | 32 +- include/linux/namei.h | 4 + include/linux/netdevice.h | 23 +- include/linux/nfs_xdr.h | 1 + include/linux/of.h | 16 + include/linux/oom.h | 11 + include/linux/panic2nvram.h | 11 + include/linux/parport.h | 65 + include/linux/parport_pc.h | 68 +- include/linux/pci-dma-compat.h | 8 + include/linux/pci.h | 10 + include/linux/pci_ids.h | 69 +- include/linux/perf_event.h | 1 + include/linux/platform_data/i2c-l-i2c2.h | 26 + include/linux/preempt.h | 51 +- include/linux/rtc.h | 3 + include/linux/rtmutex.h | 23 +- include/linux/scatterlist.h | 5 + include/linux/sched.h | 139 + include/linux/sched/sysctl.h | 8 +- include/linux/sched/user.h | 7 + include/linux/signal_types.h | 6 + include/linux/skbuff.h | 3 + include/linux/slab.h | 7 + include/linux/spinlock.h | 4 + include/linux/spinlock_api_smp.h | 47 + include/linux/spinlock_api_up.h | 2 + include/linux/splice.h | 5 + include/linux/sunrpc/svc_xprt.h | 3 + include/linux/suspend.h | 5 + include/linux/swap.h | 69 +- include/linux/swiotlb.h | 1 - include/linux/syscalls.h | 12 + include/linux/sysctl.h | 3 + include/linux/time.h | 3 + include/linux/timex.h | 6 + include/linux/usb/hcd.h | 1 - include/linux/vmalloc.h | 3 + include/math-emu/op-1.h | 15 + include/math-emu/op-4.h | 25 + include/math-emu/op-common.h | 68 +- include/net/netns/ipv4.h | 2 + include/net/sock.h | 8 + include/sound/hda_codec.h | 3 + include/trace/events/iommu.h | 21 + include/trace/events/mmflags.h | 1 + include/trace/trace_events.h | 10 + include/uapi/asm-generic/ioctls.h | 3 + include/uapi/asm-generic/resource.h | 14 +- include/uapi/drm/mga2_drm.h | 110 + include/uapi/linux/audit.h | 3 +- include/uapi/linux/auto_fs.h | 3 +- 
include/uapi/linux/bpf.h | 5 + include/uapi/linux/el_posix.h | 137 + include/uapi/linux/elf-em.h | 3 +- include/uapi/linux/elf.h | 6 +- include/uapi/linux/eventpoll.h | 4 + include/uapi/linux/hw_breakpoint.h | 3 + include/uapi/linux/ip.h | 5 + include/uapi/linux/kvm.h | 7 + include/uapi/linux/mac/Kbuild | 1 + include/uapi/linux/mac/mac_types.h | 70 + include/uapi/linux/magic.h | 2 + include/uapi/linux/major.h | 2 + include/uapi/linux/mcst/Kbuild | 16 + include/uapi/linux/mcst/ddi.h | 268 + include/uapi/linux/mcst/define.h | 56 + include/uapi/linux/mcst/dsp_io.h | 173 + include/uapi/linux/mcst/gpio.h | 15 + include/uapi/linux/mcst/gpio_ac97.h | 84 + include/uapi/linux/mcst/kmng.h | 48 + include/uapi/linux/mcst/m2mlc_io.h | 1818 + include/uapi/linux/mcst/mcst_selftest.h | 67 + include/uapi/linux/mcst/mgpm.h | 96 + include/uapi/linux/mcst/mgpm_io.h | 139 + include/uapi/linux/mcst/mmrm_io.h | 97 + include/uapi/linux/mcst/mokm.h | 579 + include/uapi/linux/mcst/mokx_user_intf.h | 1040 + include/uapi/linux/mcst/mpk.h | 313 + include/uapi/linux/mcst/mpv_io.h | 127 + include/uapi/linux/mcst/msps_io.h | 91 + include/uapi/linux/mcst/mvp_def.h | 90 + include/uapi/linux/mcst/rdma_user_intf.h | 929 + include/uapi/linux/mcst/user_intf.h | 157 + include/uapi/linux/mcst_net_rt.h | 55 + include/uapi/linux/mcst_rt.h | 79 + include/uapi/linux/ptrace.h | 4 +- include/uapi/linux/sysctl.h | 5 + init/Kconfig | 14 + init/Makefile | 2 +- init/calibrate.c | 5 + init/init_task.c | 5 + init/main.c | 46 +- ipc/shm.c | 19 + ipc/util.c | 2 + kernel/Kconfig.hz | 18 + kernel/Kconfig.preempt | 28 +- kernel/Makefile | 2 + kernel/audit.c | 26 + kernel/auditsc.c | 3 + kernel/bpf/syscall.c | 2 +- kernel/configs/mcst.config | 118 + kernel/configs/mcst_debug.config | 48 + kernel/configs/mcst_rt.config | 11 + kernel/cpu.c | 6 + kernel/dma/remap.c | 6 + kernel/el_posix.c | 9506 ++ kernel/exit.c | 9 + kernel/fork.c | 121 + kernel/futex.c | 11 +- kernel/gcov/gcc_3_4.c | 5 + kernel/hung_task.c | 6 + 
kernel/irq/manage.c | 47 +- kernel/irq/proc.c | 87 +- kernel/kmod.c | 11 +- kernel/locking/mutex.c | 44 + kernel/locking/rtmutex.c | 83 +- kernel/locking/spinlock.c | 12 + kernel/params.c | 5 +- kernel/power/power.h | 3 + kernel/power/snapshot.c | 18 + kernel/printk/printk.c | 20 +- kernel/rcu/tree.c | 6 + kernel/rcu/tree_stall.h | 12 + kernel/rcu/update.c | 4 + kernel/sched/core.c | 570 +- kernel/sched/cpuacct.c | 8 + kernel/sched/cpupri.c | 4 + kernel/sched/cputime.c | 4 + kernel/sched/deadline.c | 126 + kernel/sched/fair.c | 16 +- kernel/sched/idle.c | 24 +- kernel/sched/rt.c | 142 + kernel/sched/sched.h | 99 + kernel/smp.c | 28 + kernel/softirq.c | 34 + kernel/sys.c | 4 + kernel/sysctl.c | 194 +- kernel/time/clocksource.c | 19 + kernel/time/jiffies.c | 6 +- kernel/time/ntp.c | 102 +- kernel/time/ntp_internal.h | 3 + kernel/time/tick-sched.c | 13 + kernel/time/timekeeping.c | 48 + kernel/time/timer.c | 14 + kernel/trace/Makefile | 20 + kernel/trace/trace.c | 9 + kernel/trace/trace_clock.c | 6 +- kernel/trace/trace_entries.h | 5 + kernel/trace/trace_events.c | 2 +- kernel/user.c | 6 + kernel/user_namespace.c | 5 + kernel/watch_preempt.c | 251 + kernel/workqueue.c | 7 + lib/Kconfig.debug | 9 +- lib/assoc_array.c | 3 + lib/cpumask.c | 3 + lib/ioremap.c | 17 + lib/libcrc32c.c | 20 +- lib/lzo/lzodefs.h | 4 +- lib/mpi/mpicoder.c | 4 + lib/vsprintf.c | 4 + lib/zstd/compress.c | 8 + ltt/Kconfig | 28 + ltt/Makefile | 1 + ltt/lttng-2.11.0/Makefile | 4 +- .../instrumentation/events/lttng-module/rcu.h | 4 +- ltt/lttng-2.11.0/lttng-events.c | 2 +- ltt/lttng-2.11.0/lttng-events.h | 4 +- mm/Kconfig | 15 + mm/Kconfig.debug | 1 + mm/compaction.c | 2 +- mm/dmapool.c | 11 + mm/filemap.c | 5 +- mm/gup.c | 39 + mm/huge_memory.c | 16 +- mm/kmemleak.c | 7 + mm/madvise.c | 8 + mm/memcontrol.c | 3 + mm/memory.c | 36 + mm/mempolicy.c | 17 +- mm/mlock.c | 18 + mm/mmap.c | 59 +- mm/mprotect.c | 36 + mm/mremap.c | 81 +- mm/oom_kill.c | 39 + mm/page-writeback.c | 8 + mm/page_alloc.c | 197 + 
mm/page_io.c | 12 + mm/readahead.c | 11 +- mm/rmap.c | 10 + mm/slab.c | 19 + mm/slab.h | 39 + mm/slab_common.c | 27 + mm/slob.c | 8 + mm/slub.c | 14 + mm/swap_state.c | 92 + mm/swapfile.c | 70 +- mm/vmalloc.c | 44 +- mm/vmscan.c | 12 + mm/zswap.c | 38 + net/ceph/ceph_fs.c | 5 +- net/core/dev.c | 27 +- net/core/dev_ioctl.c | 30 + net/core/devlink.c | 4 + net/core/ethtool.c | 3 + net/core/skbuff.c | 10 + net/core/sock.c | 20 + net/core/sysctl_net_core.c | 10 + net/ipv4/devinet.c | 5 + net/ipv4/sysctl_net_ipv4.c | 18 + net/ipv4/tcp_input.c | 2 + net/ipv4/tcp_ipv4.c | 2 + net/ipv4/tcp_output.c | 3 +- net/ipv4/udp.c | 55 +- net/ipv6/addrconf.c | 7 +- net/mac80211/ht.c | 4 + net/mac80211/mlme.c | 4 + net/mac80211/tdls.c | 3 + net/mac80211/tx.c | 8 + net/netfilter/ipvs/ip_vs_sync.c | 4 + net/netfilter/nf_conntrack_pptp.c | 2 +- net/netfilter/xt_AUDIT.c | 124 +- net/netlabel/netlabel_kapi.c | 122 + net/netlabel/netlabel_mgmt.c | 125 + net/netlabel/netlabel_mgmt.h | 8 + net/sched/sch_generic.c | 5 + net/sctp/sm_sideeffect.c | 8 + net/sctp/sm_statefuns.c | 16 + net/sctp/socket.c | 4 + net/wireless/nl80211.c | 4 + net/wireless/radiotap.c | 4 + scripts/Kbuild.include | 4 + scripts/checksyscalls.sh | 3 + scripts/depmod.sh | 13 +- scripts/headers_install.sh | 3 +- scripts/kallsyms.c | 15 + scripts/mkcompile_h | 7 +- scripts/mod/modpost.c | 1 + scripts/recordmcount.pl | 94 +- scripts/setlocalversion | 4 + security/Kconfig | 53 +- security/Makefile | 4 + security/altha/Kconfig | 11 + security/altha/Makefile | 3 + security/altha/altha_lsm.c | 336 + security/apparmor/.gitignore | 1 + security/apparmor/Makefile | 12 +- security/kiosk/Kconfig | 9 + security/kiosk/Makefile | 3 + security/kiosk/kiosk-test.sh | 252 + security/kiosk/kiosk_lsm.c | 337 + security/security.c | 1 + security/selinux/hooks.c | 15 +- sound/core/pcm_native.c | 2 + sound/pci/Kconfig | 2 +- sound/pci/cs4281.c | 21 +- sound/pci/hda/hda_codec.c | 19 +- sound/pci/hda/hda_controller.c | 18 +- sound/pci/hda/hda_intel.c 
| 36 + sound/pci/hda/patch_cirrus.c | 61 +- sound/soc/codecs/Kconfig | 6 +- sound/sparc/cs4231.c | 4 + tools/arch/e2k/include/uapi/asm/bitsperlong.h | 6 + tools/arch/e2k/include/uapi/asm/mman.h | 89 + tools/include/linux/compiler-gcc.h | 2 + tools/include/linux/compiler.h | 7 +- tools/include/linux/string.h | 16 - tools/include/uapi/asm/bitsperlong.h | 2 + tools/lib/api/Makefile | 2 +- tools/lib/subcmd/Makefile | 2 +- tools/lib/traceevent/plugins/Makefile | 4 +- tools/perf/Documentation/Makefile | 14 +- tools/perf/Makefile.config | 2 +- tools/perf/Makefile.perf | 30 +- tools/perf/arch/e2k/Build | 1 + tools/perf/arch/e2k/Makefile | 2 + tools/perf/arch/e2k/annotate/instructions.c | 5 + tools/perf/arch/e2k/util/Build | 3 + tools/perf/arch/e2k/util/auxtrace.c | 615 + tools/perf/arch/e2k/util/header.c | 76 + tools/perf/arch/e2k/util/pmu.c | 33 + tools/perf/jvmti/Build | 6 + tools/perf/pmu-events/arch/e2k/mapfile.csv | 7 + .../pmu-events/arch/e2k/parse_dprof_list.sh | 15 + tools/perf/pmu-events/arch/e2k/v1/Events.json | 657 + .../pmu-events/arch/e2k/v2/Cpu_metrics.json | 38 + tools/perf/pmu-events/arch/e2k/v2/Events.json | 842 + .../pmu-events/arch/e2k/v3/Cpu_metrics.json | 38 + tools/perf/pmu-events/arch/e2k/v3/Events.json | 817 + tools/perf/pmu-events/arch/e2k/v3/Uncore.json | 438 + .../arch/e2k/v3/Uncore_metrics.json | 66 + .../pmu-events/arch/e2k/v4/Cpu_metrics.json | 38 + tools/perf/pmu-events/arch/e2k/v4/Events.json | 817 + tools/perf/pmu-events/arch/e2k/v4/Uncore.json | 2128 + .../pmu-events/arch/e2k/v5/Cpu_metrics.json | 32 + tools/perf/pmu-events/arch/e2k/v5/Events.json | 842 + tools/perf/pmu-events/arch/e2k/v5/Uncore.json | 3467 + .../pmu-events/arch/e2k/v6/Cpu_metrics.json | 50 + tools/perf/pmu-events/arch/e2k/v6/Events.json | 957 + .../pmu-events/arch/e2k/v6/Uncore_hc.json | 434 + .../pmu-events/arch/e2k/v6/Uncore_hmu.json | 2274 + .../pmu-events/arch/e2k/v6/Uncore_iommu.json | 786 + .../pmu-events/arch/e2k/v6/Uncore_mc.json | 678 + 
.../arch/e2k/v6/Uncore_metrics.json | 162 + .../pmu-events/arch/e2k/v6/Uncore_prepic.json | 226 + tools/perf/pmu-events/jevents.c | 12 +- tools/perf/ui/browsers/annotate.c | 2 + tools/perf/util/Build | 1 + tools/perf/util/annotate.c | 47 + tools/perf/util/auxtrace.c | 7 + tools/perf/util/auxtrace.h | 3 + tools/perf/util/e2k-dimtp.h | 16 + tools/perf/util/genelf.h | 6 + tools/perf/util/header.c | 2 + tools/perf/util/symbol.c | 4 + tools/perf/util/vdso.c | 4 +- .../powerpc/copyloops/memcpy_mcsafe_64.S | 0 .../powerpc/primitives/asm/asm-const.h | 0 .../powerpc/primitives/asm/feature-fixups.h | 0 .../powerpc/primitives/asm/ppc_asm.h | 0 .../selftests/powerpc/stringloops/memcmp_32.S | 0 .../selftests/powerpc/stringloops/strlen_32.S | 0 .../selftests/powerpc/vphn/asm/lppaca.h | 0 virt/kvm/kvm_main.c | 46 +- 3437 files changed, 1410016 insertions(+), 1426 deletions(-) create mode 100644 Documentation/admin-guide/LSM/AltHa.rst create mode 100644 Documentation/devicetree/bindings/hwmon/emc2305.txt create mode 100644 Documentation/hwmon/emc2305 create mode 100644 arch/e2k/3p/Makefile create mode 100644 arch/e2k/3p/binfmt_elf32_3P.c create mode 100644 arch/e2k/3p/binfmt_elf64_3P.c create mode 100644 arch/e2k/3p/binfmt_elfe2kp.c create mode 100644 arch/e2k/3p/global_sp.c create mode 100644 arch/e2k/3p/umalloc.c create mode 100644 arch/e2k/Kconfig create mode 100644 arch/e2k/Kconfig.debug create mode 100644 arch/e2k/Kconfig.virt create mode 100644 arch/e2k/Makefile create mode 100644 arch/e2k/boot/.gitignore create mode 100644 arch/e2k/boot/Am85C30.c create mode 100644 arch/e2k/boot/Am85C30.h create mode 100644 arch/e2k/boot/Makefile create mode 100644 arch/e2k/boot/apic.c create mode 100644 arch/e2k/boot/aploader.S create mode 100644 arch/e2k/boot/aploader.lds create mode 100644 arch/e2k/boot/apstartup.S create mode 100644 arch/e2k/boot/bios/Makefile create mode 100644 arch/e2k/boot/bios/bios.c create mode 100644 arch/e2k/boot/bios/bios.h create mode 100644 
arch/e2k/boot/bios/ide_config.h create mode 100644 arch/e2k/boot/bios/init_kbd.c create mode 100644 arch/e2k/boot/bios/init_kbd.h create mode 100644 arch/e2k/boot/bios/io.c create mode 100644 arch/e2k/boot/bios/linuxpci.c create mode 100644 arch/e2k/boot/bios/mc146818rtc.h create mode 100644 arch/e2k/boot/bios/mga.c create mode 100644 arch/e2k/boot/bios/mga.h create mode 100644 arch/e2k/boot/bios/mpspec.c create mode 100644 arch/e2k/boot/bios/mptable.c create mode 100644 arch/e2k/boot/bios/newpci.c create mode 100644 arch/e2k/boot/bios/pci.h create mode 100644 arch/e2k/boot/bios/pci_isa_config.h create mode 100644 arch/e2k/boot/bios/pciconf.h create mode 100644 arch/e2k/boot/bios/printk.h create mode 100644 arch/e2k/boot/bios/southbridge.c create mode 100644 arch/e2k/boot/bios/southbridge.h create mode 100644 arch/e2k/boot/bios/superio.c create mode 100644 arch/e2k/boot/bios/video/Makefile create mode 100644 arch/e2k/boot/bios/video/helper_exec.c create mode 100644 arch/e2k/boot/bios/video/helper_mem.c create mode 100644 arch/e2k/boot/bios/video/init.c create mode 100644 arch/e2k/boot/bios/video/init.h create mode 100644 arch/e2k/boot/bios/video/int10.c create mode 100644 arch/e2k/boot/bios/video/int15.c create mode 100644 arch/e2k/boot/bios/video/int16.c create mode 100644 arch/e2k/boot/bios/video/int1a.c create mode 100644 arch/e2k/boot/bios/video/inte6.c create mode 100644 arch/e2k/boot/bios/video/pci-iface.c create mode 100644 arch/e2k/boot/bios/video/pci-iface.h create mode 100644 arch/e2k/boot/bios/video/x86emu/include/msr.h create mode 100644 arch/e2k/boot/bios/video/x86emu/include/x86emu.h create mode 100644 arch/e2k/boot/bios/video/x86emu/include/x86emu/fpu_regs.h create mode 100644 arch/e2k/boot/bios/video/x86emu/include/x86emu/regs.h create mode 100644 arch/e2k/boot/bios/video/x86emu/include/x86emu/types.h create mode 100644 arch/e2k/boot/bios/video/x86emu/src/x86emu/LICENSE create mode 100644 arch/e2k/boot/bios/video/x86emu/src/x86emu/Makefile create 
mode 100644 arch/e2k/boot/bios/video/x86emu/src/x86emu/debug.c create mode 100644 arch/e2k/boot/bios/video/x86emu/src/x86emu/decode.c create mode 100644 arch/e2k/boot/bios/video/x86emu/src/x86emu/fpu.c create mode 100644 arch/e2k/boot/bios/video/x86emu/src/x86emu/makefile create mode 100644 arch/e2k/boot/bios/video/x86emu/src/x86emu/makefile.linux create mode 100644 arch/e2k/boot/bios/video/x86emu/src/x86emu/ops.c create mode 100644 arch/e2k/boot/bios/video/x86emu/src/x86emu/ops2.c create mode 100644 arch/e2k/boot/bios/video/x86emu/src/x86emu/prim_ops.c create mode 100644 arch/e2k/boot/bios/video/x86emu/src/x86emu/sys.c create mode 100644 arch/e2k/boot/bios/video/x86emu/src/x86emu/validate.c create mode 100644 arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/debug.h create mode 100644 arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/decode.h create mode 100644 arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/fpu.h create mode 100644 arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/ops.h create mode 100644 arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/prim_asm.h create mode 100644 arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/prim_ops.h create mode 100644 arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/x86emui.h create mode 100644 arch/e2k/boot/boot.h create mode 100644 arch/e2k/boot/boot.lds create mode 100644 arch/e2k/boot/boot_io.h create mode 100644 arch/e2k/boot/bootblock.c create mode 100644 arch/e2k/boot/compressed.lds create mode 100644 arch/e2k/boot/compressed_guest.lds create mode 100644 arch/e2k/boot/console.c create mode 100644 arch/e2k/boot/decompress.c create mode 100644 arch/e2k/boot/defkeymap.c create mode 100644 arch/e2k/boot/dts/Makefile create mode 100644 arch/e2k/boot/dts/e1cp_e1cmt_tablet.dts create mode 100644 arch/e2k/boot/dts/e1cp_m2e-uvp.dts create mode 100644 arch/e2k/boot/dts/e1cp_mbe1c-pc.dts create mode 100644 arch/e2k/boot/dts/e1cp_p2p8-sip-s1.dts create mode 100644 arch/e2k/boot/dts/e2c_mono-pc.dts create mode 100644 
arch/e2k/boot/dts/e4c_apk-pc4c.dts create mode 100644 arch/e2k/boot/dts/e4c_el-2s-4.dts create mode 100644 arch/e2k/boot/dts/e4c_mbe2s-pc.dts create mode 100644 arch/e2k/boot/dts/e8c2_uatx_se.dts create mode 100644 arch/e2k/boot/dts/e8c_mbe8c-pc.dts create mode 100644 arch/e2k/boot/dts/e8c_swtx.dts create mode 120000 arch/e2k/boot/dts/include/dt-bindings create mode 100644 arch/e2k/boot/dumpsyms.c create mode 100644 arch/e2k/boot/e2k_sic.h create mode 100644 arch/e2k/boot/epic.c create mode 100644 arch/e2k/boot/epiggy.lds create mode 100644 arch/e2k/boot/flash.lds create mode 100644 arch/e2k/boot/info.c create mode 100644 arch/e2k/boot/jumpstart.c create mode 100644 arch/e2k/boot/keyboard.c create mode 100644 arch/e2k/boot/machdep_fpic.c create mode 100644 arch/e2k/boot/malloc.c create mode 100644 arch/e2k/boot/pic.h create mode 100644 arch/e2k/boot/recovery_string.S create mode 100644 arch/e2k/boot/recovery_string_fpic.S create mode 100644 arch/e2k/boot/recovery_string_v5.S create mode 100644 arch/e2k/boot/recovery_string_v5_fpic.S create mode 100644 arch/e2k/boot/romloader.S create mode 100644 arch/e2k/boot/romloader.lds create mode 100644 arch/e2k/boot/romstartup.S create mode 100644 arch/e2k/boot/smp.c create mode 100644 arch/e2k/boot/stdio.c create mode 100644 arch/e2k/boot/string.c create mode 100644 arch/e2k/boot/string_fpic.c create mode 100644 arch/e2k/boot/string_guest_fpic.c create mode 100644 arch/e2k/boot/topology.h create mode 100644 arch/e2k/boot/vga.c create mode 100644 arch/e2k/boot/vga.h create mode 100644 arch/e2k/boot/vmlinux.bin.lds create mode 100644 arch/e2k/boot/zip.c create mode 100644 arch/e2k/configs/build-config create mode 100644 arch/e2k/configs/defconfig create mode 100644 arch/e2k/configs/defconfig-guest-lms-pv create mode 100644 arch/e2k/configs/e12c-lms-defconfig create mode 100644 arch/e2k/configs/guest_lms_defconfig create mode 100644 arch/e2k/configs/host_lms_defconfig create mode 100644 arch/e2k/configs/mcst_rt.config create 
mode 100644 arch/e2k/fast_syscalls/Makefile create mode 100644 arch/e2k/fast_syscalls/clkr.c create mode 100644 arch/e2k/fast_syscalls/compat.c create mode 100644 arch/e2k/fast_syscalls/fast_clock_gettime.c create mode 100644 arch/e2k/fast_syscalls/fast_getcontext.c create mode 100644 arch/e2k/fast_syscalls/fast_getcpu.c create mode 100644 arch/e2k/fast_syscalls/fast_set_return.c create mode 100644 arch/e2k/fast_syscalls/fast_siggetmask.c create mode 100644 arch/e2k/fast_syscalls/fast_syscalls.c create mode 100644 arch/e2k/fast_syscalls/protected.c create mode 100644 arch/e2k/fast_syscalls/sclkr.c create mode 120000 arch/e2k/include/asm-l create mode 100644 arch/e2k/include/asm/3p.h create mode 100644 arch/e2k/include/asm/Kbuild create mode 100644 arch/e2k/include/asm/a.out.h create mode 100644 arch/e2k/include/asm/aau_context.h create mode 100644 arch/e2k/include/asm/aau_regs.h create mode 100644 arch/e2k/include/asm/aau_regs_access.h create mode 100644 arch/e2k/include/asm/aau_regs_types.h create mode 100644 arch/e2k/include/asm/acenv.h create mode 100644 arch/e2k/include/asm/acpi.h create mode 100644 arch/e2k/include/asm/alternative-asm.h create mode 100644 arch/e2k/include/asm/alternative.h create mode 100644 arch/e2k/include/asm/apic.h create mode 100644 arch/e2k/include/asm/apic_regs.h create mode 100644 arch/e2k/include/asm/apicdef.h create mode 100644 arch/e2k/include/asm/atomic.h create mode 100644 arch/e2k/include/asm/atomic_api.h create mode 100644 arch/e2k/include/asm/auxvec.h create mode 100644 arch/e2k/include/asm/barrier.h create mode 100644 arch/e2k/include/asm/bios_map.h create mode 100644 arch/e2k/include/asm/bitops.h create mode 100644 arch/e2k/include/asm/bitrev.h create mode 100644 arch/e2k/include/asm/bitsperlong.h create mode 100644 arch/e2k/include/asm/boot_flags.h create mode 100644 arch/e2k/include/asm/boot_profiling.h create mode 100644 arch/e2k/include/asm/boot_recovery.h create mode 100644 arch/e2k/include/asm/bootinfo.h create mode 
100644 arch/e2k/include/asm/bug.h create mode 100644 arch/e2k/include/asm/byteorder.h create mode 100644 arch/e2k/include/asm/cache.h create mode 100644 arch/e2k/include/asm/cacheflush.h create mode 100644 arch/e2k/include/asm/checksum.h create mode 100644 arch/e2k/include/asm/clkr.h create mode 100644 arch/e2k/include/asm/clock_info.h create mode 100644 arch/e2k/include/asm/cmos.h create mode 100644 arch/e2k/include/asm/cmpxchg.h create mode 100644 arch/e2k/include/asm/cnt_point.h create mode 100644 arch/e2k/include/asm/compat.h create mode 100644 arch/e2k/include/asm/compiler.h create mode 100644 arch/e2k/include/asm/console.h create mode 100644 arch/e2k/include/asm/convert_array.h create mode 100644 arch/e2k/include/asm/copy-hw-stacks.h create mode 100644 arch/e2k/include/asm/coredump.h create mode 100644 arch/e2k/include/asm/cpu.h create mode 100644 arch/e2k/include/asm/cpu_features.h create mode 100644 arch/e2k/include/asm/cpu_regs.h create mode 100644 arch/e2k/include/asm/cpu_regs_access.h create mode 100644 arch/e2k/include/asm/cpu_regs_types.h create mode 100644 arch/e2k/include/asm/current.h create mode 100644 arch/e2k/include/asm/debug_print.h create mode 100644 arch/e2k/include/asm/delay.h create mode 100644 arch/e2k/include/asm/device.h create mode 100644 arch/e2k/include/asm/dma-direct.h create mode 100644 arch/e2k/include/asm/dma-mapping.h create mode 100644 arch/e2k/include/asm/dma.h create mode 100644 arch/e2k/include/asm/e12c.h create mode 100644 arch/e2k/include/asm/e16c.h create mode 100644 arch/e2k/include/asm/e1cp.h create mode 100644 arch/e2k/include/asm/e2c3.h create mode 100644 arch/e2k/include/asm/e2k-iommu.h create mode 100644 arch/e2k/include/asm/e2k.h create mode 100644 arch/e2k/include/asm/e2k_api.h create mode 100644 arch/e2k/include/asm/e2k_debug.h create mode 100644 arch/e2k/include/asm/e2k_ptypes.h create mode 100644 arch/e2k/include/asm/e2k_sic.h create mode 100644 arch/e2k/include/asm/e2k_syswork.h create mode 100644 
arch/e2k/include/asm/e2s.h create mode 100644 arch/e2k/include/asm/e8c.h create mode 100644 arch/e2k/include/asm/e8c2.h create mode 100644 arch/e2k/include/asm/el_posix.h create mode 100644 arch/e2k/include/asm/elf.h create mode 100644 arch/e2k/include/asm/epic.h create mode 100644 arch/e2k/include/asm/epic_regs.h create mode 100644 arch/e2k/include/asm/epicdef.h create mode 100644 arch/e2k/include/asm/errors_hndl.h create mode 100644 arch/e2k/include/asm/es2.h create mode 100644 arch/e2k/include/asm/exec.h create mode 100644 arch/e2k/include/asm/fast_syscalls.h create mode 100644 arch/e2k/include/asm/fb.h create mode 100644 arch/e2k/include/asm/fcntl.h create mode 100644 arch/e2k/include/asm/floppy.h create mode 100644 arch/e2k/include/asm/ftrace.h create mode 100644 arch/e2k/include/asm/futex.h create mode 100644 arch/e2k/include/asm/getsp_adj.h create mode 100644 arch/e2k/include/asm/glob_regs.h create mode 100644 arch/e2k/include/asm/gpio.h create mode 100644 arch/e2k/include/asm/gregs.h create mode 100644 arch/e2k/include/asm/hardirq.h create mode 100644 arch/e2k/include/asm/hb_regs.h create mode 100644 arch/e2k/include/asm/head.h create mode 100644 arch/e2k/include/asm/host_printk.h create mode 100644 arch/e2k/include/asm/hugetlb.h create mode 100644 arch/e2k/include/asm/hw_breakpoint.h create mode 100644 arch/e2k/include/asm/hw_irq.h create mode 100644 arch/e2k/include/asm/hw_stacks.h create mode 100644 arch/e2k/include/asm/io.h create mode 100644 arch/e2k/include/asm/io_apic.h create mode 100644 arch/e2k/include/asm/io_apic_regs.h create mode 100644 arch/e2k/include/asm/io_epic.h create mode 100644 arch/e2k/include/asm/io_epic_regs.h create mode 100644 arch/e2k/include/asm/ioctl.h create mode 100644 arch/e2k/include/asm/ioctls.h create mode 100644 arch/e2k/include/asm/iolinkmask.h create mode 100644 arch/e2k/include/asm/ipcbuf.h create mode 100644 arch/e2k/include/asm/irq.h create mode 100644 arch/e2k/include/asm/irq_vectors.h create mode 100644 
arch/e2k/include/asm/irq_work.h create mode 100644 arch/e2k/include/asm/irqdomain.h create mode 100644 arch/e2k/include/asm/irqflags.h create mode 100644 arch/e2k/include/asm/kdebug.h create mode 100644 arch/e2k/include/asm/keyboard.h create mode 100644 arch/e2k/include/asm/kprobes.h create mode 100644 arch/e2k/include/asm/kvm/Kbuild create mode 100644 arch/e2k/include/asm/kvm/aau_regs_access.h create mode 100644 arch/e2k/include/asm/kvm/async_pf.h create mode 100644 arch/e2k/include/asm/kvm/boot.h create mode 100644 arch/e2k/include/asm/kvm/boot_spinlock.h create mode 100644 arch/e2k/include/asm/kvm/boot_spinlock_slow.h create mode 100644 arch/e2k/include/asm/kvm/copy-hw-stacks.h create mode 100644 arch/e2k/include/asm/kvm/cpu_hv_regs_access.h create mode 100644 arch/e2k/include/asm/kvm/cpu_hv_regs_types.h create mode 100644 arch/e2k/include/asm/kvm/cpu_regs_access.h create mode 100644 arch/e2k/include/asm/kvm/csd_lock.h create mode 100644 arch/e2k/include/asm/kvm/debug.h create mode 100644 arch/e2k/include/asm/kvm/gmmu_context.h create mode 100644 arch/e2k/include/asm/kvm/gpid.h create mode 100644 arch/e2k/include/asm/kvm/gregs.h create mode 100644 arch/e2k/include/asm/kvm/guest.h create mode 100644 arch/e2k/include/asm/kvm/guest/Kbuild create mode 100644 arch/e2k/include/asm/kvm/guest/aau_context.h create mode 100644 arch/e2k/include/asm/kvm/guest/area_alloc.h create mode 100644 arch/e2k/include/asm/kvm/guest/atomic_api.h create mode 100644 arch/e2k/include/asm/kvm/guest/boot.h create mode 100644 arch/e2k/include/asm/kvm/guest/boot_flags.h create mode 100644 arch/e2k/include/asm/kvm/guest/boot_mmu_context.h create mode 100644 arch/e2k/include/asm/kvm/guest/boot_spinlock.h create mode 100644 arch/e2k/include/asm/kvm/guest/bootinfo.h create mode 100644 arch/e2k/include/asm/kvm/guest/cacheflush.h create mode 100644 arch/e2k/include/asm/kvm/guest/clkr.h create mode 100644 arch/e2k/include/asm/kvm/guest/console.h create mode 100644 
arch/e2k/include/asm/kvm/guest/copy-hw-stacks.h create mode 100644 arch/e2k/include/asm/kvm/guest/cpu.h create mode 100644 arch/e2k/include/asm/kvm/guest/debug.h create mode 100644 arch/e2k/include/asm/kvm/guest/e2k.h create mode 100644 arch/e2k/include/asm/kvm/guest/e2k_virt.h create mode 100644 arch/e2k/include/asm/kvm/guest/fast_syscalls.h create mode 100644 arch/e2k/include/asm/kvm/guest/gregs.h create mode 100644 arch/e2k/include/asm/kvm/guest/host_printk.h create mode 100644 arch/e2k/include/asm/kvm/guest/hvc_l.h create mode 100644 arch/e2k/include/asm/kvm/guest/hw_stacks.h create mode 100644 arch/e2k/include/asm/kvm/guest/io.h create mode 100644 arch/e2k/include/asm/kvm/guest/irq.h create mode 100644 arch/e2k/include/asm/kvm/guest/machdep.h create mode 100644 arch/e2k/include/asm/kvm/guest/mm_hooks.h create mode 100644 arch/e2k/include/asm/kvm/guest/mmu.h create mode 100644 arch/e2k/include/asm/kvm/guest/mmu_context.h create mode 100644 arch/e2k/include/asm/kvm/guest/pgatomic.h create mode 100644 arch/e2k/include/asm/kvm/guest/process.h create mode 100644 arch/e2k/include/asm/kvm/guest/processor.h create mode 100644 arch/e2k/include/asm/kvm/guest/ptrace.h create mode 100644 arch/e2k/include/asm/kvm/guest/pv_info.h create mode 100644 arch/e2k/include/asm/kvm/guest/regs_state.h create mode 100644 arch/e2k/include/asm/kvm/guest/secondary_space.h create mode 100644 arch/e2k/include/asm/kvm/guest/setup.h create mode 100644 arch/e2k/include/asm/kvm/guest/sge.h create mode 100644 arch/e2k/include/asm/kvm/guest/signal.h create mode 100644 arch/e2k/include/asm/kvm/guest/smp.h create mode 100644 arch/e2k/include/asm/kvm/guest/spinlock.h create mode 100644 arch/e2k/include/asm/kvm/guest/stacks.h create mode 100644 arch/e2k/include/asm/kvm/guest/string.h create mode 100644 arch/e2k/include/asm/kvm/guest/switch.h create mode 100644 arch/e2k/include/asm/kvm/guest/switch_to.h create mode 100644 arch/e2k/include/asm/kvm/guest/sync_pg_tables.h create mode 100644 
arch/e2k/include/asm/kvm/guest/system.h create mode 100644 arch/e2k/include/asm/kvm/guest/time.h create mode 100644 arch/e2k/include/asm/kvm/guest/timex.h create mode 100644 arch/e2k/include/asm/kvm/guest/tlb_regs_types.h create mode 100644 arch/e2k/include/asm/kvm/guest/tlbflush.h create mode 100644 arch/e2k/include/asm/kvm/guest/trace-defs.h create mode 100644 arch/e2k/include/asm/kvm/guest/trace-hw-stacks.h create mode 100644 arch/e2k/include/asm/kvm/guest/trap_table.S.h create mode 100644 arch/e2k/include/asm/kvm/guest/trap_table.h create mode 100644 arch/e2k/include/asm/kvm/guest/traps.h create mode 100644 arch/e2k/include/asm/kvm/guest/v2p.h create mode 100644 arch/e2k/include/asm/kvm/guest/vga.h create mode 100644 arch/e2k/include/asm/kvm/head.h create mode 100644 arch/e2k/include/asm/kvm/host_printk.h create mode 100644 arch/e2k/include/asm/kvm/hvc-console.h create mode 100644 arch/e2k/include/asm/kvm/hypercall.h create mode 100644 arch/e2k/include/asm/kvm/hypervisor.h create mode 100644 arch/e2k/include/asm/kvm/irq.h create mode 100644 arch/e2k/include/asm/kvm/machdep.h create mode 100644 arch/e2k/include/asm/kvm/mm.h create mode 100644 arch/e2k/include/asm/kvm/mm_hooks.h create mode 100644 arch/e2k/include/asm/kvm/mmu.h create mode 100644 arch/e2k/include/asm/kvm/mmu_context.h create mode 100644 arch/e2k/include/asm/kvm/mmu_hv_regs_access.h create mode 100644 arch/e2k/include/asm/kvm/mmu_hv_regs_types.h create mode 100644 arch/e2k/include/asm/kvm/mmu_regs_access.h create mode 100644 arch/e2k/include/asm/kvm/nid.h create mode 100644 arch/e2k/include/asm/kvm/page.h create mode 100644 arch/e2k/include/asm/kvm/page_track.h create mode 100644 arch/e2k/include/asm/kvm/pgtable-tdp.h create mode 100644 arch/e2k/include/asm/kvm/pgtable-x86.h create mode 100644 arch/e2k/include/asm/kvm/pgtable.h create mode 100644 arch/e2k/include/asm/kvm/process.h create mode 100644 arch/e2k/include/asm/kvm/ptrace.h create mode 100644 arch/e2k/include/asm/kvm/pv-emul.h create mode 
100644 arch/e2k/include/asm/kvm/regs_state.h create mode 100644 arch/e2k/include/asm/kvm/runstate.h create mode 100644 arch/e2k/include/asm/kvm/spinlock.h create mode 100644 arch/e2k/include/asm/kvm/spinlock_slow.h create mode 100644 arch/e2k/include/asm/kvm/stacks.h create mode 100644 arch/e2k/include/asm/kvm/string.h create mode 100644 arch/e2k/include/asm/kvm/switch.h create mode 100644 arch/e2k/include/asm/kvm/thread_info.h create mode 100644 arch/e2k/include/asm/kvm/threads.h create mode 100644 arch/e2k/include/asm/kvm/tlbflush.h create mode 100644 arch/e2k/include/asm/kvm/trace-defs.h create mode 100644 arch/e2k/include/asm/kvm/trace-hw-stacks.h create mode 100644 arch/e2k/include/asm/kvm/trace_kvm.h create mode 100644 arch/e2k/include/asm/kvm/trace_kvm_hv.h create mode 100644 arch/e2k/include/asm/kvm/trace_kvm_pv.h create mode 100644 arch/e2k/include/asm/kvm/trap_table.S.h create mode 100644 arch/e2k/include/asm/kvm/trap_table.h create mode 100644 arch/e2k/include/asm/kvm/ttable-help.h create mode 100644 arch/e2k/include/asm/kvm/uaccess.h create mode 100644 arch/e2k/include/asm/kvm/vcpu-regs-debug-inline.h create mode 100644 arch/e2k/include/asm/kvm/vcpu-regs-debug.h create mode 100644 arch/e2k/include/asm/kvm_host.h create mode 100644 arch/e2k/include/asm/l-iommu.h create mode 100644 arch/e2k/include/asm/l-mcmonitor.h create mode 100644 arch/e2k/include/asm/l_ide.h create mode 100644 arch/e2k/include/asm/l_pmc.h create mode 100644 arch/e2k/include/asm/l_spmc.h create mode 100644 arch/e2k/include/asm/l_timer.h create mode 100644 arch/e2k/include/asm/l_timer_regs.h create mode 100644 arch/e2k/include/asm/linkage.h create mode 100644 arch/e2k/include/asm/linux_logo.h create mode 100644 arch/e2k/include/asm/local.h create mode 100644 arch/e2k/include/asm/machdep.h create mode 100644 arch/e2k/include/asm/machdep_numa.h create mode 100644 arch/e2k/include/asm/mas.h create mode 100644 arch/e2k/include/asm/mc146818rtc.h create mode 100644 arch/e2k/include/asm/mlt.h 
create mode 100644 arch/e2k/include/asm/mm_hooks.h create mode 100644 arch/e2k/include/asm/mman.h create mode 100644 arch/e2k/include/asm/mmu-regs-types-v2.h create mode 100644 arch/e2k/include/asm/mmu-regs-types-v6.h create mode 100644 arch/e2k/include/asm/mmu.h create mode 100644 arch/e2k/include/asm/mmu_context.h create mode 100644 arch/e2k/include/asm/mmu_fault.h create mode 100644 arch/e2k/include/asm/mmu_regs.h create mode 100644 arch/e2k/include/asm/mmu_regs_access.h create mode 100644 arch/e2k/include/asm/mmu_regs_types.h create mode 100644 arch/e2k/include/asm/mmu_types.h create mode 100644 arch/e2k/include/asm/mmzone.h create mode 100644 arch/e2k/include/asm/module.h create mode 100644 arch/e2k/include/asm/monitors.h create mode 100644 arch/e2k/include/asm/mpspec.h create mode 100644 arch/e2k/include/asm/msgbuf.h create mode 100644 arch/e2k/include/asm/msidef.h create mode 100644 arch/e2k/include/asm/namei.h create mode 100644 arch/e2k/include/asm/native_aau_regs_access.h create mode 100644 arch/e2k/include/asm/native_cpu_regs_access.h create mode 100644 arch/e2k/include/asm/native_dcache_regs_access.h create mode 100644 arch/e2k/include/asm/native_mmu_regs_access.h create mode 100644 arch/e2k/include/asm/nbsr_v6_regs.h create mode 100644 arch/e2k/include/asm/nmi.h create mode 100644 arch/e2k/include/asm/numnodes.h create mode 100644 arch/e2k/include/asm/of_device.h create mode 100644 arch/e2k/include/asm/of_platform.h create mode 100644 arch/e2k/include/asm/openprom.h create mode 100644 arch/e2k/include/asm/oplib.h create mode 100644 arch/e2k/include/asm/ord_rwlock.h create mode 100644 arch/e2k/include/asm/ord_rwlock_types.h create mode 100644 arch/e2k/include/asm/override-lcc-warnings.h create mode 100644 arch/e2k/include/asm/p2v/boot_bitops.h create mode 100644 arch/e2k/include/asm/p2v/boot_cacheflush.h create mode 100644 arch/e2k/include/asm/p2v/boot_console.h create mode 100644 arch/e2k/include/asm/p2v/boot_head.h create mode 100644 
arch/e2k/include/asm/p2v/boot_init.h create mode 100644 arch/e2k/include/asm/p2v/boot_map.h create mode 100644 arch/e2k/include/asm/p2v/boot_mmu_context.h create mode 100644 arch/e2k/include/asm/p2v/boot_param.h create mode 100644 arch/e2k/include/asm/p2v/boot_phys.h create mode 100644 arch/e2k/include/asm/p2v/boot_smp.h create mode 100644 arch/e2k/include/asm/p2v/boot_spinlock.h create mode 100644 arch/e2k/include/asm/p2v/boot_spinlock_types.h create mode 100644 arch/e2k/include/asm/p2v/boot_v2p.h create mode 100644 arch/e2k/include/asm/p2v/io.h create mode 100644 arch/e2k/include/asm/page.h create mode 100644 arch/e2k/include/asm/page_io.h create mode 100644 arch/e2k/include/asm/paravirt.h create mode 100644 arch/e2k/include/asm/paravirt/aau_context.h create mode 100644 arch/e2k/include/asm/paravirt/aau_regs_access.h create mode 100644 arch/e2k/include/asm/paravirt/apic.h create mode 100644 arch/e2k/include/asm/paravirt/area_alloc.h create mode 100644 arch/e2k/include/asm/paravirt/atomic_api.h create mode 100644 arch/e2k/include/asm/paravirt/boot.h create mode 100644 arch/e2k/include/asm/paravirt/boot_flags.h create mode 100644 arch/e2k/include/asm/paravirt/cacheflush.h create mode 100644 arch/e2k/include/asm/paravirt/clkr.h create mode 100644 arch/e2k/include/asm/paravirt/console.h create mode 100644 arch/e2k/include/asm/paravirt/cpu.h create mode 100644 arch/e2k/include/asm/paravirt/cpu_regs_access.h create mode 100644 arch/e2k/include/asm/paravirt/e2k.h create mode 100644 arch/e2k/include/asm/paravirt/epic.h create mode 100644 arch/e2k/include/asm/paravirt/fast_syscalls.h create mode 100644 arch/e2k/include/asm/paravirt/host_printk.h create mode 100644 arch/e2k/include/asm/paravirt/hw_stacks.h create mode 100644 arch/e2k/include/asm/paravirt/io.h create mode 100644 arch/e2k/include/asm/paravirt/mm_hooks.h create mode 100644 arch/e2k/include/asm/paravirt/mman.h create mode 100644 arch/e2k/include/asm/paravirt/mmu.h create mode 100644 
arch/e2k/include/asm/paravirt/mmu_context.h create mode 100644 arch/e2k/include/asm/paravirt/mmu_regs_access.h create mode 100644 arch/e2k/include/asm/paravirt/pgatomic.h create mode 100644 arch/e2k/include/asm/paravirt/pgtable.h create mode 100644 arch/e2k/include/asm/paravirt/process.h create mode 100644 arch/e2k/include/asm/paravirt/processor.h create mode 100644 arch/e2k/include/asm/paravirt/pv_info.h create mode 100644 arch/e2k/include/asm/paravirt/pv_ops.h create mode 100644 arch/e2k/include/asm/paravirt/regs_state.h create mode 100644 arch/e2k/include/asm/paravirt/secondary_space.h create mode 100644 arch/e2k/include/asm/paravirt/setup.h create mode 100644 arch/e2k/include/asm/paravirt/sge.h create mode 100644 arch/e2k/include/asm/paravirt/smp.h create mode 100644 arch/e2k/include/asm/paravirt/spinlock.h create mode 100644 arch/e2k/include/asm/paravirt/string.h create mode 100644 arch/e2k/include/asm/paravirt/switch.h create mode 100644 arch/e2k/include/asm/paravirt/switch_to.h create mode 100644 arch/e2k/include/asm/paravirt/system.h create mode 100644 arch/e2k/include/asm/paravirt/time.h create mode 100644 arch/e2k/include/asm/paravirt/timex.h create mode 100644 arch/e2k/include/asm/paravirt/tlbflush.h create mode 100644 arch/e2k/include/asm/paravirt/trap_table.h create mode 100644 arch/e2k/include/asm/paravirt/traps.h create mode 100644 arch/e2k/include/asm/paravirt/v2p.h create mode 100644 arch/e2k/include/asm/paravirt/vga.h create mode 100644 arch/e2k/include/asm/parport.h create mode 100644 arch/e2k/include/asm/pci.h create mode 100644 arch/e2k/include/asm/percpu.h create mode 100644 arch/e2k/include/asm/perf_event.h create mode 100644 arch/e2k/include/asm/perf_event_types.h create mode 100644 arch/e2k/include/asm/perf_event_uncore.h create mode 100644 arch/e2k/include/asm/pgalloc.h create mode 100644 arch/e2k/include/asm/pgatomic.h create mode 100644 arch/e2k/include/asm/pgd.h create mode 100644 arch/e2k/include/asm/pgtable-v2.h create mode 100644 
arch/e2k/include/asm/pgtable-v6.h create mode 100644 arch/e2k/include/asm/pgtable.h create mode 100644 arch/e2k/include/asm/pgtable_def.h create mode 100644 arch/e2k/include/asm/pgtable_types.h create mode 100644 arch/e2k/include/asm/pic.h create mode 100644 arch/e2k/include/asm/poll.h create mode 100644 arch/e2k/include/asm/posix_types.h create mode 100644 arch/e2k/include/asm/preempt.h create mode 100644 arch/e2k/include/asm/process.h create mode 100644 arch/e2k/include/asm/processor.h create mode 100644 arch/e2k/include/asm/prom.h create mode 100644 arch/e2k/include/asm/prot_loader.h create mode 100644 arch/e2k/include/asm/protected_syscalls.h create mode 100644 arch/e2k/include/asm/ptrace-abi.h create mode 100644 arch/e2k/include/asm/ptrace.h create mode 100644 arch/e2k/include/asm/pv_info.h create mode 100644 arch/e2k/include/asm/qspinlock.h create mode 100644 arch/e2k/include/asm/qspinlock_paravirt.h create mode 100644 arch/e2k/include/asm/regs_state.h create mode 100644 arch/e2k/include/asm/rlimits.h create mode 100644 arch/e2k/include/asm/rtc.h create mode 100644 arch/e2k/include/asm/rwsem.h create mode 100644 arch/e2k/include/asm/sbus.h create mode 100644 arch/e2k/include/asm/sclkr.h create mode 100644 arch/e2k/include/asm/seccomp.h create mode 100644 arch/e2k/include/asm/secondary_space.h create mode 100644 arch/e2k/include/asm/sections.h create mode 100644 arch/e2k/include/asm/sembuf.h create mode 100644 arch/e2k/include/asm/serial.h create mode 100644 arch/e2k/include/asm/set_memory.h create mode 100644 arch/e2k/include/asm/setup.h create mode 100644 arch/e2k/include/asm/shmbuf.h create mode 100644 arch/e2k/include/asm/shmparam.h create mode 100644 arch/e2k/include/asm/sic_regs.h create mode 100644 arch/e2k/include/asm/sic_regs_access.h create mode 100644 arch/e2k/include/asm/sigcontext.h create mode 100644 arch/e2k/include/asm/signal.h create mode 100644 arch/e2k/include/asm/simul.h create mode 100644 arch/e2k/include/asm/smp-boot.h create mode 100644 
arch/e2k/include/asm/smp.h create mode 100644 arch/e2k/include/asm/socket.h create mode 100644 arch/e2k/include/asm/sockios.h create mode 100644 arch/e2k/include/asm/sparsemem.h create mode 100644 arch/e2k/include/asm/spinlock.h create mode 100644 arch/e2k/include/asm/spinlock_types.h create mode 100644 arch/e2k/include/asm/stacks.h create mode 100644 arch/e2k/include/asm/stacktrace.h create mode 100644 arch/e2k/include/asm/stat.h create mode 100644 arch/e2k/include/asm/statfs.h create mode 100644 arch/e2k/include/asm/string.h create mode 100644 arch/e2k/include/asm/swap_info.h create mode 100644 arch/e2k/include/asm/switch_to.h create mode 100644 arch/e2k/include/asm/sync_pg_tables.h create mode 100644 arch/e2k/include/asm/syscall.h create mode 100644 arch/e2k/include/asm/syscalls.h create mode 100644 arch/e2k/include/asm/system.h create mode 100644 arch/e2k/include/asm/tag_mem.h create mode 100644 arch/e2k/include/asm/tags.h create mode 100644 arch/e2k/include/asm/termbits.h create mode 100644 arch/e2k/include/asm/termios.h create mode 100644 arch/e2k/include/asm/thread_info.h create mode 100644 arch/e2k/include/asm/time.h create mode 100644 arch/e2k/include/asm/timer.h create mode 100644 arch/e2k/include/asm/timex.h create mode 100644 arch/e2k/include/asm/tlb.h create mode 100644 arch/e2k/include/asm/tlb_regs_access.h create mode 100644 arch/e2k/include/asm/tlb_regs_types.h create mode 100644 arch/e2k/include/asm/tlbflush.h create mode 100644 arch/e2k/include/asm/topology.h create mode 100644 arch/e2k/include/asm/trace-clock.h create mode 100644 arch/e2k/include/asm/trace-defs.h create mode 100644 arch/e2k/include/asm/trace-mmu-dtlb-v2.h create mode 100644 arch/e2k/include/asm/trace-mmu-dtlb-v6.h create mode 100644 arch/e2k/include/asm/trace.h create mode 100644 arch/e2k/include/asm/trace_clock.h create mode 100644 arch/e2k/include/asm/trace_pgtable-v2.h create mode 100644 arch/e2k/include/asm/trace_pgtable-v6.h create mode 100644 arch/e2k/include/asm/trap_def.h 
create mode 100644 arch/e2k/include/asm/trap_table.S.h create mode 100644 arch/e2k/include/asm/trap_table.h create mode 100644 arch/e2k/include/asm/traps.h create mode 100644 arch/e2k/include/asm/types.h create mode 100644 arch/e2k/include/asm/uaccess.h create mode 100644 arch/e2k/include/asm/ucontext.h create mode 100644 arch/e2k/include/asm/umalloc.h create mode 100644 arch/e2k/include/asm/unaligned.h create mode 100644 arch/e2k/include/asm/unistd.h create mode 100644 arch/e2k/include/asm/user.h create mode 100644 arch/e2k/include/asm/vga.h create mode 100644 arch/e2k/include/asm/vmlinux.lds.h create mode 100644 arch/e2k/include/asm/word-at-a-time.h create mode 100644 arch/e2k/include/uapi/asm/Kbuild create mode 100644 arch/e2k/include/uapi/asm/a.out.h create mode 100644 arch/e2k/include/uapi/asm/auxvec.h create mode 100644 arch/e2k/include/uapi/asm/bitsperlong.h create mode 100644 arch/e2k/include/uapi/asm/bootinfo.h create mode 100644 arch/e2k/include/uapi/asm/byteorder.h create mode 100644 arch/e2k/include/uapi/asm/e2k_api.h create mode 100644 arch/e2k/include/uapi/asm/e2k_syswork.h create mode 100644 arch/e2k/include/uapi/asm/errno.h create mode 100644 arch/e2k/include/uapi/asm/fcntl.h create mode 100644 arch/e2k/include/uapi/asm/ioctl.h create mode 100644 arch/e2k/include/uapi/asm/ioctls.h create mode 100644 arch/e2k/include/uapi/asm/ipcbuf.h create mode 100644 arch/e2k/include/uapi/asm/iset_ver.h create mode 100644 arch/e2k/include/uapi/asm/kexec.h create mode 100644 arch/e2k/include/uapi/asm/kvm.h create mode 100644 arch/e2k/include/uapi/asm/kvm_para.h create mode 100644 arch/e2k/include/uapi/asm/mas.h create mode 100644 arch/e2k/include/uapi/asm/mman.h create mode 100644 arch/e2k/include/uapi/asm/msgbuf.h create mode 100644 arch/e2k/include/uapi/asm/poll.h create mode 100644 arch/e2k/include/uapi/asm/posix_types.h create mode 100644 arch/e2k/include/uapi/asm/protected_mode.h create mode 100644 arch/e2k/include/uapi/asm/ptrace-abi.h create mode 100644 
arch/e2k/include/uapi/asm/ptrace.h create mode 100644 arch/e2k/include/uapi/asm/resource.h create mode 100644 arch/e2k/include/uapi/asm/sembuf.h create mode 100644 arch/e2k/include/uapi/asm/setup.h create mode 100644 arch/e2k/include/uapi/asm/shmbuf.h create mode 100644 arch/e2k/include/uapi/asm/sigcontext.h create mode 100644 arch/e2k/include/uapi/asm/siginfo.h create mode 100644 arch/e2k/include/uapi/asm/signal.h create mode 100644 arch/e2k/include/uapi/asm/socket.h create mode 100644 arch/e2k/include/uapi/asm/sockios.h create mode 100644 arch/e2k/include/uapi/asm/stat.h create mode 100644 arch/e2k/include/uapi/asm/statfs.h create mode 100644 arch/e2k/include/uapi/asm/termbits.h create mode 100644 arch/e2k/include/uapi/asm/termios.h create mode 100644 arch/e2k/include/uapi/asm/types.h create mode 100644 arch/e2k/include/uapi/asm/ucontext.h create mode 100644 arch/e2k/include/uapi/asm/unistd.h create mode 100644 arch/e2k/kernel/.gitignore create mode 100644 arch/e2k/kernel/Makefile create mode 100644 arch/e2k/kernel/alternative.c create mode 100644 arch/e2k/kernel/asm-offsets.c create mode 100644 arch/e2k/kernel/backtrace.c create mode 100644 arch/e2k/kernel/clear_rf.S create mode 100644 arch/e2k/kernel/convert_array.c create mode 100644 arch/e2k/kernel/copy-hw-stacks.c create mode 100644 arch/e2k/kernel/cpu/Makefile create mode 100644 arch/e2k/kernel/cpu/cacheinfo.c create mode 100644 arch/e2k/kernel/cpu/e12c.c create mode 100644 arch/e2k/kernel/cpu/e16c.c create mode 100644 arch/e2k/kernel/cpu/e1cp.c create mode 100644 arch/e2k/kernel/cpu/e2c3.c create mode 100644 arch/e2k/kernel/cpu/e2s.c create mode 100644 arch/e2k/kernel/cpu/e8c.c create mode 100644 arch/e2k/kernel/cpu/e8c2.c create mode 100644 arch/e2k/kernel/cpu/es2.c create mode 100644 arch/e2k/kernel/cpu/iset_v2.c create mode 100644 arch/e2k/kernel/cpu/iset_v3.c create mode 100644 arch/e2k/kernel/cpu/iset_v5.c create mode 100644 arch/e2k/kernel/cpu/iset_v6.c create mode 100644 
arch/e2k/kernel/cpu/recovery_string_v5.S create mode 100644 arch/e2k/kernel/devtree.c create mode 100644 arch/e2k/kernel/e2k-iommu.c create mode 100644 arch/e2k/kernel/e2k.c create mode 100644 arch/e2k/kernel/e2k_sic.c create mode 100644 arch/e2k/kernel/e2k_syswork.c create mode 100644 arch/e2k/kernel/elfcore.c create mode 100644 arch/e2k/kernel/entry_user.S create mode 100644 arch/e2k/kernel/fill_handler_entry.S create mode 100644 arch/e2k/kernel/ftrace.c create mode 100644 arch/e2k/kernel/ftrace_graph_entry.S create mode 100644 arch/e2k/kernel/getsp.c create mode 100644 arch/e2k/kernel/hotplug-cpu.c create mode 100644 arch/e2k/kernel/hw_breakpoint.c create mode 100644 arch/e2k/kernel/io.c create mode 100644 arch/e2k/kernel/ioctl32.c create mode 100644 arch/e2k/kernel/kexec.c create mode 100644 arch/e2k/kernel/kprobes.c create mode 100644 arch/e2k/kernel/ksyms.c create mode 100644 arch/e2k/kernel/libeprof/Makefile create mode 100644 arch/e2k/kernel/libeprof/libkeprof_24.c create mode 100644 arch/e2k/kernel/libeprof/libkeprof_cur.c create mode 100644 arch/e2k/kernel/mkclearwindow.c create mode 100644 arch/e2k/kernel/module.c create mode 100644 arch/e2k/kernel/monitors.c create mode 100644 arch/e2k/kernel/nmi.c create mode 100644 arch/e2k/kernel/page_tables.S create mode 100644 arch/e2k/kernel/perf_event/Makefile create mode 100644 arch/e2k/kernel/perf_event/dimtp_trace.c create mode 100644 arch/e2k/kernel/perf_event/perf_event.c create mode 100644 arch/e2k/kernel/perf_event/uncore.c create mode 100644 arch/e2k/kernel/perf_event/uncore_hc.c create mode 100644 arch/e2k/kernel/perf_event/uncore_hmu.c create mode 100644 arch/e2k/kernel/perf_event/uncore_iommu.c create mode 100644 arch/e2k/kernel/perf_event/uncore_mc.c create mode 100644 arch/e2k/kernel/perf_event/uncore_prepic.c create mode 100644 arch/e2k/kernel/perf_event/uncore_sic.c create mode 100644 arch/e2k/kernel/proc_context.c create mode 100644 arch/e2k/kernel/proc_sclkr.c create mode 100644 
arch/e2k/kernel/process.c create mode 100644 arch/e2k/kernel/protected_mq_notify.c create mode 100644 arch/e2k/kernel/protected_syscalls.c create mode 100644 arch/e2k/kernel/protected_timer_create.c create mode 100644 arch/e2k/kernel/ptrace.c create mode 100644 arch/e2k/kernel/recovery.c create mode 100644 arch/e2k/kernel/rtc.c create mode 100644 arch/e2k/kernel/sclkr.c create mode 100644 arch/e2k/kernel/sec_space.c create mode 100644 arch/e2k/kernel/setup.c create mode 100644 arch/e2k/kernel/signal.c create mode 100644 arch/e2k/kernel/smp.c create mode 100644 arch/e2k/kernel/smpboot.c create mode 100644 arch/e2k/kernel/stacktrace.c create mode 100644 arch/e2k/kernel/sys_32.c create mode 100644 arch/e2k/kernel/sys_e2k.c create mode 100644 arch/e2k/kernel/systable.c create mode 100644 arch/e2k/kernel/time.c create mode 100644 arch/e2k/kernel/topology.c create mode 100644 arch/e2k/kernel/trace.c create mode 100644 arch/e2k/kernel/trace_clock.c create mode 100644 arch/e2k/kernel/trace_stack.c create mode 100644 arch/e2k/kernel/trap_table.S create mode 100644 arch/e2k/kernel/traps.c create mode 100644 arch/e2k/kernel/ttable-help.h create mode 100644 arch/e2k/kernel/ttable-inline.h create mode 100644 arch/e2k/kernel/ttable.c create mode 100644 arch/e2k/kernel/ttable_tmp.c create mode 100644 arch/e2k/kernel/vmlinux.lds.S create mode 100644 arch/e2k/kvm/.gitignore create mode 100644 arch/e2k/kvm/Kconfig create mode 100644 arch/e2k/kvm/Makefile create mode 100644 arch/e2k/kvm/boot_spinlock.c create mode 100644 arch/e2k/kvm/cepic.c create mode 100644 arch/e2k/kvm/cepic.h create mode 100644 arch/e2k/kvm/complete.c create mode 100644 arch/e2k/kvm/complete.h create mode 100644 arch/e2k/kvm/cpu.c create mode 100644 arch/e2k/kvm/cpu.h create mode 100644 arch/e2k/kvm/cpu/Makefile create mode 100644 arch/e2k/kvm/cpu/iset_v2.c create mode 100644 arch/e2k/kvm/cpu/iset_v5.c create mode 100644 arch/e2k/kvm/cpu/iset_v6.c create mode 100644 arch/e2k/kvm/cpu_defs.h create mode 100644 
arch/e2k/kvm/csd_lock.c create mode 100644 arch/e2k/kvm/debug.c create mode 100644 arch/e2k/kvm/gaccess.c create mode 100644 arch/e2k/kvm/gaccess.h create mode 100644 arch/e2k/kvm/gpid.c create mode 100644 arch/e2k/kvm/gpid.h create mode 100644 arch/e2k/kvm/gregs.h create mode 100644 arch/e2k/kvm/guest/Makefile create mode 100644 arch/e2k/kvm/guest/async_pf.c create mode 100644 arch/e2k/kvm/guest/boot.c create mode 100644 arch/e2k/kvm/guest/boot.h create mode 100644 arch/e2k/kvm/guest/boot_e2k_virt.c create mode 100644 arch/e2k/kvm/guest/boot_io.c create mode 100644 arch/e2k/kvm/guest/boot_spinlock.c create mode 100644 arch/e2k/kvm/guest/boot_string.c create mode 100644 arch/e2k/kvm/guest/boot_vram.c create mode 100644 arch/e2k/kvm/guest/cepic.c create mode 100644 arch/e2k/kvm/guest/cpu.h create mode 100644 arch/e2k/kvm/guest/e2k_virt.c create mode 100644 arch/e2k/kvm/guest/fast_syscalls.c create mode 100644 arch/e2k/kvm/guest/fast_syscalls.h create mode 100644 arch/e2k/kvm/guest/host_dump_stack.c create mode 100644 arch/e2k/kvm/guest/host_printk.c create mode 100644 arch/e2k/kvm/guest/host_time.c create mode 100644 arch/e2k/kvm/guest/io.c create mode 100644 arch/e2k/kvm/guest/io.h create mode 100644 arch/e2k/kvm/guest/irq.c create mode 100644 arch/e2k/kvm/guest/irq.h create mode 100644 arch/e2k/kvm/guest/lapic.c create mode 100644 arch/e2k/kvm/guest/mmu.c create mode 100644 arch/e2k/kvm/guest/paravirt.c create mode 100644 arch/e2k/kvm/guest/paravirt.h create mode 100644 arch/e2k/kvm/guest/pic.h create mode 100644 arch/e2k/kvm/guest/process.c create mode 100644 arch/e2k/kvm/guest/process.h create mode 100644 arch/e2k/kvm/guest/signal.c create mode 100644 arch/e2k/kvm/guest/smp.c create mode 100644 arch/e2k/kvm/guest/spinlock.c create mode 100644 arch/e2k/kvm/guest/string.c create mode 100644 arch/e2k/kvm/guest/time.c create mode 100644 arch/e2k/kvm/guest/time.h create mode 100644 arch/e2k/kvm/guest/tlbflush.c create mode 100644 arch/e2k/kvm/guest/traps.c create 
mode 100644 arch/e2k/kvm/guest/traps.h create mode 100644 arch/e2k/kvm/guest/ttable.c create mode 100644 arch/e2k/kvm/hv_cpu.c create mode 100644 arch/e2k/kvm/hv_mmu.c create mode 100644 arch/e2k/kvm/hv_mmu.h create mode 100644 arch/e2k/kvm/hypercalls.c create mode 100644 arch/e2k/kvm/intercepts.c create mode 100644 arch/e2k/kvm/intercepts.h create mode 100644 arch/e2k/kvm/io.c create mode 100644 arch/e2k/kvm/io.h create mode 100644 arch/e2k/kvm/ioapic.c create mode 100644 arch/e2k/kvm/ioapic.h create mode 100644 arch/e2k/kvm/ioepic.c create mode 100644 arch/e2k/kvm/ioepic.h create mode 100644 arch/e2k/kvm/irq.h create mode 100644 arch/e2k/kvm/irq_comm.c create mode 100644 arch/e2k/kvm/kvm-e2k.c create mode 100644 arch/e2k/kvm/kvm_timer.h create mode 100644 arch/e2k/kvm/lapic.c create mode 100644 arch/e2k/kvm/lapic.h create mode 100644 arch/e2k/kvm/lt.c create mode 100644 arch/e2k/kvm/lt.h create mode 100644 arch/e2k/kvm/lt_regs.h create mode 100644 arch/e2k/kvm/mm.c create mode 100644 arch/e2k/kvm/mman.h create mode 100644 arch/e2k/kvm/mmu-e2k.c create mode 100644 arch/e2k/kvm/mmu-e2k.h create mode 100644 arch/e2k/kvm/mmu-pv-spt.c create mode 100644 arch/e2k/kvm/mmu-pv.c create mode 100644 arch/e2k/kvm/mmu-x86.h create mode 100644 arch/e2k/kvm/mmu.h create mode 100644 arch/e2k/kvm/mmu_defs.h create mode 100644 arch/e2k/kvm/mmu_flush.c create mode 100644 arch/e2k/kvm/mmutrace-e2k.h create mode 100644 arch/e2k/kvm/nid.c create mode 100644 arch/e2k/kvm/page_track.c create mode 100644 arch/e2k/kvm/paging_tmpl.h create mode 100644 arch/e2k/kvm/paravirt.c create mode 100644 arch/e2k/kvm/pgtable-gp.h create mode 100644 arch/e2k/kvm/pic.h create mode 100644 arch/e2k/kvm/process.c create mode 100644 arch/e2k/kvm/process.h create mode 100644 arch/e2k/kvm/pt-structs.c create mode 100644 arch/e2k/kvm/pv_mmu.h create mode 100644 arch/e2k/kvm/runstate.c create mode 100644 arch/e2k/kvm/sic-nbsr.c create mode 100644 arch/e2k/kvm/sic-nbsr.h create mode 100644 
arch/e2k/kvm/spinlock.c create mode 100644 arch/e2k/kvm/spmc.c create mode 100644 arch/e2k/kvm/spmc.h create mode 100644 arch/e2k/kvm/spmc_regs.h create mode 100644 arch/e2k/kvm/string.h create mode 100644 arch/e2k/kvm/switch.c create mode 100644 arch/e2k/kvm/time.h create mode 100644 arch/e2k/kvm/timer.c create mode 100644 arch/e2k/kvm/trace_pgtable-gp.h create mode 100644 arch/e2k/kvm/trap_table.S create mode 100644 arch/e2k/kvm/ttable-inline.h create mode 100644 arch/e2k/kvm/ttable.c create mode 100644 arch/e2k/kvm/user_area.c create mode 100644 arch/e2k/kvm/user_area.h create mode 100644 arch/e2k/kvm/virq.c create mode 100644 arch/e2k/kvm/vmid.c create mode 100644 arch/e2k/kvm/vmid.h create mode 100644 arch/e2k/lib/Makefile create mode 100644 arch/e2k/lib/builtin.c create mode 100644 arch/e2k/lib/checksum.c create mode 100644 arch/e2k/lib/delay.c create mode 100644 arch/e2k/lib/recovery_string.S create mode 100644 arch/e2k/lib/string.c create mode 100644 arch/e2k/lib/usercopy.c create mode 100644 arch/e2k/mm/Makefile create mode 100644 arch/e2k/mm/fault.c create mode 100644 arch/e2k/mm/hugetlbpage.c create mode 100644 arch/e2k/mm/init.c create mode 100644 arch/e2k/mm/ioremap.c create mode 100644 arch/e2k/mm/memory.c create mode 100644 arch/e2k/mm/mmap.c create mode 100644 arch/e2k/mm/mmu.c create mode 100644 arch/e2k/mm/node_vmap.c create mode 100644 arch/e2k/mm/page_io.c create mode 100644 arch/e2k/mm/pageattr.c create mode 100644 arch/e2k/mm/pgtable.c create mode 100644 arch/e2k/mm/tag_mem.c create mode 100644 arch/e2k/p2v/Makefile create mode 100644 arch/e2k/p2v/boot_e2k_sic.c create mode 100644 arch/e2k/p2v/boot_find_bit.c create mode 100644 arch/e2k/p2v/boot_head.c create mode 100644 arch/e2k/p2v/boot_init.c create mode 100644 arch/e2k/p2v/boot_map.c create mode 100644 arch/e2k/p2v/boot_param.c create mode 100644 arch/e2k/p2v/boot_phys.c create mode 100644 arch/e2k/p2v/boot_printk/Makefile create mode 100644 arch/e2k/p2v/boot_printk/am85c30.c create mode 
100644 arch/e2k/p2v/boot_printk/boot_hvc_l.c create mode 100644 arch/e2k/p2v/boot_printk/console.c create mode 100644 arch/e2k/p2v/boot_profiling.c create mode 100644 arch/e2k/p2v/boot_recovery.c create mode 100644 arch/e2k/p2v/boot_smp.c create mode 100644 arch/e2k/p2v/boot_string.c create mode 100644 arch/e2k/p2v/boot_string.h create mode 100644 arch/e2k/p2v/cpu/Makefile create mode 100644 arch/e2k/p2v/cpu/boot_iset_v2.c create mode 100644 arch/e2k/p2v/cpu/boot_iset_v3.c create mode 100644 arch/e2k/p2v/cpu/boot_iset_v6.c create mode 100644 arch/e2k/p2v/cpu/e12c.c create mode 100644 arch/e2k/p2v/cpu/e16c.c create mode 100644 arch/e2k/p2v/cpu/e1cp.c create mode 100644 arch/e2k/p2v/cpu/e2c3.c create mode 100644 arch/e2k/p2v/cpu/e2s.c create mode 100644 arch/e2k/p2v/cpu/e8c.c create mode 100644 arch/e2k/p2v/cpu/e8c2.c create mode 100644 arch/e2k/p2v/cpu/es2.c create mode 100644 arch/e2k/p2v/machdep.c create mode 100644 arch/e2k/pci/Makefile create mode 100644 arch/e2k/pci/pci.c create mode 100644 arch/e2k/power/Makefile create mode 100644 arch/e2k/power/cpu.c create mode 100644 arch/e2k/power/hibernate.c create mode 100644 arch/l/Kconfig create mode 100644 arch/l/Kconfig.debug create mode 100644 arch/l/Makefile create mode 100644 arch/l/include/asm/acenv.h create mode 100644 arch/l/include/asm/acpi.h create mode 100644 arch/l/include/asm/apic.h create mode 100644 arch/l/include/asm/apicdef.h create mode 100644 arch/l/include/asm/boot_profiling.h create mode 100644 arch/l/include/asm/bootinfo.h create mode 100644 arch/l/include/asm/clk_rt.h create mode 100644 arch/l/include/asm/clkr.h create mode 100644 arch/l/include/asm/console.h create mode 100644 arch/l/include/asm/console_types.h create mode 100644 arch/l/include/asm/devtree.h create mode 100644 arch/l/include/asm/dma-direct.h create mode 100644 arch/l/include/asm/dma-mapping.h create mode 100644 arch/l/include/asm/epic.h create mode 100644 arch/l/include/asm/epic_regs.h create mode 100644 
arch/l/include/asm/epicdef.h create mode 100644 arch/l/include/asm/gpio.h create mode 100644 arch/l/include/asm/hardirq.h create mode 100644 arch/l/include/asm/hw_irq.h create mode 100644 arch/l/include/asm/i2c-spi.h create mode 100644 arch/l/include/asm/idle.h create mode 100644 arch/l/include/asm/io_apic.h create mode 100644 arch/l/include/asm/io_epic.h create mode 100644 arch/l/include/asm/io_epic_regs.h create mode 100644 arch/l/include/asm/io_pic.h create mode 100644 arch/l/include/asm/iolinkmask.h create mode 100644 arch/l/include/asm/ipi.h create mode 100644 arch/l/include/asm/irq_numbers.h create mode 100644 arch/l/include/asm/irq_remapping.h create mode 100644 arch/l/include/asm/irq_vectors.h create mode 100644 arch/l/include/asm/irq_work.h create mode 100644 arch/l/include/asm/irqdomain.h create mode 100644 arch/l/include/asm/l-uncached.h create mode 100644 arch/l/include/asm/l_ide.h create mode 100644 arch/l/include/asm/l_pmc.h create mode 100644 arch/l/include/asm/l_spmc.h create mode 100644 arch/l/include/asm/l_timer.h create mode 100644 arch/l/include/asm/l_timer_regs.h create mode 100644 arch/l/include/asm/mpspec.h create mode 100644 arch/l/include/asm/msidef.h create mode 100644 arch/l/include/asm/nmi.h create mode 100644 arch/l/include/asm/of_device.h create mode 100644 arch/l/include/asm/pci.h create mode 100644 arch/l/include/asm/pci_l.h create mode 100644 arch/l/include/asm/pcie_fixup.h create mode 100644 arch/l/include/asm/percpu.h create mode 100644 arch/l/include/asm/pic.h create mode 100644 arch/l/include/asm/serial.h create mode 100644 arch/l/include/asm/setup.h create mode 100644 arch/l/include/asm/sic_regs.h create mode 100644 arch/l/include/asm/smp.h create mode 100644 arch/l/include/asm/swiotlb.h create mode 100644 arch/l/include/asm/tree_entry.h create mode 100644 arch/l/kernel/Makefile create mode 100644 arch/l/kernel/acpi/Makefile create mode 100644 arch/l/kernel/acpi/boot.c create mode 100644 arch/l/kernel/acpi/cstate.c create mode 
100644 arch/l/kernel/acpi/sleep.c create mode 100644 arch/l/kernel/acpi_tainted/Makefile create mode 100644 arch/l/kernel/acpi_tainted/l_spmc.c create mode 100644 arch/l/kernel/am85c30.c create mode 100644 arch/l/kernel/apic/Makefile create mode 100644 arch/l/kernel/apic/apic.c create mode 100644 arch/l/kernel/apic/apic_flat_64.c create mode 100644 arch/l/kernel/apic/io_apic.c create mode 100644 arch/l/kernel/apic/ipi.c create mode 100644 arch/l/kernel/apic/irq.c create mode 100644 arch/l/kernel/apic/irq_work.c create mode 100644 arch/l/kernel/apic/msi.c create mode 100644 arch/l/kernel/apic/probe_64.c create mode 100644 arch/l/kernel/apic/smp.c create mode 100644 arch/l/kernel/boot_profiling.c create mode 100644 arch/l/kernel/clk_rt.c create mode 100644 arch/l/kernel/clkr.c create mode 100644 arch/l/kernel/console.c create mode 100644 arch/l/kernel/cpufreq.c create mode 100644 arch/l/kernel/devtree.c create mode 100644 arch/l/kernel/epic/Makefile create mode 100644 arch/l/kernel/epic/epic.c create mode 100644 arch/l/kernel/epic/io_epic.c create mode 100644 arch/l/kernel/epic/ipi.c create mode 100644 arch/l/kernel/epic/irq.c create mode 100644 arch/l/kernel/epic/irq_work.c create mode 100644 arch/l/kernel/epic/smp.c create mode 100644 arch/l/kernel/gpio.c create mode 100644 arch/l/kernel/i2c-spi/Makefile create mode 100644 arch/l/kernel/i2c-spi/core.c create mode 100644 arch/l/kernel/i2c-spi/i2c-devices.c create mode 100644 arch/l/kernel/i2c-spi/i2c.c create mode 100644 arch/l/kernel/i2c-spi/reset.c create mode 100644 arch/l/kernel/i2c-spi/spi-devices.c create mode 100644 arch/l/kernel/i2c-spi/spi.c create mode 100644 arch/l/kernel/l-i2c2.c create mode 100644 arch/l/kernel/l-iommu.c create mode 100644 arch/l/kernel/l-mcmonitor.c create mode 100644 arch/l/kernel/l-uncached.c create mode 100644 arch/l/kernel/lt.c create mode 100644 arch/l/kernel/mpparse.c create mode 100644 arch/l/kernel/panic2nvram.c create mode 100644 arch/l/kernel/pic_irq.c create mode 100644 
arch/l/kernel/pmc/Makefile create mode 100644 arch/l/kernel/pmc/pmc.h create mode 100644 arch/l/kernel/pmc/pmc_drv.c create mode 100644 arch/l/kernel/pmc/pmc_e1cp.c create mode 100644 arch/l/kernel/pmc/pmc_hwmon.c create mode 100644 arch/l/kernel/pmc/pmc_s2.c create mode 100644 arch/l/kernel/procipcc2.c create mode 100644 arch/l/kernel/procmmpddiag.c create mode 100644 arch/l/kernel/procregs.c create mode 100644 arch/l/kernel/procshow.c create mode 100644 arch/l/kernel/setup.c create mode 100644 arch/l/pci/Kconfig create mode 100644 arch/l/pci/Makefile create mode 100644 arch/l/pci/acpi.c create mode 100644 arch/l/pci/common.c create mode 100644 arch/l/pci/direct.c create mode 100644 arch/l/pci/irq.c create mode 100644 arch/l/pci/l_pci.c create mode 100644 arch/l/pci/numa.c create mode 100644 arch/l/pci/pci.c create mode 100644 arch/l/pci/pci.h create mode 100644 arch/sparc/boot/dts/Makefile create mode 120000 arch/sparc/boot/dts/include/dt-bindings create mode 100644 arch/sparc/boot/dts/r2000_m1r-uvp.dts create mode 100644 arch/sparc/configs/build-config create mode 100644 arch/sparc/configs/mcst_rt.config create mode 120000 arch/sparc/include/asm-l create mode 100644 arch/sparc/include/asm/acpi.h create mode 100644 arch/sparc/include/asm/apic.h create mode 100644 arch/sparc/include/asm/apic_regs.h create mode 100644 arch/sparc/include/asm/apicdef.h create mode 100644 arch/sparc/include/asm/bootinfo.h create mode 100644 arch/sparc/include/asm/console.h create mode 100644 arch/sparc/include/asm/e90s.h create mode 100644 arch/sparc/include/asm/el_posix.h create mode 100644 arch/sparc/include/asm/el_posix_64.h create mode 100644 arch/sparc/include/asm/epic.h create mode 100644 arch/sparc/include/asm/epic_regs.h create mode 100644 arch/sparc/include/asm/epicdef.h create mode 100644 arch/sparc/include/asm/gpio.h create mode 100644 arch/sparc/include/asm/io_apic.h create mode 100644 arch/sparc/include/asm/io_apic_regs.h create mode 100644 
arch/sparc/include/asm/io_epic.h create mode 100644 arch/sparc/include/asm/io_epic_regs.h create mode 100644 arch/sparc/include/asm/iolinkmask.h create mode 100644 arch/sparc/include/asm/irq_remapping.h create mode 100644 arch/sparc/include/asm/irq_vectors.h create mode 100644 arch/sparc/include/asm/irq_work.h create mode 100644 arch/sparc/include/asm/irqdomain.h create mode 100644 arch/sparc/include/asm/l-iommu.h create mode 100644 arch/sparc/include/asm/l-mcmonitor.h create mode 100644 arch/sparc/include/asm/l_ide.h create mode 100644 arch/sparc/include/asm/l_ide32.h create mode 100644 arch/sparc/include/asm/l_pmc.h create mode 100644 arch/sparc/include/asm/l_spmc.h create mode 100644 arch/sparc/include/asm/l_timer.h create mode 100644 arch/sparc/include/asm/l_timer_regs.h create mode 100644 arch/sparc/include/asm/machdep.h create mode 100644 arch/sparc/include/asm/mpspec.h create mode 100644 arch/sparc/include/asm/msidef.h create mode 100644 arch/sparc/include/asm/pci_e90s.h create mode 100644 arch/sparc/include/asm/serial.h create mode 100644 arch/sparc/include/asm/sic_regs.h create mode 100644 arch/sparc/include/asm/topology_e90s.h create mode 100644 arch/sparc/kernel/devtree.c create mode 100644 arch/sparc/kernel/e90s.c create mode 100644 arch/sparc/kernel/e90s_sic.c create mode 100644 arch/sparc/kernel/irq_e90s.c create mode 100644 arch/sparc/kernel/pci_e90s.c create mode 100644 arch/sparc/kernel/pcr_e90s.c create mode 100644 arch/sparc/kernel/perf_event_e90s.c create mode 100644 arch/sparc/kernel/procpfreg_e90s.c create mode 100644 arch/sparc/kernel/smp_e90s.c create mode 100644 arch/sparc/kernel/time_e90s.c create mode 100644 arch/sparc/kernel/trampoline_e90s.S create mode 100644 arch/sparc/mm/init_e90s.c create mode 100644 arch/x86/configs/mcst.config create mode 120000 arch/x86/include/asm-l create mode 100644 arch/x86/include/asm/gpio.h create mode 100644 arch/x86/kernel/mcst_eth_mac.c create mode 100644 config-elbrus-def create mode 100644 
drivers/char/kdumper.c create mode 100644 drivers/cpufreq/cpufreq_pstates.c create mode 100644 drivers/cpufreq/e2k-pcs-cpufreq.c create mode 100644 drivers/cpuidle/Kconfig.e2k create mode 100644 drivers/cpuidle/Kconfig.e90s create mode 100644 drivers/cpuidle/cpuidle-e2k.c create mode 100644 drivers/cpuidle/cpuidle-e90s.c create mode 100644 drivers/edac/e2k_edac.c create mode 100644 drivers/gpu/drm/vivante/Makefile create mode 100644 drivers/gpu/drm/vivante/vivante_drv.c create mode 100644 drivers/gpu/drm/vivante/vivante_drv.h create mode 100644 drivers/hwmon/l_p1mon.c create mode 100644 drivers/hwmon/lm95231.c create mode 100644 drivers/hwmon/pmbus/max20730.c create mode 100644 drivers/ide/l_ide.c create mode 100644 drivers/input/misc/ltc2954.c create mode 100644 drivers/mcst/BigEv2/Makefile create mode 100644 drivers/mcst/BigEv2/pcie_driver/Makefile create mode 100644 drivers/mcst/BigEv2/pcie_driver/bige.c create mode 100644 drivers/mcst/BigEv2/pcie_driver/bige.h create mode 100644 drivers/mcst/BigEv2/pcie_driver/bige_ioctl.h create mode 100644 drivers/mcst/Kconfig create mode 100644 drivers/mcst/Makefile create mode 100644 drivers/mcst/apkpwr/Makefile create mode 100644 drivers/mcst/apkpwr/apkpwr.c create mode 100644 drivers/mcst/ddi/Makefile create mode 100644 drivers/mcst/ddi/ddi.c create mode 100644 drivers/mcst/ddi/ddi_arch.c create mode 100644 drivers/mcst/ddi/ddi_cv.c create mode 100644 drivers/mcst/dmp_assist/Makefile create mode 100644 drivers/mcst/dmp_assist/dmp_assist.c create mode 100644 drivers/mcst/dmp_assist/dmp_assist.h create mode 100644 drivers/mcst/eldsp/Makefile create mode 100644 drivers/mcst/eldsp/eldsp.c create mode 100644 drivers/mcst/eldsp/eldsp.h create mode 100644 drivers/mcst/emc/Makefile create mode 100644 drivers/mcst/emc/emc2305.c create mode 100644 drivers/mcst/gpu-imgtec/GPL-COPYING create mode 100644 drivers/mcst/gpu-imgtec/INSTALL create mode 100644 drivers/mcst/gpu-imgtec/Kconfig create mode 100644 
drivers/mcst/gpu-imgtec/MIT-COPYING create mode 100644 drivers/mcst/gpu-imgtec/Makefile create mode 100644 drivers/mcst/gpu-imgtec/README create mode 100644 drivers/mcst/gpu-imgtec/build/linux/bits.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/buildvars.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/commands.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/common/3rdparty.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/common/chromiumos_kernel.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/common/lws.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/common/testchip.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/common/window_systems/lws-generic.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/common/window_systems/nulldrmws.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/common/window_systems/surfaceless.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/common/window_systems/wayland.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/common/window_systems/xorg.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/config/compiler.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/config/compilers/aarch64-linux-gnu.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/config/compilers/arm-linux-gnueabi.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/config/compilers/arm-linux-gnueabihf.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/config/compilers/e2k-linux-gnu.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/config/compilers/i386-linux-gnu.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/config/compilers/mips64el-buildroot-linux-gnu.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/config/compilers/mipsel-buildroot-linux-gnu.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/config/compilers/riscv64-linux-gnu.mk create mode 100644 
drivers/mcst/gpu-imgtec/build/linux/config/compilers/x86_64-linux-gnu.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/config/core.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/config/core_volcanic.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/config/default_window_system.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/config/help.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/config/kernel-defs.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/config/kernel_version.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/config/preconfig.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/config/window_system.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/defs.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/e2c3_gpu/Makefile create mode 100644 drivers/mcst/gpu-imgtec/build/linux/fpga_linux/Makefile create mode 100644 drivers/mcst/gpu-imgtec/build/linux/kbuild/Makefile.template create mode 100644 drivers/mcst/gpu-imgtec/build/linux/kbuild/kbuild.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/moduledefs/host_i386.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/moduledefs/host_x86_64.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_aarch64.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_armel.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_armhf.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_e2k.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_i686.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_mips32r6el.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_mips64r6el.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_neutral.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_riscv64.mk create mode 100644 
drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_x86_64.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/moduledefs_common.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/moduledefs_libs.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/modules.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/modules/kernel_module.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/mt8173_linux/Makefile create mode 100644 drivers/mcst/gpu-imgtec/build/linux/nohw_linux/Makefile create mode 100644 drivers/mcst/gpu-imgtec/build/linux/packaging.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/plato/Makefile create mode 100644 drivers/mcst/gpu-imgtec/build/linux/prepare_tree.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/pvrversion.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/scripts.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/scripts/install.sh.tpl create mode 100644 drivers/mcst/gpu-imgtec/build/linux/tc_linux/Makefile create mode 100644 drivers/mcst/gpu-imgtec/build/linux/this_makefile.mk create mode 100644 drivers/mcst/gpu-imgtec/build/linux/tools/cc-check.sh create mode 100644 drivers/mcst/gpu-imgtec/build/linux/toplevel.mk create mode 100644 drivers/mcst/gpu-imgtec/config_kernel.h create mode 100644 drivers/mcst/gpu-imgtec/config_kernel.mk create mode 100644 drivers/mcst/gpu-imgtec/copy-to-kernel-tc/apollo.mk create mode 100644 drivers/mcst/gpu-imgtec/copy-to-kernel-tc/config_kernel.h create mode 100644 drivers/mcst/gpu-imgtec/copy-to-kernel-tc/config_kernel.mk create mode 100644 drivers/mcst/gpu-imgtec/copy-to-kernel-tc/copy_items.sh create mode 100644 drivers/mcst/gpu-imgtec/copy-to-kernel-tc/drm_pdp.mk create mode 100644 drivers/mcst/gpu-imgtec/copy-to-kernel-tc/pvrsrvkm.mk create mode 100644 drivers/mcst/gpu-imgtec/copy-to-kernel.sh create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/cache_bridge/client_cache_bridge.h create mode 100644 
drivers/mcst/gpu-imgtec/generated/rogue/cache_bridge/client_cache_direct_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/cache_bridge/common_cache_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/cache_bridge/server_cache_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/cmm_bridge/common_cmm_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/cmm_bridge/server_cmm_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/devicememhistory_bridge/client_devicememhistory_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/devicememhistory_bridge/client_devicememhistory_direct_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/devicememhistory_bridge/common_devicememhistory_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/devicememhistory_bridge/server_devicememhistory_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/dmabuf_bridge/common_dmabuf_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/dmabuf_bridge/server_dmabuf_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/htbuffer_bridge/client_htbuffer_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/htbuffer_bridge/client_htbuffer_direct_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/htbuffer_bridge/common_htbuffer_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/htbuffer_bridge/server_htbuffer_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/mm_bridge/client_mm_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/mm_bridge/client_mm_direct_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/mm_bridge/common_mm_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/mm_bridge/server_mm_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/pdump_bridge/client_pdump_bridge.h create mode 100644 
drivers/mcst/gpu-imgtec/generated/rogue/pdump_bridge/client_pdump_direct_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/pdump_bridge/common_pdump_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/pdump_bridge/server_pdump_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/pdumpctrl_bridge/client_pdumpctrl_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/pdumpctrl_bridge/common_pdumpctrl_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/pdumpctrl_bridge/server_pdumpctrl_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/pdumpmm_bridge/client_pdumpmm_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/pdumpmm_bridge/client_pdumpmm_direct_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/pdumpmm_bridge/common_pdumpmm_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/pdumpmm_bridge/server_pdumpmm_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/pvrtl_bridge/client_pvrtl_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/pvrtl_bridge/client_pvrtl_direct_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/pvrtl_bridge/common_pvrtl_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/pvrtl_bridge/server_pvrtl_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/rgxbreakpoint_bridge/common_rgxbreakpoint_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/rgxbreakpoint_bridge/server_rgxbreakpoint_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/rgxcmp_bridge/common_rgxcmp_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/rgxcmp_bridge/server_rgxcmp_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/rgxfwdbg_bridge/common_rgxfwdbg_bridge.h create mode 100644 
drivers/mcst/gpu-imgtec/generated/rogue/rgxfwdbg_bridge/server_rgxfwdbg_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/rgxhwperf_bridge/common_rgxhwperf_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/rgxhwperf_bridge/server_rgxhwperf_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/rgxkicksync_bridge/common_rgxkicksync_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/rgxkicksync_bridge/server_rgxkicksync_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/rgxpdump_bridge/client_rgxpdump_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/rgxpdump_bridge/client_rgxpdump_direct_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/rgxpdump_bridge/common_rgxpdump_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/rgxpdump_bridge/server_rgxpdump_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/rgxregconfig_bridge/common_rgxregconfig_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/rgxregconfig_bridge/server_rgxregconfig_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/rgxsignals_bridge/common_rgxsignals_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/rgxsignals_bridge/server_rgxsignals_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/rgxta3d_bridge/common_rgxta3d_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/rgxta3d_bridge/server_rgxta3d_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/rgxtq2_bridge/common_rgxtq2_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/rgxtq2_bridge/server_rgxtq2_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/rgxtq_bridge/common_rgxtq_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/rgxtq_bridge/server_rgxtq_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/ri_bridge/client_ri_bridge.h create 
mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/ri_bridge/client_ri_direct_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/ri_bridge/common_ri_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/ri_bridge/server_ri_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/srvcore_bridge/common_srvcore_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/srvcore_bridge/server_srvcore_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/sync_bridge/client_sync_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/sync_bridge/client_sync_direct_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/sync_bridge/common_sync_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/sync_bridge/server_sync_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/synctracking_bridge/client_synctracking_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/synctracking_bridge/client_synctracking_direct_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/synctracking_bridge/common_synctracking_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/rogue/synctracking_bridge/server_synctracking_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/cache_bridge/client_cache_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/cache_bridge/client_cache_direct_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/cache_bridge/common_cache_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/cache_bridge/server_cache_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/cmm_bridge/common_cmm_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/cmm_bridge/server_cmm_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/devicememhistory_bridge/client_devicememhistory_bridge.h create mode 100644 
drivers/mcst/gpu-imgtec/generated/volcanic/devicememhistory_bridge/client_devicememhistory_direct_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/devicememhistory_bridge/common_devicememhistory_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/devicememhistory_bridge/server_devicememhistory_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/dmabuf_bridge/common_dmabuf_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/dmabuf_bridge/server_dmabuf_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/htbuffer_bridge/client_htbuffer_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/htbuffer_bridge/client_htbuffer_direct_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/htbuffer_bridge/common_htbuffer_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/htbuffer_bridge/server_htbuffer_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/mm_bridge/client_mm_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/mm_bridge/client_mm_direct_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/mm_bridge/common_mm_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/mm_bridge/server_mm_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/pdump_bridge/client_pdump_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/pdump_bridge/client_pdump_direct_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/pdump_bridge/common_pdump_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/pdump_bridge/server_pdump_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/pdumpctrl_bridge/client_pdumpctrl_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.c create mode 100644 
drivers/mcst/gpu-imgtec/generated/volcanic/pdumpctrl_bridge/common_pdumpctrl_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/pdumpctrl_bridge/server_pdumpctrl_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/pdumpmm_bridge/client_pdumpmm_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/pdumpmm_bridge/client_pdumpmm_direct_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/pdumpmm_bridge/common_pdumpmm_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/pdumpmm_bridge/server_pdumpmm_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/pvrtl_bridge/client_pvrtl_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/pvrtl_bridge/client_pvrtl_direct_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/pvrtl_bridge/common_pvrtl_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/pvrtl_bridge/server_pvrtl_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/rgxcmp_bridge/common_rgxcmp_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/rgxcmp_bridge/server_rgxcmp_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/rgxfwdbg_bridge/common_rgxfwdbg_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/rgxfwdbg_bridge/server_rgxfwdbg_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/rgxhwperf_bridge/common_rgxhwperf_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/rgxhwperf_bridge/server_rgxhwperf_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/rgxkicksync_bridge/common_rgxkicksync_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/rgxkicksync_bridge/server_rgxkicksync_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/rgxpdump_bridge/client_rgxpdump_bridge.h create mode 100644 
drivers/mcst/gpu-imgtec/generated/volcanic/rgxpdump_bridge/client_rgxpdump_direct_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/rgxpdump_bridge/common_rgxpdump_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/rgxpdump_bridge/server_rgxpdump_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/rgxregconfig_bridge/common_rgxregconfig_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/rgxregconfig_bridge/server_rgxregconfig_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/rgxsignals_bridge/common_rgxsignals_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/rgxsignals_bridge/server_rgxsignals_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/rgxta3d_bridge/common_rgxta3d_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/rgxta3d_bridge/server_rgxta3d_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/rgxtq2_bridge/common_rgxtq2_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/rgxtq2_bridge/server_rgxtq2_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/ri_bridge/client_ri_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/ri_bridge/client_ri_direct_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/ri_bridge/common_ri_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/ri_bridge/server_ri_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/srvcore_bridge/common_srvcore_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/srvcore_bridge/server_srvcore_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/sync_bridge/client_sync_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/sync_bridge/client_sync_direct_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/sync_bridge/common_sync_bridge.h 
create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/sync_bridge/server_sync_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/synctracking_bridge/client_synctracking_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/synctracking_bridge/client_synctracking_direct_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/synctracking_bridge/common_synctracking_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/generated/volcanic/synctracking_bridge/server_synctracking_bridge.c create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_1.V.2.30.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_1.V.4.19.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_1.V.4.5.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_15.V.1.64.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.104.18.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.104.218.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.208.318.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.21.16.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.22.25.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.25.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.30.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.330.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.38.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_24.V.104.504.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_24.V.208.504.h create mode 100644 
drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_24.V.208.505.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_24.V.54.204.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_29.V.108.208.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_29.V.52.202.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_29.V.54.208.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_4.V.2.51.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_4.V.2.58.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_4.V.4.55.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_4.V.6.62.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_5.V.1.46.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_6.V.4.35.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_1.39.4.19.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_1.75.2.30.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_1.82.4.5.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_15.5.1.64.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.102.54.38.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.104.208.318.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.30.54.25.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.40.54.30.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.44.22.25.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.46.54.330.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.49.21.16.h create mode 
100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.67.54.30.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.68.54.30.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.86.104.218.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.87.104.18.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_24.50.208.504.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_24.56.208.505.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_24.66.54.204.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_24.67.104.504.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_29.13.54.208.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_29.14.108.208.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_29.19.52.202.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.29.2.51.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.31.4.55.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.40.2.51.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.43.6.62.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.45.2.58.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.46.6.62.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_5.11.1.46.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_5.9.1.46.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_6.34.4.35.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgx_bvnc_defs_km.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgx_bvnc_table_km.h create mode 100644 
drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgx_cr_defs_km.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgxdefs_km.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgxmhdefs_km.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgxmmudefs_km.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/rogue/km/tmp_rgx_cr_defs_riscv_km.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/configs/rgxconfig_km_27.V.254.2.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/configs/rgxconfig_km_30.V.1632.1.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/configs/rgxconfig_km_30.V.408.101.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/cores/rgxcore_km_27.5.254.2.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/cores/rgxcore_km_30.3.408.101.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/cores/rgxcore_km_30.4.1632.1.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/cores/rgxcore_km_30.5.1632.1.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/rgx_bvnc_defs_km.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/rgx_bvnc_table_km.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/rgx_cr_defs_km.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/rgxdefs_km.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/rgxmmudefs_km.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/tmp_rgx_cr_defs_riscv_km.h create mode 100644 drivers/mcst/gpu-imgtec/hwdefs/volcanic/rgxpmdefs.h create mode 100644 drivers/mcst/gpu-imgtec/include/cache_ops.h create mode 100644 drivers/mcst/gpu-imgtec/include/devicemem_typedefs.h create mode 100644 drivers/mcst/gpu-imgtec/include/dllist.h create mode 100644 drivers/mcst/gpu-imgtec/include/drm/netlink.h create mode 100644 drivers/mcst/gpu-imgtec/include/drm/nulldisp_drm.h create mode 100644 drivers/mcst/gpu-imgtec/include/drm/pdp_drm.h create mode 100644 
drivers/mcst/gpu-imgtec/include/drm/pvr_drm.h create mode 100644 drivers/mcst/gpu-imgtec/include/drm/pvr_drm_core.h create mode 100644 drivers/mcst/gpu-imgtec/include/img_3dtypes.h create mode 100644 drivers/mcst/gpu-imgtec/include/img_defs.h create mode 100644 drivers/mcst/gpu-imgtec/include/img_drm_fourcc_internal.h create mode 100644 drivers/mcst/gpu-imgtec/include/img_elf.h create mode 100644 drivers/mcst/gpu-imgtec/include/img_types.h create mode 100644 drivers/mcst/gpu-imgtec/include/kernel_types.h create mode 100644 drivers/mcst/gpu-imgtec/include/linux_sw_sync.h create mode 100644 drivers/mcst/gpu-imgtec/include/lock_types.h create mode 100644 drivers/mcst/gpu-imgtec/include/log2.h create mode 100644 drivers/mcst/gpu-imgtec/include/osfunc_common.h create mode 100644 drivers/mcst/gpu-imgtec/include/pdumpdefs.h create mode 100644 drivers/mcst/gpu-imgtec/include/pdumpdesc.h create mode 100644 drivers/mcst/gpu-imgtec/include/public/powervr/buffer_attribs.h create mode 100644 drivers/mcst/gpu-imgtec/include/public/powervr/img_drm_fourcc.h create mode 100644 drivers/mcst/gpu-imgtec/include/public/powervr/mem_types.h create mode 100644 drivers/mcst/gpu-imgtec/include/public/powervr/pvrsrv_sync_ext.h create mode 100644 drivers/mcst/gpu-imgtec/include/pvr_buffer_sync_shared.h create mode 100644 drivers/mcst/gpu-imgtec/include/pvr_debug.h create mode 100644 drivers/mcst/gpu-imgtec/include/pvr_fd_sync_kernel.h create mode 100644 drivers/mcst/gpu-imgtec/include/pvr_intrinsics.h create mode 100644 drivers/mcst/gpu-imgtec/include/pvrmodule.h create mode 100644 drivers/mcst/gpu-imgtec/include/pvrsrv_device_types.h create mode 100644 drivers/mcst/gpu-imgtec/include/pvrsrv_devvar.h create mode 100644 drivers/mcst/gpu-imgtec/include/pvrsrv_error.h create mode 100644 drivers/mcst/gpu-imgtec/include/pvrsrv_errors.h create mode 100644 drivers/mcst/gpu-imgtec/include/pvrsrv_memallocflags.h create mode 100644 drivers/mcst/gpu-imgtec/include/pvrsrv_sync_km.h create mode 100644 
drivers/mcst/gpu-imgtec/include/pvrsrv_tlcommon.h create mode 100644 drivers/mcst/gpu-imgtec/include/pvrsrv_tlstreams.h create mode 100644 drivers/mcst/gpu-imgtec/include/pvrversion.h create mode 100644 drivers/mcst/gpu-imgtec/include/rgx_heap_firmware.h create mode 100644 drivers/mcst/gpu-imgtec/include/rgx_memallocflags.h create mode 100644 drivers/mcst/gpu-imgtec/include/rgx_meta.h create mode 100644 drivers/mcst/gpu-imgtec/include/rgx_mips.h create mode 100644 drivers/mcst/gpu-imgtec/include/rgx_riscv.h create mode 100644 drivers/mcst/gpu-imgtec/include/ri_typedefs.h create mode 100644 drivers/mcst/gpu-imgtec/include/rogue/rgx_common.h create mode 100644 drivers/mcst/gpu-imgtec/include/rogue/rgx_fwif_alignchecks.h create mode 100644 drivers/mcst/gpu-imgtec/include/rogue/rgx_fwif_shared.h create mode 100644 drivers/mcst/gpu-imgtec/include/rogue/rgx_heaps.h create mode 100644 drivers/mcst/gpu-imgtec/include/rogue/rgx_hwperf.h create mode 100644 drivers/mcst/gpu-imgtec/include/rogue/rgx_options.h create mode 100644 drivers/mcst/gpu-imgtec/include/rogue/system/rgx_tc/apollo_clocks.h create mode 100644 drivers/mcst/gpu-imgtec/include/services_km.h create mode 100644 drivers/mcst/gpu-imgtec/include/servicesext.h create mode 100644 drivers/mcst/gpu-imgtec/include/sync_checkpoint_external.h create mode 100644 drivers/mcst/gpu-imgtec/include/sync_prim_internal.h create mode 100644 drivers/mcst/gpu-imgtec/include/system/rgx_tc/apollo_regs.h create mode 100644 drivers/mcst/gpu-imgtec/include/system/rgx_tc/bonnie_tcf.h create mode 100644 drivers/mcst/gpu-imgtec/include/system/rgx_tc/odin_defs.h create mode 100644 drivers/mcst/gpu-imgtec/include/system/rgx_tc/odin_pdp_regs.h create mode 100644 drivers/mcst/gpu-imgtec/include/system/rgx_tc/odin_regs.h create mode 100644 drivers/mcst/gpu-imgtec/include/system/rgx_tc/orion_defs.h create mode 100644 drivers/mcst/gpu-imgtec/include/system/rgx_tc/orion_regs.h create mode 100644 
drivers/mcst/gpu-imgtec/include/system/rgx_tc/pdp_regs.h create mode 100644 drivers/mcst/gpu-imgtec/include/system/rgx_tc/tcf_clk_ctrl.h create mode 100644 drivers/mcst/gpu-imgtec/include/system/rgx_tc/tcf_pll.h create mode 100644 drivers/mcst/gpu-imgtec/include/system/rgx_tc/tcf_rgbpdp_regs.h create mode 100644 drivers/mcst/gpu-imgtec/include/volcanic/rgx_common.h create mode 100644 drivers/mcst/gpu-imgtec/include/volcanic/rgx_fwif_alignchecks.h create mode 100644 drivers/mcst/gpu-imgtec/include/volcanic/rgx_fwif_shared.h create mode 100644 drivers/mcst/gpu-imgtec/include/volcanic/rgx_heaps.h create mode 100644 drivers/mcst/gpu-imgtec/include/volcanic/rgx_hwperf.h create mode 100644 drivers/mcst/gpu-imgtec/include/volcanic/rgx_options.h create mode 100644 drivers/mcst/gpu-imgtec/include/volcanic/system/rgx_tc/apollo_clocks.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/Kbuild.mk create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/Linux.mk create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_netlink_gem.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_netlink_gem.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_drv.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_drv.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_gem.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_gem.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_netlink.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_netlink.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/e2c3_gpu/e2c3_gpu_drv.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/e2c3_gpu/e2c3_gpu_drv.h create mode 100644 
drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/e2c3_gpu/e2c3_gpu_drv_internal.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/kernel_compatibility.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/kernel_config_compatibility.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/kernel_nospec.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_core.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_i2c.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_phy.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_phy.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_regs.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_video.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_video.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/pdp2_mmu_regs.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/pdp2_regs.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_aon_regs.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_ddr_ctrl_regs.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_ddr_publ_regs.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_drv.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_drv.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_init.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_top_regs.h create 
mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_buffer_sync.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_buffer_sync.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_counting_timeline.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_counting_timeline.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_dma_resv.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_drm.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_drv.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_fence.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_fence.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_fence_trace.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_linux_fence.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_platform_drv.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_sw_fence.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_sw_fence.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_sync.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_sync2.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_sync_file.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/services_kernel_client.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_crtc.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_debugfs.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_drv.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_drv.h create mode 100644 
drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_dvi.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_fb.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_gem.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_gem.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_modeset.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_plane.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_tmds.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_apollo.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_apollo.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_common.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_odin.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_odin.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_plato.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_plato.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_apollo.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_apollo.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_drv.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_drv.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_drv_internal.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_ion.h create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_odin.c create mode 100644 drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_odin.h create mode 100644 
drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_odin_common_regs.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/htbuffer_sf.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/htbuffer_types.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/info_page_client.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/info_page_defs.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/km_apphint_defs_common.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/os_cpu_cache.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/pdump.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/physheap.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/pvr_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/pvr_ricommon.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/rgx_bridge.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/rgx_compat_bvnc.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/rgx_fw_info.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/rgx_fwif_sf.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/rgx_pdump_panics.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/rgx_tq_shared.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/rgxfw_log_helper.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/rgxtransfer_shader.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/rogue/km_apphint_defs.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/rogue/rgx_fwif_hwperf.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/rogue/rgx_fwif_km.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/rogue/rgx_fwif_resetframework.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/rogue/rgxapi_km.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/rogue/rgxheapconfig.h create mode 100644 
drivers/mcst/gpu-imgtec/services/include/sync_checkpoint_internal.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/sync_checkpoint_internal_fw.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/volcanic/km_apphint_defs.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/volcanic/rgx_fwif_hwperf.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/volcanic/rgx_fwif_km.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/volcanic/rgx_hw_errors.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/volcanic/rgx_hwperf_table.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/volcanic/rgxapi_km.h create mode 100644 drivers/mcst/gpu-imgtec/services/include/volcanic/rgxheapconfig.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/cache_km.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/connection_server.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/devicemem_heapcfg.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/devicemem_history_server.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/devicemem_server.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/di_server.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/handle.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/htb_debug.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/htb_debug.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/htbserver.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/info_page_km.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/lists.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/mmu_common.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/pdump_mmu.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/pdump_physmem.c create mode 100644 
drivers/mcst/gpu-imgtec/services/server/common/pdump_server.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/physheap.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/physmem.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/physmem_fwdedicatedmem.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/physmem_hostmem.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/physmem_lma.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/physmem_tdfwmem.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/pmr.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/power.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/process_stats.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/pvr_notifier.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/pvrsrv.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/pvrsrv_bridge_init.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/pvrsrv_pool.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/ri_server.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/srvcore.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/sync_checkpoint.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/sync_server.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/tlintern.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/tlserver.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/tlstream.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/vmm_pvz_client.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/vmm_pvz_server.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/vz_vmm_pvz.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/common/vz_vmm_vm.c create mode 100644 
drivers/mcst/gpu-imgtec/services/server/devices/rgxbreakpoint.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rgxbvnc.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rgxfwdbg.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rgxfwdbg.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rgxfwtrace_strings.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rgxkicksync.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rgxmulticore.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rgxpdvfs.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rgxpdvfs.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rgxregconfig.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rgxshader.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rgxshader.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rgxsignals.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rgxstartstop.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rgxsyncutils.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rgxtimecorr.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rgxtimecorr.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rgxutils.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rgxworkest.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rgxworkest.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxbreakpoint.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxbvnc.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxccb.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxccb.h create mode 100644 
drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxcompute.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxcompute.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxdebug.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxdebug.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxdevice.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxfwimageutils.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxfwimageutils.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxfwutils.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxfwutils.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxhwperf.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxhwperf.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxinit.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxinit.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxkicksync.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxlayer.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxlayer_impl.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxlayer_impl.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmem.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmem.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmipsmmuinit.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmipsmmuinit.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmmuinit.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmmuinit.h create mode 100644 
drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmulticore.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxpdump.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxpdump.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxpower.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxpower.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxregconfig.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxsignals.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxsrvinit.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxstartstop.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxsyncutils.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxta3d.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxta3d.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxtdmtransfer.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxtdmtransfer.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxtransfer.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxtransfer.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxutils.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxbvnc.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxccb.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxccb.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxcompute.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxcompute.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxdebug.c create mode 100644 
drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxdebug.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxdevice.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxfwimageutils.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxfwimageutils.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxfwutils.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxfwutils.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxhwperf.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxhwperf.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxinit.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxinit.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxkicksync.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxlayer.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxlayer_impl.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxlayer_impl.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxmem.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxmem.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxmmuinit.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxmmuinit.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxmulticore.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxpdump.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxpdump.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxpower.c create mode 100644 
drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxpower.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxregconfig.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxsignals.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxsrvinit.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxstartstop.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxsyncutils.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxta3d.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxta3d.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxtdmtransfer.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxtdmtransfer.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxutils.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/Kbuild.mk create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/Linux.mk create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/allocmem.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/env_connection.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/event.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/event.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/fwload.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/handle_idr.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/km_apphint.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/km_apphint.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/linkage.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/module_common.c create mode 100644 
drivers/mcst/gpu-imgtec/services/server/env/linux/module_common.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/osconnection_server.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_arm.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_arm64.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_e2k.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_mips.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_riscv.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_x86.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/osmmap_stub.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/physmem_dmabuf.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/physmem_osmem_linux.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/physmem_osmem_linux.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/physmem_test.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/physmem_test.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/pmr_os.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/private_data.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_bridge_k.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_bridge_k.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_debug.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_debugfs.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_debugfs.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_dvfs_device.c create mode 100644 
drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_dvfs_device.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_gputrace.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_ion_stats.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_procfs.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_procfs.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_uaccess.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/rogue_trace_events.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/trace_events.c create mode 100644 drivers/mcst/gpu-imgtec/services/server/env/linux/trace_events.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/cache_km.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/connection_server.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/device.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/devicemem_heapcfg.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/devicemem_history_server.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/devicemem_server.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/devicemem_server_utils.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/di_common.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/di_server.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/fwload.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/fwtrace_string.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/handle.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/handle_impl.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/handle_types.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/htbserver.h create mode 100644 
drivers/mcst/gpu-imgtec/services/server/include/info_page.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/lists.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/mmu_common.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/opaque_types.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/os_srvinit_param.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/osconnection_server.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/osdi_impl.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/osfunc.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/oskm_apphint.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/ospvr_gputrace.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/pdump_km.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/pdump_mmu.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/pdump_physmem.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/pdump_symbolicaddr.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/physmem.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/physmem_dmabuf.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/physmem_fwdedicatedmem.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/physmem_hostmem.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/physmem_lma.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/physmem_osmem.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/physmem_tdfwmem.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/pmr.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/pmr_impl.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/pmr_os.h create mode 100644 
drivers/mcst/gpu-imgtec/services/server/include/power.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/process_stats.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/pvr_dvfs.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/pvr_notifier.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/pvrsrv.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_apphint.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_bridge_init.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_cleanup.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_device.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_pool.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_sync_server.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/ri_server.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/sofunc_pvr.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/sofunc_rgx.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/srvcore.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/srvinit.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/srvkm.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/sync_checkpoint.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/sync_checkpoint_init.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/sync_fallback_server.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/sync_server.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/tlintern.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/tlserver.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/tlstream.h create mode 100644 
drivers/mcst/gpu-imgtec/services/server/include/tutils_km.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/vmm_impl.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/vmm_pvz_client.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/vmm_pvz_common.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/vmm_pvz_server.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/vz_vm.h create mode 100644 drivers/mcst/gpu-imgtec/services/server/include/vz_vmm_pvz.h create mode 100644 drivers/mcst/gpu-imgtec/services/shared/common/devicemem.c create mode 100644 drivers/mcst/gpu-imgtec/services/shared/common/devicemem_pdump.c create mode 100644 drivers/mcst/gpu-imgtec/services/shared/common/devicemem_utils.c create mode 100644 drivers/mcst/gpu-imgtec/services/shared/common/devicememx_pdump.c create mode 100644 drivers/mcst/gpu-imgtec/services/shared/common/hash.c create mode 100644 drivers/mcst/gpu-imgtec/services/shared/common/htbuffer.c create mode 100644 drivers/mcst/gpu-imgtec/services/shared/common/mem_utils.c create mode 100644 drivers/mcst/gpu-imgtec/services/shared/common/pvrsrv_error.c create mode 100644 drivers/mcst/gpu-imgtec/services/shared/common/ra.c create mode 100644 drivers/mcst/gpu-imgtec/services/shared/common/sync.c create mode 100644 drivers/mcst/gpu-imgtec/services/shared/common/tlclient.c create mode 100644 drivers/mcst/gpu-imgtec/services/shared/common/uniq_key_splay_tree.c create mode 100644 drivers/mcst/gpu-imgtec/services/shared/common/uniq_key_splay_tree.h create mode 100644 drivers/mcst/gpu-imgtec/services/shared/devices/rogue/rgx_hwperf_table.c create mode 100644 drivers/mcst/gpu-imgtec/services/shared/devices/rogue/rgx_hwperf_table.h create mode 100644 drivers/mcst/gpu-imgtec/services/shared/include/allocmem.h create mode 100644 drivers/mcst/gpu-imgtec/services/shared/include/device_connection.h create mode 100644 
drivers/mcst/gpu-imgtec/services/shared/include/devicemem.h create mode 100644 drivers/mcst/gpu-imgtec/services/shared/include/devicemem_pdump.h create mode 100644 drivers/mcst/gpu-imgtec/services/shared/include/devicemem_utils.h create mode 100644 drivers/mcst/gpu-imgtec/services/shared/include/devicememx.h create mode 100644 drivers/mcst/gpu-imgtec/services/shared/include/devicememx_pdump.h create mode 100644 drivers/mcst/gpu-imgtec/services/shared/include/hash.h create mode 100644 drivers/mcst/gpu-imgtec/services/shared/include/htbuffer.h create mode 100644 drivers/mcst/gpu-imgtec/services/shared/include/htbuffer_init.h create mode 100644 drivers/mcst/gpu-imgtec/services/shared/include/lock.h create mode 100644 drivers/mcst/gpu-imgtec/services/shared/include/osmmap.h create mode 100644 drivers/mcst/gpu-imgtec/services/shared/include/proc_stats.h create mode 100644 drivers/mcst/gpu-imgtec/services/shared/include/ra.h create mode 100644 drivers/mcst/gpu-imgtec/services/shared/include/sync.h create mode 100644 drivers/mcst/gpu-imgtec/services/shared/include/sync_internal.h create mode 100644 drivers/mcst/gpu-imgtec/services/shared/include/tlclient.h create mode 100644 drivers/mcst/gpu-imgtec/services/shared/include/tutilsdefs.h create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/common/env/linux/dma_support.c create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/common/env/linux/pci_support.c create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/common/vmm_type_stub.c create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/e2c3_gpu/Kbuild.mk create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/e2c3_gpu/sysconfig.c create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/e2c3_gpu/sysinfo.h create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/include/dma_support.h create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/include/interrupt_support.h create mode 100644 
drivers/mcst/gpu-imgtec/services/system/rogue/include/pci_support.h create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/include/syscommon.h create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/include/sysvalidation.h create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/mt8173/Kbuild.mk create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/mt8173/mt8173_mfgsys.c create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/mt8173/mt8173_mfgsys.h create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/mt8173/mt8173_sysconfig.c create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/mt8173/sysinfo.h create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_plato/Kbuild.mk create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_plato/sysconfig.c create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_plato/sysconfig.h create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_plato/sysinfo.h create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_tc/Kbuild.mk create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_tc/sysconfig.c create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_tc/sysinfo.h create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/rgx_nohw/Kbuild.mk create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/rgx_nohw/sysconfig.c create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/rgx_nohw/sysconfig.h create mode 100644 drivers/mcst/gpu-imgtec/services/system/rogue/rgx_nohw/sysinfo.h create mode 100644 drivers/mcst/gpu-imgtec/services/system/volcanic/common/env/linux/dma_support.c create mode 100644 drivers/mcst/gpu-imgtec/services/system/volcanic/common/env/linux/interrupt_support.c create mode 100644 drivers/mcst/gpu-imgtec/services/system/volcanic/common/env/linux/pci_support.c create mode 100644 
drivers/mcst/gpu-imgtec/services/system/volcanic/common/vmm_type_stub.c create mode 100644 drivers/mcst/gpu-imgtec/services/system/volcanic/include/dma_support.h create mode 100644 drivers/mcst/gpu-imgtec/services/system/volcanic/include/interrupt_support.h create mode 100644 drivers/mcst/gpu-imgtec/services/system/volcanic/include/pci_support.h create mode 100644 drivers/mcst/gpu-imgtec/services/system/volcanic/include/syscommon.h create mode 100644 drivers/mcst/gpu-imgtec/services/system/volcanic/include/sysvalidation.h create mode 100644 drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_linux_tc/Kbuild.mk create mode 100644 drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_linux_tc/sysconfig.c create mode 100644 drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_linux_tc/sysinfo.h create mode 100644 drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_nohw/Kbuild.mk create mode 100644 drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_nohw/sysconfig.c create mode 100644 drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_nohw/sysconfig.h create mode 100644 drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_nohw/sysinfo.h create mode 100644 drivers/mcst/gpu-viv/Kbuild create mode 100644 drivers/mcst/gpu-viv/Kconfig create mode 100644 drivers/mcst/gpu-viv/config create mode 100644 drivers/mcst/gpu-viv/hal/kernel/arch/gc_hal_kernel_context.c create mode 100644 drivers/mcst/gpu-viv/hal/kernel/arch/gc_hal_kernel_context.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/arch/gc_hal_kernel_hardware.c create mode 100644 drivers/mcst/gpu-viv/hal/kernel/arch/gc_hal_kernel_hardware.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/arch/gc_hal_kernel_recorder.c create mode 100644 drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel.c create mode 100644 drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_async_command.c create mode 100644 drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_command.c create 
mode 100644 drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_db.c create mode 100644 drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_debug.c create mode 100644 drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_event.c create mode 100644 drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_heap.c create mode 100644 drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_mmu.c create mode 100644 drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_power.c create mode 100644 drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_precomp.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_security.c create mode 100644 drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_security_v1.c create mode 100644 drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_video_memory.c create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_feature_database.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_base.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_driver.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_driver_vg.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_drm.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_dump.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_eglplatform.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_eglplatform_type.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_engine.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_engine_vg.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_enum.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_kernel_buffer.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_mem.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_metadata.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_options.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_profiler.h create mode 100644 
drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_raster.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_rename.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_resource.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_security_interface.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_statistics.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_types.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_version.h create mode 100644 drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_vg.h create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_array.h create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_dma.c create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_dmabuf.c create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_gfp.c create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_reserved_mem.c create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_user_memory.c create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_allocator.c create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_allocator.h create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debug.h create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debugfs.c create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debugfs.h create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_device.c create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_device.h create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_driver.c create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_drm.c create mode 
100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_iommu.c create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_linux.c create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_linux.h create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_math.c create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_mutex.h create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_os.c create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_os.h create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_platform.h create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_security_channel.c create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_security_channel_emulator.c create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_sync.c create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_sync.h create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/platform/default/gc_hal_kernel_platform_default.c create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/platform/mcst/gc_hal_kernel_platform_mcst.c create mode 100644 drivers/mcst/gpu-viv/hal/os/linux/kernel/platform/mcst/gc_hal_kernel_platform_mcst.config create mode 100644 drivers/mcst/gpu-viv/hal/security_v1/gc_hal_ta.c create mode 100644 drivers/mcst/gpu-viv/hal/security_v1/gc_hal_ta.h create mode 100644 drivers/mcst/gpu-viv/hal/security_v1/gc_hal_ta_hardware.c create mode 100644 drivers/mcst/gpu-viv/hal/security_v1/gc_hal_ta_hardware.h create mode 100644 drivers/mcst/gpu-viv/hal/security_v1/gc_hal_ta_mmu.c create mode 100644 drivers/mcst/gpu-viv/hal/security_v1/os/emulator/gc_hal_ta_emulator.c create mode 100644 drivers/mcst/hantrodec/Makefile create mode 100644 drivers/mcst/hantrodec/README create mode 100644 drivers/mcst/hantrodec/driver_load.sh create mode 100644 drivers/mcst/hantrodec/dwl_defs.h create mode 100644 
drivers/mcst/hantrodec/hantrodec.c create mode 100644 drivers/mcst/hantrodec/hantrodec.h create mode 100644 drivers/mcst/i2c_spd/Makefile create mode 100644 drivers/mcst/i2c_spd/i2c_spd.c create mode 100644 drivers/mcst/lptouts/Makefile create mode 100644 drivers/mcst/lptouts/lptouts.c create mode 100644 drivers/mcst/lptouts/readme.txt create mode 100644 drivers/mcst/m2mlc/Makefile create mode 100644 drivers/mcst/m2mlc/m2mlc.h create mode 100644 drivers/mcst/m2mlc/m2mlc_dbg.h create mode 100644 drivers/mcst/m2mlc/m2mlc_dev.c create mode 100644 drivers/mcst/m2mlc/m2mlc_hw.c create mode 100644 drivers/mcst/m2mlc/m2mlc_ksvv.c create mode 100644 drivers/mcst/m2mlc/m2mlc_ksvv.h create mode 100644 drivers/mcst/m2mlc/m2mlc_main.c create mode 100644 drivers/mcst/m2mlc/m2mlc_net.c create mode 100644 drivers/mcst/m2mlc/m2mlc_pci.c create mode 100644 drivers/mcst/m2mlc/m2mlc_regs.h create mode 100644 drivers/mcst/mem2alloc/Makefile create mode 100644 drivers/mcst/mem2alloc/mem2alloc.c create mode 100644 drivers/mcst/mem2alloc/mem2alloc.h create mode 100644 drivers/mcst/mem2alloc/mem2alloc_load.sh create mode 100644 drivers/mcst/mga2-gpio/Makefile create mode 100644 drivers/mcst/mga2-gpio/mga2-gpio.c create mode 100644 drivers/mcst/mga2-pwm/Makefile create mode 100644 drivers/mcst/mga2-pwm/mga2-pwm.c create mode 100644 drivers/mcst/mga2/Makefile create mode 100644 drivers/mcst/mga2/it6613/HDMI_COMMON.h create mode 100644 drivers/mcst/mga2/it6613/HDMI_TX/EDID.c create mode 100644 drivers/mcst/mga2/it6613/HDMI_TX/HDMI_TX.c create mode 100644 drivers/mcst/mga2/it6613/HDMI_TX/HDMI_TX.h create mode 100644 drivers/mcst/mga2/it6613/HDMI_TX/edid.h create mode 100644 drivers/mcst/mga2/it6613/HDMI_TX/hdmitx.h create mode 100644 drivers/mcst/mga2/it6613/HDMI_TX/it6613_drv.c create mode 100644 drivers/mcst/mga2/it6613/HDMI_TX/it6613_drv.h create mode 100644 drivers/mcst/mga2/it6613/HDMI_TX/it6613_sys.c create mode 100644 drivers/mcst/mga2/it6613/HDMI_TX/it6613_sys.h create mode 100644 
drivers/mcst/mga2/it6613/mcu.h create mode 100644 drivers/mcst/mga2/it6613/typedef.h create mode 100644 drivers/mcst/mga2/mga2_auc2.c create mode 100644 drivers/mcst/mga2/mga2_bctrl.c create mode 100644 drivers/mcst/mga2/mga2_drv.c create mode 100644 drivers/mcst/mga2/mga2_drv.h create mode 100644 drivers/mcst/mga2/mga2_dvi.c create mode 100644 drivers/mcst/mga2/mga2_fb.c create mode 100644 drivers/mcst/mga2/mga2_fbdev.c create mode 100644 drivers/mcst/mga2/mga2_hdmi_it6613.c create mode 100644 drivers/mcst/mga2/mga2_i2c.c create mode 100644 drivers/mcst/mga2/mga2_irq.c create mode 100644 drivers/mcst/mga2/mga2_layer.c create mode 100644 drivers/mcst/mga2/mga2_lvds.c create mode 100644 drivers/mcst/mga2/mga2_main.c create mode 100644 drivers/mcst/mga2/mga2_mode.c create mode 100644 drivers/mcst/mga2/mga2_pll.c create mode 100644 drivers/mcst/mga2/mga2_regs.h create mode 100644 drivers/mcst/mgpm/Makefile create mode 100644 drivers/mcst/mgpm/mgpm.c create mode 100644 drivers/mcst/mmrm/Makefile create mode 100644 drivers/mcst/mmrm/mmrm.c create mode 100644 drivers/mcst/mmrm/mmrm.h create mode 100644 drivers/mcst/mokm/Makefile create mode 100644 drivers/mcst/mokm/mokm.c create mode 100644 drivers/mcst/mokx/Makefile create mode 100644 drivers/mcst/mokx/mokx.c create mode 100644 drivers/mcst/mokx/mokx_ext_mode.c create mode 100644 drivers/mcst/mokx/mokx_get_event.c create mode 100644 drivers/mcst/mokx/mokx_get_stat.c create mode 100644 drivers/mcst/mokx/mokx_intrrupt.c create mode 100644 drivers/mcst/mokx/mokx_iocc.h create mode 100644 drivers/mcst/mokx/mokx_iocc_error.h create mode 100644 drivers/mcst/mokx/mokx_iocc_regs.h create mode 100644 drivers/mcst/mokx/mokx_mok.h create mode 100644 drivers/mcst/mokx/mokx_mok_error.h create mode 100644 drivers/mcst/mokx/mokx_mok_regs.h create mode 100644 drivers/mcst/mokx/mokx_read_buf.c create mode 100644 drivers/mcst/mokx/mokx_send_msg.c create mode 100644 drivers/mcst/mokx/mokx_write_buf.c create mode 100644 
drivers/mcst/mpk/Makefile create mode 100644 drivers/mcst/mpk/README create mode 100644 drivers/mcst/mpk/mpk.c create mode 100644 drivers/mcst/mpv/Makefile create mode 100644 drivers/mcst/mpv/mpv.c create mode 100644 drivers/mcst/mpv/mpv.h create mode 100644 drivers/mcst/msps/Makefile create mode 100644 drivers/mcst/msps/msps.c create mode 100644 drivers/mcst/msps/msps.h create mode 100644 drivers/mcst/msps/msps_test.c create mode 100644 drivers/mcst/mvp/Makefile create mode 100644 drivers/mcst/mvp/mvp.c create mode 100644 drivers/mcst/mvp/mvpvar.h create mode 100644 drivers/mcst/pcs/Makefile create mode 100644 drivers/mcst/pcs/l_pcs.c create mode 100644 drivers/mcst/pcsm/Makefile create mode 100644 drivers/mcst/pcsm/pcsm.c create mode 100644 drivers/mcst/pcsm/pcsm.h create mode 100644 drivers/mcst/pcsm/pcsm_drv.c create mode 100644 drivers/mcst/prom/Makefile create mode 100644 drivers/mcst/prom/e90_prom.c create mode 100644 drivers/mcst/prom/sbus_proc_tree.c create mode 100644 drivers/mcst/prom/sbus_proc_tree.h create mode 100644 drivers/mcst/prom/sbus_tree.c create mode 100644 drivers/mcst/rdma/Makefile create mode 100644 drivers/mcst/rdma/rdma.c create mode 100644 drivers/mcst/rdma/rdma.h create mode 100644 drivers/mcst/rdma/rdma_error.h create mode 100644 drivers/mcst/rdma/rdma_intr.c create mode 100644 drivers/mcst/rdma/rdma_read_buf.c create mode 100644 drivers/mcst/rdma/rdma_regs.h create mode 100644 drivers/mcst/rdma/rdma_send_msg.c create mode 100644 drivers/mcst/rdma/rdma_write_buf.c create mode 100644 drivers/mcst/rdma_m/Makefile create mode 100644 drivers/mcst/rdma_m/get_event_rdma.c create mode 100644 drivers/mcst/rdma_m/get_stat_rdma.c create mode 100644 drivers/mcst/rdma_m/rdma.h create mode 100644 drivers/mcst/rdma_m/rdma_error.h create mode 100644 drivers/mcst/rdma_m/rdma_intr.c create mode 100644 drivers/mcst/rdma_m/rdma_m.c create mode 100644 drivers/mcst/rdma_m/rdma_read_buf.c create mode 100644 drivers/mcst/rdma_m/rdma_regs.h create mode 100644 
drivers/mcst/rdma_m/rdma_send_msg.c create mode 100644 drivers/mcst/rdma_m/rdma_write_buf.c create mode 100644 drivers/mcst/rdma_sic/Makefile create mode 100644 drivers/mcst/rdma_sic/get_event_rdma.c create mode 100644 drivers/mcst/rdma_sic/get_stat_rdma.c create mode 100644 drivers/mcst/rdma_sic/rdma.h create mode 100644 drivers/mcst/rdma_sic/rdma_error.h create mode 100644 drivers/mcst/rdma_sic/rdma_intr.c create mode 100644 drivers/mcst/rdma_sic/rdma_read_buf.c create mode 100644 drivers/mcst/rdma_sic/rdma_regs.h create mode 100644 drivers/mcst/rdma_sic/rdma_send_msg.c create mode 100644 drivers/mcst/rdma_sic/rdma_sic.c create mode 100644 drivers/mcst/rdma_sic/rdma_write_buf.c create mode 100644 drivers/mcst/smi-gpio/Makefile create mode 100644 drivers/mcst/smi-gpio/smi-gpio.c create mode 100644 drivers/mcst/smi-pwm/Makefile create mode 100644 drivers/mcst/smi-pwm/smi-pwm.c create mode 100644 drivers/mcst/smi/Kconfig create mode 100644 drivers/mcst/smi/Makefile create mode 100644 drivers/mcst/smi/ddk750/ddk750_2d.c create mode 100644 drivers/mcst/smi/ddk750/ddk750_2d.h create mode 100644 drivers/mcst/smi/ddk750/ddk750_chip.c create mode 100644 drivers/mcst/smi/ddk750/ddk750_chip.h create mode 100644 drivers/mcst/smi/ddk750/ddk750_clock.c create mode 100644 drivers/mcst/smi/ddk750/ddk750_clock.h create mode 100644 drivers/mcst/smi/ddk750/ddk750_cursor.c create mode 100644 drivers/mcst/smi/ddk750/ddk750_cursor.h create mode 100644 drivers/mcst/smi/ddk750/ddk750_defs.h create mode 100644 drivers/mcst/smi/ddk750/ddk750_display.c create mode 100644 drivers/mcst/smi/ddk750/ddk750_display.h create mode 100644 drivers/mcst/smi/ddk750/ddk750_edid.c create mode 100644 drivers/mcst/smi/ddk750/ddk750_edid.h create mode 100644 drivers/mcst/smi/ddk750/ddk750_hardware.h create mode 100644 drivers/mcst/smi/ddk750/ddk750_help.c create mode 100644 drivers/mcst/smi/ddk750/ddk750_help.h create mode 100644 drivers/mcst/smi/ddk750/ddk750_helper.c create mode 100644 
drivers/mcst/smi/ddk750/ddk750_helper.h create mode 100644 drivers/mcst/smi/ddk750/ddk750_hwi2c.c create mode 100644 drivers/mcst/smi/ddk750/ddk750_hwi2c.h create mode 100644 drivers/mcst/smi/ddk750/ddk750_mode.c create mode 100644 drivers/mcst/smi/ddk750/ddk750_mode.h create mode 100644 drivers/mcst/smi/ddk750/ddk750_power.c create mode 100644 drivers/mcst/smi/ddk750/ddk750_power.h create mode 100644 drivers/mcst/smi/ddk750/ddk750_regdc.h create mode 100644 drivers/mcst/smi/ddk750/ddk750_regde.h create mode 100644 drivers/mcst/smi/ddk750/ddk750_reggpio.h create mode 100644 drivers/mcst/smi/ddk750/ddk750_regi2c.h create mode 100644 drivers/mcst/smi/ddk750/ddk750_regsc.h create mode 100644 drivers/mcst/smi/ddk750/ddk750_sii9022.c create mode 100644 drivers/mcst/smi/ddk750/ddk750_sii9022.h create mode 100644 drivers/mcst/smi/ddk750/ddk750_sw2d.h create mode 100644 drivers/mcst/smi/ddk750/ddk750_swi2c.c create mode 100644 drivers/mcst/smi/ddk750/ddk750_swi2c.h create mode 100644 drivers/mcst/smi/ddk750/ddkdebug.h create mode 100644 drivers/mcst/smi/ddk750/siHdmiTx_902x_TPI.c create mode 100644 drivers/mcst/smi/ddk750/siHdmiTx_902x_TPI.h create mode 100644 drivers/mcst/smi/ddk750/vdif.h create mode 100644 drivers/mcst/smi/ddk768/ddk768.h create mode 100644 drivers/mcst/smi/ddk768/ddk768_2d.c create mode 100644 drivers/mcst/smi/ddk768/ddk768_2d.h create mode 100644 drivers/mcst/smi/ddk768/ddk768_chip.c create mode 100644 drivers/mcst/smi/ddk768/ddk768_chip.h create mode 100644 drivers/mcst/smi/ddk768/ddk768_clock.c create mode 100644 drivers/mcst/smi/ddk768/ddk768_clock.h create mode 100644 drivers/mcst/smi/ddk768/ddk768_cursor.c create mode 100644 drivers/mcst/smi/ddk768/ddk768_cursor.h create mode 100644 drivers/mcst/smi/ddk768/ddk768_display.c create mode 100644 drivers/mcst/smi/ddk768/ddk768_display.h create mode 100644 drivers/mcst/smi/ddk768/ddk768_edid.c create mode 100644 drivers/mcst/smi/ddk768/ddk768_edid.h create mode 100644 
drivers/mcst/smi/ddk768/ddk768_hdmi.c create mode 100644 drivers/mcst/smi/ddk768/ddk768_hdmi.h create mode 100644 drivers/mcst/smi/ddk768/ddk768_help.c create mode 100644 drivers/mcst/smi/ddk768/ddk768_help.h create mode 100644 drivers/mcst/smi/ddk768/ddk768_helper.c create mode 100644 drivers/mcst/smi/ddk768/ddk768_helper.h create mode 100644 drivers/mcst/smi/ddk768/ddk768_hwi2c.c create mode 100644 drivers/mcst/smi/ddk768/ddk768_hwi2c.h create mode 100644 drivers/mcst/smi/ddk768/ddk768_iis.c create mode 100644 drivers/mcst/smi/ddk768/ddk768_iis.h create mode 100644 drivers/mcst/smi/ddk768/ddk768_intr.c create mode 100644 drivers/mcst/smi/ddk768/ddk768_intr.h create mode 100644 drivers/mcst/smi/ddk768/ddk768_mode.c create mode 100644 drivers/mcst/smi/ddk768/ddk768_mode.h create mode 100644 drivers/mcst/smi/ddk768/ddk768_power.c create mode 100644 drivers/mcst/smi/ddk768/ddk768_power.h create mode 100644 drivers/mcst/smi/ddk768/ddk768_reg.h create mode 100644 drivers/mcst/smi/ddk768/ddk768_swi2c.c create mode 100644 drivers/mcst/smi/ddk768/ddk768_swi2c.h create mode 100644 drivers/mcst/smi/ddk768/ddk768_timer.c create mode 100644 drivers/mcst/smi/ddk768/ddk768_timer.h create mode 100644 drivers/mcst/smi/ddk768/ddk768_video.c create mode 100644 drivers/mcst/smi/ddk768/ddk768_video.h create mode 100644 drivers/mcst/smi/ddk768/ddkdebug.c create mode 100644 drivers/mcst/smi/ddk768/ddkdebug.h create mode 100644 drivers/mcst/smi/ddk768/hdmiregs.h create mode 100644 drivers/mcst/smi/ddk768/l3.c create mode 100644 drivers/mcst/smi/ddk768/l3.h create mode 100644 drivers/mcst/smi/ddk768/uda1345.c create mode 100644 drivers/mcst/smi/ddk768/uda1345.h create mode 100644 drivers/mcst/smi/ddk768/vdif.h create mode 100644 drivers/mcst/smi/hw750.c create mode 100644 drivers/mcst/smi/hw750.h create mode 100644 drivers/mcst/smi/hw768.c create mode 100644 drivers/mcst/smi/hw768.h create mode 100644 drivers/mcst/smi/hw_com.h create mode 100644 drivers/mcst/smi/smi_drv.c create mode 
100644 drivers/mcst/smi/smi_drv.h create mode 100644 drivers/mcst/smi/smi_fbdev.c create mode 100644 drivers/mcst/smi/smi_main.c create mode 100644 drivers/mcst/smi/smi_mode.c create mode 100644 drivers/mcst/smi/smi_plane.c create mode 100644 drivers/mcst/smi/smi_prime.c create mode 100644 drivers/mcst/smi/smi_snd.c create mode 100644 drivers/mcst/smi/smi_snd.h create mode 100644 drivers/mcst/smi/smi_ttm.c create mode 100644 drivers/mcst/video-imgtec/GPLHEADER create mode 100644 drivers/mcst/video-imgtec/MIT_COPYING create mode 100644 drivers/mcst/video-imgtec/Makefile create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/Makefile create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/driver/kernel/code/fwtrace.c create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/driver/kernel/code/memmgr/memmgr_km.c create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/driver/kernel/code/proc_FwIF.c create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/driver/kernel/code/quartz_device_km.c create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/driver/kernel/code/quartz_mmu.c create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/driver/kernel/code/vxe_KM.c create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/driver/kernel/code/vxekm_debug.c create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/driver/kernel/include/e5500_public_regdefs.h create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/driver/kernel/include/fwtrace.h create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/driver/kernel/include/memmgr_api_quartz.h create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/driver/kernel/include/memmgr_km.h create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/driver/kernel/include/proc_FwIF.h create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/driver/kernel/include/quartz_device_km.h create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/driver/kernel/include/quartz_mmu.h create mode 100644 
drivers/mcst/video-imgtec/encoder/quartz/driver/kernel/include/vxe_KM.h create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/driver/kernel/include/vxe_km_api_quartz.h create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/driver/kernel/include/vxe_sysctl.h create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/firmware/quartz/fw_binaries/H264_H265_FW_ALL_pipes_1_bin.c create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/firmware/quartz/fw_binaries/H264_H265_FW_ALL_pipes_3_bin.c create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/firmware/quartz/fw_binaries/include_all_fw_variants.h create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/include/VXE_Enc_GlobalDefs.h create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/include/coreflags.h create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/include/target_config.h create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/include/vxe_common.h create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/include/vxe_fw_if.h create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/platform/sysdev.c create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/register_includes/quartz/img_soc_dmac_regs.h create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/register_includes/quartz/img_video_bus4_mmu_regs_defines.h create mode 100644 drivers/mcst/video-imgtec/encoder/quartz/register_includes/quartz/ltp_regs.h create mode 100644 drivers/mcst/video-imgtec/imgpcidd/Makefile create mode 100644 drivers/mcst/video-imgtec/imgpcidd/imgpcidd.c create mode 100644 drivers/mcst/video-imgtec/imgpcidd/imgpcidd.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/Makefile create mode 100644 drivers/mcst/video-imgtec/imgvideo/imglib/libraries/pixelapi/code/pixel_api.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/imglib/libraries/pixelapi/code/pixel_api_internals.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/imglib/libraries/pixelapi/code/pixel_api_internals.h 
create mode 100644 drivers/mcst/video-imgtec/imgvideo/imglib/libraries/pixelapi/include/pixel_api.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/imglib/libraries/talmmu_api/code/addr_alloc1.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/imglib/libraries/talmmu_api/code/hash.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/imglib/libraries/talmmu_api/code/pool.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/imglib/libraries/talmmu_api/code/ra.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/imglib/libraries/talmmu_api/code/talmmu_api.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/imglib/libraries/talmmu_api/code/trace.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/imglib/libraries/talmmu_api/include/addr_alloc1.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/imglib/libraries/talmmu_api/include/hash.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/imglib/libraries/talmmu_api/include/pool.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/imglib/libraries/talmmu_api/include/ra.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/imglib/libraries/talmmu_api/include/talmmu_api.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/imglib/libraries/talmmu_api/include/trace.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/include/img_defs.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/include/img_errors.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/include/img_mem.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/include/img_pixfmts.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/include/img_structs.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/include/img_types.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/include/linux-kernel/img_sysdefs.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/include/linux-kernel/img_systypes.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/kernel_comp/include/wrap_utils.h create mode 
100644 drivers/mcst/video-imgtec/imgvideo/kernel_comp/include/wrap_utils_km.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/kernel_comp/libraries/wrap_utils/code/wrap_utils.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/list_utils/include/dq.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/list_utils/include/lst.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/list_utils/include/tre.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/list_utils/src/dq/dq.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/list_utils/src/lst/lst.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/list_utils/src/trees/addchild.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/list_utils/src/trees/addsib.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/list_utils/src/trees/copysub.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/list_utils/src/trees/finalsib.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/list_utils/src/trees/firstchild.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/list_utils/src/trees/init.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/list_utils/src/trees/isinsub.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/list_utils/src/trees/nextsib.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/list_utils/src/trees/prevsib.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/list_utils/src/trees/remleaf.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/list_utils/src/trees/remlsub.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/list_utils/src/trees/remsub.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/list_utils/src/trees/subnext.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/dbgevent_api.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/dbgevent_api_km.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/dbgopt_api.h create mode 100644 
drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/dbgopt_api_km.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/dbgopt_api_um.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/dman_api.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/dman_api_km.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/idgen_api.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/linux/sysbrg_drv.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/page_alloc.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/page_alloc_km.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/perflog_api.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/pman_api.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/pool_api.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/report_api.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/report_levels.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/report_modules.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/rman_api.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/sysbrg_api.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/sysbrg_api_km.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/sysbrg_utils.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/sysdev_utils.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/sysenv_api_km.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/sysenv_utils.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/sysmem_utils.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/include/sysos_api_km.h create mode 100644 
drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/dbgevent_api.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/dbgevent_api_km.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/dbgopt_api.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/dbgopt_api_km.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/dman_api.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/dman_api_km.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/idgen_api.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/linux/perflog_api.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/linux/sysbrg_api.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/linux/sysbrg_drv.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/linux/sysbrg_pdump.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/linux/sysmem_api_ashmem.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/linux/sysmem_api_carveout.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/linux/sysmem_api_coherent.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/linux/sysmem_api_dmabuf.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/linux/sysmem_api_ion.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/linux/sysmem_api_unified.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/linux/sysos_api.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/page_alloc.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/pman_api.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/pool_api.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/rman_api.c create mode 100644 
drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/sysbrg_utils.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/sysdev_utils.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/sysenv_api.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/sysenv_utils.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/port_fwrk/kernel/sysmem_utils.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/rpc/sysbrg/src/dbgevent_api_rpc.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/rpc/sysbrg/src/dbgevent_api_server.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/rpc/sysbrg/src/dbgopt_api_rpc.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/rpc/sysbrg/src/dbgopt_api_server.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/rpc/sysbrg/src/dman_api_rpc.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/rpc/sysbrg/src/dman_api_server.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/rpc/sysbrg/src/memmgr_api_quartz_rpc.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/rpc/sysbrg/src/memmgr_api_quartz_server.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/rpc/sysbrg/src/page_alloc_rpc.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/rpc/sysbrg/src/page_alloc_server.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/rpc/sysbrg/src/vxe_km_api_quartz_rpc.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/rpc/sysbrg/src/vxe_km_api_quartz_server.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/rpc/sysbrg/src/wrap_utils_rpc.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/rpc/sysbrg/src/wrap_utils_server.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/secure_media/tal/code/tal.c create mode 100644 drivers/mcst/video-imgtec/imgvideo/secure_media/tal/include/tal.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/secure_media/target/code/target.c create mode 100644 
drivers/mcst/video-imgtec/imgvideo/secure_media/target/include/target.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/system/include/api_common.h create mode 100644 drivers/mcst/video-imgtec/imgvideo/system/include/system.h create mode 100644 drivers/mcst/video-imgtec/linux/Makefile create mode 100644 drivers/mcst/video-imgtec/linux/include/img_mem_man.h create mode 100644 drivers/mcst/video-imgtec/linux/include/uapi/img_mem_man.h create mode 100644 drivers/mcst/video-imgtec/linux/include/uapi/vxd.h create mode 100644 drivers/mcst/video-imgtec/linux/include/uapi/vxd_pvdec.h create mode 100644 drivers/mcst/video-imgtec/linux/mem_man/Makefile create mode 100644 drivers/mcst/video-imgtec/linux/mem_man/img_mem_carveout.c create mode 100644 drivers/mcst/video-imgtec/linux/mem_man/img_mem_dmabuf.c create mode 100644 drivers/mcst/video-imgtec/linux/mem_man/img_mem_ion.c create mode 100644 drivers/mcst/video-imgtec/linux/mem_man/img_mem_man.c create mode 100644 drivers/mcst/video-imgtec/linux/mem_man/img_mem_man_priv.h create mode 100644 drivers/mcst/video-imgtec/linux/mem_man/img_mem_secure.c create mode 100644 drivers/mcst/video-imgtec/linux/mem_man/img_mem_unified.c create mode 100644 drivers/mcst/video-imgtec/linux/mem_man/imgmmu/imgmmu.c create mode 100644 drivers/mcst/video-imgtec/linux/mem_man/imgmmu/imgold/img_defs.h create mode 100644 drivers/mcst/video-imgtec/linux/mem_man/imgmmu/imgold/img_errors.h create mode 100644 drivers/mcst/video-imgtec/linux/mem_man/imgmmu/imgold/img_sysdefs.h create mode 100644 drivers/mcst/video-imgtec/linux/mem_man/imgmmu/imgold/img_systypes.h create mode 100644 drivers/mcst/video-imgtec/linux/mem_man/imgmmu/imgold/img_types.h create mode 100644 drivers/mcst/video-imgtec/linux/mem_man/imgmmu/kernel_heap.c create mode 100644 drivers/mcst/video-imgtec/linux/mem_man/imgmmu/mmu_defs.h create mode 100644 drivers/mcst/video-imgtec/linux/mem_man/imgmmu/mmulib/heap.h create mode 100644 
drivers/mcst/video-imgtec/linux/mem_man/imgmmu/mmulib/mmu.h create mode 100644 drivers/mcst/video-imgtec/linux/vxd/Makefile create mode 100644 drivers/mcst/video-imgtec/linux/vxd/vxd_api.c create mode 100644 drivers/mcst/video-imgtec/linux/vxd/vxd_common.h create mode 100644 drivers/mcst/video-imgtec/linux/vxd/vxd_core.c create mode 100644 drivers/mcst/video-imgtec/linux/vxd/vxd_debugfs.c create mode 100644 drivers/mcst/video-imgtec/linux/vxd/vxd_debugfs.h create mode 100644 drivers/mcst/video-imgtec/linux/vxd/vxd_plat.h create mode 100644 drivers/mcst/video-imgtec/linux/vxd/vxd_plat_dt.c create mode 100644 drivers/mcst/video-imgtec/linux/vxd/vxd_plat_dt.h create mode 100644 drivers/mcst/video-imgtec/linux/vxd/vxd_plat_emu.c create mode 100644 drivers/mcst/video-imgtec/linux/vxd/vxd_plat_europa.c create mode 100644 drivers/mcst/video-imgtec/linux/vxd/vxd_plat_fpga.c create mode 100644 drivers/mcst/video-imgtec/linux/vxd/vxd_pvdec.c create mode 100644 drivers/mcst/video-imgtec/linux/vxd/vxd_pvdec_priv.h create mode 100644 drivers/mcst/video-imgtec/linux/vxd/vxd_pvdec_regs.h create mode 100644 drivers/mcst/wrnvram/Makefile create mode 100644 drivers/mcst/wrnvram/wrnvram.c create mode 100644 drivers/misc/i2c_p2pmc.c create mode 100644 drivers/misc/isl22317.c create mode 100644 drivers/misc/ltc4306.c create mode 100644 drivers/misc/ucd9080.c create mode 100644 drivers/net/can/mcst/Kconfig create mode 100644 drivers/net/can/mcst/Makefile create mode 100644 drivers/net/can/mcst/can2.h create mode 100644 drivers/net/can/mcst/elcan.h create mode 100644 drivers/net/can/mcst/elcan_debugfs.c create mode 100644 drivers/net/can/mcst/elcan_debugfs.h create mode 100644 drivers/net/can/mcst/elcan_dev.c create mode 100644 drivers/net/can/mcst/elcan_pci.c create mode 100644 drivers/net/ethercat/Kconfig create mode 100644 drivers/net/ethercat/Makefile create mode 100644 drivers/net/ethercat/cdev.c create mode 100644 drivers/net/ethercat/cdev.h create mode 100644 
drivers/net/ethercat/coe_emerg_ring.c create mode 100644 drivers/net/ethercat/coe_emerg_ring.h create mode 100644 drivers/net/ethercat/datagram.c create mode 100644 drivers/net/ethercat/datagram.h create mode 100644 drivers/net/ethercat/datagram_pair.c create mode 100644 drivers/net/ethercat/datagram_pair.h create mode 100644 drivers/net/ethercat/debug.c create mode 100644 drivers/net/ethercat/debug.h create mode 100644 drivers/net/ethercat/device.c create mode 100644 drivers/net/ethercat/device.h create mode 100644 drivers/net/ethercat/domain.c create mode 100644 drivers/net/ethercat/domain.h create mode 100644 drivers/net/ethercat/ethernet.c create mode 100644 drivers/net/ethercat/ethernet.h create mode 100644 drivers/net/ethercat/fmmu_config.c create mode 100644 drivers/net/ethercat/fmmu_config.h create mode 100644 drivers/net/ethercat/foe.h create mode 100644 drivers/net/ethercat/foe_request.c create mode 100644 drivers/net/ethercat/foe_request.h create mode 100644 drivers/net/ethercat/fsm_change.c create mode 100644 drivers/net/ethercat/fsm_change.h create mode 100644 drivers/net/ethercat/fsm_coe.c create mode 100644 drivers/net/ethercat/fsm_coe.h create mode 100644 drivers/net/ethercat/fsm_foe.c create mode 100644 drivers/net/ethercat/fsm_foe.h create mode 100644 drivers/net/ethercat/fsm_master.c create mode 100644 drivers/net/ethercat/fsm_master.h create mode 100644 drivers/net/ethercat/fsm_pdo.c create mode 100644 drivers/net/ethercat/fsm_pdo.h create mode 100644 drivers/net/ethercat/fsm_pdo_entry.c create mode 100644 drivers/net/ethercat/fsm_pdo_entry.h create mode 100644 drivers/net/ethercat/fsm_sii.c create mode 100644 drivers/net/ethercat/fsm_sii.h create mode 100644 drivers/net/ethercat/fsm_slave.c create mode 100644 drivers/net/ethercat/fsm_slave.h create mode 100644 drivers/net/ethercat/fsm_slave_config.c create mode 100644 drivers/net/ethercat/fsm_slave_config.h create mode 100644 drivers/net/ethercat/fsm_slave_scan.c create mode 100644 
drivers/net/ethercat/fsm_slave_scan.h create mode 100644 drivers/net/ethercat/fsm_soe.c create mode 100644 drivers/net/ethercat/fsm_soe.h create mode 100644 drivers/net/ethercat/globals.h create mode 100644 drivers/net/ethercat/ioctl.c create mode 100644 drivers/net/ethercat/ioctl.h create mode 100644 drivers/net/ethercat/mailbox.c create mode 100644 drivers/net/ethercat/mailbox.h create mode 100644 drivers/net/ethercat/master.c create mode 100644 drivers/net/ethercat/master.h create mode 100644 drivers/net/ethercat/module.c create mode 100644 drivers/net/ethercat/pdo.c create mode 100644 drivers/net/ethercat/pdo.h create mode 100644 drivers/net/ethercat/pdo_entry.c create mode 100644 drivers/net/ethercat/pdo_entry.h create mode 100644 drivers/net/ethercat/pdo_list.c create mode 100644 drivers/net/ethercat/pdo_list.h create mode 100644 drivers/net/ethercat/reg_request.c create mode 100644 drivers/net/ethercat/reg_request.h create mode 100644 drivers/net/ethercat/sdo.c create mode 100644 drivers/net/ethercat/sdo.h create mode 100644 drivers/net/ethercat/sdo_entry.c create mode 100644 drivers/net/ethercat/sdo_entry.h create mode 100644 drivers/net/ethercat/sdo_request.c create mode 100644 drivers/net/ethercat/sdo_request.h create mode 100644 drivers/net/ethercat/slave.c create mode 100644 drivers/net/ethercat/slave.h create mode 100644 drivers/net/ethercat/slave_config.c create mode 100644 drivers/net/ethercat/slave_config.h create mode 100644 drivers/net/ethercat/soe_errors.c create mode 100644 drivers/net/ethercat/soe_request.c create mode 100644 drivers/net/ethercat/soe_request.h create mode 100644 drivers/net/ethercat/sync.c create mode 100644 drivers/net/ethercat/sync.h create mode 100644 drivers/net/ethercat/sync_config.c create mode 100644 drivers/net/ethercat/sync_config.h create mode 100644 drivers/net/ethercat/voe_handler.c create mode 100644 drivers/net/ethercat/voe_handler.h create mode 100644 drivers/net/ethernet/mcst/Kconfig create mode 100644 
drivers/net/ethernet/mcst/Makefile create mode 100644 drivers/net/ethernet/mcst/l_e1000.h create mode 100644 drivers/net/ethernet/mcst/l_e1000_nort.c create mode 100644 drivers/net/ethernet/mcst/l_e1000_rt.c create mode 100644 drivers/net/ethernet/mcst/mgb.c create mode 100644 drivers/net/ethernet/mcst/mxgbe/Makefile create mode 100644 drivers/net/ethernet/mcst/mxgbe/kcompat.c create mode 100644 drivers/net/ethernet/mcst/mxgbe/kcompat.h create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe.h create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe_dbg.h create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe_debugfs.c create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe_debugfs.h create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe_ethtool.c create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe_gpio.c create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe_gpio.h create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe_hw.c create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe_hw.h create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe_i2c.c create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe_i2c.h create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe_mac.c create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe_mac.h create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe_main.c create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe_msix.c create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe_msix.h create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe_net.c create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe_pci.c create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe_phy.c create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe_phy.h create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe_regs.h create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe_rxq.c create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe_rxq.h create mode 100644 drivers/net/ethernet/mcst/mxgbe/mxgbe_txq.c create mode 100644 
drivers/net/ethernet/mcst/mxgbe/mxgbe_txq.h create mode 100644 drivers/net/ethernet/mcst/mxgbe/xgbe_regs.h create mode 100644 drivers/net/ethernet/mcst/pcc.c create mode 100644 drivers/net/ethernet/mcst/pci_sunlance.c create mode 100644 drivers/net/ethernet/mcst/rdma_net/Makefile create mode 100644 drivers/net/ethernet/mcst/rdma_net/README.txt create mode 100644 drivers/net/ethernet/mcst/rdma_net/create_mknod_rdma create mode 100644 drivers/net/ethernet/mcst/rdma_net/get_event_rdma_net.c create mode 100644 drivers/net/ethernet/mcst/rdma_net/get_stat_rdma_net.c create mode 100644 drivers/net/ethernet/mcst/rdma_net/lvnet_net.c create mode 100644 drivers/net/ethernet/mcst/rdma_net/rdma_arp_net.c create mode 100644 drivers/net/ethernet/mcst/rdma_net/rdma_error_net.h create mode 100644 drivers/net/ethernet/mcst/rdma_net/rdma_intr_net.c create mode 100644 drivers/net/ethernet/mcst/rdma_net/rdma_proc_init_net.c create mode 100644 drivers/net/ethernet/mcst/rdma_net/rdma_reg_net.h create mode 100644 drivers/net/ethernet/mcst/rdma_net/rdma_send_msg_net.c create mode 100644 drivers/net/ethernet/mcst/rdma_net/rdma_user_intf_gl_net.h create mode 100644 drivers/net/ethernet/mcst/rdma_net/rdma_user_intf_net.h create mode 100644 drivers/net/ethernet/mcst/sunlance_body.h create mode 100644 drivers/net/ethernet/mcst/sunlance_sbus.c create mode 100644 drivers/parport/parport_mcst.c create mode 100644 drivers/rtc/rtc-cy14b101p.c create mode 100644 drivers/rtc/rtc-fm33256.c create mode 100644 drivers/staging/crystalhd/DriverFwShare.h create mode 100644 drivers/staging/crystalhd/FleaDefs.h create mode 100644 drivers/staging/crystalhd/Kconfig create mode 100644 drivers/staging/crystalhd/Makefile create mode 100644 drivers/staging/crystalhd/bc_dts_defs.h create mode 100644 drivers/staging/crystalhd/bc_dts_glob_lnx.h create mode 100644 drivers/staging/crystalhd/bcm_70012_regs.h create mode 100644 drivers/staging/crystalhd/bcm_70015_regs.h create mode 100644 
drivers/staging/crystalhd/crystalhd_cmds.c create mode 100644 drivers/staging/crystalhd/crystalhd_cmds.h create mode 100644 drivers/staging/crystalhd/crystalhd_flea_ddr.c create mode 100644 drivers/staging/crystalhd/crystalhd_flea_ddr.h create mode 100644 drivers/staging/crystalhd/crystalhd_fleafuncs.c create mode 100644 drivers/staging/crystalhd/crystalhd_fleafuncs.h create mode 100644 drivers/staging/crystalhd/crystalhd_fw_if.h create mode 100644 drivers/staging/crystalhd/crystalhd_hw.c create mode 100644 drivers/staging/crystalhd/crystalhd_hw.h create mode 100644 drivers/staging/crystalhd/crystalhd_linkfuncs.c create mode 100644 drivers/staging/crystalhd/crystalhd_linkfuncs.h create mode 100644 drivers/staging/crystalhd/crystalhd_lnx.c create mode 100644 drivers/staging/crystalhd/crystalhd_lnx.h create mode 100644 drivers/staging/crystalhd/crystalhd_misc.c create mode 100644 drivers/staging/crystalhd/crystalhd_misc.h create mode 100644 drivers/tty/hvc/hvc_l.c create mode 100644 drivers/tty/serial/l_zilog.c create mode 100644 drivers/tty/serial/l_zilog.h create mode 100644 drivers/tty/serial/lmscon.c create mode 100644 drivers/video/fbdev/lynxfb/Makefile create mode 100644 drivers/video/fbdev/lynxfb/ddk750.h create mode 100644 drivers/video/fbdev/lynxfb/ddk750_chip.c create mode 100644 drivers/video/fbdev/lynxfb/ddk750_chip.h create mode 100644 drivers/video/fbdev/lynxfb/ddk750_display.c create mode 100644 drivers/video/fbdev/lynxfb/ddk750_display.h create mode 100644 drivers/video/fbdev/lynxfb/ddk750_dvi.c create mode 100644 drivers/video/fbdev/lynxfb/ddk750_dvi.h create mode 100644 drivers/video/fbdev/lynxfb/ddk750_help.c create mode 100644 drivers/video/fbdev/lynxfb/ddk750_help.h create mode 100644 drivers/video/fbdev/lynxfb/ddk750_hwi2c.c create mode 100644 drivers/video/fbdev/lynxfb/ddk750_hwi2c.h create mode 100644 drivers/video/fbdev/lynxfb/ddk750_mode.c create mode 100644 drivers/video/fbdev/lynxfb/ddk750_mode.h create mode 100644 
drivers/video/fbdev/lynxfb/ddk750_power.c create mode 100644 drivers/video/fbdev/lynxfb/ddk750_power.h create mode 100644 drivers/video/fbdev/lynxfb/ddk750_reg.h create mode 100644 drivers/video/fbdev/lynxfb/ddk750_sii164.c create mode 100644 drivers/video/fbdev/lynxfb/ddk750_sii164.h create mode 100644 drivers/video/fbdev/lynxfb/ddk750_swi2c.c create mode 100644 drivers/video/fbdev/lynxfb/ddk750_swi2c.h create mode 100644 drivers/video/fbdev/lynxfb/lynx_accel.c create mode 100644 drivers/video/fbdev/lynxfb/lynx_accel.h create mode 100644 drivers/video/fbdev/lynxfb/lynx_cursor.c create mode 100644 drivers/video/fbdev/lynxfb/lynx_cursor.h create mode 100644 drivers/video/fbdev/lynxfb/lynx_drv.c create mode 100644 drivers/video/fbdev/lynxfb/lynx_drv.h create mode 100644 drivers/video/fbdev/lynxfb/lynx_help.h create mode 100644 drivers/video/fbdev/lynxfb/lynx_hw750.c create mode 100644 drivers/video/fbdev/lynxfb/lynx_hw750.h create mode 100644 drivers/video/fbdev/lynxfb/modedb.c create mode 100644 drivers/video/fbdev/lynxfb/ver.h create mode 100644 drivers/video/fbdev/mgam83/Makefile create mode 100644 drivers/video/fbdev/mgam83/mgam83fb.h create mode 100644 drivers/video/fbdev/mgam83/mgam83fb_base.c create mode 100644 drivers/video/fbdev/mgam83/mgam83fb_ramdac.c create mode 100644 drivers/video/fbdev/mgam83/sbus_mgam83fb.h create mode 100644 drivers/video/fbdev/mgam83/sbus_mgam83fb_base.c create mode 100644 drivers/video/fbdev/mgam83/sbus_ramdac.c create mode 100644 drivers/watchdog/lwdt.c create mode 100644 e2k-minver-cpu-details create mode 100644 fs/proc/mcst_debug.c create mode 100644 fs/proc/proc_console_ctrl.c create mode 100644 gen_config-elbrus-1cp.awk create mode 100644 gen_config-elbrus-4c.awk create mode 100644 gen_config-elbrus-8c.awk create mode 100644 gen_config-elbrus-8c2.awk create mode 100644 include/linux/el_posix.h create mode 100644 include/linux/i2c/pca953x.h create mode 100644 include/linux/mac/mac_kernel.h create mode 100644 
include/linux/mac/mac_types.h create mode 100644 include/linux/mcst/dsp_io.h create mode 100644 include/linux/mcst/gpio.h create mode 100644 include/linux/mcst/m2mlc_io.h create mode 100644 include/linux/mcst/mcst_selftest.h create mode 100644 include/linux/mcst/mgpm.h create mode 100644 include/linux/mcst/mgpm_io.h create mode 100644 include/linux/mcst/mmrm_io.h create mode 100644 include/linux/mcst/mokm.h create mode 100644 include/linux/mcst/mokx_user_intf.h create mode 100644 include/linux/mcst/mpk.h create mode 100644 include/linux/mcst/msps_io.h create mode 100644 include/linux/mcst/mvp_def.h create mode 100644 include/linux/mcst/p2ssbus.h create mode 100644 include/linux/mcst/rdma_user_intf.h create mode 100644 include/linux/mcst/user_intf.h create mode 100644 include/linux/mcst/wrnvram.h create mode 100644 include/linux/mcst/wrnvram_io.h create mode 100644 include/linux/mcst_net_rt.h create mode 100644 include/linux/mcst_rt.h create mode 100644 include/linux/panic2nvram.h create mode 100644 include/linux/platform_data/i2c-l-i2c2.h create mode 100644 include/uapi/drm/mga2_drm.h create mode 100644 include/uapi/linux/el_posix.h create mode 100644 include/uapi/linux/mac/Kbuild create mode 100644 include/uapi/linux/mac/mac_types.h create mode 100644 include/uapi/linux/mcst/Kbuild create mode 100644 include/uapi/linux/mcst/ddi.h create mode 100644 include/uapi/linux/mcst/define.h create mode 100644 include/uapi/linux/mcst/dsp_io.h create mode 100644 include/uapi/linux/mcst/gpio.h create mode 100644 include/uapi/linux/mcst/gpio_ac97.h create mode 100644 include/uapi/linux/mcst/kmng.h create mode 100644 include/uapi/linux/mcst/m2mlc_io.h create mode 100644 include/uapi/linux/mcst/mcst_selftest.h create mode 100644 include/uapi/linux/mcst/mgpm.h create mode 100644 include/uapi/linux/mcst/mgpm_io.h create mode 100644 include/uapi/linux/mcst/mmrm_io.h create mode 100644 include/uapi/linux/mcst/mokm.h create mode 100644 include/uapi/linux/mcst/mokx_user_intf.h create 
mode 100644 include/uapi/linux/mcst/mpk.h create mode 100644 include/uapi/linux/mcst/mpv_io.h create mode 100644 include/uapi/linux/mcst/msps_io.h create mode 100644 include/uapi/linux/mcst/mvp_def.h create mode 100644 include/uapi/linux/mcst/rdma_user_intf.h create mode 100644 include/uapi/linux/mcst/user_intf.h create mode 100644 include/uapi/linux/mcst_net_rt.h create mode 100644 include/uapi/linux/mcst_rt.h create mode 100644 kernel/configs/mcst.config create mode 100644 kernel/configs/mcst_debug.config create mode 100644 kernel/configs/mcst_rt.config create mode 100644 kernel/el_posix.c create mode 100644 kernel/watch_preempt.c create mode 100644 ltt/Kconfig create mode 100644 ltt/Makefile create mode 100644 security/altha/Kconfig create mode 100644 security/altha/Makefile create mode 100644 security/altha/altha_lsm.c create mode 100644 security/kiosk/Kconfig create mode 100644 security/kiosk/Makefile create mode 100644 security/kiosk/kiosk-test.sh create mode 100644 security/kiosk/kiosk_lsm.c create mode 100644 tools/arch/e2k/include/uapi/asm/bitsperlong.h create mode 100644 tools/arch/e2k/include/uapi/asm/mman.h create mode 100644 tools/perf/arch/e2k/Build create mode 100644 tools/perf/arch/e2k/Makefile create mode 100644 tools/perf/arch/e2k/annotate/instructions.c create mode 100644 tools/perf/arch/e2k/util/Build create mode 100644 tools/perf/arch/e2k/util/auxtrace.c create mode 100644 tools/perf/arch/e2k/util/header.c create mode 100644 tools/perf/arch/e2k/util/pmu.c create mode 100644 tools/perf/pmu-events/arch/e2k/mapfile.csv create mode 100644 tools/perf/pmu-events/arch/e2k/parse_dprof_list.sh create mode 100644 tools/perf/pmu-events/arch/e2k/v1/Events.json create mode 100644 tools/perf/pmu-events/arch/e2k/v2/Cpu_metrics.json create mode 100644 tools/perf/pmu-events/arch/e2k/v2/Events.json create mode 100644 tools/perf/pmu-events/arch/e2k/v3/Cpu_metrics.json create mode 100644 tools/perf/pmu-events/arch/e2k/v3/Events.json create mode 100644 
tools/perf/pmu-events/arch/e2k/v3/Uncore.json create mode 100644 tools/perf/pmu-events/arch/e2k/v3/Uncore_metrics.json create mode 100644 tools/perf/pmu-events/arch/e2k/v4/Cpu_metrics.json create mode 100644 tools/perf/pmu-events/arch/e2k/v4/Events.json create mode 100644 tools/perf/pmu-events/arch/e2k/v4/Uncore.json create mode 100644 tools/perf/pmu-events/arch/e2k/v5/Cpu_metrics.json create mode 100644 tools/perf/pmu-events/arch/e2k/v5/Events.json create mode 100644 tools/perf/pmu-events/arch/e2k/v5/Uncore.json create mode 100644 tools/perf/pmu-events/arch/e2k/v6/Cpu_metrics.json create mode 100644 tools/perf/pmu-events/arch/e2k/v6/Events.json create mode 100644 tools/perf/pmu-events/arch/e2k/v6/Uncore_hc.json create mode 100644 tools/perf/pmu-events/arch/e2k/v6/Uncore_hmu.json create mode 100644 tools/perf/pmu-events/arch/e2k/v6/Uncore_iommu.json create mode 100644 tools/perf/pmu-events/arch/e2k/v6/Uncore_mc.json create mode 100644 tools/perf/pmu-events/arch/e2k/v6/Uncore_metrics.json create mode 100644 tools/perf/pmu-events/arch/e2k/v6/Uncore_prepic.json create mode 100644 tools/perf/util/e2k-dimtp.h mode change 120000 => 100644 tools/testing/selftests/powerpc/copyloops/memcpy_mcsafe_64.S mode change 120000 => 100644 tools/testing/selftests/powerpc/primitives/asm/asm-const.h mode change 120000 => 100644 tools/testing/selftests/powerpc/primitives/asm/feature-fixups.h mode change 120000 => 100644 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h mode change 120000 => 100644 tools/testing/selftests/powerpc/stringloops/memcmp_32.S mode change 120000 => 100644 tools/testing/selftests/powerpc/stringloops/strlen_32.S mode change 120000 => 100644 tools/testing/selftests/powerpc/vphn/asm/lppaca.h diff --git a/Documentation/admin-guide/LSM/AltHa.rst b/Documentation/admin-guide/LSM/AltHa.rst new file mode 100644 index 000000000000..be698709d3f0 --- /dev/null +++ b/Documentation/admin-guide/LSM/AltHa.rst @@ -0,0 +1,101 @@ +===== +AltHa +===== + +AltHa is a Linux 
Security Module currently has four userspace hardening options: + * ignore SUID on binaries (with exceptions possible); + * prevent running selected script interpreters in interactive mode; + * disable open file unlinking in selected dirs. + * enable kiosk mode + + +It is selectable at build-time with ``CONFIG_SECURITY_ALTHA``, and should be +enabled in runtime by command line option ``altha=1`` and configured +through sysctls in ``/proc/sys/kernel/altha``. + +NoSUID +============ +Modern Linux systems can be used with minimal (or even zero at least for OWL and ALT) usage of SUID programs, but in many cases in full-featured desktop or server systems there are plenty of them: uncounted and sometimes unnecessary. Privileged programs are always an attack surface, but mounting filesystems with ``nosuid`` flag doesn't provide enough granularity in SUID binaries management. This LSM module provides a single control point for all SUID binaries. When this submodule is enabled, SUID bits on all binaries except explicitly listed are system-wide ignored. + +Sysctl parameters and defaults: + +* ``kernel.altha.nosuid.enabled = 0``, set to 1 to enable +* ``kernel.altha.nosuid.exceptions =``, colon-separated list of enabled SUID binaries, for example: ``/bin/su:/usr/libexec/hasher-priv/hasher-priv`` + +RestrScript +============ +There is one way of hardening: prevent users from executing their own arbitrary code. Traditionally it can be done by setting the ``noexec`` flag on user-writable filesystems. But modern script languages such as Python also can be used to write exploits or even load arbitrary machine code via ``dlopen`` and users can start scripts from ``noexec`` filesystem starting interpreter directly. +Restrscript LSM submodule provides a way to restrict some programs to be executed directly, but allows them to be executed as shebang handlers. 
+ +Sysctl parameters and defaults: + +* ``kernel.altha.rstrscript.enabled = 0``, set to 1 to enable +* ``kernel.altha.rstrscript.interpreters =``, colon-separated list of restricted interpreters for example: ``/lib64/ld-linux-x86-64.so.2:/usr/bin/python:/usr/bin/python3:/usr/bin/perl:/usr/bin/tclsh``. Symlinks are supported in both ways: you can set symlink to interpreter as exception and interpreter and all symlinks on it will be restricted. + +Adding ld-linux into blocking list prevents running interpreters via ``ld-linux interpreter``. + +Note: in this configuration all scripts starting with ``#!/usr/bin/env python`` will be blocked. + +OLock +============ +Unlink disabling for open files needed for Russian certification, but this is a nasty feature leading to DOS. + +Sysctl parameters and defaults: + +* ``kernel.altha.olock.enabled = 0``, set to 1 to enable +* ``kernel.altha.olock.dirs =``, colon-separated list of dirs, for example: ``/var/lib/something:/tmp/something``. + +Kiosk +=========== +Disable execution for everything and everyone (including system users +and root, if required) except given whitelists. + +Kiosk interface uses generic netlink framework. +Interface name: ``altha`` + +Kiosk packet attributes:: + + static struct nla_policy kiosk_attrs_policy[KIOSK_MAX_ATTR] = { + [KIOSK_ACTION] = { + .type = NLA_S16, + }, + [KIOSK_DATA] = { + .type = NLA_STRING, + .maxlen = MAX_DATA /* 1024 */ + }, + }; + +Possible kiosk modes:: + + enum kiosk_mode { + KIOSK_PERMISSIVE = 0, /* kiosk is disabled */ + KIOSK_NONSYSTEM, /* kiosk is enabled for users with uid >= 500 */ + KIOSK_ALL, /* kiosk is enabled for all users */ + }; + +In ``KIOSK_ALL`` mode root will be restricted if running from tty +Otherwise application will be executed anyway, +enabling the system to boot without garbage in whitelists. 
+ +Possible kiosk actions:: + + enum altha_kiosk_action { + KIOSK_SET_MODE = 0, /* set or get mode, see below */ + KIOSK_USERLIST_ADD, /* add app to user whitelist */ + KIOSK_USERLIST_DEL, /* remove app from user whitelist */ + KIOSK_SYSLIST_ADD, /* add app to system whitelist */ + KIOSK_SYSLIST_DEL, /* remove app from system whitelist */ + KIOSK_USER_LIST, /* retrieve user whitelist, see below */ + KIOSK_SYSTEM_LIST, /* retrieve system whitelist */ + }; + +``KIOSK_ACTION`` attribute is used. + +``SET_MODE`` action will send current mode if ``KIOSK_DATA`` is empty. + +When ``KIOSK_USER_LIST`` or ``KIOSK_SYSTEM_LIST`` action is requested, kernel sends +the first item from the list and waits for another request. +When end of list is reached, it sends an empty string and it will be safe +for client to request another list. + +``LD_*`` cheats will not be applied when kiosk is activated. diff --git a/Documentation/admin-guide/LSM/index.rst b/Documentation/admin-guide/LSM/index.rst index a6ba95fbaa9f..20b57e7adadd 100644 --- a/Documentation/admin-guide/LSM/index.rst +++ b/Documentation/admin-guide/LSM/index.rst @@ -47,3 +47,4 @@ subdirectories. tomoyo Yama SafeSetID + AltHa diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index b11e1fd95ee5..2797f0acae8e 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3425,6 +3425,14 @@ nomsi [MSI] If the PCI_MSI kernel config parameter is enabled, this kernel boot option can be used to disable the use of MSI interrupts system-wide. + pci_acs_override [PCIE] Override missing PCIe ACS support for: + downstream + All downstream ports - full ACS capabilities + multifunction + Add multifunction devices - multifunction ACS subset + id:nnnn:nnnn + Specific device - full ACS capabilities + Specified as vid:did (vendor/device ID) in hex noioapicquirk [APIC] Disable all boot interrupt quirks. 
Safety option to keep boot IRQs enabled. This should never be necessary. diff --git a/Documentation/devicetree/bindings/hwmon/emc2305.txt b/Documentation/devicetree/bindings/hwmon/emc2305.txt new file mode 100644 index 000000000000..ee67372ac66d --- /dev/null +++ b/Documentation/devicetree/bindings/hwmon/emc2305.txt @@ -0,0 +1,60 @@ +EMC2305 (I2C) + +This device is an RPM-based PWM Fan Speed Controller for up to 5 fans. + +Each fan can be configured individually: + + - pwm-enable defines the PWM mode: + 0: PWM is disabled + 3: RPM based PWM + + - fan-div sets the fan divisor (for RPM measurement) + 1, 2, 4 or 8 + + - fan-target sets the target RPM speed (for RPM based PWM mode) + max 16000 (according to data sheet) + + +1) The /emc2305 node + + Required properties: + + - compatible : must be "smsc,emc2305" + - reg : I2C bus address of the device + - #address-cells : must be <1> + - #size-cells : must be <0> + + The node may contain child nodes for each fan that the platform uses. + If no child nodes are given, all possible fan control channels are exposed. + If at least one child node is given, only the configured fans are exposed. + + Example EMC2305 node: + + emc2305@2C { + compatible = "smsc,emc2305"; + reg = <0x2C>; + #address-cells = <1>; + #size-cells = <0>; + + [ child node definitions... 
] + } + +2) fan nodes + + Required properties: + + - reg : the fan number (0 based) + + Optional properties: + + - fan-div : the fan divisor setting + - fan-target : the fan target speed + - pwm-enable : PWM mode + + Example EMC2305 fan node: + + fan@1 { + reg = <1>; + fan-div = <4>; + pwm-enable = <0>; + }; diff --git a/Documentation/hwmon/emc2305 b/Documentation/hwmon/emc2305 new file mode 100644 index 000000000000..4de033b33b3e --- --- /dev/null +++ b/Documentation/hwmon/emc2305 @@ -0,0 +1,33 @@ +Kernel driver emc2305 +===================== + +Supported chips: + * SMSC EMC2305, EMC2303, EMC2302, EMC2301 + Addresses scanned: I2C 0x2c, 0x2d, 0x2e, 0x2f, 0x4c, 0x4d + Prefixes: 'emc2305', 'emc2303', 'emc2302', 'emc2301' + Datasheet: Publicly available at the SMSC website: + http://www.smsc.com/Products/Thermal_and_Power_Management/Fan_Controllers + +Authors: + Reinhard Pfau, Guntermann & Drunck GmbH + +Description +----------- + +The SMSC EMC2305 is a fan controller for up to 5 fans. +The EMC2303 has the same functionality but supports only up to 3 fans. + +The EMC2302 supports 2 fans and the EMC2301 1 fan. These chips support fewer +possible I2C addresses. + +Fan rotation speeds are reported in RPM. +The driver supports the RPM based PWM control to keep a fan at a desired speed. +To enable this function for a fan, write 3 to pwm_enable and the desired +fan speed to fan_target. 
+ + +Devicetree +---------- + +Configuration is also possible via devicetree: +Documentation/devicetree/bindings/hwmon/emc2305.txt diff --git a/MAINTAINERS b/MAINTAINERS index 1407008df749..c911e90cb7f9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -14975,6 +14975,14 @@ S: Maintained F: Documentation/hwmon/emc2103.rst F: drivers/hwmon/emc2103.c +SMSC EMC2305 HARDWARE MONITOR DRIVER +M: Reinhard Pfau +L: lm-sensors@lm-sensors.org +S: Maintained +F: Documentation/hwmon/emc2305 +F: Documentation/devicetree/bindings/hwmon/emc2305.txt +F: drivers/mcst/emc/emc2305.c + SMSC SCH5627 HARDWARE MONITOR DRIVER M: Hans de Goede L: linux-hwmon@vger.kernel.org @@ -15401,6 +15409,14 @@ M: H Hartley Sweeten S: Odd Fixes F: drivers/staging/comedi/ +STAGING - CRYSTAL HD VIDEO DECODER +M: Naren Sankar +M: Jarod Wilson +M: Scott Davilla +M: Manu Abraham +S: Odd Fixes +F: drivers/staging/crystalhd/ + STAGING - FIELDBUS SUBSYSTEM M: Sven Van Asbroeck S: Maintained @@ -16690,6 +16706,15 @@ F: include/linux/visorbus.h F: drivers/visorbus/ F: drivers/staging/unisys/ +UNION FILE SYSTEM +M: Erez Zadok +L: unionfs@filesystems.org +W: http://unionfs.filesystems.org/ +T: git git://git.fsl.cs.sunysb.edu/unionfs-latest.git +S: Maintained +F: Documentation/filesystems/unionfs/ +F: fs/unionfs/ + UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER R: Alim Akhtar R: Avri Altman diff --git a/Makefile b/Makefile index 91d77df0128b..be5d8255e59e 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 4 SUBLEVEL = 163 -EXTRAVERSION = +EXTRAVERSION = -2.23 NAME = Kleptomaniac Octopus # *DOCUMENTATION* @@ -422,6 +422,16 @@ OBJSIZE = llvm-size STRIP = llvm-strip else CC = $(CROSS_COMPILE)gcc +ifeq ($(call cc-lcc-yn),y) +LD := $(shell $(CC) -print-prog-name=ld) +AR := $(shell $(CC) -print-prog-name=ar) +NM := $(shell $(CC) -print-prog-name=nm) +OBJCOPY := $(shell $(CC) -print-prog-name=objcopy) +OBJDUMP := $(shell $(CC) -print-prog-name=objdump) +READELF := $(shell $(CC) 
-print-prog-name=readelf) +OBJSIZE := $(shell $(CC) -print-prog-name=size) +STRIP := $(shell $(CC) -print-prog-name=strip) +else LD = $(CROSS_COMPILE)ld AR = $(CROSS_COMPILE)ar NM = $(CROSS_COMPILE)nm @@ -431,6 +441,7 @@ READELF = $(CROSS_COMPILE)readelf OBJSIZE = $(CROSS_COMPILE)size STRIP = $(CROSS_COMPILE)strip endif +endif PAHOLE = pahole LEX = flex YACC = bison @@ -447,7 +458,7 @@ KBZIP2 = bzip2 KLZOP = lzop LZMA = lzma LZ4 = lz4c -XZ = xz +XZ = xz CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \ -Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF) @@ -459,6 +470,8 @@ CFLAGS_KERNEL = AFLAGS_KERNEL = LDFLAGS_vmlinux = +-include .kernelvariables + # Use USERINCLUDE when you must reference the UAPI directories only. USERINCLUDE := \ -I$(srctree)/arch/$(SRCARCH)/include/uapi \ @@ -477,11 +490,22 @@ LINUXINCLUDE := \ $(USERINCLUDE) KBUILD_AFLAGS := -D__ASSEMBLY__ -fno-PIE +ifeq ($(call cc-lcc-yn),y) +# Although lcc-1.24 supports -fshort-wchar many users are still +# using lcc-1.23, so when they compile kernel modules themselves +# we must avoid passing "-fshort-wchar" to it. 
KBUILD_CFLAGS := -Wall -Wundef -Werror=strict-prototypes -Wno-trigraphs \ - -fno-strict-aliasing -fno-common -fshort-wchar -fno-PIE \ + -fno-strict-aliasing -fno-common -fno-PIE \ -Werror=implicit-function-declaration -Werror=implicit-int \ -Werror=return-type -Wno-format-security \ -std=gnu89 +else +KBUILD_CFLAGS := -Wall -Wundef -Werror=strict-prototypes -Wno-trigraphs \ + -fno-strict-aliasing -fno-common -fshort-wchar -fno-PIE \ + -Werror=implicit-function-declaration -Werror=implicit-int \ + -Wno-format-security \ + -std=gnu89 +endif KBUILD_CPPFLAGS := -D__KERNEL__ KBUILD_AFLAGS_KERNEL := KBUILD_CFLAGS_KERNEL := @@ -649,9 +673,14 @@ endif # KBUILD_EXTMOD # Defaults to vmlinux, but the arch makefile usually adds further targets all: vmlinux +ifneq ($(call cc-lcc-yn),y) CFLAGS_GCOV := -fprofile-arcs -ftest-coverage \ $(call cc-option,-fno-tree-loop-im) \ $(call cc-disable-warning,maybe-uninitialized,) +else +CFLAGS_GCOV := -fprofile-arcs -ftest-coverage \ + $(call cc-disable-warning,maybe-uninitialized,) +endif export CFLAGS_GCOV # The arch Makefiles can override CC_FLAGS_FTRACE. We may also append it later. @@ -890,8 +919,13 @@ KBUILD_CFLAGS += $(call cc-disable-warning, restrict) # Enabled with W=2, disabled by default as noisy KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized) +# MCST: The original gcc bug which caused introduction of -fno-strict-overflow +# (optimizing away pointer overflow checking) does not exist in lcc, and this +# option prohibits many compiler optimizations. 
+ifneq ($(call cc-lcc-yn),y) # disable invalid "can't wrap" optimizations for signed / pointers KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) +endif # clang sets -fmerge-all-constants by default as optimization, but this # is non-conforming behavior for C and in fact breaks the kernel, so we @@ -1036,7 +1070,7 @@ PHONY += prepare0 export MODORDER := $(extmod-prefix)modules.order ifeq ($(KBUILD_EXTMOD),) -core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ +core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ ltt/ vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \ $(core-y) $(core-m) $(drivers-y) $(drivers-m) \ diff --git a/arch/Kconfig b/arch/Kconfig index 13f70b3cfa15..6668f0e44c31 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -60,6 +60,9 @@ config OPROFILE_NMI_TIMER def_bool y depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !PPC64 +config HAVE_EL_POSIX_SYSCALL + bool + config KPROBES bool "Kprobes" depends on MODULES diff --git a/arch/e2k/3p/Makefile b/arch/e2k/3p/Makefile new file mode 100644 index 000000000000..49a83f225851 --- /dev/null +++ b/arch/e2k/3p/Makefile @@ -0,0 +1,3 @@ +subdir-ccflags-y := -Werror -Wswitch -Wenum-compare + +obj-y := global_sp.o umalloc.o binfmt_elf32_3P.o binfmt_elf64_3P.o diff --git a/arch/e2k/3p/binfmt_elf32_3P.c b/arch/e2k/3p/binfmt_elf32_3P.c new file mode 100644 index 000000000000..7a930d4ae8d5 --- /dev/null +++ b/arch/e2k/3p/binfmt_elf32_3P.c @@ -0,0 +1,38 @@ + +#include +#include + +typedef Elf32_Dyn elf_dyntab_entry_t; +#define sys_load_cu sys_load_cu_elf32_3P +#define sys_unload_cu sys_unload_cu_elf32_3P +#define ELF_CL_SZ 32 + +#define DEBUG_PROTECTED_ELFLOADER 0 +#define DBPL if (DEBUG_PROTECTED_ELFLOADER) printk + +/* + * Rename the basic ELF layout types to refer to the 32-bit class of files. 
+ */ +#undef ELF_CLASS +#define ELF_CLASS ELFCLASS32 + +#undef elfhdr +#undef elf_phdr +#undef elf_note +#undef elf_addr_t +#define elfhdr elf32_hdr +#define elf_phdr elf32_phdr +#define elf_note elf32_note +#define elf_addr_t Elf32_Addr + +/* + * Rename a few of the symbols that binfmt_elfe2kp.c will define. + * These are all local so the names don't really matter, but it + * might make some debugging less confusing not to duplicate them. + */ +#define elf_format protected_32_elf_format +#define init_elf_binfmt init_protected_32_elf_binfmt +#define exit_elf_binfmt exit_protected_32_elf_binfmt + +#include "./binfmt_elfe2kp.c" + diff --git a/arch/e2k/3p/binfmt_elf64_3P.c b/arch/e2k/3p/binfmt_elf64_3P.c new file mode 100644 index 000000000000..b401408084a4 --- /dev/null +++ b/arch/e2k/3p/binfmt_elf64_3P.c @@ -0,0 +1,18 @@ + +#include + +typedef Elf64_Dyn elf_dyntab_entry_t; +#define sys_load_cu sys_load_cu_elf64_3P +#define sys_unload_cu sys_unload_cu_elf64_3P +#define ELF_CL_SZ 64 + + +#define DEBUG_PROTECTED_ELFLOADER 0 +#define DBPL if (DEBUG_PROTECTED_ELFLOADER) printk + +#define elf_format protected_64_elf_format +#define init_elf_binfmt init_protected_64_elf_binfmt +#define exit_elf_binfmt exit_protected_64_elf_binfmt + +#include "./binfmt_elfe2kp.c" + diff --git a/arch/e2k/3p/binfmt_elfe2kp.c b/arch/e2k/3p/binfmt_elfe2kp.c new file mode 100644 index 000000000000..9aefc58cd1e1 --- /dev/null +++ b/arch/e2k/3p/binfmt_elfe2kp.c @@ -0,0 +1,1348 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#ifdef CONFIG_ELF_CORE +extern int elf_core_dump_64(struct coredump_params *cprm); +#else /* ! 
CONFIG_ELF_CORE */ +#define elf_core_dump_64 NULL +#endif /* CONFIG_ELF_CORE */ + +static int load_e2p_load_binary(struct linux_binprm *); + +static struct linux_binfmt elf_format = { + .module = THIS_MODULE, + .load_binary = load_e2p_load_binary, + .load_shlib = NULL, + .core_dump = elf_core_dump_64, + .min_coredump = ELF_EXEC_PAGESIZE +}; + +#define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE) +#define check_len(x) ((u64)(x) >= (1L << 32)) + +#if defined(CONFIG_MMU) && !defined(MAX_ARG_PAGES) +# define MAX_ARG_PAGES 32 +#endif + +static int protected_elf_code( struct elfhdr * x) +{ + if ((x->e_machine != ELF_ARCH) && (x->e_machine != ELF_ARCH_FAKE)) { + DBPL("bad ARCH 0x%x != 0x%x\n", x->e_machine, ELF_ARCH); + return 0; + } + if (x->e_ident[EI_CLASS] != ELF_CLASS) { + DBPL("bad CLASS 0x%x != 0x%x\n", x->e_ident[EI_CLASS], ELF_CLASS); + return 0; + } + + if (x->e_machine == ELF_ARCH) { + if ((x->e_flags & ELF_E2K_PM) == 0) { + DBPL("Protected code expected"); + return 0; + } + } + else if (x->e_machine == ELF_ARCH_FAKE) { + if (x->e_ident[EI_SEMANTIC] != ELF_CODE_NEW_PROTECTED && + x->e_ident[EI_SEMANTIC] != ELF_CODE_NEW_PROTECTED_CXX) { + DBPL("bad SEMANTIC: 0x%x != 0x%x and 0x%x != 0x%x\n", + x->e_ident[EI_SEMANTIC], ELF_CODE_NEW_PROTECTED, + x->e_ident[EI_SEMANTIC], ELF_CODE_NEW_PROTECTED_CXX); + return 0; + } + } + + if (x->e_flags & ELF_BIN_COMP) + { + DBPL("Code for binary compiler not expected"); + return 0; + } + + if (!elf_check_e2k_mtype(x)) + { + DBPL("Code for incompatible machine"); + return 0; + } + + return 1; +} + +static unsigned long inline do_mmap_elf(struct file *f, + unsigned long addr, + unsigned long len, + unsigned long prot, + unsigned long flags, + unsigned long off) +{ + return vm_mmap_notkillable(f, addr, len, prot, flags, off); +} + +static inline int do_munmap_elf(unsigned long addr, size_t len) +{ + return vm_munmap_notkillable(addr, len); +} + + + + +/* + * Layout of arguments for protected task (down to up from stack base) : + * 
if ( ARGS_AS_ONE_ARRAY) { + * descriptior to (4) -(6) area + * } else { + * descriptor to (1) - (3) area + * 1. Descriptor to argv descriptors array. + * Size of this descriptor = (argc + 1) * sizeof (e2k_ptr_t) + * 2. Descriptor to envp descriptors array. + * 3. Descriptor to the ELF interpreter info. + * } + * 4. argv descriptors array. Last descriptor is NULL + * 5. envp descriptors array. Last descriptor is NULL + * 6. ELF interpreter info + * 7. argv array + * 8. envp array. + * + * (7) and (9) were filled earlier by copy_strings() calls from do_execve + */ + + +static unsigned long protected_randomize_stack_top(unsigned long stack_top) +{ + unsigned int random_variable = 0; + + if (current->flags & PF_RANDOMIZE) + random_variable = get_random_int() % (8*1024*1024); + return PAGE_ALIGN(stack_top - random_variable); +} + + + + +/* Let's use some macros to make this stack manipulation a litle clearer */ + +#define STACK_ALLOC_AUX(sp, items) \ + (e2k_ptr_t *)((u64)((elf_addr_t *)(sp) - (items)) & ~15UL) +#define STACK_ALLOC_BYTES(sp, items) \ + (e2k_ptr_t *)((u64)((char __user *)(sp) - (items)) & ~15UL) +#define STACK_ALLOC_PTRS(sp, len) (sp - ( len)) +#define STACK_ROUND(sp) \ + (e2k_ptr_t *)(((unsigned long) (sp)) &~ 15UL) + + +static int +create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, + unsigned long load_offset, unsigned long start_point, + unsigned long interp_elfhdr_offset) +{ + unsigned long p = bprm->p; + int argc = bprm->argc; + int envc = bprm->envc; + e2k_ptr_t *sp; + unsigned long argcp; + unsigned long argvb; + unsigned long envpb; + unsigned long auxb = 0; + elf_addr_t *elf_info = NULL; + int ei_index = 0; +#ifdef ARGS_AS_ONE_ARRAY + unsigned long args_end; +#endif + const struct cred *cred = current_cred(); + /* was stack aligned accordinatly before? 
*/ + sp = STACK_ROUND(p); +#ifdef ARGS_AS_ONE_ARRAY + args_end = (unsigned long)sp; +#endif + + /* Create the ELF interpreter info */ + elf_info = (elf_addr_t *) current->mm->saved_auxv; +#define NEW_AUX_ENT(id, val) \ + do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0) + +#ifdef ARCH_DLINFO + ARCH_DLINFO; +#endif + NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP); + NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE); + NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC); + NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr)); + NEW_AUX_ENT(AT_FLAGS, 0); + NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid)); + NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid)); + NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid)); + NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid)); + NEW_AUX_ENT(AT_SECURE, bprm->secureexec); + NEW_AUX_ENT(AT_PHDR, load_offset + exec->e_phoff); + NEW_AUX_ENT(AT_PHNUM, exec->e_phnum); + NEW_AUX_ENT(AT_ENTRY, (u32) start_point); + NEW_AUX_ENT(AT_BASE, interp_elfhdr_offset); +#if 0 + if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) { + NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data); + } +#endif +#undef NEW_AUX_ENT + /* AT_NULL is zero; clear the rest too */ + memset(&elf_info[ei_index], 0, sizeof (current->mm->saved_auxv) - + ei_index * sizeof elf_info[0]); + /* And advance past the AT_NULL entry. 
*/ + ei_index += 2; + + /* allocate space for ELF interpreter info */ + sp = STACK_ALLOC_AUX(sp, ei_index); + auxb = (unsigned long)sp; + + /* allocate space for envp descriptors array */ + sp = STACK_ALLOC_PTRS(sp, envc + 1); + envpb = (unsigned long)sp; + + /* allocate space for argv descriptors arrays */ + sp = STACK_ALLOC_PTRS(sp, argc + 1); + argvb = (unsigned long)sp; + + /* allocate space for argc at address, aligned to 16 bytes */ + sp = STACK_ALLOC_PTRS(sp, 1); + argcp = (unsigned long) sp; + +#ifndef ARGS_AS_ONE_ARRAY + /* allocate space for (1) - (3) descriptors */ + sp = STACK_ALLOC_PTRS(sp, 3); + +#endif + + /* And at last allocate space for base descriptor. */ + sp = STACK_ALLOC_PTRS(sp, 1); + + /* Now sp points to the end of the stack */ + bprm->p = (unsigned long)sp; + + /* Populate allocated areas in revers order */ + +#ifdef ARGS_AS_ONE_ARRAY + /* The base descriptor is temporarily saved to the start of the + memory area it describes. After it is copied to %qr0 we may + erase it from stack. */ + PUT_USER_AP(sp, bprm->p, args_end - bprm->p, 0L, RW_ENABLE); + sp++; +#else + /* descriptor to the next four ones */ + PUT_USER_AP(sp, sp + 1, E2k_ELF_ARG_NUM_AP * sizeof (e2k_ptr_t), + 0L, R_ENABLE); + sp++; + + PUT_USER_AP(sp + E2k_ELF_ARGV_IND, argvb, + (envpb - argvb), 0, R_ENABLE); + PUT_USER_AP(sp + E2k_ELF_ENVP_IND, envpb, + (mddb - envpb), 0, R_ENABLE); + PUT_USER_AP(sp + E2k_ELF_AUX_IND, auxb, + (ei_index * sizeof elf_info[0]), 0, R_ENABLE); + } +#endif + + /* Save argc. 
*/ + if (clear_user((e2k_ptr_t __user *) argcp, sizeof(e2k_ptr_t))) + return -EFAULT; + + if (copy_to_user((void __user *) argcp, &argc, sizeof(argc))) + return -EFAULT; + + /* Populate argv */ + p = current->mm->arg_end = current->mm->arg_start; + sp = (e2k_ptr_t __user *)argvb; + while (argc-- > 0) { + size_t len; + len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES); + if (!len || len > PAGE_SIZE*MAX_ARG_PAGES) { + return 0; + } + PUT_USER_AP(sp++, p, len, 0, RW_ENABLE); + p += len; + } + PUT_USER_AP(sp, 0, 0, 0, 0); + + + /* Populate envp */ + current->mm->arg_end = current->mm->env_start = p; + sp = (e2k_ptr_t __user *)envpb; + while (envc-- > 0) { + size_t len; + len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES); + if (!len || len > PAGE_SIZE*MAX_ARG_PAGES) { + return 0; + } + PUT_USER_AP(sp++, p, len, 0, RW_ENABLE); + p += len; + } + PUT_USER_AP(sp, 0, 0, 0, 0); + current->mm->env_end = p; + + /* Put the elf_info on the stack in the right place. */ + if (copy_to_user((void __user *)auxb, elf_info, ei_index * sizeof(elf_info[0]))) { + return -EFAULT; + } + + return 0; +} + + + + + +static int e2p_consistensy_check(struct file *f, struct elfhdr *elf_ex) +{ + if (memcmp(elf_ex->e_ident, ELFMAG, SELFMAG) != 0) { + DBPL("elf_ex->e_ident bad\n"); + goto out; + } + if (elf_ex->e_type != ET_EXEC && elf_ex->e_type != ET_DYN) { + DBPL("not ET_EXEC && not ET_DYN\n"); + goto out; + } + if (!protected_elf_code(elf_ex)) { + DBPL("not protected_elf%d_code\n", ELF_CL_SZ); + goto out; + } + if (!f->f_op || !f->f_op->mmap) { + DBPL("!f->f_op || !f->f_op->mmap\n"); + goto out; + } + /* Now read in all of the header information */ + if (elf_ex->e_phentsize != sizeof(struct elf_phdr)) { + DBPL("elf_ex->e_phentsize(%d) != sizeof(struct elf_phdr)(%ld)\n", + elf_ex->e_phentsize, sizeof(struct elf_phdr)); + goto out; + } + if (elf_ex->e_phnum < 1 || + elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr)) { + DBPL("elf_ex->e_phnum = %d\n", elf_ex->e_phnum); + goto 
out; + } + return 0; +out : + { + int i; int *p = (int *)elf_ex; + DBPL("Elf Header :\n"); + for (i = 0; i < sizeof (struct elfhdr) / 4; i += 2) { + DBPL(" 0x%08x 0x%08x\n", p[i], p[i+1]); + } + } + return -ENOEXEC; +} + + + +static int +get_dynamic_data(struct file *f, + struct elf_phdr *phdr_dyn, + elf_addr_t *got_off, + elf_addr_t *got_sz, + elf_addr_t *init_got_entryp, + elf_addr_t *init_entryp, + elf_addr_t *fini_entryp) +{ + int retval; + loff_t pos; + elf_dyntab_entry_t *dyn; + elf_dyntab_entry_t *dyn_tofree; + elf_dyntab_entry_t *end_dyn; + + if (phdr_dyn->p_filesz % sizeof (elf_dyntab_entry_t)) { + return -ENOEXEC; + } + dyn = (elf_dyntab_entry_t *)kmalloc(phdr_dyn->p_filesz, + GFP_KERNEL); + if (!dyn) { + return -ENOMEM; + } + dyn_tofree = dyn; + pos = phdr_dyn->p_offset; + retval = kernel_read(f, dyn, phdr_dyn->p_filesz, &pos); + if (retval != phdr_dyn->p_filesz) { + if (retval >= 0) { + retval = -EIO; + } + goto out; + } + end_dyn = (elf_dyntab_entry_t *)((char *)dyn + phdr_dyn->p_filesz); + + for ( ; dyn < end_dyn; dyn++) { + if (dyn->d_tag == DT_NULL) { + break; + } + switch (dyn->d_tag) { + case DT_PLTGOT : + *got_off = dyn->d_un.d_ptr; + break; + case DT_PLTGOTSZ : + *got_sz = dyn->d_un.d_val; + break; + case DT_INIT : + *init_entryp = dyn->d_un.d_ptr; + break; + case DT_INIT_GOT : + *init_got_entryp = dyn->d_un.d_ptr; + break; + case DT_FINI : + *fini_entryp = dyn->d_un.d_ptr; + break; + default : + break; + } + } +out: + kfree(dyn_tofree); + return 0; +} + +/* Sort ELF Program Headers in increasing order of their p_vaddrs. + */ +static int elf_phdr_cmp(const void *a, const void *b) +{ + const struct elf_phdr *one = (const struct elf_phdr *) a; + const struct elf_phdr *two = (const struct elf_phdr *) b; + + return (one->p_vaddr < two->p_vaddr + ? -1 : (one->p_vaddr == two->p_vaddr ? 0 : 1)); +} + +/** + * e2p_load_cu_file_by_headers - loads module into memory. 
+ * creates CUT entry for loaded module + * initializes mdd if mdd not NULL + * Results : + * 0 if success, else -errno. + */ +static int +e2p_load_cu_file_by_headers(struct file *loadf, + struct elfhdr *elf, + struct elf_phdr *elf_phdr, + unsigned long *entryp, + kmdd_t *mdd, + unsigned long *load_offset, + unsigned long *interp_elfhdr_offset) +{ + int retval = -ENOEXEC; + unsigned long ulretval; + struct elf_phdr *prog_p; + int i; + unsigned long start_code_addr = 0; + unsigned long start_data_addr = 0; + elf_addr_t ud_start = ~0UL; + elf_addr_t ud_end = 0; + elf_addr_t ud_allocend = 0L; + elf_addr_t ud_mapend = 0; + elf_addr_t uc_start = ~0UL; + elf_addr_t uc_end = 0L; + elf_addr_t uc_allocend = 0; + elf_addr_t uc_mapend = 0; + int prot; + unsigned long start_point = 0; + unsigned long init_point = 0; + unsigned long fini_point = 0; + unsigned long init_got_point = 0; + struct elf_phdr *dyn = NULL; + elf_addr_t got_off; + elf_addr_t got_sz; + int tcount = 0; + elf_addr_t init_got_entry = 0; + elf_addr_t init_entry = 0; + elf_addr_t fini_entry = 0; + int packed; + + /* + * Distinguish between legacy and packed PM ELFs. + */ + packed = (elf->e_flags & ELF_E2K_PACK_SEGMENTS) ? 1 : 0; + + /* + * In the packed case Program Headers need to be sorted in the + * increasing order of their p_vaddrs before being mapped. + */ + if (packed) + sort(elf_phdr, elf->e_phnum, sizeof(elf_phdr[0]), + elf_phdr_cmp, NULL); + + /* + * Get the base address and size of image. 
+ */ + for ( prog_p = elf_phdr, i = 0; i < elf->e_phnum; i++, prog_p++) { + elf_addr_t start; + elf_addr_t end; + elf_addr_t allocend; + elf_addr_t mapend; + + if (prog_p->p_type == PT_DYNAMIC) { + dyn = prog_p; + continue; + } + if (prog_p->p_type != PT_LOAD) { + continue; + } + /* case PT_LOAD */ + /* Check the correctness of segment */ + if ( prog_p->p_align % PAGE_SIZE != 0 ) { + DBPL("load segment not page-aligned 0x%llx.\n", + (u64)prog_p->p_align); + return retval; + } + if ((prog_p->p_vaddr - prog_p->p_offset) % prog_p->p_align ) { + DBPL( "load segment address/offset not properly" + " aligned 0x%llx : 0x%llx.\n", + (u64)prog_p->p_vaddr, (u64)prog_p->p_offset); + return retval; + } + /* + * Calculate the addresses of data and code segments. + */ + if (!(prog_p->p_flags & PF_X)) { + /* + * Handle the data segment + */ + start = ud_start; + end = ud_end; + allocend = ud_allocend; + mapend = ud_mapend; + } else { + /* + * Handle the code segment + */ + start = uc_start; + end = uc_end; + allocend = uc_allocend; + mapend = uc_mapend; + } + + if (!packed) { + if (start > (prog_p->p_vaddr & + ~(prog_p->p_align - 1))) { + /* Calculate the start address of the segment + * in memory + */ + start = prog_p->p_vaddr & + ~(prog_p->p_align - 1); + + /* Save the difference between `p_vaddr' and + * `p_offset' of the first data Program Header + * containing the Program Headers in PM ELF. + * In `create_elf_tables ()' it will be used to + * calculate the Program Headers' runtime + * address. + */ + if (!(prog_p->p_flags & PF_X) && load_offset) + *load_offset = (prog_p->p_vaddr + - prog_p->p_offset); + if (!(prog_p->p_flags & PF_X) + && interp_elfhdr_offset) + *interp_elfhdr_offset = + (prog_p->p_vaddr + - prog_p->p_offset); + } + if (end < prog_p->p_vaddr + prog_p->p_filesz) { + /* Calculate the end address of data/code + * in file. 
+ */ + end = prog_p->p_vaddr + prog_p->p_filesz; + } + if (allocend < prog_p->p_vaddr + prog_p->p_memsz) { + /* Calculate the end address of data/code + * in memory + */ + allocend = prog_p->p_vaddr + prog_p->p_memsz; + } + if (mapend < PAGE_ALIGN(prog_p->p_vaddr + + prog_p->p_filesz)) { + /* Calculate the the end address + * of mmaped memory + */ + mapend = PAGE_ALIGN(prog_p->p_vaddr + + prog_p->p_filesz); + } + } else /* packed */ { + elf_addr_t b, r; + + if (start == ~0UL) { + /* The first "sorted" segment is mapped + * to 0 offset in {CU,G}D. + */ + start = 0L; + + /* p_offset (matches p_vaddr) of the first data + * segment is mapped to offset (see the + * computations below) (p_vaddr & (p_align - 1)) + * in GD. TODO: to be more strict do NOT rely + * on the fact that it's the "first" Program + * Header mapped into GD that contains "Program + * Headers", but locate such a Program Header + * via an explicit + * (e_phoff >= p_offset + * && ((e_phoff + e_phnum * e_phentsize) + * <= p_offset + p_filesz)) test instead. + * The same can probably be done in legacy case + * too. + */ + if (!(prog_p->p_flags & PF_X)) { + unsigned long delta; + + delta = ((prog_p->p_vaddr + & (prog_p->p_align - 1)) + - prog_p->p_offset); + + if (load_offset) + *load_offset = delta; + + if (interp_elfhdr_offset) + *interp_elfhdr_offset = delta; + } + } + + allocend = PAGE_ALIGN(allocend); + r = prog_p->p_vaddr & (prog_p->p_align - 1); + b = ((allocend + prog_p->p_align - (r + 1)) + / prog_p->p_align); + /* Let it hold the offset in CUD/GD corresponding to + * p_vaddr until it actually becomes "allocend" a few + * lines below. 
+ */ + allocend = b * prog_p->p_align + r; + + end = allocend + prog_p->p_filesz; + mapend = PAGE_ALIGN(end); + allocend = allocend + prog_p->p_memsz; + } + + if (!(prog_p->p_flags & PF_X)) { + /* + * Handle the data segment + */ + ud_start = start; + ud_end = end; + ud_allocend = allocend; + ud_mapend = mapend; + } else { + /* + * Handle the code segment + */ + uc_start = start; + uc_end = end; + uc_allocend = allocend; + uc_mapend = mapend; + } + } + + /* + * Check if all lenghts and memory offsets no longer than 2**32 + */ + + if (check_len(PAGE_ALIGN(uc_mapend) + PAGE_ALIGN(uc_allocend) - + PAGE_ALIGN(uc_end))) { + DBPL("code size too big\n"); + return retval; + } + + if (check_len(PAGE_ALIGN(ud_mapend) + PAGE_ALIGN(ud_allocend) - + PAGE_ALIGN(ud_end))) { + DBPL("data size too big\n"); + return retval; + } + + /* + * Load the module into memory. + */ + if (uc_allocend) { + start_code_addr + = do_mmap_elf(NULL, 0, PAGE_ALIGN(uc_allocend), + PROT_NONE, MAP_PRIVATE | MAP_FIRST32, 0); + if (BAD_ADDR(start_code_addr)) { + retval = (int) (long) start_code_addr; + return retval; + } + } + + if (ud_allocend) { + start_data_addr + = do_mmap_elf(NULL, 0, PAGE_ALIGN(ud_allocend), + PROT_NONE, MAP_PRIVATE | MAP_FIRST32, 0); + if (BAD_ADDR(start_data_addr)) + return (int) (long) start_data_addr; + } + + if (packed) { + /* In the packed case these will be reevaluated when + * progressively obtaining the "offset" range in CUD/GD + * each segment should be mapped to. 
+ */ + uc_allocend = 0L; + ud_allocend = 0L; + } + + /* + * Now search in dynamic section typecount, got offset, got length + */ + if (dyn) { + retval = get_dynamic_data(loadf, dyn, &got_off, + &got_sz, &init_got_entry, + &init_entry, &fini_entry); + if (retval) + return retval; + if (check_len(got_sz)) + return -ENOEXEC; + } + + for ( prog_p = elf_phdr, i = 0; i < elf->e_phnum; i++, prog_p++) { + unsigned mapflag = MAP_PRIVATE | MAP_FIRST32 | MAP_FIXED; + unsigned long start_aligned, start, end, map_addr; + unsigned long offset; + + if (prog_p->p_type != PT_LOAD) { + continue; + } + + prot = 0; + + if (prog_p->p_flags & PF_R) + prot |= PROT_READ; + + if (prog_p->p_flags & PF_W) + prot |= PROT_WRITE; + + + if (prog_p->p_flags & PF_X) + { + prot |= PROT_EXEC; + mapflag |= MAP_EXECUTABLE; + } + + + if (!packed) { + /* It's doubtful if the memory in the range [p_vaddr + * & ~(p->align - 1), p_vaddr & (PAGE_SIZE - 1)) should + * be actually mapped from the file. In other words, + * should the segment pages preceding the one + * containing p_vaddr be mapped from the file? + * Moreover, it's not quite clear if they should be + * accessible at all. + */ + if (!(prog_p->p_flags & PF_X)) { + start = start_data_addr + prog_p->p_vaddr; + start_aligned = (start_data_addr + + (prog_p->p_vaddr + & ~(prog_p->p_align - 1))); + } else { + start = start_code_addr + prog_p->p_vaddr; + start_aligned = (start_code_addr + + (prog_p->p_vaddr + & ~(prog_p->p_align - 1))); + } + + offset = prog_p->p_offset & ~(prog_p->p_align - 1); + } else /* packed */ { + elf_addr_t allocend; + elf_addr_t b, r; + + /* The packed implementation believes that the answer + * to the above questions is "no" and the only purpose + * of p_align is to ensure that the objects in the + * containing segment eventually (i.e. at runtime) get + * alignments assigned to them during compilation. It + * would be funny if my understanding of the role of + * p_align turned out to be wrong in the end ... 
+ */ + + if (!(prog_p->p_flags & PF_X)) + allocend = ud_allocend; + else + allocend = uc_allocend; + + allocend = PAGE_ALIGN(allocend); + r = prog_p->p_vaddr & (prog_p->p_align - 1); + b = ((allocend + prog_p->p_align - (r + 1)) + / prog_p->p_align); + start = b * prog_p->p_align + r; + allocend = start + prog_p->p_memsz; + + if (!(prog_p->p_flags & PF_X)) { + start += start_data_addr; + ud_allocend = allocend; + } else { + /* In packed case it's handy to evaluate offsets + * in CUD used to set start_point and 3 user + * exported PLs in MDD right on seeing the + * containing Program Header. FIXME: only + * start_point (required to pass execution to + * userspace) and init_got_point (exported to + * the user) should make sense in packed case + * as it implies the use of new (as opposed to + * ancient) implementation of uselib. + */ + if (elf->e_entry >= prog_p->p_vaddr + && elf->e_entry < (prog_p->p_vaddr + + prog_p->p_filesz)) + start_point = (start_code_addr + + start + + elf->e_entry + - prog_p->p_vaddr); + + if (init_entry >= prog_p->p_vaddr + && init_entry < (prog_p->p_vaddr + + prog_p->p_filesz)) { + init_point = (start_code_addr + + start + + init_entry + - prog_p->p_vaddr); + } + + if (fini_entry >= prog_p->p_vaddr + && fini_entry < (prog_p->p_vaddr + + prog_p->p_filesz)) { + fini_point = (start_code_addr + + start + + fini_entry + - prog_p->p_vaddr); + } + + if (init_got_entry >= prog_p->p_vaddr + && init_got_entry < (prog_p->p_vaddr + + prog_p->p_filesz)) { + init_got_point = (start_code_addr + + start + + init_got_entry + - prog_p->p_vaddr); + } + + start += start_code_addr; + uc_allocend = allocend; + } + + /* According to my believes pages preceding the one + * matching p_vaddr should not be accessible. 
+ */ + start_aligned = start & ~(PAGE_SIZE - 1); + offset = prog_p->p_offset & ~(PAGE_SIZE - 1); + + } + end = PAGE_ALIGN(start + prog_p->p_filesz); + + map_addr = do_mmap_elf(loadf, start_aligned, + end - start_aligned, prot, + mapflag, offset); + if (map_addr != start_aligned) + return (int) (long) map_addr; + + if (PAGE_ALIGN(start + prog_p->p_memsz) + > start + prog_p->p_filesz ) { + unsigned long start_zero = start + prog_p->p_filesz; + unsigned long start_zeropage = PAGE_ALIGN(start_zero ); + unsigned long end_zero = PAGE_ALIGN(start + prog_p->p_memsz); + + DBPL( "zero start = %#lx, zero end = %#lx, zeropage = %#lx\n", + start_zero, end_zero, start_zeropage ); + + /* Fill by zeroes the end of the last page with + * data from file + */ + if (start_zeropage > start_zero) { + struct vm_area_struct *vma, *prev; + unsigned long oldflags, newflags; + struct mm_struct *mm = current->mm; + + /* + * The trailing page mapped onto the file may + * quite legitimately belong to a readonly + * section like `.text' or `.rodata'. + * Temporarely make the page writable so as just + * to clean up its part mapped beyond `p_offset + * + p_filesz', otherwise it will be filled in + * with junk data probably from another + * section. 
+ */ + down_write(&mm->mmap_sem); + vma = find_vma(mm, start_zeropage - PAGE_SIZE); + if (!vma) { + up_write(&mm->mmap_sem); + return -EFAULT; + } + oldflags = vma->vm_flags; + newflags = oldflags | VM_WRITE; + prev = NULL; + if (newflags != oldflags && + mprotect_fixup(vma, &prev, + start_zeropage - PAGE_SIZE, + start_zeropage, + newflags) != 0) { + up_write(&mm->mmap_sem); + return -EFAULT; + } + up_write(&mm->mmap_sem); + + if (clear_user((void *) start_zero, + start_zeropage - start_zero)) + return -EFAULT; + + if (newflags != oldflags) { + down_write(&mm->mmap_sem); + vma = find_vma(mm, + (start_zeropage + - PAGE_SIZE)); + prev = NULL; + if (!vma || + mprotect_fixup(vma, &prev, + (start_zeropage + - PAGE_SIZE), + start_zeropage, + oldflags) != 0) { + up_write(&mm->mmap_sem); + return -EFAULT; + } + up_write(&mm->mmap_sem); + } + } + + /* + * Fill by zeroes all the rest pages. + */ + if (end_zero > start_zeropage) { + ulretval = do_mmap_elf(NULL, start_zeropage, + end_zero - start_zeropage, + prot, + MAP_PRIVATE | MAP_FIRST32 | MAP_FIXED, + 0); + if (ulretval != start_zeropage) { + DBPL("could not map space for zero pages, " + "errno #%d.\n", + (int)(-(long)ulretval)); + return (int) (long) ulretval; + } + + /* + * Don't care about defective ELFs with read- + * only bss-like sections: let their load fail. + */ + if (clear_user((void *) start_zeropage, + end_zero - start_zeropage)) + return -EFAULT; + } + } + } + retval = 0; + + /* + * everything is mapped. Do some actions to complete the function. + */ + if (!packed) { + if (elf->e_entry) { + start_point = start_code_addr + elf->e_entry; + if (start_point >= start_code_addr + uc_end) + return -ENOEXEC; + } + } else { + /* Start point must have already been "packed" along with the + * containing segment. Fail if not. + */ + if (start_point == 0) + return -ENOEXEC; + } + + if (mdd) { + int cui; + /* The "packed" values of `{init,{,_got},fini}_point's have + * already been evaluated above. 
+ */ + if (init_entry && !packed) + init_point = init_entry + start_code_addr; + if (fini_entry && !packed) + fini_point = fini_entry + start_code_addr; + if (init_got_entry && !packed) + init_got_point = init_got_entry + start_code_addr; + DBPL("DBPL : populate mdd (0x%llx, 0x%llx, " + "0x%lx, 0x%lx, 0x%lx, 0x%lx\n", + start_data_addr + (u64)got_off, (u64)got_sz, + init_got_point, init_point, fini_point, start_point); + + mdd->got_addr = start_data_addr + got_off; + mdd->got_len = got_sz; + mdd->init_got_point = init_got_point; + mdd->init_point = init_point; + mdd->fini_point = fini_point; + mdd->entry_point = start_point; + + cui = create_cut_entry(tcount, start_code_addr, + PAGE_ALIGN(uc_allocend), + start_data_addr, + PAGE_ALIGN(ud_allocend)); + if (cui < 0) + return cui; + mdd->cui = cui; + } else { + DBPL("DBPL : populate current mm\n"); + current->mm->start_code = start_code_addr; + current->mm->end_code = PAGE_ALIGN(uc_allocend); + current->mm->start_data = start_data_addr; + current->mm->end_data = PAGE_ALIGN(ud_allocend); + current->mm->context.tcount = tcount; + } + + if (entryp) { + DBPL("DBPL : start_point = 0x%lx\n", start_point); + *entryp = start_point; + } + + /* Ensure that `*load_offset' contains the difference between run-time + * address of the first data Program Header and its `p_offset', which + * is required to evaluate `AT_PHDR' in `create_elf_tables ()'. 
+ */ + if (load_offset) + *load_offset += start_data_addr; + + return 0; +} + +static int +e2p_load_cu_file(struct file *loadf, + struct elfhdr *ret_ehdr, + unsigned long *entryp, + kmdd_t *mdd, + unsigned long *load_offset, + unsigned long *interp_elfhdr_offset) +{ + struct elfhdr ehdr; + struct elf_phdr *elf_phdr = NULL; + unsigned int size; + loff_t pos = 0; + long retval = -ENOEXEC; + + + retval = kernel_read(loadf, &ehdr, sizeof(ehdr), &pos); + if (retval != sizeof(ehdr)) { + if (retval >= 0) + retval = -EIO; + goto out; + } + + + retval = e2p_consistensy_check(loadf, &ehdr); + if (retval) { + goto out; + } + + size = ehdr.e_phnum * sizeof(struct elf_phdr); + retval = -ENOMEM; + elf_phdr = (struct elf_phdr *) kmalloc(size, GFP_KERNEL); + if (!elf_phdr) { + goto out; + } + pos = ehdr.e_phoff; + retval = kernel_read(loadf, elf_phdr, size, &pos); + if (retval != size) { + if (retval >= 0) { + retval = -EIO; + } + goto out; + } + + retval = e2p_load_cu_file_by_headers(loadf, &ehdr, + elf_phdr, entryp, mdd, + load_offset, + interp_elfhdr_offset); + + /* If ret_ehdr is non-NULL, this means that the caller wants to replace + * the ELF Header of the main executable with that of ld.so + */ + if (retval == 0 && ret_ehdr != NULL) { + *ret_ehdr = ehdr; + } +out : + if (elf_phdr) { + kfree(elf_phdr); + } + return retval; +} + +static int load_e2p_load_binary(struct linux_binprm * bprm) +{ + struct pt_regs *regs = current_pt_regs(); + struct elfhdr elf_ex; + struct file *interpf = NULL; /* to shut gcc up */ + char *interp_name = NULL; + struct elf_phdr *elf_ppnt; + struct elf_phdr *elf_phdata; + loff_t pos; + int retval; + int i; + unsigned int size; + unsigned long start_point; + unsigned long load_offset = 0; + long task_flags = 0; + unsigned long interp_elfhdr_offset; + + DBPL("Protected loader elf%d started : %s\n", + ELF_CL_SZ, bprm->filename); + /* Get the exec-header */ + elf_ex = *((struct elfhdr *) bprm->buf); + retval = e2p_consistensy_check(bprm->file, &elf_ex); 
+ if (retval != 0) { + DBPL(" PL-elf%d : file %s rejected\n", + ELF_CL_SZ, bprm->filename); + goto out; + } + + size = elf_ex.e_phnum * sizeof(struct elf_phdr); + retval = -ENOMEM; + elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL); + if (!elf_phdata) { + goto out; + } + pos = elf_ex.e_phoff; + retval = kernel_read(bprm->file, elf_phdata, size, &pos); + if (retval != size) { + if (retval >= 0) { + retval = -EIO; + } + goto out_free_ph; + } + /* must be here */ + task_flags = current->thread.flags; + SET_PERSONALITY(elf_ex); + + + elf_ppnt = elf_phdata; + for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++) { + if (elf_ppnt->p_type != PT_INTERP) { + continue; + } + retval = -ENOEXEC; + if (elf_ppnt->p_filesz > PATH_MAX || + elf_ppnt->p_filesz < 2) { + goto out_free_file; + } + retval = -ENOMEM; + interp_name = (char *) kmalloc( + elf_ppnt->p_filesz, GFP_KERNEL); + if (interp_name == NULL) { + goto out_free_file; + } + pos = elf_ppnt->p_offset; + retval = kernel_read(bprm->file, interp_name, + elf_ppnt->p_filesz, &pos); + if (retval != elf_ppnt->p_filesz) { + if (retval >= 0) { + retval = -EIO; + } + goto out_free_interp; + } + /* make sure path is NULL terminated */ + retval = -ENOEXEC; + if (interp_name[elf_ppnt->p_filesz - 1] != '\0') { + goto out_free_interp; + } + interpf = open_exec(interp_name); + retval = PTR_ERR(interpf); + if (IS_ERR(interpf)) { + goto out_free_interp; + } + DBPL("PL : use interpreter %s\n", interp_name); + break; + } + + /* Flush all traces of the currently running executable */ + retval = flush_old_exec(bprm); + if (retval) { + goto out_free_interp; + } + + /* OK, This is the point of no return */ + current->mm->start_data = 0; + current->mm->end_data = 0; + current->mm->end_code = 0; + current->flags &= ~PF_FORKNOEXEC; + current->mm->def_flags = 0; + + if (elf_read_implies_exec(loc->elf_ex, EXSTACK_DISABLE_X)) { + current->personality |= READ_IMPLIES_EXEC; + } + if ( !(current->personality & ADDR_NO_RANDOMIZE) && + 
randomize_va_space) { + current->flags |= PF_RANDOMIZE; + } + setup_new_exec(bprm); + + retval = setup_arg_pages(bprm, protected_randomize_stack_top(STACK_TOP), + EXSTACK_DISABLE_X); + if (retval < 0) + goto out_free_interp_file; + + /* load binary or interpreter */ + if (interpf) { + /* The ELF Header of the main executable is replaced with the + * one of the interpreter on return from this function. Note + * that it is the latter which should be passed to `create_elf_ + * tables ()' below. + */ + retval = e2p_load_cu_file(interpf, &elf_ex, &start_point, NULL, + &load_offset, &interp_elfhdr_offset); + } else { + retval = e2p_load_cu_file_by_headers + (bprm->file, + &elf_ex, elf_phdata, &start_point, NULL, + &load_offset, &interp_elfhdr_offset); + } + if (retval != 0) { + goto out_free_interp_file; + } + + set_binfmt(&elf_format); + + /* load data for user */ + create_elf_tables(bprm, &elf_ex, load_offset, + /* Entry point should be believed to be unknown if + * ld.so is started as an interpreter (or implicitly + * in other words). + */ + interpf ? 
0 : start_point, + interp_elfhdr_offset); + current->mm->start_stack = bprm->p; + + // XXX set stack protection if current->ptrace & PT_PTRACED + + start_thread(regs, start_point, bprm->p); + + if (unlikely(current->ptrace & PT_PTRACED)) { + if (current->ptrace & PT_TRACE_EXEC) { + ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP); + } else { + send_sig(SIGTRAP, current, 0); + } + } + retval = 0; + /* Resetting debug mode: */ + current->mm->context.pm_sc_debug_mode = PM_SC_DBG_MODE_DEFAULT; + + /* error cleanup */ +out_free_interp_file: + if (interpf) { + allow_write_access(interpf); + fput(interpf); + } +out_free_interp: + if (interp_name) { + kfree(interp_name); + } +out_free_file: + if (retval) { + current->thread.flags = task_flags; + } +out_free_ph: + if (elf_phdata) { + kfree(elf_phdata); + } +out: + return retval; +} + + +long sys_load_cu(char *name, kmdd_t *mdd) +{ + struct file * file; + struct path path; + int error; + + error = user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, &path); + if (error) { + goto out; + } + error = -EINVAL; + if (!S_ISREG(path.dentry->d_inode->i_mode)) { + goto exit; + } + error = inode_permission(path.dentry->d_inode, MAY_READ); + if (error) { + goto exit; + } + file = dentry_open(&path, O_RDONLY, current_cred()); + if (IS_ERR(file)) { + error = PTR_ERR(file); + goto exit; + } + error = -ENOEXEC; + if (file->f_op == NULL) { + fput(file); + goto exit; + } + + error = e2p_load_cu_file(file, NULL, NULL, mdd, NULL, NULL); + + fput(file); + +out: + return error; +exit: + path_put(&path); + goto out; +} + +long sys_unload_cu(unsigned long glob_base, size_t glob_size) +{ + int error = 0; + /* + * The information about code segment + * of module, which should be unloaded + */ + unsigned long code_base; + size_t code_size; + + if (!glob_base || !glob_size) + return -EINVAL; + + /* Free cut entry for the module, which should be unloaded */ + error = free_cut_entry(glob_base, PAGE_ALIGN(glob_size), + &code_base, &code_size); + if (error) + 
return error; + + /* Unmap code and data areas of module */ + error = do_munmap_elf(glob_base, PAGE_ALIGN(glob_size)); + error = do_munmap_elf(code_base, code_size); + + if (error) + return error; + + return 0; +} + +/* Module load stuff */ +static int __init init_elf_binfmt(void) +{ + register_binfmt(&elf_format); + + return 0; +} + +static void __exit exit_elf_binfmt(void) +{ + /* Remove the COFF and ELF loaders. */ + unregister_binfmt(&elf_format); +} + +core_initcall(init_elf_binfmt); +module_exit(exit_elf_binfmt); +MODULE_LICENSE("GPL"); + + + + + + + diff --git a/arch/e2k/3p/global_sp.c b/arch/e2k/3p/global_sp.c new file mode 100644 index 000000000000..2b6695b5f869 --- /dev/null +++ b/arch/e2k/3p/global_sp.c @@ -0,0 +1,1593 @@ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#undef DEBUG_TRAP_CELLAR +#undef DbgTC +#define DEBUG_TRAP_CELLAR 0 /* DEBUG_TRAP_CELLAR */ +#define DbgTC(...) DebugPrint(DEBUG_TRAP_CELLAR ,##__VA_ARGS__) + + +/* + * The problems of multithreading + * + * I. PROBLEM + * If a stack pointer is written to a global variable seen by other threads + * there is a problemm accessing it from those threads. There are two + * possibilities: + * 1) 'psl' of the reading thread is greater or equal to 'psl' stored in + * the pointer. In this case the thread will access not the stack from + * which this pointer originated but rather from its own stack (as a + * consequence of having the highest part of stack address stored in the + * 'SBR' per-thread register). + * There is also a possibility that although psl is valid (from the point + * of view of that thread) the actual address is not - then reading will + * generate exc_page_miss or something like that and the process will be + * killed with SIGSEGV. + * 2) 'psl' of the reading thread is less than 'psl' stored in the pointer. 
+ * The thread receives exc_illegal_operand and the process is terminated + * with SIGILL. + * + * II. SOLUTION + * So the solution is to replace SAP's written to global variables with some + * invalid value and let the interrupt handler deal with everything. But + * the written SAP might be pointing to another SAP which points to array + * of SAP's etc - thus it's impossible to follow all stack pointers in all + * threads (since that chain of pointers may change at *any* time). + * + * There are some subtleties to consider: + * + * 1. Since stack addresses are reused, two identical pointers to a stack + * can point to different values (and even to variables with different + * types). So to catch such accesses (which as we assume are invalid) + * the 'age' of a pointer must be keeped track of. + * + * 2. Lifetime of any given stack pointer written to global variable + * is unknown - any thread can read it to a local register which can live + * forever. So the 'age' of pointers grows indefinitly. + * + * 3. It is enough to increase the 'age' on every function return - + * since this is the only case of a pointer invalidation which is not + * handled by hardware. + * This can be optimized further - it is enough to increase the 'age' + * only on the returns for which there is a global pointer pointing to + * the released stack frame. + * + * 4. So every access to another thread's stack must generate an interrupt. + * But half-speculative loads can only generate exc_page_miss, so for example + * just clearing read/write bits in AP is not an option - half-speculative + * loads from such AP's will only write diagnostic (DP). + * + * In current implementation there is an area starting at START_VAL + * (START_VAL == thread_info->multithread_address) which all created AP's + * are pointing to. 
Every new AP created from SAP (when that SAP is written + * to a global variable or to another thread's stack) is calculated like this: + * AP.size = SAP.size + * AP.index = SAP.index + * AP.base = START_VAL + index; + * index += AP.size + * So the address in AP is acting as 'age'. + * + * 5. Since the address in AP is acting as 'age', when we run out of space + * in that area we cannot do anythyng anymore. The size of the area is defined + * at compile time in MAX_MULTITHREAD_SIZE. + * + * 6. Changing SAP's to AP's is required only when multithreading. So until + * there is a second thread we only keep track of globals holding pointers to + * stack but do not substitute SAP to AP. + * + * 7. When SAP is changed to AP we mark ALL frames in current stack to + * generate exc_last_wish on return. This because informations and pointers + * in our stack can change at any time, and the other threads accessing + * our stack must be informed about it. + * + * Actually this is an overkill - it is enough to mark only those frames + * to which that SAP *might point in a future*. So if the SAP is pointing to + * int (4 bytes) then we know that there will not be any lists in the future + * pointing to other frames and it is enough to mark with 'last wish' only + * the frame to which that SAP is pointing. + * + * 8. A couple of words about 'age'. + * In current implementation when last wish interrupt is generated, we add + * to the globals list a new element with type "TYPE_BOUND" which stores + * thread ID, psl and time. + * thread ID == current->pid + * psl == PUSD.psl + * time == index + * where 'index' is the same as in 4). + * + * This information is enough to check whether access by some SAP converted + * to AP is valid. + * + * + * And it would of great help to have hardware support for storing thread + * number in SAP (even better - store both thread number AND number of + * last_wish interrupts in SAP). Then this would be SO MUCH faster and + * simpler... 
+ */ + + +/* + * To check validation of stack pointers which was written in global + * for multi_threading + */ +#define IS_THIS_THREAD(x) (x->type == TYPE_GLOBAL && current->pid == x->pid) +#define IS_MULTITHREADING (WAS_MULTITHREADING) + +typedef u32 Syllabe_t; + +typedef struct { + unsigned mdl : 4; + unsigned lng : 3; + unsigned nop : 3; + unsigned lm : 1; + unsigned x : 1; + unsigned s : 1; + unsigned sh : 1; + unsigned c : 2; // it is mask now + unsigned cd : 2; + unsigned pl : 2; + unsigned ale : 6; + unsigned al : 6; +} HS_syllable_fields_t; + +#undef DEBUG_SP +//#define DEBUG_SP +#ifdef DEBUG_SP + #define CHECK_SIZE(x, zz) \ + if ((p_psl - l_psl)* sizeof(e2k_mem_crs_t) \ + > AS_STRUCT(regs->stacks.pcsp_hi).ind) { \ + printk("CHECK_SIZE %s p_psl=%ld l_psl=%ld ind = %ld \n", \ + x, p_psl, l_psl, AS_STRUCT(regs->stacks.pcsp_hi).ind); \ + return zz; \ + } +#else /* !DEBUG_SP */ + #define CHECK_SIZE(x, zz) +#endif /* DEBUG_SP */ + +typedef union HS_syllable_union { + HS_syllable_fields_t fields; + Syllabe_t word; +} HS_syllable_struct_t; + +#define AL(w) (((HS_syllable_fields_t*)&w)->al) +#define ALE(w) (((HS_syllable_fields_t*)&w)->ale) +#define PL(w) (((HS_syllable_fields_t*)&w)->pl) +#define Cd(w) (((HS_syllable_fields_t*)&w)->cd) +#define C(w) (((HS_syllable_fields_t*)&w)->c) +#define SH(w) (((HS_syllable_fields_t*)&w)->sh) +#define S(w) (((HS_syllable_fields_t*)&w)->s) +#define lm(w) (((HS_syllable_fields_t*)&w)->lm) +#define NOP_CNT(w) (((HS_syllable_fields_t*)&w)->nop) +#define LNG(w) ((((HS_syllable_fields_t*)&w)->lng)+1) +#define HS_LNG(w) (((HS_syllable_fields_t*)&w)->lng) +#define MDL(w) (((HS_syllable_fields_t*)&w)->mdl) + +typedef struct +{ + + unsigned ctcond : 9; + unsigned xxx : 1; + unsigned ctop : 2; + unsigned aa : 4; + unsigned alc : 2; + unsigned abp : 2; + unsigned xx : 1; + unsigned abn : 2; + unsigned abg : 2; + unsigned x : 1; + unsigned vfdi : 1; + unsigned srp : 1; + unsigned bap : 1; + unsigned eap : 1; + unsigned ipd : 2; + +} 
SS_syllable_fields_t; + + +typedef union SS_syllable_union { + SS_syllable_fields_t fields; + Syllabe_t word; +} SS_syllable_struct_t; + +#define SSIPD(w) (((SS_syllable_fields_t*)&w)->ipd) +#define SSEAP(w) (((SS_syllable_fields_t*)&w)->eap) +#define SSBAP(w) (((SS_syllable_fields_t*)&w)->bap) +#define SSSRP(w) (((SS_syllable_fields_t*)&w)->srp) +#define SSVFDI(w) (((SS_syllable_fields_t*)&w)->vfdi) +#define SSABG(w) (((SS_syllable_fields_t*)&w)->abg) +#define SSABN(w) (((SS_syllable_fields_t*)&w)->abn) +#define SSAA(w) (((SS_syllable_fields_t*)&w)->aa) +#define SSCTOP(w) (((SS_syllable_fields_t*)&w)->ctop) +#define SSCTCOND(w) (((SS_syllable_fields_t*)&w)->ctcond) + +typedef struct { + unsigned param : 28; + unsigned opc : 4; +} CS1_syllable_fields_t; + +typedef union CS1_syllable_union { + CS1_syllable_fields_t fields; + Syllabe_t word; +} CS1_syllable_struct_t; + + +int gsp_is_return(struct pt_regs *regs) +{ + + e2k_rwp_struct_t tir_lo; + u64 ip; + HS_syllable_struct_t hs; + SS_syllable_struct_t ss; + int ctop; + + /* Pointer on the instruction that caused the exception + * is located in corresponding TIR register. + */ + tir_lo.E2K_RWP_reg = regs->trap->TIR_lo; + ip = tir_lo.E2K_RWP_base; + + DbgTC("IP = %llx\n", ip); + + /* We need to read Header Syllabe of instruction interrupted + * to determine general instruction structure + */ + + if (get_user(AS_WORD(hs), (Syllabe_t *) ip) == -EFAULT) { + return 0; + } + + DbgTC("HS = %x\n", AS_WORD(hs)); + + /* Check presence of Stub Syllabe */ + if (S(hs)) { + DbgTC("SS does exist\n"); + } else { + DbgTC("SS doesn't exist\n"); + return 0; + } + + /* Stub Syllabe encodes different short fragment of command */ + if (get_user(AS_WORD(ss), + (Syllabe_t *) (ip + sizeof (HS_syllable_struct_t))) == -EFAULT) { + return 0; + } + + DbgTC("SS = %x\n", AS_WORD(ss)); + + /* CTOP field encodes CTPR register in use */ + ctop = SSCTOP(ss); + + /* RETURN always uses CTPR3 */ + if (ctop != 3) { + DbgTC("SS.CTOP !=3 !!! 
SS.CTOP = %d\n", ctop); + return 0; + } else { + DbgTC("SS.CTOP == 3\n"); + } + + /* CTPR3.opc field should match RETURN operation indicator */ + if (AS_STRUCT(regs->ctpr3).opc != RETURN_CT_OPC) { + DbgTC("ctpr3.opc != RETURN_CT_OPC\n"); + return 0; + } else { + DbgTC("ctpr3.opc == RETURN_CT_OPC\n"); + } + + return 1; +} + +static int __set_last_wish_all(e2k_mem_crs_t *frame, + unsigned long real_frame_addr, + unsigned long corrected_frame_addr, int flags, void *arg) +{ + int *skip = (int *) arg; + e2k_cr1_lo_t cr1_lo = frame->cr1_lo; + + if (*skip) { + --(*skip); + return 0; + } + + if (AS(cr1_lo).pm) + return 0; + + AS(cr1_lo).lw = 1; + + if (flags & PCF_FLUSH_NEEDED) + NATIVE_FLUSHC; + + DbgTC("crs 0x%lx\n", real_frame_addr); + return put_cr1_lo(cr1_lo, real_frame_addr, 0); +} + +/* + * set last_wish for all procedures which psl < l_psl + */ +static long set_last_wish_all(struct pt_regs *regs, int l_psl, int p_psl) +{ + int skip = p_psl - l_psl; + long ret; + + if (p_psl == l_psl) + AS(regs->crs.cr1_lo).lw = 1; + + ret = parse_chain_stack(PCS_USER, NULL, __set_last_wish_all, &skip); + + return (IS_ERR_VALUE(ret)) ? 
ret : 0; +} + +#undef GET_IP +#define GET_IP (AS(regs->crs.cr0_hi).ip << E2K_ALIGN_INS) + +/* change SAP to AP with "rw" == 0 and unique address */ + + /* for type TYPE_INIT */ +#define START_VAL current_thread_info()->multithread_address +#define GET_CURR_ADDRESS(x) (x->lcl_psl + START_VAL) +#define WAS_MODIFIED(x) (x->old_address) +#define INCR_CURR_ADDRESS(entry, x) (entry->lcl_psl = entry->lcl_psl + x + \ + (1 + ~(x & 0xf) & 0xf)*(!!(x & 0xf))) +#define IS_CHANGED_ADDRESS(entry, x) \ + (x >= START_VAL && x < GET_CURR_ADDRESS(entry)) +#define MAX_MULTITHREAD_SIZE (PAGE_SIZE * 100) +#define CAN_INCR_CURR_ADDRESS(entry, x) (entry->lcl_psl + x + (1 + ~(x & \ + 0xf) & 0xf)*(!!(x & 0xf)) < MAX_MULTITHREAD_SIZE) + /* for type TYPE_BOUND */ +#define GET_TIME(x) (x->global_p) + +e2k_addr_t get_valid_address(e2k_addr_t address, global_store_t **record); +void down_read_lock_multithread(void); +void up_read_lock_multithread(void); +static void print_all_records(void); + + +global_store_t* get_init_record(void) +{ + global_store_t *entry = current_thread_info()->g_list; + + while (entry != NULL) + { + if (entry->type == TYPE_INIT) { + return entry; + } + entry = entry->next; + } + return NULL; +} + +/* + * create new record and change SAP to AP + * addr - address in stack ( was readed SAP) + */ +static void create_new_record(pt_regs_t *regs,e2k_addr_t addr, + long multithread_addr) +{ + global_store_t *list = current_thread_info()->g_list; + global_store_t *new, *last; + global_store_t *init = get_init_record(); + register unsigned long tmp_lo, tmp_hi; + union {e2k_rwsap_lo_struct_t sap_lo; e2k_rwap_lo_struct_t ap_lo;} lo; + union {e2k_rwsap_hi_struct_t sap_hi; e2k_rwap_hi_struct_t ap_hi;} hi; + unsigned long address; + unsigned long base; + e2k_addr_t usbr; + global_store_t *pnt_record; + int tag_lo, tag_hi; + + DbgTC("addr=0x%lx multithread_addr=0x%lx\n", + addr, multithread_addr); + if (get_valid_address(multithread_addr, &pnt_record)==0) { + DbgTC("bad 
multithread_addr=%lx\n", + multithread_addr); + return; + } + + + new = (global_store_t *) kmalloc(sizeof(global_store_t), GFP_ATOMIC); + if (!new) { + DbgTC("no memory\n"); + return; + } + + new->lcl_psl = 0; + new->global_p = 0; + new->next = NULL; + /* for multithreading support */ + new->type = TYPE_GLOBAL; + new->pid = pnt_record->pid; + new->sbr = pnt_record->sbr; + new->word1 = 0; + new->word2 = 0; + new->old_address = 0; + + + usbr = regs->stacks.top; + NATIVE_LOAD_TAGGED_QWORD_AND_TAGS(addr, AS_WORD(lo.sap_lo), + AS_WORD(hi.sap_hi), tag_lo, tag_hi); + address = GET_CURR_ADDRESS(init); + if (!CAN_INCR_CURR_ADDRESS(init, AS_STRUCT(hi.sap_hi).size)) { + pr_info(" create_new_record very many SAP IND=0x%lx " + "MAX=0x%lx CURR_SIZE=0x%x\n", + GET_CURR_ADDRESS(init) - START_VAL, + MAX_MULTITHREAD_SIZE, AS_STRUCT(hi.sap_hi).size); + return; + } + INCR_CURR_ADDRESS(init, AS_STRUCT(hi.sap_hi).size); + base = AS_STRUCT(lo.sap_lo).base; + new->word2 = AS_WORD(hi.sap_hi); + + /* field base for SAP and AP is different */ + AS_STRUCT(lo.ap_lo).base = address; + AS_AP_STRUCT(lo.ap_lo).itag = E2K_AP_ITAG; + tmp_lo = AS_WORD(lo.ap_lo); + tmp_hi = AS_WORD(hi.ap_hi); + NATIVE_STORE_TAGGED_QWORD((e2k_addr_t)addr, tmp_lo, tmp_hi, + E2K_AP_LO_ETAG, E2K_AP_HI_ETAG); + + new->new_address = address; + new->old_address = base; + + if (list == NULL) { + new->prev = NULL; + current_thread_info()->g_list = list = new; + } else { + last = list; + /* semaphore is required */ + while (last->next != NULL) + last = last->next; + + new->prev = last; + last->next = new; + } +} + +/* + * If the result of loadQ (operand - marked AP) is SAP + * than for different thread must be changed to new marked AP + * (if address of SAP is valid or NULL othervise) + */ +void change_sap(int cnt, pt_regs_t *regs, e2k_addr_t addr, + long multithread_addr) +{ + struct trap_pt_regs *trap = regs->trap; + trap_cellar_t *tcellar = trap->tcellar; + e2k_addr_t usbr; + + usbr = regs->stacks.top; + + addr -= 8; /* 
pointed to second word */ + DbgTC("change_sap :cnt=%d addr=0x%lx usbr=0x%lx GET_IP=%llx tag(addr)=%x tag(addr+8)=%x pid=%d\n", + cnt, addr, usbr, GET_IP, + NATIVE_LOAD_TAGD(addr), + NATIVE_LOAD_TAGD(addr+8), + current->pid); + + if (!WAS_MULTITHREADING) { + return; + } + if (DEBUG_TRAP_CELLAR) { + print_all_TC(tcellar, trap->tc_count); + } + DbgTC("change_sap *addr=0x%lx *(addr + 8)=0x%lx tag(addr)=%x tag(addr+8)=%x\n", + *(long *)addr, *(long *)(addr + 8), + NATIVE_LOAD_TAGD(addr), NATIVE_LOAD_TAGD((addr+8))); + + down_read_lock_multithread(); + if (IS_SAP_LO(addr) && IS_SAP_HI(addr + 8)) { + /* change SAP => AP */ + create_new_record(regs, addr, multithread_addr); + } + up_read_lock_multithread(); +} + +static int change_sap_to_ap(trap_cellar_t *tcellar, struct pt_regs *regs, + global_store_t* record, int no_check) +{ + e2k_rwsap_lo_struct_t sap_lo; + e2k_rwsap_hi_struct_t sap_hi; + e2k_rwap_lo_struct_t ap_lo; + unsigned int l_psl; + unsigned long address; + unsigned long base; + unsigned long *pnt; + global_store_t *init = get_init_record(); + + DbgTC("change_sap_to_ap():no_check=%d record=%px CURR_ADDRESS=0x%lx" + " WAS_MODIFIED(record)=%lx sbr=%lx\n", + no_check, record, GET_CURR_ADDRESS(init), + WAS_MODIFIED(record), (regs == NULL) ? 
0 : regs->stacks.top); + /* Verify if the data is SAP */ + if (record->global_p && !WAS_MODIFIED(record) && + (no_check || (IS_SAP_LO(&(tcellar[0].data)) && + IS_SAP_HI(&(tcellar[1].data)))) ) { + register unsigned long tmp_hi, tmp_lo; + + AS_WORD(sap_lo) = tcellar[0].data; + AS_WORD(sap_hi) = tcellar[1].data; + record->word1 = AS_WORD(sap_lo); + record->word2 = AS_WORD(sap_hi); + l_psl = AS_STRUCT(sap_lo).psl; + AS_STRUCT(sap_lo).psl = 0; + sap_lo.E2K_RWSAP_lo_itag = AP_ITAG; + address = GET_CURR_ADDRESS(init); + if (!CAN_INCR_CURR_ADDRESS(init, AS_STRUCT(sap_hi).size)) { + pr_info(" change_sap_to_ap very many SAP IND=0x%lx " + "MAX=0x%lx CURR_SIZE=0x%x\n", + GET_CURR_ADDRESS(init) - START_VAL, + MAX_MULTITHREAD_SIZE, AS_STRUCT(sap_hi).size); + return 0; + } + INCR_CURR_ADDRESS(init, AS_STRUCT(sap_hi).size); + base = AS_STRUCT(sap_lo).base; + /* field base for SAP and AP is different */ + AS_WORD(ap_lo) = AS_WORD(sap_lo); + AS_STRUCT(ap_lo).base = address; + record->new_address = address; + /* in tcellar base already increased by regs->stacks.top */ + record->old_address = base; + AS_AP_STRUCT(ap_lo).itag = E2K_AP_ITAG; + tmp_lo = AS_WORD(ap_lo); + tmp_hi = AS_WORD(sap_hi); + if (!no_check) { + pnt = &tcellar[0].data; + NATIVE_STORE_TAGGED_WORD((e2k_addr_t)pnt, tmp_lo, + E2K_AP_LO_ETAG, + TAGGED_MEM_STORE_REC_OPC, 2); + pnt = &tcellar[1].data; + NATIVE_STORE_TAGGED_WORD((e2k_addr_t)pnt, tmp_hi, + E2K_AP_HI_ETAG, + TAGGED_MEM_STORE_REC_OPC, 5); + } else { + pnt = (unsigned long*)(record->global_p); + NATIVE_STORE_TAGGED_QWORD((e2k_addr_t)pnt, tmp_lo, + tmp_hi, E2K_AP_LO_ETAG, E2K_AP_HI_ETAG); + } + + if (!no_check && IS_AP_LO(&(tcellar[0].data)) && + IS_AP_HI(&(tcellar[1].data))) { + DbgTC("change_sap_to_ap(): now is AP\n"); + } + DbgTC("SAP.base=%lx AP.base=%lx psl=%d tcellar[0].data=%lx " + "tcellar[1].data=%lx CURR_SIZE=%x\n", + base, address, l_psl, + tcellar[0].data, tcellar[0 +1].data, + AS_STRUCT(sap_hi).size); + + return 1; + } + return 0; +} + 
+global_store_t *first_record(void) +{ + global_store_t *new; + + new = (global_store_t *) kmalloc(sizeof(global_store_t), GFP_ATOMIC); + if (!new) { + DbgTC("no memory\n"); + return NULL; + } + NUM_THREAD(new) = 0; /* (x->orig_psr_lw) */ + new->lcl_psl = 0; + new->global_p = 0; + new->next = NULL; + new->pid = current->pid; + new->old_address = 0; + new->prev = NULL; + new->type = TYPE_INIT; + current_thread_info()->g_list = new; + return new; + +} + +static void init_g_list(int from) +{ + global_store_t *entry = current_thread_info()->g_list; + + DbgTC("pid=%d, list %px START_VAL=%lx from=%d\n", + current->pid, entry, START_VAL, from); + + if (!START_VAL) { + START_VAL = (e2k_addr_t) vm_mmap_notkillable(NULL, 0L, + MAX_MULTITHREAD_SIZE, + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, 0L); + } + if (entry == NULL) { + /* added new record to have common date for all threads */ + entry = first_record(); + } + if (entry != NULL) { + NUM_THREAD(entry) += from; + } + if (!current_thread_info()->lock) { + current_thread_info()->lock = (struct rw_semaphore *) + kmalloc(sizeof(struct rw_semaphore), GFP_ATOMIC); + init_rwsem(current_thread_info()->lock); + } + if (!current_thread_info()->lock) { + DbgTC("no memory\n"); + } +} + +/* + * search all globals pointed to SAP and change it to AP + * only for this thread + */ +void mark_all_global_sp(struct pt_regs * regs, pid_t pid) +{ + struct trap_pt_regs *trap = regs->trap; + global_store_t *entry = current_thread_info()->g_list; + trap_cellar_t tcellar[2]; + int curr_count; + + DbgTC("pid=%d, list %px START_VAL=%lx\n", + pid, entry, START_VAL); + init_g_list(1); + entry = current_thread_info()->g_list; + + if (trap) { + curr_count = trap->curr_cnt; + trap->curr_cnt = 0; + } + + while (entry != NULL) + { + if (entry->pid == pid && entry->type == TYPE_GLOBAL) { + tcellar[0].data = entry->word1; + tcellar[1].data = entry->word2; + change_sap_to_ap(tcellar, regs, entry, 1); + } + entry = entry->next; + } + + if (trap) + 
trap->curr_cnt = curr_count; + + DbgTC("exitting.\n"); +} + +static int is_valid_addr(global_store_t *entry, e2k_addr_t address, + e2k_addr_t* offset) +{ + e2k_rwsap_hi_struct_t sap_hi; + long size ; + + DbgTC("address=%lx entry->new_address=%lx entry=%px\n", + address, entry->new_address, entry); + AS_WORD(sap_hi) = entry->word2; + size = AS_STRUCT(sap_hi).size; + *offset = 0; + if (entry->new_address <= address && + entry->new_address + size > address) { + *offset = address - entry->new_address; + DbgTC("size=%lx\n", size); + return 1; + } + return 0; +} + +/* + * check that is address now in stack + */ + +e2k_addr_t is_correct_entry(global_store_t *record) +{ + global_store_t *entry = current_thread_info()->g_list; + + DbgTC("is_correct_entry new_address=%lx record->pid=%d " + " record->lcl_psl=%d\n", + record->new_address, record->pid, record->lcl_psl); + while (entry != NULL) + { + if (entry->type == TYPE_BOUND && entry->pid == record->pid && + record->lcl_psl == entry->lcl_psl) { + DbgTC("GET_TIME(entry)=0x%lx \n", + GET_TIME(entry)); + if (GET_TIME(entry) > record->new_address) { + return 0; + } else { + /* very old return */ + return 1; + } + } + entry = entry->next; + } + return 1; +} + +e2k_addr_t get_valid_address(e2k_addr_t address, global_store_t **record) +{ + global_store_t *entry = current_thread_info()->g_list; + e2k_addr_t offset; + DbgTC("address=%lx\n", address); + while (entry != NULL) + { + if (entry->type == TYPE_GLOBAL && + is_valid_addr(entry, address, &offset)) { + if (!is_correct_entry(entry)) { + return 0; + } + *record = entry; + DbgTC("address=%lx old_address=%lx" + " pid =%d \n", + address, entry->old_address + offset, entry->pid); + return entry->old_address + offset; + } + entry = entry->next; + } + return 0; +} + +static void set_new_bound(int psl, struct pt_regs *regs) +{ + global_store_t *entry = current_thread_info()->g_list; + global_store_t *new, *last, *list; + global_store_t *init = get_init_record(); + + if 
(!WAS_MULTITHREADING) { + return; + } + DbgTC("set_new_bound: psl=%d sbr=0x%lx GET_CURR_ADDRESS=%lx\n", + psl, regs->stacks.top, GET_CURR_ADDRESS(init)); + while (entry != NULL) { + if (entry->type == TYPE_BOUND && entry->pid == current->pid && + entry->lcl_psl == psl) { + /* update this record */ + GET_TIME(entry) = GET_CURR_ADDRESS(init); /* as time */ + entry->sbr = regs->stacks.top; + DbgTC("set_new_bound(): update entry=%px\n", entry); + return; + } + entry = entry->next; + } + + new = (global_store_t *) kmalloc(sizeof(global_store_t), GFP_ATOMIC); + if (!new) { + DbgTC("no memory\n"); + return; + } + + DbgTC("new entry=%px\n",new); + new->pid = current->pid; + new->lcl_psl = psl; + new->next = NULL; + new->type = TYPE_BOUND; + new->sbr = regs->stacks.top; + GET_TIME(new) = GET_CURR_ADDRESS(init); /* as time */ + INCR_CURR_ADDRESS(init, 16); + list = current_thread_info()->g_list; + if (list == NULL) { + new->prev = NULL; + current_thread_info()->g_list = list = new; + } else { + last = list; + while (last->next != NULL) + last = last->next; + + new->prev = last; + last->next = new; + } +} + +/* + * This code must done : + * check that address was changed (by change_sap_to_ap()) + * old address is valid + * access to old address can call page_fault + * change tcellar (old address => address) + * change vma && address + * result 0 - this address invalid + * 1 - changed address + * 2 - value of address may be changed + * 3 - unknown addr + */ +int interpreted_ap_code(struct pt_regs *regs, struct vm_area_struct **vma, + e2k_addr_t *address) +{ + struct trap_pt_regs *trap = regs->trap; + trap_cellar_t *tcellar = trap->tcellar; + int curr_count = trap->curr_cnt; + int tc_count = trap->tc_count /3; + unsigned long addr = *address; + e2k_addr_t old_address; + e2k_rwap_lo_struct_t ap_lo; + register unsigned long tmp; + struct vm_area_struct *new_vma; + tc_opcode_t opcode; + int i, count =-1; + global_store_t *init = get_init_record(); + global_store_t *pnt_record = 
NULL; + + DbgTC("interpreted_ap_op addr=%lx curr_count=%d tc_count=%d\n", + addr, curr_count, tc_count); + /* find our count */ + for(i = curr_count; i < tc_count; i++ ) { + AW(opcode) = AS(tcellar[i].condition).opcode; + if (tcellar[i].address == addr) { + count = i; + break; + } + } + if (count == -1) { + DbgTC("interpreted_ap_op not find count\n"); + return 3; + } + AW(opcode) = AS(tcellar[count].condition).opcode; + if (!IS_CHANGED_ADDRESS(init, addr)) { + DbgTC("interpreted_ap_op not changed address\n"); + return 3; + } + DbgTC("interpreted_ap_op count=%d\n", count); + if (DEBUG_TRAP_CELLAR) + print_all_TC(trap->tcellar, trap->tc_count); + + /* Verify if the data is AP */ + if (0 && AS(opcode).fmt == 5 && + (!IS_AP_LO(&(tcellar[count].data)) && + !IS_AP_HI(&(tcellar[count].data))) ) { + DbgTC("interpreted_ap_op not AP IS_AP_LO =%d IS_AP_HI=%d\n", + IS_AP_LO(&(tcellar[count].data)), + IS_AP_HI(&(tcellar[count].data))); + return 2; + + + } + old_address = get_valid_address(*address, &pnt_record); + DbgTC("interpreted_ap_op old_address=%lx pid=%d curr->pid=%d fmt=%d\n", + old_address, + (old_address)?pnt_record->pid:-1, + current->pid, AS(opcode).fmt); + if (old_address == 0) { + return 0; + } + tcellar[count].address = old_address; + + // print_user_address_ptes(current->mm, old_address); + DbgTC("interpreted_ap_op old_address=%lx *old_address=%lx\n", + old_address, *(long *)old_address); + *address = old_address; + + if (IS_AP_LO(&(tcellar[count].data))) { + AS_WORD(ap_lo) = tcellar[count].data; + AS_STRUCT(ap_lo).base = old_address; + tmp = AS_WORD(ap_lo); + NATIVE_STORE_VALUE_WITH_TAG(&tcellar[count].data, tmp, + E2K_AP_LO_ETAG); + } + + new_vma = find_vma(current->mm, old_address); + DbgTC("interpreted_ap_op tcellar[%d].data=%lx new_vma=%px\n", + count, tcellar[count].data, new_vma); + if (!new_vma) { + return 0; + } + /* To garantee that old_address has pte */ + /* TODO*/ + if (AS(opcode).fmt == 5) { + if (current->pid != pnt_record->pid) { + return 2; + } 
else { + return 1; + } + } + return 1; +} + +static void delete_list(global_store_t *entry) +{ + global_store_t *prev, *next; + + prev = entry->prev; + next = entry->next; + if (prev != NULL && next != NULL) { + prev->next = next; + next->prev = prev; + } else { + if (prev != NULL && next == NULL) { + prev->next = NULL; + } else { + if (prev == NULL && next != NULL) { + next->prev = NULL; + current_thread_info()->g_list = next; + } else { + current_thread_info()->g_list = NULL; + } + } + } +} + +void free_global_multithread(void) +{ + global_store_t *entry = current_thread_info()->g_list; + global_store_t *advance_entry; + + DbgTC("free_global_multithread(): pid=%d, NUM_THREAD=%d\n", + current->pid, NUM_THREAD(entry)); + NUM_THREAD(entry)--; + if (NUM_THREAD(entry) == 0) { + while (entry != NULL) { + advance_entry = entry->next; + delete_list(entry); + kfree((void *) entry); + entry = advance_entry; + } + } + if (current_thread_info()->lock) { + kfree((void *)current_thread_info()->lock); + } +} + +void free_global_sp(void) +{ + global_store_t *entry; + global_store_t *advance_entry; + struct mm_struct *mm; + + mm = current->mm; + if (!mm) { + return; + } + down_write(&mm->mmap_sem); + entry = current_thread_info()->g_list; + DbgTC("pid=%d, list %px\n", current->pid, entry); + while (entry != NULL) + { + advance_entry = entry->next; /* to avoid re-usage of */ + /* an entry after kfree() */ + /* entry->global_p == 0 is special record */ + if (entry->global_p && IS_THIS_THREAD(entry)) { + delete_list(entry); + kfree((void *) entry); + DbgTC("Entry %px freed\n", entry); + } + entry = advance_entry; + } + if (current_thread_info()->g_list) { + free_global_multithread(); + } + up_write(&mm->mmap_sem); + DbgTC("exitting.\n"); +} + +struct change_lw_args { + bool set; + int skip; + int orig_psr_lw; +}; + +static int __change_last_wish_one(e2k_mem_crs_t *frame, + unsigned long real_frame_addr, + unsigned long corrected_frame_addr, int flags, void *arg) +{ + struct 
change_lw_args *args = (struct change_lw_args *) arg;
	int *skip = (int *) &args->skip;
	bool set = args->set;
	e2k_cr1_lo_t cr1_lo = frame->cr1_lo;

	/* skip the requested number of frames first */
	if (*skip) {
		--(*skip);
		return 0;
	}

	DbgTC("crs = 0x%lx\n", corrected_frame_addr);

	/* never touch privileged (kernel-mode) frames */
	if (AS(cr1_lo).pm)
		return -EINVAL;

	if (set) {
		/* remember the pre-existing value so it can be restored */
		args->orig_psr_lw = AS(cr1_lo).lw;

		if (AS(cr1_lo).lw) {
			DbgTC("lw was already set in memory\n");
			return 1;
		}

		AS(cr1_lo).lw = 1;
		if (flags & PCF_FLUSH_NEEDED)
			NATIVE_FLUSHC;
		if (put_cr1_lo(cr1_lo, real_frame_addr, 0))
			return -EFAULT;

		DbgTC("lw was cleared in memory, is SET now\n");
	} else {
		if (!AS(cr1_lo).lw) {
			DbgTC("lw was already cleared in memory\n");
			return 1;
		}

		DbgTC("lw was set in memory\n");
		/* only clear if we were the ones who set it */
		if (!args->orig_psr_lw) {
			AS(cr1_lo).lw = 0;
			if (flags & PCF_FLUSH_NEEDED)
				NATIVE_FLUSHC;
			if (put_cr1_lo(cr1_lo, real_frame_addr, 0))
				return -EFAULT;

			DbgTC("lw is CLEARED\n");
		} else {
			DbgTC("lw remain SET\n");
		}
	}

	return 1;
}

/*
 * Create a new global-store record for @global (a global that now holds
 * a stack pointer from procedure level @l_psl) and append it to the
 * per-thread list; arms the "last wish" trap on the owning frame so the
 * global can be cleaned when that frame returns.
 * Allocated GFP_ATOMIC: called from trap context.
 */
static void new_record(global_store_t *list, struct pt_regs *regs,
		e2k_addr_t global, unsigned int l_psl, unsigned int p_psl)
{
	struct trap_pt_regs *trap = regs->trap;
	global_store_t *new, *last;
	trap_cellar_t *tcellar = trap->tcellar;
	long tc_count = trap->curr_cnt;

	new = (global_store_t *) kmalloc(sizeof(global_store_t), GFP_ATOMIC);
	if (!new) {
		DbgTC("no memory\n");
		return;
	}

	new->lcl_psl = l_psl;
	new->global_p = global;
	new->next = NULL;
	/* for multithreading support */
	new->type = TYPE_GLOBAL;
	new->pid = current->pid;
	/* stash the SAP image (lo/hi words) from the trap cellar */
	new->word1 = tcellar[tc_count].data;
	new->word2 = tcellar[tc_count + 1].data;
	new->old_address = 0;
	if (IS_MULTITHREADING) {
		DbgTC("global=%lx new=%lx pid=%d\n",
			global, new, new->pid);
		change_sap_to_ap(tcellar, regs, new, 0);
	}
	if (list == NULL) {
		new->prev = NULL;
		current_thread_info()->g_list = list = new;
	} else {
		last = list;
		/* semaphore is required */
		while (last->next != NULL)
			last
= last->next;

		new->prev = last;
		last->next = new;
	}

	if (l_psl == p_psl) {
		/* the stored local belongs to the interrupted frame itself:
		 * set lw directly in the live CR1 image in pt_regs */
		e2k_psr_t psr;
		AS_WORD(psr) = AS_STRUCT(regs->crs.cr1_lo).psr;
		if (AS_STRUCT(psr).lw) {
			DbgTC("lw was already set in CR1"
				" l_psl=%d pid=%d\n", l_psl, current->pid);
		} else {
			DbgTC("lw was cleared in CR1 "
				"l_psl=%d pid=%d\n", l_psl, current->pid);
			AS_STRUCT(psr).lw = 1;
			AS_STRUCT(regs->crs.cr1_lo).psr = AS_WORD(psr);
			DbgTC("lw is SET\n");
		}
		new->orig_psr_lw = AS_STRUCT(psr).lw;
	} else {
		/* owning frame is deeper in the chain stack: walk it */
		struct change_lw_args args;
		long ret;

		DbgTC("l_psl != p_psl\n");
		CHECK_SIZE("new_record", new)
		args.set = true;
		args.skip = p_psl - l_psl;
		ret = parse_chain_stack(PCS_USER, NULL, __change_last_wish_one, &args);
		if (!IS_ERR_VALUE(ret))
			new->orig_psr_lw = args.orig_psr_lw;
	}
	if (DEBUG_TRAP_CELLAR) {
		print_all_records();
	}
}

/*
 * Refresh an existing global-store record after the global was
 * overwritten with a pointer to a (possibly different) local: move the
 * "last wish" flag from the old owning frame to the new one and update
 * the cached SAP image.
 */
static void update_record(global_store_t *record,
		struct pt_regs *regs,
		unsigned int l_psl,
		unsigned int p_psl)
{
	struct trap_pt_regs *trap = regs->trap;
	unsigned int old_psl;
	e2k_psr_t psr;
	trap_cellar_t *tcellar = trap->tcellar;
	long tc_count = trap->curr_cnt;


	if (record == NULL) {
		DbgTC("record pointer is NULL\n");
		return;
	}

	DbgTC("record pointer is %px\n", record);
	/* for multithreading support */
	record->pid = current->pid;
	record->word1 = tcellar[tc_count].data;
	record->word2 = tcellar[tc_count + 1].data;
	record->old_address = 0;

	old_psl = record->lcl_psl;

	if (old_psl == l_psl) {
		DbgTC("new local has the same psl.\n");
		return;	/* do nothing */
	}

	/* Clear old last wish */
	if (old_psl == p_psl) {
		AS_WORD(psr) = AS_STRUCT(regs->crs.cr1_lo).psr;
		if (AS_STRUCT(psr).lw) {
			DbgTC("lw was set in CR1\n");
			/* restore only if it was set by us */
			if (!record->orig_psr_lw) {
				AS_STRUCT(psr).lw = 0;
				AS_STRUCT(regs->crs.cr1_lo).psr = AS_WORD(psr);
				DbgTC("lw is CLEARED\n");
			} else {
				DbgTC("lw remain SET\n");
			}
		} else {
			DbgTC("lw was already cleared in CR1\n");
		}
	} else {
		struct
change_lw_args args;
		long ret;

		DbgTC("l_psl != p_psl\n");
		CHECK_SIZE("update_record",record);
		args.set = false;
		args.skip = p_psl - l_psl;
		args.orig_psr_lw = record->orig_psr_lw;
		ret = parse_chain_stack(PCS_USER, NULL, __change_last_wish_one, &args);
		if (IS_ERR_VALUE(ret))
			return;
	}

	/* Set new last wish */
	if (l_psl == p_psl) {
		AS_WORD(psr) = AS_STRUCT(regs->crs.cr1_lo).psr;
		if (AS_STRUCT(psr).lw) {
			DbgTC("lw was already set in CR1\n");
		} else {
			DbgTC("lw was cleared in CR1\n");
			AS_STRUCT(psr).lw = 1;
			AS_STRUCT(regs->crs.cr1_lo).psr = AS_WORD(psr);
			DbgTC("lw is SET\n");
		}
		record->orig_psr_lw = AS_STRUCT(psr).lw;
	} else {
		struct change_lw_args args;
		long ret;

		DbgTC("l_psl != p_psl\n");
		CHECK_SIZE("update_record1",record);
		args.set = true;
		args.skip = p_psl - l_psl;
		ret = parse_chain_stack(PCS_USER, NULL, __change_last_wish_one, &args);
		if (!IS_ERR_VALUE(ret))
			record->orig_psr_lw = args.orig_psr_lw;
	}

	/* semaphore is required */
	record->lcl_psl = l_psl;
}

/* Take the shared multithread lock for reading (no-op unless the task
 * ever went multithreaded and the lock was allocated). */
void down_read_lock_multithread(void)
{
	struct rw_semaphore *lock = current_thread_info()->lock;

	if (WAS_MULTITHREADING && lock) {
		down_read(lock);
	}
}

/* Release the shared multithread lock taken above. */
void up_read_lock_multithread(void)
{
	struct rw_semaphore *lock = current_thread_info()->lock;

	if (WAS_MULTITHREADING && lock) {
		up_read(lock);
	}
}

/*
 * do_global_sp()
 *
 * Trap handler for a store of a stack pointer (SAP) into a global
 * variable in protected mode.  Decides whether the store is legal,
 * arms/clears "last wish" traps, and records the global in g_list so
 * it can be invalidated when the owning frame returns.
 * Returns 0 when a record was created/updated, 1 when nothing (more)
 * needs to be done.
 */
int do_global_sp(struct pt_regs *regs, trap_cellar_t *tcellar)
{
	e2k_addr_t global;	/* address of the GLOBAL */
	unsigned int l_psl;	/* PSL of the LOCAL */
	unsigned int p_psl;	/* PSL of the interrupted */
				/* user procedure */
	e2k_rwsap_lo_struct_t sap_lo;
	e2k_pusd_lo_t pusd_lo;
	int res;

	global_store_t *list, *record;


	/* Verify if the data is SAP */
	if ( IS_SAP_LO(&(tcellar[0].data)) &&
			IS_SAP_HI(&(tcellar[1].data)) ) {

		global = tcellar[0].address;
		AS_WORD(sap_lo) = tcellar[0].data;
		l_psl = AS_STRUCT(sap_lo).psl;

		DbgTC("SAP global addr = %lx, local psl = %d\n",
			global, l_psl);

		DbgTC("SAP global tcellar[0].data = %lx, tcellar[1].data = %lx\n",
			tcellar[0].data, tcellar[1].data);
		DbgTC("SAP global *global = %lx, *global+8 = %lx\n",
			*(long*)global, *(long*)(global+8));

		DbgTC("SAP IS_SAP_LO=%d IS_SAP_HI=%d\n",
			IS_SAP_LO(global), IS_SAP_HI(global+8));


	} else {
		DbgTC("The data isn't SAP: data0 = %lx, data1 = %lx\n",
			tcellar[0].data, tcellar[1].data);
		return 1;
	}

	if (! user_mode(regs)) {
		DbgTC("exception happened in kernel mode. "
			"Exiting.\n");
		return 1;
	}

	AS_WORD(pusd_lo) = AS_WORD(regs->stacks.usd_lo);

	if (AS_STRUCT(pusd_lo).p) {
		DbgTC("protected mode detected gd_lo=0x%llx gd_hi=0x%llx PID=%d\n",
			AS_WORD(read_GD_lo_reg()), AS_WORD(read_GD_hi_reg()),
			current->pid);
		p_psl = AS_STRUCT(pusd_lo).psl;

		/* NOTICE: correction for OS entrance */
		p_psl--;

		DbgTC("interrupted procedure psl = %d\n",
			p_psl);
	} else {
		DbgTC("NON-protected mode detected. "
			"Exiting.\n");
		return 1;
	}
	/* res != 0 means the faulting op is in the same VLIW as a RETURN */
	res = gsp_is_return(regs);
	if (WAS_MULTITHREADING) {
		if (set_last_wish_all(regs, l_psl, p_psl + res)) {
			DbgTC("error in set_last_wish_all\n");
			return 1;
		}
	}
	/* check if the local is allowed to store in the global */

	if (!res) {
		if (l_psl > p_psl) {
			DbgTC("l_psl > p_psl Exiting.\n");
			return 1;
		};
	} else if (l_psl > (p_psl + 1)) {
		/* Easy case. No need to do anything */
		DbgTC("RETURN case. l_psl > p_psl + 1. Exiting.\n");
		return 1;
	} else if (l_psl == (p_psl + 1)) {
		/* This is ugly patch against the case when LOAD/STORE to
		 * a global is located in the same VLIW with CT (control
		 * transfer) instruction associated with RETURN
		 */
		list = current_thread_info()->g_list;

		/* Most un-pleasant case, when stored local belongs to
		 * current procedure being executing on the CPU
		 */
		DbgTC("RETURN case. l_psl == (p_psl + 1)\n");
		for (record = list; record != NULL; record = record->next) {
			if (record->global_p == global)
				break;
		}
		if (!WAS_MULTITHREADING && record != NULL) {
			unsigned int old_psl = record->lcl_psl;

			DbgTC("RETURN case. record != NULL, old_psl=%d, p_psl=%d\n",
				old_psl, p_psl);

			/* Clear old last wish */
			if (old_psl == p_psl) {
				e2k_psr_t psr;
				AW(psr) = AS(regs->crs.cr1_lo).psr;
				if (AS(psr).lw && !record->orig_psr_lw) {
					AS(psr).lw = 0;
					AS(regs->crs.cr1_lo).psr = AW(psr);
				}
			} else if (WAS_MULTITHREADING) {
				/* NOTE(review): unreachable — the outer
				 * condition already requires
				 * !WAS_MULTITHREADING; dead branch. */
				if (set_last_wish_all(regs, l_psl, p_psl)) {
					DbgTC("error in set_last_wish_all\n");
					return 1;
				}
			} else {
				struct change_lw_args args;
				long ret;

				CHECK_SIZE("do_global_sp ", 1);
				args.set = false;
				args.skip = p_psl - l_psl;
				args.orig_psr_lw = record->orig_psr_lw;
				ret = parse_chain_stack(PCS_USER, NULL,
						__change_last_wish_one, &args);
				if (IS_ERR_VALUE(ret))
					return 1;
			}

			/* Delete the record from list */
			delete_list(record);
			kfree((void *) record);
		};

		/* Clear the global with Null Pointer */
		DbgTC("RETURN case. Clearing of global with addr = %lx\n",
			global);

		if (!IS_MULTITHREADING) {
			E2K_STORE_NULLPTR_QWORD(global);
			return 1;
		}
	}

	/* The list is defined as one entry per global.
	 * If the address matched an existing entry - just update the record.
	 * If not - create new one.
	 * Update will also move the last_wish flag on a new location in CS.
	 */
	init_g_list(0);

	list = current_thread_info()->g_list;

	/*
	 * Look for the first (the only) record in list that contains
	 * this global's pointer
	 */
	for (record = list; record != NULL; record = record->next)
	{
		if (record->global_p == global)
			break;
	}

	if (IS_MULTITHREADING || record == NULL) {
		DbgTC(""
			" record == NULL, call for new_record().\n");
		new_record(list, regs, global, l_psl, p_psl);
	} else {
		/* NOTE(review): log text says "record == NULL" but this is
		 * the record-found/update path — message is misleading. */
		DbgTC(""
			" record == NULL, call for update_record().\n");
		update_record(record, regs, l_psl, p_psl);
	}

	return 0;
}

/* Debug dump of one TYPE_GLOBAL record (SAP size taken from word2). */
static void print_global_records(global_store_t *entry)
{
	e2k_rwsap_hi_struct_t sap_hi;
	long size;

	AS_WORD(sap_hi) = entry->word2;
	size = AS_STRUCT(sap_hi).size;

	printk("GLOBAL pid=%d global=0x%lx psl=%d entry=%px\n",
		entry->pid, entry->global_p, entry->lcl_psl, entry);
	printk(" new_address=0x%lx old_address=0x%lx"
		" sbr=0x%lx size=%lx\n",
		entry->new_address, entry->old_address,
		entry->sbr, size);
}

/* Debug dump of one TYPE_INIT record. */
static void print_init_records(global_store_t *entry)
{
	pr_info("INIT pid=%d GET_CURR_IND=0x%x NUM_THREAD=%d\n",
		entry->pid, entry->lcl_psl, NUM_THREAD(entry));
}

/* Debug dump of one TYPE_BOUND record. */
static void print_bound_records(global_store_t *entry)
{
	printk("BOUND pid=%d TIME(CURR_IND)=0x%lx entry=%px\n",
		entry->pid, GET_TIME(entry), entry);
	printk(" sbr=0x%lx lcl_psl=%d\n",
		entry->sbr, entry->lcl_psl);
}

/* Walk g_list and dump every record, dispatching on entry->type. */
static void print_all_records(void)
{
	global_store_t *entry = current_thread_info()->g_list;

	DbgTC("print_all_records entry %px \n", entry);
	while (entry != NULL)
	{
		switch (entry->type)
		{
		case TYPE_INIT:
			print_init_records(entry);
			break;
		case TYPE_BOUND:
			print_bound_records(entry);
			break;
		case TYPE_GLOBAL:
			print_global_records(entry);
			break;
		default:
			printk("UNKNOWN entry=%px entry->type =%d\n",
				entry, entry->type);
			break;
		}
		entry = entry->next;
	}
}

/*
 * Drop every record of this thread whose local PSL is >= @psl_from and
 * null out the corresponding global if it still holds a SAP from that
 * (or deeper) level.  Called when frames at psl_from and below go away.
 */
int delete_records(unsigned int psl_from)
{
	e2k_addr_t global;	/* address of the
GLOBAL */
				/* user procedure */
	unsigned int g_psl = 0;
	global_store_t *entry, *advance_entry;
	struct mm_struct *mm;
	struct vm_area_struct *vma = NULL;

	DbgTC("Deleting records with psl >= %d\n", psl_from);
	if (current_thread_info()->g_list == NULL) {
		DbgTC("list of globals is empty. Exiting.\n");
		return 1;
	}

	mm = current->mm;
	down_read(&mm->mmap_sem);
	entry = current_thread_info()->g_list;
	while (entry != NULL)
	{
		advance_entry = entry->next;	/* to avoid re-usage of */
						/* an entry after kfree() */

		if (entry->lcl_psl >= psl_from && IS_THIS_THREAD(entry)) {
			global = entry->global_p;

			/* the global may have been unmapped meanwhile */
			vma = find_vma(mm, global);
			if (!vma || ((vma->vm_start) > global)) {
				printk(KERN_NOTICE "delete_records(): global %lx "
					" belongs to an unmapped area\n", global);
			} else if ( IS_SAP_LO(global) &&
					IS_SAP_HI(global + sizeof(e2k_rwsap_lo_struct_t)) ) {

				e2k_rwsap_lo_struct_t sap_lo;
				AS_WORD(sap_lo) = *(u64 *) global;

				g_psl = AS_STRUCT(sap_lo).psl;

				DbgTC("SAP global addr LO = %lx, "
					" HI = %lx, "
					"global psl = %d\n", global,
					global + sizeof (e2k_rwsap_lo_struct_t), g_psl);

				/* still points into a dying frame: kill it */
				if (g_psl >= psl_from)
					E2K_STORE_NULLPTR_QWORD(global);
			} else {
				DbgTC("The data isn't SAP: data0 = %llx, data1 = %llx\n",
					*(u64 *) global,
					*(u64 *) (global + sizeof(u64)));
			}
			delete_list(entry);

			kfree((void *) entry);
			DbgTC("Entry %px freed\n", entry);
		}

		entry = advance_entry;

	}
	up_read(&mm->mmap_sem);

	return 0;
}

/*
 * "Last wish" trap handler: the frame whose cr1_lo.lw we armed is
 * returning.  Null out any recorded globals that would become dangling
 * (their stored SAP's psl is deeper than the surviving frame) and drop
 * the matching records; finally move the thread bound record.
 */
int lw_global_sp(struct pt_regs *regs)
{
	e2k_addr_t global;	/* address of the GLOBAL */
	unsigned int g_psl = 0;	/* PSL of the GLOBAL */
	unsigned int p_psl;	/* PSL of the interrupted */
				/* user procedure */
	e2k_pusd_lo_t pusd_lo;
	global_store_t *entry, *advance_entry;
	struct mm_struct *mm;
	struct vm_area_struct *vma = NULL;

	AS_WORD(pusd_lo) = AS_WORD(regs->stacks.usd_lo);

	if (AS_STRUCT(pusd_lo).p) {
		DbgTC("protected mode detected\n");
		p_psl = AS_STRUCT(pusd_lo).psl;

		/* NOTICE: correction for OS entrance */
		p_psl--;

		DbgTC("interrupted procedure psl = %d\n",
			p_psl);
	} else {
		DbgTC("NON-protected mode detected. "
			"Exiting.\n");
		return 1;
	}

	if (current_thread_info()->g_list == NULL) {
		DbgTC("list of globals is empty. Exiting.\n");
		return 1;
	}
	down_read_lock_multithread();
	mm = current->mm;
	down_read(&mm->mmap_sem);
	entry = current_thread_info()->g_list;
	while (entry != NULL)
	{
		advance_entry = entry->next;	/* to avoid re-usage of */
						/* an entry after kfree() */

		if (entry->lcl_psl == (p_psl+1) && IS_THIS_THREAD(entry)) {
			/* lms fires LWE after the CT */
			global = entry->global_p;

			vma = find_vma(mm, global);
			if (!vma || ((vma->vm_start) > global)) {
				DbgTC(KERN_NOTICE "lw_global_sp(): global %lx belongs to an"
					" unmapped area\n", global);
			} else if ( IS_SAP_LO(global) &&
					IS_SAP_HI(global + sizeof(e2k_rwsap_lo_struct_t)) ) {

				e2k_rwsap_lo_struct_t sap_lo;
				AS_WORD(sap_lo) = *(u64 *) global;

				g_psl = AS_STRUCT(sap_lo).psl;

				DbgTC("SAP global addr LO = %lx, "
					" HI = %lx, "
					"global psl = %d\n", global,
					global + sizeof (e2k_rwsap_lo_struct_t), g_psl);

				if (!WAS_MULTITHREADING && g_psl > p_psl)
					E2K_STORE_NULLPTR_QWORD(global);
			} else {
				DbgTC("The data isn't SAP: data0 = %llx, data1 = %llx\n",
					*(u64 *) global,
					*(u64 *) (global + sizeof(u64)));
			}
			if (!WAS_MULTITHREADING) {
				delete_list(entry);
				kfree((void *) entry);
				DbgTC("Entry %px freed\n", entry);
			}
		};

		entry = advance_entry;

	}
	set_new_bound(p_psl+1, regs);
	up_read(&mm->mmap_sem);
	up_read_lock_multithread();
	return 0;
}
diff --git a/arch/e2k/3p/umalloc.c b/arch/e2k/3p/umalloc.c
new file mode 100644
index 000000000000..ab6a46f509c4
--- /dev/null
+++ b/arch/e2k/3p/umalloc.c
@@ -0,0 +1,1130 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+extern long
sys_exit(int error_code);

#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MAX_MUSED 16

#undef DEBUG_FAIL_RETURN
#ifdef DEBUG_FAIL_RETURN
#define FAIL_RETURN BUG()
#else
#define FAIL_RETURN return
#endif

#define DebugBUG 1
#define DBUG if (DebugBUG) printk

#define DebugUM 0
#define DBUM if (DebugUM) printk
#define DEBUG_TRACE 0
#define Dbg_trace if (DEBUG_TRACE) printk
#define DEBUG_GC_TRACE 0
#define Dbg_gc_trace if (DEBUG_GC_TRACE) printk
#define DEBUG_GC_RES 0
#define Dbg_gc_res if (DEBUG_GC_RES) printk
#define DEBUG_GC_REMAP 0
#define Dbg_gc_remap if (DEBUG_GC_REMAP) printk
#define DEBUG_GC_ADDR 0
/* NOTE(review): gated by DEBUG_GC_RES, not DEBUG_GC_ADDR — looks like a
 * copy-paste slip; confirm intent before changing. */
#define Dbg_gc_addr if (DEBUG_GC_RES) printk
#define DEBUG_GC_MEM 0
#define Dbg_gc_mem if (DEBUG_GC_MEM) printk
#define DEBUG_GC_TBL_RES 0

#define DEBUG_CL_DESC 0
#define Dbg_cl_desc(...) DebugPrint(DEBUG_CL_DESC, ##__VA_ARGS__)

/*
 * malloc uses a pool:
 *
 * pool: -------------------------
 * listpoolhdr_t | head                    | -> two order ref to next pool
 *               | mainp                   | --- ref to first free element
 *               |                         |   | (subpoolhdr_t)
 *               | (size of pool) size     |   |
 *               --------------------------|   |
 *               .....                         |
 * subpoolhdr_t  | (size of chunk) size    | <-|
 *               |                         |
 *               |(size of chunk's element)|
 *               | mainsz                  |
 *               | mused[MAX_MUSED]        | -----
 *               | mainp                   |     | chunk
 *               | ptr                     |-----|--------> -----
 *               ......                          |              |
 *                                               |              |
 *   address of first free element's chunk  |____|              |
 *                                                              |
 */
// i can't include include/list.h in umalloc.h file for recurrence
// for this resion we must use casting
//
#define get_list_head(x) ((struct list_head *)&(x)->head)

// i can't include linux/rt_lock.h in umalloc.h file for recurrence
// for this resion we must use casting
//
#define get_rt_semaphore(x) ((struct semaphore *)&(x)->lock)

/* sanity check: the opaque rt_mutex_um slot must fit a semaphore */
#define check_size_rt_semaphore() \
	if (sizeof(struct semaphore) > sizeof(struct rt_mutex_um)) {\
		printk(" BAD sizeof(struct rt_mutex_um)=%ld < " \
			" sizeof(struct semaphore)=%ld \n", \
			sizeof(struct rt_mutex_um), \
			sizeof(struct semaphore)); \
		BUG(); \
	}
struct listpoolhdr {
	struct list_head head;
	u32 mainp;	// index for free mused
	u32 size;	// size of listpoolhdr
};
typedef struct listpoolhdr listpoolhdr_t;
	// Small chunk pools

/* NOTE(review): code below uses subpoolhdr_t — the typedef is presumably
 * in umalloc.h (not visible here); confirm. */
struct subpoolhdr {
	u32 size;		// size of chunk
	u32 mainsz;		// size of chunk's element
	u32 mused[MAX_MUSED];	// bit mask of valid main chunks
	u32 mainp;		// index for free mused
	long *ptr;		/* ptr to chunk */
};

struct mem_moved_poolhdr {
	u32 size;	// size of chunk
	u32 mainp;	// index for free element
	u32 new_mainp;	// index for new added free element
};


//#define MAX_PIND 8

/*
 * This define is from kernel/traps.c
 */
#define S_S(signo) do { \
	kernel_siginfo_t info; \
	info.si_signo = signo; \
	info.si_errno = 0; \
	info.si_trapno = TRAP_BRKPT; \
	info.si_code = 0; \
	force_sig_info(&info); \
} while(0);

#define BIT_INT 32
#define LAST_BIT (BIT_INT-1)
#define WORD(x) ((x) >> 5)	/*((x)/BIT_INT) */
#define BIT_NR(x) ((x) & 0x1f)	/*((x)%BIT_INT) */

#define FLAGS (PROT_READ | PROT_WRITE)
#define ALL_FF 0xffffffff
#define SHIFT(x) (BIT_INT - 1 - BIT_NR(x))
#define FIRST_SUBPOOL_IND MAX(sizeof(subpoolhdr_t),sizeof(listpoolhdr_t))

#define MEM_MVD_SIZE PAGE_SIZE
#define FIRST_MVD_IND MAX(sizeof(mem_moved_t),sizeof(mem_moved_poolhdr_t))

struct mem_moved{
	u64 beg_addr;	// used address
	u64 end_addr;	// end of used address
	u64 new_addr;	// new address
};

typedef struct mem_moved mem_moved_t;

/* NOTE(review): missing `typedef` keyword — as written this declares a
 * global VARIABLE named one_element_t, not a type alias; harmless only
 * while nothing references the type. */
struct one_element{
	u64 addr;
	u32 size;
	u32 ind;
	subpoolhdr_t *subpool;
}one_element_t;


/* calculate last posible correct address for subpoool */
#define get_last_subpool(x) ({u32 mainp; u32 size; long *ptr; char *_t;\
	get_user(mainp, &x->mainp); \
	get_user(size, &x->size); \
	_t = (char *)(x) + mainp; \
	get_user(ptr, &((subpoolhdr_t *)_t)->ptr);\
	(subpoolhdr_t *)(_t - \
	((((mainp + sizeof(subpoolhdr_t)) >= size) || \
	(ptr == NULL && mainp > FIRST_SUBPOOL_IND)) ? \
	sizeof(subpoolhdr_t) : 0)); })

#define get_first_subpool(x) (subpoolhdr_t *)((char *)(x) + FIRST_SUBPOOL_IND)

#define subpool_list_for_each_prev(subpool,hdr) \
	for (subpool = get_last_subpool(hdr); \
			subpool >= get_first_subpool(hdr); \
			subpool--)

#define subpool_list_for_each(subpool,hdr) \
	for (subpool = get_first_subpool(hdr); \
			subpool <= get_last_subpool(hdr); \
			subpool++)

#define deleted_subpool(subpool) ({long *ptr; \
	get_user(ptr, &subpool->ptr); ptr == NULL; })


#define get_first_mem_moved(x) (mem_moved_t *)((char*)(x) + FIRST_MVD_IND)

/* calculate last posible correct address for mem_moved */
#define get_last_mem_moved(x) \
	((mem_moved_t *)((char*)(x) + x->mainp - sizeof(mem_moved_t)))

#define MAX_PROC_PTE 20

// may be ~ 1024 ?
#define MAX_USED_MEM 128

/*
 * Map @sz anonymous bytes into the current process for the user-mode
 * allocator.  On first failure, waits for (or triggers) garbage
 * collection gated by umpools.gc_lock, then retries once.
 * Returns 0 on failure.
 */
static e2k_addr_t
ALLOC_MEM(long sz)
{
	e2k_addr_t map_addr;
	int once=0;
again:
	map_addr = (e2k_addr_t)vm_mmap_notkillable(NULL, 0L, sz,
			PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, 0L);
	if (once) {
		Dbg_gc_res("once map_addr=%llx\n", (u64)map_addr);
	}
	/* mmap returns a negative errno (not page aligned) on failure */
	if (map_addr & ~PAGE_MASK) {
		map_addr = 0;
	}
	DBUM("ALLOC_MEM = 0x%lx : 0x%lx\n", map_addr, sz);
	if (map_addr == 0 && once == 0) {
		struct task_struct *tsk = current;
		if (xchg(&current->mm->context.umpools.gc_lock.counter, 1)) {
			/* somebody else is collecting: wait for them */
			while(atomic_read(&current->mm->context.umpools.gc_lock)){
				tsk->state = TASK_INTERRUPTIBLE;
				schedule();
			}

		} else /*if (garbage_collection()) */{
			atomic_set(&current->mm->context.umpools.gc_lock,0);
			return map_addr;
		}
		atomic_set(&current->mm->context.umpools.gc_lock,0);
//		sz = 1;	// open for debugging in LMS
		once = 1;
		goto again;
	}
	return map_addr;
}

/* Unmap a region previously obtained from ALLOC_MEM. */
static void FREE_MEM(e2k_addr_t a, size_t sz)
{
	struct mm_struct *mm = current->mm;

	DBUM("FREE_MEM = 0x%lx : 0x%lx\n", a, sz);
	down_write(&mm->mmap_sem);
	(void) do_munmap(mm, a, sz, NULL);
	up_write(&mm->mmap_sem);
}

/*
 * Map a requested @size to its size class: *chsz is the rounded-up
 * chunk size (powers of two from 8 to 2048), returned value is the
 * pool index; index 9 is the "big chunk" class with *chsz == size.
 */
static int
get_pind(unsigned int size, u32 *chsz)
{
	int pind;
	if (size <= 8) {
		*chsz = 8;
		pind = 0;
	} else if (size <= 16) {
		*chsz = 16;
		pind = 1;
	} else if (size <= 32) {
		*chsz = 32;
		pind = 2;
	} else if (size <= 64) {
		*chsz = 64;
		pind = 3;
	} else if (size <= 128) {
		*chsz = 128;
		pind = 4;
	} else if (size <= 256) {
		*chsz = 256;
		pind = 5;
	} else if (size <= 512) {
		*chsz = 512;
		pind = 6;
	} else if (size <= 1024) {
		*chsz = 1024;
		pind = 7;
	} else if (size <= 2048) {
		*chsz = 2048;
		pind = 8;
	} else {	// big chunk
		*chsz = size;
		pind = 9;
	}
	return pind;
}

/* True if no chunk in the subpool is marked used (all mused bits 0). */
static int subpool_is_empty(subpoolhdr_t *sbp)
{
	u32 mused_i;
	u32 res = 0;
	int i;

	for (i = 0; i < MAX_MUSED; i++) {
		get_user(mused_i, &sbp->mused[i]);
		res = res | mused_i;
	}
	return res == 0;
}

static void
set_used(subpoolhdr_t *sbp, u32 chsz, umlc_pool_t *mypool)
{
	u32 mainp, mainsz;
	int chn;
	int i;
	u32 m;
	u32 mused_i;

	/* chunk number = offset of the next free element / element size */
	get_user(mainp, &sbp->mainp);
	get_user(mainsz, &sbp->mainsz);
	chn = mainp / mainsz;
	i = WORD(chn);
	get_user(mused_i, &sbp->mused[i]);
	m = 1 << (SHIFT(chn));

	Dbg_trace("set_used sbp =%px (%d,%d) sbp->mainp=%x chsz=%x\n",
		sbp, i, SHIFT(chn), mainp, chsz);
	if (i >= MAX_MUSED) {
		DBUG("Too big chunk number %d sz = %d\n", i, chsz);
		dump_malloc_cart();
		sys_exit(7009);
	}
	/* double-allocation of the same chunk is fatal: heap corrupted */
	if (mused_i & m) {
		struct list_head *head = get_list_head(mypool);
		DBUG("Chunk already used. size %u; mused[%d] = 0x%08x - m = 0x%08x\n"
			" chn = %d sbp=%px mypool=%px mypool->mainp=%x\n"
			" head->next =%px\n",
			chsz, i, mused_i, m, chn, sbp, mypool,
			mypool->mainp, (char *)head->next);
		dump_malloc_cart();
		sys_exit (7009);
	}
	put_user(mused_i | m , &sbp->mused[i]);
	put_user(mainp + chsz, &sbp->mainp);
	Dbg_trace(" set_used mused =%08x\n", mused_i | m);
}

/* Clear the used bit of chunk @chn; double free raises SIGABRT. */
static void clear_used(subpoolhdr_t *sbp, int chn)
{
	int i = WORD(chn);
	u32 m = 1 <<(SHIFT(chn));
	u32 mused_i;

	get_user(mused_i, &sbp->mused[i]);
	Dbg_trace(" clear_used sbp =%px (%d,%d) sbp->mainp=%x bit=%x\n",
		sbp, i, SHIFT(chn), sbp->mainp, chn);
	if (i >= MAX_MUSED) {
		u32 mainsz;

		get_user(mainsz, &sbp->mainsz);
		DBUG("Clear: Too big chunk number %d; sz = %d in %d\n",
			chn, i, mainsz);
		dump_malloc_cart();
		sys_exit(7009);
	}
	if (!(mused_i & m)) {
		u32 mainsz;

		get_user(mainsz, &sbp->mainsz);
		DBUG("Chunk not used. size %u; mused[%d]=0x%08x m=0x%08x\n",
			mainsz, i, mused_i, m);
//		dump_malloc_cart();
//		sys_exit (7009);
		S_S(SIGABRT);
	}
	put_user(mused_i & ~m , &sbp->mused[i]);
	Dbg_trace("clear_used =%08x\n", mused_i & ~m);
}

/*
 * Allocate backing storage for a new subpool of element size @chsz.
 * Small classes get 1/2/4 pages; a "big chunk" gets exactly chsz bytes.
 * On success *size receives the allocated size.
 */
static void *
create_new_subpool(u32 chsz, allpools_t *allpools, u32 *size)
{
	u32 sz;
	void *new;

	if (chsz <= 16) {
		sz = 1;
	} else if (chsz <= 128) {
		sz = 2;
	} else if (chsz <= 2048) {
		sz = 4;
	} else {
		// BIG chunk
		sz = chsz;
	}
	if (chsz <= 2048) {
		sz *= PAGE_SIZE;
	}

	new = (void *)ALLOC_MEM(sz);
	if (new == NULL) {
		DBUG("No memory for subpool\n");
		return NULL;
	}
	*size = sz;
//	memset(new, 0, sz);	//!!!! delete for LMS
	allpools->allsize += sz;
	DBUM("create_new_subpool(0x%x) = 0x%px\n", chsz, new);
	return new;
}

/* Release a subpool's chunk memory and wipe its header. */
static void free_subpool(subpoolhdr_t *a, allpools_t *allpools)
{

	DBUM("free_subpool a=%px a->ptr =%px a->size=%x\n", a, a->ptr, a->size);

	if (!a->ptr) {
		return;
	}
	allpools->allsize -= a->size;
	FREE_MEM((e2k_addr_t)a->ptr, a->size);
	memset(a, 0, sizeof(subpoolhdr_t));
}

/*
 * If the last subpool slot of @hdr is a wiped (deleted) one and is the
 * current tail, shrink both the header's and the pool's mainp by one
 * slot.  Returns 1 if a slot was reclaimed (caller loops).
 */
static int delete_last_subpool(listpoolhdr_t *hdr, umlc_pool_t *mypool,
		allpools_t *allpools)
{
	subpoolhdr_t *last = get_last_subpool(hdr);
	if (deleted_subpool(last) && mypool->mainp > FIRST_SUBPOOL_IND &&
			(subpoolhdr_t *)((char *)get_list_head(mypool)->next +
			mypool->mainp - sizeof(subpoolhdr_t)) == last) {
		u32 mainp;
		get_user(mainp, &hdr->mainp);
		put_user(mainp - sizeof(subpoolhdr_t), &hdr->mainp);
		mypool->mainp -= sizeof(subpoolhdr_t);
		return 1;
	}
	return 0;
}

/* Free subpool @a and compress trailing deleted slots of @hdr. */
static void free_compress_subpool(subpoolhdr_t *a, allpools_t *allpools,
		umlc_pool_t *mypool, listpoolhdr_t *hdr)
{
	/* NOTE(review): `last` is unused — get_last_subpool() is
	 * re-evaluated below; candidate for removal. */
	subpoolhdr_t *last = get_last_subpool(hdr);
	free_subpool(a, allpools);

	/* delete last ref on subpool */
	if (a == get_last_subpool(hdr)) {
		while (delete_last_subpool(hdr, mypool, allpools)) {
			;
		}
	}
}

/*
 * User-mode protected malloc: return the address of a free chunk of at
 * least @size bytes from the per-mm size-class pools, growing the pool
 * and subpool structures on demand.  Returns 0 on failure.
 */
e2k_addr_t sys_malloc(size_t size)
{
	allpools_t *allpools =
&current->mm->context.umpools;
	e2k_addr_t addr = 0;
	u32 chsz;
	int pind;
	umlc_pool_t *mypool;
	e2k_addr_t mem;
	struct list_head *head;
	subpoolhdr_t *curr_subpool = NULL;
	struct semaphore *lock;
	u32 x;
	unsigned long x1;

	/* max size for protected malloc*/
	if (size >= 0xffffffffUL) {
		return 0;
	}
	pind = get_pind(size, &chsz);
	DBUM("sys_malloc size=%lx pind=%d cz=%x\n", size, pind, chsz);
	// small chunks
	mypool = &allpools->pools[pind];
//	check_size_rt_semaphore();
	lock = get_rt_semaphore(mypool);
	down(lock);
	head = get_list_head(mypool);
	DBUM("%s mainp=%x main_size=%x head->next=%px head=%px mypool=%px\n",
		__func__, mypool->mainp, mypool->size, head->next, head, mypool);
	if (head->next == NULL) {
		/* first use of this size class: init the pool list */
		INIT_LIST_HEAD(head);
	}

	/* need a fresh listpool page when empty or no slot left */
	if (list_empty(head) ||
			(mypool->mainp + sizeof(subpoolhdr_t)) > mypool->size) {
		mem = ALLOC_MEM(PAGE_SIZE);
		if (mem == 0) {
			goto out;
		}
		// may be called garbage_collection
		head = get_list_head(mypool);
		memset((char*)mem, 0, PAGE_SIZE);
		mypool->mainp = FIRST_SUBPOOL_IND;
		mypool->size = PAGE_SIZE;
		put_user(FIRST_SUBPOOL_IND, &((listpoolhdr_t *)mem)->mainp);
		put_user(PAGE_SIZE, &((listpoolhdr_t *)mem)->size);
		list_add((struct list_head *)mem, head);

	}
	curr_subpool = (subpoolhdr_t*)((char*)head->next + mypool->mainp);
	DBUM("curr_subpool=%px curr_subpool->ptr=%px\n",
		curr_subpool, curr_subpool->ptr);
	get_user(x1, &curr_subpool->ptr);
	if (!x1) {
		u32 size;	/* shadows the function parameter on purpose */
		void *ptr;
		// no room in subpool.
		// may be called garbage_collection
		/* NOTE(review): create_new_subpool() may return NULL; the
		 * result is stored unchecked and later used as the chunk
		 * base — confirm failure cannot reach here. */
		ptr = create_new_subpool(chsz, allpools ,&size);
		head = get_list_head(mypool);
		curr_subpool = (subpoolhdr_t*)((char*)head->next + mypool->mainp);
		put_user(ptr, &curr_subpool->ptr);
		put_user(chsz, &curr_subpool->mainsz);
		put_user(size, &curr_subpool->size);
		put_user(0, &curr_subpool->mainp);
	}
	DBUM("sys_malloc addr =%lx curr_subpool =%px ptr=%px mainp=0x%x\n",
		addr, curr_subpool, curr_subpool ? curr_subpool->ptr : NULL,
		curr_subpool->mainp);
	// There is a room for a chunk
	get_user(x, &curr_subpool->mainp);
	get_user(x1, &curr_subpool->ptr);
	addr = (e2k_addr_t)(x1 + x);
	set_used(curr_subpool, chsz, mypool);
	/* subpool exhausted: advance pool to the next subpool slot */
	if (curr_subpool->mainp >= curr_subpool->size) {
		mypool->mainp += sizeof(subpoolhdr_t);
		((listpoolhdr_t*)head->next)->mainp += sizeof(subpoolhdr_t);
	}
	allpools->allused += chsz;
	allpools->allreal += size;
out:
	up(lock);
	DBUM("sys_malloc addr =%lx curr_subpool =%px ptr=%px mainp=0x%x\n",
		addr, curr_subpool, curr_subpool ? curr_subpool->ptr : NULL,
		curr_subpool->mainp);
	return addr;
}

/*
 * User-mode protected free: locate the subpool owning address @a
 * (starting at the size class implied by @sz and widening upward),
 * clear its used bit, and release the subpool when it drains.
 * Bad frees raise SIGABRT (or BUG with DEBUG_FAIL_RETURN).
 */
void sys_free(e2k_addr_t a, size_t sz)
{
	allpools_t *allpools = &current->mm->context.umpools;
	u32 chsz;
	int pind;
	umlc_pool_t *mypool;
	subpoolhdr_t *subpool;
	e2k_addr_t addr;
	u32 size;
	u32 mainsz, mainp;
	struct list_head *ln;
	struct list_head *head;
	listpoolhdr_t *hdr;
	struct semaphore *lock;
	listpoolhdr_t *last_hdr = NULL;

	if (a == 0) {
		return;
	}
	// At first assume size is a real chunk size
	pind = get_pind(sz, &chsz);
	DBUM(" sys_free a=%lx sz=%lx\n", a, sz);
//	check_size_rt_semaphore();
	while (pind < MAX_CHUNKS) {
		mypool = &allpools->pools[pind];
		lock = get_rt_semaphore(mypool);
		down(lock);
		head = get_list_head(mypool);
		if (head->next == NULL) {
			pind++;
			up(lock);
			continue;
		}
		list_for_each_prev(ln, head) {
			hdr = list_entry(ln, listpoolhdr_t, head);
			if (!last_hdr) {
				last_hdr = hdr;
			}
			subpool_list_for_each_prev(subpool, hdr) {
				if (deleted_subpool(subpool)) {
					continue;
				}
				get_user(addr, &subpool->ptr);
				get_user(size, &subpool->size);
				/* does @a fall inside this subpool's chunk? */
				if (a < addr || a >= (addr + size)) {
					continue;
				}
				if ((a + sz) > (addr + size)) {
					DBUG("Bad free desk pind =%d (0x%lx, 0x%lx) "
						"for (0x%lx, 0x%x) subpool =%px\n",
						pind, a, sz, addr, size, subpool);
					dump_malloc_cart();
					// kill process
					up(lock);
					FAIL_RETURN;
				}
				/*
				 * glibc needs full coresponding of address
				 */
				get_user(mainsz, &subpool->mainsz);
				/* @a must be exactly on an element boundary */
				if ((a - addr) % mainsz != 0) {
					up(lock);
					S_S(SIGABRT);
					return;
				}
				clear_used(subpool, (a - addr) / mainsz);
				allpools->allused -= mainsz;
				get_user(mainp, &subpool->mainp);
				if (subpool_is_empty(subpool) && mainp == size) {
					// subpool is empty. Return it
					free_compress_subpool(subpool, allpools,
							mypool, last_hdr);
				}

				DBUM("Free big: 0x%lx - 0x%x\n", addr, size);
				up(lock);
				return;
			}
		}
		// go to biger chunk
		pind++;
		up(lock);
	}
	DBUG("Bad!!! free desk (0x%lx, 0x%lx) pind =%d curr pind =%d\n",
		a, sz, get_pind(sz, &chsz), pind);
	S_S(SIGABRT);
}


/* Report allocator statistics (used/requested/mapped totals). */
int get_malloc_stat(mallocstat_t *st)
{
	allpools_t *allpools = &current->mm->context.umpools;
	st->m_used = allpools->allused;
	st->m_real = allpools->allreal;
	st->m_size = allpools->allsize;
	return 0;
}

typedef struct {
	void *addr;
	size_t size;
} array_t;

/* Debug dump of every pool, listpool header, and subpool bitmap. */
void dump_malloc_cart(void)
{
	allpools_t *allpools = &current->mm->context.umpools;
	umlc_pool_t *mypool;
	subpoolhdr_t *subpool;
	int i;
	struct list_head *head;
	struct list_head *ln;
	listpoolhdr_t *hdr;

	printk("\n\t\tALLREAL = %u\n", allpools->allreal);
	printk("\t\tUSED = %u\n", allpools->allused);
	printk("\t\tALLSIZE = %u\n", allpools->allsize);

	for (i = 0; i < MAX_CHUNKS; i++) {
		mypool = &allpools->pools[i];
		head = get_list_head(mypool);
		if (head->next == NULL) {
			continue;
		}
		printk("\n\tChunk = %u; mainp = %u\n", i , mypool->mainp);
		list_for_each_prev(ln, head) {
			hdr = list_entry(ln, listpoolhdr_t, head);
			subpool_list_for_each(subpool, hdr) {
				printk("last_subpool(hdr)=%px first_subpool(hdr)=%px "
					"hdr->mainp=0x%x, hdr->size=0x%x subpool=%px\n",
					get_last_subpool(hdr),
					get_first_subpool(hdr), hdr->mainp,
					hdr->size, subpool);
				if (deleted_subpool(subpool)) {
					printk("DELETED subpool = 0x%llx "
						"mainp=0x%x size=0x%x\n",
						(u64)subpool, subpool->mainp,
						subpool->size);
					continue;
				}
				printk("subpool = 0x%llx STARTMP = 
0x%x ptr=%px\n", + (u64)subpool, (int)subpool->mainsz, subpool->ptr); + printk(" %08x%08x%08x%08x\n", + subpool->mused[0], subpool->mused[1], + subpool->mused[2], subpool->mused[3]); + printk(" %08x%08x%08x%08x\n", + subpool->mused[4], subpool->mused[5], + subpool->mused[6], subpool->mused[7]); + printk(" %08x%08x%08x%08x\n", + subpool->mused[8], subpool->mused[9], + subpool->mused[10], subpool->mused[11]); + printk(" %08x%08x%08x%08x\n", + subpool->mused[12], subpool->mused[13], + subpool->mused[14], subpool->mused[15]); + } + } + } + return; +} + + +static void stop_all_children_and_parent(void) +{ + struct task_struct *t; + + Dbg_gc_trace(" stop_all_children_and_parent \n"); + + if (thread_group_empty(current)) + return; + + rcu_read_lock(); + for_each_thread(current, t) { + if (t != current) + send_sig_info(SIGSTOP, SEND_SIG_PRIV, t); + } + rcu_read_unlock(); +} + +static void wakeup_all_children_and_parent(void) +{ + struct task_struct *t; + + Dbg_gc_trace(" wakeup_all_children_and_parent begin \n"); + + if (thread_group_empty(current)) + return; + + rcu_read_lock(); + for_each_thread(current, t) { + if (t != current) + send_sig_info(SIGCONT, SEND_SIG_PRIV, t); + } + rcu_read_unlock(); +} + +/* + * It used to create true context for new process + */ +void +init_pool_malloc(struct task_struct *old_tsk, struct task_struct *new_tsk) +{ + allpools_t *allpools = &new_tsk->mm->context.umpools; + memset(allpools, 0, sizeof(allpools_t)); + init_sem_malloc(allpools); +} + +void +init_sem_malloc(allpools_t *allpools) +{ + struct semaphore *lock; + int i; + + check_size_rt_semaphore(); + for (i =0; i < MAX_CHUNKS; i++) { + lock = get_rt_semaphore(&allpools->pools[i]); + sema_init(lock, 1); + } +} + +/* + * Fill 'ptr' with 'dw' double words + */ +int mem_set_empty_tagged_dw(void __user *ptr, s64 size, u64 dw) +{ + void __user *ptr_aligned; + s64 size_aligned, size_head, size_tail; + + if (size < 8) + if (clear_user((void __user *) ptr, size)) + return -EFAULT; + + 
ptr_aligned = PTR_ALIGN(ptr, 8); + size_head = (s64 __force) (ptr_aligned - ptr); + size_aligned = round_down(size - size_head, 8); + size_tail = size - size_head - size_aligned; + + if (fill_user(ptr, size_head, 0xff) || + fill_user_with_tags(ptr, size_aligned, ETAGEWD, dw) || + fill_user(ptr_aligned + size_aligned, size_tail, 0xff)) + return -EFAULT; + + return 0; +} + +/* Must be no page faults in a function called from TRY_USR_PFAULT block */ +__always_inline +static void find_data_in_list(struct rb_root_cached *areas, + e2k_ptr_t data, unsigned long ptr, unsigned long offset, + bool kernel_stack) +{ + unsigned long start, last; + struct interval_tree_node *it; + + if (!kernel_stack) + might_fault(); + + Dbg_cl_desc("data.lo = 0x%lx data.hi = 0x%lx ptr = 0x%lx\n", + AW(data).lo, AW(data).hi, ptr); + + start = AS(data).ap.base; + last = AS(data).ap.base + AS(data).size - 1; + if (!AS(data).size) + return; + + /* We know that there is no intersection between passed areas + * so there is no need to go over *all* intervals intersecting + * this particular descriptor: if the first one was not big enough + * then all others also won't be. */ + it = interval_tree_iter_first(areas, start, last); + if (it && it->start <= start && it->last >= last) { + /* + * If we find descriptor in readonly page, we would + * catch a reasonable PFAULT on store operation. 
+ */ + if (kernel_stack || __range_ok(ptr, 16, PAGE_OFFSET)) { + __NATIVE_STORE_TAGGED_QWORD(ptr, AW(data).lo, + AW(data).hi, ETAGNVD, ETAGNVD, offset); + } + } +} + +__always_inline +static void clean_descriptors_in_psp(struct rb_root_cached *areas, + unsigned long start, unsigned long end, bool kernel_stack) +{ + unsigned long ptr; + + if (machine.native_iset_ver < E2K_ISET_V5) { + for (ptr = start; ptr < end; ptr += 64) { + u64 val0_lo, val0_hi, val1_lo, val1_hi; + u32 tag0_lo, tag0_hi, tag1_lo, tag1_hi; + + NATIVE_LOAD_VAL_AND_TAGD(ptr, val0_lo, tag0_lo); + NATIVE_LOAD_VAL_AND_TAGD(ptr + 8, val0_hi, tag0_hi); + + NATIVE_LOAD_VAL_AND_TAGD(ptr + 32, val1_lo, tag1_lo); + NATIVE_LOAD_VAL_AND_TAGD(ptr + 40, val1_hi, tag1_hi); + + if (unlikely(tag0_hi == E2K_AP_HI_ETAG && + tag0_lo == E2K_AP_LO_ETAG)) { + e2k_ptr_t data; + AW(data).lo = val0_lo; + AW(data).hi = val0_hi; + find_data_in_list(areas, data, ptr, 8, + kernel_stack); + } + if (unlikely(tag1_hi == E2K_AP_HI_ETAG && + tag1_lo == E2K_AP_LO_ETAG)) { + e2k_ptr_t data; + AW(data).lo = val1_lo; + AW(data).hi = val1_hi; + find_data_in_list(areas, data, ptr + 32, 8, + kernel_stack); + } + } + } else { + for (ptr = start; ptr < end; ptr += 32) { + u64 val0_lo, val0_hi, val1_lo, val1_hi; + u32 tag0_lo, tag0_hi, tag1_lo, tag1_hi; + + NATIVE_LOAD_VAL_AND_TAGD(ptr, val0_lo, tag0_lo); + NATIVE_LOAD_VAL_AND_TAGD(ptr + 16, val0_hi, tag0_hi); + + NATIVE_LOAD_VAL_AND_TAGD(ptr + 8, val1_lo, tag1_lo); + NATIVE_LOAD_VAL_AND_TAGD(ptr + 24, val1_hi, tag1_hi); + + if (unlikely(tag0_hi == E2K_AP_HI_ETAG && + tag0_lo == E2K_AP_LO_ETAG)) { + e2k_ptr_t data; + AW(data).lo = val0_lo; + AW(data).hi = val0_hi; + find_data_in_list(areas, data, ptr, 16, + kernel_stack); + } + if (unlikely(tag1_hi == E2K_AP_HI_ETAG && + tag1_lo == E2K_AP_LO_ETAG)) { + e2k_ptr_t data; + AW(data).lo = val1_lo; + AW(data).hi = val1_hi; + find_data_in_list(areas, data, ptr + 8, 16, + kernel_stack); + } + } + } +} + +static int clean_descriptors_range_user(struct 
rb_root_cached *areas, + unsigned long start, unsigned long end, bool proc_stack) +{ + unsigned long ts_flag; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + + TRY_USR_PFAULT { + if (!proc_stack) { + unsigned long ptr; + +#pragma loop count (100000) + for (ptr = start; ptr < end; ptr += 32) { + u64 val0_lo, val0_hi, val1_lo, val1_hi; + u32 tag0_lo, tag0_hi, tag1_lo, tag1_hi; + + NATIVE_LOAD_VAL_AND_TAGD(ptr, val0_lo, tag0_lo); + NATIVE_LOAD_VAL_AND_TAGD(ptr + 8, + val0_hi, tag0_hi); + + NATIVE_LOAD_VAL_AND_TAGD(ptr + 16, + val1_lo, tag1_lo); + NATIVE_LOAD_VAL_AND_TAGD(ptr + 24, + val1_hi, tag1_hi); + + if (unlikely(tag0_hi == E2K_AP_HI_ETAG && + tag0_lo == E2K_AP_LO_ETAG)) { + e2k_ptr_t data; + AW(data).lo = val0_lo; + AW(data).hi = val0_hi; + find_data_in_list(areas, data, ptr, 8, + false); + } + if (unlikely(tag1_hi == E2K_AP_HI_ETAG && + tag1_lo == E2K_AP_LO_ETAG)) { + e2k_ptr_t data; + AW(data).lo = val1_lo; + AW(data).hi = val1_hi; + find_data_in_list(areas, data, ptr + 16, + 8, false); + } + } + } else { + clean_descriptors_in_psp(areas, start, end, false); + } + } CATCH_USR_PFAULT { + clear_ts_flag(ts_flag); + return -EFAULT; + } END_USR_PFAULT; + + clear_ts_flag(ts_flag); + + return 0; +} + +static int clean_descriptors_test_walk(unsigned long start, unsigned long end, + struct mm_walk *walk) +{ + unsigned long vm_flags = walk->vma->vm_flags; + + if ((vm_flags & (VM_PFNMAP|VM_HW_STACK_PCS)) || !(vm_flags & VM_READ)) + return 1; + + return 0; +} + +static int clean_descriptors_pte_range(pmd_t *pmd, unsigned long addr, + unsigned long end, struct mm_walk *walk) +{ + struct rb_root_cached *areas = walk->private; + const struct vm_area_struct *vma = walk->vma; + bool proc_stack = !!(vma->vm_flags & VM_HW_STACK_PS); + const pte_t *pte; + spinlock_t *ptl; + int ret = 0; + + if (pmd_none(*pmd)) + goto out; + + if (pmd_trans_unstable(pmd)) { + ret = clean_descriptors_range_user(areas, addr, end, proc_stack); + goto out; + } + + pte = pte_offset_map_lock(vma->vm_mm, 
pmd, addr, &ptl); + for (; addr != end; pte++, addr += PAGE_SIZE) { + if (!pte_none(*pte)) { + ret = clean_descriptors_range_user(areas, addr, + addr + PAGE_SIZE, proc_stack); + if (ret) + goto out; + } + } + pte_unmap_unlock(pte - 1, ptl); + +out: + cond_resched(); + return ret; +} + +#ifdef CONFIG_HUGETLB_PAGE +/* This function walks within one hugetlb entry in the single call */ +static int clean_descriptors_hugetlb_range(pte_t *ptep, unsigned long hmask, + unsigned long addr, unsigned long end, + struct mm_walk *walk) +{ + struct rb_root_cached *areas = walk->private; + pte_t pte; + int ret = 0; + + pte = huge_ptep_get(ptep); + if (!pte_none(pte)) + ret = clean_descriptors_range_user(areas, addr, end, false); + + cond_resched(); + + return ret; +} +#endif /* HUGETLB_PAGE */ + +static int clean_descriptors_copies(struct rb_root_cached *areas) +{ + struct pt_regs *regs = current_pt_regs(); + u64 pshtp_size; + int ret; + struct mm_walk_ops clean_descriptors_walk = { + .test_walk = clean_descriptors_test_walk, + .pmd_entry = clean_descriptors_pte_range, +#ifdef CONFIG_HUGETLB_PAGE + .hugetlb_entry = clean_descriptors_hugetlb_range, +#endif + }; + + /* + * Parse part of user stack spilled to kernel + */ + pshtp_size = GET_PSHTP_MEM_INDEX(regs->stacks.pshtp); + if (pshtp_size) { + unsigned long ptr, end, flags; + + ptr = AS(current_thread_info()->k_psp_lo).base; + end = ptr + pshtp_size; + + raw_all_irq_save(flags); + NATIVE_FLUSHCPU; + clean_descriptors_in_psp(areas, ptr, end, true); + raw_all_irq_restore(flags); + } + + stop_all_children_and_parent(); + + down_read(&current->mm->mmap_sem); + ret = walk_page_range(current->mm, 0, current->mm->highest_vm_end, + &clean_descriptors_walk, areas); + up_read(&current->mm->mmap_sem); + + wakeup_all_children_and_parent(); + + return ret; +} + +/* + * Clean freed user memory and destroy freed descriptors in memory. 
+ */ +int clean_single_descriptor(e2k_ptr_t descriptor) +{ + unsigned long ptr, size; + struct interval_tree_node it_entry; + struct rb_root_cached areas = RB_ROOT_CACHED; + + ptr = AS(descriptor).ap.base; + size = AS(descriptor).size; + + /* Make a copy of a list */ + if (!size) + return 0; + + it_entry.start = ptr; + it_entry.last = ptr + size - 1; + interval_tree_insert(&it_entry, &areas); + + /* Clean all descriptor copies from user memory */ + return clean_descriptors_copies(&areas); +} +/* + * Clean freed user memory and destroy freed descriptors in memory. + */ +int clean_descriptors(void __user *list_descriptors, unsigned long list_size) +{ + int i, res; + void __user *addr; + e2k_ptr_t descriptor; + u8 tag_lo, tag_hi, tag; + unsigned long ptr, size; + struct interval_tree_node *it_array; + struct rb_root_cached areas = RB_ROOT_CACHED; + + /* We need a copy of a list, because user memory whould be cleaned */ + it_array = kmalloc_array(list_size, sizeof(it_array[0]), GFP_KERNEL); + if (!it_array) + return -ENOMEM; + + for (i = 0, addr = list_descriptors; i < list_size; i++, addr += 16) { + TRY_USR_PFAULT { + NATIVE_LOAD_TAGGED_QWORD_AND_TAGS(addr, + AW(descriptor).lo, AW(descriptor).hi, + tag_lo, tag_hi); + } CATCH_USR_PFAULT { + res = -EFAULT; + goto free_list; + } END_USR_PFAULT + + tag = (tag_hi << 4) | tag_lo; + if (unlikely(tag != ETAGAPQ)) { + pr_info_ratelimited("%s: bad descriptor extag 0x%x hiw=0x%lx low=0x%lx ind=%d\n", + __func__, tag, + AW(descriptor).hi, AW(descriptor).lo, i); + pr_info_ratelimited("%s: list_descriptors: 0x%lx / list_size=%ld\n", + __func__, list_descriptors, list_size); + res = -EFAULT; + goto free_list; + } + + ptr = AS(descriptor).ap.base; + size = AS(descriptor).size; + if (!size) + continue; + + /* Set memory to empty values */ + res = mem_set_empty_tagged_dw((void __user *) ptr, size, + 0x0baddead0baddead); /*freed mem mark*/ + if (res) + goto free_list; + + /* Make a copy of a list. 
Here we check that + * there are no intersections between areas - + * this fact is used in find_data_in_list() */ + if (unlikely(interval_tree_iter_first(&areas, ptr, + ptr + size - 1))) { + pr_info_once("sys_clean_descriptors: intersection between passed areas found\n"); + res = -EINVAL; + goto free_list; + } + + it_array[i].start = ptr; + it_array[i].last = ptr + size - 1; + interval_tree_insert(&it_array[i], &areas); + } + /* Clean all descriptor copies from user memory */ + res = clean_descriptors_copies(&areas); + +free_list: + kfree(it_array); + return res; +} diff --git a/arch/e2k/Kconfig b/arch/e2k/Kconfig new file mode 100644 index 000000000000..7eb43bc79b16 --- /dev/null +++ b/arch/e2k/Kconfig @@ -0,0 +1,1248 @@ +# +# For a description of the syntax of this configuration file, +# see Documentation/kbuild/kconfig-language.txt. +# + +config E2K + bool + default y + select ARCH_CACHES + select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE + select ARCH_HAVE_NMI_SAFE_CMPXCHG + select ARCH_HAS_GCOV_PROFILE_ALL + select ARCH_HAS_PTE_SPECIAL + select ARCH_HAS_SET_MEMORY + select ARCH_HAS_SG_CHAIN + select ARCH_HAS_DMA_WRITE_COMBINE + select ARCH_INLINE_READ_LOCK if !PREEMPTION && !CPU_ES2 && !CPU_E8C + select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION && !CPU_ES2 && !CPU_E8C + select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION && !CPU_ES2 && !CPU_E8C + select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION && !CPU_ES2 && !CPU_E8C + select ARCH_INLINE_READ_UNLOCK if !PREEMPTION && !CPU_ES2 + select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION && !CPU_ES2 + select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION && !CPU_ES2 + select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION && !CPU_ES2 + select ARCH_INLINE_WRITE_LOCK if !PREEMPTION && !CPU_ES2 && !CPU_E8C + select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION && !CPU_ES2 && !CPU_E8C + select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPTION && !CPU_ES2 && !CPU_E8C + select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPTION && !CPU_ES2 && 
!CPU_E8C + select ARCH_INLINE_WRITE_UNLOCK if !PREEMPTION + select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPTION + select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPTION + select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPTION + select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPTION && !CPU_ES2 && !CPU_E8C + select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPTION && !CPU_ES2 && !CPU_E8C + select ARCH_INLINE_SPIN_LOCK if !PREEMPTION && !CPU_ES2 && !CPU_E8C + select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPTION && !CPU_ES2 && !CPU_E8C + select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPTION && !CPU_ES2 && !CPU_E8C + select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPTION && !CPU_ES2 && !CPU_E8C + select ARCH_INLINE_SPIN_UNLOCK if !PREEMPTION + select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION + select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION + select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION + select ARCH_USE_BUILTIN_BSWAP + select ARCH_USE_CMPXCHG_LOCKREF + select ARCH_USE_QUEUED_RWLOCKS + select ARCH_USE_QUEUED_SPINLOCKS + select ARCH_PROC_KCORE_TEXT + select ARCH_THREAD_STACK_ALLOCATOR + select ARCH_SUPPORTS_ATOMIC_RMW + select ARCH_SUPPORTS_MSI + select ARCH_SUPPORTS_NUMA_BALANCING + select ARCH_WANT_COMPAT_IPC_PARSE_VERSION + select ARCH_WANT_IPC_PARSE_VERSION + select ARCH_WANT_OPTIONAL_GPIOLIB + select ARCH_WEAK_RELEASE_ACQUIRE + #Unaligned loads are broken on es2 + select DCACHE_WORD_ACCESS if !CPU_ES2 + select GENERIC_IDLE_POLL_SETUP + select GENERIC_IRQ_PROBE + select GENERIC_IRQ_SHOW + select GENERIC_PENDING_IRQ if SMP + select GENERIC_SMP_IDLE_THREAD + select HAVE_ALIGNED_STRUCT_PAGE + select HAVE_ARCH_BITREVERSE + select HAVE_ARCH_COMPILER_H + select HAVE_ARCH_HUGE_VMAP if !PARAVIRT_GUEST && !KVM_GUEST_KERNEL + select HAVE_ARCH_TRANSPARENT_HUGEPAGE + select HAVE_ARCH_VMAP_STACK + # Note that atomic stores and loads are only supported since v6 + # (see do_recovery_store()/do_recovery_load() for details). 
+ select HAVE_CMPXCHG_DOUBLE if CPU_ISET >= 6 + select HAVE_CONTEXT_TRACKING + select HAVE_COPY_THREAD_TLS + select HAVE_DEBUG_BUGVERBOSE + select HAVE_DEBUG_KMEMLEAK + select HAVE_DYNAMIC_FTRACE + #Unaligned loads are broken on es2 + select HAVE_EFFICIENT_UNALIGNED_ACCESS if !CPU_ES2 + select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_FUNCTION_TRACER + select HAVE_FUNCTION_GRAPH_TRACER + select HAVE_FUTEX_CMPXCHG if FUTEX + select HAVE_HW_BREAKPOINT # Always on as it is used to implement ptrace + select HAVE_IDE + #select HAVE_KERNEL_BZIP2 -- worse than XZ on e2k + select HAVE_KERNEL_GZIP # Stands between LZ4 and XZ + select HAVE_KERNEL_LZ4 # Fastest compression/decompression + #select HAVE_KERNEL_LZMA -- worse than XZ on e2k + #select HAVE_KERNEL_LZO -- worse than GZIP on e2k + select HAVE_KERNEL_XZ # Smallest kernel + select HAVE_MEMBLOCK + select HAVE_MEMBLOCK_NODE_MAP + select HAVE_PERF_EVENTS + select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_RSEQ + select HAVE_TRACE_CLOCK + select HAVE_SYSCALL_TRACEPOINTS + select HAVE_ARCH_AUDITSYSCALL + select HAVE_KPROBES + select HAVE_KRETPROBES + select HAVE_EXIT_THREAD + select IRQ_FORCED_THREADING + select MODULES_USE_ELF_RELA + select PANIC_ON_OOPS + select PERF_EVENTS + select RTC_CLASS + select SPARSE_IRQ + select SPARSEMEM_VMEMMAP_ENABLE + select SYSCTL_EXCEPTION_TRACE + select TTY + select USER_STACKTRACE_SUPPORT + select IRQ_DOMAIN + select SWIOTLB + select HAVE_MEMBLOCK_NODE_MAP + select INTERVAL_TREE if PROTECTED_MODE + select GENERIC_ALLOCATOR + select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT + select IOMMU_DMA + select IOMMU_API + select DMA_REMAP + select THREAD_INFO_IN_TASK + select ARCH_HAS_STRICT_KERNEL_RWX + select ARCH_HAS_STRICT_MODULE_RWX + select ARCH_HAS_ELF_RANDOMIZE + select HAVE_ARCH_MMAP_RND_BITS + select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT + select ARCH_HAS_PHYS_TO_DMA + select ARCH_KEEP_MEMBLOCK + select ARCH_SUPPORTS_RT + select HAVE_FAST_GUP + select HAVE_PCI + select 
HAVE_ARCH_SECCOMP_FILTER + select EDAC_SUPPORT + select HAVE_PREEMPT_LAZY + help + This is a port of Linux on Elbrus microprocessor architecture. + +config SWIOTLB + def_bool y + +config ARCH_SPARSEMEM_ENABLE + def_bool y + +config ARCH_HIBERNATION_POSSIBLE + def_bool y + +config 64BIT + def_bool y + +config MMU + bool + default y + +config NEED_DMA_MAP_STATE + def_bool y + +config ZONE_DMA + def_bool y + +config ARCH_PROC_KCORE_TEXT + def_bool y + depends on PROC_KCORE + +config GENERIC_FIND_FIRST_BIT + def_bool y + +config GENERIC_FIND_NEXT_BIT + def_bool y + +config GENERIC_CALIBRATE_DELAY + bool + default y + +config GENERIC_BUG + def_bool y if BUG + +config GENERIC_BUG_RELATIVE_POINTERS + def_bool y + +config EARLY_PRINTK + bool + default y + depends on EARLY_DUMP_CONSOLE + +config ARCH_MAY_HAVE_PC_FDC + bool + default y +config GREGS_CONTEXT + def_bool y + +config GENERIC_IOMAP + bool + default y + +config PGTABLE_LEVELS + default 4 + +config HAVE_GENERIC_GUP + def_bool y + +config HAVE_SETUP_PER_CPU_AREA + def_bool y + +config NEED_PER_CPU_EMBED_FIRST_CHUNK + def_bool y + +config NEED_PER_CPU_PAGE_FIRST_CHUNK + def_bool y + +config NEED_SG_DMA_LENGTH + def_bool y + +config SYS_SUPPORTS_HUGETLBFS + def_bool y + +config ARCH_SUSPEND_POSSIBLE + def_bool y + +config ARCH_BOOT_TRACE_POSSIBLE + def_bool y + +config HOTPLUG_CPU + def_bool y + depends on SMP + select GENERIC_IRQ_MIGRATION + ---help--- + Say Y here to allow turning CPUs off and on. CPUs can be + controlled through /sys/devices/system/cpu. + ( Note: power management support will enable this option + automatically on SMP systems. ) + Say N if you want to disable CPU hotplug. 
+ +config GENERIC_GPIO + def_bool y + +config ARCH_DMA_ADDR_T_64BIT + def_bool y + +config OF_IRQ + bool + default n + +config OF_NET + bool + default n + +config ARCH_ENABLE_MEMORY_HOTPLUG + def_bool y + +config ARCH_ENABLE_MEMORY_HOTREMOVE + def_bool y + +config DEFERRED_STRUCT_PAGE_INIT + def_bool y + +config HOLES_IN_ZONE + def_bool y + +config ARCH_MMAP_RND_BITS_MIN + default 28 + +config ARCH_MMAP_RND_BITS_MAX + default 32 + +config ARCH_MMAP_RND_COMPAT_BITS_MIN + default 8 + +config ARCH_MMAP_RND_COMPAT_BITS_MAX + default 16 + +menu "Processor type and features" + +config E2K_MACHINE_SIC + def_bool y + +config E2K_MACHINE_IOHUB + def_bool y + +config LOCKDEP_SUPPORT + def_bool y + +config STACKTRACE_SUPPORT + def_bool y + +config HAVE_LATENCYTOP_SUPPORT + def_bool y + +config E2K_MACHINE + bool "E2K machines support" + default n + help + Support only specified E2K machine type + If you say Y only one specified machine type will be supported + by this kernel. + The better way here say N to support all range of machines + +config E2K_SIMULATOR + bool "E2K simulator" + depends on E2K_MACHINE + default n + help + You can choose this when you are working on a machine simulator. 
+ +config CPU_ES2 + def_bool !E2K_MACHINE + depends on !E2K_MINVER_V3 + depends on !E2K_MINVER_V4 + depends on !E2K_MINVER_V5 + depends on !E2K_MINVER_V6 + +config CPU_E2S + def_bool !E2K_MACHINE + depends on !E2K_MINVER_V4 + depends on !E2K_MINVER_V5 + depends on !E2K_MINVER_V6 + +config CPU_E8C + def_bool !E2K_MACHINE + depends on !E2K_MINVER_V5 + depends on !E2K_MINVER_V6 + +config CPU_E1CP + def_bool !E2K_MACHINE + depends on !E2K_MINVER_V5 + depends on !E2K_MINVER_V6 + +config CPU_E8C2 + def_bool !E2K_MACHINE + depends on !E2K_MINVER_V6 + +config CPU_E12C + def_bool !E2K_MACHINE + +config CPU_E16C + def_bool !E2K_MACHINE + +config CPU_E2C3 + def_bool !E2K_MACHINE + +config CPU_ISET + int + range 0 6 + default 0 if !E2K_MACHINE + default 2 if E2K_ES2_DSP || E2K_ES2_RU + default 3 if E2K_E2S + default 4 if E2K_E8C || E2K_E1CP + default 5 if E2K_E8C2 + default 6 if E2K_E16C || E2K_E12C || E2K_E2C3 + +# Since iset v5 hardware clears RF automatically. +# For fully paravirtualized guest hypervisor does +# all the clearing instead of hardware. +config CPU_HW_CLEAR_RF + def_bool CPU_ISET >= 5 || KVM_GUEST_KERNEL + +# 134709 - ibranch does not work in some cases +# Workaround - insert NOPs after ibranch in some places +config CPU_HWBUG_IBRANCH + def_bool CPU_ISET <= 5 + +config CPU_HAS_FILL_INSTRUCTION + def_bool CPU_ISET >= 6 + +choice + prompt "Subarchitecture Type" + depends on E2K_MACHINE + + +config E2K_ES2_DSP + bool "Elbrus e2c+ (Cubic, NUMA, 2 core, DSP, IOHUB, SIC)" + select CPU_ES2 + help + Support for Elbrus e2c+ (Cubic) systems. + Say 'Y' here if this kernel is supposed to run on + an Elbrus S2 (Cubic) system. + Only choose this option if you have such a system and cannot run + the kernel on other machines types, + otherwise you should say N here. + +config E2K_ES2_RU + bool "Elbrus e2c (Micron, NUMA, 2 core, IOHUB, SIC)" + select CPU_ES2 + help + Support for Elbrus e2c (Micron) systems. 
+ Say 'Y' here if this kernel is supposed to run on + an Elbrus e2c (Micron) system. + Only choose this option if you have such a system and cannot run + the kernel on other machines types, + otherwise you should say N here. + +config E2K_E2S + bool "Elbrus 2S (NUMA, 4 core, IOHUB, SIC)" + select CPU_E2S + help + Support for Elbrus 2S systems. Say 'Y' here if this kernel is + supposed to run on an Elbrus 2S system. + Only choose this option if you have such a system and cannot run + the kernel on other machines types, + otherwise you should say N here. + +config E2K_E8C + bool "Elbrus 8C (NUMA, 8 core, IOHUB-2, SIC)" + select CPU_E8C + help + Support for Elbrus 8C systems. Say 'Y' here if this kernel is + supposed to run on an Elbrus 8C system. + Only choose this option if you have such a system and cannot run + the kernel on other machines types, + otherwise you should say N here. + +config E2K_E1CP + bool "Elbrus 1C+ (1 core + GC, IOHUB-2, Legacy SIC)" + select CPU_E1CP + help + Support for Elbrus 1C+ systems. Say 'Y' here if this kernel is + supposed to run on an Elbrus 1C+ system. + Only choose this option if you have such a system and cannot run + the kernel on other machines types, + otherwise you should say N here. + +config E2K_E8C2 + bool "Elbrus 8C2 (NUMA, 8 core, IOHUB-2, SIC)" + select CPU_E8C2 + help + Support for Elbrus 8C systems. Say 'Y' here if this kernel is + supposed to run on an Elbrus 8C2 system. + Only choose this option if you have such a system and cannot run + the kernel on other machines types, + otherwise you should say N here. + +config E2K_E12C + bool "Elbrus 12C (NUMA, 12 core, IOHUB-2 , SOC)" + select CPU_E12C + help + Support for Elbrus 12C systems. Say 'Y' here if this kernel is + supposed to run on an Elbrus 12C System On Chip + Only choose this option if you have such a system and cannot run + the kernel on other machines types, + otherwise you should say N here. 
+ +config E2K_E16C + bool "Elbrus 16C (NUMA, 16 core, IOHUB-2 , SOC)" + select CPU_E16C + help + Support for Elbrus 16C systems. Say 'Y' here if this kernel is + supposed to run on an Elbrus 16C System On Chip + Only choose this option if you have such a system and cannot run + the kernel on other machines types, + otherwise you should say N here. + +config E2K_E2C3 + bool "Elbrus 2C3 (NUMA, 2 core + GPU, IOHUB-2 , SOC)" + select CPU_E2C3 + help + Support for Elbrus 2C3 systems. Say 'Y' here if this kernel is + supposed to run on an Elbrus 2C + embeded GPU, System On Chip + Only choose this option if you have such a system and cannot run + the kernel on other machines types, + otherwise you should say N here. + +endchoice + +choice + prompt "Minimal supported E2K generation" + depends on !E2K_MACHINE + default E2K_MINVER_V3 + help + Support all Elbrus systems starting from selected architecture + generation. This allows to have better optimized kernel without + quirks for older systems. + +config E2K_MINVER_V2 + bool "Elbrus v2 support" + help + This options enables kernel with minimally supported Elbrus v2 + CPU architecture (Elbrus 2c, 2c+ and later). + +config E2K_MINVER_V3 + bool "Elbrus v3" + help + This options enables kernel with minimally supported Elbrus v3 + CPU architecture (Elbrus 4c and later). + +config E2K_MINVER_V4 + bool "Elbrus v4" + help + This options enables kernel with minimally supported Elbrus v4 + CPU architecture (Elbrus 8c, 1c+ and later). + +config E2K_MINVER_V5 + bool "Elbrus v5" + help + This options enables kernel with minimally supported Elbrus v5 + CPU architecture (Elbrus 8c2 and later). + +config E2K_MINVER_V6 + bool "Elbrus v6" + help + This options enables kernel with minimally supported Elbrus v6 + CPU architecture (Elbrus 12c, 16c, 2c3 and later). 
+ +endchoice + +config E2K_MINVER + int + default 3 if E2K_MINVER_V3 + default 4 if E2K_MINVER_V4 + default 5 if E2K_MINVER_V5 + default 6 if E2K_MINVER_V6 + default 2 + +menu "MMU Page Tables features" + +config MMU_PT_V6 + bool "MMU Page Table V6 basic mode" + depends on !E2K_MACHINE || (!CPU_ES2 && !CPU_E2S && \ + !CPU_E8C && !CPU_E1CP && !CPU_E8C2) + default y + help + Support for basic (new) structure of Page Tables entries. + Say 'Y' here if this kernel is supposed to run on + an Elbrus 12C/2C3/16C Systems and should use new structures of + Page Tables. Say 'N' if kernel should be run only on legacy mode of + Page Tables + +config MMU_SEP_VIRT_SPACE + bool "MMU Separate Page Tables for kernel and users" + depends on !E2K_MACHINE || (!CPU_ES2 && !CPU_E2S && \ + !CPU_E8C && !CPU_E1CP && !CPU_E8C2) + default y + help + Enables MMU Separate Page Tables for kernel and users. + Say 'Y' here if this kernel is supposed to run on + an Elbrus 12C/2C3/16C (iset V6 and higher) Systems. + The feature should be enabled to support hardware virtualization mode + +config DYNAMIC_SEP_VIRT_SPACE + bool "Separate Page Tables mode can be set/reset in command line" + depends on MMU_SEP_VIRT_SPACE + default n + help + Separate Page Tables mode can be turned off in command line, + but only one time while kernel boot up. The mode is useful for + debug and to measure the efficiency of separation. + The mode can be set/reset only on systems with hardware supported + MMU feature (iset V6 and higher) + +config SECCOMP + def_bool y + prompt "Enable seccomp to safely compute untrusted bytecode" + ---help--- + This kernel feature is useful for number crunching applications + that may need to compute untrusted bytecode during their + execution. By using pipes or other transports made available to + the process as file descriptors supporting the read/write + syscalls, it's possible to isolate those applications in + their own address space using seccomp. 
Once seccomp is + enabled via prctl(PR_SET_SECCOMP), it cannot be disabled + and the task is only allowed to execute a few safe syscalls + defined by each seccomp mode. + + If unsure, say Y. Only embedded should say N here. + +endmenu + +config ENABLE_EXTMEM + def_bool y + ---help--- + E2K can use more than 4 Gigabytes of physical memory. + However, the address space of e2k processors has two regions with + hole between them: + + low region from 0 up to 2 Gigabytes 0 - 8000 0000 + high region from 2**32 up to 2**48 n 0000 0000 - 1 0000 0000 0000 + +config E16_CORE_SUPPORT + bool "New processors based on e16 core support" + depends on !E2K_MACHINE + default y if CPU_E16C + help + To support new family of processors based on e16 core + it need compilator lcc-23, which is not now on the staff + Say N to compule without new processors support, in this case + can use compilator lcc-21 and kernel can be compiled in generic + mode + If you say Y then need compile only by lcc-23 + +# Unknown bug of hardware or software while compiler lift up some operations +# of load of data before call of spinlock function when data can be acceessed +# only after call completion. In this case compiler use DAM to detect that data +# were updated while spinlock completion and reload them if updated. +# Turn off compiler optimization while bug is not detected and fixed. + +config SMP_DAM_BUG + bool + default SMP && CPU_ES2 + +source "kernel/Kconfig.hz" + +config GLOBAL_CONTEXT + bool "Map kernel virtual space to global context" + default y + help + Choose this option to map virtual space of kernel to + global context, so all kernel virtual pages and + page tables will be global for all processes. + It should reduce TLB and caches missing + +config SECONDARY_SPACE_SUPPORT + bool "Support Secondary Space feature in OS" + default y + help + Select 'y' if you are going to run native Linux-x86 code under + this OS. 
+ +config MLT_STORAGE + def_bool SECONDARY_SPACE_SUPPORT + +# Note that iset v6 actually supports 48 bits but that +# requires finding space for the linear mapping and for +# vmalloc area if we use pcpu_embed_first_chunk(). +config E2K_PA_BITS + int + range 40 48 + default 40 if CPU_ISET < 6 || KVM_GUEST_KERNEL + default 44 if CPU_ISET >= 6 && !KVM_GUEST_KERNEL + +config ONLY_HIGH_PHYS_MEM + bool "Use only high region of address space of physical memory" + depends on !E2K_E1CP + default y + help + Physical memory of e2k machines can be mapped into two addresses + regions: + low region (below 2**32) + high region (above 2**32) + Low memory cut out some area from high region + If you say here Y then only high region addresses will be used + to access to physical memory. + It allows to extend the low MMIO and PCI addresses areas + and map physical memory to virtual pages of max order + +config ONLY_BSP_MEMORY + bool "Only specified nodes memory can be used by other CPUs" + default n + ---help--- + CPUs can have own memory and access to memory of other CPUs. + If you see Y then all CPUs can use only memory of nodes from + following config parametr + and ignore own + +config MEMORY_PRES_MAP + int "Specify mask of nodes with memory" + default 1 + depends on ONLY_BSP_MEMORY + range 1 65536 + +config FORCE_MAX_ZONEORDER + int "Maximum zone order" + + range 11 64 + default "16" + help + The kernel memory allocator divides physically contiguous memory + blocks into "zones", where each zone is a power of two number of + pages. This option selects the largest power of two that the kernel + keeps in the memory allocator. If you need to allocate very large + blocks of physically contiguous memory, then you may need to + increase this value. + + This config option is actually maximum order plus one. For example, + a value of 11 means that the largest free memory block is 2^10 pages. + + The page size is not necessarily 4KB. 
Keep + this in mind when choosing a value for this option. + +# Max 8 Nodes now limited by BOOT_MAX_MEM_NUMNODES to confirm +# to boot_info structure size +config NODES_SHIFT + int "Maximum memory (NUMA) Nodes (as a power of 2)" + range 0 3 + default "3" if SMP + default "0" + depends on NEED_MULTIPLE_NODES + ---help--- + Specify the maximum number of memory (NUMA) Nodes available on the target + system. Increases memory reserved to accommodate various tables. + +# Some NUMA nodes can have memory ranges that span +# other nodes. Even though a pfn is valid and +# between a node's start and end pfns, it may not +# reside on that node. +# For example while a few nodes have memory in low and high range + +config NODES_SPAN_OTHER_NODES + def_bool y + +config NUMA + bool "NUMA support" + default n + depends on SMP + +config COPY_USER_PGD_TO_KERNEL_ROOT_PT + bool "Copy user process PGD entries to kernel root page table" + default y + depends on NUMA + help + If say Y then each CPU has own kernel PGD copy + User process PGD entries are copied to this kernel + PGD table while the process is activated on this CPU + +config KTHREAD_ON_CPU + bool "Create kernel threads on specified CPU" + default n + depends on NUMA + help + Support creation of kernel threads on cpecified CPU + to allocate memory resources on the node of this CPU + +config BOOT_PRINTK + def_bool n + +config SERIAL_BOOT_PRINTK + bool "boot_printk() support" + default y + select BOOT_PRINTK + help + boot_printk() outputs directly to serial port bypassing all + buffers and locks and can work both before and after the switch + to virtual memory addressing. It is useful for hardware debugging. + + Only two devices are supported currently: ns16550 (== 8550) + and l_zilog (== am85c30). + + On simulator boot_printk() also outputs to LMS console. 
+ +config SERIAL_AM85C30_BOOT_CONSOLE + bool "Boot-time serial console on Am85c30 zilog and compatible devices" + depends on SERIAL_BOOT_PRINTK + default y + help + Sey Y to support boot-time console based on Am85c30 serial + PCI device. + +config BOOT_SERIAL_BAUD + int "Boot printk serial rate (4800-115200)" + range 4800 115200 + depends on SERIAL_BOOT_PRINTK || SERIAL_PRINTK + default "115200" + help + Speed rate for boot printk console + +config EARLY_VIRTIO_CONSOLE + bool "Early VIRTIO console on guest kernel" + depends on VIRTIO_CONSOLE && KVM + default y + select BOOT_PRINTK + select L_EARLY_PRINTK + help + Sey Y to support early console based on VIRTIO + and pseudo HVC interface + +config ILLEGAL_POINTER_VALUE + hex + default 0xdead000000000000 + +config MEMLIMIT + int "Physical memory probing range (main region of RAM)" + range 0 2048 + default 64 + +config EXT_MEMLIMIT + int "Extended physical memory probing range (Megabytes)" + depends on ENABLE_EXTMEM + range 0 61440 + default 2048 + +choice + prompt "Embeded Video RAM size" + default VRAM_SIZE_128 + help + Embeded graphical controller video ram size + Option is usfull only for e2k internal boot loader on simulator + +config VRAM_SIZE_128 + bool "128 Mb" + help + Embeded graphical controller video ram size + is 128 Mb + Option is usfull only for e2k internal boot loader on simulator + +config VRAM_SIZE_256 + bool "256 Mb" + help + Embeded graphical controller video ram size + is 256 Mb + Option is usfull only for e2k internal boot loader on simulator + +config VRAM_SIZE_512 + bool "512 Mb" + help + Embeded graphical controller video ram size + is 512 Mb + Option is usfull only for e2k internal boot loader on simulator + +config VRAM_SIZE_1024 + bool "1 Gb" + help + Embeded graphical controller video ram size + is 1 Gb + Option is usfull only for e2k internal boot loader on simulator + +config VRAM_DISABLE + bool "Disable embeded graphic support" + help + Embeded graphical controller video ram can be 
disabled + Option is useful only for e2k internal boot loader on simulator + +endchoice + +config ACCESS_CONTROL + bool + default n + +config USR_CONTROL_INTERRUPTS + bool "USER can control interrupts" + default n + help + USER can use UPSR register to control interrupts + (only for hardware debugging) + To control interrupts in user mode it needs to call: + e2k_syswork(USER_CONTROL_INTERRUPT, 1, 0); // start + e2k_syswork(USER_CONTROL_INTERRUPT, 0, 0); // stop + +config KERNEL_TIMES_ACCOUNT + bool "Collect kernel traps and system calls clock time info" + default n + +config MAX_KERNEL_TIMES_NUM + depends on KERNEL_TIMES_ACCOUNT + int "Max Number of last events to collect (1-1024)" + range 1 1024 + default 20 + +config CLI_CHECK_TIME + bool "CLI check time" + +# bool 'Symmetric multi-processing support' CONFIG_SMP + +config CMDLINE_PROMPT + bool "Enable prompt for kernel command string" + default n + +config MAKE_ALL_PAGES_VALID + bool "Make all virtual pages valid" + default y + +config USE_AAU + def_bool y + +config DATA_STACK_WINDOW + bool "Enable kernel data stack show while fancy stack listing" + default y + help + Say Y here to enable printing of data stack while stack's + printing. + + Do "echo 1 > /proc/sys/debug/datastack" to show the stack.
+ +config BINFMT_ELF32 + tristate "Kernel support for 32-bit ELF binaries" + default y + select COMPAT_BINFMT_ELF if BINFMT_ELF + +config COMPAT + def_bool y + depends on BINFMT_ELF32 + select ARCH_WANT_OLD_COMPAT_IPC + +config PROTECTED_MODE + tristate "Security mode" + default y + +config DBG_RTL_TRACE + bool "Enable printing rtl messages in security mode" + depends on PROTECTED_MODE + default n + +config CLW_ENABLE + bool "Enable clean of user stack window" + depends on PROTECTED_MODE + default y + +config IPD_DISABLE + bool "Disable Instruction Cache prefetch for kernel" + help + If you say 'Y' then Instruction Cache prefetch will be + disabled for kernel threads + Otherwise two lines of ICACHE prefetch will be set + default n + +config TC_STORAGE + bool "Store Table Cache default line" + help + Flushing Table Cache and storing default string of TC + when tasks switch + default y + +config IGNORE_MEM_LOCK_AS + bool "Ignore exc_mem_lock_as" + help + Ignore mem_lock_as exception in binary compiler task + default n + +config RECOVERY + bool "Enable system recovery" + select PM + help + Enable mode to support Suspend to RAM + default y + +config MONITORS + bool "Hardware monitor counters support" + depends on PROC_FS + default y + help + Say Y here to enable the monitors supporting mechanism used + for counting of various hardware events + +config E2K_KEXEC + bool "kexec for E2K" + default y + help + Say Y here to enable support of kexec implementation for E2K. + +config DUMP_ALL_STACKS + bool "Enable dump of all stacks" + default y + +config CMDLINE + string "Built-in kernel command line" + default "root=/dev/sda3 console=ttyLMS0 console=ttyS0,115200 sclkr=no init=/bin/bash transparent_hugepage=madvise iommu=0" + +config CMDLINE_OVERRIDE + bool "Built-in command line overrides boot loader arguments" + help + Set this option to 'Y' to have the kernel ignore the boot loader + command line, and use ONLY the built-in command line.
+ + This is used to work around broken boot loaders. This should + be set to 'N' under normal conditions. + +# +# Define implied options from the CPU selection here +# + +config SMP + bool "Symmetric multi-processing support" + default n + ---help--- + This enables support for systems with more than one CPU. If you have + a system with only one CPU, like most personal computers, say N. If + you have a system with more than one CPU, say Y. + + If you say N here, the kernel will run on single and multiprocessor + machines, but will use only one CPU of a multiprocessor machine. If + you say Y here, the kernel will run on many, but not all, + singleprocessor machines. On a singleprocessor machine, the kernel + will run faster if you say N here. + + Note that if you say Y here and choose architecture "586" or + "Pentium" under "Processor family", the kernel will not work on 486 + architectures. Similarly, multiprocessor kernels for the "PPro" + architecture may not work on all Pentium based boards. + + People using multiprocessor machines who say Y here should also say + Y to "Enhanced Real Time Clock Support", below. The "Advanced Power + Management" code will be disabled if you say Y here. + + See also the , + , + and the SMP-HOWTO available at + . + + If you don't know what to do here, say N. + +config NR_CPUS + int "Maximum number of CPUs (2-255)" + range 2 255 + depends on SMP + default "2" + help + This allows you to specify the maximum number of CPUs which this + kernel will support. The maximum supported value is 255 and the + minimum value which makes sense is 2. + + This is purely to save memory - each supported CPU adds + approximately eight kilobytes to the kernel image. + +config SCHED_MC + def_bool y + depends on NUMA + ---help--- + Multi-core scheduler support improves the CPU scheduler's decision + making when dealing with multi-core CPU chips at a cost of slightly + increased overhead in some places. 
+ +config MCST + bool "MCST additions" + default y + +config ARCH_HAS_CACHE_LINE_SIZE + def_bool y + +config GENERIC_TIME + def_bool y + +config GENERIC_TIME_VSYSCALL + def_bool y + +config GENERIC_CMOS_UPDATE + def_bool y + +config GENERIC_CLOCKEVENTS + def_bool y + +config SCLKR_CLOCKSOURCE + def_bool y + +config BIOS + bool "Enable bios chipset settings" + default y + help + Enable bios chipset settings such as io apic serial ports + keyboard, mouse etc. + +config ENABLE_BIOS_MPTABLE + bool "Set mptable" + depends on BIOS && L_IO_APIC + default y + help + Constructing bios MP configuration table. + +config CEPIC_TIMER_FREQUENCY + int "CEPIC timer and %scklr register frequency as Mhz" + depends on ENABLE_BIOS_MPTABLE + range 1 1000 + default 100 + help + The simulator may have a CEPIC timer frequency different from + the actual frequency of the hardware implementation. + Define here the frequency of simulator in such case or if it does + not suit the defaul value. + +config ENABLE_ELBRUS_PCIBIOS + bool "Enable pcibios" + depends on BIOS + default y + help + Enable pci initialization in bios. + +config ENABLE_IDE + bool "Enable hard drive" + depends on BIOS + default y + help + Enable ide in southbridge chip. + +config ENABLE_KEYBOARD + bool "Enable keyboard" + depends on BIOS + default y + help + Enable keyboard in superio chip. + +config ENABLE_MOUSE + bool "Enable mouse" + depends on BIOS + default y + help + Enable mouse in superio chip. + +config ENABLE_FLOPPY + bool "Enable floppy" + depends on BIOS + default y + help + Enable fdc in superio chip. + +config ENABLE_MGA + bool "Enable MGA" + depends on BIOS + default y + + +config ENABLE_RTC + bool "Enable rtc" + depends on BIOS + default y + help + Enable rtc in southbridge chip. + +config ENABLE_SERIAL + bool "Enable serial ports" + depends on BIOS + default y + help + Enable com1 com2 in superio chip. 
+ +config ENABLE_PARALLEL_PORT + bool "Enable parallel port" + depends on BIOS + default y + help + Enable lpt in superio chip. + +config ENABLE_IOAPIC + bool "Enable IO APIC" + depends on BIOS + default y + help + Set southbridge XBCS register io apic enable. + +config ADC_DISABLE + bool "Disable second IO-link on e2c+ connected to ADC" + depends on BIOS + default n + help + In some case second link is not connected to anywhere + or not work properly + +config OPTIMIZE_REGISTERS_ACCESS + bool "Optimize registers access" + default y + help + Disabling this option will prohibit lcc from optimizing + registers access instructions (rr/rw). + +config E2K_STACKS_TRACER + bool "Trace kernel stacks usage" + default n + depends on !STACK_TRACER + select FUNCTION_TRACER + select STACKTRACE + select KALLSYMS + help + This special tracer records the maximum stack footprint of the + kernel and displays it in /sys/kernel/debug/tracing/stack_trace. + You can use 'stack_trace_filter' file to select which functions + to check the stacks on. + + This tracer works by hooking into every function call that the + kernel executes, and keeping a maximum stack depth value and + stack-trace saved. If this is configured with DYNAMIC_FTRACE + then it will not have any overhead while the stack tracer + is disabled. + + To enable the stack tracer on bootup, pass in 'stacktrace' + on the kernel command line. You can pass 'stacktrace=kernel' + to trace only kernel part of hardware stacks. + + The stack tracer can also be enabled or disabled via the + sysctl kernel.stack_tracer_enabled + + There is also sysctl kernel.stack_tracer_kernel_only knob + for controlling which threads to trace. + + Pass stacktrace_filter= to the kernel command line to set + specific functions to check the stacks on. + + Say N if unsure. 
+ +# Needed to initialize "fp" field which is used to correct fp stack +# in longjmp/copy_thread +# +# Also used to work around problems with leaf call optimization +# (for details see comments in _mcount()). +config HAVE_FUNCTION_GRAPH_FP_TEST + def_bool y + depends on FUNCTION_GRAPH_TRACER + +config EPROF_KERNEL + bool "Enable libeprof-based kernel profiling" + default n + ---help--- + This option enables libeprof-based branch profiling in kernel. + + If unsure, say N. +endmenu + +source "arch/l/Kconfig" + + + +menu "Power management options" + +source "kernel/power/Kconfig" + +source "drivers/acpi/Kconfig" + +source "drivers/cpufreq/Kconfig" + +source "drivers/cpuidle/Kconfig" + +endmenu + + + +menu "Bus options (PCI, ISA)" + +source "arch/l/pci/Kconfig" # should be set allways for e2k # + +config PCI_USE_VECTOR + bool "Vector-based interrupt indexing (MSI)" + depends on L_LOCAL_APIC && L_IO_APIC + default n + help + This replaces the current existing IRQ-based index interrupt scheme + with the vector-base index scheme. The advantages of vector base + over IRQ base are listed below: + 1) Support MSI implementation. + 2) Support future IOxAPIC hotplug + + Note that this allows the device drivers to enable MSI, Message + Signaled Interrupt, on all MSI capable device functions detected. + Message Signal Interrupt enables an MSI-capable hardware device to + send an inbound Memory Write on its PCI bus instead of asserting + IRQ signal on device IRQ pin. + + If you don't know what to do here, say N. + +config ISA + bool "ISA support" + help + Find out whether you have ISA slots on your motherboard. ISA is the + name of a bus system, i.e. the way the CPU talks to the other stuff + inside your box. Other bus systems are PCI, EISA, MicroChannel + (MCA) or VESA. ISA is an older system, now being displaced by PCI; + newer boards don't support it. If you have ISA, say Y, otherwise N. 
+ +endmenu + + +config SYSVIPC_COMPAT + def_bool y + depends on SYSVIPC + +source "arch/e2k/Kconfig.virt" diff --git a/arch/e2k/Kconfig.debug b/arch/e2k/Kconfig.debug new file mode 100644 index 000000000000..4b5273835aea --- /dev/null +++ b/arch/e2k/Kconfig.debug @@ -0,0 +1,51 @@ +config TRACE_IRQFLAGS_SUPPORT + def_bool y + +config E2K_DEBUG_KERNEL + bool "Enable e2k arch kernel debugging" + depends on DEBUG_KERNEL + default y + +config NESTED_PAGE_FAULT_INJECTION + bool "Inject nested page faults when handling trap cellar" + depends on E2K_DEBUG_KERNEL && DEBUG_FS + default n + help + Makes do_page_fault() return success without doing anything with + some probability. This way recovery operation will fail, and the + code path in nested page fault handler will be tested. + +config DEBUG_LCC_VOLATILE_ATOMIC + bool "Mark GNU asm with atomic operations as volatile" + depends on E2K_DEBUG_KERNEL + default n + +config DEBUG_IRQ + bool "Check of IRQ enabled/disabled using both PSR & UPSR" + depends on E2K_DEBUG_KERNEL + default n + +config DEBUG_PT_REGS + bool "Enable checker of pt_regs structure linking" + depends on E2K_DEBUG_KERNEL + default n + +config DEBUG_KMEM_AREA + bool "Enable checker of kernel memory area getting and freeing" + depends on E2K_DEBUG_KERNEL + default n + +config DEBUG_STACK_USAGE + bool "Stack utilization instrumentation" + depends on DEBUG_KERNEL + help + Enables the display of the minimum amount of free stack which each + task has ever had available in the sysrq-T and sysrq-P debug output. + + This option will slow down process creation somewhat. 
+ +config ARCH_SUPPORTS_DEBUG_PAGEALLOC + def_bool y + +source "arch/l/Kconfig.debug" + diff --git a/arch/e2k/Kconfig.virt b/arch/e2k/Kconfig.virt new file mode 100644 index 000000000000..7f5c42530343 --- /dev/null +++ b/arch/e2k/Kconfig.virt @@ -0,0 +1,22 @@ +# +# Virtualiztion configuration +# + +menuconfig VIRTUALIZATION + bool "Virtualization" + default n + ---help--- + Say Y here to get to see options for using your Linux host to run other + operating systems inside virtual machines (guests). + This option alone does not add any kernel code. + + If you say N, all options in this submenu will be skipped and disabled. + +if VIRTUALIZATION + +source "arch/e2k/kvm/Kconfig" + +select VIRTIO +source "drivers/virtio/Kconfig" + +endif # VIRTUALIZATION diff --git a/arch/e2k/Makefile b/arch/e2k/Makefile new file mode 100644 index 000000000000..26571176845d --- /dev/null +++ b/arch/e2k/Makefile @@ -0,0 +1,252 @@ +# e2k/Makefile +# +# This file is included by the global makefile so that you can add your own +# architecture-specific flags and dependencies. Remember to do have actions +# for "archclean" and "archdep" for cleaning up and making dependencies for +# this architecture +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. 
+ +KBUILD_DEFCONFIG ?= defconfig + +AS = $(shell $(CC) -print-prog-name=as) +OBJDUMP = $(shell $(CC) -print-prog-name=objdump) +LD = $(shell $(CC) -print-prog-name=ld) +OBJCOPY = $(shell $(CC) -print-prog-name=objcopy) + +KBUILD_CFLAGS += -fkernel -gline -masm-inline $(call cc-option,-fforbid-fp) \ + $(call cc-option,-fmax-errors=5) $(call cc-option,-fno-loop-apb) \ + -fno-ident + +ifeq ($(PROFILE_GENERATE), 1) +KBUILD_CFLAGS += -fprofile-generate-kernel +endif +ifeq ($(origin PROFILE_USE), undefined) +else +KBUILD_CFLAGS += -fprofile-use="$(PROFILE_USE)" +endif + +KBUILD_CFLAGS += $(call cc-option,-finline-functions,) \ + $(call cc-option,-finline-functions-called-once,) + +# Some uninteresting or broken warnings can be disabled with #pragma's only +KBUILD_CFLAGS += -Wno-array-bounds -Wno-duplicate-type-qualifier \ + -Wno-builtin-functions-redefined -Wno-reduced-alignment \ + -Wno-unused-value -Wno-overflow -Wno-signed-one-bit-field \ + -include $(srctree)/arch/e2k/include/asm/override-lcc-warnings.h + +LDFLAGS_vmlinux := +CHECKFLAGS += -D__e2k__ + +CFLAGS += -pipe -D__linux__ + +KBUILD_CFLAGS += $(CFLAGS) + +ifdef CONFIG_SMP_DAM_BUG + KBUILD_CFLAGS += -fno-dam-call +endif + +CFLAGS_GENERIC := -march=elbrus-v$(CONFIG_E2K_MINVER) +CFLAGS_ES2 := -mtune=elbrus-2c+ +CFLAGS_E2S := -mtune=elbrus-4c +CFLAGS_E8C := -mtune=elbrus-8c +CFLAGS_E1CP := -mtune=elbrus-1c+ +CFLAGS_E8C2 := -mtune=elbrus-8c2 +CFLAGS_E12C := -mtune=elbrus-12c +CFLAGS_E16C := -mtune=elbrus-16c +CFLAGS_E2C3 := -mtune=elbrus-2c3 + +CFLAGS_ALL_CPUS := $(CFLAGS_ES2) $(CFLAGS_E2S) $(CFLAGS_E8C) $(CFLAGS_E1CP) \ + $(CFLAGS_E8C2) $(CFLAGS_E12C) $(CFLAGS_E16C) $(CFLAGS_E2C3) +export CFLAGS_ALL_CPUS + +CFLAGS_E2K_SIC := $(CFLAGS_ES2) + +export CFLAGS_ES2 CFLAGS_E2S CFLAGS_E8C CFLAGS_E1CP CFLAGS_E8C2 CFLAGS_E2C3 \ + CFLAGS_E12C CFLAGS_E16C CFLAGS_E2K_SIC + +ifeq ($(CONFIG_E2K_MACHINE),y) + ifeq ($(CONFIG_E2K_ES2_DSP),y) + KBUILD_CFLAGS += $(CFLAGS_ES2) + KBUILD_AFLAGS += $(CFLAGS_ES2) + TARGET_MDL := 04 + else 
+ ifeq ($(CONFIG_E2K_ES2_RU),y) + KBUILD_CFLAGS += $(CFLAGS_ES2) + KBUILD_AFLAGS += $(CFLAGS_ES2) + TARGET_MDL := 06 + else + ifeq ($(CONFIG_E2K_E2S),y) + KBUILD_CFLAGS += $(CFLAGS_E2S) + KBUILD_AFLAGS += $(CFLAGS_E2S) + TARGET_MDL := 03 + else + ifeq ($(CONFIG_E2K_E8C),y) + KBUILD_CFLAGS += $(CFLAGS_E8C) + KBUILD_AFLAGS += $(CFLAGS_E8C) + TARGET_MDL := 07 + else + ifeq ($(CONFIG_E2K_E1CP),y) + KBUILD_CFLAGS += $(CFLAGS_E1CP) + KBUILD_AFLAGS += $(CFLAGS_E1CP) + TARGET_MDL := 08 + else + ifeq ($(CONFIG_E2K_E8C2),y) + KBUILD_CFLAGS += $(CFLAGS_E8C2) + KBUILD_AFLAGS += $(CFLAGS_E8C2) + TARGET_MDL := 09 + else + ifeq ($(CONFIG_E2K_E12C),y) + KBUILD_CFLAGS += $(CFLAGS_E12C) + KBUILD_AFLAGS += $(CFLAGS_E12C) + TARGET_MDL := 0a + else + ifeq ($(CONFIG_E2K_E16C),y) + KBUILD_CFLAGS += $(CFLAGS_E16C) + KBUILD_AFLAGS += $(CFLAGS_E16C) + TARGET_MDL := 0b + else + ifeq ($(CONFIG_E2K_E2C3),y) + KBUILD_CFLAGS += $(CFLAGS_E2C3) + KBUILD_AFLAGS += $(CFLAGS_E2C3) + TARGET_MDL := 0c + else + $(error Invalid e2k machine type) + endif # ifeq ($(CONFIG_E2K_E2C3),y) + endif # ifeq ($(CONFIG_E2K_E16C),y) + endif # ifeq ($(CONFIG_E2K_E12C),y) + endif # ifeq ($(CONFIG_E2K_E8C2),y) + endif # ifeq ($(CONFIG_E2K_E1CP),y) + endif # ifeq ($(CONFIG_E2K_E8C),y) + endif # ifeq ($(CONFIG_E2K_E2S),y) + endif # ifeq ($(CONFIG_E2K_ES2_RU),y) + endif # ifeq ($(CONFIG_E2K_ES2_DSP),y) +else # !
ifeq ($(CONFIG_E2K_MACHINE),y) +KBUILD_CFLAGS += $(CFLAGS_GENERIC) +KBUILD_AFLAGS += $(CFLAGS_GENERIC) +TARGET_MDL := 00 +endif + +KBUILD_LDFLAGS += --relaxed-e2k-machine-check +KBUILD_CFLAGS += $(cflags-y) + +libs-y += arch/e2k/lib/ +core-y += arch/e2k/kernel/ \ + arch/e2k/mm/ \ + arch/e2k/p2v/ \ + arch/e2k/fast_syscalls/ + +core-$(CONFIG_PROTECTED_MODE) += arch/e2k/3p/ + +drivers-$(CONFIG_PCI) += arch/e2k/pci/ + +# suspend and hibernation support +drivers-$(CONFIG_PM) += arch/e2k/power/ + +#KVM hypervisor and guest support +core-$(CONFIG_KVM) += arch/e2k/kvm/ + +# Elbrus common modules +core-y += arch/l/ +drivers-$(CONFIG_PCI) += arch/l/pci/ + +boot := arch/e2k/boot +all: es2boot + +MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot + +.PHONY: clean archclean archmrproper archdep bootimage image zImage + +es2boot: vmlinux + $(Q)$(MAKE) $(build)=$(boot) CONFIG_ES2=y boot + +e2sboot: vmlinux + $(Q)$(MAKE) $(build)=$(boot) CONFIG_E2S=y boot + +e8cboot: vmlinux + $(Q)$(MAKE) $(build)=$(boot) CONFIG_E8C=y boot + +e1cpboot: vmlinux + $(Q)$(MAKE) $(build)=$(boot) CONFIG_E1CP=y boot + +e8c2boot: vmlinux + $(Q)$(MAKE) $(build)=$(boot) CONFIG_E8C2=y CONFIG_E8C=y boot + +e12cboot: vmlinux + $(Q)$(MAKE) $(build)=$(boot) CONFIG_E12C=y boot + +e16cboot: vmlinux + $(Q)$(MAKE) $(build)=$(boot) CONFIG_E16C=y boot + +e2c3boot: vmlinux + $(Q)$(MAKE) $(build)=$(boot) CONFIG_E2C3=y boot + +image: vmlinux + $(Q)$(MAKE) $(build)=$(boot) CONFIG_BOOT=y $(objtree)/image.boot + $(Q)echo "Target mdl: $(TARGET_MDL)"; \ + echo $(TARGET_MDL) | \ + xxd -r -p | \ + dd of=$(objtree)/image.boot bs=1 seek=258 count=1 conv=notrunc 2>/dev/null; \ + echo 00000000 | xxd -r -p | \ + dd of=$(objtree)/image.boot bs=1 seek=1588 count=4 conv=notrunc 2>/dev/null; \ + e2k_kernel_csum=`cksum $(objtree)/image.boot | awk '{ printf "%08x\n", $$1 }'`; \ + echo "Kernel image check sum: $$e2k_kernel_csum"; \ + echo $$e2k_kernel_csum | \ + sed 's/\(.\)\(.\)\(.\)\(.\)\(.\)\(.\)\(.\)\(.\)/\7\8\5\6\3\4\1\2/' | \ + xxd -r -p | \ + 
dd of=$(objtree)/image.boot bs=1 seek=1588 count=4 conv=notrunc 2>/dev/null; \ + echo 'Kernel: image.boot is ready' ' (#'`cat .version`')' + +zImage: vmlinux + $(Q)$(MAKE) $(build)=$(boot) CONFIG_BOOT=y $(objtree)/zImage + $(Q)echo "Target mdl: $(TARGET_MDL)"; \ + echo $(TARGET_MDL) | \ + xxd -r -p | \ + dd of=$(objtree)/zImage bs=1 seek=258 count=1 conv=notrunc 2>/dev/null; \ + echo 00000000 | xxd -r -p | \ + dd of=$(objtree)/zImage bs=1 seek=1588 count=4 conv=notrunc 2>/dev/null; \ + e2k_kernel_csum=`cksum $(objtree)/zImage | awk '{ printf "%08x\n", $$1 }'`; \ + echo "Kernel image check sum: $$e2k_kernel_csum"; \ + echo $$e2k_kernel_csum | \ + sed 's/\(.\)\(.\)\(.\)\(.\)\(.\)\(.\)\(.\)\(.\)/\7\8\5\6\3\4\1\2/' | \ + xxd -r -p | \ + dd of=$(objtree)/zImage bs=1 seek=1588 count=4 conv=notrunc 2>/dev/null; \ + echo 'Kernel: zImage is ready' ' (#'`cat .version`')' + +image.boot: bootimage +bootimage: image + +archclean: + $(Q)$(MAKE) $(clean)=arch/e2k/boot + +archmrproper: + +archdep: + @$(MAKEBOOT) dep + +install-headers: + @$(MAKEBOOT) install-headers + +install-includes: include/linux/version.h arch/e2k/include FORCE + $(CONFIG_SHELL) scripts/gen-osl-include -l $(srctree) -r $(ROOT_WA) + +build-install: FORCE + $(CONFIG_SHELL) scripts/gen-osl-build -l $(srctree) -m $(MODLIB) + +define archhelp + echo '* image/bootimage - Kernel boot image (image.boot)' + echo ' zImage - Compressed kernel boot image (image.boot)' + echo ' install-headers - Install kernel headers in ' + echo ' /usr/include' + echo ' es2boot - Build kernel boot image with small embedded boot for es2 simulator' + echo ' e2sboot - Build kernel boot image with small embedded boot for e2s simulator' + echo ' e8cboot - Build kernel boot image with small embedded boot for e8c simulator' + echo ' e1cpboot - Build kernel boot image with small embedded boot for e1cp simulator' + echo ' e8c2boot - Build kernel boot image with small embedded boot for e8c2 simulator' + echo ' e12cboot - Build kernel boot image 
with small embedded boot for e12c simulator' + echo ' e16cboot - Build kernel boot image with small embedded boot for e16c simulator' + echo ' e2c3boot - Build kernel boot image with small embedded boot for e2c3 simulator' + echo ' [with_kernel=1] - When building boot, build in compressed kernel into the boot image' +endef diff --git a/arch/e2k/boot/.gitignore b/arch/e2k/boot/.gitignore new file mode 100644 index 000000000000..40bac36bdfcb --- /dev/null +++ b/arch/e2k/boot/.gitignore @@ -0,0 +1,5 @@ +image +bimage +cimage +image.bios +System.map \ No newline at end of file diff --git a/arch/e2k/boot/Am85C30.c b/arch/e2k/boot/Am85C30.c new file mode 100644 index 000000000000..6ebf40ff889d --- /dev/null +++ b/arch/e2k/boot/Am85C30.c @@ -0,0 +1,237 @@ + +#include + +#include +#include +#include + +#include "bios/pci.h" +#include "bios/bios.h" +#include "bios/southbridge.h" +#include "Am85C30.h" + +#undef DEBUG_CONSOLE_MODE +#undef DebugC +#define DEBUG_CONSOLE_MODE 0 /* Console initialization */ +#define DebugC if (DEBUG_CONSOLE_MODE) printk + +#define PCI_DEVICE_ID_PAR_SER 0x8000 + +#ifdef CONFIG_EIOH +#define PCI_DEV_ID_SERIAL PCI_DEVICE_ID_MCST_SERIAL +#define SERIAL_BAR 0 +#else +#define PCI_DEV_ID_SERIAL PCI_DEVICE_ID_MCST_PARALLEL_SERIAL +#define SERIAL_BAR 1 +#endif + +unsigned long com_port; + +unsigned long ch_a_control; +unsigned long ch_a_data; +unsigned long ch_b_control; +unsigned long ch_b_data; + +#define NOT_BIOS 0 + +extern boot_info_t *boot_info; + +static void com_outb(u64 port, u8 byte) +{ + NATIVE_WRITE_MAS_B(port, byte, MAS_IOADDR); +} + +static u8 com_inb(u64 port) +{ + return NATIVE_READ_MAS_B(port, MAS_IOADDR); +} + +static u8 com_inb_command(u64 port, u8 reg_num) +{ + NATIVE_WRITE_MAS_B(port, reg_num, MAS_IOADDR); + return NATIVE_READ_MAS_B(port, MAS_IOADDR); +} + +static void com_outb_command(u64 port, u8 reg_num, u8 val) +{ + NATIVE_WRITE_MAS_B(port, reg_num, MAS_IOADDR); + NATIVE_WRITE_MAS_B(port, val, MAS_IOADDR); +} + +void 
+serial_putc(unsigned long com_port, unsigned char c) +{ + while ((com_inb_command(com_port, RR0) & D2) == 0){ + } + com_outb((com_port + 0x01), c); +} + +unsigned char +serial_getc(unsigned long com_port) +{ + while (((com_inb_command(com_port, RR0)) & D0) == 0){ + } + return com_inb(com_port + 0x01); +} + +unsigned short zilog_serial_init(void) +{ + struct bios_pci_dev *dev; + unsigned char val = 0; + + rom_printk("Scanning PCI bus for ieee1284/rs232 device ...\n"); + dev = bios_pci_find_device(INTEL_MULTIFUNC_VENDOR, + PCI_DEVICE_ID_PAR_SER, NULL); + if (dev == NULL) { + dev = bios_pci_find_device(PCI_VENDOR_ID_MCST_TMP, + PCI_DEV_ID_SERIAL, dev); + } + if (dev){ + ch_a_control = (dev->base_address[SERIAL_BAR] & ~0x01); + ch_a_data = (dev->base_address[SERIAL_BAR] & ~0x01) + 0x01; + ch_b_control = (dev->base_address[SERIAL_BAR] & ~0x01) + 0x02; + ch_b_data = (dev->base_address[SERIAL_BAR] & ~0x01) + 0x03; + DebugC("zilog_serial_init: ch_a_control = 0x%x, ch_a_data = 0x%x\n" + " ch_b_control = 0x%x, ch_b_data = 0x%x\n", + (unsigned int)ch_a_control, (unsigned int)ch_a_data, + (unsigned int)ch_b_control, (unsigned int)ch_b_data); + com_port = ch_a_control; + /* Hardware Reset */ + val = (val | D7 | D6); /* Force Hardware Reset */ + DebugC("zilog_serial_init: Hardware Reset: WR9 val = 0x%x\n", val); + com_outb_command(ch_a_control, WR9, val); + /* It seems not neccesary due to WR9 sharing for both channels */ + com_outb_command(ch_b_control, WR9, val); +#if NOT_BIOS + /* Enabling interrupts */ + val = 0; val |= D3; /* Master Interrupt Enable */ + DebugC("zilog_serial_init: Hardware Reset: WR9 val = 0x%x\n", val); + com_outb_command(ch_a_control, WR9, val); + /* It seems not neccesary due to WR9 sharing for both channels */ + com_outb_command(ch_b_control, WR9, val); +#else + /* Interrupts disabled */ +#endif +#if NOT_BIOS + /* Detailed interrupt installations */ + val = 0; + val |= D1; /* Transmit interrupt enabling. 
An interrupt will be + * generated each time a packet is transmitted */ + val |= D2; /* The parity error for recieved packet is Special + * Condition from now */ + val |= D4; /* Enabling interrupt for each packet recieving and when + * Special Condition occurs */ + DebugC("zilog_serial_init: Hardware Reset: WR1 val = 0x%x\n", val); + com_outb_command(ch_a_control, WR1, val); + com_outb_command(ch_b_control, WR1, val); +#else + /* poll mode */ + val = 0; + DebugC("zilog_serial_init: Hardware Reset: WR1 val = 0x%x\n", val); + com_outb_command(ch_a_control, WR1, val); + com_outb_command(ch_b_control, WR1, val); +#endif + /* Operation mode */ + val = 0; +#if NOT_BIOS + val |= D0; /* Parity Enable */ + /* Parity bit is present */ + val |= (D2 | D3); /* Setup stop bits, if any setuped the mode is asynchronus */ + /* 2 stop bits */ +#else + val |= D2; /* stop bit = 1 */ +#endif + val |= D6; /* x16 mode */ + DebugC("zilog_serial_init: Hardware Reset: WR4 val = 0x%x\n", val); + com_outb_command(ch_a_control, WR4, val); + com_outb_command(ch_b_control, WR4, val); + /* xN Mode Enable */ + val = 0; + val |= D7; /* xN Mode Enable */ + DebugC("zilog_serial_init: Hardware Reset: WR7 val = 0x%x\n", val); + com_outb_command(ch_a_control, WR7, val); + com_outb_command(ch_b_control, WR7, val); + /* setup xN constant */ + val = 0; + val |= (D0 | D2 | D4); /* 15_h = 21_d; xN = 0.5 * 21 = 10.5 */ + DebugC("zilog_serial_init: Hardware Reset: WR6 val = 0x%x\n", val); + com_outb_command(ch_a_control, WR6, val); + com_outb_command(ch_b_control, WR6, val); + /* Bits per symbol to recieve */ + val = 0; + val |= (D7 | D6); /* 8 bits per symbol */ + DebugC("zilog_serial_init: Hardware Reset: WR3 val = 0x%x\n", val); + com_outb_command(ch_a_control, WR3, val); + com_outb_command(ch_b_control, WR3, val); + /* Bits per symbol to transmit */ + val = 0; + val |= (D6 | D5); /* 8 bits per symbol */ + DebugC("zilog_serial_init: Hardware Reset: WR5 val = 0x%x\n", val); + com_outb_command(ch_a_control, 
WR5, val); + com_outb_command(ch_b_control, WR5, val); + /* Encoding setup */ + val = 0; +#if NOT_BIOS + val |= D5; /* NRZI encoding */ +#else + /* NRZ encoding */ +#endif + DebugC("zilog_serial_init: Hardware Reset: WR10 val = 0x%x\n", val); + com_outb_command(ch_a_control, WR10, val); + com_outb_command(ch_b_control, WR10, val); + /* Clock setup */ + val = 0; + val |= (D4 | D6); /* Transmit Clock = BRG output; + * Receive Clock = BRG output */ + DebugC("zilog_serial_init: Hardware Reset: WR11 val = 0x%x\n", val); + com_outb_command(ch_a_control, WR11, val); + com_outb_command(ch_b_control, WR11, val); + /* Lower Byte of Time Constant */ + val = 0; + val |= (D4 | D3 | D2 | D1); /* = 1e_h (4800) */ + DebugC("zilog_serial_init: Hardware Reset: WR12 val = 0x%x\n", val); + com_outb_command(ch_a_control, WR12, val); + com_outb_command(ch_b_control, WR12, val); + /* Upper Byte of Time Constant */ + val = 0; /* determine 115200 baud rate when pclk = 4.9152 MHz */ + DebugC("zilog_serial_init: Hardware Reset: WR13 val = 0x%x\n", val); + com_outb_command(ch_a_control, WR13, val); + com_outb_command(ch_b_control, WR13, val); + /* Determine synchronization source for BGR */ + val = 0; /* the source is RTxC pin */ + DebugC("zilog_serial_init: Hardware Reset: WR14 val = 0x%x\n", val); + com_outb_command(ch_a_control, WR14, val); + com_outb_command(ch_b_control, WR14, val); + /* switch on the reciver */ + val = 0; + val |= D0; /* turn on */ + val |= (D7 | D6); /* 8 bits per symbol */ + DebugC("zilog_serial_init: Hardware Reset: WR3 val = 0x%x\n", val); + com_outb_command(ch_a_control, WR3, val); + com_outb_command(ch_b_control, WR3, val); + /* switch on the transmitter */ + val = 0; + val |= D3; /* turn on */ + val |= (D6 | D5); /* 8 bits per symbol */ + DebugC("zilog_serial_init: Hardware Reset: WR5 val = 0x%x\n", val); + com_outb_command(ch_a_control, WR5, val); + com_outb_command(ch_b_control, WR5, val); + com_port = ch_a_control; + rom_printk("Initialization compleete "); + 
if (boot_info) { + boot_info->serial_base = com_port; + rom_printk("AM85C30 Serial console enabled at " + "0x%X base\n", com_port); + } else { + rom_printk("Unable to init boot_info BUG!!!\n"); + } + hardware.serial = 1; + } else { + rom_printk("!!! NOT FOUND !!!\n"); + } + + return 0; +} + + + diff --git a/arch/e2k/boot/Am85C30.h b/arch/e2k/boot/Am85C30.h new file mode 100644 index 000000000000..060c8ee893f6 --- /dev/null +++ b/arch/e2k/boot/Am85C30.h @@ -0,0 +1,416 @@ + +#ifdef CONFIG_E2K_SIC +extern unsigned short zilog_serial_init(void); +#endif + +/* WRITE Registers */ + +#define WR0 0x00 +#define WR1 0x01 +#define WR2 0x02 +#define WR3 0x03 +#define WR4 0x04 +#define WR5 0x05 +#define WR6 0x06 +#define WR7 0x07 +#define WR8 0x08 +#define WR9 0x09 +#define WR10 0x0a +#define WR11 0x0b +#define WR12 0x0c +#define WR13 0x0d +#define WR14 0x0e +#define WR15 0x0f + +/* READ (Status) Registers */ + +#define RR0 0x00 +#define RR1 0x01 +#define RR2 0x02 +#define RR3 0x03 +#define RR8 0x08 +#define RR10 0x0a +#define RR12 0x0c +#define RR13 0x0d + +#define D0 (0x01 << 0) +#define D1 (0x01 << 1) +#define D2 (0x01 << 2) +#define D3 (0x01 << 3) +#define D4 (0x01 << 4) +#define D5 (0x01 << 5) +#define D6 (0x01 << 6) +#define D7 (0x01 << 7) + +/* WR0 */ +/* D2,D1,D0 +* Register Access Pointer +* +* 000 - N0, [N8]* +* 001 - N1, [N9]* +* 010 - N2, [N10]* +* 011 - N3, [N11]* +* 100 - N4, [N12]* +* 101 - N5, [N13]* +* 110 - N6, [N14]* +* 111 - N7, [N15]* +* +* if Point High Register Group = 1 +* +* D5,D4,D3 +* +* SCC Command +* +* 000 - Null Code +* 001 - Point High Register Group +* 010 - Reset Ext/Status Interrupts +* 011 - Send Abort +* 100 - Enable Int. on Next Rx Character +* 101 - Reset Tx Int. Pending +* 110 - Error Reset +* 111 - Reset Highest IUS +* +* D7,D6 +* SCC Command +* +* 00 - Null Code +* 01 - Reset Rx CRC Checker +* 10 - Reset Tx CRC Generator +* 11 - Reset Tx Underrun/EOM Latch +*/ + +/* WR1 */ +/* D0 +* Ext. Int. Enable +* D1 +* Tx Int. 
Enable +* D2 +* Parity is Special Condition +* D4,D3 +* Rx Int Mode +* +* 00 - Rx Int Disable +* 01 - Rx Int on First Char. or Special Condition +* 10 - Int on All Rx Char. or Special Condition +* 11 - Rx Int. on Special Condition Only +* D5 +* Wait/DMA Request on Receive/Transmit +* D6 +* Wait/DMA Request Function +* D7 +* Wait/DMA Request Enable +*/ + +/* WR2 */ +/* D7 - D0 +* Interrupt Vector +*/ + +/* WR3 */ +/* D0 +* Rx Enable +* D1 +* Sync Character Load Inhibit +* D2 +* Address Search Mode (SDLC) +* D3 +* Rx CRC Enable +* D4 +* Enter Hunt Mode +* D5 +* Auto Enable +* D7,D6 +* +* 00 - Rx 5 Bits / Character +* 01 - Rx 6 Bits / Character +* 10 - Rx 7 Bits / Character +* 11 - Rx 8 Bits / Character +*/ + +/* WR4 */ +/* D0 +* ParityEnable +* D1 +* Parity Even(0) / Odd(1) +* D3,D2 +* +* 00 - Sync Modes Enable +* 01 - 1 Stop Bit / Character +* 10 - 1.5 Stop Bits / Character +* 11 - 2 Stop Bits / Character +* D5,D4 +* +* 00 - 8-Bit Sync Character +* 01 - 16-Bit Sync Character +* 10 - SDLC Mode +* 11 - External Sync Mode +* D7,D6 +* +* 00 - X1 Clock Mode +* 01 - X16 Clock Mode +* 10 - X32 Clock Mode +* 11 - X64 Clock Mode +*/ + +/* WR5 */ +/* D0 +* Tx CRC Enable +* D1 +* RTS +* D2 +* SDLC-/CRC-16 +* D3 +* Tx Enable +* D4 +* Send Break +* D6,D5 +* +* 00 - Tx 5 Bits / Character +* 01 - Tx 6 Bits / Character +* 10 - Tx 7 Bits / Character +* 11 - Tx 8 Bits / Character +* D7 +* DTR +*/ + +/* WR6 */ +/* D5-D0 +* xN constant +* D7,D6 +* Reserved (not used in asynchronous mode) +*/ + +/* WR7 */ +/* D6-D0 +* Reserved (not used in asynchronous mode) +* D7 +* xN Mode Enable +*/ + +/* WR8 */ +/* D7-D0 +* Transmit Buffer +*/ + +/* WR9 */ +/* D0 +* Vector Includes Status +* D1 +* No Vector +* D2 +* Disable Lower Chain +* D3 +* Master Interrupt Enable +* D4 +* Status High/Low_ +* D5 +* Interrupt Masking Without INTACK_ +* D7-D6 +* +* 00 - No Reset +* 01 - Channel B Reset +* 10 - Channel A Reset +* 11 - Force Hardware Reset +*/ + +/* WR10 */ +/* D0 +* 6 bit / 8 bit SYNC +* D1 +* Loop 
Mode +* D2 +* Abort/Flag on Underrun +* D3 +* Mark/Flag Idle +* D4 +* Go Active on Poll +* D6-D5 +* +* 00 - NRZ +* 01 - NRZI +* 10 - FM1 (Transition = 1) +* 11 - FM0 (Transition = 0) +* D7 +* CRC Preset '1' or '0' +*/ + +/* WR11 */ +/* D1-D0 +* +* 00 - TRxC Out = XTAL output +* 01 - TRxC Out = Transmit Clock +* 10 - TRxC Out = BRG output +* 11 - TRxC Out = DPLL output +* D2 +* TRxC O/I +* D4-D3 +* +* 00 - Transmit Clock = RTxC pin +* 01 - Transmit Clock = TRxC pin +* 10 - Transmit Clock = BRG output +* 11 - Transmit Clock = DPLL output +* D6-D5 +* +* 00 - Receive Clock = RTxC pin +* 01 - Receive Clock = TRxC pin +* 10 - Receive Clock = BRG output +* 11 - Receive Clock = DPLL output +* D7 +* RTxC XTAL / NO XTAL +*/ + +/* WR12 */ +/* D7-D0 +* Lower Byte of Time Constant +*/ + +/* WR13 */ +/* D7-D0 +* Upper Byte of Time Constant +*/ + +/* WR14 */ +/* D0 +* BRG Enable +* D1 +* BRG Source +* D2 +* DTR / REQUESTt Function +* D3 +* Auto Echo +* D4 +* Local Loopback +* D7-D5 +* +* 000 - Null Command +* 001 - Enter Search Mode +* 010 - Reset Missing Clock +* 011 - Disable DPLL +* 100 - Set Source = BR Generator +* 101 - Set Source = RTxC_ +* 110 - Set FM Mode +* 111 - Set NRZI Mode +*/ + +/* WR15 */ +/* D0 +* SDLC/HDLC Enhancement Enable +* D1 +* Zero Count IE (Interrupt Enable) +* D2 +* 10 * 19-bit Frame Status FIFO Enable +* D3 +* DCD IE +* D4 +* Sync/Hunt IE +* D5 +* CTS IE +* D6 +* Tx Underrun / EOM IE +* D7 +* Break/Abort IE +*/ + + +/* RR0 */ +/* D0 +* Rx Character Availiable +* D1 +* Zero Count +* D2 +* Tx Buffer Empty +* D3 +* DCD +* D4 +* Sync/Hunt +* D5 +* CTS +* D6 +* Tx Underrun / EOM +* D7 +* Break/Abort +*/ + +/* RR1 */ +/* D0 +* All Sent +* D1 +* Residue Code 2 +* D2 +* Residue Code 1 +* D3 +* Residue Code 0 +* D4 +* Parity Error +* D5 +* Rx Overrun Error +* D6 +* CRC / Framing Error +* D7 +* End of Frame (SDLC) +*/ + +/* RR2 */ +/* D7-D0 +* Interrupt Vector +* +* Channel A RR2 = WR2 +* Channel B RR2 = Interrupt Vector Modified* +* +* * +* D3 D2 D1 Status 
High/Low = 0 +* D4 D5 D6 Status High/Low = 1 +* +* 0 0 0 Ch B Transmit Buffer Empty +* 0 0 1 Ch B External/Status Change +* 0 1 0 Ch B Receive Char. Availiable +* 0 1 1 Ch B Special Receive Condition +* 1 0 0 Ch A Transmit Buffer Empty +* 1 0 1 Ch A External/Status Change +* 1 1 0 Ch A Receive Char. Availiable +* 1 1 1 Ch A Special Receive Condition +*/ + +/* RR3 */ +/* D0 +* Channel B Ext/Status IP (Interrupt Pending) +* D1 +* Channel B Tx IP +* D2 +* Channel B Rx IP +* D3 +* Channel A Ext/Status IP +* D4 +* Channel A Tx IP +* D5 +* Channel A Rx IP +* D7-D6 +* Always 00 +*/ + +/* RR8 */ +/* D7-D0 +* Receive Buffer +*/ + +/* RR10 */ +/* D7-D0 +* Reserved (not used in asynchronous mode) +*/ + +/* RR12 */ +/* D7-D0 +* Lower Byte of Time Constant +*/ + +/* RR13 */ +/* D7-D0 +* Upper Byte of Time Constant +*/ + + + + + + + diff --git a/arch/e2k/boot/Makefile b/arch/e2k/boot/Makefile new file mode 100644 index 000000000000..3e01978e205f --- /dev/null +++ b/arch/e2k/boot/Makefile @@ -0,0 +1,321 @@ +GCOV_PROFILE := n + +OBJCOPY += -O binary -R .note -R .comment -S +OBJCOPYFLAGS := -O binary -R .note -R .comment -S + +ccflags-y := -DCONFIG_BOOT_E2K +ifeq ($(call cc-option-yn,-fno-semi-spec-ld -fno-spec-ld),y) + ccflags-y += -fno-semi-spec-ld -fno-spec-ld +else + ccflags-y += -fno-ld-spec +endif + + +targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.xz \ + vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4 cimage image \ + bimage cpiggy.o bpiggy.o epiggy.o zpiggy.o piggy.o System.map + +empty:= +space:= $(empty) $(empty) +ORIG_CFLAGS := $(KBUILD_CFLAGS) +ORIG_CFLAGS := $(subst $(space)-fprofile-generate-kernel$(space),$(space),$(ORIG_CFLAGS)) +ORIG_CFLAGS := $(subst $(space)-fprofile-use="$(PROFILE_USE)"$(space),$(space),$(ORIG_CFLAGS)) +KBUILD_CFLAGS = $(ORIG_CFLAGS) + +RANLIB = $(shell $(CC) -print-prog-name=ranlib) + +ROMSCRIPT = $(obj)/aploader.lds +KBUILD_CFLAGS += -DCONFIG_E2K_SIC +KBUILD_AFLAGS += -DCONFIG_E2K_SIC + +FLAGS_EPIC_EIOH := 
-DCONFIG_BOOT_EPIC -DCONFIG_EIOH + +ifeq ($(CONFIG_ES2),y) + KBUILD_CFLAGS += -DCONFIG_ES2 -DCONFIG_E2K_FULL_SIC + KBUILD_AFLAGS += -DCONFIG_ES2 -DCONFIG_E2K_FULL_SIC +else + ifeq ($(CONFIG_E2S),y) + KBUILD_CFLAGS += -DCONFIG_E2S -DCONFIG_E2K_FULL_SIC + KBUILD_AFLAGS += -DCONFIG_E2S -DCONFIG_E2K_FULL_SIC + else + ifeq ($(CONFIG_E8C),y) + KBUILD_CFLAGS += -DCONFIG_E8C \ + -DCONFIG_E2K_FULL_SIC \ + -DCONFIG_L_IOH2 + KBUILD_AFLAGS += -DCONFIG_E8C \ + -DCONFIG_E2K_FULL_SIC \ + -DCONFIG_L_IOH2 + else + ifeq ($(CONFIG_E1CP),y) + KBUILD_CFLAGS += -DCONFIG_E1CP \ + -DCONFIG_E2K_LEGACY_SIC \ + -DCONFIG_L_IOH2 + KBUILD_AFLAGS += -DCONFIG_E1CP \ + -DCONFIG_E2K_LEGACY_SIC \ + -DCONFIG_L_IOH2 + else + ifeq ($(CONFIG_E8C2),y) + KBUILD_CFLAGS += -DCONFIG_E8C2 \ + -DCONFIG_E2K_FULL_SIC \ + -DCONFIG_L_IOH2 + KBUILD_AFLAGS += -DCONFIG_E8C2 \ + -DCONFIG_E2K_FULL_SIC \ + -DCONFIG_L_IOH2 + else + ifeq ($(CONFIG_E12C),y) + KBUILD_CFLAGS += -DCONFIG_E12C \ + -DCONFIG_E2K_FULL_SIC \ + -DCONFIG_L_IOH2 \ + $(FLAGS_EPIC_EIOH) + KBUILD_AFLAGS += -DCONFIG_E12C \ + -DCONFIG_E2K_FULL_SIC \ + -DCONFIG_L_IOH2 \ + $(FLAGS_EPIC_EIOH) + else + ifeq ($(CONFIG_E16C),y) + KBUILD_CFLAGS += -DCONFIG_E16C \ + -DCONFIG_E2K_FULL_SIC \ + -DCONFIG_L_IOH2 \ + $(FLAGS_EPIC_EIOH) + KBUILD_AFLAGS += -DCONFIG_E16C \ + -DCONFIG_E2K_FULL_SIC \ + -DCONFIG_L_IOH2 \ + $(FLAGS_EPIC_EIOH) + else + ifeq ($(CONFIG_E2C3),y) + KBUILD_CFLAGS += -DCONFIG_E2C3 \ + -DCONFIG_E2K_FULL_SIC \ + -DCONFIG_L_IOH2 \ + $(FLAGS_EPIC_EIOH) + KBUILD_AFLAGS += -DCONFIG_E2C3 \ + -DCONFIG_E2K_FULL_SIC \ + -DCONFIG_L_IOH2 \ + $(FLAGS_EPIC_EIOH) + endif + endif + endif + endif + endif + endif + endif +endif + +# remove profile flags +ORIG_CFLAGS := $(KBUILD_CFLAGS) +KBUILD_CFLAGS =$(subst -pg,,$(ORIG_CFLAGS)) + +START_OBJ = $(obj)/romstartup.o $(obj)/apstartup.o +targets += romstartup.o apstartup.o + +obj-y := romloader.o jumpstart.o keyboard.o zip.o malloc.o stdio.o \ + string.o recovery_string.o recovery_string_v5.o epic.o + 
+AFLAGS_REMOVE_recovery_string_v5.o = $(CFLAGS_ALL_CPUS) +AFLAGS_recovery_string_v5.o += -march=elbrus-v5 + +obj-$(CONFIG_SMP) += aploader.o smp.o +obj-$(CONFIG_L_LOCAL_APIC) += apic.o + +obj-$(CONFIG_SERIAL_AM85C30_BOOT_CONSOLE) += Am85C30.o + +obj-$(CONFIG_VGA_CONSOLE) += vga.o +obj-$(CONFIG_LMS_CONSOLE) += console.o + +obj-$(CONFIG_BIOS) += bios/ + +BIOS_ELF = image +ROM_LDFLAGS = -e start -T $(TOPDIR)/$(ROMSCRIPT) + + +# lcc option '-fvisibility=hidden' allows us to reduce +# .got section size - this is a small optimization. +CFLAGS_decompress.o += -fpic -fvisibility=hidden -DE2K_P2V -O4 +CFLAGS_machdep_fpic.o += -fpic -fvisibility=hidden -DE2K_P2V -O4 +CFLAGS_string_fpic.o += -fpic -fvisibility=hidden -DE2K_P2V -O4 +CFLAGS_string_guest_fpic.o += -fpic -fvisibility=hidden -DE2K_P2V -O4 +AFLAGS_recovery_string_fpic.o += -fpic -fvisibility=hidden -DE2K_P2V +AFLAGS_REMOVE_recovery_string_v5_fpic.o = $(CFLAGS_ALL_CPUS) +AFLAGS_recovery_string_v5_fpic.o += -march=elbrus-v5 -fpic \ + -fvisibility=hidden -DE2K_P2V + +CFLAGS_bootblock.o += -fno-toplevel-reorder + +targets += bootblock.o decompress.o machdep_fpic.o string_fpic.o \ + recovery_string_fpic.o recovery_string_v5_fpic.o + +TOPDIR = $(srctree) +SYSTEM = vmlinux +SYSTEM_IMAGE = $(objtree)/image.boot +CVS_TOPDIR = ../../../../.. 
+ +ifdef CONFIG_BLK_DEV_INITRD +INITRD = /tmp/initrd +INITRDO = initrd.o +else +INITRD = +INITRDO = +endif + +PIGGYO = $(obj)/piggy.o + +targets += $(objtree)/image.boot + +targets += image.bios $(objtree)/image.bios $(objtree)/vmlinux.bin + +all: boot + +dep: + $(TOPDIR)/scripts/mkdep *.[Sch] > .depend + +clean: + rm -rf $(obj)/built-in-sym.o $(START_OBJ) $(BIOS_ELF) $(BIOS_IMAGE) _tmp_* + $(Q)$(MAKE) $(build)=arch/e2k/boot/bios clean + + +$(INITRDO): + tmpinitrd=_tmp_$$$$initrd; \ + rm -f $$tmpinitrd $$tmpinitrd.lnk; \ + if [ -x $(INITRD) ]; then \ + cp $(INITRD) $$tmpinitrd; \ + else \ + > $$tmpinitrd; \ + fi; \ + echo "SECTIONS { .initrd : { initrd_len = .; initrd_data = .; *(.data) initrd_data_end = .; }}" \ + > $$tmpinitrd.lnk; \ + $(LD) -r -o $(INITRDO) -b binary $$tmpinitrd -b elf64-e2k \ + -T $$tmpinitrd.lnk; \ + rm -f $$tmpinitrd $$tmpinitrd.lnk + +bios: FORCE + $(Q)$(MAKE) $(build)=arch/e2k/boot/bios bios + +# piggy is gzipped kernel +LDFLAGS_zpiggy.o = -r -b binary --oformat elf64-e2k -T $(srctree)/$(src)/vmlinux.bin.lds \ +-defsym input_data_noncomp_size=$(shell wc -c < $(objtree)/$(obj)/vmlinux.bin) \ +-defsym boot_mode=0 +$(obj)/zpiggy.o: $(obj)/vmlinux.bin.gz FORCE + $(call if_changed,ld) + +# piggy is empty +LDFLAGS_epiggy.o = -r -b binary --oformat elf64-e2k -T $(srctree)/$(src)/epiggy.lds \ +-defsym input_data_noncomp_size=$(shell wc -c < $(objtree)/$(obj)/vmlinux.bin) \ +-defsym boot_mode=1 -defsym input_data=0 -defsym input_data_end=0 +$(obj)/epiggy.o: $(obj)/vmlinux.bin FORCE + $(call if_changed,ld) + + +ifeq ($(with_kernel), 1) +$(PIGGYO): $(obj)/zpiggy.o + @cp $(obj)/zpiggy.o $(obj)/piggy.o; \ + rm -f $(obj)/epiggy.o + +$(objtree)/vmlinux.bin: ; +else +$(PIGGYO): $(obj)/epiggy.o + @cp $(obj)/epiggy.o $(obj)/piggy.o; \ + rm -f $(obj)/zpiggy.o + +$(objtree)/vmlinux.bin: $(obj)/vmlinux.bin + @cp $(obj)/vmlinux.bin $(objtree)/vmlinux.bin +endif + +$(obj)/built-in-sym.o: $(obj)/built-in.a + @cp $(obj)/built-in.a $(obj)/built-in-sym.o; \ + 
$(RANLIB) $(obj)/built-in-sym.o + +LDFLAGS_bimage := $(ROM_LDFLAGS) +$(obj)/bimage: $(obj)/built-in-sym.o $(START_OBJ) $(PIGGYO) $(INITRDO) FORCE + $(call if_changed,ld) + +$(obj)/image.bios: $(obj)/bimage FORCE + $(call if_changed,objcopy) + +$(objtree)/image.bios: $(obj)/image.bios + @cp $(obj)/image.bios $(objtree)/image.bios + +$(obj)/System.map: $(obj)/bimage + @$(NM) $(obj)/bimage | grep -v '\(compiled\)\|\(\.o$$\)\|\( [aUw] \)\|\(\ $(obj)/System.map + +boot: $(objtree)/image.bios $(objtree)/vmlinux.bin $(obj)/System.map + +$(obj)/vmlinux.bin: vmlinux FORCE + $(call if_changed,objcopy) + @ksize=`ls -l $(obj)/vmlinux.bin | awk '{print $$5;}'`; \ + echo "Non-compressed kernel size:" $$ksize + +suffix-y := +suffix-$(CONFIG_KERNEL_GZIP) := .gz +suffix-$(CONFIG_KERNEL_BZIP2) := .bz2 +suffix-$(CONFIG_KERNEL_LZ4) := .lz4 +suffix-$(CONFIG_KERNEL_LZMA) := .lzma +suffix-$(CONFIG_KERNEL_XZ) := .xz +suffix-$(CONFIG_KERNEL_LZO) := .lzo + +$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE + $(call if_changed,gzip) + @ksize=`ls -l $(obj)/vmlinux.bin.gz | awk '{print $$5;}'`; \ + echo "Compressed kernel size:" $$ksize +$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE + $(call if_changed,bzip2) + @ksize=`ls -l $(obj)/vmlinux.bin.bz2 | awk '{print $$5;}'`; \ + echo "Compressed kernel size:" $$ksize +$(obj)/vmlinux.bin.lz4: $(obj)/vmlinux.bin FORCE + $(call if_changed,lz4) + @ksize=`ls -l $(obj)/vmlinux.bin.lz4 | awk '{print $$5;}'`; \ + echo "Compressed kernel size:" $$ksize +$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE + $(call if_changed,lzma) + @ksize=`ls -l $(obj)/vmlinux.bin.lzma | awk '{print $$5;}'`; \ + echo "Compressed kernel size:" $$ksize +$(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE + $(call if_changed,xzkern) + @ksize=`ls -l $(obj)/vmlinux.bin.xz | awk '{print $$5;}'`; \ + echo "Compressed kernel size:" $$ksize +$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE + $(call if_changed,lzo) + @ksize=`ls -l $(obj)/vmlinux.bin.lzo | awk '{print $$5;}'`; \ + echo 
"Compressed kernel size:" $$ksize + +LDFLAGS_bpiggy.o := -r -b binary --oformat elf64-e2k -T $(srctree)/$(src)/vmlinux.bin.lds +$(obj)/bpiggy.o: $(obj)/vmlinux.bin FORCE + $(call if_changed,ld) + +LDFLAGS_cpiggy.o := -r -b binary --oformat elf64-e2k -T $(srctree)/$(src)/vmlinux.bin.lds +$(obj)/cpiggy.o: $(obj)/vmlinux.bin$(suffix-y) FORCE + $(call if_changed,ld) + +LDFLAGS_image = -static -e gap -T $(srctree)/$(src)/boot.lds \ + -defsym __kernel_size=$(shell wc -c < $(objtree)/$(obj)/vmlinux.bin) +$(obj)/image: $(obj)/bpiggy.o $(obj)/bootblock.o FORCE + $(call if_changed,ld) + +ifdef CONFIG_KVM_GUEST_KERNEL +targets += string_guest_fpic.o +STRING_GUEST_FPIC_O=$(obj)/string_guest_fpic.o +COMPRESSED_SCRIPT=compressed_guest.lds +else +STRING_GUEST_FPIC_O= +COMPRESSED_SCRIPT=compressed.lds +endif + +LDFLAGS_cimage = -static -e decompress_kernel -T $(srctree)/$(src)/$(COMPRESSED_SCRIPT) \ + -defsym __orig_kernel_size=$(shell wc -c < $(objtree)/$(obj)/vmlinux.bin) \ + -defsym __kernel_size=$(shell wc -c < $(objtree)/$(obj)/vmlinux.bin$(suffix-y)) +$(obj)/cimage: $(obj)/cpiggy.o $(obj)/bootblock.o $(obj)/decompress.o \ + $(obj)/string_fpic.o $(STRING_GUEST_FPIC_O) \ + $(obj)/recovery_string_fpic.o $(obj)/recovery_string_v5_fpic.o \ + $(obj)/machdep_fpic.o FORCE + $(call if_changed,ld) + +$(objtree)/image.boot: $(obj)/image FORCE + $(call if_changed,objcopy) + +$(objtree)/zImage: $(obj)/cimage FORCE + $(call if_changed,objcopy) + +install-headers: + rm -rf $(CVS_TOPDIR)/usr/include; \ + mkdir -p $(CVS_TOPDIR)/usr/include; \ + ln -s $(TOPDIR)/include/linux $(CVS_TOPDIR)/usr/include/linux; \ + ln -s $(TOPDIR)/include/asm $(CVS_TOPDIR)/usr/include/asm; \ + ln -s $(TOPDIR)/include/asm-generic $(CVS_TOPDIR)/usr/include/asm-generic diff --git a/arch/e2k/boot/apic.c b/arch/e2k/boot/apic.c new file mode 100644 index 000000000000..17dc1ece1ba5 --- /dev/null +++ b/arch/e2k/boot/apic.c @@ -0,0 +1,265 @@ +/* + * Local APIC handling, local APIC timers + */ + +#include + +#include 
+#include +#include + +#include +#include +#include +#include +#include + +#include + +#include "boot_io.h" +#include "pic.h" + +#define SET_APIC_VERSION(x) ((x) & 0xFF) + +/**************************** DEBUG DEFINES *****************************/ +#undef DEBUG_BOOT_MODE +#undef Dprintk +#define DEBUG_BOOT_MODE 1 /* SMP CPU boot */ +#define Dprintk if (DEBUG_BOOT_MODE) rom_printk +/************************************************************************/ + +void +setup_local_apic(int cpu) +{ + unsigned int value, ver; + + value = native_apic_read(APIC_LVR); + ver = GET_APIC_VERSION(value); + if (ver == 0) { + ver = APIC_VERSION; + value |= SET_APIC_VERSION(ver); + native_apic_write(APIC_LVR, value); + } + Dprintk("APIC_LVR : 0x%x version 0x%x maxlvt 0x%x\n", + value, ver, GET_APIC_MAXLVT(value)); + + /* + * Put the APIC into flat delivery mode. + * Must be "all ones" explicitly for 82489DX. + */ + value = native_apic_read(APIC_DFR); + Dprintk("APIC_DFR before setup : 0x%x delivery mode 0x%x\n", + value, GET_APIC_DLVR_MODE(value)); + native_apic_write(APIC_DFR, 0xffffffff); + value = native_apic_read(APIC_DFR); + Dprintk("APIC_DFR after setup : 0x%x delivery mode 0x%x\n", + value, GET_APIC_DLVR_MODE(value)); + + /* + * Set up the logical destination ID. 
+ */ + value = native_apic_read(APIC_LDR); + Dprintk("APIC_LDR before setup : 0x%x logical ID 0x%x\n", + value, GET_APIC_LOGICAL_ID(value)); + value &= ~APIC_LDR_MASK; + value |= SET_APIC_LOGICAL_ID(cpu); + native_apic_write(APIC_LDR, value); + value = native_apic_read(APIC_LDR); + Dprintk("APIC_LDR after setup : 0x%x logical ID 0x%x\n", + value, GET_APIC_LOGICAL_ID(value)); + + /* + * Reset all not masked interrupts + */ + + value = native_apic_read(APIC_NM); + Dprintk("APIC_NM before setup : 0x%x\n", + value); + native_apic_write(APIC_NM, APIC_NM_BIT_MASK); +#if DEBUG_BOOT_MODE + value = native_apic_read(APIC_NM); + native_apic_write(APIC_NM, APIC_NM_BIT_MASK); + Dprintk("APIC_NM after setup : 0x%x\n", value); +#endif + + /* + * Now that we are all set up, enable the APIC + */ + value = native_apic_read(APIC_BSP); + Dprintk("APIC_BSP before setup : 0x%x apic enable %d, BSP flag %d\n", + value, APIC_ENABLE(value) != 0, BootStrap(value) != 0); + value |= APIC_BSP_ENABLE; + native_apic_write(APIC_BSP, value); + value = native_apic_read(APIC_BSP); + Dprintk("APIC_BSP after setup : 0x%x apic enable %d, BSP flag %d\n", + value, APIC_ENABLE(value) != 0, BootStrap(value) != 0); + + value = native_apic_read(APIC_SPIV); + Dprintk("APIC_SPIV before setup : 0x%x apic soft enabled %d, " + "focus processor disabled %d, spurious vector 0x%x\n", + value, APIC_SOFT_ENABLED(value) != 0, + APIC_FOCUS_DISABLED(value) != 0, + GET_SPURIOUS_VECTOR(value)); +// value &= ~APIC_VECTOR_MASK; + /* + * Enable APIC + */ + value |= APIC_SPIV_APIC_ENABLED; + + /* + * Some unknown Intel IO/APIC (or APIC) errata is biting us with + * certain networking cards. If high frequency interrupts are + * happening on a particular IOAPIC pin, plus the IOAPIC routing + * entry is masked/unmasked at a high rate as well then sooner or + * later IOAPIC line gets 'stuck', no more interrupts are received + * from the device. 
If focus CPU is disabled then the hang goes + * away, oh well :-( + * + * [ This bug can be reproduced easily with a level-triggered + * PCI Ne2000 networking cards and PII/PIII processors, dual + * BX chipset. ] + */ + /* Disable focus processor (bit==1) */ + value |= APIC_SPIV_FOCUS_DISABLED; + native_apic_write(APIC_SPIV, value); + + Dprintk("APIC_SPIV after setup : 0x%x apic soft enabled %d, " + "focus processor disabled %d, spurious vector 0x%x\n", + value, APIC_SOFT_ENABLED(value) != 0, + APIC_FOCUS_DISABLED(value) != 0, + GET_SPURIOUS_VECTOR(value)); + + value = native_apic_read(APIC_LVT0); + Dprintk("APIC_LVT0 before setup : 0x%x apic lvt masked %d\n", + value, (value & APIC_LVT_MASKED) != 0); + if (!cpu) { + value = APIC_DM_EXTINT; + } else { + value = APIC_DM_EXTINT | APIC_LVT_MASKED; + } + native_apic_write(APIC_LVT0, value); + Dprintk("APIC_LVT0 after setup : 0x%x apic lvt masked %d, " + "Ext Int enabled 0x%d\n", + value, (value & APIC_LVT_MASKED) != 0, + (value & APIC_DM_EXTINT) != 0); + + /* + * only the BP should see the LINT1 NMI signal, obviously. + */ + if (!cpu) + value = APIC_DM_NMI; + else + value = APIC_DM_NMI | APIC_LVT_MASKED; + native_apic_write(APIC_LVT1, value); +} + +void +clear_local_apic(void) +{ + native_apic_write(APIC_BSP, 0); + native_apic_write(APIC_SPIV, 0); +} + +void +print_local_apic(void) +{ + unsigned int v, ver, maxlvt; + + v = native_apic_read(APIC_BSP); + if (!APIC_ENABLE(v)) { + rom_printk(" APIC disable\n"); + return; + } + if (BootStrap(v)) + rom_printk("... BootStrap processor\n"); + else + rom_printk("... Aplication processor\n"); + v = native_apic_read(APIC_ID); + rom_printk("... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v)); + v = native_apic_read(APIC_LVR); + rom_printk("... APIC VERSION: %08x\n", v); + ver = GET_APIC_VERSION(v); + maxlvt = GET_APIC_MAXLVT(v); + + v = native_apic_read(APIC_TASKPRI); + rom_printk("... 
APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK); + + if (APIC_INTEGRATED(ver)) { /* !82489DX */ + v = native_apic_read(APIC_ARBPRI); + rom_printk( "... APIC ARBPRI: %08x (%02x)\n", v, + v & APIC_ARBPRI_MASK); + v = native_apic_read(APIC_PROCPRI); + rom_printk( "... APIC PROCPRI: %08x\n", v); + } + +/* v = native_apic_read(APIC_EOI); */ +/* rom_printk( "... APIC EOI: %08x\n", v); */ + v = native_apic_read(APIC_LDR); + rom_printk( "... APIC LDR: %08x\n", v); + v = native_apic_read(APIC_DFR); + rom_printk( "... APIC DFR: %08x\n", v); + v = native_apic_read(APIC_SPIV); + rom_printk( "... APIC SPIV: %08x\n", v); + + if (APIC_INTEGRATED(ver)) { /* !82489DX */ + if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ + native_apic_write(APIC_ESR, 0); + v = native_apic_read(APIC_ESR); + rom_printk( "... APIC ESR: %08x\n", v); + } + + v = native_apic_read(APIC_ICR); + rom_printk( "... APIC ICR: %08x\n", v); + v = native_apic_read(APIC_ICR2); + rom_printk( "... APIC ICR2: %08x\n", v); + + v = native_apic_read(APIC_LVTT); + rom_printk( "... APIC LVTT: %08x\n", v); + + if (maxlvt > 3) { /* PC is LVT#4. */ + v = native_apic_read(APIC_LVTPC); + rom_printk( "... APIC LVTPC: %08x\n", v); + } + v = native_apic_read(APIC_LVT0); + rom_printk( "... APIC LVT0: %08x\n", v); + v = native_apic_read(APIC_LVT1); + rom_printk( "... APIC LVT1: %08x\n", v); + + if (maxlvt > 2) { /* ERR is LVT#3. */ + v = native_apic_read(APIC_LVTERR); + rom_printk( "... APIC LVTERR: %08x\n", v); + } + + v = native_apic_read(APIC_TMICT); + rom_printk( "... APIC TMICT: %08x\n", v); + v = native_apic_read(APIC_TMCCT); + rom_printk( "... APIC TMCCT: %08x\n", v); + v = native_apic_read(APIC_TDCR); + rom_printk( "... APIC TDCR: %08x\n", v); + v = native_apic_read(APIC_M_ERM); + rom_printk( "... APIC_M_ERM: %08x\n", v); + v = native_apic_read(APIC_NM); + native_apic_write(APIC_NM, APIC_NM_BIT_MASK); + rom_printk( "... 
APIC_NM: %08x\n", v); + rom_printk("\n"); + +} + +void debug_apic_startup(int cpu, unsigned int value, unsigned long startup_addr) +{ + unsigned long addr; + + Dprintk("CPU #%d : APIC_NM value = 0x%x\n", cpu, value); + + if (!(value & APIC_NM_STARTUP)) + rom_printk("CPU #%d : ERROR: APIC startup bit is not set\n", + cpu); + + addr = value & APIC_NM_STARTUP_ADDR; + + Dprintk("CPU #%d : APIC received STARTUP with addr 0x%x\n", cpu, addr); + + if (addr != startup_addr >> 12) + rom_printk("CPU #%d : ERROR : APIC incorrect startup addr\n", + cpu); +} diff --git a/arch/e2k/boot/aploader.S b/arch/e2k/boot/aploader.S new file mode 100644 index 000000000000..da0652ed8a5b --- /dev/null +++ b/arch/e2k/boot/aploader.S @@ -0,0 +1,125 @@ +// +// aploader.s, written by +// secondary linux kernel loader module on application processor +// + +#ifdef _E2K_SIMULATION_MODE_ +#include +#endif /* _E2K_SIMULATION_MODE_ */ +#include +#include + +#define MMU_TRAP_CELLAR_MAX_SIZE 64 /* double-words */ + +#define RW 0x1800000000000000 +#define R 0x0800000000000000 +#define RW_NONP 0x1800000000000000 + +#define RBS 0x10 /* 10 quadwords */ + +.text +.global aploader + +.global _data +.global __bios_size_ld +.global __bios_fsize_ld +.global __bios_entry_ld +.global __bios_start_data +.global __bios_size_data +.global __bios_size_data_plus_bss + +// loader will call start_secondary() to continue. 
See smp.c +.global start_secondary +.global free_memory_p + +aploader: +// [OS]{CUD|GD} setup + addd 0, [__bios_size_ld], %r8 // EOS size + addd 0, [EOS_RAM_BASE_LABEL+R], %r2 + addd %r2,1<<58 , %r2 // set CUD.c flag + rwd %r2, %oscud.lo + addd 0, [EOS_RAM_BASE_LABEL+RW], %r6 +// shld %r8, 32, %r10 + addd 0, 0x01f0000000000000, %r10 // *D size (all space 1M upward) + + rwd %r2, %cud.lo + rwd %r6, %osgd.lo + rwd %r6, %gd.lo + + rwd %r10, %oscud.hi + rwd %r10, %osgd.hi + rwd %r10, %cud.hi + rwd %r10, %gd.hi + + ldd [free_memory_p] , %dr4 +// PSP - procedure stack pointer + +// 'E2K_ALIGN_PSTACK' kernel loader procedure stack align + addd %r4, E2K_ALIGN_PSTACK_MASK, %r4 + andd %r4, (~(E2K_ALIGN_PSTACK_MASK)),%r4 + +// 'E2K_KERNEL_PS_PAGE_SIZE' kernel loader procedure stack align + addd %r4, (E2K_KERNEL_PS_PAGE_SIZE - 1), %r4 + andd %r4, (~(E2K_KERNEL_PS_PAGE_SIZE - 1)),%r4 + + rwd E2K_BOOT_KERNEL_PS_SIZE << 32, %psp.hi + addd %r4, RW, %r6 + rwd %r6, %psp.lo + addd %r4, (E2K_BOOT_KERNEL_PS_SIZE + E2K_KERNEL_PS_PAGE_SIZE), %r4 + +// 'E2K_KERNEL_PS_PAGE_SIZE' kernel loader procedure stack align + addd %r4, (E2K_KERNEL_PS_PAGE_SIZE - 1), %r4 + andd %r4, (~(E2K_KERNEL_PS_PAGE_SIZE - 1)),%r4 + +// PCSP - procedure chain stack pointer + +// 'E2K_ALIGN_PCSTACK' kernel loader procedure chain stack align + addd %r4, E2K_ALIGN_PCSTACK_MASK, %r4 + andd %r4, (~(E2K_ALIGN_PCSTACK_MASK)),%r4 + +// 'E2K_KERNEL_PCS_PAGE_SIZE' kernel loader procedure chain stack align + addd %r4, (E2K_KERNEL_PCS_PAGE_SIZE - 1), %r4 + andd %r4, (~(E2K_KERNEL_PCS_PAGE_SIZE - 1)),%r4 + rwd E2K_BOOT_KERNEL_PCS_SIZE << 32, %pcsp.hi + addd %r4, RW, %r6 + + rwd %r6, %pcsp.lo + addd %r4, (E2K_BOOT_KERNEL_PCS_SIZE + E2K_KERNEL_PCS_PAGE_SIZE), %r4 +// 'E2K_KERNEL_PCS_PAGE_SIZE' kernel loader procedure chain stack align + addd %r4, (E2K_KERNEL_PCS_PAGE_SIZE - 1), %r4 + andd %r4, (~(E2K_KERNEL_PCS_PAGE_SIZE - 1)),%r4 + +// US - user (kernel loader) stack pointer + +// 'E2K_ALIGN_USTACK' kernel loader stack align + 
addd %r4, E2K_ALIGN_USTACK_MASK, %r4 + andd %r4, (~(E2K_ALIGN_USTACK_MASK)),%r4 + +// 'E2K_KERNEL_US_PAGE_SIZE' kernel loader stack align + +// User Stack is supposed to grow from higher memory addresses to lower ones +// Switch to higher memory addresses of stack + addd %r4, E2K_BOOT_KERNEL_US_SIZE, %r4 + addd %r4, (E2K_KERNEL_US_PAGE_SIZE - 1), %r4 + andd %r4, (~(E2K_KERNEL_US_PAGE_SIZE - 1)),%r4 + rwd E2K_BOOT_KERNEL_US_SIZE << 32, %usd.hi + addd %r4, E2K_ALIGN_STACKS_BASE_MASK, %r4 + andd %r4, (~(E2K_ALIGN_STACKS_BASE_MASK)), %r4 + rwd %r4, %sbr + addd %r4, RW_NONP, %r6 + rwd %r6, %usd.lo + +// Trap Cellar + std %r0, [0x0] 71 + std %r4, [0x00000050] 71 + addd %dr4, MMU_TRAP_CELLAR_MAX_SIZE * 8, %dr4 + std %dr4, [free_memory_p] + +// Jump to the rtc0 (preparation) + addd 0, [start_secondary], %r2 + movtd %r2, %ctpr1 + + setwd wsz=RBS+4 + +// Call start_secondary() and fly away + call %ctpr1, wbs=RBS diff --git a/arch/e2k/boot/aploader.lds b/arch/e2k/boot/aploader.lds new file mode 100644 index 000000000000..0f5919182351 --- /dev/null +++ b/arch/e2k/boot/aploader.lds @@ -0,0 +1,99 @@ + +OUTPUT_FORMAT("elf64-e2k", "elf64-e2k", "elf64-e2k") +/* OUTPUT_ARCH(e2k) Defined by Makefile */ +ENTRY(start) + +MEMORY { + ROM (RX) : ORIGIN = 0x100000000, LENGTH = 16M + RAM (W) : ORIGIN = 1M, LENGTH = 31M + +/* ROM chunks */ + ROML (RX) : ORIGIN = 0x100000800, LENGTH = 16M - 16K - 2048 + ROMH (RX) : ORIGIN = 0x100ff0000, LENGTH = 16K + CPUSTART (RX) : ORIGIN = 0x100000000, LENGTH = 2048 +} + +SECTIONS { + + _start = .; + .text : { + _text = .; /* Text and read-only data */ + + *(EXCLUDE_FILE (arch/e2k/boot/romstartup.o arch/e2k/boot/apstartup.o) .text .gnu.linkonce.t.*) + *(EXCLUDE_FILE (arch/e2k/boot/romstartup.o arch/e2k/boot/piggy.o) .rodata) + + . 
= ALIGN(4096); + arch/e2k/boot/piggy.o(.rodata) + + _etext = .; /* End of text section */ + } > ROML + + .data : AT ( ADDR(.text) + SIZEOF ( .text ) ) { + _data = .; /* Data section */ + __apstartup_start = .; + arch/e2k/boot/apstartup.o(.text) + __apstartup_end = .; + *(EXCLUDE_FILE (arch/e2k/boot/romstartup.o) .data .gnu.linkonce.d.*) + + _edata = .; /* End of data section */ + } > RAM + + + + .bss : { + __bss_start = .; /* BSS */ + + *(EXCLUDE_FILE (arch/e2k/boot/romstartup.o) .bss COMMON) + + __bss_stop = . ; + } > RAM + + .initrd : { + . = ALIGN(4096); + *(.initrd) + } > ROMH + + .symtable : { + . = ALIGN(4096); + *(.symtable) + } > ROMH + + .strtable : { + . = ALIGN(4096); + *(.strtable) + } > ROMH + + .text.startup : { + __startup_start = .; + + arch/e2k/boot/romstartup.o(.text) + arch/e2k/boot/romstartup.o(.rodata) + arch/e2k/boot/romstartup.o(.data) + arch/e2k/boot/romstartup.o(.bss) + + __startup_end = .; + + . = ALIGN(2048); /* Round up the image size exactly to 16M. */ + + } > CPUSTART + + /* Sections to be discarded */ + /DISCARD/ : { + *(.info) + /* Do not bother with updating alternatives in boot, + * just discard the sections to reduce binary size. */ + *(.altinstructions) + *(.altinstr_replacement) + } + + __bios_start_code = ADDR(.text); + __bios_size_code = SIZEOF(.text); + __bios_start_data = ADDR(.text) + SIZEOF( .text ); + __bios_size_data = SIZEOF(.data); + __bios_size_data_plus_bss = SIZEOF(.data) + SIZEOF( .bss ); + + /* lld compatibility items. These calculations may be not quite accurate. 
*/ + __bios_size_ld = SIZEOF ( .text ); + __bios_fsize_ld = SIZEOF ( .text ) + SIZEOF ( .text ) + SIZEOF ( .bss ); + __bios_entry_ld = _text - __startup_start; +} diff --git a/arch/e2k/boot/apstartup.S b/arch/e2k/boot/apstartup.S new file mode 100644 index 000000000000..f048f37f2c51 --- /dev/null +++ b/arch/e2k/boot/apstartup.S @@ -0,0 +1,42 @@ +/* $Id: apstartup.S,v 1.3 2005/08/16 12:32:40 atic Exp $ */ +// +// primary application processor startup module +// + +#define WSZ 0x20 /* 20 quadwords */ +#define RBS 0x10 /* 10 quadwords */ +#define RSZ 0x10 /* 10 quadwords */ + +#define UPSR 0x1 /* fpu enabled */ + +#define OSEM_LO 0x0 /* no software traps enabled */ +#define OSEM_HI 0x0 + +.text +.global apstart +.global loader +#ifdef CONFIG_SMP +.global aploader +#endif /* CONFIG_SMP */ + +// We are starting first steps on AP and branch to ap_loader(). See aploader.S +// Static memory allocation is defined by romloader.lds + +apstart: +{ + setwd wsz = WSZ + setbn rbs = RBS, rsz = RSZ, rcur = 0x0 + setbp psz = 0 +} + rws UPSR, %upsr +#ifdef CONFIG_SMP + addd 0, [aploader], %r0 +#else + addd 0, [loader], %r0 +#endif /* CONFIG_SMP */ + movtd %r0, %ctpr1 + rwd OSEM_LO, %osem + ct %ctpr1 + .size apstart, . 
- apstart + + diff --git a/arch/e2k/boot/bios/Makefile b/arch/e2k/boot/bios/Makefile new file mode 100644 index 000000000000..a6044d1fa85a --- /dev/null +++ b/arch/e2k/boot/bios/Makefile @@ -0,0 +1,12 @@ + +ccflags-y += -DCONFIG_BOOT_E2K + +obj-y := bios.o southbridge.o superio.o init_kbd.o \ + newpci.o linuxpci.o io.o + +obj-$(CONFIG_ENABLE_BIOS_MPTABLE) += mpspec.o mptable.o + +obj-$(CONFIG_ENABLE_MGA) += mga.o + +obj-y += video/ + diff --git a/arch/e2k/boot/bios/bios.c b/arch/e2k/boot/bios/bios.c new file mode 100644 index 000000000000..28e14bcfe873 --- /dev/null +++ b/arch/e2k/boot/bios/bios.c @@ -0,0 +1,361 @@ + +#include "bios.h" +#include +#include "pci.h" +#include "../Am85C30.h" + +#if defined(CONFIG_LMS_CONSOLE) +extern void console_probe(void); +#endif + +bios_hardware_t hardware = {0}; + +/* + * First part of BIOS initialization + * + * No any memory available yet. Minimum initializations for the moment. + */ + +void bios_first(void) +{ +#if defined(CONFIG_LMS_CONSOLE) + console_probe(); +#endif +} + +/* + * Rest of BIOS initialization + * + * Most of the job can be completed here. PCI should be inited before. + */ + +#undef TEST_RDMA_REGS +#ifdef TEST_RDMA_REGS +static void test_rdma(void) +{ + struct bios_pci_dev *dev; + unsigned int val = 0; + rom_printk("test_rdma: scanning for RDMA device on PCI bus\n"); + dev = bios_pci_find_device(0x1544, 0x7112, 0); /* 0x71121544 - lms */ + if (dev) { + rom_printk("found on bus %d device %d\n", + dev->bus->number, PCI_SLOT(dev->devfn)); + }else{ + rom_printk("!!! 
NOT FOUND !!!\n"); + return; + } + rom_printk("test_rdma: check it's own bars\n"); + rom_printk("test_rdma: bar[0] = 0x%x\n",dev->base_address[0]); + rom_printk("test_rdma: bar[1] = 0x%x\n",dev->base_address[1]); + rom_printk("RDMA controler regs : \n"); + rom_printk("0 = 0x%x\n", *(u32*)(dev->base_address[0])); + rom_printk("1 = 0x%x\n", *(u32*)(dev->base_address[0] + 0x4)); + rom_printk("2 = 0x%x\n", *(u32*)(dev->base_address[0] + 0x8)); + rom_printk("3 = 0x%x\n", *(u32*)(dev->base_address[0] + 0xc)); + pcibios_read_config_dword(dev->bus->number, dev->devfn, 0x00, &val); + rom_printk("conf space: 0x00 = 0x%x\n", val); + pcibios_read_config_dword(dev->bus->number, dev->devfn, 0x04, &val); + rom_printk("conf space: 0x04 = 0x%x\n", val); + pcibios_read_config_dword(dev->bus->number, dev->devfn, 0x08, &val); + rom_printk("conf space: 0x08 = 0x%x\n", val); + pcibios_read_config_dword(dev->bus->number, dev->devfn, 0x0c, &val); + rom_printk("conf space: 0x0c = 0x%x\n", val); + pcibios_read_config_dword(dev->bus->number, dev->devfn, 0x10, &val); + rom_printk("conf space: 0x10 = 0x%x\n", val); + pcibios_read_config_dword(dev->bus->number, dev->devfn, 0x14, &val); + rom_printk("conf space: 0x14 = 0x%x\n", val); + pcibios_read_config_dword(dev->bus->number, dev->devfn, 0x18, &val); + rom_printk("conf space: 0x18 = 0x%x\n", val); + +} +#endif + +#ifdef CONFIG_E2K_SIC +//#define TEST_FM33256 +#ifdef TEST_FM33256 + +#include +#include +#include +#include +#include "printk.h" +#include "pci.h" + +#define MAX_SPI_DEVICE_NR 3 + +#define SPI_CONTROL 0x00 +#define SPI_STATUS 0x04 +#define SPI_OPCODE 0x08 +#define SPI_ADDRESS 0x0C +#define SPI_MODE 0x10 + +#define SPI_WREN_CMD 6 +#define SPI_WRDI_CMD 4 +#define SPI_WRSR_CMD 5 +#define SPI_RDSR_CMD 1 +#define SPI_READ_CMD 3 +#define SPI_WRITE_CMD 2 +#define SPI_RDPC_CMD 0x13 +#define SPI_WRPC_CMD 0x12 + +#define SPI_STATUS_BUSY_SHIFT 0 +#define SPI_STATUS_INTR_SHIFT 1 +#define SPI_STATUS_FAIL_SHIFT 2 + +#define SPI_STATUS_BUSY (1 << 
SPI_STATUS_BUSY_SHIFT) +#define SPI_STATUS_INTR (1 << SPI_STATUS_INTR_SHIFT) +#define SPI_STATUS_FAIL (1 << SPI_STATUS_FAIL_SHIFT) + +#define SPI_DEVICE_0 0 +#define SPI_DEVICE_1 1 +#define SPI_DEVICE_2 2 +#define SPI_DEVICE_3 3 + +#define SPI_ADDRESS_SIZE_8 0 +#define SPI_ADDRESS_SIZE_16 1 +#define SPI_ADDRESS_SIZE_24 2 +#define SPI_ADDRESS_SIZE_32 3 + +#define MAX_SPI_BYTES 64 + +#define SPI_DEVICE_SHIFT 0 + +#define MAX_SPI_ADDRESS_SIZE_SHIFT 3 +#define SPI_ADDRESS_SIZE_SHIFT 2 + +#define SPI_DATA_SIZE_SHIFT 4 +#define SPI_ADDRESS_PHASE_SHIFT 11 + +#define SPI_ADDRESS_PHASE_ENABLE (1 << SPI_ADDRESS_PHASE_SHIFT) +#define SPI_ADDRESS_PHASE_DISABLE (0 << SPI_ADDRESS_PHASE_SHIFT) + +#define SPI_DATA_PHASE_SHIFT 12 + +#define SPI_DATA_PHASE_ENABLE (1 << SPI_DATA_PHASE_SHIFT) +#define SPI_DATA_PHASE_DISABLE (0 << SPI_DATA_PHASE_SHIFT) + +#define SPI_TRANS_TYPE_SHIFT 13 + +#define SPI_TRANS_READ (0 << SPI_TRANS_TYPE_SHIFT) +#define SPI_TRANS_WRITE (1 << SPI_TRANS_TYPE_SHIFT) + +#define SPI_START_SHIFT 14 + +#define SPI_START (1 << SPI_START_SHIFT) + +#define SPI_KILL_SHIFT 15 + +#define SPI_KILL (1 << SPI_KILL_SHIFT) + +static void error(char *x) +{ + rom_puts("\n\n"); + rom_puts(x); + rom_puts("\n\n -- System halted"); + + E2K_LMS_HALT_ERROR(0xdead); /* Halt */ +} + +struct i2c_spi { + unsigned long cntrl_base; + unsigned long data_base; + unsigned char dev_number; +}; + +struct i2c_spi i2c_spi; +/* cmos_addr - entire registers offset in +* Processor Companion case +* (SPI_RDPC_CMD or SPI_WRPC_CMD) or +* entire flash offset in the case of +* (SPI_READ_CMD or SPI_READ_CMD) +* i2c_spi_cntrl.cntrl_base i2c/spi control bar = bar[0] for pci device +* i2c_spi_cntrl.data_base i2c/spi memory buffer bar = bar[1] for pci device */ + +int spi_read(unsigned int cmos_addr) +{ + unsigned long i2c_spi_cntrl = i2c_spi.cntrl_base; + unsigned long i2c_spi_data = i2c_spi.data_base; + unsigned char data; + unsigned int cmd = 0; + + /* Set READ operation code */ + 
E2K_WRITE_MAS_W(i2c_spi_cntrl + SPI_OPCODE, SPI_RDPC_CMD, MAS_IOADDR); + + /* Set addr offset */ + E2K_WRITE_MAS_W(i2c_spi_cntrl + SPI_ADDRESS, cmos_addr, MAS_IOADDR); + + /* Set Device number, Address size, Data size offset */ + cmd = i2c_spi.dev_number << SPI_DEVICE_SHIFT | + SPI_ADDRESS_SIZE_16 << SPI_ADDRESS_SIZE_SHIFT | + 1 << SPI_DATA_SIZE_SHIFT | + SPI_ADDRESS_PHASE_ENABLE | + SPI_DATA_PHASE_ENABLE | + SPI_TRANS_READ | + SPI_START; + + E2K_WRITE_MAS_W(i2c_spi_cntrl + SPI_CONTROL, cmd, MAS_IOADDR); + + while((E2K_READ_MAS_W(i2c_spi_cntrl + SPI_STATUS, MAS_IOADDR) & + (SPI_STATUS_INTR | SPI_STATUS_FAIL)) == 0) + if (E2K_READ_MAS_W(i2c_spi_cntrl + SPI_STATUS, MAS_IOADDR) & SPI_STATUS_FAIL) { + rom_printk("spi_read: Error - Transfer Failed"); + return -1; + } + data = E2K_READ_MAS_B(i2c_spi_data, MAS_IOADDR); + return (int)data; +} + +int spi_ops(unsigned int dev_number, unsigned char cmd_code) +{ + unsigned int cmd; + unsigned long i2c_spi_cntrl = i2c_spi.cntrl_base; + + if (dev_number > MAX_SPI_DEVICE_NR) { + rom_printk("spi_ops: Error - Device number is to large: %d (Max: %d)", dev_number, MAX_SPI_DEVICE_NR); + return -1; + } + switch(cmd_code) { + case SPI_READ_CMD: + case SPI_WRITE_CMD: + case SPI_RDPC_CMD: + case SPI_WRPC_CMD: + rom_printk("spi_ops: Error - Wrong command code: %d", cmd_code); + return -1; + default: + break; + } + + E2K_WRITE_MAS_W(i2c_spi_cntrl + SPI_OPCODE, cmd_code, MAS_IOADDR); + + cmd = dev_number << SPI_DEVICE_SHIFT | + SPI_ADDRESS_PHASE_DISABLE | + SPI_DATA_PHASE_DISABLE | + SPI_START; + + E2K_WRITE_MAS_W(i2c_spi_cntrl + SPI_CONTROL, cmd, MAS_IOADDR); + + while((E2K_READ_MAS_W(i2c_spi_cntrl + SPI_STATUS, MAS_IOADDR) & + (SPI_STATUS_INTR | SPI_STATUS_FAIL)) == 0) + if (E2K_READ_MAS_W(i2c_spi_cntrl + SPI_STATUS, MAS_IOADDR) & SPI_STATUS_FAIL) { + rom_printk("spi_ops: Error - Operation Failed"); + return -1; + } + return 1; +} + +int spi_write(unsigned char val, unsigned int cmos_addr) +{ + + unsigned int cmd; + unsigned long 
i2c_spi_cntrl = i2c_spi.cntrl_base; + unsigned long i2c_spi_data = i2c_spi.data_base; + + + if(spi_ops(i2c_spi.dev_number, SPI_WREN_CMD) == -1) { + rom_printk("%s: Error - Failed to enable write operation", __FUNCTION__); + return -1; + } + + E2K_WRITE_MAS_B(i2c_spi_data, val, MAS_IOADDR); + + /* Set WRITE operation code */ + E2K_WRITE_MAS_W(i2c_spi_cntrl + SPI_OPCODE, SPI_WRPC_CMD, MAS_IOADDR); + + /* Set addr offset */ + E2K_WRITE_MAS_W(i2c_spi_cntrl + SPI_ADDRESS, cmos_addr, MAS_IOADDR); + + /* Set Device number, Address size, Data size offset */ + cmd = i2c_spi.dev_number << SPI_DEVICE_SHIFT | + SPI_ADDRESS_SIZE_16 << SPI_ADDRESS_SIZE_SHIFT | + 1 << SPI_DATA_SIZE_SHIFT | + SPI_ADDRESS_PHASE_ENABLE | + SPI_DATA_PHASE_ENABLE | + SPI_TRANS_READ | + SPI_START; + + E2K_WRITE_MAS_W(i2c_spi_cntrl + SPI_CONTROL, cmd, MAS_IOADDR); + + while((E2K_READ_MAS_W(i2c_spi_cntrl + SPI_STATUS, MAS_IOADDR) & + (SPI_STATUS_INTR | SPI_STATUS_FAIL)) == 0) + if (E2K_READ_MAS_W(i2c_spi_cntrl + SPI_STATUS, MAS_IOADDR) & SPI_STATUS_FAIL) { + rom_printk("spi_read: Error - Transfer Failed"); + return -1; + } + return 1; +} + + +int cmos_read(unsigned int cmos_addr) +{ + int data = spi_read(cmos_addr); + + if (data == -1) { + rom_printk("%s: read operation failed", __FUNCTION__); + return -1; + } + return data; +} + +int cmos_write(unsigned char val, unsigned int cmos_addr) +{ + if (spi_write(val, cmos_addr) == -1) { + rom_printk("%s: write operation failed", __FUNCTION__); + return -1; + } + return 1; +} + +void test_fm33256(void){ + int tmp; + struct bios_pci_dev *dev; + rom_printk("test_fm33256: Scanning PCI bus for ioapic/pic/timer i2c/spi controller ..."); + dev = bios_pci_find_device(INTEL_MULTIFUNC_VENDOR, + INTEL_MULTIFUNC_DEVICE, 0); + if (dev) { + rom_printk("found on bus %d device %d\n", + dev->bus->number, PCI_SLOT(dev->devfn)); + }else{ + rom_printk("!!! 
NOT FOUND !!!\n"); + return; + } + rom_printk("test_fm33256: control base addr = 0x%x, data base addr = 0x%x\n", + (unsigned int)dev->base_address[0], (unsigned int)dev->base_address[1]); + i2c_spi.cntrl_base = dev->base_address[0]; + i2c_spi.data_base = dev->base_address[1]; + i2c_spi.dev_number = 1; + tmp = cmos_read(0x18); + rom_printk("test_fm33256: tmp = 0x%x\n", tmp); +} +#endif +#endif + +void bios_rest(void) +{ +#ifdef CONFIG_ENABLE_IOAPIC +#ifdef CONFIG_E2K_SIC + configure_pic_system(); + configure_system_timer(); +#ifdef CONFIG_SERIAL_AM85C30_BOOT_CONSOLE + zilog_serial_init(); +#endif +#endif +#endif + +#ifdef TEST_RDMA_REGS + test_rdma(); +#endif + +#ifdef CONFIG_E2K_SIC +#ifdef TEST_FM33256 + test_fm33256(); +#endif +#endif + +#ifdef CONFIG_E2K_LEGACY_SIC + enable_embeded_graphic(); +#else /* ! CONFIG_E2K_LEGACY_SIC */ +#ifdef CONFIG_ENABLE_MGA + enable_mga(); +#endif /* CONFIG_ENABLE_MGA */ +#endif /* CONFIG_E2K_LEGACY_SIC */ +} diff --git a/arch/e2k/boot/bios/bios.h b/arch/e2k/boot/bios/bios.h new file mode 100644 index 000000000000..fb9d0255a605 --- /dev/null +++ b/arch/e2k/boot/bios/bios.h @@ -0,0 +1,41 @@ + +void rom_printk(char const *fmt, ...); + +extern void sb_enable_itself(void); +#ifndef CONFIG_E2K_SIC +extern void sb_enable_ioapic(void); +#endif +#ifdef CONFIG_E2K_SIC +extern void configure_pic_system(void); +extern void configure_system_timer(void); +#endif +extern void sb_enable_rtc(void); +extern void sb_enable_ide(void); +extern void enable_serial_ports(void); +extern void enable_parallel_port(void); +extern void enable_mouse(void); +extern void enable_keyboard(void); +extern void enable_rtc(void); +extern void enable_floppy(void); +extern void enable_mga(void); +extern void vga_init(void); +#ifdef CONFIG_E2K_LEGACY_SIC +extern void enable_embeded_graphic(void); +#endif /* CONFIG_E2K_LEGACY_SIC */ + +extern void init_kbd(void); + +struct bios_hardware { + unsigned char serial :1; + unsigned char parallel :1; + unsigned char rtc :1; + 
unsigned char keyboard :1; + unsigned char mouse :1; + unsigned char floppy :1; + unsigned char video :1; + unsigned char dbgport :1; +}; + +typedef struct bios_hardware bios_hardware_t; + +extern bios_hardware_t hardware; diff --git a/arch/e2k/boot/bios/ide_config.h b/arch/e2k/boot/bios/ide_config.h new file mode 100644 index 000000000000..264b80842800 --- /dev/null +++ b/arch/e2k/boot/bios/ide_config.h @@ -0,0 +1,18 @@ +/* + * $Id: ide_config.h,v 1.1 2006/03/30 16:53:22 kostin Exp $ + * Southbridge configuration. + * IDE Configuration Registers (Function 1) + */ + +#ifndef _IDE_CONFIG_H_ +#define _IDE_CONFIG_H_ + +#define SB_PCICMD 0x4 // 0x4-0x5 +#define SB_PCICMD_IOSE 0x1 // access to the Legacy IDE ports + +#define SB_IDETIM 0x40 // 0x40-0x41=Primary Cnannel + // 0x42-0x43=Secondary Channel +#define SB_IDETIM_DECODE_ENABLE 0x8000 +#define SB_IDETIM_SHIFT 16 + +#endif diff --git a/arch/e2k/boot/bios/init_kbd.c b/arch/e2k/boot/bios/init_kbd.c new file mode 100644 index 000000000000..a7ebf640b715 --- /dev/null +++ b/arch/e2k/boot/bios/init_kbd.c @@ -0,0 +1,134 @@ +#include +#include "init_kbd.h" + +#include "../boot_io.h" + +#undef DEBUG_KBD_MODE +#undef DebugKBD +#define DEBUG_KBD_MODE 0 /* keyboard debug */ +#define DebugKBD if (DEBUG_KBD_MODE) rom_printk + +void wait_kbd_write(void) +{ + unsigned char in = bios_inb(KBD_STATUS_REG); + while (in & KBD_STAT_IBF) { + in = bios_inb(KBD_STATUS_REG); + } +} + +void wait_kbd_read(void) +{ + unsigned char in = bios_inb(KBD_STATUS_REG); + DebugKBD("wait_kbd_read() status 0x%x\n", in); + while ((~in) & KBD_STAT_OBF) { + in = bios_inb(KBD_STATUS_REG); + DebugKBD("wait_kbd_read() while status 0x%x\n", in); + } +} + +void send_kbd_cmd(unsigned int cmd) +{ + wait_kbd_write(); + DebugKBD("send_kbd_cmd() cmd 0x%x\n", cmd); + bios_outb(cmd, KBD_CNTL_REG); +} + +void send_kbd_data(unsigned int data) +{ + wait_kbd_write(); + DebugKBD("send_kbd_data() data out 0x%x\n", data); + bios_outb(data, KBD_DATA_REG); +} + +unsigned int 
recv_kbd_data(void) +{ + unsigned int in; + wait_kbd_read(); + in = (unsigned int)bios_inb(KBD_DATA_REG); + DebugKBD("recv_kbd_data() data in 0x%x\n", in); + return in; +} + +void init_kbd(void) +{ + int check = 0; + rom_printk("kbd init ...\n"); +// init KBC + send_kbd_cmd(KBD_CCMD_KBD_DISABLE); + check = 1; + DebugKBD("init_kbd() check #%d\n", check); + send_kbd_cmd(KBD_CCMD_SELF_TEST); + if (recv_kbd_data() != 0x55) goto failed; + check = 2; + DebugKBD("init_kbd() check #%d\n", check); + send_kbd_cmd(KBD_CCMD_GET_VERSION); + recv_kbd_data(); + check = 3; + DebugKBD("init_kbd() check #%d\n", check); + send_kbd_cmd(KBD_CCMD_KBD_TEST); + if (recv_kbd_data() != 0x00) goto failed; + check = 4; + DebugKBD("init_kbd() check #%d\n", check); + send_kbd_cmd(KBD_CCMD_KBD_ENABLE); +// + check = 5; + DebugKBD("init_kbd() check #%d\n", check); +#if 1 + send_kbd_data(KBD_CMD_RESET); + if (recv_kbd_data() != KBD_REPLY_ACK) goto failed; + if (recv_kbd_data() != KBD_REPLY_POR) goto failed; + check = 6; + DebugKBD("init_kbd() check #%d\n", check); + send_kbd_data(KBD_CMD_DISABLE); + if (recv_kbd_data() != KBD_REPLY_ACK) goto failed; + check = 7; + DebugKBD("init_kbd() check #%d\n", check); + send_kbd_data(KBD_CMD_SET_LEDS); + if (recv_kbd_data() != KBD_REPLY_ACK) goto failed; + check = 8; + DebugKBD("init_kbd() check #%d\n", check); + send_kbd_data(0x07); + if (recv_kbd_data() != KBD_REPLY_ACK) goto failed; + check = 9; + DebugKBD("init_kbd() check #%d\n", check); + send_kbd_data(KBD_CMD_ECHO); + if (recv_kbd_data() != 0xee) goto failed; + check = 10; + DebugKBD("init_kbd() check #%d\n", check); + send_kbd_data(KBD_CMD_READ_ID); + if (recv_kbd_data() != KBD_REPLY_ACK) goto failed; + check = 11; + DebugKBD("init_kbd() check #%d\n", check); + recv_kbd_data(); + recv_kbd_data(); + check = 12; + DebugKBD("init_kbd() check #%d\n", check); + send_kbd_data(KBD_CMD_SET_RATE); + if (recv_kbd_data() != KBD_REPLY_ACK) goto failed; + check = 13; + DebugKBD("init_kbd() check #%d\n", check); + 
send_kbd_data(0x0); +// if (recv_kbd_data() != KBD_REPLY_ACK) goto failed; + check = 14; + DebugKBD("init_kbd() check #%d\n", check); + send_kbd_data(KBD_CMD_SET_LEDS); + if (recv_kbd_data() != KBD_REPLY_ACK) goto failed; + check = 15; + DebugKBD("init_kbd() check #%d\n", check); + send_kbd_data(0x0); + if (recv_kbd_data() != KBD_REPLY_ACK) goto failed; + check = 16; + DebugKBD("init_kbd() check #%d\n", check); + send_kbd_data(KBD_CMD_ENABLE); + if (recv_kbd_data() != KBD_REPLY_ACK) goto failed; + check = 17; + DebugKBD("init_kbd() check #%d\n", check); + send_kbd_cmd(KBD_CCMD_WRITE_MODE); + send_kbd_data(KBD_MODE_KBD_INT | KBD_MODE_SYS | KBD_MODE_KCC); +#endif + rom_printk("kbd init passed ...\n"); + return; +failed: + rom_printk("kbd init faled... check %d\n", check); + return; +} diff --git a/arch/e2k/boot/bios/init_kbd.h b/arch/e2k/boot/bios/init_kbd.h new file mode 100644 index 000000000000..e10bd672e37a --- /dev/null +++ b/arch/e2k/boot/bios/init_kbd.h @@ -0,0 +1,103 @@ +#ifndef _INIT_KEYB_H_ +#define _INIT_KEYB_H_ + +/* + * Keyboard Controller Registers + * + * NOTE: These are offsets from pcikbd_iobase, not absolute. 
+ */ + +#define KBD_STATUS_REG 0x64 +#define KBD_CNTL_REG KBD_STATUS_REG +#define KBD_DATA_REG 0x60 + +/* + * Keyboard Controller Commands + */ + +#define KBD_CCMD_READ_MODE 0x20 /* Read mode bits */ +#define KBD_CCMD_WRITE_MODE 0x60 /* Write mode bits */ +#define KBD_CCMD_GET_VERSION 0xA1 /* Get controller version */ +#define KBD_CCMD_MOUSE_DISABLE 0xA7 /* Disable mouse interface */ +#define KBD_CCMD_MOUSE_ENABLE 0xA8 /* Enable mouse interface */ +#define KBD_CCMD_TEST_MOUSE 0xA9 /* Mouse interface test */ +#define KBD_CCMD_SELF_TEST 0xAA /* Controller self test */ +#define KBD_CCMD_KBD_TEST 0xAB /* Keyboard interface test */ +#define KBD_CCMD_KBD_DISABLE 0xAD /* Keyboard interface disable */ +#define KBD_CCMD_KBD_ENABLE 0xAE /* Keyboard interface enable */ +#define KBD_CCMD_WRITE_MOUSE 0xD4 /* Write the following byte to the mouse */ + +/* + * Keyboard Commands + */ + +#define KBD_CMD_SET_LEDS 0xED /* Set keyboard leds */ +#define KBD_CMD_ECHO 0xEE /* Echo */ +#define KBD_CMD_READ_ID 0xF2 /* Get keyboard ID */ +#define KBD_CMD_SET_RATE 0xF3 /* Set typematic rate */ +#define KBD_CMD_ENABLE 0xF4 /* Enable scanning */ +#define KBD_CMD_DISABLE 0xF5 /* Disable scanning */ +#define KBD_CMD_RESET 0xFF /* Reset */ + +/* + * Keyboard Replies + */ + +#define KBD_REPLY_POR 0xAA /* Power on reset */ +#define KBD_REPLY_ACK 0xFA /* Command ACK */ +#define KBD_REPLY_RESEND 0xFE /* Command NACK, send the cmd again */ + +/* + * Status Register Bits + */ + +#define KBD_STAT_OBF 0x01 /* Keyboard output buffer full */ +#define KBD_STAT_IBF 0x02 /* Keyboard input buffer full */ +#define KBD_STAT_SELFTEST 0x04 /* Self test successful */ +#define KBD_STAT_CMD 0x08 /* Last write was a command write (0=data) */ +#define KBD_STAT_UNLOCKED 0x10 /* Zero if keyboard locked */ +#define KBD_STAT_MOUSE_OBF 0x20 /* Mouse output buffer full */ +#define KBD_STAT_GTO 0x40 /* General receive/xmit timeout */ +#define KBD_STAT_PERR 0x80 /* Parity error */ + +#define AUX_STAT_OBF (KBD_STAT_OBF | 
KBD_STAT_MOUSE_OBF) + +/* + * Controller Mode Register Bits + */ + +#define KBD_MODE_KBD_INT 0x01 /* Keyboard data generage IRQ1 */ +#define KBD_MODE_MOUSE_INT 0x02 /* Mouse data generate IRQ12 */ +#define KBD_MODE_SYS 0x04 /* The system flag (?) */ +#define KBD_MODE_NO_KEYLOCK 0x08 /* The keylock doesn't affect the keyboard if set */ +#define KBD_MODE_DISABLE_KBD 0x10 /* Disable keyboard interface */ +#define KBD_MODE_DISABLE_MOUSE 0x20 /* Disable mouse interface */ +#define KBD_MODE_KCC 0x40 /* Scan code conversion to PC format */ +#define KBD_MODE_RFU 0x80 + +/* + * Mouse Commands + */ + +#define AUX_SET_RES 0xE8 /* Set resolution */ +#define AUX_SET_SCALE11 0xE6 /* Set 1:1 scaling */ +#define AUX_SET_SCALE21 0xE7 /* Set 2:1 scaling */ +#define AUX_GET_SCALE 0xE9 /* Get scaling factor */ +#define AUX_SET_STREAM 0xEA /* Set stream mode */ +#define AUX_SET_SAMPLE 0xF3 /* Set sample rate */ +#define AUX_ENABLE_DEV 0xF4 /* Enable aux device */ +#define AUX_DISABLE_DEV 0xF5 /* Disable aux device */ +#define AUX_RESET 0xFF /* Reset aux device */ + +#define KBD_INIT_TIMEOUT 1000 /* Timeout in ms for initializing the keyboard */ +#define KBC_TIMEOUT 250 /* Timeout in ms for sending to keyboard controller */ +#define KBD_TIMEOUT 1000 /* Timeout in ms for keyboard command acknowledge */ +/* How to access the keyboard macros on this platform. 
*/ +#define kbd_read_input() inb(KBD_DATA_REG) +#define kbd_read_status() inb(KBD_STATUS_REG) +#define kbd_write_output(val) outb(val, KBD_DATA_REG) +#define kbd_write_command(val) outb(val, KBD_CNTL_REG) +#define KBD_NO_DATA (-1) /* No data */ +#define KBD_BAD_DATA (-2) /* Parity or other error */ + +#endif diff --git a/arch/e2k/boot/bios/io.c b/arch/e2k/boot/bios/io.c new file mode 100644 index 000000000000..eaf33004b8ff --- /dev/null +++ b/arch/e2k/boot/bios/io.c @@ -0,0 +1,623 @@ +#include "pci.h" +#include +#include +#include +#include "../boot_io.h" + +#undef DEBUG_IO +#undef DebugIO +#define DEBUG_IO 0 +#define DebugIO if (DEBUG_IO) rom_printk + +#undef DEBUG_IOH +#undef DebugIOH +#define DEBUG_IOH 0 +#define DebugIOH if (DEBUG_IOH) rom_printk + +#ifdef CONFIG_E2K_SIC + +#define es2_domain_pci_conf_base(domain) (ES2_PCICFG_AREA_PHYS_BASE + \ + ES2_PCICFG_AREA_SIZE * ((unsigned long) domain)) +#define e2s_domain_pci_conf_base(domain) (E2S_PCICFG_AREA_PHYS_BASE + \ + E2S_PCICFG_AREA_SIZE * ((unsigned long) domain)) +#define e8c_domain_pci_conf_base(domain) (E8C_PCICFG_AREA_PHYS_BASE + \ + E8C_PCICFG_AREA_SIZE * ((unsigned long) domain)) +#define e1cp_domain_pci_conf_base(domain) (E1CP_PCICFG_AREA_PHYS_BASE) +#define e8c2_domain_pci_conf_base(domain) (E8C2_PCICFG_AREA_PHYS_BASE + \ + E8C2_PCICFG_AREA_SIZE * ((unsigned long) domain)) +#define e12c_domain_pci_conf_base(domain) (E12C_PCICFG_AREA_PHYS_BASE + \ + E12C_PCICFG_AREA_SIZE * ((unsigned long) domain)) +#define e16c_domain_pci_conf_base(domain) (E16C_PCICFG_AREA_PHYS_BASE + \ + E16C_PCICFG_AREA_SIZE * ((unsigned long) domain)) +#define e2c3_domain_pci_conf_base(domain) (E2C3_PCICFG_AREA_PHYS_BASE + \ + E2C3_PCICFG_AREA_SIZE * ((unsigned long) domain)) + +static inline unsigned long bios_get_domain_pci_conf_base(unsigned int domain) +{ + unsigned long conf_base; + +#if defined(CONFIG_ES2) + conf_base = es2_domain_pci_conf_base(domain); +#elif defined(CONFIG_E2S) + conf_base = 
e2s_domain_pci_conf_base(domain); +#elif defined(CONFIG_E8C) + conf_base = e8c_domain_pci_conf_base(domain); +#elif defined(CONFIG_E8C2) + conf_base = e8c2_domain_pci_conf_base(domain); +#elif defined(CONFIG_E12C) + conf_base = e12c_domain_pci_conf_base(domain); +#elif defined(CONFIG_E16C) + conf_base = e16c_domain_pci_conf_base(domain); +#elif defined(CONFIG_E2C3) + conf_base = e2c3_domain_pci_conf_base(domain); +#else + #error "Invalid e2k machine type" +#endif /* CONFIG_E3S */ + return (conf_base); +} + +unsigned char bios_conf_inb(int domain, unsigned char bus, unsigned long port) +{ + + unsigned char byte; + unsigned long conf_base; + + conf_base = bios_get_domain_pci_conf_base(domain); + port = conf_base + port; + byte = NATIVE_READ_MAS_B(port, MAS_IOADDR); + + DebugIO("conf_inb(): value %x read from port %x\n", + (int) byte, (int) port); + + return byte; +} +#endif + +unsigned char bios_inb(unsigned short port) +{ + unsigned char byte; + + DebugIO("bios_inb entered.\n"); + + byte = NATIVE_READ_MAS_B(PHYS_X86_IO_BASE + port, MAS_IOADDR); + + DebugIO("value %x read from port %x\n", (int) byte, (int) port); + + DebugIO("bios_inb exited.\n"); + + return byte; +} + +unsigned char bios_inb_p(unsigned long port) +{ + + unsigned char byte; + + DebugIO("bios_inb_p entered.\n"); + + byte = NATIVE_READ_MAS_B(PHYS_X86_IO_BASE + port, MAS_IOADDR); + + DebugIO("bios_inb_p exited.\n"); + + return byte; +} + +void bios_outb_p(unsigned char byte, unsigned long port) +{ + DebugIO("bios_outb_p entered.\n"); + + NATIVE_WRITE_MAS_B(PHYS_X86_IO_BASE + port, byte, MAS_IOADDR); + + DebugIO("bios_outb_p exited.\n"); +} + +#ifdef CONFIG_E2K_SIC +void bios_conf_outb(int domain, unsigned char bus, unsigned char byte, + unsigned long port) +{ + unsigned long conf_base; + + conf_base = bios_get_domain_pci_conf_base(domain); + port = conf_base + port; + DebugIO("conf_outb(): port = %x\n", (int) port); + NATIVE_WRITE_MAS_B(port, byte, MAS_IOADDR); + + DebugIO("conf_outb exited.\n"); +} 
+void bios_ioh_e3s_outb(int domain, unsigned char bus, unsigned char byte, + unsigned long port) +{ + unsigned long addr; + + addr = IOHUB_SCRB_DOMAIN_START(domain); + addr += port; + NATIVE_WRITE_MAS_B(addr, byte, MAS_IOADDR); + DebugIOH("ioh_e3s_outb write 0x%x to domain %d bus 0x%x, port = 0x%x.\n", + byte, domain, bus, addr); +} + +u8 bios_ioh_e3s_inb(int domain, unsigned char bus, unsigned long port) +{ + unsigned long addr; + u8 byte; + + addr = IOHUB_SCRB_DOMAIN_START(domain); + addr += port; + byte = NATIVE_READ_MAS_B(addr, MAS_IOADDR); + DebugIOH("bios_ioh_e3s_inb() read 0x%x from domain %d bus 0x%x, " + "port = 0x%x\n", + byte, domain, bus, addr); + return (byte); +} +#endif + +void bios_outb(unsigned char byte, unsigned short port) +{ + DebugIO("outb entered.\n"); + + NATIVE_WRITE_MAS_B(PHYS_X86_IO_BASE + port, byte, MAS_IOADDR); + + DebugIO("outb exited.\n"); +} + +#ifdef CONFIG_E2K_SIC +void bios_conf_outw(int domain, unsigned char bus, u16 halfword, + unsigned long port) +{ + unsigned long conf_base; + + conf_base = bios_get_domain_pci_conf_base(domain); + port = conf_base + port; + DebugIO("conf_outw(): port = %x\n", (int) port); + NATIVE_WRITE_MAS_H(port, halfword, MAS_IOADDR); + + DebugIO("conf_outw exited.\n"); +} + +void bios_ioh_e3s_outw(int domain, unsigned char bus, u16 halfword, + unsigned long port) +{ + unsigned long addr; + + addr = IOHUB_SCRB_DOMAIN_START(domain); + addr += port; + NATIVE_WRITE_MAS_H(addr, halfword, MAS_IOADDR); + DebugIOH("ioh_e3s_outw write 0x%x to domain %d bus 0x%x, port = 0x%x\n", + halfword, domain, bus, addr); +} + +u16 bios_ioh_e3s_inw(int domain, unsigned char bus, unsigned long port) +{ + unsigned long addr; + u16 halfword; + + addr = IOHUB_SCRB_DOMAIN_START(domain); + addr += port; + halfword = NATIVE_READ_MAS_B(addr, MAS_IOADDR); + DebugIOH("bios_ioh_e3s_inw() read 0x%x from domain %d bus 0x%x, " + "port = 0x%x\n", + halfword, domain, bus, addr); + return (halfword); +} +#endif + +void bios_outw(u16 halfword, 
unsigned short port) +{ + DebugIO("outw entered.\n"); + + NATIVE_WRITE_MAS_H(PHYS_X86_IO_BASE + port, halfword, MAS_IOADDR); + + DebugIO("outw exited.\n"); +} + +void bios_outw_p(u16 halfword, unsigned long port) +{ + DebugIO("outw_p entered.\n"); + + NATIVE_WRITE_MAS_H(PHYS_X86_IO_BASE + port, halfword, MAS_IOADDR); + + DebugIO("outw_p exited.\n"); +} + +#ifdef CONFIG_E2K_SIC +u16 bios_conf_inw(int domain, unsigned char bus, unsigned long port) +{ + u16 hword; + unsigned long conf_base; + + conf_base = bios_get_domain_pci_conf_base(domain); + port = conf_base + port; + hword = NATIVE_READ_MAS_H(port, MAS_IOADDR); + DebugIO("conf_inw(): value %x read from port %x\n",hword, (int)port); + DebugIO("conf_inw exited.\n"); + + return hword; +} +#endif + +u16 bios_inw(unsigned short port) +{ + u16 hword; + + DebugIO("inw entered.\n"); + + hword = NATIVE_READ_MAS_H(PHYS_X86_IO_BASE + port, MAS_IOADDR); + + DebugIO("inw exited.\n"); + + return hword; +} + +u16 bios_inw_p(unsigned long port) +{ + u16 hword; + + DebugIO("inw_p entered.\n"); + + hword = NATIVE_READ_MAS_H(PHYS_X86_IO_BASE + port, MAS_IOADDR); + + DebugIO("inw_p exited.\n"); + + return hword; +} + +/* + * 'unsigned long' for I/O means 'u32', because IN/OUT ops are IA32-specific + */ +#ifdef CONFIG_E2K_SIC +void bios_conf_outl(int domain, unsigned char bus, u32 word, unsigned long port) +{ + unsigned long conf_base; + + conf_base = bios_get_domain_pci_conf_base(domain); + port = conf_base + port; + NATIVE_WRITE_MAS_W(port, word, MAS_IOADDR); + DebugIO("conf_outl exited.\n"); +} + +u32 bios_conf_inl(int domain, unsigned char bus, unsigned long port) +{ + u32 word; + unsigned long conf_base; + + conf_base = bios_get_domain_pci_conf_base(domain); + port = conf_base + port; + word = NATIVE_READ_MAS_W(port, MAS_IOADDR); + DebugIO("conf_inl(): value %x read from port %x\n", + (int) word, (int) port); + DebugIO("conf_inl exited.\n"); + return word; +} + +void bios_ioh_e3s_outl(int domain, unsigned char bus, u32 word, + 
unsigned long port) +{ + unsigned long addr; + + addr = IOHUB_SCRB_DOMAIN_START(domain); + addr += port; + NATIVE_WRITE_MAS_W(addr, word, MAS_IOADDR); + DebugIOH("ioh_e3s_outl write 0x%x to domain %d bus 0x%x, port = 0x%x\n", + word, domain, bus, addr); +} + +u32 bios_ioh_e3s_inl(int domain, unsigned char bus, unsigned long port) +{ + unsigned long addr; + u32 word; + + addr = IOHUB_SCRB_DOMAIN_START(domain); + addr += port; + word = NATIVE_READ_MAS_W(addr, MAS_IOADDR); + DebugIOH("bios_ioh_e3s_inl read 0x%x from domain %d bus 0x%x, " + "port = 0x%x\n", + word, domain, bus, addr); + return (word); +} +#endif + +void bios_outl(u32 word, unsigned short port) +{ + DebugIO("outl entered.\n"); + + NATIVE_WRITE_MAS_W(PHYS_X86_IO_BASE + port, word, MAS_IOADDR); + + DebugIO("outl exited.\n"); +} + +u32 bios_inl(unsigned short port) +{ + u32 word; + DebugIO("inl entered.\n"); + word = NATIVE_READ_MAS_W(PHYS_X86_IO_BASE + port, MAS_IOADDR); + DebugIO("inl(): value %x read from port %x\n", (int) word, (int) port); + DebugIO("inl exited.\n"); + + return word; +} + +void bios_outll(unsigned long data, unsigned short port) +{ + DebugIO("outb entered.\n"); + + NATIVE_WRITE_MAS_D(PHYS_X86_IO_BASE + port, data, MAS_IOADDR); + + DebugIO("outb exited.\n"); +} + +unsigned long bios_inll(unsigned short port) +{ + unsigned long dword; + DebugIO("inl entered.\n"); + dword = NATIVE_READ_MAS_D(PHYS_X86_IO_BASE + port, MAS_IOADDR); + DebugIO("inl(): value %lx read from port %x\n", + (unsigned long)dword, (int)port); + DebugIO("inl exited.\n"); + + return dword; +} + +static inline void fast_outw_p(u16 halfword, unsigned long port) +{ + NATIVE_WRITE_MAS_H(PHYS_X86_IO_BASE + port, halfword, MAS_IOADDR); +} + +void bios_outsw(unsigned long port, const void *src, unsigned long count) +{ + u16 *hw_p = (u16 *)src; + + DebugIO("outsw entered.\n"); + + DebugIO("outsw(): port=%lx src=%px count=%lx\n", port, src, count); + + if (((unsigned long)src) & 0x1) { + rom_printk("outsw: memory address is not 
short aligned"); + } + if (!count) + return; + + while (count--) { + fast_outw_p(*hw_p++, port); + } + + DebugIO("outsw exited.\n"); +} + +static inline u16 fast_inw_p(unsigned long port) +{ + return NATIVE_READ_MAS_H(PHYS_X86_IO_BASE + port, MAS_IOADDR); +} + +void bios_insw(unsigned long port, void *dst, unsigned long count) +{ + u16 *hw_p = (u16 *)dst; + + DebugIO("insw entered.\n"); + + DebugIO("insw(): port=%lx dst=%px count=%lx\n",port, dst, count); + + if (((unsigned long)dst) & 0x1) { + rom_printk("insw: memory address is not short aligned"); + } + if (!count) + return; + + while (count--) { + *hw_p++ = fast_inw_p(port); + } + + DebugIO("insw exited.\n"); +} + +/* + * Read COUNT 32-bit words from port PORT into memory starting at + * SRC. Now works with any alignment in SRC. Performance is important, + * but the interfaces seems to be slow: just using the inlined version + * of the bios_inl() breaks things. + * + * The source code was taken from Alpha's lib/io.c + */ +void bios_insl(unsigned long port, void *dst, unsigned long count) +{ + unsigned int l = 0, l2; + + if (!count) + return; + + switch (((unsigned long) dst) & 0x3) + { + case 0x00: /* Buffer 32-bit aligned */ + while (count--) + { + *(unsigned int *) dst = bios_inl(port); + dst += 4; + } + break; + + /* Assuming little endian in cases 0x01 -- 0x03 ... 
*/ + + case 0x02: /* Buffer 16-bit aligned */ + --count; + + l = bios_inl(port); + *(unsigned short *) dst = l; + dst += 2; + + while (count--) + { + l2 = bios_inl(port); + *(unsigned int *) dst = l >> 16 | l2 << 16; + dst += 4; + l = l2; + } + *(unsigned short *) dst = l >> 16; + break; + + case 0x01: /* Buffer 8-bit aligned */ + --count; + + l = bios_inl(port); + *(unsigned char *) dst = l; + dst += 1; + *(unsigned short *) dst = l >> 8; + dst += 2; + while (count--) + { + l2 = bios_inl(port); + *(unsigned int *) dst = l >> 24 | l2 << 8; + dst += 4; + l = l2; + } + *(unsigned char *) dst = l >> 24; + break; + + case 0x03: /* Buffer 8-bit aligned */ + --count; + + l = bios_inl(port); + *(unsigned char *) dst = l; + dst += 1; + while (count--) + { + l2 = bios_inl(port); + *(unsigned int *) dst = l << 24 | l2 >> 8; + dst += 4; + l = l2; + } + *(unsigned short *) dst = l >> 8; + dst += 2; + *(unsigned char *) dst = l >> 24; + break; + } +} + +/* + * Like insl but in the opposite direction. This is used by the IDE + * driver to write disk sectors. Works with any alignment in SRC. + * Performance is important, but the interfaces seems to be slow: + * just using the inlined version of the outl() breaks things. 
+ * + * The source code was taken from Alpha's lib/io.c + */ +void bios_outsl(unsigned long port, const void *src, unsigned long count) +{ + unsigned int l = 0, l2; + + if (!count) + return; + + switch (((unsigned long) src) & 0x3) + { + case 0x00: /* Buffer 32-bit aligned */ + while (count--) + { + bios_outl(*(unsigned int *) src, port); + src += 4; + } + break; + + case 0x02: /* Buffer 16-bit aligned */ + --count; + + l = *(unsigned short *) src << 16; + src += 2; + + while (count--) + { + l2 = *(unsigned int *) src; + src += 4; + bios_outl(l >> 16 | l2 << 16, port); + l = l2; + } + l2 = *(unsigned short *) src; + bios_outl(l >> 16 | l2 << 16, port); + break; + + case 0x01: /* Buffer 8-bit aligned */ + --count; + + l = *(unsigned char *) src << 8; + src += 1; + l |= *(unsigned short *) src << 16; + src += 2; + while (count--) + { + l2 = *(unsigned int *) src; + src += 4; + bios_outl(l >> 8 | l2 << 24, port); + l = l2; + } + l2 = *(unsigned char *) src; + bios_outl(l >> 8 | l2 << 24, port); + break; + + case 0x03: /* Buffer 8-bit aligned */ + --count; + + l = *(unsigned char *) src << 24; + src += 1; + while (count--) + { + l2 = *(unsigned int *) src; + src += 4; + bios_outl(l >> 24 | l2 << 8, port); + l = l2; + } + l2 = *(unsigned short *) src; + src += 2; + l2 |= *(unsigned char *) src << 16; + bios_outl(l >> 24 | l2 << 8, port); + break; + } +} + +/* + * Read COUNT 8-bit bytes from port PORT into memory starting at + * SRC. 
+ * + * The source code was taken from Alpha's lib/io.c + */ +void bios_insb(unsigned long port, void *dst, unsigned long count) +{ + while (((unsigned long)dst) & 0x3) { + if (!count) + return; + count--; + *(unsigned char *) dst = bios_inb(port); + dst += 1; + } + + while (count >= 4) { + unsigned int w; + count -= 4; + w = bios_inb(port); + w |= bios_inb(port) << 8; + w |= bios_inb(port) << 16; + w |= bios_inb(port) << 24; + *(unsigned int *) dst = w; + dst += 4; + } + + while (count) { + --count; + *(unsigned char *) dst = bios_inb(port); + dst += 1; + } +} + +/* + * Like insb but in the opposite direction. + * Don't worry as much about doing aligned memory transfers: + * doing byte reads the "slow" way isn't nearly as slow as + * doing byte writes the slow way (no r-m-w cycle). + * + * The source code was taken from Alpha's lib/io.c + */ +void bios_outsb(unsigned long port, const void *src, unsigned long count) +{ + while (count) { + count--; + bios_outb(*(char *)src, port); + src += 1; + } +} diff --git a/arch/e2k/boot/bios/linuxpci.c b/arch/e2k/boot/bios/linuxpci.c new file mode 100644 index 000000000000..8c2cd9ffec1c --- /dev/null +++ b/arch/e2k/boot/bios/linuxpci.c @@ -0,0 +1,681 @@ +/* + * $Id: linuxpci.c,v 1.10 2008/05/23 20:26:35 alexmipt Exp $ + * + * PCI Bus Services, see include/linux/pci.h for further explanation. 
+ * + * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter, + * David Mosberger-Tang + * + * Copyright 1997 -- 1999 Martin Mares + */ + +#include +#include +#include +#include +#include "asm/string.h" +#include +#include +#include "pci.h" + +#define GCC_WORKS_ON_O2 0 + +/**************************** DEBUG DEFINES *****************************/ +#undef DEBUG_BOOT_MODE +#undef Dprintk +#define DEBUG_BOOT_MODE 0 /* PCI scanning */ +#define Dprintk if (DEBUG_BOOT_MODE) rom_printk + +#undef DEBUG_VERBOSE_BOOT_MODE +#undef VDprintk +#define DEBUG_VERBOSE_BOOT_MODE 0 /* verbose PCI scanning */ +#define VDprintk if (DEBUG_VERBOSE_BOOT_MODE) rom_printk +/************************************************************************/ + + +/** + * This is the root of the PCI tree. A PCI tree always has + * one bus, bus 0. Bus 0 contains devices and bridges. + */ +struct bios_pci_bus pci_root[MAX_NUMIOHUBS]; +int pci_root_num = 0; +/// Linked list of PCI devices. ALL devices are on this list +struct bios_pci_dev *pci_devices = 0; +/// pointer to the last device */ +static struct bios_pci_dev **pci_last_dev_p = &pci_devices; +/// We're going to probably delete this -- flag to add in reverse order */ +static int pci_reverse = 0; + +/** + * Given a bus and a devfn number, find the device structure + * @param bus The bus number + * @param devfn a device/function number + * @return pointer to the device structure + */ +struct bios_pci_dev *pci_find_slot(unsigned int bus, unsigned int devfn) +{ + struct bios_pci_dev *dev; + + for (dev = pci_devices; dev; dev = dev->next) + if (dev->bus->number == bus && dev->devfn == devfn) + break; + return dev; +} + +/** Find a device of a given vendor and type + * @param vendor Vendor ID (e.g. 0x8086 for Intel) + * @param device Device ID + * @param from Pointer to the device structure, used as a starting point + * in the linked list of devices, which can be 0 to start at the + * head of the list (i.e. 
pci_devices) + * @return Pointer to the device struct + */ +struct bios_pci_dev *bios_pci_find_device(unsigned int vendor, + unsigned int device, struct bios_pci_dev *from) +{ + if (!from) + from = pci_devices; + else + from = from->next; + while (from && (from->vendor != vendor || from->device != device)) + from = from->next; + return from; +} + +/** Find a device of a given class + * @param class Class of the device + * @param from Pointer to the device structure, used as a starting point + * in the linked list of devices, which can be 0 to start at the + * head of the list (i.e. pci_devices) + * @return Pointer to the device struct + */ +struct bios_pci_dev *pci_find_class(unsigned int class, + struct bios_pci_dev *from) +{ + if (!from) + from = pci_devices; + else + from = from->next; + while (from && from->class != class) + from = from->next; + return from; +} + +/** Given a device, set the PCI_COMMAND_MASTER bit in the command register + * @param dev Pointer to the device structure + */ +void bios_pci_set_master(struct bios_pci_dev *dev) +{ + u16 cmd; + u8 lat; + + bios_pci_read_config_word(dev, PCI_COMMAND, &cmd); + if (!(cmd & PCI_COMMAND_MASTER)) { + printk_debug("PCI: Enabling bus mastering for device %02x:%02x\n", + dev->bus->number, dev->devfn); + cmd |= PCI_COMMAND_MASTER; + bios_pci_write_config_word(dev, PCI_COMMAND, cmd); + } + bios_pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat); + if (lat < 16) { + printk_debug("PCI: Increasing latency timer of device %02x:%02x to 64\n", + dev->bus->number, dev->devfn); + bios_pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64); + } +} + +/** Given a device and register, read the size of the BAR for that register. 
/** Given a device and register, read the size of the BAR for that register.
 * Standard PCI sizing probe (PCI spec 6.2.5.1): write all 1s to the BAR,
 * read back the decode mask, then restore the original value.  The decoded
 * size OR-ed with the BAR type bits is cached in dev->size[reg].
 * @param dev Pointer to the device structure
 * @param reg Which register to use (BAR index, not a byte offset)
 * @param addr Address to load into the register after size is found
 */
void pci_get_size(struct bios_pci_dev *dev, unsigned long reg,
		unsigned long addr)
{
	u32 size;
	unsigned long type;

	/* FIXME: more consideration for 64-bit PCI devices */
	// get the size: after writing all 1s the device reports which
	// address bits it actually decodes
	bios_pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + (reg << 2), ~0);
	bios_pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (reg << 2),
					&size);

	// restore addr
	bios_pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + (reg << 2),
					addr);

	// some broken hardware has read-only registers that do not
	// really size correctly. You can tell this if addr == size
	// Example: the acer m7229 has BARs 1-4 normally read-only.
	// so BAR1 at offset 0x10 reads 0x1f1. If you size that register
	// by writing 0xffffffff to it, it will read back as 0x1f1 -- a
	// violation of the spec.
	// We catch this case and ignore it by setting size and type to 0.
	// This incidentally catches the common case where registers
	// read back as 0 for both address and size.

#if 0 /* DOESN'T WORK on E2K */
	if (addr == size) {
		printk_debug(
			"pci_get_size: dev_fn 0x%x, register %d, read-only"
			" SO, ignoring it\n",
			dev->devfn, reg);
		printk_debug("addr was 0x%x, size was 0x%x\n",addr,size);
		type = 0;
		size = 0;
	}
	// Now compute the actual size, See PCI Spec 6.2.5.1 ...
	else
#endif
	if (size & PCI_BASE_ADDRESS_SPACE_IO) {
		/* I/O BAR: low bit(s) are the type, the rest the decode mask */
		type = size & (~PCI_BASE_ADDRESS_IO_MASK);
		size &= (PCI_BASE_ADDRESS_IO_MASK);
		// BUG! Top 16 bits can be zero (or not)
		// So set them to 0xffff so they go away ...
		size |= 0xffff0000;
		size = ~size;
		size++;
	} else {
		/* Memory BAR: low 4 bits carry type/prefetch information */
		type = size & (~PCI_BASE_ADDRESS_MEM_MASK);
		size &= (PCI_BASE_ADDRESS_MEM_MASK);
		size = ~size;
		size++;
	}
	/* Cache size together with the type bits for later allocation. */
	dev->size[reg] = size | type;
	Dprintk("BAR%d = %x (size %x, type %x)\n",
		reg, size | type, size, type);
}

/** Read the base address registers for a given device.
 * For each implemented BAR this sizes it via pci_get_size().  For 64-bit
 * memory BARs the following dword is consumed as the high half.
 * NOTE(review): only the high 32 bits are merged into base_address[] here;
 * the low half appears to be filled in elsewhere -- confirm against the
 * resource-assignment code.
 * @param dev Pointer to the dev structure
 * @param howmany How many registers to read (6 for device, 2 for bridge)
 */
void pci_read_bases(struct bios_pci_dev *dev, unsigned int howmany)
{
	unsigned int reg;
	u32 /* unsigned long for 64 bits ?? */ addr;

	/* FIXME: to deal with 64-bits PCI */
	Dprintk("pci_read_bases bus 0x%x, devfn 0x%x\n",
		dev->bus->number, dev->devfn);
	for (reg = 0; reg < howmany; reg++) {
		bios_pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (reg << 2),
					&addr);
		/* all-ones reads back from unimplemented BARs */
		if (addr == 0xffffffff)
			continue;

		/* get address space size */
		pci_get_size(dev, reg, addr);

		addr &= (PCI_BASE_ADDRESS_SPACE |
			PCI_BASE_ADDRESS_MEM_TYPE_MASK);
		if (addr == (PCI_BASE_ADDRESS_SPACE_MEMORY |
				PCI_BASE_ADDRESS_MEM_TYPE_64)) {
			printk_debug("reg %d is 64-bit\n", reg);
			/* this is a 64-bit memory base address:
			 * the next dword holds bits 63:32 */
			reg++;
			bios_pci_read_config_dword(dev,
				PCI_BASE_ADDRESS_0 + (reg << 2), &addr);
			if (addr) {
#if BITS_PER_LONG == 64
				dev->base_address[reg - 1] |=
					((unsigned long) addr) << 32;
#else
				printk_err("PCI: Unable to handle 64-bit "
					"address for device %02x:%02x\n",
					dev->bus->number, dev->devfn);
				dev->base_address[reg - 1] = 0;
#endif
			}
		}
	}
}

/*
 * Find the extent of a PCI decode..
 * Given the original BAR value and the value read back after writing all
 * 1s, isolate the lowest set decode bit; that power of two is the size.
 */
static unsigned int pci_size(unsigned int base, unsigned int maxbase, unsigned long mask)
{
	unsigned int size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

//	Dprintk("pci_size: base = %x maxbase = %x\n", base, maxbase);

	/* Get the lowest of them to find the decode size, and
	   from that the extent. */
	size = (size & ~(size-1)) ;  /* - 1; NEEDSWORK: Linar */

	/* base == maxbase can be valid only if the BAR has
	   already been programmed with all 1s. */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}
#ifdef CONFIG_E2K_SIC
/* Depth of the bus hierarchy currently being scanned.  Level 0 means the
 * main bus (pci_root).  The main bus may have several subbuses depending
 * on the CPU count: each system-on-CPU has its own PCI2PCI bridge linking
 * the main bus to its devices, so each has its own configuration space.
 * -1 means "not scanning"; incremented on entry to bios_pci_scan_bus()
 * and decremented on exit. */
int level = -1;
#endif

/** Scan the bus, first for devices and then recursively behind bridges.
 * Every found function is malloc'ed, appended to the global pci_devices
 * chain and to the parent bus's device list; each PCI-PCI bridge gets a
 * child bios_pci_bus whose primary/secondary/subordinate numbers are
 * programmed into the bridge's config space before the recursive scan.
 * @param bus pointer to the bus structure to scan
 * @return The maximum bus number found, after scanning all subordinate busses
 */
static unsigned int bios_pci_scan_bus(struct bios_pci_bus *bus)
{
	unsigned int devfn, max;
	struct bios_pci_dev *dev, **bus_last;
	struct bios_pci_bus *child;
	int domain = bios_pci_domain_nr(bus);
#if 0
	unsigned int msg_st[2], msg_end[2];
	unsigned long start, end;
#endif
#ifdef CONFIG_E2K_SIC
	/* Each time we enter the bios_pci_scan_bus function we must
	 * increase the bus hierarchy level */
	level++;
	Dprintk("PCI #%d: bios_pci_scan_bus enter for level %d\n",
		domain, level);
#endif
	Dprintk("PCI #%d: bios_pci_scan_bus for bus %d\n",
		domain, bus->number);

	bus_last = &bus->devices;
	max = bus->secondary;

	/* probe all devices on this bus with some optimization for
	   non-existent and single function devices.
	   NOTE(review): the bound 'devfn < 0xff' skips devfn 255
	   (device 31 function 7); Linux scans devfn 0..0xff inclusive --
	   confirm whether this is intentional here. */
	for (devfn = 0; devfn < 0xff; devfn++) {
		u32 id, class, addr, size;
		u8 cmd, tmp, hdr_type;
		u16 subsystem;
#if 0
		u32 tmphdr;
#endif /* 0 */
		// gcc just went to hell. Don't test -- this always
		// returns 0 anyway.
#if GCC_WORKS_ON_O2
		if (pcibios_read_config_dword(domain, bus->number, devfn, PCI_VENDOR_ID, &id)) {
			printk_spew("PCI #%d: devfn 0x%x, read_config_dword fails\n",
				domain, devfn);
			continue;
		}
#endif
		pcibios_read_config_dword(domain, bus->number, devfn, PCI_VENDOR_ID, &id);

		/* some broken boards return 0 if a slot is empty: */
		if (id == 0xffffffff || id == 0x00000000 || id == 0x0000ffff || id == 0xffff0000) {
			VDprintk("PCI #%d: devfn 0x%x, bad id 0x%x\n",
				domain, devfn, id);
			if (PCI_FUNC(devfn) == 0x00) {
				/* if this is a function 0 device and it is not present,
				   skip to next device (the loop's ++ makes this +8) */
				devfn += 0x07;
			}
			/* multi function device, skip to next function */
			continue;
		}
		if (pcibios_read_config_byte(domain, bus->number, devfn, PCI_HEADER_TYPE, &hdr_type)){
			Dprintk("PCI #%d: devfn 0x%x, header type read fails\n",
				domain, devfn);
			continue;
		}
		if (pcibios_read_config_dword(domain, bus->number, devfn, PCI_CLASS_REVISION, &class)) {
			Dprintk("PCI #%d: devfn 0x%x, class read fails\n",
				domain, devfn);
			continue;
		}
		if (pcibios_read_config_word(domain, bus->number, devfn, PCI_SUBSYSTEM_ID, &subsystem)){
			Dprintk("PCI #%d: devfn 0x%x, subsystem id read fails\n",
				domain, devfn);
			continue;
		}

		/* device exists: allocate and fill its descriptor */
		if ((dev = malloc(sizeof(*dev))) == 0) {
			printk_err("PCI: out of memory.\n");
			continue;
		}

		memset(dev, 0, sizeof(*dev));
		dev->bus = bus;
		dev->devfn = devfn;
		dev->vendor = id & 0xffff;
		dev->device = (id >> 16) & 0xffff;
		dev->hdr_type = hdr_type;
		dev->revision = (unsigned char) class & 0xff;
		/* class code, the upper 3 bytes of PCI_CLASS_REVISION */
		dev->class = class >> 8;
		class >>= 16;
		dev->subsys_id = subsystem;

		/* non-destructively determine if device can be a master:
		   try to set the bit, read it back, then restore */
		pcibios_read_config_byte(domain, bus->number, devfn,
					PCI_COMMAND, &cmd);
		pcibios_write_config_byte(domain, bus->number, devfn,
					PCI_COMMAND,
					cmd | PCI_COMMAND_MASTER);
		pcibios_read_config_byte(domain, bus->number, devfn,
					PCI_COMMAND, &tmp);
		dev->master = ((tmp & PCI_COMMAND_MASTER) != 0);
		pcibios_read_config_byte(domain, bus->number, devfn,
					PCI_COMMAND, &cmd);
		Dprintk("PCI %d:%d:%d:%d CMD %02x\n",
			domain, bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn),
			cmd);

		switch (hdr_type & 0x7f) {	/* header type */
		case PCI_HEADER_TYPE_NORMAL:	/* standard header */
			Dprintk("PCI #%d: detected header type PCI_HEADER_TYPE_NORMAL\n",
				domain);
			if (class == PCI_CLASS_BRIDGE_PCI)
				goto bad;
			/* read base address registers, again pci_fixup() can tweak these */
			pci_read_bases(dev, 6);
			/* size the expansion ROM BAR the same way as a
			   normal BAR, then restore it */
			pcibios_read_config_dword(domain, bus->number, devfn, PCI_ROM_ADDRESS, &addr);
			pcibios_write_config_dword(domain, bus->number, devfn, PCI_ROM_ADDRESS, ~PCI_ROM_ADDRESS_ENABLE);
			pcibios_read_config_dword(domain, bus->number, devfn, PCI_ROM_ADDRESS, &size);
			pcibios_write_config_dword(domain, bus->number, devfn, PCI_ROM_ADDRESS, addr);
			if (addr == 0xffffffff)
				addr = 0;
			if (size && size != 0xffffffff) {
				size = pci_size(addr, size, PCI_ROM_ADDRESS_MASK);
				if (size) {
					dev->rom_address = addr;
					dev->rom_address &= PCI_ROM_ADDRESS_MASK;
					dev->rom_size = size;
				}
			}

			break;
		case PCI_HEADER_TYPE_BRIDGE:	/* bridge header */
			Dprintk("PCI #%d: detected header type PCI_HEADER_TYPE_BRIDGE\n",
				domain);
			if (class != PCI_CLASS_BRIDGE_PCI)
				goto bad;
			pci_read_bases(dev, 2);
			pcibios_read_config_dword(domain, bus->number, devfn, PCI_ROM_ADDRESS1, &addr);
			dev->rom_address = (addr == 0xffffffff) ? 0 : addr;
			break;
		case PCI_HEADER_TYPE_CARDBUS:	/* CardBus bridge header */
			Dprintk("PCI #%d: detected header type PCI_HEADER_TYPE_CARDBUS\n",
				domain);
			if (class != PCI_CLASS_BRIDGE_CARDBUS)
				goto bad;
			pci_read_bases(dev, 1);
			break;
		default:	/* unknown header */
		bad:
			/* NOTE(review): 'dev' leaks here -- no free() in this
			   allocator, so the descriptor is simply abandoned */
			printk_err("PCI: %02x:%02x [%04x/%04x/%06x] has unknown header "
				"type %02x, ignoring.\n",
				bus->number, dev->devfn, dev->vendor, dev->device, class,
				hdr_type);
			continue;
		}

		Dprintk("PCI #%d: %02x:%02x [%04x/%04x]\n",
			domain, bus->number, dev->devfn,
			dev->vendor, dev->device);

		/* Put it into the global PCI device chain. It's used to find devices once
		   everything is set up. */
		if (!pci_reverse) {
			*pci_last_dev_p = dev;
			pci_last_dev_p = &dev->next;
		} else {
			dev->next = pci_devices;
			pci_devices = dev;
		}

		/* Now insert it into the list of devices held by the parent bus. */
		*bus_last = dev;
		bus_last = &dev->sibling;

		if (PCI_FUNC(devfn) == 0x00 && (hdr_type & 0x80) != 0x80) {
			/* if this is not a multi function device, don't waste time probing
			   another function. Skip to next device. */
			devfn += 0x07;
		}
	}

	/*
	 * The fixup code may have just found some peer pci bridges on this
	 * machine. Update the max variable if that happened so we don't
	 * get duplicate bus numbers.
	 */
	for (dev = bus->devices; dev; dev = dev->sibling)
		/* If it's a bridge, scan the bus behind it. */
		if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
			u32 buses;
			unsigned int devfn = dev->devfn;
			unsigned short cr;
#define NOTUSED
#ifdef NOTUSED
			/*
			 * Check for a duplicate bus. If we already scanned
			 * this bus number as a peer bus, don't also scan it
			 * as a child bus
			 */
			if (((dev->vendor == PCI_VENDOR_ID_SERVERWORKS) &&
			     ((dev->device == PCI_DEVICE_ID_SERVERWORKS_HE) ||
			      (dev->device == PCI_DEVICE_ID_SERVERWORKS_LE))) ||
			    ((dev->vendor == PCI_VENDOR_ID_INTEL) &&
			     ((dev->device == PCI_DEVICE_ID_INTEL_82454NX)||
			      (dev->device == PCI_DEVICE_ID_INTEL_82451NX))))
				continue;

			/* Read the existing primary/secondary/subordinate bus number
			   configuration to determine if the PCI bridge has already been
			   configured by the system. If so, check to see if we've already
			   scanned this bus as a result of peer bus scanning, if so, skip this.
			   FIXME: We are BIOS, is there anyone else doing this dirty job BEFORE us ?? */
			pcibios_read_config_dword(domain, bus->number, devfn, PCI_PRIMARY_BUS, &buses);
			if ((buses & 0xFFFFFF) != 0) {
				for (child = pci_root[domain].next; child; child = child->next)
					if (child->number == ((buses >> 8) & 0xff))
						goto skip_it;
			}
#endif
			/* Insert it into the tree of buses. */
			if ((child = malloc(sizeof(*child))) == 0) {
				printk_err("PCI: out of memory for bridge.\n");
				continue;
			}
			memset(child, 0, sizeof(*child));
			child->next = bus->children;
			bus->children = child;
			child->self = dev;
			child->parent = bus;

			/* Set up the primary, secondary and subordinate bus numbers. We have
			   no idea how many buses are behind this bridge yet, so we set the
			   subordinate bus number to 0xff for the moment */
			bios_set_pci_domain_nr(child, domain);
			child->number = child->secondary = ++max;
			child->primary = bus->secondary;
			child->subordinate = 0xff;
#ifdef CONFIG_E2K_SIC
			/* you are programming the main bus bridges when
			 * the level is 0
			 * FIXME must be reconstructed using NSR number
			 * FIXME now for 1 iohub only
			 */
			if (child->number >= 255) {
				Dprintk("bios_pci_scan_bus: too large amount "
					"of bridges,encrease the option "
					"please!!!\n");
				break;
			}
#endif
			/* Clear all status bits and turn off memory, I/O and master enables
			   while the bridge window is reprogrammed; 'cr' is restored below. */
			pcibios_read_config_word(domain, bus->number, devfn, PCI_COMMAND, &cr);
			pcibios_write_config_word(domain, bus->number, devfn, PCI_COMMAND, 0x0000);
			pcibios_write_config_word(domain, bus->number, devfn, PCI_STATUS, 0xffff);

			/*
			 * Read the existing primary/secondary/subordinate bus
			 * number configuration to determine if the PCI bridge
			 * has already been configured by the system. If so,
			 * do not modify the configuration, merely note it.
			 */
			pcibios_read_config_dword(domain, bus->number, devfn, PCI_PRIMARY_BUS, &buses);

#ifdef BRIDGE_CONFIGURED_AT_POWERUP
			// There is some hardware (ALPHA) that configures bridges in hardware, at bootup.
			// We need to take that into account at some point.
			// At the same time, we're finding buggy bridge hardware that comes up
			// with these registers non-zero (VIA VT8601). Hence this #ifdef -- in some cases,
			// you should never check the buses; in other cases, you have no choice.
			if ((buses & 0xFFFFFF) != 0) {
				unsigned int cmax;

				child->primary = buses & 0xFF;
				child->secondary = (buses >> 8) & 0xFF;
				child->subordinate = (buses >> 16) & 0xFF;
				child->number = child->secondary;
				cmax = bios_pci_scan_bus(child);
				if (cmax > max)
					max = cmax;
			} else
#endif
			{
				/* Configure the bus numbers for this bridge: the configuration
				   transactions will not be propagated by the bridge if it is not
				   correctly configured */
				buses &= 0xff000000;
				buses |= (((unsigned int) (child->primary) << 0) |
					((unsigned int) (child->secondary) << 8) |
					((unsigned int) (child->subordinate) << 16));
				pcibios_write_config_dword(domain, bus->number, devfn,
							PCI_PRIMARY_BUS, buses);
#ifdef CONFIG_E2K_SIC
#ifndef CONFIG_L_IOH2
				/* Here we need to setup system commutator register for PCI bridges
				 * (PCI Bridge Bus Number Reg - 0x18 - 0x1b ) that is in accordance with
				 * that of current bridge. We are interested only Subordinate Bus Number
				 * and Secondary Bus Number fields so it is useless to write Primary.
				 * According to iset manual aren't required for virtual PCI_2_PCI on
				 * bus 0 */
				if ((dev->device !=
					PCI_DEVICE_ID_MCST_VIRT_PCI_BRIDGE) &&
					(dev->device !=
					PCI_DEVICE_ID_MCST_PCIE_BRIDGE)) {
					Dprintk("PCI #%d: bios_pci_scan_bus: "
						"setup iohub for buses\n",
						domain);
					system_commutator_es2_ioh_write_dword(
						domain, bus->number, B1_BN,
						buses);
				} else if (dev->device ==
						PCI_DEVICE_ID_MCST_VIRT_PCI_BRIDGE) {
					unsigned long scrb_base;
					unsigned char iohub_num;

					pcibios_read_config_byte(domain,
						bus->number, devfn,
						IOHUB_DevNum, &iohub_num);
					Dprintk("PCI #%d: bios_pci_scan_bus: "
						"IOHUB.DevNum = 0x%x\n",
						domain, iohub_num);
					scrb_base =
						IOHUB_SCRB_DOMAIN_START(domain);
					/* Setup SCBA_0, SCBA_1 seems to be 0.
					 * If E2K_SCRB_PHYS_BASE has
					 * more then 32 bits you should setup
					 * SCBA_1 register. Its only for
					 * virtual PCI_2_PCI BRIDGE
					 */
					Dprintk("PCI #%d: bios_pci_scan_bus: "
						"setup for SCRB table ... "
						"virtual PCI_2_PCI on bus 0x%x "
						"slot %d func %d\n",
						domain, bus->number,
						PCI_SLOT(devfn),
						PCI_FUNC(devfn));
					pcibios_write_config_dword(domain,
						bus->number, devfn, PCI_SCBA_0,
						(scrb_base | 0x1));
					Dprintk("PCI #%d: bios_pci_scan_bus: "
						"SCBA_0 = 0x%x\n",
						domain, (scrb_base | 0x1));
				}
#endif /* ! CONFIG_L_IOH2 */
#endif
				/* Now we can scan all subordinate buses i.e. the bus behind the bridge */
				max = bios_pci_scan_bus(child);

				/* We know the number of buses behind this
				 * bridge. Set the subordinate
				 * bus number to its real value
				 */
				child->subordinate = max;

				buses = (buses & 0xff00ffff) |
					((unsigned int) (child->subordinate) <<
					16);
				pcibios_write_config_dword(domain, bus->number,
					devfn, PCI_PRIMARY_BUS, buses);
#ifdef CONFIG_E2K_SIC
#ifndef CONFIG_L_IOH2
				/* mirror the final bus numbers into the
				   system commutator (see comment above) */
				if ((dev->device !=
					PCI_DEVICE_ID_MCST_VIRT_PCI_BRIDGE) &&
					(dev->device !=
					PCI_DEVICE_ID_MCST_PCIE_BRIDGE)) {
					system_commutator_es2_ioh_write_dword(
						domain, bus->number, B1_BN,
						buses);
				}
#endif /* ! CONFIG_L_IOH2 */
#endif
				Dprintk("PCI #%d: bios_pci_scan_bus: found "
					"Bridge, primary = %d, number = %d, "
					"subordinate = %d\n",
					domain, child->primary, child->number,
					child->subordinate);
			}

			/* restore the command register saved before reprogramming */
			pcibios_write_config_word(domain, bus->number, devfn,
						PCI_COMMAND, cr);
skip_it:
			;
		}
	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */

	Dprintk("PCI #%d: bios_pci_scan_bus returning with max=%02x\n",
		domain, max);
#ifdef CONFIG_E2K_SIC
	/* Each time we leave bios_pci_scan_bus function we must decrease
	 * the bus hierarchy level */
	level--;
#endif
	return max;
}
+ * Note that this function will recurse at each bridge. + */ +struct bios_pci_bus *pci_init(domain) +{ + struct bios_pci_bus *root_bus; + + root_bus = &pci_root[pci_root_num]; + memset(root_bus, 0, sizeof(*root_bus)); + bios_set_pci_domain_nr(root_bus, domain); + set_iohub_dev_num(domain); + pci_root->subordinate = bios_pci_scan_bus(root_bus); + pci_root_num ++; + return (root_bus); +} diff --git a/arch/e2k/boot/bios/mc146818rtc.h b/arch/e2k/boot/bios/mc146818rtc.h new file mode 100644 index 000000000000..e77995eb215b --- /dev/null +++ b/arch/e2k/boot/bios/mc146818rtc.h @@ -0,0 +1,194 @@ +/* + * $Id: mc146818rtc.h,v 1.7 2006/11/10 15:39:48 kostin Exp $ + */ + +#ifndef _MCRTC_ +#define _MCRTC_ + +#include "../boot_io.h" + +#define RTC_BASE_PORT 0x70 + +#define RTC_PORT(x) (RTC_BASE_PORT + (x)) + +/* On PCs, the checksum is built only over bytes 16..45 */ +#define PC_CKS_RANGE_START 16 +#define PC_CKS_RANGE_END 45 +#define PC_CKS_LOC 46 + + +/* Linux bios checksum is built only over bytes 49..125 */ +#define LB_CKS_RANGE_START 49 +#define LB_CKS_RANGE_END 125 +#define LB_CKS_LOC 126 + +#define CMOS_READ(addr) ({ \ +bios_outb((addr), RTC_PORT(0)); \ +bios_inb(RTC_PORT(1)); \ +}) + +#define CMOS_WRITE(val, addr) ({ \ +bios_outb((addr), RTC_PORT(0)); \ +bios_outb((val), RTC_PORT(1)); \ +}) + +/* control registers - Moto names + */ +#define RTC_REG_A 10 +#define RTC_REG_B 11 +#define RTC_REG_C 12 +#define RTC_REG_D 13 + + +/********************************************************************** + * register details + **********************************************************************/ +#define RTC_FREQ_SELECT RTC_REG_A + +/* update-in-progress - set to "1" 244 microsecs before RTC goes off the bus, + * reset after update (may take 1.984ms @ 32768Hz RefClock) is complete, + * totalling to a max high interval of 2.228 ms. 
+ */ +# define RTC_UIP 0x80 +# define RTC_DIV_CTL 0x70 + /* divider control: refclock values 4.194 / 1.049 MHz / 32.768 kHz */ +# define RTC_REF_CLCK_4MHZ 0x00 +# define RTC_REF_CLCK_1MHZ 0x10 +# define RTC_REF_CLCK_32KHZ 0x20 + /* 2 values for divider stage reset, others for "testing purposes only" */ +# define RTC_DIV_RESET1 0x60 +# define RTC_DIV_RESET2 0x70 + /* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */ +# define RTC_RATE_SELECT 0x0F +# define RTC_RATE_NONE 0x00 +# define RTC_RATE_32786HZ 0x01 +# define RTC_RATE_16384HZ 0x02 +# define RTC_RATE_8192HZ 0x03 +# define RTC_RATE_4096HZ 0x04 +# define RTC_RATE_2048HZ 0x05 +# define RTC_RATE_1024HZ 0x06 +# define RTC_RATE_512HZ 0x07 +# define RTC_RATE_256HZ 0x08 +# define RTC_RATE_128HZ 0x09 +# define RTC_RATE_64HZ 0x0a +# define RTC_RATE_32HZ 0x0b +# define RTC_RATE_16HZ 0x0c +# define RTC_RATE_8HZ 0x0d +# define RTC_RATE_4HZ 0x0e +# define RTC_RATE_2HZ 0x0f + +/**********************************************************************/ +#define RTC_CONTROL RTC_REG_B +# define RTC_SET 0x80 /* disable updates for clock setting */ +# define RTC_PIE 0x40 /* periodic interrupt enable */ +# define RTC_AIE 0x20 /* alarm interrupt enable */ +# define RTC_UIE 0x10 /* update-finished interrupt enable */ +# define RTC_SQWE 0x08 /* enable square-wave output */ +# define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */ +# define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */ +# define RTC_DST_EN 0x01 /* auto switch DST - works f. 
USA only */ + +/**********************************************************************/ +#define RTC_INTR_FLAGS RTC_REG_C +/* caution - cleared by read */ +# define RTC_IRQF 0x80 /* any of the following 3 is active */ +# define RTC_PF 0x40 +# define RTC_AF 0x20 +# define RTC_UF 0x10 + +/**********************************************************************/ +#define RTC_VALID RTC_REG_D +# define RTC_VRT 0x80 /* valid RAM and time */ +/**********************************************************************/ + +#if 0 +static int rtc_checksum_valid(int range_start, int range_end, int cks_loc) +{ + int i; + unsigned sum, old_sum; + sum = 0; + for(i = range_start; i <= range_end; i++) { + sum += CMOS_READ(i); + } + sum = (~sum)&0x0ffff; + old_sum = ((CMOS_READ(cks_loc)<<8) | CMOS_READ(cks_loc+1))&0x0ffff; + return sum == old_sum; +} + +static void rtc_set_checksum(int range_start, int range_end, int cks_loc) +{ + int i; + unsigned sum; + sum = 0; + for(i = range_start; i <= range_end; i++) { + sum += CMOS_READ(i); + } + sum = ~(sum & 0x0ffff); + CMOS_WRITE(((sum >> 8) & 0x0ff), cks_loc); + CMOS_WRITE(((sum >> 0) & 0x0ff), cks_loc+1); +} +#endif + +#define RTC_CONTROL_DEFAULT (RTC_24H) +#define RTC_FREQ_SELECT_DEFAULT (RTC_REF_CLCK_32KHZ | RTC_RATE_1024HZ) + +static inline void rtc_init(int invalid) +{ +// unsigned char x; +// int cmos_invalid, checksum_invalid; + + rom_printk("RTC Init\n"); +#if 0 + /* See if there has been a CMOS power problem. */ + x = CMOS_READ(RTC_VALID); + cmos_invalid = !(x & RTC_VRT); + + /* See if there is a CMOS checksum error */ + checksum_invalid = !rtc_checksum_valid(PC_CKS_RANGE_START, + PC_CKS_RANGE_END,PC_CKS_LOC); + + if (invalid || cmos_invalid || checksum_invalid) { +// int i; + rom_printk("RTC:%s%s%s zeroing cmos\n", + invalid?" Clear requested":"", + cmos_invalid?" Power Problem":"", + checksum_invalid?" 
Checksum invalid":""); + CMOS_WRITE(0, 0x01); + CMOS_WRITE(0, 0x03); + CMOS_WRITE(0, 0x05); + for(i = 10; i < 48; i++) { + CMOS_WRITE(0, i); + } + + if (cmos_invalid) { + /* Now setup a default date of Sat 1 January 2000 */ + CMOS_WRITE(0, 0x00); /* seconds */ + CMOS_WRITE(0, 0x02); /* minutes */ + CMOS_WRITE(1, 0x04); /* hours */ + CMOS_WRITE(7, 0x06); /* day of week */ + CMOS_WRITE(1, 0x07); /* day of month */ + CMOS_WRITE(1, 0x08); /* month */ + CMOS_WRITE(0, 0x09); /* year */ + } + } + /* See if there is a LB CMOS checksum error */ + checksum_invalid = !rtc_checksum_valid(LB_CKS_RANGE_START, + LB_CKS_RANGE_END,LB_CKS_LOC); + if(checksum_invalid) + rom_printk("Invalid CMOS LB checksum\n"); + +#endif + /* Setup the real time clock */ + CMOS_WRITE(RTC_CONTROL_DEFAULT, RTC_CONTROL); + /* Setup the frequency it operates at */ + CMOS_WRITE(RTC_FREQ_SELECT_DEFAULT, RTC_FREQ_SELECT); + /* Make certain we have a valid checksum */ +#if 0 + rtc_set_checksum(PC_CKS_RANGE_START, + PC_CKS_RANGE_END,PC_CKS_LOC); + /* Clear any pending interrupts */ + (void) CMOS_READ(RTC_INTR_FLAGS); +#endif +} + +#endif diff --git a/arch/e2k/boot/bios/mga.c b/arch/e2k/boot/bios/mga.c new file mode 100644 index 000000000000..2d80540c9277 --- /dev/null +++ b/arch/e2k/boot/bios/mga.c @@ -0,0 +1,558 @@ + +#include +#include + +#include +#include +#include "pci_isa_config.h" +#include "ide_config.h" + +#include "southbridge.h" +#include "pci.h" +#include "mga.h" +#ifdef CONFIG_E2K_LEGACY_SIC +#include +#endif /* CONFIG_E2K_LEGACY_SIC */ + +#undef DEBUG_MGA_MODE +#undef DebugMGA +#define DEBUG_MGA_MODE 0 +#define DebugMGA if (DEBUG_MGA_MODE) rom_printk + +#undef TRACE_MSG +#define DBG_MODE 0 +#define DEBUG_MSG if (DBG_MODE) rom_printk + +typedef struct { + int div; // [6:0] Linear output divider + + int q; // [7:0] PPL*_Q + int p; // [9:0] PPL*_P + int po; // [0:0] PPL_PO + + int pixclock; +} clk_t; + +clk_t __calc( int pixclock ) +{ + clk_t res; + DEBUG_MSG("__calc start\n"); + res.pixclock = 
39721; + res.div = 0x2; + res.q = 0x95; + res.p = 0x106; + res.po = 0x1; + DEBUG_MSG("__calc finish\n"); + DEBUG_MSG( "Calulated: pixclock %d div %x q %x p %x po %x\n", res.pixclock, res.div, res.q, res.p, res.po ); + + return res; +} + +static inline void mga_write(unsigned long v, unsigned long reg) +{ + NATIVE_WRITE_MAS_W(reg, v, MAS_IOADDR); +} + +static inline unsigned long mga_read(unsigned long reg) +{ + return NATIVE_READ_MAS_W(reg, MAS_IOADDR); +} + +static inline void i2c_write(unsigned long i2c_vbase, unsigned long reg, uint8_t val ) +{ +#ifdef MGA_TRACE + uint32_t rdval; +#endif + DEBUG_MSG( " i2c_write: I2C[0x%03lx] <= 0x%02x\n", reg, val ); + mga_write( val, ((unsigned long)i2c_vbase + reg)); +#ifdef MGA_TRACE + rdval = mga_read(((unsigned long)i2c_vbase + reg)); + TRACE_MSG( " i2c_write: I2C[0x%03lx] => 0x%02x\n", reg, rdval ); +#endif +} + +static inline uint8_t i2c_read(unsigned long i2c_vbase, unsigned long reg ) +{ + uint32_t result = 0; + result = mga_read(((unsigned long)i2c_vbase + reg) ); + DEBUG_MSG( " i2c_read: I2C[0x%03lx] => 0x%02x\n", reg, result ); + return result; +} + +static void i2c_send(unsigned long i2c_vbase, int cmd, int data ) +{ +#if 0 + unsigned char status; +#endif + if (cmd & I2C_CR_WR) + i2c_write(i2c_vbase, I2C_REG_TXR, data ); + + i2c_write(i2c_vbase, I2C_REG_CR, cmd ); + +#if 0 + while ( ( status = i2c_read(i2c_vbase, I2C_REG_SR ) & I2C_SR_TIP ) ) { +// mdelay(1); + DEBUG_MSG( "waiting 1 msec...\n" ); + } +#endif +} + + +static int ramdac_write(unsigned long i2c_vbase, unsigned long ramdac_reg, uint8_t val ) +{ + // Sending RAMDAC device address + i2c_send(i2c_vbase, I2C_CR_STA | I2C_CR_WR, (I2C_RAMDAC_ADDR << 1) & I2C_WRITE_OP); + if ( i2c_read(i2c_vbase, I2C_REG_SR ) & I2C_SR_RxACK) { + DEBUG_MSG( "RAMDAC[0x%02lx] <= 0x%02x\t[FAILED]", ramdac_reg, val ); + return -1; + } + + // Sending RAMDAC register address + i2c_send(i2c_vbase, I2C_CR_WR, ramdac_reg ); + if ( i2c_read(i2c_vbase, I2C_REG_SR ) & I2C_SR_RxACK) { + 
DEBUG_MSG( "RAMDAC[0x%02lx] <= 0x%02x\t[FAILED]", ramdac_reg, val ); + return -1; + } + + // Sending RAMDAC register data + i2c_send(i2c_vbase, I2C_CR_STO | I2C_CR_WR, val); + if ( i2c_read(i2c_vbase, I2C_REG_SR ) & I2C_SR_RxACK) { + DEBUG_MSG( "RAMDAC[0x%02lx] <= 0x%02x\t[FAILED]", ramdac_reg, val ); + return -1; + } + + return 0; +} + + +static uint8_t ramdac_read(unsigned long i2c_vbase, unsigned long ramdac_reg ) +{ + uint8_t val = 0; + + // Sending RAMDAC device address + i2c_send(i2c_vbase, I2C_CR_STA | I2C_CR_WR, (I2C_RAMDAC_ADDR << 1) & I2C_WRITE_OP); + if ( i2c_read(i2c_vbase, I2C_REG_SR ) & I2C_SR_RxACK) { + DEBUG_MSG( "RAMDAC[0x%02lx] => ????\t[FAILED]", ramdac_reg ); + return -1; + } + + // Sending RAMDAC register address + i2c_send(i2c_vbase, I2C_CR_WR, ramdac_reg ); + if ( i2c_read(i2c_vbase, I2C_REG_SR ) & I2C_SR_RxACK) { + DEBUG_MSG( "RAMDAC[0x%02lx] => ????\t[FAILED]", ramdac_reg ); + return -1; + } + + // Sending RAMDAC device address + i2c_send(i2c_vbase, I2C_CR_STA | I2C_CR_WR, (I2C_RAMDAC_ADDR << 1) | I2C_READ_OP); + if ( i2c_read(i2c_vbase, I2C_REG_SR ) & I2C_SR_RxACK) { + DEBUG_MSG( "RAMDAC[0x%02lx] => ????\t[FAILED]", ramdac_reg ); + return -1; + } + + // Sending RAMDAC register data + i2c_send(i2c_vbase, I2C_CR_STO | I2C_CR_RD | I2C_CR_NACK, 0); + + val = i2c_read(i2c_vbase, I2C_REG_RXR ); + + return val; +} + +static void set_prescaler(unsigned long i2c_vbase, int value) +{ + DEBUG_MSG("set_prescaler start\n"); + i2c_write(i2c_vbase, I2C_REG_PRER_LO, value & 0xFF ); + i2c_write(i2c_vbase, I2C_REG_PRER_HI, (value >> 8) & 0xFF ); + DEBUG_MSG("set_prescaler finish\n"); +} + +static void __set_clk_fs(unsigned long i2c_vbase, uint8_t a, uint8_t b, uint8_t c ) +{ + uint8_t d = FS_REF; + + DEBUG_MSG("__set_clk_fs start\n"); + // ClkA_FS[2:0] + ramdac_write(i2c_vbase, 0x08, ( ramdac_read(i2c_vbase, 0x08 ) & 0x7F ) | ( ( a & 0x01 ) << 7 ) ); + ramdac_write(i2c_vbase, 0x0E, ( ramdac_read(i2c_vbase, 0x0E ) & 0xFC ) | ( ( a & 0x06 ) >> 1 ) ); + // 
ClkB_FS[2:0] + ramdac_write(i2c_vbase, 0x0A, ( ramdac_read(i2c_vbase, 0x0A ) & 0x7F ) | ( ( b & 0x01 ) << 7 ) ); + ramdac_write(i2c_vbase, 0x0E, ( ramdac_read(i2c_vbase, 0x0E ) & 0xF3 ) | ( ( b & 0x06 ) << 1 ) ); + // ClkC_FS[2:0] + ramdac_write(i2c_vbase, 0x0C, ( ramdac_read(i2c_vbase, 0x0C ) & 0x7F ) | ( ( c & 0x01 ) << 7 ) ); + ramdac_write(i2c_vbase, 0x0E, ( ramdac_read(i2c_vbase, 0x0E ) & 0xCF ) | ( ( c & 0x06 ) << 3 ) ); + // ClkD_FS[2:0] + ramdac_write(i2c_vbase, 0x0D, ( ramdac_read(i2c_vbase, 0x0D ) & 0x7F ) | ( ( d & 0x01 ) << 7 ) ); + ramdac_write(i2c_vbase, 0x0E, ( ramdac_read(i2c_vbase, 0x0E ) & 0x3F ) | ( ( d & 0x06 ) << 5 ) ); + DEBUG_MSG("__set_clk_fs finish\n"); +} + +static void __set_ppl(unsigned long i2c_vbase, int index, uint8_t Q, uint16_t P, uint8_t PO ) +{ + unsigned long base; + + switch( index ) { + case 2 : + base = 0x11; + break; + case 3 : + base = 0x14; + break; + default : + rom_printk( "Invalid PPL index %d\n", index ); + return; + } + DEBUG_MSG("__set_ppl start\n"); + // PPL*_Q[7:0] + ramdac_write(i2c_vbase, base + 0, Q ); + + // PPL*_P[7:0] + ramdac_write(i2c_vbase, base + 1, P & 0xFF ); + { + uint8_t val; + uint8_t LF = 0x0; + + int P_T = ( 2 * ( (P & 0x3FF) + 3 ) ) + (PO & 0x01); + + if ( P_T <= 231 ) + LF = 0x0; + else if ( P_T <= 626 ) + LF = 0x1; + else if ( P_T <= 834 ) + LF = 0x2; + else if ( P_T <= 1043 ) + LF = 0x3; + else if ( P_T <= 1600 ) + LF = 0x4; + + + // PPL*_En, PPL*_LF, PPL*_PO, PPL*_P[9:8] + val = ( P & 0x300 ) >> 8; + val |= ( PO & 0x1 ) << 2; + val |= LF << 3; + //val |= (enabled & 0x01) << 6; + + ramdac_write(i2c_vbase, base + 2, val ); + } + DEBUG_MSG("__set_ppl finish\n"); +} + + +static void __set_enabled(unsigned long i2c_vbase, int index, uint8_t enabled ) +{ + unsigned long base; + uint8_t val; + + switch( index ) { + case 2 : + base = 0x11; + break; + case 3 : + base = 0x14; + break; + default : + rom_printk( "Invalid PPL index %d\n", index ); + return; + } + + DEBUG_MSG("__set_enabled start\n"); + val 
= ramdac_read(i2c_vbase, base + 2 ); + val = val & (~(0x01 << 6)); + val |= (enabled & 0x01) << 6; + ramdac_write(i2c_vbase, base + 2, val ); + DEBUG_MSG("__set_enabled finish\n"); +} + +void __set_pixclock( unsigned long i2c_vbase, uint32_t pixclock ) +{ + clk_t vidclk = __calc( pixclock ); + + set_prescaler(i2c_vbase, NORMAL_SCL ); + + // Enable I2C core + i2c_write(i2c_vbase, I2C_REG_CTR, I2C_CTR_EN ); + + ramdac_write(i2c_vbase, 0x08, 0x0 ); + + ramdac_write(i2c_vbase, 0x0C, 0x0 ); + __set_clk_fs(i2c_vbase, FS_REF, FS_REF, FS_REF ); + + // Reset vidclk enabled bit + __set_enabled(i2c_vbase, 2, 0 ); + __set_ppl(i2c_vbase, 2, vidclk.q, vidclk.p, vidclk.po ); + + __set_clk_fs(i2c_vbase, FS_PPL2_0, FS_REF, FS_PPL2_0 ); + ramdac_write(i2c_vbase, 0x08, ( ( FS_PPL2_0 & 0x01 ) << 7 ) | (vidclk.div & 0x7F) ); + ramdac_write(i2c_vbase, 0x0C, ( ( FS_PPL2_0 & 0x01 ) << 7 ) | (vidclk.div & 0x7F) ); + + // Set vidclk enabled bit + __set_enabled(i2c_vbase, 2, 1 ); + + + // Disable I2C core + i2c_write(i2c_vbase, I2C_REG_CTR, 0x0 ); +} + +static void MMIO_WRITE( struct mgam83fb_par* p, unsigned long reg, uint32_t val ) +{ + DEBUG_MSG( "MMIO[0x%03lx] <= 0x%08x\n", reg, val ); + mga_write( val, ((unsigned long)p->mmio.vbase + reg) ); + DEBUG_MSG( "Sleeping 10 msecs...\n" ); +// mdelay(10); +} + +struct fb_bitfield { + __u32 offset; /* beginning of bitfield */ + __u32 length; /* length of bitfield */ + __u32 msb_right; /* != 0 : Most significant bit is */ + /* right */ +}; +#undef MGA_TEST +#ifdef MGA_TEST +static struct { struct fb_bitfield transp, red, green, blue; } colors = { + { 0, 8, 0}, { 8, 8, 0}, { 16, 8, 0}, { 24, 8, 0} +}; +#endif + +int __set_mode( struct mgam83fb_par* p ) +{ + int hsync = p->hsync_len; // The Horizontal Syncronization Time (Sync Pulse ) + int hgdel = p->left_margin; // The Horizontal Gate Delay Time (Back Porch) + int hgate = p->xres; // The Horizontal Gate Time (Active Time) + int hlen = hsync + hgdel + hgate + p->right_margin; // The Horizontal 
Length Time (Line Total) + int vsync = p->vsync_len; // The Vertical Syncronization Time (Sync Pulse ) + int vgdel = p->upper_margin; // The Vertical Gate Delay Time (Back Porch) + int vgate = p->yres; // The Vertical Gate Time (Active Time) + int vlen = vsync + vgdel + vgate + p->lower_margin; // The Vertical Length Time (Frame total) + int vbl = CTRL_VBL1024; // Video Memory Burst Length + int ctrl = CTRL_BL_NEG | vbl; + + + DEBUG_MSG("__set_mode: start\n"); + switch( p->bits_per_pixel ) { + case 8 : + ctrl |= CTRL_CD_8BPP | CTRL_PC_PSEUDO; + break; + case 16 : + ctrl |= CTRL_CD_16BPP; +#ifdef __LITTLE_ENDIAN +// ctrl |= CTRL_IBBO; +#endif + break; + case 24 : + ctrl |= CTRL_CD_24BPP; +#ifdef __LITTLE_ENDIAN +// ctrl |= CTRL_IBBO; +#endif + break; + case 32 : + ctrl |= CTRL_CD_32BPP; +#ifdef __LITTLE_ENDIAN +// ctrl |= CTRL_IBBO; +#endif + break; + default: + rom_printk( "Invalid color depth: %s %s %d\n", __FILE__, __FUNCTION__, __LINE__ ); + return -1; + } + + ctrl |= ( p->sync & FB_SYNC_COMP_HIGH_ACT ) ? CTRL_CSYNC_HIGH : CTRL_CSYNC_LOW; + ctrl |= ( p->sync & FB_SYNC_VERT_HIGH_ACT ) ? CTRL_VSYNC_HIGH : CTRL_VSYNC_LOW; + ctrl |= ( p->sync & FB_SYNC_HOR_HIGH_ACT ) ? 
CTRL_HSYNC_HIGH : CTRL_HSYNC_LOW; + + hsync--, hgdel--, hgate--, vsync--, vgdel--, vgate--, hlen--, vlen--; + MMIO_WRITE( p, REG_CTRL, ctrl ); + MMIO_WRITE( p, REG_HTIM, hsync << 24 | hgdel << 16 | hgate ); + MMIO_WRITE( p, REG_VTIM, vsync << 24 | vgdel << 16 | vgate ); + MMIO_WRITE( p, REG_HVLEN, hlen << 16 | vlen ); + MMIO_WRITE( p, REG_VBARa, 0x0 ); + + DEBUG_MSG( "hsync: %d hgdel: %d hgate %d\n", hsync, hgdel, hgate ); + DEBUG_MSG( "vsync: %d vgdel: %d vgate %d\n", vsync, vgdel, vgate ); + DEBUG_MSG( "hlen: %d vlen: %d\n", hlen, vlen ); + MMIO_WRITE( p, REG_CTRL, ctrl | CTRL_VEN ); + DEBUG_MSG("__set_mode: finish\n"); + return 0; +} + + +#ifdef MGA_TEST +static inline void +bios_writel(u32 l, volatile void *addr) +{ + NATIVE_WRITE_MAS_W((e2k_addr_t)addr, l, MAS_IOADDR); +} + +static inline void +bios_writell(u64 q, volatile void *addr) +{ + NATIVE_WRITE_MAS_D((e2k_addr_t)addr, q, MAS_IOADDR); +} + +void drawStripe( unsigned long addr, + int yB, int yE, + int rB, int rE, int gB, int gE, int bB, int bE ) +{ + int x, y; + int xres = 640; + int bpp = 32; +// unsigned int once = 0; + + addr += yB * xres * (bpp >> 3); +/* rom_printk("addr = 0x%x, yB = %d, yE = %d, rB = %d, rE = %d, gB = %d, gE = %d" + " bB = %d, bE = %d\n", addr, yB, yE, rB, rE, gB, gE, bB, bE); */ + for ( y = yB; y < yE; y++ ) { + for ( x = 0; x < xres; x++ ) { +/* float factor = (float)x / (float)xres; + unsigned int r = rB + factor * ( rE - rB ); + unsigned int g = gB + factor * ( gE - gB ); + unsigned int b = bB + factor * ( bE - bB );*/ + unsigned int r = rB + 1 * ( rE - rB ); + unsigned int g = gB + 1 * ( gE - gB ); + unsigned int b = bB + 1 * ( bE - bB ); +#if 0 + if (once != 757){ + rom_printk("r = %d, g = %d, b = %d\n", r, g, b); + once++; + } +#endif + bios_writel(r << colors.red.offset | + g << colors.green.offset | + b << colors.blue.offset, (void *)addr); + + addr += bpp >> 3; + } + } +} + +#define CNVT_TOHW(val,width) ((((val)<<(width))+0x7FFF-(val))>>16) +void draw(struct bios_pci_dev 
*dev) +{ + u64 fb_phys_addr; + int stripeHeight = 480 / 4; + int rE = ( 1 << colors.red.length ) - 1; + int gE = ( 1 << colors.red.length ) - 1; + int bE = ( 1 << colors.red.length ) - 1; + + fb_phys_addr = dev->base_address[PCI_MEM_BAR]; + + drawStripe( fb_phys_addr, 0, 1 * stripeHeight, 0, rE, 0, gE, 0, bE ); + drawStripe( fb_phys_addr, 1 * stripeHeight, 2 * stripeHeight, 0, 0, 0, 0, 0, bE ); + drawStripe( fb_phys_addr, 2 * stripeHeight, 3 * stripeHeight, 0, 0, 0, gE, 0, 0 ); + drawStripe( fb_phys_addr, 3 * stripeHeight, 4 * stripeHeight, 0, rE, 0, 0, 0, 0 ); +}; +#endif + + +void enable_mga(void) +{ + struct bios_pci_dev *dev; + struct mgam83fb_par p; + + rom_printk("Scanning PCI bus for MGA video card ..."); + + dev = bios_pci_find_device(PCI_VENDOR_ID_MGAM83, PCI_DEVICE_ID_MGAM83, + NULL); + + if (dev) { + SB_bus = dev->bus->number; + SB_device = PCI_SLOT(dev->devfn); + rom_printk("found on bus %d device %d\n", SB_bus, SB_device); + DebugMGA("--------- VIDEO BIOS ------\n"); + DebugMGA("Class: %X\n", dev->class); + DebugMGA("command: %x\n", dev->command); + DebugMGA("base_address[0]: %04x\n", dev->base_address[0]); + DebugMGA("size[0]: %04x\n", dev->size[0]); + DebugMGA("base_address[1]: %04x\n", dev->base_address[1]); + DebugMGA("size[1]: %04x\n", dev->size[1]); + DebugMGA("base_address[2]: %04x\n", dev->base_address[2]); + DebugMGA("size[2]: %04x\n", dev->size[2]); + DebugMGA("base_address[3]: %04x\n", dev->base_address[3]); + DebugMGA("size[0]: %04x\n", dev->size[3]); + DebugMGA("base_address[4]: %04x\n", dev->base_address[4]); + DebugMGA("size[4]: %04x\n", dev->size[4]); + DebugMGA("base_address[5]: %04x\n", dev->base_address[5]); + DebugMGA("size[5]: %04x\n", dev->size[5]); + DebugMGA("rom_address: %04x\n", dev->rom_address); + DebugMGA("rom_size %04x\n", dev->rom_size); + p.mem.base = dev->base_address[PCI_MEM_BAR]; + p.mem.len = dev->size[PCI_MEM_BAR]; + p.mem.vbase = dev->base_address[PCI_MEM_BAR]; + + p.mmio.base = dev->base_address[PCI_MMIO_BAR]; + 
p.mmio.len = dev->size[PCI_MMIO_BAR]; + p.mmio.vbase = dev->base_address[PCI_MMIO_BAR]; + + p.i2c.base = dev->base_address[PCI_I2C_BAR]; + p.i2c.len = dev->size[PCI_I2C_BAR]; + p.i2c.vbase = dev->base_address[PCI_I2C_BAR]; + + /* Update par */ + p.xres = 0x280; + p.yres = 0x1e0; + p.xres_virtual = 0x280; + p.yres_virtual = 0x1e0; + p.xoffset = 0; + p.yoffset = 0; + p.left_margin = 0x28; + p.right_margin = 0x18; + p.hsync_len = 0x60; + p.upper_margin = 0x20; + p.lower_margin = 0xb; + p.vsync_len = 0x2; + p.bits_per_pixel = 0x20; + p.pixclock = 0x9b29; + p.sync = 0; + + DEBUG_MSG("!!! enable_mga: setting pixclock !!!\n"); + __set_pixclock( (unsigned long)p.i2c.vbase, p.pixclock ); + __set_mode( &p ); + rom_printk("MGA Initialization complete\n"); +#ifdef MGA_TEST + draw(dev); +#endif + } else { + rom_printk("!!! NOT FOUND !!!\n"); + } +} + +#ifdef CONFIG_E2K_LEGACY_SIC +void enable_embeded_graphic(void) +{ + struct bios_pci_dev *dev; + unsigned int hb_cfg; + unsigned short vpci_cmd; + + hb_cfg = early_readl_hb_reg(HB_PCI_CFG); + if (!(hb_cfg & HB_CFG_IntegratedGraphicsEnable)) { + rom_printk("Embeded graphic disabled, " + "legacy VGA mode impossible\n"); + return; + } + + rom_printk("Scanning PCI bus for Embeded MGA2 card ..."); + + dev = bios_pci_find_device(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_MGA2, NULL); + if (dev) { + rom_printk("found on %d:%d:%d\n", + dev->bus->number, + PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); + } else { + rom_printk("!!! 
NOT FOUND !!!\n"); + return; + } + hb_cfg |= HB_CFG_IntegratedVgaEnable; + hb_cfg &= ~HB_CFG_ShareGraphicsInterrupts; + early_writel_hb_reg(hb_cfg, HB_PCI_CFG); + rom_printk("host bridge CFG: enable legacy VGA mode 0x%X\n", + early_readl_hb_reg(HB_PCI_CFG)); + + vpci_cmd = early_readw_eg_reg(PCI_COMMAND); + vpci_cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + early_writew_eg_reg(vpci_cmd, PCI_COMMAND); + rom_printk("Embeded Graphic CMD: enable IO/MMIO/DMA 0x%04x\n", + vpci_cmd); +} +#endif /* CONFIG_E2K_LEGACY_SIC */ + + diff --git a/arch/e2k/boot/bios/mga.h b/arch/e2k/boot/bios/mga.h new file mode 100644 index 000000000000..3febc87f38c7 --- /dev/null +++ b/arch/e2k/boot/bios/mga.h @@ -0,0 +1,283 @@ +#ifndef MGA_H +#define MGA_H + +# define PCI_VENDOR_ID_MGAM83 0x108e +# define PCI_DEVICE_ID_MGAM83 0x8000 + + // Indexes of pci_dev.resource[] +# define PCI_MMIO_BAR 0 +# define PCI_MEM_BAR 1 +# define PCI_I2C_BAR 2 + +/******************************************************************************* + * MMIO Registers + ******************************************************************************* + */ +#define REG_CTRL 0x000 // Control Register +#define REG_STAT 0x004 // Status Register +#define REG_HTIM 0x008 // Horizontal Timing Register +#define REG_VTIM 0x00c // Vertical Timing Register +#define REG_HVLEN 0x010 // Horizontal and Vertical Length Register +#define REG_VBARa 0x014 // Video Memory Base Address Register A +#define REG_VBARb 0x018 // Video Memory Base Address Register B +#define REG_C0XY 0x030 // Cursor 0 X,Y Register +#define REG_C0BAR 0x034 // Cursor0 Base Address register +#define REG_C0CR 0x040 // Cursor0 Color Registers +#define REG_C1XY 0x070 // Cursor 0 X,Y Register +#define REG_C1BAR 0x074 // Cursor0 Base Address register +#define REG_C1CR 0x080 // Cursor0 Color Registers +#define REG_PCLT 0x800 // 8bpp Pseudo Color Lockup Table +#define REG_TST_D 0x01C // Test Mode + +// BitBlt module registers +#define BBR0 0x1000 // 
CTRL_REG if writing + // STAT_REG if reading +#define BBR1 0x1004 // WINDOW_REG (size of the window to copy) +#define BBR2 0x1008 // SADDR_REG (Source address reg - byte offset + // inside framebuffer) invisible framebuffer part +#define BBR3 0x100c // DADDR_REG (Destination address reg - byte offset + // inside framebuffer) visible framebuffer part +#define BBR4 0x1010 // PITCH_REG (value to increment both SADDR_REG and + // DADDR_REG to have them pointing to the next + // lines of WINDOWS (source and destination windows + // respecteviely)). PITCH_REG has 2 parts. The highest 16 + // little-endian bits are for the destination and the lowest + // ones are for the source. Determined in bytes +#define BBR5 0x1014 // BG_REG - Background color (color extenshion mode for + // originally monochromed color only) +#define BBR6 0x1018 // FG_REG - Foreground color (color extenshion mode for + // originally monochromed color only) +#define BBR7 0x101c // RESERVED + /* Bit feilds for CTRL_REG (little-endian mode) */ +#define CE_EN (1 << 0) // Enable color extenshion (for + // originally monochromed color only) +#define PAT_EN (1 << 1) // Enable 8x8 pattern multiplication + // (Pattern Copy). Doesn't work if + // continuous address generation for source + // is enabled. +#define SFILL_EN (1 << 2) // Enable continuous color pouring mode + // Works only in color extenshion mode +#define INV_EN (1 << 3) // monochromed image invertion mode + // Works only in color extenshion mode +#define TR_EN (1 << 4) // Transparency mode + // Works only in color extenshion mode +#define HDIR (1 << 5) // Horizontal (when drawing a line of WINDOW) + // incrementation sign (0 --->; 1 <---;) +#define VDIR (1 << 6) // Vertical incrementation sign (0 --->; 1 <---;) + // The same as PITCH_REG parts sign +#define SRC_MODE (1 << 7) // Enable address generation for source + // If enabled makes module to increment source + // address continuously inspite of lowest part of + // PITCH_REG. 
(This mode has to be enabled if you + // want to have your image object continuous in + // invisible part of framebuffer memory due to economy + // reason i think) +/*#define TERM_MODE*///FIXME // 32 bits word alignment when reached end of a line + // Address generation for source mode only +#define BPP_08 (0x00 << 10) +#define BPP_16 (0x01 << 10) +#define BPP_24 (0x02 << 10) +#define BPP_32 (0x03 << 10) + +#define ROP_02 (0x02 << 12) // DST = DST &~ SRC +#define ROP_03 (0x03 << 12) // DST = DST +#define ROP_04 (0x04 << 12) // DST = ~DST & SRC +#define ROP_05 (0x05 << 12) // DST = SRC +#define ROP_06 (0x06 << 12) // DST = DST != SRC +#define ROP_07 (0x07 << 12) // DST = DST | SRC +#define ROP_08 (0x08 << 12) // DST = ~DST &~ SRC +#define ROP_09 (0x09 << 12) // DST = DST == SRC +#define ROP_0A (0x0a << 12) // DST = ~SRC +#define ROP_0B (0x0b << 12) // DST = DST | ~SRC +#define ROP_0C (0x0c << 12) // DST = ~DST +#define ROP_0D (0x0d << 12) // DST = ~DST | SRC +#define ROP_0E (0x0e << 12) // DST = ~DST | ~SRC +#define ROP_0F (0x0f << 12) // DST = {1} + +/*#define SOFFS*///FIXME + +#define SDMA_EN (1 << 20) // Enable DMA for Source +#define DDMA_EN (1 << 21) // Enable DMA for Destination + +#define PAUSE (1 << 27) // BitBlt operation delay + // Used for the executive control only +#define NFIE (1 << 28) // Enable interrupt in the case of unfilling + // task buffer + // Used for the executive control only +#define NPIE (1 << 29) // Enable interrupt in the case of unexecuting + // of BitBlt operation + // Used for the executive control only +#define ABORT (1 << 30) // BitBlt operation abortion +#define START (1 << 31) // BitBlt operation starting + /* Bit feilds for STAT_REG (little-endian mode) */ +#define FULL (1 << 30) // Double buffering state + // 0 - Buffer isn't filled up, so another + // operation may be initiated + // 1 - Buffer is filled up +#define PROCESS (1 << 31) // BitBlt operation carring out state + // 0 - The module isn't carring out BitBlt operation + 
// 1 - BitBlt operation is running + + +// Control Register REG_CTRL +#define CTRL_IBBO (0x1<<31) // Inverse Bus Byte Order +#define CTRL_SAP +#define CTRL_HC1R_32 0 // Hardware Cursor1 Resolution 32x32 +#define CTRL_HC1R_64 (0x1<<25) // 64x64 +#define CTRL_HC1E (0x1<<24) // Hardware Cursor1 Enabled +#define CTRL_HC0R_32 0 // Hardware Cursor0 Resolution 32x32 +#define CTRL_HC0R_64 (0x1<<21) // 64x64 +#define CTRL_HC0E (0x1<<20) // Hardware Cursor0 Enabled +#define CTRL_TST (0x1<<17) // TODO: ????? +#define CTRL_BL_POS 0 // Blanking Polarization Level Positive +#define CTRL_BL_NEG (0x1<<15) // Negative +#define CTRL_CSYNC_HIGH 0 // Composite Synchronization Pulse Polarization Level Positive +#define CTRL_CSYNC_LOW (0x1<<14) // Negative +#define CTRL_VSYNC_HIGH 0 // Vertical Synchronization Pulse Polarization Level Positive +#define CTRL_VSYNC_LOW (0x1<<13) // Negative +#define CTRL_HSYNC_HIGH 0 // Horizontal Synchronization Pulse polarization Level Positive +#define CTRL_HSYNC_LOW (0x1<<12) // Negative + +#define CTRL_PC_GRAY 0 // 8-bit Pseudo Color Grayscale +#define CTRL_PC_PSEUDO (0x1<<11) // Pseudo Color + +#define CTRL_CD_8BPP 0 // Color Depth 8bpp +#define CTRL_CD_16BPP (0x1<<9) // 16bpp +#define CTRL_CD_24BPP (0x2<<9) // 24bpp +#define CTRL_CD_32BPP (0x3<<9) // 32bpp + +#define CTRL_VBL_1 0 // Video Memory Burst Length 1 cycle +#define CTRL_VBL_2 (0x1<<7) // 2 cycles +#define CTRL_VBL_4 (0x2<<7) // 4 cycles +#define CTRL_VBL_8 (0x3<<7) // 8 cycles +#define CTRL_VBL1024 (0x203<<7) // 16 cycles (extension) + +#define CTRL_CBSWE (0x1<<6) // CLUT Bank Switching Enable +#define CTRL_VBSWE (0x1<<5) // Video Bank Switching Enable +#define CTRL_CBSIE (0x1<<4) // CLUT Bank Switch Interrupt Enable +#define CTRL_VBSIE (0x1<<3) // VideoBank Switch Interrupt Enable +#define CTRL_HIE (0x1<<2) // HSync Interrupt Enable +#define CTRL_VIE (0x1<<1) // VSync Interrupt Enable +#define CTRL_VEN (0x1<<0) // Video Enable + +// Status Register REG_STAT +#define STAT_HC1A (0x1<<24) 
// Hardware cursor1 available +#define STAT_HC0A (0x1<<20) // Hardware cursor0 available +#define STAT_ACMP (0x1<<17) // Active CLUT Memory Page +#define STAT_AVMP (0x1<<16) // Active Video Memory Page +#define STAT_CBSINT (0x1<<7) // CLUT Bank Switch Interrupt Pending +#define STAT_VBSINT (0x1<<6) // Bank Switch Interrupt Pending +#define STAT_HINT (0x1<<5) // Horizontal Interrupt Pending +#define STAT_VINT (0x1<<4) // Vertical Interrupt Pending +#define STAT_LUINT (0x1<<1) // Line FIFO Under-Run Interrupt Pending +#define STAT_SINT (0x1<<0) // System Error Interrupt Pending + +#define FB_SYNC_HOR_HIGH_ACT 1 /* horizontal sync high active */ +#define FB_SYNC_VERT_HIGH_ACT 2 /* vertical sync high active */ +#define FB_SYNC_COMP_HIGH_ACT 8 /* composite sync high active */ + +/******************************************************************************* + * I2C Registers + ******************************************************************************* + */ +#define I2C_REG_PRER_LO (0x00 << 2) // Clock Prescale register lo-byte (RW) +#define I2C_REG_PRER_HI (0x01 << 2) // Clock Prescale register hi-byte (RW) +#define I2C_REG_CTR (0x02 << 2) // Control Register (RW) +#define I2C_REG_TXR (0x03 << 2) // Transmit Register (W) +#define I2C_REG_RXR (0x03 << 2) // Receive Register (R) +#define I2C_REG_CR (0x04 << 2) // Command Register (W) +#define I2C_REG_SR (0x06 << 2) // Status Register (R) +#define I2C_REG_RESET (0x07 << 2) // Reset Register + +// Prescaler divider evaluates as (PCICLK/(5*SCLK))-1 +#define NORMAL_SCL 0x3F + +// Control Register bits +#define I2C_CTR_EN (1 << 7) // I2C core enable bit +#define I2C_CTR_IEN (1 << 6) // I2C core interrupt enable bit + +// Command Register bits +#define I2C_CR_STA (1 << 7) // generate (repeated) start condition +#define I2C_CR_STO (1 << 6) // generate stop condition +#define I2C_CR_RD (1 << 5) // read from slave +#define I2C_CR_WR (1 << 4) // write to slave +#define I2C_CR_NACK (1 << 3) // when a receiver, sent I2C_CR_NACK 
+#define I2C_CR_IACK (1 << 0) // Interrupt acknowledge. When set, clears pending interrrupt + +// Status Register bits +#define I2C_SR_RxACK (1 << 7) // Receive acknowledge from slave. '1' - no acknowledge received +#define I2C_SR_BUSY (1 << 6) // I2C bus busy. '1' after START, '0' after STOP +#define I2C_SR_AL (1 << 5) // Arbitration lost +#define I2C_SR_TIP (1 << 1) // Transfer in progress. '1' when transferring data +#define I2C_SR_IF (1 << 0) // Interrupt flag + + +// Transmit Register operations +#define I2C_READ_OP 0x01 // Reading from slave ( x << 1 | I2C_READ_OP ) +#define I2C_WRITE_OP 0xFE // Writing to slave ( x << 1 & I2C_WRITE_OP ) + +/******************************************************************************* + * RAMDAC + ******************************************************************************* + */ +#define I2C_RAMDAC_ADDR 0x69 + +#define FS_REF 0x0 // Reference clock [000] +#define FS_PPL1_0 0x2 // PPL1 0* Phase +#define FS_PPL1_180 0x3 // PPL1 180* Phase +#define FS_PPL2_0 0x4 // PPL2 0* Phase +#define FS_PPL2_180 0x5 // PPL2 180* Phase +#define FS_PPL3_0 0x6 // PPL3 0* Phase +#define FS_PPL3_180 0x7 // PPL3 180* Phase + +// External clock frequency 14.3181 Mhz +#define PIXCLOCK_EXT 69841 + + +struct mgam83fb_par { + int bus_type; // 0 - PCI, 1 - SBUS + int index; // MGAM index + + struct { + unsigned long base; // phys address + unsigned long vbase; // virtual address + unsigned int len; + } mem; + struct { + unsigned long base; // phys address + unsigned long vbase; // virtual address + unsigned int len; + } mmio; + struct { + unsigned long base; // phys address + unsigned long vbase; // virtual address + unsigned int len; + } i2c; + + + + /* Current videomode **************************************************/ + __u32 xres; // visible resolution + __u32 yres; + __u32 xres_virtual; // virtual resolution + __u32 yres_virtual; + __u32 xoffset; // offset from virtual to visible + __u32 yoffset; // resolution + + __u32 bits_per_pixel; // 
Bits per pixel + + __u32 pixclock; // pixel clock in ps (pico seconds) + __u32 left_margin; // time from sync to picture + __u32 right_margin; // time from picture to sync + __u32 upper_margin; // time from sync to picture + __u32 lower_margin; + __u32 hsync_len; // length of horizontal sync + __u32 vsync_len; // length of vertical sync + + __u32 sync; + + u32 pseudo_palette[16]; + +}; + +#endif /* MGA_H */ diff --git a/arch/e2k/boot/bios/mpspec.c b/arch/e2k/boot/bios/mpspec.c new file mode 100644 index 000000000000..3d1089db71ac --- /dev/null +++ b/arch/e2k/boot/bios/mpspec.c @@ -0,0 +1,371 @@ +/* + * $Id: mpspec.c,v 1.8 2009/02/24 15:13:21 atic Exp $ + * From linuxbios.org + */ + +#include +#include + +#include +#include +#include "../pic.h" +#include "pci.h" + +#undef BIOS_DEBUG +#define MPSPEC_DEBUG 0 +#define BIOS_DEBUG MPSPEC_DEBUG + +#define CONFIG_DEBUG_MPTABLE 0 + +#include "printk.h" + +unsigned char smp_compute_checksum(void *v, int len) +{ + unsigned char *bytes; + unsigned char checksum; + int i; + bytes = v; + checksum = 0; + for(i = 0; i < len; i++) { + checksum -= bytes[i]; + } + return checksum; +} + +static int +mpf_do_checksum(unsigned char *mp, int len) +{ + int sum = 0; + + while (len--) + sum += *mp++; + + return 0x100 - (sum & 0xFF); +} + +void smp_write_floating_table(struct intel_mp_floating *mpf) +{ + mpf->mpf_signature[0] = '_'; + mpf->mpf_signature[1] = 'M'; + mpf->mpf_signature[2] = 'P'; + mpf->mpf_signature[3] = '_'; + mpf->mpf_physptr = (unsigned long)(((char *)mpf) + SMP_FLOATING_TABLE_LEN); + mpf->mpf_length = 1; + mpf->mpf_specification = 4; + mpf->mpf_checksum = 0; + mpf->mpf_feature1 = 0; + mpf->mpf_feature2 = 0; + mpf->mpf_feature3 = 0; + mpf->mpf_feature4 = 0; + mpf->mpf_feature5 = 0; +/// mpf->mpf_checksum = smp_compute_checksum(mpf, mpf->mpf_length*16); + mpf->mpf_checksum = mpf_do_checksum((unsigned char *)mpf, sizeof (*mpf)); +} + +void *smp_next_mpc_entry(struct mpc_table *mc) +{ + void *v; + v = (void *)(((char *)mc) + 
mc->mpc_length); + return v; +} +static void smp_add_mpc_entry(struct mpc_table *mc, unsigned length) +{ + mc->mpc_length += length; + mc->mpc_oemcount++; +} + +void *smp_next_mpe_entry(struct mpc_table *mc) +{ + void *v; + v = (void *)(((char *)mc) + mc->mpc_length + mc->mpe_length); + return v; +} +static void smp_add_mpe_entry(struct mpc_table *mc, mpe_t mpe) +{ + mc->mpe_length += mpe->mpe_length; +} + +void smp_write_processor(struct mpc_table *mc, + unsigned char apicid, unsigned char apicver, + unsigned char cpuflag, unsigned int cpufeature, + unsigned int featureflag, unsigned int cepictimerfreq) +{ + struct mpc_config_processor *mpc; + mpc = smp_next_mpc_entry(mc); + memset(mpc, '\0', sizeof(*mpc)); + mpc->mpc_type = MP_PROCESSOR; + mpc->mpc_apicid = apicid; + mpc->mpc_apicver = apicver; + mpc->mpc_cpuflag = cpuflag; + mpc->mpc_cpufeature = cpufeature; + mpc->mpc_featureflag = featureflag; + mpc->mpc_cepictimerfreq = cepictimerfreq; + smp_add_mpc_entry(mc, sizeof(*mpc)); +} + +//unsigned int initial_apicid[MAX_CPUS] = +//{ +// 0, 1, [2 ... MAX_CPUS - 1] = -1 +//}; + +/* If we assume a symmetric processor configuration we can + * get all of the information we need to write the processor + * entry from the bootstrap processor. + * Plus I don't think linux really even cares. + * Having the proper apicid's in the table so the non-bootstrap + * processors can be woken up should be enough. 
+ */ +void smp_write_processors(struct mpc_table *mc, + unsigned int phys_cpu_num) +{ + int i; + int processor_id; + unsigned int pic_version; + unsigned cpu_flags; + unsigned cpu_features; + unsigned cpu_feature_flags; + unsigned int cepic_timer_freq; + + processor_id = NATIVE_READ_PIC_ID(); + pic_version = native_pic_read_version(); +#ifndef CONFIG_CEPIC_TIMER_FREQUENCY + cepic_timer_freq = 100 * 1000000; /* 100 MHz for LMS */ +#else + cepic_timer_freq = CONFIG_CEPIC_TIMER_FREQUENCY * 1000000; +#endif /* CONFIG_CEPIC_TIMER_FREQUENCY */ + cpu_features = 0x0f; + cpu_features = 0x0f; + cpu_feature_flags = 1 << 9; + for(i = 0; i < NR_CPUS; i++) { +#ifdef CONFIG_SMP + unsigned int cpu_picid = all_pic_ids[i]; +#else /* ! CONFIG_SMP */ + unsigned int cpu_picid = processor_id; +#endif /* CONFIG_SMP */ +/// if(initial_apicid[i]==-1) +/// continue; + if((i+1) > phys_cpu_num) + continue; +#ifdef CONFIG_SMP + if (processor_id == all_pic_ids[i]) +#endif /* CONFIG_SMP */ + cpu_flags = CPU_BOOTPROCESSOR | CPU_ENABLED; +#ifdef CONFIG_SMP + else + cpu_flags = CPU_ENABLED; +#endif /* CONFIG_SMP */ + smp_write_processor(mc, cpu_picid, pic_version, + cpu_flags, + cpu_features, cpu_feature_flags, cepic_timer_freq + ); + + } +} + +void smp_write_bus(struct mpc_table *mc, + unsigned char id, unsigned char *bustype) +{ + struct mpc_config_bus *mpc; + mpc = smp_next_mpc_entry(mc); + memset(mpc, '\0', sizeof(*mpc)); + mpc->mpc_type = MP_BUS; + mpc->mpc_busid = id; + memcpy(mpc->mpc_bustype, bustype, sizeof(mpc->mpc_bustype)); + smp_add_mpc_entry(mc, sizeof(*mpc)); +} + +void smp_write_ioapic(struct mpc_table *mc, + unsigned char id, unsigned char ver, + unsigned long apicaddr) +{ + struct mpc_ioapic *mpc; + mpc = smp_next_mpc_entry(mc); + memset(mpc, '\0', sizeof(*mpc)); + mpc->type = MP_IOAPIC; + mpc->apicid = id; + mpc->apicver = ver; + mpc->flags = MPC_APIC_USABLE; + mpc->apicaddr = apicaddr; + smp_add_mpc_entry(mc, sizeof(*mpc)); +} + +void smp_write_iolink(struct mpc_table *mc, + int 
node, int link, + short bus_min, short bus_max, + short picid, + unsigned long pci_mem_start, unsigned long pci_mem_end) +{ + struct mpc_config_iolink *mpc; + + mpc = smp_next_mpc_entry(mc); + memset(mpc, '\0', sizeof(*mpc)); + mpc->mpc_type = MP_IOLINK; + mpc->mpc_iolink_type = MP_IOLINK_IOHUB; + mpc->mpc_iolink_ver = MP_IOHUB_FPGA_VER; + mpc->node = node; + mpc->link = link; + mpc->bus_min = bus_min; + mpc->bus_max = bus_max; + mpc->apicid = picid; + mpc->pci_mem_start = pci_mem_start; + mpc->pci_mem_end = pci_mem_end; + smp_add_mpc_entry(mc, sizeof(*mpc)); +} + +void smp_write_ioepic(struct mpc_table *mc, + unsigned short id, unsigned short nodeid, + unsigned char ver, unsigned long epicaddr) +{ + struct mpc_ioepic *mpc; + + mpc = smp_next_mpc_entry(mc); + memset(mpc, '\0', sizeof(*mpc)); + mpc->type = MP_IOEPIC; + mpc->epicid = id; + mpc->nodeid = nodeid; + mpc->epicver = ver; + mpc->epicaddr = epicaddr; + smp_add_mpc_entry(mc, sizeof(*mpc)); +} + +void smp_write_intsrc(struct mpc_table *mc, + unsigned char irqtype, unsigned short irqflag, + unsigned char srcbus, unsigned char srcbusirq, + unsigned char dstapic, unsigned char dstirq) +{ + struct mpc_intsrc *mpc; + mpc = smp_next_mpc_entry(mc); + memset(mpc, '\0', sizeof(*mpc)); + mpc->type = MP_INTSRC; + mpc->irqtype = irqtype; + mpc->irqflag = irqflag; + mpc->srcbus = srcbus; + mpc->srcbusirq = srcbusirq; + mpc->dstapic = dstapic; + mpc->dstirq = dstirq; + smp_add_mpc_entry(mc, sizeof(*mpc)); +#if CONFIG_DEBUG_MPTABLE + printk_info("add intsrc srcbus 0x%x srcbusirq 0x%x, dstapic 0x%x, dstirq 0x%x\n", + srcbus, srcbusirq, dstapic, dstirq); + hexdump(__FUNCTION__, mpc, sizeof(*mpc)); +#endif +} + +void smp_i2c_spi_timer(struct mpc_table *mc, + unsigned char timertype, unsigned char timerver, + unsigned char timerflags, unsigned long timeraddr) +{ + struct mpc_config_timer *mpc; + mpc = smp_next_mpc_entry(mc); + memset(mpc, '\0', sizeof(*mpc)); + mpc->mpc_type = MP_TIMER; + mpc->mpc_timertype = timertype; + 
mpc->mpc_timerver = timerver; + mpc->mpc_timerflags = timerflags; + mpc->mpc_timeraddr = timeraddr; + smp_add_mpc_entry(mc, sizeof(*mpc)); +} + +void smp_i2c_spi_dev(struct mpc_table *mc, unsigned char max_channel, + unsigned char irq, unsigned long pcidevaddr) +{ + struct mpc_config_i2c *mpc; + mpc = smp_next_mpc_entry(mc); + memset(mpc, '\0', sizeof(*mpc)); + mpc->mpc_type = MP_I2C_SPI; + mpc->mpc_i2ccntrladdr = + ((struct bios_pci_dev *)pcidevaddr)->base_address[0]; + mpc->mpc_i2cdataaddr = + ((struct bios_pci_dev *)pcidevaddr)->base_address[1]; + mpc->mpc_max_channel = max_channel; + mpc->mpc_i2c_irq = irq; +#if CONFIG_DEBUG_MPTABLE + rom_printk("add i2c/spi dev addr to mptable, " + " base[0] = 0x%x, base[1] = 0x%x. IRQ %d\n", + ((struct bios_pci_dev *)pcidevaddr)->base_address[0], + ((struct bios_pci_dev *)pcidevaddr)->base_address[1], + irq); +#endif + smp_add_mpc_entry(mc, sizeof(*mpc)); +} + +void smp_write_lintsrc(struct mpc_table *mc, + unsigned char irqtype, unsigned short irqflag, + unsigned char srcbusid, unsigned char srcbusirq, + unsigned char destapic, unsigned char destapiclint) +{ + struct mpc_config_lintsrc *mpc; + mpc = smp_next_mpc_entry(mc); + memset(mpc, '\0', sizeof(*mpc)); + mpc->mpc_type = MP_LINTSRC; + mpc->mpc_irqtype = irqtype; + mpc->mpc_irqflag = irqflag; + mpc->mpc_srcbusid = srcbusid; + mpc->mpc_srcbusirq = srcbusirq; + mpc->mpc_destapic = destapic; + mpc->mpc_destapiclint = destapiclint; + smp_add_mpc_entry(mc, sizeof(*mpc)); +} + +void smp_write_address_space(struct mpc_table *mc, + unsigned char busid, unsigned char address_type, + unsigned int address_base_low, unsigned int address_base_high, + unsigned int address_length_low, unsigned int address_length_high) +{ + struct mp_exten_system_address_space *mpe; + mpe = smp_next_mpe_entry(mc); + memset(mpe, '\0', sizeof(*mpe)); + mpe->mpe_type = MPE_SYSTEM_ADDRESS_SPACE; + mpe->mpe_length = sizeof(*mpe); + mpe->mpe_busid = busid; + mpe->mpe_address_type = address_type; + 
mpe->mpe_address_base_low = address_base_low; + mpe->mpe_address_base_high = address_base_high; + mpe->mpe_address_length_low = address_length_low; + mpe->mpe_address_length_high = address_length_high; + smp_add_mpe_entry(mc, (mpe_t)mpe); +} + + +void smp_write_bus_hierarchy(struct mpc_table *mc, + unsigned char busid, unsigned char bus_info, + unsigned char parent_busid) +{ + struct mp_exten_bus_hierarchy *mpe; + mpe = smp_next_mpe_entry(mc); + memset(mpe, '\0', sizeof(*mpe)); + mpe->mpe_type = MPE_BUS_HIERARCHY; + mpe->mpe_length = sizeof(*mpe); + mpe->mpe_busid = busid; + mpe->mpe_bus_info = bus_info; + mpe->mpe_parent_busid = parent_busid; + smp_add_mpe_entry(mc, (mpe_t)mpe); +} + +void smp_write_compatibility_address_space(struct mpc_table *mc, + unsigned char busid, unsigned char address_modifier, + unsigned int range_list) +{ + struct mp_exten_compatibility_address_space *mpe; + mpe = smp_next_mpe_entry(mc); + memset(mpe, '\0', sizeof(*mpe)); + mpe->mpe_type = MPE_COMPATIBILITY_ADDRESS_SPACE; + mpe->mpe_length = sizeof(*mpe); + mpe->mpe_busid = busid; + mpe->mpe_address_modifier = address_modifier; + mpe->mpe_range_list = range_list; + smp_add_mpe_entry(mc, (mpe_t)mpe); +} + +#if 0 +/* memcpy standard block */ +const static char smpblock[] = +{0x5F, 0x4D, 0x50, 0x5F, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x04, 0x9B, 0x05, 0x00, 0x00, 0x00, 0x00 +}; +void write_smp_table(void *v) +{ + memcpy(v, smpblock, sizeof(smpblock)); +} +#endif /* 0 */ + diff --git a/arch/e2k/boot/bios/mptable.c b/arch/e2k/boot/bios/mptable.c new file mode 100644 index 000000000000..9cd3c8d642d5 --- /dev/null +++ b/arch/e2k/boot/bios/mptable.c @@ -0,0 +1,422 @@ +/* + * $Id: mptable.c,v 1.22 2009/02/24 15:13:30 atic Exp $ + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "printk.h" +#include "pci.h" +#include "../boot_io.h" + +#undef BIOS_DEBUG +#define MTABLE_DEBUG 0 +#define BIOS_DEBUG MTABLE_DEBUG + +#define MP_IRQ_EDGE_HIGH 
(MP_IRQ_TRIGGER_EDGE | MP_IRQ_POLARITY_HIGH) +#define MP_IRQ_LEVEL_HIGH (MP_IRQ_TRIGGER_LEVEL | MP_IRQ_POLARITY_HIGH) +#define MP_BUS_ISA_NUM 0x1f + +/* Setting resources for iopic, timer and cmos to mptable */ +void smp_write_ioepic_i2c_spi_info(struct mpc_table *mc, + struct bios_pci_dev *dev, unsigned int ioepicid) +{ + unsigned int timer_base; + unsigned int domain = bios_pci_domain_nr(dev->bus); + unsigned int ioepic_base; + + pcibios_read_config_dword(domain, dev->bus->number, + dev->devfn, PCI_BASE_ADDRESS_2, &timer_base); + rom_printk("Reading timer_base from I2C-SPI: 0x%x\n", timer_base); + timer_base = timer_base & 0xfffffff0; /* Masking the lower bits */ + smp_i2c_spi_timer(mc, MP_LT_TYPE, MP_LT_VERSION, MP_LT_FLAGS, + timer_base); + smp_i2c_spi_timer(mc, MP_RTC_TYPE, MP_RTC_VER_CY14B101P, 0, + 0xffffffff); + + pcibios_read_config_dword(domain, dev->bus->number, + dev->devfn, PCI_BASE_ADDRESS_3, &ioepic_base); + rom_printk("Reading ioepic_base from I2C-SPI: 0x%x\n", ioepic_base); + + ioepic_base = ioepic_base & 0xfffffff0; /* Masking the lower bits */ + smp_write_ioepic(mc, ioepicid, domain, 0x0, ioepic_base); + + smp_i2c_spi_dev(mc, 1, 15, (unsigned long)dev); + + /* + * Additionally, write IO-link info for EIOHub. Otherwise, kernel + * will try to construct one by default, discover that IO-APIC entry + * is missing from the MP table (nr_ioapics) and panic. 
+ * Includes new mpc_config_iolink fields for IOMMU: PCI MEM area + */ + rom_printk("Passing PCI MEM area for IOMMU (node %d): 0x%x - 0x%x\n", + domain, PCI_MEM_DOMAIN_START(domain), + PCI_MEM_DOMAIN_END(domain)); + smp_write_iolink(mc, domain, 0, 0, 1, ioepicid, + PCI_MEM_DOMAIN_START(domain), PCI_MEM_DOMAIN_END(domain)); +} + +/* Setting resources for iopic, timer and cmos to mptable */ +void smp_write_ioapic_i2c_spi_info(struct mpc_table *mc, + struct bios_pci_dev *dev, unsigned int ioapicid) +{ + unsigned int timer_base; + unsigned long timeraddr; + unsigned int timer_upper32; + unsigned int domain = bios_pci_domain_nr(dev->bus); + + pcibios_read_config_dword(domain, dev->bus->number, + dev->devfn, SYSTEM_TIMER_BASE_ADDRESS, &timer_base); + pcibios_read_config_dword(domain, dev->bus->number, dev->devfn, + SYSTEM_TIMER_UPPER_ADDRESS, &timer_upper32); + rom_printk("MP: setting timeraddr to mptable:\n" + "timer_upper32 = 0x%x, timer_base = 0x%x\n", + timer_upper32, timer_base); + timeraddr = timer_upper32; + timeraddr = (timeraddr << 32); + timeraddr |= timer_base; + smp_i2c_spi_timer(mc, MP_LT_TYPE, MP_LT_VERSION, MP_LT_FLAGS, + timeraddr); + + smp_write_ioapic(mc, ioapicid, 0x11, 0xfec00000 + domain * 0x1000); + + smp_i2c_spi_dev(mc, 1, 23, (unsigned long)dev); + + smp_write_iolink(mc, domain, 0, 1, 3, ioapicid, 0, 0); +} + +void smp_write_ioapic_intsrc_info(struct mpc_table *mc, unsigned int node, + unsigned int bus, unsigned int ioapicid) +{ + smp_write_intsrc(mc, mp_ExtINT, 0x05, 0x00, 0x00, ioapicid, 0x00); /* PIC */ + smp_write_intsrc(mc, mp_FixINT, 0x05, 0x1f, 0x00, ioapicid, 0x02); /* System Timer */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x1f, 0x16, ioapicid, 0x16); /* SERR */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x1f, 0x09, ioapicid, 0x09); /* gpio 1 */ +#ifdef CONFIG_L_IOH2 + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x01, PCI_DEVFN(1, 0), + ioapicid, 0x0a); /* Ethernet */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x01, PCI_DEVFN(10, 0), + ioapicid, 
0x0c); /* USB */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x01, PCI_DEVFN(10, 1), + ioapicid, 0x0c); /* USB */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x01, PCI_DEVFN(11, 0), + ioapicid, 0x0d); /* USB */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x01, PCI_DEVFN(11, 1), + ioapicid, 0x0d); /* USB */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x01, PCI_DEVFN(2, 0), + ioapicid, 0x0b); /* IDE */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x01, PCI_DEVFN(2, 0), + ioapicid, 0x17); /* IDE hidden */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x01, PCI_DEVFN(2, 0), + ioapicid, 0x0b); /* IDE cable */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x01, PCI_DEVFN(2, 0), + ioapicid, 0x0b); /* IDE cable */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x01, PCI_DEVFN(2, 1), + ioapicid, 0x08); /* WD TIMER + gpio 0 */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x01, PCI_DEVFN(2, 1), + ioapicid, 0x0f); /* I2c/spi */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x01, PCI_DEVFN(2, 2), + ioapicid, 0x03); /* Serial Port */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x01, PCI_DEVFN(2, 2), + ioapicid, 0x04); /* Serial Port */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x01, PCI_DEVFN(2, 2), + ioapicid, 0x07); /* Parallel Port */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x01, PCI_DEVFN(2, 3), + ioapicid, 0x05); /* AC-97 */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x01, PCI_DEVFN(3, 0), + ioapicid, 0x14); /* SATA */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x01, PCI_DEVFN(3, 1), + ioapicid, 0x15); /* SATA */ +#else /* IOHUB version 1 */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x02, PCI_DEVFN(1, 0), + ioapicid, 0x0a); /* Ethernet */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x03, PCI_DEVFN(1, 0), + ioapicid, 0x14); /* USB */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x02, PCI_DEVFN(2, 0), + ioapicid, 0x0b); /* IDE */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x02, PCI_DEVFN(2, 0), + ioapicid, 0x0d); /* IDE hidden */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x02, PCI_DEVFN(2, 0), + ioapicid, 0x0e); /* IDE cable */ + smp_write_intsrc(mc, 
mp_FixINT, 0x0d, 0x02, PCI_DEVFN(2, 0), + ioapicid, 0x0f); /* IDE cable */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x02, PCI_DEVFN(2, 1), + ioapicid, 0x08); /* WD TIMER + gpio 0 */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x02, PCI_DEVFN(2, 1), + ioapicid, 0x17); /* I2c/spi */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x02, PCI_DEVFN(2, 2), + ioapicid, 0x03); /* Serial Port */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x02, PCI_DEVFN(2, 2), + ioapicid, 0x04); /* Serial Port */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x02, PCI_DEVFN(2, 2), + ioapicid, 0x07); /* Parallel Port */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x02, PCI_DEVFN(2, 3), + ioapicid, 0x05); /* AC-97 */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x02, PCI_DEVFN(3, 0), + ioapicid, 0x15); /* SATA */ +#endif /* CONFIG_L_IOH2 */ + /* On bus 0 device 0 PCI -> PCIexp bridge pin 2 (intb) */ + smp_write_intsrc(mc, mp_INT, 0x0f, 0x00, 0x01, ioapicid, 0x11); /* IOAPIC IRQ B */ + + /* On bus 0 device 1 virtual PCI -> PCI bridge interrupt pin unused */ + + /* On bus 2 device 0 PCI -> PCI bridge pin 1 */ + smp_write_intsrc(mc, mp_INT, 0x0f, 0x02, 0x00, ioapicid, 0x10); /* IOAPIC IRQ A */ +#if defined(CONFIG_ES2) && !defined(CONFIG_ADC_DISABLE) + smp_write_ioapic(mc, ioapicid + 1, 0x11, 0xfec01000); + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x01, PCI_DEVFN(1, 0), ioapicid + 1, 0x0a); /* ADC */ + smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x01, PCI_DEVFN(2, 0), ioapicid + 1, 0x17); /* I2c/spi */ +#endif /* CONFIG_ES2 && ! 
CONFIG_ADC_DISABLE */
+
+#ifdef CONFIG_E2K_LEGACY_SIC
+	/* Configure embedded IO-APIC */
+	smp_write_ioapic(mc, ioapicid + 1, 0x11, E1CP_EMBEDED_IOAPIC_BASE);
+	/* IRQ0 - GC2500 level high */
+	smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x1f, 0x00, ioapicid + 1, 0x00);
+	/* IRQ1 - MGA2 level high */
+	smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x1f, 0x01, ioapicid + 1, 0x01);
+	/* IRQ2 - PMC level high */
+	smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x1f, 0x02, ioapicid + 1, 0x02);
+	/* IRQ3 - IIC level high */
+	smp_write_intsrc(mc, mp_FixINT, 0x0d, 0x1f, 0x03, ioapicid + 1, 0x03);
+	/* IRQ4 - IOMMU edge high */
+	smp_write_intsrc(mc, mp_FixINT, 0x05, 0x1f, 0x04, ioapicid + 1, 0x04);
+	/* IRQ5 - WLCC edge high */
+	smp_write_intsrc(mc, mp_FixINT, 0x05, 0x1f, 0x05, ioapicid + 1, 0x05);
+	/* IRQ6 - SIC edge high */
+	smp_write_intsrc(mc, mp_FixINT, 0x05, 0x1f, 0x06, ioapicid + 1, 0x06);
+#endif /* CONFIG_E2K_LEGACY_SIC */
+}
+
+void smp_write_ioepic_intsrc_info(struct mpc_table *mc, unsigned int node,
+		unsigned int bus, unsigned int ioepicid)
+{
+	if (node == 0)
+		smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH,
+			MP_BUS_ISA_NUM, 0, ioepicid, 0); /* IPMB */
+	else
+		smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH,
+			bus, PCI_DEVFN(2, 1), ioepicid, 0); /* IPMB */
+
+	smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus,
+		PCI_DEVFN(9, 0), ioepicid, 1); /* SCI */
+
+	if (node == 0)
+		smp_write_intsrc(mc, mp_FixINT, MP_IRQ_EDGE_HIGH,
+			MP_BUS_ISA_NUM, 0, ioepicid, 2); /* System Timer */
+	else
+		smp_write_intsrc(mc, mp_FixINT, MP_IRQ_EDGE_HIGH,
+			bus, PCI_DEVFN(2, 1), ioepicid, 2); /* System Timer */
+
+	smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(1, 0),
+		ioepicid, 3); /* Ethernet0_tx0 */
+	smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(1, 0),
+		ioepicid, 4); /* Ethernet0_tx1 */
+	smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(1, 0),
+		ioepicid, 5); /* Ethernet0_rx0 */
+	smp_write_intsrc(mc, mp_FixINT,
MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(1, 0), + ioepicid, 6); /* Ethernet0_rx1 */ + smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(1, 0), + ioepicid, 7); /* Ethernet0_sys */ + + smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(2, 3), + ioepicid, 8); /* HDA (eioh) */ + + smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(2, 0), + ioepicid, 9); /* Mpv_timers0 */ + smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(2, 0), + ioepicid, 10); /* Mpv_timers1 */ + smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(2, 0), + ioepicid, 11); /* Mpv_timers2 */ + + smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(2, 0), + ioepicid, 12); /* GPIO0 */ + smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(2, 0), + ioepicid, 13); /* GPIO1 */ + + smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(2, 2), + ioepicid, 14); /* Serial port */ + + smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(2, 1), + ioepicid, 15); /* I2c/spi */ + + smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(2, 1), + ioepicid, 16); /* PCI IRQ A */ + smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(2, 1), + ioepicid, 17); /* PCI IRQ B */ + smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(2, 1), + ioepicid, 18); /* PCI IRQ C */ + smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(2, 1), + ioepicid, 19); /* PCI IRQ D */ + + smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(2, 1), + ioepicid, 20); /* WD Timer */ + + smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(3, 0), + ioepicid, 21); /* SATA-3 */ + + smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, MP_BUS_ISA_NUM, 22, + ioepicid, 22); /* SERR */ + + smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(1, 1), + ioepicid, 23); /* Ethernet1_tx0 */ + smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(1, 
1),
+		ioepicid, 24); /* Ethernet1_tx1 */
+	smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(1, 1),
+		ioepicid, 25); /* Ethernet1_rx0 */
+	smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(1, 1),
+		ioepicid, 26); /* Ethernet1_rx1 */
+	smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(1, 1),
+		ioepicid, 27); /* Ethernet1_sys */
+
+	smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, bus, PCI_DEVFN(0, 0),
+		ioepicid, 28); /* USB */
+
+	smp_write_intsrc(mc, mp_FixINT, MP_IRQ_EDGE_HIGH, bus,
+		PCI_DEVFN(10, 0), ioepicid, 29); /* WLCC */
+#ifdef CONFIG_E2C3
+	/* Embedded devices */
+	smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, 0,
+		PCI_DEVFN(26, 0), ioepicid, 32);
+	smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, 0,
+		PCI_DEVFN(26, 1), ioepicid, 33);
+	smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, 0,
+		PCI_DEVFN(26, 2), ioepicid, 34);
+	smp_write_intsrc(mc, mp_FixINT, MP_IRQ_LEVEL_HIGH, 0,
+		PCI_DEVFN(27, 0), ioepicid, 35);
+	smp_write_intsrc(mc, mp_FixINT, MP_IRQ_EDGE_HIGH, 0,
+		PCI_DEVFN(28, 0), ioepicid, 36);
+	smp_write_intsrc(mc, mp_FixINT, MP_IRQ_EDGE_HIGH, 0,
+		PCI_DEVFN(29, 0), ioepicid, 37);
+	smp_write_intsrc(mc, mp_FixINT, MP_IRQ_EDGE_HIGH, 0,
+		PCI_DEVFN(30, 0), ioepicid, 38);
+	smp_write_intsrc(mc, mp_FixINT, MP_IRQ_EDGE_HIGH, 0,
+		PCI_DEVFN(31, 0), ioepicid, 39);
+#endif
+
+
+}
+
+void *smp_write_config_table(struct intel_mp_floating *mpf,
+		unsigned int phys_cpu_num)
+{
+	int iopic_id = 0;
+	static const char sig[4] = "PCMP";
+	static const char oem[8] = "LNXI ";
+	static const char productid[12] = "440GX ";
+	struct mpc_table *mc;
+	struct bios_pci_dev *dev;
+	unsigned int domain, bus;
+
+	mc = (void *)(((char *)mpf) + SMP_FLOATING_TABLE_LEN);
+	memset(mc, 0, sizeof(*mc));
+
+	memcpy(mc->mpc_signature, sig, sizeof(sig));
+	mc->mpc_length = sizeof(*mc); /* initially just the header */
+	mc->mpc_spec = 0x04;
+	mc->mpc_checksum = 0; /* not yet computed */
+	memcpy(mc->mpc_oem, oem, sizeof(oem));
+	memcpy(mc->mpc_productid, productid, sizeof(productid));
+	mc->mpc_oemptr = 0;
+	mc->mpc_oemsize = 0;
+	mc->mpc_oemcount = 0; /* No entries yet... */
+	mc->mpc_lapic = LAPIC_ADDR;
+	mc->mpe_length = 0;
+	mc->mpe_checksum = 0;
+	mc->reserved = 0;
+
+	smp_write_processors(mc, phys_cpu_num);
+	iopic_id = NR_CPUS /*phys_cpu_num*/;
+
+	smp_write_bus(mc, 0, "PCI ");
+	smp_write_bus(mc, 1, "PCI ");
+	smp_write_bus(mc, 2, "PCI ");
+
+#ifdef CONFIG_E2K_SIC
+	smp_write_bus(mc, 3, "PCI ");
+#endif /* CONFIG_E2K_SIC */
+	smp_write_bus(mc, 0x1f, "ISA ");
+
+#ifdef CONFIG_E2K_SIC
+#ifdef CONFIG_EIOH
+	rom_printk("MP: Scanning PCI bus for eiohub i2c/spi+ioepic\n");
+	dev = NULL;
+	do {
+		dev = bios_pci_find_device(PCI_VENDOR_ID_MCST_TMP,
+				PCI_DEVICE_ID_MCST_I2C_SPI_EPIC, dev);
+		if (dev) {
+			domain = bios_pci_domain_nr(dev->bus);
+			bus = dev->bus->number;
+			rom_printk("found on domain %d bus %d "
+				"device %d\n",
+				domain, bus, PCI_SLOT(dev->devfn));
+			smp_write_ioepic_i2c_spi_info(mc, dev, iopic_id);
+			smp_write_ioepic_intsrc_info(mc, domain, bus, iopic_id);
+			iopic_id++;
+		}
+	} while (dev);
+#endif
+
+	rom_printk("MP: Scanning PCI bus for iohub1 i2c/spi+ioapic\n");
+	dev = NULL;
+	do {
+		dev = bios_pci_find_device(INTEL_MULTIFUNC_VENDOR,
+				INTEL_MULTIFUNC_DEVICE, dev);
+		if (dev) {
+			domain = bios_pci_domain_nr(dev->bus);
+			bus = dev->bus->number;
+			rom_printk("IOHub found on domain %d bus %d "
+				"device %d\n",
+				domain, bus, PCI_SLOT(dev->devfn));
+			smp_write_ioapic_i2c_spi_info(mc, dev, iopic_id);
+			smp_write_ioapic_intsrc_info(mc, domain, bus, iopic_id);
+			iopic_id++;
+		}
+	} while (dev);
+
+	rom_printk("MP: Scanning PCI bus for iohub2 i2c/spi+ioapic\n");
+	dev = NULL;
+	do {
+		dev = bios_pci_find_device(PCI_VENDOR_ID_MCST_TMP,
+				PCI_DEVICE_ID_MCST_I2C_SPI, dev);
+		if (dev) {
+			domain = bios_pci_domain_nr(dev->bus);
+			bus = dev->bus->number;
+			rom_printk("IOHub-2 found on domain %d bus %d "
+				"device %d\n",
+				domain, bus, PCI_SLOT(dev->devfn));
+
smp_write_ioapic_i2c_spi_info(mc, dev, iopic_id); + smp_write_ioapic_intsrc_info(mc, domain, bus, iopic_id); + iopic_id++; + } + } while (dev); +#endif + +#ifndef CONFIG_EIOH + /* Standard local interrupt assignments */ + smp_write_lintsrc(mc, mp_ExtINT, 0x05, 0x03, 0x00, MP_APIC_ALL, 0x00); /* local connection pic->ioapic */ + smp_write_lintsrc(mc, mp_NMI, 0x05, 0x00, 0x00, MP_APIC_ALL, 0x01); /* local connection lapic->ioapic */ +#endif + + mc->mpe_checksum = smp_compute_checksum(smp_next_mpc_entry(mc), mc->mpe_length); + mc->mpc_checksum = smp_compute_checksum(mc, mc->mpc_length); + printk_debug("Wrote the mp table end at: %px - %px\n", + mc, smp_next_mpe_entry(mc)); + return smp_next_mpe_entry(mc); +} + +unsigned int write_smp_table(struct intel_mp_floating *mpf, unsigned int phys_cpu_num) +{ + rom_printk("write_smp_table() will create floating table\n"); + smp_write_floating_table(mpf); + return (unsigned long)smp_write_config_table(mpf, phys_cpu_num); +} + + diff --git a/arch/e2k/boot/bios/newpci.c b/arch/e2k/boot/bios/newpci.c new file mode 100644 index 000000000000..7db8406376b8 --- /dev/null +++ b/arch/e2k/boot/bios/newpci.c @@ -0,0 +1,1707 @@ +/* + * Low-Level PCI Support for PC + * + * (c) 1999--2000 Martin Mares + */ +/* lots of mods by ron minnich (rminnich@lanl.gov), with + * the final architecture guidance from Tom Merritt (tjm@codegen.com) + * In particular, we changed from the one-pass original version to + * Tom's recommended multiple-pass version. I wasn't sure about doing + * it with multiple passes, until I actually started doing it and saw + * the wisdom of Tom's recommendations ... 
+ */ +#include +#include "pci.h" +#include + +#ifdef CONFIG_E2K_SIC +#include "../e2k_sic.h" +#endif /* CONFIG_E2K_SIC */ + +#include "../topology.h" +#include "../boot_io.h" + +#undef __KERNEL__ + +/**************************** DEBUG DEFINES *****************************/ +#undef DEBUG_BOOT_MODE +#undef Dprintk +#undef DEBUG_BOOT_AIO_MODE +#undef DaIOprintk +#undef DEBUG_BOOT_AR_MODE +#undef DaRprintk +#define DEBUG_BOOT_MODE 1 /* SMP CPU boot */ +#define Dprintk if (DEBUG_BOOT_MODE) rom_printk +#define DEBUG_BOOT_AIO_MODE 1 /* SMP CPU boot */ +#define DaIOprintk if (DEBUG_BOOT_AIO_MODE) rom_printk +#define DEBUG_BOOT_AR_MODE 1 /* SMP CPU boot */ +#define DaRprintk if (DEBUG_BOOT_AR_MODE) rom_printk +/************************************************************************/ + +#define ONEMEG (1 << 20) + +/* IDE iterrupt number */ +#define NATIVE_MODE_IDE_IRQ 11 /* IRQ # for native mode */ +#define LEGACY_MODE_IDE_IRQ 14 /* IRQ # for legacy mode */ + +#define IOHUB_AMR_PRIMARY_NATIVE 0x1 /* IDE primary channel at */ + /* native mode */ +#define IOHUB_AMR_SECONDARY_NATIVE 0x4 /* IDE secondary channel at */ + /* native mode */ + +extern volatile unsigned long phys_node_pres_map; +extern int phys_node_num; +extern volatile unsigned long online_iohubs_map; +extern int online_iohubs_num; +extern volatile unsigned long possible_iohubs_map; +extern int possible_iohubs_num; + +// historical functions, sometimes very useful. 
+/* + * Write the special configuration registers on the INTEL + */ +void intel_conf_writeb(unsigned long port, unsigned char value) +{ + unsigned char whichbyte = port & 3; + port &= (~3); + bios_outl(port, PCI_CONF_REG_INDEX); + bios_outb(value, PCI_CONF_REG_DATA + whichbyte); +} + +/* + * Read the special configuration registers on the INTEL + */ +unsigned char intel_conf_readb(unsigned long port) +{ + unsigned char whichbyte = port & 3; + port &= (~3); + bios_outl(port, PCI_CONF_REG_INDEX); + return bios_inb(PCI_CONF_REG_DATA + whichbyte); +} + + +struct bios_pci_ops { + int (*read_byte) (int domain, u8 bus, int devfn, int where, u8 * val); + int (*read_word) (int domain, u8 bus, int devfn, int where, u16 * val); + int (*read_dword) (int domain, u8 bus, int devfn, int where, u32 * val); + int (*write_byte) (int domain, u8 bus, int devfn, int where, u8 val); + int (*write_word) (int domain, u8 bus, int devfn, int where, u16 val); + int (*write_dword) (int domain, u8 bus, int devfn, int where, u32 val); +}; + +static const struct bios_pci_ops *conf; + + +/* + * Direct access to PCI hardware... 
+ */ + +/* + * Functions for accessing PCI configuration space with type 1 accesses + */ + +#ifdef CONFIG_E2K_SIC +#define CONFIG_CMD(bus,devfn,where) ((bus&0xFF)<<20)|((devfn&0xFF)<<12)|(where&0xFFF) +#define BUS_DEV_FUNC(bus,devfn) ((bus&0xFF)<<20)|((devfn&0xFF)<<12) +#define SLOT_DEV_FN(devfn) ((devfn) >> 3) +#define FUNC_DEV_FN(devfn) ((devfn) & 0x7) +#ifdef CONFIG_L_IOH2 +#define B2_2_0 BUS_DEV_FUNC(1, ((2<<3)|0)) +#define B2_2_1 BUS_DEV_FUNC(1, ((2<<3)|1)) +#define B2_2_2 BUS_DEV_FUNC(1, ((2<<3)|2)) +#define B2_3_0 BUS_DEV_FUNC(1, ((3<<3)|0)) +#define B2_2_3 BUS_DEV_FUNC(1, ((2<<3)|3)) +#define B2_0_0 BUS_DEV_FUNC(1, ((0<<3)|0)) +#define B2_1_0 BUS_DEV_FUNC(1, ((1<<3)|0)) +#define B1_1_0 BUS_DEV_FUNC(0, ((1<<3)|0)) +#define B1_2_0 BUS_DEV_FUNC(0, ((2<<3)|0)) +#else /* IOHUB-1 */ +#define B2_2_0 BUS_DEV_FUNC(2, ((2<<3)|0)) +#define B2_2_1 BUS_DEV_FUNC(2, ((2<<3)|1)) +#define B2_2_2 BUS_DEV_FUNC(2, ((2<<3)|2)) +#define B2_2_3 BUS_DEV_FUNC(2, ((2<<3)|3)) +#define B2_3_0 BUS_DEV_FUNC(2, ((3<<3)|0)) +#define B2_0_0 BUS_DEV_FUNC(2, ((0<<3)|0)) +#define B2_1_0 BUS_DEV_FUNC(2, ((1<<3)|0)) +#define B1_1_0 BUS_DEV_FUNC(1, ((1<<3)|0)) +#define B1_2_0 BUS_DEV_FUNC(1, ((2<<3)|0)) +#endif /* CONFIG_L_IOH2 */ +#ifdef CONFIG_EIOH +#define EIOH_B1_0_0 BUS_DEV_FUNC(1, ((0<<3)|0)) +#define EIOH_B1_2_1 BUS_DEV_FUNC(1, ((2<<3)|1)) +#define EIOH_B1_2_2 BUS_DEV_FUNC(1, ((2<<3)|2)) +#define EIOH_B1_3_0 BUS_DEV_FUNC(1, ((3<<3)|0)) +#endif /* CONFIG_EIOH */ +#else +#undef CONFIG_CMD +#define CONFIG_CMD(bus,devfn,where) (0x80000000 | (bus << 16) | (devfn << 8) | (where & ~3)) +#endif + +static int pci_conf1_read_config_byte(int domain, unsigned char bus, int devfn, + int where, u8 * value) +{ +#ifdef CONFIG_E2K_SIC + printk_spew("pci_conf1_read_config_byte start\n"); + *value = bios_conf_inb(domain, bus, CONFIG_CMD(bus, devfn, where)); +#else + bios_outl(CONFIG_CMD(bus, devfn, where), 0xCF8); + *value = bios_inb(0xCFC + (where & 3)); +#endif + return 0; +} + +static int 
pci_conf1_read_config_word(int domain, unsigned char bus, int devfn, + int where, u16 * value) +{ +#ifdef CONFIG_E2K_SIC + printk_spew("pci_conf1_read_config_word start\n"); + *value = bios_conf_inw(domain, bus, CONFIG_CMD(bus, devfn, where)); +#else + bios_outl(CONFIG_CMD(bus, devfn, where), 0xCF8); + *value = bios_inw(0xCFC + (where & 2)); +#endif + return 0; +} + +static int pci_conf1_read_config_dword(int domain, unsigned char bus, int devfn, + int where, u32 * value) +{ +#ifdef CONFIG_E2K_SIC + printk_spew("pci_conf1_read_config_dword start\n"); + *value = bios_conf_inl(domain, bus, CONFIG_CMD(bus, devfn, where)); +#else + bios_outl(CONFIG_CMD(bus, devfn, where), 0xCF8); + *value = bios_inl(0xCFC); +#endif + return 0; +} + +#ifdef CONFIG_E2K_SIC +#ifndef CONFIG_L_IOH2 +int system_commutator_es2_ioh_write_byte(int domain, unsigned char bus, + int where, u8 value) +{ + int link = iohub_domain_to_link(domain); + /* You must programming SCRB table registers only for bus 2 link 0 */ + /* or bus 1 link 1 on es2 (cubic) */ + if ((bus == 2 && link == 0) || (bus == 1 && link == 1)) { + bios_ioh_e3s_outb(domain, bus, value, where); + } + return 0; +} + +int system_commutator_es2_ioh_read_byte(int domain, unsigned char bus, + int where, u8 *value) +{ + int link = iohub_domain_to_link(domain); + /* You must programming SCRB table registers only for bus 2 link 0 */ + /* or bus 1 link 1 on es2 (cubic) */ + if ((bus == 2 && link == 0) || (bus == 1 && link == 1)) { + *value = bios_ioh_e3s_inb(domain, bus, where); + } + return 0; +} + +int system_commutator_es2_ioh_write_word(int domain, unsigned char bus, + int where, u16 value) +{ + int link = iohub_domain_to_link(domain); + /* You must programming SCRB table registers only for bus 2 link 0 */ + /* or bus 1 link 1 on es2 (cubic) */ + if ((bus == 2 && link == 0) || (bus == 1 && link == 1)) { + bios_ioh_e3s_outw(domain, bus, value, where); + } + return 0; +} + +int system_commutator_es2_ioh_read_word(int domain, unsigned char 
bus, + int where, u16 *value) +{ + int link = iohub_domain_to_link(domain); + /* You must programming SCRB table registers only for bus 2 link 0 */ + /* or bus 1 link 1 on es2 (cubic) */ + if ((bus == 2 && link == 0) || (bus == 1 && link == 1)) { + *value = bios_ioh_e3s_inw(domain, bus, where); + } + return 0; +} + +int system_commutator_es2_ioh_write_dword(int domain, unsigned char bus, + int where, u32 value) +{ + int link = iohub_domain_to_link(domain); + /* You must programming SCRB table registers only for bus 2 link 0 */ + /* or bus 1 link 1 on es2 (cubic) */ + if ((bus == 2 && link == 0) || (bus == 1 && link == 1)) { + bios_ioh_e3s_outl(domain, bus, value, where); + } + return 0; +} +int system_commutator_es2_ioh_read_dword(int domain, unsigned char bus, + int where, u32 *value) +{ + int link = iohub_domain_to_link(domain); + /* You must programming SCRB table registers only for bus 2 link 0 */ + /* or bus 1 link 1 on es2 (cubic) */ + if ((bus == 2 && link == 0) || (bus == 1 && link == 1)) { + *value = bios_ioh_e3s_inl(domain, bus, where); + } + return 0; +} +#endif /* ! 
CONFIG_L_IOH2 */ +#endif + +static int pci_conf1_write_config_byte(int domain, unsigned char bus, int devfn, + int where, u8 value) +{ +#ifdef CONFIG_E2K_SIC + printk_spew("pci_conf1_write_config_byte start\n"); + bios_conf_outb(domain, bus, value, CONFIG_CMD(bus, devfn, where)); +#else + bios_outl(CONFIG_CMD(bus, devfn, where), 0xCF8); + bios_outb(value, 0xCFC + (where & 3)); +#endif + return 0; +} + +static int pci_conf1_write_config_word(int domain, unsigned char bus, int devfn, + int where, u16 value) +{ +#ifdef CONFIG_E2K_SIC + printk_spew("pci_conf1_write_config_word start\n"); + bios_conf_outw(domain, bus, value, CONFIG_CMD(bus, devfn, where)); +#else + bios_outl(CONFIG_CMD(bus, devfn, where), 0xCF8); + bios_outw(value, 0xCFC + (where & 2)); +#endif + return 0; +} + +static int pci_conf1_write_config_dword(int domain, unsigned char bus, + int devfn, int where, u32 value) +{ +#ifdef CONFIG_E2K_SIC + printk_spew("pci_conf1_write_config_dword start\n"); + bios_conf_outl(domain, bus, value, CONFIG_CMD(bus, devfn, where)); +#else + bios_outl(CONFIG_CMD(bus, devfn, where), 0xCF8); + bios_outl(value, 0xCFC); +#endif + return 0; +} + +#undef CONFIG_CMD + +static const struct bios_pci_ops pci_direct_conf1 = +{ + pci_conf1_read_config_byte, + pci_conf1_read_config_word, + pci_conf1_read_config_dword, + pci_conf1_write_config_byte, + pci_conf1_write_config_word, + pci_conf1_write_config_dword +}; + +/* + * Functions for accessing PCI configuration space with type 2 accesses + */ + +#define IOADDR(devfn, where) ((0xC000 | ((devfn & 0x78) << 5)) + where) +#define FUNC(devfn) (((devfn & 7) << 1) | 0xf0) +#define SET(bus, devfn) \ +({ \ + if (devfn & 0x80) \ + return -1; \ + bios_outb(FUNC(devfn), 0xCF8); \ + bios_outb(bus, 0xCFA); \ +}) + +static int pci_conf2_read_config_byte(int domain, unsigned char bus, int devfn, + int where, u8 * value) +{ + SET(bus, devfn); + *value = bios_inb(IOADDR(devfn, where)); + bios_outb(0, 0xCF8); + return 0; +} + +static int 
pci_conf2_read_config_word(int domain, unsigned char bus, int devfn, + int where, u16 * value) +{ + SET(bus, devfn); + *value = bios_inw(IOADDR(devfn, where)); + bios_outb(0, 0xCF8); + return 0; +} + +static int pci_conf2_read_config_dword(int domain, unsigned char bus, int devfn, + int where, u32 * value) +{ + SET(bus, devfn); + *value = bios_inl(IOADDR(devfn, where)); + bios_outb(0, 0xCF8); + return 0; +} + +static int pci_conf2_write_config_byte(int domain, unsigned char bus, int devfn, + int where, u8 value) +{ + SET(bus, devfn); + bios_outb(value, IOADDR(devfn, where)); + bios_outb(0, 0xCF8); + return 0; +} + +static int pci_conf2_write_config_word(int domain, unsigned char bus, int devfn, + int where, u16 value) +{ + SET(bus, devfn); + bios_outw(value, IOADDR(devfn, where)); + bios_outb(0, 0xCF8); + return 0; +} + +static int pci_conf2_write_config_dword(int domain, unsigned char bus, + int devfn, int where, u32 value) +{ + SET(bus, devfn); + bios_outl(value, IOADDR(devfn, where)); + bios_outb(0, 0xCF8); + return 0; +} + +#undef SET +#undef IOADDR +#undef FUNC + +static const struct bios_pci_ops pci_direct_conf2 = +{ + pci_conf2_read_config_byte, + pci_conf2_read_config_word, + pci_conf2_read_config_dword, + pci_conf2_write_config_byte, + pci_conf2_write_config_word, + pci_conf2_write_config_dword +}; + +int bios_pci_read_config_byte(struct bios_pci_dev *dev, u8 where, u8 *val) +{ + int res; + int domain = bios_pci_domain_nr(dev->bus); + + res = conf->read_byte(domain, dev->bus->number, dev->devfn, where, val); + printk_spew("Read config byte bus %d,devfn 0x%x,reg 0x%x,val 0x%x,res 0x%x\n", + dev->bus->number, dev->devfn, where, *val, res); + return res; + + +} + +int bios_pci_read_config_word(struct bios_pci_dev *dev, u8 where, u16 *val) +{ + int res; + int domain = bios_pci_domain_nr(dev->bus); + + res = conf->read_word(domain, dev->bus->number, dev->devfn, where, val); + printk_spew( "Read config word bus %d,devfn 0x%x,reg 0x%x,val 0x%x,res 0x%x\n", + 
dev->bus->number, dev->devfn, where, *val, res); + return res; +} + +int bios_pci_read_config_dword(struct bios_pci_dev *dev, u8 where, u32 *val) +{ + int res; + int domain = bios_pci_domain_nr(dev->bus); + + res = conf->read_dword(domain, dev->bus->number, dev->devfn, where, val); + printk_spew( "Read config dword bus %d,devfn 0x%x,reg 0x%x,val 0x%x,res 0x%x\n", + dev->bus->number, dev->devfn, where, *val, res); + return res; +} + +int bios_pci_write_config_byte(struct bios_pci_dev *dev, u8 where, u8 val) +{ + int domain = bios_pci_domain_nr(dev->bus); + + printk_spew( "Write config byte bus %d, devfn 0x%x, reg 0x%x, val 0x%x\n", + dev->bus->number, dev->devfn, where, val); + return conf->write_byte(domain, dev->bus->number, dev->devfn, where, val); +} + +int bios_pci_write_config_word(struct bios_pci_dev *dev, u8 where, u16 val) +{ + int domain = bios_pci_domain_nr(dev->bus); + + printk_spew( "Write config word bus %d, devfn 0x%x, reg 0x%x, val 0x%x\n", + dev->bus->number, dev->devfn, where, val); + return conf->write_word(domain, dev->bus->number, dev->devfn, where, val); +} + +int bios_pci_write_config_dword(struct bios_pci_dev *dev, u8 where, u32 val) +{ + int domain = bios_pci_domain_nr(dev->bus); + + printk_spew( "Write config dword bus %d, devfn 0x%x, reg 0x%x, val 0x%x\n", + dev->bus->number, dev->devfn, where, val); + return conf->write_dword(domain, dev->bus->number, dev->devfn, where, val); +} + +int pcibios_read_config_byte(int domain, unsigned char bus, unsigned char devfn, + u8 where, u8 *val) +{ + int res; + + res = conf->read_byte(domain, bus, devfn, where, val); + printk_spew( "Read config byte bus %d,devfn 0x%x,reg 0x%x,val 0x%x,res 0x%x\n", + bus, devfn, where, *val, res); + return res; +} + +int pcibios_read_config_word(int domain, unsigned char bus, unsigned char devfn, + u8 where, u16 *val) +{ + int res; + + res = conf->read_word(domain, bus, devfn, where, val); + printk_spew( "Read config word bus %d,devfn 0x%x,reg 0x%x,val 0x%x, " + "res 
0x%x\n", + bus, devfn, where, *val, res); + + return res; + +} + +int pcibios_read_config_dword(int domain, unsigned char bus, + unsigned char devfn, u8 where, u32 *val) +{ + int res; + + res = conf->read_dword(domain, bus, devfn, where, val); + printk_spew( "Read config dword bus %d,devfn 0x%x,reg 0x%x,val 0x%x, " + "res 0x%x\n", + bus, devfn, where, *val, res); + return res; + +} + +int pcibios_write_config_byte(int domain, unsigned char bus, + unsigned char devfn, u8 where, u8 val) +{ + printk_spew( "Write byte bus %d, devfn 0x%x, reg 0x%x, val 0x%x\n", + bus, devfn, where, val); + return conf->write_byte(domain, bus, devfn, where, val); +} + +int pcibios_write_config_word(int domain, unsigned char bus, + unsigned char devfn, u8 where, u16 val) +{ + printk_spew( "Write word bus %d, devfn 0x%x, reg 0x%x, val 0x%x\n", + bus, devfn, where, val); + return conf->write_word(domain, bus, devfn, where, val); +} + +int pcibios_write_config_dword(int domain, unsigned char bus, + unsigned char devfn, u8 where, u32 val) +{ + printk_spew( "Write doubleword bus %d, devfn 0x%x, reg 0x%x, val 0x%x\n", + bus, devfn, where, val); + return conf->write_dword(domain, bus, devfn, where, val); +} + +/** round a number to an alignment. + * @param val the starting value + * @param roundup Alignment as a power of two + * @returns rounded up number + */ +unsigned long round(unsigned long val, unsigned long roundup) +{ + // ROUNDUP MUST BE A POWER OF TWO. + unsigned long inverse; + inverse = ~(roundup - 1); + val += (roundup - 1); + val &= inverse; + return val; +} + +/** Set the method to be used for PCI, type I or type II + */ +void pci_set_method() +{ + conf = &pci_direct_conf1; +} + +/* allocating resources on PCI is a mess. The reason is that + * the BAR size is actually two things: one is the size, and + * the other is the alignment of the data. Take, for example, the + * SiS agp hardware. BAR 0 reports a size as follows: 0xf8000008. 
+ * This means prefetchable, and you can compute the size of + * 0x8000000 (128 Mbytes). But it also turns you that only the + * top five bits of the address are decoded. So you can not, for + * example, allocate address space at 0x400000 for 0x8000000 bytes, + * because in the register that will turn into 0. You have + * to allocate address space using only the top five bits of the + * PCI address space, i.e. you have to start allocating at 0x8000000. + * + * we have a more complex algorithm for address space allocation in the + * works, that is actually simple code but gets the desired behavior. + * For now, though, we operate as follows: + * as you encounter BAR values, just round up the current usage + * to be aligned to the BAR size. Then allocate. + * This has the advantage of being simple, and in practice there are + * so few large BAR areas that we expect it to cover all cases. + * If we find problems with this strategy we'll go to the more complex + * algorithm. + */ +/* it's worse than I thought ... + * rules: + * bridges contain all sub-bridges, and the address space for mem and + * prefetch has to be contiguous. + * Anyway, this has gotten so complicated we're going to a one-pass + * allocate for now. + */ + + +/** Given a desired amount of io, round it to IO_BRIDGE_ALIGN + * @param amount Amount of memory desired. + */ +unsigned long iolimit(unsigned long amount) +{ + /* Workaround - if amount is 0 do not return -1. + * Otherwise iobase calculation in compute_allocate_io() + * will round it up to some bogus value taking up all + * IO space. */ + if (amount) + amount = round(amount, IO_BRIDGE_ALIGN) - 1; + else + amount = IO_BRIDGE_ALIGN - 1; + return amount; +} + +/** Given a desired amount of memory, round it to ONEMEG + * @param amount Amount of memory desired. + */ +unsigned long memlimit(unsigned long amount) +{ + amount = round(amount, ONEMEG) - 1; + return amount; +} + +/** Compute and allocate the io for this bus. 
 * @param bus Pointer to the struct for this bus.
 */
void compute_allocate_io(struct bios_pci_bus *bus)
{
	int i;
	struct bios_pci_bus *curbus;
	struct bios_pci_dev *curdev;
	unsigned long io_base;
	int domain = bios_pci_domain_nr(bus);

	io_base = bus->iobase;
	DaIOprintk("%s(): bus #%d: base 0x%x\n",
		__func__, bus->number, bus->iobase);

	/* First, walk all the bridges. When you return, grow the limit of the current bus
	   since sub-busses need IO rounded to 4096 */
	for (curbus = bus->children; curbus; curbus = curbus->next) {
		curbus->iobase = io_base;
		compute_allocate_io(curbus);
		/* child's iolimit becomes the rounded-up start for the next sibling */
		io_base = round(curbus->iolimit, IO_BRIDGE_ALIGN);
		DaIOprintk("BUSIO: done PCI #%d Bridge Bus 0x%x, iobase now 0x%x\n",
			domain, curbus->number, io_base);
	}

	/* Walk through all the devices on current bus and compute IO address space.*/
	for (curdev = bus->devices; curdev; curdev = curdev->sibling) {
		u32 class_revision;
		/* FIXME Special case for VGA for now just note
		 * we have an I/O resource later make certain
		 * we don't have a device conflict.
		 */
		bios_pci_read_config_dword(curdev, PCI_CLASS_REVISION,
			&class_revision);
		DaIOprintk("Vendor %02x, Device %02x, Class %08x\n",
			curdev->vendor, curdev->device, class_revision);
		/* class base 0x03 = display controller */
		if (((class_revision >> 24) == 0x03)) {
			/* NOTE(review): 0x380 looks suspicious here — class_revision >> 16
			 * of a VGA-compatible device is 0x0300; confirm this constant
			 * against the original sources before relying on either branch. */
			if ((class_revision >> 16) != 0x380) {
				DaIOprintk("Running VGA fix...\n");
				/* All legacy VGA cards have I/O space */
				/* registers */
				curdev->command |= PCI_COMMAND_IO;
			} else {
				unsigned long iosize;

				DaIOprintk("Running VGA: reserve IO ports\n");
				/* reserve a fixed 0x1ff-sized window (rounded up);
				 * note no BAR is assigned here — only io_base advances */
				iosize = round(0x1ff, IO_ALIGN);
				/* io_base must be aligned to the io size */
				io_base = round(io_base, iosize);
				DaIOprintk(" rounded size 0x%x base 0x%x\n",
					iosize, io_base);
				DaIOprintk("-->set base to 0x%x\n", io_base);
				io_base += iosize;
				if (io_base > PCI_IO_DOMAIN_END(domain)) {
					rom_printk("ERROR: PCI #%d IO memory "
						"limit %X is exceeded %X\n",
						domain,
						PCI_IO_DOMAIN_END(domain),
						io_base);
				}
			}
		}
		/* allocate each of the six BARs that decode I/O space */
		for (i = 0; i < 6; i++) {
			unsigned long size = curdev->size[i];
			if (size & PCI_BASE_ADDRESS_SPACE_IO) {
				unsigned long iosize = size & PCI_BASE_ADDRESS_IO_MASK;
				if (!iosize)
					continue;

				DaIOprintk("DEVIO: PCI #%d Bus 0x%x, devfn 0x%x, reg 0x%x: "
					"iosize 0x%x\n",
					domain, curdev->bus->number, curdev->devfn, i, iosize);
				// Make sure that iosize is a minimum
				// size.
				iosize = round(iosize, IO_ALIGN);
				// io_base must be aligned to the io size.
				io_base = round(io_base, iosize);
				DaIOprintk(" rounded size 0x%x base 0x%x\n", iosize, io_base);
				curdev->base_address[i] = io_base;
				// some chipsets allow us to set/clear the IO bit.
				// (e.g. VIA 82c686a.) So set it to be safe)
				curdev->base_address[i] |=
					PCI_BASE_ADDRESS_SPACE_IO;
				DaIOprintk("-->set base to 0x%x\n", io_base);
				io_base += iosize;
				if (io_base > PCI_IO_DOMAIN_END(domain)) {
					rom_printk("ERROR: PCI #%d IO memory limit %X "
						"is exceeded %X\n",
						domain,
						PCI_IO_DOMAIN_END(domain),
						io_base);
					break;
				}
				curdev->command |= PCI_COMMAND_IO;
			}
		}
		if ((class_revision >> 16) == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			/* Set IDE to native mode (bit 0: primary, bit 2: secondary) */
			bios_pci_read_config_byte(curdev, PCI_CLASS_PROG,
				&progif);
			progif |= 0x5;
			bios_pci_write_config_byte(curdev, PCI_CLASS_PROG,
				progif);
		}
	}
	/* publish the consumed range to the parent as this bus's I/O limit */
	bus->iolimit = iolimit(io_base);

	DaIOprintk("BUS %d: set iolimit to 0x%x\n", bus->number, bus->iolimit);
}

/** Compute and allocate the memory for this bus.
 * Recursive: children are allocated first, then this bus's devices'
 * 32/64-bit non-prefetchable memory BARs, then expansion ROMs.
 * @param bus Pointer to the struct for this bus.
 */
void compute_allocate_mem(struct bios_pci_bus *bus)
{
	int i;
	struct bios_pci_bus *curbus;
	struct bios_pci_dev *curdev;
	unsigned long mem_base;
	int domain = bios_pci_domain_nr(bus);

	mem_base = bus->membase;
	Dprintk("compute_allocate_mem: PCI #%d, bus %d base 0x%x\n",
		domain, bus->number, bus->membase);

	/* First, walk all the bridges. When you return, grow the limit of the current bus
	   since sub-busses need MEMORY rounded to 1 Mega */
	for (curbus = bus->children; curbus; curbus = curbus->next) {
		curbus->membase = mem_base;
		compute_allocate_mem(curbus);
		mem_base = round(curbus->memlimit, ONEMEG);
		Dprintk("BUSMEM: PCI #%d Bridge Bus 0x%x,membase now 0x%x\n",
			domain, curbus->number, mem_base);
	}

	/* Walk through all the devices on current bus and compute MEMORY address space.*/
	for (curdev = bus->devices; curdev; curdev = curdev->sibling) {
		Dprintk("compute_allocate_mem() device %d:%d:%d:%d\n",
			domain, curdev->bus->number,
			PCI_SLOT(curdev->devfn), PCI_FUNC(curdev->devfn));
		for (i = 0; i < 6; i++) {
			unsigned long size = curdev->size[i];
			unsigned long memorysize = size & (PCI_BASE_ADDRESS_MEM_MASK);
			unsigned long type = size & (~PCI_BASE_ADDRESS_MEM_MASK);
			Dprintk("compute_allocate_mem() device resorce #%d "
				"size is 0x%x\n",
				i, curdev->size[i]);
			if (!memorysize) {
				continue;
			}

			/* I/O BARs are handled by compute_allocate_io() */
			if (type & PCI_BASE_ADDRESS_SPACE_IO) {
				continue;
			}

			// we don't support the 1M type
			if (type & PCI_BASE_ADDRESS_MEM_TYPE_1M) {
				continue;
			}

			// if it's prefetch type, continue;
			// (prefetchable BARs go through compute_allocate_prefmem())
			if (type & PCI_BASE_ADDRESS_MEM_PREFETCH) {
				continue;
			}

			// now mask out all but the 32 or 64 bits
			type &= PCI_BASE_ADDRESS_MEM_TYPE_MASK;

			// I'm pretty sure this test is not needed, but ...
			if ((type == PCI_BASE_ADDRESS_MEM_TYPE_32) ||
				(type == PCI_BASE_ADDRESS_MEM_TYPE_64)) {
				/* this is normal memory space */
				unsigned long regmem;

				Dprintk("DEVMEM: PCI #%d Bus 0x%x, devfn 0x%x, reg 0x%x: "
					"memsize 0x%x\n", domain,
					curdev->bus->number, curdev->devfn, i, memorysize);

				/* PCI BUS Spec suggests that the memory address should be
				   consumed in 4KB unit */
				regmem = round(memorysize, MEM_ALIGN);

				mem_base = round(mem_base, regmem);
				Dprintk(" rounded size 0x%x base 0x%x\n", regmem, mem_base);
				curdev->base_address[i] = mem_base;
				curdev->base_address[i] |=
					PCI_BASE_ADDRESS_SPACE_MEMORY;
				Dprintk("-->set base to 0x%x\n", mem_base);

				mem_base += regmem;
				if (mem_base > PCI_MEM_DOMAIN_END(domain)) {
					rom_printk("ERROR: PCI #%d MEMory limit %X "
						"is exceeded %X\n",
						domain,
						PCI_MEM_DOMAIN_END(domain),
						mem_base);
					break;
				}
				curdev->command |= PCI_COMMAND_MEMORY;
				// for 64-bit BARs, the odd ones don't count
				if (type == PCI_BASE_ADDRESS_MEM_TYPE_64)
					continue;

			}
		}
		/* Now we take care about ROM BIOS.
		 * NOTE: continue/break below act on the device loop. */
		{
			unsigned long size = curdev->rom_size;
			unsigned long memorysize = size & (PCI_BASE_ADDRESS_MEM_MASK);
			unsigned long regmem;

			if (!memorysize)
				continue;
			Dprintk("DEVROM: Bus 0x%x, devfn 0x%x: "
				"memsize 0x%x\n",
				curdev->bus->number, curdev->devfn, memorysize);
			regmem = round(memorysize, MEM_ALIGN);
			mem_base = round(mem_base, regmem);
			Dprintk(" rounded size 0x%x base 0x%x\n", regmem, mem_base);
			curdev->rom_address = mem_base;
			Dprintk("-->set base to 0x%x\n", mem_base);
			mem_base += regmem;
			if (mem_base > PCI_MEM_DOMAIN_END(domain)) {
				rom_printk("ERROR: PCI #%d ROM memory limit %X "
					"is exceeded %X\n",
					domain,
					PCI_MEM_DOMAIN_END(domain),
					mem_base);
				break;
			}
			curdev->command |= PCI_COMMAND_MEMORY;
		}
	}
	bus->memlimit = memlimit(mem_base);

	Dprintk("BUS %d: set memlimit to 0x%x\n", bus->number, bus->memlimit);
}

/** Compute and allocate the prefetch memory
 for this bus.
 * Recursive, same walk order as compute_allocate_mem(), but only
 * prefetchable 32/64-bit memory BARs are assigned here.
 * @param bus Pointer to the struct for this bus.
 */
void compute_allocate_prefmem(struct bios_pci_bus *bus)
{
	int i;
	struct bios_pci_bus *curbus;
	struct bios_pci_dev *curdev;
	unsigned long prefmem_base;
	int domain = bios_pci_domain_nr(bus);

	prefmem_base = bus->prefmembase;
	Dprintk("Compute_allocate_prefmem: base 0x%x\n", bus->prefmembase);

	/* First, walk all the bridges. When you return, grow the limit of the current bus
	   since sub-busses need MEMORY rounded to 1 Mega */
	for (curbus = bus->children; curbus; curbus = curbus->next) {
		curbus->prefmembase = prefmem_base;
		compute_allocate_prefmem(curbus);
		prefmem_base = round(curbus->prefmemlimit, ONEMEG);
		Dprintk("BUSPREFMEM: Bridge Bus 0x%x, prefmem base now 0x%x\n",
			curbus->number, prefmem_base);
	}

	/* Walk through all the devices on current bus and compute PREFETCHABLE MEMORY address space.*/
	for (curdev = bus->devices; curdev; curdev = curdev->sibling) {
		for (i = 0; i < 6; i++) {
			unsigned long size = curdev->size[i];
			unsigned long memorysize = size & (PCI_BASE_ADDRESS_MEM_MASK);
			unsigned long type = size & (~PCI_BASE_ADDRESS_MEM_MASK);

			if (!memorysize)
				continue;

			if (type & PCI_BASE_ADDRESS_SPACE_IO) {
				continue;
			}

			// we don't support the 1M type
			if (type & PCI_BASE_ADDRESS_MEM_TYPE_1M) {
				Dprintk("compute_allocate_prefmem: 1M memory not supported\n");
				continue;
			}

			// if it's not a prefetch type, continue;
			if (! (type & PCI_BASE_ADDRESS_MEM_PREFETCH))
				continue;
			// this should be a function some day ... common code with
			// the non-prefetch allocate
			// now mask out all but the 32 or 64 bit type info
			type &= PCI_BASE_ADDRESS_MEM_TYPE_MASK;
			// if all these names confuse you, they confuse me too!
			if ((type == PCI_BASE_ADDRESS_MEM_TYPE_32) ||
				(type == PCI_BASE_ADDRESS_MEM_TYPE_64)) {
				unsigned long regmem;

				/* PCI BUS Spec suggests that the memory address should be
				   consumed in 4KB unit */
				Dprintk("DEVPREFMEM: Bus 0x%x, devfn 0x%x, reg 0x%x: "
					"prefmemsize 0x%x\n",
					curdev->bus->number, curdev->devfn, i, memorysize);
				regmem = round(memorysize, MEM_ALIGN);
				prefmem_base = round(prefmem_base, regmem);
				Dprintk(" rounded size 0x%x base 0x%x\n", regmem, prefmem_base);
				curdev->base_address[i] = prefmem_base;
				Dprintk("-->set base to 0x%x\n", prefmem_base);
				prefmem_base += regmem;
				if (prefmem_base > PCI_MEM_DOMAIN_END(domain)) {
					rom_printk("ERROR: PCI #%d PREF MEMory limit %X "
						"is exceeded %X\n",
						domain,
						PCI_MEM_DOMAIN_END(domain),
						prefmem_base);
					break;
				}
				curdev->command |= PCI_COMMAND_MEMORY;
				// for 64-bit BARs, the odd ones don't count
				if (type == PCI_BASE_ADDRESS_MEM_TYPE_64)
					continue;
			}
		}
	}
	bus->prefmemlimit = memlimit(prefmem_base);

	Dprintk("BUS %d: set prefmemlimit to 0x%x\n", bus->number, bus->prefmemlimit);
}

/** Compute and allocate resources.
 * This is a one-pass process. We first compute all the IO, then
 * memory, then prefetchable memory.
 * This is really only called at the top level
 * @param bus Pointer to the struct for this bus.
 */
void compute_allocate_resources(struct bios_pci_bus *bus)
{
	Dprintk("COMPUTE_ALLOCATE: do IO\n");
	compute_allocate_io(bus);

	Dprintk("COMPUTE_ALLOCATE: do MEM\n");
	compute_allocate_mem(bus);

	// now put the prefetchable memory at the end of the memory
	bus->prefmembase = round(bus->memlimit, ONEMEG);

	Dprintk("COMPUTE_ALLOCATE: do PREFMEM\n");
	compute_allocate_prefmem(bus);
}

/** Assign the computed resources to the bridges and devices on the bus.
 * Recurse to any bridges found on this bus first. Then do the devices
 * on this bus.
 * @param bus Pointer to the structure for this bus
 */
void assign_resources(struct bios_pci_bus *bus)
{
	struct bios_pci_dev *curdev = pci_devices;
	struct bios_pci_bus *curbus;
#ifdef CONFIG_E2K_SIC
	u16 b1_iobl_val;
	u32 b1_mbl_val, b1_pmbl_val;
#endif
	int domain = bios_pci_domain_nr(bus);

	DaRprintk("ASSIGN RESOURCES, bus %d\n", bus->number);

	/* walk through all the buses, assign resources for bridges */
	for (curbus = bus->children; curbus; curbus = curbus->next) {
		curbus->self->command = 0;

		/* set the IO ranges
		   WARNING: we don't really do 32-bit addressing for IO yet! */
		if (curbus->iobase || curbus->iolimit) {
			curbus->self->command |= PCI_COMMAND_IO;
			bios_pci_write_config_byte(curbus->self, PCI_IO_BASE,
				curbus->iobase >> 8);
			bios_pci_write_config_byte(curbus->self, PCI_IO_LIMIT,
				curbus->iolimit >> 8);
			DaRprintk("assign_resources: for BRIDGE on bus 0x%x IO "
				"base 0x%x limit 0x%x\n",
				curbus->self->bus->number, curbus->iobase,
				curbus->iolimit);

#ifdef CONFIG_E2K_SIC
			/* mirror the window into the system commutator (SCRB),
			 * except for MCST virtual/PCIe bridges which have no SCRB copy */
			if (curbus->self->device !=
					PCI_DEVICE_ID_MCST_VIRT_PCI_BRIDGE &&
				curbus->self->device !=
					PCI_DEVICE_ID_MCST_PCIE_BRIDGE) {
				b1_iobl_val = ((curbus->iolimit)&0xff00) |
						((curbus->iobase >> 8) & 0xff);
				DaRprintk("assign_resources: bus 0x%x, io val "
					"to SCRB = 0x%x\n",
					curbus->self->bus->number, b1_iobl_val);
				system_commutator_es2_ioh_write_word(domain,
					curbus->self->bus->number, B1_IOBL,
					b1_iobl_val);
			} else {
				DaRprintk("assign_resources: PCI_IO_BASE "
					"skiping device 0x%x on bus 0x%x\n",
					curbus->self->device, bus->number);
			}
#endif
			DaRprintk("Bus 0x%x Child Bus %x iobase to 0x%x "
				"iolimit 0x%x\n",
				bus->number, curbus->number, curbus->iobase,
				curbus->iolimit);
		}

		// set the memory range
		if (curbus->membase) {
			curbus->self->command |= PCI_COMMAND_MEMORY;
			bios_pci_write_config_word(curbus->self,
				PCI_MEMORY_BASE, curbus->membase >> 16);
			bios_pci_write_config_word(curbus->self,
				PCI_MEMORY_LIMIT, curbus->memlimit >> 16);
#ifdef CONFIG_E2K_SIC
			if (curbus->self->device !=
					PCI_DEVICE_ID_MCST_VIRT_PCI_BRIDGE &&
				curbus->self->device !=
					PCI_DEVICE_ID_MCST_PCIE_BRIDGE) {
				b1_mbl_val = ((curbus->memlimit)&0xffff0000) |
						((curbus->membase >> 16) &
							0xffff);
				DaRprintk("assign_resources: will set bus "
					"0x%x, mem val to SCRB = 0x%x\n",
					curbus->self->bus->number, b1_mbl_val);
				system_commutator_es2_ioh_write_dword(domain,
					curbus->self->bus->number, B1_MBL,
					b1_mbl_val);
				/* read back for debug verification only */
				system_commutator_es2_ioh_read_dword(domain,
					curbus->self->bus->number, B1_MBL,
					&b1_mbl_val);
				DaRprintk("assign_resources: read bus 0x%x, "
					"mem val from SCRB = 0x%x\n",
					curbus->self->bus->number, b1_mbl_val);
			} else {
				DaRprintk("assign_resources: PCI_MEMORY_BASE "
					"skiping device 0x%x on bus 0x%x\n",
					curbus->self->device, bus->number);
			}
#endif
			DaRprintk("Bus 0x%x Child Bus %x membase to 0x%x "
				"memlimit 0x%x\n",
				bus->number, curbus->number, curbus->membase,
				curbus->memlimit);

		}

		// set the prefetchable memory range
		if (curbus->prefmembase) {
			curbus->self->command |= PCI_COMMAND_MEMORY;
			bios_pci_write_config_word(curbus->self,
				PCI_PREF_MEMORY_BASE,
				curbus->prefmembase >> 16);
			bios_pci_write_config_word(curbus->self,
				PCI_PREF_MEMORY_LIMIT,
				curbus->prefmemlimit >> 16);
#ifdef CONFIG_E2K_SIC
			if (curbus->self->device !=
					PCI_DEVICE_ID_MCST_VIRT_PCI_BRIDGE &&
				curbus->self->device !=
					PCI_DEVICE_ID_MCST_PCIE_BRIDGE) {
				b1_pmbl_val = ((curbus->prefmemlimit) &
							0xffff0000) |
						((curbus->prefmembase >> 16) &
							0xffff);
				DaRprintk("assign_resources: bus 0x%x, pmem "
					"val to SCRB = 0x%x\n",
					curbus->self->bus->number, b1_pmbl_val);
				system_commutator_es2_ioh_write_dword(domain,
					curbus->self->bus->number, B1_PMBL,
					b1_pmbl_val);
			} else {
				DaRprintk("assign_resources: "
					"PCI_PREF_MEMORY_BASE skiping device "
					"0x%x on bus 0x%x\n",
					curbus->self->device, bus->number);
			}
#endif
			DaRprintk("Bus 0x%x Child Bus %x prefmembase to 0x%x "
				"prefmemlimit 0x%x\n",
				bus->number, curbus->number,
				curbus->prefmembase, curbus->prefmemlimit);

		}
		curbus->self->command |= PCI_COMMAND_MASTER;
		assign_resources(curbus);
	}

	/* program the devices' BARs computed by compute_allocate_*() */
	for (curdev = bus->devices; curdev; curdev = curdev->sibling) {
		int i;
		for (i = 0; i < 6; i++) {
			unsigned long reg;
			if (curdev->base_address[i] == 0)
				continue;

			reg = PCI_BASE_ADDRESS_0 + (i << 2);
			bios_pci_write_config_dword(curdev, reg,
				curdev->base_address[i]);
#ifdef CONFIG_E2K_SIC
#ifndef CONFIG_EIOH
			/* mirror selected device BARs into IOHUB SCRB registers */
			switch (BUS_DEV_FUNC(curdev->bus->number,
					curdev->devfn)) {
			case B2_2_3: /* BUS:2 DEV:2 FUNC:3 = AC97 audio/gpio */
				if (i == 0){
					system_commutator_es2_ioh_write_dword(
						domain, curdev->bus->number,
						A0_BA0,
						curdev->base_address[i]);
					break;
				}
				if (i == 1){
					system_commutator_es2_ioh_write_dword(
						domain, curdev->bus->number,
						A0_BA1,
						curdev->base_address[i]);
					break;
				}
				DaRprintk("assign_resources: warning: found "
					"i = 0x%x for 2_2_3 device\n", i);
				break;
			case B1_2_0: /* BUS:1 DEV:2 FUNC:0 =
				      * ioapic/pic/timer/i2c/spi
				      * on IOLINK 1
				      */
			case B2_2_1: /* BUS:2 DEV:2 FUNC:1 =
				      * ioapic/pic/timer/i2c/spi
				      * on IOLINK 0
				      */
				if (i == 0){ /* i2c/spi */
					system_commutator_es2_ioh_write_dword(
						domain, curdev->bus->number,
						A1_BA0,
						curdev->base_address[i]);
					break;
				}
				if (i == 1){ /* i2c/spi */
					system_commutator_es2_ioh_write_dword(
						domain, curdev->bus->number,
						A1_BA1,
						curdev->base_address[i]);
					break;
				}
				DaRprintk("assign_resources: warning: found "
					"i = 0x%x for I2C/SPI device\n", i);
				break;
			case B2_2_2: /* BUS:2 DEV:2 FUNC:2 = ieee1284/rs232 */
				if (i == 0){ /* parport */
					system_commutator_es2_ioh_write_dword(
						domain, curdev->bus->number,
						A5_BA0,
						curdev->base_address[i]);
					break;
				}
				if (i == 1){ /* rs232 */
					system_commutator_es2_ioh_write_dword(
						domain, curdev->bus->number,
						A6_BA0,
						curdev->base_address[i]);
					break;
				}
				DaRprintk("assign_resources: warning: found "
					"i = 0x%x for 2_2_2 device\n", i);
				break;
			case B2_2_0: /* IDE contr */
				if (i == 0) {
					u32 bar;
					system_commutator_es2_ioh_write_dword(
						domain, curdev->bus->number,
						A7_BA0,
						curdev->base_address[i]);
					system_commutator_es2_ioh_read_dword(
						domain, curdev->bus->number,
						A7_BA0, &bar);
					DaRprintk("assign_resources: set "
						"A7_BA0 to 0x%x for 2_2_0 "
						"device\n",
						bar);
					break;
				}
				if (i == 1){
					u32 bar;
					system_commutator_es2_ioh_write_dword(
						domain, curdev->bus->number,
						A7_BA1,
						curdev->base_address[i]);
					system_commutator_es2_ioh_read_dword(
						domain, curdev->bus->number,
						A7_BA1, &bar);
					DaRprintk("assign_resources: set "
						"A7_BA1 to 0x%x for 2_2_0 "
						"device\n",
						bar);
					break;
				}
				if (i == 2){
					u32 bar;
					system_commutator_es2_ioh_write_dword(
						domain, curdev->bus->number,
						A7_BA2,
						curdev->base_address[i]);
					system_commutator_es2_ioh_read_dword(
						domain, curdev->bus->number,
						A7_BA2,
						&bar);
					DaRprintk("assign_resources: set "
						"A7_BA2 to 0x%x for 2_2_0 "
						"device\n",
						bar);
					break;
				}
				if (i == 3){
					u32 bar;
					system_commutator_es2_ioh_write_dword(
						domain, curdev->bus->number,
						A7_BA3,
						curdev->base_address[i]);
					system_commutator_es2_ioh_read_dword(
						domain, curdev->bus->number,
						A7_BA3, &bar);
					DaRprintk("assign_resources: set "
						"A7_BA3 to 0x%x for 2_2_0 "
						"device\n",
						bar);
					break;
				}
				if (i == 4){
					u32 bar;
					system_commutator_es2_ioh_write_dword(
						domain, curdev->bus->number,
						A7_BA4,
						curdev->base_address[i]);
					system_commutator_es2_ioh_read_dword(
						domain, curdev->bus->number,
						A7_BA4, &bar);
					DaRprintk("assign_resources: set "
						"A7_BA4 to 0x%x for 2_2_0 "
						"device\n",
						bar);
					break;
				}
				DaRprintk("assign_resources: warning: found "
					"i = 0x%x for 2_2_0 device\n", i);
				break;
			case B2_3_0: /* SATA contr */
				if (i == 5) {
					u32 bar;
					system_commutator_es2_ioh_write_dword(
						domain, curdev->bus->number,
						A8_ABAR,
						curdev->base_address[i]);
					system_commutator_es2_ioh_read_dword(
						domain, curdev->bus->number,
						A8_ABAR, &bar);
					DaRprintk("assign_resources: set "
						"A8_ABAR to 0x%x for 2_3_0 "
						"device\n",
						bar);
				}
				break;
			case B2_1_0: /* ETHERNET */
				if (i == 0) {
					system_commutator_es2_ioh_write_dword(
						domain, curdev->bus->number,
						A4_BA0,
						curdev->base_address[i]);
					DaRprintk("assign_resources: warning: "
						"found i = 0x%x for ETHERNET "
						"device\n", i);
				}
				break;
			case B1_1_0: /* ADC */
				if (i == 0) {
					system_commutator_es2_ioh_write_dword(
						domain, curdev->bus->number,
						A4_BA0,
						curdev->base_address[i]);
					DaRprintk("assign_resources: warning: "
						"found i = 0x%x for ADC "
						"device\n", i);
				}
				break;
			default:
				DaRprintk("assign_resources: bus: 0x%x dev: "
					"0x%x func: 0x%x shouldn't be "
					"configured for i(reg) = %d.\n",
					curdev->bus->number,
					(curdev->devfn) >> 3,
					(curdev->devfn)&0x7, i);
				break;
			}
#endif
#endif
			DaRprintk("PCI #%d Bus 0x%x slot %d func %d "
				"resource #%d base to 0x%x\n",
				domain, curdev->bus->number,
				PCI_SLOT(curdev->devfn),
				PCI_FUNC(curdev->devfn),
				i, curdev->base_address[i]);
		}
		if (curdev->rom_address == 0 || curdev->rom_size == 0)
			continue;

		/* program the expansion ROM BAR and enable decoding */
		bios_pci_write_config_dword(curdev, PCI_ROM_ADDRESS,
			curdev->rom_address | PCI_ROM_ADDRESS_ENABLE);
		DaRprintk("Bus 0x%x devfn 0x%x ROM address base to 0x%x\n",
			curdev->bus->number, curdev->devfn, curdev->rom_address);

		/* set a default latency timer */
		bios_pci_write_config_byte(curdev, PCI_LATENCY_TIMER, 0x40);
	}
	DaRprintk("ASSIGN RESOURCES, exit for bus %d\n", bus->number);
}

/** Enable the COMMAND register bits accumulated during allocation on
 * every discovered PCI device, and (on E2K_SIC) the matching IOHUB
 * SCRB space-enable registers.
 */
void enable_resources(struct bios_pci_bus *bus)
{
	struct bios_pci_dev *curdev = pci_devices;

	/* walk through the chain of all pci device, this time we don't
	 * have to deal with the device v.s.
 bridge stuff, since every
	 * bridge has its own bios_pci_dev associated with it
	 */
	for (curdev = pci_devices; curdev; curdev = curdev->next) {
		u16 command;
		int domain;

		domain = bios_pci_domain_nr(curdev->bus);
		bios_pci_read_config_word(curdev, PCI_COMMAND, &command);
#ifdef CONFIG_E2K_SIC
#ifdef CONFIG_EIOH
		if ((BUS_DEV_FUNC(curdev->bus->number, curdev->devfn) ==
				EIOH_B1_2_1)) {
			/* BUS:1 DEV:2 FUNC:1 = i2c/spi */
			curdev->command |= PCI_COMMAND_IO;
			curdev->command |= PCI_COMMAND_MASTER;
		}
#else
		/* force I/O + bus-master on the i2c/spi functions */
		if ((BUS_DEV_FUNC(curdev->bus->number, curdev->devfn) ==
				B2_2_1) ||
			(BUS_DEV_FUNC(curdev->bus->number, curdev->devfn) ==
				B1_2_0)) {
			curdev->command |= PCI_COMMAND_IO;
			curdev->command |= PCI_COMMAND_MASTER;
		}
#endif
#endif
		/* merge accumulated bits into the live COMMAND register */
		command |= curdev->command;
		Dprintk("DEV Set command bus 0x%x devfn 0x%x to 0x%x\n",
			curdev->bus->number, curdev->devfn, command);
		bios_pci_write_config_word(curdev, PCI_COMMAND, command);
#ifdef CONFIG_E2K_SIC
#ifndef CONFIG_EIOH
		/* enable the matching IOHUB SCRB "space enable" register */
		switch (BUS_DEV_FUNC(curdev->bus->number,curdev->devfn)){
		case B2_2_3: /* BUS:2 DEV:2 FUNC:3 = AC97 audio/gpio */
			system_commutator_es2_ioh_write_byte(domain,
				curdev->bus->number, A0_SE,
				PCI_COMMAND_MEMORY);
			break;
		case B1_2_0: /* BUS:1 DEV:2 FUNC:0 =
			      * ioapic/pic/timer/i2c/spi IOLINK 1
			      */
			Dprintk("enable_resources() enable BUS:1 DEV:2 FUNC:0 "
				"= ioapic/pic/timer/i2c/spi contr\n");
			system_commutator_es2_ioh_write_byte(domain,
				curdev->bus->number, A1_SE,
				PCI_COMMAND_MEMORY | PCI_COMMAND_IO);
			break;
		case B2_2_1: /* BUS:2 DEV:2 FUNC:1 =
			      * ioapic/pic/timer/i2c/spi IOLINK 0
			      */
			Dprintk("enable_resources() enable BUS:2 DEV:2 FUNC:1 "
				"= ioapic/pic/timer/i2c/spi contr\n");
			system_commutator_es2_ioh_write_byte(domain,
				curdev->bus->number, A1_SE,
				PCI_COMMAND_MEMORY | PCI_COMMAND_IO);
			break;
		case B2_2_2: /* BUS:2 DEV:2 FUNC:2 = ieee1284/rs232 */
			system_commutator_es2_ioh_write_byte(domain,
				curdev->bus->number, A5_SE,
				PCI_COMMAND_MEMORY | PCI_COMMAND_IO);
			break;
		case B2_2_0: /* BUS:2 DEV:2 FUNC:0 = IDE contr */
			bios_pci_write_config_dword(curdev, PCI_CLASS_REVISION,
				NATIVE_MODE_CLASSC << 8);
			system_commutator_es2_ioh_write_byte(domain,
				curdev->bus->number, A7_AMR,
				NATIVE_MODE_CLASSC);
			Dprintk("enable_resources() set IDE BUS:2 DEV:2 FUNC:0 "
				"to native mode\n");
			system_commutator_es2_ioh_write_byte(domain,
				curdev->bus->number, A7_SE,
				PCI_COMMAND_MEMORY | PCI_COMMAND_IO);
			/* set Addressing Mode Register to native mode
			 * on IOHUB
			 */
			system_commutator_es2_ioh_write_byte(domain,
				curdev->bus->number, A7_AMR,
				IOHUB_AMR_PRIMARY_NATIVE |
					IOHUB_AMR_SECONDARY_NATIVE);
			break;
		case B2_3_0: /* BUS:2 DEV:3 FUNC:0 = SATA contr */
			Dprintk("enable_resources() set SATA BUS:2 DEV:3 "
				"FUNC:0 A8_SE to 0x%x\n",
				PCI_COMMAND_MEMORY);
			system_commutator_es2_ioh_write_byte(domain,
				curdev->bus->number, A8_SE,
				PCI_COMMAND_MEMORY);
			break;
		case B2_0_0: /* BUS:2 DEV:0 FUNC:0 = REAL PCI_2_PCI BRIDGE */
			system_commutator_es2_ioh_write_byte(domain,
				curdev->bus->number, B1_SE,
				PCI_COMMAND_MEMORY | PCI_COMMAND_IO);
			/* Allow arbitration to everyone */
			bios_pci_write_config_word(curdev, Arb_CtlSta, 0xf);
			break;
		case B1_1_0: /* BUS:1 DEV:1 FUNC:0 = ADC */
		case B2_1_0: /* BUS:2 DEV:1 FUNC:0 = ETHERNET */
			system_commutator_es2_ioh_write_byte(domain,
				curdev->bus->number, A4_SE,
				PCI_COMMAND_MEMORY);
			break;
		default:
			Dprintk("enable_resources: bus: 0x%x dev: 0x%x func: "
				"0x%x shouldn't be configured\n",
				curdev->bus->number, (curdev->devfn) >> 3,
				(curdev->devfn)&0x7);
			break;
		}
#endif
#endif
	}
}
/** Program PCI_INTERRUPT_LINE for devices whose interrupt pin is wired
 * directly to an IOAPIC/IOEPIC input; other devices keep whatever the
 * config space already reports. Only the given bus's domain is touched.
 */
void assign_interrupts(struct bios_pci_bus *bus)
{
#ifdef CONFIG_E2K_SIC
	struct bios_pci_dev *curdev = pci_devices;
	int domain = bios_pci_domain_nr(bus);

	/*
	 * Walk through the all pci device on the bus and
	 * assign interrupts if device interrupt pin directly
	 * connect to IOAPIC pin
	 */
	for (curdev = pci_devices;
			curdev; curdev = curdev->next) {
		u8 int_line;
		/* NOTE(review): this local `bus` shadows the function parameter
		 * `bus`; only the parameter's domain is used above, so behavior
		 * is as intended, but a rename would improve readability. */
		int bus = curdev->bus->number;
		int slot = SLOT_DEV_FN(curdev->devfn);
		int func = FUNC_DEV_FN(curdev->devfn);
		if (bios_pci_domain_nr(curdev->bus) != domain)
			continue;
#ifdef CONFIG_EIOH
		switch (BUS_DEV_FUNC(bus, curdev->devfn)) {
		case EIOH_B1_0_0: /* BUS:1 DEV:0 FUNC:0 = USB 3.0 */
			int_line = 0x1c;
			Dprintk("%d.%d.%d.%d: USB 3.0 controller",
				domain, bus, slot, func);
			break;
		case EIOH_B1_2_1: /* BUS:1 DEV:2 FUNC:1 = i2c/spi */
			int_line = 0x0f;
			Dprintk("%d.%d.%d.%d: i2c/spi controller",
				domain, bus, slot, func);
			break;
		case EIOH_B1_2_2: /* BUS:1 DEV:2 FUNC:2 = ieee1284/rs232 */
			int_line = 0x0e;
			Dprintk("%d.%d.%d.%d: ieee1284/rs232 controller",
				domain, bus, slot, func);
			break;
		case EIOH_B1_3_0: /* BUS:1 DEV:3 FUNC:0 = SATA 3.0 */
			int_line = 0x15;
			Dprintk("%d.%d.%d.%d: SATA 3.0 controller",
				domain, bus, slot, func);
			break;
		default:
			/* keep whatever line the config space already holds */
			bios_pci_read_config_byte(curdev, PCI_INTERRUPT_LINE,
				&int_line);
			Dprintk("%d.%d.%d.%d: is not connected to IOEPIC pin",
				domain, bus, slot, func);
			break;
		}
#else
		switch (BUS_DEV_FUNC(bus, curdev->devfn)) {
		case B2_2_3: /* BUS:2 DEV:2 FUNC:3 = AC97 audio/gpio */
			int_line = 0x05;
			Dprintk("%d.%d.%d.%d: AC-97",
				domain, bus, slot, func);
			break;
		case B1_2_0: /* BUS:1 DEV:2 FUNC:0 = i2c/spi IOLINK 1 */
			int_line = 0x17;
			Dprintk("%d.%d.%d.%d: i2c/spi controller",
				domain, bus, slot, func);
			break;
		case B2_2_1: /* BUS:2 DEV:2 FUNC:1 = i2c/spi IOLINK 0 */
			int_line = 0x0f;
			Dprintk("%d.%d.%d.%d: i2c/spi controller",
				domain, bus, slot, func);
			break;
		case B2_2_2: /* BUS:2 DEV:2 FUNC:2 = ieee1284/rs232 */
			int_line = 0x03;
			Dprintk("%d.%d.%d.%d: ieee1284/rs232 controller",
				domain, bus, slot, func);
			break;
		case B2_2_0: /* BUS:2 DEV:2 FUNC:0 = IDE contr */
			int_line = 0x0b;
			Dprintk("%d.%d.%d.%d: IDE controller",
				domain, bus, slot, func);
			break;
		case B2_3_0: /* BUS:2 DEV:3 FUNC:0 = SATA contr */
			int_line = 0x15;
			Dprintk("%d.%d.%d.%d: SATA controller",
				domain, bus, slot, func);
			break;
		case B2_1_0: /* BUS:2 DEV:1 FUNC:0 = ETHERNET */
			int_line = 0x0a;
			Dprintk("%d.%d.%d.%d: Ethernet 1Gb controller",
				domain, bus, slot, func);
			break;
		case B1_1_0: /* BUS:1 DEV:1 FUNC:0 = ADC */
			int_line = 0x0a;
			Dprintk("%d.%d.%d.%d: ADC controller",
				domain, bus, slot, func);
			break;
		default:
			bios_pci_read_config_byte(curdev, PCI_INTERRUPT_LINE,
				&int_line);
			Dprintk("%d.%d.%d.%d: does not connect directly "
				"to IOAPIC",
				domain, bus, slot, func);
			break;
		}
#endif
		bios_pci_write_config_byte(curdev, PCI_INTERRUPT_LINE,
			int_line);
		Dprintk(": Assign IRQ %d\n", int_line);
	}
#endif /* CONFIG_E2K_SIC */
}

/** Enumerate the resources on the PCI by calling pci_init
 * @param domain IOHUB domain number to scan.
 * @return root bus of the scanned domain.
 */
struct bios_pci_bus *pci_enumerate(int domain)
{
	struct bios_pci_bus *bus_root;
	printk_info("Scanning PCI domain %d (node %d link %d) bus...",
		domain, iohub_domain_to_node(domain),
		iohub_domain_to_link(domain));
	// scan it.
	bus_root = pci_init(domain);
	printk_info("done\n");
	return (bus_root);
}

/** Starting at the root, compute what resources are needed and allocate them.
 * We start memory, prefetchable memory at PCI_MEM_START. I/O starts at
 * PCI_IO_START. Since the assignment is hierarchical we set the values
 * into the pci_root struct.
 */
void pci_configure(struct bios_pci_bus *pci_root)
{
	int domain = bios_pci_domain_nr(pci_root);

	printk_info("Allocating PCI domain %d (node %d link %d) resources...",
		domain, iohub_domain_to_node(domain),
		iohub_domain_to_link(domain));
	pci_root->membase = PCI_MEM_DOMAIN_START(domain);
	pci_root->prefmembase = PCI_MEM_DOMAIN_START(domain);
	pci_root->iobase = PCI_IO_DOMAIN_START(domain);
	/* keep I/O base away from the legacy 0x0000-0x0fff port range */
	if (pci_root->iobase == 0)
		pci_root->iobase += 0x1000;

	compute_allocate_resources(pci_root);
	// now just set things into registers ... we hope ...
	assign_resources(pci_root);
	assign_interrupts(pci_root);
	printk_info("done.\n");
}

/** Starting at the root, walk the tree and enable all devices/bridges.
 * What really happens is computed COMMAND bits get set in register 4
 */
void pci_enable(struct bios_pci_bus *pci_root)
{
	int domain = bios_pci_domain_nr(pci_root);

	printk_info("Enabling PCI domain %d (node %d link %d) resources...",
		domain, iohub_domain_to_node(domain),
		iohub_domain_to_link(domain));

	// now enable everything.
	enable_resources(pci_root);
	printk_info("done.\n");
}

/** Clear the interrupt-line register (0x3c) of every device that
 * currently reports a non-zero interrupt pin (0x3d).
 */
void pci_zero_irq_settings(void)
{
	struct bios_pci_dev *pcidev;
	unsigned char line;

	printk_info("Zeroing PCI IRQ settings...");

	pcidev = pci_devices;

	while (pcidev) {
		/* 0x3d = PCI_INTERRUPT_PIN, 0x3c = PCI_INTERRUPT_LINE */
		bios_pci_read_config_byte(pcidev, 0x3d, &line);
		if (line) {
			bios_pci_write_config_byte(pcidev, 0x3c, 0);
		}
		pcidev = pcidev->next;
	}
	printk_info("done.\n");
}

/** Run one pass of super-I/O initialization over all registered chips.
 * @param pass 0 = pre_pci_init, 1 = init, 2 = finishup hooks.
 * @param all_superio array of superio descriptors.
 * @param nsuperio number of entries in all_superio.
 */
void
handle_superio(int pass, struct superio *all_superio[], int nsuperio)
{
	int i;
	struct superio *s;
	printk_debug("handle_superio start, nsuperio %d\n", nsuperio);
	for(i = 0; i < nsuperio; i++){
		s = all_superio[i];
		printk_debug("handle_superio: Pass %d, check #%d, s %x s->super %x\n",
			pass, i, s, s->super);
		if (!s->super) {
			printk_debug("handle_superio: Pass %d, Skipping #%d as it has no superio pointer!\n", pass, i);
			continue;
		}
		printk_debug("handle_superio: Pass %d, Superio %s\n", pass,
			s->super->name);
		// if no port is assigned use the defaultport
		printk_info("handle_superio: port 0x%x, defaultport 0x%x\n",
			s->port, s->super->defaultport);
		if (! s->port)
			s->port = s->super->defaultport;

		printk_info("handle_superio: Using port 0x%x\n", s->port);

		// need to have both pre_pci_init and devfn defined.
		if (s->super->pre_pci_init && (pass == 0)) {
			printk_debug("handle_superio: Call pre_pci_init\n");
			s->super->pre_pci_init(s);
		}
		else
		if (s->super->init && (pass == 1))
		{
			printk_debug(" Call init\n");
			s->super->init(s);
		}
		else
		if (s->super->finishup && (pass == 2))
		{
			printk_debug(" Call finishup\n");
			s->super->finishup(s);
		}
		printk_debug("handle_superio: Pass %d, done #%d\n", pass, i);
	}
	printk_debug("handle_superio done\n");
}

/** Run one pass of southbridge initialization hooks.
 * @param pass 0 = pre_pci_init, 1 = init, 2 = finishup.
 * @param s array of southbridge descriptors (advanced in place).
 * @param nsouthbridge number of entries.
 */
void
handle_southbridge(int pass, struct southbridge *s, int nsouthbridge)
{
	int i;
	for(i = 0; i < nsouthbridge; i++, s++){

		if (!s->southbridge)
			continue;
		printk_debug("handle_southbridge: Pass %d, Superio %s\n", pass,
			s->southbridge->name);

		// need to have both pre_pci_init and devfn defined.
		if (s->southbridge->pre_pci_init && (pass == 0) && (s->devfn)) {
			printk_debug(" Call pre_pci_init\n");
			s->southbridge->pre_pci_init(s);
		}
		else
		{
			// first, have to set up any device not set up.
			// policy: we ignore the devfn here. First, it's in the pcidev, and
			// second, it's really only to be used BEFORE pci config is done.
			if (!s->device)
				s->device = bios_pci_find_device(s->southbridge->vendor,
						s->southbridge->device, 0);

			if (! s->device) { // not there!
				printk_info(" No such device\n");
				continue;
			}
			// problem. We have to handle multiple devices of same type.
			// We don't do this yet. One way is to mark the pci device used at
			// this point, i.e.
			// s->device->inuse = 1
			// and then continue looking if the device is in use.
			// For now, let's get this basic thing to work.
			if (s->southbridge->init && (pass == 1)) {
				printk_debug(" Call init\n");
				s->southbridge->init(s);
			}
			else
			if (s->southbridge->finishup && (pass == 2)) {
				printk_debug(" Call finishup\n");
				s->southbridge->finishup(s);
			}
		}
	}
}

/** Top-level PCI BIOS entry: for every online IOHUB domain, scan the
 * bus tree, allocate and program resources, then enable devices.
 */
void pci_bios(void)
{
	struct bios_pci_bus *pci_root;
	int domain;

	printk_info("Finding PCI configuration type\n");
	pci_set_method();
	for (domain = 0; domain < MAX_NUMIOHUBS; domain ++) {
		if (!(online_iohubs_map & (1 << domain)))
			continue;
		pci_root = pci_enumerate(domain);
		pci_configure(pci_root);
		pci_enable(pci_root);
	}
}

diff --git a/arch/e2k/boot/bios/pci.h b/arch/e2k/boot/bios/pci.h
new file mode 100644
index 000000000000..83c6e0fc9bd6
--- /dev/null
+++ b/arch/e2k/boot/bios/pci.h
@@ -0,0 +1,605 @@
/*
 * $Id: pci.h,v 1.13 2009/01/27 11:34:42 atic Exp $
 *
 * PCI defines and function prototypes
 * Copyright 1994, Drew Eckhardt
 * Copyright 1997--1999 Martin Mares
 *
 * For more information, please consult the following manuals (look at
 * http://www.pcisig.com/ for how to get them):
 *
 * PCI BIOS Specification
 * PCI Local Bus Specification
 * PCI to PCI Bridge Specification
 * PCI System Design Guide
 */

#ifndef PCI_H
#define PCI_H

/* NOTE(review): the angle-bracket header names on the next two lines were
 * lost when this patch was extracted — restore them from the original source. */
#include
#include
#include "../boot_io.h"

extern void *malloc(int size);

#undef BIOS_DEBUG
#define PCIBIOS_DEBUG 0

#define BIOS_DEBUG PCIBIOS_DEBUG

#include "printk.h"

#undef DEBUG_PCI_MODE
#undef DebugCI
#define DEBUG_PCI_MODE 0 /* PCI scanning */
#define DebugPCI(fmt, args...)
\
	({ if (DEBUG_PCI_MODE) \
		rom_printk(fmt, ##args); })

/*
 * PCI memory and IO ports mapping
 */
#ifdef CONFIG_E2K_SIC
#define PCI_IO_START		0x00000000
#define PCI_IO_DOMAIN_SIZE	0x00004000
#define PCI_IO_END		0x00010000
#define PCI_MEM_START		0x80000000UL
#define PCI_MEM_DOMAIN_SIZE	0x10000000UL
#define PCI_MEM_END		E2K_PCI_MEM_AREA_PHYS_END	// f800_0000

#define IOHUB_SCRB_DOMAIN_SIZE	E2K_SCRB_SIZE	// 1000

/* per-domain I/O window: each IOHUB domain gets a PCI_IO_DOMAIN_SIZE slice,
 * clamped to PCI_IO_END */
#define PCI_IO_DOMAIN_START(domain)	\
		(PCI_IO_START + PCI_IO_DOMAIN_SIZE * (domain))
#define PCI_IO_DOMAIN_END(domain)	\
({	\
	unsigned long io_end;	\
	io_end = (PCI_IO_DOMAIN_START(domain) + PCI_IO_DOMAIN_SIZE);	\
	if (io_end > PCI_IO_END)	\
		io_end = PCI_IO_END;	\
	io_end;	\
})
/* per-domain memory window, clamped to PCI_MEM_END */
#define PCI_MEM_DOMAIN_START(domain)	\
		(PCI_MEM_START + PCI_MEM_DOMAIN_SIZE * (domain))
#define PCI_MEM_DOMAIN_END(domain)	\
({	\
	unsigned long mem_end;	\
	mem_end = (PCI_MEM_DOMAIN_START(domain) + PCI_MEM_DOMAIN_SIZE);	\
	if (mem_end > PCI_MEM_END)	\
		mem_end = PCI_MEM_END;	\
	mem_end;	\
})
/* the SCRB occupies the top IOHUB_SCRB_DOMAIN_SIZE bytes of the domain's
 * memory window */
#define IOHUB_SCRB_DOMAIN_START(domain)	\
		(PCI_MEM_DOMAIN_END(domain) - IOHUB_SCRB_DOMAIN_SIZE)
#define IOHUB_SCRB_DOMAIN_END(domain)	\
		(PCI_MEM_DOMAIN_END(domain))
#endif /* CONFIG_E2K_SIC */


/*
 * Under PCI, each device has 256 bytes of configuration address space,
 * of which the first 64 bytes are standardized as follows:
 */
#ifdef CONFIG_E2K_SIC
/* Additional registers in PCI configuration space for virtual PCI to PCI bridges */
#if defined(CONFIG_L_IOH2)
/* IOHUB Device Number register */
#define IOH2_DevNum	0x44	/* IOHUB Device Number */
#define IOHUB_DevNum	IOH2_DevNum
	#define IOHUB_DevNum_valid	0x0100
	#define set_IOHUB_DevNum(num)	((num) & 0x000f)
#else /* IOHUB version 1 */
#define PCI_SCBA_0	0xf0	/* System commutator base address [31:00] */
#define PCI_SCBA_1	0xf4	/* System commutator base address [63:32] */
/* IOHUB Device Number register */
#define IOH_DevNum	0xf8
#define IOHUB_DevNum	IOH_DevNum
	#define IOHUB_DevNum_valid	0x10
	#define set_IOHUB_DevNum(num)	((num) & 0x0f)
#endif /* CONFIG_L_IOH2 */

/* Additional registers in PCI configuration space for I2C/SPI controller */
	/** IOAPIC Base Address(SCRB: "ioapic" Mem Base Address Register 0) **/
#define IOAPIC_BASE_ADDRESS		0x40	// [31:5]-RWS [4:0]-RO
	/** IOAPIC Upper Base Address(SCRB: "ioapic" Mem Base Address Register 0 Upper 32 bits) **/
#define IOAPIC_UPPER_ADDRESS		0x44
	/** MSI TRANSACTION ADDRESS (SCRB: "ioapic" Mem Base Address Register 1) **/
#define MSI_TRANSACTION_ADDRESS		0x48	// [31:2]-RWS [1:0]-RO
	/** MSI TRANSACTION UPPER ADDRESS(SCRB: "ioapic" Mem Base Address Register 1 Upper 32 bits) **/
#define MSI_TRANSACTION_UPPER_ADDRESS	0x4c
	/** IOPIC Message Base Address(SCRB: "ioapic" Mem Base Address Register 2) **/
#define IOAPIC_MESSAGE_BASE_ADDRESS	0x50	// [31:12]-RWS [11:0]-RO
	/** IOPIC Upper Base Address(SCRB: "ioapic" Mem Base Address Register 2 Upper 32 bits) **/
#define IOAPIC_MESSAGE_UPPER_ADDRESS	0x54
	/** System Timer Base Address(SCRB: "timer" Mem Base Address Register 0) **/
#define SYSTEM_TIMER_BASE_ADDRESS	0x58	// [31:6]-RWS [5:0]-RO
	/** System Timer Upper Base Address(SCRB: "timer" Mem Base Address Register 0 Upper 32 bits) **/
#define SYSTEM_TIMER_UPPER_ADDRESS	0x5c
	/** Reset Control **/
#define RESET_CONTROL			0x60
	/** Software Reset Control **/
#define SOFTWARE_RESET_CONTROL		0x64
	/** Software Reset Duration **/
#define SOFTWARE_RESET_DURATION		0x68
	/** LAPIC Message Base Address **/
#define LAPIC_MESSAGE_BASE_ADDRESS	0x6c
	/** LAPIC Message Upper Address **/
#define LAPIC_MESSAGE_UPPER_ADDRESS	0x70

#define IOAPICINT_BASE			0x13000000
#define SAPICINT_BASE			0x1f000000
#define LAPICINT_BASE			0x17000000

#define APICINT_SIZE			0x01000000

#define ES2_LAPICINT_BASE		0x120000000
#define ES2_IOAPICINT_BASE		0x130000000
#define ES2_SAPICINT_BASE		0x140000000

#define E2S_LAPICINT_BASE		ES2_LAPICINT_BASE
#define E2S_IOAPICINT_BASE		ES2_IOAPICINT_BASE
+#define E2S_SAPICINT_BASE ES2_SAPICINT_BASE + +#define E8C_LAPICINT_BASE ES2_LAPICINT_BASE +#define E8C_IOAPICINT_BASE ES2_IOAPICINT_BASE +#define E8C_SAPICINT_BASE ES2_SAPICINT_BASE + +#define E1CP_EMBEDED_IOAPIC_BASE 0x00000010fec01000 +#define E1CP_LEGACY_NBSR_BASE 0x0000001100000000 +#define E1CP_LAPICINT_BASE 0x0000001200000000 +#define E1CP_IOAPICINT_BASE 0x0000001300000000 +#define E1CP_PMC_BASE 0x0000001400000000 + +#define E12C_IOAPICINT_BASE 0x122000000 + +#define E16C_IOAPICINT_BASE E12C_IOAPICINT_BASE + +#define E2C3_IOAPICINT_BASE E12C_IOAPICINT_BASE + +/* LAPICINT and SAPICINT do not exist on EPIC systems */ +#ifndef CONFIG_BOOT_EPIC +#define E12C_LAPICINT_BASE E8C_LAPICINT_BASE +#define E12C_SAPICINT_BASE E8C_SAPICINT_BASE + +#define E16C_LAPICINT_BASE E8C_LAPICINT_BASE +#define E16C_SAPICINT_BASE E8C_SAPICINT_BASE + +#define E2C3_LAPICINT_BASE E8C_LAPICINT_BASE +#define E2C3_SAPICINT_BASE E8C_SAPICINT_BASE +#endif + +#endif + +#define INTEL_MULTIFUNC_VENDOR PCI_VENDOR_ID_INTEL +#define INTEL_MULTIFUNC_DEVICE 0x0002 + +#define PCI_BRIDGE_CTL_PARITY 0x01 /* Enable parity detection on secondary interface */ +#define PCI_BRIDGE_CTL_SERR 0x02 /* The same for SERR forwarding */ +#define PCI_BRIDGE_CTL_NO_ISA 0x04 /* Disable bridging of ISA ports */ +#define PCI_BRIDGE_CTL_VGA 0x08 /* Forward VGA addresses */ +#define PCI_BRIDGE_CTL_MASTER_ABORT 0x20 /* Report master aborts */ +#define PCI_BRIDGE_CTL_BUS_RESET 0x40 /* Secondary bus reset */ +#define PCI_BRIDGE_CTL_FAST_BACK 0x80 /* Fast Back2Back enabled on secondary interface */ +#ifdef CONFIG_E2K_SIC +/* SCRB registers only for bus after virual bus */ +#define B0_SE 0x104 /* 8/0x03 PCIe brigde Spaces Enable 0:N:0{04} */ +#define B0_BN 0x118 /* 32/0x00ffffff PCIe brigde Bus Number 0:N:0{0x18-0x1b} */ +#define B0_IOBL 0x11c /* 16/0xf0f0 PCIe bridge I/O Base and + * Limit Register 0:N:0{0x1c-0x1d} */ +#define B0_IOBLU 0x130 /* 32/0xffffffff PCIe bridge I/O Base and + * Limit Upper 16 bits 0:N:0{0x30-0x33} 
*/ +#define B0_MBL 0x120 /* 32/0xfff0fff0 PCIe bridge Mem Base and + * Limit Register 0:N:0{0x20-0x23} */ +#define B0_PMBL 0x124 /* 32/0xfff0fff0 PCIe bridge Prefetchable + * Mem Base and Limit Register 0:N:0{0x24-0x27} */ +#define B0_PMBU32 0x128 /* 32/0xffffffff PCIe bridge Prefetchable + * Memory Base Upper 32 bits 0:N:0{0x28-0x2b} */ +#define B0_PMLU32 0x12c /* 32/0xffffffff PCIe bridge Prefetchable + * Memory Limit Upper 32 bits 0:N:0{0x2c-0x2f} */ +#define B0_BA0 0x17c /* 32/0xfffffff0 PCIe bridge Base Address + * Register 0:N:0{0x7c-0x7f} */ +#define B0_BUA0 0x180 /* 32/0xffffffff PCIe bridge Base Address + * Upper 32 bits 0:N:0{0x80-0x83} */ +#define B0_BCTRL 0x13e /* 8/0x03 PCIe bridge control 0:N:0{0x3e} */ +#define B1_SE 0x204 /* 8/0x03 PCI bridge Spaces Enable m:0:0{0x04} */ +#define B1_BN 0x218 /* 32/0x00ffffff PCI bridge Bus Number m:0:0{0x18-0x1b} */ +#define B1_IOBL 0x21c /* 16/0xf0f0 PCI bridge I/O Base and + * Limit Register m:0:0{0x1c-0x1d} */ +#define B1_IOBLU 0x230 /* 32/0xffffffff PCI bridge I/O Base and + * Limit Upper 16 bits m:0:0{0x30-0x33} */ +#define B1_MBL 0x220 /* 32/0xfff0fff0 PCI bridge Mem Base and + * Limit Register m:0:0{0x20-0x23} */ +#define B1_PMBL 0x224 /* 32/0xfff0fff0 PCI bridge Prefetchable + * Mem Base and Limit Register m:0:0{0x24-0x27} */ +#define B1_PMBU32 0x228 /* 32/0xffffffff PCI bridge Prefetchable + * Memory Base Upper 32 bits m:0:0{0x28-0x2b} */ +#define B1_PMLU32 0x22c /* 32/0xffffffff PCI bridge Prefetchable + * Memory Limit Upper 32 bits m:0:0{0x2c-0x2f} */ +#define B1_BCTRL 0x23e /* 8/0x1c PCI bridge control m:0:0{0x3e} */ +#define A0_SE 0x304 /* 8/0x02 "AC97" Mem Space Enable m:2:3{0x04} */ +#define A0_BA0 0x310 /* 32/0xfffff000 "AC97" Mem Base Address + * Register 0 m:2:3{0x10-0x13} */ +#define A0_BA1 0x314 /* 32/0xfffff000 "gpio" Mem Base Address + * Register 1 m:2:3{0x14-0x17} */ +#define A1_SE 0x404 /* 8/0x02 "i2c/spi/ioapic" Mem Space + * Enable m:2:1{0x04} */ +#define A1_BA0 0x410 /* 32/0xffffffc0 
"i2c/spi" Mem Base Address + * Register 0 m:2:1{0x10-0x13} */ +#define A1_BA1 0x414 /* 32/0xffffffc0 "i2c/spi" Mem Base Address + * Register 1 m:2:1{0x14-0x17} */ +#define A2_BA0 0x440 /* 32/0xffffffe0 "ioapic" Mem Base Address + * Register 0 m:2:1{0x40-0x43} */ +#define A2_BUA0 0x444 /* 32/0xffffffff "ioapic" Mem Base Address 0 + * Upper 32 bits m:2:1{0x44-0x47} */ +#define A2_BA1 0x448 /* 32/0xfffffffc "ioapic" Mem Base Address + * Register 1 m:2:1{0x48-0x4b} */ +#define A2_BUA1 0x44c /* 32/0xffffffff "ioapic" Mem Base Address 1 + * Upper 32 bits m:2:1{0x4c-0x4f} */ +#define A2_BA2 0x450 /* 32/0xfffff000 "ioapic" Mem Base Address + * Register 2 m:2:1{0x50-0x53} */ +#define A2_BUA2 0x454 /* 32/0xffffffff "ioapic" Mem Base Address 2 + * Upper 32 bits m:2:1{0x54-0x57} */ +#define A3_BA0 0x458 /* 32/0xffffffc0 "timer" Mem Base Address + * Register 0 m:2:1{0x58-0x5b} */ +#define A3_BUA0 0x45c /* 32/0xffffffff "timer" Mem Base Address + * Upper 32 bits m:2:1{0x5c-0x5f} */ +#define A4_SE 0x504 /* 8/0x02 "eth" Mem Space Enable m:1:0{0x04} */ +#define A4_BA0 0x510 /* 32/0xffffffe0 "eth" Mem Base Address + * Register m:1:0{0x10-0x13} */ +#define A5_SE 0x604 /* 8/0x03 "parport/rs232" Spaces + * Enable m:2:2{0x04} */ +#define A5_BA0 0x610 /* 32/0xffffffe0 "parport" IO Base Address + * Register m:2:2{0x10-0x13} */ +#define A6_BA0 0x614 /* 32/?? 
"rs232" Mem Base Address + * Register m:2:2{0x14-0x17} */ +#define A7_SE 0x704 /* 8/0x03 "IDE" Spaces Enable m:2:0{0x04} */ +#define A7_AMR 0x709 /* 8/0x05 "IDE" Addressing Mode + * Register m:2:0{0x09} */ +#define A7_BA0 0x710 /* 32/0xfffffff8 "IDE" IO Base Address + * Register 0 m:2:0{0x10-0x13} */ +#define A7_BA1 0x714 /* 32/0xfffffffc "IDE" IO Base Address + * Register 1 m:2:0{0x14-0x17} */ +#define A7_BA2 0x718 /* 32/0xfffffff8 "IDE" IO Base Address + * Register 2 m:2:0{0x18-0x1b} */ +#define A7_BA3 0x71c /* 32/0xfffffffc "IDE" IO Base Address + * Register 3 m:2:0{0x1c-0x1f} */ +#define A7_BA4 0x720 /* 32/0x0000fff0 "IDE" IO Base Address + * Register 4 m:2:0{0x20-0x23} */ +#define A7_HCE 0x750 /* 8/0x40 "IDE" Hidden Channel + * Enable m:2:0{0x50} */ +#define A7_BA5 0x758 /* 32/0xffffffe0 "IDE" Mem Base Address + * Register 5 m:2:0{0x58-0x5b} */ +#define A7_BUA5 0x75c /* 32/0xffffffff "IDE" Mem Base Address 5 + * Upper 32 bits m:2:0{0x5c-0x5f} */ +#define A7_BA6 0x760 /* 32/0xfffffff0 "IDE" Mem Base Address + * Register 6 m:2:0{0x60-0x63} */ +#define A7_BUA6 0x764 /* 32/0xffffffff "IDE" Mem Base Address 6 + * Upper 32 bits m:2:0{0x64-0x67} */ +#define A7_BA7 0x768 /* 32/0xfffffff0 "IDE" Mem Base Address + * Register 7 m:2:0{0x68-0x6b} */ +#define A7_BUA7 0x76c /* 32/0xffffffff "IDE" Mem Base Address 7 + * Upper 32 bits m:2:0{0x6c-0x6f} */ +/* 8/0x02 "SATA" Spaces Enable m:2:0{0x04} */ +#define A8_SE 0x804 +/* 8/0x24-0x27 "SATA" Mem Base Address Register m:3:0{0x24-0x27} */ +#define A8_ABAR 0x824 +/* Control Register */ +#define AHOM 0xa00 /* Abonent Hide Operation Mode R/W */ + +/* Registers for Real Pci 2 Pci Bridge */ +#define Arb_CtlSta 0x78 /* Arbitration Control (Real Pci 2 Pci Configuration Space) */ +#endif + + +/* + * There is one bios_pci_dev structure for each slot-number/function-number + * combination: + */ +struct bios_pci_dev { + struct bios_pci_bus *bus; /* bus this device is on */ + struct bios_pci_dev *sibling; /* next device on this bus */ + 
struct bios_pci_dev *next; /* chain of all devices */ + + void *sysdata; /* hook for sys-specific extension */ + struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */ + + unsigned int devfn; /* encoded device & function index */ + unsigned short vendor; + unsigned short device; + unsigned char revision; /* chip revision */ + unsigned int class; /* 3 bytes: (base,sub,prog-if) */ + unsigned char subsys_id; /* subsystem ID */ + unsigned int hdr_type; /* PCI header type */ + unsigned int master : 1; /* set if device is master capable */ + u8 command; + /* + * In theory, the irq level can be read from configuration + * space and all would be fine. However, old PCI chips don't + * support these registers and return 0 instead. For example, + * the Vision864-P rev 0 chip can uses INTA, but returns 0 in + * the interrupt line and pin registers. pci_init() + * initializes this field with the value at PCI_INTERRUPT_LINE + * and it is the job of pcibios_fixup() to change it if + * necessary. The field must not be 0 unless the device + * cannot generate interrupts at all. + */ + unsigned int irq; /* irq generated by this device */ + + /* Base registers for this device, can be adjusted by + * pcibios_fixup() as necessary. 
+ */ + unsigned long base_address[6]; + unsigned long size[6]; + unsigned long rom_address; + unsigned long rom_size; +}; + +struct bios_pci_bus { + struct bios_pci_bus *parent; /* parent bus this bridge is on */ + struct bios_pci_bus *children; /* chain of P2P bridges on this bus */ + struct bios_pci_bus *next; /* chain of all PCI buses */ + + struct bios_pci_dev *self; /* bridge device as seen by parent */ + struct bios_pci_dev *devices; /* devices behind this bridge */ + + void *sysdata; /* hook for sys-specific extension */ + struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */ + unsigned char number; /* bus number */ + unsigned char primary; /* number of primary bridge */ + unsigned char secondary; /* number of secondary bridge */ + unsigned char subordinate; /* max number of subordinate buses */ + + unsigned long mem, prefmem, io; /* amount of mem, prefetch mem, + * and I/O needed for this bridge. + * computed by compute_resources, + * inclusive of all child bridges + * and devices + */ + u32 membase, memlimit; + u32 prefmembase, prefmemlimit; + u32 iobase, iolimit; +}; + +extern struct bios_pci_bus pci_root[]; /* root buses */ +extern int pci_root_num; +extern struct bios_pci_dev *pci_devices; /* list of all devices */ + +#ifdef CONFIG_E2K_SIC +static inline int bios_pci_domain_nr(struct bios_pci_bus *bus) +{ + return ((unsigned long)bus->sysdata); +} +static inline void bios_set_pci_domain_nr(struct bios_pci_bus *bus, int domain) +{ + bus->sysdata = (void *)domain; +} +#endif /* CONFIG_E2K_SIC */ + +/* + * Error values that may be returned by the PCI bios. 
+ */ +#define PCIBIOS_SUCCESSFUL 0x00 +#define PCIBIOS_FUNC_NOT_SUPPORTED 0x81 +#define PCIBIOS_BAD_VENDOR_ID 0x83 +#define PCIBIOS_DEVICE_NOT_FOUND 0x86 +#define PCIBIOS_BAD_REGISTER_NUMBER 0x87 +#define PCIBIOS_SET_FAILED 0x88 +#define PCIBIOS_BUFFER_TOO_SMALL 0x89 + +/* Class Code register */ +#define NATIVE_MODE_PRIMARY_CLASSC 0x01 /* primary channel in native */ + /* mode */ +#define NATIVE_MODE_SECONDARY_CLASSC 0x04 /* secondary channel in */ + /* native mode */ +#define NATIVE_MODE_CLASSC (NATIVE_MODE_PRIMARY_CLASSC | \ + NATIVE_MODE_SECONDARY_CLASSC) + +/* Low-level architecture-dependent routines */ + +int pcibios_present (void); +void pcibios_init(void); +void pcibios_fixup(void); +char *pcibios_setup (char *str); +int bios_pci_read_config_byte(struct bios_pci_dev *dev, u8 where, u8 *val); +int bios_pci_read_config_word(struct bios_pci_dev *dev, u8 where, u16 *val); +int bios_pci_read_config_dword(struct bios_pci_dev *dev, u8 where, u32 *val); +int bios_pci_write_config_byte(struct bios_pci_dev *dev, u8 where, u8 val); +int bios_pci_write_config_word(struct bios_pci_dev *dev, u8 where, u16 val); +int bios_pci_write_config_dword(struct bios_pci_dev *dev, u8 where, u32 val); +int pcibios_read_config_byte(int domain, unsigned char bus, unsigned char dev_fn, + unsigned char where, u8 *val); +int pcibios_read_config_word(int domain, unsigned char bus, unsigned char dev_fn, + unsigned char where, u16 *val); +int pcibios_read_config_dword(int domain, unsigned char bus, unsigned char dev_fn, + unsigned char where, u32 *val); +int pcibios_write_config_byte(int domain, unsigned char bus, unsigned char dev_fn, + unsigned char where, u8 val); +int pcibios_write_config_word(int domain, unsigned char bus, unsigned char dev_fn, + unsigned char where, u16 val); +int pcibios_write_config_dword(int domain, unsigned char bus, unsigned char dev_fn, + unsigned char where, u32 val); +int pcibios_debugwrite_config_byte(int domain, unsigned char bus, unsigned char dev_fn, + 
unsigned char where, u8 val); +int pcibios_debugwrite_config_word(int domain, unsigned char bus, unsigned char dev_fn, + unsigned char where, u16 val); +int pcibios_debugwrite_config_dword(int domain, unsigned char bus, unsigned char dev_fn, + unsigned char where, u32 val); + +#ifdef CONFIG_E2K_SIC +#ifndef CONFIG_L_IOH2 +int system_commutator_es2_ioh_write_byte(int domain, unsigned char bus, + int where, u8 value); +int system_commutator_es2_ioh_read_byte(int domain, unsigned char bus, + int where, u8 *value); +int system_commutator_es2_ioh_write_word(int domain, unsigned char bus, + int where, u16 value); +int system_commutator_es2_ioh_read_word(int domain, unsigned char bus, + int where, u16 *value); +int system_commutator_es2_ioh_write_dword(int domain, unsigned char bus, + int where, u32 value); +int system_commutator_es2_ioh_read_dword(int domain, unsigned char bus, + int where, u32 *value); +#else /* CONFIG_L_IOH2 */ +/* IOHUB #2 has not SCRB registers to read/write */ +#define system_commutator_es2_ioh_write_byte(domain, bus, where, value) 0 +#define system_commutator_es2_ioh_read_byte(domain, bus, where, value) 0 +#define system_commutator_es2_ioh_write_word(domain, bus, where, value) 0 +#define system_commutator_es2_ioh_read_word(domain, bus, where, value) 0 +#define system_commutator_es2_ioh_write_dword(domain, bus, where, value) 0 +#define system_commutator_es2_ioh_read_dword(domain, bus, where, value) 0 +#endif /* ! CONFIG_L_IOH2 */ +#endif /* CONFIG_E2K_SIC */ + + +/* Don't use these in new code, use pci_find_... 
instead */ + +int pcibios_find_class (unsigned int class_code, unsigned short index, unsigned char *bus, unsigned char *dev_fn); +int pcibios_find_device (unsigned short vendor, unsigned short dev_id, + unsigned short index, unsigned char *bus, + unsigned char *dev_fn); + +/* Generic PCI interface functions */ + +struct bios_pci_bus *pci_init(int domain); +void pci_setup(char *str, int *ints); +void pci_quirks_init(void); +void pci_proc_init(void); +void proc_old_pci_init(void); +int get_pci_list(char *buf); +int pci_proc_attach_device(struct bios_pci_dev *dev); +int pci_proc_detach_device(struct bios_pci_dev *dev); + +struct bios_pci_dev *bios_pci_find_device(unsigned int vendor, + unsigned int device, struct bios_pci_dev *from); +struct bios_pci_dev *pci_find_class(unsigned int class, + struct bios_pci_dev *from); +struct bios_pci_dev *pci_find_slot(unsigned int bus, unsigned int devfn); + +#define pci_present pcibios_present +int pci_debugwrite_config_byte(struct bios_pci_dev *dev, u8 where, u8 val); +int pci_debugwrite_config_word(struct bios_pci_dev *dev, u8 where, u16 val); +int pci_debugwrite_config_dword(struct bios_pci_dev *dev, u8 where, u32 val); +void bios_pci_set_master(struct bios_pci_dev *dev); +void pci_set_method(void); +struct bios_pci_bus *pci_enumerate(int domain); +void pci_configure(struct bios_pci_bus *root_bus); +void pci_enable(struct bios_pci_bus *root_bus); +void pci_zero_irq_settings(void); + +// historical functions ... 
+void intel_conf_writeb(unsigned long port, unsigned char value); +unsigned char intel_conf_readb(unsigned long port); + +#ifdef CONFIG_E2K_SIC +static inline void set_iohub_dev_num(int domain) +{ + int devfn; + unsigned int reg; + + reg = set_IOHUB_DevNum(domain) | IOHUB_DevNum_valid; +#ifdef CONFIG_L_IOH2 + devfn = domain * 8; + pcibios_write_config_word(domain, 0, devfn, IOHUB_DevNum, (u16)reg); + pcibios_read_config_word(domain, 0, devfn, IOHUB_DevNum, (u16 *)®); +#else /* IOHUB-1 */ + devfn = (domain * 2 + 1) * 8; /* slot #0 PCIe, #1: virt PCI to PCI */ + pcibios_write_config_byte(domain, 0, devfn, IOHUB_DevNum, (u8)reg); + pcibios_read_config_byte(domain, 0, devfn, IOHUB_DevNum, (u8 *)®); +#endif /* CONFIG_L_IOH2 */ + DebugPCI("set_iohub_dev_num() set device number to 0x%04x\n", reg); +} +#endif /* CONFIG_E2K_SIC */ + +// Rounding for boundaries. +// Due to some chip bugs, go ahead and roung IO to 16 +#define IO_ALIGN 16 +#define IO_BRIDGE_ALIGN 4096 +#define MEM_ALIGN 4096 + +#include "pciconf.h" + +/* linkages from devices of a type (e.g. superio devices) + * to the actual physical PCI device. This type is used in an array of + * structs built by NLBConfig.py. We owe this idea to Plan 9. + */ + +struct superio; + +struct superio_control { + void (*pre_pci_init)(struct superio *s); + void (*init)(struct superio *s); + void (*finishup)(struct superio *s); + unsigned int defaultport; /* the defaultport. Can be overridden + * by commands in config + */ + // This is the print name for debugging + char *name; +}; + +struct com_ports { + unsigned int enable,baud, base, irq; +}; + +// lpt port description. +// Note that for many superios you only really need to define the +// enable. +struct lpt_ports { + unsigned int enable, // 1 if this port is enabled + mode, // pp mode + base, // IO base of the parallel port + irq; // irq +}; + +struct superio { + struct superio_control *super; // the ops for the device. 
+ unsigned int port; // if non-zero, overrides the default port + // com ports. This is not done as an array (yet). + // We think it's easier to set up from python if it is not an array. + struct com_ports com1, com2, com3, com4; + // DMA, if it exists. + struct lpt_ports lpt1, lpt2; + /* flags for each device type. Unsigned int. */ + // low order bit ALWAYS means enable. Next bit means to enable + // LPT is in transition, so we leave this here for the moment. + // The winbond chips really stretched the way this works. + // so many functions! + unsigned int ide, floppy, lpt; + unsigned int keyboard, cir, game; + unsigned int gpio1, gpio2, gpio3; + unsigned int acpi,hwmonitor; +}; + +struct southbridge; + +struct southbridge_control { + void (*pre_pci_init)(struct southbridge *s); + void (*init)(struct southbridge *s); + void (*finishup)(struct southbridge *s); + // this is the vendor and device id + unsigned int vendor, device; + // This is the print name for debugging + char *name; +}; + +struct southbridge { + struct bios_pci_dev *device; /* the device. */ + struct southbridge_control *southbridge; /* the ops for the device. */ + unsigned int devfn; /* the devfn. + * if devfn is known, the device can be + * configured for PCI discovery. + * this is needed for some devices + * such as acer m1535 + */ + /* flags for each device type. Unsigned int. + * low order bit ALWAYS means enable. Next bit means to enable + * DMA, if it exists. + */ + unsigned int ide; +}; + +#endif /* PCI_H */ + + + + diff --git a/arch/e2k/boot/bios/pci_isa_config.h b/arch/e2k/boot/bios/pci_isa_config.h new file mode 100644 index 000000000000..af23a71c234e --- /dev/null +++ b/arch/e2k/boot/bios/pci_isa_config.h @@ -0,0 +1,83 @@ +/* + * Southbridge configuration. + * PCI/ISA Bridge Configuration Registers (Function 0). 
+ */ +#ifndef _PCI_ISA_CONFIG_H_ +#define _PCI_ISA_CONFIG_H_ + +#include +#include +#include + +#define PSI_ISA_CONFIG_REGS_FUNC 0 + +/* REG XBCS + * X-BUS CHIP SELECT REGISTER 4E-4F default 0x3 + */ +#define SB_XBCS 0x4E // 4E-4F +#define SB_XBCS_io_lo 0x4E // 4E-4F +#define SB_XBCS_io_hi 0x4F // 4E-4F +#define SB_XBCS_DEFAULT 0x0003 +#define SB_XBCS_RTC_ENABLE 0x0001 +#define SB_XBCS_KBC_ENABLE 0x0002 +#define SB_XBCS_BIOSWP_ENABLE 0x0004 +#define SB_XBCS_PORT61ALIAS_ENABLE 0x0008 +#define SB_XBCS_IRQ12_MOUSE_ENABLE 0x0010 +#define SB_XBCS_COERR_ENABLE 0x0020 +#define SB_XBCS_LOWER_BIOS_ENABLE 0x0040 +#define SB_XBCS_EXT_BIOS_ENABLE 0x0080 +#define SB_XBCS_IOAPIC_ENABLE 0x0100 +#define SB_XBCS_1M_EXT_BIOS_ENABLE 0x0200 +/* Micro Controller Adress Location */ +#define SB_XBCS_MCA_LOCATION_ENABLE 0x0400 + +#define SB_XBCS_MASK 0x07FF + +/* REG SERIRQC + * SERIAL IRQ CONTROL REGISTER 64H default 0x10 + */ +#define SB_SERIRQC 0x64 +/* Start Frame Pulse Width bits 1:0 */ +#define SB_SERIRQC_SFP_4CLOCK 0x00 +#define SB_SERIRQC_SFP_6CLOCK 0x01 +#define SB_SERIRQC_SFP_8CLOCK 0x02 +#define SB_SERIRQC_SFP_RESERVED 0x03 +#define SB_SERIRQC_SFP_MASK 0x03 +/* Serial IRQ Frame Size bits 5:2, only 0100b supported by PIIX4 */ +#define SB_SERIRQC_FRAME_SIZE 0x10 +#define SB_SERIRQC_FRAME_MASK 0x3C +/* Serial IRQ Mode Select bit 6, 0 - quite mode 1 - continuous mode */ +#define SB_SERIRQC_SHIFT 6 +#define SB_SERIRQC_CONT_MODE 0x40 +/* Serial IRQ Enable, bit 16 in register offset B0h-B3h must also be 1 */ +#define SB_SERIRQC_IRQ_ENABLE 0x80 + +/* REG GENCFG + * GENERAL CONFIGURATION REGISTER B0-B3h default 0 + */ +#define SB_GENCFG 0XB0 // B0 - B4 +#define SB_GENCFG1 0XB0 +#define SB_GENCFG2 0XB1 +#define SB_GENCFG3 0XB2 +#define SB_GENCFG4 0XB3 +/* 0=EOI 1=ISA */ +#define SB_GENCFG_ISA_SELECT 0x00000001 +#define SB_GENCFG_DECODE_CONFIG 0x00000002 +#define SB_GENCFG_CONFIG_1 0x00000004 +#define SB_GENCFG_CONFIG_2 0x00000008 +/* 0 - primary&secondary interface, 1 - primary0&primary1 
*/ +#define SB_GENCFG_IDE_INTERFACE 0x00000010 +#define SB_GENCFG_ALT_ACCESS_MODE 0x00000020 +#define SB_GENCFG_PnP_ADDR_DECODE_ENABLE 0x00000040 +//#define SB_GENCFG_RESERVED 0x00000080 +#define SB_GENCFG_SIGNAL_PIN_SELECTED8 0x00000100 +#define SB_GENCFG_SIGNAL_PIN_SELECTED9 0x00000200 +#define SB_GENCFG_SIGNAL_PIN_SELECTED10 0x00000400 +#define SB_GENCFG_PRIMARY_IDE_SigIn 0x00000800 +#define SB_GENCFG_SECONDARY_IDE_SigIn 0x00001000 +//#define SB_GENCFG_RESERVED 0x00002000 +/* 14 - 31 bits Signal Pin Selected*/ +#define SB_GENCFG_SIGNAL_PIN_SELECTED14 0x00004000 +#define SB_GENCFG_SERIRQ_PIN_SELECTED 0x00010000 + +#endif /* _PCI_ISA_CONFIG_H_ */ diff --git a/arch/e2k/boot/bios/pciconf.h b/arch/e2k/boot/bios/pciconf.h new file mode 100644 index 000000000000..692d3338c5b6 --- /dev/null +++ b/arch/e2k/boot/bios/pciconf.h @@ -0,0 +1,7 @@ +#ifndef PCI_CONF_REG_INDEX +// These are defined in the PCI spec, and hence are theoretically +// inclusive of ANYTHING that uses a PCI bus. +#define PCI_CONF_REG_INDEX 0xcf8 +#define PCI_CONF_REG_DATA 0xcfc +#define CONFIG_ADDR(bus,devfn,where) (((bus) << 16) | ((devfn) << 8) | (where)) +#endif diff --git a/arch/e2k/boot/bios/printk.h b/arch/e2k/boot/bios/printk.h new file mode 100644 index 000000000000..82045ace3511 --- /dev/null +++ b/arch/e2k/boot/bios/printk.h @@ -0,0 +1,43 @@ +/* + * $Id: printk.h,v 1.1 2005/08/19 13:17:27 kostin Exp $ + */ + +#ifndef _PRINTK_H_ +#define _PRINTK_H_ + +extern void rom_printk(char const *fmt, ...); +#define do_printk rom_printk + +#undef printk_emerg +#undef printk_alert +#undef printk_crit +#undef printk_err +#undef printk_warning +#undef printk_notice +#undef printk_info +#undef printk_debug +#undef printk_spew + +#ifdef BIOS_DEBUG +#define printk_emerg(fmt, arg...) do_printk(fmt, ##arg) +#define printk_alert(fmt, arg...) do_printk(fmt, ##arg) +#define printk_crit(fmt, arg...) do_printk(fmt, ##arg) +#define printk_err(fmt, arg...) do_printk(fmt, ##arg) +#define printk_warning(fmt, arg...) 
do_printk(fmt, ##arg) +#define printk_notice(fmt, arg...) do_printk(fmt, ##arg) +#define printk_info(fmt, arg...) do_printk(fmt, ##arg) +#define printk_debug(fmt, arg...) if (BIOS_DEBUG > 0) do_printk(fmt, ##arg) +#define printk_spew(fmt, arg...) if (BIOS_DEBUG > 1) do_printk(fmt, ##arg) +#else +#define printk_emerg(fmt, arg...) do_printk(fmt, ##arg) +#define printk_alert(fmt, arg...) do_printk(fmt, ##arg) +#define printk_crit(fmt, arg...) do_printk(fmt, ##arg) +#define printk_err(fmt, arg...) do_printk(fmt, ##arg) +#define printk_warning(fmt, arg...) do_printk(fmt, ##arg) +#define printk_notice(fmt, arg...) do_printk(fmt, ##arg) +#define printk_info(fmt, arg...) do_printk(fmt, ##arg) +#define printk_debug(fmt, arg...) +#define printk_spew(fmt, arg...) +#endif + +#endif diff --git a/arch/e2k/boot/bios/southbridge.c b/arch/e2k/boot/bios/southbridge.c new file mode 100644 index 000000000000..0e147b57e6c0 --- /dev/null +++ b/arch/e2k/boot/bios/southbridge.c @@ -0,0 +1,362 @@ +/* + * $Id: southbridge.c,v 1.18 2009/02/24 15:14:04 atic Exp $ + */ + +#include +#include + +#include "../boot_io.h" + +#include +#include +#include +#include +#include + +#include "pci_isa_config.h" +#include "ide_config.h" +#include "southbridge.h" +#include "mc146818rtc.h" +#include "pci.h" + +#define DEBUG_IOSB 1 +#define DebugSB if (DEBUG_IOSB) rom_printk + +extern volatile unsigned long phys_node_pres_map; +extern int phys_node_num; +extern volatile unsigned long online_iohubs_map; +extern int online_iohubs_num; +extern volatile unsigned long possible_iohubs_map; +extern int possible_iohubs_num; + +int SB_bus, SB_device; + +#ifdef CONFIG_E2K_SIC +# define E2K_IO_APIC_AREA_PHYS_BASE 0x00000000fec00000UL +static void configure_iohub_apic(int domain) +{ + struct bios_pci_dev *dev = NULL; + unsigned int ioapic_base, ioapic_upper32; +#ifndef CONFIG_BOOT_EPIC + unsigned int lapic_base, lapic_upper32; +#ifdef CONFIG_E2K_FULL_SIC + unsigned int sapic_base; + unsigned int sapic_upper32; +#endif /* 
CONFIG_E2K_FULL_SIC */ +#endif + unsigned long tmp; + + rom_printk("Scanning PCI domain %d bus for ioapic/pic/timer i2c/spi " + "controller ...", domain); + do { + dev = bios_pci_find_device(INTEL_MULTIFUNC_VENDOR, + INTEL_MULTIFUNC_DEVICE, dev); + if (dev) { + if (bios_pci_domain_nr(dev->bus) != domain) + continue; + rom_printk("found on domain %d bus %d device %d\n", + bios_pci_domain_nr(dev->bus), dev->bus->number, + PCI_SLOT(dev->devfn)); + break; + } + } while (dev); + if (dev == NULL) { + do { + dev = bios_pci_find_device(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_I2C_SPI, dev); + if (dev) { + if (bios_pci_domain_nr(dev->bus) != domain) + continue; + rom_printk("found on domain %d bus %d " + "device %d\n", + bios_pci_domain_nr(dev->bus), + dev->bus->number, + PCI_SLOT(dev->devfn)); + break; + } + } while (dev); + if (dev == NULL) { + rom_printk("!!! NOT FOUND !!!\n"); + return; + } + } + + /* configure configuration space for ioapic on domain */ + ioapic_base = E2K_IO_APIC_AREA_PHYS_BASE + domain * 0x1000; + DebugSB("configure_apic_system: --> to i2c & scrb (iohub)\n" + "ioapic_upper32 = 0x%x, ioapic_base = 0x%x\n", + 0, ioapic_base); + pcibios_write_config_dword(domain, dev->bus->number, dev->devfn, + IOAPIC_BASE_ADDRESS, ioapic_base); + pcibios_write_config_dword(domain, dev->bus->number, dev->devfn, + IOAPIC_UPPER_ADDRESS, 0); + system_commutator_es2_ioh_write_dword(domain, dev->bus->number, + A2_BA0, ioapic_base); + system_commutator_es2_ioh_write_dword(domain, dev->bus->number, + A2_BUA0, 0); + +/* LAPICINT and SAPICINT should not be written on EPIC systems */ +#ifndef CONFIG_BOOT_EPIC +#ifdef CONFIG_E2K_FULL_SIC + /* configure configuration space for sapic on BSP */ +#if defined(CONFIG_ES2) + tmp = ES2_SAPICINT_BASE + (domain * APICINT_SIZE); +#elif defined(CONFIG_E2S) + tmp = E2S_SAPICINT_BASE + (domain * APICINT_SIZE); +#elif defined(CONFIG_E8C) || defined(CONFIG_E8C2) + tmp = E8C_SAPICINT_BASE + (domain * (APICINT_SIZE)); +#elif 
defined(CONFIG_E12C) + tmp = E12C_SAPICINT_BASE + (domain * (APICINT_SIZE)); +#elif defined(CONFIG_E16C) + tmp = E16C_SAPICINT_BASE + (domain * (APICINT_SIZE)); +#elif defined(CONFIG_E2C3) + tmp = E2C3_SAPICINT_BASE + (domain * (APICINT_SIZE)); +#else + #error "Invalid e2k machine type" +#endif /* CONFIG_ES2 */ + sapic_base = tmp & 0xffffffff; + sapic_upper32 = (tmp >> 32) & 0xffffffff; + DebugSB("configure_apic_system: --> to i2c & scrb (iohub)\n" + "sapic_message_upper32 = 0x%x, sapic_message_base = 0x%x\n", + sapic_upper32, sapic_base); + pcibios_write_config_dword(domain, dev->bus->number, dev->devfn, + MSI_TRANSACTION_ADDRESS, sapic_base); + pcibios_write_config_dword(domain, dev->bus->number, dev->devfn, + MSI_TRANSACTION_UPPER_ADDRESS, sapic_upper32); + system_commutator_es2_ioh_write_dword(domain, dev->bus->number, A2_BA1, + sapic_base); + system_commutator_es2_ioh_write_dword(domain, dev->bus->number, A2_BUA1, + sapic_upper32); +#endif /* CONFIG_E2K_FULL_SIC */ + /* configure configuration space for lapic on BSP */ +#if defined(CONFIG_ES2) + tmp = ES2_LAPICINT_BASE + (domain * APICINT_SIZE); +#elif defined(CONFIG_E2S) + tmp = E2S_LAPICINT_BASE + (domain * APICINT_SIZE); +#elif defined(CONFIG_E8C) || defined(CONFIG_E8C2) + tmp = E8C_LAPICINT_BASE + (domain * (APICINT_SIZE)); +#elif defined(CONFIG_E1CP) + tmp = E1CP_LAPICINT_BASE + (domain * (APICINT_SIZE)); +#elif defined(CONFIG_E12C) + tmp = E12C_LAPICINT_BASE + (domain * (APICINT_SIZE)); +#elif defined(CONFIG_E16C) + tmp = E16C_LAPICINT_BASE + (domain * (APICINT_SIZE)); +#elif defined(CONFIG_E2C3) + tmp = E2C3_LAPICINT_BASE + (domain * (APICINT_SIZE)); +#else + #error "Invalid e2k machine type" +#endif /* CONFIG_ES2 */ + lapic_base = tmp & 0xffffffff; + lapic_upper32 = (tmp >> 32) & 0xffffffff; + DebugSB("configure_apic_system: --> to i2c & scrb (iohub)\n" + "lapic_message_upper32 = 0x%x, lapic_message_base = 0x%x\n", + lapic_upper32, lapic_base); +#ifdef CONFIG_E2K_LEGACY_SIC + 
early_sic_write_node_nbsr_reg(0, SIC_rt_lapicintb, tmp >> 12); + DebugSB("configure_apic_system: NBSR lapicint base = 0x%x\n", + early_sic_read_node_nbsr_reg(0, SIC_rt_lapicintb)); +#endif /* CONFIG_E2K_LEGACY_SIC */ + pcibios_write_config_dword(domain, dev->bus->number, dev->devfn, + LAPIC_MESSAGE_BASE_ADDRESS, lapic_base); + pcibios_write_config_dword(domain, dev->bus->number, dev->devfn, + LAPIC_MESSAGE_UPPER_ADDRESS, lapic_upper32); +#endif + + /* configure configuration space for ioapic on BSP */ +#if defined(CONFIG_ES2) + tmp = ES2_IOAPICINT_BASE + (domain * APICINT_SIZE); +#elif defined(CONFIG_E2S) + tmp = E2S_IOAPICINT_BASE + (domain * APICINT_SIZE); +#elif defined(CONFIG_E8C) || defined(CONFIG_E8C2) + tmp = E8C_IOAPICINT_BASE + (domain * (APICINT_SIZE)); +#elif defined(CONFIG_E1CP) + tmp = E1CP_IOAPICINT_BASE + (domain * (APICINT_SIZE)); +#elif defined(CONFIG_E12C) + tmp = E12C_IOAPICINT_BASE + (domain * (APICINT_SIZE)); +#elif defined(CONFIG_E16C) + tmp = E16C_IOAPICINT_BASE + (domain * (APICINT_SIZE)); +#elif defined(CONFIG_E2C3) + tmp = E2C3_IOAPICINT_BASE + (domain * (APICINT_SIZE)); +#else + #error "Invalid e2k machine type" +#endif /* CONFIG_ES2 */ + ioapic_base = tmp & 0xffffffff; + ioapic_upper32 = (tmp >> 32) & 0xffffffff; + DebugSB("configure_apic_system: --> to i2c\n" + "ioapic_message_upper32 = 0x%x, ioapic_message_base = 0x%x\n", + ioapic_upper32, ioapic_base); +#ifdef CONFIG_E2K_LEGACY_SIC + early_sic_write_node_nbsr_reg(0, SIC_rt_ioapicintb, tmp >> 12); + DebugSB("configure_apic_system: NBSR ioapicint base = 0x%x\n", + early_sic_read_node_nbsr_reg(0, SIC_rt_ioapicintb)); +#endif /* CONFIG_E2K_LEGACY_SIC */ + system_commutator_es2_ioh_write_dword(domain, dev->bus->number, + A2_BA2, ioapic_base); + system_commutator_es2_ioh_write_dword(domain, dev->bus->number, + A2_BUA2, ioapic_upper32); + pcibios_write_config_dword(domain, dev->bus->number, dev->devfn, + IOAPIC_MESSAGE_BASE_ADDRESS, ioapic_base); + pcibios_write_config_dword(domain, 
dev->bus->number, dev->devfn,
+			IOAPIC_MESSAGE_UPPER_ADDRESS, ioapic_upper32);
+}
+#ifdef CONFIG_E2K_LEGACY_SIC
+/*
+ * Set up the IOAPIC embedded into the host bridge (legacy-SIC machines,
+ * i.e. e1c+ -- grounded by the E1CP_* base constant below).
+ * Programs the IOAPIC MMIO base into HB_PCI_IOAPICBASE, then enables the
+ * internal IOAPIC in the host bridge CFG register while setting the
+ * MaskIntSic/MaskIntWlcc/MaskIntIommu bits and clearing
+ * ShareHostInterrupts.  Reads each register back for the boot log.
+ */
+void configure_embeded_apic(void)
+{
+	unsigned long ioapic_base;
+	unsigned int hb_cfg;
+
+	/* configure configuration space for ioapic: */
+	/* embedded IOAPIC always is #1 */
+	ioapic_base = E1CP_EMBEDED_IOAPIC_BASE;
+	early_writell_hb_reg(ioapic_base, HB_PCI_IOAPICBASE);
+	rom_printk("set embeded ioapic base to 0x%X\n",
+		early_readll_hb_reg(HB_PCI_IOAPICBASE));
+	/* enable embedded IOAPIC and IRQs at host bridge CFG */
+	hb_cfg = early_readl_hb_reg(HB_PCI_CFG);
+	DebugSB("configure_embeded_apic: host bridge CFG 0x%08x\n",
+		hb_cfg);
+	hb_cfg |= HB_CFG_InternalIoApicEnable;
+	hb_cfg |= (HB_CFG_MaskIntSic | HB_CFG_MaskIntWlcc |
+			HB_CFG_MaskIntIommu);
+	hb_cfg &= ~HB_CFG_ShareHostInterrupts;
+	early_writel_hb_reg(hb_cfg, HB_PCI_CFG);
+	rom_printk("host bridge CFG: enable embeded IOAPIC AND IRQs 0x%X\n",
+		early_readl_hb_reg(HB_PCI_CFG));
+}
+#endif /* CONFIG_E2K_LEGACY_SIC */
+
+/*
+ * Configure the interrupt controllers of every IOHUB domain that was
+ * found online (bit set in online_iohubs_map), and additionally the
+ * host-bridge embedded IOAPIC on legacy-SIC machines.
+ */
+void configure_pic_system(void)
+{
+	int domain;
+
+	for (domain = 0; domain < MAX_NUMIOHUBS; domain ++) {
+		/* skip IOHUB domains not present in the online map */
+		if (!(online_iohubs_map & (1 << domain)))
+			continue;
+		configure_iohub_apic(domain);
+	}
+#ifdef CONFIG_E2K_LEGACY_SIC
+	configure_embeded_apic();
+#endif /* CONFIG_E2K_LEGACY_SIC */
+}
+
+#define ONEMEG (1 << 20)
+/** round a number to an alignment. 
+ * @param val the starting value
+ * @param roundup Alignment as a power of two
+ * @returns rounded up number
+ */
+extern unsigned long round(unsigned long val, unsigned long roundup);
+
+/*
+ * Locate the IOHUB multifunction (i2c/spi/timer) PCI device of the given
+ * domain, program its system-timer base address registers, and disable the
+ * watchdog output if the WD timer was left running by an earlier stage.
+ */
+static void configure_iohub_system_timer(int domain)
+{
+	struct bios_pci_dev *dev;
+	unsigned int timer_base, timer_upper32;
+	wd_control_t wd_control;
+	unsigned long addr = 0;
+
+	DebugSB("start configure_system_timer\n");
+	rom_printk("Scanning PCI #%d bus for ioapic/pic/timer i2c/spi controller ...",
+		domain);
+	/* try the Intel multifunction device first, then fall back to the
+	 * MCST i2c/spi controller, matching only this domain */
+	dev = bios_pci_find_device(INTEL_MULTIFUNC_VENDOR,
+			INTEL_MULTIFUNC_DEVICE, NULL);
+	if (dev == NULL) {
+		do {
+			dev = bios_pci_find_device(PCI_VENDOR_ID_MCST_TMP,
+					PCI_DEVICE_ID_MCST_I2C_SPI, dev);
+			if (dev) {
+				if (bios_pci_domain_nr(dev->bus) != domain)
+					continue;
+				rom_printk("found on domain %d bus %d "
+					"device %d\n",
+					bios_pci_domain_nr(dev->bus),
+					dev->bus->number,
+					PCI_SLOT(dev->devfn));
+				break;
+			}
+		} while (dev);
+	}
+	if (dev && bios_pci_domain_nr(dev->bus) == domain) {
+		rom_printk("found on bus %d device %d\n",
+			dev->bus->number, PCI_SLOT(dev->devfn));
+	} else {
+		rom_printk("!!! NOT FOUND !!!\n");
+		return;
+	}
+
+	/* configure configuration space for timer on BSP:
+	 * base is the prefetchable-memory limit of root bus 0 rounded
+	 * up to 1MB; upper 32 bits are always zero here */
+	timer_base = round(pci_root[0].prefmemlimit, ONEMEG);
+	timer_upper32 = 0;
+	DebugSB("configure_system_timer:\n"
+		"timer_upper32 = 0x%x, timer_base = 0x%x\n",
+		timer_upper32, timer_base);
+	pcibios_write_config_dword(domain, dev->bus->number, dev->devfn,
+		SYSTEM_TIMER_BASE_ADDRESS, timer_base);
+	pcibios_write_config_dword(domain, dev->bus->number, dev->devfn,
+		SYSTEM_TIMER_UPPER_ADDRESS, timer_upper32);
+	/* mirror the base into the system commutator (A3 window) */
+	system_commutator_es2_ioh_write_dword(domain, dev->bus->number,
+		A3_BA0, timer_base);
+	system_commutator_es2_ioh_write_dword(domain, dev->bus->number,
+		A3_BUA0, timer_upper32);
+	/* Disable WD timer output; switch it to interrupt mode instead
+	 * of reset so a stale running watchdog cannot reboot the board */
+	AS_WORD(wd_control) = 0;
+	addr = timer_base + WD_CONTROL;
+	AS_WORD(wd_control) = NATIVE_READ_MAS_W(addr, MAS_IOADDR);
+	if (AS_STRUCT(wd_control).w_out_e){
+		DebugSB("configure_system_timer: wd timer found to be enabled.\n");
+		DebugSB("configure_system_timer: Set wd timer to disable mode\n");
+		AS_STRUCT(wd_control).w_out_e = 0;
+		AS_STRUCT(wd_control).w_m = 1; /* Interrupt mode */
+		NATIVE_WRITE_MAS_W(addr, AS_WORD(wd_control), MAS_IOADDR);
+	}
+}
+
+/* Program the system timer of every online IOHUB domain */
+void configure_system_timer(void)
+{
+	int domain;
+
+	for (domain = 0; domain < MAX_NUMIOHUBS; domain ++) {
+		if (!(online_iohubs_map & (1 << domain)))
+			continue;
+		configure_iohub_system_timer(domain);
+	}
+}
+
+#endif
+
+/* Route RTC onto signal pin 14 in southbridge GENCFG, then init the RTC */
+void sb_enable_rtc(void)
+{
+	int xdata;
+
+	rom_printk("southbridge enable rtc ...\n");
+
+	xdata = SB_read_config32(SB_GENCFG, 0);
+	xdata |= SB_GENCFG_SIGNAL_PIN_SELECTED14;
+	SB_write_config32(xdata, SB_GENCFG, 0);
+
+	DebugSB("GENCFG = 0x%x\n",
+		SB_read_config32(SB_GENCFG, 0));
+
+	rtc_init(0);
+}
+
+/*
+ * Enable IDE decode on southbridge function 1 (IDETIM for both channels)
+ * and turn on I/O space access in the function's PCI command register.
+ */
+void sb_enable_ide(void)
+{
+	int xdata;
+
+	rom_printk("southbridge enable ide ...\n");
+
+	xdata = SB_read_config32(SB_IDETIM, 1);
+	/* set the decode-enable bit for primary and secondary timing */
+	xdata |= ((SB_IDETIM_DECODE_ENABLE << SB_IDETIM_SHIFT) |
+		SB_IDETIM_DECODE_ENABLE);
+	SB_write_config32(xdata, SB_IDETIM, 1);
+
+	DebugSB("IDETIM = 0x%x\n",
+		SB_read_config32(SB_IDETIM, 1));
+
+	/* NOTE(review): value is read 16-bit from PCICMD but written back
+	 * 32-bit, which also writes the PCISTS status register (RW1C bits)
+	 * with whatever the read's upper half happened to contain --
+	 * SB_write_config16 looks intended here; confirm against PIIX4 spec */
+	xdata = SB_read_config16(SB_PCICMD, 1);
+	xdata |= SB_PCICMD_IOSE;
+	SB_write_config32(xdata, SB_PCICMD, 1);
+
+	DebugSB("PCICMD = 0x%x\n",
+		SB_read_config16(SB_PCICMD, 1) & 0xffff);
+}
diff --git a/arch/e2k/boot/bios/southbridge.h b/arch/e2k/boot/bios/southbridge.h
new file mode 100644
index 000000000000..c16f002d0b2d
--- /dev/null
+++ b/arch/e2k/boot/bios/southbridge.h
@@ -0,0 +1,158 @@
+/* PIIX4 southbridge configuration registers */
+
+#ifndef _SOUTHBRIDGE_H_
+#define _SOUTHBRIDGE_H_
+
+/* NOTE(review): the three include targets below were lost in patch
+ * extraction (angle-bracket contents stripped) -- restore from upstream */
+#include
+#include
+#include
+#include "../boot_io.h"
+
+/* southbridge location on the PCI bus, discovered elsewhere at probe time */
+extern int SB_bus, SB_device, SB_function;
+
+#if 0
+#define PCI_BUS 0
+#define PHYS_DEV 7
+#else
+#define PCI_BUS SB_bus
+#define PHYS_DEV SB_device
+#endif
+
+
+/* Northbridge addr i/o ports 0xcf8 - 0xcfb (PCI config mechanism #1) */
+#define SB_IO_ADDR_BASE 0x0CF8
+#define SB_IO_ADDR_PORT0 0x0CF8
+#define SB_IO_ADDR_PORT1 0x0CF9
+#define SB_IO_ADDR_PORT2 0x0CFA
+#define SB_IO_ADDR_PORT3 0x0CFB
+
+/* Northbridge data i/o ports 0xcfc - 0xcff */
+#define SB_IO_DATA_BASE 0x0CFC
+#define SB_IO_DATA_PORT0 0x0CFC
+#define SB_IO_DATA_PORT1 0x0CFD
+#define SB_IO_DATA_PORT2 0x0CFE
+#define SB_IO_DATA_PORT3 0x0CFF
+
+/* build a type-1 configuration address: enable bit, bus, device, function,
+ * dword-aligned register offset */
+#define pci_cfg_xaddr(bus,physdev,fun,byte) \
+	((byte&~3)|(fun<<8)|(physdev<<11)|(bus<<16)|(1<<31))
+
+#define data_port PHYS_X86_IO_BASE + SB_IO_DATA_BASE
+#define addr_port PHYS_X86_IO_BASE + SB_IO_ADDR_BASE
+
+/*
+ * Read size (1/2/4) bytes of southbridge config space at reg/func.
+ * Aligned accesses go through the final switch as a single PIO read;
+ * a reg not on a dword boundary is assembled byte-by-byte in the loops,
+ * re-targeting the address port when the read crosses into the next dword.
+ */
+static inline unsigned int SB_read_config(int reg, int func, int size)
+{
+	unsigned int xaddr;
+	int mask = reg & 0x3;	/* byte offset inside the config dword */
+	int rval = 0;
+	int cnt = 0;
+	xaddr = pci_cfg_xaddr(PCI_BUS, PHYS_DEV, func, reg);
+	NATIVE_WRITE_MAS_W(addr_port, xaddr, MAS_IOADDR);
+	while (mask) {
+		rval |= (NATIVE_READ_MAS_B(data_port + mask,
+				MAS_IOADDR) << (cnt * 8));
+		if (!--size)
+			break;
+		++mask;
+		++cnt;
+		if (mask == 4) {
+			/* crossed into the next dword: re-point the
+			 * address port and continue in the loop below */
+			xaddr = pci_cfg_xaddr(PCI_BUS, PHYS_DEV,
+					func, (reg + 4));
+			NATIVE_WRITE_MAS_W(addr_port, xaddr, MAS_IOADDR);
+			mask = 0;
+			break;
+		}
+	}
+
+	while (size & (reg & 0x3)) {
+		rval |= (NATIVE_READ_MAS_B(data_port + mask,
+				MAS_IOADDR) << (cnt * 8));
+		if (!--size)
+			break;
+		++mask;
+		++cnt;
+	}
+
+	/* size is still nonzero only for aligned accesses: one PIO read */
+	switch (size) {
+	case 0:
+		break;
+	case 1:
+		rval = NATIVE_READ_MAS_B(data_port, MAS_IOADDR);
+		break;
+	case 2:
+		rval = NATIVE_READ_MAS_H(data_port, MAS_IOADDR);
+		break;
+	case 4:
+		rval = NATIVE_READ_MAS_W(data_port, MAS_IOADDR);
+		break;
+	default:
+		break;
+	}
+	return rval;
+}
+
+#define SB_read_config8(reg, func) \
+	((char)SB_read_config(reg, func, sizeof(char)))
+#define SB_read_config16(reg, func) \
+	((short)SB_read_config(reg, func, sizeof(short)))
+#define SB_read_config32(reg, func) \
+	((int)SB_read_config(reg, func, sizeof(int)))
+
+/*
+ * Write size (1/2/4) bytes of xdata into southbridge config space at
+ * reg/func; mirror image of SB_read_config (byte loops handle unaligned
+ * regs, the switch handles the aligned case in one PIO write).
+ */
+static inline void SB_write_config(int xdata, int reg, int func, int size)
+{
+	unsigned int xaddr;
+	int mask = reg & 0x3;
+	int data;
+	int cnt = 0;
+	xaddr = pci_cfg_xaddr(PCI_BUS, PHYS_DEV, func, reg);
+	NATIVE_WRITE_MAS_W(addr_port, xaddr, MAS_IOADDR);
+	while (mask) {
+		data = xdata >> (8 * cnt);
+		NATIVE_WRITE_MAS_B(data_port + mask, data, MAS_IOADDR);
+		if (!--size)
+			break;
+		++mask;
+		++cnt;
+		if (mask == 4) {
+			xaddr = pci_cfg_xaddr(PCI_BUS, PHYS_DEV,
+					func, (reg + 4));
+			NATIVE_WRITE_MAS_W(addr_port, xaddr, MAS_IOADDR);
+			mask = 0;
+			break;
+		}
+	}
+
+	while (size & (reg & 0x3)) {
+		data = xdata >> (8 * cnt);
+		NATIVE_WRITE_MAS_B(data_port + mask, data, MAS_IOADDR);
+		if (!--size)
+			break;
+		++mask;
+		++cnt;
+	}
+
+	switch (size) {
+	case 0:
+		break;
+	case 1:
+		NATIVE_WRITE_MAS_B(data_port, xdata, MAS_IOADDR);
+		break;
+	case 2:
+		NATIVE_WRITE_MAS_H(data_port, xdata, MAS_IOADDR);
+		break;
+	case 4:
+		NATIVE_WRITE_MAS_W(data_port, xdata, MAS_IOADDR);
+		break;
+	default:
+		break;
+	}
+}
+
+
+#define SB_write_config8(xdata, reg, func) \
+	SB_write_config(xdata, reg, func, sizeof(char))
+#define SB_write_config16(xdata, reg, func) \
+	SB_write_config(xdata, reg, func, sizeof(short))
+#define SB_write_config32(xdata, reg, func) \
+	SB_write_config(xdata, reg, func, sizeof(int))
+
+#endif /* 
_SOUTHBRIDGE_H_ */
diff --git a/arch/e2k/boot/bios/superio.c b/arch/e2k/boot/bios/superio.c
new file mode 100644
index 000000000000..3695d07412b9
--- /dev/null
+++ b/arch/e2k/boot/bios/superio.c
@@ -0,0 +1,197 @@
+/*
+ * Boot-time enabling of legacy Super I/O devices (LPT, RTC, keyboard,
+ * mouse, FDC) behind config ports 0x3f0/0x3f1.  The 0x55 ... 0xAA writes
+ * enter/exit SIO configuration mode, and the register indexes follow the
+ * SMSC-style scheme (0x07 logical-device select, 0x30 activate, 0x60/0x61
+ * base address, 0x70/0x72 IRQ select, 0x22 power control) --
+ * NOTE(review): confirm the exact SIO chip against the board schematic.
+ * NOTE(review): the include target below was lost in patch extraction.
+ */
+#include
+
+#include "bios.h"
+#include "mc146818rtc.h"
+
+#include "../boot_io.h"
+
+/* 16550-compatible UART register offsets -- Control */
+#define UART_IER 0x01
+#define UART_IIR 0x02
+#define UART_FCR 0x02
+#define UART_LCR 0x03
+#define UART_MCR 0x04
+#define UART_DLL 0x00
+#define UART_DLM 0x01
+
+/* Status */
+#define UART_LSR 0x05
+#define UART_MSR 0x06
+#define UART_SCR 0x07
+
+/* line control value: 8 data bits, no parity, 1 stop bit */
+#define UART_LCS 0x3
+
+#define BASE_BAUD1 ( 1280000 / 16 )
+
+/* set once the SIO interrupt pin routing has been programmed */
+static int set_irq = 0;
+
+void set_irq_pin(void);
+
+/* Write one SIO config register: index to port 0x3f0, data to 0x3f1 */
+void write_sio(int index,int data)
+{
+	bios_outb(index, 0x3f0);
+	bios_outb(data, 0x3f1);
+}
+
+/* Read one SIO config register */
+unsigned char read_sio(int index)
+{
+	bios_outb(index, 0x3f0);
+	return bios_inb(0x3f1);
+}
+
+/* Program a 16550-style UART at base_port: enable interrupts and FIFOs,
+ * then set the divisor latch (DLAB) to BASE_BAUD1 and 8n1 line format */
+inline void uart_init (unsigned base_port)
+{
+	int divisor = BASE_BAUD1;
+	/* enable interrupts */
+	bios_outb(0x7, base_port + UART_IER);
+
+	/* enable fifo's */
+	bios_outb(0x01, base_port + UART_FCR);
+
+	/* DLAB=1 to expose the divisor latch, write it, then DLAB=0 */
+	bios_outb(0x80 | UART_LCS, base_port + UART_LCR);
+	bios_outb(divisor & 0xFF, base_port + UART_DLL);
+	bios_outb((divisor >> 8) & 0xFF, base_port + UART_DLM);
+	bios_outb(UART_LCS, base_port + UART_LCR);
+}
+
+/* Power on and activate the parallel port (logical device 3) */
+void enable_parallel_port(void)
+{
+	unsigned char byte;
+	rom_printk("enable superio parallel port ...\n");
+	bios_outb(0x55, 0x3f0);	/* enter SIO config mode */
+
+	// power register bits: [0]-FDC,[4]COM1,[5]COM2,[3]LPT
+	byte = read_sio(0x22);
+	byte |= (1 << 3);
+	write_sio(0x22, byte); // lpt power on
+
+	write_sio(0x7, 0x3);	/* select logical device 3 (LPT) */
+	write_sio(0x30, 0);	/* deactivate while reprogramming */
+	write_sio(0x60, 0x3);	/* base address bytes: 0x0378 */
+	write_sio(0x61, 0x78);
+	write_sio(0x70, 0x7);	/* IRQ 7 */
+	write_sio(0x74, 0x4); // no dma
+	write_sio(0xf0, 0x3c); // default
+	write_sio(0xf1, 0x00); // default
+	write_sio(0x30, 0x1);	/* activate */
+
+	bios_outb(0xAA, 0x3f0);	/* leave config mode */
+
+	if (!set_irq) set_irq_pin();
+	set_irq = 1;
+
+	hardware.parallel = 1;
+}
+
+/* Activate the SIO real-time clock (logical device 6) on IRQ 8 */
+void enable_rtc(void)
+{
+	rom_printk("enable superio rtc ...\n");
+	bios_outb(0x55, 0x3f0);
+
+	write_sio(0x7, 0x6);
+	write_sio(0x30, 0);
+	write_sio(0x70, 0x8);	/* IRQ 8 */
+	write_sio(0x30, 0x1);
+
+	bios_outb(0xAA, 0x3f0);
+
+	if (!set_irq) set_irq_pin();
+	set_irq = 1;
+
+	hardware.rtc = 1;
+}
+
+/* Activate the keyboard controller (logical device 7) on IRQ 1
+ * and initialize the keyboard itself */
+void enable_keyboard(void)
+{
+	rom_printk("enable superio keyboard ...\n");
+	bios_outb(0x55, 0x3f0);
+
+	write_sio(0x7, 0x7);
+	write_sio(0x30, 0);
+	write_sio(0x70, 0x1);	/* IRQ 1 */
+	write_sio(0xf0, 0x3);
+	write_sio(0x30, 0x1);
+
+	bios_outb(0xAA, 0x3f0);
+
+	if (!set_irq) set_irq_pin();
+	set_irq = 1;
+
+	init_kbd();
+
+	hardware.keyboard = 1;
+}
+
+/* Activate the PS/2 mouse (second IRQ of logical device 7) on IRQ 12 */
+void enable_mouse(void)
+{
+	rom_printk("enable superio mouse ...\n");
+	bios_outb(0x55, 0x3f0);
+
+	write_sio(0x7, 0x7);
+	write_sio(0x30, 0);
+	write_sio(0x72, 0xc);	/* mouse IRQ 12 */
+	write_sio(0xf0, 0x3);
+	write_sio(0x30, 0x1);
+
+	bios_outb(0xAA, 0x3f0);
+
+	if (!set_irq) set_irq_pin();
+	set_irq = 1;
+
+	hardware.mouse = 1;
+}
+
+/* Power on and activate the floppy controller (logical device 0, IRQ 6);
+ * also seed the CMOS drive-type byte if it is still unset */
+void enable_floppy(void)
+{
+	unsigned char byte;
+	rom_printk("enable superio fdc ...\n");
+
+	bios_outb(0x55, 0x3f0);
+
+	byte = read_sio(0x22);
+	byte |= (1 << 0);
+	write_sio(0x22, byte); // fdc power on
+
+	write_sio(0x7, 0x0);
+	write_sio(0x30, 0); // disable fdc
+	write_sio(0x70, 0x06); // irq
+	write_sio(0x30, 0x1); // enable fdc
+
+	bios_outb(0xAA, 0x3f0);
+
+	if (!set_irq) set_irq_pin();
+	set_irq = 1;
+
+	/* 0x10 CMOS fd drive type (2 nibbles: high=fd0, low=fd1)
+	 * values:
+	 * 1: 360K 5.25"
+	 * 2: 1.2MB 5.25"
+	 * 3: 720K 3.5"
+	 * 4: 1.44MB 3.5"
+	 * 5: 2.88MB 3.5"
+	 */
+	if (!CMOS_READ(0x10))
+		CMOS_WRITE(0x40, 0x10);	/* default fd0 to 1.44MB 3.5" */
+
+
+	hardware.floppy = 1;
+}
+
+/* Route SIO IRQs 1,3-7,12 onto output pin 3 via logical device 8 --
+ * NOTE(review): register meaning inferred from the irq comments below;
+ * confirm against the SIO datasheet */
+void set_irq_pin(void)
+{
+	bios_outb(0x55, 0x3f0);
+
+	write_sio(0x07, 0x08);
+	write_sio(0x30, 0x00);
+	write_sio(0xc0, 0x03);
+	write_sio(0xcc, (1<<3)); // irq12
+	write_sio(0xd0, (1<<3)); // irq1
+	write_sio(0xd1, (1<<3)); // irq3
+	write_sio(0xd2, (1<<3)); // irq4
+	write_sio(0xd3, (1<<3)); // irq5
+	write_sio(0xd4, (1<<3)); // irq6
+	write_sio(0xd5, (1<<3)); // irq7
+	write_sio(0x30, 0x01);
+
+	bios_outb(0xAA, 0x3f0);
+}
diff --git 
a/arch/e2k/boot/bios/video/Makefile b/arch/e2k/boot/bios/video/Makefile new file mode 100644 index 000000000000..e3751fd5ca0f --- /dev/null +++ b/arch/e2k/boot/bios/video/Makefile @@ -0,0 +1,9 @@ +KBUILD_CFLAGS += -I$(obj)/.. -I$(obj)/x86emu/include -DIN_MODULE + +obj-y := int10.o int15.o int16.o int1a.o \ + inte6.o init.o helper_exec.o helper_mem.o \ + pci-iface.o + +obj-y += x86emu/src/x86emu/ + + diff --git a/arch/e2k/boot/bios/video/helper_exec.c b/arch/e2k/boot/bios/video/helper_exec.c new file mode 100644 index 000000000000..1a17f1e0f6db --- /dev/null +++ b/arch/e2k/boot/bios/video/helper_exec.c @@ -0,0 +1,269 @@ +/* $XFree86: xc/programs/Xserver/hw/xfree86/int10/helper_exec.c,v 1.16 2001/04/30 14:34:57 tsi Exp $ */ +/* + * XFree86 int10 module + * execute BIOS int 10h calls in x86 real mode environment + * Copyright 1999 Egbert Eich + * + * Part of this is based on code taken from DOSEMU + * (C) Copyright 1992, ..., 1999 the "DOSEMU-Development-Team" + */ + +/* + * To debug port accesses define PRINT_PORT. + * Note! You also have to comment out ioperm() + * in xf86EnableIO(). Otherwise we won't trap + * on PIO. 
+ */
+/* NOTE(review): the include target below was lost in patch extraction */
+#include
+#include "init.h"
+#include "printk.h"
+#include "pci.h"
+
+int port_rep_inb(u16 port, u32 base, int d_f, u32 count);
+u8 x_inb(u16 port);
+u16 x_inw(u16 port);
+void x_outb(u16 port, u8 val);
+void x_outw(u16 port, u16 val);
+u32 x_inl(u16 port);
+void x_outl(u16 port, u32 val);
+
+/* general software interrupt handler: return the linear address of the
+ * real-mode vector for interrupt num (segment*16 + offset from the IVT) */
+u32 getIntVect(int num)
+{
+	return MEM_RW(num << 2) + (MEM_RW((num << 2) + 2) << 4);
+}
+
+/* push a 16-bit value onto the emulated x86 stack at SS:SP */
+void pushw(u16 val)
+{
+	X86_ESP -= 2;
+	MEM_WW(((u32) X86_SS << 4) + X86_SP, val);
+}
+
+/* Emulate an x86 software interrupt: push FLAGS/CS/IP and load CS:IP
+ * from the interrupt vector table entry for num.  Always returns 1. */
+int run_bios_int(int num)
+{
+	u32 eflags;
+
+	eflags = X86_EFLAGS;
+	pushw(eflags);	/* truncated to the low 16 bits, as real mode expects */
+	pushw(X86_CS);
+	pushw(X86_IP);
+	X86_CS = MEM_RW((num << 2) + 2);
+	X86_IP = MEM_RW(num << 2);
+
+// printf("%s: INT %x CS:IP = %x:%x\n", __FUNCTION__,
+// num, MEM_RW((num << 2) + 2), MEM_RW(num << 2));
+
+	return 1;
+}
+
+/* rep insb: read count bytes from port into emulated memory starting at
+ * base; d_f is the x86 direction flag (nonzero = decrement addresses).
+ * Returns the (signed) number of bytes advanced. */
+int port_rep_inb(u16 port, u32 base, int d_f, u32 count)
+{
+	register int inc = d_f ? -1 : 1;
+	u32 dst = base;
+	while (count--) {
+		MEM_WB(dst, x_inb(port));
+		dst += inc;
+	}
+	return dst - base;
+}
+
+/* rep insw: 16-bit variant of port_rep_inb */
+int port_rep_inw(u16 port, u32 base, int d_f, u32 count)
+{
+	register int inc = d_f ? -2 : 2;
+	u32 dst = base;
+	while (count--) {
+		MEM_WW(dst, x_inw(port));
+		dst += inc;
+	}
+	return dst - base;
+}
+
+/* rep insl: 32-bit variant of port_rep_inb */
+int port_rep_inl(u16 port, u32 base, int d_f, u32 count)
+{
+	register int inc = d_f ? -4 : 4;
+	u32 dst = base;
+	while (count--) {
+		MEM_WL(dst, x_inl(port));
+		dst += inc;
+	}
+	return dst - base;
+}
+
+/* rep outsb: write count bytes from emulated memory at base to port */
+int port_rep_outb(u16 port, u32 base, int d_f, u32 count)
+{
+	register int inc = d_f ? -1 : 1;
+	u32 dst = base;
+	while (count--) {
+		x_outb(port, MEM_RB(dst));
+		dst += inc;
+	}
+	return dst - base;
+}
+
+/* rep outsw: 16-bit variant of port_rep_outb */
+int port_rep_outw(u16 port, u32 base, int d_f, u32 count)
+{
+	register int inc = d_f ? -2 : 2;
+	u32 dst = base;
+	while (count--) {
+		x_outw(port, MEM_RW(dst));
+		dst += inc;
+	}
+	return dst - base;
+}
+
+/* rep outsl: 32-bit variant of port_rep_outb */
+int port_rep_outl(u16 port, u32 base, int d_f, u32 count)
+{
+	register int inc = d_f ? -4 : 4;
+	u32 dst = base;
+	while (count--) {
+		x_outl(port, MEM_RL(dst));
+		dst += inc;
+	}
+	return dst - base;
+}
+
+/* PIO helpers used by the emulator: forward to the platform accessors
+ * (the commented-out printfs are the original trace hooks) */
+u8 x_inb(u16 port)
+{
+	u8 val;
+
+	val = bios_inb(port);
+
+// printf("%x inb(0x%04x) = 0x%02x\n", X86_IP, port, val);
+
+	return val;
+}
+
+u16 x_inw(u16 port)
+{
+	u16 val;
+
+	val = bios_inw(port);
+
+// printf("%x inw(0x%04x) = 0x%04x\n", X86_IP, port, val);
+	return val;
+}
+
+u32 x_inl(u16 port)
+{
+	u32 val;
+
+	val = bios_inl(port);
+
+// printf("%x inl(0x%04x) = 0x%08x\n", X86_IP, port, val);
+	return val;
+}
+
+void x_outb(u16 port, u8 val)
+{
+// printf("%x outb(0x%02x, 0x%04x)\n", X86_IP, val, port);
+	bios_outb(val, port);
+}
+
+void x_outw(u16 port, u16 val)
+{
+// printf("%x outw(0x%04x, 0x%04x)\n", X86_IP, val, port);
+	bios_outw(val, port);
+}
+
+void x_outl(u16 port, u32 val)
+{
+// printf("%x outl(0x%08x, 0x%04x)\n", X86_IP, val, port);
+	bios_outl(val, port);
+}
+
+/* emulated-memory accessors: dispatch through the current emulator
+ * context's memory access function table */
+u8 Mem_rb(int addr)
+{
+	return (*currentp->mem->rb) (currentp, addr);
+}
+
+u16 Mem_rw(int addr)
+{
+	return (*currentp->mem->rw) (currentp, addr);
+}
+
+u32 Mem_rl(int addr)
+{
+	return (*currentp->mem->rl) (currentp, addr);
+}
+
+void Mem_wb(int addr, u8 val)
+{
+	(*currentp->mem->wb) (currentp, addr, val);
+}
+
+void Mem_ww(int addr, u16 val)
+{
+	(*currentp->mem->ww) (currentp, addr, val);
+}
+
+void Mem_wl(int addr, u32 val)
+{
+	(*currentp->mem->wl) (currentp, addr, val);
+}
+
+#if 0
+void getsecs(unsigned long *sec, unsigned long *usec)
+{
+	struct timeval tv;
+	gettimeofday(&tv, 0);
+	*sec = tv.tv_sec;
+	*usec = tv.tv_usec;
+}
+#endif
+
+#define TAG(Cfg1Addr) (Cfg1Addr & 0xffff00)
+#define OFFSET(Cfg1Addr) (Cfg1Addr & 0xff)
+
+/* byte-wise sum over [start, start+size): used to validate option-ROM
+ * images, which must sum to 0 modulo 256 (see int10_check_bios) */
+u8 bios_checksum(u8 * start, int size)
+{
+	u8 sum = 0;
+
+	while (size-- > 0)
+		sum += *start++;
+	return sum;
+}
+
+/*
+ * Lock/Unlock legacy VGA. Some Bioses try to be very clever and make
+ * an attempt to detect a legacy ISA card. 
If they find one they might + * act very strange: for example they might configure the card as a + * monochrome card. This might cause some drivers to choke. + * To avoid this we attempt legacy VGA by writing to all know VGA + * disable registers before we call the BIOS initialization and + * restore the original values afterwards. In beween we hold our + * breath. To get to a (possibly exising) ISA card need to disable + * our currentp PCI card. + */ +/* + * This is just for booting: we just want to catch pure + * legacy vga therefore we don't worry about mmio etc. + * This stuff should really go into vgaHW.c. However then + * the driver would have to load the vga-module prior to + * doing int10. + */ +/*void +LockLegacyVGA(int screenIndex,legacyVGAPtr vga) +{ + xf86SetCurrentAccess(FALSE, xf86Screens[screenIndex]); + vga->save_msr = inb(0x3CC); + vga->save_vse = inb(0x3C3); + vga->save_46e8 = inb(0x46e8); + vga->save_pos102 = inb(0x102); + outb(0x3C2, ~(u8)0x03 & vga->save_msr); + outb(0x3C3, ~(u8)0x01 & vga->save_vse); + outb(0x46e8, ~(u8)0x08 & vga->save_46e8); + outb(0x102, ~(u8)0x01 & vga->save_pos102); + xf86SetCurrentAccess(TRUE, xf86Screens[screenIndex]); +} + +void +UnlockLegacyVGA(int screenIndex, legacyVGAPtr vga) +{ + xf86SetCurrentAccess(FALSE, xf86Screens[screenIndex]); + outb(0x102, vga->save_pos102); + outb(0x46e8, vga->save_46e8); + outb(0x3C3, vga->save_vse); + outb(0x3C2, vga->save_msr); + xf86SetCurrentAccess(TRUE, xf86Screens[screenIndex]); +} +*/ diff --git a/arch/e2k/boot/bios/video/helper_mem.c b/arch/e2k/boot/bios/video/helper_mem.c new file mode 100644 index 000000000000..651ea0f12260 --- /dev/null +++ b/arch/e2k/boot/bios/video/helper_mem.c @@ -0,0 +1,326 @@ +/* $XFree86: xc/programs/Xserver/hw/xfree86/int10/helper_mem.c,v 1.21 2001/05/22 16:24:37 tsi Exp $ */ +/* + * XFree86 int10 module + * execute BIOS int 10h calls in x86 real mode environment + * Copyright 1999 Egbert Eich + */ + +#include "init.h" + +#define _INT10_PRIVATE + 
+#define SYS_BIOS 0xF0000 + +#define REG pInt +#if 0 +typedef enum { + OPT_NOINT10, + OPT_INIT_PRIMARY, + OPT_BIOS_LOCATION +} INT10Opts; + +static const OptionInfoRec INT10Options[] = { + {OPT_NOINT10, "NoINT10", OPTV_BOOLEAN, {0}, FALSE}, + {OPT_INIT_PRIMARY, "InitPrimary", OPTV_BOOLEAN, {0}, FALSE}, + {OPT_BIOS_LOCATION, "BiosLocation", OPTV_STRING, {0}, FALSE}, + {-1, NULL, OPTV_NONE, {0}, FALSE}, +}; +#endif + +void reset_int_vect(void); + +#ifdef DEBUG +void dprint(unsigned long start, unsigned long size) +{ + int i, j; + char *c = (char *) start; + + for (j = 0; j < (size >> 4); j++) { + char *d = c; + printf("\n0x%lx: ", (unsigned long) c); + for (i = 0; i < 16; i++) + printf("%2.2x ", (unsigned char) (*(c++))); + c = d; + for (i = 0; i < 16; i++) { + printf("%c", ((((u8) (*c)) > 32) && (((u8) (*c)) < 128)) ? + (unsigned char) (*(c)) : '.'); + c++; + } + } + printf("\n"); +} +#endif /* DEBUG */ + +#if 1 +#ifndef _PC +/* + * here we are really paranoid about faking a "real" + * BIOS. Most of this information was pulled from + * dosemu. 
+ */ +void setup_int_vect(void) +{ + int i; + + /* let the int vects point to the SYS_BIOS seg */ + for (i = 0; i < 0x80; i++) { + MEM_WW(i << 2, 0); + MEM_WW((i << 2) + 2, SYS_BIOS >> 4); + } + + reset_int_vect(); + /* font tables default location (int 1F) */ + MEM_WW(0x1f << 2, 0xfa6e); + + /* int 11 default location (Get Equipment Configuration) */ + MEM_WW(0x11 << 2, 0xf84d); + /* int 12 default location (Get Conventional Memory Size) */ + MEM_WW(0x12 << 2, 0xf841); + /* int 15 default location (I/O System Extensions) */ + MEM_WW(0x15 << 2, 0xf859); + /* int 1A default location (RTC, PCI and others) */ + MEM_WW(0x1a << 2, 0xff6e); + /* int 05 default location (Bound Exceeded) */ + MEM_WW(0x05 << 2, 0xff54); + /* int 08 default location (Double Fault) */ + MEM_WW(0x08 << 2, 0xfea5); + /* int 13 default location (Disk) */ + MEM_WW(0x13 << 2, 0xec59); + /* int 0E default location (Page Fault) */ + MEM_WW(0x0e << 2, 0xef57); + /* int 17 default location (Parallel Port) */ + MEM_WW(0x17 << 2, 0xefd2); + /* fdd table default location (int 1e) */ + MEM_WW(0x1e << 2, 0xefc7); + + /* Set Equipment flag to VGA */ + i = MEM_RB(0x0410) & 0xCF; + MEM_WB(0x0410, i); + /* XXX Perhaps setup more of the BDA here. See also int42(0x00). */ +} +#endif + +#if 0 + +int setup_system_bios(void *base_addr) +{ + char *base = (char *) base_addr; + + /* + * we trap the "industry standard entry points" to the BIOS + * and all other locations by filling them with "hlt" + * TODO: implement hlt-handler for these + */ + memset(base, 0xf4, 0x10000); + + /* set bios date */ + strcpy(base + 0x0FFF5, "06/11/99"); + /* set up eisa ident string */ + strcpy(base + 0x0FFD9, "PCI_ISA"); + /* write system model id for IBM-AT */ + *((unsigned char *) (base + 0x0FFFE)) = 0xfc; + + return 1; +} + +#endif + +void reset_int_vect(void) +{ + /* + * This table is normally located at 0xF000:0xF0A4. However, int 0x42, + * function 0 (Mode Set) expects it (or a copy) somewhere in the bottom + * 64kB. 
Note that because this data doesn't survive POST, int 0x42 should + * only be used during EGA/VGA BIOS initialisation. + */ + static const unsigned char VideoParms[] = { + /* Timing for modes 0x00 & 0x01 */ + 0x38, 0x28, 0x2d, 0x0a, 0x1f, 0x06, 0x19, 0x1c, + 0x02, 0x07, 0x06, 0x07, 0x00, 0x00, 0x00, 0x00, + /* Timing for modes 0x02 & 0x03 */ + 0x71, 0x50, 0x5a, 0x0a, 0x1f, 0x06, 0x19, 0x1c, + 0x02, 0x07, 0x06, 0x07, 0x00, 0x00, 0x00, 0x00, + /* Timing for modes 0x04, 0x05 & 0x06 */ + 0x38, 0x28, 0x2d, 0x0a, 0x7f, 0x06, 0x64, 0x70, + 0x02, 0x01, 0x06, 0x07, 0x00, 0x00, 0x00, 0x00, + /* Timing for mode 0x07 */ + 0x61, 0x50, 0x52, 0x0f, 0x19, 0x06, 0x19, 0x19, + 0x02, 0x0d, 0x0b, 0x0c, 0x00, 0x00, 0x00, 0x00, + /* Display page lengths in little endian order */ + 0x00, 0x08, /* Modes 0x00 and 0x01 */ + 0x00, 0x10, /* Modes 0x02 and 0x03 */ + 0x00, 0x40, /* Modes 0x04 and 0x05 */ + 0x00, 0x40, /* Modes 0x06 and 0x07 */ + /* Number of columns for each mode */ + 40, 40, 80, 80, 40, 40, 80, 80, + /* CGA Mode register value for each mode */ + 0x2c, 0x28, 0x2d, 0x29, 0x2a, 0x2e, 0x1e, 0x29, + /* Padding */ + 0x00, 0x00, 0x00, 0x00 + }; + int i; + + for (i = 0; i < sizeof(VideoParms); i++) + MEM_WB(i + (0x1000 - sizeof(VideoParms)), VideoParms[i]); + MEM_WW(0x1d << 2, 0x1000 - sizeof(VideoParms)); + MEM_WW((0x1d << 2) + 2, 0); + +// rom_printk("SETUP INT\n"); + MEM_WW(0x10 << 2, 0xf065); + MEM_WW((0x10 << 2) + 2, SYS_BIOS >> 4); + MEM_WW(0x42 << 2, 0xf065); + MEM_WW((0x42 << 2) + 2, SYS_BIOS >> 4); + MEM_WW(0x6D << 2, 0xf065); + MEM_WW((0x6D << 2) + 2, SYS_BIOS >> 4); +} + +#endif + +#if 0 + +void set_return_trap(void) +{ + /* + * Here we set the exit condition: We return when we encounter + * 'hlt' (=0xf4), which we locate at address 0x600 in x86 memory. 
+ */ + MEM_WB(0x0600, 0xf4); + + /* + * Allocate a segment for the stack + */ + xf86Int10AllocPages(1, current->stackseg); +} + +void *xf86HandleInt10Options(ScrnInfoPtr pScrn, int entityIndex) +{ + EntityInfoPtr pEnt = xf86GetEntityInfo(entityIndex); + OptionInfoPtr options = NULL; + + if (pEnt->device) { + pointer configOptions = NULL; + + /* Check if xf86CollectOptions() has already been called */ + if (((pEnt->index < 0) || + !xf86Screens[pEnt->index] || + !(configOptions = xf86Screens[pEnt->index]->options)) && + pEnt->device) + configOptions = pEnt->device->options; + + if (configOptions) { + if (!(options = (OptionInfoPtr) xalloc(sizeof(INT10Options)))) + return NULL; + + (void) memcpy(options, INT10Options, sizeof(INT10Options)); + xf86ProcessOptions(pScrn->scrnIndex, configOptions, options); + } + } + xfree(pEnt); + + return options; +} + +Bool int10skip(void *options) +{ + Bool noint10 = FALSE; + + if (!options) + return FALSE; + + xf86GetOptValBool(options, OPT_NOINT10, &noint10); + return noint10; +} + +Bool int10_check_bios(int scrnIndex, int codeSeg, unsigned char *vbiosMem) +{ + int size; + + if ((codeSeg & 0x1f) || /* Not 512-byte aligned otherwise */ + ((codeSeg << 4) < V_BIOS) || ((codeSeg << 4) >= SYS_SIZE)) + return FALSE; + + if (xf86IsPc98()) + return FALSE; + + if ((*vbiosMem != 0x55) || (*(vbiosMem + 1) != 0xAA) || !*(vbiosMem + 2)) + return FALSE; + + size = *(vbiosMem + 2) * 512; + + if ((size + (codeSeg << 4)) > SYS_SIZE) + return FALSE; + + if (bios_checksum(vbiosMem, size)) + xf86DrvMsg(scrnIndex, X_WARNING, "Bad V_BIOS checksum\n"); + + return TRUE; +} + +Bool initPrimary(void *options) +{ + Bool initPrimary = FALSE; + + if (!options) + return FALSE; + + xf86GetOptValBool(options, OPT_INIT_PRIMARY, &initPrimary); + return initPrimary; +} + +void xf86int10ParseBiosLocation(void *options, xf86int10BiosLocationPtr bios) +{ + char *s; + char *p; + char *str = NULL; + + if (options) + str = xf86GetOptValString(options, OPT_BIOS_LOCATION); + 
+ bios->bus = BUS_NONE; + if (!str) + return; + + s = xstrdup(str); + p = strtok(s, ":"); + if (xf86NameCmp(p, "pci")) + bios->bus = BUS_PCI; + else if (xf86NameCmp(p, "primary")) + bios->bus = BUS_ISA; + + xfree(s); + + if (bios->bus == BUS_NONE) + return; + + s = xstrdup(str); + p = strchr(s, ':'); + + switch (bios->bus) { + case BUS_ISA: + if (p) + bios->location.legacy = atoi(++p); + else + bios->location.legacy = 0; + break; + case BUS_PCI: + if (p) { + bios->location.pci.bus = atoi(++p); + if ((p = strchr(p, ':'))) { + bios->location.pci.dev = atoi(++p); + if ((p = strchr(p, ':'))) { + bios->location.pci.func = atoi(++p); + break; + } + } + } + /* fall through */ + bios->bus = BUS_NONE; + break; + default: + break; + } + xfree(s); +} + + +#endif diff --git a/arch/e2k/boot/bios/video/init.c b/arch/e2k/boot/bios/video/init.c new file mode 100644 index 000000000000..9e4b707e2213 --- /dev/null +++ b/arch/e2k/boot/bios/video/init.c @@ -0,0 +1,592 @@ + + +#include +#include "init.h" +#include "printk.h" + +#include + +#include "pci-iface.h" +#include "bios.h" + +#define die(x) { rom_printk(x); } +#define warn(x) { rom_printk(x); } + +#define DEBUG_VIDEO 0 +#define DebugV(fmt, args...) 
\ + ({ if (DEBUG_VIDEO) \ + rom_printk(fmt, ##args); }) + +void x86emu_dump_xregs(void); +int int15_handler(void); +int int16_handler(void); +int int1A_handler(void); +#ifndef _PC +int int42_handler(void); +#endif +int intE6_handler(void); +void setup_int_vect(void); +int run_bios_int(int num); +u32 getIntVect(int num); + + +void pushw(u16 val); + +_ptr p; +ptr currentp = 0; +unsigned char biosmem[1024 * 1024]; + +int verbose = 1; + + +/* Interrupt multiplexer */ + +void do_int(int num) +{ + int ret = 0; + +// rom_printk("int%x vector at %x\n", num, getIntVect(num)); + + /* This is a pInt leftover */ + currentp->num = num; + + switch (num) { +#ifndef _PC + case 0x10: + case 0x42: + case 0x6D: + + if (getIntVect(num) == 0xFF065) { + ret = int42_handler(); + } + break; +#endif + case 0x15: + ret = int15_handler(); + break; + case 0x16: + ret = int16_handler(); + break; + case 0x1A: + ret = int1A_handler(); + break; + case 0xe6: + ret = intE6_handler(); + break; + default: + break; + } + + if (!ret) + ret = run_bios_int(num); + + if (!ret) { + rom_printk("\nint%x: not implemented\n", num); + //x86emu_dump_xregs(); + } +} + +static void x_outb(u16 port, u8 val) +{ + bios_outb(val, port); +} + +static void x_outw(u16 port, u16 val) +{ + bios_outw(val, port); +} + +static void x_outl(u16 port, u32 val) +{ + bios_outl(val, port); +} + +X86EMU_pioFuncs myfuncs = { + bios_inb, bios_inw, bios_inl, + x_outb, x_outw, x_outl +}; + + +void X86EMU_setMemBase(void *base, unsigned int size); +void X86EMU_setabseg(void *abseg); +void x86emu_dump_xregs(void); +int X86EMU_set_debug(int debug); + +X86EMU_intrFuncs intFuncs[256]; + +int pci_video_bios_init(struct bios_pci_dev *dev) +{ + void *abseg = 0; + int i; + unsigned char *cp; + unsigned int size = 0; + int base = 0; + unsigned short initialip = 0, initialcs = 0, devfn = 0; + char *date = "01/01/99"; +#ifdef DEBUG + int debugflag = 0; + int trace = 0; +#endif + +// size = 64 * 1024; + size = dev->rom_size; + + base = 0xc0000; + 
initialcs = 0xc000; + initialip = 0x0003; + +// rom_printk("Point 1 int%x vector at %x\n", 0x42, getIntVect(0x42)); + + abseg = (void *) 0xa0000; + + currentp = &p; + X86EMU_setMemBase(biosmem, sizeof(biosmem)); + X86EMU_setabseg(abseg); + X86EMU_setupPioFuncs(&myfuncs); + + /* Setting up interrupt environment. + * basically this means initializing PCI and + * intXX handlers. + */ + pciInit(); + + setup_int_vect(); + + for (i = 0; i < 256; i++) + intFuncs[i] = do_int; + + X86EMU_setupIntrFuncs(intFuncs); + + cp = (unsigned char *) dev->rom_address ; + + devfn = (PCI_SLOT(dev->devfn) << 3) | + PCI_FUNC(dev->devfn); + + currentp->ax = devfn ? devfn : 0xff; + currentp->dx = 0x80; + + for (i = 0; i < size; i++) { + wrb(base + i, cp[i]); + } + + /* Put a date into ROM */ + for (i = 0; date[i]; i++) + wrb(0xffff5 + i, date[i]); + wrb(0xffff7, '/'); + wrb(0xffffa, '/'); + + /* cpu setup */ + X86_AX = devfn ? devfn : 0xff; + X86_DX = 0x80; + X86_EIP = initialip; + X86_CS = initialcs; + + /* Initialize stack and data segment */ + X86_SS = 0x0030; + X86_DS = 0x0040; + X86_SP = 0xfffe; + /* We need a sane way to return from bios + * execution. A hlt instruction and a pointer + * to it, both kept on the stack, will do. 
+ */ + pushw(0xf4f4); /* hlt; hlt */ + pushw(X86_SS); + pushw(X86_SP + 2); + + X86_ES = 0x0000; + +#ifdef DEBUG + if (trace) { + rom_printk("Switching to single step mode.\n"); + X86EMU_trace_on(); + } +#endif + +#if 0 + debugflag = DEBUG_MEM_TRACE_F | + DEBUG_DECODE_F | DEBUG_DISASSEMBLE_F | + DEBUG_TRACE_F | + DEBUG_SYSINT_F; +#endif + +#ifdef DEBUG +// debugflag = 0x00ffffff; + if (debugflag) { + X86EMU_set_debug(debugflag); + } +#endif + + X86EMU_exec(); + /* Cleaning up */ + pciExit(); + + return 0; +} + + + +/* VGA index register ports */ +#define GRA_I 0x3CE /* Graphics Controller Index */ +#define SEQ_I 0x3C4 /* Sequencer Index */ + +/* VGA data register ports */ +#define GRA_D 0x3CF /* Graphics Controller Data Register */ +#define SEQ_D 0x3C5 /* Sequencer Data Register */ + +#define CRT_IC 0x3D4 /* CRT Controller Index - color emulation */ +#define CRT_DC 0x3D5 /* CRT Controller Data Register - color emulation */ +#define IS1_RC 0x3DA /* Input Status Register 1 - color emulation */ +#define ATT_IW 0x3C0 /* Attribute Controller Index & Data Write Register */ +#define ATT_R 0x3C1 /* Attribute Controller Data Read Register */ + +#define ATC_MODE 0x10 +#define ATC_COLOR_PAGE 0x14 + + +#define CRTC_H_TOTAL 0 +#define CRTC_H_DISP 1 +#define CRTC_H_BLANK_START 2 +#define CRTC_H_BLANK_END 3 +#define CRTC_H_SYNC_START 4 +#define CRTC_H_SYNC_END 5 +#define CRTC_V_TOTAL 6 +#define CRTC_OVERFLOW 7 +#define CRTC_PRESET_ROW 8 +#define CRTC_MAX_SCAN 9 +#define CRTC_CURSOR_START 0x0A +#define CRTC_CURSOR_END 0x0B +#define CRTC_START_HI 0x0C +#define CRTC_START_LO 0x0D +#define CRTC_CURSOR_HI 0x0E +#define CRTC_CURSOR_LO 0x0F +#define CRTC_V_SYNC_START 0x10 +#define CRTC_V_SYNC_END 0x11 +#define CRTC_V_DISP_END 0x12 +#define CRTC_OFFSET 0x13 +#define CRTC_UNDERLINE 0x14 +#define CRTC_V_BLANK_START 0x15 +#define CRTC_V_BLANK_END 0x16 +#define CRTC_MODE 0x17 +#define CRTC_LINE_COMPARE 0x18 + +// macros for writing to vga regs +#define write_seq(data, addr) \ +({ \ + 
bios_outb(addr, SEQ_I); \ + bios_outb(data, SEQ_D); \ +}) +#define write_gra(data, addr) \ +({ \ + bios_outb(addr, GRA_I); \ + bios_outb(data, GRA_D); \ +}) +#define write_crtc(data, addr) \ +({ \ + bios_outb(addr, CRT_IC); \ + bios_outb(data, CRT_DC); \ +}) +#define write_att(data, addr) \ +({ \ + bios_inb(IS1_RC); \ + bios_inb(0x80); \ + bios_outb(addr, ATT_IW); \ + bios_inb(0x80); \ + bios_outb(data, ATT_IW); \ + bios_inb(0x80); \ +}) + +#define SEQ_CLOCK_MODE 0x01 +#define SEQ_PLANE_WRITE 0x02 +#define SEQ_CHARACTER_MAP 0x03 +#define SEQ_MEMORY_MODE 0x04 + +#define GDC_PLANE_READ 0x04 +#define GDC_MODE 0x05 +#define GDC_MISC 0x06 +#define GDC_BIT_MASK 0x08 + +#define VGA_FONT_BASE 0xa8000 +#define CHAR_HEIGHT 16 + +unsigned char read_seq_b(unsigned short addr) { + bios_outb(addr, SEQ_I); + return bios_inb(SEQ_D); +} +unsigned char read_gra_b(unsigned short addr) { + bios_outb(addr, GRA_I); + return bios_inb(GRA_D); +} +unsigned char read_crtc_b(unsigned short addr) { + bios_outb(addr, CRT_IC); + return bios_inb(CRT_DC); +} +unsigned char read_att_b(unsigned short addr) { + bios_inb(IS1_RC); + bios_inb(0x80); + bios_outb(addr, ATT_IW); + return bios_inb(ATT_R); +} + + +#if 0 + +void vga_set_amode (void) { + unsigned char byte; + + rom_printk("Switching into alpha mode..."); + + + write_att(0x0c, ATC_MODE); + + //reset palette to normal in the case it was changed + write_att(0x0, ATC_COLOR_PAGE); +// +// display is off at this point + + write_seq(0x3, SEQ_PLANE_WRITE); /* planes 0 & 1 */ + byte = read_seq_b(SEQ_MEMORY_MODE) & ~0x04; + write_seq(byte, SEQ_MEMORY_MODE); + + byte = read_gra_b(GDC_MODE) & ~0x60; + write_gra(byte|0x10, GDC_MODE); + + write_gra(0x0e, GDC_MISC); + + write_crtc(0x00, CRTC_CURSOR_START); + write_crtc(CHAR_HEIGHT-1, CRTC_CURSOR_END); + + byte = read_crtc_b(CRTC_MODE) & ~0xe0; + write_crtc(byte|0xa0, CRTC_MODE); + byte = read_crtc_b(CRTC_MAX_SCAN) & ~0x01f; + write_crtc(byte | (CHAR_HEIGHT-1), CRTC_MAX_SCAN); + + +// turn on display, 
disable access to attr palette + bios_inb(IS1_RC); + bios_outb(0x20, ATT_IW); + + rom_printk("done.\n"); +} + +#endif + +/* + * by Steve M. Gehlbach, Ph.D. + * + * vga_font_load loads a font into font memory. It + * assumes alpha mode has been set. + * + * The font load code follows technique used + * in the tiara project, which came from + * the Universal Talkware Boot Loader, + * http://www.talkware.net. + */ + + +void atyr128_font_enable(unsigned char *vidmem, int height, int num_chars) { + +/* Note: the font table is 'height' long but the font storage area + * is 32 bytes long. + */ + + int i; + unsigned char byte; + + +// rom_printk("Loading VGA font..."); + + // set sequencer map 2, odd/even off + byte = read_seq_b(SEQ_PLANE_WRITE) & ~0xf; + +// rom_printk("SEQ_PLANE_WRITE %x\n", byte); + + write_seq(byte|4,SEQ_PLANE_WRITE); + byte = read_seq_b(SEQ_MEMORY_MODE); + +// rom_printk("SEQ_MEMORY_MODE %x\n", byte); + + write_seq(byte|4,SEQ_MEMORY_MODE); + + // select graphics map 2, odd/even off, map starts at 0xa0000 + write_gra(2,GDC_PLANE_READ); + byte = read_gra_b(GDC_MODE) & ~0x10; + +// rom_printk("GDC_MODE %x\n", byte); + + write_gra(byte,GDC_MODE); + write_gra(0,GDC_MISC); + + /* Clear 256K */ + for (i = 0; i<(256 * 1024); i++) { + vidmem[i] = 0; + } + + + // set sequencer back to maps 0,1, odd/even on + byte = read_seq_b(SEQ_PLANE_WRITE) & ~0xf; + write_seq(byte|3,SEQ_PLANE_WRITE); + byte = read_seq_b(SEQ_MEMORY_MODE) & ~0x4; + write_seq(byte,SEQ_MEMORY_MODE); + + byte = read_seq_b(SEQ_CHARACTER_MAP); + +// rom_printk("SEQ_CHARACTER_MAP %x\n", byte); + + write_seq(0x0a, SEQ_CHARACTER_MAP); + + // select graphics back to map 0,1, odd/even on + write_gra(0,GDC_PLANE_READ); + byte = read_gra_b(GDC_MODE); + write_gra(byte|0x10,GDC_MODE); + write_gra(0xe,GDC_MISC); + +// rom_printk("done\n"); + +} + + +void video_bios(void) +{ + struct bios_pci_dev *dev; + int adpt_cnt; + unsigned char *code = 0; + int pcirom = 0; + int atyr128 = 0; + int cl5446 = 0; + int mga2 
= 0; + + adpt_cnt = 0; + + DebugV("video_bios() started\n"); + dev = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8, NULL); + adpt_cnt++; + + if (dev) { +#if DEBUG_VIDEO + rom_printk("--------- VIDEO BIOS ------\n"); + rom_printk("Class: %X\n", dev->class); + rom_printk("command: %x\n", dev->command); + rom_printk("base_address[0]: %04x\n", dev->base_address[0]); + rom_printk("size[0]: %04x\n", dev->size[0]); + rom_printk("base_address[1]: %04x\n", dev->base_address[1]); + rom_printk("size[1]: %04x\n", dev->size[1]); + rom_printk("base_address[2]: %04x\n", dev->base_address[2]); + rom_printk("size[2]: %04x\n", dev->size[2]); + rom_printk("base_address[3]: %04x\n", dev->base_address[3]); + rom_printk("size[0]: %04x\n", dev->size[3]); + rom_printk("base_address[4]: %04x\n", dev->base_address[4]); + rom_printk("size[4]: %04x\n", dev->size[4]); + rom_printk("base_address[5]: %04x\n", dev->base_address[5]); + rom_printk("size[5]: %04x\n", dev->size[5]); + rom_printk("rom_address: %04x\n", dev->rom_address); + rom_printk("rom_size %04x\n", dev->rom_size); +#endif + code = (unsigned char *) dev->rom_address; + + if (code[0] == 0x55U && + code[1] == 0xAAU ) { + rom_printk("VIDEO BIOS found at %X\n", code); + } else { + rom_printk("No ROM signature found." 
+ " Skipping BIOS init...\n"); + rom_printk("BYTES: %x %x\n", code[0], + code[1]); + } + + switch (dev->vendor) + { + case PCI_VENDOR_ID_CIRRUS: + if (dev->device == PCI_DEVICE_ID_CIRRUS_5446) { + rom_printk("Cirrus Logic GD 5446 detected!\n"); + cl5446 = 1; + }; + break; + case PCI_VENDOR_ID_ATI: + + switch(dev->device) + { + case PCI_DEVICE_ID_ATI_RAGE128_PP: + rom_printk("ATI Rage 128 PP detected!\n"); + atyr128 = 1; + break; + case PCI_DEVICE_ID_ATI_RAGE128_TR: + rom_printk("ATI Rage 128 TR detected!\n"); + atyr128 = 1; + break; + default: + rom_printk("Unknown ATI display adapter detected!\n"); + break; + }; + + break; + case PCI_VENDOR_ID_MCST_TMP: + if (dev->device == PCI_DEVICE_ID_MCST_MGA2) { + rom_printk("Embeded Graphic MGA2/GC2500 " + "detected!\n"); + mga2 = 1; + }; + break; + default: + rom_printk("Unknown display adapter found!\n"); + break; + } + + + } else { + rom_printk("No PCI display adaplers found!\n"); + + } + + if (pcirom) { + pci_video_bios_init(dev); + } else if (mga2) { +#ifdef CONFIG_VGA_CONSOLE + vga_init(); +#endif /* CONFIG_VGA_CONSOLE */ + } else { + return; + } + + if (atyr128) { + + atyr128_font_enable( (unsigned char *) VGA_FONT_BASE, + CHAR_HEIGHT, 256); +#if 0 + + unsigned char *vidmem = (unsigned char *) dev->base_address[0]; + int i; + for (i=0; i < (1 * 1024 * 1024); i++) { + vidmem[i] = 0; + }; +#endif + } + +// vga_set_amode(); + + hardware.video = 1; + + if (atyr128) + { + long int i; + /* delay to relax ATI hardware */ + for (i=0; i<77000000L; i++) { + do { + (void) (i); + } while (0) ; + } + } + +#if 0 + if (cl5446) + { + long int i; + +// rom_printk("qwertyuiopasdfghjklzxcvbnm\n"); +// rom_printk("qwertyuiopasdfghjklzxcvbnm\n"); +// rom_printk("qwertyuiopasdfghjklzxcvbnm\n"); + for (i=0; i<2000000L; i++) { do {i; } while (0) ; } + } +#endif + +} + diff --git a/arch/e2k/boot/bios/video/init.h b/arch/e2k/boot/bios/video/init.h new file mode 100644 index 000000000000..1a25bc453633 --- /dev/null +++ 
b/arch/e2k/boot/bios/video/init.h @@ -0,0 +1,89 @@ +/* $XFree86: xc/programs/Xserver/hw/xfree86/int10/xf86x86emu.h,v 1.2 2001/01/06 20:19:13 tsi Exp $ */ +/* + * XFree86 int10 module + * execute BIOS int 10h calls in x86 real mode environment + * Copyright 1999 Egbert Eich + */ +#ifndef XF86X86EMU_H_ +#define XF86X86EMU_H_ +#include + +#define M _X86EMU_env + +#define X86_EAX M.x86.R_EAX +#define X86_EBX M.x86.R_EBX +#define X86_ECX M.x86.R_ECX +#define X86_EDX M.x86.R_EDX +#define X86_ESI M.x86.R_ESI +#define X86_EDI M.x86.R_EDI +#define X86_EBP M.x86.R_EBP +#define X86_EIP M.x86.R_EIP +#define X86_ESP M.x86.R_ESP +#define X86_EFLAGS M.x86.R_EFLG + +#define X86_FLAGS M.x86.R_FLG +#define X86_AX M.x86.R_AX +#define X86_BX M.x86.R_BX +#define X86_CX M.x86.R_CX +#define X86_DX M.x86.R_DX +#define X86_SI M.x86.R_SI +#define X86_DI M.x86.R_DI +#define X86_BP M.x86.R_BP +#define X86_IP M.x86.R_IP +#define X86_SP M.x86.R_SP +#define X86_CS M.x86.R_CS +#define X86_DS M.x86.R_DS +#define X86_ES M.x86.R_ES +#define X86_SS M.x86.R_SS +#define X86_FS M.x86.R_FS +#define X86_GS M.x86.R_GS + +#define X86_AL M.x86.R_AL +#define X86_BL M.x86.R_BL +#define X86_CL M.x86.R_CL +#define X86_DL M.x86.R_DL + +#define X86_AH M.x86.R_AH +#define X86_BH M.x86.R_BH +#define X86_CH M.x86.R_CH +#define X86_DH M.x86.R_DH + + +/* int10 info structure */ +typedef struct { + u16 BIOSseg; + u16 inb40time; + struct _mem *mem; + int num; + int ax; + int bx; + int cx; + int dx; + int si; + int di; + int es; + int bp; + int flags; + int stackseg; +} _ptr, *ptr; + +typedef struct _mem { + u8(*rb) (ptr, int); + u16(*rw) (ptr, int); + u32(*rl) (ptr, int); + void (*wb) (ptr, int, u8); + void (*ww) (ptr, int, u16); + void (*wl) (ptr, int, u32); +} mem; + +#define MEM_WB(where, what) wrb(where,what) +#define MEM_WW(where, what) wrw(where, what) +#define MEM_WL(where, what) wrl(where, what) + +#define MEM_RB(where) rdb(where) +#define MEM_RW(where) rdw(where) +#define MEM_RL(where) rdl(where) + +extern ptr 
currentp; + +#endif diff --git a/arch/e2k/boot/bios/video/int10.c b/arch/e2k/boot/bios/video/int10.c new file mode 100644 index 000000000000..4a4e49a13ff3 --- /dev/null +++ b/arch/e2k/boot/bios/video/int10.c @@ -0,0 +1,489 @@ + +#ifndef IN_MODULE +#include +#endif + +#include "init.h" +#include "pci.h" + +void x86emu_dump_xregs(void); +extern int verbose; + + +#ifndef _PC +/* + * This is derived from a number of PC system BIOS'es. The intent here is to + * provide very primitive video support, before an EGA/VGA BIOS installs its + * own interrupt vector. Here, "Ignored" calls should remain so. "Not + * Implemented" denotes functionality that can be implemented should the need + * arise. What are "Not Implemented" throughout are video memory accesses. + * Also, very little input validity checking is done here. + */ +int int42_handler(void) +{ +#if 0 + if (verbose && X86_AH != 0x0e) { + rom_printk("int%x\n", current->num); + x86emu_dump_xregs(); + } + + switch (X86_AH) { + case 0x00: + /* Set Video Mode */ + /* Enter: AL = video mode number */ + /* Leave: Nothing */ + /* Implemented (except for clearing the screen) */ + { /* Localise */ + int i; + u16 ioport, int1d, regvals, tmp; + u8 mode, cgamode, cgacolour; + + /* + * Ignore all mode numbers but 0x00-0x13. Some systems also ignore + * 0x0B and 0x0C, but don't do that here. + */ + if (X86_AL > 0x13) + break; + + /* + * You didn't think that was really the mode set, did you? There + * are only so many slots in the video parameter table... + */ + mode = X86_AL; + ioport = 0x03D4; +/* Linar */ + tmp = MEM_RB(0x0410) & 0x30; + rom_printk("tmp: %x mode %x\n", tmp, mode); + if (mode == 7 || tmp == 0x30) { + mode = 3; + tmp = 0x20; + } +// switch (MEM_RB(0x0410) & 0x30) { + switch (tmp) { + case 0x30: /* MDA */ + mode = 0x07; /* Force mode to 0x07 */ + ioport = 0x03B4; + break; + case 0x10: /* CGA 40x25 */ + if (mode >= 0x07) + mode = 0x01; + break; + case 0x20: /* CGA 80x25 (MCGA?) 
*/ + if (mode >= 0x07) + mode = 0x03; + break; + case 0x00: /* EGA/VGA */ + if (mode >= 0x07) /* Don't try MDA timings */ + mode = 0x01; /* !?!?! */ + break; + } + + /* Locate data in video parameter table */ + int1d = MEM_RW(0x1d << 2); + regvals = ((mode >> 1) << 4) + int1d; + cgacolour = 0x30; + if (mode == 0x06) { + regvals -= 0x10; + cgacolour = 0x3F; + } + + /** Update BIOS Data Area **/ + + /* Video mode */ + MEM_WB(0x0449, mode); + + /* Columns */ + tmp = MEM_RB(mode + int1d + 0x48); + MEM_WW(0x044A, tmp); + + /* Page length */ + tmp = MEM_RW((mode & 0x06) + int1d + 0x40); + MEM_WW(0x044C, tmp); + + /* Start Address */ + MEM_WW(0x044E, 0); + + /* Cursor positions, one for each display page */ + for (i = 0x0450; i < 0x0460; i += 2) + MEM_WW(i, 0); + + /* Cursor start & end scanlines */ + tmp = MEM_RB(regvals + 0x0B); + MEM_WB(0x0460, tmp); + tmp = MEM_RB(regvals + 0x0A); + MEM_WB(0x0461, tmp); + + /* Current display page number */ + MEM_WB(0x0462, 0); + + /* CRTC I/O address */ + MEM_WW(0x0463, ioport); + + /* CGA Mode register value */ + cgamode = MEM_RB(mode + int1d + 0x50); + MEM_WB(0x0465, cgamode); + + /* CGA Colour register value */ + MEM_WB(0x0466, cgacolour); + + /* Rows */ + MEM_WB(0x0484, (25 - 1)); + + /* Programme the mode */ + outb(ioport + 4, cgamode & 0x37); /* Turn off screen */ + for (i = 0; i < 0x10; i++) { + tmp = MEM_RB(regvals + i); + outb(ioport, i); + outb(ioport + 1, tmp); + } + outb(ioport + 5, cgacolour); /* Select colour mode */ + outb(ioport + 4, cgamode); /* Turn on screen */ + } + break; + + case 0x01: + /* Set Cursor Type */ + /* Enter: CH = starting line for cursor */ + /* CL = ending line for cursor */ + /* Leave: Nothing */ + /* Implemented */ + { /* Localise */ + u16 ioport = MEM_RW(0x0463); + + MEM_WB(0x0460, X86_CL); + MEM_WB(0x0461, X86_CH); + + outb(ioport, 0x0A); + outb(ioport + 1, X86_CH); + outb(ioport, 0x0B); + outb(ioport + 1, X86_CL); + } + break; + + case 0x02: + /* Set Cursor Position */ + /* Enter: BH = display 
page number */ + /* DH = row */ + /* DL = column */ + /* Leave: Nothing */ + /* Implemented */ + { /* Localise */ + u16 offset, ioport; + + MEM_WB((X86_BH << 1) + 0x0450, X86_DL); + MEM_WB((X86_BH << 1) + 0x0451, X86_DH); + + if (X86_BH != MEM_RB(0x0462)) + break; + + offset = (X86_DH * MEM_RW(0x044A)) + X86_DL; + offset += MEM_RW(0x044E) << 1; + + ioport = MEM_RW(0x0463); + outb(ioport, 0x0E); + outb(ioport + 1, offset >> 8); + outb(ioport, 0x0F); + outb(ioport + 1, offset & 0xFF); + } + break; + + case 0x03: + /* Get Cursor Position */ + /* Enter: BH = display page number */ + /* Leave: CH = starting line for cursor */ + /* CL = ending line for cursor */ + /* DH = row */ + /* DL = column */ + /* Implemented */ + { /* Localise */ + X86_CL = MEM_RB(0x0460); + X86_CH = MEM_RB(0x0461); + X86_DL = MEM_RB((X86_BH << 1) + 0x0450); + X86_DH = MEM_RB((X86_BH << 1) + 0x0451); + } + break; + + case 0x04: + /* Get Light Pen Position */ + /* Enter: Nothing */ + /* Leave: AH = 0x01 (down/triggered) or 0x00 (not) */ + /* BX = pixel column */ + /* CX = pixel row */ + /* DH = character row */ + /* DL = character column */ + /* Not Implemented */ + { /* Localise */ + rom_printk("int%x - Get Light Pen Position. 
" + "Function not implemented.\n", current->num); + x86emu_dump_xregs(); + X86_AH = X86_BX = X86_CX = X86_DX = 0; + } + break; + + case 0x05: + /* Set Display Page */ + /* Enter: AL = display page number */ + /* Leave: Nothing */ + /* Implemented */ + { /* Localise */ + u16 start, ioport = MEM_RW(0x0463); + u8 x, y; + + /* Calculate new start address */ + MEM_WB(0x0462, X86_AL); + start = X86_AL * MEM_RW(0x044C); + MEM_WW(0x044E, start); + start <<= 1; + + /* Update start address */ + outb(ioport, 0x0C); + outb(ioport + 1, start >> 8); + outb(ioport, 0x0D); + outb(ioport + 1, start & 0xFF); + + /* Switch cursor position */ + y = MEM_RB((X86_AL << 1) + 0x0450); + x = MEM_RB((X86_AL << 1) + 0x0451); + start += (y * MEM_RW(0x044A)) + x; + + /* Update cursor position */ + outb(ioport, 0x0E); + outb(ioport + 1, start >> 8); + outb(ioport, 0x0F); + outb(ioport + 1, start & 0xFF); + } + break; + + case 0x06: + /* Initialise or Scroll Window Up */ + /* Enter: AL = lines to scroll up */ + /* BH = attribute for blank */ + /* CH = upper y of window */ + /* CL = left x of window */ + /* DH = lower y of window */ + /* DL = right x of window */ + /* Leave: Nothing */ + /* Not Implemented */ + { /* Localise */ + rom_printk("int%x: Initialise or Scroll Window Up - " + "Function not implemented.\n", current->num); + x86emu_dump_xregs(); + } + break; + + case 0x07: + /* Initialise or Scroll Window Down */ + /* Enter: AL = lines to scroll down */ + /* BH = attribute for blank */ + /* CH = upper y of window */ + /* CL = left x of window */ + /* DH = lower y of window */ + /* DL = right x of window */ + /* Leave: Nothing */ + /* Not Implemented */ + { /* Localise */ + rom_printk("int%x: Initialise or Scroll Window Down - " + "Function not implemented.\n", current->num); + x86emu_dump_xregs(); + + } + break; + + case 0x08: + /* Read Character and Attribute at Cursor */ + /* Enter: BH = display page number */ + /* Leave: AH = attribute */ + /* AL = character */ + /* Not Implemented */ + 
{ /* Localise */ + rom_printk + ("int%x: Read Character and Attribute at Cursor - " + "Function not implemented.\n", current->num); + x86emu_dump_xregs(); + + X86_AX = 0; + } + break; + + case 0x09: + /* Write Character and Attribute at Cursor */ + /* Enter: AL = character */ + /* BH = display page number */ + /* BL = attribute (text) or colour (graphics) */ + /* CX = replication count */ + /* Leave: Nothing */ + /* Not Implemented */ + { /* Localise */ + rom_printk + ("int%x: Write Character and Attribute at Cursor - " + "Function not implemented.\n", current->num); + x86emu_dump_xregs(); + + } + break; + + case 0x0a: + /* Write Character at Cursor */ + /* Enter: AL = character */ + /* BH = display page number */ + /* BL = colour */ + /* CX = replication count */ + /* Leave: Nothing */ + /* Not Implemented */ + { /* Localise */ + rom_printk("int%x: Write Character at Cursor - " + "Function not implemented.\n", current->num); + x86emu_dump_xregs(); + + } + break; + + case 0x0b: + /* Set Palette, Background or Border */ + /* Enter: BH = 0x00 or 0x01 */ + /* BL = colour or palette (respectively) */ + /* Leave: Nothing */ + /* Implemented */ + { /* Localise */ + u16 ioport = MEM_RW(0x0463) + 5; + u8 cgacolour = MEM_RB(0x0466); + + if (X86_BH) { + cgacolour &= 0xDF; + cgacolour |= (X86_BL & 0x01) << 5; + } else { + cgacolour &= 0xE0; + cgacolour |= X86_BL & 0x1F; + } + + MEM_WB(0x0466, cgacolour); + outb(ioport, cgacolour); + } + break; + + case 0x0c: + /* Write Graphics Pixel */ + /* Enter: AL = pixel value */ + /* BH = display page number */ + /* CX = column */ + /* DX = row */ + /* Leave: Nothing */ + /* Not Implemented */ + { /* Localise */ + rom_printk("int%x: Write Graphics Pixel - " + "Function not implemented.\n", current->num); + x86emu_dump_xregs(); + + } + break; + + case 0x0d: + /* Read Graphics Pixel */ + /* Enter: BH = display page number */ + /* CX = column */ + /* DX = row */ + /* Leave: AL = pixel value */ + /* Not Implemented */ + { /* Localise */ + 
rom_printk("int%x: Write Graphics Pixel - " + "Function not implemented.\n", current->num); + x86emu_dump_xregs(); + + X86_AL = 0; + + } + break; + + case 0x0e: + /* Write Character in Teletype Mode */ + /* Enter: AL = character */ + /* BH = display page number */ + /* BL = foreground colour */ + /* Leave: Nothing */ + /* Not Implemented */ + /* WARNING: Emulation of BEL characters will require */ + /* emulation of RTC and PC speaker I/O. */ + /* Also, this recurses through int 0x10 */ + /* which might or might not have been */ + /* installed yet. */ + { /* Localise */ +#ifdef PARANOID + rom_printk("int%x: Write Character in Teletype Mode - " + "Function not implemented.\n", current->num); + x86emu_dump_xregs(); +#endif + rom_printk("%c", X86_AL); + } + break; + + case 0x0f: + /* Get Video Mode */ + /* Enter: Nothing */ + /* Leave: AH = number of columns */ + /* AL = video mode number */ + /* BH = display page number */ + /* Implemented */ + { /* Localise */ + X86_AH = MEM_RW(0x044A); + X86_AL = MEM_RB(0x0449); + X86_BH = MEM_RB(0x0462); + } + break; + + case 0x10: + /* Colour Control (subfunction in AL) */ + /* Enter: Various */ + /* Leave: Various */ + /* Ignored */ + break; + + case 0x11: + /* Font Control (subfunction in AL) */ + /* Enter: Various */ + /* Leave: Various */ + /* Ignored */ + break; + + case 0x12: + /* Miscellaneous (subfunction in BL) */ + /* Enter: Various */ + /* Leave: Various */ + /* Ignored. Previous code here optionally allowed */ + /* the enabling and disabling of VGA, but no system */ + /* BIOS I've come across actually implements it. */ + break; + + case 0x13: + /* Write String in Teletype Mode */ + /* Enter: AL = write mode */ + /* BL = attribute (if (AL & 0x02) == 0) */ + /* CX = string length */ + /* DH = row */ + /* DL = column */ + /* ES:BP = string segment:offset */ + /* Leave: Nothing */ + /* Not Implemented */ + /* WARNING: Emulation of BEL characters will require */ + /* emulation of RTC and PC speaker I/O. 
*/ + /* Also, this recurses through int 0x10 */ + /* which might or might not have been */ + /* installed yet. */ + { /* Localise */ + rom_printk("int%x: Write String in Teletype Mode - " + "Function not implemented.\n", current->num); + x86emu_dump_xregs(); + + } + break; + + default: + /* Various extensions */ + /* Enter: Various */ + /* Leave: Various */ + /* Ignored */ + break; + } +#endif + return 1; +} +#endif diff --git a/arch/e2k/boot/bios/video/int15.c b/arch/e2k/boot/bios/video/int15.c new file mode 100644 index 000000000000..a89e2d63d8f8 --- /dev/null +++ b/arch/e2k/boot/bios/video/int15.c @@ -0,0 +1,19 @@ + +#ifndef IN_MODULE +#include +#endif + +#include "init.h" +#include "printk.h" + +void x86emu_dump_xregs(void); + +int int15_handler(void) +{ +#ifdef DEBUG + rom_printk("\nint15 encountered.\n"); + x86emu_dump_xregs(); +#endif + X86_EAX = 0; + return 1; +} diff --git a/arch/e2k/boot/bios/video/int16.c b/arch/e2k/boot/bios/video/int16.c new file mode 100644 index 000000000000..09b461bd19cb --- /dev/null +++ b/arch/e2k/boot/bios/video/int16.c @@ -0,0 +1,12 @@ + +#ifndef IN_MODULE +#include +#endif + +#include "printk.h" + +int int16_handler(void) +{ + rom_printk("\nint16: keyboard not supported right now.\n"); + return 1; +} diff --git a/arch/e2k/boot/bios/video/int1a.c b/arch/e2k/boot/bios/video/int1a.c new file mode 100644 index 000000000000..56c5e1b74e6d --- /dev/null +++ b/arch/e2k/boot/bios/video/int1a.c @@ -0,0 +1,187 @@ + +#ifndef IN_MODULE +#include +#endif + +#include + +#include "init.h" +#include "pci-iface.h" +#include "pci.h" +#include "printk.h" + +#define DEBUG_INT1A + +#define SUCCESSFUL 0x00 +#define DEVICE_NOT_FOUND 0x86 +#define BAD_REGISTER_NUMBER 0x87 + +void x86emu_dump_xregs(void); +extern int verbose; + + +int int1A_handler(void) +{ + PCITAG tag; + pciVideoPtr pvp = NULL; + + if (verbose) { + rom_printk("\nint1a encountered.\n"); + x86emu_dump_xregs(); + } + +rom_printk("int1a: X86_AX = %x\n", X86_AX); + + switch (X86_AX) { + 
case 0xb101: + X86_EAX = 0x00; /* no config space/special cycle support */ + X86_AL = 0x01; /* config mechanism 1 */ + X86_EDX = 0x20494350; /* " ICP" */ + X86_EBX = 0x0210; /* Version 2.10 */ + X86_ECX &= 0xFF00; + X86_ECX |= (pciNumBuses & 0xFF); /* Max bus number in system */ + X86_EFLAGS &= ~((unsigned long) 0x01); /* clear carry flag */ +#ifdef DEBUG_INT1A + if (verbose) + rom_printk("PCI bios present.\n"); +#endif + return 1; + case 0xb102: + if (X86_DX == pvp->vendor && X86_CX == pvp->device && X86_ESI == 0) { + X86_EAX = X86_AL | (SUCCESSFUL << 8); + X86_EFLAGS &= ~((unsigned long) 0x01); /* clear carry flag */ + X86_EBX = pciSlotBX(pvp); + } +#ifdef SHOW_ALL_DEVICES + else if ((pvp = xf86FindPciDeviceVendor(X86_EDX, X86_ECX, X86_ESI, pvp))) { + X86_EAX = X86_AL | (SUCCESSFUL << 8); + X86_EFLAGS &= ~((unsigned long) 0x01); /* clear carry flag */ + X86_EBX = pciSlotBX(pvp); + } +#endif + else { + X86_EAX = X86_AL | (DEVICE_NOT_FOUND << 8); + X86_EFLAGS |= ((unsigned long) 0x01); /* set carry flag */ + } +#ifdef DEBUG_INT1A + rom_printk("eax=0x%x ebx=0x%x eflags=0x%x\n", X86_EAX, X86_EBX, X86_EFLAGS); +#endif + return 1; + case 0xb103: +#if 0 + if (X86_CL == pvp->interface && + X86_CH == pvp->subclass && + ((X86_ECX & 0xFFFF0000) >> 16) == pvp->class) { + X86_EAX = X86_AL | (SUCCESSFUL << 8); + X86_EBX = pciSlotBX(pvp); + X86_EFLAGS &= ~((unsigned long) 0x01); /* clear carry flag */ + } +#else + /* FIXME: dirty hack */ + if (0); +#endif +#ifdef SHOW_ALL_DEVICES + else if ((pvp = FindPciClass(X86_CL, X86_CH, + (X86_ECX & 0xffff0000) >> 16, + X86_ESI, pvp))) { + X86_EAX = X86_AL | (SUCCESSFUL << 8); + X86_EFLAGS &= ~((unsigned long) 0x01); /* clear carry flag */ + X86_EBX = pciSlotBX(pvp); + } +#endif + else { + X86_EAX = X86_AL | (DEVICE_NOT_FOUND << 8); + X86_EFLAGS |= ((unsigned long) 0x01); /* set carry flag */ + } +#ifdef DEBUG_INT1A + rom_printk("eax=0x%x eflags=0x%x\n", X86_EAX, X86_EFLAGS); +#endif + return 1; + case 0xb108: + if ((tag = 
findPci(X86_EBX))) { + X86_CL = pciReadByte(tag, X86_EDI); + X86_EAX = X86_AL | (SUCCESSFUL << 8); + X86_EFLAGS &= ~((unsigned long) 0x01); /* clear carry flag */ + } else { + X86_EAX = X86_AL | (BAD_REGISTER_NUMBER << 8); + X86_EFLAGS |= ((unsigned long) 0x01); /* set carry flag */ + } +#ifdef DEBUG_INT1A + rom_printk("eax=0x%x ecx=0x%x eflags=0x%x\n", X86_EAX, X86_ECX, X86_EFLAGS); +#endif + return 1; + case 0xb109: + +rom_printk("int1a: X86_EBX = %x X86_EDI = %x\n", X86_EBX, X86_EDI); + if ((tag = findPci(X86_EBX))) { + + X86_CX = pciReadWord(tag, X86_EDI); + X86_EAX = X86_AL | (SUCCESSFUL << 8); + X86_EFLAGS &= ~((unsigned long) 0x01); /* clear carry flag */ + } else { + X86_EAX = X86_AL | (BAD_REGISTER_NUMBER << 8); + X86_EFLAGS |= ((unsigned long) 0x01); /* set carry flag */ + } +#ifdef DEBUG_INT1A + rom_printk("eax=0x%x ecx=0x%x eflags=0x%x\n", X86_EAX, X86_ECX, X86_EFLAGS); +#endif + return 1; + case 0xb10a: + if ((tag = findPci(X86_EBX))) { + X86_ECX = pciReadLong(tag, X86_EDI); + X86_EAX = X86_AL | (SUCCESSFUL << 8); + X86_EFLAGS &= ~((unsigned long) 0x01); /* clear carry flag */ + } else { + X86_EAX = X86_AL | (BAD_REGISTER_NUMBER << 8); + X86_EFLAGS |= ((unsigned long) 0x01); /* set carry flag */ + } +#ifdef DEBUG_INT1A + rom_printk("eax=0x%x ecx=0x%x eflags=0x%x\n", X86_EAX, X86_ECX, X86_EFLAGS); +#endif + return 1; + case 0xb10b: + if ((tag = findPci(X86_EBX))) { + pciWriteByte(tag, X86_EDI, X86_CL); + X86_EAX = X86_AL | (SUCCESSFUL << 8); + X86_EFLAGS &= ~((unsigned long) 0x01); /* clear carry flag */ + } else { + X86_EAX = X86_AL | (BAD_REGISTER_NUMBER << 8); + X86_EFLAGS |= ((unsigned long) 0x01); /* set carry flag */ + } +#ifdef DEBUG_INT1A + rom_printk("eax=0x%x eflags=0x%x\n", X86_EAX, X86_EFLAGS); +#endif + return 1; + case 0xb10c: +rom_printk("int1a: X86_EBX = %x X86_EDI = %x X86_CX = %x\n", + X86_EBX, X86_EDI, X86_CX); + if ((tag = findPci(X86_EBX))) { + pciWriteWord(tag, X86_EDI, X86_CX); + X86_EAX = X86_AL | (SUCCESSFUL << 8); + X86_EFLAGS 
&= ~((unsigned long) 0x01); /* clear carry flag */ + } else { + X86_EAX = X86_AL | (BAD_REGISTER_NUMBER << 8); + X86_EFLAGS |= ((unsigned long) 0x01); /* set carry flag */ + } +#ifdef DEBUG_INT1A + rom_printk("eax=0x%x eflags=0x%x\n", X86_EAX, X86_EFLAGS); +#endif + return 1; + case 0xb10d: + if ((tag = findPci(X86_EBX))) { + pciWriteLong(tag, X86_EDI, X86_ECX); + X86_EAX = X86_AL | (SUCCESSFUL << 8); + X86_EFLAGS &= ~((unsigned long) 0x01); /* clear carry flag */ + } else { + X86_EAX = X86_AL | (BAD_REGISTER_NUMBER << 8); + X86_EFLAGS |= ((unsigned long) 0x01); /* set carry flag */ + } +#ifdef DEBUG_INT1A + rom_printk("eax=0x%x eflags=0x%x\n", X86_EAX, X86_EFLAGS); +#endif + return 1; + default: + rom_printk("int1a: subfunction not implemented.\n"); + return 0; + } +} diff --git a/arch/e2k/boot/bios/video/inte6.c b/arch/e2k/boot/bios/video/inte6.c new file mode 100644 index 000000000000..bd045d2f5ef0 --- /dev/null +++ b/arch/e2k/boot/bios/video/inte6.c @@ -0,0 +1,23 @@ + +#ifndef IN_MODULE +#include +#endif + +#include "printk.h" + +int intE6_handler(void) +{ +#if 0 + pciVideoPtr pvp; + + if ((pvp = xf86GetPciInfoForEntity(pInt->entityIndex))) + X86_AX = (pvp->bus << 8) | (pvp->device << 3) | (pvp->func & 0x7); + pushw(X86_CS); + pushw(X86_IP); + X86_CS = pInt->BIOSseg; + X86_EIP = 0x0003; + X86_ES = 0; /* standard pc es */ +#endif + rom_printk("intE6 not supported right now.\n"); + return 1; +} diff --git a/arch/e2k/boot/bios/video/pci-iface.c b/arch/e2k/boot/bios/video/pci-iface.c new file mode 100644 index 000000000000..39369410a5b8 --- /dev/null +++ b/arch/e2k/boot/bios/video/pci-iface.c @@ -0,0 +1,150 @@ + +#ifndef IN_MODULE +#include +#include +#endif + +#include +#include "pci-iface.h" +#include "pci.h" + +#define PCITAG struct pci_filter * + +#define DEBUG_PCI 1 + +struct pci_access *pacc; +struct bios_pci_dev *dev; + +struct pci_filter ltag; + + +int pciNumBuses = 0; + +int pciInit(void) +{ + return 0; +} + +int pciExit(void) +{ + + return 0; +} + +PCITAG 
findPci(unsigned short bx) +{ + PCITAG tag = <ag; + + int bus = (bx >> 8) & 0xFF; + int slot = (bx >> 3) & 0x1F; + int func = bx & 0x7; + + tag->bus = bus; + tag->slot = slot; + tag->func = func; + + + if (pci_find_slot(bus, PCI_DEVFN(slot, func))) + return tag; + + return NULL; +} + +u32 pciSlotBX(pciVideoPtr pvp) +{ + + PCITAG tag = <ag; + + tag->bus = pvp->bus->number; + tag->slot = PCI_SLOT(pvp->devfn); + tag->func = PCI_FUNC(pvp->devfn); + + return (tag->bus << 8) | (tag->slot << 3) | (tag->func); +} + +u8 pciReadByte(PCITAG tag, u32 idx) +{ + struct bios_pci_dev *d; + + u8 res; + if ((d = pci_find_slot(tag->bus, PCI_DEVFN(tag->slot, tag->func)))) { + bios_pci_read_config_byte(d, (u8) idx, &res); + return res; + } + +#ifdef DEBUG_PCI + printf("PCI: device not found while read byte (%x:%x.%x)\n", + tag->bus, tag->slot, tag->func); +#endif + return 0; +} + +u16 pciReadWord(PCITAG tag, u32 idx) +{ + struct bios_pci_dev *d; + + u16 res; + if ((d = pci_find_slot(tag->bus, PCI_DEVFN(tag->slot, tag->func)))) { + bios_pci_read_config_word(d, (u8) idx, &res); + return res; + } +#ifdef DEBUG_PCI + printf("PCI: device not found while read word (%x:%x.%x)\n", + tag->bus, tag->slot, tag->func); +#endif + return 0; +} + +u32 pciReadLong(PCITAG tag, u32 idx) +{ + struct bios_pci_dev *d; + + u32 res; + if ((d = pci_find_slot(tag->bus, PCI_DEVFN(tag->slot, tag->func)))) { + bios_pci_read_config_dword(d, (u8) idx, &res); + return res; + } +#ifdef DEBUG_PCI + printf("PCI: device not found while read long (%x:%x.%x)\n", + tag->bus, tag->slot, tag->func); +#endif + return 0; +} + + +void pciWriteLong(PCITAG tag, u32 idx, u32 data) +{ + struct bios_pci_dev *d; + if ((d = pci_find_slot(tag->bus, PCI_DEVFN(tag->slot, tag->func)))) + bios_pci_write_config_dword(d, (u8) idx, data); +#ifdef DEBUG_PCI + else + printf("PCI: device not found while write long (%x:%x.%x)\n", + tag->bus, tag->slot, tag->func); +#endif +} + +void pciWriteWord(PCITAG tag, u32 idx, u16 data) +{ + struct 
bios_pci_dev *d; + if ((d = pci_find_slot(tag->bus, PCI_DEVFN(tag->slot, tag->func)))) + bios_pci_write_config_word(d, (u8) idx, data); +#ifdef DEBUG_PCI + else + printf("PCI: device not found while write word (%x:%x.%x)\n", + tag->bus, tag->slot, tag->func); +#endif + +} + +void pciWriteByte(PCITAG tag, u32 idx, u8 data) +{ + struct bios_pci_dev *d; + if ((d = pci_find_slot(tag->bus, PCI_DEVFN(tag->slot, tag->func)))) + bios_pci_write_config_byte(d, (u8) idx, data); +#ifdef DEBUG_PCI + else + printf("PCI: device not found while write long (%x:%x.%x)\n", + tag->bus, tag->slot, tag->func); +#endif +} diff --git a/arch/e2k/boot/bios/video/pci-iface.h b/arch/e2k/boot/bios/video/pci-iface.h new file mode 100644 index 000000000000..57ad1186b926 --- /dev/null +++ b/arch/e2k/boot/bios/video/pci-iface.h @@ -0,0 +1,32 @@ + +#include "pci.h" + +typedef unsigned long pciaddr_t; +typedef u8 byte; +typedef u16 word; + + +struct pci_filter { + int bus, slot, func; /* -1 = ANY */ + int vendor, device; +}; + +#define PCITAG struct pci_filter * +#define pciVideoPtr struct bios_pci_dev * + +extern int pciNumBuses; + +int pciInit(void); +int pciExit(void); + + +PCITAG findPci(unsigned short bx); +u32 pciSlotBX(pciVideoPtr pvp); + +void pciWriteLong(PCITAG tag, u32 idx, u32 data); +void pciWriteWord(PCITAG tag, u32 idx, u16 data); +void pciWriteByte(PCITAG tag, u32 idx, u8 data); + +u32 pciReadLong(PCITAG tag, u32 idx); +u16 pciReadWord(PCITAG tag, u32 idx); +u8 pciReadByte(PCITAG tag, u32 idx); diff --git a/arch/e2k/boot/bios/video/x86emu/include/msr.h b/arch/e2k/boot/bios/video/x86emu/include/msr.h new file mode 100644 index 000000000000..4977b0201d83 --- /dev/null +++ b/arch/e2k/boot/bios/video/x86emu/include/msr.h @@ -0,0 +1,33 @@ +#ifndef CPU_P6_MSR_H +#define CPU_P6_MSR_H + +/* + * Access to machine-specific registers (available on 586 and better only) + * Note: the rd* operations modify the parameters directly (without using + * pointer indirection), this allows gcc to optimize 
better + */ + +#define rdmsr(msr,val1,val2) \ + __asm__ __volatile__("rdmsr" \ + : "=a" (val1), "=d" (val2) \ + : "c" (msr)) + +#define wrmsr(msr,val1,val2) \ + __asm__ __volatile__("wrmsr" \ + : /* no outputs */ \ + : "c" (msr), "a" (val1), "d" (val2)) + +#define rdtsc(low,high) \ + __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high)) + +#define rdtscl(low) \ + __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx") + +#define rdtscll(val) \ + __asm__ __volatile__ ("rdtsc" : "=A" (val)) + +#define rdpmc(counter,low,high) \ + __asm__ __volatile__("rdpmc" \ + : "=a" (low), "=d" (high) \ + : "c" (counter)) +#endif /* CPU_P6_MSR_H */ diff --git a/arch/e2k/boot/bios/video/x86emu/include/x86emu.h b/arch/e2k/boot/bios/video/x86emu/include/x86emu.h new file mode 100644 index 000000000000..c936db91d1bf --- /dev/null +++ b/arch/e2k/boot/bios/video/x86emu/include/x86emu.h @@ -0,0 +1,205 @@ +/**************************************************************************** +* +* Realmode X86 Emulator Library +* +* Copyright (C) 1996-1999 SciTech Software, Inc. +* Copyright (C) David Mosberger-Tang +* Copyright (C) 1999 Egbert Eich +* +* ======================================================================== +* +* Permission to use, copy, modify, distribute, and sell this software and +* its documentation for any purpose is hereby granted without fee, +* provided that the above copyright notice appear in all copies and that +* both that copyright notice and this permission notice appear in +* supporting documentation, and that the name of the authors not be used +* in advertising or publicity pertaining to distribution of the software +* without specific, written prior permission. The authors makes no +* representations about the suitability of this software for any purpose. +* It is provided "as is" without express or implied warranty. 
+* +* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR +* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF +* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +* PERFORMANCE OF THIS SOFTWARE. +* +* ======================================================================== +* +* Language: ANSI C +* Environment: Any +* Developer: Kendall Bennett +* +* Description: Header file for public specific functions. +* Any application linking against us should only +* include this header +* +****************************************************************************/ +/* $XFree86: xc/extras/x86emu/include/x86emu.h,v 1.2 2000/11/21 23:10:25 tsi Exp $ */ + +#ifndef __X86EMU_X86EMU_H +#define __X86EMU_X86EMU_H + +extern void rom_printk(char const *fmt, ...); + +#undef printf +#define printf(...) rom_printk(##__VA_ARGS__) +#ifndef NULL +#define NULL (void *) 0 +#endif + + +#ifdef SCITECH +#include "scitech.h" +#define X86API _ASMAPI +#define X86APIP _ASMAPIP +typedef int X86EMU_pioAddr; +#else +#include "x86emu/types.h" +#define X86API +#define X86APIP * +#endif +#include "x86emu/regs.h" + +/*---------------------- Macros and type definitions ----------------------*/ + +#pragma pack(1) + +/**************************************************************************** +REMARKS: +Data structure containing ponters to programmed I/O functions used by the +emulator. This is used so that the user program can hook all programmed +I/O for the emulator to handled as necessary by the user program. By +default the emulator contains simple functions that do not do access the +hardware in any way. 
To allow the emualtor access the hardware, you will +need to override the programmed I/O functions using the X86EMU_setupPioFuncs +function. + +HEADER: +x86emu.h + +MEMBERS: +inb - Function to read a byte from an I/O port +inw - Function to read a word from an I/O port +inl - Function to read a dword from an I/O port +outb - Function to write a byte to an I/O port +outw - Function to write a word to an I/O port +outl - Function to write a dword to an I/O port +****************************************************************************/ +typedef struct { + u8 (X86APIP inb_func)(X86EMU_pioAddr addr); + u16 (X86APIP inw_func)(X86EMU_pioAddr addr); + u32 (X86APIP inl_func)(X86EMU_pioAddr addr); + void (X86APIP outb_func)(X86EMU_pioAddr addr, u8 val); + void (X86APIP outw_func)(X86EMU_pioAddr addr, u16 val); + void (X86APIP outl_func)(X86EMU_pioAddr addr, u32 val); + } X86EMU_pioFuncs; + +/**************************************************************************** +REMARKS: +Data structure containing ponters to memory access functions used by the +emulator. This is used so that the user program can hook all memory +access functions as necessary for the emulator. By default the emulator +contains simple functions that only access the internal memory of the +emulator. If you need specialised functions to handle access to different +types of memory (ie: hardware framebuffer accesses and BIOS memory access +etc), you will need to override this using the X86EMU_setupMemFuncs +function. 
+ +HEADER: +x86emu.h + +MEMBERS: +rdb - Function to read a byte from an address +rdw - Function to read a word from an address +rdl - Function to read a dword from an address +wrb - Function to write a byte to an address +wrw - Function to write a word to an address +wrl - Function to write a dword to an address +****************************************************************************/ +typedef struct { + u8 (X86APIP rdb)(u32 addr); + u16 (X86APIP rdw)(u32 addr); + u32 (X86APIP rdl)(u32 addr); + void (X86APIP wrb)(u32 addr, u8 val); + void (X86APIP wrw)(u32 addr, u16 val); + void (X86APIP wrl)(u32 addr, u32 val); + } X86EMU_memFuncs; + +/**************************************************************************** + Here are the default memory read and write + function in case they are needed as fallbacks. +***************************************************************************/ +extern u8 X86API rdb(u32 addr); +extern u16 X86API rdw(u32 addr); +extern u32 X86API rdl(u32 addr); +extern void X86API wrb(u32 addr, u8 val); +extern void X86API wrw(u32 addr, u16 val); +extern void X86API wrl(u32 addr, u32 val); + +#pragma pack() + +/*--------------------- type definitions -----------------------------------*/ + +typedef void (X86APIP X86EMU_intrFuncs)(int num); +extern X86EMU_intrFuncs _X86EMU_intrTab[256]; + +/*-------------------------- Function Prototypes --------------------------*/ + +#ifdef __cplusplus +extern "C" { /* Use "C" linkage when in C++ mode */ +#endif + +void X86EMU_setupMemFuncs(X86EMU_memFuncs *funcs); +void X86EMU_setupPioFuncs(X86EMU_pioFuncs *funcs); +void X86EMU_setupIntrFuncs(X86EMU_intrFuncs funcs[]); +void X86EMU_prepareForInt(int num); + +/* decode.c */ + +void X86EMU_exec(void); +void X86EMU_halt_sys(void); + +#ifdef DEBUG +#define HALT_SYS() \ + rom_printk("halt_sys: file %s, line %d\n", __FILE__, __LINE__), \ + X86EMU_halt_sys() +#else +#define HALT_SYS() X86EMU_halt_sys() +#endif + +/* Debug options */ + +#define DEBUG_DECODE_F 
0x000001 /* print decoded instruction */ +#define DEBUG_TRACE_F 0x000002 /* dump regs before/after execution */ +#define DEBUG_STEP_F 0x000004 +#define DEBUG_DISASSEMBLE_F 0x000008 +#define DEBUG_BREAK_F 0x000010 +#define DEBUG_SVC_F 0x000020 +#define DEBUG_FS_F 0x000080 +#define DEBUG_PROC_F 0x000100 +#define DEBUG_SYSINT_F 0x000200 /* bios system interrupts. */ +#define DEBUG_TRACECALL_F 0x000400 +#define DEBUG_INSTRUMENT_F 0x000800 +#define DEBUG_MEM_TRACE_F 0x001000 +#define DEBUG_IO_TRACE_F 0x002000 +#define DEBUG_TRACECALL_REGS_F 0x004000 +#define DEBUG_DECODE_NOPRINT_F 0x008000 +#define DEBUG_SAVE_IP_CS_F 0x010000 +#define DEBUG_EXIT 0x020000 +#define DEBUG_SAVE_CS_IP 0x040000 +#define DEBUG_SYS_F (DEBUG_SVC_F|DEBUG_FS_F|DEBUG_PROC_F) + +void X86EMU_trace_regs(void); +void X86EMU_trace_xregs(void); +void X86EMU_dump_memory(u16 seg, u16 off, u32 amt); +int X86EMU_trace_on(void); +int X86EMU_trace_off(void); + +#ifdef __cplusplus +} /* End of "C" linkage for C++ */ +#endif + +#endif /* __X86EMU_X86EMU_H */ diff --git a/arch/e2k/boot/bios/video/x86emu/include/x86emu/fpu_regs.h b/arch/e2k/boot/bios/video/x86emu/include/x86emu/fpu_regs.h new file mode 100644 index 000000000000..56e9a04d75c1 --- /dev/null +++ b/arch/e2k/boot/bios/video/x86emu/include/x86emu/fpu_regs.h @@ -0,0 +1,115 @@ +/**************************************************************************** +* +* Realmode X86 Emulator Library +* +* Copyright (C) 1996-1999 SciTech Software, Inc. 
+* Copyright (C) David Mosberger-Tang +* Copyright (C) 1999 Egbert Eich +* +* ======================================================================== +* +* Permission to use, copy, modify, distribute, and sell this software and +* its documentation for any purpose is hereby granted without fee, +* provided that the above copyright notice appear in all copies and that +* both that copyright notice and this permission notice appear in +* supporting documentation, and that the name of the authors not be used +* in advertising or publicity pertaining to distribution of the software +* without specific, written prior permission. The authors makes no +* representations about the suitability of this software for any purpose. +* It is provided "as is" without express or implied warranty. +* +* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR +* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF +* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +* PERFORMANCE OF THIS SOFTWARE. +* +* ======================================================================== +* +* Language: ANSI C +* Environment: Any +* Developer: Kendall Bennett +* +* Description: Header file for FPU register definitions. 
+* +****************************************************************************/ + +#ifndef __X86EMU_FPU_REGS_H +#define __X86EMU_FPU_REGS_H + +#ifdef X86_FPU_SUPPORT + +#pragma pack(1) + +/* Basic 8087 register can hold any of the following values: */ + +union x86_fpu_reg_u { + s8 tenbytes[10]; + double dval; + float fval; + s16 sval; + s32 lval; + }; + +struct x86_fpu_reg { + union x86_fpu_reg_u reg; + char tag; + }; + +/* + * Since we are not going to worry about the problems of aliasing + * registers, every time a register is modified, its result type is + * set in the tag fields for that register. If some operation + * attempts to access the type in a way inconsistent with its current + * storage format, then we flag the operation. If common, we'll + * attempt the conversion. + */ + +#define X86_FPU_VALID 0x80 +#define X86_FPU_REGTYP(r) ((r) & 0x7F) + +#define X86_FPU_WORD 0x0 +#define X86_FPU_SHORT 0x1 +#define X86_FPU_LONG 0x2 +#define X86_FPU_FLOAT 0x3 +#define X86_FPU_DOUBLE 0x4 +#define X86_FPU_LDBL 0x5 +#define X86_FPU_BSD 0x6 + +#define X86_FPU_STKTOP 0 + +struct x86_fpu_registers { + struct x86_fpu_reg x86_fpu_stack[8]; + int x86_fpu_flags; + int x86_fpu_config; /* rounding modes, etc. */ + short x86_fpu_tos, x86_fpu_bos; + }; + +#pragma pack() + +/* + * There are two versions of the following macro. + * + * One version is for opcode D9, for which there are more than 32 + * instructions encoded in the second byte of the opcode. + * + * The other version, deals with all the other 7 i87 opcodes, for + * which there are only 32 strings needed to describe the + * instructions. 
+ */ + +#endif /* X86_FPU_SUPPORT */ + +#ifdef DEBUG +# define DECODE_PRINTINSTR32(t,mod,rh,rl) \ + DECODE_PRINTF(t[(mod<<3)+(rh)]); +# define DECODE_PRINTINSTR256(t,mod,rh,rl) \ + DECODE_PRINTF(t[(mod<<6)+(rh<<3)+(rl)]); +#else +# define DECODE_PRINTINSTR32(t,mod,rh,rl) +# define DECODE_PRINTINSTR256(t,mod,rh,rl) +#endif + +#endif /* __X86EMU_FPU_REGS_H */ diff --git a/arch/e2k/boot/bios/video/x86emu/include/x86emu/regs.h b/arch/e2k/boot/bios/video/x86emu/include/x86emu/regs.h new file mode 100644 index 000000000000..d575aff5580e --- /dev/null +++ b/arch/e2k/boot/bios/video/x86emu/include/x86emu/regs.h @@ -0,0 +1,338 @@ +/**************************************************************************** +* +* Realmode X86 Emulator Library +* +* Copyright (C) 1996-1999 SciTech Software, Inc. +* Copyright (C) David Mosberger-Tang +* Copyright (C) 1999 Egbert Eich +* +* ======================================================================== +* +* Permission to use, copy, modify, distribute, and sell this software and +* its documentation for any purpose is hereby granted without fee, +* provided that the above copyright notice appear in all copies and that +* both that copyright notice and this permission notice appear in +* supporting documentation, and that the name of the authors not be used +* in advertising or publicity pertaining to distribution of the software +* without specific, written prior permission. The authors makes no +* representations about the suitability of this software for any purpose. +* It is provided "as is" without express or implied warranty. 
+* +* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR +* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF +* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +* PERFORMANCE OF THIS SOFTWARE. +* +* ======================================================================== +* +* Language: ANSI C +* Environment: Any +* Developer: Kendall Bennett +* +* Description: Header file for x86 register definitions. +* +****************************************************************************/ +/* $XFree86: xc/extras/x86emu/include/x86emu/regs.h,v 1.3 2001/10/28 03:32:25 tsi Exp $ */ + +#ifndef __X86EMU_REGS_H +#define __X86EMU_REGS_H + +//#include + +/*---------------------- Macros and type definitions ----------------------*/ + +#pragma pack(1) + +/* + * General EAX, EBX, ECX, EDX type registers. Note that for + * portability, and speed, the issue of byte swapping is not addressed + * in the registers. All registers are stored in the default format + * available on the host machine. The only critical issue is that the + * registers should line up EXACTLY in the same manner as they do in + * the 386. That is: + * + * EAX & 0xff === AL + * EAX & 0xffff == AX + * + * etc. The result is that alot of the calculations can then be + * done using the native instruction set fully. 
+ */ + +#ifdef __BIG_ENDIAN__ + +typedef struct { + u32 e_reg; + } I32_reg_t; + +typedef struct { + u16 filler0, x_reg; + } I16_reg_t; + +typedef struct { + u8 filler0, filler1, h_reg, l_reg; + } I8_reg_t; + +#else /* !__BIG_ENDIAN__ */ + +typedef struct { + u32 e_reg; + } I32_reg_t; + +typedef struct { + u16 x_reg; + } I16_reg_t; + +typedef struct { + u8 l_reg, h_reg; + } I8_reg_t; + +#endif /* BIG_ENDIAN */ + +typedef union { + I32_reg_t I32_reg; + I16_reg_t I16_reg; + I8_reg_t I8_reg; + } i386_general_register; + +struct i386_general_regs { + i386_general_register A, B, C, D; + }; + +typedef struct i386_general_regs Gen_reg_t; + +struct i386_special_regs { + i386_general_register SP, BP, SI, DI, IP; + u32 FLAGS; + }; + +/* + * Segment registers here represent the 16 bit quantities + * CS, DS, ES, SS. + */ + +struct i386_segment_regs { + u16 CS, DS, SS, ES, FS, GS; + }; + +/* 8 bit registers */ +#define R_AH gen.A.I8_reg.h_reg +#define R_AL gen.A.I8_reg.l_reg +#define R_BH gen.B.I8_reg.h_reg +#define R_BL gen.B.I8_reg.l_reg +#define R_CH gen.C.I8_reg.h_reg +#define R_CL gen.C.I8_reg.l_reg +#define R_DH gen.D.I8_reg.h_reg +#define R_DL gen.D.I8_reg.l_reg + +/* 16 bit registers */ +#define R_AX gen.A.I16_reg.x_reg +#define R_BX gen.B.I16_reg.x_reg +#define R_CX gen.C.I16_reg.x_reg +#define R_DX gen.D.I16_reg.x_reg + +/* 32 bit extended registers */ +#define R_EAX gen.A.I32_reg.e_reg +#define R_EBX gen.B.I32_reg.e_reg +#define R_ECX gen.C.I32_reg.e_reg +#define R_EDX gen.D.I32_reg.e_reg + +/* special registers */ +#define R_SP spc.SP.I16_reg.x_reg +#define R_BP spc.BP.I16_reg.x_reg +#define R_SI spc.SI.I16_reg.x_reg +#define R_DI spc.DI.I16_reg.x_reg +#define R_IP spc.IP.I16_reg.x_reg +#define R_FLG spc.FLAGS + +/* special registers */ +#define R_SP spc.SP.I16_reg.x_reg +#define R_BP spc.BP.I16_reg.x_reg +#define R_SI spc.SI.I16_reg.x_reg +#define R_DI spc.DI.I16_reg.x_reg +#define R_IP spc.IP.I16_reg.x_reg +#define R_FLG spc.FLAGS + +/* special registers */ 
+#define R_ESP spc.SP.I32_reg.e_reg +#define R_EBP spc.BP.I32_reg.e_reg +#define R_ESI spc.SI.I32_reg.e_reg +#define R_EDI spc.DI.I32_reg.e_reg +#define R_EIP spc.IP.I32_reg.e_reg +#define R_EFLG spc.FLAGS + +/* segment registers */ +#define R_CS seg.CS +#define R_DS seg.DS +#define R_SS seg.SS +#define R_ES seg.ES +#define R_FS seg.FS +#define R_GS seg.GS + +/* flag conditions */ +#define FB_CF 0x0001 /* CARRY flag */ +#define FB_PF 0x0004 /* PARITY flag */ +#define FB_AF 0x0010 /* AUX flag */ +#define FB_ZF 0x0040 /* ZERO flag */ +#define FB_SF 0x0080 /* SIGN flag */ +#define FB_TF 0x0100 /* TRAP flag */ +#define FB_IF 0x0200 /* INTERRUPT ENABLE flag */ +#define FB_DF 0x0400 /* DIR flag */ +#define FB_OF 0x0800 /* OVERFLOW flag */ + +/* 80286 and above always have bit#1 set */ +#define F_ALWAYS_ON (0x0002) /* flag bits always on */ + +/* + * Define a mask for only those flag bits we will ever pass back + * (via PUSHF) + */ +#define F_MSK (FB_CF|FB_PF|FB_AF|FB_ZF|FB_SF|FB_TF|FB_IF|FB_DF|FB_OF) + +/* following bits masked in to a 16bit quantity */ + +#define F_CF 0x0001 /* CARRY flag */ +#define F_PF 0x0004 /* PARITY flag */ +#define F_AF 0x0010 /* AUX flag */ +#define F_ZF 0x0040 /* ZERO flag */ +#define F_SF 0x0080 /* SIGN flag */ +#define F_TF 0x0100 /* TRAP flag */ +#define F_IF 0x0200 /* INTERRUPT ENABLE flag */ +#define F_DF 0x0400 /* DIR flag */ +#define F_OF 0x0800 /* OVERFLOW flag */ + +#define TOGGLE_FLAG(flag) (M.x86.R_FLG ^= (flag)) +#define SET_FLAG(flag) (M.x86.R_FLG |= (flag)) +#define CLEAR_FLAG(flag) (M.x86.R_FLG &= ~(flag)) +#define ACCESS_FLAG(flag) (M.x86.R_FLG & (flag)) +#define CLEARALL_FLAG(m) (M.x86.R_FLG = 0) + +#define CONDITIONAL_SET_FLAG(COND,FLAG) \ + if (COND) SET_FLAG(FLAG); else CLEAR_FLAG(FLAG) + +#define F_PF_CALC 0x010000 /* PARITY flag has been calced */ +#define F_ZF_CALC 0x020000 /* ZERO flag has been calced */ +#define F_SF_CALC 0x040000 /* SIGN flag has been calced */ + +#define F_ALL_CALC 0xff0000 /* All have been calced */ 
+ +/* + * Emulator machine state. + * Segment usage control. + */ +#define SYSMODE_SEG_DS_SS 0x00000001 +#define SYSMODE_SEGOVR_CS 0x00000002 +#define SYSMODE_SEGOVR_DS 0x00000004 +#define SYSMODE_SEGOVR_ES 0x00000008 +#define SYSMODE_SEGOVR_FS 0x00000010 +#define SYSMODE_SEGOVR_GS 0x00000020 +#define SYSMODE_SEGOVR_SS 0x00000040 +#define SYSMODE_PREFIX_REPE 0x00000080 +#define SYSMODE_PREFIX_REPNE 0x00000100 +#define SYSMODE_PREFIX_DATA 0x00000200 +#define SYSMODE_PREFIX_ADDR 0x00000400 +#define SYSMODE_INTR_PENDING 0x10000000 +#define SYSMODE_EXTRN_INTR 0x20000000 +#define SYSMODE_HALTED 0x40000000 + +#define SYSMODE_SEGMASK (SYSMODE_SEG_DS_SS | \ + SYSMODE_SEGOVR_CS | \ + SYSMODE_SEGOVR_DS | \ + SYSMODE_SEGOVR_ES | \ + SYSMODE_SEGOVR_FS | \ + SYSMODE_SEGOVR_GS | \ + SYSMODE_SEGOVR_SS) +#define SYSMODE_CLRMASK (SYSMODE_SEG_DS_SS | \ + SYSMODE_SEGOVR_CS | \ + SYSMODE_SEGOVR_DS | \ + SYSMODE_SEGOVR_ES | \ + SYSMODE_SEGOVR_FS | \ + SYSMODE_SEGOVR_GS | \ + SYSMODE_SEGOVR_SS | \ + SYSMODE_PREFIX_DATA | \ + SYSMODE_PREFIX_ADDR) + +#define INTR_SYNCH 0x1 +#define INTR_ASYNCH 0x2 +#define INTR_HALTED 0x4 + +typedef struct { + struct i386_general_regs gen; + struct i386_special_regs spc; + struct i386_segment_regs seg; + /* + * MODE contains information on: + * REPE prefix 2 bits repe,repne + * SEGMENT overrides 5 bits normal,DS,SS,CS,ES + * Delayed flag set 3 bits (zero, signed, parity) + * reserved 6 bits + * interrupt # 8 bits instruction raised interrupt + * BIOS video segregs 4 bits + * Interrupt Pending 1 bits + * Extern interrupt 1 bits + * Halted 1 bits + */ + u32 mode; + volatile int intr; /* mask of pending interrupts */ + int debug; +#ifdef DEBUG + int check; + u16 saved_ip; + u16 saved_cs; + int enc_pos; + int enc_str_pos; + char decode_buf[32]; /* encoded byte stream */ + char decoded_buf[256]; /* disassembled strings */ +#endif + u8 intno; + u8 __pad[3]; + } X86EMU_regs; + +/**************************************************************************** 
+REMARKS: +Structure maintaining the emulator machine state. + +MEMBERS: +mem_base - Base real mode memory for the emulator +abseg - Base for the absegment +mem_size - Size of the real mode memory block for the emulator +private - private data pointer +x86 - X86 registers +****************************************************************************/ +typedef struct { + unsigned long mem_base; + unsigned long mem_size; + unsigned long abseg; + void* private; + X86EMU_regs x86; + } X86EMU_sysEnv; + +#pragma pack() + +/*----------------------------- Global Variables --------------------------*/ + +#ifdef __cplusplus +extern "C" { /* Use "C" linkage when in C++ mode */ +#endif + +/* Global emulator machine state. + * + * We keep it global to avoid pointer dereferences in the code for speed. + */ + +extern X86EMU_sysEnv _X86EMU_env; +#define M _X86EMU_env + +/*-------------------------- Function Prototypes --------------------------*/ + +/* Function to log information at runtime */ + +//void printk(const char *fmt, ...); + +#ifdef __cplusplus +} /* End of "C" linkage for C++ */ +#endif + +#endif /* __X86EMU_REGS_H */ diff --git a/arch/e2k/boot/bios/video/x86emu/include/x86emu/types.h b/arch/e2k/boot/bios/video/x86emu/include/x86emu/types.h new file mode 100644 index 000000000000..fef6c45c6858 --- /dev/null +++ b/arch/e2k/boot/bios/video/x86emu/include/x86emu/types.h @@ -0,0 +1,51 @@ +/**************************************************************************** +* +* Realmode X86 Emulator Library +* +* Copyright (C) 1996-1999 SciTech Software, Inc. 
+* Copyright (C) David Mosberger-Tang +* Copyright (C) 1999 Egbert Eich +* +* ======================================================================== +* +* Permission to use, copy, modify, distribute, and sell this software and +* its documentation for any purpose is hereby granted without fee, +* provided that the above copyright notice appear in all copies and that +* both that copyright notice and this permission notice appear in +* supporting documentation, and that the name of the authors not be used +* in advertising or publicity pertaining to distribution of the software +* without specific, written prior permission. The authors makes no +* representations about the suitability of this software for any purpose. +* It is provided "as is" without express or implied warranty. +* +* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR +* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF +* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +* PERFORMANCE OF THIS SOFTWARE. +* +* ======================================================================== +* +* Language: ANSI C +* Environment: Any +* Developer: Kendall Bennett +* +* Description: Header file for x86 emulator type definitions. 
+* +****************************************************************************/ + +/* $XFree86: xc/extras/x86emu/include/x86emu/types.h,v 1.4 2000/09/26 15:56:44 tsi Exp $ */ + +#ifndef __X86EMU_TYPES_H +#define __X86EMU_TYPES_H + +#include + +/*---------------------- Macros and type definitions ----------------------*/ + +typedef int sint; +typedef u16 X86EMU_pioAddr; + +#endif /* __X86EMU_TYPES_H */ diff --git a/arch/e2k/boot/bios/video/x86emu/src/x86emu/LICENSE b/arch/e2k/boot/bios/video/x86emu/src/x86emu/LICENSE new file mode 100644 index 000000000000..a3ede4a87d5d --- /dev/null +++ b/arch/e2k/boot/bios/video/x86emu/src/x86emu/LICENSE @@ -0,0 +1,17 @@ + License information + ------------------- + +The x86emu library is under a BSD style license, comaptible +with the XFree86 and X licenses used by XFree86. The +original x86emu libraries were under the GNU General Public +License. Due to license incompatibilities between the GPL +and the XFree86 license, the original authors of the code +decided to allow a license change. If you have submitted +code to the original x86emu project, and you don't agree +with the license change, please contact us and let you +know. Your code will be removed to comply with your wishes. + +If you have any questions about this, please send email to +x86emu@linuxlabs.com or KendallB@scitechsoft.com for +clarification. + diff --git a/arch/e2k/boot/bios/video/x86emu/src/x86emu/Makefile b/arch/e2k/boot/bios/video/x86emu/src/x86emu/Makefile new file mode 100644 index 000000000000..6f676fab48e7 --- /dev/null +++ b/arch/e2k/boot/bios/video/x86emu/src/x86emu/Makefile @@ -0,0 +1,6 @@ + +obj-y := decode.o fpu.o ops.o ops2.o prim_ops.o sys.o debug.o + +INCS = -I$(obj)/. -I$(obj)/../../../../../../../../include -I$(obj)/../../../.. 
-I$(obj)/../../include -I$(obj)/../../include/x86emu + +KBUILD_CFLAGS += -D__DRIVER__ -DFORCE_POST $(INCS) diff --git a/arch/e2k/boot/bios/video/x86emu/src/x86emu/debug.c b/arch/e2k/boot/bios/video/x86emu/src/x86emu/debug.c new file mode 100644 index 000000000000..6c3a24aa9214 --- /dev/null +++ b/arch/e2k/boot/bios/video/x86emu/src/x86emu/debug.c @@ -0,0 +1,439 @@ +/**************************************************************************** +* +* Realmode X86 Emulator Library +* +* Copyright (C) 1991-2004 SciTech Software, Inc. +* Copyright (C) David Mosberger-Tang +* Copyright (C) 1999 Egbert Eich +* +* ======================================================================== +* +* Permission to use, copy, modify, distribute, and sell this software and +* its documentation for any purpose is hereby granted without fee, +* provided that the above copyright notice appear in all copies and that +* both that copyright notice and this permission notice appear in +* supporting documentation, and that the name of the authors not be used +* in advertising or publicity pertaining to distribution of the software +* without specific, written prior permission. The authors makes no +* representations about the suitability of this software for any purpose. +* It is provided "as is" without express or implied warranty. +* +* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR +* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF +* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +* PERFORMANCE OF THIS SOFTWARE. 
+* +* ======================================================================== +* +* Language: ANSI C +* Environment: Any +* Developer: Kendall Bennett +* +* Description: This file contains the code to handle debugging of the +* emulator. +* +****************************************************************************/ + +#include "x86emu/x86emui.h" +#include + +/*----------------------------- Implementation ----------------------------*/ + +#ifdef DEBUG + +static void print_encoded_bytes (u16 s, u16 o); +static void print_decoded_instruction (void); +static int parse_line (char *s, int *ps, int *n); + +/* should look something like debug's output. */ +void X86EMU_trace_regs (void) +{ + if (DEBUG_TRACE()) { + x86emu_dump_regs(); + } + if (DEBUG_DECODE() && ! DEBUG_DECODE_NOPRINT()) { + rom_printk("%04x:%04x ",M.x86.saved_cs, M.x86.saved_ip); + print_encoded_bytes( M.x86.saved_cs, M.x86.saved_ip); + print_decoded_instruction(); + } +} + +void X86EMU_trace_xregs (void) +{ + if (DEBUG_TRACE()) { + x86emu_dump_xregs(); + } +} + +void x86emu_just_disassemble (void) +{ + /* + * This routine called if the flag DEBUG_DISASSEMBLE is set kind + * of a hack! + */ + rom_printk("%04x:%04x ",M.x86.saved_cs, M.x86.saved_ip); + print_encoded_bytes( M.x86.saved_cs, M.x86.saved_ip); + print_decoded_instruction(); +} + +static void disassemble_forward (u16 seg, u16 off, int n) +{ + X86EMU_sysEnv tregs; + int i; + u8 op1; + /* + * hack, hack, hack. What we do is use the exact machinery set up + * for execution, except that now there is an additional state + * flag associated with the "execution", and we are using a copy + * of the register struct. All the major opcodes, once fully + * decoded, have the following two steps: TRACE_REGS(r,m); + * SINGLE_STEP(r,m); which disappear if DEBUG is not defined to + * the preprocessor. 
The TRACE_REGS macro expands to: + * + * if (debug&DEBUG_DISASSEMBLE) + * {just_disassemble(); goto EndOfInstruction;} + * if (debug&DEBUG_TRACE) trace_regs(r,m); + * + * ...... and at the last line of the routine. + * + * EndOfInstruction: end_instr(); + * + * Up to the point where TRACE_REG is expanded, NO modifications + * are done to any register EXCEPT the IP register, for fetch and + * decoding purposes. + * + * This was done for an entirely different reason, but makes a + * nice way to get the system to help debug codes. + */ + tregs = M; + tregs.x86.R_IP = off; + tregs.x86.R_CS = seg; + + /* reset the decoding buffers */ + tregs.x86.enc_str_pos = 0; + tregs.x86.enc_pos = 0; + + /* turn on the "disassemble only, no execute" flag */ + tregs.x86.debug |= DEBUG_DISASSEMBLE_F; + + /* DUMP NEXT n instructions to screen in straight_line fashion */ + /* + * This looks like the regular instruction fetch stream, except + * that when this occurs, each fetched opcode, upon seeing the + * DEBUG_DISASSEMBLE flag set, exits immediately after decoding + * the instruction. XXX --- CHECK THAT MEM IS NOT AFFECTED!!! + * Note the use of a copy of the register structure... 
+ */ + for (i=0; i 256) return; + seg = fetch_data_word_abs(0,iv*4); + off = fetch_data_word_abs(0,iv*4+2); + rom_printk("%04x:%04x ", seg, off); +} + +void X86EMU_dump_memory (u16 seg, u16 off, u32 amt) +{ + u32 start = off & 0xfffffff0; + u32 end = (off+16) & 0xfffffff0; + u32 i; + u32 current; + + current = start; + while (end <= off + amt) { + rom_printk("%04x:%04x ", seg, start); + for (i=start; i< off; i++) + rom_printk(" "); + for ( ; i< end; i++) + rom_printk("%02x ", fetch_data_byte_abs(seg,i)); + rom_printk("\n"); + start = end; + end = start + 16; + } +} + +#if 0 + +void x86emu_single_step (void) +{ + char s[1024]; + int ps[10]; + int ntok; + int cmd; + int done; + int segment; + int offset; + static int breakpoint; + static int noDecode = 1; + + char *p; + + if (DEBUG_BREAK()) { + if (M.x86.saved_ip != breakpoint) { + return; + } else { + M.x86.debug &= ~DEBUG_DECODE_NOPRINT_F; + M.x86.debug |= DEBUG_TRACE_F; + M.x86.debug &= ~DEBUG_BREAK_F; + print_decoded_instruction (); + X86EMU_trace_regs(); + } + } + done=0; + offset = M.x86.saved_ip; + while (!done) { + rom_printk("-"); + p = fgets(s, 1023, stdin); + cmd = parse_line(s, ps, &ntok); + switch(cmd) { + case 'u': + disassemble_forward(M.x86.saved_cs,(u16)offset,10); + break; + case 'd': + if (ntok == 2) { + segment = M.x86.saved_cs; + offset = ps[1]; + X86EMU_dump_memory(segment,(u16)offset,16); + offset += 16; + } else if (ntok == 3) { + segment = ps[1]; + offset = ps[2]; + X86EMU_dump_memory(segment,(u16)offset,16); + offset += 16; + } else { + segment = M.x86.saved_cs; + X86EMU_dump_memory(segment,(u16)offset,16); + offset += 16; + } + break; + case 'c': + M.x86.debug ^= DEBUG_TRACECALL_F; + break; + case 's': + M.x86.debug ^= DEBUG_SVC_F | DEBUG_SYS_F | DEBUG_SYSINT_F; + break; + case 'r': + X86EMU_trace_regs(); + break; + case 'x': + X86EMU_trace_xregs(); + break; + case 'g': + if (ntok == 2) { + breakpoint = ps[1]; + if (noDecode) { + M.x86.debug |= DEBUG_DECODE_NOPRINT_F; + } else { + 
M.x86.debug &= ~DEBUG_DECODE_NOPRINT_F; + } + M.x86.debug &= ~DEBUG_TRACE_F; + M.x86.debug |= DEBUG_BREAK_F; + done = 1; + } + break; + case 'q': + M.x86.debug |= DEBUG_EXIT; + return; + case 'P': + noDecode = (noDecode)?0:1; + rom_printk("Toggled decoding to %s\n",(noDecode)?"FALSE":"TRUE"); + break; + case 't': + case 0: + done = 1; + break; + } + } +} + +#endif + +int X86EMU_trace_on(void) +{ + return M.x86.debug |= DEBUG_STEP_F | DEBUG_DECODE_F | DEBUG_TRACE_F; +} + +int X86EMU_trace_off(void) +{ + return M.x86.debug &= ~(DEBUG_STEP_F | DEBUG_DECODE_F | DEBUG_TRACE_F); +} + + +int X86EMU_set_debug(int debug) +{ + return M.x86.debug = debug; +} + +#if 0 + +static int parse_line (char *s, int *ps, int *n) +{ + int cmd; + + *n = 0; + while(*s == ' ' || *s == '\t') s++; + ps[*n] = *s; + switch (*s) { + case '\n': + *n += 1; + return 0; + default: + cmd = *s; + *n += 1; + } + + while (1) { + while (*s != ' ' && *s != '\t' && *s != '\n') s++; + + if (*s == '\n') + return cmd; + + while(*s == ' ' || *s == '\t') s++; + + sscanf(s,"%x",&ps[*n]); + *n += 1; + } +} + +#endif + +#endif /* DEBUG */ + +void x86emu_dump_regs (void) +{ + rom_printk("\tAX=%04x ", M.x86.R_AX ); + rom_printk("BX=%04x ", M.x86.R_BX ); + rom_printk("CX=%04x ", M.x86.R_CX ); + rom_printk("DX=%04x ", M.x86.R_DX ); + rom_printk("SP=%04x ", M.x86.R_SP ); + rom_printk("BP=%04x ", M.x86.R_BP ); + rom_printk("SI=%04x ", M.x86.R_SI ); + rom_printk("DI=%04x\n", M.x86.R_DI ); + rom_printk("\tDS=%04x ", M.x86.R_DS ); + rom_printk("ES=%04x ", M.x86.R_ES ); + rom_printk("SS=%04x ", M.x86.R_SS ); + rom_printk("CS=%04x ", M.x86.R_CS ); + rom_printk("IP=%04x ", M.x86.R_IP ); + if (ACCESS_FLAG(F_OF)) rom_printk("OV "); /* CHECKED... 
*/ + else rom_printk("NV "); + if (ACCESS_FLAG(F_DF)) rom_printk("DN "); + else rom_printk("UP "); + if (ACCESS_FLAG(F_IF)) rom_printk("EI "); + else rom_printk("DI "); + if (ACCESS_FLAG(F_SF)) rom_printk("NG "); + else rom_printk("PL "); + if (ACCESS_FLAG(F_ZF)) rom_printk("ZR "); + else rom_printk("NZ "); + if (ACCESS_FLAG(F_AF)) rom_printk("AC "); + else rom_printk("NA "); + if (ACCESS_FLAG(F_PF)) rom_printk("PE "); + else rom_printk("PO "); + if (ACCESS_FLAG(F_CF)) rom_printk("CY "); + else rom_printk("NC "); + rom_printk("\n"); +} + +void x86emu_dump_xregs (void) +{ + rom_printk("\tEAX=%08x ", M.x86.R_EAX ); + rom_printk("EBX=%08x ", M.x86.R_EBX ); + rom_printk("ECX=%08x ", M.x86.R_ECX ); + rom_printk("EDX=%08x \n", M.x86.R_EDX ); + rom_printk("\tESP=%08x ", M.x86.R_ESP ); + rom_printk("EBP=%08x ", M.x86.R_EBP ); + rom_printk("ESI=%08x ", M.x86.R_ESI ); + rom_printk("EDI=%08x\n", M.x86.R_EDI ); + rom_printk("\tDS=%04x ", M.x86.R_DS ); + rom_printk("ES=%04x ", M.x86.R_ES ); + rom_printk("SS=%04x ", M.x86.R_SS ); + rom_printk("CS=%04x ", M.x86.R_CS ); + rom_printk("EIP=%08x\n\t", M.x86.R_EIP ); + if (ACCESS_FLAG(F_OF)) rom_printk("OV "); /* CHECKED... 
*/ + else rom_printk("NV "); + if (ACCESS_FLAG(F_DF)) rom_printk("DN "); + else rom_printk("UP "); + if (ACCESS_FLAG(F_IF)) rom_printk("EI "); + else rom_printk("DI "); + if (ACCESS_FLAG(F_SF)) rom_printk("NG "); + else rom_printk("PL "); + if (ACCESS_FLAG(F_ZF)) rom_printk("ZR "); + else rom_printk("NZ "); + if (ACCESS_FLAG(F_AF)) rom_printk("AC "); + else rom_printk("NA "); + if (ACCESS_FLAG(F_PF)) rom_printk("PE "); + else rom_printk("PO "); + if (ACCESS_FLAG(F_CF)) rom_printk("CY "); + else rom_printk("NC "); + rom_printk("\n"); +} diff --git a/arch/e2k/boot/bios/video/x86emu/src/x86emu/decode.c b/arch/e2k/boot/bios/video/x86emu/src/x86emu/decode.c new file mode 100644 index 000000000000..910d1e979697 --- /dev/null +++ b/arch/e2k/boot/bios/video/x86emu/src/x86emu/decode.c @@ -0,0 +1,1148 @@ +/**************************************************************************** +* +* Realmode X86 Emulator Library +* +* Copyright (C) 1991-2004 SciTech Software, Inc. +* Copyright (C) David Mosberger-Tang +* Copyright (C) 1999 Egbert Eich +* +* ======================================================================== +* +* Permission to use, copy, modify, distribute, and sell this software and +* its documentation for any purpose is hereby granted without fee, +* provided that the above copyright notice appear in all copies and that +* both that copyright notice and this permission notice appear in +* supporting documentation, and that the name of the authors not be used +* in advertising or publicity pertaining to distribution of the software +* without specific, written prior permission. The authors makes no +* representations about the suitability of this software for any purpose. +* It is provided "as is" without express or implied warranty. 
+* +* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR +* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF +* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +* PERFORMANCE OF THIS SOFTWARE. +* +* ======================================================================== +* +* Language: ANSI C +* Environment: Any +* Developer: Kendall Bennett +* +* Description: This file includes subroutines which are related to +* instruction decoding and accessess of immediate data via IP. etc. +* +****************************************************************************/ + +#include "x86emu/x86emui.h" + +/*----------------------------- Implementation ----------------------------*/ + +/**************************************************************************** +REMARKS: +Handles any pending asychronous interrupts. +****************************************************************************/ +static void x86emu_intr_handle(void) +{ + u8 intno; + + if (M.x86.intr & INTR_SYNCH) { + intno = M.x86.intno; + if (_X86EMU_intrTab[intno]) { + (*_X86EMU_intrTab[intno])(intno); + } else { + push_word((u16)M.x86.R_FLG); + CLEAR_FLAG(F_IF); + CLEAR_FLAG(F_TF); + push_word(M.x86.R_CS); + M.x86.R_CS = mem_access_word(intno * 4 + 2); + push_word(M.x86.R_IP); + M.x86.R_IP = mem_access_word(intno * 4); + M.x86.intr = 0; + } + } +} + +/**************************************************************************** +PARAMETERS: +intrnum - Interrupt number to raise + +REMARKS: +Raise the specified interrupt to be handled before the execution of the +next instruction. 
****************************************************************************/
void x86emu_intr_raise(
    u8 intrnum)
{
    /* Record the vector number and mark a synchronous interrupt pending;
       it is serviced by X86EMU_exec() before the next instruction fetch. */
    M.x86.intno = intrnum;
    M.x86.intr |= INTR_SYNCH;
}

/****************************************************************************
REMARKS:
Main execution loop for the emulator. We return from here when the system
halts, which is normally caused by a stack fault when we return from the
original real mode call.
****************************************************************************/
void X86EMU_exec(void)
{
    u8 op1;

    M.x86.intr = 0;
    DB(x86emu_end_instr();)

    /* Fetch/decode/execute loop: runs until the guest halts (INTR_HALTED)
       or the debugger requests an exit (DEBUG_EXIT). */
    for (;;) {
DB(	if (CHECK_IP_FETCH())
	    x86emu_check_ip_access();)
        /* If debugging, save the IP and CS values. */
        SAVE_IP_CS(M.x86.R_CS, M.x86.R_IP);
        INC_DECODED_INST_LEN(1);
        if (M.x86.intr) {
            /* A halted system returns to the caller; with debugging on,
               a non-zero SP at halt time is reported as an error. */
            if (M.x86.intr & INTR_HALTED) {
DB(		if (M.x86.R_SP != 0) {
		    printk("halted\n");
		    X86EMU_trace_regs();
		}
		else {
		    if (M.x86.debug)
			printk("Service completed successfully\n");
		})
                return;
            }
            /* Divide error (0) and NMI (2) are always serviced.
               NOTE(review): other pending interrupts are serviced here when
               IF is *clear*, which looks inverted relative to real hardware
               (interrupts are normally taken when IF is set) — verify
               against upstream x86emu before changing. */
            if (((M.x86.intr & INTR_SYNCH) && (M.x86.intno == 0 || M.x86.intno == 2)) ||
                !ACCESS_FLAG(F_IF)) {
                x86emu_intr_handle();
            }
        }
        /* Fetch the opcode byte at CS:IP (realmode: segment << 4 + offset)
           and dispatch through the opcode handler table. */
        op1 = (*sys_rdb)(((u32)M.x86.R_CS << 4) + (M.x86.R_IP++));
        (*x86emu_optab[op1])(op1);
        if (M.x86.debug & DEBUG_EXIT) {
            M.x86.debug &= ~DEBUG_EXIT;
            return;
        }
    }
}

/****************************************************************************
REMARKS:
Halts the system by setting the halted system flag.
****************************************************************************/
void X86EMU_halt_sys(void)
{
    M.x86.intr |= INTR_HALTED;
}

/****************************************************************************
PARAMETERS:
mod - Mod value from decoded byte
regh - Reg h value from decoded byte
regl - Reg l value from decoded byte

REMARKS:
Fetches the ModR/M byte that follows the opcode and splits it into its
mod (bits 7-6), reg (bits 5-3) and r/m (bits 2-0) fields.

NOTE: Do not inline this function, as (*sys_rdb) is already inline!
+****************************************************************************/ +void fetch_decode_modrm( + int *mod, + int *regh, + int *regl) +{ + int fetched; + +DB( if (CHECK_IP_FETCH()) + x86emu_check_ip_access();) + fetched = (*sys_rdb)(((u32)M.x86.R_CS << 4) + (M.x86.R_IP++)); + INC_DECODED_INST_LEN(1); + *mod = (fetched >> 6) & 0x03; + *regh = (fetched >> 3) & 0x07; + *regl = (fetched >> 0) & 0x07; +} + +/**************************************************************************** +RETURNS: +Immediate byte value read from instruction queue + +REMARKS: +This function returns the immediate byte from the instruction queue, and +moves the instruction pointer to the next value. + +NOTE: Do not inline this function, as (*sys_rdb) is already inline! +****************************************************************************/ +u8 fetch_byte_imm(void) +{ + u8 fetched; + +DB( if (CHECK_IP_FETCH()) + x86emu_check_ip_access();) + fetched = (*sys_rdb)(((u32)M.x86.R_CS << 4) + (M.x86.R_IP++)); + INC_DECODED_INST_LEN(1); + return fetched; +} + +/**************************************************************************** +RETURNS: +Immediate word value read from instruction queue + +REMARKS: +This function returns the immediate byte from the instruction queue, and +moves the instruction pointer to the next value. + +NOTE: Do not inline this function, as (*sys_rdw) is already inline! +****************************************************************************/ +u16 fetch_word_imm(void) +{ + u16 fetched; + +DB( if (CHECK_IP_FETCH()) + x86emu_check_ip_access();) + fetched = (*sys_rdw)(((u32)M.x86.R_CS << 4) + (M.x86.R_IP)); + M.x86.R_IP += 2; + INC_DECODED_INST_LEN(2); + return fetched; +} + +/**************************************************************************** +RETURNS: +Immediate lone value read from instruction queue + +REMARKS: +This function returns the immediate byte from the instruction queue, and +moves the instruction pointer to the next value. 
+ +NOTE: Do not inline this function, as (*sys_rdw) is already inline! +****************************************************************************/ +u32 fetch_long_imm(void) +{ + u32 fetched; + +DB( if (CHECK_IP_FETCH()) + x86emu_check_ip_access();) + fetched = (*sys_rdl)(((u32)M.x86.R_CS << 4) + (M.x86.R_IP)); + M.x86.R_IP += 4; + INC_DECODED_INST_LEN(4); + return fetched; +} + +/**************************************************************************** +RETURNS: +Value of the default data segment + +REMARKS: +Inline function that returns the default data segment for the current +instruction. + +On the x86 processor, the default segment is not always DS if there is +no segment override. Address modes such as -3[BP] or 10[BP+SI] all refer to +addresses relative to SS (ie: on the stack). So, at the minimum, all +decodings of addressing modes would have to set/clear a bit describing +whether the access is relative to DS or SS. That is the function of the +cpu-state-varible M.x86.mode. There are several potential states: + + repe prefix seen (handled elsewhere) + repne prefix seen (ditto) + + cs segment override + ds segment override + es segment override + fs segment override + gs segment override + ss segment override + + ds/ss select (in absense of override) + +Each of the above 7 items are handled with a bit in the mode field. 
+****************************************************************************/ +_INLINE u32 get_data_segment(void) +{ +#define GET_SEGMENT(segment) + switch (M.x86.mode & SYSMODE_SEGMASK) { + case 0: /* default case: use ds register */ + case SYSMODE_SEGOVR_DS: + case SYSMODE_SEGOVR_DS | SYSMODE_SEG_DS_SS: + return M.x86.R_DS; + case SYSMODE_SEG_DS_SS: /* non-overridden, use ss register */ + return M.x86.R_SS; + case SYSMODE_SEGOVR_CS: + case SYSMODE_SEGOVR_CS | SYSMODE_SEG_DS_SS: + return M.x86.R_CS; + case SYSMODE_SEGOVR_ES: + case SYSMODE_SEGOVR_ES | SYSMODE_SEG_DS_SS: + return M.x86.R_ES; + case SYSMODE_SEGOVR_FS: + case SYSMODE_SEGOVR_FS | SYSMODE_SEG_DS_SS: + return M.x86.R_FS; + case SYSMODE_SEGOVR_GS: + case SYSMODE_SEGOVR_GS | SYSMODE_SEG_DS_SS: + return M.x86.R_GS; + case SYSMODE_SEGOVR_SS: + case SYSMODE_SEGOVR_SS | SYSMODE_SEG_DS_SS: + return M.x86.R_SS; + default: +#ifdef DEBUG + printk("error: should not happen: multiple overrides.\n"); +#endif + HALT_SYS(); + return 0; + } +} + +/**************************************************************************** +PARAMETERS: +offset - Offset to load data from + +RETURNS: +Byte value read from the absolute memory location. + +NOTE: Do not inline this function as (*sys_rdX) is already inline! +****************************************************************************/ +u8 fetch_data_byte( + uint offset) +{ +#ifdef DEBUG + if (CHECK_DATA_ACCESS()) + x86emu_check_data_access((u16)get_data_segment(), offset); +#endif + return (*sys_rdb)((get_data_segment() << 4) + offset); +} + +/**************************************************************************** +PARAMETERS: +offset - Offset to load data from + +RETURNS: +Word value read from the absolute memory location. + +NOTE: Do not inline this function as (*sys_rdX) is already inline! 
+****************************************************************************/ +u16 fetch_data_word( + uint offset) +{ +#ifdef DEBUG + if (CHECK_DATA_ACCESS()) + x86emu_check_data_access((u16)get_data_segment(), offset); +#endif + return (*sys_rdw)((get_data_segment() << 4) + offset); +} + +/**************************************************************************** +PARAMETERS: +offset - Offset to load data from + +RETURNS: +Long value read from the absolute memory location. + +NOTE: Do not inline this function as (*sys_rdX) is already inline! +****************************************************************************/ +u32 fetch_data_long( + uint offset) +{ +#ifdef DEBUG + if (CHECK_DATA_ACCESS()) + x86emu_check_data_access((u16)get_data_segment(), offset); +#endif + return (*sys_rdl)((get_data_segment() << 4) + offset); +} + +/**************************************************************************** +PARAMETERS: +segment - Segment to load data from +offset - Offset to load data from + +RETURNS: +Byte value read from the absolute memory location. + +NOTE: Do not inline this function as (*sys_rdX) is already inline! +****************************************************************************/ +u8 fetch_data_byte_abs( + uint segment, + uint offset) +{ +#ifdef DEBUG + if (CHECK_DATA_ACCESS()) + x86emu_check_data_access(segment, offset); +#endif + return (*sys_rdb)(((u32)segment << 4) + offset); +} + +/**************************************************************************** +PARAMETERS: +segment - Segment to load data from +offset - Offset to load data from + +RETURNS: +Word value read from the absolute memory location. + +NOTE: Do not inline this function as (*sys_rdX) is already inline! 
+****************************************************************************/ +u16 fetch_data_word_abs( + uint segment, + uint offset) +{ +#ifdef DEBUG + if (CHECK_DATA_ACCESS()) + x86emu_check_data_access(segment, offset); +#endif + return (*sys_rdw)(((u32)segment << 4) + offset); +} + +/**************************************************************************** +PARAMETERS: +segment - Segment to load data from +offset - Offset to load data from + +RETURNS: +Long value read from the absolute memory location. + +NOTE: Do not inline this function as (*sys_rdX) is already inline! +****************************************************************************/ +u32 fetch_data_long_abs( + uint segment, + uint offset) +{ +#ifdef DEBUG + if (CHECK_DATA_ACCESS()) + x86emu_check_data_access(segment, offset); +#endif + return (*sys_rdl)(((u32)segment << 4) + offset); +} + +/**************************************************************************** +PARAMETERS: +offset - Offset to store data at +val - Value to store + +REMARKS: +Writes a word value to an segmented memory location. The segment used is +the current 'default' segment, which may have been overridden. + +NOTE: Do not inline this function as (*sys_wrX) is already inline! +****************************************************************************/ +void store_data_byte( + uint offset, + u8 val) +{ +#ifdef DEBUG + if (CHECK_DATA_ACCESS()) + x86emu_check_data_access((u16)get_data_segment(), offset); +#endif + (*sys_wrb)((get_data_segment() << 4) + offset, val); +} + +/**************************************************************************** +PARAMETERS: +offset - Offset to store data at +val - Value to store + +REMARKS: +Writes a word value to an segmented memory location. The segment used is +the current 'default' segment, which may have been overridden. + +NOTE: Do not inline this function as (*sys_wrX) is already inline! 
+****************************************************************************/ +void store_data_word( + uint offset, + u16 val) +{ +#ifdef DEBUG + if (CHECK_DATA_ACCESS()) + x86emu_check_data_access((u16)get_data_segment(), offset); +#endif + (*sys_wrw)((get_data_segment() << 4) + offset, val); +} + +/**************************************************************************** +PARAMETERS: +offset - Offset to store data at +val - Value to store + +REMARKS: +Writes a long value to an segmented memory location. The segment used is +the current 'default' segment, which may have been overridden. + +NOTE: Do not inline this function as (*sys_wrX) is already inline! +****************************************************************************/ +void store_data_long( + uint offset, + u32 val) +{ +#ifdef DEBUG + if (CHECK_DATA_ACCESS()) + x86emu_check_data_access((u16)get_data_segment(), offset); +#endif + (*sys_wrl)((get_data_segment() << 4) + offset, val); +} + +/**************************************************************************** +PARAMETERS: +segment - Segment to store data at +offset - Offset to store data at +val - Value to store + +REMARKS: +Writes a byte value to an absolute memory location. + +NOTE: Do not inline this function as (*sys_wrX) is already inline! +****************************************************************************/ +void store_data_byte_abs( + uint segment, + uint offset, + u8 val) +{ +#ifdef DEBUG + if (CHECK_DATA_ACCESS()) + x86emu_check_data_access(segment, offset); +#endif + (*sys_wrb)(((u32)segment << 4) + offset, val); +} + +/**************************************************************************** +PARAMETERS: +segment - Segment to store data at +offset - Offset to store data at +val - Value to store + +REMARKS: +Writes a word value to an absolute memory location. + +NOTE: Do not inline this function as (*sys_wrX) is already inline! 
+****************************************************************************/ +void store_data_word_abs( + uint segment, + uint offset, + u16 val) +{ +#ifdef DEBUG + if (CHECK_DATA_ACCESS()) + x86emu_check_data_access(segment, offset); +#endif + (*sys_wrw)(((u32)segment << 4) + offset, val); +} + +/**************************************************************************** +PARAMETERS: +segment - Segment to store data at +offset - Offset to store data at +val - Value to store + +REMARKS: +Writes a long value to an absolute memory location. + +NOTE: Do not inline this function as (*sys_wrX) is already inline! +****************************************************************************/ +void store_data_long_abs( + uint segment, + uint offset, + u32 val) +{ +#ifdef DEBUG + if (CHECK_DATA_ACCESS()) + x86emu_check_data_access(segment, offset); +#endif + (*sys_wrl)(((u32)segment << 4) + offset, val); +} + +/**************************************************************************** +PARAMETERS: +reg - Register to decode + +RETURNS: +Pointer to the appropriate register + +REMARKS: +Return a pointer to the register given by the R/RM field of the +modrm byte, for byte operands. Also enables the decoding of instructions. 
+****************************************************************************/ +u8* decode_rm_byte_register( + int reg) +{ + switch (reg) { + case 0: + DECODE_PRINTF("AL"); + return &M.x86.R_AL; + case 1: + DECODE_PRINTF("CL"); + return &M.x86.R_CL; + case 2: + DECODE_PRINTF("DL"); + return &M.x86.R_DL; + case 3: + DECODE_PRINTF("BL"); + return &M.x86.R_BL; + case 4: + DECODE_PRINTF("AH"); + return &M.x86.R_AH; + case 5: + DECODE_PRINTF("CH"); + return &M.x86.R_CH; + case 6: + DECODE_PRINTF("DH"); + return &M.x86.R_DH; + case 7: + DECODE_PRINTF("BH"); + return &M.x86.R_BH; + } + HALT_SYS(); + return NULL; /* NOT REACHED OR REACHED ON ERROR */ +} + +/**************************************************************************** +PARAMETERS: +reg - Register to decode + +RETURNS: +Pointer to the appropriate register + +REMARKS: +Return a pointer to the register given by the R/RM field of the +modrm byte, for word operands. Also enables the decoding of instructions. +****************************************************************************/ +u16* decode_rm_word_register( + int reg) +{ + switch (reg) { + case 0: + DECODE_PRINTF("AX"); + return &M.x86.R_AX; + case 1: + DECODE_PRINTF("CX"); + return &M.x86.R_CX; + case 2: + DECODE_PRINTF("DX"); + return &M.x86.R_DX; + case 3: + DECODE_PRINTF("BX"); + return &M.x86.R_BX; + case 4: + DECODE_PRINTF("SP"); + return &M.x86.R_SP; + case 5: + DECODE_PRINTF("BP"); + return &M.x86.R_BP; + case 6: + DECODE_PRINTF("SI"); + return &M.x86.R_SI; + case 7: + DECODE_PRINTF("DI"); + return &M.x86.R_DI; + } + HALT_SYS(); + return NULL; /* NOTREACHED OR REACHED ON ERROR */ +} + +/**************************************************************************** +PARAMETERS: +reg - Register to decode + +RETURNS: +Pointer to the appropriate register + +REMARKS: +Return a pointer to the register given by the R/RM field of the +modrm byte, for dword operands. Also enables the decoding of instructions. 
+****************************************************************************/ +u32* decode_rm_long_register( + int reg) +{ + switch (reg) { + case 0: + DECODE_PRINTF("EAX"); + return &M.x86.R_EAX; + case 1: + DECODE_PRINTF("ECX"); + return &M.x86.R_ECX; + case 2: + DECODE_PRINTF("EDX"); + return &M.x86.R_EDX; + case 3: + DECODE_PRINTF("EBX"); + return &M.x86.R_EBX; + case 4: + DECODE_PRINTF("ESP"); + return &M.x86.R_ESP; + case 5: + DECODE_PRINTF("EBP"); + return &M.x86.R_EBP; + case 6: + DECODE_PRINTF("ESI"); + return &M.x86.R_ESI; + case 7: + DECODE_PRINTF("EDI"); + return &M.x86.R_EDI; + } + HALT_SYS(); + return NULL; /* NOTREACHED OR REACHED ON ERROR */ +} + +/**************************************************************************** +PARAMETERS: +reg - Register to decode + +RETURNS: +Pointer to the appropriate register + +REMARKS: +Return a pointer to the register given by the R/RM field of the +modrm byte, for word operands, modified from above for the weirdo +special case of segreg operands. Also enables the decoding of instructions. +****************************************************************************/ +u16* decode_rm_seg_register( + int reg) +{ + switch (reg) { + case 0: + DECODE_PRINTF("ES"); + return &M.x86.R_ES; + case 1: + DECODE_PRINTF("CS"); + return &M.x86.R_CS; + case 2: + DECODE_PRINTF("SS"); + return &M.x86.R_SS; + case 3: + DECODE_PRINTF("DS"); + return &M.x86.R_DS; + case 4: + DECODE_PRINTF("FS"); + return &M.x86.R_FS; + case 5: + DECODE_PRINTF("GS"); + return &M.x86.R_GS; + case 6: + case 7: + DECODE_PRINTF("ILLEGAL SEGREG"); + break; + } + HALT_SYS(); + return NULL; /* NOT REACHED OR REACHED ON ERROR */ +} + +/**************************************************************************** +PARAMETERS: +scale - scale value of SIB byte +index - index value of SIB byte + +RETURNS: +Value of scale * index + +REMARKS: +Decodes scale/index of SIB byte and returns relevant offset part of +effective address. 
+****************************************************************************/ +unsigned decode_sib_si( + int scale, + int index) +{ + scale = 1 << scale; + if (scale > 1) { + DECODE_PRINTF2("[%d*", scale); + } else { + DECODE_PRINTF("["); + } + switch (index) { + case 0: + DECODE_PRINTF("EAX]"); + return M.x86.R_EAX * index; + case 1: + DECODE_PRINTF("ECX]"); + return M.x86.R_ECX * index; + case 2: + DECODE_PRINTF("EDX]"); + return M.x86.R_EDX * index; + case 3: + DECODE_PRINTF("EBX]"); + return M.x86.R_EBX * index; + case 4: + DECODE_PRINTF("0]"); + return 0; + case 5: + DECODE_PRINTF("EBP]"); + return M.x86.R_EBP * index; + case 6: + DECODE_PRINTF("ESI]"); + return M.x86.R_ESI * index; + case 7: + DECODE_PRINTF("EDI]"); + return M.x86.R_EDI * index; + } + HALT_SYS(); + return 0; /* NOT REACHED OR REACHED ON ERROR */ +} + +/**************************************************************************** +PARAMETERS: +mod - MOD value of preceding ModR/M byte + +RETURNS: +Offset in memory for the address decoding + +REMARKS: +Decodes SIB addressing byte and returns calculated effective address. 
+****************************************************************************/ +unsigned decode_sib_address( + int mod) +{ + int sib = fetch_byte_imm(); + int ss = (sib >> 6) & 0x03; + int index = (sib >> 3) & 0x07; + int base = sib & 0x07; + int offset = 0; + int displacement; + + switch (base) { + case 0: + DECODE_PRINTF("[EAX]"); + offset = M.x86.R_EAX; + break; + case 1: + DECODE_PRINTF("[ECX]"); + offset = M.x86.R_ECX; + break; + case 2: + DECODE_PRINTF("[EDX]"); + offset = M.x86.R_EDX; + break; + case 3: + DECODE_PRINTF("[EBX]"); + offset = M.x86.R_EBX; + break; + case 4: + DECODE_PRINTF("[ESP]"); + offset = M.x86.R_ESP; + break; + case 5: + switch (mod) { + case 0: + displacement = (s32)fetch_long_imm(); + DECODE_PRINTF2("[%d]", displacement); + offset = displacement; + break; + case 1: + displacement = (s8)fetch_byte_imm(); + DECODE_PRINTF2("[%d][EBP]", displacement); + offset = M.x86.R_EBP + displacement; + break; + case 2: + displacement = (s32)fetch_long_imm(); + DECODE_PRINTF2("[%d][EBP]", displacement); + offset = M.x86.R_EBP + displacement; + break; + default: + HALT_SYS(); + } + DECODE_PRINTF("[EAX]"); + offset = M.x86.R_EAX; + break; + case 6: + DECODE_PRINTF("[ESI]"); + offset = M.x86.R_ESI; + break; + case 7: + DECODE_PRINTF("[EDI]"); + offset = M.x86.R_EDI; + break; + default: + HALT_SYS(); + } + offset += decode_sib_si(ss, index); + return offset; + +} + +/**************************************************************************** +PARAMETERS: +rm - RM value to decode + +RETURNS: +Offset in memory for the address decoding + +REMARKS: +Return the offset given by mod=00 addressing. Also enables the +decoding of instructions. + +NOTE: The code which specifies the corresponding segment (ds vs ss) + below in the case of [BP+..]. The assumption here is that at the + point that this subroutine is called, the bit corresponding to + SYSMODE_SEG_DS_SS will be zero. 
      After every instruction
      except the segment override instructions, this bit (as well
      as any bits indicating segment overrides) will be clear. So
      if a SS access is needed, set this bit. Otherwise, DS access
      occurs (unless any of the segment override bits are set).
****************************************************************************/
unsigned decode_rm00_address(
    int rm)
{
    unsigned offset;

    if (M.x86.mode & SYSMODE_PREFIX_ADDR) {
        /* 32-bit addressing */
        switch (rm) {
        case 0:
            DECODE_PRINTF("[EAX]");
            return M.x86.R_EAX;
        case 1:
            DECODE_PRINTF("[ECX]");
            return M.x86.R_ECX;
        case 2:
            DECODE_PRINTF("[EDX]");
            return M.x86.R_EDX;
        case 3:
            DECODE_PRINTF("[EBX]");
            return M.x86.R_EBX;
        case 4:
            /* rm == 100b: a SIB byte follows (mod == 0 form). */
            return decode_sib_address(0);
        case 5:
            /* rm == 101b with mod == 0: absolute 32-bit displacement. */
            offset = fetch_long_imm();
            DECODE_PRINTF2("[%08x]", offset);
            return offset;
        case 6:
            DECODE_PRINTF("[ESI]");
            return M.x86.R_ESI;
        case 7:
            DECODE_PRINTF("[EDI]");
            return M.x86.R_EDI;
        }
    } else {
        /* 16-bit addressing */
        switch (rm) {
        case 0:
            DECODE_PRINTF("[BX+SI]");
            return (M.x86.R_BX + M.x86.R_SI) & 0xffff;
        case 1:
            DECODE_PRINTF("[BX+DI]");
            return (M.x86.R_BX + M.x86.R_DI) & 0xffff;
        case 2:
            DECODE_PRINTF("[BP+SI]");
            /* BP-based addressing defaults to the SS segment. */
            M.x86.mode |= SYSMODE_SEG_DS_SS;
            return (M.x86.R_BP + M.x86.R_SI) & 0xffff;
        case 3:
            DECODE_PRINTF("[BP+DI]");
            M.x86.mode |= SYSMODE_SEG_DS_SS;
            return (M.x86.R_BP + M.x86.R_DI) & 0xffff;
        case 4:
            DECODE_PRINTF("[SI]");
            return M.x86.R_SI;
        case 5:
            DECODE_PRINTF("[DI]");
            return M.x86.R_DI;
        case 6:
            /* rm == 110b with mod == 0: direct 16-bit address. */
            offset = fetch_word_imm();
            DECODE_PRINTF2("[%04x]", offset);
            return offset;
        case 7:
            DECODE_PRINTF("[BX]");
            return M.x86.R_BX;
        }
    }
    HALT_SYS();
    return 0;
}

/****************************************************************************
PARAMETERS:
rm - RM value to decode

RETURNS:
Offset in memory for the address decoding

REMARKS:
Return the offset given by mod=01 addressing.
Also enables the +decoding of instructions. +****************************************************************************/ +unsigned decode_rm01_address( + int rm) +{ + int displacement; + + if (M.x86.mode & SYSMODE_PREFIX_ADDR) { + /* 32-bit addressing */ + if (rm != 4) + displacement = (s8)fetch_byte_imm(); + else + displacement = 0; + + switch (rm) { + case 0: + DECODE_PRINTF2("%d[EAX]", displacement); + return M.x86.R_EAX + displacement; + case 1: + DECODE_PRINTF2("%d[ECX]", displacement); + return M.x86.R_ECX + displacement; + case 2: + DECODE_PRINTF2("%d[EDX]", displacement); + return M.x86.R_EDX + displacement; + case 3: + DECODE_PRINTF2("%d[EBX]", displacement); + return M.x86.R_EBX + displacement; + case 4: { + int offset = decode_sib_address(1); + displacement = (s8)fetch_byte_imm(); + DECODE_PRINTF2("[%d]", displacement); + return offset + displacement; + } + case 5: + DECODE_PRINTF2("%d[EBP]", displacement); + return M.x86.R_EBP + displacement; + case 6: + DECODE_PRINTF2("%d[ESI]", displacement); + return M.x86.R_ESI + displacement; + case 7: + DECODE_PRINTF2("%d[EDI]", displacement); + return M.x86.R_EDI + displacement; + } + } else { + /* 16-bit addressing */ + displacement = (s8)fetch_byte_imm(); + switch (rm) { + case 0: + DECODE_PRINTF2("%d[BX+SI]", displacement); + return (M.x86.R_BX + M.x86.R_SI + displacement) & 0xffff; + case 1: + DECODE_PRINTF2("%d[BX+DI]", displacement); + return (M.x86.R_BX + M.x86.R_DI + displacement) & 0xffff; + case 2: + DECODE_PRINTF2("%d[BP+SI]", displacement); + M.x86.mode |= SYSMODE_SEG_DS_SS; + return (M.x86.R_BP + M.x86.R_SI + displacement) & 0xffff; + case 3: + DECODE_PRINTF2("%d[BP+DI]", displacement); + M.x86.mode |= SYSMODE_SEG_DS_SS; + return (M.x86.R_BP + M.x86.R_DI + displacement) & 0xffff; + case 4: + DECODE_PRINTF2("%d[SI]", displacement); + return (M.x86.R_SI + displacement) & 0xffff; + case 5: + DECODE_PRINTF2("%d[DI]", displacement); + return (M.x86.R_DI + displacement) & 0xffff; + case 6: + 
DECODE_PRINTF2("%d[BP]", displacement); + M.x86.mode |= SYSMODE_SEG_DS_SS; + return (M.x86.R_BP + displacement) & 0xffff; + case 7: + DECODE_PRINTF2("%d[BX]", displacement); + return (M.x86.R_BX + displacement) & 0xffff; + } + } + HALT_SYS(); + return 0; /* SHOULD NOT HAPPEN */ +} + +/**************************************************************************** +PARAMETERS: +rm - RM value to decode + +RETURNS: +Offset in memory for the address decoding + +REMARKS: +Return the offset given by mod=10 addressing. Also enables the +decoding of instructions. +****************************************************************************/ +unsigned decode_rm10_address( + int rm) +{ + if (M.x86.mode & SYSMODE_PREFIX_ADDR) { + int displacement; + + /* 32-bit addressing */ + if (rm != 4) + displacement = (s32)fetch_long_imm(); + else + displacement = 0; + + switch (rm) { + case 0: + DECODE_PRINTF2("%d[EAX]", displacement); + return M.x86.R_EAX + displacement; + case 1: + DECODE_PRINTF2("%d[ECX]", displacement); + return M.x86.R_ECX + displacement; + case 2: + DECODE_PRINTF2("%d[EDX]", displacement); + return M.x86.R_EDX + displacement; + case 3: + DECODE_PRINTF2("%d[EBX]", displacement); + return M.x86.R_EBX + displacement; + case 4: { + int offset = decode_sib_address(2); + displacement = (s32)fetch_long_imm(); + DECODE_PRINTF2("[%d]", displacement); + return offset + displacement; + } + case 5: + DECODE_PRINTF2("%d[EBP]", displacement); + return M.x86.R_EBP + displacement; + case 6: + DECODE_PRINTF2("%d[ESI]", displacement); + return M.x86.R_ESI + displacement; + case 7: + DECODE_PRINTF2("%d[EDI]", displacement); + return M.x86.R_EDI + displacement; + } + } else { + int displacement = (s16)fetch_word_imm(); + + /* 16-bit addressing */ + switch (rm) { + case 0: + DECODE_PRINTF2("%d[BX+SI]", displacement); + return (M.x86.R_BX + M.x86.R_SI + displacement) & 0xffff; + case 1: + DECODE_PRINTF2("%d[BX+DI]", displacement); + return (M.x86.R_BX + M.x86.R_DI + displacement) & 
0xffff; + case 2: + DECODE_PRINTF2("%d[BP+SI]", displacement); + M.x86.mode |= SYSMODE_SEG_DS_SS; + return (M.x86.R_BP + M.x86.R_SI + displacement) & 0xffff; + case 3: + DECODE_PRINTF2("%d[BP+DI]", displacement); + M.x86.mode |= SYSMODE_SEG_DS_SS; + return (M.x86.R_BP + M.x86.R_DI + displacement) & 0xffff; + case 4: + DECODE_PRINTF2("%d[SI]", displacement); + return (M.x86.R_SI + displacement) & 0xffff; + case 5: + DECODE_PRINTF2("%d[DI]", displacement); + return (M.x86.R_DI + displacement) & 0xffff; + case 6: + DECODE_PRINTF2("%d[BP]", displacement); + M.x86.mode |= SYSMODE_SEG_DS_SS; + return (M.x86.R_BP + displacement) & 0xffff; + case 7: + DECODE_PRINTF2("%d[BX]", displacement); + return (M.x86.R_BX + displacement) & 0xffff; + } + } + HALT_SYS(); + return 0; /* SHOULD NOT HAPPEN */ +} + + +/**************************************************************************** +PARAMETERS: +mod - modifier +rm - RM value to decode + +RETURNS: +Offset in memory for the address decoding, multiplexing calls to +the decode_rmXX_address functions + +REMARKS: +Return the offset given by "mod" addressing. +****************************************************************************/ + +unsigned decode_rmXX_address(int mod, int rm) +{ + if(mod == 0) + return decode_rm00_address(rm); + if(mod == 1) + return decode_rm01_address(rm); + return decode_rm10_address(rm); +} + + + diff --git a/arch/e2k/boot/bios/video/x86emu/src/x86emu/fpu.c b/arch/e2k/boot/bios/video/x86emu/src/x86emu/fpu.c new file mode 100644 index 000000000000..5da363d6d5b5 --- /dev/null +++ b/arch/e2k/boot/bios/video/x86emu/src/x86emu/fpu.c @@ -0,0 +1,945 @@ +/**************************************************************************** +* +* Realmode X86 Emulator Library +* +* Copyright (C) 1991-2004 SciTech Software, Inc. 
+* Copyright (C) David Mosberger-Tang +* Copyright (C) 1999 Egbert Eich +* +* ======================================================================== +* +* Permission to use, copy, modify, distribute, and sell this software and +* its documentation for any purpose is hereby granted without fee, +* provided that the above copyright notice appear in all copies and that +* both that copyright notice and this permission notice appear in +* supporting documentation, and that the name of the authors not be used +* in advertising or publicity pertaining to distribution of the software +* without specific, written prior permission. The authors makes no +* representations about the suitability of this software for any purpose. +* It is provided "as is" without express or implied warranty. +* +* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR +* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF +* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +* PERFORMANCE OF THIS SOFTWARE. +* +* ======================================================================== +* +* Language: ANSI C +* Environment: Any +* Developer: Kendall Bennett +* +* Description: This file contains the code to implement the decoding and +* emulation of the FPU instructions. 
+* +****************************************************************************/ + +#include "x86emu/x86emui.h" + +/*----------------------------- Implementation ----------------------------*/ + +/* opcode=0xd8 */ +void x86emuOp_esc_coprocess_d8(u8 X86EMU_UNUSED(op1)) +{ + START_OF_INSTR(); + DECODE_PRINTF("ESC D8\n"); + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR_NO_TRACE(); +} + +#ifdef DEBUG + +static char *x86emu_fpu_op_d9_tab[] = { + "FLD\tDWORD PTR ", "ESC_D9\t", "FST\tDWORD PTR ", "FSTP\tDWORD PTR ", + "FLDENV\t", "FLDCW\t", "FSTENV\t", "FSTCW\t", + + "FLD\tDWORD PTR ", "ESC_D9\t", "FST\tDWORD PTR ", "FSTP\tDWORD PTR ", + "FLDENV\t", "FLDCW\t", "FSTENV\t", "FSTCW\t", + + "FLD\tDWORD PTR ", "ESC_D9\t", "FST\tDWORD PTR ", "FSTP\tDWORD PTR ", + "FLDENV\t", "FLDCW\t", "FSTENV\t", "FSTCW\t", +}; + +static char *x86emu_fpu_op_d9_tab1[] = { + "FLD\t", "FLD\t", "FLD\t", "FLD\t", + "FLD\t", "FLD\t", "FLD\t", "FLD\t", + + "FXCH\t", "FXCH\t", "FXCH\t", "FXCH\t", + "FXCH\t", "FXCH\t", "FXCH\t", "FXCH\t", + + "FNOP", "ESC_D9", "ESC_D9", "ESC_D9", + "ESC_D9", "ESC_D9", "ESC_D9", "ESC_D9", + + "FSTP\t", "FSTP\t", "FSTP\t", "FSTP\t", + "FSTP\t", "FSTP\t", "FSTP\t", "FSTP\t", + + "FCHS", "FABS", "ESC_D9", "ESC_D9", + "FTST", "FXAM", "ESC_D9", "ESC_D9", + + "FLD1", "FLDL2T", "FLDL2E", "FLDPI", + "FLDLG2", "FLDLN2", "FLDZ", "ESC_D9", + + "F2XM1", "FYL2X", "FPTAN", "FPATAN", + "FXTRACT", "ESC_D9", "FDECSTP", "FINCSTP", + + "FPREM", "FYL2XP1", "FSQRT", "ESC_D9", + "FRNDINT", "FSCALE", "ESC_D9", "ESC_D9", +}; + +#endif /* DEBUG */ + +/* opcode=0xd9 */ +void x86emuOp_esc_coprocess_d9(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + uint destoffset; + u8 stkelem; + + START_OF_INSTR(); + FETCH_DECODE_MODRM(mod, rh, rl); +#ifdef DEBUG + if (mod != 3) { + DECODE_PRINTINSTR32(x86emu_fpu_op_d9_tab, mod, rh, rl); + } else { + DECODE_PRINTF(x86emu_fpu_op_d9_tab1[(rh << 3) + rl]); + } +#endif + switch (mod) { + case 0: + destoffset = decode_rm00_address(rl); + DECODE_PRINTF("\n"); + break; + case 
1: + destoffset = decode_rm01_address(rl); + DECODE_PRINTF("\n"); + break; + case 2: + destoffset = decode_rm10_address(rl); + DECODE_PRINTF("\n"); + break; + case 3: /* register to register */ + stkelem = (u8)rl; + if (rh < 4) { + DECODE_PRINTF2("ST(%d)\n", stkelem); + } else { + DECODE_PRINTF("\n"); + } + break; + } +#ifdef X86EMU_FPU_PRESENT + /* execute */ + switch (mod) { + case 3: + switch (rh) { + case 0: + x86emu_fpu_R_fld(X86EMU_FPU_STKTOP, stkelem); + break; + case 1: + x86emu_fpu_R_fxch(X86EMU_FPU_STKTOP, stkelem); + break; + case 2: + switch (rl) { + case 0: + x86emu_fpu_R_nop(); + break; + default: + x86emu_fpu_illegal(); + break; + } + case 3: + x86emu_fpu_R_fstp(X86EMU_FPU_STKTOP, stkelem); + break; + case 4: + switch (rl) { + case 0: + x86emu_fpu_R_fchs(X86EMU_FPU_STKTOP); + break; + case 1: + x86emu_fpu_R_fabs(X86EMU_FPU_STKTOP); + break; + case 4: + x86emu_fpu_R_ftst(X86EMU_FPU_STKTOP); + break; + case 5: + x86emu_fpu_R_fxam(X86EMU_FPU_STKTOP); + break; + default: + /* 2,3,6,7 */ + x86emu_fpu_illegal(); + break; + } + break; + + case 5: + switch (rl) { + case 0: + x86emu_fpu_R_fld1(X86EMU_FPU_STKTOP); + break; + case 1: + x86emu_fpu_R_fldl2t(X86EMU_FPU_STKTOP); + break; + case 2: + x86emu_fpu_R_fldl2e(X86EMU_FPU_STKTOP); + break; + case 3: + x86emu_fpu_R_fldpi(X86EMU_FPU_STKTOP); + break; + case 4: + x86emu_fpu_R_fldlg2(X86EMU_FPU_STKTOP); + break; + case 5: + x86emu_fpu_R_fldln2(X86EMU_FPU_STKTOP); + break; + case 6: + x86emu_fpu_R_fldz(X86EMU_FPU_STKTOP); + break; + default: + /* 7 */ + x86emu_fpu_illegal(); + break; + } + break; + + case 6: + switch (rl) { + case 0: + x86emu_fpu_R_f2xm1(X86EMU_FPU_STKTOP); + break; + case 1: + x86emu_fpu_R_fyl2x(X86EMU_FPU_STKTOP); + break; + case 2: + x86emu_fpu_R_fptan(X86EMU_FPU_STKTOP); + break; + case 3: + x86emu_fpu_R_fpatan(X86EMU_FPU_STKTOP); + break; + case 4: + x86emu_fpu_R_fxtract(X86EMU_FPU_STKTOP); + break; + case 5: + x86emu_fpu_illegal(); + break; + case 6: + x86emu_fpu_R_decstp(); + break; + 
case 7: + x86emu_fpu_R_incstp(); + break; + } + break; + + case 7: + switch (rl) { + case 0: + x86emu_fpu_R_fprem(X86EMU_FPU_STKTOP); + break; + case 1: + x86emu_fpu_R_fyl2xp1(X86EMU_FPU_STKTOP); + break; + case 2: + x86emu_fpu_R_fsqrt(X86EMU_FPU_STKTOP); + break; + case 3: + x86emu_fpu_illegal(); + break; + case 4: + x86emu_fpu_R_frndint(X86EMU_FPU_STKTOP); + break; + case 5: + x86emu_fpu_R_fscale(X86EMU_FPU_STKTOP); + break; + case 6: + case 7: + default: + x86emu_fpu_illegal(); + break; + } + break; + + default: + switch (rh) { + case 0: + x86emu_fpu_M_fld(X86EMU_FPU_FLOAT, destoffset); + break; + case 1: + x86emu_fpu_illegal(); + break; + case 2: + x86emu_fpu_M_fst(X86EMU_FPU_FLOAT, destoffset); + break; + case 3: + x86emu_fpu_M_fstp(X86EMU_FPU_FLOAT, destoffset); + break; + case 4: + x86emu_fpu_M_fldenv(X86EMU_FPU_WORD, destoffset); + break; + case 5: + x86emu_fpu_M_fldcw(X86EMU_FPU_WORD, destoffset); + break; + case 6: + x86emu_fpu_M_fstenv(X86EMU_FPU_WORD, destoffset); + break; + case 7: + x86emu_fpu_M_fstcw(X86EMU_FPU_WORD, destoffset); + break; + } + } + } +#endif /* X86EMU_FPU_PRESENT */ + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR_NO_TRACE(); +} + +#ifdef DEBUG + +char *x86emu_fpu_op_da_tab[] = { + "FIADD\tDWORD PTR ", "FIMUL\tDWORD PTR ", "FICOM\tDWORD PTR ", + "FICOMP\tDWORD PTR ", + "FISUB\tDWORD PTR ", "FISUBR\tDWORD PTR ", "FIDIV\tDWORD PTR ", + "FIDIVR\tDWORD PTR ", + + "FIADD\tDWORD PTR ", "FIMUL\tDWORD PTR ", "FICOM\tDWORD PTR ", + "FICOMP\tDWORD PTR ", + "FISUB\tDWORD PTR ", "FISUBR\tDWORD PTR ", "FIDIV\tDWORD PTR ", + "FIDIVR\tDWORD PTR ", + + "FIADD\tDWORD PTR ", "FIMUL\tDWORD PTR ", "FICOM\tDWORD PTR ", + "FICOMP\tDWORD PTR ", + "FISUB\tDWORD PTR ", "FISUBR\tDWORD PTR ", "FIDIV\tDWORD PTR ", + "FIDIVR\tDWORD PTR ", + + "ESC_DA ", "ESC_DA ", "ESC_DA ", "ESC_DA ", + "ESC_DA ", "ESC_DA ", "ESC_DA ", "ESC_DA ", +}; + +#endif /* DEBUG */ + +/* opcode=0xda */ +void x86emuOp_esc_coprocess_da(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + uint destoffset; 
+ u8 stkelem; + + START_OF_INSTR(); + FETCH_DECODE_MODRM(mod, rh, rl); + DECODE_PRINTINSTR32(x86emu_fpu_op_da_tab, mod, rh, rl); + switch (mod) { + case 0: + destoffset = decode_rm00_address(rl); + DECODE_PRINTF("\n"); + break; + case 1: + destoffset = decode_rm01_address(rl); + DECODE_PRINTF("\n"); + break; + case 2: + destoffset = decode_rm10_address(rl); + DECODE_PRINTF("\n"); + break; + case 3: /* register to register */ + stkelem = (u8)rl; + DECODE_PRINTF2("\tST(%d),ST\n", stkelem); + break; + } +#ifdef X86EMU_FPU_PRESENT + switch (mod) { + case 3: + x86emu_fpu_illegal(); + break; + default: + switch (rh) { + case 0: + x86emu_fpu_M_iadd(X86EMU_FPU_SHORT, destoffset); + break; + case 1: + x86emu_fpu_M_imul(X86EMU_FPU_SHORT, destoffset); + break; + case 2: + x86emu_fpu_M_icom(X86EMU_FPU_SHORT, destoffset); + break; + case 3: + x86emu_fpu_M_icomp(X86EMU_FPU_SHORT, destoffset); + break; + case 4: + x86emu_fpu_M_isub(X86EMU_FPU_SHORT, destoffset); + break; + case 5: + x86emu_fpu_M_isubr(X86EMU_FPU_SHORT, destoffset); + break; + case 6: + x86emu_fpu_M_idiv(X86EMU_FPU_SHORT, destoffset); + break; + case 7: + x86emu_fpu_M_idivr(X86EMU_FPU_SHORT, destoffset); + break; + } + } +#endif + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR_NO_TRACE(); +} + +#ifdef DEBUG + +char *x86emu_fpu_op_db_tab[] = { + "FILD\tDWORD PTR ", "ESC_DB\t19", "FIST\tDWORD PTR ", "FISTP\tDWORD PTR ", + "ESC_DB\t1C", "FLD\tTBYTE PTR ", "ESC_DB\t1E", "FSTP\tTBYTE PTR ", + + "FILD\tDWORD PTR ", "ESC_DB\t19", "FIST\tDWORD PTR ", "FISTP\tDWORD PTR ", + "ESC_DB\t1C", "FLD\tTBYTE PTR ", "ESC_DB\t1E", "FSTP\tTBYTE PTR ", + + "FILD\tDWORD PTR ", "ESC_DB\t19", "FIST\tDWORD PTR ", "FISTP\tDWORD PTR ", + "ESC_DB\t1C", "FLD\tTBYTE PTR ", "ESC_DB\t1E", "FSTP\tTBYTE PTR ", +}; + +#endif /* DEBUG */ + +/* opcode=0xdb */ +void x86emuOp_esc_coprocess_db(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + uint destoffset; + + START_OF_INSTR(); + FETCH_DECODE_MODRM(mod, rh, rl); +#ifdef DEBUG + if (mod != 3) { + 
DECODE_PRINTINSTR32(x86emu_fpu_op_db_tab, mod, rh, rl); + } else if (rh == 4) { /* === 11 10 0 nnn */ + switch (rl) { + case 0: + DECODE_PRINTF("FENI\n"); + break; + case 1: + DECODE_PRINTF("FDISI\n"); + break; + case 2: + DECODE_PRINTF("FCLEX\n"); + break; + case 3: + DECODE_PRINTF("FINIT\n"); + break; + } + } else { + DECODE_PRINTF2("ESC_DB %0x\n", (mod << 6) + (rh << 3) + (rl)); + } +#endif /* DEBUG */ + switch (mod) { + case 0: + destoffset = decode_rm00_address(rl); + break; + case 1: + destoffset = decode_rm01_address(rl); + break; + case 2: + destoffset = decode_rm10_address(rl); + break; + case 3: /* register to register */ + break; + } +#ifdef X86EMU_FPU_PRESENT + /* execute */ + switch (mod) { + case 3: + switch (rh) { + case 4: + switch (rl) { + case 0: + x86emu_fpu_R_feni(); + break; + case 1: + x86emu_fpu_R_fdisi(); + break; + case 2: + x86emu_fpu_R_fclex(); + break; + case 3: + x86emu_fpu_R_finit(); + break; + default: + x86emu_fpu_illegal(); + break; + } + break; + default: + x86emu_fpu_illegal(); + break; + } + break; + default: + switch (rh) { + case 0: + x86emu_fpu_M_fild(X86EMU_FPU_SHORT, destoffset); + break; + case 1: + x86emu_fpu_illegal(); + break; + case 2: + x86emu_fpu_M_fist(X86EMU_FPU_SHORT, destoffset); + break; + case 3: + x86emu_fpu_M_fistp(X86EMU_FPU_SHORT, destoffset); + break; + case 4: + x86emu_fpu_illegal(); + break; + case 5: + x86emu_fpu_M_fld(X86EMU_FPU_LDBL, destoffset); + break; + case 6: + x86emu_fpu_illegal(); + break; + case 7: + x86emu_fpu_M_fstp(X86EMU_FPU_LDBL, destoffset); + break; + } + } +#endif + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR_NO_TRACE(); +} + +#ifdef DEBUG +char *x86emu_fpu_op_dc_tab[] = { + "FADD\tQWORD PTR ", "FMUL\tQWORD PTR ", "FCOM\tQWORD PTR ", + "FCOMP\tQWORD PTR ", + "FSUB\tQWORD PTR ", "FSUBR\tQWORD PTR ", "FDIV\tQWORD PTR ", + "FDIVR\tQWORD PTR ", + + "FADD\tQWORD PTR ", "FMUL\tQWORD PTR ", "FCOM\tQWORD PTR ", + "FCOMP\tQWORD PTR ", + "FSUB\tQWORD PTR ", "FSUBR\tQWORD PTR ", "FDIV\tQWORD PTR ", + 
"FDIVR\tQWORD PTR ", + + "FADD\tQWORD PTR ", "FMUL\tQWORD PTR ", "FCOM\tQWORD PTR ", + "FCOMP\tQWORD PTR ", + "FSUB\tQWORD PTR ", "FSUBR\tQWORD PTR ", "FDIV\tQWORD PTR ", + "FDIVR\tQWORD PTR ", + + "FADD\t", "FMUL\t", "FCOM\t", "FCOMP\t", + "FSUBR\t", "FSUB\t", "FDIVR\t", "FDIV\t", +}; +#endif /* DEBUG */ + +/* opcode=0xdc */ +void x86emuOp_esc_coprocess_dc(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + uint destoffset; + u8 stkelem; + + START_OF_INSTR(); + FETCH_DECODE_MODRM(mod, rh, rl); + DECODE_PRINTINSTR32(x86emu_fpu_op_dc_tab, mod, rh, rl); + switch (mod) { + case 0: + destoffset = decode_rm00_address(rl); + DECODE_PRINTF("\n"); + break; + case 1: + destoffset = decode_rm01_address(rl); + DECODE_PRINTF("\n"); + break; + case 2: + destoffset = decode_rm10_address(rl); + DECODE_PRINTF("\n"); + break; + case 3: /* register to register */ + stkelem = (u8)rl; + DECODE_PRINTF2("\tST(%d),ST\n", stkelem); + break; + } +#ifdef X86EMU_FPU_PRESENT + /* execute */ + switch (mod) { + case 3: + switch (rh) { + case 0: + x86emu_fpu_R_fadd(stkelem, X86EMU_FPU_STKTOP); + break; + case 1: + x86emu_fpu_R_fmul(stkelem, X86EMU_FPU_STKTOP); + break; + case 2: + x86emu_fpu_R_fcom(stkelem, X86EMU_FPU_STKTOP); + break; + case 3: + x86emu_fpu_R_fcomp(stkelem, X86EMU_FPU_STKTOP); + break; + case 4: + x86emu_fpu_R_fsubr(stkelem, X86EMU_FPU_STKTOP); + break; + case 5: + x86emu_fpu_R_fsub(stkelem, X86EMU_FPU_STKTOP); + break; + case 6: + x86emu_fpu_R_fdivr(stkelem, X86EMU_FPU_STKTOP); + break; + case 7: + x86emu_fpu_R_fdiv(stkelem, X86EMU_FPU_STKTOP); + break; + } + break; + default: + switch (rh) { + case 0: + x86emu_fpu_M_fadd(X86EMU_FPU_DOUBLE, destoffset); + break; + case 1: + x86emu_fpu_M_fmul(X86EMU_FPU_DOUBLE, destoffset); + break; + case 2: + x86emu_fpu_M_fcom(X86EMU_FPU_DOUBLE, destoffset); + break; + case 3: + x86emu_fpu_M_fcomp(X86EMU_FPU_DOUBLE, destoffset); + break; + case 4: + x86emu_fpu_M_fsub(X86EMU_FPU_DOUBLE, destoffset); + break; + case 5: + 
x86emu_fpu_M_fsubr(X86EMU_FPU_DOUBLE, destoffset); + break; + case 6: + x86emu_fpu_M_fdiv(X86EMU_FPU_DOUBLE, destoffset); + break; + case 7: + x86emu_fpu_M_fdivr(X86EMU_FPU_DOUBLE, destoffset); + break; + } + } +#endif + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR_NO_TRACE(); +} + +#ifdef DEBUG + +static char *x86emu_fpu_op_dd_tab[] = { + "FLD\tQWORD PTR ", "ESC_DD\t29,", "FST\tQWORD PTR ", "FSTP\tQWORD PTR ", + "FRSTOR\t", "ESC_DD\t2D,", "FSAVE\t", "FSTSW\t", + + "FLD\tQWORD PTR ", "ESC_DD\t29,", "FST\tQWORD PTR ", "FSTP\tQWORD PTR ", + "FRSTOR\t", "ESC_DD\t2D,", "FSAVE\t", "FSTSW\t", + + "FLD\tQWORD PTR ", "ESC_DD\t29,", "FST\tQWORD PTR ", "FSTP\tQWORD PTR ", + "FRSTOR\t", "ESC_DD\t2D,", "FSAVE\t", "FSTSW\t", + + "FFREE\t", "FXCH\t", "FST\t", "FSTP\t", + "ESC_DD\t2C,", "ESC_DD\t2D,", "ESC_DD\t2E,", "ESC_DD\t2F,", +}; + +#endif /* DEBUG */ + +/* opcode=0xdd */ +void x86emuOp_esc_coprocess_dd(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + uint destoffset; + u8 stkelem; + + START_OF_INSTR(); + FETCH_DECODE_MODRM(mod, rh, rl); + DECODE_PRINTINSTR32(x86emu_fpu_op_dd_tab, mod, rh, rl); + switch (mod) { + case 0: + destoffset = decode_rm00_address(rl); + DECODE_PRINTF("\n"); + break; + case 1: + destoffset = decode_rm01_address(rl); + DECODE_PRINTF("\n"); + break; + case 2: + destoffset = decode_rm10_address(rl); + DECODE_PRINTF("\n"); + break; + case 3: /* register to register */ + stkelem = (u8)rl; + DECODE_PRINTF2("\tST(%d),ST\n", stkelem); + break; + } +#ifdef X86EMU_FPU_PRESENT + switch (mod) { + case 3: + switch (rh) { + case 0: + x86emu_fpu_R_ffree(stkelem); + break; + case 1: + x86emu_fpu_R_fxch(stkelem); + break; + case 2: + x86emu_fpu_R_fst(stkelem); /* register version */ + break; + case 3: + x86emu_fpu_R_fstp(stkelem); /* register version */ + break; + default: + x86emu_fpu_illegal(); + break; + } + break; + default: + switch (rh) { + case 0: + x86emu_fpu_M_fld(X86EMU_FPU_DOUBLE, destoffset); + break; + case 1: + x86emu_fpu_illegal(); + break; + case 2: + 
x86emu_fpu_M_fst(X86EMU_FPU_DOUBLE, destoffset); + break; + case 3: + x86emu_fpu_M_fstp(X86EMU_FPU_DOUBLE, destoffset); + break; + case 4: + x86emu_fpu_M_frstor(X86EMU_FPU_WORD, destoffset); + break; + case 5: + x86emu_fpu_illegal(); + break; + case 6: + x86emu_fpu_M_fsave(X86EMU_FPU_WORD, destoffset); + break; + case 7: + x86emu_fpu_M_fstsw(X86EMU_FPU_WORD, destoffset); + break; + } + } +#endif + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR_NO_TRACE(); +} + +#ifdef DEBUG + +static char *x86emu_fpu_op_de_tab[] = +{ + "FIADD\tWORD PTR ", "FIMUL\tWORD PTR ", "FICOM\tWORD PTR ", + "FICOMP\tWORD PTR ", + "FISUB\tWORD PTR ", "FISUBR\tWORD PTR ", "FIDIV\tWORD PTR ", + "FIDIVR\tWORD PTR ", + + "FIADD\tWORD PTR ", "FIMUL\tWORD PTR ", "FICOM\tWORD PTR ", + "FICOMP\tWORD PTR ", + "FISUB\tWORD PTR ", "FISUBR\tWORD PTR ", "FIDIV\tWORD PTR ", + "FIDIVR\tWORD PTR ", + + "FIADD\tWORD PTR ", "FIMUL\tWORD PTR ", "FICOM\tWORD PTR ", + "FICOMP\tWORD PTR ", + "FISUB\tWORD PTR ", "FISUBR\tWORD PTR ", "FIDIV\tWORD PTR ", + "FIDIVR\tWORD PTR ", + + "FADDP\t", "FMULP\t", "FCOMP\t", "FCOMPP\t", + "FSUBRP\t", "FSUBP\t", "FDIVRP\t", "FDIVP\t", +}; + +#endif /* DEBUG */ + +/* opcode=0xde */ +void x86emuOp_esc_coprocess_de(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + uint destoffset; + u8 stkelem; + + START_OF_INSTR(); + FETCH_DECODE_MODRM(mod, rh, rl); + DECODE_PRINTINSTR32(x86emu_fpu_op_de_tab, mod, rh, rl); + switch (mod) { + case 0: + destoffset = decode_rm00_address(rl); + DECODE_PRINTF("\n"); + break; + case 1: + destoffset = decode_rm01_address(rl); + DECODE_PRINTF("\n"); + break; + case 2: + destoffset = decode_rm10_address(rl); + DECODE_PRINTF("\n"); + break; + case 3: /* register to register */ + stkelem = (u8)rl; + DECODE_PRINTF2("\tST(%d),ST\n", stkelem); + break; + } +#ifdef X86EMU_FPU_PRESENT + switch (mod) { + case 3: + switch (rh) { + case 0: + x86emu_fpu_R_faddp(stkelem, X86EMU_FPU_STKTOP); + break; + case 1: + x86emu_fpu_R_fmulp(stkelem, X86EMU_FPU_STKTOP); + break; + case 2: + 
x86emu_fpu_R_fcomp(stkelem, X86EMU_FPU_STKTOP); + break; + case 3: + if (stkelem == 1) + x86emu_fpu_R_fcompp(stkelem, X86EMU_FPU_STKTOP); + else + x86emu_fpu_illegal(); + break; + case 4: + x86emu_fpu_R_fsubrp(stkelem, X86EMU_FPU_STKTOP); + break; + case 5: + x86emu_fpu_R_fsubp(stkelem, X86EMU_FPU_STKTOP); + break; + case 6: + x86emu_fpu_R_fdivrp(stkelem, X86EMU_FPU_STKTOP); + break; + case 7: + x86emu_fpu_R_fdivp(stkelem, X86EMU_FPU_STKTOP); + break; + } + break; + default: + switch (rh) { + case 0: + x86emu_fpu_M_fiadd(X86EMU_FPU_WORD, destoffset); + break; + case 1: + x86emu_fpu_M_fimul(X86EMU_FPU_WORD, destoffset); + break; + case 2: + x86emu_fpu_M_ficom(X86EMU_FPU_WORD, destoffset); + break; + case 3: + x86emu_fpu_M_ficomp(X86EMU_FPU_WORD, destoffset); + break; + case 4: + x86emu_fpu_M_fisub(X86EMU_FPU_WORD, destoffset); + break; + case 5: + x86emu_fpu_M_fisubr(X86EMU_FPU_WORD, destoffset); + break; + case 6: + x86emu_fpu_M_fidiv(X86EMU_FPU_WORD, destoffset); + break; + case 7: + x86emu_fpu_M_fidivr(X86EMU_FPU_WORD, destoffset); + break; + } + } +#endif + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR_NO_TRACE(); +} + +#ifdef DEBUG + +static char *x86emu_fpu_op_df_tab[] = { + /* mod == 00 */ + "FILD\tWORD PTR ", "ESC_DF\t39\n", "FIST\tWORD PTR ", "FISTP\tWORD PTR ", + "FBLD\tTBYTE PTR ", "FILD\tQWORD PTR ", "FBSTP\tTBYTE PTR ", + "FISTP\tQWORD PTR ", + + /* mod == 01 */ + "FILD\tWORD PTR ", "ESC_DF\t39 ", "FIST\tWORD PTR ", "FISTP\tWORD PTR ", + "FBLD\tTBYTE PTR ", "FILD\tQWORD PTR ", "FBSTP\tTBYTE PTR ", + "FISTP\tQWORD PTR ", + + /* mod == 10 */ + "FILD\tWORD PTR ", "ESC_DF\t39 ", "FIST\tWORD PTR ", "FISTP\tWORD PTR ", + "FBLD\tTBYTE PTR ", "FILD\tQWORD PTR ", "FBSTP\tTBYTE PTR ", + "FISTP\tQWORD PTR ", + + /* mod == 11 */ + "FFREE\t", "FXCH\t", "FST\t", "FSTP\t", + "ESC_DF\t3C,", "ESC_DF\t3D,", "ESC_DF\t3E,", "ESC_DF\t3F," +}; + +#endif /* DEBUG */ + +/* opcode=0xdf */ +void x86emuOp_esc_coprocess_df(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + uint destoffset; + 
u8 stkelem; + + START_OF_INSTR(); + FETCH_DECODE_MODRM(mod, rh, rl); + DECODE_PRINTINSTR32(x86emu_fpu_op_df_tab, mod, rh, rl); + switch (mod) { + case 0: + destoffset = decode_rm00_address(rl); + DECODE_PRINTF("\n"); + break; + case 1: + destoffset = decode_rm01_address(rl); + DECODE_PRINTF("\n"); + break; + case 2: + destoffset = decode_rm10_address(rl); + DECODE_PRINTF("\n"); + break; + case 3: /* register to register */ + stkelem = (u8)rl; + DECODE_PRINTF2("\tST(%d)\n", stkelem); + break; + } +#ifdef X86EMU_FPU_PRESENT + switch (mod) { + case 3: + switch (rh) { + case 0: + x86emu_fpu_R_ffree(stkelem); + break; + case 1: + x86emu_fpu_R_fxch(stkelem); + break; + case 2: + x86emu_fpu_R_fst(stkelem); /* register version */ + break; + case 3: + x86emu_fpu_R_fstp(stkelem); /* register version */ + break; + default: + x86emu_fpu_illegal(); + break; + } + break; + default: + switch (rh) { + case 0: + x86emu_fpu_M_fild(X86EMU_FPU_WORD, destoffset); + break; + case 1: + x86emu_fpu_illegal(); + break; + case 2: + x86emu_fpu_M_fist(X86EMU_FPU_WORD, destoffset); + break; + case 3: + x86emu_fpu_M_fistp(X86EMU_FPU_WORD, destoffset); + break; + case 4: + x86emu_fpu_M_fbld(X86EMU_FPU_BSD, destoffset); + break; + case 5: + x86emu_fpu_M_fild(X86EMU_FPU_LONG, destoffset); + break; + case 6: + x86emu_fpu_M_fbstp(X86EMU_FPU_BSD, destoffset); + break; + case 7: + x86emu_fpu_M_fistp(X86EMU_FPU_LONG, destoffset); + break; + } + } +#endif + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR_NO_TRACE(); +} diff --git a/arch/e2k/boot/bios/video/x86emu/src/x86emu/makefile b/arch/e2k/boot/bios/video/x86emu/src/x86emu/makefile new file mode 100644 index 000000000000..3fbc363b34e6 --- /dev/null +++ b/arch/e2k/boot/bios/video/x86emu/src/x86emu/makefile @@ -0,0 +1,63 @@ +############################################################################# +# +# Realmode X86 Emulator Library +# +# Copyright (C) 1991-2004 SciTech Software, Inc. 
+# +# ======================================================================== +# +# Permission to use, copy, modify, distribute, and sell this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of the authors not be used +# in advertising or publicity pertaining to distribution of the software +# without specific, written prior permission. The authors makes no +# representations about the suitability of this software for any purpose. +# It is provided "as is" without express or implied warranty. +# +# THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +# EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR +# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF +# USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +# PERFORMANCE OF THIS SOFTWARE. +# +# ======================================================================== +# +# Description: Generic makefile for the x86emu library. Requires +# the SciTech Software makefile definitions package to be +# installed, which uses the DMAKE make program. 
+# +############################################################################# + +.IMPORT .IGNORE: DEBUG + +#---------------------------------------------------------------------------- +# Define the lists of object files +#---------------------------------------------------------------------------- + +OBJECTS = sys$O decode$O ops$O ops2$O prim_ops$O fpu$O debug$O +CFLAGS += -DSCITECH +.IF $(DEBUG) +CFLAGS += -DDEBUG +.ENDIF +LIBCLEAN = *.dll *.lib *.a +LIBFILE = $(LP)x86emu$L + +#---------------------------------------------------------------------------- +# Sample test programs +#---------------------------------------------------------------------------- + +all: $(LIBFILE) + +validate$E: validate$O $(LIBFILE) + +#---------------------------------------------------------------------------- +# Define the list of object files to create dependency information for +#---------------------------------------------------------------------------- + +DEPEND_OBJ = validate$O $(OBJECTS) + +.INCLUDE: "$(SCITECH)/makedefs/common.mk" diff --git a/arch/e2k/boot/bios/video/x86emu/src/x86emu/makefile.linux b/arch/e2k/boot/bios/video/x86emu/src/x86emu/makefile.linux new file mode 100644 index 000000000000..0fc0a29b9267 --- /dev/null +++ b/arch/e2k/boot/bios/video/x86emu/src/x86emu/makefile.linux @@ -0,0 +1,73 @@ +############################################################################# +# +# Realmode X86 Emulator Library +# +# Copyright (C) 1996-1999 SciTech Software, Inc. 
+# +# ======================================================================== +# +# Permission to use, copy, modify, distribute, and sell this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of the authors not be used +# in advertising or publicity pertaining to distribution of the software +# without specific, written prior permission. The authors makes no +# representations about the suitability of this software for any purpose. +# It is provided "as is" without express or implied warranty. +# +# THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +# EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR +# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF +# USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +# PERFORMANCE OF THIS SOFTWARE. +# +# ======================================================================== +# +# Descripton: Linux specific makefile for the x86emu library. +# +############################################################################# + +TARGETLIB = libx86emu.a +TARGETDEBUGLIB =libx86emud.a + +OBJS=\ + decode.o \ + fpu.o \ + ops.o \ + ops2.o \ + prim_ops.o \ + sys.o \ + debug.o + +DEBUGOBJS=debug.d \ + decode.d \ + fpu.d \ + ops.d \ + ops2.d \ + prim_ops.d \ + sys.d + +.SUFFIXES: .d + +FORCE: + + +lib: $(TARGETLIB) # $(TARGETDEBUGLIB) + +$(TARGETLIB): FORCE $(OBJS) + $(AR) rv $(TARGETLIB) $(OBJS) + +$(TARGETDEBUGLIB): $(DEBUGOBJS) + $(AR) rv $(TARGETDEBUGLIB) $(DEBUGOBJS) + +INCS = -I. -I../../../../../../../../include -I../../../.. 
-I../../include -I../../include/x86emu +#CFLAGS = -D__DRIVER__ -DFORCE_POST -D_CEXPORT= -DNO_LONG_LONG +CFLAGS += -D__DRIVER__ -DFORCE_POST $(INCS) +CDEBUGFLAGS = -DDEBUG + +clean: + rm -f *.a *.o *.d + diff --git a/arch/e2k/boot/bios/video/x86emu/src/x86emu/ops.c b/arch/e2k/boot/bios/video/x86emu/src/x86emu/ops.c new file mode 100644 index 000000000000..4b6ab74844ff --- /dev/null +++ b/arch/e2k/boot/bios/video/x86emu/src/x86emu/ops.c @@ -0,0 +1,5428 @@ +/**************************************************************************** +* +* Realmode X86 Emulator Library +* +* Copyright (C) 1991-2004 SciTech Software, Inc. +* Copyright (C) David Mosberger-Tang +* Copyright (C) 1999 Egbert Eich +* +* ======================================================================== +* +* Permission to use, copy, modify, distribute, and sell this software and +* its documentation for any purpose is hereby granted without fee, +* provided that the above copyright notice appear in all copies and that +* both that copyright notice and this permission notice appear in +* supporting documentation, and that the name of the authors not be used +* in advertising or publicity pertaining to distribution of the software +* without specific, written prior permission. The authors makes no +* representations about the suitability of this software for any purpose. +* It is provided "as is" without express or implied warranty. +* +* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR +* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF +* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +* PERFORMANCE OF THIS SOFTWARE. 
+* +* ======================================================================== +* +* Language: ANSI C +* Environment: Any +* Developer: Kendall Bennett +* +* Description: This file includes subroutines to implement the decoding +* and emulation of all the x86 processor instructions. +* +* There are approximately 250 subroutines in here, which correspond +* to the 256 byte-"opcodes" found on the 8086. The table which +* dispatches this is found in the files optab.[ch]. +* +* Each opcode proc has a comment preceding it which gives its table +* address. Several opcodes are missing (undefined) in the table. +* +* Each proc includes information for decoding (DECODE_PRINTF and +* DECODE_PRINTF2), debugging (TRACE_REGS, SINGLE_STEP), and misc +* functions (START_OF_INSTR, END_OF_INSTR). +* +* Many of the procedures are *VERY* similar in coding. This has +* allowed for a very large amount of code to be generated in a fairly +* short amount of time (i.e. cut, paste, and modify). The result is +* that much of the code below could have been folded into subroutines +* for a large reduction in size of this file. The downside would be +* that there would be a penalty in execution speed. The file could +* also have been *MUCH* larger by inlining certain functions which +* were called. This could have resulted in even faster execution. The +* prime directive I used to decide whether to inline the code or to +* modularize it, was basically: 1) no unnecessary subroutine calls, +* 2) no routines more than about 200 lines in size, and 3) modularize +* any code that I might not get right the first time. The fetch_* +* subroutines fall into the latter category. The decode_* fall +* into the second category. The coding of the "switch(mod){ .... }" +* in many of the subroutines below falls into the first category. +* Especially, the coding of {add,and,or,sub,...}_{byte,word} +* subroutines are an especially glaring case of the third guideline. 
+* Since so much of the code is cloned from other modules (compare +* opcode #00 to opcode #01), making the basic operations subroutine +* calls is especially important; otherwise mistakes in coding an +* "add" would represent a nightmare in maintenance. +* +****************************************************************************/ + +#include "x86emu/x86emui.h" +#include "x86emu/types.h" + +/*----------------------------- Implementation ----------------------------*/ + +/* constant arrays to do several instructions in just one function */ + +#ifdef DEBUG +static char *x86emu_GenOpName[8] = { + "ADD", "OR", "ADC", "SBB", "AND", "SUB", "XOR", "CMP"}; +#endif + +/* used by several opcodes */ +static u8 (*genop_byte_operation[])(u8 d, u8 s) = +{ + add_byte, /* 00 */ + or_byte, /* 01 */ + adc_byte, /* 02 */ + sbb_byte, /* 03 */ + and_byte, /* 04 */ + sub_byte, /* 05 */ + xor_byte, /* 06 */ + cmp_byte, /* 07 */ +}; + +static u16 (*genop_word_operation[])(u16 d, u16 s) = +{ + add_word, /*00 */ + or_word, /*01 */ + adc_word, /*02 */ + sbb_word, /*03 */ + and_word, /*04 */ + sub_word, /*05 */ + xor_word, /*06 */ + cmp_word, /*07 */ +}; + +static u32 (*genop_long_operation[])(u32 d, u32 s) = +{ + add_long, /*00 */ + or_long, /*01 */ + adc_long, /*02 */ + sbb_long, /*03 */ + and_long, /*04 */ + sub_long, /*05 */ + xor_long, /*06 */ + cmp_long, /*07 */ +}; + +/* used by opcodes 80, c0, d0, and d2. */ +static u8(*opcD0_byte_operation[])(u8 d, u8 s) = +{ + rol_byte, + ror_byte, + rcl_byte, + rcr_byte, + shl_byte, + shr_byte, + shl_byte, /* sal_byte === shl_byte by definition */ + sar_byte, +}; + +/* used by opcodes c1, d1, and d3. */ +static u16(*opcD1_word_operation[])(u16 s, u8 d) = +{ + rol_word, + ror_word, + rcl_word, + rcr_word, + shl_word, + shr_word, + shl_word, /* sal_byte === shl_byte by definition */ + sar_word, +}; + +/* used by opcodes c1, d1, and d3. 
*/
static u32 (*opcD1_long_operation[])(u32 s, u8 d) =
{
    rol_long,
    ror_long,
    rcl_long,
    rcr_long,
    shl_long,
    shr_long,
    shl_long,           /* sal_long === shl_long by definition */
    sar_long,
};

#ifdef DEBUG

/* Sub-operation mnemonics indexed by the modrm REG field; entry 1 is
 * deliberately empty (undefined sub-opcode in that group). */
static char *opF6_names[8] =
  { "TEST\t", "", "NOT\t", "NEG\t", "MUL\t", "IMUL\t", "DIV\t", "IDIV\t" };

#endif

/****************************************************************************
PARAMETERS:
op1 - Instruction op code

REMARKS:
Handles illegal opcodes.
****************************************************************************/
void x86emuOp_illegal_op(
    u8 op1)
{
    START_OF_INSTR();
    if (M.x86.R_SP != 0) {
        /* Genuine illegal opcode hit while a program is running:
         * report it with full register state and halt the emulator. */
        DECODE_PRINTF("ILLEGAL X86 OPCODE\n");
        TRACE_REGS();
        DB( printk("%04x:%04x: %02X ILLEGAL X86 OPCODE!\n",
            M.x86.R_CS, M.x86.R_IP-1,op1));
        HALT_SYS();
    }
    else {
        /* If we get here, it means the stack pointer is back to zero
         * so we are just returning from an emulator service call
         * so there is no need to display an error message. We trap
         * the emulator with an 0xF1 opcode to finish the service
         * call.
*/
        X86EMU_halt_sys();
    }
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcodes 0x00, 0x08, 0x10, 0x18, 0x20, 0x28, 0x30, 0x38
****************************************************************************/
void x86emuOp_genop_byte_RM_R(u8 op1)
{
    int mod, rl, rh;
    uint destoffset;
    u8 *destreg, *srcreg;
    u8 destval;

    /* Bits 5..3 of the opcode select the ALU operation; the same index
     * is used for both the mnemonic table and the operation table. */
    op1 = (op1 >> 3) & 0x7;

    START_OF_INSTR();
    DECODE_PRINTF(x86emu_GenOpName[op1]);
    DECODE_PRINTF("\t");
    FETCH_DECODE_MODRM(mod, rh, rl);
    if(mod<3)
    {   /* destination is memory (r/m), source is a byte register */
        destoffset = decode_rmXX_address(mod,rl);
        DECODE_PRINTF(",");
        destval = fetch_data_byte(destoffset);
        srcreg = DECODE_RM_BYTE_REGISTER(rh);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        destval = genop_byte_operation[op1](destval, *srcreg);
        store_data_byte(destoffset, destval);
    }
    else
    {   /* register to register */
        destreg = DECODE_RM_BYTE_REGISTER(rl);
        DECODE_PRINTF(",");
        srcreg = DECODE_RM_BYTE_REGISTER(rh);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        *destreg = genop_byte_operation[op1](*destreg, *srcreg);
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcodes 0x01, 0x09, 0x11, 0x19, 0x21, 0x29, 0x31, 0x39
****************************************************************************/
void x86emuOp_genop_word_RM_R(u8 op1)
{
    int mod, rl, rh;
    uint destoffset;

    /* bits 5..3 of the opcode select the ALU operation */
    op1 = (op1 >> 3) & 0x7;

    START_OF_INSTR();
    DECODE_PRINTF(x86emu_GenOpName[op1]);
    DECODE_PRINTF("\t");
    FETCH_DECODE_MODRM(mod, rh, rl);

    if(mod<3) {
        destoffset = decode_rmXX_address(mod,rl);
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            /* 32-bit operand size (0x66 prefix seen) */
            u32 destval;
            u32 *srcreg;

            DECODE_PRINTF(",");
            destval = fetch_data_long(destoffset);
            srcreg = DECODE_RM_LONG_REGISTER(rh);
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            destval = genop_long_operation[op1](destval, *srcreg);
            store_data_long(destoffset, destval);
        } else {
u16 destval;
            u16 *srcreg;

            DECODE_PRINTF(",");
            destval = fetch_data_word(destoffset);
            srcreg = DECODE_RM_WORD_REGISTER(rh);
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            destval = genop_word_operation[op1](destval, *srcreg);
            store_data_word(destoffset, destval);
        }
    } else { /* register to register */
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            /* 32-bit operand size (0x66 prefix seen) */
            u32 *destreg,*srcreg;

            destreg = DECODE_RM_LONG_REGISTER(rl);
            DECODE_PRINTF(",");
            srcreg = DECODE_RM_LONG_REGISTER(rh);
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            *destreg = genop_long_operation[op1](*destreg, *srcreg);
        } else {
            u16 *destreg,*srcreg;

            destreg = DECODE_RM_WORD_REGISTER(rl);
            DECODE_PRINTF(",");
            srcreg = DECODE_RM_WORD_REGISTER(rh);
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            *destreg = genop_word_operation[op1](*destreg, *srcreg);
        }
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcodes 0x02, 0x0a, 0x12, 0x1a, 0x22, 0x2a, 0x32, 0x3a
****************************************************************************/
void x86emuOp_genop_byte_R_RM(u8 op1)
{
    int mod, rl, rh;
    u8 *destreg, *srcreg;
    uint srcoffset;
    u8 srcval;

    /* bits 5..3 of the opcode select the ALU operation */
    op1 = (op1 >> 3) & 0x7;

    START_OF_INSTR();
    DECODE_PRINTF(x86emu_GenOpName[op1]);
    DECODE_PRINTF("\t");
    FETCH_DECODE_MODRM(mod, rh, rl);
    if (mod < 3) {
        /* destination is a byte register, source is memory (r/m) */
        destreg = DECODE_RM_BYTE_REGISTER(rh);
        DECODE_PRINTF(",");
        srcoffset = decode_rmXX_address(mod,rl);
        srcval = fetch_data_byte(srcoffset);
    } else { /* register to register */
        destreg = DECODE_RM_BYTE_REGISTER(rh);
        DECODE_PRINTF(",");
        srcreg = DECODE_RM_BYTE_REGISTER(rl);
        srcval = *srcreg;
    }
    DECODE_PRINTF("\n");
    TRACE_AND_STEP();
    *destreg = genop_byte_operation[op1](*destreg, srcval);

    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcodes 0x03, 0x0b, 0x13, 0x1b, 0x23,
0x2b, 0x33, 0x3b
****************************************************************************/
void x86emuOp_genop_word_R_RM(u8 op1)
{
    int mod, rl, rh;
    uint srcoffset;
    u32 *destreg32, srcval;
    u16 *destreg;

    /* bits 5..3 of the opcode select the ALU operation */
    op1 = (op1 >> 3) & 0x7;

    START_OF_INSTR();
    DECODE_PRINTF(x86emu_GenOpName[op1]);
    DECODE_PRINTF("\t");
    FETCH_DECODE_MODRM(mod, rh, rl);
    if (mod < 3) {
        /* destination is a register, source is memory (r/m) */
        srcoffset = decode_rmXX_address(mod,rl);
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            destreg32 = DECODE_RM_LONG_REGISTER(rh);
            DECODE_PRINTF(",");
            srcval = fetch_data_long(srcoffset);
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            *destreg32 = genop_long_operation[op1](*destreg32, srcval);
        } else {
            destreg = DECODE_RM_WORD_REGISTER(rh);
            DECODE_PRINTF(",");
            srcval = fetch_data_word(srcoffset);
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            *destreg = genop_word_operation[op1](*destreg, srcval);
        }
    } else { /* register to register */
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            u32 *srcreg;
            destreg32 = DECODE_RM_LONG_REGISTER(rh);
            DECODE_PRINTF(",");
            srcreg = DECODE_RM_LONG_REGISTER(rl);
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            *destreg32 = genop_long_operation[op1](*destreg32, *srcreg);
        } else {
            u16 *srcreg;
            destreg = DECODE_RM_WORD_REGISTER(rh);
            DECODE_PRINTF(",");
            srcreg = DECODE_RM_WORD_REGISTER(rl);
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            *destreg = genop_word_operation[op1](*destreg, *srcreg);
        }
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcodes 0x04, 0x0c, 0x14, 0x1c, 0x24, 0x2c, 0x34, 0x3c
****************************************************************************/
void x86emuOp_genop_byte_AL_IMM(u8 op1)
{
    u8 srcval;

    /* bits 5..3 of the opcode select the ALU operation; dest is AL */
    op1 = (op1 >> 3) & 0x7;

    START_OF_INSTR();
    DECODE_PRINTF(x86emu_GenOpName[op1]);
    DECODE_PRINTF("\tAL,");
    srcval = fetch_byte_imm();
    DECODE_PRINTF2("%x\n", srcval);
    TRACE_AND_STEP();
    M.x86.R_AL =
genop_byte_operation[op1](M.x86.R_AL, srcval);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcodes 0x05, 0x0d, 0x15, 0x1d, 0x25, 0x2d, 0x35, 0x3d
****************************************************************************/
void x86emuOp_genop_word_AX_IMM(u8 op1)
{
    u32 srcval;

    /* bits 5..3 of the opcode select the ALU operation; dest is AX/EAX */
    op1 = (op1 >> 3) & 0x7;

    START_OF_INSTR();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        /* 0x66 prefix: 32-bit immediate, operate on EAX */
        DECODE_PRINTF(x86emu_GenOpName[op1]);
        DECODE_PRINTF("\tEAX,");
        srcval = fetch_long_imm();
    } else {
        DECODE_PRINTF(x86emu_GenOpName[op1]);
        DECODE_PRINTF("\tAX,");
        srcval = fetch_word_imm();
    }
    DECODE_PRINTF2("%x\n", srcval);
    TRACE_AND_STEP();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        M.x86.R_EAX = genop_long_operation[op1](M.x86.R_EAX, srcval);
    } else {
        M.x86.R_AX = genop_word_operation[op1](M.x86.R_AX, (u16)srcval);
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x06
****************************************************************************/
void x86emuOp_push_ES(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("PUSH\tES\n");
    TRACE_AND_STEP();
    push_word(M.x86.R_ES);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x07
****************************************************************************/
void x86emuOp_pop_ES(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("POP\tES\n");
    TRACE_AND_STEP();
    M.x86.R_ES = pop_word();
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x0e
****************************************************************************/
void x86emuOp_push_CS(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
DECODE_PRINTF("PUSH\tCS\n");
    TRACE_AND_STEP();
    push_word(M.x86.R_CS);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x0f. Escape for two-byte opcode (286 or better)
****************************************************************************/
void x86emuOp_two_byte(u8 X86EMU_UNUSED(op1))
{
    /* Fetch the second opcode byte from CS<<4 + IP (real-mode style
     * linear address) and dispatch through the two-byte opcode table.
     * Note: no START_OF_INSTR/END_OF_INSTR here; the dispatched
     * handler performs its own instruction bookkeeping. */
    u8 op2 = (*sys_rdb)(((u32)M.x86.R_CS << 4) + (M.x86.R_IP++));
    INC_DECODED_INST_LEN(1);
    (*x86emu_optab2[op2])(op2);
}

/****************************************************************************
REMARKS:
Handles opcode 0x16
****************************************************************************/
void x86emuOp_push_SS(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("PUSH\tSS\n");
    TRACE_AND_STEP();
    push_word(M.x86.R_SS);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x17
****************************************************************************/
void x86emuOp_pop_SS(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("POP\tSS\n");
    TRACE_AND_STEP();
    M.x86.R_SS = pop_word();
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x1e
****************************************************************************/
void x86emuOp_push_DS(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("PUSH\tDS\n");
    TRACE_AND_STEP();
    push_word(M.x86.R_DS);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x1f
****************************************************************************/
void x86emuOp_pop_DS(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("POP\tDS\n");
    TRACE_AND_STEP();
    M.x86.R_DS =
pop_word();
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x26
****************************************************************************/
void x86emuOp_segovr_ES(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("ES:\n");
    TRACE_AND_STEP();
    M.x86.mode |= SYSMODE_SEGOVR_ES;
    /*
     * note the lack of DECODE_CLEAR_SEGOVR(r) since, here is one of 4
     * opcode subroutines we do not want to do this.
     * (The override must stay armed so it applies to the *next*
     * instruction; that instruction's handler clears it.)
     */
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x27
****************************************************************************/
void x86emuOp_daa(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("DAA\n");
    TRACE_AND_STEP();
    M.x86.R_AL = daa_byte(M.x86.R_AL);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x2e
****************************************************************************/
void x86emuOp_segovr_CS(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("CS:\n");
    TRACE_AND_STEP();
    M.x86.mode |= SYSMODE_SEGOVR_CS;
    /* note no DECODE_CLEAR_SEGOVR here.
*/
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x2f
****************************************************************************/
void x86emuOp_das(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("DAS\n");
    TRACE_AND_STEP();
    M.x86.R_AL = das_byte(M.x86.R_AL);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x36
****************************************************************************/
void x86emuOp_segovr_SS(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("SS:\n");
    TRACE_AND_STEP();
    M.x86.mode |= SYSMODE_SEGOVR_SS;
    /* no DECODE_CLEAR_SEGOVR ! — the override must survive until the
     * next instruction consumes it */
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x37
****************************************************************************/
void x86emuOp_aaa(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("AAA\n");
    TRACE_AND_STEP();
    M.x86.R_AX = aaa_word(M.x86.R_AX);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x3e
****************************************************************************/
void x86emuOp_segovr_DS(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("DS:\n");
    TRACE_AND_STEP();
    M.x86.mode |= SYSMODE_SEGOVR_DS;
    /* NO DECODE_CLEAR_SEGOVR!
*/
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x3f
****************************************************************************/
void x86emuOp_aas(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("AAS\n");
    TRACE_AND_STEP();
    M.x86.R_AX = aas_word(M.x86.R_AX);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x40 - 0x47
****************************************************************************/
void x86emuOp_inc_register(u8 op1)
{
    START_OF_INSTR();
    /* low 3 bits of the opcode select the register to increment */
    op1 &= 0x7;
    DECODE_PRINTF("INC\t");
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        u32 *reg;
        reg = DECODE_RM_LONG_REGISTER(op1);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        *reg = inc_long(*reg);
    } else {
        u16 *reg;
        reg = DECODE_RM_WORD_REGISTER(op1);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        *reg = inc_word(*reg);
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x48 - 0x4F
****************************************************************************/
void x86emuOp_dec_register(u8 op1)
{
    START_OF_INSTR();
    /* low 3 bits of the opcode select the register to decrement */
    op1 &= 0x7;
    DECODE_PRINTF("DEC\t");
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        u32 *reg;
        reg = DECODE_RM_LONG_REGISTER(op1);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        *reg = dec_long(*reg);
    } else {
        u16 *reg;
        reg = DECODE_RM_WORD_REGISTER(op1);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        *reg = dec_word(*reg);
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x50 - 0x57
****************************************************************************/
void x86emuOp_push_register(u8 op1)
{
    START_OF_INSTR();
    /* low 3 bits of the opcode select the register to push */
    op1 &= 0x7;
    DECODE_PRINTF("PUSH\t");
    if (M.x86.mode &
SYSMODE_PREFIX_DATA) {
        u32 *reg;
        reg = DECODE_RM_LONG_REGISTER(op1);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        push_long(*reg);
    } else {
        u16 *reg;
        reg = DECODE_RM_WORD_REGISTER(op1);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        push_word(*reg);
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x58 - 0x5F
****************************************************************************/
void x86emuOp_pop_register(u8 op1)
{
    START_OF_INSTR();
    /* low 3 bits of the opcode select the destination register */
    op1 &= 0x7;
    DECODE_PRINTF("POP\t");
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        u32 *reg;
        reg = DECODE_RM_LONG_REGISTER(op1);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        *reg = pop_long();
    } else {
        u16 *reg;
        reg = DECODE_RM_WORD_REGISTER(op1);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        *reg = pop_word();
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x60
****************************************************************************/
void x86emuOp_push_all(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        DECODE_PRINTF("PUSHAD\n");
    } else {
        DECODE_PRINTF("PUSHA\n");
    }
    TRACE_AND_STEP();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        /* the stack pointer value pushed is the one captured *before*
         * any of the pushes, hence old_sp */
        u32 old_sp = M.x86.R_ESP;

        push_long(M.x86.R_EAX);
        push_long(M.x86.R_ECX);
        push_long(M.x86.R_EDX);
        push_long(M.x86.R_EBX);
        push_long(old_sp);
        push_long(M.x86.R_EBP);
        push_long(M.x86.R_ESI);
        push_long(M.x86.R_EDI);
    } else {
        /* same ordering for the 16-bit form */
        u16 old_sp = M.x86.R_SP;

        push_word(M.x86.R_AX);
        push_word(M.x86.R_CX);
        push_word(M.x86.R_DX);
        push_word(M.x86.R_BX);
        push_word(old_sp);
        push_word(M.x86.R_BP);
        push_word(M.x86.R_SI);
        push_word(M.x86.R_DI);
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode
0x61
****************************************************************************/
void x86emuOp_pop_all(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        DECODE_PRINTF("POPAD\n");
    } else {
        DECODE_PRINTF("POPA\n");
    }
    TRACE_AND_STEP();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        M.x86.R_EDI = pop_long();
        M.x86.R_ESI = pop_long();
        M.x86.R_EBP = pop_long();
        M.x86.R_ESP += 4;       /* skip ESP */
        M.x86.R_EBX = pop_long();
        M.x86.R_EDX = pop_long();
        M.x86.R_ECX = pop_long();
        M.x86.R_EAX = pop_long();
    } else {
        M.x86.R_DI = pop_word();
        M.x86.R_SI = pop_word();
        M.x86.R_BP = pop_word();
        M.x86.R_SP += 2;        /* skip SP */
        M.x86.R_BX = pop_word();
        M.x86.R_DX = pop_word();
        M.x86.R_CX = pop_word();
        M.x86.R_AX = pop_word();
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/*opcode 0x62   ILLEGAL OP, calls x86emuOp_illegal_op() */
/*opcode 0x63   ILLEGAL OP, calls x86emuOp_illegal_op() */

/****************************************************************************
REMARKS:
Handles opcode 0x64
****************************************************************************/
void x86emuOp_segovr_FS(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("FS:\n");
    TRACE_AND_STEP();
    M.x86.mode |= SYSMODE_SEGOVR_FS;
    /*
     * note the lack of DECODE_CLEAR_SEGOVR(r) since, here is one of 4
     * opcode subroutines we do not want to do this.
     */
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x65
****************************************************************************/
void x86emuOp_segovr_GS(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("GS:\n");
    TRACE_AND_STEP();
    M.x86.mode |= SYSMODE_SEGOVR_GS;
    /*
     * note the lack of DECODE_CLEAR_SEGOVR(r) since, here is one of 4
     * opcode subroutines we do not want to do this.
*/
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x66 - prefix for 32-bit register
****************************************************************************/
void x86emuOp_prefix_data(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("DATA:\n");
    TRACE_AND_STEP();
    /* arms 32-bit operand size for the following instruction */
    M.x86.mode |= SYSMODE_PREFIX_DATA;
    /* note no DECODE_CLEAR_SEGOVR here. */
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x67 - prefix for 32-bit address
****************************************************************************/
void x86emuOp_prefix_addr(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("ADDR:\n");
    TRACE_AND_STEP();
    /* arms 32-bit address size for the following instruction */
    M.x86.mode |= SYSMODE_PREFIX_ADDR;
    /* note no DECODE_CLEAR_SEGOVR here. */
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x68
****************************************************************************/
void x86emuOp_push_word_IMM(u8 X86EMU_UNUSED(op1))
{
    u32 imm;

    START_OF_INSTR();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        imm = fetch_long_imm();
    } else {
        imm = fetch_word_imm();
    }
    DECODE_PRINTF2("PUSH\t%x\n", imm);
    TRACE_AND_STEP();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        push_long(imm);
    } else {
        push_word((u16)imm);
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x69
****************************************************************************/
void x86emuOp_imul_word_IMM(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    uint srcoffset;

    START_OF_INSTR();
    DECODE_PRINTF("IMUL\t");
    FETCH_DECODE_MODRM(mod, rh, rl);
    if (mod < 3) {
        srcoffset = decode_rmXX_address(mod, rl);
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            u32 *destreg;
            u32 srcval;
u32 res_lo,res_hi; + s32 imm; + + destreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF(","); + srcval = fetch_data_long(srcoffset); + imm = fetch_long_imm(); + DECODE_PRINTF2(",%d\n", (s32)imm); + TRACE_AND_STEP(); + imul_long_direct(&res_lo,&res_hi,(s32)srcval,(s32)imm); + if ((((res_lo & 0x80000000) == 0) && (res_hi == 0x00000000)) || + (((res_lo & 0x80000000) != 0) && (res_hi == 0xFFFFFFFF))) { + CLEAR_FLAG(F_CF); + CLEAR_FLAG(F_OF); + } else { + SET_FLAG(F_CF); + SET_FLAG(F_OF); + } + *destreg = (u32)res_lo; + } else { + u16 *destreg; + u16 srcval; + u32 res; + s16 imm; + + destreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(","); + srcval = fetch_data_word(srcoffset); + imm = fetch_word_imm(); + DECODE_PRINTF2(",%d\n", (s32)imm); + TRACE_AND_STEP(); + res = (s16)srcval * (s16)imm; + if ((((res & 0x8000) == 0) && ((res >> 16) == 0x0000)) || + (((res & 0x8000) != 0) && ((res >> 16) == 0xFFFF))) { + CLEAR_FLAG(F_CF); + CLEAR_FLAG(F_OF); + } else { + SET_FLAG(F_CF); + SET_FLAG(F_OF); + } + *destreg = (u16)res; + } + } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *destreg,*srcreg; + u32 res_lo,res_hi; + s32 imm; + + destreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF(","); + srcreg = DECODE_RM_LONG_REGISTER(rl); + imm = fetch_long_imm(); + DECODE_PRINTF2(",%d\n", (s32)imm); + TRACE_AND_STEP(); + imul_long_direct(&res_lo,&res_hi,(s32)*srcreg,(s32)imm); + if ((((res_lo & 0x80000000) == 0) && (res_hi == 0x00000000)) || + (((res_lo & 0x80000000) != 0) && (res_hi == 0xFFFFFFFF))) { + CLEAR_FLAG(F_CF); + CLEAR_FLAG(F_OF); + } else { + SET_FLAG(F_CF); + SET_FLAG(F_OF); + } + *destreg = (u32)res_lo; + } else { + u16 *destreg,*srcreg; + u32 res; + s16 imm; + + destreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(","); + srcreg = DECODE_RM_WORD_REGISTER(rl); + imm = fetch_word_imm(); + DECODE_PRINTF2(",%d\n", (s32)imm); + res = (s16)*srcreg * (s16)imm; + if ((((res & 0x8000) == 0) && ((res >> 16) == 0x0000)) || + (((res & 
0x8000) != 0) && ((res >> 16) == 0xFFFF))) {
                CLEAR_FLAG(F_CF);
                CLEAR_FLAG(F_OF);
            } else {
                SET_FLAG(F_CF);
                SET_FLAG(F_OF);
            }
            *destreg = (u16)res;
        }
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x6a
****************************************************************************/
void x86emuOp_push_byte_IMM(u8 X86EMU_UNUSED(op1))
{
    s16 imm;

    START_OF_INSTR();
    /* the byte immediate is sign-extended to 16 bits and pushed as a
     * word (this handler does not check the operand-size prefix) */
    imm = (s8)fetch_byte_imm();
    DECODE_PRINTF2("PUSH\t%d\n", imm);
    TRACE_AND_STEP();
    push_word(imm);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x6b
****************************************************************************/
void x86emuOp_imul_byte_IMM(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    uint srcoffset;
    s8 imm;

    START_OF_INSTR();
    DECODE_PRINTF("IMUL\t");
    FETCH_DECODE_MODRM(mod, rh, rl);
    if (mod < 3) {
        srcoffset = decode_rmXX_address(mod, rl);
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            u32 *destreg;
            u32 srcval;
            u32 res_lo,res_hi;

            destreg = DECODE_RM_LONG_REGISTER(rh);
            DECODE_PRINTF(",");
            srcval = fetch_data_long(srcoffset);
            imm = fetch_byte_imm();
            DECODE_PRINTF2(",%d\n", (s32)imm);
            TRACE_AND_STEP();
            imul_long_direct(&res_lo,&res_hi,(s32)srcval,(s32)imm);
            /* CF/OF cleared only when the full product sign-extends
             * from its low half (result fits in the destination) */
            if ((((res_lo & 0x80000000) == 0) && (res_hi == 0x00000000)) ||
                (((res_lo & 0x80000000) != 0) && (res_hi == 0xFFFFFFFF))) {
                CLEAR_FLAG(F_CF);
                CLEAR_FLAG(F_OF);
            } else {
                SET_FLAG(F_CF);
                SET_FLAG(F_OF);
            }
            *destreg = (u32)res_lo;
        } else {
            u16 *destreg;
            u16 srcval;
            u32 res;

            destreg = DECODE_RM_WORD_REGISTER(rh);
            DECODE_PRINTF(",");
            srcval = fetch_data_word(srcoffset);
            imm = fetch_byte_imm();
            DECODE_PRINTF2(",%d\n", (s32)imm);
            TRACE_AND_STEP();
            res = (s16)srcval * (s16)imm;
            if ((((res & 0x8000) == 0) && ((res >> 16) == 0x0000)) ||
(((res & 0x8000) != 0) && ((res >> 16) == 0xFFFF))) {
                CLEAR_FLAG(F_CF);
                CLEAR_FLAG(F_OF);
            } else {
                SET_FLAG(F_CF);
                SET_FLAG(F_OF);
            }
            *destreg = (u16)res;
        }
    } else {                    /* register to register */
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            u32 *destreg,*srcreg;
            u32 res_lo,res_hi;

            destreg = DECODE_RM_LONG_REGISTER(rh);
            DECODE_PRINTF(",");
            srcreg = DECODE_RM_LONG_REGISTER(rl);
            imm = fetch_byte_imm();
            DECODE_PRINTF2(",%d\n", (s32)imm);
            TRACE_AND_STEP();
            imul_long_direct(&res_lo,&res_hi,(s32)*srcreg,(s32)imm);
            if ((((res_lo & 0x80000000) == 0) && (res_hi == 0x00000000)) ||
                (((res_lo & 0x80000000) != 0) && (res_hi == 0xFFFFFFFF))) {
                CLEAR_FLAG(F_CF);
                CLEAR_FLAG(F_OF);
            } else {
                SET_FLAG(F_CF);
                SET_FLAG(F_OF);
            }
            *destreg = (u32)res_lo;
        } else {
            u16 *destreg,*srcreg;
            u32 res;

            destreg = DECODE_RM_WORD_REGISTER(rh);
            DECODE_PRINTF(",");
            srcreg = DECODE_RM_WORD_REGISTER(rl);
            imm = fetch_byte_imm();
            DECODE_PRINTF2(",%d\n", (s32)imm);
            TRACE_AND_STEP();
            res = (s16)*srcreg * (s16)imm;
            if ((((res & 0x8000) == 0) && ((res >> 16) == 0x0000)) ||
                (((res & 0x8000) != 0) && ((res >> 16) == 0xFFFF))) {
                CLEAR_FLAG(F_CF);
                CLEAR_FLAG(F_OF);
            } else {
                SET_FLAG(F_CF);
                SET_FLAG(F_OF);
            }
            *destreg = (u16)res;
        }
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x6c
****************************************************************************/
void x86emuOp_ins_byte(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("INSB\n");
    /* NOTE(review): ins() runs before TRACE_AND_STEP() here, unlike the
     * ALU handlers which trace first — looks intentional (same ordering
     * in the other string-I/O handlers); confirm before reordering. */
    ins(1);
    TRACE_AND_STEP();
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x6d
****************************************************************************/
void x86emuOp_ins_word(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    if (M.x86.mode &
SYSMODE_PREFIX_DATA) {
        DECODE_PRINTF("INSD\n");
        ins(4);
    } else {
        DECODE_PRINTF("INSW\n");
        ins(2);
    }
    TRACE_AND_STEP();
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x6e
****************************************************************************/
void x86emuOp_outs_byte(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("OUTSB\n");
    outs(1);
    TRACE_AND_STEP();
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x6f
****************************************************************************/
void x86emuOp_outs_word(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        DECODE_PRINTF("OUTSD\n");
        outs(4);
    } else {
        DECODE_PRINTF("OUTSW\n");
        outs(2);
    }
    TRACE_AND_STEP();
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x70 - 0x7F
****************************************************************************/
int x86emu_check_jump_condition(u8 op);

void x86emuOp_jump_near_cond(u8 op1)
{
    s8 offset;
    u16 target;
    int cond;

    /* jump to byte offset if the condition (low nibble of opcode) holds */
    START_OF_INSTR();
    cond = x86emu_check_jump_condition(op1 & 0xF);
    /* the target is computed from IP *after* the offset byte is fetched */
    offset = (s8)fetch_byte_imm();
    target = (u16)(M.x86.R_IP + (s16)offset);
    DECODE_PRINTF2("%x\n", target);
    TRACE_AND_STEP();
    if (cond)
        M.x86.R_IP = target;
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x80
****************************************************************************/
void x86emuOp_opc80_byte_RM_IMM(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    u8 *destreg;
    uint destoffset;
    u8 imm;
    u8 destval;

    /*
* Weirdo special case instruction format.  Part of the opcode
     * held below in "RH".  Doubly nested case would result, except
     * that the decoded instruction
     */
    START_OF_INSTR();
    FETCH_DECODE_MODRM(mod, rh, rl);
#ifdef DEBUG
    if (DEBUG_DECODE()) {
        /* XXX DECODE_PRINTF may be changed to something more
           general, so that it is important to leave the strings
           in the same format, even though the result is that the
           above test is done twice. */

        switch (rh) {
        case 0:
            DECODE_PRINTF("ADD\t");
            break;
        case 1:
            DECODE_PRINTF("OR\t");
            break;
        case 2:
            DECODE_PRINTF("ADC\t");
            break;
        case 3:
            DECODE_PRINTF("SBB\t");
            break;
        case 4:
            DECODE_PRINTF("AND\t");
            break;
        case 5:
            DECODE_PRINTF("SUB\t");
            break;
        case 6:
            DECODE_PRINTF("XOR\t");
            break;
        case 7:
            DECODE_PRINTF("CMP\t");
            break;
        }
    }
#endif
    /* know operation, decode the mod byte to find the addressing
       mode. */
    if (mod < 3) {
        DECODE_PRINTF("BYTE PTR ");
        destoffset = decode_rmXX_address(mod, rl);
        DECODE_PRINTF(",");
        destval = fetch_data_byte(destoffset);
        imm = fetch_byte_imm();
        DECODE_PRINTF2("%x\n", imm);
        TRACE_AND_STEP();
        destval = (*genop_byte_operation[rh]) (destval, imm);
        if (rh != 7)            /* CMP only sets flags: no write-back */
            store_data_byte(destoffset, destval);
    } else {                    /* register to register */
        destreg = DECODE_RM_BYTE_REGISTER(rl);
        DECODE_PRINTF(",");
        imm = fetch_byte_imm();
        DECODE_PRINTF2("%x\n", imm);
        TRACE_AND_STEP();
        destval = (*genop_byte_operation[rh]) (*destreg, imm);
        if (rh != 7)            /* CMP only sets flags: no write-back */
            *destreg = destval;
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x81
****************************************************************************/
void x86emuOp_opc81_word_RM_IMM(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    uint destoffset;

    /*
     * Weirdo special case instruction format.  Part of the opcode
     * held below in "RH".
Doubly nested case would result, except
     * that the decoded instruction
     */
    START_OF_INSTR();
    FETCH_DECODE_MODRM(mod, rh, rl);
#ifdef DEBUG
    if (DEBUG_DECODE()) {
        /* XXX DECODE_PRINTF may be changed to something more
           general, so that it is important to leave the strings
           in the same format, even though the result is that the
           above test is done twice. */

        switch (rh) {
        case 0:
            DECODE_PRINTF("ADD\t");
            break;
        case 1:
            DECODE_PRINTF("OR\t");
            break;
        case 2:
            DECODE_PRINTF("ADC\t");
            break;
        case 3:
            DECODE_PRINTF("SBB\t");
            break;
        case 4:
            DECODE_PRINTF("AND\t");
            break;
        case 5:
            DECODE_PRINTF("SUB\t");
            break;
        case 6:
            DECODE_PRINTF("XOR\t");
            break;
        case 7:
            DECODE_PRINTF("CMP\t");
            break;
        }
    }
#endif
    /*
     * Know operation, decode the mod byte to find the addressing
     * mode.
     */
    if (mod < 3) {
        DECODE_PRINTF("DWORD PTR ");
        destoffset = decode_rmXX_address(mod, rl);
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            u32 destval,imm;

            DECODE_PRINTF(",");
            destval = fetch_data_long(destoffset);
            imm = fetch_long_imm();
            DECODE_PRINTF2("%x\n", imm);
            TRACE_AND_STEP();
            destval = (*genop_long_operation[rh]) (destval, imm);
            if (rh != 7)        /* CMP only sets flags: no write-back */
                store_data_long(destoffset, destval);
        } else {
            u16 destval,imm;

            DECODE_PRINTF(",");
            destval = fetch_data_word(destoffset);
            imm = fetch_word_imm();
            DECODE_PRINTF2("%x\n", imm);
            TRACE_AND_STEP();
            destval = (*genop_word_operation[rh]) (destval, imm);
            if (rh != 7)        /* CMP only sets flags: no write-back */
                store_data_word(destoffset, destval);
        }
    } else {                    /* register to register */
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            u32 *destreg;
            u32 destval,imm;

            destreg = DECODE_RM_LONG_REGISTER(rl);
            DECODE_PRINTF(",");
            imm = fetch_long_imm();
            DECODE_PRINTF2("%x\n", imm);
            TRACE_AND_STEP();
            destval = (*genop_long_operation[rh]) (*destreg, imm);
            if (rh != 7)        /* CMP only sets flags: no write-back */
                *destreg = destval;
        } else {
            u16 *destreg;
            u16 destval,imm;

            destreg = DECODE_RM_WORD_REGISTER(rl);
DECODE_PRINTF(","); + imm = fetch_word_imm(); + DECODE_PRINTF2("%x\n", imm); + TRACE_AND_STEP(); + destval = (*genop_word_operation[rh]) (*destreg, imm); + if (rh != 7) + *destreg = destval; + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x82 +****************************************************************************/ +void x86emuOp_opc82_byte_RM_IMM(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + u8 *destreg; + uint destoffset; + u8 imm; + u8 destval; + + /* + * Weirdo special case instruction format. Part of the opcode + * held below in "RH". Doubly nested case would result, except + * that the decoded instruction Similar to opcode 81, except that + * the immediate byte is sign extended to a word length. + */ + START_OF_INSTR(); + FETCH_DECODE_MODRM(mod, rh, rl); +#ifdef DEBUG + if (DEBUG_DECODE()) { + /* XXX DECODE_PRINTF may be changed to something more + general, so that it is important to leave the strings + in the same format, even though the result is that the + above test is done twice. */ + switch (rh) { + case 0: + DECODE_PRINTF("ADD\t"); + break; + case 1: + DECODE_PRINTF("OR\t"); + break; + case 2: + DECODE_PRINTF("ADC\t"); + break; + case 3: + DECODE_PRINTF("SBB\t"); + break; + case 4: + DECODE_PRINTF("AND\t"); + break; + case 5: + DECODE_PRINTF("SUB\t"); + break; + case 6: + DECODE_PRINTF("XOR\t"); + break; + case 7: + DECODE_PRINTF("CMP\t"); + break; + } + } +#endif + /* know operation, decode the mod byte to find the addressing + mode. 
*/ + if (mod < 3) { + DECODE_PRINTF("BYTE PTR "); + destoffset = decode_rmXX_address(mod, rl); + destval = fetch_data_byte(destoffset); + imm = fetch_byte_imm(); + DECODE_PRINTF2(",%x\n", imm); + TRACE_AND_STEP(); + destval = (*genop_byte_operation[rh]) (destval, imm); + if (rh != 7) + store_data_byte(destoffset, destval); + } else { /* register to register */ + destreg = DECODE_RM_BYTE_REGISTER(rl); + imm = fetch_byte_imm(); + DECODE_PRINTF2(",%x\n", imm); + TRACE_AND_STEP(); + destval = (*genop_byte_operation[rh]) (*destreg, imm); + if (rh != 7) + *destreg = destval; + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x83 +****************************************************************************/ +void x86emuOp_opc83_word_RM_IMM(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + uint destoffset; + + /* + * Weirdo special case instruction format. Part of the opcode + * held below in "RH". Doubly nested case would result, except + * that the decoded instruction Similar to opcode 81, except that + * the immediate byte is sign extended to a word length. + */ + START_OF_INSTR(); + FETCH_DECODE_MODRM(mod, rh, rl); +#ifdef DEBUG + if (DEBUG_DECODE()) { + /* XXX DECODE_PRINTF may be changed to something more + general, so that it is important to leave the strings + in the same format, even though the result is that the + above test is done twice. */ + switch (rh) { + case 0: + DECODE_PRINTF("ADD\t"); + break; + case 1: + DECODE_PRINTF("OR\t"); + break; + case 2: + DECODE_PRINTF("ADC\t"); + break; + case 3: + DECODE_PRINTF("SBB\t"); + break; + case 4: + DECODE_PRINTF("AND\t"); + break; + case 5: + DECODE_PRINTF("SUB\t"); + break; + case 6: + DECODE_PRINTF("XOR\t"); + break; + case 7: + DECODE_PRINTF("CMP\t"); + break; + } + } +#endif + /* know operation, decode the mod byte to find the addressing + mode. 
*/ + if (mod < 3) { + DECODE_PRINTF("DWORD PTR "); + destoffset = decode_rmXX_address(mod,rl); + + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 destval,imm; + + destval = fetch_data_long(destoffset); + imm = (s8) fetch_byte_imm(); + DECODE_PRINTF2(",%x\n", imm); + TRACE_AND_STEP(); + destval = (*genop_long_operation[rh]) (destval, imm); + if (rh != 7) + store_data_long(destoffset, destval); + } else { + u16 destval,imm; + + destval = fetch_data_word(destoffset); + imm = (s8) fetch_byte_imm(); + DECODE_PRINTF2(",%x\n", imm); + TRACE_AND_STEP(); + destval = (*genop_word_operation[rh]) (destval, imm); + if (rh != 7) + store_data_word(destoffset, destval); + } + } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *destreg; + u32 destval,imm; + + destreg = DECODE_RM_LONG_REGISTER(rl); + imm = (s8) fetch_byte_imm(); + DECODE_PRINTF2(",%x\n", imm); + TRACE_AND_STEP(); + destval = (*genop_long_operation[rh]) (*destreg, imm); + if (rh != 7) + *destreg = destval; + } else { + u16 *destreg; + u16 destval,imm; + + destreg = DECODE_RM_WORD_REGISTER(rl); + imm = (s8) fetch_byte_imm(); + DECODE_PRINTF2(",%x\n", imm); + TRACE_AND_STEP(); + destval = (*genop_word_operation[rh]) (*destreg, imm); + if (rh != 7) + *destreg = destval; + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x84 +****************************************************************************/ +void x86emuOp_test_byte_RM_R(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + u8 *destreg, *srcreg; + uint destoffset; + u8 destval; + + START_OF_INSTR(); + DECODE_PRINTF("TEST\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + destoffset = decode_rmXX_address(mod, rl); + DECODE_PRINTF(","); + destval = fetch_data_byte(destoffset); + srcreg = DECODE_RM_BYTE_REGISTER(rh); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + test_byte(destval, *srcreg); + } else { /* register to 
register */ + destreg = DECODE_RM_BYTE_REGISTER(rl); + DECODE_PRINTF(","); + srcreg = DECODE_RM_BYTE_REGISTER(rh); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + test_byte(*destreg, *srcreg); + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x85 +****************************************************************************/ +void x86emuOp_test_word_RM_R(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + uint destoffset; + + START_OF_INSTR(); + DECODE_PRINTF("TEST\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + destoffset = decode_rmXX_address(mod, rl); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 destval; + u32 *srcreg; + + DECODE_PRINTF(","); + destval = fetch_data_long(destoffset); + srcreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + test_long(destval, *srcreg); + } else { + u16 destval; + u16 *srcreg; + + DECODE_PRINTF(","); + destval = fetch_data_word(destoffset); + srcreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + test_word(destval, *srcreg); + } + } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *destreg,*srcreg; + + destreg = DECODE_RM_LONG_REGISTER(rl); + DECODE_PRINTF(","); + srcreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + test_long(*destreg, *srcreg); + } else { + u16 *destreg,*srcreg; + + destreg = DECODE_RM_WORD_REGISTER(rl); + DECODE_PRINTF(","); + srcreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + test_word(*destreg, *srcreg); + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x86 +****************************************************************************/ +void x86emuOp_xchg_byte_RM_R(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + u8 *destreg, *srcreg; + uint 
destoffset; + u8 destval; + u8 tmp; + + START_OF_INSTR(); + DECODE_PRINTF("XCHG\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + destoffset = decode_rmXX_address(mod, rl); + DECODE_PRINTF(","); + destval = fetch_data_byte(destoffset); + srcreg = DECODE_RM_BYTE_REGISTER(rh); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + tmp = *srcreg; + *srcreg = destval; + destval = tmp; + store_data_byte(destoffset, destval); + } else { /* register to register */ + destreg = DECODE_RM_BYTE_REGISTER(rl); + DECODE_PRINTF(","); + srcreg = DECODE_RM_BYTE_REGISTER(rh); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + tmp = *srcreg; + *srcreg = *destreg; + *destreg = tmp; + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x87 +****************************************************************************/ +void x86emuOp_xchg_word_RM_R(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + uint destoffset; + + START_OF_INSTR(); + DECODE_PRINTF("XCHG\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + destoffset = decode_rmXX_address(mod, rl); + DECODE_PRINTF(","); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *srcreg; + u32 destval,tmp; + + destval = fetch_data_long(destoffset); + srcreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + tmp = *srcreg; + *srcreg = destval; + destval = tmp; + store_data_long(destoffset, destval); + } else { + u16 *srcreg; + u16 destval,tmp; + + destval = fetch_data_word(destoffset); + srcreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + tmp = *srcreg; + *srcreg = destval; + destval = tmp; + store_data_word(destoffset, destval); + } + } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *destreg,*srcreg; + u32 tmp; + + destreg = DECODE_RM_LONG_REGISTER(rl); + DECODE_PRINTF(","); + srcreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + 
tmp = *srcreg; + *srcreg = *destreg; + *destreg = tmp; + } else { + u16 *destreg,*srcreg; + u16 tmp; + + destreg = DECODE_RM_WORD_REGISTER(rl); + DECODE_PRINTF(","); + srcreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + tmp = *srcreg; + *srcreg = *destreg; + *destreg = tmp; + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x88 +****************************************************************************/ +void x86emuOp_mov_byte_RM_R(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + u8 *destreg, *srcreg; + uint destoffset; + + START_OF_INSTR(); + DECODE_PRINTF("MOV\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + destoffset = decode_rmXX_address(mod, rl); + DECODE_PRINTF(","); + srcreg = DECODE_RM_BYTE_REGISTER(rh); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + store_data_byte(destoffset, *srcreg); + } else { /* register to register */ + destreg = DECODE_RM_BYTE_REGISTER(rl); + DECODE_PRINTF(","); + srcreg = DECODE_RM_BYTE_REGISTER(rh); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *destreg = *srcreg; + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x89 +****************************************************************************/ +void x86emuOp_mov_word_RM_R(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + uint destoffset; + + START_OF_INSTR(); + DECODE_PRINTF("MOV\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + destoffset = decode_rmXX_address(mod, rl); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *srcreg; + + DECODE_PRINTF(","); + srcreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + store_data_long(destoffset, *srcreg); + } else { + u16 *srcreg; + + DECODE_PRINTF(","); + srcreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + 
store_data_word(destoffset, *srcreg); + } + } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *destreg,*srcreg; + + destreg = DECODE_RM_LONG_REGISTER(rl); + DECODE_PRINTF(","); + srcreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *destreg = *srcreg; + } else { + u16 *destreg,*srcreg; + + destreg = DECODE_RM_WORD_REGISTER(rl); + DECODE_PRINTF(","); + srcreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *destreg = *srcreg; + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x8a +****************************************************************************/ +void x86emuOp_mov_byte_R_RM(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + u8 *destreg, *srcreg; + uint srcoffset; + u8 srcval; + + START_OF_INSTR(); + DECODE_PRINTF("MOV\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + destreg = DECODE_RM_BYTE_REGISTER(rh); + DECODE_PRINTF(","); + srcoffset = decode_rmXX_address(mod, rl); + srcval = fetch_data_byte(srcoffset); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *destreg = srcval; + } else { /* register to register */ + destreg = DECODE_RM_BYTE_REGISTER(rh); + DECODE_PRINTF(","); + srcreg = DECODE_RM_BYTE_REGISTER(rl); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *destreg = *srcreg; + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x8b +****************************************************************************/ +void x86emuOp_mov_word_R_RM(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + uint srcoffset; + + START_OF_INSTR(); + DECODE_PRINTF("MOV\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *destreg; + u32 srcval; + + destreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF(","); + srcoffset = 
decode_rmXX_address(mod, rl); + srcval = fetch_data_long(srcoffset); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *destreg = srcval; + } else { + u16 *destreg; + u16 srcval; + + destreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(","); + srcoffset = decode_rmXX_address(mod, rl); + srcval = fetch_data_word(srcoffset); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *destreg = srcval; + } + } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *destreg, *srcreg; + + destreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF(","); + srcreg = DECODE_RM_LONG_REGISTER(rl); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *destreg = *srcreg; + } else { + u16 *destreg, *srcreg; + + destreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(","); + srcreg = DECODE_RM_WORD_REGISTER(rl); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *destreg = *srcreg; + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x8c +****************************************************************************/ +void x86emuOp_mov_word_RM_SR(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + u16 *destreg, *srcreg; + uint destoffset; + u16 destval; + + START_OF_INSTR(); + DECODE_PRINTF("MOV\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + destoffset = decode_rmXX_address(mod, rl); + DECODE_PRINTF(","); + srcreg = decode_rm_seg_register(rh); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + destval = *srcreg; + store_data_word(destoffset, destval); + } else { /* register to register */ + destreg = DECODE_RM_WORD_REGISTER(rl); + DECODE_PRINTF(","); + srcreg = decode_rm_seg_register(rh); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *destreg = *srcreg; + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x8d 
+****************************************************************************/ +void x86emuOp_lea_word_R_M(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + u16 *srcreg; + uint destoffset; + +/* + * TODO: Need to handle address size prefix! + * + * lea eax,[eax+ebx*2] ?? + */ + + START_OF_INSTR(); + DECODE_PRINTF("LEA\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + srcreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(","); + destoffset = decode_rmXX_address(mod, rl); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *srcreg = (u16)destoffset; + } + /* } else { undefined. Do nothing. } */ + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x8e +****************************************************************************/ +void x86emuOp_mov_word_SR_RM(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + u16 *destreg, *srcreg; + uint srcoffset; + u16 srcval; + + START_OF_INSTR(); + DECODE_PRINTF("MOV\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + destreg = decode_rm_seg_register(rh); + DECODE_PRINTF(","); + srcoffset = decode_rmXX_address(mod, rl); + srcval = fetch_data_word(srcoffset); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *destreg = srcval; + } else { /* register to register */ + destreg = decode_rm_seg_register(rh); + DECODE_PRINTF(","); + srcreg = DECODE_RM_WORD_REGISTER(rl); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *destreg = *srcreg; + } + /* + * Clean up, and reset all the R_xSP pointers to the correct + * locations. This is about 3x too much overhead (doing all the + * segreg ptrs when only one is needed, but this instruction + * *cannot* be that common, and this isn't too much work anyway. 
+ */
+    DECODE_CLEAR_SEGOVR();
+    END_OF_INSTR();
+}
+
+/****************************************************************************
+REMARKS:
+Handles opcode 0x8f
+****************************************************************************/
+void x86emuOp_pop_RM(u8 X86EMU_UNUSED(op1))
+{
+    int mod, rl, rh;
+    uint offset;
+
+    START_OF_INSTR();
+    DECODE_PRINTF("POP\t");
+    FETCH_DECODE_MODRM(mod, rh, rl);
+    /* Opcode 0x8f is only defined for a zero reg field (POP r/m). */
+    if (rh != 0) {
+        DECODE_PRINTF("ILLEGAL DECODE OF OPCODE 8F\n");
+        HALT_SYS();
+    }
+    if (mod < 3) {
+        /* Pop from the stack into a memory operand.  The trailing
+           newline and trace are identical for both operand sizes, so
+           emit them once before choosing the width. */
+        offset = decode_rmXX_address(mod, rl);
+        DECODE_PRINTF("\n");
+        TRACE_AND_STEP();
+        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
+            store_data_long(offset, pop_long());
+        } else {
+            store_data_word(offset, pop_word());
+        }
+    } else {
+        /* Pop from the stack into a register operand. */
+        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
+            u32 *reg32 = DECODE_RM_LONG_REGISTER(rl);
+
+            DECODE_PRINTF("\n");
+            TRACE_AND_STEP();
+            *reg32 = pop_long();
+        } else {
+            u16 *reg16 = DECODE_RM_WORD_REGISTER(rl);
+
+            DECODE_PRINTF("\n");
+            TRACE_AND_STEP();
+            *reg16 = pop_word();
+        }
+    }
+    DECODE_CLEAR_SEGOVR();
+    END_OF_INSTR();
+}
+
+/****************************************************************************
+REMARKS:
+Handles opcode 0x90
+****************************************************************************/
+void x86emuOp_nop(u8 X86EMU_UNUSED(op1))
+{
+    /* NOP: decode, trace, and intentionally do nothing. */
+    START_OF_INSTR();
+    DECODE_PRINTF("NOP\n");
+    TRACE_AND_STEP();
+    DECODE_CLEAR_SEGOVR();
+    END_OF_INSTR();
+}
+
+/****************************************************************************
+REMARKS:
+Handles opcode 0x91-0x97
+****************************************************************************/
+void x86emuOp_xchg_word_AX_register(u8 X86EMU_UNUSED(op1))
+{
+    u32 tmp;
+
+    op1 &= 0x7;
+
+    START_OF_INSTR();
+
+    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
+        u32 *reg32;
DECODE_PRINTF("XCHG\tEAX,"); + reg32 = DECODE_RM_LONG_REGISTER(op1); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + tmp = M.x86.R_EAX; + M.x86.R_EAX = *reg32; + *reg32 = tmp; + } else { + u16 *reg16; + DECODE_PRINTF("XCHG\tAX,"); + reg16 = DECODE_RM_WORD_REGISTER(op1); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + tmp = M.x86.R_AX; + M.x86.R_EAX = *reg16; + *reg16 = (u16)tmp; + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x98 +****************************************************************************/ +void x86emuOp_cbw(u8 X86EMU_UNUSED(op1)) +{ + START_OF_INSTR(); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + DECODE_PRINTF("CWDE\n"); + } else { + DECODE_PRINTF("CBW\n"); + } + TRACE_AND_STEP(); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + if (M.x86.R_AX & 0x8000) { + M.x86.R_EAX |= 0xffff0000; + } else { + M.x86.R_EAX &= 0x0000ffff; + } + } else { + if (M.x86.R_AL & 0x80) { + M.x86.R_AH = 0xff; + } else { + M.x86.R_AH = 0x0; + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x99 +****************************************************************************/ +void x86emuOp_cwd(u8 X86EMU_UNUSED(op1)) +{ + START_OF_INSTR(); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + DECODE_PRINTF("CDQ\n"); + } else { + DECODE_PRINTF("CWD\n"); + } + DECODE_PRINTF("CWD\n"); + TRACE_AND_STEP(); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + if (M.x86.R_EAX & 0x80000000) { + M.x86.R_EDX = 0xffffffff; + } else { + M.x86.R_EDX = 0x0; + } + } else { + if (M.x86.R_AX & 0x8000) { + M.x86.R_DX = 0xffff; + } else { + M.x86.R_DX = 0x0; + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x9a +****************************************************************************/ +void 
x86emuOp_call_far_IMM(u8 X86EMU_UNUSED(op1)) +{ + u16 farseg, faroff; + + START_OF_INSTR(); + DECODE_PRINTF("CALL\t"); + faroff = fetch_word_imm(); + farseg = fetch_word_imm(); + DECODE_PRINTF2("%04x:", farseg); + DECODE_PRINTF2("%04x\n", faroff); + CALL_TRACE(M.x86.saved_cs, M.x86.saved_ip, farseg, faroff, "FAR "); + + /* XXX + * + * Hooked interrupt vectors calling into our "BIOS" will cause + * problems unless all intersegment stuff is checked for BIOS + * access. Check needed here. For moment, let it alone. + */ + TRACE_AND_STEP(); + push_word(M.x86.R_CS); + M.x86.R_CS = farseg; + push_word(M.x86.R_IP); + M.x86.R_IP = faroff; + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x9b +****************************************************************************/ +void x86emuOp_wait(u8 X86EMU_UNUSED(op1)) +{ + START_OF_INSTR(); + DECODE_PRINTF("WAIT"); + TRACE_AND_STEP(); + /* NADA. */ + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x9c +****************************************************************************/ +void x86emuOp_pushf_word(u8 X86EMU_UNUSED(op1)) +{ + u32 flags; + + START_OF_INSTR(); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + DECODE_PRINTF("PUSHFD\n"); + } else { + DECODE_PRINTF("PUSHF\n"); + } + TRACE_AND_STEP(); + + /* clear out *all* bits not representing flags, and turn on real bits */ + flags = (M.x86.R_EFLG & F_MSK) | F_ALWAYS_ON; + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + push_long(flags); + } else { + push_word((u16)flags); + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x9d +****************************************************************************/ +void x86emuOp_popf_word(u8 X86EMU_UNUSED(op1)) +{ + START_OF_INSTR(); + if 
(M.x86.mode & SYSMODE_PREFIX_DATA) { + DECODE_PRINTF("POPFD\n"); + } else { + DECODE_PRINTF("POPF\n"); + } + TRACE_AND_STEP(); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + M.x86.R_EFLG = pop_long(); + } else { + M.x86.R_FLG = pop_word(); + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x9e +****************************************************************************/ +void x86emuOp_sahf(u8 X86EMU_UNUSED(op1)) +{ + START_OF_INSTR(); + DECODE_PRINTF("SAHF\n"); + TRACE_AND_STEP(); + /* clear the lower bits of the flag register */ + M.x86.R_FLG &= 0xffffff00; + /* or in the AH register into the flags register */ + M.x86.R_FLG |= M.x86.R_AH; + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x9f +****************************************************************************/ +void x86emuOp_lahf(u8 X86EMU_UNUSED(op1)) +{ + START_OF_INSTR(); + DECODE_PRINTF("LAHF\n"); + TRACE_AND_STEP(); + M.x86.R_AH = (u8)(M.x86.R_FLG & 0xff); + /*undocumented TC++ behavior??? Nope. It's documented, but + you have too look real hard to notice it. 
*/ + M.x86.R_AH |= 0x2; + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xa0 +****************************************************************************/ +void x86emuOp_mov_AL_M_IMM(u8 X86EMU_UNUSED(op1)) +{ + u16 offset; + + START_OF_INSTR(); + DECODE_PRINTF("MOV\tAL,"); + offset = fetch_word_imm(); + DECODE_PRINTF2("[%04x]\n", offset); + TRACE_AND_STEP(); + M.x86.R_AL = fetch_data_byte(offset); + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xa1 +****************************************************************************/ +void x86emuOp_mov_AX_M_IMM(u8 X86EMU_UNUSED(op1)) +{ + u16 offset; + + START_OF_INSTR(); + offset = fetch_word_imm(); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + DECODE_PRINTF2("MOV\tEAX,[%04x]\n", offset); + } else { + DECODE_PRINTF2("MOV\tAX,[%04x]\n", offset); + } + TRACE_AND_STEP(); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + M.x86.R_EAX = fetch_data_long(offset); + } else { + M.x86.R_AX = fetch_data_word(offset); + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xa2 +****************************************************************************/ +void x86emuOp_mov_M_AL_IMM(u8 X86EMU_UNUSED(op1)) +{ + u16 offset; + + START_OF_INSTR(); + DECODE_PRINTF("MOV\t"); + offset = fetch_word_imm(); + DECODE_PRINTF2("[%04x],AL\n", offset); + TRACE_AND_STEP(); + store_data_byte(offset, M.x86.R_AL); + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xa3 +****************************************************************************/ +void x86emuOp_mov_M_AX_IMM(u8 X86EMU_UNUSED(op1)) +{ + u16 offset; + + START_OF_INSTR(); + offset = 
fetch_word_imm(); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + DECODE_PRINTF2("MOV\t[%04x],EAX\n", offset); + } else { + DECODE_PRINTF2("MOV\t[%04x],AX\n", offset); + } + TRACE_AND_STEP(); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + store_data_long(offset, M.x86.R_EAX); + } else { + store_data_word(offset, M.x86.R_AX); + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xa4 +****************************************************************************/ +void x86emuOp_movs_byte(u8 X86EMU_UNUSED(op1)) +{ + u8 val; + u32 count; + int inc; + + START_OF_INSTR(); + DECODE_PRINTF("MOVS\tBYTE\n"); + if (ACCESS_FLAG(F_DF)) /* down */ + inc = -1; + else + inc = 1; + TRACE_AND_STEP(); + count = 1; + if (M.x86.mode & (SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE)) { + /* dont care whether REPE or REPNE */ + /* move them until CX is ZERO. */ + count = M.x86.R_CX; + M.x86.R_CX = 0; + M.x86.mode &= ~(SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE); + } + while (count--) { + val = fetch_data_byte(M.x86.R_SI); + store_data_byte_abs(M.x86.R_ES, M.x86.R_DI, val); + M.x86.R_SI += inc; + M.x86.R_DI += inc; + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xa5 +****************************************************************************/ +void x86emuOp_movs_word(u8 X86EMU_UNUSED(op1)) +{ + u32 val; + int inc; + u32 count; + + START_OF_INSTR(); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + DECODE_PRINTF("MOVS\tDWORD\n"); + if (ACCESS_FLAG(F_DF)) /* down */ + inc = -4; + else + inc = 4; + } else { + DECODE_PRINTF("MOVS\tWORD\n"); + if (ACCESS_FLAG(F_DF)) /* down */ + inc = -2; + else + inc = 2; + } + TRACE_AND_STEP(); + count = 1; + if (M.x86.mode & (SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE)) { + /* dont care whether REPE or REPNE */ + /* move them until CX is ZERO. 
*/ + count = M.x86.R_CX; + M.x86.R_CX = 0; + M.x86.mode &= ~(SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE); + } + while (count--) { + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + val = fetch_data_long(M.x86.R_SI); + store_data_long_abs(M.x86.R_ES, M.x86.R_DI, val); + } else { + val = fetch_data_word(M.x86.R_SI); + store_data_word_abs(M.x86.R_ES, M.x86.R_DI, (u16)val); + } + M.x86.R_SI += inc; + M.x86.R_DI += inc; + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xa6 +****************************************************************************/ +void x86emuOp_cmps_byte(u8 X86EMU_UNUSED(op1)) +{ + s8 val1, val2; + int inc; + + START_OF_INSTR(); + DECODE_PRINTF("CMPS\tBYTE\n"); + TRACE_AND_STEP(); + if (ACCESS_FLAG(F_DF)) /* down */ + inc = -1; + else + inc = 1; + + if (M.x86.mode & (SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE)) { + /* REPE */ + /* move them until CX is ZERO. */ + while (M.x86.R_CX != 0) { + val1 = fetch_data_byte(M.x86.R_SI); + val2 = fetch_data_byte_abs(M.x86.R_ES, M.x86.R_DI); + cmp_byte(val1, val2); + M.x86.R_CX -= 1; + M.x86.R_SI += inc; + M.x86.R_DI += inc; + if ( (M.x86.mode & SYSMODE_PREFIX_REPE) && (ACCESS_FLAG(F_ZF) == 0) ) break; + if ( (M.x86.mode & SYSMODE_PREFIX_REPNE) && ACCESS_FLAG(F_ZF) ) break; + } + M.x86.mode &= ~(SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE); + } else { + val1 = fetch_data_byte(M.x86.R_SI); + val2 = fetch_data_byte_abs(M.x86.R_ES, M.x86.R_DI); + cmp_byte(val1, val2); + M.x86.R_SI += inc; + M.x86.R_DI += inc; + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xa7 +****************************************************************************/ +void x86emuOp_cmps_word(u8 X86EMU_UNUSED(op1)) +{ + u32 val1,val2; + int inc; + + START_OF_INSTR(); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + 
DECODE_PRINTF("CMPS\tDWORD\n"); + inc = 4; + } else { + DECODE_PRINTF("CMPS\tWORD\n"); + inc = 2; + } + if (ACCESS_FLAG(F_DF)) /* down */ + inc = -inc; + + TRACE_AND_STEP(); + if (M.x86.mode & (SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE)) { + /* REPE */ + /* move them until CX is ZERO. */ + while (M.x86.R_CX != 0) { + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + val1 = fetch_data_long(M.x86.R_SI); + val2 = fetch_data_long_abs(M.x86.R_ES, M.x86.R_DI); + cmp_long(val1, val2); + } else { + val1 = fetch_data_word(M.x86.R_SI); + val2 = fetch_data_word_abs(M.x86.R_ES, M.x86.R_DI); + cmp_word((u16)val1, (u16)val2); + } + M.x86.R_CX -= 1; + M.x86.R_SI += inc; + M.x86.R_DI += inc; + if ( (M.x86.mode & SYSMODE_PREFIX_REPE) && ACCESS_FLAG(F_ZF) == 0 ) break; + if ( (M.x86.mode & SYSMODE_PREFIX_REPNE) && ACCESS_FLAG(F_ZF) ) break; + } + M.x86.mode &= ~(SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE); + } else { + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + val1 = fetch_data_long(M.x86.R_SI); + val2 = fetch_data_long_abs(M.x86.R_ES, M.x86.R_DI); + cmp_long(val1, val2); + } else { + val1 = fetch_data_word(M.x86.R_SI); + val2 = fetch_data_word_abs(M.x86.R_ES, M.x86.R_DI); + cmp_word((u16)val1, (u16)val2); + } + M.x86.R_SI += inc; + M.x86.R_DI += inc; + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xa8 +****************************************************************************/ +void x86emuOp_test_AL_IMM(u8 X86EMU_UNUSED(op1)) +{ + int imm; + + START_OF_INSTR(); + DECODE_PRINTF("TEST\tAL,"); + imm = fetch_byte_imm(); + DECODE_PRINTF2("%04x\n", imm); + TRACE_AND_STEP(); + test_byte(M.x86.R_AL, (u8)imm); + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xa9 +****************************************************************************/ +void x86emuOp_test_AX_IMM(u8 
X86EMU_UNUSED(op1)) +{ + u32 srcval; + + START_OF_INSTR(); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + DECODE_PRINTF("TEST\tEAX,"); + srcval = fetch_long_imm(); + } else { + DECODE_PRINTF("TEST\tAX,"); + srcval = fetch_word_imm(); + } + DECODE_PRINTF2("%x\n", srcval); + TRACE_AND_STEP(); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + test_long(M.x86.R_EAX, srcval); + } else { + test_word(M.x86.R_AX, (u16)srcval); + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xaa +****************************************************************************/ +void x86emuOp_stos_byte(u8 X86EMU_UNUSED(op1)) +{ + int inc; + + START_OF_INSTR(); + DECODE_PRINTF("STOS\tBYTE\n"); + if (ACCESS_FLAG(F_DF)) /* down */ + inc = -1; + else + inc = 1; + TRACE_AND_STEP(); + if (M.x86.mode & (SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE)) { + /* dont care whether REPE or REPNE */ + /* move them until CX is ZERO. */ + while (M.x86.R_CX != 0) { + store_data_byte_abs(M.x86.R_ES, M.x86.R_DI, M.x86.R_AL); + M.x86.R_CX -= 1; + M.x86.R_DI += inc; + } + M.x86.mode &= ~(SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE); + } else { + store_data_byte_abs(M.x86.R_ES, M.x86.R_DI, M.x86.R_AL); + M.x86.R_DI += inc; + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xab +****************************************************************************/ +void x86emuOp_stos_word(u8 X86EMU_UNUSED(op1)) +{ + int inc; + u32 count; + + START_OF_INSTR(); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + DECODE_PRINTF("STOS\tDWORD\n"); + if (ACCESS_FLAG(F_DF)) /* down */ + inc = -4; + else + inc = 4; + } else { + DECODE_PRINTF("STOS\tWORD\n"); + if (ACCESS_FLAG(F_DF)) /* down */ + inc = -2; + else + inc = 2; + } + TRACE_AND_STEP(); + count = 1; + if (M.x86.mode & (SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE)) { + /* dont care 
whether REPE or REPNE */ + /* move them until CX is ZERO. */ + count = M.x86.R_CX; + M.x86.R_CX = 0; + M.x86.mode &= ~(SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE); + } + while (count--) { + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + store_data_long_abs(M.x86.R_ES, M.x86.R_DI, M.x86.R_EAX); + } else { + store_data_word_abs(M.x86.R_ES, M.x86.R_DI, M.x86.R_AX); + } + M.x86.R_DI += inc; + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xac +****************************************************************************/ +void x86emuOp_lods_byte(u8 X86EMU_UNUSED(op1)) +{ + int inc; + + START_OF_INSTR(); + DECODE_PRINTF("LODS\tBYTE\n"); + TRACE_AND_STEP(); + if (ACCESS_FLAG(F_DF)) /* down */ + inc = -1; + else + inc = 1; + if (M.x86.mode & (SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE)) { + /* dont care whether REPE or REPNE */ + /* move them until CX is ZERO. */ + while (M.x86.R_CX != 0) { + M.x86.R_AL = fetch_data_byte(M.x86.R_SI); + M.x86.R_CX -= 1; + M.x86.R_SI += inc; + } + M.x86.mode &= ~(SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE); + } else { + M.x86.R_AL = fetch_data_byte(M.x86.R_SI); + M.x86.R_SI += inc; + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xad +****************************************************************************/ +void x86emuOp_lods_word(u8 X86EMU_UNUSED(op1)) +{ + int inc; + u32 count; + + START_OF_INSTR(); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + DECODE_PRINTF("LODS\tDWORD\n"); + if (ACCESS_FLAG(F_DF)) /* down */ + inc = -4; + else + inc = 4; + } else { + DECODE_PRINTF("LODS\tWORD\n"); + if (ACCESS_FLAG(F_DF)) /* down */ + inc = -2; + else + inc = 2; + } + TRACE_AND_STEP(); + count = 1; + if (M.x86.mode & (SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE)) { + /* dont care whether REPE or REPNE */ + /* move them until CX is ZERO. 
*/ + count = M.x86.R_CX; + M.x86.R_CX = 0; + M.x86.mode &= ~(SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE); + } + while (count--) { + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + M.x86.R_EAX = fetch_data_long(M.x86.R_SI); + } else { + M.x86.R_AX = fetch_data_word(M.x86.R_SI); + } + M.x86.R_SI += inc; + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xae +****************************************************************************/ +void x86emuOp_scas_byte(u8 X86EMU_UNUSED(op1)) +{ + s8 val2; + int inc; + + START_OF_INSTR(); + DECODE_PRINTF("SCAS\tBYTE\n"); + TRACE_AND_STEP(); + if (ACCESS_FLAG(F_DF)) /* down */ + inc = -1; + else + inc = 1; + if (M.x86.mode & SYSMODE_PREFIX_REPE) { + /* REPE */ + /* move them until CX is ZERO. */ + while (M.x86.R_CX != 0) { + val2 = fetch_data_byte_abs(M.x86.R_ES, M.x86.R_DI); + cmp_byte(M.x86.R_AL, val2); + M.x86.R_CX -= 1; + M.x86.R_DI += inc; + if (ACCESS_FLAG(F_ZF) == 0) + break; + } + M.x86.mode &= ~SYSMODE_PREFIX_REPE; + } else if (M.x86.mode & SYSMODE_PREFIX_REPNE) { + /* REPNE */ + /* move them until CX is ZERO. 
*/ + while (M.x86.R_CX != 0) { + val2 = fetch_data_byte_abs(M.x86.R_ES, M.x86.R_DI); + cmp_byte(M.x86.R_AL, val2); + M.x86.R_CX -= 1; + M.x86.R_DI += inc; + if (ACCESS_FLAG(F_ZF)) + break; /* zero flag set means equal */ + } + M.x86.mode &= ~SYSMODE_PREFIX_REPNE; + } else { + val2 = fetch_data_byte_abs(M.x86.R_ES, M.x86.R_DI); + cmp_byte(M.x86.R_AL, val2); + M.x86.R_DI += inc; + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xaf +****************************************************************************/ +void x86emuOp_scas_word(u8 X86EMU_UNUSED(op1)) +{ + int inc; + u32 val; + + START_OF_INSTR(); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + DECODE_PRINTF("SCAS\tDWORD\n"); + if (ACCESS_FLAG(F_DF)) /* down */ + inc = -4; + else + inc = 4; + } else { + DECODE_PRINTF("SCAS\tWORD\n"); + if (ACCESS_FLAG(F_DF)) /* down */ + inc = -2; + else + inc = 2; + } + TRACE_AND_STEP(); + if (M.x86.mode & SYSMODE_PREFIX_REPE) { + /* REPE */ + /* move them until CX is ZERO. */ + while (M.x86.R_CX != 0) { + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + val = fetch_data_long_abs(M.x86.R_ES, M.x86.R_DI); + cmp_long(M.x86.R_EAX, val); + } else { + val = fetch_data_word_abs(M.x86.R_ES, M.x86.R_DI); + cmp_word(M.x86.R_AX, (u16)val); + } + M.x86.R_CX -= 1; + M.x86.R_DI += inc; + if (ACCESS_FLAG(F_ZF) == 0) + break; + } + M.x86.mode &= ~SYSMODE_PREFIX_REPE; + } else if (M.x86.mode & SYSMODE_PREFIX_REPNE) { + /* REPNE */ + /* move them until CX is ZERO. 
*/ + while (M.x86.R_CX != 0) { + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + val = fetch_data_long_abs(M.x86.R_ES, M.x86.R_DI); + cmp_long(M.x86.R_EAX, val); + } else { + val = fetch_data_word_abs(M.x86.R_ES, M.x86.R_DI); + cmp_word(M.x86.R_AX, (u16)val); + } + M.x86.R_CX -= 1; + M.x86.R_DI += inc; + if (ACCESS_FLAG(F_ZF)) + break; /* zero flag set means equal */ + } + M.x86.mode &= ~SYSMODE_PREFIX_REPNE; + } else { + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + val = fetch_data_long_abs(M.x86.R_ES, M.x86.R_DI); + cmp_long(M.x86.R_EAX, val); + } else { + val = fetch_data_word_abs(M.x86.R_ES, M.x86.R_DI); + cmp_word(M.x86.R_AX, (u16)val); + } + M.x86.R_DI += inc; + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xb0 - 0xb7 +****************************************************************************/ +void x86emuOp_mov_byte_register_IMM(u8 op1) +{ + u8 imm, *ptr; + + START_OF_INSTR(); + DECODE_PRINTF("MOV\t"); + ptr = DECODE_RM_BYTE_REGISTER(op1 & 0x7); + DECODE_PRINTF(","); + imm = fetch_byte_imm(); + DECODE_PRINTF2("%x\n", imm); + TRACE_AND_STEP(); + *ptr = imm; + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xb8 - 0xbf +****************************************************************************/ +void x86emuOp_mov_word_register_IMM(u8 X86EMU_UNUSED(op1)) +{ + u32 srcval; + + op1 &= 0x7; + + START_OF_INSTR(); + DECODE_PRINTF("MOV\t"); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *reg32; + reg32 = DECODE_RM_LONG_REGISTER(op1); + srcval = fetch_long_imm(); + DECODE_PRINTF2(",%x\n", srcval); + TRACE_AND_STEP(); + *reg32 = srcval; + } else { + u16 *reg16; + reg16 = DECODE_RM_WORD_REGISTER(op1); + srcval = fetch_word_imm(); + DECODE_PRINTF2(",%x\n", srcval); + TRACE_AND_STEP(); + *reg16 = (u16)srcval; + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + 
+/**************************************************************************** +REMARKS: +Handles opcode 0xc0 +****************************************************************************/ +void x86emuOp_opcC0_byte_RM_MEM(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + u8 *destreg; + uint destoffset; + u8 destval; + u8 amt; + + /* + * Yet another weirdo special case instruction format. Part of + * the opcode held below in "RH". Doubly nested case would + * result, except that the decoded instruction + */ + START_OF_INSTR(); + FETCH_DECODE_MODRM(mod, rh, rl); +#ifdef DEBUG + if (DEBUG_DECODE()) { + /* XXX DECODE_PRINTF may be changed to something more + general, so that it is important to leave the strings + in the same format, even though the result is that the + above test is done twice. */ + + switch (rh) { + case 0: + DECODE_PRINTF("ROL\t"); + break; + case 1: + DECODE_PRINTF("ROR\t"); + break; + case 2: + DECODE_PRINTF("RCL\t"); + break; + case 3: + DECODE_PRINTF("RCR\t"); + break; + case 4: + DECODE_PRINTF("SHL\t"); + break; + case 5: + DECODE_PRINTF("SHR\t"); + break; + case 6: + DECODE_PRINTF("SAL\t"); + break; + case 7: + DECODE_PRINTF("SAR\t"); + break; + } + } +#endif + /* know operation, decode the mod byte to find the addressing + mode. 
*/ + if (mod < 3) { + DECODE_PRINTF("BYTE PTR "); + destoffset = decode_rmXX_address(mod, rl); + amt = fetch_byte_imm(); + DECODE_PRINTF2(",%x\n", amt); + destval = fetch_data_byte(destoffset); + TRACE_AND_STEP(); + destval = (*opcD0_byte_operation[rh]) (destval, amt); + store_data_byte(destoffset, destval); + } else { /* register to register */ + destreg = DECODE_RM_BYTE_REGISTER(rl); + amt = fetch_byte_imm(); + DECODE_PRINTF2(",%x\n", amt); + TRACE_AND_STEP(); + destval = (*opcD0_byte_operation[rh]) (*destreg, amt); + *destreg = destval; + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xc1 +****************************************************************************/ +void x86emuOp_opcC1_word_RM_MEM(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + uint destoffset; + u8 amt; + + /* + * Yet another weirdo special case instruction format. Part of + * the opcode held below in "RH". Doubly nested case would + * result, except that the decoded instruction + */ + START_OF_INSTR(); + FETCH_DECODE_MODRM(mod, rh, rl); +#ifdef DEBUG + if (DEBUG_DECODE()) { + /* XXX DECODE_PRINTF may be changed to something more + general, so that it is important to leave the strings + in the same format, even though the result is that the + above test is done twice. */ + + switch (rh) { + case 0: + DECODE_PRINTF("ROL\t"); + break; + case 1: + DECODE_PRINTF("ROR\t"); + break; + case 2: + DECODE_PRINTF("RCL\t"); + break; + case 3: + DECODE_PRINTF("RCR\t"); + break; + case 4: + DECODE_PRINTF("SHL\t"); + break; + case 5: + DECODE_PRINTF("SHR\t"); + break; + case 6: + DECODE_PRINTF("SAL\t"); + break; + case 7: + DECODE_PRINTF("SAR\t"); + break; + } + } +#endif + /* know operation, decode the mod byte to find the addressing + mode. 
*/ + if (mod < 3) { + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 destval; + + DECODE_PRINTF("DWORD PTR "); + destoffset = decode_rmXX_address(mod, rl); + amt = fetch_byte_imm(); + DECODE_PRINTF2(",%x\n", amt); + destval = fetch_data_long(destoffset); + TRACE_AND_STEP(); + destval = (*opcD1_long_operation[rh]) (destval, amt); + store_data_long(destoffset, destval); + } else { + u16 destval; + + DECODE_PRINTF("WORD PTR "); + destoffset = decode_rmXX_address(mod, rl); + amt = fetch_byte_imm(); + DECODE_PRINTF2(",%x\n", amt); + destval = fetch_data_word(destoffset); + TRACE_AND_STEP(); + destval = (*opcD1_word_operation[rh]) (destval, amt); + store_data_word(destoffset, destval); + } + } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *destreg; + + destreg = DECODE_RM_LONG_REGISTER(rl); + amt = fetch_byte_imm(); + DECODE_PRINTF2(",%x\n", amt); + TRACE_AND_STEP(); + *destreg = (*opcD1_long_operation[rh]) (*destreg, amt); + } else { + u16 *destreg; + + destreg = DECODE_RM_WORD_REGISTER(rl); + amt = fetch_byte_imm(); + DECODE_PRINTF2(",%x\n", amt); + TRACE_AND_STEP(); + *destreg = (*opcD1_word_operation[rh]) (*destreg, amt); + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xc2 +****************************************************************************/ +void x86emuOp_ret_near_IMM(u8 X86EMU_UNUSED(op1)) +{ + u16 imm; + + START_OF_INSTR(); + DECODE_PRINTF("RET\t"); + imm = fetch_word_imm(); + DECODE_PRINTF2("%x\n", imm); + RETURN_TRACE("RET",M.x86.saved_cs,M.x86.saved_ip); + TRACE_AND_STEP(); + M.x86.R_IP = pop_word(); + M.x86.R_SP += imm; + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xc3 +****************************************************************************/ +void x86emuOp_ret_near(u8 X86EMU_UNUSED(op1)) +{ + 
START_OF_INSTR(); + DECODE_PRINTF("RET\n"); + RETURN_TRACE("RET",M.x86.saved_cs,M.x86.saved_ip); + TRACE_AND_STEP(); + M.x86.R_IP = pop_word(); + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xc4 +****************************************************************************/ +void x86emuOp_les_R_IMM(u8 X86EMU_UNUSED(op1)) +{ + int mod, rh, rl; + u16 *dstreg; + uint srcoffset; + + START_OF_INSTR(); + DECODE_PRINTF("LES\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + dstreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(","); + srcoffset = decode_rmXX_address(mod, rl); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *dstreg = fetch_data_word(srcoffset); + M.x86.R_ES = fetch_data_word(srcoffset + 2); + } + /* else UNDEFINED! register to register */ + + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xc5 +****************************************************************************/ +void x86emuOp_lds_R_IMM(u8 X86EMU_UNUSED(op1)) +{ + int mod, rh, rl; + u16 *dstreg; + uint srcoffset; + + START_OF_INSTR(); + DECODE_PRINTF("LDS\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + dstreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(","); + srcoffset = decode_rmXX_address(mod, rl); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *dstreg = fetch_data_word(srcoffset); + M.x86.R_DS = fetch_data_word(srcoffset + 2); + } + /* else UNDEFINED! 
*/ + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xc6 +****************************************************************************/ +void x86emuOp_mov_byte_RM_IMM(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + u8 *destreg; + uint destoffset; + u8 imm; + + START_OF_INSTR(); + DECODE_PRINTF("MOV\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (rh != 0) { + DECODE_PRINTF("ILLEGAL DECODE OF OPCODE c6\n"); + HALT_SYS(); + } + if (mod < 3) { + DECODE_PRINTF("BYTE PTR "); + destoffset = decode_rmXX_address(mod, rl); + imm = fetch_byte_imm(); + DECODE_PRINTF2(",%2x\n", imm); + TRACE_AND_STEP(); + store_data_byte(destoffset, imm); + } else { /* register to register */ + destreg = DECODE_RM_BYTE_REGISTER(rl); + imm = fetch_byte_imm(); + DECODE_PRINTF2(",%2x\n", imm); + TRACE_AND_STEP(); + *destreg = imm; + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xc7 +****************************************************************************/ +void x86emuOp_mov_word_RM_IMM(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + uint destoffset; + + START_OF_INSTR(); + DECODE_PRINTF("MOV\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (rh != 0) { + DECODE_PRINTF("ILLEGAL DECODE OF OPCODE 8F\n"); + HALT_SYS(); + } + if (mod < 3) { + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 imm; + + DECODE_PRINTF("DWORD PTR "); + destoffset = decode_rmXX_address(mod, rl); + imm = fetch_long_imm(); + DECODE_PRINTF2(",%x\n", imm); + TRACE_AND_STEP(); + store_data_long(destoffset, imm); + } else { + u16 imm; + + DECODE_PRINTF("WORD PTR "); + destoffset = decode_rmXX_address(mod, rl); + imm = fetch_word_imm(); + DECODE_PRINTF2(",%x\n", imm); + TRACE_AND_STEP(); + store_data_word(destoffset, imm); + } + } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *destreg; + u32 
imm; + + destreg = DECODE_RM_LONG_REGISTER(rl); + imm = fetch_long_imm(); + DECODE_PRINTF2(",%x\n", imm); + TRACE_AND_STEP(); + *destreg = imm; + } else { + u16 *destreg; + u16 imm; + + destreg = DECODE_RM_WORD_REGISTER(rl); + imm = fetch_word_imm(); + DECODE_PRINTF2(",%x\n", imm); + TRACE_AND_STEP(); + *destreg = imm; + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xc8 +****************************************************************************/ +void x86emuOp_enter(u8 X86EMU_UNUSED(op1)) +{ + u16 local,frame_pointer; + u8 nesting; + int i; + + START_OF_INSTR(); + local = fetch_word_imm(); + nesting = fetch_byte_imm(); + DECODE_PRINTF2("ENTER %x\n", local); + DECODE_PRINTF2(",%x\n", nesting); + TRACE_AND_STEP(); + push_word(M.x86.R_BP); + frame_pointer = M.x86.R_SP; + if (nesting > 0) { + for (i = 1; i < nesting; i++) { + M.x86.R_BP -= 2; + push_word(fetch_data_word_abs(M.x86.R_SS, M.x86.R_BP)); + } + push_word(frame_pointer); + } + M.x86.R_BP = frame_pointer; + M.x86.R_SP = (u16)(M.x86.R_SP - local); + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xc9 +****************************************************************************/ +void x86emuOp_leave(u8 X86EMU_UNUSED(op1)) +{ + START_OF_INSTR(); + DECODE_PRINTF("LEAVE\n"); + TRACE_AND_STEP(); + M.x86.R_SP = M.x86.R_BP; + M.x86.R_BP = pop_word(); + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xca +****************************************************************************/ +void x86emuOp_ret_far_IMM(u8 X86EMU_UNUSED(op1)) +{ + u16 imm; + + START_OF_INSTR(); + DECODE_PRINTF("RETF\t"); + imm = fetch_word_imm(); + DECODE_PRINTF2("%x\n", imm); + RETURN_TRACE("RETF",M.x86.saved_cs,M.x86.saved_ip); 
+ TRACE_AND_STEP(); + M.x86.R_IP = pop_word(); + M.x86.R_CS = pop_word(); + M.x86.R_SP += imm; + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xcb +****************************************************************************/ +void x86emuOp_ret_far(u8 X86EMU_UNUSED(op1)) +{ + START_OF_INSTR(); + DECODE_PRINTF("RETF\n"); + RETURN_TRACE("RETF",M.x86.saved_cs,M.x86.saved_ip); + TRACE_AND_STEP(); + M.x86.R_IP = pop_word(); + M.x86.R_CS = pop_word(); + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xcc +****************************************************************************/ +void x86emuOp_int3(u8 X86EMU_UNUSED(op1)) +{ + u16 tmp; + + START_OF_INSTR(); + DECODE_PRINTF("INT 3\n"); + tmp = (u16) mem_access_word(3 * 4 + 2); + /* access the segment register */ + TRACE_AND_STEP(); + if (_X86EMU_intrTab[3]) { + (*_X86EMU_intrTab[3])(3); + } else { + push_word((u16)M.x86.R_FLG); + CLEAR_FLAG(F_IF); + CLEAR_FLAG(F_TF); + push_word(M.x86.R_CS); + M.x86.R_CS = mem_access_word(3 * 4 + 2); + push_word(M.x86.R_IP); + M.x86.R_IP = mem_access_word(3 * 4); + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xcd +****************************************************************************/ +void x86emuOp_int_IMM(u8 X86EMU_UNUSED(op1)) +{ + u16 tmp; + u8 intnum; + + START_OF_INSTR(); + DECODE_PRINTF("INT\t"); + intnum = fetch_byte_imm(); + DECODE_PRINTF2("%x\n", intnum); + tmp = mem_access_word(intnum * 4 + 2); + TRACE_AND_STEP(); + if (_X86EMU_intrTab[intnum]) { + (*_X86EMU_intrTab[intnum])(intnum); + } else { + push_word((u16)M.x86.R_FLG); + CLEAR_FLAG(F_IF); + CLEAR_FLAG(F_TF); + push_word(M.x86.R_CS); + M.x86.R_CS = mem_access_word(intnum * 4 + 2); + 
push_word(M.x86.R_IP); + M.x86.R_IP = mem_access_word(intnum * 4); + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xce +****************************************************************************/ +void x86emuOp_into(u8 X86EMU_UNUSED(op1)) +{ + u16 tmp; + + START_OF_INSTR(); + DECODE_PRINTF("INTO\n"); + TRACE_AND_STEP(); + if (ACCESS_FLAG(F_OF)) { + tmp = mem_access_word(4 * 4 + 2); + if (_X86EMU_intrTab[4]) { + (*_X86EMU_intrTab[4])(4); + } else { + push_word((u16)M.x86.R_FLG); + CLEAR_FLAG(F_IF); + CLEAR_FLAG(F_TF); + push_word(M.x86.R_CS); + M.x86.R_CS = mem_access_word(4 * 4 + 2); + push_word(M.x86.R_IP); + M.x86.R_IP = mem_access_word(4 * 4); + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xcf +****************************************************************************/ +void x86emuOp_iret(u8 X86EMU_UNUSED(op1)) +{ + START_OF_INSTR(); + DECODE_PRINTF("IRET\n"); + + TRACE_AND_STEP(); + + M.x86.R_IP = pop_word(); + M.x86.R_CS = pop_word(); + M.x86.R_FLG = pop_word(); + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xd0 +****************************************************************************/ +void x86emuOp_opcD0_byte_RM_1(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + u8 *destreg; + uint destoffset; + u8 destval; + + /* + * Yet another weirdo special case instruction format. Part of + * the opcode held below in "RH". 
Doubly nested case would + * result, except that the decoded instruction + */ + START_OF_INSTR(); + FETCH_DECODE_MODRM(mod, rh, rl); +#ifdef DEBUG + if (DEBUG_DECODE()) { + /* XXX DECODE_PRINTF may be changed to something more + general, so that it is important to leave the strings + in the same format, even though the result is that the + above test is done twice. */ + switch (rh) { + case 0: + DECODE_PRINTF("ROL\t"); + break; + case 1: + DECODE_PRINTF("ROR\t"); + break; + case 2: + DECODE_PRINTF("RCL\t"); + break; + case 3: + DECODE_PRINTF("RCR\t"); + break; + case 4: + DECODE_PRINTF("SHL\t"); + break; + case 5: + DECODE_PRINTF("SHR\t"); + break; + case 6: + DECODE_PRINTF("SAL\t"); + break; + case 7: + DECODE_PRINTF("SAR\t"); + break; + } + } +#endif + /* know operation, decode the mod byte to find the addressing + mode. */ + if (mod < 3) { + DECODE_PRINTF("BYTE PTR "); + destoffset = decode_rmXX_address(mod, rl); + DECODE_PRINTF(",1\n"); + destval = fetch_data_byte(destoffset); + TRACE_AND_STEP(); + destval = (*opcD0_byte_operation[rh]) (destval, 1); + store_data_byte(destoffset, destval); + } else { /* register to register */ + destreg = DECODE_RM_BYTE_REGISTER(rl); + DECODE_PRINTF(",1\n"); + TRACE_AND_STEP(); + destval = (*opcD0_byte_operation[rh]) (*destreg, 1); + *destreg = destval; + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xd1 +****************************************************************************/ +void x86emuOp_opcD1_word_RM_1(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + uint destoffset; + + /* + * Yet another weirdo special case instruction format. Part of + * the opcode held below in "RH". 
Doubly nested case would + * result, except that the decoded instruction + */ + START_OF_INSTR(); + FETCH_DECODE_MODRM(mod, rh, rl); +#ifdef DEBUG + if (DEBUG_DECODE()) { + /* XXX DECODE_PRINTF may be changed to something more + general, so that it is important to leave the strings + in the same format, even though the result is that the + above test is done twice. */ + switch (rh) { + case 0: + DECODE_PRINTF("ROL\t"); + break; + case 1: + DECODE_PRINTF("ROR\t"); + break; + case 2: + DECODE_PRINTF("RCL\t"); + break; + case 3: + DECODE_PRINTF("RCR\t"); + break; + case 4: + DECODE_PRINTF("SHL\t"); + break; + case 5: + DECODE_PRINTF("SHR\t"); + break; + case 6: + DECODE_PRINTF("SAL\t"); + break; + case 7: + DECODE_PRINTF("SAR\t"); + break; + } + } +#endif + /* know operation, decode the mod byte to find the addressing + mode. */ + if (mod < 3) { + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 destval; + + DECODE_PRINTF("DWORD PTR "); + destoffset = decode_rmXX_address(mod, rl); + DECODE_PRINTF(",1\n"); + destval = fetch_data_long(destoffset); + TRACE_AND_STEP(); + destval = (*opcD1_long_operation[rh]) (destval, 1); + store_data_long(destoffset, destval); + } else { + u16 destval; + + DECODE_PRINTF("WORD PTR "); + destoffset = decode_rmXX_address(mod, rl); + DECODE_PRINTF(",1\n"); + destval = fetch_data_word(destoffset); + TRACE_AND_STEP(); + destval = (*opcD1_word_operation[rh]) (destval, 1); + store_data_word(destoffset, destval); + } + } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 destval; + u32 *destreg; + + destreg = DECODE_RM_LONG_REGISTER(rl); + DECODE_PRINTF(",1\n"); + TRACE_AND_STEP(); + destval = (*opcD1_long_operation[rh]) (*destreg, 1); + *destreg = destval; + } else { + u16 destval; + u16 *destreg; + + destreg = DECODE_RM_WORD_REGISTER(rl); + DECODE_PRINTF(",1\n"); + TRACE_AND_STEP(); + destval = (*opcD1_word_operation[rh]) (*destreg, 1); + *destreg = destval; + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + 
+/**************************************************************************** +REMARKS: +Handles opcode 0xd2 +****************************************************************************/ +void x86emuOp_opcD2_byte_RM_CL(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + u8 *destreg; + uint destoffset; + u8 destval; + u8 amt; + + /* + * Yet another weirdo special case instruction format. Part of + * the opcode held below in "RH". Doubly nested case would + * result, except that the decoded instruction + */ + START_OF_INSTR(); + FETCH_DECODE_MODRM(mod, rh, rl); +#ifdef DEBUG + if (DEBUG_DECODE()) { + /* XXX DECODE_PRINTF may be changed to something more + general, so that it is important to leave the strings + in the same format, even though the result is that the + above test is done twice. */ + switch (rh) { + case 0: + DECODE_PRINTF("ROL\t"); + break; + case 1: + DECODE_PRINTF("ROR\t"); + break; + case 2: + DECODE_PRINTF("RCL\t"); + break; + case 3: + DECODE_PRINTF("RCR\t"); + break; + case 4: + DECODE_PRINTF("SHL\t"); + break; + case 5: + DECODE_PRINTF("SHR\t"); + break; + case 6: + DECODE_PRINTF("SAL\t"); + break; + case 7: + DECODE_PRINTF("SAR\t"); + break; + } + } +#endif + /* know operation, decode the mod byte to find the addressing + mode. 
*/ + amt = M.x86.R_CL; + if (mod < 3) { + DECODE_PRINTF("BYTE PTR "); + destoffset = decode_rmXX_address(mod, rl); + DECODE_PRINTF(",CL\n"); + destval = fetch_data_byte(destoffset); + TRACE_AND_STEP(); + destval = (*opcD0_byte_operation[rh]) (destval, amt); + store_data_byte(destoffset, destval); + } else { /* register to register */ + destreg = DECODE_RM_BYTE_REGISTER(rl); + DECODE_PRINTF(",CL\n"); + TRACE_AND_STEP(); + destval = (*opcD0_byte_operation[rh]) (*destreg, amt); + *destreg = destval; + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xd3 +****************************************************************************/ +void x86emuOp_opcD3_word_RM_CL(u8 X86EMU_UNUSED(op1)) +{ + int mod, rl, rh; + uint destoffset; + u8 amt; + + /* + * Yet another weirdo special case instruction format. Part of + * the opcode held below in "RH". Doubly nested case would + * result, except that the decoded instruction + */ + START_OF_INSTR(); + FETCH_DECODE_MODRM(mod, rh, rl); +#ifdef DEBUG + if (DEBUG_DECODE()) { + /* XXX DECODE_PRINTF may be changed to something more + general, so that it is important to leave the strings + in the same format, even though the result is that the + above test is done twice. */ + switch (rh) { + case 0: + DECODE_PRINTF("ROL\t"); + break; + case 1: + DECODE_PRINTF("ROR\t"); + break; + case 2: + DECODE_PRINTF("RCL\t"); + break; + case 3: + DECODE_PRINTF("RCR\t"); + break; + case 4: + DECODE_PRINTF("SHL\t"); + break; + case 5: + DECODE_PRINTF("SHR\t"); + break; + case 6: + DECODE_PRINTF("SAL\t"); + break; + case 7: + DECODE_PRINTF("SAR\t"); + break; + } + } +#endif + /* know operation, decode the mod byte to find the addressing + mode. 
*/ + amt = M.x86.R_CL; + if (mod < 3) { + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 destval; + + DECODE_PRINTF("DWORD PTR "); + destoffset = decode_rmXX_address(mod, rl); + DECODE_PRINTF(",CL\n"); + destval = fetch_data_long(destoffset); + TRACE_AND_STEP(); + destval = (*opcD1_long_operation[rh]) (destval, amt); + store_data_long(destoffset, destval); + } else { + u16 destval; + + DECODE_PRINTF("WORD PTR "); + destoffset = decode_rmXX_address(mod, rl); + DECODE_PRINTF(",CL\n"); + destval = fetch_data_word(destoffset); + TRACE_AND_STEP(); + destval = (*opcD1_word_operation[rh]) (destval, amt); + store_data_word(destoffset, destval); + } + } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *destreg; + + destreg = DECODE_RM_LONG_REGISTER(rl); + DECODE_PRINTF(",CL\n"); + TRACE_AND_STEP(); + *destreg = (*opcD1_long_operation[rh]) (*destreg, amt); + } else { + u16 *destreg; + + destreg = DECODE_RM_WORD_REGISTER(rl); + DECODE_PRINTF(",CL\n"); + TRACE_AND_STEP(); + *destreg = (*opcD1_word_operation[rh]) (*destreg, amt); + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xd4 +****************************************************************************/ +void x86emuOp_aam(u8 X86EMU_UNUSED(op1)) +{ + u8 a; + + START_OF_INSTR(); + DECODE_PRINTF("AAM\n"); + a = fetch_byte_imm(); /* this is a stupid encoding. */ + if (a != 10) { + DECODE_PRINTF("ERROR DECODING AAM\n"); + TRACE_REGS(); + HALT_SYS(); + } + TRACE_AND_STEP(); + /* note the type change here --- returning AL and AH in AX. 
*/
    M.x86.R_AX = aam_word(M.x86.R_AL);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xd5 - AAD (ASCII adjust AX before division)
****************************************************************************/
void x86emuOp_aad(u8 X86EMU_UNUSED(op1))
{
    u8 a;

    START_OF_INSTR();
    DECODE_PRINTF("AAD\n");
    /* The imm8 operand selects the adjustment base; it is fetched but the
       value is ignored here. NOTE(review): aad_word() presumably assumes
       base 10, so non-decimal encodings are not emulated -- confirm. */
    a = fetch_byte_imm();
    TRACE_AND_STEP();
    M.x86.R_AX = aad_word(M.x86.R_AX);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/* opcode 0xd6 ILLEGAL OPCODE */

/****************************************************************************
REMARKS:
Handles opcode 0xd7 - XLAT (table look-up translation: AL = mem[BX + AL])
****************************************************************************/
void x86emuOp_xlat(u8 X86EMU_UNUSED(op1))
{
    u16 addr;

    START_OF_INSTR();
    DECODE_PRINTF("XLAT\n");
    TRACE_AND_STEP();
    /* AL is zero-extended before the add; address wraps at 64K. */
    addr = (u16)(M.x86.R_BX + (u8)M.x86.R_AL);
    M.x86.R_AL = fetch_data_byte(addr);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/* instructions D8 .. DF are in i87_ops.c */

/****************************************************************************
REMARKS:
Handles opcode 0xe0 - LOOPNE rel8 (dec CX; branch while CX != 0 and !ZF)
****************************************************************************/
void x86emuOp_loopne(u8 X86EMU_UNUSED(op1))
{
    s16 ip;

    START_OF_INSTR();
    DECODE_PRINTF("LOOPNE\t");
    ip = (s8) fetch_byte_imm();         /* sign-extended 8-bit displacement */
    ip += (s16) M.x86.R_IP;             /* relative to the next instruction */
    DECODE_PRINTF2("%04x\n", ip);
    TRACE_AND_STEP();
    M.x86.R_CX -= 1;
    if (M.x86.R_CX != 0 && !ACCESS_FLAG(F_ZF))      /* CX != 0 and !ZF */
        M.x86.R_IP = ip;
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xe1 - LOOPE rel8 (dec CX; branch while CX != 0 and ZF)
****************************************************************************/
void x86emuOp_loope(u8 X86EMU_UNUSED(op1))
{
    s16 ip;

    START_OF_INSTR();
    DECODE_PRINTF("LOOPE\t");
    ip = (s8) fetch_byte_imm();
    ip += (s16) M.x86.R_IP;
    DECODE_PRINTF2("%04x\n", ip);
    TRACE_AND_STEP();
    M.x86.R_CX -= 1;
    if (M.x86.R_CX != 0 && ACCESS_FLAG(F_ZF))       /* CX != 0 and ZF */
        M.x86.R_IP = ip;
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xe2 - LOOP rel8 (dec CX; branch while CX != 0)
****************************************************************************/
void x86emuOp_loop(u8 X86EMU_UNUSED(op1))
{
    s16 ip;

    START_OF_INSTR();
    DECODE_PRINTF("LOOP\t");
    ip = (s8) fetch_byte_imm();
    ip += (s16) M.x86.R_IP;
    DECODE_PRINTF2("%04x\n", ip);
    TRACE_AND_STEP();
    M.x86.R_CX -= 1;
    if (M.x86.R_CX != 0)
        M.x86.R_IP = ip;
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xe3 - JCXZ rel8 (branch if CX == 0; flags not examined)
****************************************************************************/
void x86emuOp_jcxz(u8 X86EMU_UNUSED(op1))
{
    u16 target;
    s8 offset;

    /* jump to byte offset if the CX register is zero */
    START_OF_INSTR();
    DECODE_PRINTF("JCXZ\t");
    offset = (s8)fetch_byte_imm();
    target = (u16)(M.x86.R_IP + offset);
    DECODE_PRINTF2("%x\n", target);
    TRACE_AND_STEP();
    if (M.x86.R_CX == 0)
        M.x86.R_IP = target;
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xe4 - IN AL,imm8 (byte input from fixed port)
****************************************************************************/
void x86emuOp_in_byte_AL_IMM(u8 X86EMU_UNUSED(op1))
{
    u8 port;

    START_OF_INSTR();
    DECODE_PRINTF("IN\t");
    port = (u8) fetch_byte_imm();
    DECODE_PRINTF2("%x,AL\n", port);
    TRACE_AND_STEP();
    M.x86.R_AL = (*sys_inb)(port);      /* dispatched through the I/O hook */
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xe5 - IN AX/EAX,imm8 (word/dword input from fixed port)
****************************************************************************/
void x86emuOp_in_word_AX_IMM(u8 X86EMU_UNUSED(op1))
{
    u8 port;

    START_OF_INSTR();
    DECODE_PRINTF("IN\t");
    port = (u8) fetch_byte_imm();
    /* Operand-size prefix selects the 32-bit (EAX) form. */
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        DECODE_PRINTF2("EAX,%x\n", port);
    } else {
        DECODE_PRINTF2("AX,%x\n", port);
    }
    TRACE_AND_STEP();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        M.x86.R_EAX = (*sys_inl)(port);
    } else {
        M.x86.R_AX = (*sys_inw)(port);
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xe6 - OUT imm8,AL (byte output to fixed port)
****************************************************************************/
void x86emuOp_out_byte_IMM_AL(u8 X86EMU_UNUSED(op1))
{
    u8 port;

    START_OF_INSTR();
    DECODE_PRINTF("OUT\t");
    port = (u8) fetch_byte_imm();
    DECODE_PRINTF2("%x,AL\n", port);
    TRACE_AND_STEP();
    (*sys_outb)(port, M.x86.R_AL);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xe7
+****************************************************************************/ +void x86emuOp_out_word_IMM_AX(u8 X86EMU_UNUSED(op1)) +{ + u8 port; + + START_OF_INSTR(); + DECODE_PRINTF("OUT\t"); + port = (u8) fetch_byte_imm(); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + DECODE_PRINTF2("%x,EAX\n", port); + } else { + DECODE_PRINTF2("%x,AX\n", port); + } + TRACE_AND_STEP(); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + (*sys_outl)(port, M.x86.R_EAX); + } else { + (*sys_outw)(port, M.x86.R_AX); + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xe8 +****************************************************************************/ +void x86emuOp_call_near_IMM(u8 X86EMU_UNUSED(op1)) +{ + s16 ip; + + START_OF_INSTR(); + DECODE_PRINTF("CALL\t"); + ip = (s16) fetch_word_imm(); + ip += (s16) M.x86.R_IP; /* CHECK SIGN */ + DECODE_PRINTF2("%04x\n", ip); + CALL_TRACE(M.x86.saved_cs, M.x86.saved_ip, M.x86.R_CS, ip, ""); + TRACE_AND_STEP(); + push_word(M.x86.R_IP); + M.x86.R_IP = ip; + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xe9 +****************************************************************************/ +void x86emuOp_jump_near_IMM(u8 X86EMU_UNUSED(op1)) +{ + int ip; + + START_OF_INSTR(); + DECODE_PRINTF("JMP\t"); + ip = (s16)fetch_word_imm(); + ip += (s16)M.x86.R_IP; + DECODE_PRINTF2("%04x\n", ip); + TRACE_AND_STEP(); + M.x86.R_IP = (u16)ip; + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xea +****************************************************************************/ +void x86emuOp_jump_far_IMM(u8 X86EMU_UNUSED(op1)) +{ + u16 cs, ip; + + START_OF_INSTR(); + DECODE_PRINTF("JMP\tFAR "); + ip = fetch_word_imm(); + cs = fetch_word_imm(); + DECODE_PRINTF2("%04x:", 
cs); + DECODE_PRINTF2("%04x\n", ip); + TRACE_AND_STEP(); + M.x86.R_IP = ip; + M.x86.R_CS = cs; + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xeb +****************************************************************************/ +void x86emuOp_jump_byte_IMM(u8 X86EMU_UNUSED(op1)) +{ + u16 target; + s8 offset; + + START_OF_INSTR(); + DECODE_PRINTF("JMP\t"); + offset = (s8)fetch_byte_imm(); + target = (u16)(M.x86.R_IP + offset); + DECODE_PRINTF2("%x\n", target); + TRACE_AND_STEP(); + M.x86.R_IP = target; + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xec +****************************************************************************/ +void x86emuOp_in_byte_AL_DX(u8 X86EMU_UNUSED(op1)) +{ + START_OF_INSTR(); + DECODE_PRINTF("IN\tAL,DX\n"); + TRACE_AND_STEP(); + M.x86.R_AL = (*sys_inb)(M.x86.R_DX); + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xed +****************************************************************************/ +void x86emuOp_in_word_AX_DX(u8 X86EMU_UNUSED(op1)) +{ + START_OF_INSTR(); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + DECODE_PRINTF("IN\tEAX,DX\n"); + } else { + DECODE_PRINTF("IN\tAX,DX\n"); + } + TRACE_AND_STEP(); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + M.x86.R_EAX = (*sys_inl)(M.x86.R_DX); + } else { + M.x86.R_AX = (*sys_inw)(M.x86.R_DX); + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xee +****************************************************************************/ +void x86emuOp_out_byte_DX_AL(u8 X86EMU_UNUSED(op1)) +{ + START_OF_INSTR(); + DECODE_PRINTF("OUT\tDX,AL\n"); + TRACE_AND_STEP(); + (*sys_outb)(M.x86.R_DX, 
M.x86.R_AL); + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xef +****************************************************************************/ +void x86emuOp_out_word_DX_AX(u8 X86EMU_UNUSED(op1)) +{ + START_OF_INSTR(); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + DECODE_PRINTF("OUT\tDX,EAX\n"); + } else { + DECODE_PRINTF("OUT\tDX,AX\n"); + } + TRACE_AND_STEP(); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + (*sys_outl)(M.x86.R_DX, M.x86.R_EAX); + } else { + (*sys_outw)(M.x86.R_DX, M.x86.R_AX); + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xf0 +****************************************************************************/ +void x86emuOp_lock(u8 X86EMU_UNUSED(op1)) +{ + START_OF_INSTR(); + DECODE_PRINTF("LOCK:\n"); + TRACE_AND_STEP(); + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/*opcode 0xf1 ILLEGAL OPERATION */ + +/**************************************************************************** +REMARKS: +Handles opcode 0xf2 +****************************************************************************/ +void x86emuOp_repne(u8 X86EMU_UNUSED(op1)) +{ + START_OF_INSTR(); + DECODE_PRINTF("REPNE\n"); + TRACE_AND_STEP(); + M.x86.mode |= SYSMODE_PREFIX_REPNE; + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xf3 +****************************************************************************/ +void x86emuOp_repe(u8 X86EMU_UNUSED(op1)) +{ + START_OF_INSTR(); + DECODE_PRINTF("REPE\n"); + TRACE_AND_STEP(); + M.x86.mode |= SYSMODE_PREFIX_REPE; + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0xf4 
****************************************************************************/
void x86emuOp_halt(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("HALT\n");
    TRACE_AND_STEP();
    HALT_SYS();                 /* stops the emulator run loop */
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xf5 - CMC (complement carry flag)
****************************************************************************/
void x86emuOp_cmc(u8 X86EMU_UNUSED(op1))
{
    /* complement the carry flag. */
    START_OF_INSTR();
    DECODE_PRINTF("CMC\n");
    TRACE_AND_STEP();
    TOGGLE_FLAG(F_CF);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xf6 - group 3, byte operand: TEST/NOT/NEG/MUL/IMUL/DIV/IDIV
selected by the reg field (rh) of the mod-r/m byte.
****************************************************************************/
void x86emuOp_opcF6_byte_RM(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    u8 *destreg;
    uint destoffset;
    u8 destval, srcval;

    /* long, drawn out code follows.  Double switch for a total
       of 32 cases.  */
    START_OF_INSTR();
    FETCH_DECODE_MODRM(mod, rh, rl);
    DECODE_PRINTF(opF6_names[rh]);
    if (mod < 3) {
        /* memory operand */
        DECODE_PRINTF("BYTE PTR ");
        destoffset = decode_rmXX_address(mod, rl);
        destval = fetch_data_byte(destoffset);

        switch (rh) {
        case 0:                 /* test byte imm */
            DECODE_PRINTF(",");
            srcval = fetch_byte_imm();
            DECODE_PRINTF2("%02x\n", srcval);
            TRACE_AND_STEP();
            test_byte(destval, srcval);
            break;
        case 1:
            /* NOTE(review): real CPUs treat /1 as an alias of TEST;
               the emulator halts on it instead -- confirm intended. */
            DECODE_PRINTF("ILLEGAL OP MOD=00 RH=01 OP=F6\n");
            HALT_SYS();
            break;
        case 2:                 /* NOT */
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            destval = not_byte(destval);
            store_data_byte(destoffset, destval);
            break;
        case 3:                 /* NEG */
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            destval = neg_byte(destval);
            store_data_byte(destoffset, destval);
            break;
        case 4:                 /* MUL: implicit AL operand, result in AX */
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            mul_byte(destval);
            break;
        case 5:                 /* IMUL */
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            imul_byte(destval);
            break;
        case 6:                 /* DIV */
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            div_byte(destval);
            break;
        default:                /* case 7: IDIV */
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            idiv_byte(destval);
            break;
        }
    } else {                    /* mod=11: register operand */
        destreg = DECODE_RM_BYTE_REGISTER(rl);
        switch (rh) {
        case 0:                 /* test byte imm */
            DECODE_PRINTF(",");
            srcval = fetch_byte_imm();
            DECODE_PRINTF2("%02x\n", srcval);
            TRACE_AND_STEP();
            test_byte(*destreg, srcval);
            break;
        case 1:
            DECODE_PRINTF("ILLEGAL OP MOD=00 RH=01 OP=F6\n");
            HALT_SYS();
            break;
        case 2:                 /* NOT */
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            *destreg = not_byte(*destreg);
            break;
        case 3:                 /* NEG */
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            *destreg = neg_byte(*destreg);
            break;
        case 4:                 /* MUL: implicit AL operand, result in AX */
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            mul_byte(*destreg);
            break;
        case 5:                 /* IMUL */
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            imul_byte(*destreg);
            break;
        case 6:                 /* DIV */
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            div_byte(*destreg);
            break;
        default:                /* case 7: IDIV */
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            idiv_byte(*destreg);
            break;
        }
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xf7 - group 3, word/dword operand: TEST/NOT/NEG/MUL/IMUL/
DIV/IDIV, with the operand-size prefix selecting the 32-bit forms.
****************************************************************************/
void x86emuOp_opcF7_word_RM(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    uint destoffset;

    START_OF_INSTR();
    FETCH_DECODE_MODRM(mod, rh, rl);
    DECODE_PRINTF(opF6_names[rh]);
    if (mod < 3) {

        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            /* 32-bit memory operand */
            u32 destval, srcval;

            DECODE_PRINTF("DWORD PTR ");
            destoffset = decode_rmXX_address(mod, rl);
            destval = fetch_data_long(destoffset);

            switch (rh) {
            case 0:             /* test long imm */
                DECODE_PRINTF(",");
                srcval = fetch_long_imm();
                DECODE_PRINTF2("%x\n", srcval);
                TRACE_AND_STEP();
                test_long(destval, srcval);
                break;
            case 1:
                DECODE_PRINTF("ILLEGAL OP MOD=00 RH=01 OP=F7\n");
                HALT_SYS();
                break;
            case 2:             /* NOT */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                destval = not_long(destval);
                store_data_long(destoffset, destval);
                break;
            case 3:             /* NEG */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                destval = neg_long(destval);
                store_data_long(destoffset, destval);
                break;
            case 4:             /* MUL: implicit EAX, result in EDX:EAX */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                mul_long(destval);
                break;
            case 5:             /* IMUL */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                imul_long(destval);
                break;
            case 6:             /* DIV */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                div_long(destval);
                break;
            case 7:             /* IDIV */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                idiv_long(destval);
                break;
            }
        } else {
            /* 16-bit memory operand */
            u16 destval, srcval;

            DECODE_PRINTF("WORD PTR ");
            destoffset = decode_rmXX_address(mod, rl);
            destval = fetch_data_word(destoffset);

            switch (rh) {
            case 0:             /* test word imm */
                DECODE_PRINTF(",");
                srcval = fetch_word_imm();
                DECODE_PRINTF2("%x\n", srcval);
                TRACE_AND_STEP();
                test_word(destval, srcval);
                break;
            case 1:
                DECODE_PRINTF("ILLEGAL OP MOD=00 RH=01 OP=F7\n");
                HALT_SYS();
                break;
            case 2:             /* NOT */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                destval = not_word(destval);
                store_data_word(destoffset, destval);
                break;
            case 3:             /* NEG */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                destval = neg_word(destval);
                store_data_word(destoffset, destval);
                break;
            case 4:             /* MUL: implicit AX, result in DX:AX */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                mul_word(destval);
                break;
            case 5:             /* IMUL */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                imul_word(destval);
                break;
            case 6:             /* DIV */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                div_word(destval);
                break;
            case 7:             /* IDIV */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                idiv_word(destval);
                break;
            }
        }

    } else {                    /* mod=11: register operand */

        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            u32 *destreg;
            u32 srcval;

            destreg = DECODE_RM_LONG_REGISTER(rl);

            switch (rh) {
            case 0:             /* test long imm */
                DECODE_PRINTF(",");
                srcval = fetch_long_imm();
                DECODE_PRINTF2("%x\n", srcval);
                TRACE_AND_STEP();
                test_long(*destreg, srcval);
                break;
            case 1:
                /* NOTE(review): message says OP=F6 but this handler is
                   opcode F7 (string kept as-is). */
                DECODE_PRINTF("ILLEGAL OP MOD=00 RH=01 OP=F6\n");
                HALT_SYS();
                break;
            case 2:             /* NOT */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                *destreg = not_long(*destreg);
                break;
            case 3:             /* NEG */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                *destreg = neg_long(*destreg);
                break;
            case 4:             /* MUL: implicit EAX, result in EDX:EAX */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                mul_long(*destreg);
                break;
            case 5:             /* IMUL */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                imul_long(*destreg);
                break;
            case 6:             /* DIV */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                div_long(*destreg);
                break;
            case 7:             /* IDIV */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                idiv_long(*destreg);
                break;
            }
        } else {
            u16 *destreg;
            u16 srcval;

            destreg = DECODE_RM_WORD_REGISTER(rl);

            switch (rh) {
            case 0:             /* test word imm */
                DECODE_PRINTF(",");
                srcval = fetch_word_imm();
                DECODE_PRINTF2("%x\n", srcval);
                TRACE_AND_STEP();
                test_word(*destreg, srcval);
                break;
            case 1:
                /* NOTE(review): message says OP=F6 but this handler is
                   opcode F7 (string kept as-is). */
                DECODE_PRINTF("ILLEGAL OP MOD=00 RH=01 OP=F6\n");
                HALT_SYS();
                break;
            case 2:             /* NOT */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                *destreg = not_word(*destreg);
                break;
            case 3:             /* NEG */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                *destreg = neg_word(*destreg);
                break;
            case 4:             /* MUL: implicit AX, result in DX:AX */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                mul_word(*destreg);
                break;
            case 5:             /* IMUL */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                imul_word(*destreg);
                break;
            case 6:             /* DIV */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                div_word(*destreg);
                break;
            case 7:             /* IDIV */
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                idiv_word(*destreg);
                break;
            }
        }
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xf8 - CLC (clear carry flag)
****************************************************************************/
void x86emuOp_clc(u8 X86EMU_UNUSED(op1))
{
    /* clear the carry flag. */
    START_OF_INSTR();
    DECODE_PRINTF("CLC\n");
    TRACE_AND_STEP();
    CLEAR_FLAG(F_CF);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xf9 - STC (set carry flag)
****************************************************************************/
void x86emuOp_stc(u8 X86EMU_UNUSED(op1))
{
    /* set the carry flag.
*/
    START_OF_INSTR();
    DECODE_PRINTF("STC\n");
    TRACE_AND_STEP();
    SET_FLAG(F_CF);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xfa - CLI (clear interrupt flag)
****************************************************************************/
void x86emuOp_cli(u8 X86EMU_UNUSED(op1))
{
    /* clear interrupts. */
    START_OF_INSTR();
    DECODE_PRINTF("CLI\n");
    TRACE_AND_STEP();
    CLEAR_FLAG(F_IF);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xfb - STI (set interrupt flag)
****************************************************************************/
void x86emuOp_sti(u8 X86EMU_UNUSED(op1))
{
    /* enable interrupts. */
    START_OF_INSTR();
    DECODE_PRINTF("STI\n");
    TRACE_AND_STEP();
    SET_FLAG(F_IF);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xfc - CLD (clear direction flag)
****************************************************************************/
void x86emuOp_cld(u8 X86EMU_UNUSED(op1))
{
    /* clear the direction flag. */
    START_OF_INSTR();
    DECODE_PRINTF("CLD\n");
    TRACE_AND_STEP();
    CLEAR_FLAG(F_DF);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xfd - STD (set direction flag)
****************************************************************************/
void x86emuOp_std(u8 X86EMU_UNUSED(op1))
{
    /* set the direction flag. */
    START_OF_INSTR();
    DECODE_PRINTF("STD\n");
    TRACE_AND_STEP();
    SET_FLAG(F_DF);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xfe - group 4, byte operand: INC (rh=0) / DEC (rh=1)
****************************************************************************/
void x86emuOp_opcFE_byte_RM(u8 X86EMU_UNUSED(op1))
{
    int mod, rh, rl;
    u8 destval;
    uint destoffset;
    u8 *destreg;

    /* Yet another special case instruction. */
    START_OF_INSTR();
    FETCH_DECODE_MODRM(mod, rh, rl);
#ifdef DEBUG
    if (DEBUG_DECODE()) {
        /* XXX DECODE_PRINTF may be changed to something more
           general, so that it is important to leave the strings
           in the same format, even though the result is that the
           above test is done twice. */

        switch (rh) {
        case 0:
            DECODE_PRINTF("INC\t");
            break;
        case 1:
            DECODE_PRINTF("DEC\t");
            break;
        case 2:
        case 3:
        case 4:
        case 5:
        case 6:
        case 7:
            /* NOTE(review): this prints 'mod' rather than the minor
               opcode 'rh' -- looks like a trace bug; confirm. */
            DECODE_PRINTF2("ILLEGAL OP MAJOR OP 0xFE MINOR OP %x \n", mod);
            HALT_SYS();
            break;
        }
    }
#endif
    /* NOTE(review): without DEBUG, rh values 2..7 silently fall into the
       DEC path below instead of faulting. */
    if (mod < 3) {
        DECODE_PRINTF("BYTE PTR ");
        destoffset = decode_rmXX_address(mod, rl);
        DECODE_PRINTF("\n");
        destval = fetch_data_byte(destoffset);
        TRACE_AND_STEP();
        if (rh == 0)
            destval = inc_byte(destval);
        else
            destval = dec_byte(destval);
        store_data_byte(destoffset, destval);
    } else {
        destreg = DECODE_RM_BYTE_REGISTER(rl);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        if (rh == 0)
            *destreg = inc_byte(*destreg);
        else
            *destreg = dec_byte(*destreg);
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xff - group 5: INC/DEC/CALL/CALL FAR/JMP/JMP FAR/PUSH
****************************************************************************/
void x86emuOp_opcFF_word_RM(u8 X86EMU_UNUSED(op1))
{
    int mod, rh, rl;
    uint destoffset = 0;
    u16 *destreg;
    u16 destval,destval2;

    /* Yet another special case instruction.
*/
    START_OF_INSTR();
    FETCH_DECODE_MODRM(mod, rh, rl);
#ifdef DEBUG
    if (DEBUG_DECODE()) {
        /* XXX DECODE_PRINTF may be changed to something more
           general, so that it is important to leave the strings
           in the same format, even though the result is that the
           above test is done twice. */

        switch (rh) {
        case 0:
            if (M.x86.mode & SYSMODE_PREFIX_DATA) {
                DECODE_PRINTF("INC\tDWORD PTR ");
            } else {
                DECODE_PRINTF("INC\tWORD PTR ");
            }
            break;
        case 1:
            if (M.x86.mode & SYSMODE_PREFIX_DATA) {
                DECODE_PRINTF("DEC\tDWORD PTR ");
            } else {
                DECODE_PRINTF("DEC\tWORD PTR ");
            }
            break;
        case 2:
            DECODE_PRINTF("CALL\t ");
            break;
        case 3:
            DECODE_PRINTF("CALL\tFAR ");
            break;
        case 4:
            DECODE_PRINTF("JMP\t");
            break;
        case 5:
            DECODE_PRINTF("JMP\tFAR ");
            break;
        case 6:
            DECODE_PRINTF("PUSH\t");
            break;
        case 7:
            DECODE_PRINTF("ILLEGAL DECODING OF OPCODE FF\t");
            HALT_SYS();
            break;
        }
    }
#endif
    if (mod < 3) {
        /* memory operand */
        destoffset = decode_rmXX_address(mod, rl);
        DECODE_PRINTF("\n");
        switch (rh) {
        case 0:         /* inc word ptr ... */
            if (M.x86.mode & SYSMODE_PREFIX_DATA) {
                u32 destval;

                destval = fetch_data_long(destoffset);
                TRACE_AND_STEP();
                destval = inc_long(destval);
                store_data_long(destoffset, destval);
            } else {
                u16 destval;

                destval = fetch_data_word(destoffset);
                TRACE_AND_STEP();
                destval = inc_word(destval);
                store_data_word(destoffset, destval);
            }
            break;
        case 1:         /* dec word ptr ... */
            if (M.x86.mode & SYSMODE_PREFIX_DATA) {
                u32 destval;

                destval = fetch_data_long(destoffset);
                TRACE_AND_STEP();
                destval = dec_long(destval);
                store_data_long(destoffset, destval);
            } else {
                u16 destval;

                destval = fetch_data_word(destoffset);
                TRACE_AND_STEP();
                destval = dec_word(destval);
                store_data_word(destoffset, destval);
            }
            break;
        /* NOTE(review): cases 2..5 below always operate on 16-bit values;
           the operand-size prefix (32-bit call/jmp forms) is not handled
           for these -- confirm whether that is ever needed here. */
        case 2:         /* call word ptr ... */
            destval = fetch_data_word(destoffset);
            TRACE_AND_STEP();
            push_word(M.x86.R_IP);
            M.x86.R_IP = destval;
            break;
        case 3:         /* call far ptr ... (offset then segment in memory) */
            destval = fetch_data_word(destoffset);
            destval2 = fetch_data_word(destoffset + 2);
            TRACE_AND_STEP();
            push_word(M.x86.R_CS);
            M.x86.R_CS = destval2;
            push_word(M.x86.R_IP);
            M.x86.R_IP = destval;
            break;
        case 4:         /* jmp word ptr ... */
            destval = fetch_data_word(destoffset);
            TRACE_AND_STEP();
            M.x86.R_IP = destval;
            break;
        case 5:         /* jmp far ptr ... */
            destval = fetch_data_word(destoffset);
            destval2 = fetch_data_word(destoffset + 2);
            TRACE_AND_STEP();
            M.x86.R_IP = destval;
            M.x86.R_CS = destval2;
            break;
        case 6:         /* push word ptr ... */
            if (M.x86.mode & SYSMODE_PREFIX_DATA) {
                u32 destval;

                destval = fetch_data_long(destoffset);
                TRACE_AND_STEP();
                push_long(destval);
            } else {
                u16 destval;

                destval = fetch_data_word(destoffset);
                TRACE_AND_STEP();
                push_word(destval);
            }
            break;
        }
    } else {
        /* register operand. NOTE(review): no case 7 here, so rh==7 does
           nothing in non-DEBUG builds (DEBUG builds halt above). */
        switch (rh) {
        case 0:         /* inc */
            if (M.x86.mode & SYSMODE_PREFIX_DATA) {
                u32 *destreg;

                destreg = DECODE_RM_LONG_REGISTER(rl);
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                *destreg = inc_long(*destreg);
            } else {
                u16 *destreg;

                destreg = DECODE_RM_WORD_REGISTER(rl);
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                *destreg = inc_word(*destreg);
            }
            break;
        case 1:         /* dec */
            if (M.x86.mode & SYSMODE_PREFIX_DATA) {
                u32 *destreg;

                destreg = DECODE_RM_LONG_REGISTER(rl);
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                *destreg = dec_long(*destreg);
            } else {
                u16 *destreg;

                destreg = DECODE_RM_WORD_REGISTER(rl);
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                *destreg = dec_word(*destreg);
            }
            break;
        case 2:         /* call word ptr ... */
            destreg = DECODE_RM_WORD_REGISTER(rl);
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            push_word(M.x86.R_IP);
            M.x86.R_IP = *destreg;
            break;
        case 3:         /* call far ptr - undefined for register operand */
            DECODE_PRINTF("OPERATION UNDEFINED 0XFF \n");
            TRACE_AND_STEP();
            HALT_SYS();
            break;

        case 4:         /* jmp ... */
            destreg = DECODE_RM_WORD_REGISTER(rl);
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            M.x86.R_IP = (u16) (*destreg);
            break;
        case 5:         /* jmp far ptr - undefined for register operand */
            DECODE_PRINTF("OPERATION UNDEFINED 0XFF \n");
            TRACE_AND_STEP();
            HALT_SYS();
            break;
        case 6:         /* push */
            if (M.x86.mode & SYSMODE_PREFIX_DATA) {
                u32 *destreg;

                destreg = DECODE_RM_LONG_REGISTER(rl);
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                push_long(*destreg);
            } else {
                u16 *destreg;

                destreg = DECODE_RM_WORD_REGISTER(rl);
                DECODE_PRINTF("\n");
                TRACE_AND_STEP();
                push_word(*destreg);
            }
            break;
        }
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/***************************************************************************
 * Single byte operation code table:
 **************************************************************************/
void (*x86emu_optab[256])(u8) =
{
/* 0x00 */ x86emuOp_genop_byte_RM_R,
/* 0x01 */ x86emuOp_genop_word_RM_R,
/* 0x02 */ x86emuOp_genop_byte_R_RM,
/* 0x03 */ x86emuOp_genop_word_R_RM,
/* 0x04 */ x86emuOp_genop_byte_AL_IMM,
/* 0x05 */ x86emuOp_genop_word_AX_IMM,
/* 0x06 */ x86emuOp_push_ES,
/* 0x07 */ x86emuOp_pop_ES,

/* 0x08 */ x86emuOp_genop_byte_RM_R,
/* 0x09 */ x86emuOp_genop_word_RM_R,
/* 0x0a */ x86emuOp_genop_byte_R_RM,
/* 0x0b */ x86emuOp_genop_word_R_RM,
/* 0x0c */ x86emuOp_genop_byte_AL_IMM,
/* 0x0d */ x86emuOp_genop_word_AX_IMM,
/* 0x0e */ x86emuOp_push_CS,
/* 0x0f */ x86emuOp_two_byte,

/* 0x10 */ x86emuOp_genop_byte_RM_R,
/* 0x11 */ x86emuOp_genop_word_RM_R,
/* 0x12 */ x86emuOp_genop_byte_R_RM,
/* 0x13 */ x86emuOp_genop_word_R_RM,
/* 0x14 */ x86emuOp_genop_byte_AL_IMM,
/* 0x15 */ x86emuOp_genop_word_AX_IMM,
/* 0x16 */ x86emuOp_push_SS,
/* 0x17 */ x86emuOp_pop_SS,

/* 0x18 */ x86emuOp_genop_byte_RM_R,
/* 0x19 */ x86emuOp_genop_word_RM_R,
/* 0x1a */ x86emuOp_genop_byte_R_RM,
/* 0x1b */
x86emuOp_genop_word_R_RM,
/* 0x1c */ x86emuOp_genop_byte_AL_IMM,
/* 0x1d */ x86emuOp_genop_word_AX_IMM,
/* 0x1e */ x86emuOp_push_DS,
/* 0x1f */ x86emuOp_pop_DS,

/* 0x20 */ x86emuOp_genop_byte_RM_R,
/* 0x21 */ x86emuOp_genop_word_RM_R,
/* 0x22 */ x86emuOp_genop_byte_R_RM,
/* 0x23 */ x86emuOp_genop_word_R_RM,
/* 0x24 */ x86emuOp_genop_byte_AL_IMM,
/* 0x25 */ x86emuOp_genop_word_AX_IMM,
/* 0x26 */ x86emuOp_segovr_ES,
/* 0x27 */ x86emuOp_daa,

/* 0x28 */ x86emuOp_genop_byte_RM_R,
/* 0x29 */ x86emuOp_genop_word_RM_R,
/* 0x2a */ x86emuOp_genop_byte_R_RM,
/* 0x2b */ x86emuOp_genop_word_R_RM,
/* 0x2c */ x86emuOp_genop_byte_AL_IMM,
/* 0x2d */ x86emuOp_genop_word_AX_IMM,
/* 0x2e */ x86emuOp_segovr_CS,
/* 0x2f */ x86emuOp_das,

/* 0x30 */ x86emuOp_genop_byte_RM_R,
/* 0x31 */ x86emuOp_genop_word_RM_R,
/* 0x32 */ x86emuOp_genop_byte_R_RM,
/* 0x33 */ x86emuOp_genop_word_R_RM,
/* 0x34 */ x86emuOp_genop_byte_AL_IMM,
/* 0x35 */ x86emuOp_genop_word_AX_IMM,
/* 0x36 */ x86emuOp_segovr_SS,
/* 0x37 */ x86emuOp_aaa,

/* 0x38 */ x86emuOp_genop_byte_RM_R,
/* 0x39 */ x86emuOp_genop_word_RM_R,
/* 0x3a */ x86emuOp_genop_byte_R_RM,
/* 0x3b */ x86emuOp_genop_word_R_RM,
/* 0x3c */ x86emuOp_genop_byte_AL_IMM,
/* 0x3d */ x86emuOp_genop_word_AX_IMM,
/* 0x3e */ x86emuOp_segovr_DS,
/* 0x3f */ x86emuOp_aas,

/* 0x40 */ x86emuOp_inc_register,
/* 0x41 */ x86emuOp_inc_register,
/* 0x42 */ x86emuOp_inc_register,
/* 0x43 */ x86emuOp_inc_register,
/* 0x44 */ x86emuOp_inc_register,
/* 0x45 */ x86emuOp_inc_register,
/* 0x46 */ x86emuOp_inc_register,
/* 0x47 */ x86emuOp_inc_register,

/* 0x48 */ x86emuOp_dec_register,
/* 0x49 */ x86emuOp_dec_register,
/* 0x4a */ x86emuOp_dec_register,
/* 0x4b */ x86emuOp_dec_register,
/* 0x4c */ x86emuOp_dec_register,
/* 0x4d */ x86emuOp_dec_register,
/* 0x4e */ x86emuOp_dec_register,
/* 0x4f */ x86emuOp_dec_register,

/* 0x50 */ x86emuOp_push_register,
/* 0x51 */ x86emuOp_push_register,
/* 0x52 */ x86emuOp_push_register,
/* 0x53 */ x86emuOp_push_register,
/* 0x54 */ x86emuOp_push_register,
/* 0x55 */ x86emuOp_push_register,
/* 0x56 */ x86emuOp_push_register,
/* 0x57 */ x86emuOp_push_register,

/* 0x58 */ x86emuOp_pop_register,
/* 0x59 */ x86emuOp_pop_register,
/* 0x5a */ x86emuOp_pop_register,
/* 0x5b */ x86emuOp_pop_register,
/* 0x5c */ x86emuOp_pop_register,
/* 0x5d */ x86emuOp_pop_register,
/* 0x5e */ x86emuOp_pop_register,
/* 0x5f */ x86emuOp_pop_register,

/* 0x60 */ x86emuOp_push_all,
/* 0x61 */ x86emuOp_pop_all,
/* 0x62 */ x86emuOp_illegal_op,  /* bound */
/* 0x63 */ x86emuOp_illegal_op,  /* arpl */
/* 0x64 */ x86emuOp_segovr_FS,
/* 0x65 */ x86emuOp_segovr_GS,
/* 0x66 */ x86emuOp_prefix_data,
/* 0x67 */ x86emuOp_prefix_addr,

/* 0x68 */ x86emuOp_push_word_IMM,
/* 0x69 */ x86emuOp_imul_word_IMM,
/* 0x6a */ x86emuOp_push_byte_IMM,
/* 0x6b */ x86emuOp_imul_byte_IMM,
/* 0x6c */ x86emuOp_ins_byte,
/* 0x6d */ x86emuOp_ins_word,
/* 0x6e */ x86emuOp_outs_byte,
/* 0x6f */ x86emuOp_outs_word,

/* 0x70 */ x86emuOp_jump_near_cond,
/* 0x71 */ x86emuOp_jump_near_cond,
/* 0x72 */ x86emuOp_jump_near_cond,
/* 0x73 */ x86emuOp_jump_near_cond,
/* 0x74 */ x86emuOp_jump_near_cond,
/* 0x75 */ x86emuOp_jump_near_cond,
/* 0x76 */ x86emuOp_jump_near_cond,
/* 0x77 */ x86emuOp_jump_near_cond,

/* 0x78 */ x86emuOp_jump_near_cond,
/* 0x79 */ x86emuOp_jump_near_cond,
/* 0x7a */ x86emuOp_jump_near_cond,
/* 0x7b */ x86emuOp_jump_near_cond,
/* 0x7c */ x86emuOp_jump_near_cond,
/* 0x7d */ x86emuOp_jump_near_cond,
/* 0x7e */ x86emuOp_jump_near_cond,
/* 0x7f */ x86emuOp_jump_near_cond,

/* 0x80 */ x86emuOp_opc80_byte_RM_IMM,
/* 0x81 */ x86emuOp_opc81_word_RM_IMM,
/* 0x82 */ x86emuOp_opc82_byte_RM_IMM,
/* 0x83 */ x86emuOp_opc83_word_RM_IMM,
/* 0x84 */ x86emuOp_test_byte_RM_R,
/* 0x85 */ x86emuOp_test_word_RM_R,
/* 0x86 */ x86emuOp_xchg_byte_RM_R,
/* 0x87 */ x86emuOp_xchg_word_RM_R,

/* 0x88 */ x86emuOp_mov_byte_RM_R,
/* 0x89 */ x86emuOp_mov_word_RM_R,
/* 0x8a */ x86emuOp_mov_byte_R_RM,
/* 0x8b */ x86emuOp_mov_word_R_RM,
/* 0x8c */ x86emuOp_mov_word_RM_SR,
/* 0x8d */ x86emuOp_lea_word_R_M,
/* 0x8e */ x86emuOp_mov_word_SR_RM,
/* 0x8f */ x86emuOp_pop_RM,

/* 0x90 */ x86emuOp_nop,
/* 0x91 */ x86emuOp_xchg_word_AX_register,
/* 0x92 */ x86emuOp_xchg_word_AX_register,
/* 0x93 */ x86emuOp_xchg_word_AX_register,
/* 0x94 */ x86emuOp_xchg_word_AX_register,
/* 0x95 */ x86emuOp_xchg_word_AX_register,
/* 0x96 */ x86emuOp_xchg_word_AX_register,
/* 0x97 */ x86emuOp_xchg_word_AX_register,

/* 0x98 */ x86emuOp_cbw,
/* 0x99 */ x86emuOp_cwd,
/* 0x9a */ x86emuOp_call_far_IMM,
/* 0x9b */ x86emuOp_wait,
/* 0x9c */ x86emuOp_pushf_word,
/* 0x9d */ x86emuOp_popf_word,
/* 0x9e */ x86emuOp_sahf,
/* 0x9f */ x86emuOp_lahf,

/* 0xa0 */ x86emuOp_mov_AL_M_IMM,
/* 0xa1 */ x86emuOp_mov_AX_M_IMM,
/* 0xa2 */ x86emuOp_mov_M_AL_IMM,
/* 0xa3 */ x86emuOp_mov_M_AX_IMM,
/* 0xa4 */ x86emuOp_movs_byte,
/* 0xa5 */ x86emuOp_movs_word,
/* 0xa6 */ x86emuOp_cmps_byte,
/* 0xa7 */ x86emuOp_cmps_word,
/* 0xa8 */ x86emuOp_test_AL_IMM,
/* 0xa9 */ x86emuOp_test_AX_IMM,
/* 0xaa */ x86emuOp_stos_byte,
/* 0xab */ x86emuOp_stos_word,
/* 0xac */ x86emuOp_lods_byte,
/* 0xad */ x86emuOp_lods_word,
/* 0xae */ x86emuOp_scas_byte,
/* 0xaf */ x86emuOp_scas_word,

/* 0xb0 */ x86emuOp_mov_byte_register_IMM,
/* 0xb1 */ x86emuOp_mov_byte_register_IMM,
/* 0xb2 */ x86emuOp_mov_byte_register_IMM,
/* 0xb3 */ x86emuOp_mov_byte_register_IMM,
/* 0xb4 */ x86emuOp_mov_byte_register_IMM,
/* 0xb5 */ x86emuOp_mov_byte_register_IMM,
/* 0xb6 */ x86emuOp_mov_byte_register_IMM,
/* 0xb7 */ x86emuOp_mov_byte_register_IMM,

/* 0xb8 */ x86emuOp_mov_word_register_IMM,
/* 0xb9 */ x86emuOp_mov_word_register_IMM,
/* 0xba */ x86emuOp_mov_word_register_IMM,
/* 0xbb */ x86emuOp_mov_word_register_IMM,
/* 0xbc */ x86emuOp_mov_word_register_IMM,
/* 0xbd */ x86emuOp_mov_word_register_IMM,
/* 0xbe */ x86emuOp_mov_word_register_IMM,
/* 0xbf */ x86emuOp_mov_word_register_IMM,

/* 0xc0 */ x86emuOp_opcC0_byte_RM_MEM,
/* 0xc1 */ x86emuOp_opcC1_word_RM_MEM,
/* 0xc2 */ x86emuOp_ret_near_IMM,
/* 0xc3 */ x86emuOp_ret_near,
/* 0xc4 */ x86emuOp_les_R_IMM,
/* 0xc5 */ x86emuOp_lds_R_IMM,
/* 0xc6 */ x86emuOp_mov_byte_RM_IMM,
/* 0xc7 */ x86emuOp_mov_word_RM_IMM,
/* 0xc8 */ x86emuOp_enter,
/* 0xc9 */ x86emuOp_leave,
/* 0xca */ x86emuOp_ret_far_IMM,
/* 0xcb */ x86emuOp_ret_far,
/* 0xcc */ x86emuOp_int3,
/* 0xcd */ x86emuOp_int_IMM,
/* 0xce */ x86emuOp_into,
/* 0xcf */ x86emuOp_iret,

/* 0xd0 */ x86emuOp_opcD0_byte_RM_1,
/* 0xd1 */ x86emuOp_opcD1_word_RM_1,
/* 0xd2 */ x86emuOp_opcD2_byte_RM_CL,
/* 0xd3 */ x86emuOp_opcD3_word_RM_CL,
/* 0xd4 */ x86emuOp_aam,
/* 0xd5 */ x86emuOp_aad,
/* 0xd6 */ x86emuOp_illegal_op,  /* Undocumented SETALC instruction */
/* 0xd7 */ x86emuOp_xlat,
/* 0xd8 */ x86emuOp_esc_coprocess_d8,
/* 0xd9 */ x86emuOp_esc_coprocess_d9,
/* 0xda */ x86emuOp_esc_coprocess_da,
/* 0xdb */ x86emuOp_esc_coprocess_db,
/* 0xdc */ x86emuOp_esc_coprocess_dc,
/* 0xdd */ x86emuOp_esc_coprocess_dd,
/* 0xde */ x86emuOp_esc_coprocess_de,
/* 0xdf */ x86emuOp_esc_coprocess_df,

/* 0xe0 */ x86emuOp_loopne,
/* 0xe1 */ x86emuOp_loope,
/* 0xe2 */ x86emuOp_loop,
/* 0xe3 */ x86emuOp_jcxz,
/* 0xe4 */ x86emuOp_in_byte_AL_IMM,
/* 0xe5 */ x86emuOp_in_word_AX_IMM,
/* 0xe6 */ x86emuOp_out_byte_IMM_AL,
/* 0xe7 */ x86emuOp_out_word_IMM_AX,

/* 0xe8 */ x86emuOp_call_near_IMM,
/* 0xe9 */ x86emuOp_jump_near_IMM,
/* 0xea */ x86emuOp_jump_far_IMM,
/* 0xeb */ x86emuOp_jump_byte_IMM,
/* 0xec */ x86emuOp_in_byte_AL_DX,
/* 0xed */ x86emuOp_in_word_AX_DX,
/* 0xee */ x86emuOp_out_byte_DX_AL,
/* 0xef */ x86emuOp_out_word_DX_AX,

/* 0xf0 */ x86emuOp_lock,
/* 0xf1 */ x86emuOp_illegal_op,
/* 0xf2 */ x86emuOp_repne,
/* 0xf3 */ x86emuOp_repe,
/* 0xf4 */ x86emuOp_halt,
/* 0xf5 */ x86emuOp_cmc,
/* 0xf6 */ x86emuOp_opcF6_byte_RM,
/* 0xf7 */ x86emuOp_opcF7_word_RM,

/* 0xf8 */
x86emuOp_clc, +/* 0xf9 */ x86emuOp_stc, +/* 0xfa */ x86emuOp_cli, +/* 0xfb */ x86emuOp_sti, +/* 0xfc */ x86emuOp_cld, +/* 0xfd */ x86emuOp_std, +/* 0xfe */ x86emuOp_opcFE_byte_RM, +/* 0xff */ x86emuOp_opcFF_word_RM, +}; diff --git a/arch/e2k/boot/bios/video/x86emu/src/x86emu/ops2.c b/arch/e2k/boot/bios/video/x86emu/src/x86emu/ops2.c new file mode 100644 index 000000000000..1747fd960a47 --- /dev/null +++ b/arch/e2k/boot/bios/video/x86emu/src/x86emu/ops2.c @@ -0,0 +1,1764 @@ +/**************************************************************************** +* +* Realmode X86 Emulator Library +* +* Copyright (C) 1991-2004 SciTech Software, Inc. +* Copyright (C) David Mosberger-Tang +* Copyright (C) 1999 Egbert Eich +* +* ======================================================================== +* +* Permission to use, copy, modify, distribute, and sell this software and +* its documentation for any purpose is hereby granted without fee, +* provided that the above copyright notice appear in all copies and that +* both that copyright notice and this permission notice appear in +* supporting documentation, and that the name of the authors not be used +* in advertising or publicity pertaining to distribution of the software +* without specific, written prior permission. The authors makes no +* representations about the suitability of this software for any purpose. +* It is provided "as is" without express or implied warranty. +* +* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR +* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF +* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +* PERFORMANCE OF THIS SOFTWARE. 
+* +* ======================================================================== +* +* Language: ANSI C +* Environment: Any +* Developer: Kendall Bennett +* +* Description: This file includes subroutines to implement the decoding +* and emulation of all the x86 extended two-byte processor +* instructions. +* +****************************************************************************/ + +#include "x86emu/x86emui.h" + +/*----------------------------- Implementation ----------------------------*/ + +/**************************************************************************** +PARAMETERS: +op2 - Instruction op code + +REMARKS: +Handles illegal opcodes. +****************************************************************************/ +void x86emuOp2_illegal_op( + u8 op2) +{ + START_OF_INSTR(); + DECODE_PRINTF("ILLEGAL EXTENDED X86 OPCODE\n"); + TRACE_REGS(); + rom_printk("%04x:%04x: %02X ILLEGAL EXTENDED X86 OPCODE!\n", + M.x86.R_CS, M.x86.R_IP-2,op2); + HALT_SYS(); + END_OF_INSTR(); +} + +/* NOT-XOR of truth values; fully parenthesized so !xorl(a,b) negates the whole expression */ +#define xorl(a,b) (((a) && !(b)) || (!(a) && (b))) + +/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0x80-0x8F +****************************************************************************/ +int x86emu_check_jump_condition(u8 op) +{ + switch (op) { + case 0x0: + DECODE_PRINTF("JO\t"); + return ACCESS_FLAG(F_OF); + case 0x1: + DECODE_PRINTF("JNO\t"); + return !ACCESS_FLAG(F_OF); + break; + case 0x2: + DECODE_PRINTF("JB\t"); + return ACCESS_FLAG(F_CF); + break; + case 0x3: + DECODE_PRINTF("JNB\t"); + return !ACCESS_FLAG(F_CF); + break; + case 0x4: + DECODE_PRINTF("JZ\t"); + return ACCESS_FLAG(F_ZF); + break; + case 0x5: + DECODE_PRINTF("JNZ\t"); + return !ACCESS_FLAG(F_ZF); + break; + case 0x6: + DECODE_PRINTF("JBE\t"); + return ACCESS_FLAG(F_CF) || ACCESS_FLAG(F_ZF); + break; + case 0x7: + DECODE_PRINTF("JNBE\t"); + return !(ACCESS_FLAG(F_CF) || ACCESS_FLAG(F_ZF)); + break; + case 0x8: + DECODE_PRINTF("JS\t"); + return 
ACCESS_FLAG(F_SF); + break; + case 0x9: + DECODE_PRINTF("JNS\t"); + return !ACCESS_FLAG(F_SF); + break; + case 0xa: + DECODE_PRINTF("JP\t"); + return ACCESS_FLAG(F_PF); + break; + case 0xb: + DECODE_PRINTF("JNP\t"); + return !ACCESS_FLAG(F_PF); + break; + case 0xc: + DECODE_PRINTF("JL\t"); + return xorl(ACCESS_FLAG(F_SF), ACCESS_FLAG(F_OF)); + break; + case 0xd: + DECODE_PRINTF("JNL\t"); + return !xorl(ACCESS_FLAG(F_SF), ACCESS_FLAG(F_OF)); + break; + case 0xe: + DECODE_PRINTF("JLE\t"); + return xorl(ACCESS_FLAG(F_SF), ACCESS_FLAG(F_OF)) || + ACCESS_FLAG(F_ZF); + break; + default: + DECODE_PRINTF("JNLE\t"); + return !(xorl(ACCESS_FLAG(F_SF), ACCESS_FLAG(F_OF)) || + ACCESS_FLAG(F_ZF)); + } +} + +void x86emuOp2_long_jump(u8 op2) +{ + s32 target; + int cond; + + /* conditional jump to word offset. */ + START_OF_INSTR(); + cond = x86emu_check_jump_condition(op2 & 0xF); + target = (s16) fetch_word_imm(); + target += (s16) M.x86.R_IP; + DECODE_PRINTF2("%04x\n", target); + TRACE_AND_STEP(); + if (cond) + M.x86.R_IP = (u16)target; + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0x90-0x9F +****************************************************************************/ +void x86emuOp2_set_byte(u8 op2) +{ + int mod, rl, rh; + uint destoffset; + u8 *destreg; + char *name = 0; + int cond = 0; + + START_OF_INSTR(); + switch (op2) { + case 0x90: + name = "SETO\t"; + cond = ACCESS_FLAG(F_OF); + break; + case 0x91: + name = "SETNO\t"; + cond = !ACCESS_FLAG(F_OF); + break; + case 0x92: + name = "SETB\t"; + cond = ACCESS_FLAG(F_CF); + break; + case 0x93: + name = "SETNB\t"; + cond = !ACCESS_FLAG(F_CF); + break; + case 0x94: + name = "SETZ\t"; + cond = ACCESS_FLAG(F_ZF); + break; + case 0x95: + name = "SETNZ\t"; + cond = !ACCESS_FLAG(F_ZF); + break; + case 0x96: + name = "SETBE\t"; + cond = ACCESS_FLAG(F_CF) || ACCESS_FLAG(F_ZF); + break; + case 0x97: + name = "SETNBE\t"; + cond 
= !(ACCESS_FLAG(F_CF) || ACCESS_FLAG(F_ZF)); + break; + case 0x98: + name = "SETS\t"; + cond = ACCESS_FLAG(F_SF); + break; + case 0x99: + name = "SETNS\t"; + cond = !ACCESS_FLAG(F_SF); + break; + case 0x9a: + name = "SETP\t"; + cond = ACCESS_FLAG(F_PF); + break; + case 0x9b: + name = "SETNP\t"; + cond = !ACCESS_FLAG(F_PF); + break; + case 0x9c: + name = "SETL\t"; + cond = xorl(ACCESS_FLAG(F_SF), ACCESS_FLAG(F_OF)); + break; + case 0x9d: + name = "SETNL\t"; + cond = !xorl(ACCESS_FLAG(F_SF), ACCESS_FLAG(F_OF)); + break; + case 0x9e: + name = "SETLE\t"; + cond = (xorl(ACCESS_FLAG(F_SF), ACCESS_FLAG(F_OF)) || + ACCESS_FLAG(F_ZF)); + break; + case 0x9f: + name = "SETNLE\t"; + cond = !(xorl(ACCESS_FLAG(F_SF), ACCESS_FLAG(F_OF)) || + ACCESS_FLAG(F_ZF)); + break; + } + DECODE_PRINTF(name); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + destoffset = decode_rmXX_address(mod, rl); + TRACE_AND_STEP(); + store_data_byte(destoffset, cond ? 0x01 : 0x00); + } else { /* register to register */ + destreg = DECODE_RM_BYTE_REGISTER(rl); + TRACE_AND_STEP(); + *destreg = cond ? 
0x01 : 0x00; + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0xa0 +****************************************************************************/ +void x86emuOp2_push_FS(u8 X86EMU_UNUSED(op2)) +{ + START_OF_INSTR(); + DECODE_PRINTF("PUSH\tFS\n"); + TRACE_AND_STEP(); + push_word(M.x86.R_FS); + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0xa1 +****************************************************************************/ +void x86emuOp2_pop_FS(u8 X86EMU_UNUSED(op2)) +{ + START_OF_INSTR(); + DECODE_PRINTF("POP\tFS\n"); + TRACE_AND_STEP(); + M.x86.R_FS = pop_word(); + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0xa3 +****************************************************************************/ +void x86emuOp2_bt_R(u8 X86EMU_UNUSED(op2)) +{ + int mod, rl, rh; + uint srcoffset; + int bit,disp; + + START_OF_INSTR(); + DECODE_PRINTF("BT\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + srcoffset = decode_rmXX_address(mod, rl); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 srcval; + u32 *shiftreg; + + DECODE_PRINTF(","); + shiftreg = DECODE_RM_LONG_REGISTER(rh); + TRACE_AND_STEP(); + bit = *shiftreg & 0x1F; + disp = (s16)*shiftreg >> 5; + srcval = fetch_data_long(srcoffset+disp); + CONDITIONAL_SET_FLAG(srcval & (0x1 << bit),F_CF); + } else { + u16 srcval; + u16 *shiftreg; + + DECODE_PRINTF(","); + shiftreg = DECODE_RM_WORD_REGISTER(rh); + TRACE_AND_STEP(); + bit = *shiftreg & 0xF; + disp = (s16)*shiftreg >> 4; + srcval = fetch_data_word(srcoffset+disp); + CONDITIONAL_SET_FLAG(srcval & (0x1 << bit),F_CF); + } + } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *srcreg,*shiftreg; + + srcreg = 
DECODE_RM_LONG_REGISTER(rl); + DECODE_PRINTF(","); + shiftreg = DECODE_RM_LONG_REGISTER(rh); + TRACE_AND_STEP(); + bit = *shiftreg & 0x1F; + CONDITIONAL_SET_FLAG(*srcreg & (0x1 << bit),F_CF); + } else { + u16 *srcreg,*shiftreg; + + srcreg = DECODE_RM_WORD_REGISTER(rl); + DECODE_PRINTF(","); + shiftreg = DECODE_RM_WORD_REGISTER(rh); + TRACE_AND_STEP(); + bit = *shiftreg & 0xF; + CONDITIONAL_SET_FLAG(*srcreg & (0x1 << bit),F_CF); + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0xa4 +****************************************************************************/ +void x86emuOp2_shld_IMM(u8 X86EMU_UNUSED(op2)) +{ + int mod, rl, rh; + uint destoffset; + u8 shift; + + START_OF_INSTR(); + DECODE_PRINTF("SHLD\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + destoffset = decode_rmXX_address(mod, rl); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 destval; + u32 *shiftreg; + + DECODE_PRINTF(","); + shiftreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF(","); + shift = fetch_byte_imm(); + DECODE_PRINTF2("%d\n", shift); + TRACE_AND_STEP(); + destval = fetch_data_long(destoffset); + destval = shld_long(destval,*shiftreg,shift); + store_data_long(destoffset, destval); + } else { + u16 destval; + u16 *shiftreg; + + DECODE_PRINTF(","); + shiftreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(","); + shift = fetch_byte_imm(); + DECODE_PRINTF2("%d\n", shift); + TRACE_AND_STEP(); + destval = fetch_data_word(destoffset); + destval = shld_word(destval,*shiftreg,shift); + store_data_word(destoffset, destval); + } + } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *destreg,*shiftreg; + + destreg = DECODE_RM_LONG_REGISTER(rl); + DECODE_PRINTF(","); + shiftreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF(","); + shift = fetch_byte_imm(); + DECODE_PRINTF2("%d\n", shift); + TRACE_AND_STEP(); + *destreg = 
shld_long(*destreg,*shiftreg,shift); + } else { + u16 *destreg,*shiftreg; + + destreg = DECODE_RM_WORD_REGISTER(rl); + DECODE_PRINTF(","); + shiftreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(","); + shift = fetch_byte_imm(); + DECODE_PRINTF2("%d\n", shift); + TRACE_AND_STEP(); + *destreg = shld_word(*destreg,*shiftreg,shift); + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0xa5 +****************************************************************************/ +void x86emuOp2_shld_CL(u8 X86EMU_UNUSED(op2)) +{ + int mod, rl, rh; + uint destoffset; + + START_OF_INSTR(); + DECODE_PRINTF("SHLD\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + destoffset = decode_rmXX_address(mod, rl); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 destval; + u32 *shiftreg; + + DECODE_PRINTF(","); + shiftreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF(",CL\n"); + TRACE_AND_STEP(); + destval = fetch_data_long(destoffset); + destval = shld_long(destval,*shiftreg,M.x86.R_CL); + store_data_long(destoffset, destval); + } else { + u16 destval; + u16 *shiftreg; + + DECODE_PRINTF(","); + shiftreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(",CL\n"); + TRACE_AND_STEP(); + destval = fetch_data_word(destoffset); + destval = shld_word(destval,*shiftreg,M.x86.R_CL); + store_data_word(destoffset, destval); + } + } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *destreg,*shiftreg; + + destreg = DECODE_RM_LONG_REGISTER(rl); + DECODE_PRINTF(","); + shiftreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF(",CL\n"); + TRACE_AND_STEP(); + *destreg = shld_long(*destreg,*shiftreg,M.x86.R_CL); + } else { + u16 *destreg,*shiftreg; + + destreg = DECODE_RM_WORD_REGISTER(rl); + DECODE_PRINTF(","); + shiftreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(",CL\n"); + TRACE_AND_STEP(); + *destreg = shld_word(*destreg,*shiftreg,M.x86.R_CL); + } + } + 
DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0xa8 +****************************************************************************/ +void x86emuOp2_push_GS(u8 X86EMU_UNUSED(op2)) +{ + START_OF_INSTR(); + DECODE_PRINTF("PUSH\tGS\n"); + TRACE_AND_STEP(); + push_word(M.x86.R_GS); + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0xa9 +****************************************************************************/ +void x86emuOp2_pop_GS(u8 X86EMU_UNUSED(op2)) +{ + START_OF_INSTR(); + DECODE_PRINTF("POP\tGS\n"); + TRACE_AND_STEP(); + M.x86.R_GS = pop_word(); + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0xaa +****************************************************************************/ +void x86emuOp2_bts_R(u8 X86EMU_UNUSED(op2)) +{ + int mod, rl, rh; + uint srcoffset; + int bit,disp; + + START_OF_INSTR(); + DECODE_PRINTF("BTS\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + srcoffset = decode_rmXX_address(mod, rl); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 srcval,mask; + u32 *shiftreg; + + DECODE_PRINTF(","); + shiftreg = DECODE_RM_LONG_REGISTER(rh); + TRACE_AND_STEP(); + bit = *shiftreg & 0x1F; + disp = (s16)*shiftreg >> 5; + srcval = fetch_data_long(srcoffset+disp); + mask = (0x1 << bit); + CONDITIONAL_SET_FLAG(srcval & mask,F_CF); + store_data_long(srcoffset+disp, srcval | mask); + } else { + u16 srcval,mask; + u16 *shiftreg; + + DECODE_PRINTF(","); + shiftreg = DECODE_RM_WORD_REGISTER(rh); + TRACE_AND_STEP(); + bit = *shiftreg & 0xF; + disp = (s16)*shiftreg >> 4; + srcval = fetch_data_word(srcoffset+disp); + mask = (u16)(0x1 << bit); + CONDITIONAL_SET_FLAG(srcval & mask,F_CF); + store_data_word(srcoffset+disp, srcval | mask); + } 
+ } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *srcreg,*shiftreg; + u32 mask; + + srcreg = DECODE_RM_LONG_REGISTER(rl); + DECODE_PRINTF(","); + shiftreg = DECODE_RM_LONG_REGISTER(rh); + TRACE_AND_STEP(); + bit = *shiftreg & 0x1F; + mask = (0x1 << bit); + CONDITIONAL_SET_FLAG(*srcreg & mask,F_CF); + *srcreg |= mask; + } else { + u16 *srcreg,*shiftreg; + u16 mask; + + srcreg = DECODE_RM_WORD_REGISTER(rl); + DECODE_PRINTF(","); + shiftreg = DECODE_RM_WORD_REGISTER(rh); + TRACE_AND_STEP(); + bit = *shiftreg & 0xF; + mask = (u16)(0x1 << bit); + CONDITIONAL_SET_FLAG(*srcreg & mask,F_CF); + *srcreg |= mask; + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0xac +****************************************************************************/ +void x86emuOp2_shrd_IMM(u8 X86EMU_UNUSED(op2)) +{ + int mod, rl, rh; + uint destoffset; + u8 shift; + + START_OF_INSTR(); + DECODE_PRINTF("SHRD\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + destoffset = decode_rmXX_address(mod, rl); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 destval; + u32 *shiftreg; + + DECODE_PRINTF(","); + shiftreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF(","); + shift = fetch_byte_imm(); + DECODE_PRINTF2("%d\n", shift); + TRACE_AND_STEP(); + destval = fetch_data_long(destoffset); + destval = shrd_long(destval,*shiftreg,shift); + store_data_long(destoffset, destval); + } else { + u16 destval; + u16 *shiftreg; + + DECODE_PRINTF(","); + shiftreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(","); + shift = fetch_byte_imm(); + DECODE_PRINTF2("%d\n", shift); + TRACE_AND_STEP(); + destval = fetch_data_word(destoffset); + destval = shrd_word(destval,*shiftreg,shift); + store_data_word(destoffset, destval); + } + } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *destreg,*shiftreg; + + destreg = 
DECODE_RM_LONG_REGISTER(rl); + DECODE_PRINTF(","); + shiftreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF(","); + shift = fetch_byte_imm(); + DECODE_PRINTF2("%d\n", shift); + TRACE_AND_STEP(); + *destreg = shrd_long(*destreg,*shiftreg,shift); + } else { + u16 *destreg,*shiftreg; + + destreg = DECODE_RM_WORD_REGISTER(rl); + DECODE_PRINTF(","); + shiftreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(","); + shift = fetch_byte_imm(); + DECODE_PRINTF2("%d\n", shift); + TRACE_AND_STEP(); + *destreg = shrd_word(*destreg,*shiftreg,shift); + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0xad +****************************************************************************/ +void x86emuOp2_shrd_CL(u8 X86EMU_UNUSED(op2)) +{ + int mod, rl, rh; + uint destoffset; + + START_OF_INSTR(); + DECODE_PRINTF("SHRD\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + destoffset = decode_rmXX_address(mod, rl); + DECODE_PRINTF(","); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 destval; + u32 *shiftreg; + + shiftreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF(",CL\n"); + TRACE_AND_STEP(); + destval = fetch_data_long(destoffset); + destval = shrd_long(destval,*shiftreg,M.x86.R_CL); + store_data_long(destoffset, destval); + } else { + u16 destval; + u16 *shiftreg; + + shiftreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(",CL\n"); + TRACE_AND_STEP(); + destval = fetch_data_word(destoffset); + destval = shrd_word(destval,*shiftreg,M.x86.R_CL); + store_data_word(destoffset, destval); + } + } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *destreg,*shiftreg; + + destreg = DECODE_RM_LONG_REGISTER(rl); + DECODE_PRINTF(","); + shiftreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF(",CL\n"); + TRACE_AND_STEP(); + *destreg = shrd_long(*destreg,*shiftreg,M.x86.R_CL); + } else { + u16 *destreg,*shiftreg; + + destreg = 
DECODE_RM_WORD_REGISTER(rl); + DECODE_PRINTF(","); + shiftreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(",CL\n"); + TRACE_AND_STEP(); + *destreg = shrd_word(*destreg,*shiftreg,M.x86.R_CL); + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0xaf +****************************************************************************/ +void x86emuOp2_imul_R_RM(u8 X86EMU_UNUSED(op2)) +{ + int mod, rl, rh; + uint srcoffset; + + START_OF_INSTR(); + DECODE_PRINTF("IMUL\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *destreg; + u32 srcval; + u32 res_lo,res_hi; + + destreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF(","); + srcoffset = decode_rmXX_address(mod, rl); + srcval = fetch_data_long(srcoffset); + TRACE_AND_STEP(); + imul_long_direct(&res_lo,&res_hi,(s32)*destreg,(s32)srcval); + if (res_hi != 0) { + SET_FLAG(F_CF); + SET_FLAG(F_OF); + } else { + CLEAR_FLAG(F_CF); + CLEAR_FLAG(F_OF); + } + *destreg = (u32)res_lo; + } else { + u16 *destreg; + u16 srcval; + u32 res; + + destreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(","); + srcoffset = decode_rmXX_address(mod, rl); + srcval = fetch_data_word(srcoffset); + TRACE_AND_STEP(); + res = (s16)*destreg * (s16)srcval; + if (res > 0xFFFF) { + SET_FLAG(F_CF); + SET_FLAG(F_OF); + } else { + CLEAR_FLAG(F_CF); + CLEAR_FLAG(F_OF); + } + *destreg = (u16)res; + } + } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *destreg,*srcreg; + u32 res_lo,res_hi; + + destreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF(","); + srcreg = DECODE_RM_LONG_REGISTER(rl); + TRACE_AND_STEP(); + imul_long_direct(&res_lo,&res_hi,(s32)*destreg,(s32)*srcreg); + if (res_hi != 0) { + SET_FLAG(F_CF); + SET_FLAG(F_OF); + } else { + CLEAR_FLAG(F_CF); + CLEAR_FLAG(F_OF); + } + *destreg = (u32)res_lo; + } else { + u16 *destreg,*srcreg; + u32 res; + + 
destreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(","); + srcreg = DECODE_RM_WORD_REGISTER(rl); + res = (s16)*destreg * (s16)*srcreg; + if (res > 0xFFFF) { + SET_FLAG(F_CF); + SET_FLAG(F_OF); + } else { + CLEAR_FLAG(F_CF); + CLEAR_FLAG(F_OF); + } + *destreg = (u16)res; + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0xb2 +****************************************************************************/ +void x86emuOp2_lss_R_IMM(u8 X86EMU_UNUSED(op2)) +{ + int mod, rh, rl; + u16 *dstreg; + uint srcoffset; + + START_OF_INSTR(); + DECODE_PRINTF("LSS\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + dstreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(","); + srcoffset = decode_rmXX_address(mod, rl); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *dstreg = fetch_data_word(srcoffset); + M.x86.R_SS = fetch_data_word(srcoffset + 2); + } else { /* register to register */ + /* UNDEFINED! 
*/ + TRACE_AND_STEP(); + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0xb3 +****************************************************************************/ +void x86emuOp2_btr_R(u8 X86EMU_UNUSED(op2)) +{ + int mod, rl, rh; + uint srcoffset; + int bit,disp; + + START_OF_INSTR(); + DECODE_PRINTF("BTR\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + srcoffset = decode_rmXX_address(mod, rl); + DECODE_PRINTF(","); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 srcval,mask; + u32 *shiftreg; + + shiftreg = DECODE_RM_LONG_REGISTER(rh); + TRACE_AND_STEP(); + bit = *shiftreg & 0x1F; + disp = (s16)*shiftreg >> 5; + srcval = fetch_data_long(srcoffset+disp); + mask = (0x1 << bit); + CONDITIONAL_SET_FLAG(srcval & mask,F_CF); + store_data_long(srcoffset+disp, srcval & ~mask); + } else { + u16 srcval,mask; + u16 *shiftreg; + + shiftreg = DECODE_RM_WORD_REGISTER(rh); + TRACE_AND_STEP(); + bit = *shiftreg & 0xF; + disp = (s16)*shiftreg >> 4; + srcval = fetch_data_word(srcoffset+disp); + mask = (u16)(0x1 << bit); + CONDITIONAL_SET_FLAG(srcval & mask,F_CF); + store_data_word(srcoffset+disp, (u16)(srcval & ~mask)); + } + } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *srcreg,*shiftreg; + u32 mask; + + srcreg = DECODE_RM_LONG_REGISTER(rl); + DECODE_PRINTF(","); + shiftreg = DECODE_RM_LONG_REGISTER(rh); + TRACE_AND_STEP(); + bit = *shiftreg & 0x1F; + mask = (0x1 << bit); + CONDITIONAL_SET_FLAG(*srcreg & mask,F_CF); + *srcreg &= ~mask; + } else { + u16 *srcreg,*shiftreg; + u16 mask; + + srcreg = DECODE_RM_WORD_REGISTER(rl); + DECODE_PRINTF(","); + shiftreg = DECODE_RM_WORD_REGISTER(rh); + TRACE_AND_STEP(); + bit = *shiftreg & 0xF; + mask = (u16)(0x1 << bit); + CONDITIONAL_SET_FLAG(*srcreg & mask,F_CF); + *srcreg &= ~mask; + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + 
+/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0xb4 +****************************************************************************/ +void x86emuOp2_lfs_R_IMM(u8 X86EMU_UNUSED(op2)) +{ + int mod, rh, rl; + u16 *dstreg; + uint srcoffset; + + START_OF_INSTR(); + DECODE_PRINTF("LFS\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + dstreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(","); + srcoffset = decode_rmXX_address(mod, rl); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *dstreg = fetch_data_word(srcoffset); + M.x86.R_FS = fetch_data_word(srcoffset + 2); + } else { /* register to register */ + /* UNDEFINED! */ + TRACE_AND_STEP(); + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0xb5 +****************************************************************************/ +void x86emuOp2_lgs_R_IMM(u8 X86EMU_UNUSED(op2)) +{ + int mod, rh, rl; + u16 *dstreg; + uint srcoffset; + + START_OF_INSTR(); + DECODE_PRINTF("LGS\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + dstreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(","); + srcoffset = decode_rmXX_address(mod, rl); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *dstreg = fetch_data_word(srcoffset); + M.x86.R_GS = fetch_data_word(srcoffset + 2); + } else { /* register to register */ + /* UNDEFINED! 
*/ + TRACE_AND_STEP(); + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0xb6 +****************************************************************************/ +void x86emuOp2_movzx_byte_R_RM(u8 X86EMU_UNUSED(op2)) +{ + int mod, rl, rh; + uint srcoffset; + + START_OF_INSTR(); + DECODE_PRINTF("MOVZX\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *destreg; + u32 srcval; + + destreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF(","); + srcoffset = decode_rmXX_address(mod, rl); + srcval = fetch_data_byte(srcoffset); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *destreg = srcval; + } else { + u16 *destreg; + u16 srcval; + + destreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(","); + srcoffset = decode_rmXX_address(mod, rl); + srcval = fetch_data_byte(srcoffset); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *destreg = srcval; + } + } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *destreg; + u8 *srcreg; + + destreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF(","); + srcreg = DECODE_RM_BYTE_REGISTER(rl); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *destreg = *srcreg; + } else { + u16 *destreg; + u8 *srcreg; + + destreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(","); + srcreg = DECODE_RM_BYTE_REGISTER(rl); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *destreg = *srcreg; + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0xb7 +****************************************************************************/ +void x86emuOp2_movzx_word_R_RM(u8 X86EMU_UNUSED(op2)) +{ + int mod, rl, rh; + uint srcoffset; + u32 *destreg; + u32 srcval; + u16 *srcreg; + + START_OF_INSTR(); + DECODE_PRINTF("MOVZX\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + destreg 
= DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF(","); + srcoffset = decode_rmXX_address(mod, rl); + srcval = fetch_data_word(srcoffset); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *destreg = srcval; + } else { /* register to register */ + destreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF(","); + srcreg = DECODE_RM_WORD_REGISTER(rl); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *destreg = *srcreg; + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0xba +****************************************************************************/ +void x86emuOp2_btX_I(u8 X86EMU_UNUSED(op2)) +{ + int mod, rl, rh; + uint srcoffset; + u8 shift; + int bit; + + START_OF_INSTR(); + FETCH_DECODE_MODRM(mod, rh, rl); + switch (rh) { + case 4: + DECODE_PRINTF("BT\t"); + break; + case 5: + DECODE_PRINTF("BTS\t"); + break; + case 6: + DECODE_PRINTF("BTR\t"); + break; + case 7: + DECODE_PRINTF("BTC\t"); + break; + default: + DECODE_PRINTF("ILLEGAL EXTENDED X86 OPCODE\n"); + TRACE_REGS(); + rom_printk("%04x:%04x: %02X%02X ILLEGAL EXTENDED X86 OPCODE EXTENSION!\n", + M.x86.R_CS, M.x86.R_IP-3,op2, (mod<<6)|(rh<<3)|rl); + HALT_SYS(); + } + if (mod < 3) { + + srcoffset = decode_rmXX_address(mod, rl); + shift = fetch_byte_imm(); + DECODE_PRINTF2(",%d\n", shift); + TRACE_AND_STEP(); + + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 srcval, mask; + + bit = shift & 0x1F; + srcval = fetch_data_long(srcoffset); + mask = (0x1 << bit); + CONDITIONAL_SET_FLAG(srcval & mask,F_CF); + switch (rh) { + case 5: + store_data_long(srcoffset, srcval | mask); + break; + case 6: + store_data_long(srcoffset, srcval & ~mask); + break; + case 7: + store_data_long(srcoffset, srcval ^ mask); + break; + default: + break; + } + } else { + u16 srcval, mask; + + bit = shift & 0xF; + srcval = fetch_data_word(srcoffset); + mask = (0x1 << bit); + CONDITIONAL_SET_FLAG(srcval & mask,F_CF); + switch (rh) { + case 5: + 
store_data_word(srcoffset, srcval | mask); + break; + case 6: + store_data_word(srcoffset, srcval & ~mask); + break; + case 7: + store_data_word(srcoffset, srcval ^ mask); + break; + default: + break; + } + } + } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *srcreg; + u32 mask; + + srcreg = DECODE_RM_LONG_REGISTER(rl); + shift = fetch_byte_imm(); + DECODE_PRINTF2(",%d\n", shift); + TRACE_AND_STEP(); + bit = shift & 0x1F; + mask = (0x1 << bit); + CONDITIONAL_SET_FLAG(*srcreg & mask,F_CF); + switch (rh) { + case 5: + *srcreg |= mask; + break; + case 6: + *srcreg &= ~mask; + break; + case 7: + *srcreg ^= mask; + break; + default: + break; + } + } else { + u16 *srcreg; + u16 mask; + + srcreg = DECODE_RM_WORD_REGISTER(rl); + shift = fetch_byte_imm(); + DECODE_PRINTF2(",%d\n", shift); + TRACE_AND_STEP(); + bit = shift & 0xF; + mask = (0x1 << bit); + CONDITIONAL_SET_FLAG(*srcreg & mask,F_CF); + switch (rh) { + case 5: + *srcreg |= mask; + break; + case 6: + *srcreg &= ~mask; + break; + case 7: + *srcreg ^= mask; + break; + default: + break; + } + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0xbb +****************************************************************************/ +void x86emuOp2_btc_R(u8 X86EMU_UNUSED(op2)) +{ + int mod, rl, rh; + uint srcoffset; + int bit,disp; + + START_OF_INSTR(); + DECODE_PRINTF("BTC\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + srcoffset = decode_rmXX_address(mod, rl); + DECODE_PRINTF(","); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 srcval,mask; + u32 *shiftreg; + + shiftreg = DECODE_RM_LONG_REGISTER(rh); + TRACE_AND_STEP(); + bit = *shiftreg & 0x1F; + disp = (s16)*shiftreg >> 5; + srcval = fetch_data_long(srcoffset+disp); + mask = (0x1 << bit); + CONDITIONAL_SET_FLAG(srcval & mask,F_CF); + store_data_long(srcoffset+disp, srcval ^ mask); + } else { + u16 srcval,mask; + 
u16 *shiftreg; + + shiftreg = DECODE_RM_WORD_REGISTER(rh); + TRACE_AND_STEP(); + bit = *shiftreg & 0xF; + disp = (s16)*shiftreg >> 4; + srcval = fetch_data_word(srcoffset+disp); + mask = (u16)(0x1 << bit); + CONDITIONAL_SET_FLAG(srcval & mask,F_CF); + store_data_word(srcoffset+disp, (u16)(srcval ^ mask)); + } + } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *srcreg,*shiftreg; + u32 mask; + + srcreg = DECODE_RM_LONG_REGISTER(rl); + DECODE_PRINTF(","); + shiftreg = DECODE_RM_LONG_REGISTER(rh); + TRACE_AND_STEP(); + bit = *shiftreg & 0x1F; + mask = (0x1 << bit); + CONDITIONAL_SET_FLAG(*srcreg & mask,F_CF); + *srcreg ^= mask; + } else { + u16 *srcreg,*shiftreg; + u16 mask; + + srcreg = DECODE_RM_WORD_REGISTER(rl); + DECODE_PRINTF(","); + shiftreg = DECODE_RM_WORD_REGISTER(rh); + TRACE_AND_STEP(); + bit = *shiftreg & 0xF; + mask = (u16)(0x1 << bit); + CONDITIONAL_SET_FLAG(*srcreg & mask,F_CF); + *srcreg ^= mask; + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0xbc +****************************************************************************/ +void x86emuOp2_bsf(u8 X86EMU_UNUSED(op2)) +{ + int mod, rl, rh; + uint srcoffset; + + START_OF_INSTR(); + DECODE_PRINTF("BSF\n"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + srcoffset = decode_rmXX_address(mod, rl); + DECODE_PRINTF(","); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 srcval, *dstreg; + + dstreg = DECODE_RM_LONG_REGISTER(rh); + TRACE_AND_STEP(); + srcval = fetch_data_long(srcoffset); + CONDITIONAL_SET_FLAG(srcval == 0, F_ZF); + for(*dstreg = 0; *dstreg < 32; (*dstreg)++) + if ((srcval >> *dstreg) & 1) break; + } else { + u16 srcval, *dstreg; + + dstreg = DECODE_RM_WORD_REGISTER(rh); + TRACE_AND_STEP(); + srcval = fetch_data_word(srcoffset); + CONDITIONAL_SET_FLAG(srcval == 0, F_ZF); + for(*dstreg = 0; *dstreg < 16; (*dstreg)++) + if ((srcval >> *dstreg) 
& 1) break; + } + } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *srcreg, *dstreg; + + srcreg = DECODE_RM_LONG_REGISTER(rl); + DECODE_PRINTF(","); + dstreg = DECODE_RM_LONG_REGISTER(rh); + TRACE_AND_STEP(); + CONDITIONAL_SET_FLAG(*srcreg == 0, F_ZF); + for(*dstreg = 0; *dstreg < 32; (*dstreg)++) + if ((*srcreg >> *dstreg) & 1) break; + } else { + u16 *srcreg, *dstreg; + + srcreg = DECODE_RM_WORD_REGISTER(rl); + DECODE_PRINTF(","); + dstreg = DECODE_RM_WORD_REGISTER(rh); + TRACE_AND_STEP(); + CONDITIONAL_SET_FLAG(*srcreg == 0, F_ZF); + for(*dstreg = 0; *dstreg < 16; (*dstreg)++) + if ((*srcreg >> *dstreg) & 1) break; + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0xbd +****************************************************************************/ +void x86emuOp2_bsr(u8 X86EMU_UNUSED(op2)) +{ + int mod, rl, rh; + uint srcoffset; + + START_OF_INSTR(); + DECODE_PRINTF("BSF\n"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + srcoffset = decode_rmXX_address(mod, rl); + DECODE_PRINTF(","); + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 srcval, *dstreg; + + dstreg = DECODE_RM_LONG_REGISTER(rh); + TRACE_AND_STEP(); + srcval = fetch_data_long(srcoffset); + CONDITIONAL_SET_FLAG(srcval == 0, F_ZF); + for(*dstreg = 31; *dstreg > 0; (*dstreg)--) + if ((srcval >> *dstreg) & 1) break; + } else { + u16 srcval, *dstreg; + + dstreg = DECODE_RM_WORD_REGISTER(rh); + TRACE_AND_STEP(); + srcval = fetch_data_word(srcoffset); + CONDITIONAL_SET_FLAG(srcval == 0, F_ZF); + for(*dstreg = 15; *dstreg > 0; (*dstreg)--) + if ((srcval >> *dstreg) & 1) break; + } + } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *srcreg, *dstreg; + + srcreg = DECODE_RM_LONG_REGISTER(rl); + DECODE_PRINTF(","); + dstreg = DECODE_RM_LONG_REGISTER(rh); + TRACE_AND_STEP(); + CONDITIONAL_SET_FLAG(*srcreg == 0, F_ZF); + 
for(*dstreg = 31; *dstreg > 0; (*dstreg)--) + if ((*srcreg >> *dstreg) & 1) break; + } else { + u16 *srcreg, *dstreg; + + srcreg = DECODE_RM_WORD_REGISTER(rl); + DECODE_PRINTF(","); + dstreg = DECODE_RM_WORD_REGISTER(rh); + TRACE_AND_STEP(); + CONDITIONAL_SET_FLAG(*srcreg == 0, F_ZF); + for(*dstreg = 15; *dstreg > 0; (*dstreg)--) + if ((*srcreg >> *dstreg) & 1) break; + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + +/**************************************************************************** +REMARKS: +Handles opcode 0x0f,0xbe +****************************************************************************/ +void x86emuOp2_movsx_byte_R_RM(u8 X86EMU_UNUSED(op2)) +{ + int mod, rl, rh; + uint srcoffset; + + START_OF_INSTR(); + DECODE_PRINTF("MOVSX\t"); + FETCH_DECODE_MODRM(mod, rh, rl); + if (mod < 3) { + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *destreg; + u32 srcval; + + destreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF(","); + srcoffset = decode_rmXX_address(mod, rl); + srcval = (s32)((s8)fetch_data_byte(srcoffset)); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *destreg = srcval; + } else { + u16 *destreg; + u16 srcval; + + destreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(","); + srcoffset = decode_rmXX_address(mod, rl); + srcval = (s16)((s8)fetch_data_byte(srcoffset)); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *destreg = srcval; + } + } else { /* register to register */ + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + u32 *destreg; + u8 *srcreg; + + destreg = DECODE_RM_LONG_REGISTER(rh); + DECODE_PRINTF(","); + srcreg = DECODE_RM_BYTE_REGISTER(rl); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *destreg = (s32)((s8)*srcreg); + } else { + u16 *destreg; + u8 *srcreg; + + destreg = DECODE_RM_WORD_REGISTER(rh); + DECODE_PRINTF(","); + srcreg = DECODE_RM_BYTE_REGISTER(rl); + DECODE_PRINTF("\n"); + TRACE_AND_STEP(); + *destreg = (s16)((s8)*srcreg); + } + } + DECODE_CLEAR_SEGOVR(); + END_OF_INSTR(); +} + 
/****************************************************************************
REMARKS:
Handles opcode 0x0f,0xbf (MOVSX r32,r/m16): sign-extend a word into the
destination register.
****************************************************************************/
void x86emuOp2_movsx_word_R_RM(u8 X86EMU_UNUSED(op2))
{
    int mod, rl, rh;
    uint srcoffset;
    u32 *destreg;
    u32 srcval;
    u16 *srcreg;

    START_OF_INSTR();
    DECODE_PRINTF("MOVSX\t");
    FETCH_DECODE_MODRM(mod, rh, rl);
    if (mod < 3) {
        destreg = DECODE_RM_LONG_REGISTER(rh);
        DECODE_PRINTF(",");
        srcoffset = decode_rmXX_address(mod, rl);
        srcval = (s32)((s16)fetch_data_word(srcoffset));
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        *destreg = srcval;
    } else {                    /* register to register */
        destreg = DECODE_RM_LONG_REGISTER(rh);
        DECODE_PRINTF(",");
        srcreg = DECODE_RM_WORD_REGISTER(rl);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        *destreg = (s32)((s16)*srcreg);
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/***************************************************************************
 * Double byte operation code table: dispatch for the second byte of a
 * 0x0f two-byte opcode.  Unimplemented opcodes decode as illegal.
 **************************************************************************/
void (*x86emu_optab2[256])(u8) =
{
/* 0x00 */ x86emuOp2_illegal_op,  /* Group F (ring 0 PM)      */
/* 0x01 */ x86emuOp2_illegal_op,  /* Group G (ring 0 PM)      */
/* 0x02 */ x86emuOp2_illegal_op,  /* lar (ring 0 PM)          */
/* 0x03 */ x86emuOp2_illegal_op,  /* lsl (ring 0 PM)          */
/* 0x04 */ x86emuOp2_illegal_op,
/* 0x05 */ x86emuOp2_illegal_op,  /* loadall (undocumented)   */
/* 0x06 */ x86emuOp2_illegal_op,  /* clts (ring 0 PM)         */
/* 0x07 */ x86emuOp2_illegal_op,  /* loadall (undocumented)   */
/* 0x08 */ x86emuOp2_illegal_op,  /* invd (ring 0 PM)         */
/* 0x09 */ x86emuOp2_illegal_op,  /* wbinvd (ring 0 PM)       */
/* 0x0a */ x86emuOp2_illegal_op,
/* 0x0b */ x86emuOp2_illegal_op,
/* 0x0c */ x86emuOp2_illegal_op,
/* 0x0d */ x86emuOp2_illegal_op,
/* 0x0e */ x86emuOp2_illegal_op,
/* 0x0f */ x86emuOp2_illegal_op,

/* 0x10 */ x86emuOp2_illegal_op,
/* 0x11 */ x86emuOp2_illegal_op,
/* 0x12 */ x86emuOp2_illegal_op,
/* 0x13 */ x86emuOp2_illegal_op,
/* 0x14 */ x86emuOp2_illegal_op,
/* 0x15 */ x86emuOp2_illegal_op,
/* 0x16 */ x86emuOp2_illegal_op,
/* 0x17 */ x86emuOp2_illegal_op,
/* 0x18 */ x86emuOp2_illegal_op,
/* 0x19 */ x86emuOp2_illegal_op,
/* 0x1a */ x86emuOp2_illegal_op,
/* 0x1b */ x86emuOp2_illegal_op,
/* 0x1c */ x86emuOp2_illegal_op,
/* 0x1d */ x86emuOp2_illegal_op,
/* 0x1e */ x86emuOp2_illegal_op,
/* 0x1f */ x86emuOp2_illegal_op,

/* 0x20 */ x86emuOp2_illegal_op,  /* mov reg32,creg (ring 0 PM) */
/* 0x21 */ x86emuOp2_illegal_op,  /* mov reg32,dreg (ring 0 PM) */
/* 0x22 */ x86emuOp2_illegal_op,  /* mov creg,reg32 (ring 0 PM) */
/* 0x23 */ x86emuOp2_illegal_op,  /* mov dreg,reg32 (ring 0 PM) */
/* 0x24 */ x86emuOp2_illegal_op,  /* mov reg32,treg (ring 0 PM) */
/* 0x25 */ x86emuOp2_illegal_op,
/* 0x26 */ x86emuOp2_illegal_op,  /* mov treg,reg32 (ring 0 PM) */
/* 0x27 */ x86emuOp2_illegal_op,
/* 0x28 */ x86emuOp2_illegal_op,
/* 0x29 */ x86emuOp2_illegal_op,
/* 0x2a */ x86emuOp2_illegal_op,
/* 0x2b */ x86emuOp2_illegal_op,
/* 0x2c */ x86emuOp2_illegal_op,
/* 0x2d */ x86emuOp2_illegal_op,
/* 0x2e */ x86emuOp2_illegal_op,
/* 0x2f */ x86emuOp2_illegal_op,

/* 0x30 */ x86emuOp2_illegal_op,
/* 0x31 */ x86emuOp2_illegal_op,
/* 0x32 */ x86emuOp2_illegal_op,
/* 0x33 */ x86emuOp2_illegal_op,
/* 0x34 */ x86emuOp2_illegal_op,
/* 0x35 */ x86emuOp2_illegal_op,
/* 0x36 */ x86emuOp2_illegal_op,
/* 0x37 */ x86emuOp2_illegal_op,
/* 0x38 */ x86emuOp2_illegal_op,
/* 0x39 */ x86emuOp2_illegal_op,
/* 0x3a */ x86emuOp2_illegal_op,
/* 0x3b */ x86emuOp2_illegal_op,
/* 0x3c */ x86emuOp2_illegal_op,
/* 0x3d */ x86emuOp2_illegal_op,
/* 0x3e */ x86emuOp2_illegal_op,
/* 0x3f */ x86emuOp2_illegal_op,

/* 0x40 */ x86emuOp2_illegal_op,
/* 0x41 */ x86emuOp2_illegal_op,
/* 0x42 */ x86emuOp2_illegal_op,
/* 0x43 */ x86emuOp2_illegal_op,
/* 0x44 */ x86emuOp2_illegal_op,
/* 0x45 */ x86emuOp2_illegal_op,
/* 0x46 */ x86emuOp2_illegal_op,
/* 0x47 */ x86emuOp2_illegal_op,
/* 0x48 */ x86emuOp2_illegal_op,
/* 0x49 */ x86emuOp2_illegal_op,
/* 0x4a */ x86emuOp2_illegal_op,
/* 0x4b */ x86emuOp2_illegal_op,
/* 0x4c */ x86emuOp2_illegal_op,
/* 0x4d */ x86emuOp2_illegal_op,
/* 0x4e */ x86emuOp2_illegal_op,
/* 0x4f */ x86emuOp2_illegal_op,

/* 0x50 */ x86emuOp2_illegal_op,
/* 0x51 */ x86emuOp2_illegal_op,
/* 0x52 */ x86emuOp2_illegal_op,
/* 0x53 */ x86emuOp2_illegal_op,
/* 0x54 */ x86emuOp2_illegal_op,
/* 0x55 */ x86emuOp2_illegal_op,
/* 0x56 */ x86emuOp2_illegal_op,
/* 0x57 */ x86emuOp2_illegal_op,
/* 0x58 */ x86emuOp2_illegal_op,
/* 0x59 */ x86emuOp2_illegal_op,
/* 0x5a */ x86emuOp2_illegal_op,
/* 0x5b */ x86emuOp2_illegal_op,
/* 0x5c */ x86emuOp2_illegal_op,
/* 0x5d */ x86emuOp2_illegal_op,
/* 0x5e */ x86emuOp2_illegal_op,
/* 0x5f */ x86emuOp2_illegal_op,

/* 0x60 */ x86emuOp2_illegal_op,
/* 0x61 */ x86emuOp2_illegal_op,
/* 0x62 */ x86emuOp2_illegal_op,
/* 0x63 */ x86emuOp2_illegal_op,
/* 0x64 */ x86emuOp2_illegal_op,
/* 0x65 */ x86emuOp2_illegal_op,
/* 0x66 */ x86emuOp2_illegal_op,
/* 0x67 */ x86emuOp2_illegal_op,
/* 0x68 */ x86emuOp2_illegal_op,
/* 0x69 */ x86emuOp2_illegal_op,
/* 0x6a */ x86emuOp2_illegal_op,
/* 0x6b */ x86emuOp2_illegal_op,
/* 0x6c */ x86emuOp2_illegal_op,
/* 0x6d */ x86emuOp2_illegal_op,
/* 0x6e */ x86emuOp2_illegal_op,
/* 0x6f */ x86emuOp2_illegal_op,

/* 0x70 */ x86emuOp2_illegal_op,
/* 0x71 */ x86emuOp2_illegal_op,
/* 0x72 */ x86emuOp2_illegal_op,
/* 0x73 */ x86emuOp2_illegal_op,
/* 0x74 */ x86emuOp2_illegal_op,
/* 0x75 */ x86emuOp2_illegal_op,
/* 0x76 */ x86emuOp2_illegal_op,
/* 0x77 */ x86emuOp2_illegal_op,
/* 0x78 */ x86emuOp2_illegal_op,
/* 0x79 */ x86emuOp2_illegal_op,
/* 0x7a */ x86emuOp2_illegal_op,
/* 0x7b */ x86emuOp2_illegal_op,
/* 0x7c */ x86emuOp2_illegal_op,
/* 0x7d */ x86emuOp2_illegal_op,
/* 0x7e */ x86emuOp2_illegal_op,
/* 0x7f */ x86emuOp2_illegal_op,

/* 0x80 */ x86emuOp2_long_jump,   /* Jcc rel16/32 */
/* 0x81 */ x86emuOp2_long_jump,
/* 0x82 */ x86emuOp2_long_jump,
/* 0x83 */ x86emuOp2_long_jump,
/* 0x84 */ x86emuOp2_long_jump,
/* 0x85 */ x86emuOp2_long_jump,
/* 0x86 */ x86emuOp2_long_jump,
/* 0x87 */ x86emuOp2_long_jump,
/* 0x88 */ x86emuOp2_long_jump,
/* 0x89 */ x86emuOp2_long_jump,
/* 0x8a */ x86emuOp2_long_jump,
/* 0x8b */ x86emuOp2_long_jump,
/* 0x8c */ x86emuOp2_long_jump,
/* 0x8d */ x86emuOp2_long_jump,
/* 0x8e */ x86emuOp2_long_jump,
/* 0x8f */ x86emuOp2_long_jump,

/* 0x90 */ x86emuOp2_set_byte,    /* SETcc r/m8 */
/* 0x91 */ x86emuOp2_set_byte,
/* 0x92 */ x86emuOp2_set_byte,
/* 0x93 */ x86emuOp2_set_byte,
/* 0x94 */ x86emuOp2_set_byte,
/* 0x95 */ x86emuOp2_set_byte,
/* 0x96 */ x86emuOp2_set_byte,
/* 0x97 */ x86emuOp2_set_byte,
/* 0x98 */ x86emuOp2_set_byte,
/* 0x99 */ x86emuOp2_set_byte,
/* 0x9a */ x86emuOp2_set_byte,
/* 0x9b */ x86emuOp2_set_byte,
/* 0x9c */ x86emuOp2_set_byte,
/* 0x9d */ x86emuOp2_set_byte,
/* 0x9e */ x86emuOp2_set_byte,
/* 0x9f */ x86emuOp2_set_byte,

/* 0xa0 */ x86emuOp2_push_FS,
/* 0xa1 */ x86emuOp2_pop_FS,
/* 0xa2 */ x86emuOp2_illegal_op,  /* NOTE(review): cpuid, not emulated */
/* 0xa3 */ x86emuOp2_bt_R,
/* 0xa4 */ x86emuOp2_shld_IMM,
/* 0xa5 */ x86emuOp2_shld_CL,
/* 0xa6 */ x86emuOp2_illegal_op,
/* 0xa7 */ x86emuOp2_illegal_op,
/* 0xa8 */ x86emuOp2_push_GS,
/* 0xa9 */ x86emuOp2_pop_GS,
/* 0xaa */ x86emuOp2_illegal_op,
/* 0xab */ x86emuOp2_bt_R,        /* NOTE(review): 0x0f,0xab is BTS but is
                                     routed to the BT handler -- verify the
                                     set side effect is emulated */
/* 0xac */ x86emuOp2_shrd_IMM,
/* 0xad */ x86emuOp2_shrd_CL,
/* 0xae */ x86emuOp2_illegal_op,
/* 0xaf */ x86emuOp2_imul_R_RM,

/* 0xb0 */ x86emuOp2_illegal_op,  /* TODO: cmpxchg */
/* 0xb1 */ x86emuOp2_illegal_op,  /* TODO: cmpxchg */
/* 0xb2 */ x86emuOp2_lss_R_IMM,
/* 0xb3 */ x86emuOp2_btr_R,
/* 0xb4 */ x86emuOp2_lfs_R_IMM,
/* 0xb5 */ x86emuOp2_lgs_R_IMM,
/* 0xb6 */ x86emuOp2_movzx_byte_R_RM,
/* 0xb7 */ x86emuOp2_movzx_word_R_RM,
/* 0xb8 */ x86emuOp2_illegal_op,
/* 0xb9 */ x86emuOp2_illegal_op,
/* 0xba */ x86emuOp2_btX_I,
/* 0xbb */ x86emuOp2_btc_R,
/* 0xbc */ x86emuOp2_bsf,
/* 0xbd */ x86emuOp2_bsr,
/* 0xbe */ x86emuOp2_movsx_byte_R_RM,
/* 0xbf */ x86emuOp2_movsx_word_R_RM,

/* 0xc0 */ x86emuOp2_illegal_op,  /* TODO: xadd */
/* 0xc1 */ x86emuOp2_illegal_op,  /* TODO: xadd */
/* 0xc2 */ x86emuOp2_illegal_op,
/* 0xc3 */ x86emuOp2_illegal_op,
/* 0xc4 */ x86emuOp2_illegal_op,
/* 0xc5 */ x86emuOp2_illegal_op,
/* 0xc6 */ x86emuOp2_illegal_op,
/* 0xc7 */ x86emuOp2_illegal_op,
/* 0xc8 */ x86emuOp2_illegal_op,  /* TODO: bswap */
/* 0xc9 */ x86emuOp2_illegal_op,  /* TODO: bswap */
/* 0xca */ x86emuOp2_illegal_op,  /* TODO: bswap */
/* 0xcb */ x86emuOp2_illegal_op,  /* TODO: bswap */
/* 0xcc */ x86emuOp2_illegal_op,  /* TODO: bswap */
/* 0xcd */ x86emuOp2_illegal_op,  /* TODO: bswap */
/* 0xce */ x86emuOp2_illegal_op,  /* TODO: bswap */
/* 0xcf */ x86emuOp2_illegal_op,  /* TODO: bswap */

/* 0xd0 */ x86emuOp2_illegal_op,
/* 0xd1 */ x86emuOp2_illegal_op,
/* 0xd2 */ x86emuOp2_illegal_op,
/* 0xd3 */ x86emuOp2_illegal_op,
/* 0xd4 */ x86emuOp2_illegal_op,
/* 0xd5 */ x86emuOp2_illegal_op,
/* 0xd6 */ x86emuOp2_illegal_op,
/* 0xd7 */ x86emuOp2_illegal_op,
/* 0xd8 */ x86emuOp2_illegal_op,
/* 0xd9 */ x86emuOp2_illegal_op,
/* 0xda */ x86emuOp2_illegal_op,
/* 0xdb */ x86emuOp2_illegal_op,
/* 0xdc */ x86emuOp2_illegal_op,
/* 0xdd */ x86emuOp2_illegal_op,
/* 0xde */ x86emuOp2_illegal_op,
/* 0xdf */ x86emuOp2_illegal_op,

/* 0xe0 */ x86emuOp2_illegal_op,
/* 0xe1 */ x86emuOp2_illegal_op,
/* 0xe2 */ x86emuOp2_illegal_op,
/* 0xe3 */ x86emuOp2_illegal_op,
/* 0xe4 */ x86emuOp2_illegal_op,
/* 0xe5 */ x86emuOp2_illegal_op,
/* 0xe6 */ x86emuOp2_illegal_op,
/* 0xe7 */ x86emuOp2_illegal_op,
/* 0xe8 */ x86emuOp2_illegal_op,
/* 0xe9 */ x86emuOp2_illegal_op,
/* 0xea */ x86emuOp2_illegal_op,
/* 0xeb */ x86emuOp2_illegal_op,
/* 0xec */ x86emuOp2_illegal_op,
/* 0xed */ x86emuOp2_illegal_op,
/* 0xee */ x86emuOp2_illegal_op,
/* 0xef */ x86emuOp2_illegal_op,

/* 0xf0 */ x86emuOp2_illegal_op,
/* 0xf1 */
x86emuOp2_illegal_op, +/* 0xf2 */ x86emuOp2_illegal_op, +/* 0xf3 */ x86emuOp2_illegal_op, +/* 0xf4 */ x86emuOp2_illegal_op, +/* 0xf5 */ x86emuOp2_illegal_op, +/* 0xf6 */ x86emuOp2_illegal_op, +/* 0xf7 */ x86emuOp2_illegal_op, +/* 0xf8 */ x86emuOp2_illegal_op, +/* 0xf9 */ x86emuOp2_illegal_op, +/* 0xfa */ x86emuOp2_illegal_op, +/* 0xfb */ x86emuOp2_illegal_op, +/* 0xfc */ x86emuOp2_illegal_op, +/* 0xfd */ x86emuOp2_illegal_op, +/* 0xfe */ x86emuOp2_illegal_op, +/* 0xff */ x86emuOp2_illegal_op, +}; diff --git a/arch/e2k/boot/bios/video/x86emu/src/x86emu/prim_ops.c b/arch/e2k/boot/bios/video/x86emu/src/x86emu/prim_ops.c new file mode 100644 index 000000000000..7f14c64bbc1b --- /dev/null +++ b/arch/e2k/boot/bios/video/x86emu/src/x86emu/prim_ops.c @@ -0,0 +1,2450 @@ +/**************************************************************************** +* +* Realmode X86 Emulator Library +* +* Copyright (C) 1991-2004 SciTech Software, Inc. +* Copyright (C) David Mosberger-Tang +* Copyright (C) 1999 Egbert Eich +* +* ======================================================================== +* +* Permission to use, copy, modify, distribute, and sell this software and +* its documentation for any purpose is hereby granted without fee, +* provided that the above copyright notice appear in all copies and that +* both that copyright notice and this permission notice appear in +* supporting documentation, and that the name of the authors not be used +* in advertising or publicity pertaining to distribution of the software +* without specific, written prior permission. The authors makes no +* representations about the suitability of this software for any purpose. +* It is provided "as is" without express or implied warranty. 
*
* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*
* ========================================================================
*
* Language:     ANSI C
* Environment:  Any
* Developer:    Kendall Bennett
*
* Description:  This file contains the code to implement the primitive
*               machine operations used by the emulation code in ops.c
*
* Carry Chain Calculation
*
* This represents a somewhat expensive calculation which is
* apparently required to emulate the setting of the OF and AF flag.
* The latter is not so important, but the former is.  The overflow
* flag is the XOR of the top two bits of the carry chain for an
* addition (similar for subtraction).  Since we do not want to
* simulate the addition in a bitwise manner, we try to calculate the
* carry chain given the two operands and the result.
*
* So, given the following table, which represents the addition of two
* bits, we can derive a formula for the carry chain.
*
* a   b   cin   r     cout
* 0   0   0     0     0
* 0   0   1     1     0
* 0   1   0     1     0
* 0   1   1     0     1
* 1   0   0     1     0
* 1   0   1     0     1
* 1   1   0     0     1
* 1   1   1     1     1
*
* Construction of table for cout:
*
*               ab
* r  \          00   01   11  10
* |------------------------------
* 0  |          0    1    1   1
* 1  |          0    0    1   0
*
* By inspection, one gets:  cc = ab +  r'(a + b)
*
* That represents a lot of operations, but NO CHOICE....
*
* Borrow Chain Calculation.
*
* The following table represents the subtraction of two bits, from
* which we can derive a formula for the borrow chain.
*
* a   b   bin   r     bout
* 0   0   0     0     0
* 0   0   1     1     1
* 0   1   0     1     1
* 0   1   1     0     1
* 1   0   0     1     0
* 1   0   1     0     0
* 1   1   0     0     0
* 1   1   1     1     1
*
* Construction of table for bout:
*
*               ab
* r  \          00   01   11  10
* |------------------------------
* 0  |          0    1    0   0
* 1  |          1    1    1   0
*
* By inspection, one gets:  bc = a'b +  r(a' + b)
*
****************************************************************************/

#define PRIM_OPS_NO_REDEFINE_ASM
#include "x86emu/x86emui.h"

/*
 * abs() needs to be defined directly because of the '-ffreestanding'
 * GCC option (no libc/kernel headers available in this environment).
 * The definition matches the usual kernel abs() macro.
 */
#define abs(x) ({				\
		int __x = (x);			\
		(__x < 0) ? -__x : __x;		\
	})

/*------------------------- Global Variables ------------------------------*/

/* Bit i of entry n is set when the byte value n*32+i has odd parity. */
static u32 x86emu_parity_tab[8] =
{
    0x96696996,
    0x69969669,
    0x69969669,
    0x96696996,
    0x69969669,
    0x96696996,
    0x96696996,
    0x69969669,
};

/* True when x has even parity -- the PF convention. */
#define PARITY(x)   (((x86emu_parity_tab[(x) / 32] >> ((x) % 32)) & 1) == 0)
/* XOR of the two lowest bits of x; used to derive OF from the carry chain. */
#define XOR2(x) 	(((x) ^ ((x)>>1)) & 0x1)

/*----------------------------- Implementation ----------------------------*/


/*--------- Side effects helper functions -------*/

/****************************************************************************
REMARKS:
implements side efects for byte operations that don't overflow
****************************************************************************/

/* PF <- even parity of the low byte of res. */
static void set_parity_flag(u32 res)
{
    CONDITIONAL_SET_FLAG(PARITY(res & 0xFF), F_PF);
}

/* SF/ZF/PF from an 8-bit result. */
static void set_szp_flags_8(u8 res)
{
    CONDITIONAL_SET_FLAG(res & 0x80, F_SF);
    CONDITIONAL_SET_FLAG(res == 0, F_ZF);
    set_parity_flag(res);
}

/* SF/ZF/PF from a 16-bit result. */
static void set_szp_flags_16(u16 res)
{
    CONDITIONAL_SET_FLAG(res & 0x8000, F_SF);
    CONDITIONAL_SET_FLAG(res == 0, F_ZF);
    set_parity_flag(res);
}

/* SF/ZF/PF from a 32-bit result. */
static void set_szp_flags_32(u32 res)
{
    CONDITIONAL_SET_FLAG(res & 0x80000000, F_SF);
    CONDITIONAL_SET_FLAG(res == 0, F_ZF);
    set_parity_flag(res);
}

/* Flag side effects for logical byte ops: OF/CF/AF cleared, SZP set. */
static void no_carry_byte_side_eff(u8 res)
{
CLEAR_FLAG(F_OF);
    CLEAR_FLAG(F_CF);
    CLEAR_FLAG(F_AF);
    set_szp_flags_8(res);
}

/* Flag side effects for logical word ops: OF/CF/AF cleared, SZP set. */
static void no_carry_word_side_eff(u16 res)
{
    CLEAR_FLAG(F_OF);
    CLEAR_FLAG(F_CF);
    CLEAR_FLAG(F_AF);
    set_szp_flags_16(res);
}

/* Flag side effects for logical long ops: OF/CF/AF cleared, SZP set. */
static void no_carry_long_side_eff(u32 res)
{
    CLEAR_FLAG(F_OF);
    CLEAR_FLAG(F_CF);
    CLEAR_FLAG(F_AF);
    set_szp_flags_32(res);
}

/*
 * Derive OF/AF (and optionally CF) from the carry chain of an addition
 * d + s = res of the given bit width; see the file header for the
 * cc = ab + r'(a + b) derivation.
 */
static void calc_carry_chain(int bits, u32 d, u32 s, u32 res, int set_carry)
{
    u32 cc;

    cc = (s & d) | ((~res) & (s | d));
    CONDITIONAL_SET_FLAG(XOR2(cc >> (bits - 2)), F_OF);
    CONDITIONAL_SET_FLAG(cc & 0x8, F_AF);
    if (set_carry) {
        CONDITIONAL_SET_FLAG(res & (1 << bits), F_CF);
    }
}

/*
 * Derive OF/AF (and optionally CF) from the borrow chain of a subtraction
 * d - s = res of the given bit width; bc = a'b + r(a' + b).
 */
static void calc_borrow_chain(int bits, u32 d, u32 s, u32 res, int set_carry)
{
    u32 bc;

    bc = (res & (~d | s)) | (~d & s);
    CONDITIONAL_SET_FLAG(XOR2(bc >> (bits - 2)), F_OF);
    CONDITIONAL_SET_FLAG(bc & 0x8, F_AF);
    if (set_carry) {
        CONDITIONAL_SET_FLAG(bc & (1 << (bits - 1)), F_CF);
    }
}

/****************************************************************************
REMARKS:
Implements the AAA instruction and side effects.
****************************************************************************/
u16 aaa_word(u16 d)
{
    u16	res;
    if ((d & 0xf) > 0x9 || ACCESS_FLAG(F_AF)) {
        d += 0x6;
        d += 0x100;
        SET_FLAG(F_AF);
        SET_FLAG(F_CF);
    } else {
        CLEAR_FLAG(F_CF);
        CLEAR_FLAG(F_AF);
    }
    /* keep AH and the low nibble of AL */
    res = (u16)(d & 0xFF0F);
    set_szp_flags_16(res);
    return res;
}

/****************************************************************************
REMARKS:
Implements the AAS instruction and side effects.
****************************************************************************/
u16 aas_word(u16 d)
{
    u16	res;
    if ((d & 0xf) > 0x9 || ACCESS_FLAG(F_AF)) {
        d -= 0x6;
        d -= 0x100;
        SET_FLAG(F_AF);
        SET_FLAG(F_CF);
    } else {
        CLEAR_FLAG(F_CF);
        CLEAR_FLAG(F_AF);
    }
    res = (u16)(d & 0xFF0F);
    set_szp_flags_16(res);
    return res;
}

/****************************************************************************
REMARKS:
Implements the AAD instruction and side effects.
NOTE(review): base 10 is hard-coded; the real instruction takes an
immediate base byte -- confirm the decoder only emits the 0x0A form.
****************************************************************************/
u16 aad_word(u16 d)
{
    u16 l;
    u8 hb, lb;

    hb = (u8)((d >> 8) & 0xff);
    lb = (u8)((d & 0xff));
    l = (u16)((lb + 10 * hb) & 0xFF);

    no_carry_byte_side_eff(l & 0xFF);
    return l;
}

/****************************************************************************
REMARKS:
Implements the AAM instruction and side effects.
NOTE(review): base 10 hard-coded here as well; AAM with immediate 0 would
need a #DE in a full implementation.
****************************************************************************/
u16 aam_word(u8 d)
{
    u16 h, l;

    h = (u16)(d / 10);
    l = (u16)(d % 10);
    l |= (u16)(h << 8);

    no_carry_byte_side_eff(l & 0xFF);
    return l;
}

/****************************************************************************
REMARKS:
Implements the ADC instruction and side effects.
****************************************************************************/
u8 adc_byte(u8 d, u8 s)
{
    u32 res;   /* all operands in native machine order */

    res = d + s;
    if (ACCESS_FLAG(F_CF)) res++;

    set_szp_flags_8(res);
    calc_carry_chain(8,s,d,res,1);

    return (u8)res;
}

/****************************************************************************
REMARKS:
Implements the ADC instruction and side effects.
****************************************************************************/
u16 adc_word(u16 d, u16 s)
{
    u32 res;   /* all operands in native machine order */

    res = d + s;
    if (ACCESS_FLAG(F_CF))
        res++;

    set_szp_flags_16((u16)res);
    calc_carry_chain(16,s,d,res,1);

    return (u16)res;
}

/****************************************************************************
REMARKS:
Implements the ADC instruction and side effects.
****************************************************************************/
u32 adc_long(u32 d, u32 s)
{
    u32 lo;    /* all operands in native machine order */
    u32 hi;
    u32 res;

    /* The 33rd bit is unavailable in a u32, so the carry-out is
       reconstructed from the 16-bit halves via 'hi'. */
    lo = (d & 0xFFFF) + (s & 0xFFFF);
    res = d + s;

    if (ACCESS_FLAG(F_CF)) {
        lo++;
        res++;
    }

    hi = (lo >> 16) + (d >> 16) + (s >> 16);

    set_szp_flags_32(res);
    calc_carry_chain(32,s,d,res,0);

    CONDITIONAL_SET_FLAG(hi & 0x10000, F_CF);

    return res;
}

/****************************************************************************
REMARKS:
Implements the ADD instruction and side effects.
****************************************************************************/
u8 add_byte(u8 d, u8 s)
{
    u32 res;   /* all operands in native machine order */

    res = d + s;
    set_szp_flags_8((u8)res);
    calc_carry_chain(8,s,d,res,1);

    return (u8)res;
}

/****************************************************************************
REMARKS:
Implements the ADD instruction and side effects.
****************************************************************************/
u16 add_word(u16 d, u16 s)
{
    u32 res;   /* all operands in native machine order */

    res = d + s;
    set_szp_flags_16((u16)res);
    calc_carry_chain(16,s,d,res,1);

    return (u16)res;
}

/****************************************************************************
REMARKS:
Implements the ADD instruction and side effects.
****************************************************************************/
u32 add_long(u32 d, u32 s)
{
    u32 res;

    res = d + s;
    set_szp_flags_32(res);
    calc_carry_chain(32,s,d,res,0);

    /* unsigned wraparound implies carry-out of bit 31 */
    CONDITIONAL_SET_FLAG(res < d || res < s, F_CF);

    return res;
}

/****************************************************************************
REMARKS:
Implements the AND instruction and side effects.
****************************************************************************/
u8 and_byte(u8 d, u8 s)
{
    u8 res;    /* all operands in native machine order */

    res = d & s;

    no_carry_byte_side_eff(res);
    return res;
}

/****************************************************************************
REMARKS:
Implements the AND instruction and side effects.
****************************************************************************/
u16 and_word(u16 d, u16 s)
{
    u16 res;   /* all operands in native machine order */

    res = d & s;

    no_carry_word_side_eff(res);
    return res;
}

/****************************************************************************
REMARKS:
Implements the AND instruction and side effects.
****************************************************************************/
u32 and_long(u32 d, u32 s)
{
    u32 res;   /* all operands in native machine order */

    res = d & s;
    no_carry_long_side_eff(res);
    return res;
}

/****************************************************************************
REMARKS:
Implements the CMP instruction and side effects.
****************************************************************************/
u8 cmp_byte(u8 d, u8 s)
{
    u32 res;   /* all operands in native machine order */

    res = d - s;
    set_szp_flags_8((u8)res);
    calc_borrow_chain(8, d, s, res, 1);

    /* CMP discards the difference: only flags change */
    return d;
}

/****************************************************************************
REMARKS:
Implements the CMP instruction and side effects.
****************************************************************************/
u16 cmp_word(u16 d, u16 s)
{
    u32 res;   /* all operands in native machine order */

    res = d - s;
    set_szp_flags_16((u16)res);
    calc_borrow_chain(16, d, s, res, 1);

    return d;
}

/****************************************************************************
REMARKS:
Implements the CMP instruction and side effects.
****************************************************************************/
u32 cmp_long(u32 d, u32 s)
{
    u32 res;   /* all operands in native machine order */

    res = d - s;
    set_szp_flags_32(res);
    calc_borrow_chain(32, d, s, res, 1);

    return d;
}

/****************************************************************************
REMARKS:
Implements the DAA instruction and side effects.
****************************************************************************/
u8 daa_byte(u8 d)
{
    u32 res = d;
    if ((d & 0xf) > 9 || ACCESS_FLAG(F_AF)) {
        res += 6;
        SET_FLAG(F_AF);
    }
    if (res > 0x9F || ACCESS_FLAG(F_CF)) {
        res += 0x60;
        SET_FLAG(F_CF);
    }
    set_szp_flags_8((u8)res);
    return (u8)res;
}

/****************************************************************************
REMARKS:
Implements the DAS instruction and side effects.
****************************************************************************/
u8 das_byte(u8 d)
{
    if ((d & 0xf) > 9 || ACCESS_FLAG(F_AF)) {
        d -= 6;
        SET_FLAG(F_AF);
    }
    /* NOTE(review): this second test uses the already-adjusted value --
       confirm against the reference DAS flag semantics. */
    if (d > 0x9F || ACCESS_FLAG(F_CF)) {
        d -= 0x60;
        SET_FLAG(F_CF);
    }
    set_szp_flags_8(d);
    return d;
}

/****************************************************************************
REMARKS:
Implements the DEC instruction and side effects.
****************************************************************************/
u8 dec_byte(u8 d)
{
    u32 res;   /* all operands in native machine order */

    res = d - 1;
    set_szp_flags_8((u8)res);
    /* CF is unchanged by DEC, hence set_carry == 0 */
    calc_borrow_chain(8, d, 1, res, 0);

    return (u8)res;
}

/****************************************************************************
REMARKS:
Implements the DEC instruction and side effects.
****************************************************************************/
u16 dec_word(u16 d)
{
    u32 res;   /* all operands in native machine order */

    res = d - 1;
    set_szp_flags_16((u16)res);
    calc_borrow_chain(16, d, 1, res, 0);

    return (u16)res;
}

/****************************************************************************
REMARKS:
Implements the DEC instruction and side effects.
****************************************************************************/
u32 dec_long(u32 d)
{
    u32 res;   /* all operands in native machine order */

    res = d - 1;

    set_szp_flags_32(res);
    calc_borrow_chain(32, d, 1, res, 0);

    return res;
}

/****************************************************************************
REMARKS:
Implements the INC instruction and side effects.
****************************************************************************/
u8 inc_byte(u8 d)
{
    u32 res;   /* all operands in native machine order */

    res = d + 1;
    set_szp_flags_8((u8)res);
    /* CF is unchanged by INC, hence set_carry == 0 */
    calc_carry_chain(8, d, 1, res, 0);

    return (u8)res;
}

/****************************************************************************
REMARKS:
Implements the INC instruction and side effects.
****************************************************************************/
u16 inc_word(u16 d)
{
    u32 res;   /* all operands in native machine order */

    res = d + 1;
    set_szp_flags_16((u16)res);
    calc_carry_chain(16, d, 1, res, 0);

    return (u16)res;
}

/****************************************************************************
REMARKS:
Implements the INC instruction and side effects.
****************************************************************************/
u32 inc_long(u32 d)
{
    u32 res;   /* all operands in native machine order */

    res = d + 1;
    set_szp_flags_32(res);
    calc_carry_chain(32, d, 1, res, 0);

    return res;
}

/****************************************************************************
REMARKS:
Implements the OR instruction and side effects.
****************************************************************************/
u8 or_byte(u8 d, u8 s)
{
    u8 res;    /* all operands in native machine order */

    res = d | s;
    no_carry_byte_side_eff(res);

    return res;
}

/****************************************************************************
REMARKS:
Implements the OR instruction and side effects.
****************************************************************************/
u16 or_word(u16 d, u16 s)
{
    u16 res;   /* all operands in native machine order */

    res = d | s;
    no_carry_word_side_eff(res);
    return res;
}

/****************************************************************************
REMARKS:
Implements the OR instruction and side effects.
****************************************************************************/
u32 or_long(u32 d, u32 s)
{
    u32 res;   /* all operands in native machine order */

    res = d | s;
    no_carry_long_side_eff(res);
    return res;
}

/****************************************************************************
REMARKS:
Implements the NEG instruction and side effects.
****************************************************************************/
u8 neg_byte(u8 s)
{
    u8 res;

    /* CF is set unless the operand is zero */
    CONDITIONAL_SET_FLAG(s != 0, F_CF);
    res = (u8)-s;
    set_szp_flags_8(res);
    calc_borrow_chain(8, 0, s, res, 0);

    return res;
}

/****************************************************************************
REMARKS:
Implements the NEG instruction and side effects.
****************************************************************************/
u16 neg_word(u16 s)
{
    u16 res;

    CONDITIONAL_SET_FLAG(s != 0, F_CF);
    res = (u16)-s;
    set_szp_flags_16((u16)res);
    calc_borrow_chain(16, 0, s, res, 0);

    return res;
}

/****************************************************************************
REMARKS:
Implements the NEG instruction and side effects.
****************************************************************************/
u32 neg_long(u32 s)
{
    u32 res;

    CONDITIONAL_SET_FLAG(s != 0, F_CF);
    res = (u32)-s;
    set_szp_flags_32(res);
    calc_borrow_chain(32, 0, s, res, 0);

    return res;
}

/****************************************************************************
REMARKS:
Implements the NOT instruction and side effects.
****************************************************************************/
u8 not_byte(u8 s)
{
    /* NOT affects no flags */
    return ~s;
}

/****************************************************************************
REMARKS:
Implements the NOT instruction and side effects.
****************************************************************************/
u16 not_word(u16 s)
{
    return ~s;
}

/****************************************************************************
REMARKS:
Implements the NOT instruction and side effects.
****************************************************************************/
u32 not_long(u32 s)
{
    return ~s;
}

/****************************************************************************
REMARKS:
Implements the RCL instruction and side effects.
****************************************************************************/
u8 rcl_byte(u8 d, u8 s)
{
	unsigned int res, cnt, mask, cf;

	/* s is the rotate distance (0..8).  The rotation goes *through*
	   CF, so the effective width is 9 and the count is taken mod 9.
	   For a rotation of n bits, using CAPS for new values:

	     CF              <- b_(8-n)
	     B_7  .. B_n     <- b_(8-(n+1)) .. b_0
	     B_(n-1)         <- old cf
	     B_(n-2) .. B_0  <- b_7 .. b_(8-(n-1))
	*/
	res = d;
	if ((cnt = s % 9) != 0) {
		/* extract the new CARRY FLAG: CF <- b_(8-n) */
		cf = (d >> (8 - cnt)) & 0x1;

		/* low bits shifted up into B_7 .. B_cnt; the mask keeps
		   only the 8 data bits */
		res = (d << cnt) & 0xff;

		/* high bits rotated around into B_(cnt-2) .. B_0,
		   shifted down 9-n positions and masked before or'ing */
		mask = (1 << (cnt - 1)) - 1;
		res |= (d >> (9 - cnt)) & mask;

		/* the old carry flag rotates into B_(n-1) */
		if (ACCESS_FLAG(F_CF)) {	/* carry flag is set */
			res |= 1 << (cnt - 1);
		}
		/* set the new carry flag, based on the variable "cf" */
		CONDITIONAL_SET_FLAG(cf, F_CF);
		/* OF is defined only for cnt==1: xor of the new CF and
		   the new most significant bit (expression parenthesized
		   since it previously appeared to misset OF) */
		CONDITIONAL_SET_FLAG(cnt == 1 && XOR2(cf + ((res >> 6) & 0x2)),
				     F_OF);

	}
	return (u8)res;
}

/****************************************************************************
REMARKS:
Implements the RCL instruction and side effects (16-bit operand; count
taken mod 17 = 16 data bits + CF).
****************************************************************************/
u16 rcl_word(u16 d, u8 s)
{
	unsigned int res, cnt, mask, cf;

	res = d;
	if ((cnt = s % 17) != 0) {
		cf = (d >> (16 - cnt)) & 0x1;
		res = (d << cnt) & 0xffff;
		mask = (1 << (cnt - 1)) - 1;
		res |= (d >> (17 - cnt)) & mask;
		if (ACCESS_FLAG(F_CF)) {
			res |= 1 << (cnt - 1);
		}
		CONDITIONAL_SET_FLAG(cf, F_CF);
		CONDITIONAL_SET_FLAG(cnt == 1 && XOR2(cf + ((res >> 14) & 0x2)),
				     F_OF);
	}
	return (u16)res;
}

/****************************************************************************
REMARKS:
Implements the RCL instruction and side effects (32-bit operand; count
taken mod 33).  NOTE(review): if s == 32 then cnt == 32 and "d << cnt"
shifts a 32-bit value by its full width, which is undefined behaviour in
C; presumably the decoder masks the count to 0..31 first -- confirm.
****************************************************************************/
u32 rcl_long(u32 d, u8 s)
{
	u32 res, cnt, mask, cf;

	res = d;
	if ((cnt = s % 33) != 0) {
		cf = (d >> (32 - cnt)) & 0x1;
		res = (d << cnt) & 0xffffffff;
		mask = (1 << (cnt - 1)) - 1;
		res |= (d >> (33 - cnt)) & mask;
		if (ACCESS_FLAG(F_CF)) {	/* carry flag is set */
			res |= 1 << (cnt - 1);
		}
		CONDITIONAL_SET_FLAG(cf, F_CF);
		CONDITIONAL_SET_FLAG(cnt == 1 && XOR2(cf + ((res >> 30) & 0x2)),
				     F_OF);
	}
	return res;
}

/****************************************************************************
REMARKS:
Implements the RCR instruction and side effects.
****************************************************************************/
u8 rcr_byte(u8 d, u8 s)
{
	u32 res, cnt;
	u32 mask, cf, ocf = 0;

	/* rotate right through carry; count taken mod 9
	   (8 data bits + CF).  For a rotation of n bits:

	     CF                  <- b_(n-1)
	     B_(8-(n+1)) .. B_0  <- b_7 .. b_n
	     B_(8-n)             <- old cf
	     B_7 .. B_(8-(n-1))  <- b_(n-2) .. b_0
	*/
	res = d;
	if ((cnt = s % 9) != 0) {
		/* extract the new CARRY FLAG: CF <- b_(n-1) */
		if (cnt == 1) {
			cf = d & 0x1;
			/* ACCESS_FLAG(..) != 0 normalises the flag to 0/1
			   regardless of how the flag register is
			   represented (packed bit array or unpacked) */
			ocf = ACCESS_FLAG(F_CF) != 0;
		} else
			cf = (d >> (cnt - 1)) & 0x1;

		/* B_(8-(n+1)) .. B_0 <- b_7 .. b_n; mask guards against
		   sign extension of the shifted value */
		mask = (1 << (8 - cnt)) - 1;
		res = (d >> cnt) & mask;

		/* high bits rotated around into B_7 .. B_(8-(n-1)),
		   shifted up 9-n positions */
		res |= (d << (9 - cnt));

		/* the old carry flag rotates into B_(8-n) */
		if (ACCESS_FLAG(F_CF)) {	/* carry flag is set */
			res |= 1 << (8 - cnt);
		}
		/* set the new carry flag, based on the variable "cf" */
		CONDITIONAL_SET_FLAG(cf, F_CF);
		/* OF is defined only for cnt==1: xor of the old CF and
		   the old most significant bit */
		if (cnt == 1) {
			CONDITIONAL_SET_FLAG(XOR2(ocf + ((d >> 6) & 0x2)),
					     F_OF);
		}
	}
	return (u8)res;
}

/****************************************************************************
REMARKS:
Implements the RCR instruction and side effects (16-bit operand; count
taken mod 17).
****************************************************************************/
u16 rcr_word(u16 d, u8 s)
{
	u32 res, cnt;
	u32 mask, cf, ocf = 0;

	/* rotate right through carry */
	res = d;
	if ((cnt = s % 17) != 0) {
		if (cnt == 1) {
			cf = d & 0x1;
			ocf = ACCESS_FLAG(F_CF) != 0;
		} else
			cf = (d >> (cnt - 1)) & 0x1;
		mask = (1 << (16 - cnt)) - 1;
		res = (d >> cnt) & mask;
		res |= (d << (17 - cnt));
		if (ACCESS_FLAG(F_CF)) {
			res |= 1 << (16 - cnt);
		}
		CONDITIONAL_SET_FLAG(cf, F_CF);
		if (cnt == 1) {
			CONDITIONAL_SET_FLAG(XOR2(ocf + ((d >> 14) & 0x2)),
					     F_OF);
		}
	}
	return (u16)res;
}

/****************************************************************************
REMARKS:
Implements the RCR instruction and side effects (32-bit operand; count
taken mod 33).
****************************************************************************/
u32 rcr_long(u32 d, u8 s)
{
	u32 res, cnt;
	u32 mask, cf, ocf = 0;

	/* rotate right through carry */
	res = d;
	if ((cnt = s % 33) != 0) {
		if (cnt == 1) {
			cf = d & 0x1;
			ocf = ACCESS_FLAG(F_CF) != 0;
		} else
			cf = (d >> (cnt - 1)) & 0x1;
		mask = (1 << (32 - cnt)) - 1;
		res = (d >> cnt) & mask;
		/* the cnt != 1 guard avoids "d << 32", a 32-bit shift by
		   the full type width (undefined behaviour); when cnt==1
		   the or'd contribution would be zero bits anyway */
		if (cnt != 1)
			res |= (d << (33 - cnt));
		if (ACCESS_FLAG(F_CF)) {	/* carry flag is set */
			res |= 1 << (32 - cnt);
		}
		CONDITIONAL_SET_FLAG(cf, F_CF);
		if (cnt == 1) {
			CONDITIONAL_SET_FLAG(XOR2(ocf + ((d >> 30) & 0x2)),
					     F_OF);
		}
	}
	return res;
}

/****************************************************************************
REMARKS:
Implements the ROL instruction and side effects.
+****************************************************************************/ +u8 rol_byte(u8 d, u8 s) +{ + unsigned int res, cnt, mask; + + /* rotate left */ + /* + s is the rotate distance. It varies from 0 - 8. + d is the byte object rotated. + + have + + CF B_7 ... B_0 + + The new rotate is done mod 8. + Much simpler than the "rcl" or "rcr" operations. + + IF n > 0 + 1) B_(7) .. B_(n) <- b_(8-(n+1)) .. b_(0) + 2) B_(n-1) .. B_(0) <- b_(7) .. b_(8-n) + */ + res = d; + if ((cnt = s % 8) != 0) { + /* B_(7) .. B_(n) <- b_(8-(n+1)) .. b_(0) */ + res = (d << cnt); + + /* B_(n-1) .. B_(0) <- b_(7) .. b_(8-n) */ + mask = (1 << cnt) - 1; + res |= (d >> (8 - cnt)) & mask; + + /* set the new carry flag, Note that it is the low order + bit of the result!!! */ + CONDITIONAL_SET_FLAG(res & 0x1, F_CF); /* OVERFLOW is set *IFF* s==1, then it is the + xor of CF and the most significant bit. Blecck. */ + CONDITIONAL_SET_FLAG(s == 1 && + XOR2((res & 0x1) + ((res >> 6) & 0x2)), + F_OF); + } if (s != 0) { + /* set the new carry flag, Note that it is the low order + bit of the result!!! */ + CONDITIONAL_SET_FLAG(res & 0x1, F_CF); + } + return (u8)res; +} + +/**************************************************************************** +REMARKS: +Implements the ROL instruction and side effects. +****************************************************************************/ +u16 rol_word(u16 d, u8 s) +{ + unsigned int res, cnt, mask; + + res = d; + if ((cnt = s % 16) != 0) { + res = (d << cnt); + mask = (1 << cnt) - 1; + res |= (d >> (16 - cnt)) & mask; + CONDITIONAL_SET_FLAG(res & 0x1, F_CF); + CONDITIONAL_SET_FLAG(s == 1 && + XOR2((res & 0x1) + ((res >> 14) & 0x2)), + F_OF); + } if (s != 0) { + /* set the new carry flag, Note that it is the low order + bit of the result!!! */ + CONDITIONAL_SET_FLAG(res & 0x1, F_CF); + } + return (u16)res; +} + +/**************************************************************************** +REMARKS: +Implements the ROL instruction and side effects. 
+****************************************************************************/ +u32 rol_long(u32 d, u8 s) +{ + u32 res, cnt, mask; + + res = d; + if ((cnt = s % 32) != 0) { + res = (d << cnt); + mask = (1 << cnt) - 1; + res |= (d >> (32 - cnt)) & mask; + CONDITIONAL_SET_FLAG(res & 0x1, F_CF); + CONDITIONAL_SET_FLAG(s == 1 && + XOR2((res & 0x1) + ((res >> 30) & 0x2)), + F_OF); + } if (s != 0) { + /* set the new carry flag, Note that it is the low order + bit of the result!!! */ + CONDITIONAL_SET_FLAG(res & 0x1, F_CF); + } + return res; +} + +/**************************************************************************** +REMARKS: +Implements the ROR instruction and side effects. +****************************************************************************/ +u8 ror_byte(u8 d, u8 s) +{ + unsigned int res, cnt, mask; + + /* rotate right */ + /* + s is the rotate distance. It varies from 0 - 8. + d is the byte object rotated. + + have + + B_7 ... B_0 + + The rotate is done mod 8. + + IF n > 0 + 1) B_(8-(n+1)) .. B_(0) <- b_(7) .. b_(n) + 2) B_(7) .. B_(8-n) <- b_(n-1) .. b_(0) + */ + res = d; + if ((cnt = s % 8) != 0) { /* not a typo, do nada if cnt==0 */ + /* B_(7) .. B_(8-n) <- b_(n-1) .. b_(0) */ + res = (d << (8 - cnt)); + + /* B_(8-(n+1)) .. B_(0) <- b_(7) .. b_(n) */ + mask = (1 << (8 - cnt)) - 1; + res |= (d >> (cnt)) & mask; + + /* set the new carry flag, Note that it is the low order + bit of the result!!! */ + CONDITIONAL_SET_FLAG(res & 0x80, F_CF); + /* OVERFLOW is set *IFF* s==1, then it is the + xor of the two most significant bits. Blecck. */ + CONDITIONAL_SET_FLAG(s == 1 && XOR2(res >> 6), F_OF); + } else if (s != 0) { + /* set the new carry flag, Note that it is the low order + bit of the result!!! */ + CONDITIONAL_SET_FLAG(res & 0x80, F_CF); + } + return (u8)res; +} + +/**************************************************************************** +REMARKS: +Implements the ROR instruction and side effects. 
****************************************************************************/
u16 ror_word(u16 d, u8 s)
{
	unsigned int res, cnt, mask;

	res = d;
	if ((cnt = s % 16) != 0) {
		res = (d << (16 - cnt));
		mask = (1 << (16 - cnt)) - 1;
		res |= (d >> (cnt)) & mask;
		/* new CF is the high-order bit of the result */
		CONDITIONAL_SET_FLAG(res & 0x8000, F_CF);
		/* OF defined only for s==1: xor of the two MSBs */
		CONDITIONAL_SET_FLAG(s == 1 && XOR2(res >> 14), F_OF);
	} else if (s != 0) {
		/* rotate by a multiple of 16: CF still tracks the
		   high-order bit of the (unchanged) result */
		CONDITIONAL_SET_FLAG(res & 0x8000, F_CF);
	}
	return (u16)res;
}

/****************************************************************************
REMARKS:
Implements the ROR instruction and side effects (32-bit operand).
****************************************************************************/
u32 ror_long(u32 d, u8 s)
{
	u32 res, cnt, mask;

	res = d;
	if ((cnt = s % 32) != 0) {
		res = (d << (32 - cnt));
		mask = (1 << (32 - cnt)) - 1;
		res |= (d >> (cnt)) & mask;
		CONDITIONAL_SET_FLAG(res & 0x80000000, F_CF);
		CONDITIONAL_SET_FLAG(s == 1 && XOR2(res >> 30), F_OF);
	} else if (s != 0) {
		/* rotate by a multiple of 32: CF still tracks the
		   high-order bit of the (unchanged) result */
		CONDITIONAL_SET_FLAG(res & 0x80000000, F_CF);
	}
	return res;
}

/****************************************************************************
REMARKS:
Implements the SHL instruction and side effects.
****************************************************************************/
u8 shl_byte(u8 d, u8 s)
{
	unsigned int cnt, res, cf;

	if (s < 8) {
		cnt = s % 8;

		/* last bit shifted out goes into carry flag */
		if (cnt > 0) {
			res = d << cnt;
			cf = d & (1 << (8 - cnt));
			CONDITIONAL_SET_FLAG(cf, F_CF);
			set_szp_flags_8((u8)res);
		} else {
			res = (u8) d;
		}

		if (cnt == 1) {
			/* OF for a shift of 1: result MSB xor CF.
			   Needs simplification. */
			CONDITIONAL_SET_FLAG(
				(((res & 0x80) == 0x80) ^
				(ACCESS_FLAG(F_CF) != 0)),
				/* was (M.x86.R_FLG&F_CF)==F_CF)), */
				F_OF);
		} else {
			CLEAR_FLAG(F_OF);
		}
	} else {
		/* NOTE(review): for s >= 32, "d << (s-1)" over-shifts a
		   32-bit int (undefined behaviour); presumably the
		   decoder masks the count to 0..31 -- confirm. */
		res = 0;
		CONDITIONAL_SET_FLAG((d << (s-1)) & 0x80, F_CF);
		CLEAR_FLAG(F_OF);
		CLEAR_FLAG(F_SF);
		SET_FLAG(F_PF);
		SET_FLAG(F_ZF);
	}
	return (u8)res;
}

/****************************************************************************
REMARKS:
Implements the SHL instruction and side effects (16-bit operand).
****************************************************************************/
u16 shl_word(u16 d, u8 s)
{
	unsigned int cnt, res, cf;

	if (s < 16) {
		cnt = s % 16;
		if (cnt > 0) {
			res = d << cnt;
			cf = d & (1 << (16 - cnt));
			CONDITIONAL_SET_FLAG(cf, F_CF);
			set_szp_flags_16((u16)res);
		} else {
			res = (u16) d;
		}

		if (cnt == 1) {
			CONDITIONAL_SET_FLAG(
				(((res & 0x8000) == 0x8000) ^
				(ACCESS_FLAG(F_CF) != 0)),
				F_OF);
		} else {
			CLEAR_FLAG(F_OF);
		}
	} else {
		res = 0;
		CONDITIONAL_SET_FLAG((d << (s-1)) & 0x8000, F_CF);
		CLEAR_FLAG(F_OF);
		CLEAR_FLAG(F_SF);
		SET_FLAG(F_PF);
		SET_FLAG(F_ZF);
	}
	return (u16)res;
}

/****************************************************************************
REMARKS:
Implements the SHL instruction and side effects.
****************************************************************************/
u32 shl_long(u32 d, u8 s)
{
	unsigned int cnt, res, cf;

	if (s < 32) {
		cnt = s % 32;
		if (cnt > 0) {
			res = d << cnt;
			/* last bit shifted out goes into CF */
			cf = d & (1 << (32 - cnt));
			CONDITIONAL_SET_FLAG(cf, F_CF);
			set_szp_flags_32((u32)res);
		} else {
			res = d;
		}
		if (cnt == 1) {
			/* OF for a shift of 1: result MSB xor CF */
			CONDITIONAL_SET_FLAG((((res & 0x80000000) == 0x80000000) ^
					      (ACCESS_FLAG(F_CF) != 0)), F_OF);
		} else {
			CLEAR_FLAG(F_OF);
		}
	} else {
		/* NOTE(review): "d << (s-1)" over-shifts for s >= 33;
		   presumably callers mask the count first -- confirm. */
		res = 0;
		CONDITIONAL_SET_FLAG((d << (s-1)) & 0x80000000, F_CF);
		CLEAR_FLAG(F_OF);
		CLEAR_FLAG(F_SF);
		SET_FLAG(F_PF);
		SET_FLAG(F_ZF);
	}
	return res;
}

/****************************************************************************
REMARKS:
Implements the SHR instruction and side effects.
****************************************************************************/
u8 shr_byte(u8 d, u8 s)
{
	unsigned int cnt, res, cf;

	if (s < 8) {
		cnt = s % 8;
		if (cnt > 0) {
			/* last bit shifted out goes into CF */
			cf = d & (1 << (cnt - 1));
			res = d >> cnt;
			CONDITIONAL_SET_FLAG(cf, F_CF);
			set_szp_flags_8((u8)res);
		} else {
			res = (u8) d;
		}

		if (cnt == 1) {
			/* OF for a shift of 1: xor of the two old MSBs */
			CONDITIONAL_SET_FLAG(XOR2(res >> 6), F_OF);
		} else {
			CLEAR_FLAG(F_OF);
		}
	} else {
		res = 0;
		CONDITIONAL_SET_FLAG((d >> (s-1)) & 0x1, F_CF);
		CLEAR_FLAG(F_OF);
		CLEAR_FLAG(F_SF);
		SET_FLAG(F_PF);
		SET_FLAG(F_ZF);
	}
	return (u8)res;
}

/****************************************************************************
REMARKS:
Implements the SHR instruction and side effects (16-bit operand).
NOTE(review): the over-shift branch here clears PF (and CF) for the zero
result, while shr_byte() sets PF -- inconsistent; a zero result has even
parity, so PF=1 looks intended.  Confirm before relying on PF here.
****************************************************************************/
u16 shr_word(u16 d, u8 s)
{
	unsigned int cnt, res, cf;

	if (s < 16) {
		cnt = s % 16;
		if (cnt > 0) {
			cf = d & (1 << (cnt - 1));
			res = d >> cnt;
			CONDITIONAL_SET_FLAG(cf, F_CF);
			set_szp_flags_16((u16)res);
		} else {
			res = d;
		}

		if (cnt == 1) {
			CONDITIONAL_SET_FLAG(XOR2(res >> 14), F_OF);
		} else {
			CLEAR_FLAG(F_OF);
		}
	} else {
		res = 0;
		CLEAR_FLAG(F_CF);
		CLEAR_FLAG(F_OF);
		SET_FLAG(F_ZF);
		CLEAR_FLAG(F_SF);
		CLEAR_FLAG(F_PF);
	}
	return (u16)res;
}

/****************************************************************************
REMARKS:
Implements the SHR instruction and side effects (32-bit operand).
****************************************************************************/
u32 shr_long(u32 d, u8 s)
{
	unsigned int cnt, res, cf;

	if (s < 32) {
		cnt = s % 32;
		if (cnt > 0) {
			cf = d & (1 << (cnt - 1));
			res = d >> cnt;
			CONDITIONAL_SET_FLAG(cf, F_CF);
			set_szp_flags_32((u32)res);
		} else {
			res = d;
		}
		if (cnt == 1) {
			CONDITIONAL_SET_FLAG(XOR2(res >> 30), F_OF);
		} else {
			CLEAR_FLAG(F_OF);
		}
	} else {
		res = 0;
		CLEAR_FLAG(F_CF);
		CLEAR_FLAG(F_OF);
		SET_FLAG(F_ZF);
		CLEAR_FLAG(F_SF);
		CLEAR_FLAG(F_PF);
	}
	return res;
}

/****************************************************************************
REMARKS:
Implements the SAR instruction and side effects.
****************************************************************************/
u8 sar_byte(u8 d, u8 s)
{
	unsigned int cnt, res, cf, mask, sf;

	res = d;
	sf = d & 0x80;		/* sign bit, replicated into vacated bits */
	cnt = s % 8;
	if (cnt > 0 && cnt < 8) {
		mask = (1 << (8 - cnt)) - 1;
		/* last bit shifted out goes into CF */
		cf = d & (1 << (cnt - 1));
		res = (d >> cnt) & mask;
		CONDITIONAL_SET_FLAG(cf, F_CF);
		if (sf) {
			res |= ~mask;	/* sign-extend the result */
		}
		set_szp_flags_8((u8)res);
	} else if (cnt >= 8) {
		/* shift of >= width: result saturates to all sign bits */
		if (sf) {
			res = 0xff;
			SET_FLAG(F_CF);
			CLEAR_FLAG(F_ZF);
			SET_FLAG(F_SF);
			SET_FLAG(F_PF);
		} else {
			res = 0;
			CLEAR_FLAG(F_CF);
			SET_FLAG(F_ZF);
			CLEAR_FLAG(F_SF);
			CLEAR_FLAG(F_PF);
		}
	}
	return (u8)res;
}

/****************************************************************************
REMARKS:
Implements the SAR instruction and side effects (16-bit operand).
****************************************************************************/
u16 sar_word(u16 d, u8 s)
{
	unsigned int cnt, res, cf, mask, sf;

	sf = d & 0x8000;
	cnt = s % 16;
	res = d;
	if (cnt > 0 && cnt < 16) {
		mask = (1 << (16 - cnt)) - 1;
		cf = d & (1 << (cnt - 1));
		res = (d >> cnt) & mask;
		CONDITIONAL_SET_FLAG(cf, F_CF);
		if (sf) {
			res |= ~mask;
		}
		set_szp_flags_16((u16)res);
	} else if (cnt >= 16) {
		if (sf) {
			res = 0xffff;
			SET_FLAG(F_CF);
			CLEAR_FLAG(F_ZF);
			SET_FLAG(F_SF);
			SET_FLAG(F_PF);
		} else {
			res = 0;
			CLEAR_FLAG(F_CF);
			SET_FLAG(F_ZF);
			CLEAR_FLAG(F_SF);
			CLEAR_FLAG(F_PF);
		}
	}
	return (u16)res;
}

/****************************************************************************
REMARKS:
Implements the SAR instruction and side effects (32-bit operand).
****************************************************************************/
u32 sar_long(u32 d, u8 s)
{
	u32 cnt, res, cf, mask, sf;

	sf = d & 0x80000000;
	cnt = s % 32;
	res = d;
	if (cnt > 0 && cnt < 32) {
		mask = (1 << (32 - cnt)) - 1;
		cf = d & (1 << (cnt - 1));
		res = (d >> cnt) & mask;
		CONDITIONAL_SET_FLAG(cf, F_CF);
		if (sf) {
			res |= ~mask;
		}
		set_szp_flags_32(res);
	} else if (cnt >= 32) {
		if (sf) {
			res = 0xffffffff;
			SET_FLAG(F_CF);
			CLEAR_FLAG(F_ZF);
			SET_FLAG(F_SF);
			SET_FLAG(F_PF);
		} else {
			res = 0;
			CLEAR_FLAG(F_CF);
			SET_FLAG(F_ZF);
			CLEAR_FLAG(F_SF);
			CLEAR_FLAG(F_PF);
		}
	}
	return res;
}

/****************************************************************************
REMARKS:
Implements the SHLD instruction and side effects.  "fill" supplies the
bits shifted in from the right.
****************************************************************************/
u16 shld_word (u16 d, u16 fill, u8 s)
{
	unsigned int cnt, res, cf;

	if (s < 16) {
		cnt = s % 16;
		if (cnt > 0) {
			res = (d << cnt) | (fill >> (16-cnt));
			/* last bit shifted out goes into CF */
			cf = d & (1 << (16 - cnt));
			CONDITIONAL_SET_FLAG(cf, F_CF);
			set_szp_flags_16((u16)res);
		} else {
			res = d;
		}
		if (cnt == 1) {
			/* OF for a shift of 1: result MSB xor CF */
			CONDITIONAL_SET_FLAG((((res & 0x8000) == 0x8000) ^
					      (ACCESS_FLAG(F_CF) != 0)), F_OF);
		} else {
			CLEAR_FLAG(F_OF);
		}
	} else {
		res = 0;
		CONDITIONAL_SET_FLAG((d << (s-1)) & 0x8000, F_CF);
		CLEAR_FLAG(F_OF);
		CLEAR_FLAG(F_SF);
		SET_FLAG(F_PF);
		SET_FLAG(F_ZF);
	}
	return (u16)res;
}

/****************************************************************************
REMARKS:
Implements the SHLD instruction and side effects.
****************************************************************************/
u32 shld_long (u32 d, u32 fill, u8 s)
{
	unsigned int cnt, res, cf;

	if (s < 32) {
		cnt = s % 32;
		if (cnt > 0) {
			res = (d << cnt) | (fill >> (32-cnt));
			/* last bit shifted out goes into CF */
			cf = d & (1 << (32 - cnt));
			CONDITIONAL_SET_FLAG(cf, F_CF);
			set_szp_flags_32((u32)res);
		} else {
			res = d;
		}
		if (cnt == 1) {
			CONDITIONAL_SET_FLAG((((res & 0x80000000) == 0x80000000) ^
					      (ACCESS_FLAG(F_CF) != 0)), F_OF);
		} else {
			CLEAR_FLAG(F_OF);
		}
	} else {
		res = 0;
		CONDITIONAL_SET_FLAG((d << (s-1)) & 0x80000000, F_CF);
		CLEAR_FLAG(F_OF);
		CLEAR_FLAG(F_SF);
		SET_FLAG(F_PF);
		SET_FLAG(F_ZF);
	}
	return res;
}

/****************************************************************************
REMARKS:
Implements the SHRD instruction and side effects.  "fill" supplies the
bits shifted in from the left.
****************************************************************************/
u16 shrd_word (u16 d, u16 fill, u8 s)
{
	unsigned int cnt, res, cf;

	if (s < 16) {
		cnt = s % 16;
		if (cnt > 0) {
			/* last bit shifted out goes into CF */
			cf = d & (1 << (cnt - 1));
			res = (d >> cnt) | (fill << (16 - cnt));
			CONDITIONAL_SET_FLAG(cf, F_CF);
			set_szp_flags_16((u16)res);
		} else {
			res = d;
		}

		if (cnt == 1) {
			CONDITIONAL_SET_FLAG(XOR2(res >> 14), F_OF);
		} else {
			CLEAR_FLAG(F_OF);
		}
	} else {
		res = 0;
		CLEAR_FLAG(F_CF);
		CLEAR_FLAG(F_OF);
		SET_FLAG(F_ZF);
		CLEAR_FLAG(F_SF);
		CLEAR_FLAG(F_PF);
	}
	return (u16)res;
}

/****************************************************************************
REMARKS:
Implements the SHRD instruction and side effects (32-bit operand).
****************************************************************************/
u32 shrd_long (u32 d, u32 fill, u8 s)
{
	unsigned int cnt, res, cf;

	if (s < 32) {
		cnt = s % 32;
		if (cnt > 0) {
			cf = d & (1 << (cnt - 1));
			res = (d >> cnt) | (fill << (32 - cnt));
			CONDITIONAL_SET_FLAG(cf, F_CF);
			set_szp_flags_32((u32)res);
		} else {
			res = d;
		}
		if (cnt == 1) {
			CONDITIONAL_SET_FLAG(XOR2(res >> 30), F_OF);
		} else {
			CLEAR_FLAG(F_OF);
		}
	} else {
		res = 0;
		CLEAR_FLAG(F_CF);
		CLEAR_FLAG(F_OF);
		SET_FLAG(F_ZF);
		CLEAR_FLAG(F_SF);
		CLEAR_FLAG(F_PF);
	}
	return res;
}

/****************************************************************************
REMARKS:
Implements the SBB instruction and side effects (subtract with borrow:
the incoming CF is subtracted as well).
****************************************************************************/
u8 sbb_byte(u8 d, u8 s)
{
	u32 res;	/* all operands in native machine order */
	u32 bc;		/* borrow chain */

	if (ACCESS_FLAG(F_CF))
		res = d - s - 1;
	else
		res = d - s;
	set_szp_flags_8((u8)res);

	/* calculate the borrow chain.  See note at top */
	bc = (res & (~d | s)) | (~d & s);
	CONDITIONAL_SET_FLAG(bc & 0x80, F_CF);
	CONDITIONAL_SET_FLAG(XOR2(bc >> 6), F_OF);
	CONDITIONAL_SET_FLAG(bc & 0x8, F_AF);
	return (u8)res;
}

/****************************************************************************
REMARKS:
Implements the SBB instruction and side effects (16-bit operand).
****************************************************************************/
u16 sbb_word(u16 d, u16 s)
{
	u32 res;	/* all operands in native machine order */
	u32 bc;

	if (ACCESS_FLAG(F_CF))
		res = d - s - 1;
	else
		res = d - s;
	set_szp_flags_16((u16)res);

	/* calculate the borrow chain.  See note at top */
	bc = (res & (~d | s)) | (~d & s);
	CONDITIONAL_SET_FLAG(bc & 0x8000, F_CF);
	CONDITIONAL_SET_FLAG(XOR2(bc >> 14), F_OF);
	CONDITIONAL_SET_FLAG(bc & 0x8, F_AF);
	return (u16)res;
}

/****************************************************************************
REMARKS:
Implements the SBB instruction and side effects (32-bit operand).
****************************************************************************/
u32 sbb_long(u32 d, u32 s)
{
	u32 res;	/* all operands in native machine order */
	u32 bc;

	if (ACCESS_FLAG(F_CF))
		res = d - s - 1;
	else
		res = d - s;

	set_szp_flags_32(res);

	/* calculate the borrow chain.  See note at top */
	bc = (res & (~d | s)) | (~d & s);
	CONDITIONAL_SET_FLAG(bc & 0x80000000, F_CF);
	CONDITIONAL_SET_FLAG(XOR2(bc >> 30), F_OF);
	CONDITIONAL_SET_FLAG(bc & 0x8, F_AF);
	return res;
}

/****************************************************************************
REMARKS:
Implements the SUB instruction and side effects.
****************************************************************************/
u8 sub_byte(u8 d, u8 s)
{
	u32 res;	/* all operands in native machine order */
	u32 bc;

	res = d - s;
	set_szp_flags_8((u8)res);

	/* calculate the borrow chain.  See note at top */
	bc = (res & (~d | s)) | (~d & s);
	CONDITIONAL_SET_FLAG(bc & 0x80, F_CF);
	CONDITIONAL_SET_FLAG(XOR2(bc >> 6), F_OF);
	CONDITIONAL_SET_FLAG(bc & 0x8, F_AF);
	return (u8)res;
}

/****************************************************************************
REMARKS:
Implements the SUB instruction and side effects (16-bit operand).
****************************************************************************/
u16 sub_word(u16 d, u16 s)
{
	u32 res;	/* all operands in native machine order */
	u32 bc;

	res = d - s;
	set_szp_flags_16((u16)res);

	/* calculate the borrow chain.  See note at top */
	bc = (res & (~d | s)) | (~d & s);
	CONDITIONAL_SET_FLAG(bc & 0x8000, F_CF);
	CONDITIONAL_SET_FLAG(XOR2(bc >> 14), F_OF);
	CONDITIONAL_SET_FLAG(bc & 0x8, F_AF);
	return (u16)res;
}

/****************************************************************************
REMARKS:
Implements the SUB instruction and side effects (32-bit operand).
****************************************************************************/
u32 sub_long(u32 d, u32 s)
{
	u32 res;	/* all operands in native machine order */
	u32 bc;

	res = d - s;
	set_szp_flags_32(res);

	/* calculate the borrow chain.  See note at top */
	bc = (res & (~d | s)) | (~d & s);
	CONDITIONAL_SET_FLAG(bc & 0x80000000, F_CF);
	CONDITIONAL_SET_FLAG(XOR2(bc >> 30), F_OF);
	CONDITIONAL_SET_FLAG(bc & 0x8, F_AF);
	return res;
}

/****************************************************************************
REMARKS:
Implements the TEST instruction and side effects (AND that only updates
flags; the result is discarded).
****************************************************************************/
void test_byte(u8 d, u8 s)
{
	u32 res;	/* all operands in native machine order */

	res = d & s;

	CLEAR_FLAG(F_OF);
	set_szp_flags_8((u8)res);
	/* AF == dont care */
	CLEAR_FLAG(F_CF);
}

/****************************************************************************
REMARKS:
Implements the TEST instruction and side effects (16-bit operand).
****************************************************************************/
void test_word(u16 d, u16 s)
{
	u32 res;	/* all operands in native machine order */

	res = d & s;

	CLEAR_FLAG(F_OF);
	set_szp_flags_16((u16)res);
	/* AF == dont care */
	CLEAR_FLAG(F_CF);
}

/****************************************************************************
REMARKS:
Implements the TEST instruction and side effects.
+****************************************************************************/ +void test_long(u32 d, u32 s) +{ + u32 res; /* all operands in native machine order */ + + res = d & s; + + CLEAR_FLAG(F_OF); + set_szp_flags_32(res); + /* AF == dont care */ + CLEAR_FLAG(F_CF); +} + +/**************************************************************************** +REMARKS: +Implements the XOR instruction and side effects. +****************************************************************************/ +u8 xor_byte(u8 d, u8 s) +{ + u8 res; /* all operands in native machine order */ + + res = d ^ s; + no_carry_byte_side_eff(res); + return res; +} + +/**************************************************************************** +REMARKS: +Implements the XOR instruction and side effects. +****************************************************************************/ +u16 xor_word(u16 d, u16 s) +{ + u16 res; /* all operands in native machine order */ + + res = d ^ s; + no_carry_word_side_eff(res); + return res; +} + +/**************************************************************************** +REMARKS: +Implements the XOR instruction and side effects. +****************************************************************************/ +u32 xor_long(u32 d, u32 s) +{ + u32 res; /* all operands in native machine order */ + + res = d ^ s; + no_carry_long_side_eff(res); + return res; +} + +/**************************************************************************** +REMARKS: +Implements the IMUL instruction and side effects. 
+****************************************************************************/ +void imul_byte(u8 s) +{ + s16 res = (s16)((s8)M.x86.R_AL * (s8)s); + + M.x86.R_AX = res; + if (((M.x86.R_AL & 0x80) == 0 && M.x86.R_AH == 0x00) || + ((M.x86.R_AL & 0x80) != 0 && M.x86.R_AH == 0xFF)) { + CLEAR_FLAG(F_CF); + CLEAR_FLAG(F_OF); + } else { + SET_FLAG(F_CF); + SET_FLAG(F_OF); + } +} + +/**************************************************************************** +REMARKS: +Implements the IMUL instruction and side effects. +****************************************************************************/ +void imul_word(u16 s) +{ + s32 res = (s16)M.x86.R_AX * (s16)s; + + M.x86.R_AX = (u16)res; + M.x86.R_DX = (u16)(res >> 16); + if (((M.x86.R_AX & 0x8000) == 0 && M.x86.R_DX == 0x0000) || + ((M.x86.R_AX & 0x8000) != 0 && M.x86.R_DX == 0xFFFF)) { + CLEAR_FLAG(F_CF); + CLEAR_FLAG(F_OF); + } else { + SET_FLAG(F_CF); + SET_FLAG(F_OF); + } +} + +/**************************************************************************** +REMARKS: +Implements the IMUL instruction and side effects. 
+****************************************************************************/ +void imul_long_direct(u32 *res_lo, u32* res_hi,u32 d, u32 s) +{ +#ifdef __HAS_LONG_LONG__ + s64 res = (s32)d * (s32)s; + + *res_lo = (u32)res; + *res_hi = (u32)(res >> 32); +#else + u32 d_lo,d_hi,d_sign; + u32 s_lo,s_hi,s_sign; + u32 rlo_lo,rlo_hi,rhi_lo; + + if ((d_sign = d & 0x80000000) != 0) + d = -d; + d_lo = d & 0xFFFF; + d_hi = d >> 16; + if ((s_sign = s & 0x80000000) != 0) + s = -s; + s_lo = s & 0xFFFF; + s_hi = s >> 16; + rlo_lo = d_lo * s_lo; + rlo_hi = (d_hi * s_lo + d_lo * s_hi) + (rlo_lo >> 16); + rhi_lo = d_hi * s_hi + (rlo_hi >> 16); + *res_lo = (rlo_hi << 16) | (rlo_lo & 0xFFFF); + *res_hi = rhi_lo; + if (d_sign != s_sign) { + d = ~*res_lo; + s = (((d & 0xFFFF) + 1) >> 16) + (d >> 16); + *res_lo = ~*res_lo+1; + *res_hi = ~*res_hi+(s >> 16); + } +#endif +} + +/**************************************************************************** +REMARKS: +Implements the IMUL instruction and side effects. +****************************************************************************/ +void imul_long(u32 s) +{ + imul_long_direct(&M.x86.R_EAX,&M.x86.R_EDX,M.x86.R_EAX,s); + if (((M.x86.R_EAX & 0x80000000) == 0 && M.x86.R_EDX == 0x00000000) || + ((M.x86.R_EAX & 0x80000000) != 0 && M.x86.R_EDX == 0xFFFFFFFF)) { + CLEAR_FLAG(F_CF); + CLEAR_FLAG(F_OF); + } else { + SET_FLAG(F_CF); + SET_FLAG(F_OF); + } +} + +/**************************************************************************** +REMARKS: +Implements the MUL instruction and side effects. +****************************************************************************/ +void mul_byte(u8 s) +{ + u16 res = (u16)(M.x86.R_AL * s); + + M.x86.R_AX = res; + if (M.x86.R_AH == 0) { + CLEAR_FLAG(F_CF); + CLEAR_FLAG(F_OF); + } else { + SET_FLAG(F_CF); + SET_FLAG(F_OF); + } +} + +/**************************************************************************** +REMARKS: +Implements the MUL instruction and side effects. 
+****************************************************************************/ +void mul_word(u16 s) +{ + u32 res = M.x86.R_AX * s; + + M.x86.R_AX = (u16)res; + M.x86.R_DX = (u16)(res >> 16); + if (M.x86.R_DX == 0) { + CLEAR_FLAG(F_CF); + CLEAR_FLAG(F_OF); + } else { + SET_FLAG(F_CF); + SET_FLAG(F_OF); + } +} + +/**************************************************************************** +REMARKS: +Implements the MUL instruction and side effects. +****************************************************************************/ +void mul_long(u32 s) +{ +#ifdef __HAS_LONG_LONG__ + u64 res = (u32)M.x86.R_EAX * (u32)s; + + M.x86.R_EAX = (u32)res; + M.x86.R_EDX = (u32)(res >> 32); +#else + u32 a,a_lo,a_hi; + u32 s_lo,s_hi; + u32 rlo_lo,rlo_hi,rhi_lo; + + a = M.x86.R_EAX; + a_lo = a & 0xFFFF; + a_hi = a >> 16; + s_lo = s & 0xFFFF; + s_hi = s >> 16; + rlo_lo = a_lo * s_lo; + rlo_hi = (a_hi * s_lo + a_lo * s_hi) + (rlo_lo >> 16); + rhi_lo = a_hi * s_hi + (rlo_hi >> 16); + M.x86.R_EAX = (rlo_hi << 16) | (rlo_lo & 0xFFFF); + M.x86.R_EDX = rhi_lo; +#endif + if (M.x86.R_EDX == 0) { + CLEAR_FLAG(F_CF); + CLEAR_FLAG(F_OF); + } else { + SET_FLAG(F_CF); + SET_FLAG(F_OF); + } +} + +/**************************************************************************** +REMARKS: +Implements the IDIV instruction and side effects. +****************************************************************************/ +void idiv_byte(u8 s) +{ + s32 dvd, div, mod; + + dvd = (s16)M.x86.R_AX; + if (s == 0) { + x86emu_intr_raise(0); + return; + } + div = dvd / (s8)s; + mod = dvd % (s8)s; + if (abs(div) > 0x7f) { + x86emu_intr_raise(0); + return; + } + M.x86.R_AL = (s8) div; + M.x86.R_AH = (s8) mod; +} + +/**************************************************************************** +REMARKS: +Implements the IDIV instruction and side effects. 
+****************************************************************************/ +void idiv_word(u16 s) +{ + s32 dvd, div, mod; + + dvd = (((s32)M.x86.R_DX) << 16) | M.x86.R_AX; + if (s == 0) { + x86emu_intr_raise(0); + return; + } + div = dvd / (s16)s; + mod = dvd % (s16)s; + if (abs(div) > 0x7fff) { + x86emu_intr_raise(0); + return; + } + CLEAR_FLAG(F_CF); + CLEAR_FLAG(F_SF); + CONDITIONAL_SET_FLAG(div == 0, F_ZF); + set_parity_flag(mod); + + M.x86.R_AX = (u16)div; + M.x86.R_DX = (u16)mod; +} + +/**************************************************************************** +REMARKS: +Implements the IDIV instruction and side effects. +****************************************************************************/ +void idiv_long(u32 s) +{ +#ifdef __HAS_LONG_LONG__ + s64 dvd, div, mod; + + dvd = (((s64)M.x86.R_EDX) << 32) | M.x86.R_EAX; + if (s == 0) { + x86emu_intr_raise(0); + return; + } + div = dvd / (s32)s; + mod = dvd % (s32)s; + if (abs(div) > 0x7fffffff) { + x86emu_intr_raise(0); + return; + } +#else + s32 div = 0, mod; + s32 h_dvd = M.x86.R_EDX; + u32 l_dvd = M.x86.R_EAX; + u32 abs_s = s & 0x7FFFFFFF; + u32 abs_h_dvd = h_dvd & 0x7FFFFFFF; + u32 h_s = abs_s >> 1; + u32 l_s = abs_s << 31; + int counter = 31; + int carry; + + if (s == 0) { + x86emu_intr_raise(0); + return; + } + do { + div <<= 1; + carry = (l_dvd >= l_s) ? 0 : 1; + + if (abs_h_dvd < (h_s + carry)) { + h_s >>= 1; + l_s = abs_s << (--counter); + continue; + } else { + abs_h_dvd -= (h_s + carry); + l_dvd = carry ? 
((0xFFFFFFFF - l_s) + l_dvd + 1) + : (l_dvd - l_s); + h_s >>= 1; + l_s = abs_s << (--counter); + div |= 1; + continue; + } + + } while (counter > -1); + /* overflow */ + if (abs_h_dvd || (l_dvd > abs_s)) { + x86emu_intr_raise(0); + return; + } + /* sign */ + div |= ((h_dvd & 0x10000000) ^ (s & 0x10000000)); + mod = l_dvd; + +#endif + CLEAR_FLAG(F_CF); + CLEAR_FLAG(F_AF); + CLEAR_FLAG(F_SF); + SET_FLAG(F_ZF); + set_parity_flag(mod); + + M.x86.R_EAX = (u32)div; + M.x86.R_EDX = (u32)mod; +} + +/**************************************************************************** +REMARKS: +Implements the DIV instruction and side effects. +****************************************************************************/ +void div_byte(u8 s) +{ + u32 dvd, div, mod; + + dvd = M.x86.R_AX; + if (s == 0) { + x86emu_intr_raise(0); + return; + } + div = dvd / (u8)s; + mod = dvd % (u8)s; + if (abs(div) > 0xff) { + x86emu_intr_raise(0); + return; + } + M.x86.R_AL = (u8)div; + M.x86.R_AH = (u8)mod; +} + +/**************************************************************************** +REMARKS: +Implements the DIV instruction and side effects. +****************************************************************************/ +void div_word(u16 s) +{ + u32 dvd, div, mod; + + dvd = (((u32)M.x86.R_DX) << 16) | M.x86.R_AX; + if (s == 0) { + x86emu_intr_raise(0); + return; + } + div = dvd / (u16)s; + mod = dvd % (u16)s; + if (abs(div) > 0xffff) { + x86emu_intr_raise(0); + return; + } + CLEAR_FLAG(F_CF); + CLEAR_FLAG(F_SF); + CONDITIONAL_SET_FLAG(div == 0, F_ZF); + set_parity_flag(mod); + + M.x86.R_AX = (u16)div; + M.x86.R_DX = (u16)mod; +} + +/**************************************************************************** +REMARKS: +Implements the DIV instruction and side effects. 
+****************************************************************************/ +void div_long(u32 s) +{ +#ifdef __HAS_LONG_LONG__ + u64 dvd, div, mod; + + dvd = (((u64)M.x86.R_EDX) << 32) | M.x86.R_EAX; + if (s == 0) { + x86emu_intr_raise(0); + return; + } + div = dvd / (u32)s; + mod = dvd % (u32)s; + if (abs(div) > 0xffffffff) { + x86emu_intr_raise(0); + return; + } +#else + s32 div = 0, mod; + s32 h_dvd = M.x86.R_EDX; + u32 l_dvd = M.x86.R_EAX; + + u32 h_s = s; + u32 l_s = 0; + int counter = 32; + int carry; + + if (s == 0) { + x86emu_intr_raise(0); + return; + } + do { + div <<= 1; + carry = (l_dvd >= l_s) ? 0 : 1; + + if (h_dvd < (h_s + carry)) { + h_s >>= 1; + l_s = s << (--counter); + continue; + } else { + h_dvd -= (h_s + carry); + l_dvd = carry ? ((0xFFFFFFFF - l_s) + l_dvd + 1) + : (l_dvd - l_s); + h_s >>= 1; + l_s = s << (--counter); + div |= 1; + continue; + } + + } while (counter > -1); + /* overflow */ + if (h_dvd || (l_dvd > s)) { + x86emu_intr_raise(0); + return; + } + mod = l_dvd; +#endif + CLEAR_FLAG(F_CF); + CLEAR_FLAG(F_AF); + CLEAR_FLAG(F_SF); + SET_FLAG(F_ZF); + set_parity_flag(mod); + + M.x86.R_EAX = (u32)div; + M.x86.R_EDX = (u32)mod; +} + +/**************************************************************************** +REMARKS: +Implements the IN string instruction and side effects. +****************************************************************************/ + +static void single_in(int size) +{ + if(size == 1) + store_data_byte_abs(M.x86.R_ES, M.x86.R_DI,(*sys_inb)(M.x86.R_DX)); + else if (size == 2) + store_data_word_abs(M.x86.R_ES, M.x86.R_DI,(*sys_inw)(M.x86.R_DX)); + else + store_data_long_abs(M.x86.R_ES, M.x86.R_DI,(*sys_inl)(M.x86.R_DX)); +} + +void ins(int size) +{ + int inc = size; + + if (ACCESS_FLAG(F_DF)) { + inc = -size; + } + if (M.x86.mode & (SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE)) { + /* dont care whether REPE or REPNE */ + /* in until CX is ZERO. */ + u32 count = ((M.x86.mode & SYSMODE_PREFIX_DATA) ? 
+ M.x86.R_ECX : M.x86.R_CX); + + while (count--) { + single_in(size); + M.x86.R_DI += inc; + } + M.x86.R_CX = 0; + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + M.x86.R_ECX = 0; + } + M.x86.mode &= ~(SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE); + } else { + single_in(size); + M.x86.R_DI += inc; + } +} + +/**************************************************************************** +REMARKS: +Implements the OUT string instruction and side effects. +****************************************************************************/ + +static void single_out(int size) +{ + if(size == 1) + (*sys_outb)(M.x86.R_DX,fetch_data_byte_abs(M.x86.R_ES, M.x86.R_SI)); + else if (size == 2) + (*sys_outw)(M.x86.R_DX,fetch_data_word_abs(M.x86.R_ES, M.x86.R_SI)); + else + (*sys_outl)(M.x86.R_DX,fetch_data_long_abs(M.x86.R_ES, M.x86.R_SI)); +} + +void outs(int size) +{ + int inc = size; + + if (ACCESS_FLAG(F_DF)) { + inc = -size; + } + if (M.x86.mode & (SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE)) { + /* dont care whether REPE or REPNE */ + /* out until CX is ZERO. */ + u32 count = ((M.x86.mode & SYSMODE_PREFIX_DATA) ? + M.x86.R_ECX : M.x86.R_CX); + while (count--) { + single_out(size); + M.x86.R_SI += inc; + } + M.x86.R_CX = 0; + if (M.x86.mode & SYSMODE_PREFIX_DATA) { + M.x86.R_ECX = 0; + } + M.x86.mode &= ~(SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE); + } else { + single_out(size); + M.x86.R_SI += inc; + } +} + +/**************************************************************************** +PARAMETERS: +addr - Address to fetch word from + +REMARKS: +Fetches a word from emulator memory using an absolute address. +****************************************************************************/ +u16 mem_access_word(int addr) +{ +DB( if (CHECK_MEM_ACCESS()) + x86emu_check_mem_access(addr);) + return (*sys_rdw)(addr); +} + +/**************************************************************************** +REMARKS: +Pushes a word onto the stack. 

NOTE: Do not inline this, as (*sys_wrX) is already inline!
****************************************************************************/
/* Decrement SP by 2, then store w at SS:SP (real-mode segment << 4). */
void push_word(u16 w)
{
DB(	if (CHECK_SP_ACCESS())
		x86emu_check_sp_access();)
	M.x86.R_SP -= 2;
	(*sys_wrw)(((u32)M.x86.R_SS << 4) + M.x86.R_SP, w);
}

/****************************************************************************
REMARKS:
Pushes a long onto the stack.

NOTE: Do not inline this, as (*sys_wrX) is already inline!
****************************************************************************/
/* Decrement SP by 4, then store w at SS:SP. */
void push_long(u32 w)
{
DB(	if (CHECK_SP_ACCESS())
		x86emu_check_sp_access();)
	M.x86.R_SP -= 4;
	(*sys_wrl)(((u32)M.x86.R_SS << 4) + M.x86.R_SP, w);
}

/****************************************************************************
REMARKS:
Pops a word from the stack.

NOTE: Do not inline this, as (*sys_rdX) is already inline!
****************************************************************************/
/* Load a word from SS:SP, then increment SP by 2. */
u16 pop_word(void)
{
	u16 res;

DB(	if (CHECK_SP_ACCESS())
		x86emu_check_sp_access();)
	res = (*sys_rdw)(((u32)M.x86.R_SS << 4) + M.x86.R_SP);
	M.x86.R_SP += 2;
	return res;
}

/****************************************************************************
REMARKS:
Pops a long from the stack.

NOTE: Do not inline this, as (*sys_rdX) is already inline!
****************************************************************************/
/* Load a long from SS:SP, then increment SP by 4. */
u32 pop_long(void)
{
	u32 res;

DB(	if (CHECK_SP_ACCESS())
		x86emu_check_sp_access();)
	res = (*sys_rdl)(((u32)M.x86.R_SS << 4) + M.x86.R_SP);
	M.x86.R_SP += 4;
	return res;
}

diff --git a/arch/e2k/boot/bios/video/x86emu/src/x86emu/sys.c b/arch/e2k/boot/bios/video/x86emu/src/x86emu/sys.c
new file mode 100644
index 000000000000..9e5d31d6fee7
--- /dev/null
+++ b/arch/e2k/boot/bios/video/x86emu/src/x86emu/sys.c
@@ -0,0 +1,646 @@
/****************************************************************************
*
*  Realmode X86 Emulator Library
*
*  Copyright (C) 1996-1999 SciTech Software, Inc.
*  Copyright (C) David Mosberger-Tang
*  Copyright (C) 1999 Egbert Eich
*
*  ========================================================================
*
*  Permission to use, copy, modify, distribute, and sell this software and
*  its documentation for any purpose is hereby granted without fee,
*  provided that the above copyright notice appear in all copies and that
*  both that copyright notice and this permission notice appear in
*  supporting documentation, and that the name of the authors not be used
*  in advertising or publicity pertaining to distribution of the software
*  without specific, written prior permission.  The authors makes no
*  representations about the suitability of this software for any purpose.
*  It is provided "as is" without express or implied warranty.
*
*  THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
*  INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
*  EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
*  CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
*  USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
*  OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
*  PERFORMANCE OF THIS SOFTWARE.
*
*  ========================================================================
*
* Language:     ANSI C
* Environment:  Any
* Developer:    Kendall Bennett
*
* Description:  This file includes subroutines which are related to
*               programmed I/O and memory access. Included in this module
*               are default functions with limited usefulness. For real
*               uses these functions will most likely be overridden by the
*               user library.
*
****************************************************************************/
/* $XFree86: xc/extras/x86emu/src/x86emu/sys.c,v 1.5 2000/08/23 22:10:01 tsi Exp $ */

#include "x86emu.h"
#include "x86emu/regs.h"
#include "x86emu/debug.h"
#include "x86emu/prim_ops.h"
#include "pci.h"

/* NOTE(review): the operands of the commented-out includes below were lost
 * in a previous re-encode of this file; kept as found. */
//#include
//#ifdef IN_MODULE
//#include "xf86_ansic.h"
//#else
//#include
//#endif
/*------------------------- Global Variables ------------------------------*/

X86EMU_sysEnv _X86EMU_env;	/* Global emulator machine state */
X86EMU_intrFuncs _X86EMU_intrTab[256];	/* per-vector interrupt hooks */

/*----------------------------- Implementation ----------------------------*/
#if defined(__alpha__) || defined(__alpha)
/* to cope with broken egcs-1.1.2 :-(((( */

/*
 * inline functions to do unaligned accesses
 * from linux/include/asm-alpha/unaligned.h
 */

/*
 * EGCS 1.1 knows about arbitrary unaligned loads.  Define some
 * packed structures to talk about such things with.
 */

#if __GNUC__ > 2 || __GNUC_MINOR__ >= 91
/* packed wrappers let the compiler emit unaligned loads/stores directly */
struct __una_u64 {
	unsigned long x __attribute__ ((packed));
};
struct __una_u32 {
	unsigned int x __attribute__ ((packed));
};
struct __una_u16 {
	unsigned short x __attribute__ ((packed));
};
#endif

/* Unaligned 64-bit load; falls back to hand-written Alpha ldq_u/extq
 * sequences for compilers without packed-struct support. */
static __inline__ unsigned long ldq_u(unsigned long *r11)
{
#if __GNUC__ > 2 || __GNUC_MINOR__ >= 91
	const struct __una_u64 *ptr = (const struct __una_u64 *) r11;
	return ptr->x;
#else
	unsigned long r1, r2;
	__asm__("ldq_u %0,%3\n\t" "ldq_u %1,%4\n\t" "extql %0,%2,%0\n\t" "extqh %1,%2,%1":"=&r"(r1),
		"=&r"
		(r2)
		: "r"(r11), "m"(*r11),
		"m"(*(const unsigned long *) (7 + (char *) r11)));
	return r1 | r2;
#endif
}

/* Unaligned 32-bit load. */
static __inline__ unsigned long ldl_u(unsigned int *r11)
{
#if __GNUC__ > 2 || __GNUC_MINOR__ >= 91
	const struct __una_u32 *ptr = (const struct __una_u32 *) r11;
	return ptr->x;
#else
	unsigned long r1, r2;
	__asm__("ldq_u %0,%3\n\t" "ldq_u %1,%4\n\t" "extll %0,%2,%0\n\t" "extlh %1,%2,%1":"=&r"(r1),
		"=&r"
		(r2)
		: "r"(r11), "m"(*r11),
		"m"(*(const unsigned long *) (3 + (char *) r11)));
	return r1 | r2;
#endif
}

/* Unaligned 16-bit load. */
static __inline__ unsigned long ldw_u(unsigned short *r11)
{
#if __GNUC__ > 2 || __GNUC_MINOR__ >= 91
	const struct __una_u16 *ptr = (const struct __una_u16 *) r11;
	return ptr->x;
#else
	unsigned long r1, r2;
	__asm__("ldq_u %0,%3\n\t" "ldq_u %1,%4\n\t" "extwl %0,%2,%0\n\t" "extwh %1,%2,%1":"=&r"(r1),
		"=&r"
		(r2)
		: "r"(r11), "m"(*r11),
		"m"(*(const unsigned long *) (1 + (char *) r11)));
	return r1 | r2;
#endif
}

/*
 * Elemental unaligned stores
 */

/* Unaligned 64-bit store. */
static __inline__ void stq_u(unsigned long r5, unsigned long *r11)
{
#if __GNUC__ > 2 || __GNUC_MINOR__ >= 91
	struct __una_u64 *ptr = (struct __una_u64 *) r11;
	ptr->x = r5;
#else
	unsigned long r1, r2, r3, r4;

	__asm__("ldq_u %3,%1\n\t" "ldq_u %2,%0\n\t" "insqh %6,%7,%5\n\t" "insql %6,%7,%4\n\t" "mskqh %3,%7,%3\n\t" "mskql %2,%7,%2\n\t" "bis %3,%5,%3\n\t" "bis %2,%4,%2\n\t" "stq_u %3,%1\n\t" "stq_u %2,%0":"=m"(*r11),
		"=m"(*(unsigned long *) (7 + (char *) r11)),
		"=&r"(r1), "=&r"(r2), "=&r"(r3), "=&r"(r4)
		: "r"(r5), "r"(r11));
#endif
}

/* Unaligned 32-bit store. */
static __inline__ void stl_u(unsigned long r5, unsigned int *r11)
{
#if __GNUC__ > 2 || __GNUC_MINOR__ >= 91
	struct __una_u32 *ptr = (struct __una_u32 *) r11;
	ptr->x = r5;
#else
	unsigned long r1, r2, r3, r4;

	__asm__("ldq_u %3,%1\n\t" "ldq_u %2,%0\n\t" "inslh %6,%7,%5\n\t" "insll %6,%7,%4\n\t" "msklh %3,%7,%3\n\t" "mskll %2,%7,%2\n\t" "bis %3,%5,%3\n\t" "bis %2,%4,%2\n\t" "stq_u %3,%1\n\t" "stq_u %2,%0":"=m"(*r11),
		"=m"(*(unsigned long *) (3 + (char *) r11)),
		"=&r"(r1), "=&r"(r2), "=&r"(r3), "=&r"(r4)
		: "r"(r5), "r"(r11));
#endif
}

/* Unaligned 16-bit store. */
static __inline__ void stw_u(unsigned long r5, unsigned short *r11)
{
#if __GNUC__ > 2 || __GNUC_MINOR__ >= 91
	struct __una_u16 *ptr = (struct __una_u16 *) r11;
	ptr->x = r5;
#else
	unsigned long r1, r2, r3, r4;

	__asm__("ldq_u %3,%1\n\t" "ldq_u %2,%0\n\t" "inswh %6,%7,%5\n\t" "inswl %6,%7,%4\n\t" "mskwh %3,%7,%3\n\t" "mskwl %2,%7,%2\n\t" "bis %3,%5,%3\n\t" "bis %2,%4,%2\n\t" "stq_u %3,%1\n\t" "stq_u %2,%0":"=m"(*r11),
		"=m"(*(unsigned long *) (1 + (char *) r11)),
		"=&r"(r1), "=&r"(r2), "=&r"(r3), "=&r"(r4)
		: "r"(r5), "r"(r11));
#endif
}
#endif

/* compute a pointer. This replaces code scattered all over the place! */
/* Translate an emulated address into a host pointer: the VGA a/b segment
 * maps into M.abseg when configured, everything else into M.mem_base.
 * NOTE(review): "addr > M.mem_size - size" wraps if M.mem_size < size
 * (both unsigned) - presumably mem_size is always large enough; confirm. */
u8 *mem_ptr(u32 addr, int size)
{
	u8 *retaddr = 0;

	if (addr > M.mem_size - size) {
		DB(rom_printk("mem_ptr: address %x out of range!\n", addr);
		    )
		HALT_SYS();
	}
	/* a or b segment?
	 */
	/* & with e to clear low-order bit, if it is a or b it will be a */
	if (((addr & 0xfffe0000) == 0xa0000) && M.abseg) {
		//rom_printk("It's a0000\n");
		addr &= ~0xfffe0000;
		retaddr = (u8 *) (M.abseg + addr);
		//rom_printk("retaddr now 0x%px\n", retaddr);
	} else if (addr < 0x200) {
		/* interrupt vector area - currently handled like plain RAM */
//		rom_printk("addr 0x%x updating int vector 0x%x\n",
//			   addr, addr >> 2);
		retaddr = (u8 *) (M.mem_base + addr);
	} else {
		retaddr = (u8 *) (M.mem_base + addr);
	}
	return retaddr;
}

/****************************************************************************
PARAMETERS:
addr	- Emulator memory address to read

RETURNS:
Byte value read from emulator memory.

REMARKS:
Reads a byte value from the emulator memory.
****************************************************************************/
u8 X86API rdb(u32 addr)
{
	u8 val;
	u8 *ptr;

	ptr = mem_ptr(addr, 1);

	val = *ptr;
	DB(if (DEBUG_MEM_TRACE())
		   rom_printk("%x 1 -> %x\n", addr, val);)
	return val;
}

/****************************************************************************
PARAMETERS:
addr	- Emulator memory address to read

RETURNS:
Word value read from emulator memory.

REMARKS:
Reads a word value from the emulator memory.
****************************************************************************/
/* NOTE(review): the explicit range check below duplicates the one already
 * performed inside mem_ptr(), and it prints a u32 through "%x" after
 * casting to unsigned long (format/argument mismatch on LP64); kept as
 * found - confirm before relying on the message. */
u16 X86API rdw(u32 addr)
{
	u16 val = 0;
	u8 *ptr;

	ptr = mem_ptr(addr, 2);

	if (addr > M.mem_size - 2) {
		DB(rom_printk("mem_read: address %x out of range!\n", (unsigned long) addr);
		    )
		HALT_SYS();
	}
#ifdef __BIG_ENDIAN__
	if (addr & 0x1) {
		/* unaligned: assemble the little-endian word byte by byte */
		val = (*ptr | (*(ptr + 1) << 8));
	} else
#endif
#if defined(__alpha__) || defined(__alpha)
		val = ldw_u((u16 *) (ptr));
#else
		val = *(u16 *) (ptr);
#endif
	DB(if (DEBUG_MEM_TRACE())
		   rom_printk("%x 2 -> %x\n", addr, val);)

	return val;
}

/****************************************************************************
PARAMETERS:
addr	- Emulator memory address to read

RETURNS:
Long value read from emulator memory.
REMARKS:
Reads a long value from the emulator memory.
****************************************************************************/
u32 X86API rdl(u32 addr)
{
	u32 val = 0;
	u8 *ptr;

	ptr = mem_ptr(addr, 4);

#ifdef __BIG_ENDIAN__
	if (addr & 0x3) {
		/* unaligned: assemble the little-endian dword byte by byte */
		val = (*(u8 *) (ptr + 0) |
		       (*(u8 *) (ptr + 1) << 8) |
		       (*(u8 *) (ptr + 2) << 16) | (*(u8 *) (ptr + 3) << 24));
	} else
#endif
#if defined(__alpha__) || defined(__alpha)
		val = ldl_u((u32 *) (ptr));
#else
		val = *(u32 *) (ptr);
#endif
	DB(if (DEBUG_MEM_TRACE())
		   rom_printk("%x 4 -> %x\n", addr, val);)
	return val;
}

/****************************************************************************
PARAMETERS:
addr	- Emulator memory address to read
val		- Value to store

REMARKS:
Writes a byte value to emulator memory.
****************************************************************************/
void X86API wrb(u32 addr, u8 val)
{
	u8 *ptr;

	ptr = mem_ptr(addr, 1);

//	if (addr >= 0xc0000 && addr < 0xd0000 ) {
//		rom_printk("WARNING! Attempt to overwrite ROM 0x%x\n", addr);
//	} else {


	DB(if (DEBUG_MEM_TRACE())
		   rom_printk("%x 1 <- %x\n", addr, val);)
	*(u8 *) (ptr) = val;
//	}

}

/****************************************************************************
PARAMETERS:
addr	- Emulator memory address to read
val		- Value to store

REMARKS:
Writes a word value to emulator memory.
****************************************************************************/
void X86API wrw(u32 addr, u16 val)
{
	u8 *ptr;

	ptr = mem_ptr(addr, 2);

//	if (addr >= 0xc0000 && addr < 0xd0000 ) {
//		rom_printk("WARNING! Attempt to overwrite ROM 0x%x\n", addr);
//	} else {


	DB(if (DEBUG_MEM_TRACE())
		   rom_printk("%x 2 <- %x\n", addr, val);)
#ifdef __BIG_ENDIAN__
	if (addr & 0x1) {
		/* unaligned: store little-endian byte by byte */
		*(u8 *) (ptr + 0) = (val >> 0) & 0xff;
		*(u8 *) (ptr + 1) = (val >> 8) & 0xff;
	} else
#endif
#if defined(__alpha__) || defined(__alpha)
		stw_u(val, (u16 *) (ptr));
#else
		*(u16 *) (ptr) = val;
#endif

//	}
}

/****************************************************************************
PARAMETERS:
addr	- Emulator memory address to read
val		- Value to store

REMARKS:
Writes a long value to emulator memory.
****************************************************************************/
/* NOTE(review): the big-endian unaligned test checks "addr & 0x1" whereas
 * rdl() checks "addr & 0x3" - a dword at a 2-mod-4 address would take the
 * aligned path here; looks like it should be 0x3.  Kept as found. */
void X86API wrl(u32 addr, u32 val)
{
	u8 *ptr;

	ptr = mem_ptr(addr, 4);

//	if (addr >= 0xc0000 && addr < 0xd0000 ) {
//		rom_printk("WARNING! Attempt to overwrite ROM 0x%x\n", addr);
//	} else {

	DB(if (DEBUG_MEM_TRACE())
		   rom_printk("%x 4 <- %x\n", addr, val);)
#ifdef __BIG_ENDIAN__
	if (addr & 0x1) {
		*(u8 *) (ptr + 0) = (val >> 0) & 0xff;
		*(u8 *) (ptr + 1) = (val >> 8) & 0xff;
		*(u8 *) (ptr + 2) = (val >> 16) & 0xff;
		*(u8 *) (ptr + 3) = (val >> 24) & 0xff;
	} else
#endif
#if defined(__alpha__) || defined(__alpha)
		stl_u(val, (u32 *) (ptr));
#else
		*(u32 *) (ptr) = val;
#endif
//	}
}

/****************************************************************************
PARAMETERS:
addr	- PIO address to read
RETURN:
0
REMARKS:
Default PIO byte read function.  Doesn't perform real inb.
****************************************************************************/
/* Despite the stale "Doesn't perform real inb" remark above, this default
 * handler forwards to bios_inb() after (hosted builds only) requesting
 * ioperm on the VGA port range. */
static u8 X86API p_inb(X86EMU_pioAddr addr)
{
	DB(if (DEBUG_IO_TRACE())
		   rom_printk("inb %x \n", addr);)
/*	return 0;*/

#ifndef IN_MODULE
	if (ioperm(0x3c0, 0xdf, 1) == -1) {
		rom_printk("Permission not set on port 0x%x.\n", addr);
	}
#endif
	return bios_inb(addr);
}

/****************************************************************************
PARAMETERS:
addr	- PIO address to read
RETURN:
0
REMARKS:
Default PIO word read function.  Doesn't perform real inw.
****************************************************************************/
static u16 X86API p_inw(X86EMU_pioAddr addr)
{
	DB(if (DEBUG_IO_TRACE())
		   rom_printk("inw %#04x \n", addr);)
/*	return 0;*/

#ifndef IN_MODULE
	if (ioperm(0x3c0, 0xdf, 1) == -1) {
		rom_printk("Permission not set on port 0x%x.\n", addr);
	}
#endif
	return bios_inw(addr);
}

/****************************************************************************
PARAMETERS:
addr	- PIO address to read
RETURN:
0
REMARKS:
Default PIO long read function.  Doesn't perform real inl.
****************************************************************************/
/* Default PIO dword read handler; forwards to bios_inl(). */
static u32 X86API p_inl(X86EMU_pioAddr addr)
{
	DB(if (DEBUG_IO_TRACE())
		   rom_printk("inl %#04x \n", addr);)
/*	return 0;*/

#ifndef IN_MODULE
	if (ioperm(0x3c0, 0xdf, 1) == -1) {
		rom_printk("Permission not set on port 0x%x.\n", addr);
	}
#endif
	return bios_inl(addr);
}

/****************************************************************************
PARAMETERS:
addr	- PIO address to write
val		- Value to store
REMARKS:
Default PIO byte write function.  Doesn't perform real outb.
****************************************************************************/
/* Default PIO byte write handler; forwards to bios_outb(). */
static void X86API p_outb(X86EMU_pioAddr addr, u8 val)
{
	DB(if (DEBUG_IO_TRACE())
		   rom_printk("outb %#02x -> %#04x \n", val, addr);)

#ifndef IN_MODULE
	if (ioperm(0x3c0, 0xdf, 1) == -1) {
		rom_printk("Permission not set on port 0x%x.\n", addr);
	}
#endif
	bios_outb(val, addr);
	return;
}

/****************************************************************************
PARAMETERS:
addr	- PIO address to write
val		- Value to store
REMARKS:
Default PIO word write function.  Doesn't perform real outw.
****************************************************************************/
static void X86API p_outw(X86EMU_pioAddr addr, u16 val)
{

	DB(if (DEBUG_IO_TRACE())
		   rom_printk("outw %#04x -> %#04x \n", val, addr);)
#ifndef IN_MODULE
	if (ioperm(0x3c0, 0xdf, 1) == -1) {
		rom_printk("Permission not set on port 0x%x.\n", addr);
	}
#endif
	bios_outw(val, addr);
	return;
}

/****************************************************************************
PARAMETERS:
addr	- PIO address to write
val		- Value to store
REMARKS:
Default PIO long write function.  Doesn't perform real outl.
****************************************************************************/
/* Default PIO dword write handler; forwards to bios_outl(). */
static void X86API p_outl(X86EMU_pioAddr addr, u32 val)
{
	DB(if (DEBUG_IO_TRACE())
		   rom_printk("outl %#08x -> %#04x \n", val, addr);)

#ifndef IN_MODULE
	if (ioperm(0x3c0, 0xdf, 1) == -1) {
		rom_printk("Permission not set on port 0x%x.\n", addr);
	}
#endif
	bios_outl(val, addr);
	return;
}

/*------------------------- Global Variables ------------------------------*/

/* Indirection table for all memory and port accesses made by the emulator;
 * initialised to the default handlers above and replaceable at run time
 * via X86EMU_setupMemFuncs()/X86EMU_setupPioFuncs(). */
u8(X86APIP sys_rdb) (u32 addr) = rdb;
u16(X86APIP sys_rdw) (u32 addr) = rdw;
u32(X86APIP sys_rdl) (u32 addr) = rdl;
void (X86APIP sys_wrb) (u32 addr, u8 val) = wrb;
void (X86APIP sys_wrw) (u32 addr, u16 val) = wrw;
void (X86APIP sys_wrl) (u32 addr, u32 val) = wrl;
u8(X86APIP sys_inb) (X86EMU_pioAddr addr) = p_inb;
u16(X86APIP sys_inw) (X86EMU_pioAddr addr) = p_inw;
u32(X86APIP sys_inl) (X86EMU_pioAddr addr) = p_inl;
void (X86APIP sys_outb) (X86EMU_pioAddr addr, u8 val) = p_outb;
void (X86APIP sys_outw) (X86EMU_pioAddr addr, u16 val) = p_outw;
void (X86APIP sys_outl) (X86EMU_pioAddr addr, u32 val) = p_outl;

/*----------------------------- Setup -------------------------------------*/

/****************************************************************************
PARAMETERS:
funcs	- New memory function pointers to make active

REMARKS:
This function is used to set the pointers to functions which access
memory space, allowing the user application to override these functions
and hook them out as necessary for their application.
****************************************************************************/
/* Install user-supplied memory read/write handlers (no validation: all six
 * pointers in funcs must be non-NULL). */
void X86EMU_setupMemFuncs(X86EMU_memFuncs * funcs)
{
	sys_rdb = funcs->rdb;
	sys_rdw = funcs->rdw;
	sys_rdl = funcs->rdl;
	sys_wrb = funcs->wrb;
	sys_wrw = funcs->wrw;
	sys_wrl = funcs->wrl;
}

/****************************************************************************
PARAMETERS:
funcs	- New programmed I/O function pointers to make active

REMARKS:
This function is used to set the pointers to functions which access
I/O space, allowing the user application to override these functions
and hook them out as necessary for their application.
****************************************************************************/
/* Install user-supplied port I/O handlers (all six must be non-NULL). */
void X86EMU_setupPioFuncs(X86EMU_pioFuncs * funcs)
{
	sys_inb = funcs->inb_func;
	sys_inw = funcs->inw_func;
	sys_inl = funcs->inl_func;
	sys_outb = funcs->outb_func;
	sys_outw = funcs->outw_func;
	sys_outl = funcs->outl_func;
}

/****************************************************************************
PARAMETERS:
funcs	- New interrupt vector table to make active

REMARKS:
This function is used to set the pointers to functions which handle
interrupt processing in the emulator, allowing the user application to
hook interrupts as necessary for their application.  Any interrupts that
are not hooked by the user application, and reflected and handled internally
in the emulator via the interrupt vector table.  This allows the application
to get control when the code being emulated executes specific software
interrupts.
****************************************************************************/
/* Replace the whole 256-entry interrupt hook table; a NULL funcs pointer
 * (or NULL entries) leaves those vectors to the emulator's default
 * reflection path. */
void X86EMU_setupIntrFuncs(X86EMU_intrFuncs funcs[])
{
	int i;

	for (i = 0; i < 256; i++)
		_X86EMU_intrTab[i] = NULL;
	if (funcs) {
		for (i = 0; i < 256; i++)
			_X86EMU_intrTab[i] = funcs[i];
	}
}

/****************************************************************************
PARAMETERS:
num	- New software interrupt to prepare for

REMARKS:
This function is used to set up the emulator state to execute a software
interrupt.  This can be used by the user application code to allow an
interrupt to be hooked, examined and then reflected back to the emulator
so that the code in the emulator will continue processing the software
interrupt as per normal.  This essentially allows system code to actively
hook and handle certain software interrupts as necessary.
****************************************************************************/
/* Emulate the CPU side of "INT num": push FLAGS/CS/IP, clear IF and TF,
 * and load CS:IP from real-mode IVT entry num (4 bytes per vector). */
void X86EMU_prepareForInt(int num)
{
	push_word((u16) M.x86.R_FLG);
	CLEAR_FLAG(F_IF);
	CLEAR_FLAG(F_TF);
	push_word(M.x86.R_CS);
	M.x86.R_CS = mem_access_word(num * 4 + 2);
	push_word(M.x86.R_IP);
	M.x86.R_IP = mem_access_word(num * 4);
	M.x86.intr = 0;
}

/* Set the host buffer backing the emulated address space. */
void X86EMU_setMemBase(void *base, unsigned int size)
{
	M.mem_base = (unsigned long) base;
	M.mem_size = size;
}

/* Set the host buffer backing the VGA a/b segment (0xa0000 window). */
void X86EMU_setabseg(void *abseg)
{
	M.abseg = (unsigned long) abseg;
}

diff --git a/arch/e2k/boot/bios/video/x86emu/src/x86emu/validate.c b/arch/e2k/boot/bios/video/x86emu/src/x86emu/validate.c
new file mode 100644
index 000000000000..d7b62e5f8169
--- /dev/null
+++ b/arch/e2k/boot/bios/video/x86emu/src/x86emu/validate.c
@@ -0,0 +1,789 @@
/****************************************************************************
*
*  Realmode X86 Emulator Library
*
*  Copyright (C) 1991-2004 SciTech Software, Inc.
+* Copyright (C) David Mosberger-Tang +* Copyright (C) 1999 Egbert Eich +* +* ======================================================================== +* +* Permission to use, copy, modify, distribute, and sell this software and +* its documentation for any purpose is hereby granted without fee, +* provided that the above copyright notice appear in all copies and that +* both that copyright notice and this permission notice appear in +* supporting documentation, and that the name of the authors not be used +* in advertising or publicity pertaining to distribution of the software +* without specific, written prior permission. The authors makes no +* representations about the suitability of this software for any purpose. +* It is provided "as is" without express or implied warranty. +* +* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR +* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF +* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +* PERFORMANCE OF THIS SOFTWARE. +* +* ======================================================================== +* +* Language: Watcom C 10.6 or later +* Environment: 32-bit DOS +* Developer: Kendall Bennett +* +* Description: Program to validate the x86 emulator library for +* correctness. We run the emulator primitive operations +* functions against the real x86 CPU, and compare the result +* and flags to ensure correctness. +* +* We use inline assembler to compile and build this program. 
+* +****************************************************************************/ + +#include +#include +#include +#include +#include "x86emu.h" +#include "x86emu/prim_asm.h" +#include "x86emu/prim_ops.h" + +/*-------------------------- Implementation -------------------------------*/ + +#define true 1 +#define false 0 + +u32 cur_flags_mask = 0; + +int flags_are_different(u32 flags1, u32 flags2) +{ + return (flags1&cur_flags_mask) != (flags2&cur_flags_mask); +} + +#define ALL_FLAGS (F_CF | F_PF | F_AF | F_ZF | F_SF | F_OF) + +#define VAL_START_BINARY(parm_type,res_type,dmax,smax,dincr,sincr) \ +{ \ + parm_type d,s; \ + res_type r,r_asm; \ + u32 flags,inflags; \ + int f,failed = false; \ + char buf1[80],buf2[80]; \ + for (d = 0; d < dmax; d += dincr) { \ + for (s = 0; s < smax; s += sincr) { \ + M.x86.R_EFLG = inflags = flags = def_flags; \ + for (f = 0; f < 2; f++) { + +#define VAL_TEST_BINARY(name) \ + r_asm = name##_asm(&flags,d,s); \ + r = name(d,s); \ + if (r != r_asm || flags_are_different(M.x86.R_EFLG, flags)) \ + failed = true; \ + if (failed || trace) { + +#define VAL_TEST_BINARY_VOID(name) \ + name##_asm(&flags,d,s); \ + name(d,s); \ + r = r_asm = 0; \ + if (flags_are_different(M.x86.R_EFLG, flags)) \ + failed = true; \ + if (failed || trace) { + +#define VAL_FAIL_BYTE_BYTE_BINARY(name) \ + if (failed) \ + printk("fail\n"); \ + printk("0x%02X = %-15s(0x%02X,0x%02X), flags = %s -> %s\n", \ + r, #name, d, s, print_flags(buf1,inflags), print_flags(buf2,M.x86.R_EFLG)); \ + printk("0x%02X = %-15s(0x%02X,0x%02X), flags = %s -> %s\n", \ + r_asm, #name"_asm", d, s, print_flags(buf1,inflags), print_flags(buf2,flags)); + +#define VAL_FAIL_WORD_WORD_BINARY(name) \ + if (failed) \ + printk("fail\n"); \ + printk("0x%04X = %-15s(0x%04X,0x%04X), flags = %s -> %s\n", \ + r, #name, d, s, print_flags(buf1,inflags), print_flags(buf2,M.x86.R_EFLG)); \ + printk("0x%04X = %-15s(0x%04X,0x%04X), flags = %s -> %s\n", \ + r_asm, #name"_asm", d, s, print_flags(buf1,inflags), 
print_flags(buf2,flags)); + +#define VAL_FAIL_LONG_LONG_BINARY(name) \ + if (failed) \ + printk("fail\n"); \ + printk("0x%08X = %-15s(0x%08X,0x%08X), flags = %s -> %s\n", \ + r, #name, d, s, print_flags(buf1,inflags), print_flags(buf2,M.x86.R_EFLG)); \ + printk("0x%08X = %-15s(0x%08X,0x%08X), flags = %s -> %s\n", \ + r_asm, #name"_asm", d, s, print_flags(buf1,inflags), print_flags(buf2,flags)); + +#define VAL_END_BINARY() \ + } \ + M.x86.R_EFLG = inflags = flags = def_flags | (ALL_FLAGS & ~F_OF); \ + if (failed) \ + break; \ + } \ + if (failed) \ + break; \ + } \ + if (failed) \ + break; \ + } \ + if (!failed) \ + printk("passed\n"); \ +} + +#define VAL_BYTE_BYTE_BINARY(name) \ + printk("Validating %s ... ", #name); \ + VAL_START_BINARY(u8,u8,0xFF,0xFF,1,1) \ + VAL_TEST_BINARY(name) \ + VAL_FAIL_BYTE_BYTE_BINARY(name) \ + VAL_END_BINARY() + +#define VAL_WORD_WORD_BINARY(name) \ + printk("Validating %s ... ", #name); \ + VAL_START_BINARY(u16,u16,0xFF00,0xFF00,0x100,0x100) \ + VAL_TEST_BINARY(name) \ + VAL_FAIL_WORD_WORD_BINARY(name) \ + VAL_END_BINARY() + +#define VAL_LONG_LONG_BINARY(name) \ + printk("Validating %s ... ", #name); \ + VAL_START_BINARY(u32,u32,0xFF000000,0xFF000000,0x1000000,0x1000000) \ + VAL_TEST_BINARY(name) \ + VAL_FAIL_LONG_LONG_BINARY(name) \ + VAL_END_BINARY() + +#define VAL_VOID_BYTE_BINARY(name) \ + printk("Validating %s ... ", #name); \ + VAL_START_BINARY(u8,u8,0xFF,0xFF,1,1) \ + VAL_TEST_BINARY_VOID(name) \ + VAL_FAIL_BYTE_BYTE_BINARY(name) \ + VAL_END_BINARY() + +#define VAL_VOID_WORD_BINARY(name) \ + printk("Validating %s ... ", #name); \ + VAL_START_BINARY(u16,u16,0xFF00,0xFF00,0x100,0x100) \ + VAL_TEST_BINARY_VOID(name) \ + VAL_FAIL_WORD_WORD_BINARY(name) \ + VAL_END_BINARY() + +#define VAL_VOID_LONG_BINARY(name) \ + printk("Validating %s ... 
", #name); \ + VAL_START_BINARY(u32,u32,0xFF000000,0xFF000000,0x1000000,0x1000000) \ + VAL_TEST_BINARY_VOID(name) \ + VAL_FAIL_LONG_LONG_BINARY(name) \ + VAL_END_BINARY() + +#define VAL_BYTE_ROTATE(name) \ + printk("Validating %s ... ", #name); \ + VAL_START_BINARY(u8,u8,0xFF,8,1,1) \ + VAL_TEST_BINARY(name) \ + VAL_FAIL_BYTE_BYTE_BINARY(name) \ + VAL_END_BINARY() + +#define VAL_WORD_ROTATE(name) \ + printk("Validating %s ... ", #name); \ + VAL_START_BINARY(u16,u16,0xFF00,16,0x100,1) \ + VAL_TEST_BINARY(name) \ + VAL_FAIL_WORD_WORD_BINARY(name) \ + VAL_END_BINARY() + +#define VAL_LONG_ROTATE(name) \ + printk("Validating %s ... ", #name); \ + VAL_START_BINARY(u32,u32,0xFF000000,32,0x1000000,1) \ + VAL_TEST_BINARY(name) \ + VAL_FAIL_LONG_LONG_BINARY(name) \ + VAL_END_BINARY() + +#define VAL_START_TERNARY(parm_type,res_type,dmax,smax,dincr,sincr,maxshift)\ +{ \ + parm_type d,s; \ + res_type r,r_asm; \ + u8 shift; \ + u32 flags,inflags; \ + int f,failed = false; \ + char buf1[80],buf2[80]; \ + for (d = 0; d < dmax; d += dincr) { \ + for (s = 0; s < smax; s += sincr) { \ + for (shift = 0; shift < maxshift; shift += 1) { \ + M.x86.R_EFLG = inflags = flags = def_flags; \ + for (f = 0; f < 2; f++) { + +#define VAL_TEST_TERNARY(name) \ + r_asm = name##_asm(&flags,d,s,shift); \ + r = name(d,s,shift); \ + if (r != r_asm || flags_are_different(M.x86.R_EFLG, flags)) \ + failed = true; \ + if (failed || trace) { + +#define VAL_FAIL_WORD_WORD_TERNARY(name) \ + if (failed) \ + printk("fail\n"); \ + printk("0x%04X = %-15s(0x%04X,0x%04X,%d), flags = %s -> %s\n", \ + r, #name, d, s, shift, print_flags(buf1,inflags), print_flags(buf2,M.x86.R_EFLG)); \ + printk("0x%04X = %-15s(0x%04X,0x%04X,%d), flags = %s -> %s\n", \ + r_asm, #name"_asm", d, s, shift, print_flags(buf1,inflags), print_flags(buf2,flags)); + +#define VAL_FAIL_LONG_LONG_TERNARY(name) \ + if (failed) \ + printk("fail\n"); \ + printk("0x%08X = %-15s(0x%08X,0x%08X,%d), flags = %s -> %s\n", \ + r, #name, d, s, shift, 
print_flags(buf1,inflags), print_flags(buf2,M.x86.R_EFLG)); \ + printk("0x%08X = %-15s(0x%08X,0x%08X,%d), flags = %s -> %s\n", \ + r_asm, #name"_asm", d, s, shift, print_flags(buf1,inflags), print_flags(buf2,flags)); + +#define VAL_END_TERNARY() \ + } \ + M.x86.R_EFLG = inflags = flags = def_flags | (ALL_FLAGS & ~F_OF); \ + if (failed) \ + break; \ + } \ + if (failed) \ + break; \ + } \ + if (failed) \ + break; \ + } \ + if (failed) \ + break; \ + } \ + if (!failed) \ + printk("passed\n"); \ +} + +#define VAL_WORD_ROTATE_DBL(name) \ + printk("Validating %s ... ", #name); \ + VAL_START_TERNARY(u16,u16,0xFF00,0xFF00,0x100,0x100,16) \ + VAL_TEST_TERNARY(name) \ + VAL_FAIL_WORD_WORD_TERNARY(name) \ + VAL_END_TERNARY() + +#define VAL_LONG_ROTATE_DBL(name) \ + printk("Validating %s ... ", #name); \ + VAL_START_TERNARY(u32,u32,0xFF000000,0xFF000000,0x1000000,0x1000000,32) \ + VAL_TEST_TERNARY(name) \ + VAL_FAIL_LONG_LONG_TERNARY(name) \ + VAL_END_TERNARY() + +#define VAL_START_UNARY(parm_type,max,incr) \ +{ \ + parm_type d,r,r_asm; \ + u32 flags,inflags; \ + int f,failed = false; \ + char buf1[80],buf2[80]; \ + for (d = 0; d < max; d += incr) { \ + M.x86.R_EFLG = inflags = flags = def_flags; \ + for (f = 0; f < 2; f++) { + +#define VAL_TEST_UNARY(name) \ + r_asm = name##_asm(&flags,d); \ + r = name(d); \ + if (r != r_asm || flags_are_different(M.x86.R_EFLG, flags)) { \ + failed = true; + +#define VAL_FAIL_BYTE_UNARY(name) \ + printk("fail\n"); \ + printk("0x%02X = %-15s(0x%02X), flags = %s -> %s\n", \ + r, #name, d, print_flags(buf1,inflags), print_flags(buf2,M.x86.R_EFLG)); \ + printk("0x%02X = %-15s(0x%02X), flags = %s -> %s\n", \ + r_asm, #name"_asm", d, print_flags(buf1,inflags), print_flags(buf2,flags)); + +#define VAL_FAIL_WORD_UNARY(name) \ + printk("fail\n"); \ + printk("0x%04X = %-15s(0x%04X), flags = %s -> %s\n", \ + r, #name, d, print_flags(buf1,inflags), print_flags(buf2,M.x86.R_EFLG)); \ + printk("0x%04X = %-15s(0x%04X), flags = %s -> %s\n", \ + r_asm, 
#name"_asm", d, print_flags(buf1,inflags), print_flags(buf2,flags)); + +#define VAL_FAIL_LONG_UNARY(name) \ + printk("fail\n"); \ + printk("0x%08X = %-15s(0x%08X), flags = %s -> %s\n", \ + r, #name, d, print_flags(buf1,inflags), print_flags(buf2,M.x86.R_EFLG)); \ + printk("0x%08X = %-15s(0x%08X), flags = %s -> %s\n", \ + r_asm, #name"_asm", d, print_flags(buf1,inflags), print_flags(buf2,flags)); + +#define VAL_END_UNARY() \ + } \ + M.x86.R_EFLG = inflags = flags = def_flags | ALL_FLAGS; \ + if (failed) \ + break; \ + } \ + if (failed) \ + break; \ + } \ + if (!failed) \ + printk("passed\n"); \ +} + +#define VAL_BYTE_UNARY(name) \ + printk("Validating %s ... ", #name); \ + VAL_START_UNARY(u8,0xFF,0x1) \ + VAL_TEST_UNARY(name) \ + VAL_FAIL_BYTE_UNARY(name) \ + VAL_END_UNARY() + +#define VAL_WORD_UNARY(name) \ + printk("Validating %s ... ", #name); \ + VAL_START_UNARY(u16,0xFF00,0x100) \ + VAL_TEST_UNARY(name) \ + VAL_FAIL_WORD_UNARY(name) \ + VAL_END_UNARY() + +#define VAL_WORD_BYTE_UNARY(name) \ + printk("Validating %s ... ", #name); \ + VAL_START_UNARY(u16,0xFF,0x1) \ + VAL_TEST_UNARY(name) \ + VAL_FAIL_WORD_UNARY(name) \ + VAL_END_UNARY() + +#define VAL_LONG_UNARY(name) \ + printk("Validating %s ... ", #name); \ + VAL_START_UNARY(u32,0xFF000000,0x1000000) \ + VAL_TEST_UNARY(name) \ + VAL_FAIL_LONG_UNARY(name) \ + VAL_END_UNARY() + +#define VAL_BYTE_MUL(name) \ + printk("Validating %s ... 
", #name); \ +{ \ + u8 d,s; \ + u16 r,r_asm; \ + u32 flags,inflags; \ + int f,failed = false; \ + char buf1[80],buf2[80]; \ + for (d = 0; d < 0xFF; d += 1) { \ + for (s = 0; s < 0xFF; s += 1) { \ + M.x86.R_EFLG = inflags = flags = def_flags; \ + for (f = 0; f < 2; f++) { \ + name##_asm(&flags,&r_asm,d,s); \ + M.x86.R_AL = d; \ + name(s); \ + r = M.x86.R_AX; \ + if (r != r_asm || flags_are_different(M.x86.R_EFLG, flags)) \ + failed = true; \ + if (failed || trace) { \ + if (failed) \ + printk("fail\n"); \ + printk("0x%04X = %-15s(0x%02X,0x%02X), flags = %s -> %s\n", \ + r, #name, d, s, print_flags(buf1,inflags), print_flags(buf2,M.x86.R_EFLG)); \ + printk("0x%04X = %-15s(0x%02X,0x%02X), flags = %s -> %s\n", \ + r_asm, #name"_asm", d, s, print_flags(buf1,inflags), print_flags(buf2,flags)); \ + } \ + M.x86.R_EFLG = inflags = flags = def_flags | (ALL_FLAGS & ~F_OF); \ + if (failed) \ + break; \ + } \ + if (failed) \ + break; \ + } \ + if (failed) \ + break; \ + } \ + if (!failed) \ + printk("passed\n"); \ +} + +#define VAL_WORD_MUL(name) \ + printk("Validating %s ... 
", #name); \ +{ \ + u16 d,s; \ + u16 r_lo,r_asm_lo; \ + u16 r_hi,r_asm_hi; \ + u32 flags,inflags; \ + int f,failed = false; \ + char buf1[80],buf2[80]; \ + for (d = 0; d < 0xFF00; d += 0x100) { \ + for (s = 0; s < 0xFF00; s += 0x100) { \ + M.x86.R_EFLG = inflags = flags = def_flags; \ + for (f = 0; f < 2; f++) { \ + name##_asm(&flags,&r_asm_lo,&r_asm_hi,d,s); \ + M.x86.R_AX = d; \ + name(s); \ + r_lo = M.x86.R_AX; \ + r_hi = M.x86.R_DX; \ + if (r_lo != r_asm_lo || r_hi != r_asm_hi || flags_are_different(M.x86.R_EFLG, flags))\ + failed = true; \ + if (failed || trace) { \ + if (failed) \ + printk("fail\n"); \ + printk("0x%04X:0x%04X = %-15s(0x%04X,0x%04X), flags = %s -> %s\n", \ + r_hi,r_lo, #name, d, s, print_flags(buf1,inflags), print_flags(buf2,M.x86.R_EFLG)); \ + printk("0x%04X:0x%04X = %-15s(0x%04X,0x%04X), flags = %s -> %s\n", \ + r_asm_hi,r_asm_lo, #name"_asm", d, s, print_flags(buf1,inflags), print_flags(buf2,flags)); \ + } \ + M.x86.R_EFLG = inflags = flags = def_flags | (ALL_FLAGS & ~F_OF); \ + if (failed) \ + break; \ + } \ + if (failed) \ + break; \ + } \ + if (failed) \ + break; \ + } \ + if (!failed) \ + printk("passed\n"); \ +} + +#define VAL_LONG_MUL(name) \ + printk("Validating %s ... 
", #name); \ +{ \ + u32 d,s; \ + u32 r_lo,r_asm_lo; \ + u32 r_hi,r_asm_hi; \ + u32 flags,inflags; \ + int f,failed = false; \ + char buf1[80],buf2[80]; \ + for (d = 0; d < 0xFF000000; d += 0x1000000) { \ + for (s = 0; s < 0xFF000000; s += 0x1000000) { \ + M.x86.R_EFLG = inflags = flags = def_flags; \ + for (f = 0; f < 2; f++) { \ + name##_asm(&flags,&r_asm_lo,&r_asm_hi,d,s); \ + M.x86.R_EAX = d; \ + name(s); \ + r_lo = M.x86.R_EAX; \ + r_hi = M.x86.R_EDX; \ + if (r_lo != r_asm_lo || r_hi != r_asm_hi || flags_are_different(M.x86.R_EFLG, flags))\ + failed = true; \ + if (failed || trace) { \ + if (failed) \ + printk("fail\n"); \ + printk("0x%08X:0x%08X = %-15s(0x%08X,0x%08X), flags = %s -> %s\n", \ + r_hi,r_lo, #name, d, s, print_flags(buf1,inflags), print_flags(buf2,M.x86.R_EFLG)); \ + printk("0x%08X:0x%08X = %-15s(0x%08X,0x%08X), flags = %s -> %s\n", \ + r_asm_hi,r_asm_lo, #name"_asm", d, s, print_flags(buf1,inflags), print_flags(buf2,flags)); \ + } \ + M.x86.R_EFLG = inflags = flags = def_flags | (ALL_FLAGS & ~F_OF); \ + if (failed) \ + break; \ + } \ + if (failed) \ + break; \ + } \ + if (failed) \ + break; \ + } \ + if (!failed) \ + printk("passed\n"); \ +} + +#define VAL_BYTE_DIV(name) \ + printk("Validating %s ... 
", #name); \ +{ \ + u16 d,s; \ + u8 r_quot,r_rem,r_asm_quot,r_asm_rem; \ + u32 flags,inflags; \ + int f,failed = false; \ + char buf1[80],buf2[80]; \ + for (d = 0; d < 0xFF00; d += 0x100) { \ + for (s = 1; s < 0xFF; s += 1) { \ + M.x86.R_EFLG = inflags = flags = def_flags; \ + for (f = 0; f < 2; f++) { \ + M.x86.intr = 0; \ + M.x86.R_AX = d; \ + name(s); \ + r_quot = M.x86.R_AL; \ + r_rem = M.x86.R_AH; \ + if (M.x86.intr & INTR_SYNCH) \ + continue; \ + name##_asm(&flags,&r_asm_quot,&r_asm_rem,d,s); \ + if (r_quot != r_asm_quot || r_rem != r_asm_rem || flags_are_different(M.x86.R_EFLG, flags)) \ + failed = true; \ + if (failed || trace) { \ + if (failed) \ + printk("fail\n"); \ + printk("0x%02X:0x%02X = %-15s(0x%04X,0x%02X), flags = %s -> %s\n", \ + r_quot, r_rem, #name, d, s, print_flags(buf1,inflags), print_flags(buf2,M.x86.R_EFLG)); \ + printk("0x%02X:0x%02X = %-15s(0x%04X,0x%02X), flags = %s -> %s\n", \ + r_asm_quot, r_asm_rem, #name"_asm", d, s, print_flags(buf1,inflags), print_flags(buf2,flags)); \ + } \ + M.x86.R_EFLG = inflags = flags = def_flags | (ALL_FLAGS & ~F_OF); \ + if (failed) \ + break; \ + } \ + if (failed) \ + break; \ + } \ + if (failed) \ + break; \ + } \ + if (!failed) \ + printk("passed\n"); \ +} + +#define VAL_WORD_DIV(name) \ + printk("Validating %s ... 
", #name); \ +{ \ + u32 d,s; \ + u16 r_quot,r_rem,r_asm_quot,r_asm_rem; \ + u32 flags,inflags; \ + int f,failed = false; \ + char buf1[80],buf2[80]; \ + for (d = 0; d < 0xFF000000; d += 0x1000000) { \ + for (s = 0x100; s < 0xFF00; s += 0x100) { \ + M.x86.R_EFLG = inflags = flags = def_flags; \ + for (f = 0; f < 2; f++) { \ + M.x86.intr = 0; \ + M.x86.R_AX = d & 0xFFFF; \ + M.x86.R_DX = d >> 16; \ + name(s); \ + r_quot = M.x86.R_AX; \ + r_rem = M.x86.R_DX; \ + if (M.x86.intr & INTR_SYNCH) \ + continue; \ + name##_asm(&flags,&r_asm_quot,&r_asm_rem,d & 0xFFFF,d >> 16,s);\ + if (r_quot != r_asm_quot || r_rem != r_asm_rem || flags_are_different(M.x86.R_EFLG, flags)) \ + failed = true; \ + if (failed || trace) { \ + if (failed) \ + printk("fail\n"); \ + printk("0x%04X:0x%04X = %-15s(0x%08X,0x%04X), flags = %s -> %s\n", \ + r_quot, r_rem, #name, d, s, print_flags(buf1,inflags), print_flags(buf2,M.x86.R_EFLG)); \ + printk("0x%04X:0x%04X = %-15s(0x%08X,0x%04X), flags = %s -> %s\n", \ + r_asm_quot, r_asm_rem, #name"_asm", d, s, print_flags(buf1,inflags), print_flags(buf2,flags)); \ + } \ + M.x86.R_EFLG = inflags = flags = def_flags | (ALL_FLAGS & ~F_OF); \ + if (failed) \ + break; \ + } \ + if (failed) \ + break; \ + } \ + if (failed) \ + break; \ + } \ + if (!failed) \ + printk("passed\n"); \ +} + +#define VAL_LONG_DIV(name) \ + printk("Validating %s ... 
", #name); \ +{ \ + u32 d,s; \ + u32 r_quot,r_rem,r_asm_quot,r_asm_rem; \ + u32 flags,inflags; \ + int f,failed = false; \ + char buf1[80],buf2[80]; \ + for (d = 0; d < 0xFF000000; d += 0x1000000) { \ + for (s = 0x100; s < 0xFF00; s += 0x100) { \ + M.x86.R_EFLG = inflags = flags = def_flags; \ + for (f = 0; f < 2; f++) { \ + M.x86.intr = 0; \ + M.x86.R_EAX = d; \ + M.x86.R_EDX = 0; \ + name(s); \ + r_quot = M.x86.R_EAX; \ + r_rem = M.x86.R_EDX; \ + if (M.x86.intr & INTR_SYNCH) \ + continue; \ + name##_asm(&flags,&r_asm_quot,&r_asm_rem,d,0,s); \ + if (r_quot != r_asm_quot || r_rem != r_asm_rem || flags_are_different(M.x86.R_EFLG, flags)) \ + failed = true; \ + if (failed || trace) { \ + if (failed) \ + printk("fail\n"); \ + printk("0x%08X:0x%08X = %-15s(0x%08X:0x%08X,0x%08X), flags = %s -> %s\n", \ + r_quot, r_rem, #name, 0, d, s, print_flags(buf1,inflags), print_flags(buf2,M.x86.R_EFLG)); \ + printk("0x%08X:0x%08X = %-15s(0x%08X:0x%08X,0x%08X), flags = %s -> %s\n", \ + r_asm_quot, r_asm_rem, #name"_asm", 0, d, s, print_flags(buf1,inflags), print_flags(buf2,flags)); \ + } \ + M.x86.R_EFLG = inflags = flags = def_flags | (ALL_FLAGS & ~F_OF); \ + if (failed) \ + break; \ + } \ + if (failed) \ + break; \ + } \ + if (failed) \ + break; \ + } \ + if (!failed) \ + printk("passed\n"); \ +} + +void printk(const char *fmt, ...) 
+{ + va_list argptr; + va_start(argptr, fmt); + vfprintf(stdout, fmt, argptr); + fflush(stdout); + va_end(argptr); +} + +char * print_flags(char *buf,ulong flags) +{ + char *separator = ""; + + buf[0] = 0; + if (flags & F_CF) { + strcat(buf,separator); + strcat(buf,"CF"); + separator = ","; + } + if (flags & F_PF) { + strcat(buf,separator); + strcat(buf,"PF"); + separator = ","; + } + if (flags & F_AF) { + strcat(buf,separator); + strcat(buf,"AF"); + separator = ","; + } + if (flags & F_ZF) { + strcat(buf,separator); + strcat(buf,"ZF"); + separator = ","; + } + if (flags & F_SF) { + strcat(buf,separator); + strcat(buf,"SF"); + separator = ","; + } + if (flags & F_OF) { + strcat(buf,separator); + strcat(buf,"OF"); + separator = ","; + } + if (separator[0] == 0) + strcpy(buf,"None"); + return buf; +} + +int main(int argc, char *argv[]) +{ + u32 def_flags; + int trace = false; + + if (argc > 1) + trace = true; + memset(&M, 0, sizeof(M)); + def_flags = get_flags_asm() & ~ALL_FLAGS; + + cur_flags_mask = F_AF | F_CF; + VAL_WORD_UNARY(aaa_word); + VAL_WORD_UNARY(aas_word); + + cur_flags_mask = F_SF | F_ZF | F_PF; + VAL_WORD_UNARY(aad_word); + VAL_WORD_UNARY(aam_word); + + cur_flags_mask = ALL_FLAGS; + VAL_BYTE_BYTE_BINARY(adc_byte); + VAL_WORD_WORD_BINARY(adc_word); + VAL_LONG_LONG_BINARY(adc_long); + + VAL_BYTE_BYTE_BINARY(add_byte); + VAL_WORD_WORD_BINARY(add_word); + VAL_LONG_LONG_BINARY(add_long); + + cur_flags_mask = ALL_FLAGS & (~F_AF); + VAL_BYTE_BYTE_BINARY(and_byte); + VAL_WORD_WORD_BINARY(and_word); + VAL_LONG_LONG_BINARY(and_long); + + cur_flags_mask = ALL_FLAGS; + VAL_BYTE_BYTE_BINARY(cmp_byte); + VAL_WORD_WORD_BINARY(cmp_word); + VAL_LONG_LONG_BINARY(cmp_long); + + cur_flags_mask = ALL_FLAGS & (~F_OF); + VAL_BYTE_UNARY(daa_byte); + VAL_BYTE_UNARY(das_byte); // Fails for 0x9A (out of range anyway) + + cur_flags_mask = ALL_FLAGS; + VAL_BYTE_UNARY(dec_byte); + VAL_WORD_UNARY(dec_word); + VAL_LONG_UNARY(dec_long); + + VAL_BYTE_UNARY(inc_byte); + 
VAL_WORD_UNARY(inc_word); + VAL_LONG_UNARY(inc_long); + + cur_flags_mask = ALL_FLAGS & (~F_AF); + VAL_BYTE_BYTE_BINARY(or_byte); + VAL_WORD_WORD_BINARY(or_word); + VAL_LONG_LONG_BINARY(or_long); + + cur_flags_mask = ALL_FLAGS; + VAL_BYTE_UNARY(neg_byte); + VAL_WORD_UNARY(neg_word); + VAL_LONG_UNARY(neg_long); + + VAL_BYTE_UNARY(not_byte); + VAL_WORD_UNARY(not_word); + VAL_LONG_UNARY(not_long); + + cur_flags_mask = ALL_FLAGS & (~F_OF); + VAL_BYTE_ROTATE(rcl_byte); + VAL_WORD_ROTATE(rcl_word); + VAL_LONG_ROTATE(rcl_long); + + VAL_BYTE_ROTATE(rcr_byte); + VAL_WORD_ROTATE(rcr_word); + VAL_LONG_ROTATE(rcr_long); + + VAL_BYTE_ROTATE(rol_byte); + VAL_WORD_ROTATE(rol_word); + VAL_LONG_ROTATE(rol_long); + + VAL_BYTE_ROTATE(ror_byte); + VAL_WORD_ROTATE(ror_word); + VAL_LONG_ROTATE(ror_long); + + cur_flags_mask = ALL_FLAGS & (~(F_AF | F_OF)); + VAL_BYTE_ROTATE(shl_byte); + VAL_WORD_ROTATE(shl_word); + VAL_LONG_ROTATE(shl_long); + + VAL_BYTE_ROTATE(shr_byte); + VAL_WORD_ROTATE(shr_word); + VAL_LONG_ROTATE(shr_long); + + VAL_BYTE_ROTATE(sar_byte); + VAL_WORD_ROTATE(sar_word); + VAL_LONG_ROTATE(sar_long); + + cur_flags_mask = ALL_FLAGS & (~(F_AF | F_OF)); + VAL_WORD_ROTATE_DBL(shld_word); + VAL_LONG_ROTATE_DBL(shld_long); + + VAL_WORD_ROTATE_DBL(shrd_word); + VAL_LONG_ROTATE_DBL(shrd_long); + + cur_flags_mask = ALL_FLAGS; + VAL_BYTE_BYTE_BINARY(sbb_byte); + VAL_WORD_WORD_BINARY(sbb_word); + VAL_LONG_LONG_BINARY(sbb_long); + + VAL_BYTE_BYTE_BINARY(sub_byte); + VAL_WORD_WORD_BINARY(sub_word); + VAL_LONG_LONG_BINARY(sub_long); + + cur_flags_mask = ALL_FLAGS & (~F_AF); + VAL_BYTE_BYTE_BINARY(xor_byte); + VAL_WORD_WORD_BINARY(xor_word); + VAL_LONG_LONG_BINARY(xor_long); + + VAL_VOID_BYTE_BINARY(test_byte); + VAL_VOID_WORD_BINARY(test_word); + VAL_VOID_LONG_BINARY(test_long); + + cur_flags_mask = F_CF | F_OF; + VAL_BYTE_MUL(imul_byte); + VAL_WORD_MUL(imul_word); + VAL_LONG_MUL(imul_long); + + VAL_BYTE_MUL(mul_byte); + VAL_WORD_MUL(mul_word); + VAL_LONG_MUL(mul_long); + + 
cur_flags_mask = 0; + VAL_BYTE_DIV(idiv_byte); + VAL_WORD_DIV(idiv_word); + VAL_LONG_DIV(idiv_long); + + VAL_BYTE_DIV(div_byte); + VAL_WORD_DIV(div_word); + VAL_LONG_DIV(div_long); + + return 0; +} diff --git a/arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/debug.h b/arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/debug.h new file mode 100644 index 000000000000..1c109597e257 --- /dev/null +++ b/arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/debug.h @@ -0,0 +1,212 @@ +/**************************************************************************** +* +* Realmode X86 Emulator Library +* +* Copyright (C) 1991-2004 SciTech Software, Inc. +* Copyright (C) David Mosberger-Tang +* Copyright (C) 1999 Egbert Eich +* +* ======================================================================== +* +* Permission to use, copy, modify, distribute, and sell this software and +* its documentation for any purpose is hereby granted without fee, +* provided that the above copyright notice appear in all copies and that +* both that copyright notice and this permission notice appear in +* supporting documentation, and that the name of the authors not be used +* in advertising or publicity pertaining to distribution of the software +* without specific, written prior permission. The authors makes no +* representations about the suitability of this software for any purpose. +* It is provided "as is" without express or implied warranty. +* +* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR +* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF +* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +* PERFORMANCE OF THIS SOFTWARE. 
+* +* ======================================================================== +* +* Language: ANSI C +* Environment: Any +* Developer: Kendall Bennett +* +* Description: Header file for debug definitions. +* +****************************************************************************/ + +#ifndef __X86EMU_DEBUG_H +#define __X86EMU_DEBUG_H + +#include "x86emu/types.h" + +/*---------------------- Macros and type definitions ----------------------*/ + +/* checks to be enabled for "runtime" */ + +#define CHECK_IP_FETCH_F 0x1 +#define CHECK_SP_ACCESS_F 0x2 +#define CHECK_MEM_ACCESS_F 0x4 /*using regular linear pointer */ +#define CHECK_DATA_ACCESS_F 0x8 /*using segment:offset*/ + +#ifdef DEBUG +# define CHECK_IP_FETCH() (M.x86.check & CHECK_IP_FETCH_F) +# define CHECK_SP_ACCESS() (M.x86.check & CHECK_SP_ACCESS_F) +# define CHECK_MEM_ACCESS() (M.x86.check & CHECK_MEM_ACCESS_F) +# define CHECK_DATA_ACCESS() (M.x86.check & CHECK_DATA_ACCESS_F) +#else +# define CHECK_IP_FETCH() +# define CHECK_SP_ACCESS() +# define CHECK_MEM_ACCESS() +# define CHECK_DATA_ACCESS() +#endif + +#ifdef DEBUG +# define DEBUG_INSTRUMENT() (M.x86.debug & DEBUG_INSTRUMENT_F) +# define DEBUG_DECODE() (M.x86.debug & DEBUG_DECODE_F) +# define DEBUG_TRACE() (M.x86.debug & DEBUG_TRACE_F) +# define DEBUG_STEP() (M.x86.debug & DEBUG_STEP_F) +# define DEBUG_DISASSEMBLE() (M.x86.debug & DEBUG_DISASSEMBLE_F) +# define DEBUG_BREAK() (M.x86.debug & DEBUG_BREAK_F) +# define DEBUG_SVC() (M.x86.debug & DEBUG_SVC_F) +# define DEBUG_SAVE_IP_CS() (M.x86.debug & DEBUG_SAVE_CS_IP) + +# define DEBUG_FS() (M.x86.debug & DEBUG_FS_F) +# define DEBUG_PROC() (M.x86.debug & DEBUG_PROC_F) +# define DEBUG_SYSINT() (M.x86.debug & DEBUG_SYSINT_F) +# define DEBUG_TRACECALL() (M.x86.debug & DEBUG_TRACECALL_F) +# define DEBUG_TRACECALLREGS() (M.x86.debug & DEBUG_TRACECALL_REGS_F) +# define DEBUG_SYS() (M.x86.debug & DEBUG_SYS_F) +# define DEBUG_MEM_TRACE() (M.x86.debug & DEBUG_MEM_TRACE_F) +# define DEBUG_IO_TRACE() (M.x86.debug & 
DEBUG_IO_TRACE_F) +# define DEBUG_DECODE_NOPRINT() (M.x86.debug & DEBUG_DECODE_NOPRINT_F) +#else +# define DEBUG_INSTRUMENT() 0 +# define DEBUG_DECODE() 0 +# define DEBUG_TRACE() 0 +# define DEBUG_STEP() 0 +# define DEBUG_DISASSEMBLE() 0 +# define DEBUG_BREAK() 0 +# define DEBUG_SVC() 0 +# define DEBUG_SAVE_IP_CS() 0 +# define DEBUG_FS() 0 +# define DEBUG_PROC() 0 +# define DEBUG_SYSINT() 0 +# define DEBUG_TRACECALL() 0 +# define DEBUG_TRACECALLREGS() 0 +# define DEBUG_SYS() 0 +# define DEBUG_MEM_TRACE() 0 +# define DEBUG_IO_TRACE() 0 +# define DEBUG_DECODE_NOPRINT() 0 +#endif + +#ifdef DEBUG + +# define DECODE_PRINTF(x) if (DEBUG_DECODE()) \ + x86emu_decode_printf(x) +# define DECODE_PRINTF2(x,y) if (DEBUG_DECODE()) \ + x86emu_decode_printf2(x,y) + +/* + * The following allow us to look at the bytes of an instruction. The + * first INCR_INSTRN_LEN, is called everytime bytes are consumed in + * the decoding process. The SAVE_IP_CS is called initially when the + * major opcode of the instruction is accessed. 
+ */ +#define INC_DECODED_INST_LEN(x) \ + if (DEBUG_DECODE()) \ + x86emu_inc_decoded_inst_len(x) + +#define SAVE_IP_CS(x,y) \ + if (DEBUG_DECODE() | DEBUG_TRACECALL() | DEBUG_BREAK() \ + | DEBUG_IO_TRACE() | DEBUG_SAVE_IP_CS()) { \ + M.x86.saved_cs = x; \ + M.x86.saved_ip = y; \ + } +#else +# define INC_DECODED_INST_LEN(x) +# define DECODE_PRINTF(x) +# define DECODE_PRINTF2(x,y) +# define SAVE_IP_CS(x,y) +#endif + +#ifdef DEBUG +#define TRACE_REGS() \ + if (DEBUG_DISASSEMBLE()) { \ + x86emu_just_disassemble(); \ + goto EndOfTheInstructionProcedure; \ + } \ + if (DEBUG_TRACE() || DEBUG_DECODE()) X86EMU_trace_regs() +#else +# define TRACE_REGS() +#endif + +#ifdef DEBUG +# define SINGLE_STEP() if (DEBUG_STEP()) x86emu_single_step() +#else +# define SINGLE_STEP() +#endif + +#define TRACE_AND_STEP() \ + TRACE_REGS(); \ + SINGLE_STEP() + +#ifdef DEBUG +# define START_OF_INSTR() +# define END_OF_INSTR() EndOfTheInstructionProcedure: x86emu_end_instr(); +# define END_OF_INSTR_NO_TRACE() x86emu_end_instr(); +#else +# define START_OF_INSTR() +# define END_OF_INSTR() +# define END_OF_INSTR_NO_TRACE() +#endif + +#ifdef DEBUG +# define CALL_TRACE(u,v,w,x,s) \ + if (DEBUG_TRACECALLREGS()) \ + x86emu_dump_regs(); \ + if (DEBUG_TRACECALL()) \ + printk("%04x:%04x: CALL %s%04x:%04x\n", u , v, s, w, x); +# define RETURN_TRACE(n,u,v) \ + if (DEBUG_TRACECALLREGS()) \ + x86emu_dump_regs(); \ + if (DEBUG_TRACECALL()) \ + printk("%04x:%04x: %s\n",u,v,n); +#else +# define CALL_TRACE(u,v,w,x,s) +# define RETURN_TRACE(n,u,v) +#endif + +#ifdef DEBUG +#define DB(x) x +#else +#define DB(x) +#endif + +/*-------------------------- Function Prototypes --------------------------*/ + +#ifdef __cplusplus +extern "C" { /* Use "C" linkage when in C++ mode */ +#endif + +extern void x86emu_inc_decoded_inst_len (int x); +extern void x86emu_decode_printf (char *x); +extern void x86emu_decode_printf2 (char *x, int y); +extern void x86emu_just_disassemble (void); +extern void x86emu_single_step (void); 
+extern void x86emu_end_instr (void); +extern void x86emu_dump_regs (void); +extern void x86emu_dump_xregs (void); +extern void x86emu_print_int_vect (u16 iv); +extern void x86emu_instrument_instruction (void); +extern void x86emu_check_ip_access (void); +extern void x86emu_check_sp_access (void); +extern void x86emu_check_mem_access (u32 p); +extern void x86emu_check_data_access (uint s, uint o); + +#ifdef __cplusplus +} /* End of "C" linkage for C++ */ +#endif + +#endif /* __X86EMU_DEBUG_H */ diff --git a/arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/decode.h b/arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/decode.h new file mode 100644 index 000000000000..77769f0094c1 --- /dev/null +++ b/arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/decode.h @@ -0,0 +1,88 @@ +/**************************************************************************** +* +* Realmode X86 Emulator Library +* +* Copyright (C) 1991-2004 SciTech Software, Inc. +* Copyright (C) David Mosberger-Tang +* Copyright (C) 1999 Egbert Eich +* +* ======================================================================== +* +* Permission to use, copy, modify, distribute, and sell this software and +* its documentation for any purpose is hereby granted without fee, +* provided that the above copyright notice appear in all copies and that +* both that copyright notice and this permission notice appear in +* supporting documentation, and that the name of the authors not be used +* in advertising or publicity pertaining to distribution of the software +* without specific, written prior permission. The authors makes no +* representations about the suitability of this software for any purpose. +* It is provided "as is" without express or implied warranty. 
+* +* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR +* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF +* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +* PERFORMANCE OF THIS SOFTWARE. +* +* ======================================================================== +* +* Language: ANSI C +* Environment: Any +* Developer: Kendall Bennett +* +* Description: Header file for instruction decoding logic. +* +****************************************************************************/ + +#ifndef __X86EMU_DECODE_H +#define __X86EMU_DECODE_H + +/*---------------------- Macros and type definitions ----------------------*/ + +/* Instruction Decoding Stuff */ + +#define FETCH_DECODE_MODRM(mod,rh,rl) fetch_decode_modrm(&mod,&rh,&rl) +#define DECODE_RM_BYTE_REGISTER(r) decode_rm_byte_register(r) +#define DECODE_RM_WORD_REGISTER(r) decode_rm_word_register(r) +#define DECODE_RM_LONG_REGISTER(r) decode_rm_long_register(r) +#define DECODE_CLEAR_SEGOVR() M.x86.mode &= ~SYSMODE_CLRMASK + +/*-------------------------- Function Prototypes --------------------------*/ + +#ifdef __cplusplus +extern "C" { /* Use "C" linkage when in C++ mode */ +#endif + +void x86emu_intr_raise (u8 type); +void fetch_decode_modrm (int *mod,int *regh,int *regl); +u8 fetch_byte_imm (void); +u16 fetch_word_imm (void); +u32 fetch_long_imm (void); +u8 fetch_data_byte (uint offset); +u8 fetch_data_byte_abs (uint segment, uint offset); +u16 fetch_data_word (uint offset); +u16 fetch_data_word_abs (uint segment, uint offset); +u32 fetch_data_long (uint offset); +u32 fetch_data_long_abs (uint segment, uint offset); +void store_data_byte (uint offset, u8 val); +void store_data_byte_abs (uint segment, uint offset, u8 val); +void 
store_data_word (uint offset, u16 val); +void store_data_word_abs (uint segment, uint offset, u16 val); +void store_data_long (uint offset, u32 val); +void store_data_long_abs (uint segment, uint offset, u32 val); +u8* decode_rm_byte_register(int reg); +u16* decode_rm_word_register(int reg); +u32* decode_rm_long_register(int reg); +u16* decode_rm_seg_register(int reg); +unsigned decode_rm00_address(int rm); +unsigned decode_rm01_address(int rm); +unsigned decode_rm10_address(int rm); +unsigned decode_rmXX_address(int mod, int rm); + +#ifdef __cplusplus +} /* End of "C" linkage for C++ */ +#endif + +#endif /* __X86EMU_DECODE_H */ diff --git a/arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/fpu.h b/arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/fpu.h new file mode 100644 index 000000000000..206c6eebbaf3 --- /dev/null +++ b/arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/fpu.h @@ -0,0 +1,61 @@ +/**************************************************************************** +* +* Realmode X86 Emulator Library +* +* Copyright (C) 1991-2004 SciTech Software, Inc. +* Copyright (C) David Mosberger-Tang +* Copyright (C) 1999 Egbert Eich +* +* ======================================================================== +* +* Permission to use, copy, modify, distribute, and sell this software and +* its documentation for any purpose is hereby granted without fee, +* provided that the above copyright notice appear in all copies and that +* both that copyright notice and this permission notice appear in +* supporting documentation, and that the name of the authors not be used +* in advertising or publicity pertaining to distribution of the software +* without specific, written prior permission. The authors makes no +* representations about the suitability of this software for any purpose. +* It is provided "as is" without express or implied warranty. 
+* +* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR +* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF +* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +* PERFORMANCE OF THIS SOFTWARE. +* +* ======================================================================== +* +* Language: ANSI C +* Environment: Any +* Developer: Kendall Bennett +* +* Description: Header file for FPU instruction decoding. +* +****************************************************************************/ + +#ifndef __X86EMU_FPU_H +#define __X86EMU_FPU_H + +#ifdef __cplusplus +extern "C" { /* Use "C" linkage when in C++ mode */ +#endif + +/* these have to be defined, whether 8087 support compiled in or not. */ + +extern void x86emuOp_esc_coprocess_d8 (u8 op1); +extern void x86emuOp_esc_coprocess_d9 (u8 op1); +extern void x86emuOp_esc_coprocess_da (u8 op1); +extern void x86emuOp_esc_coprocess_db (u8 op1); +extern void x86emuOp_esc_coprocess_dc (u8 op1); +extern void x86emuOp_esc_coprocess_dd (u8 op1); +extern void x86emuOp_esc_coprocess_de (u8 op1); +extern void x86emuOp_esc_coprocess_df (u8 op1); + +#ifdef __cplusplus +} /* End of "C" linkage for C++ */ +#endif + +#endif /* __X86EMU_FPU_H */ diff --git a/arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/ops.h b/arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/ops.h new file mode 100644 index 000000000000..a4f2316ba0d4 --- /dev/null +++ b/arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/ops.h @@ -0,0 +1,45 @@ +/**************************************************************************** +* +* Realmode X86 Emulator Library +* +* Copyright (C) 1991-2004 SciTech Software, Inc. 
+* Copyright (C) David Mosberger-Tang +* Copyright (C) 1999 Egbert Eich +* +* ======================================================================== +* +* Permission to use, copy, modify, distribute, and sell this software and +* its documentation for any purpose is hereby granted without fee, +* provided that the above copyright notice appear in all copies and that +* both that copyright notice and this permission notice appear in +* supporting documentation, and that the name of the authors not be used +* in advertising or publicity pertaining to distribution of the software +* without specific, written prior permission. The authors makes no +* representations about the suitability of this software for any purpose. +* It is provided "as is" without express or implied warranty. +* +* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR +* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF +* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +* PERFORMANCE OF THIS SOFTWARE. +* +* ======================================================================== +* +* Language: ANSI C +* Environment: Any +* Developer: Kendall Bennett +* +* Description: Header file for operand decoding functions. 
+* +****************************************************************************/ + +#ifndef __X86EMU_OPS_H +#define __X86EMU_OPS_H + +extern void (*x86emu_optab[0x100])(u8 op1); +extern void (*x86emu_optab2[0x100])(u8 op2); + +#endif /* __X86EMU_OPS_H */ diff --git a/arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/prim_asm.h b/arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/prim_asm.h new file mode 100644 index 000000000000..79d78e0b7720 --- /dev/null +++ b/arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/prim_asm.h @@ -0,0 +1,1991 @@ +/**************************************************************************** +* +* Realmode X86 Emulator Library +* +* Copyright (C) 1991-2004 SciTech Software, Inc. +* Copyright (C) David Mosberger-Tang +* Copyright (C) 1999 Egbert Eich +* +* ======================================================================== +* +* Permission to use, copy, modify, distribute, and sell this software and +* its documentation for any purpose is hereby granted without fee, +* provided that the above copyright notice appear in all copies and that +* both that copyright notice and this permission notice appear in +* supporting documentation, and that the name of the authors not be used +* in advertising or publicity pertaining to distribution of the software +* without specific, written prior permission. The authors makes no +* representations about the suitability of this software for any purpose. +* It is provided "as is" without express or implied warranty. +* +* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR +* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF +* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +* PERFORMANCE OF THIS SOFTWARE. 
+* +* ======================================================================== +* +* Language: Watcom C++ 10.6 or later +* Environment: Any +* Developer: Kendall Bennett +* +* Description: Inline assembler versions of the primitive operand +* functions for faster performance. At the moment this is +* x86 inline assembler, but these functions could be replaced +* with native inline assembler for each supported processor +* platform. +* +****************************************************************************/ + +#ifndef __X86EMU_PRIM_ASM_H +#define __X86EMU_PRIM_ASM_H + +#ifdef __WATCOMC__ + +#ifndef VALIDATE +#define __HAVE_INLINE_ASSEMBLER__ +#endif + +u32 get_flags_asm(void); +#pragma aux get_flags_asm = \ + "pushf" \ + "pop eax" \ + value [eax] \ + modify exact [eax]; + +u16 aaa_word_asm(u32 *flags,u16 d); +#pragma aux aaa_word_asm = \ + "push [edi]" \ + "popf" \ + "aaa" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] \ + value [ax] \ + modify exact [ax]; + +u16 aas_word_asm(u32 *flags,u16 d); +#pragma aux aas_word_asm = \ + "push [edi]" \ + "popf" \ + "aas" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] \ + value [ax] \ + modify exact [ax]; + +u16 aad_word_asm(u32 *flags,u16 d); +#pragma aux aad_word_asm = \ + "push [edi]" \ + "popf" \ + "aad" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] \ + value [ax] \ + modify exact [ax]; + +u16 aam_word_asm(u32 *flags,u8 d); +#pragma aux aam_word_asm = \ + "push [edi]" \ + "popf" \ + "aam" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [al] \ + value [ax] \ + modify exact [ax]; + +u8 adc_byte_asm(u32 *flags,u8 d, u8 s); +#pragma aux adc_byte_asm = \ + "push [edi]" \ + "popf" \ + "adc al,bl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [al] [bl] \ + value [al] \ + modify exact [al bl]; + +u16 adc_word_asm(u32 *flags,u16 d, u16 s); +#pragma aux adc_word_asm = \ + "push [edi]" \ + "popf" \ + "adc ax,bx" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] [bx] \ + value [ax] \ + modify exact [ax bx]; + +u32 adc_long_asm(u32 
*flags,u32 d, u32 s); +#pragma aux adc_long_asm = \ + "push [edi]" \ + "popf" \ + "adc eax,ebx" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [eax] [ebx] \ + value [eax] \ + modify exact [eax ebx]; + +u8 add_byte_asm(u32 *flags,u8 d, u8 s); +#pragma aux add_byte_asm = \ + "push [edi]" \ + "popf" \ + "add al,bl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [al] [bl] \ + value [al] \ + modify exact [al bl]; + +u16 add_word_asm(u32 *flags,u16 d, u16 s); +#pragma aux add_word_asm = \ + "push [edi]" \ + "popf" \ + "add ax,bx" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] [bx] \ + value [ax] \ + modify exact [ax bx]; + +u32 add_long_asm(u32 *flags,u32 d, u32 s); +#pragma aux add_long_asm = \ + "push [edi]" \ + "popf" \ + "add eax,ebx" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [eax] [ebx] \ + value [eax] \ + modify exact [eax ebx]; + +u8 and_byte_asm(u32 *flags,u8 d, u8 s); +#pragma aux and_byte_asm = \ + "push [edi]" \ + "popf" \ + "and al,bl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [al] [bl] \ + value [al] \ + modify exact [al bl]; + +u16 and_word_asm(u32 *flags,u16 d, u16 s); +#pragma aux and_word_asm = \ + "push [edi]" \ + "popf" \ + "and ax,bx" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] [bx] \ + value [ax] \ + modify exact [ax bx]; + +u32 and_long_asm(u32 *flags,u32 d, u32 s); +#pragma aux and_long_asm = \ + "push [edi]" \ + "popf" \ + "and eax,ebx" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [eax] [ebx] \ + value [eax] \ + modify exact [eax ebx]; + +u8 cmp_byte_asm(u32 *flags,u8 d, u8 s); +#pragma aux cmp_byte_asm = \ + "push [edi]" \ + "popf" \ + "cmp al,bl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [al] [bl] \ + value [al] \ + modify exact [al bl]; + +u16 cmp_word_asm(u32 *flags,u16 d, u16 s); +#pragma aux cmp_word_asm = \ + "push [edi]" \ + "popf" \ + "cmp ax,bx" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] [bx] \ + value [ax] \ + modify exact [ax bx]; + +u32 cmp_long_asm(u32 *flags,u32 d, u32 s); +#pragma aux cmp_long_asm = \ + "push [edi]" \ + "popf" \ 
+ "cmp eax,ebx" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [eax] [ebx] \ + value [eax] \ + modify exact [eax ebx]; + +u8 daa_byte_asm(u32 *flags,u8 d); +#pragma aux daa_byte_asm = \ + "push [edi]" \ + "popf" \ + "daa" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [al] \ + value [al] \ + modify exact [al]; + +u8 das_byte_asm(u32 *flags,u8 d); +#pragma aux das_byte_asm = \ + "push [edi]" \ + "popf" \ + "das" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [al] \ + value [al] \ + modify exact [al]; + +u8 dec_byte_asm(u32 *flags,u8 d); +#pragma aux dec_byte_asm = \ + "push [edi]" \ + "popf" \ + "dec al" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [al] \ + value [al] \ + modify exact [al]; + +u16 dec_word_asm(u32 *flags,u16 d); +#pragma aux dec_word_asm = \ + "push [edi]" \ + "popf" \ + "dec ax" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] \ + value [ax] \ + modify exact [ax]; + +u32 dec_long_asm(u32 *flags,u32 d); +#pragma aux dec_long_asm = \ + "push [edi]" \ + "popf" \ + "dec eax" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [eax] \ + value [eax] \ + modify exact [eax]; + +u8 inc_byte_asm(u32 *flags,u8 d); +#pragma aux inc_byte_asm = \ + "push [edi]" \ + "popf" \ + "inc al" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [al] \ + value [al] \ + modify exact [al]; + +u16 inc_word_asm(u32 *flags,u16 d); +#pragma aux inc_word_asm = \ + "push [edi]" \ + "popf" \ + "inc ax" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] \ + value [ax] \ + modify exact [ax]; + +u32 inc_long_asm(u32 *flags,u32 d); +#pragma aux inc_long_asm = \ + "push [edi]" \ + "popf" \ + "inc eax" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [eax] \ + value [eax] \ + modify exact [eax]; + +u8 or_byte_asm(u32 *flags,u8 d, u8 s); +#pragma aux or_byte_asm = \ + "push [edi]" \ + "popf" \ + "or al,bl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [al] [bl] \ + value [al] \ + modify exact [al bl]; + +u16 or_word_asm(u32 *flags,u16 d, u16 s); +#pragma aux or_word_asm = \ + "push [edi]" \ + "popf" \ + "or ax,bx" \ + "pushf" \ + 
"pop [edi]" \ + parm [edi] [ax] [bx] \ + value [ax] \ + modify exact [ax bx]; + +u32 or_long_asm(u32 *flags,u32 d, u32 s); +#pragma aux or_long_asm = \ + "push [edi]" \ + "popf" \ + "or eax,ebx" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [eax] [ebx] \ + value [eax] \ + modify exact [eax ebx]; + +u8 neg_byte_asm(u32 *flags,u8 d); +#pragma aux neg_byte_asm = \ + "push [edi]" \ + "popf" \ + "neg al" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [al] \ + value [al] \ + modify exact [al]; + +u16 neg_word_asm(u32 *flags,u16 d); +#pragma aux neg_word_asm = \ + "push [edi]" \ + "popf" \ + "neg ax" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] \ + value [ax] \ + modify exact [ax]; + +u32 neg_long_asm(u32 *flags,u32 d); +#pragma aux neg_long_asm = \ + "push [edi]" \ + "popf" \ + "neg eax" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [eax] \ + value [eax] \ + modify exact [eax]; + +u8 not_byte_asm(u32 *flags,u8 d); +#pragma aux not_byte_asm = \ + "push [edi]" \ + "popf" \ + "not al" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [al] \ + value [al] \ + modify exact [al]; + +u16 not_word_asm(u32 *flags,u16 d); +#pragma aux not_word_asm = \ + "push [edi]" \ + "popf" \ + "not ax" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] \ + value [ax] \ + modify exact [ax]; + +u32 not_long_asm(u32 *flags,u32 d); +#pragma aux not_long_asm = \ + "push [edi]" \ + "popf" \ + "not eax" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [eax] \ + value [eax] \ + modify exact [eax]; + +u8 rcl_byte_asm(u32 *flags,u8 d, u8 s); +#pragma aux rcl_byte_asm = \ + "push [edi]" \ + "popf" \ + "rcl al,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [al] [cl] \ + value [al] \ + modify exact [al cl]; + +u16 rcl_word_asm(u32 *flags,u16 d, u8 s); +#pragma aux rcl_word_asm = \ + "push [edi]" \ + "popf" \ + "rcl ax,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] [cl] \ + value [ax] \ + modify exact [ax cl]; + +u32 rcl_long_asm(u32 *flags,u32 d, u8 s); +#pragma aux rcl_long_asm = \ + "push [edi]" \ + "popf" \ + "rcl eax,cl" 
\ + "pushf" \ + "pop [edi]" \ + parm [edi] [eax] [cl] \ + value [eax] \ + modify exact [eax cl]; + +u8 rcr_byte_asm(u32 *flags,u8 d, u8 s); +#pragma aux rcr_byte_asm = \ + "push [edi]" \ + "popf" \ + "rcr al,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [al] [cl] \ + value [al] \ + modify exact [al cl]; + +u16 rcr_word_asm(u32 *flags,u16 d, u8 s); +#pragma aux rcr_word_asm = \ + "push [edi]" \ + "popf" \ + "rcr ax,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] [cl] \ + value [ax] \ + modify exact [ax cl]; + +u32 rcr_long_asm(u32 *flags,u32 d, u8 s); +#pragma aux rcr_long_asm = \ + "push [edi]" \ + "popf" \ + "rcr eax,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [eax] [cl] \ + value [eax] \ + modify exact [eax cl]; + +u8 rol_byte_asm(u32 *flags,u8 d, u8 s); +#pragma aux rol_byte_asm = \ + "push [edi]" \ + "popf" \ + "rol al,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [al] [cl] \ + value [al] \ + modify exact [al cl]; + +u16 rol_word_asm(u32 *flags,u16 d, u8 s); +#pragma aux rol_word_asm = \ + "push [edi]" \ + "popf" \ + "rol ax,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] [cl] \ + value [ax] \ + modify exact [ax cl]; + +u32 rol_long_asm(u32 *flags,u32 d, u8 s); +#pragma aux rol_long_asm = \ + "push [edi]" \ + "popf" \ + "rol eax,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [eax] [cl] \ + value [eax] \ + modify exact [eax cl]; + +u8 ror_byte_asm(u32 *flags,u8 d, u8 s); +#pragma aux ror_byte_asm = \ + "push [edi]" \ + "popf" \ + "ror al,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [al] [cl] \ + value [al] \ + modify exact [al cl]; + +u16 ror_word_asm(u32 *flags,u16 d, u8 s); +#pragma aux ror_word_asm = \ + "push [edi]" \ + "popf" \ + "ror ax,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] [cl] \ + value [ax] \ + modify exact [ax cl]; + +u32 ror_long_asm(u32 *flags,u32 d, u8 s); +#pragma aux ror_long_asm = \ + "push [edi]" \ + "popf" \ + "ror eax,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [eax] [cl] \ + value [eax] \ + modify exact [eax 
cl]; + +u8 shl_byte_asm(u32 *flags,u8 d, u8 s); +#pragma aux shl_byte_asm = \ + "push [edi]" \ + "popf" \ + "shl al,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [al] [cl] \ + value [al] \ + modify exact [al cl]; + +u16 shl_word_asm(u32 *flags,u16 d, u8 s); +#pragma aux shl_word_asm = \ + "push [edi]" \ + "popf" \ + "shl ax,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] [cl] \ + value [ax] \ + modify exact [ax cl]; + +u32 shl_long_asm(u32 *flags,u32 d, u8 s); +#pragma aux shl_long_asm = \ + "push [edi]" \ + "popf" \ + "shl eax,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [eax] [cl] \ + value [eax] \ + modify exact [eax cl]; + +u8 shr_byte_asm(u32 *flags,u8 d, u8 s); +#pragma aux shr_byte_asm = \ + "push [edi]" \ + "popf" \ + "shr al,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [al] [cl] \ + value [al] \ + modify exact [al cl]; + +u16 shr_word_asm(u32 *flags,u16 d, u8 s); +#pragma aux shr_word_asm = \ + "push [edi]" \ + "popf" \ + "shr ax,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] [cl] \ + value [ax] \ + modify exact [ax cl]; + +u32 shr_long_asm(u32 *flags,u32 d, u8 s); +#pragma aux shr_long_asm = \ + "push [edi]" \ + "popf" \ + "shr eax,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [eax] [cl] \ + value [eax] \ + modify exact [eax cl]; + +u8 sar_byte_asm(u32 *flags,u8 d, u8 s); +#pragma aux sar_byte_asm = \ + "push [edi]" \ + "popf" \ + "sar al,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [al] [cl] \ + value [al] \ + modify exact [al cl]; + +u16 sar_word_asm(u32 *flags,u16 d, u8 s); +#pragma aux sar_word_asm = \ + "push [edi]" \ + "popf" \ + "sar ax,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] [cl] \ + value [ax] \ + modify exact [ax cl]; + +u32 sar_long_asm(u32 *flags,u32 d, u8 s); +#pragma aux sar_long_asm = \ + "push [edi]" \ + "popf" \ + "sar eax,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [eax] [cl] \ + value [eax] \ + modify exact [eax cl]; + +u16 shld_word_asm(u32 *flags,u16 d, u16 fill, u8 s); +#pragma aux shld_word_asm = \ + 
"push [edi]" \ + "popf" \ + "shld ax,dx,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] [dx] [cl] \ + value [ax] \ + modify exact [ax dx cl]; + +u32 shld_long_asm(u32 *flags,u32 d, u32 fill, u8 s); +#pragma aux shld_long_asm = \ + "push [edi]" \ + "popf" \ + "shld eax,edx,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [eax] [edx] [cl] \ + value [eax] \ + modify exact [eax edx cl]; + +u16 shrd_word_asm(u32 *flags,u16 d, u16 fill, u8 s); +#pragma aux shrd_word_asm = \ + "push [edi]" \ + "popf" \ + "shrd ax,dx,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] [dx] [cl] \ + value [ax] \ + modify exact [ax dx cl]; + +u32 shrd_long_asm(u32 *flags,u32 d, u32 fill, u8 s); +#pragma aux shrd_long_asm = \ + "push [edi]" \ + "popf" \ + "shrd eax,edx,cl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [eax] [edx] [cl] \ + value [eax] \ + modify exact [eax edx cl]; + +u8 sbb_byte_asm(u32 *flags,u8 d, u8 s); +#pragma aux sbb_byte_asm = \ + "push [edi]" \ + "popf" \ + "sbb al,bl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [al] [bl] \ + value [al] \ + modify exact [al bl]; + +u16 sbb_word_asm(u32 *flags,u16 d, u16 s); +#pragma aux sbb_word_asm = \ + "push [edi]" \ + "popf" \ + "sbb ax,bx" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] [bx] \ + value [ax] \ + modify exact [ax bx]; + +u32 sbb_long_asm(u32 *flags,u32 d, u32 s); +#pragma aux sbb_long_asm = \ + "push [edi]" \ + "popf" \ + "sbb eax,ebx" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [eax] [ebx] \ + value [eax] \ + modify exact [eax ebx]; + +u8 sub_byte_asm(u32 *flags,u8 d, u8 s); +#pragma aux sub_byte_asm = \ + "push [edi]" \ + "popf" \ + "sub al,bl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [al] [bl] \ + value [al] \ + modify exact [al bl]; + +u16 sub_word_asm(u32 *flags,u16 d, u16 s); +#pragma aux sub_word_asm = \ + "push [edi]" \ + "popf" \ + "sub ax,bx" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] [bx] \ + value [ax] \ + modify exact [ax bx]; + +u32 sub_long_asm(u32 *flags,u32 d, u32 s); +#pragma aux sub_long_asm = 
\ + "push [edi]" \ + "popf" \ + "sub eax,ebx" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [eax] [ebx] \ + value [eax] \ + modify exact [eax ebx]; + +void test_byte_asm(u32 *flags,u8 d, u8 s); +#pragma aux test_byte_asm = \ + "push [edi]" \ + "popf" \ + "test al,bl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [al] [bl] \ + modify exact [al bl]; + +void test_word_asm(u32 *flags,u16 d, u16 s); +#pragma aux test_word_asm = \ + "push [edi]" \ + "popf" \ + "test ax,bx" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] [bx] \ + modify exact [ax bx]; + +void test_long_asm(u32 *flags,u32 d, u32 s); +#pragma aux test_long_asm = \ + "push [edi]" \ + "popf" \ + "test eax,ebx" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [eax] [ebx] \ + modify exact [eax ebx]; + +u8 xor_byte_asm(u32 *flags,u8 d, u8 s); +#pragma aux xor_byte_asm = \ + "push [edi]" \ + "popf" \ + "xor al,bl" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [al] [bl] \ + value [al] \ + modify exact [al bl]; + +u16 xor_word_asm(u32 *flags,u16 d, u16 s); +#pragma aux xor_word_asm = \ + "push [edi]" \ + "popf" \ + "xor ax,bx" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [ax] [bx] \ + value [ax] \ + modify exact [ax bx]; + +u32 xor_long_asm(u32 *flags,u32 d, u32 s); +#pragma aux xor_long_asm = \ + "push [edi]" \ + "popf" \ + "xor eax,ebx" \ + "pushf" \ + "pop [edi]" \ + parm [edi] [eax] [ebx] \ + value [eax] \ + modify exact [eax ebx]; + +void imul_byte_asm(u32 *flags,u16 *ax,u8 d,u8 s); +#pragma aux imul_byte_asm = \ + "push [edi]" \ + "popf" \ + "imul bl" \ + "pushf" \ + "pop [edi]" \ + "mov [esi],ax" \ + parm [edi] [esi] [al] [bl] \ + modify exact [esi ax bl]; + +void imul_word_asm(u32 *flags,u16 *ax,u16 *dx,u16 d,u16 s); +#pragma aux imul_word_asm = \ + "push [edi]" \ + "popf" \ + "imul bx" \ + "pushf" \ + "pop [edi]" \ + "mov [esi],ax" \ + "mov [ecx],dx" \ + parm [edi] [esi] [ecx] [ax] [bx]\ + modify exact [esi edi ax bx dx]; + +void imul_long_asm(u32 *flags,u32 *eax,u32 *edx,u32 d,u32 s); +#pragma aux imul_long_asm = \ + 
"push [edi]" \ + "popf" \ + "imul ebx" \ + "pushf" \ + "pop [edi]" \ + "mov [esi],eax" \ + "mov [ecx],edx" \ + parm [edi] [esi] [ecx] [eax] [ebx] \ + modify exact [esi edi eax ebx edx]; + +void mul_byte_asm(u32 *flags,u16 *ax,u8 d,u8 s); +#pragma aux mul_byte_asm = \ + "push [edi]" \ + "popf" \ + "mul bl" \ + "pushf" \ + "pop [edi]" \ + "mov [esi],ax" \ + parm [edi] [esi] [al] [bl] \ + modify exact [esi ax bl]; + +void mul_word_asm(u32 *flags,u16 *ax,u16 *dx,u16 d,u16 s); +#pragma aux mul_word_asm = \ + "push [edi]" \ + "popf" \ + "mul bx" \ + "pushf" \ + "pop [edi]" \ + "mov [esi],ax" \ + "mov [ecx],dx" \ + parm [edi] [esi] [ecx] [ax] [bx]\ + modify exact [esi edi ax bx dx]; + +void mul_long_asm(u32 *flags,u32 *eax,u32 *edx,u32 d,u32 s); +#pragma aux mul_long_asm = \ + "push [edi]" \ + "popf" \ + "mul ebx" \ + "pushf" \ + "pop [edi]" \ + "mov [esi],eax" \ + "mov [ecx],edx" \ + parm [edi] [esi] [ecx] [eax] [ebx] \ + modify exact [esi edi eax ebx edx]; + +void idiv_byte_asm(u32 *flags,u8 *al,u8 *ah,u16 d,u8 s); +#pragma aux idiv_byte_asm = \ + "push [edi]" \ + "popf" \ + "idiv bl" \ + "pushf" \ + "pop [edi]" \ + "mov [esi],al" \ + "mov [ecx],ah" \ + parm [edi] [esi] [ecx] [ax] [bl]\ + modify exact [esi edi ax bl]; + +void idiv_word_asm(u32 *flags,u16 *ax,u16 *dx,u16 dlo,u16 dhi,u16 s); +#pragma aux idiv_word_asm = \ + "push [edi]" \ + "popf" \ + "idiv bx" \ + "pushf" \ + "pop [edi]" \ + "mov [esi],ax" \ + "mov [ecx],dx" \ + parm [edi] [esi] [ecx] [ax] [dx] [bx]\ + modify exact [esi edi ax dx bx]; + +void idiv_long_asm(u32 *flags,u32 *eax,u32 *edx,u32 dlo,u32 dhi,u32 s); +#pragma aux idiv_long_asm = \ + "push [edi]" \ + "popf" \ + "idiv ebx" \ + "pushf" \ + "pop [edi]" \ + "mov [esi],eax" \ + "mov [ecx],edx" \ + parm [edi] [esi] [ecx] [eax] [edx] [ebx]\ + modify exact [esi edi eax edx ebx]; + +void div_byte_asm(u32 *flags,u8 *al,u8 *ah,u16 d,u8 s); +#pragma aux div_byte_asm = \ + "push [edi]" \ + "popf" \ + "div bl" \ + "pushf" \ + "pop [edi]" \ + "mov [esi],al" \ + 
"mov [ecx],ah" \ + parm [edi] [esi] [ecx] [ax] [bl]\ + modify exact [esi edi ax bl]; + +void div_word_asm(u32 *flags,u16 *ax,u16 *dx,u16 dlo,u16 dhi,u16 s); +#pragma aux div_word_asm = \ + "push [edi]" \ + "popf" \ + "div bx" \ + "pushf" \ + "pop [edi]" \ + "mov [esi],ax" \ + "mov [ecx],dx" \ + parm [edi] [esi] [ecx] [ax] [dx] [bx]\ + modify exact [esi edi ax dx bx]; + +void div_long_asm(u32 *flags,u32 *eax,u32 *edx,u32 dlo,u32 dhi,u32 s); +#pragma aux div_long_asm = \ + "push [edi]" \ + "popf" \ + "div ebx" \ + "pushf" \ + "pop [edi]" \ + "mov [esi],eax" \ + "mov [ecx],edx" \ + parm [edi] [esi] [ecx] [eax] [edx] [ebx]\ + modify exact [esi edi eax edx ebx]; + +#else + +static inline u32 get_flags_asm(void) +{ + u32 ret; + + asm ( "pushf\n\t" + "pop %%eax" + : "=a"(ret) + ); + return ret; +} + +static inline u16 aaa_word_asm(u32 *flags, u16 d) +{ + asm ( + "push (%%edi)\n\t" + "popf\n\t" + "aaa\n\t" + "pushf\n\t" + "pop (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d) + ); + return d; +} + +static inline u16 aas_word_asm(u32 *flags, u16 d) +{ + asm ( + "push (%%edi)\n\t" + "popf\n\t" + "aas\n\t" + "pushf\n\t" + "pop (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d) + ); + return d; +} + +static inline u16 aad_word_asm(u32 *flags, u16 d) +{ + asm ( + "push (%%edi)\n\t" + "popf\n\t" + "aad\n\t" + "pushf\n\t" + "pop (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d) + ); + return d; +} + +static inline u16 aam_word_asm(u32 *flags, u16 d) +{ + asm ( + "push (%%edi)\n\t" + "popf\n\t" + "aam\n\t" + "pushf\n\t" + "pop (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d) + ); + return d; +} + +static inline u8 adc_byte_asm(u32 *flags, u8 d, u8 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" + "popf\n\t" + "adcb %%bl, %%al\n\t" + "pushf\n\t" + "popl (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d), "b"(s) + ); + return d; +} + +static inline u16 adc_word_asm(u32 *flags, u16 d, u16 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" + 
"popf\n\t" + "adcw %%bx, %%ax\n\t" + "pushf\n\t" + "popl (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d), "b"(s) + ); + return d; +} + +static inline u32 adc_long_asm(u32 *flags, u32 d, u32 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" + "popf\n\t" + "adcl %%ebx, %%eax\n\t" + "pushf\n\t" + "popl (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d), "b"(s) + ); + return d; +} + +static inline u8 add_byte_asm(u32 *flags, u8 d, u8 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" + "popf\n\t" + "addb %%bl, %%al\n\t" + "pushf\n\t" + "popl (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d), "b"(s) + ); + return d; +} + +static inline u16 add_word_asm(u32 *flags, u16 d, u16 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" + "popf\n\t" + "addw %%bx, %%ax\n\t" + "pushf\n\t" + "popl (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d), "b"(s) + ); + return d; +} + +static inline u32 add_long_asm(u32 *flags, u32 d, u32 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" + "popf\n\t" + "addl %%ebx, %%eax\n\t" + "pushf\n\t" + "popl (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d), "b"(s) + ); + return d; +} + + + +static inline u8 and_byte_asm(u32 *flags, u8 d, u8 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" + "popf\n\t" + "andb %%bl, %%al\n\t" + "pushf\n\t" + "popl (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d), "b"(s) + ); + return d; +} + +static inline u16 and_word_asm(u32 *flags, u16 d, u16 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" + "popf\n\t" + "andw %%bx, %%ax\n\t" + "pushf\n\t" + "popl (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d), "b"(s) + ); + return d; +} + +static inline u32 and_long_asm(u32 *flags, u32 d, u32 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" + "popf\n\t" + "andl %%ebx, %%eax\n\t" + "pushf\n\t" + "popl (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d), "b"(s) + ); + return d; +} + + +static inline u8 cmp_byte_asm(u32 *flags, u8 d, u8 s) +{ + __asm__ 
__volatile__( + "pushl (%%edi)\n\t" + "popf\n\t" + "cmpb %%bl, %%al\n\t" + "pushf\n\t" + "popl (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d), "b"(s) + ); + return d; +} + +static inline u16 cmp_word_asm(u32 *flags, u16 d, u16 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" + "popf\n\t" + "cmpw %%bx, %%ax\n\t" + "pushf\n\t" + "popl (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d), "b"(s) + ); + return d; +} + +static inline u32 cmp_long_asm(u32 *flags, u32 d, u32 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" + "popf\n\t" + "cmpl %%ebx, %%eax\n\t" + "pushf\n\t" + "popl (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d), "b"(s) + ); + return d; +} + + +static inline u8 daa_byte_asm(u32 *flags, u8 d) +{ + asm ( + "push (%%edi)\n\t" + "popf\n\t" + "daa\n\t" + "pushf\n\t" + "pop (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d) + ); + return d; +} + +static inline u8 das_byte_asm(u32 *flags, u8 d) +{ + asm ( + "push (%%edi)\n\t" + "popf\n\t" + "das\n\t" + "pushf\n\t" + "pop (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d) + ); + return d; +} + + + +static inline u8 dec_byte_asm(u32 *flags, u8 d) +{ + asm ( + "push (%%edi)\n\t" + "popf\n\t" + "dec %%al\n\t" + "pushf\n\t" + "pop (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d) + ); + return d; +} + +static inline u16 dec_word_asm(u32 *flags, u16 d) +{ + asm ( + "push (%%edi)\n\t" + "popf\n\t" + "dec %%ax\n\t" + "pushf\n\t" + "pop (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d) + ); + return d; +} + +static inline u32 dec_long_asm(u32 *flags, u32 d) +{ + asm ( + "push (%%edi)\n\t" + "popf\n\t" + "dec %%eax\n\t" + "pushf\n\t" + "pop (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d) + ); + return d; +} + + +static inline u8 inc_byte_asm(u32 *flags, u8 d) +{ + asm ( + "push (%%edi)\n\t" + "popf\n\t" + "inc %%al\n\t" + "pushf\n\t" + "pop (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d) + ); + return d; +} + +static inline u16 inc_word_asm(u32 
*flags, u16 d) +{ + asm ( + "push (%%edi)\n\t" + "popf\n\t" + "inc %%ax\n\t" + "pushf\n\t" + "pop (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d) + ); + return d; +} + +static inline u32 inc_long_asm(u32 *flags, u32 d) +{ + asm ( + "push (%%edi)\n\t" + "popf\n\t" + "inc %%eax\n\t" + "pushf\n\t" + "pop (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d) + ); + return d; +} + +static inline u8 or_byte_asm(u32 *flags, u8 d, u8 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" + "popf\n\t" + "orb %%bl, %%al\n\t" + "pushf\n\t" + "popl (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d), "b"(s) + ); + return d; +} + +static inline u16 or_word_asm(u32 *flags, u16 d, u16 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" + "popf\n\t" + "orw %%bx, %%ax\n\t" + "pushf\n\t" + "popl (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d), "b"(s) + ); + return d; +} + +static inline u32 or_long_asm(u32 *flags, u32 d, u32 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" + "popf\n\t" + "orl %%ebx, %%eax\n\t" + "pushf\n\t" + "popl (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d), "b"(s) + ); + return d; +} + + +static inline u8 neg_byte_asm(u32 *flags, u8 d) +{ + asm ( + "push (%%edi)\n\t" + "popf\n\t" + "neg %%al\n\t" + "pushf\n\t" + "pop (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d) + ); + return d; +} + +static inline u16 neg_word_asm(u32 *flags, u16 d) +{ + asm ( + "push (%%edi)\n\t" + "popf\n\t" + "neg %%ax\n\t" + "pushf\n\t" + "pop (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d) + ); + return d; +} + +static inline u32 neg_long_asm(u32 *flags, u32 d) +{ + asm ( + "push (%%edi)\n\t" + "popf\n\t" + "neg %%eax\n\t" + "pushf\n\t" + "pop (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d) + ); + return d; +} + +static inline u8 not_byte_asm(u32 *flags, u8 d) +{ + asm ( + "push (%%edi)\n\t" + "popf\n\t" + "not %%al\n\t" + "pushf\n\t" + "pop (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d) + ); + return d; +} + 
+static inline u16 not_word_asm(u32 *flags, u16 d) +{ + asm ( + "push (%%edi)\n\t" + "popf\n\t" + "not %%ax\n\t" + "pushf\n\t" + "pop (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d) + ); + return d; +} + +static inline u32 not_long_asm(u32 *flags, u32 d) +{ + asm ( + "push (%%edi)\n\t" + "popf\n\t" + "not %%eax\n\t" + "pushf\n\t" + "pop (%%edi)" + : "=D"(flags), "=a"(d) + : "0"(flags), "1"(d) + ); + return d; +} + + +static inline u8 rcl_byte_asm(u32 *flags, u8 d, u8 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "rclb %%cl,%%al\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "c"(s) ); + return d; +} + +static inline u16 rcl_word_asm(u32 *flags, u16 d, u16 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "rclw %%cl,%%ax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "c"(s) ); + return d; +} + +static inline u32 rcl_long_asm(u32 *flags, u32 d, u32 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "rcll %%cl,%%eax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "c"(s) ); + return d; +} + + +static inline u8 rcr_byte_asm(u32 *flags, u8 d, u8 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "rcrb %%cl,%%al\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "c"(s) ); + return d; +} + +static inline u16 rcr_word_asm(u32 *flags, u16 d, u16 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "rcrw %%cl,%%ax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "c"(s) ); + return d; +} + +static inline u32 rcr_long_asm(u32 *flags, u32 d, u32 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "rcrl %%cl,%%eax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "c"(s) ); + return d; +} + + +static inline u8 rol_byte_asm(u32 *flags, u8 d, u8 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" 
"popf\n\t" + "rolb %%cl,%%al\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "c"(s) ); + return d; +} + +static inline u16 rol_word_asm(u32 *flags, u16 d, u16 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "rolw %%cl,%%ax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "c"(s) ); + return d; +} + +static inline u32 rol_long_asm(u32 *flags, u32 d, u32 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "roll %%cl,%%eax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "c"(s) ); + return d; +} + + +static inline u8 ror_byte_asm(u32 *flags, u8 d, u8 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "rorb %%cl,%%al\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "c"(s) ); + return d; +} + +static inline u16 ror_word_asm(u32 *flags, u16 d, u16 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "rorw %%cl,%%ax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "c"(s) ); + return d; +} + +static inline u32 ror_long_asm(u32 *flags, u32 d, u32 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "rorl %%cl,%%eax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "c"(s) ); + return d; +} + + +static inline u8 shl_byte_asm(u32 *flags, u8 d, u8 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "shlb %%cl,%%al\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "c"(s) ); + return d; +} + +static inline u16 shl_word_asm(u32 *flags, u16 d, u16 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "shlw %%cl,%%ax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "c"(s) ); + return d; +} + +static inline u32 shl_long_asm(u32 *flags, u32 d, u32 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "shll %%cl,%%eax\n\t" + 
"pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "c"(s) ); + return d; +} + + +static inline u8 shr_byte_asm(u32 *flags, u8 d, u8 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "shrb %%cl,%%al\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "c"(s) ); + return d; +} + +static inline u16 shr_word_asm(u32 *flags, u16 d, u16 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "shrw %%cl,%%ax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "c"(s) ); + return d; +} + +static inline u32 shr_long_asm(u32 *flags, u32 d, u32 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "shrl %%cl,%%eax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "c"(s) ); + return d; +} + + +static inline u8 sar_byte_asm(u32 *flags, u8 d, u8 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "sarb %%cl,%%al\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "c"(s) ); + return d; +} + +static inline u16 sar_word_asm(u32 *flags, u16 d, u16 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "sarw %%cl,%%ax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "c"(s) ); + return d; +} + +static inline u32 sar_long_asm(u32 *flags, u32 d, u32 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "sarl %%cl,%%eax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "c"(s) ); + return d; +} + + +static inline u16 shld_word_asm(u32 *flags, u16 data, u16 fill, u8 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "shld %%cl, %%dx, %%ax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(data) : "0"(flags), "1"(data), "d"(fill), "c"(s) ); + return data; +} + +static inline u32 shld_long_asm(u32 *flags, u32 data, u32 fill, u8 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "shldl %%cl, 
%%edx, %%eax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(data) : "0"(flags), "1"(data), "d"(fill), "c"(s) ); + return data; +} + +static inline u16 shrd_word_asm(u32 *flags, u16 data, u16 fill, u8 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "shrd %%cl, %%dx, %%ax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(data) : "0"(flags), "1"(data), "d"(fill), "c"(s) ); + return data; +} + +static inline u32 shrd_long_asm(u32 *flags, u32 data, u32 fill, u8 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "shrdl %%cl, %%edx, %%eax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(data) : "0"(flags), "1"(data), "d"(fill), "c"(s) ); + return data; +} + + +static inline u8 sbb_byte_asm(u32 *flags, u8 d, u8 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "sbbb %%bl, %%al\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "b"(s) ); + return d; +} + +static inline u16 sbb_word_asm(u32 *flags, u16 d, u16 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "sbbw %%bx, %%ax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "b"(s) ); + return d; +} + +static inline u32 sbb_long_asm(u32 *flags, u32 d, u32 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "sbbl %%ebx, %%eax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "b"(s) ); + return d; +} + + + +static inline u8 sub_byte_asm(u32 *flags, u8 d, u8 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "subb %%bl, %%al\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "b"(s) ); + return d; +} + +static inline u16 sub_word_asm(u32 *flags, u16 d, u16 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "subw %%bx, %%ax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "b"(s) ); + return d; +} + +static inline u32 sub_long_asm(u32 *flags, u32 d, u32 s) +{ 
+ __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "subl %%ebx, %%eax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "b"(s) ); + return d; +} + + +static inline u8 xor_byte_asm(u32 *flags, u8 d, u8 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "xorb %%bl, %%al\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "b"(s) ); + return d; +} + +static inline u16 xor_word_asm(u32 *flags, u16 d, u16 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "xorw %%bx, %%ax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "b"(s) ); + return d; +} + +static inline u32 xor_long_asm(u32 *flags, u32 d, u32 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "xorl %%ebx, %%eax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(d) : "0"(flags), "1"(d), "b"(s) ); + return d; +} + + +static inline void test_byte_asm(u32 *flags, u8 d, u8 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "testb %%bl, %%al\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags) : "0"(flags), "a"(d), "b"(s) ); +} + +static inline void test_word_asm(u32 *flags, u16 d, u16 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "testw %%bx, %%ax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags) : "0"(flags), "a"(d), "b"(s) ); +} + +static inline void test_long_asm(u32 *flags, u32 d, u32 s) +{ + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "testl %%ebx, %%eax\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags) : "0"(flags), "a"(d), "b"(s) ); +} + + +static inline void imul_byte_asm(u32 *flags,u16 *ax,u8 d,u8 s) +{ + u16 ret; + + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "imul %%bl\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(ret) : "0"(flags), "a"(d), "b"(s) ); + *ax = ret; +} + +static inline void imul_word_asm(u32 *flags,u16 *ax,u16 *dx,u16 d,u16 s) +{ + u16 ret_ax, ret_dx; + + __asm__ __volatile__( + 
"pushl (%%edi)\n\t" "popf\n\t" + "imulw %%bx\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(ret_ax), "=d"(ret_dx) : "0"(flags), "a"(d), "b"(s) ); + *ax = ret_ax; + *dx = ret_dx; +} + +static inline void imul_long_asm(u32 *flags,u32 *ax,u32 *dx,u32 d,u32 s) +{ + u32 ret_ax, ret_dx; + + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "imull %%ebx\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(ret_ax), "=d"(ret_dx) : "0"(flags), "a"(d), "b"(s) ); + *ax = ret_ax; + *dx = ret_dx; +} + + +static inline void mul_byte_asm(u32 *flags,u16 *ax,u8 d,u8 s) +{ + u16 ret; + + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "mul %%bl\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(ret) : "0"(flags), "a"(d), "b"(s) ); + *ax = ret; +} + +static inline void mul_word_asm(u32 *flags,u16 *ax,u16 *dx,u16 d,u16 s) +{ + u16 ret_ax, ret_dx; + + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "mulw %%bx\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(ret_ax), "=d"(ret_dx) : "0"(flags), "a"(d), "b"(s) ); + *ax = ret_ax; + *dx = ret_dx; +} + +static inline void mul_long_asm(u32 *flags,u32 *ax,u32 *dx,u32 d,u32 s) +{ + u32 ret_ax, ret_dx; + + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "mull %%ebx\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(ret_ax), "=d"(ret_dx) : "0"(flags), "a"(d), "b"(s) ); + *ax = ret_ax; + *dx = ret_dx; +} + + +static inline void idiv_byte_asm(u32 *flags,u8 *al,u8 *ah,u16 d,u8 s) +{ + u16 ret_ax; + + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "idiv %%bl\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(ret_ax) : "0"(flags), "a"(d), "b"(s) ); + *al = ret_ax & 0xff; + *ah = (ret_ax >> 8) & 0xff; +} + +static inline void idiv_word_asm(u32 *flags,u16 *ax,u16 *dx,u16 dlo,u16 dhi,u16 s) +{ + u16 ret_ax, ret_dx; + + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "idiv %%bx\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(ret_ax), "=d"(ret_dx) : "0"(flags), 
"a"(dlo), "d"(dhi), "b"(s) ); + *ax = ret_ax; + *dx = ret_dx; +} + +static inline void idiv_long_asm(u32 *flags,u32 *eax,u32 *edx,u32 dlo,u32 dhi,u32 s) +{ + u32 ret_ax, ret_dx; + + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "idiv %%ebx\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(ret_ax), "=d"(ret_dx) : "0"(flags), "a"(dlo), "d"(dhi), "b"(s) ); + *eax = ret_ax; + *edx = ret_dx; +} + + +static inline void div_byte_asm(u32 *flags,u8 *al,u8 *ah,u16 d,u8 s) +{ + u16 ret_ax; + + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "div %%bl\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(ret_ax) : "0"(flags), "a"(d), "b"(s) ); + *al = ret_ax & 0xff; + *ah = (ret_ax >> 8) & 0xff; +} + +static inline void div_word_asm(u32 *flags,u16 *ax,u16 *dx,u16 dlo,u16 dhi,u16 s) +{ + u16 ret_ax, ret_dx; + + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "div %%bx\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(ret_ax), "=d"(ret_dx) : "0"(flags), "a"(dlo), "d"(dhi), "b"(s) ); + *ax = ret_ax; + *dx = ret_dx; +} + +static inline void div_long_asm(u32 *flags,u32 *eax,u32 *edx,u32 dlo,u32 dhi,u32 s) +{ + u32 ret_ax, ret_dx; + + __asm__ __volatile__( + "pushl (%%edi)\n\t" "popf\n\t" + "div %%ebx\n\t" + "pushf\n\t" "popl (%%edi)" + : "=D"(flags), "=a"(ret_ax), "=d"(ret_dx) : "0"(flags), "a"(dlo), "d"(dhi), "b"(s) ); + *eax = ret_ax; + *edx = ret_dx; +} + +#endif + +#endif /* __X86EMU_PRIM_ASM_H */ diff --git a/arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/prim_ops.h b/arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/prim_ops.h new file mode 100644 index 000000000000..0ea825d3c1a7 --- /dev/null +++ b/arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/prim_ops.h @@ -0,0 +1,142 @@ +/**************************************************************************** +* +* Realmode X86 Emulator Library +* +* Copyright (C) 1991-2004 SciTech Software, Inc. 
+* Copyright (C) David Mosberger-Tang +* Copyright (C) 1999 Egbert Eich +* +* ======================================================================== +* +* Permission to use, copy, modify, distribute, and sell this software and +* its documentation for any purpose is hereby granted without fee, +* provided that the above copyright notice appear in all copies and that +* both that copyright notice and this permission notice appear in +* supporting documentation, and that the name of the authors not be used +* in advertising or publicity pertaining to distribution of the software +* without specific, written prior permission. The authors makes no +* representations about the suitability of this software for any purpose. +* It is provided "as is" without express or implied warranty. +* +* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR +* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF +* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +* PERFORMANCE OF THIS SOFTWARE. +* +* ======================================================================== +* +* Language: ANSI C +* Environment: Any +* Developer: Kendall Bennett +* +* Description: Header file for primitive operation functions. 
+* +****************************************************************************/ + +#ifndef __X86EMU_PRIM_OPS_H +#define __X86EMU_PRIM_OPS_H + +#ifdef __cplusplus +extern "C" { /* Use "C" linkage when in C++ mode */ +#endif + +u16 aaa_word (u16 d); +u16 aas_word (u16 d); +u16 aad_word (u16 d); +u16 aam_word (u8 d); +u8 adc_byte (u8 d, u8 s); +u16 adc_word (u16 d, u16 s); +u32 adc_long (u32 d, u32 s); +u8 add_byte (u8 d, u8 s); +u16 add_word (u16 d, u16 s); +u32 add_long (u32 d, u32 s); +u8 and_byte (u8 d, u8 s); +u16 and_word (u16 d, u16 s); +u32 and_long (u32 d, u32 s); +u8 cmp_byte (u8 d, u8 s); +u16 cmp_word (u16 d, u16 s); +u32 cmp_long (u32 d, u32 s); +u8 daa_byte (u8 d); +u8 das_byte (u8 d); +u8 dec_byte (u8 d); +u16 dec_word (u16 d); +u32 dec_long (u32 d); +u8 inc_byte (u8 d); +u16 inc_word (u16 d); +u32 inc_long (u32 d); +u8 or_byte (u8 d, u8 s); +u16 or_word (u16 d, u16 s); +u32 or_long (u32 d, u32 s); +u8 neg_byte (u8 s); +u16 neg_word (u16 s); +u32 neg_long (u32 s); +u8 not_byte (u8 s); +u16 not_word (u16 s); +u32 not_long (u32 s); +u8 rcl_byte (u8 d, u8 s); +u16 rcl_word (u16 d, u8 s); +u32 rcl_long (u32 d, u8 s); +u8 rcr_byte (u8 d, u8 s); +u16 rcr_word (u16 d, u8 s); +u32 rcr_long (u32 d, u8 s); +u8 rol_byte (u8 d, u8 s); +u16 rol_word (u16 d, u8 s); +u32 rol_long (u32 d, u8 s); +u8 ror_byte (u8 d, u8 s); +u16 ror_word (u16 d, u8 s); +u32 ror_long (u32 d, u8 s); +u8 shl_byte (u8 d, u8 s); +u16 shl_word (u16 d, u8 s); +u32 shl_long (u32 d, u8 s); +u8 shr_byte (u8 d, u8 s); +u16 shr_word (u16 d, u8 s); +u32 shr_long (u32 d, u8 s); +u8 sar_byte (u8 d, u8 s); +u16 sar_word (u16 d, u8 s); +u32 sar_long (u32 d, u8 s); +u16 shld_word (u16 d, u16 fill, u8 s); +u32 shld_long (u32 d, u32 fill, u8 s); +u16 shrd_word (u16 d, u16 fill, u8 s); +u32 shrd_long (u32 d, u32 fill, u8 s); +u8 sbb_byte (u8 d, u8 s); +u16 sbb_word (u16 d, u16 s); +u32 sbb_long (u32 d, u32 s); +u8 sub_byte (u8 d, u8 s); +u16 sub_word (u16 d, u16 s); +u32 sub_long (u32 d, u32 s); +void 
test_byte (u8 d, u8 s); +void test_word (u16 d, u16 s); +void test_long (u32 d, u32 s); +u8 xor_byte (u8 d, u8 s); +u16 xor_word (u16 d, u16 s); +u32 xor_long (u32 d, u32 s); +void imul_byte (u8 s); +void imul_word (u16 s); +void imul_long (u32 s); +void imul_long_direct(u32 *res_lo, u32* res_hi,u32 d, u32 s); +void mul_byte (u8 s); +void mul_word (u16 s); +void mul_long (u32 s); +void idiv_byte (u8 s); +void idiv_word (u16 s); +void idiv_long (u32 s); +void div_byte (u8 s); +void div_word (u16 s); +void div_long (u32 s); +void ins (int size); +void outs (int size); +u16 mem_access_word (int addr); +void push_word (u16 w); +void push_long (u32 w); +u16 pop_word (void); +u32 pop_long (void); + +#ifdef __cplusplus +} /* End of "C" linkage for C++ */ +#endif + +#endif /* __X86EMU_PRIM_OPS_H */ + diff --git a/arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/x86emui.h b/arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/x86emui.h new file mode 100644 index 000000000000..1cc3c08a5eaa --- /dev/null +++ b/arch/e2k/boot/bios/video/x86emu/src/x86emu/x86emu/x86emui.h @@ -0,0 +1,101 @@ +/**************************************************************************** +* +* Realmode X86 Emulator Library +* +* Copyright (C) 1991-2004 SciTech Software, Inc. +* Copyright (C) David Mosberger-Tang +* Copyright (C) 1999 Egbert Eich +* +* ======================================================================== +* +* Permission to use, copy, modify, distribute, and sell this software and +* its documentation for any purpose is hereby granted without fee, +* provided that the above copyright notice appear in all copies and that +* both that copyright notice and this permission notice appear in +* supporting documentation, and that the name of the authors not be used +* in advertising or publicity pertaining to distribution of the software +* without specific, written prior permission. The authors makes no +* representations about the suitability of this software for any purpose. 
+* It is provided "as is" without express or implied warranty. +* +* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR +* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF +* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +* PERFORMANCE OF THIS SOFTWARE. +* +* ======================================================================== +* +* Language: ANSI C +* Environment: Any +* Developer: Kendall Bennett +* +* Description: Header file for system specific functions. These functions +* are always compiled and linked in the OS depedent libraries, +* and never in a binary portable driver. +* +****************************************************************************/ + +#ifndef __X86EMU_X86EMUI_H +#define __X86EMU_X86EMUI_H + +/* If we are compiling in C++ mode, we can compile some functions as + * inline to increase performance (however the code size increases quite + * dramatically in this case). 
+ */ + +#if defined(__cplusplus) && !defined(_NO_INLINE) +#define _INLINE inline +#else +#define _INLINE static +#endif + +/* Get rid of unused parameters in C++ compilation mode */ + +#ifdef __cplusplus +#define X86EMU_UNUSED(v) +#else +#define X86EMU_UNUSED(v) v +#endif + +#include "x86emu.h" +#include "x86emu/regs.h" +#include "x86emu/debug.h" +#include "x86emu/decode.h" +#include "x86emu/ops.h" +#include "x86emu/prim_ops.h" +#include "x86emu/fpu.h" +#include "x86emu/fpu_regs.h" +#ifndef __KERNEL__ +#include +#include +#include +#endif + +/*--------------------------- Inline Functions ----------------------------*/ + +#ifdef __cplusplus +extern "C" { /* Use "C" linkage when in C++ mode */ +#endif + +extern u8 (X86APIP sys_rdb)(u32 addr); +extern u16 (X86APIP sys_rdw)(u32 addr); +extern u32 (X86APIP sys_rdl)(u32 addr); +extern void (X86APIP sys_wrb)(u32 addr,u8 val); +extern void (X86APIP sys_wrw)(u32 addr,u16 val); +extern void (X86APIP sys_wrl)(u32 addr,u32 val); + +extern u8 (X86APIP sys_inb)(X86EMU_pioAddr addr); +extern u16 (X86APIP sys_inw)(X86EMU_pioAddr addr); +extern u32 (X86APIP sys_inl)(X86EMU_pioAddr addr); +extern void (X86APIP sys_outb)(X86EMU_pioAddr addr,u8 val); +extern void (X86APIP sys_outw)(X86EMU_pioAddr addr,u16 val); +extern void (X86APIP sys_outl)(X86EMU_pioAddr addr,u32 val); + +#ifdef __cplusplus +} /* End of "C" linkage for C++ */ +#endif + +#endif /* __X86EMU_X86EMUI_H */ diff --git a/arch/e2k/boot/boot.h b/arch/e2k/boot/boot.h new file mode 100644 index 000000000000..e97e4ba098cc --- /dev/null +++ b/arch/e2k/boot/boot.h @@ -0,0 +1,38 @@ +/* + * Small boot for simulator + */ + +#include +#include + +#ifndef _E2K_BOOT_BOOT_H_ +#define _E2K_BOOT_BOOT_H_ + +/* + * E2K physical memory layout + */ + +#define E2K_MAIN_MEM_REGION_START 0x0000000000000000UL /* from 0 */ +#define E2K_MAIN_MEM_REGION_END 0x0000000080000000UL /* up to 2Gb */ +#define E2K_EXT_MEM_REGION_START 0x0000000100000000UL /* from 4Gb */ +#define E2K_EXT_MEM_REGION_END 
0x0000001000000000UL /* up to 64Gb */ + +extern void *malloc_aligned(int size, int alignment); +extern void *malloc(int size); +extern void bios_mem_init(long membase, long memsize); +extern e2k_addr_t get_busy_memory_end(void); + +extern int decompress_kernel(ulong base); +extern void rom_putc(char c); +extern int rom_getc(void); +extern int rom_tstc(void); + +#ifdef CONFIG_SMP +extern void smp_start_cpus(void); +#endif /* CONFIG_SMP */ + +#ifdef CONFIG_STATE_SAVE +extern void load_machine_state_new(boot_info_t *boot_info); +#endif /* CONFIG_STATE_SAVE */ + +#endif /* _E2K_BOOT_BOOT_H_ */ diff --git a/arch/e2k/boot/boot.lds b/arch/e2k/boot/boot.lds new file mode 100644 index 000000000000..cd44e5b53a50 --- /dev/null +++ b/arch/e2k/boot/boot.lds @@ -0,0 +1,19 @@ +OUTPUT_FORMAT("elf64-e2k", "elf64-e2k", "elf64-e2k") +/* OUTPUT_ARCH(e2k) Defined by Makefile */ +ENTRY(start) + +SECTIONS { + .bootblock : { + _bootblock = .; + arch/e2k/boot/bootblock.o(.rodata) + _ebootblock = .; + } + + . = _bootblock + 0x10000; + + .kernel : { + _kernel = .; + arch/e2k/boot/bpiggy.o(.rodata) + _ekernel = .; + } +} diff --git a/arch/e2k/boot/boot_io.h b/arch/e2k/boot/boot_io.h new file mode 100644 index 000000000000..787747fc2e64 --- /dev/null +++ b/arch/e2k/boot/boot_io.h @@ -0,0 +1,55 @@ + +#ifndef _E2K_BOOT_IO_H_ +#define _E2K_BOOT_IO_H_ + +//#include +//#include +#include + +/* + * E2K I/O ports for BIOS + */ + +#if defined(CONFIG_E2K_FULL_SIC) +#define PHYS_X86_IO_BASE E2K_FULL_SIC_IO_AREA_PHYS_BASE +#elif defined(CONFIG_E2K_LEGACY_SIC) +#define PHYS_X86_IO_BASE E2K_LEGACY_SIC_IO_AREA_PHYS_BASE +#else +#error "Undefined machine or SIC type" +#endif + +extern unsigned char bios_inb(unsigned short port); +extern unsigned short bios_inw(unsigned short port); +extern unsigned int bios_inl(unsigned short port); +extern unsigned long bios_inll(unsigned short port); +extern void bios_outb(unsigned char byte, unsigned short port); +extern void bios_outw(unsigned short byte, unsigned short 
port); +extern void bios_outl(unsigned int byte, unsigned short port); +extern void bios_outll(unsigned long byte, unsigned short port); + +#ifdef CONFIG_E2K_SIC +extern u8 bios_conf_inb(int domain, unsigned char bus, unsigned long port); +extern u16 bios_conf_inw(int domain, unsigned char bus, unsigned long port); +extern u32 bios_conf_inl(int domain, unsigned char bus, unsigned long port); +extern void bios_conf_outb(int domain, unsigned char bus, u8 byte, + unsigned long port); +extern void bios_conf_outw(int domain, unsigned char bus, u16 halwword, + unsigned long port); +extern void bios_conf_outl(int domain, unsigned char bus, u32 word, + unsigned long port); +extern u8 bios_ioh_e3s_inb(int domain, unsigned char bus, unsigned long port); +extern u16 bios_ioh_e3s_inw(int domain, unsigned char bus, unsigned long port); +extern u32 bios_ioh_e3s_inl(int domain, unsigned char bus, unsigned long port); +extern void bios_ioh_e3s_outb(int domain, unsigned char bus, unsigned char byte, + unsigned long port); +extern void bios_ioh_e3s_outw(int domain, unsigned char bus, u16 halfword, + unsigned long port); +extern void bios_ioh_e3s_outl(int domain, unsigned char bus, u32 word, + unsigned long port); + +#endif /* CONFIG_E2K_SIC */ + +extern void rom_puts(char *s); +extern void rom_printk(char const *fmt, ...); + +#endif /* _E2K_BOOT_IO_H_ */ diff --git a/arch/e2k/boot/bootblock.c b/arch/e2k/boot/bootblock.c new file mode 100644 index 000000000000..412ec96f5dca --- /dev/null +++ b/arch/e2k/boot/bootblock.c @@ -0,0 +1,18 @@ +#include +#include + +extern u64 __kernel_size; + +const char gap[256] = {0}; + +const struct bootblock_struct boot_block = +{ + info: { + signature : X86BOOT_SIGNATURE, /* signature */ + kernel_size : (u64)&__kernel_size, /* kernel size */ + kernel_args_string : CONFIG_CMDLINE, /* kernel command line */ + }, + bootblock_ver : BOOTBLOCK_VER, /* bootblock version number */ + x86_marker : 0xAA55 /* x86 marker */ +}; + diff --git 
a/arch/e2k/boot/compressed.lds b/arch/e2k/boot/compressed.lds new file mode 100644 index 000000000000..eb378d0b76b6 --- /dev/null +++ b/arch/e2k/boot/compressed.lds @@ -0,0 +1,37 @@ +OUTPUT_FORMAT("elf64-e2k", "elf64-e2k", "elf64-e2k") +/* OUTPUT_ARCH(e2k) Defined by Makefile */ +ENTRY(start) + +SECTIONS { + _start = .; + .bootblock : { + _bootblock = .; + arch/e2k/boot/bootblock.o(.rodata) + _ebootblock = .; + } + + . = _bootblock + 0x10000; + + .loader : { + . += 0x6000; + *(.boot_entry) + *(.text) + *(EXCLUDE_FILE (arch/e2k/boot/cpiggy.o) .rodata) + *(.data) + + _bss = .; + *(.bss) + _ebss = .; + } + .kernel : { + _kernel = .; + arch/e2k/boot/cpiggy.o(.rodata) + _ekernel = .; + } + .got : { + _got = .; + *(.got) + _egot = .; + } + _end = .; +} diff --git a/arch/e2k/boot/compressed_guest.lds b/arch/e2k/boot/compressed_guest.lds new file mode 100644 index 000000000000..66be6d7852e3 --- /dev/null +++ b/arch/e2k/boot/compressed_guest.lds @@ -0,0 +1,37 @@ +OUTPUT_FORMAT("elf64-e2k", "elf64-e2k", "elf64-e2k") +/* OUTPUT_ARCH(e2k) Defined by Makefile */ +ENTRY(start) + +SECTIONS { + _start = .; + .bootblock : { + _bootblock = .; + arch/e2k/boot/bootblock.o(.rodata) + _ebootblock = .; + } + + . = _bootblock + 0x10000; + + .loader : { + . 
+= 0x10000; + *(.boot_entry) + *(.text) + *(EXCLUDE_FILE (arch/e2k/boot/cpiggy.o) .rodata) + *(.data) + + _bss = .; + *(.bss) + _ebss = .; + } + .kernel : { + _kernel = .; + arch/e2k/boot/cpiggy.o(.rodata) + _ekernel = .; + } + .got : { + _got = .; + *(.got) + _egot = .; + } + _end = .; +} diff --git a/arch/e2k/boot/console.c b/arch/e2k/boot/console.c new file mode 100644 index 000000000000..24dc7f1a5fdc --- /dev/null +++ b/arch/e2k/boot/console.c @@ -0,0 +1,46 @@ + +#include +#include +#include +#include "boot_io.h" + +#if defined(CONFIG_BIOS) +#include "bios/bios.h" +#endif + +static inline unsigned int e2k_rom_debug_inl(__u16 port) +{ + unsigned int ret; + ret = NATIVE_READ_MAS_W(PHYS_X86_IO_BASE + port, MAS_IOADDR); + return ret; +} + +static inline void e2k_rom_debug_outb(__u16 port, __u8 byte) +{ + NATIVE_WRITE_MAS_B(PHYS_X86_IO_BASE + port, byte, MAS_IOADDR); +} + +static inline void e2k_rom_debug_putc(char c) +{ + while (e2k_rom_debug_inl(LMS_CONS_DATA_PORT)); + + e2k_rom_debug_outb(LMS_CONS_DATA_PORT, c); + e2k_rom_debug_outb(LMS_CONS_DATA_PORT, 0); +} + + +void console_probe(void) +{ +#if defined(CONFIG_BIOS) + if (e2k_rom_debug_inl(LMS_CONS_DATA_PORT) != 0xFFFFFFFF) { + hardware.dbgport = 1; + }; +#endif +} + + +void console_putc(char c) +{ + e2k_rom_debug_putc(c); +} + diff --git a/arch/e2k/boot/decompress.c b/arch/e2k/boot/decompress.c new file mode 100644 index 000000000000..d9638f28b54e --- /dev/null +++ b/arch/e2k/boot/decompress.c @@ -0,0 +1,720 @@ +#define DEBUG_BOOT_MODE 0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BOOT_HEAP_SIZE 0x1000000 +static unsigned long free_mem_ptr; +static unsigned long free_mem_end_ptr; + +#define STATIC static + +static void error_loop(char *s); +#define assert(condition) \ +do { \ + if (unlikely(!(condition))) \ + error_loop("Assertion failed: " #condition); \ +} while (0) + +#ifdef CONFIG_KERNEL_GZIP +#include 
"../../../../lib/decompress_inflate.c" +#endif + +#ifdef CONFIG_KERNEL_BZIP2 +#include "../../../../lib/decompress_bunzip2.c" +#endif + +#ifdef CONFIG_KERNEL_LZ4 +#include "../../../../lib/decompress_unlz4.c" +#endif + +#ifdef CONFIG_KERNEL_XZ +#define memmove memmove +#include "../../../../lib/decompress_unxz.c" +#endif + +#ifdef CONFIG_KERNEL_LZMA +#include "../../../../lib/decompress_unlzma.c" +#endif + +#ifdef CONFIG_KERNEL_LZO +#include "../../../../lib/decompress_unlzo.c" +#endif + +/* Symbols defined by linker scripts */ +extern char _bss[], _ebss[]; +extern char _got[], _egot[]; +extern char _kernel[], _ekernel[]; +extern char _start[], _end[]; +extern char __orig_kernel_size[]; + +struct mem_bank { + unsigned long mb_bottom; + unsigned long mb_top; +}; + +/* Add some number to account for banks being broken by reserved memory */ +#define MAX_MEM_BANKS (L_MAX_MEM_NUMNODES * L_MAX_NODE_PHYS_BANKS + 64) +struct board_mem { + unsigned long bm_size; + unsigned bm_nBanks; + struct mem_bank bm_Banks[MAX_MEM_BANKS]; +}; + +/* + * Put 'got_updating_in_progress' and 'unpacking_in_progress' into + * compiler-initialized .data section so that all processors can access it + * before .bss section is cleared. 
+ */ +static int got_updating_in_progress = 1; +static int unpacking_in_progress = 1; + +static boot_info_t *boot_info; + +static unsigned long kernel_address; + +static unsigned long io_area_phys_base; + +#ifdef CONFIG_KVM_GUEST_KERNEL +#define STARTUP_TTABLE_ENTRY_OFFSET 0x10000 + +static unsigned long +dec_guest_mmio(unsigned long addr, u64 value, u8 size, u8 is_write) +{ + unsigned long data[1]; + + if (is_write) + data[0] = value; + + assert(!HYPERVISOR_guest_mmio_request(addr, data, size, is_write)); + + return data[0]; +} + +static void dec_writeb(u8 b, void __iomem *addr) +{ + dec_guest_mmio((unsigned long) addr, b, 1, 1); +} + +static u8 dec_readb(void __iomem *addr) +{ + return dec_guest_mmio((unsigned long) addr, 0, 1, 0); +} + +static u32 dec_readl(void __iomem *addr) +{ + return dec_guest_mmio((unsigned long) addr, 0, 4, 0); +} +#else +#define STARTUP_TTABLE_ENTRY_OFFSET 0x6000 + +static void dec_writeb(u8 b, void __iomem *addr) +{ + NATIVE_WRITE_MAS_B((unsigned long) addr, b, MAS_IOADDR); +} + +static u8 dec_readb(void __iomem *addr) +{ + return NATIVE_READ_MAS_B((unsigned long) addr, MAS_IOADDR); +} + +static u32 dec_readl(void __iomem *addr) +{ + return NATIVE_READ_MAS_W((unsigned long) addr, MAS_IOADDR); +} +#endif + +static inline u8 am85c30_com_inb_command(u64 iomem_addr, u8 reg_num) +{ + dec_writeb(reg_num, (void __iomem *) iomem_addr); + return dec_readb((void __iomem *) iomem_addr); +} + +static inline void am85c30_com_outb(u64 iomem_addr, u8 byte) +{ + dec_writeb(byte, (void __iomem *) iomem_addr); +} + +static inline unsigned int dec_epic_is_bsp(void) +{ + union cepic_ctrl reg; + + reg.raw = dec_readl((void __iomem *)(EPIC_DEFAULT_PHYS_BASE + CEPIC_CTRL)); + return reg.bits.bsp_core; +} + +static inline unsigned int dec_apic_is_bsp(void) +{ + return BootStrap(dec_readl((void __iomem *)(APIC_DEFAULT_PHYS_BASE + APIC_BSP))); +} + +#define AM85C30_RR0 0x00 +#define AM85C30_D2 (0x01 << 2) +static void am85c30_putc(unsigned long port, char c) +{ 
+ /* + * Output to ttyS0 + */ + while ((am85c30_com_inb_command(port, AM85C30_RR0) & AM85C30_D2) == 0) + E2K_NOP(7); + am85c30_com_outb(port + 0x01, c); + + /* + * Output to ttyS1 + */ + port += 2; + while ((am85c30_com_inb_command(port, AM85C30_RR0) & AM85C30_D2) == 0) + E2K_NOP(7); + am85c30_com_outb(port + 0x01, c); +} + +static void __putc(unsigned long port, char c) +{ + am85c30_putc(port, c); +} + +static void putc(char c) +{ + unsigned long port = boot_info->serial_base; + + if (!port) + return; + + __putc(port, c); + if (c == '\n') + __putc(port, '\r'); +} + +static void puts(char *s) +{ + while (*s) + putc(*s++); +} + +/* + * Use global variables to prevent using data stack + */ +static const char hex_numbers_for_debug[16] = { + '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' +}; + +static void put_u64(u64 num, int newline) +{ + char u64_char[18]; + int i; + + if (newline) { + u64_char[16] = '\n'; + u64_char[17] = 0; + } else { + u64_char[16] = 0; + } + + for (i = 0; i < 16; i++) { + u64_char[15 - i] = hex_numbers_for_debug[num % 16]; + num = num / 16; + } + puts(u64_char); +} + + +static void error(char *str) +{ + puts(str); + putc('\n'); +} + +static void error_loop(char *s) +{ + puts(s); + for (;;) + E2K_NOP(7); +} + +static void probe_node_memory(bootblock_struct_t *bootblock, int node, + bank_info_t *bank_info, bank_info_t **bank_info_ex_p, + struct board_mem *bm) +{ + boot_info_t *bootinfo = &bootblock->info; + bank_info_t *bank_info_ex = *bank_info_ex_p; + int bank, bm_bank; + + for (bank = 0; bank < L_MAX_NODE_PHYS_BANKS; bank++) { + unsigned long bank_start, bank_end; + + if (bank >= L_MAX_NODE_PHYS_BANKS_FUSTY) { + int banks_ex_id = bank_info - bootinfo->bios.banks_ex; + + if (bank == L_MAX_NODE_PHYS_BANKS_FUSTY) { + bank_info = bank_info_ex; + banks_ex_id = bank_info - + bootinfo->bios.banks_ex; + } + if (banks_ex_id >= L_MAX_PHYS_BANKS_EX) { + bank_info_ex = bank_info; + puts("WARNING: Node has phys banks in 
extended area, but extended area is full, ignored\n"); + goto out; + } + } + + if (bank_info->size == 0) { + if (bank >= L_MAX_NODE_PHYS_BANKS_FUSTY) + bank_info_ex = bank_info + 1; + goto out; /* no more banks on node */ + } + + bank_start = bank_info->address; + bank_end = bank_start + bank_info->size; + +#ifdef DEBUG + puts("Memory bank from 0x"); + put_u64(bank_start, false); + puts(" to 0x"); + put_u64(bank_end, true); +#endif + + bm_bank = bm->bm_nBanks; + if (bm_bank > 0 && + bm->bm_Banks[bm_bank - 1].mb_top == bank_start) { + /* Continue previous bank */ + --bm_bank; + bm->bm_Banks[bm_bank].mb_top = bank_end; + } else { + /* Add new bank */ + assert(bm_bank < MAX_MEM_BANKS); + bm->bm_Banks[bm_bank].mb_bottom = bank_start; + bm->bm_Banks[bm_bank].mb_top = bank_end; + ++bm->bm_nBanks; + } + + ++bank_info; + } + + if (bank == L_MAX_NODE_PHYS_BANKS) { + bank_info_ex = bank_info; + puts("WARNING: Node last phys bank for node in extended area is not null, ignored\n"); + goto out; + } + + if (bank < L_MAX_NODE_PHYS_BANKS_FUSTY) { + for (; bank < L_MAX_NODE_PHYS_BANKS_FUSTY; bank++) { + if (!bank_info++->size) + goto out; + } + } else { + bank_info_ex = bank_info; + } + + while (bank_info_ex++->size) { + if (++bank >= L_MAX_NODE_PHYS_BANKS) { + puts("WARNING: Node last phys bank for node in extended area is not null, ignored\n"); + break; + } + if (bank_info_ex - bootinfo->bios.banks_ex >= + L_MAX_PHYS_BANKS_EX) { + puts("WARNING: Node last phys bank in extended area is not null, ignored\n"); + break; + } + } + +out: + *bank_info_ex_p = bank_info_ex; +} + +/* + * probe_memory - initialize free memory list + */ +static void probe_memory(bootblock_struct_t *bootblock, struct board_mem *bm) +{ + boot_info_t *bootinfo = &bootblock->info; + bank_info_t *bank_info_ex = bootinfo->bios.banks_ex; + u_int64_t phys_nodes_map = bootinfo->nodes_map; + int node; + + for (node = 0; node < L_MAX_MEM_NUMNODES; node++) { + bank_info_t *bank_info = bootinfo->nodes_mem[node].banks; + 
+ if (!(phys_nodes_map & (1UL << node)) || bank_info->size == 0) + continue; + + probe_node_memory(bootblock, node, bank_info, + &bank_info_ex, bm); + } +} + +static int intersect(struct mem_bank *b1, const struct mem_bank *b2, + struct mem_bank *b3, int ignore_busy) +{ + assert(b1->mb_top >= b1->mb_bottom && b2->mb_top >= b2->mb_bottom); + + if (b1->mb_bottom < b2->mb_bottom && b1->mb_top > b2->mb_top) { + /* Cut one bank into two */ + b3->mb_bottom = b1->mb_bottom; + b3->mb_top = b2->mb_bottom; + b3++; + b3->mb_bottom = b2->mb_top; + b3->mb_top = b1->mb_top; + return 2; + } + + if (b1->mb_bottom >= b2->mb_top || b1->mb_top <= b2->mb_bottom) { + /* No intersection */ + *b3 = *b1; + } else { + /* Do not allow double reservations */ + assert(ignore_busy || (b1->mb_bottom <= b2->mb_bottom && + b1->mb_top >= b2->mb_top)); + + /* Intersection */ + b3->mb_bottom = (b1->mb_bottom < b2->mb_bottom) ? + b1->mb_bottom : b2->mb_top; + b3->mb_top = (b1->mb_top > b2->mb_top) ? b1->mb_top : + b2->mb_bottom; + } + + if (b3->mb_bottom < b3->mb_top) + return 1; + + *b3 = (struct mem_bank) {0, 0}; + + return 0; +} + +static void sub(struct board_mem *from, const struct mem_bank *b, + struct board_mem *to, int ignore_busy) +{ + int n, i; + + for(i = 0, n = 0; i < from->bm_nBanks; i++) + n += intersect(&from->bm_Banks[i], b, + &to->bm_Banks[n], ignore_busy); + + to->bm_nBanks = n; +} + +static struct board_mem bm_tmp; +static void reserve_memory_area(struct board_mem *bm, unsigned long phys_addr, + unsigned long mem_size, int ignore_busy, + char *name) +{ + struct mem_bank reserved; + unsigned long end_addr = phys_addr + mem_size; + + assert(mem_size); + + phys_addr = round_down(phys_addr, PAGE_SIZE); + end_addr = round_up(end_addr, PAGE_SIZE); + mem_size = end_addr - phys_addr; + +#ifdef DEBUG + puts("Reserved "); + puts(name); + puts(" area: address 0x"); + put_u64(phys_addr, false); + puts(", size 0x"); + put_u64(mem_size, true); +#endif + + reserved.mb_bottom = phys_addr; + 
reserved.mb_top = end_addr; + + sub(bm, &reserved, &bm_tmp, ignore_busy); + + /* Do not allow double reservations */ + assert(ignore_busy || bm->bm_nBanks != bm_tmp.bm_nBanks || + memcmp(bm->bm_Banks, &bm_tmp.bm_Banks, sizeof(*bm))); + + memcpy(bm, &bm_tmp, sizeof(*bm)); +} + +/* + * Reserve the needed memory from MP - tables + */ + +static void boot_reserve_mp_table(boot_info_t *bootinfo, struct board_mem *bm) +{ + struct intel_mp_floating *mpf; + + if (bootinfo->mp_table_base == 0UL) + return; + + /* + * MP floating specification table + */ + reserve_memory_area(bm, bootinfo->mp_table_base, PAGE_SIZE, + 1, "MP floating table"); + + mpf = (struct intel_mp_floating *) bootinfo->mp_table_base; + + /* + * MP configuration table + */ + if (mpf->mpf_physptr != 0UL) + reserve_memory_area(bm, mpf->mpf_physptr, PAGE_SIZE, + 1, "MP configuration table"); +} + +static void reserve_memory(boot_info_t *bootinfo, struct board_mem *bm) +{ + unsigned long area_base, area_size; + psp_struct_t PSP = {{{0}}, {{0}}}; + pcsp_struct_t PCSP = {{{0}}, {{0}}}; + e2k_usbr_t USBR = {{0}}; + usd_struct_t USD = {{{0}}, {{0}}}; + int bank; + + reserve_memory_area(bm, 0, PAGE_SIZE, 0, "0-page"); + + reserve_memory_area(bm, (unsigned long)_start, + (unsigned long) (_end - _start), 0, "kernel image"); + + reserve_memory_area(bm, 640 * 1024 /* ROM, VGA ... 
*/, + (1024 - 640) * 1024, 0, "PC"); + + for (bank = 0; bank < bootinfo->num_of_busy; bank++) { + bank_info_t *busy_area = &bootinfo->busy[bank]; + + reserve_memory_area(bm, busy_area->address, busy_area->size, + 1, "BIOS data"); + } + + if (boot_info->ramdisk_size) + reserve_memory_area(bm, boot_info->ramdisk_base, + boot_info->ramdisk_size, 1, "ramdisk"); + + reserve_memory_area(bm, 0x7ee00000, PAGE_SIZE, 1, "APIC page"); + + boot_reserve_mp_table(bootinfo, bm); + + PSP = READ_PSP_REG(); + reserve_memory_area(bm, PSP.PSP_base, PSP.PSP_size, 1, + "kernel boot-time procedures stack"); + + PCSP = READ_PCSP_REG(); + reserve_memory_area(bm, PCSP.PCSP_base, PCSP.PCSP_size, 1, + "kernel boot-time procedure chain stack"); + + USBR = read_USBR_reg(); + area_base = USBR.USBR_base; + read_USD_reg(&USD); + area_size = area_base - USD.USD_base + USD.USD_size; + area_base -= area_size; + reserve_memory_area(bm, area_base, area_size, 1, + "kernel boot-time data stack"); +} + +static unsigned long find_free_memory(struct board_mem *bm, + unsigned long size, unsigned long align) +{ + unsigned long start, end; + struct mem_bank *bank; + int search_low = 0; + int i; + +retry: + for (i = 0; i < bm->bm_nBanks; i++) { + bank = &bm->bm_Banks[i]; + start = round_up(bank->mb_bottom, align); + end = round_down(bank->mb_top, align); + + if (start < end && size <= end - start && + (search_low || start >= 0x100000000UL)) { + reserve_memory_area(bm, start, size, 0, "allocated"); + return start; + } + } + + /* First try to find non-DMA memory */ + if (!search_low) { + search_low = 1; + goto retry; + } + + return -ENOMEM; +} + +static __always_inline void jump_to_image(unsigned long kernel_address, + int n, bootblock_struct_t *bootblock) +{ + e2k_oscud_lo_t oscud_lo; + + /* + * Before jumping we must correct %oscud and %cud + * registers which contain kernel entry address. 
+ */ + oscud_lo = READ_OSCUD_LO_REG(); + AS(oscud_lo).base = kernel_address; + WRITE_OSCUD_LO_REG(oscud_lo); + WRITE_CUD_LO_REG(oscud_lo); + + E2K_JUMP_ABSOLUTE_WITH_ARGUMENTS_2(kernel_address + STARTUP_TTABLE_ENTRY_OFFSET, + n, bootblock); +} + +static struct board_mem memory; +extern int machdep_setup_features(int cpu, int revision); + +/* + * Now we can use global variables (i.e. machine) and linker defined symbols (i.e. _bss) + */ +noinline void decompress_kernel_updated_got(int n, bootblock_struct_t *bootblock, + int bsp, e2k_idr_t idr, unsigned long orig_kernel_size) +{ + struct board_mem *bm = &memory; + int ret; + + if (!bsp) { + while (unpacking_in_progress) + E2K_NOP(7); + /* Barrier between reading `unpacking_in_progress' + * and reading unpacked kernel */ + smp_rmb(); + jump_to_image(kernel_address, n, bootblock); + } + + /* + * Setup machine features + */ + assert(!machdep_setup_features(idr.IDR_mdl, idr.IDR_rev)); + + /* + * Clear .bss (guest variant uses machine) + */ + memset(_bss, 0, _ebss - _bss); + + /* + * Initialize console and say hello + */ + boot_info = &bootblock->info; + + if (read_IDR_reg().mdl == IDR_E1CP_MDL) + io_area_phys_base = E2K_LEGACY_SIC_IO_AREA_PHYS_BASE; + else + io_area_phys_base = E2K_FULL_SIC_IO_AREA_PHYS_BASE; + + puts("\nDecompressor started\n"); + +#ifdef DEBUG + puts("Cleared .bss at 0x"); + put_u64(_bss, false); + puts(", size 0x"); + put_u64(_ebss - _bss, true); +#endif + + /* + * Mark free and reserved memory + */ + probe_memory(bootblock, bm); + + reserve_memory(&bootblock->info, bm); + + /* + * Find free memory area for heap + */ + free_mem_ptr = find_free_memory(bm, BOOT_HEAP_SIZE + PAGE_SIZE, 8); + if (IS_ERR_VALUE(free_mem_ptr)) + error_loop("ERROR: could not find free memory area for heap\n"); + + /* free_mem_ptr must not be equal to 0 */ + if (!free_mem_ptr) + free_mem_ptr += PAGE_SIZE; + free_mem_end_ptr = free_mem_ptr + BOOT_HEAP_SIZE; + + puts("Heap from 0x"); + put_u64(free_mem_ptr, false); + puts(" to 
0x"); + put_u64(free_mem_end_ptr, true); + + /* + * Decompress the kernel + */ + kernel_address = find_free_memory(bm, + orig_kernel_size, 0x400000); + if (IS_ERR_VALUE(kernel_address)) + error_loop("ERROR: could not find free memory area to unpack kernel to\n"); + + puts("Unpacking 0x"); + put_u64(orig_kernel_size, false); + puts(" bytes from 0x"); + put_u64((unsigned long)_kernel, false); + puts(" to 0x"); + put_u64(kernel_address, false); + puts("...\n"); + + ret = __decompress(_kernel, _ekernel - _kernel, NULL, + NULL, (char *) kernel_address, 0, NULL, error); + if (ret) + error_loop("ERROR: failed to unpack kernel\n"); + + puts("Done\n"); + + /* + * Tell others they can proceed + */ + bootblock->info.kernel_base = kernel_address; + bootblock->info.kernel_size = orig_kernel_size; + smp_wmb(); /* Wait for unpacked kernel and bootblock changes */ + unpacking_in_progress = 0; + + /* + * Jump to the kernel + */ + jump_to_image(kernel_address, n, bootblock); +} + +/* + * Updating GOT should be done in a separate function. Otherwise compiler might put + * GOT load before GOT update (even ignoring the memory clobbers). + * Using global variables isn't allowed here. 
+ */ +__section(.boot_entry) +void decompress_kernel(int n, bootblock_struct_t *bootblock) +{ + unsigned long load_offset, got, egot, addr; + unsigned long orig_kernel_size = 0; + e2k_idr_t idr; + int bsp; + + /* + * Only bootstrap processor proceeds to unpacking + */ + idr = read_IDR_reg(); + + if (idr.mdl >= IDR_E12C_MDL) + bsp = dec_epic_is_bsp(); + else + bsp = dec_apic_is_bsp(); + + if (!bsp) { + while (got_updating_in_progress) + E2K_NOP(7); + /* Barrier between reading `got_updating_in_progress' + * and reading GOT */ + smp_rmb(); + } else { + load_offset = AS(READ_OSCUD_LO_REG()).base - 0x10000; + got = (unsigned long)_got + load_offset; + egot = (unsigned long)_egot + load_offset; + + /* orig_kernel_size should not be shifted by load_offset */ + orig_kernel_size = (unsigned long)__orig_kernel_size; + + /* Update GOT */ + for (addr = got; addr < egot; addr += 8) + *((unsigned long *)addr) += load_offset; + + smp_wmb(); /* Wait for GOT changes */ + got_updating_in_progress = 0; + } + + decompress_kernel_updated_got(n, bootblock, bsp, idr, orig_kernel_size); +} diff --git a/arch/e2k/boot/defkeymap.c b/arch/e2k/boot/defkeymap.c new file mode 100644 index 000000000000..735984fcdfe9 --- /dev/null +++ b/arch/e2k/boot/defkeymap.c @@ -0,0 +1,262 @@ +/* Do not edit this file! 
It was automatically generated by */ +/* loadkeys --mktable defkeymap.map > defkeymap.c */ + +#include +#include +#include + +u_short plain_map[NR_KEYS] = { + 0xf200, 0xf01b, 0xf031, 0xf032, 0xf033, 0xf034, 0xf035, 0xf036, + 0xf037, 0xf038, 0xf039, 0xf030, 0xf02d, 0xf03d, 0xf07f, 0xf009, + 0xfb71, 0xfb77, 0xfb65, 0xfb72, 0xfb74, 0xfb79, 0xfb75, 0xfb69, + 0xfb6f, 0xfb70, 0xf05b, 0xf05d, 0xf201, 0xf702, 0xfb61, 0xfb73, + 0xfb64, 0xfb66, 0xfb67, 0xfb68, 0xfb6a, 0xfb6b, 0xfb6c, 0xf03b, + 0xf027, 0xf060, 0xf700, 0xf05c, 0xfb7a, 0xfb78, 0xfb63, 0xfb76, + 0xfb62, 0xfb6e, 0xfb6d, 0xf02c, 0xf02e, 0xf02f, 0xf700, 0xf30c, + 0xf703, 0xf020, 0xf207, 0xf100, 0xf101, 0xf102, 0xf103, 0xf104, + 0xf105, 0xf106, 0xf107, 0xf108, 0xf109, 0xf208, 0xf209, 0xf307, + 0xf308, 0xf309, 0xf30b, 0xf304, 0xf305, 0xf306, 0xf30a, 0xf301, + 0xf302, 0xf303, 0xf300, 0xf310, 0xf206, 0xf200, 0xf03c, 0xf10a, + 0xf10b, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf30e, 0xf702, 0xf30d, 0xf01c, 0xf701, 0xf205, 0xf114, 0xf603, + 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116, + 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, +}; + +u_short shift_map[NR_KEYS] = { + 0xf200, 0xf01b, 0xf021, 0xf040, 0xf023, 0xf024, 0xf025, 0xf05e, + 0xf026, 0xf02a, 0xf028, 0xf029, 0xf05f, 0xf02b, 0xf07f, 0xf009, + 0xfb51, 0xfb57, 0xfb45, 0xfb52, 0xfb54, 0xfb59, 0xfb55, 0xfb49, + 0xfb4f, 0xfb50, 0xf07b, 0xf07d, 0xf201, 0xf702, 0xfb41, 0xfb53, + 0xfb44, 0xfb46, 0xfb47, 0xfb48, 0xfb4a, 0xfb4b, 0xfb4c, 0xf03a, + 0xf022, 0xf07e, 0xf700, 0xf07c, 0xfb5a, 0xfb58, 0xfb43, 0xfb56, + 0xfb42, 0xfb4e, 0xfb4d, 0xf03c, 0xf03e, 0xf03f, 0xf700, 0xf30c, + 0xf703, 0xf020, 0xf207, 0xf10a, 0xf10b, 0xf10c, 0xf10d, 0xf10e, + 0xf10f, 0xf110, 0xf111, 0xf112, 0xf113, 0xf213, 0xf203, 0xf307, + 0xf308, 0xf309, 0xf30b, 0xf304, 0xf305, 0xf306, 0xf30a, 0xf301, + 0xf302, 0xf303, 0xf300, 0xf310, 0xf206, 0xf200, 0xf03e, 0xf10a, + 0xf10b, 0xf200, 
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf30e, 0xf702, 0xf30d, 0xf200, 0xf701, 0xf205, 0xf114, 0xf603, + 0xf20b, 0xf601, 0xf602, 0xf117, 0xf600, 0xf20a, 0xf115, 0xf116, + 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, +}; + +u_short altgr_map[NR_KEYS] = { + 0xf200, 0xf200, 0xf200, 0xf040, 0xf200, 0xf024, 0xf200, 0xf200, + 0xf07b, 0xf05b, 0xf05d, 0xf07d, 0xf05c, 0xf200, 0xf200, 0xf200, + 0xfb71, 0xfb77, 0xf918, 0xfb72, 0xfb74, 0xfb79, 0xfb75, 0xfb69, + 0xfb6f, 0xfb70, 0xf200, 0xf07e, 0xf201, 0xf702, 0xf914, 0xfb73, + 0xf917, 0xf919, 0xfb67, 0xfb68, 0xfb6a, 0xfb6b, 0xfb6c, 0xf200, + 0xf200, 0xf200, 0xf700, 0xf200, 0xfb7a, 0xfb78, 0xf916, 0xfb76, + 0xf915, 0xfb6e, 0xfb6d, 0xf200, 0xf200, 0xf200, 0xf700, 0xf30c, + 0xf703, 0xf200, 0xf207, 0xf50c, 0xf50d, 0xf50e, 0xf50f, 0xf510, + 0xf511, 0xf512, 0xf513, 0xf514, 0xf515, 0xf208, 0xf202, 0xf911, + 0xf912, 0xf913, 0xf30b, 0xf90e, 0xf90f, 0xf910, 0xf30a, 0xf90b, + 0xf90c, 0xf90d, 0xf90a, 0xf310, 0xf206, 0xf200, 0xf07c, 0xf516, + 0xf517, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf30e, 0xf702, 0xf30d, 0xf200, 0xf701, 0xf205, 0xf114, 0xf603, + 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116, + 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, +}; + +u_short ctrl_map[NR_KEYS] = { + 0xf200, 0xf200, 0xf200, 0xf000, 0xf01b, 0xf01c, 0xf01d, 0xf01e, + 0xf01f, 0xf07f, 0xf200, 0xf200, 0xf01f, 0xf200, 0xf008, 0xf200, + 0xf011, 0xf017, 0xf005, 0xf012, 0xf014, 0xf019, 0xf015, 0xf009, + 0xf00f, 0xf010, 0xf01b, 0xf01d, 0xf201, 0xf702, 0xf001, 0xf013, + 0xf004, 0xf006, 0xf007, 0xf008, 0xf00a, 0xf00b, 0xf00c, 0xf200, + 0xf007, 0xf000, 0xf700, 0xf01c, 0xf01a, 0xf018, 0xf003, 0xf016, + 0xf002, 0xf00e, 0xf00d, 0xf200, 0xf20e, 0xf07f, 0xf700, 0xf30c, + 0xf703, 0xf000, 0xf207, 0xf100, 0xf101, 0xf102, 0xf103, 0xf104, + 0xf105, 0xf106, 0xf107, 
0xf108, 0xf109, 0xf208, 0xf204, 0xf307, + 0xf308, 0xf309, 0xf30b, 0xf304, 0xf305, 0xf306, 0xf30a, 0xf301, + 0xf302, 0xf303, 0xf300, 0xf310, 0xf206, 0xf200, 0xf200, 0xf10a, + 0xf10b, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf30e, 0xf702, 0xf30d, 0xf01c, 0xf701, 0xf205, 0xf114, 0xf603, + 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116, + 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, +}; + +u_short shift_ctrl_map[NR_KEYS] = { + 0xf200, 0xf200, 0xf200, 0xf000, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf01f, 0xf200, 0xf200, 0xf200, + 0xf011, 0xf017, 0xf005, 0xf012, 0xf014, 0xf019, 0xf015, 0xf009, + 0xf00f, 0xf010, 0xf200, 0xf200, 0xf201, 0xf702, 0xf001, 0xf013, + 0xf004, 0xf006, 0xf007, 0xf008, 0xf00a, 0xf00b, 0xf00c, 0xf200, + 0xf200, 0xf200, 0xf700, 0xf200, 0xf01a, 0xf018, 0xf003, 0xf016, + 0xf002, 0xf00e, 0xf00d, 0xf200, 0xf200, 0xf200, 0xf700, 0xf30c, + 0xf703, 0xf200, 0xf207, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf208, 0xf200, 0xf307, + 0xf308, 0xf309, 0xf30b, 0xf304, 0xf305, 0xf306, 0xf30a, 0xf301, + 0xf302, 0xf303, 0xf300, 0xf310, 0xf206, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf30e, 0xf702, 0xf30d, 0xf200, 0xf701, 0xf205, 0xf114, 0xf603, + 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116, + 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, +}; + +u_short alt_map[NR_KEYS] = { + 0xf200, 0xf81b, 0xf831, 0xf832, 0xf833, 0xf834, 0xf835, 0xf836, + 0xf837, 0xf838, 0xf839, 0xf830, 0xf82d, 0xf83d, 0xf87f, 0xf809, + 0xf871, 0xf877, 0xf865, 0xf872, 0xf874, 0xf879, 0xf875, 0xf869, + 0xf86f, 0xf870, 0xf85b, 0xf85d, 0xf80d, 0xf702, 0xf861, 0xf873, + 0xf864, 0xf866, 0xf867, 0xf868, 0xf86a, 0xf86b, 0xf86c, 0xf83b, + 0xf827, 0xf860, 0xf700, 
0xf85c, 0xf87a, 0xf878, 0xf863, 0xf876, + 0xf862, 0xf86e, 0xf86d, 0xf82c, 0xf82e, 0xf82f, 0xf700, 0xf30c, + 0xf703, 0xf820, 0xf207, 0xf500, 0xf501, 0xf502, 0xf503, 0xf504, + 0xf505, 0xf506, 0xf507, 0xf508, 0xf509, 0xf208, 0xf209, 0xf907, + 0xf908, 0xf909, 0xf30b, 0xf904, 0xf905, 0xf906, 0xf30a, 0xf901, + 0xf902, 0xf903, 0xf900, 0xf310, 0xf206, 0xf200, 0xf83c, 0xf50a, + 0xf50b, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf30e, 0xf702, 0xf30d, 0xf01c, 0xf701, 0xf205, 0xf114, 0xf603, + 0xf118, 0xf210, 0xf211, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116, + 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, +}; + +u_short ctrl_alt_map[NR_KEYS] = { + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf811, 0xf817, 0xf805, 0xf812, 0xf814, 0xf819, 0xf815, 0xf809, + 0xf80f, 0xf810, 0xf200, 0xf200, 0xf201, 0xf702, 0xf801, 0xf813, + 0xf804, 0xf806, 0xf807, 0xf808, 0xf80a, 0xf80b, 0xf80c, 0xf200, + 0xf200, 0xf200, 0xf700, 0xf200, 0xf81a, 0xf818, 0xf803, 0xf816, + 0xf802, 0xf80e, 0xf80d, 0xf200, 0xf200, 0xf200, 0xf700, 0xf30c, + 0xf703, 0xf200, 0xf207, 0xf500, 0xf501, 0xf502, 0xf503, 0xf504, + 0xf505, 0xf506, 0xf507, 0xf508, 0xf509, 0xf208, 0xf200, 0xf307, + 0xf308, 0xf309, 0xf30b, 0xf304, 0xf305, 0xf306, 0xf30a, 0xf301, + 0xf302, 0xf303, 0xf300, 0xf20c, 0xf206, 0xf200, 0xf200, 0xf50a, + 0xf50b, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + 0xf30e, 0xf702, 0xf30d, 0xf200, 0xf701, 0xf205, 0xf114, 0xf603, + 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf20c, + 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, +}; + +ushort *key_maps[MAX_NR_KEYMAPS] = { + plain_map, shift_map, altgr_map, NULL, + ctrl_map, shift_ctrl_map, NULL, NULL, + alt_map, NULL, NULL, NULL, + ctrl_alt_map, NULL +}; + +unsigned int 
keymap_count = 7; + +/* + * Philosophy: most people do not define more strings, but they who do + * often want quite a lot of string space. So, we statically allocate + * the default and allocate dynamically in chunks of 512 bytes. + */ + +char func_buf[] = { + '\033', '[', '[', 'A', 0, + '\033', '[', '[', 'B', 0, + '\033', '[', '[', 'C', 0, + '\033', '[', '[', 'D', 0, + '\033', '[', '[', 'E', 0, + '\033', '[', '1', '7', '~', 0, + '\033', '[', '1', '8', '~', 0, + '\033', '[', '1', '9', '~', 0, + '\033', '[', '2', '0', '~', 0, + '\033', '[', '2', '1', '~', 0, + '\033', '[', '2', '3', '~', 0, + '\033', '[', '2', '4', '~', 0, + '\033', '[', '2', '5', '~', 0, + '\033', '[', '2', '6', '~', 0, + '\033', '[', '2', '8', '~', 0, + '\033', '[', '2', '9', '~', 0, + '\033', '[', '3', '1', '~', 0, + '\033', '[', '3', '2', '~', 0, + '\033', '[', '3', '3', '~', 0, + '\033', '[', '3', '4', '~', 0, + '\033', '[', '1', '~', 0, + '\033', '[', '2', '~', 0, + '\033', '[', '3', '~', 0, + '\033', '[', '4', '~', 0, + '\033', '[', '5', '~', 0, + '\033', '[', '6', '~', 0, + '\033', '[', 'M', 0, + '\033', '[', 'P', 0, +}; + +char *funcbufptr = func_buf; +int funcbufsize = sizeof(func_buf); +int funcbufleft = 0; /* space left */ + +char *func_table[MAX_NR_FUNC] = { + func_buf + 0, + func_buf + 5, + func_buf + 10, + func_buf + 15, + func_buf + 20, + func_buf + 25, + func_buf + 31, + func_buf + 37, + func_buf + 43, + func_buf + 49, + func_buf + 55, + func_buf + 61, + func_buf + 67, + func_buf + 73, + func_buf + 79, + func_buf + 85, + func_buf + 91, + func_buf + 97, + func_buf + 103, + func_buf + 109, + func_buf + 115, + func_buf + 120, + func_buf + 125, + func_buf + 130, + func_buf + 135, + func_buf + 140, + func_buf + 145, + NULL, + NULL, + func_buf + 149, + NULL, +}; + +struct kbdiacruc accent_table[MAX_DIACR] = { + {'`', 'A', 0300}, {'`', 'a', 0340}, + {'\'', 'A', 0301}, {'\'', 'a', 0341}, + {'^', 'A', 0302}, {'^', 'a', 0342}, + {'~', 'A', 0303}, {'~', 'a', 0343}, + {'"', 'A', 0304}, {'"', 
'a', 0344}, + {'O', 'A', 0305}, {'o', 'a', 0345}, + {'0', 'A', 0305}, {'0', 'a', 0345}, + {'A', 'A', 0305}, {'a', 'a', 0345}, + {'A', 'E', 0306}, {'a', 'e', 0346}, + {',', 'C', 0307}, {',', 'c', 0347}, + {'`', 'E', 0310}, {'`', 'e', 0350}, + {'\'', 'E', 0311}, {'\'', 'e', 0351}, + {'^', 'E', 0312}, {'^', 'e', 0352}, + {'"', 'E', 0313}, {'"', 'e', 0353}, + {'`', 'I', 0314}, {'`', 'i', 0354}, + {'\'', 'I', 0315}, {'\'', 'i', 0355}, + {'^', 'I', 0316}, {'^', 'i', 0356}, + {'"', 'I', 0317}, {'"', 'i', 0357}, + {'-', 'D', 0320}, {'-', 'd', 0360}, + {'~', 'N', 0321}, {'~', 'n', 0361}, + {'`', 'O', 0322}, {'`', 'o', 0362}, + {'\'', 'O', 0323}, {'\'', 'o', 0363}, + {'^', 'O', 0324}, {'^', 'o', 0364}, + {'~', 'O', 0325}, {'~', 'o', 0365}, + {'"', 'O', 0326}, {'"', 'o', 0366}, + {'/', 'O', 0330}, {'/', 'o', 0370}, + {'`', 'U', 0331}, {'`', 'u', 0371}, + {'\'', 'U', 0332}, {'\'', 'u', 0372}, + {'^', 'U', 0333}, {'^', 'u', 0373}, + {'"', 'U', 0334}, {'"', 'u', 0374}, + {'\'', 'Y', 0335}, {'\'', 'y', 0375}, + {'T', 'H', 0336}, {'t', 'h', 0376}, + {'s', 's', 0337}, {'"', 'y', 0377}, + {'s', 'z', 0337}, {'i', 'j', 0377}, +}; + +unsigned int accent_table_size = 68; diff --git a/arch/e2k/boot/dts/Makefile b/arch/e2k/boot/dts/Makefile new file mode 100644 index 000000000000..1c24ce0628f4 --- /dev/null +++ b/arch/e2k/boot/dts/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0 +dtb-$(CONFIG_MCST) += e1cp_e1cmt_tablet.dts +dtb-$(CONFIG_MCST) += e1cp_m2e-uvp.dts +dtb-$(CONFIG_MCST) += e1cp_mbe1c-pc.dts +dtb-$(CONFIG_MCST) += e1cp_p2p8-sip-s1.dts +dtb-$(CONFIG_MCST) += e2c_mono-pc.dts +dtb-$(CONFIG_MCST) += e4c_apk-pc4c.dts +dtb-$(CONFIG_MCST) += e4c_el-2s-4.dts +dtb-$(CONFIG_MCST) += e4c_mbe2s-pc.dts +dtb-$(CONFIG_MCST) += e8c2_uatx_se.dts +dtb-$(CONFIG_MCST) += e8c_mbe8c-pc.dts +dtb-$(CONFIG_MCST) += e8c_swtx.dts diff --git a/arch/e2k/boot/dts/e1cp_e1cmt_tablet.dts b/arch/e2k/boot/dts/e1cp_e1cmt_tablet.dts new file mode 100644 index 000000000000..6f8331dfd290 --- /dev/null 
+++ b/arch/e2k/boot/dts/e1cp_e1cmt_tablet.dts @@ -0,0 +1,212 @@ +/* + * E1CMT + * TVGI.469555.374 + * (Tablet) + */ +/dts-v1/; + +/*#include */ +/*#include */ +/*#include */ + +/ { + /* version = ""; */ + model = "e1c+,mcst,e1cmt,tablet"; + compatible = "mcst,e1cmt"; + #address-cells = <1>; + #size-cells = <0>; + + /* IOHUB2 I2C */ + l_i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + compatible = "l_i2c"; + label = "iohub2 i2c"; + + i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + lm96163@4c { + /* hwmon/lm63.ko (DD2:LM96163CISD) */ + compatible = "lm63"; + reg = <0x4c>; + label = "FAN Control (X1)"; + + temp1_label = "Internal Temp"; + temp2_label = "IOHUB Temp"; + }; + }; + + i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + pca9534@20 { + /* gpio/gpio-pca953x.ko (DD20:PCA9534PW) */ + compatible = "nxp,pca9534"; + reg = <0x20>; + label = "spmc_gpio"; + + gpio-controller; + ngpios = <8>; + #gpio-cells = <2>; + gpio-line-names = + "SPMC_BATLOW_3V3#", + "SPMC_AC_PWR_PRSNT_3V3", + "none", + "none", + "none", + "none", + "none", + "none"; + }; + }; + + i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + + /* Pressure Sensor */ + bmp280@76 { + /* iio/pressure/bmp280.ko (DA14:BMP280) */ + compatible = "bosch,bmp280"; + reg = <0x76>; + label = "pressure sensor"; + }; + + /* Light Sensor */ + opt3001@44 { + /* iio/light/opt3001.ko (DD17:OPT3001DNPT) */ + compatible = "ti,opt3001"; + reg = <0x44>; + label = "light sensor"; + }; + + /* (DA17:L96) - GPS @20, @21 */ + }; + + i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + + /* Battery Management unit */ + bq40z60@b { + /* power/supply/sbs-batterypow.ko (DA11:BQ40Z60RHBT) */ + compatible = "sbs,sbs-battery"; + reg = <0xb>; + label = "battery management"; + + sbs,i2c-retry-count = <2>; + sbs,poll-retry-count = <10>; + /* sbs,battery-detect-gpios = <&gpio0 ? 
0>; */ + }; + + /* (X5:DF52-4S-0.8H(21)) */ + }; + }; + + /* PMBUS I2C */ + pmc_i2c { + #address-cells = <1>; + #size-cells = <0>; + compatible = "pmc_i2c"; + label = "pmbus i2c"; + + /* touchscreen sensor */ + gt9110@14 { + /* input/touchscreen/goodix.ko (X2:DD???:) */ + compatible = "goodix,gt9110"; + reg = <0x14>; + label = "touchscreen"; + + interrupt-parent = <&gpio0>; + /*interrupts = <0 0>;*/ + irq-gpios = <&gpio0 0 0>; /* TS_INT GPIO_ACTIVE_HIGH */ + reset-gpios = <&gpio0 1 0>; /* TS_RST GPIO_ACTIVE_HIGH */ + /* + touchscreen-inverted-x = 0; + touchscreen-inverted-y = 0; + touchscreen-swapped-x-y = 0; + */ + }; + + /* 9 Axis MEMS MotionTracking - is 2 in 1 package: */ + icm20948@69 { + /* FIXME: use new driver */ + /* 1) 3-axis accelerometer, 3-axis gyroscope */ + /* iio/imu/inv_mpu6050/inv-mpu6050-i2c.ko (DD16:ICM-20948) */ + compatible = "invensense,mpu9255"; /* mpu9250 ? */ + reg = <0x69>; + label = "accelerometer, gyroscope"; + + interrupt-parent = <&gpio0>; + /* irq-gpios = <&gpio0 2 1>; /* INT_ICM GPIO_ACTIVE_LOW */ + interrupts = <2 8>; /* INT_ICM IRQ_TYPE_LEVEL_LOW */ + /*mount-matrix = x0, y0, z0, x1, y1, z1, x2, y2, z2;*/ + i2c-gate { + #address-cells = <1>; + #size-cells = <0>; + + akm8963@c { + /* ak8975.ko (DD16:ICM-20948) */ + compatible = "ak,ak8975"; + reg = <0x0c>; + label = "compass"; + }; + }; + }; + + /* (DD29:IT6251FN) - LVDS to DisplayPort 1.1a Transmitter + * error!?! 
--> connect to I2C_GFX3_DDC_1V8 (TP11, TP12) + */ + }; + + /* IOHUB2 SPI */ + l_spi@0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "l_spi"; + reg = <0>; + label = "iohub2 spi"; + + boot@0 { + /* DD23:S25FL064P0XNFI001) */ + compatible = "mcst,boot"; + reg = <0>; + spi-max-frequency = <25000000>; + }; + rtc-cy14b101p@1 { + /* (DD24:CY14B101PA-SFXI) */ + compatible = "rtc-cy14b101p"; + reg = <1>; + spi-max-frequency = <12500000>; + }; + }; + + /* IOHUB2 GPIO */ + gpio0:l_gpio@0 { + compatible = "mcst,gpio"; + reg = <0x0>; + label = "L-GPIO"; + + gpio-controller; + ngpios = <32>; + #gpio-cells = <2>; + gpio-line-names = + /* 0.. 1 */ "TS_INT", "TS_RST", /* touchscreen */ + /* 2.. 3 */ "INT_ICM", "nc", /* MotionTracking */ + /* 4.. 7 */ "GPSPPS", "TP10", "S3", "S4", + /* 8..11 */ "nc", "nc", "nc", "nc", + /* 12..15 */ "nc", "nc", "nc", "nc", + /* 16..19 */ "nc", "nc", "nc", "nc", + /* 20..23 */ "nc", "nc", "nc", "nc", + /* 24..27 */ "nc", "nc", "nc", "nc", + /* 28..31 */ "nc", "nc", "nc", "nc"; + }; +}; diff --git a/arch/e2k/boot/dts/e1cp_m2e-uvp.dts b/arch/e2k/boot/dts/e1cp_m2e-uvp.dts new file mode 100644 index 000000000000..e9ffa6843738 --- /dev/null +++ b/arch/e2k/boot/dts/e1cp_m2e-uvp.dts @@ -0,0 +1,141 @@ +/* + * M2E-UVP + * TVGI.469555.385 ver.1 + * (UVM_KMI) + */ +/dts-v1/; + +/*#include */ + +/ { + /* version = ""; */ + model = "e1c+,mcst,m2e-uvp"; + compatible = "mcst,m2e-uvp"; + #address-cells = <1>; + #size-cells = <0>; + + /* IOHUB2 I2C */ + l_i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + compatible = "l_i2c"; + label = "iohub2 i2c"; + + i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + lm96163@4c { + /* hwmon/lm63.ko (DD19:LM96163CISD) */ + compatible = "lm63"; + reg = <0x4c>; + label = "IOHUB Temp Sensor"; + + temp1_label = "Internal Temp"; + temp2_label = "IOHUB Temp"; + /* no fan control */ + }; + }; + i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + /* pndt006@38 from 
M1R:i2c@1 */ + + /* KSZ9896CTX@64 from PK-UVP:I2C_1 (M1R:i2c@1) */ + }; + i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + + /* KSZ9896CTX@64 from PK-UVP:I2C_2 (M1R:i2c@2) */ + }; + i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + + /* config SENSORS_PMBUS */ + pndt006@10 { + /* hwmon/pmbus/pmbus.ko (DA7:PNDT006A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x10>; + label = "+1V0_P8 (in +3V3)"; + }; + pndt006@14 { + /* hwmon/pmbus/pmbus.ko (DA8:PNDT006A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x14>; + label = "+2V5_IOH (in +5V)"; + }; + }; + }; + + /* PMBUS I2C */ + pmc_i2c { + #address-cells = <1>; + #size-cells = <0>; + compatible = "pmc_i2c"; + label = "pmbus i2c"; + + /* config SENSORS_PMBUS */ + pndt012@10 { + /* hwmon/pmbus/pmbus.ko (DA2:PNDT012A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x10>; + label = "+0V9_CPU (in +3V3)"; + }; + pndt006@13 { + /* hwmon/pmbus/pmbus.ko (DA3:PNDT006A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x13>; + label = "+PWR_MC (in +5V)"; + }; + }; + + /* IOHUB2 SPI */ + l_spi@0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "l_spi"; + reg = <0>; + label = "iohub2 spi"; + + boot@0 { + /* DD13:S25FL128SAGNFI001) */ + compatible = "mcst,boot"; + reg = <0>; + spi-max-frequency = <25000000>; + }; + rtc-cy14b101p@1 { + /* (DD14:CY14B101PA-SFXI) */ + compatible = "rtc-cy14b101p"; + reg = <1>; + spi-max-frequency = <12500000>; + }; + }; + + /* IOHUB2 GPIO */ + gpio0:l_gpio@0 { + compatible = "mcst,gpio"; + reg = <0x0>; + label = "L-GPIO"; + gpio-controller; + ngpios = <32>; + #gpio-cells = <2>; + + gpio-line-names = + /* 0.. 3 */ "nc", "nc", "nc", "nc", + /* 4.. 
7 */ "GA0", "GA1", "nc", "nc", + /* 8..11 */ "nc", "nc", "nc", "nc", + /* 12..15 */ "nc", "nc", "nc", "nc", + /* 16..19 */ "nc", "nc", "nc", "nc", + /* 20..23 */ "nc", "nc", "nc", "nc", + /* 24..27 */ "nc", "nc", "nc", "nc", + /* 28..31 */ "nc", "nc", "nc", "nc"; + }; +}; diff --git a/arch/e2k/boot/dts/e1cp_mbe1c-pc.dts b/arch/e2k/boot/dts/e1cp_mbe1c-pc.dts new file mode 100644 index 000000000000..8f9b85a5f9a0 --- /dev/null +++ b/arch/e2k/boot/dts/e1cp_mbe1c-pc.dts @@ -0,0 +1,124 @@ +/* + * MBE1C-PC + * TVGI.469555.331 + * (101-PC) + */ +/dts-v1/; + +/*#include */ + +/ { + /* version = ""; */ + model = "e1c+,mcst,mbe1c-pc,101-pc"; + compatible = "mcst,mbe1c-pc"; + #address-cells = <1>; + #size-cells = <0>; + + /* IOHUB2 I2C */ + l_i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + compatible = "l_i2c"; + label = "iohub2 i2c"; + + i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + lm96163@4c { + /* hwmon/lm63.ko (DD9:LM96163CISD) */ + compatible = "lm63"; + reg = <0x4c>; + label = "FAN Control (X29)"; + + temp1_label = "Internal Temp"; + temp2_label = "IOHUB Temp"; + }; + + /* (X32, X33) DIMMs: 0x18 0x1A */ + }; + i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + /* empty */ + }; + i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + + /* empty */ + }; + i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + + /* config SENSORS_PMBUS */ + pdt012@10 { + /* hwmon/pmbus/pmbus.ko (DA3:PDT012A0X3) */ + compatible = "pmbus"; + reg = <0x10>; + label = "+1V0_P8"; + }; + pdt012@14 { + /* hwmon/pmbus/pmbus.ko (DA4:PDT012A0X3) */ + compatible = "pmbus"; + reg = <0x14>; + label = "+2V5_IOH"; + }; + }; + }; + + /* PMBUS I2C */ + pmc_i2c { + #address-cells = <1>; + #size-cells = <0>; + compatible = "pmc_i2c"; + label = "pmbus i2c"; + + /* config SENSORS_PMBUS */ + udt020@10 { + /* hwmon/pmbus/pmbus.ko (DA6:UDT020A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x10>; + label = "+0V9_CPU"; + }; + 
udt020@13 { + /* hwmon/pmbus/pmbus.ko (DA7:UDT020A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x13>; + label = "+PWR_MC"; + }; + pdt012@14 { + /* hwmon/pmbus/pmbus.ko (DA5:PDT012A0X3) */ + compatible = "pmbus"; + reg = <0x14>; + label = "+1V8_CPU"; + }; + }; + + /* IOHUB2 SPI */ + l_spi@0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "l_spi"; + reg = <0>; + label = "iohub2 spi"; + + boot@0 { + compatible = "mcst,boot"; + reg = <0>; + spi-max-frequency = <25000000>; + }; + rtc-cy14b101p@1 { + compatible = "rtc-cy14b101p"; + reg = <1>; + spi-max-frequency = <12500000>; + }; + }; +}; diff --git a/arch/e2k/boot/dts/e1cp_p2p8-sip-s1.dts b/arch/e2k/boot/dts/e1cp_p2p8-sip-s1.dts new file mode 100644 index 000000000000..1f01b0ac493f --- /dev/null +++ b/arch/e2k/boot/dts/e1cp_p2p8-sip-s1.dts @@ -0,0 +1,184 @@ +/* + * P2P8-SIP-S1 + * TVGI.469555.371 ver.1 + * Razbrakovshik + */ +/dts-v1/; + +/*#include */ + +/ { + /* version = ""; */ + model = "e1c+,mcst,p2p8-sip-s1,razbrakovshik"; + compatible = "mcst,p2p8-sip-s1"; + #address-cells = <1>; + #size-cells = <0>; + + /* IOHUB2 I2C */ + l_i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + compatible = "l_i2c"; + label = "iohub2 i2c"; + + i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + lm96163@4c { + /* hwmon/lm63.ko (DD75:LM96163CISD) */ + compatible = "lm63"; + reg = <0x4c>; + label = "FAN Control (X27)"; + + temp1_label = "Internal Temp"; + temp2_label = "IOHUB Temp"; + }; + }; + i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + spmc_gpio:pca9534@20 { + /* gpio/gpio-pca953x.ko (DD74:PCA9534PW) */ + compatible = "nxp,pca9534"; + reg = <0x20>; + label = "spmc_gpio"; + gpio-controller; + ngpios = <8>; + #gpio-cells = <2>; + gpio-line-names = + "SPMC_BATLOW_3V3#", + "SPMC_AC_PWR_PRSNT_3V3", + "none", + "none", + "none", + "none", + "none", + "none"; + }; + }; + i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + + pe1_p8_gpio:pca9534@21 { + 
/* gpio/gpio-pca953x.ko (DD112:PCA9534PW) */ + compatible = "nxp,pca9534"; + reg = <0x21>; + label = "PE1_P8_GPIO"; + gpio-controller; + ngpios = <8>; + #gpio-cells = <2>; + gpio-line-names = + "CTRL_PE1_PRSNT0#", + "CTRL_PE1_PRSNT1#", + "CTRL_PE1_PRSNT2#", + "CTRL_PE1_PRSNT3#", + "CTRL_PCIE_X1_1_CONNECT", + "CTRL_PCIE_X1_23_CONNECT", + "none", + "none"; + }; + }; + i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + + /* config SENSORS_PMBUS */ + pdt012@10 { + /* hwmon/pmbus/pmbus.ko (DA16:PDT012A0X3) */ + compatible = "pmbus"; + reg = <0x10>; + label = "+1V0_P8"; + }; + pdt012@12 { + /* hwmon/pmbus/pmbus.ko (DA33:PDT012A0X3) */ + compatible = "pmbus"; + reg = <0x12>; + label = "+3V0_P8"; + }; + pdt012@14 { + /* hwmon/pmbus/pmbus.ko (DA15:PDT012A0X3) */ + compatible = "pmbus"; + reg = <0x14>; + label = "+2V5_P8"; + }; + }; + }; + + /* PMBUS I2C */ + pmc_i2c { + #address-cells = <1>; + #size-cells = <0>; + compatible = "pmc_i2c"; + label = "pmbus i2c"; + + /* config SENSORS_PMBUS */ + pdt012@10 { + /* hwmon/pmbus/pmbus.ko (DA7:PDT012A0X3) */ + compatible = "pmbus"; + reg = <0x10>; + label = "+0V9_NB"; + }; + pdt012@11 { + /* hwmon/pmbus/pmbus.ko (DA8:PDT012A0X3) */ + compatible = "pmbus"; + reg = <0x11>; + label = "+PWR_3D"; + }; + pdt012@12 { + /* hwmon/pmbus/pmbus.ko (DA9:PDT012A0X3) */ + compatible = "pmbus"; + reg = <0x12>; + label = "+PWR_CORE"; + }; + udt020@13 { + /* hwmon/pmbus/pmbus.ko (DA1:UDT020A0X3) */ + compatible = "pmbus"; + reg = <0x13>; + label = "+PWR_MC"; + }; + pdt012@14 { + /* hwmon/pmbus/pmbus.ko (DA10:PDT012A0X3) */ + compatible = "pmbus"; + reg = <0x14>; + label = "+1V8_P2"; + }; + }; + + /* IOHUB2 SPI */ + l_spi@0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "l_spi"; + reg = <0>; + label = "iohub2 spi"; + + boot@0 { + compatible = "mcst,boot"; + reg = <0>; + spi-max-frequency = <25000000>; + }; + rtc-cy14b101p@1 { + compatible = "rtc-cy14b101p"; + reg = <1>; + spi-max-frequency = <12500000>; + }; + spidev@2 
{ + compatible = "mcst,boot"; // run spidev wo WARN + reg = <2>; + spi-max-frequency = <50000000>; + }; + spidev@3 { + compatible = "mcst,boot"; // run spidev wo WARN + reg = <3>; + spi-max-frequency = <50000000>; + }; + }; +}; diff --git a/arch/e2k/boot/dts/e2c_mono-pc.dts b/arch/e2k/boot/dts/e2c_mono-pc.dts new file mode 100644 index 000000000000..c0521514bed4 --- /dev/null +++ b/arch/e2k/boot/dts/e2c_mono-pc.dts @@ -0,0 +1,87 @@ +/* + * MONOCUBE + * TVGI.469555.253 ver.5 + * (MONOCUBE-PC) + */ +/dts-v1/; + +/ { + /* version = ""; */ + model = "e2c+,mcst,mono-pc"; + compatible = "mcst,mono-pc"; + #address-cells = <1>; + #size-cells = <0>; + + /* IOHUB I2C */ + l_i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + compatible = "l_i2c"; + label = "iohub i2c"; + + i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + lm95231@2b { + /* hwmon/lm95231.ko (DD12:LM95231CIMM) */ + /* CIMM:0x57 / CIMM-1:0x19 / CIMM-2:0x2A */ + compatible = "ti,lm95231"; + reg = <0x2b>; + label = "CPU Temp Sensor"; + + temp1_label = "Board Temp"; + temp2_label = "CPU Temp"; + temp3_label = "not connected"; + /* no fan control */ + }; + + /* TODO: ???.ko (DD14:MAX3674ECM+) */ + + /* (X29, X30) DIMMs */ + }; + i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + /* empty */ + }; + i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + + /* empty */ + }; + i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + + /* empty */ + }; + }; + + /* IOHUB SPI */ + l_spi@0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "l_spi"; + reg = <0>; + label = "iohub spi"; + + boot@0 { + compatible = "mcst,boot"; + reg = <0>; + spi-max-frequency = <25000000>; + }; + rtc-cy14b101p@1 { + compatible = "rtc-cy14b101p"; + reg = <1>; + spi-max-frequency = <12500000>; + }; + }; +}; diff --git a/arch/e2k/boot/dts/e4c_apk-pc4c.dts b/arch/e2k/boot/dts/e4c_apk-pc4c.dts new file mode 100644 index 000000000000..ff40b8867389 --- /dev/null 
+++ b/arch/e2k/boot/dts/e4c_apk-pc4c.dts @@ -0,0 +1,459 @@ +/* + * support for the Mcst Pevm based board + * + */ +/dts-v1/; + +/*#include */ + +/ { + version = "apk-pc4c v1.0 2018-03-16"; + model = "e4c, apk-pc4c, notebook"; + compatible = "mcst,apk-pc4c"; + #address-cells = <1>; + #size-cells = <0>; + + l_i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + compatible = "l_i2c"; + i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + lm96163@4c { + /* hwmon/lm63.ko (DD22:LM96163CISD) - CPU_TERM */ + compatible = "lm63"; + reg = <0x4c>; + label = "COM: CPU Temp Sensor"; + + temp1_label = "Internal Temp"; + temp2_label = "CPU Temp"; + /* no fan control */ + }; + ltm4676@40 { + /* hwmon/pmbus/ltc2978.ko (LTM4676IY) - +3V3out / +1V0out */ + compatible = "lltc,ltm4676"; + reg = <0x40>; + label = "COM: +3V3out / +1V0out"; + regulators { + vout0 { + regulator-name = "COM:3V3"; + }; + vout1 { + regulator-name = "COM:1V0"; + }; + }; + }; + ltm4676@4f { + /* hwmon/pmbus/ltc2978.ko (LTM4676IY) - +1V5out / +1V2out */ + compatible = "lltc,ltm4676"; + reg = <0x4f>; + label = "COM: RAM-1V5 / IOHUB-1V2"; + regulators { + vout0 { + regulator-name = "IOHUB-1V5"; + }; + vout1 { + regulator-name = "IOHUB-1V2"; + }; + }; + }; + ltc4151@6a { + /* hwmon/ltc4151.ko (LTC4551IDD) - TERM_LTM / +12V_CPU */ + compatible = "lltc,ltc4151"; + reg = <0x6a>; + shunt-resistor-micro-ohms = <20000>; + label = "COM: TERM_LTM / +12V_CPU"; + }; + /* + pca9534@20 { + compatible = "nxp,pca9534"; //driver: gpio/gpio-pca953x + reg = <0x20>; + }; + pca9534@21 { + compatible = "nxp,pca9534"; //driver: gpio/gpio-pca953x + reg = <0x21>; + }; + */ + }; + i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + lm96163@4c { + /* hwmon/lm63.ko (DD24:LM96163CISD) - board temp */ + compatible = "lm63"; + reg = <0x4c>; + label = "COM: Board Temp Sensor"; + + temp1_label = "Internal Temp"; + temp2_label = "not connected"; + /* no fan control */ + }; + }; + i2c@2 { + 
#address-cells = <1>; + #size-cells = <0>; + reg = <2>; + /* (PEX8624-BB50BIF) - PCIe switch */ + }; + i2c@3 { + /* COMexpress:SMB */ + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + i2c-mux@44 { + /* i2c/muxes/i2c-mux-ltc4306.ko (LTC4306) - I2C MUX */ + #address-cells = <1>; + #size-cells = <0>; + compatible = "lltc,ltc4306"; + reg = <0x44>; + /* i2c-mux-idle-disconnect; */ + i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + /* E8860 MXM Radeon video */ + }; + i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + /* mPCI-E (WiFi+BT) */ + }; + i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + /* mPCI-E (Exp) */ + }; + i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + apkpwr@5a { + /* mcst/apkpwr.ko [X13:PowerIn] */ + #address-cells = <1>; + #size-cells = <0>; + compatible = "mcst,apkpwr"; + reg = <0x5a>; + label = "MPN power in"; + }; + }; + }; + i2c-mux@58 { + /* i2c/muxes/i2c-mux-ltc4306.ko (LTC4306) - I2C MUX */ + #address-cells = <1>; + #size-cells = <0>; + compatible = "lltc,ltc4306"; + reg = <0x58>; + /* i2c-mux-idle-disconnect; */ + i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + gpi1:pca9534@20 { + /* gpio/gpio-pca953x.ko (DD14:PCA9534PW) - DiscretIn#1 */ + compatible = "nxp,pca9534"; + reg = <0x20>; + label = "DiscretIn#1"; + gpio-controller; + /* FUTURE: (now interrupt support not compiled in) + interrupt-parent = <&gpio?>; + interrupts = ; + */ + ngpios = <8>; + #gpio-cells = <2>; + /* use gpio_keys to control lines 0..4 */ + gpio-line-names = "wifi_btn", "avia_btn", "pwm_btn", "tpad_btn", "cover_down", + "PRSNT_L", "PRSNT_R", "NT_FLAG"; + }; + gpi2:pca9534@21 { + /* gpio/gpio-pca953x.ko (DD6:PCA9534PW) - DiscretIn#2 */ + compatible = "nxp,pca9534"; + reg = <0x21>; + label = "DiscretIn#2"; + gpio-controller; + /* FUTURE: (now interrupt support not compiled in) + interrupt-parent = <&gpio?>; + interrupts = ; + */ + ngpios = <6>; + #gpio-cells = <2>; + 
gpio-line-names = "PWR_ALERT", "CASE_ALERT", "COVER_ALERT", + "CPU_OVERH", "TH_OVERH", "HDD_ALERT"; + }; + gpo1:pca9534@22 { + /* gpio/gpio-pca953x.ko (DD5:PCA9534) - DiscretOut#1 */ + compatible = "nxp,pca9534"; + reg = <0x22>; + label = "DiscretOut#1"; + gpio-controller; + /* FUTURE: (now interrupt support not compiled in) + interrupt-parent = <&gpio?>; + interrupts = ; + */ + ngpios = <8>; + #gpio-cells = <2>; + /* use gpio_leds to control this lines */ + gpio-line-names = "wifi_led", "bluetooth_led", "pwm_r_led", "pwm_g_led", + "tpad_off_led", "cam_act_led", "numlock_led", "capslock_led"; + }; + gpo2:pca9534@23 { + /* gpio/gpio-pca953x.ko (DD7:PCA9534) - DiscretOut#2 */ + compatible = "nxp,pca9534"; + reg = <0x23>; + label = "DiscretOut#2"; + gpio-controller; + /* FUTURE: (now interrupt support not compiled in) + interrupt-parent = <&gpio?>; + interrupts = ; + */ + ngpios = <4>; + #gpio-cells = <2>; + /* use gpio_leds to control line 0 and gpio to other */ + gpio-line-names = "sata_led", + "PWR_LEVEL", "RS_232_EN", "WIFI_DIS"; + rs232_en { + gpio-hog; + gpios = <2 0>; + output-low; + line-name = "RS_232_EN"; + /* use IOHUB-GPIO for this function */ + }; + }; + }; + i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + lm95245@18 { + /* hwmon/lm95245.ko (DD54:LM95245CIMM) - TEMP_CASE + MXM */ + compatible = "national,lm95245"; + reg = <0x18>; + label = "MB: TEMP_CASE / MXM"; + // TODO: in reg 0xBF clear bit 3 (lm95245.pdf, p.20) + }; + lm95245@4c { + /* hwmon/lm95245.ko (DD55:LM95245CIMM) - TEMP_COVER */ + compatible = "national,lm95245"; + reg = <0x4c>; + label = "MB: TEMP_COVER"; + // TODO: in reg 0xBF clear bit 3 (lm95245.pdf, p.20) + }; + lm95245@29 { + /* hwmon/lm95245.ko (DD56:LM95245CIMM) - TEMP_HDD */ + compatible = "national,lm95245"; + reg = <0x29>; + label = "MB: TEMP_HDD"; + // TODO: in reg 0xBF clear bit 3 (lm95245.pdf, p.20) + }; + ltm4676@40 { + /* hwmon/pmbus/ltc2978.ko (DA1:LTM4676IY) - DC/DC 5V & 3V3 */ + compatible = 
"lltc,ltm4676"; + reg = <0x40>; + label = "MB: DC/DC 5V & 3V3"; + regulators { + vout0 { + regulator-name = "MB-5V"; + }; + vout1 { + regulator-name = "MB-3V3"; + }; + }; + }; + }; + i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + ptn3460@20 { + /* gpu/drm/bridge/nxp-ptn3460.ko (DD54:LM95245CIMM) - DP2LVDS */ + compatible = "nxp,ptn3460"; + reg = <0x20>; + label = "DP2LVDS"; + powerdown-gpio = <&gpio 0 1 0 0>; /* PD_N pin */ + reset-gpio = <&gpio 0 1 0 0>; /* RST_N pin */ + edid-emulation = <1>; + /* edid-emulation: + | 0 | 1024x768 | NXP Generic | + | 1 | 1920x1080 | NXP Generic | + | 2 | 1920x1080 | NXP Generic | + | 3 | 1600x900 | Samsung LTM200KT | + | 4 | 1920x1080 | Samsung LTM230HT | + | 5 | 1366x768 | NXP Generic | + | 6 | 1600x900 | ChiMei M215HGE | + */ + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + bridge_out:endpoint { + remote-endpoint = <&panel_in>; + }; + }; + }; + }; + }; + i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + }; + }; + }; + }; + l_spi@0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "l_spi"; + reg = <0>; + boot@0 { + compatible = "mcst,boot"; + reg = <0>; + spi-max-frequency = <16000000>; + }; + rtc-cy14b101p@1 { + compatible = "rtc-cy14b101p"; + reg = <1>; + spi-max-frequency = <16000000>; + }; + }; + gpio0:l_gpio@0 { + compatible = "mcst,gpio"; + reg = <0x0>; + label = "L-GPIO"; + gpio-controller; + ngpios = <16>; + #gpio-cells = <2>; + gpio-line-names = "nc", "nc", "nc", "nc", "nc", + "GPI0.PWR_NMI", "GPI1.I2C_INT", + "GPI2.TP14", "GPI3.TP15", + "GPO0.PWR_KILL", "GPO1.RS232_EN1", + "GPO2.TP16", "GPO3.TP17", + "nc", "nc", "nc"; + }; + + gpio_leds { + compatible = "gpio-leds"; + /* DiscretOut#1: */ + wifi_led { + gpios = <&gpo1 0 0>; /* (F3) */ + linux,default-trigger = "rfkill0"; + label = "wifi:green"; + }; + avia_led { + gpios = <&gpo1 1 0>; /* (F1) */ + linux,default-trigger = "rfkill1"; + label = "bluetooth:green"; + }; + pwm_r_led { + gpios = 
<&gpo1 2 0>; /* (F4) */ + linux,default-trigger = "none"; + default-state = "on"; + label = "pwm_r:yellow"; + }; + pwm_g_led { + gpios = <&gpo1 3 0>; /* (F4) */ + linux,default-trigger = "none"; + default-state = "off"; + label = "pwm_g:green"; + }; + tpad_off_led { + gpios = <&gpo1 4 0>; /* (F2) */ + linux,default-trigger = "none"; + default-state = "off"; + label = "tpad_off:red"; + }; + cam_act_led { + gpios = <&gpo1 5 0>; + linux,default-trigger = "none"; + default-state = "off"; + label = "cam_act:yellow"; + }; + numlock_led { + gpios = <&gpo1 6 0>; + linux,default-trigger = "kbd-numlock"; + label = "numlock:green"; + }; + capslock_led { + gpios = <&gpo1 7 0>; + linux,default-trigger = "kbd-capslock"; + label = "capslock:green"; + }; + /* DiscretOut#2: */ + sata_led { + gpios = <&gpo2 0 0>; + linux,default-trigger = "disk-activity"; + label = "sata:yellow"; + }; + }; + gpio_keys_polled { + #address-cells = <1>; + #size-cells = <0>; + compatible = "gpio-keys-polled"; + poll-interval = <250>; + /*autorepeat;*/ + wifi_btn { + gpios = <&gpi1 0 0>; /* [F3] */ + linux,code = <238>; /* KEY_WLAN */ + debounce-interval = <10>; + label = "wifi_btn"; + }; + avia_btn { + gpios = <&gpi1 1 0>; /* [F1] */ + linux,code = <247>; /* KEY_RFKILL */ + debounce-interval = <10>; + label = "avia_btn"; + }; + pwm_btn { + gpios = <&gpi1 2 0>; /* [F4] */ + linux,code = <243>; /* KEY_BRIGHTNESS_CYCLE */ + debounce-interval = <10>; + label = "pwm_btn"; + }; + tpad_btn { + gpios = <&gpi1 3 0>; /* [F2] */ + linux,code = <0x212>; /* KEY_TOUCHPAD_TOGGLE */ + debounce-interval = <10>; + label = "tpad_btn"; + }; + cover_down { + gpios = <&gpi1 4 0>; /* case closed */ + linux,code = <245>; /* KEY_DISPLAY_OFF */ + debounce-interval = <10>; + label = "cover_down"; + }; + power { + gpios = <&gpio0 5 0>; + linux,code = <116>; /* KEY_POWER */ + debounce-interval = <10>; + label = "Power-key"; + }; + }; + + /* stub */ + gpio:gpio-controller { + gpio-controller; + ngpios = <2>; + #gpio-cells = <2>; + 
line_0 { + gpio-hog; + gpios = <0 0>; + output-low; + }; + line_1 { + gpio-hog; + gpios = <1 0>; + output-high; + }; + }; + port { + panel_in: endpoint { + remote-endpoint = <&bridge_out>; + }; + }; +}; diff --git a/arch/e2k/boot/dts/e4c_el-2s-4.dts b/arch/e2k/boot/dts/e4c_el-2s-4.dts new file mode 100644 index 000000000000..dbce1289f615 --- /dev/null +++ b/arch/e2k/boot/dts/e4c_el-2s-4.dts @@ -0,0 +1,272 @@ +/* + * EL-2S-4 + * LYUI.469555.052 + * (Footballer / UEM-2U) + */ +/dts-v1/; + +/ { + /* version = ""; */ + model = "e4c,mcst,el-2s-4,uem-2u"; + compatible = "mcst,el-2s-4"; + #address-cells = <1>; + #size-cells = <0>; + + /* IOHUB I2C */ + l_i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + compatible = "l_i2c"; + label = "iohub i2c"; + + i2c@0 { + /* I2C_*0 */ + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + i2c-mux@8A { + /* i2c/muxes/i2c-mux-ltc4306.ko (DD48:LTC4306IUFD) */ + #address-cells = <1>; + #size-cells = <0>; + compatible = "lltc,ltc4306"; + reg = <0x8A>; /* LHH */ + /* i2c-mux-idle-disconnect; */ + i2c@0 { + /* I2C_*0_0 */ + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + /* CPU0 */ + lm96163@4c { + /* hwmon/lm63.ko (0_DD2:LM96163CISD) */ + compatible = "lm63"; + reg = <0x4c>; + label = "FAN_CPU0"; + /* trivial-devices */ + + temp1_label = "Internal diode"; + temp2_label = "CPU0_TERM"; + }; + /* 0xA0 (0_X1:DIMM) */ + /* 0xA2 (0_X2:DIMM) */ + /* 0xA3 (0_X3:DIMM) */ + /* (0_X4:Power) */ + }; + i2c@1 { + /* I2C_*0_1 */ + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + /* X35:PCIe */ + }; + i2c@2 { + /* I2C_*0_2 */ + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + + /* X36:PCIe */ + }; + i2c@3 { + /* I2C_*0_3 */ + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + + /* ???.ko (DD1:MAX3674ECM+) + * 0xB4? - 500MHz CPU IOHUB1,2 + */ + /* ???.ko (DD2:MAX3674ECM+) + * 0xB6? 
- 500MHz IOL IOHUB1,2 + */ + pdt003@1B { + /* hwmon/pmbus/pmbus.ko (DA7:PDT003A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x1B>; /* 033 */ + label = "+3V3SB"; + }; + pdt012@2A { + /* hwmon/pmbus/pmbus.ko (DA1:PDT012A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x2A>; /* 052 */ + label = "+2V5 IOHUB"; + }; + pdt012@11 { + /* hwmon/pmbus/pmbus.ko (DA2:PDT012A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x11>; /* 021 */ + label = "+1V2 IOHUB"; + }; + service_gpio:pca9534@20 { + /* gpio/gpio-pca953x.ko (DD27:PCA9534PW) */ + compatible = "nxp,pca9534"; + reg = <0x20>; /* 0100 000 */ + label = "Service Register"; + gpio-controller; + ngpios = <8>; + #gpio-cells = <2>; + gpio-line-names = + "ModSel0", + "ModSel1", + "KCBB_SERVICE3", + "KCBB_SERVICE4", + "KCBB_CONF_DONE", + "KCBB_PWR_OK", + "SPEAKER", + "I2C_RESET#"; + }; + i2c-mux@B2 { + /* i2c/muxes/i2c-mux-ltc4306.ko (DD3:LTC4306IUFD) */ + #address-cells = <1>; + #size-cells = <0>; + compatible = "lltc,ltc4306"; + reg = <0xB2>; /* LHL */ + /* i2c-mux-idle-disconnect; */ + i2c@0 { + /* I2C00 */ + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + lm96163@4c { + /* hwmon/lm63.ko (DD25:LM96163CISD) */ + compatible = "lm63"; + reg = <0x4c>; + label = "PWM2_FAN"; + /* trivial-devices */ + + temp1_label = "Internal diode"; + temp2_label = "not connected"; + }; + }; + i2c@1 { + /* I2C10 */ + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + lm96163@4c { + /* hwmon/lm63.ko (DD23:LM96163CISD) */ + compatible = "lm63"; + reg = <0x4c>; + label = "FAN_CPU0"; + /* trivial-devices */ + + temp1_label = "Internal diode"; + temp2_label = "not connected"; + }; + }; + i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + + /* empty */ + }; + i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + + /* empty */ + }; + }; + /* X23:KCBB + * - 0xA0 - Trans + * - 0x12, 0x1A, 0x2A - Power + */ + }; + }; + }; + i2c@1 { + /* I2C_*1 */ + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + /* 
CPU1 */ + lm96163@4c { + /* hwmon/lm63.ko (1_DD2:LM96163CISD) */ + compatible = "lm63"; + reg = <0x4c>; + label = "FAN_CPU1"; + /* trivial-devices */ + + temp1_label = "Internal diode"; + temp2_label = "CPU1_TERM"; + }; + /* 0xA0 (1_X1:DIMM) */ + /* 0xA2 (1_X2:DIMM) */ + /* 0xA3 (1_X3:DIMM) */ + /* (1_X4:Power) */ + }; + i2c@2 { + /* I2C_*2 */ + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + + /* CPU2 */ + lm96163@4c { + /* hwmon/lm63.ko (2_DD2:LM96163CISD) */ + compatible = "lm63"; + reg = <0x4c>; + label = "FAN_CPU2"; + /* trivial-devices */ + + temp1_label = "Internal diode"; + temp2_label = "CPU2_TERM"; + }; + /* 0xA0 (2_X1:DIMM) */ + /* 0xA2 (2_X2:DIMM) */ + /* 0xA3 (2_X3:DIMM) */ + /* (2_X4:Power) */ + }; + i2c@3 { + /* I2C_*3 */ + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + + /* CPU3 */ + lm96163@4c { + /* hwmon/lm63.ko (3_DD2:LM96163CISD) */ + compatible = "lm63"; + reg = <0x4c>; + label = "FAN_CPU3"; + /* trivial-devices */ + + temp1_label = "Internal diode"; + temp2_label = "CPU3_TERM"; + }; + /* 0xA0 (3_X1:DIMM) */ + /* 0xA2 (3_X2:DIMM) */ + /* 0xA3 (3_X3:DIMM) */ + /* (3_X4:Power) */ + }; + }; + + /* IOHUB SPI */ + l_spi@0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "l_spi"; + reg = <0>; + label = "iohub spi"; + + boot@0 { + compatible = "mcst,boot"; + reg = <0>; + spi-max-frequency = <25000000>; + }; + rtc-cy14b101p@1 { + compatible = "rtc-cy14b101p"; + reg = <1>; + spi-max-frequency = <12500000>; + }; + }; +}; diff --git a/arch/e2k/boot/dts/e4c_mbe2s-pc.dts b/arch/e2k/boot/dts/e4c_mbe2s-pc.dts new file mode 100644 index 000000000000..a3479e6f787b --- /dev/null +++ b/arch/e2k/boot/dts/e4c_mbe2s-pc.dts @@ -0,0 +1,86 @@ +/* + * MBE2S-PC + * TVGI.469555.300 ver.4 / ver.5 + * (401-PC) + */ +/dts-v1/; + +/ { + /* version = ""; */ + model = "e4c,mcst,mbe2s-pc,401-pc"; + compatible = "mcst,mbe2s-pc"; + #address-cells = <1>; + #size-cells = <0>; + + /* IOHUB I2C */ + l_i2c@0 { + #address-cells = <1>; + #size-cells = 
<0>; + reg = <0>; + compatible = "l_i2c"; + label = "iohub i2c"; + + i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + adt7475@2e { + /* hwmon/adt7475.ko (DD13:ADT7475ARQZ) */ + compatible = "adt7475"; + reg = <0x2e>; + label = "board temp"; + /* trivial-devices */ + + temp1_label = "internal"; + temp2_label = "cpu"; + temp3_label = "motherboard"; + }; + + /* TODO: ???.ko (DD18:MAX3674ECM+) */ + + /* (X9..X11) DIMMs */ + }; + i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + /* empty */ + }; + i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + + /* empty */ + }; + i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + + /* empty */ + }; + }; + + /* IOHUB SPI */ + l_spi@0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "l_spi"; + reg = <0>; + label = "iohub spi"; + + boot@0 { + compatible = "mcst,boot"; + reg = <0>; + spi-max-frequency = <25000000>; + }; + rtc-cy14b101p@1 { + compatible = "rtc-cy14b101p"; + reg = <1>; + spi-max-frequency = <12500000>; + }; + }; +}; diff --git a/arch/e2k/boot/dts/e8c2_uatx_se.dts b/arch/e2k/boot/dts/e8c2_uatx_se.dts new file mode 100644 index 000000000000..afecb64971a9 --- /dev/null +++ b/arch/e2k/boot/dts/e8c2_uatx_se.dts @@ -0,0 +1,159 @@ +/* + * E8C2-uATX/SE + * LYUI.469555.098 ver.1 / ver.2 + * + */ +/dts-v1/; + +/*#include */ + +/ { + /* version = ""; */ + model = "e8c2,mcst,e8c2-uatx_se"; + compatible = "mcst,e8c2-uatx_se"; + #address-cells = <1>; + #size-cells = <0>; + + /* IOHUB2 I2C */ + l_i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + compatible = "l_i2c"; + label = "iohub2 i2c"; + + i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + lm96163@4c { + /* hwmon/lm63.ko (DD22:LM96163CISD) */ + compatible = "lm63"; + reg = <0x4c>; + label = "FAN_CPU"; + /* trivial-devices */ + + temp1_label = "Internal diode"; + temp2_label = "External diode"; + }; + + /* (X45..X48) DDR4 DIMMs: 0x18 0x1A 0x1C 0x1E */ + /* 
(X52) "MNGR" I2C1_ISO */ + }; + + i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + /* config SENSORS_PMBUS */ + pdt012@23 { + /* hwmon/pmbus/pmbus.ko (DA4:PDT012A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x23>; + label = "+2V5"; + }; + mdt040@12 { + /* ver1: hwmon/pmbus/pmbus.ko (DA8:UDT020A0X3-SRZ) */ + /* ver2: hwmon/pmbus/pmbus.ko (DA8:MDT040A0X3-SRPHZ) */ + compatible = "pmbus"; + reg = <0x12>; + label = "+1V2"; + }; + pdt012@22 { + /* hwmon/pmbus/pmbus.ko (DA5:PDT012A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x22>; + label = "+1V0"; + }; + pdt012@0A { + /* hwmon/pmbus/pmbus.ko (DA12:PDT012A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x0A>; + label = "+0V9_LINK"; + }; + udt020@1A { + /* hwmon/pmbus/pmbus.ko (DA10:UDT020A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x1A>; + label = "+0V9UN"; + }; + pdt003@1B { + /* hwmon/pmbus/pmbus.ko (DA7:PDT003A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x1B>; + label = "+1V2_0"; + }; + + lm96163@4c { + /* hwmon/lm63.ko (DD23:LM96163CISD) */ + compatible = "lm63"; + reg = <0x4c>; + label = "FAN_IOHUB2"; + /* trivial-devices */ + + temp1_label = "Internal diode"; + temp2_label = "IOHUB2 TERM"; + }; + + /* (X52) "MNGR" I2C3_ISO */ + }; + + i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + + /* (X6..X9) PCIe/PCI */ + /* (X52) "MNGR" I2C2_ISO */ + }; + + i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + + /* config SENSORS_PMBUS */ + pdt012@1B { + /* hwmon/pmbus/pmbus.ko (DA19:PDT012A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x1B>; + label = "+3V3_SUS"; + }; + + /* (X52) "MNGR" I2C4_ISO */ + /* [DD26]24FC128-I/ST(EEPROM)@0x57 "FRUID" */ + }; + }; + + /* IOHUB2 SPI */ + l_spi@0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "l_spi"; + reg = <0>; + label = "iohub2 spi"; + + boot@0 { + /* (DD20:S25FL128SAGNFI001) */ + compatible = "mcst,boot"; + reg = <0>; + spi-max-frequency = <25000000>; + }; + rtc-cy14b101p@1 { + /* (DD19:CY14B101PA-SFXI) */ + 
compatible = "rtc-cy14b101p"; + reg = <1>; + spi-max-frequency = <12500000>; + }; + }; + + /* l_gpio@%d */ + /* + * I2C3_ALERT - [DD2]1991VG2YA - GPIO15 + * I2C2_ALERT - [DD2]1991VG2YA - none!!!(16) + * I2C1_ALERT - [DD2]1991VG2YA - GPIO17 + * I2C0_ALERT - [DD2]1991VG2YA - GPIO18 + */ +}; diff --git a/arch/e2k/boot/dts/e8c_mbe8c-pc.dts b/arch/e2k/boot/dts/e8c_mbe8c-pc.dts new file mode 100644 index 000000000000..77373f70b09b --- /dev/null +++ b/arch/e2k/boot/dts/e8c_mbe8c-pc.dts @@ -0,0 +1,64 @@ +/* + * MBE8C-PC + * TVGI.369555.318 ver.5 + * (801-PC) + */ +/dts-v1/; + +/*#include */ + +/ { + /* version = ""; */ + model = "e8c,mcst,mbe8c-pc,801-pc"; + compatible = "mcst,mbe8c-pc"; + #address-cells = <1>; + #size-cells = <0>; + + /* IOHUB2 I2C */ + l_i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + compatible = "l_i2c"; + label = "iohub2 i2c"; + + i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + adt7475@2e { + /* hwmon/adt7475.ko (DD4:ADT7475ARQ) */ + compatible = "adt7475"; + reg = <0x2e>; + label = "board temp"; + /* trivial-devices */ + + temp1 = "iohub2"; + temp2 = "motherboard"; + /* sensor under power supply on motherboard */ + temp3 = "power supply"; + }; + }; + }; + + /* IOHUB2 SPI */ + l_spi@0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "l_spi"; + reg = <0>; + label = "iohub2 spi"; + + boot@0 { + compatible = "mcst,boot"; + reg = <0>; + spi-max-frequency = <25000000>; + }; + rtc-cy14b101p@1 { + compatible = "rtc-cy14b101p"; + reg = <1>; + spi-max-frequency = <12500000>; + }; + }; +}; diff --git a/arch/e2k/boot/dts/e8c_swtx.dts b/arch/e2k/boot/dts/e8c_swtx.dts new file mode 100644 index 000000000000..6876841067a0 --- /dev/null +++ b/arch/e2k/boot/dts/e8c_swtx.dts @@ -0,0 +1,256 @@ +/* + * E8C-SWTX + * TVGI.469535.221 ver.4 (izm7) + * (804 1U) + */ +/dts-v1/; + +/*#include */ + +/ { + /* version = ""; */ + model = "e8c,mcst,e8c-swtx,804_1u"; + compatible = "mcst,e8c-swtx"; + #address-cells = <1>; + 
#size-cells = <0>; + + /* IOHUB2 I2C */ + l_i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + compatible = "l_i2c"; + label = "iohub2 i2c"; + + i2c@0 { + /* I2C_CPU */ + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + i2c-mux@4E { + /* i2c/muxes/i2c-mux-ltc4306.ko (DD7:LTC4306IUFD) */ + #address-cells = <1>; + #size-cells = <0>; + compatible = "lltc,ltc4306"; + reg = <0x4E>; /* NLN */ + /* i2c-mux-idle-disconnect; */ + + i2c@0 { + /* I2C_CPU0 */ + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + lm96163@4c { + /* hwmon/lm63.ko (DD8:LM96163CISD) */ + compatible = "lm63"; + reg = <0x4c>; + label = "CPU0 FAN Control (X31)"; + + temp1_label = "Internal Temp"; + temp2_label = "CPU0 Temp"; + }; + + /* A1 "CPU0" - I2C_MAIN */ + /* + * MC0.S0:0x50,0x30,0x18 + * MC0.S1:0x51,0x31,0x19 + * MC1.S0:0x52,0x32,0x1A + * MC1.S1:0x53,0x33,0x1B + * MC2.S0:0x54,0x34,0x1C + * MC2.S1:0x55,0x35,0x1D + * MC3.S0:0x56,0x36,0x1E + * MC3.S1:0x57,0x37,0x1f + */ + }; + + i2c@1 { + /* I2C_CPU1 */ + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + lm96163@4c { + /* hwmon/lm63.ko (DD11:LM96163CISD) */ + compatible = "lm63"; + reg = <0x4c>; + label = "CPU1 FAN Control (X20)"; + + temp1_label = "Internal Temp"; + temp2_label = "CPU1 Temp"; + }; + + /* A2 "CPU1" - I2C_MAIN */ + /* + * MC0.S0:0x50,0x30,0x18 + * MC0.S1:0x51,0x31,0x19 + * MC1.S0:0x52,0x32,0x1A + * MC1.S1:0x53,0x33,0x1B + * MC2.S0:0x54,0x34,0x1C + * MC2.S1:0x55,0x35,0x1D + * MC3.S0:0x56,0x36,0x1E + * MC3.S1:0x57,0x37,0x1f + */ + }; + + i2c@2 { + /* I2C_CPU2 */ + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + + lm96163@4c { + /* hwmon/lm63.ko (DD14:LM96163CISD) */ + compatible = "lm63"; + reg = <0x4c>; + label = "CPU2 FAN Control (X32)"; + + temp1_label = "Internal Temp"; + temp2_label = "CPU2 Temp"; + }; + + /* A3 "CPU2" - I2C_MAIN */ + /* + * MC0.S0:0x50,0x30,0x18 + * MC0.S1:0x51,0x31,0x19 + * MC1.S0:0x52,0x32,0x1A + * MC1.S1:0x53,0x33,0x1B + * MC2.S0:0x54,0x34,0x1C + * 
MC2.S1:0x55,0x35,0x1D + * MC3.S0:0x56,0x36,0x1E + * MC3.S1:0x57,0x37,0x1f + */ + }; + + i2c@3 { + /* I2C_CPU3 */ + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + + lm96163@4c { + /* hwmon/lm63.ko (DD15:LM96163CISD) */ + compatible = "lm63"; + reg = <0x4c>; + label = "CPU3 FAN Control (X30)"; + + temp1_label = "Internal Temp"; + temp2_label = "CPU3 Temp"; + }; + + /* A4 "CPU3" - I2C_MAIN */ + /* + * MC0.S0:0x50,0x30,0x18 + * MC0.S1:0x51,0x31,0x19 + * MC1.S0:0x52,0x32,0x1A + * MC1.S1:0x53,0x33,0x1B + * MC2.S0:0x54,0x34,0x1C + * MC2.S1:0x55,0x35,0x1D + * MC3.S0:0x56,0x36,0x1E + * MC3.S1:0x57,0x37,0x1f + */ + }; + }; + }; + + i2c@1 { + /* I2C_PMBUS */ + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + lm96163@4c { + /* hwmon/lm63.ko (DD5:LM96163CISD) */ + compatible = "lm63"; + reg = <0x4c>; + label = "IOHUB FAN Control (X52)"; + + temp1_label = "Internal Temp"; + temp2_label = "IOHUB Temp"; + }; + + /* config SENSORS_PMBUS */ + pdt012@12 { + /* hwmon/pmbus/pmbus.ko (DA2:PDT012A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x12>; /* 23k7|23k7 */ + label = "+1V8_CPU (in: +12V_IO)"; + }; + pdt012@22 { + /* hwmon/pmbus/pmbus.ko (DA4:PDT012A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x22>; /* 54k9|23k7 */ + label = "+1V0 (in: +12V_IO)"; + }; + pdt012@23 { + /* hwmon/pmbus/pmbus.ko (DA3:PDT012A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x23>; /* 54k9|36k5 */ + label = "+2V5 (in: +12V_IO)"; + }; + + /* I2C_PMBUS -- [DD40] -- I2C_PMBUS_ISO */ + + pdt012@24 { + /* hwmon/pmbus/pmbus.ko (DA1:PDT012A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x24>; /* 54k9|54k9 */ + label = "+3V3_SUSP (in: +5V_SB)"; + }; + + /* + * [X39:SNP346-24VP21-2V_FP_IO] SSI EEB v.1.0.1 p. 2.3.3 + * [X56:SNP346-10VP21-2V] Digital Power Insight + * [X55:70545-0004] SSI EEB v.1.0.1 p. 
2.3.1.3 + * Addr:0x38,0x39,0x3C,0x3D + */ + }; + + i2c@2 { + /* I2C_PCI */ + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + + /* PCIe/PCI slots */ + }; + + i2c@3 { + /* I2C_FRUID */ + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + + /* [DD28:24FC128-I/ST] (EEPROM)@0x57 "FRUID" */ + }; + }; + + /* IOHUB2 SPI */ + l_spi@0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "l_spi"; + reg = <0>; + label = "iohub2 spi"; + + boot@0 { + /* (DD33:S25FL128SAGNFI001) */ + compatible = "mcst,boot"; + reg = <0>; + spi-max-frequency = <25000000>; + }; + rtc-cy14b101p@1 { + /* (DD6:CY14B101PA-SFXI) */ + compatible = "rtc-cy14b101p"; + reg = <1>; + spi-max-frequency = <12500000>; + }; + }; + + /* l_gpio@%d */ + /* + * I2C0: I2C_CPU_ALERT# - [DD2]1991VG2YA - GPIO18 + * I2C1: I2C_PMBUS_ALERT# - [DD2]1991VG2YA - GPIO17 + */ +}; diff --git a/arch/e2k/boot/dts/include/dt-bindings b/arch/e2k/boot/dts/include/dt-bindings new file mode 120000 index 000000000000..08c00e4972fa --- /dev/null +++ b/arch/e2k/boot/dts/include/dt-bindings @@ -0,0 +1 @@ +../../../../../include/dt-bindings \ No newline at end of file diff --git a/arch/e2k/boot/dumpsyms.c b/arch/e2k/boot/dumpsyms.c new file mode 100644 index 000000000000..f260a7de7c7a --- /dev/null +++ b/arch/e2k/boot/dumpsyms.c @@ -0,0 +1,173 @@ +#include +#include +#include +#include +#include +#include + +static Elf64_Shdr * elf_shdr_p; +static int input_fd; + +static int +open_file (char * name, + int flags, + int mode) +{ + char error_message[1024]; + int fd; + + fd = open (name, flags, mode); + if (fd < 0) + { + sprintf (error_message, "Can not open \'%s\'", name); + perror (error_message); + exit (1); + } + + return fd; +} + +static void * +read_from_file (long offset, + long size) +{ + int r; + void * ptr; + + ptr = malloc (size); + if (ptr == 0) + { + perror ("malloc"); + exit (1); + } + + r = lseek (input_fd, offset, SEEK_SET); + if (r < 0) + { + perror ("lseek"); + exit (1); + } + + r = read (input_fd, 
ptr, size); + if (r < 0) + { + perror ("read"); + exit (1); + } + + return ptr; +} + +static void * +read_section (int section_index, + long * size_p) +{ + long size, offset; + + size = elf_shdr_p[section_index].sh_size; + offset = elf_shdr_p[section_index].sh_offset; + + if (size_p != 0) + { + *size_p = size; + } + + return read_from_file (offset, size); +} + +int +main (int argc, char **argv) +{ + Elf64_Ehdr elf_ehdr; + Elf64_Off e_shoff, e_shentsize, e_shnum; + int symtab_fd, strtab_fd, r, i, symtab_index, strtab_index; + char * elf_sym_p, * elf_str_p; + long symtab_size, strtab_size; + + if (argc != 4) + { + fprintf (stderr, "Usage: %s \n", argv[0]); + exit (1); + } + + input_fd = open_file (argv[1], O_RDONLY, 0); + symtab_fd = open (argv[2], O_CREAT | O_WRONLY, 0644); + strtab_fd = open (argv[3], O_CREAT | O_WRONLY, 0644); + + /* ELF header */ + + r = read (input_fd, (void *) &elf_ehdr, sizeof (elf_ehdr)); + if (r < 0) + { + perror ("read"); + exit (1); + } + + if (elf_ehdr.e_ident[EI_CLASS] != ELFCLASS64) + { + fprintf (stderr, "Not ELF64\n"); + exit (1); + } + + e_shoff = elf_ehdr.e_shoff; + e_shentsize = elf_ehdr.e_shentsize; + e_shnum = elf_ehdr.e_shnum; + + /* Section header table */ + + elf_shdr_p = read_from_file (e_shoff, e_shentsize * e_shnum); + + /* Symbol table */ + + symtab_index = 0; + for (i = 0; i < e_shnum; i++) + { + if (elf_shdr_p[i].sh_type == SHT_SYMTAB) + { + symtab_index = i; + break; + } + } + + if (symtab_index == 0) + { + fprintf (stderr, "Can not find symbol table\n"); + exit (1); + } + + elf_sym_p = read_section (symtab_index, &symtab_size); + + /* String table */ + + strtab_index = elf_shdr_p[symtab_index].sh_link; + + elf_str_p = read_section (strtab_index, &strtab_size); + + /* Write */ + + r = write (symtab_fd, elf_sym_p, symtab_size); + if (r < 0) + { + perror ("write"); + exit (1); + } + if (r != symtab_size) + { + fprintf (stderr, "Error while write symtab\n"); + exit (1); + } + + r = write (strtab_fd, elf_str_p, 
strtab_size); + if (r < 0) + { + perror ("write"); + exit (1); + } + if (r != strtab_size) + { + fprintf (stderr, "Error while write strtab\n"); + exit (1); + } + + return 0; +} diff --git a/arch/e2k/boot/e2k_sic.h b/arch/e2k/boot/e2k_sic.h new file mode 100644 index 000000000000..7f77a9056af7 --- /dev/null +++ b/arch/e2k/boot/e2k_sic.h @@ -0,0 +1,76 @@ +#ifndef _BOOT_E2K_SIC_H_ +#define _BOOT_E2K_SIC_H_ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* + * NBR area configuration + */ +#undef BOOT_NSR_AREA_PHYS_BASE +#undef BOOT_NSR_AREA_SIZE +#undef BOOT_NBSR_OFFSET + +#if defined(CONFIG_ES2) +#define BOOT_NSR_AREA_PHYS_BASE ES2_NSR_AREA_PHYS_BASE +#define BOOT_NSR_AREA_SIZE ES2_NBSR_AREA_SIZE +#define BOOT_NBSR_OFFSET ES2_NBSR_AREA_OFFSET +#elif defined(CONFIG_E2S) +#define BOOT_NSR_AREA_PHYS_BASE E2S_NSR_AREA_PHYS_BASE +#define BOOT_NSR_AREA_SIZE E2S_NBSR_AREA_SIZE +#define BOOT_NBSR_OFFSET E2S_NBSR_AREA_OFFSET +#elif defined(CONFIG_E8C) +#define BOOT_NSR_AREA_PHYS_BASE E8C_NSR_AREA_PHYS_BASE +#define BOOT_NSR_AREA_SIZE E8C_NBSR_AREA_SIZE +#define BOOT_NBSR_OFFSET E8C_NBSR_AREA_OFFSET +#elif defined(CONFIG_E8C2) +#define BOOT_NSR_AREA_PHYS_BASE E8C2_NSR_AREA_PHYS_BASE +#define BOOT_NSR_AREA_SIZE E8C2_NBSR_AREA_SIZE +#define BOOT_NBSR_OFFSET E8C2_NBSR_AREA_OFFSET +#elif defined(CONFIG_E1CP) +#define BOOT_NSR_AREA_PHYS_BASE E1CP_NSR_AREA_PHYS_BASE +#define BOOT_NSR_AREA_SIZE E1CP_NBSR_AREA_SIZE +#define BOOT_NBSR_OFFSET E1CP_NBSR_AREA_OFFSET +#elif defined(CONFIG_E12C) +#define BOOT_NSR_AREA_PHYS_BASE E12C_NSR_AREA_PHYS_BASE +#define BOOT_NSR_AREA_SIZE E12C_NBSR_AREA_SIZE +#define BOOT_NBSR_OFFSET E12C_NBSR_AREA_OFFSET +#elif defined(CONFIG_E16C) +#define BOOT_NSR_AREA_PHYS_BASE E16C_NSR_AREA_PHYS_BASE +#define BOOT_NSR_AREA_SIZE E16C_NBSR_AREA_SIZE +#define BOOT_NBSR_OFFSET E16C_NBSR_AREA_OFFSET +#elif defined(CONFIG_E2C3) +#define BOOT_NSR_AREA_PHYS_BASE E2C3_NSR_AREA_PHYS_BASE +#define BOOT_NSR_AREA_SIZE 
E2C3_NBSR_AREA_SIZE +#define BOOT_NBSR_OFFSET E2C3_NBSR_AREA_OFFSET +#endif + +/* + * Nodes system registers area - NSR = { NSR0 ... NSRj ... } + * NSR is some part of common system communicator area SR + */ +#define BOOT_NODE_NSR_SIZE BOOT_NSR_AREA_SIZE +#undef THE_NODE_NSR_PHYS_BASE +#define THE_NODE_NSR_PHYS_BASE(node) \ + (BOOT_NSR_AREA_PHYS_BASE + (node * BOOT_NODE_NSR_SIZE)) + +/* + * Nodes processor system registers (north bridge) + * NBSR = { NBSR0 ... NBSRj ... } + * NBSR is some part of node system registers area NSR + */ +#define BOOT_NODE_NBSR_OFFSET BOOT_NBSR_OFFSET +#undef THE_NODE_NBSR_PHYS_BASE +#define THE_NODE_NBSR_PHYS_BASE(node) \ + ((unsigned char *)(THE_NODE_NSR_PHYS_BASE(node) + \ + BOOT_NODE_NBSR_OFFSET)) + +#endif /* _BOOT_E2K_SIC_H_ */ diff --git a/arch/e2k/boot/epic.c b/arch/e2k/boot/epic.c new file mode 100644 index 000000000000..1013476afd46 --- /dev/null +++ b/arch/e2k/boot/epic.c @@ -0,0 +1,150 @@ +/* + * CEPIC handling + */ + +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include "boot_io.h" +#include "pic.h" + +/**************************** DEBUG DEFINES *****************************/ +#undef DEBUG_BOOT_MODE +#undef Dprintk +#define DEBUG_BOOT_MODE 1 /* SMP CPU boot */ +#define Dprintk(fmt, ...) 
\ +do { \ + if (DEBUG_BOOT_MODE) \ + rom_printk(fmt, ##__VA_ARGS__); \ +} while (0) +/************************************************************************/ + +/* + * Print all CEPIC/PREPIC registers + */ +void boot_print_cepic(void) +{ + unsigned int value; + + value = native_epic_read_w(CEPIC_CTRL); + rom_printk("0xfee00000 = CEPIC_CTRL: 0x%x\n", value); + + value = native_epic_read_w(CEPIC_ID); + rom_printk("0xfee00010 = CEPIC_ID: 0x%x\n", value); + + value = native_epic_read_w(CEPIC_CPR); + rom_printk("0xfee00070 = CEPIC_CPR: 0x%x\n", value); + + value = native_epic_read_w(CEPIC_ESR); + rom_printk("0xfee00080 = CEPIC_ESR: 0x%x\n", value); + + value = native_epic_read_w(CEPIC_ESR2); + rom_printk("0xfee00090 = CEPIC_ESR2: 0x%x\n", value); + + /* CEPIC_EOI is write-only */ + + value = native_epic_read_w(CEPIC_CIR); + rom_printk("0xfee000b0 = CEPIC_CIR: 0x%x\n", value); + + /* Reading CEPIC_PNMIRR starts NMI handling */ + + value = native_epic_read_w(CEPIC_ICR); + rom_printk("0xfee00200 = CEPIC_ICR: 0x%x\n", value); + + value = native_epic_read_w(CEPIC_ICR2); + rom_printk("0xfee00204 = CEPIC_ICR2: 0x%x\n", value); + + value = native_epic_read_w(CEPIC_TIMER_LVTT); + rom_printk("0xfee00220 = CEPIC_TIMER_LVTT: 0x%x\n", value); + + value = native_epic_read_w(CEPIC_TIMER_INIT); + rom_printk("0xfee00230 = CEPIC_TIMER_INIT: 0x%x\n", value); + + value = native_epic_read_w(CEPIC_TIMER_CUR); + rom_printk("0xfee00240 = CEPIC_TIMER_CUR: 0x%x\n", value); + + value = native_epic_read_w(CEPIC_TIMER_DIV); + rom_printk("0xfee00250 = CEPIC_TIMER_DIV: 0x%x\n", value); + + value = native_epic_read_w(CEPIC_NM_TIMER_LVTT); + rom_printk("0xfee00260 = CEPIC_NM_TIMER_LVTT: 0x%x\n", value); + + value = native_epic_read_w(CEPIC_NM_TIMER_INIT); + rom_printk("0xfee00270 = CEPIC_NM_TIMER_INIT: 0x%x\n", value); + + value = native_epic_read_w(CEPIC_NM_TIMER_CUR); + rom_printk("0xfee00280 = CEPIC_NM_TIMER_CUR: 0x%x\n", value); + + value = native_epic_read_w(CEPIC_NM_TIMER_DIV); + 
rom_printk("0xfee00290 = CEPIC_NM_TIMER_DIV: 0x%x\n", value); + + value = native_epic_read_w(CEPIC_SVR); + rom_printk("0xfee002a0 = CEPIC_SVR: 0x%x\n", value); + + value = native_epic_read_w(CEPIC_PNMIRR_MASK); + rom_printk("0xfee002d0 = CEPIC_PNMIRR_MASK: 0x%x\n", value); + + /* Reading CEPIC_VECT_INTA starts MI handling */ + + value = native_epic_read_w(CEPIC_CTRL2); + rom_printk("0xfee01820 = CEPIC_CTRL2: 0x%x\n", value); + + value = native_epic_read_w(CEPIC_DAT); + rom_printk("0xfee01830 = CEPIC_DAT: 0x%x\n", value); + + value = native_epic_read_w(CEPIC_DAT2); + rom_printk("0xfee01834 = CEPIC_DAT2: 0x%x\n", value); + + value = native_epic_read_w(CEPIC_EPIC_INT); + rom_printk("0xfee01850 = CEPIC_EPIC_INT: 0x%x\n", value); + + value = native_epic_read_w(CEPIC_EPIC_INT2); + rom_printk("0xfee01860 = CEPIC_EPIC_INT2: 0x%x\n", value); + + value = native_epic_read_w(CEPIC_EPIC_INT3); + rom_printk("0xfee01864 = CEPIC_EPIC_INT3: 0x%x\n", value); + + rom_printk("\n"); +} + +/* + * Placeholder for boot-time CEPIC setup. Currently reset state is fine for + * kernel, so do nothing + */ +void boot_setup_cepic(int cpu) +{ +} + +/* + * Ensure that AP core received startup interrupt with matching address. + * Print error messages, if that is not the case. 
+ */ +void debug_epic_startup(int cpu, unsigned int value, unsigned long startup_addr) +{ + unsigned long addr; + + Dprintk("CPU #%d : CEPIC_PNMIRR value = 0x%x\n", cpu, value); + + if (!(value & CEPIC_PNMIRR_STARTUP)) + rom_printk("CPU #%d : ERROR: CEPIC startup bit is not set\n", + cpu); + + addr = value & CEPIC_PNMIRR_STARTUP_ENTRY; + + Dprintk("CPU #%d : CEPIC received STARTUP with addr 0x%x\n", cpu, addr); + + if (addr != startup_addr >> 12) + rom_printk("CPU #%d : ERROR : CEPIC incorrect startup addr\n", + cpu); +} diff --git a/arch/e2k/boot/epiggy.lds b/arch/e2k/boot/epiggy.lds new file mode 100644 index 000000000000..109a89d0b0e9 --- /dev/null +++ b/arch/e2k/boot/epiggy.lds @@ -0,0 +1,6 @@ +SECTIONS +{ + /DISCARD/ : { + *(.data) + } +} diff --git a/arch/e2k/boot/flash.lds b/arch/e2k/boot/flash.lds new file mode 100644 index 000000000000..1608a9e1030a --- /dev/null +++ b/arch/e2k/boot/flash.lds @@ -0,0 +1,93 @@ + +OUTPUT_FORMAT("elf64-e2k", "elf64-e2k", "elf64-e2k") +/* OUTPUT_ARCH(e2k) Defined by Makefile */ +ENTRY(start) + +MEMORY { + ROM (RX) : ORIGIN = 0xFFFF000000, LENGTH = 16M + RAM (W) : ORIGIN = 1M, LENGTH = 3M /* 31M */ + +/* ROM chunks */ + ROML (RX) : ORIGIN = 4M, LENGTH = 24M + ROMH (RX) : ORIGIN = 4M + 24M - 4 , LENGTH = 16M - 2K + + CPUSTART (RX) : ORIGIN = 4M + 24M + 16M - 2K , LENGTH = 2K +} + +SECTIONS { + _start = .; + .text : { + _text = .; /* Text and read-only data */ + + . 
= ALIGN(4096); + piggy.o(.rodata) + + *(EXCLUDE_FILE (arch/e2k/boot/romstartup.o arch/e2k/boot/apstartup.o) .text .gnu.linkonce.t.*) + *(EXCLUDE_FILE (arch/e2k/boot/romstartup.o piggy.o) .rodata) + + _etext = .; /* End of text section */ + + } > ROML + + .data : AT ( ADDR(.text) + SIZEOF ( .text ) ) { + _data = .; /* Data section */ + __apstartup_start = .; + arch/e2k/boot/apstartup.o(.text) + __apstartup_end = .; + *(EXCLUDE_FILE (arch/e2k/boot/romstartup.o) .data .gnu.linkonce.d.*) + + _edata = .; /* End of data section */ + } > RAM + + + + .bss : { + __bss_start = .; /* BSS */ + + *(EXCLUDE_FILE (arch/e2k/boot/romstartup.o) .bss COMMON) + + __bss_stop = . ; + } > RAM + + .initrd : { + *(.initrd) + } > ROMH + + .symtable : { + *(.symtable) + } > ROMH + + .strtable : { + *(.strtable) + } > ROMH + + .text.startup : { + __startup_start = .; + + arch/e2k/boot/romstartup.o(.text) + arch/e2k/boot/romstartup.o(.rodata) + arch/e2k/boot/romstartup.o(.data) + arch/e2k/boot/romstartup.o(.bss) + + __startup_end = .; + + . = ALIGN(2048); /* Round up the image size exactly to 16M. */ + + } > CPUSTART + + /* Sections to be discarded */ + /DISCARD/ : { + *(.info) + } + + __bios_start_code = ADDR(.text); + __bios_size_code = SIZEOF(.text); + __bios_start_data = ADDR(.text) + SIZEOF( .text ); + __bios_size_data = SIZEOF(.data); + __bios_size_data_plus_bss = SIZEOF(.data) + SIZEOF( .bss ); + + /* lld compatibility items. These calculations may be not quite accurate. 
*/ + __bios_size_ld = SIZEOF ( .text ); + __bios_fsize_ld = SIZEOF ( .text ) + SIZEOF ( .text ) + SIZEOF ( .bss ); + __bios_entry_ld = __startup_start - _text; +} diff --git a/arch/e2k/boot/info.c b/arch/e2k/boot/info.c new file mode 100644 index 000000000000..fb30d1fc9da5 --- /dev/null +++ b/arch/e2k/boot/info.c @@ -0,0 +1,24 @@ +#include +#include +#include + +extern u64 __kernel_size; + + +const struct bootblock_struct boot_block = +{ + X86BOOT_SIGNATURE, /* signature */ + 0, 0, 0, /* boot disk C/H/S */ + 0, /* vga mode */ + 0, /* number of memory banks */ + 0UL, /* kernel base */ + (u64) & __kernel_size, /* kernel size */ + 0UL, /* ramdisk base */ + 0UL, /* ramdisk size */ +#if 0 + CONFIG_CMDLINE, /* kernel command line */ + { 0UL, 0UL } /* first bank descriptor */ +#else + CONFIG_CMDLINE /* kernel command line */ +#endif +}; diff --git a/arch/e2k/boot/jumpstart.c b/arch/e2k/boot/jumpstart.c new file mode 100644 index 000000000000..26b74a6fb30d --- /dev/null +++ b/arch/e2k/boot/jumpstart.c @@ -0,0 +1,3264 @@ + +#include +#include +#include + +#include "bios/printk.h" + +#include +#include +#include +#include +#include +#ifdef CONFIG_SMP +#include +#endif /* CONFIG_SMP */ +#include "pic.h" + +#include +#include +#include +#ifdef CONFIG_E2K_SIC +#include "e2k_sic.h" +#include +#endif /* CONFIG_E2K_SIC */ +#include "topology.h" +#include "boot.h" +#include "bios/pci.h" + +#include + +#undef DEBUG_RT_MODE +#undef DebugRT +#define DEBUG_RT_MODE 0 /* routing registers */ +#define DebugRT if (DEBUG_RT_MODE) rom_printk + +#undef DEBUG_MRT_MODE +#undef DebugMRT +#define DEBUG_MRT_MODE 1 /* memory routing registers */ +#define DebugMRT if (DEBUG_MRT_MODE) rom_printk + +#undef DEBUG_IORT_MODE +#undef DebugIORT +#define DEBUG_IORT_MODE 1 /* IO memory routing registers */ +#define DebugIORT if (DEBUG_IORT_MODE) rom_printk + +#define BOOT_VER_STR "BOOT SIMULATOR" + +extern long input_data, input_data_end, input_data_noncomp_size; +extern long boot_mode; + +#ifdef 
CONFIG_BLK_DEV_INITRD +extern long initrd_data, initrd_data_end; +#endif /* CONFIG_BLK_DEV_INITRD */ + +#ifdef CONFIG_CMDLINE +#define CMDLINE CONFIG_CMDLINE +#else +#define CMDLINE ""; +#endif + +#define ALIGN_DOWN_TO_MASK(addr, mask) ((addr) & ~(mask)) +#define ALIGN_UP_TO_MASK(addr, mask) (((addr) + (mask)) & ~(mask)) +#define ALIGN_DOWN_TO_SIZE(addr, size) \ + (((size) == 0) ? (addr) : ALIGN_DOWN_TO_MASK(addr, ((size)-1))) +#define ALIGN_UP_TO_SIZE(addr, size) \ + (((size) == 0) ? (addr) : ALIGN_UP_TO_MASK(addr, ((size)-1))) + +char cmd_preset[] = CMDLINE; +char cmd_buf[KSTRMAX_SIZE_EX + KSTRMAX_SIZE]; +char *cmd_line = cmd_buf; +char *free_memory_p; + +#ifdef CONFIG_SMP +extern atomic_t cpu_count; +extern int phys_cpu_num; +extern void do_smp_commence(void); +extern volatile unsigned long phys_cpu_pres_map; +#endif /* CONFIG_SMP */ + +volatile unsigned long phys_node_pres_map = 0; +int phys_node_num = 0; +volatile unsigned long online_iohubs_map = 0; +int online_iohubs_num = 0; +volatile unsigned long possible_iohubs_map = 0; +int possible_iohubs_num = 0; +volatile unsigned long online_rdmas_map = 0; +int online_rdmas_num = 0; +volatile unsigned long possible_rdmas_map = 0; +int possible_rdmas_num = 0; + +static e2k_addr_t kernel_areabase; +static e2k_size_t kernel_areasize; +bootblock_struct_t *bootblock; +boot_info_t *boot_info; +bios_info_t *bios_info; +#ifdef CONFIG_RECOVERY +int recovery_flag = 0; +int not_read_image; +#endif /* CONFIG_RECOVERY */ +int banks_ex_num = 0; + +void set_kernel_image_pointers(void); + +#ifdef CONFIG_BIOS +extern void bios_first(void); +extern void bios_rest(void); +#ifdef CONFIG_ENABLE_ELBRUS_PCIBIOS +extern void pci_bios(void); +#endif +extern void video_bios(void); +#endif + +/* Memory probing definitions block */ + +#define _1MB (1024 * 1024UL) +#define _1GB (1024 * _1MB) +#define _64MB (64 * _1MB) +#define _2MB ( 2 * _1MB) + +#ifndef CONFIG_MEMLIMIT +#define CONFIG_MEMLIMIT (2 * 1024) +#endif + +#define PROBE_MEM_LIMIT 
(CONFIG_MEMLIMIT * _1MB) + +#ifndef CONFIG_EXT_MEMLIMIT +#define CONFIG_EXT_MEMLIMIT (60 * 1024) +#endif + +#define PROBE_EXT_MEM_LIMIT (CONFIG_EXT_MEMLIMIT * _1MB) + +#define LO_MEMORY_START 0x00000000000ULL +#define ES2_HI_MEMORY_START 0x00400000000ULL +#define ES2_HI_MEMORY_NODE_MAX_SIZE 0x02000000000ULL +#define E2S_HI_MEMORY_START 0x02000000000ULL +#define E2S_HI_MEMORY_NODE_MAX_SIZE 0x02000000000ULL +#define E8C_HI_MEMORY_START E2S_HI_MEMORY_START +#define E8C_HI_MEMORY_NODE_MAX_SIZE E2S_HI_MEMORY_NODE_MAX_SIZE +#define E16C_HI_MEMORY_START 0x10000000000ULL +#define E16C_HI_MEMORY_NODE_MAX_SIZE 0x10000000000ULL + +#if defined(CONFIG_E1CP) + #define HI_MEMORY_START E2S_HI_MEMORY_START + #define HI_MEMORY_NODE_MAX_SIZE E2S_HI_MEMORY_NODE_MAX_SIZE +#elif defined(CONFIG_ES2) + #define HI_MEMORY_START ES2_HI_MEMORY_START + #define HI_MEMORY_NODE_MAX_SIZE ES2_HI_MEMORY_NODE_MAX_SIZE +#elif defined(CONFIG_E2S) + #define HI_MEMORY_START E2S_HI_MEMORY_START + #define HI_MEMORY_NODE_MAX_SIZE E2S_HI_MEMORY_NODE_MAX_SIZE +#elif defined(CONFIG_E8C) || defined(CONFIG_E8C2) + #define HI_MEMORY_START E8C_HI_MEMORY_START + #define HI_MEMORY_NODE_MAX_SIZE E8C_HI_MEMORY_NODE_MAX_SIZE +#elif defined(CONFIG_E2C3) + #define HI_MEMORY_START E16C_HI_MEMORY_START + #define HI_MEMORY_NODE_MAX_SIZE E16C_HI_MEMORY_NODE_MAX_SIZE +#elif defined(CONFIG_E12C) + #define HI_MEMORY_START E16C_HI_MEMORY_START + #define HI_MEMORY_NODE_MAX_SIZE E16C_HI_MEMORY_NODE_MAX_SIZE +#elif defined(CONFIG_E16C) + #define HI_MEMORY_START E16C_HI_MEMORY_START + #define HI_MEMORY_NODE_MAX_SIZE E16C_HI_MEMORY_NODE_MAX_SIZE +#else + #error "Unknown MicroProcessor type" +#endif + +#if defined(CONFIG_VRAM_SIZE_128) +#define EG_VRAM_SIZE_FLAGS EG_CFG_VRAM_SIZE_128 +#define EG_VRAM_MBYTES_SIZE (128 * 1024 * 1024) +#elif defined(CONFIG_VRAM_SIZE_256) +#define EG_VRAM_SIZE_FLAGS EG_CFG_VRAM_SIZE_256 +#define EG_VRAM_MBYTES_SIZE (256 * 1024 * 1024) +#elif defined(CONFIG_VRAM_SIZE_512) +#define EG_VRAM_SIZE_FLAGS 
EG_CFG_VRAM_SIZE_512 +#define EG_VRAM_MBYTES_SIZE (512 * 1024 * 1024) +#elif defined(CONFIG_VRAM_SIZE_1024) +#define EG_VRAM_SIZE_FLAGS EG_CFG_VRAM_SIZE_1024 +#define EG_VRAM_MBYTES_SIZE (1024 * 1024 * 1024) +#elif defined(CONFIG_VRAM_DISABLE) +#define EG_VRAM_MBYTES_SIZE 0 +#else + #error "Undefined embeded graphic VRAM size" +#endif /* CONFIG_VRAM_SIZE_ */ + +#define START_KERNEL_SYSCALL 12 + +#define ALIGN_UP(addr, size) (((u64)(addr) + ((size)-1)) & ~((size)-1)) + +u64 size_real; +#ifdef CONFIG_ENABLE_EXTMEM +u64 hi_memory_start = HI_MEMORY_START; +#endif /* CONFIG_ENABLE_EXTMEM */ +#ifdef CONFIG_ONLY_BSP_MEMORY +#define only_BSP_has_memory (memory_pres_map == 0x1) +#define memory_pres_map CONFIG_MEMORY_PRES_MAP +#else /* ! CONFIG_ONLY_BSP_MEMORY */ +#define only_BSP_has_memory 0 +#define memory_pres_map 0xffff +#endif /* CONFIG_ONLY_BSP_MEMORY */ + +inline void scall2(bootblock_struct_t *bootblock) +{ + (void) E2K_SYSCALL(START_KERNEL_SYSCALL, /* Trap number */ + 0, /* empty sysnum */ + 1, /* single argument */ + (long) bootblock); /* the argument */ +} + +size_t +bios_strlen(const char *s) +{ + int len = 0; + while (*s++) len++; + return len; +} + +static inline u64 get_hi_memory_start(int node_id) +{ + return HI_MEMORY_START + (HI_MEMORY_START * node_id); +} +static inline u64 get_lo_memory_size(int node_id) +{ + return (PCI_MEM_START - LO_MEMORY_START) / phys_node_num; +} +static inline u64 get_lo_memory_start(int node_id) +{ + return LO_MEMORY_START + get_lo_memory_size(node_id) * node_id; +} + +#ifdef CONFIG_E2K_SIC +static inline e2k_rt_mhi_struct_t +get_rt_mhi(int mhi_no, int node_on, int node_for) +{ + e2k_rt_mhi_struct_t rt_mhi; + + AS_WORD(rt_mhi) = 0x000000ff; + if (mhi_no != 0 && mhi_no != node_for) { + rom_printk("BUG: memory router setting is implemented on " + "node #0 for all other nodes\n"); + return rt_mhi; + } + switch (mhi_no) { + case 0: + AS_WORD(rt_mhi) = NATIVE_GET_SICREG(rt_mhi0, 0, node_on); + return rt_mhi; + case 1: + 
AS_WORD(rt_mhi) = NATIVE_GET_SICREG(rt_mhi1, 0, node_on); + return rt_mhi; + case 2: + AS_WORD(rt_mhi) = NATIVE_GET_SICREG(rt_mhi2, 0, node_on); + return rt_mhi; + case 3: + AS_WORD(rt_mhi) = NATIVE_GET_SICREG(rt_mhi3, 0, node_on); + return rt_mhi; + default: + rom_printk("BUG : get_rt_mhi() : invalid RT_MHI #%d >= " + "%d (max node numbers), ignored\n", + mhi_no, MAX_NUMNODES); + return rt_mhi; + } +} +static inline void +set_rt_mhi(e2k_rt_mhi_struct_t rt_mhi, int mhi_no, int node_on, int node_for) +{ + if (mhi_no != 0 && mhi_no != node_for) { + rom_printk("BUG: memory router setting is only implemented on " + "node #0 for all other nodes\n"); + return; + } + switch (mhi_no) { + case 0: + NATIVE_SET_SICREG(rt_mhi0, AS_WORD(rt_mhi), 0, node_on); + return; + case 1: + NATIVE_SET_SICREG(rt_mhi1, AS_WORD(rt_mhi), 0, node_on); + return; + case 2: + NATIVE_SET_SICREG(rt_mhi2, AS_WORD(rt_mhi), 0, node_on); + return; + case 3: + NATIVE_SET_SICREG(rt_mhi3, AS_WORD(rt_mhi), 0, node_on); + return; + default: + rom_printk("BUG : get_rt_mhi() : invalid RT_MHI #%d >= " + "%d (max node numbers), ignored\n", + mhi_no, MAX_NUMNODES); + return; + } +} +#endif /* CONFIG_E2K_SIC */ + +static void +add_memory_region(boot_info_t *boot_info, int node_id, e2k_addr_t start_addr, + e2k_size_t size) +{ + u64 end_addr = start_addr + size; + bank_info_t *node_banks = boot_info->nodes_mem[node_id].banks; + int bank; + +#ifdef CONFIG_DISCONTIGMEM + if (node_id >= MAX_NUMNODES) { + rom_printk("BUG : add_memory_region() : invalid node #%d >= " + "%d (max node numbers), ignored\n", + node_id, MAX_NUMNODES); + return; + } else +#endif /* CONFIG_DISCONTIGMEM */ + if (node_id >= L_MAX_MEM_NUMNODES) { + rom_printk("BUG : add_memory_region() : node #%d >= " + "%d (max nodes in nodes_mem table), ignored\n", + node_id, L_MAX_MEM_NUMNODES); + return; + } + for (bank = 0; bank < L_MAX_NODE_PHYS_BANKS_FUSTY; bank++) { + if (node_banks->size == 0) + break; + node_banks++; + } + if (start_addr == 0 && size == 
0) { + if (bank == L_MAX_NODE_PHYS_BANKS_FUSTY) { + banks_ex_num++; + rom_printk("Count of busy banks of memory in extended " + "area was corrected from 0x%X to 0x%X\n", + banks_ex_num - 1, banks_ex_num); + } + return; + } + if (bank >= L_MAX_NODE_PHYS_BANKS_FUSTY) { + rom_printk("Node #%d has banks of memory in extended area\n", + node_id); + bank = -1; + if (banks_ex_num >= L_MAX_PHYS_BANKS_EX) { + rom_printk("BUG : add_memory_region() : banks of " + "memory extended area is full, ignored\n"); + return; + } + node_banks = boot_info->bios.banks_ex + banks_ex_num++; + } + node_banks->address = start_addr; + node_banks->size = size; + if (bank != -1) + rom_printk("Node #%d : physical memory bank #%d: base from " + "0x%X to 0x%X (%d Mgb)\n", + node_id, bank, start_addr, end_addr, + (int)(size / _1MB)); + else + rom_printk("Node #%d : extended physical memory bank #%d: " + "base from 0x%X to 0x%X (%d Mgb)\n", + node_id, banks_ex_num - 1, start_addr, end_addr, + (int)(size / _1MB)); + boot_info->num_of_banks ++; +} + +static u64 +probe_memory_region(boot_info_t *boot_info, e2k_addr_t start_addr, + e2k_size_t size) +{ + u64 addr = start_addr; + u64 address = start_addr; + u64 end_addr = start_addr + size; + u64 len; + u64 memory_size = 0; + u64 tmpvar; + + if (start_addr >= E2K_MAIN_MEM_REGION_START && + start_addr < E2K_MAIN_MEM_REGION_END) { + if (start_addr + size > E2K_MAIN_MEM_REGION_END) { + size = E2K_MAIN_MEM_REGION_END - start_addr; + end_addr = start_addr + size; + } + } else if (start_addr >= E2K_MAIN_MEM_REGION_END && + start_addr < hi_memory_start) { + rom_printk("no low memory for node"); + return 0; + } +#ifdef CONFIG_E2K_LEGACY_SIC + /* Set memory range probing at TOP register of host bridge */ + if (end_addr >= APIC_DEFAULT_PHYS_BASE) { + end_addr = APIC_DEFAULT_PHYS_BASE; + } + early_writel_hb_reg(end_addr, HB_PCI_TOM); + +#endif /* CONFIG_E2K_LEGACY_SIC */ + rom_printk(" from addr 0x%X to 0x%X ... 
", + start_addr, start_addr + size); + while (address < end_addr) { + + if (address < _2MB) { + addr = _2MB; + } + for ( ; addr < end_addr; addr += _1MB) { + /* Skip addresses reserved by PIC */ + if (addr == PIC_DEFAULT_PHYS_BASE + || addr == IO_PIC_DEFAULT_PHYS_BASE) + break; + + /* + !!! WARNING !!! NEEDSWORK !!! + Improper tagged variable handling! + */ + tmpvar = NATIVE_READ_MAS_D(addr, MAS_IOADDR); + + NATIVE_WRITE_MAS_D(addr, 0x0123456789abcdef, + MAS_IOADDR); + if (NATIVE_READ_MAS_D(addr, MAS_IOADDR) != + 0x0123456789abcdef) + break; +#ifdef CONFIG_E2K_SIC + if ((addr - start_addr) / E2K_SIC_MIN_MEMORY_BANK) { + u64 offset = addr % E2K_SIC_MIN_MEMORY_BANK; + u64 start_bank = start_addr + offset; + + if (NATIVE_READ_MAS_D(start_bank, MAS_IOADDR) == + 0x0123456789abcdef) { + /* + * New bank address point to start + * bank address, so enable memory size + * limit is reached + */ + NATIVE_WRITE_MAS_D(addr, tmpvar, + MAS_IOADDR); + break; + } + } +#endif /* CONFIG_E2K_SIC */ + /* + !!! WARNING !!! NEEDSWORK !!! + Improper tagged variable handling! + */ + NATIVE_WRITE_MAS_D(addr, tmpvar, MAS_IOADDR); + + rom_putc('+'); + } + len = addr - address; + rom_putc('\n'); + memory_size = len; + address = addr; + /* + * Memory on e2k with SIC cannot be holed, so no more memory + */ + break; + if (address >= end_addr) + break; + for ( ; addr < end_addr; addr += _1MB) { + /* Skip addresses reserved by PIC */ + if (addr == PIC_DEFAULT_PHYS_BASE + || addr == IO_PIC_DEFAULT_PHYS_BASE) { + rom_putc('-'); + continue; + } + + /* + !!! WARNING !!! NEEDSWORK !!! + Improper tagged variable handling! + */ + tmpvar = NATIVE_READ_MAS_D(addr, MAS_IOADDR); + + NATIVE_WRITE_MAS_D(addr, 0x0123456789abcdef, + MAS_IOADDR); + if (NATIVE_READ_MAS_D(addr, MAS_IOADDR) == + 0x0123456789abcdef) { + + /* + !!! WARNING !!! NEEDSWORK !!! + Improper tagged variable handling! 
+ */ + NATIVE_WRITE_MAS_D(addr, tmpvar, MAS_IOADDR); + rom_putc('\n'); + rom_printk("Physical memory hole: base 0x%X " + "size 0x%X (%d Mgb)\n", + address, addr - address, + (int)((addr - address) / _1MB)); + break; + } + + rom_putc('-'); + } + address = addr; + } + + rom_putc('\n'); + return memory_size; +} + +static u64 +probe_memory(boot_info_t *boot_info, int mhi_no, int node_on, int node_for) +{ + u64 address = 0; + u64 size = 0; +#ifdef CONFIG_E2K_SIC + u64 hi_start, hi_end; + e2k_rt_mhi_struct_t rt_mhi; +#endif + + if (mhi_no == 0) { + rom_printk("Physical memory probing\n"); + boot_info->num_of_banks = 0; + } + address = E2K_MAIN_MEM_REGION_START; + size = PROBE_MEM_LIMIT; +#if defined(CONFIG_E2K_SIC) && defined(CONFIG_ENABLE_EXTMEM) + address = get_hi_memory_start(node_for); + size = ALIGN_UP(size, E2K_SIC_SIZE_RT_MLO); + size += PROBE_EXT_MEM_LIMIT; +#endif /* CONFIG_E2K_SIC && CONFIG_ENABLE_EXTMEM */ +#ifdef CONFIG_E2K_SIC + if (!is_power_of_2(size)) { + /* all memory banks size can be only 2^n */ + size = __roundup_pow_of_two(size); + } +#endif /* CONFIG_E2K_SIC */ + +#ifdef CONFIG_E2K_SIC + if (!is_power_of_2(size)) { + /* all memory banks size can be only 2^n */ + size = __rounddown_pow_of_two(size); + } +#else /* ! 
CONFIG_E2K_SIC */ + if (size == 0) + size = E2K_MAIN_MEM_REGION_END - address; + if (address + size > E2K_MAIN_MEM_REGION_END) + size = E2K_MAIN_MEM_REGION_END - address; +#endif /* CONFIG_E2K_SIC */ + + rom_printk(" init addr = 0x%X, init size = 0x%X\n", address, size); + +#ifdef CONFIG_E2K_SIC + rt_mhi = get_rt_mhi(mhi_no, node_on, node_for); + DebugMRT("get_memory_filters: on node #%d rt_mhi%d = 0x%x\n", + node_on, mhi_no, AS_WORD(rt_mhi)); + hi_start = ALIGN_DOWN_TO_SIZE(address, E2K_SIC_SIZE_RT_MHI); + hi_end = ALIGN_UP_TO_SIZE(address + size, E2K_SIC_SIZE_RT_MHI); + AS_STRUCT(rt_mhi).bgn = hi_start >> E2K_SIC_ALIGN_RT_MHI; + AS_STRUCT(rt_mhi).end = (hi_end - 1) >> E2K_SIC_ALIGN_RT_MHI; + DebugMRT("set_memory_filters: on node #%d set rt_mhi%d to 0x%x\n", + node_on, mhi_no, + AS_WORD(get_rt_mhi(mhi_no, node_on, node_for))); + set_rt_mhi(rt_mhi, mhi_no, node_on, node_for); + if (mhi_no != 0) { + /* setup rt_mhi0 on node 'for' */ + DebugMRT("set_memory_filters: on node #%d set rt_mhi%d " + "to 0x%x\n", + node_for, 0, + AS_WORD(get_rt_mhi(0, node_for, node_for))); + set_rt_mhi(rt_mhi, 0, node_for, node_for); + } + rom_printk("NODE #%d high memory router set from 0x%X to 0x%X\n", + node_on, hi_start, hi_end); +#endif + + size_real = probe_memory_region(boot_info, address, size); + return size_real; +} + +static void +add_busy_memory_area(boot_info_t *boot_info, + e2k_addr_t area_start, e2k_addr_t area_end) +{ + int num_of_busy = boot_info->num_of_busy; + bank_info_t *busy_area = &boot_info->busy[num_of_busy]; + + busy_area->address = area_start; + busy_area->size = area_end - area_start; + + rom_printk("ROM loader busy memory area #%d start 0x%X, end 0x%X\n", + num_of_busy, area_start, area_end); + + num_of_busy ++; + boot_info->num_of_busy = num_of_busy; +} + +#ifdef CONFIG_L_IO_APIC +#ifndef CONFIG_ENABLE_BIOS_MPTABLE +static int +mpf_do_checksum(unsigned char *mp, int len) +{ + int sum = 0; + + while (len--) + sum += *mp++; + + return 0x100 - (sum & 0xFF); +} + 
+static void +set_mpt_config(struct intel_mp_floating *mpf) +{ + + mpf->mpf_signature[0] = '_'; /* "_MP_" */ + mpf->mpf_signature[1] = 'M'; + mpf->mpf_signature[2] = 'P'; + mpf->mpf_signature[3] = '_'; + mpf->mpf_physptr = 0; /* MP Configuration Table */ + /* does not exist */ + mpf->mpf_length = 0x01; + mpf->mpf_specification = 0x01; + mpf->mpf_checksum = 0; /* ??? */ + mpf->mpf_feature1 = 1; /* If 0 MP CT exist, */ + /* else # default CT */ + mpf->mpf_feature2 = 1<<7; /* PIC mode */ + mpf->mpf_feature3 = 0; + mpf->mpf_feature4 = 0; + mpf->mpf_feature5 = 0; + mpf->mpf_checksum = mpf_do_checksum((unsigned char *)mpf, + sizeof (*mpf)); +} +#endif +#endif /* CONFIG_L_IO_APIC */ + +static inline e2k_addr_t +allocate_mpf_structure(void) +{ +#ifndef CONFIG_L_IO_APIC + return (e2k_addr_t)0; +#else + + return (e2k_addr_t) malloc_aligned(PAGE_SIZE, PAGE_SIZE); +#endif /* ! (CONFIG_L_IO_APIC) */ +} + +static void +create_smp_config(boot_info_t *boot_info) +{ + +#ifndef CONFIG_SMP + boot_info->num_of_cpus = 1; + boot_info->num_of_nodes = 1; + boot_info->nodes_map = 0x1; +#else + boot_info->num_of_cpus = phys_cpu_num; + boot_info->num_of_nodes = phys_node_num; + boot_info->nodes_map = phys_node_pres_map; +#endif /* CONFIG_SMP */ + + boot_info->mp_table_base = allocate_mpf_structure(); + +#ifdef CONFIG_L_IO_APIC +#ifndef CONFIG_ENABLE_BIOS_MPTABLE + set_mpt_config((struct intel_mp_floating *)boot_info->mp_table_base); +#else + write_smp_table((struct intel_mp_floating *)boot_info->mp_table_base, + boot_info->num_of_cpus); +#endif /* CONFIG_BIOS */ + rom_printk("MP-table is starting at: 0x%X size 0x%x\n", + boot_info->mp_table_base, PAGE_SIZE); + +#endif /* CONFIG_L_IO_APIC */ +} + +#ifdef CONFIG_RECOVERY +static void +recover_smp_config(boot_info_t *recovery_info) +{ + (void) allocate_mpf_structure(); + +#ifdef CONFIG_SMP + if (recovery_info->num_of_cpus != phys_cpu_num) { + rom_puts("ERROR: Invalid number of live CPUs to recover " + "kernel\n"); + rom_printk("Number of live 
CPUs %d is not %d as from " + "'recovery_info'\n", + phys_cpu_num, recovery_info->num_of_cpus); + } +#endif /* CONFIG_SMP */ + +} +#endif /* CONFIG_RECOVERY */ + +#ifdef CONFIG_CMDLINE_PROMPT + +static void kernel_command_prompt(char *line, char *preset) +{ + char *cp, ch; + int sec_start, sec_stop; + +#define COMMAND_PROMPT_TIMEOUT 3 + + rom_printk("\nCommand: "); + cp = line; + + /* Simple PC-keyboard manager */ + memcpy(line, preset, bios_strlen(preset)); + while ( *cp ) rom_putc(*cp++); + + sec_start = CMOS_READ(RTC_SECONDS); + sec_stop = sec_start + COMMAND_PROMPT_TIMEOUT; + if (sec_stop > 60) + sec_stop = sec_stop - 60; + + while (CMOS_READ(RTC_SECONDS) != sec_stop) { + if (keyb_tstc()) { + while ((ch = rom_getc()) != '\n' && + ch != '\r') { + if (ch == '\b') { + if (cp != line) { + cp--; + rom_puts("\b \b"); + }; + } else { + *cp++ = ch; + rom_putc(ch); + }; + } + break; /* Exit 'timer' loop */ + } + } + + *cp = 0; + rom_putc('\n'); +} + +#endif + +#ifdef CONFIG_E2K_SIC +#ifdef CONFIG_E2K_FULL_SIC +static void configure_routing_regs(void) +{ + e2k_rt_lcfg_struct_t rt_lcfg; + e2k_rt_mlo_struct_t rt_mlo; + e2k_st_p_struct_t st_p; + + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg0, E2K_MAX_CL_NUM, 0); + DebugRT("configure_routing_regs: before setting up: rt_lcfg = 0x%x\n", + E2K_RT_LCFG_reg(rt_lcfg)); + + DebugRT("configure_routing_regs: configure RT_LCFGj\n"); + phys_node_num = 1; + phys_node_pres_map = 0x1; + + AS_WORD(st_p) = NATIVE_GET_SICREG(st_p, E2K_MAX_CL_NUM, 0); + DebugRT("configure_routing_regs: st_p = 0x%x\n", AS_WORD(st_p)); + +if (st_p.E2K_ST_P_pl_val & 0x1){ // 001 - CPU 1 is present +/*********************** CONFIGURE KNOB 1 ***********************************/ + phys_node_num ++; + phys_node_pres_map |= 0x02; + /* Open link CPU 0 -> CPU 1 */ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg1, E2K_MAX_CL_NUM, 0); + E2K_RT_LCFG_vp(rt_lcfg) = 1; + E2K_RT_LCFG_vb(rt_lcfg) = 0; + E2K_RT_LCFG_vio(rt_lcfg) = 0; + 
NATIVE_SET_SICREG(rt_lcfg1, E2K_RT_LCFG_reg(rt_lcfg), E2K_MAX_CL_NUM, 0); + + /* setup LCFG0 for knob 1; initially knob 1 = knob 3 */ + E2K_RT_LCFG_reg(rt_lcfg) = + NATIVE_GET_SICREG(rt_lcfg0, E2K_MAX_CL_NUM, 3); + /* open all links for knob 3 */ + E2K_RT_LCFG_vp(rt_lcfg) = 1; + E2K_RT_LCFG_vb(rt_lcfg) = 0; + E2K_RT_LCFG_vio(rt_lcfg) = 0; + /* setting knob cluster to 0 */ + E2K_RT_LCFG_cln(rt_lcfg) = 0; + /* setting knob number 3 to 1 */ + E2K_RT_LCFG_pln(rt_lcfg) = 1; + NATIVE_SET_SICREG(rt_lcfg0, E2K_RT_LCFG_reg(rt_lcfg), + E2K_MAX_CL_NUM, 3); + +/* setup LCFGj for knob 1;*/ + + /* change parameters for BSP (due to new params for knob 1: cln = 0| pln = 1) */ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg0, E2K_MAX_CL_NUM, 0); + E2K_RT_LCFG_cln(rt_lcfg) = 0; + NATIVE_SET_SICREG(rt_lcfg0, E2K_RT_LCFG_reg(rt_lcfg), E2K_MAX_CL_NUM, 0); + /* change parameters for link CPU 0 -> CPU 1 (due to new params for knob 1: pln = 1) */ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg1, 0, 0); + E2K_RT_LCFG_pln(rt_lcfg) = 1; + NATIVE_SET_SICREG(rt_lcfg1, E2K_RT_LCFG_reg(rt_lcfg), 0, 0); + /****************************/ + if (st_p.E2K_ST_P_pl_val & 0x2){ // 010 - Node 2 is present + /**** setup LCFG1 params ****/ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg1, 0, 1); + /* open all links for knob 1 */ + E2K_RT_LCFG_vp(rt_lcfg) = 1; + E2K_RT_LCFG_vb(rt_lcfg) = 0; + E2K_RT_LCFG_vio(rt_lcfg) = 0; + /* setting link CPU 1 -> CPU 2 */ + E2K_RT_LCFG_pln(rt_lcfg) = 2; + NATIVE_SET_SICREG(rt_lcfg1, E2K_RT_LCFG_reg(rt_lcfg), 0, 1); + }else{ + /* close link CPU 1 -> CPU 2 */ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg1, 0, 1); + E2K_RT_LCFG_vp(rt_lcfg) = 0; + NATIVE_SET_SICREG(rt_lcfg1, E2K_RT_LCFG_reg(rt_lcfg), 0, 1); + } + if (st_p.E2K_ST_P_pl_val & 0x4){ // 100 - Node 3 is present + /**** setup LCFG2 params ****/ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg2, 0, 1); + /* open all links for knob 1 */ + E2K_RT_LCFG_vp(rt_lcfg) = 1; + 
E2K_RT_LCFG_vb(rt_lcfg) = 0; + E2K_RT_LCFG_vio(rt_lcfg) = 0; + /* setiing link CPU 1 -> CPU 3 */ + E2K_RT_LCFG_pln(rt_lcfg) = 3; + NATIVE_SET_SICREG(rt_lcfg2, E2K_RT_LCFG_reg(rt_lcfg), 0, 1); + }else{ + /* close link CPU 1 -> CPU 3 */ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg2, 0, 1); + E2K_RT_LCFG_vp(rt_lcfg) = 0; + NATIVE_SET_SICREG(rt_lcfg2, E2K_RT_LCFG_reg(rt_lcfg), 0, 1); + } + + /**** setup LCFG3 params ****/ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg3, 0, 1); + /* open all links for knob 1 */ + E2K_RT_LCFG_vp(rt_lcfg) = 1; + E2K_RT_LCFG_vb(rt_lcfg) = 1; + E2K_RT_LCFG_vio(rt_lcfg) = 1; + /* setiing link CPU 1 -> CPU 0 */ + E2K_RT_LCFG_pln(rt_lcfg) = 0; + NATIVE_SET_SICREG(rt_lcfg3, E2K_RT_LCFG_reg(rt_lcfg), 0, 1); + /*****************************/ + /*#####################################################*/ + /* configure own link CPU 1 to own ioapic space */ +#if defined(CONFIG_ES2) + /* configure own link CPU 1 to own ioapic #1 space */ +#endif /* CONFIG_ES2 */ + /* configure link CPU 1 to pcim space through CPU 0 */ + /* configure link CPU 1 to mlo space through CPU 0 */ + AS_WORD(rt_mlo) = NATIVE_GET_SICREG(rt_mlo0, 0, 0); + NATIVE_SET_SICREG(rt_mlo3, AS_WORD(rt_mlo), 0, 1); + /* May be the same for mhi ????????? 
*/ + /*#####################################################*/ + + /* Restore previous values for BSP */ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg0, 0, 0); + E2K_RT_LCFG_cln(rt_lcfg) = E2K_MAX_CL_NUM; + NATIVE_SET_SICREG(rt_lcfg0, E2K_RT_LCFG_reg(rt_lcfg), 0, 0); + /* Close link CPU 0 -> CPU 1 */ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg1, E2K_MAX_CL_NUM, 0); + E2K_RT_LCFG_vp(rt_lcfg) = 0; + E2K_RT_LCFG_vb(rt_lcfg) = 0; + E2K_RT_LCFG_vio(rt_lcfg) = 0; + NATIVE_SET_SICREG(rt_lcfg1, E2K_RT_LCFG_reg(rt_lcfg), E2K_MAX_CL_NUM, 0); +/*****************************************************************************/ +} +if (st_p.E2K_ST_P_pl_val & 0x2){ // 010 - Node 2 is present +/*********************** CONFIGURE KNOB 2 ***********************************/ + phys_node_num ++; + phys_node_pres_map |= 0x04; + + /* Open link CPU 0 -> CPU 2 */ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg2, E2K_MAX_CL_NUM, 0); + E2K_RT_LCFG_vp(rt_lcfg) = 1; + E2K_RT_LCFG_vb(rt_lcfg) = 0; + E2K_RT_LCFG_vio(rt_lcfg) = 0; + NATIVE_SET_SICREG(rt_lcfg2, E2K_RT_LCFG_reg(rt_lcfg), E2K_MAX_CL_NUM, 0); + + /* setup LCFG0 for knob 2; initially knob 2 = knob 3 */ + E2K_RT_LCFG_reg(rt_lcfg) = + NATIVE_GET_SICREG(rt_lcfg0, E2K_MAX_CL_NUM, 3); + /* open all links for knob 2 */ + E2K_RT_LCFG_vp(rt_lcfg) = 1; + E2K_RT_LCFG_vb(rt_lcfg) = 0; + E2K_RT_LCFG_vio(rt_lcfg) = 0; + /* setting knob cluster to 0 */ + E2K_RT_LCFG_cln(rt_lcfg) = 0; + /* setting knob number 3 to 2 */ + E2K_RT_LCFG_pln(rt_lcfg) = 2; + NATIVE_SET_SICREG(rt_lcfg0, E2K_RT_LCFG_reg(rt_lcfg), + E2K_MAX_CL_NUM, 3); + +/* setup LCFGj for knob 2 */ + + /* change parameters for BSP (due to new params for knob 2: cln = 0| pln = 2) */ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg0, E2K_MAX_CL_NUM, 0); + E2K_RT_LCFG_cln(rt_lcfg) = 0; + NATIVE_SET_SICREG(rt_lcfg0, E2K_RT_LCFG_reg(rt_lcfg), E2K_MAX_CL_NUM, 0); + /* change parameters for link CPU 0 -> CPU 2 (due to new params for knob 2: pln = 2) */ + 
E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg2, 0, 0); + E2K_RT_LCFG_pln(rt_lcfg) = 2; + NATIVE_SET_SICREG(rt_lcfg2, E2K_RT_LCFG_reg(rt_lcfg), 0, 0); + /****************************/ + if (st_p.E2K_ST_P_pl_val & 0x4){ // 100 - Node 3 is present + /**** setup LCFG1 params ****/ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg1, 0, 2); + /* open all links for knob 2 */ + E2K_RT_LCFG_vp(rt_lcfg) = 1; + E2K_RT_LCFG_vb(rt_lcfg) = 0; + E2K_RT_LCFG_vio(rt_lcfg) = 0; + /* setting link CPU 2 -> CPU 3 */ + E2K_RT_LCFG_pln(rt_lcfg) = 3; + NATIVE_SET_SICREG(rt_lcfg1, E2K_RT_LCFG_reg(rt_lcfg), 0, 2); + }else{ + /* close link CPU 2 -> CPU 3 */ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg1, 0, 2); + E2K_RT_LCFG_vp(rt_lcfg) = 0; + NATIVE_SET_SICREG(rt_lcfg1, E2K_RT_LCFG_reg(rt_lcfg), 0, 2); + } + + /**** setup LCFG2 params ****/ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg2, 0, 2); + /* open all links for knob 1 */ + E2K_RT_LCFG_vp(rt_lcfg) = 1; + E2K_RT_LCFG_vb(rt_lcfg) = 1; + E2K_RT_LCFG_vio(rt_lcfg) = 1; + /* setiing link CPU 2 -> CPU 0 */ + E2K_RT_LCFG_pln(rt_lcfg) = 0; + NATIVE_SET_SICREG(rt_lcfg2, E2K_RT_LCFG_reg(rt_lcfg), 0, 2); + /*#####################################################*/ + /* configure link CPU 2 to mlo space through CPU 0 */ + AS_WORD(rt_mlo) = NATIVE_GET_SICREG(rt_mlo0, 0, 0); + NATIVE_SET_SICREG(rt_mlo2, AS_WORD(rt_mlo), 0, 2); + /* May be the same for mhi ????????? 
*/ + /*#####################################################*/ + if (st_p.E2K_ST_P_pl_val & 0x1){ // 001 - Node 1 is present + /**** setup LCFG3 params ****/ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg3, 0, 2); + /* open all links for knob 1 */ + E2K_RT_LCFG_vp(rt_lcfg) = 1; + E2K_RT_LCFG_vb(rt_lcfg) = 0; + E2K_RT_LCFG_vio(rt_lcfg) = 0; + /* setiing link CPU 2 -> CPU 1 */ + E2K_RT_LCFG_pln(rt_lcfg) = 1; + NATIVE_SET_SICREG(rt_lcfg3, E2K_RT_LCFG_reg(rt_lcfg), 0, 2); + /*****************************/ + }else{ + /* close link CPU 2 -> CPU 1 */ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg3, 0, 2); + E2K_RT_LCFG_vp(rt_lcfg) = 0; + NATIVE_SET_SICREG(rt_lcfg3, E2K_RT_LCFG_reg(rt_lcfg), 0, 2); + } + /* Restore previous values for BSP */ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg0, 0, 0); + E2K_RT_LCFG_cln(rt_lcfg) = E2K_MAX_CL_NUM; + NATIVE_SET_SICREG(rt_lcfg0, E2K_RT_LCFG_reg(rt_lcfg), 0, 0); + /* Close link CPU 0 -> CPU 2 */ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg2, E2K_MAX_CL_NUM, 0); + E2K_RT_LCFG_vp(rt_lcfg) = 0; + E2K_RT_LCFG_vb(rt_lcfg) = 0; + E2K_RT_LCFG_vio(rt_lcfg) = 0; + NATIVE_SET_SICREG(rt_lcfg2, E2K_RT_LCFG_reg(rt_lcfg), E2K_MAX_CL_NUM, 0); +/*****************************************************************************/ +} +if (st_p.E2K_ST_P_pl_val & 0x4){ // 100 - Node 3 is present +/*********************** CONFIGURE KNOB 3 ***********************************/ + phys_node_num ++; + phys_node_pres_map |= 0x08; + + /* Open link CPU 0 -> CPU 3 */ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg3, E2K_MAX_CL_NUM, 0); + E2K_RT_LCFG_vp(rt_lcfg) = 1; + E2K_RT_LCFG_vb(rt_lcfg) = 0; + E2K_RT_LCFG_vio(rt_lcfg) = 0; + NATIVE_SET_SICREG(rt_lcfg3, E2K_RT_LCFG_reg(rt_lcfg), E2K_MAX_CL_NUM, 0); + + /* setup LCFG0 for knob 3 */ + E2K_RT_LCFG_reg(rt_lcfg) = + NATIVE_GET_SICREG(rt_lcfg0, E2K_MAX_CL_NUM, 3); + /* open all links for knob 2 */ + E2K_RT_LCFG_vp(rt_lcfg) = 1; + E2K_RT_LCFG_vb(rt_lcfg) = 0; + 
E2K_RT_LCFG_vio(rt_lcfg) = 0; + /* setting knob cluster to 0 */ + E2K_RT_LCFG_cln(rt_lcfg) = 0; + NATIVE_SET_SICREG(rt_lcfg0, E2K_RT_LCFG_reg(rt_lcfg), + E2K_MAX_CL_NUM, 3); + +/* setup LCFGj for knob 3 */ + + /* change parameters for BSP (due to new params for knob 3: cln = 0| pln = 3) */ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg0, E2K_MAX_CL_NUM, 0); + E2K_RT_LCFG_cln(rt_lcfg) = 0; + NATIVE_SET_SICREG(rt_lcfg0, E2K_RT_LCFG_reg(rt_lcfg), E2K_MAX_CL_NUM, 0); + /* change parameters for link CPU 0 -> CPU 3 (due to new params for knob 3: pln = 3) */ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg3, 0, 0); + E2K_RT_LCFG_pln(rt_lcfg) = 3; + NATIVE_SET_SICREG(rt_lcfg3, E2K_RT_LCFG_reg(rt_lcfg), 0, 0); + /****************************/ + /**** setup LCFG1 params ****/ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg1, 0, 3); + /* open all links for knob 3 */ + E2K_RT_LCFG_vp(rt_lcfg) = 1; + E2K_RT_LCFG_vb(rt_lcfg) = 1; + E2K_RT_LCFG_vio(rt_lcfg) = 1; + /* setting link CPU 3 -> CPU 0 */ + E2K_RT_LCFG_pln(rt_lcfg) = 0; + NATIVE_SET_SICREG(rt_lcfg1, E2K_RT_LCFG_reg(rt_lcfg), 0, 3); + /*#####################################################*/ + /* configure link CPU 3 to mlo space through CPU 0 */ + AS_WORD(rt_mlo) = NATIVE_GET_SICREG(rt_mlo0, 0, 0); + NATIVE_SET_SICREG(rt_mlo1, AS_WORD(rt_mlo), 0, 3); + /* May be the same for mhi ????????? 
*/ + /*#####################################################*/ + if (st_p.E2K_ST_P_pl_val & 0x1){ // 001 - Node 1 is present + /**** setup LCFG2 params ****/ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg2, 0, 3); + /* open all links for knob 3 */ + E2K_RT_LCFG_vp(rt_lcfg) = 1; + E2K_RT_LCFG_vb(rt_lcfg) = 0; + E2K_RT_LCFG_vio(rt_lcfg) = 0; + /* setiing link CPU 3 -> CPU 1 */ + E2K_RT_LCFG_pln(rt_lcfg) = 1; + NATIVE_SET_SICREG(rt_lcfg2, E2K_RT_LCFG_reg(rt_lcfg), 0, 3); + }else{ + /* close link CPU 3 -> CPU 1 */ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg2, 0, 3); + E2K_RT_LCFG_vp(rt_lcfg) = 0; + NATIVE_SET_SICREG(rt_lcfg2, E2K_RT_LCFG_reg(rt_lcfg), 0, 3); + } + if (st_p.E2K_ST_P_pl_val & 0x2){ // 010 - Node 2 is present + /**** setup LCFG3 params ****/ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg3, 0, 3); + /* open all links for knob 3 */ + E2K_RT_LCFG_vp(rt_lcfg) = 1; + E2K_RT_LCFG_vb(rt_lcfg) = 0; + E2K_RT_LCFG_vio(rt_lcfg) = 0; + /* setiing link CPU 3 -> CPU 2 */ + E2K_RT_LCFG_pln(rt_lcfg) = 2; + NATIVE_SET_SICREG(rt_lcfg3, E2K_RT_LCFG_reg(rt_lcfg), 0, 3); + /*****************************/ + }else{ + /* close link CPU 3 -> CPU 2 */ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg3, 0, 3); + E2K_RT_LCFG_vp(rt_lcfg) = 0; + NATIVE_SET_SICREG(rt_lcfg3, E2K_RT_LCFG_reg(rt_lcfg), 0, 3); + } +/*******************************************************************************/ +} +if (!(st_p.E2K_ST_P_pl_val & 0x4)){ // 100 - Node 3 is not present + /* change parameters for BSP (cln = 0| pln = 0) */ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg0, E2K_MAX_CL_NUM, 0); + E2K_RT_LCFG_cln(rt_lcfg) = 0; + NATIVE_SET_SICREG(rt_lcfg0, E2K_RT_LCFG_reg(rt_lcfg), E2K_MAX_CL_NUM, 0); +} + + /* Open all links (cfg1 and cfg3) BSP */ +if (st_p.E2K_ST_P_pl_val & 0x1){ // 001 - Node 1 is present + /* Open link CPU 0 -> CPU 1 */ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg1, 0, 0); + E2K_RT_LCFG_vp(rt_lcfg) = 1; + E2K_RT_LCFG_vb(rt_lcfg) = 
0; + E2K_RT_LCFG_vio(rt_lcfg) = 0; + NATIVE_SET_SICREG(rt_lcfg1, E2K_RT_LCFG_reg(rt_lcfg), 0, 0); +} +if (st_p.E2K_ST_P_pl_val & 0x2){ // 010 - Node 2 is present + /* Open link CPU 0 -> CPU 2 */ + E2K_RT_LCFG_reg(rt_lcfg) = NATIVE_GET_SICREG(rt_lcfg2, 0, 0); + E2K_RT_LCFG_vp(rt_lcfg) = 1; + E2K_RT_LCFG_vb(rt_lcfg) = 0; + E2K_RT_LCFG_vio(rt_lcfg) = 0; + NATIVE_SET_SICREG(rt_lcfg2, E2K_RT_LCFG_reg(rt_lcfg), 0, 0); +} +/*****************************************************************************/ + rom_printk("KNOB 0: rt_lcfg0 rt_lcfg1 rt_lcfg2 rt_lcfg3\n" + " 0x%x 0x%x 0x%x 0x%x\n" + " st_p 0x%x\n", + NATIVE_GET_SICREG(rt_lcfg0, 0, 0), NATIVE_GET_SICREG(rt_lcfg1, 0, 0), + NATIVE_GET_SICREG(rt_lcfg2, 0, 0), NATIVE_GET_SICREG(rt_lcfg3, 0, 0), + NATIVE_GET_SICREG(st_p, 0, 0)); +if (st_p.E2K_ST_P_pl_val & 0x1){ // 001 - Node 1 is present + rom_printk("KNOB 1: rt_lcfg0 rt_lcfg1 rt_lcfg2 rt_lcfg3\n" + " 0x%x 0x%x 0x%x 0x%x\n" + " st_p 0x%x\n", + NATIVE_GET_SICREG(rt_lcfg0, 0, 1), NATIVE_GET_SICREG(rt_lcfg1, 0, 1), + NATIVE_GET_SICREG(rt_lcfg2, 0, 1), NATIVE_GET_SICREG(rt_lcfg3, 0, 1), + NATIVE_GET_SICREG(st_p, 0, 1)); +} +if (st_p.E2K_ST_P_pl_val & 0x2){ // 020 - Node 2 is present + rom_printk("KNOB 2: rt_lcfg0 rt_lcfg1 rt_lcfg2 rt_lcfg3\n" + " 0x%x 0x%x 0x%x 0x%x\n" + " st_p 0x%x\n", + NATIVE_GET_SICREG(rt_lcfg0, 0, 2), NATIVE_GET_SICREG(rt_lcfg1, 0, 2), + NATIVE_GET_SICREG(rt_lcfg2, 0, 2), NATIVE_GET_SICREG(rt_lcfg3, 0, 2), + NATIVE_GET_SICREG(st_p, 0, 2)); +} +if (st_p.E2K_ST_P_pl_val & 0x4){ // 100 - Node 3 is present + rom_printk("KNOB 3: rt_lcfg0 rt_lcfg1 rt_lcfg2 rt_lcfg3\n" + " 0x%x 0x%x 0x%x 0x%x\n" + " st_p 0x%x\n", + NATIVE_GET_SICREG(rt_lcfg0, 0, 3), NATIVE_GET_SICREG(rt_lcfg1, 0, 3), + NATIVE_GET_SICREG(rt_lcfg2, 0, 3), NATIVE_GET_SICREG(rt_lcfg3, 0, 3), + NATIVE_GET_SICREG(st_p, 0, 3)); +} +} +void set_memory_filters(boot_info_t *boot_info) +{ + u64 size_lo = size_real; + u64 memory_start = 0; /* memory starts from 0 and can be on BSP */ + 
e2k_rt_mlo_struct_t rt_mlo; + u64 hole_size_lo; +#ifdef CONFIG_SMP + u64 size_to_probe; + u64 lo_memory_start; +#endif /* CONFIG_SMP */ +#ifdef CONFIG_ENABLE_EXTMEM + u64 size_hi = 0; +#ifdef CONFIG_SMP + u64 lo_high_memory_start; + u64 lo_high_memory_size; + u64 hi_high_memory_start; + u64 hi_high_memory_size; +#endif /* CONFIG_SMP */ + e2k_rt_mhi_struct_t rt_mhi; +#endif /* CONFIG_ENABLE_EXTMEM */ + + /* Configure MLO & MHI for BSP. */ + AS_WORD(rt_mlo) = NATIVE_GET_SICREG(rt_mlo0, 0, 0); + DebugMRT("get_memory_filters: BSP rt_mlo0 = 0x%x\n", + AS_WORD(rt_mlo)); + + size_lo = get_lo_memory_size(0); + if (size_lo > size_real) + size_lo = size_real; + lo_memory_start = get_lo_memory_start(0); + lo_memory_start = ALIGN_UP(lo_memory_start, E2K_SIC_MIN_MEMORY_BANK); + + AS_STRUCT(rt_mlo).bgn = (u32)(lo_memory_start / E2K_SIC_SIZE_RT_MLO); + AS_STRUCT(rt_mlo).end = (u32)((lo_memory_start + size_lo - 1) / + E2K_SIC_SIZE_RT_MLO); + NATIVE_SET_SICREG(rt_mlo0, AS_WORD(rt_mlo), 0, 0); + rom_printk("BSP NODE #0 low memory router set from 0x%X to " + "0x%X\n", + lo_memory_start, lo_memory_start + size_lo - 1); + DebugMRT("set_memory_filters: BSP set rt_mlo0 to 0x%x\n", + AS_WORD(rt_mlo)); + add_memory_region(boot_info, 0, lo_memory_start, size_lo); +#ifdef CONFIG_SMP + if (phys_node_pres_map & 0x2) { /* NODE #1 is online */ + /* + * Setup memory routers of NODE #1 to access memory NODE #0 + * NODE #0 is located on link #3 of NODE #1 + */ + NATIVE_SET_SICREG(rt_mlo3, AS_WORD(rt_mlo), 0, 1); + DebugMRT("set_memory_filters: NODE #1 set rt_mlo3 to 0x%x " + "to access to memory of NODE #0\n", + AS_WORD(rt_mlo)); + } + if (phys_node_pres_map & 0x4) { /* NODE #2 is online */ + /* + * Setup memory routers of NODE #2 to access memory NODE #0 + * NODE #0 is located on link #2 of NODE #2 + */ + NATIVE_SET_SICREG(rt_mlo2, AS_WORD(rt_mlo), 0, 2); + DebugMRT("set_memory_filters: NODE #2 set rt_mlo2 to 0x%x " + "to access to memory of NODE #0\n", + AS_WORD(rt_mlo)); + } + if 
(phys_node_pres_map & 0x8) { /* NODE #3 is online */ + /* + * Setup memory routers of NODE #3 to access memory NODE #0 + * NODE #0 is located on link #1 of NODE #3 + */ + NATIVE_SET_SICREG(rt_mlo1, AS_WORD(rt_mlo), 0, 3); + DebugMRT("set_memory_filters: NODE #3 set rt_mlo1 to 0x%x " + "to access to memory of NODE #0\n", + AS_WORD(rt_mlo)); + } +#endif /* CONFIG_SMP */ + memory_start += size_lo; + memory_start = ALIGN_UP(memory_start, E2K_SIC_MIN_MEMORY_BANK); +#ifdef CONFIG_ENABLE_EXTMEM + hole_size_lo = ALIGN_UP(size_lo, E2K_SIC_SIZE_RT_MLO); + if (hole_size_lo < size_real) { + /* Setup high memory filter of BSP */ + size_hi = size_real - hole_size_lo; + AS_WORD(rt_mhi) = NATIVE_GET_SICREG(rt_mhi0, 0, 0); + DebugMRT("get_memory_filters: BSP rt_mhi0 = 0x%x\n", + AS_WORD(rt_mhi)); + hi_memory_start = get_hi_memory_start(0); + AS_STRUCT(rt_mhi).bgn = (hi_memory_start / + E2K_SIC_SIZE_RT_MHI); + AS_STRUCT(rt_mhi).end = ((hi_memory_start + hole_size_lo + + size_hi - 1) / E2K_SIC_SIZE_RT_MHI); + NATIVE_SET_SICREG(rt_mhi0, AS_WORD(rt_mhi), 0, 0); + rom_printk("BSP NODE #0 high memory router set from 0x%X " + "to 0x%X\n", + hi_memory_start, + hi_memory_start + hole_size_lo + size_hi - 1); + DebugMRT("set_memory_filters: BSP set rt_mhi0 to 0x%x\n", + AS_WORD(rt_mhi)); + add_memory_region(boot_info, 0, hi_memory_start + hole_size_lo, + size_hi); +#ifdef CONFIG_SMP + if (phys_node_pres_map & 0x2) { /* NODE #1 is online */ + /* + * Setup memory routers of NODE #1 to access to hi + * memory NODE #0, + * NODE #0 is located on link #3 of NODE #1 + */ + NATIVE_SET_SICREG(rt_mhi3, AS_WORD(rt_mhi), 0, 1); + DebugMRT("set_memory_filters: NODE #1 set rt_mhi3 to " + "0x%x to access to memory of NODE #0\n", + AS_WORD(rt_mhi)); + } + if (phys_node_pres_map & 0x4) { /* NODE #2 is online */ + /* + * Setup memory routers of NODE #2 to access to hi + * memory NODE #0, + * NODE #0 is located on link #2 of NODE #2 + */ + NATIVE_SET_SICREG(rt_mhi2, AS_WORD(rt_mhi), 0, 2); + 
DebugMRT("set_memory_filters: NODE #2 set rt_mhi2 to " + "0x%x to access to memory of NODE #0\n", + AS_WORD(rt_mhi)); + } + if (phys_node_pres_map & 0x8) { /* NODE #3 is online */ + /* + * Setup memory routers of NODE #3 to access to hi + * memory NODE #0, + * NODE #0 is located on link #1 of NODE #3 + */ + NATIVE_SET_SICREG(rt_mhi1, AS_WORD(rt_mhi), 0, 3); + DebugMRT("set_memory_filters: NODE #3 set rt_mhi1 to " + "0x%x to access to memory of NODE #0\n", + AS_WORD(rt_mhi)); + } +#endif /* CONFIG_SMP */ + hi_memory_start += (size_lo + size_hi); + hi_memory_start = ALIGN_UP(hi_memory_start, + E2K_SIC_MIN_MEMORY_BANK); + } +#endif /* CONFIG_ENABLE_EXTMEM */ + add_memory_region(boot_info, 0, 0, 0); + +#ifndef CONFIG_SMP + return; /* none other CPUS */ +#else /* CONFIG_SMP */ + if (phys_cpu_num <= 1) + return; /* none other CPUs */ + if (only_BSP_has_memory) { + /* + * The only BSP has access to memory, and other cpus + * through BSP, so we leave rt_mlo 1,2,3 of BSP and + * rt_mlo0 of other CPUs closed by default + */ + return; + } + size_to_probe = PROBE_MEM_LIMIT; +#ifdef CONFIG_ENABLE_EXTMEM + size_to_probe = ALIGN_UP(size_to_probe, E2K_SIC_SIZE_RT_MLO); + size_to_probe += PROBE_EXT_MEM_LIMIT; + if (!is_power_of_2(size_to_probe)) { + /* all memory banks size can be only 2^n */ + size_to_probe = __roundup_pow_of_two(size_to_probe); + } +#endif /* CONFIG_ENABLE_EXTMEM */ + + if ((phys_node_pres_map & 0x2) && (memory_pres_map & 0x2)) { + /* NODE #1 is online */ + /* + * Setup memory routers of NODE #1 to access to own memory + */ + /* Configure MLO of NODE #1 */ + AS_WORD(rt_mlo) = NATIVE_GET_SICREG(rt_mlo0, 0, 1); + DebugMRT("get_memory_filters: NODE #1 rt_mlo0 = 0x%x\n", + AS_WORD(rt_mlo)); + /* + * Setup memory routers of NODE #0 to probe memory + * of NODE #1 located on link #1 of NODE #0 + */ + size_real = probe_memory(boot_info, 1, 0, 1); + if (size_real > 0) { + size_lo = get_lo_memory_size(1); + if (size_lo > size_real) + size_lo = size_real; + } else { + 
size_lo = 0; + } + + lo_memory_start = get_lo_memory_start(1); + lo_memory_start = ALIGN_UP(lo_memory_start, + E2K_SIC_MIN_MEMORY_BANK); + AS_WORD(rt_mlo) = NATIVE_GET_SICREG(rt_mlo0, 0, 1); + AS_STRUCT(rt_mlo).bgn = lo_memory_start / E2K_SIC_SIZE_RT_MLO; + AS_STRUCT(rt_mlo).end = (lo_memory_start + size_lo - 1) / + E2K_SIC_SIZE_RT_MLO; + NATIVE_SET_SICREG(rt_mlo0, AS_WORD(rt_mlo), 0, 1); + + rom_printk("NODE #1 low memory router set from 0x%X to " + "0x%X\n", + lo_memory_start, lo_memory_start + size_lo - 1); + DebugMRT("set_memory_filters: NODE #1 set rt_mlo0 to 0x%x\n", + AS_WORD(rt_mlo)); + if (size_lo > 0) { + add_memory_region(boot_info, 1, lo_memory_start, + size_lo); + } else { + rom_printk("NODE #1 has not own memory\n"); + } + if (phys_node_pres_map & 0x1) { /* NODE #0 is online */ + /* + * Setup memory routers of NODE #0 to access to memory + * of NODE #1 located on link #1 of NODE #0 + */ + NATIVE_SET_SICREG(rt_mlo1, AS_WORD(rt_mlo), 0, 0); + DebugMRT("set_memory_filters: NODE #0 set rt_mlo1 to " + "0x%x to access to memory of NODE #1\n", + AS_WORD(rt_mlo)); + } + if (phys_node_pres_map & 0x4) { /* NODE #2 is online */ + /* + * Setup memory routers of NODE #2 to access to memory + * of NODE #1 located on link #3 of NODE #2 + */ + NATIVE_SET_SICREG(rt_mlo3, AS_WORD(rt_mlo), 0, 2); + DebugMRT("set_memory_filters: NODE #2 set rt_mlo3 to " + "0x%x to access to memory of NODE #1\n", + AS_WORD(rt_mlo)); + } + if (phys_node_pres_map & 0x8) { /* NODE #3 is online */ + /* + * Setup memory routers of NODE #3 to access to memory + * of NODE #1 is located on link #2 of NODE #3 + */ + NATIVE_SET_SICREG(rt_mlo2, AS_WORD(rt_mlo), 0, 3); + DebugMRT("set_memory_filters: NODE #3 set rt_mlo2 to " + "0x%x to access to memory of NODE #1\n", + AS_WORD(rt_mlo)); + } + memory_start += size_lo; + memory_start = ALIGN_UP(memory_start, E2K_SIC_MIN_MEMORY_BANK); +#ifdef CONFIG_ENABLE_EXTMEM + hole_size_lo = ALIGN_UP(size_lo, E2K_SIC_SIZE_RT_MLO); + if (hole_size_lo < size_real) { 
+ size_hi = size_real - hole_size_lo; + } else { + size_hi = 0; + } + /* Setup high memory filter of BSP */ + if (size_hi != 0) { + /* Setup high memory filter of NODE #1 */ + AS_WORD(rt_mhi) = NATIVE_GET_SICREG(rt_mhi0, 0, 1); + DebugMRT("get_memory_filters: NODE #1 rt_mhi0 = " + "0x%x\n", AS_WORD(rt_mhi)); + hi_memory_start = get_hi_memory_start(1); + hi_memory_start = ALIGN_UP(hi_memory_start, + E2K_SIC_SIZE_RT_MHI); + AS_STRUCT(rt_mhi).bgn = (u32)(hi_memory_start / + E2K_SIC_SIZE_RT_MHI); + AS_STRUCT(rt_mhi).end = (u32)((hi_memory_start + + hole_size_lo + size_hi - 1) / + E2K_SIC_SIZE_RT_MHI); + NATIVE_SET_SICREG(rt_mhi0, AS_WORD(rt_mhi), 0, 1); + rom_printk("NODE #1 high memory router set from " + "0x%X to 0x%X\n", + hi_memory_start, + hi_memory_start + hole_size_lo + size_hi - 1); + DebugMRT("set_memory_filters: NODE #1 set rt_mhi0 to " + "0x%x\n", + AS_WORD(rt_mhi)); + lo_high_memory_start = hi_memory_start; + lo_high_memory_size = lo_memory_start; + if (lo_high_memory_size < size_hi) { + hi_high_memory_size = + size_hi - lo_high_memory_size; + hi_high_memory_start = hi_memory_start + + lo_memory_start + hole_size_lo; + } else { + lo_high_memory_size = size_hi; + hi_high_memory_size = 0; + } + if (lo_high_memory_size != 0) { + add_memory_region(boot_info, 1, + lo_high_memory_start, + lo_high_memory_size); + rom_printk("NODE #1 high memory lo region set " + "from 0x%X to 0x%X\n", + lo_high_memory_start, + lo_high_memory_start + + lo_high_memory_size); + } + if (hi_high_memory_size != 0) { + add_memory_region(boot_info, 1, + hi_high_memory_start, + hi_high_memory_size); + rom_printk("NODE #1 high memory hi region set " + "from 0x%X to 0x%X\n", + hi_high_memory_start, + hi_high_memory_start + + hi_high_memory_size); + } + if (phys_node_pres_map & 0x1) { /* NODE #0 is online */ + /* + * Setup memory routers of NODE #0 to access" + * to hi memory of NODE #1, + * NODE #1 is located on link #1 of NODE #0 + */ + NATIVE_SET_SICREG(rt_mhi1, AS_WORD(rt_mhi), 0, 0); + 
DebugMRT("set_memory_filters: NODE #0 set " + "rt_mhi1 to 0x%x to access to " + "memory of NODE #1\n", + AS_WORD(rt_mhi)); + } + if (phys_node_pres_map & 0x4) { /* NODE #2 is online */ + /* + * Setup memory routers of NODE #2 to access + * to hi memory of NODE #1, + * NODE #1 is located on link #3 of NODE #2 + */ + NATIVE_SET_SICREG(rt_mhi3, AS_WORD(rt_mhi), 0, 2); + DebugMRT("set_memory_filters: NODE #2 set " + "rt_mhi3 to 0x%x to access to " + "memory of NODE #1\n", + AS_WORD(rt_mhi)); + } + if (phys_node_pres_map & 0x8) { /* NODE #3 is online */ + /* + * Setup memory routers of NODE #3 to access + * to hi memory of NODE #1, + * NODE #1 is located on link #2 of NODE #3 + */ + NATIVE_SET_SICREG(rt_mhi2, AS_WORD(rt_mhi), 0, 3); + DebugMRT("set_memory_filters: NODE #3 set " + "rt_mhi2 to 0x%x to access to " + "memory of NODE #1\n", + AS_WORD(rt_mhi)); + } + hi_memory_start += (hole_size_lo + size_hi); + hi_memory_start = ALIGN_UP(hi_memory_start, + E2K_SIC_MIN_MEMORY_BANK); + } +#endif /* CONFIG_ENABLE_EXTMEM */ + } + add_memory_region(boot_info, 1, 0, 0); + + if ((phys_node_pres_map & 0x4) && (memory_pres_map & 0x4)) { /* NODE #2 is online */ + /* + * Setup memory routers of NODE #2 to access to own memory + */ + /* Configure MLO of NODE #2 */ + AS_WORD(rt_mlo) = NATIVE_GET_SICREG(rt_mlo0, 0, 2); + DebugMRT("get_memory_filters: NODE #2 rt_mlo0 = 0x%x\n", + AS_WORD(rt_mlo)); + + /* + * Setup memory routers of NODE #0 to probe memory + * of NODE #2 located on link #2 of NODE #0 + */ + size_real = probe_memory(boot_info, 2, 0, 2); + if (size_real > 0) { + size_lo = get_lo_memory_size(2); + if (size_lo > size_real) + size_lo = size_real; + } else { + size_lo = 0; + } + + lo_memory_start = get_lo_memory_start(2); + lo_memory_start = ALIGN_UP(lo_memory_start, + E2K_SIC_MIN_MEMORY_BANK); + + AS_WORD(rt_mlo) = NATIVE_GET_SICREG(rt_mlo0, 0, 2); + AS_STRUCT(rt_mlo).bgn = lo_memory_start / E2K_SIC_SIZE_RT_MLO; + AS_STRUCT(rt_mlo).end = (lo_memory_start + size_lo - 1) / + 
E2K_SIC_SIZE_RT_MLO; + NATIVE_SET_SICREG(rt_mlo0, AS_WORD(rt_mlo), 0, 2); + + rom_printk("NODE #2 low memory router set from 0x%X to " + "0x%X\n", + lo_memory_start, lo_memory_start + size_lo - 1); + DebugMRT("set_memory_filters: NODE #2 set rt_mlo0 to 0x%x\n", + AS_WORD(rt_mlo)); + if (size_lo > 0) { + add_memory_region(boot_info, 2, lo_memory_start, + size_lo); + } else { + rom_printk("NODE #2 has not own memory\n"); + } + if (phys_node_pres_map & 0x1) { /* NODE #0 is online */ + /* + * Setup memory routers of NODE #0 to access to memory + * of NODE #2 located on link #2 of NODE #0 + */ + NATIVE_SET_SICREG(rt_mlo2, AS_WORD(rt_mlo), 0, 0); + DebugMRT("set_memory_filters: NODE #0 set rt_mlo2 to " + "0x%x to access to memory of NODE #2\n", + AS_WORD(rt_mlo)); + } + if (phys_node_pres_map & 0x2) { /* NODE #1 is online */ + /* + * Setup memory routers of NODE #1 to access to memory + * of NODE #2 located on link #1 of NODE #1 + */ + NATIVE_SET_SICREG(rt_mlo1, AS_WORD(rt_mlo), 0, 1); + DebugMRT("set_memory_filters: NODE #1 set rt_mlo1 to " + "0x%x to access to memory of NODE #2\n", + AS_WORD(rt_mlo)); + } + if (phys_node_pres_map & 0x8) { /* NODE #3 is online */ + /* + * Setup memory routers of NODE #3 to access to memory + * of NODE #2 is located on link #3 of NODE #3 + */ + NATIVE_SET_SICREG(rt_mlo3, AS_WORD(rt_mlo), 0, 3); + DebugMRT("set_memory_filters: NODE #3 set rt_mlo3 to " + "0x%x to access to memory of NODE #2\n", + AS_WORD(rt_mlo)); + } + memory_start += size_lo; + memory_start = ALIGN_UP(memory_start, E2K_SIC_MIN_MEMORY_BANK); +#ifdef CONFIG_ENABLE_EXTMEM + hole_size_lo = ALIGN_UP(size_lo, E2K_SIC_SIZE_RT_MLO); + if (hole_size_lo < size_real) { + size_hi = size_real - hole_size_lo; + } else { + size_hi = 0; + } + if (size_hi != 0) { + /* Setup high memory filter of NODE #2 */ + AS_WORD(rt_mhi) = NATIVE_GET_SICREG(rt_mhi0, 0, 2); + DebugMRT("get_memory_filters: NODE #2 rt_mhi0 = " + "0x%x\n", AS_WORD(rt_mhi)); + hi_memory_start = get_hi_memory_start(2); + 
hi_memory_start = ALIGN_UP(hi_memory_start, + E2K_SIC_SIZE_RT_MHI); + AS_STRUCT(rt_mhi).bgn = hi_memory_start / + E2K_SIC_SIZE_RT_MHI; + AS_STRUCT(rt_mhi).end = (hi_memory_start + + hole_size_lo + size_hi - 1) / + E2K_SIC_SIZE_RT_MHI; + NATIVE_SET_SICREG(rt_mhi0, AS_WORD(rt_mhi), 0, 2); + rom_printk("NODE #2 high memory router set from " + "0x%X to 0x%X\n", + hi_memory_start, + hi_memory_start + hole_size_lo + size_hi - 1); + DebugMRT("set_memory_filters: NODE #2 set rt_mhi0 to " + "0x%x\n", + AS_WORD(rt_mhi)); + lo_high_memory_start = hi_memory_start; + lo_high_memory_size = lo_memory_start; + if (lo_high_memory_size < size_hi) { + hi_high_memory_size = + size_hi - lo_high_memory_size; + hi_high_memory_start = hi_memory_start + + lo_memory_start + hole_size_lo; + } else { + lo_high_memory_size = size_hi; + hi_high_memory_size = 0; + } + if (lo_high_memory_size != 0) { + add_memory_region(boot_info, 2, + lo_high_memory_start, + lo_high_memory_size); + rom_printk("NODE #2 high memory lo region set " + "from 0x%X to 0x%X\n", + lo_high_memory_start, + lo_high_memory_start + + lo_high_memory_size); + } + if (hi_high_memory_size != 0) { + add_memory_region(boot_info, 2, + hi_high_memory_start, + hi_high_memory_size); + rom_printk("NODE #2 high memory hi region set " + "from 0x%X to 0x%X\n", + hi_high_memory_start, + hi_high_memory_start + + hi_high_memory_size); + } + if (phys_node_pres_map & 0x1) { /* NODE #0 is online */ + /* + * Setup memory routers of NODE #0 to access" + * to hi memory of NODE #2, + * NODE #2 is located on link #2 of NODE #0 + */ + NATIVE_SET_SICREG(rt_mhi2, AS_WORD(rt_mhi), 0, 0); + DebugMRT("set_memory_filters: NODE #0 set " + "rt_mhi2 to 0x%x to access to " + "memory of NODE #2\n", + AS_WORD(rt_mhi)); + } + if (phys_node_pres_map & 0x2) { /* NODE #1 is online */ + /* + * Setup memory routers of NODE #1 to access + * to hi memory of NODE #2, + * NODE #2 is located on link #1 of NODE #1 + */ + NATIVE_SET_SICREG(rt_mhi1, AS_WORD(rt_mhi), 0, 1); + 
DebugMRT("set_memory_filters: NODE #1 set " + "rt_mhi1 to 0x%x to access to " + "memory of NODE #2\n", + AS_WORD(rt_mhi)); + } + if (phys_node_pres_map & 0x8) { /* NODE #3 is online */ + /* + * Setup memory routers of NODE #3 to access + * to hi memory of NODE #2, + * NODE #2 is located on link #3 of NODE #3 + */ + NATIVE_SET_SICREG(rt_mhi3, AS_WORD(rt_mhi), 0, 3); + DebugMRT("set_memory_filters: NODE #3 set " + "rt_mhi3 to 0x%x to access to " + "memory of NODE #2\n", + AS_WORD(rt_mhi)); + } + hi_memory_start += (hole_size_lo + size_hi); + hi_memory_start = ALIGN_UP(hi_memory_start, + E2K_SIC_MIN_MEMORY_BANK); + } +#endif /* CONFIG_ENABLE_EXTMEM */ + } + add_memory_region(boot_info, 2, 0, 0); + + if ((phys_node_pres_map & 0x8) && (memory_pres_map & 0x8)) { /* NODE #3 is online */ + /* + * Setup memory routers of NODE #3 to access to own memory + */ + /* Configure MLO of NODE #3 */ + AS_WORD(rt_mlo) = NATIVE_GET_SICREG(rt_mlo0, 0, 3); + DebugMRT("set_memory_filters: NODE #3 rt_mlo0 = 0x%x\n", + AS_WORD(rt_mlo)); + + /* + * Setup memory routers of NODE #0 to probe memory + * of NODE #3 located on link #3 of NODE #0 + */ + size_real = probe_memory(boot_info, 3, 0, 3); + if (size_real > 0) { + size_lo = get_lo_memory_size(3); + if (size_lo > size_real) + size_lo = size_real; + } else { + size_lo = 0; + } + + lo_memory_start = get_lo_memory_start(3); + lo_memory_start = ALIGN_UP(lo_memory_start, + E2K_SIC_MIN_MEMORY_BANK); + + AS_WORD(rt_mlo) = NATIVE_GET_SICREG(rt_mlo0, 0, 3); + AS_STRUCT(rt_mlo).bgn = lo_memory_start / E2K_SIC_SIZE_RT_MLO; + AS_STRUCT(rt_mlo).end = (lo_memory_start + size_lo - 1) / + E2K_SIC_SIZE_RT_MLO; + NATIVE_SET_SICREG(rt_mlo0, AS_WORD(rt_mlo), 0, 3); + + rom_printk("NODE #3 low memory router set from 0x%X to " + "0x%X\n", + lo_memory_start, lo_memory_start + size_lo - 1); + DebugMRT("set_memory_filters: NODE #3 set rt_mlo0 to 0x%x\n", + AS_WORD(rt_mlo)); + if (size_lo > 0) { + add_memory_region(boot_info, 3, lo_memory_start, + size_lo); + } else 
{ + rom_printk("NODE #3 has not own memory\n"); + } + if (phys_node_pres_map & 0x1) { /* NODE #0 is online */ + /* + * Setup memory routers of NODE #0 to access to memory + * of NODE #3 located on link #3 of NODE #0 + */ + NATIVE_SET_SICREG(rt_mlo3, AS_WORD(rt_mlo), 0, 0); + DebugMRT("set_memory_filters: NODE #0 set rt_mlo3 to " + "0x%x to access to memory of NODE #3\n", + AS_WORD(rt_mlo)); + } + if (phys_node_pres_map & 0x2) { /* NODE #1 is online */ + /* + * Setup memory routers of NODE #1 to access to memory + * of NODE #3 located on link #2 of NODE #1 + */ + NATIVE_SET_SICREG(rt_mlo2, AS_WORD(rt_mlo), 0, 1); + DebugMRT("set_memory_filters: NODE #1 set rt_mlo2 to " + "0x%x to access to memory of NODE #3\n", + AS_WORD(rt_mlo)); + } + if (phys_node_pres_map & 0x4) { /* NODE #2 is online */ + /* + * Setup memory routers of NODE #2 to access to memory + * of NODE #3 is located on link #1 of NODE #2 + */ + NATIVE_SET_SICREG(rt_mlo1, AS_WORD(rt_mlo), 0, 2); + DebugMRT("set_memory_filters: NODE #2 set rt_mlo1 to " + "0x%x to access to memory of NODE #3\n", + AS_WORD(rt_mlo)); + } + memory_start += size_lo; + memory_start = ALIGN_UP(memory_start, E2K_SIC_MIN_MEMORY_BANK); +#ifdef CONFIG_ENABLE_EXTMEM + hole_size_lo = ALIGN_UP(size_lo, E2K_SIC_SIZE_RT_MLO); + if (hole_size_lo < size_real) { + size_hi = size_real - hole_size_lo; + } else { + size_hi = 0; + } + if (size_hi != 0) { + /* Setup high memory filter of NODE #3 */ + AS_WORD(rt_mhi) = NATIVE_GET_SICREG(rt_mhi0, 0, 3); + DebugMRT("set_memory_filters: NODE #3 rt_mhi0 = " + "0x%x\n", AS_WORD(rt_mhi)); + hi_memory_start = get_hi_memory_start(3); + hi_memory_start = ALIGN_UP(hi_memory_start, + E2K_SIC_SIZE_RT_MHI); + AS_STRUCT(rt_mhi).bgn = hi_memory_start / + E2K_SIC_SIZE_RT_MHI; + AS_STRUCT(rt_mhi).end = (hi_memory_start + + hole_size_lo + size_hi - 1) / + E2K_SIC_SIZE_RT_MHI; + NATIVE_SET_SICREG(rt_mhi0, AS_WORD(rt_mhi), 0, 3); + rom_printk("NODE #3 high memory router set from " + "0x%X to 0x%X\n", + 
hi_memory_start, + hi_memory_start + hole_size_lo + size_hi - 1); + DebugMRT("set_memory_filters: NODE #3 set rt_mhi0 to " + "0x%x\n", + AS_WORD(rt_mhi)); + lo_high_memory_start = hi_memory_start; + lo_high_memory_size = lo_memory_start; + if (lo_high_memory_size < size_hi) { + hi_high_memory_size = + size_hi - lo_high_memory_size; + hi_high_memory_start = hi_memory_start + + lo_memory_start + hole_size_lo; + } else { + lo_high_memory_size = size_hi; + hi_high_memory_size = 0; + } + if (lo_high_memory_size != 0) { + add_memory_region(boot_info, 3, + lo_high_memory_start, + lo_high_memory_size); + rom_printk("NODE #3 high memory lo region set " + "from 0x%X to 0x%X\n", + lo_high_memory_start, + lo_high_memory_start + + lo_high_memory_size); + } + if (hi_high_memory_size != 0) { + add_memory_region(boot_info, 3, + hi_high_memory_start, + hi_high_memory_size); + rom_printk("NODE #3 high memory hi region set " + "from 0x%X to 0x%X\n", + hi_high_memory_start, + hi_high_memory_start + + hi_high_memory_size); + } + if (phys_node_pres_map & 0x1) { /* NODE #0 is online */ + /* + * Setup memory routers of NODE #0 to access" + * to hi memory of NODE #3, + * NODE #3 is located on link #3 of NODE #0 + */ + NATIVE_SET_SICREG(rt_mhi3, AS_WORD(rt_mhi), 0, 0); + DebugMRT("set_memory_filters: NODE #0 set " + "rt_mhi3 to 0x%x to access to " + "memory of NODE #3\n", + AS_WORD(rt_mhi)); + } + if (phys_node_pres_map & 0x2) { /* NODE #1 is online */ + /* + * Setup memory routers of NODE #1 to access + * to hi memory of NODE #3, + * NODE #3 is located on link #2 of NODE #1 + */ + NATIVE_SET_SICREG(rt_mhi2, AS_WORD(rt_mhi), 0, 1); + DebugMRT("set_memory_filters: NODE #1 set " + "rt_mhi2 to 0x%x to access to " + "memory of NODE #3\n", + AS_WORD(rt_mhi)); + } + if (phys_node_pres_map & 0x4) { /* NODE #2 is online */ + /* + * Setup memory routers of NODE #2 to access + * to hi memory of NODE #3, + * NODE #3 is located on link #1 of NODE #2 + */ + NATIVE_SET_SICREG(rt_mhi1, AS_WORD(rt_mhi), 0, 
2); + DebugMRT("set_memory_filters: NODE #2 set " + "rt_mhi1 to 0x%x to access to " + "memory of NODE #3\n", + AS_WORD(rt_mhi)); + } + hi_memory_start += (hole_size_lo + size_hi); + hi_memory_start = ALIGN_UP(hi_memory_start, + E2K_SIC_MIN_MEMORY_BANK); + } +#endif /* CONFIG_ENABLE_EXTMEM */ + } + add_memory_region(boot_info, 3, 0, 0); + +#endif /* ! CONFIG_SMP */ +} +#elif defined(CONFIG_E2K_LEGACY_SIC) +static void configure_routing_regs(void) +{ + unsigned short vid, vvid; + unsigned short did, vdid; + unsigned short pci_cmd; + unsigned int hb_cfg; + + vid = early_readw_hb_reg(PCI_VENDOR_ID); + did = early_readw_hb_reg(PCI_DEVICE_ID); + DebugRT("configure_routing_regs: host bridge vendor ID = 0x%04x " + "device ID = 0x%04x\n", vid, did); + if (vid != PCI_VENDOR_ID_MCST_TMP) { + rom_printk("Invalid Host Bridge vendor ID 0x%04x instead of " + "0x%04x\n", vid, PCI_VENDOR_ID_MCST_TMP); + } + if (did != PCI_DEVICE_ID_MCST_HB) { + rom_printk("Invalid Host Bridge device ID 0x%04x instead of " + "0x%04x\n", did, PCI_DEVICE_ID_MCST_HB); + } + vvid = early_readw_eg_reg(PCI_VENDOR_ID); + vdid = early_readw_eg_reg(PCI_DEVICE_ID); + DebugRT("configure_routing_regs: embeded graphic controller vendor " + "ID = 0x%04x device ID = 0x%04x\n", vvid, vdid); + if (vvid != PCI_VENDOR_ID_MCST_TMP) { + rom_printk("Invalid Embeded Graphic controller vendor " + "ID 0x%04x instead of 0x%04x\n", + vvid, PCI_VENDOR_ID_MCST_TMP); + } + if (vdid != PCI_DEVICE_ID_MCST_MGA2) { + rom_printk("Invalid Embeded Graphic controller device " + "ID 0x%04x instead of 0x%04x\n", + vdid, PCI_DEVICE_ID_MCST_MGA2); + } + + /* Setup initial state of Host Bridge CFG */ + hb_cfg = early_readl_hb_reg(HB_PCI_CFG); + DebugRT("configure_routing_regs: host bridge CFG 0x%08x\n", + hb_cfg); +#ifdef CONFIG_VRAM_DISABLE + hb_cfg &= ~HB_CFG_IntegratedGraphicsEnable; + early_writel_hb_reg(hb_cfg, HB_PCI_CFG); + rom_printk("host bridge CFG: disable embeded graphic 0x%X\n", hb_cfg); +#endif /* CONFIG_VRAM_DISABLE */ + + 
phys_node_num = 1; + phys_node_pres_map = 0x1; + + pci_cmd = early_readw_hb_reg(PCI_COMMAND); + DebugRT("configure_routing_regs: host bridge PCICMD 0x%04x\n", + pci_cmd); + pci_cmd |= PCI_COMMAND_MEMORY; + early_writew_hb_reg(pci_cmd, PCI_COMMAND); + rom_printk("Host Bridge PCICMD set to 0x%04x\n", pci_cmd); +} +void set_memory_filters(boot_info_t *boot_info) +{ + long size_lo = size_real; + u64 memory_start = 0; /* memory starts from 0 and can be on BSP */ + u64 lo_mem_end; + u32 tom_lo; + int vram_size = EG_VRAM_MBYTES_SIZE; +#ifndef CONFIG_VRAM_DISABLE + u32 eg_cfg; +#endif /* ! CONFIG_VRAM_DISABLE */ +#ifdef CONFIG_ENABLE_EXTMEM + long size_hi = 0; + u64 hi_mem_end; + u64 tom_hi; + u64 remapbase; +#endif /* CONFIG_ENABLE_EXTMEM */ + + /* Configure TOM & TOM2 & REMAPBASE */ + tom_lo = early_readl_hb_reg(HB_PCI_TOM); + DebugMRT("set_memory_filters: TOM (low memory top) = 0x%x\n", tom_lo); + + size_lo -= vram_size; + if (size_lo > PROBE_MEM_LIMIT) { + size_lo = PROBE_MEM_LIMIT; + } + size_lo &= HB_PCI_TOM_LOW_MASK; + if (size_lo <= 0) { + rom_printk("memory size 0x%X is too small to enable low memory " + "and VRAM,\n" + "\tincrease CONFIG_MEMLIMIT (now 0x%X)\n" + "\tor change VRAM size (now 0x%X) at config\n", + size_real, PROBE_MEM_LIMIT, vram_size); + E2K_LMS_HALT_OK; + } + lo_mem_end = memory_start + size_lo; + lo_mem_end &= HB_PCI_TOM_LOW_MASK; + if (lo_mem_end == 0) { + rom_printk("low memory size 0x%X is too small, use default " + "size 0x%X\n", + size_lo, tom_lo); + } else { + tom_lo = (tom_lo & ~HB_PCI_TOM_LOW_MASK) | lo_mem_end; + early_writel_hb_reg(tom_lo, HB_PCI_TOM); +#ifndef CONFIG_VRAM_DISABLE + /* VRAM is part of common low memory */ + eg_cfg = early_readl_eg_reg(EG_PCI_CFG); + DebugMRT("set_memory_filters: EG CFG = 0x%x\n", eg_cfg); + eg_cfg &= ~EG_CFG_VRAM_SIZE_MASK; + eg_cfg |= EG_VRAM_SIZE_FLAGS; + early_writel_eg_reg(eg_cfg, EG_PCI_CFG); + rom_printk("set VRAM size to 0x%X at CFG 0x%x\n", + vram_size, early_readl_eg_reg(EG_PCI_CFG)); +#endif 
/* ! CONFIG_VRAM_DISABLE */ + } + rom_printk("low memory TOM set to 0x%X\n", tom_lo); + add_memory_region(boot_info, 0, memory_start, size_lo); +#ifdef CONFIG_ENABLE_EXTMEM + if (size_lo + vram_size < size_real) { + /* Setup high memory filter */ + hi_memory_start = HB_PCI_HI_ADDR_BASE; + size_hi = size_real - size_lo - vram_size; + tom_hi = early_readll_hb_reg(HB_PCI_TOM2); + DebugMRT("set_memory_filters: TOM2 (high memory top) = 0x%x\n", + tom_hi); + size_hi &= HB_PCI_TOM2_HI_MASK; + hi_mem_end = (hi_memory_start + size_hi); + hi_mem_end &= HB_PCI_TOM2_HI_MASK; + if (hi_mem_end == hi_memory_start) { + rom_printk("high memory size 0x%X is too small, " + "ignore high memory\n", + size_hi); + } else { + tom_hi = (tom_hi & ~HB_PCI_TOM2_HI_MASK) | hi_mem_end; + early_writell_hb_reg(tom_hi, HB_PCI_TOM2); + rom_printk("high memory TOM2 set to 0x%X\n", tom_hi); + remapbase = HB_PCI_HI_ADDR_BASE; + if (size_lo + vram_size + size_hi > HB_PCI_HI_ADDR_BASE) + remapbase = size_lo + vram_size + size_hi; + early_writell_hb_reg(remapbase, HB_PCI_REMAPBASE); + rom_printk("low memory REMAPBASE set to 0x%X\n", + remapbase); + add_memory_region(boot_info, 0, hi_memory_start, + size_hi); + } + } +#endif /* CONFIG_ENABLE_EXTMEM */ + +} +#endif /* CONFIG_E2K_FULL_SIC */ +#endif /* CONFIG_E2K_SIC */ + +#ifdef CONFIG_SMP +#ifdef CONFIG_E2K_SIC +#ifdef CONFIG_E2K_FULL_SIC +int inline e2k_startup_core(e2k_rt_lcfg_struct_t rt_lcfg, int core) +{ + e2k_st_core_t st_core = {{ 0 }}; + int cln = E2K_RT_LCFG_cln(rt_lcfg); + int pln = E2K_RT_LCFG_pln(rt_lcfg); + + if (core == 0) { + E2K_ST_CORE_reg(st_core) = + NATIVE_GET_SICREG(st_core0, cln, pln); + } else if (core == 1) { + E2K_ST_CORE_reg(st_core) = + NATIVE_GET_SICREG(st_core1, cln, pln); + } else if (core == 2) { + E2K_ST_CORE_reg(st_core) = + NATIVE_GET_SICREG(st_core2, cln, pln); + } else if (core == 3) { + E2K_ST_CORE_reg(st_core) = + NATIVE_GET_SICREG(st_core3, cln, pln); + } else if (core == 4) { + E2K_ST_CORE_reg(st_core) = + 
NATIVE_GET_SICREG(st_core4, cln, pln); + } else if (core == 5) { + E2K_ST_CORE_reg(st_core) = + NATIVE_GET_SICREG(st_core5, cln, pln); + } else if (core == 6) { + E2K_ST_CORE_reg(st_core) = + NATIVE_GET_SICREG(st_core6, cln, pln); + } else if (core == 7) { + E2K_ST_CORE_reg(st_core) = + NATIVE_GET_SICREG(st_core7, cln, pln); + } else if (core == 8) { + E2K_ST_CORE_reg(st_core) = + NATIVE_GET_SICREG(st_core8, cln, pln); + } else if (core == 9) { + E2K_ST_CORE_reg(st_core) = + NATIVE_GET_SICREG(st_core9, cln, pln); + } else if (core == 10) { + E2K_ST_CORE_reg(st_core) = + NATIVE_GET_SICREG(st_core10, cln, pln); + } else if (core == 11) { + E2K_ST_CORE_reg(st_core) = + NATIVE_GET_SICREG(st_core11, cln, pln); + } else if (core == 12) { + E2K_ST_CORE_reg(st_core) = + NATIVE_GET_SICREG(st_core12, cln, pln); + } else if (core == 13) { + E2K_ST_CORE_reg(st_core) = + NATIVE_GET_SICREG(st_core13, cln, pln); + } else if (core == 14) { + E2K_ST_CORE_reg(st_core) = + NATIVE_GET_SICREG(st_core14, cln, pln); + } else if (core == 15) { + E2K_ST_CORE_reg(st_core) = + NATIVE_GET_SICREG(st_core15, cln, pln); + } else { + rom_printk("Invalid core # %d to detect\n", core); + return 0; + } + + if (!E2K_ST_CORE_val(st_core)) + return 0; + rom_printk("Start up detected core #%d in cluster %d node %d\n", + core, cln, pln); + E2K_ST_CORE_wait_init(st_core) = 0; + if (core == 0) { + NATIVE_SET_SICREG(st_core0, E2K_ST_CORE_reg(st_core), cln, pln); + } else if (core == 1) { + NATIVE_SET_SICREG(st_core1, E2K_ST_CORE_reg(st_core), cln, pln); + } else if (core == 2) { + NATIVE_SET_SICREG(st_core2, E2K_ST_CORE_reg(st_core), cln, pln); + } else if (core == 3) { + NATIVE_SET_SICREG(st_core3, E2K_ST_CORE_reg(st_core), cln, pln); + } else if (core == 4) { + NATIVE_SET_SICREG(st_core4, E2K_ST_CORE_reg(st_core), cln, pln); + } else if (core == 5) { + NATIVE_SET_SICREG(st_core5, E2K_ST_CORE_reg(st_core), cln, pln); + } else if (core == 6) { + NATIVE_SET_SICREG(st_core6, E2K_ST_CORE_reg(st_core), cln, 
pln); + } else if (core == 7) { + NATIVE_SET_SICREG(st_core7, E2K_ST_CORE_reg(st_core), cln, pln); + } else if (core == 8) { + NATIVE_SET_SICREG(st_core8, E2K_ST_CORE_reg(st_core), cln, pln); + } else if (core == 9) { + NATIVE_SET_SICREG(st_core9, E2K_ST_CORE_reg(st_core), cln, pln); + } else if (core == 10) { + NATIVE_SET_SICREG(st_core10, + E2K_ST_CORE_reg(st_core), cln, pln); + } else if (core == 11) { + NATIVE_SET_SICREG(st_core11, + E2K_ST_CORE_reg(st_core), cln, pln); + } else if (core == 12) { + NATIVE_SET_SICREG(st_core12, + E2K_ST_CORE_reg(st_core), cln, pln); + } else if (core == 13) { + NATIVE_SET_SICREG(st_core13, + E2K_ST_CORE_reg(st_core), cln, pln); + } else if (core == 14) { + NATIVE_SET_SICREG(st_core14, + E2K_ST_CORE_reg(st_core), cln, pln); + } else if (core == 15) { + NATIVE_SET_SICREG(st_core15, + E2K_ST_CORE_reg(st_core), cln, pln); + } else { + rom_printk("Invalid core # %d to start up\n", core); + return 0; + } + rom_printk("Started up core #%d in cluster %d node %d\n", + core, cln, pln); + return 1; +} +#elif defined(CONFIG_E2K_LEGACY_SIC) +inline int e2k_startup_core(e2k_rt_lcfg_struct_t rt_lcfg, int core) +{ + return 1; +} +#endif /* CONFIG_E2K_FULL_SIC */ +#endif /* CONFIG_E2K_SIC */ +#endif /* CONFIG_SMP */ + +#ifdef CONFIG_E2K_SIC +#if defined(CONFIG_E2K_FULL_SIC) +static void configure_node_io_routing(int node, int link) +{ + e2k_rt_ioapic_struct_t rt_ioapic; + e2k_rt_pcim_struct_t rt_pcim; + e2k_rt_pciio_struct_t rt_pciio; + unsigned long pcim_bgn; + unsigned long pcim_end; + int rt_ioapic0_reg; + int rt_ioapic1_reg; + int rt_ioapic2_reg; + int rt_ioapic3_reg; + int rt_pcim0_reg; + int rt_pcim1_reg; + int rt_pcim2_reg; + int rt_pcim3_reg; + int rt_pciio0_reg; + int rt_pciio1_reg; + int rt_pciio2_reg; + int rt_pciio3_reg; + int domain; + + rt_ioapic.E2K_RT_IOAPIC_reg = 0; + rt_pcim.E2K_RT_PCIM_reg = 0; + rt_pciio.E2K_RT_PCIIO_reg = 0; + + if (node == 0) { + rt_ioapic0_reg = SIC_rt_ioapic0; + rt_ioapic1_reg = SIC_rt_ioapic1; + 
rt_ioapic2_reg = SIC_rt_ioapic2; + rt_ioapic3_reg = SIC_rt_ioapic3; + rt_pcim0_reg = SIC_rt_pcim0; + rt_pcim1_reg = SIC_rt_pcim1; + rt_pcim2_reg = SIC_rt_pcim2; + rt_pcim3_reg = SIC_rt_pcim3; + rt_pciio0_reg = SIC_rt_pciio0; + rt_pciio1_reg = SIC_rt_pciio1; + rt_pciio2_reg = SIC_rt_pciio2; + rt_pciio3_reg = SIC_rt_pciio3; + } else if (node == 1) { + rt_ioapic0_reg = SIC_rt_ioapic3; + rt_ioapic1_reg = SIC_rt_ioapic0; + rt_ioapic2_reg = SIC_rt_ioapic1; + rt_ioapic3_reg = SIC_rt_ioapic2; + rt_pcim0_reg = SIC_rt_pcim3; + rt_pcim1_reg = SIC_rt_pcim0; + rt_pcim2_reg = SIC_rt_pcim1; + rt_pcim3_reg = SIC_rt_pcim2; + rt_pciio0_reg = SIC_rt_pciio3; + rt_pciio1_reg = SIC_rt_pciio0; + rt_pciio2_reg = SIC_rt_pciio1; + rt_pciio3_reg = SIC_rt_pciio2; + } else if (node == 2) { + rt_ioapic0_reg = SIC_rt_ioapic2; + rt_ioapic1_reg = SIC_rt_ioapic3; + rt_ioapic2_reg = SIC_rt_ioapic0; + rt_ioapic3_reg = SIC_rt_ioapic1; + rt_pcim0_reg = SIC_rt_pcim2; + rt_pcim1_reg = SIC_rt_pcim3; + rt_pcim2_reg = SIC_rt_pcim0; + rt_pcim3_reg = SIC_rt_pcim1; + rt_pciio0_reg = SIC_rt_pciio2; + rt_pciio1_reg = SIC_rt_pciio3; + rt_pciio2_reg = SIC_rt_pciio0; + rt_pciio3_reg = SIC_rt_pciio1; + } else if (node == 3) { + rt_ioapic0_reg = SIC_rt_ioapic1; + rt_ioapic1_reg = SIC_rt_ioapic2; + rt_ioapic2_reg = SIC_rt_ioapic3; + rt_ioapic3_reg = SIC_rt_ioapic0; + rt_pcim0_reg = SIC_rt_pcim1; + rt_pcim1_reg = SIC_rt_pcim2; + rt_pcim2_reg = SIC_rt_pcim3; + rt_pcim3_reg = SIC_rt_pcim0; + rt_pciio0_reg = SIC_rt_pciio1; + rt_pciio1_reg = SIC_rt_pciio2; + rt_pciio2_reg = SIC_rt_pciio3; + rt_pciio3_reg = SIC_rt_pciio0; + } else { + rom_printk("configure_node_io_routing() invalid node #%d\n", + node); + return; + } + domain = node_iohub_to_domain(node, link); + + /* configure own link of the NODE to access to own ioapic space */ + rt_ioapic.E2K_RT_IOAPIC_bgn = domain; + early_sic_write_node_iolink_nbsr_reg(node, link, SIC_rt_ioapic0, + rt_ioapic.E2K_RT_IOAPIC_reg); + DebugIORT("NODE #%d IO link #%d: IO-APIC router set 
from 0x%X\n", + node, link, domain); + pcim_bgn = PCI_MEM_DOMAIN_START(domain); + pcim_end = PCI_MEM_DOMAIN_END(domain); + rt_pcim.E2K_RT_PCIM_bgn = (pcim_bgn) >> E2K_SIC_ALIGN_RT_PCIM; + rt_pcim.E2K_RT_PCIM_end = (pcim_end - 1) >> E2K_SIC_ALIGN_RT_PCIM; + early_sic_write_node_iolink_nbsr_reg(node, link, SIC_rt_pcim0, + rt_pcim.E2K_RT_PCIM_reg); + DebugIORT("NODE #%d IO link #%d: PCI-MM router set from 0x%X " + "to 0x%X\n", + node, link, pcim_bgn, pcim_end); + pcim_bgn = PCI_IO_DOMAIN_START(domain); + pcim_end = PCI_IO_DOMAIN_END(domain); + rt_pciio.E2K_RT_PCIIO_bgn = (pcim_bgn) >> E2K_SIC_ALIGN_RT_PCIIO; + rt_pciio.E2K_RT_PCIIO_end = (pcim_end - 1) >> E2K_SIC_ALIGN_RT_PCIIO; + early_sic_write_node_iolink_nbsr_reg(node, link, SIC_rt_pciio0, + rt_pciio.E2K_RT_PCIIO_reg); + DebugIORT("NODE #%d IO link #%d: PCI-IO router set from 0x%X " + "to 0x%X\n", + node, link, pcim_bgn, pcim_end); + + if (node != 0 && (phys_node_pres_map & 0x1)) { // node #0 is present + /* configure link the NODE to access to ioapic space NODE 0 */ + domain = node_iohub_to_domain(0, link); + rt_ioapic.E2K_RT_IOAPIC_bgn = domain; + early_sic_write_node_iolink_nbsr_reg(node, link, rt_ioapic0_reg, + rt_ioapic.E2K_RT_IOAPIC_reg); + DebugIORT("NODE #%d IO link #%d: router to IO-APIC node #0 set " + "from 0x%X\n", + node, link, domain); + pcim_bgn = PCI_MEM_DOMAIN_START(domain); + pcim_end = PCI_MEM_DOMAIN_END(domain); + rt_pcim.E2K_RT_PCIM_bgn = (pcim_bgn) >> E2K_SIC_ALIGN_RT_PCIM; + rt_pcim.E2K_RT_PCIM_end = + (pcim_end - 1) >> E2K_SIC_ALIGN_RT_PCIM; + early_sic_write_node_iolink_nbsr_reg(node, link, rt_pcim0_reg, + rt_pcim.E2K_RT_PCIM_reg); + DebugIORT("NODE #%d IO link #%d: router to PCI-MM node #0 set " + "from 0x%X\n", + node, link, pcim_bgn, pcim_end); + pcim_bgn = PCI_IO_DOMAIN_START(domain); + pcim_end = PCI_IO_DOMAIN_END(domain); + rt_pciio.E2K_RT_PCIIO_bgn = + (pcim_bgn) >> E2K_SIC_ALIGN_RT_PCIIO; + rt_pciio.E2K_RT_PCIIO_end = + (pcim_end - 1) >> E2K_SIC_ALIGN_RT_PCIIO; + 
early_sic_write_node_iolink_nbsr_reg(node, link, rt_pciio0_reg, + rt_pciio.E2K_RT_PCIIO_reg); + DebugIORT("NODE #%d IO link #%d: router to PCI-IO node #0 set " + "from 0x%X\n", + node, link, pcim_bgn, pcim_end); + } + if (node != 1 && (phys_node_pres_map & 0x2)) { // node #1 is present + /* configure link the NODE to access to ioapic space NODE 1 */ + domain = node_iohub_to_domain(1, link); + rt_ioapic.E2K_RT_IOAPIC_bgn = domain; + early_sic_write_node_iolink_nbsr_reg(node, link, rt_ioapic1_reg, + rt_ioapic.E2K_RT_IOAPIC_reg); + DebugIORT("NODE #%d IO link #%d: router to IO-APIC node #1 set " + "from 0x%X\n", + node, link, domain); + pcim_bgn = PCI_MEM_DOMAIN_START(domain); + pcim_end = PCI_MEM_DOMAIN_END(domain); + rt_pcim.E2K_RT_PCIM_bgn = (pcim_bgn) >> E2K_SIC_ALIGN_RT_PCIM; + rt_pcim.E2K_RT_PCIM_end = + (pcim_end - 1) >> E2K_SIC_ALIGN_RT_PCIM; + early_sic_write_node_iolink_nbsr_reg(node, link, rt_pcim1_reg, + rt_pcim.E2K_RT_PCIM_reg); + DebugIORT("NODE #%d IO link #%d: router to PCI-MM node #1 set " + "from 0x%X\n", + node, link, pcim_bgn, pcim_end); + pcim_bgn = PCI_IO_DOMAIN_START(domain); + pcim_end = PCI_IO_DOMAIN_END(domain); + rt_pciio.E2K_RT_PCIIO_bgn = + (pcim_bgn) >> E2K_SIC_ALIGN_RT_PCIIO; + rt_pciio.E2K_RT_PCIIO_end = + (pcim_end - 1) >> E2K_SIC_ALIGN_RT_PCIIO; + early_sic_write_node_iolink_nbsr_reg(node, link, rt_pciio1_reg, + rt_pciio.E2K_RT_PCIIO_reg); + DebugIORT("NODE #%d IO link #%d: router to PCI-IO node #1 set " + "from 0x%X\n", + node, link, pcim_bgn, pcim_end); + } + if (node != 2 && (phys_node_pres_map & 0x4)) { // node #2 is present + /* configure link the NODE to access to ioapic space NODE 2 */ + domain = node_iohub_to_domain(2, link); + rt_ioapic.E2K_RT_IOAPIC_bgn = domain; + early_sic_write_node_iolink_nbsr_reg(node, link, rt_ioapic2_reg, + rt_ioapic.E2K_RT_IOAPIC_reg); + DebugIORT("NODE #%d IO link #%d: router to IO-APIC node #2 set " + "from 0x%X\n", + node, link, domain); + pcim_bgn = PCI_MEM_DOMAIN_START(domain); + pcim_end = 
PCI_MEM_DOMAIN_END(domain); + rt_pcim.E2K_RT_PCIM_bgn = (pcim_bgn) >> E2K_SIC_ALIGN_RT_PCIM; + rt_pcim.E2K_RT_PCIM_end = + (pcim_end - 1) >> E2K_SIC_ALIGN_RT_PCIM; + early_sic_write_node_iolink_nbsr_reg(node, link, rt_pcim2_reg, + rt_pcim.E2K_RT_PCIM_reg); + DebugIORT("NODE #%d IO link #%d: router to PCI-MM node #2 set " + "from 0x%X\n", + node, link, pcim_bgn, pcim_end); + pcim_bgn = PCI_IO_DOMAIN_START(domain); + pcim_end = PCI_IO_DOMAIN_END(domain); + rt_pciio.E2K_RT_PCIIO_bgn = + (pcim_bgn) >> E2K_SIC_ALIGN_RT_PCIIO; + rt_pciio.E2K_RT_PCIIO_end = + (pcim_end - 1) >> E2K_SIC_ALIGN_RT_PCIIO; + early_sic_write_node_iolink_nbsr_reg(node, link, rt_pciio2_reg, + rt_pciio.E2K_RT_PCIIO_reg); + DebugIORT("NODE #%d IO link #%d: router to PCI-IO node #2 set " + "from 0x%X\n", + node, link, pcim_bgn, pcim_end); + } + if (node != 3 && (phys_node_pres_map & 0x8)) { // node #3 is present + /* configure link the NODE to access to ioapic space NODE 3 */ + domain = node_iohub_to_domain(3, link); + rt_ioapic.E2K_RT_IOAPIC_bgn = domain; + early_sic_write_node_iolink_nbsr_reg(node, link, rt_ioapic3_reg, + rt_ioapic.E2K_RT_IOAPIC_reg); + DebugIORT("NODE #%d IO link #%d: router to IO-APIC node #3 set " + "from 0x%X\n", + node, link, domain); + pcim_bgn = PCI_MEM_DOMAIN_START(domain); + pcim_end = PCI_MEM_DOMAIN_END(domain); + rt_pcim.E2K_RT_PCIM_bgn = (pcim_bgn) >> E2K_SIC_ALIGN_RT_PCIM; + rt_pcim.E2K_RT_PCIM_end = + (pcim_end - 1) >> E2K_SIC_ALIGN_RT_PCIM; + early_sic_write_node_iolink_nbsr_reg(node, link, rt_pcim3_reg, + rt_pcim.E2K_RT_PCIM_reg); + DebugIORT("NODE #%d IO link #%d: router to PCI-MM node #3 set " + "from 0x%X\n", + node, link, pcim_bgn, pcim_end); + pcim_bgn = PCI_IO_DOMAIN_START(domain); + pcim_end = PCI_IO_DOMAIN_END(domain); + rt_pciio.E2K_RT_PCIIO_bgn = + (pcim_bgn) >> E2K_SIC_ALIGN_RT_PCIIO; + rt_pciio.E2K_RT_PCIIO_end = + (pcim_end - 1) >> E2K_SIC_ALIGN_RT_PCIIO; + early_sic_write_node_iolink_nbsr_reg(node, link, rt_pciio3_reg, + rt_pciio.E2K_RT_PCIIO_reg); + 
DebugIORT("NODE #%d IO link #%d: router to PCI-IO node #3 set " + "from 0x%X\n", + node, link, pcim_bgn, pcim_end); + } +} + +static void configure_io_routing(void) +{ + int node; + int link; + + for (node = 0; node < MAX_NUMNODES; node ++) { + if (!(phys_node_pres_map & (1 << node))) + continue; + for_each_iolink_of_node(link) { + configure_node_io_routing(node, link); + } + } +} +#elif defined(CONFIG_E2K_LEGACY_SIC) +#define configure_io_routing() +#endif /* CONFIG_E2K_FULL_SIC */ +#endif /* CONFIG_E2K_SIC */ + +#ifdef CONFIG_E2K_SIC +#ifdef CONFIG_E2K_FULL_SIC +#ifdef CONFIG_SMP +static int startup_all_cores(e2k_rt_lcfg_struct_t rt_lcfg, int max_cores_num, + bool bsp) +{ + int i = 0, core; + + for (core = 0; core < max_cores_num; core++) { + if (core == 0 && bsp) + /* if core # 0 is BSP then already started */ + continue; + i += e2k_startup_core(rt_lcfg, core); + } + return i; +} +#endif /* CONFIG_SMP */ + +static void configure_node_io_link(int node) +{ + e2k_rt_lcfg_struct_t rt_lcfg; + int rt_lcfg0_reg; + int rt_lcfg1_reg; + int rt_lcfg2_reg; + int rt_lcfg3_reg; + int iolink_on; + int link; + int domain; + + if (node == 0) { + rt_lcfg0_reg = SIC_rt_lcfg0; + rt_lcfg1_reg = SIC_rt_lcfg1; + rt_lcfg2_reg = SIC_rt_lcfg2; + rt_lcfg3_reg = SIC_rt_lcfg3; + } else if (node == 1) { + rt_lcfg0_reg = SIC_rt_lcfg3; + rt_lcfg1_reg = SIC_rt_lcfg0; + rt_lcfg2_reg = SIC_rt_lcfg1; + rt_lcfg3_reg = SIC_rt_lcfg2; + } else if (node == 2) { + rt_lcfg0_reg = SIC_rt_lcfg2; + rt_lcfg1_reg = SIC_rt_lcfg3; + rt_lcfg2_reg = SIC_rt_lcfg0; + rt_lcfg3_reg = SIC_rt_lcfg1; + } else if (node == 3) { + rt_lcfg0_reg = SIC_rt_lcfg1; + rt_lcfg1_reg = SIC_rt_lcfg2; + rt_lcfg2_reg = SIC_rt_lcfg3; + rt_lcfg3_reg = SIC_rt_lcfg0; + } else { + rom_printk("configure_node_io_link() invalid node #%d\n", + node); + return; + } + + /* configure own link cfg of the NODE to access to own io link */ + E2K_RT_LCFG_reg(rt_lcfg) = early_sic_read_node_nbsr_reg(node, + SIC_rt_lcfg0); + iolink_on = 0; + 
for_each_iolink_of_node(link) { + domain = node_iohub_to_domain(node, link); + if ((online_iohubs_map & (1 << domain)) || + (online_rdmas_map & (1 << domain))) + iolink_on |= 1; + } + E2K_RT_LCFG_vio(rt_lcfg) = iolink_on; + early_sic_write_node_nbsr_reg(node, SIC_rt_lcfg0, + E2K_RT_LCFG_reg(rt_lcfg)); + + if (node != 0 && (phys_node_pres_map & 0x1)) { // node #0 is present + /* configure link cfg the NODE to access to io link of NODE 0 */ + E2K_RT_LCFG_reg(rt_lcfg) = early_sic_read_node_nbsr_reg(node, + rt_lcfg0_reg); + iolink_on = 0; + for_each_iolink_of_node(link) { + domain = node_iohub_to_domain(0, link); + if ((online_iohubs_map & (1 << domain)) || + (online_rdmas_map & (1 << domain))) + iolink_on |= 1; + } + E2K_RT_LCFG_vio(rt_lcfg) = iolink_on; + early_sic_write_node_nbsr_reg(node, rt_lcfg0_reg, + E2K_RT_LCFG_reg(rt_lcfg)); + } + if (node != 1 && (phys_node_pres_map & 0x2)) { // node #1 is present + /* configure link cfg the NODE to access to io link of NODE 1 */ + E2K_RT_LCFG_reg(rt_lcfg) = early_sic_read_node_nbsr_reg(node, + rt_lcfg1_reg); + iolink_on = 0; + for_each_iolink_of_node(link) { + domain = node_iohub_to_domain(1, link); + if ((online_iohubs_map & (1 << domain)) || + (online_rdmas_map & (1 << domain))) + iolink_on |= 1; + } + E2K_RT_LCFG_vio(rt_lcfg) = iolink_on; + early_sic_write_node_nbsr_reg(node, rt_lcfg1_reg, + E2K_RT_LCFG_reg(rt_lcfg)); + } + if (node != 2 && (phys_node_pres_map & 0x4)) { // node #2 is present + /* configure link cfg the NODE to access to io link of NODE 2 */ + E2K_RT_LCFG_reg(rt_lcfg) = early_sic_read_node_nbsr_reg(node, + rt_lcfg2_reg); + iolink_on = 0; + for_each_iolink_of_node(link) { + domain = node_iohub_to_domain(2, link); + if ((online_iohubs_map & (1 << domain)) || + (online_rdmas_map & (1 << domain))) + iolink_on |= 1; + } + E2K_RT_LCFG_vio(rt_lcfg) = iolink_on; + early_sic_write_node_nbsr_reg(node, rt_lcfg2_reg, + E2K_RT_LCFG_reg(rt_lcfg)); + } + if (node != 3 && (phys_node_pres_map & 0x8)) { // node #3 is 
present + /* configure link cfg the NODE to access to io link of NODE 3 */ + E2K_RT_LCFG_reg(rt_lcfg) = early_sic_read_node_nbsr_reg(node, + rt_lcfg3_reg); + iolink_on = 0; + for_each_iolink_of_node(link) { + domain = node_iohub_to_domain(3, link); + if ((online_iohubs_map & (1 << domain)) || + (online_rdmas_map & (1 << domain))) + iolink_on |= 1; + } + E2K_RT_LCFG_vio(rt_lcfg) = iolink_on; + early_sic_write_node_nbsr_reg(node, rt_lcfg3_reg, + E2K_RT_LCFG_reg(rt_lcfg)); + } +} + +static void configure_io_links(void) +{ + int node; + + for (node = 0; node < MAX_NUMNODES; node ++) { + if (!(phys_node_pres_map & (1 << node))) + continue; + configure_node_io_link(node); + } +} + +#ifdef CONFIG_EIOH +static void scan_iolink_config(int node, int link) +{ + rom_printk("%s() is not implemented for EIOHub\n", __func__); +} +#else /* ! CONFIG_EIOH */ +static void scan_iolink_config(int node, int link) +{ + e2k_iol_csr_struct_t io_link; + e2k_io_csr_struct_t io_hub; + e2k_rdma_cs_struct_t rdma; + int src_mode, dst_mode; + int ab_type; + int link_on; + + link_on = 0; + + io_link.E2K_IOL_CSR_reg = + early_sic_read_node_iolink_nbsr_reg(node, link, SIC_iol_csr); + src_mode = io_link.E2K_IOL_CSR_mode; + rom_printk("Node #%d IO LINK #%d is", node, link); + if (io_link.E2K_IOL_CSR_mode == IOHUB_IOL_MODE) { + io_hub.E2K_IO_CSR_reg = + early_sic_read_node_iolink_nbsr_reg(node, link, + SIC_io_csr); + if (io_hub.E2K_IO_CSR_ch_on) + link_on = 1; + } else { + rdma.E2K_RDMA_CS_reg = + early_sic_read_node_iolink_nbsr_reg(node, link, + SIC_rdma_cs); + if (rdma.E2K_RDMA_CS_ch_on) + link_on = 1; + } + if (!link_on) { + if (src_mode == IOHUB_IOL_MODE) { + possible_iohubs_map |= + (1 << node_iohub_to_domain(node, link)); + possible_iohubs_num ++; + rom_printk(" IOHUB controller"); + } else { + possible_rdmas_map |= + (1 << node_iohub_to_domain(node, link)); + possible_rdmas_num ++; + rom_printk(" RDMA controller"); + } + rom_printk(" OFF\n"); + return; + } + + ab_type = 
io_link.E2K_IOL_CSR_abtype; + switch (ab_type) { + case IOHUB_ONLY_IOL_ABTYPE: + rom_printk(" IO HUB controller ON connected to IOHUB"); + dst_mode = IOHUB_IOL_MODE; + break; + case RDMA_ONLY_IOL_ABTYPE: + rom_printk(" RDMA controller ON connected to RDMA"); + dst_mode = RDMA_IOL_MODE; + break; + case RDMA_IOHUB_IOL_ABTYPE: + rom_printk(" RDMA controller ON connected to IOHUB/RDMA"); + dst_mode = RDMA_IOL_MODE; + break; + default: + rom_printk(" %s controller ON connected to unknown controller", + (src_mode == IOHUB_IOL_MODE) ? "IO HUB" : "RDMA"); + dst_mode = src_mode; + break; + } + + if (src_mode != dst_mode) { + io_link.E2K_IOL_CSR_mode = dst_mode; + early_sic_write_node_iolink_nbsr_reg(node, link, SIC_iol_csr, + io_link.E2K_IOL_CSR_reg); + } + if (dst_mode == IOHUB_IOL_MODE) { + online_iohubs_map |= (1 << node_iohub_to_domain(node, link)); + online_iohubs_num ++; + } else { + online_rdmas_map |= (1 << node_iohub_to_domain(node, link)); + online_rdmas_num ++; + } + rom_printk("\n"); +} +#endif /* CONFIG_EIOH */ + +#ifdef CONFIG_EIOH +static void set_embeded_iohub(int node, int link) +{ + possible_iohubs_map |= (1 << node_iohub_to_domain(node, link)); + possible_iohubs_num++; + + online_iohubs_map |= (1 << node_iohub_to_domain(node, link)); + online_iohubs_num++; + + rom_printk("Node #%d embeded EIOHub controller #%d is ON\n", + node, link); +} +#else /* ! 
CONFIG_EIOH */ +static void set_embeded_iohub(int node, int link) +{ + /* cannot be embeded IOHub */ +} +#endif /* CONFIG_EIOH */ + +static void scan_iohubs(void) +{ + int node; + int link; + + for (node = 0; node < MAX_NUMNODES; node++) { + if (!(phys_node_pres_map & (1 << node))) + continue; + set_embeded_iohub(node, 0); + for_each_iolink_of_node(link) { + scan_iolink_config(node, link); + } + } +} +#elif defined(CONFIG_E2K_LEGACY_SIC) +static void scan_iohubs(void) +{ + /* only one IOHUB on root bus #0 */ + + online_iohubs_map = 0x1; + online_iohubs_num = 1; +} +#define configure_io_links() +#endif /* CONFIG_E2K_FULL_SIC */ +#endif /* CONFIG_E2K_SIC */ + +#ifdef CONFIG_E2C3 +static void enable_embedded_devices(void) +{ + int node; + unsigned int reg = 0xfc000000; /* Enable bits [31:26] */ + + for (node = 0; node < MAX_NUMNODES; node++) { + if (!(phys_node_pres_map & (1 << node))) + continue; + early_sic_write_node_nbsr_reg(node, SIC_rt_pcicfged, reg); + } +} +#endif +#ifdef CONFIG_EIOH +static void setup_rt_msi(void) +{ + unsigned long rt_msi = PCI_MEM_END + 1; /* 0xf8000000 */ + unsigned long rt_msi_lo = rt_msi & 0xffffffff; + unsigned long rt_msi_hi = rt_msi >> 32; + int node; + + for (node = 0; node < MAX_NUMNODES; node++) { + if (!(phys_node_pres_map & (1 << node))) + continue; + early_sic_write_node_nbsr_reg(node, SIC_rt_msi, rt_msi_lo); + early_sic_write_node_nbsr_reg(node, SIC_rt_msi_h, rt_msi_hi); + } +} +#endif + +void jump(void) +{ + + bool bootmode = (bool)(unsigned long)&boot_mode; + e2k_addr_t areabase; + e2k_size_t areasize; + + e2k_psp_hi_t psp_hi; + e2k_psp_lo_t psp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_pcsp_lo_t pcsp_lo; + e2k_usbr_t usbr; + + int cmd_size; + e2k_addr_t busy_mem_start; + e2k_addr_t busy_mem_end; + bank_info_t *bank_info; +#ifdef CONFIG_E2K_SIC +#if defined(CONFIG_E2K_FULL_SIC) +#ifdef CONFIG_SMP + int max_cpus_num; +#endif /* CONFIG_SMP */ + e2k_rt_pciio_struct_t rt_pciio; + e2k_rt_pcim_struct_t rt_pcim; + e2k_rt_ioapic_struct_t 
rt_ioapic; + /* Configure PCIIO for BSP. The only BSP has access to PCIIO, and other cpus through BSP */ + /* so we leave rt_pciio 1,2,3 closed by default */ + AS_WORD(rt_pciio) = NATIVE_GET_SICREG(rt_pciio0, E2K_MAX_CL_NUM, 0); + AS_STRUCT(rt_pciio).bgn = 0x0; + AS_STRUCT(rt_pciio).end = 0xf; /* All the memory for bsp + * 0x01_0100_0 000 - 0x01_0100_F FFF + * Align = 4Kb (0x1000) */ + NATIVE_SET_SICREG(rt_pciio0, AS_WORD(rt_pciio), E2K_MAX_CL_NUM, 0); +#ifdef CONFIG_BIOS + bios_first(); +#endif + + /* Configure IOAPIC for BSP. The only BSP has access to IOAPIC, and other cpus through BSP */ + /* so we leave rt_ioapic 1,2,3 closed by default */ + AS_WORD(rt_ioapic) = NATIVE_GET_SICREG(rt_ioapic0, E2K_MAX_CL_NUM, 0); + DebugRT("jump: rt_ioapic0 = 0x%x\n", AS_WORD(rt_ioapic)); + AS_STRUCT(rt_ioapic).bgn = 0x0; /* 0x00_fec0_0000-0x00_fec0_0fff Align = 4k + * end[20:12] = bgn[20:12] + * end[11:0] = 0xfff */ + NATIVE_SET_SICREG(rt_ioapic0, AS_WORD(rt_ioapic), E2K_MAX_CL_NUM, 0); +#if defined(CONFIG_ES2) + /* Configure second IOAPIC for BSP */ + AS_WORD(rt_ioapic) = NATIVE_GET_SICREG(rt_ioapic10, E2K_MAX_CL_NUM, 0); + DebugRT("jump: rt_ioapic10 = 0x%x\n", AS_WORD(rt_ioapic)); + AS_STRUCT(rt_ioapic).bgn = 0x4; /* 0x00_fec0_4000-0x00_fec0_4fff Align = 4k + * end[20:12] = bgn[20:12] + * end[11:0] = 0xfff */ + NATIVE_SET_SICREG(rt_ioapic10, AS_WORD(rt_ioapic), E2K_MAX_CL_NUM, 0); +#endif /* CONFIG_ES2 */ + + /* Configure IOAPIC link for NODE 1 FIXME: may be used in future */ + AS_WORD(rt_ioapic) = NATIVE_GET_SICREG(rt_ioapic1, E2K_MAX_CL_NUM, 0); + DebugRT("jump: rt_ioapic1 = 0x%x\n", AS_WORD(rt_ioapic)); + AS_STRUCT(rt_ioapic).bgn = 0x1; /* 0x00_fec0_1000-0x00_fec0_1fff Align = 4k + * end[20:12] = bgn[20:12] + * end[11:0] = 0xfff */ + NATIVE_SET_SICREG(rt_ioapic1, AS_WORD(rt_ioapic), E2K_MAX_CL_NUM, 0); +#if defined(CONFIG_ES2) + /* Configure second IOAPIC for NODE 1 */ + AS_WORD(rt_ioapic) = NATIVE_GET_SICREG(rt_ioapic11, E2K_MAX_CL_NUM, 0); + DebugRT("jump: rt_ioapic11 = 
0x%x\n", AS_WORD(rt_ioapic)); + AS_STRUCT(rt_ioapic).bgn = 0x5; /* 0x00_fec0_5000-0x00_fec0_5fff Align = 4k + * end[20:12] = bgn[20:12] + * end[11:0] = 0xfff */ + NATIVE_SET_SICREG(rt_ioapic11, AS_WORD(rt_ioapic), E2K_MAX_CL_NUM, 0); +#endif /* CONFIG_ES2 */ + + /* Configure IOAPIC link for NODE 2 FIXME: may be used in future */ + AS_WORD(rt_ioapic) = NATIVE_GET_SICREG(rt_ioapic2, E2K_MAX_CL_NUM, 0); + DebugRT("jump: rt_ioapic2 = 0x%x\n", AS_WORD(rt_ioapic)); + AS_STRUCT(rt_ioapic).bgn = 0x2; /* 0x00_fec0_2000-0x00_fec0_2fff Align = 4k + * end[20:12] = bgn[20:12] + * end[11:0] = 0xfff */ + NATIVE_SET_SICREG(rt_ioapic2, AS_WORD(rt_ioapic), E2K_MAX_CL_NUM, 0); +#if defined(CONFIG_ES2) + /* Configure second IOAPIC for NODE 2 */ + AS_WORD(rt_ioapic) = NATIVE_GET_SICREG(rt_ioapic12, E2K_MAX_CL_NUM, 0); + DebugRT("jump: rt_ioapic12 = 0x%x\n", AS_WORD(rt_ioapic)); + AS_STRUCT(rt_ioapic).bgn = 0x6; /* 0x00_fec0_6000-0x00_fec0_6fff Align = 4k + * end[20:12] = bgn[20:12] + * end[11:0] = 0xfff */ + NATIVE_SET_SICREG(rt_ioapic12, AS_WORD(rt_ioapic), E2K_MAX_CL_NUM, 0); +#endif /* CONFIG_ES2 */ + + /* Configure IOAPIC link for NODE 3 FIXME: may be used in future */ + AS_WORD(rt_ioapic) = NATIVE_GET_SICREG(rt_ioapic3, E2K_MAX_CL_NUM, 0); + DebugRT("jump: rt_ioapic3 = 0x%x\n", AS_WORD(rt_ioapic)); + AS_STRUCT(rt_ioapic).bgn = 0x3; /* 0x00_fec0_3000-0x00_fec0_3fff Align = 4k + * end[20:12] = bgn[20:12] + * end[11:0] = 0xfff */ + NATIVE_SET_SICREG(rt_ioapic3, AS_WORD(rt_ioapic), E2K_MAX_CL_NUM, 0); +#if defined(CONFIG_ES2) + /* Configure second IOAPIC for NODE 3 */ + AS_WORD(rt_ioapic) = NATIVE_GET_SICREG(rt_ioapic13, E2K_MAX_CL_NUM, 0); + DebugRT("jump: rt_ioapic13 = 0x%x\n", AS_WORD(rt_ioapic)); + AS_STRUCT(rt_ioapic).bgn = 0x7; /* 0x00_fec0_7000-0x00_fec0_7fff Align = 4k + * end[20:12] = bgn[20:12] + * end[11:0] = 0xfff */ + NATIVE_SET_SICREG(rt_ioapic13, AS_WORD(rt_ioapic), E2K_MAX_CL_NUM, 0); +#endif /* CONFIG_ES2 */ + + /* Configure PCIM for BSP. 
The only BSP has access to PCIM, and other cpus through BSP */ + /* so we leave rt_pcim 1,2,3 closed by default */ + AS_WORD(rt_pcim) = NATIVE_GET_SICREG(rt_pcim0, E2K_MAX_CL_NUM, 0); + DebugRT("jump: rt_pcim0 = 0x%x\n", AS_WORD(rt_pcim)); + AS_STRUCT(rt_pcim).bgn = 0x10; /* 2 Gb start of PCI memory */ + AS_STRUCT(rt_pcim).end = 0x1e; /* All other memory fo bsp + * 0x00_10 00_0000 - 0x00_f7 ff_ffff (0xf0 00_0000 + 0x7 ff_ffff); + * Align = 128Mb (0x8000000) + * BUG: 0x1f= 0x00_ff ff_ffff intersects with LAPIC area but + * available. Due to specification the end can be 0x00_FEBF_FFFF but + * it's ipmossible to reach */ + NATIVE_SET_SICREG(rt_pcim0, AS_WORD(rt_pcim), E2K_MAX_CL_NUM, 0); +#elif defined(CONFIG_E2K_LEGACY_SIC) +#ifdef CONFIG_BIOS + bios_first(); +#endif +#endif /* CONFIG_E2K_FULL_SIC */ +#endif /* CONFIG_E2K_SIC */ + +#ifdef CONFIG_E2K_SIC + configure_routing_regs(); + configure_io_routing(); +#endif /* CONFIG_E2K_SIC */ + +#ifdef CONFIG_SMP + all_pic_ids[0] = NATIVE_READ_PIC_ID(); +#ifdef CONFIG_E2K_SIC +#if defined(CONFIG_E2K_LEGACY_SIC) +#ifdef CONFIG_E1CP + atomic_set(&cpu_count, 1); /*only BSP CPU is enable */ +#endif /* CONFIG_E1CP */ +#elif defined(CONFIG_E2K_FULL_SIC) +/* Determine the total number of CPUs */ + atomic_set(&cpu_count, 0); /* start application CPUs to determine + own # and total CPU number */ +#if defined(CONFIG_E1CP) + max_cpus_num = E1CP_NR_NODE_CPUS; +#elif defined(CONFIG_ES2) || defined(CONFIG_E2C3) + max_cpus_num = ES2_NR_NODE_CPUS; +#elif defined(CONFIG_E2S) + max_cpus_num = E2S_NR_NODE_CPUS; +#elif defined(CONFIG_E8C) || defined(CONFIG_E8C2) + max_cpus_num = E8C_NR_NODE_CPUS; +#elif defined(CONFIG_E12C) + max_cpus_num = E12C_NR_NODE_CPUS; +#elif defined(CONFIG_E16C) + max_cpus_num = E16C_NR_NODE_CPUS; +#else + #error "Unknown MicroProcessor type" +#endif + for (;;) + { + e2k_rt_lcfg_struct_t rt_lcfg; + int i = 0; + + if (max_cpus_num > 1) { + E2K_RT_LCFG_reg(rt_lcfg) = /* Read on BSP */ + NATIVE_GET_SICREG(rt_lcfg0, 0, 0); 
+ i += startup_all_cores(rt_lcfg, max_cpus_num, + true /* BSP */); + } + + E2K_RT_LCFG_reg(rt_lcfg) = + NATIVE_GET_SICREG(rt_lcfg1, 0, 0); /* Read on BSP */ + if (E2K_RT_LCFG_vp(rt_lcfg) == 1) { + i += startup_all_cores(rt_lcfg, max_cpus_num, + false /* BSP ? */); + } + + E2K_RT_LCFG_reg(rt_lcfg) = + NATIVE_GET_SICREG(rt_lcfg2, 0, 0); /* Read on BSP */ + if (E2K_RT_LCFG_vp(rt_lcfg) == 1) { + i += startup_all_cores(rt_lcfg, max_cpus_num, + false /* BSP ? */); + } + E2K_RT_LCFG_reg(rt_lcfg) = + NATIVE_GET_SICREG(rt_lcfg3, 0, 0); /* Read on BSP */ + if (E2K_RT_LCFG_vp(rt_lcfg) == 1) { + i += startup_all_cores(rt_lcfg, max_cpus_num, + false /* BSP ? */); + } + if (max_cpus_num > 1) + atomic_inc(&cpu_count); /* acoount BSP core */ + i = atomic_read(&cpu_count); + rom_printk("Detected %d CPUS\n", i); + break; + } +#endif /* CONFIG_E2K_LEGACY_SIC */ +#endif /* CONFIG_E2K_SIC */ +#endif /* CONFIG_SMP */ + + /* Boot info goes under loader's C-stack and below kernel code. */ + bootblock = (bootblock_struct_t *) + _PAGE_ALIGN_DOWN((e2k_addr_t)free_memory_p, + E2K_BOOTINFO_PAGE_SIZE); + free_memory_p = (char *)((e2k_addr_t)bootblock + + sizeof(bootblock_struct_t)); + boot_info = &bootblock->info; + rom_printk("Boot info structure at 0x%X\n", boot_info); + bios_info = &boot_info->bios; + rom_printk("BIOS info structure at 0x%X\n", bios_info); + +#ifdef CONFIG_RECOVERY + if (boot_info->signature == ROMLOADER_SIGNATURE) { + recovery_flag = bootblock->boot_flags & RECOVERY_BB_FLAG; + not_read_image = bootblock->boot_flags & NO_READ_IMAGE_BB_FLAG; + + if (recovery_flag) { + rom_puts("ROM loader restarted to recover " + "kernel\n"); + } else { + rom_puts("ROM loader restarted to boot kernel.\n"); + } + } else { +#endif /* CONFIG_RECOVERY */ + + rom_printk("Kernel ROM loader's initialization started.\n"); +#ifdef CONFIG_RECOVERY + } +#endif /* CONFIG_RECOVERY */ + + +#ifdef CONFIG_RECOVERY + if (!recovery_flag) { +#endif /* CONFIG_RECOVERY */ + rom_printk("DATA at: 
0x%X,",(u64)_data); + rom_printk(" size: 0x%X.\n", ((u64)_edata - (u64)_data)); + + rom_printk("BSS at: 0x%X,",(u64)__bss_start); + rom_printk(" size: 0x%X.\n", ((u64)__bss_stop - + (u64)__bss_start)); + + psp_hi = NATIVE_NV_READ_PSP_HI_REG(); + psp_lo = NATIVE_NV_READ_PSP_LO_REG(); + + rom_printk("Proc. Stack (PSP) at: 0x%X,", + AS_STRUCT(psp_lo).base); + rom_printk(" size: 0x%X,", AS_STRUCT(psp_hi).size); + rom_printk(" direction: %s.\n", "upward"); + + pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG(); + pcsp_lo = NATIVE_NV_READ_PCSP_LO_REG(); + + rom_printk("Proc. Chain Stack (PCSP) at: 0x%X,", + AS_STRUCT(pcsp_lo).base); + rom_printk(" size: 0x%X,", AS_STRUCT(pcsp_hi).size); + rom_printk(" direction: %s.\n", "upward"); + usbr.USBR_reg = NATIVE_NV_READ_USBR_REG_VALUE(); + rom_printk("GNU C Stack at: 0x%X,", usbr.USBR_base); + rom_printk(" size: 0x%X, ", E2K_BOOT_KERNEL_US_SIZE); + rom_printk(" direction: %s.\n", "downward"); + rom_printk("BOOTINFO structure is starting at: 0x%X, size 0x%X\n", + (u64) bootblock, sizeof(bootblock_struct_t)); +#ifdef CONFIG_RECOVERY + } +#endif /* CONFIG_RECOVERY */ + +#ifdef CONFIG_RECOVERY + if (!recovery_flag) { +#endif /* CONFIG_RECOVERY */ + + +#ifdef CONFIG_CMDLINE_PROMPT + kernel_command_prompt(cmd_line, cmd_preset); +#else + cmd_size = bios_strlen(cmd_preset); + if (cmd_size > sizeof(cmd_buf)) { + rom_printk("Kernel command line size is too big " + "size %d > %d (buffer size)\n", + cmd_size, sizeof(cmd_buf)); + E2K_LMS_HALT_OK; + } + memcpy(cmd_line, cmd_preset, bios_strlen(cmd_preset)); +#endif /* CONFIG_CMDLINE_PROMPT */ + + +#ifdef CONFIG_RECOVERY + } +#endif /* CONFIG_RECOVERY */ + +#ifdef CONFIG_SMP + smp_start_cpus(); +#else + +#ifdef CONFIG_L_LOCAL_APIC + setup_local_pic(0); +#endif /* CONFIG_L_LOCAL_APIC */ + +#endif /* CONFIG_SMP */ + + +#ifdef CONFIG_RECOVERY + if (!recovery_flag) { +#endif /* CONFIG_RECOVERY */ + memset(boot_info, 0, sizeof(*boot_info)); + memset(bios_info, 0, sizeof(*bios_info)); + + /* Creation of boot 
info records. */ + boot_info->signature = ROMLOADER_SIGNATURE; /* ROMLoader */ + /* marker */ + boot_info->vga_mode = 0; + + /* our loader used only on simulator */ + boot_info->mach_flags = SIMULATOR_MACH_FLAG; + + probe_memory(boot_info, 0, 0, 0); +#ifdef CONFIG_E2K_SIC + set_memory_filters(boot_info); +#endif /* CONFIG_E2K_SIC */ + + /* + * The kernel command line. + * read Linux documentation for cmdline syntax. + */ + cmd_size = bios_strlen(cmd_line) + 1; + if (cmd_size <= KSTRMAX_SIZE) { + memcpy(boot_info->kernel_args_string, cmd_line, + cmd_size); + } else if (cmd_size <= KSTRMAX_SIZE_EX) { + memcpy(boot_info->bios.kernel_args_string_ex, cmd_line, + cmd_size); + memcpy(boot_info->kernel_args_string, + KERNEL_ARGS_STRING_EX_SIGNATURE, + KERNEL_ARGS_STRING_EX_SIGN_SIZE); + } else { + /* command line too big */ + rom_printk("Kernel command line too big, " + "size %d > %d\n", + cmd_size, KSTRMAX_SIZE_EX); + E2K_LMS_HALT_OK; + } + rom_printk("Kernel command line: %s\n", + boot_info->kernel_args_string); + + /* Creation of bios info records. 
*/ + memcpy(bios_info->signature, BIOS_INFO_SIGNATURE, + (int)bios_strlen(BIOS_INFO_SIGNATURE) + 1); + memcpy(bios_info->boot_ver, BOOT_VER_STR, + (int)bios_strlen(BOOT_VER_STR) + 1); + if (NATIVE_IS_MACHINE_ES2 || NATIVE_IS_MACHINE_ES2_DSP) { + bios_info->chipset_type = CHIPSET_TYPE_IOHUB; + bios_info->cpu_type = CPU_TYPE_ES2_DSP; + } else if (NATIVE_IS_MACHINE_ES2_RU) { + bios_info->chipset_type = CHIPSET_TYPE_IOHUB; + bios_info->cpu_type = CPU_TYPE_ES2_RU; + } else if (NATIVE_IS_MACHINE_E2S) { + bios_info->chipset_type = CHIPSET_TYPE_IOHUB; + bios_info->cpu_type = CPU_TYPE_E2S; + } else if (NATIVE_IS_MACHINE_E8C) { + bios_info->chipset_type = CHIPSET_TYPE_IOHUB; + bios_info->cpu_type = CPU_TYPE_E8C; + } else if (NATIVE_IS_MACHINE_E8C2) { + bios_info->chipset_type = CHIPSET_TYPE_IOHUB; + bios_info->cpu_type = CPU_TYPE_E8C2; + } else if (NATIVE_IS_MACHINE_E1CP) { + bios_info->chipset_type = CHIPSET_TYPE_IOHUB; + bios_info->cpu_type = CPU_TYPE_E1CP; + } else if (NATIVE_IS_MACHINE_E12C) { + bios_info->chipset_type = CHIPSET_TYPE_IOHUB; + bios_info->cpu_type = CPU_TYPE_E12C; + } else if (NATIVE_IS_MACHINE_E16C) { + bios_info->chipset_type = CHIPSET_TYPE_IOHUB; + bios_info->cpu_type = CPU_TYPE_E16C; + } else if (NATIVE_IS_MACHINE_E2C3) { + bios_info->chipset_type = CHIPSET_TYPE_IOHUB; + bios_info->cpu_type = CPU_TYPE_E2C3; + } + rom_printk("CPU & MicroProcessor: %s\n", + GET_CPU_TYPE_NAME(bios_info->cpu_type)); + +#ifdef CONFIG_RECOVERY + } +#endif /* CONFIG_RECOVERY */ + + boot_info->num_of_busy = 0; + + /* + * Memory assumptions: node #0 & bank #0 exist and starts from 0 + * If memory banks > 1 we use bank #1 on the node #0 + */ + busy_mem_end = PAGE_ALIGN_DOWN((e2k_addr_t)free_memory_p); + add_busy_memory_area(boot_info, (e2k_addr_t)_data, busy_mem_end); + + bank_info = &boot_info->nodes_mem[0].banks[1]; + if (bank_info->size == 0) + /* only one bank of memory detected on the node #0 */ + bank_info = &boot_info->nodes_mem[0].banks[0]; + if (busy_mem_end >= 
bank_info->address && + busy_mem_end < (bank_info->address + bank_info->size)) { + areabase = busy_mem_end; + areasize = bank_info->size - + (busy_mem_end - bank_info->address); + } else { + /* should panic indeed */ ; + areabase = bank_info->address; + areasize = bank_info->size; + } + busy_mem_start = areabase; + + bios_mem_init(areabase, areasize); + + scan_iohubs(); + configure_io_links(); + +#ifdef CONFIG_E2C3 + enable_embedded_devices(); +#endif +#ifdef CONFIG_EIOH + setup_rt_msi(); +#endif + +#ifdef CONFIG_BIOS +#ifdef CONFIG_ENABLE_ELBRUS_PCIBIOS + pci_bios(); +#endif +#endif + +#ifdef CONFIG_BIOS + bios_rest(); +#endif + +#ifdef CONFIG_BIOS +#if defined(CONFIG_E2K_LEGACY_SIC) + video_bios(); +#endif /* CONFIG_E2K_LEGACY_SIC */ +#endif + +#ifdef CONFIG_BLK_DEV_INITRD + + /* + * INITRD - initial ramdisk + */ + + areasize = (e2k_addr_t)&initrd_data_end - (e2k_addr_t)&initrd_data; + areabase = (long) malloc_aligned(areasize, E2K_INITRD_PAGE_SIZE); + +#ifdef CONFIG_RECOVERY + if (!recovery_flag) { +#endif /* CONFIG_RECOVERY */ + rom_puts("Copying initial ramdisk from ROM to RAM ... "); + + memcpy((void *)areabase, (void *)&initrd_data, (int)areasize); + + rom_puts("done.\n"); + + boot_info->ramdisk_base = areabase; + boot_info->ramdisk_size = areasize; + + rom_printk("Initial ramdisk relocated at: 0x%X, " + "size: 0x%X.\n", areabase, areasize); +#ifdef CONFIG_RECOVERY + } +#endif /* CONFIG_RECOVERY */ +#else /* ! 
CONFIG_BLK_DEV_INITRD */ +#ifdef CONFIG_RECOVERY + if (!recovery_flag) { +#endif /* CONFIG_RECOVERY */ + boot_info->ramdisk_base = 0; + boot_info->ramdisk_size = 0; +#ifdef CONFIG_RECOVERY + } +#endif /* CONFIG_RECOVERY */ + +#endif /* CONFIG_BLK_DEV_INITRD */ + +#ifdef CONFIG_RECOVERY + if (!recovery_flag) { +#endif /* CONFIG_RECOVERY */ + create_smp_config(boot_info); +#ifdef CONFIG_RECOVERY + } else { + recover_smp_config(boot_info); + } +#endif /* CONFIG_RECOVERY */ + + busy_mem_end = PAGE_ALIGN_DOWN(get_busy_memory_end()); + add_busy_memory_area(boot_info, busy_mem_start, busy_mem_end); + + if (bootmode) { + areasize = (e2k_addr_t)&input_data_noncomp_size; + rom_printk("Kernel will be loaded from 'romimage' file " + "by simulator, size %d\n", areasize); + } else { + areasize = (e2k_addr_t)&input_data_end - + (e2k_addr_t)&input_data; + rom_printk("Kernel was loaded from ROM\n"); + } + + if (bootmode || areasize == (e2k_addr_t)&input_data_noncomp_size) { + + if (!bootmode) + rom_printk("Non-compressed kernel found. Size: %d\n", + areasize); + +#ifdef CONFIG_RECOVERY + if (!recovery_flag) { +#endif /* CONFIG_RECOVERY */ + rom_puts("Allocating space for kernel copy... "); + areabase = (long) malloc_aligned(areasize, + E2K_MAX_PAGE_SIZE); + rom_puts("done.\n"); + if (bootmode) { + bios_outll(areabase, LMS_RAM_ADDR_PORT); + } +#ifdef CONFIG_RECOVERY + } else { + areabase = boot_info->kernel_base; + rom_printk("Kernel was loaded to 0x%X, size of " + "0x%X\n", areabase, areasize); + } +#endif /* CONFIG_RECOVERY */ + +#ifdef CONFIG_RECOVERY + if (!recovery_flag) { +#endif /* CONFIG_RECOVERY */ + + if (bootmode) { + rom_puts("Loading the kernel from 'romimage file " + "to RAM ... "); + bios_outb(LMS_LOAD_IMAGE_TO_RAM, LMS_TRACE_CNTL_PORT); + rom_puts(" done.\n"); + } else { + rom_puts("Copying the kernel from ROM to RAM ... 
"); + memcpy((void *)areabase, (void *) &input_data, + (int) areasize); + rom_puts(" done.\n"); + } + +#ifdef CONFIG_RECOVERY + } +#endif /* CONFIG_RECOVERY */ + } else { + + rom_printk("Compressed kernel found. Size: %d\n", areasize); + +#ifdef CONFIG_RECOVERY + if (!recovery_flag) { +#endif /* CONFIG_RECOVERY */ + rom_printk("Allocating %d bytes for kernel decompression... ", + &input_data_noncomp_size); + areabase = (long) malloc_aligned( + (e2k_addr_t)&input_data_noncomp_size, + E2K_MAX_PAGE_SIZE); + rom_puts("done.\n"); +#ifdef CONFIG_RECOVERY + } else { + areabase = boot_info->kernel_base; + rom_printk("Kernel was decompressed to 0x%X, size of " + "0x%X\n", areabase, areasize); + areasize = boot_info->kernel_size; + } +#endif /* CONFIG_RECOVERY */ + +#ifdef CONFIG_RECOVERY + if (!recovery_flag) { +#endif /* CONFIG_RECOVERY */ + rom_printk("Uncompressing Linux at 0x%X, size 0x%X...", + areabase, (e2k_addr_t)&input_data_noncomp_size); + areasize = decompress_kernel(areabase); + rom_puts(" done.\n"); +#ifdef CONFIG_RECOVERY + } else { + rom_printk("Uncompressed Linux at 0x%X, size 0x%X\n", + areabase, areasize); + } +#endif /* CONFIG_RECOVERY */ + } + + kernel_areabase = areabase; + kernel_areasize = areasize; + +#ifdef CONFIG_RECOVERY + if (!recovery_flag) { +#endif /* CONFIG_RECOVERY */ + boot_info->kernel_base = kernel_areabase; + boot_info->kernel_size = kernel_areasize; + + rom_printk("Kernel relocated at: 0x%X,", kernel_areabase); + rom_printk(" size: 0x%X.\n", kernel_areasize); +#ifdef CONFIG_RECOVERY + } else { + if (boot_info->kernel_base != kernel_areabase) { + rom_puts("ERROR: Invalid kernel base address to " + "recover the system.\n"); + rom_printk("Kernel base address from 'recovery_info' " + "0x%X != 0x%X (current kernel allocation)\n", + boot_info->kernel_base, kernel_areabase); + } + if (boot_info->kernel_size != kernel_areasize) { + rom_puts("ERROR: Invalid kernel size to recover " + "the system.\n"); + rom_printk("Kernel size from 
'recovery_info' " + "0x%X != 0x%X (current kernel size)\n", + boot_info->kernel_size, kernel_areasize); + } +#ifdef CONFIG_STATE_SAVE + rom_printk("Loading memory from disk...\n"); + load_machine_state_new(boot_info); +#endif /* CONFIG_STATE_SAVE */ + } +#endif /* CONFIG_RECOVERY */ + + set_kernel_image_pointers(); + +#ifdef CONFIG_RECOVERY + if (!recovery_flag) { +#endif /* CONFIG_RECOVERY */ + rom_puts("Jump into the vmlinux startup code using SCALL #12 " + "...\n\n"); +#ifdef CONFIG_RECOVERY + } else { + bootblock->boot_flags &= ~RECOVERY_BB_FLAG; + rom_printk("Jump into the vmlinux startup code using SCALL #12 " + "to start kernel recovery\n\n"); + } +#endif /* CONFIG_RECOVERY */ + +#ifdef CONFIG_SMP + do_smp_commence(); +#endif /* CONFIG_SMP */ + + scall2(bootblock); + + E2K_LMS_HALT_OK; +} + +void +set_kernel_image_pointers(void) +{ + e2k_rwap_lo_struct_t reg_lo; + e2k_rwap_hi_struct_t reg_hi; + + /* + * Set Kernel 'text/data/bss' segment registers to kernel image + * physical addresses + */ + + reg_lo.CUD_lo_base = kernel_areabase; + reg_lo.CUD_lo_c = E2K_CUD_CHECKED_FLAG; + reg_lo._CUD_lo_rw = E2K_CUD_RW_PROTECTIONS; + reg_hi.CUD_hi_size = kernel_areasize; + reg_hi._CUD_hi_curptr = 0; + NATIVE_WRITE_CUD_HI_REG_VALUE(reg_hi.CUD_hi_half); + NATIVE_WRITE_CUD_LO_REG_VALUE(reg_lo.CUD_lo_half); + NATIVE_WRITE_OSCUD_HI_REG_VALUE(reg_hi.OSCUD_hi_half); + NATIVE_WRITE_OSCUD_LO_REG_VALUE(reg_lo.OSCUD_lo_half); + + reg_lo.GD_lo_base = kernel_areabase; + reg_lo._GD_lo_rw = E2K_GD_RW_PROTECTIONS; + reg_hi.GD_hi_size = kernel_areasize; + reg_hi._GD_hi_curptr = 0; + NATIVE_WRITE_GD_HI_REG_VALUE(reg_hi.GD_hi_half); + NATIVE_WRITE_GD_LO_REG_VALUE(reg_lo.GD_lo_half); + NATIVE_WRITE_OSGD_HI_REG_VALUE(reg_hi.OSGD_hi_half); + NATIVE_WRITE_OSGD_LO_REG_VALUE(reg_lo.OSGD_lo_half); + +} + diff --git a/arch/e2k/boot/keyboard.c b/arch/e2k/boot/keyboard.c new file mode 100644 index 000000000000..1741077d1958 --- /dev/null +++ b/arch/e2k/boot/keyboard.c @@ -0,0 +1,249 @@ +/* + * 
Adopted from Linux/PPC port. + */ + +#include +#include + +#include "defkeymap.c" +#include "boot_io.h" +#include "bios/printk.h" + +unsigned char shfts, ctls, alts, caps; + +#define KBDATAP 0x60 /* kbd data port */ +#define KBSTATUSPORT 0x61 /* kbd status */ +#define KBSTATP 0x64 /* kbd status port */ +#define KBINRDY 0x01 +#define KBOUTRDY 0x02 + +static void kbd_outb(u16 port, u8 byte) +{ + NATIVE_WRITE_MAS_B(PHYS_X86_IO_BASE + port, byte, MAS_IOADDR); +} + +static u8 kbd_inb(u16 port) +{ +// u8 *addr = (u8 *) E2K_X86_IO_BASE; +// return *(addr + port); + return NATIVE_READ_MAS_B(PHYS_X86_IO_BASE + port, MAS_IOADDR); +} + +static void kbd_udelay(int delay) /* Valid for LMS keyboard only */ +{ + int i; + for (i=0; i +#include + +__nodedata machdep_t machine = { 0 }; + +int cpu_to_iset(int cpu) +{ + int iset = ELBRUS_GENERIC_ISET; + + switch (cpu) { + case IDR_ES2_DSP_MDL: + case IDR_ES2_RU_MDL: + iset = ELBRUS_S_ISET; + case IDR_E2S_MDL: + iset = ELBRUS_2S_ISET; + case IDR_E8C_MDL: + iset = ELBRUS_8C_ISET; + case IDR_E1CP_MDL: + iset = ELBRUS_1CP_ISET; + case IDR_E8C2_MDL: + iset = ELBRUS_8C2_ISET; + case IDR_E12C_MDL: + iset = ELBRUS_12C_ISET; + case IDR_E16C_MDL: + iset = ELBRUS_16C_ISET; + case IDR_E2C3_MDL: + iset = ELBRUS_2C3_ISET; + } + + return iset; +} + +int machdep_setup_features(int cpu, int revision) +{ + int iset_ver = cpu_to_iset(cpu); + + if (iset_ver == ELBRUS_GENERIC_ISET) + return 1; + + CPU_FEAT_EPIC_initializer(cpu, revision, iset_ver, cpu, &machine); + CPU_FEAT_ISET_V6_initializer(cpu, revision, iset_ver, cpu, &machine); + + return 0; +} diff --git a/arch/e2k/boot/malloc.c b/arch/e2k/boot/malloc.c new file mode 100644 index 000000000000..aa335e0eaead --- /dev/null +++ b/arch/e2k/boot/malloc.c @@ -0,0 +1,64 @@ + +#include + +#include "boot_io.h" + +static void error(char *x) +{ + rom_puts("\n\n"); + rom_puts(x); + rom_puts("\n\n -- System halted"); + + E2K_LMS_HALT_ERROR(0xdead); /* Halt */ +} + +e2k_addr_t free_mem_ptr; /* zip.c wants it 
visible */ +e2k_addr_t free_mem_end_ptr; + +void bios_mem_init(long membase, long memsize) +{ + free_mem_ptr = membase; + free_mem_end_ptr = membase + memsize; +} + + +void inline *malloc_aligned(int size, int alignment) +{ + void *p; + int mask; + + if (alignment == 0) alignment = 8; + + mask = alignment - 1; + + if (size <0) error("Malloc error"); + if (free_mem_ptr <= 0) error("Memory error"); + + free_mem_ptr = (free_mem_ptr + mask) & ~mask; /* Align */ + + p = (void *)free_mem_ptr; + free_mem_ptr += size; + + if (free_mem_ptr >= free_mem_end_ptr) + error("Out of memory"); + + return p; +} + +inline void *malloc(int size) +{ + void *p; + + p = malloc_aligned(size, 8); + + return p; +} + +void inline free(void *where) +{ /* Don't care */ +} +e2k_addr_t +get_busy_memory_end(void) +{ + return free_mem_ptr; +} diff --git a/arch/e2k/boot/pic.h b/arch/e2k/boot/pic.h new file mode 100644 index 000000000000..1cc103c3770e --- /dev/null +++ b/arch/e2k/boot/pic.h @@ -0,0 +1,211 @@ +#ifndef __BOOT_PIC_H +#define __BOOT_PIC_H + +/* + * Statically choose between APIC and EPIC basic functions, based on + * CONFIG_BOOT_EPIC (defined in arch/e2k/boot/Makefile) + */ + +#include +#include + +#include "bios/printk.h" +#include +#include "e2k_sic.h" +#include + +/* Boot */ +#ifdef CONFIG_SMP +extern unsigned int all_pic_ids[]; +#endif + +static inline void native_epic_write_w(unsigned int reg, unsigned int v) +{ + NATIVE_WRITE_MAS_W(EPIC_DEFAULT_PHYS_BASE + reg, v, MAS_IOADDR); +} + +static inline unsigned int native_epic_read_w(unsigned int reg) +{ + return NATIVE_READ_MAS_W(EPIC_DEFAULT_PHYS_BASE + reg, MAS_IOADDR); +} + +static inline void native_epic_write_d(unsigned int reg, unsigned long v) +{ + NATIVE_WRITE_MAS_D(EPIC_DEFAULT_PHYS_BASE + reg, v, MAS_IOADDR); +} + +static inline unsigned long native_epic_read_d(unsigned int reg) +{ + return NATIVE_READ_MAS_D(EPIC_DEFAULT_PHYS_BASE + reg, MAS_IOADDR); +} + +static inline void native_apic_write(unsigned int reg, unsigned int v) 
+{ + NATIVE_WRITE_MAS_W(APIC_DEFAULT_PHYS_BASE + reg, v, MAS_IOADDR); +} + +static inline unsigned int native_apic_read(unsigned int reg) +{ + return NATIVE_READ_MAS_W(APIC_DEFAULT_PHYS_BASE + reg, MAS_IOADDR); +} + +#ifdef CONFIG_BOOT_EPIC + +#define PIC_DEFAULT_PHYS_BASE EPIC_DEFAULT_PHYS_BASE +#define IO_PIC_DEFAULT_PHYS_BASE IO_EPIC_DEFAULT_PHYS_BASE + +extern void debug_epic_startup(int cpu, unsigned int value, unsigned long addr); +static inline void debug_pic_startup(int cpu, unsigned int value, + unsigned long addr) +{ + debug_epic_startup(cpu, value, addr); +} + +extern void boot_setup_cepic(int cpu); +static inline void setup_local_pic(int cpu) +{ + boot_setup_cepic(cpu); +} + +extern void boot_print_cepic(void); +static inline void print_local_pic(void) +{ + boot_print_cepic(); +} + +#define NATIVE_READ_PIC_ID() native_read_epic_id() +static inline unsigned int native_read_epic_id(void) +{ + return cepic_id_full_to_short(native_epic_read_w(CEPIC_ID)); +} + +/* No need for EOI at boot-time */ +#define native_pic_write_eoi() do {} while (0) + +static inline unsigned int native_pic_read_esr(void) +{ + return native_epic_read_w(CEPIC_ESR) & CEPIC_ESR_BIT_MASK; +} +static inline unsigned int native_pic_read_icr_busy(void) +{ + union cepic_icr reg; + + reg.raw = (unsigned long)native_epic_read_w(CEPIC_ICR); + return reg.bits.stat; +} + +static inline void native_pic_reset_esr(void) +{ + native_epic_write_w(CEPIC_ESR, 0); +} + +static inline void native_pic_send_startup(int picid, unsigned long addr) +{ + union cepic_icr icr; + + /* Send startup IPI via ICR */ + icr.raw = 0; + icr.bits.dst = cepic_id_short_to_full(picid); + icr.bits.dlvm = CEPIC_ICR_DLVM_STARTUP; + icr.bits.vect = addr >> 12; + native_epic_write_d(CEPIC_ICR, icr.raw); +} + +static inline unsigned int native_pic_read_nm(void) +{ + return native_epic_read_w(CEPIC_PNMIRR); +} + +static inline void native_pic_reset_nm(void) +{ + native_epic_write_w(CEPIC_PNMIRR, CEPIC_PNMIRR_BIT_MASK); +} + +static 
inline unsigned int native_pic_read_version(void) +{ + return NATIVE_GET_SICREG(prepic_version, 0, 0); +} + +#else /* CONFIG_BOOT_EPIC */ + +#define PIC_DEFAULT_PHYS_BASE APIC_DEFAULT_PHYS_BASE +#define IO_PIC_DEFAULT_PHYS_BASE IO_APIC_DEFAULT_PHYS_BASE + +#define NATIVE_READ_PIC_ID() native_read_apic_id() +static inline unsigned int native_read_apic_id(void) +{ + return GET_APIC_ID(native_apic_read(APIC_ID)); +} + +extern void debug_apic_startup(int cpu, unsigned int value, unsigned long addr); +static inline void debug_pic_startup(int cpu, unsigned int value, + unsigned long addr) +{ + debug_apic_startup(cpu, value, addr); +} + +extern void setup_local_apic(int cpu); +static inline void setup_local_pic(int cpu) +{ + setup_local_apic(cpu); +} + +extern void print_local_apic(void); +static inline void print_local_pic(void) +{ + print_local_apic(); +} + +static inline unsigned int native_pic_read_esr(void) +{ + return native_apic_read(APIC_ESR) & 0xEF; +} + +static inline unsigned int native_pic_read_icr_busy(void) +{ + return native_apic_read(APIC_ICR) & APIC_ICR_BUSY; +} + +static inline void native_pic_reset_esr(void) +{ + native_apic_write(APIC_ESR, 0); +} + +static inline void native_pic_send_startup(int picid, unsigned long addr) +{ + /* Target chip */ + native_apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(picid)); + + /* Boot on the stack */ + /* Kick the second */ + native_apic_write(APIC_ICR, APIC_DM_STARTUP + | (addr >> 12)); +} + +static inline unsigned int native_pic_read_nm(void) +{ + return native_apic_read(APIC_NM); +} + +static inline void native_pic_reset_nm(void) +{ + native_apic_write(APIC_NM, APIC_NM_BIT_MASK); +} + +static inline void native_pic_write_eoi(void) +{ + native_apic_write(APIC_EOI, 0x0); +} + +static inline unsigned int native_pic_read_version(void) +{ + unsigned int apic_version; + + apic_version = GET_APIC_VERSION(native_apic_read(APIC_LVR)); + if (apic_version == 0) + apic_version = APIC_VERSION; + return apic_version; +} + +#endif /* 
CONFIG_BOOT_EPIC */ +#endif /* __BOOT_PIC_H */ diff --git a/arch/e2k/boot/recovery_string.S b/arch/e2k/boot/recovery_string.S new file mode 100644 index 000000000000..fbf745e70f32 --- /dev/null +++ b/arch/e2k/boot/recovery_string.S @@ -0,0 +1,7 @@ +/* + * string routines + * + * Boot uses the same routines as main kernel + */ + +#include "../lib/recovery_string.S" diff --git a/arch/e2k/boot/recovery_string_fpic.S b/arch/e2k/boot/recovery_string_fpic.S new file mode 100644 index 000000000000..fbf745e70f32 --- /dev/null +++ b/arch/e2k/boot/recovery_string_fpic.S @@ -0,0 +1,7 @@ +/* + * string routines + * + * Boot uses the same routines as main kernel + */ + +#include "../lib/recovery_string.S" diff --git a/arch/e2k/boot/recovery_string_v5.S b/arch/e2k/boot/recovery_string_v5.S new file mode 100644 index 000000000000..904218518808 --- /dev/null +++ b/arch/e2k/boot/recovery_string_v5.S @@ -0,0 +1,7 @@ +/* + * string routines + * + * Boot uses the same routines as main kernel + */ + +#include "../kernel/cpu/recovery_string_v5.S" diff --git a/arch/e2k/boot/recovery_string_v5_fpic.S b/arch/e2k/boot/recovery_string_v5_fpic.S new file mode 100644 index 000000000000..904218518808 --- /dev/null +++ b/arch/e2k/boot/recovery_string_v5_fpic.S @@ -0,0 +1,7 @@ +/* + * string routines + * + * Boot uses the same routines as main kernel + */ + +#include "../kernel/cpu/recovery_string_v5.S" diff --git a/arch/e2k/boot/romloader.S b/arch/e2k/boot/romloader.S new file mode 100644 index 000000000000..dea4f5c5aef7 --- /dev/null +++ b/arch/e2k/boot/romloader.S @@ -0,0 +1,282 @@ +// +// rom_loader.S, written by +// secondary kernel startup module +// + + + +#ifdef _E2K_SIMULATION_MODE_ +#include +#endif /* _E2K_SIMULATION_MODE_ */ +#include +#include +#ifdef CONFIG_E2K_LEGACY_SIC +#include +#endif /* CONFIG_E2K_LEGACY_SIC */ +#ifdef CONFIG_SMP +#include +#include +#include +#endif /* CONFIG_SMP */ + +#define MMU_TRAP_CELLAR_MAX_SIZE 64 /* double-words */ + +#define RW 0x1800000000000000 
+#define R 0x0800000000000000 +#define RW_NONP 0x1800000000000000 + +#define RBS 0x10 /* 10 quadwords */ + +#define NMIE_PSR 0x20 /* enable nm-interrupts */ + +.text +.global loader + +.global _data +.global __bios_size_ld +.global __bios_fsize_ld +.global __bios_entry_ld +.global __bios_start_data +.global __bios_size_data +.global __bios_size_data_plus_bss + +// loader will call jump() to continue. See jumpstart.c +.global jump +.global free_memory_p + +#ifdef CONFIG_SMP +.global cpu_count +.global phys_cpu_count +.global all_pic_ids +#endif /* CONFIG_SMP */ + +loader: +// [OS]{CUD|GD} setup + addd 0, [__bios_size_ld], %r8 // EOS size + addd 0, [EOS_RAM_BASE_LABEL+R], %r2 + addd %r2,1<<58 , %r2 // set CUD.c flag + rwd %r2, %oscud.lo + addd 0, [EOS_RAM_BASE_LABEL+RW], %r6 +// shld %r8, 32, %r10 + addd 0, 0x01f0000000000000, %r10 // *D size (all space 1M upward) + + rwd %r2, %cud.lo + rwd %r6, %osgd.lo + rwd %r6, %gd.lo + + rwd %r10, %oscud.hi + rwd %r10, %osgd.hi + rwd %r10, %cud.hi + rwd %r10, %gd.hi + +#ifdef CONFIG_SMP +#ifdef CONFIG_BOOT_EPIC +// read CEPIC_CTRL register + addd,1 CEPIC_CTRL, EPIC_DEFAULT_PHYS_BASE, %dr0 + ldw,2 [%dr0] MAS_IOADDR, %r0 // read CEPIC_CTRL + disp %ctpr1, cpu_is_BSP + ands,0 %r0, CEPIC_CTRL_BSP_CORE, %r0 // is CPU BSP? +#else /* CONFIG_BOOT_EPIC */ +// read APIC_BSP register + addd,1 APIC_BSP, APIC_DEFAULT_PHYS_BASE, %dr0 // APIC_BSP reg addr + ldw,2 [%dr0] MAS_IOADDR, %r0 // read APIC_BSP + disp %ctpr1, cpu_is_BSP + ands,0 %r0, APIC_BSP_IS_BSP, %r0 // is CPU BSP? +#endif /* CONFIG_BOOT_EPIC */ + cmpesb %r0, 0, %pred0 + ct %ctpr1 ? 
~%pred0 + addd,0 0, [cpu_count], %dr1 +#ifdef CONFIG_BOOT_EPIC + addd,1 0, (CEPIC_ID + EPIC_DEFAULT_PHYS_BASE), %dr0 + ldw,2 [%dr0] MAS_IOADDR, %r0 // read CEPIC_ID + ands,0 %r0, CEPIC_ID_BIT_MASK, %r0 + shrs,0 %r0, 8, %r3 // calculate prepicn + ands,0 %r0, 0xf, %r0 // calculate cepicn + shls,0 %r3, 4, %r3 + ors %r0, %r3, %r0 // ignore 4 bits in CEPIC_ID +#else /* CONFIG_BOOT_EPIC */ + addd,1 0, (APIC_ID + APIC_DEFAULT_PHYS_BASE), %dr0 // APIC_ID reg addr + ldw,2 [%dr0] MAS_IOADDR, %r0 // read APIC_ID + shrs,0 %r0, APIC_ID_SHIFT, %r0 + ands,0 %r0, APIC_ID_BIT_MASK, %r0 +#endif /* CONFIG_BOOT_EPIC */ +#ifdef CONFIG_E2K_SIC + ldw,0 [%dr1], %r2 // load cpu_count + adds,0 %r2, 1, %r2 // cpu_count ++ + stw,2 %r2, [%dr1] // save cpu_count + sxt,0 6, %r2, %dr2 // ES2 AP CPU # +#endif /* CONFIG_E2K_SIC */ + shld,0 %dr2, 2, %dr2 + stw,2 %r0, [%dr2 + all_pic_ids] + + addd,0 0, [phys_cpu_count], %dr1 +waiting_for_cpus_startup: + ldw,0 [%dr1], %r0 // load phys_cpu_count + disp %ctpr1, waiting_for_cpus_startup + cmpesb %r0, 0, %pred0 // phys_cpu_count != 0 ? + ct %ctpr1 ? 
%pred0 // go waiting_for_cpus_startup + +#ifndef CONFIG_BOOT_EPIC + +// reset APIC_NM register + + addd,1 0, APIC_DEFAULT_PHYS_BASE, %dr0 // APIC_NM reg addr + addd %dr0, APIC_NM, %dr0 // + ors 0, APIC_NM_BIT_MASK, %r1 // initial state of APIC_NM + stw,2 %r1, [%dr0] MAS_IOADDR // reset APIC_BSP + addd,1 0, APIC_DEFAULT_PHYS_BASE, %dr0 // APIC_EOI reg addr + addd %dr0, APIC_EOI, %dr0 // end of interrupt + ors 0, APIC_EOI_ACK, %r1 + stw,2 %r1, [%dr0] MAS_IOADDR + +#endif /* CONFIG_BOOT_EPIC */ + + rrs %psr, %r1 + ors %r1, NMIE_PSR, %r1 + rws %r1, %psr +waiting_for_apic_startup: + nop 4 + disp %ctpr1, waiting_for_apic_startup + ct %ctpr1 + +cpu_is_BSP: + +#endif /* CONFIG_SMP */ + +// copy BIOS .data segment from ROM to RAM + +#ifdef CONFIG_E2K_LEGACY_SIC + +#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07)) +#define CONFIG_CMD(bus, devfn, where) \ + ((bus & 0xff) << 20) | ((devfn & 0xff) << 12) | (where & 0xfff) +#define HB_REG_ADDR(reg_offset) \ + CONFIG_CMD(HB_PCI_BUS_NUM, \ + PCI_DEVFN(HB_PCI_SLOT, HB_PCI_FUNC), \ + reg_offset) +#define E1CP_BIOS_DATA_RAM_SIZE 0x1000000 /* 16M */ + +// Set low memory size to enable copiing data segment to RAM + + addd 0, E1CP_PCICFG_AREA_PHYS_BASE, %dr4 + addd 0, E1CP_BIOS_DATA_RAM_SIZE, %dr0 + stw %dr0, [%dr4 + HB_REG_ADDR(HB_PCI_TOM)] MAS_IOADDR +#endif /* CONFIG_E2K_LEGACY_SIC */ + + disp %ctpr1,looper + + addd 0, EOS_RAM_BASE, %r4 + + + addd 0, [__bios_start_data], %r6 // BIOS RW data segment address + // in ROM + addd 0, [__bios_size_data], %r8 // BIOS RW data segment size + + subd %r8, 16, %r8 // size-=16 due to ROM copying + // cycle organization + + addd 0, [__bios_size_data_plus_bss], %r0 // BIOS RW data+BSS size + subd %r0, 32, %r0 // size-=32 due to BSS clearing + // cycle organization (ct after + // move completed - 16, + // ct evaluates %pred on next + // iteration only - 16 + addd %r6, %r8, %r2 // EOS ending point in ROM + addd %r4, %r0, %r8 // EOS+BSS ending point in RAM + +looper: + cmpbesb 
%r2,%r6, %pred0 + ldd [%r6], %r0 + ldd [%r6+8], %r12 + addd %r6, 16, %r6 + std %r0, [%r4] + std %r12, [%r4+8] + addd %r4, 16, %r4 + ct %ctpr1 ? ~%pred0 + + +// clear BSS + + disp %ctpr1, looper_bss + addd 0, 0, %r0 + +looper_bss: + cmpbesb %r8, %r4, %pred0 + std %r0, [%r4] + std %r0, [%r4+8] + addd %r4, 16, %r4 + ct %ctpr1 ? ~%pred0 + +// PSP - procedure stack pointer + +// 'E2K_ALIGN_PSTACK' kernel loader procedure stack align + addd %r4, E2K_ALIGN_PSTACK_MASK, %r4 + andd %r4, (~(E2K_ALIGN_PSTACK_MASK)),%r4 + +// 'E2K_KERNEL_PS_PAGE_SIZE' kernel loader procedure stack align + addd %r4, (E2K_KERNEL_PS_PAGE_SIZE - 1), %r4 + andd %r4, (~(E2K_KERNEL_PS_PAGE_SIZE - 1)),%r4 + + rwd E2K_BOOT_KERNEL_PS_SIZE << 32, %psp.hi + addd %r4, RW, %r6 + rwd %r6, %psp.lo + addd %r4, (E2K_BOOT_KERNEL_PS_SIZE + E2K_KERNEL_PS_PAGE_SIZE), %r4 +// 'E2K_KERNEL_PS_PAGE_SIZE' kernel loader procedure stack align + addd %r4, (E2K_KERNEL_PS_PAGE_SIZE - 1), %r4 + andd %r4, (~(E2K_KERNEL_PS_PAGE_SIZE - 1)),%r4 + +// PCSP - procedure chain stack pointer + +// 'E2K_ALIGN_PCSTACK' kernel loader procedure chain stack align + addd %r4, E2K_ALIGN_PCSTACK_MASK, %r4 + andd %r4, (~(E2K_ALIGN_PCSTACK_MASK)),%r4 + +// 'E2K_KERNEL_PCS_PAGE_SIZE' kernel loader procedure chain stack align + addd %r4, (E2K_KERNEL_PCS_PAGE_SIZE - 1), %r4 + andd %r4, (~(E2K_KERNEL_PCS_PAGE_SIZE - 1)),%r4 + rwd E2K_BOOT_KERNEL_PCS_SIZE << 32, %pcsp.hi + addd %r4, RW, %r6 + + rwd %r6, %pcsp.lo + addd %r4, (E2K_BOOT_KERNEL_PCS_SIZE + E2K_KERNEL_PCS_PAGE_SIZE), %r4 +// 'E2K_KERNEL_PCS_PAGE_SIZE' kernel loader procedure chain stack align + addd %r4, (E2K_KERNEL_PCS_PAGE_SIZE - 1), %r4 + andd %r4, (~(E2K_KERNEL_PCS_PAGE_SIZE - 1)),%r4 + +// US - user (kernel loader) stack pointer + +// 'E2K_ALIGN_USTACK' kernel loader stack align + addd %r4, E2K_ALIGN_USTACK_MASK, %r4 + andd %r4, (~(E2K_ALIGN_USTACK_MASK)),%r4 + +// 'E2K_KERNEL_US_PAGE_SIZE' kernel loader stack align + +// User Stack is supposed to grow from higher memory addresses to 
lower ones +// Switch to higher memory addresses of stack + addd %r4, E2K_BOOT_KERNEL_US_SIZE, %r4 + addd %r4, (E2K_KERNEL_US_PAGE_SIZE - 1), %r4 + andd %r4, (~(E2K_KERNEL_US_PAGE_SIZE - 1)),%r4 + rwd E2K_BOOT_KERNEL_US_SIZE << 32, %usd.hi + addd %r4, E2K_ALIGN_STACKS_BASE_MASK, %r4 + andd %r4, (~(E2K_ALIGN_STACKS_BASE_MASK)), %r4 + rwd %r4, %sbr + addd %r4, RW_NONP, %r6 + rwd %r6, %usd.lo + +// Trap Cellar + std %r0, [0x0] 71 + std %r4, [0x00000050] 71 + addd %dr4, MMU_TRAP_CELLAR_MAX_SIZE * 8, %dr4 + std %dr4, [free_memory_p] + +// Jump to the rtc0 (preparation) + addd 0, [jump], %r2 + movtd %r2, %ctpr1 + + + setwd wsz=RBS+4 + +// Call jump() and fly away + call %ctpr1, wbs=RBS + .size loader, . - loader + diff --git a/arch/e2k/boot/romloader.lds b/arch/e2k/boot/romloader.lds new file mode 100644 index 000000000000..432f70f8f9cd --- /dev/null +++ b/arch/e2k/boot/romloader.lds @@ -0,0 +1,92 @@ + +OUTPUT_FORMAT("elf64-e2k", "elf64-e2k", "elf64-e2k") +OUTPUT_ARCH(e2k) +ENTRY(start) + +MEMORY { + ROM (RX) : ORIGIN = 0xFFFF000000, LENGTH = 16M + RAM (W) : ORIGIN = 1M, LENGTH = 31M + +/* ROM chunks */ + ROML (RX) : ORIGIN = 0xFFFF000000, LENGTH = 8M + ROMH (RX) : ORIGIN = 0xFFFF800000, LENGTH = 8M + CPUSTART (RX) : ORIGIN = 0xFFFFFFF800, LENGTH = 8M +} + +SECTIONS { + + _start = .; + .text : { + _text = .; /* Text and read-only data */ + + *(EXCLUDE_FILE (romstartup.o) .text .gnu.linkonce.t.*) + *(EXCLUDE_FILE (romstartup.o piggy.o) .rodata) + + . = ALIGN(4096); + piggy.o(.rodata) + + _etext = .; /* End of text section */ + } > ROML + + .data : AT ( ADDR(.text) + SIZEOF ( .text ) ) { + _data = .; /* Data section */ + *(EXCLUDE_FILE (romstartup.o) .data .gnu.linkonce.d.*) + + + _edata = .; /* End of data section */ + } > RAM + + + + .bss : { + __bss_start = .; /* BSS */ + + *(EXCLUDE_FILE (romstartup.o) .bss COMMON) + + __bss_end = . 
; + } > RAM + + .initrd : { + *(.initrd) + } > ROMH + + .symtable : { + *(.symtable) + } > ROMH + + .strtable : { + *(.strtable) + } > ROMH + + .text.startup : { + __startup_start = .; + + romstartup.o(.text) + romstartup.o(.rodata) + romstartup.o(.data) + romstartup.o(.bss) + + __startup_end = .; + + . = ALIGN(2048); /* Round up the image size exactly to 16M. */ + + } > CPUSTART + + /* Sections to be discarded */ + /DISCARD/ : { + *(.info) + } + + __bios_start_code = ADDR(.text); + __bios_size_code = SIZEOF(.text); + __bios_start_data = ADDR(.text) + SIZEOF( .text ); + __bios_size_data = SIZEOF(.data); + __bios_size_data_plus_bss = SIZEOF(.data) + SIZEOF( .bss ); + + + + /* lld compatibility items. These calculations may be not quite accurate. */ + __bios_size_ld = SIZEOF ( .text ); + __bios_fsize_ld = SIZEOF ( .text ) + SIZEOF ( .text ) + SIZEOF ( .bss ); + __bios_entry_ld = __startup_start - _text; +} diff --git a/arch/e2k/boot/romstartup.S b/arch/e2k/boot/romstartup.S new file mode 100644 index 000000000000..885cfc31d2b7 --- /dev/null +++ b/arch/e2k/boot/romstartup.S @@ -0,0 +1,51 @@ +/* $Id: romstartup.S,v 1.8 2005/08/16 12:32:40 atic Exp $ */ +// +// primary linux startup module +// lintel_startup.s, written by +// linux_startup.s, written by as modification +// of 'lintel_startup.s' to start up linux secondary module +// + +#define WSZ 0x20 /* 20 quadwords */ +#define RBS 0x10 /* 10 quadwords */ +#define RSZ 0x10 /* 10 quadwords */ + +#define UPSR 0x1 /* fpu enabled */ + +#define OSEM_LO 0x0 /* no software traps enabled */ +#define OSEM_HI 0x0 + +.text +.global start +.global loader + +// We are starting first steps and call loader(). See romloader.S +// Static memory allocation is defined by romloader.lds + +start: +{ + setwd wsz=WSZ + setbn rbs = RBS, rsz = RSZ, rcur = 0x0 + setbp psz = 0 +} + rws UPSR,%upsr + disp %ctpr1,loader + rwd OSEM_LO,%osem + ct %ctpr1 + .size start, . 
- start + +/* + + [probably] pending stuff: + + physprot: address=0x70, val=addr, lower 12 buts unused + pcilb: address=0x80, val=addr, lower 12 bits unused + + not needed stuff: + + *NO* USD; + *NO* CU table and CUTD; + *NO* turn virtual addressing on; + +*/ + diff --git a/arch/e2k/boot/smp.c b/arch/e2k/boot/smp.c new file mode 100644 index 000000000000..dda0977ba47e --- /dev/null +++ b/arch/e2k/boot/smp.c @@ -0,0 +1,386 @@ + +#include + +#include +#include +#include +#include +#include + +#include "boot_io.h" +#include "pic.h" + +#undef DEBUG_BOOT_MODE +#undef Dprintk +#define DEBUG_BOOT_MODE 0 /* SMP CPU boot */ +#define Dprintk if (DEBUG_BOOT_MODE) rom_printk + +extern char __apstartup_start, __apstartup_end; + +atomic_t cpu_count = ATOMIC_INIT(0); +unsigned int all_pic_ids[NR_CPUS]; + +volatile int phys_cpu_count = 0; +volatile int phys_cpu_num = 0; +volatile unsigned long cpu_callin_map = 0; +volatile unsigned long cpu_callout_map = 0; +volatile unsigned long phys_cpu_pres_map = 0; + +extern void set_kernel_image_pointers(void); +extern bootblock_struct_t *bootblock; + +extern void scall2(bootblock_struct_t *bootblock); + +inline unsigned long +__atomic_test_mask(unsigned long mask, volatile unsigned long *val) +{ + return ((*val) & mask) != 0; +} + +static inline void __atomic_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = 1UL << nr; + __api_atomic_op(mask, addr, d, "ord", RELAXED_MB); +} + +static inline void __atomic_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = 1UL << nr; + __api_atomic_op(mask, addr, d, "andnd", RELAXED_MB); +} + +#define atomic_test_bit(num, val) __atomic_test_mask((1UL << num), &val) +#define atomic_set_bit(num, val) __atomic_set_bit(num, &val) +#define atomic_clear_bit(num, val) __atomic_clear_bit(num, &val) + +static void +simulate_udelay(long utime) +{ + long usec; + long clock; + for (usec = 0; usec < utime; usec ++) { + for (clock = 0; clock < 10; clock ++) { + E2K_BUBBLE(1); + 
} + } +} + +#define E2K_APSTARTUP_BASE 0xe2000 + +static unsigned long +setup_trampoline(void) +{ + e2k_addr_t apstartup; + int apstartup_size; + + apstartup = (e2k_addr_t)_PAGE_ALIGN_DOWN(E2K_APSTARTUP_BASE, + E2K_EOS_RAM_PAGE_SIZE); + + apstartup_size = (e2k_addr_t)&__apstartup_end - + (e2k_addr_t)&__apstartup_start; + + Dprintk("The application CPU stratup code started from addr 0x%X " + "size 0x%X bytes\n", + (e2k_addr_t)&__apstartup_start, apstartup_size); + return apstartup; +} + +static atomic_t smp_commenced = ATOMIC_INIT(0); + +void +do_smp_commence(void) +{ + /* + * Lets the callins below out of their loop. + */ + Dprintk("Setting commenced=1, go go go\n"); + + atomic_set(&cpu_count, 0); + cpu_callin_map = 0; + cpu_callout_map = 0; + atomic_set(&smp_commenced, 1); +} + +static int +do_boot_cpu(int picid) +{ + unsigned long send_status, accept_status, boot_status; + int timeout, num_starts, j, cpu; + unsigned long start_addr; + int ret = 0; + + cpu = ++phys_cpu_count; + + start_addr = setup_trampoline(); + + /* So we see what's up */ + rom_printk("Booting processor #%d (PIC ID %d) start addr 0x%X\n", + cpu, picid, start_addr); + + /* + * Status is now clean + */ + send_status = 0; + accept_status = 0; + boot_status = 0; + + num_starts = 1; + + /* + * Run STARTUP IPI loop. + */ + Dprintk("#startup loops: %d.\n", num_starts); + + for (j = 1; j <= num_starts; j++) { + Dprintk("Sending STARTUP #%d.\n",j); + + native_pic_reset_esr(); + + /* + * STARTUP IPI + */ + native_pic_send_startup(picid, start_addr); + + /* + * Give the other CPU some time to accept the IPI. + */ + simulate_udelay(300); + + Dprintk("Startup point 1.\n"); + + Dprintk("Waiting for send to finish...\n"); + timeout = 0; + do { + Dprintk("+"); + simulate_udelay(100); + send_status = native_pic_read_icr_busy(); + } while (send_status && (timeout++ < 1000)); + + /* + * Give the other CPU some time to accept the IPI. 
+ */ + simulate_udelay(200); + + accept_status = native_pic_read_esr(); + + if (send_status || accept_status) + break; + } + Dprintk("After Startup loop.\n"); + + if (send_status) + rom_printk("PIC never delivered???\n"); + if (accept_status) + rom_printk("PIC delivery error (0x%X).\n", accept_status); + + if (!send_status && !accept_status) { + /* + * allow APs to start initializing. + */ + Dprintk("Before Callout %d, cpu_callout_map = 0x%x, &cpu_callout_map = 0x%X\n", + cpu, cpu_callout_map, &cpu_callout_map); + atomic_set_bit(cpu, cpu_callout_map); + Dprintk("After Callout %d, cpu_callout_map = 0x%x\n", cpu, cpu_callout_map); + + /* + * Wait 5s total for a response + */ + for (timeout = 0; timeout < 5000; timeout++) { + if (atomic_test_bit(cpu, cpu_callin_map)) + break; /* It has booted */ + simulate_udelay(100); + } + + if (atomic_test_bit(cpu, cpu_callin_map)) { + /* number CPUs logically, starting from 1 (BSP is 0) */ + Dprintk("CPU has booted.\n"); + } else { + boot_status = 1; + rom_printk("Not responding.\n"); + rom_printk("Printing PIC contents on CPU#%d/PIC#%d:\n", + 0, all_pic_ids[0]); + print_local_pic(); + } + } + if (send_status || accept_status || boot_status) { + phys_cpu_count--; + ret = -1; + } + + return ret; +} + +/* + * Cycle through the processors sending APIC STARTUP to boot each. + */ + +void +smp_start_cpus(void) +{ + int picid, cpu; + int live_cpu_num; + int ret; + + atomic_set(&smp_commenced, 0); + phys_cpu_pres_map = 0; + phys_cpu_count = 0; + live_cpu_num = atomic_read(&cpu_count); + if (live_cpu_num > 1) + rom_printk("Total number of live processors is %d\n", + live_cpu_num); + else + rom_printk("Only one live processor is booting\n"); + for (cpu = 0; cpu < live_cpu_num; cpu ++) { + phys_cpu_pres_map |= (1 << all_pic_ids[cpu]); + rom_printk(" CPU #%d %s PIC ID %d\n", + cpu, (cpu == 0) ? 
"BSP" : "AP ", all_pic_ids[cpu]); + } + +#ifdef CONFIG_L_LOCAL_APIC + setup_local_pic(all_pic_ids[0]); +#endif /* CONFIG_L_LOCAL_APIC */ + + /* + * Now scan the CPU present map and fire up the other CPUs. + */ + rom_printk("CPU present map: 0x%X\n", phys_cpu_pres_map); + + for (cpu = 1; cpu < live_cpu_num; cpu ++) { + + picid = all_pic_ids[cpu]; + + ret = do_boot_cpu(picid); + + /* + * Make sure we unmap all failed CPUs + */ + if (ret != 0) { + phys_cpu_pres_map &= ~(1 << picid); + rom_printk("phys CPU #%d not responding - " + "cannot use it.\n", + picid); + } + } + phys_cpu_num = phys_cpu_count + 1; + + Dprintk("All CPU boot done.\n"); + +} + +static void +do_smp_callin(int cpuid) +{ + int phys_id; + unsigned long timeout; + + /* + * (This works even if the APIC is not enabled.) + */ + phys_id = NATIVE_READ_PIC_ID(); + + /* + * STARTUP IPIs are fragile beasts as they might sometimes + * trigger some glue motherboard logic. Complete APIC bus + * silence for 1 second, this overestimates the time the + * boot CPU is spending to send the up to 2 STARTUP IPIs + * by a factor of two. This should be enough. + */ + + /* + * Waiting 2s total for startup (udelay is not yet working) + */ + Dprintk("CPU#%d (PIC ID: %d) waiting for CALLOUT\n", cpuid, phys_id); + for (timeout = 0; timeout < 20000; timeout++) { + if (atomic_test_bit(cpuid, cpu_callout_map)) + break; + simulate_udelay(10); + } + + if (!atomic_test_bit(cpuid, cpu_callout_map)) { + rom_printk("BUG: CPU#%d (PIC ID: %d) started up but did " + "not get a callout!, cpu_callout_map = 0x%x\n", + cpuid, phys_id, cpu_callout_map); + do { } while (1); + } + + /* + * the boot CPU has finished the init stage and is spinning + * on callin_map until we finish. We are free to set up this + * CPU, first the APIC. (this is probably redundant on most + * boards) + */ + + Dprintk("CALLIN, before setup_local_pic().\n"); + setup_local_pic(cpuid); + + /* + * Allow the master to continue. 
+ */ + atomic_set_bit(cpuid, cpu_callin_map); +} + +/* + * Activate a secondary processor. + */ +void +start_secondary(void *unused) +{ + e2k_psp_hi_t psp_hi; + e2k_psp_lo_t psp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_pcsp_lo_t pcsp_lo; + e2k_usbr_t usbr; + int cpu; + int cpu_id; + unsigned int value; + register unsigned long TIR_hi, TIR_lo; + + cpu = phys_cpu_count; + cpu_id = all_pic_ids[cpu]; + + TIR_hi = NATIVE_READ_TIR_HI_REG_VALUE(); /* order is */ + TIR_lo = NATIVE_READ_TIR_LO_REG_VALUE(); /* significant */ + NATIVE_WRITE_TIR_LO_REG_VALUE(TIR_lo); /* un-freeze TIR's */ + + value = native_pic_read_nm(); + native_pic_reset_nm(); + debug_pic_startup(cpu_id, value, E2K_APSTARTUP_BASE); + native_pic_write_eoi(); + + psp_hi.PSP_hi_half = NATIVE_NV_READ_PSP_HI_REG_VALUE(); + psp_lo.PSP_lo_half = NATIVE_NV_READ_PSP_LO_REG_VALUE(); + + rom_printk("CPU #%d Proc. Stack (PSP) at: 0x%X,", + cpu_id, AS_STRUCT(psp_lo).base); + rom_printk(" size: 0x%X,", AS_STRUCT(psp_hi).size); + rom_printk(" direction: %s.\n", "upward"); + + pcsp_hi.PCSP_hi_half = NATIVE_NV_READ_PCSP_HI_REG_VALUE(); + pcsp_lo.PCSP_lo_half = NATIVE_NV_READ_PCSP_LO_REG_VALUE(); + + rom_printk("CPU #%d Proc. Chain Stack (PCSP) at: 0x%X,", + cpu_id, AS_STRUCT(pcsp_lo).base); + rom_printk(" size: 0x%X,", AS_STRUCT(pcsp_hi).size); + rom_printk(" direction: %s.\n", "upward"); + usbr.USBR_reg = NATIVE_NV_READ_USBR_REG_VALUE(); + rom_printk("CPU #%d GNU C Stack at: 0x%X,", + cpu_id, usbr.USBR_base); + rom_printk(" size: 0x%X, ", E2K_BOOT_KERNEL_US_SIZE); + rom_printk(" direction: %s.\n", "downward"); + + /* + * Dont put anything before do_smp_callin(), SMP + * booting is too fragile that we want to limit the + * things done here to the most necessary things. 
+ */ + do_smp_callin(cpu); + while (!atomic_read(&smp_commenced)) { + E2K_BUBBLE(1); + } + + set_kernel_image_pointers(); + + scall2(bootblock); + + E2K_LMS_HALT_OK; +} diff --git a/arch/e2k/boot/stdio.c b/arch/e2k/boot/stdio.c new file mode 100644 index 000000000000..60f620fe4a8c --- /dev/null +++ b/arch/e2k/boot/stdio.c @@ -0,0 +1,271 @@ + +#include +#include +#include + +#if defined(CONFIG_BIOS) +#include "bios/bios.h" +#endif + +#define FALSE 0 +#define TRUE 1 + +#if defined(CONFIG_VGA_CONSOLE) +extern void vga_init(void); +extern void vga_putc(const char c); +extern int keyb_present; +extern int keyb_tstc(void); +extern int keyb_getc(void); +#endif + +#if defined(CONFIG_LMS_CONSOLE) +extern void console_putc(const char c); +#endif + +#if defined(CONFIG_E2K_SIC) && defined(CONFIG_SERIAL_AM85C30_BOOT_CONSOLE) +extern unsigned long com_port; +extern void serial_putc(unsigned long com_port, const char c); +extern unsigned char serial_getc(unsigned long com_port); +#endif + +#define is_digit(c) ((c >= '0') && (c <= '9')) + +int rom_strlen(char *s) +{ + int len = 0; + while (*s++) len++; + return len; +} + +#ifdef CONFIG_E2K_SIC +int rom_getc(void) +{ +#if defined(CONFIG_BIOS) && defined(CONFIG_SERIAL_AM85C30_BOOT_CONSOLE) + if (hardware.serial) { + return serial_getc(com_port); + } +#endif /* serial console */ + +#if defined(CONFIG_VGA_CONSOLE) +#if defined(CONFIG_BIOS) + if (hardware.keyboard) +#endif /* CONFIG_BIOS */ + { + if (keyb_tstc()) { + return keyb_getc(); + } + } +#endif + return 0; +} +#endif + + +void rom_putc(char c) +{ +#if defined(CONFIG_LMS_CONSOLE) +#if defined(CONFIG_BIOS) + if (hardware.dbgport) +#endif /* CONFIG_BIOS */ + { + console_putc(c); + }; +#endif /* E2K console */ + +#if defined(CONFIG_BIOS) && defined(CONFIG_E2K_SIC) && \ + defined(CONFIG_SERIAL_AM85C30_BOOT_CONSOLE) + if (hardware.serial) { + serial_putc(com_port, c); + if ( c == '\n' ) + serial_putc(com_port, '\r'); + } +#endif /* serial console */ + +#if defined(CONFIG_VGA_CONSOLE) 
+#if defined(CONFIG_BIOS) + if (hardware.video) +#endif /* CONFIG_BIOS */ + { + vga_putc(c); + } +#endif /* VGA console */ + +} + +void rom_puts(char *s) +{ + + while (*s) + rom_putc(*s++); +} + +int rom_cvt(unsigned long val, char *buf, long radix, char *digits) +{ + char temp[80]; + char *cp = temp; + int length = 0; + + if (val == 0) + { /* Special case */ + *cp++ = '0'; + } else + while (val) + { + *cp++ = digits[val % radix]; + val /= radix; + } + + while (cp != temp) + { + *buf++ = *--cp; + length++; + } + *buf = '\0'; + return length; +} + +void +rom_vprintk(const char *fmt0, va_list ap) +{ + + char c, sign, *cp = NULL; + int left_prec, right_prec, zero_fill, length = 0, pad, pad_on_right; + char buf[32]; + long val; + + while ((c = *fmt0++) != 0) + { + if (c == '%') + { + c = *fmt0++; + left_prec = right_prec = pad_on_right = 0; + if (c == '-') + { + c = *fmt0++; + pad_on_right++; + } + if (c == '0') + { + zero_fill = TRUE; + c = *fmt0++; + } else + { + zero_fill = FALSE; + } + while (is_digit(c)) + { + left_prec = (left_prec * 10) + (c - '0'); + c = *fmt0++; + } + if (c == '.') + { + c = *fmt0++; + zero_fill++; + while (is_digit(c)) + { + right_prec = (right_prec * 10) + (c - '0'); + c = *fmt0++; + } + } else + { + right_prec = left_prec; + } + + sign = '\0'; + +#ifdef CONFIG_E2K_SIC /* BUG: incorrect switch. 
+ * It jumps to another code mark of switch function */ + if (c == 'd'){ + val = va_arg(ap, int); + if (val < 0) + { + sign = '-'; + val = -val; + } + length = rom_cvt(val, buf, 10, "0123456789"); + cp = buf; + }else if (c == 'x'){ + val = va_arg(ap, unsigned int); + length = rom_cvt(val, buf, 16, "0123456789abcdef"); + cp = buf; + }else if (c == 'X'){ + val = va_arg(ap, unsigned long); + length = rom_cvt(val, buf, 16, "0123456789ABCDEF"); + cp = buf; + }else if (c == 'p'){ + val = va_arg(ap, unsigned long); + length = rom_cvt(val, buf, 16, "0123456789abcdef"); + cp = buf; + }else if (c == 's'){ + cp = va_arg(ap, char *); + length = rom_strlen(cp); + }else if (c == 'c'){ + c = va_arg(ap, int); + rom_putc(c); + continue; + }else{ + rom_putc('?'); + } +#endif + pad = left_prec - length; + if (sign != '\0') + { + pad--; + } + if (zero_fill) + { + c = '0'; + if (sign != '\0') + { + rom_putc(sign); + sign = '\0'; + } + } else + { + c = ' '; + } + if (!pad_on_right) + { + while (pad-- > 0) + { + rom_putc(c); + } + } + if (sign != '\0') + { + rom_putc(sign); + } + while (length-- > 0) + { + rom_putc(c = *cp++); + if (c == '\n') + { + rom_putc('\r'); + } + } + if (pad_on_right) + { + while (pad-- > 0) + { + rom_putc(c); + } + } + } else + { + rom_putc(c); + if (c == '\n') + { + rom_putc('\r'); + } + } + } +} + +void +rom_printk(char const *fmt, ...) 
+{ + va_list ap; + va_start(ap, fmt); + rom_vprintk(fmt, ap); + va_end(ap); +} diff --git a/arch/e2k/boot/string.c b/arch/e2k/boot/string.c new file mode 100644 index 000000000000..6ae2a967442b --- /dev/null +++ b/arch/e2k/boot/string.c @@ -0,0 +1,8 @@ +/* + * string routines + * + * Boot uses the same routines as main kernel + */ + +#define BOOT +#include "../lib/string.c" diff --git a/arch/e2k/boot/string_fpic.c b/arch/e2k/boot/string_fpic.c new file mode 100644 index 000000000000..6ae2a967442b --- /dev/null +++ b/arch/e2k/boot/string_fpic.c @@ -0,0 +1,8 @@ +/* + * string routines + * + * Boot uses the same routines as main kernel + */ + +#define BOOT +#include "../lib/string.c" diff --git a/arch/e2k/boot/string_guest_fpic.c b/arch/e2k/boot/string_guest_fpic.c new file mode 100644 index 000000000000..7a6416f6c9ec --- /dev/null +++ b/arch/e2k/boot/string_guest_fpic.c @@ -0,0 +1,8 @@ +/* + * string routines + * + * Boot uses the same routines as main kernel + */ + +#define BOOT +#include "../kvm/guest/string.c" diff --git a/arch/e2k/boot/topology.h b/arch/e2k/boot/topology.h new file mode 100644 index 000000000000..7f91e776380f --- /dev/null +++ b/arch/e2k/boot/topology.h @@ -0,0 +1,45 @@ +#ifndef _BOOT_TOPOLOGY_H_ +#define _BOOT_TOPOLOGY_H_ + +#include +#ifdef CONFIG_NUMA +#include +#endif /* CONFIG_NUMA */ +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * IO links/controllers/buses topology: + * each node of e2k machines can have from 1 to MAX_NODE_IOLINKS IO links + * which can be connected to IOHUB or RDMA + * Real possible number of IO links on node is described by following + * macroses for every type of machines + */ + +#undef E2K_NODE_IOLINKS + +#if defined(CONFIG_ES2) +#define E2K_NODE_IOLINKS ES2_NODE_IOLINKS +#elif defined(CONFIG_E2S) +#define E2K_NODE_IOLINKS E2S_NODE_IOLINKS +#elif defined(CONFIG_E8C) +#define E2K_NODE_IOLINKS E8C_NODE_IOLINKS +#elif defined(CONFIG_E8C2) +#define E2K_NODE_IOLINKS 
E8C2_NODE_IOLINKS +#elif defined(CONFIG_E1CP) +#define E2K_NODE_IOLINKS E1CP_NODE_IOLINKS +#elif defined(CONFIG_E12C) +#define E2K_NODE_IOLINKS E12C_NODE_IOLINKS +#elif defined(CONFIG_E16C) +#define E2K_NODE_IOLINKS E16C_NODE_IOLINKS +#elif defined(CONFIG_E2C3) +#define E2K_NODE_IOLINKS E2C3_NODE_IOLINKS +#endif + +#endif /* _BOOT_TOPOLOGY_H_ */ diff --git a/arch/e2k/boot/vga.c b/arch/e2k/boot/vga.c new file mode 100644 index 000000000000..de822bbcc6bf --- /dev/null +++ b/arch/e2k/boot/vga.c @@ -0,0 +1,221 @@ + +#include + +#include "vga.h" +#include "boot_io.h" + +char *vidmem = (char *) 0xB8000UL; +static int lines=25, cols=80; +static int orig_x=0, orig_y=0; + +#define VIDMEM_BUG 1 + +static void vidmem_cpy(void * __dest, __const void * __src, + int __n) +{ + int i; + char *d = (char *)__dest, *s = (char *)__src; + + for (i=0;i<__n;i++) { + +#ifndef VIDMEM_BUG + d[i] = s[i]; +#else + char c; + + c = NATIVE_READ_MAS_B(& s[i] , MAS_IOADDR); + NATIVE_WRITE_MAS_B(& d[i] , c , MAS_IOADDR); +#endif + } +} + +static void vga_outb(u16 port, u8 byte) +{ + NATIVE_WRITE_MAS_B(PHYS_X86_IO_BASE + port, byte, MAS_IOADDR); +} + +static void clear_screen(void) +{ + int i, j; + for (i = 0; i < lines; i++) { + for (j = 0; j < cols; j++) { +#ifndef VIDMEM_BUG + vidmem[((i*cols)+j)*2] = ' '; + vidmem[((i*cols)+j)*2+1] = 0x07; +#else + NATIVE_WRITE_MAS_B(&vidmem[((i*cols)+j)*2] , ' ', MAS_IOADDR); + NATIVE_WRITE_MAS_B(&vidmem[((i*cols)+j)*2+1] , 0x07, + MAS_IOADDR); +#endif + } + } +} + +static void scroll(void) +{ + int i; + + vidmem_cpy ( vidmem, vidmem + cols * 2, ( lines - 1 ) * cols * 2 ); + for ( i = ( lines - 1 ) * cols * 2; i < lines * cols * 2; i += 2 ) { +#ifndef VIDMEM_BUG + vidmem[i] = ' '; +#else + NATIVE_WRITE_MAS_B(& vidmem[i], ' ' , MAS_IOADDR); +#endif + } +} + +/* + * cursor() sets an offset (0-1999) into the 80x25 text area + */ +static void cursor(int x, int y) +{ + int pos = (y*cols)+x; + vga_outb(0x3D4, 14); + vga_outb(0x3D5, pos >> 8); + vga_outb(0x3D4, 15); + 
vga_outb(0x3D5, pos); +} + +void vga_putc(const char c) +{ + int x,y; + + x = orig_x; + y = orig_y; + + if ( c == '\n' ) { + x = 0; + if ( ++y >= lines ) { + scroll(); + y--; + } + } else if (c == '\r') { + x = 0; + } else if (c == '\b') { + if (x > 0) { + x--; + } else { + x = cols - 1; y = ( y == 0 ? y : y - 1); + } + } else { +#ifndef VIDMEM_BUG + vidmem [ ( x + cols * y ) * 2 ] = c; +#else + NATIVE_WRITE_MAS_B(&vidmem[(x+cols*y)*2], c, MAS_IOADDR); +#endif + if ( ++x >= cols ) { + x = 0; + if ( ++y >= lines ) { + scroll(); + y--; + } + } + } + + cursor(x, y); + + orig_x = x; + orig_y = y; +} + +void vga_puts(const char *s) +{ + int x,y; + char c; + + x = orig_x; + y = orig_y; + + while ( ( c = *s++ ) != '\0' ) { + + if ( c == '\n' ) { + x = 0; + if ( ++y >= lines ) { + scroll(); + y--; + } + } else if (c == '\b') { + if (x > 0) { + x--; + } else { + x = cols - 1; y = ( y == 0 ? y : y - 1); + } + } else { +#ifndef VIDMEM_BUG + vidmem [ ( x + cols * y ) * 2 ] = c; +#else + NATIVE_WRITE_MAS_B(&vidmem[(x+cols*y)*2], c, + MAS_IOADDR); +#endif + if ( ++x >= cols ) { + x = 0; + if ( ++y >= lines ) { + scroll(); + y--; + } + } + } + } + + cursor(x, y); + + orig_x = x; + orig_y = y; +} + + +static void regs_init(void) +{ + int i; + u16 port; + u8 byte; + + i=0; + + while (vga_regs[i][0] != 0) { +// rom_printk("regs_init: i = %d ," , i ); + + port = (u16) vga_regs[i][0]; + byte = (u8) vga_regs[i][1]; +// rom_printk(" port = %x , byte = %x\n", port, byte); + + vga_outb(port, byte); + i++; + } + +} + +static void loadfont(void) +{ + int i; + u8 *p; + u8 byte; + + i=0; + + while (font[i][0] != 0) { + p = (u8 *) font[i][0]; + byte = (u8) font[i][1]; +#ifndef VIDMEM_BUG + *p = byte; +#else + NATIVE_WRITE_MAS_B(p , byte, MAS_IOADDR); +#endif + i++; + } +} + + +void vga_init(void) +{ + regs_init(); + loadfont(); + + clear_screen(); + cursor(0,0); + orig_x=0; orig_y=0; + + vga_puts("legacy VGA console. Text mode. 80x25. 
16 colors.\n"); + +} diff --git a/arch/e2k/boot/vga.h b/arch/e2k/boot/vga.h new file mode 100644 index 000000000000..7c8241d2122d --- /dev/null +++ b/arch/e2k/boot/vga.h @@ -0,0 +1,5807 @@ + +unsigned short int vga_regs[][2] = { + + { 0x3c2, 0xc3 }, + { 0x3b4, 0x0d }, + { 0x3b5, 0xaa }, + { 0x3b4, 0x0d }, + { 0x3c2, 0xc2 }, + { 0x3d4, 0x0d }, + { 0x3d5, 0xaa }, + { 0x3d4, 0x0d }, + { 0x3c0, 0x00 }, + { 0x3c4, 0x00 }, + { 0x3c5, 0x01 }, + { 0x3c2, 0x67 }, + { 0x3c4, 0x01 }, + { 0x3c5, 0x00 }, + { 0x3c4, 0x02 }, + { 0x3c5, 0x03 }, + { 0x3c4, 0x03 }, + { 0x3c5, 0x00 }, + { 0x3c4, 0x04 }, + { 0x3c5, 0x02 }, + { 0x3c4, 0x00 }, + { 0x3c5, 0x03 }, + { 0x3d4, 0x11 }, + { 0x3d5, 0x20 }, + { 0x3d4, 0x00 }, + { 0x3d5, 0x5f }, + { 0x3d4, 0x01 }, + { 0x3d5, 0x4f }, + { 0x3d4, 0x02 }, + { 0x3d5, 0x50 }, + { 0x3d4, 0x03 }, + { 0x3d5, 0x82 }, + { 0x3d4, 0x04 }, + { 0x3d5, 0x55 }, + { 0x3d4, 0x05 }, + { 0x3d5, 0x81 }, + { 0x3d4, 0x06 }, + { 0x3d5, 0xbf }, + { 0x3d4, 0x07 }, + { 0x3d5, 0x1f }, + { 0x3d4, 0x08 }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x09 }, + { 0x3d5, 0x4f }, + { 0x3d4, 0x0a }, + { 0x3d5, 0x0d }, + { 0x3d4, 0x0b }, + { 0x3d5, 0x0e }, + { 0x3d4, 0x0c }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0d }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x10 }, + { 0x3d5, 0x9c }, + { 0x3d4, 0x11 }, + { 0x3d5, 0x8e }, + { 0x3d4, 0x12 }, + { 0x3d5, 0x8f }, + { 0x3d4, 0x13 }, + { 0x3d5, 0x28 }, + { 0x3d4, 0x14 }, + { 0x3d5, 0x1f }, + { 0x3d4, 0x15 }, + { 0x3d5, 0x96 }, + { 0x3d4, 0x16 }, + { 0x3d5, 0xb9 }, + { 0x3d4, 0x17 }, + { 0x3d5, 0xa3 }, + { 0x3d4, 0x18 }, + { 0x3d5, 0xff }, + { 0x3ce, 0x00 }, + { 0x3cf, 0x00 }, + { 0x3ce, 0x01 }, + { 0x3cf, 0x00 }, + { 0x3ce, 0x02 }, + { 0x3cf, 0x00 }, + { 0x3ce, 0x03 }, + { 0x3cf, 0x00 }, + { 0x3ce, 0x04 }, + { 0x3cf, 0x00 }, + { 0x3ce, 0x05 }, + { 0x3cf, 0x10 }, + { 0x3ce, 0x06 }, + { 0x3cf, 0x0e }, + { 0x3ce, 0x07 }, + { 0x3cf, 0x00 }, + { 0x3ce, 0x08 }, + { 0x3cf, 0xff }, + { 0x3da, 0x00 }, + 
{ 0x3c4, 0x01 }, + { 0x3c4, 0x01 }, + { 0x3c5, 0x20 }, + { 0x3c0, 0x00 }, + { 0x3c0, 0x00 }, + { 0x3c0, 0x01 }, + { 0x3c0, 0x01 }, + { 0x3c0, 0x02 }, + { 0x3c0, 0x02 }, + { 0x3c0, 0x03 }, + { 0x3c0, 0x03 }, + { 0x3c0, 0x04 }, + { 0x3c0, 0x04 }, + { 0x3c0, 0x05 }, + { 0x3c0, 0x05 }, + { 0x3c0, 0x06 }, + { 0x3c0, 0x14 }, + { 0x3c0, 0x07 }, + { 0x3c0, 0x07 }, + { 0x3c0, 0x08 }, + { 0x3c0, 0x38 }, + { 0x3c0, 0x09 }, + { 0x3c0, 0x39 }, + { 0x3c0, 0x0a }, + { 0x3c0, 0x3a }, + { 0x3c0, 0x0b }, + { 0x3c0, 0x3b }, + { 0x3c0, 0x0c }, + { 0x3c0, 0x3c }, + { 0x3c0, 0x0d }, + { 0x3c0, 0x3d }, + { 0x3c0, 0x0e }, + { 0x3c0, 0x3e }, + { 0x3c0, 0x0f }, + { 0x3c0, 0x3f }, + { 0x3c0, 0x10 }, + { 0x3c0, 0x0c }, + { 0x3c0, 0x11 }, + { 0x3c0, 0x00 }, + { 0x3c0, 0x12 }, + { 0x3c0, 0x0f }, + { 0x3c0, 0x13 }, + { 0x3c0, 0x08 }, + { 0x3c0, 0x14 }, + { 0x3c0, 0x00 }, + { 0x3c6, 0xff }, + { 0x3c8, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 
0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 
0x3c9, 0x3f }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x3f }, + { 0x3ce, 0x05 }, + { 0x3cf, 0x00 }, + { 0x3ce, 0x06 }, + { 0x3ce, 0x06 }, + { 0x3cf, 0x04 }, + { 0x3c4, 0x02 }, + { 0x3c5, 0x04 }, + { 0x3c4, 0x04 }, + { 0x3c5, 0x06 }, + { 0x3c4, 0x01 }, + { 0x3ce, 0x06 }, + { 0x3cf, 0x0e }, + { 0x3ce, 0x05 }, + { 0x3cf, 0x10 }, + { 0x3c4, 0x02 }, + { 0x3c5, 0x03 }, + { 0x3c4, 0x04 }, + { 0x3c5, 0x02 }, + { 0x3c4, 0x01 }, + { 0x3c4, 0x01 }, + { 0x3c5, 0x00 }, + { 0x3c0, 0x20 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x02 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x03 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x04 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x05 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x06 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x07 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x08 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x09 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x0a }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x0b }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x0c }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 
0x3d4, 0x0f }, + { 0x3d5, 0x0d }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x0e }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x0f }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x10 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x11 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x12 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x13 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x14 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x15 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x16 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x17 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x18 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x50 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x50 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x51 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x52 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x53 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x54 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x55 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x56 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x57 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x58 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x59 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x5a }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 
0x3d5, 0x5b }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x5c }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x5d }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x5e }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x5f }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x60 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x61 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x62 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x63 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x64 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x65 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x66 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x67 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x68 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x69 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x6a }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x6b }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x6c }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x6d }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x6e }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x6f }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x70 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x71 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x72 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x73 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x74 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x75 }, + { 
0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x76 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x77 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x78 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x79 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x7a }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x7b }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x50 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x50 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa0 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa0 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa1 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa2 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa3 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa4 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa5 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa6 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa7 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa8 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa9 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xaa }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xab }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xac }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xad }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xae }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xaf }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xb0 }, + { 0x3d4, 0x0e }, + { 
0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xb1 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xb2 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xb3 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xb4 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa0 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa0 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xf0 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xf0 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xf0 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xf0 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xf0 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x40 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x40 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x41 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x42 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x43 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x44 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x45 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x46 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x47 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x48 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x49 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x4a }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x4b }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x4c }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x4d }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 
0x3d4, 0x0f }, + { 0x3d5, 0x4e }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x4f }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x50 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x51 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x52 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x53 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x54 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x55 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x56 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x57 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x58 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x59 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x5a }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x5b }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x5c }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x5d }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x5e }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x5f }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x60 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x61 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x62 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x63 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x64 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x65 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x66 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x67 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 
0x3d5, 0x68 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x69 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x6a }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x6b }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x6c }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x6d }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x6e }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x6f }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x70 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x71 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x72 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x73 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x74 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x75 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x76 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x77 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x78 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x79 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x7a }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x7b }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x7c }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x7d }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x7e }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x7f }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x80 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x81 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x82 }, + { 
0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x40 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x40 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x90 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x90 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x91 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x92 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x93 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x94 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x95 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x96 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x97 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x98 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x99 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x9a }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x9b }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x9c }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x9d }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x9e }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x9f }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa0 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa1 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa2 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa3 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa4 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa5 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa6 }, + { 0x3d4, 0x0e }, + { 
0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa7 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa8 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa9 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xaa }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xab }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xac }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xad }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xae }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xaf }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xb0 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xb1 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xb2 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xb3 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xb4 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xb5 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xb6 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xb7 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xb8 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xb9 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xba }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xbb }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xbc }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xbd }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xbe }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xbf }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xc0 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 
0x3d4, 0x0f }, + { 0x3d5, 0xc1 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x90 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xe0 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x02 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x30 }, + { 0x3c0, 0x00 }, + { 0x3c4, 0x00 }, + { 0x3c5, 0x01 }, + { 0x3c2, 0x67 }, + { 0x3c4, 0x01 }, + { 0x3c5, 0x00 }, + { 0x3c4, 0x02 }, + { 0x3c5, 0x03 }, + { 0x3c4, 0x03 }, + { 0x3c5, 0x00 }, + { 0x3c4, 0x04 }, + { 0x3c5, 0x02 }, + { 0x3c4, 0x00 }, + { 0x3c5, 0x03 }, + { 0x3d4, 0x11 }, + { 0x3d5, 0x20 }, + { 0x3d4, 0x00 }, + { 0x3d5, 0x5f }, + { 0x3d4, 0x01 }, + { 0x3d5, 0x4f }, + { 0x3d4, 0x02 }, + { 0x3d5, 0x50 }, + { 0x3d4, 0x03 }, + { 0x3d5, 0x82 }, + { 0x3d4, 0x04 }, + { 0x3d5, 0x55 }, + { 0x3d4, 0x05 }, + { 0x3d5, 0x81 }, + { 0x3d4, 0x06 }, + { 0x3d5, 0xbf }, + { 0x3d4, 0x07 }, + { 0x3d5, 0x1f }, + { 0x3d4, 0x08 }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x09 }, + { 0x3d5, 0x4f }, + { 0x3d4, 0x0a }, + { 0x3d5, 0x0d }, + { 0x3d4, 0x0b }, + { 0x3d5, 0x0e }, + { 0x3d4, 0x0c }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0d }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x00 }, + { 0x3d4, 0x10 }, + { 0x3d5, 0x9c }, + { 0x3d4, 0x11 }, + { 0x3d5, 0x8e }, + { 0x3d4, 0x12 }, + { 0x3d5, 0x8f }, + { 0x3d4, 0x13 }, + { 0x3d5, 0x28 }, + { 0x3d4, 0x14 }, + { 0x3d5, 0x1f }, + { 0x3d4, 0x15 }, + { 0x3d5, 0x96 }, + { 0x3d4, 0x16 }, + { 0x3d5, 0xb9 }, + { 0x3d4, 0x17 }, + { 0x3d5, 0xa3 }, + { 0x3d4, 0x18 }, + { 0x3d5, 0xff }, + { 0x3ce, 0x00 }, + { 0x3cf, 0x00 }, + { 0x3ce, 0x01 }, + { 0x3cf, 0x00 }, + { 0x3ce, 0x02 }, + { 0x3cf, 0x00 }, + { 0x3ce, 0x03 }, + { 0x3cf, 0x00 }, + { 0x3ce, 0x04 }, + { 0x3cf, 0x00 }, + { 0x3ce, 0x05 }, + { 0x3cf, 0x10 }, + { 0x3ce, 0x06 }, + { 0x3cf, 0x0e }, + { 0x3ce, 0x07 }, + { 0x3cf, 0x00 }, + { 0x3ce, 0x08 }, + { 0x3cf, 0xff }, + { 0x3da, 0x00 }, + { 0x3c4, 0x01 }, + { 0x3c4, 0x01 }, + { 0x3c5, 0x20 }, + { 0x3c0, 0x00 }, + { 0x3c0, 0x00 }, + { 0x3c0, 0x01 }, + { 
0x3c0, 0x01 }, + { 0x3c0, 0x02 }, + { 0x3c0, 0x02 }, + { 0x3c0, 0x03 }, + { 0x3c0, 0x03 }, + { 0x3c0, 0x04 }, + { 0x3c0, 0x04 }, + { 0x3c0, 0x05 }, + { 0x3c0, 0x05 }, + { 0x3c0, 0x06 }, + { 0x3c0, 0x14 }, + { 0x3c0, 0x07 }, + { 0x3c0, 0x07 }, + { 0x3c0, 0x08 }, + { 0x3c0, 0x38 }, + { 0x3c0, 0x09 }, + { 0x3c0, 0x39 }, + { 0x3c0, 0x0a }, + { 0x3c0, 0x3a }, + { 0x3c0, 0x0b }, + { 0x3c0, 0x3b }, + { 0x3c0, 0x0c }, + { 0x3c0, 0x3c }, + { 0x3c0, 0x0d }, + { 0x3c0, 0x3d }, + { 0x3c0, 0x0e }, + { 0x3c0, 0x3e }, + { 0x3c0, 0x0f }, + { 0x3c0, 0x3f }, + { 0x3c0, 0x10 }, + { 0x3c0, 0x0c }, + { 0x3c0, 0x11 }, + { 0x3c0, 0x00 }, + { 0x3c0, 0x12 }, + { 0x3c0, 0x0f }, + { 0x3c0, 0x13 }, + { 0x3c0, 0x08 }, + { 0x3c0, 0x14 }, + { 0x3c0, 0x00 }, + { 0x3c6, 0xff }, + { 0x3c8, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 
0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x00 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x2a }, + { 0x3c9, 0x15 }, + { 
0x3c9, 0x15 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x15 }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x3f }, + { 0x3c9, 0x3f }, + { 0x3ce, 0x05 }, + { 0x3cf, 0x00 }, + { 0x3ce, 0x06 }, + { 0x3ce, 0x06 }, + { 0x3cf, 0x04 }, + { 0x3c4, 0x02 }, + { 0x3c5, 0x04 }, + { 0x3c4, 0x04 }, + { 0x3c5, 0x06 }, + { 0x3c4, 0x01 }, + { 0x3ce, 0x06 }, + { 0x3cf, 0x0e }, + { 0x3ce, 0x05 }, + { 0x3cf, 0x10 }, + { 0x3c4, 0x02 }, + { 0x3c5, 0x03 }, + { 0x3c4, 0x04 }, + { 0x3c5, 0x02 }, + { 0x3c4, 0x01 }, + { 0x3c4, 0x01 }, + { 0x3c5, 0x00 }, + { 0x3c0, 0x20 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x90 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x91 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x92 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x93 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x94 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x95 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x96 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x97 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x98 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x99 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x9a }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x9b }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x9c }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x9d }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x9e }, + { 
0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x9f }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa0 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa1 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xa2 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x90 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x01 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0xe0 }, + { 0x3d4, 0x0e }, + { 0x3d5, 0x02 }, + { 0x3d4, 0x0f }, + { 0x3d5, 0x30 }, + { 0x000, 0x00 } + +}; + + + +unsigned long int font[][2] = { + + { 0xa0000 , 0x00 }, + { 0xa0001 , 0x00 }, + { 0xa0002 , 0x00 }, + { 0xa0003 , 0x00 }, + { 0xa0004 , 0x00 }, + { 0xa0005 , 0x00 }, + { 0xa0006 , 0x00 }, + { 0xa0007 , 0x00 }, + { 0xa0008 , 0x00 }, + { 0xa0009 , 0x00 }, + { 0xa000a , 0x00 }, + { 0xa000b , 0x00 }, + { 0xa000c , 0x00 }, + { 0xa000d , 0x00 }, + { 0xa000e , 0x00 }, + { 0xa000f , 0x00 }, + { 0xa0020 , 0x00 }, + { 0xa0021 , 0x00 }, + { 0xa0022 , 0x7e }, + { 0xa0023 , 0x81 }, + { 0xa0024 , 0xa5 }, + { 0xa0025 , 0x81 }, + { 0xa0026 , 0x81 }, + { 0xa0027 , 0xa5 }, + { 0xa0028 , 0x99 }, + { 0xa0029 , 0x81 }, + { 0xa002a , 0x81 }, + { 0xa002b , 0x7e }, + { 0xa002c , 0x00 }, + { 0xa002d , 0x00 }, + { 0xa002e , 0x00 }, + { 0xa002f , 0x00 }, + { 0xa0040 , 0x00 }, + { 0xa0041 , 0x00 }, + { 0xa0042 , 0x7e }, + { 0xa0043 , 0xff }, + { 0xa0044 , 0xdb }, + { 0xa0045 , 0xff }, + { 0xa0046 , 0xff }, + { 0xa0047 , 0xdb }, + { 0xa0048 , 0xe7 }, + { 0xa0049 , 0xff }, + { 0xa004a , 0xff }, + { 0xa004b , 0x7e }, + { 0xa004c , 0x00 }, + { 0xa004d , 0x00 }, + { 0xa004e , 0x00 }, + { 0xa004f , 0x00 }, + { 0xa0060 , 0x00 }, + { 0xa0061 , 0x00 }, + { 0xa0062 , 0x00 }, + { 0xa0063 , 0x00 }, + { 0xa0064 , 0x6c }, + { 0xa0065 , 0xfe }, + { 0xa0066 , 0xfe }, + { 0xa0067 , 0xfe }, + { 0xa0068 , 0xfe }, + { 0xa0069 , 0x7c }, + { 0xa006a , 0x38 }, + { 0xa006b , 0x10 }, + { 0xa006c , 0x00 }, + { 0xa006d , 0x00 }, + { 0xa006e , 0x00 }, + { 0xa006f , 0x00 
}, + { 0xa0080 , 0x00 }, + { 0xa0081 , 0x00 }, + { 0xa0082 , 0x00 }, + { 0xa0083 , 0x00 }, + { 0xa0084 , 0x10 }, + { 0xa0085 , 0x38 }, + { 0xa0086 , 0x7c }, + { 0xa0087 , 0xfe }, + { 0xa0088 , 0x7c }, + { 0xa0089 , 0x38 }, + { 0xa008a , 0x10 }, + { 0xa008b , 0x00 }, + { 0xa008c , 0x00 }, + { 0xa008d , 0x00 }, + { 0xa008e , 0x00 }, + { 0xa008f , 0x00 }, + { 0xa00a0 , 0x00 }, + { 0xa00a1 , 0x00 }, + { 0xa00a2 , 0x00 }, + { 0xa00a3 , 0x18 }, + { 0xa00a4 , 0x3c }, + { 0xa00a5 , 0x3c }, + { 0xa00a6 , 0xe7 }, + { 0xa00a7 , 0xe7 }, + { 0xa00a8 , 0xe7 }, + { 0xa00a9 , 0x18 }, + { 0xa00aa , 0x18 }, + { 0xa00ab , 0x3c }, + { 0xa00ac , 0x00 }, + { 0xa00ad , 0x00 }, + { 0xa00ae , 0x00 }, + { 0xa00af , 0x00 }, + { 0xa00c0 , 0x00 }, + { 0xa00c1 , 0x00 }, + { 0xa00c2 , 0x00 }, + { 0xa00c3 , 0x18 }, + { 0xa00c4 , 0x3c }, + { 0xa00c5 , 0x7e }, + { 0xa00c6 , 0xff }, + { 0xa00c7 , 0xff }, + { 0xa00c8 , 0x7e }, + { 0xa00c9 , 0x18 }, + { 0xa00ca , 0x18 }, + { 0xa00cb , 0x3c }, + { 0xa00cc , 0x00 }, + { 0xa00cd , 0x00 }, + { 0xa00ce , 0x00 }, + { 0xa00cf , 0x00 }, + { 0xa00e0 , 0x00 }, + { 0xa00e1 , 0x00 }, + { 0xa00e2 , 0x00 }, + { 0xa00e3 , 0x00 }, + { 0xa00e4 , 0x00 }, + { 0xa00e5 , 0x00 }, + { 0xa00e6 , 0x18 }, + { 0xa00e7 , 0x3c }, + { 0xa00e8 , 0x3c }, + { 0xa00e9 , 0x18 }, + { 0xa00ea , 0x00 }, + { 0xa00eb , 0x00 }, + { 0xa00ec , 0x00 }, + { 0xa00ed , 0x00 }, + { 0xa00ee , 0x00 }, + { 0xa00ef , 0x00 }, + { 0xa0100 , 0xff }, + { 0xa0101 , 0xff }, + { 0xa0102 , 0xff }, + { 0xa0103 , 0xff }, + { 0xa0104 , 0xff }, + { 0xa0105 , 0xff }, + { 0xa0106 , 0xe7 }, + { 0xa0107 , 0xc3 }, + { 0xa0108 , 0xc3 }, + { 0xa0109 , 0xe7 }, + { 0xa010a , 0xff }, + { 0xa010b , 0xff }, + { 0xa010c , 0xff }, + { 0xa010d , 0xff }, + { 0xa010e , 0xff }, + { 0xa010f , 0xff }, + { 0xa0120 , 0x00 }, + { 0xa0121 , 0x00 }, + { 0xa0122 , 0x00 }, + { 0xa0123 , 0x00 }, + { 0xa0124 , 0x00 }, + { 0xa0125 , 0x3c }, + { 0xa0126 , 0x66 }, + { 0xa0127 , 0x42 }, + { 0xa0128 , 0x42 }, + { 0xa0129 , 0x66 }, + { 0xa012a , 
0x3c }, + { 0xa012b , 0x00 }, + { 0xa012c , 0x00 }, + { 0xa012d , 0x00 }, + { 0xa012e , 0x00 }, + { 0xa012f , 0x00 }, + { 0xa0140 , 0xff }, + { 0xa0141 , 0xff }, + { 0xa0142 , 0xff }, + { 0xa0143 , 0xff }, + { 0xa0144 , 0xff }, + { 0xa0145 , 0xc3 }, + { 0xa0146 , 0x99 }, + { 0xa0147 , 0xbd }, + { 0xa0148 , 0xbd }, + { 0xa0149 , 0x99 }, + { 0xa014a , 0xc3 }, + { 0xa014b , 0xff }, + { 0xa014c , 0xff }, + { 0xa014d , 0xff }, + { 0xa014e , 0xff }, + { 0xa014f , 0xff }, + { 0xa0160 , 0x00 }, + { 0xa0161 , 0x00 }, + { 0xa0162 , 0x1e }, + { 0xa0163 , 0x06 }, + { 0xa0164 , 0x0e }, + { 0xa0165 , 0x1a }, + { 0xa0166 , 0x78 }, + { 0xa0167 , 0xcc }, + { 0xa0168 , 0xcc }, + { 0xa0169 , 0xcc }, + { 0xa016a , 0xcc }, + { 0xa016b , 0x78 }, + { 0xa016c , 0x00 }, + { 0xa016d , 0x00 }, + { 0xa016e , 0x00 }, + { 0xa016f , 0x00 }, + { 0xa0180 , 0x00 }, + { 0xa0181 , 0x00 }, + { 0xa0182 , 0x3c }, + { 0xa0183 , 0x66 }, + { 0xa0184 , 0x66 }, + { 0xa0185 , 0x66 }, + { 0xa0186 , 0x66 }, + { 0xa0187 , 0x3c }, + { 0xa0188 , 0x18 }, + { 0xa0189 , 0x7e }, + { 0xa018a , 0x18 }, + { 0xa018b , 0x18 }, + { 0xa018c , 0x00 }, + { 0xa018d , 0x00 }, + { 0xa018e , 0x00 }, + { 0xa018f , 0x00 }, + { 0xa01a0 , 0x00 }, + { 0xa01a1 , 0x00 }, + { 0xa01a2 , 0x3f }, + { 0xa01a3 , 0x33 }, + { 0xa01a4 , 0x3f }, + { 0xa01a5 , 0x30 }, + { 0xa01a6 , 0x30 }, + { 0xa01a7 , 0x30 }, + { 0xa01a8 , 0x30 }, + { 0xa01a9 , 0x70 }, + { 0xa01aa , 0xf0 }, + { 0xa01ab , 0xe0 }, + { 0xa01ac , 0x00 }, + { 0xa01ad , 0x00 }, + { 0xa01ae , 0x00 }, + { 0xa01af , 0x00 }, + { 0xa01c0 , 0x00 }, + { 0xa01c1 , 0x00 }, + { 0xa01c2 , 0x7f }, + { 0xa01c3 , 0x63 }, + { 0xa01c4 , 0x7f }, + { 0xa01c5 , 0x63 }, + { 0xa01c6 , 0x63 }, + { 0xa01c7 , 0x63 }, + { 0xa01c8 , 0x63 }, + { 0xa01c9 , 0x67 }, + { 0xa01ca , 0xe7 }, + { 0xa01cb , 0xe6 }, + { 0xa01cc , 0xc0 }, + { 0xa01cd , 0x00 }, + { 0xa01ce , 0x00 }, + { 0xa01cf , 0x00 }, + { 0xa01e0 , 0x00 }, + { 0xa01e1 , 0x00 }, + { 0xa01e2 , 0x00 }, + { 0xa01e3 , 0x18 }, + { 0xa01e4 , 0x18 }, + { 0xa01e5 
, 0xdb }, + { 0xa01e6 , 0x3c }, + { 0xa01e7 , 0xe7 }, + { 0xa01e8 , 0x3c }, + { 0xa01e9 , 0xdb }, + { 0xa01ea , 0x18 }, + { 0xa01eb , 0x18 }, + { 0xa01ec , 0x00 }, + { 0xa01ed , 0x00 }, + { 0xa01ee , 0x00 }, + { 0xa01ef , 0x00 }, + { 0xa0200 , 0x00 }, + { 0xa0201 , 0x80 }, + { 0xa0202 , 0xc0 }, + { 0xa0203 , 0xe0 }, + { 0xa0204 , 0xf0 }, + { 0xa0205 , 0xf8 }, + { 0xa0206 , 0xfe }, + { 0xa0207 , 0xf8 }, + { 0xa0208 , 0xf0 }, + { 0xa0209 , 0xe0 }, + { 0xa020a , 0xc0 }, + { 0xa020b , 0x80 }, + { 0xa020c , 0x00 }, + { 0xa020d , 0x00 }, + { 0xa020e , 0x00 }, + { 0xa020f , 0x00 }, + { 0xa0220 , 0x00 }, + { 0xa0221 , 0x02 }, + { 0xa0222 , 0x06 }, + { 0xa0223 , 0x0e }, + { 0xa0224 , 0x1e }, + { 0xa0225 , 0x3e }, + { 0xa0226 , 0xfe }, + { 0xa0227 , 0x3e }, + { 0xa0228 , 0x1e }, + { 0xa0229 , 0x0e }, + { 0xa022a , 0x06 }, + { 0xa022b , 0x02 }, + { 0xa022c , 0x00 }, + { 0xa022d , 0x00 }, + { 0xa022e , 0x00 }, + { 0xa022f , 0x00 }, + { 0xa0240 , 0x00 }, + { 0xa0241 , 0x00 }, + { 0xa0242 , 0x18 }, + { 0xa0243 , 0x3c }, + { 0xa0244 , 0x7e }, + { 0xa0245 , 0x18 }, + { 0xa0246 , 0x18 }, + { 0xa0247 , 0x18 }, + { 0xa0248 , 0x7e }, + { 0xa0249 , 0x3c }, + { 0xa024a , 0x18 }, + { 0xa024b , 0x00 }, + { 0xa024c , 0x00 }, + { 0xa024d , 0x00 }, + { 0xa024e , 0x00 }, + { 0xa024f , 0x00 }, + { 0xa0260 , 0x00 }, + { 0xa0261 , 0x00 }, + { 0xa0262 , 0x66 }, + { 0xa0263 , 0x66 }, + { 0xa0264 , 0x66 }, + { 0xa0265 , 0x66 }, + { 0xa0266 , 0x66 }, + { 0xa0267 , 0x66 }, + { 0xa0268 , 0x66 }, + { 0xa0269 , 0x00 }, + { 0xa026a , 0x66 }, + { 0xa026b , 0x66 }, + { 0xa026c , 0x00 }, + { 0xa026d , 0x00 }, + { 0xa026e , 0x00 }, + { 0xa026f , 0x00 }, + { 0xa0280 , 0x00 }, + { 0xa0281 , 0x00 }, + { 0xa0282 , 0x7f }, + { 0xa0283 , 0xdb }, + { 0xa0284 , 0xdb }, + { 0xa0285 , 0xdb }, + { 0xa0286 , 0x7b }, + { 0xa0287 , 0x1b }, + { 0xa0288 , 0x1b }, + { 0xa0289 , 0x1b }, + { 0xa028a , 0x1b }, + { 0xa028b , 0x1b }, + { 0xa028c , 0x00 }, + { 0xa028d , 0x00 }, + { 0xa028e , 0x00 }, + { 0xa028f , 0x00 }, + { 
0xa02a0 , 0x00 }, + { 0xa02a1 , 0x7c }, + { 0xa02a2 , 0xc6 }, + { 0xa02a3 , 0x60 }, + { 0xa02a4 , 0x38 }, + { 0xa02a5 , 0x6c }, + { 0xa02a6 , 0xc6 }, + { 0xa02a7 , 0xc6 }, + { 0xa02a8 , 0x6c }, + { 0xa02a9 , 0x38 }, + { 0xa02aa , 0x0c }, + { 0xa02ab , 0xc6 }, + { 0xa02ac , 0x7c }, + { 0xa02ad , 0x00 }, + { 0xa02ae , 0x00 }, + { 0xa02af , 0x00 }, + { 0xa02c0 , 0x00 }, + { 0xa02c1 , 0x00 }, + { 0xa02c2 , 0x00 }, + { 0xa02c3 , 0x00 }, + { 0xa02c4 , 0x00 }, + { 0xa02c5 , 0x00 }, + { 0xa02c6 , 0x00 }, + { 0xa02c7 , 0x00 }, + { 0xa02c8 , 0xfe }, + { 0xa02c9 , 0xfe }, + { 0xa02ca , 0xfe }, + { 0xa02cb , 0xfe }, + { 0xa02cc , 0x00 }, + { 0xa02cd , 0x00 }, + { 0xa02ce , 0x00 }, + { 0xa02cf , 0x00 }, + { 0xa02e0 , 0x00 }, + { 0xa02e1 , 0x00 }, + { 0xa02e2 , 0x18 }, + { 0xa02e3 , 0x3c }, + { 0xa02e4 , 0x7e }, + { 0xa02e5 , 0x18 }, + { 0xa02e6 , 0x18 }, + { 0xa02e7 , 0x18 }, + { 0xa02e8 , 0x7e }, + { 0xa02e9 , 0x3c }, + { 0xa02ea , 0x18 }, + { 0xa02eb , 0x7e }, + { 0xa02ec , 0x00 }, + { 0xa02ed , 0x00 }, + { 0xa02ee , 0x00 }, + { 0xa02ef , 0x00 }, + { 0xa0300 , 0x00 }, + { 0xa0301 , 0x00 }, + { 0xa0302 , 0x18 }, + { 0xa0303 , 0x3c }, + { 0xa0304 , 0x7e }, + { 0xa0305 , 0x18 }, + { 0xa0306 , 0x18 }, + { 0xa0307 , 0x18 }, + { 0xa0308 , 0x18 }, + { 0xa0309 , 0x18 }, + { 0xa030a , 0x18 }, + { 0xa030b , 0x18 }, + { 0xa030c , 0x00 }, + { 0xa030d , 0x00 }, + { 0xa030e , 0x00 }, + { 0xa030f , 0x00 }, + { 0xa0320 , 0x00 }, + { 0xa0321 , 0x00 }, + { 0xa0322 , 0x18 }, + { 0xa0323 , 0x18 }, + { 0xa0324 , 0x18 }, + { 0xa0325 , 0x18 }, + { 0xa0326 , 0x18 }, + { 0xa0327 , 0x18 }, + { 0xa0328 , 0x18 }, + { 0xa0329 , 0x7e }, + { 0xa032a , 0x3c }, + { 0xa032b , 0x18 }, + { 0xa032c , 0x00 }, + { 0xa032d , 0x00 }, + { 0xa032e , 0x00 }, + { 0xa032f , 0x00 }, + { 0xa0340 , 0x00 }, + { 0xa0341 , 0x00 }, + { 0xa0342 , 0x00 }, + { 0xa0343 , 0x00 }, + { 0xa0344 , 0x00 }, + { 0xa0345 , 0x18 }, + { 0xa0346 , 0x0c }, + { 0xa0347 , 0xfe }, + { 0xa0348 , 0x0c }, + { 0xa0349 , 0x18 }, + { 0xa034a , 0x00 }, + 
{ 0xa034b , 0x00 }, + { 0xa034c , 0x00 }, + { 0xa034d , 0x00 }, + { 0xa034e , 0x00 }, + { 0xa034f , 0x00 }, + { 0xa0360 , 0x00 }, + { 0xa0361 , 0x00 }, + { 0xa0362 , 0x00 }, + { 0xa0363 , 0x00 }, + { 0xa0364 , 0x00 }, + { 0xa0365 , 0x30 }, + { 0xa0366 , 0x60 }, + { 0xa0367 , 0xfe }, + { 0xa0368 , 0x60 }, + { 0xa0369 , 0x30 }, + { 0xa036a , 0x00 }, + { 0xa036b , 0x00 }, + { 0xa036c , 0x00 }, + { 0xa036d , 0x00 }, + { 0xa036e , 0x00 }, + { 0xa036f , 0x00 }, + { 0xa0380 , 0x00 }, + { 0xa0381 , 0x00 }, + { 0xa0382 , 0x00 }, + { 0xa0383 , 0x00 }, + { 0xa0384 , 0x00 }, + { 0xa0385 , 0x00 }, + { 0xa0386 , 0xc0 }, + { 0xa0387 , 0xc0 }, + { 0xa0388 , 0xc0 }, + { 0xa0389 , 0xfe }, + { 0xa038a , 0x00 }, + { 0xa038b , 0x00 }, + { 0xa038c , 0x00 }, + { 0xa038d , 0x00 }, + { 0xa038e , 0x00 }, + { 0xa038f , 0x00 }, + { 0xa03a0 , 0x00 }, + { 0xa03a1 , 0x00 }, + { 0xa03a2 , 0x00 }, + { 0xa03a3 , 0x00 }, + { 0xa03a4 , 0x00 }, + { 0xa03a5 , 0x28 }, + { 0xa03a6 , 0x6c }, + { 0xa03a7 , 0xfe }, + { 0xa03a8 , 0x6c }, + { 0xa03a9 , 0x28 }, + { 0xa03aa , 0x00 }, + { 0xa03ab , 0x00 }, + { 0xa03ac , 0x00 }, + { 0xa03ad , 0x00 }, + { 0xa03ae , 0x00 }, + { 0xa03af , 0x00 }, + { 0xa03c0 , 0x00 }, + { 0xa03c1 , 0x00 }, + { 0xa03c2 , 0x00 }, + { 0xa03c3 , 0x00 }, + { 0xa03c4 , 0x10 }, + { 0xa03c5 , 0x38 }, + { 0xa03c6 , 0x38 }, + { 0xa03c7 , 0x7c }, + { 0xa03c8 , 0x7c }, + { 0xa03c9 , 0xfe }, + { 0xa03ca , 0xfe }, + { 0xa03cb , 0x00 }, + { 0xa03cc , 0x00 }, + { 0xa03cd , 0x00 }, + { 0xa03ce , 0x00 }, + { 0xa03cf , 0x00 }, + { 0xa03e0 , 0x00 }, + { 0xa03e1 , 0x00 }, + { 0xa03e2 , 0x00 }, + { 0xa03e3 , 0x00 }, + { 0xa03e4 , 0xfe }, + { 0xa03e5 , 0xfe }, + { 0xa03e6 , 0x7c }, + { 0xa03e7 , 0x7c }, + { 0xa03e8 , 0x38 }, + { 0xa03e9 , 0x38 }, + { 0xa03ea , 0x10 }, + { 0xa03eb , 0x00 }, + { 0xa03ec , 0x00 }, + { 0xa03ed , 0x00 }, + { 0xa03ee , 0x00 }, + { 0xa03ef , 0x00 }, + { 0xa0400 , 0x00 }, + { 0xa0401 , 0x00 }, + { 0xa0402 , 0x00 }, + { 0xa0403 , 0x00 }, + { 0xa0404 , 0x00 }, + { 0xa0405 , 0x00 }, 
+ { 0xa0406 , 0x00 }, + { 0xa0407 , 0x00 }, + { 0xa0408 , 0x00 }, + { 0xa0409 , 0x00 }, + { 0xa040a , 0x00 }, + { 0xa040b , 0x00 }, + { 0xa040c , 0x00 }, + { 0xa040d , 0x00 }, + { 0xa040e , 0x00 }, + { 0xa040f , 0x00 }, + { 0xa0420 , 0x00 }, + { 0xa0421 , 0x00 }, + { 0xa0422 , 0x18 }, + { 0xa0423 , 0x3c }, + { 0xa0424 , 0x3c }, + { 0xa0425 , 0x3c }, + { 0xa0426 , 0x18 }, + { 0xa0427 , 0x18 }, + { 0xa0428 , 0x18 }, + { 0xa0429 , 0x00 }, + { 0xa042a , 0x18 }, + { 0xa042b , 0x18 }, + { 0xa042c , 0x00 }, + { 0xa042d , 0x00 }, + { 0xa042e , 0x00 }, + { 0xa042f , 0x00 }, + { 0xa0440 , 0x00 }, + { 0xa0441 , 0x66 }, + { 0xa0442 , 0x66 }, + { 0xa0443 , 0x66 }, + { 0xa0444 , 0x24 }, + { 0xa0445 , 0x00 }, + { 0xa0446 , 0x00 }, + { 0xa0447 , 0x00 }, + { 0xa0448 , 0x00 }, + { 0xa0449 , 0x00 }, + { 0xa044a , 0x00 }, + { 0xa044b , 0x00 }, + { 0xa044c , 0x00 }, + { 0xa044d , 0x00 }, + { 0xa044e , 0x00 }, + { 0xa044f , 0x00 }, + { 0xa0460 , 0x00 }, + { 0xa0461 , 0x00 }, + { 0xa0462 , 0x00 }, + { 0xa0463 , 0x6c }, + { 0xa0464 , 0x6c }, + { 0xa0465 , 0xfe }, + { 0xa0466 , 0x6c }, + { 0xa0467 , 0x6c }, + { 0xa0468 , 0x6c }, + { 0xa0469 , 0xfe }, + { 0xa046a , 0x6c }, + { 0xa046b , 0x6c }, + { 0xa046c , 0x00 }, + { 0xa046d , 0x00 }, + { 0xa046e , 0x00 }, + { 0xa046f , 0x00 }, + { 0xa0480 , 0x18 }, + { 0xa0481 , 0x18 }, + { 0xa0482 , 0x7c }, + { 0xa0483 , 0xc6 }, + { 0xa0484 , 0xc2 }, + { 0xa0485 , 0xc0 }, + { 0xa0486 , 0x7c }, + { 0xa0487 , 0x06 }, + { 0xa0488 , 0x06 }, + { 0xa0489 , 0x86 }, + { 0xa048a , 0xc6 }, + { 0xa048b , 0x7c }, + { 0xa048c , 0x18 }, + { 0xa048d , 0x18 }, + { 0xa048e , 0x00 }, + { 0xa048f , 0x00 }, + { 0xa04a0 , 0x00 }, + { 0xa04a1 , 0x00 }, + { 0xa04a2 , 0x00 }, + { 0xa04a3 , 0x00 }, + { 0xa04a4 , 0xc2 }, + { 0xa04a5 , 0xc6 }, + { 0xa04a6 , 0x0c }, + { 0xa04a7 , 0x18 }, + { 0xa04a8 , 0x30 }, + { 0xa04a9 , 0x60 }, + { 0xa04aa , 0xc6 }, + { 0xa04ab , 0x86 }, + { 0xa04ac , 0x00 }, + { 0xa04ad , 0x00 }, + { 0xa04ae , 0x00 }, + { 0xa04af , 0x00 }, + { 0xa04c0 , 0x00 
}, + { 0xa04c1 , 0x00 }, + { 0xa04c2 , 0x38 }, + { 0xa04c3 , 0x6c }, + { 0xa04c4 , 0x6c }, + { 0xa04c5 , 0x38 }, + { 0xa04c6 , 0x76 }, + { 0xa04c7 , 0xdc }, + { 0xa04c8 , 0xcc }, + { 0xa04c9 , 0xcc }, + { 0xa04ca , 0xcc }, + { 0xa04cb , 0x76 }, + { 0xa04cc , 0x00 }, + { 0xa04cd , 0x00 }, + { 0xa04ce , 0x00 }, + { 0xa04cf , 0x00 }, + { 0xa04e0 , 0x00 }, + { 0xa04e1 , 0x30 }, + { 0xa04e2 , 0x30 }, + { 0xa04e3 , 0x30 }, + { 0xa04e4 , 0x60 }, + { 0xa04e5 , 0x00 }, + { 0xa04e6 , 0x00 }, + { 0xa04e7 , 0x00 }, + { 0xa04e8 , 0x00 }, + { 0xa04e9 , 0x00 }, + { 0xa04ea , 0x00 }, + { 0xa04eb , 0x00 }, + { 0xa04ec , 0x00 }, + { 0xa04ed , 0x00 }, + { 0xa04ee , 0x00 }, + { 0xa04ef , 0x00 }, + { 0xa0500 , 0x00 }, + { 0xa0501 , 0x00 }, + { 0xa0502 , 0x0c }, + { 0xa0503 , 0x18 }, + { 0xa0504 , 0x30 }, + { 0xa0505 , 0x30 }, + { 0xa0506 , 0x30 }, + { 0xa0507 , 0x30 }, + { 0xa0508 , 0x30 }, + { 0xa0509 , 0x30 }, + { 0xa050a , 0x18 }, + { 0xa050b , 0x0c }, + { 0xa050c , 0x00 }, + { 0xa050d , 0x00 }, + { 0xa050e , 0x00 }, + { 0xa050f , 0x00 }, + { 0xa0520 , 0x00 }, + { 0xa0521 , 0x00 }, + { 0xa0522 , 0x30 }, + { 0xa0523 , 0x18 }, + { 0xa0524 , 0x0c }, + { 0xa0525 , 0x0c }, + { 0xa0526 , 0x0c }, + { 0xa0527 , 0x0c }, + { 0xa0528 , 0x0c }, + { 0xa0529 , 0x0c }, + { 0xa052a , 0x18 }, + { 0xa052b , 0x30 }, + { 0xa052c , 0x00 }, + { 0xa052d , 0x00 }, + { 0xa052e , 0x00 }, + { 0xa052f , 0x00 }, + { 0xa0540 , 0x00 }, + { 0xa0541 , 0x00 }, + { 0xa0542 , 0x00 }, + { 0xa0543 , 0x00 }, + { 0xa0544 , 0x00 }, + { 0xa0545 , 0x66 }, + { 0xa0546 , 0x3c }, + { 0xa0547 , 0xff }, + { 0xa0548 , 0x3c }, + { 0xa0549 , 0x66 }, + { 0xa054a , 0x00 }, + { 0xa054b , 0x00 }, + { 0xa054c , 0x00 }, + { 0xa054d , 0x00 }, + { 0xa054e , 0x00 }, + { 0xa054f , 0x00 }, + { 0xa0560 , 0x00 }, + { 0xa0561 , 0x00 }, + { 0xa0562 , 0x00 }, + { 0xa0563 , 0x00 }, + { 0xa0564 , 0x00 }, + { 0xa0565 , 0x18 }, + { 0xa0566 , 0x18 }, + { 0xa0567 , 0x7e }, + { 0xa0568 , 0x18 }, + { 0xa0569 , 0x18 }, + { 0xa056a , 0x00 }, + { 0xa056b , 
0x00 }, + { 0xa056c , 0x00 }, + { 0xa056d , 0x00 }, + { 0xa056e , 0x00 }, + { 0xa056f , 0x00 }, + { 0xa0580 , 0x00 }, + { 0xa0581 , 0x00 }, + { 0xa0582 , 0x00 }, + { 0xa0583 , 0x00 }, + { 0xa0584 , 0x00 }, + { 0xa0585 , 0x00 }, + { 0xa0586 , 0x00 }, + { 0xa0587 , 0x00 }, + { 0xa0588 , 0x00 }, + { 0xa0589 , 0x18 }, + { 0xa058a , 0x18 }, + { 0xa058b , 0x18 }, + { 0xa058c , 0x30 }, + { 0xa058d , 0x00 }, + { 0xa058e , 0x00 }, + { 0xa058f , 0x00 }, + { 0xa05a0 , 0x00 }, + { 0xa05a1 , 0x00 }, + { 0xa05a2 , 0x00 }, + { 0xa05a3 , 0x00 }, + { 0xa05a4 , 0x00 }, + { 0xa05a5 , 0x00 }, + { 0xa05a6 , 0x00 }, + { 0xa05a7 , 0xfe }, + { 0xa05a8 , 0x00 }, + { 0xa05a9 , 0x00 }, + { 0xa05aa , 0x00 }, + { 0xa05ab , 0x00 }, + { 0xa05ac , 0x00 }, + { 0xa05ad , 0x00 }, + { 0xa05ae , 0x00 }, + { 0xa05af , 0x00 }, + { 0xa05c0 , 0x00 }, + { 0xa05c1 , 0x00 }, + { 0xa05c2 , 0x00 }, + { 0xa05c3 , 0x00 }, + { 0xa05c4 , 0x00 }, + { 0xa05c5 , 0x00 }, + { 0xa05c6 , 0x00 }, + { 0xa05c7 , 0x00 }, + { 0xa05c8 , 0x00 }, + { 0xa05c9 , 0x00 }, + { 0xa05ca , 0x18 }, + { 0xa05cb , 0x18 }, + { 0xa05cc , 0x00 }, + { 0xa05cd , 0x00 }, + { 0xa05ce , 0x00 }, + { 0xa05cf , 0x00 }, + { 0xa05e0 , 0x00 }, + { 0xa05e1 , 0x00 }, + { 0xa05e2 , 0x00 }, + { 0xa05e3 , 0x00 }, + { 0xa05e4 , 0x02 }, + { 0xa05e5 , 0x06 }, + { 0xa05e6 , 0x0c }, + { 0xa05e7 , 0x18 }, + { 0xa05e8 , 0x30 }, + { 0xa05e9 , 0x60 }, + { 0xa05ea , 0xc0 }, + { 0xa05eb , 0x80 }, + { 0xa05ec , 0x00 }, + { 0xa05ed , 0x00 }, + { 0xa05ee , 0x00 }, + { 0xa05ef , 0x00 }, + { 0xa0600 , 0x00 }, + { 0xa0601 , 0x00 }, + { 0xa0602 , 0x38 }, + { 0xa0603 , 0x6c }, + { 0xa0604 , 0xc6 }, + { 0xa0605 , 0xc6 }, + { 0xa0606 , 0xd6 }, + { 0xa0607 , 0xd6 }, + { 0xa0608 , 0xc6 }, + { 0xa0609 , 0xc6 }, + { 0xa060a , 0x6c }, + { 0xa060b , 0x38 }, + { 0xa060c , 0x00 }, + { 0xa060d , 0x00 }, + { 0xa060e , 0x00 }, + { 0xa060f , 0x00 }, + { 0xa0620 , 0x00 }, + { 0xa0621 , 0x00 }, + { 0xa0622 , 0x18 }, + { 0xa0623 , 0x38 }, + { 0xa0624 , 0x78 }, + { 0xa0625 , 0x18 }, + { 0xa0626 
, 0x18 }, + { 0xa0627 , 0x18 }, + { 0xa0628 , 0x18 }, + { 0xa0629 , 0x18 }, + { 0xa062a , 0x18 }, + { 0xa062b , 0x7e }, + { 0xa062c , 0x00 }, + { 0xa062d , 0x00 }, + { 0xa062e , 0x00 }, + { 0xa062f , 0x00 }, + { 0xa0640 , 0x00 }, + { 0xa0641 , 0x00 }, + { 0xa0642 , 0x7c }, + { 0xa0643 , 0xc6 }, + { 0xa0644 , 0x06 }, + { 0xa0645 , 0x0c }, + { 0xa0646 , 0x18 }, + { 0xa0647 , 0x30 }, + { 0xa0648 , 0x60 }, + { 0xa0649 , 0xc0 }, + { 0xa064a , 0xc6 }, + { 0xa064b , 0xfe }, + { 0xa064c , 0x00 }, + { 0xa064d , 0x00 }, + { 0xa064e , 0x00 }, + { 0xa064f , 0x00 }, + { 0xa0660 , 0x00 }, + { 0xa0661 , 0x00 }, + { 0xa0662 , 0x7c }, + { 0xa0663 , 0xc6 }, + { 0xa0664 , 0x06 }, + { 0xa0665 , 0x06 }, + { 0xa0666 , 0x3c }, + { 0xa0667 , 0x06 }, + { 0xa0668 , 0x06 }, + { 0xa0669 , 0x06 }, + { 0xa066a , 0xc6 }, + { 0xa066b , 0x7c }, + { 0xa066c , 0x00 }, + { 0xa066d , 0x00 }, + { 0xa066e , 0x00 }, + { 0xa066f , 0x00 }, + { 0xa0680 , 0x00 }, + { 0xa0681 , 0x00 }, + { 0xa0682 , 0x0c }, + { 0xa0683 , 0x1c }, + { 0xa0684 , 0x3c }, + { 0xa0685 , 0x6c }, + { 0xa0686 , 0xcc }, + { 0xa0687 , 0xfe }, + { 0xa0688 , 0x0c }, + { 0xa0689 , 0x0c }, + { 0xa068a , 0x0c }, + { 0xa068b , 0x1e }, + { 0xa068c , 0x00 }, + { 0xa068d , 0x00 }, + { 0xa068e , 0x00 }, + { 0xa068f , 0x00 }, + { 0xa06a0 , 0x00 }, + { 0xa06a1 , 0x00 }, + { 0xa06a2 , 0xfe }, + { 0xa06a3 , 0xc0 }, + { 0xa06a4 , 0xc0 }, + { 0xa06a5 , 0xc0 }, + { 0xa06a6 , 0xfc }, + { 0xa06a7 , 0x06 }, + { 0xa06a8 , 0x06 }, + { 0xa06a9 , 0x06 }, + { 0xa06aa , 0xc6 }, + { 0xa06ab , 0x7c }, + { 0xa06ac , 0x00 }, + { 0xa06ad , 0x00 }, + { 0xa06ae , 0x00 }, + { 0xa06af , 0x00 }, + { 0xa06c0 , 0x00 }, + { 0xa06c1 , 0x00 }, + { 0xa06c2 , 0x38 }, + { 0xa06c3 , 0x60 }, + { 0xa06c4 , 0xc0 }, + { 0xa06c5 , 0xc0 }, + { 0xa06c6 , 0xfc }, + { 0xa06c7 , 0xc6 }, + { 0xa06c8 , 0xc6 }, + { 0xa06c9 , 0xc6 }, + { 0xa06ca , 0xc6 }, + { 0xa06cb , 0x7c }, + { 0xa06cc , 0x00 }, + { 0xa06cd , 0x00 }, + { 0xa06ce , 0x00 }, + { 0xa06cf , 0x00 }, + { 0xa06e0 , 0x00 }, + { 
0xa06e1 , 0x00 }, + { 0xa06e2 , 0xfe }, + { 0xa06e3 , 0xc6 }, + { 0xa06e4 , 0x06 }, + { 0xa06e5 , 0x06 }, + { 0xa06e6 , 0x0c }, + { 0xa06e7 , 0x18 }, + { 0xa06e8 , 0x30 }, + { 0xa06e9 , 0x30 }, + { 0xa06ea , 0x30 }, + { 0xa06eb , 0x30 }, + { 0xa06ec , 0x00 }, + { 0xa06ed , 0x00 }, + { 0xa06ee , 0x00 }, + { 0xa06ef , 0x00 }, + { 0xa0700 , 0x00 }, + { 0xa0701 , 0x00 }, + { 0xa0702 , 0x7c }, + { 0xa0703 , 0xc6 }, + { 0xa0704 , 0xc6 }, + { 0xa0705 , 0xc6 }, + { 0xa0706 , 0x7c }, + { 0xa0707 , 0xc6 }, + { 0xa0708 , 0xc6 }, + { 0xa0709 , 0xc6 }, + { 0xa070a , 0xc6 }, + { 0xa070b , 0x7c }, + { 0xa070c , 0x00 }, + { 0xa070d , 0x00 }, + { 0xa070e , 0x00 }, + { 0xa070f , 0x00 }, + { 0xa0720 , 0x00 }, + { 0xa0721 , 0x00 }, + { 0xa0722 , 0x7c }, + { 0xa0723 , 0xc6 }, + { 0xa0724 , 0xc6 }, + { 0xa0725 , 0xc6 }, + { 0xa0726 , 0x7e }, + { 0xa0727 , 0x06 }, + { 0xa0728 , 0x06 }, + { 0xa0729 , 0x06 }, + { 0xa072a , 0x0c }, + { 0xa072b , 0x78 }, + { 0xa072c , 0x00 }, + { 0xa072d , 0x00 }, + { 0xa072e , 0x00 }, + { 0xa072f , 0x00 }, + { 0xa0740 , 0x00 }, + { 0xa0741 , 0x00 }, + { 0xa0742 , 0x00 }, + { 0xa0743 , 0x00 }, + { 0xa0744 , 0x18 }, + { 0xa0745 , 0x18 }, + { 0xa0746 , 0x00 }, + { 0xa0747 , 0x00 }, + { 0xa0748 , 0x00 }, + { 0xa0749 , 0x18 }, + { 0xa074a , 0x18 }, + { 0xa074b , 0x00 }, + { 0xa074c , 0x00 }, + { 0xa074d , 0x00 }, + { 0xa074e , 0x00 }, + { 0xa074f , 0x00 }, + { 0xa0760 , 0x00 }, + { 0xa0761 , 0x00 }, + { 0xa0762 , 0x00 }, + { 0xa0763 , 0x00 }, + { 0xa0764 , 0x18 }, + { 0xa0765 , 0x18 }, + { 0xa0766 , 0x00 }, + { 0xa0767 , 0x00 }, + { 0xa0768 , 0x00 }, + { 0xa0769 , 0x18 }, + { 0xa076a , 0x18 }, + { 0xa076b , 0x30 }, + { 0xa076c , 0x00 }, + { 0xa076d , 0x00 }, + { 0xa076e , 0x00 }, + { 0xa076f , 0x00 }, + { 0xa0780 , 0x00 }, + { 0xa0781 , 0x00 }, + { 0xa0782 , 0x00 }, + { 0xa0783 , 0x06 }, + { 0xa0784 , 0x0c }, + { 0xa0785 , 0x18 }, + { 0xa0786 , 0x30 }, + { 0xa0787 , 0x60 }, + { 0xa0788 , 0x30 }, + { 0xa0789 , 0x18 }, + { 0xa078a , 0x0c }, + { 0xa078b , 0x06 }, + 
{ 0xa078c , 0x00 }, + { 0xa078d , 0x00 }, + { 0xa078e , 0x00 }, + { 0xa078f , 0x00 }, + { 0xa07a0 , 0x00 }, + { 0xa07a1 , 0x00 }, + { 0xa07a2 , 0x00 }, + { 0xa07a3 , 0x00 }, + { 0xa07a4 , 0x00 }, + { 0xa07a5 , 0x7e }, + { 0xa07a6 , 0x00 }, + { 0xa07a7 , 0x00 }, + { 0xa07a8 , 0x7e }, + { 0xa07a9 , 0x00 }, + { 0xa07aa , 0x00 }, + { 0xa07ab , 0x00 }, + { 0xa07ac , 0x00 }, + { 0xa07ad , 0x00 }, + { 0xa07ae , 0x00 }, + { 0xa07af , 0x00 }, + { 0xa07c0 , 0x00 }, + { 0xa07c1 , 0x00 }, + { 0xa07c2 , 0x00 }, + { 0xa07c3 , 0x60 }, + { 0xa07c4 , 0x30 }, + { 0xa07c5 , 0x18 }, + { 0xa07c6 , 0x0c }, + { 0xa07c7 , 0x06 }, + { 0xa07c8 , 0x0c }, + { 0xa07c9 , 0x18 }, + { 0xa07ca , 0x30 }, + { 0xa07cb , 0x60 }, + { 0xa07cc , 0x00 }, + { 0xa07cd , 0x00 }, + { 0xa07ce , 0x00 }, + { 0xa07cf , 0x00 }, + { 0xa07e0 , 0x00 }, + { 0xa07e1 , 0x00 }, + { 0xa07e2 , 0x7c }, + { 0xa07e3 , 0xc6 }, + { 0xa07e4 , 0xc6 }, + { 0xa07e5 , 0x0c }, + { 0xa07e6 , 0x18 }, + { 0xa07e7 , 0x18 }, + { 0xa07e8 , 0x18 }, + { 0xa07e9 , 0x00 }, + { 0xa07ea , 0x18 }, + { 0xa07eb , 0x18 }, + { 0xa07ec , 0x00 }, + { 0xa07ed , 0x00 }, + { 0xa07ee , 0x00 }, + { 0xa07ef , 0x00 }, + { 0xa0800 , 0x00 }, + { 0xa0801 , 0x00 }, + { 0xa0802 , 0x00 }, + { 0xa0803 , 0x7c }, + { 0xa0804 , 0xc6 }, + { 0xa0805 , 0xc6 }, + { 0xa0806 , 0xde }, + { 0xa0807 , 0xde }, + { 0xa0808 , 0xde }, + { 0xa0809 , 0xdc }, + { 0xa080a , 0xc0 }, + { 0xa080b , 0x7c }, + { 0xa080c , 0x00 }, + { 0xa080d , 0x00 }, + { 0xa080e , 0x00 }, + { 0xa080f , 0x00 }, + { 0xa0820 , 0x00 }, + { 0xa0821 , 0x00 }, + { 0xa0822 , 0x10 }, + { 0xa0823 , 0x38 }, + { 0xa0824 , 0x6c }, + { 0xa0825 , 0xc6 }, + { 0xa0826 , 0xc6 }, + { 0xa0827 , 0xfe }, + { 0xa0828 , 0xc6 }, + { 0xa0829 , 0xc6 }, + { 0xa082a , 0xc6 }, + { 0xa082b , 0xc6 }, + { 0xa082c , 0x00 }, + { 0xa082d , 0x00 }, + { 0xa082e , 0x00 }, + { 0xa082f , 0x00 }, + { 0xa0840 , 0x00 }, + { 0xa0841 , 0x00 }, + { 0xa0842 , 0xfc }, + { 0xa0843 , 0x66 }, + { 0xa0844 , 0x66 }, + { 0xa0845 , 0x66 }, + { 0xa0846 , 0x7c }, 
+ { 0xa0847 , 0x66 }, + { 0xa0848 , 0x66 }, + { 0xa0849 , 0x66 }, + { 0xa084a , 0x66 }, + { 0xa084b , 0xfc }, + { 0xa084c , 0x00 }, + { 0xa084d , 0x00 }, + { 0xa084e , 0x00 }, + { 0xa084f , 0x00 }, + { 0xa0860 , 0x00 }, + { 0xa0861 , 0x00 }, + { 0xa0862 , 0x3c }, + { 0xa0863 , 0x66 }, + { 0xa0864 , 0xc2 }, + { 0xa0865 , 0xc0 }, + { 0xa0866 , 0xc0 }, + { 0xa0867 , 0xc0 }, + { 0xa0868 , 0xc0 }, + { 0xa0869 , 0xc2 }, + { 0xa086a , 0x66 }, + { 0xa086b , 0x3c }, + { 0xa086c , 0x00 }, + { 0xa086d , 0x00 }, + { 0xa086e , 0x00 }, + { 0xa086f , 0x00 }, + { 0xa0880 , 0x00 }, + { 0xa0881 , 0x00 }, + { 0xa0882 , 0xf8 }, + { 0xa0883 , 0x6c }, + { 0xa0884 , 0x66 }, + { 0xa0885 , 0x66 }, + { 0xa0886 , 0x66 }, + { 0xa0887 , 0x66 }, + { 0xa0888 , 0x66 }, + { 0xa0889 , 0x66 }, + { 0xa088a , 0x6c }, + { 0xa088b , 0xf8 }, + { 0xa088c , 0x00 }, + { 0xa088d , 0x00 }, + { 0xa088e , 0x00 }, + { 0xa088f , 0x00 }, + { 0xa08a0 , 0x00 }, + { 0xa08a1 , 0x00 }, + { 0xa08a2 , 0xfe }, + { 0xa08a3 , 0x66 }, + { 0xa08a4 , 0x62 }, + { 0xa08a5 , 0x68 }, + { 0xa08a6 , 0x78 }, + { 0xa08a7 , 0x68 }, + { 0xa08a8 , 0x60 }, + { 0xa08a9 , 0x62 }, + { 0xa08aa , 0x66 }, + { 0xa08ab , 0xfe }, + { 0xa08ac , 0x00 }, + { 0xa08ad , 0x00 }, + { 0xa08ae , 0x00 }, + { 0xa08af , 0x00 }, + { 0xa08c0 , 0x00 }, + { 0xa08c1 , 0x00 }, + { 0xa08c2 , 0xfe }, + { 0xa08c3 , 0x66 }, + { 0xa08c4 , 0x62 }, + { 0xa08c5 , 0x68 }, + { 0xa08c6 , 0x78 }, + { 0xa08c7 , 0x68 }, + { 0xa08c8 , 0x60 }, + { 0xa08c9 , 0x60 }, + { 0xa08ca , 0x60 }, + { 0xa08cb , 0xf0 }, + { 0xa08cc , 0x00 }, + { 0xa08cd , 0x00 }, + { 0xa08ce , 0x00 }, + { 0xa08cf , 0x00 }, + { 0xa08e0 , 0x00 }, + { 0xa08e1 , 0x00 }, + { 0xa08e2 , 0x3c }, + { 0xa08e3 , 0x66 }, + { 0xa08e4 , 0xc2 }, + { 0xa08e5 , 0xc0 }, + { 0xa08e6 , 0xc0 }, + { 0xa08e7 , 0xde }, + { 0xa08e8 , 0xc6 }, + { 0xa08e9 , 0xc6 }, + { 0xa08ea , 0x66 }, + { 0xa08eb , 0x3a }, + { 0xa08ec , 0x00 }, + { 0xa08ed , 0x00 }, + { 0xa08ee , 0x00 }, + { 0xa08ef , 0x00 }, + { 0xa0900 , 0x00 }, + { 0xa0901 , 0x00 
}, + { 0xa0902 , 0xc6 }, + { 0xa0903 , 0xc6 }, + { 0xa0904 , 0xc6 }, + { 0xa0905 , 0xc6 }, + { 0xa0906 , 0xfe }, + { 0xa0907 , 0xc6 }, + { 0xa0908 , 0xc6 }, + { 0xa0909 , 0xc6 }, + { 0xa090a , 0xc6 }, + { 0xa090b , 0xc6 }, + { 0xa090c , 0x00 }, + { 0xa090d , 0x00 }, + { 0xa090e , 0x00 }, + { 0xa090f , 0x00 }, + { 0xa0920 , 0x00 }, + { 0xa0921 , 0x00 }, + { 0xa0922 , 0x3c }, + { 0xa0923 , 0x18 }, + { 0xa0924 , 0x18 }, + { 0xa0925 , 0x18 }, + { 0xa0926 , 0x18 }, + { 0xa0927 , 0x18 }, + { 0xa0928 , 0x18 }, + { 0xa0929 , 0x18 }, + { 0xa092a , 0x18 }, + { 0xa092b , 0x3c }, + { 0xa092c , 0x00 }, + { 0xa092d , 0x00 }, + { 0xa092e , 0x00 }, + { 0xa092f , 0x00 }, + { 0xa0940 , 0x00 }, + { 0xa0941 , 0x00 }, + { 0xa0942 , 0x1e }, + { 0xa0943 , 0x0c }, + { 0xa0944 , 0x0c }, + { 0xa0945 , 0x0c }, + { 0xa0946 , 0x0c }, + { 0xa0947 , 0x0c }, + { 0xa0948 , 0xcc }, + { 0xa0949 , 0xcc }, + { 0xa094a , 0xcc }, + { 0xa094b , 0x78 }, + { 0xa094c , 0x00 }, + { 0xa094d , 0x00 }, + { 0xa094e , 0x00 }, + { 0xa094f , 0x00 }, + { 0xa0960 , 0x00 }, + { 0xa0961 , 0x00 }, + { 0xa0962 , 0xe6 }, + { 0xa0963 , 0x66 }, + { 0xa0964 , 0x66 }, + { 0xa0965 , 0x6c }, + { 0xa0966 , 0x78 }, + { 0xa0967 , 0x78 }, + { 0xa0968 , 0x6c }, + { 0xa0969 , 0x66 }, + { 0xa096a , 0x66 }, + { 0xa096b , 0xe6 }, + { 0xa096c , 0x00 }, + { 0xa096d , 0x00 }, + { 0xa096e , 0x00 }, + { 0xa096f , 0x00 }, + { 0xa0980 , 0x00 }, + { 0xa0981 , 0x00 }, + { 0xa0982 , 0xf0 }, + { 0xa0983 , 0x60 }, + { 0xa0984 , 0x60 }, + { 0xa0985 , 0x60 }, + { 0xa0986 , 0x60 }, + { 0xa0987 , 0x60 }, + { 0xa0988 , 0x60 }, + { 0xa0989 , 0x62 }, + { 0xa098a , 0x66 }, + { 0xa098b , 0xfe }, + { 0xa098c , 0x00 }, + { 0xa098d , 0x00 }, + { 0xa098e , 0x00 }, + { 0xa098f , 0x00 }, + { 0xa09a0 , 0x00 }, + { 0xa09a1 , 0x00 }, + { 0xa09a2 , 0xc6 }, + { 0xa09a3 , 0xee }, + { 0xa09a4 , 0xfe }, + { 0xa09a5 , 0xfe }, + { 0xa09a6 , 0xd6 }, + { 0xa09a7 , 0xc6 }, + { 0xa09a8 , 0xc6 }, + { 0xa09a9 , 0xc6 }, + { 0xa09aa , 0xc6 }, + { 0xa09ab , 0xc6 }, + { 0xa09ac , 
0x00 }, + { 0xa09ad , 0x00 }, + { 0xa09ae , 0x00 }, + { 0xa09af , 0x00 }, + { 0xa09c0 , 0x00 }, + { 0xa09c1 , 0x00 }, + { 0xa09c2 , 0xc6 }, + { 0xa09c3 , 0xe6 }, + { 0xa09c4 , 0xf6 }, + { 0xa09c5 , 0xfe }, + { 0xa09c6 , 0xde }, + { 0xa09c7 , 0xce }, + { 0xa09c8 , 0xc6 }, + { 0xa09c9 , 0xc6 }, + { 0xa09ca , 0xc6 }, + { 0xa09cb , 0xc6 }, + { 0xa09cc , 0x00 }, + { 0xa09cd , 0x00 }, + { 0xa09ce , 0x00 }, + { 0xa09cf , 0x00 }, + { 0xa09e0 , 0x00 }, + { 0xa09e1 , 0x00 }, + { 0xa09e2 , 0x7c }, + { 0xa09e3 , 0xc6 }, + { 0xa09e4 , 0xc6 }, + { 0xa09e5 , 0xc6 }, + { 0xa09e6 , 0xc6 }, + { 0xa09e7 , 0xc6 }, + { 0xa09e8 , 0xc6 }, + { 0xa09e9 , 0xc6 }, + { 0xa09ea , 0xc6 }, + { 0xa09eb , 0x7c }, + { 0xa09ec , 0x00 }, + { 0xa09ed , 0x00 }, + { 0xa09ee , 0x00 }, + { 0xa09ef , 0x00 }, + { 0xa0a00 , 0x00 }, + { 0xa0a01 , 0x00 }, + { 0xa0a02 , 0xfc }, + { 0xa0a03 , 0x66 }, + { 0xa0a04 , 0x66 }, + { 0xa0a05 , 0x66 }, + { 0xa0a06 , 0x7c }, + { 0xa0a07 , 0x60 }, + { 0xa0a08 , 0x60 }, + { 0xa0a09 , 0x60 }, + { 0xa0a0a , 0x60 }, + { 0xa0a0b , 0xf0 }, + { 0xa0a0c , 0x00 }, + { 0xa0a0d , 0x00 }, + { 0xa0a0e , 0x00 }, + { 0xa0a0f , 0x00 }, + { 0xa0a20 , 0x00 }, + { 0xa0a21 , 0x00 }, + { 0xa0a22 , 0x7c }, + { 0xa0a23 , 0xc6 }, + { 0xa0a24 , 0xc6 }, + { 0xa0a25 , 0xc6 }, + { 0xa0a26 , 0xc6 }, + { 0xa0a27 , 0xc6 }, + { 0xa0a28 , 0xc6 }, + { 0xa0a29 , 0xd6 }, + { 0xa0a2a , 0xde }, + { 0xa0a2b , 0x7c }, + { 0xa0a2c , 0x0c }, + { 0xa0a2d , 0x0e }, + { 0xa0a2e , 0x00 }, + { 0xa0a2f , 0x00 }, + { 0xa0a40 , 0x00 }, + { 0xa0a41 , 0x00 }, + { 0xa0a42 , 0xfc }, + { 0xa0a43 , 0x66 }, + { 0xa0a44 , 0x66 }, + { 0xa0a45 , 0x66 }, + { 0xa0a46 , 0x7c }, + { 0xa0a47 , 0x6c }, + { 0xa0a48 , 0x66 }, + { 0xa0a49 , 0x66 }, + { 0xa0a4a , 0x66 }, + { 0xa0a4b , 0xe6 }, + { 0xa0a4c , 0x00 }, + { 0xa0a4d , 0x00 }, + { 0xa0a4e , 0x00 }, + { 0xa0a4f , 0x00 }, + { 0xa0a60 , 0x00 }, + { 0xa0a61 , 0x00 }, + { 0xa0a62 , 0x7c }, + { 0xa0a63 , 0xc6 }, + { 0xa0a64 , 0xc6 }, + { 0xa0a65 , 0x60 }, + { 0xa0a66 , 0x38 }, + { 0xa0a67 
, 0x0c }, + { 0xa0a68 , 0x06 }, + { 0xa0a69 , 0xc6 }, + { 0xa0a6a , 0xc6 }, + { 0xa0a6b , 0x7c }, + { 0xa0a6c , 0x00 }, + { 0xa0a6d , 0x00 }, + { 0xa0a6e , 0x00 }, + { 0xa0a6f , 0x00 }, + { 0xa0a80 , 0x00 }, + { 0xa0a81 , 0x00 }, + { 0xa0a82 , 0x7e }, + { 0xa0a83 , 0x7e }, + { 0xa0a84 , 0x5a }, + { 0xa0a85 , 0x18 }, + { 0xa0a86 , 0x18 }, + { 0xa0a87 , 0x18 }, + { 0xa0a88 , 0x18 }, + { 0xa0a89 , 0x18 }, + { 0xa0a8a , 0x18 }, + { 0xa0a8b , 0x3c }, + { 0xa0a8c , 0x00 }, + { 0xa0a8d , 0x00 }, + { 0xa0a8e , 0x00 }, + { 0xa0a8f , 0x00 }, + { 0xa0aa0 , 0x00 }, + { 0xa0aa1 , 0x00 }, + { 0xa0aa2 , 0xc6 }, + { 0xa0aa3 , 0xc6 }, + { 0xa0aa4 , 0xc6 }, + { 0xa0aa5 , 0xc6 }, + { 0xa0aa6 , 0xc6 }, + { 0xa0aa7 , 0xc6 }, + { 0xa0aa8 , 0xc6 }, + { 0xa0aa9 , 0xc6 }, + { 0xa0aaa , 0xc6 }, + { 0xa0aab , 0x7c }, + { 0xa0aac , 0x00 }, + { 0xa0aad , 0x00 }, + { 0xa0aae , 0x00 }, + { 0xa0aaf , 0x00 }, + { 0xa0ac0 , 0x00 }, + { 0xa0ac1 , 0x00 }, + { 0xa0ac2 , 0xc6 }, + { 0xa0ac3 , 0xc6 }, + { 0xa0ac4 , 0xc6 }, + { 0xa0ac5 , 0xc6 }, + { 0xa0ac6 , 0xc6 }, + { 0xa0ac7 , 0xc6 }, + { 0xa0ac8 , 0xc6 }, + { 0xa0ac9 , 0x6c }, + { 0xa0aca , 0x38 }, + { 0xa0acb , 0x10 }, + { 0xa0acc , 0x00 }, + { 0xa0acd , 0x00 }, + { 0xa0ace , 0x00 }, + { 0xa0acf , 0x00 }, + { 0xa0ae0 , 0x00 }, + { 0xa0ae1 , 0x00 }, + { 0xa0ae2 , 0xc6 }, + { 0xa0ae3 , 0xc6 }, + { 0xa0ae4 , 0xc6 }, + { 0xa0ae5 , 0xc6 }, + { 0xa0ae6 , 0xd6 }, + { 0xa0ae7 , 0xd6 }, + { 0xa0ae8 , 0xd6 }, + { 0xa0ae9 , 0xfe }, + { 0xa0aea , 0xee }, + { 0xa0aeb , 0x6c }, + { 0xa0aec , 0x00 }, + { 0xa0aed , 0x00 }, + { 0xa0aee , 0x00 }, + { 0xa0aef , 0x00 }, + { 0xa0b00 , 0x00 }, + { 0xa0b01 , 0x00 }, + { 0xa0b02 , 0xc6 }, + { 0xa0b03 , 0xc6 }, + { 0xa0b04 , 0x6c }, + { 0xa0b05 , 0x7c }, + { 0xa0b06 , 0x38 }, + { 0xa0b07 , 0x38 }, + { 0xa0b08 , 0x7c }, + { 0xa0b09 , 0x6c }, + { 0xa0b0a , 0xc6 }, + { 0xa0b0b , 0xc6 }, + { 0xa0b0c , 0x00 }, + { 0xa0b0d , 0x00 }, + { 0xa0b0e , 0x00 }, + { 0xa0b0f , 0x00 }, + { 0xa0b20 , 0x00 }, + { 0xa0b21 , 0x00 }, + { 
0xa0b22 , 0x66 }, + { 0xa0b23 , 0x66 }, + { 0xa0b24 , 0x66 }, + { 0xa0b25 , 0x66 }, + { 0xa0b26 , 0x3c }, + { 0xa0b27 , 0x18 }, + { 0xa0b28 , 0x18 }, + { 0xa0b29 , 0x18 }, + { 0xa0b2a , 0x18 }, + { 0xa0b2b , 0x3c }, + { 0xa0b2c , 0x00 }, + { 0xa0b2d , 0x00 }, + { 0xa0b2e , 0x00 }, + { 0xa0b2f , 0x00 }, + { 0xa0b40 , 0x00 }, + { 0xa0b41 , 0x00 }, + { 0xa0b42 , 0xfe }, + { 0xa0b43 , 0xc6 }, + { 0xa0b44 , 0x86 }, + { 0xa0b45 , 0x0c }, + { 0xa0b46 , 0x18 }, + { 0xa0b47 , 0x30 }, + { 0xa0b48 , 0x60 }, + { 0xa0b49 , 0xc2 }, + { 0xa0b4a , 0xc6 }, + { 0xa0b4b , 0xfe }, + { 0xa0b4c , 0x00 }, + { 0xa0b4d , 0x00 }, + { 0xa0b4e , 0x00 }, + { 0xa0b4f , 0x00 }, + { 0xa0b60 , 0x00 }, + { 0xa0b61 , 0x00 }, + { 0xa0b62 , 0x3c }, + { 0xa0b63 , 0x30 }, + { 0xa0b64 , 0x30 }, + { 0xa0b65 , 0x30 }, + { 0xa0b66 , 0x30 }, + { 0xa0b67 , 0x30 }, + { 0xa0b68 , 0x30 }, + { 0xa0b69 , 0x30 }, + { 0xa0b6a , 0x30 }, + { 0xa0b6b , 0x3c }, + { 0xa0b6c , 0x00 }, + { 0xa0b6d , 0x00 }, + { 0xa0b6e , 0x00 }, + { 0xa0b6f , 0x00 }, + { 0xa0b80 , 0x00 }, + { 0xa0b81 , 0x00 }, + { 0xa0b82 , 0x00 }, + { 0xa0b83 , 0x80 }, + { 0xa0b84 , 0xc0 }, + { 0xa0b85 , 0xe0 }, + { 0xa0b86 , 0x70 }, + { 0xa0b87 , 0x38 }, + { 0xa0b88 , 0x1c }, + { 0xa0b89 , 0x0e }, + { 0xa0b8a , 0x06 }, + { 0xa0b8b , 0x02 }, + { 0xa0b8c , 0x00 }, + { 0xa0b8d , 0x00 }, + { 0xa0b8e , 0x00 }, + { 0xa0b8f , 0x00 }, + { 0xa0ba0 , 0x00 }, + { 0xa0ba1 , 0x00 }, + { 0xa0ba2 , 0x3c }, + { 0xa0ba3 , 0x0c }, + { 0xa0ba4 , 0x0c }, + { 0xa0ba5 , 0x0c }, + { 0xa0ba6 , 0x0c }, + { 0xa0ba7 , 0x0c }, + { 0xa0ba8 , 0x0c }, + { 0xa0ba9 , 0x0c }, + { 0xa0baa , 0x0c }, + { 0xa0bab , 0x3c }, + { 0xa0bac , 0x00 }, + { 0xa0bad , 0x00 }, + { 0xa0bae , 0x00 }, + { 0xa0baf , 0x00 }, + { 0xa0bc0 , 0x10 }, + { 0xa0bc1 , 0x38 }, + { 0xa0bc2 , 0x6c }, + { 0xa0bc3 , 0xc6 }, + { 0xa0bc4 , 0x00 }, + { 0xa0bc5 , 0x00 }, + { 0xa0bc6 , 0x00 }, + { 0xa0bc7 , 0x00 }, + { 0xa0bc8 , 0x00 }, + { 0xa0bc9 , 0x00 }, + { 0xa0bca , 0x00 }, + { 0xa0bcb , 0x00 }, + { 0xa0bcc , 0x00 }, + 
{ 0xa0bcd , 0x00 }, + { 0xa0bce , 0x00 }, + { 0xa0bcf , 0x00 }, + { 0xa0be0 , 0x00 }, + { 0xa0be1 , 0x00 }, + { 0xa0be2 , 0x00 }, + { 0xa0be3 , 0x00 }, + { 0xa0be4 , 0x00 }, + { 0xa0be5 , 0x00 }, + { 0xa0be6 , 0x00 }, + { 0xa0be7 , 0x00 }, + { 0xa0be8 , 0x00 }, + { 0xa0be9 , 0x00 }, + { 0xa0bea , 0x00 }, + { 0xa0beb , 0x00 }, + { 0xa0bec , 0x00 }, + { 0xa0bed , 0xff }, + { 0xa0bee , 0x00 }, + { 0xa0bef , 0x00 }, + { 0xa0c00 , 0x30 }, + { 0xa0c01 , 0x30 }, + { 0xa0c02 , 0x18 }, + { 0xa0c03 , 0x00 }, + { 0xa0c04 , 0x00 }, + { 0xa0c05 , 0x00 }, + { 0xa0c06 , 0x00 }, + { 0xa0c07 , 0x00 }, + { 0xa0c08 , 0x00 }, + { 0xa0c09 , 0x00 }, + { 0xa0c0a , 0x00 }, + { 0xa0c0b , 0x00 }, + { 0xa0c0c , 0x00 }, + { 0xa0c0d , 0x00 }, + { 0xa0c0e , 0x00 }, + { 0xa0c0f , 0x00 }, + { 0xa0c20 , 0x00 }, + { 0xa0c21 , 0x00 }, + { 0xa0c22 , 0x00 }, + { 0xa0c23 , 0x00 }, + { 0xa0c24 , 0x00 }, + { 0xa0c25 , 0x78 }, + { 0xa0c26 , 0x0c }, + { 0xa0c27 , 0x7c }, + { 0xa0c28 , 0xcc }, + { 0xa0c29 , 0xcc }, + { 0xa0c2a , 0xcc }, + { 0xa0c2b , 0x76 }, + { 0xa0c2c , 0x00 }, + { 0xa0c2d , 0x00 }, + { 0xa0c2e , 0x00 }, + { 0xa0c2f , 0x00 }, + { 0xa0c40 , 0x00 }, + { 0xa0c41 , 0x00 }, + { 0xa0c42 , 0xe0 }, + { 0xa0c43 , 0x60 }, + { 0xa0c44 , 0x60 }, + { 0xa0c45 , 0x78 }, + { 0xa0c46 , 0x6c }, + { 0xa0c47 , 0x66 }, + { 0xa0c48 , 0x66 }, + { 0xa0c49 , 0x66 }, + { 0xa0c4a , 0x66 }, + { 0xa0c4b , 0x7c }, + { 0xa0c4c , 0x00 }, + { 0xa0c4d , 0x00 }, + { 0xa0c4e , 0x00 }, + { 0xa0c4f , 0x00 }, + { 0xa0c60 , 0x00 }, + { 0xa0c61 , 0x00 }, + { 0xa0c62 , 0x00 }, + { 0xa0c63 , 0x00 }, + { 0xa0c64 , 0x00 }, + { 0xa0c65 , 0x7c }, + { 0xa0c66 , 0xc6 }, + { 0xa0c67 , 0xc0 }, + { 0xa0c68 , 0xc0 }, + { 0xa0c69 , 0xc0 }, + { 0xa0c6a , 0xc6 }, + { 0xa0c6b , 0x7c }, + { 0xa0c6c , 0x00 }, + { 0xa0c6d , 0x00 }, + { 0xa0c6e , 0x00 }, + { 0xa0c6f , 0x00 }, + { 0xa0c80 , 0x00 }, + { 0xa0c81 , 0x00 }, + { 0xa0c82 , 0x1c }, + { 0xa0c83 , 0x0c }, + { 0xa0c84 , 0x0c }, + { 0xa0c85 , 0x3c }, + { 0xa0c86 , 0x6c }, + { 0xa0c87 , 0xcc }, 
+ { 0xa0c88 , 0xcc }, + { 0xa0c89 , 0xcc }, + { 0xa0c8a , 0xcc }, + { 0xa0c8b , 0x76 }, + { 0xa0c8c , 0x00 }, + { 0xa0c8d , 0x00 }, + { 0xa0c8e , 0x00 }, + { 0xa0c8f , 0x00 }, + { 0xa0ca0 , 0x00 }, + { 0xa0ca1 , 0x00 }, + { 0xa0ca2 , 0x00 }, + { 0xa0ca3 , 0x00 }, + { 0xa0ca4 , 0x00 }, + { 0xa0ca5 , 0x7c }, + { 0xa0ca6 , 0xc6 }, + { 0xa0ca7 , 0xfe }, + { 0xa0ca8 , 0xc0 }, + { 0xa0ca9 , 0xc0 }, + { 0xa0caa , 0xc6 }, + { 0xa0cab , 0x7c }, + { 0xa0cac , 0x00 }, + { 0xa0cad , 0x00 }, + { 0xa0cae , 0x00 }, + { 0xa0caf , 0x00 }, + { 0xa0cc0 , 0x00 }, + { 0xa0cc1 , 0x00 }, + { 0xa0cc2 , 0x38 }, + { 0xa0cc3 , 0x6c }, + { 0xa0cc4 , 0x64 }, + { 0xa0cc5 , 0x60 }, + { 0xa0cc6 , 0xf0 }, + { 0xa0cc7 , 0x60 }, + { 0xa0cc8 , 0x60 }, + { 0xa0cc9 , 0x60 }, + { 0xa0cca , 0x60 }, + { 0xa0ccb , 0xf0 }, + { 0xa0ccc , 0x00 }, + { 0xa0ccd , 0x00 }, + { 0xa0cce , 0x00 }, + { 0xa0ccf , 0x00 }, + { 0xa0ce0 , 0x00 }, + { 0xa0ce1 , 0x00 }, + { 0xa0ce2 , 0x00 }, + { 0xa0ce3 , 0x00 }, + { 0xa0ce4 , 0x00 }, + { 0xa0ce5 , 0x76 }, + { 0xa0ce6 , 0xcc }, + { 0xa0ce7 , 0xcc }, + { 0xa0ce8 , 0xcc }, + { 0xa0ce9 , 0xcc }, + { 0xa0cea , 0xcc }, + { 0xa0ceb , 0x7c }, + { 0xa0cec , 0x0c }, + { 0xa0ced , 0xcc }, + { 0xa0cee , 0x78 }, + { 0xa0cef , 0x00 }, + { 0xa0d00 , 0x00 }, + { 0xa0d01 , 0x00 }, + { 0xa0d02 , 0xe0 }, + { 0xa0d03 , 0x60 }, + { 0xa0d04 , 0x60 }, + { 0xa0d05 , 0x6c }, + { 0xa0d06 , 0x76 }, + { 0xa0d07 , 0x66 }, + { 0xa0d08 , 0x66 }, + { 0xa0d09 , 0x66 }, + { 0xa0d0a , 0x66 }, + { 0xa0d0b , 0xe6 }, + { 0xa0d0c , 0x00 }, + { 0xa0d0d , 0x00 }, + { 0xa0d0e , 0x00 }, + { 0xa0d0f , 0x00 }, + { 0xa0d20 , 0x00 }, + { 0xa0d21 , 0x00 }, + { 0xa0d22 , 0x18 }, + { 0xa0d23 , 0x18 }, + { 0xa0d24 , 0x00 }, + { 0xa0d25 , 0x38 }, + { 0xa0d26 , 0x18 }, + { 0xa0d27 , 0x18 }, + { 0xa0d28 , 0x18 }, + { 0xa0d29 , 0x18 }, + { 0xa0d2a , 0x18 }, + { 0xa0d2b , 0x3c }, + { 0xa0d2c , 0x00 }, + { 0xa0d2d , 0x00 }, + { 0xa0d2e , 0x00 }, + { 0xa0d2f , 0x00 }, + { 0xa0d40 , 0x00 }, + { 0xa0d41 , 0x00 }, + { 0xa0d42 , 0x06 
}, + { 0xa0d43 , 0x06 }, + { 0xa0d44 , 0x00 }, + { 0xa0d45 , 0x0e }, + { 0xa0d46 , 0x06 }, + { 0xa0d47 , 0x06 }, + { 0xa0d48 , 0x06 }, + { 0xa0d49 , 0x06 }, + { 0xa0d4a , 0x06 }, + { 0xa0d4b , 0x06 }, + { 0xa0d4c , 0x66 }, + { 0xa0d4d , 0x66 }, + { 0xa0d4e , 0x3c }, + { 0xa0d4f , 0x00 }, + { 0xa0d60 , 0x00 }, + { 0xa0d61 , 0x00 }, + { 0xa0d62 , 0xe0 }, + { 0xa0d63 , 0x60 }, + { 0xa0d64 , 0x60 }, + { 0xa0d65 , 0x66 }, + { 0xa0d66 , 0x6c }, + { 0xa0d67 , 0x78 }, + { 0xa0d68 , 0x78 }, + { 0xa0d69 , 0x6c }, + { 0xa0d6a , 0x66 }, + { 0xa0d6b , 0xe6 }, + { 0xa0d6c , 0x00 }, + { 0xa0d6d , 0x00 }, + { 0xa0d6e , 0x00 }, + { 0xa0d6f , 0x00 }, + { 0xa0d80 , 0x00 }, + { 0xa0d81 , 0x00 }, + { 0xa0d82 , 0x38 }, + { 0xa0d83 , 0x18 }, + { 0xa0d84 , 0x18 }, + { 0xa0d85 , 0x18 }, + { 0xa0d86 , 0x18 }, + { 0xa0d87 , 0x18 }, + { 0xa0d88 , 0x18 }, + { 0xa0d89 , 0x18 }, + { 0xa0d8a , 0x18 }, + { 0xa0d8b , 0x3c }, + { 0xa0d8c , 0x00 }, + { 0xa0d8d , 0x00 }, + { 0xa0d8e , 0x00 }, + { 0xa0d8f , 0x00 }, + { 0xa0da0 , 0x00 }, + { 0xa0da1 , 0x00 }, + { 0xa0da2 , 0x00 }, + { 0xa0da3 , 0x00 }, + { 0xa0da4 , 0x00 }, + { 0xa0da5 , 0xec }, + { 0xa0da6 , 0xfe }, + { 0xa0da7 , 0xd6 }, + { 0xa0da8 , 0xd6 }, + { 0xa0da9 , 0xd6 }, + { 0xa0daa , 0xd6 }, + { 0xa0dab , 0xc6 }, + { 0xa0dac , 0x00 }, + { 0xa0dad , 0x00 }, + { 0xa0dae , 0x00 }, + { 0xa0daf , 0x00 }, + { 0xa0dc0 , 0x00 }, + { 0xa0dc1 , 0x00 }, + { 0xa0dc2 , 0x00 }, + { 0xa0dc3 , 0x00 }, + { 0xa0dc4 , 0x00 }, + { 0xa0dc5 , 0xdc }, + { 0xa0dc6 , 0x66 }, + { 0xa0dc7 , 0x66 }, + { 0xa0dc8 , 0x66 }, + { 0xa0dc9 , 0x66 }, + { 0xa0dca , 0x66 }, + { 0xa0dcb , 0x66 }, + { 0xa0dcc , 0x00 }, + { 0xa0dcd , 0x00 }, + { 0xa0dce , 0x00 }, + { 0xa0dcf , 0x00 }, + { 0xa0de0 , 0x00 }, + { 0xa0de1 , 0x00 }, + { 0xa0de2 , 0x00 }, + { 0xa0de3 , 0x00 }, + { 0xa0de4 , 0x00 }, + { 0xa0de5 , 0x7c }, + { 0xa0de6 , 0xc6 }, + { 0xa0de7 , 0xc6 }, + { 0xa0de8 , 0xc6 }, + { 0xa0de9 , 0xc6 }, + { 0xa0dea , 0xc6 }, + { 0xa0deb , 0x7c }, + { 0xa0dec , 0x00 }, + { 0xa0ded , 
0x00 }, + { 0xa0dee , 0x00 }, + { 0xa0def , 0x00 }, + { 0xa0e00 , 0x00 }, + { 0xa0e01 , 0x00 }, + { 0xa0e02 , 0x00 }, + { 0xa0e03 , 0x00 }, + { 0xa0e04 , 0x00 }, + { 0xa0e05 , 0xdc }, + { 0xa0e06 , 0x66 }, + { 0xa0e07 , 0x66 }, + { 0xa0e08 , 0x66 }, + { 0xa0e09 , 0x66 }, + { 0xa0e0a , 0x66 }, + { 0xa0e0b , 0x7c }, + { 0xa0e0c , 0x60 }, + { 0xa0e0d , 0x60 }, + { 0xa0e0e , 0xf0 }, + { 0xa0e0f , 0x00 }, + { 0xa0e20 , 0x00 }, + { 0xa0e21 , 0x00 }, + { 0xa0e22 , 0x00 }, + { 0xa0e23 , 0x00 }, + { 0xa0e24 , 0x00 }, + { 0xa0e25 , 0x76 }, + { 0xa0e26 , 0xcc }, + { 0xa0e27 , 0xcc }, + { 0xa0e28 , 0xcc }, + { 0xa0e29 , 0xcc }, + { 0xa0e2a , 0xcc }, + { 0xa0e2b , 0x7c }, + { 0xa0e2c , 0x0c }, + { 0xa0e2d , 0x0c }, + { 0xa0e2e , 0x1e }, + { 0xa0e2f , 0x00 }, + { 0xa0e40 , 0x00 }, + { 0xa0e41 , 0x00 }, + { 0xa0e42 , 0x00 }, + { 0xa0e43 , 0x00 }, + { 0xa0e44 , 0x00 }, + { 0xa0e45 , 0xdc }, + { 0xa0e46 , 0x76 }, + { 0xa0e47 , 0x66 }, + { 0xa0e48 , 0x60 }, + { 0xa0e49 , 0x60 }, + { 0xa0e4a , 0x60 }, + { 0xa0e4b , 0xf0 }, + { 0xa0e4c , 0x00 }, + { 0xa0e4d , 0x00 }, + { 0xa0e4e , 0x00 }, + { 0xa0e4f , 0x00 }, + { 0xa0e60 , 0x00 }, + { 0xa0e61 , 0x00 }, + { 0xa0e62 , 0x00 }, + { 0xa0e63 , 0x00 }, + { 0xa0e64 , 0x00 }, + { 0xa0e65 , 0x7c }, + { 0xa0e66 , 0xc6 }, + { 0xa0e67 , 0x60 }, + { 0xa0e68 , 0x38 }, + { 0xa0e69 , 0x0c }, + { 0xa0e6a , 0xc6 }, + { 0xa0e6b , 0x7c }, + { 0xa0e6c , 0x00 }, + { 0xa0e6d , 0x00 }, + { 0xa0e6e , 0x00 }, + { 0xa0e6f , 0x00 }, + { 0xa0e80 , 0x00 }, + { 0xa0e81 , 0x00 }, + { 0xa0e82 , 0x10 }, + { 0xa0e83 , 0x30 }, + { 0xa0e84 , 0x30 }, + { 0xa0e85 , 0xfc }, + { 0xa0e86 , 0x30 }, + { 0xa0e87 , 0x30 }, + { 0xa0e88 , 0x30 }, + { 0xa0e89 , 0x30 }, + { 0xa0e8a , 0x36 }, + { 0xa0e8b , 0x1c }, + { 0xa0e8c , 0x00 }, + { 0xa0e8d , 0x00 }, + { 0xa0e8e , 0x00 }, + { 0xa0e8f , 0x00 }, + { 0xa0ea0 , 0x00 }, + { 0xa0ea1 , 0x00 }, + { 0xa0ea2 , 0x00 }, + { 0xa0ea3 , 0x00 }, + { 0xa0ea4 , 0x00 }, + { 0xa0ea5 , 0xcc }, + { 0xa0ea6 , 0xcc }, + { 0xa0ea7 , 0xcc }, + { 0xa0ea8 
, 0xcc }, + { 0xa0ea9 , 0xcc }, + { 0xa0eaa , 0xcc }, + { 0xa0eab , 0x76 }, + { 0xa0eac , 0x00 }, + { 0xa0ead , 0x00 }, + { 0xa0eae , 0x00 }, + { 0xa0eaf , 0x00 }, + { 0xa0ec0 , 0x00 }, + { 0xa0ec1 , 0x00 }, + { 0xa0ec2 , 0x00 }, + { 0xa0ec3 , 0x00 }, + { 0xa0ec4 , 0x00 }, + { 0xa0ec5 , 0x66 }, + { 0xa0ec6 , 0x66 }, + { 0xa0ec7 , 0x66 }, + { 0xa0ec8 , 0x66 }, + { 0xa0ec9 , 0x66 }, + { 0xa0eca , 0x3c }, + { 0xa0ecb , 0x18 }, + { 0xa0ecc , 0x00 }, + { 0xa0ecd , 0x00 }, + { 0xa0ece , 0x00 }, + { 0xa0ecf , 0x00 }, + { 0xa0ee0 , 0x00 }, + { 0xa0ee1 , 0x00 }, + { 0xa0ee2 , 0x00 }, + { 0xa0ee3 , 0x00 }, + { 0xa0ee4 , 0x00 }, + { 0xa0ee5 , 0xc6 }, + { 0xa0ee6 , 0xc6 }, + { 0xa0ee7 , 0xd6 }, + { 0xa0ee8 , 0xd6 }, + { 0xa0ee9 , 0xd6 }, + { 0xa0eea , 0xfe }, + { 0xa0eeb , 0x6c }, + { 0xa0eec , 0x00 }, + { 0xa0eed , 0x00 }, + { 0xa0eee , 0x00 }, + { 0xa0eef , 0x00 }, + { 0xa0f00 , 0x00 }, + { 0xa0f01 , 0x00 }, + { 0xa0f02 , 0x00 }, + { 0xa0f03 , 0x00 }, + { 0xa0f04 , 0x00 }, + { 0xa0f05 , 0xc6 }, + { 0xa0f06 , 0x6c }, + { 0xa0f07 , 0x38 }, + { 0xa0f08 , 0x38 }, + { 0xa0f09 , 0x38 }, + { 0xa0f0a , 0x6c }, + { 0xa0f0b , 0xc6 }, + { 0xa0f0c , 0x00 }, + { 0xa0f0d , 0x00 }, + { 0xa0f0e , 0x00 }, + { 0xa0f0f , 0x00 }, + { 0xa0f20 , 0x00 }, + { 0xa0f21 , 0x00 }, + { 0xa0f22 , 0x00 }, + { 0xa0f23 , 0x00 }, + { 0xa0f24 , 0x00 }, + { 0xa0f25 , 0xc6 }, + { 0xa0f26 , 0xc6 }, + { 0xa0f27 , 0xc6 }, + { 0xa0f28 , 0xc6 }, + { 0xa0f29 , 0xc6 }, + { 0xa0f2a , 0xc6 }, + { 0xa0f2b , 0x7e }, + { 0xa0f2c , 0x06 }, + { 0xa0f2d , 0x0c }, + { 0xa0f2e , 0xf8 }, + { 0xa0f2f , 0x00 }, + { 0xa0f40 , 0x00 }, + { 0xa0f41 , 0x00 }, + { 0xa0f42 , 0x00 }, + { 0xa0f43 , 0x00 }, + { 0xa0f44 , 0x00 }, + { 0xa0f45 , 0xfe }, + { 0xa0f46 , 0xcc }, + { 0xa0f47 , 0x18 }, + { 0xa0f48 , 0x30 }, + { 0xa0f49 , 0x60 }, + { 0xa0f4a , 0xc6 }, + { 0xa0f4b , 0xfe }, + { 0xa0f4c , 0x00 }, + { 0xa0f4d , 0x00 }, + { 0xa0f4e , 0x00 }, + { 0xa0f4f , 0x00 }, + { 0xa0f60 , 0x00 }, + { 0xa0f61 , 0x00 }, + { 0xa0f62 , 0x0e }, + { 
0xa0f63 , 0x18 }, + { 0xa0f64 , 0x18 }, + { 0xa0f65 , 0x18 }, + { 0xa0f66 , 0x70 }, + { 0xa0f67 , 0x18 }, + { 0xa0f68 , 0x18 }, + { 0xa0f69 , 0x18 }, + { 0xa0f6a , 0x18 }, + { 0xa0f6b , 0x0e }, + { 0xa0f6c , 0x00 }, + { 0xa0f6d , 0x00 }, + { 0xa0f6e , 0x00 }, + { 0xa0f6f , 0x00 }, + { 0xa0f80 , 0x00 }, + { 0xa0f81 , 0x00 }, + { 0xa0f82 , 0x18 }, + { 0xa0f83 , 0x18 }, + { 0xa0f84 , 0x18 }, + { 0xa0f85 , 0x18 }, + { 0xa0f86 , 0x00 }, + { 0xa0f87 , 0x18 }, + { 0xa0f88 , 0x18 }, + { 0xa0f89 , 0x18 }, + { 0xa0f8a , 0x18 }, + { 0xa0f8b , 0x18 }, + { 0xa0f8c , 0x00 }, + { 0xa0f8d , 0x00 }, + { 0xa0f8e , 0x00 }, + { 0xa0f8f , 0x00 }, + { 0xa0fa0 , 0x00 }, + { 0xa0fa1 , 0x00 }, + { 0xa0fa2 , 0x70 }, + { 0xa0fa3 , 0x18 }, + { 0xa0fa4 , 0x18 }, + { 0xa0fa5 , 0x18 }, + { 0xa0fa6 , 0x0e }, + { 0xa0fa7 , 0x18 }, + { 0xa0fa8 , 0x18 }, + { 0xa0fa9 , 0x18 }, + { 0xa0faa , 0x18 }, + { 0xa0fab , 0x70 }, + { 0xa0fac , 0x00 }, + { 0xa0fad , 0x00 }, + { 0xa0fae , 0x00 }, + { 0xa0faf , 0x00 }, + { 0xa0fc0 , 0x00 }, + { 0xa0fc1 , 0x00 }, + { 0xa0fc2 , 0x76 }, + { 0xa0fc3 , 0xdc }, + { 0xa0fc4 , 0x00 }, + { 0xa0fc5 , 0x00 }, + { 0xa0fc6 , 0x00 }, + { 0xa0fc7 , 0x00 }, + { 0xa0fc8 , 0x00 }, + { 0xa0fc9 , 0x00 }, + { 0xa0fca , 0x00 }, + { 0xa0fcb , 0x00 }, + { 0xa0fcc , 0x00 }, + { 0xa0fcd , 0x00 }, + { 0xa0fce , 0x00 }, + { 0xa0fcf , 0x00 }, + { 0xa0fe0 , 0x00 }, + { 0xa0fe1 , 0x00 }, + { 0xa0fe2 , 0x00 }, + { 0xa0fe3 , 0x00 }, + { 0xa0fe4 , 0x10 }, + { 0xa0fe5 , 0x38 }, + { 0xa0fe6 , 0x6c }, + { 0xa0fe7 , 0xc6 }, + { 0xa0fe8 , 0xc6 }, + { 0xa0fe9 , 0xc6 }, + { 0xa0fea , 0xfe }, + { 0xa0feb , 0x00 }, + { 0xa0fec , 0x00 }, + { 0xa0fed , 0x00 }, + { 0xa0fee , 0x00 }, + { 0xa0fef , 0x00 }, + { 0xa1000 , 0x00 }, + { 0xa1001 , 0x00 }, + { 0xa1002 , 0x3c }, + { 0xa1003 , 0x66 }, + { 0xa1004 , 0xc2 }, + { 0xa1005 , 0xc0 }, + { 0xa1006 , 0xc0 }, + { 0xa1007 , 0xc0 }, + { 0xa1008 , 0xc2 }, + { 0xa1009 , 0x66 }, + { 0xa100a , 0x3c }, + { 0xa100b , 0x0c }, + { 0xa100c , 0x06 }, + { 0xa100d , 0x7c }, + 
{ 0xa100e , 0x00 }, + { 0xa100f , 0x00 }, + { 0xa1020 , 0x00 }, + { 0xa1021 , 0x00 }, + { 0xa1022 , 0xcc }, + { 0xa1023 , 0x00 }, + { 0xa1024 , 0x00 }, + { 0xa1025 , 0xcc }, + { 0xa1026 , 0xcc }, + { 0xa1027 , 0xcc }, + { 0xa1028 , 0xcc }, + { 0xa1029 , 0xcc }, + { 0xa102a , 0xcc }, + { 0xa102b , 0x76 }, + { 0xa102c , 0x00 }, + { 0xa102d , 0x00 }, + { 0xa102e , 0x00 }, + { 0xa102f , 0x00 }, + { 0xa1040 , 0x00 }, + { 0xa1041 , 0x0c }, + { 0xa1042 , 0x18 }, + { 0xa1043 , 0x30 }, + { 0xa1044 , 0x00 }, + { 0xa1045 , 0x7c }, + { 0xa1046 , 0xc6 }, + { 0xa1047 , 0xfe }, + { 0xa1048 , 0xc0 }, + { 0xa1049 , 0xc0 }, + { 0xa104a , 0xc6 }, + { 0xa104b , 0x7c }, + { 0xa104c , 0x00 }, + { 0xa104d , 0x00 }, + { 0xa104e , 0x00 }, + { 0xa104f , 0x00 }, + { 0xa1060 , 0x00 }, + { 0xa1061 , 0x10 }, + { 0xa1062 , 0x38 }, + { 0xa1063 , 0x6c }, + { 0xa1064 , 0x00 }, + { 0xa1065 , 0x78 }, + { 0xa1066 , 0x0c }, + { 0xa1067 , 0x7c }, + { 0xa1068 , 0xcc }, + { 0xa1069 , 0xcc }, + { 0xa106a , 0xcc }, + { 0xa106b , 0x76 }, + { 0xa106c , 0x00 }, + { 0xa106d , 0x00 }, + { 0xa106e , 0x00 }, + { 0xa106f , 0x00 }, + { 0xa1080 , 0x00 }, + { 0xa1081 , 0x00 }, + { 0xa1082 , 0xcc }, + { 0xa1083 , 0x00 }, + { 0xa1084 , 0x00 }, + { 0xa1085 , 0x78 }, + { 0xa1086 , 0x0c }, + { 0xa1087 , 0x7c }, + { 0xa1088 , 0xcc }, + { 0xa1089 , 0xcc }, + { 0xa108a , 0xcc }, + { 0xa108b , 0x76 }, + { 0xa108c , 0x00 }, + { 0xa108d , 0x00 }, + { 0xa108e , 0x00 }, + { 0xa108f , 0x00 }, + { 0xa10a0 , 0x00 }, + { 0xa10a1 , 0x60 }, + { 0xa10a2 , 0x30 }, + { 0xa10a3 , 0x18 }, + { 0xa10a4 , 0x00 }, + { 0xa10a5 , 0x78 }, + { 0xa10a6 , 0x0c }, + { 0xa10a7 , 0x7c }, + { 0xa10a8 , 0xcc }, + { 0xa10a9 , 0xcc }, + { 0xa10aa , 0xcc }, + { 0xa10ab , 0x76 }, + { 0xa10ac , 0x00 }, + { 0xa10ad , 0x00 }, + { 0xa10ae , 0x00 }, + { 0xa10af , 0x00 }, + { 0xa10c0 , 0x00 }, + { 0xa10c1 , 0x38 }, + { 0xa10c2 , 0x6c }, + { 0xa10c3 , 0x38 }, + { 0xa10c4 , 0x00 }, + { 0xa10c5 , 0x78 }, + { 0xa10c6 , 0x0c }, + { 0xa10c7 , 0x7c }, + { 0xa10c8 , 0xcc }, 
+ { 0xa10c9 , 0xcc }, + { 0xa10ca , 0xcc }, + { 0xa10cb , 0x76 }, + { 0xa10cc , 0x00 }, + { 0xa10cd , 0x00 }, + { 0xa10ce , 0x00 }, + { 0xa10cf , 0x00 }, + { 0xa10e0 , 0x00 }, + { 0xa10e1 , 0x00 }, + { 0xa10e2 , 0x00 }, + { 0xa10e3 , 0x00 }, + { 0xa10e4 , 0x3c }, + { 0xa10e5 , 0x66 }, + { 0xa10e6 , 0x60 }, + { 0xa10e7 , 0x60 }, + { 0xa10e8 , 0x66 }, + { 0xa10e9 , 0x3c }, + { 0xa10ea , 0x0c }, + { 0xa10eb , 0x06 }, + { 0xa10ec , 0x3c }, + { 0xa10ed , 0x00 }, + { 0xa10ee , 0x00 }, + { 0xa10ef , 0x00 }, + { 0xa1100 , 0x00 }, + { 0xa1101 , 0x10 }, + { 0xa1102 , 0x38 }, + { 0xa1103 , 0x6c }, + { 0xa1104 , 0x00 }, + { 0xa1105 , 0x7c }, + { 0xa1106 , 0xc6 }, + { 0xa1107 , 0xfe }, + { 0xa1108 , 0xc0 }, + { 0xa1109 , 0xc0 }, + { 0xa110a , 0xc6 }, + { 0xa110b , 0x7c }, + { 0xa110c , 0x00 }, + { 0xa110d , 0x00 }, + { 0xa110e , 0x00 }, + { 0xa110f , 0x00 }, + { 0xa1120 , 0x00 }, + { 0xa1121 , 0x00 }, + { 0xa1122 , 0xc6 }, + { 0xa1123 , 0x00 }, + { 0xa1124 , 0x00 }, + { 0xa1125 , 0x7c }, + { 0xa1126 , 0xc6 }, + { 0xa1127 , 0xfe }, + { 0xa1128 , 0xc0 }, + { 0xa1129 , 0xc0 }, + { 0xa112a , 0xc6 }, + { 0xa112b , 0x7c }, + { 0xa112c , 0x00 }, + { 0xa112d , 0x00 }, + { 0xa112e , 0x00 }, + { 0xa112f , 0x00 }, + { 0xa1140 , 0x00 }, + { 0xa1141 , 0x60 }, + { 0xa1142 , 0x30 }, + { 0xa1143 , 0x18 }, + { 0xa1144 , 0x00 }, + { 0xa1145 , 0x7c }, + { 0xa1146 , 0xc6 }, + { 0xa1147 , 0xfe }, + { 0xa1148 , 0xc0 }, + { 0xa1149 , 0xc0 }, + { 0xa114a , 0xc6 }, + { 0xa114b , 0x7c }, + { 0xa114c , 0x00 }, + { 0xa114d , 0x00 }, + { 0xa114e , 0x00 }, + { 0xa114f , 0x00 }, + { 0xa1160 , 0x00 }, + { 0xa1161 , 0x00 }, + { 0xa1162 , 0x66 }, + { 0xa1163 , 0x00 }, + { 0xa1164 , 0x00 }, + { 0xa1165 , 0x38 }, + { 0xa1166 , 0x18 }, + { 0xa1167 , 0x18 }, + { 0xa1168 , 0x18 }, + { 0xa1169 , 0x18 }, + { 0xa116a , 0x18 }, + { 0xa116b , 0x3c }, + { 0xa116c , 0x00 }, + { 0xa116d , 0x00 }, + { 0xa116e , 0x00 }, + { 0xa116f , 0x00 }, + { 0xa1180 , 0x00 }, + { 0xa1181 , 0x18 }, + { 0xa1182 , 0x3c }, + { 0xa1183 , 0x66 
}, + { 0xa1184 , 0x00 }, + { 0xa1185 , 0x38 }, + { 0xa1186 , 0x18 }, + { 0xa1187 , 0x18 }, + { 0xa1188 , 0x18 }, + { 0xa1189 , 0x18 }, + { 0xa118a , 0x18 }, + { 0xa118b , 0x3c }, + { 0xa118c , 0x00 }, + { 0xa118d , 0x00 }, + { 0xa118e , 0x00 }, + { 0xa118f , 0x00 }, + { 0xa11a0 , 0x00 }, + { 0xa11a1 , 0x60 }, + { 0xa11a2 , 0x30 }, + { 0xa11a3 , 0x18 }, + { 0xa11a4 , 0x00 }, + { 0xa11a5 , 0x38 }, + { 0xa11a6 , 0x18 }, + { 0xa11a7 , 0x18 }, + { 0xa11a8 , 0x18 }, + { 0xa11a9 , 0x18 }, + { 0xa11aa , 0x18 }, + { 0xa11ab , 0x3c }, + { 0xa11ac , 0x00 }, + { 0xa11ad , 0x00 }, + { 0xa11ae , 0x00 }, + { 0xa11af , 0x00 }, + { 0xa11c0 , 0x00 }, + { 0xa11c1 , 0xc6 }, + { 0xa11c2 , 0x00 }, + { 0xa11c3 , 0x10 }, + { 0xa11c4 , 0x38 }, + { 0xa11c5 , 0x6c }, + { 0xa11c6 , 0xc6 }, + { 0xa11c7 , 0xc6 }, + { 0xa11c8 , 0xfe }, + { 0xa11c9 , 0xc6 }, + { 0xa11ca , 0xc6 }, + { 0xa11cb , 0xc6 }, + { 0xa11cc , 0x00 }, + { 0xa11cd , 0x00 }, + { 0xa11ce , 0x00 }, + { 0xa11cf , 0x00 }, + { 0xa11e0 , 0x38 }, + { 0xa11e1 , 0x6c }, + { 0xa11e2 , 0x38 }, + { 0xa11e3 , 0x00 }, + { 0xa11e4 , 0x38 }, + { 0xa11e5 , 0x6c }, + { 0xa11e6 , 0xc6 }, + { 0xa11e7 , 0xc6 }, + { 0xa11e8 , 0xfe }, + { 0xa11e9 , 0xc6 }, + { 0xa11ea , 0xc6 }, + { 0xa11eb , 0xc6 }, + { 0xa11ec , 0x00 }, + { 0xa11ed , 0x00 }, + { 0xa11ee , 0x00 }, + { 0xa11ef , 0x00 }, + { 0xa1200 , 0x18 }, + { 0xa1201 , 0x30 }, + { 0xa1202 , 0x60 }, + { 0xa1203 , 0x00 }, + { 0xa1204 , 0xfe }, + { 0xa1205 , 0x66 }, + { 0xa1206 , 0x60 }, + { 0xa1207 , 0x7c }, + { 0xa1208 , 0x60 }, + { 0xa1209 , 0x60 }, + { 0xa120a , 0x66 }, + { 0xa120b , 0xfe }, + { 0xa120c , 0x00 }, + { 0xa120d , 0x00 }, + { 0xa120e , 0x00 }, + { 0xa120f , 0x00 }, + { 0xa1220 , 0x00 }, + { 0xa1221 , 0x00 }, + { 0xa1222 , 0x00 }, + { 0xa1223 , 0x00 }, + { 0xa1224 , 0x00 }, + { 0xa1225 , 0xcc }, + { 0xa1226 , 0x76 }, + { 0xa1227 , 0x36 }, + { 0xa1228 , 0x7e }, + { 0xa1229 , 0xd8 }, + { 0xa122a , 0xd8 }, + { 0xa122b , 0x6e }, + { 0xa122c , 0x00 }, + { 0xa122d , 0x00 }, + { 0xa122e , 
0x00 }, + { 0xa122f , 0x00 }, + { 0xa1240 , 0x00 }, + { 0xa1241 , 0x00 }, + { 0xa1242 , 0x3e }, + { 0xa1243 , 0x6c }, + { 0xa1244 , 0xcc }, + { 0xa1245 , 0xcc }, + { 0xa1246 , 0xfe }, + { 0xa1247 , 0xcc }, + { 0xa1248 , 0xcc }, + { 0xa1249 , 0xcc }, + { 0xa124a , 0xcc }, + { 0xa124b , 0xce }, + { 0xa124c , 0x00 }, + { 0xa124d , 0x00 }, + { 0xa124e , 0x00 }, + { 0xa124f , 0x00 }, + { 0xa1260 , 0x00 }, + { 0xa1261 , 0x10 }, + { 0xa1262 , 0x38 }, + { 0xa1263 , 0x6c }, + { 0xa1264 , 0x00 }, + { 0xa1265 , 0x7c }, + { 0xa1266 , 0xc6 }, + { 0xa1267 , 0xc6 }, + { 0xa1268 , 0xc6 }, + { 0xa1269 , 0xc6 }, + { 0xa126a , 0xc6 }, + { 0xa126b , 0x7c }, + { 0xa126c , 0x00 }, + { 0xa126d , 0x00 }, + { 0xa126e , 0x00 }, + { 0xa126f , 0x00 }, + { 0xa1280 , 0x00 }, + { 0xa1281 , 0x00 }, + { 0xa1282 , 0xc6 }, + { 0xa1283 , 0x00 }, + { 0xa1284 , 0x00 }, + { 0xa1285 , 0x7c }, + { 0xa1286 , 0xc6 }, + { 0xa1287 , 0xc6 }, + { 0xa1288 , 0xc6 }, + { 0xa1289 , 0xc6 }, + { 0xa128a , 0xc6 }, + { 0xa128b , 0x7c }, + { 0xa128c , 0x00 }, + { 0xa128d , 0x00 }, + { 0xa128e , 0x00 }, + { 0xa128f , 0x00 }, + { 0xa12a0 , 0x00 }, + { 0xa12a1 , 0x60 }, + { 0xa12a2 , 0x30 }, + { 0xa12a3 , 0x18 }, + { 0xa12a4 , 0x00 }, + { 0xa12a5 , 0x7c }, + { 0xa12a6 , 0xc6 }, + { 0xa12a7 , 0xc6 }, + { 0xa12a8 , 0xc6 }, + { 0xa12a9 , 0xc6 }, + { 0xa12aa , 0xc6 }, + { 0xa12ab , 0x7c }, + { 0xa12ac , 0x00 }, + { 0xa12ad , 0x00 }, + { 0xa12ae , 0x00 }, + { 0xa12af , 0x00 }, + { 0xa12c0 , 0x00 }, + { 0xa12c1 , 0x30 }, + { 0xa12c2 , 0x78 }, + { 0xa12c3 , 0xcc }, + { 0xa12c4 , 0x00 }, + { 0xa12c5 , 0xcc }, + { 0xa12c6 , 0xcc }, + { 0xa12c7 , 0xcc }, + { 0xa12c8 , 0xcc }, + { 0xa12c9 , 0xcc }, + { 0xa12ca , 0xcc }, + { 0xa12cb , 0x76 }, + { 0xa12cc , 0x00 }, + { 0xa12cd , 0x00 }, + { 0xa12ce , 0x00 }, + { 0xa12cf , 0x00 }, + { 0xa12e0 , 0x00 }, + { 0xa12e1 , 0x60 }, + { 0xa12e2 , 0x30 }, + { 0xa12e3 , 0x18 }, + { 0xa12e4 , 0x00 }, + { 0xa12e5 , 0xcc }, + { 0xa12e6 , 0xcc }, + { 0xa12e7 , 0xcc }, + { 0xa12e8 , 0xcc }, + { 0xa12e9 
, 0xcc }, + { 0xa12ea , 0xcc }, + { 0xa12eb , 0x76 }, + { 0xa12ec , 0x00 }, + { 0xa12ed , 0x00 }, + { 0xa12ee , 0x00 }, + { 0xa12ef , 0x00 }, + { 0xa1300 , 0x00 }, + { 0xa1301 , 0x00 }, + { 0xa1302 , 0xc6 }, + { 0xa1303 , 0x00 }, + { 0xa1304 , 0x00 }, + { 0xa1305 , 0xc6 }, + { 0xa1306 , 0xc6 }, + { 0xa1307 , 0xc6 }, + { 0xa1308 , 0xc6 }, + { 0xa1309 , 0xc6 }, + { 0xa130a , 0xc6 }, + { 0xa130b , 0x7e }, + { 0xa130c , 0x06 }, + { 0xa130d , 0x0c }, + { 0xa130e , 0x78 }, + { 0xa130f , 0x00 }, + { 0xa1320 , 0x00 }, + { 0xa1321 , 0xc6 }, + { 0xa1322 , 0x00 }, + { 0xa1323 , 0x7c }, + { 0xa1324 , 0xc6 }, + { 0xa1325 , 0xc6 }, + { 0xa1326 , 0xc6 }, + { 0xa1327 , 0xc6 }, + { 0xa1328 , 0xc6 }, + { 0xa1329 , 0xc6 }, + { 0xa132a , 0xc6 }, + { 0xa132b , 0x7c }, + { 0xa132c , 0x00 }, + { 0xa132d , 0x00 }, + { 0xa132e , 0x00 }, + { 0xa132f , 0x00 }, + { 0xa1340 , 0x00 }, + { 0xa1341 , 0xc6 }, + { 0xa1342 , 0x00 }, + { 0xa1343 , 0xc6 }, + { 0xa1344 , 0xc6 }, + { 0xa1345 , 0xc6 }, + { 0xa1346 , 0xc6 }, + { 0xa1347 , 0xc6 }, + { 0xa1348 , 0xc6 }, + { 0xa1349 , 0xc6 }, + { 0xa134a , 0xc6 }, + { 0xa134b , 0x7c }, + { 0xa134c , 0x00 }, + { 0xa134d , 0x00 }, + { 0xa134e , 0x00 }, + { 0xa134f , 0x00 }, + { 0xa1360 , 0x00 }, + { 0xa1361 , 0x18 }, + { 0xa1362 , 0x18 }, + { 0xa1363 , 0x3c }, + { 0xa1364 , 0x66 }, + { 0xa1365 , 0x60 }, + { 0xa1366 , 0x60 }, + { 0xa1367 , 0x60 }, + { 0xa1368 , 0x66 }, + { 0xa1369 , 0x3c }, + { 0xa136a , 0x18 }, + { 0xa136b , 0x18 }, + { 0xa136c , 0x00 }, + { 0xa136d , 0x00 }, + { 0xa136e , 0x00 }, + { 0xa136f , 0x00 }, + { 0xa1380 , 0x00 }, + { 0xa1381 , 0x38 }, + { 0xa1382 , 0x6c }, + { 0xa1383 , 0x64 }, + { 0xa1384 , 0x60 }, + { 0xa1385 , 0xf0 }, + { 0xa1386 , 0x60 }, + { 0xa1387 , 0x60 }, + { 0xa1388 , 0x60 }, + { 0xa1389 , 0x60 }, + { 0xa138a , 0xe6 }, + { 0xa138b , 0xfc }, + { 0xa138c , 0x00 }, + { 0xa138d , 0x00 }, + { 0xa138e , 0x00 }, + { 0xa138f , 0x00 }, + { 0xa13a0 , 0x00 }, + { 0xa13a1 , 0x00 }, + { 0xa13a2 , 0x66 }, + { 0xa13a3 , 0x66 }, + { 
0xa13a4 , 0x3c }, + { 0xa13a5 , 0x18 }, + { 0xa13a6 , 0x7e }, + { 0xa13a7 , 0x18 }, + { 0xa13a8 , 0x7e }, + { 0xa13a9 , 0x18 }, + { 0xa13aa , 0x18 }, + { 0xa13ab , 0x18 }, + { 0xa13ac , 0x00 }, + { 0xa13ad , 0x00 }, + { 0xa13ae , 0x00 }, + { 0xa13af , 0x00 }, + { 0xa13c0 , 0x00 }, + { 0xa13c1 , 0xf8 }, + { 0xa13c2 , 0xcc }, + { 0xa13c3 , 0xcc }, + { 0xa13c4 , 0xf8 }, + { 0xa13c5 , 0xc4 }, + { 0xa13c6 , 0xcc }, + { 0xa13c7 , 0xde }, + { 0xa13c8 , 0xcc }, + { 0xa13c9 , 0xcc }, + { 0xa13ca , 0xcc }, + { 0xa13cb , 0xc6 }, + { 0xa13cc , 0x00 }, + { 0xa13cd , 0x00 }, + { 0xa13ce , 0x00 }, + { 0xa13cf , 0x00 }, + { 0xa13e0 , 0x00 }, + { 0xa13e1 , 0x0e }, + { 0xa13e2 , 0x1b }, + { 0xa13e3 , 0x18 }, + { 0xa13e4 , 0x18 }, + { 0xa13e5 , 0x18 }, + { 0xa13e6 , 0x7e }, + { 0xa13e7 , 0x18 }, + { 0xa13e8 , 0x18 }, + { 0xa13e9 , 0x18 }, + { 0xa13ea , 0x18 }, + { 0xa13eb , 0x18 }, + { 0xa13ec , 0xd8 }, + { 0xa13ed , 0x70 }, + { 0xa13ee , 0x00 }, + { 0xa13ef , 0x00 }, + { 0xa1400 , 0x00 }, + { 0xa1401 , 0x18 }, + { 0xa1402 , 0x30 }, + { 0xa1403 , 0x60 }, + { 0xa1404 , 0x00 }, + { 0xa1405 , 0x78 }, + { 0xa1406 , 0x0c }, + { 0xa1407 , 0x7c }, + { 0xa1408 , 0xcc }, + { 0xa1409 , 0xcc }, + { 0xa140a , 0xcc }, + { 0xa140b , 0x76 }, + { 0xa140c , 0x00 }, + { 0xa140d , 0x00 }, + { 0xa140e , 0x00 }, + { 0xa140f , 0x00 }, + { 0xa1420 , 0x00 }, + { 0xa1421 , 0x0c }, + { 0xa1422 , 0x18 }, + { 0xa1423 , 0x30 }, + { 0xa1424 , 0x00 }, + { 0xa1425 , 0x38 }, + { 0xa1426 , 0x18 }, + { 0xa1427 , 0x18 }, + { 0xa1428 , 0x18 }, + { 0xa1429 , 0x18 }, + { 0xa142a , 0x18 }, + { 0xa142b , 0x3c }, + { 0xa142c , 0x00 }, + { 0xa142d , 0x00 }, + { 0xa142e , 0x00 }, + { 0xa142f , 0x00 }, + { 0xa1440 , 0x00 }, + { 0xa1441 , 0x18 }, + { 0xa1442 , 0x30 }, + { 0xa1443 , 0x60 }, + { 0xa1444 , 0x00 }, + { 0xa1445 , 0x7c }, + { 0xa1446 , 0xc6 }, + { 0xa1447 , 0xc6 }, + { 0xa1448 , 0xc6 }, + { 0xa1449 , 0xc6 }, + { 0xa144a , 0xc6 }, + { 0xa144b , 0x7c }, + { 0xa144c , 0x00 }, + { 0xa144d , 0x00 }, + { 0xa144e , 0x00 }, + 
{ 0xa144f , 0x00 }, + { 0xa1460 , 0x00 }, + { 0xa1461 , 0x18 }, + { 0xa1462 , 0x30 }, + { 0xa1463 , 0x60 }, + { 0xa1464 , 0x00 }, + { 0xa1465 , 0xcc }, + { 0xa1466 , 0xcc }, + { 0xa1467 , 0xcc }, + { 0xa1468 , 0xcc }, + { 0xa1469 , 0xcc }, + { 0xa146a , 0xcc }, + { 0xa146b , 0x76 }, + { 0xa146c , 0x00 }, + { 0xa146d , 0x00 }, + { 0xa146e , 0x00 }, + { 0xa146f , 0x00 }, + { 0xa1480 , 0x00 }, + { 0xa1481 , 0x00 }, + { 0xa1482 , 0x76 }, + { 0xa1483 , 0xdc }, + { 0xa1484 , 0x00 }, + { 0xa1485 , 0xdc }, + { 0xa1486 , 0x66 }, + { 0xa1487 , 0x66 }, + { 0xa1488 , 0x66 }, + { 0xa1489 , 0x66 }, + { 0xa148a , 0x66 }, + { 0xa148b , 0x66 }, + { 0xa148c , 0x00 }, + { 0xa148d , 0x00 }, + { 0xa148e , 0x00 }, + { 0xa148f , 0x00 }, + { 0xa14a0 , 0x76 }, + { 0xa14a1 , 0xdc }, + { 0xa14a2 , 0x00 }, + { 0xa14a3 , 0xc6 }, + { 0xa14a4 , 0xe6 }, + { 0xa14a5 , 0xf6 }, + { 0xa14a6 , 0xfe }, + { 0xa14a7 , 0xde }, + { 0xa14a8 , 0xce }, + { 0xa14a9 , 0xc6 }, + { 0xa14aa , 0xc6 }, + { 0xa14ab , 0xc6 }, + { 0xa14ac , 0x00 }, + { 0xa14ad , 0x00 }, + { 0xa14ae , 0x00 }, + { 0xa14af , 0x00 }, + { 0xa14c0 , 0x00 }, + { 0xa14c1 , 0x3c }, + { 0xa14c2 , 0x6c }, + { 0xa14c3 , 0x6c }, + { 0xa14c4 , 0x3e }, + { 0xa14c5 , 0x00 }, + { 0xa14c6 , 0x7e }, + { 0xa14c7 , 0x00 }, + { 0xa14c8 , 0x00 }, + { 0xa14c9 , 0x00 }, + { 0xa14ca , 0x00 }, + { 0xa14cb , 0x00 }, + { 0xa14cc , 0x00 }, + { 0xa14cd , 0x00 }, + { 0xa14ce , 0x00 }, + { 0xa14cf , 0x00 }, + { 0xa14e0 , 0x00 }, + { 0xa14e1 , 0x38 }, + { 0xa14e2 , 0x6c }, + { 0xa14e3 , 0x6c }, + { 0xa14e4 , 0x38 }, + { 0xa14e5 , 0x00 }, + { 0xa14e6 , 0x7c }, + { 0xa14e7 , 0x00 }, + { 0xa14e8 , 0x00 }, + { 0xa14e9 , 0x00 }, + { 0xa14ea , 0x00 }, + { 0xa14eb , 0x00 }, + { 0xa14ec , 0x00 }, + { 0xa14ed , 0x00 }, + { 0xa14ee , 0x00 }, + { 0xa14ef , 0x00 }, + { 0xa1500 , 0x00 }, + { 0xa1501 , 0x00 }, + { 0xa1502 , 0x30 }, + { 0xa1503 , 0x30 }, + { 0xa1504 , 0x00 }, + { 0xa1505 , 0x30 }, + { 0xa1506 , 0x30 }, + { 0xa1507 , 0x60 }, + { 0xa1508 , 0xc0 }, + { 0xa1509 , 0xc6 }, 
+ { 0xa150a , 0xc6 }, + { 0xa150b , 0x7c }, + { 0xa150c , 0x00 }, + { 0xa150d , 0x00 }, + { 0xa150e , 0x00 }, + { 0xa150f , 0x00 }, + { 0xa1520 , 0x00 }, + { 0xa1521 , 0x00 }, + { 0xa1522 , 0x00 }, + { 0xa1523 , 0x00 }, + { 0xa1524 , 0x00 }, + { 0xa1525 , 0x00 }, + { 0xa1526 , 0xfe }, + { 0xa1527 , 0xc0 }, + { 0xa1528 , 0xc0 }, + { 0xa1529 , 0xc0 }, + { 0xa152a , 0xc0 }, + { 0xa152b , 0x00 }, + { 0xa152c , 0x00 }, + { 0xa152d , 0x00 }, + { 0xa152e , 0x00 }, + { 0xa152f , 0x00 }, + { 0xa1540 , 0x00 }, + { 0xa1541 , 0x00 }, + { 0xa1542 , 0x00 }, + { 0xa1543 , 0x00 }, + { 0xa1544 , 0x00 }, + { 0xa1545 , 0x00 }, + { 0xa1546 , 0xfe }, + { 0xa1547 , 0x06 }, + { 0xa1548 , 0x06 }, + { 0xa1549 , 0x06 }, + { 0xa154a , 0x06 }, + { 0xa154b , 0x00 }, + { 0xa154c , 0x00 }, + { 0xa154d , 0x00 }, + { 0xa154e , 0x00 }, + { 0xa154f , 0x00 }, + { 0xa1560 , 0x00 }, + { 0xa1561 , 0xc0 }, + { 0xa1562 , 0xc0 }, + { 0xa1563 , 0xc2 }, + { 0xa1564 , 0xc6 }, + { 0xa1565 , 0xcc }, + { 0xa1566 , 0x18 }, + { 0xa1567 , 0x30 }, + { 0xa1568 , 0x60 }, + { 0xa1569 , 0xdc }, + { 0xa156a , 0x86 }, + { 0xa156b , 0x0c }, + { 0xa156c , 0x18 }, + { 0xa156d , 0x3e }, + { 0xa156e , 0x00 }, + { 0xa156f , 0x00 }, + { 0xa1580 , 0x00 }, + { 0xa1581 , 0xc0 }, + { 0xa1582 , 0xc0 }, + { 0xa1583 , 0xc2 }, + { 0xa1584 , 0xc6 }, + { 0xa1585 , 0xcc }, + { 0xa1586 , 0x18 }, + { 0xa1587 , 0x30 }, + { 0xa1588 , 0x66 }, + { 0xa1589 , 0xce }, + { 0xa158a , 0x9e }, + { 0xa158b , 0x3e }, + { 0xa158c , 0x06 }, + { 0xa158d , 0x06 }, + { 0xa158e , 0x00 }, + { 0xa158f , 0x00 }, + { 0xa15a0 , 0x00 }, + { 0xa15a1 , 0x00 }, + { 0xa15a2 , 0x18 }, + { 0xa15a3 , 0x18 }, + { 0xa15a4 , 0x00 }, + { 0xa15a5 , 0x18 }, + { 0xa15a6 , 0x18 }, + { 0xa15a7 , 0x18 }, + { 0xa15a8 , 0x3c }, + { 0xa15a9 , 0x3c }, + { 0xa15aa , 0x3c }, + { 0xa15ab , 0x18 }, + { 0xa15ac , 0x00 }, + { 0xa15ad , 0x00 }, + { 0xa15ae , 0x00 }, + { 0xa15af , 0x00 }, + { 0xa15c0 , 0x00 }, + { 0xa15c1 , 0x00 }, + { 0xa15c2 , 0x00 }, + { 0xa15c3 , 0x00 }, + { 0xa15c4 , 0x00 
}, + { 0xa15c5 , 0x36 }, + { 0xa15c6 , 0x6c }, + { 0xa15c7 , 0xd8 }, + { 0xa15c8 , 0x6c }, + { 0xa15c9 , 0x36 }, + { 0xa15ca , 0x00 }, + { 0xa15cb , 0x00 }, + { 0xa15cc , 0x00 }, + { 0xa15cd , 0x00 }, + { 0xa15ce , 0x00 }, + { 0xa15cf , 0x00 }, + { 0xa15e0 , 0x00 }, + { 0xa15e1 , 0x00 }, + { 0xa15e2 , 0x00 }, + { 0xa15e3 , 0x00 }, + { 0xa15e4 , 0x00 }, + { 0xa15e5 , 0xd8 }, + { 0xa15e6 , 0x6c }, + { 0xa15e7 , 0x36 }, + { 0xa15e8 , 0x6c }, + { 0xa15e9 , 0xd8 }, + { 0xa15ea , 0x00 }, + { 0xa15eb , 0x00 }, + { 0xa15ec , 0x00 }, + { 0xa15ed , 0x00 }, + { 0xa15ee , 0x00 }, + { 0xa15ef , 0x00 }, + { 0xa1600 , 0x11 }, + { 0xa1601 , 0x44 }, + { 0xa1602 , 0x11 }, + { 0xa1603 , 0x44 }, + { 0xa1604 , 0x11 }, + { 0xa1605 , 0x44 }, + { 0xa1606 , 0x11 }, + { 0xa1607 , 0x44 }, + { 0xa1608 , 0x11 }, + { 0xa1609 , 0x44 }, + { 0xa160a , 0x11 }, + { 0xa160b , 0x44 }, + { 0xa160c , 0x11 }, + { 0xa160d , 0x44 }, + { 0xa160e , 0x11 }, + { 0xa160f , 0x44 }, + { 0xa1620 , 0x55 }, + { 0xa1621 , 0xaa }, + { 0xa1622 , 0x55 }, + { 0xa1623 , 0xaa }, + { 0xa1624 , 0x55 }, + { 0xa1625 , 0xaa }, + { 0xa1626 , 0x55 }, + { 0xa1627 , 0xaa }, + { 0xa1628 , 0x55 }, + { 0xa1629 , 0xaa }, + { 0xa162a , 0x55 }, + { 0xa162b , 0xaa }, + { 0xa162c , 0x55 }, + { 0xa162d , 0xaa }, + { 0xa162e , 0x55 }, + { 0xa162f , 0xaa }, + { 0xa1640 , 0xdd }, + { 0xa1641 , 0x77 }, + { 0xa1642 , 0xdd }, + { 0xa1643 , 0x77 }, + { 0xa1644 , 0xdd }, + { 0xa1645 , 0x77 }, + { 0xa1646 , 0xdd }, + { 0xa1647 , 0x77 }, + { 0xa1648 , 0xdd }, + { 0xa1649 , 0x77 }, + { 0xa164a , 0xdd }, + { 0xa164b , 0x77 }, + { 0xa164c , 0xdd }, + { 0xa164d , 0x77 }, + { 0xa164e , 0xdd }, + { 0xa164f , 0x77 }, + { 0xa1660 , 0x18 }, + { 0xa1661 , 0x18 }, + { 0xa1662 , 0x18 }, + { 0xa1663 , 0x18 }, + { 0xa1664 , 0x18 }, + { 0xa1665 , 0x18 }, + { 0xa1666 , 0x18 }, + { 0xa1667 , 0x18 }, + { 0xa1668 , 0x18 }, + { 0xa1669 , 0x18 }, + { 0xa166a , 0x18 }, + { 0xa166b , 0x18 }, + { 0xa166c , 0x18 }, + { 0xa166d , 0x18 }, + { 0xa166e , 0x18 }, + { 0xa166f , 
0x18 }, + { 0xa1680 , 0x18 }, + { 0xa1681 , 0x18 }, + { 0xa1682 , 0x18 }, + { 0xa1683 , 0x18 }, + { 0xa1684 , 0x18 }, + { 0xa1685 , 0x18 }, + { 0xa1686 , 0x18 }, + { 0xa1687 , 0xf8 }, + { 0xa1688 , 0x18 }, + { 0xa1689 , 0x18 }, + { 0xa168a , 0x18 }, + { 0xa168b , 0x18 }, + { 0xa168c , 0x18 }, + { 0xa168d , 0x18 }, + { 0xa168e , 0x18 }, + { 0xa168f , 0x18 }, + { 0xa16a0 , 0x18 }, + { 0xa16a1 , 0x18 }, + { 0xa16a2 , 0x18 }, + { 0xa16a3 , 0x18 }, + { 0xa16a4 , 0x18 }, + { 0xa16a5 , 0xf8 }, + { 0xa16a6 , 0x18 }, + { 0xa16a7 , 0xf8 }, + { 0xa16a8 , 0x18 }, + { 0xa16a9 , 0x18 }, + { 0xa16aa , 0x18 }, + { 0xa16ab , 0x18 }, + { 0xa16ac , 0x18 }, + { 0xa16ad , 0x18 }, + { 0xa16ae , 0x18 }, + { 0xa16af , 0x18 }, + { 0xa16c0 , 0x36 }, + { 0xa16c1 , 0x36 }, + { 0xa16c2 , 0x36 }, + { 0xa16c3 , 0x36 }, + { 0xa16c4 , 0x36 }, + { 0xa16c5 , 0x36 }, + { 0xa16c6 , 0x36 }, + { 0xa16c7 , 0xf6 }, + { 0xa16c8 , 0x36 }, + { 0xa16c9 , 0x36 }, + { 0xa16ca , 0x36 }, + { 0xa16cb , 0x36 }, + { 0xa16cc , 0x36 }, + { 0xa16cd , 0x36 }, + { 0xa16ce , 0x36 }, + { 0xa16cf , 0x36 }, + { 0xa16e0 , 0x00 }, + { 0xa16e1 , 0x00 }, + { 0xa16e2 , 0x00 }, + { 0xa16e3 , 0x00 }, + { 0xa16e4 , 0x00 }, + { 0xa16e5 , 0x00 }, + { 0xa16e6 , 0x00 }, + { 0xa16e7 , 0xfe }, + { 0xa16e8 , 0x36 }, + { 0xa16e9 , 0x36 }, + { 0xa16ea , 0x36 }, + { 0xa16eb , 0x36 }, + { 0xa16ec , 0x36 }, + { 0xa16ed , 0x36 }, + { 0xa16ee , 0x36 }, + { 0xa16ef , 0x36 }, + { 0xa1700 , 0x00 }, + { 0xa1701 , 0x00 }, + { 0xa1702 , 0x00 }, + { 0xa1703 , 0x00 }, + { 0xa1704 , 0x00 }, + { 0xa1705 , 0xf8 }, + { 0xa1706 , 0x18 }, + { 0xa1707 , 0xf8 }, + { 0xa1708 , 0x18 }, + { 0xa1709 , 0x18 }, + { 0xa170a , 0x18 }, + { 0xa170b , 0x18 }, + { 0xa170c , 0x18 }, + { 0xa170d , 0x18 }, + { 0xa170e , 0x18 }, + { 0xa170f , 0x18 }, + { 0xa1720 , 0x36 }, + { 0xa1721 , 0x36 }, + { 0xa1722 , 0x36 }, + { 0xa1723 , 0x36 }, + { 0xa1724 , 0x36 }, + { 0xa1725 , 0xf6 }, + { 0xa1726 , 0x06 }, + { 0xa1727 , 0xf6 }, + { 0xa1728 , 0x36 }, + { 0xa1729 , 0x36 }, + { 0xa172a 
, 0x36 }, + { 0xa172b , 0x36 }, + { 0xa172c , 0x36 }, + { 0xa172d , 0x36 }, + { 0xa172e , 0x36 }, + { 0xa172f , 0x36 }, + { 0xa1740 , 0x36 }, + { 0xa1741 , 0x36 }, + { 0xa1742 , 0x36 }, + { 0xa1743 , 0x36 }, + { 0xa1744 , 0x36 }, + { 0xa1745 , 0x36 }, + { 0xa1746 , 0x36 }, + { 0xa1747 , 0x36 }, + { 0xa1748 , 0x36 }, + { 0xa1749 , 0x36 }, + { 0xa174a , 0x36 }, + { 0xa174b , 0x36 }, + { 0xa174c , 0x36 }, + { 0xa174d , 0x36 }, + { 0xa174e , 0x36 }, + { 0xa174f , 0x36 }, + { 0xa1760 , 0x00 }, + { 0xa1761 , 0x00 }, + { 0xa1762 , 0x00 }, + { 0xa1763 , 0x00 }, + { 0xa1764 , 0x00 }, + { 0xa1765 , 0xfe }, + { 0xa1766 , 0x06 }, + { 0xa1767 , 0xf6 }, + { 0xa1768 , 0x36 }, + { 0xa1769 , 0x36 }, + { 0xa176a , 0x36 }, + { 0xa176b , 0x36 }, + { 0xa176c , 0x36 }, + { 0xa176d , 0x36 }, + { 0xa176e , 0x36 }, + { 0xa176f , 0x36 }, + { 0xa1780 , 0x36 }, + { 0xa1781 , 0x36 }, + { 0xa1782 , 0x36 }, + { 0xa1783 , 0x36 }, + { 0xa1784 , 0x36 }, + { 0xa1785 , 0xf6 }, + { 0xa1786 , 0x06 }, + { 0xa1787 , 0xfe }, + { 0xa1788 , 0x00 }, + { 0xa1789 , 0x00 }, + { 0xa178a , 0x00 }, + { 0xa178b , 0x00 }, + { 0xa178c , 0x00 }, + { 0xa178d , 0x00 }, + { 0xa178e , 0x00 }, + { 0xa178f , 0x00 }, + { 0xa17a0 , 0x36 }, + { 0xa17a1 , 0x36 }, + { 0xa17a2 , 0x36 }, + { 0xa17a3 , 0x36 }, + { 0xa17a4 , 0x36 }, + { 0xa17a5 , 0x36 }, + { 0xa17a6 , 0x36 }, + { 0xa17a7 , 0xfe }, + { 0xa17a8 , 0x00 }, + { 0xa17a9 , 0x00 }, + { 0xa17aa , 0x00 }, + { 0xa17ab , 0x00 }, + { 0xa17ac , 0x00 }, + { 0xa17ad , 0x00 }, + { 0xa17ae , 0x00 }, + { 0xa17af , 0x00 }, + { 0xa17c0 , 0x18 }, + { 0xa17c1 , 0x18 }, + { 0xa17c2 , 0x18 }, + { 0xa17c3 , 0x18 }, + { 0xa17c4 , 0x18 }, + { 0xa17c5 , 0xf8 }, + { 0xa17c6 , 0x18 }, + { 0xa17c7 , 0xf8 }, + { 0xa17c8 , 0x00 }, + { 0xa17c9 , 0x00 }, + { 0xa17ca , 0x00 }, + { 0xa17cb , 0x00 }, + { 0xa17cc , 0x00 }, + { 0xa17cd , 0x00 }, + { 0xa17ce , 0x00 }, + { 0xa17cf , 0x00 }, + { 0xa17e0 , 0x00 }, + { 0xa17e1 , 0x00 }, + { 0xa17e2 , 0x00 }, + { 0xa17e3 , 0x00 }, + { 0xa17e4 , 0x00 }, + { 
0xa17e5 , 0x00 }, + { 0xa17e6 , 0x00 }, + { 0xa17e7 , 0xf8 }, + { 0xa17e8 , 0x18 }, + { 0xa17e9 , 0x18 }, + { 0xa17ea , 0x18 }, + { 0xa17eb , 0x18 }, + { 0xa17ec , 0x18 }, + { 0xa17ed , 0x18 }, + { 0xa17ee , 0x18 }, + { 0xa17ef , 0x18 }, + { 0xa1800 , 0x18 }, + { 0xa1801 , 0x18 }, + { 0xa1802 , 0x18 }, + { 0xa1803 , 0x18 }, + { 0xa1804 , 0x18 }, + { 0xa1805 , 0x18 }, + { 0xa1806 , 0x18 }, + { 0xa1807 , 0x1f }, + { 0xa1808 , 0x00 }, + { 0xa1809 , 0x00 }, + { 0xa180a , 0x00 }, + { 0xa180b , 0x00 }, + { 0xa180c , 0x00 }, + { 0xa180d , 0x00 }, + { 0xa180e , 0x00 }, + { 0xa180f , 0x00 }, + { 0xa1820 , 0x18 }, + { 0xa1821 , 0x18 }, + { 0xa1822 , 0x18 }, + { 0xa1823 , 0x18 }, + { 0xa1824 , 0x18 }, + { 0xa1825 , 0x18 }, + { 0xa1826 , 0x18 }, + { 0xa1827 , 0xff }, + { 0xa1828 , 0x00 }, + { 0xa1829 , 0x00 }, + { 0xa182a , 0x00 }, + { 0xa182b , 0x00 }, + { 0xa182c , 0x00 }, + { 0xa182d , 0x00 }, + { 0xa182e , 0x00 }, + { 0xa182f , 0x00 }, + { 0xa1840 , 0x00 }, + { 0xa1841 , 0x00 }, + { 0xa1842 , 0x00 }, + { 0xa1843 , 0x00 }, + { 0xa1844 , 0x00 }, + { 0xa1845 , 0x00 }, + { 0xa1846 , 0x00 }, + { 0xa1847 , 0xff }, + { 0xa1848 , 0x18 }, + { 0xa1849 , 0x18 }, + { 0xa184a , 0x18 }, + { 0xa184b , 0x18 }, + { 0xa184c , 0x18 }, + { 0xa184d , 0x18 }, + { 0xa184e , 0x18 }, + { 0xa184f , 0x18 }, + { 0xa1860 , 0x18 }, + { 0xa1861 , 0x18 }, + { 0xa1862 , 0x18 }, + { 0xa1863 , 0x18 }, + { 0xa1864 , 0x18 }, + { 0xa1865 , 0x18 }, + { 0xa1866 , 0x18 }, + { 0xa1867 , 0x1f }, + { 0xa1868 , 0x18 }, + { 0xa1869 , 0x18 }, + { 0xa186a , 0x18 }, + { 0xa186b , 0x18 }, + { 0xa186c , 0x18 }, + { 0xa186d , 0x18 }, + { 0xa186e , 0x18 }, + { 0xa186f , 0x18 }, + { 0xa1880 , 0x00 }, + { 0xa1881 , 0x00 }, + { 0xa1882 , 0x00 }, + { 0xa1883 , 0x00 }, + { 0xa1884 , 0x00 }, + { 0xa1885 , 0x00 }, + { 0xa1886 , 0x00 }, + { 0xa1887 , 0xff }, + { 0xa1888 , 0x00 }, + { 0xa1889 , 0x00 }, + { 0xa188a , 0x00 }, + { 0xa188b , 0x00 }, + { 0xa188c , 0x00 }, + { 0xa188d , 0x00 }, + { 0xa188e , 0x00 }, + { 0xa188f , 0x00 }, + 
{ 0xa18a0 , 0x18 }, + { 0xa18a1 , 0x18 }, + { 0xa18a2 , 0x18 }, + { 0xa18a3 , 0x18 }, + { 0xa18a4 , 0x18 }, + { 0xa18a5 , 0x18 }, + { 0xa18a6 , 0x18 }, + { 0xa18a7 , 0xff }, + { 0xa18a8 , 0x18 }, + { 0xa18a9 , 0x18 }, + { 0xa18aa , 0x18 }, + { 0xa18ab , 0x18 }, + { 0xa18ac , 0x18 }, + { 0xa18ad , 0x18 }, + { 0xa18ae , 0x18 }, + { 0xa18af , 0x18 }, + { 0xa18c0 , 0x18 }, + { 0xa18c1 , 0x18 }, + { 0xa18c2 , 0x18 }, + { 0xa18c3 , 0x18 }, + { 0xa18c4 , 0x18 }, + { 0xa18c5 , 0x1f }, + { 0xa18c6 , 0x18 }, + { 0xa18c7 , 0x1f }, + { 0xa18c8 , 0x18 }, + { 0xa18c9 , 0x18 }, + { 0xa18ca , 0x18 }, + { 0xa18cb , 0x18 }, + { 0xa18cc , 0x18 }, + { 0xa18cd , 0x18 }, + { 0xa18ce , 0x18 }, + { 0xa18cf , 0x18 }, + { 0xa18e0 , 0x36 }, + { 0xa18e1 , 0x36 }, + { 0xa18e2 , 0x36 }, + { 0xa18e3 , 0x36 }, + { 0xa18e4 , 0x36 }, + { 0xa18e5 , 0x36 }, + { 0xa18e6 , 0x36 }, + { 0xa18e7 , 0x37 }, + { 0xa18e8 , 0x36 }, + { 0xa18e9 , 0x36 }, + { 0xa18ea , 0x36 }, + { 0xa18eb , 0x36 }, + { 0xa18ec , 0x36 }, + { 0xa18ed , 0x36 }, + { 0xa18ee , 0x36 }, + { 0xa18ef , 0x36 }, + { 0xa1900 , 0x36 }, + { 0xa1901 , 0x36 }, + { 0xa1902 , 0x36 }, + { 0xa1903 , 0x36 }, + { 0xa1904 , 0x36 }, + { 0xa1905 , 0x37 }, + { 0xa1906 , 0x30 }, + { 0xa1907 , 0x3f }, + { 0xa1908 , 0x00 }, + { 0xa1909 , 0x00 }, + { 0xa190a , 0x00 }, + { 0xa190b , 0x00 }, + { 0xa190c , 0x00 }, + { 0xa190d , 0x00 }, + { 0xa190e , 0x00 }, + { 0xa190f , 0x00 }, + { 0xa1920 , 0x00 }, + { 0xa1921 , 0x00 }, + { 0xa1922 , 0x00 }, + { 0xa1923 , 0x00 }, + { 0xa1924 , 0x00 }, + { 0xa1925 , 0x3f }, + { 0xa1926 , 0x30 }, + { 0xa1927 , 0x37 }, + { 0xa1928 , 0x36 }, + { 0xa1929 , 0x36 }, + { 0xa192a , 0x36 }, + { 0xa192b , 0x36 }, + { 0xa192c , 0x36 }, + { 0xa192d , 0x36 }, + { 0xa192e , 0x36 }, + { 0xa192f , 0x36 }, + { 0xa1940 , 0x36 }, + { 0xa1941 , 0x36 }, + { 0xa1942 , 0x36 }, + { 0xa1943 , 0x36 }, + { 0xa1944 , 0x36 }, + { 0xa1945 , 0xf7 }, + { 0xa1946 , 0x00 }, + { 0xa1947 , 0xff }, + { 0xa1948 , 0x00 }, + { 0xa1949 , 0x00 }, + { 0xa194a , 0x00 }, 
+ { 0xa194b , 0x00 }, + { 0xa194c , 0x00 }, + { 0xa194d , 0x00 }, + { 0xa194e , 0x00 }, + { 0xa194f , 0x00 }, + { 0xa1960 , 0x00 }, + { 0xa1961 , 0x00 }, + { 0xa1962 , 0x00 }, + { 0xa1963 , 0x00 }, + { 0xa1964 , 0x00 }, + { 0xa1965 , 0xff }, + { 0xa1966 , 0x00 }, + { 0xa1967 , 0xf7 }, + { 0xa1968 , 0x36 }, + { 0xa1969 , 0x36 }, + { 0xa196a , 0x36 }, + { 0xa196b , 0x36 }, + { 0xa196c , 0x36 }, + { 0xa196d , 0x36 }, + { 0xa196e , 0x36 }, + { 0xa196f , 0x36 }, + { 0xa1980 , 0x36 }, + { 0xa1981 , 0x36 }, + { 0xa1982 , 0x36 }, + { 0xa1983 , 0x36 }, + { 0xa1984 , 0x36 }, + { 0xa1985 , 0x37 }, + { 0xa1986 , 0x30 }, + { 0xa1987 , 0x37 }, + { 0xa1988 , 0x36 }, + { 0xa1989 , 0x36 }, + { 0xa198a , 0x36 }, + { 0xa198b , 0x36 }, + { 0xa198c , 0x36 }, + { 0xa198d , 0x36 }, + { 0xa198e , 0x36 }, + { 0xa198f , 0x36 }, + { 0xa19a0 , 0x00 }, + { 0xa19a1 , 0x00 }, + { 0xa19a2 , 0x00 }, + { 0xa19a3 , 0x00 }, + { 0xa19a4 , 0x00 }, + { 0xa19a5 , 0xff }, + { 0xa19a6 , 0x00 }, + { 0xa19a7 , 0xff }, + { 0xa19a8 , 0x00 }, + { 0xa19a9 , 0x00 }, + { 0xa19aa , 0x00 }, + { 0xa19ab , 0x00 }, + { 0xa19ac , 0x00 }, + { 0xa19ad , 0x00 }, + { 0xa19ae , 0x00 }, + { 0xa19af , 0x00 }, + { 0xa19c0 , 0x36 }, + { 0xa19c1 , 0x36 }, + { 0xa19c2 , 0x36 }, + { 0xa19c3 , 0x36 }, + { 0xa19c4 , 0x36 }, + { 0xa19c5 , 0xf7 }, + { 0xa19c6 , 0x00 }, + { 0xa19c7 , 0xf7 }, + { 0xa19c8 , 0x36 }, + { 0xa19c9 , 0x36 }, + { 0xa19ca , 0x36 }, + { 0xa19cb , 0x36 }, + { 0xa19cc , 0x36 }, + { 0xa19cd , 0x36 }, + { 0xa19ce , 0x36 }, + { 0xa19cf , 0x36 }, + { 0xa19e0 , 0x18 }, + { 0xa19e1 , 0x18 }, + { 0xa19e2 , 0x18 }, + { 0xa19e3 , 0x18 }, + { 0xa19e4 , 0x18 }, + { 0xa19e5 , 0xff }, + { 0xa19e6 , 0x00 }, + { 0xa19e7 , 0xff }, + { 0xa19e8 , 0x00 }, + { 0xa19e9 , 0x00 }, + { 0xa19ea , 0x00 }, + { 0xa19eb , 0x00 }, + { 0xa19ec , 0x00 }, + { 0xa19ed , 0x00 }, + { 0xa19ee , 0x00 }, + { 0xa19ef , 0x00 }, + { 0xa1a00 , 0x36 }, + { 0xa1a01 , 0x36 }, + { 0xa1a02 , 0x36 }, + { 0xa1a03 , 0x36 }, + { 0xa1a04 , 0x36 }, + { 0xa1a05 , 0x36 
}, + { 0xa1a06 , 0x36 }, + { 0xa1a07 , 0xff }, + { 0xa1a08 , 0x00 }, + { 0xa1a09 , 0x00 }, + { 0xa1a0a , 0x00 }, + { 0xa1a0b , 0x00 }, + { 0xa1a0c , 0x00 }, + { 0xa1a0d , 0x00 }, + { 0xa1a0e , 0x00 }, + { 0xa1a0f , 0x00 }, + { 0xa1a20 , 0x00 }, + { 0xa1a21 , 0x00 }, + { 0xa1a22 , 0x00 }, + { 0xa1a23 , 0x00 }, + { 0xa1a24 , 0x00 }, + { 0xa1a25 , 0xff }, + { 0xa1a26 , 0x00 }, + { 0xa1a27 , 0xff }, + { 0xa1a28 , 0x18 }, + { 0xa1a29 , 0x18 }, + { 0xa1a2a , 0x18 }, + { 0xa1a2b , 0x18 }, + { 0xa1a2c , 0x18 }, + { 0xa1a2d , 0x18 }, + { 0xa1a2e , 0x18 }, + { 0xa1a2f , 0x18 }, + { 0xa1a40 , 0x00 }, + { 0xa1a41 , 0x00 }, + { 0xa1a42 , 0x00 }, + { 0xa1a43 , 0x00 }, + { 0xa1a44 , 0x00 }, + { 0xa1a45 , 0x00 }, + { 0xa1a46 , 0x00 }, + { 0xa1a47 , 0xff }, + { 0xa1a48 , 0x36 }, + { 0xa1a49 , 0x36 }, + { 0xa1a4a , 0x36 }, + { 0xa1a4b , 0x36 }, + { 0xa1a4c , 0x36 }, + { 0xa1a4d , 0x36 }, + { 0xa1a4e , 0x36 }, + { 0xa1a4f , 0x36 }, + { 0xa1a60 , 0x36 }, + { 0xa1a61 , 0x36 }, + { 0xa1a62 , 0x36 }, + { 0xa1a63 , 0x36 }, + { 0xa1a64 , 0x36 }, + { 0xa1a65 , 0x36 }, + { 0xa1a66 , 0x36 }, + { 0xa1a67 , 0x3f }, + { 0xa1a68 , 0x00 }, + { 0xa1a69 , 0x00 }, + { 0xa1a6a , 0x00 }, + { 0xa1a6b , 0x00 }, + { 0xa1a6c , 0x00 }, + { 0xa1a6d , 0x00 }, + { 0xa1a6e , 0x00 }, + { 0xa1a6f , 0x00 }, + { 0xa1a80 , 0x18 }, + { 0xa1a81 , 0x18 }, + { 0xa1a82 , 0x18 }, + { 0xa1a83 , 0x18 }, + { 0xa1a84 , 0x18 }, + { 0xa1a85 , 0x1f }, + { 0xa1a86 , 0x18 }, + { 0xa1a87 , 0x1f }, + { 0xa1a88 , 0x00 }, + { 0xa1a89 , 0x00 }, + { 0xa1a8a , 0x00 }, + { 0xa1a8b , 0x00 }, + { 0xa1a8c , 0x00 }, + { 0xa1a8d , 0x00 }, + { 0xa1a8e , 0x00 }, + { 0xa1a8f , 0x00 }, + { 0xa1aa0 , 0x00 }, + { 0xa1aa1 , 0x00 }, + { 0xa1aa2 , 0x00 }, + { 0xa1aa3 , 0x00 }, + { 0xa1aa4 , 0x00 }, + { 0xa1aa5 , 0x1f }, + { 0xa1aa6 , 0x18 }, + { 0xa1aa7 , 0x1f }, + { 0xa1aa8 , 0x18 }, + { 0xa1aa9 , 0x18 }, + { 0xa1aaa , 0x18 }, + { 0xa1aab , 0x18 }, + { 0xa1aac , 0x18 }, + { 0xa1aad , 0x18 }, + { 0xa1aae , 0x18 }, + { 0xa1aaf , 0x18 }, + { 0xa1ac0 , 
0x00 }, + { 0xa1ac1 , 0x00 }, + { 0xa1ac2 , 0x00 }, + { 0xa1ac3 , 0x00 }, + { 0xa1ac4 , 0x00 }, + { 0xa1ac5 , 0x00 }, + { 0xa1ac6 , 0x00 }, + { 0xa1ac7 , 0x3f }, + { 0xa1ac8 , 0x36 }, + { 0xa1ac9 , 0x36 }, + { 0xa1aca , 0x36 }, + { 0xa1acb , 0x36 }, + { 0xa1acc , 0x36 }, + { 0xa1acd , 0x36 }, + { 0xa1ace , 0x36 }, + { 0xa1acf , 0x36 }, + { 0xa1ae0 , 0x36 }, + { 0xa1ae1 , 0x36 }, + { 0xa1ae2 , 0x36 }, + { 0xa1ae3 , 0x36 }, + { 0xa1ae4 , 0x36 }, + { 0xa1ae5 , 0x36 }, + { 0xa1ae6 , 0x36 }, + { 0xa1ae7 , 0xff }, + { 0xa1ae8 , 0x36 }, + { 0xa1ae9 , 0x36 }, + { 0xa1aea , 0x36 }, + { 0xa1aeb , 0x36 }, + { 0xa1aec , 0x36 }, + { 0xa1aed , 0x36 }, + { 0xa1aee , 0x36 }, + { 0xa1aef , 0x36 }, + { 0xa1b00 , 0x18 }, + { 0xa1b01 , 0x18 }, + { 0xa1b02 , 0x18 }, + { 0xa1b03 , 0x18 }, + { 0xa1b04 , 0x18 }, + { 0xa1b05 , 0xff }, + { 0xa1b06 , 0x18 }, + { 0xa1b07 , 0xff }, + { 0xa1b08 , 0x18 }, + { 0xa1b09 , 0x18 }, + { 0xa1b0a , 0x18 }, + { 0xa1b0b , 0x18 }, + { 0xa1b0c , 0x18 }, + { 0xa1b0d , 0x18 }, + { 0xa1b0e , 0x18 }, + { 0xa1b0f , 0x18 }, + { 0xa1b20 , 0x18 }, + { 0xa1b21 , 0x18 }, + { 0xa1b22 , 0x18 }, + { 0xa1b23 , 0x18 }, + { 0xa1b24 , 0x18 }, + { 0xa1b25 , 0x18 }, + { 0xa1b26 , 0x18 }, + { 0xa1b27 , 0xf8 }, + { 0xa1b28 , 0x00 }, + { 0xa1b29 , 0x00 }, + { 0xa1b2a , 0x00 }, + { 0xa1b2b , 0x00 }, + { 0xa1b2c , 0x00 }, + { 0xa1b2d , 0x00 }, + { 0xa1b2e , 0x00 }, + { 0xa1b2f , 0x00 }, + { 0xa1b40 , 0x00 }, + { 0xa1b41 , 0x00 }, + { 0xa1b42 , 0x00 }, + { 0xa1b43 , 0x00 }, + { 0xa1b44 , 0x00 }, + { 0xa1b45 , 0x00 }, + { 0xa1b46 , 0x00 }, + { 0xa1b47 , 0x1f }, + { 0xa1b48 , 0x18 }, + { 0xa1b49 , 0x18 }, + { 0xa1b4a , 0x18 }, + { 0xa1b4b , 0x18 }, + { 0xa1b4c , 0x18 }, + { 0xa1b4d , 0x18 }, + { 0xa1b4e , 0x18 }, + { 0xa1b4f , 0x18 }, + { 0xa1b60 , 0xff }, + { 0xa1b61 , 0xff }, + { 0xa1b62 , 0xff }, + { 0xa1b63 , 0xff }, + { 0xa1b64 , 0xff }, + { 0xa1b65 , 0xff }, + { 0xa1b66 , 0xff }, + { 0xa1b67 , 0xff }, + { 0xa1b68 , 0xff }, + { 0xa1b69 , 0xff }, + { 0xa1b6a , 0xff }, + { 0xa1b6b 
, 0xff }, + { 0xa1b6c , 0xff }, + { 0xa1b6d , 0xff }, + { 0xa1b6e , 0xff }, + { 0xa1b6f , 0xff }, + { 0xa1b80 , 0x00 }, + { 0xa1b81 , 0x00 }, + { 0xa1b82 , 0x00 }, + { 0xa1b83 , 0x00 }, + { 0xa1b84 , 0x00 }, + { 0xa1b85 , 0x00 }, + { 0xa1b86 , 0x00 }, + { 0xa1b87 , 0xff }, + { 0xa1b88 , 0xff }, + { 0xa1b89 , 0xff }, + { 0xa1b8a , 0xff }, + { 0xa1b8b , 0xff }, + { 0xa1b8c , 0xff }, + { 0xa1b8d , 0xff }, + { 0xa1b8e , 0xff }, + { 0xa1b8f , 0xff }, + { 0xa1ba0 , 0xf0 }, + { 0xa1ba1 , 0xf0 }, + { 0xa1ba2 , 0xf0 }, + { 0xa1ba3 , 0xf0 }, + { 0xa1ba4 , 0xf0 }, + { 0xa1ba5 , 0xf0 }, + { 0xa1ba6 , 0xf0 }, + { 0xa1ba7 , 0xf0 }, + { 0xa1ba8 , 0xf0 }, + { 0xa1ba9 , 0xf0 }, + { 0xa1baa , 0xf0 }, + { 0xa1bab , 0xf0 }, + { 0xa1bac , 0xf0 }, + { 0xa1bad , 0xf0 }, + { 0xa1bae , 0xf0 }, + { 0xa1baf , 0xf0 }, + { 0xa1bc0 , 0x0f }, + { 0xa1bc1 , 0x0f }, + { 0xa1bc2 , 0x0f }, + { 0xa1bc3 , 0x0f }, + { 0xa1bc4 , 0x0f }, + { 0xa1bc5 , 0x0f }, + { 0xa1bc6 , 0x0f }, + { 0xa1bc7 , 0x0f }, + { 0xa1bc8 , 0x0f }, + { 0xa1bc9 , 0x0f }, + { 0xa1bca , 0x0f }, + { 0xa1bcb , 0x0f }, + { 0xa1bcc , 0x0f }, + { 0xa1bcd , 0x0f }, + { 0xa1bce , 0x0f }, + { 0xa1bcf , 0x0f }, + { 0xa1be0 , 0xff }, + { 0xa1be1 , 0xff }, + { 0xa1be2 , 0xff }, + { 0xa1be3 , 0xff }, + { 0xa1be4 , 0xff }, + { 0xa1be5 , 0xff }, + { 0xa1be6 , 0xff }, + { 0xa1be7 , 0x00 }, + { 0xa1be8 , 0x00 }, + { 0xa1be9 , 0x00 }, + { 0xa1bea , 0x00 }, + { 0xa1beb , 0x00 }, + { 0xa1bec , 0x00 }, + { 0xa1bed , 0x00 }, + { 0xa1bee , 0x00 }, + { 0xa1bef , 0x00 }, + { 0xa1c00 , 0x00 }, + { 0xa1c01 , 0x00 }, + { 0xa1c02 , 0x00 }, + { 0xa1c03 , 0x00 }, + { 0xa1c04 , 0x00 }, + { 0xa1c05 , 0x76 }, + { 0xa1c06 , 0xdc }, + { 0xa1c07 , 0xd8 }, + { 0xa1c08 , 0xd8 }, + { 0xa1c09 , 0xd8 }, + { 0xa1c0a , 0xdc }, + { 0xa1c0b , 0x76 }, + { 0xa1c0c , 0x00 }, + { 0xa1c0d , 0x00 }, + { 0xa1c0e , 0x00 }, + { 0xa1c0f , 0x00 }, + { 0xa1c20 , 0x00 }, + { 0xa1c21 , 0x00 }, + { 0xa1c22 , 0x78 }, + { 0xa1c23 , 0xcc }, + { 0xa1c24 , 0xcc }, + { 0xa1c25 , 0xcc }, + { 
0xa1c26 , 0xd8 }, + { 0xa1c27 , 0xcc }, + { 0xa1c28 , 0xc6 }, + { 0xa1c29 , 0xc6 }, + { 0xa1c2a , 0xc6 }, + { 0xa1c2b , 0xcc }, + { 0xa1c2c , 0x00 }, + { 0xa1c2d , 0x00 }, + { 0xa1c2e , 0x00 }, + { 0xa1c2f , 0x00 }, + { 0xa1c40 , 0x00 }, + { 0xa1c41 , 0x00 }, + { 0xa1c42 , 0xfe }, + { 0xa1c43 , 0xc6 }, + { 0xa1c44 , 0xc6 }, + { 0xa1c45 , 0xc0 }, + { 0xa1c46 , 0xc0 }, + { 0xa1c47 , 0xc0 }, + { 0xa1c48 , 0xc0 }, + { 0xa1c49 , 0xc0 }, + { 0xa1c4a , 0xc0 }, + { 0xa1c4b , 0xc0 }, + { 0xa1c4c , 0x00 }, + { 0xa1c4d , 0x00 }, + { 0xa1c4e , 0x00 }, + { 0xa1c4f , 0x00 }, + { 0xa1c60 , 0x00 }, + { 0xa1c61 , 0x00 }, + { 0xa1c62 , 0x00 }, + { 0xa1c63 , 0x00 }, + { 0xa1c64 , 0xfe }, + { 0xa1c65 , 0x6c }, + { 0xa1c66 , 0x6c }, + { 0xa1c67 , 0x6c }, + { 0xa1c68 , 0x6c }, + { 0xa1c69 , 0x6c }, + { 0xa1c6a , 0x6c }, + { 0xa1c6b , 0x6c }, + { 0xa1c6c , 0x00 }, + { 0xa1c6d , 0x00 }, + { 0xa1c6e , 0x00 }, + { 0xa1c6f , 0x00 }, + { 0xa1c80 , 0x00 }, + { 0xa1c81 , 0x00 }, + { 0xa1c82 , 0x00 }, + { 0xa1c83 , 0xfe }, + { 0xa1c84 , 0xc6 }, + { 0xa1c85 , 0x60 }, + { 0xa1c86 , 0x30 }, + { 0xa1c87 , 0x18 }, + { 0xa1c88 , 0x30 }, + { 0xa1c89 , 0x60 }, + { 0xa1c8a , 0xc6 }, + { 0xa1c8b , 0xfe }, + { 0xa1c8c , 0x00 }, + { 0xa1c8d , 0x00 }, + { 0xa1c8e , 0x00 }, + { 0xa1c8f , 0x00 }, + { 0xa1ca0 , 0x00 }, + { 0xa1ca1 , 0x00 }, + { 0xa1ca2 , 0x00 }, + { 0xa1ca3 , 0x00 }, + { 0xa1ca4 , 0x00 }, + { 0xa1ca5 , 0x7e }, + { 0xa1ca6 , 0xd8 }, + { 0xa1ca7 , 0xd8 }, + { 0xa1ca8 , 0xd8 }, + { 0xa1ca9 , 0xd8 }, + { 0xa1caa , 0xd8 }, + { 0xa1cab , 0x70 }, + { 0xa1cac , 0x00 }, + { 0xa1cad , 0x00 }, + { 0xa1cae , 0x00 }, + { 0xa1caf , 0x00 }, + { 0xa1cc0 , 0x00 }, + { 0xa1cc1 , 0x00 }, + { 0xa1cc2 , 0x00 }, + { 0xa1cc3 , 0x00 }, + { 0xa1cc4 , 0x66 }, + { 0xa1cc5 , 0x66 }, + { 0xa1cc6 , 0x66 }, + { 0xa1cc7 , 0x66 }, + { 0xa1cc8 , 0x66 }, + { 0xa1cc9 , 0x7c }, + { 0xa1cca , 0x60 }, + { 0xa1ccb , 0x60 }, + { 0xa1ccc , 0xc0 }, + { 0xa1ccd , 0x00 }, + { 0xa1cce , 0x00 }, + { 0xa1ccf , 0x00 }, + { 0xa1ce0 , 0x00 }, + 
{ 0xa1ce1 , 0x00 }, + { 0xa1ce2 , 0x00 }, + { 0xa1ce3 , 0x00 }, + { 0xa1ce4 , 0x76 }, + { 0xa1ce5 , 0xdc }, + { 0xa1ce6 , 0x18 }, + { 0xa1ce7 , 0x18 }, + { 0xa1ce8 , 0x18 }, + { 0xa1ce9 , 0x18 }, + { 0xa1cea , 0x18 }, + { 0xa1ceb , 0x18 }, + { 0xa1cec , 0x00 }, + { 0xa1ced , 0x00 }, + { 0xa1cee , 0x00 }, + { 0xa1cef , 0x00 }, + { 0xa1d00 , 0x00 }, + { 0xa1d01 , 0x00 }, + { 0xa1d02 , 0x00 }, + { 0xa1d03 , 0x7e }, + { 0xa1d04 , 0x18 }, + { 0xa1d05 , 0x3c }, + { 0xa1d06 , 0x66 }, + { 0xa1d07 , 0x66 }, + { 0xa1d08 , 0x66 }, + { 0xa1d09 , 0x3c }, + { 0xa1d0a , 0x18 }, + { 0xa1d0b , 0x7e }, + { 0xa1d0c , 0x00 }, + { 0xa1d0d , 0x00 }, + { 0xa1d0e , 0x00 }, + { 0xa1d0f , 0x00 }, + { 0xa1d20 , 0x00 }, + { 0xa1d21 , 0x00 }, + { 0xa1d22 , 0x00 }, + { 0xa1d23 , 0x38 }, + { 0xa1d24 , 0x6c }, + { 0xa1d25 , 0xc6 }, + { 0xa1d26 , 0xc6 }, + { 0xa1d27 , 0xfe }, + { 0xa1d28 , 0xc6 }, + { 0xa1d29 , 0xc6 }, + { 0xa1d2a , 0x6c }, + { 0xa1d2b , 0x38 }, + { 0xa1d2c , 0x00 }, + { 0xa1d2d , 0x00 }, + { 0xa1d2e , 0x00 }, + { 0xa1d2f , 0x00 }, + { 0xa1d40 , 0x00 }, + { 0xa1d41 , 0x00 }, + { 0xa1d42 , 0x38 }, + { 0xa1d43 , 0x6c }, + { 0xa1d44 , 0xc6 }, + { 0xa1d45 , 0xc6 }, + { 0xa1d46 , 0xc6 }, + { 0xa1d47 , 0x6c }, + { 0xa1d48 , 0x6c }, + { 0xa1d49 , 0x6c }, + { 0xa1d4a , 0x6c }, + { 0xa1d4b , 0xee }, + { 0xa1d4c , 0x00 }, + { 0xa1d4d , 0x00 }, + { 0xa1d4e , 0x00 }, + { 0xa1d4f , 0x00 }, + { 0xa1d60 , 0x00 }, + { 0xa1d61 , 0x00 }, + { 0xa1d62 , 0x1e }, + { 0xa1d63 , 0x30 }, + { 0xa1d64 , 0x18 }, + { 0xa1d65 , 0x0c }, + { 0xa1d66 , 0x3e }, + { 0xa1d67 , 0x66 }, + { 0xa1d68 , 0x66 }, + { 0xa1d69 , 0x66 }, + { 0xa1d6a , 0x66 }, + { 0xa1d6b , 0x3c }, + { 0xa1d6c , 0x00 }, + { 0xa1d6d , 0x00 }, + { 0xa1d6e , 0x00 }, + { 0xa1d6f , 0x00 }, + { 0xa1d80 , 0x00 }, + { 0xa1d81 , 0x00 }, + { 0xa1d82 , 0x00 }, + { 0xa1d83 , 0x00 }, + { 0xa1d84 , 0x00 }, + { 0xa1d85 , 0x7e }, + { 0xa1d86 , 0xdb }, + { 0xa1d87 , 0xdb }, + { 0xa1d88 , 0xdb }, + { 0xa1d89 , 0x7e }, + { 0xa1d8a , 0x00 }, + { 0xa1d8b , 0x00 }, 
+ { 0xa1d8c , 0x00 }, + { 0xa1d8d , 0x00 }, + { 0xa1d8e , 0x00 }, + { 0xa1d8f , 0x00 }, + { 0xa1da0 , 0x00 }, + { 0xa1da1 , 0x00 }, + { 0xa1da2 , 0x00 }, + { 0xa1da3 , 0x03 }, + { 0xa1da4 , 0x06 }, + { 0xa1da5 , 0x7e }, + { 0xa1da6 , 0xdb }, + { 0xa1da7 , 0xdb }, + { 0xa1da8 , 0xf3 }, + { 0xa1da9 , 0x7e }, + { 0xa1daa , 0x60 }, + { 0xa1dab , 0xc0 }, + { 0xa1dac , 0x00 }, + { 0xa1dad , 0x00 }, + { 0xa1dae , 0x00 }, + { 0xa1daf , 0x00 }, + { 0xa1dc0 , 0x00 }, + { 0xa1dc1 , 0x00 }, + { 0xa1dc2 , 0x1c }, + { 0xa1dc3 , 0x30 }, + { 0xa1dc4 , 0x60 }, + { 0xa1dc5 , 0x60 }, + { 0xa1dc6 , 0x7c }, + { 0xa1dc7 , 0x60 }, + { 0xa1dc8 , 0x60 }, + { 0xa1dc9 , 0x60 }, + { 0xa1dca , 0x30 }, + { 0xa1dcb , 0x1c }, + { 0xa1dcc , 0x00 }, + { 0xa1dcd , 0x00 }, + { 0xa1dce , 0x00 }, + { 0xa1dcf , 0x00 }, + { 0xa1de0 , 0x00 }, + { 0xa1de1 , 0x00 }, + { 0xa1de2 , 0x00 }, + { 0xa1de3 , 0x7c }, + { 0xa1de4 , 0xc6 }, + { 0xa1de5 , 0xc6 }, + { 0xa1de6 , 0xc6 }, + { 0xa1de7 , 0xc6 }, + { 0xa1de8 , 0xc6 }, + { 0xa1de9 , 0xc6 }, + { 0xa1dea , 0xc6 }, + { 0xa1deb , 0xc6 }, + { 0xa1dec , 0x00 }, + { 0xa1ded , 0x00 }, + { 0xa1dee , 0x00 }, + { 0xa1def , 0x00 }, + { 0xa1e00 , 0x00 }, + { 0xa1e01 , 0x00 }, + { 0xa1e02 , 0x00 }, + { 0xa1e03 , 0x00 }, + { 0xa1e04 , 0xfe }, + { 0xa1e05 , 0x00 }, + { 0xa1e06 , 0x00 }, + { 0xa1e07 , 0xfe }, + { 0xa1e08 , 0x00 }, + { 0xa1e09 , 0x00 }, + { 0xa1e0a , 0xfe }, + { 0xa1e0b , 0x00 }, + { 0xa1e0c , 0x00 }, + { 0xa1e0d , 0x00 }, + { 0xa1e0e , 0x00 }, + { 0xa1e0f , 0x00 }, + { 0xa1e20 , 0x00 }, + { 0xa1e21 , 0x00 }, + { 0xa1e22 , 0x00 }, + { 0xa1e23 , 0x00 }, + { 0xa1e24 , 0x18 }, + { 0xa1e25 , 0x18 }, + { 0xa1e26 , 0x7e }, + { 0xa1e27 , 0x18 }, + { 0xa1e28 , 0x18 }, + { 0xa1e29 , 0x00 }, + { 0xa1e2a , 0x00 }, + { 0xa1e2b , 0xff }, + { 0xa1e2c , 0x00 }, + { 0xa1e2d , 0x00 }, + { 0xa1e2e , 0x00 }, + { 0xa1e2f , 0x00 }, + { 0xa1e40 , 0x00 }, + { 0xa1e41 , 0x00 }, + { 0xa1e42 , 0x00 }, + { 0xa1e43 , 0x30 }, + { 0xa1e44 , 0x18 }, + { 0xa1e45 , 0x0c }, + { 0xa1e46 , 0x06 
}, + { 0xa1e47 , 0x0c }, + { 0xa1e48 , 0x18 }, + { 0xa1e49 , 0x30 }, + { 0xa1e4a , 0x00 }, + { 0xa1e4b , 0x7e }, + { 0xa1e4c , 0x00 }, + { 0xa1e4d , 0x00 }, + { 0xa1e4e , 0x00 }, + { 0xa1e4f , 0x00 }, + { 0xa1e60 , 0x00 }, + { 0xa1e61 , 0x00 }, + { 0xa1e62 , 0x00 }, + { 0xa1e63 , 0x0c }, + { 0xa1e64 , 0x18 }, + { 0xa1e65 , 0x30 }, + { 0xa1e66 , 0x60 }, + { 0xa1e67 , 0x30 }, + { 0xa1e68 , 0x18 }, + { 0xa1e69 , 0x0c }, + { 0xa1e6a , 0x00 }, + { 0xa1e6b , 0x7e }, + { 0xa1e6c , 0x00 }, + { 0xa1e6d , 0x00 }, + { 0xa1e6e , 0x00 }, + { 0xa1e6f , 0x00 }, + { 0xa1e80 , 0x00 }, + { 0xa1e81 , 0x00 }, + { 0xa1e82 , 0x0e }, + { 0xa1e83 , 0x1b }, + { 0xa1e84 , 0x1b }, + { 0xa1e85 , 0x18 }, + { 0xa1e86 , 0x18 }, + { 0xa1e87 , 0x18 }, + { 0xa1e88 , 0x18 }, + { 0xa1e89 , 0x18 }, + { 0xa1e8a , 0x18 }, + { 0xa1e8b , 0x18 }, + { 0xa1e8c , 0x18 }, + { 0xa1e8d , 0x18 }, + { 0xa1e8e , 0x18 }, + { 0xa1e8f , 0x18 }, + { 0xa1ea0 , 0x18 }, + { 0xa1ea1 , 0x18 }, + { 0xa1ea2 , 0x18 }, + { 0xa1ea3 , 0x18 }, + { 0xa1ea4 , 0x18 }, + { 0xa1ea5 , 0x18 }, + { 0xa1ea6 , 0x18 }, + { 0xa1ea7 , 0x18 }, + { 0xa1ea8 , 0xd8 }, + { 0xa1ea9 , 0xd8 }, + { 0xa1eaa , 0xd8 }, + { 0xa1eab , 0x70 }, + { 0xa1eac , 0x00 }, + { 0xa1ead , 0x00 }, + { 0xa1eae , 0x00 }, + { 0xa1eaf , 0x00 }, + { 0xa1ec0 , 0x00 }, + { 0xa1ec1 , 0x00 }, + { 0xa1ec2 , 0x00 }, + { 0xa1ec3 , 0x00 }, + { 0xa1ec4 , 0x18 }, + { 0xa1ec5 , 0x18 }, + { 0xa1ec6 , 0x00 }, + { 0xa1ec7 , 0x7e }, + { 0xa1ec8 , 0x00 }, + { 0xa1ec9 , 0x18 }, + { 0xa1eca , 0x18 }, + { 0xa1ecb , 0x00 }, + { 0xa1ecc , 0x00 }, + { 0xa1ecd , 0x00 }, + { 0xa1ece , 0x00 }, + { 0xa1ecf , 0x00 }, + { 0xa1ee0 , 0x00 }, + { 0xa1ee1 , 0x00 }, + { 0xa1ee2 , 0x00 }, + { 0xa1ee3 , 0x00 }, + { 0xa1ee4 , 0x00 }, + { 0xa1ee5 , 0x76 }, + { 0xa1ee6 , 0xdc }, + { 0xa1ee7 , 0x00 }, + { 0xa1ee8 , 0x76 }, + { 0xa1ee9 , 0xdc }, + { 0xa1eea , 0x00 }, + { 0xa1eeb , 0x00 }, + { 0xa1eec , 0x00 }, + { 0xa1eed , 0x00 }, + { 0xa1eee , 0x00 }, + { 0xa1eef , 0x00 }, + { 0xa1f00 , 0x00 }, + { 0xa1f01 , 
0x38 }, + { 0xa1f02 , 0x6c }, + { 0xa1f03 , 0x6c }, + { 0xa1f04 , 0x38 }, + { 0xa1f05 , 0x00 }, + { 0xa1f06 , 0x00 }, + { 0xa1f07 , 0x00 }, + { 0xa1f08 , 0x00 }, + { 0xa1f09 , 0x00 }, + { 0xa1f0a , 0x00 }, + { 0xa1f0b , 0x00 }, + { 0xa1f0c , 0x00 }, + { 0xa1f0d , 0x00 }, + { 0xa1f0e , 0x00 }, + { 0xa1f0f , 0x00 }, + { 0xa1f20 , 0x00 }, + { 0xa1f21 , 0x00 }, + { 0xa1f22 , 0x00 }, + { 0xa1f23 , 0x00 }, + { 0xa1f24 , 0x00 }, + { 0xa1f25 , 0x00 }, + { 0xa1f26 , 0x00 }, + { 0xa1f27 , 0x18 }, + { 0xa1f28 , 0x18 }, + { 0xa1f29 , 0x00 }, + { 0xa1f2a , 0x00 }, + { 0xa1f2b , 0x00 }, + { 0xa1f2c , 0x00 }, + { 0xa1f2d , 0x00 }, + { 0xa1f2e , 0x00 }, + { 0xa1f2f , 0x00 }, + { 0xa1f40 , 0x00 }, + { 0xa1f41 , 0x00 }, + { 0xa1f42 , 0x00 }, + { 0xa1f43 , 0x00 }, + { 0xa1f44 , 0x00 }, + { 0xa1f45 , 0x00 }, + { 0xa1f46 , 0x00 }, + { 0xa1f47 , 0x00 }, + { 0xa1f48 , 0x18 }, + { 0xa1f49 , 0x00 }, + { 0xa1f4a , 0x00 }, + { 0xa1f4b , 0x00 }, + { 0xa1f4c , 0x00 }, + { 0xa1f4d , 0x00 }, + { 0xa1f4e , 0x00 }, + { 0xa1f4f , 0x00 }, + { 0xa1f60 , 0x00 }, + { 0xa1f61 , 0x0f }, + { 0xa1f62 , 0x0c }, + { 0xa1f63 , 0x0c }, + { 0xa1f64 , 0x0c }, + { 0xa1f65 , 0x0c }, + { 0xa1f66 , 0x0c }, + { 0xa1f67 , 0xec }, + { 0xa1f68 , 0x6c }, + { 0xa1f69 , 0x6c }, + { 0xa1f6a , 0x3c }, + { 0xa1f6b , 0x1c }, + { 0xa1f6c , 0x00 }, + { 0xa1f6d , 0x00 }, + { 0xa1f6e , 0x00 }, + { 0xa1f6f , 0x00 }, + { 0xa1f80 , 0x00 }, + { 0xa1f81 , 0xd8 }, + { 0xa1f82 , 0x6c }, + { 0xa1f83 , 0x6c }, + { 0xa1f84 , 0x6c }, + { 0xa1f85 , 0x6c }, + { 0xa1f86 , 0x6c }, + { 0xa1f87 , 0x00 }, + { 0xa1f88 , 0x00 }, + { 0xa1f89 , 0x00 }, + { 0xa1f8a , 0x00 }, + { 0xa1f8b , 0x00 }, + { 0xa1f8c , 0x00 }, + { 0xa1f8d , 0x00 }, + { 0xa1f8e , 0x00 }, + { 0xa1f8f , 0x00 }, + { 0xa1fa0 , 0x00 }, + { 0xa1fa1 , 0x70 }, + { 0xa1fa2 , 0xd8 }, + { 0xa1fa3 , 0x30 }, + { 0xa1fa4 , 0x60 }, + { 0xa1fa5 , 0xc8 }, + { 0xa1fa6 , 0xf8 }, + { 0xa1fa7 , 0x00 }, + { 0xa1fa8 , 0x00 }, + { 0xa1fa9 , 0x00 }, + { 0xa1faa , 0x00 }, + { 0xa1fab , 0x00 }, + { 0xa1fac 
, 0x00 }, + { 0xa1fad , 0x00 }, + { 0xa1fae , 0x00 }, + { 0xa1faf , 0x00 }, + { 0xa1fc0 , 0x00 }, + { 0xa1fc1 , 0x00 }, + { 0xa1fc2 , 0x00 }, + { 0xa1fc3 , 0x00 }, + { 0xa1fc4 , 0x7c }, + { 0xa1fc5 , 0x7c }, + { 0xa1fc6 , 0x7c }, + { 0xa1fc7 , 0x7c }, + { 0xa1fc8 , 0x7c }, + { 0xa1fc9 , 0x7c }, + { 0xa1fca , 0x7c }, + { 0xa1fcb , 0x00 }, + { 0xa1fcc , 0x00 }, + { 0xa1fcd , 0x00 }, + { 0xa1fce , 0x00 }, + { 0xa1fcf , 0x00 }, + { 0xa1fe0 , 0x00 }, + { 0xa1fe1 , 0x00 }, + { 0xa1fe2 , 0x00 }, + { 0xa1fe3 , 0x00 }, + { 0xa1fe4 , 0x00 }, + { 0xa1fe5 , 0x00 }, + { 0xa1fe6 , 0x00 }, + { 0xa1fe7 , 0x00 }, + { 0xa1fe8 , 0x00 }, + { 0xa1fe9 , 0x00 }, + { 0xa1fea , 0x00 }, + { 0xa1feb , 0x00 }, + { 0xa1fec , 0x00 }, + { 0xa1fed , 0x00 }, + { 0xa1fee , 0x00 }, + { 0xa1fef , 0x00 }, + { 0x00000 , 0x00 } + + }; + diff --git a/arch/e2k/boot/vmlinux.bin.lds b/arch/e2k/boot/vmlinux.bin.lds new file mode 100644 index 000000000000..68016f406ad7 --- /dev/null +++ b/arch/e2k/boot/vmlinux.bin.lds @@ -0,0 +1,8 @@ +SECTIONS +{ + .rodata : { + input_data = .; + *(.data) + input_data_end = .; + } +} diff --git a/arch/e2k/boot/zip.c b/arch/e2k/boot/zip.c new file mode 100644 index 000000000000..fdd7918efe8c --- /dev/null +++ b/arch/e2k/boot/zip.c @@ -0,0 +1,182 @@ +/* + * misc.c + * + * This is a collection of several routines from gzip-1.0.3 + * adapted for Linux. + * + * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 + * puts by Nick Holloway 1993, better puts by Martin Mares 1995 + * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996 + */ + +#include +#include +#include +#include +#include + +#include "boot_io.h" + +/* + * gzip declarations + */ + +#define OF(args) args +#define STATIC static +#define NOMEMCPY 1 + +//#undef memset +//#undef memcpy + +/* + * Why do we do this? Don't ask me.. + * + * Incomprehensible are the ways of bootloaders. 
+ */ +#define memzero(s, n) memset ((s), 0, (n)) + +typedef unsigned char uch; +typedef unsigned short ush; +typedef unsigned long ulg; + +#define WSIZE 0x8000 /* Window size must be at least 32k, */ + /* and a power of two */ + +static uch *inbuf; /* input buffer */ +static uch window[WSIZE]; /* Sliding window buffer */ + +static unsigned insize = 0; /* valid bytes in inbuf */ +static unsigned inptr = 0; /* index of next byte to be processed in inbuf */ +static unsigned outcnt = 0; /* bytes in output buffer */ + +/* gzip flag byte */ +#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */ +#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */ +#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ +#define ORIG_NAME 0x08 /* bit 3 set: original file name present */ +#define COMMENT 0x10 /* bit 4 set: file comment present */ +#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ +#define RESERVED 0xC0 /* bit 6,7: reserved */ + +#define get_byte() (inptr < insize ? 
inbuf[inptr++] : fill_inbuf()) + +/* Diagnostic functions */ +#ifdef DEBUG +# define Assert(cond,msg) {if(!(cond)) error(msg);} +# define Trace(x) fprintf x +# define Tracev(x) {if (verbose) fprintf x ;} +# define Tracevv(x) {if (verbose>1) fprintf x ;} +# define Tracec(c,x) {if (verbose && (c)) fprintf x ;} +# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;} +#else +# define Assert(cond,msg) +# define Trace(x) +# define Tracev(x) +# define Tracevv(x) +# define Tracec(c,x) +# define Tracecv(c,x) +#endif + +static int fill_inbuf(void); +static inline void flush_window(void); +static void error(char *m); + +extern char input_data[]; +extern char input_data_end[]; + +static long bytes_out = 0; +static uch *output_data; +static unsigned long output_ptr = 0; + +extern long free_mem_ptr; +extern long free_mem_end_ptr; + + +#ifdef CONFIG_X86_NUMAQ +static void * xquad_portio = NULL; +#endif + +#define ZLIB_INFLATE_NO_INFLATE_LOCK +#include "../../../lib/inflate.c" + +#ifndef __HAVE_ARCH_MEMSET +void* memset(void* s, int c, size_t n) +{ + int i; + char *ss = (char*)s; + + for (i=0;i> 8); + } + crc = c; + bytes_out += (ulg)outcnt; + output_ptr += (ulg)outcnt; + outcnt = 0; + rom_printk("decompressed %d bytes\r", bytes_out); +} + + +static void error(char *x) +{ + rom_puts("\n\n"); + rom_puts(x); + rom_puts("\n\n -- System halted"); + + E2K_LMS_HALT_ERROR(0xdead); /* Halt */ +} + + +int decompress_kernel(ulong base) +{ + + output_data = (uch *) base; + + makecrc(); + gunzip(); + + return bytes_out; +} diff --git a/arch/e2k/configs/build-config b/arch/e2k/configs/build-config new file mode 100644 index 000000000000..8befef1263b4 --- /dev/null +++ b/arch/e2k/configs/build-config @@ -0,0 +1,432 @@ +#!/bin/bash +scname=`basename $0` +scdir=`dirname $0` +conf_script=./scripts/config +conf_path=./arch/e2k/configs +[ "`uname -m`" != "e2k" ] && export ARCH=e2k +Confs() +{ +cat <s:" +Confs +} +usage() +{ + echo "incorrect param: $1" + echo "usage: $scname [--def] --conf 
[--file _defconfig] [CC=]" >&2 + exit 1 +} +Usage() { + cat >&2 < [--output_directory/-o ] [--file _defconfig] + +Generate .config or|and files for some kernel configuration +from arch/e2k/configs/defconfig | arch/e2k/configs/rt_defconfig + +: ./arch/e2k/configs/_defconfig + (default: ./arch/e2k/configs/gen__defconfig) + +1. To generate file + $scname --def --conf [--file _defconfig] +2. To generate .config and + $scname --conf --file _defconfig +3. To generate .config only + $scname --conf +4. To generate .config only in separate directory + $scname --conf -o + +s meaning: + + | realtime | NUMA + -------+----------+----- + *_nort | - | + + *_nn | - | - + *_rt | + | + + *_nnrt | + | - + ------------------------ + + *_debug configs also enable additional selftesting options. + *_host/*_guest are for pure paravirtualization without HW support. + +EOL +KnownConfs +exit 0 +} +tmpf=tmp_$$_defconfig +cleanup() +{ + rm -f $conf_path/$tmpf +} +localversion() +{ + l=`echo $DCONF | sed -e 's/_/-/' -e 's/nort_//' -e 's/nort//' -e 's/-$//'` + $conf_script --file $OUTPUT_DIR/.config --set-str CONFIG_LOCALVERSION "-${l}" \ + || exit 1 +} +############################################################################### +conf_no_builtin_gpu() +{ + $conf_script $OPTFILE \ + -d CONFIG_MCST_GPU_VIV -d CONFIG_DRM_VIVANTE \ + -d CONFIG_DRM_MGA2 -d CONFIG_MGA2_PWM -d CONFIG_MGA2_GPIO \ + -d CONFIG_DRM_IMX_HDMI \ + -d CONFIG_MCST_GPU_IMGTEC -d CONFIG_IMGTEC \ + -d CONFIG_MEM2ALLOC -d CONFIG_HANTRODEC -d CONFIG_BIGE \ + || exit 1 +} +conf_e1cp() +{ + # e1cp has: + # 1) Vivante 3D GPU GC2500 (25 GFLOPS FP32) - + # galcore.ko (CONFIG_MCST_GPU_VIV), vivante.ko (CONFIG_DRM_VIVANTE) + # 2) MGA2 - mga2.ko (CONFIG_DRM_MGA2), mga2-pwm.ko (CONFIG_MGA2_PWM), + # mga2-gpio.ko (CONFIG_MGA2_GPIO), dw_hdmi_imx.ko (CONFIG_DRM_IMX_HDMI) + $conf_script $OPTFILE \ + -e CONFIG_E2K_MACHINE -e CONFIG_E2K_E1CP \ + -d CONFIG_SMP \ + -m CONFIG_MCST_GPU_VIV -m CONFIG_DRM_VIVANTE \ + -m CONFIG_DRM_MGA2 -m 
CONFIG_MGA2_PWM -m CONFIG_MGA2_GPIO \ + -m CONFIG_DRM_IMX_HDMI \ + -d CONFIG_MCST_GPU_IMGTEC -d CONFIG_IMGTEC \ + -d CONFIG_MEM2ALLOC -d CONFIG_HANTRODEC -d CONFIG_BIGE \ + || exit 1 +} +conf_e3s() +{ + $conf_script $OPTFILE \ + -e CONFIG_E2K_MACHINE -e CONFIG_E2K_ES2_DSP \ + || exit 1; + conf_no_builtin_gpu; +} +conf_e2s() +{ + $conf_script $OPTFILE \ + -e CONFIG_E2K_MACHINE -e CONFIG_E2K_E2S \ + || exit 1; + conf_no_builtin_gpu; +} +conf_e8c() +{ + $conf_script $OPTFILE \ + -e CONFIG_E2K_MACHINE -e CONFIG_E2K_E8C \ + || exit 1; + conf_no_builtin_gpu; +} +conf_e8c2() +{ + $conf_script $OPTFILE \ + -e CONFIG_E2K_MACHINE -e CONFIG_E2K_E8C2 \ + || exit 1; + conf_no_builtin_gpu; +} +conf_e2c3() +{ + # e2c3 has: + # 1) MGA2.5 (MGA2 with more controllers, overlays and 2x 2D GPU) - + # mga2 (CONFIG_DRM_MGA2), mga2-pwm (CONFIG_MGA2_PWM), + # mga2-gpio (CONFIG_MGA2_GPIO), dw_hdmi_imx (CONFIG_DRM_IMX_HDMI) + # 2) Imagination 3D GX6650 GPU (300 GFLOPS FP32) - pvrsrvkm.ko, + # dolly_drv.ko (CONFIG_MCST_GPU_IMGTEC) + # 3) Imagination D5820 (decoder) - vxd.ko, img_mem.ko (CONFIG_IMGTEC) + # 4) Imagination E5510 (encoder) - vxekm.ko, imgvideo.ko (CONFIG_IMGTEC) + # 5) Google VP9 memory allocator - mem2alloc.ko (CONFIG_MEM2ALLOC) + # 6) Google VP9 decoder - hantrodec.ko (CONFIG_HANTRODEC) + # 7) Google VP9 encoder - bige.ko (CONFIG_BIGE) + $conf_script $OPTFILE \ + -e CONFIG_E2K_MACHINE -e CONFIG_E2K_E2C3 \ + -d CONFIG_MCST_GPU_VIV -d CONFIG_DRM_VIVANTE \ + -m CONFIG_DRM_MGA2 -m CONFIG_MGA2_PWM -m CONFIG_MGA2_GPIO \ + -m CONFIG_DRM_IMX_HDMI \ + -m CONFIG_MCST_GPU_IMGTEC -m CONFIG_IMGTEC \ + -m CONFIG_MEM2ALLOC -m CONFIG_HANTRODEC -m CONFIG_BIGE \ + || exit 1 +} +conf_e12c() +{ + $conf_script $OPTFILE \ + -e CONFIG_E2K_MACHINE -e CONFIG_E2K_E12C \ + || exit 1; + conf_no_builtin_gpu; +} +conf_e16c() +{ + $conf_script $OPTFILE \ + -e CONFIG_E2K_MACHINE -e CONFIG_E2K_E16C \ + || exit 1; + conf_no_builtin_gpu; +} +conf_nn() +{ + $conf_script $OPTFILE \ + -d CONFIG_NUMA -u 
CONFIG_NUMA_BALANCING -u CONFIG_NUMA_BALANCING_DEFAULT_ENABLED \ + || exit 1 +} +conf_prototype() +{ + # CONFIG_HZ is set to 10 temporarily (for e2c3 prototype/simulator) + # CONFIG_IGB is builtin temporarily (e2c3 prototype uses it) + # CONFIG_BLK_DEV_LOOP is builtin temporarily (for virtualization on e2c3 prototype) + # CONFIG_KERNEL_GZIP is switched to CONFIG_KERNEL_LZ4 temporarily + # (for faster e2c3 prototype booting) + $conf_script $OPTFILE \ + -d CONFIG_KERNEL_GZIP -e CONFIG_KERNEL_LZ4 \ + -u CONFIG_HZ_100 -e CONFIG_HZ_10 \ + -e CONFIG_BLK_DEV_LOOP \ + -e CONFIG_IGB \ + || exit 1 +} +conf_hardware_virt() +{ + $conf_script $OPTFILE \ + -e CONFIG_EARLY_VIRTIO_CONSOLE \ + -e CONFIG_VSOCKETS \ + -e CONFIG_VIRTIO_VSOCKETS \ + -e CONFIG_VIRTIO_VSOCKETS_COMMON \ + -e CONFIG_VIRTIO_BLK \ + -e CONFIG_SCSI_VIRTIO \ + -e CONFIG_VIRTIO_NET \ + -e CONFIG_HVC_L \ + -e CONFIG_VIRTIO_CONSOLE \ + -e CONFIG_E1000 \ + -e CONFIG_DRM_BOCHS \ + -e CONFIG_VIRT_DRIVERS \ + -e CONFIG_VIRTIO \ + -e CONFIG_VIRTIO_PCI \ + -e CONFIG_VIRTIO_PCI_LEGACY \ + -e CONFIG_VIRTIO_BALLOON \ + -e CONFIG_VIRTIO_INPUT \ + -e CONFIG_VIRTIO_MMIO \ + -e CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES \ + -e CONFIG_VIRTUALIZATION \ + -e CONFIG_HAVE_KVM_IRQCHIP \ + -e CONFIG_HAVE_KVM_IRQFD \ + -e CONFIG_HAVE_KVM_IRQ_ROUTING \ + -e CONFIG_HAVE_KVM_EVENTFD \ + -e CONFIG_KVM_COMPAT \ + -e CONFIG_KVM \ + -e CONFIG_KVM_GUEST \ + -e CONFIG_KVM_PARAVIRTUALIZATION \ + -e CONFIG_KVM_HW_VIRTUALIZATION \ + -e CONFIG_KVM_HW_PARAVIRTUALIZATION \ + -e CONFIG_KVM_HOST_KERNEL \ + -d CONFIG_KVM_GUEST_KERNEL \ + -d CONFIG_PARAVIRT_GUEST \ + -e CONFIG_KVM_SHADOW_PT_ENABLE \ + -e CONFIG_KVM_TDP_ENABLE \ + -e CONFIG_KVM_NONPAGING_ENABLE \ + -e CONFIG_KVM_PHYS_PT_ENABLE \ + -e CONFIG_KVM_HOST_MODE \ + -e CONFIG_DIRECT_VIRQ_INJECTION \ + -e CONFIG_VIRQ_VCPU_INJECTION \ + -e CONFIG_VFIO \ + -e CONFIG_VFIO_PCI \ + || exit 1 +} +conf_host() +{ + $conf_script $OPTFILE \ + -e CONFIG_VIRTIO \ + -e CONFIG_VIRTUALIZATION \ + -e 
CONFIG_HAVE_KVM_IRQCHIP \ + -e CONFIG_HAVE_KVM_IRQFD \ + -e CONFIG_HAVE_KVM_IRQ_ROUTING \ + -e CONFIG_HAVE_KVM_EVENTFD \ + -e CONFIG_KVM_COMPAT \ + -e CONFIG_KVM \ + -e CONFIG_KVM_GUEST \ + -e CONFIG_KVM_PARAVIRTUALIZATION \ + -e CONFIG_KVM_HW_VIRTUALIZATION \ + -e CONFIG_KVM_HW_PARAVIRTUALIZATION \ + -e CONFIG_KVM_HOST_KERNEL \ + -d CONFIG_KVM_GUEST_KERNEL \ + -d CONFIG_PARAVIRT_GUEST \ + -e CONFIG_KVM_SHADOW_PT_ENABLE \ + -e CONFIG_KVM_NONPAGING_ENABLE \ + -e CONFIG_KVM_PHYS_PT_ENABLE \ + -e CONFIG_KVM_TDP_ENABLE \ + -e CONFIG_KVM_HOST_MODE \ + -e CONFIG_DIRECT_VIRQ_INJECTION \ + -e CONFIG_VIRQ_VCPU_INJECTION \ + -e CONFIG_STP \ + -e CONFIG_BLK_DEV_LOOP \ + --set-str CONFIG_CMDLINE "root=/dev/sda1 console=ttyS0,115200 ignore_loglevel init=/root/run_spt transparent_hugepage=madvise idle=mwait usbcore.nousb" \ + || exit 1 +} +conf_guest() +{ + $conf_script $OPTFILE \ + -e CONFIG_EARLY_VIRTIO_CONSOLE \ + -e CONFIG_VSOCKETS \ + -e CONFIG_VIRTIO_BLK \ + -e CONFIG_SCSI_VIRTIO \ + -e CONFIG_VIRTIO_NET \ + -e CONFIG_VIRTIO_CONSOLE \ + -e CONFIG_VIRT_DRIVERS \ + -e CONFIG_VIRTIO \ + -e CONFIG_VIRTIO_PCI \ + -e CONFIG_VIRTIO_PCI_LEGACY \ + -e CONFIG_VIRTIO_BALLOON \ + -e CONFIG_VIRTIO_INPUT \ + -e CONFIG_VIRTIO_MMIO \ + -e CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES \ + -e CONFIG_HVC_L \ + -e CONFIG_VIRTUALIZATION \ + -e CONFIG_HAVE_KVM_IRQCHIP \ + -e CONFIG_HAVE_KVM_IRQFD \ + -e CONFIG_HAVE_KVM_IRQ_ROUTING \ + -e CONFIG_HAVE_KVM_EVENTFD \ + -e CONFIG_KVM_COMPAT \ + -e CONFIG_KVM \ + -e CONFIG_KVM_GUEST \ + -e CONFIG_KVM_PARAVIRTUALIZATION \ + -e CONFIG_KVM_HW_VIRTUALIZATION \ + -d CONFIG_KVM_HOST_KERNEL \ + -e CONFIG_KVM_GUEST_KERNEL \ + -d CONFIG_PARAVIRT_GUEST \ + -e CONFIG_KVM_GUEST_HW_PV \ + -e CONFIG_KVM_SHADOW_PT \ + -e CONFIG_KVM_GUEST_MODE \ + -e CONFIG_KVM_GUEST_SMP \ + -d CONFIG_KVM_GUEST_DEBUG \ + -d CONFIG_KVM_GUEST_HW_HCALL \ + -e CONFIG_DIRECT_VIRQ_INJECTION \ + -e CONFIG_VIRQ_VCPU_INJECTION \ + -e CONFIG_ISO9660_FS \ + -e CONFIG_BLK_DEV_LOOP \ + -d 
CONFIG_L_I2C_CONTROLLER \ + -d CONFIG_L_SPI_CONTROLLER \ + || exit 1 +} +conf_power_efficient() +{ + $conf_script $OPTFILE \ + -e CONFIG_WQ_POWER_EFFICIENT_DEFAULT \ + || exit 1 +} +e2k_nort() { :; } +e2k_rt() { :; } +e2k_nn() { conf_nn; } +e2k_nnrt() { conf_nn; } +e2k_host() { conf_host; } +e2k_guest() { conf_guest; } +e1cp_nort() { conf_e1cp; conf_power_efficient; } +e1cp_rt() { conf_e1cp; conf_power_efficient; } +e1cp_host() { conf_e1cp; conf_power_efficient; conf_host; } +e1cp_guest(){ conf_e1cp; conf_power_efficient; conf_guest; } +e2s_nort() { conf_e2s; } +e2s_rt() { conf_e2s; } +e2s_nn() { conf_e2s; conf_nn; } +e2s_nnrt() { conf_e2s; conf_nn; } +e2s_host() { conf_e2s; conf_host; } +e2s_guest() { conf_e2s; conf_guest; } +e2s_nort_debug() { conf_e2s; } +e3s_nort() { conf_e3s; } +e3s_rt() { conf_e3s; } +e3s_nn() { conf_e3s; conf_nn; } +e3s_nnrt() { conf_e3s; conf_nn; } +e3s_host() { conf_e3s; conf_host; } +e3s_guest() { conf_e3s; conf_guest; } +e8c_nort() { conf_e8c; } +e8c_rt() { conf_e8c; } +e8c_nn() { conf_e8c; conf_nn; } +e8c_nnrt() { conf_e8c; conf_nn; } +e8c_host() { conf_e8c; conf_host; } +e8c_guest() { conf_e8c; conf_guest; } +e8c_nort_debug() { conf_e8c; } +e8c2_nort() { conf_e8c2; } +e8c2_rt() { conf_e8c2; } +e8c2_nn() { conf_e8c2; conf_nn; } +e8c2_nnrt() { conf_e8c2; conf_nn; } +e8c2_host() { conf_e8c2; conf_host; } +e8c2_guest(){ conf_e8c2; conf_guest; } +e8c2_nort_debug() { conf_e8c2; } +e16c_nort() { conf_e16c; conf_hardware_virt; } +e16c_rt() { conf_e16c; conf_hardware_virt; } +e16c_nn() { conf_e16c; conf_hardware_virt; conf_nn; } +e16c_nnrt() { conf_e16c; conf_hardware_virt; conf_nn; } +e2c3_nort() { conf_e2c3; conf_hardware_virt; conf_power_efficient; conf_nn; } +e2c3_rt() { conf_e2c3; conf_hardware_virt; conf_power_efficient; conf_nn; } +e12c_proto(){ conf_e12c; conf_hardware_virt; conf_prototype; } +e16c_proto(){ conf_e16c; conf_hardware_virt; conf_prototype; } +e2c3_proto(){ conf_e2c3; conf_hardware_virt; conf_prototype; 
conf_power_efficient; conf_nn; } +# These are temporary and needed because distribution does not know about "_proto" kernels +e12c_nort() { e12c_proto; } +############################################################################### +trap "cleanup" 1 2 3 6 15 EXIT +OUTPUT_DIR=. +while [ "$1" != "" ] ; do + opt="$1" + shift + case "$opt" in + --def|-d) def_flg=0;; + --conf|-c) [ x"$DCONF" != x ] && usage; DCONF="$1"; shift;; + --file|-f) [ x"$DFILE" != x ] && usage; DFILE="$1"; shift;; + --output_directory|-o) OUTPUT_DIR="$1"; shift;; + CC=*) optcc="$opt";; + V=*) optv="$opt";; + --help|-h) Usage;; + *) echo "incorrect param: $opt"; Usage;; + esac +done +[ x"$DCONF" == x ] && Usage +if [[ "$CONFS" != *" $DCONF "* ]]; then + echo "unknown : $DCONF" + KnownConfs + exit 1 +fi +# additional configuration files to use +ADDITIONAL_CONFIGS="mcst.config" +if [[ "$DCONF" == *"_rt"* ]] || [[ "$DCONF" == *"_nnrt"* ]]; then + ADDITIONAL_CONFIGS+=" mcst_rt.config " +fi +if [[ "$DCONF" == *"debug"* ]]; then + ADDITIONAL_CONFIGS=$ADDITIONAL_CONFIGS" mcst_debug.config " +fi +# src defconfig +SFILE=$conf_path/defconfig +# dst defconfig +if [ x"$DFILE" == x ]; then + DFILENAME=$tmpf + [ x$def_flg == x0 ] && DFILENAME=gen_${DCONF}_defconfig +else + if [[ $DFILE != *"_defconfig" ]]; then + usage "incorrect defconfig file name: $DFILE" + fi + DFILENAME=$DFILE +fi +DFILE=$conf_path/$DFILENAME +OPTFILE="--file $DFILE" + +cd $scdir/../../.. || exit 1 +#pwd +#echo "$DCONF $DFILENAME $DFILE $SFILE" +#echo "$OPTFILE" +#exit 0 + +# Generate defconfig +cp $SFILE $DFILE || exit 1 +$DCONF +[ x$def_flg == x0 ] && exit 0 + +# Generate .config; silence configs merging output by adding >/dev/null +make O=$OUTPUT_DIR $DFILENAME $ADDITIONAL_CONFIGS $optcc $optv >/dev/null || exit 1 +localversion +make O=$OUTPUT_DIR olddefconfig $optcc $optv +exit $? 
diff --git a/arch/e2k/configs/defconfig b/arch/e2k/configs/defconfig new file mode 100644 index 000000000000..cee9163cf64e --- /dev/null +++ b/arch/e2k/configs/defconfig @@ -0,0 +1,755 @@ +CONFIG_LOCALVERSION="-e2k" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_USELIB=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT_VOLUNTARY=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_NUMA_BALANCING=y +CONFIG_CGROUPS=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +CONFIG_EXPERT=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS_ALL=y +CONFIG_USERFAULTFD=y +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB_MERGE_DEFAULT is not set +CONFIG_PROFILING=y +CONFIG_HZ_300=y +CONFIG_NODES_SHIFT=2 +CONFIG_NUMA=y +CONFIG_SMP=y +CONFIG_NR_CPUS=64 +CONFIG_IOHUB_GPIO=y +CONFIG_HIBERNATION=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_IDLE=y +CONFIG_KPROBES=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_BLK_DEV_THROTTLING=y +CONFIG_BLK_WBT=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_AIX_PARTITION=y +CONFIG_MAC_PARTITION=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_LDM_PARTITION=y +CONFIG_SUN_PARTITION=y +CONFIG_MQ_IOSCHED_KYBER=m +CONFIG_IOSCHED_BFQ=m +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_BINFMT_MISC=m +CONFIG_MEMORY_HOTPLUG=y 
+CONFIG_MEMORY_HOTREMOVE=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_FRONTSWAP=y +CONFIG_ZSWAP=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_UDP_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_MD5SIG=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_GRE=m +CONFIG_NETFILTER=y +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m 
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_VS=m +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +CONFIG_IP_VS_FTP=m +CONFIG_NF_LOG_ARP=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m 
+CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_IP_SCTP=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +CONFIG_BRIDGE=y +CONFIG_VLAN_8021Q=m +CONFIG_LLC2=m +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_ATM=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m 
+CONFIG_NET_ACT_SKBEDIT=m +CONFIG_DCB=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_NET_PKTGEN=m +# CONFIG_NET_DROP_MONITOR is not set +CONFIG_CAN=m +CONFIG_BT=m +CONFIG_AF_RXRPC=m +CONFIG_CFG80211=m +CONFIG_CFG80211_WEXT=y +CONFIG_MAC80211=m +CONFIG_RFKILL=m +CONFIG_RFKILL_INPUT=y +CONFIG_CEPH_LIB_PRETTYDEBUG=y +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +CONFIG_PCI=y +CONFIG_PCI_MSI=y +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_MPV=m +CONFIG_MMRM=m +CONFIG_MOKM=m +CONFIG_WD=y +CONFIG_CONNECTOR=y +CONFIG_MTD=m +CONFIG_PARPORT=m +CONFIG_PARPORT_MCST=m +CONFIG_PARPORT_1284=y +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_WCACHE=y +CONFIG_ATA_OVER_ETH=m +CONFIG_NVME_TCP=m +CONFIG_NVME_TARGET=m +CONFIG_NVME_TARGET_TCP=m +CONFIG_ENCLOSURE_SERVICES=m +CONFIG_ISL22317=m +CONFIG_UCD9080=m +CONFIG_I2C_P2PMC=m +CONFIG_EEPROM_AT24=m +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_EE1004=m +CONFIG_IDE=m +CONFIG_BLK_DEV_IDECD=m +CONFIG_BLK_DEV_GENERIC=m +CONFIG_BLK_DEV_PIIX=m +CONFIG_BLK_DEV_ELBRUS=m +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_AACRAID=m +CONFIG_SCSI_MVSAS=m +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_SMARTPQI=m +CONFIG_LIBFC=m +CONFIG_SCSI_QLA_FC=m +CONFIG_TCM_QLA2XXX=m +CONFIG_SCSI_LPFC=m +CONFIG_SCSI_BFA_FC=m +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_SATA_SIL24=y +CONFIG_ATA_PIIX=y +CONFIG_SATA_SIL=y +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_MULTIPATH=m +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m 
+CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_TCM_FC=m +CONFIG_ISCSI_TARGET=m +CONFIG_FUSION=y +CONFIG_FUSION_SAS=m +CONFIG_FUSION_CTL=m +CONFIG_NETDEVICES=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_IFB=m +CONFIG_TUN=y +CONFIG_ATM_DUMMY=m +CONFIG_ATM_TCP=m +CONFIG_ATM_LANAI=m +CONFIG_ATM_ENI=m +CONFIG_ATM_ENI_TUNE_BURST=y +CONFIG_VORTEX=m +# CONFIG_NET_VENDOR_AURORA is not set +CONFIG_BNX2=m +CONFIG_BNXT=m +CONFIG_NET_TULIP=y +CONFIG_WINBOND_840=m +CONFIG_E100=m +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IXGBE=m +CONFIG_IXGBE_DCB=y +CONFIG_SKY2=y +CONFIG_MLX4_EN=m +CONFIG_MLX5_CORE=m +CONFIG_MLX5_CORE_EN=y +CONFIG_NE2K_PCI=m +CONFIG_FORCEDETH=m +CONFIG_8139TOO=m +CONFIG_R8169=m +CONFIG_PCI_SUNLANCE=m +CONFIG_ETH1000=y +CONFIG_VIA_RHINE=m +CONFIG_VIA_RHINE_MMIO=y +CONFIG_VIA_VELOCITY=m +CONFIG_FDDI=m +CONFIG_MDIO_BITBANG=m +CONFIG_MDIO_GPIO=m +CONFIG_PHYLIB=y +CONFIG_DP83867_PHY=y +CONFIG_MARVELL_PHY=y +CONFIG_MICREL_PHY=y +CONFIG_NATIONAL_PHY=y +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SR9700=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_ATH9K=m +CONFIG_ATH9K_AHB=y +CONFIG_ATH9K_HTC=m +CONFIG_HOSTAP=m +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_MWIFIEX_USB=m +CONFIG_INPUT_POLLDEV=y +CONFIG_INPUT_SPARSEKMAP=m +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=m +CONFIG_MOUSE_SERIAL=m +CONFIG_INPUT_JOYSTICK=y +CONFIG_INPUT_TABLET=y +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_LTC2954=m +CONFIG_INPUT_UINPUT=m +CONFIG_SERIO_SERPORT=m +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y 
+CONFIG_DEVKMEM=y +CONFIG_SERIAL_8250=m +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=32 +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_L_ZILOG_CONSOLE=y +CONFIG_PRINTER=m +CONFIG_PPDEV=m +CONFIG_I2C=y +CONFIG_I2C_CHARDEV=m +CONFIG_SPI=y +CONFIG_SPI_SPIDEV=m +CONFIG_NTP_PPS=y +CONFIG_PTP_1588_CLOCK=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_PCA953X=m +CONFIG_SENSORS_ADT7475=m +CONFIG_SENSORS_JC42=m +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LM63=m +CONFIG_SENSORS_LM95231=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_NTC_THERMISTOR=m +CONFIG_PMBUS=m +CONFIG_SENSORS_LTC2978=m +CONFIG_SENSORS_LTC2978_REGULATOR=y +CONFIG_SENSORS_TPS53679=m +CONFIG_SENSORS_PWM_FAN=m +CONFIG_THERMAL=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_BANG_BANG=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y +CONFIG_CPU_THERMAL=y +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +CONFIG_L_WDT=y +CONFIG_REGULATOR=y +CONFIG_MEDIA_SUPPORT=m +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_ANALOG_TV_SUPPORT=y +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y +CONFIG_MEDIA_RADIO_SUPPORT=y +CONFIG_DVB_MAX_ADAPTERS=8 +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_VIDEO_CLASS=m +CONFIG_USB_PWC=m +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_DRM=m +# CONFIG_DRM_I2C_CH7006 is not set +CONFIG_DRM_RADEON=m +CONFIG_DRM_AMDGPU=m +CONFIG_DRM_AMDGPU_SI=y +CONFIG_DRM_AMDGPU_CIK=y +CONFIG_DRM_NOUVEAU=m +CONFIG_DRM_VIVANTE=m +CONFIG_DRM_NXP_PTN3460=m +CONFIG_FB=y +CONFIG_FB_TILEBLITTING=y +CONFIG_FB_VGA16=y +CONFIG_FB_RADEON=m +CONFIG_LCD_CLASS_DEVICE=m +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_SOUND=m +CONFIG_SND=m +CONFIG_SND_OSSEMUL=y +CONFIG_SND_MIXER_OSS=m +CONFIG_SND_PCM_OSS=m +CONFIG_SND_HRTIMER=m +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=m +CONFIG_SND_DUMMY=m +CONFIG_SND_VIRMIDI=m +CONFIG_SND_MTPAV=m +CONFIG_SND_CS4281=m +CONFIG_SND_HDA_INTEL=m +CONFIG_SND_HDA_HWDEP=y 
+CONFIG_SND_HDA_CODEC_ANALOG=m +CONFIG_SND_HDA_PREALLOC_SIZE=2048 +CONFIG_SND_USB_AUDIO=m +CONFIG_SND_SOC=m +CONFIG_SND_SOC_TLV320AIC26=m +CONFIG_SND_SIMPLE_CARD=m +CONFIG_HIDRAW=y +CONFIG_HID_A4TECH=m +CONFIG_HID_APPLE=m +CONFIG_HID_BELKIN=m +CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CYPRESS=m +CONFIG_HID_EZKEY=m +CONFIG_HID_GYRATION=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LOGITECH=y +CONFIG_LOGITECH_FF=y +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NTRIG=m +CONFIG_HID_PANTHERLORD=m +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PETALYNX=m +CONFIG_HID_SAMSUNG=m +CONFIG_HID_SONY=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_TOPSEED=m +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_MON=y +CONFIG_USB_XHCI_HCD=m +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_UHCI_HCD=m +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_STORAGE=y +CONFIG_USB_UAS=m +CONFIG_USB_SERIAL=m +CONFIG_USB_GADGET=m +CONFIG_MMC=y +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_PCI=m +# CONFIG_MMC_RICOH_MMC is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_MTHCA=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +CONFIG_EDAC=m +CONFIG_EDAC_E2K=m +CONFIG_RTC_DRV_DS1307=y +CONFIG_RTC_DRV_CY14B101P=y +CONFIG_RTC_DRV_FM33256=y +CONFIG_UIO=m +CONFIG_STAGING=y +CONFIG_VT6656=m +CONFIG_CRYSTALHD=m +CONFIG_IIO=m +CONFIG_INV_MPU6050_I2C=m +CONFIG_AK8975=m +CONFIG_PWM=y +CONFIG_RAS=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_REISERFS_FS=m +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_XFS_FS=m +CONFIG_XFS_POSIX_ACL=y +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y 
+CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QFMT_V2=y +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_INDEX=y +CONFIG_OVERLAY_FS_XINO_AUTO=y +CONFIG_OVERLAY_FS_METACOPY=y +CONFIG_FSCACHE=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=y +CONFIG_NTFS_FS=m +CONFIG_NTFS_RW=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_ORANGEFS_FS=m +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_EMBEDDED=y +CONFIG_MINIX_FS=m +CONFIG_ROMFS_FS=m +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_ROOT_NFS=y +CONFIG_NFS_FSCACHE=y +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CIFS=m +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_UTF8=y +CONFIG_BIG_KEYS=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +CONFIG_SECURITY_APPARMOR=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +# CONFIG_INTEGRITY_TRUSTED_KEYRING is not set +CONFIG_IMA=y +CONFIG_IMA_DEFAULT_HASH_SHA256=y +CONFIG_IMA_WRITE_POLICY=y +CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_TRUSTED_KEYRING is not set +CONFIG_EVM=y +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_ANUBIS=m 
+CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_USER_API_HASH=m +CONFIG_CRYPTO_USER_API_SKCIPHER=m +CONFIG_PRINTK_TIME=y +CONFIG_FRAME_WARN=0 +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_RCU_TRACE is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_BOOT_TRACE=y diff --git a/arch/e2k/configs/defconfig-guest-lms-pv b/arch/e2k/configs/defconfig-guest-lms-pv new file mode 100644 index 000000000000..6dc3e87f2a00 --- /dev/null +++ b/arch/e2k/configs/defconfig-guest-lms-pv @@ -0,0 +1,2928 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/e2k 4.19.72-0 Kernel Configuration +# + +# +# Compiler: lcc:1.24.07:Dec--4-2019:e2k-v2-linux.cross:i386-linux +# +CONFIG_GCC_VERSION=0 +CONFIG_CLANG_VERSION=0 +CONFIG_IRQ_WORK=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="-e2k-pv-guest" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZ4 is not set +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +CONFIG_USELIB=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_IRQ_DOMAIN=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CMOS_UPDATE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_HZ_PERIODIC=y +# CONFIG_NO_HZ_IDLE is not set +# CONFIG_NO_HZ_FULL is not set 
+CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT_NONE=y + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_BUILD_BIN2C=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +# CONFIG_MEMCG is not set +# CONFIG_BLK_CGROUP is not set +# CONFIG_CGROUP_SCHED is not set +# CONFIG_CGROUP_PIDS is not set +# CONFIG_CGROUP_RDMA is not set +# CONFIG_CGROUP_FREEZER is not set +# CONFIG_CGROUP_HUGETLB is not set +# CONFIG_CPUSETS is not set +# CONFIG_CGROUP_DEVICE is not set +# CONFIG_CGROUP_CPUACCT is not set +# CONFIG_CGROUP_PERF is not set +# CONFIG_CGROUP_DEBUG is not set +# CONFIG_NAMESPACES is not set +# CONFIG_CHECKPOINT_RESTORE is not set +# CONFIG_SCHED_AUTOGROUP is not set +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_EXPERT=y +CONFIG_MULTIUSER=y +# CONFIG_SGETMASK_SYSCALL is not set +CONFIG_SYSFS_SYSCALL=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_HAVE_FUTEX_CMPXCHG=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y 
+CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +# CONFIG_BPF_SYSCALL is not set +# CONFIG_USERFAULTFD is not set +CONFIG_EMBEDDED=y +CONFIG_HAVE_PERF_EVENTS=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# CONFIG_WATCH_PREEMPT is not set +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_SLUB_DEBUG is not set +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +CONFIG_SLAB_MERGE_DEFAULT=y +# CONFIG_SLAB_FREELIST_RANDOM is not set +# CONFIG_SLAB_FREELIST_HARDENED is not set +CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_PROFILING is not set +CONFIG_TRACEPOINTS=y +CONFIG_LTTNG=m +CONFIG_E2K=y +CONFIG_SWIOTLB=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ZONE_DMA=y +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_GENERIC_FIND_FIRST_BIT=y +CONFIG_GENERIC_FIND_NEXT_BIT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_EARLY_PRINTK=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_GREGS_CONTEXT=y +CONFIG_GENERIC_IOMAP=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_HAVE_GENERIC_GUP=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_SYS_SUPPORTS_HUGETLBFS=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_BOOT_TRACE_POSSIBLE=y +CONFIG_HOTPLUG_CPU=y +CONFIG_GENERIC_GPIO=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DEFERRED_STRUCT_PAGE_INIT=y +CONFIG_HOLES_IN_ZONE=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 + +# +# Processor type and features +# +CONFIG_E2K_MACHINE_SIC=y +CONFIG_E2K_MACHINE_IOHUB=y +CONFIG_LOCKDEP_SUPPORT=y 
+CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +# CONFIG_E2K_MACHINE is not set +CONFIG_CPU_ES2=y +CONFIG_CPU_E2S=y +CONFIG_CPU_E8C=y +CONFIG_CPU_E1CP=y +CONFIG_CPU_E8C2=y +CONFIG_CPU_E12C=y +CONFIG_CPU_E16C=y +CONFIG_CPU_E2C3=y +CONFIG_CPU_ISET=0 + +# +# MMU Page Tables features +# +# CONFIG_MMU_PT_V6 is not set +CONFIG_MMU_SEP_VIRT_SPACE=y +# CONFIG_DYNAMIC_SEP_VIRT_SPACE is not set +CONFIG_ENABLE_EXTMEM=y +CONFIG_E16_CORE_SUPPORT=y +CONFIG_SMP_DAM_BUG=y +CONFIG_HZ_100=y +# CONFIG_HZ_250 is not set +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +# CONFIG_HZ_10 is not set +# CONFIG_HZ_20 is not set +CONFIG_HZ=100 +CONFIG_SCHED_HRTICK=y +CONFIG_GLOBAL_CONTEXT=y +# CONFIG_SECONDARY_SPACE_SUPPORT is not set +CONFIG_ONLY_HIGH_PHYS_MEM=y +# CONFIG_ONLY_BSP_MEMORY is not set +CONFIG_FORCE_MAX_ZONEORDER=15 +CONFIG_NODES_SHIFT=2 +CONFIG_NODES_SPAN_OTHER_NODES=y +CONFIG_NUMA=y +CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT=y +# CONFIG_KTHREAD_ON_CPU is not set +CONFIG_BOOT_PRINTK=y +# CONFIG_SERIAL_BOOT_PRINTK is not set +CONFIG_EARLY_VIRTIO_CONSOLE=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_MEMLIMIT=2048 +CONFIG_EXT_MEMLIMIT=2048 +CONFIG_VRAM_SIZE_128=y +# CONFIG_VRAM_SIZE_256 is not set +# CONFIG_VRAM_SIZE_512 is not set +# CONFIG_VRAM_SIZE_1024 is not set +# CONFIG_VRAM_DISABLE is not set +# CONFIG_USR_CONTROL_INTERRUPTS is not set +# CONFIG_KERNEL_TIMES_ACCOUNT is not set +# CONFIG_CLI_CHECK_TIME is not set +# CONFIG_CMDLINE_PROMPT is not set +CONFIG_MAKE_ALL_PAGES_VALID=y +CONFIG_USE_AAU=y +CONFIG_DATA_STACK_WINDOW=y +CONFIG_BINFMT_ELF32=y +CONFIG_COMPAT=y +CONFIG_PROTECTED_MODE=y +# CONFIG_DBG_RTL_TRACE is not set +CONFIG_CLW_ENABLE=y +# CONFIG_IPD_DISABLE is not set +CONFIG_TC_STORAGE=y +# CONFIG_IGNORE_MEM_LOCK_AS is not set +CONFIG_RECOVERY=y +CONFIG_MONITORS=y +CONFIG_E2K_KEXEC=y +CONFIG_DUMP_ALL_STACKS=y +CONFIG_CMDLINE="root=/dev/hda1 console=ttyLMS0 console=ttyS0,115200 sclkr=no lowmem_enable init=/root/run_qemu 
transparent_hugepage=madvise iommu=0" +CONFIG_SMP=y +CONFIG_NR_CPUS=64 +CONFIG_SCHED_MC=y +CONFIG_MCST=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_GENERIC_TIME=y +CONFIG_SCLKR_CLOCKSOURCE=y +CONFIG_BIOS=y +CONFIG_ENABLE_BIOS_MPTABLE=y +CONFIG_CEPIC_TIMER_FREQUENCY=100 +CONFIG_ENABLE_ELBRUS_PCIBIOS=y +CONFIG_ENABLE_IDE=y +CONFIG_ENABLE_KEYBOARD=y +CONFIG_ENABLE_MOUSE=y +CONFIG_ENABLE_FLOPPY=y +CONFIG_ENABLE_MGA=y +CONFIG_ENABLE_RTC=y +CONFIG_ENABLE_SERIAL=y +CONFIG_ENABLE_PARALLEL_PORT=y +CONFIG_ENABLE_IOAPIC=y +# CONFIG_ADC_DISABLE is not set +CONFIG_OPTIMIZE_REGISTERS_ACCESS=y +# CONFIG_E2K_STACKS_TRACER is not set +CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y +# CONFIG_EPROF_KERNEL is not set +CONFIG_IOMMU_HELPER=y +CONFIG_HAVE_DMA_ATTRS=y + +# +# Elbrus Architecture Linux Kernel Configuration +# +CONFIG_EPIC=y + +# +# Boot/prom console support +# +CONFIG_L_EARLY_PRINTK=y +# CONFIG_SERIAL_PRINTK is not set +CONFIG_NVRAM_PANIC=y +CONFIG_EARLY_DUMP_CONSOLE=y +CONFIG_CLKR_CLOCKSOURCE=y +CONFIG_CLKR_OFFSET=y +CONFIG_IOHUB_GPIO=y +# CONFIG_PIC is not set +CONFIG_L_X86_64=y +CONFIG_NUMA_IRQ_DESC=y +CONFIG_L_LOCAL_APIC=y +CONFIG_L_IO_APIC=y +CONFIG_L_PCI_QUIRKS=y +CONFIG_L_SIC_IPLINK_OFF=y +CONFIG_L_MMPD=y +CONFIG_L_PMC=y +CONFIG_I2C_SPI_RESET_CONTROLLER=y +CONFIG_L_I2C_CONTROLLER=y +CONFIG_L_SPI_CONTROLLER=y +CONFIG_IPE2ST_POWER=m +CONFIG_ACPI_L_SPMC=y + +# +# Device Tree +# +# CONFIG_OF is not set + +# +# Power management options (ACPI, APM) +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_SUSPEND_SKIP_SYNC is not set +# CONFIG_HIBERNATION is not set +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# 
CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_PSTATES is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PSTATES=y + +# +# CPU frequency scaling drivers +# + +# +# APM (Advanced Power Management) BIOS Support +# +# CONFIG_APM is not set + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_CPU_IDLE_GOV_RT=y + +# +# E2K CPU Idle Drivers +# +CONFIG_E2K_CPUIDLE=m + +# +# E2K CPU Idle Drivers +# + +# +# Bus options (PCI, PCMCIA, EISA, MCA, ISA) +# + +# +# Elbrus chipset PCI support +# +CONFIG_PCI_ELBRUS=y +CONFIG_IOHUB_DOMAINS=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI=y +# CONFIG_PCI_USE_VECTOR is not set +# CONFIG_PCIEPORTBUS is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_STUB is not set +# CONFIG_PCI_IOV is not set +# CONFIG_PCI_PRI is not set +# CONFIG_PCI_PASID is not set +# CONFIG_HOTPLUG_PCI is not set + +# +# PCI controller drivers +# + +# +# Cadence PCIe controllers support +# + +# +# DesignWare PCI Core Support +# + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +CONFIG_ISA=y +# CONFIG_PCCARD is not set +CONFIG_SYSVIPC_COMPAT=y +CONFIG_VIRTUALIZATION=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_COMPAT=y +CONFIG_KVM=y +CONFIG_KVM_GUEST=y +CONFIG_KVM_PARAVIRTUALIZATION=y +CONFIG_KVM_HW_VIRTUALIZATION=y 
+CONFIG_KVM_HW_PARAVIRTUALIZATION=y +# CONFIG_KVM_HOST_KERNEL is not set +CONFIG_KVM_GUEST_KERNEL=y +# CONFIG_PARAVIRT_GUEST is not set +CONFIG_KVM_GUEST_HW_PV=y +CONFIG_KVM_GUEST_HW_HCALL=y +CONFIG_KVM_GUEST_HW_EPIC=y +CONFIG_KVM_SHADOW_PT=y +CONFIG_KVM_GUEST_MODE=y +CONFIG_KVM_GUEST_SMP=y +CONFIG_PARAVIRT_SPINLOCKS=y +# CONFIG_QUEUED_LOCK_STAT is not set +# CONFIG_KVM_GUEST_DEBUG is not set +# CONFIG_GUEST_LAPIC_REGS is not set +CONFIG_DIRECT_VIRQ_INJECTION=y +CONFIG_VIRQ_VCPU_INJECTION=y +CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +# CONFIG_VIRTIO_BALLOON is not set +# CONFIG_VIRTIO_INPUT is not set +CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y + +# +# General architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_KPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_TASK_STRUCT_ALLOCATOR=y +CONFIG_ARCH_THREAD_STACK_ALLOCATOR=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_ARCH_WEAK_RELEASE_ACQUIRE=y +CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=28 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 +CONFIG_HAVE_COPY_THREAD_TLS=y +CONFIG_ISA_BUS_API=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_ARCH_HAS_PHYS_TO_DMA=y +# CONFIG_REFCOUNT_FULL is not set 
+CONFIG_HAVE_ARCH_COMPILER_H=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +# CONFIG_MODULE_SIG is not set +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +# CONFIG_BLK_DEV_ZONED is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +# CONFIG_BLK_WBT is not set +CONFIG_BLK_DEBUG_FS=y +# CONFIG_BLK_SED_OPAL is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +CONFIG_SUN_PARTITION=y +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +# CONFIG_IOSCHED_BFQ is not set +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y 
+CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_SPIN_UNLOCK_BH=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_BH=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y + +# +# Memory Management options +# +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_NO_BOOTMEM=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_BOUNCE=y +CONFIG_MMU_NOTIFIER=y +# CONFIG_KSM is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +# CONFIG_CLEANCACHE is not set +# CONFIG_FRONTSWAP is not set +# CONFIG_CMA is not set +# CONFIG_ZPOOL is not set +# CONFIG_ZBUD is not set +# CONFIG_ZSMALLOC is not set +# CONFIG_IDLE_PAGE_TRACKING is not set +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not set 
+CONFIG_ARCH_HAS_PTE_SPECIAL=y +# CONFIG_NET is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_SPI=y + +# +# Elbrus MCST Device Drivers +# +CONFIG_DDI=y +CONFIG_MGA2_GPIO=m +CONFIG_MSPS=m +CONFIG_MPV=m +# CONFIG_MGPM is not set +CONFIG_MMRM=m +CONFIG_MOKM=m +CONFIG_RDMA=m +CONFIG_RDMA_SIC=m +CONFIG_RDMA_M=m +CONFIG_MOKX=m +CONFIG_WD=y +# CONFIG_DMP_ASSIST is not set +CONFIG_LPTOUTS=m +CONFIG_M2MLC=m +CONFIG_APKPWR=m +CONFIG_MEM2ALLOC=m +CONFIG_HANTRODEC=m +CONFIG_BIGE=m +CONFIG_IMGTEC=m +CONFIG_BUS_MASTERING=y +CONFIG_VXD_FPGA=y +CONFIG_SMI_GPIO=m + +# +# Bus devices +# +# CONFIG_GNSS is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# Partition parsers +# + +# +# User Modules And Translation Layers +# +# CONFIG_MTD_BLOCK is not set +# CONFIG_MTD_BLOCK_RO is not set +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is 
not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_SPI_NOR is not set +# CONFIG_MTD_UBI is not set +CONFIG_PARPORT=m +CONFIG_PARPORT_MCST=m +# CONFIG_PARPORT_AX88796 is not set +CONFIG_PARPORT_1284=y +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +# CONFIG_ISAPNP is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_NULL_BLK is not set +# CONFIG_BLK_DEV_FD is not set +CONFIG_CDROM=y +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +CONFIG_BLK_DEV_CRYPTOLOOP=m + +# +# DRBD disabled because PROC_FS or INET not selected +# +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=4096 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +CONFIG_CDROM_PKTCDVD_WCACHE=y +CONFIG_VIRTIO_BLK=y +# CONFIG_VIRTIO_BLK_SCSI is not set +# CONFIG_BLK_DEV_RSXX is not set + +# +# NVME Support +# +# CONFIG_BLK_DEV_NVME is not set +# CONFIG_NVME_FC is not set +# CONFIG_NVME_TARGET is not set + +# +# Misc devices +# +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +# CONFIG_SGI_IOC4 is not set +# CONFIG_TIFM_CORE is not set +# 
CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29020 is not set +CONFIG_ISL22317=m +# CONFIG_ISL29003 is not set +CONFIG_LTC4306=m +CONFIG_UCD9080=m +CONFIG_I2C_P2PMC=m +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_USB_SWITCH_FSA9480 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +CONFIG_EEPROM_AT24=m +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_SENSORS_LIS3_SPI is not set +# CONFIG_SENSORS_LIS3_I2C is not set +# CONFIG_ALTERA_STAPL is not set + +# +# Intel MIC & related support +# + +# +# Intel MIC Bus Driver +# + +# +# SCIF Bus Driver +# + +# +# VOP Bus Driver +# + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_MISC_RTSX_PCI is not set +CONFIG_HAVE_IDE=y +CONFIG_IDE=y + +# +# Please see Documentation/ide/ide.txt for help/info on IDE drives +# +CONFIG_IDE_XFER_MODE=y +CONFIG_IDE_ATAPI=y +# CONFIG_BLK_DEV_IDE_SATA is not set +CONFIG_IDE_GD=y +CONFIG_IDE_GD_ATA=y +# CONFIG_IDE_GD_ATAPI is not set +CONFIG_BLK_DEV_IDECD=y +CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y +# CONFIG_BLK_DEV_IDETAPE is not set +# CONFIG_IDE_TASK_IOCTL is not set +CONFIG_IDE_PROC_FS=y + +# +# IDE chipset support/bugfixes +# +# CONFIG_BLK_DEV_PLATFORM is not set +# 
CONFIG_BLK_DEV_IDEPNP is not set +CONFIG_BLK_DEV_IDEDMA_SFF=y + +# +# PCI IDE chipsets support +# +CONFIG_BLK_DEV_IDEPCI=y +CONFIG_IDEPCI_PCIBUS_ORDER=y +# CONFIG_BLK_DEV_OFFBOARD is not set +CONFIG_BLK_DEV_GENERIC=y +# CONFIG_BLK_DEV_OPTI621 is not set +CONFIG_BLK_DEV_IDEDMA_PCI=y +# CONFIG_BLK_DEV_AEC62XX is not set +# CONFIG_BLK_DEV_ALI15X3 is not set +# CONFIG_BLK_DEV_AMD74XX is not set +# CONFIG_BLK_DEV_CMD64X is not set +# CONFIG_BLK_DEV_TRIFLEX is not set +# CONFIG_BLK_DEV_HPT366 is not set +# CONFIG_BLK_DEV_JMICRON is not set +CONFIG_BLK_DEV_PIIX=y +CONFIG_BLK_DEV_ELBRUS=y +# CONFIG_BLK_DEV_IT8172 is not set +# CONFIG_BLK_DEV_IT8213 is not set +# CONFIG_BLK_DEV_IT821X is not set +# CONFIG_BLK_DEV_NS87415 is not set +# CONFIG_BLK_DEV_PDC202XX_OLD is not set +# CONFIG_BLK_DEV_PDC202XX_NEW is not set +# CONFIG_BLK_DEV_SVWKS is not set +# CONFIG_BLK_DEV_SIIMAGE is not set +# CONFIG_BLK_DEV_SLC90E66 is not set +# CONFIG_BLK_DEV_TRM290 is not set +# CONFIG_BLK_DEV_VIA82CXXX is not set +# CONFIG_BLK_DEV_TC86C001 is not set +CONFIG_BLK_DEV_IDEDMA=y + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +# CONFIG_SCSI_MQ_DEFAULT is not set +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +# CONFIG_CHR_DEV_OSST is not set +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=m +# CONFIG_CHR_DEV_SCH is not set +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_SAS_ATTRS=m +# CONFIG_SCSI_SAS_LIBSAS is not set +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_LOWLEVEL=y +# CONFIG_ISCSI_BOOT_SYSFS is not set +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +# CONFIG_SCSI_HPSA is not set +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AHA152X is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# 
CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=m +CONFIG_MEGARAID_MAILBOX=m +CONFIG_MEGARAID_LEGACY=m +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +# CONFIG_SCSI_MPT2SAS is not set +# CONFIG_SCSI_SMARTPQI is not set +# CONFIG_SCSI_UFSHCD is not set +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_GENERIC_NCR5380 is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_FAS is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_VIRTIO is not set +# CONFIG_SCSI_DH is not set +# CONFIG_SCSI_OSD_INITIATOR is not set +CONFIG_ATA=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 +# CONFIG_SATA_AHCI_PLATFORM is not set +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +CONFIG_SATA_SIL24=y +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +# CONFIG_ATA_PIIX is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +CONFIG_SATA_SIL=y +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE 
is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_QDI is not set +# CONFIG_PATA_RZ1000 is not set +# CONFIG_PATA_WINBOND_VLB is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_ATA_GENERIC is not set +# CONFIG_PATA_LEGACY is not set +# CONFIG_MD is not set +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +# CONFIG_LOOPBACK_TARGET is not set +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +CONFIG_INPUT_POLLDEV=y +# 
CONFIG_INPUT_SPARSEKMAP is not set +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=m +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +CONFIG_KEYBOARD_SUNKBD=m +# CONFIG_KEYBOARD_XTKBD is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +# CONFIG_MOUSE_PS2_ELANTECH is not set +# CONFIG_MOUSE_PS2_SENTELIC is not set +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_SMBUS=y +CONFIG_MOUSE_SERIAL=m +# CONFIG_MOUSE_CYAPA is not set +# CONFIG_MOUSE_ELAN_I2C is not set +# CONFIG_MOUSE_INPORT is not set +# CONFIG_MOUSE_LOGIBM is not set +# CONFIG_MOUSE_PC110PAD is not set +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_MOUSE_GPIO is not set +# CONFIG_MOUSE_SYNAPTICS_I2C is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not 
set +# CONFIG_RMI4_CORE is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_SERIO_SERPORT=m +# CONFIG_SERIO_PARKBD is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +# CONFIG_SERIO_RAW is not set +# CONFIG_SERIO_ALTERA_PS2 is not set +# CONFIG_SERIO_PS2MULT is not set +# CONFIG_SERIO_ARC_PS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_ROCKETPORT is not set +# CONFIG_CYCLADES is not set +# CONFIG_MOXA_INTELLIO is not set +CONFIG_MOXA_SMARTIO=m +# CONFIG_SYNCLINKMP is not set +# CONFIG_SYNCLINK_GT is not set +# CONFIG_NOZOMI is not set +# CONFIG_ISI is not set +# CONFIG_N_HDLC is not set +# CONFIG_TRACE_SINK is not set +CONFIG_LDISC_AUTOLOAD=y +CONFIG_DEVMEM=y +CONFIG_DEVKMEM=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=32 +CONFIG_SERIAL_8250_EXTENDED=y +# CONFIG_SERIAL_8250_MANY_PORTS is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +# CONFIG_SERIAL_8250_RSA is not set +# CONFIG_SERIAL_8250_DW is not set +# CONFIG_SERIAL_8250_RT288X is not set +# CONFIG_SERIAL_8250_MOXA is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set 
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_LMS_CONSOLE is not set +CONFIG_SERIAL_L_ZILOG=y +# CONFIG_SERIAL_L_ZILOG_CONSOLE is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_DEV_BUS is not set +# CONFIG_TTY_PRINTK is not set +CONFIG_PRINTER=m +# CONFIG_LP_CONSOLE is not set +CONFIG_PPDEV=m +CONFIG_HVC_DRIVER=y +CONFIG_HVC_L=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_IPMI_HANDLER=m +# CONFIG_IPMI_PANIC_EVENT is not set +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +# CONFIG_IPMI_SSIF is not set +# CONFIG_IPMI_WATCHDOG is not set +CONFIG_IPMI_POWEROFF=m +# CONFIG_DTLK is not set +# CONFIG_APPLICOM is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +CONFIG_DEVPORT=y +# CONFIG_XILLYBUS is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +# CONFIG_I2C_MUX_GPIO is not set +# CONFIG_I2C_MUX_LTC4306 is not set +# CONFIG_I2C_MUX_PCA9541 is not set +# CONFIG_I2C_MUX_PCA954x is not set +# CONFIG_I2C_MUX_REG is not set +# CONFIG_I2C_MUX_MLXCPLD is not set +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOBIT=y + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CBUS_GPIO is not set +# CONFIG_I2C_DESIGNWARE_PLATFORM is not set +# 
CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_PCA_ISA is not set +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MEM is not set + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_BUTTERFLY is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_LM70_LLP is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_ROCKCHIP is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not set + +# +# SPI Protocol Masters +# +CONFIG_SPI_SPIDEV=m +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set +CONFIG_NTP_PPS=y + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_PARPORT is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# + +# +# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. 
+# +# CONFIG_PINCTRL is not set +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_DWAPB is not set +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_GENERIC_PLATFORM is not set +# CONFIG_GPIO_MB86S7X is not set +# CONFIG_GPIO_MOCKUP is not set + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADP5588 is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +CONFIG_GPIO_PCA953X=m +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set + +# +# MFD GPIO expanders +# + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +# CONFIG_POWER_RESET is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_MANAGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_LTC3651 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_SMB347 is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_CHARGER_RT9455 is not set +CONFIG_HWMON=y 
+CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7314 is not set +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7310 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +CONFIG_SENSORS_ADT7475=m +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_ASPEED is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_IBMAEM is not set +# CONFIG_SENSORS_IBMPEX is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +# CONFIG_SENSORS_POWR1220 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2990 is not set +CONFIG_SENSORS_LTC4151=m +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4222 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4260 is not set +# CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_MAX1111 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX6621 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not 
set +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_ADCXX is not set +CONFIG_SENSORS_LM63=m +# CONFIG_SENSORS_LM70 is not set +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +CONFIG_SENSORS_LM95231=m +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +CONFIG_SENSORS_LM95245=m +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set +# CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_PCF8591 is not set +CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +# CONFIG_SENSORS_ADM1275 is not set +# CONFIG_SENSORS_IR35221 is not set +# CONFIG_SENSORS_LM25066 is not set +# CONFIG_SENSORS_LTC2978 is not set +# CONFIG_SENSORS_LTC3815 is not set +# CONFIG_SENSORS_MAX16064 is not set +# CONFIG_SENSORS_MAX20751 is not set +# CONFIG_SENSORS_MAX31785 is not set +# CONFIG_SENSORS_MAX34440 is not set +# CONFIG_SENSORS_MAX8688 is not set +# CONFIG_SENSORS_TPS40422 is not set +# CONFIG_SENSORS_TPS53679 is not set +# CONFIG_SENSORS_UCD9000 is not set +# CONFIG_SENSORS_UCD9200 is not set +# CONFIG_SENSORS_ZL6100 is not set +# CONFIG_SENSORS_SHT15 is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 
is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +# CONFIG_SENSORS_ADS1015 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_ADS7871 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83773G is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +# CONFIG_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_AS3711 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_HTC_PASIC3 
is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_REGULATOR is not set +CONFIG_RC_CORE=y 
+CONFIG_RC_MAP=y +# CONFIG_LIRC is not set +CONFIG_RC_DECODERS=y +CONFIG_IR_NEC_DECODER=y +CONFIG_IR_RC5_DECODER=y +CONFIG_IR_RC6_DECODER=y +CONFIG_IR_JVC_DECODER=y +CONFIG_IR_SONY_DECODER=y +CONFIG_IR_SANYO_DECODER=y +CONFIG_IR_SHARP_DECODER=y +CONFIG_IR_MCE_KBD_DECODER=y +CONFIG_IR_XMP_DECODER=y +# CONFIG_IR_IMON_DECODER is not set +# CONFIG_RC_DEVICES is not set +CONFIG_MEDIA_SUPPORT=m + +# +# Multimedia core support +# +# CONFIG_MEDIA_CAMERA_SUPPORT is not set +# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set +# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set +# CONFIG_MEDIA_RADIO_SUPPORT is not set +# CONFIG_MEDIA_SDR_SUPPORT is not set +# CONFIG_MEDIA_CEC_SUPPORT is not set +# CONFIG_VIDEO_ADV_DEBUG is not set +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set + +# +# Media drivers +# +# CONFIG_MEDIA_PCI_SUPPORT is not set + +# +# Supported MMC/SDIO adapters +# + +# +# Media ancillary drivers (tuners, sensors, i2c, spi, frontends) +# + +# +# Media SPI Adapters +# + +# +# Customise DVB Frontends +# + +# +# Tools to develop new frontends +# + +# +# Graphics support +# +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=16 +# CONFIG_DRM is not set +# CONFIG_DRM_DP_CEC is not set + +# +# ACP (Audio CoProcessor) Configuration +# + +# +# AMD Library routines +# +# CONFIG_DRM_IMX_HDMI is not set + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB_DDC=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_BACKLIGHT=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_VGA16 is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is 
not set +# CONFIG_FB_MATROX is not set +CONFIG_FB_RADEON=y +CONFIG_FB_RADEON_I2C=y +CONFIG_FB_RADEON_BACKLIGHT=y +# CONFIG_FB_RADEON_DEBUG is not set +CONFIG_FB_ATY128=y +CONFIG_FB_ATY128_BACKLIGHT=y +CONFIG_FB_ATY=y +# CONFIG_FB_ATY_CT is not set +# CONFIG_FB_ATY_GX is not set +CONFIG_FB_ATY_BACKLIGHT=y +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SM712 is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +# CONFIG_LCD_PLATFORM is not set +# CONFIG_LCD_S6E63M0 is not set +# CONFIG_LCD_LD9040 is not set +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_GENERIC is not set +# CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3639 is not set +# CONFIG_BACKLIGHT_GPIO is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# CONFIG_PCI_FB_MGAM83 is not set +# CONFIG_FB_MGA3D is not set +# CONFIG_FB_LYNXFB is not set + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +# 
CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +# CONFIG_FRAMEBUFFER_CONSOLE is not set +# CONFIG_LOGO is not set +# CONFIG_SOUND is not set + +# +# HID support +# +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +# CONFIG_HIDRAW is not set +# CONFIG_UHID is not set +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +# CONFIG_HID_A4TECH is not set +# CONFIG_HID_ACRUX is not set +# CONFIG_HID_APPLE is not set +# CONFIG_HID_AUREAL is not set +# CONFIG_HID_BELKIN is not set +# CONFIG_HID_CHERRY is not set +# CONFIG_HID_CHICONY is not set +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CYPRESS is not set +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_EZKEY is not set +# CONFIG_HID_GEMBIRD is not set +# CONFIG_HID_GFRM is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_ICADE is not set +# CONFIG_HID_ITE is not set +# CONFIG_HID_JABRA is not set +# CONFIG_HID_TWINHAN is not set +# CONFIG_HID_KENSINGTON is not set +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LENOVO is not set +CONFIG_HID_LOGITECH=y +# CONFIG_HID_LOGITECH_HIDPP is not set +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_REDRAGON is not set +# CONFIG_HID_MICROSOFT is not set +# CONFIG_HID_MONTEREY is not set +CONFIG_HID_MULTITOUCH=y +# CONFIG_HID_NTI is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PLANTRONICS is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SPEEDLINK is not set 
+# CONFIG_HID_STEAM is not set +# CONFIG_HID_STEELSERIES is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_XINMO is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_HID_SENSOR_HUB is not set +# CONFIG_HID_ALPS is not set + +# +# I2C HID support +# +# CONFIG_I2C_HID is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +# CONFIG_USB_SUPPORT is not set +# CONFIG_UWB is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_MC146818_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABX80X is not set +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8523 is not set +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8010 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is 
not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV8803 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +CONFIG_RTC_DRV_CY14B101P=y +CONFIG_RTC_DRV_FM33256=y +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RX4581 is not set +# CONFIG_RTC_DRV_RX6110 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_CMOS=y +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_DS2404 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_FTRTC010 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_DMADEVICES is not set + +# +# DMABUF options +# +# CONFIG_SYNC_FILE is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_PANEL is not set +CONFIG_UIO=m +# CONFIG_UIO_CIF is not set +# CONFIG_UIO_PDRV_GENIRQ is not set +# CONFIG_UIO_DMEM_GENIRQ is not set +# CONFIG_UIO_AEC is not set +# CONFIG_UIO_SERCOS3 is not set +# CONFIG_UIO_PCI_GENERIC is not set +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not 
set +# CONFIG_VFIO is not set +# CONFIG_VIRT_DRIVERS is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_STAGING is not set +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +# CONFIG_MAILBOX is not set +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +# CONFIG_IOMMU_DEBUGFS is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_IOMMU_IOVA=y +CONFIG_IOMMU_DMA=y + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_VIRTIO is not set +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# + +# +# Broadcom SoC drivers +# + +# +# NXP/Freescale QorIQ SoC drivers +# + +# +# i.MX SoC drivers +# + +# +# Qualcomm SoC drivers +# +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# CONFIG_XILINX_VCU is not set +# CONFIG_PM_DEVFREQ is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_NTB is not set +# CONFIG_VME_BUS is not set +# CONFIG_PWM is not set + +# +# IRQ chip support +# +CONFIG_ARM_GIC_MAX_NR=1 +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set +# CONFIG_FMC is not set + +# +# PHY Subsystem +# +# CONFIG_GENERIC_PHY is not set +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_RAS is not set + +# +# Android +# +# CONFIG_ANDROID is not set +# CONFIG_LIBNVDIMM is not set +# CONFIG_DAX is not set +CONFIG_NVMEM=y + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# CONFIG_FPGA is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set + +# +# File systems +# +CONFIG_FS_IOMAP=y +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y 
+CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_ENCRYPTION is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +CONFIG_REISERFS_FS=m +# CONFIG_REISERFS_CHECK is not set +# CONFIG_REISERFS_PROC_INFO is not set +# CONFIG_REISERFS_FS_XATTR is not set +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +# CONFIG_JFS_SECURITY is not set +# CONFIG_JFS_DEBUG is not set +# CONFIG_JFS_STATISTICS is not set +CONFIG_XFS_FS=m +# CONFIG_XFS_QUOTA is not set +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is not set +CONFIG_BTRFS_FS=m +# CONFIG_BTRFS_FS_POSIX_ACL is not set +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_FS_DAX is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +# CONFIG_EXPORTFS_BLOCK_OPS is not set +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set +CONFIG_QUOTA=y +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS4_FS=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +# CONFIG_OVERLAY_FS is not set + +# +# Caches +# +CONFIG_FSCACHE=y +# CONFIG_FSCACHE_STATS is not set +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +# CONFIG_CACHEFILES is not set + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m + +# +# DOS/FAT/NT Filesystems +# 
+CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_FAT_DEFAULT_UTF8 is not set +CONFIG_NTFS_FS=m +# CONFIG_NTFS_DEBUG is not set +CONFIG_NTFS_RW=y + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +# CONFIG_PROC_CHILDREN is not set +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_CONFIGFS_FS=m +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_CRAMFS is not set +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +# CONFIG_SQUASHFS_XATTR is not set +CONFIG_SQUASHFS_ZLIB=y +# CONFIG_SQUASHFS_LZ4 is not set +# CONFIG_SQUASHFS_LZO is not set +# CONFIG_SQUASHFS_XZ is not set +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +CONFIG_SQUASHFS_EMBEDDED=y +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +CONFIG_ROMFS_FS=m +CONFIG_ROMFS_BACKED_BY_BLOCK=y +# CONFIG_ROMFS_BACKED_BY_MTD is not set +# CONFIG_ROMFS_BACKED_BY_BOTH is not set +CONFIG_ROMFS_ON_BLOCK=y +# CONFIG_PSTORE is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=m +# CONFIG_NLS_CODEPAGE_737 is not set +# 
CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +CONFIG_NLS_CODEPAGE_855=m +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +CONFIG_NLS_CODEPAGE_866=m +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=m +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +CONFIG_NLS_ISO8859_5=m +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +# CONFIG_NLS_MAC_ROMAN is not set +# CONFIG_NLS_MAC_CELTIC is not set +# CONFIG_NLS_MAC_CENTEURO is not set +# CONFIG_NLS_MAC_CROATIAN is not set +CONFIG_NLS_MAC_CYRILLIC=m +# CONFIG_NLS_MAC_GAELIC is not set +# CONFIG_NLS_MAC_GREEK is not set +# CONFIG_NLS_MAC_ICELAND is not set +# CONFIG_NLS_MAC_INUIT is not set +# CONFIG_NLS_MAC_ROMANIAN is not set +# CONFIG_NLS_MAC_TURKISH is not set +CONFIG_NLS_UTF8=m + +# +# Security options +# + +# +# Miscellaneous hardening features +# +CONFIG_MCST_MEMORY_SANITIZE=y +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +# CONFIG_PERSISTENT_KEYRINGS is not set +# CONFIG_BIG_KEYS is not set +# CONFIG_ENCRYPTED_KEYS is not set +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +# CONFIG_SECURITYFS is not set 
+CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_PATH=y +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +# CONFIG_HARDENED_USERCOPY is not set +# CONFIG_STATIC_USERMODEHELPER is not set +# CONFIG_SECURITY_LOADPIN is not set +# CONFIG_SECURITY_YAMA is not set +CONFIG_INTEGRITY=y +# CONFIG_INTEGRITY_SIGNATURE is not set +# CONFIG_IMA is not set +# CONFIG_EVM is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_XOR_BLOCKS=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=m +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=m +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=m +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_ACOMP2=y +# CONFIG_CRYPTO_RSA is not set +# CONFIG_CRYPTO_DH is not set +# CONFIG_CRYPTO_ECDH is not set +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_GF128MUL=m +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_NULL2=y +# CONFIG_CRYPTO_PCRYPT is not set +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=m +# CONFIG_CRYPTO_MCRYPTD is not set +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m +# CONFIG_CRYPTO_CHACHA20POLY1305 is not set +# CONFIG_CRYPTO_AEGIS128 is not set +# CONFIG_CRYPTO_AEGIS128L is not set +# CONFIG_CRYPTO_AEGIS256 is not set +# CONFIG_CRYPTO_MORUS640 is not set +# CONFIG_CRYPTO_MORUS1280 is not set +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_ECHAINIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CFB is not set +CONFIG_CRYPTO_CTR=m +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=m +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=m +# CONFIG_CRYPTO_KEYWRAP is not set + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_HMAC=m +# CONFIG_CRYPTO_XCBC is not set +# 
CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_CRC32 is not set +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_GHASH=m +# CONFIG_CRYPTO_POLY1305 is not set +CONFIG_CRYPTO_MD4=m +# CONFIG_CRYPTO_MD5 is not set +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=m +CONFIG_CRYPTO_SHA256=m +CONFIG_CRYPTO_SHA512=m +# CONFIG_CRYPTO_SHA3 is not set +# CONFIG_CRYPTO_SM3 is not set +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m + +# +# Ciphers +# +# CONFIG_CRYPTO_AES is not set +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +# CONFIG_CRYPTO_DES is not set +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_CHACHA20 is not set +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +# CONFIG_CRYPTO_SM4 is not set +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=m +# CONFIG_CRYPTO_LZO is not set +# CONFIG_CRYPTO_842 is not set +# CONFIG_CRYPTO_LZ4 is not set +# CONFIG_CRYPTO_LZ4HC is not set +# CONFIG_CRYPTO_ZSTD is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_DRBG_HMAC=y +# CONFIG_CRYPTO_DRBG_HASH is not set +# CONFIG_CRYPTO_DRBG_CTR is not set +CONFIG_CRYPTO_DRBG=m +CONFIG_CRYPTO_JITTERENTROPY=m +# CONFIG_CRYPTO_HW is not set +# CONFIG_ASYMMETRIC_KEY_TYPE is not set + +# +# Certificates for signature checking +# +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_BITREVERSE=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y 
+CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC64 is not set +# CONFIG_CRC4 is not set +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=m +# CONFIG_CRC8 is not set +CONFIG_XXHASH=m +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=m +CONFIG_LZO_COMPRESS=m +CONFIG_LZO_DECOMPRESS=m +CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=m +CONFIG_XZ_DEC=y +# CONFIG_XZ_DEC_X86 is not set +# CONFIG_XZ_DEC_POWERPC is not set +# CONFIG_XZ_DEC_IA64 is not set +# CONFIG_XZ_DEC_ARM is not set +# CONFIG_XZ_DEC_ARMTHUMB is not set +# CONFIG_XZ_DEC_SPARC is not set +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_RADIX_TREE_MULTIORDER=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_DMA_DIRECT_OPS=y +CONFIG_SGL_ALLOC=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +# CONFIG_CORDIC is not set +# CONFIG_DDR is not set +CONFIG_IRQ_POLL=y +CONFIG_SG_POOL=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +# CONFIG_PRINTK_TIME is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_DYNAMIC_DEBUG is not set + +# +# Compile-time checks and compiler options +# +# CONFIG_DEBUG_INFO is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=4096 +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_READABLE_ASM is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_PAGE_OWNER is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_MAGIC_SYSRQ=y 
+CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_PER_CPU_MAPS is not set +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Lockups and Hangs +# +# CONFIG_SOFTLOCKUP_DETECTOR is not set +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_WQ_WATCHDOG is not set +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +# CONFIG_SCHED_DEBUG is not set +# CONFIG_SCHEDSTATS is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_HAVE_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_PI_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=21 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_PREEMPTIRQ_EVENTS is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_HWLAT_TRACER is not set +# CONFIG_FTRACE_SYSCALLS is not set +# CONFIG_TRACER_SNAPSHOT is not set +CONFIG_BRANCH_PROFILE_NONE=y +# 
CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +CONFIG_KPROBE_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_DYNAMIC_FTRACE=y +# CONFIG_FUNCTION_PROFILER is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_HIST_TRIGGERS is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +CONFIG_TRACING_EVENTS_GPIO=y +# CONFIG_DMA_API_DEBUG is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_BITFIELD is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_MEMTEST is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +# CONFIG_UBSAN is not set +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +# CONFIG_E2K_DEBUG_KERNEL is not set +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y + +# +# Elbrus architecture kernel hacking +# +CONFIG_BOOT_TRACE=y +CONFIG_BOOT_TRACE_THRESHOLD=100 diff --git 
a/arch/e2k/configs/e12c-lms-defconfig b/arch/e2k/configs/e12c-lms-defconfig new file mode 100644 index 000000000000..943183d63ee9 --- /dev/null +++ b/arch/e2k/configs/e12c-lms-defconfig @@ -0,0 +1,2651 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/e2k 4.9.33-0.xx-virt Kernel Configuration +# +CONFIG_E2K=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ZONE_DMA=y +CONFIG_GENERIC_FIND_FIRST_BIT=y +CONFIG_GENERIC_FIND_NEXT_BIT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_EARLY_PRINTK=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_GENERIC_IOMAP=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_SYS_SUPPORTS_HUGETLBFS=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_BOOT_TRACE_POSSIBLE=y +CONFIG_HOTPLUG_CPU=y +CONFIG_GENERIC_GPIO=y +CONFIG_ARCH_HAS_DEFAULT_IDLE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +# CONFIG_OF_IRQ is not set +# CONFIG_OF_NET is not set +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_IRQ_WORK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="" +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="-e2k-host" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZ4 is not set +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_FHANDLE is not set +CONFIG_USELIB=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CMOS_UPDATE=y 
+ +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_HZ_PERIODIC=y +# CONFIG_NO_HZ_IDLE is not set +# CONFIG_NO_HZ_FULL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +# CONFIG_BSD_PROCESS_ACCT is not set + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +# CONFIG_TASKS_RCU is not set +CONFIG_RCU_STALL_COMMON=y +# CONFIG_TREE_RCU_TRACE is not set +CONFIG_BUILD_BIN2C=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +# CONFIG_NUMA_BALANCING is not set +# CONFIG_CGROUPS is not set +# CONFIG_CHECKPOINT_RESTORE is not set +# CONFIG_NAMESPACES is not set +# CONFIG_SCHED_AUTOGROUP is not set +# CONFIG_SYSFS_DEPRECATED is not set +# CONFIG_RELAY is not set +# CONFIG_BLK_DEV_INITRD is not set +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_EXPERT=y +CONFIG_MULTIUSER=y +# CONFIG_SGETMASK_SYSCALL is not set +CONFIG_SYSFS_SYSCALL=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +# CONFIG_KALLSYMS_ABSOLUTE_PERCPU is not set +# CONFIG_KALLSYMS_BASE_RELATIVE is not set +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +# CONFIG_BPF_SYSCALL is not set +# CONFIG_SHMEM is not set +CONFIG_AIO=y +CONFIG_ADVISE_SYSCALLS=y +# CONFIG_USERFAULTFD is not set +CONFIG_PCI_QUIRKS=y +CONFIG_MEMBARRIER=y +CONFIG_EMBEDDED=y +CONFIG_HAVE_PERF_EVENTS=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# CONFIG_WATCH_PREEMPT is not set +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_SLUB_DEBUG is not set +# CONFIG_COMPAT_BRK is not set +# 
CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +# CONFIG_SLAB_FREELIST_RANDOM is not set +CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_SYSTEM_DATA_VERIFICATION is not set +# CONFIG_PROFILING is not set +# CONFIG_TRACEPOINTS is not set +# CONFIG_KPROBES is not set +# CONFIG_UPROBES is not set +# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_ARCH_TASK_STRUCT_ALLOCATOR=y +CONFIG_ARCH_THREAD_STACK_ALLOCATOR=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +# CONFIG_CC_STACKPROTECTOR is not set +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_EXIT_THREAD=y +# CONFIG_HAVE_ARCH_HASH is not set +CONFIG_ISA_BUS_API=y +# CONFIG_CPU_NO_EFFICIENT_FFS is not set +# CONFIG_HAVE_ARCH_VMAP_STACK is not set + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +# CONFIG_ARCH_HAS_GCOV_PROFILE_ALL is not set +# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +# CONFIG_MODULE_SIG is not set +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +# CONFIG_BLK_CMDLINE_PARSER is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# 
CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +CONFIG_SUN_PARTITION=y +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_FREEZER=y + +# +# Processor type and features +# +CONFIG_E2K_MACHINE_SIC=y +CONFIG_E2K_MACHINE_IOHUB=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +# CONFIG_E2K_MACHINE is not set +CONFIG_CPU_ES2=y +CONFIG_CPU_E2S=y +CONFIG_CPU_E8C=y +CONFIG_CPU_E1CP=y +CONFIG_CPU_E8C2=y +CONFIG_CPU_E12C=y +CONFIG_CPU_E2C3=y +# CONFIG_ISET_V5_SUPPORTED is not set +# CONFIG_ISET_V6_SUPPORTED is not set +# CONFIG_E2S_CPU_RF_BUG is not set +CONFIG_SMP_DAM_BUG=y +CONFIG_HZ=100 +CONFIG_GLOBAL_CONTEXT=y +CONFIG_SECONDARY_SPACE_SUPPORT=y +CONFIG_ENABLE_EXTMEM=y +# CONFIG_ONLY_BSP_MEMORY is not set +CONFIG_FORCE_MAX_ZONEORDER=15 +CONFIG_NODES_SHIFT=2 +CONFIG_NODES_SPAN_OTHER_NODES=y +CONFIG_NUMA=y +CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT=y +# CONFIG_KTHREAD_ON_CPU is not set +CONFIG_SERIAL_BOOT_PRINTK=y 
+CONFIG_SERIAL_AM85C30_BOOT_CONSOLE=y +CONFIG_BOOT_SERIAL_BAUD=115200 +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_BOUNCE=y +# CONFIG_KSM is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +# CONFIG_CLEANCACHE is not set +# CONFIG_FRONTSWAP is not set +# CONFIG_ZPOOL is not set +# CONFIG_ZBUD is not set +# CONFIG_ZSMALLOC is not set +# CONFIG_IDLE_PAGE_TRACKING is not set +CONFIG_MEMLIMIT=2048 +CONFIG_EXT_MEMLIMIT=2048 +CONFIG_VRAM_SIZE_128=y +# CONFIG_VRAM_SIZE_256 is not set +# CONFIG_VRAM_SIZE_512 is not set +# CONFIG_VRAM_SIZE_1024 is not set +# CONFIG_VRAM_DISABLE is not set +# CONFIG_ACCESS_CONTROL is not set +# CONFIG_USR_CONTROL_INTERRUPTS is not set +# CONFIG_KERNEL_TIMES_ACCOUNT is not set +# CONFIG_CLI_CHECK_TIME is not set +CONFIG_ARCH_CACHES=y +# CONFIG_CMDLINE_PROMPT is not set +CONFIG_MAKE_ALL_PAGES_VALID=y +CONFIG_SET_STACKS_SIZE=y +CONFIG_PSP_STACK_SIZE=1024 +CONFIG_PSP_WIN_SIZE=32 +CONFIG_UPS_AREA_SIZE=32 +CONFIG_UPCS_AREA_SIZE=2 +CONFIG_USE_AAU=y +CONFIG_STACK_REG_WINDOW=y +CONFIG_DATA_STACK_WINDOW=y +# CONFIG_DUMP_ON_OTHER_CPUS is not set +CONFIG_BINFMT_ELF32=y +CONFIG_COMPAT=y +CONFIG_PROTECTED_MODE=y +# CONFIG_DBG_RTL_TRACE is not set +CONFIG_CLW_ENABLE=y +# CONFIG_IPD_DISABLE is not set +CONFIG_GREGS_CONTEXT=y +CONFIG_MLT_STORAGE=y +CONFIG_TC_STORAGE=y +# CONFIG_IGNORE_MEM_LOCK_AS is not set +CONFIG_RECOVERY=y +CONFIG_MONITORS=y +CONFIG_DUMP_ALL_STACKS=y +CONFIG_CMDLINE="root=/dev/hda3 console=ttyLMS0 console=ttyS0,115200 init=bin/bash" +CONFIG_SMP=y +CONFIG_NR_CPUS=64 
+CONFIG_MCST=y +CONFIG_PREEMPT_NONE=y +# CONFIG_RWSEM_GENERIC_SPINLOCK is not set +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_TIME=y + +# +# Timers subsystem +# +CONFIG_BIOS=y +CONFIG_ENABLE_BIOS_MPTABLE=y +CONFIG_ENABLE_ELBRUS_PCIBIOS=y +CONFIG_ENABLE_IDE=y +CONFIG_ENABLE_KEYBOARD=y +CONFIG_ENABLE_MOUSE=y +CONFIG_ENABLE_FLOPPY=y +CONFIG_ENABLE_MGA=y +CONFIG_ENABLE_RTC=y +CONFIG_ENABLE_SERIAL=y +CONFIG_ENABLE_PARALLEL_PORT=y +CONFIG_ENABLE_IOAPIC=y +# CONFIG_ADC_DISABLE is not set +CONFIG_OPTIMIZE_REGISTERS_ACCESS=y +# CONFIG_E2K_STACKS_TRACER is not set +# CONFIG_EPROF_KERNEL is not set +CONFIG_IOMMU_HELPER=y +CONFIG_HAVE_DMA_ATTRS=y +CONFIG_SWIOTLB=y + +# +# Elbrus Architecture Linux Kernel Configuration +# + +# +# Boot/prom console support +# +CONFIG_SERIAL_PRINTK=y +CONFIG_SERIAL_AM85C30_CONSOLE=y +CONFIG_EARLY_DUMP_CONSOLE=y +CONFIG_SCLKR_CLOCKSOURCE=y +CONFIG_CLKR_CLOCKSOURCE=y +# CONFIG_CLKR_SYNCHRONIZATION_WARNING is not set +CONFIG_CLKR_OFFSET=y +CONFIG_IOHUB_GPIO=y +CONFIG_L_X86_64=y +CONFIG_NUMA_IRQ_DESC=y +CONFIG_L_LOCAL_APIC=y +CONFIG_L_IO_APIC=y +CONFIG_L_PCI_QUIRKS=y +CONFIG_L_SIC_IPLINK_OFF=y +CONFIG_L_MMPD=y +CONFIG_L_PMC=y +CONFIG_I2C_SPI_RESET_CONTROLLER=y +CONFIG_L_I2C_CONTROLLER=y +CONFIG_L_SPI_CONTROLLER=y +# CONFIG_I2C_SPI_IRQ is not set +CONFIG_IPE2ST_POWER=m +CONFIG_ACPI_L_SPMC=y + +# +# Device Tree +# +# CONFIG_OF is not set + +# +# Power management options (ACPI, APM) +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_SUSPEND_SKIP_SYNC is not set +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +# CONFIG_ACPI is not set +CONFIG_E2S_CLK_GATE=y + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_STAT_DETAILS=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# 
CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_PSTATES is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_PSTATES=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# + +# +# APM (Advanced Power Management) BIOS Support +# +# CONFIG_APM is not set + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_CPU_IDLE_GOV_RT=y + +# +# E2K CPU Idle Drivers +# +CONFIG_E2K_CPUIDLE=m + +# +# E2K CPU Idle Drivers +# +# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set + +# +# Bus options (PCI, PCMCIA, EISA, MCA, ISA) +# + +# +# Elbrus chipset PCI support +# +CONFIG_PCI_ELBRUS=y +CONFIG_IOHUB_DOMAINS=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI=y +# CONFIG_PCI_USE_VECTOR is not set +# CONFIG_PCIEPORTBUS is not set +CONFIG_PCI_BUS_ADDR_T_64BIT=y +CONFIG_PCI_MSI=y +# CONFIG_PCI_MSI_IRQ_DOMAIN is not set +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +# CONFIG_PCI_STUB is not set +# CONFIG_PCI_IOV is not set +# CONFIG_PCI_PRI is not set +# CONFIG_PCI_PASID is not set +# CONFIG_HOTPLUG_PCI is not set + +# +# PCI host controller drivers +# +CONFIG_ISA=y +# CONFIG_PCCARD is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +# CONFIG_HAVE_AOUT is not set +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# CONFIG_NET is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y 
+CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +CONFIG_FIRMWARE_IN_KERNEL=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_GENERIC_CPU_DEVICES is not set +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_SPI=y +# CONFIG_DMA_SHARED_BUFFER is not set + +# +# Elbrus MCST Device Drivers +# +CONFIG_DDI=y +CONFIG_AGAT_SCSI=m +CONFIG_GPIO=m +CONFIG_MSPS=m +CONFIG_MPV=m +# CONFIG_MGPM is not set +CONFIG_MMRM=m +CONFIG_MOKM=m +CONFIG_RDMA=m +CONFIG_RDMA_SIC=m +CONFIG_RDMA_M=m +CONFIG_MOKX=m +CONFIG_WD=y +# CONFIG_DMP_ASSIST is not set +CONFIG_LPTOUTS=m +CONFIG_M2MLC=m +CONFIG_E2K_KEXEC=y +# CONFIG_MCST_GPU_VIV is not set + +# +# Bus devices +# +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +# CONFIG_MTD_BLOCK is not set +# CONFIG_MTD_BLOCK_RO is not set +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers 
for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_SPI_NOR is not set +# CONFIG_MTD_UBI is not set +CONFIG_PARPORT=m +CONFIG_PARPORT_MCST=m +# CONFIG_PARPORT_GSC is not set +# CONFIG_PARPORT_AX88796 is not set +CONFIG_PARPORT_1284=y +# CONFIG_PNP is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_NULL_BLK is not set +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +CONFIG_BLK_DEV_CRYPTOLOOP=m + +# +# DRBD disabled because PROC_FS or INET not selected +# +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +# CONFIG_BLK_DEV_RAM is not set +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_VIRTIO_BLK is not set +# CONFIG_BLK_DEV_HD is not set +# CONFIG_BLK_DEV_RSXX is not set +# CONFIG_BLK_DEV_NVME is not set +# CONFIG_NVME_TARGET is not set + +# +# Misc devices +# +# CONFIG_SENSORS_LIS3LV02D is not set +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +# CONFIG_SGI_IOC4 is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29020 is 
not set +CONFIG_ISL22317=m +# CONFIG_ISL29003 is not set +CONFIG_LTC4306=m +CONFIG_UCD9080=m +CONFIG_I2C_P2PMC=m +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_TI_DAC7512 is not set +# CONFIG_USB_SWITCH_FSA9480 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_PANEL is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +CONFIG_EEPROM_AT24=m +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_SENSORS_LIS3_SPI is not set +# CONFIG_SENSORS_LIS3_I2C is not set + +# +# Altera FPGA firmware download module +# +# CONFIG_ALTERA_STAPL is not set + +# +# Intel MIC Bus Driver +# + +# +# SCIF Bus Driver +# + +# +# VOP Bus Driver +# + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_CXL_BASE is not set +# CONFIG_CXL_AFU_DRIVER_OPS is not set +CONFIG_HAVE_IDE=y +CONFIG_IDE=y + +# +# Please see Documentation/ide/ide.txt for help/info on IDE drives +# +CONFIG_IDE_XFER_MODE=y +# CONFIG_BLK_DEV_IDE_SATA is not set +CONFIG_IDE_GD=y +CONFIG_IDE_GD_ATA=y +# CONFIG_IDE_GD_ATAPI is not set +# CONFIG_BLK_DEV_IDECD is not set +# CONFIG_BLK_DEV_IDETAPE is not set +# CONFIG_IDE_TASK_IOCTL is not set +CONFIG_IDE_PROC_FS=y + +# +# IDE chipset support/bugfixes +# +# CONFIG_BLK_DEV_PLATFORM is not set +CONFIG_BLK_DEV_IDEDMA_SFF=y + +# +# PCI IDE chipsets support +# +CONFIG_BLK_DEV_IDEPCI=y +CONFIG_IDEPCI_PCIBUS_ORDER=y +# CONFIG_BLK_DEV_OFFBOARD is not set +CONFIG_BLK_DEV_GENERIC=y +# CONFIG_BLK_DEV_OPTI621 is 
not set +CONFIG_BLK_DEV_IDEDMA_PCI=y +# CONFIG_BLK_DEV_AEC62XX is not set +# CONFIG_BLK_DEV_ALI15X3 is not set +# CONFIG_BLK_DEV_AMD74XX is not set +# CONFIG_BLK_DEV_CMD64X is not set +# CONFIG_BLK_DEV_TRIFLEX is not set +# CONFIG_BLK_DEV_HPT366 is not set +# CONFIG_BLK_DEV_JMICRON is not set +CONFIG_BLK_DEV_PIIX=y +CONFIG_BLK_DEV_ELBRUS=y +# CONFIG_BLK_DEV_IT8172 is not set +# CONFIG_BLK_DEV_IT8213 is not set +# CONFIG_BLK_DEV_IT821X is not set +# CONFIG_BLK_DEV_NS87415 is not set +# CONFIG_BLK_DEV_PDC202XX_OLD is not set +# CONFIG_BLK_DEV_PDC202XX_NEW is not set +# CONFIG_BLK_DEV_SVWKS is not set +# CONFIG_BLK_DEV_SIIMAGE is not set +# CONFIG_BLK_DEV_SLC90E66 is not set +# CONFIG_BLK_DEV_TRM290 is not set +# CONFIG_BLK_DEV_VIA82CXXX is not set +# CONFIG_BLK_DEV_TC86C001 is not set +CONFIG_BLK_DEV_IDEDMA=y + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +# CONFIG_SCSI_MQ_DEFAULT is not set +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +# CONFIG_CHR_DEV_OSST is not set +# CONFIG_BLK_DEV_SR is not set +CONFIG_CHR_DEV_SG=m +# CONFIG_CHR_DEV_SCH is not set +# CONFIG_SCSI_CONSTANTS is not set +# CONFIG_SCSI_LOGGING is not set +# CONFIG_SCSI_SCAN_ASYNC is not set + +# +# SCSI Transports +# +# CONFIG_SCSI_SPI_ATTRS is not set +CONFIG_SCSI_SAS_ATTRS=m +# CONFIG_SCSI_SAS_LIBSAS is not set +CONFIG_SCSI_SRP_ATTRS=m +# CONFIG_SCSI_LOWLEVEL is not set +# CONFIG_SCSI_DH is not set +# CONFIG_SCSI_OSD_INITIATOR is not set +CONFIG_ATA=y +# CONFIG_ATA_NONSTANDARD is not set +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +# CONFIG_SATA_AHCI_PLATFORM is not set +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +CONFIG_SATA_SIL24=y +# CONFIG_ATA_SFF is not set +# CONFIG_MD is not set +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m 
+# CONFIG_LOOPBACK_TARGET is not set +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=m +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set +# CONFIG_RMI4_CORE is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_SERIO_I8042=y +CONFIG_SERIO_SERPORT=m +# CONFIG_SERIO_PARKBD is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +# CONFIG_SERIO_RAW is not set +# CONFIG_SERIO_ALTERA_PS2 is not set +# CONFIG_SERIO_PS2MULT is not set +# CONFIG_SERIO_ARC_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_TTY=y +# CONFIG_VT is not set +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_NOZOMI is not set +# CONFIG_TRACE_SINK is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=32 +CONFIG_SERIAL_8250_EXTENDED=y +# CONFIG_SERIAL_8250_MANY_PORTS is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +# CONFIG_SERIAL_8250_RSA is not set +# CONFIG_SERIAL_8250_FSL 
is not set +# CONFIG_SERIAL_8250_DW is not set +# CONFIG_SERIAL_8250_RT288X is not set +# CONFIG_SERIAL_8250_MOXA is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +CONFIG_LMS_CONSOLE=y +CONFIG_SERIAL_L_ZILOG=y +CONFIG_SERIAL_L_ZILOG_CONSOLE=y +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_TTY_PRINTK is not set +CONFIG_PRINTER=m +# CONFIG_LP_CONSOLE is not set +CONFIG_PPDEV=m +# CONFIG_VIRTIO_CONSOLE is not set +CONFIG_IPMI_HANDLER=m +# CONFIG_IPMI_PANIC_EVENT is not set +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +# CONFIG_IPMI_SSIF is not set +# CONFIG_IPMI_WATCHDOG is not set +CONFIG_IPMI_POWEROFF=m +# CONFIG_HW_RANDOM is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_DEVPORT is not set +# CONFIG_XILLYBUS is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +# CONFIG_I2C_MUX_GPIO is not set +# CONFIG_I2C_MUX_PCA9541 is not set +# CONFIG_I2C_MUX_PCA954x is not set +# CONFIG_I2C_MUX_REG is not set +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOBIT=y + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +# 
CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CBUS_GPIO is not set +# CONFIG_I2C_DESIGNWARE_PLATFORM is not set +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_PXA_PCI is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_PCA_ISA is not set +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_BUTTERFLY is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_LM70_LLP is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_PXA2XX_PCI is not set +# CONFIG_SPI_ROCKCHIP is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not set + +# +# SPI Protocol Masters +# +CONFIG_SPI_SPIDEV=m +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set + +# +# PPS support +# +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set +CONFIG_NTP_PPS=y + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# 
CONFIG_PPS_CLIENT_PARPORT is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# + +# +# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. +# +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_DWAPB is not set +# CONFIG_GPIO_GENERIC_PLATFORM is not set +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_GPIO_VX855 is not set +# CONFIG_GPIO_ZX is not set + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADP5588 is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +CONFIG_GPIO_PCA953X=m +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_SX150X is not set +# CONFIG_GPIO_TPIC2810 is not set +# CONFIG_GPIO_TS4900 is not set + +# +# MFD GPIO expanders +# + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_AMD8111 is not set +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_ML_IOH is not set +# CONFIG_GPIO_RDC321X is not set + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set + +# +# SPI or I2C GPIO expanders +# +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +# CONFIG_POWER_RESET is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24190 is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_SMB347 is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# 
CONFIG_CHARGER_RT9455 is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7314 is not set +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7310 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +CONFIG_SENSORS_ADT7475=m +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_IBMAEM is not set +# CONFIG_SENSORS_IBMPEX is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +# CONFIG_SENSORS_POWR1220 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2990 is not set +CONFIG_SENSORS_LTC4151=m +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4222 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4260 is not set +# CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_MAX1111 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# 
CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_ADCXX is not set +CONFIG_SENSORS_LM63=m +# CONFIG_SENSORS_LM70 is not set +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +CONFIG_SENSORS_LM95231=m +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +CONFIG_SENSORS_LM95245=m +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +CONFIG_SENSORS_NTC_THERMISTOR=y +# CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_PCF8591 is not set +CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +# CONFIG_SENSORS_ADM1275 is not set +# CONFIG_SENSORS_LM25066 is not set +# CONFIG_SENSORS_LTC2978 is not set +# CONFIG_SENSORS_LTC3815 is not set +# CONFIG_SENSORS_MAX16064 is not set +# CONFIG_SENSORS_MAX20751 is not set +# CONFIG_SENSORS_MAX34440 is not set +# CONFIG_SENSORS_MAX8688 is not set +# CONFIG_SENSORS_TPS40422 is not set +# CONFIG_SENSORS_UCD9000 is not set +# CONFIG_SENSORS_UCD9200 is not set +# CONFIG_SENSORS_ZL6100 is not set +# CONFIG_SENSORS_SHT15 is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_SCH56XX_COMMON is 
not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +# CONFIG_SENSORS_ADS1015 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_ADS7871 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +# CONFIG_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y + +# +# Broadcom specific AMBA +# +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_EXYNOS_LPASS is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_INTEL_SOC_PMIC is not set +# 
CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_UCB1400_CORE is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RTSX_PCI is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_REGULATOR 
is not set +CONFIG_MEDIA_SUPPORT=m + +# +# Multimedia core support +# +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_ANALOG_TV_SUPPORT=y +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y +CONFIG_MEDIA_RADIO_SUPPORT=y +# CONFIG_MEDIA_SDR_SUPPORT is not set +# CONFIG_MEDIA_RC_SUPPORT is not set +# CONFIG_MEDIA_CONTROLLER is not set +CONFIG_VIDEO_DEV=m +CONFIG_VIDEO_V4L2=m +# CONFIG_VIDEO_ADV_DEBUG is not set +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set +CONFIG_DVB_CORE=m +# CONFIG_TTPCI_EEPROM is not set +CONFIG_DVB_MAX_ADAPTERS=8 +# CONFIG_DVB_DYNAMIC_MINORS is not set + +# +# Media drivers +# +# CONFIG_MEDIA_PCI_SUPPORT is not set +CONFIG_V4L_PLATFORM_DRIVERS=y +# CONFIG_VIDEO_CAFE_CCIC is not set +# CONFIG_SOC_CAMERA is not set +# CONFIG_V4L_MEM2MEM_DRIVERS is not set +# CONFIG_V4L_TEST_DRIVERS is not set +# CONFIG_DVB_PLATFORM_DRIVERS is not set + +# +# Supported MMC/SDIO adapters +# +CONFIG_RADIO_ADAPTERS=y +# CONFIG_RADIO_SI470X is not set +# CONFIG_RADIO_SI4713 is not set +# CONFIG_RADIO_MAXIRADIO is not set +# CONFIG_RADIO_TEA5764 is not set +# CONFIG_RADIO_SAA7706H is not set +# CONFIG_RADIO_TEF6862 is not set +# CONFIG_RADIO_WL1273 is not set + +# +# Texas Instruments WL128x FM driver (ST based) +# +# CONFIG_V4L_RADIO_ISA_DRIVERS is not set + +# +# Media ancillary drivers (tuners, sensors, i2c, spi, frontends) +# +CONFIG_MEDIA_SUBDRV_AUTOSELECT=y +CONFIG_MEDIA_ATTACH=y + +# +# Audio decoders, processors and mixers +# + +# +# RDS decoders +# + +# +# Video decoders +# + +# +# Video and audio decoders +# + +# +# Video encoders +# + +# +# Camera sensor devices +# + +# +# Flash devices +# + +# +# Video improvement chips +# + +# +# Audio/Video compression chips +# + +# +# Miscellaneous helper chips +# + +# +# Sensors used on soc_camera driver +# +CONFIG_MEDIA_TUNER=m +CONFIG_MEDIA_TUNER_SIMPLE=m +CONFIG_MEDIA_TUNER_TDA8290=m +CONFIG_MEDIA_TUNER_TDA827X=m +CONFIG_MEDIA_TUNER_TDA18271=m +CONFIG_MEDIA_TUNER_TDA9887=m +CONFIG_MEDIA_TUNER_TEA5761=m +CONFIG_MEDIA_TUNER_TEA5767=m 
+CONFIG_MEDIA_TUNER_MT20XX=m +CONFIG_MEDIA_TUNER_XC2028=m +CONFIG_MEDIA_TUNER_XC5000=m +CONFIG_MEDIA_TUNER_XC4000=m +CONFIG_MEDIA_TUNER_MC44S803=m + +# +# Multistandard (satellite) frontends +# + +# +# Multistandard (cable + terrestrial) frontends +# + +# +# DVB-S (satellite) frontends +# + +# +# DVB-T (terrestrial) frontends +# +# CONFIG_DVB_AS102_FE is not set +# CONFIG_DVB_GP8PSK_FE is not set + +# +# DVB-C (cable) frontends +# + +# +# ATSC (North American/Korean Terrestrial/Cable DTV) frontends +# + +# +# ISDB-T (terrestrial) frontends +# + +# +# ISDB-S (satellite) & ISDB-T (terrestrial) frontends +# + +# +# Digital terrestrial only tuners/PLL +# + +# +# SEC control devices for DVB-S +# + +# +# Tools to develop new frontends +# +# CONFIG_DVB_DUMMY_FE is not set + +# +# Graphics support +# +# CONFIG_VGA_ARB is not set +# CONFIG_DRM is not set + +# +# ACP (Audio CoProcessor) Configuration +# +# CONFIG_DRM_IMX_HDMI is not set + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB_DDC=y +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +CONFIG_FB_BACKLIGHT=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_VGA16 is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set 
+CONFIG_FB_RADEON=y +CONFIG_FB_RADEON_I2C=y +CONFIG_FB_RADEON_BACKLIGHT=y +# CONFIG_FB_RADEON_DEBUG is not set +CONFIG_FB_ATY128=y +CONFIG_FB_ATY128_BACKLIGHT=y +CONFIG_FB_ATY=y +# CONFIG_FB_ATY_CT is not set +# CONFIG_FB_ATY_GX is not set +CONFIG_FB_ATY_BACKLIGHT=y +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +# CONFIG_FB_AUO_K190X is not set +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SM712 is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +# CONFIG_LCD_PLATFORM is not set +# CONFIG_LCD_S6E63M0 is not set +# CONFIG_LCD_LD9040 is not set +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_GENERIC is not set +# CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3639 is not set +# CONFIG_BACKLIGHT_GPIO is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_VGASTATE is not set +# CONFIG_FB_MGAM83 is not set +# CONFIG_PCI_FB_MGAM83 is not set +CONFIG_FB_MGA3D=m +# CONFIG_FB_LYNXFB is not set +# CONFIG_LOGO is not set +CONFIG_SOUND=m +CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y +CONFIG_SND=m 
+CONFIG_SND_TIMER=m +CONFIG_SND_PCM=m +CONFIG_SND_HWDEP=m +CONFIG_SND_RAWMIDI=m +CONFIG_SND_JACK=y +CONFIG_SND_JACK_INPUT_DEV=y +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_OSSEMUL=y +CONFIG_SND_MIXER_OSS=m +CONFIG_SND_PCM_OSS=m +CONFIG_SND_PCM_OSS_PLUGINS=y +CONFIG_SND_PCM_TIMER=y +CONFIG_SND_SEQUENCER_OSS=y +# CONFIG_SND_HRTIMER is not set +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_MAX_CARDS=32 +CONFIG_SND_SUPPORT_OLD_API=y +CONFIG_SND_PROC_FS=y +CONFIG_SND_VERBOSE_PROCFS=y +# CONFIG_SND_VERBOSE_PRINTK is not set +# CONFIG_SND_DEBUG is not set +CONFIG_SND_VMASTER=y +CONFIG_SND_RAWMIDI_SEQ=m +CONFIG_SND_OPL3_LIB_SEQ=m +# CONFIG_SND_OPL4_LIB_SEQ is not set +# CONFIG_SND_SBAWE_SEQ is not set +# CONFIG_SND_EMU10K1_SEQ is not set +CONFIG_SND_MPU401_UART=m +CONFIG_SND_OPL3_LIB=m +CONFIG_SND_AC97_CODEC=m +CONFIG_SND_DRIVERS=y +CONFIG_SND_DUMMY=m +# CONFIG_SND_ALOOP is not set +CONFIG_SND_VIRMIDI=m +CONFIG_SND_MTPAV=m +# CONFIG_SND_MTS64 is not set +# CONFIG_SND_SERIAL_U16550 is not set +# CONFIG_SND_MPU401 is not set +# CONFIG_SND_PORTMAN2X4 is not set +# CONFIG_SND_AC97_POWER_SAVE is not set +CONFIG_SND_PCI=y +# CONFIG_SND_AD1889 is not set +# CONFIG_SND_ALS300 is not set +# CONFIG_SND_ALI5451 is not set +# CONFIG_SND_ATIIXP is not set +# CONFIG_SND_ATIIXP_MODEM is not set +# CONFIG_SND_AU8810 is not set +# CONFIG_SND_AU8820 is not set +# CONFIG_SND_AU8830 is not set +# CONFIG_SND_AW2 is not set +# CONFIG_SND_AZT3328 is not set +# CONFIG_SND_BT87X is not set +# CONFIG_SND_CA0106 is not set +# CONFIG_SND_CMIPCI is not set +# CONFIG_SND_OXYGEN is not set +CONFIG_SND_CS4281=m +# CONFIG_SND_CS46XX is not set +# CONFIG_SND_CTXFI is not set +# CONFIG_SND_DARLA20 is not set +# CONFIG_SND_GINA20 is not set +# CONFIG_SND_LAYLA20 is not set +# CONFIG_SND_DARLA24 is not set +# CONFIG_SND_GINA24 is not set +# CONFIG_SND_LAYLA24 is not set +# CONFIG_SND_MONA is not set +# CONFIG_SND_MIA is not set +# CONFIG_SND_ECHO3G is not set +# CONFIG_SND_INDIGO is not set +# 
CONFIG_SND_INDIGOIO is not set +# CONFIG_SND_INDIGODJ is not set +# CONFIG_SND_INDIGOIOX is not set +# CONFIG_SND_INDIGODJX is not set +# CONFIG_SND_EMU10K1 is not set +# CONFIG_SND_EMU10K1X is not set +# CONFIG_SND_ENS1370 is not set +# CONFIG_SND_ENS1371 is not set +CONFIG_SND_ES1938=m +# CONFIG_SND_ES1968 is not set +# CONFIG_SND_FM801 is not set +# CONFIG_SND_HDSP is not set +# CONFIG_SND_HDSPM is not set +# CONFIG_SND_ICE1712 is not set +# CONFIG_SND_ICE1724 is not set +# CONFIG_SND_INTEL8X0 is not set +# CONFIG_SND_INTEL8X0M is not set +# CONFIG_SND_KORG1212 is not set +# CONFIG_SND_LOLA is not set +# CONFIG_SND_LX6464ES is not set +# CONFIG_SND_MAESTRO3 is not set +# CONFIG_SND_MIXART is not set +# CONFIG_SND_NM256 is not set +# CONFIG_SND_PCXHR is not set +# CONFIG_SND_RIPTIDE is not set +# CONFIG_SND_RME32 is not set +# CONFIG_SND_RME96 is not set +# CONFIG_SND_RME9652 is not set +# CONFIG_SND_SE6X is not set +# CONFIG_SND_SONICVIBES is not set +# CONFIG_SND_TRIDENT is not set +# CONFIG_SND_VIA82XX is not set +# CONFIG_SND_VIA82XX_MODEM is not set +# CONFIG_SND_VIRTUOSO is not set +# CONFIG_SND_VX222 is not set +# CONFIG_SND_YMFPCI is not set + +# +# HD-Audio +# +CONFIG_SND_HDA=m +CONFIG_SND_HDA_INTEL=m +# CONFIG_SND_HDA_HWDEP is not set +# CONFIG_SND_HDA_RECONFIG is not set +# CONFIG_SND_HDA_INPUT_BEEP is not set +# CONFIG_SND_HDA_PATCH_LOADER is not set +CONFIG_SND_HDA_CODEC_REALTEK=m +CONFIG_SND_HDA_CODEC_ANALOG=m +CONFIG_SND_HDA_CODEC_SIGMATEL=m +CONFIG_SND_HDA_CODEC_VIA=m +CONFIG_SND_HDA_CODEC_HDMI=m +CONFIG_SND_HDA_CODEC_CIRRUS=m +CONFIG_SND_HDA_CODEC_CONEXANT=m +CONFIG_SND_HDA_CODEC_CA0110=m +CONFIG_SND_HDA_CODEC_CA0132=m +# CONFIG_SND_HDA_CODEC_CA0132_DSP is not set +CONFIG_SND_HDA_CODEC_CMEDIA=m +CONFIG_SND_HDA_CODEC_SI3054=m +CONFIG_SND_HDA_GENERIC=m +CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 +CONFIG_SND_HDA_CORE=m +CONFIG_SND_HDA_PREALLOC_SIZE=2048 +CONFIG_SND_SPI=y +# CONFIG_SND_SOC is not set +# CONFIG_SOUND_PRIME is not set +CONFIG_AC97_BUS=m + +# 
+# HID support +# +# CONFIG_HID is not set + +# +# I2C HID support +# +# CONFIG_I2C_HID is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +# CONFIG_USB_SUPPORT is not set +# CONFIG_UWB is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_MC146818_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABX80X is not set +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8523 is not set +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8010 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV8803 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +CONFIG_RTC_DRV_CY14B101P=y +CONFIG_RTC_DRV_FM33256=y +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RX4581 is not set 
+# CONFIG_RTC_DRV_RX6110 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_CMOS=y +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_DS2404 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# + +# +# HID Sensor RTC drivers +# +# CONFIG_DMADEVICES is not set + +# +# DMABUF options +# +# CONFIG_SYNC_FILE is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO=y + +# +# Virtio drivers +# +# CONFIG_VIRTIO_PCI is not set +# CONFIG_VIRTIO_BALLOON is not set +# CONFIG_VIRTIO_INPUT is not set +# CONFIG_VIRTIO_MMIO is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_STAGING is not set + +# +# Hardware Spinlock drivers +# + +# +# Clock Source drivers +# +# CONFIG_ATMEL_PIT is not set +# CONFIG_SH_TIMER_CMT is not set +# CONFIG_SH_TIMER_MTU2 is not set +# CONFIG_SH_TIMER_TMU is not set +# CONFIG_EM_TIMER_STI is not set +# CONFIG_MAILBOX is not set +# CONFIG_IOMMU_SUPPORT is not set + +# +# Remoteproc drivers +# +# CONFIG_STE_MODEM_RPROC is not set + +# +# Rpmsg drivers +# + +# +# SOC (System On Chip) specific Drivers +# + +# +# Broadcom SoC drivers +# +# CONFIG_SUNXI_SRAM is not set +# CONFIG_SOC_TI is not set +# CONFIG_PM_DEVFREQ is not set +# CONFIG_EXTCON 
is not set +# CONFIG_MEMORY is not set +CONFIG_IIO=m +CONFIG_INV_MPU6050_IIO=m +CONFIG_INV_MPU6050_I2C=m +CONFIG_AK8975=m +CONFIG_BMP280=m +CONFIG_BMP280_I2C=m +CONFIG_BMP280_SPI=m +CONFIG_ARM_GIC_MAX_NR=1 +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set +# CONFIG_FMC is not set + +# +# PHY Subsystem +# +# CONFIG_GENERIC_PHY is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_RAS is not set +# CONFIG_THUNDERBOLT is not set + +# +# Android +# +# CONFIG_ANDROID is not set +# CONFIG_LIBNVDIMM is not set +# CONFIG_DEV_DAX is not set +CONFIG_NVMEM=m +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set + +# +# FPGA Configuration Support +# +# CONFIG_FPGA is not set + +# +# File systems +# +CONFIG_FS_IOMAP=y +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_ENCRYPTION is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +CONFIG_REISERFS_FS=m +# CONFIG_REISERFS_CHECK is not set +# CONFIG_REISERFS_PROC_INFO is not set +# CONFIG_REISERFS_FS_XATTR is not set +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +# CONFIG_JFS_SECURITY is not set +# CONFIG_JFS_DEBUG is not set +# CONFIG_JFS_STATISTICS is not set +CONFIG_XFS_FS=m +# CONFIG_XFS_QUOTA is not set +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is not set +CONFIG_BTRFS_FS=m +# CONFIG_BTRFS_FS_POSIX_ACL is not set +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# 
CONFIG_FS_DAX is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=m +# CONFIG_EXPORTFS_BLOCK_OPS is not set +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +# CONFIG_OVERLAY_FS is not set + +# +# Caches +# +CONFIG_FSCACHE=y +# CONFIG_FSCACHE_STATS is not set +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +# CONFIG_CACHEFILES is not set + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_UDF_NLS=y + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_FAT_DEFAULT_UTF8 is not set +CONFIG_NTFS_FS=m +# CONFIG_NTFS_DEBUG is not set +CONFIG_NTFS_RW=y + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +# CONFIG_PROC_CHILDREN is not set +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_CONFIGFS_FS=m +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_LOGFS is not set +# CONFIG_CRAMFS is not set +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +# CONFIG_SQUASHFS_XATTR is not set 
+CONFIG_SQUASHFS_ZLIB=y +# CONFIG_SQUASHFS_LZ4 is not set +# CONFIG_SQUASHFS_LZO is not set +# CONFIG_SQUASHFS_XZ is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +CONFIG_SQUASHFS_EMBEDDED=y +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +CONFIG_ROMFS_FS=m +CONFIG_ROMFS_BACKED_BY_BLOCK=y +# CONFIG_ROMFS_BACKED_BY_MTD is not set +# CONFIG_ROMFS_BACKED_BY_BOTH is not set +CONFIG_ROMFS_ON_BLOCK=y +# CONFIG_PSTORE is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=m +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +CONFIG_NLS_CODEPAGE_855=m +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +CONFIG_NLS_CODEPAGE_866=m +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=m +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +CONFIG_NLS_ISO8859_5=m +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +# CONFIG_NLS_MAC_ROMAN is not set 
+# CONFIG_NLS_MAC_CELTIC is not set +# CONFIG_NLS_MAC_CENTEURO is not set +# CONFIG_NLS_MAC_CROATIAN is not set +CONFIG_NLS_MAC_CYRILLIC=m +# CONFIG_NLS_MAC_GAELIC is not set +# CONFIG_NLS_MAC_GREEK is not set +# CONFIG_NLS_MAC_ICELAND is not set +# CONFIG_NLS_MAC_INUIT is not set +# CONFIG_NLS_MAC_ROMANIAN is not set +# CONFIG_NLS_MAC_TURKISH is not set +CONFIG_NLS_UTF8=m + +# +# Kernel hacking +# +CONFIG_TRACE_IRQFLAGS_SUPPORT=y + +# +# printk and dmesg options +# +# CONFIG_PRINTK_TIME is not set +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_DYNAMIC_DEBUG is not set + +# +# Compile-time checks and compiler options +# +# CONFIG_DEBUG_INFO is not set +# CONFIG_ENABLE_WARN_DEPRECATED is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=4096 +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_READABLE_ASM is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_PAGE_OWNER is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_ARCH_WANT_FRAME_POINTERS=y +# CONFIG_FRAME_POINTER is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_PER_CPU_MAPS is not set +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Lockups and Hangs +# +# CONFIG_LOCKUP_DETECTOR is not set +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_PANIC_ON_OOPS 
is not set +CONFIG_PANIC_ON_OOPS_VALUE=0 +CONFIG_PANIC_TIMEOUT=0 +# CONFIG_SCHED_DEBUG is not set +# CONFIG_SCHED_INFO is not set +# CONFIG_SCHEDSTATS is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_TIMEKEEPING is not set +# CONFIG_TIMER_STATS is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_HAVE_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_PI_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_PROVE_RCU is not set +# CONFIG_SPARSE_RCU_POINTER is not set +# CONFIG_TORTURE_TEST is not set +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=21 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set + +# +# Runtime Testing +# +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# 
CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_SAMPLES is not set +# CONFIG_ARCH_WANTS_UBSAN_NO_NULL is not set +# CONFIG_UBSAN is not set +# CONFIG_E2K_DEBUG_KERNEL is not set +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y + +# +# Elbrus architecture kernel hacking +# +CONFIG_BOOT_TRACE=y +CONFIG_BOOT_TRACE_THRESHOLD=100 + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_SYSVIPC_COMPAT=y +CONFIG_XOR_BLOCKS=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=m +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=m +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=m +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=m +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_KPP2=y +# CONFIG_CRYPTO_RSA is not set +# CONFIG_CRYPTO_DH is not set +# CONFIG_CRYPTO_ECDH is not set +CONFIG_CRYPTO_MANAGER=m +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_GF128MUL=m +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_NULL2=y +# CONFIG_CRYPTO_PCRYPT is not set +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=m +# CONFIG_CRYPTO_MCRYPTD is not set +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m + +# +# Authenticated Encryption with Associated Data 
+# +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m +# CONFIG_CRYPTO_CHACHA20POLY1305 is not set +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_ECHAINIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=m +CONFIG_CRYPTO_CTR=m +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=m +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=m +# CONFIG_CRYPTO_KEYWRAP is not set + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_HMAC=m +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_CRC32 is not set +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_GHASH=m +# CONFIG_CRYPTO_POLY1305 is not set +CONFIG_CRYPTO_MD4=m +# CONFIG_CRYPTO_MD5 is not set +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=m +CONFIG_CRYPTO_SHA256=m +CONFIG_CRYPTO_SHA512=m +# CONFIG_CRYPTO_SHA3 is not set +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m + +# +# Ciphers +# +# CONFIG_CRYPTO_AES is not set +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +# CONFIG_CRYPTO_DES is not set +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_CHACHA20 is not set +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=m +# CONFIG_CRYPTO_LZO is not set +# CONFIG_CRYPTO_842 is not set +# CONFIG_CRYPTO_LZ4 is not set +# CONFIG_CRYPTO_LZ4HC is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_DRBG_HMAC=y +# CONFIG_CRYPTO_DRBG_HASH is not set +# CONFIG_CRYPTO_DRBG_CTR is not set +CONFIG_CRYPTO_DRBG=m 
+CONFIG_CRYPTO_JITTERENTROPY=m +# CONFIG_CRYPTO_HW is not set + +# +# Certificates for signature checking +# +CONFIG_VIRTUALIZATION=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_COMPAT=y +CONFIG_KVM=y +CONFIG_KVM_GUEST=y +CONFIG_KVM_HOST_KERNEL=y +# CONFIG_KVM_GUEST_KERNEL is not set +# CONFIG_PARAVIRT_GUEST is not set +CONFIG_KVM_HOST_MODE=y +# CONFIG_GUEST_LAPIC_REGS is not set +CONFIG_DIRECT_VIRQ_INJECTION=y +CONFIG_VIRQ_VCPU_INJECTION=y + +# +# Virtio drivers +# +# CONFIG_BINARY_PRINTF is not set + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_BITREVERSE=y +# CONFIG_HAVE_ARCH_BITREVERSE is not set +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IO=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=m +# CONFIG_CRC8 is not set +# CONFIG_AUDIT_ARCH_COMPAT_GENERIC is not set +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=m +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_XZ_DEC=y +# CONFIG_XZ_DEC_X86 is not set +# CONFIG_XZ_DEC_POWERPC is not set +# CONFIG_XZ_DEC_IA64 is not set +# CONFIG_XZ_DEC_ARM is not set +# CONFIG_XZ_DEC_ARMTHUMB is not set +# CONFIG_XZ_DEC_SPARC is not set +# CONFIG_XZ_DEC_BCJ is not set +# CONFIG_XZ_DEC_TEST is not set +CONFIG_RADIX_TREE_MULTIORDER=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +# CONFIG_CORDIC is not set +# CONFIG_DDR is not set +CONFIG_IRQ_POLL=y +# CONFIG_SG_SPLIT is not set +CONFIG_SG_POOL=y +CONFIG_SBITMAP=y diff --git a/arch/e2k/configs/guest_lms_defconfig b/arch/e2k/configs/guest_lms_defconfig new file mode 100644 index 000000000000..8d2634e0063b 
--- /dev/null +++ b/arch/e2k/configs/guest_lms_defconfig @@ -0,0 +1,2895 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/e2k 4.14.69-0.xx Kernel Configuration +# +CONFIG_E2K=y +CONFIG_SWIOTLB=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ZONE_DMA=y +CONFIG_GENERIC_FIND_FIRST_BIT=y +CONFIG_GENERIC_FIND_NEXT_BIT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_EARLY_PRINTK=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_GREGS_CONTEXT=y +CONFIG_GENERIC_IOMAP=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_SYS_SUPPORTS_HUGETLBFS=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_BOOT_TRACE_POSSIBLE=y +CONFIG_HOTPLUG_CPU=y +CONFIG_GENERIC_GPIO=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +# CONFIG_OF_IRQ is not set +# CONFIG_OF_NET is not set +CONFIG_DEFERRED_STRUCT_PAGE_INIT=y +CONFIG_HOLES_IN_ZONE=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_IRQ_WORK=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="" +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="-e2k-host" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZ4 is not set +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +CONFIG_FHANDLE=y +CONFIG_USELIB=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_IRQ_DOMAIN=y +CONFIG_GENERIC_MSI_IRQ=y +# 
CONFIG_IRQ_DOMAIN_DEBUG is not set +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CMOS_UPDATE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_HZ_PERIODIC=y +# CONFIG_NO_HZ_IDLE is not set +# CONFIG_NO_HZ_FULL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +# CONFIG_BSD_PROCESS_ACCT is not set + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +# CONFIG_TASKS_RCU is not set +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_BUILD_BIN2C=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +# CONFIG_MEMCG is not set +# CONFIG_BLK_CGROUP is not set +# CONFIG_CGROUP_SCHED is not set +# CONFIG_CGROUP_PIDS is not set +# CONFIG_CGROUP_RDMA is not set +# CONFIG_CGROUP_FREEZER is not set +# CONFIG_CGROUP_HUGETLB is not set +# CONFIG_CPUSETS is not set +# CONFIG_CGROUP_DEVICE is not set +# CONFIG_CGROUP_CPUACCT is not set +# CONFIG_CGROUP_PERF is not set +# CONFIG_CGROUP_DEBUG is not set +# CONFIG_SOCK_CGROUP_DATA is not set +# CONFIG_CHECKPOINT_RESTORE is not set +# CONFIG_NAMESPACES is not set +# CONFIG_SCHED_AUTOGROUP is not set +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y 
+CONFIG_ANON_INODES=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_EXPERT=y +CONFIG_MULTIUSER=y +# CONFIG_SGETMASK_SYSCALL is not set +CONFIG_SYSFS_SYSCALL=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_POSIX_TIMERS=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +# CONFIG_KALLSYMS_ABSOLUTE_PERCPU is not set +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +# CONFIG_BPF_SYSCALL is not set +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_ADVISE_SYSCALLS=y +# CONFIG_USERFAULTFD is not set +CONFIG_PCI_QUIRKS=y +CONFIG_MEMBARRIER=y +CONFIG_EMBEDDED=y +CONFIG_HAVE_PERF_EVENTS=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# CONFIG_WATCH_PREEMPT is not set +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_SLUB_DEBUG is not set +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +CONFIG_SLAB_MERGE_DEFAULT=y +# CONFIG_SLAB_FREELIST_RANDOM is not set +# CONFIG_SLAB_FREELIST_HARDENED is not set +CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_SYSTEM_DATA_VERIFICATION is not set +# CONFIG_PROFILING is not set +CONFIG_TRACEPOINTS=y +CONFIG_KPROBES=y +# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_ARCH_TASK_STRUCT_ALLOCATOR=y +CONFIG_ARCH_THREAD_STACK_ALLOCATOR=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +# CONFIG_CC_STACKPROTECTOR is not set +CONFIG_THIN_ARCHIVES=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y 
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=28 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 +# CONFIG_HAVE_ARCH_HASH is not set +CONFIG_ISA_BUS_API=y +# CONFIG_CPU_NO_EFFICIENT_FFS is not set +# CONFIG_HAVE_ARCH_VMAP_STACK is not set +# CONFIG_ARCH_OPTIONAL_KERNEL_RWX is not set +# CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT is not set +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +# CONFIG_REFCOUNT_FULL is not set + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +# CONFIG_ARCH_HAS_GCOV_PROFILE_ALL is not set +CONFIG_LTTNG=m +# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +# CONFIG_MODULE_SIG is not set +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +# CONFIG_BLK_DEV_ZONED is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +# CONFIG_BLK_WBT is not set +CONFIG_BLK_DEBUG_FS=y +# CONFIG_BLK_SED_OPAL is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set 
+# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +CONFIG_SUN_PARTITION=y +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +# CONFIG_IOSCHED_BFQ is not set +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_FREEZER=y + +# +# Processor type and features +# +CONFIG_E2K_MACHINE_SIC=y +CONFIG_E2K_MACHINE_IOHUB=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +# CONFIG_E2K_MACHINE is not set +CONFIG_CPU_ES2=y +CONFIG_CPU_E2S=y +CONFIG_CPU_E8C=y +CONFIG_CPU_E1CP=y +CONFIG_CPU_E8C2=y +CONFIG_CPU_E12C=y +CONFIG_CPU_E16C=y +CONFIG_CPU_E2C3=y +CONFIG_CPU_ISET=0 + +# +# MMU Page Tables features +# +# CONFIG_MMU_PT_V6 is not set +CONFIG_MMU_SEP_VIRT_SPACE=y +# CONFIG_DYNAMIC_SEP_VIRT_SPACE is not set +CONFIG_ENABLE_EXTMEM=y +CONFIG_EPIC=y +CONFIG_E16_CORE_SUPPORT=y +CONFIG_SMP_DAM_BUG=y +CONFIG_HZ=100 +CONFIG_GLOBAL_CONTEXT=y +# CONFIG_MLT_STORAGE is not set +# CONFIG_SECONDARY_SPACE_SUPPORT is not set +CONFIG_ONLY_HIGH_PHYS_MEM=y +# CONFIG_ONLY_BSP_MEMORY is not set +CONFIG_FORCE_MAX_ZONEORDER=15 +CONFIG_NODES_SHIFT=2 +CONFIG_NODES_SPAN_OTHER_NODES=y +CONFIG_NUMA=y +CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT=y +# CONFIG_KTHREAD_ON_CPU is not set +CONFIG_SERIAL_BOOT_PRINTK=y +CONFIG_SERIAL_AM85C30_BOOT_CONSOLE=y 
+CONFIG_BOOT_SERIAL_BAUD=115200 +CONFIG_EARLY_VIRTIO_CONSOLE=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_NO_BOOTMEM=y +# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_BOUNCE=y +# CONFIG_KSM is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +# CONFIG_ARCH_WANTS_THP_SWAP is not set +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +# CONFIG_CLEANCACHE is not set +# CONFIG_FRONTSWAP is not set +# CONFIG_CMA is not set +# CONFIG_ZPOOL is not set +# CONFIG_ZBUD is not set +# CONFIG_ZSMALLOC is not set +CONFIG_ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT=y +# CONFIG_IDLE_PAGE_TRACKING is not set +# CONFIG_PERCPU_STATS is not set +CONFIG_MEMLIMIT=2048 +CONFIG_EXT_MEMLIMIT=2048 +CONFIG_VRAM_SIZE_128=y +# CONFIG_VRAM_SIZE_256 is not set +# CONFIG_VRAM_SIZE_512 is not set +# CONFIG_VRAM_SIZE_1024 is not set +# CONFIG_VRAM_DISABLE is not set +# CONFIG_ACCESS_CONTROL is not set +# CONFIG_USR_CONTROL_INTERRUPTS is not set +# CONFIG_KERNEL_TIMES_ACCOUNT is not set +# CONFIG_CLI_CHECK_TIME is not set +# CONFIG_CMDLINE_PROMPT is not set +CONFIG_MAKE_ALL_PAGES_VALID=y +CONFIG_SET_STACKS_SIZE=y +CONFIG_PSP_STACK_SIZE=1024 +CONFIG_PSP_WIN_SIZE=32 +CONFIG_UPS_AREA_SIZE=32 +CONFIG_UPCS_AREA_SIZE=2 +CONFIG_USE_AAU=y +CONFIG_STACK_REG_WINDOW=y +CONFIG_DATA_STACK_WINDOW=y +CONFIG_BINFMT_ELF32=y +CONFIG_COMPAT=y +CONFIG_PROTECTED_MODE=y +# CONFIG_DBG_RTL_TRACE is not set +CONFIG_CLW_ENABLE=y +# CONFIG_IPD_DISABLE is not set +CONFIG_TC_STORAGE=y +# CONFIG_IGNORE_MEM_LOCK_AS is not set +CONFIG_RECOVERY=y +CONFIG_MONITORS=y +CONFIG_E2K_KEXEC=y 
+CONFIG_DUMP_ALL_STACKS=y +CONFIG_CMDLINE="root=/dev/hda1 console=ttyLMS0 console=ttyS0,115200 sclkr=no lowmem_enable init=/root/run_qemu transparent_hugepage=madvise iommu=0" +CONFIG_SMP=y +CONFIG_NR_CPUS=64 +CONFIG_MCST=y +CONFIG_PREEMPT_NONE=y +# CONFIG_RWSEM_GENERIC_SPINLOCK is not set +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_TIME=y + +# +# Timers subsystem +# +CONFIG_BIOS=y +CONFIG_ENABLE_BIOS_MPTABLE=y +CONFIG_ENABLE_ELBRUS_PCIBIOS=y +CONFIG_ENABLE_IDE=y +CONFIG_ENABLE_KEYBOARD=y +CONFIG_ENABLE_MOUSE=y +CONFIG_ENABLE_FLOPPY=y +CONFIG_ENABLE_MGA=y +CONFIG_ENABLE_RTC=y +CONFIG_ENABLE_SERIAL=y +CONFIG_ENABLE_PARALLEL_PORT=y +CONFIG_ENABLE_IOAPIC=y +# CONFIG_ADC_DISABLE is not set +CONFIG_OPTIMIZE_REGISTERS_ACCESS=y +# CONFIG_E2K_STACKS_TRACER is not set +CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y +# CONFIG_EPROF_KERNEL is not set +CONFIG_IOMMU_HELPER=y +CONFIG_HAVE_DMA_ATTRS=y + +# +# Elbrus Architecture Linux Kernel Configuration +# + +# +# Boot/prom console support +# +CONFIG_SERIAL_PRINTK=y +CONFIG_SERIAL_AM85C30_CONSOLE=y +CONFIG_EARLY_DUMP_CONSOLE=y +CONFIG_SCLKR_CLOCKSOURCE=y +CONFIG_CLKR_CLOCKSOURCE=y +# CONFIG_CLKR_SYNCHRONIZATION_WARNING is not set +CONFIG_CLKR_OFFSET=y +CONFIG_IOHUB_GPIO=y +# CONFIG_PIC is not set +CONFIG_L_X86_64=y +CONFIG_NUMA_IRQ_DESC=y +CONFIG_L_LOCAL_APIC=y +CONFIG_L_IO_APIC=y +CONFIG_L_PCI_QUIRKS=y +CONFIG_L_SIC_IPLINK_OFF=y +CONFIG_L_MMPD=y +CONFIG_L_PMC=y +CONFIG_I2C_SPI_RESET_CONTROLLER=y +CONFIG_L_I2C_CONTROLLER=y +CONFIG_L_SPI_CONTROLLER=y +# CONFIG_I2C_SPI_IRQ is not set +CONFIG_IPE2ST_POWER=m +CONFIG_ACPI_L_SPMC=y + +# +# Device Tree +# +# CONFIG_OF is not set + +# +# Power management options (ACPI, APM) +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_SUSPEND_SKIP_SYNC is not set +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set + +# +# CPU Frequency scaling +# 
+CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_PSTATES is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PSTATES=y + +# +# CPU frequency scaling drivers +# + +# +# APM (Advanced Power Management) BIOS Support +# +# CONFIG_APM is not set + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_CPU_IDLE_GOV_RT=y + +# +# E2K CPU Idle Drivers +# +CONFIG_E2K_CPUIDLE=m + +# +# E2K CPU Idle Drivers +# +# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set + +# +# Bus options (PCI, PCMCIA, EISA, MCA, ISA) +# + +# +# Elbrus chipset PCI support +# +CONFIG_PCI_ELBRUS=y +CONFIG_IOHUB_DOMAINS=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI=y +# CONFIG_PCI_USE_VECTOR is not set +# CONFIG_PCIEPORTBUS is not set +CONFIG_PCI_BUS_ADDR_T_64BIT=y +CONFIG_PCI_MSI=y +# CONFIG_PCI_MSI_IRQ_DOMAIN is not set +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +# CONFIG_PCI_STUB is not set +# CONFIG_PCI_IOV is not set +# CONFIG_PCI_PRI is not set +# CONFIG_PCI_PASID is not set +# CONFIG_HOTPLUG_PCI is not set + +# +# DesignWare PCI Core Support +# + +# +# PCI host controller drivers +# + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +CONFIG_ISA=y +# CONFIG_PCCARD is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y 
+CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +# CONFIG_HAVE_AOUT is not set +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# CONFIG_NET is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +CONFIG_FIRMWARE_IN_KERNEL=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_GENERIC_CPU_DEVICES is not set +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_SPI=y +# CONFIG_DMA_SHARED_BUFFER is not set + +# +# Elbrus MCST Device Drivers +# +CONFIG_DDI=y +CONFIG_AGAT_SCSI=m +CONFIG_GPIO=m +CONFIG_MSPS=m +CONFIG_MPV=m +# CONFIG_MGPM is not set +CONFIG_MMRM=m +CONFIG_MOKM=m +CONFIG_RDMA=m +CONFIG_RDMA_SIC=m +CONFIG_RDMA_M=m +CONFIG_MOKX=m +CONFIG_WD=y +# CONFIG_DMP_ASSIST is not set +CONFIG_LPTOUTS=m +CONFIG_M2MLC=m +CONFIG_APKPWR=m +CONFIG_HANTRODEC=m +CONFIG_BIGE=m +CONFIG_IMGTEC=m +CONFIG_BUS_MASTERING=y +CONFIG_VXD_FPGA=y + +# +# Bus devices +# +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# Partition parsers +# + +# +# User Modules And Translation Layers +# +# CONFIG_MTD_BLOCK is not set +# CONFIG_MTD_BLOCK_RO is not set +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# 
CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_SPI_NOR is not set +# CONFIG_MTD_UBI is not set +CONFIG_PARPORT=m +CONFIG_PARPORT_MCST=m +# CONFIG_PARPORT_GSC is not set +# CONFIG_PARPORT_AX88796 is not set +CONFIG_PARPORT_1284=y +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +# CONFIG_ISAPNP is not set +# CONFIG_PNPACPI is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_NULL_BLK is not set +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +CONFIG_BLK_DEV_CRYPTOLOOP=m + +# +# DRBD disabled because PROC_FS or INET not selected +# +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=4096 
+CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +CONFIG_CDROM_PKTCDVD_WCACHE=y +CONFIG_VIRTIO_BLK=y +# CONFIG_VIRTIO_BLK_SCSI is not set +# CONFIG_BLK_DEV_RSXX is not set +# CONFIG_BLK_DEV_NVME is not set +# CONFIG_NVME_FC is not set +# CONFIG_NVME_TARGET is not set + +# +# Misc devices +# +# CONFIG_SENSORS_LIS3LV02D is not set +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +# CONFIG_SGI_IOC4 is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29020 is not set +CONFIG_ISL22317=m +# CONFIG_ISL29003 is not set +CONFIG_LTC4306=m +CONFIG_UCD9080=m +CONFIG_I2C_P2PMC=m +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_TI_DAC7512 is not set +# CONFIG_USB_SWITCH_FSA9480 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +CONFIG_EEPROM_AT24=m +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_SENSORS_LIS3_SPI is not set +# CONFIG_SENSORS_LIS3_I2C is not set + +# +# Altera FPGA firmware download module +# +# CONFIG_ALTERA_STAPL is not set + +# +# Intel MIC Bus Driver +# + +# +# SCIF Bus Driver +# + +# +# VOP Bus Driver +# + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_CXL_BASE is not set +# 
CONFIG_CXL_AFU_DRIVER_OPS is not set +# CONFIG_CXL_LIB is not set +CONFIG_HAVE_IDE=y +CONFIG_IDE=y + +# +# Please see Documentation/ide/ide.txt for help/info on IDE drives +# +CONFIG_IDE_XFER_MODE=y +CONFIG_IDE_ATAPI=y +# CONFIG_BLK_DEV_IDE_SATA is not set +CONFIG_IDE_GD=y +CONFIG_IDE_GD_ATA=y +# CONFIG_IDE_GD_ATAPI is not set +CONFIG_BLK_DEV_IDECD=y +CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y +# CONFIG_BLK_DEV_IDETAPE is not set +# CONFIG_IDE_TASK_IOCTL is not set +CONFIG_IDE_PROC_FS=y + +# +# IDE chipset support/bugfixes +# +# CONFIG_BLK_DEV_PLATFORM is not set +# CONFIG_BLK_DEV_IDEPNP is not set +CONFIG_BLK_DEV_IDEDMA_SFF=y + +# +# PCI IDE chipsets support +# +CONFIG_BLK_DEV_IDEPCI=y +CONFIG_IDEPCI_PCIBUS_ORDER=y +# CONFIG_BLK_DEV_OFFBOARD is not set +CONFIG_BLK_DEV_GENERIC=y +# CONFIG_BLK_DEV_OPTI621 is not set +CONFIG_BLK_DEV_IDEDMA_PCI=y +# CONFIG_BLK_DEV_AEC62XX is not set +# CONFIG_BLK_DEV_ALI15X3 is not set +# CONFIG_BLK_DEV_AMD74XX is not set +# CONFIG_BLK_DEV_CMD64X is not set +# CONFIG_BLK_DEV_TRIFLEX is not set +# CONFIG_BLK_DEV_HPT366 is not set +# CONFIG_BLK_DEV_JMICRON is not set +CONFIG_BLK_DEV_PIIX=y +CONFIG_BLK_DEV_ELBRUS=y +# CONFIG_BLK_DEV_IT8172 is not set +# CONFIG_BLK_DEV_IT8213 is not set +# CONFIG_BLK_DEV_IT821X is not set +# CONFIG_BLK_DEV_NS87415 is not set +# CONFIG_BLK_DEV_PDC202XX_OLD is not set +# CONFIG_BLK_DEV_PDC202XX_NEW is not set +# CONFIG_BLK_DEV_SVWKS is not set +# CONFIG_BLK_DEV_SIIMAGE is not set +# CONFIG_BLK_DEV_SLC90E66 is not set +# CONFIG_BLK_DEV_TRM290 is not set +# CONFIG_BLK_DEV_VIA82CXXX is not set +# CONFIG_BLK_DEV_TC86C001 is not set +CONFIG_BLK_DEV_IDEDMA=y + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +# CONFIG_SCSI_MQ_DEFAULT is not set +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +# CONFIG_CHR_DEV_OSST is not set +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=m +# CONFIG_CHR_DEV_SCH is not set 
+CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_SAS_ATTRS=m +# CONFIG_SCSI_SAS_LIBSAS is not set +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_LOWLEVEL=y +# CONFIG_ISCSI_BOOT_SYSFS is not set +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +# CONFIG_SCSI_HPSA is not set +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AHA152X is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=m +CONFIG_MEGARAID_MAILBOX=m +CONFIG_MEGARAID_LEGACY=m +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +# CONFIG_SCSI_MPT2SAS is not set +# CONFIG_SCSI_SMARTPQI is not set +# CONFIG_SCSI_UFSHCD is not set +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FUTURE_DOMAIN is not set +# CONFIG_SCSI_GENERIC_NCR5380 is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_NCR53C406A is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_FAS is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +# CONFIG_SCSI_SYM53C416 is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_VIRTIO is not set +# CONFIG_SCSI_DH is not set +# CONFIG_SCSI_OSD_INITIATOR is not set +CONFIG_ATA=y +# CONFIG_ATA_NONSTANDARD is not set +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native 
interface +# +CONFIG_SATA_AHCI=y +# CONFIG_SATA_AHCI_PLATFORM is not set +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +CONFIG_SATA_SIL24=y +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +# CONFIG_ATA_PIIX is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +CONFIG_SATA_SIL=y +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# 
CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_QDI is not set +# CONFIG_PATA_RZ1000 is not set +# CONFIG_PATA_WINBOND_VLB is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_ATA_GENERIC is not set +# CONFIG_PATA_LEGACY is not set +# CONFIG_MD is not set +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +# CONFIG_LOOPBACK_TARGET is not set +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +CONFIG_INPUT_POLLDEV=y +# CONFIG_INPUT_SPARSEKMAP is not set +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=m +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +CONFIG_KEYBOARD_SUNKBD=m +# CONFIG_KEYBOARD_XTKBD is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y 
+CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +# CONFIG_MOUSE_PS2_ELANTECH is not set +# CONFIG_MOUSE_PS2_SENTELIC is not set +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_SMBUS=y +CONFIG_MOUSE_SERIAL=m +# CONFIG_MOUSE_CYAPA is not set +# CONFIG_MOUSE_ELAN_I2C is not set +# CONFIG_MOUSE_INPORT is not set +# CONFIG_MOUSE_LOGIBM is not set +# CONFIG_MOUSE_PC110PAD is not set +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_MOUSE_GPIO is not set +# CONFIG_MOUSE_SYNAPTICS_I2C is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set +# CONFIG_RMI4_CORE is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_SERIO_SERPORT=m +# CONFIG_SERIO_PARKBD is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +# CONFIG_SERIO_RAW is not set +# CONFIG_SERIO_ALTERA_PS2 is not set +# CONFIG_SERIO_PS2MULT is not set +# CONFIG_SERIO_ARC_PS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_ROCKETPORT is not set +# CONFIG_CYCLADES is not set +# CONFIG_MOXA_INTELLIO is not set +CONFIG_MOXA_SMARTIO=m +# CONFIG_SYNCLINKMP is not set +# CONFIG_SYNCLINK_GT is not set +# CONFIG_NOZOMI is not set +# CONFIG_ISI is not set +# CONFIG_N_HDLC is not set +# CONFIG_TRACE_SINK is not set +CONFIG_DEVMEM=y +CONFIG_DEVKMEM=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y 
+CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=32 +CONFIG_SERIAL_8250_EXTENDED=y +# CONFIG_SERIAL_8250_MANY_PORTS is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +# CONFIG_SERIAL_8250_RSA is not set +# CONFIG_SERIAL_8250_FSL is not set +# CONFIG_SERIAL_8250_DW is not set +# CONFIG_SERIAL_8250_RT288X is not set +# CONFIG_SERIAL_8250_MOXA is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_LMS_CONSOLE is not set +CONFIG_SERIAL_L_ZILOG=y +CONFIG_SERIAL_L_ZILOG_CONSOLE=y +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_DEV_BUS is not set +# CONFIG_TTY_PRINTK is not set +CONFIG_PRINTER=m +# CONFIG_LP_CONSOLE is not set +CONFIG_PPDEV=m +CONFIG_HVC_DRIVER=y +CONFIG_HVC_L=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_IPMI_HANDLER=m +# CONFIG_IPMI_PANIC_EVENT is not set +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +# CONFIG_IPMI_SSIF is not set +# CONFIG_IPMI_WATCHDOG is not set +CONFIG_IPMI_POWEROFF=m +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +CONFIG_DEVPORT=y +# CONFIG_XILLYBUS is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +# CONFIG_I2C_MUX_GPIO is not set +# CONFIG_I2C_MUX_LTC4306 is not set +# CONFIG_I2C_MUX_PCA9541 is not set +# CONFIG_I2C_MUX_PCA954x is not set +# CONFIG_I2C_MUX_REG is not set +# CONFIG_I2C_MUX_MLXCPLD is not set 
+CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOBIT=y + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CBUS_GPIO is not set +# CONFIG_I2C_DESIGNWARE_PLATFORM is not set +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_PXA_PCI is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_PCA_ISA is not set +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_BUTTERFLY is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_LM70_LLP is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_PXA2XX_PCI is not set +# CONFIG_SPI_ROCKCHIP is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not 
set + +# +# SPI Protocol Masters +# +CONFIG_SPI_SPIDEV=m +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set +CONFIG_NTP_PPS=y + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_PARPORT is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# + +# +# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. +# +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_DWAPB is not set +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_GENERIC_PLATFORM is not set +# CONFIG_GPIO_MOCKUP is not set + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADP5588 is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +CONFIG_GPIO_PCA953X=m +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set + +# +# MFD GPIO expanders +# + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_RDC321X is not set + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +# CONFIG_POWER_RESET is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# 
CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_LTC3651 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_SMB347 is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_CHARGER_RT9455 is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7314 is not set +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7310 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +CONFIG_SENSORS_ADT7475=m +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_ASPEED is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_IBMAEM is not set +# CONFIG_SENSORS_IBMPEX is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +# CONFIG_SENSORS_POWR1220 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2990 is not set +CONFIG_SENSORS_LTC4151=m +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4222 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4260 is not set +# 
CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_MAX1111 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_ADCXX is not set +CONFIG_SENSORS_LM63=m +# CONFIG_SENSORS_LM70 is not set +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +CONFIG_SENSORS_LM95231=m +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +CONFIG_SENSORS_LM95245=m +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set +# CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_PCF8591 is not set +CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +# CONFIG_SENSORS_ADM1275 is not set +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_IR35221 is not set +# CONFIG_SENSORS_LM25066 is not set +# CONFIG_SENSORS_LTC2978 is not set +# CONFIG_SENSORS_LTC3815 is not set +# CONFIG_SENSORS_MAX16064 is not set +# CONFIG_SENSORS_MAX20751 is not set +# CONFIG_SENSORS_MAX34440 is not set +# CONFIG_SENSORS_MAX8688 is not set +# CONFIG_SENSORS_TPS40422 is not set +# CONFIG_SENSORS_TPS53679 is not set +# CONFIG_SENSORS_UCD9000 is not set +# CONFIG_SENSORS_UCD9200 is not set +# CONFIG_SENSORS_ZL6100 
is not set +# CONFIG_SENSORS_SHT15 is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_SCH56XX_COMMON is not set +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +# CONFIG_SENSORS_ADS1015 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_ADS7871 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +# CONFIG_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not 
set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RTSX_PCI is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS80031 is 
not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_REGULATOR is not set +CONFIG_RC_CORE=y +CONFIG_RC_MAP=y +CONFIG_RC_DECODERS=y +# CONFIG_LIRC is not set +CONFIG_IR_NEC_DECODER=y +CONFIG_IR_RC5_DECODER=y +CONFIG_IR_RC6_DECODER=y +CONFIG_IR_JVC_DECODER=y +CONFIG_IR_SONY_DECODER=y +CONFIG_IR_SANYO_DECODER=y +CONFIG_IR_SHARP_DECODER=y +CONFIG_IR_MCE_KBD_DECODER=y +CONFIG_IR_XMP_DECODER=y +# CONFIG_RC_DEVICES is not set +CONFIG_MEDIA_SUPPORT=m + +# +# Multimedia core support +# +# CONFIG_MEDIA_CAMERA_SUPPORT is not set +# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set +# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set +# CONFIG_MEDIA_RADIO_SUPPORT is not set +# CONFIG_MEDIA_SDR_SUPPORT is not set +# CONFIG_MEDIA_CEC_SUPPORT is not set +# CONFIG_VIDEO_ADV_DEBUG is not set +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set +# CONFIG_TTPCI_EEPROM is not set + +# +# Media drivers +# +# CONFIG_MEDIA_PCI_SUPPORT is not set + +# +# Supported MMC/SDIO adapters +# + +# +# Media ancillary drivers (tuners, sensors, i2c, spi, frontends) +# + +# +# Customise DVB Frontends +# + +# +# Tools to develop new frontends +# + +# +# Graphics support +# +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=16 +# CONFIG_DRM is not set + +# +# ACP (Audio CoProcessor) Configuration +# +# CONFIG_DRM_IMX_HDMI is not set +# CONFIG_DRM_LIB_RANDOM is not set + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB_DDC=y +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# 
CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +CONFIG_FB_BACKLIGHT=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_VGA16 is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +CONFIG_FB_RADEON=y +CONFIG_FB_RADEON_I2C=y +CONFIG_FB_RADEON_BACKLIGHT=y +# CONFIG_FB_RADEON_DEBUG is not set +CONFIG_FB_ATY128=y +CONFIG_FB_ATY128_BACKLIGHT=y +CONFIG_FB_ATY=y +# CONFIG_FB_ATY_CT is not set +# CONFIG_FB_ATY_GX is not set +CONFIG_FB_ATY_BACKLIGHT=y +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +# CONFIG_FB_AUO_K190X is not set +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SM712 is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# 
CONFIG_LCD_VGG2432A4 is not set +# CONFIG_LCD_PLATFORM is not set +# CONFIG_LCD_S6E63M0 is not set +# CONFIG_LCD_LD9040 is not set +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_GENERIC is not set +# CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3639 is not set +# CONFIG_BACKLIGHT_GPIO is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# CONFIG_VGASTATE is not set +# CONFIG_FB_MGAM83 is not set +# CONFIG_PCI_FB_MGAM83 is not set +# CONFIG_FB_MGA3D is not set +# CONFIG_FB_LYNXFB is not set + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +# CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +# CONFIG_FRAMEBUFFER_CONSOLE is not set +# CONFIG_LOGO is not set +# CONFIG_SOUND is not set + +# +# HID support +# +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +# CONFIG_HIDRAW is not set +# CONFIG_UHID is not set +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +# CONFIG_HID_A4TECH is not set +# CONFIG_HID_ACRUX is not set +# CONFIG_HID_APPLE is not set +# CONFIG_HID_AUREAL is not set +# CONFIG_HID_BELKIN is not set +# CONFIG_HID_CHERRY is not set +# CONFIG_HID_CHICONY is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CYPRESS is not set +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_EZKEY is not set +# CONFIG_HID_GEMBIRD is not set +# CONFIG_HID_GFRM is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_ICADE is not set +# CONFIG_HID_ITE is not set +# CONFIG_HID_TWINHAN is not set +# CONFIG_HID_KENSINGTON is not set +# 
CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LENOVO is not set +CONFIG_HID_LOGITECH=y +# CONFIG_HID_LOGITECH_HIDPP is not set +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_MICROSOFT is not set +# CONFIG_HID_MONTEREY is not set +CONFIG_HID_MULTITOUCH=y +# CONFIG_HID_NTI is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PLANTRONICS is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_STEELSERIES is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_XINMO is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_HID_SENSOR_HUB is not set +# CONFIG_HID_ALPS is not set + +# +# I2C HID support +# +# CONFIG_I2C_HID is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +# CONFIG_USB_SUPPORT is not set +# CONFIG_UWB is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_MC146818_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABX80X is 
not set +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8523 is not set +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8010 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV8803 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +CONFIG_RTC_DRV_CY14B101P=y +CONFIG_RTC_DRV_FM33256=y +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RX4581 is not set +# CONFIG_RTC_DRV_RX6110 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_CMOS=y +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_DS2404 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# 
CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_FTRTC010 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_DMADEVICES is not set + +# +# DMABUF options +# +# CONFIG_SYNC_FILE is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_PANEL is not set +CONFIG_UIO=m +# CONFIG_UIO_CIF is not set +# CONFIG_UIO_PDRV_GENIRQ is not set +# CONFIG_UIO_DMEM_GENIRQ is not set +# CONFIG_UIO_AEC is not set +# CONFIG_UIO_SERCOS3 is not set +# CONFIG_UIO_PCI_GENERIC is not set +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +# CONFIG_VFIO is not set +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO=y + +# +# Virtio drivers +# +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +# CONFIG_VIRTIO_BALLOON is not set +# CONFIG_VIRTIO_INPUT is not set +# CONFIG_VIRTIO_MMIO is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_HYPERV_TSCPAGE is not set +# CONFIG_STAGING is not set +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +# CONFIG_ATMEL_PIT is not set +# CONFIG_SH_TIMER_CMT is not set +# CONFIG_SH_TIMER_MTU2 is not set +# CONFIG_SH_TIMER_TMU is not set +# CONFIG_EM_TIMER_STI is not set +# CONFIG_MAILBOX is not set +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IOVA=y +CONFIG_IOMMU_DMA=y + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set + +# +# Rpmsg drivers +# + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# + +# +# Broadcom SoC drivers +# + +# +# i.MX SoC drivers +# + +# +# Qualcomm SoC drivers +# +# CONFIG_SUNXI_SRAM is not set +# CONFIG_SOC_TI is not set +# CONFIG_PM_DEVFREQ is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +CONFIG_IIO=m +CONFIG_INV_MPU6050_IIO=m +CONFIG_INV_MPU6050_I2C=m +CONFIG_AK8975=m +CONFIG_BMP280=m +CONFIG_BMP280_I2C=m +CONFIG_BMP280_SPI=m 
+CONFIG_ARM_GIC_MAX_NR=1 +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set +# CONFIG_FMC is not set + +# +# PHY Subsystem +# +# CONFIG_GENERIC_PHY is not set +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_RAS is not set + +# +# Android +# +# CONFIG_ANDROID is not set +# CONFIG_LIBNVDIMM is not set +# CONFIG_DAX is not set +CONFIG_NVMEM=y +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# CONFIG_FPGA is not set + +# +# FSI support +# +# CONFIG_FSI is not set + +# +# File systems +# +CONFIG_FS_IOMAP=y +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_ENCRYPTION is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +CONFIG_REISERFS_FS=m +# CONFIG_REISERFS_CHECK is not set +# CONFIG_REISERFS_PROC_INFO is not set +# CONFIG_REISERFS_FS_XATTR is not set +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +# CONFIG_JFS_SECURITY is not set +# CONFIG_JFS_DEBUG is not set +# CONFIG_JFS_STATISTICS is not set +CONFIG_XFS_FS=m +# CONFIG_XFS_QUOTA is not set +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is not set +CONFIG_BTRFS_FS=m +# CONFIG_BTRFS_FS_POSIX_ACL is not set +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_FS_DAX is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +# CONFIG_EXPORTFS_BLOCK_OPS is not set +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set 
+CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set +CONFIG_QUOTA=y +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +# CONFIG_OVERLAY_FS is not set + +# +# Caches +# +CONFIG_FSCACHE=y +# CONFIG_FSCACHE_STATS is not set +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +# CONFIG_CACHEFILES is not set + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_UDF_NLS=y + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_FAT_DEFAULT_UTF8 is not set +CONFIG_NTFS_FS=m +# CONFIG_NTFS_DEBUG is not set +CONFIG_NTFS_RW=y + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +# CONFIG_PROC_CHILDREN is not set +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_CONFIGFS_FS=m +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_CRAMFS is not set +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +# CONFIG_SQUASHFS_XATTR is not set +CONFIG_SQUASHFS_ZLIB=y +# CONFIG_SQUASHFS_LZ4 is not set +# CONFIG_SQUASHFS_LZO is not 
set +# CONFIG_SQUASHFS_XZ is not set +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +CONFIG_SQUASHFS_EMBEDDED=y +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +CONFIG_ROMFS_FS=m +CONFIG_ROMFS_BACKED_BY_BLOCK=y +# CONFIG_ROMFS_BACKED_BY_MTD is not set +# CONFIG_ROMFS_BACKED_BY_BOTH is not set +CONFIG_ROMFS_ON_BLOCK=y +# CONFIG_PSTORE is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=m +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +CONFIG_NLS_CODEPAGE_855=m +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +CONFIG_NLS_CODEPAGE_866=m +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=m +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +CONFIG_NLS_ISO8859_5=m +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +# CONFIG_NLS_MAC_ROMAN is not set +# CONFIG_NLS_MAC_CELTIC is not set +# 
CONFIG_NLS_MAC_CENTEURO is not set +# CONFIG_NLS_MAC_CROATIAN is not set +CONFIG_NLS_MAC_CYRILLIC=m +# CONFIG_NLS_MAC_GAELIC is not set +# CONFIG_NLS_MAC_GREEK is not set +# CONFIG_NLS_MAC_ICELAND is not set +# CONFIG_NLS_MAC_INUIT is not set +# CONFIG_NLS_MAC_ROMANIAN is not set +# CONFIG_NLS_MAC_TURKISH is not set +CONFIG_NLS_UTF8=m + +# +# Kernel hacking +# +CONFIG_TRACE_IRQFLAGS_SUPPORT=y + +# +# printk and dmesg options +# +# CONFIG_PRINTK_TIME is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_DYNAMIC_DEBUG is not set + +# +# Compile-time checks and compiler options +# +# CONFIG_DEBUG_INFO is not set +# CONFIG_ENABLE_WARN_DEPRECATED is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=4096 +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_READABLE_ASM is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_PAGE_OWNER is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_PER_CPU_MAPS is not set +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Lockups and Hangs +# +# CONFIG_SOFTLOCKUP_DETECTOR is not set +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set 
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_WQ_WATCHDOG is not set +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +# CONFIG_SCHED_DEBUG is not set +# CONFIG_SCHED_INFO is not set +# CONFIG_SCHEDSTATS is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_HAVE_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_PI_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_PROVE_RCU is not set +# CONFIG_TORTURE_TEST is not set +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=21 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y 
+CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_HWLAT_TRACER is not set +# CONFIG_FTRACE_SYSCALLS is not set +# CONFIG_TRACER_SNAPSHOT is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +CONFIG_KPROBE_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_DYNAMIC_FTRACE=y +# CONFIG_FUNCTION_PROFILER is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_HIST_TRIGGERS is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +CONFIG_TRACING_EVENTS_GPIO=y + +# +# Runtime Testing +# +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_MEMTEST is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +# CONFIG_ARCH_WANTS_UBSAN_NO_NULL is not set +# CONFIG_UBSAN is not set +# CONFIG_E2K_DEBUG_KERNEL is not set +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y + 
+# +# Elbrus architecture kernel hacking +# +CONFIG_BOOT_TRACE=y +CONFIG_BOOT_TRACE_THRESHOLD=100 + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +# CONFIG_PERSISTENT_KEYRINGS is not set +# CONFIG_BIG_KEYS is not set +# CONFIG_ENCRYPTED_KEYS is not set +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +# CONFIG_SECURITY_WRITABLE_HOOKS is not set +# CONFIG_SECURITYFS is not set +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_PATH=y +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +# CONFIG_HARDENED_USERCOPY is not set +# CONFIG_STATIC_USERMODEHELPER is not set +# CONFIG_SECURITY_LOADPIN is not set +# CONFIG_SECURITY_YAMA is not set +CONFIG_INTEGRITY=y +# CONFIG_INTEGRITY_SIGNATURE is not set +# CONFIG_IMA is not set +# CONFIG_EVM is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_SYSVIPC_COMPAT=y +CONFIG_XOR_BLOCKS=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=m +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=m +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=m +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_ACOMP2=y +# CONFIG_CRYPTO_RSA is not set +# CONFIG_CRYPTO_DH is not set +# CONFIG_CRYPTO_ECDH is not set +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_GF128MUL=m +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_NULL2=y +# CONFIG_CRYPTO_PCRYPT is not set +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=m +# CONFIG_CRYPTO_MCRYPTD is not set +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m +# CONFIG_CRYPTO_CHACHA20POLY1305 is not set +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_ECHAINIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CTR=m 
+# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=m +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=m +# CONFIG_CRYPTO_KEYWRAP is not set + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_HMAC=m +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_CRC32 is not set +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_GHASH=m +# CONFIG_CRYPTO_POLY1305 is not set +CONFIG_CRYPTO_MD4=m +# CONFIG_CRYPTO_MD5 is not set +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=m +CONFIG_CRYPTO_SHA256=m +CONFIG_CRYPTO_SHA512=m +# CONFIG_CRYPTO_SHA3 is not set +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m + +# +# Ciphers +# +# CONFIG_CRYPTO_AES is not set +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +# CONFIG_CRYPTO_DES is not set +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_CHACHA20 is not set +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=m +# CONFIG_CRYPTO_LZO is not set +# CONFIG_CRYPTO_842 is not set +# CONFIG_CRYPTO_LZ4 is not set +# CONFIG_CRYPTO_LZ4HC is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_DRBG_HMAC=y +# CONFIG_CRYPTO_DRBG_HASH is not set +# CONFIG_CRYPTO_DRBG_CTR is not set +CONFIG_CRYPTO_DRBG=m +CONFIG_CRYPTO_JITTERENTROPY=m +# CONFIG_CRYPTO_HW is not set +# CONFIG_ASYMMETRIC_KEY_TYPE is not set + +# +# Certificates for signature checking +# +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not 
set +CONFIG_VIRTUALIZATION=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_COMPAT=y +CONFIG_KVM=y +CONFIG_KVM_GUEST=y +CONFIG_KVM_PARAVIRTUALIZATION=y +CONFIG_KVM_HW_VIRTUALIZATION=y +CONFIG_KVM_HW_PARAVIRTUALIZATION=y +# CONFIG_KVM_HOST_KERNEL is not set +CONFIG_KVM_GUEST_KERNEL=y +# CONFIG_PARAVIRT_GUEST is not set +CONFIG_KVM_GUEST_HW_PV=y +CONFIG_KVM_GUEST_HW_HCALL=y +# CONFIG_KVM_GUEST_HW_EPIC is not set +CONFIG_KVM_SHADOW_PT=y +CONFIG_KVM_GUEST_MODE=y +CONFIG_KVM_GUEST_SMP=y +# CONFIG_KVM_GUEST_DEBUG is not set +# CONFIG_GUEST_LAPIC_REGS is not set +CONFIG_DIRECT_VIRQ_INJECTION=y +CONFIG_VIRQ_VCPU_INJECTION=y + +# +# Virtio drivers +# +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_BITREVERSE=y +# CONFIG_HAVE_ARCH_BITREVERSE is not set +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IO=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC4 is not set +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=m +# CONFIG_CRC8 is not set +CONFIG_XXHASH=m +# CONFIG_AUDIT_ARCH_COMPAT_GENERIC is not set +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=m +CONFIG_LZO_COMPRESS=m +CONFIG_LZO_DECOMPRESS=m +CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=m +CONFIG_XZ_DEC=y +# CONFIG_XZ_DEC_X86 is not set +# CONFIG_XZ_DEC_POWERPC is not set +# CONFIG_XZ_DEC_IA64 is not set +# CONFIG_XZ_DEC_ARM is not set +# CONFIG_XZ_DEC_ARMTHUMB is not set +# CONFIG_XZ_DEC_SPARC is not set +# CONFIG_XZ_DEC_BCJ is not set +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_RADIX_TREE_MULTIORDER=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +# 
CONFIG_DMA_NOOP_OPS is not set +# CONFIG_DMA_VIRT_OPS is not set +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +# CONFIG_CORDIC is not set +# CONFIG_DDR is not set +CONFIG_IRQ_POLL=y +# CONFIG_SG_SPLIT is not set +CONFIG_SG_POOL=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set diff --git a/arch/e2k/configs/host_lms_defconfig b/arch/e2k/configs/host_lms_defconfig new file mode 100644 index 000000000000..a31309abc1ff --- /dev/null +++ b/arch/e2k/configs/host_lms_defconfig @@ -0,0 +1,2889 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/e2k 4.14.69-0.xx Kernel Configuration +# +CONFIG_E2K=y +CONFIG_SWIOTLB=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ZONE_DMA=y +CONFIG_GENERIC_FIND_FIRST_BIT=y +CONFIG_GENERIC_FIND_NEXT_BIT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_EARLY_PRINTK=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_GREGS_CONTEXT=y +CONFIG_GENERIC_IOMAP=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_SYS_SUPPORTS_HUGETLBFS=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_BOOT_TRACE_POSSIBLE=y +CONFIG_HOTPLUG_CPU=y +CONFIG_GENERIC_GPIO=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +# CONFIG_OF_IRQ is not set +# CONFIG_OF_NET is not set +CONFIG_DEFERRED_STRUCT_PAGE_INIT=y +CONFIG_HOLES_IN_ZONE=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_IRQ_WORK=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="" +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="-e2k-host" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZ4=y 
+CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZ4 is not set +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +CONFIG_FHANDLE=y +CONFIG_USELIB=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_IRQ_DOMAIN=y +CONFIG_GENERIC_MSI_IRQ=y +# CONFIG_IRQ_DOMAIN_DEBUG is not set +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CMOS_UPDATE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_HZ_PERIODIC=y +# CONFIG_NO_HZ_IDLE is not set +# CONFIG_NO_HZ_FULL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +# CONFIG_BSD_PROCESS_ACCT is not set + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +# CONFIG_TASKS_RCU is not set +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_BUILD_BIN2C=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +# CONFIG_MEMCG is not set +# CONFIG_BLK_CGROUP is not set +# CONFIG_CGROUP_SCHED is not set +# CONFIG_CGROUP_PIDS is not set +# CONFIG_CGROUP_RDMA is not set +# CONFIG_CGROUP_FREEZER is not set +# CONFIG_CGROUP_HUGETLB is not set +# CONFIG_CPUSETS is not set +# CONFIG_CGROUP_DEVICE is not set +# CONFIG_CGROUP_CPUACCT is not set +# CONFIG_CGROUP_PERF is not set +# CONFIG_CGROUP_DEBUG is not set +# CONFIG_SOCK_CGROUP_DATA is not set +# CONFIG_CHECKPOINT_RESTORE is not set +# 
CONFIG_NAMESPACES is not set +# CONFIG_SCHED_AUTOGROUP is not set +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_EXPERT=y +CONFIG_MULTIUSER=y +# CONFIG_SGETMASK_SYSCALL is not set +CONFIG_SYSFS_SYSCALL=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_POSIX_TIMERS=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +# CONFIG_KALLSYMS_ABSOLUTE_PERCPU is not set +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +# CONFIG_BPF_SYSCALL is not set +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_ADVISE_SYSCALLS=y +# CONFIG_USERFAULTFD is not set +CONFIG_PCI_QUIRKS=y +CONFIG_MEMBARRIER=y +CONFIG_EMBEDDED=y +CONFIG_HAVE_PERF_EVENTS=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# CONFIG_WATCH_PREEMPT is not set +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_SLUB_DEBUG is not set +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +CONFIG_SLAB_MERGE_DEFAULT=y +# CONFIG_SLAB_FREELIST_RANDOM is not set +# CONFIG_SLAB_FREELIST_HARDENED is not set +CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_SYSTEM_DATA_VERIFICATION is not set +# CONFIG_PROFILING is not set +CONFIG_TRACEPOINTS=y +CONFIG_KPROBES=y +# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_ARCH_TASK_STRUCT_ALLOCATOR=y 
+CONFIG_ARCH_THREAD_STACK_ALLOCATOR=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +# CONFIG_CC_STACKPROTECTOR is not set +CONFIG_THIN_ARCHIVES=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=28 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 +# CONFIG_HAVE_ARCH_HASH is not set +CONFIG_ISA_BUS_API=y +# CONFIG_CPU_NO_EFFICIENT_FFS is not set +# CONFIG_HAVE_ARCH_VMAP_STACK is not set +# CONFIG_ARCH_OPTIONAL_KERNEL_RWX is not set +# CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT is not set +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +# CONFIG_REFCOUNT_FULL is not set + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +# CONFIG_ARCH_HAS_GCOV_PROFILE_ALL is not set +CONFIG_LTTNG=m +# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +# CONFIG_MODULE_SIG is not set +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +# CONFIG_BLK_DEV_ZONED is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +# CONFIG_BLK_WBT is not set +CONFIG_BLK_DEBUG_FS=y +# CONFIG_BLK_SED_OPAL is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# 
CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +CONFIG_SUN_PARTITION=y +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +# CONFIG_IOSCHED_BFQ is not set +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_FREEZER=y + +# +# Processor type and features +# +CONFIG_E2K_MACHINE_SIC=y +CONFIG_E2K_MACHINE_IOHUB=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +# CONFIG_E2K_MACHINE is not set +CONFIG_CPU_ES2=y +CONFIG_CPU_E2S=y +CONFIG_CPU_E8C=y +CONFIG_CPU_E1CP=y +CONFIG_CPU_E8C2=y +CONFIG_CPU_E12C=y +CONFIG_CPU_E16C=y +CONFIG_CPU_E2C3=y +CONFIG_CPU_ISET=0 + +# +# MMU Page Tables features +# +# CONFIG_MMU_PT_V6 is not set +CONFIG_MMU_SEP_VIRT_SPACE=y +CONFIG_DYNAMIC_SEP_VIRT_SPACE=y +CONFIG_ENABLE_EXTMEM=y +CONFIG_EPIC=y +CONFIG_E16_CORE_SUPPORT=y +CONFIG_SMP_DAM_BUG=y +CONFIG_HZ=100 
+CONFIG_GLOBAL_CONTEXT=y +# CONFIG_MLT_STORAGE is not set +# CONFIG_SECONDARY_SPACE_SUPPORT is not set +CONFIG_ONLY_HIGH_PHYS_MEM=y +# CONFIG_ONLY_BSP_MEMORY is not set +CONFIG_FORCE_MAX_ZONEORDER=15 +CONFIG_NODES_SHIFT=2 +CONFIG_NODES_SPAN_OTHER_NODES=y +CONFIG_NUMA=y +CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT=y +# CONFIG_KTHREAD_ON_CPU is not set +CONFIG_SERIAL_BOOT_PRINTK=y +CONFIG_SERIAL_AM85C30_BOOT_CONSOLE=y +CONFIG_BOOT_SERIAL_BAUD=115200 +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_NO_BOOTMEM=y +# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_BOUNCE=y +# CONFIG_KSM is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +# CONFIG_ARCH_WANTS_THP_SWAP is not set +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +# CONFIG_CLEANCACHE is not set +# CONFIG_FRONTSWAP is not set +# CONFIG_CMA is not set +# CONFIG_ZPOOL is not set +# CONFIG_ZBUD is not set +# CONFIG_ZSMALLOC is not set +CONFIG_ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT=y +# CONFIG_IDLE_PAGE_TRACKING is not set +# CONFIG_PERCPU_STATS is not set +CONFIG_MEMLIMIT=2048 +CONFIG_EXT_MEMLIMIT=2048 +CONFIG_VRAM_SIZE_128=y +# CONFIG_VRAM_SIZE_256 is not set +# CONFIG_VRAM_SIZE_512 is not set +# CONFIG_VRAM_SIZE_1024 is not set +# CONFIG_VRAM_DISABLE is not set +# CONFIG_ACCESS_CONTROL is not set +# CONFIG_USR_CONTROL_INTERRUPTS is not set +# CONFIG_KERNEL_TIMES_ACCOUNT is not set +# CONFIG_CLI_CHECK_TIME is not set +# CONFIG_CMDLINE_PROMPT is not set +CONFIG_MAKE_ALL_PAGES_VALID=y +CONFIG_SET_STACKS_SIZE=y +CONFIG_PSP_STACK_SIZE=1024 +CONFIG_PSP_WIN_SIZE=32 +CONFIG_UPS_AREA_SIZE=32 
+CONFIG_UPCS_AREA_SIZE=2 +CONFIG_USE_AAU=y +CONFIG_STACK_REG_WINDOW=y +CONFIG_DATA_STACK_WINDOW=y +CONFIG_BINFMT_ELF32=y +CONFIG_COMPAT=y +CONFIG_PROTECTED_MODE=y +# CONFIG_DBG_RTL_TRACE is not set +CONFIG_CLW_ENABLE=y +# CONFIG_IPD_DISABLE is not set +CONFIG_TC_STORAGE=y +# CONFIG_IGNORE_MEM_LOCK_AS is not set +CONFIG_RECOVERY=y +CONFIG_MONITORS=y +CONFIG_E2K_KEXEC=y +CONFIG_DUMP_ALL_STACKS=y +CONFIG_CMDLINE="root=/dev/hda1 console=ttyLMS0 console=ttyS0,115200 sclkr=no lowmem_enable init=/root/run_qemu transparent_hugepage=madvise iommu=0" +CONFIG_SMP=y +CONFIG_NR_CPUS=64 +CONFIG_MCST=y +CONFIG_PREEMPT_NONE=y +# CONFIG_RWSEM_GENERIC_SPINLOCK is not set +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_TIME=y + +# +# Timers subsystem +# +CONFIG_BIOS=y +CONFIG_ENABLE_BIOS_MPTABLE=y +CONFIG_ENABLE_ELBRUS_PCIBIOS=y +CONFIG_ENABLE_IDE=y +CONFIG_ENABLE_KEYBOARD=y +CONFIG_ENABLE_MOUSE=y +CONFIG_ENABLE_FLOPPY=y +CONFIG_ENABLE_MGA=y +CONFIG_ENABLE_RTC=y +CONFIG_ENABLE_SERIAL=y +CONFIG_ENABLE_PARALLEL_PORT=y +CONFIG_ENABLE_IOAPIC=y +# CONFIG_ADC_DISABLE is not set +CONFIG_OPTIMIZE_REGISTERS_ACCESS=y +# CONFIG_E2K_STACKS_TRACER is not set +CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y +# CONFIG_EPROF_KERNEL is not set +CONFIG_IOMMU_HELPER=y +CONFIG_HAVE_DMA_ATTRS=y + +# +# Elbrus Architecture Linux Kernel Configuration +# + +# +# Boot/prom console support +# +CONFIG_SERIAL_PRINTK=y +CONFIG_SERIAL_AM85C30_CONSOLE=y +CONFIG_EARLY_DUMP_CONSOLE=y +CONFIG_SCLKR_CLOCKSOURCE=y +CONFIG_CLKR_CLOCKSOURCE=y +# CONFIG_CLKR_SYNCHRONIZATION_WARNING is not set +CONFIG_CLKR_OFFSET=y +CONFIG_IOHUB_GPIO=y +# CONFIG_PIC is not set +CONFIG_L_X86_64=y +CONFIG_NUMA_IRQ_DESC=y +CONFIG_L_LOCAL_APIC=y +CONFIG_L_IO_APIC=y +CONFIG_L_PCI_QUIRKS=y +CONFIG_L_SIC_IPLINK_OFF=y +CONFIG_L_MMPD=y +CONFIG_L_PMC=y +CONFIG_I2C_SPI_RESET_CONTROLLER=y +CONFIG_L_I2C_CONTROLLER=y +CONFIG_L_SPI_CONTROLLER=y +# CONFIG_I2C_SPI_IRQ is not set +CONFIG_IPE2ST_POWER=m +CONFIG_ACPI_L_SPMC=y + +# +# Device Tree +# +# CONFIG_OF is not 
set + +# +# Power management options (ACPI, APM) +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_SUSPEND_SKIP_SYNC is not set +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_PSTATES is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PSTATES=y + +# +# CPU frequency scaling drivers +# + +# +# APM (Advanced Power Management) BIOS Support +# +# CONFIG_APM is not set + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_CPU_IDLE_GOV_RT=y + +# +# E2K CPU Idle Drivers +# +CONFIG_E2K_CPUIDLE=m + +# +# E2K CPU Idle Drivers +# +# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set + +# +# Bus options (PCI, PCMCIA, EISA, MCA, ISA) +# + +# +# Elbrus chipset PCI support +# +CONFIG_PCI_ELBRUS=y +CONFIG_IOHUB_DOMAINS=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI=y +# CONFIG_PCI_USE_VECTOR is not set +# CONFIG_PCIEPORTBUS is not set +CONFIG_PCI_BUS_ADDR_T_64BIT=y +CONFIG_PCI_MSI=y +# CONFIG_PCI_MSI_IRQ_DOMAIN is not set +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +# CONFIG_PCI_STUB is not set +# CONFIG_PCI_IOV is not set +# CONFIG_PCI_PRI is not set +# CONFIG_PCI_PASID is not set +# CONFIG_HOTPLUG_PCI 
is not set + +# +# DesignWare PCI Core Support +# + +# +# PCI host controller drivers +# + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +CONFIG_ISA=y +# CONFIG_PCCARD is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +# CONFIG_HAVE_AOUT is not set +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# CONFIG_NET is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +CONFIG_FIRMWARE_IN_KERNEL=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_GENERIC_CPU_DEVICES is not set +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_SPI=y +# CONFIG_DMA_SHARED_BUFFER is not set + +# +# Elbrus MCST Device Drivers +# +CONFIG_DDI=y +CONFIG_AGAT_SCSI=m +CONFIG_GPIO=m +CONFIG_MSPS=m +CONFIG_MPV=m +# CONFIG_MGPM is not set +CONFIG_MMRM=m +CONFIG_MOKM=m +CONFIG_RDMA=m +CONFIG_RDMA_SIC=m +CONFIG_RDMA_M=m +CONFIG_MOKX=m +CONFIG_WD=y +# CONFIG_DMP_ASSIST is not set +CONFIG_LPTOUTS=m +CONFIG_M2MLC=m +CONFIG_APKPWR=m +CONFIG_HANTRODEC=m +CONFIG_BIGE=m +CONFIG_IMGTEC=m +CONFIG_BUS_MASTERING=y +CONFIG_VXD_FPGA=y + +# +# Bus devices +# +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# Partition parsers +# + +# +# User Modules And Translation Layers +# +# CONFIG_MTD_BLOCK is not set +# CONFIG_MTD_BLOCK_RO is not set +# 
CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_SPI_NOR is not set +# CONFIG_MTD_UBI is not set +CONFIG_PARPORT=m +CONFIG_PARPORT_MCST=m +# CONFIG_PARPORT_GSC is not set +# CONFIG_PARPORT_AX88796 is not set +CONFIG_PARPORT_1284=y +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +# CONFIG_ISAPNP is not set +# CONFIG_PNPACPI is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_NULL_BLK is not set +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +# 
CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +CONFIG_BLK_DEV_CRYPTOLOOP=m + +# +# DRBD disabled because PROC_FS or INET not selected +# +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=4096 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +CONFIG_CDROM_PKTCDVD_WCACHE=y +# CONFIG_VIRTIO_BLK is not set +# CONFIG_BLK_DEV_RSXX is not set +# CONFIG_BLK_DEV_NVME is not set +# CONFIG_NVME_FC is not set +# CONFIG_NVME_TARGET is not set + +# +# Misc devices +# +# CONFIG_SENSORS_LIS3LV02D is not set +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +# CONFIG_SGI_IOC4 is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29020 is not set +CONFIG_ISL22317=m +# CONFIG_ISL29003 is not set +CONFIG_LTC4306=m +CONFIG_UCD9080=m +CONFIG_I2C_P2PMC=m +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_TI_DAC7512 is not set +# CONFIG_USB_SWITCH_FSA9480 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +CONFIG_EEPROM_AT24=m +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_SENSORS_LIS3_SPI is not set +# CONFIG_SENSORS_LIS3_I2C is not set + +# +# Altera FPGA firmware download module +# +# CONFIG_ALTERA_STAPL is not set + +# +# Intel MIC Bus Driver +# + +# +# SCIF 
Bus Driver +# + +# +# VOP Bus Driver +# + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_CXL_BASE is not set +# CONFIG_CXL_AFU_DRIVER_OPS is not set +# CONFIG_CXL_LIB is not set +CONFIG_HAVE_IDE=y +CONFIG_IDE=y + +# +# Please see Documentation/ide/ide.txt for help/info on IDE drives +# +CONFIG_IDE_XFER_MODE=y +CONFIG_IDE_ATAPI=y +# CONFIG_BLK_DEV_IDE_SATA is not set +CONFIG_IDE_GD=y +CONFIG_IDE_GD_ATA=y +# CONFIG_IDE_GD_ATAPI is not set +CONFIG_BLK_DEV_IDECD=y +CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y +# CONFIG_BLK_DEV_IDETAPE is not set +# CONFIG_IDE_TASK_IOCTL is not set +CONFIG_IDE_PROC_FS=y + +# +# IDE chipset support/bugfixes +# +# CONFIG_BLK_DEV_PLATFORM is not set +# CONFIG_BLK_DEV_IDEPNP is not set +CONFIG_BLK_DEV_IDEDMA_SFF=y + +# +# PCI IDE chipsets support +# +CONFIG_BLK_DEV_IDEPCI=y +CONFIG_IDEPCI_PCIBUS_ORDER=y +# CONFIG_BLK_DEV_OFFBOARD is not set +CONFIG_BLK_DEV_GENERIC=y +# CONFIG_BLK_DEV_OPTI621 is not set +CONFIG_BLK_DEV_IDEDMA_PCI=y +# CONFIG_BLK_DEV_AEC62XX is not set +# CONFIG_BLK_DEV_ALI15X3 is not set +# CONFIG_BLK_DEV_AMD74XX is not set +# CONFIG_BLK_DEV_CMD64X is not set +# CONFIG_BLK_DEV_TRIFLEX is not set +# CONFIG_BLK_DEV_HPT366 is not set +# CONFIG_BLK_DEV_JMICRON is not set +CONFIG_BLK_DEV_PIIX=y +CONFIG_BLK_DEV_ELBRUS=y +# CONFIG_BLK_DEV_IT8172 is not set +# CONFIG_BLK_DEV_IT8213 is not set +# CONFIG_BLK_DEV_IT821X is not set +# CONFIG_BLK_DEV_NS87415 is not set +# CONFIG_BLK_DEV_PDC202XX_OLD is not set +# CONFIG_BLK_DEV_PDC202XX_NEW is not set +# CONFIG_BLK_DEV_SVWKS is not set +# CONFIG_BLK_DEV_SIIMAGE is not set +# CONFIG_BLK_DEV_SLC90E66 is not set +# CONFIG_BLK_DEV_TRM290 is not set +# CONFIG_BLK_DEV_VIA82CXXX is not set +# CONFIG_BLK_DEV_TC86C001 is not set +CONFIG_BLK_DEV_IDEDMA=y + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y 
+CONFIG_RAID_ATTRS=m +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +# CONFIG_SCSI_MQ_DEFAULT is not set +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +# CONFIG_CHR_DEV_OSST is not set +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=m +# CONFIG_CHR_DEV_SCH is not set +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_SAS_ATTRS=m +# CONFIG_SCSI_SAS_LIBSAS is not set +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_LOWLEVEL=y +# CONFIG_ISCSI_BOOT_SYSFS is not set +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +# CONFIG_SCSI_HPSA is not set +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AHA152X is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=m +CONFIG_MEGARAID_MAILBOX=m +CONFIG_MEGARAID_LEGACY=m +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +# CONFIG_SCSI_MPT2SAS is not set +# CONFIG_SCSI_SMARTPQI is not set +# CONFIG_SCSI_UFSHCD is not set +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FUTURE_DOMAIN is not set +# CONFIG_SCSI_GENERIC_NCR5380 is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_NCR53C406A is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_FAS is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +# CONFIG_SCSI_SYM53C416 is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not 
set +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_VIRTIO is not set +# CONFIG_SCSI_DH is not set +# CONFIG_SCSI_OSD_INITIATOR is not set +CONFIG_ATA=y +# CONFIG_ATA_NONSTANDARD is not set +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +# CONFIG_SATA_AHCI_PLATFORM is not set +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +CONFIG_SATA_SIL24=y +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +# CONFIG_ATA_PIIX is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +CONFIG_SATA_SIL=y +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# 
CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_QDI is not set +# CONFIG_PATA_RZ1000 is not set +# CONFIG_PATA_WINBOND_VLB is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_ATA_GENERIC is not set +# CONFIG_PATA_LEGACY is not set +# CONFIG_MD is not set +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +# CONFIG_LOOPBACK_TARGET is not set +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +CONFIG_INPUT_POLLDEV=y +# CONFIG_INPUT_SPARSEKMAP is not set +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=m +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# 
CONFIG_KEYBOARD_STOWAWAY is not set +CONFIG_KEYBOARD_SUNKBD=m +# CONFIG_KEYBOARD_XTKBD is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +# CONFIG_MOUSE_PS2_ELANTECH is not set +# CONFIG_MOUSE_PS2_SENTELIC is not set +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_SMBUS=y +CONFIG_MOUSE_SERIAL=m +# CONFIG_MOUSE_CYAPA is not set +# CONFIG_MOUSE_ELAN_I2C is not set +# CONFIG_MOUSE_INPORT is not set +# CONFIG_MOUSE_LOGIBM is not set +# CONFIG_MOUSE_PC110PAD is not set +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_MOUSE_GPIO is not set +# CONFIG_MOUSE_SYNAPTICS_I2C is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set +# CONFIG_RMI4_CORE is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_SERIO_SERPORT=m +# CONFIG_SERIO_PARKBD is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +# CONFIG_SERIO_RAW is not set +# CONFIG_SERIO_ALTERA_PS2 is not set +# CONFIG_SERIO_PS2MULT is not set +# CONFIG_SERIO_ARC_PS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_ROCKETPORT is not set +# CONFIG_CYCLADES is not set +# CONFIG_MOXA_INTELLIO is not set +CONFIG_MOXA_SMARTIO=m +# CONFIG_SYNCLINKMP is not set +# CONFIG_SYNCLINK_GT is not set +# CONFIG_NOZOMI is not set +# CONFIG_ISI is not set +# CONFIG_N_HDLC is not set +# CONFIG_TRACE_SINK is not set +CONFIG_DEVMEM=y +CONFIG_DEVKMEM=y + +# 
+# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=32 +CONFIG_SERIAL_8250_EXTENDED=y +# CONFIG_SERIAL_8250_MANY_PORTS is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +# CONFIG_SERIAL_8250_RSA is not set +# CONFIG_SERIAL_8250_FSL is not set +# CONFIG_SERIAL_8250_DW is not set +# CONFIG_SERIAL_8250_RT288X is not set +# CONFIG_SERIAL_8250_MOXA is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +CONFIG_LMS_CONSOLE=y +CONFIG_SERIAL_L_ZILOG=y +CONFIG_SERIAL_L_ZILOG_CONSOLE=y +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_DEV_BUS is not set +# CONFIG_TTY_PRINTK is not set +CONFIG_PRINTER=m +# CONFIG_LP_CONSOLE is not set +CONFIG_PPDEV=m +# CONFIG_VIRTIO_CONSOLE is not set +CONFIG_IPMI_HANDLER=m +# CONFIG_IPMI_PANIC_EVENT is not set +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +# CONFIG_IPMI_SSIF is not set +# CONFIG_IPMI_WATCHDOG is not set +CONFIG_IPMI_POWEROFF=m +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +CONFIG_DEVPORT=y +# CONFIG_XILLYBUS is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support 
+# +# CONFIG_I2C_MUX_GPIO is not set +# CONFIG_I2C_MUX_LTC4306 is not set +# CONFIG_I2C_MUX_PCA9541 is not set +# CONFIG_I2C_MUX_PCA954x is not set +# CONFIG_I2C_MUX_REG is not set +# CONFIG_I2C_MUX_MLXCPLD is not set +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOBIT=y + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CBUS_GPIO is not set +# CONFIG_I2C_DESIGNWARE_PLATFORM is not set +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_PXA_PCI is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_PCA_ISA is not set +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_BUTTERFLY is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_LM70_LLP is not set +# CONFIG_SPI_OC_TINY is not set +# 
CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_PXA2XX_PCI is not set +# CONFIG_SPI_ROCKCHIP is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not set + +# +# SPI Protocol Masters +# +CONFIG_SPI_SPIDEV=m +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set +CONFIG_NTP_PPS=y + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_PARPORT is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# + +# +# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. +# +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_DWAPB is not set +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_GENERIC_PLATFORM is not set +# CONFIG_GPIO_MOCKUP is not set + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADP5588 is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +CONFIG_GPIO_PCA953X=m +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set + +# +# MFD GPIO expanders +# + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_RDC321X is not set + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +# CONFIG_POWER_RESET is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# 
CONFIG_CHARGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_LTC3651 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_SMB347 is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_CHARGER_RT9455 is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7314 is not set +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7310 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +CONFIG_SENSORS_ADT7475=m +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_ASPEED is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_IBMAEM is not set +# CONFIG_SENSORS_IBMPEX is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +# CONFIG_SENSORS_POWR1220 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LTC2945 is not set +# 
CONFIG_SENSORS_LTC2990 is not set +CONFIG_SENSORS_LTC4151=m +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4222 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4260 is not set +# CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_MAX1111 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_ADCXX is not set +CONFIG_SENSORS_LM63=m +# CONFIG_SENSORS_LM70 is not set +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +CONFIG_SENSORS_LM95231=m +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +CONFIG_SENSORS_LM95245=m +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set +# CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_PCF8591 is not set +CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +# CONFIG_SENSORS_ADM1275 is not set +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_IR35221 is not set +# CONFIG_SENSORS_LM25066 is not set +# CONFIG_SENSORS_LTC2978 is not set +# CONFIG_SENSORS_LTC3815 is not set +# CONFIG_SENSORS_MAX16064 is not set +# CONFIG_SENSORS_MAX20751 is not set +# CONFIG_SENSORS_MAX34440 is not set 
+# CONFIG_SENSORS_MAX8688 is not set +# CONFIG_SENSORS_TPS40422 is not set +# CONFIG_SENSORS_TPS53679 is not set +# CONFIG_SENSORS_UCD9000 is not set +# CONFIG_SENSORS_UCD9200 is not set +# CONFIG_SENSORS_ZL6100 is not set +# CONFIG_SENSORS_SHT15 is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_SCH56XX_COMMON is not set +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +# CONFIG_SENSORS_ADS1015 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_ADS7871 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +# CONFIG_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +# CONFIG_BCMA is not set + +# +# Multifunction device 
drivers +# +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RTSX_PCI is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TI_LP873X is not 
set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_REGULATOR is not set +CONFIG_RC_CORE=y +CONFIG_RC_MAP=y +CONFIG_RC_DECODERS=y +# CONFIG_LIRC is not set +CONFIG_IR_NEC_DECODER=y +CONFIG_IR_RC5_DECODER=y +CONFIG_IR_RC6_DECODER=y +CONFIG_IR_JVC_DECODER=y +CONFIG_IR_SONY_DECODER=y +CONFIG_IR_SANYO_DECODER=y +CONFIG_IR_SHARP_DECODER=y +CONFIG_IR_MCE_KBD_DECODER=y +CONFIG_IR_XMP_DECODER=y +# CONFIG_RC_DEVICES is not set +CONFIG_MEDIA_SUPPORT=m + +# +# Multimedia core support +# +# CONFIG_MEDIA_CAMERA_SUPPORT is not set +# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set +# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set +# CONFIG_MEDIA_RADIO_SUPPORT is not set +# CONFIG_MEDIA_SDR_SUPPORT is not set +# CONFIG_MEDIA_CEC_SUPPORT is not set +# CONFIG_VIDEO_ADV_DEBUG is not set +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set +# CONFIG_TTPCI_EEPROM is not set + +# +# Media drivers +# +# CONFIG_MEDIA_PCI_SUPPORT is not set + +# +# Supported MMC/SDIO adapters +# + +# +# Media ancillary drivers (tuners, sensors, i2c, spi, frontends) +# + +# +# Customise DVB Frontends +# + +# +# Tools to develop new frontends +# + +# +# Graphics support +# +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=16 +# CONFIG_DRM is not set + +# +# ACP (Audio CoProcessor) Configuration +# +# CONFIG_DRM_IMX_HDMI is not set +# CONFIG_DRM_LIB_RANDOM is not set + +# +# Frame buffer Devices +# +CONFIG_FB=y +# 
CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB_DDC=y +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +CONFIG_FB_BACKLIGHT=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_VGA16 is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +CONFIG_FB_RADEON=y +CONFIG_FB_RADEON_I2C=y +CONFIG_FB_RADEON_BACKLIGHT=y +# CONFIG_FB_RADEON_DEBUG is not set +CONFIG_FB_ATY128=y +CONFIG_FB_ATY128_BACKLIGHT=y +CONFIG_FB_ATY=y +# CONFIG_FB_ATY_CT is not set +# CONFIG_FB_ATY_GX is not set +CONFIG_FB_ATY_BACKLIGHT=y +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +# CONFIG_FB_AUO_K190X is not set +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SM712 is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=m 
+# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +# CONFIG_LCD_PLATFORM is not set +# CONFIG_LCD_S6E63M0 is not set +# CONFIG_LCD_LD9040 is not set +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_GENERIC is not set +# CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3639 is not set +# CONFIG_BACKLIGHT_GPIO is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# CONFIG_VGASTATE is not set +# CONFIG_FB_MGAM83 is not set +# CONFIG_PCI_FB_MGAM83 is not set +# CONFIG_FB_MGA3D is not set +# CONFIG_FB_LYNXFB is not set + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +# CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +# CONFIG_FRAMEBUFFER_CONSOLE is not set +# CONFIG_LOGO is not set +# CONFIG_SOUND is not set + +# +# HID support +# +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +# CONFIG_HIDRAW is not set +# CONFIG_UHID is not set +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +# CONFIG_HID_A4TECH is not set +# CONFIG_HID_ACRUX is not set +# CONFIG_HID_APPLE is not set +# CONFIG_HID_AUREAL is not set +# CONFIG_HID_BELKIN is not set +# CONFIG_HID_CHERRY is not set +# CONFIG_HID_CHICONY is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CYPRESS is not set +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_EZKEY is not set +# CONFIG_HID_GEMBIRD is not set +# CONFIG_HID_GFRM is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not 
set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_ICADE is not set +# CONFIG_HID_ITE is not set +# CONFIG_HID_TWINHAN is not set +# CONFIG_HID_KENSINGTON is not set +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LENOVO is not set +CONFIG_HID_LOGITECH=y +# CONFIG_HID_LOGITECH_HIDPP is not set +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_MICROSOFT is not set +# CONFIG_HID_MONTEREY is not set +CONFIG_HID_MULTITOUCH=y +# CONFIG_HID_NTI is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PLANTRONICS is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_STEELSERIES is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_XINMO is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_HID_SENSOR_HUB is not set +# CONFIG_HID_ALPS is not set + +# +# I2C HID support +# +# CONFIG_I2C_HID is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +# CONFIG_USB_SUPPORT is not set +# CONFIG_UWB is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_MC146818_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y 
+CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABX80X is not set +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8523 is not set +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8010 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV8803 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +CONFIG_RTC_DRV_CY14B101P=y +CONFIG_RTC_DRV_FM33256=y +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RX4581 is not set +# CONFIG_RTC_DRV_RX6110 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_CMOS=y +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# 
CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_DS2404 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_FTRTC010 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_DMADEVICES is not set + +# +# DMABUF options +# +# CONFIG_SYNC_FILE is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_PANEL is not set +CONFIG_UIO=m +# CONFIG_UIO_CIF is not set +# CONFIG_UIO_PDRV_GENIRQ is not set +# CONFIG_UIO_DMEM_GENIRQ is not set +# CONFIG_UIO_AEC is not set +# CONFIG_UIO_SERCOS3 is not set +# CONFIG_UIO_PCI_GENERIC is not set +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +# CONFIG_VFIO is not set +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO=y + +# +# Virtio drivers +# +# CONFIG_VIRTIO_PCI is not set +# CONFIG_VIRTIO_BALLOON is not set +# CONFIG_VIRTIO_INPUT is not set +# CONFIG_VIRTIO_MMIO is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_HYPERV_TSCPAGE is not set +# CONFIG_STAGING is not set +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +# CONFIG_ATMEL_PIT is not set +# CONFIG_SH_TIMER_CMT is not set +# CONFIG_SH_TIMER_MTU2 is not set +# CONFIG_SH_TIMER_TMU is not set +# CONFIG_EM_TIMER_STI is not set +# CONFIG_MAILBOX is not set +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IOVA=y +CONFIG_IOMMU_DMA=y + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set + +# +# Rpmsg drivers +# + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# + +# +# Broadcom SoC drivers +# + +# +# i.MX SoC drivers +# + +# +# Qualcomm SoC drivers +# +# CONFIG_SUNXI_SRAM is not set +# CONFIG_SOC_TI is not set +# CONFIG_PM_DEVFREQ is not 
set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +CONFIG_IIO=m +CONFIG_INV_MPU6050_IIO=m +CONFIG_INV_MPU6050_I2C=m +CONFIG_AK8975=m +CONFIG_BMP280=m +CONFIG_BMP280_I2C=m +CONFIG_BMP280_SPI=m +CONFIG_ARM_GIC_MAX_NR=1 +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set +# CONFIG_FMC is not set + +# +# PHY Subsystem +# +# CONFIG_GENERIC_PHY is not set +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_RAS is not set + +# +# Android +# +# CONFIG_ANDROID is not set +# CONFIG_LIBNVDIMM is not set +# CONFIG_DAX is not set +CONFIG_NVMEM=y +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# CONFIG_FPGA is not set + +# +# FSI support +# +# CONFIG_FSI is not set + +# +# File systems +# +CONFIG_FS_IOMAP=y +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_ENCRYPTION is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +CONFIG_REISERFS_FS=m +# CONFIG_REISERFS_CHECK is not set +# CONFIG_REISERFS_PROC_INFO is not set +# CONFIG_REISERFS_FS_XATTR is not set +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +# CONFIG_JFS_SECURITY is not set +# CONFIG_JFS_DEBUG is not set +# CONFIG_JFS_STATISTICS is not set +CONFIG_XFS_FS=m +# CONFIG_XFS_QUOTA is not set +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is not set +CONFIG_BTRFS_FS=m +# CONFIG_BTRFS_FS_POSIX_ACL is not set +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# 
CONFIG_FS_DAX is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +# CONFIG_EXPORTFS_BLOCK_OPS is not set +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set +CONFIG_QUOTA=y +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +# CONFIG_OVERLAY_FS is not set + +# +# Caches +# +CONFIG_FSCACHE=y +# CONFIG_FSCACHE_STATS is not set +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +# CONFIG_CACHEFILES is not set + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_UDF_NLS=y + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_FAT_DEFAULT_UTF8 is not set +CONFIG_NTFS_FS=m +# CONFIG_NTFS_DEBUG is not set +CONFIG_NTFS_RW=y + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +# CONFIG_PROC_CHILDREN is not set +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_CONFIGFS_FS=m +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_CRAMFS is not set +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# 
CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +# CONFIG_SQUASHFS_XATTR is not set +CONFIG_SQUASHFS_ZLIB=y +# CONFIG_SQUASHFS_LZ4 is not set +# CONFIG_SQUASHFS_LZO is not set +# CONFIG_SQUASHFS_XZ is not set +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +CONFIG_SQUASHFS_EMBEDDED=y +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +CONFIG_ROMFS_FS=m +CONFIG_ROMFS_BACKED_BY_BLOCK=y +# CONFIG_ROMFS_BACKED_BY_MTD is not set +# CONFIG_ROMFS_BACKED_BY_BOTH is not set +CONFIG_ROMFS_ON_BLOCK=y +# CONFIG_PSTORE is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=m +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +CONFIG_NLS_CODEPAGE_855=m +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +CONFIG_NLS_CODEPAGE_866=m +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=m +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +CONFIG_NLS_ISO8859_5=m +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 
is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +# CONFIG_NLS_MAC_ROMAN is not set +# CONFIG_NLS_MAC_CELTIC is not set +# CONFIG_NLS_MAC_CENTEURO is not set +# CONFIG_NLS_MAC_CROATIAN is not set +CONFIG_NLS_MAC_CYRILLIC=m +# CONFIG_NLS_MAC_GAELIC is not set +# CONFIG_NLS_MAC_GREEK is not set +# CONFIG_NLS_MAC_ICELAND is not set +# CONFIG_NLS_MAC_INUIT is not set +# CONFIG_NLS_MAC_ROMANIAN is not set +# CONFIG_NLS_MAC_TURKISH is not set +CONFIG_NLS_UTF8=m + +# +# Kernel hacking +# +CONFIG_TRACE_IRQFLAGS_SUPPORT=y + +# +# printk and dmesg options +# +# CONFIG_PRINTK_TIME is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_DYNAMIC_DEBUG is not set + +# +# Compile-time checks and compiler options +# +# CONFIG_DEBUG_INFO is not set +# CONFIG_ENABLE_WARN_DEPRECATED is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=4096 +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_READABLE_ASM is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_PAGE_OWNER is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_PER_CPU_MAPS is not set +# CONFIG_DEBUG_SHIRQ is 
not set + +# +# Debug Lockups and Hangs +# +# CONFIG_SOFTLOCKUP_DETECTOR is not set +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_WQ_WATCHDOG is not set +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +# CONFIG_SCHED_DEBUG is not set +# CONFIG_SCHED_INFO is not set +# CONFIG_SCHEDSTATS is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_HAVE_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_PI_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_PROVE_RCU is not set +# CONFIG_TORTURE_TEST is not set +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=21 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y 
+CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_HWLAT_TRACER is not set +# CONFIG_FTRACE_SYSCALLS is not set +# CONFIG_TRACER_SNAPSHOT is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +CONFIG_KPROBE_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_DYNAMIC_FTRACE=y +# CONFIG_FUNCTION_PROFILER is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_HIST_TRIGGERS is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +CONFIG_TRACING_EVENTS_GPIO=y + +# +# Runtime Testing +# +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_MEMTEST is not set +# 
CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +# CONFIG_ARCH_WANTS_UBSAN_NO_NULL is not set +# CONFIG_UBSAN is not set +# CONFIG_E2K_DEBUG_KERNEL is not set +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y + +# +# Elbrus architecture kernel hacking +# +CONFIG_BOOT_TRACE=y +CONFIG_BOOT_TRACE_THRESHOLD=100 + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +# CONFIG_PERSISTENT_KEYRINGS is not set +# CONFIG_BIG_KEYS is not set +# CONFIG_ENCRYPTED_KEYS is not set +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +# CONFIG_SECURITY_WRITABLE_HOOKS is not set +# CONFIG_SECURITYFS is not set +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_PATH=y +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +# CONFIG_HARDENED_USERCOPY is not set +# CONFIG_STATIC_USERMODEHELPER is not set +# CONFIG_SECURITY_LOADPIN is not set +# CONFIG_SECURITY_YAMA is not set +CONFIG_INTEGRITY=y +# CONFIG_INTEGRITY_SIGNATURE is not set +# CONFIG_IMA is not set +# CONFIG_EVM is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_SYSVIPC_COMPAT=y +CONFIG_XOR_BLOCKS=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=m +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=m +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=m +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_ACOMP2=y +# CONFIG_CRYPTO_RSA is not set +# CONFIG_CRYPTO_DH is not set +# CONFIG_CRYPTO_ECDH is not set +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_GF128MUL=m +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_NULL2=y +# CONFIG_CRYPTO_PCRYPT is not set +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=m +# CONFIG_CRYPTO_MCRYPTD is not set +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m + +# +# Authenticated Encryption 
with Associated Data +# +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m +# CONFIG_CRYPTO_CHACHA20POLY1305 is not set +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_ECHAINIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CTR=m +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=m +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=m +# CONFIG_CRYPTO_KEYWRAP is not set + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_HMAC=m +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_CRC32 is not set +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_GHASH=m +# CONFIG_CRYPTO_POLY1305 is not set +CONFIG_CRYPTO_MD4=m +# CONFIG_CRYPTO_MD5 is not set +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=m +CONFIG_CRYPTO_SHA256=m +CONFIG_CRYPTO_SHA512=m +# CONFIG_CRYPTO_SHA3 is not set +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m + +# +# Ciphers +# +# CONFIG_CRYPTO_AES is not set +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +# CONFIG_CRYPTO_DES is not set +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_CHACHA20 is not set +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=m +# CONFIG_CRYPTO_LZO is not set +# CONFIG_CRYPTO_842 is not set +# CONFIG_CRYPTO_LZ4 is not set +# CONFIG_CRYPTO_LZ4HC is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_DRBG_HMAC=y +# CONFIG_CRYPTO_DRBG_HASH is not set +# CONFIG_CRYPTO_DRBG_CTR is 
not set +CONFIG_CRYPTO_DRBG=m +CONFIG_CRYPTO_JITTERENTROPY=m +# CONFIG_CRYPTO_HW is not set +# CONFIG_ASYMMETRIC_KEY_TYPE is not set + +# +# Certificates for signature checking +# +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set +CONFIG_VIRTUALIZATION=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_COMPAT=y +CONFIG_KVM=y +CONFIG_KVM_GUEST=y +CONFIG_KVM_PARAVIRTUALIZATION=y +CONFIG_KVM_HW_VIRTUALIZATION=y +CONFIG_KVM_HW_PARAVIRTUALIZATION=y +CONFIG_KVM_HOST_KERNEL=y +# CONFIG_KVM_GUEST_KERNEL is not set +# CONFIG_PARAVIRT_GUEST is not set +CONFIG_KVM_SHADOW_PT_ENABLE=y +# CONFIG_KVM_TDP_ENABLE is not set +CONFIG_KVM_NONPAGING_ENABLE=y +CONFIG_KVM_PHYS_PT_ENABLE=y +CONFIG_KVM_HOST_MODE=y +CONFIG_KVM_HV_MMU=y +# CONFIG_GUEST_LAPIC_REGS is not set +CONFIG_DIRECT_VIRQ_INJECTION=y +CONFIG_VIRQ_VCPU_INJECTION=y + +# +# Virtio drivers +# +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_BITREVERSE=y +# CONFIG_HAVE_ARCH_BITREVERSE is not set +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IO=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC4 is not set +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=m +# CONFIG_CRC8 is not set +CONFIG_XXHASH=m +# CONFIG_AUDIT_ARCH_COMPAT_GENERIC is not set +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=m +CONFIG_LZO_COMPRESS=m +CONFIG_LZO_DECOMPRESS=m +CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=m +CONFIG_XZ_DEC=y +# CONFIG_XZ_DEC_X86 is not set +# CONFIG_XZ_DEC_POWERPC is not set +# CONFIG_XZ_DEC_IA64 is not set +# CONFIG_XZ_DEC_ARM is not set +# CONFIG_XZ_DEC_ARMTHUMB is not set +# CONFIG_XZ_DEC_SPARC is not set +# CONFIG_XZ_DEC_BCJ is not set +# 
CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_RADIX_TREE_MULTIORDER=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +# CONFIG_DMA_NOOP_OPS is not set +# CONFIG_DMA_VIRT_OPS is not set +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +# CONFIG_CORDIC is not set +# CONFIG_DDR is not set +CONFIG_IRQ_POLL=y +# CONFIG_SG_SPLIT is not set +CONFIG_SG_POOL=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set diff --git a/arch/e2k/configs/mcst_rt.config b/arch/e2k/configs/mcst_rt.config new file mode 100644 index 000000000000..9ffa2e365aff --- /dev/null +++ b/arch/e2k/configs/mcst_rt.config @@ -0,0 +1 @@ +CONFIG_LOCALVERSION="-e2k-rt" diff --git a/arch/e2k/fast_syscalls/Makefile b/arch/e2k/fast_syscalls/Makefile new file mode 100644 index 000000000000..a035d6df5e75 --- /dev/null +++ b/arch/e2k/fast_syscalls/Makefile @@ -0,0 +1,17 @@ +subdir-ccflags-y := -DE2K_FAST_SYSCALL -Werror -Wswitch -Wenum-compare + +empty:= +space:= $(empty) $(empty) +ORIG_CFLAGS := $(KBUILD_CFLAGS) +ORIG_CFLAGS := $(subst $(space)-fprofile-generate-kernel$(space),$(space),$(ORIG_CFLAGS)) +ORIG_CFLAGS := $(subst $(space)-fprofile-use="$(PROFILE_USE)"$(space),$(space),$(ORIG_CFLAGS)) +KBUILD_CFLAGS = $(ORIG_CFLAGS) + +obj-y += fast_clock_gettime.o fast_syscalls.o clkr.o sclkr.o +obj-y += fast_getcpu.o fast_siggetmask.o \ + fast_getcontext.o fast_set_return.o +obj-$(CONFIG_COMPAT) += compat.o +obj-$(CONFIG_PROTECTED_MODE) += protected.o + +CFLAGS_REMOVE_sclkr.o = $(CFLAGS_ALL_CPUS) +CFLAGS_sclkr.o += -march=elbrus-v3 diff --git a/arch/e2k/fast_syscalls/clkr.c b/arch/e2k/fast_syscalls/clkr.c new file mode 100644 index 000000000000..f3a8a5bfa2c9 --- /dev/null +++ b/arch/e2k/fast_syscalls/clkr.c @@ -0,0 +1,65 @@ +/* + * arch/e2k/kernel/clkr.c + * + * This file contains implementation of clkr clocksource. + * + * Copyright (C) 2011 Pavel V. 
Panteleev (panteleev_p@mcst.ru) + */ + +/* includes */ +#include +#include +#include + +#include + + +/* definitions */ + +/* See comment before __cycles_2_ns() */ +#define CYC2NS_SCALE 22 +/* CPU frequency must be greater than this to avoid overflows on conversions */ +#define CYC2NS_MIN_CPU_FREQ \ + ((NSEC_PER_SEC << CYC2NS_SCALE) / ((1UL << 32) - 1UL)) + +/* Special version for use inside of fast system calls. Limitations: + * 1) Must be called with disabled interrupts. + * 2) Must not use data stack. + * 3) Must not use 'current' and 'current_thread_info()' since + * corresponding global registers are not set. + * 4) Must not do any calls. */ +__section(".entry.text") +notrace __interrupt +u64 fast_syscall_read_clkr(void) +{ + struct thread_info *const ti = READ_CURRENT_REG(); + u64 before, now; +#ifdef CONFIG_CLKR_OFFSET + unsigned cpu; +#endif + + before = last_clkr; + /* Make sure we read 'last_clkr' before CLKR register */ + smp_rmb(); +#ifndef CONFIG_CLKR_OFFSET + now = get_cycles(); +#else + /* Do not access current_thread_info() here since we + * do not setup g12 and g13 in fast system calls. */ + cpu = task_cpu(thread_info_task(ti)); + now = get_cycles() + per_cpu(clkr_offset, cpu); +#endif + if (unlikely(now < before)) { + /* Time is going backwards. This must be because of + * clkr drift (or someone disabling CPUs... in which + * case offset should be corrected in resume()). 
*/ +#ifdef CONFIG_CLKR_OFFSET + per_cpu(clkr_offset, cpu) += before - now; +#endif + now = before; + } else { + last_clkr = now; + } + + return now; +} diff --git a/arch/e2k/fast_syscalls/compat.c b/arch/e2k/fast_syscalls/compat.c new file mode 100644 index 000000000000..e0b0abaa2a4a --- /dev/null +++ b/arch/e2k/fast_syscalls/compat.c @@ -0,0 +1,186 @@ +#include +#include + +#include +#include +#include +#include + +#define ttable_entry1_args3(sys_num, arg1, arg2) \ + ((ttable_entry_args3)(ttable_entry1))(sys_num, arg1, arg2) +#define ttable_entry1_args4(sys_num, arg1, arg2, arg3) \ + ((ttable_entry_args4)(ttable_entry1))(sys_num, arg1, arg2, arg3) + +/* trap table entry started by direct branch (it is closer to fast system */ +/* call wirthout switch and use user local data stack) */ +#define goto_ttable_entry1_args3(sys_num, arg1, arg2) \ + goto_ttable_entry_args3(ttable_entry1, sys_num, arg1, arg2) +#define goto_ttable_entry1_args4(sys_num, arg1, arg2, arg3) \ + goto_ttable_entry_args4(ttable_entry1, sys_num, arg1, arg2, arg3) + +#define ttable_entry1_clock_gettime(which, time) \ + goto_ttable_entry1_args3(__NR_clock_gettime, which, time) +#define ttable_entry1_gettimeofday(tv, tz) \ + goto_ttable_entry1_args3(__NR_gettimeofday, tv, tz) +#define ttable_entry1_sigprocmask(how, nset, oset) \ + goto_ttable_entry1_args4(__NR_sigprocmask, how, nset, oset) + +notrace __section(".entry.text") +int compat_fast_sys_clock_gettime(const clockid_t which_clock, + struct compat_timespec __user *__restrict tp) +{ + struct thread_info *const ti = READ_CURRENT_REG(); + struct timespec kts; + int ret; + + prefetch_nospec(&fsys_data); + +#ifdef CONFIG_KVM_HOST_MODE + if (unlikely(test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE))) + ttable_entry1_clock_gettime((u64) which_clock, (u64) tp); +#endif + + if (unlikely((u64) tp + sizeof(struct compat_timespec) > + ti->addr_limit.seg)) + return -EFAULT; + + ret = do_fast_clock_gettime(which_clock, &kts); + if (likely(!ret)) { + tp->tv_sec = 
kts.tv_sec; + tp->tv_nsec = kts.tv_nsec; + } else { + ttable_entry1_clock_gettime((u64) which_clock, (u64) tp); + } + + return ret; +} + +notrace __section(".entry.text") +int compat_fast_sys_gettimeofday(struct compat_timeval __user *__restrict tv, + struct timezone __user *__restrict tz) +{ + struct thread_info *const ti = READ_CURRENT_REG(); + struct timeval ktv; + int ret; + + prefetch_nospec(&fsys_data); + +#ifdef CONFIG_KVM_HOST_MODE + if (unlikely(test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE))) + ttable_entry1_gettimeofday((u64) tv, (u64) tz); +#endif + + if (unlikely((u64) tv + sizeof(struct compat_timeval) > + ti->addr_limit.seg + || (u64) tz + sizeof(struct timezone) > + ti->addr_limit.seg)) + return -EFAULT; + + if (likely(tv)) { + ret = do_fast_gettimeofday(&ktv); + if (unlikely(ret)) + ttable_entry1_gettimeofday((u64) tv, (u64) tz); + } else { + ret = 0; + } + + if (tv) { + tv->tv_sec = ktv.tv_sec; + tv->tv_usec = ktv.tv_usec; + } + if (tz) { + tz->tz_minuteswest = sys_tz.tz_minuteswest; + tz->tz_dsttime = sys_tz.tz_dsttime; + } + + return ret; +} + +#if _NSIG != 64 +# error We read u64 value here... +#endif +notrace __interrupt __section(".entry.text") +int compat_fast_sys_siggetmask(u32 __user *oset, size_t sigsetsize) +{ + struct thread_info *const ti = READ_CURRENT_REG(); + struct task_struct *task = thread_info_task(ti); + int ret = 0; +#ifdef CONFIG_KVM_HOST_MODE + bool guest = test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE); +#endif + union { + u32 word[2]; + u64 whole; + } set; + + set.whole = task->blocked.sig[0]; + + if (unlikely(sigsetsize != 8)) + return -EINVAL; + +#ifdef CONFIG_KVM_HOST_MODE + if (unlikely(guest)) + ttable_entry1_sigprocmask((u64) 0, (u64) NULL, (u64) oset); +#endif + + if (unlikely((u64) oset + sizeof(sigset_t) > ti->addr_limit.seg)) + return -EFAULT; + + oset[0] = set.word[0]; + oset[1] = set.word[1]; + + return ret; +} + +#if _NSIG != 64 +# error We read u64 value here... 
+#endif +notrace __interrupt __section(".entry.text") +int compat_fast_sys_getcontext(struct ucontext_32 __user *ucp, + size_t sigsetsize) +{ + struct thread_info *const ti = READ_CURRENT_REG(); + struct task_struct *task = thread_info_task(ti); + register u64 pcsp_lo, pcsp_hi; + register u32 fpcr, fpsr, pfpfr; + union { + u32 word[2]; + u64 whole; + } set; + u64 key; + +#ifdef CONFIG_KVM_HOST_MODE + /* TODO getcontext does not have a slow counterpart, not implemented for paravirt guest */ + KVM_BUG_ON(test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE)); +#endif + + BUILD_BUG_ON(sizeof(task->blocked.sig[0]) != 8); + set.whole = task->blocked.sig[0]; + + if (unlikely(sigsetsize != 8)) + return -EINVAL; + + if (unlikely((u64) ucp + sizeof(struct ucontext_32) > + ti->addr_limit.seg + || (u64) ucp >= ti->addr_limit.seg)) + return -EFAULT; + + key = context_ti_key_fast_syscall(ti); + + E2K_GETCONTEXT(fpcr, fpsr, pfpfr, pcsp_lo, pcsp_hi); + + /* We want stack to point to user frame that called us */ + pcsp_hi -= SZ_OF_CR; + + ((u32 *) &ucp->uc_sigmask)[0] = set.word[0]; + ((u32 *) &ucp->uc_sigmask)[1] = set.word[1]; + ucp->uc_mcontext.sbr = key; + ucp->uc_mcontext.pcsp_lo = pcsp_lo; + ucp->uc_mcontext.pcsp_hi = pcsp_hi; + ucp->uc_extra.fpcr = fpcr; + ucp->uc_extra.fpsr = fpsr; + ucp->uc_extra.pfpfr = pfpfr; + + return 0; +} + diff --git a/arch/e2k/fast_syscalls/fast_clock_gettime.c b/arch/e2k/fast_syscalls/fast_clock_gettime.c new file mode 100644 index 000000000000..4c1890908e2e --- /dev/null +++ b/arch/e2k/fast_syscalls/fast_clock_gettime.c @@ -0,0 +1,69 @@ +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +noinline notrace __interrupt +int native_do_fast_clock_gettime(const clockid_t which_clock, + struct timespec *tp) +{ + return DO_FAST_CLOCK_GETTIME(which_clock, tp); +} + +notrace __interrupt __section(.ttable_entry6_C) +int native_fast_sys_clock_gettime(const clockid_t which_clock, + struct timespec __user *tp) +{ + return 
FAST_SYS_CLOCK_GETTIME(which_clock, tp); +} + +notrace __interrupt __section(".entry.text") +int native_do_fast_gettimeofday(struct timeval *tv) +{ + return DO_FAST_GETTIMEOFDAY(tv); +} + +notrace __interrupt __section(".entry.text") +int fast_sys_gettimeofday(struct timeval __user *__restrict tv, + struct timezone __user *__restrict tz) +{ + struct thread_info *const ti = READ_CURRENT_REG(); + int ret; + + prefetch_nospec(&fsys_data); + +#ifdef CONFIG_KVM_HOST_MODE + if (unlikely(test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE))) + ttable_entry_gettimeofday((u64) tv, (u64) tz); +#endif + + tv = (typeof(tv)) ((u64) tv & E2K_VA_MASK); + tz = (typeof(tz)) ((u64) tz & E2K_VA_MASK); + if (unlikely((u64) tv + sizeof(struct timeval) > ti->addr_limit.seg + || (u64) tz + sizeof(struct timezone) + > ti->addr_limit.seg)) + return -EFAULT; + + if (likely(tv)) { + ret = do_fast_gettimeofday(tv); + if (unlikely(ret)) + ttable_entry_gettimeofday((u64) tv, (u64) tz); + } else { + ret = 0; + } + + if (tz) { + tz->tz_minuteswest = sys_tz.tz_minuteswest; + tz->tz_dsttime = sys_tz.tz_dsttime; + } + + return ret; +} + diff --git a/arch/e2k/fast_syscalls/fast_getcontext.c b/arch/e2k/fast_syscalls/fast_getcontext.c new file mode 100644 index 000000000000..4c3ce1810ec3 --- /dev/null +++ b/arch/e2k/fast_syscalls/fast_getcontext.c @@ -0,0 +1,54 @@ +#include +#include + +#include +#include +#include +#include +#include + +#if _NSIG != 64 +# error We read u64 value here... 
+#endif +notrace __interrupt __section(".entry.text") +int fast_sys_getcontext(struct ucontext __user *ucp, size_t sigsetsize) +{ + struct thread_info *const ti = READ_CURRENT_REG(); + struct task_struct *task = thread_info_task(ti); + register u64 pcsp_lo, pcsp_hi; + register u32 fpcr, fpsr, pfpfr; + u64 set, key; + +#ifdef CONFIG_KVM_HOST_MODE + /* TODO getcontext does not have a slow counterpart, not implemented for paravirt guest */ + KVM_BUG_ON(test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE)); +#endif + + BUILD_BUG_ON(sizeof(task->blocked.sig[0]) != 8); + set = task->blocked.sig[0]; + + if (unlikely(sigsetsize != 8)) + return -EINVAL; + + ucp = (typeof(ucp)) ((u64) ucp & E2K_VA_MASK); + if (unlikely((u64) ucp + sizeof(struct ucontext) > ti->addr_limit.seg)) + return -EFAULT; + + key = context_ti_key_fast_syscall(ti); + + E2K_GETCONTEXT(fpcr, fpsr, pfpfr, pcsp_lo, pcsp_hi); + + /* We want stack to point to user frame that called us */ + pcsp_hi -= SZ_OF_CR; + + *((u64 *) &ucp->uc_sigmask) = set; + ucp->uc_mcontext.sbr = key; + ucp->uc_mcontext.pcsp_lo = pcsp_lo; + ucp->uc_mcontext.pcsp_hi = pcsp_hi; + ucp->uc_extra.fpcr = fpcr; + ucp->uc_extra.fpsr = fpsr; + ucp->uc_extra.pfpfr = pfpfr; + + return 0; +} + diff --git a/arch/e2k/fast_syscalls/fast_getcpu.c b/arch/e2k/fast_syscalls/fast_getcpu.c new file mode 100644 index 000000000000..4d452314ad0e --- /dev/null +++ b/arch/e2k/fast_syscalls/fast_getcpu.c @@ -0,0 +1,36 @@ +#include + +#include +#include +#include + +notrace __interrupt __section(".entry.text") +int fast_sys_getcpu(unsigned __user *cpup, unsigned __user *nodep, + struct getcpu_cache __user *unused) +{ + struct thread_info *const ti = READ_CURRENT_REG(); + int cpu = task_cpu(thread_info_task(ti)); + int node; + +#ifdef CONFIG_KVM_HOST_MODE + if (unlikely(test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE))) + ttable_entry_getcpu((u64) cpup, (u64) nodep, (u64) unused); +#endif + + cpup = (typeof(cpup)) ((u64) cpup & E2K_VA_MASK); + nodep = (typeof(nodep)) 
((u64) nodep & E2K_VA_MASK); + if (unlikely((u64) cpup + sizeof(unsigned) > ti->addr_limit.seg + || (u64) nodep + sizeof(unsigned) > ti->addr_limit.seg)) + return -EFAULT; + + if (nodep) + node = cpu_to_node(cpu); + + if (nodep) + *nodep = node; + if (cpup) + *cpup = cpu; + + return 0; +} + diff --git a/arch/e2k/fast_syscalls/fast_set_return.c b/arch/e2k/fast_syscalls/fast_set_return.c new file mode 100644 index 000000000000..4cfbaa3dd922 --- /dev/null +++ b/arch/e2k/fast_syscalls/fast_set_return.c @@ -0,0 +1,52 @@ +#include +#include +#include +#include + +notrace __interrupt __section(".entry.text") +int fast_sys_set_return(u64 ip, int flags) +{ + struct thread_info *const ti = READ_CURRENT_REG(); + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_cr0_hi_t cr0_hi; + e2k_mem_crs_t *frame, *base; + u64 prev_ip; + +#ifdef CONFIG_KVM_HOST_MODE + /* TODO set_retrun does not have a slow counterpart, not implemented for paravirt guest */ + KVM_BUG_ON(test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE)); +#endif + + E2K_FLUSHC; + + if (unlikely(flags)) + return -EINVAL; + + if (unlikely(ip >= USER_DS.seg)) + return -EFAULT; + + pcsp_hi = READ_PCSP_HI_REG(); /* We don't use %pcsp_hi.size */ + pcsp_lo = READ_PCSP_LO_REG(); + + base = GET_PCS_BASE(&ti->u_hw_stack); + frame = (e2k_mem_crs_t *) (AS(pcsp_lo).base + AS(pcsp_hi).ind); + + do { + --frame; + + cr0_hi = frame->cr0_hi; + + prev_ip = AS(cr0_hi).ip << 3; + } while (unlikely(prev_ip >= TASK_SIZE && frame > base)); + + /* No user frames above? */ + if (unlikely(prev_ip >= TASK_SIZE)) + return -EPERM; + + /* Modify stack */ + AS(cr0_hi).ip = ip >> 3; + frame->cr0_hi = cr0_hi; + + return 0; +} diff --git a/arch/e2k/fast_syscalls/fast_siggetmask.c b/arch/e2k/fast_syscalls/fast_siggetmask.c new file mode 100644 index 000000000000..d35bf1ff0801 --- /dev/null +++ b/arch/e2k/fast_syscalls/fast_siggetmask.c @@ -0,0 +1,14 @@ +#include +#include +#include + +#if _NSIG != 64 +# error We read u64 value here... 
+#endif + +notrace __interrupt __section(.ttable_entry6_C) +int native_fast_sys_siggetmask(u64 __user *oset, size_t sigsetsize) +{ + return FAST_SYS_SIGGETMASK(oset, sigsetsize); +} + diff --git a/arch/e2k/fast_syscalls/fast_syscalls.c b/arch/e2k/fast_syscalls/fast_syscalls.c new file mode 100644 index 000000000000..c415d5c4444f --- /dev/null +++ b/arch/e2k/fast_syscalls/fast_syscalls.c @@ -0,0 +1,87 @@ +#include +#include + +#include + +struct fast_syscalls_data fsys_data; + +/* + * update_vsyscall() is called with xtime_lock held for writing, + * so all synchronization with readers is done by the caller + */ +void update_vsyscall(struct timekeeper *tk) +{ + struct timespec64 ts; + + ts.tv_sec = tk->xtime_sec; + ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift); + + fsys_data.tk = tk; + fsys_data.mult = tk->tkr_mono.mult; + fsys_data.shift = tk->tkr_mono.shift; + fsys_data.clock = tk->tkr_mono.clock; + fsys_data.wall_time_coarse = timespec64_to_timespec(ts); +} + +void update_vsyscall_tz(void) +{ +} + +notrace __interrupt +int fast_sys_ni_syscall() +{ + return -ENOSYS; +} + +#define FAST_SYSTEM_CALL_TBL_ENTRY(sysname) (fast_system_call_func) sysname +#define COMPAT_FAST_SYSTEM_CALL_TBL_ENTRY(sysname) \ + (fast_system_call_func) compat_##sysname +#define PROTECTED_FAST_SYSTEM_CALL_TBL_ENTRY(sysname) \ + (fast_system_call_func) protected_##sysname + + +/* + * To improve locality, fast syscalls tables are located + * in the .text section next to the OS entry code. 
+ */ + +__section(.ttable_entry6_table) +const fast_system_call_func fast_sys_calls_table[NR_fast_syscalls] = { + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_gettimeofday), + FAST_SYSTEM_CALL_TBL_ENTRY(native_fast_sys_clock_gettime), + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_getcpu), + FAST_SYSTEM_CALL_TBL_ENTRY(native_fast_sys_siggetmask), + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_getcontext), + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_set_return), + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_ni_syscall), + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_ni_syscall), +}; + +#ifdef CONFIG_COMPAT +__section(.ttable_entry5_table) +const fast_system_call_func fast_sys_calls_table_32[NR_fast_syscalls] = { + COMPAT_FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_gettimeofday), + COMPAT_FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_clock_gettime), + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_getcpu), + COMPAT_FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_siggetmask), + COMPAT_FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_getcontext), + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_set_return), + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_ni_syscall), + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_ni_syscall), +}; +#endif + +#ifdef CONFIG_PROTECTED_MODE +__section(.ttable_entry7_table) +const fast_system_call_func fast_sys_calls_table_128[NR_fast_syscalls] = { + PROTECTED_FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_gettimeofday), + PROTECTED_FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_clock_gettime), + PROTECTED_FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_getcpu), + PROTECTED_FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_siggetmask), + PROTECTED_FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_getcontext), + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_ni_syscall), + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_ni_syscall), + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_ni_syscall), +}; +#endif + diff --git a/arch/e2k/fast_syscalls/protected.c b/arch/e2k/fast_syscalls/protected.c new file mode 100644 index 000000000000..54f09ec226d5 --- /dev/null +++ b/arch/e2k/fast_syscalls/protected.c @@ -0,0 +1,298 @@ +#include +#include + +#include +#include +#include 
+#include +#include + +static inline +unsigned long e2k_ptr_ptr(long low, long hiw, unsigned int min_size, + const struct thread_info *ti) +{ + e2k_ptr_t ptr; + unsigned int ptr_size; + unsigned long va_ptr; + + AW(ptr).lo = low; + AW(ptr).hi = hiw; + ptr_size = AS(ptr).size - AS(ptr).curptr; + + if (ptr_size < min_size) { + va_ptr = 0; + } else { + if (AS(ptr).itag == AP_ITAG) + va_ptr = AS(ptr).ap.base + AS(ptr).curptr; + else + va_ptr = AS(ptr).sap.base + AS(ptr).curptr + + (ti->u_stack.bottom & 0xFFFF00000000UL); + } + + return va_ptr; +} + +static inline +unsigned int e2k_ptr_size(long low, long hiw, unsigned int min_size) +{ + e2k_ptr_hi_t hi; + unsigned int ptr_size; + + AW(hi) = hiw; + ptr_size = AS(hi).size - AS(hi).curptr; + + if (ptr_size < min_size) + return 0; + else + return ptr_size; +} + +#define ARG_TAG(i) ((tags & (0xF << (4*(i)))) >> (4*(i))) +#define NOT_PTR(i) ((tags & (0xFF << (4*(i)))) >> (4*(i)) != ETAGAPQ) +#define NULL_PTR(i) ((ARG_TAG(i) == E2K_NULLPTR_ETAG) && (arg##i == 0)) + +#define GET_PTR(ptr, size, i, j, min_size, null_is_allowed, ti) \ +do { \ + if (unlikely(NULL_PTR(i))) { \ + ptr = 0; \ + size = min_size * !!null_is_allowed; \ + } else if (likely(!NOT_PTR(i))) { \ + ptr = (typeof(ptr)) e2k_ptr_ptr(arg##i, arg##j, min_size, ti); \ + size = e2k_ptr_size(arg##i, arg##j, min_size); \ + } else { \ + ptr = 0; \ + size = 0; \ + } \ +} while (0) + + +extern long ttable_entry8(int sys_num, + u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, u64 r6, u64 r7); + +/* This macro fills missing arguments with "(u64) (0)". */ +#define EXPAND_SYSCALL_ARGS_TO_8(...) \ + __EXPAND_SYSCALL_ARGS_TO_8(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0) +#define __EXPAND_SYSCALL_ARGS_TO_8(sys_num, tags, a2, a3, a4, a5, a6, a7, ...) \ + (sys_num), (tags), (u64) (a2), (u64) (a3), (u64) (a4), \ + (u64) (a5), (u64) (a6), (u64) (a7) + +#define PROTECTED_SYSCALL(sys_num, tags, ...) 
\ + _PROTECTED_SYSCALL(EXPAND_SYSCALL_ARGS_TO_8(sys_num, tags ,##__VA_ARGS__)) +/* + * Needed because preprocessor checks for number of arguments before + * expansion takes place, so without this define it would think that + * __PROTECTED_SYSCALL(EXPAND_SYSCALL_ARGS_TO_8(__VA_ARGS__)) + * is invoked with one argument. + */ +#define _PROTECTED_SYSCALL(...) __PROTECTED_SYSCALL(__VA_ARGS__) + +#define __PROTECTED_SYSCALL(sys_num, tags, arg2, arg3, arg4, arg5, arg6, arg7) \ +({ \ + long __ret; \ + u32 tag2 = ARG_TAG(2), tag3 = ARG_TAG(3), tag4 = ARG_TAG(4), \ + tag5 = ARG_TAG(5), tag6 = ARG_TAG(6), tag7 = ARG_TAG(7); \ + asm volatile ("{\n" \ + "nop 3\n" \ + "disp %%ctpr1, %[_func]\n" \ + "adds %[_sys_num], 0, %%b[0]\n" \ + "puttagd %[_arg2], %[_tag2], %%db[2]\n" \ + "puttagd %[_arg3], %[_tag3], %%db[3]\n" \ + "}\n" \ + "{\n" \ + "puttagd %[_arg4], %[_tag4], %%db[4]\n" \ + "puttagd %[_arg5], %[_tag5], %%db[5]\n" \ + "}\n" \ + "{\n" \ + "puttagd %[_arg6], %[_tag6], %%db[6]\n" \ + "puttagd %[_arg7], %[_tag7], %%db[7]\n" \ + "call %%ctpr1, wbs=%#\n" \ + "}\n" \ + "addd %%db[0], 0, %[_ret]\n" \ + : [_ret] "=r" (__ret) \ + : [_func] "i" (&ttable_entry8), [_sys_num] "r" (sys_num), \ + [_arg2] "r" (arg2), [_arg3] "r" (arg3), \ + [_arg4] "r" (arg4), [_arg5] "r" (arg5), \ + [_arg6] "r" (arg6), [_arg7] "r" (arg7), \ + [_tag2] "r" (tag2), [_tag3] "r" (tag3), \ + [_tag4] "r" (tag4), [_tag5] "r" (tag5), \ + [_tag6] "r" (tag6), [_tag7] "r" (tag7) \ + : E2K_SYSCALL_CLOBBERS); \ + __ret; \ +}) + +notrace __interrupt __section(".entry.text") +int protected_fast_sys_clock_gettime(u32 tags, clockid_t which_clock, + u64 arg3, u64 arg4, u64 arg5) +{ + const struct thread_info *ti = READ_CURRENT_REG(); + struct timespec __user *tp; + int size, ret; + + prefetchw(&fsys_data); + + GET_PTR(tp, size, 4, 5, sizeof(struct timespec), 0, ti); + if (!size) + return -EFAULT; + + if (unlikely((u64) tp + sizeof(struct timespec) > ti->addr_limit.seg)) + return -EFAULT; + + ret = 
do_fast_clock_gettime(which_clock, tp); + if (unlikely(ret)) + ret = PROTECTED_SYSCALL(__NR_clock_gettime, tags, + which_clock, arg3, arg4, arg5); + + return ret; +} + +notrace __interrupt __section(".entry.text") +int protected_fast_sys_gettimeofday(u32 tags, u64 arg2, u64 arg3, u64 arg4, u64 arg5) +{ + const struct thread_info *ti = READ_CURRENT_REG(); + struct timeval __user *tv; + struct timezone __user *tz; + int size, ret; + + prefetch_nospec(&fsys_data); + + GET_PTR(tv, size, 2, 3, sizeof(struct timeval), 1, ti); + if (!size) + return -EFAULT; + + GET_PTR(tz, size, 4, 5, sizeof(struct timezone), 1, ti); + if (!size) + return -EFAULT; + + if (unlikely((u64) tv + sizeof(struct compat_timeval) > + ti->addr_limit.seg + || (u64) tz + sizeof(struct timezone) > + ti->addr_limit.seg)) + return -EFAULT; + + if (likely(tv)) { + ret = do_fast_gettimeofday(tv); + if (unlikely(ret)) + return PROTECTED_SYSCALL(__NR_gettimeofday, tags, + arg2, arg3, arg4, arg5); + } else { + ret = 0; + } + + if (tz) { + tz->tz_minuteswest = sys_tz.tz_minuteswest; + tz->tz_dsttime = sys_tz.tz_dsttime; + } + + return ret; +} + + +notrace __interrupt __section(".entry.text") +int protected_fast_sys_getcpu(u32 tags, u64 arg2, u64 arg3, u64 arg4, u64 arg5) +{ + const struct thread_info *ti = READ_CURRENT_REG(); + int cpu = task_cpu(thread_info_task(ti)); + int node, size; + unsigned __user *cpup; + unsigned __user *nodep; + + GET_PTR(cpup, size, 2, 3, sizeof(unsigned int), 1, ti); + if (!size) + return -EFAULT; + + GET_PTR(nodep, size, 4, 5, sizeof(unsigned int), 1, ti); + if (!size) + return -EFAULT; + + if (unlikely((u64) cpup + sizeof(unsigned) > ti->addr_limit.seg + || (u64) nodep + sizeof(unsigned) > ti->addr_limit.seg)) + return -EFAULT; + + if (nodep) + node = cpu_to_node(cpu); + + if (nodep) + *nodep = node; + if (cpup) + *cpup = cpu; + + return 0; +} + +#if _NSIG != 64 +# error We read u64 value here... 
+#endif +notrace __interrupt __section(".entry.text") +int protected_fast_sys_siggetmask(u32 tags, u64 arg2, u64 arg3, size_t sigsetsize) +{ + const struct thread_info *ti = READ_CURRENT_REG(); + const struct task_struct *task = thread_info_task(ti); + u64 set; + int size; + u64 __user *oset; + + BUILD_BUG_ON(sizeof(task->blocked.sig[0]) != 8); + set = task->blocked.sig[0]; + + if (unlikely(sigsetsize != 8)) + return -EINVAL; + + GET_PTR(oset, size, 2, 3, sizeof(sigset_t), 0, ti); + if (!size) + return -EFAULT; + + if (unlikely((u64) oset + sizeof(sigset_t) > ti->addr_limit.seg)) + return -EFAULT; + + *oset = set; + + return 0; +} + +#if _NSIG != 64 +# error We read u64 value here... +#endif +notrace __interrupt __section(".entry.text") +int protected_fast_sys_getcontext(u32 tags, u64 arg2, u64 arg3, size_t sigsetsize) +{ + const struct thread_info *ti = READ_CURRENT_REG(); + const struct task_struct *task = thread_info_task(ti); + register u64 pcsp_lo, pcsp_hi; + register u32 fpcr, fpsr, pfpfr; + u64 set, key; + int size, ret = 0; + struct ucontext_prot __user *ucp; + + BUILD_BUG_ON(sizeof(task->blocked.sig[0]) != 8); + set = task->blocked.sig[0]; + + if (unlikely(sigsetsize != 8)) + return -EINVAL; + + GET_PTR(ucp, size, 2, 3, sizeof(struct ucontext_prot), 0, ti); + if (!size) + return -EFAULT; + + if (unlikely((u64) ucp + sizeof(struct ucontext_prot) + > ti->addr_limit.seg + || (u64) ucp >= ti->addr_limit.seg)) + return -EFAULT; + + key = context_ti_key_fast_syscall(ti); + + E2K_GETCONTEXT(fpcr, fpsr, pfpfr, pcsp_lo, pcsp_hi); + + /* We want stack to point to user frame that called us */ + pcsp_hi -= SZ_OF_CR; + + *((u64 *) &ucp->uc_sigmask) = set; + ucp->uc_mcontext.sbr = key; + ucp->uc_mcontext.pcsp_lo = pcsp_lo; + ucp->uc_mcontext.pcsp_hi = pcsp_hi; + ucp->uc_extra.fpcr = fpcr; + ucp->uc_extra.fpsr = fpsr; + ucp->uc_extra.pfpfr = pfpfr; + + return ret; +} + diff --git a/arch/e2k/fast_syscalls/sclkr.c b/arch/e2k/fast_syscalls/sclkr.c new file mode 100644 index 
000000000000..e6f6dace29df --- /dev/null +++ b/arch/e2k/fast_syscalls/sclkr.c @@ -0,0 +1,66 @@ +/* + * arch/e2k/kernel/sclkr.c + * + * This file contains implementation of sclkr clocksource. + * + * Copyright (C) MCST 2015 Leonid Ananiev (leoan@mcst.ru) + */ + +#include +#include +#include +#include +#include +#include +#include + +/* #define SET_SCLKR_TIME1970 */ + +#define SCLKR_LO 0xffffffff +#define SCLKM1_DIV 0xffffffff +/* OS may write in SCLKM1_DIV field */ +#define SCLKM1_MDIV 0x100000000LL +/* external mode field */ +#define SCLKM1_EXT 0x200000000LL +/* training mode field will unset by hardware on 2-nd pulse */ +#define SCLKM1_TRN 0x400000000LL +/* software field is set if sclkr is correct */ +#define SCLKM1_SW_OK 0x800000000LL +#define SCLKR_DFLT_HZ 0x0773593f /* 125 MHz */ + +/* For kernel 4.9: */ +#define READ_SSCLKR_REG() READ_SCLKR_REG() +#define READ_SSCLKM1_REG() READ_SCLKM1_REG() +#define READ_SSCLKM3_REG() READ_SCLKM3_REG() +#define READ_SCURRENT_REG() READ_CURRENT_REG() + +__section(".entry.text") +notrace __interrupt +u64 fast_syscall_read_sclkr(void) +{ + u64 sclkr, freq; + struct thread_info *const ti = READ_SCURRENT_REG(); + e2k_sclkm1_t sclkm1; +#ifdef DEBUG_SCLKR_FREQ + u64 this_prev_freq; + u64 *prev_freq_ptr; + + prev_freq_ptr = &per_cpu(prev_freq, ti->cpu); + this_prev_freq = *prev_freq_ptr; +#endif + sclkr = READ_SSCLKR_REG(); + sclkm1 = READ_SSCLKM1_REG(); + freq = sclkm1.div; + + if (unlikely(sclkr_mode != SCLKR_INT && !sclkm1.mode || + !sclkm1.sw || !freq)) + return 0; +#ifdef DEBUG_SCLKR_FREQ + if (unlikely(abs(this_prev_freq - freq) > + (this_prev_freq >> OSCIL_JIT_SHFT))) + freq = basic_freq_hz; + *prev_freq_ptr = freq; +#endif + + return sclkr_to_ns(sclkr, freq); +} diff --git a/arch/e2k/include/asm-l b/arch/e2k/include/asm-l new file mode 120000 index 000000000000..55881a560a9f --- /dev/null +++ b/arch/e2k/include/asm-l @@ -0,0 +1 @@ +../../l/include/asm \ No newline at end of file diff --git a/arch/e2k/include/asm/3p.h 
b/arch/e2k/include/asm/3p.h new file mode 100644 index 000000000000..20c6616f6107 --- /dev/null +++ b/arch/e2k/include/asm/3p.h @@ -0,0 +1,113 @@ +#ifndef _E2K_3P_H_ +#define _E2K_3P_H_ + +#ifdef __KERNEL__ + +#include +#include +#include + +struct vm_area_struct; +struct pt_regs; +struct file; +extern int do_global_sp(struct pt_regs *regs, trap_cellar_t *tcellar); +extern int lw_global_sp(struct pt_regs *regs); +extern void free_global_sp(void); +extern int delete_records(unsigned int psl_from); +extern void mark_all_global_sp(struct pt_regs *regs, pid_t pid); +extern int interpreted_ap_code(struct pt_regs *regs, + struct vm_area_struct **vma, e2k_addr_t *address); + +struct syscall_attrs { + u32 mask; /* for coding specs see systable.c */ + /* The next 6 fields specify minimum allowed argument size + * in case of argument-descriptor. + * If negative value, this means size is defined by corresponding arg. + * F.e. value (-3) means size is specified by argument #3. + */ + short size1; /* min allowed size of arg1 of particular system call */ + short size2; /* minimum allowed size of arg2 */ + short size3; /* minimum allowed size of arg3 */ + short size4; /* minimum allowed size of arg4 */ + u16 size5; /* minimum allowed size of arg5 */ + u16 size6; /* minimum allowed size of arg6 */ +} __aligned(16) /* For faster address calculation */; +extern const struct syscall_attrs sys_protcall_args[]; +extern const char *sys_call_ID_to_name[]; + +/* + * Definition of ttable entry number used for protected system calls. + * This is under agreement with protected mode compiler/plib team. + */ +#define PMODE_NEW_SYSCALL_TRAPNUM 10 + +/* + * List of protected mode system calls supported. + * For the moment it covers all the calls implemented in plib library. 
+ */ + +#define __NR_P_get_mem 500 +#define __NR_P_free_mem 501 +#define __NR_P_dump_umem 507 + + +/* + * Here are some stuff that belongs to LOCAL->GLOBAL operation support + */ + +typedef struct global_store_trace_record global_store_t; + +typedef enum { + TYPE_GLOBAL = 0, + TYPE_BOUND, + TYPE_INIT, +} type_global_type_t; + +struct global_store_trace_record { + global_store_t *prev; /*that is struct list_head list; */ + global_store_t *next; + type_global_type_t type; + unsigned int lcl_psl; + unsigned int orig_psr_lw; /* to keep track */ + e2k_addr_t global_p; + pid_t pid; + e2k_addr_t new_address; + e2k_addr_t old_address; + unsigned long word1; /*the first word of SAP */ + unsigned long word2; /*the second word of SAP */ + e2k_addr_t sbr; + /* + * just to care about perhaps I need to store the LOCAL here + * as a backup. + */ +}; + +#define IS_SAP_LO(addr) \ +({ \ + e2k_rwsap_lo_struct_t *sap_lo; \ + sap_lo = (e2k_rwsap_lo_struct_t *) addr; \ + (AS_SAP_STRUCT((*sap_lo)).itag == E2K_SAP_ITAG ? \ + (NATIVE_LOAD_TAGD(addr) == E2K_SAP_LO_ETAG ? 1 : 0) : 0); \ +}) + +#define IS_SAP_HI(addr) \ +({ \ + (NATIVE_LOAD_TAGD(addr) == E2K_SAP_HI_ETAG ? 1 : 0); \ +}) + +#define IS_AP_LO(addr) \ +({ \ + e2k_rwap_lo_struct_t *ap_lo; \ + ap_lo = (e2k_rwap_lo_struct_t *) addr; \ + (AS_AP_STRUCT((*ap_lo)).itag == E2K_AP_ITAG ? \ + (NATIVE_LOAD_TAGD(addr) == E2K_AP_LO_ETAG ? 1 : 0) : 0); \ +}) + +#define IS_AP_HI(addr) \ +({ \ + (NATIVE_LOAD_TAGD(addr) == E2K_AP_HI_ETAG ? 
1 : 0); \ +}) + +#endif /* __KERNEL__ */ + +#endif /* _E2K_3P_H_ */ diff --git a/arch/e2k/include/asm/Kbuild b/arch/e2k/include/asm/Kbuild new file mode 100644 index 000000000000..04885f82b2f3 --- /dev/null +++ b/arch/e2k/include/asm/Kbuild @@ -0,0 +1,16 @@ +### generic + +generic-y += bugs.h +generic-y += div64.h +generic-y += errno.h +generic-y += emergency-restart.h +generic-y += irq_regs.h +generic-y += kmap_types.h +generic-y += kvm_para.h +generic-y += local64.h +generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h +generic-y += param.h +generic-y += qrwlock.h +generic-y += xor.h +generic-y += mmiowb.h diff --git a/arch/e2k/include/asm/a.out.h b/arch/e2k/include/asm/a.out.h new file mode 100644 index 000000000000..397dad70d643 --- /dev/null +++ b/arch/e2k/include/asm/a.out.h @@ -0,0 +1,28 @@ +#ifndef __E2K_A_OUT_H__ +#define __E2K_A_OUT_H__ + +#ifndef __ASSEMBLY__ + +struct exec +{ + unsigned long a_info; /* Use macros N_MAGIC, etc for access */ + unsigned int a_text; /* length of text, in bytes */ + unsigned int a_data; /* length of data, in bytes */ + unsigned int a_bss; /* length of uninitialized data area for file, in bytes */ + unsigned int a_syms; /* length of symbol table data in file, in bytes */ + unsigned int a_entry; /* start address */ + unsigned int a_trsize; /* length of relocation info for text, in bytes */ + unsigned int a_drsize; /* length of relocation info for data, in bytes */ +}; + +#endif /* __ASSEMBLY__ */ + +#define N_TRSIZE(a) ((a).a_trsize) +#define N_DRSIZE(a) ((a).a_drsize) +#define N_SYMSIZE(a) ((a).a_syms) + +#ifdef __KERNEL__ + +#endif + +#endif /* __E2K_A_OUT_H__ */ diff --git a/arch/e2k/include/asm/aau_context.h b/arch/e2k/include/asm/aau_context.h new file mode 100644 index 000000000000..42714d0393e1 --- /dev/null +++ b/arch/e2k/include/asm/aau_context.h @@ -0,0 +1,255 @@ +/* + * aau_context.h - saving/loading AAU context. + * + * In this file you can see various lists of similar operations. 
All + * of these operations are of AAU access. The hint is the following: + * AAU regiters can be obtained only through LDAA operation with index + * hardcoded into the AAU syllable. So, index as variable can not be + * substituted. As a cosequence we can not pack them into the loop and + * they are forced to be in lists. + */ +#ifndef _E2K_AAU_CONTEXT_H_ +#define _E2K_AAU_CONTEXT_H_ + +#include +#include +#include +#include + +/******************************* DEBUG DEFINES ********************************/ +#undef DEBUG_AAU_CHECK + +#define DEBUG_AAU_CHECK 0 +#define DbgChk if (DEBUG_AAU_CHECK) printk +/******************************************************************************/ + +typedef union e2k_fapb_aps { + union { + struct { + u64 abs : 5; /* [4:0] area base */ + u64 asz : 3; /* [7:5] area size */ + u64 ind : 4; /* [11:8] initial index (si == 0) */ + u64 incr : 3; /* [14:12] AAINCR number (si == 0) */ + u64 d : 5; /* [19:15] AAD number */ + u64 mrng : 5; /* [24:20] element size */ + u64 fmt : 3; /* [27:25] format */ + u64 dcd : 2; /* [29:28] data cache disabled */ + u64 si : 1; /* [30] secondary index access */ + u64 ct : 1; /* [31] control transfer (left ch.) 
*/ + u64 disp : 32; + }; + struct { + u64 __x1 : 8; + u64 area : 5; /* [12:8] APB area index (si == 1) */ + u64 am : 1; /* [13] (si == 1) */ + u64 be : 1; /* [14] big endian (si == 1) */ + u64 __x2 : 16; + u64 dpl : 1; /* [31] duplicate (right channel) */ + u64 __x3 : 32; + }; + } fields; + u64 word; +} e2k_fapb_instr_t; + +/* constants to pick LSR register fields up */ +#define LSR_LCNT_MASK 0xFFFFFFFF +#define LSR_LDMC_MASK 0x1 +#define LSR_LDMC_SHIFT 39 +#define LSR_ECNT_MASK 0x1f +#define LSR_ECNT_SHIFT 32 +#define LSR_PCNT_MASK 0xf +#define LSR_PCNT_SHIFT 48 +#define LSR_VLC_MASK 0x1 +#define LSR_VLC_SHIFT 37 + +#define get_lcnt(reg) (reg & LSR_LCNT_MASK) +#define get_ldmc(reg) ((reg >> LSR_LDMC_SHIFT) & LSR_LDMC_MASK) +#define get_ecnt(reg) ((reg >> LSR_ECNT_SHIFT) & LSR_ECNT_MASK) +#define get_pcnt(reg) ((reg >> LSR_PCNT_SHIFT) & LSR_PCNT_MASK) +#define get_vlc(reg) ((reg >> LSR_VLC_SHIFT) & LSR_VLC_MASK) + +static inline void +native_get_array_descriptors_v2(e2k_aau_t *context) +{ + NATIVE_GET_ARRAY_DESCRIPTORS_V2(context); +} +static inline void +native_get_array_descriptors_v5(e2k_aau_t *context) +{ + NATIVE_GET_ARRAY_DESCRIPTORS_V5(context); +} + +static __always_inline void +native_set_array_descriptors(const e2k_aau_t *context) +{ + NATIVE_SET_ARRAY_DESCRIPTORS(context); +} + +static inline void +native_get_synchronous_part_v2(e2k_aau_t *context) +{ + NATIVE_GET_SYNCHRONOUS_PART_V2(context); +} +static inline void +native_get_synchronous_part_v5(e2k_aau_t *context) +{ + NATIVE_GET_SYNCHRONOUS_PART_V5(context); +} + +static __always_inline void +native_set_synchronous_part(const e2k_aau_t *context) +{ + NATIVE_SET_SYNCHRONOUS_PART(context); +} + +static inline void +native_set_all_aaldis(const u64 aaldis[]) +{ + NATIVE_SET_ALL_AALDIS(aaldis); +} + +static inline void +native_set_all_aaldas(const e2k_aalda_t aaldas_p[]) +{ +#ifndef __LITTLE_ENDIAN +# error This loads must be little endian to not mix aaldas up (and the same goes to SAVE_AALDA) +#endif + 
NATIVE_SET_ALL_AALDAS(aaldas_p); +} + +/* set current array prefetch buffer indices values */ +static __always_inline void native_set_aau_aaldis_aaldas( + const struct thread_info *ti, const e2k_aau_t *aau_regs) +{ + native_set_all_aaldis(aau_regs->aaldi); + native_set_all_aaldas(ti->aalda); +} + +/* + * It's taken that aasr was get earlier(from get_aau_context caller) + * and comparison with aasr.iab was taken. + */ +static inline void +native_get_aau_context_v2(e2k_aau_t *context) +{ + NATIVE_GET_AAU_CONTEXT_V2(context); +} +static inline void +native_get_aau_context_v5(e2k_aau_t *context) +{ + NATIVE_GET_AAU_CONTEXT_V5(context); +} + +/* + * It's taken that comparison with aasr.iab was taken and assr + * will be set later. + */ +static __always_inline void +native_set_aau_context(e2k_aau_t *context) +{ + NATIVE_SET_AAU_CONTEXT(context); +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* It is pure guest kernel without paravirtualization */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* It is paravirtualized host and guest kernel */ +#include +#else /* ! CONFIG_KVM_GUEST_KERNEL && ! 
CONFIG_PARAVIRT_GUEST */ +/* native kernel without virtualization */ +/* or native host kernel with virtualization support */ + +#define GET_ARRAY_DESCRIPTORS_V2(aau_context) \ +({ \ + native_get_array_descriptors_v2(aau_context); \ +}) +#define GET_ARRAY_DESCRIPTORS_V5(aau_context) \ +({ \ + native_get_array_descriptors_v5(aau_context); \ +}) +#define GET_SYNCHRONOUS_PART_V2(aau_context) \ +({ \ + native_get_synchronous_part_v2(aau_context); \ +}) +#define GET_SYNCHRONOUS_PART_V5(aau_context) \ +({ \ + native_get_synchronous_part_v5(aau_context); \ +}) + +#define GET_AAU_CONTEXT_V2(cntx) native_get_aau_context_v2(cntx) +#define GET_AAU_CONTEXT_V5(cntx) native_get_aau_context_v5(cntx) + +#define SAVE_AAU_MASK_REGS(aau_context, aasr) \ + NATIVE_SAVE_AAU_MASK_REGS(aau_context, aasr) + +#define RESTORE_AAU_MASK_REGS(aau_context) \ + NATIVE_RESTORE_AAU_MASK_REGS(aau_context) + +#define SAVE_AADS(aau_regs) \ + NATIVE_SAVE_AADS(aau_regs) + +#define RESTORE_AADS(aau_regs) \ + NATIVE_RESTORE_AADS(aau_regs) + + +#define SAVE_AALDIS_V2(regs) NATIVE_SAVE_AALDIS_V2(regs) +#define SAVE_AALDIS_V5(regs) NATIVE_SAVE_AALDIS_V5(regs) + +#define SAVE_AALDA(aaldas) \ +({ \ + register u32 aalda0, aalda4, aalda8, aalda12, \ + aalda16, aalda20, aalda24, aalda28, \ + aalda32, aalda36, aalda40, aalda44, \ + aalda48, aalda52, aalda56, aalda60; \ + \ + NATIVE_GET_AAU_AALDA(aalda0, aalda32, aalda0); \ + NATIVE_GET_AAU_AALDA(aalda4, aalda36, aalda4); \ + NATIVE_GET_AAU_AALDA(aalda8, aalda40, aalda8); \ + NATIVE_GET_AAU_AALDA(aalda12, aalda44, aalda12); \ + NATIVE_GET_AAU_AALDA(aalda16, aalda48, aalda16); \ + NATIVE_GET_AAU_AALDA(aalda20, aalda52, aalda20); \ + NATIVE_GET_AAU_AALDA(aalda24, aalda56, aalda24); \ + NATIVE_GET_AAU_AALDA(aalda28, aalda60, aalda28); \ + *(u32 *) (&aaldas[0]) = aalda0; \ + *(u32 *) (&aaldas[4]) = aalda4; \ + *(u32 *) (&aaldas[8]) = aalda8; \ + *(u32 *) (&aaldas[12]) = aalda12; \ + *(u32 *) (&aaldas[16]) = aalda16; \ + *(u32 *) (&aaldas[20]) = aalda20; \ + *(u32 *) 
(&aaldas[24]) = aalda24; \ + *(u32 *) (&aaldas[28]) = aalda28; \ + *(u32 *) (&aaldas[32]) = aalda32; \ + *(u32 *) (&aaldas[36]) = aalda36; \ + *(u32 *) (&aaldas[40]) = aalda40; \ + *(u32 *) (&aaldas[44]) = aalda44; \ + *(u32 *) (&aaldas[48]) = aalda48; \ + *(u32 *) (&aaldas[52]) = aalda52; \ + *(u32 *) (&aaldas[56]) = aalda56; \ + *(u32 *) (&aaldas[60]) = aalda60; \ +}) + +#define SAVE_AAFSTR(regs) \ +({ \ + regs = native_read_aafstr_reg_value(); \ +}) + +static __always_inline void +set_aau_context(e2k_aau_t *context) +{ + native_set_aau_context(context); +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +/* + * for code optimization + */ +static inline int aau_working(e2k_aau_t *context) +{ + e2k_aasr_t aasr = context->aasr; + + return unlikely(AW(aasr) & (AAU_AASR_IAB | AAU_AASR_STB)); +} + +#endif /* _E2K_AAU_CONTEXT_H */ diff --git a/arch/e2k/include/asm/aau_regs.h b/arch/e2k/include/asm/aau_regs.h new file mode 100644 index 000000000000..8f9ff0889e13 --- /dev/null +++ b/arch/e2k/include/asm/aau_regs.h @@ -0,0 +1,24 @@ +/* + * AAU registers description, macroses for load/store AAU context + * + * array access descriptors (AAD0, ... , AAD31); + * initial indices (AIND0, ... , AAIND15); + * indices increment values (AAINCR0, ... , AAINCR7); + * current values of "prefetch" indices (AALDI0, ... , AALDI63); + * array prefetch initialization mask (AALDV); + * prefetch attributes (AALDA0, ... , AALDA63); + * array prefetch advance mask (AALDM); + * array access status register (AASR); + * array access fault status register (AAFSTR); + * current values of "store" indices (AASTI0, ... , AASTI15); + * store attributes (AASTA0, ... 
, AASTA15); + */ + +#ifndef _E2K_AAU_H_ +#define _E2K_AAU_H_ + +#include +#include +#include + +#endif /* _E2K_AAU_H_ */ diff --git a/arch/e2k/include/asm/aau_regs_access.h b/arch/e2k/include/asm/aau_regs_access.h new file mode 100644 index 000000000000..78e23553d19f --- /dev/null +++ b/arch/e2k/include/asm/aau_regs_access.h @@ -0,0 +1,685 @@ +/* + * AAU registers description, macroses for load/store AAU context + * + * array access descriptors (AAD0, ... , AAD31); + * initial indices (AIND0, ... , AAIND15); + * indices increment values (AAINCR0, ... , AAINCR7); + * current values of "prefetch" indices (AALDI0, ... , AALDI63); + * array prefetch initialization mask (AALDV); + * prefetch attributes (AALDA0, ... , AALDA63); + * array prefetch advance mask (AALDM); + * array access status register (AASR); + * array access fault status register (AAFSTR); + * current values of "store" indices (AASTI0, ... , AASTI15); + * store attributes (AASTA0, ... , AASTA15); + */ + +#ifndef _E2K_AAU_REGS_ACCESS_H_ +#define _E2K_AAU_REGS_ACCESS_H_ + +#include +#include + +#include + +/* + * see comment about of PREFIX_ at top of arch/e2k/include/regs_state.h + * + additional parameter: + * pv_type argument in macroses is same as prefix but by small letter + * and can be: + * native native kernel with or without virtualization support + * kvm guest kernel (can be run only as paravirtualized + * guest kernel) + * pv paravirtualized kernel (can be run as host and as guest + * paravirtualized kernels) + */ + +#define PREFIX_SAVE_AAU_MASK_REGS(PV_TYPE, pv_type, aau_context, aasr) \ +({ \ + if (unlikely(AAU_ACTIVE(aasr))) { \ + /* As it turns out AAU can be in ACTIVE state \ + * in interrupt handler (bug 53227 comment 28 \ + * and bug 53227 comment 36). \ + * The hardware stops AAU automatically but \ + * the value to be written should be corrected \ + * to "stopped" so that the "DONE" instruction \ + * works as expected. 
\ + */ \ + AS(aasr).lds = AASR_STOPPED; \ + } \ + (aau_context)->aasr = aasr; \ + if (unlikely(AAU_STOPPED(aasr))) { \ + pv_type##_read_aaldv_reg(&(aau_context)->aaldv); \ + pv_type##_read_aaldm_reg(&(aau_context)->aaldm); \ + } else { \ + AW((aau_context)->aaldv) = 0; \ + AW((aau_context)->aaldm) = 0; \ + } \ +}) + +#define NATIVE_SAVE_AAU_MASK_REGS(aau_context, aasr) \ + PREFIX_SAVE_AAU_MASK_REGS(NATIVE, native, aau_context, aasr) + +#define PREFIX_RESTORE_AAU_MASK_REGS(PV_TYPE, pv_type, aau_context) \ +({ \ + pv_type##_write_aafstr_reg_value(0); \ + pv_type##_write_aaldm_reg(&(aau_context)->aaldm); \ + pv_type##_write_aaldv_reg(&(aau_context)->aaldv); \ + /* aasr can be in 'ACTIVE' state, so we set it last */ \ + pv_type##_write_aasr_reg((aau_context)->aasr); \ +}) + +#define NATIVE_RESTORE_AAU_MASK_REGS(aau_context) \ + PREFIX_RESTORE_AAU_MASK_REGS(NATIVE, native, aau_context) + +#define PREFIX_SAVE_AADS(PV_TYPE, pv_type, aau_regs) \ +({ \ + register e2k_aadj_t *aads = (aau_regs)->aads; \ + \ + pv_type##_read_aads_4_reg(0, &aads[0]); \ + pv_type##_read_aads_4_reg(4, &aads[4]); \ + pv_type##_read_aads_4_reg(8, &aads[8]); \ + pv_type##_read_aads_4_reg(12, &aads[12]); \ + pv_type##_read_aads_4_reg(16, &aads[16]); \ + pv_type##_read_aads_4_reg(20, &aads[20]); \ + pv_type##_read_aads_4_reg(24, &aads[24]); \ + pv_type##_read_aads_4_reg(28, &aads[28]); \ +}) + +#define NATIVE_SAVE_AADS(aau_regs) \ + PREFIX_SAVE_AADS(NATIVE, native, aau_regs) + +#define PREFIX_RESTORE_AADS(PV_TYPE, pv_type, aau_regs) \ +({ \ + register e2k_aadj_t *aads = (aau_regs)->aads; \ + \ + pv_type##_write_aads_4_reg(0, &aads[0]); \ + pv_type##_write_aads_4_reg(4, &aads[4]); \ + pv_type##_write_aads_4_reg(8, &aads[8]); \ + pv_type##_write_aads_4_reg(12, &aads[12]); \ + pv_type##_write_aads_4_reg(16, &aads[16]); \ + pv_type##_write_aads_4_reg(20, &aads[20]); \ + pv_type##_write_aads_4_reg(24, &aads[24]); \ + pv_type##_write_aads_4_reg(28, &aads[28]); \ +}) + +#define NATIVE_RESTORE_AADS(aau_regs) 
\ + PREFIX_RESTORE_AADS(NATIVE, native, aau_regs) + +#define PREFIX_SAVE_AALDIS(PV_TYPE, pv_type, ISET, iset, regs) \ +({ \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(0, regs[0], regs[32]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(1, regs[1], regs[33]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(2, regs[2], regs[34]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(3, regs[3], regs[35]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(4, regs[4], regs[36]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(5, regs[5], regs[37]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(6, regs[6], regs[38]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(7, regs[7], regs[39]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(8, regs[8], regs[40]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(9, regs[9], regs[41]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(10, regs[10], regs[42]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(11, regs[11], regs[43]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(12, regs[12], regs[44]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(13, regs[13], regs[45]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(14, regs[14], regs[46]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(15, regs[15], regs[47]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(16, regs[16], regs[48]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(17, regs[17], regs[49]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(18, regs[18], regs[50]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(19, regs[19], regs[51]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(20, regs[20], regs[52]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(21, regs[21], regs[53]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(22, regs[22], regs[54]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(23, regs[23], regs[55]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(24, regs[24], regs[56]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(25, regs[25], regs[57]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(26, regs[26], regs[58]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(27, regs[27], regs[59]); \ + 
PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(28, regs[28], regs[60]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(29, regs[29], regs[61]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(30, regs[30], regs[62]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(31, regs[31], regs[63]); \ +}) +#define PREFIX_SAVE_AALDIS_V2(PV_TYPE, pv_type, regs) \ + PREFIX_SAVE_AALDIS(PV_TYPE, pv_type, V2, v2, regs) +#define PREFIX_SAVE_AALDIS_V5(PV_TYPE, pv_type, regs) \ + PREFIX_SAVE_AALDIS(PV_TYPE, pv_type, V5, v5, regs) + +#define NATIVE_SAVE_AALDIS_V2(regs) \ + PREFIX_SAVE_AALDIS_V2(NATIVE, native, regs) +#define NATIVE_SAVE_AALDIS_V5(regs) \ + PREFIX_SAVE_AALDIS_V5(NATIVE, native, regs) +#define NATIVE_SAVE_AALDIS(regs) \ +({ \ + if (IS_AAU_ISET_V5()) { \ + NATIVE_SAVE_AALDIS_V5(regs); \ + } else if (IS_AAU_ISET_V2()) { \ + NATIVE_SAVE_AALDIS_V2(regs); \ + } else if (IS_AAU_ISET_GENERIC()) { \ + machine.save_aaldi(regs); \ + } else { \ + BUILD_BUG_ON(true); \ + } \ +}) + +#define PREFIX_GET_ARRAY_DESCRIPTORS_V2(PV_TYPE, pv_type, aau_context) \ +({ \ + u64 *const aainds = (aau_context)->aainds; \ + u64 *const aaincrs = (aau_context)->aaincrs; \ + \ + /* \ + * get AAINDs, omit the AAIND0 saving since it has predefined 0 \ + * value \ + */ \ + { \ + register u32 ind1, ind2, ind3, ind4, \ + ind5, ind6, ind7, ind8, \ + ind9, ind10, ind11, ind12, \ + ind13, ind14, ind15; \ + register u32 tags; \ + \ + PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(1, ind1, ind2); \ + PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(3, ind3, ind4); \ + PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(5, ind5, ind6); \ + PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(7, ind7, ind8); \ + PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(9, ind9, ind10); \ + PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(11, ind11, ind12); \ + PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(13, ind13, ind14); \ + PV_TYPE##_READ_AAIND_REG15_AND_TAGS_VALUE_V2(ind15, tags); \ + aainds[0] = 0; \ + aainds[1] = ind1; \ + aainds[2] = ind2; \ + aainds[3] = ind3; \ + aainds[4] = ind4; \ + aainds[5] = ind5; \ + aainds[6] = ind6; \ + 
aainds[7] = ind7; \ + aainds[8] = ind8; \ + aainds[9] = ind9; \ + aainds[10] = ind10; \ + aainds[11] = ind11; \ + aainds[12] = ind12; \ + aainds[13] = ind13; \ + aainds[14] = ind14; \ + aainds[15] = ind15; \ + context->aaind_tags = tags; \ + } \ + \ + /* \ + * get AAINCRs, omit the AAINCR0 saving since it has predefined 1 \ + * value \ + */ \ + { \ + register u32 incr1, incr2, incr3, incr4, \ + incr5, incr6, incr7; \ + register u32 tags; \ + \ + PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V2(1, incr1, incr2); \ + PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V2(3, incr3, incr4); \ + PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V2(5, incr5, incr6); \ + PV_TYPE##_READ_AAINCR_REG7_AND_TAGS_VALUE_V2(incr7, tags); \ + aaincrs[0] = 1; \ + aaincrs[1] = (s64) (s32) incr1; \ + aaincrs[2] = (s64) (s32) incr2; \ + aaincrs[3] = (s64) (s32) incr3; \ + aaincrs[4] = (s64) (s32) incr4; \ + aaincrs[5] = (s64) (s32) incr5; \ + aaincrs[6] = (s64) (s32) incr6; \ + aaincrs[7] = (s64) (s32) incr7; \ + context->aaincr_tags = tags; \ + } \ +}) +#define NATIVE_GET_ARRAY_DESCRIPTORS_V2(aau_context) \ + PREFIX_GET_ARRAY_DESCRIPTORS_V2(NATIVE, native, aau_context) + +#define PREFIX_GET_ARRAY_DESCRIPTORS_V5(PV_TYPE, pv_type, aau_context) \ +({ \ + u64 *const aainds = (aau_context)->aainds; \ + u64 *const aaincrs = (aau_context)->aaincrs; \ + \ + /* \ + * get AAINDs, omit the AAIND0 saving since it has predefined 0 \ + * value \ + */ \ + { \ + register u64 ind1, ind2, ind3, ind4, \ + ind5, ind6, ind7, ind8, \ + ind9, ind10, ind11, ind12, \ + ind13, ind14, ind15; \ + register u32 tags; \ + \ + PV_TYPE##_READ_AAINDS_PAIR_VALUE_V5(1, ind1, ind2); \ + PV_TYPE##_READ_AAINDS_PAIR_VALUE_V5(3, ind3, ind4); \ + PV_TYPE##_READ_AAINDS_PAIR_VALUE_V5(5, ind5, ind6); \ + PV_TYPE##_READ_AAINDS_PAIR_VALUE_V5(7, ind7, ind8); \ + PV_TYPE##_READ_AAINDS_PAIR_VALUE_V5(9, ind9, ind10); \ + PV_TYPE##_READ_AAINDS_PAIR_VALUE_V5(11, ind11, ind12); \ + PV_TYPE##_READ_AAINDS_PAIR_VALUE_V5(13, ind13, ind14); \ + 
PV_TYPE##_READ_AAIND_REG15_AND_TAGS_VALUE_V5(ind15, tags); \ + aainds[0] = 0; \ + aainds[1] = ind1; \ + aainds[2] = ind2; \ + aainds[3] = ind3; \ + aainds[4] = ind4; \ + aainds[5] = ind5; \ + aainds[6] = ind6; \ + aainds[7] = ind7; \ + aainds[8] = ind8; \ + aainds[9] = ind9; \ + aainds[10] = ind10; \ + aainds[11] = ind11; \ + aainds[12] = ind12; \ + aainds[13] = ind13; \ + aainds[14] = ind14; \ + aainds[15] = ind15; \ + context->aaind_tags = tags; \ + } \ + \ + /* \ + * get AAINCRs, omit the AAINCR0 saving since it has predefined 1 \ + * value \ + */ \ + { \ + register u64 incr1, incr2, incr3, incr4, \ + incr5, incr6, incr7; \ + register u32 tags; \ + \ + PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V5(1, incr1, incr2); \ + PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V5(3, incr3, incr4); \ + PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V5(5, incr5, incr6); \ + PV_TYPE##_READ_AAINCR_REG7_AND_TAGS_VALUE_V5(incr7, tags); \ + aaincrs[0] = 1; \ + aaincrs[1] = incr1; \ + aaincrs[2] = incr2; \ + aaincrs[3] = incr3; \ + aaincrs[4] = incr4; \ + aaincrs[5] = incr5; \ + aaincrs[6] = incr6; \ + aaincrs[7] = incr7; \ + context->aaincr_tags = tags; \ + } \ +}) +#define NATIVE_GET_ARRAY_DESCRIPTORS_V5(aau_context) \ + PREFIX_GET_ARRAY_DESCRIPTORS_V5(NATIVE, native, aau_context) + +#define PREFIX_SET_ARRAY_DESCRIPTORS(PV_TYPE, pv_type, aau_context) \ +({ \ + const e2k_aau_t *const aau = (aau_context); \ + const u64 *const aainds = aau->aainds; \ + const u64 *const aaincrs = aau->aaincrs; \ + \ + /* \ + * set AAINDs, omit the AAIND0 restoring since \ + * it has predefined 0 value. 
\ + */ \ + pv_type##_write_aainds_pair_value(1, aainds[1], aainds[2]); \ + pv_type##_write_aainds_pair_value(3, aainds[3], aainds[4]); \ + pv_type##_write_aainds_pair_value(5, aainds[5], aainds[6]); \ + pv_type##_write_aainds_pair_value(7, aainds[7], aainds[8]); \ + pv_type##_write_aainds_pair_value(9, aainds[9], aainds[10]); \ + pv_type##_write_aainds_pair_value(11, aainds[11], aainds[12]); \ + pv_type##_write_aainds_pair_value(13, aainds[13], aainds[14]); \ + pv_type##_write_aaind_reg_value(15, aainds[15]); \ + \ + /* \ + * set AAINCRs, omit the AAINCR0 restoring since \ + * it has predefined 1 value. \ + */ \ + pv_type##_write_aaincrs_pair_value(1, aaincrs[1], aaincrs[2]); \ + pv_type##_write_aaincrs_pair_value(3, aaincrs[3], aaincrs[4]); \ + pv_type##_write_aaincrs_pair_value(5, aaincrs[5], aaincrs[6]); \ + pv_type##_write_aaincr_reg_value(7, aaincrs[7]); \ + \ + /* Set TAGS */ \ + PV_TYPE##_SET_AAU_AAIND_AAINCR_TAGS(aau->aaind_tags, aau->aaincr_tags); \ +}) +#define NATIVE_SET_ARRAY_DESCRIPTORS(aau_context) \ + PREFIX_SET_ARRAY_DESCRIPTORS(NATIVE, native, aau_context) + +#define PREFIX_GET_SYNCHRONOUS_PART_V2(PV_TYPE, pv_type, aau_context) \ +({ \ + u64 *const aastis = (aau_context)->aastis; \ + register u32 sti0, sti1, sti2, sti3, \ + sti4, sti5, sti6, sti7, \ + sti8, sti9, sti10, sti11, \ + sti12, sti13, sti14, sti15; \ + \ + /* get AASTIs */ \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(0, sti0, sti1); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(2, sti2, sti3); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(4, sti4, sti5); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(6, sti6, sti7); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(8, sti8, sti9); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(10, sti10, sti11); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(12, sti12, sti13); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(14, sti14, sti15); \ + \ + aastis[0] = sti0; \ + aastis[1] = sti1; \ + aastis[2] = sti2; \ + aastis[3] = sti3; \ + aastis[4] = sti4; \ + aastis[5] = sti5; \ + aastis[6] = sti6; \ + 
aastis[7] = sti7; \ + aastis[8] = sti8; \ + aastis[9] = sti9; \ + aastis[10] = sti10; \ + aastis[11] = sti11; \ + aastis[12] = sti12; \ + aastis[13] = sti13; \ + aastis[14] = sti14; \ + aastis[15] = sti15; \ + (aau_context)->aasti_tags = \ + pv_type##_read_aasti_tags_reg_value(); \ +}) + +#define PREFIX_GET_SYNCHRONOUS_PART_V5(PV_TYPE, pv_type, aau_context) \ +({ \ + u64 *const aastis = (aau_context)->aastis; \ + register u64 sti0, sti1, sti2, sti3, \ + sti4, sti5, sti6, sti7, \ + sti8, sti9, sti10, sti11, \ + sti12, sti13, sti14, sti15; \ + \ + /* get AASTIs */ \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(0, sti0, sti1); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(2, sti2, sti3); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(4, sti4, sti5); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(6, sti6, sti7); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(8, sti8, sti9); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(10, sti10, sti11); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(12, sti12, sti13); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(14, sti14, sti15); \ + \ + aastis[0] = sti0; \ + aastis[1] = sti1; \ + aastis[2] = sti2; \ + aastis[3] = sti3; \ + aastis[4] = sti4; \ + aastis[5] = sti5; \ + aastis[6] = sti6; \ + aastis[7] = sti7; \ + aastis[8] = sti8; \ + aastis[9] = sti9; \ + aastis[10] = sti10; \ + aastis[11] = sti11; \ + aastis[12] = sti12; \ + aastis[13] = sti13; \ + aastis[14] = sti14; \ + aastis[15] = sti15; \ + (aau_context)->aasti_tags = \ + pv_type##_read_aasti_tags_reg_value(); \ +}) +#define NATIVE_GET_SYNCHRONOUS_PART_V2(aau_context) \ + PREFIX_GET_SYNCHRONOUS_PART_V2(NATIVE, native, aau_context) +#define NATIVE_GET_SYNCHRONOUS_PART_V5(aau_context) \ + PREFIX_GET_SYNCHRONOUS_PART_V5(NATIVE, native, aau_context) + +#define PREFIX_SET_SYNCHRONOUS_PART(PV_TYPE, pv_type, aau_context) \ +({ \ + const u64 *const aastis = (aau_context)->aastis; \ + \ + /* set AASTIs */ \ + pv_type##_write_aastis_pair_value(0, aastis[0], aastis[1]); \ + pv_type##_write_aastis_pair_value(2, aastis[2], aastis[3]); \ + 
pv_type##_write_aastis_pair_value(4, aastis[4], aastis[5]); \ + pv_type##_write_aastis_pair_value(6, aastis[6], aastis[7]); \ + pv_type##_write_aastis_pair_value(8, aastis[8], aastis[9]); \ + pv_type##_write_aastis_pair_value(10, aastis[10], aastis[11]); \ + pv_type##_write_aastis_pair_value(12, aastis[12], aastis[13]); \ + pv_type##_write_aastis_pair_value(14, aastis[14], aastis[15]); \ + pv_type##_write_aasti_tags_reg_value((aau_context)->aasti_tags); \ +}) +#define NATIVE_SET_SYNCHRONOUS_PART(aau_context) \ + PREFIX_SET_SYNCHRONOUS_PART(NATIVE, native, aau_context) + +#define PREFIX_SET_ALL_AALDIS(PV_TYPE, pv_type, aaldis) \ +({ \ + pv_type##_write_aaldi_reg_value(0, aaldis[0], aaldis[32]); \ + pv_type##_write_aaldi_reg_value(1, aaldis[1], aaldis[33]); \ + pv_type##_write_aaldi_reg_value(2, aaldis[2], aaldis[34]); \ + pv_type##_write_aaldi_reg_value(3, aaldis[3], aaldis[35]); \ + pv_type##_write_aaldi_reg_value(4, aaldis[4], aaldis[36]); \ + pv_type##_write_aaldi_reg_value(5, aaldis[5], aaldis[37]); \ + pv_type##_write_aaldi_reg_value(6, aaldis[6], aaldis[38]); \ + pv_type##_write_aaldi_reg_value(7, aaldis[7], aaldis[39]); \ + pv_type##_write_aaldi_reg_value(8, aaldis[8], aaldis[40]); \ + pv_type##_write_aaldi_reg_value(9, aaldis[9], aaldis[41]); \ + pv_type##_write_aaldi_reg_value(10, aaldis[10], aaldis[42]); \ + pv_type##_write_aaldi_reg_value(11, aaldis[11], aaldis[43]); \ + pv_type##_write_aaldi_reg_value(12, aaldis[12], aaldis[44]); \ + pv_type##_write_aaldi_reg_value(13, aaldis[13], aaldis[45]); \ + pv_type##_write_aaldi_reg_value(14, aaldis[14], aaldis[46]); \ + pv_type##_write_aaldi_reg_value(15, aaldis[15], aaldis[47]); \ + pv_type##_write_aaldi_reg_value(16, aaldis[16], aaldis[48]); \ + pv_type##_write_aaldi_reg_value(17, aaldis[17], aaldis[49]); \ + pv_type##_write_aaldi_reg_value(18, aaldis[18], aaldis[50]); \ + pv_type##_write_aaldi_reg_value(19, aaldis[19], aaldis[51]); \ + pv_type##_write_aaldi_reg_value(20, aaldis[20], aaldis[52]); \ + 
pv_type##_write_aaldi_reg_value(21, aaldis[21], aaldis[53]); \ + pv_type##_write_aaldi_reg_value(22, aaldis[22], aaldis[54]); \ + pv_type##_write_aaldi_reg_value(23, aaldis[23], aaldis[55]); \ + pv_type##_write_aaldi_reg_value(24, aaldis[24], aaldis[56]); \ + pv_type##_write_aaldi_reg_value(25, aaldis[25], aaldis[57]); \ + pv_type##_write_aaldi_reg_value(26, aaldis[26], aaldis[58]); \ + pv_type##_write_aaldi_reg_value(27, aaldis[27], aaldis[59]); \ + pv_type##_write_aaldi_reg_value(28, aaldis[28], aaldis[60]); \ + pv_type##_write_aaldi_reg_value(29, aaldis[29], aaldis[61]); \ + pv_type##_write_aaldi_reg_value(30, aaldis[30], aaldis[62]); \ + pv_type##_write_aaldi_reg_value(31, aaldis[31], aaldis[63]); \ +}) +#define NATIVE_SET_ALL_AALDIS(aaldis) \ + PREFIX_SET_ALL_AALDIS(NATIVE, native, aaldis) + +#define PREFIX_SET_ALL_AALDAS(PV_TYPE, pv_type, aaldas_p) \ +({ \ + register u32 *aaldas = (u32 *)(aaldas_p); \ + \ + pv_type##_write_aaldas_reg_value(0, aaldas[0], aaldas[8]); \ + pv_type##_write_aaldas_reg_value(4, aaldas[1], aaldas[9]); \ + pv_type##_write_aaldas_reg_value(8, aaldas[2], aaldas[10]); \ + pv_type##_write_aaldas_reg_value(12, aaldas[3], aaldas[11]); \ + pv_type##_write_aaldas_reg_value(16, aaldas[4], aaldas[12]); \ + pv_type##_write_aaldas_reg_value(20, aaldas[5], aaldas[13]); \ + pv_type##_write_aaldas_reg_value(24, aaldas[6], aaldas[14]); \ + pv_type##_write_aaldas_reg_value(28, aaldas[7], aaldas[15]); \ +}) + +/* + * It's taken that aasr was get earlier(from get_aau_context caller) + * and comparison with aasr.iab was taken. 
+ */ +#define PREFIX_GET_AAU_CONTEXT(PV_TYPE, pv_type, ISET, iset, aau_context) \ +({ \ + /* get registers, which describe arrays in APB operations */ \ + e2k_aasr_t aasr = (aau_context)->aasr; \ + \ + /* get descriptors & auxiliary registers */ \ + if (AS(aasr).iab) \ + PV_TYPE##_GET_ARRAY_DESCRIPTORS_##ISET(aau_context); \ + \ + /* get synchronous part of APB */ \ + if (AS(aasr).stb) \ + PV_TYPE##_GET_SYNCHRONOUS_PART_##ISET(aau_context); \ +}) +#define PREFIX_GET_AAU_CONTEXT_V2(PV_TYPE, pv_type, aau_context) \ + PREFIX_GET_AAU_CONTEXT(PV_TYPE, pv_type, V2, v2, aau_context) +#define PREFIX_GET_AAU_CONTEXT_V5(PV_TYPE, pv_type, aau_context) \ + PREFIX_GET_AAU_CONTEXT(PV_TYPE, pv_type, V5, v5, aau_context) +#define NATIVE_GET_AAU_CONTEXT_V2(aau_context) \ + PREFIX_GET_AAU_CONTEXT_V2(NATIVE, native, aau_context) +#define NATIVE_GET_AAU_CONTEXT_V5(aau_context) \ + PREFIX_GET_AAU_CONTEXT_V5(NATIVE, native, aau_context) +#define NATIVE_GET_AAU_CONTEXT(aau_context) \ +({ \ + if (IS_AAU_ISET_V5()) { \ + NATIVE_GET_AAU_CONTEXT_V5(aau_context); \ + } else if (IS_AAU_ISET_V2()) { \ + NATIVE_GET_AAU_CONTEXT_V2(aau_context); \ + } else if (IS_AAU_ISET_GENERIC()) { \ + machine.get_aau_context(aau_context); \ + } else { \ + BUILD_BUG_ON(true); \ + } \ +}) + +/* + * It's taken that comparison with aasr.iab was taken and assr + * will be set later. 
+ */ +#define PREFIX_SET_AAU_CONTEXT(PV_TYPE, pv_type, aau_context) \ +do { \ + const e2k_aau_t *const aau = (aau_context); \ + /* retrieve common APB status register */\ + e2k_aasr_t aasr = aau->aasr; \ + \ + /* prefetch data to restore */ \ + if (AS(aasr).stb) \ + prefetch_nospec_range(aau->aastis, sizeof(aau->aastis) + \ + sizeof(aau->aasti_tags)); \ + if (AS(aasr).iab) \ + prefetch_nospec_range(aau->aainds, sizeof(aau->aainds) + \ + sizeof(aau->aaind_tags) + sizeof(aau->aaincrs) + \ + sizeof(aau->aaincr_tags) + sizeof(aau->aads)); \ + if (AAU_STOPPED(aasr)) \ + prefetch_nospec_range(aau->aaldi, sizeof(aau->aaldi)); \ + \ + /* Make sure prefetches are issued */ \ + barrier(); \ + \ + /* set synchronous part of APB */ \ + if (AS(aasr).stb) \ + pv_type##_set_synchronous_part(aau); \ + \ + /* set descriptors & auxiliary registers */ \ + if (AS(aasr).iab) \ + pv_type##_set_array_descriptors(aau); \ +} while (0) +#define NATIVE_SET_AAU_CONTEXT(aau_context) \ + PREFIX_SET_AAU_CONTEXT(NATIVE, native, aau_context) + +#define PREFIX_SAVE_AALDAS(PV_TYPE, pv_type, aaldas_p) \ +({ \ + register u32 *aaldas = (u32 *)aaldas_p; \ + \ + pv_type##_read_aaldas_reg_value(0, &aaldas[0], &aaldas[8]); \ + pv_type##_read_aaldas_reg_value(4, &aaldas[1], &aaldas[9]); \ + pv_type##_read_aaldas_reg_value(8, &aaldas[2], &aaldas[10]); \ + pv_type##_read_aaldas_reg_value(12, &aaldas[3], &aaldas[11]); \ + pv_type##_read_aaldas_reg_value(16, &aaldas[4], &aaldas[12]); \ + pv_type##_read_aaldas_reg_value(20, &aaldas[5], &aaldas[13]); \ + pv_type##_read_aaldas_reg_value(24, &aaldas[6], &aaldas[14]); \ + pv_type##_read_aaldas_reg_value(28, &aaldas[7], &aaldas[15]); \ +}) + +#define NATIVE_SAVE_AALDAS(aaldas_p) \ + PREFIX_SAVE_AALDAS(NATIVE, native, aaldas_p) + +#define PREFIX_SAVE_AAFSTR(PV_TYPE, pv_type, aau_context) \ +({ \ + (aau_context)->aafstr = pv_type##_read_aafstr_reg_value(); \ +}) + +#define NATIVE_SAVE_AAFSTR(aau_context) \ + PREFIX_SAVE_AAFSTR(NATIVE, native, aau_context) + +#define 
PREFIX_SAVE_AAU_REGS_FOR_PTRACE(PV_TYPE, pv_type, pt_regs, ti) \ +({ \ + e2k_aau_t *__aau_context = (pt_regs)->aau_context; \ + if (__aau_context) { \ + if (machine.native_iset_ver < E2K_ISET_V6) \ + PV_TYPE##_SAVE_AALDIS(__aau_context->aaldi); \ + PV_TYPE##_SAVE_AALDAS(ti->aalda); \ + PV_TYPE##_SAVE_AAFSTR(__aau_context); \ + } \ +}) + +#define NATIVE_SAVE_AAU_REGS_FOR_PTRACE(pt_regs, ti) \ + PREFIX_SAVE_AAU_REGS_FOR_PTRACE(NATIVE, native, pt_regs, ti) + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* It is pure guest kernel without paravirtualization */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* It is paravirtualized host and guest kernel */ +#include +#else /* ! CONFIG_KVM_GUEST_KERNEL && ! CONFIG_PARAVIRT_GUEST */ +/* native kernel without virtualization */ +/* or native host kernel with virtualization support */ + +static __always_inline u32 read_aasr_reg_value(void) +{ + return native_read_aasr_reg_value(); +} +static __always_inline void write_aasr_reg_value(u32 reg_value) +{ + native_write_aasr_reg_value(reg_value); +} +static inline u32 read_aafstr_reg_value(void) +{ + return native_read_aafstr_reg_value(); +} +static inline void write_aafstr_reg_value(u32 reg_value) +{ + native_write_aafstr_reg_value(reg_value); +} + +static __always_inline e2k_aasr_t read_aasr_reg(void) +{ + return native_read_aasr_reg(); +} +static __always_inline void write_aasr_reg(e2k_aasr_t aasr) +{ + native_write_aasr_reg(aasr); +} +static inline void read_aaldm_reg(e2k_aaldm_t *aaldm) +{ + native_read_aaldm_reg(aaldm); +} +static inline void write_aaldm_reg(e2k_aaldm_t *aaldm) +{ + native_write_aaldm_reg(aaldm); +} +static inline void read_aaldv_reg(e2k_aaldv_t *aaldv) +{ + native_read_aaldv_reg(aaldv); +} +static inline void write_aaldv_reg(e2k_aaldv_t *aaldv) +{ + native_write_aaldv_reg(aaldv); +} + +#define clear_apb() native_clear_apb() + +#ifdef CONFIG_USE_AAU +# define SAVE_AAU_REGS_FOR_PTRACE(__regs, ti) \ + NATIVE_SAVE_AAU_REGS_FOR_PTRACE(__regs, ti) +#else +# define 
SAVE_AAU_REGS_FOR_PTRACE(__regs, ti) +#endif + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#define SWITCH_GUEST_AAU_AASR(aasr, aau_context, do_switch) \ +({ \ + if (do_switch) { \ + e2k_aasr_t aasr_worst_case; \ + AW(aasr_worst_case) = 0; \ + AS(aasr_worst_case).stb = 1; \ + AS(aasr_worst_case).iab = 1; \ + AS(aasr_worst_case).lds = AASR_STOPPED; \ + (aau_context)->guest_aasr = *(aasr); \ + *(aasr) = aasr_worst_case; \ + } \ +}) + +#define RESTORE_GUEST_AAU_AASR(aau_context, do_restore) \ +({ \ + if (do_restore) { \ + (aau_context)->aasr = (aau_context)->guest_aasr; \ + } \ +}) + +#endif /* _E2K_AAU_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/aau_regs_types.h b/arch/e2k/include/asm/aau_regs_types.h new file mode 100644 index 000000000000..f53611700cb4 --- /dev/null +++ b/arch/e2k/include/asm/aau_regs_types.h @@ -0,0 +1,178 @@ +/* + * AAU registers structures description + * + * array access descriptors (AAD0, ... , AAD31); + * initial indices (AIND0, ... , AAIND15); + * indices increment values (AAINCR0, ... , AAINCR7); + * current values of "prefetch" indices (AALDI0, ... , AALDI63); + * array prefetch initialization mask (AALDV); + * prefetch attributes (AALDA0, ... , AALDA63); + * array prefetch advance mask (AALDM); + * array access status register (AASR); + * array access fault status register (AAFSTR); + * current values of "store" indices (AASTI0, ... , AASTI15); + * store attributes (AASTA0, ... 
, AASTA15); + */ + +#ifndef _E2K_AAU_REGS_TYPES_H_ +#define _E2K_AAU_REGS_TYPES_H_ + +#include +#include + +#if CONFIG_CPU_ISET >= 5 +# define IS_AAU_ISET_V5() true +# define IS_AAU_ISET_V2() false +# define IS_AAU_ISET_GENERIC() false +#elif CONFIG_CPU_ISET >= 1 +# define IS_AAU_ISET_V2() true +# define IS_AAU_ISET_V5() false +# define IS_AAU_ISET_GENERIC() false +#elif CONFIG_CPU_ISET == 0 +# define IS_AAU_ISET_GENERIC() true +# define IS_AAU_ISET_V2() false +# define IS_AAU_ISET_V5() false +#else /* CONFIG_CPU_ISET undefined or negative */ +# warning "Undefined CPU ISET VERSION #, IS_AAU_ISET_Vx is defined dinamicaly" +# define IS_AAU_ISET_GENERIC() true +# define IS_AAU_ISET_V2() false +# define IS_AAU_ISET_V5() false +#endif /* CONFIG_CPU_ISET 0-6 */ + +/* Values for AASR.lds */ +enum { + AASR_NULL = 0, + AASR_READY = 1, + AASR_ACTIVE = 3, + AASR_STOPPED = 5 +}; +#define AAU_AASR_STB 0x20 +#define AAU_AASR_IAB 0x40 +typedef struct e2k_aasr_fields { + u32 reserved : 5; /* [4:0] */ + u32 stb : 1; /* [5:5] */ + u32 iab : 1; /* [6:6] */ + u32 lds : 3; /* [9:7] */ +} e2k_aasr_fields_t; +typedef union e2k_aasr { /* aadj quad-word */ + e2k_aasr_fields_t fields; + u32 word; +} e2k_aasr_t; + +/* Check up AAU state */ +#define AAU_NULL(aasr) (AS(aasr).lds == AASR_NULL) +#define AAU_READY(aasr) (AS(aasr).lds == AASR_READY) +#define AAU_ACTIVE(aasr) (AS(aasr).lds == AASR_ACTIVE) +#define AAU_STOPPED(aasr) (AS(aasr).lds == AASR_STOPPED) + +typedef u32 e2k_aafstr_t; + +/* Values for AAD.tag */ +enum { + AAD_AAUNV = 0, + AAD_AAUDT = 1, + AAD_AAUET = 2, + AAD_AAUAP = 4, + AAD_AAUSAP = 5, + AAD_AAUDS = 6 +}; + +/* We are not using AAD SAP format here + * so it is not described in the structure */ +typedef union e2k_aadj_lo_fields { + struct { + u64 ap_base : E2K_VA_SIZE; /* [E2K_VA_MSB:0] */ + u64 unused1 : 53 - E2K_VA_MSB; /* [53:48] */ + u64 tag : 3; /* [56:54] */ + u64 mb : 1; /* [57] */ + u64 ed : 1; /* [58] */ + u64 rw : 2; /* [60:59] */ + u64 unused2 : 3; /* [63:60] */ + 
}; + struct { + u64 sap_base : 32; + u64 psl : 16; + u64 __pad : 16; + }; +} e2k_aadj_lo_fields_t; +typedef struct e2k_aadj_hi_fields { + u64 unused : 32; + u64 size : 32; /* [63:32] */ +} e2k_aadj_hi_fields_t; +typedef union e2k_aadj { /* aadj quad-word */ + struct { + e2k_aadj_lo_fields_t lo; + e2k_aadj_hi_fields_t hi; + } fields; + struct { + u64 lo; + u64 hi; + } word; +} e2k_aadj_t; + +/* Possible values for aalda.exc field */ +enum { + AALDA_EIO = 1, + AALDA_EPM = 2, + AALDA_EPMSI = 3 +}; + +union e2k_u64_struct { /* aaldv,aaldm,aasta_restore dword */ + struct { + u32 lo; /* read/write on left channel */ + u32 hi; /* read/write on right channel */ + }; + u64 word; +}; +typedef union e2k_u64_struct e2k_aaldv_t; +typedef union e2k_u64_struct e2k_aaldm_t; + +typedef struct e2k_aalda_fields { + u8 exc: 2; + u8 cincr: 1; + u8 unused1: 1; + u8 root: 1; + u8 unused2: 3; +} e2k_aalda_fields_t; + +typedef union e2k_aalda_struct { + e2k_aalda_fields_t fields; + u8 word; +} e2k_aalda_t; + +#define AASTIS_REGS_NUM 16 +#define AASTIS_TAG_no AASTIS_REGS_NUM +#define AAINDS_REGS_NUM 16 +#define AAINDS_TAG_no AAINDS_REGS_NUM +#define AAINCRS_REGS_NUM 8 +#define AAINCRS_TAG_no AAINCRS_REGS_NUM +#define AADS_REGS_NUM 32 +#define AALDIS_REGS_NUM 64 +#define AALDAS_REGS_NUM 64 + +/* + * For virtualization, aasr might be switched to worst-case scenario (lds = AAU_STOPPED, + * iab = 1, stb = 1). 
In that case, real aasr will be saved to guest_aasr + */ +typedef struct e2k_aau_context { + e2k_aasr_t aasr; + e2k_aasr_t guest_aasr; + e2k_aafstr_t aafstr; + e2k_aaldm_t aaldm; + e2k_aaldv_t aaldv; + + /* Synchronous part */ + u64 aastis[AASTIS_REGS_NUM]; + u32 aasti_tags; + + /* Asynchronous part */ + u64 aainds[AAINDS_REGS_NUM]; + u32 aaind_tags; + u64 aaincrs[AAINCRS_REGS_NUM]; + u32 aaincr_tags; + e2k_aadj_t aads[AADS_REGS_NUM]; + /* %aaldi [synonim for %aaldsi] must be saved since iset v6 */ + u64 aaldi[AALDIS_REGS_NUM]; +} e2k_aau_t; + +#endif /* _E2K_AAU_REGS_TYPES_H_ */ diff --git a/arch/e2k/include/asm/acenv.h b/arch/e2k/include/asm/acenv.h new file mode 100644 index 000000000000..c6fb4ab6a518 --- /dev/null +++ b/arch/e2k/include/asm/acenv.h @@ -0,0 +1,10 @@ + +#ifndef _ASM_E2K_ACENV_H_ +#define _ASM_E2K_ACENV_H_ + +#include +#define ACPI_FLUSH_CPU_CACHE() write_back_CACHE_L12() +#include + +#endif /* _ASM_E2K_ACENV_H_ */ + diff --git a/arch/e2k/include/asm/acpi.h b/arch/e2k/include/asm/acpi.h new file mode 100644 index 000000000000..7b5bcd22329c --- /dev/null +++ b/arch/e2k/include/asm/acpi.h @@ -0,0 +1,6 @@ +#ifndef __ASM_ACPI_H +#define __ASM_ACPI_H + +#include + +#endif diff --git a/arch/e2k/include/asm/alternative-asm.h b/arch/e2k/include/asm/alternative-asm.h new file mode 100644 index 000000000000..c68bba475174 --- /dev/null +++ b/arch/e2k/include/asm/alternative-asm.h @@ -0,0 +1,193 @@ +#ifndef _ASM_E2K_ALTERNATIVE_ASM_H +#define _ASM_E2K_ALTERNATIVE_ASM_H + +#ifdef __ASSEMBLY__ + +/* + * Check the length of an instruction sequence, must be a multiple of 8. + */ +.macro alt_len_check start,end + .if ( \end - \start ) % 8 + .error "cpu alternatives instructions length is not divisible by 8\n" + .endif +.endm + +/* + * Issue one struct alt_instr descriptor entry (need to put it into + * the section .altinstructions, see below). This entry contains + * enough information for the alternatives patching code to patch an + * instruction. 
See apply_alternatives(). + */ +.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature + .align 4 + .word \orig_start - . + .word \alt_start - . + .short \orig_end - \orig_start + .short \alt_end - \alt_start + .short \feature +.endm + +.macro alt_pad_64bytes bytes, check + .if ( \bytes >= \check ) + .fill 1, 4, 0x00000070 + .fill 15, 4, 0 + .endif +.endm + +/* + * Fill up @bytes with nops. + */ +.macro alt_pad bytes + .if ( \bytes >= 576 ) + ibranch . + \bytes + alt_pad_fill \bytes - 16 + .else + alt_pad_64bytes \bytes, 512 + alt_pad_64bytes \bytes, 448 + alt_pad_64bytes \bytes, 384 + alt_pad_64bytes \bytes, 320 + alt_pad_64bytes \bytes, 256 + alt_pad_64bytes \bytes, 192 + alt_pad_64bytes \bytes, 128 + alt_pad_64bytes \bytes, 64 + .if ( \bytes % 64 ) == 56 + .fill 1, 4, 0x00000060 + .fill 13, 4, 0 + .endif + .if ( \bytes % 64 ) == 48 + .fill 1, 4, 0x00000050 + .fill 11, 4, 0 + .endif + .if ( \bytes % 64 ) == 40 + .fill 1, 4, 0x00000040 + .fill 9, 4, 0 + .endif + .if ( \bytes % 64 ) == 32 + .fill 1, 4, 0x00000030 + .fill 7, 4, 0 + .endif + .if ( \bytes % 64 ) == 24 + .fill 1, 4, 0x00000020 + .fill 5, 4, 0 + .endif + .if ( \bytes % 64 ) == 16 + .fill 1, 4, 0x00000010 + .fill 3, 4, 0 + .endif + .if ( \bytes % 64 ) == 8 + .fill 2, 4, 0 + .endif + .endif +.endm + +/* + * Define an alternative between two instructions. If @feature is + * present, early code in apply_alternatives() replaces @oldinstr with + * @newinstr. + */ +.macro ALTERNATIVE oldinstr, newinstr, feature + .pushsection .altinstr_replacement,"ax" +770: \newinstr +771: .popsection +772: \oldinstr +773: alt_len_check 770b, 771b + alt_len_check 772b, 773b + alt_pad ( ( 771b - 770b ) - ( 773b - 772b ) ) +774: .pushsection .altinstructions,"a" + alt_entry 772b, 774b, 770b, 771b, \feature + .popsection +.endm + +/* + * Define an alternative between three instructions. 
+ */ +.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2 + .pushsection .altinstr_replacement,"ax" +770: \newinstr1 +771: \newinstr2 +772: .popsection +773: \oldinstr +774: alt_len_check 770b, 771b + alt_len_check 771b, 772b + alt_len_check 773b, 774b + .if ( 771b - 770b > 772b - 771b ) + alt_pad ( ( 771b - 770b ) - ( 774b - 773b ) ) + .else + alt_pad ( ( 772b - 771b ) - ( 774b - 773b ) ) + .endif +775: .pushsection .altinstructions,"a" + alt_entry 773b, 775b, 770b, 771b,\feature1 + alt_entry 773b, 775b, 771b, 772b,\feature2 + .popsection +.endm + + +/* + * bug 110687: we cannot pass e2k wide instructions to GNU assembler .macro + * as a parameter in a sane way so use the following in complex cases. + * How to use: + * + * 1) There is one alternative + * + * ALTERNATIVE_1_ALTINSTR + * < alt. instruction > + * ALTERNATIVE_2_OLDINSTR + * < initial instruction > + * ALTERNATIVE_3_FEATURE + * + * 2) There are two alternatives + * + * ALTERNATIVE_1_ALTINSTR + * "< first alt. instruction >" + * ALTERNATIVE_2_ALTINSTR2 + * "< second alt. 
instruction >" + * ALTERNATIVE_3_OLDINSTR2 + * "< initial instruction >" + * ALTERNATIVE_4_FEATURE2(feature1, feature2) + */ +#define ALTERNATIVE_1_ALTINSTR \ + .pushsection .altinstr_replacement,"ax" ; \ + 770: + +#define ALTERNATIVE_2_OLDINSTR \ + 771: ; \ + .popsection ; \ + 772: + +#define ALTERNATIVE_3_FEATURE(feature) \ + 773: ; \ + alt_len_check 770b, 771b ; \ + alt_len_check 772b, 773b ; \ + alt_pad ( ( 771b - 770b ) - ( 773b - 772b ) ) ; \ + 774: ; \ + .pushsection .altinstructions,"a" ; \ + alt_entry 772b, 774b, 770b, 771b, feature ; \ + .popsection + +#define ALTERNATIVE_2_ALTINSTR2 \ + 771: + +#define ALTERNATIVE_3_OLDINSTR2 \ + 772: ; \ + .popsection ; \ + 773: + +#define ALTERNATIVE_4_FEATURE2(feature1, feature2) \ + 774: ; \ + alt_len_check 770b, 771b ; \ + alt_len_check 771b, 772b ; \ + alt_len_check 773b, 774b ; \ + .if ( 771b - 770b > 772b - 771b ) ; \ + alt_pad ( ( 771b - 770b ) - ( 774b - 773b ) ) ; \ + .else ; \ + alt_pad ( ( 772b - 771b ) - ( 774b - 773b ) ) ; \ + .endif ; \ + 775: ; \ + .pushsection .altinstructions,"a" ; \ + alt_entry 773b, 775b, 770b, 771b, feature1 ; \ + alt_entry 773b, 775b, 771b, 772b, feature2 ; \ + .popsection + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_E2K_ALTERNATIVE_ASM_H */ diff --git a/arch/e2k/include/asm/alternative.h b/arch/e2k/include/asm/alternative.h new file mode 100644 index 000000000000..e3b33dcc14bb --- /dev/null +++ b/arch/e2k/include/asm/alternative.h @@ -0,0 +1,265 @@ +#ifndef _ASM_E2K_ALTERNATIVE_H +#define _ASM_E2K_ALTERNATIVE_H + +#ifndef __ASSEMBLY__ + +#include + +struct alt_instr { + s32 instr_offset; /* original instruction */ + s32 repl_offset; /* offset to replacement instruction */ + u16 instrlen; /* length of original instruction */ + u16 replacementlen; /* length of new instruction */ + u16 facility; /* facility bit set for replacement */ +} __aligned(4); + +void apply_alternative_instructions(void); +void apply_alternatives(struct alt_instr *start, struct alt_instr *end); + +/* + * An 
example when first alternative instruction is the biggest, + * and original instruction is the smallest. + * + * Original instruction is padded statically at compile time, + * while alternative instructions are padded if necessary in + * runtime when patching them in. + * + * |661: |662: |663: + * +-----------+---------+-----------------+ + * | oldinstr | oldinstr_padding | + * | +---------+-----------------+ + * | | ibranch if length >= 576 | + * | | 64-bytes NOPs otherwise | + * +-----------+---------+-----------------+ + * ^^^^^^ static padding ^^^^^ + * + * .altinstr_replacement section + * +-----------+---------+-----------------+ + * |6641: |6651: + * | alternative instr 1 | + * +-----------+---------+- - - - - - - - -+ + * |6642: |6652: | + * | alternative instr 2 | padding | + * +-----------+---------+- - - - - - - - -+ + * ^runtime padding^ + * + * + * 'struct alt_instr' holds details about how and when + * instructions must be replaced: + * + * .altinstructions section + * +----------------------------+ + * | alt_instr entries for each | + * | alternative instruction | + * +----------------------------+ + */ + +#define b_altinstr(num) "664"#num +#define e_altinstr(num) "665"#num + +#define e_oldinstr_pad_end "663" +#define oldinstr_len "662b-661b" +#define oldinstr_total_len e_oldinstr_pad_end"b-661b" +#define altinstr_len(num) e_altinstr(num)"b-"b_altinstr(num)"b" +#define oldinstr_pad_len(num) \ + "-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \ + "((" altinstr_len(num) ")-(" oldinstr_len "))" + +#define INSTR_LEN_SANITY_CHECK(len) \ + ".if (" len ") %% 8\n" \ + "\t.error \"cpu alternatives instructions length is not divisible by 8\"\n" \ + ".endif\n" + +#define OLDINSTR_PAD_64_BYTES(num, check) \ + ".if " oldinstr_pad_len(num) " >= " __stringify(check) "\n" \ + "\t.fill 1, 4, 0x00000070\n" \ + "\t.fill 15, 4, 0\n" \ + ".endif\n" + +#define OLDINSTR_PADDING(oldinstr, num) \ + ".if " oldinstr_pad_len(num) " >= 576\n" \ + "\tibranch " 
e_oldinstr_pad_end "f\n" \ + "6620:\n" \ + "\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 8, 8, 0\n" \ + ".else\n" \ + OLDINSTR_PAD_64_BYTES(num, 512) \ + OLDINSTR_PAD_64_BYTES(num, 448) \ + OLDINSTR_PAD_64_BYTES(num, 384) \ + OLDINSTR_PAD_64_BYTES(num, 320) \ + OLDINSTR_PAD_64_BYTES(num, 256) \ + OLDINSTR_PAD_64_BYTES(num, 192) \ + OLDINSTR_PAD_64_BYTES(num, 128) \ + OLDINSTR_PAD_64_BYTES(num, 64) \ + ".if ( " oldinstr_pad_len(num) " %% 64 ) == 56\n" \ + "\t.fill 1, 4, 0x00000060\n" \ + "\t.fill 13, 4, 0\n" \ + ".endif\n" \ + ".if ( " oldinstr_pad_len(num) " %% 64 ) == 48\n" \ + "\t.fill 1, 4, 0x00000050\n" \ + "\t.fill 11, 4, 0\n" \ + ".endif\n" \ + ".if ( " oldinstr_pad_len(num) " %% 64 ) == 40\n" \ + "\t.fill 1, 4, 0x00000040\n" \ + "\t.fill 9, 4, 0\n" \ + ".endif\n" \ + ".if ( " oldinstr_pad_len(num) " %% 64 ) == 32\n" \ + "\t.fill 1, 4, 0x00000030\n" \ + "\t.fill 7, 4, 0\n" \ + ".endif\n" \ + ".if ( " oldinstr_pad_len(num) " %% 64 ) == 24\n" \ + "\t.fill 1, 4, 0x00000020\n" \ + "\t.fill 5, 4, 0\n" \ + ".endif\n" \ + ".if ( " oldinstr_pad_len(num) " %% 64 ) == 16\n" \ + "\t.fill 1, 4, 0x00000010\n" \ + "\t.fill 3, 4, 0\n" \ + ".endif\n" \ + ".if ( " oldinstr_pad_len(num) " %% 64 ) == 8\n" \ + "\t.fill 2, 4, 0\n" \ + ".endif\n" \ + ".endif\n" + +#define OLDINSTR(oldinstr, num) \ + "661:\n\t" oldinstr "\n662:\n" \ + OLDINSTR_PADDING(oldinstr, num) \ + e_oldinstr_pad_end ":\n" \ + INSTR_LEN_SANITY_CHECK(oldinstr_len) + +#define OLDINSTR_2(oldinstr, num1, num2) \ + "661:\n\t" oldinstr "\n662:\n" \ + ".if " altinstr_len(num1) " < " altinstr_len(num2) "\n" \ + OLDINSTR_PADDING(oldinstr, num2) \ + ".else\n" \ + OLDINSTR_PADDING(oldinstr, num1) \ + ".endif\n" \ + e_oldinstr_pad_end ":\n" \ + INSTR_LEN_SANITY_CHECK(oldinstr_len) + +#define ALTINSTR_ENTRY(facility, num) \ + "\t.align 4\n" \ + "\t.word 661b - .\n" /* old instruction */ \ + "\t.word " b_altinstr(num)"b - .\n" /* alt instruction */ \ + "\t.short " oldinstr_total_len "\n" /* source len */ \ + 
"\t.short " altinstr_len(num) "\n" /* alt instruction len */ \ + "\t.short " __stringify(facility) "\n" /* facility bit */ + +#define ALTINSTR_REPLACEMENT(altinstr, num) /* replacement */ \ + b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" \ + INSTR_LEN_SANITY_CHECK(altinstr_len(num)) + +/* alternative assembly primitive: */ +#define ALTERNATIVE(oldinstr, altinstr, facility) \ + ".pushsection .altinstr_replacement, \"ax\"\n" \ + ALTINSTR_REPLACEMENT(altinstr, 1) \ + ".popsection\n" \ + OLDINSTR(oldinstr, 1) \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(facility, 1) \ + ".popsection\n" + +#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\ + ".pushsection .altinstr_replacement, \"ax\"\n" \ + ALTINSTR_REPLACEMENT(altinstr1, 1) \ + ALTINSTR_REPLACEMENT(altinstr2, 2) \ + ".popsection\n" \ + OLDINSTR_2(oldinstr, 1, 2) \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(facility1, 1) \ + ALTINSTR_ENTRY(facility2, 2) \ + ".popsection\n" + +/* + * Alternative instructions for different CPU types or capabilities. + * + * This allows to use optimized instructions even on generic binary + * kernels. + * + * oldinstr is padded with jump and nops at compile time if altinstr is + * longer. altinstr is padded with jump and nops at run-time during patching. + */ +#define alternative(oldinstr, altinstr, _facility, clobbers...) \ + _Pragma("no_asm_inline") \ + asm volatile (ALTERNATIVE(oldinstr, altinstr, %[facility]) \ + :: [facility] "i" (_facility) : clobbers) + +#define alternative_2(oldinstr, altinstr1, _facility1, altinstr2, _facility2) \ + _Pragma("no_asm_inline") \ + asm volatile (ALTERNATIVE_2(oldinstr, altinstr1, %[facility1], \ + altinstr2, %[facility2]) \ + : \ + : [facility1] "i" (_facility1), \ + [facility2] "i" (_facility2) \ + : clobbers) + +/* + * How to use: + * + * 1) There is one alternative + * + * asm volatile ( + * ALTERNATIVE_1_ALTINSTR + * "< alt. 
instruction >" + * ALTERNATIVE_2_OLDINSTR + * "< initial instruction >" + * ALTERNATIVE_3_FEATURE(feature) + * ) + * + * 2) There are two alternatives + * + * asm volatile ( + * ALTERNATIVE_1_ALTINSTR + * "< first alt. instruction >" + * ALTERNATIVE_2_ALTINSTR2 + * "< second alt. instruction >" + * ALTERNATIVE_3_OLDINSTR2 + * "< initial instruction >" + * ALTERNATIVE_4_FEATURE2(feature1, feature2) + * ) + */ +#define ALTERNATIVE_1_ALTINSTR \ + ".pushsection .altinstr_replacement, \"ax\"\n" \ + b_altinstr(1)":\n" + +#define ALTERNATIVE_2_OLDINSTR \ + "\n" e_altinstr(1) ":\n" \ + INSTR_LEN_SANITY_CHECK(altinstr_len(1)) \ + ".popsection\n" \ + "661:\n" + +#define ALTERNATIVE_3_FEATURE(facility) \ + "\n662:\n" \ + OLDINSTR_PADDING(oldinstr, 1) \ + e_oldinstr_pad_end ":\n" \ + INSTR_LEN_SANITY_CHECK(oldinstr_len) \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(facility, 1) \ + ".popsection\n" + +#define ALTERNATIVE_2_ALTINSTR2 \ + "\n" e_altinstr(1) ":\n" \ + INSTR_LEN_SANITY_CHECK(altinstr_len(1)) \ + b_altinstr(2)":\n" + +#define ALTERNATIVE_3_OLDINSTR2 \ + "\n" e_altinstr(2) ":\n" \ + INSTR_LEN_SANITY_CHECK(altinstr_len(2)) \ + ".popsection\n" \ + "661:\n" + +#define ALTERNATIVE_4_FEATURE2(facility1, facility2) \ + "\n662:\n" \ + ".if " altinstr_len(1) " < " altinstr_len(2) "\n" \ + OLDINSTR_PADDING(oldinstr, 2) \ + ".else\n" \ + OLDINSTR_PADDING(oldinstr, 1) \ + ".endif\n" \ + e_oldinstr_pad_end ":\n" \ + INSTR_LEN_SANITY_CHECK(oldinstr_len) \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(facility1, 1) \ + ALTINSTR_ENTRY(facility2, 2) \ + ".popsection\n" + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_E2K_ALTERNATIVE_H */ diff --git a/arch/e2k/include/asm/apic.h b/arch/e2k/include/asm/apic.h new file mode 100644 index 000000000000..8d6081a57545 --- /dev/null +++ b/arch/e2k/include/asm/apic.h @@ -0,0 +1,46 @@ +#ifndef __ASM_E2K_APIC_H +#define __ASM_E2K_APIC_H + +#ifdef __KERNEL__ +#include +#include +#include +#include + +#ifndef 
__ASSEMBLY__ + +/* + * Basic functions accessing APICs. + */ +static inline void arch_apic_write(unsigned int reg, unsigned int v) +{ + boot_writel(v, (void __iomem *) (APIC_DEFAULT_PHYS_BASE + reg)); +} + +static inline unsigned int arch_apic_read(unsigned int reg) +{ + return boot_readl((void __iomem *) (APIC_DEFAULT_PHYS_BASE + reg)); +} + +static inline void boot_arch_apic_write(unsigned int reg, unsigned int v) +{ + arch_apic_write(reg, v); +} + +static inline unsigned int boot_arch_apic_read(unsigned int reg) +{ + return arch_apic_read(reg); +} + +#if IS_ENABLED(CONFIG_RDMA) || IS_ENABLED(CONFIG_RDMA_SIC) || \ + IS_ENABLED(CONFIG_RDMA_NET) +extern int rdma_apic_init; +extern int rdma_node[]; +#endif + +#endif /* !(__ASSEMBLY__) */ + +#include + +#endif /* __KERNEL__ */ +#endif /* __ASM_E2K_APIC_H */ diff --git a/arch/e2k/include/asm/apic_regs.h b/arch/e2k/include/asm/apic_regs.h new file mode 100644 index 000000000000..5fbabfc75e5b --- /dev/null +++ b/arch/e2k/include/asm/apic_regs.h @@ -0,0 +1,276 @@ +#ifndef __ASM_APIC_REGS_H +#define __ASM_APIC_REGS_H + + +#ifndef __ASSEMBLY__ + +/* + * the local APIC register structure, memory mapped. Not terribly well + * tested, but we might eventually use this one in the future - the + * problem why we cannot use it right now is the P5 APIC, it has an + * errata which cannot take 8-bit reads and writes, only 32-bit ones ... 
+ */ +#define u32 unsigned int + +struct local_apic { + +/*000*/ struct { u32 __reserved[4]; } __reserved_01; + +/*010*/ struct { u32 __reserved_1 : 8, + boot_strap : 1, + __reserved_2 : 2, + apic_enable : 1, + __reserved_3 : 20; + u32 __reserved[3]; + } bsp; + +/*020*/ struct { /* APIC ID Register */ + u32 __reserved_1 : 24, + phys_apic_id : 4, + __reserved_2 : 4; + u32 __reserved[3]; + } id; + +/*030*/ const + struct { /* APIC Version Register */ + u32 version : 8, + __reserved_1 : 8, + max_lvt : 8, + __reserved_2 : 8; + u32 __reserved[3]; + } version; + +/*040*/ struct { u32 __reserved[4]; } __reserved_03; + +/*050*/ struct { u32 __reserved[4]; } __reserved_04; + +/*060*/ struct { u32 __reserved[4]; } __reserved_05; + +/*070*/ struct { u32 __reserved[4]; } __reserved_06; + +/*080*/ struct { /* Task Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } tpr; + +/*090*/ const + struct { /* Arbitration Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } apr; + +/*0A0*/ const + struct { /* Processor Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } ppr; + +/*0B0*/ struct { /* End Of Interrupt Register */ + u32 eoi; + u32 __reserved[3]; + } eoi; + +/*0C0*/ struct { u32 __reserved[4]; } __reserved_07; + +/*0D0*/ struct { /* Logical Destination Register */ + u32 __reserved_1 : 24, + logical_dest : 8; + u32 __reserved_2[3]; + } ldr; + +/*0E0*/ struct { /* Destination Format Register */ + u32 __reserved_1 : 28, + model : 4; + u32 __reserved_2[3]; + } dfr; + +/*0F0*/ struct { /* Spurious Interrupt Vector Register */ + u32 spurious_vector : 8, + apic_enabled : 1, + focus_cpu : 1, + __reserved_2 : 22; + u32 __reserved_3[3]; + } svr; + +/*100*/ struct { /* In Service Register */ +/*170*/ u32 bitfield; + u32 __reserved[3]; + } isr [8]; + +/*180*/ struct { /* Trigger Mode Register */ +/*1F0*/ u32 bitfield; + u32 __reserved[3]; + } tmr [8]; + +/*200*/ struct { /* 
Interrupt Request Register */ +/*270*/ u32 bitfield; + u32 __reserved[3]; + } irr [8]; + +/*280*/ union { /* Error Status Register */ + struct { + u32 send_cs_error : 1, + receive_cs_error : 1, + send_accept_error : 1, + receive_accept_error : 1, + __reserved_1 : 1, + send_illegal_vector : 1, + receive_illegal_vector : 1, + illegal_register_address : 1, + __reserved_2 : 24; + u32 __reserved_3[3]; + } error_bits; + struct { + u32 errors; + u32 __reserved_3[3]; + } all_errors; + } esr; + +/*290*/ struct { u32 __reserved[4]; } __reserved_08; + +/*2A0*/ struct { u32 __reserved[4]; } __reserved_09; + +/*2B0*/ struct { u32 __reserved[4]; } __reserved_10; + +/*2C0*/ struct { u32 __reserved[4]; } __reserved_11; + +/*2D0*/ struct { u32 __reserved[4]; } __reserved_12; + +/*2E0*/ struct { u32 __reserved[4]; } __reserved_13; + +/*2F0*/ struct { u32 __reserved[4]; } __reserved_14; + +/*300*/ struct { /* Interrupt Command Register 1 */ + u32 vector : 8, + delivery_mode : 3, + destination_mode : 1, + delivery_status : 1, + __reserved_1 : 1, + level : 1, + trigger : 1, + __reserved_2 : 2, + shorthand : 2, + __reserved_3 : 12; + u32 __reserved_4[3]; + } icr1; + +/*310*/ struct { /* Interrupt Command Register 2 */ + union { + u32 __reserved_1 : 24, + phys_dest : 4, + __reserved_2 : 4; + u32 __reserved_3 : 24, + logical_dest : 8; + } dest; + u32 __reserved_4[3]; + } icr2; + +/*320*/ struct { /* LVT - Timer */ + u32 vector : 8, + __reserved_1 : 4, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + timer_mode : 1, + __reserved_3 : 14; + u32 __reserved_4[3]; + } lvt_timer; + +/*330*/ struct { u32 __reserved[4]; } __reserved_15; + +/*340*/ struct { /* LVT - Performance Counter */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + u32 __reserved_4[3]; + } lvt_pc; + +/*350*/ struct { /* LVT - LINT0 */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + polarity : 1, + 
remote_irr : 1, + trigger : 1, + mask : 1, + __reserved_2 : 15; + u32 __reserved_3[3]; + } lvt_lint0; + +/*360*/ struct { /* LVT - LINT1 */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + polarity : 1, + remote_irr : 1, + trigger : 1, + mask : 1, + __reserved_2 : 15; + u32 __reserved_3[3]; + } lvt_lint1; + +/*370*/ struct { /* LVT - Error */ + u32 vector : 8, + __reserved_1 : 4, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + u32 __reserved_4[3]; + } lvt_error; + +/*380*/ struct { /* Timer Initial Count Register */ + u32 initial_count; + u32 __reserved_2[3]; + } timer_icr; + +/*390*/ const + struct { /* Timer Current Count Register */ + u32 curr_count; + u32 __reserved_2[3]; + } timer_ccr; + +/*3A0*/ struct { u32 __reserved[4]; } __reserved_16; + +/*3B0*/ struct { u32 __reserved[4]; } __reserved_17; + +/*3C0*/ struct { u32 __reserved[4]; } __reserved_18; + +/*3D0*/ struct { u32 __reserved[4]; } __reserved_19; + +/*3E0*/ struct { /* Timer Divide Configuration Register */ + u32 divisor : 4, + __reserved_1 : 28; + u32 __reserved_2[3]; + } timer_dcr; + +/*3F0*/ struct { u32 __reserved[4]; } __reserved_20; +#if 0 +/*3F0*/ struct { u32 __reserved[764]; } __reserved_20; +/*FE0*/ struct { /* Vector from PIC or APIC in nmi */ + u32 nm_vector : 8, + __reserved : 24; + u32 __reserved[3]; + } nm_vect; +/*FF0*/ struct { /* Vector */ + u32 vector : 8, + __reserved_1 : 24; + u32 __reserved[3]; + } vect; +#endif +} __attribute__ ((packed)); + +#undef u32 + +#endif /* !(__ASSEMBLY__) */ + +#endif /* __ASM_APIC_REGS_H */ diff --git a/arch/e2k/include/asm/apicdef.h b/arch/e2k/include/asm/apicdef.h new file mode 100644 index 000000000000..70fc4a91f080 --- /dev/null +++ b/arch/e2k/include/asm/apicdef.h @@ -0,0 +1,9 @@ +#ifndef __ASM_E2K_APICDEF_H +#define __ASM_E2K_APICDEF_H + +#ifdef __KERNEL__ +#include +#include +#endif + +#endif /* __ASM_E2K_APICDEF_H */ diff --git a/arch/e2k/include/asm/atomic.h 
b/arch/e2k/include/asm/atomic.h new file mode 100644 index 000000000000..75b7a2056692 --- /dev/null +++ b/arch/e2k/include/asm/atomic.h @@ -0,0 +1,380 @@ +#ifndef _E2K_ATOMIC_ +#define _E2K_ATOMIC_ + +#include +#include +#include + +#define ATOMIC_INIT(i) { (i) } +#define ATOMIC64_INIT(i) { (i) } + +#define atomic_read(v) READ_ONCE((v)->counter) +#define atomic64_read(v) READ_ONCE((v)->counter) + +#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) +#define atomic64_set(v, i) WRITE_ONCE(((v)->counter), (i)) + +static inline void atomic_and(int incr, atomic_t *val) +{ + __api_atomic_op(incr, &val->counter, w, "ands", RELAXED_MB); +} + +static inline void atomic64_and(__s64 incr, atomic64_t *val) +{ + __api_atomic_op(incr, &val->counter, d, "andd", RELAXED_MB); +} + +#define atomic_andnot atomic_andnot +static inline void atomic_andnot(int incr, atomic_t *val) +{ + __api_atomic_op(incr, &val->counter, w, "andns", RELAXED_MB); +} + +#define atomic64_andnot atomic64_andnot +static inline void atomic64_andnot(__s64 incr, atomic64_t *val) +{ + __api_atomic_op(incr, &val->counter, d, "andnd", RELAXED_MB); +} + +static inline void atomic_or(int incr, atomic_t *val) +{ + __api_atomic_op(incr, &val->counter, w, "ors", RELAXED_MB); +} + +static inline void atomic64_or(__s64 incr, atomic64_t *val) +{ + __api_atomic_op(incr, &val->counter, d, "ord", RELAXED_MB); +} + +static inline void atomic_xor(int incr, atomic_t *val) +{ + __api_atomic_op(incr, &val->counter, w, "xors", RELAXED_MB); +} + +static inline void atomic64_xor(__s64 incr, atomic64_t *val) +{ + __api_atomic_op(incr, &val->counter, d, "xord", RELAXED_MB); +} + +static inline void atomic_add(int incr, atomic_t *val) +{ + __api_atomic_op(incr, &val->counter, w, "adds", RELAXED_MB); +} + +static inline void atomic64_add(__s64 incr, atomic64_t *val) +{ + __api_atomic_op(incr, &val->counter, d, "addd", RELAXED_MB); +} + +static inline void atomic_sub(int incr, atomic_t *val) +{ + __api_atomic_op(incr, &val->counter, 
w, "subs", RELAXED_MB); +} + +static inline void atomic64_sub(__s64 incr, atomic64_t *val) +{ + __api_atomic_op(incr, &val->counter, d, "subd", RELAXED_MB); +} + +#define __atomic_add_return(v, p, mem_model) \ + __api_atomic_op((int) (v), &(p)->counter, w, "adds", mem_model) +#define atomic_add_return_relaxed(v, p) __atomic_add_return((v), (p), RELAXED_MB) +#define atomic_add_return_acquire(v, p) __atomic_add_return((v), (p), ACQUIRE_MB) +#define atomic_add_return_release(v, p) __atomic_add_return((v), (p), RELEASE_MB) +#define atomic_add_return(v, p) __atomic_add_return((v), (p), STRONG_MB) +#define atomic_add_return_lock(v, p) __atomic_add_return((v), (p), LOCK_MB) + +#define __atomic64_add_return(v, p, mem_model) \ + __api_atomic_op((__s64) (v), &(p)->counter, d, "addd", mem_model) +#define atomic64_add_return_relaxed(v, p) __atomic64_add_return((v), (p), RELAXED_MB) +#define atomic64_add_return_acquire(v, p) __atomic64_add_return((v), (p), ACQUIRE_MB) +#define atomic64_add_return_release(v, p) __atomic64_add_return((v), (p), RELEASE_MB) +#define atomic64_add_return(v, p) __atomic64_add_return((v), (p), STRONG_MB) + +#define __atomic_sub_return(v, p, mem_model) \ + __api_atomic_op((int) (v), &(p)->counter, w, "subs", mem_model) +#define atomic_sub_return_relaxed(v, p) __atomic_sub_return((v), (p), RELAXED_MB) +#define atomic_sub_return_acquire(v, p) __atomic_sub_return((v), (p), ACQUIRE_MB) +#define atomic_sub_return_release(v, p) __atomic_sub_return((v), (p), RELEASE_MB) +#define atomic_sub_return(v, p) __atomic_sub_return((v), (p), STRONG_MB) + +#define __atomic64_sub_return(v, p, mem_model) \ + __api_atomic_op((__s64) (v), &(p)->counter, d, "subd", mem_model) +#define atomic64_sub_return_relaxed(v, p) __atomic64_sub_return((v), (p), RELAXED_MB) +#define atomic64_sub_return_acquire(v, p) __atomic64_sub_return((v), (p), ACQUIRE_MB) +#define atomic64_sub_return_release(v, p) __atomic64_sub_return((v), (p), RELEASE_MB) +#define atomic64_sub_return(v, p) 
__atomic64_sub_return((v), (p), STRONG_MB) + +#define __atomic_fetch_add(v, p, mem_model) \ + __api_atomic_fetch_op((int) (v), &(p)->counter, w, "adds", mem_model) +#define atomic_fetch_add_relaxed(v, p) __atomic_fetch_add((v), (p), RELAXED_MB) +#define atomic_fetch_add_acquire(v, p) __atomic_fetch_add((v), (p), ACQUIRE_MB) +#define atomic_fetch_add_release(v, p) __atomic_fetch_add((v), (p), RELEASE_MB) +#define atomic_fetch_add(v, p) __atomic_fetch_add((v), (p), STRONG_MB) + +#define __atomic64_fetch_add(v, p, mem_model) \ + __api_atomic_fetch_op((__s64) (v), &(p)->counter, d, "addd", mem_model) +#define atomic64_fetch_add_relaxed(v, p) __atomic64_fetch_add((v), (p), RELAXED_MB) +#define atomic64_fetch_add_acquire(v, p) __atomic64_fetch_add((v), (p), ACQUIRE_MB) +#define atomic64_fetch_add_release(v, p) __atomic64_fetch_add((v), (p), RELEASE_MB) +#define atomic64_fetch_add(v, p) __atomic64_fetch_add((v), (p), STRONG_MB) + +#define __atomic_fetch_sub(v, p, mem_model) \ + __api_atomic_fetch_op((int) (v), &(p)->counter, w, "subs", mem_model) +#define atomic_fetch_sub_relaxed(v, p) __atomic_fetch_sub((v), (p), RELAXED_MB) +#define atomic_fetch_sub_acquire(v, p) __atomic_fetch_sub((v), (p), ACQUIRE_MB) +#define atomic_fetch_sub_release(v, p) __atomic_fetch_sub((v), (p), RELEASE_MB) +#define atomic_fetch_sub(v, p) __atomic_fetch_sub((v), (p), STRONG_MB) + +#define __atomic64_fetch_sub(v, p, mem_model) \ + __api_atomic_fetch_op((__s64) (v), &(p)->counter, d, "subd", mem_model) +#define atomic64_fetch_sub_relaxed(v, p) __atomic64_fetch_sub((v), (p), RELAXED_MB) +#define atomic64_fetch_sub_acquire(v, p) __atomic64_fetch_sub((v), (p), ACQUIRE_MB) +#define atomic64_fetch_sub_release(v, p) __atomic64_fetch_sub((v), (p), RELEASE_MB) +#define atomic64_fetch_sub(v, p) __atomic64_fetch_sub((v), (p), STRONG_MB) + +#define __atomic_fetch_or(v, p, mem_model) \ + __api_atomic_fetch_op((int) (v), &(p)->counter, w, "ors", mem_model) +#define atomic_fetch_or_relaxed(v, p) 
__atomic_fetch_or((v), (p), RELAXED_MB) +#define atomic_fetch_or_acquire(v, p) __atomic_fetch_or((v), (p), ACQUIRE_MB) +#define atomic_fetch_or_release(v, p) __atomic_fetch_or((v), (p), RELEASE_MB) +#define atomic_fetch_or(v, p) __atomic_fetch_or((v), (p), STRONG_MB) + +#define __atomic64_fetch_or(v, p, mem_model) \ + __api_atomic_fetch_op((__s64) (v), &(p)->counter, d, "ord", mem_model) +#define atomic64_fetch_or_relaxed(v, p) __atomic64_fetch_or((v), (p), RELAXED_MB) +#define atomic64_fetch_or_acquire(v, p) __atomic64_fetch_or((v), (p), ACQUIRE_MB) +#define atomic64_fetch_or_release(v, p) __atomic64_fetch_or((v), (p), RELEASE_MB) +#define atomic64_fetch_or(v, p) __atomic64_fetch_or((v), (p), STRONG_MB) + +#define __atomic_fetch_and(v, p, mem_model) \ + __api_atomic_fetch_op((int) (v), &(p)->counter, w, "ands", mem_model) +#define atomic_fetch_and_relaxed(v, p) __atomic_fetch_and((v), (p), RELAXED_MB) +#define atomic_fetch_and_acquire(v, p) __atomic_fetch_and((v), (p), ACQUIRE_MB) +#define atomic_fetch_and_release(v, p) __atomic_fetch_and((v), (p), RELEASE_MB) +#define atomic_fetch_and(v, p) __atomic_fetch_and((v), (p), STRONG_MB) + +#define __atomic64_fetch_and(v, p, mem_model) \ + __api_atomic_fetch_op((__s64) (v), &(p)->counter, d, "andd", mem_model) +#define atomic64_fetch_and_relaxed(v, p) __atomic64_fetch_and((v), (p), RELAXED_MB) +#define atomic64_fetch_and_acquire(v, p) __atomic64_fetch_and((v), (p), ACQUIRE_MB) +#define atomic64_fetch_and_release(v, p) __atomic64_fetch_and((v), (p), RELEASE_MB) +#define atomic64_fetch_and(v, p) __atomic64_fetch_and((v), (p), STRONG_MB) + +#define __atomic_fetch_andnot(v, p, mem_model) \ + __api_atomic_fetch_op((int) (v), &(p)->counter, w, "andns", mem_model) +#define atomic_fetch_andnot_relaxed(v, p) __atomic_fetch_andnot((v), (p), RELAXED_MB) +#define atomic_fetch_andnot_acquire(v, p) __atomic_fetch_andnot((v), (p), ACQUIRE_MB) +#define atomic_fetch_andnot_release(v, p) __atomic_fetch_andnot((v), (p), RELEASE_MB) 
+#define atomic_fetch_andnot(v, p) __atomic_fetch_andnot((v), (p), STRONG_MB) + +#define __atomic64_fetch_andnot(v, p, mem_model) \ + __api_atomic_fetch_op((__s64) (v), &(p)->counter, d, "andnd", mem_model) +#define atomic64_fetch_andnot_relaxed(v, p) __atomic64_fetch_andnot((v), (p), RELAXED_MB) +#define atomic64_fetch_andnot_acquire(v, p) __atomic64_fetch_andnot((v), (p), ACQUIRE_MB) +#define atomic64_fetch_andnot_release(v, p) __atomic64_fetch_andnot((v), (p), RELEASE_MB) +#define atomic64_fetch_andnot(v, p) __atomic64_fetch_andnot((v), (p), STRONG_MB) + +#define __atomic_fetch_xor(v, p, mem_model) \ + __api_atomic_fetch_op((int) (v), &(p)->counter, w, "xors", mem_model) +#define atomic_fetch_xor_relaxed(v, p) __atomic_fetch_xor((v), (p), RELAXED_MB) +#define atomic_fetch_xor_acquire(v, p) __atomic_fetch_xor((v), (p), ACQUIRE_MB) +#define atomic_fetch_xor_release(v, p) __atomic_fetch_xor((v), (p), RELEASE_MB) +#define atomic_fetch_xor(v, p) __atomic_fetch_xor((v), (p), STRONG_MB) + +#define __atomic64_fetch_xor(v, p, mem_model) \ + __api_atomic_fetch_op((__s64) (v), &(p)->counter, d, "xord", mem_model) +#define atomic64_fetch_xor_relaxed(v, p) __atomic64_fetch_xor((v), (p), RELAXED_MB) +#define atomic64_fetch_xor_acquire(v, p) __atomic64_fetch_xor((v), (p), ACQUIRE_MB) +#define atomic64_fetch_xor_release(v, p) __atomic64_fetch_xor((v), (p), RELEASE_MB) +#define atomic64_fetch_xor(v, p) __atomic64_fetch_xor((v), (p), STRONG_MB) + +#define __atomic_xchg(p, v, mem_model) \ + (int)__api_xchg_return((int) (v), &(p)->counter, w, mem_model) +#define atomic_xchg_relaxed(p, v) __atomic_xchg((p), (v), RELAXED_MB) +#define atomic_xchg_acquire(p, v) __atomic_xchg((p), (v), ACQUIRE_MB) +#define atomic_xchg_release(p, v) __atomic_xchg((p), (v), RELEASE_MB) +#define atomic_xchg(p, v) __atomic_xchg((p), (v), STRONG_MB) + +#define __atomic64_xchg(p, v, mem_model) \ + __api_xchg_return((__s64) (v), &(p)->counter, d, mem_model) +#define atomic64_xchg_relaxed(p, v) 
__atomic64_xchg((p), (v), RELAXED_MB) +#define atomic64_xchg_acquire(p, v) __atomic64_xchg((p), (v), ACQUIRE_MB) +#define atomic64_xchg_release(p, v) __atomic64_xchg((p), (v), RELEASE_MB) +#define atomic64_xchg(p, v) __atomic64_xchg((p), (v), STRONG_MB) + +#define __atomic_cmpxchg(p, o, n, mem_model) \ + (int)__api_cmpxchg_word_return((int) (o), (int) (n), \ + &(p)->counter, mem_model) +#define atomic_cmpxchg_relaxed(p, o, n) __atomic_cmpxchg((p), (o), (n), RELAXED_MB) +#define atomic_cmpxchg_acquire(p, o, n) __atomic_cmpxchg((p), (o), (n), ACQUIRE_MB) +#define atomic_cmpxchg_release(p, o, n) __atomic_cmpxchg((p), (o), (n), RELEASE_MB) +#define atomic_cmpxchg(p, o, n) __atomic_cmpxchg((p), (o), (n), STRONG_MB) +#define atomic_cmpxchg_lock(p, o, n) __atomic_cmpxchg((p), (o), (n), LOCK_MB) + +#define __atomic64_cmpxchg(p, o, n, mem_model) \ + __api_cmpxchg_dword_return((__s64) (o), (__s64) (n), \ + &(p)->counter, mem_model) +#define atomic64_cmpxchg_relaxed(p, o, n) __atomic64_cmpxchg((p), (o), (n), RELAXED_MB) +#define atomic64_cmpxchg_acquire(p, o, n) __atomic64_cmpxchg((p), (o), (n), ACQUIRE_MB) +#define atomic64_cmpxchg_release(p, o, n) __atomic64_cmpxchg((p), (o), (n), RELEASE_MB) +#define atomic64_cmpxchg(p, o, n) __atomic64_cmpxchg((p), (o), (n), STRONG_MB) +#define atomic64_cmpxchg_lock(p, o, n) __atomic64_cmpxchg((p), (o), (n), LOCK_MB) + +#define atomic_long_cmpxchg_lock(p, o, n) atomic64_cmpxchg_lock((p), (o), (n)) + +#define atomic_inc_unless_negative atomic_inc_unless_negative +static inline bool atomic_inc_unless_negative(atomic_t *p) +{ + return __api_atomic32_fetch_inc_unless_negative(&p->counter) >= 0; +} + +#define atomic64_inc_unless_negative atomic64_inc_unless_negative +static inline bool atomic64_inc_unless_negative(atomic64_t *p) +{ + return __api_atomic64_fetch_inc_unless_negative(&p->counter) >= 0; +} + +#define atomic_dec_unless_positive atomic_dec_unless_positive +static inline bool atomic_dec_unless_positive(atomic_t *p) +{ + return 
__api_atomic32_fetch_dec_unless_positive(&p->counter) <= 0; +} + +#define atomic64_dec_unless_positive atomic64_dec_unless_positive +static inline bool atomic64_dec_unless_positive(atomic64_t *p) +{ + return __api_atomic64_fetch_dec_unless_positive(&p->counter) <= 0; +} + +/** + * atomic_dec_if_positive - decrement by 1 if old value positive + * @p: pointer of type atomic_t + * + * The function returns the old value of *p minus 1, even if + * the atomic variable, v, was not decremented. + */ +#define atomic_dec_if_positive atomic_dec_if_positive +static inline int atomic_dec_if_positive(atomic_t *p) +{ + return __api_atomic32_fetch_dec_if_positive(&p->counter) - 1; +} + +#define atomic64_dec_if_positive atomic64_dec_if_positive +static inline s64 atomic64_dec_if_positive(atomic64_t *p) +{ + return __api_atomic64_fetch_dec_if_positive(&p->counter) - 1; +} + +/** + * atomic_fetch_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as @v was not already @u. 
+ * Returns original value of @v + */ +#define atomic_fetch_add_unless atomic_fetch_add_unless +static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + return __api_atomic32_fetch_add_unless(a, &v->counter, u); +} + +#define atomic64_fetch_add_unless atomic64_fetch_add_unless +static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) +{ + return __api_atomic64_fetch_add_unless(a, &v->counter, u); +} + +#define atomic_try_cmpxchg atomic_try_cmpxchg +static inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = atomic_cmpxchg(v, o, new); + *old = r; + return likely(r == o); +} + +#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire +static inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = atomic_cmpxchg_acquire(v, o, new); + *old = r; + return likely(r == o); +} + +#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release +static inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = atomic_cmpxchg_release(v, o, new); + *old = r; + return likely(r == o); +} + +#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed +static inline bool atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = atomic_cmpxchg_relaxed(v, o, new); + *old = r; + return likely(r == o); +} + +static __always_inline bool atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = atomic_cmpxchg_lock(v, o, new); + *old = r; + return likely(r == o); +} + +#define atomic64_try_cmpxchg atomic64_try_cmpxchg +static inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = atomic64_cmpxchg(v, o, new); + *old = r; + return likely(r == o); +} + +#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire +static inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = 
atomic64_cmpxchg_acquire(v, o, new); + *old = r; + return likely(r == o); +} + +#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release +static inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = atomic64_cmpxchg_release(v, o, new); + *old = r; + return likely(r == o); +} + +#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed +static inline bool atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = atomic64_cmpxchg_relaxed(v, o, new); + *old = r; + return likely(r == o); +} + +static inline bool atomic64_try_cmpxchg_lock(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = atomic64_cmpxchg_lock(v, o, new); + *old = r; + return likely(r == o); +} + +#define atomic_long_try_cmpxchg_lock(p, o, n) atomic64_try_cmpxchg_lock((p), (s64 *) (o), (n)) + +#endif /* _E2K_ATOMIC_ */ diff --git a/arch/e2k/include/asm/atomic_api.h b/arch/e2k/include/asm/atomic_api.h new file mode 100644 index 000000000000..b617c5ae7120 --- /dev/null +++ b/arch/e2k/include/asm/atomic_api.h @@ -0,0 +1,892 @@ +#ifndef _ASM_E2K_ATOMIC_API_H_ +#define _ASM_E2K_ATOMIC_API_H_ + +#include +#include +#include +#include + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +/* + * Special page that is accessible for reading by every user + * process is used for hardware bug #89242 workaround. 
+ */ +#define NATIVE_HWBUG_WRITE_MEMORY_BARRIER_ADDRESS 0xff6000000000UL + +#if !defined(CONFIG_BOOT_E2K) && !defined(E2K_P2V) + +# define NATIVE_HWBUG_AFTER_LD_ACQ_ADDRESS \ + NATIVE_HWBUG_WRITE_MEMORY_BARRIER_ADDRESS +# define NATIVE_HAS_HWBUG_AFTER_LD_ACQ_ADDRESS \ + virt_cpu_has(CPU_HWBUG_WRITE_MEMORY_BARRIER) +# ifdef E2K_FAST_SYSCALL +# define NATIVE_HWBUG_AFTER_LD_ACQ_CPU NATIVE_GET_DSREG_OPEN(clkr) +# else +# ifndef __ASSEMBLY__ +# include +register unsigned long long __cpu_preempt_reg DO_ASM_GET_GREG_MEMONIC(SMP_CPU_ID_GREG); +# endif +# define NATIVE_HWBUG_AFTER_LD_ACQ_CPU ((unsigned int) __cpu_preempt_reg) +# endif + +#elif defined(E2K_P2V) + +# define NATIVE_HWBUG_AFTER_LD_ACQ_ADDRESS \ + (NATIVE_READ_IP_REG_VALUE() & ~0x3fUL) +# define NATIVE_HWBUG_AFTER_LD_ACQ_CPU 0 +# if (!defined(CONFIG_E2K_MACHINE) && CONFIG_E2K_MINVER <= 4) || defined(CONFIG_E2K_E8C) +# define NATIVE_HAS_HWBUG_AFTER_LD_ACQ_ADDRESS 1 +# else +# define NATIVE_HAS_HWBUG_AFTER_LD_ACQ_ADDRESS 0 +# endif + +#else /* CONFIG_BOOT_E2K */ + +# define NATIVE_HWBUG_AFTER_LD_ACQ_ADDRESS \ + (NATIVE_READ_IP_REG_VALUE() & ~0x3fUL) +# define NATIVE_HAS_HWBUG_AFTER_LD_ACQ_ADDRESS 0 +# define NATIVE_HWBUG_AFTER_LD_ACQ_CPU 0 + +#endif + +#if (!defined CONFIG_E2K_MACHINE && CONFIG_E2K_MINVER <= 4) || defined CONFIG_E2K_E8C +/* Define these here to avoid include hell... 
*/ +# define _UPSR_IE 0x20U +# define _UPSR_NMIE 0x80U +# define NATIVE_HWBUG_AFTER_LD_ACQ() \ +do { \ + unsigned long long __reg1, __reg2; \ + if (NATIVE_HAS_HWBUG_AFTER_LD_ACQ_ADDRESS) { \ + unsigned long __hwbug_cpu = NATIVE_HWBUG_AFTER_LD_ACQ_CPU; \ + unsigned long __hwbug_address = \ + NATIVE_HWBUG_AFTER_LD_ACQ_ADDRESS + \ + (__hwbug_cpu & 0x3) * 4096; \ + unsigned long __hwbug_atomic_flags; \ + __hwbug_atomic_flags = NATIVE_NV_READ_UPSR_REG_VALUE(); \ + NATIVE_SET_UPSR_IRQ_BARRIER( \ + __hwbug_atomic_flags & ~(_UPSR_IE | _UPSR_NMIE)); \ + NATIVE_CLEAN_LD_ACQ_ADDRESS(__reg1, __reg2, __hwbug_address); \ + NATIVE_WRITE_MAS_D(__hwbug_address + 0 * 4096 + 0 * 64, 0UL, \ + MAS_DCACHE_LINE_FLUSH); \ + NATIVE_WRITE_MAS_D(__hwbug_address + 0 * 4096 + 4 * 64, 0UL, \ + MAS_DCACHE_LINE_FLUSH); \ + NATIVE_WRITE_MAS_D(__hwbug_address + 8 * 4096 + 1 * 64, 0UL, \ + MAS_DCACHE_LINE_FLUSH); \ + NATIVE_WRITE_MAS_D(__hwbug_address + 8 * 4096 + 5 * 64, 0UL, \ + MAS_DCACHE_LINE_FLUSH); \ + NATIVE_WRITE_MAS_D(__hwbug_address + 16 * 4096 + 2 * 64, 0UL, \ + MAS_DCACHE_LINE_FLUSH); \ + NATIVE_WRITE_MAS_D(__hwbug_address + 16 * 4096 + 6 * 64, 0UL, \ + MAS_DCACHE_LINE_FLUSH); \ + NATIVE_WRITE_MAS_D(__hwbug_address + 24 * 4096 + 3 * 64, 0UL, \ + MAS_DCACHE_LINE_FLUSH); \ + NATIVE_WRITE_MAS_D(__hwbug_address + 24 * 4096 + 7 * 64, 0UL, \ + MAS_DCACHE_LINE_FLUSH); \ + __E2K_WAIT(_fl_c); \ + NATIVE_SET_UPSR_IRQ_BARRIER(__hwbug_atomic_flags); \ + } \ +} while (0) +#else +# define NATIVE_HWBUG_AFTER_LD_ACQ() do { } while (0) +#endif + +/* FIXME: here will be paravirtualized only hardware bugs workaround macroses */ +/* but in guest general case these bugs can be workarounded only on host and */ +/* guest should call appropriate hypercalls to make all atomic */ +/* sequence on host, because of they contain privileged actions */ + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel 
(not virtualized based on pv_ops */ +#include +#else /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ +/* it is native kernel with or without virtualization support */ + +/* examine bare hardware bugs */ +#define virt_cpu_has(hwbug) cpu_has(hwbug) + +#define VIRT_HWBUG_AFTER_LD_ACQ() NATIVE_HWBUG_AFTER_LD_ACQ() +#endif /* CONFIG_PARAVIRT_GUEST */ + +#define VIRT_HWBUG_AFTER_LD_ACQ_STRONG_MB VIRT_HWBUG_AFTER_LD_ACQ +#define VIRT_HWBUG_AFTER_LD_ACQ_LOCK_MB VIRT_HWBUG_AFTER_LD_ACQ +#define VIRT_HWBUG_AFTER_LD_ACQ_ACQUIRE_MB VIRT_HWBUG_AFTER_LD_ACQ +#define VIRT_HWBUG_AFTER_LD_ACQ_RELEASE_MB() +#define VIRT_HWBUG_AFTER_LD_ACQ_RELAXED_MB() + + +#define virt_api_atomic32_add_if_not_negative(val, addr, mem_model) \ +({ \ + register int rval; \ + NATIVE_ATOMIC32_ADD_IF_NOT_NEGATIVE(val, addr, rval, mem_model); \ + VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \ + rval; \ +}) + +#define virt_api_atomic64_add_if_not_negative(val, addr, mem_model) \ +({ \ + register long long rval; \ + NATIVE_ATOMIC64_ADD_IF_NOT_NEGATIVE(val, addr, rval, mem_model); \ + VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \ + rval; \ +}) + +/* Atomically add to 16 low bits and return the new 32 bits value */ +#define virt_api_atomic16_add_return32_lock(val, addr) \ +({ \ + register int rval, tmp; \ + NATIVE_ATOMIC16_ADD_RETURN32_LOCK(val, addr, rval, tmp); \ + VIRT_HWBUG_AFTER_LD_ACQ(); \ + rval; \ +}) + +/* Atomically add two 32 bits values packed into one 64 bits value */ +/* and return the new 64 bits value */ +#define virt_api_atomic32_pair_add_return64_lock(val_lo, val_hi, addr) \ +({ \ + register long rval, tmp1, tmp2, tmp3; \ + NATIVE_ATOMIC32_PAIR_ADD_RETURN64_LOCK(val_lo, val_hi, addr, rval, \ + tmp1, tmp2, tmp3); \ + VIRT_HWBUG_AFTER_LD_ACQ(); \ + rval; \ +}) + +/* Atomically sub two 32 bits values packed into one 64 bits value */ +/* and return the new 64 bits value */ +#define virt_api_atomic32_pair_sub_return64_lock(val_lo, val_hi, addr) \ +({ \ + register long rval, tmp1, tmp2, tmp3; \ + 
NATIVE_ATOMIC32_PAIR_SUB_RETURN64_LOCK(val_lo, val_hi, addr, rval, \ + tmp1, tmp2, tmp3); \ + VIRT_HWBUG_AFTER_LD_ACQ(); \ + rval; \ +}) + +#define virt_api_atomic_ticket_trylock(spinlock, tail_shift) \ +({ \ + register int __rval; \ + register int __val; \ + register int __head; \ + register int __tail; \ + NATIVE_ATOMIC_TICKET_TRYLOCK(spinlock, tail_shift, \ + __val, __head, __tail, __rval); \ + VIRT_HWBUG_AFTER_LD_ACQ_LOCK_MB(); \ + __rval; \ +}) + +/* + * Atomic support of new read/write spinlock mechanism. + * Locking is ordered and later readers cannot outrun former writers. + * Locking order based on coupons (tickets) received while first try to get + * lock, if lock is already taken by other. + * + * read/write spinlocks initial state allowing 2^32 active readers and + * only one active writer. But coupon discipline allows simultaniously + * have only 2^16 registered users of the lock: active + waiters + */ + +/* + * It is test: is read/write lock can be now taken by reader + * Macros return source state of read/write lock and set bypassed boolean value + * 'success - locking can be successful' + * + * C equivalent: + * +static rwlock_val_t +atomic_can_lock_reader(arch_rwlock_t *rw, bool success // bypassed) +{ + arch_rwlock_t src_lock; + u16 ticket; + u16 head; + s32 count; + + src_lock.lock = rw->lock; + ticket = src_lock.ticket; + head = src_lock.head; + count = src_lock.count; + // can lock: none waiters and active writers + success = (ticket == head) && (count-1 < 0); + return src_lock.lock; +} + */ +#define virt_api_atomic_can_lock_reader(__rw_addr, __success) \ +({ \ + register unsigned int __head; \ + register unsigned int __ticket; \ + register int __count; \ + register unsigned long __src; \ + \ + NATIVE_ATOMIC_CAN_LOCK_READER(__rw_addr, __success, \ + __head, __ticket, __count, __src); \ + __src; \ +}) + +/* + * It is test: is read/write lock can be now taken by writer + * Macros return source state of read/write lock and set bypassed boolean 
value + * 'success - locking can be successful' + * + * C equivalent: + * +static rwlock_val_t +atomic_can_lock_writer(arch_rwlock_t *rw, bool success // bypassed) +{ + arch_rwlock_t src_lock; + u16 ticket; + u16 head; + s32 count; + + src_lock.lock = rw->lock; + ticket = src_lock.ticket; + head = src_lock.head; + count = src_lock.count; + // can lock: none waiters and active readers and writers + success = (ticket == head) && (count == 0); + return src_lock.lock; +} + */ +#define virt_api_atomic_can_lock_writer(__rw_addr, __success) \ +({ \ + register unsigned int __head; \ + register unsigned int __ticket; \ + register int __count; \ + register unsigned long __src; \ + \ + NATIVE_ATOMIC_CAN_LOCK_WRITER(__rw_addr, __success, \ + __head, __ticket, __count, __src); \ + __src; \ +}) + +/* + * The first try to take read spinlock. + * Successful locking increment # of ticket and head, decrement active + * readers counter (negative counter) + * Macros return source state of read/write lock and set bypassed boolean value + * 'success - lockin is successful', otherwise reader receives coupon and + * should be queued as waiter similar mutex implementation + * + * C equivalent: + * +static rwlock_val_t +atomic_add_new_reader(arch_rwlock_t *rw, bool success // bypassed) +{ + arch_rwlock_t src_lock; + arch_rwlock_t dst_lock; + u16 ticket; + u16 head; + s32 count; + + src_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0); + ticket = src_lock.ticket; + head = src_lock.head; + count = src_lock.count; + // can lock: none waiters and active writers + success = (ticket == head) && (count-1 < 0); + dst_lock.ticket = ticket + 1; + if (success) { + // take lock: increment readers (negative value), + // increment head to enable follow readers + count = count - 1; + head = head + 1; + } + dst_lock.count = count; + dst_lock.head = head; + E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0); + return src_lock.lock; +} + */ +#define virt_api_atomic_add_new_reader(__rw_addr, 
__success) \ +({ \ + register unsigned int __head; \ + register unsigned int __ticket; \ + register int __count; \ + register unsigned long __tmp; \ + register unsigned long __src; \ + register unsigned long __dst; \ + \ + NATIVE_ATOMIC_ADD_NEW_READER(__rw_addr, __success, \ + __head, __ticket, __count, __src, __dst, __tmp); \ + VIRT_HWBUG_AFTER_LD_ACQ(); \ + __src; \ +}) + +/* + * Only try to take read spinlock. + * Successful locking increment # of ticket and head, decrement active + * readers counter (negative counter) + * Macros return source state of read/write lock and set bypassed boolean value + * 'success - lockin is successful', otherwise 'success' is false and + * nothing are not changed + * + * C equivalent: + * +static rwlock_val_t +atomic_try_add_new_reader(arch_rwlock_t *rw, bool success // bypassed) +{ + arch_rwlock_t src_lock; + arch_rwlock_t dst_lock; + u16 ticket; + u16 head; + s32 count; + + src_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0); + ticket = src_lock.ticket; + head = src_lock.head; + count = src_lock.count; + // can lock: none waiters and active writers + success = (ticket == head) && (count-1 < 0); + if (success) { + // take lock: increment readers (negative value), + // increment head to enable follow readers + // increment ticket number for next users + dst_lock.ticket = ticket + 1; + dst_lock.count = count - 1; + dst_lock.head = head + 1; + } else { + dst_lock.lock = src_lock.lock; + } + E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0); + return src_lock.lock; +} + */ +#define virt_api_atomic_try_add_new_reader(__rw_addr, __success) \ +({ \ + register unsigned int __head; \ + register unsigned int __ticket; \ + register int __count; \ + register unsigned long __tmp; \ + register unsigned long __src; \ + register unsigned long __dst; \ + \ + NATIVE_ATOMIC_TRY_ADD_NEW_READER(__rw_addr, __success, \ + __head, __ticket, __count, __src, __dst, __tmp); \ + VIRT_HWBUG_AFTER_LD_ACQ(); \ + __src; \ +}) + +/* + * The 
slow try to take read spinlock according to erlier received # of coupon + * Successful locking increment # of head, decrement active readers counter + * (negative counter) + * Macros return current updated state of read/write lock and set bypassed + * boolean value 'success - lockin is successful', otherwise reader should be + * queued again + * + * C equivalent: + * +static rwlock_val_t +atomic_add_slow_reader(arch_rwlock_t *rw, u16 ticket, bool success) +{ + arch_rwlock_t dst_lock; + u16 head; + s32 count; + + dst_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0); + head = src_lock.head; + count = src_lock.count; + // can lock: none waiters and active writers + success = (ticket == head) && (count-1 < 0); + if (success) { + // take lock: increment readers (negative value), + // increment head to enable follow readers + count = count - 1; + head = head + 1; + dst_lock.count = count; + dst_lock.head = head; + } + E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0); + return dst_lock.lock; +} + */ +#define virt_api_atomic_add_slow_reader(__rw_addr, __ticket, __success) \ +({ \ + register unsigned int __head; \ + register int __count; \ + register unsigned long __tmp; \ + register unsigned long __dst; \ + \ + NATIVE_ATOMIC_ADD_SLOW_READER(__rw_addr, __success, \ + __head, __ticket, __count, __dst, __tmp); \ + VIRT_HWBUG_AFTER_LD_ACQ(); \ + __dst; \ +}) + +/* + * Unlocking of read spinlock. + * Need only increment active readers counter (negative counter) + * Macros return current updated state of read/write lock. 
+ * + * C equivalent: + * +static rwlock_val_t +atomic_free_lock_reader(arch_rwlock_t *rw) +{ + arch_rwlock_t dst_lock; + + dst_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0); + dst_lock.count++; + E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0); + return dst_lock.lock; +} + */ +#define virt_api_atomic_free_lock_reader(__rw_addr) \ +({ \ + register unsigned long __dst; \ + \ + NATIVE_ATOMIC_FREE_LOCK_READER(__rw_addr, __dst); \ + VIRT_HWBUG_AFTER_LD_ACQ(); \ + __dst; \ +}) + +/* + * The first try to take write spinlock. + * Successful locking increment # of ticket and active writers counter + * (positive value - can be only one active writer, so set counter to 1) + * Macros return source state of read/write lock and set bypassed boolean value + * 'success - lockin is successful', otherwise writer receives coupon and + * should be queued as waiter similar mutex implementation + * + * C equivalent: + * +static rwlock_val_t +atomic_add_new_writer(arch_rwlock_t *rw, bool success // bypassed) +{ + arch_rwlock_t src_lock; + arch_rwlock_t dst_lock; + u16 ticket; + u16 head; + s32 count; + + src_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0); + ticket = src_lock.ticket; + head = src_lock.head; + count = src_lock.count; + // can lock: none waiters and active readers and writers + success = (ticket == head) && (count == 0); + dst_lock.head = head; + dst_lock.ticket = ticket + 1; + if (success) { + // take lock: increment writerss, + count = count + 1; + } + dst_lock.count = count; + E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0); + return src_lock.lock; +} + */ +#define virt_api_atomic_add_new_writer(__rw_addr, __success) \ +({ \ + register unsigned int __head; \ + register unsigned int __ticket; \ + register int __count; \ + register unsigned long __tmp; \ + register unsigned long __src; \ + register unsigned long __dst; \ + \ + NATIVE_ATOMIC_ADD_NEW_WRITER(__rw_addr, __success, \ + __head, __ticket, __count, __src, __dst, 
__tmp); \ + VIRT_HWBUG_AFTER_LD_ACQ(); \ + __src; \ +}) + +/* + * Only try to take write spinlock. + * Successful locking increment # of ticket and active writers counter + * (positive value - can be only one active writer, so set counter to 1) + * Macros return source state of read/write lock and set bypassed boolean value + * 'success - lockin is successful', otherwise 'success' is set to false and + * nothing are not changed + * + * C equivalent: + * +static rwlock_val_t +atomic_try_add_new_writer(arch_rwlock_t *rw, bool success // bypassed) +{ + arch_rwlock_t src_lock; + arch_rwlock_t dst_lock; + u16 ticket; + u16 head; + s32 count; + + src_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0); + ticket = src_lock.ticket; + head = src_lock.head; + count = src_lock.count; + // can lock: none waiters and active readers and writers + success = (ticket == head) && (count == 0); + if (success) { + // take lock: increment writers counter, + // increment ticket number for next readers/writers + dst_lock.head = head; + dst_lock.ticket = ticket + 1; + dst_lock.count = count + 1; + } + E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0); + return src_lock.lock; +} + */ +#define virt_api_atomic_try_add_new_writer(__rw_addr, __success) \ +({ \ + register unsigned int __head; \ + register unsigned int __ticket; \ + register int __count; \ + register unsigned long __tmp; \ + register unsigned long __src; \ + register unsigned long __dst; \ + \ + NATIVE_ATOMIC_TRY_ADD_NEW_WRITER(__rw_addr, __success, \ + __head, __ticket, __count, __src, __dst, __tmp); \ + VIRT_HWBUG_AFTER_LD_ACQ(); \ + __src; \ +}) + +/* + * The slow try to take write spinlock according to erlier received # of coupon + * Successful locking increment active writers counter + * (positive counter - can be only one active writer, so set counter to 1) + * Macros return current updated state of read/write lock and set bypassed + * boolean value 'success - lockin is successful', otherwise writer should 
be + * queued again + * + * C equivalent: + * +static rwlock_val_t +atomic_add_slow_writer(arch_rwlock_t *rw, u16 ticket, bool success) +{ + arch_rwlock_t dst_lock; + u16 head; + s32 count; + + dst_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0); + head = src_lock.head; + count = src_lock.count; + // can lock: none waiters and active readers and writers + success = (ticket == head) && (count == 0); + if (success) { + // take lock: increment writers, + count = count + 1; + dst_lock.count = count; + } + E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0); + return dst_lock.lock; +} + */ +#define virt_api_atomic_add_slow_writer(__rw_addr, __ticket, __success) \ +({ \ + register unsigned int __head; \ + register int __count; \ + register unsigned long __tmp; \ + register unsigned long __dst; \ + \ + NATIVE_ATOMIC_ADD_SLOW_WRITER(__rw_addr, __success, \ + __head, __ticket, __count, __dst, __tmp); \ + VIRT_HWBUG_AFTER_LD_ACQ(); \ + __dst; \ +}) + +/* + * Unlocking of write spinlock. + * Need only increment # of queue head and decrement active writers counter + * (positive counter - can be only one writer, so set counter to 0) + * Macros return current updated state of read/write lock. 
+ * + * C equivalent: + * +static rwlock_val_t +atomic_free_lock_writer(arch_rwlock_t *rw) +{ + arch_rwlock_t dst_lock; + + dst_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0); + dst_lock.count++; + dst_lock.head++; + E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0); + return dst_lock.lock; +} + */ +#define virt_api_atomic_free_lock_writer(__rw_addr) \ +({ \ + register unsigned long __dst; \ + register unsigned int __head; \ + register int __count; \ + register unsigned long __tmp; \ + \ + NATIVE_ATOMIC_FREE_LOCK_WRITER(__rw_addr, \ + __head, __count, __dst, __tmp); \ + VIRT_HWBUG_AFTER_LD_ACQ(); \ + __dst; \ +}) + +#define virt_api_atomic_op(val, addr, size_letter, op, mem_model) \ +({ \ + typeof(val) rval; \ + NATIVE_ATOMIC_OP(val, addr, rval, size_letter, op, mem_model); \ + VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \ + rval; \ +}) + +#define virt_api_atomic_fetch_op(val, addr, size_letter, op, mem_model) \ +({ \ + typeof(val) rval, stored_val; \ + NATIVE_ATOMIC_FETCH_OP(val, addr, rval, stored_val, \ + size_letter, op, mem_model); \ + VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \ + rval; \ +}) + + +/* + * Atomic operations with return value and acquire/release semantics + */ + +#define virt_api_atomic32_fetch_inc_unless_negative(addr) \ +({ \ + register int rval, tmp; \ + NATIVE_ATOMIC_FETCH_OP_UNLESS(1, addr, 0, tmp, rval, \ + w, "adds", "~ ", "adds", "", "cmplsb", STRONG_MB); \ + VIRT_HWBUG_AFTER_LD_ACQ(); \ + rval; \ +}) + +#define virt_api_atomic64_fetch_inc_unless_negative(addr) \ +({ \ + register long long rval, tmp; \ + NATIVE_ATOMIC_FETCH_OP_UNLESS(1ull, addr, 0ull, tmp, rval, \ + d, "addd", "~ ", "addd", "", "cmpldb", STRONG_MB); \ + VIRT_HWBUG_AFTER_LD_ACQ(); \ + rval; \ +}) + +#define virt_api_atomic32_fetch_dec_unless_positive(addr) \ +({ \ + register int rval, tmp; \ + NATIVE_ATOMIC_FETCH_OP_UNLESS(1, addr, 0, tmp, rval, \ + w, "subs", "", "adds", "~ ", "cmplesb", STRONG_MB); \ + VIRT_HWBUG_AFTER_LD_ACQ(); \ + rval; \ +}) + +#define 
virt_api_atomic64_fetch_dec_unless_positive(addr) \ +({ \ + register long long rval, tmp; \ + NATIVE_ATOMIC_FETCH_OP_UNLESS(1ull, addr, 0ull, tmp, rval, \ + d, "subd", "", "addd", "~ ", "cmpledb", STRONG_MB); \ + VIRT_HWBUG_AFTER_LD_ACQ(); \ + rval; \ +}) + +#define virt_api_atomic32_fetch_dec_if_positive(addr) \ +({ \ + register int rval, tmp; \ + NATIVE_ATOMIC_FETCH_OP_UNLESS(1, addr, 0, tmp, rval, \ + w, "subs", "~ ", "adds", "", "cmplesb", STRONG_MB); \ + VIRT_HWBUG_AFTER_LD_ACQ(); \ + rval; \ +}) + +#define virt_api_atomic64_fetch_dec_if_positive(addr) \ +({ \ + register long long rval, tmp; \ + NATIVE_ATOMIC_FETCH_OP_UNLESS(1ull, addr, 0ull, tmp, rval, \ + d, "subd", "~ ", "addd", "", "cmpledb", STRONG_MB); \ + VIRT_HWBUG_AFTER_LD_ACQ(); \ + rval; \ +}) + +#define virt_api_atomic32_fetch_add_unless(val, addr, unless) \ +({ \ + register int rval, tmp; \ + NATIVE_ATOMIC_FETCH_OP_UNLESS(val, addr, unless, tmp, rval, \ + w, "adds", "~ ", "adds", "", "cmpesb", STRONG_MB); \ + VIRT_HWBUG_AFTER_LD_ACQ(); \ + rval; \ +}) + +#define virt_api_atomic64_fetch_add_unless(val, addr, unless) \ +({ \ + register long long rval, tmp; \ + NATIVE_ATOMIC_FETCH_OP_UNLESS(val, addr, unless, tmp, rval, \ + d, "addd", "~ ", "addd", "", "cmpedb", STRONG_MB); \ + VIRT_HWBUG_AFTER_LD_ACQ(); \ + rval; \ +}) + +#define __api_atomic64_fetch_xchg_if_below(val, addr, mem_model) \ +({ \ + register long long rval, tmp; \ + NATIVE_ATOMIC_FETCH_XCHG_UNLESS(val, addr, tmp, rval, d, \ + "merged", "cmpbdb", mem_model); \ + VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \ + rval; \ +}) + +#define virt_api_xchg_return(val, addr, size_letter, mem_model) \ +({ \ + register long rval; \ + NATIVE_ATOMIC_XCHG_RETURN(val, addr, rval, size_letter, mem_model); \ + VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \ + rval; \ +}) + +#define virt_api_cmpxchg_return(old, new, addr, size_letter, \ + sxt_size, mem_model) \ +({ \ + register long rval; \ + register long stored_val; \ + NATIVE_ATOMIC_CMPXCHG_RETURN(old, new, addr, 
stored_val, rval, \ + size_letter, sxt_size, mem_model); \ + VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \ + rval; \ +}) + +#define virt_api_cmpxchg_word_return(old, new, addr, mem_model) \ +({ \ + int rval, stored_val; \ + NATIVE_ATOMIC_CMPXCHG_WORD_RETURN(old, new, addr, \ + stored_val, rval, mem_model); \ + VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \ + rval; \ +}) + +#define virt_api_cmpxchg_dword_return(old, new, addr, mem_model) \ +({ \ + long long rval, stored_val; \ + NATIVE_ATOMIC_CMPXCHG_DWORD_RETURN(old, new, addr, stored_val, \ + rval, mem_model); \ + VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \ + rval; \ +}) + +/* + * implementation of cmpxchg_double for 64-bit pairs + * and activates the logic required for the SLUB + * + * C equivalent: + * +static int +atomic_cmpxchg_double(struct page page, void *freelist_old, + unsigned long counters_old, + void *freelist_new, unsigned long counters_new) +{ + unsigned long flags; + + local_irq_save(flags); + slab_lock(page); + if (page->freelist == freelist_old && + page->counters == counters_old) { + page->freelist = freelist_new; + set_page_slub_counters(page, counters_new); + slab_unlock(page); + local_irq_restore(flags); + return true; + } + slab_unlock(page); + local_irq_restore(flags); + return false; +} + */ + +#define virt_api_cmpxchg_double(addr1, addr2, old1, old2, new1, new2, \ + mem_model) \ +({ \ + register long rval; \ + NATIVE_ATOMIC_CMPXCHG_DWORD_PAIRS(addr1, old1, old2, new1, new2,\ + rval, mem_model); \ + VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \ + rval; \ +}) + +#define __api_cmpxchg_double(addr1, addr2, old1, old2, new1, new2) \ + virt_api_cmpxchg_double(addr1, addr2, old1, old2, \ + new1, new2, STRONG_MB) + +#define __api_futex_atomic32_op(insn, oparg, uaddr) \ + virt_api_atomic_fetch_op(oparg, uaddr, w, insn, STRONG_MB) + +#define __api_atomic32_add_if_not_negative \ + virt_api_atomic32_add_if_not_negative + +#define __api_atomic64_add_if_not_negative \ + virt_api_atomic64_add_if_not_negative + +/* 
Atomically add and return the old value */ +#define __api_atomic32_add_oldval(val, addr) \ + virt_api_atomic_fetch_op(val, addr, w, "adds", STRONG_MB) + +#define __api_atomic32_add_oldval_lock(val, addr) \ + virt_api_atomic_fetch_op(val, addr, w, "adds", LOCK_MB) + +/* Atomically add to 16 low bits and return the new 32 bits value */ +#define __api_atomic16_add_return32_lock(val, addr) \ + virt_api_atomic16_add_return32_lock(val, addr) + +/* Atomically add two 32 bits values packed into one 64 bits value */ +/* and return the new 64 bits value */ +#define __api_atomic32_pair_add_return64_lock(val_lo, val_hi, addr) \ + virt_api_atomic32_pair_add_return64_lock(val_lo, val_hi, addr) + +/* Atomically sub two 32 bits values packed into one 64 bits value */ +/* and return the new 64 bits value */ +#define __api_atomic32_pair_sub_return64_lock(val_lo, val_hi, addr) \ + virt_api_atomic32_pair_sub_return64_lock(val_lo, val_hi, addr) + +#define __api_atomic_ticket_trylock(spinlock, tail_shift) \ + virt_api_atomic_ticket_trylock(spinlock, tail_shift) +#define __api_atomic_can_lock_reader(__rw_addr, __success) \ + virt_api_atomic_can_lock_reader(__rw_addr, __success) +#define __api_atomic_can_lock_writer(__rw_addr, __success) \ + virt_api_atomic_can_lock_writer(__rw_addr, __success) +#define __api_atomic_add_new_reader(__rw_addr, __success) \ + virt_api_atomic_add_new_reader(__rw_addr, __success) +#define __api_atomic_try_add_new_reader(__rw_addr, __success) \ + virt_api_atomic_try_add_new_reader(__rw_addr, __success) +#define __api_atomic_add_slow_reader(__rw_addr, __ticket, __success) \ + virt_api_atomic_add_slow_reader(__rw_addr, __ticket, __success) +#define __api_atomic_free_lock_reader(__rw_addr) \ + virt_api_atomic_free_lock_reader(__rw_addr) +#define __api_atomic_add_new_writer(__rw_addr, __success) \ + virt_api_atomic_add_new_writer(__rw_addr, __success) +#define __api_atomic_try_add_new_writer(__rw_addr, __success) \ + virt_api_atomic_try_add_new_writer(__rw_addr, 
__success) +#define __api_atomic_add_slow_writer(__rw_addr, __ticket, __success) \ + virt_api_atomic_add_slow_writer(__rw_addr, __ticket, \ + __success) +#define __api_atomic_free_lock_writer(__rw_addr) \ + virt_api_atomic_free_lock_writer(__rw_addr) + +#define __api_atomic_op virt_api_atomic_op +#define __api_atomic_fetch_op virt_api_atomic_fetch_op + +/* + * Atomic operations with return value and acquire/release semantics + */ +#define __api_atomic32_fetch_add_unless(val, addr, unless) \ + virt_api_atomic32_fetch_add_unless(val, addr, unless) +#define __api_atomic64_fetch_add_unless(val, addr, unless) \ + virt_api_atomic64_fetch_add_unless(val, addr, unless) + +#define __api_atomic32_fetch_dec_if_positive virt_api_atomic32_fetch_dec_if_positive +#define __api_atomic64_fetch_dec_if_positive virt_api_atomic64_fetch_dec_if_positive +#define __api_atomic32_fetch_dec_unless_positive virt_api_atomic32_fetch_dec_unless_positive +#define __api_atomic64_fetch_dec_unless_positive virt_api_atomic64_fetch_dec_unless_positive +#define __api_atomic32_fetch_inc_unless_negative virt_api_atomic32_fetch_inc_unless_negative +#define __api_atomic64_fetch_inc_unless_negative virt_api_atomic64_fetch_inc_unless_negative + +#define __api_xchg_return virt_api_xchg_return + +#define __api_cmpxchg_return virt_api_cmpxchg_return + +#define __api_cmpxchg_word_return virt_api_cmpxchg_word_return + +#define __api_cmpxchg_dword_return virt_api_cmpxchg_dword_return + +#endif /* ! 
__ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* _ASM_E2K_ATOMIC_API_H_ */ diff --git a/arch/e2k/include/asm/auxvec.h b/arch/e2k/include/asm/auxvec.h new file mode 100644 index 000000000000..21189a6399fd --- /dev/null +++ b/arch/e2k/include/asm/auxvec.h @@ -0,0 +1,12 @@ +#ifndef _E2K_AUXVEC_H +#define _E2K_AUXVEC_H + +#define AT_FAST_SYSCALLS 32 +/* Skip 33 as it is assumed to be AT_SYSINFO_EHDR in Linux */ +#define AT_SYSTEM_INFO 34 + +#ifdef __KERNEL__ +# define AT_VECTOR_SIZE_ARCH 2 +#endif + +#endif /* _E2K_AUXVEC_H */ diff --git a/arch/e2k/include/asm/barrier.h b/arch/e2k/include/asm/barrier.h new file mode 100644 index 000000000000..38025ba91e63 --- /dev/null +++ b/arch/e2k/include/asm/barrier.h @@ -0,0 +1,165 @@ +#ifndef _ASM_E2K_BARRIER_H +#define _ASM_E2K_BARRIER_H + +#include + +#include +#include + +#if CONFIG_CPU_ISET >= 6 +/* Cannot use this on V5 because of load-after-store dependencies - + * compiled kernel won't honour them */ +# define mb() E2K_WAIT(_st_c | _ld_c | _sas | _sal | _las | _lal) +#else +# define mb() E2K_WAIT(_st_c | _ld_c) +#endif +#define wmb() E2K_WAIT(_st_c | _sas) +#define rmb() E2K_WAIT(_ld_c | _lal) + +/* + * For smp_* variants add _mt modifier + */ +#if CONFIG_CPU_ISET >= 6 +/* Cannot use this on V5 because of load-after-store dependencies - + * compiled kernel won't honour them */ +# define __smp_mb() E2K_WAIT(_st_c | _ld_c | _sas | _sal | _las | _lal | _mt) +#else +# define __smp_mb() E2K_WAIT(_st_c | _ld_c) +#endif +#define __smp_wmb() E2K_WAIT(_st_c | _sas | _mt) +#define __smp_rmb() E2K_WAIT(_ld_c | _lal | _mt) + +#define dma_rmb() __smp_rmb() +#define dma_wmb() __smp_wmb() + +#define __smp_read_barrier_depends() NATIVE_HWBUG_AFTER_LD_ACQ() + + +#if CONFIG_CPU_ISET >= 5 +# define __smp_mb__after_atomic() barrier() +# define __smp_mb__before_atomic() E2K_WAIT(_st_c | _las | _ld_c | _lal | _mt) +#elif CONFIG_CPU_ISET >= 3 +/* Atomic operations are serializing since e2s */ +# define __smp_mb__after_atomic() \ +do { \ + 
barrier(); \ + NATIVE_HWBUG_AFTER_LD_ACQ(); \ +} while (0) +# define __smp_mb__before_atomic() barrier() +#else +# define __smp_mb__after_atomic() E2K_WAIT(_st_c) +# define __smp_mb__before_atomic() barrier() +#endif + +extern int __smp_store_release_bad(void) __attribute__((noreturn)); +#if CONFIG_CPU_ISET >= 6 +# define __smp_store_release(p, v) \ +do { \ + __typeof__(*(p)) __ssr_v = (v); \ + switch (sizeof(*p)) { \ + case 1: STORE_NV_MAS((p), __ssr_v, MAS_STORE_RELEASE_V6(MAS_MT_1), b, "memory"); break; \ + case 2: STORE_NV_MAS((p), __ssr_v, MAS_STORE_RELEASE_V6(MAS_MT_1), h, "memory"); break; \ + case 4: STORE_NV_MAS((p), __ssr_v, MAS_STORE_RELEASE_V6(MAS_MT_1), w, "memory"); break; \ + case 8: STORE_NV_MAS((p), __ssr_v, MAS_STORE_RELEASE_V6(MAS_MT_1), d, "memory"); break; \ + default: __smp_store_release_bad(); break; \ + } \ +} while (0) +#else +# define __smp_store_release(p, v) \ +do { \ + compiletime_assert(sizeof(*p) == 1 || sizeof(*p) == 2 || \ + sizeof(*p) == 4 || sizeof(*p) == 8, \ + "Need native word sized stores/loads for atomicity."); \ + E2K_WAIT(_st_c | _sas | _ld_c | _sal | _mt); \ + WRITE_ONCE(*(p), (v)); \ +} while (0) +#endif /* CONFIG_CPU_ISET >= 6 */ + +/* + * store_release() - same as __smp_store_release but acts on device accesses too + */ +#define store_release_v2 __smp_store_release +#define store_release_v6(p, v) \ +do { \ + __typeof__(*(p)) __sr6_v = (v); \ + switch (sizeof(*p)) { \ + case 1: STORE_NV_MAS((p), __sr6_v, MAS_STORE_RELEASE_V6(MAS_MT_0), b, "memory"); break; \ + case 2: STORE_NV_MAS((p), __sr6_v, MAS_STORE_RELEASE_V6(MAS_MT_0), h, "memory"); break; \ + case 4: STORE_NV_MAS((p), __sr6_v, MAS_STORE_RELEASE_V6(MAS_MT_0), w, "memory"); break; \ + case 8: STORE_NV_MAS((p), __sr6_v, MAS_STORE_RELEASE_V6(MAS_MT_0), d, "memory"); break; \ + default: __smp_store_release_bad(); break; \ + } \ +} while (0) +#define store_release(p, v) \ +do { \ + if (cpu_has(CPU_FEAT_ISET_V6)) \ + store_release_v6((p), (v)); \ + else \ + 
store_release_v2((p), (v)); \ +} while (0) + +#if CONFIG_CPU_ISET >= 6 +extern int __smp_load_acquire_bad(void) __attribute__((noreturn)); +# define __smp_load_acquire(p) \ +({ \ + union { typeof(*(p)) __ret_la; char __c[1]; } __u; \ + switch (sizeof(*p)) { \ + case 1: LOAD_NV_MAS((p), (*(__u8 *)__u.__c), MAS_LOAD_ACQUIRE_V6(MAS_MT_1), b, "memory"); \ + break; \ + case 2: LOAD_NV_MAS((p), (*(__u16 *)__u.__c), MAS_LOAD_ACQUIRE_V6(MAS_MT_1), h, "memory"); \ + break; \ + case 4: LOAD_NV_MAS((p), (*(__u32 *)__u.__c), MAS_LOAD_ACQUIRE_V6(MAS_MT_1), w, "memory"); \ + break; \ + case 8: LOAD_NV_MAS((p), (*(__u64 *)__u.__c), MAS_LOAD_ACQUIRE_V6(MAS_MT_1), d, "memory"); \ + break; \ + default: __smp_load_acquire_bad(); break; \ + } \ + __u.__ret_la; \ +}) +#else +# define __smp_load_acquire(p) \ +({ \ + typeof(*(p)) ___p1 = READ_ONCE(*(p)); \ + compiletime_assert(sizeof(*p) == 1 || sizeof(*p) == 2 || \ + sizeof(*p) == 4 || sizeof(*p) == 8, \ + "Need native word sized stores/loads for atomicity."); \ + E2K_RF_WAIT_LOAD(___p1); \ + ___p1; \ +}) +#endif + +/* + * e2k is in-order architecture, thus loads are not speculated by hardware + * and we only have to protect against compiler optimizations + */ +#define smp_acquire__after_ctrl_dep() barrier() + +/** + * array_index_mask_nospec - hide 'index' from compiler so that + * it does not try to load array speculatively across this point + * + * On e2k there is no hardware speculation, only software, so the + * trick with mask is not needed. + */ +#define array_index_mask_nospec array_index_mask_nospec +static inline unsigned long array_index_mask_nospec(unsigned long index, + unsigned long size) +{ + OPTIMIZER_HIDE_VAR(index); + + return -1UL; +} + +/* + * Follow the example of RISC-V and forbid IO crossing of scheduling + * boundary by using mb() instead of smp_mb(). This should not have + * any measurable performance impact on e2k. 
The bad case is when + * task is preempted after writeX() and migrated to another CPU fast + * enough so that the CPU it was preempted on has not called any + * spin_unlock()'s yet. + */ +#define smp_mb__after_spinlock() mb() + +#include + +#endif /* _ASM_E2K_BARRIER_H */ diff --git a/arch/e2k/include/asm/bios_map.h b/arch/e2k/include/asm/bios_map.h new file mode 100644 index 000000000000..d5ffde9b97b4 --- /dev/null +++ b/arch/e2k/include/asm/bios_map.h @@ -0,0 +1,116 @@ +/* + * $Id: bios_map.h,v 1.1 2009/01/15 13:47:21 kostin Exp $ + * Bios cmos map distribution. + */ +#ifndef _E2K_BIOS_MAP_H_ +#define _E2K_BIOS_MAP_H_ + +#ifdef __KERNEL__ +#define ECMOS_PORT(ext) (0x70 + (ext)) +/* + * The yet supported machines all access the RTC index register via + * an ISA port access but the way to access the date register differs ... + */ +#define ECMOS_READ(addr, ext) ({ \ +outb_p((addr),ECMOS_PORT(ext + 0)); \ +inb_p(ECMOS_PORT(ext + 1)); \ +}) +#define ECMOS_WRITE(val, addr, ext) ({ \ +outb_p((addr),ECMOS_PORT(ext + 0)); \ +outb_p((val),ECMOS_PORT(ext + 1)); \ +}) + +static inline unsigned char bios_read(int addr) +{ + char byte; + if (addr & 0x80) byte = ECMOS_READ(addr - 0x80, 2); + else byte = ECMOS_READ(addr, 0); + return byte; +} + +static inline void bios_write(unsigned char val, int addr) +{ + if (addr & 0x80) ECMOS_WRITE(val, addr - 0x80, 2); + else ECMOS_WRITE(val, addr, 0); +} +#endif /* __KERNEL__ */ + +//#define bios_read(addr) ECMOS_READ(addr) +//#define bios_write(val, addr) ECMOS_WRITE(val, addr) + +#define BIOS_UNSET_ONE -1 + +#define name_length 15 +#define cmdline_length 127 + +#define CMOS_BASE 128 + 64 +#define CMOS_SIZE 64 +#define CMOS_FILE_LENGTH 15 + +#define BIOS_PROC_MASK CMOS_BASE + 0 +#define BIOS_DEV_NUM CMOS_BASE + 3 /* device number(0 - 3) */ +#define BIOS_AUTOBOOT_TIMER CMOS_BASE + 4 /* boot waiting seconds */ +#define BIOS_BOOT_ITEM CMOS_BASE + 5 /* boot item: kernel, lintel, + tests - '','','' */ +#define BIOS_BOOT_KNAME CMOS_BASE + 6 /* 
kernel name */ + +#define BIOS_TEST_FLAG 0x6c +#define BIOS_TEST_FLAG2 0x6d +#define BIOS_SERIAL_RATE 0x6e /* 3 - 38400 other - 115200 */ + +#define BIOS_MACHINE_TYPE CMOS_BASE + 28 /* architecture type */ + +#define BIOS_PASSWD_FLAG CMOS_BASE + 29 +#define BIOS_PASSWD_FLAG2 CMOS_BASE + 30 +#define BIOS_PASSWD1 CMOS_BASE + 31 +#define BIOS_PASSWD2 CMOS_BASE + 32 +#define BIOS_PASSWD3 CMOS_BASE + 33 +#define BIOS_PASSWD4 CMOS_BASE + 34 +#define BIOS_PASSWD5 CMOS_BASE + 35 +#define BIOS_PASSWD6 CMOS_BASE + 36 +#define BIOS_PASSWD7 CMOS_BASE + 37 +#define BIOS_PASSWD8 CMOS_BASE + 38 +#define BIOS_PASSWD9 CMOS_BASE + 39 +#define BIOS_PASSWD10 CMOS_BASE + 40 + +#define BIOS_CSUM CMOS_BASE + 61 /* checksum lsb */ +#define BIOS_CSUM2 CMOS_BASE + 62 /* checksum msb */ + +typedef struct e2k_bios_param { + char kernel_name[name_length + 1]; + char command_line[cmdline_length + 1]; + int booting_item; + int dev_num; + int serial_rate; + int autoboot_timer; + int machine_type; +} e2k_bios_param_t; + +#ifdef __KERNEL__ +static unsigned int _bios_csum(unsigned int counter, unsigned int len) +{ + unsigned int csum = 0; + + len = len + counter; + + + while(counter < len) { + csum += bios_read(counter); + counter++; + } + + return csum; +} + +static inline unsigned int _bios_checksum(void) +{ + unsigned int csum = 0; + + csum = _bios_csum( 106, 6); + csum += _bios_csum( 192, 21); + csum += _bios_csum( 220, 12 ); + + return csum; +} +#endif /* __KERNEL__ */ +#endif /*_E2K_BIOS_MAP_H_ */ diff --git a/arch/e2k/include/asm/bitops.h b/arch/e2k/include/asm/bitops.h new file mode 100644 index 000000000000..325b87dcb8cd --- /dev/null +++ b/arch/e2k/include/asm/bitops.h @@ -0,0 +1,63 @@ +#ifndef _E2K_BITOPS_H_ +#define _E2K_BITOPS_H_ + +#ifndef _LINUX_BITOPS_H +#error only can be included directly +#endif + +#include +#include +#include + +/* This is better than generic definition */ +static inline int fls(unsigned int x) +{ + return 8 * sizeof(int) - E2K_LZCNTS(x); +} + +static inline 
unsigned int __arch_hweight32(unsigned int w) +{ + return E2K_POPCNTS(w); +} + +static inline unsigned int __arch_hweight16(unsigned int w) +{ + return E2K_POPCNTS(w & 0xffff); +} + +static inline unsigned int __arch_hweight8(unsigned int w) +{ + return E2K_POPCNTS(w & 0xff); +} + +static inline unsigned long __arch_hweight64(unsigned long w) +{ + return E2K_POPCNTD(w); +} + +#include +#include +#include +#include +#include + +#include + +#if defined E2K_P2V && !defined CONFIG_BOOT_E2K +extern unsigned long boot_find_next_bit(const unsigned long *addr, + unsigned long size, unsigned long offset); +extern unsigned long boot_find_next_zero_bit(const unsigned long *addr, + unsigned long size, unsigned long offset); +# define find_next_bit boot_find_next_bit +# define find_next_zero_bit boot_find_next_zero_bit +#endif +#include + +#include +#include +#include +#include +#include +#include + +#endif /* _E2K_BITOPS_H_ */ diff --git a/arch/e2k/include/asm/bitrev.h b/arch/e2k/include/asm/bitrev.h new file mode 100644 index 000000000000..4db2bc7b3b5e --- /dev/null +++ b/arch/e2k/include/asm/bitrev.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_BITREV_H +#define __ASM_BITREV_H + +static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x) +{ + return __builtin_e2k_bitrevs(x); +} + +static __always_inline __attribute_const__ u16 __arch_bitrev16(u16 x) +{ + return __builtin_e2k_bitrevs((u32) x) >> 16; +} + +static __always_inline __attribute_const__ u8 __arch_bitrev8(u8 x) +{ + return __builtin_e2k_bitrevs((u32) x) >> 24; +} + +#endif diff --git a/arch/e2k/include/asm/bitsperlong.h b/arch/e2k/include/asm/bitsperlong.h new file mode 100644 index 000000000000..0697e907d859 --- /dev/null +++ b/arch/e2k/include/asm/bitsperlong.h @@ -0,0 +1,8 @@ +#ifndef __ASM_E2K_BITSPERLONG_H +#define __ASM_E2K_BITSPERLONG_H + +#define __BITS_PER_LONG 64 + +#include + +#endif /* __ASM_E2K_BITSPERLONG_H */ diff --git a/arch/e2k/include/asm/boot_flags.h 
b/arch/e2k/include/asm/boot_flags.h new file mode 100644 index 000000000000..2bb35c0d3117 --- /dev/null +++ b/arch/e2k/include/asm/boot_flags.h @@ -0,0 +1,123 @@ +/* + * E2K boot info flags support. + */ +#ifndef _E2K_BOOT_FLAGS_H +#define _E2K_BOOT_FLAGS_H + +#include + +#include +#include +#include +#include + +/* + * bootblock manipulations (read/write/set/reset) in virtual kernel mode + * on physical level: + * write through and uncachable access on physical address + * bootblock virtual address can be only read + */ + +#define DO_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field, mas) \ +({ \ + u64 field_value; \ + switch (sizeof((bootblock_p)->blk_field)) { \ + case 1: \ + field_value = \ + NATIVE_READ_MAS_B(&((bootblock_p)->blk_field), \ + mas); \ + break; \ + case 2: \ + field_value = \ + NATIVE_READ_MAS_H(&((bootblock_p)->blk_field), \ + mas); \ + break; \ + case 4: \ + field_value = \ + NATIVE_READ_MAS_W(&((bootblock_p)->blk_field), \ + mas); \ + break; \ + case 8: \ + field_value = \ + NATIVE_READ_MAS_D(&((bootblock_p)->blk_field), \ + mas); \ + break; \ + default: \ + BUG(); \ + } \ + (field_value); \ +}) + +#define DO_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, field_value, mas) \ +({ \ + switch (sizeof((bootblock_p)->blk_field)) { \ + case 1: \ + NATIVE_WRITE_MAS_B(&((bootblock_p)->blk_field), \ + (field_value), mas); \ + break; \ + case 2: \ + NATIVE_WRITE_MAS_H(&((bootblock_p)->blk_field), \ + (field_value), mas); \ + break; \ + case 4: \ + NATIVE_WRITE_MAS_W(&((bootblock_p)->blk_field), \ + (field_value), mas); \ + break; \ + case 8: \ + NATIVE_WRITE_MAS_D(&((bootblock_p)->blk_field), \ + (field_value), mas); \ + break; \ + default: \ + BUG(); \ + } \ +}) +#define NATIVE_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) \ + DO_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field, MAS_IOADDR) +#define NATIVE_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, field_value) \ + DO_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, \ + field_value, MAS_IOADDR) + +#ifdef 
CONFIG_KVM_GUEST_KERNEL +/* it is native guest kernel */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#else /* ! CONFIG_KVM_GUEST_KERNEL && ! CONFIG_PARAVIRT_GUEST */ +/* it is native kernel without virtualization support */ +/* or host kernel with virtualization support */ +#define READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) \ + NATIVE_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) +#define WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, field_value) \ + NATIVE_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, \ + field_value) +#endif /* ! CONFIG_KVM_GUEST_KERNEL */ + +static inline u64 +read_bootblock_flags(bootblock_struct_t *bootblock) +{ + return READ_BOOTBLOCK_FIELD(bootblock, kernel_flags); +} + +static inline void +write_bootblock_flags(bootblock_struct_t *bootblock, u64 new_flags) +{ + WRITE_BOOTBLOCK_FIELD(bootblock, boot_flags, new_flags); + WRITE_BOOTBLOCK_FIELD(bootblock, kernel_flags, new_flags); +} + +static inline void +set_bootblock_flags(bootblock_struct_t *bootblock, u64 new_flags) +{ + u64 cur_flags = read_bootblock_flags(bootblock); + write_bootblock_flags(bootblock, cur_flags | new_flags); +} + +static inline void +reset_bootblock_flags(bootblock_struct_t *bootblock, u64 new_flags) +{ + u64 cur_flags = read_bootblock_flags(bootblock); + write_bootblock_flags(bootblock, cur_flags & ~new_flags); +} + +#endif /* _E2K_BOOT_FLAGS_H */ diff --git a/arch/e2k/include/asm/boot_profiling.h b/arch/e2k/include/asm/boot_profiling.h new file mode 100644 index 000000000000..64353a99d7ce --- /dev/null +++ b/arch/e2k/include/asm/boot_profiling.h @@ -0,0 +1,34 @@ +#ifndef _ASM_E2K_BOOT_PROFILING_H +#define _ASM_E2K_BOOT_PROFILING_H + +#include +#include + +#ifdef CONFIG_BOOT_TRACE +extern void notrace boot_add_boot_trace_event(char *name); + +/* EARLY_BOOT_TRACEPOINT should be used if virtual memory + * is not working yet. It does not support formatted strings. 
*/ +# define EARLY_BOOT_TRACEPOINT(name) \ + boot_add_boot_trace_event(name) + +#ifdef CONFIG_RECOVERY +/* Clears boot trace data (needed to trace recovery times). */ +void reinitialize_boot_trace_data(void); +#endif /* CONFIG_RECOVERY */ + +#define boot_trace_get_cycles get_cycles + +/* Convert boot counter cycles to ms */ +static inline u64 boot_cycles_to_ms(u64 cycles) +{ + u64 cpu_hz = cpu_data[0].proc_freq; + + return MSEC_PER_SEC * cycles / cpu_hz; +} +#else /* !CONFIG_BOOT_TRACE */ +# define EARLY_BOOT_TRACEPOINT(name) +#endif /* CONFIG_BOOT_TRACE */ + +#endif /* _ASM_E2K_BOOT_PROFILING_H */ + diff --git a/arch/e2k/include/asm/boot_recovery.h b/arch/e2k/include/asm/boot_recovery.h new file mode 100644 index 000000000000..4a669ea8fbad --- /dev/null +++ b/arch/e2k/include/asm/boot_recovery.h @@ -0,0 +1,42 @@ +/* $Id: boot_recovery.h,v 1.12 2009/06/29 11:52:31 atic Exp $ + * + * boot-time recovery of kernel from control point. + */ + +#ifndef _E2K_BOOT_RECOVERY_H +#define _E2K_BOOT_RECOVERY_H + +#include +#include +#include + +/* To use stgd upon kernel entry task_struct must be aligned + * (since %gd_lo.base points to it) */ +struct aligned_task { + struct task_struct t; +} __aligned(E2K_ALIGN_GLOBALS_SZ); +extern struct aligned_task task_to_restart[]; +extern struct task_struct *task_to_recover; + +/* + * Forwards of boot-time functions to recover system state + */ + +extern void boot_recovery(bootblock_struct_t *bootblock); +extern void recover_kernel(void); +extern int restart_system(void (*restart_func)(void *), void *arg); + +#define full_phys_mem nodes_phys_mem + +#define START_KERNEL_SYSCALL 12 + +extern inline void +scall2(bootblock_struct_t *bootblock) +{ + (void) E2K_SYSCALL(START_KERNEL_SYSCALL, /* Trap number */ + 0, /* empty sysnum */ + 1, /* single argument */ + (long) bootblock); /* the argument */ +} + +#endif /* _E2K_BOOT_RECOVERY_H */ diff --git a/arch/e2k/include/asm/bootinfo.h b/arch/e2k/include/asm/bootinfo.h new file mode 100644 index 
000000000000..1c3a637d4685 --- /dev/null +++ b/arch/e2k/include/asm/bootinfo.h @@ -0,0 +1,10 @@ +#ifndef _E2K_BOOTINFO_H_ +#define _E2K_BOOTINFO_H_ + +#ifdef __KERNEL__ +#include +#endif + +#include + +#endif /* _E2K_BOOTINFO_H_ */ diff --git a/arch/e2k/include/asm/bug.h b/arch/e2k/include/asm/bug.h new file mode 100644 index 000000000000..178748bd79b1 --- /dev/null +++ b/arch/e2k/include/asm/bug.h @@ -0,0 +1,20 @@ +#ifndef _E2K_BUG_H +#define _E2K_BUG_H + +#ifdef CONFIG_BUG +# include + +# define BUG() \ +do { \ + __EMIT_BUG(0); \ + unreachable(); \ +} while (0) + +# define __WARN_FLAGS(flags) __EMIT_BUG(BUGFLAG_WARNING|(flags)); + +# define HAVE_ARCH_BUG +#endif /* CONFIG_BUG */ + +#include + +#endif /* _E2K_BUG_H */ diff --git a/arch/e2k/include/asm/byteorder.h b/arch/e2k/include/asm/byteorder.h new file mode 100644 index 000000000000..1bd1c69ea1b9 --- /dev/null +++ b/arch/e2k/include/asm/byteorder.h @@ -0,0 +1,10 @@ +#ifndef _E2K_BYTEORDER_H_ +#define _E2K_BYTEORDER_H_ + +#include + +#define __BYTEORDER_HAS_U64__ + +#include + +#endif /* _E2K_BYTEORDER_H_ */ diff --git a/arch/e2k/include/asm/cache.h b/arch/e2k/include/asm/cache.h new file mode 100644 index 000000000000..c2dcc8e4b765 --- /dev/null +++ b/arch/e2k/include/asm/cache.h @@ -0,0 +1,81 @@ +#ifndef _E2K_CACHE_H_ +#define _E2K_CACHE_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define _max_(a, b) ((a) > (b) ? 
(a) : (b)) +#define _max3_(a, b, c) _max_((a), _max_((b), (c))) + +#ifdef CONFIG_E2K_MACHINE +# if defined(CONFIG_E2K_ES2_DSP) || defined(CONFIG_E2K_ES2_RU) +# define L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT +# define L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT +# elif defined(CONFIG_E2K_E2S) +# define L1_CACHE_SHIFT E2S_L1_CACHE_SHIFT +# define L2_CACHE_SHIFT E2S_L2_CACHE_SHIFT +# elif defined(CONFIG_E2K_E8C) +# define L1_CACHE_SHIFT E8C_L1_CACHE_SHIFT +# define L2_CACHE_SHIFT E8C_L2_CACHE_SHIFT +# define L3_CACHE_SHIFT E8C_L3_CACHE_SHIFT +# elif defined(CONFIG_E2K_E1CP) +# define L1_CACHE_SHIFT E1CP_L1_CACHE_SHIFT +# define L2_CACHE_SHIFT E1CP_L2_CACHE_SHIFT +# elif defined(CONFIG_E2K_E8C2) +# define L1_CACHE_SHIFT E8C2_L1_CACHE_SHIFT +# define L2_CACHE_SHIFT E8C2_L2_CACHE_SHIFT +# define L3_CACHE_SHIFT E8C2_L3_CACHE_SHIFT +# elif defined(CONFIG_E2K_E12C) +# define L1_CACHE_SHIFT E12C_L1_CACHE_SHIFT +# define L2_CACHE_SHIFT E12C_L2_CACHE_SHIFT +# define L3_CACHE_SHIFT E12C_L3_CACHE_SHIFT +# elif defined(CONFIG_E2K_E16C) +# define L1_CACHE_SHIFT E16C_L1_CACHE_SHIFT +# define L2_CACHE_SHIFT E16C_L2_CACHE_SHIFT +# define L3_CACHE_SHIFT E16C_L3_CACHE_SHIFT +# elif defined(CONFIG_E2K_E2C3) +# define L1_CACHE_SHIFT E2C3_L1_CACHE_SHIFT +# define L2_CACHE_SHIFT E2C3_L2_CACHE_SHIFT +# else +# error "E2K MACHINE type does not defined" +# endif +# ifndef L3_CACHE_SHIFT +# define L3_CACHE_SHIFT 0 +# endif +#else /* ! CONFIG_E2K_MACHINE */ +/* + * FIXME: Take it in mind while adding new cpu type + */ +# define L1_CACHE_SHIFT_MAX ES2_L1_CACHE_SHIFT +# define L2_CACHE_SHIFT_MAX ES2_L2_CACHE_SHIFT +# define L3_CACHE_SHIFT_MAX E8C_L3_CACHE_SHIFT + +# define L1_CACHE_SHIFT L1_CACHE_SHIFT_MAX +# define L2_CACHE_SHIFT L2_CACHE_SHIFT_MAX +# define L3_CACHE_SHIFT L3_CACHE_SHIFT_MAX +#endif /* CONFIG_E2K_MACHINE */ + +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define L2_CACHE_BYTES (1 << L2_CACHE_SHIFT) +#define L3_CACHE_BYTES (L3_CACHE_SHIFT ? 
(1 << L3_CACHE_SHIFT) : 0) + +/* Stores pass through L1$, so we should use the biggest size. */ +#define SMP_CACHE_BYTES _max3_(L1_CACHE_BYTES, L2_CACHE_BYTES, \ + L3_CACHE_BYTES) +#define INTERNODE_CACHE_SHIFT _max3_(L1_CACHE_SHIFT, L2_CACHE_SHIFT, \ + L3_CACHE_SHIFT) + +#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT) + +#define cache_line_size() _max3_(L1_CACHE_BYTES, L2_CACHE_BYTES, \ + L3_CACHE_BYTES) + +#define __read_mostly __attribute__((__section__(".data..read_mostly"))) + +#endif /* _E2K_CACHE_H_ */ diff --git a/arch/e2k/include/asm/cacheflush.h b/arch/e2k/include/asm/cacheflush.h new file mode 100644 index 000000000000..2b272d6686f8 --- /dev/null +++ b/arch/e2k/include/asm/cacheflush.h @@ -0,0 +1,243 @@ +/* + * pgalloc.h: the functions and defines necessary to allocate + * page tables. + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ +#ifndef _E2K_CACHEFLUSH_H +#define _E2K_CACHEFLUSH_H + +#include +#include +#include + +#include +#include + +#undef DEBUG_MR_MODE +#undef DebugMR +#define DEBUG_MR_MODE 0 /* MMU registers access */ +#define DebugMR(...) DebugPrint(DEBUG_MR_MODE, ##__VA_ARGS__) + +/* + * Caches flushing routines. This is the kind of stuff that can be very + * expensive, so should try to avoid them whenever possible. 
+ */ + +/* + * Caches aren't brain-dead on the E2K + */ +#define flush_cache_all() do { } while (0) +#define flush_cache_mm(mm) do { } while (0) +#define flush_cache_dup_mm(mm) do { } while (0) +#define flush_cache_range(mm, start, end) do { } while (0) +#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) +#define flush_page_to_ram(page) do { } while (0) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 +#define flush_dcache_page(page) do { } while (0) +#define flush_dcache_mmap_lock(mapping) do { } while (0) +#define flush_dcache_mmap_unlock(mapping) do { } while (0) +#define flush_cache_vmap(start, end) do { } while (0) +#define flush_cache_vunmap(start, end) do { } while (0) + +/* + * Invalidate all ICAHES of the host processor + */ + +typedef struct icache_range_array { + icache_range_t *ranges; + int count; + struct mm_struct *mm; +} icache_range_array_t; + +extern void native_flush_icache_all(void); +extern void native_flush_icache_range(e2k_addr_t start, e2k_addr_t end); +extern void native_flush_icache_range_array( + icache_range_array_t *icache_range_arr); +extern void native_flush_icache_page(struct vm_area_struct *vma, + struct page *page); + +#ifndef CONFIG_SMP +#define flush_icache_all() __flush_icache_all() +#define flush_icache_range(start, end) __flush_icache_range(start, end) +#define flush_icache_range_array __flush_icache_range_array +#define flush_icache_page(vma, page) __flush_icache_page(vma, page) + +#define smp_flush_icache_all() +#define native_smp_flush_icache_range(start, end) +#define native_smp_flush_icache_range_array(icache_range_arr) +#define native_smp_flush_icache_page(vma, page) +#define native_smp_flush_icache_kernel_line(addr) +#else /* CONFIG_SMP */ +extern void smp_flush_icache_all(void); +extern void native_smp_flush_icache_range(e2k_addr_t start, e2k_addr_t end); +extern void native_smp_flush_icache_range_array( + icache_range_array_t *icache_range_arr); +extern void native_smp_flush_icache_page(struct vm_area_struct 
*vma, + struct page *page); +extern void native_smp_flush_icache_kernel_line(e2k_addr_t addr); + +#define flush_icache_all() smp_flush_icache_all() + +#define flush_icache_range(start, end) \ +({ \ + if (cpu_has(CPU_FEAT_FLUSH_DC_IC)) \ + __flush_icache_range(start, end); \ + else \ + smp_flush_icache_range(start, end); \ +}) + +#define flush_icache_range_array smp_flush_icache_range_array + +#define flush_icache_page(vma, page) \ +({ \ + if (cpu_has(CPU_FEAT_FLUSH_DC_IC)) \ + __flush_icache_page(vma, page); \ + else \ + smp_flush_icache_page(vma, page); \ +}) + +#endif /* ! (CONFIG_SMP) */ + +/* + * Some usefull routines to flush caches + */ + +/* + * Write Back and Invalidate all caches (instruction and data). + * "local_" versions work on the calling CPU only. + */ +extern void local_write_back_cache_all(void); +extern void local_write_back_cache_range(unsigned long start, size_t size); +extern void write_back_cache_all(void); +extern void write_back_cache_range(unsigned long start, size_t size); + +/* + * Flush multiple DCACHE lines + */ +static inline void +native_flush_DCACHE_range(void *addr, size_t len) +{ + char *cp, *end; + unsigned long stride; + + DebugMR("Flush DCACHE range: virtual addr 0x%lx, len %lx\n", addr, len); + + /* Although L1 cache line is 32 bytes, coherency works + * with 64 bytes granularity. 
So a single flush_dc_line + * can flush _two_ lines from L1 */ + stride = SMP_CACHE_BYTES; + + end = PTR_ALIGN(addr + len, SMP_CACHE_BYTES); + + E2K_WAIT_ST; + for (cp = addr; cp < end; cp += stride) + flush_DCACHE_line((unsigned long) cp); + E2K_WAIT_FLUSH; +} + +/* + * Clear multiple DCACHE L1 lines + */ +static inline void +native_clear_DCACHE_L1_range(void *virt_addr, size_t len) +{ + unsigned long cp; + unsigned long end = (unsigned long) virt_addr + len; + unsigned long stride; + + stride = cacheinfo_get_l1d_line_size(); + + for (cp = (u64) virt_addr; cp < end; cp += stride) + clear_DCACHE_L1_line(cp); +} + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native guest kernel */ +#include +#else /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ +/* it is native kernel without virtualization support */ +/* or native kernel with virtualization support */ +static inline void +smp_flush_icache_range(e2k_addr_t start, e2k_addr_t end) +{ + native_smp_flush_icache_range(start, end); +} +static inline void +smp_flush_icache_range_array(icache_range_array_t *icache_range_arr) +{ + native_smp_flush_icache_range_array(icache_range_arr); +} +static inline void +smp_flush_icache_page(struct vm_area_struct *vma, struct page *page) +{ + native_smp_flush_icache_page(vma, page); +} +static inline void +smp_flush_icache_kernel_line(e2k_addr_t addr) +{ + native_smp_flush_icache_kernel_line(addr); +} + +static inline void +__flush_icache_all(void) +{ + native_flush_icache_all(); +} +static inline void +__flush_icache_range(e2k_addr_t start, e2k_addr_t end) +{ + native_flush_icache_range(start, end); +} +static inline void +__flush_icache_range_array(icache_range_array_t *icache_range_arr) +{ + native_flush_icache_range_array(icache_range_arr); +} +static inline void +__flush_icache_page(struct vm_area_struct *vma, struct page *page) +{ + native_flush_icache_page(vma, page); 
+} + +static inline void +flush_DCACHE_range(void *addr, size_t len) +{ + native_flush_DCACHE_range(addr, len); +} +static inline void +clear_DCACHE_L1_range(void *virt_addr, size_t len) +{ + native_clear_DCACHE_L1_range(virt_addr, len); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +static inline void copy_to_user_page(struct vm_area_struct *vma, + struct page *page, unsigned long vaddr, void *dst, + const void *src, unsigned long len) +{ + if (IS_ALIGNED((unsigned long) dst, 8) && + IS_ALIGNED((unsigned long) src, 8) && IS_ALIGNED(len, 8)) { + tagged_memcpy_8(dst, src, len); + } else { + memcpy(dst, src, len); + } + flush_icache_range((unsigned long) dst, (unsigned long) dst + len); +} + +static inline void copy_from_user_page(struct vm_area_struct *vma, + struct page *page, unsigned long vaddr, void *dst, + const void *src, size_t len) +{ + if (IS_ALIGNED((unsigned long) dst, 8) && + IS_ALIGNED((unsigned long) src, 8) && IS_ALIGNED(len, 8)) { + tagged_memcpy_8(dst, src, len); + } else { + memcpy(dst, src, len); + } +} + +#endif /* _E2K_CACHEFLUSH_H */ diff --git a/arch/e2k/include/asm/checksum.h b/arch/e2k/include/asm/checksum.h new file mode 100644 index 000000000000..2a9db9568b95 --- /dev/null +++ b/arch/e2k/include/asm/checksum.h @@ -0,0 +1,141 @@ +#ifndef _E2K_CHECKSUM_H_ +#define _E2K_CHECKSUM_H_ + +#include +#include + +extern unsigned int __pure e2k_do_csum(const unsigned char *buff, int len); + +/* + * Fold a partial checksum + */ +#define csum_fold csum_fold +static inline __sum16 csum_fold(__wsum csum) +{ + u32 sum = (__force u32)csum; + + return (__force __sum16) ((~sum - __builtin_e2k_scls(sum, 16)) >> 16); +} + +static inline u32 from64to32(u64 x) +{ + x += __builtin_e2k_scld(x, 32); + return (u32) (x >> 32); +} + +/* + * ihl is always 5 or greater, almost always is 5, + * and iph is word aligned the majority of the time. 
+ */ +static inline __wsum ip_fast_csum_nofold_maybe_unaligned(const void *iph, unsigned int ihl) +{ + const u32 *iph32 = iph; + size_t i; + u64 sum; + + sum = (u64) iph32[0] + (u64) iph32[1] + (u64) iph32[2] + + (u64) iph32[3] + (u64) iph32[4]; + + if (unlikely(ihl > 5)) { + for (i = 5; i < ihl; i++) + sum += (u64) iph32[i]; + } + + return (__force __wsum) from64to32(sum); +} +#define ip_fast_csum ip_fast_csum +static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) +{ + if (cpu_has(CPU_HWBUG_UNALIGNED_LOADS) && + !IS_ALIGNED((unsigned long) iph, 4)) + return (__force __sum16) ~e2k_do_csum(iph, ihl*4); + else + return csum_fold(ip_fast_csum_nofold_maybe_unaligned(iph, ihl)); + +} + +static inline u32 add32_with_carry(u32 a, u32 b) +{ + u64 arg1 = ((u64) a << 32ULL) | (u64) b; + u64 arg2 = ((u64) b << 32ULL) | (u64) a; + + return (arg1 + arg2) >> 32ULL; +} + +#define HAVE_ARCH_CSUM_ADD +static inline __wsum csum_add(__wsum csum, __wsum addend) +{ + return (__force __wsum) add32_with_carry((__force u32) csum, + (__force u32) addend); +} + + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +__wsum __csum_partial(const void *buff, int len, __wsum sum); + +static inline __wsum csum_partial(const void *buff, int len, __wsum sum) +{ + if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0 && + !cpu_has(CPU_HWBUG_UNALIGNED_LOADS)) { + u64 sum_64 = (__force u32) sum; + + if (len == 2) + sum_64 += *(const u16 *) buff; + if (len >= 4) + sum_64 += *(const u32 *) buff; + if (len == 6) + sum_64 += *(const u16 *) (buff + 4); + if (len >= 8) + sum_64 += *(const u32 *) (buff + 4); + if (len == 10) + sum_64 += *(const u16 *) (buff + 8); + if (len >= 12) + 
sum_64 += *(const u32 *) (buff + 8); + if (len == 14) + sum_64 += *(const u16 *) (buff + 12); + if (len >= 16) + sum_64 += *(const u32 *) (buff + 12); + + sum = from64to32(sum_64); + } else if (__builtin_constant_p(len) && (len & 3) == 0 && + !cpu_has(CPU_HWBUG_UNALIGNED_LOADS)) { + sum = csum_add(sum, ip_fast_csum_nofold_maybe_unaligned(buff, len >> 2)); + } else { + prefetch((__force void *) buff); + sum = __csum_partial(buff, len, sum); + } + return sum; +} + +#define csum_tcpudp_nofold csum_tcpudp_nofold +static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum) +{ + u64 s = (__force u32) sum; + + s += (__force u32) saddr; + s += (__force u32) daddr; + s += (proto + len) << 8; + return (__force __wsum) from64to32(s); +} + +#define _HAVE_ARCH_IPV6_CSUM +__sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, __u8 proto, __wsum csum); + + +#include + +#endif /* _E2K_CHECKSUM_H_ */ diff --git a/arch/e2k/include/asm/clkr.h b/arch/e2k/include/asm/clkr.h new file mode 100644 index 000000000000..a1d6bff0443d --- /dev/null +++ b/arch/e2k/include/asm/clkr.h @@ -0,0 +1,22 @@ +#ifndef _ASM_E2K_CLKR_H +#define _ASM_E2K_CLKR_H + +#include +#include + +extern __interrupt u64 fast_syscall_read_clkr(void); + +extern u64 last_clkr; +DECLARE_PER_CPU(u64, clkr_offset); + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized guest and host kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#else /* ! CONFIG_PARAVIRT_GUEST && ! 
CONFIG_KVM_GUEST_KERNEL */ +/* native kernel with or without virtualization support */ +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* _ASM_E2K_CLKR_H */ diff --git a/arch/e2k/include/asm/clock_info.h b/arch/e2k/include/asm/clock_info.h new file mode 100644 index 000000000000..1d919050292f --- /dev/null +++ b/arch/e2k/include/asm/clock_info.h @@ -0,0 +1,109 @@ +/* + * Kernel performance measuring tool and support + */ +#ifndef _E2K_CLOCK_INFO_H +#define _E2K_CLOCK_INFO_H + +#include + +#ifndef __ASSEMBLY__ +#include +#endif /* __ASSEMBLY__ */ + +#ifndef __ASSEMBLY__ + +typedef u64 e2k_clock_t; + +typedef enum { + SYSTEM_CALL_TT = 1, /* system calls */ + TRAP_TT /* traps */ +} times_type_t; + +typedef struct { + int syscall_num; /* # of system call */ + int signals_num; /* number of handled signals */ + e2k_clock_t start; /* start clock of system call */ + e2k_clock_t end; /* end clock */ + e2k_clock_t pt_regs_set; /* pt_regs structure is set */ + e2k_clock_t save_stack_regs; + e2k_clock_t save_sys_regs; + e2k_clock_t save_stacks_state; + e2k_clock_t save_thread_state; + e2k_clock_t scall_switch; + e2k_clock_t scall_done; + e2k_clock_t restore_thread_state; + e2k_clock_t check_pt_regs; + e2k_clock_t do_signal_start; + e2k_clock_t do_signal_done; + e2k_clock_t restore_start; + e2k_clock_t restore_user_regs; + e2k_pshtp_t pshtp; + u64 psp_ind; + e2k_pshtp_t pshtp_to_done; + u64 psp_ind_to_done; +} scall_times_t; + +typedef struct { + e2k_clock_t start; /* start clock of system call */ + e2k_clock_t end; /* end clock */ + e2k_clock_t pt_regs_set; /* pt_regs structure is set */ + e2k_clock_t signal_done; + int nr_TIRs; + e2k_tir_t TIRs[TIR_NUM]; + e2k_psp_hi_t psp_hi; + e2k_pshtp_t pshtp; + u64 psp_ind; + e2k_pcsp_hi_t pcsp_hi; + u64 ctpr1; + u64 ctpr2; + u64 ctpr3; + u8 ps_bounds; + u8 pcs_bounds; + int trap_num; + e2k_psp_hi_t psp_hi_to_done; + e2k_pshtp_t pshtp_to_done; + e2k_pcsp_hi_t pcsp_hi_to_done; + u64 ctpr1_to_done; + u64 ctpr2_to_done; + u64 ctpr3_to_done; 
+} trap_times_t; + +typedef struct kernel_times { + times_type_t type; + union { + scall_times_t syscall; /* system calls */ + trap_times_t trap; /* traps */ + } of; +} kernel_times_t; + +#ifdef CONFIG_MAX_KERNEL_TIMES_NUM +#define MAX_KERNEL_TIMES_NUM CONFIG_MAX_KERNEL_TIMES_NUM +#else +#define MAX_KERNEL_TIMES_NUM 20 +#endif /* CONFIG_MAX_KERNEL_TIMES_NUM */ + +#define INCR_KERNEL_TIMES_COUNT(ti) { \ + (ti)->times_index ++; \ + (ti)->times_num ++; \ + if ((ti)->times_index >= MAX_KERNEL_TIMES_NUM) \ + (ti)->times_index = 0; \ + } +#define GET_DECR_KERNEL_TIMES_COUNT(ti, count) { \ + (count) = (ti)->times_index; \ + if ((ti)->times_num == 0) \ + (ti)->times_num = 1; \ + (count) --; \ + if ((count) < 0) \ + (count) = MAX_KERNEL_TIMES_NUM - 1; \ + } +#define E2K_SAVE_CLOCK_REG(clock) { \ + (clock) = E2K_GET_DSREG(clkr); \ + } +#define CALCULATE_CLOCK_TIME(start_clock, end_clock) \ + ((end_clock) - (start_clock)) + +extern void sys_e2k_print_kernel_times(struct task_struct *task, + kernel_times_t *times, long times_num, int times_index); + +#endif /* __ASSEMBLY__ */ +#endif /* _E2K_THREAD_INFO_H */ diff --git a/arch/e2k/include/asm/cmos.h b/arch/e2k/include/asm/cmos.h new file mode 100644 index 000000000000..a6b204a869b2 --- /dev/null +++ b/arch/e2k/include/asm/cmos.h @@ -0,0 +1,37 @@ +#ifndef _ASM_CMOS_H +#define _ASM_CMOS_H + +#include +#include + +#ifndef RTC_PORT +#define RTC_PORT(x) (0x70 + (x)) +#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */ +#endif + +static inline char mc146818_cmos_read(char addr) +{ + if (HAS_MACHINE_E2K_IOHUB) { + WARN_ONCE(1, "Warning: CMOS_READ attempted on a machine without a functioning CMOS\n"); + return 0; + } + + outb_p((addr),RTC_PORT(0)); + return inb_p(RTC_PORT(1)); +} + +static inline void mc146818_cmos_write(char val, char addr) +{ + if (HAS_MACHINE_E2K_IOHUB) { + WARN_ONCE(1, "Warning: CMOS_WRITE attempted on a machine without a functioning CMOS\n"); + return; + } + + outb_p(addr, RTC_PORT(0)); + outb_p(val, 
RTC_PORT(1)); +} + +#define CMOS_READ(addr) mc146818_cmos_read(addr) +#define CMOS_WRITE(val, addr) mc146818_cmos_write(val, addr) + +#endif diff --git a/arch/e2k/include/asm/cmpxchg.h b/arch/e2k/include/asm/cmpxchg.h new file mode 100644 index 000000000000..77136d1223df --- /dev/null +++ b/arch/e2k/include/asm/cmpxchg.h @@ -0,0 +1,102 @@ +#ifndef ASM_E2K_CMPXCHG_H +#define ASM_E2K_CMPXCHG_H + +#include +#include +#include +#include + +/* + * Non-existant functions to indicate usage errors at link time + * (or compile-time if the compiler implements __compiletime_error(). + */ +extern void __xchg_wrong_size(void) + __compiletime_error("Bad argument size for xchg"); +extern void __cmpxchg_wrong_size(void) + __compiletime_error("Bad argument size for cmpxchg"); + +#define __xchg(ptr, val, mem_model) \ +({ \ + volatile void *__x_ptr = (volatile void *) (ptr); \ + u64 __x_ret, __x_val = (u64) (val); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + __x_ret = __api_xchg_return(__x_val, (volatile u8 *) __x_ptr, \ + b, mem_model); \ + break; \ + case 2: \ + __x_ret = __api_xchg_return(__x_val, (volatile u16 *) __x_ptr, \ + h, mem_model); \ + break; \ + case 4: \ + __x_ret = __api_xchg_return(__x_val, (volatile u32 *) __x_ptr, \ + w, mem_model); \ + break; \ + case 8: \ + __x_ret = __api_xchg_return(__x_val, (volatile u64 *) __x_ptr, \ + d, mem_model); \ + break; \ + default: \ + __x_ret = 0; \ + __xchg_wrong_size(); \ + break; \ + } \ + (__typeof__(*(ptr))) __x_ret; \ +}) +#define xchg_relaxed(ptr, v) __xchg((ptr), (v), RELAXED_MB) +#define xchg_acquire(ptr, v) __xchg((ptr), (v), ACQUIRE_MB) +#define xchg_release(ptr, v) __xchg((ptr), (v), RELEASE_MB) +#define xchg(ptr, v) __xchg((ptr), (v), STRONG_MB) + +#define __cmpxchg(ptr, old, new, mem_model) \ +({ \ + volatile void *__x_ptr = (volatile void *) (ptr); \ + u64 __x_ret, __x_old = (u64) (old), __x_new = (u64) (new); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + __x_ret = __api_cmpxchg_return(__x_old, __x_new, \ + (volatile 
u8 *) __x_ptr, b, 0x4, mem_model); \ + break; \ + case 2: \ + __x_ret = __api_cmpxchg_return(__x_old, __x_new, \ + (volatile u16 *) __x_ptr, h, 0x5, mem_model); \ + break; \ + case 4: \ + __x_ret = __api_cmpxchg_word_return(__x_old, __x_new, \ + (volatile u32 *) __x_ptr, mem_model); \ + break; \ + case 8: \ + __x_ret = __api_cmpxchg_dword_return(__x_old, __x_new, \ + (volatile u64 *) __x_ptr, mem_model); \ + break; \ + default: \ + __x_ret = 0; \ + __cmpxchg_wrong_size(); \ + break; \ + } \ + (__typeof__(*(ptr))) __x_ret; \ +}) +#define cmpxchg_relaxed(ptr, o, n) __cmpxchg((ptr), (o), (n), RELAXED_MB) +#define cmpxchg_acquire(ptr, o, n) __cmpxchg((ptr), (o), (n), ACQUIRE_MB) +#define cmpxchg_release(ptr, o, n) __cmpxchg((ptr), (o), (n), RELEASE_MB) +#define cmpxchg(ptr, o, n) __cmpxchg((ptr), (o), (n), STRONG_MB) +#define cmpxchg_lock(ptr, o, n) __cmpxchg((ptr), (o), (n), LOCK_MB) + +#define __cmpxchg64(ptr, o, n, mem_model) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + (u64) __cmpxchg((ptr), (o), (n), mem_model); \ +}) +#define cmpxchg64_relaxed(ptr, o, n) __cmpxchg64((ptr), (o), (n), RELAXED_MB) +#define cmpxchg64_acquire(ptr, o, n) __cmpxchg64((ptr), (o), (n), ACQUIRE_MB) +#define cmpxchg64_release(ptr, o, n) __cmpxchg64((ptr), (o), (n), RELEASE_MB) +#define cmpxchg64(ptr, o, n) __cmpxchg64((ptr), (o), (n), STRONG_MB) + +#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \ + __api_cmpxchg_double(p1, p2, o1, o2, n1, n2) + +#define cmpxchg_local(ptr, o, n) cmpxchg_local((ptr), (o), (n)) +#define cmpxchg64_local(ptr, o, n) cmpxchg64((ptr), (o), (n)) + +#define system_has_cmpxchg_double() 1 +#endif /* ASM_E2K_CMPXCHG_H */ diff --git a/arch/e2k/include/asm/cnt_point.h b/arch/e2k/include/asm/cnt_point.h new file mode 100644 index 000000000000..bb4e3650740a --- /dev/null +++ b/arch/e2k/include/asm/cnt_point.h @@ -0,0 +1,359 @@ +/* $Id: cnt_point.h,v 1.3 2009/06/29 11:51:48 atic Exp $ + * + * Recovery the system from control point. 
+ */ + +#ifndef _E2K_CNT_POINT_H +#define _E2K_CNT_POINT_H + +#include +#include +#include + +/* + * Core dump header on the disk + * Total size of header should be one page of memory = one block on disk + * Note that the first kilobyte is reserved for boot loader or + * disk label stuff... + * The following first bytes should contain signature and the last bytes + * of header - magic value to indicate dump header itegrety + * Other structures are aligned to have constant offset in the header + * by adding zip areas in the structure end. + */ +#define TOTAL_DUMP_HEADER_SIZE PAGE_SIZE +#define BOOTBITS_DUMP_HEADER_SIZE 0x400 /* offset 0x000 */ +#define DUMP_INFO_HEADER_SIZE 0x100 /* offset 0x400 */ +#define CORE_DUMP_HEADER_SIZE 0x500 /* offset 0x500 */ + /* offset 0xa00 - gap */ + /* offset 0xff8 - magic */ + +/* + * Dump device and common dump state info + * Dump file space layout: + * block 0 dump file header + * block 1 core dump area start + * --------------------------------- + * | header | core dump area | + * --------------------------------- + * 0 block + * 1 block + */ + +#define CORE_DUMP_AREA_OFFSET 1 +#define DEFAULT_CORE_AREA_MAX_SIZE (16 * 1024L) /* 16 Gb */ + +typedef struct dump_desc { + u64 signature; /* signature to indicate dump */ + /* header structure start */ + /* should be first bytes of useful */ + /* part of the header */ + u8 cntp_valid; /* control points header of file */ + /* is created and valid */ + u8 core_valid; /* system core dump header of file */ + /* is created and valid */ + u64 file_size; /* total size of dump file */ + /* in pages */ + /* (page size = block size) */ + u64 cntp_offset; /* offset (in blocks = page) */ + /* of control points area in */ + /* the dump file */ + u64 cntp_size; /* size of control points area */ + /* in blocks */ + u64 core_offset; /* offset (in blocks = page) */ + /* of core dump area in */ + /* the dump file */ + u64 core_size; /* size of core dump area */ + /* in blocks */ +} dump_desc_t; + +/* + * 
System core dump state info + */ +typedef struct core_dump { +} core_dump_t; + +/* + * Dump header on the disk structure + */ +typedef struct dump_header { + /* Space for disklabel etc. */ + u8 bootbits[BOOTBITS_DUMP_HEADER_SIZE]; + + dump_desc_t info; /* Device & dump state common info */ + u8 zip1[DUMP_INFO_HEADER_SIZE - sizeof (dump_desc_t)]; + + core_dump_t core; /* System core dump header stuff */ + u8 zip3[CORE_DUMP_HEADER_SIZE - sizeof (core_dump_t)]; + + /* zip area to make size of */ + /* header - constant == PAGE_SIZE */ + u8 gap[ TOTAL_DUMP_HEADER_SIZE - + BOOTBITS_DUMP_HEADER_SIZE - + DUMP_INFO_HEADER_SIZE - + CORE_DUMP_HEADER_SIZE - + 8]; /* u64 : magic */ + + u64 magic; /* magic value to indicate control */ + /* point header structure */ + /* should be last bytes of the */ + /* header */ +} dump_header_t; + +#define DUMP_HEADER_SIGNATURE 0xe2c0c0e226143210 +#define DUMP_HEADER_MAGIC 0xe2c0c0e22614cdef + +#define DUMP_BLOCK_TO_SECTOR(block) ((block) * (PAGE_SIZE >> 9)) +#define CORE_BLOCK_TO_SECTOR(block) DUMP_BLOCK_TO_SECTOR(block) + +/* + * Forwards of some functions to recover system state + */ + +extern struct vm_area_struct *cntp_find_vma(struct task_struct *ts, + unsigned long addr); +extern void dump_prepare(u16 dump_dev, u64 dump_sector); +extern void start_emergency_dump(void); +extern int create_dump_point(void); + +extern void init_dump_analyze_mode(void); +extern void start_dump_analyze(void); + +extern e2k_addr_t cntp_kernel_address_to_phys(e2k_addr_t address); +extern e2k_addr_t cntp_user_address_to_phys(struct task_struct *tsk, + e2k_addr_t address); + +extern int map_memory_region(e2k_addr_t mem_base, e2k_addr_t mem_end, + int *just_mapped_point); + +extern int run_init_process(const char *init_filename); + +#if defined(CONFIG_EMERGENCY_DUMP) +extern unsigned int nr_swapfiles; +extern struct swap_info_struct *swap_info[MAX_SWAPFILES]; +#endif + +extern e2k_addr_t cntp_kernel_base; + +extern int cur_cnt_point; +extern int 
cntp_small_kern_mem_div; +extern int dump_analyze_mode; +extern int dump_analyze_opt; +extern char dump_analyze_cmd[]; + +#define boot_cur_cnt_point \ + boot_get_vo_value(cur_cnt_point) +#define boot_cntp_small_kern_mem_div \ + boot_get_vo_value(cntp_small_kern_mem_div) +#define boot_dump_analyze_mode \ + boot_get_vo_value(dump_analyze_mode) +#define boot_dump_analyze_opt \ + boot_get_vo_value(dump_analyze_opt) +#define boot_dump_analyze_cmd \ + boot_vp_to_pp((char *)dump_analyze_cmd) + +extern inline e2k_size_t +get_dump_analyze_bank_size(e2k_phys_bank_t *phys_bank, int cntp_num) +{ + e2k_addr_t base, new_base; + e2k_size_t size, new_size; + + BUG_ON(cntp_num == 0 || cntp_num == 1); + + size = phys_bank->pages_num * PAGE_SIZE; + base = phys_bank->base_addr; + new_base = LARGE_PAGE_ALIGN_DOWN(base); + new_size = size - (new_base - base); + + return LARGE_PAGE_ALIGN_UP(new_size / cntp_num); +} + +extern inline e2k_size_t +get_dump_analyze_memory_len(e2k_phys_bank_t *phys_bank, int cntp, int cntp_num) +{ + e2k_size_t size = get_dump_analyze_bank_size(phys_bank, cntp_num); + e2k_size_t len = size; + e2k_addr_t base; + e2k_addr_t new_base; + + BUG_ON(cntp_num == 0 || cntp_num == 1); + BUG_ON(cntp != cntp_num - 1); + + + base = phys_bank->base_addr; + new_base = LARGE_PAGE_ALIGN_DOWN(base); + len += phys_bank->pages_num * PAGE_SIZE - + ((new_base - base) + size * cntp_num); + + return len; +} + +extern inline e2k_addr_t +get_dump_analyze_memory_offset(e2k_phys_bank_t *phys_bank, int cntp, + int cntp_num) +{ + e2k_size_t size; + e2k_addr_t offset = 0; + e2k_addr_t base; + e2k_addr_t new_base; + + BUG_ON(cntp_num == 0 || cntp_num == 1); + BUG_ON(cntp != cntp_num - 1); + + size = get_dump_analyze_bank_size(phys_bank, cntp_num); + base = phys_bank->base_addr; + new_base = LARGE_PAGE_ALIGN_DOWN(base); + offset = (new_base - base) + size * cntp; + return offset; +} + +extern inline e2k_addr_t +get_dump_analyze_memory_base(e2k_phys_bank_t *phys_bank, int cntp, int cntp_num) +{ 
+ e2k_addr_t offset = get_dump_analyze_memory_offset( + phys_bank, cntp, cntp_num); + e2k_addr_t base = phys_bank->base_addr; + + base += offset; + return base; +} + +extern inline e2k_addr_t +boot_get_dump_analyze_kernel_base(void) +{ + e2k_phys_bank_t *phys_bank; + e2k_addr_t base; + e2k_addr_t new_base; + e2k_size_t cntp_size; + int node; + int bank; + + for (node = 0; node < L_MAX_MEM_NUMNODES; node ++) { + phys_bank = full_phys_mem[node].banks; + if (phys_bank->pages_num == 0) + continue; /* node has not memory */ + + for (bank = 0; bank < L_MAX_NODE_PHYS_BANKS; bank ++) { + if (phys_bank->pages_num == 0) + break; + + cntp_size = get_dump_analyze_memory_len( + phys_bank, + boot_cntp_small_kern_mem_div - 1, + boot_cntp_small_kern_mem_div); + if (cntp_size < boot_kernel_image_size) + goto next_bank; + + base = get_dump_analyze_memory_base( + phys_bank, + boot_cntp_small_kern_mem_div - 1, + boot_cntp_small_kern_mem_div); + + new_base = _PAGE_ALIGN_DOWN(base, E2K_KERNEL_PAGE_SIZE); + if (new_base - base + boot_kernel_image_size <= + cntp_size) + return new_base; +next_bank: + phys_bank ++; + } + } + + /* + * TODO: avoid this + */ + BUG(); + + return -1; +} + +/* + * bootblock manipulations (read/write/set/reset) in virtual kernel mode + * on physical level: + * write through and uncachable access on physical address + * bootblock virtual address can be only read + */ + +static inline u64 +read_bootblock_cur_cnt_point(bootblock_struct_t *bootblock) +{ + return READ_BOOTBLOCK_FIELD(bootblock, cur_cnt_point); +} + +extern inline void +write_bootblock_cur_cnt_point(bootblock_struct_t *bootblock, u64 new_cnt_point) +{ + WRITE_BOOTBLOCK_FIELD(bootblock, cur_cnt_point, new_cnt_point); +} + +extern inline void +write_bootblock_mem_cnt_points(bootblock_struct_t *bootblock, u64 new_mem_points) +{ + WRITE_BOOTBLOCK_FIELD(bootblock, mem_cnt_points, new_mem_points); +} + +extern inline void +write_bootblock_disk_cnt_points(bootblock_struct_t *bootblock, + u64 new_disk_points) 
+{ + WRITE_BOOTBLOCK_FIELD(bootblock, disk_cnt_points, new_disk_points); +} + +extern inline void +write_bootblock_kernel_base(bootblock_struct_t *bootblock, + u64 new_kernel_base) +{ + WRITE_BOOTBLOCK_FIELD(bootblock, info.kernel_base, new_kernel_base); +} + +extern inline u64 +read_bootblock_cntp_kernel_base(bootblock_struct_t *bootblock, int cntp) +{ + return READ_BOOTBLOCK_FIELD(bootblock, + info.cntp_info[cntp].kernel_base); +} + +extern inline void +write_bootblock_cntp_kernel_base(bootblock_struct_t *bootblock, int cntp, + u64 kernel_base) +{ + WRITE_BOOTBLOCK_FIELD(bootblock, info.cntp_info[cntp].kernel_base, + kernel_base); +} + +extern inline void +set_bootblock_cntp_created(bootblock_struct_t *bootblock) +{ + WRITE_BOOTBLOCK_FIELD(bootblock, cnt_points_created, 1); +} + +/* + * Convert virtual address of kernel item in a control point context + * to the consistent physical address. + */ +#define cntp_va_to_pa(virt_addr, cntp_kernel_phys_base, ts) \ +({ \ + e2k_addr_t phys = 0; \ + e2k_addr_t virt = (e2k_addr_t)virt_addr; \ + \ + if (virt > 0 && virt < PAGE_OFFSET) \ + phys = cntp_user_address_to_phys(ts, virt); \ + else if (virt >= PAGE_OFFSET && virt < PAGE_OFFSET + MAX_PM_SIZE) \ + phys = __pa(virt); \ + else if (virt >= KERNEL_BASE && virt <= KERNEL_END) \ + phys = virt - KERNEL_BASE + cntp_kernel_phys_base; \ + else if (virt != 0) \ + phys = cntp_kernel_address_to_phys(virt); \ + \ + phys; \ +}) + +#define cntp_va(virt_addr, ts) \ +({ \ + void *virt = (void*)0; \ + if ((e2k_addr_t)virt_addr != 0) { \ + virt = (void *) cntp_va_to_pa(virt_addr, cntp_kernel_base, ts);\ + if (((unsigned long) virt) != -1) \ + virt = __va(virt); \ + } \ + virt; \ +}) +#endif /* _E2K_CNT_POINT_H */ diff --git a/arch/e2k/include/asm/compat.h b/arch/e2k/include/asm/compat.h new file mode 100644 index 000000000000..9bf8d52f9767 --- /dev/null +++ b/arch/e2k/include/asm/compat.h @@ -0,0 +1,215 @@ +#ifndef _ASM_E2K_COMPAT_H +#define _ASM_E2K_COMPAT_H + +/* + * Architecture 
specific compatibility types + */ +#include + +#include +#include +#include + +#include + +#define COMPAT_USER_HZ 100 + +typedef u32 compat_size_t; +typedef s32 compat_ssize_t; +typedef s32 compat_clock_t; +typedef s32 compat_pid_t; +typedef u16 __compat_uid_t; +typedef u16 __compat_gid_t; +typedef u32 __compat_uid32_t; +typedef u32 __compat_gid32_t; +typedef u16 compat_mode_t; +typedef u32 compat_ino_t; +typedef u16 compat_dev_t; +typedef s32 compat_off_t; +typedef s64 compat_loff_t; +typedef s16 compat_nlink_t; +typedef u16 compat_ipc_pid_t; +typedef s32 compat_daddr_t; +typedef u32 compat_caddr_t; +typedef __kernel_fsid_t compat_fsid_t; +typedef s32 compat_key_t; +typedef s32 compat_timer_t; + +typedef s32 compat_int_t; +typedef s32 compat_long_t; +typedef u32 compat_uint_t; +typedef u32 compat_ulong_t; +typedef u32 compat_uptr_t; + +typedef u64 compat_u64; +typedef s64 compat_s64; + +struct compat_stat { + compat_dev_t st_dev; + compat_ino_t st_ino; + compat_mode_t st_mode; + compat_nlink_t st_nlink; + __compat_uid_t st_uid; + __compat_gid_t st_gid; + compat_dev_t st_rdev; + compat_off_t st_size; + compat_time_t st_atime; + compat_ulong_t st_atime_nsec; + compat_time_t st_mtime; + compat_ulong_t st_mtime_nsec; + compat_time_t st_ctime; + compat_ulong_t st_ctime_nsec; + compat_off_t st_blksize; + compat_off_t st_blocks; + u32 __unused4[2]; +}; + +struct compat_flock { + short l_type; + short l_whence; + compat_off_t l_start; + compat_off_t l_len; + compat_pid_t l_pid; + short __unused; +}; + +#define F_GETLK64 12 +#define F_SETLK64 13 +#define F_SETLKW64 14 + +struct compat_flock64 { + short l_type; + short l_whence; + compat_loff_t l_start; + compat_loff_t l_len; + compat_pid_t l_pid; + short __unused; +}; + +struct compat_statfs { + int f_type; + int f_bsize; + int f_blocks; + int f_bfree; + int f_bavail; + int f_files; + int f_ffree; + compat_fsid_t f_fsid; + int f_namelen; + int f_frsize; + int f_flags; + int f_spare[4]; +}; + +#define COMPAT_RLIM_INFINITY 
0x7fffffff + +typedef u32 compat_old_sigset_t; + +#undef DebugUS +#define DEBUG_US 0 /* Allocate User Space */ +#define DebugUS(...) DebugPrint(DEBUG_US ,##__VA_ARGS__) + + +#define _COMPAT_NSIG 64 +#define _COMPAT_NSIG_BPW 32 + +typedef u32 compat_sigset_word; + +typedef struct sigevent32 { + sigval_t sigev_value; + int sigev_signo; + int sigev_notify; + union { + int _pad[SIGEV_PAD_SIZE32]; + + struct { + u32 _function; + u32 _attribute; /* really pthread_attr_t */ + } _sigev_thread; + } _sigev_un; +} sigevent_t32; + +#define COMPAT_OFF_T_MAX 0x7fffffff + +/* + * The type of struct elf_prstatus.pr_reg in compatible core dumps. + */ +typedef struct user_regs_struct compat_elf_gregset_t; + +static inline void __user *compat_ptr(compat_uptr_t uptr) +{ + return (void __user *)(unsigned long)uptr; +} + +static inline compat_uptr_t ptr_to_compat(void __user *uptr) +{ + return (u32)(unsigned long)uptr; +} + +extern void __user *arch_compat_alloc_user_space(unsigned long len); + +struct compat_ipc64_perm { + compat_key_t key; + __compat_uid32_t uid; + __compat_gid32_t gid; + __compat_uid32_t cuid; + __compat_gid32_t cgid; + unsigned short __pad1; + compat_mode_t mode; + unsigned short __pad2; + unsigned short seq; + unsigned long __unused1; /* yes they really are 64bit pads */ + unsigned long __unused2; +}; + +struct compat_semid64_ds { + struct compat_ipc64_perm sem_perm; + compat_ulong_t __unused1; + compat_ulong_t sem_otime; + compat_ulong_t sem_otime_high; + compat_ulong_t sem_ctime; + compat_ulong_t sem_nsems; + compat_ulong_t sem_ctime_high; + compat_ulong_t __unused2; +}; + +struct compat_msqid64_ds { + struct compat_ipc64_perm msg_perm; + compat_ulong_t __unused1; + compat_ulong_t msg_stime; + compat_ulong_t msg_stime_high; + compat_ulong_t msg_rtime; + compat_ulong_t msg_rtime_high; + compat_ulong_t msg_ctime; + compat_ulong_t msg_cbytes; + compat_ulong_t msg_qnum; + compat_ulong_t msg_qbytes; + compat_pid_t msg_lspid; + compat_pid_t msg_lrpid; + compat_ulong_t 
msg_ctime_high; + compat_ulong_t __unused2; +}; + +struct compat_shmid64_ds { + struct compat_ipc64_perm shm_perm; + compat_ulong_t __unused1; + compat_ulong_t shm_atime; + compat_ulong_t shm_atime_high; + compat_ulong_t shm_dtime; + compat_ulong_t shm_dtime_high; + compat_ulong_t shm_ctime; + compat_size_t shm_segsz; + compat_pid_t shm_cpid; + compat_pid_t shm_lpid; + compat_ulong_t shm_nattch; + compat_ulong_t shm_ctime_high; + compat_ulong_t __unused2; +}; + +static inline int is_compat_task(void) +{ + return (TASK_IS_PROTECTED(current)) ? 0 : + (current->thread.flags & E2K_FLAG_32BIT); +} + +#endif /* _ASM_E2K_COMPAT_H */ diff --git a/arch/e2k/include/asm/compiler.h b/arch/e2k/include/asm/compiler.h new file mode 100644 index 000000000000..10c3487b8620 --- /dev/null +++ b/arch/e2k/include/asm/compiler.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_COMPILER_H +#define _ASM_COMPILER_H + +#include + +#undef barrier +#undef barrier_data +#undef RELOC_HIDE + +#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__) +#if GCC_VERSION >= 40400 +/* builtin version has better throughput but worse latency */ +#undef __HAVE_BUILTIN_BSWAP32__ +#endif +#endif + +#define __PREEMPTION_CLOBBERS_1(cpu_greg, offset_greg) \ + "g" #cpu_greg, "g" #offset_greg +#define __PREEMPTION_CLOBBERS(cpu_greg, offset_greg) \ + __PREEMPTION_CLOBBERS_1(cpu_greg, offset_greg) +/* If a compiler barrier is used in loop, these clobbers will + * force the compiler to always access *current* per-cpu area + * instead of moving its address calculation out from the loop. + * + * The same goes for preemption-disabled sections: these clobbers + * will forbid compiler to move per-cpu area address calculation out + * from them. Since disabling interrupts also disables preemption, + * we also need these clobbers when writing PSR/UPSR. + * + * And of course operations on preempt_count must not be moved + * out of/into preemption disabled sections. 
*/ +#define PREEMPTION_CLOBBERS __PREEMPTION_CLOBBERS(SMP_CPU_ID_GREG, MY_CPU_OFFSET_GREG) + +#ifdef CONFIG_DEBUG_LCC_VOLATILE_ATOMIC +#define NOT_VOLATILE volatile +#else +#define NOT_VOLATILE +#endif + +/* See bug #89623, bug #94946 */ +#define barrier() \ +do { \ + int unused; \ + /* TODO bug 126238 - insert additional NOP until fixed */ \ + __asm__ NOT_VOLATILE("{nop}" : "=r" (unused) : : "memory", PREEMPTION_CLOBBERS);\ +} while (0) + +/* See comment before PREEMPTION_CLOBBERS */ +#define barrier_preemption() \ +do { \ + int unused; \ + /* TODO bug 126238 - insert additional NOP until fixed */ \ + __asm__ NOT_VOLATILE("{nop}" : "=r" (unused) : : PREEMPTION_CLOBBERS);\ +} while (0) + +#define barrier_data(ptr) \ +do { \ + /* TODO bug 126238 - insert additional NOP until fixed */ \ + __asm__ NOT_VOLATILE("{nop}" : : "r"(ptr) : "memory", PREEMPTION_CLOBBERS); \ +} while (0) + +#define RELOC_HIDE(ptr, off) \ +({ \ + unsigned long __ptr; \ + /* TODO bug 126238 - insert additional NOP until fixed */ \ + __asm__ ("{nop}" : "=r"(__ptr) : "0"(ptr)); \ + (typeof(ptr)) (__ptr + (off)); \ +}) + +#if defined(__LCC__) && (__LCC__ > 125 || __LCC__ == 125 && __LCC_MINOR__ >= 9) +# define builtin_expect_wrapper(x, val) __builtin_expect_with_probability((x), (val), 0.9999) +#else +# define builtin_expect_wrapper(x, val) __builtin_expect((x), (val)) +#endif + +#endif /* _ASM_COMPILER_H */ diff --git a/arch/e2k/include/asm/console.h b/arch/e2k/include/asm/console.h new file mode 100644 index 000000000000..e4740ae1d7b7 --- /dev/null +++ b/arch/e2k/include/asm/console.h @@ -0,0 +1,47 @@ + +#ifndef _E2K_CONSOLE_H_ +#define _E2K_CONSOLE_H_ + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ +#include +#include +#include +#include +#include +#include + +static inline void +native_virt_console_dump_putc(char c) +{ +#ifdef CONFIG_EARLY_VIRTIO_CONSOLE + if (IS_HV_GM()) { + /* virtio console is actual only for guest mode */ + kvm_virt_console_dump_putc(c); + } +#endif /* 
CONFIG_EARLY_VIRTIO_CONSOLE */ +} + +extern void init_bug(const char *fmt_v, ...); +extern void init_warning(const char *fmt_v, ...); + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized guest and host kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#else /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ +/* native kernel or native kernel with virtualization support */ +static inline void +virt_console_dump_putc(char c) +{ + native_virt_console_dump_putc(c); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ +#endif /* _E2K_CONSOLE_H_ */ diff --git a/arch/e2k/include/asm/convert_array.h b/arch/e2k/include/asm/convert_array.h new file mode 100644 index 000000000000..0889b1c6850b --- /dev/null +++ b/arch/e2k/include/asm/convert_array.h @@ -0,0 +1,153 @@ +/* + * convert_array.h - Linux syscall interfaces (arch-specific) + * + * Copyright (c) 2019 MCST + * + * This file is released under the GPLv2. + * See the file COPYING for more details. + */ + +#ifndef _ASM_E2K_UAPI_CONVERT_ARRAY_H +#define _ASM_E2K_UAPI_CONVERT_ARRAY_H + + +#ifdef CONFIG_PROTECTED_MODE + +/* New mask format: 4 bits per structure field */ +#define get_pm_struct_simple(struct128, struct64, \ + max_prot_array_size, fields, \ + items, mask_type, mask_align) \ + get_pm_struct(struct128, struct64, \ + max_prot_array_size, fields, \ + items, mask_type, mask_align, 0, 0) + + +extern int get_pm_struct(long __user *struct128, + long *struct64, + const int max_prot_array_size, const int fieldnum, + const int items, const long mask_type, + const long mask_align, const long mask_rw, + const int rval_mode); +/* + * Converts protected structure (array of structures), which can contain + * protected user pointers to memory, function descriptors, and int values. + * struct128 - pointer to the protected (user-space) structure (128 bit). 
+ * struct64 - pointer to allocated area where to put converted structure. + * max_prot_array_size - estimated maximum size, which struct128 occupies + * filednum - number of fields in the given structure. + * items - number of elements (structures) in array (items == array size) + * if 'struct128' is array of structures to be converted. + * mask_type - mask for encoding structure field types: + * (4 bits per each entry): + * --- 0000 (0x0) - int + * --- 0001 (0x1) - long + * --- 0010 (0x2) - Fptr (pointer to function) + * --- 0011 (0x3) - descriptor (pointer to memory) + * --- 0100 (0x4) - descriptor or int + * --- 0101 (0x5) - descriptor or long + * --- 0110 (0x6) - descriptor or Fptr + * --- 0111 (0x7) - everything is possible (i/P/F) + * --- 1*** (0x8) - may be uninitialized (empty tag allowed) + * mask_align - mask for encoding alignment of the NEXT (!!!) structure field; + * for example, bits #0-3 code alignment of the 2nd structure field + * (4 bits per each entry): + * --- 00 (0x0) - next field aligned as int (to 4 bytes) + * --- 01 (0x1) - next field aligned as long (to 8 bytes) + * --- 10 (0x2) - not used yet + * --- 11 (0x3) - next field aligned as pointer (to 16 bytes) + * mask_rw - mask for encoding access type of structure fields + * (4 bits per each entry): + * --- 01 (0x1) - the field's content gets read by syscall (READ-able) + * --- 02 (0x2) - the field's content gets updated by syscall (WRITE-able) + * --- 11 (0x3) - the field is both READ-able and WRITE-able + * --- 00 (0x0) - default type; the same as (READ-able) + * rval_mode - error (return value) reporting mode mask: + * 0 - report only critical problems in struct128 structure; + * 1 - return with -EFAULT if wrong tag in 'int' field; + * 2 - --'-- --'-- 'long' field; + * 4 - --'-- --'-- 'func' field; + * 8 - --'-- --'-- 'descr' field; + * 16 - ignore errors in 'int' field; + * 32 - --'-- --'-- 'long' field; + * 64 - --'-- --'-- 'func' field; + * 128 - --'-- --'-- 'descr' field. 
+ * Returns: 0 - if converted OK; + * error number - otherwise. + */ + + +#define CONV_ARR_WRONG_INT_FLD 1 +#define CONV_ARR_WRONG_LONG_FLD 2 +#define CONV_ARR_WRONG_FUNC_FLD 4 +#define CONV_ARR_WRONG_DSCR_FLD 8 +#define CONV_ARR_WRONG_ANY_FLD 15 /* error if any field appeared bad */ +#define CONV_ARR_IGNORE_INT_FLD_ERR 16 +#define CONV_ARR_IGNORE_LONG_FLD_ERR 32 +#define CONV_ARR_IGNORE_FUNC_FLD_ERR 64 +#define CONV_ARR_IGNORE_DSCR_FLD_ERR 128 + + +extern int check_args_array4(const long __user *args_array, + const long tags, + const int arg_num, + const long mask_type, + const int rval_mode, const char *ErrMsgHeader); +/* + * This function checks protected syscall arguments on correspondence with + * the given mask: + * args_array - pointer to argument array + * tags - argument tags (4 bits per arg; lower to higher bits ordered) + * arg_num - number of arguments + * mask_type - mask for encoding of field type in each element + * 4 bits per each entry: + * --- 0000 (0x0) - int + * --- 0001 (0x1) - long + * --- 0010 (0x2) - pointer to function + * --- 0011 (0x3) - pointer to memory + * --- 0100 (0x4) - descriptor or int + * --- 0101 (0x5) - descriptor or long + * --- 1*** (0x8) - may be uninitialized (empty tag allowed) + * rval_mode - error (return value) reporting mode mask: + * 0 - report only critical problems; + * 1 - return with -EFAULT if wrong tag in 'int' field; + * 2 - --'-- --'-- 'long' field; + * 4 - --'-- --'-- 'func' field; + * 8 - --'-- --'-- 'descr' field; + * 16 - ignore errors in 'int' field; + * 32 - --'-- --'-- 'long' field; + * 64 - --'-- --'-- 'func' field; + * 128 - --'-- --'-- 'descr' field. + * Returns: 0 - if converted OK; + * error number - otherwise. 
+ */ + + +/* This function realizes compact mask format: 2 bits per structure field */ +extern int convert_array_3(long __user *prot_array, long *new_array, + const int max_prot_array_size, const int fields, + const int items, unsigned long mask_type, + unsigned long mask_align, unsigned long mask_rw, + const int rval_mode); + +/* This function realizes compact mask format: 2 bits per structure field */ +extern int check_args_array(const long __user *args_array, + const long tags, + const int arg_num, + unsigned long mask_type, + const int rval_mode, + const char *ErrMsgHeader); + + +/* This is deprecated. Not reconnemded to use. + * Old mask format: 2 bits per structure field + */ +#define convert_array(prot_array, new_array, max_prot_array_size, fields, \ + items, mask_type, mask_align) \ + convert_array_3(prot_array, new_array, max_prot_array_size, fields, \ + items, mask_type, mask_align, 0, 0) + +#else +# define convert_array(...) 0 +#endif /* CONFIG_PROTECTED_MODE */ + +#endif /* _ASM_E2K_UAPI_CONVERT_ARRAY_H */ diff --git a/arch/e2k/include/asm/copy-hw-stacks.h b/arch/e2k/include/asm/copy-hw-stacks.h new file mode 100644 index 000000000000..891fb2c3b85a --- /dev/null +++ b/arch/e2k/include/asm/copy-hw-stacks.h @@ -0,0 +1,852 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * include/asm-e2k/copy-hw-stacks.h + * + * Copyright 2021 mcst.ru + */ + +#ifndef _E2K_COPY_HW_STACKS_H +#define _E2K_COPY_HW_STACKS_H + +#include + +#include +#include +#include + +#include + +#undef DEBUG_PV_UST_MODE +#undef DebugUST +#define DEBUG_PV_UST_MODE 0 /* guest user stacks debug */ +#define DebugUST(fmt, args...) 
\ +({ \ + if (debug_guest_ust) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PV_SYSCALL_MODE +#define DEBUG_PV_SYSCALL_MODE 0 /* syscall injection debugging */ + +#if DEBUG_PV_UST_MODE || DEBUG_PV_SYSCALL_MODE +extern bool debug_guest_ust; +#else +#define debug_guest_ust false +#endif /* DEBUG_PV_UST_MODE || DEBUG_PV_SYSCALL_MODE */ + +#ifndef CONFIG_VIRTUALIZATION +/* it native kernel without virtualization support */ +#else /* CONFIG_VIRTUALIZATION */ +/* It is native host kernel with virtualization support */ +/* or paravirtualized host and guest */ +/* or native guest kernel + #include + */ +#endif /* ! CONFIG_VIRTUALIZATION */ + +typedef void (*trace_ps_frame_func_t)(kernel_mem_ps_t *base, kernel_mem_ps_t *frame); +typedef void (*trace_pcs_frame_func_t)(e2k_mem_crs_t *base, e2k_mem_crs_t *frame); + +static inline void trace_proc_stack_frames(kernel_mem_ps_t *dst_ps_base, + kernel_mem_ps_t *src_ps_base, u64 ps_size, + trace_ps_frame_func_t trace_func) +{ + int qreg, qreg_num; + kernel_mem_ps_t *dst_ps_frame, *src_ps_frame; + kernel_mem_ps_t rw; + + qreg_num = ps_size / EXT_4_NR_SZ; + for (qreg = qreg_num - 1; qreg >= 0; qreg--) { + dst_ps_frame = &dst_ps_base[qreg]; + src_ps_frame = &src_ps_base[qreg]; + rw.word_lo = src_ps_frame->word_lo; + if (machine.native_iset_ver < E2K_ISET_V5) { + rw.word_hi = src_ps_frame->word_hi; + rw.ext_lo = src_ps_frame->ext_lo; + rw.ext_hi = src_ps_frame->ext_hi; + } else { + rw.word_hi = src_ps_frame->ext_lo; + rw.ext_lo = src_ps_frame->word_hi; + rw.ext_hi = src_ps_frame->ext_hi; + } + + trace_func(dst_ps_frame, &rw); + } +} + +static inline void trace_chain_stack_frames(e2k_mem_crs_t *dst_pcs_base, + e2k_mem_crs_t *src_pcs_base, u64 pcs_size, + trace_pcs_frame_func_t trace_func) +{ + int crs_no, crs_num; + e2k_mem_crs_t *dst_pcs_frame, *src_pcs_frame; + e2k_mem_crs_t crs; + unsigned long flags; + + crs_num = pcs_size / sizeof(crs); + raw_all_irq_save(flags); + for (crs_no = crs_num - 1; crs_no >= 0; 
crs_no--) { + dst_pcs_frame = &dst_pcs_base[crs_no]; + src_pcs_frame = &src_pcs_base[crs_no]; + crs = *src_pcs_frame; + + trace_func(dst_pcs_frame, &crs); + } + raw_all_irq_restore(flags); +} + +static inline void trace_host_hva_area(u64 *hva_base, u64 hva_size) +{ + int line_no, line_num; + u64 *dst_hva_line; + unsigned long flags; + + line_num = hva_size / (sizeof(u64) * 4); + raw_all_irq_save(flags); + for (line_no = line_num - 1; line_no >= 0; line_no--) { + dst_hva_line = &hva_base[line_no * 4]; + trace_host_hva_area_line(dst_hva_line, (sizeof(u64) * 4)); + } + if (line_num * (sizeof(u64) * 4) < hva_size) { + dst_hva_line = &hva_base[line_no * 4]; + trace_host_hva_area_line(dst_hva_line, + hva_size - line_num * (sizeof(u64) * 4)); + } + raw_all_irq_restore(flags); +} + +static __always_inline void +native_kernel_hw_stack_frames_copy(u64 *dst, const u64 *src, unsigned long size) +{ + void *dst_tail; + const void *src_tail; + u64 copied; + int i; + + /* + * Kernel does not use FP registers so do not copy them. + * This only applies to CPUs before V5 instruction set + * (since V5 FP registers become general-purpose QP registers). 
+ */ + if (cpu_has(CPU_FEAT_QPREG)) { +#pragma loop count (10) + for (i = 0; i < size / 64; i++) + E2K_TAGGED_MEMMOVE_64(&dst[8 * i], &src[8 * i]); + + copied = round_down(size, 64); + dst_tail = (void *) dst + copied; + src_tail = (void *) src + copied; + } else { +#pragma loop count (5) + for (i = 0; i < size / 128; i++) + E2K_TAGGED_MEMMOVE_128_RF_V2(&dst[16 * i], + &src[16 * i]); + + copied = round_down(size, 128); + dst_tail = (void *) dst + copied; + src_tail = (void *) src + copied; + + if (size & 64) { + E2K_TAGGED_MEMMOVE_64(dst_tail, src_tail); + dst_tail += 64; + src_tail += 64; + } + } + + if (size & 32) + E2K_TAGGED_MEMMOVE_32(dst_tail, src_tail); +} + +static __always_inline void +native_collapse_kernel_pcs(u64 *dst, const u64 *src, u64 spilled_size) +{ + e2k_pcsp_hi_t k_pcsp_hi; + u64 size; + int i; + + DebugUST("current host chain stack index 0x%x, PCSHTP 0x%llx\n", + NATIVE_NV_READ_PCSP_HI_REG().PCSP_hi_ind, + NATIVE_READ_PCSHTP_REG_SVALUE()); + + NATIVE_FLUSHC; + k_pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG(); + + size = k_pcsp_hi.PCSP_hi_ind - spilled_size; + BUG_ON(!IS_ALIGNED(size, ALIGN_PCSTACK_TOP_SIZE) || (s64) size < 0); +#pragma loop count (2) + for (i = 0; i < size / 32; i++) { + u64 v0, v1, v2, v3; + + v0 = src[4 * i]; + v1 = src[4 * i + 1]; + v2 = src[4 * i + 2]; + v3 = src[4 * i + 3]; + dst[4 * i] = v0; + dst[4 * i + 1] = v1; + dst[4 * i + 2] = v2; + dst[4 * i + 3] = v3; + } + + k_pcsp_hi.PCSP_hi_ind -= spilled_size; + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG(k_pcsp_hi); + + DebugUST("move spilled chain part from host top %px to " + "bottom %px, size 0x%llx\n", + src, dst, size); + DebugUST("host kernel chain stack index is now 0x%x, " + "guest user PCSHTP 0x%llx\n", + k_pcsp_hi.PCSP_hi_ind, spilled_size); +} + +static __always_inline void +native_collapse_kernel_ps(u64 *dst, const u64 *src, u64 spilled_size) +{ + e2k_psp_hi_t k_psp_hi; + u64 size; + + DebugUST("current host procedure stack index 0x%x, PSHTP 0x%x\n", + 
NATIVE_NV_READ_PSP_HI_REG().PSP_hi_ind, + NATIVE_NV_READ_PSHTP_REG().PSHTP_ind); + + NATIVE_FLUSHR; + k_psp_hi = NATIVE_NV_READ_PSP_HI_REG(); + + size = k_psp_hi.PSP_hi_ind - spilled_size; + BUG_ON(!IS_ALIGNED(size, ALIGN_PSTACK_TOP_SIZE) || (s64) size < 0); + + prefetch_nospec_range(src, size); + native_kernel_hw_stack_frames_copy(dst, src, size); + + k_psp_hi.PSP_hi_ind -= spilled_size; + NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG(k_psp_hi); + + DebugUST("move spilled procedure part from host top %px to " + "bottom %px, size 0x%llx\n", + src, dst, size); + DebugUST("host kernel procedure stack index is now 0x%x, " + "guest user PSHTP 0x%llx\n", + k_psp_hi.PSP_hi_ind, spilled_size); +} + +#if defined(CONFIG_PARAVIRT_GUEST) +/* paravirtualized kernel (host and guest) */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* It is native guest kernel (without paravirtualization) */ +#include +#elif defined(CONFIG_VIRTUALIZATION) || !defined(CONFIG_VIRTUALIZATION) +/* native kernel with virtualization support */ +/* native kernel without virtualization support */ + +static __always_inline void +kernel_hw_stack_frames_copy(u64 *dst, const u64 *src, unsigned long size) +{ + native_kernel_hw_stack_frames_copy(dst, src, size); +} +static __always_inline void +collapse_kernel_pcs(u64 *dst, const u64 *src, u64 spilled_size) +{ + native_collapse_kernel_pcs(dst, src, spilled_size); +} +static __always_inline void +collapse_kernel_ps(u64 *dst, const u64 *src, u64 spilled_size) +{ + native_collapse_kernel_ps(dst, src, spilled_size); +} + +#else /* ??? 
*/ + #error "Undefined virtualization mode" +#endif /* CONFIG_PARAVIRT_GUEST */ + +static __always_inline u64 get_wsz(enum restore_caller from) +{ + return NATIVE_READ_WD_REG().size >> 4; +} + +static __always_inline u64 get_ps_clear_size(u64 cur_window_q, + e2k_pshtp_t pshtp) +{ + s64 u_pshtp_size_q; + + u_pshtp_size_q = GET_PSHTP_Q_INDEX(pshtp); + if (u_pshtp_size_q > E2K_MAXSR - cur_window_q) + u_pshtp_size_q = E2K_MAXSR - cur_window_q; + + return E2K_MAXSR - (cur_window_q + u_pshtp_size_q); +} + +static __always_inline s64 get_ps_copy_size(u64 cur_window_q, s64 u_pshtp_size) +{ + return u_pshtp_size - (E2K_MAXSR - cur_window_q) * EXT_4_NR_SZ; +} + +#ifdef CONFIG_CPU_HAS_FILL_INSTRUCTION +# define E2K_CF_MAX_FILL (E2K_CF_MAX_FILL_FILLC_q * 0x10) +#else +extern int cf_max_fill_return; +# define E2K_CF_MAX_FILL cf_max_fill_return +#endif + +static __always_inline s64 get_pcs_copy_size(s64 u_pcshtp_size) +{ + /* Before v6 it was possible to fill no more than 16 registers. + * Since E2K_MAXCR_q is much bigger than 16 we can be sure that + * there is enough space in CF for the FILL, so there is no + * need to take into account space taken by current window. */ + return u_pcshtp_size - E2K_CF_MAX_FILL; +} + +/* + * Copy hardware stack from user to *current* kernel stack. + * One has to be careful to avoid hardware FILL of this stack. + */ +static inline int __copy_user_to_current_hw_stack(void *dst, void __user *src, + unsigned long size, const pt_regs_t *regs, bool chain) +{ + unsigned long min_flt, maj_flt, ts_flag; + + if (likely(!host_test_intc_emul_mode(regs))) { + if (!__range_ok((unsigned long __force) src, size, + PAGE_OFFSET)) + return -EFAULT; + } + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + + /* + * Every page fault here has a chance of FILL'ing the frame + * that is being copied, in which case we repeat the copy. 
+ */ + do { + min_flt = READ_ONCE(current->min_flt); + maj_flt = READ_ONCE(current->maj_flt); + + if (chain) + E2K_FLUSHC; + else + E2K_FLUSHR; + + SET_USR_PFAULT("$.recovery_memcpy_fault"); + fast_tagged_memory_copy_from_user(dst, src, size, regs, + TAGGED_MEM_STORE_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + TAGGED_MEM_LOAD_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + true); + if (RESTORE_USR_PFAULT) { + clear_ts_flag(ts_flag); + return -EFAULT; + } + } while (unlikely(min_flt != READ_ONCE(current->min_flt) || + maj_flt != READ_ONCE(current->maj_flt))); + + clear_ts_flag(ts_flag); + return 0; +} + + +static inline int copy_user_to_current_hw_stack(void *dst, void __user *src, + unsigned long size, pt_regs_t *regs, bool chain) +{ + unsigned long flags; + int ret; + + raw_all_irq_save(flags); + ret = __copy_user_to_current_hw_stack(dst, src, size, regs, chain); + raw_all_irq_restore(flags); + + return ret; +} + +static inline int copy_e2k_stack_from_user(void *dst, void __user *src, + unsigned long size, pt_regs_t *regs) +{ + unsigned long ts_flag; + int ret; + + if (likely(!host_test_intc_emul_mode(regs))) { + if (!__range_ok((unsigned long __force) src, size, PAGE_OFFSET)) + return -EFAULT; + } + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = host_copy_from_user_with_tags(dst, src, size, regs); + clear_ts_flag(ts_flag); + + return (ret) ? -EFAULT : 0; +} + +static inline int copy_e2k_stack_to_user(void __user *dst, void *src, + unsigned long size, pt_regs_t *regs) +{ + unsigned long ts_flag; + int ret; + + if (likely(!host_test_intc_emul_mode(regs))) { + if (!__range_ok((unsigned long __force) dst, size, PAGE_OFFSET)) + return -EFAULT; + } + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = host_copy_to_user_with_tags(dst, src, size, regs); + clear_ts_flag(ts_flag); + + return (ret) ? 
-EFAULT : 0; +} + +static __always_inline int +user_hw_stack_frames_copy(void __user *dst, void *src, unsigned long copy_size, + const pt_regs_t *regs, unsigned long hw_stack_ind, bool is_pcsp) +{ + unsigned long ts_flag; + + if (unlikely(hw_stack_ind < copy_size)) { + unsigned long flags; + raw_all_irq_save(flags); + if (is_pcsp) { + E2K_FLUSHC; + } else { + E2K_FLUSHR; + } + raw_all_irq_restore(flags); + } + + SET_USR_PFAULT("$.recovery_memcpy_fault"); + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + fast_tagged_memory_copy_to_user(dst, src, copy_size, regs, + TAGGED_MEM_STORE_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + TAGGED_MEM_LOAD_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, true); + clear_ts_flag(ts_flag); + + if (RESTORE_USR_PFAULT) { + pr_err("process %s (%d) %s stack could not be copied " + "from %px to %px size 0x%lx (out of memory?)\n", + current->comm, current->pid, + (is_pcsp) ? "chain" : "procedure", + src, dst, copy_size); + return -EFAULT; + } + DebugUST("copying guest %s stack spilled to host from %px " + "to guest kernel stack from %px, size 0x%lx\n", + (is_pcsp) ? 
"chain" : "procedure", src, dst, copy_size); + + return 0; +} + +static __always_inline int +user_crs_frames_copy(e2k_mem_crs_t __user *u_frame, pt_regs_t *regs, + e2k_mem_crs_t *crs) +{ + unsigned long ts_flag; + int ret; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = host_copy_to_user(u_frame, crs, sizeof(*crs), regs); + clear_ts_flag(ts_flag); + if (unlikely(ret)) + return -EFAULT; + + return 0; +} + +static __always_inline int user_psp_stack_copy(e2k_psp_lo_t u_psp_lo, + e2k_psp_hi_t u_psp_hi, s64 u_pshtp_size, + e2k_psp_lo_t k_psp_lo, e2k_psp_hi_t k_psp_hi, + unsigned long copy_size, const pt_regs_t *regs) +{ + void __user *dst; + void *src; + int ret; + + dst = (void __user *) (AS(u_psp_lo).base + AS(u_psp_hi).ind - + u_pshtp_size); + src = (void *) AS(k_psp_lo).base; + + if (host_test_intc_emul_mode(regs) && trace_host_copy_hw_stack_enabled()) + trace_host_copy_hw_stack(dst, src, copy_size, false); + + ret = user_hw_stack_frames_copy(dst, src, copy_size, + regs, k_psp_hi.PSP_hi_ind, false); + + if (host_test_intc_emul_mode(regs) && trace_host_proc_stack_frame_enabled()) + trace_proc_stack_frames((kernel_mem_ps_t *)dst, + (kernel_mem_ps_t *)src, copy_size, + trace_host_proc_stack_frame); + + return ret; + +} + +static __always_inline int user_pcsp_stack_copy(e2k_pcsp_lo_t u_pcsp_lo, + e2k_pcsp_hi_t u_pcsp_hi, s64 u_pcshtp_size, + e2k_pcsp_lo_t k_pcsp_lo, e2k_pcsp_hi_t k_pcsp_hi, + unsigned long copy_size, const pt_regs_t *regs) +{ + void __user *dst; + void *src; + int ret; + + dst = (void __user *)(AS(u_pcsp_lo).base + AS(u_pcsp_hi).ind - + u_pcshtp_size); + src = (void *) AS(k_pcsp_lo).base; + + if (host_test_intc_emul_mode(regs) && trace_host_copy_hw_stack_enabled()) + trace_host_copy_hw_stack(dst, src, copy_size, true); + + ret = user_hw_stack_frames_copy(dst, src, copy_size, + regs, k_pcsp_hi.PCSP_hi_ind, true); + + if (host_test_intc_emul_mode(regs) && trace_host_chain_stack_frame_enabled()) + trace_chain_stack_frames((e2k_mem_crs_t *)dst, + 
(e2k_mem_crs_t *)src, copy_size, + trace_host_chain_stack_frame); + + return ret; +} + +/** + * user_hw_stacks_copy - copy user hardware stacks that have been + * SPILLed to kernel back to user space + * @stacks - saved user stack registers + * @cur_window_q - size of current window in procedure stack, + * needed only if @copy_full is not set + * @copy_full - set if want to copy _all_ of SPILLed stacks + * + * This does not update stacks->pshtp and stacks->pcshtp. Main reason is + * signals: if a signal arrives after copying then it must see a coherent + * state where saved stacks->pshtp and stacks->pcshtp values show how much + * data from user space is spilled to kernel space. + */ +static __always_inline int +native_user_hw_stacks_copy(struct e2k_stacks *stacks, + pt_regs_t *regs, u64 cur_window_q, bool copy_full) +{ + trap_pt_regs_t *trap = regs->trap; + e2k_psp_lo_t u_psp_lo = stacks->psp_lo, + k_psp_lo = current_thread_info()->k_psp_lo; + e2k_psp_hi_t u_psp_hi = stacks->psp_hi; + e2k_pcsp_lo_t u_pcsp_lo = stacks->pcsp_lo, + k_pcsp_lo = current_thread_info()->k_pcsp_lo; + e2k_pcsp_hi_t u_pcsp_hi = stacks->pcsp_hi; + s64 u_pshtp_size, u_pcshtp_size, ps_copy_size, pcs_copy_size; + int ret; + + u_pshtp_size = GET_PSHTP_MEM_INDEX(stacks->pshtp); + u_pcshtp_size = PCSHTP_SIGN_EXTEND(stacks->pcshtp); + + /* + * Copy user's part from kernel stacks into user stacks + * Update user's stack registers + */ + if (copy_full) { + pcs_copy_size = u_pcshtp_size; + ps_copy_size = u_pshtp_size; + } else { + pcs_copy_size = get_pcs_copy_size(u_pcshtp_size); + ps_copy_size = get_ps_copy_size(cur_window_q, u_pshtp_size); + + /* Make sure there is enough space in CF for the FILL */ + BUG_ON((E2K_MAXCR_q - 4) * 16 < E2K_CF_MAX_FILL); + } + + if (likely(pcs_copy_size <= 0 && ps_copy_size <= 0)) + return 0; + + if (unlikely(pcs_copy_size > 0)) { + e2k_pcsp_hi_t k_pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG(); + + /* Since not all user data has been SPILL'ed it is possible + * that we have 
already overflown user's hardware stack. */ + if (unlikely(AS(u_pcsp_hi).ind > AS(u_pcsp_hi).size)) { + ret = handle_chain_stack_bounds(stacks, trap); + if (unlikely(ret)) { + pr_warning("process %s (%d) chain stack overflow (out of memory?)\n", + current->comm, current->pid); + return ret; + } + + u_pcsp_lo = stacks->pcsp_lo; + u_pcsp_hi = stacks->pcsp_hi; + } + + ret = user_pcsp_stack_copy(u_pcsp_lo, u_pcsp_hi, u_pcshtp_size, + k_pcsp_lo, k_pcsp_hi, pcs_copy_size, regs); + if (ret) + return ret; + } + + if (unlikely(ps_copy_size > 0)) { + e2k_psp_hi_t k_psp_hi = NATIVE_NV_READ_PSP_HI_REG(); + + /* Since not all user data has been SPILL'ed it is possible + * that we have already overflowed user's hardware stack. */ + if (unlikely(AS(u_psp_hi).ind > AS(u_psp_hi).size)) { + ret = handle_proc_stack_bounds(stacks, trap); + if (unlikely(ret)) { + pr_warning("process %s (%d) procedure stack overflow (out of memory?)\n", + current->comm, current->pid); + return ret; + } + + u_psp_lo = stacks->psp_lo; + u_psp_hi = stacks->psp_hi; + } + + ret = user_psp_stack_copy(u_psp_lo, u_psp_hi, u_pshtp_size, + k_psp_lo, k_psp_hi, ps_copy_size, regs); + if (ret) + return ret; + } + + return 0; +} + +static inline void collapse_kernel_hw_stacks(struct e2k_stacks *stacks) +{ + e2k_pcsp_lo_t k_pcsp_lo = current_thread_info()->k_pcsp_lo; + e2k_psp_lo_t k_psp_lo = current_thread_info()->k_psp_lo; + unsigned long flags, spilled_pc_size, spilled_p_size; + e2k_pshtp_t pshtp = stacks->pshtp; + u64 *dst; + const u64 *src; + + spilled_pc_size = PCSHTP_SIGN_EXTEND(stacks->pcshtp); + spilled_p_size = GET_PSHTP_MEM_INDEX(pshtp); + DebugUST("guest user spilled to host kernel stack part: chain 0x%lx " + "procedure 0x%lx\n", + spilled_pc_size, spilled_p_size); + /* When user tries to return from the last user frame + * we will have pcshtp = pcsp_hi.ind = 0. But situation + * with pcsp_hi.ind != 0 and pcshtp = 0 is impossible. 
*/ + if (WARN_ON_ONCE(spilled_pc_size < SZ_OF_CR && + AS(stacks->pcsp_hi).ind != 0)) + do_exit(SIGKILL); + + /* Keep the last user frame (see user_hw_stacks_copy_full()) */ + if (spilled_pc_size >= SZ_OF_CR) { + spilled_pc_size -= SZ_OF_CR; + DebugUST("Keep the prev user chain frame, so spilled chain " + "size is now 0x%lx\n", + spilled_pc_size); + } + + raw_all_irq_save(flags); + + if (spilled_pc_size) { + dst = (u64 *) AS(k_pcsp_lo).base; + src = (u64 *) (AS(k_pcsp_lo).base + spilled_pc_size); + collapse_kernel_pcs(dst, src, spilled_pc_size); + + stacks->pcshtp = SZ_OF_CR; + + apply_graph_tracer_delta(-spilled_pc_size); + } + + if (spilled_p_size) { + dst = (u64 *) AS(k_psp_lo).base; + src = (u64 *) (AS(k_psp_lo).base + spilled_p_size); + collapse_kernel_ps(dst, src, spilled_p_size); + + AS(pshtp).ind = 0; + stacks->pshtp = pshtp; + } + + raw_all_irq_restore(flags); +} + +/** + * user_hw_stacks_prepare - prepare user hardware stacks that have been + * SPILLed to kernel back to user space + * @stacks - saved user stack registers + * @cur_window_q - size of current window in procedure stack, + * needed only if @copy_full is not set + * @syscall - true if called upon direct system call exit (no signal handlers) + * + * This does two things: + * + * 1) It is possible that upon kernel entry pcshtp == 0 in some cases: + * - user signal handler had pcshtp==0x20 before return to sigreturn() + * - user context had pcshtp==0x20 before return to makecontext_trampoline() + * - chain stack underflow happened + * So it is possible in sigreturn() and traps, but not in system calls. + * If we are using the trick with return to FILL user hardware stacks then + * we must have frame in chain stack to return to. So in this case kernel's + * chain stack is moved up by one frame (0x20 bytes). 
+ * We also fill the new frame with actual user data and update stacks->pcshtp, + * this is needed to keep the coherent state where saved stacks->pcshtp values + * shows how much data from user space has been spilled to kernel space. + * + * 2) It is not possible to always FILL all of user data that have been + * SPILLed to kernel stacks. So we manually copy the leftovers that can + * not be FILLed to user space. + * This copy does not update stacks->pshtp and stacks->pcshtp. Main reason + * is signals: if a signal arrives after copying then it must see a coherent + * state where saved stacks->pshtp and stacks->pcshtp values show how much + * data from user space has been spilled to kernel space. + */ +static __always_inline void native_user_hw_stacks_prepare( + struct e2k_stacks *stacks, pt_regs_t *regs, + u64 cur_window_q, enum restore_caller from, int syscall) +{ + e2k_pcshtp_t u_pcshtp = stacks->pcshtp; + int ret; + + BUG_ON(from & FROM_PV_VCPU_MODE); + + /* + * 1) Make sure there is free space in kernel chain stack to return to + */ + if (!syscall && u_pcshtp == 0) { + unsigned long flags; + e2k_pcsp_lo_t u_pcsp_lo = stacks->pcsp_lo, + k_pcsp_lo = current_thread_info()->k_pcsp_lo; + e2k_pcsp_hi_t u_pcsp_hi = stacks->pcsp_hi, k_pcsp_hi; + e2k_mem_crs_t __user *u_cframe; + e2k_mem_crs_t *k_crs; + u64 u_cbase; + int ret = -EINVAL; + + raw_all_irq_save(flags); + E2K_FLUSHC; + k_pcsp_hi = READ_PCSP_HI_REG(); + BUG_ON(AS(k_pcsp_hi).ind); + AS(k_pcsp_hi).ind += SZ_OF_CR; + WRITE_PCSP_HI_REG(k_pcsp_hi); + + k_crs = (e2k_mem_crs_t *) AS(k_pcsp_lo).base; + u_cframe = (e2k_mem_crs_t __user *) (AS(u_pcsp_lo).base + + AS(u_pcsp_hi).ind); + u_cbase = ((from & FROM_RETURN_PV_VCPU_TRAP) || + host_test_intc_emul_mode(regs)) ? 
+ u_pcsp_lo.PCSP_lo_base : + (u64) CURRENT_PCS_BASE(); + if ((u64) u_cframe > u_cbase) { + ret = __copy_user_to_current_hw_stack(k_crs, + u_cframe - 1, sizeof(*k_crs), regs, true); + } + raw_all_irq_restore(flags); + + /* Can happen if application returns until runs out of + * chain stack or there is no free memory for stacks. + * There is no user stack to return to - die. */ + if (ret) { + SIGDEBUG_PRINT("SIGKILL. %s\n", + (ret == -EINVAL) ? "tried to return to kernel" : + "ran into Out-of-Memory on user stacks"); + force_sig(SIGKILL); + return; + } + + if (AS(u_pcsp_hi).ind < SZ_OF_CR) { + update_pcsp_regs(AS(u_pcsp_lo).base, + &u_pcsp_lo, &u_pcsp_hi); + stacks->pcsp_lo = u_pcsp_lo; + stacks->pcsp_hi = u_pcsp_hi; + BUG_ON(AS(u_pcsp_hi).ind < SZ_OF_CR); + } + + u_pcshtp = SZ_OF_CR; + stacks->pcshtp = u_pcshtp; + } + + /* + * 2) Copy user data that cannot be FILLed + */ + ret = native_user_hw_stacks_copy(stacks, regs, cur_window_q, false); + if (unlikely(ret)) + do_exit(SIGKILL); +} + +#ifndef CONFIG_VIRTUALIZATION +/* native kernel without virtualization support */ +static __always_inline int +user_hw_stacks_copy(struct e2k_stacks *stacks, + pt_regs_t *regs, u64 cur_window_q, bool copy_full) +{ + return native_user_hw_stacks_copy(stacks, regs, cur_window_q, copy_full); +} + +static __always_inline void +host_user_hw_stacks_prepare(struct e2k_stacks *stacks, pt_regs_t *regs, + u64 cur_window_q, enum restore_caller from, int syscall) +{ + native_user_hw_stacks_prepare(stacks, regs, cur_window_q, + from, syscall); +} +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* It is native guest kernel (without paravirtualization) */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* It is paravirtualized kernel (host and guest) */ +#include +#elif defined(CONFIG_KVM_HOST_MODE) +/* It is host kernel with virtualization support */ +#include +#else /* unknown mode */ +#error "unknown virtualization mode" +#endif /* !CONFIG_VIRTUALIZATION */ + +/** + * user_hw_stacks_copy_full - copy 
part of user stacks that was SPILLed + * into kernel back to user stacks. + * @stacks - saved user stack registers + * @regs - pt_regs pointer + * @crs - last frame to copy + * + * If @crs is not NULL then the frame pointed to by it will also be copied + * to userspace. Note that 'stacks->pcsp_hi.ind' is _not_ updated after + * copying since it would leave stack in inconsistent state (with two + * copies of the same @crs frame), this is left to the caller. * + * + * Inlining this reduces the amount of memory to copy in + * collapse_kernel_hw_stacks(). + */ +static inline int do_user_hw_stacks_copy_full(struct e2k_stacks *stacks, + pt_regs_t *regs, e2k_mem_crs_t *crs) +{ + int ret; + + /* + * Copy part of user stacks that were SPILLed into kernel stacks + */ + ret = user_hw_stacks_copy(stacks, regs, 0, true); + if (unlikely(ret)) + return ret; + + /* + * Nothing to FILL so remove the resulting hole from kernel stacks. + * + * IMPORTANT: there is always at least one user frame at the top of + * kernel stack - the one that issued a system call (in case of an + * exception we uphold this rule manually, see user_hw_stacks_prepare()) + * We keep this ABI and _always_ leave space for one user frame, + * this way we can later FILL using return trick (otherwise there + * would be no space in chain stack for the trick). + */ + collapse_kernel_hw_stacks(stacks); + + /* + * Copy saved %cr registers + * + * Caller must take care of filling of resulting hole + * (last user frame from pcshtp == SZ_OF_CR). 
+ */ + if (crs) { + e2k_mem_crs_t __user *u_frame; + int ret; + + u_frame = (void __user *) (AS(stacks->pcsp_lo).base + + AS(stacks->pcsp_hi).ind); + ret = user_crs_frames_copy(u_frame, regs, ®s->crs); + if (unlikely(ret)) + return ret; + } + + return 0; +} + +#endif /* _E2K_COPY_HW_STACKS_H */ + diff --git a/arch/e2k/include/asm/coredump.h b/arch/e2k/include/asm/coredump.h new file mode 100644 index 000000000000..94abf5e84f1c --- /dev/null +++ b/arch/e2k/include/asm/coredump.h @@ -0,0 +1,10 @@ +#ifndef _E2K_COREDUMP_H +#define _E2K_COREDUMP_H +/* + * For coredump + */ +extern void clear_delayed_free_hw_stacks(struct mm_struct *mm); +extern void create_delayed_free_hw_stacks(void); + +#endif /* _E2K_COREDUMP_H */ + diff --git a/arch/e2k/include/asm/cpu.h b/arch/e2k/include/asm/cpu.h new file mode 100644 index 000000000000..42ca8b91c8e5 --- /dev/null +++ b/arch/e2k/include/asm/cpu.h @@ -0,0 +1,36 @@ +#ifndef _ASM_E2K_CPU_H_ +#define _ASM_E2K_CPU_H_ + +#include + +extern int arch_register_cpu(int num); +#ifdef CONFIG_HOTPLUG_CPU +extern void arch_unregister_cpu(int); +#endif + +static inline unsigned long +native_get_cpu_running_cycles(void) +{ + /* native kernel is running always */ + return get_cycles(); +} + +extern void store_cpu_info(int cpuid); + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized guest and host kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#else /* ! CONFIG_PARAVIRT_GUEST && ! 
CONFIG_KVM_GUEST_KERNEL */ +/* native kernel or native kernel with virtualization support */ + +static inline unsigned long +get_cpu_running_cycles(void) +{ + return native_get_cpu_running_cycles(); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* _ASM_E2K_CPU_H_ */ diff --git a/arch/e2k/include/asm/cpu_features.h b/arch/e2k/include/asm/cpu_features.h new file mode 100644 index 000000000000..1d0cb66ab68b --- /dev/null +++ b/arch/e2k/include/asm/cpu_features.h @@ -0,0 +1,61 @@ +#ifndef _E2K_ASM_CPU_FEATURES_H +#define _E2K_ASM_CPU_FEATURES_H + +#ifndef __ASSEMBLY__ +enum { + /* Hardware bugs */ + CPU_HWBUG_LARGE_PAGES, + CPU_HWBUG_LAPIC_TIMER, + CPU_HWBUG_PIO_READS, + CPU_HWBUG_ATOMIC, + CPU_HWBUG_CLW, + CPU_HWBUG_PAGE_A, + CPU_HWBUG_SPURIOUS_EXC_ILL_INSTR_ADDR, + CPU_HWBUG_UNALIGNED_LOADS, + CPU_HWBUG_CANNOT_DO_DMA_IN_NEIGHBOUR_NODE, + CPU_HWBUG_DMA_AT_APIC_ADDR, + CPU_HWBUG_KERNEL_DATA_MONITOR, + CPU_HWBUG_WRITE_MEMORY_BARRIER, + CPU_HWBUG_BAD_RESET, + CPU_HWBUG_BREAKPOINT_INSTR, + CPU_HWBUG_E8C_WATCHDOG, + CPU_HWBUG_IOMMU, + CPU_HWBUG_WC_DAM, + CPU_HWBUG_TRAP_CELLAR_S_F, + CPU_HWBUG_SS, + CPU_HWBUG_AAU_AALDV, + CPU_HWBUG_LEVEL_EOI, + CPU_HWBUG_FALSE_SS, + CPU_HWBUG_SPURIOUS_EXC_DATA_DEBUG, + CPU_HWBUG_TLB_FLUSH_L1D, + CPU_HWBUG_GUEST_ASYNC_PM, + CPU_HWBUG_E16C_SLEEP, + CPU_HWBUG_L1I_STOPS_WORKING, + CPU_HWBUG_CLW_STALE_L1_ENTRY, + CPU_HWBUG_PIPELINE_FREEZE_MONITORS, + CPU_HWBUG_C3_WAIT_MA_C, + CPU_HWBUG_VIRT_SCLKM3_INTC, + CPU_HWBUG_USD_ALIGNMENT, + CPU_HWBUG_VIRT_PSIZE_INTERCEPTION, + CPU_NO_HWBUG_SOFT_WAIT, + CPU_HWBUG_SOFT_WAIT_E8C2, + CPU_HWBUG_C3, + + /* Features, not bugs */ + CPU_FEAT_WC_PCI_PREFETCH, + CPU_FEAT_FLUSH_DC_IC, + CPU_FEAT_EPIC, + CPU_FEAT_TRAP_V5, + CPU_FEAT_TRAP_V6, + CPU_FEAT_QPREG, + CPU_FEAT_HW_PREFETCHER, + CPU_FEAT_SEPARATE_TLU_CACHE, + CPU_FEAT_ISET_V3, + CPU_FEAT_ISET_V5, + CPU_FEAT_ISET_V6, + + NR_CPU_FEATURES +}; +#endif + +#endif diff --git a/arch/e2k/include/asm/cpu_regs.h b/arch/e2k/include/asm/cpu_regs.h new file mode 100644 
index 000000000000..699966a00004 --- /dev/null +++ b/arch/e2k/include/asm/cpu_regs.h @@ -0,0 +1,3660 @@ + +#ifndef _E2K_CPU_REGS_H_ +#define _E2K_CPU_REGS_H_ + +#ifdef __KERNEL__ + +#include +#include +#include + +#ifndef __ASSEMBLY__ +#include +#include + +#define NATIVE_STRIP_PCSHTP_WINDOW() NATIVE_WRITE_PCSHTP_REG_SVALUE(0) +#define STRIP_PCSHTP_WINDOW() WRITE_PCSHTP_REG_SVALUE(0) + +/* + * Read low double-word OS Compilation Unit Register (OSCUD) + * from the low word structure + * Register fields access: fff = OSCUD_lo.OSCUD_lo_xxx; + * Register double-word half access: oscud_lo = OSCUD_lo.OSCUD_lo_half; + */ +#define NATIVE_READ_OSCUD_LO_REG() \ +({ \ + e2k_oscud_lo_t OSCUD_lo; \ + OSCUD_lo.OSCUD_lo_half = NATIVE_READ_OSCUD_LO_REG_VALUE(); \ + OSCUD_lo; \ +}) +#define READ_OSCUD_LO_REG() \ +({ \ + e2k_oscud_lo_t OSCUD_lo; \ + OSCUD_lo.OSCUD_lo_half = READ_OSCUD_LO_REG_VALUE(); \ + OSCUD_lo; \ +}) +#define BOOT_READ_OSCUD_LO_REG() \ +({ \ + e2k_oscud_lo_t OSCUD_lo; \ + OSCUD_lo.OSCUD_lo_half = BOOT_READ_OSCUD_LO_REG_VALUE(); \ + OSCUD_lo; \ +}) + +static inline e2k_oscud_lo_t +native_read_OSCUD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSCUD_lo register 0x%lx\n", + NATIVE_READ_OSCUD_LO_REG().OSCUD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_READ_OSCUD_LO_REG(); +} +static inline e2k_oscud_lo_t +read_OSCUD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSCUD_lo register 0x%lx\n", + READ_OSCUD_LO_REG().OSCUD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_OSCUD_LO_REG(); +} +static inline e2k_oscud_lo_t +boot_read_OSCUD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSCUD_lo register 0x%lx\n", + BOOT_READ_OSCUD_LO_REG().OSCUD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_OSCUD_LO_REG(); +} + +/* + * Read high double-word OS Compilation Unit Register (OSCUD) + * from the high word structure + * Register fields access: fff = 
OSCUD_hi.OSCUD_hi_xxx; + * Register double-word half access: oscud_lo = OSCUD_hi.OSCUD_hi_half; + */ +#define NATIVE_READ_OSCUD_HI_REG() \ +({ \ + e2k_oscud_hi_t OSCUD_hi; \ + OSCUD_hi.OSCUD_hi_half = NATIVE_READ_OSCUD_HI_REG_VALUE(); \ + OSCUD_hi; \ +}) +#define READ_OSCUD_HI_REG() \ +({ \ + e2k_oscud_hi_t OSCUD_hi; \ + OSCUD_hi.OSCUD_hi_half = READ_OSCUD_HI_REG_VALUE(); \ + OSCUD_hi; \ +}) +#define BOOT_READ_OSCUD_HI_REG() \ +({ \ + e2k_oscud_hi_t OSCUD_hi; \ + OSCUD_hi.OSCUD_hi_half = BOOT_READ_OSCUD_HI_REG_VALUE(); \ + OSCUD_hi; \ +}) + +static inline e2k_oscud_hi_t +native_read_OSCUD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSCUD_hi register 0x%lx\n", + NATIVE_READ_OSCUD_HI_REG().OSCUD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_READ_OSCUD_HI_REG(); +} +static inline e2k_oscud_hi_t +read_OSCUD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSCUD_hi register 0x%lx\n", + READ_OSCUD_HI_REG().OSCUD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_OSCUD_HI_REG(); +} +static inline e2k_oscud_hi_t +boot_read_OSCUD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSCUD_hi register 0x%lx\n", + BOOT_READ_OSCUD_HI_REG().OSCUD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_OSCUD_HI_REG(); +} + +/* + * Read quad-word OS Compilation Unit Register (OSCUD) to the structure + * Register fields access: fff = OSCUD -> OSCUD_xxx + * Register double-word halfs access: OSCUD_lo = OSCUD -> OSCUD_lo_reg + * OSCUD_hi = OSCUD -> OSCUD_hi_reg + */ +#define READ_OSCUD_REG() \ +({ \ + oscud_struct_t OSCUD; \ + OSCUD.OSCUD_hi_struct = READ_OSCUD_HI_REG(); \ + OSCUD.OSCUD_lo_struct = READ_OSCUD_LO_REG(); \ + OSCUD; \ +}) +#define READ_OSCUD_REG_TO(OSCUD) (*(OSCUD) = READ_OSCUD_REG()) +#define BOOT_READ_OSCUD_REG() \ +({ \ + oscud_struct_t OSCUD; \ + OSCUD.OSCUD_hi_struct = BOOT_READ_OSCUD_HI_REG(); \ + OSCUD.OSCUD_lo_struct = BOOT_READ_OSCUD_LO_REG(); \ + 
OSCUD; \ +}) +#define READ_OSCUD_REG_TO(OSCUD) (*(OSCUD) = READ_OSCUD_REG()) +#define BOOT_READ_OSCUD_REG_TO(OSCUD) (*(OSCUD) = BOOT_READ_OSCUD_REG()) + +static inline void +read_OSCUD_reg(oscud_struct_t *OSCUD) +{ + READ_OSCUD_REG_TO(OSCUD); +} +static inline void +boot_read_OSCUD_reg(oscud_struct_t *OSCUD) +{ + BOOT_READ_OSCUD_REG_TO(OSCUD); +} + +/* + * Write low double-word OS Compilation Unit Register (OSCUD) + * from the low word structure + * Register fields filling: OSCUD_lo.OSCUD_lo_xxx = fff; + * Register double-word half filling: OSCUD_lo.OSCUD_lo_half = oscud_lo; + */ +#define NATIVE_WRITE_OSCUD_LO_REG(OSCUD_lo) \ +({ \ + NATIVE_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo.OSCUD_lo_half); \ +}) +#define WRITE_OSCUD_LO_REG(OSCUD_lo) \ +({ \ + WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo.OSCUD_lo_half); \ +}) +#define BOOT_WRITE_OSCUD_LO_REG(OSCUD_lo) \ +({ \ + BOOT_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo.OSCUD_lo_half); \ +}) + +static inline void +native_write_OSCUD_lo_reg(e2k_oscud_lo_t OSCUD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write OSCUD_lo register 0x%lx\n", OSCUD_lo.OSCUD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_WRITE_OSCUD_LO_REG(OSCUD_lo); +} +static inline void +write_OSCUD_lo_reg(e2k_oscud_lo_t OSCUD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write OSCUD_lo register 0x%lx\n", OSCUD_lo.OSCUD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_OSCUD_LO_REG(OSCUD_lo); +} + +/* + * Write high double-word OS Compilation Unit Register (OSCUD) + * from the high word structure + * Register fields filling: OSCUD_hi.OSCUD_hi_xxx = fff; + * Register double-word half filling: OSCUD_hi.OSCUD_hi_half = oscud_lo; + */ +#define NATIVE_WRITE_OSCUD_HI_REG(OSCUD_hi) \ +({ \ + NATIVE_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi.OSCUD_hi_half); \ +}) +#define WRITE_OSCUD_HI_REG(OSCUD_hi) \ +({ \ + WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi.OSCUD_hi_half); \ +}) +#define BOOT_WRITE_OSCUD_HI_REG(OSCUD_hi) \ +({ \ + 
BOOT_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi.OSCUD_hi_half); \ +}) + +static inline void +native_write_OSCUD_hi_reg(e2k_oscud_hi_t OSCUD_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write OSCUD_hi register 0x%lx\n", OSCUD_hi.OSCUD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_WRITE_OSCUD_HI_REG(OSCUD_hi); +} +static inline void +write_OSCUD_hi_reg(e2k_oscud_hi_t OSCUD_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write OSCUD_hi register 0x%lx\n", OSCUD_hi.OSCUD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_OSCUD_HI_REG(OSCUD_hi); +} + +/* + * Write high & low quad-word OS Compilation Unit Register (OSCUD) + * from the high & low word structure + */ + +#define WRITE_OSCUD_REG_VALUE(OSCUD_hi_value, OSCUD_lo_value) \ +({ \ + WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value); \ + WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value); \ +}) +#define BOOT_WRITE_OSCUD_REG_VALUE(OSCUD_hi_value, OSCUD_lo_value) \ +({ \ + BOOT_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value); \ + BOOT_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value); \ +}) +#define WRITE_OSCUD_REG(OSCUD_hi, OSCUD_lo) \ +({ \ + WRITE_OSCUD_REG_VALUE(OSCUD_hi.OSCUD_hi_half, \ + OSCUD_lo.OSCUD_lo_half); \ +}) +#define BOOT_WRITE_OSCUD_REG(OSCUD_hi, OSCUD_lo) \ +({ \ + BOOT_WRITE_OSCUD_REG_VALUE(OSCUD_hi.OSCUD_hi_half, \ + OSCUD_lo.OSCUD_lo_half); \ +}) + +static inline void +write_OSCUD_hi_lo_reg(e2k_oscud_hi_t OSCUD_hi, e2k_oscud_lo_t OSCUD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write OSCUD_hi register 0x%lx\n", OSCUD_hi.OSCUD_hi_half); + boot_printk("Write OSCUD_lo register 0x%lx\n", OSCUD_lo.OSCUD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_OSCUD_REG(OSCUD_hi, OSCUD_lo); +} + +/* + * Write quad-word OS Compilation Unit Register (OSCUD) from the structure + * Register fields filling: OSCUD.OSCUD_xxx = fff; + * Register double-word halfs filling: OSCUD.OSCUD_lo_reg = OSCUD_lo; + * OSCUD.OSCUD_hi_reg = OSCUD_hi; + */ +#define WRITE_OSCUD_REG_VALUE(OSCUD_hi_value, OSCUD_lo_value) 
\ +({ \ + WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value); \ + WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value); \ +}) +#define BOOT_WRITE_OSCUD_REG_VALUE(OSCUD_hi_value, OSCUD_lo_value) \ +({ \ + BOOT_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value); \ + BOOT_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value); \ +}) + +static inline void +write_OSCUD_reg(oscud_struct_t OSCUD) +{ + WRITE_OSCUD_REG(OSCUD.OSCUD_hi_struct, OSCUD.OSCUD_lo_struct); +} + +/* + * Read low double-word OS Globals Register (OSGD) + * from the low word structure + * Register fields access: fff = OSGD_lo.OSGD_lo_xxx; + * Register double-word half access: osgd_lo = OSGD_lo.OSGD_lo_half; + */ +#define NATIVE_READ_OSGD_LO_REG() \ +({ \ + e2k_osgd_lo_t OSGD_lo; \ + OSGD_lo.OSGD_lo_half = NATIVE_READ_OSGD_LO_REG_VALUE(); \ + OSGD_lo; \ +}) +#define READ_OSGD_LO_REG() \ +({ \ + e2k_osgd_lo_t OSGD_lo; \ + OSGD_lo.OSGD_lo_half = READ_OSGD_LO_REG_VALUE(); \ + OSGD_lo; \ +}) +#define BOOT_READ_OSGD_LO_REG() \ +({ \ + e2k_osgd_lo_t OSGD_lo; \ + OSGD_lo.OSGD_lo_half = BOOT_READ_OSGD_LO_REG_VALUE(); \ + OSGD_lo; \ +}) + +static inline e2k_osgd_lo_t +native_read_OSGD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSGD_lo register 0x%lx\n", + NATIVE_READ_OSGD_LO_REG().OSGD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_READ_OSGD_LO_REG(); +} +static inline e2k_osgd_lo_t +read_OSGD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSGD_lo register 0x%lx\n", + READ_OSGD_LO_REG().OSGD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_OSGD_LO_REG(); +} +static inline e2k_osgd_lo_t +boot_read_OSGD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSGD_lo register 0x%lx\n", + BOOT_READ_OSGD_LO_REG().OSGD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_OSGD_LO_REG(); +} + +/* + * Read high double-word OS Globals Register (OSGD) + * from the high word structure + * Register fields access: fff = OSGD_hi.OSGD_hi_xxx; + * Register 
double-word half access: osgd_lo = OSGD_hi.OSGD_hi_half; + */ +#define NATIVE_READ_OSGD_HI_REG() \ +({ \ + e2k_osgd_hi_t OSGD_hi; \ + OSGD_hi.OSGD_hi_half = NATIVE_READ_OSGD_HI_REG_VALUE(); \ + OSGD_hi; \ +}) +#define READ_OSGD_HI_REG() \ +({ \ + e2k_osgd_hi_t OSGD_hi; \ + OSGD_hi.OSGD_hi_half = READ_OSGD_HI_REG_VALUE(); \ + OSGD_hi; \ +}) +#define BOOT_READ_OSGD_HI_REG() \ +({ \ + e2k_osgd_hi_t OSGD_hi; \ + OSGD_hi.OSGD_hi_half = BOOT_READ_OSGD_HI_REG_VALUE(); \ + OSGD_hi; \ +}) + +static inline e2k_osgd_hi_t +native_read_OSGD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSGD_hi register 0x%lx\n", + NATIVE_READ_OSGD_HI_REG().OSGD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_READ_OSGD_HI_REG(); +} +static inline e2k_osgd_hi_t +read_OSGD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSGD_hi register 0x%lx\n", + READ_OSGD_HI_REG().OSGD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_OSGD_HI_REG(); +} +static inline e2k_osgd_hi_t +boot_read_OSGD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSGD_hi register 0x%lx\n", + BOOT_READ_OSGD_HI_REG().OSGD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_OSGD_HI_REG(); +} + +/* + * Read quad-word OS Globals Register (OSGD) to the structure + * Register fields access: fff = OSGD -> OSGD_xxx + * Register double-word halfs access: OSGD_lo = OSGD -> OSGD_lo_reg + * OSGD_hi = OSGD -> OSGD_hi_reg + */ +#define READ_OSGD_REG() \ +({ \ + osgd_struct_t OSGD; \ + OSGD.OSGD_hi_struct = READ_OSGD_HI_REG(); \ + OSGD.OSGD_lo_struct = READ_OSGD_LO_REG(); \ + OSGD; \ +}) +#define READ_OSGD_REG_TO(OSGD) (*(OSGD) = READ_OSGD_REG()) +#define BOOT_READ_OSGD_REG() \ +({ \ + osgd_struct_t OSGD; \ + OSGD.OSGD_hi_struct = BOOT_READ_OSGD_HI_REG(); \ + OSGD.OSGD_lo_struct = BOOT_READ_OSGD_LO_REG(); \ + OSGD; \ +}) +#define BOOT_READ_OSGD_REG_TO(OSGD) (*(OSGD) = BOOT_READ_OSGD_REG()) + +static inline void 
+read_OSGD_reg(osgd_struct_t *OSGD) +{ + READ_OSGD_REG_TO(OSGD); +} +static inline void +boot_read_OSGD_reg(osgd_struct_t *OSGD) +{ + BOOT_READ_OSGD_REG_TO(OSGD); +} + +/* + * Write low double-word OS Globals Register (OSGD) + * from the low word structure + * Register fields filling: OSGD_lo.OSGD_lo_xxx = fff; + * Register double-word half filling: OSGD_lo.OSGD_lo_half = gd_lo; + */ +#define NATIVE_WRITE_OSGD_LO_REG(OSGD_lo) \ +({ \ + NATIVE_WRITE_OSGD_LO_REG_VALUE(OSGD_lo.OSGD_lo_half); \ +}) +#define WRITE_OSGD_LO_REG(OSGD_lo) \ +({ \ + WRITE_OSGD_LO_REG_VALUE(OSGD_lo.OSGD_lo_half); \ +}) +#define BOOT_WRITE_OSGD_LO_REG(OSGD_lo) \ +({ \ + BOOT_WRITE_OSGD_LO_REG_VALUE(OSGD_lo.OSGD_lo_half); \ +}) + +static inline void +native_write_OSGD_lo_reg(e2k_osgd_lo_t OSGD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write OSGD_lo register 0x%lx\n", OSGD_lo.OSGD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_WRITE_OSGD_LO_REG(OSGD_lo); +} +static inline void +write_OSGD_lo_reg(e2k_osgd_lo_t OSGD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write OSGD_lo register 0x%lx\n", OSGD_lo.OSGD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_OSGD_LO_REG(OSGD_lo); +} + +/* + * Write high double-word OS Globals Register (OSGD) + * from the high word structure + * Register fields filling: OSGD_hi.OSGD_hi_xxx = fff; + * Register double-word half filling: OSGD_hi.OSGD_hi_half = gd_lo; + */ +#define NATIVE_WRITE_OSGD_HI_REG(OSGD_hi) \ +({ \ + NATIVE_WRITE_OSGD_HI_REG_VALUE(OSGD_hi.OSGD_hi_half); \ +}) +#define WRITE_OSGD_HI_REG(OSGD_hi) \ +({ \ + WRITE_OSGD_HI_REG_VALUE(OSGD_hi.OSGD_hi_half); \ +}) +#define BOOT_WRITE_OSGD_HI_REG(OSGD_hi) \ +({ \ + BOOT_WRITE_OSGD_HI_REG_VALUE(OSGD_hi.OSGD_hi_half); \ +}) + +static inline void +native_write_OSGD_hi_reg(e2k_osgd_hi_t OSGD_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write OSGD_hi register 0x%lx\n", OSGD_hi.OSGD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + 
NATIVE_WRITE_OSGD_HI_REG(OSGD_hi); +} +static inline void +write_OSGD_hi_reg(e2k_osgd_hi_t OSGD_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write OSGD_hi register 0x%lx\n", OSGD_hi.OSGD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_OSGD_HI_REG(OSGD_hi); +} + +/* + * Write high & low quad-word OS Globals Register (OSGD) + * from the high & low word structure + */ + +#define BOOT_WRITE_OSGD_REG_VALUE(OSGD_hi_value, OSGD_lo_value) \ +({ \ + BOOT_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value); \ + BOOT_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value); \ +}) +#define WRITE_OSGD_REG(OSGD_hi, OSGD_lo) \ +({ \ + WRITE_OSGD_REG_VALUE(OSGD_hi.OSGD_hi_half, OSGD_lo.OSGD_lo_half); \ +}) +#define BOOT_WRITE_OSGD_REG(OSGD_hi, OSGD_lo) \ +({ \ + BOOT_WRITE_OSGD_REG_VALUE(OSGD_hi.OSGD_hi_half, \ + OSGD_lo.OSGD_lo_half); \ +}) + +static inline void +write_OSGD_hi_lo_reg(e2k_osgd_hi_t OSGD_hi, e2k_osgd_lo_t OSGD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write OSGD_hi register 0x%lx\n", OSGD_hi.OSGD_hi_half); + boot_printk("Write OSGD_lo register 0x%lx\n", OSGD_lo.OSGD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_OSGD_REG(OSGD_hi, OSGD_lo); +} + +/* + * Write quad-word OS Globals Register (OSGD) from the structure + * Register fields filling: OSGD.OSGD_xxx = fff; + * Register double-word halfs filling: OSGD.OSGD_lo_reg = OSGD_lo; + * OSGD.OSGD_hi_reg = OSGD_hi; + */ +static inline void +write_OSGD_reg(osgd_struct_t OSGD) +{ + WRITE_OSGD_REG(OSGD.OSGD_hi_struct, OSGD.OSGD_lo_struct); +} + +/* + * Read low double-word Compilation Unit Register (CUD) + * from the low word structure + * Register fields access: fff = CUD_lo.CUD_lo_xxx; + * Register double-word half access: cud_lo = CUD_lo.CUD_lo_half; + */ +#define NATIVE_READ_CUD_LO_REG() \ +({ \ + e2k_cud_lo_t CUD_lo; \ + CUD_lo.CUD_lo_half = NATIVE_READ_CUD_LO_REG_VALUE(); \ + CUD_lo; \ +}) +#define READ_CUD_LO_REG() \ +({ \ + e2k_cud_lo_t CUD_lo; \ + CUD_lo.CUD_lo_half = READ_CUD_LO_REG_VALUE(); \ + 
CUD_lo; \ +}) +#define BOOT_READ_CUD_LO_REG() \ +({ \ + e2k_cud_lo_t CUD_lo; \ + CUD_lo.CUD_lo_half = BOOT_READ_CUD_LO_REG_VALUE(); \ + CUD_lo; \ +}) + +static inline e2k_cud_lo_t +read_CUD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read CUD_lo register 0x%lx\n", + READ_CUD_LO_REG().CUD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_CUD_LO_REG(); +} + +/* + * Read high double-word Compilation Unit Register (CUD) + * from the high word structure + * Register fields access: fff = CUD_hi.CUD_hi_xxx; + * Register double-word half access: cud_lo = CUD_hi.CUD_hi_half; + */ +#define NATIVE_READ_CUD_HI_REG() \ +({ \ + e2k_cud_hi_t CUD_hi; \ + CUD_hi.CUD_hi_half = NATIVE_READ_CUD_HI_REG_VALUE(); \ + CUD_hi; \ +}) +#define READ_CUD_HI_REG() \ +({ \ + e2k_cud_hi_t CUD_hi; \ + CUD_hi.CUD_hi_half = READ_CUD_HI_REG_VALUE(); \ + CUD_hi; \ +}) +#define BOOT_READ_CUD_HI_REG() \ +({ \ + e2k_cud_hi_t CUD_hi; \ + CUD_hi.CUD_hi_half = BOOT_READ_CUD_HI_REG_VALUE(); \ + CUD_hi; \ +}) + +static inline e2k_cud_hi_t +read_CUD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read CUD_hi register 0x%lx\n", + READ_CUD_HI_REG().CUD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_CUD_HI_REG(); +} + +/* + * Read quad-word Compilation Unit Register (CUD) to the structure + * Register fields access: fff = CUD -> CUD_xxx + * Register double-word halfs access: CUD_lo = CUD -> CUD_lo_reg + * CUD_hi = CUD -> CUD_hi_reg + */ +#define READ_CUD_REG() \ +({ \ + cud_struct_t CUD; \ + CUD.CUD_hi_struct = READ_CUD_HI_REG(); \ + CUD.CUD_lo_struct = READ_CUD_LO_REG(); \ + CUD; \ +}) +#define READ_CUD_REG_TO(CUD) (*(CUD) = READ_CUD_REG()) +#define BOOT_READ_CUD_REG() \ +({ \ + cud_struct_t CUD; \ + CUD.CUD_hi_struct = BOOT_READ_CUD_HI_REG(); \ + CUD.CUD_lo_struct = BOOT_READ_CUD_LO_REG(); \ + CUD; \ +}) +#define BOOT_READ_CUD_REG_TO(CUD) (*(CUD) = BOOT_READ_CUD_REG()) + +static inline void +read_CUD_reg(cud_struct_t *CUD) +{ + 
READ_CUD_REG_TO(CUD); +} +static inline void +boot_read_CUD_reg(cud_struct_t *CUD) +{ + BOOT_READ_CUD_REG_TO(CUD); +} + +/* + * Write low double-word Compilation Unit Register (CUD) + * from the low word structure + * Register fields filling: CUD_lo.CUD_lo_xxx = fff; + * Register double-word half filling: CUD_lo.CUD_lo_half = cud_lo; + */ +#define WRITE_CUD_LO_REG(CUD_lo) \ +({ \ + WRITE_CUD_LO_REG_VALUE(CUD_lo.CUD_lo_half); \ +}) +#define BOOT_WRITE_CUD_LO_REG(CUD_lo) \ +({ \ + BOOT_WRITE_CUD_LO_REG_VALUE(CUD_lo.CUD_lo_half); \ +}) + +static inline void +write_CUD_lo_reg(e2k_cud_lo_t CUD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write CUD_lo register 0x%lx\n", CUD_lo.CUD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_CUD_LO_REG(CUD_lo); +} + +/* + * Write high double-word Compilation Unit Register (CUD) + * from the high word structure + * Register fields filling: CUD_hi.CUD_hi_xxx = fff; + * Register double-word half filling: CUD_hi.CUD_hi_half = cud_lo; + */ +#define WRITE_CUD_HI_REG(CUD_hi) \ +({ \ + WRITE_CUD_HI_REG_VALUE(CUD_hi.CUD_hi_half); \ +}) +#define BOOT_WRITE_CUD_HI_REG(CUD_hi) \ +({ \ + BOOT_WRITE_CUD_HI_REG_VALUE(CUD_hi.CUD_hi_half); \ +}) + +static inline void +write_CUD_hi_reg(e2k_cud_hi_t CUD_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write CUD_hi register 0x%lx\n", CUD_hi.CUD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_CUD_HI_REG(CUD_hi); +} + +/* + * Write high & low quad-word Compilation Unit Register (CUD) + * from the high & low word structure + */ + +#define WRITE_CUD_REG_VALUE(CUD_hi_value, CUD_lo_value) \ +({ \ + WRITE_CUD_HI_REG_VALUE(CUD_hi_value); \ + WRITE_CUD_LO_REG_VALUE(CUD_lo_value); \ +}) +#define BOOT_WRITE_CUD_REG_VALUE(CUD_hi_value, CUD_lo_value) \ +({ \ + BOOT_WRITE_CUD_HI_REG_VALUE(CUD_hi_value); \ + BOOT_WRITE_CUD_LO_REG_VALUE(CUD_lo_value); \ +}) +#define WRITE_CUD_REG(CUD_hi, CUD_lo) \ +({ \ + WRITE_CUD_REG_VALUE(CUD_hi.CUD_hi_half, CUD_lo.CUD_lo_half); \ +}) +#define 
BOOT_WRITE_CUD_REG(CUD_hi, CUD_lo) \ +({ \ + BOOT_WRITE_CUD_REG_VALUE(CUD_hi.CUD_hi_half, CUD_lo.CUD_lo_half); \ +}) + +static inline void +write_CUD_hi_lo_reg(e2k_cud_hi_t CUD_hi, e2k_cud_lo_t CUD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write CUD_hi register 0x%lx\n", CUD_hi.CUD_hi_half); + boot_printk("Write CUD_lo register 0x%lx\n", CUD_lo.CUD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_CUD_REG(CUD_hi, CUD_lo); +} + +/* + * Write quad-word Compilation Unit Register (CUD) from the structure + * Register fields filling: CUD.CUD_xxx = fff; + * Register double-word halfs filling: CUD.CUD_lo_reg = CUD_lo; + * CUD.CUD_hi_reg = CUD_hi; + */ +static inline void +write_CUD_reg(cud_struct_t CUD) +{ + WRITE_CUD_REG(CUD.CUD_hi_struct, CUD.CUD_lo_struct); +} + +/* + * Read low double-word Globals Register (GD) + * from the low word structure + * Register fields access: fff = GD_lo.GD_lo_xxx; + * Register double-word half access: gd_lo = GD_lo.GD_lo_half; + */ +#define READ_GD_LO_REG() \ +({ \ + e2k_gd_lo_t GD_lo; \ + GD_lo.GD_lo_half = READ_GD_LO_REG_VALUE(); \ + GD_lo; \ +}) +#define BOOT_READ_GD_LO_REG() \ +({ \ + e2k_gd_lo_t GD_lo; \ + GD_lo.GD_lo_half = BOOT_READ_GD_LO_REG_VALUE(); \ + GD_lo; \ +}) + +static inline e2k_gd_lo_t +read_GD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read GD_lo register 0x%lx\n", + READ_GD_LO_REG().GD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_GD_LO_REG(); +} +static inline e2k_gd_lo_t +boot_read_GD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read GD_lo register 0x%lx\n", + BOOT_READ_GD_LO_REG().GD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_GD_LO_REG(); +} + +/* + * Read high double-word Globals Register (GD) + * from the high word structure + * Register fields access: fff = GD_hi.GD_hi_xxx; + * Register double-word half access: gd_lo = GD_hi.GD_hi_half; + */ +#define READ_GD_HI_REG() \ +({ \ + e2k_gd_hi_t GD_hi; \ + 
GD_hi.GD_hi_half = READ_GD_HI_REG_VALUE(); \ + GD_hi; \ +}) +#define BOOT_READ_GD_HI_REG() \ +({ \ + e2k_gd_hi_t GD_hi; \ + GD_hi.GD_hi_half = BOOT_READ_GD_HI_REG_VALUE(); \ + GD_hi; \ +}) + +static inline e2k_gd_hi_t +read_GD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read GD_hi register 0x%lx\n", + READ_GD_HI_REG().GD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_GD_HI_REG(); +} +static inline e2k_gd_hi_t +boot_read_GD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read GD_hi register 0x%lx\n", + BOOT_READ_GD_HI_REG().GD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_GD_HI_REG(); +} + +/* + * Read quad-word Globals Register (GD) to the structure + * Register fields access: fff = GD -> GD_xxx + * Register double-word halfs access: GD_lo = GD -> GD_lo_reg + * GD_hi = GD -> GD_hi_reg + */ +#define READ_GD_REG() \ +({ \ + gd_struct_t GD; \ + GD.GD_hi_struct = READ_GD_HI_REG(); \ + GD.GD_lo_struct = READ_GD_LO_REG(); \ + GD; \ +}) +#define READ_GD_REG_TO(GD) (*(GD) = READ_GD_REG()) +#define BOOT_READ_GD_REG() \ +({ \ + gd_struct_t GD; \ + GD.GD_hi_struct = BOOT_READ_GD_HI_REG(); \ + GD.GD_lo_struct = BOOT_READ_GD_LO_REG(); \ + GD; \ +}) +#define BOOT_READ_GD_REG_TO(GD) (*(GD) = BOOT_READ_GD_REG()) + +static inline void +read_GD_reg(gd_struct_t *GD) +{ + READ_GD_REG_TO(GD); +} +static inline void +boot_read_GD_reg(gd_struct_t *GD) +{ + BOOT_READ_GD_REG_TO(GD); +} + +/* + * Write low double-word Globals Register (GD) + * from the low word structure + * Register fields filling: GD_lo.GD_lo_xxx = fff; + * Register double-word half filling: GD_lo.GD_lo_half = gd_lo; + */ +#define WRITE_GD_LO_REG(GD_lo) \ + WRITE_GD_LO_REG_VALUE(GD_lo.GD_lo_half) +#define BOOT_WRITE_GD_LO_REG(GD_lo) \ + BOOT_WRITE_GD_LO_REG_VALUE(GD_lo.GD_lo_half) + +static inline void +write_GD_lo_reg(e2k_gd_lo_t GD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write GD_lo register 0x%lx\n", GD_lo.GD_lo_half); +#endif 
/* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_GD_LO_REG(GD_lo); +} + +/* + * Write high double-word Globals Register (GD) + * from the high word structure + * Register fields filling: GD_hi.GD_hi_xxx = fff; + * Register double-word half filling: GD_hi.GD_hi_half = gd_lo; + */ +#define WRITE_GD_HI_REG(GD_hi) \ + WRITE_GD_HI_REG_VALUE(GD_hi.GD_hi_half) +#define BOOT_WRITE_GD_HI_REG(GD_hi) \ + BOOT_WRITE_GD_HI_REG_VALUE(GD_hi.GD_hi_half) + +static inline void +write_GD_hi_reg(e2k_gd_hi_t GD_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write GD_hi register 0x%lx\n", GD_hi.GD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_GD_HI_REG(GD_hi); +} + +/* + * Write high & low quad-word Globals Register (GD) + * from the high & low word structure + */ + +#define WRITE_GD_REG_VALUE(GD_hi_value, GD_lo_value) \ +({ \ + WRITE_GD_HI_REG_VALUE(GD_hi_value); \ + WRITE_GD_LO_REG_VALUE(GD_lo_value); \ +}) +#define BOOT_WRITE_GD_REG_VALUE(GD_hi_value, GD_lo_value) \ +({ \ + BOOT_WRITE_GD_HI_REG_VALUE(GD_hi_value); \ + BOOT_WRITE_GD_LO_REG_VALUE(GD_lo_value); \ +}) +#define WRITE_GD_REG(GD_hi, GD_lo) \ +({ \ + WRITE_GD_REG_VALUE(GD_hi.GD_hi_half, GD_lo.GD_lo_half); \ +}) +#define BOOT_WRITE_GD_REG(GD_hi, GD_lo) \ +({ \ + BOOT_WRITE_GD_REG_VALUE(GD_hi.GD_hi_half, GD_lo.GD_lo_half); \ +}) + +static inline void +write_GD_hi_lo_reg(e2k_gd_hi_t GD_hi, e2k_gd_lo_t GD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write GD_hi register 0x%lx\n", GD_hi.GD_hi_half); + boot_printk("Write GD_lo register 0x%lx\n", GD_lo.GD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_GD_REG(GD_hi, GD_lo); +} + +/* + * Write quad-word Globals Register (GD) from the structure + * Register fields filling: GD.GD_xxx = fff; + * Register double-word halfs filling: GD.GD_lo_reg = GD_lo; + * GD.GD_hi_reg = GD_hi; + */ +static inline void +write_GD_reg(gd_struct_t GD) +{ + WRITE_GD_REG(GD.GD_hi_struct, GD.GD_lo_struct); +} + +/* + * Read quad-word Procedure Stack Pointer Register (PSP) to 
the structure + * Register fields access: PSP_hi = READ_PSP_HI_REG(); + * fff = PSP_hi.PSP_hi_xxx; + * PSP_lo = READ_PSP_LO_REG(); + * fff = PSP_lo.PSP_lo_xxx; + */ + +#define NATIVE_NV_READ_PSP_LO_REG() \ +({ \ + e2k_psp_lo_t PSP_lo; \ + PSP_lo.PSP_lo_half = NATIVE_NV_READ_PSP_LO_REG_VALUE(); \ + PSP_lo; \ +}) +#define NATIVE_NV_READ_PSP_HI_REG() \ +({ \ + e2k_psp_hi_t PSP_hi; \ + PSP_hi.PSP_hi_half = NATIVE_NV_READ_PSP_HI_REG_VALUE(); \ + PSP_hi; \ +}) +#define NATIVE_NV_READ_PSP_REG() \ +({ \ + psp_struct_t PSP; \ + PSP.PSP_hi_struct = NATIVE_NV_READ_PSP_HI_REG(); \ + PSP.PSP_lo_struct = NATIVE_NV_READ_PSP_LO_REG(); \ + PSP; \ +}) +#define NATIVE_NV_READ_PSP_REG_TO(PSP) \ +({ \ + *PSP = NATIVE_NV_READ_PSP_REG(); \ +}) + +#define READ_PSP_LO_REG() \ +({ \ + e2k_psp_lo_t PSP_lo; \ + PSP_lo.PSP_lo_half = READ_PSP_LO_REG_VALUE(); \ + PSP_lo; \ +}) +#define READ_PSP_HI_REG() \ +({ \ + e2k_psp_hi_t PSP_hi; \ + PSP_hi.PSP_hi_half = READ_PSP_HI_REG_VALUE(); \ + PSP_hi; \ +}) +#define READ_PSP_REG() \ +({ \ + psp_struct_t PSP; \ + PSP.PSP_hi_struct = READ_PSP_HI_REG(); \ + PSP.PSP_lo_struct = READ_PSP_LO_REG(); \ + PSP; \ +}) +#define READ_PSP_REG_TO(PSP) \ +({ \ + *PSP = READ_PSP_REG(); \ +}) + +#define BOOT_READ_PSP_LO_REG() \ +({ \ + e2k_psp_lo_t PSP_lo; \ + PSP_lo.PSP_lo_half = BOOT_READ_PSP_LO_REG_VALUE(); \ + PSP_lo; \ +}) +#define BOOT_READ_PSP_HI_REG() \ +({ \ + e2k_psp_hi_t PSP_hi; \ + PSP_hi.PSP_hi_half = BOOT_READ_PSP_HI_REG_VALUE(); \ + PSP_hi; \ +}) +#define BOOT_READ_PSP_REG() \ +({ \ + psp_struct_t PSP; \ + PSP.PSP_hi_struct = BOOT_READ_PSP_HI_REG(); \ + PSP.PSP_lo_struct = BOOT_READ_PSP_LO_REG(); \ + PSP; \ +}) +#define BOOT_READ_PSP_REG_TO(PSP) \ +({ \ + *PSP = BOOT_READ_PSP_REG(); \ +}) + +/* + * Read low double-word Procedure Stack Pointer Register (PSP) + * from the low word structure + * Register fields access: fff = PSP_lo.PSP_lo_xxx; + * Register double-word half access: psp_lo = PSP_lo.PSP_lo_half; + */ +static inline e2k_psp_lo_t 
+native_nv_read_PSP_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PSP_lo register 0x%lx\n", + NATIVE_NV_READ_PSP_LO_REG().PSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_PSP_LO_REG(); +} +static inline e2k_psp_lo_t +read_PSP_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PSP_lo register 0x%lx\n", + READ_PSP_LO_REG().PSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_PSP_LO_REG(); +} +static inline e2k_psp_lo_t +boot_read_PSP_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PSP_lo register 0x%lx\n", + BOOT_READ_PSP_LO_REG().PSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_PSP_LO_REG(); +} + +/* + * Read high double-word Procedure Stack Pointer Register (PSP) + * from the high word structure + * Register fields access: fff = PSP_hi.PSP_hi_xxx; + * Register double-word half access: psp_lo = PSP_hi.PSP_hi_half; + */ +static inline e2k_psp_hi_t +native_nv_read_PSP_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PSP_hi register 0x%lx\n", + NATIVE_NV_READ_PSP_HI_REG().PSP_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_PSP_HI_REG(); +} +static inline e2k_psp_hi_t +read_PSP_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PSP_hi register 0x%lx\n", + READ_PSP_HI_REG().PSP_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_PSP_HI_REG(); +} +static inline e2k_psp_hi_t +boot_read_PSP_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PSP_hi register 0x%lx\n", + BOOT_READ_PSP_HI_REG().PSP_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_PSP_HI_REG(); +} + +/* + * Read quad-word Procedure Stack Pointer Register (PSP) to the structure + * Register fields access: fff = PSP -> PSP_xxx + * Register double-word halfs access: PSP_lo_word = PSP -> PSP_lo_reg + * PSP_hi_word = PSP -> PSP_hi_reg + */ +static inline void 
+native_nv_read_PSP_reg(psp_struct_t *PSP) +{ + NATIVE_NV_READ_PSP_REG_TO(PSP); +} +static inline void +read_PSP_reg(psp_struct_t *PSP) +{ + READ_PSP_REG_TO(PSP); +} +static inline void +boot_read_PSP_reg(psp_struct_t *PSP) +{ + BOOT_READ_PSP_REG_TO(PSP); +} + +/* + * Write low double-word Procedure Stack Pointer Register (PSP) + * from the low word structure + * Register fields filling: PSP_lo.PSP_lo_xxx = fff; + * Register double-word half filling: PSP_lo.PSP_lo_half = psp_lo; + */ +#define NATIVE_NV_WRITE_PSP_LO_REG(PSP_lo) \ +({ \ + NATIVE_NV_WRITE_PSP_LO_REG_VALUE(PSP_lo.PSP_lo_half); \ +}) +#define WRITE_PSP_LO_REG(PSP_lo) \ +({ \ + WRITE_PSP_LO_REG_VALUE(PSP_lo.PSP_lo_half); \ +}) +#define BOOT_WRITE_PSP_LO_REG(PSP_lo) \ +({ \ + BOOT_WRITE_PSP_LO_REG_VALUE(PSP_lo.PSP_lo_half); \ +}) + +static inline void +native_nv_write_PSP_lo_reg(e2k_psp_lo_t PSP_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PSP_lo register 0x%lx\n", PSP_lo.PSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_WRITE_PSP_LO_REG(PSP_lo); +} +static inline void +write_PSP_lo_reg(e2k_psp_lo_t PSP_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PSP_lo register 0x%lx\n", PSP_lo.PSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_PSP_LO_REG(PSP_lo); +} + +static inline void +boot_write_PSP_lo_reg(e2k_psp_lo_t PSP_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PSP_lo register 0x%lx\n", PSP_lo.PSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + BOOT_WRITE_PSP_LO_REG(PSP_lo); +} + +/* + * Write high double-word Procedure Stack Pointer Register (PSP) + * from the high word structure + * Register fields filling: PSP_hi.PSP_hi_xxx = fff; + * Register double-word half filling: PSP_hi.PSP_hi_half = psp_lo; + */ +#define NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG(PSP_hi) \ +({ \ + NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG_VALUE(PSP_hi.PSP_hi_half); \ +}) +#define WRITE_PSP_HI_REG(PSP_hi) \ +({ \ + WRITE_PSP_HI_REG_VALUE(PSP_hi.PSP_hi_half); \ +}) 
+#define BOOT_WRITE_PSP_HI_REG(PSP_hi) \ +({ \ + BOOT_WRITE_PSP_HI_REG_VALUE(PSP_hi.PSP_hi_half); \ +}) + +static inline void +native_nv_noirq_write_PSP_hi_reg(e2k_psp_hi_t PSP_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PSP_hi register 0x%lx\n", PSP_hi.PSP_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG(PSP_hi); +} +static inline void +write_PSP_hi_reg(e2k_psp_hi_t PSP_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PSP_hi register 0x%lx\n", PSP_hi.PSP_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_PSP_HI_REG(PSP_hi); +} + +static inline void +boot_write_PSP_hi_reg(e2k_psp_hi_t PSP_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PSP_hi register 0x%lx\n", PSP_hi.PSP_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + BOOT_WRITE_PSP_HI_REG(PSP_hi); +} + +/* + * Write high & low quad-word Procedure Stack Pointer Register (PSP) + * from the high & low word structure + */ + +#define NATIVE_NV_WRITE_PSP_REG_VALUE(PSP_hi_value, PSP_lo_value) \ +({ \ + NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG_VALUE(PSP_hi_value); \ + NATIVE_NV_WRITE_PSP_LO_REG_VALUE(PSP_lo_value); \ +}) +#define NATIVE_NV_WRITE_PSP_REG(PSP_hi, PSP_lo) \ +({ \ + NATIVE_NV_WRITE_PSP_REG_VALUE(PSP_hi.PSP_hi_half, PSP_lo.PSP_lo_half); \ +}) +#define WRITE_PSP_REG_VALUE(PSP_hi_value, PSP_lo_value) \ +({ \ + WRITE_PSP_HI_REG_VALUE(PSP_hi_value); \ + WRITE_PSP_LO_REG_VALUE(PSP_lo_value); \ +}) +#define WRITE_PSP_REG(PSP_hi, PSP_lo) \ +({ \ + WRITE_PSP_REG_VALUE(PSP_hi.PSP_hi_half, PSP_lo.PSP_lo_half); \ +}) +#define BOOT_WRITE_PSP_REG_VALUE(PSP_hi_value, PSP_lo_value) \ +({ \ + BOOT_WRITE_PSP_HI_REG_VALUE(PSP_hi_value); \ + BOOT_WRITE_PSP_LO_REG_VALUE(PSP_lo_value); \ +}) +#define BOOT_WRITE_PSP_REG(PSP_hi, PSP_lo) \ +({ \ + BOOT_WRITE_PSP_REG_VALUE(PSP_hi.PSP_hi_half, PSP_lo.PSP_lo_half); \ +}) + +static inline void +native_nv_write_PSP_hi_lo_reg(e2k_psp_hi_t PSP_hi, e2k_psp_lo_t PSP_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + 
boot_printk("Write PSP_hi register 0x%lx\n", PSP_hi.PSP_hi_half); + boot_printk("Write PSP_lo register 0x%lx\n", PSP_lo.PSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_WRITE_PSP_REG(PSP_hi, PSP_lo); +} +static inline void +write_PSP_hi_lo_reg(e2k_psp_hi_t PSP_hi, e2k_psp_lo_t PSP_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PSP_hi register 0x%lx\n", PSP_hi.PSP_hi_half); + boot_printk("Write PSP_lo register 0x%lx\n", PSP_lo.PSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_PSP_REG(PSP_hi, PSP_lo); +} +static inline void +boot_write_PSP_hi_lo_reg(e2k_psp_hi_t PSP_hi, e2k_psp_lo_t PSP_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PSP_hi register 0x%lx\n", PSP_hi.PSP_hi_half); + boot_printk("Write PSP_lo register 0x%lx\n", PSP_lo.PSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + BOOT_WRITE_PSP_REG(PSP_hi, PSP_lo); +} + +/* + * Write quad-word Procedure Stack Pointer Register (PSP) from the structure + * Register fields filling: PSP.PSP_xxx = fff; + * Register double-word halfs filling: PSP.PSP_lo_reg = PSP_lo; + * PSP.PSP_hi_reg = PSP_hi; + */ +static inline void +native_nv_write_PSP_reg(psp_struct_t PSP) +{ + NATIVE_NV_WRITE_PSP_REG(PSP.PSP_hi_struct, PSP.PSP_lo_struct); +} +static inline void +write_PSP_reg(psp_struct_t PSP) +{ + WRITE_PSP_REG(PSP.PSP_hi_struct, PSP.PSP_lo_struct); +} +static inline void +boot_write_PSP_reg(psp_struct_t PSP) +{ + BOOT_WRITE_PSP_REG(PSP.PSP_hi_struct, PSP.PSP_lo_struct); +} + + +/* + * Read quad-word Procedure Chain Stack Pointer Register (PCSP) to the structure + * Register fields access: PCSP_hi = READ_PCSP_HI_REG(); + * fff = PCSP_hi.PCSP_hi_xxx; + * PCSP_lo = READ_PCSP_LO_REG(); + * fff = PCSP_lo.PCSP_lo_xxx; + */ + +#define NATIVE_NV_READ_PCSP_LO_REG() \ +({ \ + e2k_pcsp_lo_t PCSP_lo; \ + PCSP_lo.PCSP_lo_half = NATIVE_NV_READ_PCSP_LO_REG_VALUE(); \ + PCSP_lo; \ +}) +#define NATIVE_NV_READ_PCSP_HI_REG() \ +({ \ + e2k_pcsp_hi_t PCSP_hi; \ + PCSP_hi.PCSP_hi_half = 
NATIVE_NV_READ_PCSP_HI_REG_VALUE(); \ + PCSP_hi; \ +}) +#define READ_PCSP_LO_REG() \ +({ \ + e2k_pcsp_lo_t PCSP_lo; \ + PCSP_lo.PCSP_lo_half = READ_PCSP_LO_REG_VALUE(); \ + PCSP_lo; \ +}) +#define READ_PCSP_HI_REG() \ +({ \ + e2k_pcsp_hi_t PCSP_hi; \ + PCSP_hi.PCSP_hi_half = READ_PCSP_HI_REG_VALUE(); \ + PCSP_hi; \ +}) +#define NATIVE_NV_READ_PCSP_REG() \ +({ \ + pcsp_struct_t PCSP; \ + PCSP.PCSP_hi_struct = NATIVE_NV_READ_PCSP_HI_REG(); \ + PCSP.PCSP_lo_struct = NATIVE_NV_READ_PCSP_LO_REG(); \ + PCSP; \ +}) +#define NATIVE_NV_READ_PCSP_REG_TO(PCSP) \ +({ \ + *PCSP = NATIVE_NV_READ_PCSP_REG(); \ +}) +#define READ_PCSP_REG() \ +({ \ + pcsp_struct_t PCSP; \ + PCSP.PCSP_hi_struct = READ_PCSP_HI_REG(); \ + PCSP.PCSP_lo_struct = READ_PCSP_LO_REG(); \ + PCSP; \ +}) +#define READ_PCSP_REG_TO(PCSP) \ +({ \ + *PCSP = READ_PCSP_REG(); \ +}) +#define BOOT_READ_PCSP_LO_REG() \ +({ \ + e2k_pcsp_lo_t PCSP_lo; \ + PCSP_lo.PCSP_lo_half = BOOT_READ_PCSP_LO_REG_VALUE(); \ + PCSP_lo; \ +}) +#define BOOT_READ_PCSP_HI_REG() \ +({ \ + e2k_pcsp_hi_t PCSP_hi; \ + PCSP_hi.PCSP_hi_half = BOOT_READ_PCSP_HI_REG_VALUE(); \ + PCSP_hi; \ +}) +#define BOOT_READ_PCSP_REG() \ +({ \ + pcsp_struct_t PCSP; \ + PCSP.PCSP_hi_struct = BOOT_READ_PCSP_HI_REG(); \ + PCSP.PCSP_lo_struct = BOOT_READ_PCSP_LO_REG(); \ + PCSP; \ +}) +#define BOOT_READ_PCSP_REG_TO(PCSP) \ +({ \ + *PCSP = BOOT_READ_PCSP_REG(); \ +}) + +/* + * Read low double-word Procedure Chain Stack Pointer Register (PCSP) + * from the low word structure + * Register fields access: fff = PCSP_lo.PCSP_lo_xxx; + * Register double-word half access: pcsp_lo = PCSP_lo.PCSP_lo_half; + */ +static inline e2k_pcsp_lo_t +native_nv_read_PCSP_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PCSP_lo register 0x%lx\n", + NATIVE_NV_READ_PCSP_LO_REG().PCSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_PCSP_LO_REG(); +} +static inline e2k_pcsp_lo_t +read_PCSP_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + 
boot_printk("Read PCSP_lo register 0x%lx\n", + READ_PCSP_LO_REG().PCSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_PCSP_LO_REG(); +} +static inline e2k_pcsp_lo_t +boot_read_PCSP_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PCSP_lo register 0x%lx\n", + BOOT_READ_PCSP_LO_REG().PCSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_PCSP_LO_REG(); +} + +/* + * Read high double-word Procedure Chain Stack Pointer Register (PCSP) + * from the high word structure + * Register fields access: fff = PCSP_hi.PCSP_hi_xxx; + * Register double-word half access: pcsp_lo = PCSP_hi.PCSP_hi_half; + */ +static inline e2k_pcsp_hi_t +native_nv_read_PCSP_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PCSP_hi register 0x%lx\n", + NATIVE_NV_READ_PCSP_HI_REG().PCSP_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_PCSP_HI_REG(); +} +static inline e2k_pcsp_hi_t +read_PCSP_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PCSP_hi register 0x%lx\n", + READ_PCSP_HI_REG().PCSP_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_PCSP_HI_REG(); +} +static inline e2k_pcsp_hi_t +boot_read_PCSP_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PCSP_hi register 0x%lx\n", + BOOT_READ_PCSP_HI_REG().PCSP_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_PCSP_HI_REG(); +} + +/* + * Read quad-word Procedure Chain Stack Pointer Register (PCSP) to the structure + * Register fields access: fff = PCSP -> PCSP_xxx + * Register double-word halfs access: PCSP_lo_word = PCSP -> PCSP_lo_reg + * PCSP_hi_word = PCSP -> PCSP_hi_reg + */ + +static inline void +native_nv_read_PCSP_reg(pcsp_struct_t *PCSP) +{ + NATIVE_NV_READ_PCSP_REG_TO(PCSP); +} +static inline void +read_PCSP_reg(pcsp_struct_t *PCSP) +{ + READ_PCSP_REG_TO(PCSP); +} +static inline void +boot_read_PCSP_reg(pcsp_struct_t *PCSP) +{ + BOOT_READ_PCSP_REG_TO(PCSP); +} + +/* + * 
Write low double-word Procedure Chain Stack Pointer Register (PCSP) + * from the low word structure + * Register fields filling: PCSP_lo.PCSP_lo_xxx = fff; + * Register double-word half filling: PCSP_lo.PCSP_lo_half = pcsp_lo; + */ +#define NATIVE_NV_WRITE_PCSP_LO_REG(PCSP_lo) \ +({ \ + NATIVE_NV_WRITE_PCSP_LO_REG_VALUE(PCSP_lo.PCSP_lo_half); \ +}) +#define WRITE_PCSP_LO_REG(PCSP_lo) \ +({ \ + WRITE_PCSP_LO_REG_VALUE(PCSP_lo.PCSP_lo_half); \ +}) +#define BOOT_WRITE_PCSP_LO_REG(PCSP_lo) \ +({ \ + BOOT_WRITE_PCSP_LO_REG_VALUE(PCSP_lo.PCSP_lo_half); \ +}) + +static inline void +native_nv_write_PCSP_lo_reg(e2k_pcsp_lo_t PCSP_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PCSP_lo register 0x%lx\n", PCSP_lo.PCSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_WRITE_PCSP_LO_REG(PCSP_lo); +} +static inline void +write_PCSP_lo_reg(e2k_pcsp_lo_t PCSP_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PCSP_lo register 0x%lx\n", PCSP_lo.PCSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_PCSP_LO_REG(PCSP_lo); +} +static inline void +boot_write_PCSP_lo_reg(e2k_pcsp_lo_t PCSP_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PCSP_lo register 0x%lx\n", PCSP_lo.PCSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + BOOT_WRITE_PCSP_LO_REG(PCSP_lo); +} + +/* + * Write high double-word Procedure Chain Stack Pointer Register (PCSP) + * from the high word structure + * Register fields filling: PCSP_hi.PCSP_hi_xxx = fff; + * Register double-word half filling: PCSP_hi.PCSP_hi_half = pcsp_lo; + */ +#define NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG(PCSP_hi) \ +({ \ + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG_VALUE(PCSP_hi.PCSP_hi_half); \ +}) +#define WRITE_PCSP_HI_REG(PCSP_hi) \ +({ \ + WRITE_PCSP_HI_REG_VALUE(PCSP_hi.PCSP_hi_half); \ +}) +#define BOOT_WRITE_PCSP_HI_REG(PCSP_hi) \ +({ \ + BOOT_WRITE_PCSP_HI_REG_VALUE(PCSP_hi.PCSP_hi_half); \ +}) + +static inline void +native_nv_noirq_write_PCSP_hi_reg(e2k_pcsp_hi_t PCSP_hi) +{ +#ifdef 
_E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PCSP_hi register 0x%lx\n", PCSP_hi.PCSP_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG(PCSP_hi); +} +static inline void +write_PCSP_hi_reg(e2k_pcsp_hi_t PCSP_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PCSP_hi register 0x%lx\n", PCSP_hi.PCSP_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_PCSP_HI_REG(PCSP_hi); +} +static inline void +boot_write_PCSP_hi_reg(e2k_pcsp_hi_t PCSP_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PCSP_hi register 0x%lx\n", PCSP_hi.PCSP_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + BOOT_WRITE_PCSP_HI_REG(PCSP_hi); +} + +/* + * Write high & low quad-word Procedure Chain Stack Pointer Register (PCSP) + * from the high & low word structure + */ + +#define NATIVE_NV_WRITE_PCSP_REG_VALUE(PCSP_hi_value, PCSP_lo_value) \ +({ \ + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value); \ + NATIVE_NV_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value); \ +}) +#define WRITE_PCSP_REG_VALUE(PCSP_hi_value, PCSP_lo_value) \ +({ \ + WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value); \ + WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value); \ +}) +#define BOOT_WRITE_PCSP_REG_VALUE(PCSP_hi_value, PCSP_lo_value) \ +({ \ + BOOT_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value); \ + BOOT_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value); \ +}) +#define NATIVE_NV_WRITE_PCSP_REG(PCSP_hi, PCSP_lo) \ +({ \ + NATIVE_NV_WRITE_PCSP_REG_VALUE(PCSP_hi.PCSP_hi_half, \ + PCSP_lo.PCSP_lo_half); \ +}) +#define WRITE_PCSP_REG(PCSP_hi, PCSP_lo) \ +({ \ + WRITE_PCSP_REG_VALUE(PCSP_hi.PCSP_hi_half, \ + PCSP_lo.PCSP_lo_half); \ +}) +#define BOOT_WRITE_PCSP_REG(PCSP_hi, PCSP_lo) \ +({ \ + BOOT_WRITE_PCSP_REG_VALUE(PCSP_hi.PCSP_hi_half, \ + PCSP_lo.PCSP_lo_half); \ +}) + +static inline void +native_nv_write_PCSP_hi_lo_reg(e2k_pcsp_hi_t PCSP_hi, e2k_pcsp_lo_t PCSP_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PCSP_hi register 0x%lx\n", PCSP_hi.PCSP_hi_half); + boot_printk("Write PCSP_lo 
register 0x%lx\n", PCSP_lo.PCSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_WRITE_PCSP_REG(PCSP_hi, PCSP_lo); +} +static inline void +write_PCSP_hi_lo_reg(e2k_pcsp_hi_t PCSP_hi, e2k_pcsp_lo_t PCSP_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PCSP_hi register 0x%lx\n", PCSP_hi.PCSP_hi_half); + boot_printk("Write PCSP_lo register 0x%lx\n", PCSP_lo.PCSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_PCSP_REG(PCSP_hi, PCSP_lo); +} +static inline void +boot_write_PCSP_hi_lo_reg(e2k_pcsp_hi_t PCSP_hi, e2k_pcsp_lo_t PCSP_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PCSP_hi register 0x%lx\n", PCSP_hi.PCSP_hi_half); + boot_printk("Write PCSP_lo register 0x%lx\n", PCSP_lo.PCSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + BOOT_WRITE_PCSP_REG(PCSP_hi, PCSP_lo); +} + +/* + * Write quad-word Procedure Chain Stack Pointer Register (PCSP) from the + * structure + * Register fields filling: PCSP.PCSP_xxx = fff; + * Register double-word halfs filling: PCSP.PCSP_lo_reg = PCSP_lo; + * PCSP.PCSP_hi_reg = PCSP_hi; + */ +static inline void +native_nv_write_PCSP_reg(pcsp_struct_t PCSP) +{ + NATIVE_NV_WRITE_PCSP_REG(PCSP.PCSP_hi_struct, PCSP.PCSP_lo_struct); +} +static inline void +write_PCSP_reg(pcsp_struct_t PCSP) +{ + WRITE_PCSP_REG(PCSP.PCSP_hi_struct, PCSP.PCSP_lo_struct); +} +static inline void +boot_write_PCSP_reg(pcsp_struct_t PCSP) +{ + BOOT_WRITE_PCSP_REG(PCSP.PCSP_hi_struct, PCSP.PCSP_lo_struct); +} + +/* + * Read Current Chain Register (CR0/CR1) to the structure + * Register fields access: crX_hi = READ_CRx_HI_REG(); + * fff = CRx_hi.CRx_hi_xxx; + * CRx_lo = READ_CRx_LO_REG(); + * fff = CRx_lo.CRx_lo_xxx; + */ + +#define NATIVE_NV_READ_CR0_LO_REG() \ +({ \ + e2k_cr0_lo_t CR0_lo; \ + CR0_lo.CR0_lo_half = NATIVE_NV_READ_CR0_LO_REG_VALUE(); \ + CR0_lo; \ +}) +#define NATIVE_NV_READ_CR0_HI_REG() \ +({ \ + e2k_cr0_hi_t CR0_hi; \ + CR0_hi.CR0_hi_half = NATIVE_NV_READ_CR0_HI_REG_VALUE(); \ + CR0_hi; \ +}) +#define 
NATIVE_NV_READ_CR1_LO_REG() \ +({ \ + e2k_cr1_lo_t CR1_lo; \ + CR1_lo.CR1_lo_half = NATIVE_NV_READ_CR1_LO_REG_VALUE(); \ + CR1_lo; \ +}) +#define NATIVE_NV_READ_CR1_HI_REG() \ +({ \ + e2k_cr1_hi_t CR1_hi; \ + CR1_hi.CR1_hi_half = NATIVE_NV_READ_CR1_HI_REG_VALUE(); \ + CR1_hi; \ +}) +#define READ_CR0_LO_REG() \ +({ \ + e2k_cr0_lo_t CR0_lo; \ + CR0_lo.CR0_lo_half = READ_CR0_LO_REG_VALUE(); \ + CR0_lo; \ +}) +#define READ_CR0_HI_REG() \ +({ \ + e2k_cr0_hi_t CR0_hi; \ + CR0_hi.CR0_hi_half = READ_CR0_HI_REG_VALUE(); \ + CR0_hi; \ +}) +#define READ_CR1_LO_REG() \ +({ \ + e2k_cr1_lo_t CR1_lo; \ + CR1_lo.CR1_lo_half = READ_CR1_LO_REG_VALUE(); \ + CR1_lo; \ +}) +#define READ_CR1_HI_REG() \ +({ \ + e2k_cr1_hi_t CR1_hi; \ + CR1_hi.CR1_hi_half = READ_CR1_HI_REG_VALUE(); \ + CR1_hi; \ +}) + +static inline e2k_cr0_lo_t +native_nv_read_CR0_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read CR0_lo register 0x%lx\n", + NATIVE_NV_READ_CR0_LO_REG().CR0_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_CR0_LO_REG(); +} +static inline e2k_cr0_hi_t +native_nv_read_CR0_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read CR0_hi register 0x%lx\n", + NATIVE_NV_READ_CR0_HI_REG().CR0_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_CR0_HI_REG(); +} + +static inline e2k_cr1_lo_t +native_nv_read_CR1_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read CR1_lo register 0x%lx\n", + NATIVE_NV_READ_CR1_LO_REG().CR1_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_CR1_LO_REG(); +} +static inline e2k_cr1_hi_t +native_nv_read_CR1_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read CR1_hi register 0x%lx\n", + NATIVE_NV_READ_CR1_HI_REG().CR1_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_CR1_HI_REG(); +} +static inline e2k_cr0_lo_t +read_CR0_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read CR0_lo register 
0x%lx\n", + READ_CR0_LO_REG().CR0_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_CR0_LO_REG(); +} +static inline e2k_cr0_hi_t +read_CR0_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read CR0_hi register 0x%lx\n", + READ_CR0_HI_REG().CR0_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_CR0_HI_REG(); +} + +static inline e2k_cr1_lo_t +read_CR1_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read CR1_lo register 0x%lx\n", + READ_CR1_LO_REG().CR1_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_CR1_LO_REG(); +} +static inline e2k_cr1_hi_t +read_CR1_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read CR1_hi register 0x%lx\n", + READ_CR1_HI_REG().CR1_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_CR1_HI_REG(); +} + +/* + * Write Current Chain Register (CR0/CR1) + * from the low word structure + * Register fields filling: CRx_lo.CRx_lo_xxx = fff; + * Register double-word half filling: CRx_lo.CRx_lo_half = crX_lo; + */ +#define NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG(CR0_lo) \ +({ \ + NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG_VALUE(CR0_lo.CR0_lo_half); \ +}) +#define NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG(CR0_hi) \ +({ \ + NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG_VALUE(CR0_hi.CR0_hi_half); \ +}) +#define NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG(CR1_lo) \ +({ \ + NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG_VALUE(CR1_lo.CR1_lo_half); \ +}) +#define NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG(CR1_hi) \ +({ \ + NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG_VALUE(CR1_hi.CR1_hi_half); \ +}) +#define WRITE_CR0_LO_REG(CR0_lo) \ +({ \ + WRITE_CR0_LO_REG_VALUE(CR0_lo.CR0_lo_half); \ +}) +#define WRITE_CR0_HI_REG(CR0_hi) \ +({ \ + WRITE_CR0_HI_REG_VALUE(CR0_hi.CR0_hi_half); \ +}) +#define WRITE_CR1_LO_REG(CR1_lo) \ +({ \ + WRITE_CR1_LO_REG_VALUE(CR1_lo.CR1_lo_half); \ +}) +#define WRITE_CR1_HI_REG(CR1_hi) \ +({ \ + WRITE_CR1_HI_REG_VALUE(CR1_hi.CR1_hi_half); \ +}) +static inline void +native_nv_noirq_write_CR0_lo_reg(e2k_cr0_lo_t CR0_lo) +{ 
+#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write CR0_lo register 0x%lx\n", CR0_lo.CR0_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG(CR0_lo); +} +static inline void +native_nv_noirq_write_CR0_hi_reg(e2k_cr0_hi_t CR0_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write CR0_hi register 0x%lx\n", CR0_hi.CR0_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG(CR0_hi); +} +static inline void +native_nv_noirq_write_CR1_lo_reg(e2k_cr1_lo_t CR1_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write CR1_lo register 0x%lx\n", CR1_lo.CR1_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG(CR1_lo); +} +static inline void +native_nv_noirq_write_CR1_hi_reg(e2k_cr1_hi_t CR1_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write CR1_hi register 0x%lx\n", CR1_hi.CR1_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG(CR1_hi); +} +static inline void +write_CR0_lo_reg(e2k_cr0_lo_t CR0_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write CR0_lo register 0x%lx\n", CR0_lo.CR0_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_CR0_LO_REG(CR0_lo); +} +static inline void +write_CR0_hi_reg(e2k_cr0_hi_t CR0_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write CR0_hi register 0x%lx\n", CR0_hi.CR0_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_CR0_HI_REG(CR0_hi); +} +static inline void +write_CR1_lo_reg(e2k_cr1_lo_t CR1_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write CR1_lo register 0x%lx\n", CR1_lo.CR1_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_CR1_LO_REG(CR1_lo); +} +static inline void +write_CR1_hi_reg(e2k_cr1_hi_t CR1_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write CR1_hi register 0x%lx\n", CR1_hi.CR1_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_CR1_HI_REG(CR1_hi); +} + +/* + * Read double-word Control Transfer Preparation Registers 
(CTPR1/CTPR2/CTPR3) + * to the structure + * Register fields access: fff = CTPRn -> CTPRn_xxx + * Register entire access: CTPRn_entire = CTPRn -> CTPRn_reg + */ +#define NATIVE_NV_READ_CTPR1_REG_VALUE() NATIVE_NV_READ_CTPR_REG_VALUE(1) +#define NATIVE_NV_READ_CTPR2_REG_VALUE() NATIVE_NV_READ_CTPR_REG_VALUE(2) +#define NATIVE_NV_READ_CTPR3_REG_VALUE() NATIVE_NV_READ_CTPR_REG_VALUE(3) +#define READ_CTPR1_REG_VALUE() READ_CTPR_REG_VALUE(1) +#define READ_CTPR2_REG_VALUE() READ_CTPR_REG_VALUE(2) +#define READ_CTPR3_REG_VALUE() READ_CTPR_REG_VALUE(3) +#define NATIVE_NV_READ_CTPR_REG(reg_no) \ +({ \ + e2k_ctpr_t CTPR; \ + CTPR.CTPR_reg = NATIVE_NV_READ_CTPR_REG_VALUE(reg_no); \ + CTPR; \ +}) +#define NATIVE_NV_READ_CTPR1_REG() NATIVE_NV_READ_CTPR_REG(1) +#define NATIVE_NV_READ_CTPR2_REG() NATIVE_NV_READ_CTPR_REG(2) +#define NATIVE_NV_READ_CTPR3_REG() NATIVE_NV_READ_CTPR_REG(3) +#define READ_CTPR_REG(reg_no) \ +({ \ + e2k_ctpr_t CTPR; \ + CTPR.CTPR_reg = READ_CTPR_REG_VALUE(reg_no); \ + CTPR; \ +}) +#define READ_CTPR1_REG() READ_CTPR_REG(1) +#define READ_CTPR2_REG() READ_CTPR_REG(2) +#define READ_CTPR3_REG() READ_CTPR_REG(3) +static inline e2k_ctpr_t +read_CTPR_reg(int reg_no) +{ + switch (reg_no) { + case 1: return READ_CTPR1_REG(); + case 2: return READ_CTPR2_REG(); + case 3: return READ_CTPR3_REG(); + default: + return READ_CTPR1_REG(); + } +} +static inline e2k_ctpr_t +read_CTPR1_reg(void) +{ + return read_CTPR_reg(1); +} +static inline e2k_ctpr_t +read_CTPR2_reg(void) +{ + return read_CTPR_reg(2); +} +static inline e2k_ctpr_t +read_CTPR3_reg(void) +{ + return read_CTPR_reg(3); +} + +#define NATIVE_READ_CTPR1_HI_REG_VALUE() NATIVE_READ_CTPR_HI_REG_VALUE(1) +#define NATIVE_READ_CTPR2_HI_REG_VALUE() NATIVE_READ_CTPR_HI_REG_VALUE(2) +#define NATIVE_READ_CTPR3_HI_REG_VALUE() NATIVE_READ_CTPR_HI_REG_VALUE(3) + +/* + * Write double-word Control Transfer Preparation Registers (CTPR1/CTPR2/CTPR3) + * from the structure + * Register fields filling: CTPRn.CTPR_xxx = fff; + * 
Register entire filling: CTPRn.CTPR_reg = CTPRn_value; + */ +#define NATIVE_WRITE_CTPR1_REG_VALUE(CTPR_value) \ + NATIVE_WRITE_CTPR_REG_VALUE(1, CTPR_value) +#define NATIVE_WRITE_CTPR2_REG_VALUE(CTPR_value) \ + NATIVE_WRITE_CTPR_REG_VALUE(2, CTPR_value) +#define NATIVE_WRITE_CTPR3_REG_VALUE(CTPR_value) \ + NATIVE_WRITE_CTPR_REG_VALUE(3, CTPR_value) +#define NATIVE_WRITE_CTPR_REG(reg_no, CTPR) \ + NATIVE_WRITE_CTPR_REG_VALUE(reg_no, CTPR.CTPR_reg) +#define NATIVE_WRITE_CTPR1_REG(CTPR) NATIVE_WRITE_CTPR_REG(1, CTPR) +#define NATIVE_WRITE_CTPR2_REG(CTPR) NATIVE_WRITE_CTPR_REG(2, CTPR) +#define NATIVE_WRITE_CTPR3_REG(CTPR) NATIVE_WRITE_CTPR_REG(3, CTPR) +#define WRITE_CTPR1_REG_VALUE(CTPR_value) \ + WRITE_CTPR_REG_VALUE(1, CTPR_value) +#define WRITE_CTPR2_REG_VALUE(CTPR_value) \ + WRITE_CTPR_REG_VALUE(2, CTPR_value) +#define WRITE_CTPR3_REG_VALUE(CTPR_value) \ + WRITE_CTPR_REG_VALUE(3, CTPR_value) +#define WRITE_CTPR_REG(reg_no, CTPR) \ + WRITE_CTPR_REG_VALUE(reg_no, CTPR.CTPR_reg) +#define WRITE_CTPR1_REG(CTPR) WRITE_CTPR_REG(1, CTPR) +#define WRITE_CTPR2_REG(CTPR) WRITE_CTPR_REG(2, CTPR) +#define WRITE_CTPR3_REG(CTPR) WRITE_CTPR_REG(3, CTPR) + +static inline void +write_CTPR_reg(int reg_no, e2k_ctpr_t CTPR) +{ + switch (reg_no) { + case 1: + WRITE_CTPR1_REG(CTPR); + break; + case 2: + WRITE_CTPR2_REG(CTPR); + break; + case 3: + WRITE_CTPR3_REG(CTPR); + break; + default: + break; + } +} +static inline void +write_CTPR1_reg(e2k_ctpr_t CTPR) +{ + write_CTPR_reg(1, CTPR); +} +static inline void +write_CTPR2_reg(e2k_ctpr_t CTPR) +{ + write_CTPR_reg(2, CTPR); +} +static inline void +write_CTPR3_reg(e2k_ctpr_t CTPR) +{ + write_CTPR_reg(3, CTPR); +} + +#define NATIVE_WRITE_CTPR1_HI_REG_VALUE(CTPR_value) \ + NATIVE_WRITE_CTPR_HI_REG_VALUE(1, CTPR_value) +#define NATIVE_WRITE_CTPR2_HI_REG_VALUE(CTPR_value) \ + NATIVE_WRITE_CTPR_HI_REG_VALUE(2, CTPR_value) +#define NATIVE_WRITE_CTPR3_HI_REG_VALUE(CTPR_value) \ + NATIVE_WRITE_CTPR_HI_REG_VALUE(3, CTPR_value) + +/* + * Read 
signed word-register Procedure Chain Stack Hardware + * Top Pointer (PCSHTP) + */ + +static inline e2k_pcshtp_t +read_PCSHTP_reg(void) +{ + return READ_PCSHTP_REG_SVALUE(); +} + +/* + * Write signed word-register Procedure Chain Stack Hardware + * Top Pointer (PCSHTP) + */ + +static inline void +write_PCSHTP_reg(e2k_pcshtp_t PCSHTP) +{ + WRITE_PCSHTP_REG_SVALUE(PCSHTP); +} + + +/* + * Read low double-word Non-Protected User Stack Descriptor Register (USD) + * as the low word structure + * Register fields access: USD_lo = READ_USD_LO_REG(); + * fff = USD_lo.USD_lo_xxx; + */ +#define NATIVE_NV_READ_USD_LO_REG() \ +({ \ + e2k_usd_lo_t USD_lo; \ + USD_lo.USD_lo_half = NATIVE_NV_READ_USD_LO_REG_VALUE(); \ + USD_lo; \ +}) +#define READ_USD_LO_REG() \ +({ \ + e2k_usd_lo_t USD_lo; \ + USD_lo.USD_lo_half = READ_USD_LO_REG_VALUE(); \ + USD_lo; \ +}) +#define BOOT_READ_USD_LO_REG() \ +({ \ + e2k_usd_lo_t USD_lo; \ + USD_lo.USD_lo_half = BOOT_READ_USD_LO_REG_VALUE(); \ + USD_lo; \ +}) + +static inline e2k_usd_lo_t +native_nv_read_USD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read USD_lo register 0x%lx\n", + NATIVE_NV_READ_USD_LO_REG().USD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_USD_LO_REG(); +} +static inline e2k_usd_lo_t +read_USD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read USD_lo register 0x%lx\n", + READ_USD_LO_REG().USD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_USD_LO_REG(); +} +static inline e2k_usd_lo_t +boot_read_USD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read USD_lo register 0x%lx\n", + BOOT_READ_USD_LO_REG().USD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_USD_LO_REG(); +} + +/* + * Read high double-word Non-Protected User Stack Descriptor Register (USD) + * as the high word structure + * Register fields access: USD_hi = READ_USD_HI_REG(); + * fff = USD_hi.USD_hi_xxx; + */ +#define NATIVE_NV_READ_USD_HI_REG() \ +({ 
\ + e2k_usd_hi_t USD_hi; \ + USD_hi.USD_hi_half = NATIVE_NV_READ_USD_HI_REG_VALUE(); \ + USD_hi; \ +}) +#define READ_USD_HI_REG() \ +({ \ + e2k_usd_hi_t USD_hi; \ + USD_hi.USD_hi_half = READ_USD_HI_REG_VALUE(); \ + USD_hi; \ +}) +#define BOOT_READ_USD_HI_REG() \ +({ \ + e2k_usd_hi_t USD_hi; \ + USD_hi.USD_hi_half = BOOT_READ_USD_HI_REG_VALUE(); \ + USD_hi; \ +}) + +static inline e2k_usd_hi_t +native_nv_read_USD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read USD_hi register 0x%lx\n", + NATIVE_NV_READ_USD_HI_REG().USD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_USD_HI_REG(); +} +static inline e2k_usd_hi_t +read_USD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read USD_hi register 0x%lx\n", + READ_USD_HI_REG().USD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_USD_HI_REG(); +} +static inline e2k_usd_hi_t +boot_read_USD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read USD_hi register 0x%lx\n", + BOOT_READ_USD_HI_REG().USD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_USD_HI_REG(); +} + +/* + * Read quad-word Non-Protected User Stack Descriptor Register (USD) + * to the structure + * Register fields access: fff = USD -> USD_xxx + * Register double-word halfs access: USD_lo = USD -> USD_lo_reg + * USD_hi = USD -> USD_hi_reg + */ +#define READ_USD_REG() \ +({ \ + usd_struct_t USD; \ + USD.USD_hi_struct = READ_USD_HI_REG(); \ + USD.USD_lo_struct = READ_USD_LO_REG(); \ + USD; \ +}) +#define READ_USD_REG_TO(USD) \ +({ \ + *USD = READ_USD_REG(); \ +}) +#define BOOT_READ_USD_REG() \ +({ \ + usd_struct_t USD; \ + USD.USD_hi_struct = BOOT_READ_USD_HI_REG(); \ + USD.USD_lo_struct = BOOT_READ_USD_LO_REG(); \ + USD; \ +}) +#define BOOT_READ_USD_REG_TO(USD) \ +({ \ + *USD = BOOT_READ_USD_REG(); \ +}) + +static inline void +read_USD_reg(usd_struct_t *USD) +{ + READ_USD_REG_TO(USD); +} +static inline void +boot_read_USD_reg(usd_struct_t *USD) +{ + 
BOOT_READ_USD_REG_TO(USD); +} + +/* + * Write low double-word Non-Protected User Stack Descriptor Register (USD) + * from the low word structure + * Register fields filling: USD_lo.USD_lo_xxx = fff; + * Register double-word half filling: USD_lo.USD_lo_half = usd_lo; + */ +#define NATIVE_NV_WRITE_USD_LO_REG(USD_lo) \ + NATIVE_NV_WRITE_USD_LO_REG_VALUE(USD_lo.USD_lo_half) +#define WRITE_USD_LO_REG(USD_lo) \ + WRITE_USD_LO_REG_VALUE(USD_lo.USD_lo_half) +#define BOOT_WRITE_USD_LO_REG(USD_lo) \ + BOOT_WRITE_USD_LO_REG_VALUE(USD_lo.USD_lo_half) + +static inline void +native_nv_write_USD_lo_reg(e2k_usd_lo_t USD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write USD_lo register 0x%lx\n", USD_lo.USD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_WRITE_USD_LO_REG(USD_lo); +} +static inline void +write_USD_lo_reg(e2k_usd_lo_t USD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write USD_lo register 0x%lx\n", USD_lo.USD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_USD_LO_REG(USD_lo); +} +static inline void +boot_write_USD_lo_reg(e2k_usd_lo_t USD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write USD_lo register 0x%lx\n", USD_lo.USD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + BOOT_WRITE_USD_LO_REG(USD_lo); +} + +/* + * Write high double-word Non-Protected User Stack Descriptor Register (USD) + * from the high word structure + * Register fields filling: USD_hi.USD_hi_xxx = fff; + * Register double-word half filling: USD_hi.USD_hi_half = usd_hi; + */ +#define NATIVE_NV_WRITE_USD_HI_REG(USD_hi) \ + NATIVE_NV_WRITE_USD_HI_REG_VALUE(USD_hi.USD_hi_half) +#define WRITE_USD_HI_REG(USD_hi) \ + WRITE_USD_HI_REG_VALUE(USD_hi.USD_hi_half) +#define BOOT_WRITE_USD_HI_REG(USD_hi) \ + BOOT_WRITE_USD_HI_REG_VALUE(USD_hi.USD_hi_half) + +static inline void +native_nv_write_USD_hi_reg(e2k_usd_hi_t USD_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write USD_hi register 0x%lx\n", USD_hi.USD_hi_half); +#endif /* 
_E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_WRITE_USD_HI_REG(USD_hi); +} +static inline void +write_USD_hi_reg(e2k_usd_hi_t USD_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write USD_hi register 0x%lx\n", USD_hi.USD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_USD_HI_REG(USD_hi); +} +static inline void +boot_write_USD_hi_reg(e2k_usd_hi_t USD_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write USD_hi register 0x%lx\n", USD_hi.USD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + BOOT_WRITE_USD_HI_REG(USD_hi); +} + +/* + * Write high & low quad-word Non-Protected User Stack Descriptor Register (USD) + * from the high & low word structure + */ +#define WRITE_USD_REG_VALUE(USD_hi_value, USD_lo_value) \ +({ \ + WRITE_USD_HI_REG_VALUE(USD_hi_value); \ + WRITE_USD_LO_REG_VALUE(USD_lo_value); \ +}) +#define WRITE_USD_REG(USD_hi, USD_lo) \ +({ \ + WRITE_USD_REG_VALUE(USD_hi.USD_hi_half, USD_lo.USD_lo_half); \ +}) +#define BOOT_WRITE_USD_REG_VALUE(USD_hi_value, USD_lo_value) \ +({ \ + BOOT_WRITE_USD_HI_REG_VALUE(USD_hi_value); \ + BOOT_WRITE_USD_LO_REG_VALUE(USD_lo_value); \ +}) +#define BOOT_WRITE_USD_REG(USD_hi, USD_lo) \ +({ \ + BOOT_WRITE_USD_REG_VALUE(USD_hi.USD_hi_half, USD_lo.USD_lo_half); \ +}) + +#define WRITE_USBR_USD_REG_VALUE(usbr, USD_hi, USD_lo) \ +do { \ + WRITE_USBR_REG_VALUE(usbr); \ + WRITE_USD_REG_VALUE(USD_hi, USD_lo); \ +} while (0) + +#define WRITE_USBR_USD_REG(usbr, USD_hi, USD_lo) \ +do { \ + WRITE_USBR_REG(usbr); \ + WRITE_USD_REG(USD_hi, USD_lo); \ +} while (0) + + +static inline void +write_USD_hi_lo_reg(e2k_usd_hi_t USD_hi, e2k_usd_lo_t USD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write USD_hi register 0x%lx\n", USD_hi.USD_hi_half); + boot_printk("Write USD_lo register 0x%lx\n", USD_lo.USD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_USD_REG(USD_hi, USD_lo); +} +static inline void +boot_write_USD_hi_lo_reg(e2k_usd_hi_t USD_hi, e2k_usd_lo_t USD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + 
boot_printk("Write USD_hi register 0x%lx\n", USD_hi.USD_hi_half); + boot_printk("Write USD_lo register 0x%lx\n", USD_lo.USD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + BOOT_WRITE_USD_REG(USD_hi, USD_lo); +} + +/* + * Write quad-word Non-Protected User Stack Descriptor Register (USD) + * from the structure + * Register fields filling: USD.USD_xxx = fff; + * Register double-word halfs filling: USD.USD_lo_reg = USD_lo; + * USD.USD_hi_reg = USD_hi; + */ +static inline void +write_USD_reg(usd_struct_t USD) +{ + WRITE_USD_REG(USD.USD_hi_struct, USD.USD_lo_struct); +} +static inline void +boot_write_USD_reg(usd_struct_t USD) +{ + BOOT_WRITE_USD_REG(USD.USD_hi_struct, USD.USD_lo_struct); +} + +/* + * Read low double-word Protected User Stack Descriptor Register (PUSD) + * as the low word structure + * Register fields access: PUSD_lo = READ_PUSD_LO_REG(); + * fff = PUSD_lo.PUSD_lo_xxx; + */ +#define NATIVE_NV_READ_PUSD_LO_REG() \ +({ \ + e2k_pusd_lo_t PUSD_lo; \ + PUSD_lo.PUSD_lo_half = NATIVE_NV_READ_PUSD_LO_REG_VALUE(); \ + PUSD_lo; \ +}) +#define READ_PUSD_LO_REG() \ +({ \ + e2k_pusd_lo_t PUSD_lo; \ + PUSD_lo.PUSD_lo_half = READ_PUSD_LO_REG_VALUE(); \ + PUSD_lo; \ +}) +static inline e2k_pusd_lo_t +native_nv_read_PUSD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PUSD_lo register 0x%lx\n", + NATIVE_NV_READ_PUSD_LO_REG().PUSD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_PUSD_LO_REG(); +} +static inline e2k_pusd_lo_t +read_PUSD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PUSD_lo register 0x%lx\n", + READ_PUSD_LO_REG().PUSD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_PUSD_LO_REG(); +} + +/* + * Read high double-word Protected User Stack Descriptor Register (PUSD) + * as the high word structure + * Register fields access: PUSD_hi = READ_PUSD_HI_REG(); + * fff = PUSD_hi.PUSD_hi_xxx; + */ +#define NATIVE_NV_READ_PUSD_HI_REG() \ +({ \ + e2k_pusd_hi_t PUSD_hi; \ + 
PUSD_hi.PUSD_hi_half = NATIVE_NV_READ_PUSD_HI_REG_VALUE(); \ + PUSD_hi; \ +}) +#define READ_PUSD_HI_REG() \ +({ \ + e2k_pusd_hi_t PUSD_hi; \ + PUSD_hi.PUSD_hi_half = READ_PUSD_HI_REG_VALUE(); \ + PUSD_hi; \ +}) +static inline e2k_pusd_hi_t +native_nv_read_PUSD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PUSD_hi register 0x%lx\n", + NATIVE_NV_READ_PUSD_HI_REG().PUSD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_PUSD_HI_REG(); +} +static inline e2k_pusd_hi_t +read_PUSD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PUSD_hi register 0x%lx\n", + READ_PUSD_HI_REG().PUSD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_PUSD_HI_REG(); +} + +/* + * Read quad-word User Protected Stack Descriptor Register (PUSD) + * to the structure + * Register fields access: fff = PUSD -> PUSD_xxx + * Register double-word halfs access: PUSD_lo = PUSD -> PUSD_lo_reg + * PUSD_hi = PUSD -> PUSD_hi_reg + */ +#define NATIVE_NV_READ_PUSD_REG() \ +({ \ + pusd_struct_t PUSD; \ + PUSD.PUSD_hi_struct = NATIVE_NV_READ_PUSD_HI_REG(); \ + PUSD.PUSD_lo_struct = NATIVE_NV_READ_PUSD_LO_REG(); \ + PUSD; \ +}) +#define NATIVE_NV_READ_PUSD_REG_TO(PUSD) \ +({ \ + *PUSD = NATIVE_NV_READ_PUSD_REG(); \ +}) +#define READ_PUSD_REG() \ +({ \ + pusd_struct_t PUSD; \ + PUSD.PUSD_hi_struct = READ_PUSD_HI_REG(); \ + PUSD.PUSD_lo_struct = READ_PUSD_LO_REG(); \ + PUSD; \ +}) +#define READ_PUSD_REG_TO(PUSD) \ +({ \ + *PUSD = READ_PUSD_REG(); \ +}) +static inline void +native_nv_read_PUSD_reg(pusd_struct_t *PUSD) +{ + NATIVE_NV_READ_PUSD_REG_TO(PUSD); +} +static inline void +read_PUSD_reg(pusd_struct_t *PUSD) +{ + READ_PUSD_REG_TO(PUSD); +} + +/* + * Write low double-word Protected User Stack Descriptor Register (PUSD) + * from the low word structure + * Register fields filling: PUSD_lo.PUSD_lo_xxx = fff; + * Register double-word half filling: PUSD_lo.PUSD_lo_half = pusd_lo; + */ +#define NATIVE_NV_WRITE_PUSD_LO_REG(PUSD_lo) \ + 
NATIVE_NV_WRITE_PUSD_LO_REG_VALUE(PUSD_lo.PUSD_lo_half) +#define WRITE_PUSD_LO_REG(PUSD_lo) \ + WRITE_PUSD_LO_REG_VALUE(PUSD_lo.PUSD_lo_half) + +static inline void +native_nv_write_PUSD_lo_reg(e2k_pusd_lo_t PUSD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PUSD_lo register 0x%lx\n", PUSD_lo.PUSD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_WRITE_PUSD_LO_REG(PUSD_lo); +} +static inline void +write_PUSD_lo_reg(e2k_pusd_lo_t PUSD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PUSD_lo register 0x%lx\n", PUSD_lo.PUSD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_PUSD_LO_REG(PUSD_lo); +} + +/* + * Write high double-word Protected User Stack Descriptor Register (PUSD) + * from the high word structure + * Register fields filling: PUSD_hi.PUSD_hi_xxx = fff; + * Register double-word half filling: PUSD_hi.PUSD_hi_half = pusd_hi; + */ +#define NATIVE_NV_WRITE_PUSD_HI_REG(PUSD_hi) \ + NATIVE_NV_WRITE_PUSD_HI_REG_VALUE(PUSD_hi.PUSD_hi_half) +#define WRITE_PUSD_HI_REG(PUSD_hi) \ + WRITE_PUSD_HI_REG_VALUE(PUSD_hi.PUSD_hi_half) + +static inline void +native_nv_write_PUSD_hi_reg(e2k_pusd_hi_t PUSD_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PUSD_hi register 0x%lx\n", PUSD_hi.PUSD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_WRITE_PUSD_HI_REG(PUSD_hi); +} +static inline void +write_PUSD_hi_reg(e2k_pusd_hi_t PUSD_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PUSD_hi register 0x%lx\n", PUSD_hi.PUSD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_PUSD_HI_REG(PUSD_hi); +} + +/* + * Write high & low quad-word Protected User Stack Descriptor Register (PUSD) + * from the high & low word structure + */ +#define NATIVE_NV_WRITE_PUSD_REG_VALUE(PUSD_hi_value, PUSD_lo_value) \ +({ \ + NATIVE_NV_WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value); \ + NATIVE_NV_WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value); \ +}) +#define NATIVE_NV_WRITE_PUSD_REG(PUSD_hi, PUSD_lo) \ +({ \ + 
NATIVE_NV_WRITE_PUSD_REG_VALUE(PUSD_hi.PUSD_hi_half, \ + PUSD_lo.PUSD_lo_half); \ +}) +#define WRITE_PUSD_REG_VALUE(PUSD_hi_value, PUSD_lo_value) \ +({ \ + WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value); \ + WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value); \ +}) +#define WRITE_PUSD_REG(PUSD_hi, PUSD_lo) \ +({ \ + WRITE_PUSD_REG_VALUE(PUSD_hi.PUSD_hi_half, PUSD_lo.PUSD_lo_half); \ +}) +static inline void +native_nv_write_PUSD_hi_lo_reg(e2k_pusd_hi_t PUSD_hi, e2k_pusd_lo_t PUSD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PUSD_hi register 0x%lx\n", PUSD_hi.PUSD_hi_half); + boot_printk("Write PUSD_lo register 0x%lx\n", PUSD_lo.PUSD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_WRITE_PUSD_REG(PUSD_hi, PUSD_lo); +} +static inline void +write_PUSD_hi_lo_reg(e2k_pusd_hi_t PUSD_hi, e2k_pusd_lo_t PUSD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PUSD_hi register 0x%lx\n", PUSD_hi.PUSD_hi_half); + boot_printk("Write PUSD_lo register 0x%lx\n", PUSD_lo.PUSD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_PUSD_REG(PUSD_hi, PUSD_lo); +} + +/* + * Write quad-word User Protected Stack Descriptor Register (PUSD) + * from the structure + * Register fields filling: PUSD.PUSD_xxx = fff; + * Register double-word halfs filling: PUSD.PUSD_lo_reg = PUSD_lo; + * PUSD.PUSD_hi_reg = PUSD_hi; + */ +static inline void +write_PUSD_reg(pusd_struct_t PUSD) +{ + WRITE_PUSD_REG(PUSD.PUSD_hi_struct, PUSD.PUSD_lo_struct); +} + +/* + * Read double-word User Stacks Base Register (USBR) to the structure + * Register fields access: fff = USBR -> USBR_xxx + * Register entire access: USBR_entire = USBR -> USBR_reg + */ +#define READ_USBR_REG() \ +({ \ + e2k_usbr_t USBR; \ + USBR.USBR_reg = READ_USBR_REG_VALUE(); \ + USBR; \ +}) +#define NATIVE_NV_READ_USBR_REG() \ +({ \ + e2k_usbr_t USBR; \ + USBR.USBR_reg = NATIVE_NV_READ_USBR_REG_VALUE(); \ + USBR; \ +}) +#define BOOT_READ_USBR_REG() \ +({ \ + e2k_usbr_t USBR; \ + USBR.USBR_reg = BOOT_READ_USBR_REG_VALUE(); \ + 
USBR; \ +}) +static inline e2k_usbr_t +read_USBR_reg(void) +{ + return READ_USBR_REG(); +} +static inline e2k_usbr_t +boot_read_USBR_reg(void) +{ + return BOOT_READ_USBR_REG(); +} + +/* + * Write double-word User Stacks Base Register (USBR) from the structure + * Register fields filling: USBR.USBR_xxx = fff; + * Register entire filling: USBR.USBR_reg = USBR_value; + */ +#define WRITE_USBR_REG(USBR) WRITE_USBR_REG_VALUE(USBR.USBR_reg) +#define BOOT_WRITE_USBR_REG(USBR) \ + BOOT_WRITE_USBR_REG_VALUE(USBR.USBR_reg) + +static inline void +write_USBR_reg(e2k_usbr_t USBR) +{ + WRITE_USBR_REG(USBR); +} +static inline void +boot_write_USBR_reg(e2k_usbr_t USBR) +{ + BOOT_WRITE_USBR_REG(USBR); +} + +/* + * Read double-word Stacks Base Register (SBR) to the structure + * Register fields access: fff = SBR -> SBR_xxx + * Register entire access: SBR_entire = SBR -> SBR_reg + */ +#define NATIVE_NV_READ_SBR_REG() \ +({ \ + e2k_sbr_t SBR; \ + SBR.SBR_reg = NATIVE_NV_READ_SBR_REG_VALUE(); \ + SBR; \ +}) +#define READ_SBR_REG() \ +({ \ + e2k_sbr_t SBR; \ + SBR.SBR_reg = READ_SBR_REG_VALUE(); \ + SBR; \ +}) +#define BOOT_READ_SBR_REG() \ +({ \ + e2k_sbr_t SBR; \ + SBR.SBR_reg = BOOT_READ_SBR_REG_VALUE(); \ + SBR; \ +}) + +static inline e2k_sbr_t +native_nv_read_SBR_reg(void) +{ + return NATIVE_NV_READ_SBR_REG(); +} +static inline e2k_sbr_t +read_SBR_reg(void) +{ + return READ_SBR_REG(); +} +static inline e2k_sbr_t +boot_read_SBR_reg(void) +{ + return BOOT_READ_SBR_REG(); +} + +/* + * Write double-word Stacks Base Register (SBR) from the structure + * Register fields filling: SBR.SBR_xxx = fff; + * Register entire filling: SBR.SBR_reg = SBR_value; + */ +#define NATIVE_NV_WRITE_SBR_REG(SBR) \ + NATIVE_NV_WRITE_SBR_REG_VALUE(SBR.SBR_reg) +#define WRITE_SBR_REG(SBR) \ + WRITE_SBR_REG_VALUE(SBR.SBR_reg) +#define BOOT_WRITE_SBR_REG(SBR) \ + BOOT_WRITE_SBR_REG_VALUE(SBR.SBR_reg) + +static inline void +native_nv_write_SBR_reg(e2k_sbr_t SBR) +{ + NATIVE_NV_WRITE_SBR_REG(SBR); +} +static inline 
void +write_SBR_reg(e2k_sbr_t SBR) +{ + WRITE_SBR_REG(SBR); +} +static inline void +boot_write_SBR_reg(e2k_sbr_t SBR) +{ + BOOT_WRITE_SBR_REG(SBR); +} + +#define NATIVE_NV_READ_PSHTP_REG() \ +({ \ + e2k_pshtp_t PSHTP_reg; \ + PSHTP_reg.word = NATIVE_NV_READ_PSHTP_REG_VALUE(); \ + PSHTP_reg; \ +}) +#define READ_PSHTP_REG() \ +({ \ + e2k_pshtp_t PSHTP_reg; \ + PSHTP_reg.word = READ_PSHTP_REG_VALUE(); \ + PSHTP_reg; \ +}) + +#define NATIVE_WRITE_PSHTP_REG(PSHTP_reg) \ +({ \ + NATIVE_WRITE_PSHTP_REG_VALUE(AS_WORD(PSHTP_reg)); \ +}) +#define NATIVE_STRIP_PSHTP_WINDOW() NATIVE_WRITE_PSHTP_REG_VALUE(0) +#define WRITE_PSHTP_REG(PSHTP_reg) \ +({ \ + WRITE_PSHTP_REG_VALUE(AS_WORD(PSHTP_reg)); \ +}) +#define STRIP_PSHTP_WINDOW() WRITE_PSHTP_REG_VALUE(0) + +/* + * Read double-word Window Descriptor Register (WD) to the structure + * Register fields access: fff = WD -> WD_xxx + * Register entire access: WD_entire = WD -> WD_reg + */ +#define NATIVE_READ_WD_REG() \ +({ \ + e2k_wd_t WD; \ + WD.WD_reg = NATIVE_READ_WD_REG_VALUE(); \ + WD; \ +}) +#define READ_WD_REG() \ +({ \ + e2k_wd_t WD; \ + WD.WD_reg = READ_WD_REG_VALUE(); \ + WD; \ +}) +static inline e2k_wd_t +native_read_WD_reg(void) +{ + return NATIVE_READ_WD_REG(); +} +static inline e2k_wd_t +read_WD_reg(void) +{ + return READ_WD_REG(); +} + +/* + * Write double-word Window Descriptor Register (WD) from the structure + * Register fields filling: WD.WD_xxx = fff; + * Register entire filling: WD.WD_reg = WD_value; + */ +#define NATIVE_WRITE_WD_REG(WD) NATIVE_WRITE_WD_REG_VALUE(WD.WD_reg) +#define WRITE_WD_REG(WD) WRITE_WD_REG_VALUE(WD.WD_reg) +static inline void +native_write_WD_reg(e2k_wd_t WD) +{ + NATIVE_WRITE_WD_REG(WD); +} +static inline void +write_WD_reg(e2k_wd_t WD) +{ + WRITE_WD_REG(WD); +} + +#ifdef NEED_PARAVIRT_LOOP_REGISTERS + +/* + * Read double-word Loop Status Register (LSR) to the structure + * Register fields access: fff = LSR -> LSR_xxx + * Register entire access: LSR_entire = LSR -> LSR_reg + */ +#define 
READ_LSR_REG() \ +({ \ + e2k_lsr_t LSR; \ + LSR.LSR_reg = READ_LSR_REG_VALUE(); \ + LSR; \ +}) +static inline e2k_lsr_t +read_LSR_reg(void) +{ + return READ_LSR_REG(); +} + +/* + * Write double-word Loop Status Register (LSR) from the structure + * Register fields filling: LSR.LSR_xxx = fff; + * Register entire filling: LSR.LSR_reg = LSR_value; + */ +#define WRITE_LSR_REG(LSR) WRITE_LSR_REG_VALUE(LSR.LSR_reg) +static inline void +write_LSR_reg(e2k_lsr_t LSR) +{ + WRITE_LSR_REG(LSR); +} + +/* + * Read double-word Initial Loop Counters Register (ILCR) to the structure + * Register fields access: fff = ILCR -> ILCR_xxx + * Register entire access: ILCR_entire = ILCR -> ILCR_reg + */ +#define READ_ILCR_REG() \ +({ \ + e2k_ilcr_t ILCR; \ + ILCR.ILCR_reg = READ_ILCR_REG_VALUE(); \ + ILCR; \ +}) +static inline e2k_ilcr_t +read_ILCR_reg(void) +{ + return READ_ILCR_REG(); +} + +/* + * Write double-word Initial Loop Counters Register (ILCR) from the structure + * Register fields filling: ILCR.ILCR_xxx = fff; + * Register entire filling: ILCR.ILCR_reg = ILCR_value; + */ +#define WRITE_ILCR_REG(ILCR) WRITE_ILCR_REG_VALUE(ILCR.ILCR_reg) +static inline void +write_ILCR_reg(e2k_ilcr_t ILCR) +{ + WRITE_ILCR_REG(ILCR); +} + +/* + * Write double-word LSR/ILCR registers in complex + */ +#define WRITE_LSR_LSR1_ILCR_ILCR1_REGS(lsr, lsr1, ilcr, ilcr1) \ + WRITE_LSR_ILCR_LSR1_ILCR1_REGS_VALUE(lsr.ILSR_reg, \ + lsr1.LSR1_reg, ilcr.ILCR_reg, ilcr1.ILCR1_reg) +#endif /* NEED_PARAVIRT_LOOP_REGISTERS */ + +/* + * Read/write OS register which point to current process thread info + * structure (OSR0) + */ +#define NATIVE_GET_OSR0_REG_VALUE() NATIVE_NV_READ_OSR0_REG_VALUE() +#define NATIVE_READ_CURRENT_REG_VALUE() NATIVE_NV_READ_OSR0_REG_VALUE() +#define NATIVE_READ_CURRENT_REG() \ +({ \ + struct thread_info *TI; \ + TI = (struct thread_info *)NATIVE_READ_CURRENT_REG_VALUE(); \ + TI; \ +}) +#define READ_CURRENT_REG() \ +({ \ + struct thread_info *TI; \ + TI = (struct thread_info 
*)READ_CURRENT_REG_VALUE(); \ + TI; \ +}) +#define BOOT_READ_CURRENT_REG() \ +({ \ + struct thread_info *TI; \ + TI = (struct thread_info *)BOOT_READ_CURRENT_REG_VALUE(); \ + TI; \ +}) + +static inline struct thread_info * +read_current_reg(void) +{ + return READ_CURRENT_REG(); +} +static inline struct thread_info * +boot_read_current_reg(void) +{ + return BOOT_READ_CURRENT_REG(); +} + +#define NATIVE_SET_OSR0_REG_VALUE(TI) \ + NATIVE_NV_WRITE_OSR0_REG_VALUE(TI) +#define NATIVE_WRITE_CURRENT_REG(TI) \ + NATIVE_SET_OSR0_REG_VALUE((unsigned long)(TI)) +#define WRITE_CURRENT_REG(TI) \ + WRITE_CURRENT_REG_VALUE((unsigned long)(TI)) +#define BOOT_WRITE_CURRENT_REG(TI) \ + BOOT_WRITE_CURRENT_REG_VALUE((unsigned long)(TI)) +static inline void +native_write_current_reg(struct thread_info *TI) +{ + NATIVE_WRITE_CURRENT_REG(TI); +} +static inline void +write_current_reg(struct thread_info *TI) +{ + WRITE_CURRENT_REG(TI); +} + +/* + * Read/write OS Entries Mask (OSEM) + */ +#define READ_OSEM_REG() READ_OSEM_REG_VALUE() +static inline unsigned int +read_OSEM_reg(void) +{ + return READ_OSEM_REG(); +} +#define WRITE_OSEM_REG(OSEM) WRITE_OSEM_REG_VALUE(OSEM) +static inline void +write_OSEM_reg(unsigned int OSEM) +{ + WRITE_OSEM_REG(OSEM); +} + +#define READ_HCEM_REG() NATIVE_GET_SREG_CLOSED(hcem) +#define WRITE_HCEM_REG(value) NATIVE_SET_SREG_CLOSED_NOEXC(hcem, (value), 5) + +#define READ_HCEB_REG() NATIVE_GET_DSREG_CLOSED(hceb) +#define WRITE_HCEB_REG(value) NATIVE_SET_DSREG_CLOSED_NOEXC(hceb, (value), 5) + +/* + * Read/write word Base Global Register (BGR) to the structure + * Register fields access: fff = BGR.xxx + * Register entire access: BGR_entire = BGR.BGR_reg + */ +#define NATIVE_READ_BGR_REG() \ +({ \ + e2k_bgr_t BGR; \ + BGR.BGR_reg = NATIVE_READ_BGR_REG_VALUE(); \ + BGR; \ +}) +#define READ_BGR_REG() \ +({ \ + e2k_bgr_t BGR; \ + BGR.BGR_reg = READ_BGR_REG_VALUE(); \ + BGR; \ +}) +static inline e2k_bgr_t +native_read_BGR_reg(void) +{ + return NATIVE_READ_BGR_REG(); +} 
+static inline e2k_bgr_t +read_BGR_reg(void) +{ + return READ_BGR_REG(); +} + +/* + * Write word Base Global Register (BGR) from the structure + * Register fields filling: BGR.BGR_xxx = fff + * Register entire filling: BGR.BGR_reg = BGR_value + */ + +#define NATIVE_WRITE_BGR_REG(BGR) NATIVE_WRITE_BGR_REG_VALUE(BGR.BGR_reg) +#define WRITE_BGR_REG(BGR) WRITE_BGR_REG_VALUE(BGR.BGR_reg) +#define BOOT_WRITE_BGR_REG(BGR) BOOT_WRITE_BGR_REG_VALUE(BGR.BGR_reg) + +static inline void +native_write_BGR_reg(e2k_bgr_t bgr) +{ + NATIVE_WRITE_BGR_REG(bgr); +} +static inline void +write_BGR_reg(e2k_bgr_t bgr) +{ + WRITE_BGR_REG(bgr); +} + +#define NATIVE_INIT_BGR_REG() NATIVE_WRITE_BGR_REG(E2K_INITIAL_BGR) +#define NATIVE_BOOT_INIT_BGR_REG() NATIVE_INIT_BGR_REG() +#define INIT_BGR_REG() WRITE_BGR_REG(E2K_INITIAL_BGR) +#define BOOT_INIT_BGR_REG() BOOT_WRITE_BGR_REG(E2K_INITIAL_BGR) + +static inline void +native_init_BGR_reg(void) +{ + NATIVE_INIT_BGR_REG(); +} +static inline void +init_BGR_reg(void) +{ + INIT_BGR_REG(); +} +static inline void +native_boot_init_BGR_reg(void) +{ + NATIVE_BOOT_INIT_BGR_REG(); +} + +/* + * Read CPU current clock regigister (CLKR) + */ +#define READ_CLKR_REG() READ_CLKR_REG_VALUE() + +/* + * Read/Write system clock registers (SCLKM) + */ +#define READ_SCLKR_REG() READ_SCLKR_REG_VALUE() +#define READ_SCLKM1_REG() ((e2k_sclkm1_t) READ_SCLKM1_REG_VALUE()) +#define READ_SCLKM2_REG() READ_SCLKM2_REG_VALUE() +#define READ_SCLKM3_REG() READ_SCLKM3_REG_VALUE() + +#define WRITE_SCLKR_REG(reg_value) WRITE_SCLKR_REG_VALUE(reg_value) +#define WRITE_SCLKM1_REG(reg) WRITE_SCLKM1_REG_VALUE(AW(reg)) +#define WRITE_SCLKM2_REG(reg_value) WRITE_SCLKM2_REG_VALUE(reg_value) +#define WRITE_SCLKM3_REG(reg_value) WRITE_SCLKM3_REG_VALUE(reg_value) + +/* + * Read/Write Control Unit HardWare registers (CU_HW0/CU_HW1) + */ +#define READ_CU_HW0_REG() ((e2k_cu_hw0_t) { .word = READ_CU_HW0_REG_VALUE() }) +#define READ_CU_HW1_REG() READ_CU_HW1_REG_VALUE() + +#define 
WRITE_CU_HW0_REG(reg) WRITE_CU_HW0_REG_VALUE(reg.word) +#define WRITE_CU_HW1_REG(reg) WRITE_CU_HW1_REG_VALUE(reg) + +/* + * Read low/high double-word Recovery point register (RPR) + */ +#define NATIVE_READ_RPR_LO_REG() \ +({ \ + e2k_rpr_lo_t RPR_lo; \ + RPR_lo.RPR_lo_half = NATIVE_READ_RPR_LO_REG_VALUE(); \ + RPR_lo; \ +}) +#define NATIVE_READ_RPR_HI_REG() \ +({ \ + e2k_rpr_hi_t RPR_hi; \ + RPR_hi.RPR_hi_half = NATIVE_READ_RPR_HI_REG_VALUE(); \ + RPR_hi; \ +}) +#define NATIVE_CL_READ_RPR_LO_REG() \ +({ \ + e2k_rpr_lo_t RPR_lo; \ + RPR_lo.RPR_lo_half = NATIVE_CL_READ_RPR_LO_REG_VALUE(); \ + RPR_lo; \ +}) +#define NATIVE_CL_READ_RPR_HI_REG() \ +({ \ + e2k_rpr_hi_t RPR_hi; \ + RPR_hi.RPR_hi_half = NATIVE_CL_READ_RPR_HI_REG_VALUE(); \ + RPR_hi; \ +}) +#define READ_RPR_LO_REG() \ +({ \ + e2k_rpr_lo_t RPR_lo; \ + RPR_lo.RPR_lo_half = READ_RPR_LO_REG_VALUE(); \ + RPR_lo; \ +}) +#define READ_RPR_HI_REG() \ +({ \ + e2k_rpr_hi_t RPR_hi; \ + RPR_hi.RPR_hi_half = READ_RPR_HI_REG_VALUE(); \ + RPR_hi; \ +}) +static inline e2k_rpr_lo_t +native_read_RPR_lo_reg(void) +{ + return NATIVE_READ_RPR_LO_REG(); +} +static inline e2k_rpr_hi_t +native_read_RPR_hi_reg(void) +{ + return NATIVE_READ_RPR_HI_REG(); +} +static inline e2k_rpr_lo_t +read_RPR_lo_reg(void) +{ + return READ_RPR_LO_REG(); +} +static inline e2k_rpr_hi_t +read_RPR_hi_reg(void) +{ + return READ_RPR_HI_REG(); +} + +/* + * Write low/high double-word Recovery point register (RPR) + */ +#define NATIVE_WRITE_RPR_LO_REG(RPR_lo) \ + NATIVE_WRITE_RPR_LO_REG_VALUE(RPR_lo.RPR_lo_half) +#define NATIVE_WRITE_RPR_HI_REG(RPR_hi) \ + NATIVE_WRITE_RPR_HI_REG_VALUE(RPR_hi.RPR_hi_half) +#define WRITE_RPR_LO_REG(RPR_lo) \ + WRITE_RPR_LO_REG_VALUE(RPR_lo.RPR_lo_half) +#define WRITE_RPR_HI_REG(RPR_hi) \ + WRITE_RPR_HI_REG_VALUE(RPR_hi.RPR_hi_half) + +static inline void +native_write_RPR_lo_reg(e2k_rpr_lo_t RPR_lo) +{ + NATIVE_WRITE_RPR_LO_REG(RPR_lo); +} +static inline void +native_write_RPR_hi_reg(e2k_rpr_hi_t RPR_hi) +{ + 
NATIVE_WRITE_RPR_HI_REG(RPR_hi); +} +static inline void +write_RPR_lo_reg(e2k_rpr_lo_t RPR_lo) +{ + WRITE_RPR_LO_REG(RPR_lo); +} +static inline void +write_RPR_hi_reg(e2k_rpr_hi_t RPR_hi) +{ + WRITE_RPR_HI_REG(RPR_hi); +} + +/* + * Read CPU current Instruction Pointer register (IP) + */ +#define READ_IP_REG() READ_IP_REG_VALUE() +static inline unsigned long +read_ip_reg(void) +{ + return READ_IP_REG(); +} + +/* + * Read debug and monitors registers + */ +#define NATIVE_READ_DIBCR_REG() \ +({ \ + e2k_dibcr_t DIBCR; \ + AS_WORD(DIBCR) = NATIVE_READ_DIBCR_REG_VALUE(); \ + DIBCR; \ +}) +#define READ_DIBCR_REG() \ +({ \ + e2k_dibcr_t DIBCR; \ + AS_WORD(DIBCR) = READ_DIBCR_REG_VALUE(); \ + DIBCR; \ +}) +static inline e2k_dibcr_t +read_DIBCR_reg(void) +{ + return READ_DIBCR_REG(); +} + +#define NATIVE_READ_DIBSR_REG() \ +({ \ + e2k_dibsr_t DIBSR; \ + AS_WORD(DIBSR) = NATIVE_READ_DIBSR_REG_VALUE(); \ + DIBSR; \ +}) +#define READ_DIBSR_REG() \ +({ \ + e2k_dibsr_t DIBSR; \ + AS_WORD(DIBSR) = READ_DIBSR_REG_VALUE(); \ + DIBSR; \ +}) +static inline e2k_dibsr_t +read_DIBSR_reg(void) +{ + return READ_DIBSR_REG(); +} + +#define NATIVE_READ_DIMCR_REG() \ +({ \ + e2k_dimcr_t DIMCR; \ + AS_WORD(DIMCR) = NATIVE_READ_DIMCR_REG_VALUE(); \ + DIMCR; \ +}) +#define READ_DIMCR_REG() \ +({ \ + e2k_dimcr_t DIMCR; \ + AS_WORD(DIMCR) = READ_DIMCR_REG_VALUE(); \ + DIMCR; \ +}) +static inline e2k_dimcr_t +read_DIMCR_reg(void) +{ + return READ_DIMCR_REG(); +} + +#define NATIVE_READ_DIBAR0_REG() NATIVE_READ_DIBAR0_REG_VALUE() +#define NATIVE_READ_DIBAR1_REG() NATIVE_READ_DIBAR1_REG_VALUE() +#define NATIVE_READ_DIBAR2_REG() NATIVE_READ_DIBAR2_REG_VALUE() +#define NATIVE_READ_DIBAR3_REG() NATIVE_READ_DIBAR3_REG_VALUE() +#define READ_DIBAR0_REG() READ_DIBAR0_REG_VALUE() +#define READ_DIBAR1_REG() READ_DIBAR1_REG_VALUE() +#define READ_DIBAR2_REG() READ_DIBAR2_REG_VALUE() +#define READ_DIBAR3_REG() READ_DIBAR3_REG_VALUE() +static inline unsigned long +read_DIBAR0_reg(void) +{ + return 
READ_DIBAR0_REG(); +} +static inline unsigned long +read_DIBAR1_reg(void) +{ + return READ_DIBAR1_REG(); +} +static inline unsigned long +read_DIBAR2_reg(void) +{ + return READ_DIBAR2_REG(); +} +static inline unsigned long +read_DIBAR3_reg(void) +{ + return READ_DIBAR3_REG(); +} + +#define NATIVE_READ_DIMAR0_REG() NATIVE_READ_DIMAR0_REG_VALUE() +#define NATIVE_READ_DIMAR1_REG() NATIVE_READ_DIMAR1_REG_VALUE() +#define READ_DIMAR0_REG() READ_DIMAR0_REG_VALUE() +#define READ_DIMAR1_REG() READ_DIMAR1_REG_VALUE() +static inline unsigned long +read_DIMAR0_reg(void) +{ + return READ_DIMAR0_REG(); +} +static inline unsigned long +read_DIMAR1_reg(void) +{ + return READ_DIMAR1_REG(); +} + +#define NATIVE_WRITE_DIBCR_REG(DIBCR) \ + NATIVE_WRITE_DIBCR_REG_VALUE(DIBCR.DIBCR_reg) +#define WRITE_DIBCR_REG(DIBCR) WRITE_DIBCR_REG_VALUE(DIBCR.DIBCR_reg) +static inline void +write_DIBCR_reg(e2k_dibcr_t DIBCR) +{ + WRITE_DIBCR_REG(DIBCR); +} +#define NATIVE_WRITE_DIBSR_REG(DIBSR) \ + NATIVE_WRITE_DIBSR_REG_VALUE(DIBSR.DIBSR_reg) +#define WRITE_DIBSR_REG(DIBSR) WRITE_DIBSR_REG_VALUE(DIBSR.DIBSR_reg) +static inline void +write_DIBSR_reg(e2k_dibsr_t DIBSR) +{ + WRITE_DIBSR_REG(DIBSR); +} +#define NATIVE_WRITE_DIMCR_REG(DIMCR) \ + NATIVE_WRITE_DIMCR_REG_VALUE(DIMCR.DIMCR_reg) +#define WRITE_DIMCR_REG(DIMCR) WRITE_DIMCR_REG_VALUE(DIMCR.DIMCR_reg) + +#define NATIVE_WRITE_DIBAR0_REG(DIBAR0) NATIVE_WRITE_DIBAR0_REG_VALUE(DIBAR0) +#define NATIVE_WRITE_DIBAR1_REG(DIBAR1) NATIVE_WRITE_DIBAR1_REG_VALUE(DIBAR1) +#define NATIVE_WRITE_DIBAR2_REG(DIBAR2) NATIVE_WRITE_DIBAR2_REG_VALUE(DIBAR2) +#define NATIVE_WRITE_DIBAR3_REG(DIBAR3) NATIVE_WRITE_DIBAR3_REG_VALUE(DIBAR3) +#define WRITE_DIBAR0_REG(DIBAR0) WRITE_DIBAR0_REG_VALUE(DIBAR0) +#define WRITE_DIBAR1_REG(DIBAR1) WRITE_DIBAR1_REG_VALUE(DIBAR1) +#define WRITE_DIBAR2_REG(DIBAR2) WRITE_DIBAR2_REG_VALUE(DIBAR2) +#define WRITE_DIBAR3_REG(DIBAR3) WRITE_DIBAR3_REG_VALUE(DIBAR3) +static inline void +write_DIBAR0_reg(unsigned long DIBAR0) +{ + 
WRITE_DIBAR0_REG(DIBAR0); +} +static inline void +write_DIBAR1_reg(unsigned long DIBAR1) +{ + WRITE_DIBAR1_REG(DIBAR1); +} +static inline void +write_DIBAR2_reg(unsigned long DIBAR2) +{ + WRITE_DIBAR2_REG(DIBAR2); +} +static inline void +write_DIBAR3_reg(unsigned long DIBAR3) +{ + WRITE_DIBAR3_REG(DIBAR3); +} + +#define NATIVE_WRITE_DIMAR0_REG(DIMAR0) NATIVE_WRITE_DIMAR0_REG_VALUE(DIMAR0) +#define NATIVE_WRITE_DIMAR1_REG(DIMAR1) NATIVE_WRITE_DIMAR1_REG_VALUE(DIMAR1) +#define WRITE_DIMAR0_REG(DIMAR0) WRITE_DIMAR0_REG_VALUE(DIMAR0) +#define WRITE_DIMAR1_REG(DIMAR1) WRITE_DIMAR1_REG_VALUE(DIMAR1) +static inline void +write_DIMAR0_reg(unsigned long DIMAR0) +{ + WRITE_DIMAR0_REG(DIMAR0); +} +static inline void +write_DIMAR1_reg(unsigned long DIMAR1) +{ + WRITE_DIMAR1_REG(DIMAR1); +} + +/* + * Read double-word Compilation Unit Table Register (CUTD) to the structure + * Register fields access: fff = CUTD.CUTD_xxx or + * fff = CUTD->CUTD_xxx + * Register entire access: CUTD_entire = CUTD.CUTD_reg or + * CUTD_entire = CUTD->CUTD_reg + */ +#define NATIVE_NV_READ_CUTD_REG() \ +({ \ + e2k_cutd_t CUTD; \ + CUTD.CUTD_reg = NATIVE_NV_READ_CUTD_REG_VALUE(); \ + CUTD; \ +}) +#define READ_CUTD_REG() \ +({ \ + e2k_cutd_t CUTD; \ + CUTD.CUTD_reg = READ_CUTD_REG_VALUE(); \ + CUTD; \ +}) +static inline e2k_cutd_t +native_nv_read_CUTD_reg(void) +{ + return NATIVE_NV_READ_CUTD_REG(); +} +static inline e2k_cutd_t +read_CUTD_reg(void) +{ + return READ_CUTD_REG(); +} + +/* + * Write double-word Compilation Unit Table Register (CUTD) from the structure + * Register fields filling: CUTD.CUTD_xxx = fff or + * CUTD->CUTD_xxx = fff + * Register entire filling: CUTD.CUTD_reg = CUTD_value or + * CUTD->CUTD_reg = CUTD_value + */ +#define NATIVE_NV_NOIRQ_WRITE_CUTD_REG(CUTD) \ + NATIVE_NV_NOIRQ_WRITE_CUTD_REG_VALUE(CUTD.CUTD_reg) +#define WRITE_CUTD_REG(CUTD) \ + WRITE_CUTD_REG_VALUE(CUTD.CUTD_reg) +static inline void +native_nv_noirq_write_CUTD_reg(e2k_cutd_t CUTD) +{ + 
NATIVE_NV_NOIRQ_WRITE_CUTD_REG(CUTD); +} +static inline void +write_CUTD_reg(e2k_cutd_t CUTD) +{ + WRITE_CUTD_REG(CUTD); +} + +/* + * Read word Compilation Unit Index Register (CUIR) to the structure + * Register fields access: fff = CUIR.CUIR_xxx or + * fff = CUIR->CUIR_xxx + * Register entire access: CUIR_entire = CUIR.CUIR_reg or + * CUIR_entire = CUIR->CUIR_reg + */ +#define READ_CUIR_REG() \ +({ \ + e2k_cuir_t CUIR; \ + CUIR.CUIR_reg = READ_CUIR_REG_VALUE(); \ + CUIR; \ +}) +static inline e2k_cuir_t +read_CUIR_reg(void) +{ + return READ_CUIR_REG(); +} + +/* + * Read Core Mode Register (CORE_MODE) to the structure + * Register fields access: fff = AS_STRACT(CORE_MODE).xxx + * Register entire access: reg_entire = AS_WORD(CORE_MODE) + */ +#define NATIVE_READ_CORE_MODE_REG() \ +({ \ + e2k_core_mode_t CORE_MODE; \ + CORE_MODE.CORE_MODE_reg = NATIVE_READ_CORE_MODE_REG_VALUE(); \ + CORE_MODE; \ +}) +#define READ_CORE_MODE_REG() \ +({ \ + e2k_core_mode_t CORE_MODE; \ + CORE_MODE.CORE_MODE_reg = READ_CORE_MODE_REG_VALUE(); \ + CORE_MODE; \ +}) +#define BOOT_READ_CORE_MODE_REG() \ +({ \ + e2k_core_mode_t CORE_MODE; \ + CORE_MODE.CORE_MODE_reg = BOOT_READ_CORE_MODE_REG_VALUE(); \ + CORE_MODE; \ +}) + +/* + * Write Core Mode Register (CORE_MODE) from the structure + * Register fields filling: AS_STRACT(CORE_MODE).xxx = fff + * Register entire filling: AS_WORD(CORE_MODE) = CORE_MODE_value + */ +#define NATIVE_WRITE_CORE_MODE_REG(CORE_MODE) \ + NATIVE_WRITE_CORE_MODE_REG_VALUE(CORE_MODE.CORE_MODE_reg) +#define BOOT_NATIVE_WRITE_CORE_MODE_REG(CORE_MODE) \ + BOOT_NATIVE_WRITE_CORE_MODE_REG_VALUE(CORE_MODE.CORE_MODE_reg) +#define WRITE_CORE_MODE_REG(CORE_MODE) \ + WRITE_CORE_MODE_REG_VALUE(CORE_MODE.CORE_MODE_reg) +#define BOOT_WRITE_CORE_MODE_REG(CORE_MODE) \ + BOOT_WRITE_CORE_MODE_REG_VALUE(CORE_MODE.CORE_MODE_reg) + +/* + * Read word Processor State Register (PSR) to the structure + * Register fields access: fff = AS_STRACT(PSR).xxx + * Register entire access: PSR_entire = 
AS_WORD(PSR) + */ +#define BOOT_NATIVE_NV_READ_PSR_REG_VALUE() \ + NATIVE_NV_READ_PSR_REG_VALUE() +#define NATIVE_NV_READ_PSR_REG() \ +({ \ + e2k_psr_t PSR; \ + PSR.PSR_reg = NATIVE_NV_READ_PSR_REG_VALUE(); \ + PSR; \ +}) +#define READ_PSR_REG() \ +({ \ + e2k_psr_t PSR; \ + PSR.PSR_reg = READ_PSR_REG_VALUE(); \ + PSR; \ +}) +#define BOOT_READ_PSR_REG() \ +({ \ + e2k_psr_t PSR; \ + PSR.PSR_reg = BOOT_READ_PSR_REG_VALUE(); \ + PSR; \ +}) + +static inline e2k_psr_t +read_PSR_reg(void) +{ + return READ_PSR_REG(); +} + +/* + * Write word Processor State Register (PSR) from the structure + * Register fields filling: AS_STRACT(PSR).xxx = fff + * Register entire filling: AS_WORD(PSR) = PSR_value + */ +#define BOOT_NATIVE_WRITE_PSR_REG_VALUE(PSR_value) \ + NATIVE_WRITE_PSR_REG_VALUE(PSR_value) +#define NATIVE_WRITE_PSR_REG(PSR) NATIVE_WRITE_PSR_REG_VALUE(PSR.PSR_reg) +#define BOOT_NATIVE_WRITE_PSR_REG(PSR) \ + BOOT_NATIVE_WRITE_PSR_REG_VALUE(PSR.PSR_reg) +#define WRITE_PSR_REG(PSR) WRITE_PSR_REG_VALUE(PSR.PSR_reg) +#define BOOT_WRITE_PSR_REG(PSR) BOOT_WRITE_PSR_REG_VALUE(PSR.PSR_reg) + +static inline void +write_PSR_reg(e2k_psr_t PSR) +{ + WRITE_PSR_REG(PSR); +} + +/* + * Read word User Processor State Register (UPSR) to the structure + * Register fields access: fff = AS_STRACT(UPSR).xxx + * Register entire access: UPSR_entire = AS_WORD(UPSR) + */ +#define BOOT_NATIVE_NV_READ_UPSR_REG_VALUE() \ + NATIVE_NV_READ_UPSR_REG_VALUE() +#define NATIVE_NV_READ_UPSR_REG() \ +({ \ + e2k_upsr_t UPSR; \ + UPSR.UPSR_reg = NATIVE_NV_READ_UPSR_REG_VALUE(); \ + UPSR; \ +}) +#define READ_UPSR_REG() \ +({ \ + e2k_upsr_t UPSR; \ + UPSR.UPSR_reg = READ_UPSR_REG_VALUE(); \ + UPSR; \ +}) + +static inline e2k_upsr_t +read_UPSR_reg(void) +{ + return READ_UPSR_REG(); +} + +/* + * Write word User Processor State Register (UPSR) from the structure + * Register fields filling: AS_STRACT(UPSR).xxx = fff + * Register entire filling: AS_WORD(UPSR) = UPSR_value + */ +#define 
BOOT_NATIVE_WRITE_UPSR_REG_VALUE(UPSR_value) \ + NATIVE_WRITE_UPSR_REG_VALUE(UPSR_value) +#define NATIVE_WRITE_UPSR_REG(UPSR) \ + NATIVE_WRITE_UPSR_REG_VALUE(UPSR.UPSR_reg) +#define BOOT_NATIVE_WRITE_UPSR_REG(UPSR) \ + NATIVE_WRITE_UPSR_REG(UPSR) +#define WRITE_UPSR_REG(UPSR) \ + WRITE_UPSR_REG_VALUE(UPSR.UPSR_reg) +#define BOOT_WRITE_UPSR_REG(UPSR) \ + BOOT_WRITE_UPSR_REG_VALUE(UPSR.UPSR_reg) + +static inline void +native_write_UPSR_reg(e2k_upsr_t UPSR) +{ + NATIVE_WRITE_UPSR_REG(UPSR); +} +static inline void +boot_native_write_UPSR_reg(e2k_upsr_t UPSR) +{ + BOOT_NATIVE_WRITE_UPSR_REG(UPSR); +} +static inline void +write_UPSR_reg(e2k_upsr_t UPSR) +{ + WRITE_UPSR_REG(UPSR); +} + +/* + * Read word floating point control registers (PFPFR/FPCR/FPSR) to the structure + * Register fields access: fff = PFnnn.yyy + * Register entire access: PFnnn_entire = PFnnn_value + */ +#define NATIVE_NV_READ_PFPFR_REG() \ +({ \ + e2k_pfpfr_t PFPFR; \ + PFPFR.PFPFR_reg = NATIVE_NV_READ_PFPFR_REG_VALUE(); \ + PFPFR; \ +}) +#define NATIVE_NV_READ_FPCR_REG() \ +({ \ + e2k_fpcr_t FPCR; \ + FPCR.FPCR_reg = NATIVE_NV_READ_FPCR_REG_VALUE(); \ + FPCR; \ +}) +#define NATIVE_NV_READ_FPSR_REG() \ +({ \ + e2k_fpsr_t FPSR; \ + FPSR.FPSR_reg = NATIVE_NV_READ_FPSR_REG_VALUE(); \ + FPSR; \ +}) +#define READ_PFPFR_REG() \ +({ \ + e2k_pfpfr_t PFPFR; \ + PFPFR.PFPFR_reg = READ_PFPFR_REG_VALUE(); \ + PFPFR; \ +}) +#define READ_FPCR_REG() \ +({ \ + e2k_fpcr_t FPCR; \ + FPCR.FPCR_reg = READ_FPCR_REG_VALUE(); \ + FPCR; \ +}) +#define READ_FPSR_REG() \ +({ \ + e2k_fpsr_t FPSR; \ + FPSR.FPSR_reg = READ_FPSR_REG_VALUE(); \ + FPSR; \ +}) +static inline e2k_pfpfr_t +native_nv_read_PFPFR_reg(void) +{ + return NATIVE_NV_READ_PFPFR_REG(); +} +static inline e2k_fpcr_t +native_nv_read_FPCR_reg(void) +{ + return NATIVE_NV_READ_FPCR_REG(); +} +static inline e2k_fpsr_t +native_nv_read_FPSR_reg(void) +{ + return NATIVE_NV_READ_FPSR_REG(); +} +static inline e2k_pfpfr_t +read_PFPFR_reg(void) +{ + return READ_PFPFR_REG(); +} 
+static inline e2k_fpcr_t +read_FPCR_reg(void) +{ + return READ_FPCR_REG(); +} +static inline e2k_fpsr_t +read_FPSR_reg(void) +{ + return READ_FPSR_REG(); +} + +/* + * Write word floating point control registers (PFPFR/FPCR/FPSR) + * from the structure + * Register fields filling: PFnnn.xxx = fff + * Register entire filling: PFnnn = PFnnn_value + */ +#define NATIVE_NV_WRITE_PFPFR_REG(PFPFR) \ + NATIVE_NV_WRITE_PFPFR_REG_VALUE(PFPFR.PFPFR_reg) +#define NATIVE_NV_WRITE_FPCR_REG(FPCR) \ + NATIVE_NV_WRITE_FPCR_REG_VALUE(FPCR.FPCR_reg) +#define NATIVE_NV_WRITE_FPSR_REG(FPSR) \ + NATIVE_NV_WRITE_FPSR_REG_VALUE(FPSR.FPSR_reg) +#define WRITE_PFPFR_REG(PFPFR) \ + WRITE_PFPFR_REG_VALUE(PFPFR.PFPFR_reg) +#define WRITE_FPCR_REG(FPCR) WRITE_FPCR_REG_VALUE(FPCR.FPCR_reg) +#define WRITE_FPSR_REG(FPSR) WRITE_FPSR_REG_VALUE(FPSR.FPSR_reg) +static inline void +native_nv_write_PFPFR_reg(e2k_pfpfr_t PFPFR) +{ + NATIVE_NV_WRITE_PFPFR_REG(PFPFR); +} +static inline void +native_nv_write_FPCR_reg(e2k_fpcr_t FPCR) +{ + NATIVE_NV_WRITE_FPCR_REG(FPCR); +} +static inline void +native_nv_write_FPSR_reg(e2k_fpsr_t FPSR) +{ + NATIVE_NV_WRITE_FPSR_REG(FPSR); +} +static inline void +write_PFPFR_reg(e2k_pfpfr_t PFPFR) +{ + WRITE_PFPFR_REG(PFPFR); +} +static inline void +write_FPCR_reg(e2k_fpcr_t FPCR) +{ + WRITE_FPCR_REG(FPCR); +} +static inline void +write_FPSR_reg(e2k_fpsr_t FPSR) +{ + WRITE_FPSR_REG(FPSR); +} + +/* + * Read doubleword Processor Identification Register (IDR) to the structure + * Register fields access: fff = AS_STRACT(IDR).xxx or + * fff = IDR.IDR_xxx + * Register entire access: IDR_entire = AS_WORD(IDR) or + * IDR_entire = IDR.IDR_reg + */ +#define NATIVE_READ_IDR_REG() \ +({ \ + e2k_idr_t IDR; \ + AS_WORD(IDR) = NATIVE_READ_IDR_REG_VALUE(); \ + IDR; \ +}) +#define BOOT_NATIVE_READ_IDR_REG() NATIVE_READ_IDR_REG() +static inline e2k_idr_t +native_read_IDR_reg(void) +{ + return NATIVE_READ_IDR_REG(); +} +static inline e2k_idr_t +boot_native_read_IDR_reg(void) +{ + return 
BOOT_NATIVE_READ_IDR_REG(); +} +#define READ_IDR_REG() \ +({ \ + e2k_idr_t IDR; \ + AS_WORD(IDR) = READ_IDR_REG_VALUE(); \ + IDR; \ +}) +#define BOOT_READ_IDR_REG() \ +({ \ + e2k_idr_t IDR; \ + AS_WORD(IDR) = BOOT_READ_IDR_REG_VALUE(); \ + IDR; \ +}) +static inline e2k_idr_t +read_IDR_reg(void) +{ + return READ_IDR_REG(); +} +static inline e2k_idr_t +boot_read_IDR_reg(void) +{ + return BOOT_READ_IDR_REG(); +} + +static inline instr_cs0_t *find_cs0(void *ip) +{ + instr_hs_t *hs; + + hs = (instr_hs_t *) &E2K_GET_INSTR_HS(ip); + if (!hs->c0) + return NULL; + + return (instr_cs0_t *) (hs + hs->s + hweight32(hs->al) + 1); +} + +static inline instr_cs1_t *find_cs1(void *ip) +{ + instr_hs_t *hs; + + hs = (instr_hs_t *) &E2K_GET_INSTR_HS(ip); + if (!hs->c1) + return NULL; + + return (instr_cs1_t *) (hs + hs->mdl); +} + +static inline int get_instr_size_by_vaddr(unsigned long addr) +{ + int instr_size; + instr_syl_t *syl; + instr_hs_t hs; + + syl = &E2K_GET_INSTR_HS((e2k_addr_t)addr); + hs.word = *syl; + instr_size = E2K_GET_INSTR_SIZE(hs); + + return instr_size; +} + +#endif /* ! 
__ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* _E2K_CPU_REGS_H_ */ diff --git a/arch/e2k/include/asm/cpu_regs_access.h b/arch/e2k/include/asm/cpu_regs_access.h new file mode 100644 index 000000000000..4b2790495abc --- /dev/null +++ b/arch/e2k/include/asm/cpu_regs_access.h @@ -0,0 +1,555 @@ + +#ifndef _E2K_CPU_REGS_ACCESS_H_ +#define _E2K_CPU_REGS_ACCESS_H_ + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +#ifndef __ASSEMBLY__ + +#if CONFIG_CPU_ISET >= 3 +# define native_read_CORE_MODE_reg_value() \ + NATIVE_READ_CORE_MODE_REG_VALUE() +# define native_write_CORE_MODE_reg_value(modes) \ + NATIVE_WRITE_CORE_MODE_REG_VALUE((modes)) +#else +# define native_read_CORE_MODE_reg_value() \ + (machine.rrd(E2K_REG_CORE_MODE)) +# define native_write_CORE_MODE_reg_value(modes) \ + (machine.rwd(E2K_REG_CORE_MODE, modes)) +#endif +#define native_read_OSCUTD_reg_value() \ + (machine.rrd(E2K_REG_OSCUTD)) +#define native_write_OSCUTD_reg_value(modes) \ + (machine.rwd(E2K_REG_OSCUTD, modes)) +#define native_read_OSCUIR_reg_value() \ + (machine.rrd(E2K_REG_OSCUIR)) +#define native_write_OSCUIR_reg_value(v) /* takes the value to write, like OSCUTD */ \ + (machine.rwd(E2K_REG_OSCUIR, (v))) + +#define boot_native_read_CORE_MODE_reg_value() \ +({ \ + typeof(boot_machine.boot_rrd) func; \ + func = boot_native_vp_to_pp(boot_machine.boot_rrd); \ + func(E2K_REG_CORE_MODE); \ +}) +#define boot_native_write_CORE_MODE_reg_value(modes) \ +({ \ + typeof(boot_machine.boot_rwd) func; \ + func = boot_native_vp_to_pp(boot_machine.boot_rwd); \ + func(E2K_REG_CORE_MODE, modes); \ +}) +#define boot_native_read_OSCUTD_reg_value() \ +({ \ + typeof(boot_machine.boot_rrd) func; \ + func = boot_native_vp_to_pp(boot_machine.boot_rrd); \ + func(E2K_REG_OSCUTD); \ +}) +#define boot_native_write_OSCUTD_reg_value(v) \ +({ \ + typeof(boot_machine.boot_rwd) func; \ + func = boot_native_vp_to_pp(boot_machine.boot_rwd); \ + func(E2K_REG_OSCUTD, (v)); \ +}) +#define boot_native_read_OSCUIR_reg_value() \ +({ \ + typeof(boot_machine.boot_rrd) 
func; \ + func = boot_native_vp_to_pp(boot_machine.boot_rrd); \ + func(E2K_REG_OSCUIR); \ +}) +#define boot_native_write_OSCUIR_reg_value(v) \ +({ \ + typeof(boot_machine.boot_rwd) func; \ + func = boot_native_vp_to_pp(boot_machine.boot_rwd); \ + func(E2K_REG_OSCUIR, v); \ +}) + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native guest kernel (not paravirtualized based on pv_ops) */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#else /* native kernel */ +/* it is native kernel without any virtualization */ +/* or host kernel with virtualization support */ + +/* + * Set flags of updated VCPU registers + */ +#define PUT_UPDATED_CPU_REGS_FLAGS(flags) + +/* + * Read/write word Procedure Stack Harware Top Pointer (PSHTP) + */ +#define READ_PSHTP_REG_VALUE() NATIVE_NV_READ_PSHTP_REG_VALUE() + +#define WRITE_PSHTP_REG_VALUE(PSHTP_value) \ + NATIVE_WRITE_PSHTP_REG_VALUE(PSHTP_value) + +/* + * Read/write word Procedure Chain Stack Harware Top Pointer (PCSHTP) + */ +#define READ_PCSHTP_REG_SVALUE() NATIVE_READ_PCSHTP_REG_SVALUE() + +#define WRITE_PCSHTP_REG_SVALUE(PCSHTP_svalue) \ + NATIVE_WRITE_PCSHTP_REG_SVALUE(PCSHTP_svalue) + +/* + * Read/write low/high double-word OS Compilation Unit Descriptor (OSCUD) + */ + +#define READ_OSCUD_LO_REG_VALUE() NATIVE_READ_OSCUD_LO_REG_VALUE() +#define READ_OSCUD_HI_REG_VALUE() NATIVE_READ_OSCUD_HI_REG_VALUE() +#define BOOT_READ_OSCUD_LO_REG_VALUE() NATIVE_READ_OSCUD_LO_REG_VALUE() +#define BOOT_READ_OSCUD_HI_REG_VALUE() NATIVE_READ_OSCUD_HI_REG_VALUE() + +#define WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \ + NATIVE_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) +#define WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \ + NATIVE_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) +#define BOOT_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \ + NATIVE_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) +#define BOOT_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \ + NATIVE_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) + +/* + * 
Read/write low/hgh double-word OS Globals Register (OSGD) + */ + +#define READ_OSGD_LO_REG_VALUE() NATIVE_READ_OSGD_LO_REG_VALUE() +#define READ_OSGD_HI_REG_VALUE() NATIVE_READ_OSGD_HI_REG_VALUE() +#define BOOT_READ_OSGD_LO_REG_VALUE() NATIVE_READ_OSGD_LO_REG_VALUE() +#define BOOT_READ_OSGD_HI_REG_VALUE() NATIVE_READ_OSGD_HI_REG_VALUE() + +#define WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \ + NATIVE_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) +#define WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \ + NATIVE_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) +#define BOOT_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \ + NATIVE_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) +#define BOOT_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \ + NATIVE_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) +#define WRITE_OSGD_REG_VALUE(OSGD_hi_value, OSGD_lo_value) \ + NATIVE_WRITE_OSGD_REG_VALUE(OSGD_hi_value, OSGD_lo_value) + +/* + * Read/write low/high double-word Compilation Unit Register (CUD) + */ + +#define READ_CUD_LO_REG_VALUE() NATIVE_READ_CUD_LO_REG_VALUE() +#define READ_CUD_HI_REG_VALUE() NATIVE_READ_CUD_HI_REG_VALUE() +#define BOOT_READ_CUD_LO_REG_VALUE() NATIVE_READ_CUD_LO_REG_VALUE() +#define BOOT_READ_CUD_HI_REG_VALUE() NATIVE_READ_CUD_HI_REG_VALUE() + +#define WRITE_CUD_LO_REG_VALUE(CUD_lo_value) \ + NATIVE_WRITE_CUD_LO_REG_VALUE(CUD_lo_value) +#define WRITE_CUD_HI_REG_VALUE(CUD_hi_value) \ + NATIVE_WRITE_CUD_HI_REG_VALUE(CUD_hi_value) +#define BOOT_WRITE_CUD_LO_REG_VALUE(CUD_lo_value) \ + NATIVE_WRITE_CUD_LO_REG_VALUE(CUD_lo_value) +#define BOOT_WRITE_CUD_HI_REG_VALUE(CUD_hi_value) \ + NATIVE_WRITE_CUD_HI_REG_VALUE(CUD_hi_value) + +/* + * Read/write low/high double-word Globals Register (GD) + */ + +#define READ_GD_LO_REG_VALUE() NATIVE_READ_GD_LO_REG_VALUE() +#define READ_GD_HI_REG_VALUE() NATIVE_READ_GD_HI_REG_VALUE() +#define BOOT_READ_GD_LO_REG_VALUE() NATIVE_READ_GD_LO_REG_VALUE() +#define BOOT_READ_GD_HI_REG_VALUE() NATIVE_READ_GD_HI_REG_VALUE() + +#define WRITE_GD_LO_REG_VALUE(GD_lo_value) \ + 
NATIVE_WRITE_GD_LO_REG_VALUE(GD_lo_value) +#define WRITE_GD_HI_REG_VALUE(GD_hi_value) \ + NATIVE_WRITE_GD_HI_REG_VALUE(GD_hi_value) +#define BOOT_WRITE_GD_LO_REG_VALUE(GD_lo_value) \ + NATIVE_WRITE_GD_LO_REG_VALUE(GD_lo_value) +#define BOOT_WRITE_GD_HI_REG_VALUE(GD_hi_value) \ + NATIVE_WRITE_GD_HI_REG_VALUE(GD_hi_value) + +/* + * Read/write low/high quad-word Procedure Stack Pointer Register (PSP) + */ + +#define READ_PSP_LO_REG_VALUE() NATIVE_NV_READ_PSP_LO_REG_VALUE() +#define READ_PSP_HI_REG_VALUE() NATIVE_NV_READ_PSP_HI_REG_VALUE() +#define BOOT_READ_PSP_LO_REG_VALUE() NATIVE_NV_READ_PSP_LO_REG_VALUE() +#define BOOT_READ_PSP_HI_REG_VALUE() NATIVE_NV_READ_PSP_HI_REG_VALUE() + +#define WRITE_PSP_LO_REG_VALUE(PSP_lo_value) \ + NATIVE_NV_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) +#define WRITE_PSP_HI_REG_VALUE(PSP_hi_value) \ + NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) +#define BOOT_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) \ + NATIVE_NV_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) +#define BOOT_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) \ + NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) + +/* + * Read/write low/high quad-word Procedure Chain Stack Pointer Register (PCSP) + */ +#define READ_PCSP_LO_REG_VALUE() NATIVE_NV_READ_PCSP_LO_REG_VALUE() +#define READ_PCSP_HI_REG_VALUE() NATIVE_NV_READ_PCSP_HI_REG_VALUE() +#define BOOT_READ_PCSP_LO_REG_VALUE() NATIVE_NV_READ_PCSP_LO_REG_VALUE() +#define BOOT_READ_PCSP_HI_REG_VALUE() NATIVE_NV_READ_PCSP_HI_REG_VALUE() + +#define WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) \ + NATIVE_NV_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) +#define WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) \ + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) +#define BOOT_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) \ + NATIVE_NV_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) +#define BOOT_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) \ + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) + +/* + * Read/write low/high quad-word Current Chain Register (CR0/CR1) + */ +#define 
READ_CR0_LO_REG_VALUE() NATIVE_NV_READ_CR0_LO_REG_VALUE() +#define READ_CR0_HI_REG_VALUE() NATIVE_NV_READ_CR0_HI_REG_VALUE() +#define READ_CR1_LO_REG_VALUE() NATIVE_NV_READ_CR1_LO_REG_VALUE() +#define READ_CR1_HI_REG_VALUE() NATIVE_NV_READ_CR1_HI_REG_VALUE() + +#define WRITE_CR0_LO_REG_VALUE(CR0_lo_value) \ + NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG_VALUE(CR0_lo_value) +#define WRITE_CR0_HI_REG_VALUE(CR0_hi_value) \ + NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG_VALUE(CR0_hi_value) +#define WRITE_CR1_LO_REG_VALUE(CR1_lo_value) \ + NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG_VALUE(CR1_lo_value) +#define WRITE_CR1_HI_REG_VALUE(CR1_hi_value) \ + NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG_VALUE(CR1_hi_value) + +/* + * Read/write double-word Control Transfer Preparation Registers + * (CTPR1/CTPR2/CTPR3) + */ +#define READ_CTPR_REG_VALUE(reg_no) NATIVE_NV_READ_CTPR_REG_VALUE(reg_no) + +#define WRITE_CTPR_REG_VALUE(reg_no, CTPR_value) \ + NATIVE_WRITE_CTPR_REG_VALUE(reg_no, CTPR_value) + +/* + * Read/write low/high double-word Non-Protected User Stack Descriptor + * Register (USD) + */ +#define READ_USD_LO_REG_VALUE() NATIVE_NV_READ_USD_LO_REG_VALUE() +#define READ_USD_HI_REG_VALUE() NATIVE_NV_READ_USD_HI_REG_VALUE() +#define BOOT_READ_USD_LO_REG_VALUE() NATIVE_NV_READ_USD_LO_REG_VALUE() +#define BOOT_READ_USD_HI_REG_VALUE() NATIVE_NV_READ_USD_HI_REG_VALUE() + +#define WRITE_USD_LO_REG_VALUE(USD_lo_value) \ + NATIVE_NV_WRITE_USD_LO_REG_VALUE(USD_lo_value) +#define WRITE_USD_HI_REG_VALUE(USD_hi_value) \ + NATIVE_NV_WRITE_USD_HI_REG_VALUE(USD_hi_value) +#define BOOT_WRITE_USD_LO_REG_VALUE(USD_lo_value) \ + NATIVE_NV_WRITE_USD_LO_REG_VALUE(USD_lo_value) +#define BOOT_WRITE_USD_HI_REG_VALUE(USD_hi_value) \ + NATIVE_NV_WRITE_USD_HI_REG_VALUE(USD_hi_value) + +/* + * Read/write low/high double-word Protected User Stack Descriptor + * Register (PUSD) + */ +#define READ_PUSD_LO_REG_VALUE() NATIVE_NV_READ_PUSD_LO_REG_VALUE() +#define READ_PUSD_HI_REG_VALUE() NATIVE_NV_READ_PUSD_HI_REG_VALUE() + +#define 
WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value) \ + NATIVE_NV_WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value) +#define WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value) \ + NATIVE_NV_WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value) + +/* + * Read/write double-word User Stacks Base Register (USBR) + */ +#define READ_USBR_REG_VALUE() NATIVE_NV_READ_USBR_REG_VALUE() +#define READ_SBR_REG_VALUE() NATIVE_NV_READ_SBR_REG_VALUE() +#define BOOT_READ_USBR_REG_VALUE() NATIVE_NV_READ_USBR_REG_VALUE() +#define BOOT_READ_SBR_REG_VALUE() NATIVE_NV_READ_SBR_REG_VALUE() + +#define WRITE_USBR_REG_VALUE(USBR_value) \ + NATIVE_NV_WRITE_USBR_REG_VALUE(USBR_value) +#define WRITE_SBR_REG_VALUE(SBR_value) \ + NATIVE_NV_WRITE_SBR_REG_VALUE(SBR_value) +#define NV_WRITE_USBR_USD_REG_VALUE(usbr, usd_hi, usd_lo) \ + NATIVE_NV_WRITE_USBR_USD_REG_VALUE(usbr, usd_hi, usd_lo) +#define BOOT_WRITE_USBR_REG_VALUE(USBR_value) \ + NATIVE_NV_WRITE_USBR_REG_VALUE(USBR_value) +#define BOOT_WRITE_SBR_REG_VALUE(SBR_value) \ + NATIVE_NV_WRITE_SBR_REG_VALUE(SBR_value) +#define BOOT_NV_WRITE_USBR_USD_REG_VALUE(usbr, usd_hi, usd_lo) \ + NATIVE_NV_WRITE_USBR_USD_REG_VALUE(usbr, usd_hi, usd_lo) + +/* + * Read/write double-word Window Descriptor Register (WD) + */ +#define READ_WD_REG_VALUE() NATIVE_READ_WD_REG_VALUE() + +#define WRITE_WD_REG_VALUE(WD_value) \ + NATIVE_WRITE_WD_REG_VALUE(WD_value) + +#ifdef NEED_PARAVIRT_LOOP_REGISTERS +/* + * Read/write double-word Loop Status Register (LSR) + */ +#define READ_LSR_REG_VALUE() NATIVE_READ_LSR_REG_VALUE() + +#define WRITE_LSR_REG_VALUE(LSR_value) \ + NATIVE_WRITE_LSR_REG_VALUE(LSR_value) + +/* + * Read/write double-word Initial Loop Counters Register (ILCR) + */ +#define READ_ILCR_REG_VALUE() NATIVE_READ_ILCR_REG_VALUE() + +#define WRITE_ILCR_REG_VALUE(ILCR_value) \ + NATIVE_WRITE_ILCR_REG_VALUE(ILCR_value) + +/* + * Write double-word LSR/ILCR registers in complex + */ +#define WRITE_LSR_LSR1_ILCR_ILCR1_REGS_VALUE(lsr, lsr1, ilcr, ilcr1) \ + NATIVE_WRITE_LSR_LSR1_ILCR_ILCR1_REGS_VALUE(lsr, lsr1, \ + 
ilcr, ilcr1) +#endif /* NEED_PARAVIRT_LOOP_REGISTERS */ + +/* + * Read/write OS register which point to current process thread info + * structure (OSR0) + */ +#define READ_CURRENT_REG_VALUE() NATIVE_NV_READ_OSR0_REG_VALUE() +#define BOOT_READ_CURRENT_REG_VALUE() NATIVE_NV_READ_OSR0_REG_VALUE() + +#define WRITE_CURRENT_REG_VALUE(osr0_value) \ + NATIVE_NV_WRITE_OSR0_REG_VALUE(osr0_value) +#define BOOT_WRITE_CURRENT_REG_VALUE(osr0_value) \ + NATIVE_NV_WRITE_OSR0_REG_VALUE(osr0_value) + +/* + * Read/write OS Entries Mask (OSEM) + */ +#define READ_OSEM_REG_VALUE() NATIVE_READ_OSEM_REG_VALUE() + +#define WRITE_OSEM_REG_VALUE(osem_value) \ + NATIVE_WRITE_OSEM_REG_VALUE(osem_value) + +/* + * Read/write word Base Global Register (BGR) + */ +#define READ_BGR_REG_VALUE() NATIVE_READ_BGR_REG_VALUE() +#define BOOT_READ_BGR_REG_VALUE() NATIVE_READ_BGR_REG_VALUE() + +#define WRITE_BGR_REG_VALUE(BGR_value) \ + NATIVE_WRITE_BGR_REG_VALUE(BGR_value) +#define BOOT_WRITE_BGR_REG_VALUE(BGR_value) \ + NATIVE_WRITE_BGR_REG_VALUE(BGR_value) + +/* + * Read CPU current clock regigister (CLKR) + */ +#define READ_CLKR_REG_VALUE() NATIVE_READ_CLKR_REG_VALUE() + +/* + * Read/Write system clock registers (SCLKM) + */ +#define READ_SCLKR_REG_VALUE() NATIVE_READ_SCLKR_REG_VALUE() +#define READ_SCLKM1_REG_VALUE() NATIVE_READ_SCLKM1_REG_VALUE() +#define READ_SCLKM2_REG_VALUE() NATIVE_READ_SCLKM2_REG_VALUE() +#define READ_SCLKM3_REG_VALUE() NATIVE_READ_SCLKM3_REG_VALUE() + +#define WRITE_SCLKR_REG_VALUE(reg_value) \ + NATIVE_WRITE_SCLKR_REG_VALUE(reg_value) +#define WRITE_SCLKM1_REG_VALUE(reg_value) \ + NATIVE_WRITE_SCLKM1_REG_VALUE(reg_value) +#define WRITE_SCLKM2_REG_VALUE(reg_value) \ + NATIVE_WRITE_SCLKM2_REG_VALUE(reg_value) +#define WRITE_SCLKM3_REG_VALUE(reg_value) \ + NATIVE_WRITE_SCLKM3_REG_VALUE(reg_value) + +/* + * Read/Write Control Unit HardWare registers (CU_HW0/CU_HW1) + */ +#define READ_CU_HW0_REG_VALUE() NATIVE_READ_CU_HW0_REG_VALUE() +#define READ_CU_HW1_REG_VALUE() 
NATIVE_READ_CU_HW1_REG_VALUE() + +#define WRITE_CU_HW0_REG_VALUE(reg) NATIVE_WRITE_CU_HW0_REG_VALUE(reg) +#define WRITE_CU_HW1_REG_VALUE(reg) NATIVE_WRITE_CU_HW1_REG_VALUE(reg) + +/* + * Read/write low/high double-word Recovery point register (RPR) + */ +#define READ_RPR_LO_REG_VALUE() NATIVE_READ_RPR_LO_REG_VALUE() +#define READ_RPR_HI_REG_VALUE() NATIVE_READ_RPR_HI_REG_VALUE() +#define READ_SBBP_REG_VALUE() NATIVE_READ_SBBP_REG_VALUE() + +#define WRITE_RPR_LO_REG_VALUE(RPR_lo_value) \ + NATIVE_WRITE_RPR_LO_REG_VALUE(RPR_lo_value) +#define WRITE_RPR_HI_REG_VALUE(RPR_hi_value) \ + NATIVE_WRITE_RPR_HI_REG_VALUE(RPR_hi_value) + +/* + * Read double-word CPU current Instruction Pointer register (IP) + */ +#define READ_IP_REG_VALUE() NATIVE_READ_IP_REG_VALUE() + +/* + * Read debug and monitors regigisters + */ +#define READ_DIBCR_REG_VALUE() NATIVE_READ_DIBCR_REG_VALUE() +#define READ_DIBSR_REG_VALUE() NATIVE_READ_DIBSR_REG_VALUE() +#define READ_DIMCR_REG_VALUE() NATIVE_READ_DIMCR_REG_VALUE() +#define READ_DIBAR0_REG_VALUE() NATIVE_READ_DIBAR0_REG_VALUE() +#define READ_DIBAR1_REG_VALUE() NATIVE_READ_DIBAR1_REG_VALUE() +#define READ_DIBAR2_REG_VALUE() NATIVE_READ_DIBAR2_REG_VALUE() +#define READ_DIBAR3_REG_VALUE() NATIVE_READ_DIBAR3_REG_VALUE() +#define READ_DIMAR0_REG_VALUE() NATIVE_READ_DIMAR0_REG_VALUE() +#define READ_DIMAR1_REG_VALUE() NATIVE_READ_DIMAR1_REG_VALUE() + +#define WRITE_DIBCR_REG_VALUE(DIBCR_value) \ + NATIVE_WRITE_DIBCR_REG_VALUE(DIBCR_value) +#define WRITE_DIBSR_REG_VALUE(DIBSR_value) \ + NATIVE_WRITE_DIBSR_REG_VALUE(DIBSR_value) +#define WRITE_DIMCR_REG_VALUE(DIMCR_value) \ + NATIVE_WRITE_DIMCR_REG_VALUE(DIMCR_value) +#define WRITE_DIBAR0_REG_VALUE(DIBAR0_value) \ + NATIVE_WRITE_DIBAR0_REG_VALUE(DIBAR0_value) +#define WRITE_DIBAR1_REG_VALUE(DIBAR1_value) \ + NATIVE_WRITE_DIBAR1_REG_VALUE(DIBAR1_value) +#define WRITE_DIBAR2_REG_VALUE(DIBAR2_value) \ + NATIVE_WRITE_DIBAR2_REG_VALUE(DIBAR2_value) +#define WRITE_DIBAR3_REG_VALUE(DIBAR3_value) \ + 
NATIVE_WRITE_DIBAR3_REG_VALUE(DIBAR3_value) +#define WRITE_DIMAR0_REG_VALUE(DIMAR0_value) \ + NATIVE_WRITE_DIMAR0_REG_VALUE(DIMAR0_value) +#define WRITE_DIMAR1_REG_VALUE(DIMAR1_value) \ + NATIVE_WRITE_DIMAR1_REG_VALUE(DIMAR1_value) + +/* + * Read/write double-word Compilation Unit Table Register (CUTD) + */ +#define READ_CUTD_REG_VALUE() NATIVE_NV_READ_CUTD_REG_VALUE() + +#define WRITE_CUTD_REG_VALUE(CUTD_value) \ + NATIVE_NV_NOIRQ_WRITE_CUTD_REG_VALUE(CUTD_value) + +/* + * Read word Compilation Unit Index Register (CUIR) + */ +#define READ_CUIR_REG_VALUE() NATIVE_READ_CUIR_REG_VALUE() + +/* + * Read/write word Processor State Register (PSR) + */ +#define READ_PSR_REG_VALUE() NATIVE_NV_READ_PSR_REG_VALUE() +#define BOOT_READ_PSR_REG_VALUE() NATIVE_NV_READ_PSR_REG_VALUE() + +#define WRITE_PSR_REG_VALUE(PSR_value) \ + NATIVE_WRITE_PSR_REG_VALUE(PSR_value) +#define BOOT_WRITE_PSR_REG_VALUE(PSR_value) \ + NATIVE_WRITE_PSR_REG_VALUE(PSR_value) +#define WRITE_PSR_IRQ_BARRIER(PSR_value) \ + NATIVE_WRITE_PSR_IRQ_BARRIER(PSR_value) + +/* + * Read/write word User Processor State Register (UPSR) + */ +#define READ_UPSR_REG_VALUE() NATIVE_NV_READ_UPSR_REG_VALUE() +#define BOOT_READ_UPSR_REG_VALUE() NATIVE_NV_READ_UPSR_REG_VALUE() + +#define WRITE_UPSR_REG_VALUE(UPSR_value) \ + NATIVE_WRITE_UPSR_REG_VALUE(UPSR_value) +#define BOOT_WRITE_UPSR_REG_VALUE(UPSR_value) \ + NATIVE_WRITE_UPSR_REG_VALUE(UPSR_value) +#define WRITE_UPSR_IRQ_BARRIER(UPSR_value) \ + NATIVE_WRITE_UPSR_IRQ_BARRIER(UPSR_value) + +/* + * Read/write word floating point control registers (PFPFR/FPCR/FPSR) + */ +#define READ_PFPFR_REG_VALUE() NATIVE_NV_READ_PFPFR_REG_VALUE() +#define READ_FPCR_REG_VALUE() NATIVE_NV_READ_FPCR_REG_VALUE() +#define READ_FPSR_REG_VALUE() NATIVE_NV_READ_FPSR_REG_VALUE() + +#define WRITE_PFPFR_REG_VALUE(PFPFR_value) \ + NATIVE_NV_WRITE_PFPFR_REG_VALUE(PFPFR_value) +#define WRITE_FPCR_REG_VALUE(FPCR_value) \ + NATIVE_NV_WRITE_FPCR_REG_VALUE(FPCR_value) +#define 
WRITE_FPSR_REG_VALUE(FPSR_value) \ + NATIVE_NV_WRITE_FPSR_REG_VALUE(FPSR_value) + +/* + * Read/write low/high double-word Intel segments registers (xS) + */ + +#define READ_CS_LO_REG_VALUE() NATIVE_READ_CS_LO_REG_VALUE() +#define READ_CS_HI_REG_VALUE() NATIVE_READ_CS_HI_REG_VALUE() +#define READ_DS_LO_REG_VALUE() NATIVE_READ_DS_LO_REG_VALUE() +#define READ_DS_HI_REG_VALUE() NATIVE_READ_DS_HI_REG_VALUE() +#define READ_ES_LO_REG_VALUE() NATIVE_READ_ES_LO_REG_VALUE() +#define READ_ES_HI_REG_VALUE() NATIVE_READ_ES_HI_REG_VALUE() +#define READ_FS_LO_REG_VALUE() NATIVE_READ_FS_LO_REG_VALUE() +#define READ_FS_HI_REG_VALUE() NATIVE_READ_FS_HI_REG_VALUE() +#define READ_GS_LO_REG_VALUE() NATIVE_READ_GS_LO_REG_VALUE() +#define READ_GS_HI_REG_VALUE() NATIVE_READ_GS_HI_REG_VALUE() +#define READ_SS_LO_REG_VALUE() NATIVE_READ_SS_LO_REG_VALUE() +#define READ_SS_HI_REG_VALUE() NATIVE_READ_SS_HI_REG_VALUE() + +#define WRITE_CS_LO_REG_VALUE(sd) NATIVE_CL_WRITE_CS_LO_REG_VALUE(sd) +#define WRITE_CS_HI_REG_VALUE(sd) NATIVE_CL_WRITE_CS_HI_REG_VALUE(sd) +#define WRITE_DS_LO_REG_VALUE(sd) NATIVE_CL_WRITE_DS_LO_REG_VALUE(sd) +#define WRITE_DS_HI_REG_VALUE(sd) NATIVE_CL_WRITE_DS_HI_REG_VALUE(sd) +#define WRITE_ES_LO_REG_VALUE(sd) NATIVE_CL_WRITE_ES_LO_REG_VALUE(sd) +#define WRITE_ES_HI_REG_VALUE(sd) NATIVE_CL_WRITE_ES_HI_REG_VALUE(sd) +#define WRITE_FS_LO_REG_VALUE(sd) NATIVE_CL_WRITE_FS_LO_REG_VALUE(sd) +#define WRITE_FS_HI_REG_VALUE(sd) NATIVE_CL_WRITE_FS_HI_REG_VALUE(sd) +#define WRITE_GS_LO_REG_VALUE(sd) NATIVE_CL_WRITE_GS_LO_REG_VALUE(sd) +#define WRITE_GS_HI_REG_VALUE(sd) NATIVE_CL_WRITE_GS_HI_REG_VALUE(sd) +#define WRITE_SS_LO_REG_VALUE(sd) NATIVE_CL_WRITE_SS_LO_REG_VALUE(sd) +#define WRITE_SS_HI_REG_VALUE(sd) NATIVE_CL_WRITE_SS_HI_REG_VALUE(sd) + +/* + * Read doubleword User Processor Identification Register (IDR) + */ +#define READ_IDR_REG_VALUE() NATIVE_READ_IDR_REG_VALUE() +#define BOOT_READ_IDR_REG_VALUE() NATIVE_READ_IDR_REG_VALUE() + +/* + * Processor Core Mode Register 
(CORE_MODE) + */ +#define READ_CORE_MODE_REG_VALUE() native_read_CORE_MODE_reg_value() +#define BOOT_READ_CORE_MODE_REG_VALUE() boot_native_read_CORE_MODE_reg_value() +#define WRITE_CORE_MODE_REG_VALUE(modes) \ + native_write_CORE_MODE_reg_value(modes) +#define BOOT_WRITE_CORE_MODE_REG_VALUE(modes) \ + boot_native_write_CORE_MODE_reg_value(modes) + +/* + * OS Compilation Unit Table Descriptor Register (OSCUTD) + */ +#define READ_OSCUTD_REG_VALUE() native_read_OSCUTD_reg_value() +#define BOOT_READ_OSCUTD_REG_VALUE() boot_native_read_OSCUTD_reg_value() +#define WRITE_OSCUTD_REG_VALUE(desc) \ + native_write_OSCUTD_reg_value(desc) +#define BOOT_WRITE_OSCUTD_REG_VALUE(desc) \ + boot_native_write_OSCUTD_reg_value((desc)) + +/* + * OS Compilation Unit Index Register (OSCUIR) + */ +#define READ_OSCUIR_REG_VALUE() native_read_OSCUIR_reg_value() +#define WRITE_OSCUIR_REG_VALUE(v) native_write_OSCUIR_reg_value((v)) +#define BOOT_READ_OSCUIR_REG_VALUE() boot_native_read_OSCUIR_reg_value() +#define BOOT_WRITE_OSCUIR_REG_VALUE(v) boot_native_write_OSCUIR_reg_value((v)) + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* _E2K_CPU_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/cpu_regs_types.h b/arch/e2k/include/asm/cpu_regs_types.h new file mode 100644 index 000000000000..092bd76b4e92 --- /dev/null +++ b/arch/e2k/include/asm/cpu_regs_types.h @@ -0,0 +1,2511 @@ + +#ifndef _E2K_CPU_REGS_TYPES_H_ +#define _E2K_CPU_REGS_TYPES_H_ + +#ifdef __KERNEL__ + +#include + +#ifndef __ASSEMBLY__ + +/* E2K physical address definitions */ + +/* E2K physical address size (bits number) */ +#define MAX_PA_SIZE CONFIG_E2K_PA_BITS +/* The number of the most significant bit of E2K physical address */ +#define MAX_PA_MSB (MAX_PA_SIZE - 1) +#define MAX_PA_MASK ((1UL << MAX_PA_SIZE) - 1) +#define MAX_PM_SIZE (1UL << MAX_PA_SIZE) + +/* E2K virtual address definitions */ +#define MAX_VA_SIZE 59 /* Virtual address maximum */ + /* size (bits 
number) */ +#define MAX_VA_MSB (MAX_VA_SIZE -1) /* The maximum number of the */ + /* most significant bit of */ + /* virtual address */ +#define MAX_VA_MASK ((1UL << MAX_VA_SIZE) - 1) + +#define E2K_VA_SIZE 48 /* E2K Virtual address size */ + /* (bits number) */ +#define E2K_VA_MSB (E2K_VA_SIZE - 1) /* The number of the most */ + /* significant bit of E2K */ + /* virtual address */ +#define E2K_VA_MASK ((1UL << E2K_VA_SIZE) - 1) + +#define E2K_VA_PAGE_MASK (E2K_VA_MASK & PAGE_MASK) + + +/* + * Read/Write Pointer (RWP) (64 bits) + */ +typedef struct e2k_rwp_fields { /* Structure of Read/write pointer */ + u64 base : E2K_VA_SIZE; /* [47: 0] */ + u64 unused2 : 53 - E2K_VA_MSB; /* [53:48] */ + u64 stub5 : 1; /* [54] */ + u64 stub4 : 1; /* [55] */ + u64 stub3 : 1; /* [56] */ + u64 stub2 : 1; /* [57] */ + u64 stub1 : 1; /* [58] */ + u64 unused : 5; /* [63:59] */ +} e2k_rwp_fields_t; +typedef union e2k_rwp_struct { /* Structure of lower word */ + e2k_rwp_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_rwp_struct_t; +#define E2K_RWP_stub1 fields.stub1 +#define E2K_RWP_stub2 fields.stub2 +#define E2K_RWP_stub3 fields.stub3 +#define E2K_RWP_stub4 fields.stub4 +#define E2K_RWP_stub5 fields.stub5 +#define E2K_RWP_base fields.base +#define E2K_RWP_reg word + +/* + * Read/Write Array Pointer (RWAP) + */ +typedef struct e2k_rwap_lo_fields { /* Fields of lower word */ + u64 base : E2K_VA_SIZE; /* [47: 0] */ + u64 unused2 : 55 - E2K_VA_MSB; /* [55:48] */ + u64 stub3 : 1; /* [56] */ + u64 stub2 : 1; /* [57] */ + u64 stub1 : 1; /* [58] */ + u64 rw : 2; /* [60:59] */ + u64 itag : 3; /* [63:61] */ +} e2k_rwap_lo_fields_t; +typedef struct e2k_rusd_lo_fields { /* Fields of lower word */ + u64 base : E2K_VA_SIZE; /* [47: 0] */ + u64 unused2 : 57 - E2K_VA_MSB; /* [57:48] */ + u64 p : 1; /* [58] */ + u64 rw : 2; /* [60:59] */ + u64 unused : 3; /* [63:61] */ +} e2k_rusd_lo_fields_t; +typedef union e2k_rwap_lo_struct { /* Structure of lower word */ + 
e2k_rwap_lo_fields_t ap_fields; /* as AP fields */ + e2k_rusd_lo_fields_t fields; /* as USD fields */ + u64 word; /* as entire register */ +} e2k_rwap_lo_struct_t; +#define E2K_RWAP_lo_itag ap_fields.itag +#define E2K_RWAP_lo_rw ap_fields.rw +#define E2K_RWAP_lo_stub1 ap_fields.stub1 +#define E2K_RWAP_lo_stub2 ap_fields.stub2 +#define E2K_RWAP_lo_stub3 ap_fields.stub3 +#define E2K_RWAP_lo_base ap_fields.base +#define E2K_RUSD_lo_rw fields.rw +#define E2K_RUSD_lo_p fields.p +#define E2K_RUSD_lo_p_bit 58 /* do not forget to modify if changed */ +#define E2K_RUSD_lo_base fields.base +#define E2K_RWAP_lo_half word +#define E2K_RUSD_lo_half word + +typedef struct e2k_rwap_hi_fields { /* Fields of high word */ + u64 curptr : 32; /* [31: 0] */ + u64 size : 32; /* [63:32] */ +} e2k_rwap_hi_fields_t; +typedef struct e2k_rpsp_hi_fields { /* Fields of high word */ + u64 ind : 32; /* [31: 0] */ + u64 size : 32; /* [63:32] */ +} e2k_rpsp_hi_fields_t; +typedef union e2k_rwap_hi_struct { /* Structure of high word */ + e2k_rwap_hi_fields_t ap_fields; /* as AP fields */ + e2k_rpsp_hi_fields_t fields; /* as PSP fields */ + u64 word; /* as entire register */ +} e2k_rwap_hi_struct_t; +#define E2K_RWAP_hi_size ap_fields.size +#define E2K_RWAP_hi_curptr ap_fields.curptr +#define E2K_RWAP_hi_half word +#define E2K_RPSP_hi_size fields.size +#define E2K_RPSP_hi_ind fields.ind +#define E2K_RPSP_hi_half word + +typedef struct e2k_rwap_struct { /* quad-word register */ + e2k_rwap_lo_struct_t lo; + e2k_rwap_hi_struct_t hi; +} e2k_rwap_struct_t; +#define E2K_RWAP_lo_struct lo +#define E2K_RUSD_lo_struct lo +#define E2K_RWAP_hi_struct hi +#define E2K_RPSP_hi_struct hi +#define E2K_RWAP_itag lo.E2K_RWAP_lo_itag +#define E2K_RWAP_rw lo.E2K_RWAP_lo_rw +#define E2K_RWAP_stub1 lo.E2K_RWAP_lo_stub1 +#define E2K_RWAP_stub2 lo.E2K_RWAP_lo_stub2 +#define E2K_RWAP_stub3 lo.E2K_RWAP_lo_stub3 +#define E2K_RWAP_base lo.E2K_RWAP_lo_base +#define E2K_RUSD_rw lo.E2K_RUSD_lo_rw +#define E2K_RUSD_p 
lo.E2K_RUSD_lo_p +#define E2K_RUSD_p_bit E2K_RUSD_lo_p_bit /* protected flag */ +#define E2K_RUSD_p_flag (1 << E2K_RUSD_p_bit) /* as value */ +#define E2K_RUSD_base lo.E2K_RUSD_lo_base +#define E2K_RWAP_size hi.E2K_RWAP_hi_size +#define E2K_RWAP_curptr hi.E2K_RWAP_hi_curptr +#define E2K_RPSP_size hi.E2K_RPSP_hi_size +#define E2K_RPSP_ind hi.E2K_RPSP_hi_ind +#define E2K_RWAP_lo_reg lo.E2K_RWAP_lo_half +#define E2K_RUSD_lo_reg lo.E2K_RUSD_lo_half +#define E2K_RWAP_hi_reg hi.E2K_RWAP_hi_half +#define E2K_RPSP_hi_reg hi.E2K_RPSP_hi_half + +#define E2_RWAR_R_ENABLE 0x1 +#define E2_RWAR_W_ENABLE 0x2 +#define E2_RWAR_RW_ENABLE (E2_RWAR_R_ENABLE | E2_RWAR_W_ENABLE) +#define E2_RWAR_C_TRUE 0x1 + +#define R_ENABLE 0x1 +#define W_ENABLE 0x2 +#define RW_ENABLE 0x3 + +/* + * Read/Write Stack Array Pointer (RWSAP) + */ +typedef struct e2k_rwsap_lo_fields { /* Fields of lower word */ + u64 base : 32; /* [31: 0] */ + u64 psl : 16; /* [47:32] */ + u64 unused2 : 8; /* [55:48] */ + u64 stub3 : 1; /* [56] */ + u64 stub2 : 1; /* [57] */ + u64 stub1 : 1; /* [58] */ + u64 rw : 2; /* [60:59] */ + u64 itag : 3; /* [63:61] */ +} e2k_rwsap_lo_fields_t; +typedef struct e2k_rpusd_lo_fields { /* Fields of lower word */ + u64 base : 32; /* [31: 0] */ + u64 psl : 16; /* [47:32] */ + u64 unused2 : 10; /* [57:48] */ + u64 p : 1; /* [58] */ + u64 rw : 2; /* [60:59] */ + u64 unused : 3; /* [63:61] */ +} e2k_rpusd_lo_fields_t; +typedef union e2k_rwsap_lo_struct { /* Structure of lower word */ + e2k_rwsap_lo_fields_t sap_fields; /* as SAP fields */ + e2k_rpusd_lo_fields_t fields; /* as PUSD fields */ + u64 word; /* as entire register */ +} e2k_rwsap_lo_struct_t; +#define E2K_RWSAP_lo_itag sap_fields.itag +#define E2K_RWSAP_lo_rw sap_fields.rw +#define E2K_RWSAP_lo_stub1 sap_fields.stub1 +#define E2K_RWSAP_lo_stub2 sap_fields.stub2 +#define E2K_RWSAP_lo_stub3 sap_fields.stub3 +#define E2K_RWSAP_lo_psl sap_fields.psl +#define E2K_RWSAP_lo_base sap_fields.base +#define E2K_RPUSD_lo_rw fields.rw +#define 
E2K_RPUSD_lo_p fields.p +#define E2K_RPUSD_lo_psl fields.psl +#define E2K_RPUSD_lo_base fields.base +#define E2K_RWSAP_lo_half word +#define E2K_RPUSD_lo_half word + +typedef struct e2k_rwsap_hi_fields { /* Fields of high word */ + u64 curptr : 32; /* [31: 0] */ + u64 size : 32; /* [63:32] */ +} e2k_rwsap_hi_fields_t; +typedef union e2k_rwsap_hi_struct { /* Structure of high word */ + e2k_rwsap_hi_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_rwsap_hi_struct_t; +#define E2K_RWSAP_hi_size fields.size +#define E2K_RWSAP_hi_curptr fields.curptr +#define E2K_RWSAP_hi_half word + +typedef struct e2k_rwsap_struct { /* quad-word register */ + e2k_rwsap_lo_struct_t lo; + e2k_rwsap_hi_struct_t hi; +} e2k_rwsap_struct_t; +#define E2K_RWSAP_lo_struct lo +#define E2K_RPUSD_lo_struct lo +#define E2K_RWSAP_hi_struct hi +#define E2K_RWSAP_itag lo.E2K_RWSAP_lo_itag +#define E2K_RWSAP_rw lo.E2K_RWSAP_lo_rw +#define E2K_RWSAP_stub1 lo.E2K_RWSAP_lo_stub1 +#define E2K_RWSAP_stub2 lo.E2K_RWSAP_lo_stub2 +#define E2K_RWSAP_stub3 lo.E2K_RWSAP_lo_stub3 +#define E2K_RWSAP_psl lo.E2K_RWSAP_lo_psl +#define E2K_RWSAP_base lo.E2K_RWSAP_lo_base +#define E2K_RPUSD_rw lo.E2K_RPUSD_lo_rw +#define E2K_RPUSD_p lo.E2K_RPUSD_lo_p +#define E2K_RPUSD_psl lo.E2K_RPUSD_lo_psl +#define E2K_RPUSD_base lo.E2K_RPUSD_lo_base +#define E2K_RWSAP_size hi.E2K_RWSAP_hi_size +#define E2K_RWSAP_curptr hi.E2K_RWSAP_hi_curptr +#define E2K_RWSAP_lo_reg lo.E2K_RWSAP_lo_half +#define E2K_RPUSD_lo_reg lo.E2K_RPUSD_lo_half +#define E2K_RWSAP_hi_reg hi.E2K_RWSAP_hi_half + +/* + * Compilation Unit Descriptor (CUD) + * describes the memory containing codes of the current compilation unit + */ + + /* + * Structure of lower word + * access CUD.lo.CUD_lo_xxx or CUD -> lo.CUD_lo_xxx + * or CUD_lo.CUD_lo_xxx or CUD_lo -> CUD_lo_xxx + */ +typedef e2k_rwap_lo_struct_t e2k_cud_lo_t; +#define _CUD_lo_rw E2K_RWAP_lo_rw /* [60:59] - read/write flags */ + /* should be "R" */ +#define E2K_CUD_RW_PROTECTIONS 
E2_RWAR_R_ENABLE +#define CUD_lo_c E2K_RWAP_lo_stub1 /* [58] - checked flag, */ + /* if set then literal CT */ + /* is correct */ +#define E2K_CUD_CHECKED_FLAG E2_RWAR_C_TRUE +#define CUD_lo_base E2K_RWAP_lo_base /* [47: 0] - base address */ +#define CUD_lo_half E2K_RWAP_lo_half /* [63: 0] - entire lower */ + /* double-word of register */ + /* + * Structure of high word + * access CUD.hi.CUD_hi_xxx or CUD -> hi.CUD_hi_xxx + * or CUD_hi.CUD_hi_xxx or CUD_hi -> CUD_hi_xxx + */ +typedef e2k_rwap_hi_struct_t e2k_cud_hi_t; +#define CUD_hi_size E2K_RWAP_hi_size /* [63:32] - size */ +#define _CUD_hi_curptr E2K_RWAP_hi_curptr /* [31: 0] - should be 0 */ +#define CUD_hi_half E2K_RWAP_hi_half /* [63: 0] - entire high */ + /* double-word of register */ + + /* + * Structure of quad-word register + * access CUD.CUD_xxx or CUD -> CUD_xxx + */ +typedef e2k_rwap_struct_t cud_struct_t; +#define _CUD_rw E2K_RWAP_rw /* [60:59] - read/write flags */ + /* should be "R" */ +#define CUD_c E2K_RWAP_stub1 /* [58] - checked flag, */ + /* if set then literal CT */ + /* is correct */ +#define CUD_base E2K_RWAP_base /* [47: 0] - base address */ +#define CUD_size E2K_RWAP_size /* [63:32] - size */ +#define _CUD_curptr E2K_RWAP_curptr /* [31: 0] - should be 0 */ +#define CUD_lo_reg E2K_RWAP_lo_reg /* [63: 0] - entire lower */ + /* double-word of register */ +#define CUD_hi_reg E2K_RWAP_hi_reg /* [63: 0] - entire high */ + /* double-word of register */ +#define CUD_lo_struct E2K_RWAP_lo_struct /* low register structure */ +#define CUD_hi_struct E2K_RWAP_hi_struct /* high register structure */ +#endif /* !(__ASSEMBLY__) */ + +#define E2K_ALIGN_CODES 12 /* Codes area boundaries */ + /* alignment (2's exponent */ + /* value */ +#ifndef __ASSEMBLY__ +#define E2K_ALIGN_CODES_MASK ((1UL << E2K_ALIGN_CODES) - 1) +#else /* __ASSEMBLY__ */ +#define E2K_ALIGN_CODES_MASK ((1 << E2K_ALIGN_CODES) - 1) +#endif /* !(__ASSEMBLY__) */ + +#ifndef __ASSEMBLY__ +/* + * Compilation Unit Globals Descriptor (GD) + * 
describes the global variables memory of the current compilation unit + */ + + /* + * Structure of lower word + * access GD.lo.GD_lo_xxx or GD -> lo.GD_lo_xxx + * or GD_lo.GD_lo_xxx or GD_lo -> GD_lo_xxx + */ +typedef e2k_rwap_lo_struct_t e2k_gd_lo_t; +#define _GD_lo_rw E2K_RWAP_lo_rw /* [60:59] - read/write flags */ + /* should be "RW" */ +#define E2K_GD_RW_PROTECTIONS E2_RWAR_RW_ENABLE; +#define GD_lo_base E2K_RWAP_lo_base /* [47: 0] - base address */ +#define GD_lo_half E2K_RWAP_lo_half /* [63: 0] - entire lower */ + /* double-word of register */ + + /* + * Structure of high word + * access GD.hi.GD_hi_xxx or GD -> hi.GD_hi_xxx + * or GD_hi.GD_hi_xxx or GD_hi -> GD_hi_xxx + */ +typedef e2k_rwap_hi_struct_t e2k_gd_hi_t; +#define GD_hi_size E2K_RWAP_hi_size /* [63:32] - size */ +#define _GD_hi_curptr E2K_RWAP_hi_curptr /* [31: 0] - should be 0 */ +#define GD_hi_half E2K_RWAP_hi_half /* [63: 0] - entire high */ + /* double-word of register */ + + /* + * Structure of quad-word register + * access GD.GD_xxx or GD -> GD_xxx + */ +typedef e2k_rwap_struct_t gd_struct_t; +#define _GD_rw E2K_RWAP_rw /* [60:59] - read/write flags */ + /* should be "RW" */ +#define GD_base E2K_RWAP_base /* [47: 0] - base address */ +#define GD_size E2K_RWAP_size /* [63:32] - size */ +#define _GD_curptr E2K_RWAP_curptr /* [31: 0] - should be 0 */ +#define GD_lo_reg E2K_RWAP_lo_reg /* [63: 0] - entire lower */ + /* double-word of register */ +#define GD_hi_reg E2K_RWAP_hi_reg /* [63: 0] - entire high */ + /* double-word of register */ +#define GD_lo_struct E2K_RWAP_lo_struct /* low register structure */ +#define GD_hi_struct E2K_RWAP_hi_struct /* high register structure */ +#endif /* !(__ASSEMBLY__) */ + +#define E2K_ALIGN_GLOBALS 12 /* Globals area boundaries */ + /* alignment (2's exponent */ + /* value */ +#define E2K_ALIGN_GLOBALS_SZ _BITUL(E2K_ALIGN_GLOBALS) +#define E2K_ALIGN_GLOBALS_MASK (_BITUL(E2K_ALIGN_GLOBALS) - 1) + +#ifndef __ASSEMBLY__ +/* + * OS Compilation Unit Descriptor 
(OSCUD) + * describes the global variables memory containing interface codes of the OS + */ + + /* + * Structure of lower word + * access OSCUD.lo.OSCUD_xxx or OSCUD -> lo.OSCUD_xxx + * or OSCUD_lo.OSCUD_xxx or OSCUD_lo -> OSCUD_xxx + */ +typedef e2k_rwap_lo_struct_t e2k_oscud_lo_t; +#define _OSCUD_lo_rw E2K_RWAP_lo_rw /* [60:59] - read/write flags */ + /* should be "R" */ +#define E2K_OSCUD_RW_PROTECTIONS E2_RWAR_R_ENABLE; +#define OSCUD_lo_c E2K_RWAP_lo_stub1 /* [58] - checked flag, */ + /* if set then literal CT */ + /* is correct */ +#define OSCUD_lo_base E2K_RWAP_lo_base /* [47: 0] - base address */ +#define OSCUD_lo_half E2K_RWAP_lo_half /* [63: 0] - entire lower */ + /* double-word of register */ + +#define OSCUD_lo_base_mask E2K_VA_MASK + + /* + * Structure of high word + * access OSCUD.hi.OSCUD_xxx or OSCUD -> hi.OSCUD_xxx + * or OSCUD_hi.OSCUD_xxx or OSCUD_hi -> OSCUD_xxx + */ +typedef e2k_rwap_hi_struct_t e2k_oscud_hi_t; +#define OSCUD_hi_size E2K_RWAP_hi_size /* [63:32] - size */ +#define _OSCUD_hi_curptr \ + E2K_RWAP_hi_curptr /* [31: 0] - should be 0 */ +#define OSCUD_hi_half E2K_RWAP_hi_half /* [63: 0] - entire high */ + /* double-word of register */ + + /* + * Structure of quad-word register + * access OSCUD.OSCUD_xxx or OSCUD -> OSCUD_xxx + */ +typedef e2k_rwap_struct_t oscud_struct_t; +#define _OSCUD_rw E2K_RWAP_rw /* [60:59] - read/write flags */ + /* should be "R" */ +#define OSCUD_c E2K_RWAP_stub1 /* [58] - checked flag, */ + /* if set then literal CT */ + /* is correct */ +#define OSCUD_base E2K_RWAP_base /* [47: 0] - base address */ +#define OSCUD_size E2K_RWAP_size /* [63:32] - size */ +#define _OSCUD_curptr E2K_RWAP_curptr /* [31: 0] - should be 0 */ +#define OSCUD_lo_reg E2K_RWAP_lo_reg /* [63: 0] - entire lower */ + /* double-word of register */ +#define OSCUD_hi_reg E2K_RWAP_hi_reg /* [63: 0] - entire high */ + /* double-word of register */ +#define OSCUD_lo_struct E2K_RWAP_lo_struct /* low register structure */ +#define OSCUD_hi_struct 
E2K_RWAP_hi_struct /* high register structure */ +#endif /* !(__ASSEMBLY__) */ + +#define E2K_ALIGN_OSCU 12 /* OS codes area boundaries */ + /* alignment (2's exponent */ + /* value */ +#ifndef __ASSEMBLY__ +#define E2K_ALIGN_OSCU_MASK ((1UL << E2K_ALIGN_OSCU) - 1) +#else /* __ASSEMBLY__ */ +#define E2K_ALIGN_OSCU_MASK ((1 << E2K_ALIGN_OSCU) - 1) +#endif /* !(__ASSEMBLY__) */ + +#ifndef __ASSEMBLY__ +/* + * OS Compilation Unit Globals Descriptor (OSGD) + * describes the OS global variables memory + */ + + /* + * Structure of lower word + * access OSGD.lo.OSGD_lo_xxx or OSGD -> lo.OSGD_lo_xxx + * or OSGD_lo.OSGD_lo_xxx or OSGD_lo -> OSGD_lo_xxx + */ +typedef e2k_rwap_lo_struct_t e2k_osgd_lo_t; +#define _OSGD_lo_rw E2K_RWAP_lo_rw /* [60:59] - read/write flags */ + /* should be "RW" */ +#define E2K_OSGD_RW_PROTECTIONS E2_RWAR_RW_ENABLE; +#define OSGD_lo_base E2K_RWAP_lo_base /* [47: 0] - base address */ +#define OSGD_lo_half E2K_RWAP_lo_half /* [63: 0] - entire lower */ + /* double-word of register */ + + /* + * Structure of high word + * access OSGD.hi.OSGD_hi_xxx or OSGD -> hi.OSGD_hi_xxx + * or OSGD_hi.OSGD_hi_xxx or OSGD_hi -> OSGD_hi_xxx + */ +typedef e2k_rwap_hi_struct_t e2k_osgd_hi_t; +#define OSGD_hi_size E2K_RWAP_hi_size /* [63:32] - size */ +#define _OSGD_hi_curptr E2K_RWAP_hi_curptr /* [31: 0] - should be 0 */ +#define OSGD_hi_half E2K_RWAP_hi_half /* [63: 0] - entire high */ + /* double-word of register */ + + /* + * Structure of quad-word register + * access OSGD.OSGD_xxx or OSGD -> OSGD_xxx + */ +typedef e2k_rwap_struct_t osgd_struct_t; +#define _OSGD_rw E2K_RWAP_rw /* [60:59] - read/write flags */ + /* should be "RW" */ +#define OSGD_base E2K_RWAP_base /* [47: 0] - base address */ +#define OSGD_size E2K_RWAP_size /* [63:32] - size */ +#define _OSGD_curptr E2K_RWAP_curptr /* [31: 0] - should be 0 */ +#define OSGD_lo_reg E2K_RWAP_lo_reg /* [63: 0] - entire lower */ + /* double-word of register */ +#define OSGD_hi_reg E2K_RWAP_hi_reg /* [63: 0] - entire 
high */ + /* double-word of register */ +#define OSGD_lo_struct E2K_RWAP_lo_struct /* low register structure */ +#define OSGD_hi_struct E2K_RWAP_hi_struct /* high register structure */ +#endif /* !(__ASSEMBLY__) */ + +#define E2K_ALIGN_OS_GLOBALS 12 /* OS Globals area boundaries */ + /* alignment (2's exponent */ + /* value */ +#ifndef __ASSEMBLY__ +#define E2K_ALIGN_OS_GLOBALS_MASK ((1UL << E2K_ALIGN_OS_GLOBALS) - 1) +#else /* __ASSEMBLY__ */ +#define E2K_ALIGN_OS_GLOBALS_MASK ((1 << E2K_ALIGN_OS_GLOBALS) - 1) +#endif /* !(__ASSEMBLY__) */ + +#ifndef __ASSEMBLY__ +/* + * Procedure Stack Pointer (PSP) + * describes the full procedure stack memory as well as the current pointer + * to the top of a procedure stack memory part. + */ + + /* + * Structure of lower word + * access PSP.lo.PSP_lo_xxx or PSP -> lo.PSP_lo_xxx + * or PSP_lo.PSP_lo_xxx or PSP_lo -> PSP_lo_xxx + */ +typedef e2k_rwap_lo_struct_t e2k_psp_lo_t; +#define _PSP_lo_rw E2K_RWAP_lo_rw /* [60:59] - read/write flags */ + /* should be "RW" */ +#define E2K_PSP_RW_PROTECTIONS E2_RWAR_RW_ENABLE; +#define PSP_lo_base E2K_RWAP_lo_base /* [47: 0] - base address */ +#define PSP_lo_half E2K_RWAP_lo_half /* [63: 0] - entire lower */ + /* double-word of register */ + + /* + * Structure of high word + * access PSP.hi.PSP_hi_xxx or PSP -> hi.PSP_hi_xxx + * or PSP_hi.PSP_hi_xxx or PSP_hi -> PSP_hi_xxx + */ +typedef e2k_rwap_hi_struct_t e2k_psp_hi_t; +#define PSP_hi_size E2K_RPSP_hi_size /* [63:32] - size */ +#define PSP_hi_ind E2K_RPSP_hi_ind /* [31: 0] - index for SPILL */ + /* and FILL */ +#define PSP_hi_half E2K_RPSP_hi_half /* [63: 0] - entire high */ + /* double-word of register */ + /* + * Structure of LSR -Loop status register + */ + +typedef struct e2k_lsr_fields { + u64 lcnt : 32; /* [31: 0] (loop counter) */ + u64 ecnt : 5; /* [36:32] (epilogue counter)*/ + u64 vlc : 1; /* [37] (loop counter valid bit) */ + u64 over : 1; /* [38] */ + u64 ldmc : 1; /* [39] (loads manual control)*/ + u64 ldovl : 8; /* [47:40] 
(load overlap)*/ + u64 pcnt : 5; /* [52:48] (prologue counter)*/ + u64 strmd : 7; /* [59:53] (store remainder counter)*/ + u64 semc : 1; /* [60] (side effects manual control */ + u64 unused : 3; /* [63:61] */ +} e2k_lsr_fields_t; + +typedef union e2k_lsr_struct_t { /* quad-word register */ + e2k_lsr_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_lsr_t; + +#define LSR_lcnt fields.lcnt +#define LSR_ecnt fields.ecnt +#define LSR_vlc fields.vlc +#define LSR_over fields.over +#define LSR_ldmc fields.ldmc +#define LSR_ldovl fields.ldovl +#define LSR_pcnt fields.pcnt +#define LSR_strmd fields.strmd +#define LSR_semc fields.semc +#define LSR_reg word + +/* see C.19.1. */ +#define ls_prlg(x) ((x).LSR_pcnt != 0) +#define ls_lst_itr(x) ((x).LSR_vlc && ((x).LSR_lcnt < 2)) +#define ls_loop_end(x) (ls_lst_itr(x) && ((x).LSR_ecnt == 0)) + +#define E2K_LSR_VLC (1UL << 37) + + /* + * Structure of ILCR - Initial loop counters register + */ + +typedef struct e2k_ilcr_fields { + u64 lcnt : 32; /* [31: 0] (loop counter) */ + u64 ecnt : 5; /* [36:32] (epilogue counter)*/ + u64 unused1 : 11; /* [47:37] unused */ + u64 pcnt : 5; /* [52:48] (prologue counter)*/ + u64 unused2 : 11; /* [63:53] unused */ +} e2k_ilcr_fields_t; + +typedef union e2k_ilcr { /* quad-word register */ + e2k_ilcr_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_ilcr_t; + +#define ILCR_lcnt fields.lcnt +#define ILCR_ecnt fields.ecnt +#define ILCR_pcnt fields.pcnt +#define ILCR_reg word + +/* see C.17.1.2. 
*/ +typedef struct e2k_ct_op_fields { + u64 psrc : 5; /* [4:0] (pointer to condition)*/ + u64 ct : 4; /* [8:5] (condition type) */ +} e2k_ct_op_fields_t; + +typedef union e2k_ct_struct_t { + e2k_ct_op_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_ct_t; + +#define CT_reg(x) ((x).word) +#define CT_PSRC(x) ((x).fields.psrc) +#define CT_CT(x) ((x).fields.ct) + + /* + * Structure of quad-word register + * access PSP.PSP_xxx or PSP -> PSP_xxx + */ +typedef e2k_rwap_struct_t psp_struct_t; +#define _PSP_rw E2K_RWAP_rw /* [60:59] - read/write flags */ + /* should be "RW" */ +#define PSP_base E2K_RWAP_base /* [47: 0] - base address */ +#define PSP_size E2K_RPSP_size /* [63:32] - size */ +#define PSP_ind E2K_RPSP_ind /* [31: 0] - index for SPILL */ + /* and FILL */ +#define PSP_lo_reg E2K_RWAP_lo_reg /* [63: 0] - entire lower */ + /* double-word of register */ +#define PSP_hi_reg E2K_RPSP_hi_reg /* [63: 0] - entire high */ + /* double-word of register */ +#define PSP_lo_struct E2K_RWAP_lo_struct /* low register structure */ +#define PSP_hi_struct E2K_RPSP_hi_struct /* high register structure */ +#endif /* !(__ASSEMBLY__) */ + +#define E2K_ALIGN_PSTACK 12 /* Procedure stack boundaries */ + /* alignment (2's exponent */ + /* value) */ +#define E2K_ALIGN_PSTACK_TOP 5 /* Procedure stack top */ + /* boundaries alignment */ + /* (2's exponent value) */ +#ifndef __ASSEMBLY__ +# define ALIGN_PSTACK_SIZE (1ULL << E2K_ALIGN_PSTACK) +# define ALIGN_PSTACK_TOP_SIZE (1ULL << E2K_ALIGN_PSTACK_TOP) +#else /* __ASSEMBLY__ */ +# define ALIGN_PSTACK_SIZE (1 << E2K_ALIGN_PSTACK) +# define ALIGN_PSTACK_TOP_SIZE (1 << E2K_ALIGN_PSTACK_TOP) +#endif /* !(__ASSEMBLY__) */ + +#define E2K_ALIGN_PSTACK_MASK (ALIGN_PSTACK_SIZE - 1) +#define E2K_ALIGN_PSTACK_TOP_MASK (ALIGN_PSTACK_TOP_SIZE - 1) + +#ifndef __ASSEMBLY__ +/* + * Procedure Chain Stack Pointer (PCSP) + * describes the full procedure chain stack memory as well as the current + * pointer to the top of a procedure 
chain stack memory part. + */ + + /* + * Structure of lower word + * access PCSP.lo.PCSP_lo_xxx or PCSP -> lo.PCSP_lo_xxx + * or PCSP_lo.PCSP_lo_xxx or PCSP_lo -> PCSP_lo_xxx + */ +typedef e2k_rwap_lo_struct_t e2k_pcsp_lo_t; +#define _PCSP_lo_rw E2K_RWAP_lo_rw /* [60:59] - read/write flags */ + /* should be "RW" */ +#define E2K_PCSR_RW_PROTECTIONS E2_RWAR_RW_ENABLE; +#define PCSP_lo_base E2K_RWAP_lo_base /* [47: 0] - base address */ +#define PCSP_lo_half E2K_RWAP_lo_half /* [63: 0] - entire lower */ + /* double-word of register */ + /* + * Structure of high word + * access PCSP.hi.PCSP_hi_xxx or PCSP -> hi.PCSP_hi_xxx + * or PCSP_hi.PCSP_hi_xxx or PCSP_hi -> PCSP_hi_xxx + */ +typedef e2k_rwap_hi_struct_t e2k_pcsp_hi_t; +#define PCSP_hi_size E2K_RPSP_hi_size /* [63:32] - size */ +#define PCSP_hi_ind E2K_RPSP_hi_ind /* [31: 0] - index for SPILL */ + /* and FILL */ +#define PCSP_hi_half E2K_RPSP_hi_half /* [63: 0] - entire high */ + + /* + * Structure of quad-word register + * access PCSP.PCSP_xxx or PCSP -> PCSP_xxx + */ +typedef e2k_rwap_struct_t pcsp_struct_t; +#define _PCSP_rw E2K_RWAP_rw /* [60:59] - read/write flags */ + /* should be "RW" */ +#define PCSP_base E2K_RWAP_base /* [47: 0] - base address */ +#define PCSP_size E2K_RPSP_size /* [63:32] - size */ +#define PCSP_ind E2K_RPSP_ind /* [31: 0] - index for SPILL */ + /* and FILL */ +#define PCSP_lo_reg E2K_RWAP_lo_reg /* [63: 0] - entire lower */ + /* double-word of register */ +#define PCSP_hi_reg E2K_RPSP_hi_reg /* [63: 0] - entire high */ + /* double-word of register */ +#define PCSP_lo_struct E2K_RWAP_lo_struct /* low register structure */ +#define PCSP_hi_struct E2K_RPSP_hi_struct /* high register structure */ +#endif /* !(__ASSEMBLY__) */ + +#define E2K_ALIGN_PCSTACK 12 /* Procedure chain stack */ + /* boundaries alignment */ + /* (2's exponent value) */ +#define E2K_ALIGN_PCSTACK_TOP 5 /* Procedure chain stack top */ + /* boundaries alignment */ + /* (2's exponent value) */ + +#ifndef __ASSEMBLY__ +# 
define ALIGN_PCSTACK_SIZE (1ULL << E2K_ALIGN_PCSTACK) +# define ALIGN_PCSTACK_TOP_SIZE (1ULL << E2K_ALIGN_PCSTACK_TOP) +#else +# define ALIGN_PCSTACK_SIZE (1 << E2K_ALIGN_PCSTACK) +# define ALIGN_PCSTACK_TOP_SIZE (1 << E2K_ALIGN_PCSTACK_TOP) +#endif + +#define E2K_ALIGN_PCSTACK_MASK (ALIGN_PCSTACK_SIZE - 1) +#define E2K_ALIGN_PCSTACK_TOP_MASK (ALIGN_PCSTACK_TOP_SIZE - 1) + + +/* + * ========== numeric registers (register file) =========== + */ + +#define E2K_MAXCR 64 /* The total number of */ + /* chain registers */ +#define E2K_MAXCR_q E2K_MAXCR /* The total number of */ + /* chain quad-registers */ +#define E2K_ALIGN_CHAIN_WINDOW 5 /* Chain registers Window */ + /* boundaries alignment */ +#define E2K_CWD_MSB 9 /* The number of the */ + /* most significant bit */ + /* of CWD_base */ +#define E2K_CWD_SIZE (E2K_CWD_MSB + 1) /* The number of bits in */ + /* CWD_base field */ +#define E2K_PCSHTP_MSB (E2K_CWD_MSB + 1) /* The number of the */ + /* most significant bit */ + /* of PCSHTP */ +#define E2K_PCSHTP_SIZE (E2K_PCSHTP_MSB + 1) /* The number of bits in */ + /* PCSHTP */ + +/* Maximum size to be filled by hardware */ +#define E2K_CF_MAX_FILL_FILLC_q (E2K_MAXCR_q - 6) + + +#ifndef __ASSEMBLY__ + +/* Current chain registers window descriptor (CWD) */ + +typedef unsigned int e2k_cwd_t; + +/* + * Structure of procedure chain stack hardare top register PCSHTP + * Register is signed value, so read from register get signed value + * and write to put signed value. + */ + +typedef unsigned int e2k_pcshtp_t; + +#define PCSHTP_SIGN_EXTEND(pcshtp) \ + (((s64) (pcshtp) << (s64) (64 - E2K_PCSHTP_SIZE)) \ + >> (s64) (64 - E2K_PCSHTP_SIZE)) + +#endif /* !(__ASSEMBLY__) */ + +#ifndef __ASSEMBLY__ +/* + * User Stack Base Register (USBR/SBR) + * USBR - contains the base virtual address of the current User Stack area. 
+ * SBR - contains the base virtual address of an area dedicated for all user + * stacks of the current task + */ +typedef e2k_rwp_struct_t e2k_usbr_t; +typedef e2k_rwp_struct_t e2k_sbr_t; + + /* + * Structure of double-word register + * access USBR.USBR_xxx or USBR -> USBR_xxx + * access SBR.SBR_xxx or SBR -> SBR_xxx + */ +#define USBR_base E2K_RWP_base /* [47: 0] - base address */ +#define USBR_reg E2K_RWP_reg /* [63: 0] - entire */ + /* double-word register */ +#define SBR_base USBR_base /* [47: 0] - base address */ +#define SBR_reg USBR_reg /* [63: 0] - entire */ + /* double-word register */ +#endif /* !(__ASSEMBLY__) */ + +#define E2K_ALIGN_STACKS_BASE 12 /* User stacks boundaries */ + /* alignment */ + /* (2's exponent value) */ +#define E2K_ALIGN_ALL_STACKS_BASE 37 /* All User stacks area */ + /* boundaries alignment */ + /* (2's exponent value) */ +#define E2K_PROTECTED_STACK_BASE_BITS 32 /* Protected mode stack */ + /* does not cross 4 Gb */ + /* boundary. */ + +#define E2K_ALIGN_STACK_BASE_REG (1UL << E2K_ALIGN_STACKS_BASE) +#define E2K_ALIGN_STACKS_BASE_MASK ((1UL << E2K_ALIGN_STACKS_BASE) - 1) +#define E2K_ALL_STACKS_MAX_SIZE (1UL << E2K_ALIGN_ALL_STACKS_BASE) +#define E2K_PROTECTED_STACK_BASE_MASK \ + ((1UL << E2K_PROTECTED_STACK_BASE_BITS) - 1) + +#ifndef __ASSEMBLY__ + +/* + * Non-Protected User Stack Descriptor (USD) + * contains free memory space dedicated for user stack data and + * is supposed to grow from higher memory addresses to lower ones + */ + + /* + * Structure of lower word + * access USD.lo.USD_lo_xxx or USD -> lo.USD_lo_xxx + * or USD.USD_lo_xxx or USD -> USD_lo_xxx + */ +typedef e2k_rwap_lo_struct_t e2k_usd_lo_t; +#define _USD_lo_rw E2K_RUSD_lo_rw /* [60:59] - read/write flags */ + /* should be "RW" */ +#define USD_lo_p E2K_RUSD_lo_p /* [58] - flag of "protected" */ + /* mode: should be */ + /* 0 - non-protected */ +#define USD_lo_p_bit E2K_RUSD_lo_p_bit /* protected flag as value */ +#define USD_lo_p_flag (1UL << USD_lo_p_bit) + 
+#define USD_lo_base E2K_RUSD_lo_base /* [47: 0] - base address */ +#define USD_lo_half E2K_RUSD_lo_half /* [63: 0] - entire lower */ + /* double-word of register */ + + /* + * Structure of high word + * access USD.hi.USD_hi_xxx or USD -> hi.USD_hi_xxx + * or USD_hi.USD_hi_xxx or USD_hi -> USD_hi_xxx + */ +typedef e2k_rwap_hi_struct_t e2k_usd_hi_t; +#define USD_hi_size E2K_RWAP_hi_size /* [63:32] - size */ +#define _USD_hi_curptr E2K_RWAP_hi_curptr /* [31: 0] - should be 0 */ +#define USD_hi_half E2K_RWAP_hi_half /* [63: 0] - entire high */ + /* double-word of register */ + +#define MAX_USD_HI_SIZE (4ULL * 1024 * 1024 * 1024 - 1ULL) + + /* + * Structure of quad-word register + * access USD.USD_xxx or USD -> USD_xxx + */ +typedef e2k_rwap_struct_t usd_struct_t; +#define _USD_rw E2K_RUSD_rw /* [60:59] - read/write flags */ + /* should be "RW" */ +#define USD_p E2K_RUSD_p /* [58] - flag of "protected" */ + /* mode: 1 - protected */ +#define USD_base E2K_RUSD_base /* [31: 0] - base address */ +#define USD_size E2K_RWAP_size /* [63:32] - size */ +#define _USD_curptr E2K_RWAP_curptr /* [31: 0] - should be 0 */ +#define USD_lo_reg E2K_RUSD_lo_reg /* [63: 0] - entire lower */ + /* double-word of register */ +#define USD_hi_reg E2K_RWAP_hi_reg /* [63: 0] - entire high */ + /* double-word of register */ +#define USD_lo_struct E2K_RUSD_lo_struct /* low register structure */ +#define USD_hi_struct E2K_RWAP_hi_struct /* high register structure */ + +/* + * Protected User Stack Descriptor (PUSD) + * contains free memory space dedicated for user stack data and + * is supposed to grow from higher memory addresses to lower ones + */ + + /* + * Structure of lower word + * access PUSD.lo.PUSD_lo_xxx or PUSD -> lo.PUSD_lo_xxx + * or PUSD.PUSD_lo_xxx or PUSD -> PUSD_lo_xxx + */ +typedef e2k_rwsap_lo_struct_t e2k_pusd_lo_t; +#define _PUSD_lo_rw E2K_RPUSD_lo_rw /* [60:59] - read/write flags */ + /* should be "RW" */ +#define PUSD_lo_p E2K_RPUSD_lo_p /* [58] - flag of "protected" */ + /* 
mode: should be */ + /* 1 - protected */ +#define PUSD_lo_psl E2K_RPUSD_lo_psl /* {47:32} - dynamic level of */ + /* the current procedure in a */ + /* stack of called procedures */ +#define PUSD_lo_base E2K_RPUSD_lo_base /* [31: 0] - base address */ +#define PUSD_lo_half E2K_RPUSD_lo_half /* [63: 0] - entire lower */ + /* double-word of register */ + + /* + * Structure of high word + * access PUSD.hi.PUSD_hi_xxx or PUSD -> hi.PUSD_hi_xxx + * or PUSD_hi.PUSD_hi_xxx or PUSD_hi -> PUSD_hi_xxx + */ +typedef e2k_rwsap_hi_struct_t e2k_pusd_hi_t; +#define PUSD_hi_size E2K_RWSAP_hi_size /* [63:32] - size */ +#define _PUSD_hi_curptr E2K_RWSAP_hi_curptr /* [31: 0] - should be 0 */ +#define PUSD_hi_half E2K_RWSAP_hi_half /* [63: 0] - entire high */ + /* double-word of register */ + + /* + * Structure of quad-word register + * access PUSD.PUSD_xxx or PUSD -> PUSD_xxx + */ +typedef e2k_rwsap_struct_t pusd_struct_t; +#define _PUSD_rw E2K_RPUSD_rw /* [60:59] - read/write flags */ + /* should be "RW" */ +#define PUSD_p E2K_RPUSD_p /* [58] - flag of "protected" */ + /* mode: should be */ + /* 1 - protected */ +#define PUSD_psl E2K_RPUSD_psl /* {47:32} - dynamic level of */ + /* the current procedure in a */ + /* stack of called procedures */ +#define PUSD_base E2K_RUSD_base /* [31: 0] - base address */ +#define PUSD_size E2K_RWSAP_size /* [63:32] - size */ +#define _PUSD_curptr E2K_RWSAP_curptr /* [31: 0] - should be 0 */ +#define PUSD_lo_reg E2K_RPUSD_lo_reg /* [63: 0] - entire lower */ + /* double-word of register */ +#define PUSD_hi_reg E2K_RWSAP_hi_reg /* [63: 0] - entire high */ + /* double-word of register */ +#define PUSD_lo_struct E2K_RUSD_lo_struct /* low register structure */ +#define PUSD_hi_struct E2K_RWSAP_hi_struct /* high register structure */ + + +#endif /* !(__ASSEMBLY__) */ + +#define E2K_ALIGN_USTACK 4 /* Non-Protected User Stack */ + /* boundaries alignment */ + /* (2's exponent value) */ +#define E2K_ALIGN_PUSTACK 5 /* Protected User Stack */ + /* boundaries 
alignment */ + /* (2's exponent value) */ + +#define E2K_ALIGN_USTACK_SIZE (1UL << E2K_ALIGN_USTACK) +#define E2K_ALIGN_PUSTACK_SIZE (1UL << E2K_ALIGN_PUSTACK) + +/* + * This should be + * max(E2K_ALIGN_USTACK_SIZE, E2K_ALIGN_PUSTACK_SIZE) + * but we want it to be constant + */ +#define E2K_ALIGN_STACK 32UL + +#ifndef __ASSEMBLY__ +#define E2K_ALIGN_USTACK_MASK ((1UL << E2K_ALIGN_USTACK) - 1) +#define E2K_ALIGN_PUSTACK_MASK ((1UL << E2K_ALIGN_PUSTACK) - 1) +#else /* __ASSEMBLY__ */ +#define E2K_ALIGN_USTACK_MASK ((1 << E2K_ALIGN_USTACK) - 1) +#define E2K_ALIGN_PUSTACK_MASK ((1 << E2K_ALIGN_PUSTACK) - 1) +#endif /* !(__ASSEMBLY__) */ + +#ifndef __ASSEMBLY__ + +/* + * Instruction structure + */ + +typedef u64 instr_item_t; /* min. item of instruction */ + /* is double-word */ + +#define E2K_INSTR_MAX_SYLLABLES_NUM 8 /* max length of instruction */ + /* in terms of min item of */ + /* instruction */ +#define E2K_INSTR_MAX_SIZE (E2K_INSTR_MAX_SYLLABLES_NUM * \ + sizeof(instr_item_t)) + +/* Asynchronous program instruction 'fapb' is always 16 bytes long */ +#define E2K_ASYNC_INSTR_SIZE 16 +/* Asynchronous program can contain maximum 32 instructions */ +#define MAX_ASYNC_PROGRAM_INSTRUCTIONS 32 + +typedef u16 instr_semisyl_t; /* instruction semi-syllable */ + /* is short */ + +typedef u32 instr_syl_t; /* instruction syllable */ + /* is word */ + +/* + * Order of fixed syllables of instruction + */ +#define E2K_INSTR_HS_NO 0 /* header syllable */ +#define E2K_INSTR_SS_NO 1 /* stubs syllable (if present) */ + +#define E2K_GET_INSTR_SEMISYL(instr_addr, semisyl_no) \ + (((instr_semisyl_t *)(instr_addr)) \ + [((semisyl_no) & 0x1) ? 
((semisyl_no) - 1) : \ + ((semisyl_no) + 1)]) +#define E2K_GET_INSTR_SYL(instr_addr, syl_no) \ + (((instr_syl_t *)(instr_addr))[syl_no]) + +#define E2K_GET_INSTR_HS(instr_addr) E2K_GET_INSTR_SYL(instr_addr, \ + E2K_INSTR_HS_NO) +#define E2K_GET_INSTR_SS(instr_addr) E2K_GET_INSTR_SYL(instr_addr, \ + E2K_INSTR_SS_NO) +#define E2K_GET_INSTR_ALS0(instr_addr, ss_flag) \ + E2K_GET_INSTR_SYL(instr_addr, \ + (ss_flag) ? E2K_INSTR_SS_NO + 1 \ + : \ + E2K_INSTR_SS_NO) +#define E2K_GET_INSTR_ALES0(instr_addr, mdl) \ + E2K_GET_INSTR_SEMISYL(instr_addr, ((mdl) + 1) * 2) + +/* + * Header syllable structure + */ + +typedef union instr_hs { + struct { + u32 mdl : 4; /* [ 3: 0] middle pointer in terms of */ + /* syllables - 1 */ + u32 lng : 3; /* [ 6: 4] length of instruction in */ + /* terms of double-words - 1 */ + u32 nop : 3; /* [ 9: 7] no operation code */ + u32 lm : 1; /* [10] loop mode flag */ + u32 x : 1; /* [11] unused field */ + u32 s : 1; /* [12] Stubs syllable presence bit */ + u32 sw : 1; /* [13] bit used by software */ + u32 c : 2; /* [15:14] Control syllables presence */ + /* mask */ + u32 cd : 2; /* [17:16] Conditional execution */ + /* syllables number */ + u32 pl : 2; /* [19:18] Predicate logic channel */ + /* syllables number */ + u32 ale : 6; /* [25:20] Arithmetic-logic channel */ + /* syllable extensions */ + /* presence mask */ + u32 al : 6; /* [31:26] Arithmetic-logic channel */ + /* syllables presence mask */ + }; + struct { + u32 __pad : 14; + u32 c0 : 1; /* CS0 */ + u32 c1 : 1; /* CS1 */ + u32 __pad2 : 4; + u32 ale0 : 1; + u32 ale1 : 1; + u32 ale2 : 1; + u32 ale3 : 1; + u32 ale4 : 1; + u32 ale5 : 1; + u32 al0 : 1; + u32 al1 : 1; + u32 al2 : 1; + u32 al3 : 1; + u32 al4 : 1; + u32 al5 : 1; + }; + instr_syl_t word; /* as entire syllable */ +} instr_hs_t; + +#define E2K_INSTR_HS_LNG_MASK 0x70 + +#define E2K_GET_INSTR_SIZE(hs) ((hs.lng + 1) * sizeof(instr_item_t)) + +/* + * Stubs sullable structure + */ + +typedef union instr_ss { + struct { + u32 ctcond : 9; 
/* [ 8: 0] control transfer condition */ + u32 x : 1; /* [ 9] unused field */ + u32 ctop : 2; /* [11:10] control transfer opcode */ + u32 aa : 4; /* [15:12] mask of AAS */ + u32 alc : 2; /* [17:16] advance loop counters */ + u32 abp : 2; /* [19:18] advance predicate base */ + u32 xx : 1; /* [20] unused field */ + u32 abn : 2; /* [22:21] advance numeric base */ + u32 abg : 2; /* [24:23] advance global base */ + u32 xxx : 1; /* [25] unused field */ + u32 vfdi : 1; /* [26] verify deferred interrupt */ + u32 srp : 1; /* [27] store recovery point */ + u32 bap : 1; /* [28] begin array prefetch */ + u32 eap : 1; /* [29] end array prefetch */ + u32 ipd : 2; /* [31:30] instruction prefetch depth */ + }; + instr_syl_t word; /* as entire syllable */ +} instr_ss_t; + +/* + * ALU syllables structure + */ + +typedef union { + union { + struct { + u32 dst : 8; /* [ 7: 0] destination */ + u32 src2 : 8; /* [15: 8] source register #2 */ + u32 opce : 8; /* [23:16] opcode extension */ + u32 cop : 7; /* [30:24] code of operation */ + u32 spec : 1; /* [31] speculative mode */ + }; + struct { + u32 : 24; + u32 opc : 8; + }; + } alf2; + instr_syl_t word; /* as entire syllable */ +} instr_als_t; + +typedef union instr_ales { + struct { + u16 src3 : 8; + u16 opc2 : 8; + } alef1; + struct { + u16 opce : 8; + u16 opc2 : 8; + } alef2; + instr_semisyl_t word; /* as entire syllable */ +} instr_ales_t; + +typedef union { + struct { + u8 __pad : 5; + u8 rt5 : 1; + u8 rt6 : 1; + u8 rt7 : 1; + }; + u8 word; +} instr_src_t; + +#define INSTR_SRC2_GREG_VALUE 0xe0 +#define INSTR_SRC2_GREG_MASK 0xe0 +#define INSTR_SRC2_GREG_NUM_MASK 0x1f +#define INSTR_SRC2_16BIT_VALUE 0xd0 +#define INSTR_SRC2_32BIT_VALUE 0xd8 +#define INSTR_SRC2_BIT_MASK 0xf8 +#define INSTR_SRC2_LTS_NUM_MASK 0x03 +#define INSTR_SRC2_LTS_SHIFT_MASK 0x04 +#define INSTR_LTS_32BIT_SHIFT 0 +#define INSTR_LTS_16BIT_SHIFT 16 +#define INSTR_LTS_16BIT_NOSHIFT 0 +#define INSTR_LTS_32BIT_MASK 0xffffffff +#define INSTR_LTS_16BIT_SHIFT_MASK 
0xffff0000 +#define INSTR_LTS_16BIT_NOSHIFT_MASK 0x0000ffff + +/* + * ALU syllable code of operations and opcode extentions + */ +#define DRTOAP_ALS_COP 0x62 /* DRTOAP */ +#define GETSP_ALS_COP 0x58 /* GETSP */ +#define GETSOD_ALS_COP 0x5a /* GETSOP */ +#define EXT_ALES_OPC2 0x01 /* EXTension */ +#define USD_ALS_OPCE 0xec /* USD */ + +/* + * CS0 syllable structure + */ + +typedef union { + struct { + u32 prefr : 3; + u32 ipd : 1; + u32 pdisp : 24; + u32 __pad : 4; + } pref; + struct { + u32 param : 28; + u32 ctp_opc : 2; + u32 ctpr : 2; + } cof1; + struct { + u32 disp : 28; + u32 ctp_opc : 2; + u32 ctpr : 2; + } cof2; + struct { + u32 __pad1 : 28; + u32 opc : 4; + }; + struct { + u32 __pad2 : 28; + u32 ctp_opc : 2; + u32 ctpr : 2; + }; + instr_syl_t word; +} instr_cs0_t; + +#define CS0_CTP_OPC_IBRANCH 0 +#define CS0_CTP_OPC_DISP 0 +#define CS0_CTP_OPC_LDISP 1 +#define CS0_CTP_OPC_PREF 1 +#define CS0_CTP_OPC_PUTTSD 2 + + +/* + * CS1 syllable structure + */ + +typedef union { + struct { + u32 __pad1 : 27; + u32 sft : 1; + u32 __pad2 : 4; + }; + struct { + u32 param : 28; + u32 opc : 4; + }; + instr_syl_t word; +} instr_cs1_t; + +#define CS1_OPC_SETEI 2 +#define CS1_OPC_WAIT 3 +#define CS1_OPC_CALL 5 + + +/* + * ========== numeric registers (register file) =========== + */ + +#define E2K_MAXNR 128 /* The total number of */ + /* quad-NRs */ +#define E2K_MAXGR 16 /* The total number of global */ + /* quad-NRs */ +#define E2K_MAXSR (E2K_MAXNR - E2K_MAXGR) /* The total number of stack */ + /* quad-NRs */ +#define E2K_MAXNR_d (E2K_MAXNR * 2) /* The total number of */ + /* double-NRs */ +#define E2K_MAXGR_d (E2K_MAXGR * 2) /* The total number of global */ + /* double-NRs */ +#define E2K_MAXSR_d (E2K_MAXSR * 2) /* The total number of stack */ + /* double-NRs */ +#define E2K_ALIGN_WINDOW 4 /* Window boundaries */ + /* alignment */ +#define E2K_WD_MSB 10 /* The number of bits in WD */ + /* fields */ +#define E2K_WD_SIZE (E2K_WD_MSB + 1) /* The number of bits in WD */ + /* 
fields */ +#define E2K_NR_SIZE 16 /* Byte size of quad-NR */ + +/* Total size of registers file (local stack registers + global registers) */ +#define MAX_NRF_SIZE (E2K_MAXNR * E2K_NR_SIZE) +/* Size of local stack registers file */ +#define MAX_SRF_SIZE (E2K_MAXSR * E2K_NR_SIZE) + +struct e2k_wd_fields { + u64 base : E2K_WD_SIZE; /* [10: 0] window base: */ + /* %r0 physical address */ + u64 unused1 : 16 - E2K_WD_SIZE; /* [15:11] */ + u64 size : E2K_WD_SIZE; /* [26:16] window size */ + u64 unused2 : 16 - E2K_WD_SIZE; /* [31:27] */ + u64 psize : E2K_WD_SIZE; /* [42:32] parameters area */ + /* size */ + u64 unused3 : 16 - E2K_WD_SIZE; /* [47:43] */ + u64 fx : 1; /* [48] spill/fill */ + /* extended flag; indicates */ + /* that the current procedure */ + /* has variables of FX type */ + u64 unused4 : 15; /* [63:49] unused field */ +}; + +/* Current window descriptor (WD) */ +typedef union e2k_wd { + struct { + u64 : 3; + u64 base_d : E2K_WD_SIZE - 3; + u64 : 16 - E2K_WD_SIZE + 3; + u64 size_d : E2K_WD_SIZE - 3; + u64 : 16 - E2K_WD_SIZE + 3; + u64 psize_d : E2K_WD_SIZE - 3; + u64 : 32 - E2K_WD_SIZE; + }; + struct { + u64 base : E2K_WD_SIZE; /* [10: 0] window base: */ + /* %r0 physical address */ + u64 unused1 : 16 - E2K_WD_SIZE; /* [15:11] */ + u64 size : E2K_WD_SIZE; /* [26:16] window size */ + u64 unused2 : 16 - E2K_WD_SIZE; /* [31:27] */ + u64 psize : E2K_WD_SIZE; /* [42:32] parameters area */ + /* size */ + u64 unused3 : 16 - E2K_WD_SIZE; /* [47:43] */ + u64 fx : 1; /* [48] spill/fill */ + /* extended flag; indicates */ + /* that the current procedure */ + /* has variables of FX type */ + u64 dbl : 1; /* [49] */ + u64 unused4 : 14; /* [63:50] unused field */ + }; + struct e2k_wd_fields fields; + u64 word; /* as entire opcode */ +} e2k_wd_t; + +#define WD_base base +#define WD_size size +#define WD_psize psize +#define WD_fx fx +#define WD_reg word + +/* Structure of dword register PSHTP */ +typedef struct e2k_pshtp_fields { /* PSHTP fields */ + u64 ind : E2K_WD_SIZE 
+ 1; /* [WD_MSB + 1 : 0] */ + u64 unused1 : 16 - E2K_WD_SIZE - 1; /* [15: WD_MSB + 2] */ + u64 fxind : E2K_WD_SIZE; /* [16 + WD_MSB : 16] */ + u64 unused2 : 32 - E2K_WD_SIZE - 16;/* [31: 16+ WD_MSB + 1] */ + u64 tind : E2K_WD_SIZE; /* [32 + WD_MSB : 32] */ + u64 unused3 : 48 - E2K_WD_SIZE - 32;/* [47: 32+ WD_MSB + 1] */ + u64 fx : 1; /* [48 : 48] */ + u64 unused4 : 15; /* [63 : 49] */ +} e2k_pshtp_fields_t; + +typedef union e2k_pshtp_struct { /* Register */ + e2k_pshtp_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_pshtp_t; + +#define PSHTP_ind fields.ind +#define PSHTP_tind fields.tind +#define PSHTP_fxind fields.fxind +#define PSHTP_fx fields.fx +#define PSHTP_reg word + +/* + * PSHTP register contains index in terms of double-numeric registers + * PSP register contains index in terms of extended double-numeric + * registers spilled into memory - each double-numeric register occupy + * two double words: one for main part and second for extension. + * So it need some conversion to operate with PSP_ind and PSHTP_ind in + * common terms. 
+ */ +#define PSHTP_IND_TO_PSP(nr_nums) ((nr_nums) * 2) +#define PSP_IND_TO_PSHTP(mem_ind) ((mem_ind) / 2) +#define PSHTP_IND_SIGN_EXTEND(pshtp) \ + (((s64) (pshtp) << (s64) (64 - (E2K_WD_SIZE + 1))) \ + >> (s64) (64 - (E2K_WD_SIZE + 1))) +#define PSHTP_MEM_SIGN_EXTEND(pshtp) \ + (((s64) (pshtp) << (s64) (64 - (E2K_WD_SIZE + 1))) \ + >> (s64) (64 - (E2K_WD_SIZE + 1) - 1)) +#define PSHTP_Q_SIGN_EXTEND(pshtp) \ + (((s64) (pshtp) << (s64) (64 - (E2K_WD_SIZE + 1))) \ + >> (s64) (64 - (E2K_WD_SIZE + 1) + 4)) +#define GET_PSHTP_NR_INDEX(pshtp) ((u64) PSHTP_IND_SIGN_EXTEND(AW(pshtp))) +#define SET_PSHTP_NR_INDEX(pshtp, signed_nr_nums) \ + ((pshtp).PSHTP_ind = (signed_nr_nums)) +#define GET_PSHTP_MEM_INDEX(pshtp) ((u64) PSHTP_MEM_SIGN_EXTEND(AW(pshtp))) +#define SET_PSHTP_MEM_INDEX(pshtp, mem_ind) \ + SET_PSHTP_NR_INDEX(pshtp, PSP_IND_TO_PSHTP(mem_ind)) +#define GET_PSHTP_Q_INDEX(pshtp) ((u64) PSHTP_Q_SIGN_EXTEND(AW(pshtp))) + + +/* Numeric Register in a rotatable area: %br# or %dbr# (OPCODE) */ +typedef struct e2k_nbr_fields { + u8 index : 7; /* [ 6: 0] NR index in a */ + /* rotatable area */ + u8 rt7 : 1; /* [ 7] should be 0 */ +} e2k_nbr_fields_t; +typedef union e2k_nbr { + e2k_nbr_fields_t fields; /* as fields */ + u8 word; /* as entire opcode */ +} e2k_nbr_t; + +/* Numeric Register in a window: %r# or %dr# (OPCODE) */ +typedef struct e2k_nr_fields { + u8 index : 6; /* [ 5: 0] NR index in a */ + /* window */ + u8 rt6 : 1; /* [ 6] should be 0 */ + u8 rt7 : 1; /* [ 7] should be 1 */ +} e2k_nr_fields_t; +typedef union e2k_nr { + e2k_nr_fields_t fields; /* as fields */ + u8 word; /* as entire opcode */ +} e2k_nr_t; + +/* Numeric results */ +/* Result destination (destination(ALS.dst)) is encoded in dst fields */ +/* of ALS or AAS syllables as follows: */ + +typedef union e2k_dst { + e2k_nbr_t nbr; /* as rotatable register */ + e2k_nr_t nr; /* as window register */ + u8 word; /* as entire opcode */ +} e2k_dst_t; + +#define DST_IS_NBR(dst) (AS_STRUCT(dst.nbr).rt7 == 0) 
+#define DST_IS_NR(dst) (AS_STRUCT(dst.nr).rt7 == 1 && \ + AS_STRUCT(dst.nr).rt6 == 0) +#define DST_NBR_INDEX(dst) (AS_STRUCT(dst.nbr).index) +#define DST_NR_INDEX(dst) (AS_STRUCT(dst.nr).index) +#define DST_NBR_RNUM_d(dst) DST_NBR_INDEX(dst) +#define DST_NR_RNUM_d(dst) DST_NR_INDEX(dst) + +/* The effective address of NR in a rotatable area (in terms of double-NR) */ +#define NBR_IND_d(BR, rnum_d) (AS_STRUCT(BR).rbs * 2 + \ + (AS_STRUCT(BR).rcur * 2 + rnum_d) % \ + (AS_STRUCT(BR).rsz * 2 + 2)) +#define NBR_REA_d(WD, ind_d) ((AS_STRUCT(WD).base / 8 + ind_d) % \ + E2K_MAXSR_d) + +/* The effective address of NR in a window (in terms of double-NR) */ +#define NR_REA_d(WD, rnum_d) ((AS_STRUCT(WD).base / 8 + rnum_d) % \ + E2K_MAXSR_d) + + +/* + * ========== chain regs & usd regs =========== + * To work with reg as with word use AS_WORD + * To work with reg as with struct use AS_STRUCT + */ + + +#define AS_WORD(x) ((x).word) +#define AS_STRUCT(x) ((x).fields) +#define AS_V2_STRUCT(x) ((x).v2_fields) +#define AS_V6_STRUCT(x) ((x).v6_fields) +#define AS_SAP_STRUCT(x) ((x).sap_fields) +#define AS_AP_STRUCT(x) ((x).ap_fields) +#define AS_WORD_P(xp) ((xp)->word) +#define AS_STRUCT_P(xp) ((xp)->fields) +#define AS_SAP_STRUCT_P(xp) ((xp)->sap_fields) +#define AS_AP_STRUCT_P(xp) ((xp)->ap_fields) + +#define AW(x) AS_WORD(x) +#define AS(x) AS_STRUCT(x) +#define AWP(xp) AS_WORD_P(xp) +#define ASP(xp) AS_STRUCT_P(xp) + +/* BR */ +typedef struct e2k_br_fields { /* Structure of br reg */ + u32 rbs : 6; /* [ 5: 0] */ + u32 rsz : 6; /* [11: 6] */ + u32 rcur : 6; /* [17:12] */ + u32 psz : 5; /* [22:18] */ + u32 pcur : 5; /* [27:23] */ +} e2k_br_fields_t; +typedef union e2k_br { + struct { + u32 rbs : 6; + u32 rsz : 6; + u32 rcur : 6; + u32 psz : 5; + u32 pcur : 5; + }; + e2k_br_fields_t fields; /* as fields */ + u32 word; /* as entire register */ +} e2k_br_t; +#define BR_rbs fields.rbs +#define BR_rsz fields.rsz +#define BR_rcur fields.rcur +#define BR_psz fields.psz +#define BR_pcur 
fields.pcur +#define BR_reg word + +static inline int br_rsz_full_d(e2k_br_t br) +{ + return 2 * (br.rsz + 1); +} + +/* see 5.25.1. */ + +typedef union e2k_rpr_lo_struct { + e2k_rwp_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_rpr_lo_t; + +#define RPR_lo_reg(rpr_lo) ((rpr_lo).word) +#define RPR_lo_ip E2K_RWP_base /* [47: 0] - IP of trap */ +#define RPR_lo_stp E2K_RWP_stub1 /* [58] - store pointer */ +#define RPR_lo_half BR_reg + +typedef union e2k_rpr_hi_struct { + e2k_br_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_rpr_hi_t; + +#define RPR_hi_reg(rpr_hi) ((rpr_hi).word) +#define RPR_hi_rbs BR_rbs +#define RPR_hi_rsz BR_rsz +#define RPR_hi_rcur BR_rcur +#define RPR_hi_psz BR_psz +#define RPR_hi_pcur BR_pcur +#define RPR_hi_half BR_reg + +#define RPR_IP(x) ((x).RPR_lo_ip) +#define RPR_STP(x) ((x).RPR_lo_stp) +#define RPR_BR_CUR(x) ((x).RPR_hi_rcur) +#define RPR_BR_PCUR(x) ((x).RPR_hi_pcur) + +/* + * BGR. Rotation base of global registers. + * 11 bits wide. 
Rounded to 32-bit, because 16-bit memory & sysreg access + * makes no sense in this case + */ +typedef struct e2k_bgr_fields { /* Structure of bgr reg */ + u32 val : 8; /* [ 7: 0] */ + u32 cur : 3; /* [10: 8] */ +} e2k_bgr_fields_t; +typedef union e2k_bgr { + e2k_bgr_fields_t fields; /* as fields */ + u32 word; /* as entire register */ +} e2k_bgr_t; + +#define BGR_val fields.val +#define BGR_cur fields.cur +#define BGR_reg word + +#define E2K_INITIAL_BGR_VAL 0xff +#define E2K_INITIAL_BGR ((e2k_bgr_t) { {cur : 0, val : 0xff} }) + + +#define E2K_GB_START_REG_NO_d 24 +#define E2K_GB_REGS_NUM_d (E2K_MAXGR_d - E2K_GB_START_REG_NO_d) + + +/* CR0 */ + +typedef struct { /* Structure of cr0_hi chain reg */ + u64 unused : 3; /* [ 2: 0] */ + u64 ip : 61; /* [63: 3] */ +} e2k_cr0_hi_fields_t; +typedef union { + struct { + u64 : 3; + u64 ip : 61; + }; + e2k_cr0_hi_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_cr0_hi_t; +#define CR0_hi_ip fields.ip /* [63: 3] - IP >> 3 */ +#define CR0_hi_half word /* [63: 0] - entire high */ +#define CR0_hi_IP CR0_hi_half /* [63: 0] - IP */ + +typedef struct { /* Structure of cr0_lo chain reg */ + u64 pf : 64; /* [63: 0] */ +} e2k_cr0_lo_fields_t; +typedef union { + e2k_cr0_lo_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_cr0_lo_t; +#define CR0_lo_pf fields.pf /* [63: 0] - predicates file */ +#define CR0_lo_half word /* [63: 0] - entire high */ + +/* CR1 */ + +typedef union { /* Structure of cr1_hi chain reg */ + struct { + u64 br : 28; /* [27: 0] */ + u64 unused : 7; /* [34:28] */ + u64 wdbl : 1; /* [35:35] */ + u64 ussz : 28; /* [63:36] */ + }; + struct { + u64 rbs : 6; /* [5 :0 ] */ + u64 rsz : 6; /* [11:6 ] */ + u64 rcur : 6; /* [17:12] */ + u64 psz : 5; /* [22:18] */ + u64 pcur : 5; /* [27:23] */ + u64 __x1 : 36; /* [63:28] */ + }; +} e2k_cr1_hi_fields_t; +typedef union { + struct { + u64 br : 28; + u64 : 7; + u64 wdbl : 1; + u64 ussz : 28; + }; + struct { + u64 rbs : 6; + u64 
rsz : 6; + u64 rcur : 6; + u64 psz : 5; + u64 pcur : 5; + u64 : 36; + }; + e2k_cr1_hi_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_cr1_hi_t; +#define CR1_hi_br fields.br /* [27: 0] - base of rotate regs */ +#define CR1_hi_wdbl fields.wdbl /* [63:36] - ??? */ +#define CR1_hi_ussz fields.ussz /* [63:36] - user stack size */ +#define CR1_hi_rbs fields.rbs /* [ 5: 0] - base of rotate regs */ +#define CR1_hi_rsz fields.rsz /* [11: 6] - size of rotate regs */ +#define CR1_hi_rcur fields.rcur /* [17:12] - current of rotate regs */ +#define CR1_hi_psz fields.psz /* [22:18] - size of rotate preds */ +#define CR1_hi_pcur fields.pcur /* [27:23] - current of rotate preds */ +#define CR1_hi_half word /* [63: 0] - entire high */ + +typedef union { /* Structure of cr1_lo chain reg */ + struct { + u64 unused1 : 16; /* [15:0] */ + u64 ein : 8; /* [23:16] */ + u64 ss : 1; /* [24] */ + u64 wfx : 1; /* [25] */ + u64 wpsz : 7; /* [32:26] */ + u64 wbs : 7; /* [39:33] */ + u64 cuir : 17; /* [56:40] */ + u64 psr : 7; /* [63:57] */ + }; + struct { + u64 __x1 : 40; /* [39:0] */ + u64 cui : 16; /* [40:55] */ + u64 ic : 1; /* [56] */ + u64 pm : 1; /* [57] privileged mode */ + u64 ie : 1; /* [58] interrupt enable */ + u64 sge : 1; /* [59] stack gard control enable */ + u64 lw : 1; /* [60] last wish */ + u64 uie : 1; /* [61] user interrupts enable */ + u64 nmie : 1; /* [62] not masked interrupts enable */ + u64 unmie : 1; /* [63] user not masked interrupts */ + /* enable */ + }; +} e2k_cr1_lo_fields_t; +typedef union { + struct { + u64 : 16; + u64 ein : 8; + u64 ss : 1; + u64 wfx : 1; + u64 wpsz : 7; + u64 wbs : 7; + u64 cuir : 17; + u64 psr : 7; + }; + struct { + u64 : 40; + u64 cui : 16; + u64 ic : 1; /* iset <= v5 */ + u64 pm : 1; + u64 ie : 1; + u64 sge : 1; + u64 lw : 1; + u64 uie : 1; + u64 nmie : 1; + u64 unmie : 1; + }; + e2k_cr1_lo_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_cr1_lo_t; +#define CR1_lo_tr fields.tr /* [14: 
0] - ??? */ +#define CR1_lo_ein fields.ein /* [23:16] - ??? */ +#define CR1_lo_wfx fields.wfx /* [25] - ??? */ +#define CR1_lo_wpsz fields.wpsz /* [32:26] - regs window parameter */ +#define CR1_lo_wbs fields.wbs /* [39:33] - regs window base */ +#define CR1_lo_cuir fields.cuir /* [56:40] - compilation unit index */ +#define CR1_lo_cui fields.cui /* [55:40] - compilation unit index */ + /* new release field */ +#define CR1_lo_ic fields.ic /* [56] - index checkup flag */ +#define CR1_lo_psr fields.psr /* [63:57] - processor state reg */ +#define CR1_lo_pm fields.pm /* [57] - privileged mode */ +#define CR1_lo_ie fields.ie /* [58] - interrupt enable */ +#define CR1_lo_sge fields.sge /* [59] - stack guard control */ + /* enable */ +#define CR1_lo_lw fields.lw /* [60] - last wish */ +#define CR1_lo_uie fields.uie /* [61] - user interrupts enable */ +#define CR1_lo_nmie fields.nmie /* [62] - not masked interrupts */ + /* enable */ +#define CR1_lo_unmie fields.unmie /* [63] - user not masked */ + /* interrupts enable */ +#define CR1_lo_half word /* [63: 0] - entire high */ +#endif /* !(__ASSEMBLY__) */ + +#ifdef __ASSEMBLY__ +/* for assembly only */ +#define CR1_lo_psr_shift 57 /* [63:57] - processor state reg */ +#define CR1_LO_PSR_PM_BIT (PSR_PM_BIT + CR1_lo_psr_shift) +#define CR1_LO_PSR_IE_BIT (PSR_IE_BIT + CR1_lo_psr_shift) +#define CR1_LO_PSR_SGE_BIT (PSR_SGE_BIT + CR1_lo_psr_shift) +#define CR1_LO_PSR_LW_BIT (PSR_LW_BIT + CR1_lo_psr_shift) +#define CR1_LO_PSR_UIE_BIT (PSR_UIE_BIT + CR1_lo_psr_shift) +#define CR1_LO_PSR_NMIE_BIT (PSR_NMIE_BIT + CR1_lo_psr_shift) +#define CR1_LO_PSR_UNMIE_BIT (PSR_UNMIE_BIT + CR1_lo_psr_shift) + +#define CR1_LO_PSR_PM_MASK (0x1UL << CR1_LO_PSR_PM_BIT) +#define CR1_LO_PSR_IE_MASK (0x1UL << CR1_LO_PSR_IE_BIT) +#define CR1_LO_PSR_SGE_MASK (0x1UL << CR1_LO_PSR_SGE_BIT) +#define CR1_LO_PSR_LW_MASK (0x1UL << CR1_LO_PSR_LW_BIT) +#define CR1_LO_PSR_UIE_MASK (0x1UL << CR1_LO_PSR_UIE_BIT) +#define CR1_LO_PSR_NMIE_MASK (0x1UL << 
CR1_LO_PSR_NMIE_BIT) +#define CR1_LO_PSR_UNMIE_MASK (0x1UL << CR1_LO_PSR_UNMIE_BIT) + +#define CR1_lo_cuir_shift 40 /* [55:40] - CUIR value */ +#define CR1_lo_cuir_mask (CUIR_mask << CR1_lo_cuir_shift) + +#endif /* __ASSEMBLY__ */ + +#define CR1_lo_cuir_size 16 /* size in bits */ +#define CUIR_mask ((1UL << CR1_lo_cuir_size) - 1) + +#ifndef __ASSEMBLY__ + +#define E2K_ALIGN_INS 3 /* number of least */ + /* significant bits of IP */ + /* are zeroed */ + +/* + * Control Transfer Preparation Register (CTPR) + */ + + /* + * Structure of double-word register + * access CTPR.CTPR_xxx or CTPR -> CTPR_xxx + */ +typedef union { + struct { + u64 ta_base : E2K_VA_SIZE; /* [47: 0] */ + u64 __pad1 : 53 - E2K_VA_MSB; /* [53:48] */ + u64 ta_tag : 3; /* [56:54] */ + u64 opc : 2; /* [58:57] */ + u64 ipd : 2; /* [60:59] */ + u64 __pad2 : 3; /* [63:61] */ + } fields; + struct { + u64 ta_base : E2K_VA_SIZE; + u64 __pad1 : 53 - E2K_VA_MSB; + u64 ta_tag : 3; + u64 opc : 2; + u64 ipd : 2; + u64 __pad2 : 3; + }; + u64 word; +} e2k_ctpr_t; +#define CTPR_ta_base ta_base /* [47: 0] - transfer address */ +#define CTPR_ta_tag ta_tag /* [56:54] - tag */ +#define CTPR_opc opc /* [58:57] - opcode */ +#define CTPR_ipd ipd /* [58:57] - prefetch level */ +#define CTPR_reg word /* [63: 0] - entire register */ +/* Control Transfer Opcodes */ +#define DISP_CT_OPC 0 +#define LDISP_CT_OPC 1 +#define RETURN_CT_OPC 3 + +/* Control Transfer Tag */ +#define CTPEW_CT_TAG 0 /* empty word */ +#define CTPDW_CT_TAG 1 /* diagnostic word */ +#define CTPPL_CT_TAG 2 /* procedure label */ +#define CTPLL_CT_TAG 3 /* local label */ +#define CTPNL_CT_TAG 4 /* numeric label */ +#define CTPSL_CT_TAG 5 /* system label */ + +/* Control Transfer Prefetch Level */ +#define NONE_CT_IPD 0 /* none any prefetching */ +#define ONE_IP_CT_IPD 1 /* only one instruction on 'ta_base' IP */ +#define TWO_IP_CT_IPD 2 /* two instructions on 'ta_base' and next IP */ + +typedef union { + struct { + u64 cui : 16; + u64 __pad : 48; + }; + u64 
word; +} e2k_ctpr_hi_t; + + +/* PSR */ +typedef struct { + u32 pm : 1; /* [ 0] */ + u32 ie : 1; /* [ 1] */ + u32 sge : 1; /* [ 2] */ + u32 lw : 1; /* [ 3] last wish */ + u32 uie : 1; /* [ 4] user interrupts enable */ + u32 nmie : 1; /* [ 5] not masked interrupts enable */ + u32 unmie : 1; /* [ 6] user not masked interrupts */ + /* enable */ + u32 unused : 25; /* [31: 7] */ +} e2k_psr_fields_t; +typedef union { + struct { + u32 pm : 1; + u32 ie : 1; + u32 sge : 1; + u32 lw : 1; + u32 uie : 1; + u32 nmie : 1; + u32 unmie : 1; + u32 : 25; + }; + e2k_psr_fields_t fields; /* as fields */ + u32 word; /* as entire register */ +} e2k_psr_t; + +#define PSR_pm fields.pm /* [ 0] */ +#define PSR_ie fields.ie /* [ 1] */ +#define PSR_sge fields.sge /* [ 2] */ +#define PSR_lw fields.lw /* [ 3] */ +#define PSR_uie fields.uie /* [ 4] */ +#define PSR_nmie fields.nmie /* [ 5] */ +#define PSR_unmie fields.unmie /* [ 6] */ +#define PSR_reg word /* [31: 0] - entire */ + /* single-word register */ + +#define PSR_PM 0x01U +#define PSR_IE 0x02U +#define PSR_SGE 0x04U +#define PSR_LW 0x08U +#define PSR_UIE 0x10U +#define PSR_NMIE 0x20U +#define PSR_UNMIE 0x40U +#define PSR_DISABLE 0xff8dU /*~(PSR_IE|PSR_NMIE|PSR_UIE|PSR_UNMIE)*/ +#define PSR_PM_DISABLE 0xfffeU /* ~PSR_PM_AS */ +#endif /* !(__ASSEMBLY__) */ + +#ifdef __ASSEMBLY__ +/* for assemly only */ +#define PSR_PM_BIT 0 +#define PSR_IE_BIT 1 +#define PSR_SGE_BIT 2 +#define PSR_LW_BIT 3 +#define PSR_UIE_BIT 4 +#define PSR_NMIE_BIT 5 +#define PSR_UNMIE_BIT 6 + +#define PSR_PM_AS (0x1 << PSR_PM_BIT) +#define PSR_IE_AS (0x1 << PSR_IE_BIT) +#define PSR_SGE_AS (0x1 << PSR_SGE_BIT) +#define PSR_LW_AS (0x1 << PSR_LW_BIT) +#define PSR_UIE_AS (0x1 << PSR_UIE_BIT) +#define PSR_NMIE_AS (0x1 << PSR_NMIE_BIT) +#define PSR_UNMIE_AS (0x1 << PSR_UNMIE_BIT) +#define PSR_DISABLE (~(PSR_IE_AS | PSR_NMIE_AS | PSR_UIE_AS | PSR_UNMIE_AS)) +#define PSR_PM_DISABLE (~PSR_PM_AS) + +#endif /* __ASSEMBLY__ */ + +#ifndef __ASSEMBLY__ + +/* CUT entry */ + +typedef 
struct e2k_cute_dw0_fields { /* Structure of the first d-word */ + /* of CUT entry */ + u64 cud_base : E2K_VA_SIZE; /* [47: 0] */ + u64 unused1 : 57 - E2K_VA_MSB; /* [57:48] */ + u64 cud_c : 1; /* [58:58] */ + u64 unused2 : 5; /* [63:59] */ +} e2k_cute_dw0_fields_t; + +typedef union e2k_cute_dw0 { + e2k_cute_dw0_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_cute_dw0_t; + + +typedef struct e2k_cute_dw1_fields { /* Structure of the second d-word */ + /* of CUT entry */ + u64 unused1 : 32; /* [31: 0] */ + u64 cud_size : 32; /* [63:32] */ +} e2k_cute_dw1_fields_t; + +typedef union e2k_cute_dw1 { + e2k_cute_dw1_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_cute_dw1_t; + +typedef struct e2k_cute_dw2_fields { /* Structure of the third d-word */ + /* of CUT entry */ + u64 gd_base : E2K_VA_SIZE; /* [47: 0] */ + u64 unused1 : 63 - E2K_VA_MSB; /* [63:48] */ +} e2k_cute_dw2_fields_t; + +typedef union e2k_cute_dw2 { + e2k_cute_dw2_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_cute_dw2_t; + +typedef struct e2k_cute_dw3_fields { /* Structure of the fourth d-word */ + /* of CUT entry */ + u64 tsd_base : 15; /* [14: 0] */ + u64 unused1 : 1; /* [15:15] */ + u64 tsd_size : 15; /* [30:16] */ + u64 unused2 : 1; /* [31:31] */ + u64 gd_size : 32; /* [63:32] */ +} e2k_cute_dw3_fields_t; + +typedef union e2k_cute_dw3 { + e2k_cute_dw3_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_cute_dw3_t; + +/* Structure of entire CUT entry */ +typedef struct e2k_cute { + e2k_cute_dw0_t dw0; + e2k_cute_dw1_t dw1; + e2k_cute_dw2_t dw2; + e2k_cute_dw3_t dw3; +} e2k_cute_t; + +#define CUTE_CUD_BASE(p) AS_STRUCT(p->dw0).cud_base +#define CUTE_CUD_SIZE(p) AS_STRUCT(p->dw1).cud_size +#define CUTE_CUD_C(p) AS_STRUCT(p->dw0).cud_c + +#define CUTE_GD_BASE(p) AS_STRUCT(p->dw2).gd_base +#define CUTE_GD_SIZE(p) AS_STRUCT(p->dw3).gd_size + +#define CUTE_TSD_BASE(p) AS_STRUCT(p->dw3).tsd_base 
+#define CUTE_TSD_SIZE(p) AS_STRUCT(p->dw3).tsd_size + +#endif /* !(__ASSEMBLY__) */ + +#define E2K_ALIGN_CUT 5 /* Compilation units table */ + /* boundaries alignment */ + /* (2's exponent value) */ +#ifndef __ASSEMBLY__ +#define E2K_ALIGN_CUT_MASK ((1UL << E2K_ALIGN_CUT) - 1) +#else /* __ASSEMBLY__ */ +#define E2K_ALIGN_CUT_MASK ((1 << E2K_ALIGN_CUT) - 1) +#endif /* !(__ASSEMBLY__) */ + +#ifndef __ASSEMBLY__ + +/* CUTD */ + +typedef e2k_rwp_struct_t e2k_cutd_t; +#define CUTD_base E2K_RWP_base /* [47: 0] - base address */ +#define CUTD_reg E2K_RWP_reg /* [63: 0] - entire double- */ + /* word register */ + +/* CUIR */ + +typedef struct e2k_cuir_fields { /* Structure of the CUIR reg */ + u32 index : 16; /* [15: 0] */ + u32 checkup : 1; /* [16:16] */ + u32 unused1 : 15; /* [31:17] */ +} e2k_cuir_fields_t; + +typedef union e2k_cuir { + e2k_cuir_fields_t fields; /* as fields */ + u32 word; /* as entire register */ +} e2k_cuir_t; +#define CUIR_index fields.index +#define CUIR_checkup fields.checkup +#define CUIR_reg word + +/* TSD */ + +typedef struct e2k_tsd_fields { /* Structure of the TSD reg */ + u64 base : 15; /* [14: 0] */ + u64 unused1 : 17; /* [31:15] */ + u64 size : 15; /* [46:32] */ + u64 unused2 : 17; /* [63:47] */ +} e2k_tsd_fields_t; + +typedef union e2k_tsd { + e2k_tsd_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_tsd_t; +#define TSD_base fields.base +#define TSD_size fields.size +#define TSD_reg word + +#define CUD_CFLAG_CEARED 0 /* intermodule security verification */ + /* (ISV) have not passed */ +#define CUD_CFLAG_SET 1 /* ISV have passed */ + +/* Hardware procedure stack memory mapping (one quad-register record, LE) */ +/* Instruction sets from V2 to V4 */ +typedef struct e2k_mem_ps_v2 { + unsigned long word_lo; /* low word value */ + unsigned long word_hi; /* high word value */ + unsigned long ext_lo; /* extension of low word */ + unsigned long ext_hi; /* extension of high word */ +} e2k_mem_ps_v2_t; +/* Instruction sets 
from V5 to V6 */ +typedef struct e2k_mem_ps_v5 { + unsigned long word_lo; /* low word value */ + unsigned long ext_lo; /* extention of low word */ + unsigned long word_hi; /* high word value */ + unsigned long ext_hi; /* extention of hagh word */ +} e2k_mem_ps_v5_t; +typedef union e2k_mem_ps { + e2k_mem_ps_v2_t v2; + e2k_mem_ps_v5_t v5; +} e2k_mem_ps_t; + +/* interkernel hardware-independent representation */ +typedef struct kernel_mem_ps { + unsigned long word_lo; /* low word value */ + unsigned long word_hi; /* high word value */ + unsigned long ext_lo; /* extention of low word */ + unsigned long ext_hi; /* extention of hagh word */ +} kernel_mem_ps_t; + +/* Chain stack memory mapping (one record, LE) */ + +typedef struct e2k_mem_crstack { + e2k_cr0_lo_t cr0_lo; + e2k_cr0_hi_t cr0_hi; + e2k_cr1_lo_t cr1_lo; + e2k_cr1_hi_t cr1_hi; +} e2k_mem_crs_t; + +#endif /* !(__ASSEMBLY__) */ + +/* + * chain stack items relative offset from cr_ind for pcsp + */ + +#define CR0_LO_I (0 * 8) +#define CR0_HI_I (1 * 8) +#define CR1_LO_I (2 * 8) +#define CR1_HI_I (3 * 8) + +#ifndef __ASSEMBLY__ + +/* + * cr1.lo.wbs is size of prev proc in term of size of 4 32 bit reegs. 
+ * But in hard stack these regs are in extended format (*2) + */ +#define EXT_4_NR_SZ ((4 * 4) * 2) +#define SZ_OF_CR sizeof(e2k_mem_crs_t) + + +typedef union { + struct { + u64 trwm_itag : 3; + u64 trwm_idata : 3; + u64 trwm_cf : 3; + u64 ib_snoop_dsbl : 1; + u64 bist_cf : 1; + u64 bist_tu : 1; + u64 bist_itag : 1; + u64 bist_itlbtag : 1; + u64 bist_itlbdata : 1; + u64 bist_idata_nm : 4; + u64 bist_idata_cnt : 10; + u64 pipe_frz_dsbl : 1; /* Since iset v5 */ + u64 rf_clean_dsbl : 1; + /* iset v6 */ + u64 virt_dsbl : 1; + u64 upt_sec_ad_shift_dsbl : 1; + u64 pdct_stat_enbl : 1; + u64 pdct_dyn_enbl : 1; + u64 pdct_rbr_enbl : 1; + u64 pdct_ret_enbl : 1; + u64 pdct_retst_enbl : 1; + u64 pdct_cond_enbl : 1; + }; + u64 word; +} e2k_cu_hw0_t; + + +/* + * Trap Info Registers + */ + +typedef e2k_rwp_struct_t e2k_tir_lo_t; + +typedef struct tir_hi_fields { /* Structure of the TIR_hi reg */ + u64 exc : 44; /* exceptions mask [43: 0] */ + u64 al : 6; /* ALS mask [49:44] */ + u64 unused1 : 2; /* unused bits [51:50] */ + u64 aa : 4; /* MOVA mask [55:52] */ + u64 j : 8; /* # of TIR [63:56] */ +} tir_hi_fields_t; + +typedef union tir_hi_struct { + struct { /* Structure of the TIR_hi reg */ + u64 exc : 44; /* exceptions mask [43: 0] */ + u64 al : 6; /* ALS mask [49:44] */ + u64 unused1 : 2; /* unused bits [51:50] */ + u64 aa : 4; /* MOVA mask [55:52] */ + u64 j : 8; /* # of TIR [63:56] */ + }; + tir_hi_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_tir_hi_t; + +typedef struct e2k_tir_reg { /* simple TIRj register desc */ + e2k_tir_lo_t TIR_lo; + e2k_tir_hi_t TIR_hi; +} e2k_tir_t; + + /* + * Structure of low word of the register + * access TIR_lo.TIR_lo_xxx or TIR_lo -> TIR_lo_xxx + */ +#define TIR_lo_ip E2K_RWP_base /* [47: 0] - IP of trap */ +#define TIR_lo_reg E2K_RWP_reg /* [63: 0] - entire */ + /* double-word register */ + + /* + * Structure of hi word of the register + * access TIR_hi.TIR_hi_xxx or TIR_hi -> TIR_hi_xxx + */ +#define TIR_hi_reg 
word /* [63: 0] - entire */ + +#define TIR_hi_exc fields.exc +#define TIR_hi_al fields.al +#define TIR_hi_aa fields.aa +#define TIR_hi_j fields.j + +/* ALS mask structure */ +#define ALS0_mask 0x01 +#define ALS1_mask 0x02 +#define ALS2_mask 0x04 +#define ALS3_mask 0x08 +#define ALS4_mask 0x10 +#define ALS5_mask 0x20 + +#define MAX_TIRs_NUM 19 + +/* + * User processor status register (UPSR) + */ +typedef struct e2k_upsr_fields { + u32 fe : 1; /* float-pointing enable */ + u32 se : 1; /* supervisor mode enable (only for Intel) */ + u32 ac : 1; /* not-aligned access control */ + u32 di : 1; /* delayed interrupt (only for Intel) */ + u32 wp : 1; /* write protection (only for Intel) */ + u32 ie : 1; /* interrupt enable */ + u32 a20 : 1; /* emulation of 1 Mb memory (only for Intel) */ + /* should be 0 for Elbrus */ + u32 nmie : 1; /* not masked interrupt enable */ + /* next field of register exist only on ES2/E2S/E8C/E1C+ CPUs */ + u32 fsm : 1; /* floating comparison mode flag */ + /* 1 - compatible with x86/x87 */ + u32 impt : 1; /* ignore Memory Protection Table flag */ + u32 iuc : 1; /* ignore access right for uncached pages */ + +} e2k_upsr_fields_t; +typedef union e2k_upsr { + e2k_upsr_fields_t fields; /* as fields */ + u32 word; /* as entire register */ +} e2k_upsr_t; +#define UPSR_fe fields.fe +#define UPSR_se fields.se +#define UPSR_ac fields.ac +#define UPSR_di fields.di +#define UPSR_wp fields.wp +#define UPSR_ie fields.ie +#define UPSR_a20 fields.a20 +#define UPSR_nmie fields.nmie +#define UPSR_fsm fields.fsm +#define UPSR_impt fields.impt +#define UPSR_iuc fields.iuc +#define UPSR_reg word + +#endif /* !(__ASSEMBLY__) */ + +#define UPSR_FE 0x01U +#define UPSR_SE 0x02U +#define UPSR_AC 0x04U +#define UPSR_DI 0x08U +#define UPSR_WP 0x10U +#define UPSR_IE 0x20U +#define UPSR_A20 0x40U +#define UPSR_NMIE 0x80U +/* next field of register exist only on ES2/E2S/E8C/E1C+ CPUs */ +#define UPSR_FSM 0x100U +#define UPSR_IMPT 0x200U +#define UPSR_IUC 0x400U +#define 
UPSR_DISABLE (0xff5f) /* ~(UPSR_IE_AS|UPSR_NMIE_AS) */ + +/* (IS_UPT_E3S ? 0 : UPSR_SE_AS) */ +#ifndef IS_UPT_E3S + #define KERNEL_UPSR_SE_INIT 0 +#else + #define KERNEL_UPSR_SE_INIT UPSR_SE +#endif /* IS_UPT_E3S */ +#ifndef CONFIG_ACCESS_CONTROL + #define KERNEL_UPSR_ALL_INIT (UPSR_FE | KERNEL_UPSR_SE_INIT) +#else + #define KERNEL_UPSR_ALL_INIT (UPSR_FE | KERNEL_UPSR_SE_INIT | UPSR_AC) +#endif /* KERNEL_UPSR_ALL_INIT */ + +#ifndef __ASSEMBLY__ + +/* + * Processor Identification Register (IDR) + */ +typedef union e2k_idr { + struct { + u64 mdl : 8; /* CPU model number */ + u64 rev : 4; /* revision number */ + u64 wbl : 3; /* write back length of L2 */ + u64 core : 5; /* number of the core into node */ + u64 pn : 4; /* node number from RT_LCFG0.pn */ + u64 hw_virt : 1; /* hardware virtualization enabled */ + u64 hw_virt_ver : 4; /* hardware virtualization revision */ + /* number */ + u64 reserve : 35; /* reserved */ + }; + struct { + u64 __pad : 12; + u64 ms : 52; /* model specific info */ + }; + u64 word; /* as entire register */ +} e2k_idr_t; + +#define IDR_reg word /* [63: 0] - entire */ + +#define IDR_mdl mdl +#define IDR_rev rev +#define IDR_ms ms +#define IDR_wbl wbl +#define IDR_ms_core core +#define IDR_ms_pn pn +#define IDR_ms_hw_virt hw_virt +#define IDR_ms_hw_virt_ver hw_virt_ver + +/* CPU model numbers */ +#define IDR_NONE 0x00 /* No such hardware exists */ +#define IDR_E2S_MDL 0x03 /* Elbrus-4C (Elbrus-2S) */ +#define IDR_ES2_DSP_MDL 0x04 /* Elbrus-2C+ */ +#define IDR_E4S_MDL 0x05 /* reserve */ +#define IDR_ES2_RU_MDL 0x06 /* Elbrus-2CM (without DSP) */ + /* russian MICRON release */ +#define IDR_E8C_MDL 0x07 /* Elbrus-8C */ +#define IDR_E1CP_MDL 0x08 /* Elbrus-1C+ one processor e2s */ + /* + graphic */ +#define IDR_E8C2_MDL 0x09 /* Elbrus-8C2 */ +#define IDR_E12C_MDL 0x0a /* Elbrus-12C */ +#define IDR_E16C_MDL 0x0b /* Elbrus-16C */ +#define IDR_E2C3_MDL 0x0c /* Elbrus-2C3 */ + +/* Convert IDR register write back length code to number of bytes */ +/* 
using current WBL code presentation */ +#define IDR_WBL_TO_BYTES(wbl) ((wbl) ? (1 << (wbl + 4)) : 1) + +/* + * Processor Core Mode Register (CORE_MODE) + */ +typedef union e2k_core_mode { + struct { + u32 reserve0 : 1; /* bit #0 reserved */ + u32 no_stack_prot : 1; /* no check stack pointers */ + u32 sep_virt_space : 1; /* separate page tables for kernel */ + /* and users */ + u32 gmi : 1; /* indicator of guest mode */ + /* actual only in guest mode */ + u32 hci : 1; /* indicator of hypercalls enabled */ + /* actual only in guest mode */ + u32 pt_v6 : 1; /* new Page Tables structures mode */ + /* only for ISET >= V6 */ + u32 sp_rollback_en : 1; /* hardware rollback PSP/PCSP stack */ + /* pointers is enabled */ + u32 reserve6 : 25; /* other bits reserved */ + }; + u32 word; /* as entire register */ +} e2k_core_mode_t; + +#define CORE_MODE_reg word /* [31: 0] - entire */ + +#define CORE_MODE_no_stack_prot no_stack_prot +#define CORE_MODE_sep_virt_space sep_virt_space +#define CORE_MODE_gmi gmi +#define CORE_MODE_hci hci +#define CORE_MODE_pt_v6 pt_v6 +#define CORE_MODE_sp_rollback_en sp_rollback_en + +/* + * Packed Floating Point Flag Register (PFPFR) + */ +typedef struct e2k_pfpfr_fields { + u32 ie : 1; /* [0] */ + u32 de : 1; /* [1] */ + u32 ze : 1; /* [2] */ + u32 oe : 1; /* [3] */ + u32 ue : 1; /* [4] */ + u32 pe : 1; /* [5] */ + u32 zero1 : 1; /* [6] */ + u32 im : 1; /* [7] */ + u32 dm : 1; /* [8] */ + u32 zm : 1; /* [9] */ + u32 om : 1; /* [10] */ + u32 um : 1; /* [11] */ + u32 pm : 1; /* [12] */ + u32 rc : 2; /* [14:13] */ + u32 fz : 1; /* [15] */ + u32 zero2 : 10; /* [25:16] */ + u32 die : 1; /* [26] */ + u32 dde : 1; /* [27] */ + u32 dze : 1; /* [28] */ + u32 doe : 1; /* [29] */ + u32 due : 1; /* [30] */ + u32 dpe : 1; /* [31] */ +} e2k_pfpfr_fields_t; +typedef union e2k_pfpfr { + e2k_pfpfr_fields_t fields; /* as fields */ + u32 word; /* as entire register */ +} e2k_pfpfr_t; + +#define PFPFR_ie fields.ie +#define PFPFR_de fields.de +#define PFPFR_ze 
fields.ze +#define PFPFR_oe fields.oe +#define PFPFR_ue fields.ue +#define PFPFR_pe fields.pe +#define PFPFR_zero1 fields.zero1 +#define PFPFR_im fields.im +#define PFPFR_dm fields.dm +#define PFPFR_zm fields.zm +#define PFPFR_om fields.om +#define PFPFR_um fields.um +#define PFPFR_pm fields.pm +#define PFPFR_rc fields.rc +#define PFPFR_fz fields.fz +#define PFPFR_zero2 fields.zero2 +#define PFPFR_die fields.die +#define PFPFR_dde fields.dde +#define PFPFR_dze fields.dze +#define PFPFR_doe fields.doe +#define PFPFR_due fields.due +#define PFPFR_dpe fields.dpe +#define PFPFR_reg word + +/* + * Floating point control register (FPCR) + */ +typedef struct e2k_fpcr_fields { + u32 im : 1; /* [0] */ + u32 dm : 1; /* [1] */ + u32 zm : 1; /* [2] */ + u32 om : 1; /* [3] */ + u32 um : 1; /* [4] */ + u32 pm : 1; /* [5] */ + u32 one1 : 1; /* [6] */ + u32 zero1 : 1; /* [7] */ + u32 pc : 2; /* [9:8] */ + u32 rc : 2; /* [11:10] */ + u32 ic : 1; /* [12] */ + u32 zero2 : 3; /* [15:13] */ +} e2k_fpcr_fields_t; +typedef union e2k_fpcr { + e2k_fpcr_fields_t fields; /* as fields */ + u32 word; /* as entire register */ +} e2k_fpcr_t; + +#define FPCR_im fields.im +#define FPCR_dm fields.dm +#define FPCR_zm fields.zm +#define FPCR_om fields.om +#define FPCR_um fields.um +#define FPCR_pm fields.pm +#define FPCR_one1 fields.one1 +#define FPCR_zero1 fields.zero1 +#define FPCR_pc fields.pc +#define FPCR_rc fields.rc +#define FPCR_ic fields.ic +#define FPCR_zero2 fields.zero2 +#define FPCR_reg word + + +/* + * Floating point status register (FPSR) + */ +typedef struct e2k_fpsr_fields { + u32 ie : 1; /* [0] */ + u32 de : 1; /* [1] */ + u32 ze : 1; /* [2] */ + u32 oe : 1; /* [3] */ + u32 ue : 1; /* [4] */ + u32 pe : 1; /* [5] */ + u32 zero1 : 1; /* [6] */ + u32 es : 1; /* [7] */ + u32 zero2 : 1; /* [8] */ + u32 c1 : 1; /* [9] */ + u32 zero3 : 5; /* [14:10] */ + u32 bf : 1; /* [15] */ +} e2k_fpsr_fields_t; +typedef union e2k_fpsr { + e2k_fpsr_fields_t fields; /* as fields */ + u32 word; /* as 
entire register */ +} e2k_fpsr_t; + +#define FPSR_ie fields.ie +#define FPSR_de fields.de +#define FPSR_ze fields.ze +#define FPSR_oe fields.oe +#define FPSR_ue fields.ue +#define FPSR_pe fields.pe +#define FPSR_zero1 fields.zero1 +#define FPSR_es fields.es +#define FPSR_zero2 fields.zero2 +#define FPSR_c1 fields.c1 +#define FPSR_zero3 fields.zero3 +#define FPSR_bf fields.bf +#define FPSR_reg word + +typedef union { + u32 half_word[2]; + struct { + u32 user : 1; + u32 system : 1; + u32 trap : 1; + u32 unused1 : 13; + u32 event : 7; + u32 unused2 : 9; + } fields[2]; + struct { + u64 __pad1 : 11; + u64 u_m_en : 1; + u64 mode : 4; + u64 __pad2 : 48; + }; + u64 word; +} e2k_dimcr_t; +#define DIMCR_reg word + +static inline bool dimcr_enabled(e2k_dimcr_t dimcr, int monitor) +{ + return (monitor == 0) ? (AS(dimcr)[0].user || AS(dimcr)[0].system) + : (AS(dimcr)[1].user || AS(dimcr)[1].system); +} + +typedef union { + struct { + u32 b0 : 1; + u32 b1 : 1; + u32 b2 : 1; + u32 b3 : 1; + u32 bt : 1; + u32 m0 : 1; + u32 m1 : 1; + u32 ss : 1; + u32 btf : 1; + }; + struct { /* structure of register */ + u32 b0 : 1; /* [0] */ + u32 b1 : 1; /* */ + u32 b2 : 1; /* */ + u32 b3 : 1; /* */ + u32 bt : 1; /* [4] */ + u32 m0 : 1; /* [5] */ + u32 m1 : 1; /* [6] */ + u32 ss : 1; /* [7] */ + u32 btf : 1; /* [8] */ + } fields; + u32 word; +} e2k_dibsr_t; +#define DIBSR_reg word + +#define E2K_DIBSR_MASK(cp_num) (0x1ULL << (cp_num)) +#define E2K_DIBSR_MASK_ALL_BP 0xfULL + +typedef union { + struct { + u32 v0 : 1; + u32 t0 : 1; + u32 v1 : 1; + u32 t1 : 1; + u32 v2 : 1; + u32 t2 : 1; + u32 v3 : 1; + u32 t3 : 1; + u32 bt : 1; + u32 stop : 1; + u32 btf : 1; + u32 gm : 1; + }; + struct { + u32 v0 : 1; + u32 t0 : 1; + u32 v1 : 1; + u32 t1 : 1; + u32 v2 : 1; + u32 t2 : 1; + u32 v3 : 1; + u32 t3 : 1; + u32 bt : 1; + u32 stop : 1; + u32 btf : 1; + u32 gm : 1; + } fields; + u32 word; +} e2k_dibcr_t; +#define DIBCR_reg word + +#define E2K_DIBCR_MASK(cp_num) (0x3ULL << ((cp_num) * 2)) + +typedef union 
e2k_dimtp { + struct { + struct { + u64 base : E2K_VA_SIZE; + u64 __pad1 : 59 - E2K_VA_SIZE; + u64 rw : 2; + u64 __pad2 : 3; + }; + struct { + u64 ind : 32; + u64 size : 32; + }; + }; + struct { + u64 lo; + u64 hi; + }; +} e2k_dimtp_t; + +#define E2K_DIMTP_ALIGN 32 + + +/* + * Global registers (saved state) definition + */ +typedef struct e2k_svd_gregs_struct { + u64 base; /* exists any time */ + u32 extension; /* when holds an FP value */ + u8 tag; /* any time too */ +} e2k_svd_gregs_t; + + +struct hw_stacks { + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_pshtp_t pshtp; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_pcshtp_t pcshtp; +}; + + +typedef union { + struct { + u64 div : 32; + u64 mdiv : 1; + u64 mode : 1; + u64 trn : 1; + u64 sw : 1; + u64 wsclkr : 1; + u64 __pad1 : 19; + u64 ver : 8; + }; + struct { + u64 __pad2 : 56; + u64 w_sclkr_hi : 1; + u64 sclkm3 : 1; + u64 __pad3 : 6; + }; + u64 word; +} e2k_sclkm1_t; + +typedef enum cu_reg_no { + undef_cu_reg_no = -1, + SCLKM1_cu_reg_no = 0x70, + SCLKM2_cu_reg_no = 0x71, + SCLKM3_cu_reg_no = 0x72, + IDR_cu_reg_no = 0x8a, + CLKR_cu_reg_no = 0x90, + SCLKR_cu_reg_no = 0x92, + DIBCR_cu_reg_no = 0x40, + DIMCR_cu_reg_no = 0x41, + DIBSR_cu_reg_no = 0x42, + DTCR_cu_reg_no = 0x43, + DIMTP_hi_cu_reg_no = 0x46, + DIMTP_lo_cu_reg_no = 0x47, + DIBAR0_cu_reg_no = 0x48, + DIBAR1_cu_reg_no = 0x49, + DIBAR2_cu_reg_no = 0x4a, + DIBAR3_cu_reg_no = 0x4b, + DIMAR0_cu_reg_no = 0x4c, + DIMAR1_cu_reg_no = 0x4d, + DTARF_cu_reg_no = 0x4e, + DTART_cu_reg_no = 0x4f, +} cu_reg_no_t; + +#endif /* ! 
__ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* _E2K_CPU_REGS_TYPES_H_ */ diff --git a/arch/e2k/include/asm/current.h b/arch/e2k/include/asm/current.h new file mode 100644 index 000000000000..c2018802277e --- /dev/null +++ b/arch/e2k/include/asm/current.h @@ -0,0 +1,12 @@ +#ifndef _E2K_CURRENT_H +#define _E2K_CURRENT_H + +#include +#include + +struct task_struct; +register struct task_struct *current DO_ASM_GET_GREG_MEMONIC( + CURRENT_TASK_GREG); +#define native_current() current + +#endif /* _E2K_CURRENT_H */ diff --git a/arch/e2k/include/asm/debug_print.h b/arch/e2k/include/asm/debug_print.h new file mode 100644 index 000000000000..e31830ce1a21 --- /dev/null +++ b/arch/e2k/include/asm/debug_print.h @@ -0,0 +1,46 @@ +#ifndef _DEBUG_PRINT_H_ +#define _DEBUG_PRINT_H_ + +#include +#include + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ + +#ifdef E2K_P2V + +#include + +# define DebugPrint(condition, fmt, ...) \ +do { \ + if (condition) \ + do_boot_printk("%s: " fmt, __func__ ,##__VA_ARGS__); \ +} while (0) + +# define DebugPrintCont(condition, fmt, ...) \ +do { \ + if (condition) \ + do_boot_printk(fmt, ##__VA_ARGS__); \ +} while (0) + +#else + +# define DebugPrint(condition, fmt, ...) \ +do { \ + if (condition) \ + printk(KERN_DEBUG "%d %d %s: " fmt, \ + raw_smp_processor_id(), current->pid, __func__ , \ + ##__VA_ARGS__); \ +} while (0) + +# define DebugPrintCont(condition, fmt, ...) 
\ +do { \ + if (condition) \ + printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ +} while (0) + +#endif + +#endif +#endif +#endif /* _DEBUG_PRINT_H_ */ diff --git a/arch/e2k/include/asm/delay.h b/arch/e2k/include/asm/delay.h new file mode 100644 index 000000000000..990093245f74 --- /dev/null +++ b/arch/e2k/include/asm/delay.h @@ -0,0 +1,7 @@ +#ifndef _E2K_DELAY_H_ +#define _E2K_DELAY_H_ + +extern void udelay(unsigned long usecs); +extern void __delay(unsigned long loops); + +#endif /* _E2K_DELAY_H_ */ diff --git a/arch/e2k/include/asm/device.h b/arch/e2k/include/asm/device.h new file mode 100644 index 000000000000..1827e0b570bc --- /dev/null +++ b/arch/e2k/include/asm/device.h @@ -0,0 +1,27 @@ +#ifndef _ASM_E2K_DEVICE_H +/* + * Arch specific extensions to struct device + * + * This file is released under the GPLv2 + */ +#include + +struct dev_archdata { + unsigned int link; +#ifdef CONFIG_IOMMU_API + void *iommu; /* private IOMMU data */ + struct e2k_iommu_domain *domain; /* Domain the device is bound to */ + struct kvm *kvm; /* Virtual machine, to which device is + * passed */ +#endif +}; + +struct pdev_archdata { +}; + +#define dev_to_link(__dev) (__dev ? 
(__dev)->archdata.link : 0) +#define set_dev_link(__dev, __link) do { \ + (__dev)->archdata.link = __link; \ + } while(0) + +#endif /* _ASM_E2K_DEVICE_H */ diff --git a/arch/e2k/include/asm/dma-direct.h b/arch/e2k/include/asm/dma-direct.h new file mode 100644 index 000000000000..e200ef98b5c0 --- /dev/null +++ b/arch/e2k/include/asm/dma-direct.h @@ -0,0 +1,6 @@ +#ifndef _ASM_E2K_DMA_DIRECT_H +#define _ASM_E2K_DMA_DIRECT_H + +#include + +#endif /* _ASM_E2K_DMA_DIRECT_H */ diff --git a/arch/e2k/include/asm/dma-mapping.h b/arch/e2k/include/asm/dma-mapping.h new file mode 100644 index 000000000000..2e568dc978bb --- /dev/null +++ b/arch/e2k/include/asm/dma-mapping.h @@ -0,0 +1,6 @@ +#ifndef _ASM_E2K_DMA_MAPPING_H +#define _ASM_E2K_DMA_MAPPING_H + +#include + +#endif /* _ASM_E2K_DMA_MAPPING_H */ diff --git a/arch/e2k/include/asm/dma.h b/arch/e2k/include/asm/dma.h new file mode 100644 index 000000000000..80ffae359ff6 --- /dev/null +++ b/arch/e2k/include/asm/dma.h @@ -0,0 +1,297 @@ +/* $Id: dma.h,v 1.4 2006/02/02 14:25:30 atic Exp $ + * linux/include/asm/dma.h: Defines for using and allocating dma channels. + * Written by Hennus Bergman, 1992. + * High DMA channel support & info by Hannu Savolainen + * and John Boyd, Nov. 1992. + */ + +#ifndef _ASM_DMA_H +#define _ASM_DMA_H + +#include /* And spinlocks */ +#include /* need byte IO */ +#include + + +#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER +#define dma_outb outb_p +#else +#define dma_outb outb +#endif + +#define dma_inb inb + +/* + * NOTES about DMA transfers: + * + * controller 1: channels 0-3, byte operations, ports 00-1F + * controller 2: channels 4-7, word operations, ports C0-DF + * + * - ALL registers are 8 bits only, regardless of transfer size + * - channel 4 is not used - cascades 1 into 2. 
+ * - channels 0-3 are byte - addresses/counts are for physical bytes + * - channels 5-7 are word - addresses/counts are for physical words + * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries + * - transfer count loaded to registers is 1 less than actual count + * - controller 2 offsets are all even (2x offsets for controller 1) + * - page registers for 5-7 don't use data bit 0, represent 128K pages + * - page registers for 0-3 use bit 0, represent 64K pages + * + * DMA transfers are limited to the lower 16MB of _physical_ memory. + * Note that addresses loaded into registers must be _physical_ addresses, + * not logical addresses (which may differ if paging is active). + * + * Address mapping for channels 0-3: + * + * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) + * | ... | | ... | | ... | + * | ... | | ... | | ... | + * | ... | | ... | | ... | + * P7 ... P0 A7 ... A0 A7 ... A0 + * | Page | Addr MSB | Addr LSB | (DMA registers) + * + * Address mapping for channels 5-7: + * + * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) + * | ... | \ \ ... \ \ \ ... \ \ + * | ... | \ \ ... \ \ \ ... \ (not used) + * | ... | \ \ ... \ \ \ ... \ + * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0 + * | Page | Addr MSB | Addr LSB | (DMA registers) + * + * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses + * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at + * the hardware level, so odd-byte transfers aren't possible). + * + * Transfer count (_not # bytes_) is limited to 64K, represented as actual + * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, + * and up to 128K bytes may be transferred on channels 5-7 in one operation. 
+ * + */ + +#define MAX_DMA_CHANNELS 8 + +/* The maximum address that we can perform a DMA transfer to on this platform */ +#define MAX_DMA_ADDRESS (PAGE_OFFSET + (1UL << ARCH_ZONE_DMA_BITS)) + +/* 8237 DMA controllers */ +#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ +#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ + +/* DMA controller registers */ +#define DMA1_CMD_REG 0x08 /* command register (w) */ +#define DMA1_STAT_REG 0x08 /* status register (r) */ +#define DMA1_REQ_REG 0x09 /* request register (w) */ +#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ +#define DMA1_MODE_REG 0x0B /* mode register (w) */ +#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ +#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ +#define DMA1_RESET_REG 0x0D /* Master Clear (w) */ +#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ +#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ + +#define DMA2_CMD_REG 0xD0 /* command register (w) */ +#define DMA2_STAT_REG 0xD0 /* status register (r) */ +#define DMA2_REQ_REG 0xD2 /* request register (w) */ +#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ +#define DMA2_MODE_REG 0xD6 /* mode register (w) */ +#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ +#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ +#define DMA2_RESET_REG 0xDA /* Master Clear (w) */ +#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ +#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ + +#define DMA_ADDR_0 0x00 /* DMA address registers */ +#define DMA_ADDR_1 0x02 +#define DMA_ADDR_2 0x04 +#define DMA_ADDR_3 0x06 +#define DMA_ADDR_4 0xC0 +#define DMA_ADDR_5 0xC4 +#define DMA_ADDR_6 0xC8 +#define DMA_ADDR_7 0xCC + +#define DMA_CNT_0 0x01 /* DMA count registers */ +#define DMA_CNT_1 0x03 +#define DMA_CNT_2 0x05 +#define DMA_CNT_3 0x07 +#define DMA_CNT_4 0xC2 +#define DMA_CNT_5 0xC6 +#define DMA_CNT_6 0xCA +#define DMA_CNT_7 0xCE + +#define DMA_PAGE_0 0x87 /* DMA page 
registers */ +#define DMA_PAGE_1 0x83 +#define DMA_PAGE_2 0x81 +#define DMA_PAGE_3 0x82 +#define DMA_PAGE_5 0x8B +#define DMA_PAGE_6 0x89 +#define DMA_PAGE_7 0x8A + +#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ +#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ +#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ + +#define DMA_AUTOINIT 0x10 + + +extern spinlock_t dma_spin_lock; + +static __inline__ unsigned long claim_dma_lock(void) +{ + unsigned long flags; + spin_lock_irqsave(&dma_spin_lock, flags); + return flags; +} + +static __inline__ void release_dma_lock(unsigned long flags) +{ + spin_unlock_irqrestore(&dma_spin_lock, flags); +} + +/* enable/disable a specific DMA channel */ +static __inline__ void enable_dma(unsigned int dmanr) +{ + if (dmanr<=3) + dma_outb(dmanr, DMA1_MASK_REG); + else + dma_outb(dmanr & 3, DMA2_MASK_REG); +} + +static __inline__ void disable_dma(unsigned int dmanr) +{ + if (dmanr<=3) + dma_outb(dmanr | 4, DMA1_MASK_REG); + else + dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); +} + +/* Clear the 'DMA Pointer Flip Flop'. + * Write 0 for LSB/MSB, 1 for MSB/LSB access. + * Use this once to initialize the FF to a known state. + * After that, keep track of it. :-) + * --- In order to do that, the DMA routines below should --- + * --- only be used while holding the DMA lock ! --- + */ +static __inline__ void clear_dma_ff(unsigned int dmanr) +{ + if (dmanr<=3) + dma_outb(0, DMA1_CLEAR_FF_REG); + else + dma_outb(0, DMA2_CLEAR_FF_REG); +} + +/* set mode (above) for a specific DMA channel */ +static __inline__ void set_dma_mode(unsigned int dmanr, char mode) +{ + if (dmanr<=3) + dma_outb(mode | dmanr, DMA1_MODE_REG); + else + dma_outb(mode | (dmanr&3), DMA2_MODE_REG); +} + +/* Set only the page register bits of the transfer address. 
+ * This is used for successive transfers when we know the contents of + * the lower 16 bits of the DMA current address register, but a 64k boundary + * may have been crossed. + */ +static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) +{ + switch(dmanr) { + case 0: + dma_outb(pagenr, DMA_PAGE_0); + break; + case 1: + dma_outb(pagenr, DMA_PAGE_1); + break; + case 2: + dma_outb(pagenr, DMA_PAGE_2); + break; + case 3: + dma_outb(pagenr, DMA_PAGE_3); + break; + case 5: + dma_outb(pagenr & 0xfe, DMA_PAGE_5); + break; + case 6: + dma_outb(pagenr & 0xfe, DMA_PAGE_6); + break; + case 7: + dma_outb(pagenr & 0xfe, DMA_PAGE_7); + break; + } +} + + +/* Set transfer address & page bits for specific DMA channel. + * Assumes dma flipflop is clear. + */ +static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) +{ + set_dma_page(dmanr, a>>16); + if (dmanr <= 3) { + dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); + dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); + } else { + dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); + dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); + } +} + + +/* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for + * a specific DMA channel. + * You must ensure the parameters are valid. + * NOTE: from a manual: "the number of transfers is one more + * than the initial word count"! This is taken into account. + * Assumes dma flip-flop is clear. + * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. + */ +static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) +{ + count--; + if (dmanr <= 3) { + dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); + dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); + } else { + dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); + dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); + } +} + + +/* Get DMA residue count. 
After a DMA transfer, this + * should return zero. Reading this while a DMA transfer is + * still in progress will return unpredictable results. + * If called before the channel has been used, it may return 1. + * Otherwise, it returns the number of _bytes_ left to transfer. + * + * Assumes DMA flip-flop is clear. + */ +static __inline__ int get_dma_residue(unsigned int dmanr) +{ + unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE + : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE; + + /* using short to get 16-bit wrap around */ + unsigned short count; + + count = 1 + dma_inb(io_port); + count += dma_inb(io_port) << 8; + + return (dmanr<=3)? count : (count<<1); +} + + +/* These are in kernel/dma.c: */ +extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */ +extern void free_dma(unsigned int dmanr); /* release it again */ + +/* From PCI */ + +#ifdef CONFIG_PCI +extern int isa_dma_bridge_buggy; +#else +#define isa_dma_bridge_buggy (0) +#endif + +#endif /* _ASM_DMA_H */ diff --git a/arch/e2k/include/asm/e12c.h b/arch/e2k/include/asm/e12c.h new file mode 100644 index 000000000000..4e5311482f89 --- /dev/null +++ b/arch/e2k/include/asm/e12c.h @@ -0,0 +1,57 @@ +#ifndef _ASM_E12C_H_ +#define _ASM_E12C_H_ + +/* + * Machine (based on E12C processor) topology: + * E12C is NUMA system on distributed memory and can have several nodes. 
+ * Each node can have some memory (faster to access) and max 12 CPUs (cores) + * Node number is the same as chip-processor number + * Some nodes (CPUs) can be without memory + * LAPIC cluster number is the same as node number + */ + +#ifndef __ASSEMBLY__ +struct pt_regs; + +extern void boot_e12c_setup_arch(void); +extern void e12c_setup_machine(void); +#endif + +#define E12C_NR_NODE_CPUS 12 +#define E12C_MAX_NR_NODE_CPUS 16 + +#define E12C_NODE_IOLINKS 1 + +#define E12C_PCICFG_AREA_PHYS_BASE ES2_PCICFG_AREA_PHYS_BASE +#define E12C_PCICFG_AREA_SIZE ES2_PCICFG_AREA_SIZE + +#define E12C_NSR_AREA_PHYS_BASE ES2_NSR_AREA_PHYS_BASE + +#define E12C_NBSR_AREA_OFFSET ES2_NBSR_AREA_OFFSET +#define E12C_NBSR_AREA_SIZE ES2_NBSR_AREA_SIZE + +#define E12C_COPSR_AREA_PHYS_BASE ES2_COPSR_AREA_PHYS_BASE +#define E12C_COPSR_AREA_SIZE ES2_COPSR_AREA_SIZE + +#define E12C_MLT_SIZE ES2_MLT_SIZE + +#define E12C_TLB_LINES_BITS_NUM ES2_TLB_LINES_BITS_NUM +#define E12C_TLB_ADDR_LINE_NUM E2S_TLB_ADDR_LINE_NUM +#define E12C_TLB_ADDR_LINE_NUM2 E2S_TLB_ADDR_LINE_NUM2 +#define E12C_TLB_ADDR_LINE_NUM_SHIFT2 E2S_TLB_ADDR_LINE_NUM_SHIFT2 +#define E12C_TLB_ADDR_SET_NUM E2S_TLB_ADDR_SET_NUM +#define E12C_TLB_ADDR_SET_NUM_SHIFT E2S_TLB_ADDR_SET_NUM_SHIFT + +#define E12C_SIC_MC_SIZE E16C_SIC_MC_SIZE +#define E12C_SIC_MC_COUNT 2 + +#define E12C_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE + +#define E12C_L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT +#define E12C_L1_CACHE_BYTES ES2_L1_CACHE_BYTES +#define E12C_L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT +#define E12C_L2_CACHE_BYTES ES2_L2_CACHE_BYTES +#define E12C_L3_CACHE_SHIFT E8C_L3_CACHE_SHIFT +#define E12C_L3_CACHE_BYTES E8C_L3_CACHE_BYTES + +#endif /* _ASM_E12C_H_ */ diff --git a/arch/e2k/include/asm/e16c.h b/arch/e2k/include/asm/e16c.h new file mode 100644 index 000000000000..4cfe6d4e5ddf --- /dev/null +++ b/arch/e2k/include/asm/e16c.h @@ -0,0 +1,57 @@ +#ifndef _ASM_E16C_H_ +#define _ASM_E16C_H_ + +/* + * Machine (based on E16C processor) topology: + * E16C is NUMA system on 
distributed memory and can have several nodes. + * Each node can have some memory (faster to access) and max 16 CPUs (cores) + * Node number is the same as chip-processor number + * Some nodes (CPUs) can be without memory + * LAPIC cluster number is the same as node number + */ + +#ifndef __ASSEMBLY__ +struct pt_regs; + +extern void boot_e16c_setup_arch(void); +extern void e16c_setup_machine(void); +#endif + +#define E16C_NR_NODE_CPUS 16 +#define E16C_MAX_NR_NODE_CPUS 16 + +#define E16C_NODE_IOLINKS 1 + +#define E16C_PCICFG_AREA_PHYS_BASE ES2_PCICFG_AREA_PHYS_BASE +#define E16C_PCICFG_AREA_SIZE ES2_PCICFG_AREA_SIZE + +#define E16C_NSR_AREA_PHYS_BASE ES2_NSR_AREA_PHYS_BASE + +#define E16C_NBSR_AREA_OFFSET ES2_NBSR_AREA_OFFSET +#define E16C_NBSR_AREA_SIZE ES2_NBSR_AREA_SIZE + +#define E16C_COPSR_AREA_PHYS_BASE ES2_COPSR_AREA_PHYS_BASE +#define E16C_COPSR_AREA_SIZE ES2_COPSR_AREA_SIZE + +#define E16C_MLT_SIZE ES2_MLT_SIZE + +#define E16C_TLB_LINES_BITS_NUM ES2_TLB_LINES_BITS_NUM +#define E16C_TLB_ADDR_LINE_NUM E2S_TLB_ADDR_LINE_NUM +#define E16C_TLB_ADDR_LINE_NUM2 E2S_TLB_ADDR_LINE_NUM2 +#define E16C_TLB_ADDR_LINE_NUM_SHIFT2 E2S_TLB_ADDR_LINE_NUM_SHIFT2 +#define E16C_TLB_ADDR_SET_NUM E2S_TLB_ADDR_SET_NUM +#define E16C_TLB_ADDR_SET_NUM_SHIFT E2S_TLB_ADDR_SET_NUM_SHIFT + +#define E16C_SIC_MC_SIZE 0x60 +#define E16C_SIC_MC_COUNT 8 + +#define E16C_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE + +#define E16C_L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT +#define E16C_L1_CACHE_BYTES ES2_L1_CACHE_BYTES +#define E16C_L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT +#define E16C_L2_CACHE_BYTES ES2_L2_CACHE_BYTES +#define E16C_L3_CACHE_SHIFT E8C_L3_CACHE_SHIFT +#define E16C_L3_CACHE_BYTES E8C_L3_CACHE_BYTES + +#endif /* _ASM_E16C_H_ */ diff --git a/arch/e2k/include/asm/e1cp.h b/arch/e2k/include/asm/e1cp.h new file mode 100644 index 000000000000..02189e1d26b3 --- /dev/null +++ b/arch/e2k/include/asm/e1cp.h @@ -0,0 +1,46 @@ +#ifndef _ASM_E1CP_H_ +#define _ASM_E1CP_H_ + +/* + * Machine (based on E1C+ processor) 
topology: + * E1C+ is one core CPU + graphical processor to support 3D, so + * - is not NUMA system + * - is not SMP system + */ + +#ifndef __ASSEMBLY__ +extern void boot_e1cp_setup_arch(void); +extern void e1cp_setup_machine(void); +#endif + +#define E1CP_NR_NODE_CPUS 1 +#define E1CP_MAX_NR_NODE_CPUS E1CP_NR_NODE_CPUS + +#define E1CP_NODE_IOLINKS 2 + +#define E1CP_PCICFG_AREA_PHYS_BASE 0x000000ff10000000UL +#define E1CP_PCICFG_AREA_SIZE 0x0000000010000000UL + +#define E1CP_NBSR_AREA_OFFSET E2S_NBSR_AREA_OFFSET +#define E1CP_NBSR_AREA_SIZE E2S_NBSR_AREA_SIZE + +#define E1CP_MLT_SIZE ES2_MLT_SIZE + +#define E1CP_TLB_LINES_BITS_NUM ES2_TLB_LINES_BITS_NUM +#define E1CP_TLB_ADDR_LINE_NUM E2S_TLB_ADDR_LINE_NUM +#define E1CP_TLB_ADDR_LINE_NUM2 E2S_TLB_ADDR_LINE_NUM2 +#define E1CP_TLB_ADDR_LINE_NUM_SHIFT2 E2S_TLB_ADDR_LINE_NUM_SHIFT2 +#define E1CP_TLB_ADDR_SET_NUM E2S_TLB_ADDR_SET_NUM +#define E1CP_TLB_ADDR_SET_NUM_SHIFT E2S_TLB_ADDR_SET_NUM_SHIFT + +#define E1CP_SIC_MC_COUNT ES2_SIC_MC_COUNT +#define E1CP_SIC_MC1_ECC E2S_SIC_MC1_ECC + +#define E1CP_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE + +#define E1CP_L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT +#define E1CP_L1_CACHE_BYTES ES2_L1_CACHE_BYTES +#define E1CP_L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT +#define E1CP_L2_CACHE_BYTES ES2_L2_CACHE_BYTES + +#endif /* _ASM_E1CP_H_ */ diff --git a/arch/e2k/include/asm/e2c3.h b/arch/e2k/include/asm/e2c3.h new file mode 100644 index 000000000000..aa4419bfe160 --- /dev/null +++ b/arch/e2k/include/asm/e2c3.h @@ -0,0 +1,55 @@ +#ifndef _ASM_E2C3_H_ +#define _ASM_E2C3_H_ + +/* + * Machine (based on E2C3 processor) topology: + * E2C3 is NUMA system on distributed memory and can have several nodes. 
+ * Each node can have some memory (faster to access) and max 2 CPUs (cores) + * Node number is the same as chip-processor number + * Some nodes (CPUs) can be without memory + * LAPIC cluster number is the same as node number + */ + +#ifndef __ASSEMBLY__ +struct pt_regs; + +extern void boot_e2c3_setup_arch(void); +extern void e2c3_setup_machine(void); +#endif + +#define E2C3_NR_NODE_CPUS 2 +#define E2C3_MAX_NR_NODE_CPUS 16 + +#define E2C3_NODE_IOLINKS 1 + +#define E2C3_PCICFG_AREA_PHYS_BASE ES2_PCICFG_AREA_PHYS_BASE +#define E2C3_PCICFG_AREA_SIZE ES2_PCICFG_AREA_SIZE + +#define E2C3_NSR_AREA_PHYS_BASE ES2_NSR_AREA_PHYS_BASE + +#define E2C3_NBSR_AREA_OFFSET ES2_NBSR_AREA_OFFSET +#define E2C3_NBSR_AREA_SIZE ES2_NBSR_AREA_SIZE + +#define E2C3_COPSR_AREA_PHYS_BASE ES2_COPSR_AREA_PHYS_BASE +#define E2C3_COPSR_AREA_SIZE ES2_COPSR_AREA_SIZE + +#define E2C3_MLT_SIZE ES2_MLT_SIZE + +#define E2C3_TLB_LINES_BITS_NUM ES2_TLB_LINES_BITS_NUM +#define E2C3_TLB_ADDR_LINE_NUM E2S_TLB_ADDR_LINE_NUM +#define E2C3_TLB_ADDR_LINE_NUM2 E2S_TLB_ADDR_LINE_NUM2 +#define E2C3_TLB_ADDR_LINE_NUM_SHIFT2 E2S_TLB_ADDR_LINE_NUM_SHIFT2 +#define E2C3_TLB_ADDR_SET_NUM E2S_TLB_ADDR_SET_NUM +#define E2C3_TLB_ADDR_SET_NUM_SHIFT E2S_TLB_ADDR_SET_NUM_SHIFT + +#define E2C3_SIC_MC_SIZE E16C_SIC_MC_SIZE +#define E2C3_SIC_MC_COUNT E12C_SIC_MC_COUNT + +#define E2C3_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE + +#define E2C3_L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT +#define E2C3_L1_CACHE_BYTES ES2_L1_CACHE_BYTES +#define E2C3_L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT +#define E2C3_L2_CACHE_BYTES ES2_L2_CACHE_BYTES + +#endif /* _ASM_E2C3_H_ */ diff --git a/arch/e2k/include/asm/e2k-iommu.h b/arch/e2k/include/asm/e2k-iommu.h new file mode 100644 index 000000000000..5d619ec266cd --- /dev/null +++ b/arch/e2k/include/asm/e2k-iommu.h @@ -0,0 +1,11 @@ +#ifndef __ASM_E2K_IOMMU_H +#define __ASM_E2K_IOMMU_H + + +extern int iommu_panic_off; +extern void e2k_iommu_error_interrupt(void); +extern void e2k_iommu_guest_write_ctrl(u32 reg_value); 
+extern void e2k_iommu_setup_guest_2d_dte(struct kvm *kvm, u64 g_page_table); +extern void e2k_iommu_flush_guest(struct kvm *kvm, u64 command); + +#endif /* __ASM_E2K_IOMMU_H */ diff --git a/arch/e2k/include/asm/e2k.h b/arch/e2k/include/asm/e2k.h new file mode 100644 index 000000000000..ef02fc702662 --- /dev/null +++ b/arch/e2k/include/asm/e2k.h @@ -0,0 +1,424 @@ +#ifndef _ASM_E2K_H_ +#define _ASM_E2K_H_ + +#include +#include + +#include /* E2K_VA_MSB */ +#include +#include +#include +#include +#include +#include +#include + +/* CPU model numbers */ +#define IDR_E2K_VIRT_MDL 0x00 /* machine is virtual, so CPUs also */ + +#define MACHINE_ID_NONE 0x0000 +#define MACHINE_ID_CPU_TYPE_MASK 0x000f +#define MACHINE_ID_SIMUL 0x0010 +#define MACHINE_ID_E2K_FULL_SIC 0x0020 +#define MACHINE_ID_E2K_IOHUB 0x0040 +#define MACHINE_ID_L_IOMMU 0x0080 +#define MACHINE_ID_E2K_LEGACY_SIC 0x0100 /* host bridge & legacy NBSR */ +#define MACHINE_ID_E2K_VIRT_IO 0x0400 /* machine is virtual and */ + /* IO simulates on user level */ + /* (for example by QEMU) */ +#define MACHINE_ID_HW_VIRT 0x4000 /* hardware virtualized VM */ +#define MACHINE_ID_VIRT 0x8000 /* soft paravirtualized VM */ +#define MACHINE_ID_E2K_IOMMU 0x10000 + +#define MACHINE_ID_ES2_DSP (IDR_ES2_DSP_MDL | \ + MACHINE_ID_E2K_FULL_SIC | \ + MACHINE_ID_E2K_IOHUB) +#define MACHINE_ID_ES2_RU (IDR_ES2_RU_MDL | \ + MACHINE_ID_E2K_FULL_SIC | \ + MACHINE_ID_E2K_IOHUB) +#define MACHINE_ID_E2S (IDR_E2S_MDL | \ + MACHINE_ID_E2K_FULL_SIC | \ + MACHINE_ID_E2K_IOHUB | \ + MACHINE_ID_L_IOMMU) +#define MACHINE_ID_E8C (IDR_E8C_MDL | \ + MACHINE_ID_E2K_FULL_SIC | \ + MACHINE_ID_E2K_IOHUB | \ + MACHINE_ID_L_IOMMU) +#define MACHINE_ID_E1CP (IDR_E1CP_MDL | \ + MACHINE_ID_E2K_LEGACY_SIC | \ + MACHINE_ID_E2K_IOHUB | \ + MACHINE_ID_L_IOMMU) +#define MACHINE_ID_E8C2 (IDR_E8C2_MDL | \ + MACHINE_ID_E2K_FULL_SIC | \ + MACHINE_ID_E2K_IOHUB | \ + MACHINE_ID_L_IOMMU) +/* + * IO_* NBSRs are absent in models with EIOHub. 
Using LEGACY_SIC with FULL_SIC + * helps to avoid reading those NBSRs while still using IO_AREA_PHYS_BASE + * defined for FULL_SIC + */ +#define MACHINE_ID_E12C (IDR_E12C_MDL | \ + MACHINE_ID_E2K_LEGACY_SIC | \ + MACHINE_ID_E2K_FULL_SIC | \ + MACHINE_ID_E2K_IOHUB | \ + MACHINE_ID_E2K_IOMMU) +#define MACHINE_ID_E16C (IDR_E16C_MDL | \ + MACHINE_ID_E2K_LEGACY_SIC | \ + MACHINE_ID_E2K_FULL_SIC | \ + MACHINE_ID_E2K_IOHUB | \ + MACHINE_ID_E2K_IOMMU) +#define MACHINE_ID_E2C3 (IDR_E2C3_MDL | \ + MACHINE_ID_E2K_LEGACY_SIC | \ + MACHINE_ID_E2K_FULL_SIC | \ + MACHINE_ID_E2K_IOHUB | \ + MACHINE_ID_E2K_IOMMU) +#define MACHINE_ID_E2K_VIRT (IDR_E2K_VIRT_MDL | \ + MACHINE_ID_E2K_FULL_SIC | \ + MACHINE_ID_E2K_IOHUB | \ + MACHINE_ID_E2K_VIRT_IO) +#define MACHINE_ID_E2K_HW_VIRT (IDR_E2K_VIRT_MDL | \ + MACHINE_ID_HW_VIRT | \ + MACHINE_ID_E2K_LEGACY_SIC | \ + MACHINE_ID_E2K_FULL_SIC | \ + MACHINE_ID_E2K_IOHUB | \ + MACHINE_ID_E2K_IOMMU | \ + MACHINE_ID_E2K_VIRT_IO) + +#define MACHINE_ID_ES2_DSP_LMS (MACHINE_ID_ES2_DSP | \ + MACHINE_ID_SIMUL) +#define MACHINE_ID_ES2_RU_LMS (MACHINE_ID_ES2_RU | MACHINE_ID_SIMUL) +#define MACHINE_ID_E2S_LMS (MACHINE_ID_E2S | MACHINE_ID_SIMUL) +#define MACHINE_ID_E8C_LMS (MACHINE_ID_E8C | MACHINE_ID_SIMUL) +#define MACHINE_ID_E1CP_LMS (MACHINE_ID_E1CP | MACHINE_ID_SIMUL) +#define MACHINE_ID_E8C2_LMS (MACHINE_ID_E8C2 | MACHINE_ID_SIMUL) +#define MACHINE_ID_E12C_LMS (MACHINE_ID_E12C | MACHINE_ID_SIMUL) +#define MACHINE_ID_E16C_LMS (MACHINE_ID_E16C | MACHINE_ID_SIMUL) +#define MACHINE_ID_E2C3_LMS (MACHINE_ID_E2C3 | MACHINE_ID_SIMUL) + +#define MACHINE_ID_VIRT_ES2_DSP (MACHINE_ID_ES2_DSP | MACHINE_ID_VIRT) +#define MACHINE_ID_VIRT_ES2_RU (MACHINE_ID_ES2_RU | MACHINE_ID_VIRT) +#define MACHINE_ID_VIRT_E2S (MACHINE_ID_E2S | MACHINE_ID_VIRT) +#define MACHINE_ID_VIRT_E8C (MACHINE_ID_E8C | MACHINE_ID_VIRT) +#define MACHINE_ID_VIRT_E1CP (MACHINE_ID_E1CP | MACHINE_ID_VIRT) +#define MACHINE_ID_VIRT_E8C2 (MACHINE_ID_E8C2 | MACHINE_ID_VIRT) +#define MACHINE_ID_VIRT_E12C 
(MACHINE_ID_E12C | MACHINE_ID_VIRT) +#define MACHINE_ID_VIRT_E16C (MACHINE_ID_E16C | MACHINE_ID_VIRT) +#define MACHINE_ID_VIRT_E2C3 (MACHINE_ID_E2C3 | MACHINE_ID_VIRT) + +#ifdef CONFIG_E2K_SIMULATOR +# define MACHINE_SIMUL_FLAG MACHINE_ID_SIMUL +#else +# define MACHINE_SIMUL_FLAG 0 +#endif + +#ifdef CONFIG_E2K_MACHINE + #if defined(CONFIG_E2K_ES2_DSP) + #define native_machine_id (MACHINE_ID_ES2_DSP | MACHINE_SIMUL_FLAG) + #elif defined(CONFIG_E2K_ES2_RU) + #define native_machine_id (MACHINE_ID_ES2_RU | MACHINE_SIMUL_FLAG) + #elif defined(CONFIG_E2K_E2S) + #define native_machine_id (MACHINE_ID_E2S | MACHINE_SIMUL_FLAG) + #elif defined(CONFIG_E2K_E8C) + #define native_machine_id (MACHINE_ID_E8C | MACHINE_SIMUL_FLAG) + #elif defined(CONFIG_E2K_E1CP) + #define native_machine_id (MACHINE_ID_E1CP | MACHINE_SIMUL_FLAG) + #elif defined(CONFIG_E2K_E8C2) + #define native_machine_id (MACHINE_ID_E8C2 | MACHINE_SIMUL_FLAG) + #elif defined(CONFIG_E2K_E12C) + #define native_machine_id (MACHINE_ID_E12C | MACHINE_SIMUL_FLAG) + #elif defined(CONFIG_E2K_E16C) + #define native_machine_id (MACHINE_ID_E16C | MACHINE_SIMUL_FLAG) + #elif defined(CONFIG_E2K_E2C3) + #define native_machine_id (MACHINE_ID_E2C3 | MACHINE_SIMUL_FLAG) + #else + # error "E2K MACHINE type does not defined" + #endif +#elif defined(CONFIG_ES2) /* can be defined only for tiny boot on lms */ + #define native_machine_id MACHINE_ID_ES2_DSP_LMS +#elif defined(CONFIG_E2S) /* can be defined only for tiny boot on lms */ + #define native_machine_id MACHINE_ID_E2S_LMS +#elif defined(CONFIG_E8C) /* can be defined only for tiny boot on lms */ + #define native_machine_id MACHINE_ID_E8C_LMS +#elif defined(CONFIG_E1CP) /* can be defined only for tiny boot on lms */ + #define native_machine_id MACHINE_ID_E1CP_LMS +#elif defined(CONFIG_E8C2) /* can be defined only for tiny boot on lms */ + #define native_machine_id MACHINE_ID_E8C2_LMS +#elif defined(CONFIG_E12C) /* can be defined only for tiny boot on lms */ + #define 
native_machine_id MACHINE_ID_E12C_LMS +#elif defined(CONFIG_E16C) /* can be defined only for tiny boot on lms */ + #define native_machine_id MACHINE_ID_E16C_LMS +#elif defined(CONFIG_E2C3) /* can be defined only for tiny boot on lms */ + #define native_machine_id MACHINE_ID_E2C3_LMS +#else /* ! CONFIG_E2K_MACHINE && ! our boot on lms */ +extern unsigned int __nodedata native_machine_id; +#endif /* CONFIG_E2K_MACHINE */ + +extern const char *e2k_get_cpu_type_name(int mach_type_id); +extern const char *e2k_get_mach_type_name(int mach_type_id); +extern int e2k_get_machine_type_name(int mach_id); +extern void __init native_setup_machine(void); +extern void native_set_mach_type_id(void); +extern const char *native_get_mach_type_name(void); + +extern void e2k_init_IRQ(void); + +#define IS_THE_MACHINE_ES2_DSP(mach_id) \ + (((mach_id) & MACHINE_ID_CPU_TYPE_MASK) == IDR_ES2_DSP_MDL) +#define IS_THE_MACHINE_ES2_RU(mach_id) \ + (((mach_id) & MACHINE_ID_CPU_TYPE_MASK) == IDR_ES2_RU_MDL) +#define IS_THE_MACHINE_ES2(mach_id) \ + ((IS_THE_MACHINE_ES2_DSP(mach_id)) || \ + (IS_THE_MACHINE_ES2_RU(mach_id))) +#define IS_THE_MACHINE_E2S(mach_id) \ + (((mach_id) & MACHINE_ID_CPU_TYPE_MASK) == IDR_E2S_MDL) +#define IS_THE_MACHINE_E8C(mach_id) \ + (((mach_id) & MACHINE_ID_CPU_TYPE_MASK) == IDR_E8C_MDL) +#define IS_THE_MACHINE_E1CP(mach_id) \ + (((mach_id) & MACHINE_ID_CPU_TYPE_MASK) == IDR_E1CP_MDL) +#define IS_THE_MACHINE_E8C2(mach_id) \ + (((mach_id) & MACHINE_ID_CPU_TYPE_MASK) == IDR_E8C2_MDL) +#define IS_THE_MACHINE_E12C(mach_id) \ + (((mach_id) & MACHINE_ID_CPU_TYPE_MASK) == IDR_E12C_MDL) +#define IS_THE_MACHINE_E16C(mach_id) \ + (((mach_id) & MACHINE_ID_CPU_TYPE_MASK) == IDR_E16C_MDL) +#define IS_THE_MACHINE_E2C3(mach_id) \ + (((mach_id) & MACHINE_ID_CPU_TYPE_MASK) == IDR_E2C3_MDL) +#define IS_THE_MACHINE_E2K_VIRT(mach_id) \ + (((mach_id) & MACHINE_ID_CPU_TYPE_MASK) == IDR_E2K_VIRT_MDL) + +#define IS_THE_MACHINE_SIM(mach_id) \ + (((mach_id) & MACHINE_ID_SIMUL) != 0) + +#define 
HAS_THE_MACHINE_E2K_DSP(mach_id) \ + (IS_THE_MACHINE_ES2_DSP(mach_id)) +#define HAS_THE_MACHINE_E2K_FULL_SIC(mach_id) \ + (((mach_id) & MACHINE_ID_E2K_FULL_SIC) != 0) +#define HAS_THE_MACHINE_E2K_IOHUB(mach_id) \ + (((mach_id) & MACHINE_ID_E2K_IOHUB) != 0) +#define HAS_THE_MACHINE_L_IOMMU(mach_id) \ + (((mach_id) & MACHINE_ID_L_IOMMU) != 0) +#define HAS_THE_MACHINE_E2K_IOMMU(mach_id) \ + (((mach_id) & MACHINE_ID_E2K_IOMMU) != 0) +#define HAS_THE_MACHINE_E2K_LEGACY_SIC(mach_id) \ + (((mach_id) & MACHINE_ID_E2K_LEGACY_SIC) != 0) +#define HAS_THE_MACHINE_L_SIC(mach_id) \ + (HAS_THE_MACHINE_E2K_FULL_SIC(mach_id) || \ + HAS_THE_MACHINE_E2K_LEGACY_SIC(mach_id)) + +#define NATIVE_IS_MACHINE_ES2_DSP \ + IS_THE_MACHINE_ES2_DSP(native_machine_id) +#define NATIVE_IS_MACHINE_ES2_RU \ + IS_THE_MACHINE_ES2_RU(native_machine_id) +#define NATIVE_IS_MACHINE_ES2 \ + IS_THE_MACHINE_ES2(native_machine_id) +#define NATIVE_IS_MACHINE_E2S \ + IS_THE_MACHINE_E2S(native_machine_id) +#define NATIVE_IS_MACHINE_E8C \ + IS_THE_MACHINE_E8C(native_machine_id) +#define NATIVE_IS_MACHINE_E1CP \ + IS_THE_MACHINE_E1CP(native_machine_id) +#define NATIVE_IS_MACHINE_E8C2 \ + IS_THE_MACHINE_E8C2(native_machine_id) +#define NATIVE_IS_MACHINE_E12C \ + IS_THE_MACHINE_E12C(native_machine_id) +#define NATIVE_IS_MACHINE_E16C \ + IS_THE_MACHINE_E16C(native_machine_id) +#define NATIVE_IS_MACHINE_E2C3 \ + IS_THE_MACHINE_E2C3(native_machine_id) +#define NATIVE_IS_MACHINE_E2K_VIRT (false) + +#define BOOT_NATIVE_IS_MACHINE_ES2_DSP \ + IS_THE_MACHINE_ES2_DSP(boot_native_machine_id) +#define BOOT_NATIVE_IS_MACHINE_ES2_RU \ + IS_THE_MACHINE_ES2_RU(boot_native_machine_id) +#define BOOT_NATIVE_IS_MACHINE_ES2 \ + ((BOOT_NATIVE_IS_MACHINE_ES2_DSP) || \ + (BOOT_NATIVE_IS_MACHINE_ES2_RU)) +#define BOOT_NATIVE_IS_MACHINE_E2S \ + IS_THE_MACHINE_E2S(boot_native_machine_id) +#define BOOT_NATIVE_IS_MACHINE_E8C \ + IS_THE_MACHINE_E8C(boot_native_machine_id) +#define BOOT_NATIVE_IS_MACHINE_E1CP \ + 
IS_THE_MACHINE_E1CP(boot_native_machine_id) +#define BOOT_NATIVE_IS_MACHINE_E8C2 \ + IS_THE_MACHINE_E8C2(boot_native_machine_id) +#define BOOT_NATIVE_IS_MACHINE_E12C \ + IS_THE_MACHINE_E12C(boot_native_machine_id) +#define BOOT_NATIVE_IS_MACHINE_E16C \ + IS_THE_MACHINE_E16C(boot_native_machine_id) +#define BOOT_NATIVE_IS_MACHINE_E2C3 \ + IS_THE_MACHINE_E2C3(boot_native_machine_id) +#define BOOT_NATIVE_IS_MACHINE_E2K_VIRT false + +#define NATIVE_IS_MACHINE_SIM \ + IS_THE_MACHINE_SIM(native_machine_id) + +#define NATIVE_HAS_MACHINE_E2K_DSP \ + HAS_THE_MACHINE_E2K_DSP(native_machine_id) +#define NATIVE_HAS_MACHINE_E2K_FULL_SIC \ + HAS_THE_MACHINE_E2K_FULL_SIC(native_machine_id) +#define NATIVE_HAS_MACHINE_E2K_IOHUB \ + HAS_THE_MACHINE_E2K_IOHUB(native_machine_id) +#define NATIVE_HAS_MACHINE_E2K_IOMMU \ + HAS_THE_MACHINE_E2K_IOMMU(native_machine_id) +#define NATIVE_HAS_MACHINE_E2K_LEGACY_SIC \ + HAS_THE_MACHINE_E2K_LEGACY_SIC(native_machine_id) +#define NATIVE_HAS_MACHINE_L_SIC \ + HAS_THE_MACHINE_L_SIC(native_machine_id) + +#define BOOT_NATIVE_IS_MACHINE_SIM \ + IS_THE_MACHINE_SIM(boot_native_machine_id) + +#define BOOT_NATIVE_HAS_MACHINE_E2K_DSP \ + HAS_THE_MACHINE_E2K_DSP(boot_native_machine_id) +#define BOOT_NATIVE_HAS_MACHINE_E2K_FULL_SIC \ + HAS_THE_MACHINE_E2K_FULL_SIC(boot_native_machine_id) +#define BOOT_NATIVE_HAS_MACHINE_E2K_IOHUB \ + HAS_THE_MACHINE_E2K_IOHUB(boot_native_machine_id) +#define BOOT_NATIVE_HAS_MACHINE_E2K_LEGACY_SIC \ + HAS_THE_MACHINE_E2K_LEGACY_SIC(boot_native_machine_id) +#define BOOT_NATIVE_HAS_MACHINE_L_SIC \ + HAS_THE_MACHINE_L_SIC(boot_native_machine_id) + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native guest kernel */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#else /* ! CONFIG_KVM_GUEST_KERNEL && !
CONFIG_PARAVIRT_GUEST */ +/* it is native kernel without any virtualization */ +/* or host kernel with virtualization support */ +/* only native machine is exists and should be examined */ +#define machine_id native_machine_id +#define boot_machine_id boot_native_machine_id + +#define get_machine_id() machine_id +#define boot_get_machine_id() boot_machine_id +#define set_machine_id(mach_id) (machine_id = (mach_id)) +#define boot_set_machine_id(mach_id) (boot_machine_id = (mach_id)) + +static inline void set_mach_type_id(void) +{ + native_set_mach_type_id(); +} + +#define boot_panic(fmt, args...) boot_native_panic(fmt, ##args) + +#endif /* ! CONFIG_KVM_GUEST_KERNEL */ + +#define IS_MACHINE_ES2_DSP \ + IS_THE_MACHINE_ES2_DSP(get_machine_id()) +#define IS_MACHINE_ES2_RU \ + IS_THE_MACHINE_ES2_RU(get_machine_id()) +#define IS_MACHINE_ES2 \ + IS_THE_MACHINE_ES2(get_machine_id()) +#define IS_MACHINE_E2S \ + IS_THE_MACHINE_E2S(get_machine_id()) +#define IS_MACHINE_E8C \ + IS_THE_MACHINE_E8C(get_machine_id()) +#define IS_MACHINE_E1CP \ + IS_THE_MACHINE_E1CP(get_machine_id()) +#define IS_MACHINE_E8C2 \ + IS_THE_MACHINE_E8C2(get_machine_id()) +#define IS_MACHINE_E12C \ + IS_THE_MACHINE_E12C(get_machine_id()) +#define IS_MACHINE_E16C \ + IS_THE_MACHINE_E16C(get_machine_id()) +#define IS_MACHINE_E2C3 \ + IS_THE_MACHINE_E2C3(get_machine_id()) +#define IS_MACHINE_E2K_VIRT \ + IS_THE_MACHINE_E2K_VIRT(get_machine_id()) + +#define HAS_MACHINE_E2K_DSP \ + HAS_THE_MACHINE_E2K_DSP(get_machine_id()) +#define HAS_MACHINE_E2K_FULL_SIC \ + HAS_THE_MACHINE_E2K_FULL_SIC(get_machine_id()) +#define HAS_MACHINE_E2K_IOHUB \ + HAS_THE_MACHINE_E2K_IOHUB(get_machine_id()) +#define HAS_MACHINE_L_IOMMU \ + HAS_THE_MACHINE_L_IOMMU(get_machine_id()) +#define HAS_MACHINE_E2K_IOMMU \ + HAS_THE_MACHINE_E2K_IOMMU(get_machine_id()) +#define HAS_MACHINE_E2K_LEGACY_SIC \ + HAS_THE_MACHINE_E2K_LEGACY_SIC(get_machine_id()) +#define HAS_MACHINE_L_SIC \ + HAS_THE_MACHINE_L_SIC(get_machine_id()) + +#define 
BOOT_IS_MACHINE_ES2_DSP \ + IS_THE_MACHINE_ES2_DSP(boot_get_machine_id()) +#define BOOT_IS_MACHINE_ES2_RU \ + IS_THE_MACHINE_ES2_RU(boot_get_machine_id()) +#define BOOT_IS_MACHINE_ES2 \ + IS_THE_MACHINE_ES2(boot_get_machine_id()) +#define BOOT_IS_MACHINE_E2S \ + IS_THE_MACHINE_E2S(boot_get_machine_id()) +#define BOOT_IS_MACHINE_E8C \ + IS_THE_MACHINE_E8C(boot_get_machine_id()) +#define BOOT_IS_MACHINE_E1CP \ + IS_THE_MACHINE_E1CP(boot_get_machine_id()) +#define BOOT_IS_MACHINE_E8C2 \ + IS_THE_MACHINE_E8C2(boot_get_machine_id()) +#define BOOT_IS_MACHINE_E12C \ + IS_THE_MACHINE_E12C(boot_get_machine_id()) +#define BOOT_IS_MACHINE_E16C \ + IS_THE_MACHINE_E16C(boot_get_machine_id()) +#define BOOT_IS_MACHINE_E2C3 \ + IS_THE_MACHINE_E2C3(boot_get_machine_id()) +#define BOOT_IS_MACHINE_VIRT \ + IS_THE_MACHINE_VIRT(boot_get_machine_id()) + +#define BOOT_HAS_MACHINE_E2K_DSP \ + HAS_THE_MACHINE_E2K_DSP(boot_get_machine_id()) +#define BOOT_HAS_MACHINE_E2K_FULL_SIC \ + HAS_THE_MACHINE_E2K_FULL_SIC(boot_get_machine_id()) +#define BOOT_HAS_MACHINE_E2K_IOHUB \ + HAS_THE_MACHINE_E2K_IOHUB(boot_get_machine_id()) +#define BOOT_HAS_MACHINE_L_IOMMU \ + HAS_THE_MACHINE_L_IOMMU(boot_get_machine_id()) +#define BOOT_HAS_MACHINE_E2K_IOMMU \ + HAS_THE_MACHINE_E2K_IOMMU(boot_get_machine_id()) +#define BOOT_HAS_MACHINE_E2K_LEGACY_SIC \ + HAS_THE_MACHINE_E2K_LEGACY_SIC(boot_get_machine_id()) +#define BOOT_HAS_MACHINE_L_SIC \ + HAS_THE_MACHINE_L_SIC(boot_get_machine_id()) + +#define MAX_NODE_CPUS 16 /* all 16 CPU cores on a node */ + +#define E2K_MAX_NODE_IOLINKS 2 /* each node can has max 2 IO links */ + /* connected to IOHUB or RDMA */ +#define MACH_MAX_NUMIOLINKS (E2K_MAX_NODE_IOLINKS * MAX_NUMNODES) + +#define LMS_CONS_DATA_PORT 0x300UL /* On READ - data from keyboard */ + /* On WRITE - data to debug ouput */ + /* port (console/journal) */ + +#define LMS_CONS_STATUS_PORT 0x301UL /* On READ - data available on 0x300 */ + /* On WRITE - shift count for 0x304 */ + +#define LMS_NSOCK_BADDR_PORT 
0x302UL /* On READ - network socket base addr*/ + /* On WRITE - the same. */ + +#define LMS_NSOCK_DATA_PORT 0x303UL /* On READ - data from network socket*/ + /* On WRITE - data to network socket*/ + +#define LMS_TRACE_CNTL_PORT 0x304UL /* On READ - state of the instruction*/ + /* counter */ + /* On WRITE - LMS tracer control */ + /* (1 - start, 0 - stop) */ + +#define LMS_RAM_ADDR_PORT 0x309UL /* On WRITE - RAM address to load */ + /* kernel image by simulator */ + +#define LMS_TRACE_CNTL_OFF 0 +#define LMS_TRACE_CNTL_ON 1 + +#define LMS_LOAD_IMAGE_TO_RAM 5 /* Load 'romimage' file to RAM from */ + /* address before written to */ + /* 'LMS_RAM_ADDR_PORT' port */ + +extern unsigned long machine_serial_num; + +#define ELBRUS_CPU_VENDOR "Elbrus-MCST" + +#endif /* _ASM_E2K_H_ */ diff --git a/arch/e2k/include/asm/e2k_api.h b/arch/e2k/include/asm/e2k_api.h new file mode 100644 index 000000000000..338bdc2302da --- /dev/null +++ b/arch/e2k/include/asm/e2k_api.h @@ -0,0 +1,6994 @@ +#ifndef _E2K_API_H_ +#define _E2K_API_H_ + +#include +#include +#include +#include /* For instr_cs1_t */ +#include + +#include + + +#ifndef __ASSEMBLY__ +typedef unsigned char __e2k_u8_t; +typedef unsigned short int __e2k_u16_t; +typedef unsigned int __e2k_u32_t; +typedef unsigned long long __e2k_u64_t; +typedef void *__e2k_ptr_t; +#endif + + +/* + * FIXME: the following header include is commented +#include + * because of this header 'asm/e2k_api.h' is included in arch-independent + * header 'linux/compiler-gcc.h' (see header to know why). + * So header 'asm/mmu_types.h' cannot contain any types from 'linux/types.h' + * and it need include the header directly before 'asm/e2k_api.h' into follow + * files: + * kernel/trap_table.S + * kernel/page_tables.S + */ + +/* + * Used to separate one wide instruction from another + */ +#define E2K_CMD_SEPARATOR asm volatile ("{nop}" ::: "memory") + +/* To avoid header dependencies use this define + * instead of BUILD_BUG_ON() from . 
*/ +#define E2K_BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) + +#ifndef E2K_BIN_VER +#define E2K_BIN_VER 8 +#endif + +#define EI_SEMANTIC 7 + +#define ELF_CODE_UNKNOWN 0 +#define ELF_CODE_32_UNPROTECTED 1 +#define ELF_CODE_64_UNPROTECTED 2 +#define ELF_CODE_NEW_PROTECTED 5 +#define ELF_CODE_NEW_PROTECTED_CXX 6 + +#define ELF_BIN_COMP 0x4 +/* + * If x->e_flags && ELF_E2K_INCOMPAT == 1 + * the code can executed only (mtype==0) - any + * ==2 es2 + * ==3 e2s + * ==4 e8c + */ +#define ELF_E2K_INCOMPAT 0x10 + +#define IS_INCOMPAT(x) ((x)->e_machine == ELF_ARCH && \ + ((x)->e_flags & ELF_E2K_INCOMPAT)) + +/* protected mode flag */ +#define ELF_E2K_PM 0x20 +/* ELF segments are to be mapped in packed way. */ +#define ELF_E2K_PACK_SEGMENTS 0x40 + +#define EM_E2KL (33 + (E2K_BIN_VER << 1)) /* Little endian */ +#define EM_E2KB (34 + (E2K_BIN_VER << 1)) /* Big endian */ +#define EM_E2K_FAKE EM_E2KL +#define EM_E2K 175 +/* Compilation unit number for all memory allocations in 32-bit comp. mode */ +#define E2K_ELF32_COMP_UNIT 1 + +/* + * Machine type checker. Is to be used for 64-bit, 32-bit elf + * and protected mode. Result depends on machine type and binary type. 
 */

/*
 * elf_check_e2k_mtype() - decide whether the machine type recorded in an
 * ELF header @x can run on this CPU.  The type is read from e_flags
 * (bits 31:24 when e_machine == ELF_ARCH, bits 30:28 otherwise) and is
 * matched against the running machine's native instruction set version.
 * Evaluates to 1 when compatible, 0 otherwise.
 *
 * NOTE(review): in the "a && b || c" conditions below, && binds tighter
 * than ||, i.e. they read "(compatible binary on a newer iset) or
 * (exactly this iset)" — presumably intended, but unparenthesized
 * (-Wparentheses would warn).  Confirm against the original header.
 */
#define elf_check_e2k_mtype(x) \
({ \
	unsigned long mt; \
	int _res = 0; \
	int _iset = machine.native_iset_ver; \
 \
	if ((x)->e_machine == ELF_ARCH) { \
		mt = (unsigned long)((x)->e_flags >> 24); \
	} else { \
		mt = (unsigned long)((x)->e_flags >> 28) & 0x7; \
	} \
 \
	switch (mt) { \
	case 0: \
		if (!IS_INCOMPAT(x) || _iset == ELBRUS_S_ISET) \
			_res = 1; \
		break; \
	case 2: \
		if (!IS_INCOMPAT(x) && _iset > ELBRUS_S_ISET \
				|| _iset == ELBRUS_S_ISET) \
			_res = 1; \
		break; \
	case 3: \
		if (!IS_INCOMPAT(x) && _iset > ELBRUS_2S_ISET \
				|| _iset == ELBRUS_2S_ISET) \
			_res = 1; \
		break; \
	case 4: \
		if (!IS_INCOMPAT(x) && _iset > ELBRUS_8C_ISET \
				|| _iset == ELBRUS_8C_ISET) \
			_res = 1; \
		break; \
	case 5: \
		if (!IS_INCOMPAT(x) && _iset > ELBRUS_8C2_ISET \
				|| _iset == ELBRUS_8C2_ISET) \
			_res = 1; \
		break; \
	case 6: \
		if (_iset == E2K_ISET_V6) \
			_res = 1; \
		break; \
	case 19: \
		if (IS_MACHINE_E8C) \
			_res = 1; \
		break; \
	case 20: \
		if (IS_MACHINE_E1CP) \
			_res = 1; \
		break; \
	case 21: \
		if (IS_MACHINE_E12C) \
			_res = 1; \
		break; \
	case 22: \
		if (IS_MACHINE_E16C) \
			_res = 1; \
		break; \
	case 23: \
		if (IS_MACHINE_E2C3) \
			_res = 1; \
		break; \
	default: \
		break; \
	} \
 \
	_res; \
})

/*
 * Normal simulator termination
 */
#define E2K_LMS_HALT_OK \
({ \
	_Pragma("no_asm_inline") \
	asm volatile (".word \t0x00008001\n\t" \
		      ".word \t0x60000000"); \
})

/*
 * Simulator termination on error
 * (err_no is OR-ed into the halt opcode, so it must be an immediate)
 */
#define E2K_LMS_HALT_ERROR(err_no) \
({ \
	_Pragma("no_asm_inline") \
	asm volatile (".word \t0x00008001\n\t" \
		      ".word \t0x60000000 | %0" \
		      : \
		      : "i" (err_no)); \
})

/*
 * Kprobes breakpoint instruction
 */
#define E2K_KPROBES_BREAKPOINT \
({ \
	_Pragma("no_asm_inline") \
	asm volatile (".word \t0x04000001\n\t" \
		      ".word \t0x0dc0c040"); \
})

/* Bind a local variable to a fixed global register %gN
 * (extra DO_ level forces macro-argument expansion of greg_no). */
#define ASM_GET_GREG_MEMONIC(greg_no)	__asm__("%g" #greg_no)
#define DO_ASM_GET_GREG_MEMONIC(greg_no) \
		ASM_GET_GREG_MEMONIC(greg_no)

/* Read 32-bit register %rN / 64-bit register %drN into a local. */
#define E2K_GET_REG(reg_no) \
({ \
	register __e2k_u32_t res; \
	asm volatile ("adds \t0x0, %%r" #reg_no ", %0" \
		      : "=r" (res)); \
	res; \
})

#define E2K_GET_DREG(reg_no) \
({ \
	register __e2k_u64_t res; \
	asm volatile ("addd \t0x0, %%dr" #reg_no ", %0" \
		      : "=r" (res)); \
	res; \
})

/* Read 64-bit global register %dgN (value only, tag propagates as-is). */
#define ASM_GET_DGREG(reg_no) \
({ \
	register __e2k_u64_t res; \
	asm volatile ("addd \t0x0, %%dg" #reg_no ", %0" \
		      : "=r" (res)); \
	res; \
})
#define DO_ASM_GET_DGREG(greg_no) \
		ASM_GET_DGREG(greg_no)
#define E2K_GET_DGREG(greg_no) \
		DO_ASM_GET_DGREG(greg_no)
#define NATIVE_GET_DGREG(greg_no) \
		DO_ASM_GET_DGREG(greg_no)


/* Read %dgN and strip its tag (puttagd with tag 0) before returning. */
#define ASM_GET_UNTEGGED_DGREG(reg_no) \
({ \
	register __e2k_u64_t res; \
	asm volatile ("addd,s \t0x0, %%dg" #reg_no ", %0\n" \
		      "puttagd,s \t%0, 0, %0" \
		      : "=r" (res)); \
	res; \
})
#define DO_ASM_GET_UNTEGGED_DGREG(greg_no) \
		ASM_GET_UNTEGGED_DGREG(greg_no)
#define E2K_GET_UNTEGGED_DGREG(greg_no) \
		DO_ASM_GET_UNTEGGED_DGREG(greg_no)
#define NATIVE_GET_UNTEGGED_DGREG(greg_no) \
		DO_ASM_GET_UNTEGGED_DGREG(greg_no)

/* Write val into global register %dgN. */
#define ASM_SET_DGREG(reg_no, val) \
({ \
	asm volatile ("addd \t0x0, %0, %%dg" #reg_no \
		      : \
		      : "ri" ((__e2k_u64_t) (val))); \
})
#define DO_ASM_SET_DGREG(greg_no, val) \
		ASM_SET_DGREG(greg_no, val)
#define E2K_SET_DGREG(greg_no, val) \
		DO_ASM_SET_DGREG(greg_no, val)
#define NATIVE_SET_DGREG(greg_no, val) \
		DO_ASM_SET_DGREG(greg_no, val)

/* Non-volatile variant: the write is expressed through a register
 * variable bound to %gN, so the compiler may schedule/eliminate it. */
#define ASM_SET_DGREG_NV(greg_no, _val) \
({ \
	register u64 _greg asm("g" #greg_no); \
	asm ("addd 0, %[val], %[greg]" \
	     : [greg] "=r" (_greg) \
	     : [val] "ri" ((__e2k_u64_t) (_val))); \
})
#define DO_ASM_SET_DGREG_NV(greg_no, val) \
		ASM_SET_DGREG_NV(greg_no, val)
#define E2K_SET_DGREG_NV(greg_no, val) \
		DO_ASM_SET_DGREG_NV(greg_no, val)
#define NATIVE_SET_DGREG_NV(greg_no, val) \
		DO_ASM_SET_DGREG_NV(greg_no, val)


/* qpswitchd,sm on a single global register (in-place, reg is clobbered). */
#define __E2K_QPSWITCHD_SM_GREG(num) \
do { \
	asm ("qpswitchd,sm %%dg" #num ", %%dg" #num \
	     ::: "%g" #num); \
} while (0)

/* Runtime-number dispatcher: asm needs a literal register number, so a
 * switch expands one asm per possible greg_num (0..31). */
#define E2K_QPSWITCHD_SM_GREG(greg_num) \
do { \
	switch (greg_num) { \
	case 0: __E2K_QPSWITCHD_SM_GREG(0); break; \
	case 1: __E2K_QPSWITCHD_SM_GREG(1); break; \
	case 2: __E2K_QPSWITCHD_SM_GREG(2); break; \
	case 3: __E2K_QPSWITCHD_SM_GREG(3); break; \
	case 4: __E2K_QPSWITCHD_SM_GREG(4); break; \
	case 5: __E2K_QPSWITCHD_SM_GREG(5); break; \
	case 6: __E2K_QPSWITCHD_SM_GREG(6); break; \
	case 7: __E2K_QPSWITCHD_SM_GREG(7); break; \
	case 8: __E2K_QPSWITCHD_SM_GREG(8); break; \
	case 9: __E2K_QPSWITCHD_SM_GREG(9); break; \
	case 10: __E2K_QPSWITCHD_SM_GREG(10); break; \
	case 11: __E2K_QPSWITCHD_SM_GREG(11); break; \
	case 12: __E2K_QPSWITCHD_SM_GREG(12); break; \
	case 13: __E2K_QPSWITCHD_SM_GREG(13); break; \
	case 14: __E2K_QPSWITCHD_SM_GREG(14); break; \
	case 15: __E2K_QPSWITCHD_SM_GREG(15); break; \
	case 16: __E2K_QPSWITCHD_SM_GREG(16); break; \
	case 17: __E2K_QPSWITCHD_SM_GREG(17); break; \
	case 18: __E2K_QPSWITCHD_SM_GREG(18); break; \
	case 19: __E2K_QPSWITCHD_SM_GREG(19); break; \
	case 20: __E2K_QPSWITCHD_SM_GREG(20); break; \
	case 21: __E2K_QPSWITCHD_SM_GREG(21); break; \
	case 22: __E2K_QPSWITCHD_SM_GREG(22); break; \
	case 23: __E2K_QPSWITCHD_SM_GREG(23); break; \
	case 24: __E2K_QPSWITCHD_SM_GREG(24); break; \
	case 25: __E2K_QPSWITCHD_SM_GREG(25); break; \
	case 26: __E2K_QPSWITCHD_SM_GREG(26); break; \
	case 27: __E2K_QPSWITCHD_SM_GREG(27); break; \
	case 28: __E2K_QPSWITCHD_SM_GREG(28); break; \
	case 29: __E2K_QPSWITCHD_SM_GREG(29); break; \
	case 30: __E2K_QPSWITCHD_SM_GREG(30); break; \
	case 31: __E2K_QPSWITCHD_SM_GREG(31); break; \
	default: panic("Invalid global register # %d\n", greg_num); \
	} \
} while (0)

/*
 * Copy single register tagged value to single register
 * src_reg - local value of type single register to copy from
 * dst_reg - local value of type single register to copy to
 */
#define E2K_MOVE_TAGGED_REG(src_reg, dst_reg) \
({ \
	asm volatile ("movts \t%1, %0" \
		      : "=r" (dst_reg) \
		      : "r" ((__e2k_u32_t) (src_reg))); \
})

/* Split %dgN into untagged value (dst_reg) and its 64-bit tag (tag);
 * gettagd/puttagd are packed into one wide instruction. */
#define _E2K_GET_DGREG_VAL_AND_TAG(greg_no, dst_reg, tag) \
({ \
	u32 __dtag; \
	asm volatile ("{gettagd %%dg" #greg_no ", %0\n\t" \
		      "puttagd %%dg" #greg_no ", 0, %1}" \
		      : "=r" (__dtag), "=r" (dst_reg) \
		      : ); \
	tag = __dtag; \
})

/* Runtime-number dispatcher for _E2K_GET_DGREG_VAL_AND_TAG (0..31). */
#define E2K_GET_DGREG_VAL_AND_TAG(greg_num, dst_reg, tag) \
({ \
	switch (greg_num) { \
	case 0: _E2K_GET_DGREG_VAL_AND_TAG(0, dst_reg, tag); break; \
	case 1: _E2K_GET_DGREG_VAL_AND_TAG(1, dst_reg, tag); break; \
	case 2: _E2K_GET_DGREG_VAL_AND_TAG(2, dst_reg, tag); break; \
	case 3: _E2K_GET_DGREG_VAL_AND_TAG(3, dst_reg, tag); break; \
	case 4: _E2K_GET_DGREG_VAL_AND_TAG(4, dst_reg, tag); break; \
	case 5: _E2K_GET_DGREG_VAL_AND_TAG(5, dst_reg, tag); break; \
	case 6: _E2K_GET_DGREG_VAL_AND_TAG(6, dst_reg, tag); break; \
	case 7: _E2K_GET_DGREG_VAL_AND_TAG(7, dst_reg, tag); break; \
	case 8: _E2K_GET_DGREG_VAL_AND_TAG(8, dst_reg, tag); break; \
	case 9: _E2K_GET_DGREG_VAL_AND_TAG(9, dst_reg, tag); break; \
	case 10: _E2K_GET_DGREG_VAL_AND_TAG(10, dst_reg, tag); break; \
	case 11: _E2K_GET_DGREG_VAL_AND_TAG(11, dst_reg, tag); break; \
	case 12: _E2K_GET_DGREG_VAL_AND_TAG(12, dst_reg, tag); break; \
	case 13: _E2K_GET_DGREG_VAL_AND_TAG(13, dst_reg, tag); break; \
	case 14: _E2K_GET_DGREG_VAL_AND_TAG(14, dst_reg, tag); break; \
	case 15: _E2K_GET_DGREG_VAL_AND_TAG(15, dst_reg, tag); break; \
	case 16: _E2K_GET_DGREG_VAL_AND_TAG(16, dst_reg, tag); break; \
	case 17: _E2K_GET_DGREG_VAL_AND_TAG(17, dst_reg, tag); break; \
	case 18: _E2K_GET_DGREG_VAL_AND_TAG(18, dst_reg, tag); break; \
	case 19: _E2K_GET_DGREG_VAL_AND_TAG(19, dst_reg, tag); break; \
	case 20: _E2K_GET_DGREG_VAL_AND_TAG(20, dst_reg, tag); break; \
	case 21: _E2K_GET_DGREG_VAL_AND_TAG(21, dst_reg, tag); break; \
	case 22: _E2K_GET_DGREG_VAL_AND_TAG(22, dst_reg, tag); break; \
	case 23: _E2K_GET_DGREG_VAL_AND_TAG(23, dst_reg, tag); break; \
	case 24: _E2K_GET_DGREG_VAL_AND_TAG(24, dst_reg, tag); break; \
	case 25: _E2K_GET_DGREG_VAL_AND_TAG(25, dst_reg, tag); break; \
	case 26: _E2K_GET_DGREG_VAL_AND_TAG(26, dst_reg, tag); break; \
	case 27: _E2K_GET_DGREG_VAL_AND_TAG(27, dst_reg, tag); break; \
	case 28: _E2K_GET_DGREG_VAL_AND_TAG(28, dst_reg, tag); break; \
	case 29: _E2K_GET_DGREG_VAL_AND_TAG(29, dst_reg, tag); break; \
	case 30: _E2K_GET_DGREG_VAL_AND_TAG(30, dst_reg, tag); break; \
	case 31: _E2K_GET_DGREG_VAL_AND_TAG(31, dst_reg, tag); break; \
	default: panic("Invalid global register # %d\n", greg_num); \
	} \
})

/* Compose %dgN from an untagged value and an explicit tag. */
#define _E2K_SET_DGREG_VAL_AND_TAG(greg_no, val, tag) \
do { \
	asm volatile ("puttagd %0, %1, %%dg" #greg_no \
		      : \
		      : "r" (val), "r" (tag)); \
} while (0)

/* Runtime-number dispatcher for _E2K_SET_DGREG_VAL_AND_TAG (0..31). */
#define E2K_SET_DGREG_VAL_AND_TAG(greg_num, val, tag) \
do { \
	switch (greg_num) { \
	case 0: _E2K_SET_DGREG_VAL_AND_TAG(0, val, tag); break; \
	case 1: _E2K_SET_DGREG_VAL_AND_TAG(1, val, tag); break; \
	case 2: _E2K_SET_DGREG_VAL_AND_TAG(2, val, tag); break; \
	case 3: _E2K_SET_DGREG_VAL_AND_TAG(3, val, tag); break; \
	case 4: _E2K_SET_DGREG_VAL_AND_TAG(4, val, tag); break; \
	case 5: _E2K_SET_DGREG_VAL_AND_TAG(5, val, tag); break; \
	case 6: _E2K_SET_DGREG_VAL_AND_TAG(6, val, tag); break; \
	case 7: _E2K_SET_DGREG_VAL_AND_TAG(7, val, tag); break; \
	case 8: _E2K_SET_DGREG_VAL_AND_TAG(8, val, tag); break; \
	case 9: _E2K_SET_DGREG_VAL_AND_TAG(9, val, tag); break; \
	case 10: _E2K_SET_DGREG_VAL_AND_TAG(10, val, tag); break; \
	case 11: _E2K_SET_DGREG_VAL_AND_TAG(11, val, tag); break; \
	case 12: _E2K_SET_DGREG_VAL_AND_TAG(12, val, tag); break; \
	case 13: _E2K_SET_DGREG_VAL_AND_TAG(13, val, tag); break; \
	case 14: _E2K_SET_DGREG_VAL_AND_TAG(14, val, tag); break; \
	case 15: _E2K_SET_DGREG_VAL_AND_TAG(15, val, tag); break; \
	case 16: _E2K_SET_DGREG_VAL_AND_TAG(16, val, tag); break; \
	case 17: _E2K_SET_DGREG_VAL_AND_TAG(17, val, tag); break; \
	case 18: _E2K_SET_DGREG_VAL_AND_TAG(18, val, tag); break; \
	case 19: _E2K_SET_DGREG_VAL_AND_TAG(19, val, tag); break; \
	case 20: _E2K_SET_DGREG_VAL_AND_TAG(20, val, tag); break; \
	case 21: _E2K_SET_DGREG_VAL_AND_TAG(21, val, tag); break; \
	case 22: _E2K_SET_DGREG_VAL_AND_TAG(22, val, tag); break; \
	case 23: _E2K_SET_DGREG_VAL_AND_TAG(23, val, tag); break; \
	case 24: _E2K_SET_DGREG_VAL_AND_TAG(24, val, tag); break; \
	case 25: _E2K_SET_DGREG_VAL_AND_TAG(25, val, tag); break; \
	case 26: _E2K_SET_DGREG_VAL_AND_TAG(26, val, tag); break; \
	case 27: _E2K_SET_DGREG_VAL_AND_TAG(27, val, tag); break; \
	case 28: _E2K_SET_DGREG_VAL_AND_TAG(28, val, tag); break; \
	case 29: _E2K_SET_DGREG_VAL_AND_TAG(29, val, tag); break; \
	case 30: _E2K_SET_DGREG_VAL_AND_TAG(30, val, tag); break; \
	case 31: _E2K_SET_DGREG_VAL_AND_TAG(31, val, tag); break; \
	default: panic("Invalid global register # %d\n", greg_num); \
	} \
} while (0)

/* 32-bit variant: split %gN into untagged value and tag (gettags/puttags). */
#define _E2K_GET_GREG_VAL_AND_TAG(greg_no, dst_reg, tag) \
({ \
	u32 __tag; \
	asm volatile ("{gettags %%g" #greg_no ", %0\n\t" \
		      "puttags %%g" #greg_no ", 0, %1}" \
		      : "=r" (__tag), "=r" (dst_reg) \
		      : ); \
	tag = __tag; \
})

/* Runtime-number dispatcher for _E2K_GET_GREG_VAL_AND_TAG (0..31). */
#define E2K_GET_GREG_VAL_AND_TAG(greg_num, dst_reg, tag) \
({ \
	switch (greg_num) { \
	case 0: _E2K_GET_GREG_VAL_AND_TAG(0, dst_reg, tag); break; \
	case 1: _E2K_GET_GREG_VAL_AND_TAG(1, dst_reg, tag); break; \
	case 2: _E2K_GET_GREG_VAL_AND_TAG(2, dst_reg, tag); break; \
	case 3: _E2K_GET_GREG_VAL_AND_TAG(3, dst_reg, tag); break; \
	case 4: _E2K_GET_GREG_VAL_AND_TAG(4, dst_reg, tag); break; \
	case 5: _E2K_GET_GREG_VAL_AND_TAG(5, dst_reg, tag); break; \
	case 6: _E2K_GET_GREG_VAL_AND_TAG(6, dst_reg, tag); break; \
	case 7: _E2K_GET_GREG_VAL_AND_TAG(7, dst_reg, tag); break; \
	case 8: _E2K_GET_GREG_VAL_AND_TAG(8, dst_reg, tag); break; \
	case 9: _E2K_GET_GREG_VAL_AND_TAG(9, dst_reg, tag); break; \
	case 10: _E2K_GET_GREG_VAL_AND_TAG(10, dst_reg, tag); break; \
	case 11: _E2K_GET_GREG_VAL_AND_TAG(11, dst_reg, tag); break; \
	case 12: _E2K_GET_GREG_VAL_AND_TAG(12, dst_reg, tag); break; \
	case 13: _E2K_GET_GREG_VAL_AND_TAG(13, dst_reg, tag); break; \
	case 14: _E2K_GET_GREG_VAL_AND_TAG(14, dst_reg, tag); break; \
	case 15: _E2K_GET_GREG_VAL_AND_TAG(15, dst_reg, tag); break; \
	case 16: _E2K_GET_GREG_VAL_AND_TAG(16, dst_reg, tag); break; \
	case 17: _E2K_GET_GREG_VAL_AND_TAG(17, dst_reg, tag); break; \
	case 18: _E2K_GET_GREG_VAL_AND_TAG(18, dst_reg, tag); break; \
	case 19: _E2K_GET_GREG_VAL_AND_TAG(19, dst_reg, tag); break; \
	case 20: _E2K_GET_GREG_VAL_AND_TAG(20, dst_reg, tag); break; \
	case 21: _E2K_GET_GREG_VAL_AND_TAG(21, dst_reg, tag); break; \
	case 22: _E2K_GET_GREG_VAL_AND_TAG(22, dst_reg, tag); break; \
	case 23: _E2K_GET_GREG_VAL_AND_TAG(23, dst_reg, tag); break; \
	case 24: _E2K_GET_GREG_VAL_AND_TAG(24, dst_reg, tag); break; \
	case 25: _E2K_GET_GREG_VAL_AND_TAG(25, dst_reg, tag); break; \
	case 26: _E2K_GET_GREG_VAL_AND_TAG(26, dst_reg, tag); break; \
	case 27: _E2K_GET_GREG_VAL_AND_TAG(27, dst_reg, tag); break; \
	case 28: _E2K_GET_GREG_VAL_AND_TAG(28, dst_reg, tag); break; \
	case 29: _E2K_GET_GREG_VAL_AND_TAG(29, dst_reg, tag); break; \
	case 30: _E2K_GET_GREG_VAL_AND_TAG(30, dst_reg, tag); break; \
	case 31: _E2K_GET_GREG_VAL_AND_TAG(31, dst_reg, tag); break; \
	default: panic("Invalid global register # %d\n", greg_num); \
	} \
})

/* iset-v2 save of a global register pair to two tagged memory slots;
 * the 16-bit extension is stored separately at offset 8 via movfi/sth. */
#define ASM_SAVE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi, iset) \
({ \
	u64 reg0, reg1; \
	BUILD_BUG_ON(iset != E2K_ISET_V2); \
 \
	asm ( \
		"strd,2 [ %[addr_lo] + %[opc_0] ], %%dg" #numlo "\n" \
		"strd,5 [ %[addr_hi] + %[opc_0] ], %%dg" #numhi "\n" \
		"movfi %%dg" #numlo ", %[reg0]\n" \
		"movfi %%dg" #numhi ", %[reg1]\n" \
		"sth [ %[addr_lo] + 8 ], %[reg0]\n" \
		"sth [ %[addr_hi] + 8 ], %[reg1]\n" \
		: [reg0] "=&r" (reg0), [reg1] "=&r" (reg1) \
		: [addr_lo] "r" (__addr_lo), [addr_hi] "r" (__addr_hi), \
		  [opc_0] "i" (TAGGED_MEM_STORE_REC_OPC) \
		: "memory"); \
})

/* iset-v2 restore; the extension word is merged back only when the
 * register's tag is 0 (movif under %pred2/%pred3). */
#define ASM_RESTORE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi, iset) \
({ \
	u64 reg0, reg1, reg2, reg3; \
	BUILD_BUG_ON(iset != E2K_ISET_V2); \
 \
	asm ( \
		"ldrd,2 [ %[addr_lo] + %[opc_0] ], %%dg" #numlo "\n" \
		"ldrd,5 [ %[addr_hi] + %[opc_0] ], %%dg" #numhi "\n" \
		"ldh [ %[addr_lo] + 8 ], %[reg0]\n" \
		"ldh [ %[addr_hi] + 8 ], %[reg1]\n" \
		"gettagd %%dg" #numlo ", %[reg2]\n" \
		"gettagd %%dg" #numhi ", %[reg3]\n" \
		"cmpesb 0, %[reg2], %%pred2\n" \
		"cmpesb 0, %[reg3], %%pred3\n" \
		"movif %%dg" #numlo ", %[reg0], %%dg" #numlo " ? %%pred2\n" \
		"movif %%dg" #numhi ", %[reg1], %%dg" #numhi " ? %%pred3\n" \
		: [reg0] "=&r" (reg0), [reg1] "=&r" (reg1), \
		  [reg2] "=&r" (reg2), [reg3] "=&r" (reg3) \
		: [addr_lo] "r" (__addr_lo), [addr_hi] "r" (__addr_hi), \
		  [opc_0] "i" (TAGGED_MEM_LOAD_REC_OPC) \
		: "%g" #numlo, "%g" #numhi, "%pred2", "%pred3"); \
})

/* iset-v5 save: strqp stores the full quad-word including tags. */
#define ASM_SAVE_GREG_V5(__addr_lo, __addr_hi, numlo, numhi, iset) \
({ \
	u64 unused; \
	BUILD_BUG_ON(iset != E2K_ISET_V5); \
 \
	asm ( \
		/* Bug 116851 - all strqp must be speculative \
		 * if dealing with tags */ \
		"{\n" /* Close this asm because 'sm' for 'strqp' \
			 is not supported by lcc */ \
		"strqp,2,sm [ %[addr_lo] + %[opc_0] ], %%dg" #numlo "\n" \
		"strqp,5,sm [ %[addr_hi] + %[opc_0] ], %%dg" #numhi "\n" \
		"}\n" \
		: [unused] "=r" (unused) \
		: [addr_lo] "r" (__addr_lo), [addr_hi] "r" (__addr_hi), \
		  [opc_0] "i" (TAGGED_MEM_STORE_REC_OPC) \
		: "memory"); \
})

/* iset-v5 restore via ldrqp. */
#define ASM_RESTORE_GREG_V5(__addr_lo, __addr_hi, numlo, numhi, iset) \
({ \
	u64 reg0, reg1; \
	BUILD_BUG_ON(iset != E2K_ISET_V5); \
 \
	asm ( \
		"ldrqp,2 [ %[addr_lo] + %[opc_0] ], %%dg" #numlo "\n" \
		"ldrqp,5 [ %[addr_hi] + %[opc_0] ], %%dg" #numhi "\n" \
		: [reg0] "=&r" (reg0), [reg1] "=&r" (reg1) \
		: [addr_lo] "r" (__addr_lo), [addr_hi] "r" (__addr_hi), \
		  [opc_0] "i" (TAGGED_MEM_LOAD_REC_OPC) \
		: "%g" #numlo, "%g" #numhi); \
})

/* Select the implementation matching the compile-time target iset. */
#if __iset__ == 2

#define ASM_SAVE_GREG(__addr_lo, __addr_hi, numlo, numhi, iset) \
		ASM_SAVE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi, iset)

#define ASM_RESTORE_GREG(__addr_lo, __addr_hi, numlo, numhi, iset) \
		ASM_RESTORE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi, iset)

#elif __iset__ == 5

#define ASM_SAVE_GREG(__addr_lo, __addr_hi, numlo, numhi, iset) \
		ASM_SAVE_GREG_V5(__addr_lo, __addr_hi, numlo, numhi, iset)

#define ASM_RESTORE_GREG(__addr_lo, __addr_hi, numlo, numhi, iset) \
		ASM_RESTORE_GREG_V5(__addr_lo, __addr_hi, numlo, numhi, iset)

#else
/* IMPORTANT: Do NOT use these macros directly, use
 * machine.save_gregs()/machine.restore_gregs() instead */
#endif

#define NATIVE_SAVE_GREG(__addr_lo, __addr_hi, numlo, numhi, iset) \
		ASM_SAVE_GREG(__addr_lo, __addr_hi, numlo, numhi, iset)
#define NATIVE_SAVE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi) \
		ASM_SAVE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi, E2K_ISET_V2)
#define NATIVE_SAVE_GREG_V5(__addr_lo, __addr_hi, numlo, numhi) \
		ASM_SAVE_GREG_V5(__addr_lo, __addr_hi, numlo, numhi, E2K_ISET_V5)
#define NATIVE_RESTORE_GREG(__addr_lo, __addr_hi, numlo, numhi, iset) \
		ASM_RESTORE_GREG(__addr_lo, __addr_hi, numlo, numhi, iset)
#define NATIVE_RESTORE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi) \
		ASM_RESTORE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi, E2K_ISET_V2)
#define NATIVE_RESTORE_GREG_V5(__addr_lo, __addr_hi, numlo, numhi) \
		ASM_RESTORE_GREG_V5(__addr_lo, __addr_hi, numlo, numhi, E2K_ISET_V5)

/* Store one kernel global register as a tagged record at _base + ind*16.
 * NOTE(review): BUILD_BUG_ON references 'iset' which is not a parameter
 * of this macro (it leaks from the expansion site), and reg0/reg1 are
 * declared but unused — looks like copy-paste residue; confirm upstream. */
#define ASM_SAVE_THE_KERNEL_GREG(greg_no, _base, ind) \
({ \
	u64 reg0, reg1; \
	BUILD_BUG_ON(iset != E2K_ISET_V2); \
 \
	asm ( \
		"strd [ %[base] + %[opc] ], %%dg" #greg_no "\n" \
		: \
		: [base] "r" (_base), \
		  [opc] "i" (TAGGED_MEM_STORE_REC_OPC | ind * 16UL) \
		: "%g" #greg_no, "memory"); \
})
#define NATIVE_SAVE_THE_KERNEL_GREG(greg_no, _base, ind) \
		ASM_SAVE_THE_KERNEL_GREG(greg_no, _base, ind)

/* Reload one kernel global register from the tagged record. */
#define ASM_RESTORE_THE_KERNEL_GREG(greg_no, _base, ind) \
do { \
	asm ( \
		"ldrd [ %[base] + %[opc] ], %%dg" #greg_no "\n" \
		: \
		: [base] "r" (_base), \
		  [opc] "i" (TAGGED_MEM_LOAD_REC_OPC | ind * 16UL) \
		: "%g" #greg_no, "memory"); \
} while (false)
#define NATIVE_RESTORE_THE_KERNEL_GREG(greg_no, _base, ind) \
		ASM_RESTORE_THE_KERNEL_GREG(greg_no, _base, ind)

/* Reload the four kernel-reserved global registers in one asm; the
 * outputs are bound to the %gN registers via register variables. */
#define ASM_RESTORE_KERNEL_GREG(__base, \
		indlo1, indhi1, indlo2, indhi2, \
		numlo1, numhi1, numlo2, numhi2) \
do { \
	u64 _base = (u64) (__base); \
	register u64 g##numlo1 asm("g" #numlo1); \
	register u64 g##numhi1 asm("g" #numhi1); \
	register u64 g##numlo2 asm("g" #numlo2); \
	register u64 g##numhi2 asm("g" #numhi2); \
	asm (	"ldrd [ %[base] + %[opc_lo1] ], %[g_lo1]\n" \
		"ldrd [ %[base] + %[opc_hi1] ], %[g_hi1]\n" \
		"ldrd [ %[base] + %[opc_lo2] ], %[g_lo2]\n" \
		"ldrd [ %[base] + %[opc_hi2] ], %[g_hi2]\n" \
		: [g_lo1] "=r" (g##numlo1), [g_hi1] "=r" (g##numhi1), \
		  [g_lo2] "=r" (g##numlo2), [g_hi2] "=r" (g##numhi2) \
		: [base] "r" (_base), \
		  [opc_lo1] "i" (TAGGED_MEM_LOAD_REC_OPC | indlo1 * 16UL), \
		  [opc_hi1] "i" (TAGGED_MEM_LOAD_REC_OPC | indhi1 * 16UL), \
		  [opc_lo2] "i" (TAGGED_MEM_LOAD_REC_OPC | indlo2 * 16UL), \
		  [opc_hi2] "i" (TAGGED_MEM_LOAD_REC_OPC | indhi2 * 16UL)); \
} while (false)
#define NATIVE_RESTORE_KERNEL_GREG(base, \
		indlo1, indhi1, indlo2, indhi2, \
		numlo1, numhi1, numlo2, numhi2) \
		ASM_RESTORE_KERNEL_GREG(base, \
			indlo1, indhi1, indlo2, indhi2, \
			numlo1, numhi1, numlo2, numhi2)

/* Mark all 32 global registers empty by writing the ETAGEWD tag. */
#define E2K_ALL_GREGS_SET_EMPTY() \
({ \
	register char tag; \
	tag = ETAGEWD; \
	asm ("puttagd 0, %0, %%dg0\n" \
	     "puttagd 0, %0, %%dg1\n" \
	     "puttagd 0, %0, %%dg2\n" \
	     "puttagd 0, %0, %%dg3\n" \
	     "puttagd 0, %0, %%dg4\n" \
	     "puttagd 0, %0, %%dg5\n" \
	     "puttagd 0, %0, %%dg6\n" \
	     "puttagd 0, %0, %%dg7\n" \
	     "puttagd 0, %0, %%dg8\n" \
	     "puttagd 0, %0, %%dg9\n" \
	     "puttagd 0, %0, %%dg10\n" \
	     "puttagd 0, %0, %%dg11\n" \
	     "puttagd 0, %0, %%dg12\n" \
	     "puttagd 0, %0, %%dg13\n" \
	     "puttagd 0, %0, %%dg14\n" \
	     "puttagd 0, %0, %%dg15\n" \
	     "puttagd 0, %0, %%dg16\n" \
	     "puttagd 0, %0, %%dg17\n" \
	     "puttagd 0, %0, %%dg18\n" \
	     "puttagd 0, %0, %%dg19\n" \
	     "puttagd 0, %0, %%dg20\n" \
	     "puttagd 0, %0, %%dg21\n" \
	     "puttagd 0, %0, %%dg22\n" \
	     "puttagd 0, %0, %%dg23\n" \
	     "puttagd 0, %0, %%dg24\n" \
	     "puttagd 0, %0, %%dg25\n" \
	     "puttagd 0, %0, %%dg26\n" \
	     "puttagd 0, %0, %%dg27\n" \
	     "puttagd 0, %0, %%dg28\n" \
	     "puttagd 0, %0, %%dg29\n" \
	     "puttagd 0, %0, %%dg30\n" \
	     "puttagd 0, %0, %%dg31\n" \
	     : \
	     : "ri" ((char) (tag)) \
	     : "%g0", "%g1", "%g2", "%g3", "%g4", "%g5", \
	       "%g6", "%g7", "%g8", "%g9", "%g10", "%g11", \
	       "%g12", "%g13", "%g14", "%g15", "%g16", \
	       "%g17", "%g18", "%g19", "%g20", "%g21", \
	       "%g22", "%g23", "%g24", "%g25", "%g26", \
	       "%g27", "%g28", "%g29", "%g30", "%g31"); \
})

/* Same as above but preserves g16-g19 (reserved for kernel use). */
#define NATIVE_GREGS_SET_EMPTY() \
({ \
	register char tag; \
	tag = ETAGEWD; \
	asm ("puttagd 0, %0, %%dg0\n" \
	     "puttagd 0, %0, %%dg1\n" \
	     "puttagd 0, %0, %%dg2\n" \
	     "puttagd 0, %0, %%dg3\n" \
	     "puttagd 0, %0, %%dg4\n" \
	     "puttagd 0, %0, %%dg5\n" \
	     "puttagd 0, %0, %%dg6\n" \
	     "puttagd 0, %0, %%dg7\n" \
	     "puttagd 0, %0, %%dg8\n" \
	     "puttagd 0, %0, %%dg9\n" \
	     "puttagd 0, %0, %%dg10\n" \
	     "puttagd 0, %0, %%dg11\n" \
	     "puttagd 0, %0, %%dg12\n" \
	     "puttagd 0, %0, %%dg13\n" \
	     "puttagd 0, %0, %%dg14\n" \
	     "puttagd 0, %0, %%dg15\n" \
	     /* g16-g19 are used by kernel */ \
	     /*"puttagd 0, %0, %%dg16\n"*/ \
	     /*"puttagd 0, %0, %%dg17\n"*/ \
	     /*"puttagd 0, %0, %%dg18\n"*/ \
	     /*"puttagd 0, %0, %%dg19\n"*/ \
	     "puttagd 0, %0, %%dg20\n" \
	     "puttagd 0, %0, %%dg21\n" \
	     "puttagd 0, %0, %%dg22\n" \
	     "puttagd 0, %0, %%dg23\n" \
	     "puttagd 0, %0, %%dg24\n" \
	     "puttagd 0, %0, %%dg25\n" \
	     "puttagd 0, %0, %%dg26\n" \
	     "puttagd 0, %0, %%dg27\n" \
	     "puttagd 0, %0, %%dg28\n" \
	     "puttagd 0, %0, %%dg29\n" \
	     "puttagd 0, %0, %%dg30\n" \
	     "puttagd 0, %0, %%dg31\n" \
	     : \
	     : "ri" ((char) (tag)) \
	     : "%g0", "%g1", "%g2", "%g3", "%g4", "%g5", \
	       "%g6", "%g7", "%g8", "%g9", "%g10", "%g11", \
	       "%g12", "%g13", "%g14", "%g15", /*"%g16",*/ \
	       /*"%g17", "%g18", "%g19",*/ "%g20", "%g21", \
	       "%g22", "%g23", "%g24", "%g25", "%g26", \
	       "%g27", "%g28", "%g29", "%g30", "%g31"); \
})

/*
 * We copy the value, tag and extension for all global regs
 * (we must copy all components of register with bad tags too)
 *
 * NOTE(review): the clobber list names %pred1 although only %pred2 is
 * used in the loop below — harmless but worth confirming upstream.
 */
#define E2K_GET_GREGS_FROM_THREAD(_g_u, _gt_u, _base) \
({ \
	u64 reg0, reg1, reg2, reg3, reg6, reg7, reg8; \
 \
	asm ( \
		"addd %[base], 0x0, %[r6]\n" \
		"addd 0, 0x0, %[r7]\n" \
		"addd 0, 0x0, %[r8]\n" \
		\
		"1:\n" \
		"ldrd,2 [%[r6] + %[opc_0]], %[val_lo]\n" \
		"ldrd,5 [%[r6] + %[opc_16]], %[val_hi]\n" \
		"addd %[r6], 32, %[r6]\n" \
		\
		"gettagd,2 %[val_lo], %[tag_lo]\n" \
		"gettagd,5 %[val_hi], %[tag_hi]\n" \
		"shls %[tag_hi], 8, %[tag_hi]\n" \
		"ors %[tag_lo], %[tag_hi], %[tag_lo]\n" \
		"sth [%[gt_u], %[r8]], %[tag_lo]\n" \
		"addd %[r8], 2, %[r8]\n" \
		\
		"puttagd,2 %[val_lo], 0, %[val_lo]\n" \
		"std [%[g_u], %[r7]], %[val_lo]\n" \
		"addd %[r7], 8, %[r7]\n" \
		\
		"puttagd,5 %[val_hi], 0, %[val_hi]\n" \
		"std [%[g_u], %[r7]], %[val_hi]\n" \
		"addd %[r7], 8, %[r7]\n" \
		\
		"disp %%ctpr3, 1b\n" \
		"cmpedb %[r8], 32, %%pred2\n" \
		"ct %%ctpr3 ? ~ %%pred2\n" \
		\
		: [val_lo] "=&r"(reg0), [val_hi] "=&r"(reg1), \
		  [tag_lo] "=&r"(reg2), [tag_hi] "=&r"(reg3), \
		  [r6] "=&r"(reg6), [r7] "=&r"(reg7), [r8] "=&r"(reg8) \
		: [g_u] "r"(_g_u), [gt_u] "r"(_gt_u), [base] "r"(_base),\
		  [opc_0] "i" (TAGGED_MEM_LOAD_REC_OPC), \
		  [opc_16] "i" (TAGGED_MEM_LOAD_REC_OPC | 16UL) \
		: "%ctpr3", "%pred1", "%pred2", "memory"); \
})

/* Reverse direction: rebuild tagged registers in the thread save area
 * from separately stored values (_g_u) and packed tags (_gt_u). */
#define E2K_SET_GREGS_TO_THREAD(_base, _g_u, _gt_u) \
({ \
	u64 reg0, reg1, reg2, reg3, reg6, reg7, reg8; \
 \
	asm ( \
		"addd 0, 0x0, %[r6]\n" \
		"addd 0, 0x0, %[r7]\n" \
		"addd %[base], 0x0, %[r8]\n" \
		\
		"2:\n" \
		"ldd [%[g_u], %[r6]], %[val_lo]\n" \
		"addd %[r6], 8, %[r6]\n" \
		"ldd [%[g_u], %[r6]], %[val_hi]\n" \
		"addd %[r6], 8, %[r6]\n" \
		\
		"ldb [%[gt_u], %[r7]], %[tag_lo]\n" \
		"addd %[r7], 1, %[r7]\n" \
		"ldb [%[gt_u], %[r7]], %[tag_hi]\n" \
		"addd %[r7], 1, %[r7]\n" \
		\
		"puttagd,2 %[val_lo], %[tag_lo], %[val_lo]\n" \
		"puttagd,5 %[val_hi], %[tag_hi], %[val_hi]\n" \
		\
		"strd,2 [%[r8] + %[opc_0]], %[val_lo]\n" \
		"strd,5 [%[r8] + %[opc_16]], %[val_hi]\n" \
		"addd %[r8], 32, %[r8]\n" \
		\
		"disp %%ctpr3, 2b\n" \
		\
		"cmpedb %[r7], 32, %%pred2\n" \
		"ct %%ctpr3 ? ~ %%pred2\n" \
		\
		: [val_lo] "=&r"(reg0), [val_hi] "=&r"(reg1), \
		  [tag_lo] "=&r"(reg2), [tag_hi] "=&r"(reg3), \
		  [r6] "=&r"(reg6), [r7] "=&r"(reg7), [r8] "=&r"(reg8) \
		: [base] "r"(_base), [g_u] "r"(_g_u), [gt_u] "r"(_gt_u),\
		  [opc_0] "i" (TAGGED_MEM_STORE_REC_OPC), \
		  [opc_16] "i" (TAGGED_MEM_STORE_REC_OPC | 16UL) \
		: "%ctpr3", "%pred2", "memory"); \
})

/* Tag-preserving moves between a global register and a local variable
 * (movts/movtd keep the tag with the value). */
#define E2K_MOVE_GREG_TO_REG(greg_no, local_reg) \
({ \
_Pragma("no_asm_inline") \
	asm volatile ("\nmovts \t%%g" #greg_no ", %0" \
		      : "=&r" (local_reg)); \
})
#define E2K_MOVE_DGREG_TO_DREG(greg_no, local_reg) \
({ \
_Pragma("no_asm_inline") \
	asm volatile ("\nmovtd \t%%dg" #greg_no ", %0" \
		      : "=&r" (local_reg)); \
})

/* NOTE(review): "&r" as an *input* constraint below is unusual
 * (earlyclobber has no meaning on inputs) — confirm against lcc docs. */
#define E2K_MOVE_REG_TO_GREG(greg_no, local_reg) \
({ \
_Pragma("no_asm_inline") \
	asm volatile ("\nmovts \t%0, %%g" #greg_no \
		      : \
		      : "&r" ((__e2k_u32_t) (local_reg))); \
})
#define E2K_MOVE_DREG_TO_DGREG(greg_no, local_reg) \
({ \
_Pragma("no_asm_inline") \
	asm volatile ("\nmovtd \t%0, %%dg" #greg_no \
		      : \
		      : "r" ((__e2k_u64_t) (local_reg))); \
})
#define E2K_ADD_DREGS_TO_DGREG(greg_no, local_reg1, local_reg2) \
({ \
_Pragma("no_asm_inline") \
	asm volatile ("\naddd \t%0, %1, %%dg" #greg_no \
		      : \
		      : "ri" ((__e2k_u64_t) (local_reg1)), \
			"r" ((__e2k_u64_t) (local_reg2))); \
})

/*
 * bug #97048
 *
 * We have following macros for registers reading/writing
 * depending on whether lcc supports the register in question:
 *
 * NATIVE_GET_[D]SREG_OPEN() - read supported register
 * NATIVE_GET_[D]SREG_CLOSED() - read unsupported register
 *
 * NATIVE_SET_[D]SREG_OPEN() - write supported register
 * NATIVE_SET_[D]SREG_OPEN_NOIRQ() - write supported register when
 * it must be done under closed interrupts (for psp.hi/pcsp.hi/cr/cutd)
 * NATIVE_SET_[D]SREG_CLOSED_NOEXC() - write unsupported register when
 * it is _not_ listed in exceptions list in 1.1.1 1) of "Scheduling"
 * NATIVE_SET_[D]SREG_CLOSED_EXC() - write unsupported register
when
 * it _is_ listed in exceptions list in 1.1.1 1) of "Scheduling"
 */

/*
 * bug #60599, #97048
 * Allow for lcc optimizations of registers reads and writes
 * (when lcc supports the registers in question)
 */
#if defined CONFIG_OPTIMIZE_REGISTERS_ACCESS
/* Open (inlinable) 32-bit state register read via rrs. */
# define NATIVE_GET_SREG_OPEN(reg_mnemonic) \
({ \
	register __e2k_u32_t res; \
	asm ("rrs %%" #reg_mnemonic ", %0" \
	     : "=r" (res)); \
	res; \
})

/* Open 64-bit state register read via rrd. */
# define NATIVE_GET_DSREG_OPEN(reg_mnemonic) \
({ \
	register __e2k_u64_t res; \
	asm ("rrd %%" #reg_mnemonic ", %0" \
	     : "=r" (res)); \
	res; \
})

/* Open 32-bit state register write via rws. */
# define NATIVE_SET_SREG_OPEN(reg_mnemonic, val) \
({ \
	/* Fake return value is needed for lcc to optimize inline asm... */ \
	register __e2k_u32_t res; \
	asm ("rws %1, %%" #reg_mnemonic \
	     : "=r" (res) \
	     : "ri" ((__e2k_u32_t) (val))); \
})

/* Open 64-bit state register write via rwd. */
# define NATIVE_SET_DSREG_OPEN(reg_mnemonic, val) \
({ \
	/* Fake return value is needed for lcc to optimize inline asm... */ \
	register __e2k_u64_t res; \
	asm ("rwd %1, %%" #reg_mnemonic \
	     : "=r" (res) \
	     : "ri" ((__e2k_u64_t) (val))); \
})

/*
 * *_NOIRQ version is for psp.hi/pcsp.hi/cr/cutd
 *
 * Rules for writing:
 * 1) There must be NO exceptions and interrupts
 * 2) As a consequence of 1), instructions that are placed _later_
 * than "rw" should not generate exceptions too because compiler
 * can reorder them before the "rw" instruction.
 *
 * IOW in the whole area covered by all_irq_save()/all_irq_restore()
 * there must not be any exception-generating instructions.
 */

/* Open write for the no-exception/no-interrupt case (see rules above). */
# define NATIVE_SET_DSREG_OPEN_NOIRQ(reg_mnemonic, val) \
({ \
	register __e2k_u64_t res; \
	asm ("rwd %1, %%" #reg_mnemonic \
	     : "=r" (res) \
	     : "ri" ((__e2k_u64_t) (val))); \
})

#else
/* Without CONFIG_OPTIMIZE_REGISTERS_ACCESS every access falls back to
 * the closed (non-inlinable, nop-padded) variants. */
# define NATIVE_GET_SREG_OPEN	NATIVE_GET_SREG_CLOSED
# define NATIVE_GET_DSREG_OPEN	NATIVE_GET_DSREG_CLOSED
# define NATIVE_SET_SREG_OPEN(reg, val) \
		NATIVE_SET_SREG_CLOSED_NOEXC(reg, (val), 7)
# define NATIVE_SET_DSREG_OPEN(reg, val) \
		NATIVE_SET_DSREG_CLOSED_NOEXC(reg, (val), 7)
# define NATIVE_SET_DSREG_OPEN_NOIRQ(reg, val) \
		NATIVE_SET_DSREG_CLOSED_NOEXC(reg, (val), 7)
#endif


/*
 * bug #97048
 * Closed GNU asm is used for rarely read registers.
 * Keep "volatile" since some of those registers can have side effects
 * (for example, see %dibsr reading in arch/e2k/kernel/perf_event.c -
 * it must be done before reading %dimar; or look at %clkr).
 */
#define NATIVE_GET_SREG_CLOSED(reg_mnemonic) \
({ \
	register __e2k_u32_t res; \
	_Pragma("no_asm_inline") \
	asm volatile ("rrs %%" #reg_mnemonic ", %0" \
		      : "=r" (res)); \
	res; \
})

#define NATIVE_GET_DSREG_CLOSED(reg_mnemonic) \
({ \
	register __e2k_u64_t res; \
	_Pragma("no_asm_inline") \
	asm volatile ("rrd %%" #reg_mnemonic ", %0" \
		      : "=r" (res)); \
	res; \
})

/* Same as above with an explicit clobber list for registers with
 * read side effects. */
#define NATIVE_GET_DSREG_CLOSED_CLOBBERS(reg_mnemonic, clobbers) \
({ \
	register __e2k_u64_t res; \
	_Pragma("no_asm_inline") \
	asm volatile ("rrd %%" #reg_mnemonic ", %0" \
		      : "=r" (res) :: clobbers); \
	res; \
})

/*
 * These macros will insert real wide instructions
 * instead of doing all nops with "nop x" field in HS.
 * This is needed, for example, when writing %wd.
 */
/* NOP_n_MINUS_4: max(n - 4, 0), used because 4 explicit {nop} wide
 * instructions are emitted after the write below. */
#define NOP_0_MINUS_4	0
#define NOP_1_MINUS_4	0
#define NOP_2_MINUS_4	0
#define NOP_3_MINUS_4	0
#define NOP_4_MINUS_4	0
#define NOP_5_MINUS_4	1
#define NOP_6_MINUS_4	2
#define NOP_7_MINUS_4	3

/*
 * bug #97048
 *
 * For closed writes we have to manually check how many NOPs are needed
 * for this register.
If we try to use _Pragma("no_asm_inline"), then
 * lcc will use its default value of 5 nops which is not always enough.
 *
 * Also, according to "Scheduling 1.1.1", the next 3 long instructions
 * after the write must not generate delayed exceptions, and the next
 * 4 long instructions must not generate exact exceptions. So add 4 nops
 * after the write.
 *
 * This is slow but this version is used only for rarely written registers.
 * %usd/%psp/etc registers are supported by lcc and are written with an
 * open GNU asm.
 */
/* Closed 32-bit write, register NOT in the "Scheduling" exceptions list:
 * nop-pad before the write and emit 4 real wide {nop}s after it. */
#define NATIVE_SET_SREG_CLOSED_NOEXC(reg_mnemonic, val, nop) \
({ \
	asm volatile ("{nop " __stringify(NOP_##nop##_MINUS_4) "\n" \
		      " rws %0, %%" #reg_mnemonic "}\n" \
		      "{nop} {nop} {nop} {nop}" \
		      : \
		      : "ri" ((__e2k_u32_t) (val))); \
})

/* Same, 64-bit (rwd). */
#define NATIVE_SET_DSREG_CLOSED_NOEXC(reg_mnemonic, val, nop) \
({ \
	asm volatile ("{nop " __stringify(NOP_##nop##_MINUS_4) "\n" \
		      " rwd %0, %%" #reg_mnemonic "}" \
		      "{nop} {nop} {nop} {nop}" \
		      : \
		      : "ri" ((__e2k_u64_t) (val))); \
})

/* Write a lo/hi register pair back-to-back; only the second write
 * carries the nop padding. */
#define NATIVE_SET_DSREGS_CLOSED_NOEXC(reg_mnemonic_lo, reg_mnemonic_hi, \
		_val_lo, _val_hi, nop) \
({ \
	asm volatile ("{rwd %[val_lo], %%" #reg_mnemonic_lo "}" \
		      "{nop " __stringify(NOP_##nop##_MINUS_4) "\n" \
		      " rwd %[val_hi], %%" #reg_mnemonic_hi "}" \
		      "{nop} {nop} {nop} {nop}" \
		      : \
		      : [val_lo] "ri" ((u64) (_val_lo)), \
			[val_hi] "ri" ((u64) (_val_hi))); \
})

/*
 * For some registers (see "Scheduling 1.1.1") there is no requirement
 * of avoiding deferred and exact exception after the long instruction.
 * But some registers (e.g. %wd, %bgr) still require at least 1 real
 * instruction after the write.
+ */
+/* Closed 32-bit system register write; caller supplies the raw nop
+ * count directly (no trailing exception-guard nops, unlike the
+ * *_NOEXC variants above). */
+#define NATIVE_SET_SREG_CLOSED_EXC(reg_mnemonic, val, nop) \
+({ \
+	asm volatile ("{nop " __stringify(nop) "\n" \
+		      " rws %0, %%" #reg_mnemonic "}\n" \
+		      : \
+		      : "ri" ((__e2k_u32_t) (val))); \
+})
+
+/* 64-bit variant of NATIVE_SET_SREG_CLOSED_EXC ("rwd"). */
+#define NATIVE_SET_DSREG_CLOSED_EXC(reg_mnemonic, val, nop) \
+({ \
+	asm volatile ("{nop " __stringify(nop) "\n" \
+		      " rwd %0, %%" #reg_mnemonic "}" \
+		      : \
+		      : "ri" ((__e2k_u64_t) (val))); \
+})
+
+/* As NATIVE_SET_DSREG_CLOSED_EXC but lets the caller pass an explicit
+ * clobber list to the closed asm. */
+#define NATIVE_SET_DSREG_CLOSED_EXC_CLOBBERS(reg_mnemonic, val, nop, clobbers) \
+({ \
+	asm volatile ("{nop " __stringify(nop) "\n" \
+		      " rwd %0, %%" #reg_mnemonic "}" \
+		      : \
+		      : "ri" ((__e2k_u64_t) (val)) \
+		      : clobbers); \
+})
+
+/* Restore %sbr, %usd.lo/%usd.hi and %upsr on system call exit.
+ * The %sbr write is patched at runtime via the ALTERNATIVE_* mechanism:
+ * on CPUs with the CPU_HWBUG_USD_ALIGNMENT erratum an extra nop is
+ * bundled with it.  Note %usd.lo is written before %usd.hi. */
+#define NATIVE_EXIT_HANDLE_SYSCALL(sbr, usd_hi, usd_lo, upsr) \
+({ \
+	asm volatile (ALTERNATIVE_1_ALTINSTR \
+		      /* CPU_HWBUG_USD_ALIGNMENT version */ \
+		      "{rwd %0, %%sbr;" \
+		      " nop}" \
+		      ALTERNATIVE_2_OLDINSTR \
+		      /* Default version */ \
+		      "{rwd %0, %%sbr}" \
+		      ALTERNATIVE_3_FEATURE(%[facility]) \
+		      "{rwd %2, %%usd.lo}" \
+		      "{rwd %1, %%usd.hi}" \
+		      "{rws %3, %%upsr;" \
+		      " nop 4}\n" \
+		      : \
+		      : "ri" ((__e2k_u64_t) (sbr)), \
+			"ri" ((__e2k_u64_t) (usd_hi)), \
+			"ri" ((__e2k_u64_t) (usd_lo)), \
+			"ri" ((__e2k_u32_t) (upsr)), \
+			[facility] "i" (CPU_HWBUG_USD_ALIGNMENT)); \
+})
+
+
+/* lcc ignores manually specified clobbers for opened GNU asm,
+ * so use closed version (bug #69565, bug #60599) */
+#define NATIVE_SET_PSR_IRQ_BARRIER(val) \
+({ \
+	asm volatile ("{\n" \
+		      "nop 5\n" \
+		      "rwd %0, %%psr" \
+		      "}" \
+		      : \
+		      : "ri" ((__e2k_u64_t) (val)) \
+		      : "memory", PREEMPTION_CLOBBERS); \
+})
+#define NATIVE_SET_UPSR_IRQ_BARRIER(val) \
+({ \
+	asm volatile ("{\n" \
+		      "nop 4\n" \
+		      "rwd %0, %%upsr" \
+		      "}" \
+		      : \
+		      : "ri" ((__e2k_u64_t) (val)) \
+		      : "memory", PREEMPTION_CLOBBERS); \
+})
+
+
+/* Read a 64-bit MMU register ("mmurr"). */
+#define NATIVE_GET_MMUREG(reg_mnemonic) \
+({ \
+	register __e2k_u64_t res; \
+	asm volatile ("mmurr \t%%" #reg_mnemonic ", %0" \
+		      : "=r" (res)); \
+	res; \
+})
+
+/* Write a 64-bit MMU register ("mmurw"), open asm form. */
+#define NATIVE_SET_MMUREG(reg_mnemonic, val) \
+({ \
+	asm volatile ("mmurw \t%0, %%" #reg_mnemonic \
+		      : \
+		      : "r" ((__e2k_u64_t) (val))); \
+})
+
+/* Closed MMU register write bundled with a caller-chosen nop count. */
+#define NATIVE_SET_MMUREG_CLOSED(reg_mnemonic, val, nop) \
+({ \
+	asm volatile ("{nop " #nop "\n" \
+		      " mmurw %0, %%" #reg_mnemonic "}" \
+		      : \
+		      : "r" ((u64) (val))); \
+})
+
+
+/* Tagged load from memory (ldrd with TAGGED_MEM_LOAD_REC_OPC) straight
+ * into an MMU register, keeping the tag in transit. */
+#define NATIVE_TAGGED_LOAD_TO_MMUREG(reg_mnemonic, _addr) \
+do { \
+	unsigned long long _tmp; \
+	asm volatile ("ldrd [ %[addr] + %[opc] ], %[tmp]\n" \
+		      "mmurw,s %[tmp], %%" #reg_mnemonic "\n" \
+		      : [tmp] "=r" (_tmp) \
+		      : [addr] "m" (*((unsigned long long *) (_addr))), \
+			[opc] "i" (TAGGED_MEM_LOAD_REC_OPC)); \
+} while (0)
+
+/* Read an MMU register and store it to memory as a tagged value
+ * (strd with TAGGED_MEM_STORE_REC_OPC).
+ * NOTE(review): the target memory is listed as an "m" *input*; the
+ * store's effect on *_addr is not expressed as an output — presumably
+ * relies on the asm being volatile; confirm against lcc semantics. */
+#define NATIVE_STORE_TAGGED_MMUREG(_addr, reg_mnemonic) \
+do { \
+	unsigned long long _tmp; \
+	asm volatile ("mmurr %%" #reg_mnemonic ", %[tmp]\n" \
+		      "strd [ %[addr] + %[opc] ], %[tmp]\n" \
+		      : [tmp] "=r" (_tmp) \
+		      : [addr] "m" (*((unsigned long long *) (_addr))), \
+			[opc] "i" (TAGGED_MEM_STORE_REC_OPC)); \
+} while (0)
+
+/* Read/write an MMU debug register through its memory-mapped address
+ * with the MAS_MMU_DEBUG_REG address specifier. */
+#define NATIVE_GET_MMU_DEBUG_REG(reg_no) \
+({ \
+	register __e2k_u64_t res; \
+	asm volatile ("ldd,5 \t[%1 + 0] %2, %0" \
+		      : "=r" (res) \
+		      : "ri" ((__e2k_ptr_t) _DEBUG_REG_NO_TO_MMU_ADDR(reg_no)), \
+			"i" MAS_MMU_DEBUG_REG); \
+	res; \
+})
+#define NATIVE_SET_MMU_DEBUG_REG(reg_no, val) \
+({ \
+	asm volatile ("std,2 \t[%0 + 0] %1, %2" \
+		      : \
+		      : "ri" ((__e2k_ptr_t) _DEBUG_REG_NO_TO_MMU_ADDR(reg_no)), \
+			"i" MAS_MMU_DEBUG_REG, \
+			"ri" ((__e2k_u64_t) (val))); \
+})
+
+/* Read a 32-bit AAU register on the given channel. */
+#define NATIVE_GET_AAUREG(reg_mnemonic, chan_letter) \
+({ \
+	register __e2k_u32_t res; \
+	asm ("aaurr," #chan_letter " \t%%" #reg_mnemonic ", %0" \
+	     : "=r" (res)); \
+	res; \
+})
+
+/* This macro is used to pack two 'aaurr' into one long instruction */
+#define NATIVE_GET_AAUREGS(l_reg, r_reg, lval, rval) \
+({ \
+	asm ("aaurr,2 \t%%" #l_reg ", %0\n" \
+	     "aaurr,5 \t%%" #r_reg ", %1" \
+	     : "=r" (lval), "=r" (rval)); \
+})
+
+/* Write a 32-bit AAU register on the given channel; the dummy output
+ * keeps the otherwise input-only asm from being dropped. */
+#define NATIVE_SET_AAUREG(reg_mnemonic, val, chan_letter) \
+({ \
+	int unused; \
+	asm ("aaurw," #chan_letter " %1, %%" #reg_mnemonic \
+	     : "=r" (unused) \
+	     : "r" ((__e2k_u32_t) (val))); \
+})
+
+/* This macro is used to pack two 'aaurw' into one long instruction */
+#define NATIVE_SET_AAUREGS(l_reg, r_reg, lval, rval) \
+do { \
+	int unused; \
+	asm ("aaurw,2 %1, %%" #l_reg "\n" \
+	     "aaurw,5 %2, %%" #r_reg \
+	     : "=r" (unused) \
+	     : "r" ((__e2k_u32_t) (lval)), "r" ((__e2k_u32_t) (rval))); \
+} while (0)
+
+/* 64-bit AAU register read on the given channel. */
+#define NATIVE_GET_AAUDREG(reg_mnemonic, chan_letter) \
+({ \
+	register __e2k_u64_t res; \
+	asm ("aaurrd," #chan_letter " %%" #reg_mnemonic ", %0" \
+	     : "=r" (res)); \
+	res; \
+})
+
+/* Two 64-bit AAU reads packed into one long instruction (chans 2/5). */
+#define NATIVE_GET_AAUDREGS(l_reg, r_reg, lval, rval) \
+({ \
+	asm ("aaurrd,2 %%" #l_reg ", %0\n" \
+	     "aaurrd,5 %%" #r_reg ", %1" \
+	     : "=r" (lval), "=r" (rval)); \
+})
+
+
+/* 64-bit AAU register write on the given channel. */
+#define NATIVE_SET_AAUDREG(reg_mnemonic, val, chan_letter) \
+do { \
+	int unused; \
+	asm ("aaurwd," #chan_letter " %1, %%" #reg_mnemonic \
+	     : "=r" (unused) \
+	     : "r" (val)); \
+} while (0)
+
+/* Two 64-bit AAU writes packed into one long instruction (chans 2/5). */
+#define NATIVE_SET_AAUDREGS(l_reg, r_reg, lval, rval) \
+do { \
+	int unused; \
+	asm ("aaurwd,2 %1, %%" #l_reg "\n" \
+	     "aaurwd,5 %2, %%" #r_reg \
+	     : "=r" (unused) \
+	     : "r" (lval), "r" (rval)); \
+} while (0)
+
+
+/* Read a quad AAU register into (mem_p)->lo/hi; the result lands in
+ * %qb[0] so %b[0]/%b[1] are pinned via register asm and clobbered. */
+#define NATIVE_GET_AAUQREG(mem_p, reg_mnemonic) \
+({ \
+	register __e2k_u64_t lo asm ("%b[0]"); \
+	register __e2k_u64_t hi asm ("%b[1]"); \
+	asm volatile ("aaurrq \t%%" #reg_mnemonic ", %%qb[0]" \
+		      : \
+		      : \
+		      : "%b[0]", "%b[1]"); \
+	(mem_p)->lo = lo; \
+	(mem_p)->hi = hi; \
+})
+
+/* Read four quad AAU registers into four consecutive lo/hi pairs
+ * starting at mem_p, using %qb[0..7] as the landing area. */
+#define NATIVE_GET_AAUQREGS(mem_p, reg1, reg2, reg3, reg4) \
+({ \
+	register __e2k_u64_t lo1 asm ("%b[0]"); \
+	register __e2k_u64_t hi1 asm ("%b[1]"); \
+	register __e2k_u64_t lo2 asm ("%b[2]"); \
+	register __e2k_u64_t hi2 asm ("%b[3]"); \
+	register __e2k_u64_t lo3 asm ("%b[4]"); \
+	register __e2k_u64_t hi3 asm ("%b[5]"); \
+	register __e2k_u64_t lo4 asm ("%b[6]"); \
+	register __e2k_u64_t hi4 asm ("%b[7]"); \
+	asm volatile ("aaurrq \t%%" #reg1 ", %%qb[0]\n" \
+		      "aaurrq \t%%" #reg2 ", %%qb[2]\n" \
+		      "aaurrq \t%%" #reg3 ", %%qb[4]\n" \
+		      "aaurrq \t%%" #reg4 ", %%qb[6]\n" \
+		      : \
+		      : \
+		      : "%b[0]", "%b[1]", "%b[2]", "%b[3]", \
+			"%b[4]", "%b[5]", "%b[6]", "%b[7]"); \
+	(mem_p)->lo = lo1; \
+	(mem_p)->hi = hi1; \
+	(mem_p + 1)->lo = lo2; \
+	(mem_p + 1)->hi = hi2; \
+	(mem_p + 2)->lo = lo3; \
+	(mem_p + 2)->hi = hi3; \
+	(mem_p + 3)->lo = lo4; \
+	(mem_p + 3)->hi = hi4; \
+})
+
+/* Write one quad AAU register from (mem_p)->lo/hi.
+ * NOTE(review): relies on lo/hi being pinned to %b[0]/%b[1] by the
+ * register asm declarations while the instruction names %r0 — confirm
+ * this binding against lcc's quad-operand conventions. */
+#define NATIVE_SET_AAUQREG(reg_mnemonic, mem_p) \
+do { \
+	register u64 lo asm ("%b[0]"); \
+	register u64 hi asm ("%b[1]"); \
+	int unused; \
+	lo = (mem_p)->lo; \
+	hi = (mem_p)->hi; \
+	asm ("aaurwq %%r0, %%" #reg_mnemonic \
+	     : "=r" (unused) \
+	     : "r" (lo), "r" (hi)); \
+} while (0)
+
+/* Bulk-load 64 bytes at mem_p into %db[0..7] and write them out as
+ * four quad AAU registers.  The nop count before the second load
+ * bundle is runtime-patched: 3 on CPU_FEAT_ISET_V6 CPUs, 1 otherwise. */
+#define NATIVE_SET_AAUQREGS(mem_p, reg1, reg2, reg3, reg4) \
+do { \
+	asm volatile ("{ldd,0 [ %0 + 0x0 ], %%db[0]\n" \
+		      " ldd,2 [ %0 + 0x8 ], %%db[1]\n" \
+		      " ldd,3 [ %0 + 0x10 ], %%db[2]\n" \
+		      " ldd,5 [ %0 + 0x18 ], %%db[3]}\n" \
+		      ALTERNATIVE_1_ALTINSTR \
+		      /* CPU_FEAT_ISET_V6 version */ \
+		      "{nop 3\n" \
+		      " ldd,0 [ %0 + 0x20 ], %%db[4]\n" \
+		      " ldd,2 [ %0 + 0x28 ], %%db[5]\n" \
+		      " ldd,3 [ %0 + 0x30 ], %%db[6]\n" \
+		      " ldd,5 [ %0 + 0x38 ], %%db[7]}\n" \
+		      ALTERNATIVE_2_OLDINSTR \
+		      /* Default version */ \
+		      "{nop 1\n" \
+		      " ldd,0 [ %0 + 0x20 ], %%db[4]\n" \
+		      " ldd,2 [ %0 + 0x28 ], %%db[5]\n" \
+		      " ldd,3 [ %0 + 0x30 ], %%db[6]\n" \
+		      " ldd,5 [ %0 + 0x38 ], %%db[7]}\n" \
+		      ALTERNATIVE_3_FEATURE(%[facility]) \
+		      "aaurwq,2 %%qb[0], %%" #reg1 "\n" \
+		      "aaurwq,2 %%qb[2], %%" #reg2 "\n" \
+		      "aaurwq,2 %%qb[4], %%" #reg3 "\n" \
+		      "aaurwq,2 %%qb[6], %%" #reg4 "\n" \
+		      : \
+		      : "r" (mem_p), [facility] "i" (CPU_FEAT_ISET_V6) \
+		      : "%b[0]", "%b[1]", "%b[2]", "%b[3]", \
+			"%b[4]", "%b[5]", "%b[6]", "%b[7]"); \
+} while (0)
+
+/* Clear AAU to prepare it for restoring */
+#define NATIVE_CLEAR_APB() \
+do { \
+	asm volatile ("1:\n" \
+		      "{ipd 0; disp %%ctpr2, 1b}" \
+		      : \
+		      : \
+		      : "ctpr2"); \
+} while (0)
+
+/* Do "disp" for all %ctpr's */
+#define E2K_DISP_CTPRS() \
+	asm volatile ("1:\n" \
+		      "{ipd 0; disp %%ctpr1, 1b}" \
+		      "{ipd 0; disp %%ctpr2, 1b}" \
+		      "{ipd 0; disp %%ctpr3, 1b}" \
+		      : \
+		      : \
+		      : "ctpr1", "ctpr2", "ctpr3")
+
+/* Load *(_addr) with an explicit MAS; NOT_VOLATILE + "no_asm_inline"
+ * keep lcc from inlining/reordering the access away. */
+#define LOAD_NV_MAS(_addr, _val, _mas, size_letter, clobber) \
+do { \
+	_Pragma("no_asm_inline") \
+	asm NOT_VOLATILE ("ld" #size_letter" %[addr], %[val], mas=%[mas]" \
+			  : [val] "=r" (_val) \
+			  : [addr] "m" (*(_addr)), \
+			    [mas] "i" (_mas) \
+			  : clobber); \
+} while (0)
+
+/* Store _val to *(_addr) with an explicit MAS.  Store-release MASes
+ * get special handling: on CPUs without CPU_NO_HWBUG_SOFT_WAIT the
+ * release semantics are emulated with an explicit
+ * "wait st_c=1, ld_c=1" before a plain store (runtime-patched via
+ * ALTERNATIVE). */
+#define STORE_NV_MAS(_addr, _val, _mas, size_letter, clobber) \
+do { \
+	if ((_mas) == MAS_STORE_RELEASE_V6(MAS_MT_0) || \
+	    (_mas) == MAS_STORE_RELEASE_V6(MAS_MT_1)) { \
+		_Pragma("no_asm_inline") \
+		asm NOT_VOLATILE ( \
+			ALTERNATIVE( \
+				/* Default version */ \
+				"{wait st_c=1, ld_c=1\n" \
+				" st" #size_letter" %[addr], %[val]}", \
+				/* CPU_NO_HWBUG_SOFT_WAIT version */ \
+				"{st" #size_letter" %[addr], %[val], mas=%[mas]}", \
+				%[facility]) \
+			: [addr] "=m" (*(_addr)) \
+			: [val] "r" (_val), \
+			  [mas] "i" (_mas), \
+			  [facility] "i" (CPU_NO_HWBUG_SOFT_WAIT) \
+			: clobber); \
+	} else { \
+		_Pragma("no_asm_inline") \
+		asm NOT_VOLATILE ("st" #size_letter" %[addr], %[val], mas=%[mas]" \
+				  : [addr] "=m" (*(_addr)) \
+				  : [val] "r" (_val), \
+				    [mas] "i" (_mas) \
+				  : clobber); \
+	} \
+} while (0)
+
+/*
+ * Do load with specified MAS
+ */
+
+/*
+ * After iset v6 these loads are not ordered with regards to RAM accesses,
+ * so add barriers manually. Driver writers who want control over barriers
+ * should use readX_relaxed()/writeX_relaxed() anyway.
+ */
+#if CONFIG_CPU_ISET >= 6
+
+/* Barrier after an I/O (MAS_IOADDR) read: order it against later
+ * loads and stores. */
+# define READ_MAS_BARRIER_AFTER(mas) \
+do { \
+	if ((mas) == MAS_IOADDR) \
+		__E2K_WAIT(_ld_c | _lal | _sal); \
+} while (0)
+/* Barrier before an I/O write: drain prior loads and stores. */
+# define WRITE_MAS_BARRIER_BEFORE(mas) \
+do { \
+	if ((mas) == MAS_IOADDR) \
+		__E2K_WAIT(_st_c | _sas | _ld_c | _sal); \
+} while (0)
+/*
+ * Not required by documentation, but this is how
+ * x86 works and how most of the drivers are tested.
+ */ +# define WRITE_MAS_BARRIER_AFTER(mas) \ +do { \ + if ((mas) == MAS_IOADDR) \ + __E2K_WAIT(_st_c | _sas); \ +} while (0) + +#elif CONFIG_CPU_ISET == 0 + +# define READ_MAS_BARRIER_AFTER(mas) \ +do { \ + if ((mas) == MAS_IOADDR) \ + __E2K_WAIT(_ld_c); \ +} while (0) +# define WRITE_MAS_BARRIER_BEFORE(mas) \ +do { \ + if ((mas) == MAS_IOADDR) \ + __E2K_WAIT(_st_c | _sas | _ld_c | _sal); \ +} while (0) +/* + * Not required by documentation, but this is how + * x86 works and how most of the drivers are tested. + */ +# define WRITE_MAS_BARRIER_AFTER(mas) \ +do { \ + if ((mas) == MAS_IOADDR) \ + __E2K_WAIT(_st_c | _sas); \ +} while (0) + +#else + +# define READ_MAS_BARRIER_AFTER(mas) +# define WRITE_MAS_BARRIER_BEFORE(mas) +# define WRITE_MAS_BARRIER_AFTER(mas) +#endif + +#define NATIVE_DO_READ_MAS_TO(addr, val, mas, size_letter, chan_letter) \ +({ \ + int __mas = (mas); \ + asm volatile ("ld" #size_letter "," #chan_letter " \t0x0, [%1] %2, %0" \ + : "=r" (val) \ + : "r" ((__e2k_ptr_t) (addr)), \ + "i" (__mas)); \ + READ_MAS_BARRIER_AFTER(__mas); \ +}) + +#define NATIVE_DO_READ_MAS(addr, mas, type, size_letter, chan_letter) \ +({ \ + register type res; \ + int __mas = (mas); \ + asm volatile ("ld" #size_letter "," #chan_letter " \t0x0, [%1] %2, %0" \ + : "=r" (res) \ + : "r" ((u64) (addr)), \ + "i" (__mas)); \ + READ_MAS_BARRIER_AFTER(__mas); \ + res; \ +}) + +#define NATIVE_DO_WRITE_MAS(addr, val, mas, type, size_letter, chan_letter) \ +({ \ + int __mas = (mas); \ + WRITE_MAS_BARRIER_BEFORE(__mas); \ + asm volatile ("st" #size_letter "," #chan_letter " \t0x0, [%0] %2, %1" \ + : \ + : "r" ((__e2k_ptr_t) (addr)), \ + "r" ((type) (val)), \ + "i" (__mas)); \ + WRITE_MAS_BARRIER_AFTER(__mas); \ +}) + +#define NATIVE_DO_WRITE_TAGGED(addr, val, type, size_letter, chan_letter) \ +({ \ + asm volatile ("st" #size_letter ",sm," #chan_letter " \t0x0, [%0], %1" \ + : \ + : "r" ((__e2k_ptr_t) (addr)), \ + "r" ((type) (val))); \ +}) + +#define NATIVE_READ_MAS_B_CH_TO(addr, val, 
mas, chan_letter) \ + NATIVE_DO_READ_MAS_TO((addr), (val), (mas), b, chan_letter) +#define NATIVE_READ_MAS_H_CH_TO(addr, val, mas, chan_letter) \ + NATIVE_DO_READ_MAS_TO((addr), (val), (mas), h, chan_letter) +#define NATIVE_READ_MAS_W_CH_TO(addr, val, mas, chan_letter) \ + NATIVE_DO_READ_MAS_TO((addr), (val), (mas), w, chan_letter) +#define NATIVE_READ_MAS_D_CH_TO(addr, val, mas, chan_letter) \ + NATIVE_DO_READ_MAS_TO((addr), (val), (mas), d, chan_letter) + +#define NATIVE_READ_MAS_B_CH(addr, mas, chan_letter) \ + NATIVE_DO_READ_MAS((addr), (mas), __e2k_u8_t, b, chan_letter) +#define NATIVE_READ_MAS_H_CH(addr, mas, chan_letter) \ + NATIVE_DO_READ_MAS((addr), (mas), __e2k_u16_t, h, chan_letter) +#define NATIVE_READ_MAS_W_CH(addr, mas, chan_letter) \ + NATIVE_DO_READ_MAS((addr), (mas), __e2k_u32_t, w, chan_letter) +#define NATIVE_READ_MAS_D_CH(addr, mas, chan_letter) \ + NATIVE_DO_READ_MAS((addr), (mas), __e2k_u64_t, d, chan_letter) + +#define NATIVE_READ_MAS_B(addr, mas) NATIVE_READ_MAS_B_CH((addr), (mas), 2) +#define NATIVE_READ_MAS_H(addr, mas) NATIVE_READ_MAS_H_CH((addr), (mas), 2) +#define NATIVE_READ_MAS_W(addr, mas) NATIVE_READ_MAS_W_CH((addr), (mas), 2) +#define NATIVE_READ_MAS_D(addr, mas) NATIVE_READ_MAS_D_CH((addr), (mas), 2) + +#define NATIVE_READ_MAS_B_5(addr, mas) NATIVE_READ_MAS_B_CH((addr), (mas), 5) +#define NATIVE_READ_MAS_H_5(addr, mas) NATIVE_READ_MAS_H_CH((addr), (mas), 5) +#define NATIVE_READ_MAS_W_5(addr, mas) NATIVE_READ_MAS_W_CH((addr), (mas), 5) +#define NATIVE_READ_MAS_D_5(addr, mas) NATIVE_READ_MAS_D_CH((addr), (mas), 5) + +#define NATIVE_WRITE_MAS_B_CH(addr, val, mas, chan_letter) \ + NATIVE_DO_WRITE_MAS((addr), (val), (mas), __e2k_u8_t, b, \ + chan_letter) +#define NATIVE_WRITE_MAS_H_CH(addr, val, mas, chan_letter) \ + NATIVE_DO_WRITE_MAS((addr), (val), (mas), __e2k_u16_t, h, \ + chan_letter) +#define NATIVE_WRITE_MAS_W_CH(addr, val, mas, chan_letter) \ + NATIVE_DO_WRITE_MAS((addr), (val), (mas), __e2k_u32_t, w, \ + chan_letter) +#define 
NATIVE_WRITE_MAS_D_CH(addr, val, mas, chan_letter) \ + NATIVE_DO_WRITE_MAS((addr), (val), (mas), __e2k_u64_t, d, \ + chan_letter) +#define NATIVE_WRITE_TAGGED_D_CH(addr, val, chan_letter) \ + NATIVE_DO_WRITE_TAGGED((addr), (val), __e2k_u64_t, d, \ + chan_letter) +#define NATIVE_WRITE_MAS_B(addr, val, mas) \ + NATIVE_DO_WRITE_MAS(addr, val, mas, __e2k_u8_t, b, 2) +#define NATIVE_WRITE_MAS_H(addr, val, mas) \ + NATIVE_DO_WRITE_MAS(addr, val, mas, __e2k_u16_t, h, 2) +#define NATIVE_WRITE_MAS_W(addr, val, mas) \ + NATIVE_DO_WRITE_MAS(addr, val, mas, __e2k_u32_t, w, 2) +#define NATIVE_WRITE_MAS_D(addr, val, mas) \ + NATIVE_DO_WRITE_MAS(addr, val, mas, __e2k_u64_t, d, 2) + +/* + * Read from and write to system configuration registers SIC + * Now SIC is the same as NBSRs registers + */ + +#define NATIVE_SET_SICREG(reg_mnemonic, val, cln, pln) \ +({ \ + register __e2k_u64_t addr; \ + register __e2k_u64_t node_id = (cln) << 2; \ + node_id = node_id + ((pln)&0x3); \ + addr = (__e2k_u64_t) THE_NODE_NBSR_PHYS_BASE(node_id); \ + addr = addr + SIC_##reg_mnemonic; \ + NATIVE_WRITE_MAS_W(addr, val, MAS_IOADDR); \ +}) +#define NATIVE_GET_SICREG(reg_mnemonic, cln, pln) \ +({ \ + register __e2k_u32_t res; \ + register __e2k_u64_t addr; \ + register __e2k_u64_t node_id = (cln) << 2; \ + node_id = node_id + ((pln)&0x3); \ + addr = (__e2k_u64_t) THE_NODE_NBSR_PHYS_BASE(node_id); \ + addr = addr + SIC_##reg_mnemonic; \ + res = NATIVE_READ_MAS_W(addr, MAS_IOADDR); \ + res; \ +}) + + +#if !defined(CONFIG_BOOT_E2K) && !defined(E2K_P2V) +# define E2K_PREFETCH_L2_SPEC(addr) \ +do { \ + int unused; \ + asm ("ldb,sm %1, 0, %%empty, mas=%2\n" \ + : "=r" (unused) \ + : "r" (addr), \ + "i" (MAS_LOAD_SPEC | MAS_BYPASS_L1_CACHE)); \ +} while (0) + +# define E2K_PREFETCH_L2_NOSPEC_OFFSET(addr, offset) \ +do { \ + int unused; \ + asm ("ldb %1, %2, %%empty, mas=%3\n" \ + : "=r" (unused) \ + : "r" (addr), \ + "i" (offset), \ + "i" (MAS_BYPASS_L1_CACHE)); \ +} while (0) + +# define 
E2K_PREFETCH_L2_NOSPEC_256(addr) \ +do { \ + int unused; \ + asm ( "ldb,0 %1, 0, %%empty, mas=%2\n" \ + "ldb,2 %1, 64, %%empty, mas=%2\n" \ + "ldb,3 %1, 128, %%empty, mas=%2\n" \ + "ldb,5 %1, 192, %%empty, mas=%2" \ + : "=r" (unused) \ + : "r" (addr), \ + "i" (MAS_BYPASS_L1_CACHE)); \ +} while (0) + +# define E2K_PREFETCH_L1_SPEC(addr) \ +do { \ + int unused; \ + asm ("ldb,sm %1, 0, %%empty, mas=%2\n" \ + : "=r" (unused) \ + : "r" (addr), \ + "i" (MAS_LOAD_SPEC)); \ +} while (0) + +# define E2K_PREFETCH_L1_NOSPEC(addr) \ +do { \ + int unused; \ + asm ("ldb %1, 0, %%empty" \ + : "=r" (unused) \ + : "r" (addr)); \ +} while (0) + +# define E2K_PREFETCH_L1_SPEC_OFFSET(addr, offset) \ +do { \ + int unused; \ + asm ("ldb,sm %1, %2, %%empty, mas=%3\n" \ + : "=r" (unused) \ + : "r" (addr), \ + "i" (offset), \ + "i" (MAS_LOAD_SPEC)); \ +} while (0) +#else +# define E2K_PREFETCH_L2_SPEC(addr) do { (void) (addr); } while (0) +# define E2K_PREFETCH_L2_NOSPEC_OFFSET(addr, offset) \ + do { (void) (addr); (void) (offset); } while (0) +# define E2K_PREFETCH_L2_NOSPEC_256(addr) do { (void) (addr); } while (0) +# define E2K_PREFETCH_L1_SPEC(addr) do { (void) (addr); } while (0) +# define E2K_PREFETCH_L1_NOSPEC(addr) do { (void) (addr); } while (0) +# define E2K_PREFETCH_L1_SPEC_OFFSET(addr, offset) \ + do { (void) (addr); (void) (offset); } while (0) +#endif + +/* + * Recovery operations + * chan: 0, 1, 2 or 3 + */ +#define NATIVE_RECOVERY_TAGGED_LOAD_TO(_addr, _opc, _val, _tag, _chan) \ +do { \ + asm ( "{nop 1\n" \ + " cmpesb,0 %[chan], 0, %%pred20\n" \ + " cmpesb,1 %[chan], 1, %%pred21\n" \ + " cmpesb,3 %[chan], 2, %%pred22\n" \ + " cmpesb,4 %[chan], 3, %%pred23}\n" \ + "{nop 4\n" \ + " ldrd,0 [ %[addr] + %[opc] ], %[val] ? %%pred20\n" \ + " ldrd,2 [ %[addr] + %[opc] ], %[val] ? %%pred21\n" \ + " ldrd,3 [ %[addr] + %[opc] ], %[val] ? %%pred22\n" \ + " ldrd,5 [ %[addr] + %[opc] ], %[val] ? 
%%pred23}\n" \ + "{gettagd,2 %[val], %[tag]\n" \ + " puttagd,5 %[val], 0, %[val]}\n" \ + : [val] "=r"(_val), [tag] "=r"(_tag) \ + : [addr] "r" (_addr), [opc] "r" (_opc), \ + [chan] "r" ((u32) (_chan)) \ + : "memory", "pred20", "pred21", "pred22", "pred23"); \ +} while (0) + +#define NATIVE_RECOVERY_LOAD_TO(addr, opc, val, chan_letter) \ +({ \ + asm volatile ("ldrd," #chan_letter "\t[%1 + %2], %0" \ + : "=r"(val) \ + : "r" ((__e2k_ptr_t) (addr)), \ + "r" ((__e2k_u64_t) (opc))); \ +}) + +#define NATIVE_LOAD_TAGGED_DGREGS(addr, numlo, numhi) \ +do { \ + asm ("ldrd,2 [%0 + %1], %%dg" #numlo "\n" \ + "ldrd,5 [%0 + %2], %%dg" #numhi "\n" \ + : \ + : "r" (addr), \ + "i" (TAGGED_MEM_LOAD_REC_OPC), \ + "i" (TAGGED_MEM_LOAD_REC_OPC | 8UL) \ + : "%g" #numlo, "%g" #numhi); \ +} while (0) + +#define NATIVE_STORE_TAGGED_DGREG(addr, greg_no) \ +do { \ + asm ("strd [%0 + %1], %%dg" #greg_no \ + : \ + : "r" (addr), "i" (TAGGED_MEM_STORE_REC_OPC)); \ +} while (0) + +/* + * chan: 0, 1, 2 or 3 + * vr: set to 0 if we want to preserve the lower 4-byte word + * (same as vr in cellar) + */ +#define NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(_addr, _opc, greg_no, \ + _chan, _vr, _quadro) \ +do { \ + u64 val, val_8; \ + u32 __chan = (u32) (_chan); \ + u32 __quadro = (u32) (_quadro); \ + u32 __chan_q = (__quadro) ? __chan : 4; /* Not existent channel - skip */ \ + u64 __opc = (_opc); \ + asm volatile ( \ + "{disp %%ctpr1, qpswitchd_sm\n" \ + " cmpesb,0 %[chan], 0, %%pred20\n" \ + " cmpesb,1 %[chan], 1, %%pred21\n" \ + " cmpesb,3 %[chan], 2, %%pred22\n" \ + " cmpesb,4 %[chan], 3, %%pred23}\n" \ + "{cmpesb,0 %[chan_q], 0, %%pred24\n" \ + " cmpesb,1 %[chan_q], 1, %%pred25\n" \ + " cmpesb,3 %[chan_q], 2, %%pred26\n" \ + " cmpesb,4 %[chan_q], 3, %%pred27}\n" \ + "{ldrd,0 [ %[addr] + %[opc] ], %[val] ? %%pred20\n" \ + " ldrd,2 [ %[addr] + %[opc] ], %[val] ? %%pred21\n" \ + " ldrd,3 [ %[addr] + %[opc] ], %[val] ? %%pred22\n" \ + " ldrd,5 [ %[addr] + %[opc] ], %[val] ? 
%%pred23\n" \ + " cmpesb,1 %[quadro], 0, %%pred18\n" \ + " cmpesb,4 %[vr], 0, %%pred19}\n" \ + "{nop 3\n" \ + " ldrd,0 [ %[addr] + %[opc_8] ], %[val_8] ? %%pred24\n" \ + " ldrd,2 [ %[addr] + %[opc_8] ], %[val_8] ? %%pred25\n" \ + " ldrd,3 [ %[addr] + %[opc_8] ], %[val_8] ? %%pred26\n" \ + " ldrd,5 [ %[addr] + %[opc_8] ], %[val_8] ? %%pred27}\n" \ + "{movts %%g" #greg_no ", %[val] ? %%pred19}\n" \ + "{movtd %[val_8], %%dg" #greg_no " ? ~ %%pred18\n" \ + " addd %[greg], 0, %%db[0] ? ~ %%pred18\n" \ + " call %%ctpr1, wbs=%# ? ~ %%pred18}\n" \ + "{movtd %[val], %%dg" #greg_no "}\n" \ + : [val] "=&r" (val), [val_8] "=&r" (val_8) \ + : [addr] "r" (_addr), [vr] "ir" ((u32) (_vr)), \ + [chan] "ir" (__chan), [chan_q] "ir" (__chan_q), \ + [opc] "r" (__opc), [opc_8] "r" (__opc | 8ull), \ + [quadro] "r" (__quadro), [greg] "i" ((u64) (greg_no)) \ + : "call", "memory", "pred18", "pred19", "pred20", "pred21", \ + "pred22", "pred23", "pred24", "pred25", "pred26", "pred27", \ + "g" #greg_no); \ +} while (0) + +/* + * As NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR but repeats from cellar + * an aligned atomic 16-bytes load. + */ +#define NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(_addr, _opc, \ + greg_no, _vr) \ +do { \ + u64 val; \ + u64 __opc = (_opc); \ + asm ( "{disp %%ctpr1, qpswitchd_sm\n" \ + " nop 4\n" \ + " ldrd,0 [ %[addr] + %[opc] ], %[val]\n" \ + " ldrd,2 [ %[addr] + %[opc_8] ], %%g" #greg_no "\n" \ + " cmpesb,1 %[vr], 0, %%pred19}\n" \ + "{movts,0 %%g" #greg_no ", %[val] ? 
%%pred19\n" \ + " addd,2 %[greg], 0, %%db[0]\n" \ + " call %%ctpr1, wbs=%#}\n" \ + "{movtd %[val], %%dg" #greg_no "}\n" \ + : [val] "=&r" (val) \ + : [addr] "r" (_addr), [vr] "ir" ((u32) (_vr)), \ + [opc] "r" (__opc), [opc_8] "r" (__opc | 8ull), \ + [greg] "i" ((u64) (greg_no)) \ + : "call", "memory", "pred19", "g" #greg_no); \ +} while (false) + +#define NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(_addr, _opc, \ + greg_no_lo, greg_no_hi, _vr, _qp_load) \ +do { \ + u64 val; \ + u64 __opc = (_opc); \ + if (_qp_load) { \ + asm ( "{disp %%ctpr1, qpswitchd_sm\n" \ + " nop 4\n" \ + " ldrd,0 [ %[addr] + %[opc] ], %[val]\n" \ + " ldrd,2 [ %[addr] + %[opc_8] ], %%g" #greg_no_lo "\n" \ + " cmpesb,1 %[vr], 0, %%pred19}\n" \ + "{movts,0 %%g" #greg_no_lo ", %[val] ? %%pred19\n" \ + " addd,2 %[greg], 0, %%db[0]\n" \ + " call %%ctpr1, wbs=%#}\n" \ + "{movtd %[val], %%dg" #greg_no_lo "}\n" \ + : [val] "=&r" (val) \ + : [addr] "r" (_addr), [vr] "ir" ((u32) (_vr)), \ + [opc] "r" (__opc), [opc_8] "r" (__opc | 8ull), \ + [greg] "i" ((u64) (greg_no_lo)) \ + : "call", "memory", "pred19", "g" #greg_no_lo); \ + } else { \ + asm ( "{nop 4\n" \ + " ldrd,0 [ %[addr] + %[opc] ], %[val]\n" \ + " ldrd,2 [ %[addr] + %[opc_8] ], %%g" #greg_no_hi "\n" \ + " cmpesb,1 %[vr], 0, %%pred19}\n" \ + "{nop 1\n" \ + " movts,0 %%g" #greg_no_lo ", %[val] ? 
%%pred19}\n" \ + "{movtd,0 %[val], %%dg" #greg_no_lo "}\n" \ + : [val] "=&r" (val) \ + : [addr] "r" (_addr), [vr] "ir" ((u32) (_vr)), \ + [opc] "r" (__opc), [opc_8] "r" (__opc | 8ull), \ + [greg] "i" ((u64) (greg_no_lo)) \ + : "call", "memory", "pred19", "g" #greg_no_lo); \ + } \ +} while (false) + +#define NATIVE_RECOVERY_LOAD_TO_A_GREG_CH_VR(addr, opc, greg_num, \ + chan_opc, vr, quadro) \ +do { \ + switch (greg_num) { \ + case 0: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 0, \ + chan_opc, vr, quadro); \ + break; \ + case 1: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 1, \ + chan_opc, vr, quadro); \ + break; \ + case 2: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 2, \ + chan_opc, vr, quadro); \ + break; \ + case 3: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 3, \ + chan_opc, vr, quadro); \ + break; \ + case 4: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 4, \ + chan_opc, vr, quadro); \ + break; \ + case 5: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 5, \ + chan_opc, vr, quadro); \ + break; \ + case 6: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 6, \ + chan_opc, vr, quadro); \ + break; \ + case 7: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 7, \ + chan_opc, vr, quadro); \ + break; \ + case 8: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 8, \ + chan_opc, vr, quadro); \ + break; \ + case 9: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 9, \ + chan_opc, vr, quadro); \ + break; \ + case 10: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 10, \ + chan_opc, vr, quadro); \ + break; \ + case 11: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 11, \ + chan_opc, vr, quadro); \ + break; \ + case 12: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 12, \ + chan_opc, vr, quadro); \ + break; \ + case 13: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 13, \ + chan_opc, vr, quadro); \ + break; \ + case 14: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 
14, \ + chan_opc, vr, quadro); \ + break; \ + case 15: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 15, \ + chan_opc, vr, quadro); \ + break; \ + /* Do not load g16-g19 as they are used by kernel */ \ + case 16: \ + case 17: \ + case 18: \ + case 19: \ + break; \ + case 20: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 20, \ + chan_opc, vr, quadro); \ + break; \ + case 21: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 21, \ + chan_opc, vr, quadro); \ + break; \ + case 22: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 22, \ + chan_opc, vr, quadro); \ + break; \ + case 23: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 23, \ + chan_opc, vr, quadro); \ + break; \ + case 24: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 24, \ + chan_opc, vr, quadro); \ + break; \ + case 25: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 25, \ + chan_opc, vr, quadro); \ + break; \ + case 26: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 26, \ + chan_opc, vr, quadro); \ + break; \ + case 27: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 27, \ + chan_opc, vr, quadro); \ + break; \ + case 28: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 28, \ + chan_opc, vr, quadro); \ + break; \ + case 29: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 29, \ + chan_opc, vr, quadro); \ + break; \ + case 30: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 30, \ + chan_opc, vr, quadro); \ + break; \ + case 31: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 31, \ + chan_opc, vr, quadro); \ + break; \ + default: \ + panic("Invalid global register # %d\n", greg_num); \ + } \ +} while (0) + +#define NATIVE_RECOVERY_LOAD_TO_A_GREG_VR_ATOMIC(addr, opc, greg_num, \ + vr, qp_load) \ +do { \ + switch (greg_num) { \ + case 0: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 0, 1, vr, qp_load); \ + break; \ + case 1: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 1, \ + vr); \ + break; 
\ + case 2: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 2, 3, vr, qp_load); \ + break; \ + case 3: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 3, \ + vr); \ + break; \ + case 4: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 4, 5, vr, qp_load); \ + break; \ + case 5: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 5, \ + vr); \ + break; \ + case 6: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 6, 7, vr, qp_load); \ + break; \ + case 7: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 7, \ + vr); \ + break; \ + case 8: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 8, 9, vr, qp_load); \ + break; \ + case 9: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 9, \ + vr); \ + break; \ + case 10: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 10, 11, vr, qp_load); \ + break; \ + case 11: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 11, \ + vr); \ + break; \ + case 12: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 12, 13, vr, qp_load); \ + break; \ + case 13: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 13, \ + vr); \ + break; \ + case 14: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 14, 15, vr, qp_load); \ + break; \ + case 15: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 15, \ + vr); \ + break; \ + /* Do not load g16-g19 as they are used by kernel */ \ + case 16: \ + case 17: \ + case 18: \ + case 19: \ + break; \ + case 20: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 20, 21, vr, qp_load); \ + break; \ + case 21: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 21, \ + vr); \ + break; \ + case 22: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 22, 23, vr, qp_load); \ + break; \ + case 23: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, 
opc, 23, \ + vr); \ + break; \ + case 24: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 24, 25, vr, qp_load); \ + break; \ + case 25: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 25, \ + vr); \ + break; \ + case 26: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 26, 27, vr, qp_load); \ + break; \ + case 27: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 27, \ + vr); \ + break; \ + case 28: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 28, 29, vr, qp_load); \ + break; \ + case 29: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 29, \ + vr); \ + break; \ + case 30: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 30, 31, vr, qp_load); \ + break; \ + case 31: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 31, \ + vr); \ + break; \ + default: \ + panic("Invalid global register # %d\n", greg_num); \ + } \ +} while (0) + +#define NATIVE_RECOVERY_STORE(_addr, _val, _opc, _chan) \ +do { \ + asm volatile ("strd," #_chan " [ %[addr] + %[opc] ], %[val]" \ + : \ + : [addr] "r" ((u64) (_addr)), \ + [opc] "ir" ((u64) (_opc)), \ + [val] "r" ((u64) (_val)) \ + : "memory"); \ +} while (0) + +#define NATIVE_RECOVERY_TAGGED_STORE_ATOMIC(_addr, _val, _tag, _opc, \ + _val_ext, _tag_ext, _opc_ext) \ +({ \ + u64 tmp, tmp_ext; \ + asm ( "{puttagd,2 %[val], %[tag], %[tmp]\n" \ + " puttagd,5 %[val_ext], %[tag_ext], %[tmp_ext]}\n" \ + "{strd,2 [ %[addr] + %[opc] ], %[tmp]\n" \ + " strd,5 [ %[addr] + %[opc_ext] ], %[tmp_ext]}\n" \ + : [tmp] "=&r" (tmp), [tmp_ext] "=&r" (tmp_ext) \ + : [addr] "r" (_addr), \ + [val] "r" ((u64) (_val)), [val_ext] "r" ((u64) (_val_ext)), \ + [tag] "r" ((u32) (_tag)), [tag_ext] "r" ((u32) (_tag_ext)), \ + [opc] "ir" (_opc), [opc_ext] "ir" (_opc_ext) \ + : "memory"); \ +}) + +#define NATIVE_RECOVERY_TAGGED_STORE(_addr, _val, _tag, _opc, \ + _val_ext, _tag_ext, _opc_ext, _chan, _quadro) \ +({ \ + u64 tmp, tmp_ext; \ + 
u32 __chan = (u32) (_chan); \ + u32 __chan_q = (_quadro) ? __chan : 4; /* Not existent channel - skip */ \ + asm ( "{nop 1\n" \ + " puttagd,2 %[val], %[tag], %[tmp]\n" \ + " puttagd,5,sm %[val_ext], %[tag_ext], %[tmp_ext]\n" \ + " cmpesb,0 %[chan], 1, %%pred20\n" \ + " cmpesb,3 %[chan], 3, %%pred21\n" \ + " cmpesb,1 %[chan_q], 1, %%pred22\n" \ + " cmpesb,4 %[chan_q], 3, %%pred23}\n" \ + "{strd,2 [ %[addr] + %[opc] ], %[tmp] ? %%pred20\n" \ + " strd,5 [ %[addr] + %[opc] ], %[tmp] ? %%pred21}\n" \ + "{strd,2 [ %[addr] + %[opc_ext] ], %[tmp_ext] ? %%pred22\n" \ + " strd,5 [ %[addr] + %[opc_ext] ], %[tmp_ext] ? %%pred23}\n" \ + : [tmp] "=&r" (tmp), [tmp_ext] "=&r" (tmp_ext) \ + : [addr] "r" (_addr), \ + [val] "r" ((u64) (_val)), [val_ext] "r" ((u64) (_val_ext)), \ + [tag] "r" ((u32) (_tag)), [tag_ext] "r" ((u32) (_tag_ext)), \ + [opc] "ir" (_opc), [opc_ext] "ir" (_opc_ext), \ + [chan] "ir" ((u32) (__chan)), [chan_q] "ir" ((u32) (__chan_q)) \ + : "memory", "pred20", "pred21", "pred22", "pred23"); \ +}) + + +/* + * #58441 - work with taged value (compiler problem) + * store tag and store taged word must be in common asm code + * (cloused asm code) + */ +#define NATIVE_STORE_VALUE_WITH_TAG(addr, val, tag) \ + NATIVE_STORE_TAGGED_WORD(addr, val, tag, \ + TAGGED_MEM_STORE_REC_OPC, 2) + +#define NATIVE_STORE_TAGGED_WORD(addr, val, tag, opc, chan_letter) \ +do { \ + u64 __tmp_reg = val; \ + E2K_BUILD_BUG_ON(sizeof(val) != 8); \ + asm volatile ("{puttagd \t%0, %2, %0\n}" \ + " strd," #chan_letter " \t[%1 + %3], %0\n" \ + : "+r" (__tmp_reg) \ + : "r" ((__e2k_ptr_t) (addr)), \ + "ri" ((__e2k_u32_t) (tag)), \ + "ri" ((opc)) \ + : "memory"); \ +} while (0) + +#define NATIVE_STORE_TAGGED_WORD_CH(addr, val, tag, opc, trap_cellar_chan) \ +do { \ + switch (trap_cellar_chan) { \ + case 1: \ + NATIVE_STORE_TAGGED_WORD(addr, val, tag, opc, 2); \ + break; \ + case 3: \ + NATIVE_STORE_TAGGED_WORD(addr, val, tag, opc, 5); \ + break; \ + } \ +} while (0) + + +#define 
__NATIVE_STORE_TAGGED_QWORD(addr, val_lo, val_hi, \ + tag_lo, tag_hi, offset) \ +({ \ + u64 reg1, reg2; \ + E2K_BUILD_BUG_ON(sizeof(val_hi) != 8); \ + E2K_BUILD_BUG_ON(sizeof(val_lo) != 8); \ + asm volatile ( "{puttagd %3, %5, %0\n" \ + " puttagd %4, %6, %1}\n" \ + "{strd,2 [%2 + %7], %0\n" \ + " strd,5 [%2 + %8], %1}\n" \ + : "=&r" (reg1), "=&r" (reg2) \ + : "r" (addr), \ + "r" (val_lo), \ + "r" (val_hi), \ + "ri" (tag_lo), \ + "ri" (tag_hi), \ + "i" (TAGGED_MEM_STORE_REC_OPC), \ + "ri" (TAGGED_MEM_STORE_REC_OPC | offset) \ + : "memory"); \ +}) +#define NATIVE_STORE_TAGGED_QWORD(addr, val_lo, val_hi, tag_lo, tag_hi) \ + __NATIVE_STORE_TAGGED_QWORD((addr), (val_lo), (val_hi), \ + (tag_lo), (tag_hi), 8UL) + +#define E2K_STORE_NULLPTR_QWORD(addr) \ + _E2K_STORE_NULLPTR_QWORD(addr, TAGGED_MEM_STORE_REC_OPC) + + +#define _E2K_STORE_NULLPTR_QWORD(addr, opc) \ +({ \ + e2k_addr_t addr_hi = (e2k_addr_t)addr + 8; \ + unsigned long np = 0UL; \ + asm volatile ("{puttagd \t%0, %3, %0}\n" \ + " {strd, 2 \t[%1 + %4], %0\n" \ + " strd, 5 \t[%2 + %4], %0}\n" \ + : "+r" (np) \ + : "r" ((__e2k_ptr_t) (addr)), \ + "r" ((__e2k_ptr_t) (addr_hi)), \ + "i" (E2K_NULLPTR_ETAG), \ + "i" ( (opc)) \ + : "memory" \ + ); \ +}) + +#define NATIVE_MOVE_TAGGED_QWORD(_from_lo, _from_hi, _to_lo, _to_hi) \ +({ \ + u64 __val_lo, __val_hi; \ + asm ("{nop 4\n" \ + " ldrd,2 [ %[from_lo] + %[opc_ld] ], %[val_lo]\n" \ + " ldrd,5 [ %[from_hi] + %[opc_ld] ], %[val_hi]}\n" \ + "{strd,2 [ %[to_lo] + %[opc_st] ], %[val_lo]\n" \ + " strd,5 [ %[to_hi] + %[opc_st] ], %[val_hi]}\n" \ + : [val_lo] "=&r" (__val_lo), [val_hi] "=&r" (__val_hi) \ + : [from_lo] "r" (_from_lo), [from_hi] "r" (_from_hi), \ + [to_lo] "r" (_to_lo), [to_hi] "r" (_to_hi), \ + [opc_ld] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [opc_st] "i" (TAGGED_MEM_STORE_REC_OPC) \ + : "memory"); \ +}) + +#define NATIVE_MOVE_TAGGED_DWORD(_from, _to) \ +do { \ + long _tmp; \ + asm ( "ldrd [ %[from] + %[opc] ], %[tmp]\n" \ + "strd [ %[to] + %[opc_st] ], %[tmp]\n" \ + 
: [tmp] "=&r" (_tmp) \ + : [from] "r" (_from), [to] "r" (_to), \ + [opc] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [opc_st] "i" (TAGGED_MEM_STORE_REC_OPC) \ + : "memory"); \ +} while (false) + +#define NATIVE_MOVE_TAGGED_WORD(_from, _to) \ +do { \ + long _tmp; \ + asm ( "ldrd [ %[from] + %[opc] ], %[tmp]\n" \ + "strd [ %[to] + %[opc_st] ], %[tmp]\n" \ + : [tmp] "=&r" (_tmp) \ + : [from] "r" (_from), [to] "r" (_to), \ + [opc] "i" (TAGGED_MEM_LOAD_REC_OPC_W), \ + [opc_st] "i" (TAGGED_MEM_STORE_REC_OPC_W) \ + : "memory"); \ +} while (false) + +/* + * Repeat memory load from cellar. + * chan: 0, 1, 2 or 3 - channel for operation + * quadro: set if this is a non-atomic quadro operation to move 16 bytes + * vr: set to 0 if we want to preserve the lower 4-byte word + * (same as vr in cellar) + * not_single_byte: set to "false" if we want to write only 1 byte at target + * address (i.e. do not clear the whole register we are + * writing into). This makes sense when we manually split + * the faulting load into a series of 1-byte loads - only + * the first one should clear the register then. + */ +#define NATIVE_MOVE_TAGGED_DWORD_WITH_OPC_CH_VR(_from, _to, _to_hi, _vr, _opc, \ + _chan, _quadro, _not_single_byte) \ +do { \ + u64 prev, val, val_8; \ + u32 __chan = (u32) (_chan); \ + u32 __quadro = (u32) (_quadro); \ + u32 __chan_q = (__quadro) ? __chan : 4 /* Not existent channel - skip */; \ + u64 __opc = (_opc); \ + asm ( "{cmpesb %[quadro], 0, %%pred18\n" \ + " cmpesb %[vr], 0, %%pred19\n" \ + " cmpesb %[not_single_byte], 0, %%pred28}\n" \ + "{cmpesb,0 %[chan], 0, %%pred20\n" \ + " cmpesb,1 %[chan], 1, %%pred21\n" \ + " cmpesb,3 %[chan], 2, %%pred22\n" \ + " cmpesb,4 %[chan], 3, %%pred23}\n" \ + "{cmpesb,0 %[chan_q], 0, %%pred24\n" \ + " cmpesb,1 %[chan_q], 1, %%pred25\n" \ + " cmpesb,3 %[chan_q], 2, %%pred26\n" \ + " cmpesb,4 %[chan_q], 3, %%pred27\n" \ + " ldrd [ %[to] + %[opc_ld] ], %[prev] ? %%pred19}\n" \ + "{ldrd,0 [ %[from] + %[opc] ], %[val] ? 
 * As NATIVE_MOVE_TAGGED_DWORD_WITH_OPC_CH_VR but repeats from cellar
 * an aligned atomic 16-byte load.
%%pred19}\n" \ + "{strd,2 [ %[to] + %[opc_st] ], %[val]\n" \ + " strd,5 [ %[to_hi] + %[opc_st] ], %[val_8]}\n" \ + : [prev] "=&r" (prev), [val] "=&r" (val), \ + [val_8] "=&r" (val_8) \ + : [from] "r" (_from), [to] "r" (_to), [to_hi] "r" (_to_hi), \ + [vr] "ir" ((u32) (_vr)), \ + [opc] "r" (__opc), [opc_8] "r" (__opc | 8ull), \ + [opc_ld] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [opc_st] "i" (TAGGED_MEM_STORE_REC_OPC) \ + : "memory", "pred19"); \ +} while (false) + +#define E2K_TAGGED_MEMMOVE_8(__dst, __src) \ +({ \ + u64 __tmp1; \ + asm ( \ + "{\n" \ + "nop 4\n" \ + "ldrd,2 [ %[src] + %[ld_opc_0] ], %[tmp1]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_0] ], %[tmp1]\n" \ + "}\n" \ + : [tmp1] "=&r" (__tmp1) \ + : [src] "r" (__src), [dst] "r" (__dst), \ + [ld_opc_0] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [st_opc_0] "i" (TAGGED_MEM_STORE_REC_OPC) \ + : "memory"); \ +}) + +#define E2K_TAGGED_MEMMOVE_16(__dst, __src) \ +({ \ + u64 __tmp1, __tmp2; \ + asm ( \ + "{\n" \ + "nop 4\n" \ + "ldrd,2 [ %[src] + %[ld_opc_0] ], %[tmp1]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_0] ], %[tmp1]\n" \ + "strd,5 [ %[dst] + %[st_opc_8] ], %[tmp2]\n" \ + "}\n" \ + : [tmp1] "=&r" (__tmp1), [tmp2] "=&r" (__tmp2) \ + : [src] "r" (__src), [dst] "r" (__dst), \ + [ld_opc_0] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [ld_opc_8] "i" (TAGGED_MEM_LOAD_REC_OPC | 8), \ + [st_opc_0] "i" (TAGGED_MEM_STORE_REC_OPC), \ + [st_opc_8] "i" (TAGGED_MEM_STORE_REC_OPC | 8) \ + : "memory"); \ +}) + +#define E2K_TAGGED_MEMMOVE_24(__dst, __src) \ +({ \ + u64 __tmp1, __tmp2, __tmp3; \ + asm ( \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_0] ], %[tmp1]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "nop 3\n" \ + "ldrd,2 [ %[src] + %[ld_opc_16] ], %[tmp3]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_0] ], %[tmp1]\n" \ + "strd,5 [ %[dst] + %[st_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_16] ], 
%[tmp3]\n" \ + "}\n" \ + : [tmp1] "=&r" (__tmp1), [tmp2] "=&r" (__tmp2), \ + [tmp3] "=&r" (__tmp3) \ + : [src] "r" (__src), [dst] "r" (__dst), \ + [ld_opc_0] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [ld_opc_8] "i" (TAGGED_MEM_LOAD_REC_OPC | 8), \ + [ld_opc_16] "i" (TAGGED_MEM_LOAD_REC_OPC | 16), \ + [st_opc_0] "i" (TAGGED_MEM_STORE_REC_OPC), \ + [st_opc_8] "i" (TAGGED_MEM_STORE_REC_OPC | 8), \ + [st_opc_16] "i" (TAGGED_MEM_STORE_REC_OPC | 16) \ + : "memory"); \ +}) + +#define E2K_TAGGED_MEMMOVE_32(__dst, __src) \ +({ \ + u64 __tmp1, __tmp2, __tmp3, __tmp4; \ + asm ( \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_0] ], %[tmp1]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "nop 3\n" \ + "ldrd,2 [ %[src] + %[ld_opc_16] ], %[tmp3]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_24] ], %[tmp4]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_0] ], %[tmp1]\n" \ + "strd,5 [ %[dst] + %[st_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_16] ], %[tmp3]\n" \ + "strd,5 [ %[dst] + %[st_opc_24] ], %[tmp4]\n" \ + "}\n" \ + : [tmp1] "=&r" (__tmp1), [tmp2] "=&r" (__tmp2), \ + [tmp3] "=&r" (__tmp3), [tmp4] "=&r" (__tmp4) \ + : [src] "r" (__src), [dst] "r" (__dst), \ + [ld_opc_0] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [ld_opc_8] "i" (TAGGED_MEM_LOAD_REC_OPC | 8), \ + [ld_opc_16] "i" (TAGGED_MEM_LOAD_REC_OPC | 16), \ + [ld_opc_24] "i" (TAGGED_MEM_LOAD_REC_OPC | 24), \ + [st_opc_0] "i" (TAGGED_MEM_STORE_REC_OPC), \ + [st_opc_8] "i" (TAGGED_MEM_STORE_REC_OPC | 8), \ + [st_opc_16] "i" (TAGGED_MEM_STORE_REC_OPC | 16), \ + [st_opc_24] "i" (TAGGED_MEM_STORE_REC_OPC | 24) \ + : "memory"); \ +}) + +#define E2K_TAGGED_MEMMOVE_40(__dst, __src) \ +({ \ + u64 __tmp1, __tmp2, __tmp3, __tmp4, __tmp5; \ + asm ( \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_0] ], %[tmp1]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_16] ], %[tmp3]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_24] ], %[tmp4]\n" \ + "}\n" \ + "{\n" \ + "nop 
2\n" \ + "ldrd,2 [ %[src] + %[ld_opc_32] ], %[tmp5]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_0] ], %[tmp1]\n" \ + "strd,5 [ %[dst] + %[st_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_16] ], %[tmp3]\n" \ + "strd,5 [ %[dst] + %[st_opc_24] ], %[tmp4]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_32] ], %[tmp5]\n" \ + "}\n" \ + : [tmp1] "=&r" (__tmp1), [tmp2] "=&r" (__tmp2), \ + [tmp3] "=&r" (__tmp3), [tmp4] "=&r" (__tmp4), \ + [tmp5] "=&r" (__tmp5) \ + : [src] "r" (__src), [dst] "r" (__dst), \ + [ld_opc_0] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [ld_opc_8] "i" (TAGGED_MEM_LOAD_REC_OPC | 8), \ + [ld_opc_16] "i" (TAGGED_MEM_LOAD_REC_OPC | 16), \ + [ld_opc_24] "i" (TAGGED_MEM_LOAD_REC_OPC | 24), \ + [ld_opc_32] "i" (TAGGED_MEM_LOAD_REC_OPC | 32), \ + [st_opc_0] "i" (TAGGED_MEM_STORE_REC_OPC), \ + [st_opc_8] "i" (TAGGED_MEM_STORE_REC_OPC | 8), \ + [st_opc_16] "i" (TAGGED_MEM_STORE_REC_OPC | 16), \ + [st_opc_24] "i" (TAGGED_MEM_STORE_REC_OPC | 24), \ + [st_opc_32] "i" (TAGGED_MEM_STORE_REC_OPC | 32) \ + : "memory"); \ +}) + +#define E2K_TAGGED_MEMMOVE_48(__dst, __src) \ +({ \ + u64 __tmp1, __tmp2, __tmp3, __tmp4, __tmp5, __tmp6; \ + asm ( \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_0] ], %[tmp1]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_16] ], %[tmp3]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_24] ], %[tmp4]\n" \ + "}\n" \ + "{\n" \ + "nop 2\n" \ + "ldrd,2 [ %[src] + %[ld_opc_32] ], %[tmp5]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_40] ], %[tmp6]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_0] ], %[tmp1]\n" \ + "strd,5 [ %[dst] + %[st_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_16] ], %[tmp3]\n" \ + "strd,5 [ %[dst] + %[st_opc_24] ], %[tmp4]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_32] ], %[tmp5]\n" \ + "strd,5 [ %[dst] + %[st_opc_40] ], %[tmp6]\n" \ + "}\n" \ + : [tmp1] "=&r" (__tmp1), [tmp2] "=&r" (__tmp2), \ + [tmp3] "=&r" 
(__tmp3), [tmp4] "=&r" (__tmp4), \ + [tmp5] "=&r" (__tmp5), [tmp6] "=&r" (__tmp6) \ + : [src] "r" (__src), [dst] "r" (__dst), \ + [ld_opc_0] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [ld_opc_8] "i" (TAGGED_MEM_LOAD_REC_OPC | 8), \ + [ld_opc_16] "i" (TAGGED_MEM_LOAD_REC_OPC | 16), \ + [ld_opc_24] "i" (TAGGED_MEM_LOAD_REC_OPC | 24), \ + [ld_opc_32] "i" (TAGGED_MEM_LOAD_REC_OPC | 32), \ + [ld_opc_40] "i" (TAGGED_MEM_LOAD_REC_OPC | 40), \ + [st_opc_0] "i" (TAGGED_MEM_STORE_REC_OPC), \ + [st_opc_8] "i" (TAGGED_MEM_STORE_REC_OPC | 8), \ + [st_opc_16] "i" (TAGGED_MEM_STORE_REC_OPC | 16), \ + [st_opc_24] "i" (TAGGED_MEM_STORE_REC_OPC | 24), \ + [st_opc_32] "i" (TAGGED_MEM_STORE_REC_OPC | 32), \ + [st_opc_40] "i" (TAGGED_MEM_STORE_REC_OPC | 40) \ + : "memory"); \ +}) + +#define E2K_TAGGED_MEMMOVE_56(__dst, __src) \ +({ \ + u64 __tmp1, __tmp2, __tmp3, __tmp4, __tmp5, __tmp6, __tmp7; \ + asm ( \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_0] ], %[tmp1]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_16] ], %[tmp3]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_24] ], %[tmp4]\n" \ + "}\n" \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_32] ], %[tmp5]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_40] ], %[tmp6]\n" \ + "}\n" \ + "{\n" \ + "nop 1\n" \ + "ldrd,2 [ %[src] + %[ld_opc_48] ], %[tmp7]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_0] ], %[tmp1]\n" \ + "strd,5 [ %[dst] + %[st_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_16] ], %[tmp3]\n" \ + "strd,5 [ %[dst] + %[st_opc_24] ], %[tmp4]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_32] ], %[tmp5]\n" \ + "strd,5 [ %[dst] + %[st_opc_40] ], %[tmp6]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_48] ], %[tmp7]\n" \ + "}\n" \ + : [tmp1] "=&r" (__tmp1), [tmp2] "=&r" (__tmp2), \ + [tmp3] "=&r" (__tmp3), [tmp4] "=&r" (__tmp4), \ + [tmp5] "=&r" (__tmp5), [tmp6] "=&r" (__tmp6), \ + [tmp7] "=&r" (__tmp7) \ + : [src] "r" (__src), [dst] "r" (__dst), \ + [ld_opc_0] "i" 
(TAGGED_MEM_LOAD_REC_OPC), \ + [ld_opc_8] "i" (TAGGED_MEM_LOAD_REC_OPC | 8), \ + [ld_opc_16] "i" (TAGGED_MEM_LOAD_REC_OPC | 16), \ + [ld_opc_24] "i" (TAGGED_MEM_LOAD_REC_OPC | 24), \ + [ld_opc_32] "i" (TAGGED_MEM_LOAD_REC_OPC | 32), \ + [ld_opc_40] "i" (TAGGED_MEM_LOAD_REC_OPC | 40), \ + [ld_opc_48] "i" (TAGGED_MEM_LOAD_REC_OPC | 48), \ + [st_opc_0] "i" (TAGGED_MEM_STORE_REC_OPC), \ + [st_opc_8] "i" (TAGGED_MEM_STORE_REC_OPC | 8), \ + [st_opc_16] "i" (TAGGED_MEM_STORE_REC_OPC | 16), \ + [st_opc_24] "i" (TAGGED_MEM_STORE_REC_OPC | 24), \ + [st_opc_32] "i" (TAGGED_MEM_STORE_REC_OPC | 32), \ + [st_opc_40] "i" (TAGGED_MEM_STORE_REC_OPC | 40), \ + [st_opc_48] "i" (TAGGED_MEM_STORE_REC_OPC | 48) \ + : "memory"); \ +}) + +#define E2K_TAGGED_MEMMOVE_64(__dst, __src) \ +({ \ + u64 __tmp1, __tmp2, __tmp3, __tmp4, __tmp5, __tmp6, __tmp7, __tmp8; \ + asm ( \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_0] ], %[tmp1]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_16] ], %[tmp3]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_24] ], %[tmp4]\n" \ + "}\n" \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_32] ], %[tmp5]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_40] ], %[tmp6]\n" \ + "}\n" \ + "{\n" \ + "nop 1\n" \ + "ldrd,2 [ %[src] + %[ld_opc_48] ], %[tmp7]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_56] ], %[tmp8]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_0] ], %[tmp1]\n" \ + "strd,5 [ %[dst] + %[st_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_16] ], %[tmp3]\n" \ + "strd,5 [ %[dst] + %[st_opc_24] ], %[tmp4]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_32] ], %[tmp5]\n" \ + "strd,5 [ %[dst] + %[st_opc_40] ], %[tmp6]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_48] ], %[tmp7]\n" \ + "strd,5 [ %[dst] + %[st_opc_56] ], %[tmp8]\n" \ + "}\n" \ + : [tmp1] "=&r" (__tmp1), [tmp2] "=&r" (__tmp2), \ + [tmp3] "=&r" (__tmp3), [tmp4] "=&r" (__tmp4), \ + [tmp5] "=&r" (__tmp5), [tmp6] "=&r" (__tmp6), \ + [tmp7] "=&r" 
(__tmp7), [tmp8] "=&r" (__tmp8) \ + : [src] "r" (__src), [dst] "r" (__dst), \ + [ld_opc_0] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [ld_opc_8] "i" (TAGGED_MEM_LOAD_REC_OPC | 8), \ + [ld_opc_16] "i" (TAGGED_MEM_LOAD_REC_OPC | 16), \ + [ld_opc_24] "i" (TAGGED_MEM_LOAD_REC_OPC | 24), \ + [ld_opc_32] "i" (TAGGED_MEM_LOAD_REC_OPC | 32), \ + [ld_opc_40] "i" (TAGGED_MEM_LOAD_REC_OPC | 40), \ + [ld_opc_48] "i" (TAGGED_MEM_LOAD_REC_OPC | 48), \ + [ld_opc_56] "i" (TAGGED_MEM_LOAD_REC_OPC | 56), \ + [st_opc_0] "i" (TAGGED_MEM_STORE_REC_OPC), \ + [st_opc_8] "i" (TAGGED_MEM_STORE_REC_OPC | 8), \ + [st_opc_16] "i" (TAGGED_MEM_STORE_REC_OPC | 16), \ + [st_opc_24] "i" (TAGGED_MEM_STORE_REC_OPC | 24), \ + [st_opc_32] "i" (TAGGED_MEM_STORE_REC_OPC | 32), \ + [st_opc_40] "i" (TAGGED_MEM_STORE_REC_OPC | 40), \ + [st_opc_48] "i" (TAGGED_MEM_STORE_REC_OPC | 48), \ + [st_opc_56] "i" (TAGGED_MEM_STORE_REC_OPC | 56) \ + : "memory"); \ +}) + +#define E2K_TAGGED_MEMMOVE_128_RF_V2(__dst, __src) \ +({ \ + u64 __tmp1, __tmp2, __tmp3, __tmp4, __tmp5, __tmp6, __tmp7, __tmp8; \ + asm ( \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_r0] ], %[tmp1]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_r1] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_r2] ], %[tmp3]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_r3] ], %[tmp4]\n" \ + "}\n" \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_r4] ], %[tmp5]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_r5] ], %[tmp6]\n" \ + "}\n" \ + "{\n" \ + "nop 1\n" \ + "ldrd,2 [ %[src] + %[ld_opc_r6] ], %[tmp7]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_r7] ], %[tmp8]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_r0] ], %[tmp1]\n" \ + "strd,5 [ %[dst] + %[st_opc_r1] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_r2] ], %[tmp3]\n" \ + "strd,5 [ %[dst] + %[st_opc_r3] ], %[tmp4]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_r4] ], %[tmp5]\n" \ + "strd,5 [ %[dst] + %[st_opc_r5] ], %[tmp6]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_r6] ], %[tmp7]\n" \ + "strd,5 [ %[dst] 
+ %[st_opc_r7] ], %[tmp8]\n" \ + "}\n" \ + : [tmp1] "=&r" (__tmp1), [tmp2] "=&r" (__tmp2), \ + [tmp3] "=&r" (__tmp3), [tmp4] "=&r" (__tmp4), \ + [tmp5] "=&r" (__tmp5), [tmp6] "=&r" (__tmp6), \ + [tmp7] "=&r" (__tmp7), [tmp8] "=&r" (__tmp8) \ + : [src] "r" (__src), [dst] "r" (__dst), \ + [ld_opc_r0] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [ld_opc_r1] "i" (TAGGED_MEM_LOAD_REC_OPC | 8), \ + [ld_opc_r2] "i" (TAGGED_MEM_LOAD_REC_OPC | 32), \ + [ld_opc_r3] "i" (TAGGED_MEM_LOAD_REC_OPC | 40), \ + [ld_opc_r4] "i" (TAGGED_MEM_LOAD_REC_OPC | 64), \ + [ld_opc_r5] "i" (TAGGED_MEM_LOAD_REC_OPC | 72), \ + [ld_opc_r6] "i" (TAGGED_MEM_LOAD_REC_OPC | 96), \ + [ld_opc_r7] "i" (TAGGED_MEM_LOAD_REC_OPC | 104), \ + [st_opc_r0] "i" (TAGGED_MEM_STORE_REC_OPC), \ + [st_opc_r1] "i" (TAGGED_MEM_STORE_REC_OPC | 8), \ + [st_opc_r2] "i" (TAGGED_MEM_STORE_REC_OPC | 32), \ + [st_opc_r3] "i" (TAGGED_MEM_STORE_REC_OPC | 40), \ + [st_opc_r4] "i" (TAGGED_MEM_STORE_REC_OPC | 64), \ + [st_opc_r5] "i" (TAGGED_MEM_STORE_REC_OPC | 72), \ + [st_opc_r6] "i" (TAGGED_MEM_STORE_REC_OPC | 96), \ + [st_opc_r7] "i" (TAGGED_MEM_STORE_REC_OPC | 104) \ + : "memory"); \ +}) + +/* Store quadro pointer "ptr" at address "addr" */ +#define E2K_SET_TAGS_AND_STORE_QUADRO(ptr, addr) \ +do { \ + asm ("{\n" \ + "puttagd %0, 15, %%db[0]\n" \ + "puttagd %1, 12, %%db[1]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %2 + %3 ], %%db[0]\n" \ + "strd,5 [ %2 + %4 ], %%db[1]\n" \ + "}\n" \ + : \ + : "r" (AW(ptr).lo), "r" (AW(ptr).hi), \ + "r" ((unsigned long) addr), \ + "i" (TAGGED_MEM_STORE_REC_OPC), \ + "i" (TAGGED_MEM_STORE_REC_OPC | 8UL) \ + : "%b[0]", "%b[1]"); \ +} while (0) + + +/* + * Read tags at @src and pack them at @dst. 
+ */ +#define NATIVE_EXTRACT_TAGS_32(dst, src) \ +do { \ + register u64 __opc0 = TAGGED_MEM_LOAD_REC_OPC; \ + register u64 __opc8 = TAGGED_MEM_LOAD_REC_OPC | 8; \ + register u64 __opc16 = TAGGED_MEM_LOAD_REC_OPC | 16; \ + register u64 __opc24 = TAGGED_MEM_LOAD_REC_OPC | 24; \ + register u64 __tmp0, __tmp8, __tmp16, __tmp24; \ + \ + asm volatile ( "{\n" \ + "nop 4\n" \ + "ldrd,0 [%5 + %6], %0\n" \ + "ldrd,2 [%5 + %7], %1\n" \ + "ldrd,3 [%5 + %8], %2\n" \ + "ldrd,5 [%5 + %9], %3\n" \ + "}\n" \ + "{\n" \ + "gettagd,2 %1, %1\n" \ + "gettagd,5 %3, %3\n" \ + "}\n" \ + "{\n" \ + "gettagd,2 %0, %0\n" \ + "gettagd,5 %2, %2\n" \ + "shls,0 %1, 4, %1\n" \ + "shls,3 %3, 4, %3\n" \ + "}\n" \ + "{\n" \ + "ors,0 %0, %1, %0\n" \ + "ors,3 %2, %3, %2\n" \ + "}\n" \ + "{\n" \ + "stb,2 [ %4 + 0 ], %0\n" \ + "stb,5 [ %4 + 1 ], %2\n" \ + "}\n" \ + : "=&r" (__tmp0), "=&r" (__tmp8), \ + "=&r" (__tmp16), "=&r" (__tmp24) \ + : "r" (dst), "r" (src), \ + "r" (__opc0), "r" (__opc8), \ + "r" (__opc16), "r" (__opc24)); \ +} while (0) + +#define NATIVE_LOAD_TAGD(addr) \ +({ \ + u32 __dtag; \ + asm ("ldrd [%1 + %2], %0\n" \ + "gettagd %0, %0\n" \ + : "=r"(__dtag) \ + : "m" (*(unsigned long long *) (addr)), \ + "i"(TAGGED_MEM_LOAD_REC_OPC)); \ + __dtag; \ +}) + +#define NATIVE_LOAD_VAL_AND_TAGD(addr, val, tag) \ +do { \ + BUILD_BUG_ON(sizeof(tag) > 4); \ + asm ("ldrd [%2 + %3], %1\n" \ + "gettagd %1, %0\n" \ + "puttagd %1, 0, %1\n" \ + : "=r" (tag), "=r" (val) \ + : "m" (*((unsigned long long *) (addr))), \ + "i" (TAGGED_MEM_LOAD_REC_OPC)); \ +} while (0) + +#define NATIVE_LOAD_VAL_AND_TAGW(addr, val, tag) \ +({ \ + register int __tag; \ + register long __word; \ + asm ("{ldrd [%2 + %3], %1\n}" \ + "{gettagd \t%1, %0\n" \ + " puttagd \t%1, 0, %1}\n" \ + : "=r"(__tag), "=r"(__word) \ + : "m" (*((unsigned long long *) (addr))), \ + "i"(TAGGED_MEM_LOAD_REC_OPC_W)); \ + val = __word; \ + tag = __tag; \ +}) + +#define NATIVE_LOAD_TAGGED_QWORD_AND_TAGS(addr, lo, hi, tag_lo, tag_hi) \ +{ \ + 
/**
 * Load/store based data operations
 */
volatile ("stw," #chan_letter "\t%%dg" #greg_no ", [%0], %1" \ + : \ + : "ri" ((__e2k_u64_t) (offset)), \ + "r" ((__e2k_u32_t) (value))); \ +}) +#define E2K_ST_GREG_BASED_D(greg_no, offset, value, chan_letter) \ +({ \ + asm volatile ("std," #chan_letter "\t%%dg" #greg_no ", [%0], %1" \ + : \ + : "ri" ((__e2k_u64_t) (offset)), \ + "r" ((__e2k_u64_t) (value))); \ +}) + +#define E2K_LOAD_GREG_BASED_B(greg_no, offset) \ + E2K_LD_GREG_BASED_B(greg_no, offset, 0) +#define E2K_LOAD_GREG_BASED_H(greg_no, offset) \ + E2K_LD_GREG_BASED_H(greg_no, offset, 0) +#define E2K_LOAD_GREG_BASED_W(greg_no, offset) \ + E2K_LD_GREG_BASED_W(greg_no, offset, 0) +#define E2K_LOAD_GREG_BASED_D(greg_no, offset) \ + E2K_LD_GREG_BASED_D(greg_no, offset, 0) + +#define E2K_STORE_GREG_BASED_B(greg_no, offset, value) \ + E2K_ST_GREG_BASED_B(greg_no, offset, value, 2) +#define E2K_STORE_GREG_BASED_H(greg_no, offset, value) \ + E2K_ST_GREG_BASED_H(greg_no, offset, value, 2) +#define E2K_STORE_GREG_BASED_W(greg_no, offset, value) \ + E2K_ST_GREG_BASED_W(greg_no, offset, value, 2) +#define E2K_STORE_GREG_BASED_D(greg_no, offset, value) \ + E2K_ST_GREG_BASED_D(greg_no, offset, value, 2) + +/* + * Bytes swapping + */ + +#define E2K_SWAPB_16(addr) E2K_READ_MAS_H(addr, MAS_BIGENDIAN) +#define E2K_SWAPB_32(addr) E2K_READ_MAS_W(addr, MAS_BIGENDIAN) +#define E2K_SWAPB_64(addr) E2K_READ_MAS_D(addr, MAS_BIGENDIAN) + +#define _E2K_GEN_LABEL(label_name, label_no) #label_name #label_no + +#define _E2K_ASM_LABEL_L(label_name, label_no) \ + asm volatile ("\n" _E2K_GEN_LABEL(label_name, label_no) ":"); + +#define _E2K_ASM_LABEL_R(label_name, label_no) \ + _E2K_GEN_LABEL(label_name, label_no) + + +/* + * Atomic read hardware stacks (procedure and chain) registers + * in coordinated state. 
 * Any interrupt inside the register-reading sequence can update
 * some fields of the registers, and they can be left in a miscoordinated state.
 * So use "wait lock" and "wait unlock" load/store to avoid interrupts.
 * Argument 'lock_addr' is used only to provide lock/unlock, so it can be
 * any unused local variable of the caller.
%%MLOCK\n" \ + "}\n" \ + : "=&r" (pcsp_hi), \ + "=&r" (pcshtp) \ + : "r" ((__e2k_ptr_t) (&lock_addr)) \ + : "memory"); \ +}) +#define ATOMIC_READ_HW_STACKS_SIZES(psp_hi, pshtp, pcsp_hi, pcshtp) \ +({ \ + unsigned long lock_addr; \ + asm volatile ( \ + "\n" \ + "1:\n" \ + "\t ldd,0 \t 0, [%4] 7, %0\n" \ + \ + "\t rrd \t %%psp.hi, %0\n" \ + "\t rrd \t %%pshtp, %1\n" \ + "\t rrd \t %%pcsp.hi, %2\n" \ + "\t rrs \t %%pcshtp, %3\n" \ + \ + "{\n" \ + "\t std,2 \t 0, [%4] 2, %0\n" \ + "\t ibranch \t 1b ? %%MLOCK\n" \ + "}\n" \ + : "=&r" (psp_hi), \ + "=&r" (pshtp), \ + "=&r" (pcsp_hi), \ + "=&r" (pcshtp) \ + : "r" ((__e2k_ptr_t) (&lock_addr)) \ + : "memory"); \ +}) +#define ATOMIC_READ_HW_STACKS_REGS(psp_lo, psp_hi, pshtp, \ + pcsp_lo, pcsp_hi, pcshtp) \ +({ \ + unsigned long lock_addr; \ + asm volatile ( \ + "\n" \ + "1:\n" \ + "\t ldd,0 \t 0, [%6] 7, %0\n" \ + \ + "\t rrd \t %%psp.lo, %0\n" \ + "\t rrd \t %%psp.hi, %1\n" \ + "\t rrd \t %%pshtp, %2\n" \ + "\t rrd \t %%pcsp.lo, %3\n" \ + "\t rrd \t %%pcsp.hi, %4\n" \ + "\t rrs \t %%pcshtp, %5\n" \ + \ + "{\n" \ + "\t std,2 \t 0, [%6] 2, %0\n" \ + "\t ibranch \t 1b ? %%MLOCK\n" \ + "}\n" \ + : "=&r" (psp_lo), \ + "=&r" (psp_hi), \ + "=&r" (pshtp), \ + "=&r" (pcsp_lo), \ + "=&r" (pcsp_hi), \ + "=&r" (pcshtp) \ + : "r" ((__e2k_ptr_t) (&lock_addr)) \ + : "memory"); \ +}) +/* + * Atomic read all stacks hardware (procedure and chain) and data stack + * registers in coordinated state. 
+ */ +#define ATOMIC_READ_ALL_STACKS_REGS(psp_lo, psp_hi, pshtp, \ + pcsp_lo, pcsp_hi, pcshtp, \ + usd_lo, usd_hi, cr1_hi) \ +({ \ + unsigned long lock_addr; \ + asm volatile ( \ + "\n" \ + "1:\n" \ + "\t ldd,0 \t 0, [%9] 7, %0\n" \ + \ + "\t rrd \t %%psp.lo, %0\n" \ + "\t rrd \t %%psp.hi, %1\n" \ + "\t rrd \t %%pshtp, %2\n" \ + "\t rrd \t %%pcsp.lo, %3\n" \ + "\t rrd \t %%pcsp.hi, %4\n" \ + "\t rrs \t %%pcshtp, %5\n" \ + "\t rrd \t %%usd.lo, %6\n" \ + "\t rrd \t %%usd.hi, %7\n" \ + "\t rrd \t %%cr1.hi, %8\n" \ + \ + "{\n" \ + "\t std,2 \t 0, [%9] 2, %0\n" \ + "\t ibranch \t 1b ? %%MLOCK\n" \ + "}\n" \ + : "=&r" (psp_lo), \ + "=&r" (psp_hi), \ + "=&r" (pshtp), \ + "=&r" (pcsp_lo), \ + "=&r" (pcsp_hi), \ + "=&r" (pcshtp), \ + "=&r" (usd_lo), \ + "=&r" (usd_hi), \ + "=&r" (cr1_hi) \ + : "r" ((__e2k_ptr_t) (&lock_addr)) \ + : "memory"); \ +}) + +#define NATIVE_ASM_FLUSH_DCACHE_LINE(addr) \ +do { \ + asm volatile("{wait st_c=1}\n" \ + "{std,2 [ %0 + 0 ] %2, %1}\n" \ + "{wait fl_c=1}\n" \ + : \ + : "r" (addr), "r" (0), "i" (MAS_DCACHE_LINE_FLUSH));\ +} while (0) + +#define NATIVE_CLEAN_LD_ACQ_ADDRESS(_reg1, _reg2, _hwbug_address) \ +({ \ + asm volatile ( \ + "{\n" \ + "ldb,0,sm %[addr], 0 * 4096 + 0 * 64, %[reg1], mas=%[mas]\n" \ + "ldb,3,sm %[addr], 0 * 4096 + 4 * 64, %[reg2], mas=%[mas]\n" \ + "}\n" \ + "{\n" \ + "ldb,0,sm %[addr], 8 * 4096 + 1 * 64, %[reg1], mas=%[mas]\n" \ + "ldb,3,sm %[addr], 8 * 4096 + 5 * 64, %[reg2], mas=%[mas]\n" \ + "}\n" \ + "{\n" \ + "ldb,0,sm %[addr], 16 * 4096 + 2 * 64, %[reg1], mas=%[mas]\n" \ + "ldb,3,sm %[addr], 16 * 4096 + 6 * 64, %[reg2], mas=%[mas]\n" \ + "}\n" \ + "{\n" \ + "ldb,0,sm %[addr], 24 * 4096 + 3 * 64, %[reg1], mas=%[mas]\n" \ + "ldb,3,sm %[addr], 24 * 4096 + 7 * 64, %[reg2], mas=%[mas]\n" \ + "}\n" \ + : [reg1] "=&r" (_reg1), [reg2] "=&r" (_reg2) \ + : [addr] "r" (__hwbug_address), \ + [mas] "i" (MAS_BYPASS_ALL_CACHES | \ + MAS_MODE_LOAD_OP_LOCK_CHECK)); \ +}) + + +#if !defined(CONFIG_BOOT_E2K) && !defined(E2K_P2V) && 
defined(CONFIG_CPU_ES2) + +# define HWBUG_ATOMIC_BEGIN(addr) \ + unsigned long __hwbug_atomic_flags = 0; \ + bool __hwbug_atomic_possible = cpu_has(CPU_HWBUG_ATOMIC); \ + if (__hwbug_atomic_possible) { \ + __hwbug_atomic_flags = NATIVE_NV_READ_UPSR_REG_VALUE(); \ + NATIVE_SET_UPSR_IRQ_BARRIER( \ + __hwbug_atomic_flags & ~(UPSR_IE | UPSR_NMIE)); \ + NATIVE_FLUSH_DCACHE_LINE_UNPRIV((unsigned long) (addr)); \ + } +# define HWBUG_ATOMIC_END() \ + if (__hwbug_atomic_possible) \ + NATIVE_SET_UPSR_IRQ_BARRIER(__hwbug_atomic_flags) +#else +# define HWBUG_ATOMIC_BEGIN(addr) +# define HWBUG_ATOMIC_END() +#endif + +/* + * On E2C+ atomic operations have relaxed memory ordering: + * _st_unlock can be reordered with subsequent loads and stores. + * Issue an explicit memory barrier if atomic operation returns a value. + * + * On E4C with multiple nodes and E2C+ atomic operations have fully + * relaxed memory ordering because of a hardware bug, must add "wait ma_c". + */ +#if !defined CONFIG_E2K_MACHINE && defined CONFIG_E2K_MINVER_V2 +# define MB_BEFORE_ATOMIC "{wait st_c=1, ma_c=1}\n" +# define MB_AFTER_ATOMIC "{wait st_c=1, ma_c=1}\n" +# define MB_AFTER_ATOMIC_LOCK_MB /* E2K_WAIT_ST_C_SAS() */ \ + ".word 0x00008001\n" \ + ".word 0x30000084\n" +#elif defined CONFIG_E2K_ES2_DSP || defined CONFIG_E2K_ES2_RU +# define MB_BEFORE_ATOMIC "{wait st_c=1, ma_c=1}\n" +# define MB_AFTER_ATOMIC "{wait st_c=1, ma_c=1}\n" +# define MB_AFTER_ATOMIC_LOCK_MB /* E2K_WAIT_ST_C_SAS() */ \ + ".word 0x00008001\n" \ + ".word 0x30000084\n" +#elif (defined CONFIG_E2K_E2S || defined CONFIG_E2K_MINVER_V3) && defined CONFIG_NUMA +# define MB_BEFORE_ATOMIC "{wait st_c=1, ma_c=1}\n" +# define MB_AFTER_ATOMIC "{wait st_c=1, ma_c=1}\n" +# define MB_AFTER_ATOMIC_LOCK_MB +#else +# define MB_BEFORE_ATOMIC +# define MB_AFTER_ATOMIC +# define MB_AFTER_ATOMIC_LOCK_MB +#endif + +#define MB_BEFORE_ATOMIC_LOCK_MB + +#define MB_BEFORE_ATOMIC_STRONG_MB MB_BEFORE_ATOMIC +#define MB_AFTER_ATOMIC_STRONG_MB MB_AFTER_ATOMIC + 
+#define MB_BEFORE_ATOMIC_RELEASE_MB MB_BEFORE_ATOMIC +#define MB_AFTER_ATOMIC_RELEASE_MB + +#define MB_BEFORE_ATOMIC_ACQUIRE_MB +#define MB_AFTER_ATOMIC_ACQUIRE_MB MB_AFTER_ATOMIC + +#define MB_BEFORE_ATOMIC_RELAXED_MB +#define MB_AFTER_ATOMIC_RELAXED_MB + +#ifdef CONFIG_DEBUG_LCC_VOLATILE_ATOMIC +# define NOT_VOLATILE volatile +#else +# define NOT_VOLATILE +#endif + +#if CONFIG_CPU_ISET >= 5 +# define ACQUIRE_MB_ATOMIC_CHANNEL "5" +# define RELAXED_MB_ATOMIC_CHANNEL "5" +#else /* CONFIG_CPU_ISET < 5 */ +# define ACQUIRE_MB_ATOMIC_CHANNEL "2" +# define RELAXED_MB_ATOMIC_CHANNEL "2" +#endif /* CONFIG_CPU_ISET >= 5 */ +#define RELEASE_MB_ATOMIC_CHANNEL "2" +#define STRONG_MB_ATOMIC_CHANNEL "2" +#define LOCK_MB_ATOMIC_CHANNEL ACQUIRE_MB_ATOMIC_CHANNEL + +#if CONFIG_CPU_ISET >= 6 +# define LOCK_MB_ATOMIC_MAS "0x2" +# define ACQUIRE_MB_ATOMIC_MAS "0x2" +# define RELEASE_MB_ATOMIC_MAS "0x73" +# define STRONG_MB_ATOMIC_MAS "0x73" +# define RELAXED_MB_ATOMIC_MAS "0x2" +#else +# define LOCK_MB_ATOMIC_MAS "0x2" +# define ACQUIRE_MB_ATOMIC_MAS "0x2" +# define RELEASE_MB_ATOMIC_MAS "0x2" +# define STRONG_MB_ATOMIC_MAS "0x2" +# define RELAXED_MB_ATOMIC_MAS "0x2" +#endif + +#define CLOBBERS_LOCK_MB : "memory" +#define CLOBBERS_ACQUIRE_MB : "memory" +#define CLOBBERS_RELEASE_MB : "memory" +#define CLOBBERS_STRONG_MB : "memory" +#define CLOBBERS_RELAXED_MB + +/* + * mem_model - one of the following: + * LOCK_MB + * ACQUIRE_MB + * RELEASE_MB + * STRONG_MB + * RELAXED_MB + */ +#define NATIVE_ATOMIC_OP(__val, __addr, __rval, \ + size_letter, op, mem_model) \ +do { \ + HWBUG_ATOMIC_BEGIN(__addr); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_##mem_model \ + "\n1:" \ + "\n{"\ + "\nnop 4"\ + "\nld" #size_letter ",0 %[addr], %[rval], mas=0x7" \ + "\n}" \ + "\n{"\ + "\n" op " %[rval], %[val], %[rval]" \ + "\n}" \ + "\n{"\ + "\nst" #size_letter "," mem_model##_ATOMIC_CHANNEL \ + " %[addr], %[rval], mas=" mem_model##_ATOMIC_MAS \ + "\nibranch 1b ? 
%%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_##mem_model \ + : [rval] "=&r" (__rval), [addr] "+m" (*(__addr)) \ + : [val] "ir" (__val) \ + CLOBBERS_##mem_model); \ + HWBUG_ATOMIC_END(); \ +} while (0) + +#define NATIVE_ATOMIC_FETCH_OP(__val, __addr, __rval, __tmp, \ + size_letter, op, mem_model) \ +do { \ + HWBUG_ATOMIC_BEGIN(__addr); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_##mem_model \ + "\n1:" \ + "\n{"\ + "\nnop 4"\ + "\nld" #size_letter ",0 %[addr], %[rval], mas=0x7" \ + "\n}" \ + "\n{"\ + "\n" op " %[rval], %[val], %[tmp]" \ + "\n}" \ + "\n{"\ + "\nst" #size_letter "," mem_model##_ATOMIC_CHANNEL \ + " %[addr], %[tmp], mas=" mem_model##_ATOMIC_MAS \ + "\nibranch 1b ? %%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_##mem_model \ + : [tmp] "=&r" (__tmp), [addr] "+m" (*(__addr)), \ + [rval] "=&r" (__rval) \ + : [val] "ir" (__val) \ + CLOBBERS_##mem_model); \ + HWBUG_ATOMIC_END(); \ +} while (0) + +#define NATIVE_ATOMIC32_ADD_IF_NOT_NEGATIVE(__val, __addr, __rval, mem_model) \ +do { \ + HWBUG_ATOMIC_BEGIN(__addr); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_##mem_model \ + "\n1:" \ + "\n{"\ + "\nnop 4"\ + "\nldw,0 %[addr], %[rval], mas=0x7" \ + "\n}" \ + "\n{" \ + "\nnop 1"\ + "\ncmplsb %[rval], 0, %%pred2" \ + "\n}" \ + "\n{"\ + "\nnop 2" /* bug 92891 - optimize for performance */ \ + "\nadds %[rval], %[val], %[rval] ? ~ %%pred2" \ + "\n}" \ + "\n{"\ + "\nstw," mem_model##_ATOMIC_CHANNEL " %[addr], %[rval], mas=" mem_model##_ATOMIC_MAS \ + "\nibranch 1b ? 
%%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_##mem_model \ + : [rval] "=&r" (__rval), [addr] "+m" (*(__addr)) \ + : [val] "ir" (__val) \ + CLOBBERS_PRED2_##mem_model); \ + HWBUG_ATOMIC_END(); \ +} while (0) + +#define NATIVE_ATOMIC64_ADD_IF_NOT_NEGATIVE(__val, __addr, __rval, mem_model) \ +do { \ + HWBUG_ATOMIC_BEGIN(__addr); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_##mem_model \ + "\n1:" \ + "\n{" \ + "\nnop 4" \ + "\nldd,0 %[addr], %[rval], mas=0x7" \ + "\n}" \ + "\n{" \ + "\nnop 1" \ + "\ncmpldb %[rval], 0, %%pred2" \ + "\n}" \ + "\n{"\ + "\nnop 2" /* bug 92891 - optimize for performance */ \ + "\naddd %[rval], %[val], %[rval] ? ~ %%pred2" \ + "\n}" \ + "\n{"\ + "\nstd," mem_model##_ATOMIC_CHANNEL \ + " %[addr], %[rval], mas=" mem_model##_ATOMIC_MAS \ + "\nibranch 1b ? %%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_##mem_model \ + : [rval] "=&r" (__rval), [addr] "+m" (*(__addr)) \ + : [val] "ir" (__val) \ + CLOBBERS_PRED2_##mem_model); \ + HWBUG_ATOMIC_END(); \ +} while (0) + +/* Atomically add to 16 low bits and return the new 32 bits value */ +#define NATIVE_ATOMIC16_ADD_RETURN32_LOCK(val, addr, rval, tmp) \ +({ \ + HWBUG_ATOMIC_BEGIN(addr); \ + asm NOT_VOLATILE ( \ + "\n1:" \ + "\n{"\ + "\nnop 4"\ + "\nldw,0\t0x0, [%3] 0x7, %0" \ + "\n}" \ + "\n{"\ + "\nadds %0, %2, %1" \ + "\nands %0, 0xffff0000, %0" \ + "\n}" \ + "\nands %1, 0x0000ffff, %1" \ + "\nadds %0, %1, %0" \ + "\n{"\ + "\nstw," ACQUIRE_MB_ATOMIC_CHANNEL " 0x0, [%3] 0x2, %0" \ + "\nibranch 1b ? 
%%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_LOCK_MB \ + : "=&r" (rval), "=&r" (tmp) \ + : "i" (val), "r" ((__e2k_ptr_t) (addr)) \ + : "memory"); \ + HWBUG_ATOMIC_END(); \ +}) + +/* Atomically add two 32 bits values packed into one 64 bits value */ +/* and return the new 64 bits value */ +#define NATIVE_ATOMIC32_PAIR_ADD_RETURN64_LOCK(val_lo, val_hi, addr, rval, \ + tmp1, tmp2, tmp3) \ +({ \ + HWBUG_ATOMIC_BEGIN(addr); \ + asm NOT_VOLATILE ( \ + "\n1:" \ + "\n\t{"\ + "\n\tnop 4"\ + "\n\tldd,0\t0x0, [%6] 0x7, %0" \ + "\n\t}"\ + "\n\t{"\ + "\n\tsard %0, 32, %1" \ + "\n\tadds %4, 0, %2" \ + "\n\tadds %5, 0, %3" \ + "\n\t}" \ + "\n\t{"\ + "\n\tadds %1, %3, %1" \ + "\n\tadds %0, %2, %0" \ + "\n\t}" \ + "\n\t{"\ + "\n\tsxt 6, %1, %1" \ + "\n\tsxt 6, %0, %0" \ + "\n\t}" \ + "\n\tshld %1, 32, %1" \ + "\n\tord %1, %0, %0" \ + "\n\t{"\ + "\n\tstd," ACQUIRE_MB_ATOMIC_CHANNEL "0x0, [%6] 0x2, %0" \ + "\n\tibranch 1b ? %%MLOCK" \ + "\n\t}" \ + MB_AFTER_ATOMIC_LOCK_MB \ + : "=&r" (rval), \ + "=&r" (tmp1), \ + "=&r" (tmp2), \ + "=&r" (tmp3) \ + : "ri" (val_lo), \ + "ri" (val_hi), \ + "r" ((__e2k_ptr_t) (addr)) \ + : "memory"); \ + HWBUG_ATOMIC_END(); \ +}) + +/* Atomically sub two 32 bits values packed into one 64 bits value */ +/* and return the new 64 bits value */ +#define NATIVE_ATOMIC32_PAIR_SUB_RETURN64_LOCK(val_lo, val_hi, addr, rval, \ + tmp1, tmp2, tmp3) \ +({ \ + HWBUG_ATOMIC_BEGIN(addr); \ + asm NOT_VOLATILE ( \ + "\n1:" \ + "\n\t{"\ + "\n\tnop 4"\ + "\n\tldd,0\t0x0, [%6] 0x7, %0" \ + "\n\t}"\ + "\n\t{"\ + "\n\tsard %0, 32, %1" \ + "\n\tadds %4, 0, %2" \ + "\n\tadds %5, 0, %3" \ + "\n\t}" \ + "\n\t{"\ + "\n\tsubs %1, %3, %1" \ + "\n\tsubs %0, %2, %0" \ + "\n\t}" \ + "\n\t{"\ + "\n\tsxt 6, %1, %1" \ + "\n\tsxt 6, %0, %0" \ + "\n\t}" \ + "\n\tshld %1, 32, %1" \ + "\n\tord %1, %0, %0" \ + "\n\t{"\ + "\n\tstd," ACQUIRE_MB_ATOMIC_CHANNEL "0x0, [%6] 0x2, %0" \ + "\n\tibranch 1b ? 
%%MLOCK" \ + "\n\t}" \ + MB_AFTER_ATOMIC_LOCK_MB \ + : "=&r" (rval), \ + "=&r" (tmp1), \ + "=&r" (tmp2), \ + "=&r" (tmp3) \ + : "ri" (val_lo), \ + "ri" (val_hi), \ + "r" ((__e2k_ptr_t) (addr)) \ + : "memory"); \ + HWBUG_ATOMIC_END(); \ +}) + +/* + * C equivalent: + * + * boot_spinlock_t oldval, newval; + * oldval.lock = ACCESS_ONCE(lock->lock); + * if (oldval.head == oldval.tail) { + * newval.lock = oldval.lock + (1 << BOOT_SPINLOCK_TAIL_SHIFT); + * if (cmpxchg(&lock->lock, oldval.lock, newval.lock) == + * oldval.lock) + * return 1; + * } + * return 0; + */ +#define NATIVE_ATOMIC_TICKET_TRYLOCK(spinlock, tail_shift, \ + __val, __head, __tail, __rval) \ +do { \ + HWBUG_ATOMIC_BEGIN(spinlock); \ + asm NOT_VOLATILE ( \ + "\n1:" \ + "\n{"\ + "\nnop 4"\ + "\nldw,0 %[addr], %[val], mas=0x7" \ + "\n}" \ + "\n{" \ + "\nshrs,0 %[val], 0x10, %[tail]" \ + "\ngetfs,1 %[val], 0x400, %[head]" \ + "\n}" \ + "\n{" \ + "\nnop" \ + "\ncmpesb,0 %[tail], %[head], %%pred2" \ + "\nadds 0, 0, %[rval]" \ + "\n}" \ + "\n{" \ + "\nnop 3" /* bug 92891 - optimize for performance */ \ + "\nadds,0 0, 1, %[rval] ? %%pred2" \ + "\nadds,2 %[val], %[incr], %[val] ? %%pred2" \ + "\n}" \ + "\n{" \ + "\nstw," ACQUIRE_MB_ATOMIC_CHANNEL " %[addr], %[val], mas=" LOCK_MB_ATOMIC_MAS \ + "\nibranch 1b ? %%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_LOCK_MB \ + : [rval] "=&r" (__rval), [val] "=&r" (__val), \ + [head] "=&r" (__head), [tail] "=&r" (__tail), \ + [addr] "+m" (*(spinlock)) \ + : [incr] "i" (1 << tail_shift) \ + : "memory", "pred2"); \ + HWBUG_ATOMIC_END(); \ +} while (0) + +/* + * Atomic support of new read/write spinlock mechanism. + * Locking is ordered and later readers cannot outrun former writers. + * Locking order based on coupons (tickets) received while first try to get + * lock, if lock is already taken by other. + * + * read/write spinlocks initial state allowing 2^32 active readers and + * only one active writer. 
But coupon discipline allows simultaniously + * have only 2^16 registered users of the lock: active + waiters + */ + +/* + * It is test: is read/write lock can be now taken by reader + * Macros return source state of read/write lock and set bypassed boolean value + * 'success - locking can be successful' + * + * C equivalent: + * +static rwlock_val_t +atomic_can_lock_reader(arch_rwlock_t *rw, bool success // bypassed) +{ + arch_rwlock_t src_lock; + u16 ticket; + u16 head; + s32 count; + + src_lock.lock = rw->lock; + ticket = src_lock.ticket; + head = src_lock.head; + count = src_lock.count; + // can lock: none waiters and active writers + success = (ticket == head) && (count-1 < 0); + return src_lock.lock; +} + */ +#define NATIVE_ATOMIC_CAN_LOCK_READER(__rw_addr, __success, \ + __head, __ticket, __count, __src) \ +({ \ + asm ( \ + "\n\tldd,0 %[addr], %[src]" \ + "\n\t{" \ + "\n\tsard %[src], 32, %[count]" \ + "\n\tgetfd %[src], 0x400, %[head]" \ + "\n\tgetfd %[src], 0x410, %[ticket]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tsubs %[count], 1, %[count]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tcmplsb %[count], 0, %%pred3" \ + "\n\tcmpesb %[head], %[ticket], %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tpass %%pred2, @p0" \ + "\n\tpass %%pred3, @p1" \ + "\n\tlandp @p0, @p1, @p4"\ + "\n\tpass @p4, %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tadds 1, 0, %[success] ? %%pred2" \ + "\n\tadds 0, 0, %[success] ? 
~%%pred2" \ + "\n\t}" \ + : [success] "=&r" (__success), \ + [src] "=&r" (__src), \ + [head] "=&r" (__head), \ + [ticket] "=&r" (__ticket), \ + [count] "=&r" (__count) \ + : [addr] "m" (*(__rw_addr)) \ + : "memory", "pred2", "pred3"); \ +}) + +/* + * It is test: is read/write lock can be now taken by writer + * Macros return source state of read/write lock and set bypassed boolean value + * 'success - locking can be successful' + * + * C equivalent: + * +static rwlock_val_t +atomic_can_lock_writer(arch_rwlock_t *rw, bool success // bypassed) +{ + arch_rwlock_t src_lock; + u16 ticket; + u16 head; + s32 count; + + src_lock.lock = rw->lock; + ticket = src_lock.ticket; + head = src_lock.head; + count = src_lock.count; + // can lock: none waiters and active readers and writers + success = (ticket == head) && (count == 0); + return src_lock.lock; +} + */ +#define NATIVE_ATOMIC_CAN_LOCK_WRITER(__rw_addr, __success, \ + __head, __ticket, __count, __src) \ +({ \ + asm ( \ + "\n\t{" \ + "\n\tnop 4" \ + "\n\tldd,0 %[addr], %[src]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tsard %[src], 32, %[count]" \ + "\n\tgetfd %[src], 0x400, %[head]" \ + "\n\tgetfd %[src], 0x410, %[ticket]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tcmpesb %[count], 0, %%pred3" \ + "\n\tcmpesb %[head], %[ticket], %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tpass %%pred2, @p0" \ + "\n\tpass %%pred3, @p1" \ + "\n\tlandp @p0, @p1, @p4"\ + "\n\tpass @p4, %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tadds 1, 0, %[success] ? %%pred2" \ + "\n\tadds 0, 0, %[success] ? ~%%pred2" \ + "\n\t}" \ + : [success] "=&r" (__success), \ + [src] "=&r" (__src), \ + [head] "=&r" (__head), \ + [ticket] "=&r" (__ticket), \ + [count] "=&r" (__count) \ + : [addr] "m" (*(__rw_addr)) \ + : "memory", "pred2", "pred3"); \ +}) + +/* + * The first try to take read spinlock. 
+ * Successful locking increment # of ticket and head, decrement active + * readers counter (negative counter) + * Macros return source state of read/write lock and set bypassed boolean value + * 'success - lockin is successful', otherwise reader receives coupon and + * should be queued as waiter similar mutex implementation + * + * C equivalent: + * +static rwlock_val_t +atomic_add_new_reader(arch_rwlock_t *rw, bool success // bypassed) +{ + arch_rwlock_t src_lock; + arch_rwlock_t dst_lock; + u16 ticket; + u16 head; + s32 count; + + src_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0); + ticket = src_lock.ticket; + head = src_lock.head; + count = src_lock.count; + // can lock: none waiters and active writers + success = (ticket == head) && (count-1 < 0); + dst_lock.ticket = ticket + 1; + if (success) { + // take lock: increment readers (negative value), + // increment head to enable follow readers + count = count - 1; + head = head + 1; + } + dst_lock.count = count; + dst_lock.head = head; + E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0); + return src_lock.lock; +} + */ +#define NATIVE_ATOMIC_ADD_NEW_READER(__rw_addr, __success, \ + __head, __ticket, __count, __src, __dst, __tmp) \ +({ \ + HWBUG_ATOMIC_BEGIN(__rw_addr); \ + asm NOT_VOLATILE ( \ + "\n1:" \ + "\n\t{" \ + "\n\tnop 4" \ + "\n\tldd,0 %[addr], %[src], mas=0x7" \ + "\n\t}" \ + "\n\t{" \ + "\n\tsard %[src], 32, %[count]" \ + "\n\tgetfd %[src], 0x400, %[head]" \ + "\n\tgetfd %[src], 0x410, %[ticket]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tsubs %[count], 1, %[tmp]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tcmplsb %[tmp], 0, %%pred3" \ + "\n\tcmpesb %[head], %[ticket], %%pred2" \ + "\n\tadds %[ticket], 1, %[ticket]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tsxt 5, %[ticket], %[ticket]" \ + "\n\tpass %%pred2, @p0" \ + "\n\tpass %%pred3, @p1" \ + "\n\tlandp @p0, @p1, @p4"\ + "\n\tpass @p4, %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tadds %[head], 1, %[head] ? %%pred2" \ + "\n\tsubs %[count], 1, %[count] ? 
%%pred2" \ + "\n\tshld %[ticket], 16, %[dst]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tsxt 5, %[head], %[head] ? %%pred2" \ + "\n\tsxt 2, %[count], %[count] ? %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tshld %[count], 32, %[tmp]" \ + "\n\tord %[dst], %[head], %[dst]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tord %[dst], %[tmp], %[dst]" \ + "\n\tadds 1, 0, %[success] ? %%pred2" \ + "\n\tadds 0, 0, %[success] ? ~%%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tstd,2 %[addr], %[dst], mas=" LOCK_MB_ATOMIC_MAS \ + "\n\tibranch 1b ? %%MLOCK" \ + "\n\t}" \ + MB_AFTER_ATOMIC_LOCK_MB \ + : [success] "=&r" (__success), \ + [src] "=&r" (__src), \ + [dst] "=&r" (__dst), \ + [head] "=&r" (__head), \ + [ticket] "=&r" (__ticket), \ + [count] "=&r" (__count), \ + [tmp] "=&r" (__tmp), \ + [addr] "+m" (*(__rw_addr)) \ + :: "memory", "pred2", "pred3"); \ + HWBUG_ATOMIC_END(); \ +}) + +/* + * Only try to take read spinlock. + * Successful locking increment # of ticket and head, decrement active + * readers counter (negative counter) + * Macros return source state of read/write lock and set bypassed boolean value + * 'success - lockin is successful', otherwise 'success' is false and + * nothing are not changed + * + * C equivalent: + * +static rwlock_val_t +atomic_try_add_new_reader(arch_rwlock_t *rw, bool success // bypassed) +{ + arch_rwlock_t src_lock; + arch_rwlock_t dst_lock; + u16 ticket; + u16 head; + s32 count; + + src_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0); + ticket = src_lock.ticket; + head = src_lock.head; + count = src_lock.count; + // can lock: none waiters and active writers + success = (ticket == head) && (count-1 < 0); + if (success) { + // take lock: increment readers (negative value), + // increment head to enable follow readers + // increment ticket number for next users + dst_lock.ticket = ticket + 1; + dst_lock.count = count - 1; + dst_lock.head = head + 1; + } else { + dst_lock.lock = src_lock.lock; + } + E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0); + 
return src_lock.lock; +} + */ +#define NATIVE_ATOMIC_TRY_ADD_NEW_READER(__rw_addr, __success, \ + __head, __ticket, __count, __src, __dst, __tmp) \ +({ \ + HWBUG_ATOMIC_BEGIN(__rw_addr); \ + asm NOT_VOLATILE ( \ + "\n1:" \ + "\n\t{" \ + "\n\tnop 4" \ + "\n\tldd,0 %[addr], %[src], mas=0x7" \ + "\n\t}" \ + "\n\t{" \ + "\n\tsard %[src], 32, %[count]" \ + "\n\tgetfd %[src], 0x400, %[head]" \ + "\n\tgetfd %[src], 0x410, %[ticket]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tsubs %[count], 1, %[tmp]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tcmplsb %[tmp], 0, %%pred3" \ + "\n\tcmpesb %[head], %[ticket], %%pred2" \ + "\n\tadds %[ticket], 1, %[ticket]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tsxt 5, %[ticket], %[ticket]" \ + "\n\tpass %%pred2, @p0" \ + "\n\tpass %%pred3, @p1" \ + "\n\tlandp @p0, @p1, @p4"\ + "\n\tpass @p4, %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tadds %[head], 1, %[head] ? %%pred2" \ + "\n\tsubs %[count], 1, %[count] ? %%pred2" \ + "\n\tshld %[ticket], 16, %[dst] ? %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tsxt 5, %[head], %[head] ? %%pred2" \ + "\n\tsxt 2, %[count], %[count] ? %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tshld %[count], 32, %[tmp] ? %%pred2" \ + "\n\tord %[dst], %[head], %[dst] ? %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tord %[dst], %[tmp], %[dst] ? %%pred2" \ + "\n\tadds 1, 0, %[success] ? %%pred2" \ + "\n\taddd %[src], 0, %[dst] ? ~%%pred2" \ + "\n\tadds 0, 0, %[success] ? ~%%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tstd,2 %[addr], %[dst], mas=" LOCK_MB_ATOMIC_MAS \ + "\n\tibranch 1b ? 
%%MLOCK" \ + "\n\t}" \ + MB_AFTER_ATOMIC_LOCK_MB \ + : [success] "=&r" (__success), \ + [src] "=&r" (__src), \ + [dst] "=&r" (__dst), \ + [head] "=&r" (__head), \ + [ticket] "=&r" (__ticket), \ + [count] "=&r" (__count), \ + [tmp] "=&r" (__tmp), \ + [addr] "+m" (*(__rw_addr)) \ + :: "memory", "pred2", "pred3"); \ + HWBUG_ATOMIC_END(); \ +}) + +/* + * The slow try to take read spinlock according to erlier received # of coupon + * Successful locking increment # of head, decrement active readers counter + * (negative counter) + * Macros return current updated state of read/write lock and set bypassed + * boolean value 'success - lockin is successful', otherwise reader should be + * queued again + * + * C equivalent: + * +static rwlock_val_t +atomic_add_slow_reader(arch_rwlock_t *rw, u16 ticket, bool success) +{ + arch_rwlock_t dst_lock; + u16 head; + s32 count; + + dst_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0); + head = src_lock.head; + count = src_lock.count; + // can lock: none waiters and active writers + success = (ticket == head) && (count-1 < 0); + if (success) { + // take lock: increment readers (negative value), + // increment head to enable follow readers + count = count - 1; + head = head + 1; + dst_lock.count = count; + dst_lock.head = head; + } + E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0); + return dst_lock.lock; +} + */ +#define NATIVE_ATOMIC_ADD_SLOW_READER(__rw_addr, __success, \ + __head, __ticket, __count, __dst, __tmp) \ +({ \ + HWBUG_ATOMIC_BEGIN(__rw_addr); \ + asm NOT_VOLATILE ( \ + "\n1:" \ + "\n\t{" \ + "\n\tnop 4" \ + "\n\tldd,0 %[addr], %[dst], mas=0x7" \ + "\n\t}" \ + "\n\t{" \ + "\n\tsard %[dst], 32, %[count]" \ + "\n\tgetfd %[dst], 0x400, %[head]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tsubs %[count], 1, %[tmp]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tcmplsb %[tmp], 0, %%pred3" \ + "\n\tcmpesb %[head], %[ticket], %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tpass %%pred2, @p0" \ + "\n\tpass %%pred3, @p1" \ + "\n\tlandp @p0, 
@p1, @p4"\ + "\n\tpass @p4, %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tadds %[head], 1, %[head] ? %%pred2" \ + "\n\tsubs %[count], 1, %[count] ? %%pred2" \ + "\n\tandd %[dst], 0xffff0000, %[dst] ? %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tsxt 5, %[head], %[head] ? %%pred2" \ + "\n\tsxt 2, %[count], %[count] ? %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tshld %[count], 32, %[tmp] ? %%pred2" \ + "\n\tord %[dst], %[head], %[dst] ? %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tord %[dst], %[tmp], %[dst] ? %%pred2" \ + "\n\tadds 1, 0, %[success] ? %%pred2" \ + "\n\tadds 0, 0, %[success] ? ~%%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tstd,2 %[addr], %[dst], mas=" LOCK_MB_ATOMIC_MAS \ + "\n\tibranch 1b ? %%MLOCK" \ + "\n\t}" \ + MB_AFTER_ATOMIC_LOCK_MB \ + : [success] "=&r" (__success), \ + [dst] "=&r" (__dst), \ + [head] "=&r" (__head), \ + [count] "=&r" (__count), \ + [tmp] "=&r" (__tmp), \ + [addr] "+m" (*(__rw_addr)) \ + : [ticket] "r" (__ticket) \ + : "memory", "pred2", "pred3"); \ + HWBUG_ATOMIC_END(); \ +}) + +/* + * Unlocking of read spinlock. + * Need only increment active readers counter (negative counter) + * Macros return current updated state of read/write lock. + * + * C equivalent: + * +static rwlock_val_t +atomic_free_lock_reader(arch_rwlock_t *rw) +{ + arch_rwlock_t dst_lock; + + dst_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0); + dst_lock.count++; + E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0); + return dst_lock.lock; +} + */ +#define NATIVE_ATOMIC_FREE_LOCK_READER(__rw_addr, __dst) \ +({ \ + HWBUG_ATOMIC_BEGIN(__rw_addr); \ + asm NOT_VOLATILE ( \ + "\n1:" \ + "\n\t{" \ + "\n\tnop 4" \ + "\n\tldd,0 %[addr], %[dst], mas=0x7" \ + "\n\t}" \ + "\n\t{" \ + "\n\tnop 2" \ + "\n\taddd %[dst], 0x100000000, %[dst]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tstd,2 %[addr], %[dst], mas=" LOCK_MB_ATOMIC_MAS \ + "\n\tibranch 1b ? 
%%MLOCK" \ + "\n\t}" \ + MB_AFTER_ATOMIC_LOCK_MB \ + : [dst] "=&r" (__dst), \ + [addr] "+m" (*(__rw_addr)) \ + :: "memory"); \ + HWBUG_ATOMIC_END(); \ +}) + +/* + * The first try to take write spinlock. + * Successful locking increment # of ticket and active writers counter + * (positive value - can be only one active writer, so set counter to 1) + * Macros return source state of read/write lock and set bypassed boolean value + * 'success - lockin is successful', otherwise writer receives coupon and + * should be queued as waiter similar mutex implementation + * + * C equivalent: + * +static rwlock_val_t +atomic_add_new_writer(arch_rwlock_t *rw, bool success // bypassed) +{ + arch_rwlock_t src_lock; + arch_rwlock_t dst_lock; + u16 ticket; + u16 head; + s32 count; + + src_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0); + ticket = src_lock.ticket; + head = src_lock.head; + count = src_lock.count; + // can lock: none waiters and active readers and writers + success = (ticket == head) && (count == 0); + dst_lock.head = head; + dst_lock.ticket = ticket + 1; + if (success) { + // take lock: increment writerss, + count = count + 1; + } + dst_lock.count = count; + E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0); + return src_lock.lock; +} + */ +#define NATIVE_ATOMIC_ADD_NEW_WRITER(__rw_addr, __success, \ + __head, __ticket, __count, __src, __dst, __tmp) \ +({ \ + HWBUG_ATOMIC_BEGIN(__rw_addr); \ + asm NOT_VOLATILE ( \ + "\n1:" \ + "\n\t{" \ + "\n\tnop 4" \ + "\n\tldd,0 %[addr], %[src], mas=0x7" \ + "\n\t}" \ + "\n\t{" \ + "\n\tsard %[src], 32, %[count]" \ + "\n\tgetfd %[src], 0x400, %[head]" \ + "\n\tgetfd %[src], 0x410, %[ticket]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tcmpesb %[count], 0, %%pred3" \ + "\n\tcmpesb %[head], %[ticket], %%pred2" \ + "\n\tadds %[ticket], 1, %[ticket]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tsxt 5, %[ticket], %[ticket]" \ + "\n\tpass %%pred2, @p0" \ + "\n\tpass %%pred3, @p1" \ + "\n\tlandp @p0, @p1, @p4"\ + "\n\tpass @p4, %%pred2" \ + 
"\n\t}" \ + "\n\t{" \ + "\n\tadds %[count], 1, %[count] ? %%pred2" \ + "\n\tshld %[ticket], 16, %[dst]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tsxt 2, %[count], %[count] ? %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tshld %[count], 32, %[tmp]" \ + "\n\tord %[dst], %[head], %[dst]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tord %[dst], %[tmp], %[dst]" \ + "\n\tadds 1, 0, %[success] ? %%pred2" \ + "\n\tadds 0, 0, %[success] ? ~%%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tstd,2 %[addr], %[dst], mas=" LOCK_MB_ATOMIC_MAS \ + "\n\tibranch 1b ? %%MLOCK" \ + "\n\t}" \ + MB_AFTER_ATOMIC_LOCK_MB \ + : [success] "=&r" (__success), \ + [src] "=&r" (__src), \ + [dst] "=&r" (__dst), \ + [head] "=&r" (__head), \ + [ticket] "=&r" (__ticket), \ + [count] "=&r" (__count), \ + [tmp] "=&r" (__tmp), \ + [addr] "+m" (*(__rw_addr)) \ + :: "memory", "pred2", "pred3"); \ + HWBUG_ATOMIC_END(); \ +}) + +/* + * Only try to take write spinlock. + * Successful locking increment # of ticket and active writers counter + * (positive value - can be only one active writer, so set counter to 1) + * Macros return source state of read/write lock and set bypassed boolean value + * 'success - lockin is successful', otherwise 'success' is set to false and + * nothing are not changed + * + * C equivalent: + * +static rwlock_val_t +atomic_try_add_new_writer(arch_rwlock_t *rw, bool success // bypassed) +{ + arch_rwlock_t src_lock; + arch_rwlock_t dst_lock; + u16 ticket; + u16 head; + s32 count; + + src_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0); + ticket = src_lock.ticket; + head = src_lock.head; + count = src_lock.count; + // can lock: none waiters and active readers and writers + success = (ticket == head) && (count == 0); + if (success) { + // take lock: increment writers counter, + // increment ticket number for next readers/writers + dst_lock.head = head; + dst_lock.ticket = ticket + 1; + dst_lock.count = count + 1; + } + E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0); + return src_lock.lock; +} + 
*/ +#define NATIVE_ATOMIC_TRY_ADD_NEW_WRITER(__rw_addr, __success, \ + __head, __ticket, __count, __src, __dst, __tmp) \ +({ \ + HWBUG_ATOMIC_BEGIN(__rw_addr); \ + asm NOT_VOLATILE ( \ + "\n1:" \ + "\n\t{" \ + "\n\tnop 4" \ + "\n\tldd,0 %[addr], %[src], mas=0x7" \ + "\n\t}" \ + "\n\t{" \ + "\n\tsard %[src], 32, %[count]" \ + "\n\tgetfd %[src], 0x400, %[head]" \ + "\n\tgetfd %[src], 0x410, %[ticket]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tcmpesb %[count], 0, %%pred3" \ + "\n\tcmpesb %[head], %[ticket], %%pred2" \ + "\n\tadds %[ticket], 1, %[ticket]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tsxt 5, %[ticket], %[ticket]" \ + "\n\tpass %%pred2, @p0" \ + "\n\tpass %%pred3, @p1" \ + "\n\tlandp @p0, @p1, @p4"\ + "\n\tpass @p4, %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tadds %[count], 1, %[count] ? %%pred2" \ + "\n\tshld %[ticket], 16, %[dst] ? %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tsxt 2, %[count], %[count] ? %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tshld %[count], 32, %[tmp] ? %%pred2" \ + "\n\tord %[dst], %[head], %[dst] ? %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tord %[dst], %[tmp], %[dst] ? %%pred2" \ + "\n\tadds 1, 0, %[success] ? %%pred2" \ + "\n\taddd %[src], 0, %[dst] ? ~%%pred2" \ + "\n\tadds 0, 0, %[success] ? ~%%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tstd,2 %[addr], %[dst], mas=" LOCK_MB_ATOMIC_MAS \ + "\n\tibranch 1b ? 
%%MLOCK" \ + "\n\t}" \ + MB_AFTER_ATOMIC_LOCK_MB \ + : [success] "=&r" (__success), \ + [src] "=&r" (__src), \ + [dst] "=&r" (__dst), \ + [head] "=&r" (__head), \ + [ticket] "=&r" (__ticket), \ + [count] "=&r" (__count), \ + [tmp] "=&r" (__tmp), \ + [addr] "+m" (*(__rw_addr)) \ + :: "memory", "pred2", "pred3"); \ + HWBUG_ATOMIC_END(); \ +}) + +/* + * The slow try to take write spinlock according to erlier received # of coupon + * Successful locking increment active writers counter + * (positive counter - can be only one active writer, so set counter to 1) + * Macros return current updated state of read/write lock and set bypassed + * boolean value 'success - lockin is successful', otherwise writer should be + * queued again + * + * C equivalent: + * +static rwlock_val_t +atomic_add_slow_writer(arch_rwlock_t *rw, u16 ticket, bool success) +{ + arch_rwlock_t dst_lock; + u16 head; + s32 count; + + dst_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0); + head = src_lock.head; + count = src_lock.count; + // can lock: none waiters and active readers and writers + success = (ticket == head) && (count == 0); + if (success) { + // take lock: increment writers, + count = count + 1; + dst_lock.count = count; + } + E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0); + return dst_lock.lock; +} + */ +#define NATIVE_ATOMIC_ADD_SLOW_WRITER(__rw_addr, __success, \ + __head, __ticket, __count, __dst, __tmp) \ +({ \ + HWBUG_ATOMIC_BEGIN(__rw_addr); \ + asm NOT_VOLATILE ( \ + "\n1:" \ + "\n\t{" \ + "\n\tnop 4" \ + "\n\tldd,0 %[addr], %[dst], mas=0x7" \ + "\n\t}" \ + "\n\t{" \ + "\n\tsard %[dst], 32, %[count]" \ + "\n\tgetfd %[dst], 0x400, %[head]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tcmpesb %[count], 0, %%pred3" \ + "\n\tcmpesb %[head], %[ticket], %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tpass %%pred2, @p0" \ + "\n\tpass %%pred3, @p1" \ + "\n\tlandp @p0, @p1, @p4"\ + "\n\tpass @p4, %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tadds %[count], 1, %[count] ? 
%%pred2" \ + "\n\tandd %[dst], 0xffffffff, %[dst] ? %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tsxt 2, %[count], %[count] ? %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tshld %[count], 32, %[tmp] ? %%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tord %[dst], %[tmp], %[dst] ? %%pred2" \ + "\n\tadds 1, 0, %[success] ? %%pred2" \ + "\n\tadds 0, 0, %[success] ? ~%%pred2" \ + "\n\t}" \ + "\n\t{" \ + "\n\tstd,2 %[addr], %[dst], mas=" LOCK_MB_ATOMIC_MAS \ + "\n\tibranch 1b ? %%MLOCK" \ + "\n\t}" \ + MB_AFTER_ATOMIC_LOCK_MB \ + : [success] "=&r" (__success), \ + [dst] "=&r" (__dst), \ + [head] "=&r" (__head), \ + [count] "=&r" (__count), \ + [tmp] "=&r" (__tmp), \ + [addr] "+m" (*(__rw_addr)) \ + : [ticket] "r" (__ticket) \ + : "memory", "pred2", "pred3"); \ + HWBUG_ATOMIC_END(); \ +}) + +/* + * Unlocking of write spinlock. + * Need only increment # of queue head and decrement active writers counter + * (positive counter - can be only one writer, so set counter to 0) + * Macros return current updated state of read/write lock. 
+ * + * C equivalent: + * +static rwlock_val_t +atomic_free_lock_writer(arch_rwlock_t *rw) +{ + arch_rwlock_t dst_lock; + + dst_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0); + dst_lock.count++; + dst_lock.head++; + E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0); + return dst_lock.lock; +} + */ +#define NATIVE_ATOMIC_FREE_LOCK_WRITER(__rw_addr, \ + __head, __count, __dst, __tmp); \ +({ \ + HWBUG_ATOMIC_BEGIN(__rw_addr); \ + asm NOT_VOLATILE ( \ + "\n1:" \ + "\n\t{" \ + "\n\tnop 4" \ + "\n\tldd,0 %[addr], %[dst], mas=0x7" \ + "\n\t}" \ + "\n\t{" \ + "\n\tsard %[dst], 32, %[count]" \ + "\n\tgetfd %[dst], 0x400, %[head]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tadds %[head], 1, %[head]" \ + "\n\tsubs %[count], 1, %[count]" \ + "\n\tandd %[dst], 0xffff0000, %[dst]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tsxt 5, %[head], %[head]" \ + "\n\tsxt 2, %[count], %[count]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tshld %[count], 32, %[tmp]" \ + "\n\tord %[dst], %[head], %[dst]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tord %[dst], %[tmp], %[dst]" \ + "\n\t}" \ + "\n\t{" \ + "\n\tstd,2 %[addr], %[dst], mas=" LOCK_MB_ATOMIC_MAS \ + "\n\tibranch 1b ? %%MLOCK" \ + "\n\t}" \ + MB_AFTER_ATOMIC_LOCK_MB \ + : [dst] "=&r" (__dst), \ + [head] "=&r" (__head), \ + [count] "=&r" (__count), \ + [tmp] "=&r" (__tmp), \ + [addr] "+m" (*(__rw_addr)) \ + :: "memory"); \ + HWBUG_ATOMIC_END(); \ +}) + + +/* + * Atomic operations with return value and acquire/release semantics + */ + +#define NATIVE_ATOMIC_FETCH_OP_UNLESS(__val, __addr, __unless, __tmp, __rval, \ + size_letter, op, op_pred, add_op, add_op_pred, cmp_op, mem_model) \ +do { \ + HWBUG_ATOMIC_BEGIN(__addr); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_##mem_model \ + "\n1:" \ + "\n{"\ + "\nnop 4"\ + "\nld"#size_letter ",0 %[addr], %[rval], mas=0x7" \ + "\n}" \ + "\n{"\ + "\nnop" \ + "\n" cmp_op " %[rval], %[unless], %%pred2" \ + "\n}" \ + "\n{"\ + "\nnop 2" /* bug 92891 - optimize for performance */ \ + "\n" op " %[rval], %[val], %[tmp] ? 
" op_pred "%%pred2" \ + "\n" add_op " %[rval], 0, %[tmp] ? " add_op_pred "%%pred2" \ + "\n}" \ + "\n{"\ + "\nst"#size_letter "," mem_model##_ATOMIC_CHANNEL \ + " %[addr], %[tmp], mas=" mem_model##_ATOMIC_MAS \ + "\nibranch 1b ? %%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_##mem_model \ + : [rval] "=&r" (__rval), [tmp] "=&r" (__tmp), \ + [addr] "+m" (*(__addr)) \ + : [val] "ir" (__val), [unless] "ir" (__unless) \ + CLOBBERS_PRED2_##mem_model); \ + HWBUG_ATOMIC_END(); \ +} while (0) + +#define NATIVE_ATOMIC_FETCH_XCHG_UNLESS(__val, __addr, __tmp, __rval, \ + size_letter, merge_op, cmp_op, mem_model) \ +do { \ + HWBUG_ATOMIC_BEGIN(__addr); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_##mem_model \ + "\n1:" \ + "\n{"\ + "\nnop 4"\ + "\nld"#size_letter ",0 %[addr], %[rval], mas=0x7" \ + "\n}" \ + "\n{"\ + "\nnop" \ + "\n" cmp_op " %[rval], %[val], %%pred2" \ + "\n}" \ + "\n{"\ + "\nnop 2" /* bug 92891 - optimize for performance */ \ + "\n" merge_op " %[rval], %[val], %[tmp], %%pred2" \ + "\n}" \ + "\n{"\ + "\nst"#size_letter "," mem_model##_ATOMIC_CHANNEL \ + " %[addr], %[tmp], mas=" mem_model##_ATOMIC_MAS \ + "\nibranch 1b ? %%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_##mem_model \ + : [rval] "=&r" (__rval), [tmp] "=&r" (__tmp), \ + [addr] "+m" (*(__addr)) \ + : [val] "ir" (__val) \ + CLOBBERS_PRED2_##mem_model); \ + HWBUG_ATOMIC_END(); \ +} while (0) + +#define NATIVE_ATOMIC_XCHG_RETURN(__val, __addr, __rval, \ + size_letter, mem_model) \ +do { \ + HWBUG_ATOMIC_BEGIN(__addr); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_##mem_model \ + "\n2:" \ + "\n{"\ + "\nnop 5" /* bug 92891 - optimize for performance */ \ + "\nld"#size_letter ",0 %[addr], %[rval], mas=0x7" \ + "\n}" \ + "\n{"\ + "\nst"#size_letter "," mem_model##_ATOMIC_CHANNEL \ + " %[addr], %[val], mas=" mem_model##_ATOMIC_MAS \ + "\nibranch 2b ? 
%%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_##mem_model \ + : [rval] "=&r" (__rval), [addr] "+m" (*(__addr)) \ + : [val] "r" (__val) \ + CLOBBERS_##mem_model); \ + HWBUG_ATOMIC_END(); \ +} while (0) + +#define CLOBBERS_PRED2_LOCK_MB : "memory", "pred2" +#define CLOBBERS_PRED2_ACQUIRE_MB : "memory", "pred2" +#define CLOBBERS_PRED2_RELEASE_MB : "memory", "pred2" +#define CLOBBERS_PRED2_STRONG_MB : "memory", "pred2" +#define CLOBBERS_PRED2_RELAXED_MB : "pred2" + + +#define CLOBBERS_PRED2_3_R16_17_LOCK_MB : "memory", "pred2", "pred3",\ + "r16", "r17" +#define CLOBBERS_PRED2_3_R16_17_ACQUIRE_MB : "memory", "pred2", "pred3",\ + "r16", "r17" +#define CLOBBERS_PRED2_3_R16_17_RELEASE_MB : "memory", "pred2", "pred3",\ + "r16", "r17" +#define CLOBBERS_PRED2_3_R16_17_STRONG_MB : "memory", "pred2", "pred3",\ + "r16", "r17" +#define CLOBBERS_PRED2_3_R16_17_RELAXED_MB :"pred2", "pred3", "r16", "r17" + + +#define NATIVE_ATOMIC_CMPXCHG_RETURN(__old, __new, __addr, __stored_val, \ + __rval, size_letter, sxt_size, mem_model) \ +do { \ + HWBUG_ATOMIC_BEGIN(__addr); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_##mem_model \ + "\n3:" \ + "\n{"\ + "\nnop 4"\ + "\nld"#size_letter ",0 %[addr], %[rval], mas=0x7" \ + "\n}" \ + "\n{" \ + "\nsxt\t"#sxt_size", %[rval], %[rval]" \ + "\naddd 0x0, %[new], %[stored_val]" \ + "\n}" \ + "\n{" \ + "\nnop 1" \ + "\ncmpedb %[rval], %[old], %%pred2" \ + "\n}" \ + "\n{" \ + "\nnop 1" /* bug 92891 - optimize for performance */ \ + "\naddd 0x0, %[rval], %[stored_val] ? ~ %%pred2" \ + "\n}" \ + "\n{" \ + "\nst"#size_letter "," mem_model##_ATOMIC_CHANNEL \ + " %[addr], %[stored_val], mas=" mem_model##_ATOMIC_MAS \ + "\nibranch 3b ? 
%%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_##mem_model \ + : [rval] "=&r" (__rval), [stored_val] "=&r" (__stored_val), \ + [addr] "+m" (*(__addr)) \ + : [new] "ir" (__new), [old] "ir" (__old) \ + CLOBBERS_PRED2_##mem_model); \ + HWBUG_ATOMIC_END(); \ +} while (0) + +#define NATIVE_ATOMIC_CMPXCHG_WORD_RETURN(__old, __new, __addr, \ + __stored_val, __rval, mem_model) \ +do { \ + HWBUG_ATOMIC_BEGIN(__addr); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_##mem_model \ + "\n3:" \ + "\n{"\ + "\nnop 4"\ + "\nldw,0 %[addr], %[rval], mas=0x7" \ + "\n}" \ + "\n{" \ + "\nnop 1"\ + "\nadds 0x0, %[new], %[stored_val]" \ + "\ncmpesb %[rval], %[old], %%pred2" \ + "\n}" \ + "\n{" \ + "\nnop 2" /* bug 92891 - optimize for performance */ \ + "\nadds 0x0, %[rval], %[stored_val] ? ~ %%pred2" \ + "\n}" \ + "\n{" \ + "\nstw," mem_model##_ATOMIC_CHANNEL \ + " %[addr], %[stored_val], mas=" mem_model##_ATOMIC_MAS \ + "\nibranch 3b ? %%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_##mem_model \ + : [stored_val] "=&r" (__stored_val), \ + [rval] "=&r" (__rval), [addr] "+m" (*(__addr)) \ + : [new] "ir" (__new), [old] "ir" (__old) \ + CLOBBERS_PRED2_##mem_model); \ + HWBUG_ATOMIC_END(); \ +} while (0) + +#define NATIVE_ATOMIC_CMPXCHG_DWORD_RETURN(__old, __new, __addr, \ + __stored_val, __rval, mem_model) \ +do { \ + HWBUG_ATOMIC_BEGIN(__addr); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_##mem_model \ + "\n3:" \ + "\n{"\ + "\nnop 4"\ + "\nldd,0 %[addr], %[rval], mas=0x7" \ + "\n}" \ + "\n{" \ + "\nnop 1"\ + "\naddd 0x0, %[new], %[stored_val]" \ + "\ncmpedb %[rval], %[old], %%pred2" \ + "\n}" \ + "\n{" \ + "\nnop 2" /* bug 92891 - optimize for performance */ \ + "\naddd 0x0, %[rval], %[stored_val] ? ~ %%pred2" \ + "\n}" \ + "\n{" \ + "\nstd," mem_model##_ATOMIC_CHANNEL \ + " %[addr], %[stored_val], mas=" mem_model##_ATOMIC_MAS \ + "\nibranch 3b ? 
%%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_##mem_model \ + : [stored_val] "=&r" (__stored_val), \ + [rval] "=&r" (__rval), [addr] "+m" (*(__addr)) \ + : [new] "ir" (__new), [old] "ir" (__old) \ + CLOBBERS_PRED2_##mem_model); \ + HWBUG_ATOMIC_END(); \ +} while (0) + +#ifdef CONFIG_HAVE_CMPXCHG_DOUBLE +/* + * Some problem to use ldq/stq operations + * C language don't use quadro operands + * To avoid some changes of compiler we use fixed register for those operations + * r16 r17 + * + * C equivalent: + if (page->freelist == freelist_old && + page->counters == counters_old) { + page->freelist = freelist_new; + page->counters = counters_new; + */ +#define NATIVE_ATOMIC_CMPXCHG_DWORD_PAIRS(__addr, __old1, __old2, \ + __new1, __new2, __rval, mem_model) \ +do { \ + asm NOT_VOLATILE( \ + MB_BEFORE_ATOMIC_##mem_model \ + "\n3:" \ + "\n{"\ + "\nnop 5" /* bug 92891 - optimize for performance */ \ + "\nldq,0 %[addr], %%r16, mas=0x5" \ + "\naddd 0x1, 0x0, %[rval]" \ + "\n}" \ + "\n{" \ + "\ncmpedb %[old1], %%r16, %%pred2" \ + "\ncmpedb %[old2], %%r17, %%pred3" \ + "\n}" \ + "\n\t{" \ + "\nnop 1"\ + "\n\tpass %%pred2, @p0" \ + "\n\tpass %%pred3, @p1" \ + "\n\tlandp @p0, @p1, @p4"\ + "\n\tpass @p4, %%pred2" \ + "\n\t}" \ + "\n{" \ + "\naddd 0x0, %[new1], %%r16 ? %%pred2" \ + "\naddd 0x0, %[new2], %%r17 ? %%pred2" \ + "\naddd 0x0, 0x0, %[rval]? ~ %%pred2" \ + "\n}" \ + "\n{" \ + "\nstq," mem_model##_ATOMIC_CHANNEL \ + " %[addr], %%r16, mas=" mem_model##_ATOMIC_MAS \ + "\nibranch 3b ? 
%%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_##mem_model \ + : [rval] "=&r" (__rval), [addr] "+m" (*(__addr)) \ + : [new1] "ir" (__new1), [old1] "ir" (__old1), \ + [new2] "ir" (__new2), [old2] "ir" (__old2) \ + CLOBBERS_PRED2_3_R16_17_##mem_model); \ +} while (0) +#endif /* CONFIG_HAVE_CMPXCHG_DOUBLE */ + +/* Get number of leading zeroes */ +#define E2K_LZCNTS(val) \ +({ \ + register __e2k_u32_t __res; \ + asm ("lzcnts %1, %0" : "=r" (__res) : "r" (val)); \ + __res; \ +}) + +#define E2K_LZCNTD(val) \ +({ \ + register __e2k_u64_t __res; \ + asm ("lzcntd %1, %0" : "=r" (__res) : "r" (val)); \ + __res; \ +}) + +/* Get number of 1's */ +#define E2K_POPCNTS(val) \ +({ \ + register __e2k_u32_t __res; \ + asm ("popcnts %1, %0" : "=r" (__res) : "r" (val)); \ + __res; \ +}) + +#define E2K_POPCNTD(val) \ +({ \ + register __e2k_u64_t __res; \ + asm ("popcntd %1, %0" : "=r" (__res) : "r" (val)); \ + __res; \ +}) + +#define _mem_mod 0x2000 /* watch for modification */ +#define _int 0x1000 /* stop the conveyor untill interrupt */ +#define _mt 0x800 +#define _lal 0x400 /* load-after-load modifier for _ld_c */ +#define _las 0x200 /* load-after-store modifier for _st_c */ +#define _sal 0x100 /* store-after-load modifier for _ld_c */ +#define _sas 0x80 /* store-after-store modifier for _st_c */ +/* "trap=1" requires special handling, see C1_wait_trap() so don't + * define it here, as using it in E2K_WAIT() makes no sense. */ +#define _ma_c 0x20 /* stop until all memory operations complete */ +#define _fl_c 0x10 /* stop until TLB/cache flush operations complete */ +#define _ld_c 0x8 /* stop until all load operations complete */ +#define _st_c 0x4 /* stop until all store operations complete */ +#define _all_e 0x2 /* stop until prev. operations issue all exceptions */ +#define _all_c 0x1 /* stop until prev. 
operations complete */ + +#if (!defined CONFIG_E2K_MACHINE && defined CONFIG_E2K_MINVER_V2) || \ + defined CONFIG_E2K_ES2_DSP || defined CONFIG_E2K_ES2_RU || \ + ((defined CONFIG_E2K_E2S || defined CONFIG_E2K_MINVER_V3) && defined CONFIG_NUMA) +# define WORKAROUND_WAIT_HWBUG(num) (((num) & (_st_c | _all_c | _sas)) ? \ + ((num) | _ma_c) : (num)) +#else +# define WORKAROUND_WAIT_HWBUG(num) num +#endif + +#ifndef __ASSEMBLY__ +/* We use a static inline function instead of a macro + * because otherwise the preprocessed files size will + * increase tenfold making compile times much worse. */ +__attribute__((__always_inline__)) +static inline void __E2K_WAIT(int _num) +{ + int unused, num = WORKAROUND_WAIT_HWBUG(_num); + instr_cs1_t cs1 = { + .opc = CS1_OPC_WAIT, + .param = num + }; + + /* Use "asm volatile" around tricky barriers such as _ma_c, _fl_c, etc */ + if (_num & ~(_st_c | _ld_c | _sas | _sal | _las | _lal | _mt)) + asm volatile ("" ::: "memory"); + + /* Header dependency hell, cannot use here: + * cpu_has(CPU_HWBUG_SOFT_WAIT_E8C2) + * so just check straight for E8C2 */ + if (IS_ENABLED(CONFIG_CPU_E8C2) && (num & (_sas | _sal))) + asm ("{nop}" ::: "memory"); + + /* CPU_NO_HWBUG_SOFT_WAIT: use faster workaround for "lal" barriers */ + if (_num == (_ld_c | _lal) || _num == (_ld_c | _lal | _mt)) { +#pragma no_asm_inline + asm NOT_VOLATILE (ALTERNATIVE( + /* Default version - add "nop 5" after and a separate + * wide instruction before the barrier. 
*/ + "{nop}" + ".word 0x00008281\n" + ".word %[cs1]\n", + /* CPU_NO_HWBUG_SOFT_WAIT version */ + ".word 0x00008011\n" + ".word %[cs1]\n" + ".word 0x0\n" + ".word 0x0\n", + %[facility]) + : "=r" (unused) + : [cs1] "i" (cs1.word), + [facility] "i" (CPU_NO_HWBUG_SOFT_WAIT) + : "memory"); + } else { + instr_cs1_t cs1_no_soft_barriers = { + .opc = CS1_OPC_WAIT, + .param = num & ~(_lal | _las | _sal | _sas) + }; + /* #79245 - use .word to encode relaxed barriers */ +#pragma no_asm_inline + asm NOT_VOLATILE (ALTERNATIVE( + /* Default version */ + ".word 0x00008001\n" + ".word %[cs1_no_soft_barriers]\n", + /* CPU_NO_HWBUG_SOFT_WAIT version - use soft barriers */ + ".word 0x00008001\n" + ".word %[cs1]\n", + %[facility]) + : "=r" (unused) + : [cs1] "i" (cs1.word), + [cs1_no_soft_barriers] "i" (cs1_no_soft_barriers.word), + [facility] "i" (CPU_NO_HWBUG_SOFT_WAIT) + : "memory"); + } + + /* Use "asm volatile" around tricky barriers such as _ma_c, _fl_c, etc */ + if (_num & ~(_st_c | _ld_c | _sas | _sal | _las | _lal | _mt)) + asm volatile ("" ::: "memory"); +} +#endif + +#define E2K_WAIT(num) \ +do { \ + __E2K_WAIT(num); \ + if (num & (_st_c | _ld_c | _all_c | _ma_c)) \ + NATIVE_HWBUG_AFTER_LD_ACQ(); \ +} while (0) + +/* + * IMPORTANT NOTE!!! + * Do not add 'sas' and 'sal' here, as they are modifiers + * for st_c/ld_c which make them _less_ restrictive. + */ +#define E2K_WAIT_OP_ALL_MASK (_ma_c | _fl_c | _ld_c | _st_c | _all_c | _all_e) + +#define E2K_WAIT_MA E2K_WAIT(_ma_c) +#define E2K_WAIT_FLUSH E2K_WAIT(_fl_c) +#define E2K_WAIT_LD E2K_WAIT(_ld_c) +#define E2K_WAIT_ST E2K_WAIT(_st_c) +#define E2K_WAIT_ALL_OP E2K_WAIT(_all_c) +#define E2K_WAIT_ALL_EX E2K_WAIT(_all_e) +#define E2K_WAIT_ALL E2K_WAIT(E2K_WAIT_OP_ALL_MASK) +#define __E2K_WAIT_ALL __E2K_WAIT(E2K_WAIT_OP_ALL_MASK) + +/* Wait for the load to finish before issuing + * next memory loads/stores. 
*/ +#define E2K_RF_WAIT_LOAD(reg) \ +do { \ + int unused; \ + asm NOT_VOLATILE ("{adds %1, 0, %%empty}" \ + : "=r" (unused) \ + : "r" (reg) \ + : "memory"); \ + NATIVE_HWBUG_AFTER_LD_ACQ(); \ +} while (0) + +#define E2K_FLUSHTS \ +do { \ + _Pragma("no_asm_inline") \ + asm volatile ("flushts"); \ +} while (0) + +/* + * Hardware stacks flush rules for e2k: + * + * 1) PSP/PCSP/PSHTP/PCSHTP reads wait for the corresponding SPILL/FILL + * to finish (whatever the reason for SPILL/FILL is - "flushc", "flushr", + * register file overflow, etc). "rr" must not be in the same wide + * instruction as "flushc"/"flushr". + * + * 2) CWD reads wait for the chain stack SPILL/FILL to finish. + * + * 3) On e3m SPILL/FILL were asynchronous and "wait all_e=1" should had + * been used between SPILL/FILL operations and memory accesses. This is + * not needed anymore. + * + * 4) PSP/PCSP writes wait _only_ for SPILL. So if we do not know whether + * there can be a FILL going right now then some form of wait must be + * inserted before the write. Also writing PSHTP/PCSHTP has undefined + * behavior in instruction set, so using it is not recommended because + * of compatibility with future processors. + * + * 5) "wait ma_c=1" waits for all memory accesses including those issued + * by SPILL/FILL opertions. It does _not_ wait for SPILL/FILL itself. + * + * 6) Because of hardware bug #102582 "flushr" shouldn't be in the first + * command after "call". 
+ */ + +#define NATIVE_FLUSHR \ +do { \ + asm volatile ("{nop} {flushr}" ::: "memory"); \ +} while (0) + +#define NATIVE_FLUSHC \ +do { \ + asm volatile ("{nop 2} {flushc; nop 3}" ::: "memory"); \ +} while (0) + +#define NATIVE_FLUSHCPU \ +do { \ + asm volatile ("{nop 2} {flushc; nop 3} {flushr}" ::: "memory"); \ +} while (0) + +#define NATIVE_FLUSH_ALL_TC \ +({ \ + register __e2k_u64_t res; \ + asm volatile ("{nop 3; invtc 0x0, %0}" \ + : "=r" (res)); \ + res; \ +}) + +#define DO_ATOMIC_WRITE_PSR_REG_VALUE(greg_no, psr_off, psr_value, \ + under_upsr_off, under_upsr_bool) \ +({ \ + asm volatile ( \ + "{\n\t" \ + " stw %%dg" #greg_no ", [%0], %2\n\t" \ + " stb %%dg" #greg_no ", [%1], %3\n\t" \ + "}" \ + : \ + : "ri" ((__e2k_u64_t)(psr_off)), \ + "ri" ((__e2k_u64_t)(under_upsr_off)), \ + "r" ((__e2k_u32_t)(psr_value)), \ + "r" ((__e2k_u8_t)(under_upsr_bool))); \ +}) +#define KVM_DO_ATOMIC_WRITE_PSR_REG_VALUE(greg_no, psr_off, psr_value, \ + under_upsr_off, under_upsr_bool) \ + DO_ATOMIC_WRITE_PSR_REG_VALUE(greg_no, psr_off, psr_value, \ + under_upsr_off, under_upsr_bool) \ + +#define DO_ATOMIC_WRITE_UPSR_REG_VALUE(greg_no, upsr_off, upsr_value) \ +({ \ + asm volatile ( \ + "{\n\t" \ + " rws %1, %%upsr\n\t" \ + " stw %%dg" #greg_no ", [%0], %1\n\t" \ + "}" \ + : \ + : "ri" ((__e2k_u64_t)(upsr_off)), \ + "r" ((__e2k_u32_t)(upsr_value))); \ +}) +#define KVM_DO_ATOMIC_WRITE_UPSR_REG_VALUE(greg_no, upsr_off, upsr_value) \ + DO_ATOMIC_WRITE_UPSR_REG_VALUE(greg_no, upsr_off, upsr_value) + +#define NATIVE_GET_TCD() \ +({ \ + register __e2k_u64_t res; \ + asm volatile ( \ + "\n\t{gettc \t0x1 , %%ctpr1; nop 5}" \ + "\n\trrd \t%%ctpr1, %0" \ + : "=r" (res) : : "ctpr1" ); \ + res; \ +}) + +#define NATIVE_SET_TCD(val) \ +({ \ + asm volatile ("{puttc %0, 0x0 , %%tcd}" \ + : \ + :"r" (val)); \ +}) + +#define E2K_BUBBLE(num) \ +do { \ + asm volatile ("{nop %0}" \ + : \ + : "i" (num & 0x7) \ + : "memory"); \ +} while (0) + +/* Add ctpr3 to clobbers to explain to lcc that this + * GNU 
asm does a return. */ +#define E2K_DONE() \ +do { \ + /* #80747: must repeat interrupted barriers */ \ + asm volatile ("{nop 3; wait st_c=1} {done}" ::: "ctpr3"); \ +} while (0) + +#define NATIVE_RETURN() \ +do { \ + asm volatile( "{\n" \ + "return %%ctpr3\n" \ + "}\n" \ + "{\n" \ + "ct %%ctpr3\n" \ + "}\n" \ + : \ + : \ + : "ctpr3"); \ +} while (0) + +#define NATIVE_RETURN_VALUE(rval) \ +do { \ + asm volatile( "{\n" \ + "return %%ctpr3\n" \ + "addd %[r0], 0, %%dr0\n" \ + "}\n" \ + "{\n" \ + "ct %%ctpr3\n" \ + "}\n" \ + : \ + : [r0] "ir" (rval) \ + : "ctpr3"); \ +} while (0) + +#define E2K_SYSCALL_RETURN NATIVE_RETURN_VALUE + +#define E2K_EMPTY_CMD(input...) \ +do { \ + asm volatile ("{nop}" :: input); \ +} while (0) + +#define E2K_PSYSCALL_RETURN(r0, r1, r2, r3, tag2, tag3) \ +do { \ + asm volatile ( "{\n" \ + "return %%ctpr3\n" \ + "puttagd %[_r2], %[_tag2], %%dr2\n" \ + "puttagd %[_r3], %[_tag3], %%dr3\n" \ + "addd %[_r0], 0, %%dr0\n" \ + "addd %[_r1], 0, %%dr1\n" \ + "}\n" \ + "{\n" \ + "ct %%ctpr3\n" \ + "}\n" \ + :: [_r0] "ir" (r0), [_r1] "ir" (r1), \ + [_r2] "ir" (r2), [_r3] "ir" (r3), \ + [_tag2] "ir" (tag2), [_tag3] "ir" (tag3) \ + : "ctpr3"); \ +} while (0) + + +#define GET_USER_ASM(_x, _addr, fmt, __ret_gu) \ + asm ( \ + "1:\n" \ + ALTERNATIVE_1_ALTINSTR \ + /* CPU_FEAT_ISET_V6 version */ \ + "{ld" #fmt "[ %[addr] + 0 ], %[x]\n" \ + " adds 0, 0, %[ret]\n" \ + " nop 4}\n" \ + ALTERNATIVE_2_OLDINSTR \ + /* Default version */ \ + "{ld" #fmt "[ %[addr] + 0 ], %[x]\n" \ + " adds 0, 0, %[ret]\n" \ + " nop 2}\n" \ + ALTERNATIVE_3_FEATURE(%[facility]) \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3:{adds 0, %[efault], %[ret]\n" \ + " ibranch 2b}\n" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n" \ + ".dword 1b, 3b\n" \ + ".previous\n" \ + : [ret] "=r" (__ret_gu), [x] "=r"(_x) \ + : [addr] "m" (*(_addr)), [efault] "i" (-EFAULT), \ + [facility] "i" (CPU_FEAT_ISET_V6)) \ + +#define PUT_USER_ASM(x, ptr, fmt, retval) \ + asm ("1:{st" #fmt "%1, %2\n" \ + " adds 
0, 0, %0}\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3:\n{adds 0, %3, %0\n" \ + " ibranch 2b}\n" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n" \ + ".dword 1b, 3b\n" \ + ".previous\n" \ + : "=r" (retval), "=m" (*ptr) \ + : "r" (x), "i" (-EFAULT)) + +#define LOAD_UNALIGNED_ZEROPAD(_addr) \ +({ \ + u64 *__addr = (u64 *) (_addr); \ + u64 _ret, _aligned_addr, _offset; \ + asm ( "1:\n" \ + " ldd [ %[addr] + 0 ], %[ret]\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3:\n" \ + "{\n" \ + " andnd %[addr_val], 7, %[aligned_addr]\n" \ + " andd %[addr_val], 7, %[offset]\n" \ + "}\n" \ + "{\n" \ + " nop 4\n" \ + " ldd [ %[aligned_addr] + 0 ], %[ret]\n" \ + " shld %[offset], 3, %[offset]\n" \ + "}\n" \ + "{\n" \ + " shrd %[ret], %[offset], %[ret]\n" \ + " ibranch 2b\n" \ + "}\n" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n" \ + ".dword 1b, 3b\n" \ + ".previous\n" \ + : [ret] "=&r" (_ret), [offset] "=&r" (_offset), \ + [aligned_addr] "=&r" (_aligned_addr) \ + : [addr] "m" (*__addr), \ + [addr_val] "r" (__addr)); \ + _ret; \ +}) + +#ifdef CONFIG_DEBUG_BUGVERBOSE + +# define __EMIT_BUG(_flags) \ + asm ("1:\n" \ + "{.word 0x00008001\n" /* SETSFT */ \ + " .word 0x28000000}\n" \ + ".section .rodata.str,\"aMS\",@progbits,1\n" \ + "2: .asciz \""__FILE__"\"\n" \ + ".previous\n" \ + ".section __bug_table,\"aw\"\n" \ + "3:\n" \ + ".word 1b - 3b\n" /* bug_entry:bug_addr_disp */ \ + ".word 2b - 3b\n" /* bug_entry:file_disp */ \ + ".short %[line]\n" /* bug_entry:line */ \ + ".short %[flags]\n" /* bug_entry:flags */ \ + ".org 3b + %[entry_size]\n" \ + ".previous\n" \ + :: [line] "i" (__LINE__), [flags] "i" (_flags), \ + [entry_size] "i" (sizeof(struct bug_entry))) + +#else + +# define __EMIT_BUG(_flags) \ + asm ("1:\n" \ + "{.word 0x00008001\n" /* SETSFT */ \ + " .word 0x28000000}\n" \ + ".section __bug_table,\"aw\"\n" \ + "3:\n" \ + ".word 1b - 3b\n" /* bug_entry:bug_addr_disp */ \ + ".short %[flags]\n" /* bug_entry:flags */ \ + ".org 3b + %[entry_size]\n" \ + 
".previous\n" \ + :: [flags] "i" (_flags), \ + [entry_size] "i" (sizeof(struct bug_entry))) + +#endif + +#ifndef __ASSEMBLY__ +/* new version */ +/* + * this code used before call printk in special procedures + * sp register is used to pass parameters for printk + */ +static inline void E2K_SET_USER_STACK(int x) +{ + register __e2k_ptr_t sp asm ("%SP"); + if (__builtin_constant_p(x) ) { + if (x) { + asm volatile ("{getsp -1024, %0\n\t}" + : "=r" (sp)); + } + } else { + /* special for compiler error */ + /* fix gcc problem - warning */ +#ifdef __LCC__ + asm ("" : : "i"(x)); /* hook!! parameter must be const */ +#endif /* __LCC__ */ + } +} +#endif /* __ASSEMBLY__ */ + + +#define E2K_GET_FP() \ +({ \ + register __e2k_ptr_t res; \ + asm volatile ("addd \t0x0, %F0, %0" \ + : "=r" (res)); \ + res; \ +}) + +#define E2K_SET_FP( val) \ +({ \ + asm volatile ("addd \t0x0, %0, %F0" \ + : \ + : "ri" ((__e2k_ptr_t) val)); \ +}) + +#define E2K_GET_SP() \ +({ \ + register __e2k_ptr_t res; \ + asm volatile ("addd \t0x0, %S0, %0" \ + : "=r" (res)); \ + res; \ +}) + +#define E2K_SET_SP( val) \ +({ \ + asm volatile ("addd \t0x0, %0, %S0" \ + : \ + : "ri" ((__e2k_ptr_t) val)); \ +}) + +#define E2K_NOP(nr) __asm__ __volatile__("{nop " #nr "}" ::: "memory") + +#ifdef CONFIG_SMP +# define SMP_ONLY(...) __VA_ARGS__ +#else +# define SMP_ONLY(...) 
+#endif + +#ifdef CONFIG_CPU_HAS_FILL_INSTRUCTION +# define NATIVE_FILL_HARDWARE_STACKS() \ + asm volatile ("{fillc; fillr}" ::: "memory") +#else +# define NATIVE_FILL_HARDWARE_STACKS() \ +do { \ + asm volatile ( \ + "{\n" \ + "nop 4\n" \ + "return %%ctpr3\n" \ + "movtd [ 0f ], %%dg" __stringify(GUEST_VCPU_STATE_GREG) "\n" \ + "}\n" \ + "{\n" \ + "rrd %%wd, %%dg" __stringify(CURRENT_TASK_GREG) "\n" \ + "}\n" \ + "{\n" \ + "rrd %%br, %%dg" __stringify(SMP_CPU_ID_GREG) "\n" \ + "ct %%ctpr3\n" \ + "}\n" \ + "0:\n" \ + "{\n" \ + "rwd %%dg" __stringify(CURRENT_TASK_GREG) ", %%wd\n" \ + "}\n" \ + "{\n" \ + "rwd %%dg" __stringify(SMP_CPU_ID_GREG) ", %%br\n" \ + "}\n" \ + "{\n" \ + "nop 3\n" \ + SMP_ONLY("ldw %%dg" __stringify(GUEST_VCPU_STATE_GREG) ", " \ + "%[task_ti_cpu_delta], " \ + "%%dg" __stringify(SMP_CPU_ID_GREG) "\n") \ + "subd %%dg" __stringify(GUEST_VCPU_STATE_GREG) ", " \ + "%[task_ti_offset], " \ + "%%dg" __stringify(CURRENT_TASK_GREG) "\n" \ + "}\n" \ + "{\n" \ + "nop\n" /* For "rwd %wd" */ \ + "}\n" \ + :: SMP_ONLY([task_ti_cpu_delta] "i" (offsetof(struct task_struct, cpu) - \ + offsetof(struct task_struct, thread_info)),) \ + [task_ti_offset] "i" (offsetof(struct task_struct, thread_info)) \ + : "ctpr1", "ctpr3", "memory"); \ +} while (0) +#endif + +#ifndef __ASSEMBLY__ + +#define E2K_PARALLEL_WRITE(addr1, val1, addr2, val2) \ +{ \ + asm volatile ("{\n\t" \ + " std 0x0, %2, %4\n\t" \ + " std 0x0, %3, %5\n\t" \ + "}" \ + : "=m" (*(addr1)), "=m" (*(addr2)) \ + : "r" (addr1), "r" (addr2), "r" (val1), "r" (val2)); \ +} + +/* + * Macroses to construct alternative return point from trap + */ + +#define STICK_ON_REG(reg) asm( #reg ) + +#define SAVE_CURRENT_ADDR(_ptr) \ +do { \ + unsigned long long _tmp; \ + _Pragma("no_asm_inline") \ + asm volatile ("movtd [ 0f ], %[tmp]\n" \ + "std [ %[ptr] ], %[tmp]\n" \ + "0:" \ + : [ptr] "=m" (*(_ptr)), [tmp] "=&r" (_tmp)); \ +} while (0) + +#define DO_FUNC_TO_NAME(func) #func +#define FUNC_TO_NAME(func) DO_FUNC_TO_NAME(func) 
+ +#define GET_LBL_ADDR(name, where) \ + _Pragma("no_asm_inline") \ + asm ("movtd [" name "], %0" : "=r" (where)) + +#define E2K_JUMP(func) E2K_JUMP_WITH_ARGUMENTS(func, 0) + +#define E2K_JUMP_WITH_ARGUMENTS(func, num_args, ...) \ + __E2K_JUMP_WITH_ARGUMENTS_##num_args(func, ##__VA_ARGS__) + +#define __E2K_JUMP_WITH_ARGUMENTS_0(func) \ +do { \ + asm volatile ("{\n" \ + "disp %%ctpr1, %0\n" \ + "}\n" \ + "ct %%ctpr1\n" \ + :: "i" (&(func)) : "ctpr1"); \ + unreachable(); \ +} while (0) + +#define __E2K_JUMP_WITH_ARGUMENTS_1(func, arg1) \ +do { \ + asm volatile ("{\n" \ + "disp %%ctpr1, %1\n" \ + "addd %0, 0, %%dr0\n" \ + "}\n" \ + "ct %%ctpr1\n" \ + : \ + : "ri" ((u64) (arg1)), "i" (&(func)) \ + : "ctpr1", "r0"); \ + unreachable(); \ +} while (0) + +#define __E2K_JUMP_WITH_ARGUMENTS_2(func, arg1, arg2) \ +do { \ + asm volatile ("{\n" \ + "disp %%ctpr1, %2\n" \ + "addd %0, 0, %%dr0\n" \ + "addd %1, 0, %%dr1\n" \ + "}\n" \ + "ct %%ctpr1\n" \ + : \ + : "ri" ((u64) (arg1)), "ri" ((u64) (arg2)), "i" (&(func)) \ + : "ctpr1", "r0", "r1"); \ + unreachable(); \ +} while (0) + +#define __E2K_JUMP_WITH_ARGUMENTS_3(func, arg1, arg2, arg3) \ +do { \ + asm volatile ("{\n" \ + "disp %%ctpr1, %3\n" \ + "addd %0, 0, %%dr0\n" \ + "addd %1, 0, %%dr1\n" \ + "addd %2, 0, %%dr2\n" \ + "}\n" \ + "ct %%ctpr1\n" \ + : \ + : "ri" ((u64) (arg1)), "ri" ((u64) (arg2)), \ + "ri" ((u64) (arg3)), "i" (&(func)) \ + : "ctpr1", "r0", "r1", "r2"); \ + unreachable(); \ +} while (0) + +#define __E2K_JUMP_WITH_ARGUMENTS_4(func, arg1, arg2, arg3, arg4) \ +do { \ + asm volatile ("{\n" \ + "disp %%ctpr1, %4\n" \ + "addd %0, 0, %%dr0\n" \ + "addd %1, 0, %%dr1\n" \ + "addd %2, 0, %%dr2\n" \ + "addd %3, 0, %%dr3\n" \ + "}\n" \ + "ct %%ctpr1\n" \ + : \ + : "ri" ((u64) (arg1)), "ri" ((u64) (arg2)), \ + "ri" ((u64) (arg3)), "ri" ((u64) (arg4)), "i" (&(func)) \ + : "ctpr1", "r0", "r1", "r2", "r3"); \ + unreachable(); \ +} while (0) + +#define __E2K_JUMP_WITH_ARGUMENTS_5(func, arg1, arg2, arg3, arg4, arg5) \ +do { \ 
+ asm volatile ("{\n" \ + "disp %%ctpr1, %5\n" \ + "addd %0, 0, %%dr0\n" \ + "addd %1, 0, %%dr1\n" \ + "addd %2, 0, %%dr2\n" \ + "addd %3, 0, %%dr3\n" \ + "addd %4, 0, %%dr4\n" \ + "}\n" \ + "ct %%ctpr1\n" \ + : \ + : "ri" ((u64) (arg1)), "ri" ((u64) (arg2)), \ + "ri" ((u64) (arg3)), "ri" ((u64) (arg4)), \ + "ri" ((u64) (arg5)), "i" (&(func)) \ + : "ctpr1", "r0", "r1", "r2", "r3", "r4"); \ + unreachable(); \ +} while (0) + +#define __E2K_JUMP_WITH_ARGUMENTS_6(func, \ + arg1, arg2, arg3, arg4, arg5, arg6) \ +do { \ + asm volatile ("{\n" \ + "disp %%ctpr1, %6\n" \ + "addd %0, 0, %%dr0\n" \ + "addd %1, 0, %%dr1\n" \ + "addd %2, 0, %%dr2\n" \ + "addd %3, 0, %%dr3\n" \ + "addd %4, 0, %%dr4\n" \ + "addd %5, 0, %%dr5\n" \ + "}\n" \ + "ct %%ctpr1\n" \ + : \ + : "ri" ((u64) (arg1)), "ri" ((u64) (arg2)), \ + "ri" ((u64) (arg3)), "ri" ((u64) (arg4)), \ + "ri" ((u64) (arg5)), "ri" ((u64) (arg6)), "i" (&(func)) \ + : "ctpr1", "r0", "r1", "r2", "r3", "r4", "r5"); \ + unreachable(); \ +} while (0) + +#define __E2K_JUMP_FUNC_WITH_ARGUMENTS_7(func, \ + arg1, arg2, arg3, arg4, arg5, arg6, arg7) \ +do { \ + asm volatile ("{\n" \ + "disp %%ctpr1, %7\n" \ + "addd %0, 0, %%dr0\n" \ + "addd %1, 0, %%dr1\n" \ + "addd %2, 0, %%dr2\n" \ + "addd %3, 0, %%dr3\n" \ + "addd %4, 0, %%dr4\n" \ + "addd %5, 0, %%dr5\n" \ + "}\n" \ + "{\n" \ + "addd %6, 0, %%dr6\n" \ + "ct %%ctpr1\n" \ + "}\n" \ + : \ + : "ri" ((u64) (arg1)), "ri" ((u64) (arg2)), \ + "ri" ((u64) (arg3)), "ri" ((u64) (arg4)), \ + "ri" ((u64) (arg5)), "ri" ((u64) (arg6)), \ + "ri" ((u64) (arg7)), "i" (&(func)) \ + : "ctpr1", "r0", "r1", "r2", "r3", "r4", "r5", "r6"); \ + unreachable(); \ +} while (0) + +#define __E2K_JUMP_FUNC_ADDR_WITH_ARGUMENTS_7(_func_addr, \ + arg1, arg2, arg3, arg4, arg5, arg6, arg7) \ +do { \ + asm volatile ("{\n" \ + "movtd,0,sm %[func_addr], %%ctpr1\n" \ + "addd %0, 0, %%dr0\n" \ + "addd %1, 0, %%dr1\n" \ + "addd %2, 0, %%dr2\n" \ + "addd %3, 0, %%dr3\n" \ + "addd %4, 0, %%dr4\n" \ + "}\n" \ + "{\n" \ + "addd 
%5, 0, %%dr5\n" \ + "addd %6, 0, %%dr6\n" \ + "ct %%ctpr1\n" \ + "}\n" \ + : \ + : [func_addr] "r" (_func_addr), \ + "ri" ((u64) (arg1)), "ri" ((u64) (arg2)), \ + "ri" ((u64) (arg3)), "ri" ((u64) (arg4)), \ + "ri" ((u64) (arg5)), "ri" ((u64) (arg6)), \ + "ri" ((u64) (arg7)) \ + : "ctpr1", "r0", "r1", "r2", "r3", "r4", "r5", "r6"); \ + unreachable(); \ +} while (false) +#define __E2K_JUMP_WITH_ARGUMENTS_7(func, \ + arg1, arg2, arg3, arg4, arg5, arg6, arg7, is_name) \ +do { \ + if (is_name) { \ + __E2K_JUMP_FUNC_WITH_ARGUMENTS_7(func, \ + arg1, arg2, arg3, arg4, arg5, arg6, arg7); \ + } else { \ + __E2K_JUMP_FUNC_ADDR_WITH_ARGUMENTS_7(func, \ + arg1, arg2, arg3, arg4, arg5, arg6, arg7); \ + } \ +} while (false) + +#define __E2K_JUMP_FUNC_WITH_ARGUMENTS_8(func_name, \ + arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) \ +do { \ + asm volatile ("{\n" \ + "disp %%ctpr1, " func_name "\n" \ + "addd %0, 0, %%dr0\n" \ + "addd %1, 0, %%dr1\n" \ + "addd %2, 0, %%dr2\n" \ + "addd %3, 0, %%dr3\n" \ + "addd %4, 0, %%dr4\n" \ + "addd %5, 0, %%dr5\n" \ + "}\n" \ + "{\n" \ + "addd %6, 0, %%dr6\n" \ + "addd %7, 0, %%dr7\n" \ + "ct %%ctpr1\n" \ + "}\n" \ + : \ + : "ri" ((u64) (arg1)), "ri" ((u64) (arg2)), \ + "ri" ((u64) (arg3)), "ri" ((u64) (arg4)), \ + "ri" ((u64) (arg5)), "ri" ((u64) (arg6)), \ + "ri" ((u64) (arg7)), "ri" ((u64) (arg8)) \ + : "ctpr1", "r0", "r1", "r2", "r3", "r4", "r5", "r6", \ + "r7"); \ + unreachable(); \ +} while (0) +#define __E2K_JUMP_WITH_ARGUMENTS_8(func, \ + arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) \ + __E2K_JUMP_FUNC_WITH_ARGUMENTS_8(FUNC_TO_NAME(func), \ + arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) + +#ifdef CONFIG_CPU_HWBUG_IBRANCH +# define WORKAROUND_IBRANCH_HWBUG "{nop} {nop} \n" +#else +# define WORKAROUND_IBRANCH_HWBUG +#endif + +#define E2K_GOTO_ARG0(func) \ +do { \ + _Pragma("no_asm_inline") \ + asm volatile ("{ibranch " #func "}\n" \ + WORKAROUND_IBRANCH_HWBUG \ + :: ); \ +} while (0) +#define E2K_GOTO_ARG1(label, arg1) \ +do { \ 
+_Pragma("no_asm_inline") \ + asm volatile ( \ + "{\n" \ + "addd \t 0, %0, %%dr0\n" \ + "ibranch \t" #label "\n" \ + "}\n" \ + WORKAROUND_IBRANCH_HWBUG \ + : \ + : "ri" ((__e2k_u64_t) (arg1)) \ + ); \ +} while (false) +#define E2K_GOTO_ARG2(label, arg1, arg2) \ +do { \ +_Pragma("no_asm_inline") \ + asm volatile ("\n" \ + "{\n" \ + "addd \t 0, %0, %%dr0\n" \ + "addd \t 0, %1, %%dr1\n" \ + "ibranch \t" #label "\n" \ + "}\n" \ + WORKAROUND_IBRANCH_HWBUG \ + : \ + : "ri" ((__e2k_u64_t) (arg1)), \ + "ri" ((__e2k_u64_t) (arg2)) \ + ); \ +} while (false) +#define E2K_GOTO_ARG3(label, arg1, arg2, arg3) \ +do { \ +_Pragma("no_asm_inline") \ + asm volatile ("\n" \ + "{\n" \ + "addd \t 0, %0, %%dr0\n" \ + "addd \t 0, %1, %%dr1\n" \ + "addd \t 0, %2, %%dr2\n" \ + "ibranch \t" #label "\n" \ + "}\n" \ + WORKAROUND_IBRANCH_HWBUG \ + : \ + : "ri" ((__e2k_u64_t) (arg1)), \ + "ri" ((__e2k_u64_t) (arg2)), \ + "ri" ((__e2k_u64_t) (arg3)) \ + ); \ +} while (false) +#define E2K_GOTO_ARG4(label, arg1, arg2, arg3, arg4) \ +do { \ +_Pragma("no_asm_inline") \ + asm volatile ("\n" \ + "{\n" \ + "addd \t 0, %0, %%dr0\n" \ + "addd \t 0, %1, %%dr1\n" \ + "addd \t 0, %2, %%dr2\n" \ + "addd \t 0, %3, %%dr3\n" \ + "ibranch \t" #label "\n" \ + "}\n" \ + WORKAROUND_IBRANCH_HWBUG \ + : \ + : "ri" ((__e2k_u64_t) (arg1)), \ + "ri" ((__e2k_u64_t) (arg2)), \ + "ri" ((__e2k_u64_t) (arg3)), \ + "ri" ((__e2k_u64_t) (arg4)) \ + ); \ +} while (false) +#define E2K_GOTO_ARG7(label, arg1, arg2, arg3, arg4, arg5, arg6, arg7) \ +do { \ + _Pragma("no_asm_inline") \ + asm volatile ("\n" \ + "{\n" \ + "addd \t 0, %1, %%dr1\n" \ + "addd \t 0, %2, %%dr2\n" \ + "addd \t 0, %3, %%dr3\n" \ + "addd \t 0, %4, %%dr4\n" \ + "addd \t 0, %5, %%dr5\n" \ + "addd \t 0, %6, %%dr6\n" \ + "}\n" \ + "{\n" \ + "addd \t 0, %0, %%dr0\n" \ + "ibranch \t" #label "\n" \ + "}\n" \ + WORKAROUND_IBRANCH_HWBUG \ + : \ + : "i" ((__e2k_u64_t) (arg1)), \ + "ri" ((__e2k_u64_t) (arg2)), \ + "ri" ((__e2k_u64_t) (arg3)), \ + "ri" ((__e2k_u64_t) 
(arg4)), \ + "ri" ((__e2k_u64_t) (arg5)), \ + "ri" ((__e2k_u64_t) (arg6)), \ + "ri" ((__e2k_u64_t) (arg7)) \ + ); \ +} while (false) +#define E2K_SCALL_ARG7(trap_num, ret, sys_num, arg1, arg2, arg3, \ + arg4, arg5, arg6) \ +do { \ + _Pragma("no_asm_inline") \ + asm volatile ("\n" \ + "{\n" \ + "addd \t 0, %[_sys_num], %%db[0]\n" \ + "addd \t 0, %[_arg1], %%db[1]\n" \ + "addd \t 0, %[_arg2], %%db[2]\n" \ + "addd \t 0, %[_arg3], %%db[3]\n" \ + "addd \t 0, %[_arg4], %%db[4]\n" \ + "addd \t 0, %[_arg5], %%db[5]\n" \ + "}\n" \ + "{\n" \ + "addd \t 0, %[_arg6], %%db[6]\n" \ + "sdisp \t %%ctpr1, 0x"#trap_num"\n" \ + "}\n" \ + "{\n" \ + "call %%ctpr1, wbs = %#\n" \ + "}\n" \ + "{\n" \ + "addd,0,sm 0x0, %%db[0], %[_ret]\n" \ + "}\n" \ + : [_ret] "=r" (ret) \ + : [_sys_num] "ri" ((__e2k_u64_t) (sys_num)), \ + [_arg1] "ri" ((__e2k_u64_t) (arg1)), \ + [_arg2] "ri" ((__e2k_u64_t) (arg2)), \ + [_arg3] "ri" ((__e2k_u64_t) (arg3)), \ + [_arg4] "ri" ((__e2k_u64_t) (arg4)), \ + [_arg5] "ri" ((__e2k_u64_t) (arg5)), \ + [_arg6] "ri" ((__e2k_u64_t) (arg6)) \ + : "b[0]", "b[1]", "b[2]", "b[3]", "b[4]", "b[5]", \ + "b[6]", "ctpr1" \ + ); \ +} while (false) +#define E2K_COND_GOTO(label, cond, pred_no) \ +do { \ +_Pragma("no_asm_inline") \ + asm volatile ( \ + "\ncmpesb \t0, %0, %%pred" #pred_no \ + "\n{" \ + "\nibranch \t" #label " ? ~%%pred" #pred_no \ + "\n}" \ + WORKAROUND_IBRANCH_HWBUG \ + : \ + : "ri" ((__e2k_u32_t) (cond)) \ + : "pred" #pred_no \ + ); \ +} while (false) +#define E2K_COND_GOTO_ARG1(label, cond, pred_no, arg1) \ +do { \ +_Pragma("no_asm_inline") \ + asm volatile ( \ + "\ncmpesb \t0, %0, %%pred" #pred_no \ + "\n{" \ + "\naddd \t 0, %1, %%dr0 ? ~%%pred" #pred_no \ + "\nibranch \t" #label " ? 
~%%pred" #pred_no \ + "\n}" \ + WORKAROUND_IBRANCH_HWBUG \ + : \ + : "ri" ((__e2k_u32_t) (cond)), \ + "ri" ((__e2k_u64_t) (arg1)) \ + : "pred" #pred_no \ + ); \ +} while (false) +#define E2K_COND_GOTO_ARG2(label, cond, pred_no, arg1, arg2) \ +do { \ +_Pragma("no_asm_inline") \ + asm volatile ( \ + "\ncmpesb \t0, %0, %%pred" #pred_no \ + "\n{" \ + "\naddd \t 0, %1, %%dr0 ? ~%%pred" #pred_no \ + "\naddd \t 0, %2, %%dr1 ? ~%%pred" #pred_no \ + "\nibranch \t" #label " ? ~%%pred" #pred_no \ + "\n}" \ + WORKAROUND_IBRANCH_HWBUG \ + : \ + : "ri" ((__e2k_u32_t) (cond)), \ + "ri" ((__e2k_u64_t) (arg1)), \ + "ri" ((__e2k_u64_t) (arg2)) \ + : "pred" #pred_no \ + ); \ +} while (false) +#define DEF_COND_GOTO(label, cond) \ + E2K_COND_GOTO(label, cond, 0) +#define DEF_COND_GOTO_ARG1(label, cond, arg1) \ + E2K_COND_GOTO_ARG1(label, cond, 0, arg1) +#define DEF_COND_GOTO_ARG2(label, cond, arg1, arg2) \ + E2K_COND_GOTO_ARG2(label, cond, 0, arg1, arg2) + +#define E2K_JUMP_ABSOLUTE_WITH_ARGUMENTS_1(func, arg1) \ +do { \ + asm volatile ("{\n" \ + "movtd %[_func], %%ctpr1\n" \ + "addd %[_arg1], 0, %%dr0\n" \ + "}\n" \ + "ct %%ctpr1\n" \ + : \ + : [_func] "ir" (func), \ + [_arg1] "ri" (arg1) \ + : "ctpr1", "r0"); \ + unreachable(); \ +} while (0) + +#define E2K_JUMP_ABSOLUTE_WITH_ARGUMENTS_2(func, arg1, arg2) \ +do { \ + asm volatile ("{\n" \ + "movtd %[_func], %%ctpr1\n" \ + "addd %[_arg1], 0, %%dr0\n" \ + "addd %[_arg2], 0, %%dr1\n" \ + "}\n" \ + "ct %%ctpr1\n" \ + : \ + : [_func] "ir" (func), \ + [_arg1] "ri" (arg1), [_arg2] "ri" (arg2) \ + : "ctpr1", "r0", "r1"); \ + unreachable(); \ +} while (0) + +#define E2K_GOTO_INTEGER_LABEL_ARGS_0(_func) \ +({ \ + asm volatile ( \ + "{\n" \ + "movtd %[func], %%ctpr1\n" \ + "}\n" \ + "{\n" \ + "ct %%ctpr1\n" \ + "}\n" \ + : \ + : [func] "r" (_func) \ + : "ctpr1"); \ +}) + +#define __E2K_RESTART_TTABLE_ENTRY10_C(func, arg0, arg1, arg2, arg3, arg4, \ + arg5, arg6, arg7, tags) \ +do { \ + asm volatile ("{\n" \ + "disp %%ctpr1, " #func "\n" \ + 
"addd %0, 0, %%dr0\n" \ + "addd %1, 0, %%dr1\n" \ + "addd %2, 0, %%dr2\n" \ + "addd %3, 0, %%dr3\n" \ + "addd %4, 0, %%dr4\n" \ + "addd %5, 0, %%dr5\n" \ + "}\n" \ + "{\n" \ + "addd %6, 0, %%dr6\n" \ + "addd %7, 0, %%dr7\n" \ + "addd %8, 0, %%dr8\n" \ + "}\n" \ + "{\n" \ + "puttagd %%dr0, %%dr8, %%dr0\n" \ + "shrs %%dr8, 4, %%dr8\n" \ + "}\n" \ + "{\n" \ + "puttagd %%dr1, %%dr8, %%dr1\n" \ + "shrs %%dr8, 4, %%dr8\n" \ + "}\n" \ + "{\n" \ + "puttagd %%dr2, %%dr8, %%dr2\n" \ + "shrs %%dr8, 4, %%dr8\n" \ + "}\n" \ + "{\n" \ + "puttagd %%dr3, %%dr8, %%dr3\n" \ + "shrs %%dr8, 4, %%dr8\n" \ + "}\n" \ + "{\n" \ + "puttagd %%dr4, %%dr8, %%dr4\n" \ + "shrs %%dr8, 4, %%dr8\n" \ + "}\n" \ + "{\n" \ + "puttagd %%dr5, %%dr8, %%dr5\n" \ + "shrs %%dr8, 4, %%dr8\n" \ + "}\n" \ + "{\n" \ + "puttagd %%dr6, %%dr8, %%dr6\n" \ + "shrs %%dr8, 4, %%dr8\n" \ + "}\n" \ + "{\n" \ + "puttagd %%dr7, %%dr8, %%dr7\n" \ + "ct %%ctpr1\n" \ + "}\n" \ + : \ + : "ri" (arg0), "ri" (arg1), "ri" (arg2), "ri" (arg3), \ + "ri" (arg4), "ri" (arg5), "ri" (arg6), "ri" (arg7), \ + "ri" (tags) \ + : "ctpr1", "r0", "r1", "r2", "r3", "r4", "r5", "r6", \ + "r7", "r8"); \ + unreachable(); \ +} while (0) + +#define __E2K_RESTART_TTABLE_ENTRY8_C(func, _sys_num, arg1, arg2, arg3, arg4, \ + arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, _tags) \ +do { \ + u64 tag_lo, tag_hi; \ + asm volatile ( \ + "{\n" \ + "disp %%ctpr1, " #func "\n" \ + "shrd,1 %[tags], 8, %[tag_lo]\n" \ + "shrd,4 %[tags], 12, %[tag_hi]\n" \ + "}\n" \ + "{\n" \ + "puttagd,2 %[a1], %[tag_lo], %%dr2\n" \ + "puttagd,5 %[a2], %[tag_hi], %%dr3\n" \ + "shrd,1 %[tags], 16, %[tag_lo]\n" \ + "shrd,4 %[tags], 20, %[tag_hi]\n" \ + "}\n" \ + "{\n" \ + "puttagd,2 %[a3], %[tag_lo], %%dr4\n" \ + "puttagd,5 %[a4], %[tag_hi], %%dr5\n" \ + "shrd,1 %[tags], 24, %[tag_lo]\n" \ + "shrd,4 %[tags], 28, %[tag_hi]\n" \ + "}\n" \ + "{\n" \ + "puttagd,2 %[a5], %[tag_lo], %%dr6\n" \ + "puttagd,5 %[a6], %[tag_hi], %%dr7\n" \ + "shrd,1 %[tags], 32, %[tag_lo]\n" \ + "shrd,4 
%[tags], 36, %[tag_hi]\n" \ + "}\n" \ + "{\n" \ + "puttagd,2 %[a7], %[tag_lo], %%dr8\n" \ + "puttagd,5 %[a8], %[tag_hi], %%dr9\n" \ + "shrd,1 %[tags], 40, %[tag_lo]\n" \ + "shrd,4 %[tags], 44, %[tag_hi]\n" \ + "}\n" \ + "{\n" \ + "puttagd,2 %[a9], %[tag_lo], %%dr10\n" \ + "puttagd,5 %[a10], %[tag_hi], %%dr11\n" \ + "shrd,1 %[tags], 48, %[tag_lo]\n" \ + "shrd,4 %[tags], 52, %[tag_hi]\n" \ + "}\n" \ + "{\n" \ + "puttagd,2 %[a11], %[tag_lo], %%dr12\n" \ + "puttagd,5 %[a12], %[tag_hi], %%dr13\n" \ + "adds 0, %[sys_num], %%r0\n" \ + "ct %%ctpr1\n" \ + "}\n" \ + : [tag_lo] "=&r" (tag_lo), [tag_hi] "=&r" (tag_hi) \ + : [sys_num] "ri" (_sys_num), [a1] "ri" (arg1), \ + [a2] "ri" (arg2), [a3] "ri" (arg3), [a4] "ri" (arg4), \ + [a5] "ri" (arg5), [a6] "ri" (arg6), [a7] "ri" (arg7), \ + [a8] "ri" (arg8), [a9] "ri" (arg9), [a10] "ri" (arg10), \ + [a11] "ri" (arg11), [a12] "ri" (arg12), [tags] "ri" (_tags) \ + : "ctpr1", "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \ + "r8", "r9", "r10", "r11", "r12", "r13"); \ + unreachable(); \ +} while (0) + +/* Important: delay after FPU reading is 9 cycles for 0 cluster + * and 11 for 1 cluster, thus the NOPs. 
*/ +#define E2K_GETCONTEXT(fpcr, fpsr, pfpfr, pcsp_lo, pcsp_hi) \ +do { \ + u64 __pcshtp; \ + asm volatile ("rrs %%fpcr, %0\n" \ + "rrs %%fpsr, %1\n" \ + "rrs %%pfpfr, %2\n" \ + "rrd %%pcshtp, %5\n" \ + "rrd %%pcsp.lo, %3\n" \ + "{rrd %%pcsp.hi, %4\n" \ + "shld %5, 53, %5}\n" \ + "sard %5, 53, %5\n" \ + "{addd %4, %5, %4\n" \ + "nop 5}\n" \ + : "=r" (fpcr), "=r" (fpsr), "=r" (pfpfr), \ + "=r" (pcsp_lo), "=r" (pcsp_hi), "=r" (__pcshtp) \ + : ); \ +} while (0) + +#define E2K_CLEAR_RF_108() \ +do { \ + asm volatile ( \ + "{\n" \ + "nop 3\n" \ + "disp %%ctpr1, 1f\n" \ + "setwd wsz=108\n" \ + "setbn rbs=0, rsz=62, rcur=0\n" \ + "rwd 21UL | (1UL << 37), %%lsr\n" \ + "}\n" \ + "{\n" \ + "disp %%ctpr2, 2f\n" \ + "}\n" \ + "1:" \ + "{\n" \ + "loop_mode\n" \ + "addd 0, 0, %%db[0]\n" \ + "addd 0, 0, %%db[1]\n" \ + "addd 0, 0, %%db[42]\n" \ + "addd 0, 0, %%db[43]\n" \ + "addd 0, 0, %%db[84]\n" \ + "addd 0, 0, %%db[85]\n" \ + "alc alcf = 1, alct = 1\n" \ + "abn abnf = 1, abnt = 1\n" \ + "ct %%ctpr1 ? %%NOT_LOOP_END\n" \ + "}\n" \ + "{\n" \ + "nop 4\n" \ + "setbn rbs=63, rsz=44, rcur=0\n" \ + "rwd 15UL | (1UL << 37), %%lsr\n" \ + "}\n" \ + "2:" \ + "{\n" \ + "loop_mode\n" \ + "addd 0, 0, %%db[0]\n" \ + "addd 0, 0, %%db[1]\n" \ + "addd 0, 0, %%db[32]\n" \ + "addd 0, 0, %%db[33]\n" \ + "addd 0, 0, %%db[64]\n" \ + "addd 0, 0, %%db[65]\n" \ + "alc alcf = 1, alct = 1\n" \ + "abn abnf = 1, abnt = 1\n" \ + "ct %%ctpr2 ? 
%%NOT_LOOP_END\n" \ + "}\n" \ + ::: "ctpr1", "ctpr2"); \ +} while (0) + +#define E2K_CLEAR_RF_112() \ +do { \ + asm volatile ( \ + "{\n" \ + "nop 3\n" \ + "disp %%ctpr1, 1f\n" \ + "setwd wsz=112\n" \ + "setbn rbs=0, rsz=62, rcur=0\n" \ + "rwd 21UL | (1UL << 37), %%lsr\n" \ + "}\n" \ + "{\n" \ + "disp %%ctpr2, 2f\n" \ + "}\n" \ + "1:" \ + "{\n" \ + "loop_mode\n" \ + "addd 0, 0, %%db[0]\n" \ + "addd 0, 0, %%db[1]\n" \ + "addd 0, 0, %%db[42]\n" \ + "addd 0, 0, %%db[43]\n" \ + "addd 0, 0, %%db[84]\n" \ + "addd 0, 0, %%db[85]\n" \ + "alc alcf = 1, alct = 1\n" \ + "abn abnf = 1, abnt = 1\n" \ + "ct %%ctpr1 ? %%NOT_LOOP_END\n" \ + "}\n" \ + "{\n" \ + "nop 4\n" \ + "setbn rbs=63, rsz=48, rcur=0\n" \ + "rwd 16UL | (1UL << 37), %%lsr\n" \ + "}\n" \ + "2:" \ + "{\n" \ + "loop_mode\n" \ + "addd 0, 0, %%db[0]\n" \ + "addd 0, 0, %%db[1]\n" \ + "addd 0, 0, %%db[32]\n" \ + "addd 0, 0, %%db[33]\n" \ + "addd 0, 0, %%db[64]\n" \ + "addd 0, 0, %%db[65]\n" \ + "alc alcf = 1, alct = 1\n" \ + "abn abnf = 1, abnt = 1\n" \ + "ct %%ctpr2 ? 
%%NOT_LOOP_END\n" \ + "}\n" \ + "{\n" \ + "addd 0, 0, %%db[64]\n" \ + "addd 0, 0, %%db[65]\n" \ + "}\n" \ + ::: "ctpr1", "ctpr2"); \ +} while (0) + +#define E2K_CLEAR_CTPRS() \ +do { \ + __e2k_u64_t reg; \ + asm volatile ( \ + "{\n" \ + "puttagd 0, 5, %0\n" \ + "}\n" \ + "{\n" \ + "movtd,s %0, %%ctpr1\n" \ + "}\n" \ + "{\n" \ + "movtd,s %0, %%ctpr2\n" \ + "}\n" \ + "{\n" \ + "movtd,s %0, %%ctpr3\n" \ + "}\n" \ + : "=r" (reg) \ + : \ + : "ctpr1", "ctpr2", "ctpr3"); \ +} while (0) + +#define NATIVE_RESTORE_COMMON_REGS_VALUES(_ctpr1, _ctpr2, _ctpr3, _ctpr1_hi, \ + _ctpr2_hi, _ctpr3_hi, _lsr, _lsr1, _ilcr, _ilcr1) \ +do { \ + _Pragma("no_asm_inline") \ + asm volatile ( \ + "{\n" \ + "rwd %[ctpr2], %%ctpr2\n" \ + "}\n" \ + \ + ALTERNATIVE_1_ALTINSTR \ + /* CPU_FEAT_TRAP_V5 version */ \ + \ + "{\n" \ + "rwd %[ctpr3], %%ctpr3\n" \ + "}\n" \ + "{\n" \ + "rwd %[ctpr1], %%ctpr1\n" \ + "}\n" \ + "{\n" \ + "rwd %[lsr], %%lsr\n" \ + "addd %[lsr1], 0, %%db[1]\n" \ + "addd %[ilcr1], 0, %%db[3]\n" \ + "}\n" \ + /* rwd %db[1], %%lsr1 */ \ + ".word 0x04100011; .word 0x3dc001c3\n" \ + ".word 0x01c00000; .word 0x00000000\n" \ + "{\n" \ + "rwd %[ilcr], %%ilcr\n" \ + "}\n" \ + /* rwd %db[3], %%ilcr1 */ \ + ".word 0x04100011; .word 0x3dc003c7\n" \ + ".word 0x01c00000; .word 0x00000000\n" \ + \ + ALTERNATIVE_2_ALTINSTR2 \ + /* CPU_FEAT_TRAP_V6 version */ \ + \ + "{\n" \ + "rwd %[ctpr3], %%ctpr3\n" \ + "addd %[ctpr1_hi], %%db[0]\n" \ + "addd %[ctpr2_hi], %%db[2]\n" \ + "}\n" \ + "{\n" \ + "rwd %[ctpr1], %%ctpr1\n" \ + "addd %[ctpr3_hi], %%db[4]\n" \ + "addd %[lsr1], 0, %%db[1]\n" \ + "}\n" \ + /* rwd %db[0], %%ctpr1.hi */ \ + ".word 0x04100011; .word 0x3dc00019\n" \ + ".word 0x01c00000; .word 0x00000000\n" \ + /* rwd %db[2], %%ctpr2.hi */ \ + ".word 0x04100011; .word 0x3dc0021a\n" \ + ".word 0x01c00000; .word 0x00000000\n" \ + /* rwd %db[4], %%ctpr3.hi */ \ + ".word 0x04100011; .word 0x3dc0041b\n" \ + ".word 0x01c00000; .word 0x00000000\n" \ + "{\n" \ + "rwd %[lsr], %%lsr\n" \ + "addd 
%[ilcr1], 0, %%db[3]\n" \ + "}\n" \ + /* rwd %db[1], %%lsr1 */ \ + ".word 0x04100011; .word 0x3dc001c3\n" \ + ".word 0x01c00000; .word 0x00000000\n" \ + "{\n" \ + "rwd %[ilcr], %%ilcr\n" \ + "}\n" \ + /* rwd %db[3], %%ilcr1 */ \ + ".word 0x04100011; .word 0x3dc003c7\n" \ + ".word 0x01c00000; .word 0x00000000\n" \ + \ + ALTERNATIVE_3_OLDINSTR2 \ + \ + "{\n" \ + "rwd %[ctpr3], %%ctpr3\n" \ + "}\n" \ + "{\n" \ + "rwd %[ctpr1], %%ctpr1\n" \ + "}\n" \ + "{\n" \ + "rwd %[lsr], %%lsr\n" \ + "}\n" \ + "{\n" \ + "rwd %[ilcr], %%ilcr\n" \ + "}\n" \ + \ + ALTERNATIVE_4_FEATURE2(%[facility1], %[facility2]) \ + :: [ctpr1] "r" (_ctpr1), [ctpr2] "r" (_ctpr2), \ + [ctpr3] "r" (_ctpr3), [ctpr1_hi] "r" (_ctpr1_hi), \ + [ctpr2_hi] "r" (_ctpr2_hi), [ctpr3_hi] "r" (_ctpr3_hi), \ + [lsr] "r" (_lsr), [lsr1] "r" (_lsr1), \ + [ilcr] "r" (_ilcr), [ilcr1] "r" (_ilcr1), \ + [facility1] "i" (CPU_FEAT_TRAP_V5), \ + [facility2] "i" (CPU_FEAT_TRAP_V6) \ + : "memory", "b[0]", "b[1]", "b[2]", "b[3]", "b[4]"); \ +} while (0) + +#define NATIVE_RESTORE_KERNEL_GREGS(_k_gregs) \ +do { \ + u64 f16, f17, f18, f19, tmp1, tmp2; \ + _Pragma("no_asm_inline") \ + asm volatile ( \ + ALTERNATIVE_1_ALTINSTR \ + /* iset v5 version - restore qp registers extended part */ \ + \ + "{\n" \ + "addd,2 %[k_gregs], %%db[0]\n" \ + "addd,5 %[k_gregs], %%db[1]\n" \ + "}\n" \ + /* "{ldrqp,2 [ %%db[0] + 0x50400000000 ], %%g16\n" \ + " ldrqp,5 [ %%db[1] + 0x50400000010 ], %%g17}\n" */ \ + ".word 0x92400033\n" \ + ".word 0x6b00dcf0\n" \ + ".word 0x6b01def1\n" \ + ".word 0x02c002c0\n" \ + ".word 0x00000504\n" \ + ".word 0x00000010\n" \ + ".word 0x00000504\n" \ + ".word 0x00000000\n" \ + /* "{ldrqp,2 [ %%db[0] + 0x50400000020 ], %%g18\n" \ + " ldrqp,5 [ %%db[1] + 0x50400000030 ], %%g19}\n" */ \ + ".word 0x92400033\n" \ + ".word 0x6b00dcf2\n" \ + ".word 0x6b01def3\n" \ + ".word 0x02c002c0\n" \ + ".word 0x00000504\n" \ + ".word 0x00000030\n" \ + ".word 0x00000504\n" \ + ".word 0x00000020\n" \ + \ + ALTERNATIVE_2_OLDINSTR \ + /* 
Original instruction - restore only 16 bits */ \ + \ + "{\n" \ + "ldrd,2 [ %[k_gregs] + 0x50400000000 ], %%g16\n" \ + "ldrd,5 [ %[k_gregs] + 0x50400000010 ], %%g17\n" \ + "}\n" \ + "{\n" \ + "ldrd,2 [ %[k_gregs] + 0x50400000020 ], %%g18\n" \ + "ldrd,5 [ %[k_gregs] + 0x50400000030 ], %%g19\n" \ + "}\n" \ + "{\n" \ + "ldh,0 [ %[k_gregs] + 0x8 ], %[f16]\n" \ + "ldh,3 [ %[k_gregs] + 0x18 ], %[f17]\n" \ + "ldh,2 [ %[k_gregs] + 0x28 ], %[f18]\n" \ + "ldh,5 [ %[k_gregs] + 0x38 ], %[f19]\n" \ + "}\n" \ + "{\n" \ + "gettagd,2 %%g16, %[tmp1]\n" \ + "gettagd,5 %%g17, %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "cmpesb,0 0x0, %[tmp1], %%pred16\n" \ + "cmpesb,3 0x0, %[tmp2], %%pred17\n" \ + "gettagd,2 %%g18, %[tmp1]\n" \ + "gettagd,5 %%g19, %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "cmpesb,0 0x0, %[tmp1], %%pred18\n" \ + "cmpesb,3 0x0, %[tmp2], %%pred19\n" \ + "}\n" \ + "{\n" \ + "movif,0 %%g16, %[f16], %%g16 ? %%pred16\n" \ + "movif,3 %%g17, %[f17], %%g17 ? %%pred17\n" \ + "}\n" \ + "{\n" \ + "movif,0 %%g18, %[f18], %%g18 ? %%pred18\n" \ + "movif,3 %%g19, %[f19], %%g19 ? 
%%pred19\n" \ + "}\n" \ + \ + ALTERNATIVE_3_FEATURE(%[facility]) \ + : [f16] "=&r" (f16), [f17] "=&r" (f17), [f18] "=&r" (f18), \ + [f19] "=&r" (f19), [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2) \ + : [k_gregs] "m" (*(_k_gregs)), [facility] "i" (CPU_FEAT_QPREG) \ + : "g16", "g17", "g18", "g19", \ + "pred16", "pred17", "pred18", "pred19"); \ +} while (0) + +#define NATIVE_RESTORE_HOST_GREGS(_h_gregs) \ +do { \ + u64 f20, f21, tmp1, tmp2; \ + _Pragma("no_asm_inline") \ + asm volatile ( \ + ALTERNATIVE_1_ALTINSTR \ + /* iset v5 version - restore qp registers extended part */ \ + \ + "{\n" \ + "addd,2 %[h_gregs], %%db[0]\n" \ + "addd,5 %[h_gregs], %%db[1]\n" \ + "}\n" \ + /* "{ldrqp,2 [ %%db[0] + 0x50400000000 ], %%g20\n" \ + "ldrqp,5 [ %%db[1] + 0x50400000010 ], %%g21}\n" */ \ + ".word 0x92400033\n" \ + ".word 0x6b00dcf4\n" \ + ".word 0x6b01def5\n" \ + ".word 0x02c002c0\n" \ + ".word 0x00000504\n" \ + ".word 0x00000010\n" \ + ".word 0x00000504\n" \ + ".word 0x00000000\n" \ + \ + ALTERNATIVE_2_OLDINSTR \ + /* Original instruction - restore only 16 bits */ \ + \ + "{\n" \ + "ldrd,2 [ %[h_gregs] + 0x50400000000 ], %%g20\n" \ + "ldrd,5 [ %[h_gregs] + 0x50400000010 ], %%g21\n" \ + "}\n" \ + "{\n" \ + "nop 1\n" \ + "ldh,0 [ %[h_gregs] + 0x8 ], %[f20]\n" \ + "ldh,3 [ %[h_gregs] + 0x18 ], %[f21]\n" \ + "}\n" \ + "{\n" \ + "gettagd,2 %%g20, %[tmp1]\n" \ + "gettagd,5 %%g21, %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "nop 1\n" \ + "cmpesb,0 0x0, %[tmp1], %%pred20\n" \ + "cmpesb,3 0x0, %[tmp2], %%pred21\n" \ + "}\n" \ + "{\n" \ + "movif,0 %%g20, %[f20], %%g20 ? %%pred20\n" \ + "movif,3 %%g21, %[f21], %%g21 ? 
%%pred21\n" \ + "}\n" \ + \ + ALTERNATIVE_3_FEATURE(%[facility]) \ + : [f20] "=&r" (f20), [f21] "=&r" (f21), \ + [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2) \ + : [h_gregs] "m" (*(_h_gregs)), [facility] "i" (CPU_FEAT_QPREG) \ + : "g20", "g21", "pred20", "pred21"); \ +} while (0) + + +#define LDRD(addr) \ +({ \ + register long __dres; \ + asm volatile ("{ldrd [%1], %0\n}" \ + : "=r"(__dres) \ + : "m" (*((unsigned long long *)(addr)))); \ + __dres; \ +}) + +#define SIMPLE_RECOVERY_STORE(_addr, _data, _opc) \ +do { \ + u32 _fmt = ((ldst_rec_op_t *) &_opc)->fmt; \ + u32 _ind = ((ldst_rec_op_t *) &_opc)->index; \ + asm ( \ + "{nop 1\n" \ + " cmpesb,0 %[fmt], 1, %%pred20\n" \ + " cmpesb,1 %[fmt], 2, %%pred21\n" \ + " cmpesb,3 %[fmt], 3, %%pred22\n" \ + " cmpesb,4 %[fmt], 4, %%pred23}\n" \ + "{stb,2 %[addr], %[ind], %[data] ? %%pred20\n" \ + " sth,5 %[addr], %[ind], %[data] ? %%pred21}\n" \ + "{stw,2 %[addr], %[ind], %[data] ? %%pred22\n" \ + " std,5 %[addr], %[ind], %[data] ? %%pred23}\n" \ + : \ + : [addr] "r" (_addr), [data] "r" (_data), \ + [fmt] "r" (_fmt), [ind] "r" (_ind) \ + : "memory", "pred20", "pred21", "pred22", "pred23" \ + ); \ +} while (0) + +#define SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, _greg_no, _sm, _mas) \ +do { \ + u32 _fmt = ((ldst_rec_op_t *) &_opc)->fmt; \ + u32 _ind = ((ldst_rec_op_t *) &_opc)->index; \ + asm ( \ + "{nop 1\n" \ + " cmpesb,0 %[fmt], 1, %%pred20\n" \ + " cmpesb,1 %[fmt], 2, %%pred21\n" \ + " cmpesb,3 %[fmt], 3, %%pred22\n" \ + " cmpesb,4 %[fmt], 4, %%pred23}\n" \ + "{nop 4\n" \ + " ldb" _sm ",0 %[addr], %[ind], %%dg" #_greg_no ", " \ + "mas=%[mas] ? %%pred20\n" \ + " ldh" _sm ",2 %[addr], %[ind], %%dg" #_greg_no ", " \ + "mas=%[mas] ? %%pred21\n" \ + " ldw" _sm ",3 %[addr], %[ind], %%dg" #_greg_no ", " \ + "mas=%[mas] ? %%pred22\n" \ + " ldd" _sm ",5 %[addr], %[ind], %%dg" #_greg_no ", " \ + "mas=%[mas] ? 
%%pred23}\n" \ + : \ + : [addr] "r" (_addr), [fmt] "r" (_fmt), \ + [ind] "r" (_ind), [mas] "i" (_mas) \ + : "memory", "pred20", "pred21", "pred22", "pred23", \ + "g" #_greg_no \ + ); \ +} while (0) + +#define SIMPLE_RECOVERY_LOAD_TO_GREG(_addr, _opc, _greg_num, _sm, _mas) \ +do { \ + switch (_greg_num) { \ + case 0: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 0, _sm, _mas); \ + break; \ + case 1: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 1, _sm, _mas); \ + break; \ + case 2: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 2, _sm, _mas); \ + break; \ + case 3: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 3, _sm, _mas); \ + break; \ + case 4: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 4, _sm, _mas); \ + break; \ + case 5: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 5, _sm, _mas); \ + break; \ + case 6: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 6, _sm, _mas); \ + break; \ + case 7: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 7, _sm, _mas); \ + break; \ + case 8: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 8, _sm, _mas); \ + break; \ + case 9: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 9, _sm, _mas); \ + break; \ + case 10: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 10, _sm, _mas); \ + break; \ + case 11: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 11, _sm, _mas); \ + break; \ + case 12: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 12, _sm, _mas); \ + break; \ + case 13: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 13, _sm, _mas); \ + break; \ + case 14: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 14, _sm, _mas); \ + break; \ + case 15: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 15, _sm, _mas); \ + break; \ + /* Do not load g16-g19 as they are used by kernel */ \ + case 16: \ + case 17: \ + case 18: \ + case 19: \ + break; \ + case 20: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 20, _sm, _mas); \ + break; \ + case 21: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 21, _sm, _mas); \ 
+ break; \ + case 22: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 22, _sm, _mas); \ + break; \ + case 23: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 23, _sm, _mas); \ + break; \ + case 24: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 24, _sm, _mas); \ + break; \ + case 25: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 25, _sm, _mas); \ + break; \ + case 26: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 26, _sm, _mas); \ + break; \ + case 27: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 27, _sm, _mas); \ + break; \ + case 28: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 28, _sm, _mas); \ + break; \ + case 29: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 29, _sm, _mas); \ + break; \ + case 30: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 30, _sm, _mas); \ + break; \ + case 31: \ + SIMPLE_RECOVERY_LOAD_TO_GREG_NO(_addr, _opc, 31, _sm, _mas); \ + break; \ + default: \ + panic("Invalid global register # %d\n", _greg_num); \ + } \ +} while (0) + +#define SIMPLE_RECOVERY_MOVE(_from, _to, _opc, _first_time, _sm, _mas) \ +do { \ + u64 _data; \ + u32 _fmt = ((ldst_rec_op_t *) &_opc)->fmt; \ + u32 _ind = ((ldst_rec_op_t *) &_opc)->index; \ + asm ( \ + "{nop 1\n" \ + " cmpesb,0 %[fmt], 1, %%pred20\n" \ + " cmpesb,1 %[fmt], 2, %%pred21\n" \ + " cmpesb,3 %[fmt], 3, %%pred22\n" \ + " cmpesb,4 %[fmt], 4, %%pred23}\n" \ + "{nop 4\n" \ + " ldb" _sm ",0 %[from], %[ind], %[data], " \ + "mas=%[mas] ? %%pred20\n" \ + " ldh" _sm ",2 %[from], %[ind], %[data], " \ + "mas=%[mas] ? %%pred21\n" \ + " ldw" _sm ",3 %[from], %[ind], %[data], " \ + "mas=%[mas] ? %%pred22\n" \ + " ldd" _sm ",5 %[from], %[ind], %[data], " \ + "mas=%[mas] ? 
%%pred23}\n" \ + "{cmpesb,0 %[first_time], 0, %%pred19}\n" \ + "{pass %%pred19, @p0\n" \ + " pass %%pred20, @p1\n" \ + " pass %%pred21, @p2\n" \ + " pass %%pred22, @p3\n" \ + " landp @p0, @p1, @p4\n" \ + " pass @p4, %%pred20\n" \ + " landp @p0, @p2, @p5\n" \ + " pass @p5, %%pred21\n" \ + " landp @p0, @p3, @p6\n" \ + " pass @p6, %%pred22}\n" \ + "{pass %%pred19, @p0\n" \ + " pass %%pred23, @p1\n" \ + " landp @p0, ~@p1, @p4\n" \ + " pass @p4, %%pred23}\n" \ + "{stb,sm,2 %[to], 0, %[data] ? %%pred20\n" \ + " sth,sm,5 %[to], 0, %[data] ? %%pred21}\n" \ + "{stw,sm,2 %[to], 0, %[data] ? %%pred22\n" \ + " std,sm,5 %[to], 0, %[data] ? ~%%pred23}\n" \ + : [data] "=&r" (_data) \ + : [from] "r" (_from), [to] "r" (_to), \ + [fmt] "r" (_fmt), [ind] "r" (_ind), \ + [first_time] "r" (_first_time), [mas] "i" (_mas) \ + : "memory", "pred19", "pred20", "pred21", "pred22", "pred23" \ + ); \ +} while (0) + +/* Since v6 this got replaced with "wait int=1,mem_mod=1" */ +#define C1_WAIT_TRAP_V3() \ +do { \ + _Pragma("no_asm_inline") \ + asm volatile ("wait trap=1" ::: "memory"); \ +} while (0) + +#define C3_WAIT_TRAP_V3(__val, __phys_addr) \ +do { \ + u64 _reg; \ + asm volatile ( \ + /* 1) Disable instruction prefetch */ \ + "mmurr %%mmu_cr, %[reg]\n" \ + "andnd %[reg], 0x800, %[reg]\n" /* clear mmu_cr.ipd */ \ + "{nop 3\n" \ + " mmurw %[reg], %%mmu_cr}\n" \ + "disp %%ctpr1, 1f\n" \ + "{wait all_c=1\n" \ + " ct %%ctpr1}\n" /* force Instruction Buffer to use new ipd */ \ + "1:\n" \ + /* 2) Disable %ctpr's */ \ + "rwd 0, %%ctpr1\n" \ + "rwd 0, %%ctpr2\n" \ + "rwd 0, %%ctpr3\n" \ + "wait all_c=1\n" \ + /* 3) Flush TLB and instruction cache (wait only for L1I \ + * flush so that it does not flush stw + wait from under us) */ \ + "wait ma_c=1\n" \ + "std,2 0x0, %[addr_flush_icache], %[val_icache], mas=%[mas_icache]\n" \ + "std,2 0x0, %[addr_flush_tlb], %[val_tlb], mas=%[mas_tlb]\n" \ + "{wait fl_c=1\n" \ + /* 4) Make sure the actual disabling code lies in the same cache line */ \ + " ibranch 
2f}\n" \ + ".align 256\n" \ + "2:\n" \ + /* 5) Flush data cache (except L3 which is shared) */ \ + "std,2 0x0, %[addr_flush_cache], %[val_cache], mas=%[mas_cache]\n" \ + "wait fl_c=1, ma_c=1\n" \ + /* 6) Disable the clock. We access SIC register by physical address \ + * because we've just flushed TLB, and accessing by virtual address \ + * would stall until all 4 page table levels are read into TLB. */ \ + ALTERNATIVE_1_ALTINSTR \ + /* CPU_HWBUG_C3_WAIT_MA_C version */ \ + "nop 7\n" \ + "nop 7\n" \ + "nop 7\n" \ + "nop 7\n" \ + "nop 7\n" \ + "nop 7\n" \ + "nop 7\n" \ + "nop 7\n" \ + "nop 1\n" \ + "wait ma_c=1\n" \ + "stw %[phys_addr], 0, %[val], mas=%[mas_ioaddr]\n" \ + "wait trap=1\n" \ + ALTERNATIVE_2_OLDINSTR \ + /* Default version */ \ + "stw %[phys_addr], 0, %[val], mas=%[mas_ioaddr]\n" \ + "wait trap=1\n" \ + ALTERNATIVE_3_FEATURE(%[facility]) \ + /* Will never get here */ \ + : [reg] "=&r" (_reg) \ + : [val] "r" ((u32) (__val)), \ + [phys_addr] "r" ((u64) (__phys_addr)), \ + [addr_flush_cache] "r" ((u64) (_FLUSH_WRITE_BACK_CACHE_L12_OP)), \ + [val_cache] "r" (0ULL), \ + [mas_cache] "i" (MAS_CACHE_FLUSH), \ + [addr_flush_icache] "r" ((u64) (_FLUSH_ICACHE_ALL_OP)), \ + [val_icache] "r" (0ULL), \ + [mas_icache] "i" (MAS_ICACHE_FLUSH), \ + [addr_flush_tlb] "r" ((u64) (_FLUSH_TLB_ALL_OP)), \ + [val_tlb] "r" (0ULL), \ + [mas_tlb] "i" (MAS_TLB_FLUSH), \ + [mas_ioaddr] "i" (MAS_IOADDR), \ + [facility] "i" (CPU_HWBUG_C3_WAIT_MA_C) \ + : "memory", "ctpr1", "ctpr2", "ctpr3"); \ +} while (0) + +/* Preparing to turn the synchoniztion clock off + * by writing the value __val to register PMC pointed by __phys_addr */ +#define C3_WAIT_INT_V6(__val, __phys_addr) \ +do { \ + u64 _reg; \ + asm volatile ( \ + /* 1) Disable instruction prefetch */ \ + "mmurr %%mmu_cr, %[reg]\n" \ + "andnd %[reg], 0x800, %[reg]\n" /* clear mmu_cr.ipd */ \ + "{nop 3\n" \ + " mmurw %[reg], %%mmu_cr}\n" \ + "disp %%ctpr1, 1f\n" \ + "{wait all_c=1\n" \ + " ct %%ctpr1}\n" /* force Instruction Buffer 
to use new ipd */ \ + "1:\n" \ + /* 2) Disable %ctpr's */ \ + "rwd 0, %%ctpr1\n" \ + "rwd 0, %%ctpr2\n" \ + "rwd 0, %%ctpr3\n" \ + "wait all_c=1\n" \ + /* 3) Flush TLB and instruction cache */ \ + "wait ma_c=1\n" \ + "std,2 0x0, %[addr_flush_icache], %[val_icache], mas=%[mas_icache]\n" \ + "std,2 0x0, %[addr_flush_tlb], %[val_tlb], mas=%[mas_tlb]\n" \ + "{wait fl_c=1, ma_c=1\n" \ + /* 4) Make sure the actual disabling code lies in the same cache line */ \ + " ibranch 2f}\n" \ + ".align 256\n" \ + "2:\n" \ + /* 5) Flush data cache (except L3 which is shared) */ \ + "std,2 0x0, %[addr_flush_cache], %[val_cache], mas=%[mas_cache]\n" \ + "wait fl_c=1, ma_c=1\n" \ + /* 6) Disable the clock. We access SIC register by physical address \ + * because we've just flushed TLB, and accessing by virtual address \ + * would stall until all 4 page table levels are read into TLB. */ \ + "stw %[phys_addr], 0, %[val], mas=%[mas_ioaddr]\n" \ + "wait st_c=1, int=1\n" \ + /* 7) We are woken, reenable instruction prefetch */ \ + "mmurr %%mmu_cr, %[reg]\n" \ + "ord %[reg], 0x800, %[reg]\n" /* clear mmu_cr.ipd */ \ + "mmurw %[reg], %%mmu_cr\n" \ + "disp %%ctpr1, 3f\n" \ + "{wait all_c=1\n" \ + " ct %%ctpr1}\n" /* force Instruction Buffer to use new ipd */ \ + "3:\n" \ + : [reg] "=&r" (_reg) \ + : [val] "r" ((u32) (__val)), \ + [phys_addr] "r" ((u64) (__phys_addr)), \ + [addr_flush_cache] "r" ((u64) (_FLUSH_WRITE_BACK_CACHE_L12_OP)), \ + [val_cache] "r" (0ULL), \ + [mas_cache] "i" (MAS_CACHE_FLUSH), \ + [addr_flush_icache] "r" ((u64) (_FLUSH_ICACHE_ALL_OP)), \ + [val_icache] "r" (0ULL), \ + [mas_icache] "i" (MAS_ICACHE_FLUSH), \ + [addr_flush_tlb] "r" ((u64) (_FLUSH_TLB_ALL_OP)), \ + [val_tlb] "r" (0ULL), \ + [mas_tlb] "i" (MAS_TLB_FLUSH), \ + [mas_ioaddr] "i" (MAS_IOADDR) \ + : "memory", "ctpr1", "ctpr2", "ctpr3"); \ +} while (0) + +/* Hardware virtualized extensions support */ + +#define E2K_GLAUNCH(_ctpr1, _ctpr1_hi, _ctpr2, _ctpr2_hi, _ctpr3, _ctpr3_hi, \ + _lsr, _lsr1, _ilcr, _ilcr1) \ 
+do { \ + _Pragma("no_asm_inline") \ + asm volatile ("{rwd %[ctpr1], %%ctpr1}\n" \ + "{rwd %[ctpr1_hi], %%ctpr1.hi}\n" \ + "{rwd %[ctpr3], %%ctpr3}\n" \ + "{rwd %[ctpr3_hi], %%ctpr3.hi}\n" \ + "{\n" \ + "rwd %[lsr], %%lsr\n" \ + "addd %[lsr1], 0, %%db[1]\n" \ + "addd %[ilcr1], 0, %%db[3]\n" \ + "}\n" \ + /* rwd %db[1], %%lsr1 */ \ + ".word 0x04100011\n" \ + ".word 0x3dc001c3\n" \ + ".word 0x01c00000\n" \ + ".word 0x00000000\n" \ + "{\n" \ + "rwd %[ilcr], %%ilcr\n" \ + "}\n" \ + /* rwd %db[3], %%ilcr1 */ \ + ".word 0x04100011\n" \ + ".word 0x3dc003c7\n" \ + ".word 0x01c00000\n" \ + ".word 0x00000000\n" \ + /* #80747: must repeat interrupted barriers */ \ + "{nop 3; wait st_c=1}\n" \ + "{glaunch}\n" \ + "{wait fl_c=1\n" \ + " rrd %%lsr, %[lsr]}\n" \ + "{rrd %%ilcr, %[ilcr]}\n" \ + "{rrd %%lsr1, %[lsr1]}\n" \ + "{rrd %%ilcr1, %[ilcr1]}\n" \ + "{rrd %%ctpr1, %[ctpr1]}\n" \ + "{rrd %%ctpr1.hi, %[ctpr1_hi]}\n" \ + "{rrd %%ctpr2, %[ctpr2]}\n" \ + "{rrd %%ctpr2.hi, %[ctpr2_hi]}\n" \ + "{rrd %%ctpr3, %[ctpr3]}\n" \ + "{rrd %%ctpr3.hi, %[ctpr3_hi]}\n" \ + ALTERNATIVE_1_ALTINSTR \ + /* CPU_HWBUG_L1I_STOPS_WORKING version */ \ + "1:\n" \ + "{ipd 0; disp %%ctpr1, 1b}" \ + /* ctpr2 will be cleared after saving AAU */ \ + "{ipd 0; disp %%ctpr3, 1b}" \ + ALTERNATIVE_2_OLDINSTR \ + /* Default version */ \ + "{nop}" \ + ALTERNATIVE_3_FEATURE(%[facility]) \ + : [lsr] "+r" (_lsr), [lsr1] "+r" (_lsr1), \ + [ilcr] "+r" (_ilcr), [ilcr1] "+r" (_ilcr1), \ + [ctpr1] "+r" (_ctpr1), [ctpr1_hi] "+r" (_ctpr1_hi), \ + [ctpr2] "+r" (_ctpr2), [ctpr2_hi] "+r" (_ctpr2_hi), \ + [ctpr3] "+r" (_ctpr3), [ctpr3_hi] "+r" (_ctpr3_hi) \ + : [facility] "i" (CPU_HWBUG_L1I_STOPS_WORKING) \ + : "memory", "b[1]", "b[3]", "ctpr1", "ctpr2", "ctpr3"); \ +} while (0) + + +/* Clobbers "ctpr" are here to tell lcc that there is a call inside */ +#define E2K_HCALL_CLOBBERS \ + "ctpr1", "ctpr2", "ctpr3", \ + "b[0]", "b[1]", "b[2]", "b[3]", \ + "b[4]", "b[5]", "b[6]", "b[7]" + +#define __E2K_HCALL_0(_trap, _sys_num, 
_arg1) \ +({ \ + register u64 __res; \ + asm volatile ( \ + "addd 0x0, %[sys_num], %%b[0]\n\t" \ + "{\n" \ + "hcall %[trap], wbs = %#\n\t" \ + "}\n" \ + "addd 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((u64) (_sys_num)) \ + : E2K_HCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_HCALL_1(_trap, _sys_num, _arg1) \ +({ \ + register u64 __res; \ + asm volatile ("{\n" \ + "addd 0x0, %[sys_num], %%b[0]\n\t" \ + "addd 0x0, %[arg1], %%b[1]\n\t" \ + "}\n" \ + "{\n" \ + "hcall %[trap], wbs = %#\n\t" \ + "}\n" \ + "addd 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((u64) (_sys_num)), \ + [arg1] "ri" ((u64) (_arg1)) \ + : E2K_HCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_HCALL_2(_trap, _sys_num, _arg1, _arg2) \ +({ \ + register u64 __res; \ + asm volatile ("{\n" \ + "addd 0x0, %[sys_num], %%b[0]\n\t" \ + "addd 0x0, %[arg1], %%b[1]\n\t" \ + "addd 0x0, %[arg2], %%b[2]\n\t" \ + "}\n" \ + "{\n" \ + "hcall %[trap], wbs = %#\n\t" \ + "}\n" \ + "addd 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((u64) (_sys_num)), \ + [arg1] "ri" ((u64) (_arg1)), \ + [arg2] "ri" ((u64) (_arg2)) \ + : E2K_HCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_HCALL_3(_trap, _sys_num, _arg1, _arg2, _arg3) \ +({ \ + register u64 __res; \ + asm volatile ("{\n" \ + "addd 0x0, %[sys_num], %%b[0]\n\t" \ + "addd 0x0, %[arg1], %%b[1]\n\t" \ + "addd 0x0, %[arg2], %%b[2]\n\t" \ + "addd 0x0, %[arg3], %%b[3]\n\t" \ + "}\n" \ + "{\n" \ + "hcall %[trap], wbs = %#\n\t" \ + "}\n" \ + "addd 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((u64) (_sys_num)), \ + [arg1] "ri" ((u64) (_arg1)), \ + [arg2] "ri" ((u64) (_arg2)), \ + [arg3] "ri" ((u64) (_arg3)) \ + : E2K_HCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_HCALL_4(_trap, _sys_num, _arg1, _arg2, _arg3, _arg4) \ +({ \ + register u64 __res; \ + asm 
volatile ("{\n" \ + "addd 0x0, %[sys_num], %%b[0]\n\t" \ + "addd 0x0, %[arg1], %%b[1]\n\t" \ + "addd 0x0, %[arg2], %%b[2]\n\t" \ + "addd 0x0, %[arg3], %%b[3]\n\t" \ + "addd 0x0, %[arg4], %%b[4]\n\t" \ + "}\n" \ + "{\n" \ + "hcall %[trap], wbs = %#\n\t" \ + "}\n" \ + "addd 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((u64) (_sys_num)), \ + [arg1] "ri" ((u64) (_arg1)), \ + [arg2] "ri" ((u64) (_arg2)), \ + [arg3] "ri" ((u64) (_arg3)), \ + [arg4] "ri" ((u64) (_arg4)) \ + : E2K_HCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_HCALL_5(_trap, _sys_num, _arg1, _arg2, _arg3, _arg4, _arg5) \ +({ \ + register u64 __res; \ + asm volatile ("{\n" \ + "addd 0x0, %[sys_num], %%b[0]\n\t" \ + "addd 0x0, %[arg1], %%b[1]\n\t" \ + "addd 0x0, %[arg2], %%b[2]\n\t" \ + "addd 0x0, %[arg3], %%b[3]\n\t" \ + "addd 0x0, %[arg4], %%b[4]\n\t" \ + "addd 0x0, %[arg5], %%b[5]\n\t" \ + "}\n" \ + "{\n" \ + "hcall %[trap], wbs = %#\n\t" \ + "}\n" \ + "addd 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((u64) (_sys_num)), \ + [arg1] "ri" ((u64) (_arg1)), \ + [arg2] "ri" ((u64) (_arg2)), \ + [arg3] "ri" ((u64) (_arg3)), \ + [arg4] "ri" ((u64) (_arg4)), \ + [arg5] "ri" ((u64) (_arg5)) \ + : E2K_HCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_HCALL_6(_trap, _sys_num, _arg1, \ + _arg2, _arg3, _arg4, _arg5, _arg6) \ +({ \ + register u64 __res; \ + asm volatile ( \ + "addd 0x0, %[sys_num], %%b[0]\n\t" \ + "{\n" \ + "addd 0x0, %[arg1], %%b[1]\n\t" \ + "addd 0x0, %[arg2], %%b[2]\n\t" \ + "addd 0x0, %[arg3], %%b[3]\n\t" \ + "addd 0x0, %[arg4], %%b[4]\n\t" \ + "addd 0x0, %[arg5], %%b[5]\n\t" \ + "addd 0x0, %[arg6], %%b[6]\n\t" \ + "}\n" \ + "{\n" \ + "hcall %[trap], wbs = %#\n\t" \ + "}\n" \ + "addd 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((u64) (_sys_num)), \ + [arg1] "ri" ((u64) (_arg1)), \ + [arg2] "ri" ((u64) (_arg2)), \ + [arg3] "ri" 
((u64) (_arg3)), \ + [arg4] "ri" ((u64) (_arg4)), \ + [arg5] "ri" ((u64) (_arg5)), \ + [arg6] "ri" ((u64) (_arg6)) \ + : E2K_HCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_HCALL_7(_trap, _sys_num, _arg1, \ + _arg2, _arg3, _arg4, _arg5, _arg6, _arg7) \ +({ \ + register u64 __res; \ + asm volatile ("{\n" \ + "addd 0x0, %[sys_num], %%b[0]\n\t" \ + "addd 0x0, %[arg1], %%b[1]\n\t" \ + "addd 0x0, %[arg2], %%b[2]\n\t" \ + "addd 0x0, %[arg3], %%b[3]\n\t" \ + "addd 0x0, %[arg4], %%b[4]\n\t" \ + "addd 0x0, %[arg5], %%b[5]\n\t" \ + "}\n" \ + "{\n" \ + "addd 0x0, %[arg6], %%b[6]\n\t" \ + "addd 0x0, %[arg7], %%b[7]\n\t" \ + "}\n" \ + "{\n" \ + "hcall %[trap], wbs = %#\n\t" \ + "}\n" \ + "addd 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((u64) (_sys_num)), \ + [arg1] "ri" ((u64) (_arg1)), \ + [arg2] "ri" ((u64) (_arg2)), \ + [arg3] "ri" ((u64) (_arg3)), \ + [arg4] "ri" ((u64) (_arg4)), \ + [arg5] "ri" ((u64) (_arg5)), \ + [arg6] "ri" ((u64) (_arg6)), \ + [arg7] "ri" ((u64) (_arg7)) \ + : E2K_HCALL_CLOBBERS); \ + __res; \ +}) + +#define E2K_HCALL(trap, sys_num, num_args, args...) 
\ + __E2K_HCALL_##num_args(trap, sys_num, args) + + +/* Clobbers "ctpr" are here to tell lcc that there is a return inside */ +#define E2K_HRET_CLOBBERS "ctpr1", "ctpr2", "ctpr3" + +#define E2K_HRET(_ret) \ +do { \ + asm volatile ( \ + "addd 0x0, %[ret], %%r0\n" \ + "{.word 0x00005012\n" /* HRET */ \ + " .word 0xc0000020\n" \ + " .word 0x30000003\n" \ + " .word 0x00000000}\n" \ + : \ + : [ret] "ir" (_ret) \ + : E2K_HRET_CLOBBERS); \ + unreachable(); \ +} while (0) + +#define __arch_this_cpu_read(_var, size) \ +({ \ + typeof(_var) __ret; \ + _Pragma("no_asm_inline") \ + asm ("ld" size " %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[ret]" \ + : [ret] "=r" (__ret) \ + : [var] "r" (&(_var)) \ + : "memory"); \ + __ret; \ +}) + +#define __arch_this_cpu_write(_var, _val, size) \ +do { \ + _Pragma("no_asm_inline") \ + asm ("st" size " %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[val]" \ + :: [var] "r" (&(_var)), [val] "r" (_val) \ + : "memory"); \ +} while (0) + +/* Use relaxed atomics for percpu if they are available */ +#if CONFIG_CPU_ISET >= 5 + +# define __arch_pcpu_atomic_xchg(_val, _var, size) \ +({ \ + typeof(_var) __ret; \ + HWBUG_ATOMIC_BEGIN(__my_cpu_offset + &(_var)); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_RELAXED_MB \ + "\n2:" \ + "\n{"\ + "\nnop 5" /* bug 92891 - optimize for performance */ \ + "\nld" size ",0 %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[ret], mas=0x7" \ + "\n}" \ + "\n{"\ + "\nst" size "," RELAXED_MB_ATOMIC_CHANNEL " \ + %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[val], mas=" RELAXED_MB_ATOMIC_MAS \ + "\nibranch 2b ? 
%%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_RELAXED_MB \ + : [ret] "=&r" (__ret) \ + : [var] "r" (&(_var)), [val] "r" ((u64) (_val)) \ + : "memory"); \ + HWBUG_ATOMIC_END(); \ + __ret; \ +}) + +# define __arch_pcpu_atomic_cmpxchg(_old, _new, _var, size, sxt_size) \ +({ \ + typeof(_var) __ret, __stored_val; \ + HWBUG_ATOMIC_BEGIN(__my_cpu_offset + &(_var)); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_RELAXED_MB \ + "\n3:" \ + "\n{" \ + "\nnop 4" \ + "\nld" size ",0 %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[ret], mas=0x7" \ + "\n}" \ + "\n{" \ + "\nsxt "#sxt_size", %[ret], %[ret]" \ + "\naddd 0x0, %[new], %[stored_val]" \ + "\n}" \ + "\n{" \ + "\nnop 1" \ + "\ncmpedb %[ret], %[old], %%pred2" \ + "\n}" \ + "\n{" \ + "\nnop 1" /* bug 92891 - optimize for performance */ \ + "\naddd 0x0, %[ret], %[stored_val] ? ~ %%pred2" \ + "\n}" \ + "\n{" \ + "\nst" size "," RELAXED_MB_ATOMIC_CHANNEL " \ + %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[stored_val], mas=" RELAXED_MB_ATOMIC_MAS \ + "\nibranch 3b ? %%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_RELAXED_MB \ + : [ret] "=&r" (__ret), [stored_val] "=&r" (__stored_val) \ + : [var] "r" (&(_var)), [new] "ir" (_new), [old] "ir" (_old) \ + : "memory", "pred2"); \ + HWBUG_ATOMIC_END(); \ + __ret; \ +}) + +# define __arch_pcpu_atomic_cmpxchg_word(_old, _new, _var) \ +({ \ + typeof(_var) __ret, __stored_val; \ + HWBUG_ATOMIC_BEGIN(__my_cpu_offset + &(_var)); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_RELAXED_MB \ + "\n3:" \ + "\n{"\ + "\nnop 4"\ + "\nldw,0 %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[ret], mas=0x7" \ + "\n}" \ + "\n{" \ + "\nnop 1"\ + "\nadds 0x0, %[new], %[stored_val]" \ + "\ncmpesb %[ret], %[old], %%pred2" \ + "\n}" \ + "\n{" \ + "\nnop 2" /* bug 92891 - optimize for performance */ \ + "\nadds 0x0, %[ret], %[stored_val] ? ~ %%pred2" \ + "\n}" \ + "\n{" \ + "\nstw," RELAXED_MB_ATOMIC_CHANNEL " \ + %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[stored_val], mas=" RELAXED_MB_ATOMIC_MAS \ + "\nibranch 3b ? 
%%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_RELAXED_MB \ + : [ret] "=&r" (__ret), [stored_val] "=&r" (__stored_val) \ + : [var] "r" (&(_var)), [new] "ir" (_new), [old] "ir" (_old) \ + : "memory", "pred2"); \ + HWBUG_ATOMIC_END(); \ + __ret; \ +}) + +# define __arch_pcpu_atomic_cmpxchg_dword(_old, _new, _var) \ +({ \ + typeof(_var) __ret, __stored_val; \ + HWBUG_ATOMIC_BEGIN(__my_cpu_offset + &(_var)); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_RELAXED_MB \ + "\n3:" \ + "\n{"\ + "\nnop 4"\ + "\nldd,0 %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[ret], mas=0x7" \ + "\n}" \ + "\n{" \ + "\nnop 1"\ + "\naddd 0x0, %[new], %[stored_val]" \ + "\ncmpedb %[ret], %[old], %%pred2" \ + "\n}" \ + "\n{" \ + "\nnop 2" /* bug 92891 - optimize for performance */ \ + "\naddd 0x0, %[ret], %[stored_val] ? ~ %%pred2" \ + "\n}" \ + "\n{" \ + "\nstd," RELAXED_MB_ATOMIC_CHANNEL " \ + %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[stored_val], mas=" RELAXED_MB_ATOMIC_MAS \ + "\nibranch 3b ? %%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_RELAXED_MB \ + : [ret] "=&r" (__ret), [stored_val] "=&r" (__stored_val) \ + : [var] "r" (&(_var)), [new] "ir" ((u64) (_new)), [old] "ir" ((u64) (_old)) \ + : "memory", "pred2"); \ + HWBUG_ATOMIC_END(); \ + __ret; \ +}) + +#define __arch_pcpu_atomic_op(_val, _var, size, op) \ +({ \ + typeof(_var) __ret; \ + HWBUG_ATOMIC_BEGIN(__my_cpu_offset + &(_var)); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_RELAXED_MB \ + "\n1:" \ + "\n{"\ + "\nnop 4"\ + "\nld" size ",0 %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[ret], mas=0x7" \ + "\n}" \ + "\n" op " %[ret], %[val], %[ret]" \ + "\n{"\ + "\nst" size "," RELAXED_MB_ATOMIC_CHANNEL " \ + %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[ret], mas=" RELAXED_MB_ATOMIC_MAS \ + "\nibranch 1b ? 
%%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_RELAXED_MB \ + : [ret] "=&r" (__ret) \ + : [var] "r" (&(_var)), [val] "ir" ((u64) (_val)) \ + : "memory"); \ + HWBUG_ATOMIC_END(); \ + __ret; \ +}) + +#endif /* #ifndef CONFIG_CPU_ES2 */ + +/* Disable %aalda writes on iset v6 (iset correction v6.107). + * Use alternatives since we cannot do jumps at this point + * (%ctpr's have been restored already). */ +#define NATIVE_SET_ALL_AALDAS(aaldas_p) \ +do { \ + u32 *aaldas = (u32 *)(aaldas_p); \ + asm ( \ + ALTERNATIVE_1_ALTINSTR \ + /* CPU_FEAT_ISET_V6 version */ \ + "{nop}" \ + ALTERNATIVE_2_OLDINSTR \ + /* Default version */ \ + "{aaurws,2 %[aalda0], %%aalda0\n" \ + " aaurws,5 %[aalda32], %%aalda0}\n" \ + "{aaurws,2 %[aalda4], %%aalda4\n" \ + " aaurws,5 %[aalda36], %%aalda4}\n" \ + "{aaurws,2 %[aalda8], %%aalda8\n" \ + " aaurws,5 %[aalda40], %%aalda8}\n" \ + "{aaurws,2 %[aalda12], %%aalda12\n" \ + " aaurws,5 %[aalda44], %%aalda12}\n" \ + "{aaurws,2 %[aalda16], %%aalda16\n" \ + " aaurws,5 %[aalda48], %%aalda16}\n" \ + "{aaurws,2 %[aalda20], %%aalda20\n" \ + " aaurws,5 %[aalda52], %%aalda20}\n" \ + "{aaurws,2 %[aalda24], %%aalda24\n" \ + " aaurws,5 %[aalda56], %%aalda24}\n" \ + "{aaurws,2 %[aalda28], %%aalda28\n" \ + " aaurws,5 %[aalda60], %%aalda28}\n" \ + ALTERNATIVE_3_FEATURE(%[facility]) \ + :: [aalda0] "r" (aaldas[0]), [aalda32] "r" (aaldas[8]), \ + [aalda4] "r" (aaldas[1]), [aalda36] "r" (aaldas[9]), \ + [aalda8] "r" (aaldas[2]), [aalda40] "r" (aaldas[10]), \ + [aalda12] "r" (aaldas[3]), [aalda44] "r" (aaldas[11]), \ + [aalda16] "r" (aaldas[4]), [aalda48] "r" (aaldas[12]), \ + [aalda20] "r" (aaldas[5]), [aalda52] "r" (aaldas[13]), \ + [aalda24] "r" (aaldas[6]), [aalda56] "r" (aaldas[14]), \ + [aalda28] "r" (aaldas[7]), [aalda60] "r" (aaldas[15]), \ + [facility] "i" (CPU_FEAT_ISET_V6)); \ +} while (0) + +/* Force load OSGD->GD */ +#define E2K_LOAD_OSGD_TO_GD() \ +do { \ + asm volatile ("{nop; sdisp %%ctpr2, 11}\n" \ + "{call %%ctpr2, wbs=%#}\n" \ + ::: "call"); \ +} while (0) 
+ +/* + * Arithmetic operations that are atomic with regard to interrupts. + * I.e. an interrupt can arrive only before or after the operation. + */ +#define E2K_INSFD_ATOMIC(src1, src2, src3_dst) \ +do { \ + _Pragma("no_asm_inline") \ + asm ("insfd %[new_value], %[insf_params], %[reg], %[reg]" \ + : [reg] "+r" (src3_dst) \ + : [insf_params] "i" (src2), \ + [new_value] "ir" (src1)); \ +} while (0) + +#define E2K_ADDD_ATOMIC(src1_dst, src2) \ +do { \ + _Pragma("no_asm_inline") \ + asm ("addd %[reg], %[val], %[reg]" \ + : [reg] "+r" (src1_dst) \ + : [val] "ir" (src2)); \ +} while (0) + +#define E2K_SUBD_ATOMIC(src1_dst, src2) \ +do { \ + _Pragma("no_asm_inline") \ + asm ("subd %[reg], %[val], %[reg]" \ + : [reg] "+r" (src1_dst) \ + : [val] "ir" (src2)); \ +} while (0) + +#define E2K_SUBD_ATOMIC__SHRD32(src1_dst, src2, _old) \ +do { \ + asm ("{subd %[reg], %[val], %[reg]\n" \ + " shrd %[reg], 32, %[old]}" \ + : [reg] "+r" (src1_dst), \ + [old] "=r" (_old) \ + : [val] "i" (src2)); \ +} while (0) + +#endif /* __ASSEMBLY__ */ + +#endif /* _E2K_API_H_ */ diff --git a/arch/e2k/include/asm/e2k_debug.h b/arch/e2k/include/asm/e2k_debug.h new file mode 100644 index 000000000000..0c448a2bc5d3 --- /dev/null +++ b/arch/e2k/include/asm/e2k_debug.h @@ -0,0 +1,841 @@ +/* + * asm-e2k/e2k_debug.h + */ +#ifndef _E2K_DEBUG_H_ +#define _E2K_DEBUG_H_ + +#ifndef __ASSEMBLY__ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define IS_KERNEL_THREAD(task, mm) \ +({ \ + e2k_addr_t ps_base; \ + \ + ps_base = (e2k_addr_t)task_thread_info(task)->u_hw_stack.ps.base; \ + ((mm) == NULL || ps_base >= TASK_SIZE); \ +}) + +extern void print_stack_frames(struct task_struct *task, + struct pt_regs *pt_regs, int show_reg_window) __cold; +extern void print_mmap(struct task_struct *task) __cold; +extern void print_va_tlb(e2k_addr_t addr, int large_page) __cold; +extern void print_all_TC(const 
trap_cellar_t *TC, int TC_count) __cold; +extern void print_tc_record(const trap_cellar_t *tcellar, int num) __cold; +extern u64 print_all_TIRs(const e2k_tir_t *TIRs, u64 nr_TIRs) __cold; +extern void print_address_page_tables(unsigned long address, + int last_level_only) __cold; +extern void print_pt_regs(const pt_regs_t *regs) __cold; + +__init extern void setup_stack_print(void); + +static inline void print_address_tlb(unsigned long address) +{ + print_va_tlb(address, 0); + print_va_tlb(pte_virt_offset(round_down(address, PTE_SIZE)), 0); + print_va_tlb(pmd_virt_offset(round_down(address, PMD_SIZE)), 0); + print_va_tlb(pud_virt_offset(round_down(address, PUD_SIZE)), 0); +} + +/** + * *parse_chain_fn_t - function to be called on every frame in chain stack + * @crs - contents of current frame in chain stack + * @real_frame_addr - real address of current frame, can be used to modify frame + * @corrected_frame_addr - address of current frame where it would be in stack + * @flags - PCF_FLUSH_NEEDED if chain stack flush is needed before modifying, + * PCF_IRQS_CLOSE_NEEDED if irqs should be closed before modifying + * @arg - passed argument from parse_chain_stack() + * + * The distinction between @real_frame_addr and @corrected_frame_addr is + * important. Normally top of user chain stack is spilled to kernel chain + * stack, in which case @real_frame_addr points to spilled frame in kernel + * stack and @corrected_frame_addr holds the address in userspace where + * the frame _would_ be if it was spilled to userspace. In all other cases + * these two variables are equal. + * + * Generally @corrected_frame_addr is used in comparisons and + * @real_frame_addr is used for modifying stack in memory. + * + * IMPORTANT: if function wants to modify frame contents it must flush + * chain stack if @flush_needed is set. 
+ */ +#define PCF_FLUSH_NEEDED 0x1 +#define PCF_IRQS_CLOSE_NEEDED 0x2 +typedef int (*parse_chain_fn_t)(e2k_mem_crs_t *crs, + unsigned long real_frame_addr, + unsigned long corrected_frame_addr, + int flags, void *arg); +#define PCS_USER 0x1 +#define PCS_OPEN_IRQS 0x2 +extern notrace long parse_chain_stack(int flags, struct task_struct *p, + parse_chain_fn_t func, void *arg); + + +extern void *kernel_symtab; +extern long kernel_symtab_size; +extern void *kernel_strtab; +extern long kernel_strtab_size; + +#define boot_kernel_symtab boot_get_vo_value(kernel_symtab) +#define boot_kernel_symtab_size boot_get_vo_value(kernel_symtab_size) +#define boot_kernel_strtab boot_get_vo_value(kernel_strtab) +#define boot_kernel_strtab_size boot_get_vo_value(kernel_strtab_size) + +#define NATIVE_IS_USER_ADDR(task, addr) \ + (((e2k_addr_t)(addr)) < NATIVE_TASK_SIZE) +#define NATIVE_GET_PHYS_ADDR(task, addr) \ +({ \ + e2k_addr_t phys; \ + if (NATIVE_IS_USER_ADDR(task, addr)) \ + phys = (unsigned long)user_address_to_pva(task, addr); \ + else \ + phys = (unsigned long)kernel_address_to_pva(addr); \ + phys; \ +}) + +/* Read instruction word (two syllables) from IP address */ +static inline unsigned long +native_read_instr_on_IP(e2k_addr_t ip, e2k_addr_t phys_ip) +{ + return NATIVE_READ_MAS_D(phys_ip, MAS_LOAD_PA); +} +/* Write modified instruction word at IP address */ +static inline void +native_modify_instr_on_IP(e2k_addr_t ip, e2k_addr_t phys_ip, + unsigned long instr_word) +{ + NATIVE_WRITE_MAS_D(phys_ip, instr_word, MAS_STORE_PA); +} + +#define SIZE_PSP_STACK (16 * 4096) +#define DATA_STACK_PAGES 16 +#define SIZE_DATA_STACK (DATA_STACK_PAGES * PAGE_SIZE) + +#define SIZE_CHAIN_STACK KERNEL_PC_STACK_SIZE + +/* Maximum number of user windows where a trap occured + * for which additional registers will be printed (ctpr's, lsr and ilcr). 
*/ +#define MAX_USER_TRAPS 12 + +/* Maximum number of pt_regs being marked as such + * when showing kernel data stack */ +#define MAX_PT_REGS_SHOWN 30 + +typedef struct printed_trap_regs { + bool valid; + u64 frame; + e2k_ctpr_t ctpr1; + e2k_ctpr_t ctpr2; + e2k_ctpr_t ctpr3; + e2k_ctpr_hi_t ctpr1_hi; + e2k_ctpr_hi_t ctpr2_hi; + e2k_ctpr_hi_t ctpr3_hi; + u64 lsr; + u64 ilcr; + u64 lsr1; + u64 ilcr1; + u64 sbbp[SBBP_ENTRIES_NUM]; +} printed_trap_regs_t; + +typedef struct stack_regs { + bool used; + bool valid; + bool ignore_banner; + struct task_struct *task; + e2k_mem_crs_t crs; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + void *base_psp_stack; + u64 user_size_psp_stack; + u64 orig_base_psp_stack_u; + u64 orig_base_psp_stack_k; + void *psp_stack_cache; + u64 size_psp_stack; + bool show_trap_regs; + bool show_user_regs; + struct printed_trap_regs trap[MAX_USER_TRAPS]; +#ifdef CONFIG_GREGS_CONTEXT + struct global_regs gregs; + bool gregs_valid; +#endif +#ifdef CONFIG_DATA_STACK_WINDOW + bool show_k_data_stack; + void *base_k_data_stack; + void *k_data_stack_cache; + u64 size_k_data_stack; + void *real_k_data_stack_addr; + struct { + unsigned long addr; + bool valid; + } pt_regs[MAX_PT_REGS_SHOWN]; +#endif + u64 size_chain_stack; + void *base_chain_stack; + u64 user_size_chain_stack; + u64 orig_base_chain_stack_u; + u64 orig_base_chain_stack_k; + void *chain_stack_cache; +} stack_regs_t; + +extern void print_chain_stack(struct stack_regs *regs, + int show_reg_window); +extern void copy_stack_regs(struct task_struct *task, + const struct pt_regs *limit_regs, struct stack_regs *regs); + +extern struct stack_regs stack_regs_cache[NR_CPUS]; +extern int debug_userstack; +extern int print_window_regs; + +#ifdef CONFIG_DATA_STACK_WINDOW +extern int debug_datastack; +#endif + +#ifndef CONFIG_VIRTUALIZATION +/* it is native kernel without any virtualization */ +#define GET_PHYS_ADDR(task, addr) NATIVE_GET_PHYS_ADDR(task, 
addr) +#define print_all_guest_stacks() /* nothing to do */ +#define print_guest_vcpu_stack(vcpu) /* nothing to do */ +#define debug_guest_regs(task) false /* none any guests */ +#define get_cpu_type_name() "CPU" /* real CPU */ + +/* Read instruction word (two syllables) from IP address */ +static inline unsigned long +read_instr_on_IP(e2k_addr_t ip, e2k_addr_t phys_ip) +{ + return native_read_instr_on_IP(ip, phys_ip); +} +/* Write modified instruction word at IP address */ +static inline void +modify_instr_on_IP(e2k_addr_t ip, e2k_addr_t phys_ip, + unsigned long instr_word) +{ + native_modify_instr_on_IP(ip, phys_ip, instr_word); +} +static inline void +print_guest_stack(struct task_struct *task, + stack_regs_t *const regs, bool show_reg_window) +{ + return; +} +static inline void +host_ftrace_stop(void) +{ + return; +} +static inline void +host_ftrace_dump(void) +{ + return; +} + +static const bool kvm_debug = false; +#else /* CONFIG_VIRTUALIZATION */ +/* it is native host kernel with virtualization support */ +/* or it is paravirtualized host/guest kernel */ +/* or it is native guest kernel */ +#include +#endif /* ! 
CONFIG_VIRTUALIZATION */ + +/* + * Print Chain Regs CR0 and CR1 + */ +#undef DEBUG_CRs_MODE +#undef DebugCRs +#define DEBUG_CRs_MODE 0 +#define DebugCRs(POS) if (DEBUG_CRs_MODE) print_chain_stack_regs(POS) +extern inline void +print_chain_stack_regs(char *point) +{ + register e2k_cr0_hi_t cr0_hi = READ_CR0_HI_REG(); + register e2k_cr0_lo_t cr0_lo = READ_CR0_LO_REG(); + register e2k_cr1_hi_t cr1_hi = READ_CR1_HI_REG(); + register e2k_cr1_lo_t cr1_lo = READ_CR1_LO_REG(); + register e2k_psr_t psr; + + printk("Procedure chain registers state"); + if (point != NULL) + printk(" at %s :", point); + printk("\n"); + + printk(" CR0.hi ip 0x%lx\n", (long)AS_STRUCT(cr0_hi).ip << 3); + printk(" CR0.lo pf 0x%lx\n", (long)AS_STRUCT(cr0_lo).pf); + printk(" CR1.hi ussz 0x%x br 0x%x\n", + (int)AS_STRUCT(cr1_hi).ussz << 4, (int)AS_STRUCT(cr1_hi).br); + AS_WORD(psr) = AS_STRUCT(cr1_lo).psr; + printk(" CR1.lo: unmie %d nmie %d uie %d lw %d sge %d ie %d " + "pm %d\n", + (int)AS_STRUCT(psr).unmie, + (int)AS_STRUCT(psr).nmie, + (int)AS_STRUCT(psr).uie, + (int)AS_STRUCT(psr).lw, + (int)AS_STRUCT(psr).sge, + (int)AS_STRUCT(psr).ie, + (int)AS_STRUCT(psr).pm); + printk(" cuir 0x%x wbs 0x%x wpsz %d wfx %d ein %d\n", + (int)AS_STRUCT(cr1_lo).cuir, (int)AS_STRUCT(cr1_lo).wbs, + (int)AS_STRUCT(cr1_lo).wpsz, (int)AS_STRUCT(cr1_lo).wfx, + (int)AS_STRUCT(cr1_lo).ein); +} + +/* + * Registers CPU + */ + +#define DebugCpuR(str) if (DEBUG_CpuR_MODE) print_cpu_regs(str) +#define DebugSPRs(POS) if (DEBUG_SPRs_MODE) print_stack_pointers_reg(POS) +static inline void +print_cpu_regs(char *str) +{ + pr_info("%s\n %s", str, "CPU REGS value:\n"); + pr_info("usbr %llx\n", READ_SBR_REG_VALUE()); + pr_info("usd.hi.curptr %llx usd.hi.size %llx\n", + READ_USD_HI_REG_VALUE() & 0xffffffff, + (READ_USD_HI_REG_VALUE() >> 32) & 0xffffffff); + pr_info("usd.lo.base 0x%llx\n", + READ_USD_LO_REG_VALUE() & 0xffffffffffff); + pr_info("psp.hi.ind %llx psp.hi.size %llx\n", + READ_PSP_HI_REG_VALUE() & 0xffffffff, + 
(READ_PSP_HI_REG_VALUE() >> 32) & 0xffffffff); + pr_info("psp.lo %llx\n", READ_PSP_LO_REG_VALUE()); + pr_info("pcsp.hi.ind %llx pcsp.hi.size %llx\n", + READ_PCSP_HI_REG_VALUE() & 0xffffffff, + (READ_PCSP_HI_REG_VALUE() >> 32) & 0xffffffff); + pr_info("pcsp.lo %llx\n", READ_PCSP_LO_REG_VALUE()); + pr_info("cr0.hi.ip %llx\n", + READ_CR0_HI_REG_VALUE() & ~0x7UL); + pr_info("cr1.hi.rbs %llx cr1.hi.rsz %llx\ncr1.hi.rcur %llx " + "cr1.hi.psz %llx cr1.hi.pcur %llx\ncr1.hi.ussz %llx\n", + READ_CR1_HI_REG_VALUE() & 0x3f, + READ_CR1_HI_REG_VALUE() >> 6 & 0x3f, + READ_CR1_HI_REG_VALUE() >> 12 & 0x3f, + READ_CR1_HI_REG_VALUE() >> 18 & 0x1f, + READ_CR1_HI_REG_VALUE() >> 23 & 0x1f, + READ_CR1_HI_REG_VALUE() >> 36 & 0xfffffff); + pr_info("cr1.lo.wpsz %llx cr1.lo.wbs %llx cr1.lo.psr %llx\n", + (READ_CR1_LO_REG_VALUE() >> 26) & 0x7f, + (READ_CR1_LO_REG_VALUE() >> 33) & 0x7f, + (READ_CR1_LO_REG_VALUE() >> 57) & 0x7); + pr_info("wd %llx\n", READ_WD_REG_VALUE()); +} + +extern inline void +print_stack_pointers_reg(char *point) +{ + register e2k_psp_hi_t psp_hi = READ_PSP_HI_REG(); + register e2k_psp_lo_t psp_lo = READ_PSP_LO_REG(); + register e2k_pcsp_hi_t pcsp_hi = READ_PCSP_HI_REG(); + register e2k_pcsp_lo_t pcsp_lo = READ_PCSP_LO_REG(); + register long pshtp_reg = READ_PSHTP_REG_VALUE() & + 0xffffUL; + register long pcshtp_reg = READ_PCSHTP_REG_SVALUE() & + 0xffffUL; + + pr_info("Stack pointer registers state"); + if (point != NULL) + pr_info(" at %s :", point); + pr_info("\n"); + pr_info(" USBR_base 0x%llx\n", + READ_USBR_REG().USBR_base); + pr_info(" USD_size 0x%x USD_p %d USD_base 0x%llx\n", + READ_USD_HI_REG().USD_hi_size, + READ_USD_LO_REG().USD_lo_p, + READ_USD_LO_REG().USD_lo_base); + + pr_info(" PSP_size 0x%x PSP_ind 0x%x PSP_base 0x%lx PSHTP " + "0x%llx (0x%lx)\n", + psp_hi.PSP_hi_size, + psp_hi.PSP_hi_ind, pshtp_reg, + psp_lo.PSP_lo_base, + (long)(psp_hi.PSP_hi_ind + pshtp_reg)); + if (psp_hi.PSP_hi_ind + pshtp_reg >= psp_hi.PSP_hi_size) { + pr_info("PROCEDURE STACK 
OVERFLOW 0x%lx > size 0x%x\n", + (long)(psp_hi.PSP_hi_ind + pshtp_reg), + psp_hi.PSP_hi_size); + } + pr_info(" PCSP_size 0x%x PCSP_ind 0x%x PCSP_base 0x%lx " + "PCSHTP 0x%llx (0x%lx)\n", + pcsp_hi.PCSP_hi_size, + pcsp_hi.PCSP_hi_ind, pcshtp_reg, + pcsp_lo.PCSP_lo_base, + (long)(pcsp_hi.PCSP_hi_ind + pcshtp_reg)); + + DebugCRs(point); + +} + +static inline int print_siginfo(siginfo_t *info, struct pt_regs *regs) +{ + pr_info("Signal #%d info structure:\n" + " errno %d code %d pid %d uid %d\n" + " trap #%d address 0x%px\n", + info->si_signo, info->si_errno, info->si_code, info->si_pid, + info->si_uid, info->si_trapno, info->si_addr); + + print_pt_regs(regs); + + return 1; +} + + +/* + * Print Switch Regs + */ +extern inline void +print_sw_regs(char *point, sw_regs_t *sw_regs) +{ + pr_info("%s\n", point); + pr_info("top: %lx\n", sw_regs->top); + pr_info("usd_lo: %llx\n", AS_WORD(sw_regs->usd_lo)); + pr_info("usd_hi: %llx\n", AS_WORD(sw_regs->usd_hi)); + pr_info("psp_lo: %llx\n", AS_WORD(sw_regs->psp_lo)); + pr_info("psp_hi: %llx\n", AS_WORD(sw_regs->psp_hi)); + pr_info("pcsp_lo: %llx\n", AS_WORD(sw_regs->pcsp_lo)); + pr_info("pcsp_hi: %llx\n", AS_WORD(sw_regs->pcsp_hi)); +} + +/* + * Print PAGE_FAULT (TC TRAP_CELLAR) + */ + +#define DebugTC(a, b) \ + if(DEBUG_PAGE_FAULT_MODE) print_tc_state(a, b); +#include +static inline void print_tc_state(const trap_cellar_t *tcellar, int num) +{ + tc_fault_type_t ftype; + tc_dst_t dst ; + tc_opcode_t opcode; + u64 data; + u8 data_tag; + + AW(dst) = AS(tcellar->condition).dst; + AW(opcode) = AS(tcellar->condition).opcode; + AW(ftype) = AS(tcellar->condition).fault_type; + + load_value_and_tagd(&tcellar->data, &data, &data_tag); + + printk("\n----------------------------" + "TRAP_CELLAR record #%d:" + "-----------------------------\n" + "address = 0x%016lx\n" + "data = 0x%016llx tag = 0x%x\n" + "condition = 0x%016llx:\n" + " dst = 0x%05x: address = 0x%04x, vl = 0x%x, vr = 0x%x\n" + " opcode = 0x%03x: fmt = 0x%02x, npsp = 0x%x\n\n" + 
" store = 0x%x, s_f = 0x%x, mas = 0x%x\n" + " root = 0x%x, scal = 0x%x, sru = 0x%x\n" + " chan = 0x%x, se = 0x%x, pm = 0x%x\n\n" + " fault_type = 0x%x:\n" + " intl_res_bits = %d MLT_trap = %d\n" + " ph_pr_page = %d page_bound = %d\n" + " io_page = %d isys_page = %d\n" + " prot_page = %d priv_page = %d\n" + " illegal_page = %d nwrite_page = %d\n" + " page_miss = %d ph_bound = %d\n" + " global_sp = %d\n\n" + " miss_lvl = 0x%x, num_align = 0x%x, empt = 0x%x\n" + " clw = 0x%x, rcv = 0x%x dst_rcv = 0x%x\n" + "----------------------------------------------------" + "---------------------------\n", num, + tcellar->address, + data, data_tag, + AW(tcellar->condition), + (u32)AW(dst),(u32)(AS(dst).address), (u32)(AS(dst).vl), + (u32)(AS(dst).vr), + (u32)AW(opcode), (u32)(AS(opcode).fmt),(u32)(AS(opcode).npsp), + (u32)AS(tcellar->condition).store, + (u32)AS(tcellar->condition).s_f, + (u32)AS(tcellar->condition).mas, + (u32)AS(tcellar->condition).root, + (u32)AS(tcellar->condition).scal, + (u32)AS(tcellar->condition).sru, + (u32)AS(tcellar->condition).chan, + (u32)AS(tcellar->condition).spec, + (u32)AS(tcellar->condition).pm, + (u32)AS(tcellar->condition).fault_type, + (u32)AS(ftype).intl_res_bits, (u32)(AS(ftype).exc_mem_lock), + (u32)AS(ftype).ph_pr_page, (u32)AS(ftype).page_bound, + (u32)AS(ftype).io_page, (u32)AS(ftype).isys_page, + (u32)AS(ftype).prot_page, (u32)AS(ftype).priv_page, + (u32)AS(ftype).illegal_page, (u32)AS(ftype).nwrite_page, + (u32)AS(ftype).page_miss, (u32)AS(ftype).ph_bound, + (u32)AS(ftype).global_sp, + (u32)AS(tcellar->condition).miss_lvl, + (u32)AS(tcellar->condition).num_align, + (u32)AS(tcellar->condition).empt, + (u32)AS(tcellar->condition).clw, + (u32)AS(tcellar->condition).rcv, + (u32)AS(tcellar->condition).dst_rcv); + +} + + +/* + * Set instruction data breakpoint at virtual address @addr. + * + * NOTE: breakpoint is set only for the current thread! + * To set it for the whole system, remove restoring of + * debug registers on a task switch. 
+ */ +static inline int set_hardware_instr_breakpoint(u64 addr, + const int stop, const int cp_num, const int v) +{ + u64 dibcr, dibsr, dibar = (u64) addr; + + switch (cp_num) { + case 0: WRITE_DIBAR0_REG_VALUE(dibar); break; + case 1: WRITE_DIBAR1_REG_VALUE(dibar); break; + case 2: WRITE_DIBAR2_REG_VALUE(dibar); break; + case 3: WRITE_DIBAR3_REG_VALUE(dibar); break; + default: + if (__builtin_constant_p((cp_num))) + BUILD_BUG(); + return -EINVAL; + } + + /* Rewrite only the requested breakpoint. */ + dibcr = ( + !!(v) /* enable*/ + | (1ULL << 1) /* generate exc_instr_debug */ + ) << (cp_num * 2); + dibcr |= (!!stop << 9); + dibcr |= READ_DIBCR_REG_VALUE() & ~E2K_DIBCR_MASK(cp_num); + + dibsr = READ_DIBSR_REG_VALUE() & ~E2K_DIBSR_MASK(cp_num); + + WRITE_DIBCR_REG_VALUE(dibcr); + WRITE_DIBSR_REG_VALUE(dibsr); + + return 0; +} + + +/* + * Set hardware data breakpoint at virtual address @addr. + * + * NOTE: breakpoint is set only for the current thread! + * To set it for the whole system, remove restoring of + * debug registers on a task switch. + */ +static inline int set_hardware_data_breakpoint(u64 addr, u64 size, + const int write, const int read, + const int stop, const int cp_num, const int v) +{ + u64 ddbcr, ddbsr, ddbar = (u64) addr; + + switch (size) { + case 1: + size = 1; + break; + case 2: + size = 2; + break; + case 4: + size = 3; + break; + case 8: + size = 4; + break; + case 16: + size = 5; + break; + default: + if (__builtin_constant_p((size))) + BUILD_BUG(); + return -EINVAL; + } + + switch (cp_num) { + case 0: + WRITE_DDBAR0_REG_VALUE(ddbar); + break; + case 1: + WRITE_DDBAR1_REG_VALUE(ddbar); + break; + case 2: + WRITE_DDBAR2_REG_VALUE(ddbar); + break; + case 3: + WRITE_DDBAR3_REG_VALUE(ddbar); + break; + default: + if (__builtin_constant_p((cp_num))) + BUILD_BUG(); + return -EINVAL; + } + + /* Rewrite only the requested breakpoint. 
*/ + ddbcr = ( + !!(v) /* enable*/ + | (0ULL << 1) /* primary space */ + | ((!!write) << 2) + | ((!!read) << 3) + | (size << 4) + | (1ULL << 7) /* sync */ + | (1ULL << 8) /* speculative */ + | (1ULL << 9) /* ap */ + | (1ULL << 10) /* spill/fill */ + | (1ULL << 11) /* hardware */ + | (1ULL << 12) /* generate exc_data_debug */ + ) << (cp_num * 14); + ddbcr |= READ_DDBCR_REG_VALUE() & ~E2K_DDBCR_MASK(cp_num); + + ddbsr = READ_DDBSR_REG_VALUE() & ~E2K_DDBSR_MASK(cp_num); + + WRITE_DDBCR_REG_VALUE(ddbcr); + WRITE_DDBSR_REG_VALUE(ddbsr); + if (stop) { + e2k_dibcr_t dibcr; + + dibcr = READ_DIBCR_REG(); + AS(dibcr).stop = 1; + WRITE_DIBCR_REG(dibcr); + } + + return 0; +} + +static inline int reset_hardware_data_breakpoint(void *addr) +{ + u64 ddbcr, ddbsr, ddbar; + int cp_num; + + ddbcr = READ_DDBCR_REG_VALUE(); + for (cp_num = 0; cp_num < 4; cp_num++, ddbcr >>= 14) { + if (!(ddbcr & 0x1)) /* valid */ + continue; + switch (cp_num) { + case 0: + ddbar = READ_DDBAR0_REG_VALUE(); + break; + case 1: + ddbar = READ_DDBAR1_REG_VALUE(); + break; + case 2: + ddbar = READ_DDBAR2_REG_VALUE(); + break; + case 3: + ddbar = READ_DDBAR3_REG_VALUE(); + break; + default: + if (__builtin_constant_p((cp_num))) + BUILD_BUG(); + return -EINVAL; + } + if ((ddbar & E2K_VA_MASK) == ((e2k_addr_t)addr & E2K_VA_MASK)) + break; + } + if (cp_num >= 4) + return cp_num; + + /* Reset only the requested breakpoint. 
*/ + ddbcr = READ_DDBCR_REG_VALUE() & (~(0x3FFFULL << (cp_num * 14))); + ddbsr = READ_DDBSR_REG_VALUE() & (~(0x3FFFULL << (cp_num * 14))); + mb(); /* wait for completion of all load/store in progress */ + WRITE_DDBCR_REG_VALUE(ddbcr); + WRITE_DDBSR_REG_VALUE(ddbsr); + + switch (cp_num) { + case 0: + WRITE_DDBAR0_REG_VALUE(0); + break; + case 1: + WRITE_DDBAR1_REG_VALUE(0); + break; + case 2: + WRITE_DDBAR2_REG_VALUE(0); + break; + case 3: + WRITE_DDBAR3_REG_VALUE(0); + break; + default: + if (__builtin_constant_p((cp_num))) + BUILD_BUG(); + return -EINVAL; + } + + return cp_num; +} + +struct data_breakpoint_params { + void *address; + u64 size; + int write; + int read; + int stop; + int cp_num; +}; +extern void nmi_set_hardware_data_breakpoint( + struct data_breakpoint_params *params); +/** + * set_hardware_data_breakpoint_on_each_cpu() - set hardware data breakpoint + * on every online cpu. + * @addr: virtual address of the breakpoint. + * + * This uses non-maskable interrupts to set the breakpoint for the whole + * system atomically. That is, by the time this function returns the + * breakpoint will be set everywhere. + */ +#define set_hardware_data_breakpoint_on_each_cpu( \ + addr, sz, wr, rd, st, cp) \ +({ \ + struct data_breakpoint_params params; \ + MAYBE_BUILD_BUG_ON((sz) != 1 && (sz) != 2 && (sz) != 4 \ + && (sz) != 8 && (sz) != 16); \ + MAYBE_BUILD_BUG_ON((cp) != 0 && (cp) != 1 \ + && (cp) != 2 && (cp) != 3); \ + params.address = (addr); \ + params.size = (sz); \ + params.write = (wr); \ + params.read = (rd); \ + params.stop = (st); \ + params.cp_num = (cp); \ + nmi_on_each_cpu(nmi_set_hardware_data_breakpoint, ¶ms, 1, 0); \ +}) + + +extern int jtag_stop_var; +static inline void jtag_stop(void) +{ + set_hardware_data_breakpoint((u64) &jtag_stop_var, + sizeof(jtag_stop_var), 1, 0, 1, 3, 1); + + jtag_stop_var = 0; + + /* Wait for the hardware to stop us */ + wmb(); +} + + +#ifdef CONFIG_USE_AAU +#include + +/* print some aux. 
& AAU registers */ +static inline void +print_aau_regs(char *str, e2k_aau_t *context, struct pt_regs *regs, + struct thread_info *ti) +{ + int i; + bool old_iset; + + old_iset = (machine.native_iset_ver < E2K_ISET_V5); + + if (str) + pr_info("%s\n", str); + + pr_info("\naasr register = 0x%x (state: %s, iab: %d, stb: %d)\n" + "ctpr2 = 0x%llx\n" + "lsr = 0x%llx\n" + "ilcr = 0x%llx\n", + AW(context->aasr), + AAU_NULL(context->aasr) ? "NULL" : + AAU_READY(context->aasr) ? "READY" : + AAU_ACTIVE(context->aasr) ? "ACTIVE" : + AAU_STOPPED(context->aasr) ? "STOPPED": + "undefined", + AS(context->aasr).iab, + AS(context->aasr).stb, + AW(regs->ctpr2), regs->lsr, regs->ilcr); + + if (AAU_STOPPED(context->aasr)) { + pr_info("aaldv = 0x%llx\n" + "aaldm = 0x%llx\n", + AW(context->aaldv), AW(context->aaldm)); + } else { + /* AAU can be in active state in kernel - automatic + * stop by hardware upon trap enter does not work. */ + pr_info("AAU is not in STOPPED or ACTIVE states, AALDV and " + "AALDM will not be printed\n"); + } + + if (AS(context->aasr).iab) { + for (i = 0; i < 32; i++) { + pr_info("aad[%d].hi = 0x%llx ", i, + AW(context->aads[i]).hi); + pr_info("aad[%d].lo = 0x%llx\n", i, + AW(context->aads[i]).lo); + } + + for (i = 0; i < 8; i++) { + pr_info("aaincr[%d] = 0x%llx\n", i, (old_iset) ? + (u32) context->aaincrs[i] : + context->aaincrs[i]); + } + pr_info("aaincr_tags = 0x%x\n", context->aaincr_tags); + + for (i = 0; i < 16; i++) { + pr_info("aaind[%d] = 0x%llx\n", i, (old_iset) ? + (u64) (u32) context->aainds[i] : + context->aainds[i]); + } + pr_info("aaind_tags = 0x%x\n", context->aaind_tags); + } else { + pr_info("IAB flag in AASR is not set, following registers " + "will not be printed: AAD, AAIND, AAIND_TAGS, " + "AAINCR, AAINCR_TAGS\n"); + } + + if (AS(context->aasr).stb) { + for (i = 0; i < 16; i++) { + pr_info("aasti[%d] = 0x%llx\n", i, (old_iset) ? 
+ (u64) (u32) context->aastis[i] : + context->aastis[i]); + } + pr_info("aasti_tags = 0x%x\n", context->aasti_tags); + } else { + pr_info("STB flag in AASR is not set, following registers " + "will not be printed: AASTI, AASTI_TAGS\n"); + } + + if (ti) { + for (i = 0; i < 32; i++) { + pr_info("aaldi[%d] = 0x%llx ", i, (old_iset) ? + (u64) (u32) context->aaldi[i] : + context->aaldi[i]); + pr_info("aaldi[%d] = 0x%llx\n", i+32, (old_iset) ? + (u64) (u32) context->aaldi[i+32] : + context->aaldi[i+32]); + } + + for (i = 0; i < 32; i++) { + pr_info("aalda[%d] = 0x%x ", i, AW(ti->aalda[i])); + pr_info("aalda[%d] = 0x%x\n", i + 32, + AW(ti->aalda[i+32])); + } + } + + pr_info("aafstr = 0x%x\n", read_aafstr_reg_value()); + pr_info("aafstr = 0x%x\n", context->aafstr); +} +#endif /* CONFIG_USE_AAU */ + +extern int debug_signal; +#define SIGDEBUG_PRINT(format, ...) \ +do { \ + if (debug_signal) \ + pr_info("%s (pid=%d): " format, \ + current->comm, current->pid, ##__VA_ARGS__); \ +} while (0) + +extern int debug_trap; + +#endif /* !(__ASSEMBLY__) */ + +#endif /* _E2K_DEBUG_H_ */ diff --git a/arch/e2k/include/asm/e2k_ptypes.h b/arch/e2k/include/asm/e2k_ptypes.h new file mode 100644 index 000000000000..ca3bafda34ef --- /dev/null +++ b/arch/e2k/include/asm/e2k_ptypes.h @@ -0,0 +1,384 @@ + + +/* + * Descriptions of E2K tagged types + */ + +#ifndef _E2K_PTYPES_H_ +#define _E2K_PTYPES_H_ + + +#ifndef __ASSEMBLY__ +#include +#include +#include +#include + + + + + /* + * Tagged values structures + */ + + + /* Address Pointers */ + +typedef union { /* High word of all pointers */ + struct { + u64 curptr : 32; /* [31: 0] */ + u64 size : 32; /* [63:32] */ + } fields; + u64 word; +} e2k_ptr_hi_t; + +typedef union { + union { + struct { + u64 base : E2K_VA_SIZE; /* [47: 0] */ + u64 unused : 59 - E2K_VA_SIZE; /* [58:48] */ + u64 rw : 2; /* [60:59] */ + u64 itag : 3; /* [63:61] */ + } ap; + struct { + u64 base : 32; /* [31: 0] */ + u64 psl : 16; /* [47:32] */ + u64 unused : 11; /* [58:48] */ 
+ u64 rw : 2; /* [60:59] */ + u64 itag : 3; /* [63:61] */ + } sap; + struct { + u64 unused1 : 59; /* [58: 0] */ + u64 rw : 2; /* [60:59] */ + u64 itag : 3; /* [63:61] */ + }; + struct { + u64 unused2 : 59; /* [58: 0] */ + u64 r : 1; /* [59:59] */ + u64 w : 1; /* [60:60] */ + u64 unused3 : 3; /* [63:61] */ + }; + } fields; + u64 word; +} e2k_ptr_lo_t; + +typedef union { /* Lower word of array pointer */ + union { + struct { + u64 base : E2K_VA_SIZE; /* [47: 0] */ + u64 unused : 59 - E2K_VA_SIZE; /* [58:48] */ + u64 rw : 2; /* [60:59] */ + u64 itag : 3; /* [63:61] */ + }; + struct { + u64 __unused1 : 59; /* [58: 0] */ + u64 r : 1; /* [59:59] */ + u64 w : 1; /* [60:60] */ + u64 __unused2 : 3; /* [63:61] */ + }; + } fields; + u64 word; +} e2k_ap_lo_t; + +typedef union { /* Lower word of stack array pointer */ + union { + struct { + u64 base : 32; /* [31: 0] */ + u64 psl : 16; /* [47:32] */ + u64 unused : 11; /* [58:48] */ + u64 rw : 2; /* [60:59] */ + u64 itag : 3; /* [63:61] */ + }; + struct { + u64 __unused2 : 59; /* [58: 0] */ + u64 r : 1; /* [59:59] */ + u64 w : 1; /* [60:60] */ + u64 __unused3 : 3; /* [63:61] */ + }; + } fields; + u64 word; +} e2k_sap_lo_t; + +typedef struct { + union { + struct { + u64 base : E2K_VA_SIZE; /* [47: 0] */ + u64 unused1 : 59 - E2K_VA_SIZE; /* [58:48] */ + u64 rw : 2; /* [60:59] */ + u64 itag : 3; /* [63:61] */ + }; + struct { + u64 unused2 : 59; /* [58: 0] */ + u64 r : 1; /* [59:59] */ + u64 w : 1; /* [60:60] */ + u64 unused3 : 3; /* [63:61] */ + }; + }; + struct { + u64 curptr : 32; /* [31: 0] */ + u64 size : 32; /* [63:32] */ + }; +} e2k_ap_t; + +typedef struct { + union { + struct { + u64 base : 32; /* [31: 0] */ + u64 psl : 16; /* [47:32] */ + u64 unused1 : 11; /* [58:48] */ + u64 rw : 2; /* [60:59] */ + u64 itag : 3; /* [63:61] */ + }; + struct { + u64 unused2 : 59; /* [58: 0] */ + u64 r : 1; /* [59:59] */ + u64 w : 1; /* [60:60] */ + u64 unused3 : 3; /* [63:61] */ + }; + }; + struct { + u64 curptr : 32; /* [31: 0] */ + u64 size 
: 32; /* [63:32] */ + }; +} e2k_sap_t; + +typedef union { /* Common array pointer */ + union { + e2k_ap_t ap; + e2k_sap_t sap; + struct { + /* Low word common fields */ + union { + struct { + u64 unused1 : 59; /* [58:0] */ + u64 rw : 2; /* [60:59] */ + u64 itag : 3; /* [63:61] */ + }; + struct { + u64 unused2 : 59; /* [58: 0] */ + u64 r : 1; /* [59:59] */ + u64 w : 1; /* [60:60] */ + u64 unused3 : 3; /* [63:61] */ + }; + }; + /* High word common fields */ + struct { + u64 curptr : 32; /* [31: 0] */ + u64 size : 32; /* [63:32] */ + }; + }; + } fields; + struct { + long lo; + long hi; + } word; +} __aligned(16) e2k_ptr_t; + +#define R_ENABLE 0x1 +#define W_ENABLE 0x2 +#define RW_ENABLE 0x3 + +#define AP_ITAG_MASK 0xe000000000000000ULL +#define AP_ITAG_SHIFT 61 +#define AP_ITAG 0x0UL +#define SAP_ITAG 0x4UL + +#define __E2K_PTR_BASE(low, sbr_hi) \ +({ \ + e2k_ptr_lo_t lo; \ + AW(lo) = low; \ + (AS(lo).itag == AP_ITAG ? AS(lo).ap.base : (AS(lo).sap.base + (sbr_hi))); \ +}) +#define __E2K_PTR_PTR(low, hiw, sbr_hi) \ +({ \ + e2k_ptr_hi_t hi; \ + AW(hi) = hiw; \ + (__E2K_PTR_BASE(low, (sbr_hi)) + AS(hi).curptr); \ +}) + +#define E2K_PTR_BASE(p, sbr_hi) (AS(p).itag == AP_ITAG ? 
\ + AS(p).ap.base : (AS(p).sap.base + (sbr_hi))) + +#define E2K_PTR_PTR(p, sbr_hi) (E2K_PTR_BASE(p, (sbr_hi)) + AS(p).curptr) + +#define GET_SBR_HI() (current_thread_info()->u_stack.top & 0xffff00000000ULL) + + /* handling Address Pointers */ + +#define MAKE_AP_LO(area_base, area_size, off, access) \ +({ \ + e2k_ap_lo_t __lo; \ + AW(__lo) = 0UL; \ + AS(__lo).base = area_base; \ + AS(__lo).rw = access; \ + AS(__lo).itag = E2K_AP_ITAG; \ + AW(__lo); \ +}) + +#define MAKE_AP_HI(area_base, area_size, offs, access) \ +({ \ + union { \ + e2k_ptr_hi_t hi; \ + u64 w; \ + } u; \ + u.w = 0UL; \ + AS(u.hi).size = area_size; \ + AS(u.hi).curptr = offs; \ + u.w; \ +}) + +#define MAKE_SAP_LO(area_base, area_size, offs, access) \ +({ \ + e2k_rwsap_lo_struct_t sap_lo; \ + AS_WORD(sap_lo) = 0; \ + AS_SAP_STRUCT(sap_lo).base = area_base; \ + AS_SAP_STRUCT(sap_lo).rw = access; \ + AS_SAP_STRUCT(sap_lo).itag = E2K_SAP_ITAG; \ + AS_WORD(sap_lo); \ +}) + +#define MAKE_SAP_HI(area_base, area_size, offs, access) \ +({ \ + e2k_rwsap_hi_struct_t sap_hi; \ + AS_WORD(sap_hi) = 0; \ + AS_STRUCT(sap_hi).size = area_size; \ + AS_STRUCT(sap_hi).curptr = offs; \ + AS_WORD(sap_hi); \ +}) + +static inline e2k_ptr_t MAKE_AP(u64 base, u64 len) +{ + e2k_ptr_t ptr = {{0}}; + AW(ptr).lo = 0L | ((base & E2K_VA_MASK) | + ((u64)E2K_AP_ITAG << 61) | + ((u64)RW_ENABLE << 59)); + AW(ptr).hi = 0L | ((len & 0xFFFFFFFF) << 32); + return ptr; +} + + +/* + * Procedure Label (PL) + */ + +typedef struct pl_lo_fields { + u64 target : E2K_VA_SIZE; /* [47: 0] */ + u64 unused1 : 58 - E2K_VA_MSB; /* [58:48] */ + u64 pm : 1; /* [59] privileged mode */ + /* (affects only from v2) */ + u64 unused2 : 1; /* [60] */ + u64 itag : 3; /* [63:61] internel tag bits */ +} pl_lo_fields_t; +#define PL_PM_BIT 59 /* bit # of privileged label flag */ +#define PL_PM_MASK (1UL << PL_PM_BIT) +#define PL_ITAG_SHIFT 61 +#define PL_ITAG_NUM_BITS 3 /* size of field ITAG in bits */ +#define PL_ITAG_BITS_MASK ((1UL << PL_ITAG_NUM_BITS) - 1) 
+#define PL_ITAG_GET(pl_lo_word) (((pl_lo_word) >> PL_ITAG_SHIFT) & \ + ((1UL << PL_ITAG_NUM_BITS) - 1)) +#define PL_ITAG_SET(pl_lo_word, itag) \ + (((pl_lo_word) & ~(PL_ITAG_BITS_MASK << PL_ITAG_SHIFT)) | \ + (((itag) & PL_ITAG_BITS_MASK) << PL_ITAG_SHIFT)) + +typedef struct pl_hi_fields { + u64 cui : 16; /* [15: 0] compilation unit undex */ + u64 unused3 : 48; /* [63:16] */ +} pl_hi_fields_t; + +typedef union e2k_pl_lo { + struct { + u64 target : E2K_VA_SIZE; + u64 unused1 : 58 - E2K_VA_MSB; + u64 pm : 1; + u64 unused2 : 1; + u64 itag : 3; + }; + pl_lo_fields_t fields; + u64 word; +} e2k_pl_lo_t; +#define PL_lo_target fields.target +#define PL_lo_itag fields.itag +#define PL_lo_pm fields.pm +#define PL_lo_value word + +typedef union e2k_pl_hi { + pl_hi_fields_t fields; + u64 word; +} e2k_pl_hi_t; +#define PL_hi_cui fields.cui +#define PL_hi_value word + +typedef struct e2k_pl { + e2k_pl_lo_t lo; + e2k_pl_hi_t hi; +} e2k_pl_t; + +#define PL_target lo.PL_lo_target +#define PL_itag lo.PL_lo_itag +#define PL_pm lo.PL_lo_pm +#define PL_cui hi.PL_hi_cui +#define PLLO_value lo.PL_lo_value +#define PLHI_value hi.PL_hi_value +#define PLLO_item lo +#define PLHI_item hi +#define IS_PL_ITAG(pl_lo_word) (PL_ITAG_GET(pl_lo_word) == E2K_PL_ITAG) + +static inline e2k_pl_t DO_MAKE_PL_V2(u64 addr, bool pm) +{ + e2k_pl_t p; + e2k_pl_lo_t pl; + + pl.PL_lo_value = 0; + pl.PL_lo_target = addr; + pl.PL_lo_pm = pm; + pl.PL_lo_itag = E2K_PL_V2_ITAG; + p.lo = pl; + p.hi.word = 0L; + return p; +} + +static inline e2k_pl_t DO_MAKE_PL_V6(u64 addr, bool pm, unsigned int cui) +{ + e2k_pl_t pl; + + pl = DO_MAKE_PL_V2(addr, pm); + pl.PL_itag = E2K_PL_ITAG; + pl.PLHI_value = 0; + pl.PL_cui = cui; + return pl; +} + +static inline e2k_pl_t MAKE_PL_V2(u64 addr) +{ + return DO_MAKE_PL_V2(addr, false); +} + +static inline e2k_pl_t MAKE_PL_V6(u64 addr, unsigned int cui) +{ + return DO_MAKE_PL_V6(addr, false, cui); +} + +static inline e2k_pl_t MAKE_PL(u64 addr, unsigned int cui) +{ + return 
MAKE_PL_V6(addr, cui); +} + +static inline e2k_pl_t MAKE_PRIV_PL(u64 addr, unsigned int cui) +{ + return DO_MAKE_PL_V6(addr, true, cui); +} + +static inline e2k_pl_lo_t DO_MAKE_INTEGER_PL(u64 addr) +{ + e2k_pl_lo_t pl_lo; + + pl_lo.PL_lo_value = 0; + pl_lo.PL_lo_target = addr; + return pl_lo; +} +#define MAKE_INTEGER_PL(func_p) \ + ((typeof(func_p))(DO_MAKE_INTEGER_PL((u64)func_p).PL_lo_value)) + +#endif /* __ASSEMBLY__ */ + +#endif /* _E2K_PTYPES_H_ */ diff --git a/arch/e2k/include/asm/e2k_sic.h b/arch/e2k/include/asm/e2k_sic.h new file mode 100644 index 000000000000..acfb459c4191 --- /dev/null +++ b/arch/e2k/include/asm/e2k_sic.h @@ -0,0 +1,130 @@ +#ifndef _ASM_E2K_SIC_H_ +#define _ASM_E2K_SIC_H_ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * NBR area configuration + */ +#define E2K_NSR_AREA_PHYS_BASE (machine.get_nsr_area_phys_base()) +#define E2K_NSR_AREA_SIZE (machine.nbsr_area_size) +#define E2K_NBSR_OFFSET (machine.nbsr_area_offset) +#define E2K_NBSR_SIZE (machine.nbsr_area_size) +#define E2K_COPSR_AREA_PHYS_BASE (machine.copsr_area_phys_base) +#define E2K_COPSR_AREA_SIZE (machine.copsr_area_size) + +#define BOOT_NSR_AREA_PHYS_BASE (boot_machine.nsr_area_phys_base) +#define BOOT_NSR_AREA_SIZE (boot_machine.nbsr_area_size) +#define BOOT_NBSR_OFFSET (boot_machine.nbsr_area_offset) +#define BOOT_NBSR_SIZE (boot_machine.nbsr_area_size) +#define BOOT_COPSR_AREA_PHYS_BASE (boot_machine.copsr_area_phys_base) +#define BOOT_COPSR_AREA_SIZE (boot_machine.copsr_area_size) + +/* + * Nodes system registers area - NSR = { NSR0 ... NSRj ... 
} + * NSR is some part of common system communicator area SR + */ +#define NODE_NSR_SIZE E2K_NSR_AREA_SIZE +#define THE_NODE_NSR_PHYS_BASE(node) \ + (E2K_NSR_AREA_PHYS_BASE + (node * NODE_NSR_SIZE)) + +#define BOOT_NODE_NSR_SIZE BOOT_NSR_AREA_SIZE +#define BOOT_THE_NODE_NSR_PHYS_BASE(node) \ + (BOOT_NSR_AREA_PHYS_BASE + (node * BOOT_NODE_NSR_SIZE)) + +/* + * Nodes processor system registers (north bridge) + * NBSR = { NBSR0 ... NBSRj ... } + * NBSR is some part of node system registers area NSR + */ +#define NODE_NBSR_SIZE E2K_NBSR_SIZE +#define NODE_NBSR_OFFSET E2K_NBSR_OFFSET +#define THE_NODE_NBSR_PHYS_BASE(node) \ + ((unsigned char *)(THE_NODE_NSR_PHYS_BASE(node) + \ + NODE_NBSR_OFFSET)) + +#define BOOT_NODE_NBSR_SIZE BOOT_NBSR_SIZE +#define BOOT_NODE_NBSR_OFFSET BOOT_NBSR_OFFSET +#define BOOT_THE_NODE_NBSR_PHYS_BASE(node) \ + ((unsigned char *)(BOOT_THE_NODE_NSR_PHYS_BASE(node) + \ + BOOT_NODE_NBSR_OFFSET)) + +/* + * Nodes system coprocessors registers area - COPSR = { COPSR0 ... COPSRj ... 
} + */ +#define NODE_COPSR_SIZE E2K_COPSR_AREA_SIZE +#define THE_NODE_COPSR_PHYS_BASE(node) \ + (E2K_COPSR_AREA_PHYS_BASE + (node * NODE_COPSR_SIZE)) + +extern unsigned char *nodes_nbsr_base[MAX_NUMNODES]; +extern phys_addr_t nodes_nbsr_phys_base[MAX_NUMNODES]; + +extern void boot_e2k_sic_setup_arch(void); +#ifndef CONFIG_E2K_MACHINE +extern int boot_get_e2k_machine_id(void); +#endif + +extern int __init e2k_sic_init(void); +extern int __init e2k_early_iohub_online(int node, int link); + +static inline e2k_addr_t sic_get_io_area_base(void) +{ + return machine.x86_io_area_base; +} + +static inline e2k_addr_t sic_get_io_area_size(void) +{ + return machine.x86_io_area_size; +} +extern e2k_addr_t sic_get_io_area_max_size(void); + +static inline unsigned char *sic_get_node_nbsr_base(int node_id) +{ + return nodes_nbsr_base[node_id]; +} + +static inline phys_addr_t sic_get_node_nbsr_phys_base(int node_id) +{ + phys_addr_t base = nodes_nbsr_phys_base[node_id]; + VM_BUG_ON(!base); + return base; +} + +#define sic_domain_pci_conf_size() (machine.pcicfg_area_size) +#define sic_domain_pci_conf_base(domain) \ + (machine.pcicfg_area_phys_base + \ + sic_domain_pci_conf_size() * ((unsigned long)domain)) + +#define boot_sic_domain_pci_conf_base(domain) \ + (boot_machine.pcicfg_area_phys_base + \ + boot_machine.pcicfg_area_size * ((unsigned long)domain)) + +extern unsigned long domain_to_pci_conf_base[]; + +static inline unsigned long +domain_pci_conf_base(unsigned int domain) +{ + return domain_to_pci_conf_base[domain]; +} + +static inline unsigned long +domain_pci_conf_size(unsigned int domain) +{ + return sic_domain_pci_conf_size(); +} + +#endif /* _ASM_E2K_SIC_H_ */ diff --git a/arch/e2k/include/asm/e2k_syswork.h b/arch/e2k/include/asm/e2k_syswork.h new file mode 100644 index 000000000000..1a0b15ab6696 --- /dev/null +++ b/arch/e2k/include/asm/e2k_syswork.h @@ -0,0 +1,102 @@ +#ifndef _E2K_SYSWORK_H_ +#define _E2K_SYSWORK_H_ + +#include + + +/****************************/ 
+#define TIME_SHARE 1 +/****************************/ + +/* info_for_other for work_for_other_cpu() */ + +typedef struct info_for_other { + int work; + int wait; +} info_for_other_t; + +#define PRINT_STK_ON_OTHER 1 +#define PRINT_FUNCY_ON_OTHER 2 +#define WAIT_ON_OTHER 3 + +/****************************/ + +/* info_instr_exec for instr_exec() */ + +typedef struct info_instr_exec { + int instr_type; + long addr1; + long addr2; + long val_1; + long val_2; +} info_instr_exec_t; + +#define PAR_WRITE 1 +#define PAR_READ 2 +/****************************/ + +/****************************/ +/* get task pages info for PRINT_STATM syswork */ +typedef struct user_mm { + int size; + int resident; + int text; + int data; + int shared; +} task_pages_info_t; +/****************************/ + +extern void print_all_task(void); +extern long ide_info(long what); +#ifdef CONFIG_PAGE_DOUBLE_FREE_ENTRAP +extern void save_kernel_chine_stack(struct page *page); +#endif /* CONFIG_PAGE_DOUBLE_FREE_ENTRAP */ + +extern int is_kernel_address_valid(e2k_addr_t address); + + +/* This macro fills missing arguments with "(u64) (0)". */ +#define EXPAND_ARGS_TO_8(...) \ + __EXPAND_ARGS_TO_8(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0) +#define __EXPAND_ARGS_TO_8(fmt, a1, a2, a3, a4, a5, a6, a7, ...) \ + fmt, (u64) (a1), (u64) (a2), (u64) (a3), \ + (u64) (a4), (u64) (a5), (u64) (a6), (u64) (a7) + +/* This macro is used to avoid printks with variable number of arguments + * inside of functions with __check_stack attribute. + * + * If a call to printk has less than 8 parameters the macro sets any missing + * arguments to (u64) (0). + * + * NOTE: maximum number of arguments that can be passed to a function + * from within an __interrupt function is 8! */ +#define printk_fixed_args(...) \ + __printk_fixed_args(EXPAND_ARGS_TO_8(__VA_ARGS__)) +#define __trace_bprintk_fixed_args(...) \ + ____trace_bprintk_fixed_args(EXPAND_ARGS_TO_8(__VA_ARGS__)) +#define panic_fixed_args(...) 
\ + __panic_fixed_args(EXPAND_ARGS_TO_8(__VA_ARGS__)) +#define delay_printk_fixed_args(...) \ + __delay_printk_fixed_args(EXPAND_ARGS_TO_8(__VA_ARGS__)) + +extern void __printk_fixed_args(char *fmt, + u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6, u64 a7); +extern void __panic_fixed_args(char *fmt, + u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6, u64 a7) + __noreturn; +#ifdef CONFIG_TRACING +extern void ____trace_bprintk_fixed_args(unsigned long ip, + char *fmt, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6); +#endif + +long do_longjmp(u64 retval, u64 jmp_sigmask, e2k_cr0_hi_t jmp_cr0_hi, + e2k_cr1_lo_t jmp_cr1_lo, e2k_pcsp_lo_t jmp_pcsp_lo, + e2k_pcsp_hi_t jmp_pcsp_hi, u32 jmp_br, u32 jmp_psize, + e2k_fpcr_t fpcr, e2k_fpsr_t fpsr, e2k_pfpfr_t pfpfr, bool restore_fpu); + +long write_current_chain_stack(unsigned long dst, void __user *buf, + unsigned long size); +long copy_current_proc_stack(void __user *buf, void __user *p_stack, + unsigned long size, int write, unsigned long ps_used_top); + +#endif /* _E2K_SYSWORK_H_ */ diff --git a/arch/e2k/include/asm/e2s.h b/arch/e2k/include/asm/e2s.h new file mode 100644 index 000000000000..56f25ab1332a --- /dev/null +++ b/arch/e2k/include/asm/e2s.h @@ -0,0 +1,57 @@ +#ifndef _ASM_E2S_H_ +#define _ASM_E2S_H_ + +/* + * Machine (based on E4C processor) topology: + * E4C is NUMA system on distributed memory and can have several nodes. 
+ * Each node can have some memory (faster to access) and max 4 CPUs (cores) + * Node number is the same as chip-processor number + * Some nodes (CPUs) can be without memory + * LAPIC cluster number is the same as node number + */ + +#ifndef __ASSEMBLY__ +struct pt_regs; + +extern void boot_e2s_setup_arch(void); +extern void e2s_setup_machine(void); +extern void sic_error_interrupt(struct pt_regs *regs); +#endif + +#define E2S_NR_NODE_CPUS 4 +#define E2S_MAX_NR_NODE_CPUS E2S_NR_NODE_CPUS + +#define E2S_NODE_IOLINKS 1 + +#define E2S_PCICFG_AREA_PHYS_BASE ES2_PCICFG_AREA_PHYS_BASE +#define E2S_PCICFG_AREA_SIZE ES2_PCICFG_AREA_SIZE + +#define E2S_NSR_AREA_PHYS_BASE ES2_NSR_AREA_PHYS_BASE + +#define E2S_NBSR_AREA_OFFSET ES2_NBSR_AREA_OFFSET +#define E2S_NBSR_AREA_SIZE ES2_NBSR_AREA_SIZE + +#define E2S_COPSR_AREA_PHYS_BASE ES2_COPSR_AREA_PHYS_BASE +#define E2S_COPSR_AREA_SIZE ES2_COPSR_AREA_SIZE + +#define E2S_MLT_SIZE ES2_MLT_SIZE + +#define E2S_TLB_LINES_BITS_NUM ES2_TLB_LINES_BITS_NUM +#define E2S_TLB_ADDR_LINE_NUM ES2_TLB_ADDR_LINE_NUM +#define E2S_TLB_ADDR_LINE_NUM2 0x000000001fe00000 +#define E2S_TLB_ADDR_LINE_NUM_SHIFT2 21 +#define E2S_TLB_ADDR_SET_NUM ES2_TLB_ADDR_SET_NUM +#define E2S_TLB_ADDR_SET_NUM_SHIFT ES2_TLB_ADDR_SET_NUM_SHIFT + +#define E2S_SIC_MC_SIZE 0xa4 +#define E2S_SIC_MC_COUNT 3 +#define E2S_SIC_MC1_ECC 0x440 + +#define E2S_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE + +#define E2S_L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT +#define E2S_L1_CACHE_BYTES ES2_L1_CACHE_BYTES +#define E2S_L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT +#define E2S_L2_CACHE_BYTES ES2_L2_CACHE_BYTES + +#endif /* _ASM_E2S_H_ */ diff --git a/arch/e2k/include/asm/e8c.h b/arch/e2k/include/asm/e8c.h new file mode 100644 index 000000000000..c5a3000c7f63 --- /dev/null +++ b/arch/e2k/include/asm/e8c.h @@ -0,0 +1,59 @@ +#ifndef _ASM_E8C_H_ +#define _ASM_E8C_H_ + +/* + * Machine (based on E8C processor) topology: + * E8C is NUMA system on distributed memory and can have several nodes. 
+ * Each node can have some memory (faster to access) and max 8 CPUs (cores) + * Node number is the same as chip-processor number + * Some nodes (CPUs) can be without memory + * LAPIC cluster number is the same as node number + */ + +#ifndef __ASSEMBLY__ +struct pt_regs; + +extern void boot_e8c_setup_arch(void); +extern void e8c_setup_machine(void); +extern void sic_error_interrupt(struct pt_regs *regs); +#endif + +#define E8C_NR_NODE_CPUS 8 +#define E8C_MAX_NR_NODE_CPUS 16 + +#define E8C_NODE_IOLINKS 1 + +#define E8C_PCICFG_AREA_PHYS_BASE ES2_PCICFG_AREA_PHYS_BASE +#define E8C_PCICFG_AREA_SIZE ES2_PCICFG_AREA_SIZE + +#define E8C_NSR_AREA_PHYS_BASE ES2_NSR_AREA_PHYS_BASE + +#define E8C_NBSR_AREA_OFFSET ES2_NBSR_AREA_OFFSET +#define E8C_NBSR_AREA_SIZE ES2_NBSR_AREA_SIZE + +#define E8C_COPSR_AREA_PHYS_BASE ES2_COPSR_AREA_PHYS_BASE +#define E8C_COPSR_AREA_SIZE ES2_COPSR_AREA_SIZE + +#define E8C_MLT_SIZE ES2_MLT_SIZE + +#define E8C_TLB_LINES_BITS_NUM ES2_TLB_LINES_BITS_NUM +#define E8C_TLB_ADDR_LINE_NUM E2S_TLB_ADDR_LINE_NUM +#define E8C_TLB_ADDR_LINE_NUM2 E2S_TLB_ADDR_LINE_NUM2 +#define E8C_TLB_ADDR_LINE_NUM_SHIFT2 E2S_TLB_ADDR_LINE_NUM_SHIFT2 +#define E8C_TLB_ADDR_SET_NUM E2S_TLB_ADDR_SET_NUM +#define E8C_TLB_ADDR_SET_NUM_SHIFT E2S_TLB_ADDR_SET_NUM_SHIFT + +#define E8C_SIC_MC_SIZE 0xe4 +#define E8C_SIC_MC_COUNT 4 +#define E8C_SIC_MC1_ECC E2S_SIC_MC1_ECC + +#define E8C_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE + +#define E8C_L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT +#define E8C_L1_CACHE_BYTES ES2_L1_CACHE_BYTES +#define E8C_L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT +#define E8C_L2_CACHE_BYTES ES2_L2_CACHE_BYTES +#define E8C_L3_CACHE_SHIFT 6 +#define E8C_L3_CACHE_BYTES (1 << E8C_L3_CACHE_SHIFT) + +#endif /* _ASM_E8C_H_ */ diff --git a/arch/e2k/include/asm/e8c2.h b/arch/e2k/include/asm/e8c2.h new file mode 100644 index 000000000000..bc9c85a98179 --- /dev/null +++ b/arch/e2k/include/asm/e8c2.h @@ -0,0 +1,59 @@ +#ifndef _ASM_E8C2_H_ +#define _ASM_E8C2_H_ + +/* + * Machine (based on E8C2 
processor) topology: + * E8C2 is NUMA system on distributed memory and can have several nodes. + * Each node can have some memory (faster to access) and max 8 CPUs (cores) + * Node number is the same as chip-processor number + * Some nodes (CPUs) can be without memory + * LAPIC cluster number is the same as node number + */ + +#ifndef __ASSEMBLY__ +struct pt_regs; + +extern void boot_e8c2_setup_arch(void); +extern void e8c2_setup_machine(void); +extern void sic_error_interrupt(struct pt_regs *regs); +#endif + +#define E8C2_NR_NODE_CPUS E8C_NR_NODE_CPUS +#define E8C2_MAX_NR_NODE_CPUS E8C_MAX_NR_NODE_CPUS + +#define E8C2_NODE_IOLINKS E8C_NODE_IOLINKS + +#define E8C2_PCICFG_AREA_PHYS_BASE ES2_PCICFG_AREA_PHYS_BASE +#define E8C2_PCICFG_AREA_SIZE ES2_PCICFG_AREA_SIZE + +#define E8C2_NSR_AREA_PHYS_BASE ES2_NSR_AREA_PHYS_BASE + +#define E8C2_NBSR_AREA_OFFSET ES2_NBSR_AREA_OFFSET +#define E8C2_NBSR_AREA_SIZE ES2_NBSR_AREA_SIZE + +#define E8C2_COPSR_AREA_PHYS_BASE ES2_COPSR_AREA_PHYS_BASE +#define E8C2_COPSR_AREA_SIZE ES2_COPSR_AREA_SIZE + +#define E8C2_MLT_SIZE ES2_MLT_SIZE + +#define E8C2_TLB_LINES_BITS_NUM ES2_TLB_LINES_BITS_NUM +#define E8C2_TLB_ADDR_LINE_NUM E2S_TLB_ADDR_LINE_NUM +#define E8C2_TLB_ADDR_LINE_NUM2 E2S_TLB_ADDR_LINE_NUM2 +#define E8C2_TLB_ADDR_LINE_NUM_SHIFT2 E2S_TLB_ADDR_LINE_NUM_SHIFT2 +#define E8C2_TLB_ADDR_SET_NUM E2S_TLB_ADDR_SET_NUM +#define E8C2_TLB_ADDR_SET_NUM_SHIFT E2S_TLB_ADDR_SET_NUM_SHIFT + +#define E8C2_SIC_MC_SIZE 0xf4 +#define E8C2_SIC_MC_COUNT E8C_SIC_MC_COUNT +#define E8C2_SIC_MC1_ECC E2S_SIC_MC1_ECC + +#define E8C2_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE + +#define E8C2_L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT +#define E8C2_L1_CACHE_BYTES ES2_L1_CACHE_BYTES +#define E8C2_L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT +#define E8C2_L2_CACHE_BYTES ES2_L2_CACHE_BYTES +#define E8C2_L3_CACHE_SHIFT E8C_L3_CACHE_SHIFT +#define E8C2_L3_CACHE_BYTES E8C_L3_CACHE_BYTES + +#endif /* _ASM_E8C2_H_ */ diff --git a/arch/e2k/include/asm/el_posix.h 
b/arch/e2k/include/asm/el_posix.h new file mode 100644 index 000000000000..c89e06a5eb69 --- /dev/null +++ b/arch/e2k/include/asm/el_posix.h @@ -0,0 +1,59 @@ +#ifndef _ASM_EL_POSIX_ATOMIC_H +#define _ASM_EL_POSIX_ATOMIC_H + +#ifdef CONFIG_HAVE_EL_POSIX_SYSCALL +#ifdef __KERNEL__ +#include +#include + +#define ARCH_HAS_GET_CYCLES + +#define ARCH_HAS_ATOMIC_CMPXCHG + +static int __el_atomic_cmpxchg(int *x, int *uaddr, int oldval, int newval) +{ + int rval; + + TRY_USR_PFAULT { + *x = cmpxchg(uaddr, oldval, newval); + rval = 0; + } CATCH_USR_PFAULT { + DebugUAF("%s (%d) - %s : " + "el_atomic_cmpxchg data fault %px(%ld)\n", + __FILE__, __LINE__, __FUNCTION__, + (uaddr), (sizeof(*uaddr))); + rval = -EFAULT; + } END_USR_PFAULT + + return rval; +} + +#define el_atomic_cmpxchg_acq(x, uaddr, oldval, newval) \ + __el_atomic_cmpxchg(&x, uaddr, oldval, newval) +#define el_atomic_cmpxchg_rel(x, uaddr, oldval, newval) \ + __el_atomic_cmpxchg(&x, uaddr, oldval, newval) + +#define el_atomic_xchg_acq(x, uaddr, value) \ + __el_atomic_xchg_acq(&x, uaddr, value) + +static int __el_atomic_xchg_acq(int *x, int *uaddr, const int value) +{ + int rval; + + TRY_USR_PFAULT { + *x = xchg(uaddr, value); + rval = 0; + } CATCH_USR_PFAULT { + DebugUAF("%s (%d) - %s : " + "el_atomic_xchg data fault %px(%ld)\n", + __FILE__, __LINE__, __FUNCTION__, + (uaddr), (sizeof(*uaddr))); + rval = -EFAULT; + } END_USR_PFAULT + + return rval; +} + +#endif +#endif +#endif diff --git a/arch/e2k/include/asm/elf.h b/arch/e2k/include/asm/elf.h new file mode 100644 index 000000000000..0b9dd9863a78 --- /dev/null +++ b/arch/e2k/include/asm/elf.h @@ -0,0 +1,172 @@ +#ifndef _E2K_ELF_H_ +#define _E2K_ELF_H_ + +/* + * ELF register definitions.. 
+ */ + +#include +#include +#include +#include + +#define PT_E2K_TAGS 0x70000000 + +/* + * e2k relocation types + */ +#define R_E2K_32_ABS 0 +#define R_E2K_32_PC 2 +#define R_E2K_64_ABS 50 /* Direct 64 bit */ +#define R_E2K_64_ABS_LIT 51 /* Direct 64 bit for LTS syllable */ +#define R_E2K_64_CALL 52 /* PC relative 64 bit for DISP */ +#define R_E2K_DISP 110 /* PC relative 28-bit for DISP */ + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_ARCH_FAKE EM_E2K_FAKE +#define ELF_ARCH EM_E2K +#define ELF_CLASS ELFCLASS64 +#define ELF_DATA ELFDATA2LSB + +// #define CORE_DUMP_USE_REGSET !!!! + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ + +#define elf_check_arch(x) \ + ( (((x)->e_machine == ELF_ARCH && \ + ((x)->e_flags & ELF_E2K_PM) == 0) || \ + ((x)->e_machine == ELF_ARCH_FAKE && \ + (x)->e_ident[EI_SEMANTIC] == ELF_CODE_64_UNPROTECTED)) && \ + (x)->e_ident[EI_CLASS] == ELFCLASS64 && \ + elf_check_e2k_mtype(x) \ + ) + +#define compat_elf_check_arch(x) \ + ( (((x)->e_machine == ELF_ARCH && \ + ((x)->e_flags & ELF_E2K_PM) == 0) || \ + ((x)->e_machine == ELF_ARCH_FAKE && \ + (x)->e_ident[EI_SEMANTIC] == ELF_CODE_32_UNPROTECTED)) && \ + (x)->e_ident[EI_CLASS] == ELFCLASS32 && \ + elf_check_e2k_mtype(x) \ + ) + +/* General registers */ + +typedef unsigned long long elf_greg_t; + +typedef struct user_regs_struct elf_gregset_t; + +/* Floating point registers */ + +/* + * NEEDSWORK: Take care about floating point registers too! + */ + +/* just to get the things compiled */ +#define ELF_NFPREG 32 + +typedef double elf_fpreg_t; +typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; + +/* Addition types of symbol type. 
*/ + +#define STT_PRIVATE 5 +#define STT_INIT_FUNC 6 +#define STT_FINI_FUNC 7 + +#define USE_ELF_CORE_DUMP +#define ELF_EXEC_PAGESIZE 4096 +//#define CORE_DUMP_USE_REGSET + +#ifdef __KERNEL__ +/* #define ELF_CORE_COPY_REGS(gregs, regs) \ + memcpy(gregs, regs, sizeof(struct pt_regs)); */ + +/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is + now struct_user_regs, they are different) */ + +#define ELF_CORE_COPY_REGS(pr_reg, regs) \ + core_pt_regs_to_user_regs(regs, (struct user_regs_struct*) (&pr_reg)); +extern void core_pt_regs_to_user_regs (struct pt_regs *pt_regs, + struct user_regs_struct *user_regs); +#endif /* __KERNEL__ */ + +/* This yields a mask that user programs can use to figure out what + instruction set this cpu supports. This could be done in userspace, + but it's not easy, and we've already done it here. */ + +#define ELF_HWCAP (0) + +/* This yields a string that ld.so will use to load implementation + specific libraries for optimization. This is more specific in + intent than poking at uname or /proc/cpuinfo. + + For the moment, we have only optimizations for the Intel generations, + but that could change... */ + +#define ELF_PLATFORM (NULL) + +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. 
*/ + +#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) /* NEEDSWORK */ +#define COMPAT_ELF_ET_DYN_BASE (2 * TASK32_SIZE / 3) + +#ifdef __KERNEL__ +#define SET_PERSONALITY(ex) \ +do { \ + current->thread.flags &= ~E2K_FLAG_64BIT_BINCO; \ + if (((ex).e_machine == ELF_ARCH && \ + ((ex).e_flags & ELF_E2K_PM)) || \ + ((ex).e_machine == ELF_ARCH_FAKE && \ + ((ex).e_ident[EI_SEMANTIC] == ELF_CODE_NEW_PROTECTED || \ + (ex).e_ident[EI_SEMANTIC] == ELF_CODE_NEW_PROTECTED_CXX))) { \ + current->thread.flags |= E2K_FLAG_PROTECTED_MODE; \ + if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \ + current->thread.flags |= E2K_FLAG_3P_ELF32; \ + } else { \ + current->thread.flags &= ~ E2K_FLAG_3P_ELF32; \ + } \ + } else { \ + current->thread.flags &= ~(E2K_FLAG_PROTECTED_MODE | \ + E2K_FLAG_3P_ELF32); \ + } \ + if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ + current->thread.flags |= E2K_FLAG_32BIT; \ + else \ + current->thread.flags &= ~E2K_FLAG_32BIT; \ + if ((ex).e_flags & ELF_BIN_COMP) \ + current->thread.flags |= E2K_FLAG_BIN_COMP_CODE; \ + else \ + current->thread.flags &= ~E2K_FLAG_BIN_COMP_CODE; \ +} while (0) +#endif + +#define FAST_SYSCALLS_ENTRY 0x1f +/* + * SYSTEM_INFO_ENTRY: + * 0x1: vfork() supported + */ +#define SYSTEM_INFO_ENTRY 0x1 +#define E2K_DLINFO \ +do { \ + NEW_AUX_ENT(AT_FAST_SYSCALLS, FAST_SYSCALLS_ENTRY); \ + NEW_AUX_ENT(AT_SYSTEM_INFO, SYSTEM_INFO_ENTRY); \ +} while (0) + +#define ARCH_DLINFO E2K_DLINFO +#define COMPAT_ARCH_DLINFO E2K_DLINFO + +/* + * Support for tags dumping + */ +extern unsigned long vma_dump_size(struct vm_area_struct *vma, + unsigned long mm_flags); + +#endif /* _E2K_ELF_H_ */ diff --git a/arch/e2k/include/asm/epic.h b/arch/e2k/include/asm/epic.h new file mode 100644 index 000000000000..372271d8b321 --- /dev/null +++ b/arch/e2k/include/asm/epic.h @@ -0,0 +1,97 @@ +#ifndef __ASM_E2K_EPIC_H +#define __ASM_E2K_EPIC_H + +#ifdef __KERNEL__ +#include +#include +#include +#include +#include +#include + +#ifndef __ASSEMBLY__ + +void 
do_sic_error_interrupt(void); + +static inline bool cpu_has_epic(void) +{ + if (cpu_has(CPU_FEAT_EPIC)) + return true; + else + return false; +} + +static inline unsigned get_current_epic_core_priority(void) +{ +#ifdef CONFIG_EPIC + return current_thread_info()->pt_regs->epic_core_priority; +#else + return 0; +#endif +} + +static inline void set_current_epic_core_priority(unsigned p) +{ +#ifdef CONFIG_EPIC + current_thread_info()->pt_regs->epic_core_priority = p; +#endif +} + +/* + * Basic functions accessing EPICs. + */ +static inline void epic_write_w(unsigned int reg, u32 v) +{ + boot_writel(v, (void __iomem *) (EPIC_DEFAULT_PHYS_BASE + reg)); +} + +static inline u32 epic_read_w(unsigned int reg) +{ + return boot_readl((void __iomem *) (EPIC_DEFAULT_PHYS_BASE + reg)); +} + +static inline void epic_write_d(unsigned int reg, u64 v) +{ + boot_writeq(v, (void __iomem *) (EPIC_DEFAULT_PHYS_BASE + reg)); +} + +static inline u64 epic_read_d(unsigned int reg) +{ + return boot_readq((void __iomem *) (EPIC_DEFAULT_PHYS_BASE + reg)); +} + +static inline void boot_epic_write_w(unsigned int reg, u32 v) +{ + epic_write_w(reg, v); +} + +static inline u32 boot_epic_read_w(unsigned int reg) +{ + return epic_read_w(reg); +} + +static inline void epic_write_guest_w(unsigned int reg, unsigned int v) +{ + epic_write_w(CEPIC_GUEST + reg, v); +} + +static inline unsigned int epic_read_guest_w(unsigned int reg) +{ + return epic_read_w(CEPIC_GUEST + reg); +} + +static inline void epic_write_guest_d(unsigned int reg, unsigned long v) +{ + epic_write_d(CEPIC_GUEST + reg, v); +} + +static inline unsigned long epic_read_guest_d(unsigned int reg) +{ + return epic_read_d(CEPIC_GUEST + reg); +} + +#include + +#endif /* !(__ASSEMBLY__) */ +#endif /* __KERNEL__ */ +#endif /* __ASM_E2K_EPIC_H */ diff --git a/arch/e2k/include/asm/epic_regs.h b/arch/e2k/include/asm/epic_regs.h new file mode 100644 index 000000000000..d823ab909f10 --- /dev/null +++ b/arch/e2k/include/asm/epic_regs.h @@ -0,0 +1,4 @@ 
+#ifndef __ASM_E2K_EPIC_REGS_H +#define __ASM_E2K_EPIC_REGS_H +#include +#endif /* __ASM_E2K_EPIC_REGS_H */ diff --git a/arch/e2k/include/asm/epicdef.h b/arch/e2k/include/asm/epicdef.h new file mode 100644 index 000000000000..36c5a74ec75d --- /dev/null +++ b/arch/e2k/include/asm/epicdef.h @@ -0,0 +1,13 @@ +#ifndef _ASM_E2K_EPICDEF_H +#define _ASM_E2K_EPICDEF_H + +/* + * Constants for e2k EPICs (CEPIC, IOEPIC) + */ + +#define IO_EPIC_DEFAULT_PHYS_BASE 0xfec00000UL +#define EPIC_DEFAULT_PHYS_BASE 0xfee00000UL + + +#include +#endif /* _ASM_E2K_EPICDEF_H */ diff --git a/arch/e2k/include/asm/errors_hndl.h b/arch/e2k/include/asm/errors_hndl.h new file mode 100644 index 000000000000..d47653f4ba50 --- /dev/null +++ b/arch/e2k/include/asm/errors_hndl.h @@ -0,0 +1,68 @@ +/* $Id: errors_hndl.h,v 1.6 2009/01/22 17:10:07 atic Exp $ + * + * Handling of errors of boot-time & initialization. + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_ERRORS_HNDL_H +#define _E2K_ERRORS_HNDL_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include + +extern void init_bug(const char *fmt_v, ...) __noreturn __cold; +extern void init_warning(const char *fmt_v, ...) __cold; + +extern void boot_bug(const char *fmt_v, ...) __noreturn __cold; +extern void boot_warning(const char *fmt_v, ...) __cold; +#define BOOT_BUG_POINT(func_name) \ + do_boot_printk("kernel boot-time BUG at %s:%d:%s\n", __FILE__, \ + __LINE__, func_name) +#define BOOT_BUG_ON(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) { \ + do_boot_printk("kernel boot-time BUG at %s:%d:%s\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + boot_bug(format); \ + } \ + unlikely(__ret_warn_on); \ +}) +#define BOOT_BUG(format...) \ +do { \ + do_boot_printk("kernel boot-time BUG at %s:%d:%s\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + boot_bug(format); \ +} while (0) + +#define BOOT_WARNING(format...) 
\ +do { \ + do_boot_printk("kernel boot-time WARNING at %s:%d:%s\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + boot_warning(format); \ +} while (0) + +#define boot_native_panic(fmt, args...) \ + do_boot_printk(fmt, ##args) + +#define init_printk dump_printk +#define init_vprintk dump_vprintk +#define INIT_BUG(format...) \ +do { \ + init_printk("kernel init-time BUG at %s:%d:%s\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + init_bug(format); \ +} while (0) +#define INIT_WARNING(format...) \ +do { \ + init_printk("kernel init-time WARNING at %s:%d:%s\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + init_warning(format); \ +} while (0) + +#endif /* !(__ASSEMBLY__) */ + +#endif /* !(_E2K_ERRORS_HNDL_H) */ diff --git a/arch/e2k/include/asm/es2.h b/arch/e2k/include/asm/es2.h new file mode 100644 index 000000000000..0533a8abf6f8 --- /dev/null +++ b/arch/e2k/include/asm/es2.h @@ -0,0 +1,58 @@ +#ifndef _ASM_ES2_H_ +#define _ASM_ES2_H_ + +/* + * Machine (based on E2C+ processor) topology: + * E2C+ is NUMA system on distributed memory and can have several nodes. + * Each node can have some memory (faster to access) and max 4 CPUs (cores), + * but real processor chip has only two cores (2 other should be considered + * as always disabled). So online CPU numbers will be 0, 1, 4, 5, 8, 9 ... 
+ * Node number is the same as chip-processor number + * Some nodes (CPUs) can be without memory + * LAPIC cluster number is the same as node number + */ + +#ifndef __ASSEMBLY__ +struct pt_regs; + +extern void boot_es2_setup_arch(void); +extern void es2_setup_machine(void); +extern void eldsp_interrupt(struct pt_regs *regs); +#endif + +#define ES2_NR_NODE_CPUS 2 +#define ES2_MAX_NR_NODE_CPUS 4 + +#define ES2_NODE_IOLINKS 2 + +#define ES2_PCICFG_AREA_PHYS_BASE 0x0000000200000000UL +#define ES2_PCICFG_AREA_SIZE 0x0000000010000000UL + +#define ES2_NSR_AREA_PHYS_BASE 0x0000000110000000UL + +#define ES2_NBSR_AREA_OFFSET 0x0000000000000000UL +#define ES2_NBSR_AREA_SIZE 0x0000000000100000UL + +#define ES2_COPSR_AREA_PHYS_BASE 0x00000001c0000000UL +#define ES2_COPSR_AREA_SIZE 0x0000000001000000UL + +#define ES2_MLT_SIZE 16 + +#define ES2_TLB_LINES_BITS_NUM 8 +#define ES2_TLB_ADDR_LINE_NUM 0x00000000000ff000 +#define ES2_TLB_ADDR_LINE_NUM2 0x000000003fc00000 +#define ES2_TLB_ADDR_LINE_NUM_SHIFT2 22 +#define ES2_TLB_ADDR_SET_NUM 0x0000000000000018 +#define ES2_TLB_ADDR_SET_NUM_SHIFT 3 + +#define ES2_SIC_MC_COUNT 2 +#define ES2_SIC_MC1_ECC 0x500 + +#define ES2_CLOCK_TICK_RATE 10000000 + +#define ES2_L1_CACHE_SHIFT 5 +#define ES2_L1_CACHE_BYTES (1 << ES2_L1_CACHE_SHIFT) +#define ES2_L2_CACHE_SHIFT 6 +#define ES2_L2_CACHE_BYTES (1 << ES2_L2_CACHE_SHIFT) + +#endif /* _ASM_ES2_H_ */ diff --git a/arch/e2k/include/asm/exec.h b/arch/e2k/include/asm/exec.h new file mode 100644 index 000000000000..6bb202861767 --- /dev/null +++ b/arch/e2k/include/asm/exec.h @@ -0,0 +1,3 @@ +#include + +#define arch_align_stack(x) (x) diff --git a/arch/e2k/include/asm/fast_syscalls.h b/arch/e2k/include/asm/fast_syscalls.h new file mode 100644 index 000000000000..b21080644dab --- /dev/null +++ b/arch/e2k/include/asm/fast_syscalls.h @@ -0,0 +1,337 @@ +#ifndef _ASM_E2K_FAST_SYSCALLS_H +#define _ASM_E2K_FAST_SYSCALLS_H + +#include +#include +#include +#include +#include +#include +#include +#include + 
+struct fast_syscalls_data { + struct timekeeper *tk; + u32 mult; + u32 shift; + struct clocksource *clock; + struct timespec wall_time_coarse; +}; + +extern struct fast_syscalls_data fsys_data; + +extern seqcount_t timekeeper_seq; + +typedef void (*fast_system_call_func)(u64 arg1, u64 arg2); + +extern const fast_system_call_func fast_sys_calls_table[NR_fast_syscalls]; +extern const fast_system_call_func fast_sys_calls_table_32[NR_fast_syscalls]; + +int fast_sys_ni_syscall(void); + +#define FAST_SYSTEM_CALL_TBL_ENTRY(sysname) \ + (fast_system_call_func) sysname +#define COMPAT_FAST_SYSTEM_CALL_TBL_ENTRY(sysname) \ + (fast_system_call_func) compat_##sysname +#define PROTECTED_FAST_SYSTEM_CALL_TBL_ENTRY(sysname) \ + (fast_system_call_func) protected_##sysname + +int native_do_fast_clock_gettime(const clockid_t which_clock, + struct timespec *tp); +int native_fast_sys_clock_gettime(const clockid_t which_clock, + struct timespec __user *tp); +int native_do_fast_gettimeofday(struct timeval *tv); +int native_fast_sys_siggetmask(u64 __user *oset, size_t sigsetsize); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized basec on pv_ops) */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#else /* ! CONFIG_KVM_GUEST_KERNEL && ! 
CONFIG_PARAVIRT_GUEST */ +/* it is native host kernel withounr virtualization */ +/* or host kernel with virtualization support */ +static inline int +do_fast_clock_gettime(const clockid_t which_clock, struct timespec *tp) +{ + return native_do_fast_clock_gettime(which_clock, tp); +} + +static inline int +fast_sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp) +{ + return native_fast_sys_clock_gettime(which_clock, tp); +} + +static inline int +do_fast_gettimeofday(struct timeval *tv) +{ + return native_do_fast_gettimeofday(tv); +} +static inline int +fast_sys_siggetmask(u64 __user *oset, size_t sigsetsize) +{ + return native_fast_sys_siggetmask(oset, sigsetsize); +} +#endif /* ! CONFIG_KVM_GUEST_KERNEL */ + +/* + * These have to be macros since there is no way to return two + * values (seconds and nanoseconds) to an __interrupt function + * without assembler magic. + */ + +enum { + FAST_SYS_OK, + FAST_SYS_ERROR +}; + +#define fast_get_time(secs, nsecs, monotonic) \ +({ \ + struct clocksource *__clock; \ + struct timekeeper *__tk; \ + u64 __cycles = 0, __cycle_last = 0, __mask = 0; \ + u32 __mult, __shift; \ + unsigned __seq; \ + int __ret = FAST_SYS_ERROR; \ + long wall2mon_sec, wall2mon_nsec; \ + \ + do { \ + __seq = raw_read_seqcount_begin(&timekeeper_seq); \ + \ + __tk = fsys_data.tk; \ + __clock = fsys_data.clock; \ + __mult = fsys_data.mult; \ + __shift = fsys_data.shift; \ + \ + secs = __tk->xtime_sec; \ + nsecs = __tk->tkr_mono.xtime_nsec; \ + \ + if (monotonic) { \ + wall2mon_sec = __tk->wall_to_monotonic.tv_sec; \ + wall2mon_nsec = __tk->wall_to_monotonic.tv_nsec;\ + } \ + \ + if (likely(__clock == &clocksource_sclkr)) { \ + __cycle_last = __tk->tkr_mono.cycle_last; \ + __mask = __clock->mask; \ + __cycles = fast_syscall_read_sclkr(); \ + if (__cycles) \ + __ret = FAST_SYS_OK; \ + } else if (likely(__clock == &clocksource_clkr)) { \ + __cycle_last = __tk->tkr_mono.cycle_last; \ + __mask = __clock->mask; \ + __cycles = 
fast_syscall_read_clkr(); \ + __ret = FAST_SYS_OK; \ + } \ + } while (unlikely(read_seqcount_retry(&timekeeper_seq, __seq))); \ + \ + if (__ret == FAST_SYS_OK) { \ + nsecs = (((__cycles - __cycle_last) & __mask) \ + * __mult + nsecs) >> __shift; \ + \ + if (monotonic) { \ + secs += wall2mon_sec; \ + nsecs += wall2mon_nsec; \ + } \ + \ + while (nsecs >= NSEC_PER_SEC) { \ + ++secs; \ + nsecs -= NSEC_PER_SEC; \ + } \ + } \ + \ + __ret; \ +}) + +#define fast_get_time_coarse(secs, nsecs, monotonic) \ +({ \ + struct timekeeper *__tk; \ + unsigned __seq; \ + \ + do { \ + __seq = raw_read_seqcount_begin(&timekeeper_seq); \ + \ + secs = fsys_data.wall_time_coarse.tv_sec; \ + nsecs = fsys_data.wall_time_coarse.tv_nsec; \ + \ + if (monotonic) { \ + __tk = fsys_data.tk; \ + secs += __tk->wall_to_monotonic.tv_sec; \ + nsecs += __tk->wall_to_monotonic.tv_nsec; \ + } \ + } while (unlikely(read_seqcount_retry(&timekeeper_seq, __seq))); \ + \ + while (nsecs >= NSEC_PER_SEC) { \ + ++secs; \ + nsecs -= NSEC_PER_SEC; \ + } \ + \ + FAST_SYS_OK; \ +}) + +static inline int +DO_FAST_CLOCK_GETTIME(const clockid_t which_clock, struct timespec *tp) +{ + u64 secs = 0, nsecs = 0; + int ret; + + switch (which_clock) { + case CLOCK_REALTIME: + case CLOCK_MONOTONIC: + ret = fast_get_time(secs, nsecs, + which_clock == CLOCK_MONOTONIC); + break; + case CLOCK_REALTIME_COARSE: + case CLOCK_MONOTONIC_COARSE: + ret = fast_get_time_coarse(secs, nsecs, + which_clock == CLOCK_MONOTONIC_COARSE); + break; + default: + ret = FAST_SYS_ERROR; + break; + } + + if (likely(!ret)) { + tp->tv_sec = secs; + tp->tv_nsec = nsecs; + } + + return ret; +} + +/* trap table entry is called as function (it is closer to hardware start) */ +typedef long (*ttable_entry_args3)(int sys_num, u64 arg1, u64 arg2); +typedef long (*ttable_entry_args4)(int sys_num, u64 arg1, u64 arg2, u64 arg3); +#define ttable_entry3_args3(sys_num, arg1, arg2) \ + ((ttable_entry_args3)(get_ttable_entry3))(sys_num, arg1, arg2) +#define 
ttable_entry3_args4(sys_num, arg1, arg2) \ + ((ttable_entry_args4)(get_ttable_entry3))(sys_num, arg1, arg2, arg3) + +/* trap table entry started by direct branch (it is closer to fast system */ +/* call wirthout switch and use user local data stack */ +#define goto_ttable_entry_args3(entry_label, sys_num, arg1, arg2) \ + E2K_GOTO_ARG3(entry_label, sys_num, arg1, arg2) +#define goto_ttable_entry_args4(entry_label, sys_num, arg1, arg2, arg3) \ + E2K_GOTO_ARG4(entry_label, sys_num, arg1, arg2, arg3) +#define goto_ttable_entry3_args3(sys_num, arg1, arg2) \ + goto_ttable_entry_args3(ttable_entry3, sys_num, arg1, arg2) +#define goto_ttable_entry3_args4(sys_num, arg1, arg2, arg3) \ + goto_ttable_entry_args4(ttable_entry3, sys_num, arg1, arg2, arg3) + + +#define ttable_entry_clock_gettime(which, time) \ +/* ibranch */ goto_ttable_entry3_args3(__NR_clock_gettime, which, time) +/* call ttable_entry3_args3(__NR_clock_gettime, which, time) */ +#define ttable_entry_gettimeofday(tv, tz) \ +/* ibranch */ goto_ttable_entry3_args3(__NR_gettimeofday, tv, tz) +/* ttable_entry3_args3(__NR_gettimeofday, tv, tz) */ +#define ttable_entry_sigprocmask(how, nset, oset) \ +/* ibranch */ goto_ttable_entry3_args4(__NR_sigprocmask, how, nset, oset) +/* ttable_entry3_args4(__NR_sigprocmask, how, nset, oset) */ +#define ttable_entry_getcpu(cpup, nodep, unused) \ +/* ibranch */ goto_ttable_entry3_args4(__NR_getcpu, cpup, nodep, unused) +/* ttable_entry3_args4(__NR_getcpu, cpup, nodep, unused) */ + +static inline int +FAST_SYS_CLOCK_GETTIME(const clockid_t which_clock, struct timespec __user *tp) +{ + struct thread_info *const ti = READ_CURRENT_REG(); + int r; + + prefetch_nospec(&fsys_data); + +#ifdef CONFIG_KVM_HOST_MODE + if (unlikely(test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE))) + ttable_entry_clock_gettime((u64) which_clock, (u64) tp); +#endif + + tp = (typeof(tp)) ((u64) tp & E2K_VA_MASK); + if (unlikely((u64) tp + sizeof(struct timespec) > ti->addr_limit.seg)) + return -EFAULT; + + r = 
do_fast_clock_gettime(which_clock, tp); + if (unlikely(r)) + ttable_entry_clock_gettime((u64) which_clock, (u64) tp); + + return r; +} + +static inline int +DO_FAST_GETTIMEOFDAY(struct timeval *tv) +{ + u64 secs = 0, nsecs = 0; + int ret; + + ret = fast_get_time(secs, nsecs, false); + if (likely(!ret)) { + tv->tv_sec = secs; + tv->tv_usec = nsecs / 1000; + } + + return ret; +} + +static inline int +FAST_SYS_SIGGETMASK(u64 __user *oset, size_t sigsetsize) +{ + struct thread_info *const ti = READ_CURRENT_REG(); + struct task_struct *task = thread_info_task(ti); +#ifdef CONFIG_KVM_HOST_MODE + bool guest = test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE); +#endif + u64 set; + + set = task->blocked.sig[0]; + + if (unlikely(sigsetsize != 8)) + return -EINVAL; + +#ifdef CONFIG_KVM_HOST_MODE + if (unlikely(guest)) + ttable_entry_sigprocmask((u64) 0, (u64) NULL, (u64) oset); +#endif + + oset = (typeof(oset)) ((u64) oset & E2K_VA_MASK); + if (unlikely((u64) oset + sizeof(sigset_t) > ti->addr_limit.seg)) + return -EFAULT; + + *oset = set; + + return 0; +} + +int fast_sys_gettimeofday(struct timeval __user *tv, + struct timezone __user *tz); +int fast_sys_clock_gettime(const clockid_t which_clock, + struct timespec __user *tp); +struct getcpu_cache; +int fast_sys_getcpu(unsigned __user *cpup, unsigned __user *nodep, + struct getcpu_cache __user *unused); +int fast_sys_siggetmask(u64 __user *oset, size_t sigsetsize); +struct ucontext; +int fast_sys_getcontext(struct ucontext __user *ucp, size_t sigsetsize); +int fast_sys_set_return(u64 ip, int flags); + +struct compat_timespec; +int compat_fast_sys_clock_gettime(const clockid_t which_clock, + struct compat_timespec __user *tp); +struct compat_timeval; +int compat_fast_sys_gettimeofday(struct compat_timeval __user *tv, + struct timezone __user *tz); +int compat_fast_sys_siggetmask(u32 __user *oset, size_t sigsetsize); +struct ucontext_32; +int compat_fast_sys_getcontext(struct ucontext_32 __user *ucp, + size_t sigsetsize); +int 
compat_fast_sys_set_return(u32 ip, int flags); + +int protected_fast_sys_clock_gettime(u32 tags, clockid_t which_clock, + u64 arg3, u64 arg4, u64 arg5); +int protected_fast_sys_gettimeofday(u32 tags, + u64 arg2, u64 arg3, u64 arg4, u64 arg5); +int protected_fast_sys_getcpu(u32 tags, u64 arg2, u64 arg3, u64 arg4, u64 arg5); +int protected_fast_sys_siggetmask(u32 tags, u64 arg2, u64 arg3, size_t sigsetsize); +int protected_fast_sys_getcontext(u32 tags, u64 arg2, u64 arg3, size_t sigsetsize); +#endif /* _ASM_E2K_FAST_SYSCALLS_H */ + diff --git a/arch/e2k/include/asm/fb.h b/arch/e2k/include/asm/fb.h new file mode 100644 index 000000000000..129118f2dfd6 --- /dev/null +++ b/arch/e2k/include/asm/fb.h @@ -0,0 +1,19 @@ +#ifndef _ASM_E2K_FB_H +#define _ASM_E2K_FB_H + +#include +#include +#include +#include + +static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, + unsigned long off) +{ + vma->vm_page_prot = (cpu_has(CPU_FEAT_WC_PCI_PREFETCH) && + vma->vm_flags & VM_WRITECOMBINED) ? + pgprot_writecombine(vma->vm_page_prot) : + pgprot_noncached(vma->vm_page_prot); +} + +extern int fb_is_primary_device(struct fb_info *info); +#endif /* _ASM_E2K_FB_H */ diff --git a/arch/e2k/include/asm/fcntl.h b/arch/e2k/include/asm/fcntl.h new file mode 100644 index 000000000000..46ab12db5739 --- /dev/null +++ b/arch/e2k/include/asm/fcntl.h @@ -0,0 +1 @@ +#include diff --git a/arch/e2k/include/asm/floppy.h b/arch/e2k/include/asm/floppy.h new file mode 100644 index 000000000000..de7942e040c5 --- /dev/null +++ b/arch/e2k/include/asm/floppy.h @@ -0,0 +1,272 @@ +/* + * Architecture specific parts of the Floppy driver + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 1995 + */ +#ifndef __ASM_E2K_FLOPPY_H +#define __ASM_E2K_FLOPPY_H + +#include + + +/* + * The DMA channel used by the floppy controller cannot access data at + * addresses >= 16MB + * + * Went back to the 1MB limit, as some people had problems with the floppy + * driver otherwise. It doesn't matter much for performance anyway, as most + * floppy accesses go through the track buffer. + */ +#define _CROSS_64KB(a,s,vdma) \ +(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)) + +#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1) + + +#define SW fd_routine[use_virtual_dma&1] +#define CSW fd_routine[can_use_virtual_dma & 1] + + +#define fd_inb(port) inb_p(port) +#define fd_outb(value,port) outb_p(value,port) + +#define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy") +#define fd_free_dma() CSW._free_dma(FLOPPY_DMA) +#define fd_enable_irq() enable_irq(FLOPPY_IRQ) +#define fd_disable_irq() disable_irq(FLOPPY_IRQ) +#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL) +#define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA) + +/* E2K Use default memory allocation policy (DMA only), defined in floppy.c + * + * #define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size) + */ +#define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io) + +/* E2K Disable fallback on no-dma mode + * + * #define FLOPPY_CAN_FALLBACK_ON_NODMA + */ + +static int virtual_dma_count; +static int virtual_dma_residue; +static char *virtual_dma_addr; +static int virtual_dma_mode; +static int doing_pdma; + +static irqreturn_t floppy_hardint(int irq, void *dev_id) +{ + register unsigned char st; + +#undef TRACE_FLPY_INT + +#ifdef TRACE_FLPY_INT + static int calls=0; + static int bytes=0; + static int dma_wait=0; +#endif + if (!doing_pdma) + return floppy_interrupt(irq, dev_id); + +#ifdef TRACE_FLPY_INT + if(!calls) + bytes = virtual_dma_count; +#endif + + { + register int lcount; + register char *lptr; + + st = 1; + 
for(lcount=virtual_dma_count, lptr=virtual_dma_addr; + lcount; lcount--, lptr++) { + st=inb(virtual_dma_port+4) & 0xa0 ; + if(st != 0xa0) + break; + if(virtual_dma_mode) + outb_p(*lptr, virtual_dma_port+5); + else + *lptr = inb_p(virtual_dma_port+5); + } + virtual_dma_count = lcount; + virtual_dma_addr = lptr; + st = inb(virtual_dma_port+4); + } + +#ifdef TRACE_FLPY_INT + calls++; +#endif + if(st == 0x20) + return IRQ_HANDLED; + if(!(st & 0x20)) { + virtual_dma_residue += virtual_dma_count; + virtual_dma_count=0; +#ifdef TRACE_FLPY_INT + printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", + virtual_dma_count, virtual_dma_residue, calls, bytes, + dma_wait); + calls = 0; + dma_wait=0; +#endif + doing_pdma = 0; + floppy_interrupt(irq, dev_id); + return IRQ_HANDLED; + } +#ifdef TRACE_FLPY_INT + if(!virtual_dma_count) + dma_wait++; +#endif + return IRQ_HANDLED; +} + +static void fd_disable_dma(void) +{ + if(! (can_use_virtual_dma & 1)) + disable_dma(FLOPPY_DMA); + doing_pdma = 0; + virtual_dma_residue += virtual_dma_count; + virtual_dma_count=0; +} + +static int vdma_request_dma(unsigned int dmanr, const char * device_id) +{ + return 0; +} + +static void vdma_nop(unsigned int dummy) +{ +} + + +static int vdma_get_dma_residue(unsigned int dummy) +{ + return virtual_dma_count + virtual_dma_residue; +} + + +static int fd_request_irq(void) +{ + if(can_use_virtual_dma) + return request_irq(FLOPPY_IRQ, floppy_hardint, IRQF_DISABLED, + "floppy", NULL); + else + return request_irq(FLOPPY_IRQ, floppy_interrupt, IRQF_DISABLED, + "floppy", NULL); + +} + +static unsigned long dma_mem_alloc(unsigned long size) +{ + return __get_dma_pages(GFP_KERNEL,get_order(size)); +} + + +static unsigned long vdma_mem_alloc(unsigned long size) +{ + return (unsigned long) vmalloc(size); + +} + +#define nodma_mem_alloc(size) vdma_mem_alloc(size) + +static void _fd_dma_mem_free(unsigned long addr, unsigned long size) +{ + if((unsigned long) addr >= (unsigned long) high_memory) + 
vfree((void *)addr); + else + free_pages(addr, get_order(size)); +} + +#define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size) + +static void _fd_chose_dma_mode(char *addr, unsigned long size) +{ + if(can_use_virtual_dma == 2) { + if((unsigned long) addr >= (unsigned long) high_memory || + isa_virt_to_bus(addr) >= 0x1000000 || + _CROSS_64KB(addr, size, 0)) + use_virtual_dma = 1; + else + use_virtual_dma = 0; + } else { + use_virtual_dma = can_use_virtual_dma & 1; + } +} + +#define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size) + + +static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io) +{ + doing_pdma = 1; + virtual_dma_port = io; + virtual_dma_mode = (mode == DMA_MODE_WRITE); + virtual_dma_addr = addr; + virtual_dma_count = size; + virtual_dma_residue = 0; + return 0; +} + +static int hard_dma_setup(char *addr, unsigned long size, int mode, int io) +{ +#ifdef FLOPPY_SANITY_CHECK + if (CROSS_64KB(addr, size)) { + printk("DMA crossing 64-K boundary %px-%px\n", addr, addr+size); + return -1; + } +#endif + /* actual, physical DMA */ + doing_pdma = 0; + clear_dma_ff(FLOPPY_DMA); + set_dma_mode(FLOPPY_DMA,mode); + set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr)); + set_dma_count(FLOPPY_DMA,size); + enable_dma(FLOPPY_DMA); + return 0; +} + +static struct fd_routine_l { + int (*_request_dma)(unsigned int dmanr, const char * device_id); + void (*_free_dma)(unsigned int dmanr); + int (*_get_dma_residue)(unsigned int dummy); + unsigned long (*_dma_mem_alloc) (unsigned long size); + int (*_dma_setup)(char *addr, unsigned long size, int mode, int io); +} fd_routine[] = { + { + request_dma, + free_dma, + get_dma_residue, + dma_mem_alloc, + hard_dma_setup + }, + { + vdma_request_dma, + vdma_nop, + vdma_get_dma_residue, + vdma_mem_alloc, + vdma_dma_setup + } +}; + + +static int FDC1 = 0x3f0; +static int FDC2 = -1; + +/* 1.44 Mb */ +#define FLOPPY0_TYPE 4 +#define FLOPPY1_TYPE 4 + +#define N_FDC 2 +#define N_DRIVE 8 + +#define 
FLOPPY_MOTOR_MASK 0xf0 + +#define AUTO_DMA + +#define EXTRA_FLOPPY_PARAMS + +#endif /* __ASM_E2K_FLOPPY_H */ diff --git a/arch/e2k/include/asm/ftrace.h b/arch/e2k/include/asm/ftrace.h new file mode 100644 index 000000000000..4af6f67df63c --- /dev/null +++ b/arch/e2k/include/asm/ftrace.h @@ -0,0 +1,48 @@ +#ifndef _ASM_E2K_FTRACE_H +#define _ASM_E2K_FTRACE_H + +static inline void return_to_handler(void){} + +extern struct ftrace_ops *function_trace_op; + +#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST +# define HAVE_FUNCTION_GRAPH_FP_TEST +#endif + +#ifdef CONFIG_DYNAMIC_FTRACE +/* On e2k _mcount() is used for both dynamic and static cases. */ +# define FTRACE_ADDR ((unsigned long) _mcount) +# define MCOUNT_ADDR ((unsigned long) _mcount) +# define MCOUNT_INSN_SIZE 8 + +# define ARCH_SUPPORTS_FTRACE_OPS 1 + +extern void _mcount(e2k_cr0_hi_t frompc); + +struct dyn_arch_ftrace { + /* No extra data needed for e2k */ +}; + +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + return addr; +} +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +extern unsigned long ftrace_return_to_handler(unsigned long frame_pointer); +#endif + +#define ftrace_return_address(n) __e2k_kernel_return_address(n) + +#ifdef CONFIG_E2K_STACKS_TRACER +extern int stack_tracer_enabled; +extern int stack_tracer_kernel_only; +int +stack_trace_sysctl(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); +#endif + +#endif /* _ASM_E2K_FTRACE_H */ + diff --git a/arch/e2k/include/asm/futex.h b/arch/e2k/include/asm/futex.h new file mode 100644 index 000000000000..8c301d907359 --- /dev/null +++ b/arch/e2k/include/asm/futex.h @@ -0,0 +1,77 @@ +#ifndef _ASM_FUTEX_H +#define _ASM_FUTEX_H + +#ifdef __KERNEL__ + +#include + +#include +#include +#include +#include + +static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, + u32 __user *uaddr) +{ + int oldval, ret = 0; + + pagefault_disable(); + + TRY_USR_PFAULT { + switch 
(op) { + case FUTEX_OP_SET: + oldval = __api_xchg_return(oparg, uaddr, w, STRONG_MB); + break; + case FUTEX_OP_ADD: + oldval = __api_futex_atomic32_op("adds", oparg, uaddr); + break; + case FUTEX_OP_OR: + oldval = __api_futex_atomic32_op("ors", oparg, uaddr); + break; + case FUTEX_OP_ANDN: + oldval = __api_futex_atomic32_op("andns", oparg, uaddr); + break; + case FUTEX_OP_XOR: + oldval = __api_futex_atomic32_op("xors", oparg, uaddr); + break; + default: + oldval = 0; + ret = -ENOSYS; + break; + } + } CATCH_USR_PFAULT { + pagefault_enable(); + DebugUAF("%s (%d) - %s : futex_atomic_op data fault " + "%px(%ld)\n" , __FILE__, __LINE__, + __FUNCTION__, (uaddr), (sizeof(*uaddr))); + return -EFAULT; + } END_USR_PFAULT + + pagefault_enable(); + + if (!ret) + *oval = oldval; + + return ret; +} + +static int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + if (!access_ok(uaddr, sizeof(int))) + return -EFAULT; + + TRY_USR_PFAULT { + *uval = cmpxchg(uaddr, oldval, newval); + } CATCH_USR_PFAULT { + DebugUAF("%s (%d) - %s : futex_atomic_cmpxchg data fault " + "%px(%ld)\n", __FILE__, __LINE__, + __FUNCTION__, (uaddr), (sizeof(*uaddr))); + return -EFAULT; + } END_USR_PFAULT + + return 0; +} + +#endif +#endif diff --git a/arch/e2k/include/asm/getsp_adj.h b/arch/e2k/include/asm/getsp_adj.h new file mode 100644 index 000000000000..3b66fad2e3b5 --- /dev/null +++ b/arch/e2k/include/asm/getsp_adj.h @@ -0,0 +1,123 @@ +#ifndef _E2K_ASM_GETSP_ADJ_H +#define _E2K_ASM_GETSP_ADJ_H + +#include + +#include + +/* + * bug #101468: if user allocated more than 4Gb of stack then %cr1_hi.ussz + * field would overflow. In this case we remember all such over/underflows + * in software and apply corresponding corrections to %usd.lo.base manually + * from exc_last_wish handler. + * + * All under/overflows are kept in a single list. 
+ */ +struct getsp_adj { + struct list_head list_entry; + unsigned long frame_index; + int correction; +}; + + +static inline int __copy_getsp_adj(struct list_head *dst, + const struct list_head *src) +{ + const struct getsp_adj *p; + struct getsp_adj *new; + + list_for_each_entry(p, src, list_entry) { + new = kmalloc(sizeof(*new), GFP_KERNEL); + if (!new) + return -ENOMEM; + + new->correction = p->correction; + new->frame_index = p->frame_index; + list_add_tail(&new->list_entry, dst); + } + + return 0; +} + +static inline int copy_getsp_adj(struct thread_info *new_ti, + const struct thread_info *old_ti) +{ + return __copy_getsp_adj(&new_ti->getsp_adj, &old_ti->getsp_adj); +} + +static inline void free_getsp_adj(struct list_head *getsp_adj_list) +{ + struct getsp_adj *p, *tmp; + + list_for_each_entry_safe(p, tmp, getsp_adj_list, list_entry) { + list_del(&p->list_entry); + kfree(p); + } +} + +static inline s64 getsp_adj_get_correction(unsigned long frame) +{ + unsigned long frame_index = frame - (u64) CURRENT_PCS_BASE(); + struct getsp_adj *p; + + list_for_each_entry(p, ¤t_thread_info()->getsp_adj, list_entry) { + if (p->frame_index == frame_index) + return (s64) p->correction; + } + + return 0; +} + +static inline int getsp_adj_set_correction(int correction, unsigned long frame) +{ + unsigned long frame_index = frame - (u64) CURRENT_PCS_BASE(); + struct getsp_adj *new, *p; + + list_for_each_entry(p, ¤t_thread_info()->getsp_adj, list_entry) { + if (p->frame_index == frame_index) { + if (correction) { + p->correction = correction; + } else { + list_del(&p->list_entry); + kfree(p); + } + return 0; + } + } + + if (!correction) + return 0; + + new = kmalloc(sizeof(*new), GFP_KERNEL); + if (!new) + return -ENOMEM; + + new->correction = correction; + new->frame_index = frame_index; + list_add(&new->list_entry, ¤t_thread_info()->getsp_adj); + + return 0; +} + +static inline void getsp_adj_apply(struct pt_regs *regs) +{ + unsigned long frame, frame_index; + struct getsp_adj 
*p; + + frame = AS(regs->stacks.pcsp_lo).base + AS(regs->stacks.pcsp_hi).ind; + frame_index = frame - (u64) CURRENT_PCS_BASE(); + + list_for_each_entry(p, ¤t_thread_info()->getsp_adj, list_entry) { + if (p->frame_index == frame_index) + goto found; + } + + return; + +found: + AS(regs->stacks.usd_lo).base += p->correction * 0x100000000ULL; + list_del(&p->list_entry); + kfree(p); +} + +#endif diff --git a/arch/e2k/include/asm/glob_regs.h b/arch/e2k/include/asm/glob_regs.h new file mode 100644 index 000000000000..8aa511a505a1 --- /dev/null +++ b/arch/e2k/include/asm/glob_regs.h @@ -0,0 +1,209 @@ +/* + * + * Copyright (C) 2014 MCST + * + * CPU global registers using by kernel + */ +#ifndef _E2K_GLOB_REGS_H +#define _E2K_GLOB_REGS_H + +#ifdef __KERNEL__ + +/* + * MAP of global registers using for the user purposes + */ +#define E2K_GLOBAL_REGS_NUM E2K_MAXGR_d +#define GLOBAL_GREGS_START 0 +#define GLOBAL_GREGS_NUM (E2K_GLOBAL_REGS_NUM / 2) +#define LOCAL_GREGS_START (GLOBAL_GREGS_START + GLOBAL_GREGS_NUM) +#define LOCAL_GREGS_NUM (E2K_GLOBAL_REGS_NUM - GLOBAL_GREGS_NUM) + +/* Follow global registers are global for user applications according to ABI */ +#define GLOBAL_GREGS_USER_MASK \ +( \ + 1UL << 0 | 1UL << 1 | /* %dg0 - %dg1 */ \ + 1UL << 2 | 1UL << 3 | /* %dg2 - %dg3 */ \ + 1UL << 4 | 1UL << 5 | /* %dg4 - %dg5 */ \ + 1UL << 6 | 1UL << 7 | /* %dg6 - %dg7 */ \ + 1UL << 8 | 1UL << 9 | /* %dg8 - %dg9 */ \ + 1UL << 10 | 1UL << 11 | /* %dg10 - %dg11 */ \ + 1UL << 12 | 1UL << 13 | /* %dg12 - %dg13 */ \ + 1UL << 14 | 1UL << 15 | /* %dg14 - %dg15 */ \ + 0UL \ +) +/* Follow global registers are local for user applications according to ABI */ +#define LOCAL_GREGS_USER_MASK \ +( \ + 1UL << 16 | 1UL << 17 | /* %dg16 - %dg17 */ \ + 1UL << 18 | 1UL << 19 | /* %dg18 - %dg19 */ \ + 1UL << 20 | 1UL << 21 | /* %dg20 - %dg21 */ \ + 1UL << 22 | 1UL << 23 | /* %dg22 - %dg23 */ \ + 1UL << 24 | 1UL << 25 | /* %dg24 - %dg25 */ \ + 1UL << 26 | 1UL << 27 | /* %dg26 - %dg27 */ \ + 1UL << 28 | 
1UL << 29 | /* %dg28 - %dg29 */ \ + 1UL << 30 | 1UL << 31 | /* %dg30 - %dg31 */ \ + 0UL \ +) + +#define USER_THREAD_TLS_GREG 13 /* TLS of user threads */ + +/* + * MAP of global registers using for the kernel purposes + */ + +/* THe next register is used only at paravirtualization mode on host & guest */ +#define GUEST_VCPU_STATE_GREG 16 /* pointer to VCPU state structure */ +/* Global registers to point to current structure */ +#define CURRENT_TASK_GREG 17 /* pointer to current task structure */ +/* smp_processor_id() & per_cpu_offset */ +#define MY_CPU_OFFSET_GREG 18 /* offset of per CPU data */ +#define SMP_CPU_ID_GREG 19 /* CPU number */ +#define KERNEL_GREGS_MAX_NUM 4 /* kernel use 4 global registers */ + +#ifdef CONFIG_VIRTUALIZATION +/* Global register to support virtualization */ +#define HOST_GREGS_MAX_NUM 1 /* 1 global register is used by host */ + /* kernel to support virtualization */ +#endif /* CONFIG_VIRTUALIZATION */ + +#define CURRENTS_GREGS_MASK ((1UL << GUEST_VCPU_STATE_GREG) | \ + (1UL << CURRENT_TASK_GREG)) +#define CPUS_GREGS_MASK ((1UL << MY_CPU_OFFSET_GREG) | \ + (1UL << SMP_CPU_ID_GREG)) +#define KERNEL_GREGS_MAX_MASK (CURRENTS_GREGS_MASK | CPUS_GREGS_MASK) +#ifdef CONFIG_GREGS_CONTEXT +#define NATIVE_KERNEL_GREGS_MAX_NUM KERNEL_GREGS_MAX_NUM +#define CURRENTS_GREGS_KERNEL_MASK CURRENTS_GREGS_MASK +#define CPUS_GREGS_KERNEL_MASK CPUS_GREGS_MASK +#else /* ! 
CONFIG_GREGS_CONTEXT */ +#define NATIVE_KERNEL_GREGS_MAX_NUM 0 +#define CURRENTS_GREGS_KERNEL_MASK 0UL +#define CPUS_GREGS_KERNEL_MASK 0UL +#endif /* CONFIG_GREGS_CONTEXT */ + +#define NATIVE_KERNEL_GREGS_MASK \ + (CURRENTS_GREGS_KERNEL_MASK | CPUS_GREGS_KERNEL_MASK) + +#ifdef CONFIG_VIRTUALIZATION +/* Global register to point to guest VCPU state */ +#define VCPU_STATE_GREGS_MASK (1UL << GUEST_VCPU_STATE_GREG) +#define VCPU_STATE_GREGS_PAIR_MASK \ + (VCPU_STATE_GREGS_MASK | (1UL << CURRENT_TASK_GREG)) + +#define HOST_KERNEL_GREGS_MAX_NUM HOST_GREGS_MAX_NUM +#define HOST_GREGS_KERNEL_MASK VCPU_STATE_GREGS_MASK +#define HOST_GREGS_PAIR_KERNEL_MASK VCPU_STATE_GREGS_PAIR_MASK +#else /* ! CONFIG_VIRTUALIZATION */ +#define HOST_KERNEL_GREGS_MAX_NUM 0 +#define HOST_GREGS_KERNEL_MASK 0UL +#define HOST_GREGS_PAIR_KERNEL_MASK 0UL +#endif /* CONFIG_VIRTUALIZATION */ + +#define HOST_KERNEL_GREGS_MASK HOST_GREGS_KERNEL_MASK +#define HOST_KERNEL_GREGS_PAIR_MASK HOST_GREGS_PAIR_KERNEL_MASK + +/* Guest kernel can use global registers too (now only same as native kernel) */ +/* and addition registers on host to support virtulaization (now only one */ +/* register as pointer to VCPU state structure). */ +/* All guest global registers are saved/restored by host */ +/* Host can not now is guest used own global registers, so should */ +/* save/restore on max */ +#define GUEST_GREGS_NUM KERNEL_GREGS_MAX_NUM +#define GUEST_GREGS_MASK KERNEL_GREGS_MAX_MASK + +/* real number & mask of registers used by kernel */ +/* in accordance with config variables anf execution mode */ +#define KERNEL_GREGS_NUM NATIVE_KERNEL_GREGS_MAX_NUM +#define KERNEL_GREGS_MASK NATIVE_KERNEL_GREGS_MASK + +/* + * Global register used by user and kernel, so it need save/restore + * some global registers state while enter to/return from kernel. + * Global registers can contain tagged values and be used by protected + * processes. 
So it need store/restore registers state by pairs to do not + * destroy quad pointers + */ + +#ifdef CONFIG_GREGS_CONTEXT +/* Pair of global registers used by kernel: */ +#define KERNEL_GREGS_PAIRS_START GUEST_VCPU_STATE_GREG /* %dg16 */ +/* Pair of global registers to point to current structures: */ +/* current & current_thread_info() */ +#define CURRENT_GREGS_PAIR_LO GUEST_VCPU_STATE_GREG /* %dg16 */ +#define CURRENT_GREGS_PAIR_HI CURRENT_TASK_GREG /* %dg17 */ +#define CURRENT_GREGS_PAIRS_NUM 1 /* one pair: */ + /* low: thread info */ + /* high: task structure */ +#define CURRENT_GREGS_PAIRS_SIZE /* one pair of */ \ + /* two registers */ \ + (CURRENT_GREGS_PAIRS_NUM * 2) +#define GUEST_VCPU_STATE_GREGS_PAIRS_INDEX /* g[0] */ \ + (GUEST_VCPU_STATE_GREG - KERNEL_GREGS_PAIRS_START) +#define CURRENT_TASK_GREGS_PAIRS_INDEX /* g[1] */ \ + (CURRENT_TASK_GREG - KERNEL_GREGS_PAIRS_START) +#define CURRENT_GREGS_PAIRS_INDEX_LO GUEST_VCPU_STATE_GREGS_PAIRS_INDEX +#define CURRENT_GREGS_PAIRS_INDEX_HI CURRENT_TASK_GREGS_PAIRS_INDEX +/* raw_smp_processor_id & __my_cpu_offset */ +#define CPU_GREGS_PAIR_LO MY_CPU_OFFSET_GREG /* %dg18 */ +#define CPU_GREGS_PAIR_HI SMP_CPU_ID_GREG /* %dg19 */ +#define CPU_GREGS_PAIRS_NUM 1 /* one pair: */ + /* low: my per cpu offset */ + /* high: CPU ID */ +#define CPU_GREGS_PAIRS_SIZE /* one pair of */ \ + /* two registers */ \ + (CPU_GREGS_PAIRS_NUM * 2) +#define MY_CPU_OFFSET_GREGS_PAIRS_INDEX /* g[2] */ \ + (MY_CPU_OFFSET_GREG - KERNEL_GREGS_PAIRS_START) +#define SMP_CPU_ID_GREGS_PAIRS_INDEX /* g[3] */ \ + (SMP_CPU_ID_GREG - KERNEL_GREGS_PAIRS_START) +#define CPU_GREGS_PAIRS_INDEX_LO MY_CPU_OFFSET_GREGS_PAIRS_INDEX +#define CPU_GREGS_PAIRS_INDEX_HI SMP_CPU_ID_GREGS_PAIRS_INDEX + +#define NATIVE_KERNEL_GREGS_PAIRS_NUM \ + (CURRENT_GREGS_PAIRS_NUM + CPU_GREGS_PAIRS_NUM) +#define NATIVE_KERNEL_GREGS_PAIRS_SIZE \ + (CURRENT_GREGS_PAIRS_SIZE + CPU_GREGS_PAIRS_SIZE) +#else /* ! 
CONFIG_GREGS_CONTEXT */ +#define NATIVE_KERNEL_GREGS_PAIRS_NUM 0 +#define NATIVE_KERNEL_GREGS_PAIRS_SIZE 0 +#endif /* CONFIG_GREGS_CONTEXT */ + +#ifdef CONFIG_VIRTUALIZATION +/* Pair of global registers used by host to support virtualization */ +#define HOST_GREGS_PAIRS_START GUEST_VCPU_STATE_GREG /* %dg16 */ +/* VCPU state pointer is used always for virtualization support */ +/* so it need be into starting pair */ +#define VCPU_STATE_GREGS_PAIR_LO GUEST_VCPU_STATE_GREG /* %dg16 */ +#define VCPU_STATE_GREGS_PAIR_HI CURRENT_TASK_GREG /* %dg17 */ +#define VCPU_STATE_GREGS_PAIRS_NUM 1 /* one pair: */ + /* low: kvm_vcpu_state */ + /* high: current task */ +#define VCPU_STATE_GREGS_PAIRS_SIZE /* one pair of */ \ + /* two registers */ \ + (VCPU_STATE_GREGS_PAIRS_NUM * 2) +#define VCPU_STATE_GREGS_PAIRS_INDEX /* g[0] */ \ + (GUEST_VCPU_STATE_GREG - HOST_GREGS_PAIRS_START) +#define VCPU_STATE_GREGS_PAIRS_HI_INDEX /* g[1] */ \ + (CURRENT_TASK_GREG - HOST_GREGS_PAIRS_START) + +/* indexes of global registers for virtualization in structure */ +/* guest_gregs->g[] */ +#define HOST_VCPU_STATE_GREGS_PAIRS_INDEX_LO /* g[0] */ \ + VCPU_STATE_GREGS_PAIRS_INDEX +#define HOST_VCPU_STATE_GREGS_PAIRS_INDEX_HI /* g[1] */ \ + VCPU_STATE_GREGS_PAIRS_HI_INDEX + +#define HOST_KERNEL_GREGS_PAIRS_NUM VCPU_STATE_GREGS_PAIRS_NUM +#define HOST_KERNEL_GREGS_PAIRS_SIZE VCPU_STATE_GREGS_PAIRS_SIZE +#else /* ! 
CONFIG_VIRTUALIZATION */ +#define HOST_KERNEL_GREGS_PAIRS_NUM 0 +#define HOST_KERNEL_GREGS_PAIRS_SIZE 0 +#endif /* CONFIG_VIRTUALIZATION */ + +/* real number & size of global registers pairs used by kernel */ +/* in accordance with config and execution mode */ +#define KERNEL_GREGS_PAIRS_NUM NATIVE_KERNEL_GREGS_PAIRS_NUM +#define KERNEL_GREGS_PAIRS_SIZE NATIVE_KERNEL_GREGS_PAIRS_SIZE + +#endif /* __KERNEL__ */ +#endif /* _E2K_GLOB_REGS_H */ diff --git a/arch/e2k/include/asm/gpio.h b/arch/e2k/include/asm/gpio.h new file mode 100644 index 000000000000..9414a7c50a3d --- /dev/null +++ b/arch/e2k/include/asm/gpio.h @@ -0,0 +1,21 @@ +/* + * Generic GPIO API implementation for e2k. + * + * Derived from the generic GPIO API for x86: + * + * Copyright (c) 2012 MCST. + * + * Author: Evgeny Kravtsunov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef _ASM_X86_GPIO_H +#define _ASM_X86_GPIO_H + +#include + +#endif /* _ASM_X86_GPIO_H */ diff --git a/arch/e2k/include/asm/gregs.h b/arch/e2k/include/asm/gregs.h new file mode 100644 index 000000000000..d5e9b111d3df --- /dev/null +++ b/arch/e2k/include/asm/gregs.h @@ -0,0 +1,219 @@ +#ifndef _E2K_GREGS_H +#define _E2K_GREGS_H + +#include +#include +#include +#include +#include + +/* + * Save new value of gN and set current pointer into these register + * to can use macroses current & current_thread_info() + */ +#define SET_CURRENTS_GREGS(__task) \ +({ \ + E2K_SET_DGREG_NV(CURRENT_TASK_GREG, (__task)); \ +}) +#define SET_SMP_CPUS_GREGS(__cpu, __per_cpu_off) \ +({ \ + E2K_SET_DGREG_NV(SMP_CPU_ID_GREG, (__cpu)); \ + E2K_SET_DGREG_NV(MY_CPU_OFFSET_GREG, (__per_cpu_off)); \ +}) +#define SET_KERNEL_GREGS(__task, __cpu, __per_cpu_off) \ +({ \ + SET_CURRENTS_GREGS(__task); \ + SET_SMP_CPUS_GREGS(__cpu, __per_cpu_off); \ +}) +#define ONLY_SET_CURRENTS_GREGS(__ti) \ +({ \ + SET_CURRENTS_GREGS(thread_info_task(__ti)); \ +}) +#define ONLY_SAVE_KERNEL_CURRENTS_GREGS(task__) \ +({ \ + (task__) = NATIVE_GET_UNTEGGED_DGREG(CURRENT_TASK_GREG); \ +}) +#ifdef CONFIG_SMP +#define ONLY_SAVE_KERNEL_SMP_CPUS_GREGS(cpu_id__, cpu_off__) \ +({ \ + (cpu_id__) = NATIVE_GET_UNTEGGED_DGREG(SMP_CPU_ID_GREG); \ + (cpu_off__) = NATIVE_GET_UNTEGGED_DGREG(MY_CPU_OFFSET_GREG); \ +}) +#else /* ! CONFIG_SMP */ +#define ONLY_SAVE_KERNEL_SMP_CPUS_GREGS(cpu_id__, cpu_off__) +#endif /* CONFIG_SMP */ +#define ONLY_SAVE_KERNEL_GREGS(task__, cpu_id__, cpu_off__) \ +({ \ + ONLY_SAVE_KERNEL_CURRENTS_GREGS(task__); \ + ONLY_SAVE_KERNEL_SMP_CPUS_GREGS(cpu_id__, cpu_off__); \ +}) + +#define ONLY_RESTORE_KERNEL_CURRENTS_GREGS(task__) \ +({ \ + NATIVE_SET_DGREG(CURRENT_TASK_GREG, task__); \ +}) +#ifdef CONFIG_SMP +#define ONLY_RESTORE_KERNEL_SMP_CPUS_GREGS(cpu_id__, cpu_off__) \ +({ \ + NATIVE_SET_DGREG(SMP_CPU_ID_GREG, cpu_id__); \ + NATIVE_SET_DGREG(MY_CPU_OFFSET_GREG, cpu_off__); \ +}) +#else /* ! 
CONFIG_SMP */ +#define ONLY_RESTORE_KERNEL_SMP_CPUS_GREGS(cpu_id__, cpu_off__) +#endif /* CONFIG_SMP */ +#define ONLY_RESTORE_KERNEL_GREGS(task__, cpu_id__, cpu_off__)\ +({ \ + ONLY_RESTORE_KERNEL_CURRENTS_GREGS(task__); \ + ONLY_RESTORE_KERNEL_SMP_CPUS_GREGS(cpu_id__, cpu_off__); \ +}) + +#ifdef CONFIG_SMP +#define ONLY_SET_SMP_CPUS_GREGS(__ti) \ +({ \ + long __cpu = task_cpu(thread_info_task(__ti)); \ + \ + SET_SMP_CPUS_GREGS(__cpu, per_cpu_offset(__cpu)); \ +}) +#else /* ! CONFIG_SMP */ +#define ONLY_SET_SMP_CPUS_GREGS(__ti) +#endif /* CONFIG_SMP */ + +#define ONLY_SET_KERNEL_GREGS(__ti) \ +({ \ + ONLY_SET_CURRENTS_GREGS(__ti); \ + ONLY_SET_SMP_CPUS_GREGS(__ti); \ +}) + +#define CLEAR_KERNEL_GREGS() \ +({ \ + SET_KERNEL_GREGS(0, 0, 0); \ +}) +#define NATIVE_SAVE_KERNEL_GREGS_AND_SET(__ti) \ +({ \ + machine.save_kernel_gregs(&(__ti)->k_gregs); \ + ONLY_SET_KERNEL_GREGS(__ti); \ +}) +/* + * global registers used as pointers to current task & thread info + * must be restored and current & current_thread_info() can not be + * used from now + */ +#define ONLY_COPY_FROM_KERNEL_CURRENT_GREGS(__k_gregs, task__) \ +({ \ + (task__) = (__k_gregs)->g[CURRENT_TASK_GREGS_PAIRS_INDEX].base; \ +}) +#ifdef CONFIG_SMP +#define ONLY_COPY_FROM_KERNEL_SMP_CPUS_GREGS(__k_gregs, cpu_id__, cpu_off__) \ +({ \ + (cpu_id__) = (__k_gregs)->g[SMP_CPU_ID_GREGS_PAIRS_INDEX].base; \ + (cpu_off__) = (__k_gregs)->g[MY_CPU_OFFSET_GREGS_PAIRS_INDEX].base; \ +}) +#else /* ! 
CONFIG_SMP */ +#define ONLY_COPY_FROM_KERNEL_SMP_CPUS_GREGS(__k_gregs, cpu_id__, cpu_off__) +#endif /* CONFIG_SMP */ +#define ONLY_COPY_FROM_KERNEL_GREGS(__k_gregs, task__, cpu_id__, cpu_off__) \ +({ \ + ONLY_COPY_FROM_KERNEL_CURRENT_GREGS(__k_gregs, task__); \ + ONLY_COPY_FROM_KERNEL_SMP_CPUS_GREGS(__k_gregs, cpu_id__, cpu_off__); \ +}) + +#define ONLY_COPY_TO_KERNEL_CURRENT_GREGS(__k_gregs, task__) \ +({ \ + (__k_gregs)->g[CURRENT_TASK_GREGS_PAIRS_INDEX].base = (task__); \ +}) +#ifdef CONFIG_SMP +#define ONLY_COPY_TO_KERNEL_SMP_CPUS_GREGS(__k_gregs, cpu_id__, cpu_off__) \ +({ \ + (__k_gregs)->g[SMP_CPU_ID_GREGS_PAIRS_INDEX].base = (cpu_id__); \ + (__k_gregs)->g[MY_CPU_OFFSET_GREGS_PAIRS_INDEX].base = (cpu_off__); \ +}) +#else /* ! CONFIG_SMP */ +#define ONLY_COPY_TO_KERNEL_SMP_CPUS_GREGS(__k_gregs, cpu_id__, cpu_off__) +#endif /* CONFIG_SMP */ +#define ONLY_COPY_TO_KERNEL_GREGS(__k_gregs, task__, cpu_id__, cpu_off__) \ +({ \ + ONLY_COPY_TO_KERNEL_CURRENT_GREGS(__k_gregs, task__); \ + ONLY_COPY_TO_KERNEL_SMP_CPUS_GREGS(__k_gregs, cpu_id__, cpu_off__); \ +}) +#define CLEAR_KERNEL_GREGS_COPY(__ti) \ + ONLY_COPY_TO_KERNEL_GREGS(&(__ti)->k_gregs, 0, 0, 0) + +#define NATIVE_RESTORE_KERNEL_GREGS_IN_SYSCALL(thread_info) \ +({ \ + thread_info_t *__ti = (thread_info); \ + \ + NATIVE_RESTORE_KERNEL_GREG(__ti->k_gregs.g, \ + GUEST_VCPU_STATE_GREGS_PAIRS_INDEX, \ + CURRENT_TASK_GREGS_PAIRS_INDEX, \ + MY_CPU_OFFSET_GREGS_PAIRS_INDEX, \ + SMP_CPU_ID_GREGS_PAIRS_INDEX, \ + GUEST_VCPU_STATE_GREG, CURRENT_TASK_GREG, \ + MY_CPU_OFFSET_GREG, SMP_CPU_ID_GREG); \ +}) + +/* User global registers, used by kernel, keep into thread info structure */ +/* and save to/restore from while enter to/return from kernel */ +#define CLEAR_GREGS_COPY_FROM_CURRENTS(thread_info) \ +({ \ + thread_info_t *__ti = (thread_info); \ + \ + __ti->k_gregs.g[GUEST_VCPU_STATE_GREGS_PAIRS_INDEX].base = 0; \ + __ti->k_gregs.g[GUEST_VCPU_STATE_GREGS_PAIRS_INDEX].ext = 0; \ + 
__ti->k_gregs.g[CURRENT_TASK_GREGS_PAIRS_INDEX].base = 0; \ + __ti->k_gregs.g[CURRENT_TASK_GREGS_PAIRS_INDEX].ext = 0; \ +}) + +#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native kernel without any virtualization */ +/* or it is native host kernel with virtualization support */ + +#define SAVE_KERNEL_GREGS_AND_SET(thread_info) \ + NATIVE_SAVE_KERNEL_GREGS_AND_SET(thread_info) +#define RESTORE_KERNEL_GREGS_AND_FREE(thread_info) \ + NATIVE_RESTORE_KERNEL_GREGS(&(thread_info)->k_gregs) +#define RESTORE_KERNEL_GREGS_IN_SYSCALL(thread_info) \ + NATIVE_RESTORE_KERNEL_GREGS_IN_SYSCALL(thread_info) + #ifdef CONFIG_VIRTUALIZATION + /* it is native host kernel with virtualization support */ + #include + #endif /* CONFIG_VIRTUALIZATION */ +#endif /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ + +static inline void copy_k_gregs_to_gregs(struct global_regs *dst, + const struct kernel_gregs *src) +{ + tagged_memcpy_8(&dst->g[KERNEL_GREGS_PAIRS_START], src->g, + sizeof(src->g)); +} + +static inline void copy_k_gregs_to_k_gregs(struct kernel_gregs *dst, + const struct kernel_gregs *src) +{ + tagged_memcpy_8(dst->g, src->g, sizeof(src->g)); +} + +static inline void get_k_gregs_from_gregs(struct kernel_gregs *dst, + const struct global_regs *src) +{ + tagged_memcpy_8(dst->g, &src->g[KERNEL_GREGS_PAIRS_START], + sizeof(dst->g)); +} + +static inline void copy_k_gregs_to_l_gregs(struct local_gregs *dst, + const struct kernel_gregs *src) +{ + BUG_ON(KERNEL_GREGS_PAIRS_START < LOCAL_GREGS_START); + tagged_memcpy_8(&dst->g[KERNEL_GREGS_PAIRS_START - LOCAL_GREGS_START], + src->g, sizeof(src->g)); +} + +static inline void get_k_gregs_from_l_regs(struct kernel_gregs *dst, + const struct local_gregs *src) +{ + BUG_ON(KERNEL_GREGS_PAIRS_START < LOCAL_GREGS_START); + tagged_memcpy_8(dst->g, + &src->g[KERNEL_GREGS_PAIRS_START - LOCAL_GREGS_START], + sizeof(dst->g)); +} + +#endif diff --git a/arch/e2k/include/asm/hardirq.h 
b/arch/e2k/include/asm/hardirq.h
new file mode 100644
index 000000000000..2f4ff34eee5d
--- /dev/null
+++ b/arch/e2k/include/asm/hardirq.h
@@ -0,0 +1,40 @@
+#pragma once
+#include 
+
+#define __ARCH_IRQ_EXIT_IRQS_DISABLED /* irq_exit() is entered with IRQs disabled */
+
+extern void irq_enter(void);
+extern void irq_exit(void);
+
+#define l_irq_enter() irq_enter()
+#define l_irq_exit() irq_exit()
+
+#include 
+
+static inline bool is_from_C1_wait_trap(const struct pt_regs *regs)
+{
+	unsigned long ip = get_return_ip(regs);
+
+	return unlikely(ip >= (unsigned long) __C1_wait_trap_start &&
+			ip < (unsigned long) __C1_wait_trap_end);
+}
+static inline bool is_from_C3_wait_trap(const struct pt_regs *regs)
+{
+	unsigned long ip = get_return_ip(regs);
+
+	return unlikely(ip >= (unsigned long) __C3_wait_trap_start &&
+			ip < (unsigned long) __C3_wait_trap_end);
+}
+static inline bool is_from_wait_trap(const struct pt_regs *regs)
+{
+	return is_from_C1_wait_trap(regs) || is_from_C3_wait_trap(regs);
+}
+extern void handle_wtrap(struct pt_regs *regs);
+#define arch_nmi_enter() \
+do { \
+	inc_irq_stat(__nmi_count); \
+	if (is_from_wait_trap(regs)) \
+		handle_wtrap(regs); \
+} while (0) /* NOTE(review): expands 'regs' from the caller's scope -- confirm all call sites define it */
+
+#define arch_nmi_exit() do { } while (0)
diff --git a/arch/e2k/include/asm/hb_regs.h b/arch/e2k/include/asm/hb_regs.h
new file mode 100644
index 000000000000..319f943139c4
--- /dev/null
+++ b/arch/e2k/include/asm/hb_regs.h
@@ -0,0 +1,437 @@
+
+#ifndef _E2K_HB_REGS_H_
+#define _E2K_HB_REGS_H_
+
+#ifdef __KERNEL__
+
+#include 
+#ifndef __ASSEMBLY__
+#include 
+#include 
+#include 
+#endif /* __ASSEMBLY__ */
+
+#undef DEBUG_ERALY_HB_MODE
+#undef DebugEHB
+#define DEBUG_ERALY_HB_MODE 0 /* early Host Bridge access */
+#undef DEBUG_BOOT_HB_MODE
+#undef DebugBEHB
+#define DEBUG_BOOT_HB_MODE 0 /* boot Host Bridge access */
+#ifndef CONFIG_BOOT_E2K
+#define DebugEHB(fmt, args...) \
+	({ if (DEBUG_ERALY_HB_MODE) \
+		printk(fmt, ##args); })
+#define DebugBEHB(fmt, args...)
\ + ({ if (DEBUG_BOOT_HB_MODE) \ + do_boot_printk(fmt, ##args); }) +#else /* CONFIG_BOOT_E2K */ +#define DebugEHB(fmt, args...) \ + ({ if (DEBUG_ERALY_HB_MODE) \ + rom_printk(fmt, ##args); }) +#define DebugBEHB(fmt, args...) \ + ({ if (DEBUG_BOOT_HB_MODE) \ + rom_printk(fmt, ##args); }) +#endif /* ! CONFIG_BOOT_E2K */ + +#undef DEBUG_HB_MODE +#undef DebugHB +#define DEBUG_HB_MODE 0 /* Host Bridge access */ +#define DebugHB(fmt, args...) \ + ({ if (DEBUG_HB_MODE) \ + printk(fmt, ##args); }) + +/* + * Host Bridge is PCI device on root bus #0 and has common PCI configure + * registers and some additional special registers + */ + +/* Host bridge is device 0x1f on root bus #0 */ +#define HB_PCI_BUS_NUM 0x00 +#define HB_PCI_SLOT 0x1f +#define HB_PCI_FUNC 0x00 +/* Embeded Graphic is device 0x1e on root bus #0 */ +#define EG_PCI_BUS_NUM 0x00 +#define EG_PCI_SLOT 0x1e +#define EG_PCI_FUNC 0x00 + +/* Base address of legacy NBSR registers */ +#define HB_PCI_LEGACY_BAR PCI_BASE_ADDRESS_0 /* 0x10 64 bits */ + #define HB_PCI_LEGACY_MEMORY_BAR 0x000000fffff00000 /* [39:20] */ + #define HB_PCI_LEGACY_ADDR_MASK 0x00000000000ffff0 /* [19: 4] 1M */ +/* Base address of Power Management Controller registers */ +#define HB_PCI_PMC_BAR PCI_BASE_ADDRESS_2 /* 0x18 64 bits */ + #define HB_PCI_PMC_MEMORY_BAR 0x000000ffffff0000 /* [39:16] */ + #define HB_PCI_PMC_ADDR_MASK 0x000000000000fff0 /* [15: 4] */ + /* 64K */ + +/* Additional special registers */ + +/* Host Bridge configuration register */ +#define HB_PCI_CFG 0x40 /* 32 bits */ + #define HB_CFG_MaskIntSic 0x00000080 /* SIC interrupts */ + /* to embedeed IOAPIC */ + #define HB_CFG_MaskIntWlcc 0x00000040 /* WLCC interrupts */ + /* to embedeed IOAPIC */ + #define HB_CFG_MaskIntIommu 0x00000020 /* IOMMU interrupts */ + /* to embedeed IOAPIC */ + #define HB_CFG_ShareHostInterrupts 0x00000010 /* HB interrupts */ + /* are shared as IRQ2 */ + /* else IRQ2 & IRQ3 */ + #define HB_CFG_ShareGraphicsInterrupts 0x00000008 /* EG interrupts */ + /* 
are shared as IRQ0 */ + /* else IRQ0 & IRQ1 */ + #define HB_CFG_InternalIoApicEnable 0x00000004 /* embeded interrupts */ + /* to embedeed IOAPIC */ + /* else to LAPIC LVT */ + #define HB_CFG_IntegratedVgaEnable 0x00000002 /* Legacy VGA access */ + /* to EG */ + /* else to IOHUB */ + #define HB_CFG_IntegratedGraphicsEnable 0x00000001 /* EG is on */ + +/* Top Of low Memory register */ +#define HB_PCI_TOM 0x44 /* 32 bits */ + #define HB_PCI_TOM_LOW_MASK 0x00000000fff00000 /* [31:20] */ + +/* Top Of high Memory register */ +#define HB_PCI_TOM2 0x48 /* 64 bits */ + #define HB_PCI_TOM2_HI_MASK 0x000000fffff00000 /* [39:20] */ + #define HB_PCI_HI_ADDR_BASE 0x0000000100000000 /* 4Gb */ + +/* Base Address of high memory from which remapped low memore */ +#define HB_PCI_REMAPBASE 0x50 /* 64 bits */ + #define HB_PCI_REMAPBASE_MASK 0x000000fffff00000 /* [39:20] */ + +/* Base Address of embeded IO APIC */ +#define HB_PCI_IOAPICBASE 0x58 /* 64 bits */ + #define HB_PCI_IOAPICBASE_MASK 0x000000ffffffff00 /* [39: 8] */ + +/* PMC MSI configuration register */ +#define HB_PCI_PMC_MSGCTL 0x62 /* 16 bits */ +/* PMC MSI address register */ +#define HB_PCI_PMC_MSGADDR 0x64 /* 32 bits */ + #define HB_PCI_MESSADGEADDRESS 0xfffffffc /* [31: 2] */ + +/* + * Embeded Graphic controller registers + */ + +/* Base address of Video RAM */ +#define EG_PCI_VRAM_BAR PCI_BASE_ADDRESS_0 /* 0x10 64 bits */ + #define EG_PCI_VRAM_MEMORY_BAR 0x000000fff8000000 /* [39:30] */ + #define EG_PCI_VRAM_ADDRMASK1024 0x0000000020000000 /* [29] */ + #define EG_PCI_VRAM_ADDRMASK512 0x0000000010000000 /* [28] */ + #define EG_PCI_VRAM_ADDRMASK256 0x0000000008000000 /* [27] */ + #define EG_PCI_VRAM_ADDR_MASK 0x0000000007fffff0 /* [26: 4] 128M */ +/* Base address of MGA-2 registers */ +#define EG_PCI_MGA2_BAR PCI_BASE_ADDRESS_2 /* 0x18 32 bits */ + #define EG_PCI_MGA2_MEMORY_BAR 0xfffc0000 /* [31:18] */ + #define EG_PCI_MGA2_ADDR_MASK 0x0003ffff /* [17: 4] 256K */ +/* Base address of GC2500 registers */ +#define 
EG_PCI_GC2500_BAR PCI_BASE_ADDRESS_3 /* 0x1c 32 bits */ + #define EG_PCI_GC2500_MEMORY_BAR 0xfffc0000 /* [31:18] */ + #define EG_PCI_GC2500_ADDR_MASK 0x0003ffff /* [17: 4] 256K */ +/* Embeded Graphic controller CFG register */ +#define EG_PCI_CFG 0x40 /* 32 bits */ + #define EG_CFG_VRAM_SIZE_MASK 0x00000003 /* [ 1: 0] */ + #define EG_CFG_VRAM_SIZE_128 0x0 /* 128 Mb */ + #define EG_CFG_VRAM_SIZE_256 0x1 /* 256 Mb */ + #define EG_CFG_VRAM_SIZE_512 0x2 /* 512 Mb */ + #define EG_CFG_VRAM_SIZE_1024 0x3 /* 1 Gb */ +/* Embeded Graphic MSI configuration register */ +#define EG_PCI_MSGCTL 0x46 /* 16 bits */ +/* Embeded Graphic MSI address register */ +#define EG_PCI_MSGADDR 0x48 /* 32 bits */ + #define EG_PCI_MESSADGEADDRESS 0xfffffffc /* [31: 2] */ + +#ifndef __ASSEMBLY__ + +/* + * Host bridge & embeded graphic see as PCI devices on bus #0 + */ +#define hb_eg_early_readb(addr) \ + boot_readb((void *)(addr)) +#define hb_eg_early_readw(addr) \ + boot_readw((void *)(addr)) +#define hb_eg_early_readl(addr) \ + boot_readl((void *)(addr)) +#define hb_eg_early_writeb(value, addr) \ + boot_writeb((value), (void *)(addr)) +#define hb_eg_early_writew(value, addr) \ + boot_writew((value), (void *)(addr)) +#define hb_eg_early_writel(value, addr) \ + boot_writel((value), (void *)(addr)) + +#define hb_eg_early_pci_conf_base() sic_domain_pci_conf_base(0) + +#define boot_hb_eg_readl(addr) boot_readl((void *)(addr)) +#define boot_hb_eg_pci_conf_base() boot_sic_domain_pci_conf_base(0) + +static inline unsigned char +early_readb_hb_eg_reg(unsigned int bus, unsigned int slot, unsigned int func, + unsigned int reg_offset) +{ + unsigned long reg_addr = hb_eg_early_pci_conf_base(); + unsigned char reg_value; + + reg_addr += CONFIG_CMD(bus, PCI_DEVFN(slot, func), reg_offset); + reg_value = hb_eg_early_readb(reg_addr); + DebugEHB("early_readb_hb_eg_reg() reg 0x%x read 0x%02hhx from 0x%lx\n", + reg_offset, reg_value, reg_addr); + return reg_value; +} + +static inline unsigned short 
+early_readw_hb_eg_reg(unsigned int bus, unsigned int slot, unsigned int func,
+			unsigned int reg_offset)
+{
+	unsigned long reg_addr = hb_eg_early_pci_conf_base();
+	unsigned short reg_value;
+
+	reg_addr += CONFIG_CMD(bus, PCI_DEVFN(slot, func), reg_offset);
+	reg_value = hb_eg_early_readw(reg_addr);
+	DebugEHB("early_readw_hb_eg_reg() reg 0x%x read 0x%04hx from 0x%lx\n",
+		reg_offset, reg_value, reg_addr);
+	return reg_value;
+}
+
+static inline unsigned int
+early_readl_hb_eg_reg(unsigned int bus, unsigned int slot, unsigned int func,
+			unsigned int reg_offset)
+{
+	unsigned long reg_addr = hb_eg_early_pci_conf_base();
+	unsigned int reg_value;
+
+	reg_addr += CONFIG_CMD(bus, PCI_DEVFN(slot, func), reg_offset);
+	reg_value = hb_eg_early_readl(reg_addr);
+	DebugEHB("early_readl_hb_eg_reg() reg 0x%x read 0x%08x from 0x%lx\n",
+		reg_offset, reg_value, reg_addr);
+	return reg_value;
+}
+
+static inline unsigned long
+early_readll_hb_eg_reg(unsigned int bus, unsigned int slot, unsigned int func,
+			unsigned int reg_offset)
+{
+	unsigned long reg_addr = hb_eg_early_pci_conf_base();
+	unsigned long reg_value_lo;
+	unsigned long reg_value_hi;
+	unsigned long reg_value;
+
+	reg_addr += CONFIG_CMD(bus, PCI_DEVFN(slot, func), reg_offset);
+	reg_value_lo = hb_eg_early_readl(reg_addr);
+	reg_value_hi = hb_eg_early_readl(reg_addr + sizeof(unsigned int));
+	reg_value = reg_value_lo | (reg_value_hi << sizeof(unsigned int) * 8);
+	DebugEHB("early_readll_hb_eg_reg() reg 0x%x read 0x%016lx from 0x%lx\n",
+		reg_offset, reg_value, reg_addr);
+	return reg_value;
+}
+
+static inline unsigned long
+boot_readll_hb_eg_reg(unsigned int bus, unsigned int slot, unsigned int func,
+			unsigned int reg_offset)
+{
+	unsigned long reg_addr = boot_hb_eg_pci_conf_base();
+	unsigned long reg_value_lo;
+	unsigned long reg_value_hi;
+	unsigned long reg_value;
+
+	reg_addr += CONFIG_CMD(bus, PCI_DEVFN(slot, func), reg_offset);
+	reg_value_lo = boot_hb_eg_readl(reg_addr);
+	reg_value_hi = boot_hb_eg_readl(reg_addr + sizeof(unsigned int));
+	reg_value = reg_value_lo | (reg_value_hi << sizeof(unsigned int) * 8);
+	DebugBEHB("boot_readll_hb_eg_reg() reg 0x%x read 0x%016lx from 0x%lx\n",
+		reg_offset, reg_value, reg_addr);
+	return reg_value;
+}
+
+static inline void
+early_writeb_hb_eg_reg(unsigned int bus, unsigned int slot, unsigned int func,
+			unsigned char reg_value, unsigned int reg_offset)
+{
+	unsigned long reg_addr = hb_eg_early_pci_conf_base();
+
+	reg_addr += CONFIG_CMD(bus, PCI_DEVFN(slot, func), reg_offset);
+	hb_eg_early_writeb(reg_value, reg_addr);
+	DebugEHB("early_writeb_hb_eg_reg() reg 0x%x write 0x%02hhx to 0x%lx\n",
+		reg_offset, reg_value, reg_addr);
+}
+
+static inline void
+early_writew_hb_eg_reg(unsigned int bus, unsigned int slot, unsigned int func,
+			unsigned short reg_value, unsigned int reg_offset)
+{
+	unsigned long reg_addr = hb_eg_early_pci_conf_base();
+
+	reg_addr += CONFIG_CMD(bus, PCI_DEVFN(slot, func), reg_offset);
+	hb_eg_early_writew(reg_value, reg_addr);
+	DebugEHB("early_writew_hb_eg_reg() reg 0x%x write 0x%04hx to 0x%lx\n",
+		reg_offset, reg_value, reg_addr);
+}
+
+static inline void
+early_writel_hb_eg_reg(unsigned int bus, unsigned int slot, unsigned int func,
+			unsigned int reg_value, unsigned int reg_offset)
+{
+	unsigned long reg_addr = hb_eg_early_pci_conf_base();
+
+	reg_addr += CONFIG_CMD(bus, PCI_DEVFN(slot, func), reg_offset);
+	hb_eg_early_writel(reg_value, reg_addr);
+	DebugEHB("early_writel_hb_eg_reg() reg 0x%x write 0x%08x to 0x%lx\n",
+		reg_offset, reg_value, reg_addr);
+}
+
+static inline void
+early_writell_hb_eg_reg(unsigned int bus, unsigned int slot, unsigned int func,
+			unsigned long reg_value, unsigned int reg_offset)
+{
+	unsigned long reg_addr = hb_eg_early_pci_conf_base();
+	unsigned int reg_value_lo;
+	unsigned int reg_value_hi;
+
+	reg_addr += CONFIG_CMD(bus, PCI_DEVFN(slot, func), reg_offset);
+	reg_value_lo = reg_value & 0x00000000ffffffff;
+	reg_value_hi = reg_value >>
(sizeof(unsigned int) * 8); + hb_eg_early_writel(reg_value_lo, reg_addr); + hb_eg_early_writel(reg_value_hi, reg_addr + sizeof(unsigned int)); + DebugEHB("early_writell_hb_eg_reg() reg 0x%x write 0x%016lx to 0x%lx\n", + reg_offset, reg_value, reg_addr); +} + +static inline unsigned char +early_readb_hb_reg(unsigned int reg_offset) +{ + return early_readb_hb_eg_reg(HB_PCI_BUS_NUM, HB_PCI_SLOT, HB_PCI_FUNC, + reg_offset); +} + +static inline unsigned short +early_readw_hb_reg(unsigned int reg_offset) +{ + return early_readw_hb_eg_reg(HB_PCI_BUS_NUM, HB_PCI_SLOT, HB_PCI_FUNC, + reg_offset); +} + +static inline unsigned int +early_readl_hb_reg(unsigned int reg_offset) +{ + return early_readl_hb_eg_reg(HB_PCI_BUS_NUM, HB_PCI_SLOT, HB_PCI_FUNC, + reg_offset); +} + +static inline unsigned long +early_readll_hb_reg(unsigned int reg_offset) +{ + return early_readll_hb_eg_reg(HB_PCI_BUS_NUM, HB_PCI_SLOT, HB_PCI_FUNC, + reg_offset); +} + +static inline unsigned long +boot_readll_hb_reg(unsigned int reg_offset) +{ + return boot_readll_hb_eg_reg(HB_PCI_BUS_NUM, HB_PCI_SLOT, HB_PCI_FUNC, + reg_offset); +} + +static inline void +early_writeb_hb_reg(unsigned char reg_value, unsigned int reg_offset) +{ + early_writeb_hb_eg_reg(HB_PCI_BUS_NUM, HB_PCI_SLOT, HB_PCI_FUNC, + reg_value, reg_offset); +} + +static inline void +early_writew_hb_reg(unsigned short reg_value, unsigned int reg_offset) +{ + early_writew_hb_eg_reg(HB_PCI_BUS_NUM, HB_PCI_SLOT, HB_PCI_FUNC, + reg_value, reg_offset); +} + +static inline void +early_writel_hb_reg(unsigned int reg_value, unsigned int reg_offset) +{ + early_writel_hb_eg_reg(HB_PCI_BUS_NUM, HB_PCI_SLOT, HB_PCI_FUNC, + reg_value, reg_offset); +} + +static inline void +early_writell_hb_reg(unsigned long reg_value, unsigned int reg_offset) +{ + early_writell_hb_eg_reg(HB_PCI_BUS_NUM, HB_PCI_SLOT, HB_PCI_FUNC, + reg_value, reg_offset); +} + +static inline unsigned char +early_readb_eg_reg(unsigned int reg_offset) +{ + return 
early_readb_hb_eg_reg(EG_PCI_BUS_NUM, EG_PCI_SLOT, EG_PCI_FUNC, + reg_offset); +} + +static inline unsigned short +early_readw_eg_reg(unsigned int reg_offset) +{ + return early_readw_hb_eg_reg(EG_PCI_BUS_NUM, EG_PCI_SLOT, EG_PCI_FUNC, + reg_offset); +} + +static inline unsigned int +early_readl_eg_reg(unsigned int reg_offset) +{ + return early_readl_hb_eg_reg(EG_PCI_BUS_NUM, EG_PCI_SLOT, EG_PCI_FUNC, + reg_offset); +} + +static inline unsigned long +early_readll_eg_reg(unsigned int reg_offset) +{ + return early_readll_hb_eg_reg(EG_PCI_BUS_NUM, EG_PCI_SLOT, EG_PCI_FUNC, + reg_offset); +} + +static inline void +early_writeb_eg_reg(unsigned char reg_value, unsigned int reg_offset) +{ + early_writeb_hb_eg_reg(EG_PCI_BUS_NUM, EG_PCI_SLOT, EG_PCI_FUNC, + reg_value, reg_offset); +} + +static inline void +early_writew_eg_reg(unsigned short reg_value, unsigned int reg_offset) +{ + early_writew_hb_eg_reg(EG_PCI_BUS_NUM, EG_PCI_SLOT, EG_PCI_FUNC, + reg_value, reg_offset); +} + +static inline void +early_writel_eg_reg(unsigned int reg_value, unsigned int reg_offset) +{ + early_writel_hb_eg_reg(EG_PCI_BUS_NUM, EG_PCI_SLOT, EG_PCI_FUNC, + reg_value, reg_offset); +} + +static inline void +early_writell_eg_reg(unsigned long reg_value, unsigned int reg_offset) +{ + early_writell_hb_eg_reg(EG_PCI_BUS_NUM, EG_PCI_SLOT, EG_PCI_FUNC, + reg_value, reg_offset); +} + +static inline unsigned long +early_get_legacy_nbsr_base(void) +{ + return early_readll_hb_reg(HB_PCI_LEGACY_BAR) & + HB_PCI_LEGACY_MEMORY_BAR; +} + +static inline unsigned long +boot_get_legacy_nbsr_base(void) +{ + return boot_readll_hb_reg(HB_PCI_LEGACY_BAR) & + HB_PCI_LEGACY_MEMORY_BAR; +} + +#endif /* ! 
__ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* _E2K_HB_REGS_H_ */ diff --git a/arch/e2k/include/asm/head.h b/arch/e2k/include/asm/head.h new file mode 100644 index 000000000000..57856923f5ed --- /dev/null +++ b/arch/e2k/include/asm/head.h @@ -0,0 +1,270 @@ +/* $Id: head.h,v 1.41 2009/10/27 10:14:51 atic Exp $ */ +#ifndef _E2K_HEAD_H +#define _E2K_HEAD_H + +#include +#include +#include +#include +#ifndef __ASSEMBLY__ +#include +#endif /* _ASSEMBLY__ */ + +#define EOS_RAM_BASE_LABEL _data +#define KERNEL_START_LABEL _start /* start label of Linux Image */ +#define KERNEL_END_LABEL _end /* end label of Linux Image */ + +#define TTABLE_START_LABEL __ttable_start /* start label of kernel */ + /* trap table */ +#define TTABLE_END_LABEL __ttable_end /* end label of kernel */ + /* trap table */ + +#ifdef __ASSEMBLY__ + +#define KERNEL_BASE [KERNEL_START_LABEL] /* virtual address of Linux */ + /* Image begining */ +#define KERNEL_END [KERNEL_END_LABEL] /* virtual address of Linux */ + /* Image end */ +#define EOS_RAM_BASE [EOS_RAM_BASE_LABEL] + +#define KERNEL_TTABLE_BASE [TTABLE_START_LABEL] /* kernel trap table */ + /* start address */ +#define KERNEL_TTABLE_END [TTABLE_END_LABEL] /* kernel trap table */ + /* end address */ + +#else /* !(__ASSEMBLY__) */ + +#define EOS_RAM_BASE ((e2k_addr_t)&EOS_RAM_BASE_LABEL) + +#define KERNEL_BASE ((e2k_addr_t)&KERNEL_START_LABEL) +#define KERNEL_END ((e2k_addr_t)&KERNEL_END_LABEL) + +#define KERNEL_TTABLE_BASE ((e2k_addr_t)&TTABLE_START_LABEL) +#define KERNEL_TTABLE_END ((e2k_addr_t)&TTABLE_END_LABEL) + +#endif /* !(__ASSEMBLY__) */ + + +#define E2K_EOS_RAM_PAGE_SIZE E2K_SMALL_PAGE_SIZE /* Loader warks into */ + /* the small pages */ + +/* Size of pages where the kernel is loaded */ +#define E2K_KERNEL_PAGE_SIZE (cpu_has(CPU_HWBUG_LARGE_PAGES) ? \ + E2K_SMALL_PAGE_SIZE : E2K_LARGE_PAGE_SIZE) +#define BOOT_E2K_KERNEL_PAGE_SIZE (boot_cpu_has(CPU_HWBUG_LARGE_PAGES) ? 
\ + E2K_SMALL_PAGE_SIZE : BOOT_E2K_LARGE_PAGE_SIZE) + + /* Equal map of phys */ + /* to virt addresses */ + /* should be done */ + /* into pages of one */ + /* size */ +#define BOOT_E2K_EQUAL_MAP_PAGE_SIZE BOOT_E2K_KERNEL_PAGE_SIZE + +#define E2K_KERNEL_PS_PAGE_SIZE E2K_SMALL_PAGE_SIZE /* kernel procedure */ + /* stack loads into */ + /* the small pages */ + + /* kernel procedure */ + /* stack size 8 * 4KB */ + /* at boot-time */ +#define E2K_BOOT_KERNEL_PS_SIZE (16 * E2K_KERNEL_PS_PAGE_SIZE) + + /* kernel procedure */ + /* chain stack loads */ + /* into the small */ + /* pages */ +#define E2K_KERNEL_PCS_PAGE_SIZE E2K_SMALL_PAGE_SIZE + + /* kernel procedure */ + /* chain stack size */ + /* at boot-time */ + /* 4 * 4KB */ +#define E2K_BOOT_KERNEL_PCS_SIZE (4 * E2K_KERNEL_PCS_PAGE_SIZE) + + /* kernel stack loads */ + /* into the small */ + /* pages */ +#define E2K_KERNEL_US_PAGE_SIZE E2K_SMALL_PAGE_SIZE + + /* kernel stack size */ + /* at boot-time */ + /* 8 * 4KB */ +#define E2K_BOOT_KERNEL_US_SIZE (4 * E2K_KERNEL_US_PAGE_SIZE) + + /* map initrd using */ + /* 4K pages (4Mb in */ + /* the future) */ +#define E2K_INITRD_PAGE_SIZE E2K_SMALL_PAGE_SIZE + + /* map bootinfo data */ + /* using 4K pages */ +#define E2K_BOOTINFO_PAGE_SIZE E2K_SMALL_PAGE_SIZE + + /* map MP tables */ + /* using 4K pages */ +#define E2K_MPT_PAGE_SIZE E2K_SMALL_PAGE_SIZE + + /* map symbols & */ + /* strings tables */ + /* using 4K pages */ +#define E2K_NAMETAB_PAGE_SIZE E2K_SMALL_PAGE_SIZE + + /* map x86 HW area */ + /* using 4K pages */ +#define E2K_X86_HW_PAGE_SIZE E2K_SMALL_PAGE_SIZE + +/* + * All or some parts of physical memory pages are mapped to virtual + * space starting from 'PAGE_OFFSET' + */ +#define E2K_MAPPED_PHYS_MEM_SIZE (0 * (1024 * 1024)) + /* full physical */ + /* memory */ + +/* Size of pages to map physical memory */ +#define E2K_MAPPED_PHYS_MEM_PAGE_SIZE \ + ((cpu_has(CPU_HWBUG_LARGE_PAGES) || \ + IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) ? 
\ + E2K_SMALL_PAGE_SIZE : E2K_LARGE_PAGE_SIZE) +#define BOOT_E2K_MAPPED_PHYS_MEM_PAGE_SIZE \ + ((boot_cpu_has(CPU_HWBUG_LARGE_PAGES) || \ + IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) ? \ + E2K_SMALL_PAGE_SIZE : BOOT_E2K_LARGE_PAGE_SIZE) + +/* + * Size of the top of kernel stack to map to equal virtual addresses to ensure + * switching from physical to virtual addressing + */ +#ifndef __ASSEMBLY__ +#define E2K_KERNEL_US_PAGE_SWITCHING_SIZE (128 * sizeof(long)) +#else +#define E2K_KERNEL_US_PAGE_SWITCHING_SIZE (128 * 8) +#endif /* !(__ASSEMBLY__) */ + +/* + * Kernel virtual memory layout + */ + +/* + * The topmost virtual addresses are used to allocate Virtually Mapped + * Linear Page Tables (VM LPT). + * All page tables is virtually mapped into the same virtual space as kernel + * Definition of Virtually Mapped Linear Page Table base address. + * Virtual page table lives at the end of virtual addresses space + * 0x0000 ff80 0000 0000 - 0x0000 ffff ffff ffff all PTs virtual space: + * + * 0x0000 ff80 0000 0000 - 0x0000 ffff bfff ffff first-level PTs (PTEs) + * 0x0000 ffff c000 0000 - 0x0000 ffff ffdf ffff second-level PTs (PMDs) + * 0x0000 ffff ffe0 0000 - 0x0000 ffff ffff efff third-level PTs (PUDs) + * 0x0000 ffff ffff f000 - 0x0000 ffff ffff fffe root-fourth-level PTs (PGD) + * 0x0000 ffff ffff ffff - root-fourth-level itself PGD + */ + +#define KERNEL_VPTB_BASE_ADDR 0x0000ff8000000000UL +#ifndef __ASSEMBLY__ +#define KERNEL_PPTB_BASE_ADDR ((e2k_addr_t)boot_root_pt) +#else +#define KERNEL_PPTB_BASE_ADDR (boot_root_pt) +#endif /* !(__ASSEMBLY__) */ + +/* + * Area dedicated for I/O ports and BIOS physical memory + * 0x0000 00ff fe00 0000 - 0x0000 00ff ffff ffff all I/O physical memory + * 0x0000 ff7b fc00 0000 - 0x0000 ff7b ffff ffff all I/O virtual memory + */ + +#define E2K_KERNEL_IO_BIOS_AREAS_BASE 0x0000ff7bfc000000UL +#define E2K_KERNEL_IO_BIOS_AREAS_SIZE 0x0000000004000000UL + +/* + * See BUG in pcim configuration block in jump func + * should be 0x00000000FEBFFFFFUL 
due to specification + */ +#define E2K_PCI_MEM_AREA_PHYS_END 0x00000000F7FFFFFFUL +#define E2K_SCRB_SIZE 0x0000000000001000UL + +#define E2K_FULL_SIC_IO_AREA_PHYS_BASE 0x0000000101000000UL +#define E2K_FULL_SIC_IO_AREA_SIZE 0x0000000000010000UL /* 64K */ + +#define E2K_LEGACY_SIC_IO_AREA_PHYS_BASE 0x000000ff20000000UL +#define E2K_LEGACY_SIC_IO_AREA_SIZE 0x0000000010000000UL /* 256M */ + +#define E2K_VIRT_CPU_X86_IO_AREA_PHYS_BASE 0x000000fff0000000UL + +/* + * Area dedicated for I/O ports and BIOS physical memory + * Area size should be max of + * E2K_FULL_SIC_IO_AREA_SIZE 0x0000000001000000UL + * E2K_LEGACY_SIC_BIOS_AREA_SIZE 0x0000000010000000UL 256 Mb IGNORE + * + * 0x0000 00ff f000 0000 - 0x0000 00ff ffff ffff all I/O physical memory + * 0x0000 ff7b fc00 0000 - 0x0000 ff7b ffff ffff all I/O virtual memory + * + * see area dedication above : + * + * #define E2K_KERNEL_IO_BIOS_AREAS_BASE 0x0000ff7bfc000000UL + * #define E2K_KERNEL_IO_BIOS_AREAS_SIZE 0x0000000004000000UL + */ + +/* + * Area dedicated for kernel resident image virtual space and virtual space + * to allocate and load kernel modules. 
+ * Both this areas should be within 2 ** 30 bits of virtual adresses to provide + * call of extern functions based on literal displacement DISP + * 0x0000 e200 0000 0000 - 0x0000 e200 3fff ffff kernel image area with modules + * 0x0000 e200 0000 0000 - 0x0000 e200 0xxx x000 kernel image area + * xxx x defined by kernel_image_size + * 0x0000 e200 0xxx x000 - 0x0000 e200 3fff ffff area to load modules + */ +#define NATIVE_KERNEL_IMAGE_AREA_BASE 0x0000e20000000000 + +#define KERNEL_IMAGE_PGD_INDEX pgd_index(E2K_KERNEL_IMAGE_AREA_BASE) + +#define E2K_KERNEL_IMAGE_AREA_SIZE kernel_image_size +#define E2K_MODULES_START _PAGE_ALIGN_DOWN( \ + (E2K_KERNEL_IMAGE_AREA_BASE + \ + E2K_KERNEL_IMAGE_AREA_SIZE), \ + E2K_KERNEL_PAGE_SIZE) +#define E2K_MODULES_END (E2K_KERNEL_IMAGE_AREA_BASE + (1 << 30)) +#define E2K_KERNEL_AREAS_SIZE 0x0000000040000000UL /* 2 ** 30 */ + +#define KERNEL_CODES_INDEX 0UL /* kernel CUI */ +/* bug 114501: use 0 index for all unprotected executables */ +#define USER_CODES_UNPROT_INDEX(p) \ + ((machine.native_iset_ver >= E2K_ISET_V6 || \ + !(current->thread.flags & E2K_FLAG_32BIT)) ? 
0UL : 1UL) /* user CUI */ +#define USER_CODES_PROT_INDEX 1UL /* user protected codes */ + /* index */ +#define MAX_KERNEL_CODES_UNITS (KERNEL_CODES_INDEX + 1) + +#define GUEST_CODES_INDEX 0UL +#define HOST_CODES_INDEX (KERNEL_CODES_INDEX) +#define MAX_GUEST_CODES_UNITS (GUEST_CODES_INDEX + 1) +#define MAX_HOST_CODES_UNITS (MAX_KERNEL_CODES_UNITS) + +/* + * Area dedicated for kernel symbols & strings tables + * 0x0000 e200 4000 0000 - 0x0000 e200 ffff ffff kernel symbols & strings tables + */ +#define E2K_KERNEL_NAMETAB_AREA_BASE (E2K_KERNEL_IMAGE_AREA_BASE + \ + E2K_KERNEL_AREAS_SIZE) + +/* + * Area dedicated for physical memory mapping to virtual space + * 0x0000 0000 0000 0000 - 0x0000 00ff ffff ffff all physical memory + * 0x0000 d000 0000 0000 - 0x0000 d0ff ffff ffff all virtual memory to map + * all physical memory + */ +#define E2K_KERNEL_PHYS_MEM_VIRT_BASE PAGE_OFFSET /* 0x0000d00000000000 */ +#define E2K_KERNEL_PHYS_MEM_SIZE MAX_PM_SIZE /* == 2**40 - 2**48 */ + +/* virtualization support */ +#include + +/* + * Kernel virtual memory context + */ +#define E2K_KERNEL_CONTEXT 0x000 + +#endif /* !(_E2K_HEAD_H) */ diff --git a/arch/e2k/include/asm/host_printk.h b/arch/e2k/include/asm/host_printk.h new file mode 100644 index 000000000000..b657387f599f --- /dev/null +++ b/arch/e2k/include/asm/host_printk.h @@ -0,0 +1,13 @@ +/* + * Guest VM printk() on host support + * + * Copyright 2015 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_HOST_PRINTK_H +#define _E2K_HOST_PRINTK_H + +#include +#include + +#endif /* ! 
_E2K_HOST_PRINTK_H */ diff --git a/arch/e2k/include/asm/hugetlb.h b/arch/e2k/include/asm/hugetlb.h new file mode 100644 index 000000000000..f482b2497a0e --- /dev/null +++ b/arch/e2k/include/asm/hugetlb.h @@ -0,0 +1,89 @@ +#ifndef _ASM_E2K_HUGETLB_H_ +#define _ASM_E2K_HUGETLB_H_ + +#include + +#include +#include +#include + + +extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte); +extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep); + + +static inline void arch_clear_hugepage_flags(struct page *page) +{ +} + +static inline int is_hugepage_only_range(struct mm_struct *mm, + unsigned long addr, + unsigned long len) +{ + return 0; +} + +#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH +static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ +} + +#define __HAVE_ARCH_HUGE_PTE_NONE +static inline int huge_pte_none(pte_t pte) +{ +#ifndef CONFIG_MAKE_ALL_PAGES_VALID + return _PAGE_CLEAR(pte_val(pte), UNI_PAGE_HUGE) == 0; +#else /* CONFIG_MAKE_ALL_PAGES_VALID */ + return _PAGE_CLEAR(pte_val(pte), UNI_PAGE_VALID | UNI_PAGE_HUGE) == 0; +#endif /* !CONFIG_MAKE_ALL_PAGES_VALID */ +} + +#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + ptep_set_wrprotect(mm, addr, ptep); + if (E2K_LARGE_PAGE_SIZE == E2K_4M_PAGE_SIZE) + ptep_set_wrprotect(mm, addr, ++ptep); +} + +#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS +static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty) +{ + int changed = !pte_same(*ptep, pte); + if (changed) { + set_pte_at(vma->vm_mm, addr, ptep, pte); + if (E2K_LARGE_PAGE_SIZE == E2K_4M_PAGE_SIZE) + set_pte_at(vma->vm_mm, addr, ++ptep, pte); + flush_tlb_range(vma, addr, addr + PMD_SIZE); + } + return changed; +} + +#define __HAVE_ARCH_HUGE_PTE_CLEAR +static inline 
void huge_pte_clear(struct mm_struct *mm, unsigned long address, + pte_t *page_table, unsigned long sz) +{ + /* + * In this case the virtual page occupies two sequential entries in + * the page table on the 2nd level (PMD). + * Both pte's (pmd's) should be cleared. + */ + pte_clear(mm, address, page_table); + if (E2K_LARGE_PAGE_SIZE == E2K_4M_PAGE_SIZE) + pte_clear(mm, address, (++page_table)); +} + +#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR +#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT +#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR +#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT + +#include + +#endif /* _ASM_E2K_HUGETLB_H_ */ diff --git a/arch/e2k/include/asm/hw_breakpoint.h b/arch/e2k/include/asm/hw_breakpoint.h new file mode 100644 index 000000000000..ed813ba14afa --- /dev/null +++ b/arch/e2k/include/asm/hw_breakpoint.h @@ -0,0 +1,47 @@ +#ifndef _E2K_HW_BREAKPOINT_H +#define _E2K_HW_BREAKPOINT_H + +#include + +struct arch_hw_breakpoint { + unsigned long address; + u8 len; + u8 type; + u8 ss; +}; + +#define HBP_NUM 4 +static inline int hw_breakpoint_slots(int type) +{ + return HBP_NUM; +} + +struct perf_event; +struct perf_event_attr; +struct task_struct; + +extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); +extern int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw); + +extern int arch_install_hw_breakpoint(struct perf_event *bp); +extern void arch_uninstall_hw_breakpoint(struct perf_event *bp); + +struct notifier_block; +extern int hw_breakpoint_exceptions_notify( + struct notifier_block *unused, unsigned long val, void *data); + +extern void hw_breakpoint_pmu_read(struct perf_event *bp); + +#ifdef CONFIG_HAVE_HW_BREAKPOINT +extern void bp_data_overflow_handle(struct pt_regs *regs); +extern void bp_instr_overflow_handle(struct pt_regs *regs); +extern void clear_ptrace_hw_breakpoint(struct task_struct *tsk); +#else /* ! 
CONFIG_HAVE_HW_BREAKPOINT */ +static inline void bp_data_overflow_handle(struct pt_regs *regs) { } +static inline void bp_instr_overflow_handle(struct pt_regs *regs) { } +static inline void clear_ptrace_hw_breakpoint(struct task_struct *tsk) {} +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ + +#endif /* _E2K_HW_BREAKPOINT_H */ diff --git a/arch/e2k/include/asm/hw_irq.h b/arch/e2k/include/asm/hw_irq.h new file mode 100644 index 000000000000..2d885d4eb6ce --- /dev/null +++ b/arch/e2k/include/asm/hw_irq.h @@ -0,0 +1,8 @@ +#ifndef _ASM_E2K_HW_IRQ_H +#define _ASM_E2K_HW_IRQ_H + +/* required by linux/irq.h */ + +#include + +#endif /* _ASM_E2K_HW_IRQ_H */ diff --git a/arch/e2k/include/asm/hw_stacks.h b/arch/e2k/include/asm/hw_stacks.h new file mode 100644 index 000000000000..500a2a118e40 --- /dev/null +++ b/arch/e2k/include/asm/hw_stacks.h @@ -0,0 +1,699 @@ +/* + * Hardware stacks support + * + * Copyright 2001-2015 Salavat S. Guilyazov (atic@mcst.ru) + */ + +#ifndef _E2K_HW_STACKS_H +#define _E2K_HW_STACKS_H + +#include +#include +#include + +typedef enum hw_stack_type { + HW_STACK_TYPE_PS, + HW_STACK_TYPE_PCS +} hw_stack_type_t; + +/* + * Procedure chain stacks can be mapped to user (user processes) + * or kernel space (kernel threads). But mapping is always to privileged area + * and directly can be accessed only by host kernel. + * SPECIAL CASE: access to current procedure chain stack: + * 1. Current stack frame must be locked (resident), so access is + * safety and can use common load/store operations + * 2. Top of stack can be loaded to the special hardware register file and + * must be spilled to memory before any access. + * 3. If items of chain stack are not updated, then spilling is enough to + * their access + * 4. 
If items of chain stack are updated, then interrupts and + * any calling of function should be disabled in addition to spilling, + * because of return (done) will fill some part of stack from memory and can be + * two copy of chain stack items: in memory and in registers file. + * We can update only in memory and following spill recover not updated + * value from registers file. + */ +static inline unsigned long +native_get_active_cr_mem_value(e2k_addr_t base, + e2k_addr_t cr_ind, e2k_addr_t cr_item) +{ + return *((unsigned long *)(base + cr_ind + cr_item)); +} +static inline unsigned long +native_get_active_cr0_lo_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_get_active_cr_mem_value(base, cr_ind, CR0_LO_I); +} +static inline unsigned long +native_get_active_cr0_hi_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_get_active_cr_mem_value(base, cr_ind, CR0_HI_I); +} +static inline unsigned long +native_get_active_cr1_lo_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_get_active_cr_mem_value(base, cr_ind, CR1_LO_I); +} +static inline unsigned long +native_get_active_cr1_hi_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_get_active_cr_mem_value(base, cr_ind, CR1_HI_I); +} +static inline void +native_put_active_cr_mem_value(unsigned long cr_value, + e2k_addr_t base, e2k_addr_t cr_ind, e2k_addr_t cr_item) +{ + *((unsigned long *)(base + cr_ind + cr_item)) = cr_value; +} +static inline void +native_put_active_cr0_lo_value(unsigned long cr0_lo_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_put_active_cr_mem_value(cr0_lo_value, base, cr_ind, CR0_LO_I); +} +static inline void +native_put_active_cr0_hi_value(unsigned long cr0_hi_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_put_active_cr_mem_value(cr0_hi_value, base, cr_ind, CR0_HI_I); +} +static inline void +native_put_active_cr1_lo_value(unsigned long cr1_lo_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_put_active_cr_mem_value(cr1_lo_value, 
base, cr_ind, CR1_LO_I); +} +static inline void +native_put_active_cr1_hi_value(unsigned long cr1_hi_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_put_active_cr_mem_value(cr1_hi_value, base, cr_ind, CR1_HI_I); +} + +static inline e2k_cr0_lo_t +native_get_active_cr0_lo(e2k_addr_t base, e2k_addr_t cr_ind) +{ + e2k_cr0_lo_t cr0_lo; + + cr0_lo.CR0_lo_half = native_get_active_cr0_lo_value(base, cr_ind); + return cr0_lo; +} +static inline e2k_cr0_hi_t +native_get_active_cr0_hi(e2k_addr_t base, e2k_addr_t cr_ind) +{ + e2k_cr0_hi_t cr0_hi; + + cr0_hi.CR0_hi_half = native_get_active_cr0_hi_value(base, cr_ind); + return cr0_hi; +} +static inline e2k_cr1_lo_t +native_get_active_cr1_lo(e2k_addr_t base, e2k_addr_t cr_ind) +{ + e2k_cr1_lo_t cr1_lo; + + cr1_lo.CR1_lo_half = native_get_active_cr1_lo_value(base, cr_ind); + return cr1_lo; +} +static inline e2k_cr1_hi_t +native_get_active_cr1_hi(e2k_addr_t base, e2k_addr_t cr_ind) +{ + e2k_cr1_hi_t cr1_hi; + + cr1_hi.CR1_hi_half = native_get_active_cr1_hi_value(base, cr_ind); + return cr1_hi; +} +static inline void +native_put_active_cr0_lo(e2k_cr0_lo_t cr0_lo, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_put_active_cr0_lo_value(cr0_lo.CR0_lo_half, base, cr_ind); +} +static inline void +native_put_active_cr0_hi(e2k_cr0_hi_t cr0_hi, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_put_active_cr0_hi_value(cr0_hi.CR0_hi_half, base, cr_ind); +} +static inline void +native_put_active_cr1_lo(e2k_cr1_lo_t cr1_lo, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_put_active_cr1_lo_value(cr1_lo.CR1_lo_half, base, cr_ind); +} +static inline void +native_put_active_cr1_hi(e2k_cr1_hi_t cr1_hi, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_put_active_cr1_hi_value(cr1_hi.CR1_hi_half, base, cr_ind); +} + +static inline int +native_get_user_cr0_lo(e2k_cr0_lo_t *cr0_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + unsigned long ts_flag; + int ret; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __get_user(AS_WORD_P(cr0_lo), + 
(u64 __user *)(base + cr_ind + CR0_LO_I)); + clear_ts_flag(ts_flag); + + return ret; +} +static inline int +native_get_user_cr0_hi(e2k_cr0_hi_t *cr0_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + unsigned long ts_flag; + int ret; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __get_user(AS_WORD_P(cr0_hi), + (u64 __user *)(base + cr_ind + CR0_HI_I)); + clear_ts_flag(ts_flag); + + return ret; +} +static inline int +native_get_user_cr1_lo(e2k_cr1_lo_t *cr1_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + unsigned long ts_flag; + int ret; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __get_user(AS_WORD_P(cr1_lo), + (u64 __user *)(base + cr_ind + CR1_LO_I)); + clear_ts_flag(ts_flag); + + return ret; +} +static inline int +native_get_user_cr1_hi(e2k_cr1_hi_t *cr1_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + unsigned long ts_flag; + int ret; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __get_user(AS_WORD_P(cr1_hi), + (u64 __user *)(base + cr_ind + CR1_HI_I)); + clear_ts_flag(ts_flag); + + return ret; +} +static inline int +native_put_user_cr0_lo(e2k_cr0_lo_t cr0_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + unsigned long ts_flag; + int ret; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __put_user(AS_WORD(cr0_lo), + (u64 __user *)(base + cr_ind + CR0_LO_I)); + clear_ts_flag(ts_flag); + + return ret; +} +static inline int +native_put_user_cr0_hi(e2k_cr0_hi_t cr0_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + unsigned long ts_flag; + int ret; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __put_user(AS_WORD(cr0_hi), + (u64 __user *)(base + cr_ind + CR0_HI_I)); + clear_ts_flag(ts_flag); + + return ret; +} +static inline int +native_put_user_cr1_lo(e2k_cr1_lo_t cr1_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + unsigned long ts_flag; + int ret; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __put_user(AS_WORD(cr1_lo), + (u64 __user *)(base + cr_ind + CR1_LO_I)); + clear_ts_flag(ts_flag); + + return ret; +} +static inline int 
+native_put_user_cr1_hi(e2k_cr1_hi_t cr1_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + unsigned long ts_flag; + int ret; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __put_user(AS_WORD(cr1_hi), + (u64 __user *)(base + cr_ind + CR1_HI_I)); + clear_ts_flag(ts_flag); + + return ret; +} + +static inline void +native_get_kernel_cr0_lo(e2k_cr0_lo_t *cr0_lo, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + AS_WORD_P(cr0_lo) = *((u64 *)(base + cr_ind + CR0_LO_I)); +} +static inline void +native_get_kernel_cr0_hi(e2k_cr0_hi_t *cr0_hi, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + AS_WORD_P(cr0_hi) = *((u64 *)(base + cr_ind + CR0_HI_I)); +} +static inline void +native_get_kernel_cr1_lo(e2k_cr1_lo_t *cr1_lo, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + AS_WORD_P(cr1_lo) = *((u64 *)(base + cr_ind + CR1_LO_I)); +} +static inline void +native_get_kernel_cr1_hi(e2k_cr1_hi_t *cr1_hi, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + AS_WORD_P(cr1_hi) = *((u64 *)(base + cr_ind + CR1_HI_I)); +} +static inline void +native_put_kernel_cr0_lo(e2k_cr0_lo_t cr0_lo, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + *((u64 *)(base + cr_ind + CR0_LO_I)) = AS_WORD(cr0_lo); +} +static inline void +native_put_kernel_cr0_hi(e2k_cr0_hi_t cr0_hi, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + *((u64 *)(base + cr_ind + CR0_HI_I)) = AS_WORD(cr0_hi); +} +static inline void +native_put_kernel_cr1_lo(e2k_cr1_lo_t cr1_lo, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + *((u64 *)(base + cr_ind + CR1_LO_I)) = AS_WORD(cr1_lo); +} +static inline void +native_put_kernel_cr1_hi(e2k_cr1_hi_t cr1_hi, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + *((u64 *)(base + cr_ind + CR1_HI_I)) = AS_WORD(cr1_hi); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* pure guest kernel (not paravirtualized) */ +#include +#else /* ! CONFIG_PARAVIRT_GUEST && ! 
CONFIG_KVM_GUEST_KERNEL */ +/* it is native kernel with or without virtualization support */ +static inline unsigned long +get_active_cr0_lo_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_get_active_cr0_lo_value(base, cr_ind); +} +static inline unsigned long +get_active_cr0_hi_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_get_active_cr0_hi_value(base, cr_ind); +} +static inline unsigned long +get_active_cr1_lo_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_get_active_cr1_lo_value(base, cr_ind); +} +static inline unsigned long +get_active_cr1_hi_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_get_active_cr1_hi_value(base, cr_ind); +} +static inline void +put_active_cr0_lo_value(unsigned long cr0_lo_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_put_active_cr0_lo_value(cr0_lo_value, base, cr_ind); +} +static inline void +put_active_cr0_hi_value(unsigned long cr0_hi_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_put_active_cr0_hi_value(cr0_hi_value, base, cr_ind); +} +static inline void +put_active_cr1_lo_value(unsigned long cr1_lo_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_put_active_cr1_lo_value(cr1_lo_value, base, cr_ind); +} +static inline void +put_active_cr1_hi_value(unsigned long cr1_hi_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_put_active_cr1_hi_value(cr1_hi_value, base, cr_ind); +} +#endif /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ + +static inline e2k_cr0_lo_t +get_active_cr0_lo(e2k_addr_t base, e2k_addr_t cr_ind) +{ + e2k_cr0_lo_t cr0_lo; + + cr0_lo.CR0_lo_half = get_active_cr0_lo_value(base, cr_ind); + return cr0_lo; +} +static inline e2k_cr0_hi_t +get_active_cr0_hi(e2k_addr_t base, e2k_addr_t cr_ind) +{ + e2k_cr0_hi_t cr0_hi; + + cr0_hi.CR0_hi_half = get_active_cr0_hi_value(base, cr_ind); + return cr0_hi; +} +static inline e2k_cr1_lo_t +get_active_cr1_lo(e2k_addr_t base, e2k_addr_t cr_ind) +{ + e2k_cr1_lo_t cr1_lo; + + cr1_lo.CR1_lo_half = get_active_cr1_lo_value(base, 
cr_ind); + return cr1_lo; +} +static inline e2k_cr1_hi_t +get_active_cr1_hi(e2k_addr_t base, e2k_addr_t cr_ind) +{ + e2k_cr1_hi_t cr1_hi; + + cr1_hi.CR1_hi_half = get_active_cr1_hi_value(base, cr_ind); + return cr1_hi; +} +static inline void +put_active_cr0_lo(e2k_cr0_lo_t cr0_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + put_active_cr0_lo_value(cr0_lo.CR0_lo_half, base, cr_ind); +} +static inline void +put_active_cr0_hi(e2k_cr0_hi_t cr0_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + put_active_cr0_hi_value(cr0_hi.CR0_hi_half, base, cr_ind); +} +static inline void +put_active_cr1_lo(e2k_cr1_lo_t cr1_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + put_active_cr1_lo_value(cr1_lo.CR1_lo_half, base, cr_ind); +} +static inline void +put_active_cr1_hi(e2k_cr1_hi_t cr1_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + put_active_cr1_hi_value(cr1_hi.CR1_hi_half, base, cr_ind); +} + +static inline int +get_user_cr0_lo(e2k_cr0_lo_t *cr0_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_get_user_cr0_lo(cr0_lo, base, cr_ind); +} +static inline int +get_user_cr0_hi(e2k_cr0_hi_t *cr0_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_get_user_cr0_hi(cr0_hi, base, cr_ind); +} +static inline int +get_user_cr1_lo(e2k_cr1_lo_t *cr1_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_get_user_cr1_lo(cr1_lo, base, cr_ind); +} +static inline int +get_user_cr1_hi(e2k_cr1_hi_t *cr1_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_get_user_cr1_hi(cr1_hi, base, cr_ind); +} +static inline int +put_user_cr0_lo(e2k_cr0_lo_t cr0_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_put_user_cr0_lo(cr0_lo, base, cr_ind); +} +static inline int +put_user_cr0_hi(e2k_cr0_hi_t cr0_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_put_user_cr0_hi(cr0_hi, base, cr_ind); +} +static inline int +put_user_cr1_lo(e2k_cr1_lo_t cr1_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_put_user_cr1_lo(cr1_lo, base, cr_ind); +} +static inline int 
+put_user_cr1_hi(e2k_cr1_hi_t cr1_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_put_user_cr1_hi(cr1_hi, base, cr_ind); +} + +static inline void +get_kernel_cr0_lo(e2k_cr0_lo_t *cr0_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_get_kernel_cr0_lo(cr0_lo, base, cr_ind); +} +static inline void +get_kernel_cr0_hi(e2k_cr0_hi_t *cr0_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_get_kernel_cr0_hi(cr0_hi, base, cr_ind); +} +static inline void +get_kernel_cr1_lo(e2k_cr1_lo_t *cr1_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_get_kernel_cr1_lo(cr1_lo, base, cr_ind); +} +static inline void +get_kernel_cr1_hi(e2k_cr1_hi_t *cr1_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_get_kernel_cr1_hi(cr1_hi, base, cr_ind); +} +static inline void +put_kernel_cr0_lo(e2k_cr0_lo_t cr0_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_put_kernel_cr0_lo(cr0_lo, base, cr_ind); +} +static inline void +put_kernel_cr0_hi(e2k_cr0_hi_t cr0_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_put_kernel_cr0_hi(cr0_hi, base, cr_ind); +} +static inline void +put_kernel_cr1_lo(e2k_cr1_lo_t cr1_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_put_kernel_cr1_lo(cr1_lo, base, cr_ind); +} +static inline void +put_kernel_cr1_hi(e2k_cr1_hi_t cr1_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_put_kernel_cr1_hi(cr1_hi, base, cr_ind); +} + +static inline int +get_cr0_lo(e2k_cr0_lo_t *cr0_lo, e2k_addr_t base, u64 cr_ind) +{ + int ret = 0; + + if (base < TASK_SIZE) + ret = get_user_cr0_lo(cr0_lo, base, cr_ind); + else + get_kernel_cr0_lo(cr0_lo, base, cr_ind); + return ret; +} + +static inline int +get_cr0_hi(e2k_cr0_hi_t *cr0_hi, e2k_addr_t base, u64 cr_ind) +{ + int ret = 0; + + if (base < TASK_SIZE) + ret = get_user_cr0_hi(cr0_hi, base, cr_ind); + else + get_kernel_cr0_hi(cr0_hi, base, cr_ind); + return ret; +} + +static inline int +get_cr1_lo(e2k_cr1_lo_t *cr1_lo, e2k_addr_t base, u64 cr_ind) +{ + int ret = 0; + + if (base < TASK_SIZE) + ret = 
get_user_cr1_lo(cr1_lo, base, cr_ind); + else + get_kernel_cr1_lo(cr1_lo, base, cr_ind); + return ret; +} + +static inline int +get_cr1_hi(e2k_cr1_hi_t *cr1_hi, e2k_addr_t base, u64 cr_ind) +{ + int ret = 0; + + if (base < TASK_SIZE) + ret = get_user_cr1_hi(cr1_hi, base, cr_ind); + else + get_kernel_cr1_hi(cr1_hi, base, cr_ind); + return ret; +} + +static inline int +put_cr0_lo(e2k_cr0_lo_t cr0_lo, e2k_addr_t base, u64 cr_ind) +{ + int ret = 0; + + if (base < TASK_SIZE) + ret = put_user_cr0_lo(cr0_lo, base, cr_ind); + else + put_kernel_cr0_lo(cr0_lo, base, cr_ind); + return ret; +} +static inline int +put_cr0_hi(e2k_cr0_hi_t cr0_hi, e2k_addr_t base, u64 cr_ind) +{ + int ret = 0; + + if (base < TASK_SIZE) + ret = put_user_cr0_hi(cr0_hi, base, cr_ind); + else + put_kernel_cr0_hi(cr0_hi, base, cr_ind); + return ret; +} + +static inline int +put_cr1_lo(e2k_cr1_lo_t cr1_lo, e2k_addr_t base, u64 cr_ind) +{ + int ret = 0; + + if (base < TASK_SIZE) + ret = put_user_cr1_lo(cr1_lo, base, cr_ind); + else + put_kernel_cr1_lo(cr1_lo, base, cr_ind); + return ret; +} + +static inline int +put_cr1_hi(e2k_cr1_hi_t cr1_hi, e2k_addr_t base, u64 cr_ind) +{ + int ret = 0; + + if (base < TASK_SIZE) + ret = put_user_cr1_hi(cr1_hi, base, cr_ind); + else + put_kernel_cr1_hi(cr1_hi, base, cr_ind); + return ret; +} + +static inline int +get_cr0(e2k_cr0_lo_t *cr0_lo, e2k_cr0_hi_t *cr0_hi, + e2k_pcsp_lo_t pcsp_lo, u64 cr_ind) +{ + u64 base = pcsp_lo.PCSP_lo_base; + int ret = 0; + + ret += get_cr0_lo(cr0_lo, base, cr_ind); + ret += get_cr0_hi(cr0_hi, base, cr_ind); + return ret; +} + +static inline int +get_cr1(e2k_cr1_lo_t *cr1_lo, e2k_cr1_hi_t *cr1_hi, + e2k_pcsp_lo_t pcsp_lo, u64 cr_ind) +{ + u64 base = pcsp_lo.PCSP_lo_base; + int ret = 0; + + ret += get_cr1_lo(cr1_lo, base, cr_ind); + ret += get_cr1_hi(cr1_hi, base, cr_ind); + return ret; +} + +static inline int +get_user_crs(e2k_mem_crs_t *crs, e2k_addr_t base, e2k_addr_t cr_ind) +{ + unsigned long ts_flag; + int ret; + + ts_flag = 
set_ts_flag(TS_KERNEL_SYSCALL); + ret = __copy_from_user(crs, (const char __user *)(base + cr_ind), + sizeof(*crs)); + clear_ts_flag(ts_flag); + + return ret; +} + +static inline int +put_user_crs(e2k_mem_crs_t *crs, e2k_addr_t base, e2k_addr_t cr_ind) +{ + unsigned long ts_flag; + int ret; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __copy_to_user((char __user *)(base + cr_ind), crs, + sizeof(*crs)); + clear_ts_flag(ts_flag); + + return ret; +} + +static inline void +get_kernel_crs(e2k_mem_crs_t *crs, e2k_addr_t base, e2k_addr_t cr_ind) +{ + get_kernel_cr0_lo(&crs->cr0_lo, base, cr_ind); + get_kernel_cr0_hi(&crs->cr0_hi, base, cr_ind); + get_kernel_cr1_lo(&crs->cr1_lo, base, cr_ind); + get_kernel_cr1_hi(&crs->cr1_hi, base, cr_ind); +} + +static inline void +put_kernel_crs(e2k_mem_crs_t *crs, e2k_addr_t base, e2k_addr_t cr_ind) +{ + put_kernel_cr0_lo(crs->cr0_lo, base, cr_ind); + put_kernel_cr0_hi(crs->cr0_hi, base, cr_ind); + put_kernel_cr1_lo(crs->cr1_lo, base, cr_ind); + put_kernel_cr1_hi(crs->cr1_hi, base, cr_ind); +} + +static inline int +get_crs(e2k_mem_crs_t *crs, e2k_addr_t base, e2k_addr_t cr_ind) +{ + int ret = 0; + + if (base < TASK_SIZE) + ret = get_user_crs(crs, base, cr_ind); + else + get_kernel_crs(crs, base, cr_ind); + return ret; +} + +static inline int +put_crs(e2k_mem_crs_t *crs, e2k_addr_t base, e2k_addr_t cr_ind) +{ + int ret = 0; + + if (base < TASK_SIZE) + ret = put_user_crs(crs, base, cr_ind); + else + put_kernel_crs(crs, base, cr_ind); + return ret; +} + +extern int chain_stack_frame_init(e2k_mem_crs_t *crs, void *fn_ptr, + size_t dstack_size, e2k_psr_t psr, + int wbs, int wpsz, bool user); + +extern void __update_psp_regs(unsigned long base, unsigned long size, + unsigned long new_fp, + e2k_psp_lo_t *psp_lo, e2k_psp_hi_t *psp_hi); +extern void update_psp_regs(unsigned long new_fp, + e2k_psp_lo_t *psp_lo, e2k_psp_hi_t *psp_hi); + +extern void __update_pcsp_regs(unsigned long base, unsigned long size, + unsigned long new_fp, + 
e2k_pcsp_lo_t *pcsp_lo, e2k_pcsp_hi_t *pcsp_hi); +extern void update_pcsp_regs(unsigned long new_fp, + e2k_pcsp_lo_t *pcsp_lo, e2k_pcsp_hi_t *pcsp_hi); + +#endif /* _E2K_HW_STACKS_H */ diff --git a/arch/e2k/include/asm/io.h b/arch/e2k/include/asm/io.h new file mode 100644 index 000000000000..43e46bb77249 --- /dev/null +++ b/arch/e2k/include/asm/io.h @@ -0,0 +1,615 @@ +#ifndef _E2K_IO_H_ +#define _E2K_IO_H_ + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +extern int __init native_arch_pci_init(void); + +#define E2K_X86_IO_AREA_BASE E2K_KERNEL_IO_BIOS_AREAS_BASE + +/* Size of pages for the IO area */ +#define E2K_X86_IO_PAGE_SIZE (cpu_has(CPU_HWBUG_LARGE_PAGES) ? \ + E2K_SMALL_PAGE_SIZE : E2K_LARGE_PAGE_SIZE) +#define X86_IO_AREA_PHYS_BASE (machine.x86_io_area_base) +#define X86_IO_AREA_PHYS_SIZE (machine.x86_io_area_size) + + +/* + * We add all the necessary barriers manually + */ +#define __io_br() +#define __io_ar(v) do { (void) (v); } while (0) +#define __io_bw() +#define __io_aw() + +/* + * _relaxed() accessors. 
+ */ + +static inline u8 native_readb_relaxed(const volatile void __iomem *addr) +{ + u8 res = *(const volatile u8 __force *) addr; + if (cpu_has(CPU_HWBUG_PIO_READS)) + __E2K_WAIT(_ld_c); + return res; +} + +static inline u16 native_readw_relaxed(const volatile void __iomem *addr) +{ + u16 res = *(const volatile u16 __force *) addr; + if (cpu_has(CPU_HWBUG_PIO_READS)) + __E2K_WAIT(_ld_c); + return res; +} + +static inline u32 native_readl_relaxed(const volatile void __iomem *addr) +{ + u32 res = *(const volatile u32 __force *) addr; + if (cpu_has(CPU_HWBUG_PIO_READS)) + __E2K_WAIT(_ld_c); + return res; +} + +static inline u64 native_readq_relaxed(const volatile void __iomem *addr) +{ + u64 res = *(const volatile u64 __force *) addr; + if (cpu_has(CPU_HWBUG_PIO_READS)) + __E2K_WAIT(_ld_c); + return res; +} + +static inline void native_writeb_relaxed(u8 value, volatile void __iomem *addr) +{ + *(volatile u8 __force *) addr = value; +} + +static inline void native_writew_relaxed(u16 value, volatile void __iomem *addr) +{ + *(volatile u16 __force *) addr = value; +} + +static inline void native_writel_relaxed(u32 value, volatile void __iomem *addr) +{ + *(volatile u32 __force *) addr = value; +} + +static inline void native_writeq_relaxed(u64 value, volatile void __iomem *addr) +{ + *(volatile u64 __force *) addr = value; +} + + +/* + * Strongly ordered accessors. 
+ */ + +static inline u8 native_readb(const volatile void __iomem *addr) +{ + u8 res; + if (cpu_has(CPU_FEAT_ISET_V6)) { + LOAD_NV_MAS((volatile u8 __force *) addr, res, + MAS_LOAD_ACQUIRE_V6(MAS_MT_0), b, "memory"); + } else { + res = native_readb_relaxed(addr); + } + return res; +} + +static inline u16 native_readw(const volatile void __iomem *addr) +{ + u16 res; + if (cpu_has(CPU_FEAT_ISET_V6)) { + LOAD_NV_MAS((volatile u16 __force *) addr, res, + MAS_LOAD_ACQUIRE_V6(MAS_MT_0), h, "memory"); + } else { + res = native_readw_relaxed(addr); + } + return res; +} + +static inline u32 native_readl(const volatile void __iomem *addr) +{ + u32 res; + if (cpu_has(CPU_FEAT_ISET_V6)) { + LOAD_NV_MAS((volatile u32 __force *) addr, res, + MAS_LOAD_ACQUIRE_V6(MAS_MT_0), w, "memory"); + } else { + res = native_readl_relaxed(addr); + } + return res; +} + +static inline u64 native_readq(const volatile void __iomem *addr) +{ + u64 res; + if (cpu_has(CPU_FEAT_ISET_V6)) { + LOAD_NV_MAS((volatile u64 __force *) addr, res, + MAS_LOAD_ACQUIRE_V6(MAS_MT_0), d, "memory"); + } else { + res = native_readq_relaxed(addr); + } + return res; +} + +static inline void native_writeb(u8 value, volatile void __iomem *addr) +{ + if (cpu_has(CPU_FEAT_ISET_V6)) { + STORE_NV_MAS((volatile u8 __force *) addr, value, + MAS_STORE_RELEASE_V6(MAS_MT_0), b, "memory"); + /* wmb() after MMIO writes is not required by documentation, but + * this is how x86 works and how most of the drivers are tested. 
*/ + wmb(); + } else { + native_writeb_relaxed(value, addr); + } +} + +static inline void native_writew(u16 value, volatile void __iomem *addr) +{ + if (cpu_has(CPU_FEAT_ISET_V6)) { + STORE_NV_MAS((volatile u16 __force *) addr, value, + MAS_STORE_RELEASE_V6(MAS_MT_0), h, "memory"); + wmb(); + } else { + native_writew_relaxed(value, addr); + } +} + +static inline void native_writel(u32 value, volatile void __iomem *addr) +{ + if (cpu_has(CPU_FEAT_ISET_V6)) { + STORE_NV_MAS((volatile u32 __force *) addr, value, + MAS_STORE_RELEASE_V6(MAS_MT_0), w, "memory"); + wmb(); + } else { + native_writel_relaxed(value, addr); + } +} + +static inline void native_writeq(u64 value, volatile void __iomem *addr) +{ + if (cpu_has(CPU_FEAT_ISET_V6)) { + STORE_NV_MAS((volatile u64 __force *) addr, value, + MAS_STORE_RELEASE_V6(MAS_MT_0), d, "memory"); + wmb(); + } else { + native_writeq_relaxed(value, addr); + } +} + +/* + * Port accessors, also strongly ordered + */ + +#if CONFIG_CPU_ISET >= 6 +# define __io_par() __E2K_WAIT(_ld_c | _sal | _lal) +# define __io_pbw() __E2K_WAIT(_st_c | _sas | _ld_c | _sal) +/* Not required by documentation, but this is how + * x86 works and how most of the drivers are tested. 
*/ +# define __io_paw() __E2K_WAIT(_st_c | _sas) +#else +# define __io_par() \ +do { \ + if (cpu_has(CPU_HWBUG_PIO_READS)) \ + __E2K_WAIT(_ld_c); \ +} while (0) +# define __io_pbw() +# define __io_paw() +#endif + +static inline u8 native_inb(unsigned int port) +{ + u8 byte = NATIVE_READ_MAS_B(X86_IO_AREA_PHYS_BASE + port, MAS_IOADDR); + __io_par(); + return byte; +} +static inline u16 native_inw(unsigned int port) +{ + u16 hword = NATIVE_READ_MAS_H(X86_IO_AREA_PHYS_BASE + port, MAS_IOADDR); + __io_par(); + return hword; +} +static inline u32 native_inl(unsigned int port) +{ + u32 word = NATIVE_READ_MAS_W(X86_IO_AREA_PHYS_BASE + port, MAS_IOADDR); + __io_par(); + return word; +} +static inline void native_outb(u8 byte, unsigned int port) +{ + __io_pbw(); + NATIVE_WRITE_MAS_B(X86_IO_AREA_PHYS_BASE + port, byte, MAS_IOADDR); + __io_paw(); +} +static inline void native_outw(u16 halfword, unsigned int port) +{ + __io_pbw(); + NATIVE_WRITE_MAS_H(X86_IO_AREA_PHYS_BASE + port, halfword, MAS_IOADDR); + __io_paw(); +} +static inline void native_outl(u32 word, unsigned int port) +{ + __io_pbw(); + NATIVE_WRITE_MAS_W(X86_IO_AREA_PHYS_BASE + port, word, MAS_IOADDR); + __io_paw(); +} + + +/* + * Variants of inX/outX that repeatedly access the same port + */ + +static inline void native_insb(unsigned short port, void *dst, unsigned long count) +{ + u8 *b_p = dst; + while (count--) + *b_p++ = native_inb(port); +} +static inline void native_insw(unsigned short port, void *dst, unsigned long count) +{ + u16 *hw_p = dst; + while (count--) + *hw_p++ = native_inw(port); +} +static inline void native_insl(unsigned short port, void *dst, unsigned long count) +{ + u32 *l_p = dst; + while (count--) + *l_p++ = native_inl(port); +} + +static inline void native_outsb(unsigned short port, const void *src, unsigned long count) +{ + const u8 *b_p = src; + while (count--) + native_outb(*b_p++, port); +} +static inline void native_outsw(unsigned short port, const void *src, unsigned long count) +{ 
+ const u16 *hw_p = src; + while (count--) + native_outw(*hw_p++, port); +} +static inline void native_outsl(unsigned short port, const void *src, unsigned long count) +{ + const u32 *l_p = src; + while (count--) + native_outl(*l_p++, port); +} + +/* + * And some e2k-specific accessors + */ +static inline void native_debug_cons_outb(u8 byte, u16 port) +{ + native_outb(byte, port); +} +static inline u8 native_debug_cons_inb(u16 port) +{ + return native_inb(port); +} +static inline u32 native_debug_cons_inl(u16 port) +{ + return native_inl(port); +} + +extern void native_conf_inb(unsigned int domain, unsigned int bus, + unsigned long port, u8 *byte); +extern void native_conf_inw(unsigned int domain, unsigned int bus, + unsigned long port, u16 *hword); +extern void native_conf_inl(unsigned int domain, unsigned int bus, + unsigned long port, u32 *word); +extern void native_conf_outb(unsigned int domain, unsigned int bus, + unsigned long port, u8 byte); +extern void native_conf_outw(unsigned int domain, unsigned int bus, + unsigned long port, u16 hword); +extern void native_conf_outl(unsigned int domain, unsigned int bus, + unsigned long port, u32 word); + + +#if defined CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +# include + +# define __raw_readb kvm_readb_relaxed +# define __raw_readw kvm_readw_relaxed +# define __raw_readl kvm_readl_relaxed +# define __raw_readq kvm_readq_relaxed +# define __raw_writeb kvm_writeb_relaxed +# define __raw_writew kvm_writew_relaxed +# define __raw_writel kvm_writel_relaxed +# define __raw_writeq kvm_writeq_relaxed +# define readb kvm_hv_readb +# define readw kvm_hv_readw +# define readl kvm_hv_readl +# define readq kvm_hv_readq +# define writeb kvm_hv_writeb +# define writew kvm_hv_writew +# define writel kvm_hv_writel +# define writeq kvm_hv_writeq +# define inb kvm_hv_inb +# define inw kvm_hv_inw +# define inl kvm_hv_inl +# define outb kvm_hv_outb +# define outw kvm_hv_outw +# define 
outl kvm_hv_outl +# define insb kvm_hv_insb +# define insw kvm_hv_insw +# define insl kvm_hv_insl +# define outsb kvm_hv_outsb +# define outsw kvm_hv_outsw +# define outsl kvm_hv_outsl +#elif defined CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +# include +# define __raw_readb pv_readb +# define __raw_readw pv_readw +# define __raw_readl pv_readl +# define __raw_readq pv_readq +# define __raw_writeb pv_writeb +# define __raw_writew pv_writew +# define __raw_writel pv_writel +# define __raw_writeq pv_writeq +# define inb pv_inb +# define inw pv_inw +# define inl pv_inl +# define outb pv_outb +# define outw pv_outw +# define outl pv_outl +# define insb pv_insb +# define insw pv_insw +# define insl pv_insl +# define outsb pv_outsb +# define outsw pv_outsw +# define outsl pv_outsl +#else +/* Native kernel - either host or without any virtualization at all */ +# define __raw_readb native_readb_relaxed +# define __raw_readw native_readw_relaxed +# define __raw_readl native_readl_relaxed +# define __raw_readq native_readq_relaxed +# define __raw_writeb native_writeb_relaxed +# define __raw_writew native_writew_relaxed +# define __raw_writel native_writel_relaxed +# define __raw_writeq native_writeq_relaxed +# define readb native_readb +# define readw native_readw +# define readl native_readl +# define readq native_readq +# define writeb native_writeb +# define writew native_writew +# define writel native_writel +# define writeq native_writeq +# define inb native_inb +# define inw native_inw +# define inl native_inl +# define outb native_outb +# define outw native_outw +# define outl native_outl +# define insb native_insb +# define insw native_insw +# define insl native_insl +# define outsb native_outsb +# define outsw native_outsw +# define outsl native_outsl + +static inline void boot_writeb(u8 b, void __iomem *addr) +{ + boot_native_writeb(b, addr); +} + +static inline void boot_writew(u16 w, void __iomem *addr) +{ + boot_native_writew(w, 
addr); +} + +static inline void boot_writel(u32 l, void __iomem *addr) +{ + boot_native_writel(l, addr); +} + +static inline void boot_writeq(u64 q, void __iomem *addr) +{ + boot_native_writeq(q, addr); +} + +static inline u8 boot_readb(void __iomem *addr) +{ + return boot_native_readb(addr); +} + +static inline u16 boot_readw(void __iomem *addr) +{ + return boot_native_readw(addr); +} + +static inline u32 boot_readl(void __iomem *addr) +{ + return boot_native_readl(addr); +} + +static inline u64 boot_readq(void __iomem *addr) +{ + return boot_native_readq(addr); +} + + +static inline void +conf_inb(unsigned int domain, unsigned int bus, unsigned long port, u8 *byte) +{ + native_conf_inb(domain, bus, port, byte); +} +static inline void +conf_inw(unsigned int domain, unsigned int bus, unsigned long port, u16 *hword) +{ + native_conf_inw(domain, bus, port, hword); +} +static inline void +conf_inl(unsigned int domain, unsigned int bus, unsigned long port, u32 *word) +{ + native_conf_inl(domain, bus, port, word); +} +static inline void +conf_outb(unsigned int domain, unsigned int bus, unsigned long port, u8 byte) +{ + native_conf_outb(domain, bus, port, byte); +} +static inline void +conf_outw(unsigned int domain, unsigned int bus, unsigned long port, u16 hword) +{ + native_conf_outw(domain, bus, port, hword); +} +static inline void +conf_outl(unsigned int domain, unsigned int bus, unsigned long port, u32 word) +{ + native_conf_outl(domain, bus, port, word); +} + +static inline void debug_cons_outb(u8 byte, u16 port) +{ + native_debug_cons_outb(byte, port); +} +static inline void debug_cons_outb_p(u8 byte, u16 port) +{ + native_debug_cons_outb(byte, port); +} +static inline u8 debug_cons_inb(u16 port) +{ + return native_debug_cons_inb(port); +} +static inline u32 debug_cons_inl(u16 port) +{ + return native_debug_cons_inl(port); +} +static inline void boot_debug_cons_outb(u8 byte, u16 port) +{ + boot_native_outb(byte, port); +} +static inline u8 boot_debug_cons_inb(u16 
port) +{ + return boot_native_inb(port); +} +static inline u32 boot_debug_cons_inl(u16 port) +{ + return boot_native_inl(port); +} + +static inline int __init arch_pci_init(void) +{ + return native_arch_pci_init(); +} + +#endif + +/* + * Map in an area of physical address space, for accessing + * I/O devices etc. + */ +#define ioremap_cache ioremap_cache +#define ioremap_wc ioremap_wc +#define ioremap_wt ioremap_wc +#define ioremap_nocache ioremap_nocache +#define ioremap_uc ioremap_nocache +#define ioremap ioremap_nocache +extern void __iomem *ioremap_cache(resource_size_t address, unsigned long size); +extern void __iomem *ioremap_wc(resource_size_t address, unsigned long size); +extern void __iomem *ioremap_nocache(resource_size_t address, unsigned long size); + +#define iounmap iounmap +extern void iounmap(volatile void __iomem *addr); + +#define ARCH_HAS_IOREMAP_WC +#define ARCH_HAS_IOREMAP_WT + + +extern void __memset_io(void *s, long c, size_t count); + +#define memset_io(dst, c, n) _memset_io(dst, c, n, __alignof(*(dst))) +static inline void _memset_io(volatile void __iomem *dst, int c, size_t n, + const unsigned long dst_align) +{ + long cc; + + cc = c & 0xff; + cc = cc | (cc << 8); + cc = cc | (cc << 16); + cc = cc | (cc << 32); + + if (__builtin_constant_p(n) && dst_align >= 8 && n < 136) { + /* Inline small aligned memset's */ + volatile u64 __iomem *l_dst = dst; + + if (n >= 8) + l_dst[0] = cc; + if (n >= 16) + l_dst[1] = cc; + if (n >= 24) + l_dst[2] = cc; + if (n >= 32) + l_dst[3] = cc; + if (n >= 40) + l_dst[4] = cc; + if (n >= 48) + l_dst[5] = cc; + if (n >= 56) + l_dst[6] = cc; + if (n >= 64) + l_dst[7] = cc; + if (n >= 72) + l_dst[8] = cc; + if (n >= 80) + l_dst[9] = cc; + if (n >= 88) + l_dst[10] = cc; + if (n >= 96) + l_dst[11] = cc; + if (n >= 104) + l_dst[12] = cc; + if (n >= 112) + l_dst[13] = cc; + if (n >= 120) + l_dst[14] = cc; + if (n >= 128) + l_dst[15] = cc; + + /* Set the tail */ + if (n & 4) + *(u32 __iomem *) (dst + (n & ~0x7UL)) = 
cc; + if (n & 2) + *(u16 __iomem *) (dst + (n & ~0x3UL)) = cc; + if (n & 1) + *(u8 __iomem *) (dst + (n & ~0x1UL)) = cc; + } else { + __memset_io((void * __force) dst, cc, n); + } +} + +extern void __memcpy_fromio(void *dst, const void *src, size_t n); +extern void __memcpy_toio(void *dst, const void *src, size_t n); +#define memcpy_fromio(a, b, c) __memcpy_fromio((a), (void * __force) (b), (c)) +#define memcpy_toio(a, b, c) __memcpy_toio((void * __force) (a), (b), (c)) + + +#include +#undef PCI_IOBASE + + +extern unsigned long get_domain_pci_conf_base(unsigned int domain); +extern unsigned long get_domain_pci_conf_size(unsigned int domain); + +/* + * ISA I/O bus memory addresses are 1:1 with the physical address. + */ +#define isa_virt_to_bus virt_to_phys + +#endif /* _E2K_IO_H_ */ diff --git a/arch/e2k/include/asm/io_apic.h b/arch/e2k/include/asm/io_apic.h new file mode 100644 index 000000000000..0cc55a656217 --- /dev/null +++ b/arch/e2k/include/asm/io_apic.h @@ -0,0 +1,12 @@ +#ifndef __ASM_E2K_IO_APIC_H +#define __ASM_E2K_IO_APIC_H + +#ifdef __KERNEL__ + +#include +#include + +extern int e2k_msi_disabled; + +#endif /* __KERNEL__ */ +#endif /* __ASM_E2K_IO_APIC_H */ diff --git a/arch/e2k/include/asm/io_apic_regs.h b/arch/e2k/include/asm/io_apic_regs.h new file mode 100644 index 000000000000..6d140392e691 --- /dev/null +++ b/arch/e2k/include/asm/io_apic_regs.h @@ -0,0 +1,81 @@ +#ifndef __ASM_IO_APIC_REGS_H +#define __ASM_IO_APIC_REGS_H + +#include + + +/* + * The structure of the IO-APIC: + */ +union IO_APIC_reg_00 { + u32 raw; + struct { + u32 __reserved_2 : 14, + LTS : 1, + delivery_type : 1, + __reserved_1 : 8, + ID : 8; + } __attribute__ ((packed)) bits; +}; + +union IO_APIC_reg_01 { + u32 raw; + struct { + u32 version : 8, + __reserved_2 : 7, + PRQ : 1, + entries : 8, + __reserved_1 : 8; + } __attribute__ ((packed)) bits; +}; + +union IO_APIC_reg_02 { + u32 raw; + struct { + u32 __reserved_2 : 24, + arbitration : 4, + __reserved_1 : 4; + } __attribute__ 
((packed)) bits; +}; + +union IO_APIC_reg_03 { + u32 raw; + struct { + u32 boot_DT : 1, + __reserved_1 : 31; + } __attribute__ ((packed)) bits; +}; + +struct IO_APIC_route_entry { + __u32 vector : 8, + delivery_mode : 3, /* 000: FIXED + * 001: lowest prio + * 111: ExtINT + */ + dest_mode : 1, /* 0: physical, 1: logical */ + delivery_status : 1, + polarity : 1, + irr : 1, + trigger : 1, /* 0: edge, 1: level */ + mask : 1, /* 0: enabled, 1: disabled */ + __reserved_2 : 15; + + __u32 __reserved_3 : 24, + dest : 8; +} __attribute__ ((packed)); + +struct IR_IO_APIC_route_entry { + __u64 vector : 8, + zero : 3, + index2 : 1, + delivery_status : 1, + polarity : 1, + irr : 1, + trigger : 1, + mask : 1, + reserved : 31, + format : 1, + index : 15; +} __attribute__ ((packed)); + +#endif /* __ASM_IO_APIC_REGS_H */ diff --git a/arch/e2k/include/asm/io_epic.h b/arch/e2k/include/asm/io_epic.h new file mode 100644 index 000000000000..22500628d40a --- /dev/null +++ b/arch/e2k/include/asm/io_epic.h @@ -0,0 +1,29 @@ +#ifndef _ASM_E2K_IO_EPIC_H +#define _ASM_E2K_IO_EPIC_H + +#include + +static inline void epic_ioapic_eoi(u8 vector) +{ + unsigned int value = vector << 8; + + value |= 0x5; + + sic_write_nbsr_reg(SIC_hc_ioapic_eoi, value); +} + +static inline void get_io_epic_msi(int node, u32 *lo, u32 *hi) +{ + if (node < 0) + node = 0; + /* FIXME SIC reads with mas 0x13 aren't supported by hypervisor */ + if (paravirt_enabled()) { + *lo = early_sic_read_node_nbsr_reg(node, SIC_rt_msi); + *hi = early_sic_read_node_nbsr_reg(node, SIC_rt_msi_h); + } else { + *lo = sic_read_node_nbsr_reg(node, SIC_rt_msi); + *hi = sic_read_node_nbsr_reg(node, SIC_rt_msi_h); + } +} +#include +#endif /* _ASM_E2K_IO_EPIC_H */ diff --git a/arch/e2k/include/asm/io_epic_regs.h b/arch/e2k/include/asm/io_epic_regs.h new file mode 100644 index 000000000000..297de80b9115 --- /dev/null +++ b/arch/e2k/include/asm/io_epic_regs.h @@ -0,0 +1,4 @@ +#ifndef __ASM_E2K_IO_EPIC_REGS_H +#define __ASM_E2K_IO_EPIC_REGS_H 
+#include +#endif /* __ASM_E2K_IO_EPIC_REGS_H */ diff --git a/arch/e2k/include/asm/ioctl.h b/arch/e2k/include/asm/ioctl.h new file mode 100644 index 000000000000..b279fe06dfe5 --- /dev/null +++ b/arch/e2k/include/asm/ioctl.h @@ -0,0 +1 @@ +#include diff --git a/arch/e2k/include/asm/ioctls.h b/arch/e2k/include/asm/ioctls.h new file mode 100644 index 000000000000..dd1229114d10 --- /dev/null +++ b/arch/e2k/include/asm/ioctls.h @@ -0,0 +1,17 @@ +#ifndef _E2K_IOCTLS_H_ +#define _E2K_IOCTLS_H_ + +/* + * We are too far from real ioctl handling and it is difficult to predict + * any errors now. So I accept i386(ia64) ioctl's stuff as the basis. + */ + + +#include +#include + +#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ +#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ +#define TIOCGDB 0x547F /* enable GDB stub mode on this tty */ + +#endif /* _E2K_IOCTLS_H_ */ diff --git a/arch/e2k/include/asm/iolinkmask.h b/arch/e2k/include/asm/iolinkmask.h new file mode 100644 index 000000000000..fa0860f4007c --- /dev/null +++ b/arch/e2k/include/asm/iolinkmask.h @@ -0,0 +1,6 @@ +#ifndef __ASM_IOHUBMASK_H +#define __ASM_IOHUBMASK_H + +#include + +#endif /* __LINUX_IOHUBMASK_H */ diff --git a/arch/e2k/include/asm/ipcbuf.h b/arch/e2k/include/asm/ipcbuf.h new file mode 100644 index 000000000000..dc3376b63e52 --- /dev/null +++ b/arch/e2k/include/asm/ipcbuf.h @@ -0,0 +1,28 @@ +#ifndef _E2K_IPCBUF_H_ +#define _E2K_IPCBUF_H_ + +/* + * The ipc64_perm structure for E2K architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. 
+ * + * Pad space is left for: + * - 32-bit seq + * - 2 miscellaneous 64-bit values + */ + +struct ipc64_perm +{ + __kernel_key_t key; + __kernel_uid_t uid; + __kernel_gid_t gid; + __kernel_uid_t cuid; + __kernel_gid_t cgid; + __kernel_mode_t mode; + unsigned short seq; + unsigned short __pad1; + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* _E2K_IPCBUF_H_ */ diff --git a/arch/e2k/include/asm/irq.h b/arch/e2k/include/asm/irq.h new file mode 100644 index 000000000000..d5879da00a7f --- /dev/null +++ b/arch/e2k/include/asm/irq.h @@ -0,0 +1,22 @@ +#ifndef _ASM_E2K_IRQ_H_ +#define _ASM_E2K_IRQ_H_ +/* + * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar + * + * IRQ/IPI changes taken from work by Thomas Radke + * + */ + +#include +#include +#include +#include + +#define irq_canonicalize(irq) (irq) + +extern int can_request_irq(unsigned int, unsigned long flags); +extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask, + bool exclude_self) __cold; +#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace + +#endif /* _ASM_E2K_IRQ_H_ */ diff --git a/arch/e2k/include/asm/irq_vectors.h b/arch/e2k/include/asm/irq_vectors.h new file mode 100644 index 000000000000..71dbcc263b63 --- /dev/null +++ b/arch/e2k/include/asm/irq_vectors.h @@ -0,0 +1,56 @@ +#ifndef _ASM_E2K_IRQ_VECTORS_H +#define _ASM_E2K_IRQ_VECTORS_H + +#define ERROR_APIC_VECTOR 0xfe +#define RESCHEDULE_VECTOR 0xfd +#define CALL_FUNCTION_VECTOR 0xfc +#define CALL_FUNCTION_SINGLE_VECTOR 0xfb +#define RDMA_INTERRUPT_VECTOR 0xf9 +#define LVT3_INTERRUPT_VECTOR 0xf8 +#define LVT4_INTERRUPT_VECTOR 0xf7 +#define IRQ_WORK_VECTOR 0xf6 +#define NMI_PSEUDO_VECTOR 0x100 +/* + * Local APIC timer IRQ vector is on a different priority level, + * to work around the 'lost local interrupt if more than 2 IRQ + * sources per level' errata. 
+ */ +#define LOCAL_TIMER_VECTOR 0xef + +#ifdef CONFIG_VIRTUALIZATION +/* VIRQ vector to emulate SysRq on guest kernel */ +#define SYSRQ_SHOWSTATE_APIC_VECTOR 0xfa +/* VIRQ vector to emulate NMI on guest kernel */ +#define KVM_NMI_APIC_VECTOR 0xee + +#define SYSRQ_SHOWSTATE_EPIC_VECTOR 0x3fa +#define KVM_NMI_EPIC_VECTOR 0x3ee +#endif /* CONFIG_VIRTUALIZATION */ + +#ifdef CONFIG_EPIC +/* EPIC system vectors have the highest priority level of 3 (0x300 - 0x3ff) */ +#define LINP0_INTERRUPT_VECTOR 0x3e0 +#define LINP1_INTERRUPT_VECTOR 0x3e1 +#define LINP2_INTERRUPT_VECTOR 0x3e2 +#define LINP3_INTERRUPT_VECTOR 0x3e3 +#define LINP4_INTERRUPT_VECTOR 0x3e4 +#define LINP5_INTERRUPT_VECTOR 0x3e5 +#define CEPIC_TIMER_VECTOR 0x3ef +#define CEPIC_EPIC_INT_VECTOR 0x3f0 +#define EPIC_IRQ_WORK_VECTOR 0x3f6 +#define PREPIC_ERROR_VECTOR 0x3f9 +#define EPIC_CALL_FUNCTION_SINGLE_VECTOR 0x3fb +#define EPIC_CALL_FUNCTION_VECTOR 0x3fc +#define EPIC_RESCHEDULE_VECTOR 0x3fd +#define ERROR_EPIC_VECTOR 0x3fe +#define SPURIOUS_EPIC_VECTOR 0x3ff +#endif + +#ifdef CONFIG_KVM_ASYNC_PF +#define ASYNC_PF_WAKE_VECTOR 0x3f8 +#endif /* CONFIG_KVM_ASYNC_PF */ + +#include + +#endif /* _ASM_E2K_IRQ_VECTORS_H */ + diff --git a/arch/e2k/include/asm/irq_work.h b/arch/e2k/include/asm/irq_work.h new file mode 100644 index 000000000000..ee424c373c5b --- /dev/null +++ b/arch/e2k/include/asm/irq_work.h @@ -0,0 +1 @@ +#include diff --git a/arch/e2k/include/asm/irqdomain.h b/arch/e2k/include/asm/irqdomain.h new file mode 100644 index 000000000000..c81446b25fc3 --- /dev/null +++ b/arch/e2k/include/asm/irqdomain.h @@ -0,0 +1,6 @@ +#ifndef __ASM_IRQDOMAIN_H +#define __ASM_IRQDOMAIN_H + +#include + +#endif diff --git a/arch/e2k/include/asm/irqflags.h b/arch/e2k/include/asm/irqflags.h new file mode 100644 index 000000000000..fa2159554dc2 --- /dev/null +++ b/arch/e2k/include/asm/irqflags.h @@ -0,0 +1,475 @@ +#ifndef _E2K_IRQFLAGS_H_ +#define _E2K_IRQFLAGS_H_ + +#ifndef __ASSEMBLY__ + +#ifndef _LINUX_TRACE_IRQFLAGS_H +# 
error "Do not include directly; use instead." +#endif + +#include + +/* + * There are two registers to control interrupts (enable/disable) + * + * The main register is privileged register PSR, + * + * the seconde is nonprivileged UPSR. + * + * PSR bits should enable interrupts and enable user interrupts to use UPSR + * as control interrupts register. + * + * Principal difference between two registers is scope. UPSR is global + * register: its scope is all execution, if some function enables/disables + * interrupts in UPSR and returns to caller then caller will have enabled/ + * disabled interrupts as well. PSR is local register: its scope is current + * function, and all invoked functions inherit its PSR state, but if invoked + * function changes PSR and returns, then current function (caller) will see + * own unchanged PSR state. + * + * (PSR is saved by call operation and is restored by return operation from + * chine registers). + * + * So in PSR case, in particular, if interrupts are enabled/disabled + * by some function call, then it is an error - interrupts enable/disable + * state will be unchanged. But it is not error in UPSR case. + * + * Interrupts control using PSR requires structured kernel organization and + * it can be permited only inheritance of interrupts enable/disable state + * (from caller to invoked function) and it cannot be permited return of + * interrupts enable/disable state (to caller) + * + * There is doubt that we should use interrupts control under UPSR + * + * + * PSR and UPSR bits are used to enable and disable interrupts. + * + * PSR bits are used while: + * - A user process executes; + * - Trap or interrupt occures on user or kernel process, hardware + * disables interrupts mask in PSR and PSR becomes main register to control + * interrupts. 
Trap handler switches control from PSR register to UPSR + * in the appropriate point and all following trap handling is done under + * UPSR control; + * - Trap handler returns control from UPSR to PSR in the appropriate + * point of trap handling end. Return from trap handler (DONE) restores + * PSR from CR register and recovers interrupts control type in the trap point; + * - System call is same as trap (see above); + * - System call end is same as trap handler end (see above); + * - Switch from kernel process to user (exec() and signal handler) + * is same as trap handler end. Before return to user function kernel sets + * control under PSR and (only for signal handler) after return from user + * recovers control under UPSR. + * + * Kernel cannot use standard macros, functions to enable / disable + * interrupts same as local_irq_xxx() spin_lock_irq_xxx() ... while + * interrupts are controled by PSR. + * + * UPSR bits are used by kernel while: + * Kernel jumpstart (system call #12) set UPSR register in the + * initial state (where interrupts are disabled) and switches + * control from PSR register to UPSR; From this point kernel runs + * (except cases listed above for PSR) under UPSR interrupt bits + */ +#define NATIVE_SWITCH_IRQ_TO_UPSR() \ + NATIVE_WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_ENABLED)) + +#define NATIVE_RETURN_IRQ_TO_PSR() \ + NATIVE_WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_DISABLED)) + +#define NATIVE_SET_USER_INITIAL_UPSR(upsr) \ +({ \ + NATIVE_RETURN_IRQ_TO_PSR(); \ + NATIVE_WRITE_UPSR_REG(upsr); \ +}) + +#define BOOT_NATIVE_SWITCH_IRQ_TO_UPSR() \ + BOOT_NATIVE_WRITE_PSR_REG_VALUE(AW(E2K_KERNEL_PSR_ENABLED)) + +#define SWITCH_IRQ_TO_UPSR(set_cr1_lo) \ +do { \ + if (set_cr1_lo) { \ + e2k_cr1_lo_t cr1_lo = READ_CR1_LO_REG(); \ + AS(cr1_lo).ie = 1; \ + AS(cr1_lo).nmie = 1; \ + AS(cr1_lo).uie = 1; \ + AS(cr1_lo).unmie = 1; \ + WRITE_CR1_LO_REG(cr1_lo); \ + } \ + \ + WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_ENABLED)); \ +} while (0) + +#define 
BOOT_SWITCH_IRQ_TO_UPSR() \ + BOOT_WRITE_PSR_REG_VALUE(AW(E2K_KERNEL_PSR_ENABLED)) + +#define UPSR_STI() \ +({ \ + condition_collect_disable_interrupt_ticks( \ + READ_UPSR_REG_VALUE() & ~UPSR_IE); \ + WRITE_UPSR_IRQ_BARRIER(AW(E2K_KERNEL_UPSR_ENABLED)); \ +}) + +#define UPSR_CLI() \ +({ \ + WRITE_UPSR_IRQ_BARRIER(AW(E2K_KERNEL_UPSR_DISABLED)); \ + condition_mark_disable_interrupt_ticks(1); \ +}) + +#define UPSR_SAVE_AND_CLI() \ +({ \ + unsigned long __flags = READ_UPSR_REG_VALUE(); \ + WRITE_UPSR_IRQ_BARRIER(AW(E2K_KERNEL_UPSR_DISABLED)); \ + condition_mark_disable_interrupt_ticks(1); \ + __flags; \ +}) + +#define UPSR_SAVE() READ_UPSR_REG_VALUE() + +/* + * nmi_* versions work only with non-maskbale ones interrupts. + */ + +#define upsr_nmi_irqs_disabled() \ + ((READ_UPSR_REG_VALUE() & UPSR_NMIE) == 0) + +#define upsr_nmi_irqs_disabled_flags(flags) \ + ((flags & UPSR_NMIE) == 0) +#define NATIVE_UPSR_NMI_SAVE_AND_CLI(flags) \ +({ \ + flags = NATIVE_NV_READ_UPSR_REG_VALUE(); \ + NATIVE_WRITE_UPSR_IRQ_BARRIER(flags & ~UPSR_NMIE); \ + condition_mark_disable_interrupt_ticks(1); \ +}) +#define NATIVE_UPSR_NMI_STI(flags) \ +({ \ + NATIVE_WRITE_UPSR_IRQ_BARRIER((flags) | UPSR_NMIE); \ + condition_mark_disable_interrupt_ticks(0); \ +}) +#define NATIVE_UPSR_ALL_SAVE_AND_CLI(flags) \ +({ \ + flags = NATIVE_NV_READ_UPSR_REG_VALUE(); \ + NATIVE_WRITE_UPSR_IRQ_BARRIER( \ + AW(E2K_KERNEL_UPSR_DISABLED_ALL)); \ + condition_mark_disable_interrupt_ticks(1); \ +}) + +/* + * all_* versions work on all interrupts including + * both maskable and non-maskbale ones. 
+ */ + +#define UPSR_ALL_STI() \ +({ \ + condition_collect_disable_interrupt_ticks( \ + READ_UPSR_REG_VALUE() & ~UPSR_IE & ~UPSR_NMIE); \ + WRITE_UPSR_IRQ_BARRIER(AW(E2K_KERNEL_UPSR_ENABLED)); \ +}) + +#define UPSR_ALL_CLI() \ +({ \ + WRITE_UPSR_IRQ_BARRIER(AW(E2K_KERNEL_UPSR_DISABLED_ALL)); \ + condition_mark_disable_interrupt_ticks(1); \ +}) + +#define UPSR_ALL_SAVE_AND_CLI(flags) \ +({ \ + flags = READ_UPSR_REG_VALUE(); \ + WRITE_UPSR_IRQ_BARRIER(AW(E2K_KERNEL_UPSR_DISABLED_ALL)); \ + condition_mark_disable_interrupt_ticks(1); \ +}) +#define BOOT_UPSR_ALL_STI() \ +({ \ + unsigned long last_upsr = BOOT_READ_UPSR_REG_VALUE(); \ + unsigned long cur_upsr; \ + cur_upsr = last_upsr | (UPSR_IE | UPSR_NMIE); \ + BOOT_WRITE_UPSR_REG_VALUE(cur_upsr); \ +}) +#define BOOT_UPSR_ALL_CLI() \ +({ \ + unsigned long last_upsr = BOOT_READ_UPSR_REG_VALUE(); \ + unsigned long cur_upsr; \ + cur_upsr = last_upsr & ~(UPSR_IE | UPSR_NMIE); \ + BOOT_WRITE_UPSR_REG_VALUE(cur_upsr); \ +}) +#define BOOT_UPSR_ALL_SAVE_AND_CLI(flags) \ +({ \ + flags = BOOT_READ_UPSR_REG_VALUE(); \ + BOOT_WRITE_UPSR_REG_VALUE(flags & ~(UPSR_IE | UPSR_NMIE)); \ +}) +#define BOOT_UPSR_SAVE(src_upsr) \ + (src_upsr = BOOT_READ_UPSR_REG_VALUE()) +#define BOOT_UPSR_RESTORE(src_upsr) \ + BOOT_WRITE_UPSR_REG_VALUE(src_upsr) + +#define psr_irqs_disabled_flags(flags) (((flags) & PSR_IE) == 0) +#define upsr_irqs_disabled_flags(flags) (((flags) & UPSR_IE) == 0) +#define irqs_under_upsr_flags(psr_flags) (((psr_flags) & PSR_UIE) != 0) +#define psr_and_upsr_irqs_disabled_flags(psr_flags, upsr_flags) \ +({ \ + bool ret; \ + if (psr_irqs_disabled_flags(psr_flags)) { \ + ret = true; \ + } else if (irqs_under_upsr_flags(psr_flags)) { \ + ret = upsr_irqs_disabled_flags(upsr_flags); \ + } else { \ + ret = false; \ + } \ + ret; \ +}) + +#define upsr_all_irqs_disabled_flags(flags) \ + ((flags & (UPSR_IE | UPSR_NMIE)) == 0) +#define upsr_all_irqs_disabled() \ + upsr_all_irqs_disabled_flags(READ_UPSR_REG_VALUE()) + +#define 
psr_all_irqs_disabled_flags(flags) \ + ((flags & (PSR_IE | PSR_NMIE)) == 0) +#define psr_all_irqs_enabled_flags(flags) \ + ((flags & (PSR_IE | PSR_NMIE)) == (PSR_IE | PSR_NMIE)) +#define psr_all_irqs_disabled() \ + psr_all_irqs_disabled_flags(READ_PSR_REG_VALUE()) +#define all_irqs_under_upsr_flags(psr_flags) \ + (((psr_flags) & (PSR_UIE | PSR_UNMIE)) != 0) +#define psr_and_upsr_all_irqs_disabled_flags(psr_flags, upsr_flags) \ +({ \ + bool ret; \ + if (psr_all_irqs_disabled_flags(psr_flags)) { \ + ret = true; \ + } else if (all_irqs_under_upsr_flags(psr_flags)) { \ + ret = upsr_all_irqs_disabled_flags(upsr_flags); \ + } else { \ + ret = false; \ + } \ + ret; \ +}) + +#define psr_irqs_disabled() \ + psr_irqs_disabled_flags(READ_PSR_REG_VALUE()) +#define upsr_irqs_disabled() \ + upsr_irqs_disabled_flags(READ_UPSR_REG_VALUE()) + +#define psr_and_upsr_irqs_disabled() \ +({ \ + unsigned long psr = READ_PSR_REG_VALUE(); \ + unsigned long upsr = READ_UPSR_REG_VALUE(); \ + \ + psr_and_upsr_irqs_disabled_flags(psr, upsr); \ +}) + +#define psr_and_upsr_all_irqs_disabled() \ +({ \ + unsigned long psr = READ_PSR_REG_VALUE(); \ + unsigned long upsr = READ_UPSR_REG_VALUE(); \ + \ + psr_and_upsr_all_irqs_disabled_flags(psr, upsr); \ +}) +#define __raw_all_irqs_disabled() psr_and_upsr_all_irqs_disabled() + +#define native_psr_irqs_disabled() \ + psr_irqs_disabled_flags(NATIVE_NV_READ_PSR_REG_VALUE()) + +#define psr_and_upsr_nm_irqs_disabled() \ +({ \ + int ret; \ + unsigned long psr = READ_PSR_REG_VALUE(); \ + if ((psr & PSR_NMIE) == 0) { \ + ret = 1; \ + } else if (psr & PSR_UNMIE) { \ + ret = !(READ_UPSR_REG_VALUE() & UPSR_NMIE); \ + } else { \ + ret = 0; \ + } \ + ret; \ +}) + +#ifndef CONFIG_DEBUG_IRQ +#define __raw_irqs_disabled() upsr_irqs_disabled() +#else +#define __raw_irqs_disabled() psr_and_upsr_irqs_disabled() +#endif /* ! 
CONFIG_DEBUG_IRQ */ + +#define __raw_irqs_disabled_flags(flags) ((flags & UPSR_IE) == 0) + +#ifdef CONFIG_MCST_RT + +#define SAVE_CURR_TIME_SWITCH_TO \ +{ \ + cpu_times[raw_smp_processor_id()].curr_time_switch_to = \ + READ_CLKR_REG_VALUE(); \ +} + +#define CALCULATE_TIME_SWITCH_TO \ +{ \ + int cpu = raw_smp_processor_id(); \ + cpu_times[cpu].curr_time_switch_to = READ_CLKR_REG_VALUE() - \ + cpu_times[cpu].curr_time_switch_to; \ + if (cpu_times[cpu].curr_time_switch_to < \ + cpu_times[cpu].min_time_switch_to){ \ + cpu_times[cpu].min_time_switch_to = \ + cpu_times[cpu].curr_time_switch_to; \ + } \ + if (cpu_times[cpu].curr_time_switch_to > \ + cpu_times[cpu].max_time_switch_to){ \ + cpu_times[cpu].max_time_switch_to = \ + cpu_times[cpu].curr_time_switch_to; \ + } \ +} + +#else /* !CONFIG_MCST_RT */ + #define SAVE_CURR_TIME_SWITCH_TO + #define CALCULATE_TIME_SWITCH_TO +#endif /* CONFIG_MCST_RT */ + +#ifdef CONFIG_CLI_CHECK_TIME + +typedef struct cli_info { + long cli; + long max_cli; + long max_cli_cl; + long max_cli_ip; + + long gcli; + long max_gcli; + long max_gcli_cl; + long max_gcli_ip; + +} cli_info_t; + +typedef struct tt0_info { + long max_tt0_prolog; + long max_tt0_cl; +} tt0_info_t; + +extern cli_info_t cli_info[]; +extern tt0_info_t tt0_info[]; +extern int cli_info_needed; +extern void tt0_prolog_ticks(long ticks); + +#define Cli_cl cli_info[raw_smp_processor_id()].cli +#define Max_cli cli_info[raw_smp_processor_id()].max_cli +#define Max_cli_cl cli_info[raw_smp_processor_id()].max_cli_cl +#define Max_cli_ip cli_info[raw_smp_processor_id()].max_cli_ip +#define Cli_irq cli_info[raw_smp_processor_id()].irq + +#define Gcli_cl cli_info[raw_smp_processor_id()].gcli +#define Max_gcli cli_info[raw_smp_processor_id()].max_gcli +#define Max_gcli_cl cli_info[raw_smp_processor_id()].max_gcli_cl +#define Max_gcli_ip cli_info[raw_smp_processor_id()].max_gcli_ip + +#define Max_tt0_prolog tt0_info[raw_smp_processor_id()].max_tt0_prolog +#define Max_tt0_cl 
tt0_info[raw_smp_processor_id()].max_tt0_cl + +#define e2k_cli() \ +{ \ + bool __save_time = cli_info_needed && !__raw_irqs_disabled(); \ + UPSR_CLI(); \ + if (__save_time) \ + Cli_cl = READ_CLKR_REG_VALUE(); \ +} + +#define e2k_sti() \ +{ \ + if (Cli_cl && __raw_irqs_disabled() && \ + (Max_cli < READ_CLKR_REG_VALUE() - Cli_cl)) { \ + Max_cli = READ_CLKR_REG_VALUE() - Cli_cl; \ + Max_cli_cl = Cli_cl; \ + Max_cli_ip = READ_IP_REG_VALUE(); \ + } \ + UPSR_STI(); \ +} + +// check_cli() works under cli() but we want to check time of cli() + +#define check_cli() \ +{ \ + if (cli_info_needed) { \ + Cli_cl = READ_CLKR_REG_VALUE(); \ + } \ +} + +#define sti_return() \ +{ \ + if (cli_info_needed && __raw_irqs_disabled() && \ + (Max_cli < READ_CLKR_REG_VALUE() - Cli_cl)) { \ + Max_cli = READ_CLKR_REG_VALUE() - Cli_cl; \ + Max_cli_cl = Cli_cl; \ + Max_cli_ip = READ_IP_REG_VALUE(); \ + } \ +} +#else /* above CONFIG_CLI_CHECK_TIME */ +#define e2k_cli() UPSR_CLI() +#define e2k_sti() UPSR_STI() +#define check_cli() +#define sti_return() +#endif /* CONFIG_CLI_CHECK_TIME */ + +/* Normal irq operations: disable maskable interrupts only, + * but enable both maskable and non-maskable interrupts. */ + +#define arch_local_irq_enable() e2k_sti() +#define arch_local_irq_disable() e2k_cli() + +#define arch_local_irq_save() UPSR_SAVE_AND_CLI() +#define arch_local_irq_restore(x) UPSR_RESTORE(x) + +#define arch_local_save_flags() UPSR_SAVE() + +#define arch_irqs_disabled_flags(x) __raw_irqs_disabled_flags(x) +#define arch_irqs_disabled() __raw_irqs_disabled() + +/* nmi_irq_*() - the same as above, but checks only non-maskable interrupts. */ + +#define raw_nmi_irqs_disabled_flags(x) upsr_nmi_irqs_disabled_flags(x) +#define raw_nmi_irqs_disabled() upsr_nmi_irqs_disabled() + +/* all_irq_*() - the same as above, but enables, disables and checks + * both non-maskable and maskable interrupts. 
*/ + +#define raw_all_irq_enable() UPSR_ALL_STI() +#define raw_all_irq_disable() UPSR_ALL_CLI() +#define boot_raw_all_irq_enable() BOOT_UPSR_ALL_STI() +#define boot_raw_all_irq_disable() BOOT_UPSR_ALL_CLI() + +#define raw_all_irq_save(x) UPSR_ALL_SAVE_AND_CLI(x) +#define raw_all_irq_restore(x) UPSR_RESTORE(x) +#define boot_raw_all_irq_save(x) BOOT_UPSR_ALL_SAVE_AND_CLI(x) +#define boot_raw_all_irq_restore(x) BOOT_UPSR_RESTORE(x) + +#define raw_all_irqs_disabled_flags(x) upsr_all_irqs_disabled_flags(x) +#define raw_all_irqs_disabled() upsr_all_irqs_disabled() + +#define all_irq_enable() \ + do { trace_hardirqs_on(); raw_all_irq_enable(); } while (0) + +#define all_irq_disable() \ + do { raw_all_irq_disable(); trace_hardirqs_off(); } while (0) + +#define all_irq_save(flags) \ + do { \ + typecheck(unsigned long, flags); \ + raw_all_irq_save(flags); \ + trace_hardirqs_off(); \ + } while (0) + +#define all_irq_restore(flags) \ + do { \ + typecheck(unsigned long, flags); \ + if (raw_all_irqs_disabled_flags(flags)) { \ + raw_all_irq_restore(flags); \ + trace_hardirqs_off(); \ + } else { \ + trace_hardirqs_on(); \ + raw_all_irq_restore(flags); \ + } \ + } while (0) + +/* + * Used in the idle loop + */ +static inline void arch_safe_halt(void) +{ +} + +#endif /* __ASSEMBLY__ */ +#endif /* _E2K_IRQFLAGS_H_ */ diff --git a/arch/e2k/include/asm/kdebug.h b/arch/e2k/include/asm/kdebug.h new file mode 100644 index 000000000000..5679fca84aa9 --- /dev/null +++ b/arch/e2k/include/asm/kdebug.h @@ -0,0 +1,22 @@ +#ifndef _ASM_E2K_KDEBUG_H +#define _ASM_E2K_KDEBUG_H + +#include + +struct pt_regs; + +/* Grossly misnamed. 
*/ +enum die_val { + DIE_OOPS = 1, + DIE_BREAKPOINT +}; + +extern void printk_address(unsigned long address, int reliable); +extern void show_trace(struct task_struct *t, struct pt_regs *regs, + unsigned long *sp, unsigned long bp); +extern void __show_regs(struct pt_regs *regs, int all); +extern void show_regs(struct pt_regs *regs); +extern unsigned long oops_begin(void); +extern void oops_end(unsigned long, struct pt_regs *, int signr); + +#endif /* _ASM_E2K_KDEBUG_H */ diff --git a/arch/e2k/include/asm/keyboard.h b/arch/e2k/include/asm/keyboard.h new file mode 100644 index 000000000000..33adac822e80 --- /dev/null +++ b/arch/e2k/include/asm/keyboard.h @@ -0,0 +1,70 @@ + +/* + * Assume that we have a generic PC-style keyboard controller + * in our E2K station built-in. + */ + +/* + * linux/include/asm-i386/keyboard.h + * + * Created 3 Nov 1996 by Geert Uytterhoeven + */ + +#ifndef _E2K_KEYBOARD_H_ +#define _E2K_KEYBOARD_H_ + +#ifdef __KERNEL__ + +#include +#include +#include + +#define KEYBOARD_IRQ 1 +#define DISABLE_KBD_DURING_INTERRUPTS 0 + +extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode); +extern int pckbd_getkeycode(unsigned int scancode); +extern int pckbd_translate(unsigned char scancode, unsigned char *keycode, + char raw_mode); +extern char pckbd_unexpected_up(unsigned char keycode); +extern void pckbd_leds(unsigned char leds); +extern void pckbd_init_hw(void); +extern unsigned char pckbd_sysrq_xlate[128]; + +#define kbd_setkeycode pckbd_setkeycode +#define kbd_getkeycode pckbd_getkeycode +#define kbd_translate pckbd_translate +#define kbd_unexpected_up pckbd_unexpected_up +#define kbd_leds pckbd_leds +#define kbd_init_hw pckbd_init_hw +#define kbd_sysrq_xlate pckbd_sysrq_xlate + +#define SYSRQ_KEY 0x54 + +/* resource allocation */ +#define kbd_request_region() +#define kbd_request_irq(handler) request_irq(KEYBOARD_IRQ, handler, 0, \ + "keyboard", NULL) + +/* How to access the keyboard macros on this platform. 
*/ +#define kbd_read_input() inb(KBD_DATA_REG) +#define kbd_read_status() inb(KBD_STATUS_REG) +#define kbd_write_output(val) outb(val, KBD_DATA_REG) +#define kbd_write_command(val) outb(val, KBD_CNTL_REG) + +/* Some stoneage hardware needs delays after some operations. */ +#define kbd_pause() do { } while(0) + +/* + * Machine specific bits for the PS/2 driver + */ + +#define AUX_IRQ 12 + +#define aux_request_irq(hand, dev_id) \ + request_irq(AUX_IRQ, hand, SA_SHIRQ, "PS/2 Mouse", dev_id) + +#define aux_free_irq(dev_id) free_irq(AUX_IRQ, dev_id) + +#endif /* __KERNEL__ */ +#endif /* _E2K_KEYBOARD_H_ */ diff --git a/arch/e2k/include/asm/kprobes.h b/arch/e2k/include/asm/kprobes.h new file mode 100644 index 000000000000..99970be7ce54 --- /dev/null +++ b/arch/e2k/include/asm/kprobes.h @@ -0,0 +1,83 @@ +#ifndef __ASM_E2K_KPROBES_H +#define __ASM_E2K_KPROBES_H + +#include + +#ifdef CONFIG_KPROBES + +#include +#include + +#include +#include +#include +#include + +#define __ARCH_WANT_KPROBES_INSN_SLOT + +typedef u8 kprobe_opcode_t; + +#define KPROBE_BREAK_1 0x0dc0c04004000001UL + +/* + * We need to store one additional instruction after the copied one + * to make sure processor won't generate exc_illegal_opcode instead + * of exc_last_wish/exc_instr_debug (exc_illegal_opcode has priority). 
+ */ +#define MAX_INSN_SIZE (E2K_INSTR_MAX_SIZE + sizeof(unsigned long)) + +struct arch_specific_insn { + kprobe_opcode_t *insn; +}; + +/* per-cpu kprobe control block */ +#define MAX_STACK_SIZE 256 +struct kprobe_ctlblk { + int kprobe_status; +}; + +#define kretprobe_blacklist_size 0 +#define arch_remove_kprobe(p) do { } while (0) + +#define flush_insn_slot(p) \ +do { \ + unsigned long slot = (unsigned long) p->ainsn.insn; \ + flush_icache_range(slot, slot + \ + MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); \ +} while (0) + +extern int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr); +extern int __kprobes kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data); + +static inline int is_kprobe_break1_trap(struct pt_regs *regs) +{ + u64 *instr = (u64 *)GET_IP_CR0_HI(regs->crs.cr0_hi); + + if (cpu_has(CPU_HWBUG_BREAKPOINT_INSTR)) + return (*instr & ~E2K_INSTR_HS_LNG_MASK) == + (KPROBE_BREAK_1 & ~E2K_INSTR_HS_LNG_MASK); + else + return *instr == KPROBE_BREAK_1; +} + +extern void kprobe_instr_debug_handle(struct pt_regs *); +#else +static inline int is_kprobe_break1_trap(struct pt_regs *regs) +{ + return false; +} + +static inline void kprobe_instr_debug_handle(struct pt_regs *regs) { } +#endif /* #ifdef CONFIG_KPROBES */ + +#ifdef CONFIG_KRETPROBES +extern int kretprobe_last_wish_handle(struct pt_regs *); +#else +static inline int kretprobe_last_wish_handle(struct pt_regs *regs) +{ + return 0; +} +#endif + +#endif /*__ASM_E2K_KPROBES_H */ diff --git a/arch/e2k/include/asm/kvm/Kbuild b/arch/e2k/include/asm/kvm/Kbuild new file mode 100644 index 000000000000..4f1c6a5242db --- /dev/null +++ b/arch/e2k/include/asm/kvm/Kbuild @@ -0,0 +1,8 @@ + +### e2k virtualization + +unifdef-y += guest/ + +unifdef-y += guest.h +unifdef-y += threads.h + diff --git a/arch/e2k/include/asm/kvm/aau_regs_access.h b/arch/e2k/include/asm/kvm/aau_regs_access.h new file mode 100644 index 000000000000..01a3a67e61f2 --- /dev/null +++ 
b/arch/e2k/include/asm/kvm/aau_regs_access.h @@ -0,0 +1,667 @@ +/* + * KVM AAU registers model access + * + * array access descriptors (AAD0, ... , AAD31); + * initial indices (AIND0, ... , AAIND15); + * indices increment values (AAINCR0, ... , AAINCR7); + * current values of "prefetch" indices (AALDI0, ... , AALDI63); + * array prefetch initialization mask (AALDV); + * prefetch attributes (AALDA0, ... , AALDA63); + * array prefetch advance mask (AALDM); + * array access status register (AASR); + * array access fault status register (AAFSTR); + * current values of "store" indices (AASTI0, ... , AASTI15); + * store attributes (AASTA0, ... , AASTA15); + */ + +#ifndef _KVM_AAU_REGS_ACCESS_H_ +#define _KVM_AAU_REGS_ACCESS_H_ + +#include +#include +#include + +/* + * Basic functions accessing virtual AAUs registers on guest. + */ +#define GUEST_AAU_REGS_BASE offsetof(kvm_vcpu_state_t, cpu.aau) +#define GUEST_AAU_REG(reg_name) (GUEST_AAU_REGS_BASE + \ + (offsetof(e2k_aau_t, reg_name))) +#define GUEST_AAU_AAIND(AAIND_no) (GUEST_AAU_REGS_BASE + \ + (offsetof(e2k_aau_t, aainds)) + \ + (sizeof(u64) * (AAIND_no))) +#define GUEST_AAU_AAINCR(AAINCR_no) (GUEST_AAU_REGS_BASE + \ + (offsetof(e2k_aau_t, aaincrs)) + \ + (sizeof(u64) * (AAINCR_no))) +#define GUEST_AAU_AASTI(AASTI_no) (GUEST_AAU_REGS_BASE + \ + (offsetof(e2k_aau_t, aastis)) + \ + (sizeof(u64) * (AASTI_no))) +#define GUEST_AAU_AALDI(AALDI_no) (offsetof(kvm_vcpu_state_t, cpu.aaldi) + \ + (sizeof(u64) * (AALDI_no))) +#define GUEST_AAU_AALDA(AALDA_no) (offsetof(kvm_vcpu_state_t, cpu.aalda) + \ + (sizeof(e2k_aalda_t) * (AALDA_no))) +#define GUEST_AAU_AAD_lo(AAD_no) (GUEST_AAU_REGS_BASE + \ + (offsetof(e2k_aau_t, aads)) + \ + (sizeof(e2k_aadj_t) * (AAD_no)) + \ + (offsetof(e2k_aadj_t, word.lo))) +#define GUEST_AAU_AAD_hi(AAD_no) (GUEST_AAU_REGS_BASE + \ + (offsetof(e2k_aau_t, aads)) + \ + (sizeof(e2k_aadj_t) * (AAD_no)) + \ + (offsetof(e2k_aadj_t, word.hi))) +#define GUEST_AAU_AALDM_lo() (GUEST_AAU_REGS_BASE + \ + 
(offsetof(e2k_aau_t, aaldm)) + \ + (offsetof(e2k_aaldm_t, lo))) +#define GUEST_AAU_AALDM_hi() (GUEST_AAU_REGS_BASE + \ + (offsetof(e2k_aau_t, aaldm)) + \ + (offsetof(e2k_aaldm_t, hi))) +#define GUEST_AAU_AALDV_lo() (GUEST_AAU_REGS_BASE + \ + (offsetof(e2k_aau_t, aaldv)) + \ + (offsetof(e2k_aaldv_t, lo))) +#define GUEST_AAU_AALDV_hi() (GUEST_AAU_REGS_BASE + \ + (offsetof(e2k_aau_t, aaldv)) + \ + (offsetof(e2k_aaldv_t, hi))) +#define GUEST_GET_AAU_BREG(reg_name) \ + E2K_LOAD_GUEST_VCPU_STATE_B(GUEST_AAU_REG(reg_name)) +#define GUEST_GET_AAU_SREG(reg_name) \ + E2K_LOAD_GUEST_VCPU_STATE_W(GUEST_AAU_REG(reg_name)) +#define GUEST_GET_AAU_DREG(reg_name) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_AAU_REG(reg_name)) +#define GUEST_SET_AAU_BREG(reg_name, value) \ + E2K_STORE_GUEST_VCPU_STATE_B(GUEST_AAU_REG(reg_name), value) +#define GUEST_SET_AAU_SREG(reg_name, value) \ + E2K_STORE_GUEST_VCPU_STATE_W(GUEST_AAU_REG(reg_name), value) +#define GUEST_SET_AAU_DREG(reg_name, value) \ + E2K_STORE_GUEST_VCPU_STATE_D(GUEST_AAU_REG(reg_name), value) +#define GUEST_GET_AAU_AAIND(AAIND_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_AAU_AAIND(AAIND_no)) +#define GUEST_GET_AAU_AAINCR(AAINCR_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_AAU_AAINCR(AAINCR_no)) +#define GUEST_GET_AAU_AASTI(AASTI_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_AAU_AASTI(AASTI_no)) +#define GUEST_GET_AAU_AALDI(AALDI_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_AAU_AALDI(AALDI_no)) +#define GUEST_GET_AAU_AALDA(AALDA_no) \ + E2K_LOAD_GUEST_VCPU_STATE_W(GUEST_AAU_AALDA(AALDA_no)) +#define GUEST_GET_AAU_AAD_lo(AAD_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_AAU_AAD_lo(AAD_no)) +#define GUEST_GET_AAU_AAD_hi(AAD_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_AAU_AAD_hi(AAD_no)) +#define GUEST_GET_AAU_AAD(AAD_no, mem_p) \ +({ \ + AWP(mem_p).lo = GUEST_GET_AAU_AAD_lo(AAD_no); \ + AWP(mem_p).hi = GUEST_GET_AAU_AAD_hi(AAD_no); \ +}) +#define GUEST_GET_AAU_AALDM_lo() \ + E2K_LOAD_GUEST_VCPU_STATE_W(GUEST_AAU_AALDM_lo()) +#define 
GUEST_GET_AAU_AALDM_hi() \ + E2K_LOAD_GUEST_VCPU_STATE_W(GUEST_AAU_AALDM_hi()) +#define GUEST_GET_AAU_AALDV_lo() \ + E2K_LOAD_GUEST_VCPU_STATE_W(GUEST_AAU_AALDV_lo()) +#define GUEST_GET_AAU_AALDV_hi() \ + E2K_LOAD_GUEST_VCPU_STATE_W(GUEST_AAU_AALDV_hi()) + +#define GUEST_SET_AAU_AAIND(AAIND_no, value) \ + E2K_STORE_GUEST_VCPU_STATE_D(GUEST_AAU_AAIND(AAIND_no), value) +#define GUEST_SET_AAU_AAINCR(AAINCR_no, value) \ + E2K_STORE_GUEST_VCPU_STATE_D(GUEST_AAU_AAINCR(AAINCR_no), value) +#define GUEST_SET_AAU_AASTI(AASTI_no, value) \ + E2K_STORE_GUEST_VCPU_STATE_D(GUEST_AAU_AASTI(AASTI_no), value) +#define GUEST_SET_AAU_AALDI(AALDI_no, value) \ + E2K_STORE_GUEST_VCPU_STATE_D(GUEST_AAU_AALDI(AALDI_no), value) +#define GUEST_SET_AAU_AALDA(AALDA_no, value) \ + E2K_STORE_GUEST_VCPU_STATE_W(GUEST_AAU_AALDI(AALDA_no), value) +#define GUEST_SET_AAU_AAD_lo(AAD_no, value) \ + E2K_STORE_GUEST_VCPU_STATE_D(GUEST_AAU_AAD_lo(AAD_no), value) +#define GUEST_SET_AAU_AAD_hi(AAD_no, value) \ + E2K_STORE_GUEST_VCPU_STATE_D(GUEST_AAU_AAD_hi(AAD_no), value) +#define GUEST_SET_AAU_AAD(AAD_no, mem_p) \ +({ \ + GUEST_SET_AAU_AAD_lo(AAD_no, AWP(mem_p).lo); \ + GUEST_SET_AAU_AAD_hi(AAD_no, AWP(mem_p).hi); \ +}) +#define GUEST_SET_AAU_AALDM_lo(lo) \ + E2K_STORE_GUEST_VCPU_STATE_W(GUEST_AAU_AALDM_lo(), lo) +#define GUEST_SET_AAU_AALDM_hi(hi) \ + E2K_STORE_GUEST_VCPU_STATE_W(GUEST_AAU_AALDM_hi(), hi) +#define GUEST_SET_AAU_AALDV_lo(lo) \ + E2K_STORE_GUEST_VCPU_STATE_W(GUEST_AAU_AALDV_lo(), lo) +#define GUEST_SET_AAU_AALDV_hi(hi) \ + E2K_STORE_GUEST_VCPU_STATE_W(GUEST_AAU_AALDV_hi(), hi) + +/* macros to deal with KVM AAU registers model */ + +#define KVM_GET_AAU_AAD(AAD_no, mem_p) \ + GUEST_GET_AAU_AAD(AAD_no, (mem_p)) +#define KVM_GET_AAU_4_AADs(AAD_4_no, mem_4_p) \ +({ \ + KVM_GET_AAU_AAD(((AAD_4_no) + 0), &(mem_4_p)[0]); \ + KVM_GET_AAU_AAD(((AAD_4_no) + 1), &(mem_4_p)[1]); \ + KVM_GET_AAU_AAD(((AAD_4_no) + 2), &(mem_4_p)[2]); \ + KVM_GET_AAU_AAD(((AAD_4_no) + 3), &(mem_4_p)[3]); \ +}) +#define 
KVM_GET_AAU_AAIND(AAIND_no) \ + GUEST_GET_AAU_AAIND(AAIND_no) +#define KVM_GET_AAU_AAINDS_VAL(AAIND1_no, AAIND2_no, val1, val2) \ +({ \ + val1 = GUEST_GET_AAU_AAIND(AAIND1_no); \ + val2 = GUEST_GET_AAU_AAIND(AAIND2_no); \ +}) +#define KVM_GET_AAU_AAIND_TAG() \ + GUEST_GET_AAU_SREG(aaind_tags) +#define KVM_GET_AAU_AAINDS(AAIND1_no, AAIND2_no, val1, val2) \ +({ \ + if (((AAIND1_no) != AAINDS_TAG_no) && \ + ((AAIND2_no) != AAINDS_TAG_no)) { \ + KVM_GET_AAU_AAINDS_VAL(AAIND1_no, AAIND2_no, val1, val2); \ + } else if ((AAIND1_no) == AAINDS_TAG_no) { \ + val1 = KVM_GET_AAU_AAIND_TAG(); \ + val2 = GUEST_GET_AAU_AAIND(AAIND2_no); \ + } else { \ + val1 = GUEST_GET_AAU_AAIND(AAIND1_no); \ + val2 = KVM_GET_AAU_AAIND_TAG(); \ + } \ +}) +#define KVM_GET_AAU_AAINCR(AAINCR_no) \ + GUEST_GET_AAU_AAINCR(AAINCR_no) +#define KVM_GET_AAU_AAINCRS_VAL(AAINCR1_no, AAINCR2_no, val1, val2) \ +({ \ + val1 = GUEST_GET_AAU_AAINCR(AAINCR1_no); \ + val2 = GUEST_GET_AAU_AAINCR(AAINCR2_no); \ +}) +#define KVM_GET_AAU_AAINCR_TAG() \ + GUEST_GET_AAU_SREG(aaincr_tags) +#define KVM_GET_AAU_AAINCRS(AAINCR1_no, AAINCR2_no, val1, val2) \ +({ \ + if (((AAINCR1_no) != AAINCRS_TAG_no) && \ + ((AAINCR2_no) != AAINCRS_TAG_no)) { \ + KVM_GET_AAU_AAINCRS_VAL(AAINCR1_no, AAINCR2_no, val1, val2); \ + } else if ((AAINCR1_no) == AAINCRS_TAG_no) { \ + val1 = KVM_GET_AAU_AAINCR_TAG(); \ + val2 = GUEST_GET_AAU_AAINCR(AAINCR2_no); \ + } else { \ + val1 = GUEST_GET_AAU_AAINCR(AAINCR1_no); \ + val2 = KVM_GET_AAU_AAINCR_TAG(); \ + } \ +}) +#define KVM_GET_AAU_AASTI(AASTI_no) \ + GUEST_GET_AAU_AASTI(AASTI_no) +#define KVM_GET_AAU_AASTIS(AASTI1_no, AASTI2_no, val1, val2) \ +({ \ + val1 = GUEST_GET_AAU_AASTI(AASTI1_no); \ + val2 = GUEST_GET_AAU_AASTI(AASTI2_no); \ +}) +#define KVM_GET_AAU_AASTI_TAG() \ + GUEST_GET_AAU_SREG(aasti_tags) +#define KVM_GET_AAU_AASR() \ + GUEST_GET_AAU_SREG(aasr) +#define KVM_GET_AAU_AAFSTR() \ + GUEST_GET_AAU_SREG(aafstr) +#define KVM_GET_AAU_AALDI(AALDI_no, lval, rval) \ +({ \ + lval = 
GUEST_GET_AAU_AALDI(((AALDI_no) + 0)); \ + rval = GUEST_GET_AAU_AALDI(((AALDI_no) + 32)); \ +}) +#define KVM_GET_AAU_AALDA(AALDA_no, lval, rval) \ +({ \ + lval = GUEST_GET_AAU_AALDA((AALDA_no) + 0); \ + rval = GUEST_GET_AAU_AALDA((AALDA_no) + 32); \ +}) +#define KVM_GET_AAU_AALDM_lo() GUEST_GET_AAU_AALDM_lo() +#define KVM_GET_AAU_AALDM_hi() GUEST_GET_AAU_AALDM_hi() +#define KVM_GET_AAU_AALDM(lo, hi) \ +({ \ + lo = KVM_GET_AAU_AALDM_lo(); \ + hi = KVM_GET_AAU_AALDM_hi(); \ +}) +#define KVM_GET_AAU_AALDV_lo() GUEST_GET_AAU_AALDV_lo() +#define KVM_GET_AAU_AALDV_hi() GUEST_GET_AAU_AALDV_hi() +#define KVM_GET_AAU_AALDV(lo, hi) \ +({ \ + lo = KVM_GET_AAU_AALDV_lo(); \ + hi = KVM_GET_AAU_AALDV_hi(); \ +}) + +#define KVM_SET_AAU_AAD(AAD_no, mem_p) \ + GUEST_SET_AAU_AAD(AAD_no, (mem_p)) +#define KVM_SET_AAU_4_AADs(AAD_4_no, mem_4_p) \ +({ \ + KVM_SET_AAU_AAD(((AAD_4_no) + 0), &(mem_4_p)[0]); \ + KVM_SET_AAU_AAD(((AAD_4_no) + 1), &(mem_4_p)[1]); \ + KVM_SET_AAU_AAD(((AAD_4_no) + 2), &(mem_4_p)[2]); \ + KVM_SET_AAU_AAD(((AAD_4_no) + 3), &(mem_4_p)[3]); \ +}) +#define KVM_SET_AAU_AAIND(AAIND_no, value) \ + GUEST_SET_AAU_AAIND(AAIND_no, value) +#define KVM_SET_AAU_AAINDS_VAL(AAIND1_no, AAIND2_no, val1, val2) \ +({ \ + GUEST_SET_AAU_AAIND(AAIND1_no, val1); \ + GUEST_SET_AAU_AAIND(AAIND2_no, val2); \ +}) +#define KVM_SET_AAU_AAIND_TAG(val) \ + GUEST_SET_AAU_SREG(aaind_tags, val) +#define KVM_SET_AAU_AAINDS(AAIND1_no, AAIND2_no, val1, val2) \ +({ \ + if (((AAIND1_no) != AAINDS_TAG_no) && \ + ((AAIND2_no) != AAINDS_TAG_no)) { \ + KVM_SET_AAU_AAINDS_VAL(AAIND1_no, AAIND2_no, val1, val2); \ + } else if ((AAIND1_no) == AAINDS_TAG_no) { \ + KVM_SET_AAU_AAIND_TAG(val1); \ + GUEST_SET_AAU_AAIND(AAIND2_no, val2); \ + } else { \ + GUEST_SET_AAU_AAIND(AAIND1_no, val1); \ + KVM_SET_AAU_AAIND_TAG(val2); \ + } \ +}) +#define KVM_SET_AAU_AAINCR(AAINCR_no, val) \ + GUEST_SET_AAU_AAINCR(AAINCR_no, val) +#define KVM_SET_AAU_AAINCRS_VAL(AAINCR1_no, AAINCR2_no, val1, val2) \ +({ \ + 
GUEST_SET_AAU_AAINCR(AAINCR1_no, val1); \ + GUEST_SET_AAU_AAINCR(AAINCR2_no, val2); \ +}) +#define KVM_SET_AAU_AAINCR_TAG(val) \ + GUEST_SET_AAU_SREG(aaincr_tags, val) +#define KVM_SET_AAU_AAIND_AAINCR_TAGS(aaind, aaincr) \ +do { \ + GUEST_SET_AAU_SREG(aaincr_tags, aaincr); \ + GUEST_SET_AAU_SREG(aaind_tags, aaind); \ +} while (0) + +#define KVM_SET_AAU_AAINCRS(AAINCR1_no, AAINCR2_no, val1, val2) \ +({ \ + if (((AAINCR1_no) != AAINCRS_TAG_no) && \ + ((AAINCR2_no) != AAINCRS_TAG_no)) { \ + KVM_SET_AAU_AAINCRS_VAL(AAINCR1_no, AAINCR2_no, val1, val2); \ + } else if ((AAINCR1_no) == AAINCRS_TAG_no) { \ + KVM_SET_AAU_AAINCR_TAG(val1); \ + GUEST_SET_AAU_AAINCR(AAINCR2_no, val2); \ + } else { \ + GUEST_SET_AAU_AAINCR(AAINCR1_no, val1); \ + KVM_SET_AAU_AAINCR_TAG(val2); \ + } \ +}) +#define KVM_SET_AAU_AASTI(AASTI_no, val) \ + GUEST_SET_AAU_AASTI(AASTI_no, val) +#define KVM_SET_AAU_AASTIS(AASTI1_no, AASTI2_no, val1, val2) \ +({ \ + GUEST_SET_AAU_AASTI(AASTI1_no, val1); \ + GUEST_SET_AAU_AASTI(AASTI2_no, val2); \ +}) +#define KVM_SET_AAU_AASTI_TAG(val) \ + GUEST_SET_AAU_SREG(aasti_tags, val) +#define KVM_SET_AAU_AASR(val) \ + GUEST_SET_AAU_SREG(aasr, val) +#define KVM_SET_AAU_AAFSTR(val) \ + GUEST_SET_AAU_SREG(aafstr, val) +#define KVM_SET_AAU_AALDI(AALDI_no, lval, rval) \ +({ \ + GUEST_SET_AAU_AALDI(((AALDI_no) + 0), lval); \ + GUEST_SET_AAU_AALDI(((AALDI_no) + 32), rval); \ +}) +#define KVM_SET_AAU_AALDA(AALDA_no, lval, rval) \ +({ \ + GUEST_SET_AAU_AALDA(((AALDA_no) + 0), lval); \ + GUEST_SET_AAU_AALDA(((AALDA_no) + 32), rval); \ +}) +#define KVM_SET_AAU_AALDM(lo, hi) \ +({ \ + GUEST_SET_AAU_AALDM_lo(lo); \ + GUEST_SET_AAU_AALDM_hi(hi); \ +}) +#define KVM_SET_AAU_AALDV(lo, hi) \ +({ \ + GUEST_SET_AAU_AALDV_lo(lo); \ + GUEST_SET_AAU_AALDV_hi(hi); \ +}) + +/* + * KVM virtual AAU registers access function (can be paravirtualized) + */ +static __always_inline u32 +kvm_read_aasr_reg_value(void) +{ + return KVM_GET_AAU_AASR(); +} +static __always_inline void 
+kvm_write_aasr_reg_value(u32 reg_value) +{ + KVM_SET_AAU_AASR(reg_value); +} +static inline u32 +kvm_read_aafstr_reg_value(void) +{ + return KVM_GET_AAU_AAFSTR(); +} +static inline void +kvm_write_aafstr_reg_value(u32 reg_value) +{ + KVM_SET_AAU_AAFSTR(reg_value); +} + +static __always_inline e2k_aasr_t +kvm_read_aasr_reg(void) +{ + e2k_aasr_t aasr; + + AW(aasr) = kvm_read_aasr_reg_value(); + return aasr; +} +static __always_inline void +kvm_write_aasr_reg(e2k_aasr_t aasr) +{ + kvm_write_aafstr_reg_value(AW(aasr)); +} + +static inline u64 +kvm_read_aaind_reg_value(int AAIND_no) +{ + return KVM_GET_AAU_AAIND(AAIND_no); +} +static inline void +kvm_write_aaind_reg_value(int AAIND_no, u64 reg_value) +{ + KVM_SET_AAU_AAIND(AAIND_no, reg_value); +} + +static inline void +kvm_read_aainds_pair_value(int AAINDs_pair, u64 *lo_value, u64 *hi_value) +{ + u64 value1, value2; + + KVM_GET_AAU_AAINDS(AAINDs_pair, (AAINDs_pair + 1), value1, value2); + *lo_value = value1; + *hi_value = value2; +} +#define KVM_READ_AAINDS_PAIR_VALUE_V2(AAINDs_pair, value1, value2) \ + KVM_GET_AAU_AAINDS(AAINDs_pair, ((AAINDs_pair) + 1), \ + value1, value2) +#define KVM_READ_AAINDS_PAIR_VALUE_V5(AAINDs_pair, value1, value2) \ + KVM_GET_AAU_AAINDS(AAINDs_pair, ((AAINDs_pair) + 1), \ + value1, value2) +#define KVM_READ_AAIND_REG15_AND_TAGS_VALUE_V5(value15, tags) \ +({ \ + value15 = kvm_read_aaind_reg_value(15); \ + tags = kvm_read_aaind_tags_reg_value(); \ +}) + +static inline void +kvm_write_aainds_pair_value(int AAINDs_pair, u64 lo_value, u64 hi_value) +{ + KVM_SET_AAU_AAINDS(AAINDs_pair, (AAINDs_pair + 1), lo_value, hi_value); +} +#define KVM_WRITE_AAINDS_PAIR_VALUE_V2(AAINDs_pair, lo_value, hi_value) \ + kvm_write_aainds_pair_value(AAINDs_pair, lo_value, hi_value) +#define KVM_WRITE_AAINDS_PAIR_VALUE_V5(AAINDs_pair, lo_value, hi_value) \ + kvm_write_aainds_pair_value(AAINDs_pair, lo_value, hi_value) + +static inline u32 +kvm_read_aaind_tags_reg_value(void) +{ + return KVM_GET_AAU_AAIND_TAG(); +} 
+static inline void +kvm_write_aaind_tags_reg_value(u32 reg_value) +{ + KVM_SET_AAU_AAIND_TAG(reg_value); +} +static inline u64 +kvm_read_aaincr_reg_value(int AAINCR_no) +{ + return KVM_GET_AAU_AAINCR(AAINCR_no); +} +static inline void +kvm_write_aaincr_reg_value(int AAINCR_no, u64 reg_value) +{ + KVM_SET_AAU_AAINCR(AAINCR_no, reg_value); +} +static inline u32 +kvm_read_aaincr_tags_reg_value(void) +{ + return KVM_GET_AAU_AAINCR_TAG(); +} +static inline void +kvm_write_aaincr_tags_reg_value(u32 reg_value) +{ + KVM_SET_AAU_AAINCR_TAG(reg_value); +} + +static inline void +kvm_read_aaincrs_pair_value(int AAINCRs_pair, u64 *lo_value, u64 *hi_value) +{ + u64 value1, value2; + + KVM_GET_AAU_AAINCRS(AAINCRs_pair, (AAINCRs_pair + 1), value1, value2); + *lo_value = value1; + *hi_value = value2; +} +#define KVM_READ_AAINCRS_PAIR_VALUE_V2(AAINCRs_pair, value1, value2) \ + KVM_GET_AAU_AAINCRS(AAINCRs_pair, ((AAINCRs_pair) + 1), \ + value1, value2) +#define KVM_READ_AAINCRS_PAIR_VALUE_V5(AAINCRs_pair, value1, value2) \ + KVM_GET_AAU_AAINCRS(AAINCRs_pair, ((AAINCRs_pair) + 1), \ + value1, value2) +#define KVM_READ_AAINCR_REG7_AND_TAGS_VALUE_V5(value7, tags) \ +({ \ + value7 = kvm_read_aaincr_reg_value(7); \ + tags = kvm_read_aaincr_tags_reg_value(); \ +}) + +static inline void +kvm_write_aaincrs_pair_value(int AAINCRs_pair, u64 lo_value, u64 hi_value) +{ + KVM_SET_AAU_AAINCRS(AAINCRs_pair, (AAINCRs_pair + 1), + lo_value, hi_value); +} +#define KVM_WRITE_AAINCRS_PAIR_VALUE_V2(AAINCRs_pair, lo_value, hi_value) \ + kvm_write_aaincrs_pair_value(AAINCRs_pair, lo_value, hi_value) +#define KVM_WRITE_AAINCRS_PAIR_VALUE_V5(AAINCRs_pair, lo_value, hi_value) \ + kvm_write_aaincrs_pair_value(AAINCRs_pair, lo_value, hi_value) + +static inline u64 +kvm_read_aasti_reg_value(int AASTI_no) +{ + return KVM_GET_AAU_AASTI(AASTI_no); +} +static inline void +kvm_write_aasti_reg_value(int AASTI_no, u64 reg_value) +{ + KVM_SET_AAU_AASTI(AASTI_no, reg_value); +} +static inline u32 
+kvm_read_aasti_tags_reg_value(void) +{ + return KVM_GET_AAU_AASTI_TAG(); +} +static inline void +kvm_write_aasti_tags_reg_value(u32 reg_value) +{ + KVM_SET_AAU_AASTI_TAG(reg_value); +} + +static inline void +kvm_read_aastis_pair_value(int AASTIs_pair, u64 *lo_value, u64 *hi_value) +{ + u64 value1, value2; + + KVM_GET_AAU_AASTIS(AASTIs_pair, (AASTIs_pair + 1), value1, value2); + *lo_value = value1; + *hi_value = value2; +} +#define KVM_READ_AASTIS_PAIR_VALUE_V2(AASTIs_pair, value1, value2) \ + KVM_GET_AAU_AASTIS(AASTIs_pair, ((AASTIs_pair) + 1), \ + value1, value2) +#define KVM_READ_AASTIS_PAIR_VALUE_V5(AASTIs_pair, value1, value2) \ + KVM_GET_AAU_AASTIS(AASTIs_pair, ((AASTIs_pair) + 1), \ + value1, value2) + +static inline void +kvm_write_aastis_pair_value(int AASTIs_pair, u64 lo_value, u64 hi_value) +{ + KVM_SET_AAU_AASTIS(AASTIs_pair, (AASTIs_pair + 1), lo_value, hi_value); +} +#define KVM_WRITE_AASTIS_PAIR_VALUE_V2(AASTIs_pair, lo_value, hi_value) \ + kvm_write_aastis_pair_value(AASTIs_pair, lo_value, hi_value) +#define KVM_WRITE_AASTIS_PAIR_VALUE_V5(AASTIs_pair, lo_value, hi_value) \ + kvm_write_aastis_pair_value(AASTIs_pair, lo_value, hi_value) + +static inline void +kvm_read_aaldi_reg_value(int AALDI_no, u64 *l_value, u64 *r_value) +{ + u64 value1, value2; + + KVM_GET_AAU_AALDI(AALDI_no, value1, value2); + *l_value = value1; + *r_value = value2; +} +#define KVM_READ_AALDI_REG_VALUE_V2(AALDI_no, value1, value2) \ + KVM_GET_AAU_AALDI(AALDI_no, value1, value2) +#define KVM_READ_AALDI_REG_VALUE_V5(AALDI_no, value1, value2) \ + KVM_GET_AAU_AALDI(AALDI_no, value1, value2) + +static inline void +kvm_write_aaldi_reg_value(int AALDI_no, u64 l_value, u64 r_value) +{ + KVM_SET_AAU_AALDI(AALDI_no, l_value, r_value); +} + +static inline void +kvm_read_aaldas_reg_value(int AALDAs_no, u32 *l_value, u32 *r_value) +{ + u32 value1, value2; + + KVM_GET_AAU_AALDA(AALDAs_no, value1, value2); + *l_value = value1; + *r_value = value2; +} + +static inline void 
+kvm_write_aaldas_reg_value(int AALDAs_no, u32 l_value, u32 r_value) +{ + KVM_SET_AAU_AALDA(AALDAs_no, l_value, r_value); +} +static inline void +kvm_read_aaldm_reg_value(u32 *lo_value, u32 *hi_value) +{ + u32 value1, value2; + + KVM_GET_AAU_AALDM(value1, value2); + *lo_value = value1; + *hi_value = value2; +} +static inline void +kvm_write_aaldm_reg_value(u32 lo_value, u32 hi_value) +{ + KVM_SET_AAU_AALDM(lo_value, hi_value); +} +static inline void +kvm_read_aaldm_reg(e2k_aaldm_t *aaldm) +{ + kvm_read_aaldm_reg_value(&aaldm->lo, &aaldm->hi); +} +static inline void +kvm_write_aaldm_reg(e2k_aaldm_t *aaldm) +{ + kvm_write_aaldm_reg_value(aaldm->lo, aaldm->hi); +} +static inline void +kvm_read_aaldv_reg_value(u32 *lo_value, u32 *hi_value) +{ + u32 value1, value2; + + KVM_GET_AAU_AALDV(value1, value2); + *lo_value = value1; + *hi_value = value2; +} +static inline void +kvm_write_aaldv_reg_value(u32 lo_value, u32 hi_value) +{ + KVM_SET_AAU_AALDV(lo_value, hi_value); +} +static inline void +kvm_read_aaldv_reg(e2k_aaldv_t *aaldv) +{ + kvm_read_aaldv_reg_value(&aaldv->lo, &aaldv->hi); +} +static inline void +kvm_write_aaldv_reg(e2k_aaldv_t *aaldv) +{ + kvm_write_aaldm_reg_value(aaldv->lo, aaldv->hi); +} + +static inline void +kvm_read_aad_reg(int AAD_no, e2k_aadj_t *mem_p) +{ + KVM_GET_AAU_AAD(AAD_no, mem_p); +} + +static inline void +kvm_write_aad_reg(int AAD_no, e2k_aadj_t *mem_p) +{ + KVM_SET_AAU_AAD(AAD_no, mem_p); +} + +static inline void +kvm_read_aads_4_reg(int AADs_no, e2k_aadj_t *mem_p) +{ + KVM_GET_AAU_4_AADs(AADs_no, mem_p); +} + +static inline void +kvm_write_aads_4_reg(int AADs_no, e2k_aadj_t *mem_p) +{ + KVM_SET_AAU_4_AADs(AADs_no, mem_p); +} + +#define kvm_clear_apb() /* AAU context should restore host */ + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* It is pure kvm kernel without paravirtualization */ + +#include + +static __always_inline u32 read_aasr_reg_value(void) +{ + return kvm_read_aasr_reg_value(); +} +static __always_inline void write_aasr_reg_value(u32 
reg_value) +{ + kvm_write_aasr_reg_value(reg_value); +} +static __always_inline e2k_aasr_t read_aasr_reg(void) +{ + return kvm_read_aasr_reg(); +} +static __always_inline void write_aasr_reg(e2k_aasr_t aasr) +{ + kvm_write_aasr_reg(aasr); +} +static inline u32 read_aafstr_reg_value(void) +{ + return kvm_read_aafstr_reg_value(); +} +static inline void write_aafstr_reg_value(u32 reg_value) +{ + kvm_write_aafstr_reg_value(reg_value); +} +static inline void read_aaldm_reg(e2k_aaldm_t *aaldm) +{ + kvm_read_aaldm_reg_value(&aaldm->lo, &aaldm->hi); +} +static inline void write_aaldm_reg(e2k_aaldm_t *aaldm) +{ + kvm_write_aaldm_reg_value(aaldm->lo, aaldm->hi); +} +static inline void read_aaldv_reg(e2k_aaldv_t *aaldv) +{ + kvm_read_aaldv_reg_value(&aaldv->lo, &aaldv->hi); +} +static inline void write_aaldv_reg(e2k_aaldv_t *aaldv) +{ + kvm_write_aaldm_reg_value(aaldv->lo, aaldv->hi); +} + +#define clear_apb() kvm_clear_apb() + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* _KVM_AAU_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/kvm/async_pf.h b/arch/e2k/include/asm/kvm/async_pf.h new file mode 100644 index 000000000000..e6bdaf918367 --- /dev/null +++ b/arch/e2k/include/asm/kvm/async_pf.h @@ -0,0 +1,33 @@ +#ifndef ASYNC_PF_E2K_H +#define ASYNC_PF_E2K_H + +#ifdef CONFIG_KVM_ASYNC_PF + +/* No async page fault occured */ +#define KVM_APF_NO 0 +/* Physical page was swapped out by host */ +#define KVM_APF_PAGE_IN_SWAP 1 +/* Physical page is loaded from swap and ready for access */ +#define KVM_APF_PAGE_READY 2 + +#define KVM_APF_HASH_BITS 8 +/* Number of hash buckets in apf cache */ +#define KVM_APF_CACHE_SIZE (1 << KVM_APF_HASH_BITS) + +/* + * Type of irq controller which will be used + * by host to notify guest that page is ready + */ +enum { + EPIC_CONTROLLER = 1, + APIC_CONTROLLER = 2 +}; + +u32 pv_apf_read_and_reset_reason(void); + +void pv_apf_wait(void); +void pv_apf_wake(void); + +#endif /* CONFIG_KVM_ASYNC_PF */ + +#endif /* ASYNC_PF_H */ diff --git 
a/arch/e2k/include/asm/kvm/boot.h b/arch/e2k/include/asm/kvm/boot.h new file mode 100644 index 000000000000..2684553792ec --- /dev/null +++ b/arch/e2k/include/asm/kvm/boot.h @@ -0,0 +1,50 @@ +/* + * E2K boot-time initializtion virtualization for KVM host + * + * Copyright 2017 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_BOOT_H_ +#define _E2K_KVM_BOOT_H_ + +#ifndef __ASSEMBLY__ + +#include +#include + +#include +#include + +#ifndef CONFIG_VIRTUALIZATION +/* it is native kernel without any virtualization support */ +#else /* CONFIG_VIRTUALIZATION */ + +#if defined(CONFIG_PARAVIRT_GUEST) || defined(CONFIG_KVM_GUEST_KERNEL) +/* it is paravirtualized host and guest kernel */ +/* or pure guest kernel */ +#include +#endif /* CONFIG_PARAVIRT_GUEST || CONFIG_KVM_GUEST_KERNEL */ + +#ifndef CONFIG_KVM_GUEST_KERNEL +/* it is native host kernel with virtualization support */ +/* or paravirtualized host and guest kernel */ +static inline void +kvm_host_machine_setup(machdep_t *host_machine) +{ + machdep_t *node_mach; + int nid; + + for_each_node_has_dup_kernel(nid) { + node_mach = the_node_machine(nid); + if (host_machine->native_iset_ver < E2K_ISET_V5) { + } else { + } + } +} +#endif /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ + +#endif /* CONFIG_VIRTUALIZATION */ + +#endif /* ! 
__ASSEMBLY__ */ + +#endif /* _E2K_KVM_BOOT_H_ */ diff --git a/arch/e2k/include/asm/kvm/boot_spinlock.h b/arch/e2k/include/asm/kvm/boot_spinlock.h new file mode 100644 index 000000000000..977dc1ad3c5e --- /dev/null +++ b/arch/e2k/include/asm/kvm/boot_spinlock.h @@ -0,0 +1,38 @@ +#ifndef __ASM_E2K_KVM_BOOT_SPINLOCK_H +#define __ASM_E2K_KVM_BOOT_SPINLOCK_H +/* + * This file implements the arch-dependent parts of kvm guest + * boot time spin_lock()/spin_unlock() slow part + * + * Copyright 2020 MCST + */ + +#include +#include + +#if defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#else + #error "Unknown virtualization type" +#endif /* CONFIG_PARAVIRT_GUEST */ + +#define arch_boot_spin_unlock kvm_boot_spin_unlock +static inline void kvm_boot_spin_unlock(boot_spinlock_t *lock) +{ + boot_spinlock_t val; + u16 ticket, ready; + + wmb(); /* wait for all store completion */ + val.lock = __api_atomic16_add_return32_lock( + 1 << BOOT_SPINLOCK_HEAD_SHIFT, &lock->lock); + ticket = val.tail; + ready = val.head; + + if (unlikely(ticket != ready)) { + /* spinlock has more user(s): so activate it(s) */ + boot_arch_spin_unlock_slow(lock); + } +} + +#endif /* __ASM_E2K_KVM_BOOT_SPINLOCK_H */ diff --git a/arch/e2k/include/asm/kvm/boot_spinlock_slow.h b/arch/e2k/include/asm/kvm/boot_spinlock_slow.h new file mode 100644 index 000000000000..17bdaecec492 --- /dev/null +++ b/arch/e2k/include/asm/kvm/boot_spinlock_slow.h @@ -0,0 +1,44 @@ +#ifndef __ASM_E2K_KVM_BOOT_SPINLOCK_SLOW_H +#define __ASM_E2K_KVM_BOOT_SPINLOCK_SLOW_H +/* + * This file implements on host the arch-dependent parts of kvm guest + * boot-time spin_lock()/spin_unlock() slow part + * + * Copyright 2020 MCST + */ + +#include +#include +#include +#include + +typedef struct boot_spinlock_unlocked { + struct list_head unlocked_list; + struct kvm_vcpu *vcpu; + struct list_head checked_unlocked; /* list of tasks */ + /* which alredy */ + /* checked spin */ + /* was 
unlocked */ + + void *lock; +} boot_spinlock_unlocked_t; + +#define BOOT_SPINLOCK_HASH_BITS 6 +#define BOOT_SPINLOCK_HASH_SHIFT 4 /* [9:4] hash bits */ +#define BOOT_SPINLOCK_HASH_SIZE (1 << SPINLOCK_HASH_BITS) +#define boot_spinlock_hashfn(lockp) \ + hash_long(((unsigned long)(lockp)) >> \ + BOOT_SPINLOCK_HASH_SHIFT, \ + BOOT_SPINLOCK_HASH_BITS) +#define BOOT_SPINUNLOCKED_LIST_SIZE 32 + +extern int kvm_boot_spin_lock_slow(struct kvm_vcpu *vcpu, void *lock, + bool check_unlock); +extern int kvm_boot_spin_locked_slow(struct kvm_vcpu *vcpu, void *lock); +extern int kvm_boot_spin_unlock_slow(struct kvm_vcpu *vcpu, void *lock, + bool add_to_unlock); + +extern int kvm_boot_spinlock_init(struct kvm *kvm); +extern void kvm_boot_spinlock_destroy(struct kvm *kvm); + +#endif /* __ASM_E2K_KVM_BOOT_SPINLOCK_SLOW_H */ \ No newline at end of file diff --git a/arch/e2k/include/asm/kvm/copy-hw-stacks.h b/arch/e2k/include/asm/kvm/copy-hw-stacks.h new file mode 100644 index 000000000000..55a35e6c12a0 --- /dev/null +++ b/arch/e2k/include/asm/kvm/copy-hw-stacks.h @@ -0,0 +1,462 @@ +/* + * KVM guest kernel processes support + * Copyright 2011 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_COPY_HW_STACKS_H +#define _E2K_KVM_COPY_HW_STACKS_H + +#include + +#include +#include +#include +#include +#include +#include + +#undef DEBUG_KVM_GUEST_STACKS_MODE +#undef DebugGUST +#define DEBUG_KVM_GUEST_STACKS_MODE 0 /* guest user stacks */ + /* copy debug */ +#define DebugGUST(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_GUEST_STACKS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#ifdef CONFIG_KVM_HOST_MODE +static inline void +prepare_pv_vcpu_inject_stacks(struct kvm_vcpu *vcpu, pt_regs_t *regs) +{ + e2k_stacks_t *stacks, *g_stacks; + gthread_info_t *gti = pv_vcpu_get_gti(vcpu); + + if (regs->g_stacks_valid) { + /* already prepared */ + return; + } + + /* all stacks at empty state, because of guest user recursion */ + /* of trap/system calls can not be */ + g_stacks = ®s->g_stacks; + g_stacks->usd_lo = gti->g_usd_lo; + g_stacks->usd_hi = gti->g_usd_hi; + g_stacks->top = gti->g_sbr.SBR_base; + g_stacks->psp_lo = gti->g_psp_lo; + g_stacks->psp_hi = gti->g_psp_hi; + g_stacks->pcsp_lo = gti->g_pcsp_lo; + g_stacks->pcsp_hi = gti->g_pcsp_hi; + + /* pshtp & pcshtp from guest user stack real state upon trap/syscall */ + stacks = ®s->stacks; + g_stacks->pshtp = stacks->pshtp; + g_stacks->pcshtp = stacks->pcshtp; + + regs->g_stacks_valid = true; + regs->g_stacks_active = false; + regs->need_inject = false; +} + +#undef EMULATE_EMPTY_CHAIN_STACK /* only to debug */ + +#ifdef EMULATE_EMPTY_CHAIN_STACK +static __always_inline void +pv_vcpu_emulate_empty_chain_staks(struct kvm_vcpu *vcpu, pt_regs_t *regs, + e2k_stacks_t *stacks, bool guest_user) +{ + e2k_pcshtp_t pcshtp; + unsigned long flags; + e2k_pcsp_lo_t g_pcsp_lo, k_pcsp_lo; + e2k_pcsp_hi_t g_pcsp_hi, k_pcsp_hi; + e2k_mem_crs_t __user *g_cframe; + e2k_mem_crs_t *k_crs; + int ret; + + pcshtp = stacks->pcshtp; + if (!(guest_user && pcshtp <= 0x40)) + return; + + g_pcsp_lo = regs->stacks.pcsp_lo; + g_pcsp_hi = regs->stacks.pcsp_hi; + + raw_all_irq_save(flags); + NATIVE_FLUSHC; + k_pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG(); + k_pcsp_lo = NATIVE_NV_READ_PCSP_LO_REG(); + BUG_ON(AS(k_pcsp_hi).ind != pcshtp); + + k_crs = (e2k_mem_crs_t *) AS(k_pcsp_lo).base; + g_cframe = (e2k_mem_crs_t __user *) (AS(g_pcsp_lo).base + + AS(g_pcsp_hi).ind - pcshtp); + ret = user_hw_stack_frames_copy(g_cframe, k_crs, pcshtp, regs, + 
k_pcsp_hi.PCSP_hi_ind, true); + if (ret) { + pr_err("%s(): copy to user stack failed\n", __func__); + BUG_ON(true); + } + k_pcsp_hi.PCSP_hi_ind -= pcshtp; + pcshtp = 0; + regs->stacks.pcshtp = pcshtp; + stacks->pcshtp = pcshtp; + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG(k_pcsp_hi); + raw_all_irq_restore(flags); +} +#else /* !EMULATE_EMPTY_CHAIN_STACK */ +static __always_inline void +pv_vcpu_emulate_empty_chain_staks(struct kvm_vcpu *vcpu, pt_regs_t *regs, + e2k_stacks_t *stacks, bool guest_user) +{ +} +#endif /* EMULATE_EMPTY_CHAIN_STACK */ + +/** + * pv_vcpu_user_hw_stacks_copy - check size of user hardware stacks that have + * been SPILLed to kernel back to guest space + * @regs - saved guest user stack registers + * @cur_window_q - size of current window in procedure stack + * + * All guest user's stacks part were already copied to guest kernel stacks, + * so it need only check that it was full size and nothing to copy here + */ +static __always_inline int +pv_vcpu_user_hw_stacks_copy(pt_regs_t *regs, e2k_stacks_t *stacks, + u64 cur_window_q, bool guest_user) +{ + e2k_psp_lo_t g_psp_lo = stacks->psp_lo, + k_psp_lo = current_thread_info()->k_psp_lo; + e2k_psp_hi_t g_psp_hi = stacks->psp_hi; + e2k_pcsp_lo_t g_pcsp_lo = stacks->pcsp_lo, + k_pcsp_lo = current_thread_info()->k_pcsp_lo; + e2k_pcsp_hi_t g_pcsp_hi = stacks->pcsp_hi; + s64 g_pshtp_size, g_pcshtp_size, ps_copy_size, pcs_copy_size; + int ret; + + DebugUST("guest kernel chain state: base 0x%llx ind 0x%x size 0x%x\n", + g_pcsp_lo.PCSP_lo_base, g_pcsp_hi.PCSP_hi_ind, + g_pcsp_hi.PCSP_hi_size); + DebugUST("guest kernel proc state: base 0x%llx ind 0x%x size 0x%x\n", + g_psp_lo.PSP_lo_base, g_psp_hi.PSP_hi_ind, + g_psp_hi.PSP_hi_size); + g_pshtp_size = GET_PSHTP_MEM_INDEX(stacks->pshtp); + g_pcshtp_size = PCSHTP_SIGN_EXTEND(stacks->pcshtp); + DebugUST("guest kernel chain stack PCSHTP 0x%llx, " + "proc stack PSHTP 0x%llx cur window 0x%llx\n", + g_pcshtp_size, g_pshtp_size, cur_window_q); + + /* + * FIXME: the current 
implementation of the guest user signal handler + * injection uses direct copying to guest hardware stacks. + * It is bad decision, needs to be corrected + KVM_BUG_ON(is_paging(current_thread_info()->vcpu) && + (g_psp_lo.PSP_lo_base < GUEST_TASK_SIZE || + g_pcsp_lo.PCSP_lo_base < GUEST_TASK_SIZE)); + */ + + /* + * Calculate size of user's part to copy from kernel stacks + * into guest kernel stacks + */ + pcs_copy_size = get_pcs_copy_size(g_pcshtp_size); + ps_copy_size = get_ps_copy_size(cur_window_q, g_pshtp_size); + /* Make sure there is enough space in CF for the FILL */ + BUG_ON((E2K_MAXCR_q - 4) * 16 < E2K_CF_MAX_FILL); + DebugUST("to copy chain stack 0x%llx, proc stack 0x%llx\n", + pcs_copy_size, ps_copy_size); + + if (likely(pcs_copy_size <= 0 && ps_copy_size <= 0)) + return 0; + + if (unlikely(pcs_copy_size > 0)) { + e2k_pcsp_hi_t k_pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG(); + void __user *dst; + void *src; + + /* Since SPILL'ed guest user data will be copyed to guest */ + /* kernel stacks then cannot be any overflow of user's */ + /* hardware stack. 
*/ + if (unlikely(AS(g_pcsp_hi).ind > AS(g_pcsp_hi).size)) { + pr_err("%s(): guest kernel chain stack overflow " + "(out of memory?): ind 0x%x size 0x%x\n", + __func__, g_pcsp_hi.PCSP_hi_ind, + g_pcsp_hi.PCSP_hi_size); + KVM_BUG_ON(true); + } + dst = (void __user *)(g_pcsp_lo.PCSP_lo_base + + g_pcsp_hi.PCSP_hi_ind); + if (!guest_user) { + /* stack index has been incremented on PCSHTP */ + dst -= g_pcshtp_size; + } + src = (void *)k_pcsp_lo.PCSP_lo_base; + + if (trace_host_copy_hw_stack_enabled()) + trace_host_copy_hw_stack(dst, src, pcs_copy_size, true); + + ret = user_hw_stack_frames_copy(dst, src, pcs_copy_size, regs, + k_pcsp_hi.PCSP_hi_ind, true); + if (trace_host_chain_stack_frame_enabled()) + trace_chain_stack_frames((e2k_mem_crs_t *)dst, + (e2k_mem_crs_t *)src, pcs_copy_size, + trace_host_chain_stack_frame); + if (ret) + return ret; + if (guest_user) { + g_pcsp_hi.PCSP_hi_ind += pcs_copy_size; + stacks->pcsp_hi = g_pcsp_hi; + DebugGUST("guest user chain stack frames copied from " + "host %px to guest kernel from %px size 0x%llx " + "PCSP.ind 0x%x\n", + src, dst, pcs_copy_size, g_pcsp_hi.PCSP_hi_ind); + } + } + + if (unlikely(ps_copy_size > 0)) { + e2k_psp_hi_t k_psp_hi = NATIVE_NV_READ_PSP_HI_REG(); + void __user *dst; + void *src; + + /* Since SPILL'ed guest user data will be copyed to guest */ + /* kernel stacks then cannot be any overflow of user's */ + /* hardware stack. 
*/ + if (unlikely(AS(g_psp_hi).ind > AS(g_psp_hi).size)) { + pr_err("%s(): guest kernel proc stack overflow " + "(out of memory?): ind 0x%x size 0x%x\n", + __func__, g_psp_hi.PSP_hi_ind, + g_psp_hi.PSP_hi_size); + KVM_BUG_ON(true); + } + dst = (void __user *)(g_psp_lo.PSP_lo_base + + g_psp_hi.PSP_hi_ind); + if (!guest_user) { + /* stack index has been incremented on PSHTP */ + dst -= g_pshtp_size; + } + src = (void *)k_psp_lo.PSP_lo_base; + + if (trace_host_copy_hw_stack_enabled()) + trace_host_copy_hw_stack(dst, src, ps_copy_size, false); + + ret = user_hw_stack_frames_copy(dst, src, ps_copy_size, regs, + k_psp_hi.PSP_hi_ind, false); + if (trace_host_proc_stack_frame_enabled()) + trace_proc_stack_frames((kernel_mem_ps_t *)dst, + (kernel_mem_ps_t *)src, ps_copy_size, + trace_host_proc_stack_frame); + if (ret) + return ret; + if (guest_user) { + g_psp_hi.PSP_hi_ind += ps_copy_size; + stacks->psp_hi = g_psp_hi; + DebugGUST("guest user proc stack frames copied from " + "host %px to guest kernel from %px size 0x%llx " + "PSP.ind 0x%x\n", + src, dst, ps_copy_size, g_psp_hi.PSP_hi_ind); + } + } + + return 0; +} + +/** + * pv_vcpu_user_hw_stacks_prepare - prepare guest user hardware stacks + that have been SPILLed to kernel back + to guest user space + * @regs - saved guest user stack registers + * @cur_window_q - size of current window in procedure stack + * @syscall - true if called upon direct system call exit (no signal handlers) + * + * This does two things: + * + * 1) It is possible that upon kernel entry pcshtp == 0 in some cases: + * - user signal handler had pcshtp==0x20 before return to sigreturn() + * - user context had pcshtp==0x20 before return to makecontext_trampoline() + * - chain stack underflow happened + * So it is possible in sigreturn() and traps, but not in system calls. + * If we are using the trick with return to FILL user hardware stacks than + * we must have frame in chain stack to return to. 
So in this case kernel's + * chain stack is moved up by one frame (0x20 bytes). + * We also fill the new frame with actual user data and update stacks->pcshtp, + * this is needed to keep the coherent state where saved stacks->pcshtp values + * shows how much data from user space has been spilled to kernel space. + * + * 2) It is not possible to always FILL all of user data that have been + * SPILLed to kernel stacks. So we manually copy the leftovers that can + * not be FILLed to user space. + * This copy does not update stacks->pshtp and stacks->pcshtp. Main reason + * is signals: if a signal arrives after copying then it must see a coherent + * state where saved stacks->pshtp and stacks->pcshtp values show how much + * data from user space has been spilled to kernel space. + */ +static __always_inline void +pv_vcpu_user_hw_stacks_prepare(struct kvm_vcpu *vcpu, pt_regs_t *regs, + u64 cur_window_q, enum restore_caller from, int syscall) +{ + e2k_stacks_t *stacks; + e2k_pcshtp_t pcshtp; + bool guest_user; + bool paging = is_paging(vcpu); + int ret; + + if (likely(paging)) { + guest_user = !!(syscall || !pv_vcpu_trap_on_guest_kernel(regs)); + } else { + guest_user = false; + } + if (guest_user) { + if (from & FROM_PV_VCPU_MODE) { + /* all preparation has been made */ + /* by host & guest handler */ + return; + } + + /* trap on/syscall from guest user, so regs keeps user */ + /* registers state and it need use guest kernel stacks */ + /* in empty state to handle this trap/syscall */ + if (!regs->g_stacks_valid) { + prepare_pv_vcpu_inject_stacks(vcpu, regs); + } + stacks = ®s->g_stacks; + } else { + /* trap on guest kernel, so regs already points to guest */ + /* kernel stacks and trap will be handled by host */ + /* same as other user's processes traps */ + stacks = ®s->stacks; + } + + /* only to debug on simulator : pcshtp == 0 */ + pv_vcpu_emulate_empty_chain_staks(vcpu, regs, stacks, guest_user); + + pcshtp = stacks->pcshtp; + DebugUST("guest kernel chain stack 
state: base 0x%llx ind 0x%x " + "size 0x%x\n", + stacks->pcsp_lo.PCSP_lo_base, + stacks->pcsp_hi.PCSP_hi_ind, + stacks->pcsp_hi.PCSP_hi_size); + DebugUST("host kernel chain stack state: base 0x%llx ind 0x%x " + "size 0x%x\n", + NATIVE_NV_READ_PCSP_LO_REG().PCSP_lo_base, + NATIVE_NV_READ_PCSP_HI_REG().PCSP_hi_ind, + NATIVE_NV_READ_PCSP_HI_REG().PCSP_hi_size); + DebugUST("guest kernel chain stack size to fill PCSHTP 0x%x\n", + pcshtp); + /* + * 1) Make sure there is free space in kernel chain stack to return to + */ + if (!syscall && pcshtp == 0 && !guest_user) { + unsigned long flags; + e2k_pcsp_lo_t g_pcsp_lo = stacks->pcsp_lo, + k_pcsp_lo = current_thread_info()->k_pcsp_lo; + e2k_pcsp_hi_t g_pcsp_hi = stacks->pcsp_hi, k_pcsp_hi; + e2k_mem_crs_t __user *g_cframe; + e2k_mem_crs_t *k_crs; + int ret = -EINVAL; + + raw_all_irq_save(flags); + NATIVE_FLUSHC; + k_pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG(); + BUG_ON(AS(k_pcsp_hi).ind); + AS(k_pcsp_hi).ind += SZ_OF_CR; + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG(k_pcsp_hi); + + k_crs = (e2k_mem_crs_t *) AS(k_pcsp_lo).base; + g_cframe = (e2k_mem_crs_t __user *) (AS(g_pcsp_lo).base + + AS(g_pcsp_hi).ind); + if ((u64) g_cframe > (u64) AS(g_pcsp_lo).base) { + ret = __copy_user_to_current_hw_stack(k_crs, + g_cframe - 1, sizeof(*k_crs), regs, true); + } + raw_all_irq_restore(flags); + + /* Can happen if application returns until runs out of + * chain stack or there is no free memory for stacks. + * There is no user stack to return to - die. */ + if (ret) { + E2K_LMS_HALT_OK; + pr_err("%s(): SIGKILL. %s\n", + __func__, + (ret == -EINVAL) ? 
+ "tried to return to kernel" + : + "ran into Out-of-Memory on user stacks"); + force_sig(SIGKILL); + return; + } + DebugUST("copy guest user chain frame from %px to kernel " + "bottom from %px\n", + g_cframe - 1, k_crs); + + if (AS(g_pcsp_hi).ind < SZ_OF_CR) { + pr_err("%s(): guest kernel chain stack underflow\n", + __func__); + KVM_BUG_ON(true); + } + + pcshtp = SZ_OF_CR; + stacks->pcshtp = pcshtp; + DebugUST("guest kernel chain stack to FILL PCSHTP " + "set to 0x%x\n", + stacks->pcshtp); + } else if (!syscall && pcshtp == 0 && guest_user) { + e2k_pcsp_hi_t k_pcsp_hi; + unsigned long flags; + + /* set flag for unconditional injection to do not copy */ + /* from guest user space */ + regs->need_inject = true; + + /* reserve one bottom frames for trampoline */ + /* the guest handler replaces guest user trapped frame */ + raw_all_irq_save(flags); + NATIVE_FLUSHC; + k_pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG(); + BUG_ON(k_pcsp_hi.PCSP_hi_ind); + k_pcsp_hi.PCSP_hi_ind += 1 * SZ_OF_CR; + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG(k_pcsp_hi); + raw_all_irq_restore(flags); + } + + /* + * 2) Copy user data that cannot be FILLed + */ + ret = pv_vcpu_user_hw_stacks_copy(regs, stacks, cur_window_q, + guest_user); + if (unlikely(ret)) + do_exit(SIGKILL); +} + +/* Same as for native kernel without virtualization support */ +static __always_inline int +user_hw_stacks_copy(struct e2k_stacks *stacks, + pt_regs_t *regs, u64 cur_window_q, bool copy_full) +{ + return native_user_hw_stacks_copy(stacks, regs, cur_window_q, copy_full); +} + +static __always_inline void +host_user_hw_stacks_prepare(struct e2k_stacks *stacks, pt_regs_t *regs, + u64 cur_window_q, enum restore_caller from, int syscall) +{ + struct kvm_vcpu *vcpu; + + if (likely(!kvm_test_intc_emul_flag(regs))) { + /* trap on/syscall from host user processes */ + return native_user_hw_stacks_prepare(stacks, regs, + cur_window_q, from, syscall); + } + + vcpu = current_thread_info()->vcpu; + KVM_BUG_ON(vcpu == NULL); + 
pv_vcpu_user_hw_stacks_prepare(vcpu, regs, cur_window_q, from, syscall); +} +#endif /* CONFIG_KVM_HOST_MODE */ + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native guest kernel */ +#include +#else /* CONFIG_VIRTUALIZATION && ! CONFIG_KVM_GUEST_KERNEL */ +/* it is native host kernel with virtualization support */ +/* or it is paravirtualized host and guest kernel */ +#endif /* ! CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! _E2K_KVM_COPY_HW_STACKS_H */ diff --git a/arch/e2k/include/asm/kvm/cpu_hv_regs_access.h b/arch/e2k/include/asm/kvm/cpu_hv_regs_access.h new file mode 100644 index 000000000000..fc93174f1c39 --- /dev/null +++ b/arch/e2k/include/asm/kvm/cpu_hv_regs_access.h @@ -0,0 +1,512 @@ +#ifndef _E2K_KVM_CPU_HV_REGS_ACCESS_H_ +#define _E2K_KVM_CPU_HV_REGS_ACCESS_H_ + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include + +/* + * Virtualization control registers + */ +#define READ_VIRT_CTRL_CU_REG_VALUE() NATIVE_GET_DSREG_CLOSED(virt_ctrl_cu) +/* Bug #127239: on some CPUs "rwd %virt_ctrl_cu" instruction must also + * contain a NOP. This is already accomplished by using delay "5" here. 
*/ +#define WRITE_VIRT_CTRL_CU_REG_VALUE(virt_ctrl) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(virt_ctrl_cu, virt_ctrl, 5) +#define READ_VIRT_CTRL_CU_REG() read_VIRT_CTRL_CU_reg() +#define WRITE_VIRT_CTRL_CU_REG(virt_ctrl) \ + write_VIRT_CTRL_CU_reg(virt_ctrl) + +/* Shadow CPU registers */ + +/* + * Read/write low/high double-word OS Compilation Unit Descriptor (SH_OSCUD) + */ + +#define READ_SH_OSCUD_LO_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sh_oscud.lo) +#define READ_SH_OSCUD_HI_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sh_oscud.hi) + +#define WRITE_SH_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(sh_oscud.lo, OSCUD_lo_value, 5) +#define WRITE_SH_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(sh_oscud.hi, OSCUD_hi_value, 5) +#define WRITE_SH_OSCUD_LO_REG(OSCUD_lo) \ +({ \ + WRITE_SH_OSCUD_LO_REG_VALUE(OSCUD_lo.OSCUD_lo_half); \ +}) +#define WRITE_SH_OSCUD_HI_REG(OSCUD_hi) \ +({ \ + WRITE_SH_OSCUD_HI_REG_VALUE(OSCUD_hi.OSCUD_hi_half); \ +}) +#define WRITE_SH_OSCUD_REG_VALUE(OSCUD_hi_value, OSCUD_lo_value) \ +({ \ + WRITE_SH_OSCUD_LO_REG_VALUE(OSCUD_hi_value); \ + WRITE_SH_OSCUD_HI_REG_VALUE(OSCUD_lo_value); \ +}) +#define WRITE_SH_OSCUD_REG(OSCUD_hi, OSCUD_lo) \ +({ \ + WRITE_SH_OSCUD_REG_VALUE(OSCUD_hi.OSCUD_hi_half, \ + OSCUD_lo.OSCUD_lo_half); \ +}) + +/* + * Read/write low/hgh double-word OS Globals Register (SH_OSGD) + */ + +#define READ_SH_OSGD_LO_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sh_osgd.lo) +#define READ_SH_OSGD_HI_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sh_osgd.hi) + +#define WRITE_SH_OSGD_LO_REG_VALUE(OSGD_lo_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(sh_osgd.lo, OSGD_lo_value, 5) +#define WRITE_SH_OSGD_HI_REG_VALUE(OSGD_hi_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(sh_osgd.hi, OSGD_hi_value, 5) +#define WRITE_SH_OSGD_LO_REG(OSGD_lo) \ +({ \ + WRITE_SH_OSGD_LO_REG_VALUE(OSGD_lo.OSGD_lo_half); \ +}) +#define WRITE_SH_OSGD_HI_REG(OSGD_hi) \ +({ \ + WRITE_SH_OSGD_HI_REG_VALUE(OSGD_hi.OSGD_hi_half); \ +}) +#define 
WRITE_SH_OSGD_REG_VALUE(OSGD_hi_value, OSGD_lo_value) \ +({ \ + WRITE_SH_OSGD_HI_REG_VALUE(OSGD_hi_value); \ + WRITE_SH_OSGD_LO_REG_VALUE(OSGD_lo_value); \ +}) +#define WRITE_SH_OSGD_REG(OSGD_hi, OSGD_lo) \ +({ \ + WRITE_SH_OSGD_REG_VALUE(OSGD_hi.OSGD_hi_half, \ + OSGD_lo.OSGD_lo_half); \ +}) + +/* + * Read/write low/high quad-word Procedure Stack Pointer Register + * (SH_PSP, backup BU_PSP) + */ + +#define READ_SH_PSP_LO_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sh_psp.lo) +#define READ_SH_PSP_HI_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sh_psp.hi) +#define READ_BU_PSP_LO_REG_VALUE() NATIVE_GET_DSREG_CLOSED(bu_psp.lo) +#define READ_BU_PSP_HI_REG_VALUE() NATIVE_GET_DSREG_CLOSED(bu_psp.hi) + +#define WRITE_SH_PSP_LO_REG_VALUE(PSP_lo_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(sh_psp.lo, PSP_lo_value, 5) +#define WRITE_SH_PSP_HI_REG_VALUE(PSP_hi_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(sh_psp.hi, PSP_hi_value, 5) +#define WRITE_BU_PSP_LO_REG_VALUE(PSP_lo_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(bu_psp.lo, PSP_lo_value, 5) +#define WRITE_BU_PSP_HI_REG_VALUE(PSP_hi_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(bu_psp.hi, PSP_hi_value, 5) + +/* + * Read/write word Procedure Stack Harware Top Pointer (SH_PSHTP) + */ +#define READ_SH_PSHTP_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sh_pshtp) +#define WRITE_SH_PSHTP_REG_VALUE(PSHTP_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(sh_pshtp, PSHTP_value, 5) + +/* + * Read/write low/high quad-word Procedure Chain Stack Pointer Register + * (SH_PCSP, backup registers BU_PCSP) + */ +#define READ_SH_PCSP_LO_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sh_pcsp.lo) +#define READ_SH_PCSP_HI_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sh_pcsp.hi) +#define READ_BU_PCSP_LO_REG_VALUE() NATIVE_GET_DSREG_CLOSED(bu_pcsp.lo) +#define READ_BU_PCSP_HI_REG_VALUE() NATIVE_GET_DSREG_CLOSED(bu_pcsp.hi) + +#define WRITE_SH_PCSP_LO_REG_VALUE(PCSP_lo_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(sh_pcsp.lo, PCSP_lo_value, 5) +#define WRITE_SH_PCSP_HI_REG_VALUE(PCSP_hi_value) \ + 
NATIVE_SET_DSREG_CLOSED_NOEXC(sh_pcsp.hi, PCSP_hi_value, 5) +#define WRITE_BU_PCSP_LO_REG_VALUE(PCSP_lo_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(bu_pcsp.lo, PCSP_lo_value, 5) +#define WRITE_BU_PCSP_HI_REG_VALUE(PCSP_hi_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(bu_pcsp.hi, PCSP_hi_value, 5) + +/* + * Read/write word Procedure Chain Stack Harware Top Pointer (SH_PCSHTP) + * and shadow pointer (SH_PCSHTP) + */ +#define READ_SH_PCSHTP_REG_VALUE() \ + NATIVE_GET_SREG_CLOSED(sh_pcshtp) +#define READ_SH_PCSHTP_REG_SVALUE() \ + PCSHTP_SIGN_EXTEND(NATIVE_GET_SREG_CLOSED(sh_pcshtp)) +#define WRITE_SH_PCSHTP_REG_SVALUE(PCSHTP_svalue) \ + NATIVE_SET_SREG_CLOSED_NOEXC(sh_pcshtp, PCSHTP_svalue, 5) + +/* + * Read/write current window descriptor register (SH_WD) + */ +#define READ_SH_WD_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sh_wd) +#define WRITE_SH_WD_REG_VALUE(WD_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(sh_wd, WD_value, 5) + +/* + * Read/write OS register which point to current process thread info + * structure (SH_OSR0) + */ +#define READ_SH_OSR0_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sh_osr0) + +#define WRITE_SH_OSR0_REG_VALUE(osr0_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(sh_osr0, osr0_value, 5) + +/* + * Read/Write system clock registers (SH_SCLKM3) + */ +#define READ_SH_SCLKM3_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sh_sclkm3) + +#define WRITE_SH_SCLKM3_REG_VALUE(reg_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(sh_sclkm3, reg_value, 4) + +/* + * Read/write double-word Compilation Unit Table Register (SH_OSCUTD) + */ +#define READ_SH_OSCUTD_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sh_oscutd) + +#define WRITE_SH_OSCUTD_REG_VALUE(CUTD_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(sh_oscutd, CUTD_value, 7) + +/* + * Read/write word Compilation Unit Index Register (SH_OSCUIR) + */ +#define READ_SH_OSCUIR_REG_VALUE() NATIVE_GET_SREG_CLOSED(sh_oscuir) + +#define WRITE_SH_OSCUIR_REG_VALUE(CUIR_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(sh_oscuir, CUIR_value, 7) + +/* + * Read/Write Processor Core Mode 
Register (SH_CORE_MODE) + */ +#define READ_SH_CORE_MODE_REG_VALUE() NATIVE_GET_SREG_CLOSED(sh_core_mode) +#define WRITE_SH_CORE_MODE_REG_VALUE(modes) \ + NATIVE_SET_SREG_CLOSED_NOEXC(sh_core_mode, modes, 5) + +extern unsigned long read_VIRT_CTRL_CU_reg_value(void); +extern void write_VIRT_CTRL_CU_reg_value(unsigned long value); +extern unsigned int read_SH_CORE_MODE_reg_value(void); +extern void write_SH_CORE_MODE_reg_value(unsigned int value); +extern unsigned long read_SH_PSP_LO_reg_value(void); +extern unsigned long read_SH_PSP_HI_reg_value(void); +extern void write_SH_PSP_LO_reg_value(unsigned long value); +extern void write_SH_PSP_HI_reg_value(unsigned long value); +extern unsigned long read_BU_PSP_LO_reg_value(void); +extern unsigned long read_BU_PSP_HI_reg_value(void); +extern void write_BU_PSP_LO_reg_value(unsigned long value); +extern void write_BU_PSP_HI_reg_value(unsigned long value); +extern unsigned long read_SH_PSHTP_reg_value(void); +extern void write_SH_PSHTP_reg_value(unsigned long value); +extern unsigned long read_SH_PCSP_LO_reg_value(void); +extern unsigned long read_SH_PCSP_HI_reg_value(void); +extern void write_SH_PCSP_LO_reg_value(unsigned long value); +extern void write_SH_PCSP_HI_reg_value(unsigned long value); +extern unsigned long read_BU_PCSP_LO_reg_value(void); +extern unsigned long read_BU_PCSP_HI_reg_value(void); +extern void write_BU_PCSP_LO_reg_value(unsigned long value); +extern void write_BU_PCSP_HI_reg_value(unsigned long value); +extern int read_SH_PCSHTP_reg_value(void); +extern void write_SH_PCSHTP_reg_value(int value); +extern unsigned long read_SH_WD_reg_value(void); +extern void write_SH_WD_reg_value(unsigned long value); +extern unsigned long read_SH_OSCUD_LO_reg_value(void); +extern unsigned long read_SH_OSCUD_HI_reg_value(void); +extern void write_SH_OSCUD_LO_reg_value(unsigned long value); +extern void write_SH_OSCUD_HI_reg_value(unsigned long value); +extern unsigned long read_SH_OSGD_LO_reg_value(void); +extern 
unsigned long read_SH_OSGD_HI_reg_value(void); +extern void write_SH_OSGD_LO_reg_value(unsigned long value); +extern void write_SH_OSGD_HI_reg_value(unsigned long value); +extern unsigned long read_SH_OSCUTD_reg_value(void); +extern void write_SH_OSCUTD_reg_value(unsigned long value); +extern unsigned int read_SH_OSCUIR_reg_value(void); +extern void write_SH_OSCUIR_reg_value(unsigned int value); +extern unsigned long read_SH_OSR0_reg_value(void); +extern void write_SH_OSR0_reg_value(unsigned long value); + +static inline virt_ctrl_cu_t read_VIRT_CTRL_CU_reg(void) +{ + virt_ctrl_cu_t virt_ctrl; + + virt_ctrl.VIRT_CTRL_CU_reg = read_VIRT_CTRL_CU_reg_value(); + return virt_ctrl; +} +static inline void write_VIRT_CTRL_CU_reg(virt_ctrl_cu_t virt_ctrl) +{ + write_VIRT_CTRL_CU_reg_value(virt_ctrl.VIRT_CTRL_CU_reg); +} + +static inline e2k_psp_lo_t read_SH_PSP_LO_reg(void) +{ + e2k_psp_lo_t psp_lo; + + psp_lo.PSP_lo_half = read_SH_PSP_LO_reg_value(); + return psp_lo; +} +static inline e2k_psp_hi_t read_SH_PSP_HI_reg(void) +{ + e2k_psp_hi_t psp_hi; + + psp_hi.PSP_hi_half = read_SH_PSP_HI_reg_value(); + return psp_hi; +} +static inline void write_SH_PSP_LO_reg(e2k_psp_lo_t psp_lo) +{ + write_SH_PSP_LO_reg_value(psp_lo.PSP_lo_half); +} +static inline void write_SH_PSP_HI_reg(e2k_psp_hi_t psp_hi) +{ + write_SH_PSP_HI_reg_value(psp_hi.PSP_hi_half); +} + +static inline e2k_pcsp_lo_t read_SH_PCSP_LO_reg(void) +{ + e2k_pcsp_lo_t pcsp_lo; + + pcsp_lo.PCSP_lo_half = read_SH_PCSP_LO_reg_value(); + return pcsp_lo; +} +static inline e2k_pcsp_hi_t read_SH_PCSP_HI_reg(void) +{ + e2k_pcsp_hi_t pcsp_hi; + + pcsp_hi.PCSP_hi_half = read_SH_PCSP_HI_reg_value(); + return pcsp_hi; +} +static inline void write_SH_PCSP_LO_reg(e2k_pcsp_lo_t pcsp_lo) +{ + write_SH_PCSP_LO_reg_value(pcsp_lo.PCSP_lo_half); +} +static inline void write_SH_PCSP_HI_reg(e2k_pcsp_hi_t pcsp_hi) +{ + write_SH_PCSP_HI_reg_value(pcsp_hi.PCSP_hi_half); +} + +static inline e2k_psp_lo_t read_BU_PSP_LO_reg(void) +{ + e2k_psp_lo_t 
psp_lo; + + psp_lo.PSP_lo_half = read_BU_PSP_LO_reg_value(); + return psp_lo; +} +static inline e2k_psp_hi_t read_BU_PSP_HI_reg(void) +{ + e2k_psp_hi_t psp_hi; + + psp_hi.PSP_hi_half = read_BU_PSP_HI_reg_value(); + return psp_hi; +} +static inline void write_BU_PSP_LO_reg(e2k_psp_lo_t psp_lo) +{ + write_BU_PSP_LO_reg_value(psp_lo.PSP_lo_half); +} +static inline void write_BU_PSP_HI_reg(e2k_psp_hi_t psp_hi) +{ + write_BU_PSP_HI_reg_value(psp_hi.PSP_hi_half); +} + +static inline e2k_pcsp_lo_t read_BU_PCSP_LO_reg(void) +{ + e2k_pcsp_lo_t pcsp_lo; + + pcsp_lo.PCSP_lo_half = read_BU_PCSP_LO_reg_value(); + return pcsp_lo; +} +static inline e2k_pcsp_hi_t read_BU_PCSP_HI_reg(void) +{ + e2k_pcsp_hi_t pcsp_hi; + + pcsp_hi.PCSP_hi_half = read_BU_PCSP_HI_reg_value(); + return pcsp_hi; +} +static inline void write_BU_PCSP_LO_reg(e2k_pcsp_lo_t pcsp_lo) +{ + write_BU_PCSP_LO_reg_value(pcsp_lo.PCSP_lo_half); +} +static inline void write_BU_PCSP_HI_reg(e2k_pcsp_hi_t pcsp_hi) +{ + write_BU_PCSP_HI_reg_value(pcsp_hi.PCSP_hi_half); +} + +static inline e2k_oscud_lo_t read_SH_OSCUD_LO_reg(void) +{ + e2k_oscud_lo_t oscud_lo; + + oscud_lo.OSCUD_lo_half = read_SH_OSCUD_LO_reg_value(); + return oscud_lo; +} +static inline e2k_oscud_hi_t read_SH_OSCUD_HI_reg(void) +{ + e2k_oscud_hi_t oscud_hi; + + oscud_hi.OSCUD_hi_half = read_SH_OSCUD_HI_reg_value(); + return oscud_hi; +} +static inline void write_SH_OSCUD_LO_reg(e2k_oscud_lo_t oscud_lo) +{ + write_SH_OSCUD_LO_reg_value(oscud_lo.OSCUD_lo_half); +} +static inline void write_SH_OSCUD_HI_reg(e2k_oscud_hi_t oscud_hi) +{ + write_SH_OSCUD_HI_reg_value(oscud_hi.OSCUD_hi_half); +} + +static inline e2k_osgd_lo_t read_SH_OSGD_LO_reg(void) +{ + e2k_osgd_lo_t osgd_lo; + + osgd_lo.OSGD_lo_half = read_SH_OSGD_LO_reg_value(); + return osgd_lo; +} +static inline e2k_osgd_hi_t read_SH_OSGD_HI_reg(void) +{ + e2k_osgd_hi_t osgd_hi; + + osgd_hi.OSGD_hi_half = read_SH_OSGD_HI_reg_value(); + return osgd_hi; +} +static inline void 
write_SH_OSGD_LO_reg(e2k_osgd_lo_t osgd_lo) +{ + write_SH_OSGD_LO_reg_value(osgd_lo.OSGD_lo_half); +} +static inline void write_SH_OSGD_HI_reg(e2k_osgd_hi_t osgd_hi) +{ + write_SH_OSGD_HI_reg_value(osgd_hi.OSGD_hi_half); +} + +static inline e2k_cutd_t read_SH_OSCUTD_reg(void) +{ + e2k_cutd_t cutd; + + cutd.CUTD_reg = read_SH_OSCUTD_reg_value(); + return cutd; +} +static inline void write_SH_OSCUTD_reg(e2k_cutd_t cutd) +{ + write_SH_OSCUTD_reg_value(cutd.CUTD_reg); +} + +static inline e2k_cuir_t read_SH_OSCUIR_reg(void) +{ + e2k_cuir_t cuir; + + cuir.CUIR_reg = read_SH_OSCUIR_reg_value(); + return cuir; +} +static inline void write_SH_OSCUIR_reg(e2k_cuir_t cuir) +{ + write_SH_OSCUIR_reg_value(cuir.CUIR_reg); +} + +static inline e2k_core_mode_t read_SH_CORE_MODE_reg(void) +{ + e2k_core_mode_t core_mode; + + core_mode.CORE_MODE_reg = read_SH_CORE_MODE_reg_value(); + return core_mode; +} +static inline void write_SH_CORE_MODE_reg(e2k_core_mode_t core_mode) +{ + write_SH_CORE_MODE_reg_value(core_mode.CORE_MODE_reg); +} + +#define READ_G_PREEMPT_TMR_REG() \ + ((e2k_g_preempt_tmr_t) NATIVE_GET_SREG_CLOSED(g_preempt_tmr)) +#define WRITE_G_PREEMPT_TMR_REG(x) \ + NATIVE_SET_SREG_CLOSED_NOEXC(g_preempt_tmr, AW(x), 5) + +#define READ_INTC_PTR_CU() NATIVE_GET_DSREG_CLOSED(intc_ptr_cu) +#define READ_INTC_INFO_CU() NATIVE_GET_DSREG_CLOSED(intc_info_cu) +#define WRITE_INTC_INFO_CU(x) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(intc_info_cu, x, 5) + +static inline void save_intc_info_cu(intc_info_cu_t *info, int *num) +{ + u64 info_ptr, i = 0; + + /* + * The read of INTC_PTR will clear the hardware pointer, + * but the subsequent reads fo INTC_INFO will increase + * it again until it reaches the same value it had before. 
+ */ + info_ptr = READ_INTC_PTR_CU(); + if (!info_ptr) { + *num = -1; + AW(info->header.lo) = 0; + AW(info->header.hi) = 0; + return; + } + + /* + * CU header should be cleared --- fg@mcst.ru + */ + AW(info->header.lo) = READ_INTC_INFO_CU(); + AW(info->header.hi) = READ_INTC_INFO_CU(); + READ_INTC_PTR_CU(); + WRITE_INTC_INFO_CU(0ULL); + WRITE_INTC_INFO_CU(0ULL); + info_ptr -= 2; + + /* + * Read intercepted events list + */ + for (; info_ptr > 0; info_ptr -= 2) { + AW(info->entry[i].lo) = READ_INTC_INFO_CU(); + info->entry[i].hi = READ_INTC_INFO_CU(); + info->entry[i].no_restore = false; + ++i; + }; + + *num = i; +} + +static inline void restore_intc_info_cu(const intc_info_cu_t *info, int num) +{ + int i; + + /* + * 1) Clear the hardware pointer + */ + READ_INTC_PTR_CU(); + if (num == -1) + return; + + /* + * 2) Write the registers + * + * CU header should be cleared --- fg@mcst.ru + */ + WRITE_INTC_INFO_CU(0ULL); + WRITE_INTC_INFO_CU(0ULL); + for (i = 0; i < num; i++) { + if (!info->entry[i].no_restore) { + WRITE_INTC_INFO_CU(AW(info->entry[i].lo)); + WRITE_INTC_INFO_CU(info->entry[i].hi); + } + } +} + +static inline void +kvm_reset_intc_info_cu_is_updated(struct kvm_vcpu *vcpu) +{ + vcpu->arch.intc_ctxt.cu_updated = false; +} +static inline void +kvm_set_intc_info_cu_is_updated(struct kvm_vcpu *vcpu) +{ + vcpu->arch.intc_ctxt.cu_updated = true; +} +static inline bool +kvm_get_intc_info_cu_is_updated(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.intc_ctxt.cu_updated; +} + +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* _E2K_KVM_CPU_HV_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/kvm/cpu_hv_regs_types.h b/arch/e2k/include/asm/kvm/cpu_hv_regs_types.h new file mode 100644 index 000000000000..4fbc21ebbc5e --- /dev/null +++ b/arch/e2k/include/asm/kvm/cpu_hv_regs_types.h @@ -0,0 +1,418 @@ +#ifndef _E2K_KVM_CPU_HV_REGS_TYPES_H_ +#define _E2K_KVM_CPU_HV_REGS_TYPES_H_ + +#ifdef __KERNEL__ + +#include + +#ifndef __ASSEMBLY__ + +typedef union 
virt_ctrl_cu { + struct { + u64 evn_c : 16; /* [15: 0] */ + u64 exc_c : 8; /* [23:16] */ + u64 glnch : 2; /* [25:24] */ + u64 __pad1 : 38; /* [63:26] */ + }; + struct { + /* env_c: */ + u64 rr_idr : 1; /* [ 0] */ + u64 rr_clkr : 1; /* [ 1] */ + u64 rr_sclkr : 1; /* [ 2] */ + u64 rr_dbg : 1; /* [ 3] */ + u64 rw_core_mode : 1; /* [ 4] */ + u64 rw_clkr : 1; /* [ 5] */ + u64 rw_sclkr : 1; /* [ 6] */ + u64 rw_sclkm3 : 1; /* [ 7] */ + u64 rw_dbg : 1; /* [ 8] */ + u64 hcem : 1; /* [ 9] */ + u64 virt : 1; /* [10] */ + u64 stop : 1; /* [11] */ + u64 evn_c_res : 4; /* [15:12] */ + /* exc_c: */ + u64 exc_instr_debug : 1; /* [16] */ + u64 exc_data_debug : 1; /* [17] */ + u64 exc_instr_page : 1; /* [18] */ + u64 exc_data_page : 1; /* [19] */ + u64 exc_mova : 1; /* [20] */ + u64 exc_interrupt : 1; /* [21] */ + u64 exc_nm_interrupt : 1; /* [22] */ + u64 exc_c_res : 1; /* [23] */ + /* glnch: */ + u64 g_th : 1; /* [24] */ + u64 tir_fz : 1; /* [25] */ + u64 tir_rst : 1; /* [26] */ + u64 __resb : 37; /* [63:27] */ + }; + u64 word; /* as entire register */ +} virt_ctrl_cu_t; +#define VIRT_CTRL_CU_evn_c evn_c /* events mask to intercept */ +#define VIRT_CTRL_CU_rr_idr rr_idr +#define VIRT_CTRL_CU_rr_clkr rr_clkr +#define VIRT_CTRL_CU_rr_sclkr rr_sclkr +#define VIRT_CTRL_CU_rr_dbg rr_dbg +#define VIRT_CTRL_CU_rw_core_mode rw_core_mode +#define VIRT_CTRL_CU_rw_clkr rw_clkr +#define VIRT_CTRL_CU_rw_sclkr rw_sclkr +#define VIRT_CTRL_CU_rw_sclkm3 rw_sclkm3 +#define VIRT_CTRL_CU_rw_dbg rw_dbg +#define VIRT_CTRL_CU_hcem hcem +#define VIRT_CTRL_CU_virt virt +#define VIRT_CTRL_CU_stop stop +#define VIRT_CTRL_CU_exc_c exc_c /* exceptions mask */ + /* to intercept */ +#define VIRT_CTRL_CU_exc_instr_debug exc_instr_debug +#define VIRT_CTRL_CU_exc_data_debug exc_data_debug +#define VIRT_CTRL_CU_exc_instr_page exc_instr_page +#define VIRT_CTRL_CU_exc_data_page exc_data_page +#define VIRT_CTRL_CU_exc_mova exc_mova +#define VIRT_CTRL_CU_exc_interrupt exc_interrupt +#define 
VIRT_CTRL_CU_exc_nm_interrupt exc_nm_interrupt +#define VIRT_CTRL_CU_glnch glnch /* modes of guest launch */ + /* instruction execution */ +#define VIRT_CTRL_CU_glnch_g_th g_th +#define VIRT_CTRL_CU_glnch_tir_fz tir_fz +#define VIRT_CTRL_CU_tir_rst tir_rst /* mode of TIR registers */ + /* restore */ +#define VIRT_CTRL_CU_reg word /* [63: 0] - entire register */ + +/* Bits mask of VIRT_CTRL_CU fields and flags */ +#define VIRT_CTRL_CU_ENV_C_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_evn_c = -1, }.word) +#define VIRT_CTRL_CU_RR_IDR_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rr_idr = 1, }.word) +#define VIRT_CTRL_CU_RR_CLKR_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rr_clkr = 1, }.word) +#define VIRT_CTRL_CU_RR_SCLKR_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rr_sclkr = 1, }.word) +#define VIRT_CTRL_CU_RR_DBG_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rr_dbg = 1, }.word) +#define VIRT_CTRL_CU_RW_CORE_MODE_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rw_core_mode = 1, }.word) +#define VIRT_CTRL_CU_RW_CLKR_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rw_clkr = 1, }.word) +#define VIRT_CTRL_CU_RW_SCLKR_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rw_sclkr = 1, }.word) +#define VIRT_CTRL_CU_RW_SCLKM3_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rw_sclkm3 = 1, }.word) +#define VIRT_CTRL_CU_RW_DBG_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rw_dbg = 1, }.word) +#define VIRT_CTRL_CU_HCEM_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_hcem = 1, }.word) +#define VIRT_CTRL_CU_VIRT_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_virt = 1, }.word) +#define VIRT_CTRL_CU_STOP_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_stop = 1, }.word) +#define VIRT_CTRL_CU_EXC_C_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_c = -1, }.word) +#define VIRT_CTRL_CU_EXC_INSTR_DEBUG_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_instr_debug = 1, }.word) +#define VIRT_CTRL_CU_EXC_DATA_DEBUG_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_data_debug = 1, }.word) +#define VIRT_CTRL_CU_EXC_INSTR_PAGE_MASK \ + 
((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_instr_page = 1, }.word) +#define VIRT_CTRL_CU_EXC_DATA_PAGE_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_data_page = 1, }.word) +#define VIRT_CTRL_CU_EXC_MOVA_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_mova = 1, }.word) +#define VIRT_CTRL_CU_EXC_INTERRUPT_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_interrupt = 1, }.word) +#define VIRT_CTRL_CU_EXC_NM_INTERRUPT_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_nm_interrupt = 1, }.word) +#define VIRT_CTRL_CU_GLNCH_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_glnch = -1, }.word) +#define VIRT_CTRL_CU_GLNCH_G_TH_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_glnch_g_th = 1, }.word) +#define VIRT_CTRL_CU_GLNCH_TIR_FZ_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_glnch_tir_fz = 1, }.word) +#define VIRT_CTRL_CU_TIR_RST_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_tir_rst = 1, }.word) + +#endif /* ! __ASSEMBLY__ */ + +#define INTC_CU_COND_EVENT_NO 0 +#define INTC_CU_COND_EVENT_MAX 16 +#define INTC_CU_COND_EXC_NO (INTC_CU_COND_EVENT_NO + \ + INTC_CU_COND_EVENT_MAX) +#define INTC_CU_COND_EXC_MAX 8 +#define INTC_CU_UNCOND_EVENT_NO (INTC_CU_COND_EXC_NO + \ + INTC_CU_COND_EXC_MAX) +#define INTC_CU_UNCOND_EVENT_MAX 8 +#define INTC_CU_EVENTS_NUM_MAX (INTC_CU_COND_EVENT_MAX + \ + INTC_CU_COND_EXC_MAX + \ + INTC_CU_UNCOND_EVENT_MAX) +typedef union { + struct { + u64 evn_c : INTC_CU_COND_EVENT_MAX; + u64 exc_c : INTC_CU_COND_EXC_MAX; + u64 evn_u : INTC_CU_UNCOND_EVENT_MAX; + u64 hi_half : 32; + }; + struct { + /* evn_c fields */ + u64 rr_idr : 1; + u64 rr_clkr : 1; + u64 rr_sclkr : 1; + u64 rr_dbg : 1; + u64 rw_core_mode : 1; + u64 rw_clkr : 1; + u64 rw_sclkr : 1; + u64 rw_sclkm3 : 1; + u64 rw_dbg : 1; + u64 hcem : 1; + u64 virt : 1; + u64 stop : 1; + u64 hret_last_wish : 1; + u64 __reserved_evn_c : 3; + /* exc_c fields */ + u64 exc_instr_debug : 1; + u64 exc_data_debug : 1; + u64 exc_instr_page : 1; + u64 exc_data_page : 1; + u64 exc_mova : 1; + u64 exc_interrupt : 1; + u64 exc_nm_interrupt : 1; + 
u64 __reserved_exc_c : 1; + /* evn_u fields */ + u64 hv_int : 1; + u64 hv_nm_int : 1; + u64 g_tmr : 1; + u64 rr : 1; + u64 rw : 1; + u64 exc_mem_error : 1; + u64 wait_trap : 1; + u64 dbg : 1; + /* high half of hdr_lo */ + u64 tir_fz : 1; + u64 __reserved : 31; + }; + u64 word; +} intc_info_cu_hdr_lo_t; + +/* evn_c fields bit # */ +#define INTC_CU_RR_IDR_NO 0 +#define INTC_CU_RR_CLKR_NO 1 +#define INTC_CU_RR_SCLKR_NO 2 +#define INTC_CU_RR_DBG_NO 3 +#define INTC_CU_RW_CORE_MODE_NO 4 +#define INTC_CU_RW_CLKR_NO 5 +#define INTC_CU_RW_SCLKR_NO 6 +#define INTC_CU_RW_SCLKM3_NO 7 +#define INTC_CU_RW_DBG_NO 8 +#define INTC_CU_HCEM_NO 9 +#define INTC_CU_VIRT_NO 10 +#define INTC_CU_STOP_NO 11 +#define INTC_CU_HRET_LAST_WISH_NO 12 + +/* INTC_INFO_CU.evn_c fields mask */ +#define intc_cu_evn_c_rr_idr_mask (1UL << INTC_CU_RR_IDR_NO) +#define intc_cu_evn_c_rr_clkr_mask (1UL << INTC_CU_RR_CLKR_NO) +#define intc_cu_evn_c_rr_sclkr_mask (1UL << INTC_CU_RR_SCLKR_NO) +#define intc_cu_evn_c_rr_dbg_mask (1UL << INTC_CU_RR_DBG_NO) +#define intc_cu_evn_c_rw_core_mode_mask (1UL << INTC_CU_RW_CORE_MODE_NO) +#define intc_cu_evn_c_rw_clkr_mask (1UL << INTC_CU_RW_CLKR_NO) +#define intc_cu_evn_c_rw_sclkr_mask (1UL << INTC_CU_RW_SCLKR_NO) +#define intc_cu_evn_c_rw_sclkm3_mask (1UL << INTC_CU_RW_SCLKM3_NO) +#define intc_cu_evn_c_rw_dbg_mask (1UL << INTC_CU_RW_DBG_NO) +#define intc_cu_evn_c_hcem_mask (1UL << INTC_CU_HCEM_NO) +#define intc_cu_evn_c_virt_mask (1UL << INTC_CU_VIRT_NO) +#define intc_cu_evn_c_stop_mask (1UL << INTC_CU_STOP_NO) +#define intc_cu_evn_c_hret_last_wish_mask (1UL << INTC_CU_HRET_LAST_WISH_NO) + +/* common mask of all 'read registers' interceptions */ +#define intc_cu_evn_c_rr_mask (intc_cu_evn_c_rr_idr_mask | \ + intc_cu_evn_c_rr_clkr_mask | \ + intc_cu_evn_c_rr_sclkr_mask | \ + intc_cu_evn_c_rr_dbg_mask) +/* common mask of all 'write registers' interceptions */ +#define intc_cu_evn_c_rw_mask (intc_cu_evn_c_rw_core_mode_mask | \ + intc_cu_evn_c_rw_clkr_mask | \ + 
intc_cu_evn_c_rw_sclkr_mask | \ + intc_cu_evn_c_rw_sclkm3_mask | \ + intc_cu_evn_c_rw_dbg_mask) +/* INTC_INFO_CU.hdr.evn_c fields mask */ +#define intc_cu_hdr_lo_rr_idr_mask \ + (intc_cu_evn_c_rr_idr_mask << INTC_CU_COND_EVENT_NO) +#define intc_cu_hdr_lo_rr_clkr_mask \ + (intc_cu_evn_c_rr_clkr_mask << INTC_CU_COND_EVENT_NO) +#define intc_cu_hdr_lo_rr_sclkr_mask \ + (intc_cu_evn_c_rr_sclkr_mask << INTC_CU_COND_EVENT_NO) +#define intc_cu_hdr_lo_rr_dbg_mask \ + (intc_cu_evn_c_rr_dbg_mask << INTC_CU_COND_EVENT_NO) +#define intc_cu_hdr_lo_rw_core_mode_mask \ + (intc_cu_evn_c_rw_core_mode_mask << INTC_CU_COND_EVENT_NO) +#define intc_cu_hdr_lo_rw_clkr_mask \ + (intc_cu_evn_c_rw_clkr_mask << INTC_CU_COND_EVENT_NO) +#define intc_cu_hdr_lo_rw_sclkr_mask \ + (intc_cu_evn_c_rw_sclkr_mask << INTC_CU_COND_EVENT_NO) +#define intc_cu_hdr_lo_rw_sclkm3_mask \ + (intc_cu_evn_c_rw_sclkm3_mask << INTC_CU_COND_EVENT_NO) +#define intc_cu_hdr_lo_rw_dbg_mask \ + (intc_cu_evn_c_rw_dbg_mask << INTC_CU_COND_EVENT_NO) +#define intc_cu_hdr_lo_hcem_mask \ + (intc_cu_evn_c_hcem_mask << INTC_CU_COND_EVENT_NO) +#define intc_cu_hdr_lo_virt_mask \ + (intc_cu_evn_c_virt_mask << INTC_CU_COND_EVENT_NO) +#define intc_cu_hdr_lo_stop_mask \ + (intc_cu_evn_c_stop_mask << INTC_CU_COND_EVENT_NO) +#define intc_cu_hdr_lo_hret_last_wish_mask \ + (intc_cu_evn_c_hret_last_wish_mask << INTC_CU_COND_EVENT_NO) + +/* common mask of all 'read registers' interceptions */ +#define intc_cu_hrd_lo_rr_mask (intc_cu_hdr_lo_rr_idr_mask | \ + intc_cu_hdr_lo_rr_clkr_mask | \ + intc_cu_hdr_lo_rr_sclkr_mask | \ + intc_cu_hdr_lo_rr_dbg_mask) +/* common mask of all 'write registers' interceptions */ +#define intc_cu_hrd_lo_rw_mask (intc_cu_hdr_lo_rw_core_mode_mask | \ + intc_cu_hdr_lo_rw_clkr_mask | \ + intc_cu_hdr_lo_rw_sclkr_mask | \ + intc_cu_hdr_lo_rw_sclkm3_mask | \ + intc_cu_hdr_lo_rw_dbg_mask) + +/* exc_c fields bit # */ +#define INTC_CU_EXC_INSTR_DEBUG_NO 0 +#define INTC_CU_EXC_DATA_DEBUG_NO 1 +#define 
INTC_CU_EXC_INSTR_PAGE_NO 2 +#define INTC_CU_EXC_DATA_PAGE_NO 3 +#define INTC_CU_EXC_MOVA_NO 4 +#define INTC_CU_EXC_INTERRUPT_NO 5 +#define INTC_CU_EXC_NM_INTERRUPT_NO 6 +/* exc_c fields mask */ +#define intc_cu_exc_c_exc_instr_debug_mask \ + (1UL << INTC_CU_EXC_INSTR_DEBUG_NO) +#define intc_cu_exc_c_exc_data_debug_mask \ + (1UL << INTC_CU_EXC_DATA_DEBUG_NO) +#define intc_cu_exc_c_exc_instr_page_mask \ + (1UL << INTC_CU_EXC_INSTR_PAGE_NO) +#define intc_cu_exc_c_exc_data_page_mask \ + (1UL << INTC_CU_EXC_DATA_PAGE_NO) +#define intc_cu_exc_c_exc_mova_mask \ + (1UL << INTC_CU_EXC_MOVA_NO) +#define intc_cu_exc_c_exc_interrupt_mask \ + (1UL << INTC_CU_EXC_INTERRUPT_NO) +#define intc_cu_exc_c_exc_nm_interrupt_mask \ + (1UL << INTC_CU_EXC_NM_INTERRUPT_NO) +/* INTC_INFO_CU.exc_c fields mask */ +#define intc_cu_hdr_lo_exc_instr_debug_mask \ + (intc_cu_exc_c_exc_instr_debug_mask << INTC_CU_COND_EXC_NO) +#define intc_cu_hdr_lo_exc_data_debug_mask \ + (intc_cu_exc_c_exc_data_debug_mask << INTC_CU_COND_EXC_NO) +#define intc_cu_hdr_lo_exc_instr_page_mask \ + (intc_cu_exc_c_exc_instr_page_mask << INTC_CU_COND_EXC_NO) +#define intc_cu_hdr_lo_exc_data_page_mask \ + (intc_cu_exc_c_exc_data_page_mask << INTC_CU_COND_EXC_NO) +#define intc_cu_hdr_lo_exc_mova_mask \ + (intc_cu_exc_c_exc_mova_mask << INTC_CU_COND_EXC_NO) +#define intc_cu_hdr_lo_exc_interrupt_mask \ + (intc_cu_exc_c_exc_interrupt_mask << INTC_CU_COND_EXC_NO) +#define intc_cu_hdr_lo_exc_nm_interrupt_mask \ + (intc_cu_exc_c_exc_nm_interrupt_mask << INTC_CU_COND_EXC_NO) + +/* evn_u fields bit # */ +#define INTC_CU_HV_INT_NO 0 +#define INTC_CU_HV_NM_INT_NO 1 +#define INTC_CU_G_TMR_NO 2 +#define INTC_CU_RR_NO 3 +#define INTC_CU_RW_NO 4 +#define INTC_CU_EXC_MEM_ERROR_NO 5 +#define INTC_CU_WAIT_TRAP_NO 6 +#define INTC_CU_DBG_NO 7 +/* evn_u fields mask */ +#define intc_cu_evn_u_hv_int_mask (1UL << INTC_CU_HV_INT_NO) +#define intc_cu_evn_u_hv_nm_int_mask (1UL << INTC_CU_HV_NM_INT_NO) +#define intc_cu_evn_u_g_tmr_mask (1UL << 
INTC_CU_G_TMR_NO) +#define intc_cu_evn_u_rr_mask (1UL << INTC_CU_RR_NO) +#define intc_cu_evn_u_rw_mask (1UL << INTC_CU_RW_NO) +#define intc_cu_evn_u_exc_mem_error_mask \ + (1UL << INTC_CU_EXC_MEM_ERROR_NO) +#define intc_cu_evn_u_wait_trap_mask (1UL << INTC_CU_WAIT_TRAP_NO) +#define intc_cu_evn_u_dbg_mask (1UL << INTC_CU_DBG_NO) +/* INT_INFO_CU.evn_u fields mask */ +#define intc_cu_hdr_lo_hv_int_mask \ + (intc_cu_evn_u_hv_int_mask << INTC_CU_UNCOND_EVENT_NO) +#define intc_cu_hdr_lo_hv_nm_int_mask \ + (intc_cu_evn_u_hv_nm_int_mask << INTC_CU_UNCOND_EVENT_NO) +#define intc_cu_hdr_lo_g_tmr_mask \ + (intc_cu_evn_u_g_tmr_mask << INTC_CU_UNCOND_EVENT_NO) +#define intc_cu_hdr_lo_rr_mask \ + (intc_cu_evn_u_rr_mask << INTC_CU_UNCOND_EVENT_NO) +#define intc_cu_hdr_lo_rw_mask \ + (intc_cu_evn_u_rw_mask << INTC_CU_UNCOND_EVENT_NO) +#define intc_cu_hdr_lo_exc_mem_error_mask \ + (intc_cu_evn_u_exc_mem_error_mask << INTC_CU_UNCOND_EVENT_NO) +#define intc_cu_hdr_lo_wait_trap_mask \ + (intc_cu_evn_u_wait_trap_mask << INTC_CU_UNCOND_EVENT_NO) +#define intc_cu_hdr_lo_dbg_mask \ + (intc_cu_evn_u_dbg_mask << INTC_CU_UNCOND_EVENT_NO) + +#if (1UL << INTC_CU_EVENTS_NUM_MAX) < intc_cu_hdr_lo_dbg_mask +#error "INTC_CU_EVENTS_NUM_MAX value is out of real events number" +#endif + +#define INTC_CU_TIR_FZ_NO 32 +#define intc_cu_hdr_lo_tir_fz_mask (1UL << INTC_CU_TIR_FZ_NO) + +typedef union { + u64 word; +} intc_info_cu_hdr_hi_t; + +typedef struct e2k_intc_info_cu_hdr { + intc_info_cu_hdr_lo_t lo; + intc_info_cu_hdr_hi_t hi; +} intc_info_cu_hdr_t; + +typedef union { + struct { + u64 event_code : 8; + u64 ch_code : 4; + u64 reg_num : 8; + u64 dst : 8; + u64 vm_dst : 3; + u64 __reserved : 33; + }; + u64 word; +} intc_info_cu_entry_lo_t; + +#define intc_cu_info_lo_get_event_code(x) ((x) & 0xff) + +/* Possible values for `INTC_INFO_CU[2 * j].event_code' */ +typedef enum info_cu_event_code { + ICE_FORCED = 0, + ICE_READ_CU = 1, + ICE_WRITE_CU = 2, + ICE_MASKED_HCALL = 3, + ICE_GLAUNCH = 4, + ICE_HRET 
= 5, +} info_cu_event_code_t; + +typedef u64 intc_info_cu_entry_hi_t; + +typedef struct e2k_intc_info_cu_entry { + intc_info_cu_entry_lo_t lo; + intc_info_cu_entry_hi_t hi; + bool no_restore; +} intc_info_cu_entry_t; + +#define INTC_INFO_CU_MAX 6 +#define INTC_INFO_CU_HDR_MAX 2 +#define INTC_INFO_CU_ENTRY_MAX (INTC_INFO_CU_MAX - INTC_INFO_CU_HDR_MAX) +#define INTC_INFO_CU_PAIRS_MAX (INTC_INFO_CU_ENTRY_MAX / 2) + +typedef struct { + intc_info_cu_hdr_t header; + intc_info_cu_entry_t entry[INTC_INFO_CU_PAIRS_MAX]; +} intc_info_cu_t; + +typedef union { + struct { + u64 tmr : 32; + u64 v : 1; + u64 __reserved : 31; + }; + u64 word; +} g_preempt_tmr_t; + +#endif /* __KERNEL__ */ + +#endif /* _E2K_KVM_CPU_HV_REGS_TYPES_H_ */ diff --git a/arch/e2k/include/asm/kvm/cpu_regs_access.h b/arch/e2k/include/asm/kvm/cpu_regs_access.h new file mode 100644 index 000000000000..8100390ba471 --- /dev/null +++ b/arch/e2k/include/asm/kvm/cpu_regs_access.h @@ -0,0 +1,1818 @@ +#ifndef _E2K_KVM_CPU_REGS_ACCESS_H_ +#define _E2K_KVM_CPU_REGS_ACCESS_H_ + +#ifdef __KERNEL__ + +#include +#include + +#include + +#ifndef __ASSEMBLY__ +#include +#include +#include +#include + +/* + * Basic macros to access virtual CPU registers status on guest.
+ */ +#define GUEST_CPU_REGS_STATUS (offsetof(kvm_vcpu_state_t, cpu) + \ + offsetof(kvm_cpu_state_t, regs_status)) +#define KVM_GET_CPU_REGS_STATUS() \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_CPU_REGS_STATUS) +#define KVM_PUT_CPU_REGS_STATUS(status) \ + E2K_STORE_GUEST_VCPU_STATE_D(GUEST_CPU_REGS_STATUS, status) +#define KVM_RESET_CPU_REGS_STATUS() \ + KVM_SET_CPU_REGS_STATUS(0) +#define KVM_PUT_UPDATED_CPU_REGS_FLAGS(flags) \ +({ \ + unsigned long regs_status = KVM_GET_CPU_REGS_STATUS(); \ + regs_status = KVM_SET_UPDATED_CPU_REGS_FLAGS(regs_status, flags); \ + KVM_PUT_CPU_REGS_STATUS(regs_status); \ +}) +#define KVM_RESET_UPDATED_CPU_REGS_FLAGS(flags) \ +({ \ + unsigned long regs_status = KVM_GET_CPU_REGS_STATUS(); \ + regs_status = KVM_INIT_UPDATED_CPU_REGS_FLAGS(regs_status); \ + KVM_PUT_CPU_REGS_STATUS(regs_status); \ +}) + +/* + * Basic functions accessing virtual CPUs registers on guest. + */ +#define GUEST_CPU_SREGS_BASE (offsetof(kvm_vcpu_state_t, cpu) + \ + offsetof(kvm_cpu_state_t, regs)) +#define GUEST_CPU_SREG(reg_name) (GUEST_CPU_SREGS_BASE + \ + (offsetof(kvm_cpu_regs_t, CPU_##reg_name))) +#define GUEST_CPU_TIR_lo(TIR_no) (GUEST_CPU_SREGS_BASE + \ + (offsetof(kvm_cpu_regs_t, CPU_TIRs)) + \ + (sizeof(e2k_tir_t) * TIR_no) + \ + (offsetof(e2k_tir_t, TIR_lo))) +#define GUEST_CPU_TIR_hi(TIR_no) (GUEST_CPU_SREGS_BASE + \ + (offsetof(kvm_cpu_regs_t, CPU_TIRs)) + \ + (sizeof(e2k_tir_t) * TIR_no) + \ + (offsetof(e2k_tir_t, TIR_hi))) +#define GUEST_CPU_SBBP(SBBP_no) (GUEST_CPU_SREGS_BASE + \ + (offsetof(kvm_cpu_regs_t, CPU_SBBP)) + \ + (sizeof(u64) * SBBP_no)) +#define GUEST_GET_CPU_SREG(reg_name) \ + E2K_LOAD_GUEST_VCPU_STATE_W(GUEST_CPU_SREG(reg_name)) +#define GUEST_GET_CPU_DSREG(reg_name) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_CPU_SREG(reg_name)) +#define GUEST_SET_CPU_SREG(reg_name, value) \ + E2K_STORE_GUEST_VCPU_STATE_W(GUEST_CPU_SREG(reg_name), value) +#define GUEST_SET_CPU_DSREG(reg_name, value) \ + E2K_STORE_GUEST_VCPU_STATE_D(GUEST_CPU_SREG(reg_name), 
value) +#define GUEST_GET_CPU_TIR_lo(TIR_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_CPU_TIR_lo(TIR_no)) +#define GUEST_GET_CPU_TIR_hi(TIR_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_CPU_TIR_hi(TIR_no)) +#define GUEST_GET_CPU_SBBP(SBBP_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_CPU_SBBP(SBBP_no)) +#define GUEST_IRQS_UNDER_UPSR() \ + offsetof(kvm_vcpu_state_t, irqs_under_upsr) + +/* + * Read virtual VCPU register with VCPU # + */ +#define KVM_READ_VCPU_ID() ((u32)GUEST_GET_CPU_SREG(VCPU_ID)) + +/* + * Read/write word Procedure Stack Hardware Top Pointer (PSHTP) + */ +#define KVM_READ_PSHTP_REG_VALUE() GUEST_GET_CPU_DSREG(PSHTP) +#define KVM_COPY_WRITE_PSHTP_REG_VALUE(PSHTP_value) \ + GUEST_SET_CPU_DSREG(PSHTP, PSHTP_value) +#define KVM_WRITE_PSHTP_REG_VALUE(PSHTP_value) \ +({ \ + KVM_COPY_WRITE_PSHTP_REG_VALUE(PSHTP_value); \ + if (IS_HV_GM()) \ + NATIVE_WRITE_PSHTP_REG_VALUE(PSHTP_value); \ +}) + +#define KVM_NV_READ_PSHTP_REG_VALUE KVM_READ_PSHTP_REG_VALUE +#define KVM_READ_PSHTP_REG() \ +({ \ + e2k_pshtp_t PSHTP; \ + PSHTP.PSHTP_reg = KVM_READ_PSHTP_REG_VALUE(); \ + PSHTP; \ +}) + +/* + * Read/write word Procedure Chain Stack Hardware Top Pointer (PCSHTP) + */ +#define KVM_READ_PCSHTP_REG_SVALUE() \ + (((e2k_pcshtp_t)GUEST_GET_CPU_SREG(PCSHTP) << \ + (32 - E2K_PCSHTP_SIZE)) >> \ + (32 - E2K_PCSHTP_SIZE)) +#define KVM_COPY_WRITE_PCSHTP_REG_SVALUE(PCSHTP_svalue) \ + GUEST_SET_CPU_DSREG(PCSHTP, PCSHTP_svalue) +#define KVM_WRITE_PCSHTP_REG_SVALUE(PCSHTP_value) \ +({ \ + KVM_COPY_WRITE_PCSHTP_REG_SVALUE(PCSHTP_value); \ + if (IS_HV_GM()) \ + NATIVE_WRITE_PCSHTP_REG_SVALUE(PCSHTP_value); \ +}) + +#define KVM_READ_PCSHTP_REG() \ +({ \ + e2k_pcshtp_t PCSHTP; \ + PCSHTP = KVM_READ_PCSHTP_REG_SVALUE(); \ + PCSHTP; \ +}) + +/* + * Read/write low/high double-word OS Compilation Unit Descriptor (OSCUD) + */ + +#define KVM_READ_OSCUD_LO_REG_VALUE() GUEST_GET_CPU_DSREG(OSCUD_lo) +#define KVM_READ_OSCUD_HI_REG_VALUE() GUEST_GET_CPU_DSREG(OSCUD_hi) +#define
BOOT_KVM_READ_OSCUD_LO_REG_VALUE() GUEST_GET_CPU_DSREG(OSCUD_lo) +#define BOOT_KVM_READ_OSCUD_HI_REG_VALUE() GUEST_GET_CPU_DSREG(OSCUD_hi) + +#define KVM_COPY_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \ + GUEST_SET_CPU_DSREG(OSCUD_lo, OSCUD_lo_value) +#define KVM_COPY_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \ + GUEST_SET_CPU_DSREG(OSCUD_hi, OSCUD_hi_value); +#define KVM_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \ +({ \ + KVM_COPY_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value); \ + if (IS_HV_GM()) \ + NATIVE_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value); \ +}) +#define KVM_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \ +({ \ + KVM_COPY_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value); \ + if (IS_HV_GM()) \ + NATIVE_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value); \ +}) + +#define BOOT_KVM_COPY_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \ + GUEST_SET_CPU_DSREG(OSCUD_lo, OSCUD_lo_value) +#define BOOT_KVM_COPY_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \ + GUEST_SET_CPU_DSREG(OSCUD_hi, OSCUD_hi_value); +#define BOOT_KVM_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \ +({ \ + BOOT_KVM_COPY_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value); \ +}) +#define BOOT_KVM_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \ +({ \ + BOOT_KVM_COPY_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value); \ +}) + +#define KVM_COPY_WRITE_OSCUD_LO_REG(OSCUD_lo) \ +({ \ + KVM_COPY_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo.OSCUD_lo_half); \ +}) +#define KVM_COPYWRITE_OSCUD_HI_REG(OSCUD_hi) \ +({ \ + KVM_COPY_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi.OSCUD_hi_half); \ +}) +#define KVM_WRITE_OSCUD_LO_REG(OSCUD_lo) \ +({ \ + KVM_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo.OSCUD_lo_half); \ +}) +#define KVM_WRITE_OSCUD_HI_REG(OSCUD_hi) \ +({ \ + KVM_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi.OSCUD_hi_half); \ +}) +#define KVM_WRITE_OSCUD_REG_VALUE(OSCUD_hi_value, OSCUD_lo_value) \ +({ \ + KVM_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value); \ + 
KVM_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value); \ +}) +#define KVM_WRITE_OSCUD_REG(OSCUD_hi, OSCUD_lo) \ +({ \ + KVM_WRITE_OSCUD_REG_VALUE(OSCUD_hi.OSCUD_hi_half, \ + OSCUD_lo.OSCUD_lo_half); \ +}) + +#define BOOT_KVM_COPY_WRITE_OSCUD_LO_REG(OSCUD_lo) \ +({ \ + BOOT_KVM_COPY_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo.OSCUD_lo_half); \ +}) +#define BOOT_KVM_COPYWRITE_OSCUD_HI_REG(OSCUD_hi) \ +({ \ + BOOT_KVM_COPY_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi.OSCUD_hi_half); \ +}) +#define BOOT_KVM_WRITE_OSCUD_LO_REG(OSCUD_lo) \ +({ \ + BOOT_KVM_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo.OSCUD_lo_half); \ +}) +#define BOOT_KVM_WRITE_OSCUD_HI_REG(OSCUD_hi) \ +({ \ + BOOT_KVM_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi.OSCUD_hi_half); \ +}) + +/* + * Read/write low/high double-word OS Globals Register (OSGD) + */ + +#define KVM_READ_OSGD_LO_REG_VALUE() GUEST_GET_CPU_DSREG(OSGD_lo) +#define KVM_READ_OSGD_HI_REG_VALUE() GUEST_GET_CPU_DSREG(OSGD_hi) +#define BOOT_KVM_READ_OSGD_LO_REG_VALUE() GUEST_GET_CPU_DSREG(OSGD_lo) +#define BOOT_KVM_READ_OSGD_HI_REG_VALUE() GUEST_GET_CPU_DSREG(OSGD_hi) + +#define KVM_COPY_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \ + GUEST_SET_CPU_DSREG(OSGD_lo, OSGD_lo_value) +#define KVM_COPY_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \ + GUEST_SET_CPU_DSREG(OSGD_hi, OSGD_hi_value) +#define BOOT_KVM_COPY_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \ + GUEST_SET_CPU_DSREG(OSGD_lo, OSGD_lo_value) +#define BOOT_KVM_COPY_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \ + GUEST_SET_CPU_DSREG(OSGD_hi, OSGD_hi_value) + +#define KVM_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \ +({ \ + KVM_COPY_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value); \ + if (IS_HV_GM()) \ + NATIVE_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value); \ +}) +#define KVM_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \ +({ \ + KVM_COPY_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value); \ + if (IS_HV_GM()) \ + NATIVE_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value); \ +}) +#define BOOT_KVM_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \ +({ \ + BOOT_KVM_COPY_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value); \ + if
(BOOT_IS_HV_GM()) \ + NATIVE_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value); \ +}) +#define BOOT_KVM_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \ +({ \ + BOOT_KVM_COPY_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value); \ +}) + +#define KVM_COPY_WRITE_OSGD_LO_REG(OSGD_lo) \ +({ \ + KVM_COPY_WRITE_OSGD_LO_REG_VALUE(OSGD_lo.OSGD_lo_half); \ +}) +#define KVM_COPY_WRITE_OSGD_HI_REG(OSGD_hi) \ +({ \ + KVM_COPY_WRITE_OSGD_HI_REG_VALUE(OSGD_hi.OSGD_hi_half); \ +}) +#define KVM_WRITE_OSGD_LO_REG(OSGD_lo) \ +({ \ + KVM_WRITE_OSGD_LO_REG_VALUE(OSGD_lo.OSGD_lo_half); \ +}) +#define KVM_WRITE_OSGD_HI_REG(OSGD_hi) \ +({ \ + KVM_WRITE_OSGD_HI_REG_VALUE(OSGD_hi.OSGD_hi_half); \ +}) + +#define BOOT_KVM_COPY_WRITE_OSGD_LO_REG(OSGD_lo) \ +({ \ + BOOT_KVM_COPY_WRITE_OSGD_LO_REG_VALUE(OSGD_lo.OSGD_lo_half); \ +}) +#define BOOT_KVM_COPY_WRITE_OSGD_HI_REG(OSGD_hi) \ +({ \ + BOOT_KVM_COPY_WRITE_OSGD_HI_REG_VALUE(OSGD_hi.OSGD_hi_half); \ +}) +#define BOOT_KVM_WRITE_OSGD_LO_REG(OSGD_lo) \ +({ \ + BOOT_KVM_WRITE_OSGD_LO_REG_VALUE(OSGD_lo.OSGD_lo_half); \ +}) +#define BOOT_KVM_WRITE_OSGD_HI_REG(OSGD_hi) \ +({ \ + BOOT_KVM_WRITE_OSGD_HI_REG_VALUE(OSGD_hi.OSGD_hi_half); \ +}) + +/* + * Read/write low/high double-word Compilation Unit Register (CUD) + */ + +#define KVM_READ_CUD_LO_REG_VALUE() GUEST_GET_CPU_DSREG(CUD_lo) +#define KVM_READ_CUD_HI_REG_VALUE() GUEST_GET_CPU_DSREG(CUD_hi) +#define BOOT_KVM_READ_CUD_LO_REG_VALUE() GUEST_GET_CPU_DSREG(CUD_lo) +#define BOOT_KVM_READ_CUD_HI_REG_VALUE() GUEST_GET_CPU_DSREG(CUD_hi) + +#define KVM_WRITE_CUD_LO_REG_VALUE(CUD_lo_value) \ +({ \ + GUEST_SET_CPU_DSREG(CUD_lo, CUD_lo_value); \ + if (IS_HV_GM()) \ + NATIVE_WRITE_CUD_LO_REG_VALUE(CUD_lo_value); \ +}) +#define KVM_WRITE_CUD_HI_REG_VALUE(CUD_hi_value) \ +({ \ + GUEST_SET_CPU_DSREG(CUD_hi, CUD_hi_value); \ + if (IS_HV_GM()) \ + NATIVE_WRITE_CUD_HI_REG_VALUE(CUD_hi_value); \ +}) + +#define BOOT_KVM_WRITE_CUD_LO_REG_VALUE(CUD_lo_value) \ +({ \ + 
GUEST_SET_CPU_DSREG(CUD_lo, CUD_lo_value); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_WRITE_CUD_LO_REG_VALUE(CUD_lo_value); \ +}) +#define BOOT_KVM_WRITE_CUD_HI_REG_VALUE(CUD_hi_value) \ +({ \ + GUEST_SET_CPU_DSREG(CUD_hi, CUD_hi_value); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_WRITE_CUD_HI_REG_VALUE(CUD_hi_value); \ +}) + +#define KVM_WRITE_CUD_LO_REG(CUD_lo) \ +({ \ + KVM_WRITE_CUD_LO_REG_VALUE(CUD_lo.CUD_lo_half); \ +}) +#define KVM_WRITE_CUD_HI_REG(CUD_hi) \ +({ \ + KVM_WRITE_CUD_HI_REG_VALUE(CUD_hi.CUD_hi_half); \ +}) +#define KVM_WRITE_CUD_REG_VALUE(CUD_hi_value, CUD_lo_value) \ +({ \ + KVM_WRITE_CUD_HI_REG_VALUE(CUD_hi_value); \ + KVM_WRITE_CUD_LO_REG_VALUE(CUD_lo_value); \ +}) +#define KVM_WRITE_CUD_REG(CUD_hi, CUD_lo) \ +({ \ + KVM_WRITE_CUD_REG_VALUE(CUD_hi.CUD_hi_half, CUD_lo.CUD_lo_half); \ +}) + +#define BOOT_KVM_WRITE_CUD_LO_REG(CUD_lo) \ +({ \ + BOOT_KVM_WRITE_CUD_LO_REG_VALUE(CUD_lo.CUD_lo_half); \ +}) +#define BOOT_KVM_WRITE_CUD_HI_REG(CUD_hi) \ +({ \ + BOOT_KVM_WRITE_CUD_HI_REG_VALUE(CUD_hi.CUD_hi_half); \ +}) + +/* + * Read/write low/high double-word Globals Register (GD) + */ + +#define KVM_READ_GD_LO_REG_VALUE() GUEST_GET_CPU_DSREG(GD_lo) +#define KVM_READ_GD_HI_REG_VALUE() GUEST_GET_CPU_DSREG(GD_hi) +#define BOOT_KVM_READ_GD_LO_REG_VALUE() GUEST_GET_CPU_DSREG(GD_lo) +#define BOOT_KVM_READ_GD_HI_REG_VALUE() GUEST_GET_CPU_DSREG(GD_hi) + +#define KVM_WRITE_GD_LO_REG_VALUE(GD_lo_value) \ +({ \ + GUEST_SET_CPU_DSREG(GD_lo, GD_lo_value); \ + if (IS_HV_GM()) \ + NATIVE_WRITE_GD_LO_REG_VALUE(GD_lo_value); \ +}) +#define KVM_WRITE_GD_HI_REG_VALUE(GD_hi_value) \ +({ \ + GUEST_SET_CPU_DSREG(GD_hi, GD_hi_value); \ + if (IS_HV_GM()) \ + NATIVE_WRITE_GD_HI_REG_VALUE(GD_hi_value); \ +}) + +#define BOOT_KVM_WRITE_GD_LO_REG_VALUE(GD_lo_value) \ +({ \ + GUEST_SET_CPU_DSREG(GD_lo, GD_lo_value); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_WRITE_GD_LO_REG_VALUE(GD_lo_value); \ +}) +#define BOOT_KVM_WRITE_GD_HI_REG_VALUE(GD_hi_value) \ +({ \ + GUEST_SET_CPU_DSREG(GD_hi, GD_hi_value); 
\ + if (BOOT_IS_HV_GM()) \ + NATIVE_WRITE_GD_HI_REG_VALUE(GD_hi_value); \ +}) +#define KVM_WRITE_GD_LO_REG(GD_lo) \ +({ \ + KVM_WRITE_GD_LO_REG_VALUE(GD_lo.GD_lo_half); \ +}) +#define KVM_WRITE_GD_HI_REG(GD_hi) \ +({ \ + KVM_WRITE_GD_HI_REG_VALUE(GD_hi.GD_hi_half); \ +}) +#define BOOT_KVM_WRITE_GD_LO_REG(GD_lo) \ +({ \ + BOOT_KVM_WRITE_GD_LO_REG_VALUE(GD_lo.GD_lo_half); \ +}) +#define BOOT_KVM_WRITE_GD_HI_REG(GD_hi) \ +({ \ + BOOT_KVM_WRITE_GD_HI_REG_VALUE(GD_hi.GD_hi_half); \ +}) + +/* + * Read/write double-word Compilation Unit Table Register (CUTD/OSCUTD) + */ +#define KVM_READ_CUTD_REG_VALUE() GUEST_GET_CPU_DSREG(CUTD) +#define KVM_READ_OSCUTD_REG_VALUE() GUEST_GET_CPU_DSREG(OSCUTD) +#define BOOT_KVM_READ_CUTD_REG_VALUE() GUEST_GET_CPU_DSREG(CUTD) +#define BOOT_KVM_READ_OSCUTD_REG_VALUE() GUEST_GET_CPU_DSREG(OSCUTD) + +#define KVM_WRITE_CUTD_REG_VALUE(CUTD_value) \ +({ \ + GUEST_SET_CPU_DSREG(CUTD, CUTD_value); \ + if (IS_HV_GM()) \ + NATIVE_NV_NOIRQ_WRITE_CUTD_REG_VALUE(CUTD_value); \ +}) +#define KVM_COPY_WRITE_OSCUTD_REG_VALUE(CUTD_value) \ + GUEST_SET_CPU_DSREG(OSCUTD, CUTD_value) +#define KVM_WRITE_OSCUTD_REG_VALUE(CUTD_value) \ +({ \ + KVM_COPY_WRITE_OSCUTD_REG_VALUE(CUTD_value); \ + if (IS_HV_GM()) \ + native_write_OSCUTD_reg_value(CUTD_value); \ +}) +#define BOOT_KVM_WRITE_CUTD_REG_VALUE(CUTD_value) \ +({ \ + GUEST_SET_CPU_DSREG(CUTD, CUTD_value); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_NV_NOIRQ_WRITE_CUTD_REG_VALUE(CUTD_value); \ +}) +#define BOOT_KVM_COPY_WRITE_OSCUTD_REG_VALUE(CUTD_value) \ + GUEST_SET_CPU_DSREG(OSCUTD, CUTD_value) +#define BOOT_KVM_WRITE_OSCUTD_REG_VALUE(CUTD_value) \ +({ \ + BOOT_KVM_COPY_WRITE_OSCUTD_REG_VALUE(CUTD_value); \ + if (BOOT_IS_HV_GM()) \ + boot_native_write_OSCUTD_reg_value(CUTD_value); \ +}) + +#define KVM_WRITE_CUTD_REG(CUTD) \ + KVM_WRITE_CUTD_REG_VALUE(CUTD.CUTD_reg) +#define KVM_COPY_WRITE_OSCUTD_REG(CUTD) \ + KVM_COPY_WRITE_OSCUTD_REG_VALUE(CUTD.CUTD_reg) +#define KVM_WRITE_OSCUTD_REG(CUTD) \ +
KVM_WRITE_OSCUTD_REG_VALUE(CUTD.CUTD_reg) +#define BOOT_KVM_WRITE_CUTD_REG(CUTD) \ + BOOT_KVM_WRITE_CUTD_REG_VALUE(CUTD.CUTD_reg) +#define BOOT_KVM_COPY_WRITE_OSCUTD_REG(CUTD) \ + BOOT_KVM_COPY_WRITE_OSCUTD_REG_VALUE(CUTD.CUTD_reg) +#define BOOT_KVM_WRITE_OSCUTD_REG(CUTD) \ + BOOT_KVM_WRITE_OSCUTD_REG_VALUE(CUTD.CUTD_reg) + +/* + * Read word Compilation Unit Index Register (CUIR/OSCUIR) + */ +#define KVM_READ_CUIR_REG_VALUE() GUEST_GET_CPU_SREG(CUIR) +#define KVM_READ_OSCUIR_REG_VALUE() GUEST_GET_CPU_SREG(OSCUIR) +#define BOOT_KVM_READ_CUIR_REG_VALUE() GUEST_GET_CPU_SREG(CUIR) +#define BOOT_KVM_READ_OSCUIR_REG_VALUE() GUEST_GET_CPU_SREG(OSCUIR) + +#define KVM_COPY_WRITE_OSCUIR_REG_VALUE(v) GUEST_SET_CPU_SREG(OSCUIR, (v)) +#define KVM_WRITE_OSCUIR_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_OSCUIR_REG_VALUE(v); \ + if (IS_HV_GM()) \ + native_write_OSCUIR_reg_value((v)); \ +}) +#define BOOT_KVM_COPY_WRITE_OSCUIR_REG_VALUE(v) GUEST_SET_CPU_SREG(OSCUIR, (v)) +#define BOOT_KVM_WRITE_OSCUIR_REG_VALUE(v) \ +({ \ + BOOT_KVM_COPY_WRITE_OSCUIR_REG_VALUE(v); \ + if (BOOT_IS_HV_GM()) \ + boot_native_write_OSCUIR_reg_value((v)); \ +}) + +/* + * Read/write low/high quad-word Procedure Stack Pointer Register (PSP) + */ + +#define KVM_READ_PSP_LO_REG_VALUE() GUEST_GET_CPU_DSREG(PSP_lo) +#define KVM_READ_PSP_HI_REG_VALUE() GUEST_GET_CPU_DSREG(PSP_hi) +#define KVM_COPY_WRITE_PSP_LO_REG_VALUE(v) GUEST_SET_CPU_DSREG(PSP_lo, v) +#define KVM_COPY_WRITE_PSP_HI_REG_VALUE(v) GUEST_SET_CPU_DSREG(PSP_hi, v) +#define KVM_WRITE_PSP_LO_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_PSP_LO_REG_VALUE(v); \ + if (IS_HV_GM()) \ + NATIVE_NV_WRITE_PSP_LO_REG_VALUE((v)); \ +}) +#define KVM_WRITE_PSP_HI_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_PSP_HI_REG_VALUE(v); \ + if (IS_HV_GM()) \ + NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG_VALUE(v); \ +}) + +#define KVM_NV_READ_PSP_LO_REG_VALUE KVM_READ_PSP_LO_REG_VALUE +#define KVM_NV_READ_PSP_HI_REG_VALUE KVM_READ_PSP_HI_REG_VALUE +#define KVM_READ_PSP_LO_REG() \ +({ \ + e2k_psp_lo_t 
PSP_lo; \ + PSP_lo.PSP_lo_half = KVM_READ_PSP_LO_REG_VALUE(); \ + PSP_lo; \ +}) +#define KVM_READ_PSP_HI_REG() \ +({ \ + e2k_psp_hi_t PSP_hi; \ + PSP_hi.PSP_hi_half = KVM_READ_PSP_HI_REG_VALUE(); \ + PSP_hi; \ +}) + +#define KVM_NV_WRITE_PSP_REG_VALUE KVM_WRITE_PSP_REG_VALUE +#define KVM_WRITE_PSP_REG_VALUE(PSP_hi_value, PSP_lo_value) \ +({ \ + KVM_WRITE_PSP_HI_REG_VALUE(PSP_hi_value); \ + KVM_WRITE_PSP_LO_REG_VALUE(PSP_lo_value); \ +}) +#define KVM_WRITE_PSP_REG(PSP_hi, PSP_lo) \ +({ \ + KVM_WRITE_PSP_REG_VALUE(PSP_hi.PSP_hi_half, PSP_lo.PSP_lo_half); \ +}) + +#define BOOT_KVM_READ_PSP_LO_REG_VALUE() GUEST_GET_CPU_DSREG(PSP_lo) +#define BOOT_KVM_READ_PSP_HI_REG_VALUE() GUEST_GET_CPU_DSREG(PSP_hi) +#define BOOT_KVM_COPY_WRITE_PSP_LO_REG_VALUE(v) GUEST_SET_CPU_DSREG(PSP_lo, v) +#define BOOT_KVM_COPY_WRITE_PSP_HI_REG_VALUE(v) GUEST_SET_CPU_DSREG(PSP_hi, v) +#define BOOT_KVM_WRITE_PSP_LO_REG_VALUE(v) \ +({ \ + BOOT_KVM_COPY_WRITE_PSP_LO_REG_VALUE(v); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_NV_WRITE_PSP_LO_REG_VALUE((v)); \ +}) +#define BOOT_KVM_WRITE_PSP_HI_REG_VALUE(v) \ +({ \ + BOOT_KVM_COPY_WRITE_PSP_HI_REG_VALUE(v); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG_VALUE(v); \ +}) +#define BOOT_KVM_WRITE_PSP_REG_VALUE(PSP_hi_value, PSP_lo_value) \ +({ \ + BOOT_KVM_WRITE_PSP_HI_REG_VALUE(PSP_hi_value); \ + BOOT_KVM_WRITE_PSP_LO_REG_VALUE(PSP_lo_value); \ +}) +#define BOOT_KVM_WRITE_PSP_REG(PSP_hi, PSP_lo) \ +({ \ + BOOT_KVM_WRITE_PSP_REG_VALUE(PSP_hi.PSP_hi_half, PSP_lo.PSP_lo_half); \ +}) + +/* + * Read/write low/high quad-word Procedure Chain Stack Pointer Register (PCSP) + */ +#define KVM_READ_PCSP_LO_REG_VALUE() GUEST_GET_CPU_DSREG(PCSP_lo) +#define KVM_READ_PCSP_HI_REG_VALUE() GUEST_GET_CPU_DSREG(PCSP_hi) +#define KVM_COPY_WRITE_PCSP_LO_REG_VALUE(v) GUEST_SET_CPU_DSREG(PCSP_lo, v) +#define KVM_COPY_WRITE_PCSP_HI_REG_VALUE(v) GUEST_SET_CPU_DSREG(PCSP_hi, v) +#define KVM_WRITE_PCSP_LO_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_PCSP_LO_REG_VALUE(v); \ + if 
(IS_HV_GM()) \ + NATIVE_NV_WRITE_PCSP_LO_REG_VALUE(v); \ +}) +#define KVM_WRITE_PCSP_HI_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_PCSP_HI_REG_VALUE(v); \ + if (IS_HV_GM()) \ + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG_VALUE(v); \ +}) + +#define KVM_NV_READ_PCSP_LO_REG_VALUE KVM_READ_PCSP_LO_REG_VALUE +#define KVM_NV_READ_PCSP_HI_REG_VALUE KVM_READ_PCSP_HI_REG_VALUE +#define KVM_READ_PCSP_LO_REG() \ +({ \ + e2k_pcsp_lo_t PCSP_lo; \ + PCSP_lo.PCSP_lo_half = KVM_READ_PCSP_LO_REG_VALUE(); \ + PCSP_lo; \ +}) +#define KVM_READ_PCSP_HI_REG() \ +({ \ + e2k_pcsp_hi_t PCSP_hi; \ + PCSP_hi.PCSP_hi_half = KVM_READ_PCSP_HI_REG_VALUE(); \ + PCSP_hi; \ +}) + +#define KVM_WRITE_PCSP_REG_VALUE(PCSP_hi_value, PCSP_lo_value) \ +({ \ + KVM_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value); \ + KVM_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value); \ +}) +#define KVM_NV_WRITE_PCSP_REG_VALUE(PCSP_hi_value, PCSP_lo_value) \ + KVM_WRITE_PCSP_REG_VALUE(PCSP_hi_value, PCSP_lo_value) +#define KVM_WRITE_PCSP_REG(PCSP_hi, PCSP_lo) \ +({ \ + KVM_WRITE_PCSP_REG_VALUE(PCSP_hi.PCSP_hi_half, PCSP_lo.PCSP_lo_half); \ +}) + +#define BOOT_KVM_READ_PCSP_LO_REG_VALUE() GUEST_GET_CPU_DSREG(PCSP_lo) +#define BOOT_KVM_READ_PCSP_HI_REG_VALUE() GUEST_GET_CPU_DSREG(PCSP_hi) +#define BOOT_KVM_COPY_WRITE_PCSP_LO_REG_VALUE(v) \ + GUEST_SET_CPU_DSREG(PCSP_lo, v) +#define BOOT_KVM_COPY_WRITE_PCSP_HI_REG_VALUE(v) \ + GUEST_SET_CPU_DSREG(PCSP_hi, v) +#define BOOT_KVM_WRITE_PCSP_LO_REG_VALUE(v) \ +({ \ + BOOT_KVM_COPY_WRITE_PCSP_LO_REG_VALUE(v); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_NV_WRITE_PCSP_LO_REG_VALUE(v); \ +}) +#define BOOT_KVM_WRITE_PCSP_HI_REG_VALUE(v) \ +({ \ + BOOT_KVM_COPY_WRITE_PCSP_HI_REG_VALUE(v); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG_VALUE(v); \ +}) +#define BOOT_KVM_WRITE_PCSP_REG_VALUE(hi_value, lo_value) \ +({ \ + BOOT_KVM_WRITE_PCSP_HI_REG_VALUE(hi_value); \ + BOOT_KVM_WRITE_PCSP_LO_REG_VALUE(lo_value); \ +}) +#define BOOT_KVM_WRITE_PCSP_REG(hi, lo) \ +({ \ + BOOT_KVM_WRITE_PCSP_REG_VALUE(hi.PCSP_hi_half, 
lo.PCSP_lo_half); \ +}) + +/* + * Read/write low/high quad-word Current Chain Register (CR0/CR1) + */ +#define KVM_COPY_READ_CR0_LO_REG_VALUE() GUEST_GET_CPU_DSREG(CR0_lo) +#define KVM_COPY_READ_CR0_HI_REG_VALUE() GUEST_GET_CPU_DSREG(CR0_hi) +#define KVM_COPY_READ_CR1_LO_REG_VALUE() GUEST_GET_CPU_DSREG(CR1_lo) +#define KVM_COPY_READ_CR1_HI_REG_VALUE() GUEST_GET_CPU_DSREG(CR1_hi) +#define KVM_READ_CR0_LO_REG_VALUE() \ +({ \ + ((IS_HV_GM()) ? NATIVE_NV_READ_CR0_LO_REG_VALUE() \ + : \ + KVM_COPY_READ_CR0_LO_REG_VALUE()); \ +}) +#define KVM_READ_CR0_HI_REG_VALUE() \ +({ \ + ((IS_HV_GM()) ? NATIVE_NV_READ_CR0_HI_REG_VALUE() \ + : \ + KVM_COPY_READ_CR0_HI_REG_VALUE()); \ +}) +#define KVM_READ_CR1_LO_REG_VALUE() \ +({ \ + ((IS_HV_GM()) ? NATIVE_NV_READ_CR1_LO_REG_VALUE() \ + : \ + KVM_COPY_READ_CR1_LO_REG_VALUE()); \ +}) +#define KVM_READ_CR1_HI_REG_VALUE() \ +({ \ + ((IS_HV_GM()) ? NATIVE_NV_READ_CR1_HI_REG_VALUE() \ + : \ + KVM_COPY_READ_CR1_HI_REG_VALUE()); \ +}) + +#define KVM_COPY_WRITE_CR0_LO_REG_VALUE(CR0_lo_value) \ + GUEST_SET_CPU_DSREG(CR0_lo, CR0_lo_value) +#define KVM_COPY_WRITE_CR0_HI_REG_VALUE(CR0_hi_value) \ + GUEST_SET_CPU_DSREG(CR0_hi, CR0_hi_value) +#define KVM_COPY_WRITE_CR1_LO_REG_VALUE(CR1_lo_value) \ + GUEST_SET_CPU_DSREG(CR1_lo, CR1_lo_value) +#define KVM_COPY_WRITE_CR1_HI_REG_VALUE(CR1_hi_value) \ + GUEST_SET_CPU_DSREG(CR1_hi, CR1_hi_value) +#define KVM_WRITE_CR0_LO_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_CR0_LO_REG_VALUE(v); \ + if (IS_HV_GM()) \ + NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG_VALUE(v); \ +}) +#define KVM_WRITE_CR0_HI_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_CR0_HI_REG_VALUE(v); \ + if (IS_HV_GM()) \ + NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG_VALUE(v); \ +}) +#define KVM_WRITE_CR1_LO_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_CR1_LO_REG_VALUE(v); \ + if (IS_HV_GM()) \ + NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG_VALUE(v); \ +}) +#define KVM_WRITE_CR1_HI_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_CR1_HI_REG_VALUE(v); \ + if (IS_HV_GM()) \ + 
NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG_VALUE(v); \ +}) + +#define KVM_NV_READ_CR0_LO_REG_VALUE() KVM_READ_CR0_LO_REG_VALUE() +#define KVM_NV_READ_CR0_HI_REG_VALUE() KVM_READ_CR0_HI_REG_VALUE() +#define KVM_NV_READ_CR1_LO_REG_VALUE() KVM_READ_CR1_LO_REG_VALUE() +#define KVM_NV_READ_CR1_HI_REG_VALUE() KVM_READ_CR1_HI_REG_VALUE() + +#define KVM_NV_NOIRQ_WRITE_CR0_LO_REG_VALUE(CR0_lo_value) \ + KVM_WRITE_CR0_LO_REG_VALUE(CR0_lo_value) +#define KVM_NV_NOIRQ_WRITE_CR0_HI_REG_VALUE(CR0_hi_value) \ + KVM_WRITE_CR0_HI_REG_VALUE(CR0_hi_value) +#define KVM_NV_NOIRQ_WRITE_CR1_LO_REG_VALUE(CR1_lo_value) \ + KVM_WRITE_CR1_LO_REG_VALUE(CR1_lo_value) +#define KVM_NV_NOIRQ_WRITE_CR1_HI_REG_VALUE(CR1_hi_value) \ + KVM_WRITE_CR1_HI_REG_VALUE(CR1_hi_value) + +/* + * Read/write double-word Control Transfer Preparation Registers + * (CTPR1/CTPR2/CTPR3) + */ +#define KVM_READ_CTPR_REG_VALUE(reg_no) GUEST_GET_CPU_DSREG(CTPR##reg_no) +#define KVM_READ_CTPR1_REG_VALUE() KVM_READ_CTPR_REG_VALUE(1) +#define KVM_READ_CTPR2_REG_VALUE() KVM_READ_CTPR_REG_VALUE(2) +#define KVM_READ_CTPR3_REG_VALUE() KVM_READ_CTPR_REG_VALUE(3) + +#define KVM_WRITE_CTPR_REG_VALUE(reg_no, CTPR_value) \ + GUEST_SET_CPU_DSREG(CTPR##reg_no, CTPR_value) +#define KVM_WRITE_CTPR1_REG_VALUE(CTPR_value) \ + KVM_WRITE_CTPR_REG_VALUE(1, CTPR_value) +#define KVM_WRITE_CTPR2_REG_VALUE(CTPR_value) \ + KVM_WRITE_CTPR_REG_VALUE(2, CTPR_value) +#define KVM_WRITE_CTPR3_REG_VALUE(CTPR_value) \ + KVM_WRITE_CTPR_REG_VALUE(3, CTPR_value) + +/* + * Read/write low/high double-word Trap Info Registers (TIRs) + */ +#define KVM_READ_TIRs_num() GUEST_GET_CPU_SREG(TIRs_num) +#define KVM_WRITE_TIRs_num(TIRs_num_value) \ + GUEST_SET_CPU_SREG(TIRs_num, TIRs_num_value) +#define KVM_READ_TIR_HI_REG_VALUE() \ +({ \ + unsigned long TIR_hi_value; \ + int TIRs_num; \ + TIRs_num = KVM_READ_TIRs_num(); \ + if (TIRs_num >= 0) { \ + TIR_hi_value = GUEST_GET_CPU_TIR_hi(TIRs_num); \ + } else { \ + TIR_hi_value = 0; \ + } \ + TIR_hi_value; \ +}) +#define 
KVM_READ_TIR_LO_REG_VALUE() \ +({ \ + unsigned long TIR_lo_value; \ + int TIRs_num; \ + TIRs_num = KVM_READ_TIRs_num(); \ + if (TIRs_num >= 0) { \ + TIR_lo_value = GUEST_GET_CPU_TIR_lo(TIRs_num); \ + TIRs_num--; \ + KVM_WRITE_TIRs_num(TIRs_num); \ + } else { \ + TIR_lo_value = 0; \ + } \ + TIR_lo_value; \ +}) + +#define KVM_WRITE_TIR_LO_REG_VALUE(TIR_lo_value) \ + KVM_WRITE_TIRs_num(-1) +#define KVM_WRITE_TIR_HI_REG_VALUE(TIR_hi_value) \ + KVM_WRITE_TIRs_num(-1) + +/* + * Read double-word Stack of Base Blocks Pointers (SBBP) + */ +#define KVM_READ_SBBP_REG_VALUE(no) GUEST_GET_CPU_SBBP(no) + +/* + * Read/write virtual deferred traps register - DTR + */ +#define KVM_READ_DTR_REG_VALUE() GUEST_GET_CPU_DSREG(DTR) + +#define KVM_WRITE_DTR_REG_VALUE(DTR_value) \ + GUEST_SET_CPU_DSREG(DTR, DTR_value) + +/* + * Read/write low/high double-word Non-Protected User Stack Descriptor + * Register (USD) + */ +#define KVM_READ_USD_LO_REG_VALUE() GUEST_GET_CPU_DSREG(USD_lo) +#define KVM_READ_USD_HI_REG_VALUE() GUEST_GET_CPU_DSREG(USD_hi) +#define KVM_COPY_WRITE_USD_LO_REG_VALUE(v) GUEST_SET_CPU_DSREG(USD_lo, v) +#define KVM_COPY_WRITE_USD_HI_REG_VALUE(v) GUEST_SET_CPU_DSREG(USD_hi, v) +#define KVM_WRITE_USD_LO_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_USD_LO_REG_VALUE(v); \ + if (IS_HV_GM()) \ + NATIVE_NV_WRITE_USD_LO_REG_VALUE(v); \ +}) +#define KVM_WRITE_USD_HI_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_USD_HI_REG_VALUE(v); \ + if (IS_HV_GM()) \ + NATIVE_NV_WRITE_USD_HI_REG_VALUE(v); \ +}) + +#define KVM_NV_WRITE_USD_LO_REG_VALUE(USD_lo_value) \ + KVM_WRITE_USD_LO_REG_VALUE(USD_lo_value) +#define KVM_NV_WRITE_USD_HI_REG_VALUE(USD_hi_value) \ + KVM_WRITE_USD_HI_REG_VALUE(USD_hi_value) +#define KVM_WRITE_USD_REG_VALUE(USD_hi_value, USD_lo_value) \ +({ \ + KVM_WRITE_USD_HI_REG_VALUE(USD_hi_value); \ + KVM_WRITE_USD_LO_REG_VALUE(USD_lo_value); \ +}) +#define KVM_NV_WRITE_USD_REG_VALUE(USD_hi_value, USD_lo_value) \ + KVM_WRITE_USD_REG_VALUE(USD_hi_value, USD_lo_value) +#define
KVM_WRITE_USD_REG(USD_hi, USD_lo) \ +({ \ + KVM_WRITE_USD_REG_VALUE(USD_hi.USD_hi_half, USD_lo.USD_lo_half); \ +}) + +#define KVM_WRITE_USBR_USD_REG_VALUE(usbr, USD_hi_value, USD_lo_value) \ +({ \ + KVM_WRITE_USBR_REG_VALUE(usbr); \ + KVM_WRITE_USD_HI_REG_VALUE(USD_hi_value); \ + KVM_WRITE_USD_LO_REG_VALUE(USD_lo_value); \ +}) +#define KVM_NV_WRITE_USBR_USD_REG_VALUE(usbr, USD_hi_value, USD_lo_value) \ + KVM_WRITE_USBR_USD_REG_VALUE(usbr, USD_hi_value, USD_lo_value) + +#define BOOT_KVM_READ_USD_LO_REG_VALUE() GUEST_GET_CPU_DSREG(USD_lo) +#define BOOT_KVM_READ_USD_HI_REG_VALUE() GUEST_GET_CPU_DSREG(USD_hi) +#define BOOT_KVM_COPY_WRITE_USD_LO_REG_VALUE(v) GUEST_SET_CPU_DSREG(USD_lo, v) +#define BOOT_KVM_COPY_WRITE_USD_HI_REG_VALUE(v) GUEST_SET_CPU_DSREG(USD_hi, v) +#define BOOT_KVM_WRITE_USD_LO_REG_VALUE(v) \ +({ \ + BOOT_KVM_COPY_WRITE_USD_LO_REG_VALUE(v); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_NV_WRITE_USD_LO_REG_VALUE(v); \ +}) +#define BOOT_KVM_WRITE_USD_HI_REG_VALUE(v) \ +({ \ + BOOT_KVM_COPY_WRITE_USD_HI_REG_VALUE(v); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_NV_WRITE_USD_HI_REG_VALUE(v); \ +}) +#define BOOT_KVM_WRITE_USD_REG_VALUE(USD_hi_value, USD_lo_value) \ +({ \ + BOOT_KVM_WRITE_USD_HI_REG_VALUE(USD_hi_value); \ + BOOT_KVM_WRITE_USD_LO_REG_VALUE(USD_lo_value); \ +}) +#define BOOT_KVM_WRITE_USD_REG(USD_hi, USD_lo) \ +({ \ + BOOT_KVM_WRITE_USD_REG_VALUE(USD_hi.USD_hi_half, USD_lo.USD_lo_half); \ +}) +#define BOOT_KVM_WRITE_USBR_USD_REG_VALUE(usbr, USD_hi_value, USD_lo_value) \ +({ \ + BOOT_KVM_WRITE_USBR_REG_VALUE(usbr); \ + BOOT_KVM_WRITE_USD_HI_REG_VALUE(USD_hi_value); \ + BOOT_KVM_WRITE_USD_LO_REG_VALUE(USD_lo_value); \ +}) + +/* + * Read/write low/high double-word Protected User Stack Descriptor + * Register (PUSD) + */ +#define KVM_READ_PUSD_LO_REG_VALUE() KVM_READ_USD_LO_REG_VALUE() +#define KVM_READ_PUSD_HI_REG_VALUE() KVM_READ_USD_HI_REG_VALUE() + +#define KVM_WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value) \ + KVM_WRITE_USD_LO_REG_VALUE(PUSD_lo_value) +#define 
KVM_WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value) \ + KVM_WRITE_USD_HI_REG_VALUE(PUSD_hi_value) + +/* + * Read/write double-word User Stacks Base Register (USBR) + */ +#define KVM_READ_SBR_REG_VALUE() GUEST_GET_CPU_DSREG(SBR) +#define KVM_COPY_WRITE_SBR_REG_VALUE(v) GUEST_SET_CPU_DSREG(SBR, v) +#define KVM_WRITE_SBR_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_SBR_REG_VALUE(v); \ + if (IS_HV_GM()) \ + NATIVE_NV_WRITE_SBR_REG_VALUE(v); \ +}) + +#define KVM_READ_USBR_REG_VALUE() KVM_READ_SBR_REG_VALUE() +#define KVM_NV_READ_SBR_REG_VALUE() KVM_READ_SBR_REG_VALUE() + +#define KVM_WRITE_USBR_REG_VALUE(USBR_value) KVM_WRITE_SBR_REG_VALUE(USBR_value) +#define KVM_NV_WRITE_SBR_REG_VALUE(SBR_value) KVM_WRITE_SBR_REG_VALUE(SBR_value) +#define KVM_WRITE_USBR_REG(USBR) \ + KVM_WRITE_USBR_REG_VALUE(USBR.USBR_reg) + +#define BOOT_KVM_READ_SBR_REG_VALUE() GUEST_GET_CPU_DSREG(SBR) +#define BOOT_KVM_COPY_WRITE_SBR_REG_VALUE(v) GUEST_SET_CPU_DSREG(SBR, v) +#define BOOT_KVM_WRITE_SBR_REG_VALUE(v) \ +({ \ + BOOT_KVM_COPY_WRITE_SBR_REG_VALUE(v); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_NV_WRITE_SBR_REG_VALUE(v); \ +}) + +#define BOOT_KVM_READ_USBR_REG_VALUE() BOOT_KVM_READ_SBR_REG_VALUE() + +#define BOOT_KVM_WRITE_USBR_REG_VALUE(v) \ + BOOT_KVM_WRITE_SBR_REG_VALUE(v) +#define BOOT_KVM_WRITE_USBR_REG(USBR) \ + BOOT_KVM_WRITE_USBR_REG_VALUE(USBR.USBR_reg) + +/* + * Read/write double-word Window Descriptor Register (WD) + */ +#define KVM_READ_WD_REG_VALUE() GUEST_GET_CPU_DSREG(WD) +#define KVM_READ_WD_REG() \ +({ \ + e2k_wd_t WD; \ + WD.WD_reg = KVM_READ_WD_REG_VALUE(); \ + WD; \ +}) + +#define KVM_WRITE_WD_REG_VALUE(WD_value) \ + GUEST_SET_CPU_DSREG(WD, WD_value) +#define KVM_WRITE_WD_REG(WD) \ + KVM_WRITE_WD_REG_VALUE(WD.WD_reg) + +/* + * Read/write double-word Loop Status Register (LSR) + */ +#define KVM_READ_LSR_REG_VALUE() GUEST_GET_CPU_DSREG(LSR) + +#define KVM_WRITE_LSR_REG_VALUE(LSR_value) \ + GUEST_SET_CPU_DSREG(LSR, LSR_value) + +/* + * Read/write double-word Initial Loop Counters Register (ILCR) 
+ */ +#define KVM_READ_ILCR_REG_VALUE() GUEST_GET_CPU_DSREG(ILCR) + +#define KVM_WRITE_ILCR_REG_VALUE(ILCR_value) \ + GUEST_SET_CPU_DSREG(ILCR, ILCR_value) + +/* + * Read/write OS register which point to current process thread info + * structure (OSR0) + */ +#define KVM_GET_OSR0_REG_VALUE() GUEST_GET_CPU_DSREG(OSR0) + +#define KVM_SET_OSR0_REG_VALUE(osr0_value) \ + GUEST_SET_CPU_DSREG(OSR0, osr0_value) + +#define KVM_READ_CURRENT_REG_VALUE() \ +({ \ + unsigned long osr0_value; \ + if (IS_HV_GM()) { \ + osr0_value = NATIVE_GET_OSR0_REG_VALUE(); \ + } else { \ + osr0_value = KVM_GET_OSR0_REG_VALUE(); \ + } \ + osr0_value; \ +}) + +#define BOOT_KVM_READ_CURRENT_REG_VALUE() KVM_GET_OSR0_REG_VALUE() +#define KVM_WRITE_CURRENT_REG_VALUE(osr0_value) \ +({ \ + KVM_SET_OSR0_REG_VALUE(osr0_value); \ + if (IS_HV_CPU_HV_MMU_KVM()) { \ + NATIVE_SET_OSR0_REG_VALUE(osr0_value); \ + } \ +}) +#define BOOT_KVM_WRITE_CURRENT_REG_VALUE(osr0_value) \ +({ \ + KVM_SET_OSR0_REG_VALUE(osr0_value); \ + if (IS_HV_CPU_HV_MMU_KVM()) { \ + NATIVE_SET_OSR0_REG_VALUE(osr0_value); \ + } \ +}) + +#define KVM_READ_CURRENT_REG() \ +({ \ + struct thread_info *TI; \ + TI = (struct thread_info *)KVM_READ_CURRENT_REG_VALUE(); \ + TI; \ +}) +#define BOOT_KVM_READ_CURRENT_REG() \ +({ \ + struct thread_info *TI; \ + TI = (struct thread_info *)BOOT_KVM_READ_CURRENT_REG_VALUE(); \ + TI; \ +}) +#define KVM_WRITE_CURRENT_REG(TI) \ + KVM_WRITE_CURRENT_REG_VALUE((unsigned long)TI) +#define BOOT_KVM_WRITE_CURRENT_REG(TI) \ + BOOT_KVM_WRITE_CURRENT_REG_VALUE((unsigned long)TI) + +/* + * Read/write OS Entries Mask (OSEM) + */ +#define KVM_READ_OSEM_REG_VALUE() GUEST_GET_CPU_SREG(OSEM) + +#define KVM_WRITE_OSEM_REG_VALUE(OSEM_value) \ + GUEST_SET_CPU_SREG(OSEM, OSEM_value) + +/* + * Read/write word Base Global Register (BGR) + */ +#define KVM_READ_BGR_REG_VALUE() GUEST_GET_CPU_SREG(BGR) + +#define KVM_WRITE_BGR_REG_VALUE(BGR_value) \ + GUEST_SET_CPU_SREG(BGR, BGR_value) + +#define 
BOOT_KVM_WRITE_BGR_REG_VALUE(BGR_value) \ +({ \ + KVM_WRITE_BGR_REG_VALUE(BGR_value); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_WRITE_BGR_REG_VALUE(BGR_value); \ +}) + +/* + * Read CPU current clock regigister (CLKR) + */ +#define KVM_READ_CLKR_REG_VALUE() NATIVE_GET_DSREG_CLOSED(clkr) + +/* + * Read/Write system clock registers (SCLKM) + */ +#define KVM_READ_SCLKR_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sclkr) +#define KVM_READ_SCLKM1_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sclkm1) +#define KVM_READ_SCLKM2_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sclkm2) +#define KVM_READ_SCLKM3_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sclkm3) + +#define KVM_WRITE_SCLKR_REG_VALUE(reg_value) \ + GUEST_SET_CPU_DSREG(SCLKR, reg_value) +#define KVM_WRITE_SCLKM1_REG_VALUE(reg_value) \ + GUEST_SET_CPU_DSREG(SCLKM1, reg_value) +#define KVM_WRITE_SCLKM2_REG_VALUE(reg_value) \ + GUEST_SET_CPU_DSREG(SCLKM2, reg_value) +#define KVM_WRITE_SCLKM3_REG_VALUE(reg_value) \ + GUEST_SET_CPU_DSREG(SCLKM3, reg_value) + +/* + * Read/Write Control Unit HardWare registers (CU_HW0/CU_HW1) + */ +#define KVM_READ_CU_HW0_REG_VALUE() NATIVE_READ_CU_HW0_REG_VALUE() +#define KVM_READ_CU_HW1_REG_VALUE() \ +({ \ + u64 reg_value = -1; \ + if (machine.get_cu_hw1 != NULL) \ + reg_value = machine.get_cu_hw1(); \ + reg_value; \ +}) + +#define KVM_WRITE_CU_HW0_REG_VALUE(reg) GUEST_SET_CPU_DSREG(CU_HW0, reg) +#define KVM_WRITE_CU_HW1_REG_VALUE(reg) GUEST_SET_CPU_DSREG(CU_HW1, reg) + +/* + * Read/write low/high double-word Recovery point register (RPR) + */ +#define KVM_READ_RPR_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(rpr.lo) +#define KVM_READ_RPR_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(rpr.hi) + +#define KVM_WRITE_RPR_LO_REG_VALUE(RPR_lo_value) \ + NATIVE_SET_DSREG_OPEN(rpr.lo, RPR_lo_value) +#define KVM_WRITE_RPR_HI_REG_VALUE(RPR_hi_value) \ + NATIVE_SET_DSREG_OPEN(rpr.hi, RPR_hi_value) + +/* + * Read double-word CPU current Instruction Pointer register (IP) + */ +#define KVM_READ_IP_REG_VALUE() NATIVE_GET_DSREG_CLOSED(ip) + +/* + * Read debug and 
monitors registers + */ +#define KVM_READ_DIBCR_REG_VALUE() NATIVE_GET_SREG_CLOSED(dibcr) +#define KVM_READ_DIBSR_REG_VALUE() NATIVE_GET_SREG_CLOSED(dibsr) +#define KVM_READ_DIMCR_REG_VALUE() NATIVE_GET_DSREG_CLOSED(dimcr) +#define KVM_READ_DIBAR0_REG_VALUE() NATIVE_GET_DSREG_OPEN(dibar0) +#define KVM_READ_DIBAR1_REG_VALUE() NATIVE_GET_DSREG_OPEN(dibar1) +#define KVM_READ_DIBAR2_REG_VALUE() NATIVE_GET_DSREG_OPEN(dibar2) +#define KVM_READ_DIBAR3_REG_VALUE() NATIVE_GET_DSREG_OPEN(dibar3) +#define KVM_READ_DIMAR0_REG_VALUE() NATIVE_GET_DSREG_OPEN(dimar0) +#define KVM_READ_DIMAR1_REG_VALUE() NATIVE_GET_DSREG_OPEN(dimar1) + +#define KVM_WRITE_DIBCR_REG_VALUE(DIBCR_value) \ + NATIVE_SET_SREG_CLOSED_NOEXC(dibcr, DIBCR_value, 4) +#define KVM_WRITE_DIBSR_REG_VALUE(DIBSR_value) \ + NATIVE_SET_SREG_CLOSED_NOEXC(dibsr, DIBSR_value, 4) +#define KVM_WRITE_DIMCR_REG_VALUE(DIMCR_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(dimcr, DIMCR_value, 4) +#define KVM_WRITE_DIBAR0_REG_VALUE(DIBAR0_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(dibar0, DIBAR0_value, 4) +#define KVM_WRITE_DIBAR1_REG_VALUE(DIBAR1_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(dibar1, DIBAR1_value, 4) +#define KVM_WRITE_DIBAR2_REG_VALUE(DIBAR2_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(dibar2, DIBAR2_value, 4) +#define KVM_WRITE_DIBAR3_REG_VALUE(DIBAR3_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(dibar3, DIBAR3_value, 4) +#define KVM_WRITE_DIMAR0_REG_VALUE(DIMAR0_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(dimar0, DIMAR0_value, 4) +#define KVM_WRITE_DIMAR1_REG_VALUE(DIMAR1_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(dimar1, DIMAR1_value, 4) + +/* + * Read/write double-word Compilation Unit Types Descriptor (TSD) + */ +#define KVM_READ_TSD_REG_VALUE() GUEST_GET_CPU_DSREG(TSD) + +#define KVM_WRITE_TSD_REG_VALUE(TSD_value) \ + GUEST_SET_CPU_DSREG(TSD, TSD_value) + +/* + * Read/write word Processor State Register (PSR) + */ +#define KVM_READ_PSR_REG_VALUE() \ +({ \ +extern void dump_stack(void); \ + unsigned long PSR_value = 
GUEST_GET_CPU_SREG(E2K_PSR); \ + unsigned long vcpu_base; \ + \ + KVM_GET_VCPU_STATE_BASE(vcpu_base); \ + if (((kvm_vcpu_state_t *)(vcpu_base))->irqs_under_upsr && \ + ((PSR_value & ~PSR_PM) == 0)) \ + dump_stack(); \ + if (IS_HV_GM()) \ + PSR_value = NATIVE_NV_READ_PSR_REG_VALUE(); \ + PSR_value; \ +}) +#define BOOT_KVM_READ_PSR_REG_VALUE() \ +({ \ + unsigned long PSR_value; \ + \ + if (BOOT_IS_HV_GM()) \ + PSR_value = NATIVE_NV_READ_PSR_REG_VALUE(); \ + else \ + PSR_value = GUEST_GET_CPU_SREG(E2K_PSR); \ + PSR_value; \ +}) + +#define KVM_ATOMIC_WRITE_PSR_REG_VALUE(PSR_value, under_upsr) \ +({ \ + KVM_DO_ATOMIC_WRITE_PSR_REG_VALUE(GUEST_VCPU_STATE_GREG, \ + GUEST_CPU_SREG(E2K_PSR), PSR_value, \ + GUEST_IRQS_UNDER_UPSR(), under_upsr); \ + trace_vcpu_psr_update(PSR_value, under_upsr); \ +}) + +#define BOOT_KVM_ATOMIC_WRITE_PSR_REG_VALUE(PSR_value, under_upsr) \ +({ \ + KVM_DO_ATOMIC_WRITE_PSR_REG_VALUE(GUEST_VCPU_STATE_GREG, \ + GUEST_CPU_SREG(E2K_PSR), PSR_value, \ + GUEST_IRQS_UNDER_UPSR(), under_upsr); \ +}) + +#define KVM_WRITE_SW_PSR_REG_VALUE(PSR_value) \ +({ \ + kvm_vcpu_state_t *vcpu_state; \ + bool under_upsr; \ + \ + KVM_GET_VCPU_STATE_BASE(vcpu_state); \ + under_upsr = vcpu_state->irqs_under_upsr; \ + if (((PSR_value) & (PSR_IE | PSR_NMIE | PSR_UIE | PSR_UNMIE)) == \ + (PSR_IE | PSR_NMIE | PSR_UIE | PSR_UNMIE)) \ + under_upsr = true; \ + if (((PSR_value) & (PSR_IE | PSR_NMIE | PSR_UIE | PSR_UNMIE)) == 0) \ + under_upsr = false; \ + KVM_ATOMIC_WRITE_PSR_REG_VALUE(PSR_value, under_upsr); \ +}) + +#define BOOT_KVM_WRITE_SW_PSR_REG_VALUE(PSR_value) \ +({ \ + kvm_vcpu_state_t *vcpu_state; \ + bool under_upsr; \ + \ + KVM_GET_VCPU_STATE_BASE(vcpu_state); \ + under_upsr = vcpu_state->irqs_under_upsr; \ + if (((PSR_value) & (PSR_IE | PSR_NMIE | PSR_UIE | PSR_UNMIE)) == \ + (PSR_IE | PSR_NMIE | PSR_UIE | PSR_UNMIE)) \ + under_upsr = true; \ + if (((PSR_value) & (PSR_IE | PSR_NMIE | PSR_UIE | PSR_UNMIE)) == 0) \ + under_upsr = false; \ + 
BOOT_KVM_ATOMIC_WRITE_PSR_REG_VALUE(PSR_value, under_upsr); \ +}) +#define KVM_WRITE_PSR_REG_VALUE(PSR_value) \ +({ \ + KVM_WRITE_SW_PSR_REG_VALUE(PSR_value); \ + if (IS_HV_GM()) \ + NATIVE_WRITE_PSR_REG_VALUE(PSR_value); \ +}) +#define BOOT_KVM_WRITE_PSR_REG_VALUE(PSR_value) \ +({ \ + BOOT_KVM_WRITE_SW_PSR_REG_VALUE(PSR_value); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_WRITE_PSR_REG_VALUE(PSR_value); \ +}) + +#define KVM_WRITE_PSR_REG(PSR) \ + KVM_WRITE_PSR_REG_VALUE((PSR).PSR_reg) + +/* + * Read/write word User Processor State Register (UPSR) + */ +#define KVM_READ_UPSR_REG_VALUE() \ +({ \ + unsigned long UPSR_value; \ + \ + if (IS_HV_GM()) \ + UPSR_value = NATIVE_NV_READ_UPSR_REG_VALUE(); \ + else \ + UPSR_value = GUEST_GET_CPU_SREG(UPSR); \ + UPSR_value; \ +}) +#define BOOT_KVM_READ_UPSR_REG_VALUE() \ +({ \ + unsigned long UPSR_value; \ + \ + if (BOOT_IS_HV_GM()) \ + UPSR_value = NATIVE_NV_READ_UPSR_REG_VALUE(); \ + else \ + UPSR_value = GUEST_GET_CPU_SREG(UPSR); \ + UPSR_value; \ +}) + +#define KVM_ATOMIC_WRITE_UPSR_REG_VALUE(UPSR_value) \ +({ \ + KVM_DO_ATOMIC_WRITE_UPSR_REG_VALUE(GUEST_VCPU_STATE_GREG, \ + GUEST_CPU_SREG(UPSR), UPSR_value); \ +}) + +#define BOOT_KVM_ATOMIC_WRITE_UPSR_REG_VALUE(UPSR_value) \ +({ \ + KVM_DO_ATOMIC_WRITE_UPSR_REG_VALUE(GUEST_VCPU_STATE_GREG, \ + GUEST_CPU_SREG(UPSR), UPSR_value); \ +}) + +#if defined(CONFIG_DIRECT_VIRQ_INJECTION) +#define KVM_WRITE_UPSR_REG_VALUE(UPSR_value) \ +({ \ + kvm_vcpu_state_t *vcpu_state; \ + bool under_upsr; \ + \ + KVM_GET_VCPU_STATE_BASE(vcpu_state); \ + under_upsr = vcpu_state->irqs_under_upsr; \ + KVM_ATOMIC_WRITE_UPSR_REG_VALUE(UPSR_value); \ + if (under_upsr && vcpu_state->lapic.virqs_num.counter) { \ + if ((UPSR_value) & UPSR_IE) { \ + trace_vcpu_upsr_update(UPSR_value, true); \ + kvm_hypervisor_inject_interrupt(); \ + } \ + } else { \ + trace_vcpu_upsr_update(UPSR_value, false); \ + } \ +}) +#define BOOT_KVM_WRITE_UPSR_REG_VALUE(UPSR_value) \ +({ \ + kvm_vcpu_state_t *vcpu_state; \ + bool 
under_upsr; \ + \ + KVM_GET_VCPU_STATE_BASE(vcpu_state); \ + under_upsr = vcpu_state->irqs_under_upsr; \ + BOOT_KVM_ATOMIC_WRITE_UPSR_REG_VALUE(UPSR_value); \ + if (under_upsr && vcpu_state->lapic.virqs_num.counter) { \ + if ((UPSR_value) & UPSR_IE) \ + HYPERVISOR_inject_interrupt(); \ + } \ +}) +#else /* ! CONFIG_DIRECT_VIRQ_INJECTION */ +#define KVM_WRITE_UPSR_REG_VALUE(UPSR_value) +#define BOOT_KVM_WRITE_UPSR_REG_VALUE(UPSR_value) +#endif /* CONFIG_DIRECT_VIRQ_INJECTION */ + +#define KVM_WRITE_UPSR_REG(UPSR) \ + KVM_WRITE_UPSR_REG_VALUE((UPSR).UPSR_reg) + +/* + * Read/write word floating point control registers (PFPFR/FPCR/FPSR) + */ +#define KVM_READ_PFPFR_REG_VALUE() NATIVE_GET_SREG_OPEN(pfpfr) +#define KVM_READ_FPCR_REG_VALUE() NATIVE_GET_SREG_OPEN(fpcr) +#define KVM_READ_FPSR_REG_VALUE() NATIVE_GET_SREG_OPEN(fpsr) + +#define KVM_WRITE_PFPFR_REG_VALUE(PFPFR_value) \ + NATIVE_SET_SREG_OPEN(pfpfr, PFPFR_value) +#define KVM_WRITE_FPCR_REG_VALUE(FPCR_value) \ + NATIVE_SET_SREG_OPEN(fpcr, FPCR_value) +#define KVM_WRITE_FPSR_REG_VALUE(FPSR_value) \ + NATIVE_SET_SREG_OPEN(fpsr, FPSR_value) + +/* + * Read/write low/high double-word Intel segments registers (xS) + */ + +#define KVM_READ_CS_LO_REG_VALUE() GUEST_GET_CPU_DSREG(CS_lo) +#define KVM_READ_CS_HI_REG_VALUE() GUEST_GET_CPU_DSREG(CS_hi) +#define KVM_READ_DS_LO_REG_VALUE() GUEST_GET_CPU_DSREG(DS_lo) +#define KVM_READ_DS_HI_REG_VALUE() GUEST_GET_CPU_DSREG(DS_hi) +#define KVM_READ_ES_LO_REG_VALUE() GUEST_GET_CPU_DSREG(ES_lo) +#define KVM_READ_ES_HI_REG_VALUE() GUEST_GET_CPU_DSREG(ES_hi) +#define KVM_READ_FS_LO_REG_VALUE() GUEST_GET_CPU_DSREG(FS_lo) +#define KVM_READ_FS_HI_REG_VALUE() GUEST_GET_CPU_DSREG(FS_hi) +#define KVM_READ_GS_LO_REG_VALUE() GUEST_GET_CPU_DSREG(GS_lo) +#define KVM_READ_GS_HI_REG_VALUE() GUEST_GET_CPU_DSREG(GS_hi) +#define KVM_READ_SS_LO_REG_VALUE() GUEST_GET_CPU_DSREG(SS_lo) +#define KVM_READ_SS_HI_REG_VALUE() GUEST_GET_CPU_DSREG(SS_hi) + +#define KVM_WRITE_CS_LO_REG_VALUE(sd) 
GUEST_SET_CPU_DSREG(CS_lo, sd) +#define KVM_WRITE_CS_HI_REG_VALUE(sd) GUEST_SET_CPU_DSREG(CS_hi, sd) +#define KVM_WRITE_DS_LO_REG_VALUE(sd) GUEST_SET_CPU_DSREG(DS_lo, sd) +#define KVM_WRITE_DS_HI_REG_VALUE(sd) GUEST_SET_CPU_DSREG(DS_hi, sd) +#define KVM_WRITE_ES_LO_REG_VALUE(sd) GUEST_SET_CPU_DSREG(ES_lo, sd) +#define KVM_WRITE_ES_HI_REG_VALUE(sd) GUEST_SET_CPU_DSREG(ES_hi, sd) +#define KVM_WRITE_FS_LO_REG_VALUE(sd) GUEST_SET_CPU_DSREG(FS_lo, sd) +#define KVM_WRITE_FS_HI_REG_VALUE(sd) GUEST_SET_CPU_DSREG(FS_hi, sd) +#define KVM_WRITE_GS_LO_REG_VALUE(sd) GUEST_SET_CPU_DSREG(GS_lo, sd) +#define KVM_WRITE_GS_HI_REG_VALUE(sd) GUEST_SET_CPU_DSREG(GS_hi, sd) +#define KVM_WRITE_SS_LO_REG_VALUE(sd) GUEST_SET_CPU_DSREG(SS_lo, sd) +#define KVM_WRITE_SS_HI_REG_VALUE(sd) GUEST_SET_CPU_DSREG(SS_hi, sd) + +/* + * Read doubleword User Processor Identification Register (IDR) + */ +#define KVM_READ_IDR_REG_VALUE() GUEST_GET_CPU_DSREG(IDR) + +/* + * Read/Write Processor Core Mode Register (CORE_MODE) + */ +#define KVM_READ_CORE_MODE_REG_VALUE() \ + GUEST_GET_CPU_DSREG(CORE_MODE) +#define KVM_WRITE_CORE_MODE_REG_VALUE(modes) \ + GUEST_SET_CPU_DSREG(CORE_MODE, modes) + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* It is native (pure) guest kernel */ + +/* + * Set flags of updated VCPU registers + */ +#define PUT_UPDATED_CPU_REGS_FLAGS(flags) \ + KVM_PUT_UPDATED_CPU_REGS_FLAGS(flags) + +#define READ_VCPU_ID() KVM_READ_VCPU_ID() + +/* + * Read/write word Procedure Stack Harware Top Pointer (PSHTP) + */ +#define READ_PSHTP_REG_VALUE() KVM_READ_PSHTP_REG_VALUE() +#define WRITE_PSHTP_REG_VALUE(PSHTP_value) \ + KVM_WRITE_PSHTP_REG_VALUE(PSHTP_value) + +/* + * Read/write word Procedure Chain Stack Harware Top Pointer (PCSHTP) + */ +#define READ_PCSHTP_REG_SVALUE() KVM_READ_PCSHTP_REG_SVALUE() +#define WRITE_PCSHTP_REG_SVALUE(PCSHTP_svalue) \ + KVM_WRITE_PCSHTP_REG_SVALUE(PCSHTP_svalue) + +/* + * Read/write low/high double-word OS Compilation Unit Descriptor (OSCUD) + */ + +#define 
READ_OSCUD_LO_REG_VALUE() KVM_READ_OSCUD_LO_REG_VALUE() +#define READ_OSCUD_HI_REG_VALUE() KVM_READ_OSCUD_HI_REG_VALUE() +#define BOOT_READ_OSCUD_LO_REG_VALUE() BOOT_KVM_READ_OSCUD_LO_REG_VALUE() +#define BOOT_READ_OSCUD_HI_REG_VALUE() BOOT_KVM_READ_OSCUD_HI_REG_VALUE() + +#define WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \ + KVM_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) +#define WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \ + KVM_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) +#define BOOT_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \ + BOOT_KVM_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) +#define BOOT_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \ + BOOT_KVM_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) + +/* + * Read/write low/hgh double-word OS Globals Register (OSGD) + */ + +#define READ_OSGD_LO_REG_VALUE() KVM_READ_OSGD_LO_REG_VALUE() +#define READ_OSGD_HI_REG_VALUE() KVM_READ_OSGD_HI_REG_VALUE() +#define BOOT_READ_OSGD_LO_REG_VALUE() KVM_READ_OSGD_LO_REG_VALUE() +#define BOOT_READ_OSGD_HI_REG_VALUE() KVM_READ_OSGD_HI_REG_VALUE() + +#define WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \ + KVM_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) +#define WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \ + KVM_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) +#define BOOT_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \ + BOOT_KVM_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) +#define BOOT_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \ + BOOT_KVM_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) +#define WRITE_OSGD_REG_VALUE(OSGD_hi_value, OSGD_lo_value) \ +do { \ + KVM_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value); \ + KVM_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value); \ +} while (0) + +/* + * Read/write low/high double-word Compilation Unit Register (CUD) + */ + +#define READ_CUD_LO_REG_VALUE() KVM_READ_CUD_LO_REG_VALUE() +#define READ_CUD_HI_REG_VALUE() KVM_READ_CUD_HI_REG_VALUE() +#define BOOT_READ_CUD_LO_REG_VALUE() KVM_READ_CUD_LO_REG_VALUE() +#define BOOT_READ_CUD_HI_REG_VALUE() KVM_READ_CUD_HI_REG_VALUE() + +#define WRITE_CUD_LO_REG_VALUE(CUD_lo_value) \ + 
KVM_WRITE_CUD_LO_REG_VALUE(CUD_lo_value) +#define WRITE_CUD_HI_REG_VALUE(CUD_hi_value) \ + KVM_WRITE_CUD_HI_REG_VALUE(CUD_hi_value) +#define BOOT_WRITE_CUD_LO_REG_VALUE(CUD_lo_value) \ + BOOT_KVM_WRITE_CUD_LO_REG_VALUE(CUD_lo_value) +#define BOOT_WRITE_CUD_HI_REG_VALUE(CUD_hi_value) \ + BOOT_KVM_WRITE_CUD_HI_REG_VALUE(CUD_hi_value) + +/* + * Read/write low/high double-word Globals Register (GD) + */ + +#define READ_GD_LO_REG_VALUE() KVM_READ_GD_LO_REG_VALUE() +#define READ_GD_HI_REG_VALUE() KVM_READ_GD_HI_REG_VALUE() +#define BOOT_READ_GD_LO_REG_VALUE() KVM_READ_GD_LO_REG_VALUE() +#define BOOT_READ_GD_HI_REG_VALUE() KVM_READ_GD_HI_REG_VALUE() + +#define WRITE_GD_LO_REG_VALUE(GD_lo_value) \ + KVM_WRITE_GD_LO_REG_VALUE(GD_lo_value) +#define WRITE_GD_HI_REG_VALUE(GD_hi_value) \ + KVM_WRITE_GD_HI_REG_VALUE(GD_hi_value) +#define BOOT_WRITE_GD_LO_REG_VALUE(GD_lo_value) \ + BOOT_KVM_WRITE_GD_LO_REG_VALUE(GD_lo_value) +#define BOOT_WRITE_GD_HI_REG_VALUE(GD_hi_value) \ + BOOT_KVM_WRITE_GD_HI_REG_VALUE(GD_hi_value) + +/* + * Read/write low/high quad-word Procedure Stack Pointer Register (PSP) + */ + +#define READ_PSP_LO_REG_VALUE() KVM_READ_PSP_LO_REG_VALUE() +#define READ_PSP_HI_REG_VALUE() KVM_READ_PSP_HI_REG_VALUE() +#define BOOT_READ_PSP_LO_REG_VALUE() KVM_READ_PSP_LO_REG_VALUE() +#define BOOT_READ_PSP_HI_REG_VALUE() KVM_READ_PSP_HI_REG_VALUE() + +#define WRITE_PSP_LO_REG_VALUE(PSP_lo_value) \ + KVM_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) +#define WRITE_PSP_HI_REG_VALUE(PSP_hi_value) \ + KVM_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) +#define BOOT_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) \ + KVM_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) +#define BOOT_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) \ + KVM_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) + +/* + * Read/write low/high quad-word Procedure Chain Stack Pointer Register (PCSP) + */ +#define READ_PCSP_LO_REG_VALUE() KVM_READ_PCSP_LO_REG_VALUE() +#define READ_PCSP_HI_REG_VALUE() KVM_READ_PCSP_HI_REG_VALUE() +#define BOOT_READ_PCSP_LO_REG_VALUE() 
KVM_READ_PCSP_LO_REG_VALUE() +#define BOOT_READ_PCSP_HI_REG_VALUE() KVM_READ_PCSP_HI_REG_VALUE() + +#define WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) \ + KVM_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) +#define WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) \ + KVM_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) +#define BOOT_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) \ + KVM_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) +#define BOOT_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) \ + KVM_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) + +/* + * Read/write low/high quad-word Current Chain Register (CR0/CR1) + */ +#define READ_CR0_LO_REG_VALUE() KVM_READ_CR0_LO_REG_VALUE() +#define READ_CR0_HI_REG_VALUE() KVM_READ_CR0_HI_REG_VALUE() +#define READ_CR1_LO_REG_VALUE() KVM_READ_CR1_LO_REG_VALUE() +#define READ_CR1_HI_REG_VALUE() KVM_READ_CR1_HI_REG_VALUE() + +#define WRITE_CR0_LO_REG_VALUE(CR0_lo_value) \ + KVM_WRITE_CR0_LO_REG_VALUE(CR0_lo_value) +#define WRITE_CR0_HI_REG_VALUE(CR0_hi_value) \ + KVM_WRITE_CR0_HI_REG_VALUE(CR0_hi_value) +#define WRITE_CR1_LO_REG_VALUE(CR1_lo_value) \ + KVM_WRITE_CR1_LO_REG_VALUE(CR1_lo_value) +#define WRITE_CR1_HI_REG_VALUE(CR1_hi_value) \ + KVM_WRITE_CR1_HI_REG_VALUE(CR1_hi_value) + +/* + * Read/write double-word Control Transfer Preparation Registers + * (CTPR1/CTPR2/CTPR3) + */ +#define READ_CTPR_REG_VALUE(reg_no) KVM_READ_CTPR_REG_VALUE(reg_no) + +#define WRITE_CTPR_REG_VALUE(reg_no, CTPR_value) \ + KVM_WRITE_CTPR_REG_VALUE(reg_no, CTPR_value) + +/* + * Read/write low/high double-word Trap Info Registers (TIRs) + */ +#define READ_TIR_LO_REG_VALUE() KVM_READ_TIR_LO_REG_VALUE() +#define READ_TIR_HI_REG_VALUE() KVM_READ_TIR_HI_REG_VALUE() + +#define WRITE_TIR_LO_REG_VALUE(TIR_lo_value) \ + KVM_WRITE_TIR_LO_REG_VALUE(TIR_lo_value) +#define WRITE_TIR_HI_REG_VALUE(TIR_hi_value) \ + KVM_WRITE_TIR_HI_REG_VALUE(TIR_hi_value) + +/* + * Read/write low/high double-word Non-Protected User Stack Descriptor + * Register (USD) + */ +#define READ_USD_LO_REG_VALUE() KVM_READ_USD_LO_REG_VALUE() +#define 
READ_USD_HI_REG_VALUE() KVM_READ_USD_HI_REG_VALUE() +#define BOOT_READ_USD_LO_REG_VALUE() KVM_READ_USD_LO_REG_VALUE() +#define BOOT_READ_USD_HI_REG_VALUE() KVM_READ_USD_HI_REG_VALUE() + +#define WRITE_USD_LO_REG_VALUE(USD_lo_value) \ + KVM_WRITE_USD_LO_REG_VALUE(USD_lo_value) +#define WRITE_USD_HI_REG_VALUE(USD_hi_value) \ + KVM_WRITE_USD_HI_REG_VALUE(USD_hi_value) +#define BOOT_WRITE_USD_LO_REG_VALUE(USD_lo_value) \ + KVM_WRITE_USD_LO_REG_VALUE(USD_lo_value) +#define BOOT_WRITE_USD_HI_REG_VALUE(USD_hi_value) \ + KVM_WRITE_USD_HI_REG_VALUE(USD_hi_value) + +/* + * Read/write low/high double-word Protected User Stack Descriptor + * Register (PUSD) + */ +#define READ_PUSD_LO_REG_VALUE() KVM_READ_PUSD_LO_REG_VALUE() +#define READ_PUSD_HI_REG_VALUE() KVM_READ_PUSD_HI_REG_VALUE() + +#define WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value) \ + KVM_WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value) +#define WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value) \ + KVM_WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value) + +/* + * Read/write double-word User Stacks Base Register (USBR) + */ +#define READ_SBR_REG_VALUE() KVM_READ_SBR_REG_VALUE() +#define READ_USBR_REG_VALUE() KVM_READ_USBR_REG_VALUE() +#define BOOT_READ_USBR_REG_VALUE() KVM_READ_USBR_REG_VALUE() +#define BOOT_READ_SBR_REG_VALUE() KVM_READ_SBR_REG_VALUE() + +#define WRITE_SBR_REG_VALUE(SBR_value) \ + KVM_WRITE_SBR_REG_VALUE(SBR_value) +#define WRITE_USBR_REG_VALUE(USBR_value) \ + KVM_WRITE_USBR_REG_VALUE(USBR_value) +#define NV_WRITE_USBR_USD_REG_VALUE(usbr, usd_hi, usd_lo) \ + KVM_NV_WRITE_USBR_USD_REG_VALUE(usbr, usd_hi, usd_lo) +#define BOOT_WRITE_USBR_REG_VALUE(USBR_value) \ + KVM_WRITE_USBR_REG_VALUE(USBR_value) +#define BOOT_WRITE_SBR_REG_VALUE(SBR_value) \ + KVM_WRITE_SBR_REG_VALUE(SBR_value) +#define BOOT_NV_WRITE_USBR_USD_REG_VALUE(usbr, usd_hi, usd_lo) \ + KVM_NV_WRITE_USBR_USD_REG_VALUE(usbr, usd_hi, usd_lo) + +/* + * Read/write double-word Window Descriptor Register (WD) + */ +#define READ_WD_REG_VALUE() KVM_READ_WD_REG_VALUE() +#define 
WRITE_WD_REG_VALUE(WD_value) KVM_WRITE_WD_REG_VALUE(WD_value) + +/* + * Read/write double-word Loop Status Register (LSR) + */ +#define READ_LSR_REG_VALUE() \ + KVM_READ_LSR_REG_VALUE() +#define WRITE_LSR_REG_VALUE(LSR_value) \ + KVM_WRITE_LSR_REG_VALUE(LSR_value) + +/* + * Read/write double-word Initial Loop Counters Register (ILCR) + */ +#define READ_ILCR_REG_VALUE() \ + KVM_READ_ILCR_REG_VALUE() +#define WRITE_ILCR_REG_VALUE(ILCR_value) \ + KVM_WRITE_ILCR_REG_VALUE(ILCR_value) + +/* + * Read/write OS register which point to current process thread info + * structure (OSR0) + */ +#define READ_CURRENT_REG_VALUE() KVM_READ_CURRENT_REG_VALUE() +#define BOOT_READ_CURRENT_REG_VALUE() BOOT_KVM_READ_CURRENT_REG_VALUE() + +#define WRITE_CURRENT_REG_VALUE(osr0_value) \ + KVM_WRITE_CURRENT_REG_VALUE(osr0_value) +#define BOOT_WRITE_CURRENT_REG_VALUE(osr0_value) \ + BOOT_KVM_WRITE_CURRENT_REG_VALUE(osr0_value) + +/* + * Read/write OS Entries Mask (OSEM) + */ +#define READ_OSEM_REG_VALUE() \ + KVM_READ_OSEM_REG_VALUE() +#define WRITE_OSEM_REG_VALUE(OSEM_value) \ + KVM_WRITE_OSEM_REG_VALUE(OSEM_value) + +/* + * Read/write word Base Global Register (BGR) + */ +#define READ_BGR_REG_VALUE() KVM_READ_BGR_REG_VALUE() +#define BOOT_READ_BGR_REG_VALUE() KVM_READ_BGR_REG_VALUE() + +#define WRITE_BGR_REG_VALUE(BGR_value) \ + KVM_WRITE_BGR_REG_VALUE(BGR_value) +#define BOOT_WRITE_BGR_REG_VALUE(BGR_value) \ + BOOT_KVM_WRITE_BGR_REG_VALUE(BGR_value) + +/* + * Read CPU current clock regigister (CLKR) + */ +#define READ_CLKR_REG_VALUE() KVM_READ_CLKR_REG_VALUE() + +/* + * Read/Write system clock registers (SCLKM) + */ +#define READ_SCLKR_REG_VALUE() KVM_READ_SCLKR_REG_VALUE() +#define READ_SCLKM1_REG_VALUE() KVM_READ_SCLKM1_REG_VALUE() +#define READ_SCLKM2_REG_VALUE() KVM_READ_SCLKM2_REG_VALUE() +#define READ_SCLKM3_REG_VALUE() KVM_READ_SCLKM3_REG_VALUE() + +#define WRITE_SCLKR_REG_VALUE(reg_value) \ + KVM_WRITE_SCLKR_REG_VALUE(reg_value) +#define WRITE_SCLKM1_REG_VALUE(reg_value) \ + 
KVM_WRITE_SCLKM1_REG_VALUE(reg_value) +#define WRITE_SCLKM2_REG_VALUE(reg_value) \ + KVM_WRITE_SCLKM2_REG_VALUE(reg_value) +#define WRITE_SCLKM3_REG_VALUE(reg_value) \ + KVM_WRITE_SCLKM3_REG_VALUE(reg_value) + +/* + * Read/Write Control Unit HardWare registers (CU_HW0/CU_HW1) + */ +#define READ_CU_HW0_REG_VALUE() KVM_READ_CU_HW0_REG_VALUE() +#define READ_CU_HW1_REG_VALUE() KVM_READ_CU_HW1_REG_VALUE() + +#define WRITE_CU_HW0_REG_VALUE(reg) KVM_WRITE_CU_HW0_REG_VALUE(reg) +#define WRITE_CU_HW1_REG_VALUE(reg) KVM_WRITE_CU_HW1_REG_VALUE(reg) + +/* + * Read/write low/high double-word Recovery point register (RPR) + */ +#define READ_RPR_LO_REG_VALUE() KVM_READ_RPR_LO_REG_VALUE() +#define READ_RPR_HI_REG_VALUE() KVM_READ_RPR_HI_REG_VALUE() +#define READ_SBBP_REG_VALUE() KVM_READ_SBBP_REG_VALUE() + +#define WRITE_RPR_LO_REG_VALUE(RPR_lo_value) \ + KVM_WRITE_RPR_LO_REG_VALUE(RPR_lo_value) +#define WRITE_RPR_HI_REG_VALUE(RPR_hi_value) \ + KVM_WRITE_RPR_HI_REG_VALUE(RPR_hi_value) + +/* + * Read double-word CPU current Instruction Pointer register (IP) + */ +#define READ_IP_REG_VALUE() KVM_READ_IP_REG_VALUE() + +/* + * Read debug and monitors regigisters + */ +#define READ_DIBCR_REG_VALUE() KVM_READ_DIBCR_REG_VALUE() +#define READ_DIBSR_REG_VALUE() KVM_READ_DIBSR_REG_VALUE() +#define READ_DIMCR_REG_VALUE() KVM_READ_DIMCR_REG_VALUE() +#define READ_DIBAR0_REG_VALUE() KVM_READ_DIBAR0_REG_VALUE() +#define READ_DIBAR1_REG_VALUE() KVM_READ_DIBAR1_REG_VALUE() +#define READ_DIBAR2_REG_VALUE() KVM_READ_DIBAR2_REG_VALUE() +#define READ_DIBAR3_REG_VALUE() KVM_READ_DIBAR3_REG_VALUE() +#define READ_DIMAR0_REG_VALUE() KVM_READ_DIMAR0_REG_VALUE() +#define READ_DIMAR1_REG_VALUE() KVM_READ_DIMAR1_REG_VALUE() + +#define WRITE_DIBCR_REG_VALUE(DIBCR_value) \ + KVM_WRITE_DIBCR_REG_VALUE(DIBCR_value) +#define WRITE_DIBSR_REG_VALUE(DIBSR_value) \ + KVM_WRITE_DIBSR_REG_VALUE(DIBSR_value) +#define WRITE_DIMCR_REG_VALUE(DIMCR_value) \ + KVM_WRITE_DIMCR_REG_VALUE(DIMCR_value) +#define 
WRITE_DIBAR0_REG_VALUE(DIBAR0_value) \ + KVM_WRITE_DIBAR0_REG_VALUE(DIBAR0_value) +#define WRITE_DIBAR1_REG_VALUE(DIBAR1_value) \ + KVM_WRITE_DIBAR1_REG_VALUE(DIBAR1_value) +#define WRITE_DIBAR2_REG_VALUE(DIBAR2_value) \ + KVM_WRITE_DIBAR2_REG_VALUE(DIBAR2_value) +#define WRITE_DIBAR3_REG_VALUE(DIBAR3_value) \ + KVM_WRITE_DIBAR3_REG_VALUE(DIBAR3_value) +#define WRITE_DIMAR0_REG_VALUE(DIMAR0_value) \ + KVM_WRITE_DIMAR0_REG_VALUE(DIMAR0_value) +#define WRITE_DIMAR1_REG_VALUE(DIMAR1_value) \ + KVM_WRITE_DIMAR1_REG_VALUE(DIMAR1_value) + +/* + * Read/write double-word Compilation Unit Table Register (CUTD) + */ +#define READ_CUTD_REG_VALUE() \ + KVM_READ_CUTD_REG_VALUE() +#define READ_OSCUTD_REG_VALUE() KVM_READ_OSCUTD_REG_VALUE() +#define WRITE_CUTD_REG_VALUE(CUTD_value) \ + KVM_WRITE_CUTD_REG_VALUE(CUTD_value) +#define WRITE_OSCUTD_REG_VALUE(OSCUTD_value) \ + KVM_WRITE_OSCUTD_REG_VALUE(OSCUTD_value) +#define BOOT_READ_CUTD_REG_VALUE() BOOT_KVM_READ_CUTD_REG_VALUE() +#define BOOT_READ_OSCUTD_REG_VALUE() BOOT_KVM_READ_OSCUTD_REG_VALUE() +#define BOOT_WRITE_CUTD_REG_VALUE(CUTD_value) \ + BOOT_KVM_WRITE_CUTD_REG_VALUE(CUTD_value) +#define BOOT_WRITE_OSCUTD_REG_VALUE(CUTD_value) \ + BOOT_KVM_WRITE_OSCUTD_REG_VALUE(CUTD_value) + +/* + * Read word Compilation Unit Index Register (CUIR) + */ +#define READ_CUIR_REG_VALUE() KVM_READ_CUIR_REG_VALUE() +#define WRITE_CUIR_REG_VALUE(v) KVM_WRITE_CUIR_REG_VALUE(v) +#define READ_OSCUIR_REG_VALUE() KVM_READ_OSCUIR_REG_VALUE() +#define WRITE_OSCUIR_REG_VALUE(v) KVM_WRITE_OSCUIR_REG_VALUE(v) +#define BOOT_READ_CUIR_REG_VALUE() BOOT_KVM_READ_CUIR_REG_VALUE() +#define BOOT_WRITE_CUIR_REG_VALUE(v) BOOT_KVM_WRITE_CUIR_REG_VALUE(v) +#define BOOT_READ_OSCUIR_REG_VALUE() BOOT_KVM_READ_OSCUIR_REG_VALUE() +#define BOOT_WRITE_OSCUIR_REG_VALUE(v) BOOT_KVM_WRITE_OSCUIR_REG_VALUE(v) + +/* + * Read/write double-word Compilation Unit Types Descriptor (TSD) + */ +#define READ_TSD_REG_VALUE() \ + KVM_READ_TSD_REG_VALUE() +#define 
WRITE_TSD_REG_VALUE(TSD_value) \ + KVM_WRITE_TSD_REG_VALUE(TSD_value) + +/* + * Read/write double-word Type Descriptor (TD) and current Type Register (TR) + */ +#define READ_TD_REG_VALUE() KVM_READ_TD_REG_VALUE() +#define READ_TR_REG_VALUE() KVM_READ_TR_REG_VALUE() + +#define WRITE_TD_REG_VALUE(TD_value) KVM_WRITE_TD_REG_VALUE(TD_value) +#define WRITE_TR_REG_VALUE(TR_value) KVM_WRITE_TR_REG_VALUE(TR_value) + +/* + * Read/write word Processor State Register (PSR) + */ +#define READ_PSR_REG_VALUE() KVM_READ_PSR_REG_VALUE() +#define BOOT_READ_PSR_REG_VALUE() BOOT_KVM_READ_PSR_REG_VALUE() + +#define WRITE_PSR_REG_VALUE(PSR_value) \ + KVM_WRITE_PSR_REG_VALUE(PSR_value) +#define BOOT_WRITE_PSR_REG_VALUE(PSR_value) \ + BOOT_KVM_WRITE_PSR_REG_VALUE(PSR_value) +#define KVM_WRITE_PSR_IRQ_BARRIER(PSR_value) \ + KVM_WRITE_PSR_REG_VALUE(PSR_value) +#define WRITE_PSR_IRQ_BARRIER(PSR_value) \ + KVM_WRITE_PSR_IRQ_BARRIER(PSR_value) + +/* + * Read/write word User Processor State Register (UPSR) + */ +#define READ_UPSR_REG_VALUE() KVM_READ_UPSR_REG_VALUE() +#define BOOT_READ_UPSR_REG_VALUE() BOOT_KVM_READ_UPSR_REG_VALUE() + +#define WRITE_UPSR_REG_VALUE(UPSR_value) \ + KVM_WRITE_UPSR_REG_VALUE(UPSR_value) +#define BOOT_WRITE_UPSR_REG_VALUE(UPSR_value) \ + BOOT_KVM_WRITE_UPSR_REG_VALUE(UPSR_value) +#define WRITE_UPSR_IRQ_BARRIER(UPSR_value) \ + KVM_WRITE_UPSR_REG_VALUE(UPSR_value) + +/* + * Read/write word floating point control registers (PFPFR/FPCR/FPSR) + */ +#define READ_PFPFR_REG_VALUE() KVM_READ_PFPFR_REG_VALUE() +#define READ_FPCR_REG_VALUE() KVM_READ_FPCR_REG_VALUE() +#define READ_FPSR_REG_VALUE() KVM_READ_FPSR_REG_VALUE() + +#define WRITE_PFPFR_REG_VALUE(PFPFR_value) \ + KVM_WRITE_PFPFR_REG_VALUE(PFPFR_value) +#define WRITE_FPCR_REG_VALUE(FPCR_value) \ + KVM_WRITE_FPCR_REG_VALUE(FPCR_value) +#define WRITE_FPSR_REG_VALUE(FPSR_value) \ + KVM_WRITE_FPSR_REG_VALUE(FPSR_value) + +/* + * Read/write low/high double-word Intel segments registers (xS) + */ + +#define 
READ_CS_LO_REG_VALUE() KVM_READ_CS_LO_REG_VALUE() +#define READ_CS_HI_REG_VALUE() KVM_READ_CS_HI_REG_VALUE() +#define READ_DS_LO_REG_VALUE() KVM_READ_DS_LO_REG_VALUE() +#define READ_DS_HI_REG_VALUE() KVM_READ_DS_HI_REG_VALUE() +#define READ_ES_LO_REG_VALUE() KVM_READ_ES_LO_REG_VALUE() +#define READ_ES_HI_REG_VALUE() KVM_READ_ES_HI_REG_VALUE() +#define READ_FS_LO_REG_VALUE() KVM_READ_FS_LO_REG_VALUE() +#define READ_FS_HI_REG_VALUE() KVM_READ_FS_HI_REG_VALUE() +#define READ_GS_LO_REG_VALUE() KVM_READ_GS_LO_REG_VALUE() +#define READ_GS_HI_REG_VALUE() KVM_READ_GS_HI_REG_VALUE() +#define READ_SS_LO_REG_VALUE() KVM_READ_SS_LO_REG_VALUE() +#define READ_SS_HI_REG_VALUE() KVM_READ_SS_HI_REG_VALUE() + +#define WRITE_CS_LO_REG_VALUE(sd) KVM_WRITE_CS_LO_REG_VALUE(sd) +#define WRITE_CS_HI_REG_VALUE(sd) KVM_WRITE_CS_HI_REG_VALUE(sd) +#define WRITE_DS_LO_REG_VALUE(sd) KVM_WRITE_DS_LO_REG_VALUE(sd) +#define WRITE_DS_HI_REG_VALUE(sd) KVM_WRITE_DS_HI_REG_VALUE(sd) +#define WRITE_ES_LO_REG_VALUE(sd) KVM_WRITE_ES_LO_REG_VALUE(sd) +#define WRITE_ES_HI_REG_VALUE(sd) KVM_WRITE_ES_HI_REG_VALUE(sd) +#define WRITE_FS_LO_REG_VALUE(sd) KVM_WRITE_FS_LO_REG_VALUE(sd) +#define WRITE_FS_HI_REG_VALUE(sd) KVM_WRITE_FS_HI_REG_VALUE(sd) +#define WRITE_GS_LO_REG_VALUE(sd) KVM_WRITE_GS_LO_REG_VALUE(sd) +#define WRITE_GS_HI_REG_VALUE(sd) KVM_WRITE_GS_HI_REG_VALUE(sd) +#define WRITE_SS_LO_REG_VALUE(sd) KVM_WRITE_SS_LO_REG_VALUE(sd) +#define WRITE_SS_HI_REG_VALUE(sd) KVM_WRITE_SS_HI_REG_VALUE(sd) + +/* + * Read doubleword User Processor Identification Register (IDR) + */ +#define READ_IDR_REG_VALUE() KVM_READ_IDR_REG_VALUE() +#define BOOT_READ_IDR_REG_VALUE() KVM_READ_IDR_REG_VALUE() + +/* + * Read/Write Processor Core Mode Register (CORE_MODE) and + */ +#define READ_CORE_MODE_REG_VALUE() \ + KVM_READ_CORE_MODE_REG_VALUE() +#define BOOT_READ_CORE_MODE_REG_VALUE() \ + KVM_READ_CORE_MODE_REG_VALUE() +#define WRITE_CORE_MODE_REG_VALUE(modes) \ + KVM_WRITE_CORE_MODE_REG_VALUE(modes) +#define 
BOOT_WRITE_CORE_MODE_REG_VALUE(modes) \ + KVM_WRITE_CORE_MODE_REG_VALUE(modes) + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* _E2K_KVM_CPU_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/kvm/csd_lock.h b/arch/e2k/include/asm/kvm/csd_lock.h new file mode 100644 index 000000000000..36b9998eeece --- /dev/null +++ b/arch/e2k/include/asm/kvm/csd_lock.h @@ -0,0 +1,43 @@ +#ifndef _ASM_E2K_KVM_CSD_LOCK_H +#define _ASM_E2K_KVM_CSD_LOCK_H +/* + * This file implements on host the arch-dependent parts of kvm guest + * csd_lock/csd_unlock functions to serialize access to per-cpu csd resources + * + * Copyright 2016 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#include +#include +#include +#include +#include + +#ifdef CONFIG_SMP + +#include +#include + +typedef struct csd_lock_waiter { + struct list_head wait_list; + struct kvm_vcpu *vcpu; + struct task_struct *task; + void *lock; +} csd_lock_waiter_t; + +/* max number of csd lock waiters structures: */ +/* on each VCPU 2 structures - current and next */ +#define KVM_MAX_CSD_LOCK_FREE_NUM (KVM_MAX_VCPUS * 2) + +extern int kvm_guest_csd_lock_ctl(struct kvm_vcpu *vcpu, + csd_ctl_t csd_ctl_no, void *lock); + +extern int kvm_guest_csd_lock_init(struct kvm *kvm); +extern void kvm_guest_csd_lock_destroy(struct kvm *kvm); + +#else /* ! CONFIG_SMP */ +#define kvm_guest_csd_lock_ctl(vcpu, csd_ctl_no, lock) (-ENOSYS) +#define kvm_guest_csd_lock_init(kvm) (0) +#define kvm_guest_csd_lock_destroy(kvm) +#endif /* CONFIG_SMP */ +#endif /* _ASM_E2K_KVM_CSD_LOCK_H */ \ No newline at end of file diff --git a/arch/e2k/include/asm/kvm/debug.h b/arch/e2k/include/asm/kvm/debug.h new file mode 100644 index 000000000000..9c99d454699e --- /dev/null +++ b/arch/e2k/include/asm/kvm/debug.h @@ -0,0 +1,122 @@ +/* + * KVM guest kernel processes support + * Copyright 2011 Salavat S. 
Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_DEBUG_H +#define _E2K_KVM_DEBUG_H + +/* do not include this header directly, only through asm/e2k_debug.h */ + +#include +#include + +extern bool kvm_debug; + +/* + * Some definitions to print/dump/show stacks + */ + +extern e2k_addr_t kvm_get_guest_phys_addr(struct task_struct *task, + e2k_addr_t virt); +extern void kvm_print_all_vm_stacks(void); +extern void kvm_print_vcpu_stack(struct kvm_vcpu *vcpu); +extern void kvm_dump_guest_stack(struct task_struct *task, + stack_regs_t *const regs, bool show_reg_window); + +#define IS_GUEST_USER_ADDR(task, addr) \ + (((e2k_addr_t)(addr)) < GUEST_TASK_SIZE) +#define GUEST_GET_PHYS_ADDR(task, addr) \ +({ \ + e2k_addr_t phys; \ + if (IS_GUEST_USER_ADDR(task, addr)) \ + phys = (unsigned long)user_address_to_pva(task, addr); \ + else \ + phys = (unsigned long)kernel_address_to_pva(addr); \ + phys; \ +}) + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native guest kernel */ +#include +#else /* CONFIG_VIRTUALIZATION && ! 
CONFIG_KVM_GUEST_KERNEL */ +/* it is native host kernel with virtualization support */ +/* or it is paravirtualized host and guest kernel */ +#define GET_PHYS_ADDR(task, addr) \ +({ \ + struct thread_info *ti = task_thread_info(task); \ + e2k_addr_t phys; \ + \ + if (paravirt_enabled() && !IS_HV_GM()) { \ + /* it is guest kernel or user address */ \ + phys = GUEST_GET_PHYS_ADDR(task, addr); \ + } else if (!test_ti_thread_flag(ti, TIF_VIRTUALIZED_GUEST)) { \ + /* it is native kernel or user process of host */ \ + phys = NATIVE_GET_PHYS_ADDR(task, addr); \ + } else { \ + /* it is virtual CPU process of host and it can run */ \ + /* host kernel (hypercall or trap), guest kernel */ \ + /* or guest user */ \ + phys = kvm_get_guest_phys_addr(task, addr); \ + } \ + phys; \ +}) +#define debug_guest_regs(task) \ + (paravirt_enabled() && !IS_HV_GM() || \ + test_ti_thread_flag(task_thread_info(task), \ + TIF_VIRTUALIZED_GUEST)) +#define get_cpu_type_name() \ + ((paravirt_enabled()) ? "VCPU" : "CPU") + +static inline void print_all_guest_stacks(void) +{ + kvm_print_all_vm_stacks(); +} +static inline void print_guest_vcpu_stack(struct kvm_vcpu *vcpu) +{ + kvm_print_vcpu_stack(vcpu); +} +static inline void +print_guest_stack(struct task_struct *task, + stack_regs_t *const regs, bool show_reg_window) +{ + kvm_dump_guest_stack(task, regs, show_reg_window); +} +#include +static inline void +host_ftrace_stop(void) +{ + if (paravirt_enabled()) + HYPERVISOR_ftrace_stop(); +} +static inline void +host_ftrace_dump(void) +{ + if (paravirt_enabled()) + HYPERVISOR_ftrace_dump(); +} + +#include + +/* Read instruction word (two syllables) from IP address */ +static inline unsigned long +read_instr_on_IP(e2k_addr_t ip, e2k_addr_t phys_ip) +{ + if (!paravirt_enabled() || IS_HV_GM()) + return native_read_instr_on_IP(ip, phys_ip); + else + return kvm_read_instr_on_IP(ip, phys_ip); +} +/* Write modified instruction word at IP address */ +static inline void +modify_instr_on_IP(e2k_addr_t ip, 
e2k_addr_t phys_ip, + unsigned long instr_word) +{ + if (!paravirt_enabled() || IS_HV_GM()) + native_modify_instr_on_IP(ip, phys_ip, instr_word); + else + kvm_modify_instr_on_IP(ip, phys_ip, instr_word); +} +#endif /* ! CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! _E2K_KVM_DEBUG_H */ diff --git a/arch/e2k/include/asm/kvm/gmmu_context.h b/arch/e2k/include/asm/kvm/gmmu_context.h new file mode 100644 index 000000000000..eaa0114612ed --- /dev/null +++ b/arch/e2k/include/asm/kvm/gmmu_context.h @@ -0,0 +1,340 @@ +/* + * KVM guest kernel virtual space context support + * Copyright 2016 Salavat S. Gilyazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_GMMU_CONTEXT_H +#define _E2K_KVM_GMMU_CONTEXT_H + +#include +#include +#include +#include +#include +#include +#include + +#undef DEBUG_KVM_SWITCH_MODE +#undef DebugKVMSW +#define DEBUG_KVM_SWITCH_MODE 0 /* switch mm debugging */ +#define DebugKVMSW(fmt, args...) \ +({ \ + if (DEBUG_KVM_SWITCH_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#define GUEST_USER_PTRS_PER_PGD (GUEST_PAGE_OFFSET / PGDIR_SIZE) +#define GUEST_KERNEL_PGD_PTRS_START GUEST_USER_PTRS_PER_PGD +#define GUEST_KERNEL_PGD_PTRS_END (GUEST_KERNEL_MEM_END / PGDIR_SIZE) +#define GUEST_KERNEL_PTRS_PER_PGD (GUEST_KERNEL_PGD_PTRS_END - \ + GUEST_KERNEL_PGD_PTRS_START) +#define HOST_USER_PTRS_PER_PGD (HOST_PAGE_OFFSET / PGDIR_SIZE) + +#ifdef CONFIG_VIRTUALIZATION + +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT +static inline void +copy_guest_user_pgd_to_kernel_root_pt(pgd_t *user_pgd) +{ + KVM_BUG_ON(MMU_IS_SEPARATE_PT()); + copy_user_pgd_to_kernel_pgd_range(cpu_kernel_root_pt, user_pgd, + 0, GUEST_USER_PTRS_PER_PGD); +} +static inline void +copy_guest_kernel_pgd_to_kernel_root_pt(pgd_t *user_pgd) +{ + KVM_BUG_ON(MMU_IS_SEPARATE_PT()); + copy_user_pgd_to_kernel_pgd_range(cpu_kernel_root_pt, user_pgd, + GUEST_KERNEL_PGD_PTRS_START, + GUEST_KERNEL_PGD_PTRS_END); +} +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ +static inline int +kvm_init_new_context(struct 
kvm *kvm, gmm_struct_t *gmm) +{ + /* current cui of guest user will be inited later while */ + /* switch to new guest user process */ + __init_new_context(NULL, NULL, &gmm->context); + return 0; +} + +#ifdef CONFIG_KVM_HV_MMU +static inline pgd_t * +kvm_mmu_get_init_gmm_root(struct kvm *kvm) +{ + GTI_BUG_ON(pv_mmu_get_init_gmm(kvm) == NULL); + if (!VALID_PAGE(pv_mmu_get_init_gmm(kvm)->root_hpa)) + return NULL; + return (pgd_t *)__va(pv_mmu_get_init_gmm(kvm)->root_hpa); +} +static inline void +kvm_mmu_set_init_gmm_root(struct kvm_vcpu *vcpu, hpa_t root) +{ + gmm_struct_t *gmm = pv_mmu_get_init_gmm(vcpu->kvm); + gpa_t root_gpa; + + if (gmm == NULL) + return; + KVM_BUG_ON(VALID_PAGE(gmm->root_hpa)); + if (VALID_PAGE(root)) { + gmm->root_hpa = root; + } + if (is_sep_virt_spaces(vcpu)) { + root_gpa = kvm_get_space_type_guest_os_root(vcpu); + } else { + root_gpa = kvm_get_space_type_guest_u_root(vcpu); + } + gmm->u_pptb = vcpu->arch.mmu.get_vcpu_u_pptb(vcpu); + gmm->os_pptb = vcpu->arch.mmu.get_vcpu_os_pptb(vcpu); + gmm->u_vptb = vcpu->arch.mmu.get_vcpu_u_vptb(vcpu); +} +static inline pgd_t * +kvm_mmu_get_gmm_root(struct gmm_struct *gmm) +{ + GTI_BUG_ON(gmm == NULL); + if (!VALID_PAGE(gmm->root_hpa)) + return NULL; + return (pgd_t *)__va(gmm->root_hpa); +} +static inline pgd_t * +kvm_mmu_load_the_gmm_root(struct kvm_vcpu *vcpu, gmm_struct_t *gmm) +{ + pgd_t *root; + bool u_space = gmm != pv_vcpu_get_init_gmm(vcpu); + + GTI_BUG_ON(vcpu == NULL); + root = kvm_mmu_get_gmm_root(gmm); + GTI_BUG_ON(root == NULL); + + if (unlikely(!u_space)) { + if (unlikely(is_sep_virt_spaces(vcpu))) { + vcpu->arch.mmu.set_vcpu_os_pptb(vcpu, gmm->os_pptb); + kvm_set_space_type_spt_os_root(vcpu, (hpa_t)__pa(root)); + } else { + vcpu->arch.mmu.set_vcpu_u_pptb(vcpu, gmm->u_pptb); + vcpu->arch.mmu.set_vcpu_os_pptb(vcpu, gmm->u_vptb); + kvm_set_space_type_spt_os_root(vcpu, (hpa_t)__pa(root)); + kvm_set_space_type_spt_u_root(vcpu, (hpa_t)__pa(root)); + } + } else { + 
vcpu->arch.mmu.set_vcpu_u_pptb(vcpu, gmm->u_pptb); + kvm_set_space_type_spt_u_root(vcpu, (hpa_t)__pa(root)); + if (likely(!is_sep_virt_spaces(vcpu))) { + vcpu->arch.mmu.set_vcpu_os_pptb(vcpu, gmm->u_pptb); + kvm_set_space_type_spt_os_root(vcpu, (hpa_t)__pa(root)); + } + } + return root; +} + +static inline pgd_t * +kvm_mmu_load_gmm_root(thread_info_t *next_ti, gthread_info_t *next_gti) +{ + struct kvm_vcpu *vcpu; + gmm_struct_t *next_gmm = next_gti->gmm; + pgd_t *root; + + vcpu = next_ti->vcpu; + root = kvm_mmu_load_the_gmm_root(vcpu, next_gmm); + return root; +} + +static inline pgd_t * +kvm_mmu_load_init_root(struct kvm_vcpu *vcpu) +{ + gmm_struct_t *init_gmm; + pgd_t *root; + + init_gmm = pv_vcpu_get_init_gmm(vcpu); + root = kvm_mmu_load_the_gmm_root(vcpu, init_gmm); + return root; +} +#else /* !CONFIG_KVM_HV_MMU */ +static inline pgd_t * +kvm_mmu_get_init_gmm_root(struct kvm *kvm) +{ + return NULL; +} +static inline pgd_t * +kvm_mmu_get_gmm_root(struct gmm_struct *gmm) +{ + return NULL; +} +static inline pgd_t * +kvm_mmu_load_the_gmm_root(struct kvm_vcpu *vcpu, gmm_struct_t *gmm) +{ + return NULL; +} +static inline pgd_t * +kvm_mmu_load_gmm_root(thread_info_t *next_ti, gthread_info_t *next_gti) +{ + return kvm_mmu_get_gmm_root(next_gti->gmm); +} + +static inline pgd_t * +kvm_mmu_load_init_root(struct kvm_vcpu *vcpu) +{ + return kvm_mmu_get_init_gmm_root(vcpu->kvm); +} +#endif /* CONFIG_KVM_HV_MMU */ + +static inline void +switch_guest_pgd(pgd_t *next_pgd) +{ + thread_info_t *thread_info = native_current_thread_info(); + pgd_t *pgd_to_set; + + DebugKVMSW("CPU #%d %s(%d) kernel image pgd %px = 0x%lx\n", + raw_smp_processor_id(), current->comm, current->pid, + thread_info->kernel_image_pgd_p, + (thread_info->kernel_image_pgd_p) ? 
+ pgd_val(*thread_info->kernel_image_pgd_p) + : + 0); + KVM_BUG_ON(next_pgd == NULL); + + if (unlikely(test_ti_thread_flag(thread_info, TIF_PARAVIRT_GUEST))) { +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT + if (!MMU_IS_SEPARATE_PT() && THERE_IS_DUP_KERNEL) + pgd_to_set = NULL; + else +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + pgd_to_set = thread_info->vcpu_pgd; + if (pgd_to_set) { + /* copy user part of PT including guest kernel part */ + copy_pgd_range(pgd_to_set, next_pgd, + 0, USER_PTRS_PER_PGD); + } + } else { + pgd_to_set = next_pgd; + } + + KVM_BUG_ON(PCSHTP_SIGN_EXTEND(NATIVE_READ_PCSHTP_REG_SVALUE()) != 0); + + reload_root_pgd(pgd_to_set); + /* FIXME: support of guest secondary space is not yet implemented + reload_secondary_page_dir(mm); + */ +} + +#define DO_NOT_USE_ACTIVE_GMM /* turn OFF optimization */ + +static inline void +switch_guest_mm(gthread_info_t *next_gti, struct gmm_struct *next_gmm) +{ + struct kvm_vcpu *vcpu = current_thread_info()->vcpu; + gthread_info_t *cur_gti = pv_vcpu_get_gti(vcpu); + gmm_struct_t *active_gmm; + pgd_t *next_pgd; + + DebugKVMSW("started to switch guest mm from GPID #%d to GPID #%d\n", + cur_gti->gpid->nid.nr, next_gti->gpid->nid.nr); + active_gmm = pv_vcpu_get_active_gmm(vcpu); + if (next_gmm == NULL || + next_gti->gmm == NULL || + next_gti->gmm_in_release) { +#ifdef DO_NOT_USE_ACTIVE_GMM + /* switch to guest kernel thread, but optimization */ + /* has been turned OFF, so switch to init gmm & PTs */ + next_gmm = pv_vcpu_get_init_gmm(vcpu); +#else /* !DO_NOT_USE_ACTIVE_GMM */ + /* switch to guest kernel thread: do not switch mm */ + if (active_gmm == NULL) { + /* now active is guest kernel init mm */ + DebugKVMSW("task to switch is guest kernel thread, " + "active mm is init mm\n"); + } else { + DebugKVMSW("task to switch is guest kernel thread, " + "active mm is now %px #%d\n", + active_gmm, active_gmm->nid.nr); + } + goto out; +#endif /* DO_NOT_USE_ACTIVE_GMM */ + } else if (active_gmm == next_gmm) { + 
/* new mm is already active: so do not switch to mm again */ + DebugKVMSW("task to switch is guest user thread, but its mm is " + "already active, so do not switch to active mm %px #%d " + "again\n", + active_gmm, active_gmm->nid.nr); + goto out; + } + if (likely(!pv_vcpu_is_init_gmm(vcpu, next_gmm))) { + next_pgd = kvm_mmu_load_gmm_root(current_thread_info(), + next_gti); + pv_vcpu_set_gmm(vcpu, next_gmm); + } else { + next_pgd = kvm_mmu_load_init_root(vcpu); + pv_vcpu_clear_gmm(vcpu); + } + switch_guest_pgd(next_pgd); + pv_vcpu_set_active_gmm(vcpu, next_gmm); + DebugKVMSW("task to switch is guest user thread, and its mm is not " + "already active, so switch and make active mm %px #%d\n", + next_gmm, next_gmm->nid.nr); + return; +out: + if (DEBUG_KVM_SWITCH_MODE) { + /* any function call can fill old state of hardware stacks */ + /* so after all calls do flush stacks again */ + NATIVE_FLUSHCPU; + E2K_WAIT(_all_e); + } +} + +static inline void +kvm_switch_to_init_guest_mm(struct kvm_vcpu *vcpu) +{ + gthread_info_t *cur_gti = pv_vcpu_get_gti(vcpu); + gmm_struct_t *init_gmm; + gmm_struct_t *active_gmm; + pgd_t *root; + + init_gmm = pv_vcpu_get_init_gmm(vcpu); + active_gmm = pv_vcpu_get_active_gmm(vcpu); + if (unlikely(init_gmm == active_gmm)) { + /* already on init mm */ + return; + } + KVM_BUG_ON(cur_gti->gmm != active_gmm); + root = kvm_mmu_load_the_gmm_root(vcpu, init_gmm); + switch_guest_pgd(root); + cur_gti->gmm_in_release = true; + pv_vcpu_set_active_gmm(vcpu, init_gmm); + pv_vcpu_clear_gmm(vcpu); +} + +static inline void +kvm_guest_kernel_pgd_populate(struct mm_struct *mm, pgd_t *pgd) +{ + /* should be populated on page fault */ + /* while access by guest kernel or user */ +} +static inline void +kvm_guest_user_pgd_populate(gmm_struct_t *gmm, pgd_t *pgd) +{ + /* should be populated on page fault */ + /* while access by guest user */ +} + +static inline void +virt_kernel_pgd_populate(struct mm_struct *mm, pgd_t *pgd) +{ + kvm_guest_kernel_pgd_populate(mm, pgd); 
+} + +extern e2k_addr_t kvm_guest_user_address_to_pva(struct task_struct *task, + e2k_addr_t address); +static inline e2k_addr_t +guest_user_address_to_pva(struct task_struct *task, e2k_addr_t address) +{ + return kvm_guest_user_address_to_pva(task, address); +} +#else /* ! CONFIG_VIRTUALIZATION */ +static inline void +virt_kernel_pgd_populate(struct mm_struct *mm, pgd_t *pgd) +{ + /* nothing to do, none any guests */ +} +#endif /* CONFIG_VIRTUALIZATION */ + +#endif /* ! _E2K_KVM_GMMU_CONTEXT_H */ diff --git a/arch/e2k/include/asm/kvm/gpid.h b/arch/e2k/include/asm/kvm/gpid.h new file mode 100644 index 000000000000..c1d9b9b4d559 --- /dev/null +++ b/arch/e2k/include/asm/kvm/gpid.h @@ -0,0 +1,69 @@ +#ifndef _ASM_E2K_KVM_GPID_H +#define _ASM_E2K_KVM_GPID_H + +/* + * Guest processes identifier (gpid) allocator + * Based on simplified include/linux/pid.h + */ + +#include +#include + +#include + +#define GPID_MAX_LIMIT (PID_MAX_LIMIT / 2) +#define RESERVED_GPIDS 300 + +#define GPIDMAP_ENTRIES ((GPID_MAX_LIMIT + 8*PAGE_SIZE - 1)/PAGE_SIZE/8) + +#define GPID_HASH_BITS 4 +#define GPID_HASH_SIZE NID_HASH_SIZE(GPID_HASH_BITS) + +struct kvm; + +typedef struct gpid { + kvm_nid_t nid; + struct gthread_info *gthread_info; +} gpid_t; + +typedef struct kvm_nid_table kvm_gpid_table_t; + +#define gpid_hashfn(nr) nid_hashfn(nr, GPID_HASH_BITS) + +extern gpid_t *kvm_alloc_gpid(kvm_gpid_table_t *gpid_table); +extern void kvm_do_free_gpid(gpid_t *gpid, kvm_gpid_table_t *gpid_table); +extern void kvm_free_gpid(gpid_t *gpid, kvm_gpid_table_t *gpid_table); +extern int kvm_gpidmap_init(struct kvm *kvm, kvm_gpid_table_t *gpid_table, + kvm_nidmap_t *gpid_nidmap, int gpidmap_entries, + struct hlist_head *gpid_hash, int gpid_hash_bits); +extern void kvm_gpidmap_destroy(kvm_gpid_table_t *gpid_table); + +#define for_each_guest_thread_info(gpid, entry, next, gpid_table) \ + for_each_guest_nid_node(gpid, entry, next, gpid_table, \ + nid.nid_chain) +#define gpid_entry(ptr) container_of(ptr, gpid_t, 
nid) +#define gpid_table_lock(gpid_table) \ + nid_table_lock(gpid_table) +#define gpid_table_unlock(gpid_table) \ + nid_table_unlock(gpid_table) +#define gpid_table_lock_irq(gpid_table) \ + nid_table_lock_irq(gpid_table) +#define gpid_table_unlock(gpid_table) \ + nid_table_unlock(gpid_table) +#define gpid_table_lock_irqsave(gpid_table, flags) \ + nid_table_lock_irqsave(gpid_table, flags) +#define gpid_table_unlock_irqrestore(gpid_table, flags) \ + nid_table_unlock_irqrestore(gpid_table, flags) + +static inline gpid_t * +kvm_find_gpid(kvm_gpid_table_t *gpid_table, int gpid_nr) +{ + kvm_nid_t *nid; + + nid = kvm_find_nid(gpid_table, gpid_nr, gpid_hashfn(gpid_nr)); + if (nid == NULL) + return NULL; + return gpid_entry(nid); +} + +#endif /* _ASM_E2K_KVM_GPID_H */ diff --git a/arch/e2k/include/asm/kvm/gregs.h b/arch/e2k/include/asm/kvm/gregs.h new file mode 100644 index 000000000000..a32ba9f9fd9e --- /dev/null +++ b/arch/e2k/include/asm/kvm/gregs.h @@ -0,0 +1,171 @@ +#ifndef _E2K_ASM_KVM_GREGS_H +#define _E2K_ASM_KVM_GREGS_H + +#include +#include +#include +#include +#include + +#ifdef CONFIG_VIRTUALIZATION +/* It is native host guest kernel with virtualization support */ +/* or paravirtualized host and guest kernel */ +/* or pure guest kernel */ + +#define HOST_ONLY_COPY_TO_VCPU_STATE_GREG(__k_gregs, __vs) \ +({ \ + (__k_gregs)->g[GUEST_VCPU_STATE_GREGS_PAIRS_INDEX].base = (__vs); \ +}) + +#define HOST_ONLY_COPY_FROM_VCPU_STATE_GREG(__k_gregs, __vs) \ +({ \ + (__vs) = (__k_gregs)->g[GUEST_VCPU_STATE_GREGS_PAIRS_INDEX].base; \ +}) + +#define HOST_GET_SAVED_VCPU_STATE_GREG(__ti) \ +({ \ + unsigned long greg_vs; \ + \ + HOST_ONLY_COPY_FROM_VCPU_STATE_GREG(&(__ti)->k_gregs, greg_vs); \ + greg_vs; \ +}) + +#define HOST_GET_SAVED_VCPU_STATE_GREG_AS_LIGHT(__ti) \ +({ \ + unsigned long greg_vs; \ + \ + HOST_ONLY_COPY_FROM_VCPU_STATE_GREG(&(__ti)->k_gregs_light, \ + greg_vs); \ + greg_vs; \ +}) + +#define HOST_ONLY_SAVE_VCPU_STATE_GREG(vs__) \ +({ \ + (vs__) = 
NATIVE_GET_UNTEGGED_DGREG(GUEST_VCPU_STATE_GREG); \ +}) +#define HOST_ONLY_RESTORE_VCPU_STATE_GREG(vs__) \ +({ \ + NATIVE_SET_DGREG(GUEST_VCPU_STATE_GREG, vs__); \ +}) + +#define HOST_INIT_VCPU_STATE_GREG(__ti) \ +({ \ + kernel_gregs_t *k_gregs = &(__ti)->k_gregs; \ + unsigned long vs; \ + \ + vs = k_gregs->g[GUEST_VCPU_STATE_GREGS_PAIRS_INDEX].base; \ + HOST_ONLY_RESTORE_VCPU_STATE_GREG(vs); \ +}) + +#ifdef CONFIG_KVM_HOST_MODE +#define HOST_SAVE_HOST_GREGS_TO(__k_gregs, only_kernel) \ +({ \ + kernel_gregs_t *k_gregs = (__k_gregs); \ + unsigned long task__; \ + unsigned long cpu_id__; \ + unsigned long cpu_off__; \ + \ + if (likely(!(only_kernel))) { \ + unsigned long vs__; \ + \ + HOST_ONLY_SAVE_VCPU_STATE_GREG(vs__); \ + HOST_ONLY_COPY_TO_VCPU_STATE_GREG(k_gregs, vs__); \ + } \ + ONLY_SAVE_KERNEL_GREGS(task__, cpu_id__, cpu_off__); \ + k_gregs->g[CURRENT_TASK_GREGS_PAIRS_INDEX].base = task__; \ + k_gregs->g[SMP_CPU_ID_GREGS_PAIRS_INDEX].base = cpu_id__; \ + k_gregs->g[MY_CPU_OFFSET_GREGS_PAIRS_INDEX].base = cpu_off__; \ +}) + +#define HOST_SAVE_KERNEL_GREGS_AS_LIGHT(__ti) \ + HOST_SAVE_HOST_GREGS_TO(&(__ti)->k_gregs_light, false) + +#define HOST_SAVE_KERNEL_GREGS(__ti) \ + HOST_SAVE_HOST_GREGS_TO(&(__ti)->k_gregs, true) + +#define HOST_SAVE_HOST_GREGS(__ti) \ + HOST_SAVE_HOST_GREGS_TO(&(__ti)->k_gregs, false) + +#define HOST_SAVE_GUEST_KERNEL_GREGS(__gti) \ + HOST_SAVE_HOST_GREGS_TO(&(__gti)->gk_gregs, false) + +#define HOST_RESTORE_HOST_GREGS_FROM(__k_gregs, only_kernel) \ +({ \ + kernel_gregs_t *k_gregs = (__k_gregs); \ + unsigned long task__; \ + unsigned long cpu_id__; \ + unsigned long cpu_off__; \ + \ + if (likely(!(only_kernel))) { \ + unsigned long vs__; \ + \ + HOST_ONLY_COPY_FROM_VCPU_STATE_GREG(k_gregs, vs__); \ + HOST_ONLY_RESTORE_VCPU_STATE_GREG(vs__); \ + } \ + task__ = k_gregs->g[CURRENT_TASK_GREGS_PAIRS_INDEX].base; \ + cpu_id__ = k_gregs->g[SMP_CPU_ID_GREGS_PAIRS_INDEX].base; \ + cpu_off__ = k_gregs->g[MY_CPU_OFFSET_GREGS_PAIRS_INDEX].base; \ + 
ONLY_RESTORE_KERNEL_GREGS(task__, cpu_id__, cpu_off__); \ +}) + +#define HOST_RESTORE_KERNEL_GREGS_AS_LIGHT(_ti) \ + HOST_RESTORE_HOST_GREGS_FROM(&(_ti)->k_gregs_light, false) + +#define HOST_RESTORE_KERNEL_GREGS(_ti) \ + HOST_RESTORE_HOST_GREGS_FROM(&(_ti)->k_gregs, true) + +#define HOST_RESTORE_HOST_GREGS(_ti) \ + HOST_RESTORE_HOST_GREGS_FROM(&(_ti)->k_gregs, false) + +#define HOST_RESTORE_GUEST_KERNEL_GREGS(_gti) \ + HOST_RESTORE_HOST_GREGS_FROM(&(_gti)->gk_gregs, false) +#else /* !CONFIG_KVM_HOST_MODE */ +#define HOST_SAVE_HOST_GREGS(__ti) +#define HOST_RESTORE_HOST_GREGS(_ti) +#define HOST_SAVE_KERNEL_GREGS_AS_LIGHT(__ti) +#define HOST_RESTORE_KERNEL_GREGS_AS_LIGHT(_ti) +#endif /* CONFIG_KVM_HOST_MODE */ + +#else /* ! CONFIG_VIRTUALIZATION */ +/* It is native host kernel without any virtualization */ +/* not used */ +#endif /* CONFIG_VIRTUALIZATION */ + +static inline void +copy_h_gregs_to_gregs(global_regs_t *dst, const host_gregs_t *src) +{ + tagged_memcpy_8(&dst->g[HOST_GREGS_PAIRS_START], src->g, + sizeof(src->g)); +} + +static inline void +copy_h_gregs_to_h_gregs(host_gregs_t *dst, const host_gregs_t *src) +{ + tagged_memcpy_8(dst->g, src->g, sizeof(src->g)); +} + +static inline void +get_h_gregs_from_gregs(host_gregs_t *dst, const global_regs_t *src) +{ + tagged_memcpy_8(dst->g, &src->g[HOST_GREGS_PAIRS_START], + sizeof(dst->g)); +} + +static inline void +copy_h_gregs_to_l_gregs(local_gregs_t *dst, const host_gregs_t *src) +{ + BUG_ON(HOST_GREGS_PAIRS_START < LOCAL_GREGS_START); + tagged_memcpy_8(&dst->g[HOST_GREGS_PAIRS_START - LOCAL_GREGS_START], + src->g, sizeof(src->g)); +} + +static inline void +get_h_gregs_from_l_regs(host_gregs_t *dst, const local_gregs_t *src) +{ + BUG_ON(HOST_GREGS_PAIRS_START < LOCAL_GREGS_START); + tagged_memcpy_8(dst->g, + &src->g[HOST_GREGS_PAIRS_START - LOCAL_GREGS_START], + sizeof(dst->g)); +} + +#endif /* _E2K_ASM_KVM_GREGS_H */ diff --git a/arch/e2k/include/asm/kvm/guest.h b/arch/e2k/include/asm/kvm/guest.h new file mode 
100644 index 000000000000..d24110b431a1 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest.h @@ -0,0 +1,357 @@ +/* + * Kernel-based Virtual Machine driver for Linux + * + * This header defines architecture specific interfaces, e2k version + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#ifndef _ASM_E2K_KVM_GUEST_H +#define _ASM_E2K_KVM_GUEST_H + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +typedef struct kvm_cpu_regs { +#if defined(CONFIG_KVM_GUEST_KERNEL) && \ + defined(CONFIG_KVM_GUEST_HW_PV) && defined(CONFIG_KVM_HV_MMU) + u64 __pad1[24]; +#else /* ! CONFIG_KVM_GUEST_KERNEL || | + ! CONFIG_KVM_GUEST_HW_PV || ! CONFIG_KVM_HV_MMU */ + e2k_cud_lo_t CPU_CUD_lo; /* Compilation Unit Descriptor */ + e2k_cud_hi_t CPU_CUD_hi; + e2k_gd_lo_t CPU_GD_lo; /* CU Globals Descriptor */ + e2k_gd_hi_t CPU_GD_hi; + e2k_oscud_lo_t CPU_OSCUD_lo; /* OS Compilation Unit Descriptor */ + e2k_oscud_hi_t CPU_OSCUD_hi; + e2k_osgd_lo_t CPU_OSGD_lo; /* OS CU Globals Descriptor */ + e2k_osgd_hi_t CPU_OSGD_hi; + e2k_cutd_t CPU_CUTD; /* Compilation Unit Table Register */ + e2k_cuir_t CPU_CUIR; /* Compilation Unit Index Register */ + e2k_tsd_t CPU_TSD; /* Compilation Unit Types Descriptor */ + + e2k_usd_lo_t CPU_USD_lo; /* User Stack Descriptor Register */ + e2k_usd_hi_t CPU_USD_hi; + e2k_sbr_t CPU_SBR; /* Stack Base Register */ + e2k_psp_lo_t CPU_PSP_lo; /* Procedure Stack Pointer */ + e2k_psp_hi_t CPU_PSP_hi; + e2k_pshtp_t CPU_PSHTP; /* Procedure Stack Hardware */ + /* Top Pointer */ + e2k_pcsp_lo_t CPU_PCSP_lo; /* Procedure Chain Stack Pointer */ + e2k_pcsp_hi_t CPU_PCSP_hi; + e2k_cr0_lo_t CPU_CR0_lo; /* Current Chain Register */ + e2k_cr0_hi_t CPU_CR0_hi; + e2k_cr1_lo_t CPU_CR1_lo; + e2k_cr1_hi_t CPU_CR1_hi; + e2k_pcshtp_t CPU_PCSHTP; /* Procedure Chain Stack Hardware */ + /* Top Pointer */ +#endif /* CONFIG_KVM_GUEST_KERNEL && \ + 
CONFIG_KVM_GUEST_HW_PV && CONFIG_KVM_HV_MMU */ + e2k_ctpr_t CPU_CTPR1; /* Control Transfer Preparation */ + e2k_ctpr_t CPU_CTPR2; /* Registers */ + e2k_ctpr_t CPU_CTPR3; + e2k_tir_t CPU_TIRs[MAX_TIRs_NUM]; /* Trap Info Registers */ + int CPU_TIRs_num; /* number of occupied TIRs */ + u64 CPU_SBBP[SBBP_ENTRIES_NUM]; + e2k_wd_t CPU_WD; /* Window Descriptor Register */ + e2k_bgr_t CPU_BGR; /* Base Global Register */ + e2k_lsr_t CPU_LSR; /* Loop Status Register */ + e2k_ilcr_t CPU_ILCR; /* Initial Loop Counters Register */ + e2k_rpr_lo_t CPU_RPR_lo; /* Recovery point register */ + e2k_rpr_hi_t CPU_RPR_hi; + e2k_cutd_t CPU_OSCUTD; /* CUTD Register of OS */ + e2k_cuir_t CPU_OSCUIR; /* CUI register of OS */ + u64 CPU_OSR0; /* OS register #0 */ + u32 CPU_OSEM; /* OS Entries Mask */ + e2k_psr_t CPU_E2K_PSR; /* Processor State Register */ + e2k_upsr_t CPU_UPSR; /* User Processor State Register */ + e2k_pfpfr_t CPU_PFPFR; /* floating point control registers */ + e2k_fpcr_t CPU_FPCR; + e2k_fpsr_t CPU_FPSR; + u64 CPU_CLKR; /* CPU current clock regigister */ + u64 CPU_SCLKR; /* CPU system clock regigister */ + u64 CPU_SCLKM1; /* CPU system clock regigister 1 */ + u64 CPU_SCLKM2; /* CPU system clock regigister 2 */ + u64 CPU_SCLKM3; /* CPU system clock regigister 3 */ + u64 CPU_CU_HW0; /* Control Unit HardWare registers 0 */ + u64 CPU_CU_HW1; /* Control Unit HardWare registers 1 */ + u64 CPU_IP; /* Instruction Pointer register */ + e2k_idr_t CPU_IDR; /* Processor Identification Register */ + e2k_core_mode_t CPU_CORE_MODE; /* Processor Core Modes Register */ + u32 CPU_DIBCR; /* diagnostic and monitors registers */ + u32 CPU_DIBSR; + u64 CPU_DIMCR; + u64 CPU_DIBAR0; + u64 CPU_DIBAR1; + u64 CPU_DIBAR2; + u64 CPU_DIBAR3; + u64 CPU_DIMAR0; + u64 CPU_DIMAR1; + u64 CPU_CS_lo; /* Intel Segments registers */ + u64 CPU_CS_hi; + u64 CPU_DS_lo; + u64 CPU_DS_hi; + u64 CPU_ES_lo; + u64 CPU_ES_hi; + u64 CPU_FS_lo; + u64 CPU_FS_hi; + u64 CPU_GS_lo; + u64 CPU_GS_hi; + u64 CPU_SS_lo; + u64 
CPU_SS_hi; + /* virtual register only to support paravirtualization mode */ + u32 CPU_VCPU_ID; /* VCPU # is set by host and can not */ + /* be modified */ +} kvm_cpu_regs_t; + +/* + * CPU registers status flags + */ +/* the next flags mark updating of some VCPU registers by guest and */ +/* host should recovere physical CPU registers from the memory copy */ +#define WD_UPDATED_CPU_REGS 0x00000001UL /* register WD */ +#define USD_UPDATED_CPU_REGS 0x00000002UL /* USD/SBR */ +#define CRS_UPDATED_CPU_REGS 0x00000004UL /* CR0/CR1 */ +#define HS_REGS_UPDATED_CPU_REGS 0x00000008UL /* PSP/PCSP/PSHTP */ + /* PCSHTP */ +#define MASK_UPDATED_CPU_REGS 0x0000ffffUL /* mask of all */ + /* updating flags */ + +#define KVM_SET_CPU_REGS_FLAGS(regs_status, flags_to_add) \ + ((regs_status) | (flags_to_add)) +#define KVM_CLEAR_CPU_REGS_FLAGS(regs_status, flags_to_clear) \ + (((regs_status) | (flags_to_clear)) ^ (flags_to_clear)) +#define KVM_TEST_CPU_REGS_FLAGS(regs_status, flags_to_test) \ + (((regs_status) & (flags_to_test)) != 0) +#define KVM_TEST_CPU_REGS_STATUS(regs_status) \ + ((regs_status) != 0) +#define TO_UPDATED_CPU_REGS_FLAGS(flags) \ + ((flags) & MASK_UPDATED_CPU_REGS) +#define KVM_SET_UPDATED_CPU_REGS_FLAGS(regs_status, flags_to_add) \ + KVM_SET_CPU_REGS_FLAGS(regs_status, \ + TO_UPDATED_CPU_REGS_FLAGS(flags_to_add)) +#define KVM_CLEAR_UPDATED_CPU_REGS_FLAGS(regs_status, flags_to_clear) \ + KVM_CLEAR_CPU_REGS_FLAGS(regs_status, \ + TO_UPDATED_CPU_REGS_FLAGS(flags_to_clear)) +#define KVM_TEST_UPDATED_CPU_REGS_FLAG(regs_status, flag) \ + KVM_TEST_CPU_REGS_FLAGS(regs_status, \ + TO_UPDATED_CPU_REGS_FLAGS(flag)) +#define KVM_TEST_UPDATED_CPU_REGS_FLAGS(regs_status) \ + KVM_TEST_CPU_REGS_FLAGS(regs_status, MASK_UPDATED_CPU_REGS) +#define KVM_INIT_UPDATED_CPU_REGS_FLAGS(regs_status) \ + KVM_CLEAR_CPU_REGS_FLAGS(regs_status, MASK_UPDATED_CPU_REGS) + +typedef struct kvm_cpu_state { + unsigned long regs_status; /* CPU register status flags */ + kvm_cpu_regs_t regs; /* CPU 
registers state */ + e2k_aau_t aau; /* AAU registers state */ + u64 aaldi[AALDIS_REGS_NUM]; /* AALDI registers state */ + e2k_aalda_t aalda[AALDAS_REGS_NUM]; /* AALDA registers state */ +} kvm_cpu_state_t; + +typedef struct kvm_mmu_state { + mmu_reg_t regs[MMU_REGS_NUM]; /* MMU registers state */ + mmu_reg_t debug_regs[MMU_DEBUG_REGS_NUM]; /* MMU DEBUG */ + /* registers state */ + trap_cellar_t tcellar[MAX_TC_SIZE]; /* trap cellar */ + int tc_count; /* number of entries in */ + /* trap cellar */ +} kvm_mmu_state_t; + +typedef struct kvm_apic_state { + u8 regs[APIC_REGS_SIZE]; + atomic_t virqs_num; /* Local APIC unhendled and in */ + /* progress VIRQs number */ +} kvm_apic_state_t; + +typedef struct kvm_epic_state { + u8 regs[EPIC_REGS_SIZE]; + atomic_t virqs_num; /* CEPIC unhandled and in */ + /* progress VIRQs number */ +} kvm_epic_state_t; + +/* + * Guest Local APIC is virtual and emulates on host. + * So it can access to any local APIC from any virtual CPUs. + * Base address of local APIC is expanded by follow flag and CPU # + * to enable such access. 
+ */ +#define KVM_LAPIC_BASE_CPU_FLAG 0x4000000000000000UL /* [62] */ +#define KVM_LAPIC_BASE_CPU_NUM_MASK 0x0fff000000000000UL /* [59:48] */ +#define KVM_LAPIC_BASE_CPU_NUM_SHIFT 48 +#define KVM_CPU_TO_LAPIC_BASE(cpu_id) \ + ((cpu_id) << KVM_LAPIC_BASE_CPU_NUM_SHIFT) +#define KVM_CPU_FROM_LAPIC_BASE(base) \ + ((((e2k_addr_t)(base)) & KVM_LAPIC_BASE_CPU_NUM_MASK) >> \ + KVM_LAPIC_BASE_CPU_NUM_SHIFT) +#define KVM_GET_LAPIC_REG_ADDR(addr) \ + ((addr) & MAX_PA_MASK) +#define KVM_SET_LAPIC_BASE_CPU(base, cpu_id) \ + ((((e2k_addr_t)(base)) & ~(KVM_LAPIC_BASE_CPU_FLAG | \ + KVM_LAPIC_BASE_CPU_NUM_MASK)) | \ + KVM_LAPIC_BASE_CPU_FLAG | \ + KVM_CPU_TO_LAPIC_BASE(cpu_id)) + +typedef struct kvm_virqs_state { + atomic_t timer_virqs_num; /* timer VIRQs unhendled and */ + /* in progress number */ + atomic_t hvc_virqs_num; /* Hypervisor Virtual Console */ + /* VIRQs unhendled and */ + /* in progress number */ +} kvm_virqs_state_t; + +typedef struct kvm_timespec { + long tv_sec; /* seconds */ + long tv_nsec; /* nanoseconds */ +} kvm_timespec_t; + +typedef struct kvm_time { + kvm_timespec_t wall_time; /* boot time */ + kvm_timespec_t sys_time; /* current system time */ +} kvm_time_t; + +/* + * Host machine info + */ +typedef struct kvm_host_info { + int mach_id; /* host machine ID */ + int cpu_rev; /* host CPU revision */ + int cpu_iset; /* host CPU instruction set version */ + bool mmu_support_pt_v6; /* host MMU support new MMU Page */ + /* Tables structures V6 */ + bool is_hv; /* host is hardware virtualized */ + bool support_hw_hc; /* host support hardware hypercals */ + unsigned long features; /* KVM and hypervisor features */ + /* see details */ + kvm_time_t time; /* current host time state */ +} kvm_host_info_t; + +/* + * Information about the state and running time of a VCPU. + * Based on Xen interface include/xen/interface/vcpu.h + */ + +/* + * VCPU is not runnable, but it is not blocked. 
+ * This is a 'catch all' state for things like hotplug and pauses by the + * system administrator (or for critical sections in the hypervisor). + * RUNSTATE_blocked dominates this state (it is the preferred state). + */ +#define RUNSTATE_offline 0 + +/* VCPU is currently running on a physical CPU. */ +#define RUNSTATE_running 1 + +/* VCPU is runnable, but not currently scheduled on any physical CPU. */ +#define RUNSTATE_runnable 2 + +/* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */ +#define RUNSTATE_blocked 3 + +/* VCPU execute hypercall */ +#define RUNSTATE_in_hcall 4 + +/* VCPU is exited to handle exit request by QEMU. */ +#define RUNSTATE_in_QEMU 5 + +/* VCPU is interrupted and execute trap handler. */ +#define RUNSTATE_in_trap 6 + +/* VCPU is intercepted and is handling interception */ +#define RUNSTATE_in_intercept 7 +#define RUNSTATES_num (RUNSTATE_in_intercept + 1) + +typedef struct kvm_runstate_info { + /* VCPU's current state (RUNSTATE_*). */ + /* volatile */ int state; + /* When was current state entered (system time, ns)? */ + /* volatile */ uint64_t state_entry_time; + /* + * Time spent in each RUNSTATE_* (ns). The sum of these times is + * guaranteed not to drift from system time. 
+ */ + /* volatile */ uint64_t time[RUNSTATES_num]; +} kvm_runstate_info_t; + +typedef struct kvm_vcpu_state { + kvm_cpu_state_t cpu; /* virtual CPU state */ + kvm_mmu_state_t mmu; /* virtual MMU state */ + kvm_apic_state_t lapic; /* virtual Local APIC state */ + kvm_epic_state_t cepic; /* virtual CEPIC state */ + kvm_virqs_state_t virqs; /* virtual IRQs state */ + /* (excluding VIRQs from LAPIC) */ + kvm_host_info_t *host; /* host machine and kernel info */ + /* VCPU's current running state */ + kvm_runstate_info_t runstate; + bool debug_mode_on; + bool irqs_under_upsr; + bool do_dump_state; /* dump all stacks */ + bool do_dump_stack; /* dump only active stack */ +#ifdef VCPU_REGS_DEBUG + vcpu_regs_trace_t trace; /* VCPU state trace */ +#endif /* VCPU_REGS_DEBUG */ +} kvm_vcpu_state_t; + +#define DEBUG_MODE_ON (vcpu->arch.kmap_vcpu_state->debug_mode_on) +#define DO_DUMP_VCPU_STATE(vcpu) \ + ((vcpu)->arch.kmap_vcpu_state->do_dump_state) +#define DO_DUMP_VCPU_STACK(vcpu) \ + ((vcpu)->arch.kmap_vcpu_state->do_dump_stack) +#define DO_DUMP_VCPU(vcpu) \ + (DO_DUMP_VCPU_STATE(vcpu) || DO_DUMP_VCPU_STACK(vcpu)) +#define VCPU_IRQS_UNDER_UPSR(vcpu) \ + (vcpu->arch.kmap_vcpu_state->irqs_under_upsr) + +/* + * Basic macroses to access to VCPU state from guest + */ +#ifdef CONFIG_USE_GD_TO_VCPU_ACCESS +#define KVM_GET_VCPU_STATE_BASE(res) \ +({ \ + e2k_osgd_lo_t osgd_lo = native_read_OSGD_lo_reg(); \ + res = osgd_lo.GD_lo_base; \ + res; \ +}) + +#define E2K_LOAD_GUEST_VCPU_STATE_W(offset) E2K_LOAD_GLOBAL_W(offset) +#define E2K_LOAD_GUEST_VCPU_STATE_D(offset) E2K_LOAD_GLOBAL_D(offset) +#define E2K_STORE_GUEST_VCPU_STATE_W(offset, value) \ + E2K_STORE_GLOBAL_W(offset, value) +#define E2K_STORE_GUEST_VCPU_STATE_D(offset, value) \ + E2K_STORE_GLOBAL_D(offset, value) +#else /* ! 
CONFIG_USE_GD_TO_VCPU_ACCESS */ + +#define DO_GET_VCPU_STATE_BASE(vcpu_state_greg_no, res) \ + E2K_MOVE_DGREG_TO_DREG(vcpu_state_greg_no, res) +#define KVM_GET_VCPU_STATE_BASE(res) \ + DO_GET_VCPU_STATE_BASE(GUEST_VCPU_STATE_GREG, res) +#define KVM_SAVE_VCPU_STATE_BASE(res) KVM_GET_VCPU_STATE_BASE(res) + +#define DO_SET_VCPU_STATE_BASE(vcpu_state_greg_no, res) \ + E2K_MOVE_DREG_TO_DGREG(vcpu_state_greg_no, res) +#define KVM_SET_VCPU_STATE_BASE(res) \ + DO_SET_VCPU_STATE_BASE(GUEST_VCPU_STATE_GREG, res) +#define KVM_RESTORE_VCPU_STATE_BASE(res) KVM_SET_VCPU_STATE_BASE(res) + +#define E2K_LOAD_GUEST_VCPU_STATE_W(offset) \ + E2K_LOAD_GREG_BASED_W(GUEST_VCPU_STATE_GREG, offset) +#define E2K_LOAD_GUEST_VCPU_STATE_D(offset) \ + E2K_LOAD_GREG_BASED_D(GUEST_VCPU_STATE_GREG, offset) +#define E2K_STORE_GUEST_VCPU_STATE_W(offset, value) \ + E2K_STORE_GREG_BASED_W(GUEST_VCPU_STATE_GREG, offset, value) +#define E2K_STORE_GUEST_VCPU_STATE_D(offset, value) \ + E2K_STORE_GREG_BASED_D(GUEST_VCPU_STATE_GREG, offset, value) +#endif /* CONFIG_USE_GD_TO_VCPU_ACCESS */ + +#endif /* _ASM_E2K_KVM_GUEST_H */ diff --git a/arch/e2k/include/asm/kvm/guest/Kbuild b/arch/e2k/include/asm/kvm/guest/Kbuild new file mode 100644 index 000000000000..97dad2660ccc --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/Kbuild @@ -0,0 +1,4 @@ + +### e2k virtualization guest + +unifdef-y += hvc_l.h diff --git a/arch/e2k/include/asm/kvm/guest/aau_context.h b/arch/e2k/include/asm/kvm/guest/aau_context.h new file mode 100644 index 000000000000..ea0e78bcedab --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/aau_context.h @@ -0,0 +1,325 @@ +/* + * KVM AAU registers model access + * + * array access descriptors (AAD0, ... , AAD31); + * initial indices (AIND0, ... , AAIND15); + * indices increment values (AAINCR0, ... , AAINCR7); + * current values of "prefetch" indices (AALDI0, ... , AALDI63); + * array prefetch initialization mask (AALDV); + * prefetch attributes (AALDA0, ... 
, AALDA63); + * array prefetch advance mask (AALDM); + * array access status register (AASR); + * array access fault status register (AAFSTR); + * current values of "store" indices (AASTI0, ... , AASTI15); + * store attributes (AASTA0, ... , AASTA15); + */ + +#ifndef _ASM_E2K_KVM_GUEST_AAU_CONTEXT_H_ +#define _ASM_E2K_KVM_GUEST_AAU_CONTEXT_H_ + +#include +#include + +#define KVM_SAVE_AAU_MASK_REGS(aau_context, aasr) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_SAVE_AAU_MASK_REGS(aau_context, aasr); \ + } else { \ + PREFIX_SAVE_AAU_MASK_REGS(KVM, kvm, aau_context, aasr); \ + } \ +}) + +#define KVM_RESTORE_AAU_MASK_REGS(aau_context) \ +({ \ + if (IS_HV_GM()) { \ + E2K_CMD_SEPARATOR; \ + NATIVE_RESTORE_AAU_MASK_REGS(aau_context); \ + } else { \ + PREFIX_RESTORE_AAU_MASK_REGS(KVM, kvm, aau_context); \ + } \ +}) + +#define KVM_SAVE_AADS(aau_regs) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_SAVE_AADS(aau_regs); \ + } else { \ + PREFIX_SAVE_AADS(KVM, kvm, aau_regs); \ + } \ +}) + +#define KVM_RESTORE_AADS(aau_regs) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_RESTORE_AADS(aau_regs); \ + } else { \ + PREFIX_RESTORE_AADS(KVM, kvm, aau_regs); \ + } \ +}) + +#define KVM_SAVE_AALDIS(regs) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_SAVE_AALDIS(regs); \ + } else { \ + PREFIX_SAVE_AALDIS_V5(KVM, kvm, regs); \ + } \ +}) +#define KVM_SAVE_AALDIS_V2(regs) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_SAVE_AALDIS_V2(regs); \ + } else { \ + PREFIX_SAVE_AALDIS_V5(KVM, kvm, regs); \ + } \ +}) +#define KVM_SAVE_AALDIS_V5(regs) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_SAVE_AALDIS_V5(regs); \ + } else { \ + PREFIX_SAVE_AALDIS_V5(KVM, kvm, regs); \ + } \ +}) + +#define KVM_SAVE_AALDAS(aaldas_p) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_SAVE_AALDAS(aaldas_p); \ + } else { \ + PREFIX_SAVE_AALDAS(KVM, kvm, aaldas_p); \ + } \ +}) + +#define KVM_SAVE_AAFSTR(aau_context) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_SAVE_AAFSTR(aau_context); \ + } else { \ + PREFIX_SAVE_AAFSTR(KVM, kvm, aau_context); \ + } \ +}) + +#define 
KVM_SAVE_AAU_REGS_FOR_PTRACE(regs, ti) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_SAVE_AAU_REGS_FOR_PTRACE(regs, ti); \ + } else { \ + PREFIX_SAVE_AAU_REGS_FOR_PTRACE(KVM, kvm, regs, ti); \ + } \ +}) + +#define KVM_GET_ARRAY_DESCRIPTORS(aau_context) \ + PREFIX_GET_ARRAY_DESCRIPTORS_V5(KVM, kvm, aau_context) +#define KVM_GET_ARRAY_DESCRIPTORS_V2(aau_context) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_GET_ARRAY_DESCRIPTORS_V2(aau_context); \ + } else { \ + KVM_GET_ARRAY_DESCRIPTORS(aau_context); \ + } \ +}) +#define KVM_GET_ARRAY_DESCRIPTORS_V5(aau_context) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_GET_ARRAY_DESCRIPTORS_V5(aau_context); \ + } else { \ + KVM_GET_ARRAY_DESCRIPTORS(aau_context); \ + } \ +}) +#define KVM_SET_ARRAY_DESCRIPTORS(context) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_SET_ARRAY_DESCRIPTORS(context); \ + } else { \ + PREFIX_SET_ARRAY_DESCRIPTORS(KVM, kvm, context); \ + } \ +}) + +#define KVM_GET_SYNCHRONOUS_PART(context) \ + PREFIX_GET_SYNCHRONOUS_PART_V5(KVM, kvm, context) +#define KVM_GET_SYNCHRONOUS_PART_V2(context) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_GET_SYNCHRONOUS_PART_V2(context); \ + } else { \ + KVM_GET_SYNCHRONOUS_PART(context); \ + } \ +}) +#define KVM_GET_SYNCHRONOUS_PART_V5(context) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_GET_SYNCHRONOUS_PART_V5(context); \ + } else { \ + KVM_GET_SYNCHRONOUS_PART(context); \ + } \ +}) + +#define KVM_GET_AAU_CONTEXT(context) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_GET_AAU_CONTEXT(context); \ + } else { \ + PREFIX_GET_AAU_CONTEXT(KVM, kvm, V5, v5, context); \ + } \ +}) +#define KVM_GET_AAU_CONTEXT_V2(context) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_GET_AAU_CONTEXT_V2(context); \ + } else { \ + PREFIX_GET_AAU_CONTEXT(KVM, kvm, V5, v5, context); \ + } \ +}) +#define KVM_GET_AAU_CONTEXT_V5(context) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_GET_AAU_CONTEXT_V5(context); \ + } else { \ + PREFIX_GET_AAU_CONTEXT(KVM, kvm, V5, v5, context); \ + } \ +}) + +static inline void +kvm_save_aaldi(u64 *aaldis) +{ + 
KVM_SAVE_AALDIS(aaldis); +} +static inline void +kvm_save_aaldi_v2(u64 *aaldis) +{ + KVM_SAVE_AALDIS_V2(aaldis); +} +static inline void +kvm_save_aaldi_v5(u64 *aaldis) +{ + KVM_SAVE_AALDIS_V5(aaldis); +} + +static inline void +kvm_get_array_descriptors(e2k_aau_t *context) +{ + KVM_GET_ARRAY_DESCRIPTORS(context); +} +static inline void +kvm_get_array_descriptors_v2(e2k_aau_t *context) +{ + KVM_GET_ARRAY_DESCRIPTORS_V2(context); +} +static inline void +kvm_get_array_descriptors_v5(e2k_aau_t *context) +{ + KVM_GET_ARRAY_DESCRIPTORS_V5(context); +} + +static inline void +kvm_set_array_descriptors(const e2k_aau_t *context) +{ + KVM_SET_ARRAY_DESCRIPTORS(context); +} + +static inline void +kvm_get_synchronous_part(e2k_aau_t *context) +{ + KVM_GET_SYNCHRONOUS_PART(context); +} +static inline void +kvm_get_synchronous_part_v2(e2k_aau_t *context) +{ + KVM_GET_SYNCHRONOUS_PART_V2(context); +} +static inline void +kvm_get_synchronous_part_v5(e2k_aau_t *context) +{ + KVM_GET_SYNCHRONOUS_PART_V5(context); +} + +/* + * It's taken that aasr was get earlier(from get_aau_context caller) + * and comparison with aasr.iab was taken. 
+ */ +static inline void +kvm_get_aau_context(e2k_aau_t *context) +{ + KVM_GET_AAU_CONTEXT(context); +} +static inline void +kvm_get_aau_context_v2(e2k_aau_t *context) +{ + KVM_GET_AAU_CONTEXT_V2(context); +} +static inline void +kvm_get_aau_context_v5(e2k_aau_t *context) +{ + KVM_GET_AAU_CONTEXT_V5(context); +} + +static __always_inline void +kvm_set_aau_context(e2k_aau_t *context) +{ + /* AAU contesxt should restore host */ +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* It is pure kvm kernel without paravirtualization */ + +#define SAVE_AAU_MASK_REGS(aau_context, aasr) \ + KVM_SAVE_AAU_MASK_REGS(aau_context, aasr) + +#define RESTORE_AAU_MASK_REGS(aau_context) \ + KVM_RESTORE_AAU_MASK_REGS(aau_context) + +#define SAVE_AADS(aau_regs) \ + KVM_SAVE_AADS(aau_regs) + +#define RESTORE_AADS(aau_regs) \ + KVM_RESTORE_AADS(aau_regs) + +#define SAVE_AALDIS_V2(regs) KVM_SAVE_AALDIS_V2(regs) +#define SAVE_AALDIS_V5(regs) KVM_SAVE_AALDIS_V5(regs) + +#define SAVE_AALDA(aaldas) KVM_SAVE_AALDAS(aaldas) + +#define SAVE_AAFSTR(regs) KVM_SAVE_AAFSTR_REG(regs) + +#define SAVE_AAU_REGS_FOR_PTRACE(regs, ti) \ + KVM_SAVE_AAU_REGS_FOR_PTRACE(regs, ti) + +#define GET_ARRAY_DESCRIPTORS_V2(context) \ + KVM_GET_ARRAY_DESCRIPTORS_V2(context) +#define GET_ARRAY_DESCRIPTORS_V5(context) \ + KVM_GET_ARRAY_DESCRIPTORS_V5(context) + +#define GET_SYNCHRONOUS_PART_V2(context) \ + KVM_GET_SYNCHRONOUS_PART_V2(context) +#define GET_SYNCHRONOUS_PART_V5(context) \ + KVM_GET_SYNCHRONOUS_PART_V5(context) + +#define GET_AAU_CONTEXT_V2(context) KVM_GET_AAU_CONTEXT_V2(context) +#define GET_AAU_CONTEXT_V5(context) KVM_GET_AAU_CONTEXT_V5(context) + +static inline void +save_aaldi(u64 *aaldis) +{ + kvm_save_aaldi(aaldis); +} +static inline void +set_array_descriptors(e2k_aau_t *context) +{ + kvm_set_array_descriptors(context); +} +static inline void +get_aau_context(e2k_aau_t *context) +{ + kvm_get_aau_context(context); +} + +static __always_inline void +set_aau_context(e2k_aau_t *context) +{ + 
kvm_set_aau_context(context); +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* _ASM_E2K_KVM_GUEST_AAU_CONTEXT_H_ */ diff --git a/arch/e2k/include/asm/kvm/guest/area_alloc.h b/arch/e2k/include/asm/kvm/guest/area_alloc.h new file mode 100644 index 000000000000..8a6d26099bfc --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/area_alloc.h @@ -0,0 +1,13 @@ +#ifndef __ASM_KVM_GUEST_AREA_ALLOC_H +#define __ASM_KVM_GUEST_AREA_ALLOC_H + +#ifdef __KERNEL__ + +#include + +#ifdef CONFIG_KVM_GUEST_KERNEL +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_KVM_GUEST_AREA_ALLOC_H */ diff --git a/arch/e2k/include/asm/kvm/guest/atomic_api.h b/arch/e2k/include/asm/kvm/guest/atomic_api.h new file mode 100644 index 000000000000..aa49334f6bff --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/atomic_api.h @@ -0,0 +1,31 @@ +#ifndef _ASM_E2K_KVM_GUEST_ATOMIC_API_H_ +#define _ASM_E2K_KVM_GUEST_ATOMIC_API_H_ + +#include +#include + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +/* FIXME: here it is not implemented hardware bugs workarounds, */ +/* because of such workarounds contain privileged actions and */ +/* can be done only on host using appropriate hypercalls */ + +#define KVM_HWBUG_AFTER_LD_ACQ() NATIVE_HWBUG_AFTER_LD_ACQ() + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not virtualized based on pv_ops */ + +/* Guest virtual machine should examine host machine bugs too, but now */ +/* it is not implemented */ +#define virt_cpu_has(hwbug) false + +#define VIRT_HWBUG_AFTER_LD_ACQ() KVM_HWBUG_AFTER_LD_ACQ() +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! 
__ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* _ASM_E2K_KVM_GUEST_ATOMIC_API_H_ */ diff --git a/arch/e2k/include/asm/kvm/guest/boot.h b/arch/e2k/include/asm/kvm/guest/boot.h new file mode 100644 index 000000000000..8bc08e429a0b --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/boot.h @@ -0,0 +1,152 @@ +/* + * E2K boot-time initializtion virtualization for KVM guest + * + * Copyright 2011 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_GUEST_BOOT_H_ +#define _E2K_KVM_GUEST_BOOT_H_ + +#ifndef __ASSEMBLY__ +#include + +#include +#include +#include + +extern void boot_kvm_setup_machine_id(bootblock_struct_t *bootblock); +extern int __init boot_kvm_probe_memory(node_phys_mem_t *nodes_phys_mem, + boot_info_t *bootblock); +extern e2k_size_t __init boot_kvm_get_bootblock_size(boot_info_t *bblock); + +extern void boot_kvm_panic(const char *fmt_v, ...); +extern void __init_recv boot_kvm_cpu_relax(void); + +#ifdef CONFIG_SMP +/* + * redefine for guest: number of loops in each iteration of waiting for + * synchronization completion + */ +#define BOOT_WAITING_FOR_SYNC_LOOPS (NR_CPUS * 160) + +extern int __init_recv boot_kvm_smp_cpu_config(boot_info_t *bootblock); +extern void __init_recv boot_kvm_smp_node_config(boot_info_t *bootblock); +#endif /* CONFIG_SMP */ + +extern void __init boot_kvm_reserve_all_bootmem(bool bsp, + boot_info_t *boot_info); +extern void __init boot_kvm_map_all_bootmem(bool bsp, boot_info_t *boot_info); +extern void __init_recv boot_kvm_map_needful_to_equal_virt_area( + e2k_addr_t stack_top_addr); +extern void __init_recv boot_kvm_switch_to_virt(bool bsp, int cpuid, + void (*boot_init_sequel_func)(bool bsp, + int cpuid, int cpus)); +extern void __init init_kvm_terminate_boot_init(bool bsp, int cpuid); +extern void __init boot_kvm_parse_param(bootblock_struct_t *bootblock); +extern void __init boot_kvm_clear_bss(void); +extern void __init boot_kvm_check_bootblock(bool bsp, + bootblock_struct_t *bootblock); + +#ifdef 
CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops */ +static inline void +boot_setup_machine_id(bootblock_struct_t *bootblock) +{ + boot_kvm_setup_machine_id(bootblock); +} +static inline int __init +boot_loader_probe_memory(node_phys_mem_t *nodes_phys_mem, + boot_info_t *bootblock) +{ + return boot_kvm_probe_memory(nodes_phys_mem, bootblock); +} + +static inline e2k_size_t __init +boot_get_bootblock_size(boot_info_t *bootblock) +{ + return boot_kvm_get_bootblock_size(bootblock); +} + +#define boot_panic(fmt, args...) boot_kvm_panic(fmt, ##args) + +static inline void +boot_cpu_relax(void) +{ + boot_kvm_cpu_relax(); +} + +#ifdef CONFIG_SMP +static inline e2k_size_t __init +boot_smp_cpu_config(boot_info_t *bootblock) +{ + return boot_kvm_smp_cpu_config(bootblock); +} + +static inline void __init +boot_smp_node_config(boot_info_t *bootblock) +{ + boot_kvm_smp_node_config(bootblock); +} +#endif /* CONFIG_SMP */ + +static inline void __init +boot_reserve_all_bootmem(bool bsp, boot_info_t *boot_info) +{ + boot_kvm_reserve_all_bootmem(bsp, boot_info); +} + +static inline void __init +boot_map_all_bootmem(bool bsp, boot_info_t *boot_info) +{ + boot_kvm_map_all_bootmem(bsp, boot_info); +} + +static inline void __init_recv +boot_map_needful_to_equal_virt_area(e2k_addr_t stack_top_addr) +{ + boot_kvm_map_needful_to_equal_virt_area(stack_top_addr); +} + +static inline void __init_recv +boot_kernel_switch_to_virt(bool bsp, int cpuid, + void (*boot_init_sequel_func)(bool bsp, int cpuid, int cpus_to_sync)) +{ + boot_kvm_switch_to_virt(bsp, cpuid, boot_init_sequel_func); +} + +static inline void __init +init_terminate_boot_init(bool bsp, int cpuid) +{ + init_kvm_terminate_boot_init(bsp, cpuid); +} + +static inline void __init +boot_parse_param(bootblock_struct_t *bootblock) +{ + boot_kvm_parse_param(bootblock); +} + +static inline void __init +boot_clear_bss(void) +{ + boot_kvm_clear_bss(); +} +static inline void __init 
+boot_check_bootblock(bool bsp, bootblock_struct_t *bootblock) +{ + boot_kvm_check_bootblock(bsp, bootblock); +} + +/* pv_ops does not used in native host/guest mode */ +static inline void native_pv_ops_to_boot_ops(void) +{ +} +static inline void native_boot_pv_ops_to_ops(void) +{ +} + +#endif /* CONFIG_GUEST_KERNEL */ + +#endif /* ! __ASSEMBLY__ */ + +#endif /* _E2K_KVM_GUEST_BOOT_H_ */ diff --git a/arch/e2k/include/asm/kvm/guest/boot_flags.h b/arch/e2k/include/asm/kvm/guest/boot_flags.h new file mode 100644 index 000000000000..78c048d628d6 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/boot_flags.h @@ -0,0 +1,43 @@ +/* + * E2K boot info flags support on KVM guest + * + * Copyright 2016 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_GUEST_BOOT_FLAGS_H_ +#define _E2K_KVM_GUEST_BOOT_FLAGS_H_ + +#ifndef __ASSEMBLY__ +#include + +#include +#include + +/* + * bootblock manipulations (read/write/set/reset) in virtual kernel mode + * on physical level: + * write through and uncachable access on virtual "physical" address + * bootblock virtual address can be only read + */ + +#define GUEST_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) \ + DO_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field, \ + MAS_BYPASS_ALL_CACHES) + +#define GUEST_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, field_value) \ + DO_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, \ + field_value, \ + MAS_BYPASS_ALL_CACHES) + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native guest kernel */ +#define READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) \ + GUEST_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) +#define WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, field_value) \ + GUEST_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, \ + field_value) +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! 
__ASSEMBLY__ */ + +#endif /* _E2K_KVM_GUEST_BOOT_FLAGS_H_ */ diff --git a/arch/e2k/include/asm/kvm/guest/boot_mmu_context.h b/arch/e2k/include/asm/kvm/guest/boot_mmu_context.h new file mode 100644 index 000000000000..5a77e8375866 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/boot_mmu_context.h @@ -0,0 +1,36 @@ +/* + * guest boot-time mmu_context.h support + */ + +#ifndef _E2K_KVM_GUEST_BOOT_MMU_CONTEXT_H_ +#define _E2K_KVM_GUEST_BOOT_MMU_CONTEXT_H_ + +#include + +#include +#include +#include +#include + +/* + * Set guest kernel MMU state + */ + +extern void boot_kvm_set_kernel_MMU_state_before(void); +extern void boot_kvm_set_kernel_MMU_state_after(void); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ + +static inline void boot_set_kernel_MMU_state_before(void) +{ + boot_kvm_set_kernel_MMU_state_before(); +} + +static inline void boot_set_kernel_MMU_state_after(void) +{ + boot_kvm_set_kernel_MMU_state_after(); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* _E2K_KVM_GUEST_BOOT_MMU_CONTEXT_H_ */ diff --git a/arch/e2k/include/asm/kvm/guest/boot_spinlock.h b/arch/e2k/include/asm/kvm/guest/boot_spinlock.h new file mode 100644 index 000000000000..2a8172571e1b --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/boot_spinlock.h @@ -0,0 +1,38 @@ +#ifndef __ASM_KVM_GUEST_BOOT_SPINLOCK_H +#define __ASM_KVM_GUEST_BOOT_SPINLOCK_H +/* + * This file implements the arch-dependent parts of kvm guest + * boot-time spin_lock()/spin_unlock() fast and slow part + * + * Copyright 2014 Salavat S. 
Guiliazov (atic@mcst.ru) + */ + +#include + +extern void kvm_arch_boot_spin_lock_slow(void *lock); +extern void kvm_arch_boot_spin_locked_slow(void *lock); +extern void kvm_arch_boot_spin_unlock_slow(void *lock); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* native guest kernel */ + +#define arch_spin_relax(lock) kvm_cpu_relax() +#define arch_read_relax(lock) kvm_cpu_relax() +#define arch_write_relax(lock) kvm_cpu_relax() + +static inline void boot_arch_spin_lock_slow(boot_spinlock_t *lock) +{ + kvm_arch_boot_spin_lock_slow(lock); +} +static inline void boot_arch_spin_locked_slow(boot_spinlock_t *lock) +{ + kvm_arch_boot_spin_locked_slow(lock); +} +static inline void boot_arch_spin_unlock_slow(boot_spinlock_t *lock) +{ + kvm_arch_boot_spin_unlock_slow(lock); +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __ASM_KVM_GUEST_BOOT_SPINLOCK_H */ diff --git a/arch/e2k/include/asm/kvm/guest/bootinfo.h b/arch/e2k/include/asm/kvm/guest/bootinfo.h new file mode 100644 index 000000000000..0d46491dc612 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/bootinfo.h @@ -0,0 +1,44 @@ +/* + * E2K boot-time initializtion virtualization for KVM guest + * + * Copyright 2016 Salavat S. 
Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_GUEST_BOOTINFO_H_ +#define _E2K_KVM_GUEST_BOOTINFO_H_ + +#ifndef __ASSEMBLY__ + +#include + +#include + +/* + * bootblock manipulations (read/write/set/reset) in virtual kernel mode + * on physical level: + * write through and uncachable access on virtual "physical" address + * bootblock virtual address can be only read + */ + +#define KVM_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) \ + DO_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field, \ + MAS_BYPASS_ALL_CACHES) + +#define KVM_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, field_value) \ + DO_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, \ + field_value, \ + MAS_BYPASS_ALL_CACHES) + +#ifdef CONFIG_KVM_GUEST_KERNEL + +#define READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) \ + KVM_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) +#define WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, field_value) \ + KVM_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, \ + field_value) + +#endif /* CONFIG_GUEST_KERNEL */ + +#endif /* ! 
__ASSEMBLY__ */ + +#endif /* _E2K_KVM_GUEST_BOOTINFO_H_ */ diff --git a/arch/e2k/include/asm/kvm/guest/cacheflush.h b/arch/e2k/include/asm/kvm/guest/cacheflush.h new file mode 100644 index 000000000000..12e80ef0a24f --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/cacheflush.h @@ -0,0 +1,115 @@ +#ifndef __ASM_KVM_GUEST_CACHEFLUSH_H +#define __ASM_KVM_GUEST_CACHEFLUSH_H + +#include + +#ifndef __ASSEMBLY__ + +struct icache_range_array; +struct vm_area_struct; +struct page; + +#ifdef CONFIG_SMP +/* + * Guest kernel functions can be run on any guest user processes and can have + * arbitrary MMU contexts to track which on host is not possible, therefore + * it is necessary to flush all instruction caches + */ +extern void smp_flush_icache_all(void); + +static inline void +kvm_smp_flush_icache_range(e2k_addr_t start, e2k_addr_t end) +{ + smp_flush_icache_all(); +} +static inline void +kvm_smp_flush_icache_range_array(struct icache_range_array *icache_range_arr) +{ + smp_flush_icache_all(); +} +static inline void +kvm_smp_flush_icache_page(struct vm_area_struct *vma, struct page *page) +{ + smp_flush_icache_all(); +} +static inline void +kvm_smp_flush_icache_kernel_line(e2k_addr_t addr) +{ + smp_flush_icache_all(); +} +#endif /* CONFIG_SMP */ + +extern void kvm_flush_dcache_line(e2k_addr_t virt_addr); +extern void kvm_clear_dcache_l1_set(e2k_addr_t virt_addr, unsigned long set); +extern void kvm_flush_dcache_range(void *addr, size_t len); +extern void kvm_clear_dcache_l1_range(void *virt_addr, size_t len); +extern void kvm_write_dcache_l2_reg(unsigned long reg_val, + int reg_num, int bank_num); +extern unsigned long kvm_read_dcache_l2_reg(int reg_num, int bank_num); +extern void kvm_flush_icache_all(void); +extern void kvm_flush_icache_range(e2k_addr_t start, e2k_addr_t end); +extern void kvm_flush_icache_range_array( + struct icache_range_array *icache_range_arr); +extern void kvm_flush_icache_page(struct vm_area_struct *vma, + struct page *page); + +#ifdef 
CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#ifdef CONFIG_SMP +static inline void +smp_flush_icache_range(e2k_addr_t start, e2k_addr_t end) +{ + kvm_smp_flush_icache_range(start, end); +} +static inline void +smp_flush_icache_range_array(struct icache_range_array *icache_range_arr) +{ + kvm_smp_flush_icache_range_array(icache_range_arr); +} +static inline void +smp_flush_icache_page(struct vm_area_struct *vma, struct page *page) +{ + kvm_smp_flush_icache_page(vma, page); +} +static inline void +smp_flush_icache_kernel_line(e2k_addr_t addr) +{ + kvm_smp_flush_icache_kernel_line(addr); +} +#endif /* CONFIG_SMP */ + +static inline void +flush_DCACHE_range(void *addr, size_t len) +{ + kvm_flush_dcache_range(addr, len); +} +static inline void +clear_DCACHE_L1_range(void *virt_addr, size_t len) +{ + kvm_clear_dcache_l1_range(virt_addr, len); +} +static inline void +__flush_icache_all(void) +{ + kvm_flush_icache_all(); +} +static inline void +__flush_icache_range(e2k_addr_t start, e2k_addr_t end) +{ + kvm_flush_icache_range(start, end); +} +static inline void +__flush_icache_range_array(struct icache_range_array *icache_range_arr) +{ + kvm_flush_icache_range_array(icache_range_arr); +} +static inline void +__flush_icache_page(struct vm_area_struct *vma, struct page *page) +{ + kvm_flush_icache_page(vma, page); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_KVM_GUEST_CACHEFLUSH_H */ diff --git a/arch/e2k/include/asm/kvm/guest/clkr.h b/arch/e2k/include/asm/kvm/guest/clkr.h new file mode 100644 index 000000000000..87e2ae45c6b2 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/clkr.h @@ -0,0 +1,17 @@ +#ifndef _ASM_E2K_KVM_GUEST_CLKR_H +#define _ASM_E2K_KVM_GUEST_CLKR_H + +#include +#include + +extern unsigned long long kvm_sched_clock(void); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +static inline unsigned long long 
do_sched_clock(void) +{ + return kvm_sched_clock(); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* _ASM_E2K_KVM_GUEST_CLKR_H */ diff --git a/arch/e2k/include/asm/kvm/guest/console.h b/arch/e2k/include/asm/kvm/guest/console.h new file mode 100644 index 000000000000..4da88dd3ca93 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/console.h @@ -0,0 +1,33 @@ + +#ifndef _ASM_E2K_KVM_GUEST_CONSOLE_H_ +#define _ASM_E2K_KVM_GUEST_CONSOLE_H_ + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +#include +#include + +static inline void +kvm_virt_console_dump_putc(char c) +{ +#if defined(CONFIG_HVC_L) && defined(CONFIG_EARLY_VIRTIO_CONSOLE) + if (early_virtio_cons_enabled) + hvc_l_raw_putc(c); +#endif /* CONFIG_HVC_L && CONFIG_EARLY_VIRTIO_CONSOLE */ +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +static inline void +virt_console_dump_putc(char c) +{ + kvm_virt_console_dump_putc(c); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ +#endif /* _ASM_E2K_KVM_GUEST_CONSOLE_H_ */ diff --git a/arch/e2k/include/asm/kvm/guest/copy-hw-stacks.h b/arch/e2k/include/asm/kvm/guest/copy-hw-stacks.h new file mode 100644 index 000000000000..e616bcf5f099 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/copy-hw-stacks.h @@ -0,0 +1,568 @@ +/* + * KVM guest kernel processes support + * Copyright 2011 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_GUEST_COPY_HW_STACKS_H +#define _E2K_KVM_GUEST_COPY_HW_STACKS_H + +#include +#include +#include + +#include + +extern bool debug_ustacks; +#undef DEBUG_USER_STACKS_MODE +#undef DebugUST +#define DEBUG_USER_STACKS_MODE 0 /* guest user stacks debug mode */ +#define DebugUST(fmt, args...) 
\ +({ \ + if (debug_ustacks) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +static inline void +kvm_kernel_hw_stack_frames_copy(u64 *dst, const u64 *src, unsigned long size) +{ + fast_tagged_memory_copy(dst, src, size, + TAGGED_MEM_STORE_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + TAGGED_MEM_LOAD_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, true); +} + +static __always_inline void +kvm_collapse_kernel_ps(u64 *dst, const u64 *src, u64 spilled_size) +{ + e2k_psp_hi_t k_psp_hi; + u64 ps_ind, ps_size; + u64 size; + + DebugUST("current host procedure stack index 0x%x, PSHTP 0x%x\n", + NATIVE_NV_READ_PSP_HI_REG().PSP_hi_ind, + NATIVE_NV_READ_PSHTP_REG().PSHTP_ind); + + KVM_COPY_STACKS_TO_MEMORY(); + ATOMIC_GET_HW_PS_SIZES(ps_ind, ps_size); + + size = ps_ind - spilled_size; + BUG_ON(!IS_ALIGNED(size, ALIGN_PSTACK_TOP_SIZE) || (s64) size < 0); + + kvm_kernel_hw_stack_frames_copy(dst, src, size); + + k_psp_hi = NATIVE_NV_READ_PSP_HI_REG(); + k_psp_hi.PSP_hi_ind = size; + HYPERVISOR_update_psp_hi(k_psp_hi.PSP_hi_half); + + DebugUST("move spilled procedure part from host top %px to " + "bottom %px, size 0x%llx\n", + src, dst, size); + DebugUST("host kernel procedure stack index is now 0x%x, " + "guest user PSHTP 0x%llx\n", + k_psp_hi.PSP_hi_ind, spilled_size); +} + +static __always_inline void +kvm_collapse_kernel_pcs(u64 *dst, const u64 *src, u64 spilled_size) +{ + e2k_pcsp_hi_t k_pcsp_hi; + u64 pcs_ind, pcs_size; + u64 size; + + DebugUST("current host chain stack index 0x%x, PCSHTP 0x%llx\n", + NATIVE_NV_READ_PCSP_HI_REG().PCSP_hi_ind, + NATIVE_READ_PCSHTP_REG_SVALUE()); + + KVM_COPY_STACKS_TO_MEMORY(); + ATOMIC_GET_HW_PCS_SIZES(pcs_ind, pcs_size); + + size = pcs_ind - spilled_size; + BUG_ON(!IS_ALIGNED(size, ALIGN_PCSTACK_TOP_SIZE) || (s64) size < 0); + + kvm_kernel_hw_stack_frames_copy(dst, src, size); + + k_pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG(); + k_pcsp_hi.PCSP_hi_ind = size; + HYPERVISOR_update_pcsp_hi(k_pcsp_hi.PCSP_hi_half); + + 
DebugUST("move spilled chain part from host top %px to " + "bottom %px, size 0x%llx\n", + src, dst, size); + DebugUST("host kernel chain stack index is now 0x%x, " + "guest user PCSHTP 0x%llx\n", + k_pcsp_hi.PCSP_hi_ind, spilled_size); +} + +static __always_inline int +copy_stack_page_from_kernel(void __user *dst, void *src, e2k_size_t to_copy, + bool is_chain) +{ + int ret; + + ret = HYPERVISOR_copy_hw_stacks_frames(dst, src, to_copy, is_chain); + return ret; +} + +static __always_inline int +copy_stack_page_to_user(void __user *dst, void *src, e2k_size_t to_copy, + bool is_chain) +{ + struct page *page = NULL; + unsigned long addr = (unsigned long)dst; + void *k_dst; + e2k_size_t offset; + mm_segment_t seg; + unsigned long ts_flag; + int npages; + int ret; + + if (to_copy == 0) + return 0; + + DebugUST("started to copy %s stack from kernel stack %px to user %px " + "size 0x%lx\n", + (is_chain) ? "chain" : "procedure", + src, dst, to_copy); + seg = get_fs(); + set_fs(K_USER_DS); + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + do { + npages = __get_user_pages_fast(addr, 1, 1, &page); + if (npages == 1) + break; + npages = get_user_pages_unlocked(addr, 1, &page, FOLL_WRITE); + if (npages == 1) + break; + clear_ts_flag(ts_flag); + set_fs(seg); + ret = -EFAULT; + goto failed; + } while (npages != 1); + clear_ts_flag(ts_flag); + set_fs(seg); + + offset = addr & ~PAGE_MASK; + k_dst = page_address(page) + offset; + DebugUST("copy stack frames from kernel %px to user %px, size 0x%lx\n", + src, k_dst, to_copy); + ret = copy_stack_page_from_kernel(k_dst, src, to_copy, is_chain); + if (ret != 0) { + pr_err("%s(): copy %s stack to user %px from kernel %px, " + "size 0x%lx failed, error %d\n", + __func__, (is_chain) ? 
"chain" : "procedure", + src, k_dst, to_copy, ret); + goto failed_copy; + } + +failed_copy: + put_page(page); +failed: + return ret; +} + +static __always_inline int +kvm_copy_user_stack_from_kernel(void __user *dst, void *src, + e2k_size_t to_copy, bool is_chain) +{ + e2k_size_t offset, len, copied = 0; + int ret; + + if (to_copy == 0) + return 0; + + DebugUST("started to copy %s stack from kernel stack %px to user %px " + "size 0x%lx\n", + (is_chain) ? "chain" : "procedure", + src, dst, to_copy); + + if (trace_guest_copy_hw_stack_enabled()) + trace_guest_copy_hw_stack(dst, src, to_copy, is_chain); + + do { + offset = (unsigned long)dst & ~PAGE_MASK; + len = min(to_copy, PAGE_SIZE - offset); + ret = copy_stack_page_to_user(dst, src, len, is_chain); + if (ret != 0) + goto failed; + dst += len; + src += len; + to_copy -= len; + copied += len; + } while (to_copy > 0); + + if (!is_chain && trace_guest_proc_stack_frame_enabled()) { + if (trace_guest_va_tlb_state_enabled()) { + trace_guest_va_tlb_state((e2k_addr_t)dst); + } + trace_proc_stack_frames((kernel_mem_ps_t *)(src - copied), + (kernel_mem_ps_t *)(src - copied), copied, + trace_guest_proc_stack_frame); + trace_proc_stack_frames((kernel_mem_ps_t *)(dst - copied), + (kernel_mem_ps_t *)(dst - copied), copied, + trace_guest_proc_stack_frame); + } + if (is_chain && trace_guest_chain_stack_frame_enabled()) { + if (trace_guest_va_tlb_state_enabled()) { + trace_guest_va_tlb_state((e2k_addr_t)dst); + } + trace_chain_stack_frames((e2k_mem_crs_t *)(src - copied), + (e2k_mem_crs_t *)(src - copied), copied, + trace_guest_chain_stack_frame); + trace_chain_stack_frames((e2k_mem_crs_t *)(dst - copied), + (e2k_mem_crs_t *)(dst - copied), copied, + trace_guest_chain_stack_frame); + } + + return 0; + +failed: + pr_err("%s(): failed, error %d\n", __func__, ret); + return ret; +} + +static __always_inline int +kvm_user_hw_stacks_copy(pt_regs_t *regs) +{ + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_pshtp_t pshtp; + 
e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_pcshtp_t pcshtp; + e2k_stacks_t *stacks; + void __user *dst; + void *src; + long copyed_ps_size, copyed_pcs_size, to_copy, from, there_are; + int ret; + + if (unlikely(irqs_disabled())) { + pr_err("%s() called with IRQs disabled PSP: 0x%lx UPSR: 0x%lx " + "under UPSR %d\n", + __func__, KVM_READ_PSR_REG_VALUE(), + KVM_READ_UPSR_REG_VALUE(), + kvm_get_vcpu_state()->irqs_under_upsr); + local_irq_enable(); + WARN_ON(true); + } + + stacks = &regs->stacks; + copyed_ps_size = regs->copyed.ps_size; + copyed_pcs_size = regs->copyed.pcs_size; + if (unlikely(copyed_ps_size || copyed_pcs_size)) { + /* stacks have been already copyed */ + BUG_ON(copyed_ps_size != GET_PSHTP_MEM_INDEX(stacks->pshtp) && + GET_PSHTP_MEM_INDEX(stacks->pshtp) != 0); + BUG_ON(copyed_pcs_size != PCSHTP_SIGN_EXTEND(stacks->pcshtp) && + PCSHTP_SIGN_EXTEND(stacks->pcshtp) != SZ_OF_CR); + return 0; + } + + ret = HYPERVISOR_copy_stacks_to_memory(); + if (ret != 0) { + pr_err("%s(): flush of kernel stacks failed, error %d\n", + __func__, ret); + goto failed; + } + + /* copy user part of procedure stack from kernel back to user */ + ATOMIC_READ_HW_STACKS_REGS(psp_lo.PSP_lo_half, psp_hi.PSP_hi_half, + pshtp.PSHTP_reg, + pcsp_lo.PCSP_lo_half, pcsp_hi.PCSP_hi_half, + pcshtp); + src = (void *)psp_lo.PSP_lo_base; + DebugUST("procedure stack at kernel from %px, size 0x%x, ind 0x%x, " + "pshtp 0x%llx\n", + src, psp_hi.PSP_hi_size, psp_hi.PSP_hi_ind, pshtp.PSHTP_reg); + BUG_ON(psp_hi.PSP_hi_ind > psp_hi.PSP_hi_size); + + if (stacks->psp_hi.PSP_hi_ind >= stacks->psp_hi.PSP_hi_size) { + /* procedure stack overflow, need expand */ + ret = handle_proc_stack_bounds(stacks, regs->trap); + if (unlikely(ret)) { + pr_err("%s(): could not handle process %s (%d) " + "procedure stack overflow, error %d\n", + __func__, current->comm, current->pid, ret); + goto failed; + } + } + to_copy = GET_PSHTP_MEM_INDEX(stacks->pshtp); + BUG_ON(to_copy < 0); + from = stacks->psp_hi.PSP_hi_ind 
- to_copy; + BUG_ON(from < 0); + dst = (void __user *)stacks->psp_lo.PSP_lo_base + from; + DebugUST("procedure stack at user from %px, ind 0x%x, " + "pshtp size to copy 0x%lx\n", + dst, stacks->psp_hi.PSP_hi_ind, to_copy); + there_are = stacks->psp_hi.PSP_hi_size - from; + if (there_are < to_copy) { + pr_err("%s(): user procedure stack overflow, there are 0x%lx " + "to copy need 0x%lx, not yet implemented\n", + __func__, there_are, to_copy); + BUG_ON(true); + } + if (to_copy > 0) { + ret = kvm_copy_user_stack_from_kernel(dst, src, to_copy, false); + if (ret != 0) { + pr_err("%s(): procedure stack copying from kernel %px " + "to user %px, size 0x%lx failed, error %d\n", + __func__, src, dst, to_copy, ret); + goto failed; + } + regs->copyed.ps_size = to_copy; + } + + /* copy user part of chain stack from kernel back to user */ + src = (void *)pcsp_lo.PCSP_lo_base; + DebugUST("chain stack at kernel from %px, size 0x%x, ind 0x%x, " + "pcshtp 0x%x\n", + src, pcsp_hi.PCSP_hi_size, pcsp_hi.PCSP_hi_ind, pcshtp); + BUG_ON(pcsp_hi.PCSP_hi_ind + PCSHTP_SIGN_EXTEND(pcshtp) > + pcsp_hi.PCSP_hi_size); + if (stacks->pcsp_hi.PCSP_hi_ind >= stacks->pcsp_hi.PCSP_hi_size) { + /* chain stack overflow, need expand */ + ret = handle_chain_stack_bounds(stacks, regs->trap); + if (unlikely(ret)) { + pr_err("%s(): could not handle process %s (%d) " + "chain stack overflow, error %d\n", + __func__, current->comm, current->pid, ret); + goto failed; + } + } + to_copy = PCSHTP_SIGN_EXTEND(stacks->pcshtp); + BUG_ON(to_copy < 0); + from = stacks->pcsp_hi.PCSP_hi_ind - to_copy; + BUG_ON(from < 0); + dst = (void *)stacks->pcsp_lo.PCSP_lo_base + from; + BUG_ON(to_copy > pcsp_hi.PCSP_hi_ind + PCSHTP_SIGN_EXTEND(pcshtp)); + DebugUST("chain stack at user from %px, ind 0x%x, " + "pcshtp size to copy 0x%lx\n", + dst, stacks->pcsp_hi.PCSP_hi_ind, to_copy); + there_are = stacks->pcsp_hi.PCSP_hi_size - from; + if (there_are < to_copy) { + pr_err("%s(): user chain stack overflow, there are 0x%lx " + "to copy 
need 0x%lx, not yet implemented\n", + __func__, there_are, to_copy); + BUG_ON(true); + } + if (to_copy > 0) { + ret = kvm_copy_user_stack_from_kernel(dst, src, to_copy, true); + if (ret != 0) { + pr_err("%s(): chain stack copying from kernel %px " + "to user %px, size 0x%lx failed, error %d\n", + __func__, src, dst, to_copy, ret); + goto failed; + } + regs->copyed.pcs_size = to_copy; + } + +failed: + if (DEBUG_USER_STACKS_MODE) + debug_ustacks = false; + return ret; +} + +/* + * Copy additional frames injected to the guest kernel stack, but these frames + * are for guest user stack and should be copyed from kernel back to the top + * of user. + */ +static __always_inline int +kvm_copy_injected_pcs_frames_to_user(pt_regs_t *regs, int frames_num) +{ + e2k_size_t pcs_ind, pcs_size; + e2k_addr_t pcs_base; + int pcsh_top; + e2k_stacks_t *stacks; + void __user *dst; + void *src; + long copyed_frames_size, to_copy, from, there_are, frames_size; + int ret; + + BUG_ON(irqs_disabled()); + + frames_size = frames_num * SZ_OF_CR; + copyed_frames_size = regs->copyed.pcs_injected_frames_size; + if (unlikely(copyed_frames_size >= frames_size)) { + /* all frames have been already copyed */ + return 0; + } else { + /* copyed only part of frames - not implemented case */ + BUG_ON(copyed_frames_size != 0); + } + + stacks = &regs->stacks; + ATOMIC_GET_HW_PCS_SIZES_BASE_TOP(pcs_ind, pcs_size, pcs_base, pcsh_top); + + /* guest user stacks part spilled to kernel should be already copyed */ + BUG_ON(PCSHTP_SIGN_EXTEND(regs->copyed.pcs_size != stacks->pcshtp)); + + src = (void *)(pcs_base + regs->copyed.pcs_size); + DebugUST("chain stack at kernel from %px, size 0x%lx + 0x%lx, " + "ind 0x%lx, pcsh top 0x%x\n", + src, pcs_size, frames_size, pcs_ind, pcsh_top); + BUG_ON(regs->copyed.pcs_size + frames_size > pcs_ind + pcsh_top); + if (stacks->pcsp_hi.PCSP_hi_ind + frames_size > + stacks->pcsp_hi.PCSP_hi_size) { + /* user chain stack can overflow, need expand */ + ret = 
handle_chain_stack_bounds(stacks, regs->trap); + if (unlikely(ret)) { + pr_err("%s(): could not handle process %s (%d) " + "chain stack overflow, error %d\n", + __func__, current->comm, current->pid, ret); + goto failed; + } + } + to_copy = frames_size; + BUG_ON(to_copy < 0); + from = stacks->pcsp_hi.PCSP_hi_ind; + BUG_ON(from < regs->copyed.pcs_size); + dst = (void *)stacks->pcsp_lo.PCSP_lo_base + from; + DebugUST("chain stack at user from %px, ind 0x%x, " + "frames size to copy 0x%lx\n", + dst, stacks->pcsp_hi.PCSP_hi_ind, to_copy); + there_are = stacks->pcsp_hi.PCSP_hi_size - from; + if (there_are < to_copy) { + pr_err("%s(): user chain stack overflow, there are 0x%lx " + "to copy need 0x%lx, not yet implemented\n", + __func__, there_are, to_copy); + BUG_ON(true); + } + if (likely(to_copy > 0)) { + ret = kvm_copy_user_stack_from_kernel(dst, src, to_copy, true); + if (ret != 0) { + pr_err("%s(): chain stack copying from kernel %px " + "to user %px, size 0x%lx failed, error %d\n", + __func__, src, dst, to_copy, ret); + goto failed; + } + regs->copyed.pcs_injected_frames_size = to_copy; + /* increment chain stack pointer */ + stacks->pcsp_hi.PCSP_hi_ind += to_copy; + } else { + BUG_ON(true); + ret = 0; + } + +failed: + if (DEBUG_USER_STACKS_MODE) + debug_ustacks = false; + return ret; +} + +/** + * user_hw_stacks_prepare - prepare user hardware stacks that have been + * SPILLed to kernel back to user space + * @stacks - saved user stack registers + * @cur_window_q - size of current window in procedure stack, + * needed only if @copy_full is not set + * @syscall - true if called upon direct system call exit (no signal handlers) + * + * This does two things: + * + * 1) It is possible that upon kernel entry pcshtp == 0 in some cases: + * - user signal handler had pcshtp==0x20 before return to sigreturn() + * - user context had pcshtp==0x20 before return to makecontext_trampoline() + * - chain stack underflow happened + * So it is possible in sigreturn() and traps, but 
not in system calls. + * If we are using the trick with return to FILL user hardware stacks than + * we must have frame in chain stack to return to. So in this case kernel's + * chain stack is moved up by one frame (0x20 bytes). + * We also fill the new frame with actual user data and update stacks->pcshtp, + * this is needed to keep the coherent state where saved stacks->pcshtp values + * shows how much data from user space has been spilled to kernel space. + * + * 2) It is not possible to always FILL all of user data that have been + * SPILLed to kernel stacks. So we manually copy the leftovers that can + * not be FILLed to user space. + * This copy does not update stacks->pshtp and stacks->pcshtp. Main reason + * is signals: if a signal arrives after copying then it must see a coherent + * state where saved stacks->pshtp and stacks->pcshtp values show how much + * data from user space has been spilled to kernel space. + */ +static __always_inline int kvm_user_hw_stacks_prepare( + struct e2k_stacks *stacks, pt_regs_t *regs, + u64 cur_window_q, enum restore_caller from, int syscall) +{ + e2k_pcshtp_t u_pcshtp = stacks->pcshtp; + int ret; + + BUG_ON(!kvm_trap_user_mode(regs)); + + BUG_ON(from & FROM_PV_VCPU_MODE); + + /* + * 1) Make sure there is free space in kernel chain stack to return to + */ + if (!syscall && u_pcshtp == 0) { + DebugUST("%s(): PCSHTP is empty\n", __func__); + } + + /* + * 2) User data copying will be done some later at + * kvm_prepare_user_hv_stacks() + */ + ret = kvm_user_hw_stacks_copy(regs); + if (ret != 0) { + pr_err("%s(): copying of hardware stacks failed< error %d\n", + __func__, ret); + do_exit(SIGKILL); + } + return ret; +} + +static inline int +kvm_ret_from_fork_prepare_hv_stacks(struct pt_regs *regs) +{ + return kvm_user_hw_stacks_copy(regs); +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* native guest kernel */ + +static __always_inline void +kernel_hw_stack_frames_copy(u64 *dst, const u64 *src, unsigned long size) +{ + 
kvm_kernel_hw_stack_frames_copy(dst, src, size); +} + +static __always_inline void +collapse_kernel_ps(u64 *dst, const u64 *src, u64 spilled_size) +{ + kvm_collapse_kernel_ps(dst, src, spilled_size); +} + +static __always_inline void +collapse_kernel_pcs(u64 *dst, const u64 *src, u64 spilled_size) +{ + kvm_collapse_kernel_pcs(dst, src, spilled_size); +} + +static __always_inline int +user_hw_stacks_copy(struct e2k_stacks *stacks, + pt_regs_t *regs, u64 cur_window_q, bool copy_full) +{ + return kvm_user_hw_stacks_copy(regs); +} + +static __always_inline void host_user_hw_stacks_prepare( + struct e2k_stacks *stacks, pt_regs_t *regs, + u64 cur_window_q, enum restore_caller from, int syscall) +{ + if (regs->sys_num == __NR_e2k_longjmp2) { + /* hardware stacks already are prepared */ + return; + } + kvm_user_hw_stacks_prepare(stacks, regs, cur_window_q, + from, syscall); +} + +static inline int +ret_from_fork_prepare_hv_stacks(struct pt_regs *regs) +{ + return kvm_ret_from_fork_prepare_hv_stacks(regs); +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* !(_E2K_KVM_GUEST_COPY_HW_STACKS_H) */ diff --git a/arch/e2k/include/asm/kvm/guest/cpu.h b/arch/e2k/include/asm/kvm/guest/cpu.h new file mode 100644 index 000000000000..9af3d556fa8e --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/cpu.h @@ -0,0 +1,31 @@ +#ifndef __ASM_KVM_GUEST_CPU_H +#define __ASM_KVM_GUEST_CPU_H + +#ifdef __KERNEL__ + +#include +#include +#include + +extern unsigned long kvm_get_cpu_running_cycles(void); + +static inline bool kvm_vcpu_host_support_hw_hc(void) +{ + kvm_host_info_t *host_info; + + host_info = kvm_get_host_info(); + return host_info->support_hw_hc; +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +static inline unsigned long +get_cpu_running_cycles(void) +{ + return kvm_get_cpu_running_cycles(); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_KVM_GUEST_CPU_H */ diff --git 
a/arch/e2k/include/asm/kvm/guest/debug.h b/arch/e2k/include/asm/kvm/guest/debug.h new file mode 100644 index 000000000000..fb1f729a3ab8 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/debug.h @@ -0,0 +1,78 @@ +/* + * KVM guest kernel processes debugging support + * Copyright 2011 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_GUEST_DEBUG_H +#define _E2K_KVM_GUEST_DEBUG_H + +#include + +/* Read instruction word (two syllables) from IP address */ +static inline unsigned long +kvm_read_instr_on_IP(e2k_addr_t ip, e2k_addr_t phys_ip) +{ + /* guest image should be read on virtual physical IP */ + return *((u64 *)pa_to_vpa(phys_ip)); +} +/* Write modified instruction word at IP address */ +static inline void +kvm_modify_instr_on_IP(e2k_addr_t ip, e2k_addr_t phys_ip, + unsigned long instr_word) +{ + /* guest image should be writed on virtual physical IP */ + *((u64 *)pa_to_vpa(phys_ip)) = instr_word; +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ + +#include + +#define GET_PHYS_ADDR(task, addr) GUEST_GET_PHYS_ADDR(task, addr) + +#define debug_guest_regs(task) false /* none any guests */ +#define get_cpu_type_name() "VCPU" /* virtual CPU */ + +static inline void print_all_guest_stacks(void) +{ + /* nothing to do, guest has not other guest processes */ +} +static inline void print_guest_vcpu_stack(struct kvm_vcpu *vcpu) +{ + /* nothing to do, guest has not other guest processes */ +} +static inline void +print_guest_stack(struct task_struct *task, + stack_regs_t *const regs, bool show_reg_window) +{ + /* nothing to do, guest has not other guest processes */ +} +static inline void +host_ftrace_stop(void) +{ + HYPERVISOR_ftrace_stop(); +} +static inline void +host_ftrace_dump(void) +{ + HYPERVISOR_ftrace_dump(); +} + +/* Read instruction word (two syllables) from IP address */ +static inline unsigned long +read_instr_on_IP(e2k_addr_t ip, e2k_addr_t phys_ip) +{ + return kvm_read_instr_on_IP(ip, 
phys_ip); +} +/* Write modified instruction word at IP address */ +static inline void +modify_instr_on_IP(e2k_addr_t ip, e2k_addr_t phys_ip, + unsigned long instr_word) +{ + kvm_modify_instr_on_IP(ip, phys_ip, instr_word); +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! _E2K_KVM_GUEST_DEBUG_H */ diff --git a/arch/e2k/include/asm/kvm/guest/e2k.h b/arch/e2k/include/asm/kvm/guest/e2k.h new file mode 100644 index 000000000000..dd726bd6d40d --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/e2k.h @@ -0,0 +1,34 @@ +#ifndef _ASM_KVM_GUEST_E2K_H_ +#define _ASM_KVM_GUEST_E2K_H_ + +/* Do not include the header directly, only through asm/e2k.h */ + + +#include + +#include + +#ifdef CONFIG_VIRTUALIZATION + +#if defined(CONFIG_KVM_GUEST_KERNEL) || defined(CONFIG_PARAVIRT_GUEST) +extern unsigned int guest_machine_id; +#define boot_guest_machine_id boot_get_vo_value(guest_machine_id) +#endif /* CONFIG_KVM_GUEST_KERNEL || CONFIG_PARAVIRT_GUEST */ + +#define machine_id guest_machine_id +#define boot_machine_id boot_guest_machine_id + +#define get_machine_id() machine_id +#define boot_get_machine_id() boot_machine_id +#define set_machine_id(mach_id) (machine_id = (mach_id)) +#define boot_set_machine_id(mach_id) (boot_machine_id = (mach_id)) + +extern void kvm_set_mach_type_id(void); +static inline void set_mach_type_id(void) +{ + kvm_set_mach_type_id(); +} + +#endif /* CONFIG_VIRTUALIZATION */ + +#endif /* _ASM_KVM_GUEST_E2K_H_ */ diff --git a/arch/e2k/include/asm/kvm/guest/e2k_virt.h b/arch/e2k/include/asm/kvm/guest/e2k_virt.h new file mode 100644 index 000000000000..9132e0aaa78f --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/e2k_virt.h @@ -0,0 +1,59 @@ +#ifndef _ASM_KVM_GUEST_E2K_VIRT_H_ +#define _ASM_KVM_GUEST_E2K_VIRT_H_ + +#include +#include + +#define E2K_VIRT_CPU_FAMILY 0xff +#define E2K_VIRT_CPU_MODEL IDR_E2K_VIRT_MDL +#define E2K_VIRT_CPU_REVISION 16 /* 2016 year */ +#define E2K_VIRT_CPU_ISET 07 /* month */ + +/* + * Machine (based on e2k virtual processor) 
topology: + * It is classical SMP system on common memory, so can have only + * one node and this node includes all CPUs + */ + +#define E2K_VIRT_NR_NODE_CPUS KVM_MAX_VCPUS /* all VCPUs */ +#define E2K_VIRT_MAX_NR_NODE_CPUS E2K_VIRT_NR_NODE_CPUS +#define e2k_virt_cpu_to_node(cpu) (0) +#define e2k_virt_node_to_cpumask(node, main_cpu_mask) \ + (main_cpu_mask) +#define e2k_virt_node_to_first_cpu(node, main_cpu_mask) \ + (0) /* CPU #0 should be allways */ + +#define boot_e2k_virt_cpu_to_node(cpu) e2k_virt_cpu_to_node(cpu) +#define boot_e2k_virt_node_to_cpumask(node, boot_main_cpu_mask) \ + (boot_main_cpu_mask) +#define boot_e2k_virt_node_to_first_cpu(node, boot_main_cpu_mask) \ + e2k_virt_node_to_first_cpu(node, boot_main_cpu_mask) + +/* + * Local APIC cluster mode is not used for e2k-virt, + * so APIC quad is the same as all CPUs combined to single quad #0 + */ +#define E2K_VIRT_NR_APIC_QUAD_CPUS E2K_VIRT_NR_NODE_CPUS +#define E2K_VIRT_MAX_APIC_QUADS 1 +#define e2k_virt_apic_quad_to_cpumask(quad, main_cpu_mask) \ +({ \ + main_cpu_mask; \ +}) +#define e2k_virt_cpu_to_apic_quad(cpu) (0) +#define e2K_virt_cpu_to_apic_cpu(cpu) (cpu) + +/* + * IO links and IO controllers topology + * E2K virtual machines use virtio interface to access to IO devices + * All other machines use IO links and own chipset and main IO buses controller + * is IOHUB. 
+ * Without losing generality, IO controller of e2k-virt can consider + * as connected through simple IO link too, but it needs do not forget + * that IO controller is VIRTIO while details are essential + */ +#define E2K_VIRT_MAX_NUMIOLINKS 1 /* e2k-virt has only one IO */ + /* controller connected through */ + /* North bridge emulated by QEMU */ +#define E2K_VIRT_NODE_IOLINKS E2K_VIRT_MAX_NUMIOLINKS + +#endif /* _ASM_KVM_GUEST_E2K_VIRT_H_ */ diff --git a/arch/e2k/include/asm/kvm/guest/fast_syscalls.h b/arch/e2k/include/asm/kvm/guest/fast_syscalls.h new file mode 100644 index 000000000000..51638f7ae37f --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/fast_syscalls.h @@ -0,0 +1,42 @@ +#ifndef _ASM_E2K_KVM_GUEST_FAST_SYSCALLS_H +#define _ASM_E2K_KVM_GUEST_FAST_SYSCALLS_H + +#include +#include +#include + +int kvm_do_fast_clock_gettime(const clockid_t which_clock, + struct timespec *tp); +int kvm_fast_sys_clock_gettime(const clockid_t which_clock, + struct timespec __user *tp); +int kvm_do_fast_gettimeofday(struct timeval *tv); +int kvm_fast_sys_siggetmask(u64 __user *oset, size_t sigsetsize); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native guest kernel (not paravirtualized based on pv_ops) */ +static inline int +do_fast_clock_gettime(const clockid_t which_clock, struct timespec *tp) +{ + return kvm_do_fast_clock_gettime(which_clock, tp); +} + +static inline int +fast_sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp) +{ + return kvm_fast_sys_clock_gettime(which_clock, tp); +} + +static inline int +do_fast_gettimeofday(struct timeval *tv) +{ + return kvm_do_fast_gettimeofday(tv); +} +static inline int +fast_sys_siggetmask(u64 __user *oset, size_t sigsetsize) +{ + return kvm_fast_sys_siggetmask(oset, sigsetsize); +} +#endif /* ! 
CONFIG_KVM_GUEST_KERNEL */ + +#endif /* _ASM_E2K_KVM_GUEST_FAST_SYSCALLS_H */ + diff --git a/arch/e2k/include/asm/kvm/guest/gregs.h b/arch/e2k/include/asm/kvm/guest/gregs.h new file mode 100644 index 000000000000..2f34d243c5b0 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/gregs.h @@ -0,0 +1,90 @@ +#ifndef __KVM_GUEST_E2K_GREGS_H +#define __KVM_GUEST_E2K_GREGS_H + +/* Does not include this header directly, include */ + +#include + +#ifndef CONFIG_E2K_ISET_VER +#define KVM_SAVE_HOST_GREGS(__ti) \ +({ \ + if (IS_HV_GM()) { \ + machine.guest.save_host_gregs(&__ti->h_gregs); \ + } \ +}) +#define KVM_RESTORE_HOST_GREGS(__ti) \ +({ \ + if (IS_HV_GM()) { \ + machine.guest.restore_host_gregs(&__ti->h_gregs); \ + } \ +}) +#elif CONFIG_E2K_ISET_VER < 5 +#define KVM_SAVE_HOST_GREGS(__ti) \ +({ \ + if (IS_HV_GM()) { \ + DO_SAVE_VCPU_STATE_GREGS_V2(__ti->h_gregs.g); \ + } \ +}) +#define KVM_RESTORE_HOST_GREGS(__ti) \ +({ \ + if (IS_HV_GM()) { \ + DO_RESTORE_VCPU_STATE_GREGS_V2(__ti->h_gregs.g); \ + } \ +}) +#else /* CONFIG_E2K_ISET_VER >= 5 */ +#define KVM_SAVE_HOST_GREGS(__ti) \ +({ \ + if (IS_HV_GM()) { \ + DO_SAVE_VCPU_STATE_GREGS_V5(__ti->h_gregs.g); \ + } \ +}) +#define KVM_RESTORE_HOST_GREGS(__ti) \ +({ \ + if (IS_HV_GM()) { \ + DO_RESTORE_VCPU_STATE_GREGS_V5(__ti->h_gregs.g); \ + } \ +}) +#endif /* CONFIG_E2K_ISET_VER */ + +/* save/restore of globals is executed by host kernel, so guest do nothing */ +#define KVM_SAVE_KERNEL_GREGS_AND_SET(__ti) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_SAVE_KERNEL_GREGS_AND_SET(__ti); \ + } \ +}) +#define KVM_RESTORE_KERNEL_GREGS_AND_FREE(__ti) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_RESTORE_KERNEL_GREGS(&(__ti)->k_gregs); \ + } \ +}) +#define KVM_RESTORE_KERNEL_GREGS_IN_SYSCALL(__ti) \ +({ \ + if (IS_HV_GM()) { \ + E2K_CMD_SEPARATOR; /* to do not have priv action */ \ + NATIVE_RESTORE_KERNEL_GREGS_IN_SYSCALL(__ti); \ + } else { \ + /* macros should be used only to return to guest */ \ + /* kernel from host, so restore kernel gregs state 
*/ \ + ONLY_SET_KERNEL_GREGS(__ti); \ + } \ +}) + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native KVM guest kernel (not paravirtualized) */ + +#define HOST_SAVE_VCPU_STATE_GREGS(__ti) +#define HOST_RESTORE_VCPU_STATE_GREGS(__ti) + +/* save/restore of globals is executed by host kernel, so guest do nothing */ +#define SAVE_KERNEL_GREGS_AND_SET(thread_info) \ + KVM_SAVE_KERNEL_GREGS_AND_SET(thread_info) +#define RESTORE_KERNEL_GREGS_AND_FREE(thread_info) \ + KVM_RESTORE_KERNEL_GREGS_AND_FREE(thread_info) +#define RESTORE_KERNEL_GREGS_IN_SYSCALL(thread_info) \ + KVM_RESTORE_KERNEL_GREGS_IN_SYSCALL(thread_info) + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __KVM_GUEST_E2K_GREGS_H */ diff --git a/arch/e2k/include/asm/kvm/guest/host_printk.h b/arch/e2k/include/asm/kvm/guest/host_printk.h new file mode 100644 index 000000000000..afddb99efa2f --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/host_printk.h @@ -0,0 +1,31 @@ +/* + * KVM guest printk() on host support + * + * Copyright 2015 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_GUEST_HOST_PRINTK_H +#define _E2K_KVM_GUEST_HOST_PRINTK_H + +#include +#include + +extern int kvm_host_printk(const char *fmt, ...); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native guest */ +#define host_printk(fmt, args...) kvm_host_printk(fmt, ##args) + +#define host_pr_alert(fmt, args...) host_printk(fmt, ##args) +#define host_pr_cont(fmt, args...) host_printk(fmt, ##args) +#define host_pr_info(fmt, args...) host_printk(fmt, ##args) + +extern void host_dump_stack(void); +extern u64 host_print_all_TIRs(const e2k_tir_t *TIRs, u64 nr_TIRs); +extern void host_print_tc_record(const trap_cellar_t *tcellar, int num); +extern void host_print_all_TC(const trap_cellar_t *TC, int TC_count); +extern void host_print_pt_regs(const struct pt_regs *regs); + +#endif /* ! CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! 
_E2K_KVM_GUEST_HOST_PRINTK_H */ diff --git a/arch/e2k/include/asm/kvm/guest/hvc_l.h b/arch/e2k/include/asm/kvm/guest/hvc_l.h new file mode 100644 index 000000000000..5823e1fd9af0 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/hvc_l.h @@ -0,0 +1,27 @@ +/****************************************************************************** + * hvc_l.h (based on XEN console interface + * + * HyperVisor Console I/O interface for Elbrus guest OSes. + * + * Copyright (c) 2005, Keir Fraser + * (c) 2013 Salavat Gilyazov + */ + +#ifndef __L_PUBLIC_IO_CONSOLE_H__ +#define __L_PUBLIC_IO_CONSOLE_H__ + +typedef uint32_t LCONS_RING_IDX; + +#define MASK_LCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1)) + +typedef struct lcons_interface { + char in[1024]; /* input buffer */ + char out[2048]; /* output buffer */ + LCONS_RING_IDX in_cons, in_prod; /* input buffer indexes */ + LCONS_RING_IDX out_cons, out_prod; /* output buffer indexes */ +} lcons_interface_t; + +#define LCONS_OUTPUT_NOTIFIER (('l'<<24) | ('c'<<16) | ('o'<<8) | 't') +#define LCONS_INPUT_NOTIFIER (('l'<<24) | ('c'<<16) | ('i'<<8) | 'n') + +#endif /* __L_PUBLIC_IO_CONSOLE_H__ */ diff --git a/arch/e2k/include/asm/kvm/guest/hw_stacks.h b/arch/e2k/include/asm/kvm/guest/hw_stacks.h new file mode 100644 index 000000000000..50c4ed951c8f --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/hw_stacks.h @@ -0,0 +1,88 @@ +/* + * KVM guest hardware stacks access support + * + * Copyright (C) 2016 MCST + */ + +#ifndef _E2K_KVM_GUEST_HW_STACKS_H_ +#define _E2K_KVM_GUEST_HW_STACKS_H_ + +#ifndef __ASSEMBLY__ + +#include + +/* procedure chain stack items access */ +extern unsigned long kvm_get_active_cr0_lo_value(e2k_addr_t base, + e2k_addr_t cr_ind); +extern unsigned long kvm_get_active_cr0_hi_value(e2k_addr_t base, + e2k_addr_t cr_ind); +extern unsigned long kvm_get_active_cr1_lo_value(e2k_addr_t base, + e2k_addr_t cr_ind); +extern unsigned long kvm_get_active_cr1_hi_value(e2k_addr_t base, + e2k_addr_t cr_ind); +extern void 
kvm_put_active_cr0_lo_value(unsigned long cr0_lo_value, + e2k_addr_t base, e2k_addr_t cr_ind); +extern void kvm_put_active_cr0_hi_value(unsigned long cr0_hi_value, + e2k_addr_t base, e2k_addr_t cr_ind); +extern void kvm_put_active_cr1_lo_value(unsigned long cr1_lo_value, + e2k_addr_t base, e2k_addr_t cr_ind); +extern void kvm_put_active_cr1_hi_value(unsigned long cr1_hi_value, + e2k_addr_t base, e2k_addr_t cr_ind); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* pure guest kernel (not paravirtualized based on pv_ops) */ + +/* + * Procedure chain stack items access + */ +static inline unsigned long +get_active_cr0_lo_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return kvm_get_active_cr0_lo_value(base, cr_ind); +} +static inline unsigned long +get_active_cr0_hi_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return kvm_get_active_cr0_hi_value(base, cr_ind); +} +static inline unsigned long +get_active_cr1_lo_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return kvm_get_active_cr1_lo_value(base, cr_ind); +} +static inline unsigned long +get_active_cr1_hi_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return kvm_get_active_cr1_hi_value(base, cr_ind); +} +static inline void +put_active_cr0_lo_value(unsigned long cr0_lo_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + kvm_put_active_cr0_lo_value(cr0_lo_value, base, cr_ind); +} +static inline void +put_active_cr0_hi_value(unsigned long cr0_hi_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + kvm_put_active_cr0_hi_value(cr0_hi_value, base, cr_ind); +} +static inline void +put_active_cr1_lo_value(unsigned long cr1_lo_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + kvm_put_active_cr1_lo_value(cr1_lo_value, base, cr_ind); +} +static inline void +put_active_cr1_hi_value(unsigned long cr1_hi_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + kvm_put_active_cr1_hi_value(cr1_hi_value, base, cr_ind); +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! 
__ASSEMBLY__ */ +#endif /* _E2K_KVM_GUEST_HW_STACKS_H_ */ + + diff --git a/arch/e2k/include/asm/kvm/guest/io.h b/arch/e2k/include/asm/kvm/guest/io.h new file mode 100644 index 000000000000..74007debe95f --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/io.h @@ -0,0 +1,426 @@ + +#ifndef _E2K_KVM_GUEST_IO_H_ +#define _E2K_KVM_GUEST_IO_H_ + +#include + +#include + +#define GUEST_IO_PORTS_ADDRESS(port) (GUEST_IO_PORTS_VIRT_BASE + (port)) +static inline void +KVM_DEBUG_OUTB(u8 byte, u16 port) +{ + u8 __iomem *io_port = (u8 __iomem *)GUEST_IO_PORTS_ADDRESS(port); + + *io_port = byte; + wmb(); /* wait for write completed */ +} + +static inline u8 +KVM_DEBUG_INB(u16 port) +{ + u8 __iomem *io_port = (u8 __iomem *)GUEST_IO_PORTS_ADDRESS(port); + u8 data; + + data = *io_port; + rmb(); /* wait for read completed */ + return data; +} + +static inline u32 +KVM_DEBUG_INL(u16 port) +{ + u32 __iomem *io_port = (u32 __iomem *)GUEST_IO_PORTS_ADDRESS(port); + u32 data; + + data = *io_port; + rmb(); /* wait for read completed */ + return data; +} + +extern void kvm_writeb(u8 b, volatile void __iomem *addr); +extern void kvm_writew(u16 w, volatile void __iomem *addr); +extern void kvm_writel(u32 l, volatile void __iomem *addr); +extern void kvm_writell(u64 q, volatile void __iomem *addr); + +extern u8 kvm_readb(const volatile void __iomem *addr); +extern u16 kvm_readw(const volatile void __iomem *addr); +extern u32 kvm_readl(const volatile void __iomem *addr); +extern u64 kvm_readll(const volatile void __iomem *addr); + +extern void boot_kvm_writeb(u8 b, void __iomem *addr); +extern void boot_kvm_writew(u16 w, void __iomem *addr); +extern void boot_kvm_writel(u32 l, void __iomem *addr); +extern void boot_kvm_writell(u64 q, void __iomem *addr); + +extern u8 boot_kvm_readb(void __iomem *addr); +extern u16 boot_kvm_readw(void __iomem *addr); +extern u32 boot_kvm_readl(void __iomem *addr); +extern u64 boot_kvm_readll(void __iomem *addr); + +extern u8 kvm_inb(unsigned short port); +extern 
u16 kvm_inw(unsigned short port); +extern u32 kvm_inl(unsigned short port); + +extern void kvm_outb(unsigned char byte, unsigned short port); +extern void kvm_outw(unsigned short halfword, unsigned short port); +extern void kvm_outl(unsigned int word, unsigned short port); + +extern void kvm_outsb(unsigned short port, const void *src, unsigned long count); +extern void kvm_outsw(unsigned short port, const void *src, unsigned long count); +extern void kvm_outsl(unsigned short port, const void *src, unsigned long count); +extern void kvm_insb(unsigned short port, void *src, unsigned long count); +extern void kvm_insw(unsigned short port, void *src, unsigned long count); +extern void kvm_insl(unsigned short port, void *src, unsigned long count); +extern void kvm_conf_inb(unsigned int domain, unsigned int bus, + unsigned long port, u8 *byte); +extern void kvm_conf_inw(unsigned int domain, unsigned int bus, + unsigned long port, u16 *hword); +extern void kvm_conf_inl(unsigned int domain, unsigned int bus, + unsigned long port, u32 *word); +extern void kvm_conf_outb(unsigned int domain, unsigned int bus, + unsigned long port, u8 byte); +extern void kvm_conf_outw(unsigned int domain, unsigned int bus, + unsigned long port, u16 hword); +extern void kvm_conf_outl(unsigned int domain, unsigned int bus, + unsigned long port, u32 word); + +extern unsigned long kvm_notify_io(unsigned int notifier_io); + +extern int __init kvm_arch_pci_init(void); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops */ + +static inline void kvm_hv_writeb(u8 b, volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_writeb(b, addr); + + kvm_writeb(b, addr); +} +static inline void kvm_hv_writew(u16 w, volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_writew(w, addr); + + kvm_writew(w, addr); +} +static inline void kvm_hv_writel(u32 l, volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_writel(l, addr); + + 
kvm_writel(l, addr); +} +static inline void kvm_hv_writeq(u64 q, volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_writeq(q, addr); + + kvm_writell(q, addr); +} + +static inline void boot_writeb(u8 b, void __iomem *addr) +{ + boot_kvm_writeb(b, addr); +} +static inline void boot_writew(u16 w, void __iomem *addr) +{ + boot_kvm_writew(w, addr); +} +static inline void boot_writel(u32 l, void __iomem *addr) +{ + boot_kvm_writel(l, addr); +} +static inline void boot_writeq(u64 l, void __iomem *addr) +{ + boot_kvm_writell(l, addr); +} + +static inline u8 kvm_hv_readb(const volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_readb(addr); + + return kvm_readb(addr); +} +static inline u16 kvm_hv_readw(const volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_readw(addr); + + return kvm_readw(addr); +} +static inline u32 kvm_hv_readl(const volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_readl(addr); + + return kvm_readl(addr); +} +static inline u64 kvm_hv_readq(const volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_readq(addr); + + return kvm_readll(addr); +} + +static inline u8 boot_readb(void __iomem *addr) +{ + return boot_kvm_readb(addr); +} +static inline u16 boot_readw(void __iomem *addr) +{ + return boot_kvm_readw(addr); +} +static inline u32 boot_readl(void __iomem *addr) +{ + return boot_kvm_readl(addr); +} +static inline u64 boot_readq(void __iomem *addr) +{ + return boot_kvm_readll(addr); +} + +/* + * _relaxed() accessors. 
+ */ +static inline u8 kvm_readb_relaxed(const volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_readb_relaxed(addr); + + return kvm_readb(addr); +} + +static inline u16 kvm_readw_relaxed(const volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_readw_relaxed(addr); + + return kvm_readw(addr); +} + +static inline u32 kvm_readl_relaxed(const volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_readl_relaxed(addr); + + return kvm_readl(addr); +} + +static inline u64 kvm_readq_relaxed(const volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_readq_relaxed(addr); + + return kvm_readll(addr); +} + +static inline void kvm_writeb_relaxed(u8 value, volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_writeb_relaxed(value, addr); + + kvm_writeb(value, addr); +} + +static inline void kvm_writew_relaxed(u16 value, volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_writew_relaxed(value, addr); + + kvm_writew(value, addr); +} + +static inline void kvm_writel_relaxed(u32 value, volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_writel_relaxed(value, addr); + + kvm_writel(value, addr); +} + +static inline void kvm_writeq_relaxed(u64 value, volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_writeq_relaxed(value, addr); + + kvm_writell(value, addr); +} + + +static inline u8 kvm_hv_inb(unsigned long port) +{ + if (IS_HV_GM()) + return native_inb(port); + + return kvm_inb(port); +} +static inline u16 kvm_hv_inw(unsigned long port) +{ + if (IS_HV_GM()) + return native_inw(port); + + return kvm_inw(port); +} +static inline u32 kvm_hv_inl(unsigned long port) +{ + if (IS_HV_GM()) + return native_inl(port); + + return kvm_inl(port); +} +static inline void kvm_hv_outb(unsigned char byte, unsigned long port) +{ + if (IS_HV_GM()) + return native_outb(byte, port); + + kvm_outb(byte, port); +} +static inline void kvm_hv_outw(unsigned short halfword, unsigned long port) +{ + if 
(IS_HV_GM()) + return native_outw(halfword, port); + + kvm_outw(halfword, port); +} +static inline void kvm_hv_outl(unsigned int word, unsigned long port) +{ + if (IS_HV_GM()) + return native_outl(word, port); + + kvm_outl(word, port); +} + +static inline void kvm_hv_outsb(unsigned short port, const void *src, unsigned long count) +{ + if (IS_HV_GM()) + return native_outsb(port, src, count); + + kvm_outsb(port, src, count); +} +static inline void kvm_hv_outsw(unsigned short port, const void *src, unsigned long count) +{ + if (IS_HV_GM()) + return native_outsw(port, src, count); + + kvm_outsw(port, src, count); +} +static inline void kvm_hv_outsl(unsigned short port, const void *src, unsigned long count) +{ + if (IS_HV_GM()) + return native_outsl(port, src, count); + + kvm_outsl(port, src, count); +} + +static inline void kvm_hv_insb(unsigned short port, void *dst, unsigned long count) +{ + if (IS_HV_GM()) + return native_insb(port, dst, count); + + kvm_insb(port, dst, count); +} +static inline void kvm_hv_insw(unsigned short port, void *dst, unsigned long count) +{ + if (IS_HV_GM()) + return native_insw(port, dst, count); + + kvm_insw(port, dst, count); +} +static inline void kvm_hv_insl(unsigned short port, void *dst, unsigned long count) +{ + if (IS_HV_GM()) + return native_insl(port, dst, count); + + kvm_insl(port, dst, count); +} + +static inline void +conf_inb(unsigned int domain, unsigned int bus, unsigned long port, u8 *byte) +{ + if (IS_HV_GM()) + return native_conf_inb(domain, bus, port, byte); + + kvm_conf_inb(domain, bus, port, byte); +} +static inline void +conf_inw(unsigned int domain, unsigned int bus, unsigned long port, u16 *hword) +{ + if (IS_HV_GM()) + return native_conf_inw(domain, bus, port, hword); + + kvm_conf_inw(domain, bus, port, hword); +} +static inline void +conf_inl(unsigned int domain, unsigned int bus, unsigned long port, u32 *word) +{ + if (IS_HV_GM()) + return native_conf_inl(domain, bus, port, word); + + kvm_conf_inl(domain, bus, 
port, word); +} +static inline void +conf_outb(unsigned int domain, unsigned int bus, unsigned long port, u8 byte) +{ + if (IS_HV_GM()) + return native_conf_outb(domain, bus, port, byte); + + kvm_conf_outb(domain, bus, port, byte); +} +static inline void +conf_outw(unsigned int domain, unsigned int bus, unsigned long port, u16 hword) +{ + if (IS_HV_GM()) + return native_conf_outw(domain, bus, port, hword); + + kvm_conf_outw(domain, bus, port, hword); +} +static inline void +conf_outl(unsigned int domain, unsigned int bus, unsigned long port, u32 word) +{ + if (IS_HV_GM()) + return native_conf_outl(domain, bus, port, word); + + kvm_conf_outl(domain, bus, port, word); +} + +static inline void boot_debug_cons_outb(u8 byte, u16 port) +{ + KVM_DEBUG_OUTB(byte, port); +} +static inline u8 boot_debug_cons_inb(u16 port) +{ + return KVM_DEBUG_INB(port); +} +static inline u32 boot_debug_cons_inl(u16 port) +{ + return KVM_DEBUG_INL(port); +} +static inline void debug_cons_outb(u8 byte, u16 port) +{ + KVM_DEBUG_OUTB(byte, port); +} +static inline void debug_cons_outb_p(u8 byte, u16 port) +{ + KVM_DEBUG_OUTB(byte, port); +} +static inline u8 debug_cons_inb(u16 port) +{ + return KVM_DEBUG_INB(port); +} +static inline u32 debug_cons_inl(u16 port) +{ + return KVM_DEBUG_INL(port); +} + +static inline int __init arch_pci_init(void) +{ + return kvm_arch_pci_init(); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* _E2K_KVM_GUEST_IO_H_ */ diff --git a/arch/e2k/include/asm/kvm/guest/irq.h b/arch/e2k/include/asm/kvm/guest/irq.h new file mode 100644 index 000000000000..34251df86379 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/irq.h @@ -0,0 +1,85 @@ + +#ifndef __E2K_ASM_KVM_GUEST_IRQ_H_ +#define __E2K_ASM_KVM_GUEST_IRQ_H_ + +#include +#include +#include + +/* Interrupt types. 
*/ +typedef enum kvm_irq_type { + IRQT_UNBOUND = 0, + IRQT_VIRQ, + IRQT_IPI, +} kvm_irq_type_t; + +#define KVM_NR_IRQS NR_IRQS /* now limited by common NR IRQS */ + /* as on host */ +#define KVM_NR_VIRQS_PER_CPU 4 +#define KVM_NR_VCPUS (KVM_NR_VIRQS_PER_CPU * NR_CPUS) +/* + * Modes to handle Virtual IRQs (see field 'flags' below) + */ +#define BY_DIRECT_INJ_VIRQ_MODE 3 /* handle VIRQ by direct */ + /* injection on VCPU */ +#define BY_DIRECT_INJ_VIRQ_FLAG (1UL << BY_DIRECT_INJ_VIRQ_MODE) + +static inline unsigned long +kvm_get_default_virq_flags(int virq_id) +{ + unsigned long def_flags = 0; + +#ifdef CONFIG_DIRECT_VIRQ_INJECTION + def_flags |= BY_DIRECT_INJ_VIRQ_FLAG; +#endif /* CONFIG_DIRECT_VIRQ_INJECTION */ + + return def_flags; +} + +typedef struct kvm_virq_info { + unsigned long mode; /* handling mode of virtual IRQ */ + /* (see above) */ + unsigned long flags; /* flags of virtual IRQ (see above) */ + int virq_nr; /* # of VIRQ */ + int gpid_nr; /* guest kernel thread ID on host */ + void *dev_id; /* VIRQ device ID */ + irq_handler_t handler; /* VIRQ handler */ + atomic_t *count; /* pointer to current atomic counter */ + /* of received VIRQs */ + /* counter is common for host & guest */ + struct task_struct *task; /* kernel thread task to handle VIRQ */ +} kvm_virq_info_t; + +/* + * Packed IRQ information: + * type - enum kvm_irq_type + * cpu - cpu this event channel is bound to + * index - type-specific information: + * PIRQ - vector, with MSB being "needs EIO" + * VIRQ - virq number + * IPI - IPI vector + * EVTCHN - + */ +typedef struct kvm_irq_info { + kvm_irq_type_t type; /* type */ + int cpu; /* cpu bound (-1 if not bound) */ + bool active; /* IRQ is active */ + + union { /* type-specific info */ + kvm_virq_info_t virq; + } u; +} kvm_irq_info_t; + +extern int kvm_request_virq(int virq, irq_handler_t handler, int cpu, + unsigned long irqflags, const char *name, void *dev); +static inline int +kvm_request_direct_virq(int virq, irq_handler_t handler, int cpu, 
+ const char *name, void *dev) +{ + return kvm_request_virq(virq, handler, cpu, + BY_DIRECT_INJ_VIRQ_FLAG, name, dev); +} + +extern int kvm_free_virq(int virq, int cpu, void *dev); + +#endif /* __E2K_ASM_KVM_GUEST_IRQ_H_ */ diff --git a/arch/e2k/include/asm/kvm/guest/machdep.h b/arch/e2k/include/asm/kvm/guest/machdep.h new file mode 100644 index 000000000000..3e5fdf8e124f --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/machdep.h @@ -0,0 +1,30 @@ +#ifndef _E2K_KVM_GUEST_MACHDEP_H_ +#define _E2K_KVM_GUEST_MACHDEP_H_ + +#include +#include + +#ifdef __KERNEL__ + +#ifdef CONFIG_VIRTUALIZATION + +typedef struct guest_machdep { + /* only for guest kernel and machines */ + int id; /* guest machine Id */ + int rev; /* guest VCPU revision */ + unsigned char iset_ver; /* Instruction set version */ + + /* guest interface functions */ +} guest_machdep_t; + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* It is pure guest kernel (not paravirtualized based on pv_ops) */ +typedef struct host_machdep { + /* nothing to support and do */ +} host_machdep_t; +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* CONFIG_VIRTUALIZATION */ +#endif /* __KERNEL__ */ + +#endif /* _E2K_KVM_GUEST_MACHDEP_H_ */ diff --git a/arch/e2k/include/asm/kvm/guest/mm_hooks.h b/arch/e2k/include/asm/kvm/guest/mm_hooks.h new file mode 100644 index 000000000000..c3b05c854e91 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/mm_hooks.h @@ -0,0 +1,19 @@ +#ifndef __ASM_KVM_GUEST_MM_HOOKS_H +#define __ASM_KVM_GUEST_MM_HOOKS_H + +#ifdef __KERNEL__ + +extern void kvm_get_mm_notifier_locked(struct mm_struct *mm); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +static inline void +get_mm_notifier_locked(struct mm_struct *mm) +{ + /* create mm notifier to trace some events over mm */ + kvm_get_mm_notifier_locked(mm); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __KERNEL__ */ +#endif /* __ASM_KVM_GUEST_MM_HOOKS_H */ diff --git a/arch/e2k/include/asm/kvm/guest/mmu.h 
b/arch/e2k/include/asm/kvm/guest/mmu.h new file mode 100644 index 000000000000..ac57c7be3ed6 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/mmu.h @@ -0,0 +1,162 @@ +#ifndef __ASM_KVM_GUEST_MMU_H +#define __ASM_KVM_GUEST_MMU_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +extern void kvm_recovery_faulted_tagged_store(e2k_addr_t address, u64 wr_data, + u32 data_tag, u64 st_rec_opc, u64 data_ext, u32 data_ext_tag, + u64 opc_ext, int chan, int qp_store, int atomic_store); +extern void kvm_recovery_faulted_load(e2k_addr_t address, u64 *ld_val, + u8 *data_tag, u64 ld_rec_opc, int chan, + tc_cond_t cond); +extern void kvm_recovery_faulted_move(e2k_addr_t addr_from, e2k_addr_t addr_to, + e2k_addr_t addr_to_hi, int vr, u64 ld_rec_opc, int chan, + int qp_load, int atomic_load, u32 first_time, + tc_cond_t cond); +extern void kvm_recovery_faulted_load_to_greg(e2k_addr_t address, + u32 greg_num_d, int vr, u64 ld_rec_opc, int chan, + int qp_load, int atomic_load, + void *saved_greg_lo, void *saved_greg_hi, + tc_cond_t cond); +extern void kvm_move_tagged_word(e2k_addr_t addr_from, e2k_addr_t addr_to); +extern void kvm_move_tagged_dword(e2k_addr_t addr_from, e2k_addr_t addr_to); +extern void kvm_move_tagged_qword(e2k_addr_t addr_from, e2k_addr_t addr_to); + +static inline void +kvm_handle_mpdma_fault(e2k_addr_t hva) +{ + /* model of sic NBSR is not used on guest */ +} + +static inline bool +kvm_is_guest_kernel_gregs(struct thread_info *ti, + unsigned greg_num_d, u64 **greg_copy) +{ + if (HOST_KERNEL_GREGS_PAIR_MASK == 0 || + !(HOST_KERNEL_GREGS_PAIR_MASK & (1UL << greg_num_d))) + /* register is not used by host and guest */ + /* to support virtualization */ + return false; + + *greg_copy = ti->h_gregs.g[greg_num_d - HOST_GREGS_PAIRS_START].xreg; + return true; +} + +#ifdef CONFIG_KVM_GUEST_KERNEL + +static inline int +guest_addr_to_host(void **addr, const pt_regs_t *regs) +{ + return native_guest_addr_to_host(addr); +} + +static inline void * 
+guest_ptr_to_host(void *ptr, int size, const pt_regs_t *regs) +{ + /* there are not any guests, so nothing convertion */ + return native_guest_ptr_to_host(ptr, size); +} + +static inline bool +is_guest_kernel_gregs(struct thread_info *ti, + unsigned greg_num_d, u64 **greg_copy) +{ + return kvm_is_guest_kernel_gregs(ti, greg_num_d, greg_copy); +} +static inline void +recovery_faulted_tagged_store(e2k_addr_t address, u64 wr_data, + u32 data_tag, u64 st_rec_opc, u64 data_ext, u32 data_ext_tag, + u64 opc_ext, int chan, int qp_store, int atomic_store) +{ + if (likely(IS_HV_GM())) + native_recovery_faulted_tagged_store(address, wr_data, + data_tag, st_rec_opc, data_ext, data_ext_tag, + opc_ext, chan, qp_store, atomic_store); + else + kvm_recovery_faulted_tagged_store(address, wr_data, + data_tag, st_rec_opc, data_ext, data_ext_tag, + opc_ext, chan, qp_store, atomic_store); +} +static inline void +recovery_faulted_load(e2k_addr_t address, u64 *ld_val, u8 *data_tag, + u64 ld_rec_opc, int chan, + tc_cond_t cond) +{ + if (likely(IS_HV_GM())) + native_recovery_faulted_load(address, ld_val, + data_tag, ld_rec_opc, chan); + else + kvm_recovery_faulted_load(address, ld_val, + data_tag, ld_rec_opc, chan, cond); +} +static inline void +recovery_faulted_move(e2k_addr_t addr_from, e2k_addr_t addr_to, + e2k_addr_t addr_to_hi, int vr, u64 ld_rec_opc, int chan, + int qp_load, int atomic_load, u32 first_time, + tc_cond_t cond) +{ + if (likely(IS_HV_GM())) + native_recovery_faulted_move(addr_from, addr_to, + addr_to_hi, vr, ld_rec_opc, chan, + qp_load, atomic_load, first_time); + else + kvm_recovery_faulted_move(addr_from, addr_to, + addr_to_hi, vr, ld_rec_opc, chan, + qp_load, atomic_load, first_time, cond); +} +static inline void +recovery_faulted_load_to_greg(e2k_addr_t address, u32 greg_num_d, int vr, + u64 ld_rec_opc, int chan, int qp_load, int atomic_load, + void *saved_greg_lo, void *saved_greg_hi, + tc_cond_t cond) +{ + if (likely(IS_HV_GM())) + 
native_recovery_faulted_load_to_greg(address, greg_num_d, + vr, ld_rec_opc, chan, qp_load, atomic_load, + saved_greg_lo, saved_greg_hi); + else + kvm_recovery_faulted_load_to_greg(address, greg_num_d, + vr, ld_rec_opc, chan, qp_load, atomic_load, + saved_greg_lo, saved_greg_hi, cond); +} +static inline void +move_tagged_word(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + if (likely(IS_HV_GM())) + native_move_tagged_word(addr_from, addr_to); + else + kvm_move_tagged_word(addr_from, addr_to); +} +static inline void +move_tagged_dword(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + if (likely(IS_HV_GM())) + native_move_tagged_dword(addr_from, addr_to); + else + kvm_move_tagged_dword(addr_from, addr_to); +} +static inline void +move_tagged_qword(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + if (likely(IS_HV_GM())) + native_move_tagged_qword(addr_from, addr_to); + else + kvm_move_tagged_qword(addr_from, addr_to); +} + +static inline void +handle_mpdma_fault(e2k_addr_t hva) +{ + kvm_handle_mpdma_fault(hva); +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_KVM_GUEST_MMU_H */ diff --git a/arch/e2k/include/asm/kvm/guest/mmu_context.h b/arch/e2k/include/asm/kvm/guest/mmu_context.h new file mode 100644 index 000000000000..cef5f426d5a6 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/mmu_context.h @@ -0,0 +1,29 @@ +#ifndef __ASM_KVM_GUEST_MMU_CONTEXT_H +#define __ASM_KVM_GUEST_MMU_CONTEXT_H + +#ifdef __KERNEL__ + +#include + +extern void kvm_activate_mm(struct mm_struct *active_mm, + struct mm_struct *mm); +extern void kvm_get_mm_notifier_locked(struct mm_struct *mm); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +static inline void +activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) +{ + kvm_activate_mm(active_mm, mm); +} +static inline void +deactivate_mm(struct task_struct *dead_task, struct mm_struct *mm) +{ + native_deactivate_mm(dead_task, mm); + if 
(!dead_task->clear_child_tid || (atomic_read(&mm->mm_users) <= 1)) + HYPERVISOR_switch_to_guest_init_mm(); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __KERNEL__ */ +#endif /* __ASM_KVM_GUEST_MMU_CONTEXT_H */ diff --git a/arch/e2k/include/asm/kvm/guest/pgatomic.h b/arch/e2k/include/asm/kvm/guest/pgatomic.h new file mode 100644 index 000000000000..5c2aad990457 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/pgatomic.h @@ -0,0 +1,134 @@ +/* + * E2K page table atomic update operations. + * + * Copyright 2018 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_GUEST_PGATOMIC_H +#define _E2K_KVM_GUEST_PGATOMIC_H + +#include + +#include +#include +#include + +#ifdef CONFIG_KVM_SHADOW_PT +extern pgprot_t kvm_pt_atomic_update(struct mm_struct *mm, + unsigned long addr, pgprot_t *ptp, + pt_atomic_op_t atomic_op, pgprotval_t prot_mask); +extern pgprot_t kvm_pt_atomic_clear_relaxed(pgprotval_t ptot_mask, + pgprot_t *pgprot); + +extern pte_t kvm_get_pte_for_address(struct vm_area_struct *vma, + e2k_addr_t address); + +static inline pgprotval_t +kvm_pt_set_wrprotect_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + if (IS_HV_MMU_TDP()) { + return native_pt_set_wrprotect_atomic(&pgprot->pgprot); + } else { + return pgprot_val(kvm_pt_atomic_update(mm, addr, pgprot, + ATOMIC_SET_WRPROTECT, _PAGE_INIT_WRITEABLE)); + } +} + +static inline pgprotval_t +kvm_pt_get_and_clear_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + if (IS_HV_MMU_TDP()) { + return native_pt_get_and_clear_atomic(&pgprot->pgprot); + } else { + return pgprot_val(kvm_pt_atomic_update(mm, addr, pgprot, + ATOMIC_GET_AND_CLEAR, _PAGE_INIT_VALID)); + } +} + +static inline pgprotval_t +kvm_pt_get_and_xchg_atomic(struct mm_struct *mm, unsigned long addr, + pgprotval_t newval, pgprot_t *pgprot) +{ + if (IS_HV_MMU_TDP()) { + return native_pt_get_and_xchg_atomic(newval, &pgprot->pgprot); + } else { + return pgprot_val(kvm_pt_atomic_update(mm, addr, 
pgprot, + ATOMIC_GET_AND_XCHG, newval)); + } +} + +static inline pgprotval_t +kvm_pt_clear_relaxed_atomic(pgprotval_t prot_mask, pgprot_t *pgprot) +{ + if (IS_HV_MMU_TDP()) { + return native_pt_clear_relaxed_atomic(prot_mask, + &pgprot->pgprot); + } else { + return pgprot_val(kvm_pt_atomic_clear_relaxed(prot_mask, + pgprot)); + } +} + +static inline pgprotval_t +kvm_pt_clear_young_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + if (IS_HV_MMU_TDP()) { + return native_pt_clear_young_atomic(&pgprot->pgprot); + } else { + return pgprot_val(kvm_pt_atomic_update(mm, addr, pgprot, + ATOMIC_TEST_AND_CLEAR_YOUNG, + _PAGE_INIT_ACCESSED)); + } +} +#elif defined(CONFIG_KVM_GUEST_KERNEL) + #error "CONFIG_KVM_SHADOW_PT should be set for guest paravirtualized kernel" +#endif /* CONFIG_KVM_SHADOW_PT */ + +#if defined(CONFIG_KVM_GUEST_KERNEL) +/* It is native guest kernel (without paravirtualization on pv_ops) */ + +static inline pgprotval_t +pt_set_wrprotect_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + return kvm_pt_set_wrprotect_atomic(mm, addr, pgprot); +} + +static inline pgprotval_t +pt_get_and_clear_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + return kvm_pt_get_and_clear_atomic(mm, addr, pgprot); +} + +static inline pgprotval_t +pt_get_and_xchg_atomic(struct mm_struct *mm, unsigned long addr, + pgprotval_t newval, pgprot_t *pgprot) +{ + return kvm_pt_get_and_xchg_atomic(mm, addr, newval, pgprot); +} + +static inline pgprotval_t +pt_clear_relaxed_atomic(pgprotval_t mask, pgprot_t *pgprot) +{ + return kvm_pt_clear_relaxed_atomic(mask, pgprot); +} + +static inline pgprotval_t +pt_clear_young_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + return kvm_pt_clear_young_atomic(mm, addr, pgprot); +} + +static inline pte_t get_pte_for_address(struct vm_area_struct *vma, + e2k_addr_t address) +{ + return kvm_get_pte_for_address(vma, address); +} +#endif /* CONFIG_KVM_GUEST_KERNEL 
*/ + +#endif /* ! _E2K_KVM_GUEST_PGATOMIC_H */ diff --git a/arch/e2k/include/asm/kvm/guest/process.h b/arch/e2k/include/asm/kvm/guest/process.h new file mode 100644 index 000000000000..e669bea48020 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/process.h @@ -0,0 +1,534 @@ +/* + * KVM guest kernel processes support + * Copyright 2011 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_GUEST_PROCESS_H +#define _E2K_KVM_GUEST_PROCESS_H + +#include +#include +#include +#include + +/* real flush of hardware stacks should be done by host hypercall */ +/* so here nothing to do */ +#ifdef CONFIG_KVM_GUEST_HW_PV +# define KVM_FLUSHCPU \ +do \ + if (IS_HV_GM()) { \ + NATIVE_FLUSHCPU; \ + } \ +while (false) +# define KVM_FLUSHR \ +do \ + if (IS_HV_GM()) { \ + NATIVE_FLUSHR; \ + } \ +while (false) +# define KVM_FLUSHC \ +do \ + if (IS_HV_GM()) { \ + NATIVE_FLUSHC; \ + } \ +while (false) +# define BOOT_KVM_FLUSHCPU \ +do \ + if (BOOT_IS_HV_GM()) { \ + NATIVE_FLUSHCPU; \ + } \ +while (false) +# define BOOT_KVM_FLUSHR \ +do \ + if (BOOT_IS_HV_GM()) { \ + NATIVE_FLUSHR; \ + } \ +while (false) +# define BOOT_KVM_FLUSHC \ +do \ + if (BOOT_IS_HV_GM()) { \ + NATIVE_FLUSHC; \ + } \ +while (false) +#else /* ! 
CONFIG_KVM_GUEST_HW_PV */ +# define KVM_FLUSHCPU +# define KVM_FLUSHR +# define KVM_FLUSHC +# define BOOT_KVM_FLUSHCPU +# define BOOT_KVM_FLUSHR +# define BOOT_KVM_FLUSHC +#endif /* CONFIG_KVM_GUEST_HW_PV */ + +#define kvm_kernel_mode(regs) kvm_from_kernel_IP((regs)->crs.cr0_hi) + +#define kvm_from_trap_on_kernel(regs) \ + is_trap_from_kernel(regs, GUEST_TASK_SIZE) + +/* + * to define kernel_mode in trap we must use trap_ip from trap celler + * (in user code may be sys_call and psr may be after sys_call) + */ +#define kvm_trap_kernel_mode(regs) \ + (kvm_kernel_mode(regs) && kvm_from_trap_on_kernel(regs)) +#define kvm_trap_user_mode(regs) \ + (guest_user_mode(regs) && !LIGHT_HYPERCALL_MODE(regs)) + +static inline void KVM_COPY_STACKS_TO_MEMORY(void) +{ + if (IS_HV_GM()) + NATIVE_COPY_STACKS_TO_MEMORY(); + else + HYPERVISOR_copy_stacks_to_memory(); +} + +/* own VCPU state: directly accessible through global registers */ +static inline kvm_vcpu_state_t *kvm_get_vcpu_state(void) +{ + unsigned long vcpu_base; + + KVM_GET_VCPU_STATE_BASE(vcpu_base); + return (kvm_vcpu_state_t *)(vcpu_base); +} + +/* + * Restore proper psize field of WD register + */ +static inline void +kvm_restore_wd_register_psize(e2k_wd_t wd_from) +{ + HYPERVISOR_update_wd_psize(wd_from.WD_psize); +} + +/* + * Preserve current p[c]shtp as they indicate how much to FILL when returning + */ +static inline void +kvm_preserve_user_hw_stacks_to_copy(e2k_stacks_t *u_stacks, + e2k_stacks_t *cur_stacks) +{ + /* guest user hardware stacks sizes to copy should be updated */ + /* after copying and therefore are not preserve */ +} + +static __always_inline void +kvm_jump_to_ttable_entry(struct pt_regs *regs, enum restore_caller from) +{ + if (from & FROM_SYSCALL_N_PROT) { + switch (regs->kernel_entry) { + case 1: + case 3: + case 4: + KVM_WRITE_UPSR_REG(E2K_KERNEL_UPSR_ENABLED); + regs->stack_regs_saved = true; + __E2K_JUMP_WITH_ARGUMENTS_8(handle_sys_call, + regs->sys_func, + regs->args[1], regs->args[2], + 
regs->args[3], regs->args[4], + regs->args[5], regs->args[6], + regs); + default: + BUG(); + } + } else if (from & FROM_SYSCALL_PROT_8) { + /* the syscall restart is not yet implemented */ + BUG(); + } else if (from & FROM_SYSCALL_PROT_10) { + /* the syscall restart is not yet implemented */ + BUG(); + } else { + BUG(); + } +} + +static inline void kvm_clear_virt_thread_struct(thread_info_t *ti) +{ + /* guest PID/MMID's can be received only after registration on host */ + ti->gpid_nr = -1; + ti->gmmid_nr = -1; +} + +static inline void kvm_release_task_struct(struct task_struct *task) +{ + thread_info_t *ti; + int ret; + + ti = task_thread_info(task); + BUG_ON(ti == NULL); + if (ti->gpid_nr == -1) { + /* the process was not registered on host, nothing to do */ + BUG_ON(ti->gmmid_nr != -1); + return; + } + + ret = HYPERVISOR_release_task_struct(ti->gpid_nr); + if (ret != 0) { + pr_err("%s(): could not release task struct of %s (%d) " + "GPID #%d on host, error %d\n", + __func__, task->comm, task->pid, ti->gpid_nr, ret); + } +} + +/* + * These functions for guest kernel, see comment for virtualization at + * arch/e2k/include/asm/ptrace.h + * In this case guest is main kernel and here knows that it is guest + * Extra kernel is host + * + * Get/set kernel stack limits of area reserved at the top of hardware stacks + * Kernel areas include two part: + * guest kernel stack reserved area at top of stack + * host kernel stack reserved area at top of stack + */ + +static __always_inline e2k_size_t +kvm_get_hw_ps_user_size(hw_stack_t *hw_stacks) +{ + return get_hw_ps_user_size(hw_stacks); +} +static __always_inline e2k_size_t +kvm_get_hw_pcs_user_size(hw_stack_t *hw_stacks) +{ + return get_hw_pcs_user_size(hw_stacks); +} +static __always_inline void +kvm_set_hw_ps_user_size(hw_stack_t *hw_stacks, e2k_size_t u_ps_size) +{ + set_hw_ps_user_size(hw_stacks, u_ps_size); +} +static __always_inline void +kvm_set_hw_pcs_user_size(hw_stack_t *hw_stacks, e2k_size_t u_pcs_size) +{ + 
set_hw_pcs_user_size(hw_stacks, u_pcs_size); +} + +/* + * Table of pointers to VCPUs state. + * Own VCPU state pointer is loaded on some global registers to direct access + * Other VCPUs state pointers can be accessible through this table + */ +extern kvm_vcpu_state_t *vcpus_state[NR_CPUS]; + +static inline kvm_vcpu_state_t *kvm_get_the_vcpu_state(long vcpu_id) +{ + return vcpus_state[vcpu_id]; +} + +#define KVM_ONLY_SET_GUEST_GREGS(ti) \ + KVM_SET_VCPU_STATE_BASE(kvm_get_the_vcpu_state( \ + smp_processor_id())) + +/* guest kernel does not support own guests and cannot be run as host */ +/* so has not the problem - nothing to do */ +/* see arch/e2k/include/asm/process.h for more details why and how */ +#define KVM_GUEST_UPDATE_VCPU_THREAD_CONTEXT(task, ti, regs, gti, vcpu) +#define KVM_GUEST_CHECK_VCPU_THREAD_CONTEXT(__ti) + +extern void kvm_vcpu_boot_thread_init(struct task_struct *boot_task); +extern int kvm_copy_kernel_stacks(struct task_struct *new_task, + unsigned long fn, unsigned long arg); +extern void kvm_define_kernel_hw_stacks_sizes(hw_stack_t *hw_stacks); +extern void boot_kvm_define_kernel_hw_stacks_sizes(hw_stack_t *hw_stacks); +extern void kvm_define_user_hw_stacks_sizes(hw_stack_t *hw_stacks); + +extern void kvm_release_hw_stacks(thread_info_t *dead_ti); +extern void kvm_release_kernel_stacks(thread_info_t *dead_ti); +extern int kvm_kmem_area_host_chunk(e2k_addr_t stack_base, + e2k_size_t stack_size, int hw_flag); +extern void kvm_kmem_area_unhost_chunk(e2k_addr_t stack_base, + e2k_size_t stack_size); +extern int kvm_switch_to_new_user(e2k_stacks_t *stacks, hw_stack_t *hw_stacks, + e2k_addr_t cut_base, e2k_size_t cut_size, + e2k_addr_t entry_point, int cui, + unsigned long flags, bool kernel); + +extern int kvm_clone_prepare_spilled_user_stacks(e2k_stacks_t *child_stacks, + const e2k_mem_crs_t *child_crs, const struct pt_regs *regs, + struct sw_regs *new_sw_regs, struct thread_info *new_ti, + unsigned long clone_flags); +extern int 
kvm_copy_spilled_user_stacks(e2k_stacks_t *child_stacks, + e2k_mem_crs_t *child_crs, sw_regs_t *new_sw_regs, + thread_info_t *new_ti); + +extern int kvm_copy_user_stacks(unsigned long clone_flags, + e2k_addr_t new_stk_base, e2k_size_t new_stk_sz, + struct task_struct *new_task, pt_regs_t *regs); + +extern void kvm_fix_process_pt_regs(thread_info_t *ti, e2k_stacks_t *stacks, + pt_regs_t *regs, pt_regs_t *old_regs); + +extern void __init kvm_setup_arch(void); + +#ifdef COMMON_KERNEL_USER_HW_STACKS +/* + * Free guest kernel hardware stacks after completion of sys_execve() + * and switch to new user process. The new process executes on own stacks + * and old kernel hardware stacks on which was run do_execve() can be released + * only after switch to new user stacks. + * WARNING: probably release of stacks should be done earlier (not while exit + * from the process and deactivate mm), perhaps as pending work + */ +static inline void +kvm_free_old_kernel_hardware_stacks(void) +{ + thread_info_t *ti = current_thread_info(); + + if (!test_ts_flag(TS_MAPPED_HW_STACKS_INVALID)) + /* it is not process after sys_execve() */ + return; + + if (test_ts_flag(TS_MAPPED_HW_STACKS)) { + release_old_hw_stack_mappings(ti); + } else { + release_old_kernel_hardware_stacks(ti); + } + + clear_ts_flag(TS_MAPPED_HW_STACKS_INVALID); +} +#endif /* COMMON_KERNEL_USER_HW_STACKS */ + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* native guest kernel */ +#define E2K_FLUSHCPU KVM_FLUSHCPU +#define E2K_FLUSHR KVM_FLUSHR +#define E2K_FLUSHC KVM_FLUSHC +#define BOOT_FLUSHCPU KVM_FLUSHCPU +#define BOOT_FLUSHR KVM_FLUSHR +#define BOOT_FLUSHC KVM_FLUSHC + +#define ONLY_SET_GUEST_GREGS(ti) KVM_ONLY_SET_GUEST_GREGS(ti) + +#define UPDATE_VCPU_THREAD_CONTEXT(task, ti, regs, gti, vcpu) \ + KVM_GUEST_UPDATE_VCPU_THREAD_CONTEXT(task, ti, regs, gti, vcpu) +#define CHECK_VCPU_THREAD_CONTEXT(__ti) \ + KVM_GUEST_CHECK_VCPU_THREAD_CONTEXT(__ti) + +#define GOTO_RETURN_TO_PARAVIRT_GUEST(ret_value) +#define 
COND_GOTO_RETURN_TO_PARAVIRT_GUEST(cond, ret_value) +#define GOTO_DONE_TO_PARAVIRT_GUEST() +#define COND_GOTO_DONE_TO_PARAVIRT_GUEST(cond) + +#define do_map_user_hard_stack_to_kernel(node, kstart, ubase, size) \ + do_map_native_user_hard_stack_to_kernel(node, kstart, \ + ubase, size) +#define resume_vm_thread() /* none any virtual machines and threads */ + +static inline bool host_is_at_HV_GM_mode(void) +{ + /* the guest has not own guests, so cannot be as host */ + return false; +} + +static inline void COPY_STACKS_TO_MEMORY(void) +{ + KVM_COPY_STACKS_TO_MEMORY(); +} + +static inline void +restore_wd_register_psize(e2k_wd_t wd_from) +{ + kvm_restore_wd_register_psize(wd_from); +} + +static inline void +preserve_user_hw_stacks_to_copy(e2k_stacks_t *u_stacks, + e2k_stacks_t *cur_stacks) +{ + kvm_preserve_user_hw_stacks_to_copy(u_stacks, cur_stacks); +} + +static __always_inline void +host_exit_to_usermode_loop(struct pt_regs *regs, bool syscall, bool has_signal) +{ + /* native & guest kernels cannot be as host */ +} + +static __always_inline void +jump_to_ttable_entry(struct pt_regs *regs, enum restore_caller from) +{ + kvm_jump_to_ttable_entry(regs, from); +} + +static inline void +virt_cpu_thread_init(struct task_struct *boot_task) +{ + unsigned long vcpu_state_base; + + KVM_GET_VCPU_STATE_BASE(vcpu_state_base); + task_thread_info(boot_task)->vcpu_state_base = vcpu_state_base; +} + +static inline int +copy_kernel_stacks(struct task_struct *new_task, + unsigned long fn, unsigned long arg) +{ + if (IS_HV_GM()) + return native_copy_kernel_stacks(new_task, fn, arg); + else + return kvm_copy_kernel_stacks(new_task, fn, arg); +} +#ifdef COMMON_KERNEL_USER_HW_STACKS +static inline int +copy_user_stacks(unsigned long clone_flags, + e2k_addr_t new_stk_base, e2k_size_t new_stk_sz, + struct task_struct *new_task, pt_regs_t *regs) +{ + if (IS_HV_GM()) + return native_copy_user_stacks(clone_flags, new_stk_base, + new_stk_sz, new_task, regs); + else + return 
kvm_copy_user_stacks(clone_flags, new_stk_base, + new_stk_sz, new_task, regs); +} +#endif /* COMMON_KERNEL_USER_HW_STACKS */ + +static inline void +define_kernel_hw_stacks_sizes(hw_stack_t *hw_stacks) +{ + if (IS_HV_GM()) + native_do_define_kernel_hw_stacks_sizes(hw_stacks); + else + kvm_define_kernel_hw_stacks_sizes(hw_stacks); +} + +static inline void +boot_define_kernel_hw_stacks_sizes(hw_stack_t *hw_stacks) +{ + if (BOOT_IS_HV_GM()) + native_do_define_kernel_hw_stacks_sizes(hw_stacks); + else + boot_kvm_define_kernel_hw_stacks_sizes(hw_stacks); +} + +static inline void +define_user_hw_stacks_sizes(hw_stack_t *hw_stacks) +{ + if (IS_HV_GM()) + native_define_user_hw_stacks_sizes(hw_stacks); + else + kvm_define_user_hw_stacks_sizes(hw_stacks); +} + +#ifdef COMMON_KERNEL_USER_HW_STACKS +static inline void +release_hw_stacks(thread_info_t *dead_ti) +{ + if (IS_HV_GM()) { + native_release_hw_stacks(dead_ti); + } else { + kvm_release_hw_stacks(dead_ti); + } +} +static inline void +release_kernel_stacks(thread_info_t *dead_ti) +{ + if (IS_HV_GM()) { + native_release_kernel_stacks(dead_ti); + } else { + kvm_release_kernel_stacks(dead_ti); + } +} +#endif /* COMMON_KERNEL_USER_HW_STACKS */ + +#define GET_PARAVIRT_GUEST_MODE(pv_guest, regs) /* nothing to do */ + +static inline int +switch_to_new_user(e2k_stacks_t *stacks, hw_stack_t *hw_stacks, + e2k_addr_t cut_base, e2k_size_t cut_size, + e2k_addr_t entry_point, int cui, + unsigned long flags, bool kernel) +{ + if (likely(IS_HV_GM())) { + return native_switch_to_new_user(stacks, hw_stacks, + cut_base, cut_size, entry_point, cui, flags, kernel); + } else { + return kvm_switch_to_new_user(stacks, hw_stacks, + cut_base, cut_size, entry_point, cui, flags, kernel); + } +} + +static inline int +clone_prepare_spilled_user_stacks(e2k_stacks_t *child_stacks, + const e2k_mem_crs_t *child_crs, const struct pt_regs *regs, + struct sw_regs *new_sw_regs, struct thread_info *new_ti, + unsigned long clone_flags) +{ + if 
(likely(IS_HV_GM())) { + return native_clone_prepare_spilled_user_stacks(child_stacks, + child_crs, regs, new_sw_regs, new_ti, + clone_flags); + } else { + return kvm_clone_prepare_spilled_user_stacks(child_stacks, + child_crs, regs, new_sw_regs, new_ti, + clone_flags); + } +} + +static inline int +copy_spilled_user_stacks(e2k_stacks_t *child_stacks, e2k_mem_crs_t *child_crs, + sw_regs_t *new_sw_regs, thread_info_t *new_ti) +{ + if (likely(IS_HV_GM())) { + native_copy_spilled_user_stacks(child_stacks, child_crs, + new_sw_regs, new_ti); + return 0; + } else { + return kvm_copy_spilled_user_stacks(child_stacks, child_crs, + new_sw_regs, new_ti); + } +} + +#ifdef COMMON_KERNEL_USER_HW_STACKS +static inline void +free_old_kernel_hardware_stacks(void) +{ + if (likely(IS_HV_GM())) { + native_free_old_kernel_hardware_stacks(); + } else { + kvm_free_old_kernel_hardware_stacks(); + } +} +#endif /* COMMON_KERNEL_USER_HW_STACKS */ + +/* the function is not used in guest mode so only to compile without errors */ +static __always_inline __interrupt void +complete_switch_to_user_func(void) +{ + /* none own guests, so nothing to do in virtualization mode */ + /* but the function should switch interrupt control from UPSR to */ + /* PSR and set initial state of user UPSR */ + KVM_SET_USER_INITIAL_UPSR(E2K_USER_INITIAL_UPSR); +} +/* the function is not used in guest mode so only to compile without errors */ +static __always_inline __interrupt void +complete_go2user(thread_info_t *ti, long fn) +{ + /* none own guests, so nothing to do in virtualization mode */ + /* but the function should restore user UPSR state */ + KVM_WRITE_UPSR_REG(ti->upsr); +} + +#define clear_vm_thread_flags() /* own virtual machines is not */ + /* supported on guest */ + /* so nothing to clear */ + +static inline void +clear_virt_thread_struct(thread_info_t *ti) +{ + kvm_clear_virt_thread_struct(ti); +} + +static inline void virt_setup_arch(void) +{ + kvm_setup_arch(); +} + +static inline void 
free_virt_task_struct(struct task_struct *task) +{ + kvm_release_task_struct(task); +} + +#define usd_cannot_be_expanded(regs) user_stack_cannot_be_expanded() + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* !(_E2K_KVM_GUEST_PROCESS_H) */ diff --git a/arch/e2k/include/asm/kvm/guest/processor.h b/arch/e2k/include/asm/kvm/guest/processor.h new file mode 100644 index 000000000000..f38fe262109d --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/processor.h @@ -0,0 +1,64 @@ +/* + * KVM guest processor and processes support + * + * Copyright (C) 2014 MCST + */ + +#ifndef _E2K_KVM_GUEST_PROCESSOR_H_ +#define _E2K_KVM_GUEST_PROCESSOR_H_ + +#ifndef __ASSEMBLY__ + +#include +#include + +extern int kvm_prepare_start_thread_frames(unsigned long entry, + unsigned long sp); + +extern void kvm_default_idle(void); +extern void kvm_cpu_relax(void); +extern void kvm_cpu_relax_no_resched(void); + +/* defined at kernel/sched.c */ +extern void wake_up_idle_vcpu(int cpu); + +extern void kvm_print_machine_type_info(void); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* pure guest kernel (not paravirtualized based on pv_ops) */ + +#define paravirt_enabled() true +#define boot_paravirt_enabled() paravirt_enabled() + +static inline int +prepare_start_thread_frames(unsigned long entry, unsigned long sp) +{ + if (likely(IS_HV_GM())) { + return native_do_prepare_start_thread_frames(entry, sp); + } else { + return kvm_prepare_start_thread_frames(entry, sp); + } +} + +#define default_idle() kvm_default_idle() +#define cpu_relax() kvm_cpu_relax() +#define cpu_relax_no_resched() kvm_cpu_relax_no_resched() + +static inline void +print_machine_type_info(void) +{ + kvm_print_machine_type_info(); +} +static inline void +paravirt_banner(void) +{ + printk(KERN_INFO "Booting pure guest kernel (not paravirtualized " + "based on pv_ops)\n"); +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! 
__ASSEMBLY__ */ +#endif /* _E2K_KVM_GUEST_PROCESSOR_H_ */ + + diff --git a/arch/e2k/include/asm/kvm/guest/ptrace.h b/arch/e2k/include/asm/kvm/guest/ptrace.h new file mode 100644 index 000000000000..6c2979f2df50 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/ptrace.h @@ -0,0 +1,151 @@ +#ifndef __E2K_KVM_GUEST_PTRACE_H +#define __E2K_KVM_GUEST_PTRACE_H + +/* Does not include this header directly, include */ + +#include +#include +#include + +struct task_struct; + +static inline void +kvm_save_DAM(unsigned long long dam[DAM_ENTRIES_NUM]) +{ + int ret; + + ret = HYPERVISOR_get_DAM(dam, DAM_ENTRIES_NUM); + if (ret != 0) { + pr_err("%s(): could not receive DAM state, error %d\n", + __func__, ret); + } +} + +static inline void kvm_atomic_load_osgd_to_gd(void) +{ + /* FIXME: it is not now understand what to do */ +} + +static inline e2k_addr_t +kvm_check_is_user_address(struct task_struct *task, e2k_addr_t address) +{ + if (likely(address < GUEST_TASK_SIZE)) + return 0; + if (address < NATIVE_TASK_SIZE) { + pr_err("Address 0x%016lx is guest kernel address\n", + address); + return -1; + } + pr_err("Address 0x%016lx is host kernel address\n", + address); + return -1; +} +#define KVM_IS_GUEST_USER_ADDRESS_TO_PVA(task, address) \ + false /* pure guest kernel has not own guests */ +#define KVM_IS_GUEST_ADDRESS_TO_HOST(address) \ + IS_HOST_KERNEL_ADDRESS(address) + +/* guest page table is pseudo PT and only host PT is used */ +/* to translate any guest addresses */ +#define kvm_print_host_user_address_ptes(mm, address) \ +({ \ + HYPERVISOR_print_guest_user_address_ptes((mm)->gmmid_nr, address); \ +}) + +#ifdef CONFIG_KVM_GUEST_KERNEL + +#define user_mode(regs) is_user_mode(regs, GUEST_TASK_SIZE) +#define kernel_mode(regs) is_kernel_mode(regs, GUEST_TASK_SIZE) + +/* guest kernel can be: */ +/* user of host kernel, so USER MODE (pm = 0) */ +/* hardware virtualized guest kernel, so KERNEL MODE (pm = 1) */ +#define from_guest_kernel_mode(cr1_lo) \ + ((IS_HV_GM()) ? 
from_kernel_mode(cr1_lo) : \ + from_user_mode(cr1_lo)) +#define from_guest_kernel(cr0_hi, cr1_lo) \ + (from_guest_kernel_mode(cr1_lo) && \ + from_guest_kernel_IP(cr0_hi)) + +#define is_call_from_host_user(cr0_hi, cr1_lo) \ + (from_host_user_IP(cr0_hi) && from_host_user_mode(cr1_lo)) +#define is_call_from_host_user_IP(cr0_hi, cr1_lo, ignore_IP) \ + ((!(ignore_IP)) ? is_call_from_host_user(cr0_hi, cr1_lo) : \ + from_host_user_mode(cr1_lo)) +#define is_call_from_guest_user(cr0_hi, cr1_lo) \ + (from_guest_user_IP(cr0_hi) && from_guest_user_mode(cr1_lo)) +#define is_call_from_guest_user_IP(cr0_hi, cr1_lo, ignore_IP) \ + ((!(ignore_IP)) ? is_call_from_guest_user(cr0_hi, cr1_lo) : \ + from_guest_user_mode(cr1_lo)) +#define is_call_from_host_kernel(cr0_hi, cr1_lo) \ + (from_host_kernel_IP(cr0_hi) && from_host_kernel_mode(cr1_lo)) +#define is_call_from_host_kernel_IP(cr0_hi, cr1_lo, ignore_IP) \ + ((!(ignore_IP)) ? is_call_from_host_kernel(cr0_hi, cr1_lo) : \ + from_host_kernel_mode(cr1_lo)) +#define is_call_from_guest_kernel(cr0_hi, cr1_lo) \ + from_guest_kernel(cr0_hi, cr1_lo) +#define is_call_from_guest_kernel_IP(cr0_hi, cr1_lo, ignore_IP) \ + ((!(ignore_IP)) ? 
is_call_from_guest_kernel(cr0_hi, cr1_lo) : \ + from_guest_kernel_mode(cr1_lo)) + +/* macroses to detect guest traps on host */ +/* Gust has not own nested VM, so nothing guests exist */ +/* and macroses should always return 'false' */ +#define trap_on_guest(regs) \ + false /* own guest is not supported */ +#define trap_on_pv_hv_guest(vcpu, regs) \ + false /* own guest is not supported */ +/* trap occurred on guest user or kernel */ +#define guest_trap_on_host(regs) \ + false /* own guest is not supported */ +#define guest_trap_on_pv_hv_host(vcpu, regs) \ + false /* own guest is not supported */ +/* trap occurred on guest kernel or user, but in host mode */ +/* and the trap can be due to guest or not */ +#define host_trap_on_guest(regs) \ + false /* own guest is not supported */ +/* trap occurred on guest user or kernel or on host but due to guest */ +#define due_to_guest_trap_on_host(regs) \ + false /* own guest is not supported */ +#define due_to_guest_trap_on_pv_hv_host(vcpu, regs) \ + false /* own guest is not supported */ + +#define ON_HOST_KERNEL() false /* it is guest, not host */ +#define call_from_user_mode(cr0_hi, cr1_lo) \ + is_call_from_user(cr0_hi, cr1_lo, ON_HOST_KERNEL()) +#define call_from_kernel_mode(cr0_hi, cr1_lo) \ + is_call_from_kernel(cr0_hi, cr1_lo, ON_HOST_KERNEL()) +#define call_from_user(regs) \ + call_from_user_mode((regs)->crs.cr0_hi, (regs)->crs.cr1_lo) +#define call_from_kernel(regs) \ + call_from_kernel_mode((regs)->crs.cr0_hi, (regs)->crs.cr1_lo) + +#define SAVE_DAM(dam) kvm_save_DAM(dam) + +static inline void atomic_load_osgd_to_gd(void) +{ + kvm_atomic_load_osgd_to_gd(); +} + +/* it is pure KVM guest kernel (not paravirtualized based on pv_ops) */ +#define LIGHT_HYPERCALL_MODE(regs) 0 /* hypercalls not supported */ +#define TI_GENERIC_HYPERCALL_MODE(thread_info) 0 /* hypercalls not supported */ +#define GENERIC_HYPERCALL_MODE() 0 /* hypercalls not supported */ +#define IN_LIGHT_HYPERCALL() 0 /* hypercalls not supported */ +#define 
IN_GENERIC_HYPERCALL() 0 /* hypercalls not supported */ +#define IN_HYPERCALL() 0 /* hypercalls not supported */ + +static inline e2k_addr_t +check_is_user_address(struct task_struct *task, e2k_addr_t address) +{ + return kvm_check_is_user_address(task, address); +} +#define IS_GUEST_USER_ADDRESS_TO_PVA(task, address) \ + KVM_IS_GUEST_USER_ADDRESS_TO_PVA(task, address) +#define IS_GUEST_ADDRESS_TO_HOST(address) \ + KVM_IS_GUEST_ADDRESS_TO_HOST(address) +#define print_host_user_address_ptes(mm, address) \ + kvm_print_host_user_address_ptes(mm, address) +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __E2K_KVM_GUEST_PTRACE_H */ diff --git a/arch/e2k/include/asm/kvm/guest/pv_info.h b/arch/e2k/include/asm/kvm/guest/pv_info.h new file mode 100644 index 000000000000..16c3e1d4c2a1 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/pv_info.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2016 MCST, Salavat Gilyazov atic@mcst.ru + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + + +#ifndef __ASM_E2K_KVM_GUEST_PV_INFO_H +#define __ASM_E2K_KVM_GUEST_PV_INFO_H + +#include + +/* + * e2k kernel general info + */ +#define KVM_PAGE_OFFSET GUEST_PAGE_OFFSET +#define KVM_TASK_SIZE PAGE_OFFSET +#define KVM_VMALLOC_START GUEST_VMALLOC_START +#define KVM_VMALLOC_END GUEST_VMALLOC_END +#define KVM_VMEMMAP_START GUEST_VMEMMAP_START +#define KVM_VMEMMAP_END GUEST_VMEMMAP_END + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#define paravirt_enabled() true +#define boot_paravirt_enabled() paravirt_enabled() + +#define PAGE_OFFSET KVM_PAGE_OFFSET +#define TASK_SIZE PAGE_OFFSET +#define VMALLOC_START KVM_VMALLOC_START +#define VMALLOC_END KVM_VMALLOC_END +#define VMEMMAP_START KVM_VMEMMAP_START +#define VMEMMAP_END KVM_VMEMMAP_END + +#define BOOT_PAGE_OFFSET PAGE_OFFSET +#define BOOT_TASK_SIZE TASK_SIZE +#endif /* ! 
CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __ASM_E2K_KVM_GUEST_PV_INFO_H */ diff --git a/arch/e2k/include/asm/kvm/guest/regs_state.h b/arch/e2k/include/asm/kvm/guest/regs_state.h new file mode 100644 index 000000000000..a39b1ae09d8b --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/regs_state.h @@ -0,0 +1,470 @@ +#ifndef _E2K_KVM_GUEST_REGS_STATE_H +#define _E2K_KVM_GUEST_REGS_STATE_H + +#include +#include +#include +#include + +#ifndef __ASSEMBLY__ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif /* __ASSEMBLY__ */ + +extern void kvm_save_glob_regs(global_regs_t *gregs); +extern void kvm_save_glob_regs_dirty_bgr(global_regs_t *gregs); +extern void kvm_save_local_glob_regs(local_gregs_t *l_gregs, bool is_signal); +extern void kvm_restore_glob_regs(const global_regs_t *gregs); +extern void kvm_restore_local_glob_regs(const local_gregs_t *l_gregs, + bool is_signal); +extern void kvm_get_all_user_glob_regs(global_regs_t *gregs); + +static inline void +guest_save_glob_regs_v2(global_regs_t *gregs) +{ + kvm_guest_save_gregs_v2(gregs); +} + +static inline void +guest_save_glob_regs_v5(global_regs_t *gregs) +{ + kvm_guest_save_gregs_v5(gregs); +} + +static inline void +guest_save_glob_regs_dirty_bgr_v2(global_regs_t *gregs) +{ + kvm_guest_save_gregs_v2(gregs); +} + +static inline void +guest_save_glob_regs_dirty_bgr_v5(global_regs_t *gregs) +{ + kvm_guest_save_gregs_v5(gregs); +} + +static inline void +guest_save_local_glob_regs_v2(local_gregs_t *l_gregs, bool is_signal) +{ + kvm_guest_save_local_gregs_v2(l_gregs, is_signal); + if (KERNEL_GREGS_MAX_MASK & LOCAL_GREGS_USER_MASK) + copy_k_gregs_to_l_gregs(l_gregs, + ¤t_thread_info()->k_gregs); + if (HOST_KERNEL_GREGS_MASK & LOCAL_GREGS_USER_MASK) + copy_h_gregs_to_l_gregs(l_gregs, + ¤t_thread_info()->h_gregs); +} + +static inline void +guest_save_local_glob_regs_v5(local_gregs_t *l_gregs, bool is_signal) +{ + kvm_guest_save_local_gregs_v5(l_gregs, is_signal); + if 
(KERNEL_GREGS_MAX_MASK & LOCAL_GREGS_USER_MASK) + copy_k_gregs_to_l_gregs(l_gregs, + ¤t_thread_info()->k_gregs); + if (HOST_KERNEL_GREGS_MASK & LOCAL_GREGS_USER_MASK) + copy_h_gregs_to_l_gregs(l_gregs, + ¤t_thread_info()->h_gregs); +} + +static inline void +guest_restore_glob_regs_v2(const global_regs_t *gregs) +{ + kvm_guest_restore_gregs_v2(gregs); +} + +static inline void +guest_restore_glob_regs_v5(const global_regs_t *gregs) +{ + kvm_guest_restore_gregs_v5(gregs); +} + +static inline void +guest_restore_local_glob_regs_v2(const local_gregs_t *l_gregs, bool is_signal) +{ + kvm_guest_restore_local_gregs_v2(l_gregs, is_signal); + if (KERNEL_GREGS_MAX_MASK & LOCAL_GREGS_USER_MASK) + get_k_gregs_from_l_regs(¤t_thread_info()->k_gregs, + l_gregs); + if (HOST_KERNEL_GREGS_MASK & LOCAL_GREGS_USER_MASK) + get_h_gregs_from_l_regs(¤t_thread_info()->h_gregs, + l_gregs); +} + +static inline void +guest_restore_local_glob_regs_v5(const local_gregs_t *l_gregs, bool is_signal) +{ + kvm_guest_restore_local_gregs_v5(l_gregs, is_signal); + if (KERNEL_GREGS_MAX_MASK & LOCAL_GREGS_USER_MASK) + get_k_gregs_from_l_regs(¤t_thread_info()->k_gregs, + l_gregs); + if (HOST_KERNEL_GREGS_MASK & LOCAL_GREGS_USER_MASK) + get_h_gregs_from_l_regs(¤t_thread_info()->h_gregs, + l_gregs); +} + +static inline void +guest_get_all_user_glob_regs(global_regs_t *gregs) +{ + machine.save_gregs(gregs); + copy_k_gregs_to_gregs(gregs, ¤t_thread_info()->k_gregs); + copy_h_gregs_to_gregs(gregs, ¤t_thread_info()->h_gregs); +} + +#ifdef CONFIG_GREGS_CONTEXT +#define KVM_INIT_G_REGS() \ +({ \ + unsigned long vcpu_base; \ + /* VCPU state base can be on global register, so save & restore */ \ + KVM_SAVE_VCPU_STATE_BASE(vcpu_base); \ + NATIVE_INIT_G_REGS(); \ + KVM_RESTORE_VCPU_STATE_BASE(vcpu_base); \ + clear_memory_8(¤t_thread_info()->h_gregs, \ + sizeof(current_thread_info()->h_gregs), ETAGEWD); \ +}) +#define BOOT_KVM_INIT_G_REGS() \ +({ \ + unsigned long vcpu_base; \ + /* VCPU state base can be on global 
register, so save & restore */ \ + KVM_SAVE_VCPU_STATE_BASE(vcpu_base); \ + NATIVE_BOOT_INIT_G_REGS(); \ + KVM_RESTORE_VCPU_STATE_BASE(vcpu_base); \ +}) + +#else /* ! CONFIG_GREGS_CONTEXT */ +#define KVM_INIT_G_REGS() +#define BOOT_KVM_INIT_G_REGS() NATIVE_BOOT_INIT_G_REGS() +#endif /* CONFIG_GREGS_CONTEXT */ + +#define KVM_GET_GREGS_FROM_THREAD(g_user, gtag_user, gbase) \ +({ \ + u64 *greg_vals = (u64 *) g_user; \ + u8 *greg_tags = (u8 *) gtag_user; \ + u64 *glob_regs = (u64 *) gbase; \ + int greg_no; \ + \ + for (greg_no = 0; greg_no < E2K_GLOBAL_REGS_NUM; greg_no++) { \ + load_value_and_tagd((void *) glob_regs, \ + greg_vals, greg_tags); \ + glob_regs += 4; \ + greg_vals++; \ + greg_tags++; \ + } \ +}) + +#define KVM_SET_GREGS_TO_THREAD(gbase, g_user, gtag_user) \ +({ \ + u64 *greg_vals = (u64 *) g_user; \ + u8 *greg_tags = (u8 *) gtag_user; \ + u64 *glob_regs = (u64 *) gbase; \ + int greg_no; \ + u32 greg_tag; \ + \ + for (greg_no = 0; greg_no < E2K_GLOBAL_REGS_NUM; greg_no++) { \ + greg_tag = (u32) greg_tags[greg_no]; \ + store_tagged_dword((void *) glob_regs, \ + greg_vals[greg_no], greg_tag); \ + glob_regs += 4; \ + } \ +}) + +/* ptrace related guys: we do not use them on switching. 
*/ +# define GET_GREGS_FROM_THREAD(g_user, gtag_user, gbase) \ +({ \ + if (likely(IS_HV_GM())) \ + NATIVE_GET_GREGS_FROM_THREAD(g_user, gtag_user, gbase); \ + else \ + KVM_GET_GREGS_FROM_THREAD(g_user, gtag_user, gbase); \ +}) + +# define SET_GREGS_TO_THREAD(gbase, g_user, gtag_user) \ +({ \ + if (likely(IS_HV_GM())) \ + NATIVE_SET_GREGS_TO_THREAD(gbase, g_user, gtag_user); \ + else \ + KVM_SET_GREGS_TO_THREAD(gbase, g_user, gtag_user); \ +}) + +/* Save stack registers on guest kernel mode */ +#define KVM_SAVE_STACK_REGS(regs, ti, from_ti, trap) \ +do { \ + if (IS_HV_GM()) { \ + NATIVE_SAVE_STACK_REGS(regs, ti, from_ti, trap); \ + } else if (!(regs)->stack_regs_saved) { \ + PREFIX_SAVE_STACK_REGS(KVM, regs, ti, from_ti, trap); \ + } else { \ + /* registers were already saved */ \ + ; \ + } \ +} while (false) + +/* Save hardware stack registers on guest kernel mode */ +#define KVM_SAVE_HW_STACKS_AT_TI(ti) \ +do { \ + struct hw_stacks *stacks = &(ti)->tmp_user_stacks; \ + \ + stacks->psp_lo = KVM_READ_PSP_LO_REG(); \ + stacks->psp_hi = KVM_READ_PSP_HI_REG(); \ + stacks->pshtp = KVM_READ_PSHTP_REG(); \ + stacks->pcsp_lo = KVM_READ_PCSP_LO_REG(); \ + stacks->pcsp_hi = KVM_READ_PCSP_HI_REG(); \ + stacks->pcshtp = KVM_READ_PCSHTP_REG_SVALUE(); \ +} while (0) + +#define KVM_DO_RESTORE_HS_REGS(regs, updated) \ +({ \ + PREFIX_RESTORE_HS_REGS(KVM, regs); \ + UPDATE_CPU_REGS_FLAGS(updated, HS_REGS_UPDATED_CPU_REGS); \ + PUT_UPDATED_CPU_REGS_FLAGS(updated); \ +}) +#define KVM_DO_RESTORE_USER_STACK_REGS(regs, in_syscall, updated) \ +({ \ + PREFIX_RESTORE_USER_STACK_REGS(KVM, regs, in_syscall); \ + UPDATE_CPU_REGS_FLAGS(updated, USD_UPDATED_CPU_REGS); \ + UPDATE_CPU_REGS_FLAGS(updated, CRS_UPDATED_CPU_REGS); \ + PUT_UPDATED_CPU_REGS_FLAGS(updated); \ +}) + +/* it is paravirtualized guest or native guest kernel */ +#define UPDATE_CPU_REGS_FLAGS(__updated, flags) \ + ((__updated) |= (flags)) +#define KVM_RESTORE_HS_REGS(regs) \ +({ \ + u64 updated = 0; \ + \ + 
KVM_DO_RESTORE_HS_REGS(regs, updated); \ +}) +#define KVM_RESTORE_USER_STACK_REGS(regs, in_syscall) \ +({ \ + u64 updated = 0; \ + \ + KVM_DO_RESTORE_USER_STACK_REGS(regs, in_syscall, updated); \ +}) +#define KVM_RESTORE_USER_TRAP_STACK_REGS(regs) \ + KVM_RESTORE_USER_STACK_REGS(regs, false) +#define KVM_RESTORE_USER_SYSCALL_STACK_REGS(regs) \ + KVM_RESTORE_USER_STACK_REGS(regs, true) +#define KVM_RESTORE_USER_CUT_REGS(ti, regs) /* CUTD is set by host */ + +#define KVM_RESTORE_COMMON_REGS(regs) /* should be restored by host */ + +#define KVM_SAVE_TRAP_CELLAR(regs, trap) \ +({ \ + kernel_trap_cellar_t *kernel_tcellar = \ + (kernel_trap_cellar_t *)KERNEL_TRAP_CELLAR; \ + kernel_trap_cellar_ext_t *kernel_tcellar_ext = \ + (kernel_trap_cellar_ext_t *) \ + ((void *) KERNEL_TRAP_CELLAR + TC_EXT_OFFSET); \ + trap_cellar_t *tcellar = (trap)->tcellar; \ + int cnt, cs_req_num = 0, cs_a4 = 0, max_cnt; \ + u64 kstack_pf_addr = 0; \ + bool end_flag = false, is_qp; \ + \ + max_cnt = KVM_READ_MMU_TRAP_COUNT(); \ + if (max_cnt < 3) { \ + max_cnt = 3 * HW_TC_SIZE; \ + end_flag = true; \ + } \ + (trap)->curr_cnt = -1; \ + (trap)->ignore_user_tc = 0; \ + (trap)->tc_called = 0; \ + (trap)->is_intc = false; \ + (trap)->from_sigreturn = 0; \ + CLEAR_CLW_REQUEST_COUNT(regs); \ + BUG_ON(max_cnt > 3 * HW_TC_SIZE); \ + for (cnt = 0; 3 * cnt < max_cnt; cnt++) { \ + tc_opcode_t opcode; \ + tc_cond_t condition; \ + \ + if (end_flag) \ + if (AW(kernel_tcellar[cnt].condition) == -1) \ + break; \ + \ + tcellar[cnt].address = kernel_tcellar[cnt].address; \ + condition = kernel_tcellar[cnt].condition; \ + tcellar[cnt].condition = condition; \ + AW(opcode) = AS(condition).opcode; \ + is_qp = (AS(opcode).fmt == LDST_QP_FMT || \ + cpu_has(CPU_FEAT_QPREG) && AS(condition).fmtc && \ + AS(opcode).fmt == LDST_QWORD_FMT); \ + if (AS(condition).clw) { \ + if (GET_CLW_REQUEST_COUNT(regs) == 0) { \ + SET_CLW_FIRST_REQUEST(regs, cnt); \ + } \ + INC_CLW_REQUEST_COUNT(regs); \ + } \ + if (is_qp) \ + 
tcellar[cnt].mask = kernel_tcellar_ext[cnt].mask; \ + if (AS(condition).store) { \ + e2k_addr_t kt = \ + (e2k_addr_t)&(kernel_tcellar[cnt].data); \ + e2k_addr_t t = \ + (e2k_addr_t)&(tcellar[cnt].data); \ + e2k_addr_t ktx = \ + (e2k_addr_t)&(kernel_tcellar_ext[cnt].data); \ + e2k_addr_t tx = \ + (e2k_addr_t)&(kernel_tcellar_ext[cnt].data); \ + kvm_move_tagged_dword(kt, t); \ + if (is_qp) { \ + kvm_move_tagged_dword(ktx, tx); \ + } \ + } else if (AS(condition).s_f && AS(condition).sru) { \ + if (cs_req_num == 0) \ + cs_a4 = tcellar[cnt].address & (1 << 4); \ + cs_req_num++; \ + } \ + tcellar[cnt].flags = 0; \ + } \ + (trap)->tc_count = cnt * 3; \ + kstack_pf_addr; \ +}) + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* It is pure guest kernel */ + +#define INIT_G_REGS() KVM_INIT_G_REGS() +#define BOOT_INIT_G_REGS() BOOT_KVM_INIT_G_REGS() + +#define SAVE_HW_STACKS_AT_TI(ti) KVM_SAVE_HW_STACKS_AT_TI(ti) +#define SAVE_STACK_REGS(regs, ti, from_ti, trap) \ + KVM_SAVE_STACK_REGS(regs, ti, from_ti, trap) +#define RESTORE_HS_REGS(regs) \ + KVM_RESTORE_HS_REGS(regs) +#define RESTORE_USER_STACK_REGS(regs, in_syscall) \ + KVM_RESTORE_USER_STACK_REGS(regs, in_syscall) +#define RESTORE_USER_TRAP_STACK_REGS(regs) \ + RESTORE_USER_STACK_REGS(regs, false) +#define RESTORE_USER_SYSCALL_STACK_REGS(regs) \ + RESTORE_USER_STACK_REGS(regs, true) +#define RESTORE_USER_CUT_REGS(ti, regs, in_sys_call) \ + KVM_RESTORE_USER_CUT_REGS(ti, regs) +#define RESTORE_COMMON_REGS(regs) \ + KVM_RESTORE_COMMON_REGS(regs) + +static inline void +save_glob_regs_v2(global_regs_t *gregs) +{ + if (IS_HV_GM()) { + guest_save_glob_regs_v2(gregs); + } else { + kvm_save_glob_regs(gregs); + } +} + +static inline void +save_glob_regs_v5(global_regs_t *gregs) +{ + if (IS_HV_GM()) { + guest_save_glob_regs_v5(gregs); + } else { + kvm_save_glob_regs(gregs); + } +} + +static inline void +save_glob_regs_dirty_bgr_v2(global_regs_t *gregs) +{ + if (IS_HV_GM()) { + guest_save_glob_regs_dirty_bgr_v2(gregs); + } else { + 
kvm_save_glob_regs_dirty_bgr(gregs); + } +} + +static inline void +save_glob_regs_dirty_bgr_v5(global_regs_t *gregs) +{ + if (IS_HV_GM()) { + kvm_guest_save_gregs_dirty_bgr_v5(gregs); + } else { + kvm_save_glob_regs_dirty_bgr(gregs); + } +} + +static inline void +save_local_glob_regs_v2(local_gregs_t *l_gregs, bool is_signal) +{ + if (IS_HV_GM()) { + guest_save_local_glob_regs_v2(l_gregs, is_signal); + } else { + kvm_save_local_glob_regs(l_gregs, is_signal); + } +} + +static inline void +save_local_glob_regs_v5(local_gregs_t *l_gregs, bool is_signal) +{ + if (IS_HV_GM()) { + guest_save_local_glob_regs_v5(l_gregs, is_signal); + } else { + kvm_save_local_glob_regs(l_gregs, is_signal); + } +} + +static inline void +restore_glob_regs_v2(const global_regs_t *gregs) +{ + if (IS_HV_GM()) { + guest_restore_glob_regs_v2(gregs); + } else { + kvm_restore_glob_regs(gregs); + } +} + +static inline void +restore_glob_regs_v5(const global_regs_t *gregs) +{ + if (IS_HV_GM()) { + guest_restore_glob_regs_v5(gregs); + } else { + kvm_restore_glob_regs(gregs); + } +} + +static inline void +restore_local_glob_regs_v2(const local_gregs_t *l_gregs, bool is_signal) +{ + if (IS_HV_GM()) + guest_restore_local_glob_regs_v2(l_gregs, is_signal); + else + kvm_restore_local_glob_regs(l_gregs, is_signal); +} + +static inline void +restore_local_glob_regs_v5(const local_gregs_t *l_gregs, bool is_signal) +{ + if (IS_HV_GM()) + guest_restore_local_glob_regs_v5(l_gregs, is_signal); + else + kvm_restore_local_glob_regs(l_gregs, is_signal); +} + +static inline void +save_local_glob_regs(local_gregs_t *l_gregs, bool is_signal) +{ + machine.save_local_gregs(l_gregs, is_signal); +} +static inline void +restore_local_glob_regs(const local_gregs_t *l_gregs, bool is_signal) +{ + machine.restore_local_gregs(l_gregs, is_signal); +} + +static inline void +get_all_user_glob_regs(global_regs_t *gregs) +{ + if (IS_HV_GM()) + guest_get_all_user_glob_regs(gregs); + else + kvm_get_all_user_glob_regs(gregs); +} +#endif 
/* CONFIG_KVM_GUEST_KERNEL */ +#endif /* _E2K_KVM_GUEST_REGS_STATE_H */ + diff --git a/arch/e2k/include/asm/kvm/guest/secondary_space.h b/arch/e2k/include/asm/kvm/guest/secondary_space.h new file mode 100644 index 000000000000..e85b49790d65 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/secondary_space.h @@ -0,0 +1,21 @@ +/* + * Secondary space support for E2K binary compiler + * Guest kernel support + */ +#ifndef _ASM_KVM_GUEST_SECONDARY_SPACE_H +#define _ASM_KVM_GUEST_SECONDARY_SPACE_H + +/* do not include the header directly, use asm/secondary_space.h include */ + +#define KVM_IS_NEXT_ELBRUS_2S true +#define KVM_SS_ADDR_START 0x180000000000L +#define KVM_SS_SIZE 0x040000000000UL + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native guest kernel */ +#define IS_NEXT_ELBRUS_2S KVM_IS_NEXT_ELBRUS_2S +#define SS_SIZE KVM_SS_SIZE +#define SS_ADDR_START KVM_SS_ADDR_START +#endif /* ! CONFIG_KVM_GUEST_KERNEL */ + +#endif /* _ASM_KVM_GUEST_SECONDARY_SPACE_H */ diff --git a/arch/e2k/include/asm/kvm/guest/setup.h b/arch/e2k/include/asm/kvm/guest/setup.h new file mode 100644 index 000000000000..8b657231603f --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/setup.h @@ -0,0 +1,39 @@ +#ifndef _ASM_KVM_GUEST_MACHDEP_H_ +#define _ASM_KVM_GUEST_MACHDEP_H_ + +#include +#include + +#ifdef CONFIG_VIRTUALIZATION + +extern void __init boot_e2k_virt_setup_arch(void); +extern void __init e2k_virt_setup_machine(void); +extern void __init kvm_bsp_switch_to_init_stack(void); +extern void kvm_setup_bsp_idle_task(int cpu); +extern void setup_guest_interface(void); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +static inline void arch_setup_machine(void) +{ + native_setup_machine(); + if (IS_HV_GM()) { + setup_guest_interface(); + return; + } + e2k_virt_setup_machine(); +} + +static inline void bsp_switch_to_init_stack(void) +{ + kvm_bsp_switch_to_init_stack(); +} + +static inline void setup_bsp_idle_task(int cpu) +{ + 
kvm_setup_bsp_idle_task(cpu); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* CONFIG_VIRTUALIZATION */ +#endif /* _ASM_KVM_GUEST_MACHDEP_H_ */ diff --git a/arch/e2k/include/asm/kvm/guest/sge.h b/arch/e2k/include/asm/kvm/guest/sge.h new file mode 100644 index 000000000000..4d124aa065ae --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/sge.h @@ -0,0 +1,75 @@ +#ifndef _E2K_ASM_KVM_GUEST_SGE_H +#define _E2K_ASM_KVM_GUEST_SGE_H + +#ifdef __KERNEL__ + +#include + +#include +#include +#include + +#undef DEBUG_GUEST_SGE_MODE +#undef DebugGSGE +#define DEBUG_GUEST_SGE_MODE 0 /* stack guard debugging */ +#define DebugGSGE(fmt, args...) \ +({ \ + if (DEBUG_GUEST_SGE_MODE) \ + pr_info(fmt, ##args); \ +}) + +#define printk printk_fixed_args +#define panic panic_fixed_args +static inline void +kvm_do_switch_to_expanded_proc_stack(long delta_size, long delta_offset, + bool decr_k_ps) +{ + int ret; + + ret = HYPERVISOR_switch_to_expanded_guest_proc_stack(delta_size, + delta_offset, decr_k_ps); + if (ret) { + panic("kvm_do_switch_to_expanded_proc_stack() host could not " + "switch to updated stack, error %d\n", + ret); + } +} +static inline void +kvm_do_switch_to_expanded_chain_stack(long delta_size, long delta_offset, + bool decr_k_pcs) +{ + int ret; + + ret = HYPERVISOR_switch_to_expanded_guest_chain_stack(delta_size, + delta_offset, decr_k_pcs); + if (ret) { + panic("kvm_do_switch_to_expanded_chain_stack() host could not " + "switch to updated stack, error %d\n", + ret); + } +} + +#undef printk +#undef panic + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* It is native guest kernel (without paravirtualization) */ + +static inline void +switch_to_expanded_proc_stack(long delta_size, long delta_offset, + bool decr_k_ps) +{ + kvm_do_switch_to_expanded_proc_stack(delta_size, delta_offset, + decr_k_ps); +} +static inline void +switch_to_expanded_chain_stack(long delta_size, long delta_offset, + bool decr_k_pcs) +{ + kvm_do_switch_to_expanded_chain_stack(delta_size, delta_offset, + 
decr_k_pcs); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __KERNEL__ */ +#endif /* _E2K_ASM_KVM_GUEST_SGE_H */ diff --git a/arch/e2k/include/asm/kvm/guest/signal.h b/arch/e2k/include/asm/kvm/guest/signal.h new file mode 100644 index 000000000000..adcdba5814e9 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/signal.h @@ -0,0 +1,34 @@ +#ifndef _E2K_KVM_GUEST_SIGNAL_H_ +#define _E2K_KVM_GUEST_SIGNAL_H_ + +#include + +#ifndef __ASSEMBLY__ + +extern int kvm_signal_setup(struct pt_regs *regs); +extern int kvm_complete_long_jump(struct pt_regs *regs); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native paravirtualized guest kernel */ +/* or native kernel without virtualization support */ + +static inline int signal_setup(struct pt_regs *regs) +{ + return kvm_signal_setup(regs); +} + +static inline int complete_long_jump(struct pt_regs *regs) +{ + if (likely(IS_HV_GM())) { + return native_complete_long_jump(regs); + } else { + return kvm_complete_long_jump(regs); + } +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + + +#endif /* !__ASSEMBLY__ */ + +#endif /* !_E2K_KVM_GUEST_SIGNAL_H_ */ diff --git a/arch/e2k/include/asm/kvm/guest/smp.h b/arch/e2k/include/asm/kvm/guest/smp.h new file mode 100644 index 000000000000..9e6b9fdc1fbd --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/smp.h @@ -0,0 +1,84 @@ +#ifndef __ASM_KVM_GUEST_SMP_H +#define __ASM_KVM_GUEST_SMP_H + +#include + +extern void kvm_ap_switch_to_init_stack(e2k_addr_t stack_base, int cpuid, + int cpu); +extern void kvm_setup_secondary_task(int cpu); + +extern void kvm_wait_for_cpu_booting(void); +extern void kvm_wait_for_cpu_wake_up(void); +extern int kvm_activate_cpu(int cpu_id); +extern int kvm_activate_all_cpus(void); + +extern void kvm_csd_lock_wait(call_single_data_t *data); +extern void kvm_csd_lock(call_single_data_t *data); +extern void kvm_arch_csd_lock_async(call_single_data_t *data); +extern void kvm_csd_unlock(call_single_data_t *data); + +extern void kvm_setup_pic_virq(unsigned int cpuid); +extern 
void kvm_startup_pic_virq(unsigned int cpuid); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native guest kernel */ +static inline void +ap_switch_to_init_stack(e2k_addr_t stack_base, int cpuid, int cpu) +{ + kvm_ap_switch_to_init_stack(stack_base, cpuid, cpu); +} +static inline void setup_secondary_task(int cpu) +{ + kvm_setup_secondary_task(cpu); +} +static inline void +wait_for_cpu_booting(void) +{ + kvm_wait_for_cpu_booting(); +} +static inline void +wait_for_cpu_wake_up(void) +{ + kvm_wait_for_cpu_wake_up(); +} +static inline int +activate_cpu(int cpu_id) +{ + return kvm_activate_cpu(cpu_id); +} +static inline int +activate_all_cpus(void) +{ + return kvm_activate_all_cpus(); +} + +static inline void csd_lock_wait(call_single_data_t *data) +{ + kvm_csd_lock_wait(data); +} +static inline void csd_lock(call_single_data_t *data) +{ + kvm_csd_lock(data); +} +static inline void arch_csd_lock_async(call_single_data_t *data) +{ + kvm_arch_csd_lock_async(data); +} +static inline void csd_unlock(call_single_data_t *data) +{ + kvm_csd_unlock(data); +} + +static inline void +setup_local_pic_virq(unsigned int cpuid) +{ + kvm_setup_pic_virq(cpuid); +} +static inline void +startup_local_pic_virq(unsigned int cpuid) +{ + kvm_startup_pic_virq(cpuid); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __ASM_KVM_GUEST_SMP_H */ diff --git a/arch/e2k/include/asm/kvm/guest/spinlock.h b/arch/e2k/include/asm/kvm/guest/spinlock.h new file mode 100644 index 000000000000..58a4601e4ebc --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/spinlock.h @@ -0,0 +1,75 @@ +#ifndef __ASM_KVM_GUEST_SPINLOCK_H +#define __ASM_KVM_GUEST_SPINLOCK_H +/* + * This file implements the arch-dependent parts of kvm guest + * spin_lock()/spin_unlock() fast and slow part + * + * Copyright 2014 Salavat S. 
Guiliazov (atic@mcst.ru) + */ + +#include + +extern void kvm_arch_spin_lock_slow(void *lock); +extern void kvm_wait_read_lock_slow(arch_rwlock_t *rw); +extern void kvm_wait_write_lock_slow(arch_rwlock_t *rw); +extern void kvm_arch_spin_locked_slow(void *lock); +extern void kvm_arch_read_locked_slow(arch_rwlock_t *rw); +extern void kvm_arch_write_locked_slow(arch_rwlock_t *rw); +extern void kvm_arch_spin_unlock_slow(void *lock); +extern void kvm_arch_read_unlock_slow(arch_rwlock_t *lock); +extern void kvm_arch_write_unlock_slow(arch_rwlock_t *lock); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* native guest kernel */ + +#define arch_spin_relax(lock) kvm_cpu_relax() +#define arch_read_relax(lock) kvm_cpu_relax() +#define arch_write_relax(lock) kvm_cpu_relax() + +static inline void +ord_wait_read_lock_slow(arch_rwlock_t *rw) +{ + kvm_wait_read_lock_slow(rw); +} +static inline void +ord_wait_write_lock_slow(arch_rwlock_t *rw) +{ + kvm_wait_write_lock_slow(rw); +} +static inline void +ord_arch_read_locked_slow(arch_rwlock_t *rw) +{ + kvm_arch_read_locked_slow(rw); +} +static inline void +ord_arch_write_locked_slow(arch_rwlock_t *rw) +{ + kvm_arch_write_locked_slow(rw); +} +static inline void +ord_arch_read_unlock_slow(arch_rwlock_t *rw) +{ + kvm_arch_read_unlock_slow(rw); +} +static inline void +ord_arch_write_unlock_slow(arch_rwlock_t *rw) +{ + kvm_arch_write_unlock_slow(rw); +} + +static inline void arch_spin_lock_slow(arch_spinlock_t *lock) +{ + kvm_arch_spin_lock_slow(lock); +} +static inline void arch_spin_locked_slow(arch_spinlock_t *lock) +{ + kvm_arch_spin_locked_slow(lock); +} +static inline void arch_spin_unlock_slow(arch_spinlock_t *lock) +{ + kvm_arch_spin_unlock_slow(lock); +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __ASM_KVM_GUEST_SPINLOCK_H */ diff --git a/arch/e2k/include/asm/kvm/guest/stacks.h b/arch/e2k/include/asm/kvm/guest/stacks.h new file mode 100644 index 000000000000..e4a516fc5850 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/stacks.h 
@@ -0,0 +1,33 @@ +/* + * KVM guest stacks support + * Copyright 2017 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_GUEST_STACKS_H +#define _E2K_KVM_GUEST_STACKS_H + +#include +#include + +/* + * Guest kernel thread stacks descriptions + */ +#define KVM_GUEST_KERNEL_C_STACK_SIZE KERNEL_C_STACK_SIZE /* as on host */ +#define KVM_GUEST_KERNEL_PS_SIZE (16 * PAGE_SIZE) /* 64 KBytes */ +#define KVM_GUEST_KERNEL_PS_INIT_SIZE (1 * PAGE_SIZE) /* 4 KBytes */ +#define KVM_GUEST_KERNEL_PCS_SIZE (2 * PAGE_SIZE) /* 8 KBytes */ +#define KVM_GUEST_KERNEL_PCS_INIT_SIZE (1 * PAGE_SIZE) /* 4 KBytes */ + +/* + * Guest user task stacks descriptions + */ +#define KVM_GUEST_USER_DATA_STACK_SIZE \ + DEFAULT_USER_DATA_STACK_SIZE /* as on host */ +#define KVM_GUEST_USER_PS_MAX_SIZE USER_P_STACK_SIZE /* as on host */ +#define KVM_GUEST_USER_PS_INIT_SIZE USER_P_STACK_INIT_SIZE /* as on host */ +#define KVM_GUEST_USER_PS_PRESENT_SIZE USER_P_STACK_PRESENT_SIZE /* --''-- */ +#define KVM_GUEST_USER_PCS_MAX_SIZE USER_PC_STACK_SIZE /* as on host */ +#define KVM_GUEST_USER_PCS_INIT_SIZE USER_PC_STACK_INIT_SIZE /* as on host */ +#define KVM_GUEST_USER_PCS_PRESENT_SIZE USER_PC_STACK_PRESENT_SIZE /* --''-- */ + +#endif /* ! 
_E2K_KVM_GUEST_STACKS_H */ diff --git a/arch/e2k/include/asm/kvm/guest/string.h b/arch/e2k/include/asm/kvm/guest/string.h new file mode 100644 index 000000000000..b0049e442f0d --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/string.h @@ -0,0 +1,213 @@ +#ifndef _E2K_KVM_GUEST_STRING_H_ +#define _E2K_KVM_GUEST_STRING_H_ + +#include +#include + +#include +#include + +#ifndef __ASSEMBLY__ + +/* + * optimized copy memory along with tags + * using privileged LD/ST recovery operations + */ +static inline unsigned long +kvm_do_fast_tagged_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + long ret; + + do { + ret = HYPERVISOR_fast_tagged_memory_copy(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); + } while (ret == -EAGAIN); + + return ret; +} +static inline unsigned long +kvm_do_fast_tagged_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + long ret; + + if (IS_HOST_KERNEL_ADDRESS((e2k_addr_t)addr)) { + ret = HYPERVISOR_fast_tagged_guest_memory_set(addr, val, tag, + len, strd_opcode); + } else { + ret = HYPERVISOR_fast_tagged_memory_set(addr, val, tag, len, + strd_opcode); + } + return ret; +} + +/* + * Extract tags from 32 bytes of data + * FIXME: need improve function to extract tags from any size of data + */ +static inline unsigned long +kvm_do_extract_tags_32(u16 *dst, const void *src) +{ + return HYPERVISOR_extract_tags_32(dst, src); +} + +#define DEBUG_GUEST_STRINGS + +#ifndef DEBUG_GUEST_STRINGS +static inline unsigned long +kvm_fast_tagged_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + if (likely(IS_HV_GM())) + return native_fast_tagged_memory_copy(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); + else + return kvm_do_fast_tagged_memory_copy(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); +} +static inline unsigned long +kvm_fast_tagged_memory_set(void *addr, 
u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + if (likely(IS_HV_GM())) + return native_fast_tagged_memory_set(addr, val, tag, len, + strd_opcode); + else + return kvm_do_fast_tagged_memory_set(addr, val, tag, len, + strd_opcode); +} + +static inline unsigned long +kvm_extract_tags_32(u16 *dst, const void *src) +{ + if (likely(IS_HV_GM())) + return native_extract_tags_32(dst, src); + else + return kvm_do_extract_tags_32(dst, src); +} +#else /* DEBUG_GUEST_STRINGS */ +extern unsigned long kvm_fast_tagged_memory_copy(void *dst, const void *src, + size_t len, + unsigned long strd_opcode, + unsigned long ldrd_opcode, + int prefetch); +extern unsigned long kvm_fast_tagged_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode); +extern unsigned long boot_kvm_fast_tagged_memory_copy(void *dst, + const void *src, size_t len, + unsigned long strd_opcode, + unsigned long ldrd_opcode, + int prefetch); +extern unsigned long boot_kvm_fast_tagged_memory_set(void *addr, u64 val, + u64 tag, size_t len, u64 strd_opcode); + +extern unsigned long kvm_extract_tags_32(u16 *dst, const void *src); +#endif /* ! 
DEBUG_GUEST_STRINGS */ + +static inline int +kvm_fast_tagged_memory_copy_to_user(void __user *dst, const void *src, + size_t len, const struct pt_regs *regs, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + /* guest kernel does not support any nested guests */ + return kvm_fast_tagged_memory_copy(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); +} + +static inline int +kvm_fast_tagged_memory_copy_from_user(void *dst, const void __user *src, + size_t len, const struct pt_regs *regs, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + /* guest kernel does not support any nested guests */ + return kvm_fast_tagged_memory_copy(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); +} + +static inline void kvm_tagged_memcpy_8(void *dst, const void *src, size_t n) +{ + E2K_PREFETCH_L1_SPEC(src); + + __tagged_memcpy_8(dst, src, n); +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +/** + * optimized copy memory along with tags + * using privileged LD/ST recovery operations + */ + +#define tagged_memcpy_8(dst, src, n) \ +({ \ + if (likely(IS_HV_GM())) \ + native_tagged_memcpy_8(dst, src, n, \ + __alignof(*(dst)), __alignof(*(src))); \ + else \ + kvm_tagged_memcpy_8(dst, src, n); \ +}) + +static inline unsigned long +fast_tagged_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return kvm_fast_tagged_memory_copy(dst, src, len, strd_opcode, + ldrd_opcode, prefetch); +} +static inline unsigned long +boot_fast_tagged_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return boot_kvm_fast_tagged_memory_copy(dst, src, len, strd_opcode, + ldrd_opcode, prefetch); +} +static inline unsigned long +fast_tagged_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + return 
kvm_fast_tagged_memory_set(addr, val, tag, len, strd_opcode); +} +static inline void +boot_fast_tagged_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + boot_kvm_fast_tagged_memory_set(addr, val, tag, len, strd_opcode); +} + +static inline unsigned long +extract_tags_32(u16 *dst, const void *src) +{ + return kvm_extract_tags_32(dst, src); +} + +static inline int +fast_tagged_memory_copy_to_user(void __user *dst, const void *src, + size_t len, const struct pt_regs *regs, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return kvm_fast_tagged_memory_copy_to_user(dst, src, len, regs, + strd_opcode, ldrd_opcode, prefetch); +} + +static inline int +fast_tagged_memory_copy_from_user(void *dst, const void __user *src, + size_t len, const struct pt_regs *regs, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return kvm_fast_tagged_memory_copy_from_user(dst, src, len, regs, + strd_opcode, ldrd_opcode, prefetch); +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __ASSEMBLY__ */ +#endif /* _E2K_KVM_GUEST_STRING_H_ */ diff --git a/arch/e2k/include/asm/kvm/guest/switch.h b/arch/e2k/include/asm/kvm/guest/switch.h new file mode 100644 index 000000000000..852c3caba2e9 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/switch.h @@ -0,0 +1,158 @@ +#ifndef _E2K_KVM_GUEST_SWITCH_H +#define _E2K_KVM_GUEST_SWITCH_H + +#include + +static inline void kvm_guest_enter(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, unsigned flags) +{ + /* guest cannot have own nested guests */ +} + +static inline void kvm_guest_exit(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, unsigned flags) +{ + /* guest cannot have own nested guests */ +} +static inline void +kvm_trap_guest_enter(struct thread_info *ti, struct pt_regs *regs, + unsigned flags) +{ + /* guest cannot have own nested guests */ + if (likely(!IS_HV_GM())) + /* there is not hardware virtualization support */ + /* so native trap handler cannot 
be used */ + return; + if (!(flags & EXIT_FROM_TRAP_SWITCH)) + return; + /* restore global registers used as native kernel too */ + native_trap_guest_enter(ti, regs, flags); +} +static inline void +kvm_trap_guest_exit(struct thread_info *ti, struct pt_regs *regs, + trap_pt_regs_t *trap, unsigned flags) +{ + /* guest cannot have own nested guests */ + if (likely(!IS_HV_GM())) + /* there is not hardware virtualization support */ + /* so native trap handler cannot be used */ + return; + native_trap_guest_exit(ti, regs, trap, flags); +} +static inline bool +kvm_guest_trap_pending(struct thread_info *ti) +{ + /* nothing guest can be */ + return false; +} + +static inline bool +kvm_trap_from_guest_user(struct thread_info *ti) +{ + return native_trap_from_guest_user(ti); +} + +static inline bool +kvm_syscall_from_guest_user(struct thread_info *ti) +{ + return native_syscall_from_guest_user(ti); +} + +static inline struct e2k_stacks * +kvm_trap_guest_get_restore_stacks(struct thread_info *ti, struct pt_regs *regs) +{ + return native_trap_guest_get_restore_stacks(ti, regs); +} + +static inline struct e2k_stacks * +kvm_syscall_guest_get_restore_stacks(struct thread_info *ti, + struct pt_regs *regs) +{ + return native_syscall_guest_get_restore_stacks(regs); +} + +/* + * The function should return bool is the system call from guest + */ +static inline bool kvm_guest_syscall_enter(struct pt_regs *regs) +{ + /* guest cannot have own nested guests */ + + return false; /* it is not nested guest system call */ +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravrtualized) */ + +static inline void __guest_enter(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, unsigned flags) +{ + kvm_guest_enter(ti, vcpu, flags); +} + +static inline void __guest_exit(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, unsigned flags) +{ + kvm_guest_exit(ti, vcpu, flags); +} +static inline void +trap_guest_enter(struct thread_info *ti, struct pt_regs *regs, unsigned 
flags) +{ + kvm_trap_guest_enter(ti, regs, flags); +} +static inline void +trap_guest_exit(struct thread_info *ti, struct pt_regs *regs, + trap_pt_regs_t *trap, unsigned flags) +{ + kvm_trap_guest_exit(ti, regs, trap, flags); +} +static inline bool +guest_trap_pending(struct thread_info *ti) +{ + return kvm_guest_trap_pending(ti); +} + +static inline bool +guest_trap_from_user(struct thread_info *ti) +{ + return kvm_trap_from_guest_user(ti); +} + +static inline bool +guest_syscall_from_user(struct thread_info *ti) +{ + return kvm_syscall_from_guest_user(ti); +} + +static inline struct e2k_stacks * +trap_guest_get_restore_stacks(struct thread_info *ti, struct pt_regs *regs) +{ + return kvm_trap_guest_get_restore_stacks(ti, regs); +} + +static inline struct e2k_stacks * +syscall_guest_get_restore_stacks(bool ts_host_at_vcpu_mode, struct pt_regs *regs) +{ + return kvm_syscall_guest_get_restore_stacks( + current_thread_info(), regs); +} + +#define ts_host_at_vcpu_mode() false + +/* + * The function should return bool is the system call from guest + */ +static inline bool guest_syscall_enter(struct pt_regs *regs, + bool ts_host_at_vcpu_mode) +{ + return kvm_guest_syscall_enter(regs); +} + +static inline void guest_exit_intc(struct pt_regs *regs, + bool intc_emul_flag) { } +static inline void guest_syscall_exit_trap(struct pt_regs *regs, + bool ts_host_at_vcpu_mode) { } + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! _E2K_KVM_GUEST_SWITCH_H */ diff --git a/arch/e2k/include/asm/kvm/guest/switch_to.h b/arch/e2k/include/asm/kvm/guest/switch_to.h new file mode 100644 index 000000000000..22cfc8339b9d --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/switch_to.h @@ -0,0 +1,217 @@ +#ifndef _ASM_KVM_GUEST_SWITCH_TO_H +#define _ASM_KVM_GUEST_SWITCH_TO_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +#undef DEBUG_KVM_SWITCH_MODE +#undef DebugKVMSW +#define DEBUG_KVM_SWITCH_MODE 0 /* KVM switching debugging */ +#define DebugKVMSW(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_SWITCH_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#define KVM_UPSR_NMI_SAVE_AND_CLI(flags) \ +({ \ + flags = KVM_READ_UPSR_REG_VALUE(); \ + KVM_WRITE_UPSR_REG_VALUE(flags & ~UPSR_NMIE); \ +}) +#define KVM_RESTORE_USER_UPSR(user_upsr) \ +({ \ + KVM_WRITE_UPSR_REG_VALUE(user_upsr); \ +}) + +static inline struct task_struct * +kvm_ret_from_fork_get_prev_task(struct task_struct *prev) +{ + prev = current->thread.sw_regs.prev_task; + BUG_ON(prev == NULL); + return prev; +} + +static inline void +KVM_SAVE_TASK_STACKS_REGS_TO_SWITCH(struct task_struct *task, int save_ip) +{ + struct sw_regs *sw_regs = &task->thread.sw_regs; + unsigned long usd_lo; + unsigned long usd_hi; + + ATOMIC_DO_SAVE_ALL_STACKS_REGS(sw_regs, + &sw_regs->crs.cr1_hi, usd_lo, usd_hi); + + sw_regs->usd_lo.USD_lo_half = usd_lo; + sw_regs->usd_hi.USD_hi_half = usd_hi; + sw_regs->top = NATIVE_NV_READ_SBR_REG_VALUE(); + sw_regs->crs.cr1_lo = NATIVE_NV_READ_CR1_LO_REG(); + if (likely(save_ip)) { + sw_regs->crs.cr0_lo = NATIVE_NV_READ_CR0_LO_REG(); + sw_regs->crs.cr0_hi = NATIVE_NV_READ_CR0_HI_REG(); + } +} +static inline void +KVM_SAVE_TASK_REGS_TO_SWITCH(struct task_struct *task, int save_ip) +{ + + /* Save interrupt mask state and disable NMIs */ + KVM_UPSR_NMI_SAVE_AND_CLI(AW(task->thread.sw_regs.upsr)); + + KVM_SAVE_TASK_STACKS_REGS_TO_SWITCH(task, save_ip); + + /* global registers and user registers */ + /* will be saved by host kernel while real switch */ + +} + +/* + * now lcc has problem with structure on registers + * (It move these structures is stack memory) + */ +static inline void +KVM_RESTORE_TASK_STACKS_REGS_TO_SWITCH(struct task_struct *task, int restore_ip) +{ + u64 sbr = task->thread.sw_regs.top; + u64 usd_lo = AS_WORD(task->thread.sw_regs.usd_lo); + u64 usd_hi = AS_WORD(task->thread.sw_regs.usd_hi); + u64 psp_lo = AS_WORD(task->thread.sw_regs.psp_lo); + u64 psp_hi = AS_WORD(task->thread.sw_regs.psp_hi); + u64 pcsp_lo = 
AS_WORD(task->thread.sw_regs.pcsp_lo); + u64 pcsp_hi = AS_WORD(task->thread.sw_regs.pcsp_hi); + u64 cr_wd = AS_WORD(task->thread.sw_regs.crs.cr1_lo); + u64 cr_ussz = AS_WORD(task->thread.sw_regs.crs.cr1_hi); + + KVM_FLUSHCPU; + + KVM_WRITE_USBR_USD_REG_VALUE(sbr, usd_hi, usd_lo); + KVM_WRITE_PSP_REG_VALUE(psp_hi, psp_lo); + KVM_WRITE_PCSP_REG_VALUE(pcsp_hi, pcsp_lo); + + KVM_WRITE_CR1_LO_REG_VALUE(cr_wd); + KVM_WRITE_CR1_HI_REG_VALUE(cr_ussz); + if (unlikely(restore_ip)) { + KVM_WRITE_CR0_LO_REG_VALUE(AW(task->thread.sw_regs.crs.cr0_lo)); + KVM_WRITE_CR0_HI_REG_VALUE(AW(task->thread.sw_regs.crs.cr0_hi)); + } +} +static inline void +KVM_RESTORE_TASK_REGS_TO_SWITCH(struct task_struct *task, int restore_ip) +{ + KVM_RESTORE_TASK_STACKS_REGS_TO_SWITCH(task, restore_ip); + + /* global registers and user registers */ + /* will be restored by host kernel while real switch */ + + /* Enable interrupt */ + KVM_RESTORE_USER_UPSR(task->thread.sw_regs.upsr.UPSR_reg); +} + +static __always_inline struct task_struct * +kvm_do_switch_to(struct task_struct *prev, struct task_struct *next) +{ + thread_info_t *next_ti = task_thread_info(next); + struct sw_regs *sw_regs; + e2k_size_t ps_size; + e2k_size_t ps_ind; + e2k_size_t pcs_size; + e2k_size_t pcs_ind; + + DebugKVMSW("started on VCPU #%d to switch %s(%d/%d) parent %s (%d) " + "-> %s(%d/%d) parent %s (%d)\n", + smp_processor_id(), prev->comm, prev->pid, + task_thread_info(prev)->gpid_nr, + prev->real_parent->comm, prev->real_parent->pid, + next->comm, next->pid, task_thread_info(next)->gpid_nr, + next->real_parent->comm, next->real_parent->pid); + + /* Save interrupt mask state and disable NMIs */ + UPSR_ALL_SAVE_AND_CLI(AW(prev->thread.sw_regs.upsr)); + + ATOMIC_GET_HW_STACK_SIZES(ps_ind, ps_size, pcs_ind, pcs_size); + DebugKVMSW("prev task PS ind 0x%lx size 0x%lx\n", + ps_ind, ps_size); + DebugKVMSW("prev task PCS ind 0x%lx size 0x%lx\n", + pcs_ind, pcs_size); + if (ps_ind + MAX_SRF_SIZE >= ps_size || + pcs_ind + SZ_OF_CR >= 
pcs_size) { + /* + * Hardware stack(s) overflow and need expand stack(s) + * before switching to new process to avoid trap in + * hypercall while switch will be in progress. + * Provoke to trap now to handle stack bounds exception + */ + KVM_COPY_STACKS_TO_MEMORY(); + DebugKVMSW("copy stacks to memory to trap on bounds\n"); + } + KVM_SAVE_TASK_REGS_TO_SWITCH(prev, 1); + sw_regs = &prev->thread.sw_regs; + DebugKVMSW("prev task regs saved: PS base 0x%llx ind 0x%x size 0x%x\n", + sw_regs->psp_lo.PSP_lo_base, + sw_regs->psp_hi.PSP_hi_ind, sw_regs->psp_hi.PSP_hi_size); + DebugKVMSW("prev task regs saved: PCS base 0x%llx ind 0x%x size 0x%x\n", + sw_regs->pcsp_lo.PCSP_lo_base, + sw_regs->pcsp_hi.PCSP_hi_ind, sw_regs->pcsp_hi.PCSP_hi_size); + sw_regs = &next->thread.sw_regs; + DebugKVMSW("next task regs saved: PS base 0x%llx ind 0x%x size 0x%x\n", + sw_regs->psp_lo.PSP_lo_base, + sw_regs->psp_hi.PSP_hi_ind, sw_regs->psp_hi.PSP_hi_size); + DebugKVMSW("next task regs saved: PCS base 0x%llx ind 0x%x size 0x%x\n", + sw_regs->pcsp_lo.PCSP_lo_base, + sw_regs->pcsp_hi.PCSP_hi_ind, sw_regs->pcsp_hi.PCSP_hi_size); + + set_current_thread_info(next_ti, next); + + KVM_RESTORE_TASK_REGS_TO_SWITCH(next, 0); + + /* remember previous task to restore after real switch */ + sw_regs->prev_task = prev; + + /* real switch guest kernel stacks can be done only by hypervisor */ + DebugKVMSW("will start hypercall to switch real guest thread stacks\n"); + HYPERVISOR_switch_guest_thread_stacks( + next_ti->gpid_nr, next_ti->gmmid_nr); + + /* reload locals after hardware and local data stack switch */ + /* now its state contain previous switch from the current */ + next = current; + prev = current->thread.sw_regs.prev_task; + + /* Restore interrupt mask and enable NMIs */ + UPSR_RESTORE(AW(current->thread.sw_regs.upsr)); + + /* return actualized structure of previous task */ + return prev; +} + +#define kvm_switch_to(prev, next, last) \ +do { \ + if (IS_HV_GM()) { \ + native_switch_to(prev, next, 
last); \ + } else { \ + last = kvm_do_switch_to(prev, next); \ + e2k_finish_switch(last); \ + } \ +} while (0) + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ + +static inline struct task_struct * +ret_from_fork_get_prev_task(struct task_struct *prev) +{ + return kvm_ret_from_fork_get_prev_task(prev); +} + +/* switch_to() should be only macros to update pointer 'prev' at */ +/* __schedule() function */ +#define switch_to(prev, next, last) kvm_switch_to(prev, next, last) + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __KERNEL__ */ + +#endif /* _ASM_KVM_GUEST_SWITCH_TO_H */ diff --git a/arch/e2k/include/asm/kvm/guest/sync_pg_tables.h b/arch/e2k/include/asm/kvm/guest/sync_pg_tables.h new file mode 100644 index 000000000000..36f19323a125 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/sync_pg_tables.h @@ -0,0 +1,19 @@ +/* Functions to sync shadow page tables with guest page tables + * without flushing tlb. Used only by guest kernels + * + * Copyright 2021 Andrey Alekhin (alekhin_amcst.ru) + */ + +#ifndef _E2K_GST_SYNC_PG_TABLES_H +#define _E2K_GST_SYNC_PG_TABLES_H + +#include +#include + +static inline void kvm_sync_addr_range(e2k_addr_t start, e2k_addr_t end) +{ + if (!IS_HV_GM()) + HYPERVISOR_sync_addr_range(start, end); +} + +#endif diff --git a/arch/e2k/include/asm/kvm/guest/system.h b/arch/e2k/include/asm/kvm/guest/system.h new file mode 100644 index 000000000000..c6c069152b90 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/system.h @@ -0,0 +1,227 @@ +/* + * KVM guest processor and processes support + * + * Copyright (C) 2014 MCST + */ + +#ifndef _E2K_KVM_GUEST_SYSTEM_H_ +#define _E2K_KVM_GUEST_SYSTEM_H_ + +#ifndef __ASSEMBLY__ + +#include +#include + +/* + * Guest kernel case assumption is that the host emulates hardware updates + * of CPU registers state on trap or system call, in particular PSR state. 
+ * Hardware disables interrupt masks and switch interrupts control to PSR, + * so host sets VCPU registers (copy into memory) in same state. + * Trap handler should switch interrupts control from PSR to UPSR + * previously it should set UPSR to initial state for kernel with disabled + * interrupts (so UPSR disable interrupts) + * If trap occurs on guest kernel, then interrupts should be enabled + * and control should be under UPSR. So do not restore control under PSR and + * restore only UPSR state. + * Guest kernel cannot use 'done' instruction and restore PSR state + * saved into CR1.lo register, it should be done by host. + */ + +#define KVM_INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis) \ +({ \ + e2k_upsr_t __upsr_val; \ + \ + __upsr_val = nmirq_dis ? E2K_KERNEL_INITIAL_UPSR_WITH_DISABLED_NMI \ + : \ + E2K_KERNEL_INITIAL_UPSR; \ + KVM_WRITE_UPSR_REG_VALUE(__upsr_val.UPSR_reg); \ +}) +#define BOOT_KVM_INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis) \ +({ \ + e2k_upsr_t __upsr_val; \ + \ + __upsr_val = nmirq_dis ? 
E2K_KERNEL_INITIAL_UPSR_WITH_DISABLED_NMI \ + : \ + E2K_KERNEL_INITIAL_UPSR; \ + BOOT_KVM_WRITE_UPSR_REG_VALUE(__upsr_val.UPSR_reg); \ +}) +#define KVM_INIT_USER_UPSR_REG() \ + KVM_WRITE_UPSR_REG_VALUE(E2K_USER_INITIAL_UPSR.UPSR_reg) +#define KVM_INIT_USER_PSR() \ + KVM_ATOMIC_WRITE_PSR_REG_VALUE(E2K_USER_INITIAL_PSR.PSR_reg, \ + false) /* IRQs under UPSR */ + +#define KVM_DO_SAVE_PSR_REG(psr_reg) \ + (psr_reg.PSR_reg = KVM_READ_PSR_REG_VALUE()) +#define KVM_DO_SAVE_UPSR_REG(upsr_reg) \ + (upsr_reg.UPSR_reg = KVM_READ_UPSR_REG_VALUE()) +#define KVM_DO_SAVE_PSR_UPSR_REGS(psr_reg, upsr_reg, under_upsr) \ +({ \ + KVM_DO_SAVE_PSR_REG((psr_reg)); \ + KVM_DO_SAVE_UPSR_REG((upsr_reg)); \ + under_upsr = kvm_get_vcpu_state()->irqs_under_upsr; \ +}) +#define KVM_DO_RESTORE_PSR_REG(psr_reg) \ + (KVM_WRITE_PSR_REG_VALUE(psr_reg.PSR_reg)) +#define KVM_DO_RESTORE_UPSR_REG(upsr_reg) \ + (KVM_WRITE_UPSR_REG_VALUE(upsr_reg.UPSR_reg)) + +#define KVM_SWITCH_IRQ_TO_UPSR(disable_sge) \ + KVM_WRITE_PSR_REG_VALUE(AW(E2K_KERNEL_PSR_ENABLED)) + +#define BOOT_KVM_SWITCH_IRQ_TO_UPSR() \ + BOOT_KVM_WRITE_PSR_REG_VALUE(AW(E2K_KERNEL_PSR_ENABLED)) + +#define KVM_DO_RETURN_IRQ_TO_PSR(under_upsr, disable_sge) \ + KVM_ATOMIC_WRITE_PSR_REG_VALUE(AW(E2K_KERNEL_PSR_DISABLED), under_upsr) + +#define KVM_RETURN_IRQ_TO_PSR(under_upsr) \ + KVM_DO_RETURN_IRQ_TO_PSR(under_upsr, false) + +#define KVM_SET_USER_INITIAL_UPSR(upsr) \ +({ \ + KVM_RETURN_IRQ_TO_PSR(false); \ + KVM_WRITE_UPSR_REG(upsr); \ +}) + +#define KVM_CHECK_IRQ_UNDER_PSR(psr_reg, under_upsr) \ +({ \ + if (psr_reg.PSR_ie || psr_reg.PSR_uie || !psr_reg.PSR_pm) { \ + pr_err("#U1 PSR 0x%x under upsr %d\n", \ + psr_reg.PSR_reg, under_upsr); \ + psr_reg.PSR_ie = 0; \ + psr_reg.PSR_uie = 0; \ + psr_reg.PSR_pm = 1; \ + WARN_ON(true); \ + } \ + if (under_upsr) { \ + pr_err("#U2 PSR 0x%x under upsr %d\n", \ + psr_reg.PSR_reg, under_upsr); \ + kvm_get_vcpu_state()->irqs_under_upsr = false; \ + WARN_ON(true); \ + } \ +}) +#define 
KVM_CHECK_IRQ_UNDER_UPSR(psr_reg, upsr_reg, under_upsr, has_irqs) \ +({ \ + if (psr_reg.PSR_ie || !psr_reg.PSR_pm || \ + !psr_reg.PSR_uie && under_upsr) { \ + pr_err("#K1 PSR 0x%x UPSR 0x%x under upsr %d\n", \ + psr_reg.PSR_reg, upsr_reg.UPSR_reg, \ + under_upsr); \ + psr_reg.PSR_ie = 0; \ + psr_reg.PSR_pm = 1; \ + if (under_upsr) \ + psr_reg.PSR_uie = 1; \ + KVM_WRITE_PSR_REG_VALUE(psr_reg.PSR_reg); \ + WARN_ON(true); \ + } \ + if (psr_reg.PSR_uie && !under_upsr) { \ + E2K_LMS_HALT_OK; \ + pr_err("#K2 PSR 0x%x UPSR 0x%x under upsr %d\n", \ + psr_reg.PSR_reg, upsr_reg.UPSR_reg, \ + under_upsr); \ + kvm_get_vcpu_state()->irqs_under_upsr = true; \ + WARN_ON(true); \ + } \ + if (!upsr_reg.UPSR_ie && under_upsr && has_irqs) { \ + pr_err("#K3 PSR 0x%x UPSR 0x%x under upsr %d " \ + "has IRQs %d\n", \ + psr_reg.PSR_reg, upsr_reg.UPSR_reg, \ + under_upsr, has_irqs); \ + upsr_reg.UPSR_ie = 1; \ + KVM_WRITE_UPSR_REG_VALUE(upsr_reg.UPSR_reg); \ + WARN_ON(true); \ + } \ +}) +#define KVM_CHECK_IRQ_STATE(psr_reg, upsr_reg, under_upsr, \ + has_irqs, user_mode) \ +do { \ + if (user_mode) { \ + KVM_CHECK_IRQ_UNDER_PSR(psr_reg, under_upsr); \ + } else { \ + KVM_CHECK_IRQ_UNDER_UPSR(psr_reg, upsr_reg, \ + under_upsr, has_irqs); \ + } \ +} while (false) + +#define KVM_RETURN_TO_KERNEL_UPSR(upsr_reg) \ +({ \ + if (IS_HV_GM()) { \ + e2k_cr1_lo_t cr1_lo; \ + unsigned psr; \ +\ + cr1_lo = NATIVE_NV_READ_CR1_LO_REG(); \ + psr = cr1_lo.CR1_lo_psr; \ + KVM_WRITE_SW_PSR_REG_VALUE(psr); \ + NATIVE_RETURN_IRQ_TO_PSR(); \ + } else { \ + KVM_RETURN_IRQ_TO_PSR(true); \ + } \ + KVM_DO_RESTORE_UPSR_REG(upsr_reg); \ +}) +#define KVM_RETURN_TO_INIT_USER_UPSR() \ +({ \ + KVM_INIT_USER_PSR(); \ + KVM_INIT_USER_UPSR_REG(); \ +}) +#define KVM_SWITCH_TO_KERNEL_UPSR(psr_reg, upsr_reg, under_upsr, \ + irq_en, nmirq_dis) \ +({ \ + KVM_DO_SAVE_PSR_UPSR_REGS(psr_reg, upsr_reg, under_upsr); \ + KVM_DO_SWITCH_TO_KERNEL_UPSR(irq_en, nmirq_dis); \ + kvm_get_vcpu_state()->irqs_under_upsr = true; \ +}) + +#define 
KVM_DO_SWITCH_TO_KERNEL_UPSR(irq_en, nmirq_dis) \ + PREFIX_DO_SWITCH_TO_KERNEL_UPSR(KVM, kvm, \ + irq_en, nmirq_dis) +#define KVM_RETURN_TO_USER_UPSR(upsr_reg, under_upsr) \ +({ \ + KVM_RETURN_IRQ_TO_PSR(under_upsr); \ + KVM_DO_RESTORE_UPSR_REG(upsr_reg); \ +}) +#define KVM_SET_KERNEL_UPSR_WITH_DISABLED_NMI() \ + PREFIX_SET_KERNEL_UPSR_WITH_DISABLED_NMI(KVM) +#define KVM_SET_KERNEL_UPSR(disable_sge) \ + PREFIX_SET_KERNEL_UPSR(KVM, disable_sge) +#define BOOT_KVM_SET_KERNEL_UPSR() \ + BOOT_PREFIX_SET_KERNEL_UPSR(KVM) + +#define kvm_psr_and_upsr_irqs_disabled() \ +({ \ + e2k_psr_t psr; \ + e2k_upsr_t upsr; \ + bool under_upsr; \ + \ + KVM_DO_SAVE_PSR_UPSR_REGS(psr, upsr, under_upsr); \ + psr_and_upsr_irqs_disabled_flags(psr.PSR_reg, upsr.UPSR_reg); \ +}) + +extern void *kvm_nested_kernel_return_address(int n); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized) */ + +#define INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis) \ + KVM_INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis) +#define SET_KERNEL_UPSR(disable_sge) \ + KVM_SET_KERNEL_UPSR(disable_sge) +#define BOOT_SET_KERNEL_UPSR() \ + BOOT_KVM_SET_KERNEL_UPSR() +#define SET_KERNEL_UPSR_WITH_DISABLED_NMI() \ + KVM_SET_KERNEL_UPSR_WITH_DISABLED_NMI() +#define RETURN_TO_KERNEL_UPSR(upsr_reg) \ + KVM_RETURN_TO_KERNEL_UPSR(upsr_reg) + +static inline void * +nested_kernel_return_address(int n) +{ + return kvm_nested_kernel_return_address(n); +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! 
__ASSEMBLY__ */ +#endif /* _E2K_KVM_GUEST_SYSTEM_H_ */ + + diff --git a/arch/e2k/include/asm/kvm/guest/time.h b/arch/e2k/include/asm/kvm/guest/time.h new file mode 100644 index 000000000000..50c6b00feadb --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/time.h @@ -0,0 +1,29 @@ +#ifndef __ASM_KVM_GUEST_TIME_H +#define __ASM_KVM_GUEST_TIME_H + +#ifdef __KERNEL__ +#include + +extern unsigned long kvm_get_wallclock(void); +extern int kvm_set_wallclock(unsigned long now); +extern void kvm_clock_init(void); + +#ifdef CONFIG_PARAVIRT +/* FIXME: this method has support on arch-independent code */ +/* so it should be main method to account steal time */ + +extern unsigned long kvm_steal_clock(int cpu); +#endif /* CONFIG_PARAVIRT */ + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +static inline void +arch_clock_init(void) +{ + kvm_clock_init(); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_KVM_GUEST_TIME_H */ diff --git a/arch/e2k/include/asm/kvm/guest/timex.h b/arch/e2k/include/asm/kvm/guest/timex.h new file mode 100644 index 000000000000..e80287ed5ca1 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/timex.h @@ -0,0 +1,26 @@ +#ifndef __ASM_KVM_GUEST_TIMEX_H +#define __ASM_KVM_GUEST_TIMEX_H + +#ifdef __KERNEL__ +#include + +extern void kvm_time_init(void); +extern int kvm_read_current_timer(unsigned long *timer_val); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +static inline void +time_init(void) +{ + kvm_time_init(); +} +static inline int +read_current_timer(unsigned long *timer_val) +{ + return kvm_read_current_timer(timer_val); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_KVM_GUEST_TIMEX_H */ diff --git a/arch/e2k/include/asm/kvm/guest/tlb_regs_types.h b/arch/e2k/include/asm/kvm/guest/tlb_regs_types.h new file mode 100644 index 000000000000..30091237c7f4 --- /dev/null +++ 
b/arch/e2k/include/asm/kvm/guest/tlb_regs_types.h @@ -0,0 +1,17 @@ +#ifndef __ASM_KVM_GUEST_TLB_REGS_TYPES_H +#define __ASM_KVM_GUEST_TLB_REGS_TYPES_H + +#ifdef __KERNEL__ + +#include + +#include +#include + +extern probe_entry_t kvm_mmu_entry_probe(e2k_addr_t virt_addr); +extern probe_entry_t kvm_mmu_address_probe(e2k_addr_t virt_addr); +extern mmu_reg_t kvm_read_dtlb_reg(e2k_addr_t virt_addr); + +#endif /* __KERNEL__ */ + +#endif /* __ASM_KVM_GUEST_TLB_REGS_TYPES_H */ diff --git a/arch/e2k/include/asm/kvm/guest/tlbflush.h b/arch/e2k/include/asm/kvm/guest/tlbflush.h new file mode 100644 index 000000000000..fc191e10d55e --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/tlbflush.h @@ -0,0 +1,183 @@ +/* + * Guest MMU caches flushing support on guest kernel + * + * Guest VM is fake virtual memory support. + * Guest kernel manage own VM as any linux kernel, but all it is fake, + * real used page tables, real physical pages, real flushing is made by host. + * + * Copyright 2016 Salavat S. Gilyazov (atic@mcst.ru) + */ +#ifndef _E2K_KVM_GUEST_TLBFLUSH_H +#define _E2K_KVM_GUEST_TLBFLUSH_H + +#include +#include + +#ifdef CONFIG_KVM_GUEST_KERNEL +extern void kvm_pv_flush_tlb_all(void); +extern void kvm_pv_flush_tlb_mm(struct mm_struct *mm); +extern void kvm_pv_flush_tlb_page(struct mm_struct *mm, e2k_addr_t addr); +extern void kvm_pv_flush_tlb_range(struct mm_struct *mm, e2k_addr_t start, + e2k_addr_t end); +extern void kvm_pv_flush_tlb_kernel_range(e2k_addr_t start, e2k_addr_t end); +extern void kvm_pv_flush_pmd_tlb_range(struct mm_struct *mm, e2k_addr_t start, + e2k_addr_t end); +extern void kvm_pv_flush_tlb_range_and_pgtables(struct mm_struct *mm, + e2k_addr_t start, + e2k_addr_t end); +#ifndef CONFIG_SMP +static inline void +kvm_flush_tlb_all(void) +{ + if (IS_HV_GM()) + __flush_tlb_all(); + else + kvm_pv_flush_tlb_all(); +} +static inline void +kvm_flush_tlb_mm(struct mm_struct *mm) +{ + if (IS_HV_GM()) + __flush_tlb_mm(mm); + else + kvm_pv_flush_tlb_mm(mm); +} +static 
inline void +kvm_flush_tlb_page(struct vm_area_struct *vma, e2k_addr_t addr) +{ + if (IS_HV_GM()) + __flush_tlb_page(vma->vm_mm, addr); + else + kvm_pv_flush_tlb_page(vma->vm_mm, addr); +} +static inline void +kvm_flush_tlb_range(struct mm_struct *mm, e2k_addr_t start, e2k_addr_t end) +{ + if (IS_HV_GM()) + __flush_tlb_range(mm, start, end); + else + kvm_pv_flush_tlb_range(mm, start, end); +} +static inline void +kvm_flush_tlb_kernel_range(e2k_addr_t start, e2k_addr_t end) +{ + if (IS_HV_GM()) + __flush_tlb_all(); + else + kvm_pv_flush_tlb_kernel_range(start, end); +} +static inline void +kvm_flush_pmd_tlb_range(struct mm_struct *mm, e2k_addr_t start, + e2k_addr_t end) +{ + if (IS_HV_GM()) + __flush_pmd_tlb_range(mm, start, end); + else + kvm_pv_flush_pmd_tlb_range(mm, start, end); +} +static inline void +kvm_flush_tlb_range_and_pgtables(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end) +{ + if (IS_HV_GM()) + __flush_tlb_range_and_pgtables(mm, start, end); + else + kvm_pv_flush_tlb_range_and_pgtables(mm, start, end); +} +#else /* CONFIG_SMP */ +extern void native_smp_flush_tlb_all(void); +extern void native_smp_flush_tlb_mm(struct mm_struct *mm); +extern void native_smp_flush_tlb_page(struct vm_area_struct *vma, + e2k_addr_t addr); +extern void native_smp_flush_tlb_range(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end); +extern void native_smp_flush_pmd_tlb_range(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end); +extern void native_smp_flush_tlb_range_and_pgtables(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end); +static inline void +kvm_flush_tlb_all(void) +{ + if (IS_HV_GM()) + native_smp_flush_tlb_all(); + else + kvm_pv_flush_tlb_all(); +} +static inline void +kvm_flush_tlb_mm(struct mm_struct *mm) +{ + if (IS_HV_GM()) + native_smp_flush_tlb_mm(mm); + else + kvm_pv_flush_tlb_mm(mm); +} +static inline void +kvm_flush_tlb_page(struct vm_area_struct *vma, e2k_addr_t addr) +{ + if (IS_HV_GM()) + native_smp_flush_tlb_page(vma, 
addr); + else + kvm_pv_flush_tlb_page(vma->vm_mm, addr); +} +static inline void +kvm_flush_tlb_range(struct mm_struct *mm, e2k_addr_t start, e2k_addr_t end) +{ + if (IS_HV_GM()) + native_smp_flush_tlb_range(mm, start, end); + else + kvm_pv_flush_tlb_range(mm, start, end); +} +static inline void +kvm_flush_tlb_kernel_range(e2k_addr_t start, e2k_addr_t end) +{ + if (IS_HV_GM()) + native_smp_flush_tlb_all(); + else + kvm_pv_flush_tlb_kernel_range(start, end); +} +static inline void +kvm_flush_pmd_tlb_range(struct mm_struct *mm, e2k_addr_t start, + e2k_addr_t end) +{ + if (IS_HV_GM()) + native_smp_flush_pmd_tlb_range(mm, start, end); + else + kvm_pv_flush_pmd_tlb_range(mm, start, end); +} +static inline void +kvm_flush_tlb_range_and_pgtables(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end) +{ + if (IS_HV_GM()) + native_smp_flush_tlb_range_and_pgtables(mm, start, end); + else + kvm_pv_flush_tlb_range_and_pgtables(mm, start, end); +} +#endif /* CONFIG_SMP */ + +/* it is native KVM guest kernel (not paravirtualized) */ +/* guest kernel does not support other virtual machines and guests */ +static __always_inline bool +__flush_guest_cpu_root_pt_page(struct vm_area_struct *vma, e2k_addr_t addr) +{ + return false; /* none any guests and guest addresses */ +} +static __always_inline bool +__flush_guest_cpu_root_pt_range(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end) +{ + return false; /* none any guests and guest addresses */ +} +static __always_inline bool +__flush_guest_cpu_root_pt_mm(struct mm_struct *mm) +{ + return false; /* none any guests and guest addresses */ +} +static __always_inline bool +__flush_guest_cpu_root_pt(void) +{ + return false; /* none any guests and guest addresses */ +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* _E2K_KVM_GUEST_TLBFLUSH_H */ diff --git a/arch/e2k/include/asm/kvm/guest/trace-defs.h b/arch/e2k/include/asm/kvm/guest/trace-defs.h new file mode 100644 index 000000000000..1d6d00916a52 --- /dev/null +++ 
b/arch/e2k/include/asm/kvm/guest/trace-defs.h @@ -0,0 +1,41 @@ +#ifndef _E2K_KVM_GUEST_TRACE_DEFS_H_ +#define _E2K_KVM_GUEST_TRACE_DEFS_H_ + +#include + +#include +#include +#include + +static inline void +trace_kvm_get_gva_spt_translation(e2k_addr_t address, + pgdval_t *pgd, pudval_t *pud, pmdval_t *pmd, pteval_t *pte, int *pt_level) +{ + mmu_spt_trans_t spt_trans; + int ret; + + ret = HYPERVISOR_get_spt_translation(address, &spt_trans); + if (unlikely(ret != 0)) { + pr_err("%s() : host could not get guest address 0x%lx " + "translation at SPTs, error %d\n", + __func__, address, ret); + *pgd = -1; + *pt_level = E2K_PGD_LEVEL_NUM; + return; + } + *pt_level = spt_trans.pt_levels; + if (*pt_level <= E2K_PGD_LEVEL_NUM) { + *pgd = spt_trans.pgd; + } + if (*pt_level <= E2K_PUD_LEVEL_NUM) { + *pud = spt_trans.pud; + } + if (*pt_level <= E2K_PMD_LEVEL_NUM) { + *pmd = spt_trans.pmd; + } + if (*pt_level <= E2K_PTE_LEVEL_NUM) { + *pte = spt_trans.pte; + } +} + +#endif /* _E2K_KVM_GUEST_TRACE_DEFS_H_ */ diff --git a/arch/e2k/include/asm/kvm/guest/trace-hw-stacks.h b/arch/e2k/include/asm/kvm/guest/trace-hw-stacks.h new file mode 100644 index 000000000000..d0567da219d9 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/trace-hw-stacks.h @@ -0,0 +1,308 @@ +#if !defined(_KVM_GUEST_TRACE_COPY_HW_STACKS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _KVM_GUEST_TRACE_COPY_HW_STACKS_H + +#include +#include + +#include +#include +#include +#include +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM guest + +TRACE_EVENT( + guest_copy_hw_stack, + + TP_PROTO(void *dst, void *src, unsigned long size, bool is_chain), + + TP_ARGS(dst, src, size, is_chain), + + TP_STRUCT__entry( + __field( void *, dst ) + __field( void *, src ) + __field( u64, size ) + __field( bool, is_chain ) + __field( pgdval_t, dst_pgd ) + __field( pudval_t, dst_pud ) + __field( pmdval_t, dst_pmd ) + __field( pteval_t, dst_pte ) + __field( int, dst_pt_level ) + __field( pgdval_t, src_pgd ) + __field( pudval_t, 
src_pud ) + __field( pmdval_t, src_pmd ) + __field( pteval_t, src_pte ) + __field( int, src_pt_level ) + __field( pgdval_t, dst_spt_pgd ) + __field( pudval_t, dst_spt_pud ) + __field( pmdval_t, dst_spt_pmd ) + __field( pteval_t, dst_spt_pte ) + __field( int, dst_spt_level ) + __field( pgdval_t, src_spt_pgd ) + __field( pudval_t, src_spt_pud ) + __field( pmdval_t, src_spt_pmd ) + __field( pteval_t, src_spt_pte ) + __field( int, src_spt_level ) + ), + + TP_fast_assign( + __entry->dst = dst; + __entry->src = src; + __entry->size = size; + __entry->is_chain = is_chain; + + trace_get_va_translation(current->mm, (e2k_addr_t)dst, + &__entry->dst_pgd, &__entry->dst_pud, &__entry->dst_pmd, + &__entry->dst_pte, &__entry->dst_pt_level); + trace_kvm_get_gva_spt_translation((e2k_addr_t)dst, + &__entry->dst_spt_pgd, &__entry->dst_spt_pud, + &__entry->dst_spt_pmd, &__entry->dst_spt_pte, + &__entry->dst_spt_level); + + trace_get_va_translation(current->mm, (e2k_addr_t)src, + &__entry->src_pgd, &__entry->src_pud, &__entry->src_pmd, + &__entry->src_pte, &__entry->src_pt_level); + trace_kvm_get_gva_spt_translation((e2k_addr_t)src, + &__entry->src_spt_pgd, &__entry->src_spt_pud, + &__entry->src_spt_pmd, &__entry->src_spt_pte, + &__entry->src_spt_level); + ), + + TP_printk("copy %s stack guest user <- guest kernel: dst %px " + "src %px size %llx\n" + " user guest dst %px : pgd 0x%016lx : %s\n" + " Access mode: %s%s\n" + " pud 0x%016lx : %s\n" + " Access mode: %s%s\n" + " pmd 0x%016lx : %s\n" + " Access mode: %s%s\n" + " pte 0x%016lx : %s\n" + " Access mode: %s%s\n" + " user guest dst spt %px : pgd 0x%016lx : %s\n" + " Access mode: %s%s\n" + " pud 0x%016lx : %s\n" + " Access mode: %s%s\n" + " pmd 0x%016lx : %s\n" + " Access mode: %s%s\n" + " pte 0x%016lx : %s\n" + " Access mode: %s%s\n" + " kernel guest src %px : pgd 0x%016lx : %s\n" + " Access mode: %s%s\n" + " pud 0x%016lx : %s\n" + " Access mode: %s%s\n" + " pmd 0x%016lx : %s\n" + " Access mode: %s%s\n" + " pte 0x%016lx : %s\n" + " 
Access mode: %s%s\n" + " kernel guest src spt %px : pgd 0x%016lx : %s\n" + " Access mode: %s%s\n" + " pud 0x%016lx : %s\n" + " Access mode: %s%s\n" + " pmd 0x%016lx : %s\n" + " Access mode: %s%s\n" + " pte 0x%016lx : %s\n" + " Access mode: %s%s", + (__entry->is_chain) ? "chain" : "procedure", + __entry->dst, + __entry->src, + __entry->size, + __entry->dst, + (__entry->dst_pt_level <= E2K_PGD_LEVEL_NUM) ? + __entry->dst_pgd : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->dst_pgd, + __entry->dst_pt_level <= E2K_PGD_LEVEL_NUM), + (__entry->dst_pt_level <= E2K_PUD_LEVEL_NUM) ? + __entry->dst_pud : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->dst_pud, + __entry->dst_pt_level <= E2K_PUD_LEVEL_NUM), + (__entry->dst_pt_level <= E2K_PMD_LEVEL_NUM) ? + __entry->dst_pmd : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->dst_pmd, + __entry->dst_pt_level <= E2K_PMD_LEVEL_NUM), + (__entry->dst_pt_level <= E2K_PTE_LEVEL_NUM) ? + __entry->dst_pte : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->dst_pte, + __entry->dst_pt_level <= E2K_PTE_LEVEL_NUM), + __entry->dst, + (__entry->dst_spt_level <= E2K_PGD_LEVEL_NUM) ? + __entry->dst_spt_pgd : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->dst_spt_pgd, + __entry->dst_spt_level <= E2K_PGD_LEVEL_NUM), + (__entry->dst_spt_level <= E2K_PUD_LEVEL_NUM) ? + __entry->dst_spt_pud : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->dst_spt_pud, + __entry->dst_spt_level <= E2K_PUD_LEVEL_NUM), + (__entry->dst_spt_level <= E2K_PMD_LEVEL_NUM) ? + __entry->dst_spt_pmd : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->dst_spt_pmd, + __entry->dst_spt_level <= E2K_PMD_LEVEL_NUM), + (__entry->dst_spt_level <= E2K_PTE_LEVEL_NUM) ? + __entry->dst_spt_pte : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->dst_spt_pte, + __entry->dst_spt_level <= E2K_PTE_LEVEL_NUM), + __entry->src, + (__entry->src_pt_level <= E2K_PGD_LEVEL_NUM) ? + __entry->src_pgd : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->src_pgd, + __entry->src_pt_level <= E2K_PGD_LEVEL_NUM), + (__entry->src_pt_level <= E2K_PUD_LEVEL_NUM) ? 
+ __entry->src_pud : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->src_pud, + __entry->src_pt_level <= E2K_PUD_LEVEL_NUM), + (__entry->src_pt_level <= E2K_PMD_LEVEL_NUM) ? + __entry->src_pmd : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->src_pmd, + __entry->src_pt_level <= E2K_PMD_LEVEL_NUM), + (__entry->src_pt_level <= E2K_PTE_LEVEL_NUM) ? + __entry->src_pte : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->src_pte, + __entry->src_pt_level <= E2K_PTE_LEVEL_NUM), + __entry->src, + (__entry->src_spt_level <= E2K_PGD_LEVEL_NUM) ? + __entry->src_spt_pgd : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->src_spt_pgd, + __entry->src_spt_level <= E2K_PGD_LEVEL_NUM), + (__entry->src_spt_level <= E2K_PUD_LEVEL_NUM) ? + __entry->src_spt_pud : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->src_spt_pud, + __entry->src_spt_level <= E2K_PUD_LEVEL_NUM), + (__entry->src_spt_level <= E2K_PMD_LEVEL_NUM) ? + __entry->src_spt_pmd : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->src_spt_pmd, + __entry->src_spt_level <= E2K_PMD_LEVEL_NUM), + (__entry->src_spt_level <= E2K_PTE_LEVEL_NUM) ? 
+ __entry->src_spt_pte : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->src_spt_pte, + __entry->src_spt_level <= E2K_PTE_LEVEL_NUM) + ) +); + +TRACE_EVENT( + guest_proc_stack_frame, + + TP_PROTO(kernel_mem_ps_t *ps_base, kernel_mem_ps_t *ps_frame), + + TP_ARGS(ps_base, ps_frame), + + TP_STRUCT__entry( + __field( kernel_mem_ps_t *, ps_base ) + __field_struct( kernel_mem_ps_t, ps_frame ) + __field( pgprotval_t, dtlb_entry ) + ), + + TP_fast_assign( + __entry->ps_base = ps_base; + __entry->ps_frame = *ps_frame; + __entry->dtlb_entry = HYPERVISOR_mmu_probe((e2k_addr_t)ps_base, + KVM_MMU_PROBE_ENTRY); + ), + + TP_printk(" %px (dtlb 0x%016lx) : 0x%016lx 0x%016lx", + __entry->ps_base, __entry->dtlb_entry, + __entry->ps_frame.word_lo, + __entry->ps_frame.word_hi) +); + +TRACE_EVENT( + guest_chain_stack_frame, + + TP_PROTO(e2k_mem_crs_t *pcs_base, e2k_mem_crs_t *pcs_frame), + + TP_ARGS(pcs_base, pcs_frame), + + TP_STRUCT__entry( + __field( e2k_mem_crs_t *, pcs_base ) + __field_struct( e2k_mem_crs_t, pcs_frame ) + __field( pgprotval_t, dtlb_entry ) + ), + + TP_fast_assign( + __entry->pcs_base = pcs_base; + __entry->pcs_frame = *pcs_frame; + __entry->dtlb_entry = HYPERVISOR_mmu_probe((e2k_addr_t)pcs_base, + KVM_MMU_PROBE_ENTRY); + ), + + TP_printk(" %px (dtlb 0x%016lx) : 0x%016llx 0x%016llx " + "0x%016llx 0x%016llx", + __entry->pcs_base, __entry->dtlb_entry, + __entry->pcs_frame.cr0_lo.CR0_lo_half, + __entry->pcs_frame.cr0_hi.CR0_hi_half, + __entry->pcs_frame.cr1_lo.CR1_lo_half, + __entry->pcs_frame.cr1_hi.CR1_hi_half) +); + +TRACE_EVENT( + guest_va_tlb_state, + + TP_PROTO(e2k_addr_t address), + + TP_ARGS(address), + + TP_STRUCT__entry( + __field( e2k_addr_t, address ) + __field( tlb_tag_t, set0_tag ) + __field_struct( pte_t, set0_entry ) + __field( tlb_tag_t, set1_tag ) + __field_struct( pte_t, set1_entry ) + __field( tlb_tag_t, set2_tag ) + __field_struct( pte_t, set2_entry ) + __field( tlb_tag_t, set3_tag ) + __field_struct( pte_t, set3_entry ) + __field( tlb_tag_t, setH_tag ) 
+ __field_struct( pte_t, setH_entry ) + __field( u64, dtlb_entry ) + __field( unsigned long, mmu_pptb ) + __field( unsigned long, mmu_pid ) + ), + + TP_fast_assign( + __entry->address = address; + __entry->set0_tag = HYPERVISOR_get_tlb_set_tag(address, 0, false); + pte_val(__entry->set0_entry) = + HYPERVISOR_get_tlb_set_entry(address, 0, false); + __entry->set1_tag = HYPERVISOR_get_tlb_set_tag(address, 1, false); + pte_val(__entry->set1_entry) = + HYPERVISOR_get_tlb_set_entry(address, 1, false); + __entry->set2_tag = HYPERVISOR_get_tlb_set_tag(address, 2, false); + pte_val(__entry->set2_entry) = + HYPERVISOR_get_tlb_set_entry(address, 2, false); + __entry->set3_tag = HYPERVISOR_get_tlb_set_tag(address, 3, false); + pte_val(__entry->set3_entry) = + HYPERVISOR_get_tlb_set_entry(address, 3, false); + __entry->setH_tag = HYPERVISOR_get_tlb_set_tag(address, 3, true); + pte_val(__entry->setH_entry) = + HYPERVISOR_get_tlb_set_entry(address, 3, true); + __entry->dtlb_entry = HYPERVISOR_mmu_probe(address, + KVM_MMU_PROBE_ENTRY); + __entry->mmu_pptb = HYPERVISOR_get_host_mmu_pptb(); + __entry->mmu_pid = HYPERVISOR_get_host_mmu_pid(); + ), + + TP_printk(" 0x%016lx : dtlb 0x%016llx U_PPTB 0x%lx PID 0x%lx\n" + " TLB set #0 tag 0x%016lx entry 0x%016lx\n" + " TLB set #1 tag 0x%016lx entry 0x%016lx\n" + " TLB set #2 tag 0x%016lx entry 0x%016lx\n" + " TLB set #3 tag 0x%016lx entry 0x%016lx\n" + " TLB set #H tag 0x%016lx entry 0x%016lx", + __entry->address, __entry->dtlb_entry, + __entry->mmu_pptb, __entry->mmu_pid, + __entry->set0_tag, pte_val(__entry->set0_entry), + __entry->set1_tag, pte_val(__entry->set1_entry), + __entry->set2_tag, pte_val(__entry->set2_entry), + __entry->set3_tag, pte_val(__entry->set3_entry), + __entry->setH_tag, pte_val(__entry->setH_entry) + ) +); + +#endif /* _KVM_GUEST_TRACE_COPY_HW_STACKS_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../arch/e2k/include/asm/kvm/guest +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE 
trace-hw-stacks + +/* This part must be outside protection */ +#include diff --git a/arch/e2k/include/asm/kvm/guest/trap_table.S.h b/arch/e2k/include/asm/kvm/guest/trap_table.S.h new file mode 100644 index 000000000000..8513b7b04a7b --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/trap_table.S.h @@ -0,0 +1,166 @@ +/* + * + * Copyright (C) 2018 MCST + * + * Definition of guest kernel traps handling routines. + */ + +#ifndef _E2K_KVM_GUEST_TRAP_TABLE_ASM_H +#define _E2K_KVM_GUEST_TRAP_TABLE_ASM_H + +#ifdef __ASSEMBLY__ + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is guest virtualized kernel */ + +/* + * goto guest kernel system call table entry, if system call is from guest user + * rti: register of current_thread_info() + * rtmp0 rtmp1 rtmp2: temporary registers + * ptmp0 ptmp1: temporary predicates + */ +.macro KVM_GOTO_PV_VCPU_KERNEL_TTABLE entry_num rti rtmp0 rtmp1 rtmp2 \ + ptmp0 ptmp1 + /* not used */ +.endm /* GOTO_GUEST_KERNEL_TTABLE */ + +#ifdef CONFIG_KVM_GUEST_HW_PV +/* guest virtualization based on hardware virtualized hypervisor */ + +.macro NEED_SAVE_CUR_AND_VCPU_STATE_GREGS drti, predV5, \ + drtmp0, drtmp1, predtmp, \ + predCUR, predVCPU, predEXTk + /* + * Native trap handler and these macros are used only + * if IS_HW_GM() == true + * so need not any additional conditions to calculate.
+ * drti - pointer to thread_info + * predV5 - ISET is >= V5 + * predCUR - is now set to true (trap from user) and cannot be updated: + * trap on guest user and (GCURTI & GCURTASK & CPU_ID & CPU_OFF) + * should be saved same as native or host mode + * %predVCPU - save VCPU state pointer regs: + * set same as %predCUR + * predEXTk - need save kernel (predCUR) & need save extension (!predV5) + */ + { + pass \predV5, @p0; + pass \predCUR, @p1; + landp @p1, @p1, @p5; + landp ~@p0, @p1, @p4; + pass @p5, \predVCPU; + pass @p4, \predEXTk; + } +.endm /* NEED_SAVE_CUR_AND_VCPU_STATE_GREGS */ + +/* guest VCPU state registers are saved at thread_info->h_gregs */ +/* same as by host for paravirtualized guest */ + +.macro DO_SAVE_HOST_GREGS_V2 gvcpu_lo, gvcpu_hi, hvcpu_lo, hvcpu_hi \ + drti, predSAVE, drtmp, rtmp0, rtmp1 + /* drtmp: thread_info->h_gregs.g */ + addd \drti, TI_HOST_GREGS_TO_VIRT, \drtmp ? \predSAVE; + SAVE_GREGS_PAIR_COND_V2 \gvcpu_lo, \gvcpu_hi, \hvcpu_lo, \hvcpu_hi, \ + \drtmp, /* thread_info->h_gregs.g base address */ \ + \predSAVE, \ + \rtmp0, \rtmp1 +.endm /* DO_SAVE_HOST_GREGS_V2 */ + +.macro DO_SAVE_HOST_GREGS_V5 gvcpu_lo, gvcpu_hi, hvcpu_lo, hvcpu_hi \ + drti, predSAVE, drtmp + /* drtmp: thread_info->h_gregs.g */ + addd \drti, TI_HOST_GREGS_TO_VIRT, \drtmp ?
\predSAVE; + SAVE_GREGS_PAIR_COND_V5 \gvcpu_lo, \gvcpu_hi, \hvcpu_lo, \hvcpu_hi, \ + \drtmp, /* thread_info->h_gregs.g base address */ \ + \predSAVE +.endm /* DO_SAVE_HOST_GREGS_V5 */ + +.macro SAVE_HOST_GREGS_V2 drti, predSAVE, drtmp, rtmp0, rtmp1 + DO_SAVE_HOST_GREGS_V2 \ + GUEST_VCPU_STATE_GREG, GUEST_VCPU_STATE_UNUSED_GREG, \ + VCPU_STATE_GREGS_PAIRS_INDEX, VCPU_STATE_GREGS_PAIRS_HI_INDEX, \ + \drti, \predSAVE, \ + \drtmp, \rtmp0, \rtmp1 +.endm /* SAVE_HOST_GREGS_V2 */ + +.macro SAVE_HOST_GREGS_V5 drti, predSAVE, drtmp + DO_SAVE_HOST_GREGS_V5 \ + GUEST_VCPU_STATE_GREG, GUEST_VCPU_STATE_UNUSED_GREG, \ + VCPU_STATE_GREGS_PAIRS_INDEX, VCPU_STATE_GREGS_PAIRS_HI_INDEX, \ + \drti, \predSAVE, \ + \drtmp, +.endm /* SAVE_HOST_GREGS_V5 */ + +.macro SAVE_HOST_GREGS_UNEXT gvcpu, hvcpu, drti, drtmp + /* drtmp: thread_info->h_gregs.g */ + addd \drti, TI_HOST_GREGS_TO_VIRT, \drtmp; + SAVE_GREG_UNEXT \gvcpu, \hvcpu, \drtmp +.endm /* SAVE_HOST_GREGS_UNEXT */ + +.global vcpus_state; + +#ifdef CONFIG_SMP +.macro SET_VCPU_STATE_GREGS_UNCOND drti, drtmp + ldw [ \drti + TSK_TI_CPU_DELTA ], \drtmp /* VCPU # */ + shld \drtmp, 3, \drtmp + ldd [ \drtmp + vcpus_state ], GVCPUSTATE +.endm /* SET_VCPU_STATE_GREGS */ +.macro SET_VCPU_STATE_GREGS drti, predSAVE, drtmp + ldw [ \drti + TSK_TI_CPU_DELTA ], \drtmp ? \predSAVE /* VCPU # */ + shld \drtmp, 3, \drtmp ? \predSAVE + ldd [ \drtmp + vcpus_state ], GVCPUSTATE ? \predSAVE +.endm /* SET_VCPU_STATE_GREGS */ +#else +.macro SET_VCPU_STATE_GREGS_UNCOND drti, drtmp + ldd [ 0 + vcpus_state ], GVCPUSTATE +.endm /* SET_VCPU_STATE_GREGS */ +.macro SET_VCPU_STATE_GREGS drti, predSAVE, drtmp + ldd [ 0 + vcpus_state ], GVCPUSTATE ? 
\predSAVE +.endm /* SET_VCPU_STATE_GREGS */ +#endif + +.macro SAVE_HOST_GREGS_TO_VIRT_V2 drti, predSAVE, drtmp, rtmp0, rtmp1 + SAVE_HOST_GREGS_V2 \drti, \predSAVE, \drtmp, \rtmp0, \rtmp1 + SET_VCPU_STATE_GREGS \drti, \predSAVE, \drtmp +.endm /* SAVE_HOST_GREGS_TO_VIRT_V2 */ + +.macro SAVE_HOST_GREGS_TO_VIRT_V5 drti, predSAVE, drtmp + SAVE_HOST_GREGS_V5 \drti, \predSAVE, \drtmp + SET_VCPU_STATE_GREGS \drti, \predSAVE, \drtmp +.endm /* SAVE_HOST_GREGS_TO_VIRT_V5 */ + +.macro SAVE_HOST_GREGS_TO_VIRT_UNEXT drti, drtmp + SAVE_HOST_GREGS_UNEXT GVCPUSTATE, \ + VCPU_STATE_GREGS_PAIRS_INDEX, \ + \drti, \drtmp + SET_VCPU_STATE_GREGS_UNCOND \drti, \drtmp +.endm /* SAVE_HOST_GREGS_TO_VIRT_UNEXT */ + +#else /* ! CONFIG_KVM_GUEST_HW_PV */ +/* It is virtualized guest based on paravirtualized hypervisor */ +/* without hardware support of virtualization */ + +.macro NEED_SAVE_CUR_AND_VCPU_STATE_GREGS drti, predV5, \ + drtmp0, drtmp1, predtmp, \ + predCUR, predVCPU, predEXTk + /* not used */ +.endm /* NEED_SAVE_CUR_AND_VCPU_STATE_GREGS */ + +.macro SAVE_HOST_GREGS_TO_VIRT_V2 drti, predSAVE, drtmp, rtmp0, rtmp1 + /* not used */ +.endm /* SAVE_VCPU_STATE_GREGS */ + +.macro SAVE_HOST_GREGS_TO_VIRT_V5 drti, predSAVE, drtmp + /* not used */ +.endm /* SAVE_GREGS_TO_VIRT */ + +.macro SAVE_HOST_GREGS_TO_VIRT_UNEXT drti, drtmp + /* not used */ +.endm /* SAVE_HOST_GREGS_TO_VIRT_UNEXT */ + +#endif /* CONFIG_KVM_GUEST_HW_PV */ +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __ASSEMBLY__ */ + +#endif /* _E2K_KVM_GUEST_TRAP_TABLE_ASM_H */ diff --git a/arch/e2k/include/asm/kvm/guest/trap_table.h b/arch/e2k/include/asm/kvm/guest/trap_table.h new file mode 100644 index 000000000000..a5198b048bbb --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/trap_table.h @@ -0,0 +1,317 @@ +#ifndef __KVM_GUEST_E2K_TRAP_TABLE_H +#define __KVM_GUEST_E2K_TRAP_TABLE_H + +/* Does not include this header directly, include */ + +#include +#include + +extern int kvm_guest_ttable_entry0(void); +extern long 
kvm_guest_ttable_entry1(int sys_num, + u32 arg1, u32 arg2, u32 arg3, u32 arg4, u32 arg5, u32 arg6); +extern long kvm_guest_ttable_entry3(int sys_num, + u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6); +extern long kvm_guest_ttable_entry4(int sys_num, + u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6); +extern long kvm_guest_ttable_entry5(int sys_num, + u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6); +extern long kvm_guest_ttable_entry6(int sys_num, + u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6); + +static __always_inline void kvm_init_pt_regs_copyed_fields(struct pt_regs *regs) +{ +#ifdef CONFIG_KVM_GUEST_KERNEL + if (likely(!regs->stack_regs_saved)) { + regs->copyed.ps_size = 0; + regs->copyed.pcs_size = 0; + regs->copyed.pcs_injected_frames_size = 0; + } else { + /* the regs is reused and all stacks should be already copyed */ + ; + } +#endif /* CONFIG_KVM_GUEST_KERNEL */ +} + +static __always_inline void kvm_init_pt_regs(struct pt_regs *regs) +{ + kvm_init_pt_regs_copyed_fields(regs); +} + +static __always_inline void +kvm_init_traps_handling(struct pt_regs *regs, bool user_mode_trap) +{ + kvm_init_pt_regs(regs); +} +static __always_inline void +kvm_init_syscalls_handling(struct pt_regs *regs) +{ + kvm_init_traps_handling(regs, true); /* now as traps init */ +} + +static inline void kvm_clear_fork_child_pt_regs(struct pt_regs *childregs) +{ + native_clear_fork_child_pt_regs(childregs); + kvm_init_pt_regs_copyed_fields(childregs); +} + +#define KVM_FILL_HARDWARE_STACKS() /* host itself will fill */ + +extern void kvm_correct_trap_psp_pcsp(struct pt_regs *regs, + thread_info_t *thread_info); +extern void kvm_correct_scall_psp_pcsp(struct pt_regs *regs, + thread_info_t *thread_info); +extern void kvm_correct_trap_return_ip(struct pt_regs *regs, + unsigned long return_ip); + +#ifdef COMMON_KERNEL_USER_HW_STACKS +static inline void +kvm_do_correct_trap_psp_pcsp(struct pt_regs *regs, thread_info_t *thread_info) +{ + 
NATIVE_CORRECT_TRAP_PSP_PCSP(regs, thread_info); +} +static inline void +kvm_do_correct_scall_psp_pcsp(struct pt_regs *regs, thread_info_t *thread_info) +{ + /* hardware stacks do not increment for system call on guest */ + /* so nothing to do */ +} +#endif /* COMMON_KERNEL_USER_HW_STACKS */ + +/* + * Guest trap handler on hardware stacks bounds can be called only on + * exceptions flags into TIRs, which be passed to guest by host handler. + * So nothing addition condition to run handler. + */ +static inline bool +kvm_is_proc_stack_bounds(struct thread_info *ti, struct pt_regs *regs) +{ + return false; +} +static inline bool +kvm_is_chain_stack_bounds(struct thread_info *ti, struct pt_regs *regs) +{ + return false; +} + +static inline void kvm_set_sge(void) +{ + KVM_WRITE_PSR_REG_VALUE((KVM_READ_PSR_REG_VALUE() | PSR_SGE)); +} +static inline void kvm_reset_sge(void) +{ + KVM_WRITE_PSR_REG_VALUE((KVM_READ_PSR_REG_VALUE() & ~PSR_SGE)); +} +static inline void boot_kvm_set_sge(void) +{ + BOOT_KVM_WRITE_PSR_REG_VALUE((BOOT_KVM_READ_PSR_REG_VALUE() | + PSR_SGE)); +} +static inline void boot_kvm_reset_sge(void) +{ + BOOT_KVM_WRITE_PSR_REG_VALUE((BOOT_KVM_READ_PSR_REG_VALUE() & + ~PSR_SGE)); +} +static inline void +kvm_stack_bounds_trap_enable(void) +{ + /* TODO now sge is always enabled (even in kernel), + * so this probably isn't needed anymore */ + kvm_set_sge(); +} + +static inline int +kvm_do_aau_page_fault(struct pt_regs *const regs, e2k_addr_t address, + const tc_cond_t condition, const tc_mask_t mask, + const unsigned int aa_no) +{ + return native_do_aau_page_fault(regs, address, condition, mask, aa_no); +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native KVM guest kernel (not paravirtualized) */ + +#define ttable_entry1 kvm_guest_ttable_entry1 +#define ttable_entry3 kvm_guest_ttable_entry3 +#define ttable_entry4 kvm_guest_ttable_entry4 +#define ttable_entry5 kvm_guest_ttable_entry5 +#define ttable_entry6 kvm_guest_ttable_entry6 + +#define get_ttable_entry1 \ +({ 
\ + ttable_entry_args_t ttable_entry; \ + ttable_entry = \ + ((IS_HV_GM()) ? (ttable_entry_args_t)native_ttable_entry1 : \ + (ttable_entry_args_t)kvm_guest_ttable_entry1); \ + ttable_entry; \ +}) +#define get_ttable_entry3 \ +({ \ + ttable_entry_args_t ttable_entry; \ + ttable_entry = \ + ((IS_HV_GM()) ? (ttable_entry_args_t)native_ttable_entry3 : \ + (ttable_entry_args_t)kvm_guest_ttable_entry3); \ + ttable_entry; \ +}) + +#define get_ttable_entry4 \ +({ \ + ttable_entry_args_t ttable_entry; \ + ttable_entry = \ + ((IS_HV_GM()) ? (ttable_entry_args_t)native_ttable_entry4 : \ + (ttable_entry_args_t)kvm_guest_ttable_entry4); \ + ttable_entry; \ +}) + +#define FILL_HARDWARE_STACKS() \ +do { \ + if (IS_HV_GM()) { \ + NATIVE_FILL_HARDWARE_STACKS(); \ + } else { \ + KVM_FILL_HARDWARE_STACKS(); \ + } \ +} while (false) + +static inline void +exit_handle_syscall(e2k_addr_t sbr, e2k_usd_hi_t usd_hi, + e2k_usd_lo_t usd_lo, e2k_upsr_t upsr) +{ + kvm_exit_handle_syscall(sbr, usd_hi, usd_lo, upsr); +} + +#define handle_guest_traps(regs) /* none any guests */ + +static __always_inline void +init_guest_traps_handling(struct pt_regs *regs, bool user_mode_trap) +{ + kvm_init_traps_handling(regs, user_mode_trap); +} +static __always_inline void +init_guest_syscalls_handling(struct pt_regs *regs) +{ + kvm_init_syscalls_handling(regs); +} +static inline bool +is_guest_TIRs_frozen(struct pt_regs *regs) +{ + return false; /* none any guest */ +} + +static inline void clear_fork_child_pt_regs(struct pt_regs *childregs) +{ + kvm_clear_fork_child_pt_regs(childregs); +} + +static inline bool +is_proc_stack_bounds(struct thread_info *ti, struct pt_regs *regs) +{ + return kvm_is_proc_stack_bounds(ti, regs); +} +static inline bool +is_chain_stack_bounds(struct thread_info *ti, struct pt_regs *regs) +{ + return kvm_is_chain_stack_bounds(ti, regs); +} +static inline void +stack_bounds_trap_enable(void) +{ + kvm_stack_bounds_trap_enable(); +} + +#ifdef COMMON_KERNEL_USER_HW_STACKS +static inline 
void +correct_trap_psp_pcsp(struct pt_regs *regs, thread_info_t *thread_info) +{ + kvm_do_correct_trap_psp_pcsp(regs, thread_info); +} +static inline void +correct_scall_psp_pcsp(struct pt_regs *regs, thread_info_t *thread_info) +{ + kvm_do_correct_scall_psp_pcsp(regs, thread_info); +} +#endif /* COMMON_KERNEL_USER_HW_STACKS */ + +static inline void +correct_trap_return_ip(struct pt_regs *regs, unsigned long return_ip) +{ + kvm_correct_trap_return_ip(regs, return_ip); +} + +static inline bool +handle_guest_last_wish(struct pt_regs *regs) +{ + return false; /* none any guest and any wishes from */ +} + +static inline int +do_aau_page_fault(struct pt_regs *const regs, e2k_addr_t address, + const tc_cond_t condition, const tc_mask_t mask, + const unsigned int aa_no) +{ + return kvm_do_aau_page_fault(regs, address, condition, mask, aa_no); +} + +/* + * Following functions run on host, check if traps occurred on guest user + * or kernel, so probably should be passed to guest kernel to handle. 
+ * Guest has not any own guests + */ +static inline unsigned long +pass_aau_trap_to_guest(struct pt_regs *regs, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + return 0; +} +static inline unsigned long +pass_the_trap_to_guest(struct pt_regs *regs, + unsigned long TIR_hi, unsigned long TIR_lo, + int trap_no) +{ + return 0; +} +static inline unsigned long +pass_stack_bounds_trap_to_guest(struct pt_regs *regs, + bool proc_bounds, bool chain_bounds) +{ + return 0; +} +static inline unsigned long +pass_coredump_trap_to_guest(struct pt_regs *regs) +{ + return 0; +} +static inline unsigned long +pass_interrupt_to_guest(struct pt_regs *regs, int trap_no, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + return 0; +} +static inline unsigned long +pass_nm_interrupt_to_guest(struct pt_regs *regs, int trap_no, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + return 0; +} +static inline unsigned long +pass_virqs_to_guest(struct pt_regs *regs, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + return 0; +} +static inline unsigned long +pass_clw_fault_to_guest(struct pt_regs *regs, trap_cellar_t *tcellar) +{ + return 0; +} +static inline unsigned long +pass_page_fault_to_guest(struct pt_regs *regs, trap_cellar_t *tcellar) +{ + return 0; +} +static inline void +complete_page_fault_to_guest(unsigned long what_complete) +{ +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __KVM_GUEST_E2K_TRAP_TABLE_H */ diff --git a/arch/e2k/include/asm/kvm/guest/traps.h b/arch/e2k/include/asm/kvm/guest/traps.h new file mode 100644 index 000000000000..403a1478d795 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/traps.h @@ -0,0 +1,129 @@ +/* + * + * Copyright (C) 2012 MCST + * + * Defenition of kvm guest kernel traps handling routines. 
+ */ + +#ifndef _E2K_ASM_KVM_GUEST_TRAPS_H +#define _E2K_ASM_KVM_GUEST_TRAPS_H + +#include +#include +#include + +extern int kvm_do_hw_stack_bounds(struct pt_regs *regs, + bool proc_bounds, bool chain_bounds); +extern irqreturn_t guest_do_interrupt(struct pt_regs *regs); +extern unsigned long kvm_do_mmio_page_fault(struct pt_regs *regs, + trap_cellar_t *tcellar); +extern void kvm_sysrq_showstate_interrupt(struct pt_regs *regs); +extern void kvm_init_system_handlers_table(void); + +static inline void +kvm_instr_page_fault(struct pt_regs *regs, tc_fault_type_t ftype, + const int async_instr) +{ + native_instr_page_fault(regs, ftype, async_instr); +} + +static inline u64 +kvm_TIR0_clear_false_exceptions(u64 TIR_hi, int nr_TIRs) +{ + /* all false exceptions were cleared while traps passing to guest */ + return TIR_hi; +} + +extern int kvm_host_apply_psp_delta_to_signal_stack(unsigned long base, + unsigned long size, unsigned long start, + unsigned long end, unsigned long delta); +extern int kvm_host_apply_pcsp_delta_to_signal_stack(unsigned long base, + unsigned long size, unsigned long start, + unsigned long end, unsigned long delta); + +static inline unsigned long +kvm_mmio_page_fault(struct pt_regs *regs, trap_cellar_t *tcellar) +{ + e2k_addr_t address; + + if (!kernel_mode(regs)) { + /* trap on user and cannot be to IO */ + return 0; /* not handled */ + } + address = tcellar->address; + if (likely(((address < GUEST_VMALLOC_START || + address >= GUEST_VMALLOC_END)) && + !KVM_IS_VGA_VRAM_VIRT_ADDR(address))) { + /* address is out of address space area to IO remap or */ + /* VGA VRAM */ + return 0; /* not handled */ + } + return kvm_do_mmio_page_fault(regs, tcellar); +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (paravirtualized pv_ops not used) */ + +static inline u64 +TIR0_clear_false_exceptions(u64 TIR_hi, int nr_TIRs) +{ + return kvm_TIR0_clear_false_exceptions(TIR_hi, nr_TIRs); +} + +static inline void +instr_page_fault(struct pt_regs *regs, 
tc_fault_type_t ftype, + const int async_instr) +{ + kvm_instr_page_fault(regs, ftype, async_instr); +} + +/* The follow function should be deleted */ +static inline int +do_hw_stack_bounds(struct pt_regs *regs, bool proc_bounds, bool chain_bounds) +{ + return kvm_do_hw_stack_bounds(regs, proc_bounds, chain_bounds); +} + +static inline int host_apply_psp_delta_to_signal_stack(unsigned long base, + unsigned long size, unsigned long start, + unsigned long end, unsigned long delta) +{ + if (IS_HV_GM()) { + return native_host_apply_psp_delta_to_signal_stack(base, size, + start, end, delta); + } + return kvm_host_apply_psp_delta_to_signal_stack(base, size, + start, end, delta); +} + +static inline int host_apply_pcsp_delta_to_signal_stack(unsigned long base, + unsigned long size, unsigned long start, + unsigned long end, unsigned long delta) +{ + if (IS_HV_GM()) { + return native_host_apply_pcsp_delta_to_signal_stack(base, size, + start, end, delta); + } + return kvm_host_apply_pcsp_delta_to_signal_stack(base, size, + start, end, delta); +} + +static inline void +handle_interrupt(struct pt_regs *regs) +{ + guest_do_interrupt(regs); +} +static inline unsigned long +mmio_page_fault(struct pt_regs *regs, trap_cellar_t *tcellar) +{ + return kvm_mmio_page_fault(regs, tcellar); +} +static inline void +init_guest_system_handlers_table(void) +{ + kvm_init_system_handlers_table(); +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* _E2K_ASM_KVM_GUEST_TRAPS_H */ diff --git a/arch/e2k/include/asm/kvm/guest/v2p.h b/arch/e2k/include/asm/kvm/guest/v2p.h new file mode 100644 index 000000000000..9ad49032fb36 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/v2p.h @@ -0,0 +1,133 @@ +/* + * + * Heading of boot-time initialization. 
+ * + * Copyright (C) 2001 Salavat Guiliazov + */ + +#ifndef _E2K_KVM_GUEST_V2P_H +#define _E2K_KVM_GUEST_V2P_H + +#include + +#include +#include + +#ifndef __ASSEMBLY__ + + +#define kvm_pa(gvpa) __pa(gvpa) +#define kvm_va(gpa) __va(gpa) + +static inline void * +boot_kvm_kernel_va_to_pa(void *virt_pnt, unsigned long kernel_base) +{ + unsigned long os_base; + + if ((e2k_addr_t)virt_pnt < KERNEL_BASE || + (e2k_addr_t)virt_pnt >= KERNEL_END) + return virt_pnt; + os_base = BOOT_KVM_READ_OSCUD_LO_REG_VALUE() & OSCUD_lo_base_mask; + if (os_base >= KERNEL_BASE) + return virt_pnt; + if (kernel_base == -1) + kernel_base = os_base; + return (void *)(kernel_base + ((e2k_addr_t)virt_pnt - KERNEL_BASE)); +} + +/* + * Guest kernel runs into virtual space, so functions can not be converted + * to virtual physical space (execute protection does not set) and + * should remain source virtual addresses + */ +static inline void * +boot_kvm_func_to_pa(void *virt_pnt) +{ + return boot_kvm_kernel_va_to_pa(virt_pnt, -1); +} + +static inline void * +boot_kvm_va_to_pa(void *virt_pnt) +{ + return boot_kvm_kernel_va_to_pa(virt_pnt, -1); +} + +/* + * KVM guest kernel booting on physical memory mapped + * to virtual space with GUEST_PAGE_OFFSET + * So it needs convert a virtual physical address to real physical. + */ +static inline e2k_addr_t +boot_kvm_vpa_to_pa(e2k_addr_t vpa) +{ + return (vpa >= GUEST_PAGE_OFFSET) ? kvm_pa((void *)vpa) : vpa; +} +static inline e2k_addr_t +boot_kvm_pa_to_vpa(e2k_addr_t pa) +{ + unsigned long os_base; + + os_base = BOOT_KVM_READ_OSCUD_LO_REG_VALUE() & OSCUD_lo_base_mask; + if (os_base >= GUEST_PAGE_OFFSET) + /* VPA is supported */ + return (e2k_addr_t)kvm_va((void *)pa); + else + /* nonpaging mode: all addresses can be only physical */ + return pa; +} +static inline e2k_addr_t +kvm_vpa_to_pa(e2k_addr_t vpa) +{ + return (vpa >= GUEST_PAGE_OFFSET) ? 
kvm_pa((void *)vpa) : vpa; +} +static inline e2k_addr_t +kvm_pa_to_vpa(e2k_addr_t pa) +{ + return (e2k_addr_t)kvm_va((void *)pa); +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops */ +static inline void * +boot_kernel_va_to_pa(void *virt_pnt, unsigned long kernel_base) +{ + return boot_kvm_kernel_va_to_pa(virt_pnt, kernel_base); +} +static inline void * +boot_func_to_pa(void *virt_pnt) +{ + return boot_kvm_func_to_pa(virt_pnt); +} + +static inline void * +boot_va_to_pa(void *virt_pnt) +{ + return boot_kvm_va_to_pa(virt_pnt); +} + +static inline e2k_addr_t +boot_vpa_to_pa(e2k_addr_t vpa) +{ + return boot_kvm_vpa_to_pa(vpa); +} +static inline e2k_addr_t +boot_pa_to_vpa(e2k_addr_t pa) +{ + return boot_kvm_pa_to_vpa(pa); +} + +static inline e2k_addr_t +vpa_to_pa(e2k_addr_t vpa) +{ + return kvm_vpa_to_pa(vpa); +} +static inline e2k_addr_t +pa_to_vpa(e2k_addr_t pa) +{ + return kvm_pa_to_vpa(pa); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __ASSEMBLY__ */ + +#endif /* !(_E2K_KVM_GUEST_V2P_H) */ diff --git a/arch/e2k/include/asm/kvm/guest/vga.h b/arch/e2k/include/asm/kvm/guest/vga.h new file mode 100644 index 000000000000..061f38fd9fdd --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/vga.h @@ -0,0 +1,52 @@ +#ifndef _E2K_KVM_GUEST_VGA_H_ +#define _E2K_KVM_GUEST_VGA_H_ + +#include + +/* + * VGA screen support + * VGA Video Memory emulated as part of common guest VCPUs virtual memory + */ +#define KVM_VGA_VRAM_PHYS_BASE GUEST_VCPU_VRAM_PHYS_BASE +#define KVM_VGA_VRAM_OFFSET VGA_VRAM_PHYS_BASE /* a0000 - c0000 */ +#define KVM_VGA_VRAM_START KVM_VGA_VRAM_OFFSET +#define KVM_VGA_VRAM_SIZE VGA_VRAM_SIZE +#define KVM_VGA_VRAM_END (KVM_VGA_VRAM_START + KVM_VGA_VRAM_SIZE) +#define KVM_VGA_VRAM_VIRT_TO_PHYS(addr) virt_to_phys(addr) +#define KVM_VGA_VRAM_PHYS_TO_VIRT(addr) phys_to_virt(addr) + +#define KVM_IS_PHYS_MEM_MAP_ADDR(addr) \ + ((addr) >= GUEST_PAGE_OFFSET && \ + (addr) < (GUEST_PAGE_OFFSET + MAX_PM_SIZE)) 
+#define KVM_IS_VGA_VRAM_PHYS_ADDR(addr) \ + ((addr) >= KVM_VGA_VRAM_START && (addr) < KVM_VGA_VRAM_END) +#define KVM_IS_VGA_VRAM_VIRT_ADDR(addr) \ + KVM_IS_VGA_VRAM_PHYS_ADDR(KVM_VGA_VRAM_VIRT_TO_PHYS(addr)) + +extern void kvm_scr_writew(u16 val, volatile u16 *addr); +extern u16 kvm_scr_readw(volatile const u16 *addr); +extern void kvm_vga_writeb(u8 val, volatile u8 *addr); +extern u8 kvm_vga_readb(volatile const u8 *addr); + +#ifdef CONFIG_KVM_GUEST_KERNEL +static inline void scr_writew(u16 val, volatile u16 *addr) +{ + kvm_scr_writew(val, addr); +} + +static inline u16 scr_readw(volatile const u16 *addr) +{ + return kvm_scr_readw(addr); +} +static inline void vga_writeb(u8 val, volatile u8 *addr) +{ + kvm_vga_writeb(val, addr); +} + +static inline u8 vga_readb(volatile const u8 *addr) +{ + return kvm_vga_readb(addr); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* _E2K_KVM_GUEST_VGA_H_ */ diff --git a/arch/e2k/include/asm/kvm/head.h b/arch/e2k/include/asm/kvm/head.h new file mode 100644 index 000000000000..61ef5baf029f --- /dev/null +++ b/arch/e2k/include/asm/kvm/head.h @@ -0,0 +1,111 @@ + +#ifndef _ASM_E2K_KVM_HEAD_H +#define _ASM_E2K_KVM_HEAD_H + +#include + +/* + * Kernel virtual memory layout + */ +#ifdef CONFIG_VIRTUALIZATION +/* + * it can be: + * paravirtualized host and guest kernel + * native host kernel with virtualization support + * pure guest kernel (not paravirtualized based on pv_ops) + */ + +/* 0x0000 e200 0000 0000 - 0x0000 e200 3fff ffff host image area + modules */ +#define HOST_KERNEL_IMAGE_AREA_BASE NATIVE_KERNEL_IMAGE_AREA_BASE +/* 0x0000 2e00 0000 0000 - 0x0000 2e00 3fff ffff shadow host image area + */ +/* modules at guest space */ +#define SHADOW_KERNEL_IMAGE_AREA_BASE 0x00002e0000000000 +#endif /* CONFIG_VIRTUALIZATION */ + +#if !defined(CONFIG_VIRTUALIZATION) +/* it is native kernel without any virtualization */ +#include + +#define E2K_KERNEL_IMAGE_AREA_BASE NATIVE_KERNEL_IMAGE_AREA_BASE +#elif !defined(CONFIG_PARAVIRT_GUEST) && 
!defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native host kernel with virtualization support */ + +#define E2K_KERNEL_IMAGE_AREA_BASE HOST_KERNEL_IMAGE_AREA_BASE +#define GUEST_KERNEL_IMAGE_AREA_BASE SHADOW_KERNEL_IMAGE_AREA_BASE +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include + +#define E2K_KERNEL_IMAGE_AREA_BASE GUEST_KERNEL_IMAGE_AREA_BASE +#define GUEST_KERNEL_IMAGE_AREA_BASE SHADOW_KERNEL_IMAGE_AREA_BASE +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include + +#define E2K_KERNEL_IMAGE_AREA_BASE HOST_KERNEL_IMAGE_AREA_BASE +#define GUEST_KERNEL_IMAGE_AREA_BASE NATIVE_KERNEL_IMAGE_AREA_BASE +#else + #error "Unknown virtualization type" +#endif /* ! CONFIG_VIRTUALIZATION */ + +#ifdef CONFIG_VIRTUALIZATION + +#define HOST_KERNEL_PHYS_MEM_VIRT_BASE HOST_PAGE_OFFSET /* 0x0000c000 ... */ +#define GUEST_KERNEL_PHYS_MEM_VIRT_BASE GUEST_PAGE_OFFSET /* 0x00002000 ... */ +#define GUEST_IO_PORTS_VIRT_BASE 0x00003f7e7e000000UL + +#define GUEST_NBSR_BASE THE_NODE_NBSR_PHYS_BASE(0); + +/* + * Guest physical memory (RAM) is emulated as one or more host virtual + * contigous areas (gfn + GUEST_PAGE_OFFSET) + * + * Probably it should be different for different architecture release + * 0x0000 0000 0000 0000 - 0x0000 00ef ffff ffff now limited like this, + * 0x0000 2000 0000 0000 - 0x0000 20ff ffff ffff but limit can be decremented + */ + +#define GUEST_RAM_PHYS_BASE 0x0000000000000000UL +#define GUEST_MAX_RAM_SIZE 0x000000f000000000UL +#define GUEST_RAM_VIRT_BASE \ + (GUEST_RAM_PHYS_BASE + GUEST_PAGE_OFFSET) + +/* + * Virtual memory (VRAM) is used to emulate VCPUs (CPU, MMU, SIC, VIRQ VCPU, + * other hardware) registers, tables, structures. + * Now it should be of size to locate VCPU state and CUT of guest kernel + * + * WARNING: VRAM physical base should not intersects with real physical + * memory address space layout. 
Probably it should be different for different + * architecture + * 0x0000 00ff 0000 0000 - 0x0000 00ff 00ff ffff should not be valid for + * 0x0000 20ff 0000 0000 - 0x0000 20ff 00ff ffff real memory layout + */ + +/* Macros defines VRAM for one VCPU or VIRQ VCPU, */ +/* but VRAM should be created for all VCPU and VIRQ VCPU */ +#define GUEST_ONE_VCPU_VRAM_SIZE (4 *4096) /* 4 pages */ +#define GUEST_VCPU_VRAM_PHYS_BASE 0x000000ff00000000UL +#define GUEST_MAX_VCPU_VRAM_SIZE 0x0000000001000000UL +#define GUEST_VCPU_VRAM_VIRT_BASE \ + (GUEST_VCPU_VRAM_PHYS_BASE + GUEST_PAGE_OFFSET) + +/* + * Virtual IO memory (IO-VRAM) is used to emulate ISA, VGA low memory ... + * Now this memory is used only to emulate VGA low memory + * + * WARNING: IO-VRAM physical base should not intersects with real physical + * memory address space layout and VRAM. + * Probably it should be different for different architecture release + * 0x0000 00ff 0100 0000 - 0x0000 00ff 010f ffff should not be valid for + * 0x0000 20ff 0100 0000 - 0x0000 20ff 010f ffff real memory layout and VRAM + */ + +#define GUEST_IO_VRAM_PHYS_BASE 0x000000ff01000000UL +#define GUEST_IO_VRAM_SIZE 0x0000000000100000UL +#define GUEST_IO_VRAM_VIRT_BASE \ + (GUEST_IO_VRAM_PHYS_BASE + GUEST_PAGE_OFFSET) + +#endif /* CONFIG_VIRTUALIZATION */ + +#endif /* ! _ASM_E2K_KVM_HEAD_H */ diff --git a/arch/e2k/include/asm/kvm/host_printk.h b/arch/e2k/include/asm/kvm/host_printk.h new file mode 100644 index 000000000000..ab6c26b766c0 --- /dev/null +++ b/arch/e2k/include/asm/kvm/host_printk.h @@ -0,0 +1,41 @@ +/* + * Guest VM printk() on host support + * + * Copyright 2015 Salavat S. 
Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_HOST_PRINTK_H +#define _E2K_KVM_HOST_PRINTK_H + +#include + +#define HOST_PRINTK_BUFFER_MAX 128 /* max size of buffer to print */ + +#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native host without any virtualization or */ +/* native kernel with virtualization support */ +#define host_printk(fmt, args...) printk(fmt, ##args) + +#define host_pr_alert(fmt, args...) pr_alert(fmt, ##args) +#define host_pr_cont(fmt, args...) pr_cont(fmt, ##args) +#define host_pr_info(fmt, args...) pr_info(fmt, ##args) + +#define host_dump_stack() dump_stack() +#define host_print_pt_regs(regs) print_pt_regs(regs) +#define host_print_all_TIRs(TIRs, nr_TIRs) \ + print_all_TIRs(TIRs, nr_TIRs) +#define host_print_tc_record(tcellar, num) \ + print_tc_record(tcellar, num) +#define host_print_all_TC(TC, TC_count) \ + print_all_TC(TC, TC_count) +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest (not virtualized based on pv_ops) */ +#include +#else + #error "Undefined type of virtualization" +#endif /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! 
_E2K_KVM_HOST_PRINTK_H */ diff --git a/arch/e2k/include/asm/kvm/hvc-console.h b/arch/e2k/include/asm/kvm/hvc-console.h new file mode 100644 index 000000000000..299410b2e1b8 --- /dev/null +++ b/arch/e2k/include/asm/kvm/hvc-console.h @@ -0,0 +1,27 @@ +#ifndef L_HVC_CONSOLE_H +#define L_HVC_CONSOLE_H + +#include + +#ifdef CONFIG_EARLY_VIRTIO_CONSOLE +extern int boot_hvc_l_cons_init(unsigned long console_base); +extern void boot_hvc_l_raw_putc(unsigned char c); +extern bool early_virtio_cons_enabled; +#define boot_early_virtio_cons_enabled \ + boot_get_vo_value(early_virtio_cons_enabled) +#else /* !CONFIG_EARLY_VIRTIO_CONSOLE */ +#define early_virtio_cons_enabled false +#define boot_early_virtio_cons_enabled false +#endif /* CONFIG_EARLY_VIRTIO_CONSOLE */ + +#ifdef CONFIG_HVC_L +extern __init struct console *hvc_l_early_cons_init(int idx); +extern void hvc_l_raw_putc(unsigned char c); +#else /* !CONFIG_HVC_L */ +static inline struct console *hvc_l_early_cons_init(int idx) +{ + return NULL; +} +#endif /* CONFIG_HVC_L */ + +#endif /* L_HVC_CONSOLE_H */ diff --git a/arch/e2k/include/asm/kvm/hypercall.h b/arch/e2k/include/asm/kvm/hypercall.h new file mode 100644 index 000000000000..0b9752609862 --- /dev/null +++ b/arch/e2k/include/asm/kvm/hypercall.h @@ -0,0 +1,1541 @@ +/****************************************************************************** + * hypercall.h + * + * KVM host <-> guest Linux-specific hypervisor handling. 
+ * + * Copyright (c) 2011-2012, Salavat Gilyazov + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifndef _ASM_E2K_HYPERCALL_H +#define _ASM_E2K_HYPERCALL_H + +#include +#include + +#include +#include +#include +#include +#include + +#ifdef CONFIG_KVM_GUEST_HW_HCALL +extern unsigned long light_hw_hypercall(unsigned long nr, + unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4, + unsigned long arg5, unsigned long arg6); +extern unsigned long generic_hw_hypercall(unsigned long nr, + unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4, + unsigned long arg5, unsigned long arg6, + unsigned long arg7); +#endif /* CONFIG_KVM_GUEST_HW_HCALL */ + +static inline unsigned long light_hypercall(unsigned long nr, + unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4, + unsigned long arg5, unsigned long arg6) +{ + unsigned long ret; + +#ifdef CONFIG_KVM_GUEST_HW_HCALL +# ifdef CONFIG_KVM_GUEST_KERNEL + if (kvm_vcpu_host_support_hw_hc()) +# endif /* CONFIG_KVM_GUEST_KERNEL */ + return light_hw_hypercall(nr, arg1, arg2, arg3, + arg4, arg5, arg6); +#endif /* CONFIG_KVM_GUEST_HW_HCALL */ + + ret = E2K_SYSCALL(LIGHT_HYPERCALL_TRAPNUM, nr, 6, + arg1, arg2, arg3, arg4, arg5, arg6); + + return ret; +} +static inline unsigned long light_hypercall0(unsigned long nr) +{ + return light_hypercall(nr, 0, 0, 0, 0, 0, 0); +} + +static inline unsigned long light_hypercall1(unsigned long nr, + unsigned long arg1) +{ + return light_hypercall(nr, arg1, 0, 0, 0, 0, 0); +} + +static inline unsigned long light_hypercall2(unsigned long nr, + unsigned long arg1, unsigned long arg2) +{ + return light_hypercall(nr, arg1, arg2, 0, 0, 0, 0); +} + +static inline unsigned long light_hypercall3(unsigned long nr, + unsigned long arg1, unsigned long arg2, + unsigned long arg3) +{ + return light_hypercall(nr, arg1, arg2, arg3, 0, 0, 0); +} + +static inline unsigned long light_hypercall4(unsigned long nr, + unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4) +{ + return 
light_hypercall(nr, arg1, arg2, arg3, arg4, 0, 0); +} + +static inline unsigned long light_hypercall5(unsigned long nr, + unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4, + unsigned long arg5) +{ + return light_hypercall(nr, arg1, arg2, arg3, arg4, arg5, 0); +} + +static inline unsigned long light_hypercall6(unsigned long nr, + unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4, + unsigned long arg5, unsigned long arg6) +{ + return light_hypercall(nr, arg1, arg2, arg3, arg4, arg5, arg6); +} + +static inline unsigned long generic_hypercall(unsigned long nr, + unsigned long arg1, unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5, unsigned long arg6, + unsigned long arg7) +{ + unsigned long ret; + +#ifdef CONFIG_KVM_GUEST_HW_HCALL +# ifdef CONFIG_KVM_GUEST_KERNEL + if (kvm_vcpu_host_support_hw_hc()) +# endif /* CONFIG_KVM_GUEST_KERNEL */ + return generic_hw_hypercall(nr, + arg1, arg2, arg3, arg4, arg5, arg6, arg7); +#endif /* CONFIG_KVM_GUEST_HW_HCALL */ + + ret = E2K_SYSCALL(GENERIC_HYPERCALL_TRAPNUM, nr, 7, + arg1, arg2, arg3, arg4, arg5, arg6, arg7); + return ret; +} +static inline unsigned long generic_hypercall0(unsigned long nr) +{ + return generic_hypercall(nr, 0, 0, 0, 0, 0, 0, 0); +} + +static inline unsigned long generic_hypercall1(unsigned long nr, + unsigned long arg1) +{ + return generic_hypercall(nr, arg1, 0, 0, 0, 0, 0, 0); +} + +static inline unsigned long generic_hypercall2(unsigned long nr, + unsigned long arg1, unsigned long arg2) +{ + return generic_hypercall(nr, arg1, arg2, 0, 0, 0, 0, 0); +} + +static inline unsigned long generic_hypercall3(unsigned long nr, + unsigned long arg1, unsigned long arg2, + unsigned long arg3) +{ + return generic_hypercall(nr, arg1, arg2, arg3, 0, 0, 0, 0); +} + +static inline unsigned long generic_hypercall4(unsigned long nr, + unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4) +{ + return 
generic_hypercall(nr, arg1, arg2, arg3, arg4, 0, 0, 0); +} + +static inline unsigned long generic_hypercall5(unsigned long nr, + unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4, + unsigned long arg5) +{ + return generic_hypercall(nr, arg1, arg2, arg3, arg4, arg5, 0, 0); +} + +static inline unsigned long generic_hypercall6(unsigned long nr, + unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4, + unsigned long arg5, unsigned long arg6) +{ + return generic_hypercall(nr, arg1, arg2, arg3, arg4, arg5, arg6, 0); +} + +/* + * KVM hypervisor (host) <-> guest lite hypercalls list + */ + +#define KVM_HCALL_COPY_STACKS_TO_MEMORY 1 /* copy (flush) hardware */ + /* stacks to memory */ +#define KVM_HCALL_SWITCH_GUEST_THREAD_STACKS 2 /* switch guest kernel thread */ + /* stacks from current to new */ + /* specified by GPID number */ +#define KVM_HCALL_GET_ACTIVE_CR_MEM_ITEM 3 /* get current active */ + /* procedure chain stack item */ +#define KVM_HCALL_PUT_ACTIVE_CR_MEM_ITEM 4 /* put current active */ + /* procedure chain stack item */ +#define KVM_HCALL_GET_HOST_RUNSTATE_KTIME 5 /* get host kernel current */ + /* time at terms of runstate */ + /* time (cycles) */ +#define KVM_HCALL_GET_GUEST_RUNNING_TIME 6 /* get running time of guest */ + /* VCPU at cycles */ +#define KVM_HCALL_GET_HOST_MMU_PPTB 7 /* get the host MMU PPTB register */ + /* state */ +#define KVM_HCALL_GET_TLB_SET_TAG 8 /* get tag of TLB line set */ +#define KVM_HCALL_GET_TLB_SET_ENTRY 9 /* get entry of TLB line set */ +#define KVM_HCALL_UPDATE_PSP_HI 10 /* write updated value to */ + /* PSP_hi register */ +#define KVM_HCALL_UPDATE_PCSP_HI 11 /* write updated value to */ + /* PCSP_hi register */ +#define KVM_HCALL_SETUP_IDLE_TASK 12 /* setup current task of */ + /* guest as task */ +#define KVM_HCALL_UPDATE_WD_PSIZE 13 /* write updated psize field */ + /* to the WD register */ +#define KVM_HCALL_GET_HOST_MMU_PID 14 /* get the host MMU PID register */ 
+ /* state */ +#define KVM_HCALL_MOVE_TAGGED_DATA 15 /* move quad value from to */ +#define KVM_HCALL_UNFREEZE_TRAPS 16 /* unfreeze TIRs & trap */ + /* cellar */ +#define KVM_HCALL_SWITCH_TO_INIT_MM 17 /* deactivate current guest */ + /* mm, switch to init mm */ +#define KVM_HCALL_EXTRACT_TAGS_32 19 /* extract tags from 32 bytes */ + /* of data */ +#define KVM_HCALL_INJECT_INTERRUPT 20 /* inject interrupt to handle */ + /* pending VIRQs by guest */ +#define KVM_HCALL_VIRQ_HANDLED 21 /* info host about all VIRQs */ + /* were handled by guest */ +#define KVM_HCALL_TEST_PENDING_VIRQ 22 /* test pending VIRQs on VCPU */ +#define KVM_HCALL_READ_DTLB_REG 23 /* read DTLB entry for */ + /* virtual address */ +#define KVM_HCALL_GET_DAM 24 /* get current DAM state */ +#define KVM_HCALL_FLUSH_DCACHE_LINE 25 /* flush DCACHE line */ +#define KVM_HCALL_CLEAR_DCACHE_L1_SET 26 /* clear DCACHE L1 set */ +#define KVM_HCALL_FLUSH_DCACHE_RANGE 27 /* flush DCACHE range of */ + /* virtual addresses */ +#define KVM_HCALL_CLEAR_DCACHE_L1_RANGE 28 /* flush DCACHE L1 range of */ + /* virtual addresses */ +#define KVM_HCALL_MMU_PROBE 29 /* probe MMU entry or */ + /* address */ +#define KVM_HCALL_FLUSH_ICACHE_ALL 30 /* flush all ICACHE */ +/* notify host kernel aboout switch to updated procedure stack on guest */ +#define KVM_HCALL_SWITCH_TO_EXPANDED_PROC_STACK 31 +/* notify host kernel aboout switch to updated procedure chain stack on guest */ +#define KVM_HCALL_SWITCH_TO_EXPANDED_CHAIN_STACK 32 + +typedef struct kvm_hw_stacks_flush { + unsigned long psp_lo; + unsigned long psp_hi; + unsigned long pcsp_lo; + unsigned long pcsp_hi; +} kvm_hw_stacks_flush_t; + +static inline unsigned long +HYPERVISOR_copy_stacks_to_memory(void) +{ + return light_hypercall1(KVM_HCALL_COPY_STACKS_TO_MEMORY, + (unsigned long)NULL); +} + +static inline unsigned long +HYPERVISOR_flush_hw_stacks_to_memory(kvm_hw_stacks_flush_t __user *hw_stacks) +{ + return light_hypercall1(KVM_HCALL_COPY_STACKS_TO_MEMORY, + 
(unsigned long)hw_stacks); +} + +static inline unsigned long +HYPERVISOR_switch_guest_thread_stacks(unsigned long gpid_nr, + unsigned long gmmid_nr) +{ + return light_hypercall2(KVM_HCALL_SWITCH_GUEST_THREAD_STACKS, + gpid_nr, gmmid_nr); +} + +static inline unsigned long +HYPERVISOR_get_active_cr_mem_item(unsigned long __user *cr_value, + e2k_addr_t base, e2k_addr_t cr_ind, + e2k_addr_t cr_item) +{ + return light_hypercall4(KVM_HCALL_GET_ACTIVE_CR_MEM_ITEM, + (unsigned long)cr_value, + base, cr_ind, cr_item); +} +static inline unsigned long +HYPERVISOR_put_active_cr_mem_item(unsigned long cr_value, + e2k_addr_t base, e2k_addr_t cr_ind, + e2k_addr_t cr_item) +{ + return light_hypercall4(KVM_HCALL_PUT_ACTIVE_CR_MEM_ITEM, + cr_value, base, cr_ind, cr_item); +} + +static inline unsigned long +HYPERVISOR_get_host_runstate_ktime(void) +{ + return light_hypercall0(KVM_HCALL_GET_HOST_RUNSTATE_KTIME); +} +static inline unsigned long +HYPERVISOR_get_guest_running_time(void) +{ + return light_hypercall0(KVM_HCALL_GET_GUEST_RUNNING_TIME); +} + +static inline unsigned long +HYPERVISOR_get_tlb_set_tag(e2k_addr_t va, int set_no, bool huge) +{ + return light_hypercall3(KVM_HCALL_GET_TLB_SET_TAG, va, set_no, huge); +} + +static inline unsigned long +HYPERVISOR_get_tlb_set_entry(e2k_addr_t va, int set_no, bool huge) +{ + return light_hypercall3(KVM_HCALL_GET_TLB_SET_ENTRY, va, set_no, huge); +} + +static inline unsigned long +HYPERVISOR_get_host_mmu_pptb(void) +{ + return light_hypercall0(KVM_HCALL_GET_HOST_MMU_PPTB); +} + +static inline unsigned long +HYPERVISOR_get_host_mmu_pid(void) +{ + return light_hypercall0(KVM_HCALL_GET_HOST_MMU_PID); +} + +static inline unsigned long +HYPERVISOR_update_psp_hi(unsigned long psp_hi_value) +{ + return light_hypercall1(KVM_HCALL_UPDATE_PSP_HI, psp_hi_value); +} + +static inline unsigned long +HYPERVISOR_update_wd_psize(unsigned long psize_value) +{ + return light_hypercall1(KVM_HCALL_UPDATE_WD_PSIZE, psize_value); +} + +static inline unsigned 
long +HYPERVISOR_update_pcsp_hi(unsigned long pcsp_hi_value) +{ + return light_hypercall1(KVM_HCALL_UPDATE_PCSP_HI, pcsp_hi_value); +} + +static inline unsigned long +HYPERVISOR_setup_idle_task(int cpu) +{ + return light_hypercall1(KVM_HCALL_SETUP_IDLE_TASK, (unsigned long)cpu); +} + +static inline unsigned long +HYPERVISOR_unfreeze_guest_traps(void) +{ + return light_hypercall0(KVM_HCALL_UNFREEZE_TRAPS); +} + +static inline unsigned long +HYPERVISOR_switch_to_guest_init_mm(void) +{ + return light_hypercall0(KVM_HCALL_SWITCH_TO_INIT_MM); +} + +union recovery_faulted_arg { + struct { + char vr; + char chan; + char qp; + char atomic; + u16 tag; + u16 tag_ext; + }; + u64 entire; +}; +static inline unsigned long +HYPERVISOR_move_tagged_data(int word_size, + e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + return light_hypercall3(KVM_HCALL_MOVE_TAGGED_DATA, + word_size, addr_from, addr_to); +} +static inline unsigned long +HYPERVISOR_extract_tags_32(u16 *dst, const void *src) +{ + return light_hypercall2(KVM_HCALL_EXTRACT_TAGS_32, + (unsigned long)dst, (unsigned long)src); +} + +static inline unsigned long +HYPERVISOR_inject_interrupt(void) +{ + return light_hypercall0(KVM_HCALL_INJECT_INTERRUPT); +} +extern unsigned long kvm_hypervisor_inject_interrupt(void); +static inline unsigned long +HYPERVISOR_virqs_handled(void) +{ + return light_hypercall0(KVM_HCALL_VIRQ_HANDLED); +} + +static inline unsigned long +HYPERVISOR_test_pending_virqs(void) +{ + return light_hypercall0(KVM_HCALL_TEST_PENDING_VIRQ); +} +static inline unsigned long +HYPERVISOR_read_dtlb_reg(e2k_addr_t virt_addr) +{ + return light_hypercall1(KVM_HCALL_READ_DTLB_REG, virt_addr); +} +static inline unsigned long +HYPERVISOR_get_DAM(unsigned long long *dam, int dam_entries) +{ + return light_hypercall2(KVM_HCALL_GET_DAM, (unsigned long)dam, + dam_entries); +} +static inline unsigned long +HYPERVISOR_flush_dcache_line(e2k_addr_t virt_addr) +{ + return light_hypercall1(KVM_HCALL_FLUSH_DCACHE_LINE, virt_addr); +} 
+static inline unsigned long +HYPERVISOR_clear_dcache_l1_set(e2k_addr_t virt_addr, unsigned long set) +{ + return light_hypercall2(KVM_HCALL_CLEAR_DCACHE_L1_SET, virt_addr, set); +} +static inline unsigned long +HYPERVISOR_flush_dcache_range(void *addr, size_t len) +{ + return light_hypercall2(KVM_HCALL_FLUSH_DCACHE_RANGE, + (unsigned long)addr, len); +} +static inline unsigned long +HYPERVISOR_clear_dcache_l1_range(void *addr, size_t len) +{ + return light_hypercall2(KVM_HCALL_CLEAR_DCACHE_L1_RANGE, + (unsigned long)addr, len); +} +static inline unsigned long +HYPERVISOR_flush_icache_all(void) +{ + return light_hypercall0(KVM_HCALL_FLUSH_ICACHE_ALL); +} + +typedef enum kvm_mmu_probe { + KVM_MMU_PROBE_ENTRY, + KVM_MMU_PROBE_ADDRESS, +} kvm_mmu_probe_t; + +static inline unsigned long +HYPERVISOR_mmu_probe(e2k_addr_t virt_address, kvm_mmu_probe_t what) +{ + return light_hypercall2(KVM_HCALL_MMU_PROBE, virt_address, what); +} + +static inline long +HYPERVISOR_switch_to_expanded_guest_proc_stack(long delta_size, + long delta_offset, bool decr_gk_ps) +{ + return light_hypercall3(KVM_HCALL_SWITCH_TO_EXPANDED_PROC_STACK, + delta_size, delta_offset, (unsigned long)decr_gk_ps); +} +static inline long +HYPERVISOR_switch_to_expanded_guest_chain_stack(long delta_size, + long delta_offset, bool decr_gk_pcs) +{ + return light_hypercall3(KVM_HCALL_SWITCH_TO_EXPANDED_CHAIN_STACK, + delta_size, delta_offset, (unsigned long)decr_gk_pcs); +} + +/* + * KVM hypervisor (host) <-> guest generic hypercalls list + */ + +#define KVM_HCALL_PV_WAIT 1 /* suspend current vcpu until */ + /* it will be woken up by */ + /* call KVM_HCALL_PV_KICK */ +#define KVM_HCALL_PV_KICK 2 /* wake up vcpu suspended by */ + /* call KVM_HCALL_PV_WAIT */ +#define KVM_HCALL_RELEASE_TASK_STRUCT 8 /* release task struct on */ + /* host after task completion */ +#define KVM_HCALL_SET_CLOCKEVENT 10 /* set clock event */ +#define KVM_HCALL_COMPLETE_LONG_JUMP 12 /* long jump completion */ +#define 
KVM_HCALL_LAUNCH_SIG_HANDLER 14 /* launch guest user signal */ + /* handler */ +#define KVM_HCALL_SWITCH_TO_VIRT_MODE 16 /* switch from physical to */ + /* virtual addresses mode */ + /* (enable paging, TLB, TLU) */ +#define KVM_HCALL_APPLY_PSP_BOUNDS 17 /* update pending procedure */ + /* stack pointers after stack */ + /* bounds handling */ +#define KVM_HCALL_APPLY_PCSP_BOUNDS 18 /* update pending chain */ + /* stack pointers after stack */ + /* bounds handling */ +#define KVM_HCALL_CORRECT_TRAP_RETURN_IP 19 /* correct IP to return from */ + /* guest trap */ +#define KVM_HCALL_SWITCH_GUEST_KERNEL_STACKS 21 /* switch guest kernel thread */ + /* to new stacks */ +#define KVM_HCALL_COPY_GUEST_KERNEL_STACKS 22 /* copy guest kernel stacks */ + /* for kernel thread creation */ +#define KVM_HCALL_SWITCH_TO_GUEST_NEW_USER 28 /* end of execve() for guest */ + /* switch to new user stacks */ + /* start from user entry */ + /* point */ +#define KVM_HCALL_CLONE_GUEST_USER_STACKS 29 /* end of clone_user_stacks() */ + /* create new process thread */ + /* info agent on host and */ + /* register guest kernel */ + /* user local data stack */ +#define KVM_HCALL_COPY_GUEST_USER_STACKS 30 /* end of clone_user_stacks() */ + /* create new process thread */ + /* info agent on host and */ + /* register guest kernel */ + /* user local data stack */ +#define KVM_HCALL_UPDATE_HW_STACKS_FRAMES 36 /* update hardware stacks */ + /* frames */ +#define KVM_HCALL_PATCH_GUEST_DATA_AND_CHAIN_STACKS 38 + /* patch guest kernel data */ + /* chain stacks */ +#define KVM_HCALL_COPY_HW_STACKS_FRAMES 39 /* copy guest hardware stacks */ + /* (user<->kernel) on guest */ + /* addresses */ +#define KVM_HCALL_GET_GUEST_GLOB_REGS 40 /* get current state of */ + /* global registers, except */ + /* someones */ +#define KVM_HCALL_SET_GUEST_GLOB_REGS 41 /* set current state of */ + /* global registers, except */ + /* someones */ +#define KVM_HCALL_GET_GUEST_LOCAL_GLOB_REGS 42 /* get current state of */ + /* local 
global registers */ +#define KVM_HCALL_SET_GUEST_LOCAL_GLOB_REGS 43 /* set current state of */ + /* local global registers */ +#define KVM_HCALL_GET_ALL_GUEST_GLOB_REGS 44 /* get current state of all */ + /* guest user global regs */ +#define KVM_HCALL_BOOT_SPIN_LOCK_SLOW 47 /* slowpath of guest boot */ + /* time spinlock locking */ +#define KVM_HCALL_BOOT_SPIN_LOCKED_SLOW 48 /* slowpath of guest boot */ + /* time spinlock locking */ + /* is taken */ +#define KVM_HCALL_BOOT_SPIN_UNLOCK_SLOW 49 /* slowpath of guest boot */ + /* time spinlock unlocking */ +#define KVM_HCALL_GUEST_SPIN_LOCK_SLOW 50 /* slowpath of guest spinlock */ + /* locking */ +#define KVM_HCALL_GUEST_SPIN_LOCKED_SLOW 51 /* slowpath of guest spinlock */ + /* locking is taken */ +#define KVM_HCALL_GUEST_SPIN_UNLOCK_SLOW 52 /* slowpath of guest spinlock */ + /* unlocking */ +#define KVM_HCALL_GUEST_CSD_LOCK_CTL 53 /* serialize access to */ + /* per-cpu csd resources */ + /* wait and wake up on host */ +#define KVM_HCALL_GUEST_MM_DROP 63 /* drop host kernel agent */ + /* for the guest process mm */ +#define KVM_HCALL_ACTIVATE_GUEST_MM 64 /* activate host agent */ + /* for the new guest mm */ +#define KVM_HCALL_PT_ATOMIC_UPDATE 68 /* atomically update PT items */ +#define KVM_HCALL_SWITCH_GUEST_MM 78 /* switch host agent to */ + /* the next guest mm */ +#define KVM_HCALL_VCPU_MMU_STATE 79 /* common hcall to control */ + /* guest MMU state */ +#define KVM_HCALL_GUEST_IOPORT_REQ 80 /* guest kernel IO ports */ + /* in/out request */ +#define KVM_HCALL_GUEST_IOPORT_STRING_REQ 81 /* guest kernel IO ports */ + /* in/out string request */ +#define KVM_HCALL_GUEST_MMIO_REQ 82 /* guest kernel MMIO */ + /* read/write request */ +#define KVM_HCALL_CONSOLE_IO 83 /* raw write/read to/from */ + /* guest task console */ +#define KVM_HCALL_NOTIFY_IO 84 /* notify host on IO request */ + /* completion or starting */ +#define KVM_HCALL_GUEST_INTR_HANDLER 85 /* create guest interrupt */ + /* handler */ +#define 
KVM_HCALL_GUEST_INTR_THREAD 86 /* create guest interrupt */ + /* handler thread */ +#define KVM_HCALL_GUEST_FREE_INTR_HANDLER 87 /* stop guest interrupt */ + /* handler thread */ +#define KVM_HCALL_WAIT_FOR_VIRQ 88 /* wait for the VIRQ */ +#define KVM_HCALL_GET_GUEST_DIRECT_VIRQ 90 /* register direct VIRQ */ +#define KVM_HCALL_FREE_GUEST_DIRECT_VIRQ 91 /* free direct VIRQ */ +#define KVM_HCALL_GUEST_VCPU_COMMON_IDLE 95 /* guest CPU on idle */ + /* wait for some events on */ + /* guest kernel */ +#define KVM_HCALL_GUEST_VCPU_RELAX 96 /* guest VCPU is waiting for */ + /* some event and relax real */ + /* CPU to schedule other */ + /* guest VCPU */ +#define KVM_HCALL_ACTIVATE_GUEST_VCPU 97 /* activate the VCPU, which */ + /* is waiting on idle mode */ +#define KVM_HCALL_ACTIVATE_GUEST_ALL_VCPUS 98 /* activate all VCPUs, which */ + /* are waiting on idle mode */ +#define KVM_HCALL_RECOVERY_FAULTED_TAGGED_GUEST_STORE 110 + /* recovery faulted store */ + /* tagged value operations */ +#define KVM_HCALL_RECOVERY_FAULTED_GUEST_LOAD 111 + /* recovery faulted load */ + /* value and tag */ +#define KVM_HCALL_RECOVERY_FAULTED_GUEST_MOVE 112 + /* recovery faulted move */ + /* value and tag to register */ + /* into procedure stack */ +#define KVM_HCALL_RECOVERY_FAULTED_LOAD_TO_GUEST_GREG 113 + /* recovery faulted load */ + /* value and tag to global */ + /* register */ +#define KVM_HCALL_MOVE_TAGGED_GUEST_DATA 114 /* move data value from to */ +#define KVM_HCALL_FAST_TAGGED_GUEST_MEMORY_COPY 115 + /* fast tagged memory copy */ +#define KVM_HCALL_FAST_TAGGED_GUEST_MEMORY_SET 116 + /* fast tagged memory set */ +#define KVM_HCALL_FAST_TAGGED_MEMORY_COPY 117 /* fast tagged memory copy */ +#define KVM_HCALL_FAST_TAGGED_MEMORY_SET 118 /* fast tagged memory set */ +#define KVM_HCALL_SHUTDOWN 120 /* shutdown of guest */ +#define KVM_HCALL_DUMP_GUEST_STACK 121 /* dump guest current stack */ +#define KVM_HCALL_FTRACE_STOP 122 /* stop host's ftrace */ +#define KVM_HCALL_FTRACE_DUMP 123 /* 
dump host's ftrace buffer */ +#define KVM_HCALL_DUMP_COMPLETION 125 /* show state or dump all */ + /* stacks is completed */ + +#define KVM_HCALL_HOST_PRINTK 130 /* guest printk() on host */ +#define KVM_HCALL_PRINT_GUEST_KERNEL_PTES 131 /* dump guest kernel address */ + /* page table entries */ +#define KVM_HCALL_PRINT_GUEST_USER_ADDRESS_PTES 132 /* dump guest user */ + /* address ptes on host */ +#ifdef CONFIG_KVM_ASYNC_PF +#define KVM_HCALL_PV_ENABLE_ASYNC_PF 133 /* enable async pf */ + /* on current vcpu */ +#endif /* CONFIG_KVM_ASYNC_PF */ +#define KVM_HCALL_FLUSH_TLB_RANGE 134 /* sync given address range */ + /* in page tables and flush tlb */ +#define KVM_HCALL_SYNC_ADDR_RANGE 135 /* sync ptes in page */ + /* tables without flushing tlb */ +#define KVM_HCALL_GET_SPT_TRANSLATION 137 /* get full translation of guest */ + /* address at shadow PTs */ +#define KVM_HCALL_RECOVERY_FAULTED_TAGGED_STORE 141 + /* recovery faulted store */ + /* tagged value operations */ +#define KVM_HCALL_RECOVERY_FAULTED_LOAD 142 /* recovery faulted load */ + /* value and tag */ +#define KVM_HCALL_RECOVERY_FAULTED_MOVE 143 + /* recovery faulted move */ + /* value and tag to register */ + /* into procedure stack */ +#define KVM_HCALL_RECOVERY_FAULTED_LOAD_TO_GREG 144 + /* recovery faulted load */ + /* value and tag to global */ + /* register */ + + +/* + * KVM hypervisor (host) <-> guest hypercalls structures + */ + +/* process and threads management */ +typedef struct kvm_task_info { + unsigned long flags; /* various task flags see header */ + /* asm/processor.h */ + unsigned long us_base; /* local data (user) stack base */ + unsigned long us_size; /* local data (user) stack size */ + unsigned long sp_offset; /* offset of local data stack pointer */ + /* SP from stack base */ + unsigned long ps_base; /* Procedure stack: base, */ + unsigned long ps_size; /* Procedure stack current: size, */ + unsigned long ps_ind; /* index */ + +/* should be unsed { */ + unsigned long ps_offset; /* 
current base offset, */ + unsigned long ps_top; /* current top, */ + unsigned long us_ps_size; /* user part of stack total size */ + unsigned long init_ps_size; /* and initial size. */ + unsigned long g_ps_size; /* guest kernel part of stack size, */ + unsigned long ps_reserved; /* current reserved size of guest */ + /* kernel */ +/* } should be unused */ + + unsigned long pcs_base; /* Procedure chain stack: base, */ + unsigned long pcs_size; /* Chain stack current: size, */ + unsigned long pcs_ind; /* index */ + +/* should be unused { */ + unsigned long pcs_offset; /* current base offset, */ + unsigned long pcs_top; /* current top, */ + unsigned long us_pcs_size; /* user part of stack total size */ + unsigned long init_pcs_size; /* and initial size. */ + unsigned long g_pcs_size; /* guest kernel part of stack size, */ + unsigned long pcs_reserved; /* current reserved size of guest */ + /* kernel part */ +/* } should be unused */ + + /* new: guest user stacks */ + unsigned long u_us_base; /* local data (user) stack base */ + unsigned long u_us_size; /* local data (user) stack size */ + unsigned long u_sp_offset; /* offset of local data stack pointer */ + /* SP from stack base */ + unsigned long u_ps_base; /* Procedure stack: base, */ + unsigned long u_ps_size; /* Procedure stack current: size, */ + unsigned long u_ps_ind; /* index */ + unsigned long u_pcs_base; /* Procedure chain stack: base, */ + unsigned long u_pcs_size; /* Chain stack current: size, */ + unsigned long u_pcs_ind; /* index */ + + unsigned long cr0_lo; /* Chain register #0 lo */ + unsigned long cr0_hi; /* Chain register #0 hi */ + unsigned long cr1_wd; /* Chain register which contains wbs */ + unsigned long cr1_ussz; /* Chain register which contains */ + /* user stack size */ + unsigned long cud_base; /* OSCUD: base */ + unsigned long cud_size; /* and size */ + unsigned long gd_base; /* OSGD: base */ + unsigned long gd_size; /* and size */ + unsigned long cut_base; /* CUTD: base */ + unsigned long 
cut_size; /* and size */ + int cui; /* compilation unit index of code */ + bool kernel; /* task in kernel mode */ + unsigned long entry_point; /* entry point (address) of task */ + unsigned long gregs; /* pointer to the global registers */ + /* state of the new process */ +} kvm_task_info_t; + +/* hardware stack extension, update and change */ +typedef struct kvm_hw_stack { + unsigned long flags; /* various task flags see header */ + /* asm/processor.h */ + unsigned long base; /* Procedure stack: base, */ + unsigned long offset; /* current base offset, */ + unsigned long top; /* current top, */ + unsigned long us_size; /* user part of stack total size */ + unsigned long size; /* Procedure stack current: size, */ + unsigned long delta_ind; /* delta of index */ + unsigned long reserved; /* current reserved size of guest */ + /* kernel */ +} kvm_hw_stack_t; + +/* signal and signal handler management */ +typedef struct kvm_sig_info { + int sig; /* signal # */ + unsigned int protected; /* task is protected */ + void *siginfo; /* siginfo structure pointer */ + void *ucontext; /* ucontext structure pointer */ + int si_size; /* size of siginfo structure */ + int uc_size; /* size of ucontext structure */ + unsigned long handler; /* user handler function entry point */ + unsigned long sbr; /* user data stack */ + unsigned long usd_lo; /* to handle */ + unsigned long usd_hi; /* the signal */ +} kvm_sig_info_t; + +typedef struct kvm_stacks_info { + unsigned long top; /* top address (same as SBR pointer) */ + unsigned long usd_lo; /* current state of stack pointer */ + unsigned long usd_hi; /* register: base & size */ + unsigned long psp_lo; /* Procedure stack pointer: */ + unsigned long psp_hi; /* base & index & size */ + unsigned long pshtp; + unsigned long pcsp_lo; /* Procedure chain stack */ + unsigned long pcsp_hi; /* pointer: base & index & size */ + unsigned pcshtp; + unsigned long cr0_lo; + unsigned long cr0_hi; + unsigned long cr1_lo; + unsigned long cr1_hi; +} 
kvm_stacks_info_t; +typedef kvm_stacks_info_t kvm_long_jump_info_t; + +/* guest kernel local data stack pointers update */ +typedef struct kvm_data_stack_info { + bool protected; /* protected stack flag */ + unsigned long top; /* top of the stack (register SBR) */ + unsigned long usd_base; /* USD pointer: base */ + unsigned long usd_size; /* size */ + unsigned long usd_ind; /* index (only for protected stack) */ +} kvm_data_stack_info_t; + +/* patch of fields of chain stack registers to update */ +typedef struct kvm_pcs_patch_info { + unsigned int ind; /* index of frame in bytes and */ + /* relative PCS base: */ + /* PCS_base + PCS_offset + PCSP.ind */ + unsigned int update_flags; /* flags of updated fields */ + /* see below */ + unsigned long IP; /* new IP [63:0] */ + unsigned int usd_size; /* data stack size in bytes */ + /* to calculate ussz fields */ + unsigned short wbs; /* quad registers number */ + unsigned short wpsz; /* quad registers number */ +} kvm_pcs_patch_info_t; + +/* chain stack registers updating flags (see structure above) */ +#define KVM_PCS_IP_UPDATE_FLAG 0x00000001 +#define KVM_PCS_USSZ_UPDATE_FLAG 0x00000100 +#define KVM_PCS_WBS_UPDATE_FLAG 0x00001000 +#define KVM_PCS_WPSZ_UPDATE_FLAG 0x00002000 + +#define KVM_MAX_PCS_FRAME_NUM_TO_PATCH 4 /* max number of chain stack */ + /* frames that can be updated */ + /* at once */ + +/* hardware stacks updating interface */ +#define KVM_MAX_PS_FRAME_NUM_TO_UPDATE 2 /* max number of procedure */ + /* stack frames that can be updated */ + /* one frame is 2 double-word */ + /* registers with extensions */ +#define KVM_MAX_PS_FRAME_SIZE_TO_UPDATE \ + (KVM_MAX_PS_FRAME_NUM_TO_UPDATE * EXT_4_NR_SZ) + +/* + * Common hypercall to get/set/control guest MMU state + */ + +/* flags of operations on guest MMU */ +#define INIT_STATE_GMMU_OPC 0x00000001UL +#define SET_OS_VAB_GMMU_OPC 0x00000010UL +#define CREATE_NEW_GMM_GMMU_OPC 0x00000100UL + +typedef struct vcpu_gmmu_info { + unsigned long opcode; /* operations on guest MMU */ + 
/* (see above) */ + bool sep_virt_space; /* guest use separate PTs for */ + /* OS and user virtual spaces */ + bool pt_v6; /* guest PTs are of v6 format */ + unsigned long mmu_cr; /* MMU control register */ + unsigned long pid; /* MMU PID (context) register */ + unsigned long trap_cellar; /* MMU trap cellar base */ + unsigned long u_pptb; /* physical base of user (for */ + /* separate PT mode) or united PT */ + unsigned long u_vptb; /* virtual base of user (for */ + /* separate PT mode) or united PT */ + unsigned long os_pptb; /* physical base of kernel PT */ + /* (only for separate PT mode) */ + unsigned long os_vptb; /* virtual base of kernel PT */ + /* (only for separate PT mode) */ + unsigned long os_vab; /* offset of kernel virtual space */ + /* into common virtual addresses */ + /* range */ + unsigned long gmmid_nr; /* return to guest: ID (#) of host */ + /* gmm struct created for new guest */ + /* mm struct */ +} vcpu_gmmu_info_t; + +static inline void HYPERVISOR_pv_wait(void) +{ + generic_hypercall0(KVM_HCALL_PV_WAIT); +} + +static inline void HYPERVISOR_pv_kick(int cpu) +{ + generic_hypercall1(KVM_HCALL_PV_KICK, cpu); +} + +static inline unsigned long +HYPERVISOR_release_task_struct(int gpid_nr) +{ + return generic_hypercall1(KVM_HCALL_RELEASE_TASK_STRUCT, (long)gpid_nr); +} + +static inline unsigned long +HYPERVISOR_set_clockevent(unsigned long delta) +{ + return generic_hypercall1(KVM_HCALL_SET_CLOCKEVENT, delta); +} + +static inline unsigned long +HYPERVISOR_complete_long_jump(kvm_long_jump_info_t *regs_state) +{ + return generic_hypercall1(KVM_HCALL_COMPLETE_LONG_JUMP, + (unsigned long)regs_state); +} + +static inline unsigned long +HYPERVISOR_launch_sig_handler(kvm_stacks_info_t *regs_state, + unsigned long sigreturn_entry, long sys_rval) +{ + return generic_hypercall3(KVM_HCALL_LAUNCH_SIG_HANDLER, + (unsigned long)regs_state, sigreturn_entry, sys_rval); +} + +static inline unsigned long +HYPERVISOR_apply_psp_bounds(unsigned long base, unsigned long 
size, + unsigned long start, unsigned long end, unsigned long delta) +{ + return generic_hypercall5(KVM_HCALL_APPLY_PSP_BOUNDS, + base, size, start, end, delta); +} + +static inline unsigned long +HYPERVISOR_apply_pcsp_bounds(unsigned long base, unsigned long size, + unsigned long start, unsigned long end, unsigned long delta) +{ + return generic_hypercall5(KVM_HCALL_APPLY_PCSP_BOUNDS, + base, size, start, end, delta); +} +static inline unsigned long +HYPERVISOR_correct_trap_return_ip(unsigned long return_ip) +{ + return generic_hypercall1(KVM_HCALL_CORRECT_TRAP_RETURN_IP, return_ip); +} + +static inline unsigned long +HYPERVISOR_guest_intr_handler(int irq, int virq_id, + int (*irq_handler)(int, void *), void *arg) +{ + return generic_hypercall4(KVM_HCALL_GUEST_INTR_HANDLER, irq, virq_id, + (unsigned long)irq_handler, + (unsigned long)arg); +} + +static inline unsigned long +HYPERVISOR_guest_intr_thread(int vcpu_id, int irq, int virq_id, int gpid_nr, + int (*irq_fn)(int, void *), void *arg) +{ + return generic_hypercall6(KVM_HCALL_GUEST_INTR_THREAD, + vcpu_id, irq, virq_id, + gpid_nr, (unsigned long)irq_fn, + (unsigned long)arg); +} + +static inline unsigned long +HYPERVISOR_guest_free_intr_handler(int irq, void *arg) +{ + return generic_hypercall2(KVM_HCALL_GUEST_FREE_INTR_HANDLER, irq, + (unsigned long)arg); +} + +static inline unsigned long +HYPERVISOR_get_guest_direct_virq(int irq, int virq_id) +{ + return generic_hypercall2(KVM_HCALL_GET_GUEST_DIRECT_VIRQ, + irq, virq_id); +} + +static inline unsigned long +HYPERVISOR_free_guest_direct_virq(int irq) +{ + return generic_hypercall1(KVM_HCALL_FREE_GUEST_DIRECT_VIRQ, irq); +} + +static inline unsigned long +HYPERVISOR_switch_to_virt_mode(kvm_task_info_t *task_info, + void (*func)(void *data, void *arg1, void *arg2), + void *data, void *arg1, void *arg2) +{ + return generic_hypercall5(KVM_HCALL_SWITCH_TO_VIRT_MODE, + (unsigned long)task_info, + (unsigned long)func, + (unsigned long)data, + (unsigned long)arg1, + 
(unsigned long)arg2); +} + +static inline unsigned long +HYPERVISOR_switch_guest_kernel_stacks(kvm_task_info_t *task_info, + char *entry_point, unsigned long *args, int args_num) +{ + return generic_hypercall4(KVM_HCALL_SWITCH_GUEST_KERNEL_STACKS, + (unsigned long)task_info, (unsigned long)entry_point, + (unsigned long)args, (unsigned long)args_num); +} + +static inline unsigned long +HYPERVISOR_update_hw_stacks_frames(e2k_mem_crs_t *pcs_frame, int pcs_frame_ind, + kernel_mem_ps_t *ps_frame, int ps_frame_ind, int ps_frame_size) +{ + return generic_hypercall5(KVM_HCALL_UPDATE_HW_STACKS_FRAMES, + (unsigned long)pcs_frame, + pcs_frame_ind, + (unsigned long)ps_frame, + ps_frame_ind, ps_frame_size); +} + +static inline unsigned long +HYPERVISOR_copy_hw_stacks_frames(void __user *dst, void __user *src, + long size, bool is_chain) +{ + return generic_hypercall4(KVM_HCALL_COPY_HW_STACKS_FRAMES, + (unsigned long)dst, (unsigned long)src, size, is_chain); +} +static inline unsigned long +HYPERVISOR_copy_guest_kernel_stacks(kvm_task_info_t *task_info) +{ + return generic_hypercall1(KVM_HCALL_COPY_GUEST_KERNEL_STACKS, + (unsigned long)task_info); +} + +static inline unsigned long +HYPERVISOR_switch_to_guest_new_user(kvm_task_info_t *task_info) +{ + return generic_hypercall1(KVM_HCALL_SWITCH_TO_GUEST_NEW_USER, + (unsigned long)task_info); +} + +static inline unsigned long +HYPERVISOR_clone_guest_user_stacks(kvm_task_info_t *task_info) +{ + return generic_hypercall1(KVM_HCALL_CLONE_GUEST_USER_STACKS, + (unsigned long)task_info); +} + +static inline unsigned long +HYPERVISOR_copy_guest_user_stacks(kvm_task_info_t *task_info, + vcpu_gmmu_info_t *gmmu_info) +{ + return generic_hypercall2(KVM_HCALL_COPY_GUEST_USER_STACKS, + (unsigned long)task_info, (unsigned long)gmmu_info); +} + +static inline unsigned long +HYPERVISOR_patch_guest_data_and_chain_stacks(kvm_data_stack_info_t *ds_patch, + kvm_pcs_patch_info_t pcs_patch[], int pcs_frames) +{ + return 
generic_hypercall3(KVM_HCALL_PATCH_GUEST_DATA_AND_CHAIN_STACKS, + (unsigned long)ds_patch, + (unsigned long)pcs_patch, pcs_frames); +} + +static inline unsigned long +HYPERVISOR_get_guest_glob_regs(unsigned long *gregs[2], + unsigned long not_get_gregs_mask, + bool dirty_bgr, unsigned int *bgr) +{ + return generic_hypercall4(KVM_HCALL_GET_GUEST_GLOB_REGS, + (unsigned long)gregs, not_get_gregs_mask, + (unsigned long)dirty_bgr, (unsigned long)bgr); +} +static inline unsigned long +HYPERVISOR_set_guest_glob_regs(unsigned long *gregs[2], + unsigned long not_set_gregs_mask, + bool dirty_bgr, unsigned int *bgr) +{ + return generic_hypercall4(KVM_HCALL_SET_GUEST_GLOB_REGS, + (unsigned long)gregs, not_set_gregs_mask, + (unsigned long)dirty_bgr, (unsigned long)bgr); +} +static inline unsigned long +HYPERVISOR_set_guest_glob_regs_dirty_bgr(unsigned long *gregs[2], + unsigned long not_set_gregs_mask) +{ + return generic_hypercall4(KVM_HCALL_SET_GUEST_GLOB_REGS, + (unsigned long)gregs, not_set_gregs_mask, + (unsigned long)false, (unsigned long)NULL); +} +static inline unsigned long +HYPERVISOR_get_guest_local_glob_regs(unsigned long *l_gregs[2], bool is_signal) +{ + return generic_hypercall2(KVM_HCALL_GET_GUEST_LOCAL_GLOB_REGS, + (unsigned long)l_gregs, is_signal); +} +static inline unsigned long +HYPERVISOR_set_guest_local_glob_regs(unsigned long *l_gregs[2], bool is_signal) +{ + return generic_hypercall2(KVM_HCALL_SET_GUEST_LOCAL_GLOB_REGS, + (unsigned long)l_gregs, is_signal); +} + +static inline unsigned long +HYPERVISOR_get_all_guest_glob_regs(unsigned long *gregs[2]) +{ + return generic_hypercall1(KVM_HCALL_GET_ALL_GUEST_GLOB_REGS, + (unsigned long)gregs); +} + +static inline unsigned long +HYPERVISOR_recovery_faulted_tagged_guest_store(e2k_addr_t address, u64 wr_data, + u32 data_tag, u64 st_rec_opc, u64 data_ext, u32 data_ext_tag, + u64 opc_ext, int chan, int qp_store, int atomic_store) +{ + union recovery_faulted_arg arg = { + .chan = chan, + .qp = !!qp_store, + 
.atomic = !!atomic_store, + .tag = data_tag, + .tag_ext = data_ext_tag + }; + return generic_hypercall6(KVM_HCALL_RECOVERY_FAULTED_TAGGED_GUEST_STORE, + address, wr_data, st_rec_opc, data_ext, opc_ext, + arg.entire); +} +static inline unsigned long +HYPERVISOR_recovery_faulted_guest_load(e2k_addr_t address, + u64 *ld_val, u8 *data_tag, u64 ld_rec_opc, int chan) +{ + return generic_hypercall5(KVM_HCALL_RECOVERY_FAULTED_GUEST_LOAD, + address, (unsigned long)ld_val, + (unsigned long)data_tag, ld_rec_opc, chan); +} +static inline unsigned long +HYPERVISOR_recovery_faulted_guest_move(e2k_addr_t addr_from, e2k_addr_t addr_to, + e2k_addr_t addr_to_hi, int vr, u64 ld_rec_opc, int chan, + int qp_load, int atomic_load, u32 first_time) +{ + union recovery_faulted_arg arg = { + .vr = vr, + .chan = chan, + .qp = !!qp_load, + .atomic = !!atomic_load + }; + return generic_hypercall6(KVM_HCALL_RECOVERY_FAULTED_GUEST_MOVE, + addr_from, addr_to, addr_to_hi, + ld_rec_opc, arg.entire, first_time); +} +static inline unsigned long +HYPERVISOR_recovery_faulted_load_to_guest_greg(e2k_addr_t address, + u32 greg_num_d, int vr, u64 ld_rec_opc, int chan, + int qp_load, int atomic_load, + void *saved_greg_lo, void *saved_greg_hi) +{ + union recovery_faulted_arg arg = { + .vr = vr, + .chan = chan, + .qp = !!qp_load, + .atomic = !!atomic_load + }; + return generic_hypercall6(KVM_HCALL_RECOVERY_FAULTED_LOAD_TO_GUEST_GREG, + address, greg_num_d, ld_rec_opc, arg.entire, + (unsigned long) saved_greg_lo, + (unsigned long) saved_greg_hi); +} + +static inline unsigned long +HYPERVISOR_recovery_faulted_tagged_store(e2k_addr_t address, u64 wr_data, + u32 data_tag, u64 st_rec_opc, u64 data_ext, u32 data_ext_tag, + u64 opc_ext, int chan, int qp_store, int atomic_store) +{ + union recovery_faulted_arg arg = { + .chan = chan, + .qp = !!qp_store, + .atomic = !!atomic_store, + .tag = data_tag, + .tag_ext = data_ext_tag + }; + return generic_hypercall6(KVM_HCALL_RECOVERY_FAULTED_TAGGED_STORE, + address, 
wr_data, st_rec_opc, data_ext, opc_ext, + arg.entire); +} +static inline unsigned long +HYPERVISOR_recovery_faulted_load(e2k_addr_t address, u64 *ld_val, + u8 *data_tag, u64 ld_rec_opc, int chan) +{ + return generic_hypercall5(KVM_HCALL_RECOVERY_FAULTED_LOAD, + address, (unsigned long)ld_val, + (unsigned long)data_tag, ld_rec_opc, chan); +} +static inline unsigned long +HYPERVISOR_recovery_faulted_move(e2k_addr_t addr_from, e2k_addr_t addr_to, + e2k_addr_t addr_to_hi, int vr, u64 ld_rec_opc, int chan, + int qp_load, int atomic_load, u32 first_time) +{ + union recovery_faulted_arg arg = { + .vr = vr, + .chan = chan, + .qp = !!qp_load, + .atomic = !!atomic_load + }; + return generic_hypercall6(KVM_HCALL_RECOVERY_FAULTED_MOVE, + addr_from, addr_to, addr_to_hi, + ld_rec_opc, arg.entire, first_time); +} +static inline unsigned long +HYPERVISOR_recovery_faulted_load_to_greg(e2k_addr_t address, u32 greg_num_d, + int vr, u64 ld_rec_opc, int chan, int qp_load, + int atomic_load, void *saved_greg_lo, void *saved_greg_hi) +{ + union recovery_faulted_arg arg = { + .vr = vr, + .chan = chan, + .qp = !!qp_load, + .atomic = !!atomic_load + }; + return generic_hypercall6(KVM_HCALL_RECOVERY_FAULTED_LOAD_TO_GREG, + address, greg_num_d, ld_rec_opc, arg.entire, + (unsigned long)saved_greg_lo, + (unsigned long)saved_greg_hi); +} +static inline unsigned long +HYPERVISOR_move_tagged_guest_data(int word_size, + e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + return generic_hypercall3(KVM_HCALL_MOVE_TAGGED_GUEST_DATA, + word_size, addr_from, addr_to); +} +static inline unsigned long +HYPERVISOR_fast_tagged_guest_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return generic_hypercall6(KVM_HCALL_FAST_TAGGED_GUEST_MEMORY_COPY, + (unsigned long)dst, (unsigned long)src, + len, strd_opcode, ldrd_opcode, prefetch); +} +static inline unsigned long +HYPERVISOR_fast_tagged_guest_memory_set(void *addr, u64 val, u64 tag, + 
size_t len, u64 strd_opcode) +{ + return generic_hypercall5(KVM_HCALL_FAST_TAGGED_GUEST_MEMORY_SET, + (unsigned long)addr, val, tag, len, strd_opcode); +} + +static inline unsigned long +HYPERVISOR_boot_spin_lock_slow(void *lock, bool check_unlock) +{ + return generic_hypercall2(KVM_HCALL_BOOT_SPIN_LOCK_SLOW, + (unsigned long)lock, + (unsigned long)check_unlock); +} +static inline unsigned long +HYPERVISOR_boot_spin_locked_slow(void *lock) +{ + return generic_hypercall1(KVM_HCALL_BOOT_SPIN_LOCKED_SLOW, + (unsigned long)lock); +} +static inline unsigned long +HYPERVISOR_boot_spin_unlock_slow(void *lock, bool add_to_unlock) +{ + return generic_hypercall2(KVM_HCALL_BOOT_SPIN_UNLOCK_SLOW, + (unsigned long)lock, + (unsigned long)add_to_unlock); +} + +static inline unsigned long +HYPERVISOR_guest_spin_lock_slow(void *lock, bool check_unlock) +{ + return generic_hypercall2(KVM_HCALL_GUEST_SPIN_LOCK_SLOW, + (unsigned long)lock, + (unsigned long)check_unlock); +} +static inline unsigned long +HYPERVISOR_guest_spin_locked_slow(void *lock) +{ + return generic_hypercall1(KVM_HCALL_GUEST_SPIN_LOCKED_SLOW, + (unsigned long)lock); +} +static inline unsigned long +HYPERVISOR_guest_spin_unlock_slow(void *lock, bool add_to_unlock) +{ + return generic_hypercall2(KVM_HCALL_GUEST_SPIN_UNLOCK_SLOW, + (unsigned long)lock, + (unsigned long)add_to_unlock); +} + +typedef enum csd_ctl { + CSD_LOCK_CTL = 1, /* register csd lock wait */ + CSD_UNLOCK_CTL, /* unlock csd lock wait and wake up */ + /* waiting guest VCPU */ + CSD_LOCK_WAIT_CTL, /* wait for csd lock will be unlocked */ + CSD_LOCK_TRY_WAIT_CTL, /* try wait for asynchronous csd lock */ +} csd_ctl_t; + +static inline unsigned long +HYPERVISOR_guest_csd_lock_ctl(csd_ctl_t csd_ctl, void *lock) +{ + return generic_hypercall2(KVM_HCALL_GUEST_CSD_LOCK_CTL, + (unsigned long)csd_ctl, + (unsigned long)lock); +} +static inline unsigned long +HYPERVISOR_guest_csd_lock(void *lock) +{ + return HYPERVISOR_guest_csd_lock_ctl(CSD_LOCK_CTL, lock); +} 
+static inline unsigned long +HYPERVISOR_guest_csd_unlock(void *lock) +{ + return HYPERVISOR_guest_csd_lock_ctl(CSD_UNLOCK_CTL, lock); +} +static inline unsigned long +HYPERVISOR_guest_csd_lock_wait(void *lock) +{ + return HYPERVISOR_guest_csd_lock_ctl(CSD_LOCK_WAIT_CTL, lock); +} +static inline unsigned long +HYPERVISOR_guest_csd_lock_try_wait(void *lock) +{ + return HYPERVISOR_guest_csd_lock_ctl(CSD_LOCK_TRY_WAIT_CTL, lock); +} + +static inline unsigned long +HYPERVISOR_pt_atomic_update(int gmmid_nr, unsigned long gpa, + void __user *old_gpte, + unsigned atomic_op, unsigned long prot_mask) +{ + return generic_hypercall5(KVM_HCALL_PT_ATOMIC_UPDATE, (int)gmmid_nr, + gpa, (unsigned long)old_gpte, atomic_op, prot_mask); +} + +static inline unsigned long +HYPERVISOR_kvm_guest_mm_drop(unsigned long gmmid_nr) +{ + return generic_hypercall1(KVM_HCALL_GUEST_MM_DROP, gmmid_nr); +} + +static inline unsigned long +HYPERVISOR_kvm_activate_guest_mm(unsigned long active_gmmid_nr, + unsigned long gmmid_nr, e2k_addr_t u_phys_ptb) +{ + return generic_hypercall3(KVM_HCALL_ACTIVATE_GUEST_MM, + active_gmmid_nr, gmmid_nr, u_phys_ptb); +} + +static inline unsigned long +HYPERVISOR_kvm_switch_guest_mm(unsigned long gpid_nr, unsigned long gmmid_nr, + e2k_addr_t u_phys_ptb) +{ + return generic_hypercall3(KVM_HCALL_SWITCH_GUEST_MM, + gpid_nr, gmmid_nr, u_phys_ptb); +} + +static inline unsigned long +HYPERVISOR_vcpu_guest_mmu_state(vcpu_gmmu_info_t *mmu_info) +{ + return generic_hypercall1(KVM_HCALL_VCPU_MMU_STATE, + (unsigned long)mmu_info); +} + +/* guest kernel memory attributes to set/update */ +typedef enum kvm_sma_mode { + KVM_SMA_RO, + KVM_SMA_RW, + KVM_SMA_NX, + KVM_SMA_X, + KVM_SMA_P, + KVM_SMA_NP, +} kvm_sma_mode_t; + +/* + * IO control hypercalls + */ + +/* + * Commands to HYPERVISOR_console_io(). 
+ */ +#define CONSOLEIO_write 0 +#define CONSOLEIO_read 1 + +/* + * Some hypercalls return value + */ +#define RETURN_TO_HOST_APP_HCRET \ + (((u64)'r' << 56) | ((u64)'e' << 48) | \ + ((u64)'t' << 40) | ((u64)'2' << 32) | \ + ((u64)'h' << 24) | ((u64)'o' << 16) | \ + ((u64)'s' << 8) | ((u64)'t' << 0)) + +static inline unsigned long +HYPERVISOR_guest_ioport_request(unsigned short port, + unsigned int __user *data, unsigned char size, + unsigned char is_out) +{ + return generic_hypercall4(KVM_HCALL_GUEST_IOPORT_REQ, port, + (unsigned long)data, + size, is_out); +} +static inline unsigned long +HYPERVISOR_guest_ioport_string_request(unsigned short port, + const void __user *data, unsigned char size, unsigned int count, + unsigned char is_out) +{ + return generic_hypercall5(KVM_HCALL_GUEST_IOPORT_STRING_REQ, port, + (unsigned long)data, + size, count, is_out); +} +static inline unsigned long +HYPERVISOR_guest_mmio_request(unsigned long mmio_addr, + unsigned long __user *data, unsigned char size, + unsigned char is_write) +{ + return generic_hypercall4(KVM_HCALL_GUEST_MMIO_REQ, mmio_addr, + (unsigned long)data, + size, is_write); +} +static inline unsigned long +HYPERVISOR_console_io(int io_cmd, int size, char __user *str) +{ + return generic_hypercall3(KVM_HCALL_CONSOLE_IO, io_cmd, size, + (unsigned long)str); +} +static inline unsigned long +HYPERVISOR_notify_io(unsigned int notifier_io) +{ + return generic_hypercall1(KVM_HCALL_NOTIFY_IO, notifier_io); +} + +/* + * Kernel VM shut-down and panicking reason + */ +#define KVM_SHUTDOWN_POWEROFF 0x01 +#define KVM_SHUTDOWN_RESTART 0x02 +#define KVM_SHUTDOWN_PANIC 0x03 + +extern void smp_send_refresh(void); +static inline unsigned long +HYPERVISOR_kvm_shutdown(void *msg, unsigned long reason) +{ +#ifdef CONFIG_SMP + smp_send_refresh(); +#endif + return generic_hypercall2(KVM_HCALL_SHUTDOWN, (unsigned long)msg, + reason); +} +static inline unsigned long +HYPERVISOR_kvm_guest_vcpu_common_idle(long timeout, bool interruptable) 
+{ + return generic_hypercall2(KVM_HCALL_GUEST_VCPU_COMMON_IDLE, + timeout, interruptable); +} +static inline unsigned long +HYPERVISOR_kvm_guest_vcpu_relax(void) +{ + return generic_hypercall0(KVM_HCALL_GUEST_VCPU_RELAX); +} +#ifdef CONFIG_SMP +static inline unsigned long +HYPERVISOR_kvm_activate_guest_vcpu(int vcpu_id) +{ + return generic_hypercall1(KVM_HCALL_ACTIVATE_GUEST_VCPU, vcpu_id); +} +static inline unsigned long +HYPERVISOR_kvm_activate_guest_all_vcpus(void) +{ + return generic_hypercall0(KVM_HCALL_ACTIVATE_GUEST_ALL_VCPUS); +} +#endif /* CONFIG_SMP */ +static inline unsigned long +HYPERVISOR_host_printk(char *msg, int size) +{ + return generic_hypercall2(KVM_HCALL_HOST_PRINTK, (unsigned long)msg, + (unsigned long)size); +} + +/* + * The guest virtual address info of full track of translation + * at the host shadow PTs + */ + +typedef struct mmu_spt_trans { + int pt_levels; /* the last significant level of PT */ + unsigned long pgd; + unsigned long pud; + unsigned long pmd; + unsigned long pte; +} mmu_spt_trans_t; + +static inline unsigned long +HYPERVISOR_get_spt_translation(e2k_addr_t address, + mmu_spt_trans_t __user *trans_info) +{ + return generic_hypercall2(KVM_HCALL_GET_SPT_TRANSLATION, address, + (unsigned long)trans_info); +} + +static inline unsigned long +HYPERVISOR_print_guest_kernel_ptes(e2k_addr_t address) +{ + return generic_hypercall1(KVM_HCALL_PRINT_GUEST_KERNEL_PTES, address); +} +static inline unsigned long +HYPERVISOR_print_guest_user_address_ptes(int gmmid_nr, e2k_addr_t address) +{ + return generic_hypercall2(KVM_HCALL_PRINT_GUEST_USER_ADDRESS_PTES, + gmmid_nr, address); +} +static inline void +HYPERVISOR_dump_guest_stack(void) +{ + generic_hypercall0(KVM_HCALL_DUMP_GUEST_STACK); +} +static inline void +HYPERVISOR_ftrace_stop(void) +{ + generic_hypercall0(KVM_HCALL_FTRACE_STOP); +} +static inline void +HYPERVISOR_ftrace_dump(void) +{ + generic_hypercall0(KVM_HCALL_FTRACE_DUMP); +} +static inline void 
+HYPERVISOR_vcpu_show_state_completion(void) +{ + generic_hypercall0(KVM_HCALL_DUMP_COMPLETION); +} +static inline unsigned long +HYPERVISOR_fast_tagged_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return generic_hypercall6(KVM_HCALL_FAST_TAGGED_MEMORY_COPY, + (unsigned long)dst, (unsigned long)src, + len, strd_opcode, ldrd_opcode, prefetch); +} +static inline unsigned long +HYPERVISOR_fast_tagged_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + return generic_hypercall5(KVM_HCALL_FAST_TAGGED_MEMORY_SET, + (unsigned long)addr, val, tag, len, strd_opcode); +} +#ifdef CONFIG_KVM_ASYNC_PF +static inline int HYPERVISOR_pv_enable_async_pf(u64 apf_reason_gpa, + u64 apf_id_gpa, u32 apf_ready_vector, u32 irq_controller) +{ + return generic_hypercall4(KVM_HCALL_PV_ENABLE_ASYNC_PF, + apf_reason_gpa, apf_id_gpa, + apf_ready_vector, irq_controller); +} +#endif /* CONFIG_KVM_ASYNC_PF */ +static inline unsigned long +HYPERVISOR_flush_tlb_range(e2k_addr_t start_gva, e2k_addr_t end_gva) +{ + return generic_hypercall2(KVM_HCALL_FLUSH_TLB_RANGE, + start_gva, end_gva); +} +static inline void +HYPERVISOR_sync_addr_range(e2k_addr_t start_gva, e2k_addr_t end_gva) +{ + generic_hypercall2(KVM_HCALL_SYNC_ADDR_RANGE, + start_gva, end_gva); +} + +/* + * arguments: + * VIRQ number + * flag: "is VIRQ handling in progress and need wake up main VCPU thread, which + * can be on idle" + * results of waiting for VIRQ: + * > 0 : number of VIRQs waiting for handling + * = 0 : VIRQ handler should be stopped + * < 0 : if error detected + */ +#define KVM_VIRQ_RECEIVED(ret) ((ret) > 0) +#define KVM_VIRQ_STOPPED(ret) ((ret) == 0) +#define KVM_VIRQ_FAILED(ret) ((ret) < 0) + +static inline unsigned long +HYPERVISOR_wait_for_virq(int virq, bool in_progress) +{ + return generic_hypercall2(KVM_HCALL_WAIT_FOR_VIRQ, virq, in_progress); +} + +#endif /* _ASM_E2K_HYPERCALL_H */ diff --git 
a/arch/e2k/include/asm/kvm/hypervisor.h b/arch/e2k/include/asm/kvm/hypervisor.h new file mode 100644 index 000000000000..58e0ba238243 --- /dev/null +++ b/arch/e2k/include/asm/kvm/hypervisor.h @@ -0,0 +1,106 @@ +/* + * Kernel-based Virtual Machine driver for Linux + * + * This header defines architecture specific interface hypervisor -> guest + * to know more about the KVM & hypervisor features + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#ifndef _ASM_E2K_KVM_HYPERVISOR_H +#define _ASM_E2K_KVM_HYPERVISOR_H + +#include + +#include +#include + +/* KVM and hypervisor features */ +/* (see field 'features' of kvm_host_info_t structure at asm/kvm/guest.h) */ +#define KVM_FEAT_HV_CPU_BIT 0 /* Hardware virtualized CPU is ON */ +#define KVM_FEAT_PV_CPU_BIT 1 /* ParaVirtualized CPU is ON */ +#define KVM_FEAT_HW_HCALL_BIT 4 /* HardWare supported HyperCALL is on */ +#define KVM_FEAT_PV_HCALL_BIT 5 /* ParaVirtualized HyperCALLs is on */ +#define KVM_FEAT_HV_MMU_BIT 8 /* Hardware Virtualized MMU is ON */ +#define KVM_FEAT_PV_MMU_BIT 9 /* MMU support is ParaVirtualization */ +#define KVM_FEAT_MMU_SPT_BIT 10 /* MMU support is based on shadow */ + /* paging */ +#define KVM_FEAT_MMU_TDP_BIT 11 /* MMU support is based on TDP */ +#define KVM_FEAT_HV_EPIC_BIT 16 /* Hardware Virtualized EPIC is ON */ +#define KVM_FEAT_PV_APIC_BIT 18 /* ParaVirtualized APIC is ON */ + /* Simulator into hypervisor */ +#define KVM_FEAT_PV_EPIC_BIT 19 /* ParaVirtualized EPIC is ON */ + /* Simulator into hypervisor */ + +/* bit mask of features to direct test */ +#define KVM_FEAT_HV_CPU_MASK (1UL << KVM_FEAT_HV_CPU_BIT) +#define KVM_FEAT_PV_CPU_MASK (1UL << KVM_FEAT_PV_CPU_BIT) +#define KVM_FEAT_HW_HCALL_MASK (1UL << KVM_FEAT_HW_HCALL_BIT) +#define KVM_FEAT_PV_HCALL_MASK (1UL << KVM_FEAT_PV_HCALL_BIT) +#define KVM_FEAT_HV_MMU_MASK (1UL << KVM_FEAT_HV_MMU_BIT) +#define KVM_FEAT_PV_MMU_MASK (1UL << KVM_FEAT_PV_MMU_BIT) 
+#define KVM_FEAT_MMU_SPT_MASK (1UL << KVM_FEAT_MMU_SPT_BIT) +#define KVM_FEAT_MMU_TDP_MASK (1UL << KVM_FEAT_MMU_TDP_BIT) +#define KVM_FEAT_HV_EPIC_MASK (1UL << KVM_FEAT_HV_EPIC_BIT) +#define KVM_FEAT_PV_APIC_MASK (1UL << KVM_FEAT_PV_APIC_BIT) +#define KVM_FEAT_PV_EPIC_MASK (1UL << KVM_FEAT_PV_EPIC_BIT) + +/* + * Basic function to access to host info on guest. + */ +#define GUEST_HOST_INFO_BASE (offsetof(kvm_vcpu_state_t, host)) + +static inline kvm_host_info_t *kvm_get_host_info(void) +{ + unsigned long vcpu_base; + + KVM_GET_VCPU_STATE_BASE(vcpu_base); + return *((kvm_host_info_t **)(vcpu_base + GUEST_HOST_INFO_BASE)); +} + +static inline unsigned long kvm_hypervisor_features(void) +{ + return kvm_get_host_info()->features; +} +static inline bool kvm_test_hprv_feats_mask(unsigned long feature_mask) +{ + return (kvm_hypervisor_features() & feature_mask) != 0; +} +static inline bool kvm_test_hprv_full_feats_mask(unsigned long feature_mask) +{ + return (kvm_hypervisor_features() & feature_mask) == feature_mask; +} +static inline bool kvm_test_hprv_feats_bit(int feature_bit) +{ + return kvm_test_hprv_feats_mask(1UL << feature_bit); +} + +#define IS_HV_CPU_KVM() kvm_test_hprv_feats_mask(KVM_FEAT_HV_CPU_MASK) +#define IS_PV_CPU_KVM() kvm_test_hprv_feats_mask(KVM_FEAT_PV_CPU_MASK) +#define IS_HV_MMU_KVM() kvm_test_hprv_feats_mask(KVM_FEAT_HV_MMU_MASK) +#define IS_PV_MMU_KVM() kvm_test_hprv_feats_mask(KVM_FEAT_PV_MMU_MASK) +#define IS_HV_CPU_PV_MMU_KVM() \ + kvm_test_hprv_full_feats_mask(KVM_FEAT_HV_CPU_MASK | \ + KVM_FEAT_PV_MMU_MASK) +#define IS_HV_CPU_HV_MMU_KVM() \ + kvm_test_hprv_full_feats_mask(KVM_FEAT_HV_CPU_MASK | \ + KVM_FEAT_HV_MMU_MASK) +#define IS_PV_CPU_PV_MMU_KVM() \ + kvm_test_hprv_full_feats_mask(KVM_FEAT_PV_CPU_MASK | \ + KVM_FEAT_PV_MMU_MASK) +#define IS_MMU_SPT() kvm_test_hprv_feats_mask(KVM_FEAT_MMU_SPT_MASK) +#define IS_MMU_TDP() kvm_test_hprv_feats_mask(KVM_FEAT_MMU_TDP_MASK) +#define IS_HV_MMU_SPT() (IS_HV_MMU_KVM() && IS_MMU_SPT()) +#define 
IS_HV_MMU_TDP() (IS_HV_MMU_KVM() && IS_MMU_TDP()) +#define IS_HV_KVM() IS_HV_CPU_HV_MMU_KVM() +#define IS_PV_KVM() IS_PV_CPU_PV_MMU_KVM() +#define IS_HW_HCALL_KVM() kvm_test_hprv_feats_mask(KVM_FEAT_HW_HCALL_MASK) +#define IS_PV_HCALL_KVM() kvm_test_hprv_feats_mask(KVM_FEAT_PV_HCALL_MASK) +#define IS_HV_EPIC_KVM() kvm_test_hprv_feats_mask(KVM_FEAT_HV_EPIC_MASK) +#define IS_PV_APIC_KVM() kvm_test_hprv_feats_mask(KVM_FEAT_PV_APIC_MASK) +#define IS_PV_EPIC_KVM() kvm_test_hprv_feats_mask(KVM_FEAT_PV_EPIC_MASK) + + +#endif /* _ASM_E2K_KVM_HYPERVISOR_H */ diff --git a/arch/e2k/include/asm/kvm/irq.h b/arch/e2k/include/asm/kvm/irq.h new file mode 100644 index 000000000000..faf73d063882 --- /dev/null +++ b/arch/e2k/include/asm/kvm/irq.h @@ -0,0 +1,44 @@ + +#ifndef __E2K_ASM_KVM_IRQ_H_ +#define __E2K_ASM_KVM_IRQ_H_ + +#include +#include + +/* + * VIRTUAL INTERRUPTS + * + * Virtual interrupts that a guest OS may receive from KVM. + */ +#define KVM_VIRQ_TIMER 0 /* timer interrupt */ +#define KVM_VIRQ_HVC 1 /* HyperVisor Console interrupt */ +#define KVM_VIRQ_LAPIC 2 /* virtual local APIC interrupt */ +#define KVM_VIRQ_CEPIC 3 /* virtual CEPIC interrupt */ +#define KVM_NR_VIRQS (KVM_VIRQ_CEPIC + 1) + +#define KVM_MAX_NR_VIRQS (KVM_MAX_VIRQ_VCPUS * KVM_NR_VIRQS) + +#if KVM_NR_VIRQS > KVM_MAX_NR_VIRQS + #error "limit of max number of VIRQs exceeded" +#endif + +static inline const char *kvm_get_virq_name(int virq_id) +{ + switch (virq_id) { + case KVM_VIRQ_TIMER: + return "early_timer"; + case KVM_VIRQ_HVC: + return "hvc_virq"; + case KVM_VIRQ_LAPIC: + return "lapic"; + case KVM_VIRQ_CEPIC: + return "cepic"; + default: + return "???"; + } +} + +typedef int (*irq_thread_t)(void *); +extern int debug_guest_virqs; + +#endif /* __E2K_ASM_KVM_IRQ_H_ */ diff --git a/arch/e2k/include/asm/kvm/machdep.h b/arch/e2k/include/asm/kvm/machdep.h new file mode 100644 index 000000000000..0c8584d4bffb --- /dev/null +++ b/arch/e2k/include/asm/kvm/machdep.h @@ -0,0 +1,69 @@ +#ifndef 
_E2K_KVM_MACHDEP_H_ +#define _E2K_KVM_MACHDEP_H_ + +#include + +#ifdef __KERNEL__ + +typedef struct global_regs global_regs_t; +typedef struct kernel_gregs kernel_gregs_t; +typedef struct host_gregs host_gregs_t; + +#ifndef CONFIG_VIRTUALIZATION +/* it is native kernel without any virtualization support */ +typedef struct host_machdep { + /* nothing to support and do */ +} host_machdep_t; +typedef struct guest_machdep { + /* none any guest */ +} guest_machdep_t; +#else /* CONFIG_VIRTUALIZATION */ +extern void kvm_save_host_gregs_v2(struct host_gregs *gregs); +extern void kvm_save_host_gregs_v5(struct host_gregs *gregs); +extern void kvm_restore_host_gregs_v5(const struct host_gregs *gregs); + +extern void kvm_guest_save_local_gregs_v2(struct local_gregs *gregs, + bool is_signal); +extern void kvm_guest_save_local_gregs_v5(struct local_gregs *gregs, + bool is_signal); +extern void kvm_guest_save_kernel_gregs_v2(kernel_gregs_t *gregs); +extern void kvm_guest_save_kernel_gregs_v5(kernel_gregs_t *gregs); +extern void kvm_guest_save_gregs_v2(struct global_regs *gregs); +extern void kvm_guest_save_gregs_v5(struct global_regs *gregs); +extern void kvm_guest_save_gregs_dirty_bgr_v2(struct global_regs *gregs); +extern void kvm_guest_save_gregs_dirty_bgr_v5(struct global_regs *gregs); +extern void kvm_guest_restore_gregs_v2(const global_regs_t *gregs); +extern void kvm_guest_restore_gregs_v5(const global_regs_t *gregs); +extern void kvm_guest_restore_kernel_gregs_v2(global_regs_t *gregs); +extern void kvm_guest_restore_kernel_gregs_v5(global_regs_t *gregs); +extern void kvm_guest_restore_local_gregs_v2(const struct local_gregs *gregs, + bool is_signal); +extern void kvm_guest_restore_local_gregs_v5(const struct local_gregs *gregs, + bool is_signal); + +#if defined(CONFIG_PARAVIRT_GUEST) || defined(CONFIG_KVM_GUEST_KERNEL) +/* it is paravirtualized host and guest kernel */ +/* or pure guest kernel */ +#include +#endif /* CONFIG_PARAVIRT_GUEST || CONFIG_KVM_GUEST_KERNEL */ + 
+#ifndef CONFIG_KVM_GUEST_KERNEL +/* it is native host kernel with virtualization support */ +/* or paravirtualized host and guest kernel */ + +typedef struct host_machdep { +} host_machdep_t; +#endif /* ! CONFIG_KVM_GUEST_KERNEL */ + +#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native host kernel with virtualization support */ +typedef struct guest_machdep { + /* cannot run as guest */ +} guest_machdep_t; +#endif /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! CONFIG_VIRTUALIZATION */ + +#endif /* __KERNEL__ */ + +#endif /* _E2K_KVM_MACHDEP_H_ */ diff --git a/arch/e2k/include/asm/kvm/mm.h b/arch/e2k/include/asm/kvm/mm.h new file mode 100644 index 000000000000..c1c2313afd8e --- /dev/null +++ b/arch/e2k/include/asm/kvm/mm.h @@ -0,0 +1,100 @@ +#ifndef __E2K_KVM_HOST_MM_H +#define __E2K_KVM_HOST_MM_H + +#include +#include +#include +#include + +#include + +#define GMMID_MAX_LIMIT (GPID_MAX_LIMIT) +#define RESERVED_GMMIDS 1 /* 0 is reserved for init_mm */ + +#define GMMIDMAP_ENTRIES ((GMMID_MAX_LIMIT + 8*PAGE_SIZE - 1)/ \ + PAGE_SIZE/8) + +#define GMMID_HASH_BITS GPID_HASH_BITS +#define GMMID_HASH_SIZE NID_HASH_SIZE(GMMID_HASH_BITS) + +/* + * Guest mm structure agent on host + * The structure on host is only agent of real mm structure on guest, + * so sinchronization should be done by guest using real mm semaphores and + * spinlocks and here on host we does not use locking/unlocking + */ +typedef struct gmm_struct { + kvm_nid_t nid; /* numeric ID of the host agent */ + /* of guest mm structure */ + atomic_t mm_count; /* How many references to guest mm */ + /* shared mm */ +#ifdef CONFIG_GUEST_MM_SPT_LIST + struct list_head spt_list; /* shadow page tables pages list */ + spinlock_t spt_list_lock; /* spin lock to access to list */ + size_t spt_list_size; /* current numbers of SPs in list */ + size_t total_released; /* total number of allocated and */ + /* released SPs through list */ +#endif /* 
CONFIG_GUEST_MM_SPT_LIST */ +#ifdef CONFIG_KVM_HV_MMU + hpa_t root_hpa; /* physical base of root shadow PT */ + /* for guest mm on host */ + gfn_t root_gpa; /* 'physical' base of guest root PT */ + gpa_t os_pptb; /* guest kernel root PT physical base */ + gpa_t u_pptb; /* guest user root PT physical base */ + gva_t os_vptb; /* guest kernel root PT virtual base */ + gva_t u_vptb; /* guest user root PT virtual base */ + bool pt_synced; /* root guest PT was synced with */ + /* host shadow PT */ +#endif /* CONFIG_KVM_HV_MMU */ + spinlock_t page_table_lock; /* Protects page tables of mm */ + /* MMU context (PID) support */ + mm_context_t context; /* MMU context (PID) support for */ + /* the guest mm */ + cpumask_t cpu_vm_mask; /* mask of CPUs where the mm is */ + /* in use or was some early */ +} gmm_struct_t; + +/* same as accessor for struct mm_struct's cpu_vm_mask but for guest mm */ +#define gmm_cpumask(gmm) (&(gmm)->cpu_vm_mask) + +typedef struct kvm_nid_table gmmid_table_t; + +#define gmmid_hashfn(nr) nid_hashfn(nr, GMMID_HASH_BITS) + +struct kvm; + +extern int kvm_guest_mm_drop(struct kvm_vcpu *vcpu, int gmmid_nr); +extern int kvm_activate_guest_mm(struct kvm_vcpu *vcpu, + int active_gmmid_nr, int gmmid_nr, gpa_t u_phys_ptb); +extern int kvm_guest_pv_mm_init(struct kvm *kvm); +extern void kvm_guest_pv_mm_destroy(struct kvm *kvm); + +#define for_each_guest_mm(gmm, entry, next, gmmid_table) \ + for_each_guest_nid_node(gmm, entry, next, gmmid_table, \ + nid.nid_chain) +#define gmmid_entry(ptr) container_of(ptr, gmm_struct_t, nid) +#define gmmid_table_lock(gmmid_table) \ + nid_table_lock(gmmid_table) +#define gmmid_table_unlock(gmmid_table) \ + nid_table_unlock(gmmid_table) +#define gmmid_table_lock_irq(gmmid_table) \ + nid_table_lock_irq(gmmid_table) +#define gmmid_table_unlock(gmmid_table) \ + nid_table_unlock(gmmid_table) +#define gmmid_table_lock_irqsave(gmmid_table, flags) \ + nid_table_lock_irqsave(gmmid_table, flags) +#define 
gmmid_table_unlock_irqrestore(gmmid_table, flags) \ + nid_table_unlock_irqrestore(gmmid_table, flags) + +static inline gmm_struct_t * +kvm_find_gmmid(gmmid_table_t *gmmid_table, int gmmid_nr) +{ + kvm_nid_t *nid; + + nid = kvm_find_nid(gmmid_table, gmmid_nr, gmmid_hashfn(gmmid_nr)); + if (nid == NULL) + return NULL; + return gmmid_entry(nid); +} + +#endif /* __E2K_KVM_HOST_MM_H */ diff --git a/arch/e2k/include/asm/kvm/mm_hooks.h b/arch/e2k/include/asm/kvm/mm_hooks.h new file mode 100644 index 000000000000..a8340397b712 --- /dev/null +++ b/arch/e2k/include/asm/kvm/mm_hooks.h @@ -0,0 +1,31 @@ +/* + * KVM guest mm hooks support + * Copyright 2021 Andrey I. Alekhin (alekhin_a@mcst.ru) + */ + +#ifndef _E2K_KVM_MM_HOOKS_H +#define _E2K_KVM_MM_HOOKS_H + +#include + +/* + * Virtualization support + */ + +#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native kernel without any virtualization */ +/* it is native host kernel with virtualization support */ +static inline void +get_mm_notifier_locked(struct mm_struct *mm) +{ + /* Do not need mmu notifier in native mode */ +} +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#endif /* ! CONFIG_PARAVIRT_GUEST && ! 
CONFIG_KVM_GUEST_KERNEL */ + +#endif /* !(_E2K_KVM_MM_HOOKS_H) */ diff --git a/arch/e2k/include/asm/kvm/mmu.h b/arch/e2k/include/asm/kvm/mmu.h new file mode 100644 index 000000000000..18f08200272e --- /dev/null +++ b/arch/e2k/include/asm/kvm/mmu.h @@ -0,0 +1,439 @@ +#ifndef __E2K_KVM_HOST_MMU_H +#define __E2K_KVM_HOST_MMU_H + +#include +#include +#include +#include +#include +#include +#include + +static inline bool is_ss(struct kvm_vcpu *vcpu) +{ + return false; +} +static inline bool is_sep_virt_spaces(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.sep_virt_space; +} +static inline void set_sep_virt_spaces(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.sep_virt_space = true; +} +static inline void reset_sep_virt_spaces(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.sep_virt_space = false; +} +static inline bool is_shadow_paging(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.shadow_pt_on; +} +static inline void set_shadow_paging(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.shadow_pt_on = true; + set_bit(KVM_FEAT_MMU_SPT_BIT, + &vcpu->kvm->arch.kmap_host_info->features); + clear_bit(KVM_FEAT_MMU_TDP_BIT, + &vcpu->kvm->arch.kmap_host_info->features); +} +static inline void reset_shadow_paging(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.shadow_pt_on = false; +} +static inline bool is_phys_paging(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.phys_pt_on; +} +static inline void set_phys_paging(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.phys_pt_on = true; +} +static inline void reset_phys_paging(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.phys_pt_on = false; +} +static inline bool is_tdp_paging(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.tdp_on; +} +static inline void set_tdp_paging(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.tdp_on = true; + set_bit(KVM_FEAT_MMU_TDP_BIT, + &vcpu->kvm->arch.kmap_host_info->features); + clear_bit(KVM_FEAT_MMU_SPT_BIT, + &vcpu->kvm->arch.kmap_host_info->features); +} +static inline void reset_tdp_paging(struct kvm_vcpu *vcpu) +{ + 
vcpu->arch.mmu.tdp_on = false; +} + +static inline bool is_paging_flag(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.paging_on; +} +static inline void set_paging_flag(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.paging_on = true; +} +static inline void reset_paging_flag(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.paging_on = false; +} + +static inline bool is_pv_paging(struct kvm_vcpu *vcpu) +{ + return is_paging_flag(vcpu); +} +static inline bool is_spt_paging(struct kvm_vcpu *vcpu) +{ + return is_paging_flag(vcpu); +} +static inline bool is_hv_paging(struct kvm_vcpu *vcpu) +{ +#ifdef CONFIG_VIRTUALIZATION + if (current_thread_info()->vcpu != vcpu) + return is_paging_flag(vcpu); +#endif + if (vcpu->arch.mmu.is_paging == NULL) + return is_paging_flag(vcpu); + + return vcpu->arch.mmu.is_paging(vcpu); +} + +static inline bool is_paging(struct kvm_vcpu *vcpu) +{ + if (is_tdp_paging(vcpu)) + return is_hv_paging(vcpu); + if (unlikely(vcpu->arch.is_pv)) + return is_pv_paging(vcpu); + if (unlikely(is_shadow_paging(vcpu))) + return is_spt_paging(vcpu); + + return is_paging_flag(vcpu); +} + +static inline bool is_spt_gpa_fault(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.spt_gpa_fault; +} +static inline void set_spt_gpa_fault(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.spt_gpa_fault = true; +} +static inline void reset_spt_gpa_fault(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.spt_gpa_fault = false; +} + +static inline unsigned long get_mmu_u_pptb_reg(void) +{ + return NATIVE_READ_MMU_U_PPTB_REG(); +} + +static inline unsigned long get_mmu_pid_reg(void) +{ + return NATIVE_READ_MMU_PID_REG(); +} + +static inline hpa_t +kvm_get_gp_phys_root(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.get_vcpu_gp_pptb(vcpu); +} +static inline void +kvm_set_gp_phys_root(struct kvm_vcpu *vcpu, hpa_t root) +{ + vcpu->arch.mmu.set_vcpu_gp_pptb(vcpu, root); +} + +static inline hpa_t +kvm_get_space_type_spt_root(struct kvm_vcpu *vcpu, bool u_root) +{ + return (u_root) ? 
vcpu->arch.mmu.get_vcpu_sh_u_pptb(vcpu) : + vcpu->arch.mmu.get_vcpu_sh_os_pptb(vcpu); +} +static inline hpa_t +kvm_get_space_type_spt_os_root(struct kvm_vcpu *vcpu) +{ + return kvm_get_space_type_spt_root(vcpu, false); +} +static inline hpa_t +kvm_get_space_type_spt_u_root(struct kvm_vcpu *vcpu) +{ + return kvm_get_space_type_spt_root(vcpu, true); +} +static inline void +kvm_set_space_type_spt_root(struct kvm_vcpu *vcpu, hpa_t root, bool u_root) +{ + if (u_root) { + vcpu->arch.mmu.set_vcpu_sh_u_pptb(vcpu, root); + } else { + vcpu->arch.mmu.set_vcpu_sh_os_pptb(vcpu, root); + } +} +static inline void +kvm_set_space_type_spt_os_root(struct kvm_vcpu *vcpu, hpa_t root) +{ + kvm_set_space_type_spt_root(vcpu, root, false); +} +static inline void +kvm_set_space_type_spt_u_root(struct kvm_vcpu *vcpu, hpa_t root) +{ + kvm_set_space_type_spt_root(vcpu, root, true); +} +static inline hpa_t +kvm_get_space_addr_spt_root(struct kvm_vcpu *vcpu, gva_t gva) +{ + if (!vcpu->arch.mmu.sep_virt_space) { + return vcpu->arch.mmu.get_vcpu_sh_u_pptb(vcpu); + } else if (unlikely(gva >= vcpu->arch.mmu.get_vcpu_os_vab(vcpu))) { + return vcpu->arch.mmu.get_vcpu_sh_os_pptb(vcpu); + } else { + return vcpu->arch.mmu.get_vcpu_sh_u_pptb(vcpu); + } +} +static inline hpa_t +kvm_get_space_addr_root(struct kvm_vcpu *vcpu, gva_t gva) +{ + if (likely(is_tdp_paging(vcpu) || + ((!is_paging(vcpu) || is_spt_gpa_fault(vcpu)) && + is_phys_paging(vcpu)))) { + return kvm_get_gp_phys_root(vcpu); + } else if (is_shadow_paging(vcpu)) { + return kvm_get_space_addr_spt_root(vcpu, gva); + } else { + KVM_BUG_ON(true); + return (hpa_t)-EINVAL; + } +} +static inline gpa_t +kvm_get_space_type_guest_root(struct kvm_vcpu *vcpu, bool u_root) +{ + if (!vcpu->arch.mmu.sep_virt_space) { + KVM_BUG_ON(!u_root); + return (gpa_t)vcpu->arch.mmu.get_vcpu_u_pptb(vcpu); + } + return (u_root) ? 
(gpa_t)vcpu->arch.mmu.get_vcpu_u_pptb(vcpu) : + (gpa_t)vcpu->arch.mmu.get_vcpu_os_pptb(vcpu); +} +static inline gpa_t +kvm_get_space_type_guest_os_root(struct kvm_vcpu *vcpu) +{ + return kvm_get_space_type_guest_root(vcpu, false); +} +static inline gpa_t +kvm_get_space_type_guest_u_root(struct kvm_vcpu *vcpu) +{ + return kvm_get_space_type_guest_root(vcpu, true); +} + +static inline void +kvm_set_space_type_guest_root(struct kvm_vcpu *vcpu, gpa_t root, + bool u_root) +{ + if (!vcpu->arch.mmu.sep_virt_space) { + KVM_BUG_ON(!u_root); + vcpu->arch.mmu.set_vcpu_u_pptb(vcpu, (pgprotval_t)root); + } else if (likely(u_root)) { + vcpu->arch.mmu.set_vcpu_u_pptb(vcpu, (pgprotval_t)root); + } else { + vcpu->arch.mmu.set_vcpu_os_pptb(vcpu, (pgprotval_t)root); + } +} +static inline void +kvm_set_space_type_guest_os_root(struct kvm_vcpu *vcpu, gpa_t root) +{ + kvm_set_space_type_guest_root(vcpu, root, false); +} +static inline void +kvm_set_space_type_guest_u_root(struct kvm_vcpu *vcpu, gpa_t root) +{ + kvm_set_space_type_guest_root(vcpu, root, true); +} +static inline gpa_t +kvm_get_space_addr_guest_root(struct kvm_vcpu *vcpu, gva_t gva) +{ + if (!vcpu->arch.mmu.sep_virt_space) { + return vcpu->arch.mmu.get_vcpu_u_pptb(vcpu); + } else if (unlikely(gva >= vcpu->arch.mmu.get_vcpu_os_vab(vcpu))) { + return vcpu->arch.mmu.get_vcpu_os_pptb(vcpu); + } else { + return vcpu->arch.mmu.get_vcpu_u_pptb(vcpu); + } +} +static inline hpa_t +kvm_get_space_type_spt_vptb(struct kvm_vcpu *vcpu, bool u_root) +{ + if (!vcpu->arch.mmu.sep_virt_space) { + /* common standard in linux: user and OS share virtual */ + /* space of user */ + KVM_BUG_ON(!u_root); + return vcpu->arch.mmu.get_vcpu_sh_u_vptb(vcpu); + } else if (u_root) { + return vcpu->arch.mmu.get_vcpu_sh_u_vptb(vcpu); + } else { + return vcpu->arch.mmu.get_vcpu_sh_os_vptb(vcpu); + } +} +static inline hpa_t +kvm_get_space_addr_spt_vptb(struct kvm_vcpu *vcpu, gva_t gva) +{ + if (!vcpu->arch.mmu.sep_virt_space) { + return 
vcpu->arch.mmu.get_vcpu_sh_u_vptb(vcpu); + } else if (unlikely(gva >= vcpu->arch.mmu.get_vcpu_os_vab(vcpu))) { + return vcpu->arch.mmu.get_vcpu_sh_os_vptb(vcpu); + } else { + return vcpu->arch.mmu.get_vcpu_sh_u_vptb(vcpu); + } +} + +#define INVALID_GPA ((gpa_t)E2K_INVALID_PAGE) +#define IS_INVALID_GPA(gpa) ((gpa) == INVALID_GPA) + +static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) +{ + struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT); + + return (struct kvm_mmu_page *)page_private(page); +} + +extern void kvm_get_spt_translation(struct kvm_vcpu *vcpu, e2k_addr_t address, + pgdval_t *pgd, pudval_t *pud, pmdval_t *pmd, + pteval_t *pte, int *pt_level); +extern unsigned long kvm_get_gva_to_hva(struct kvm_vcpu *vcpu, gva_t gva); + +static inline gpa_t kvm_hva_to_gpa(struct kvm *kvm, unsigned long hva) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + int i; + + for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { + slots = __kvm_memslots(kvm, i); + kvm_for_each_memslot(memslot, slots) { + unsigned long hva_start, hva_end; + gfn_t gfn; + gpa_t gpa; + + hva_start = memslot->userspace_addr; + hva_end = hva_start + (memslot->npages << PAGE_SHIFT); + if (hva < hva_start || hva >= hva_end) + continue; + gfn = hva_to_gfn_memslot(hva, memslot); + gpa = (gfn << PAGE_SHIFT) + (hva & ~PAGE_MASK); + return gpa; + } + } + + return INVALID_GPA; +} + +static inline gpa_t +kvm_vcpu_hva_to_gpa(struct kvm_vcpu *vcpu, unsigned long hva) +{ + return kvm_hva_to_gpa(vcpu->kvm, hva); +} + +static inline void kvm_setup_host_mmu_info(struct kvm_vcpu *vcpu) +{ + if (is_tdp_paging(vcpu)) { + set_bit(KVM_FEAT_MMU_TDP_BIT, + &vcpu->kvm->arch.kmap_host_info->features); + clear_bit(KVM_FEAT_MMU_SPT_BIT, + &vcpu->kvm->arch.kmap_host_info->features); + } else if (is_shadow_paging(vcpu)) { + set_bit(KVM_FEAT_MMU_SPT_BIT, + &vcpu->kvm->arch.kmap_host_info->features); + clear_bit(KVM_FEAT_MMU_TDP_BIT, + &vcpu->kvm->arch.kmap_host_info->features); + } else { + 
KVM_BUG_ON(true); + } +} + +#ifdef CONFIG_KVM_SHADOW_PT_ENABLE +extern int kvm_pv_mmu_page_fault(struct kvm_vcpu *vcpu, struct pt_regs *regs, + trap_cellar_t *tcellar, bool user_mode); +extern int kvm_pv_mmu_instr_page_fault(struct kvm_vcpu *vcpu, + struct pt_regs *regs, tc_fault_type_t ftype, + const int async_instr); +extern int kvm_pv_mmu_aau_page_fault(struct kvm_vcpu *vcpu, + struct pt_regs *regs, e2k_addr_t address, + tc_cond_t cond, unsigned int aa_no); +extern long kvm_hv_mmu_page_fault(struct kvm_vcpu *vcpu, struct pt_regs *regs, + intc_info_mu_t *intc_info_mu); +extern int kvm_mmu_instr_page_fault(struct kvm_vcpu *vcpu, gva_t address, + bool async_instr, u32 error_code); +#else /* ! CONFIG_KVM_SHADOW_PT_ENABLE */ +static inline int +kvm_pv_mmu_page_fault(struct kvm_vcpu *vcpu, struct pt_regs *regs, + trap_cellar_t *tcellar, bool user_mode) +{ + /* page fault should be handled by host */ + return -1; +} +static inline long +kvm_hv_mmu_page_fault(struct kvm_vcpu *vcpu, struct pt_regs *regs, + intc_info_mu_t *intc_info_mu) +{ + /* page fault should be handled by host */ + return -1; +} +static inline int +kvm_pv_mmu_instr_page_fault(struct kvm_vcpu *vcpu, + struct pt_regs *regs, tc_fault_type_t ftype, + const int async_instr) +{ + /* page fault should be handled by host */ + return -1; +} +static inline int +kvm_pv_mmu_aau_page_fault(struct kvm_vcpu *vcpu, + struct pt_regs *regs, e2k_addr_t address, + tc_cond_t cond, unsigned int aa_no) +{ + /* page fault should be handled by host */ + return -1; +} + +static inline int +kvm_mmu_instr_page_fault(struct kvm_vcpu *vcpu, gva_t address, + bool async_instr, u32 error_code) +{ + /* page fault should be handled by host */ + return -1; +} +#endif /* CONFIG_KVM_SHADOW_PT_ENABLE */ + +extern int kvm_guest_addr_to_host(void **addr); +extern void *kvm_guest_ptr_to_host_ptr(void *guest_ptr, int size, + bool need_inject); + +#ifdef CONFIG_KVM_HOST_MODE +/* it is native host kernel with virtualization support */ +static 
inline int +guest_addr_to_host(void **addr, const pt_regs_t *regs) +{ + if (likely(!host_test_intc_emul_mode(regs))) { + /* faulted addres is not paravirtualized guest one */ + return native_guest_addr_to_host(addr); + } + + return kvm_guest_addr_to_host(addr); +} +static inline void * +guest_ptr_to_host(void *ptr, int size, const pt_regs_t *regs) +{ + if (likely(!host_test_intc_emul_mode(regs))) { + /* faulted addres is not paravirtualized guest one */ + return native_guest_ptr_to_host(ptr, size); + } + + return kvm_guest_ptr_to_host_ptr(ptr, size, false); +} +#endif /* CONFIG_KVM_HOST_MODE */ + +#endif /* __E2K_KVM_HOST_MMU_H */ diff --git a/arch/e2k/include/asm/kvm/mmu_context.h b/arch/e2k/include/asm/kvm/mmu_context.h new file mode 100644 index 000000000000..be4e94fb6c80 --- /dev/null +++ b/arch/e2k/include/asm/kvm/mmu_context.h @@ -0,0 +1,37 @@ +/* + * KVM guest kernel virtual space context support + * Copyright 2011 Salavat S. Gilyazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_MMU_CONTEXT_H +#define _E2K_KVM_MMU_CONTEXT_H + +#include +#include +#include + +/* + * Virtualization support + */ + +#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native kernel without any virtualization */ +/* it is native host kernel with virtualization support */ +/* mm_alloc()/mmdrop() defined at include/linux/sched.h */ + +#define activate_mm(__active_mm, __mm) \ + native_activate_mm(__active_mm, __mm) +static inline void +deactivate_mm(struct task_struct *dead_task, struct mm_struct *mm) +{ + native_deactivate_mm(dead_task, mm); +} +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#endif /* ! CONFIG_PARAVIRT_GUEST && ! 
CONFIG_KVM_GUEST_KERNEL */ + +#endif /* !(_E2K_KVM_MMU_CONTEXT_H) */ diff --git a/arch/e2k/include/asm/kvm/mmu_hv_regs_access.h b/arch/e2k/include/asm/kvm/mmu_hv_regs_access.h new file mode 100644 index 000000000000..9c285087e6f6 --- /dev/null +++ b/arch/e2k/include/asm/kvm/mmu_hv_regs_access.h @@ -0,0 +1,318 @@ +/* + * E2K MMU virtualization extensions registers access + * + * Copyright 2018 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_MMU_HV_REGS_ACCESS_H_ +#define _E2K_KVM_MMU_HV_REGS_ACCESS_H_ + +#include +#include +#include + +#include +#include +#include + +/* + * MMU registers operations + */ + +#ifndef __ASSEMBLY__ + +#define READ_VIRT_CTRL_MU_REG_VALUE() NATIVE_GET_MMUREG(virt_ctrl_mu) +#define WRITE_VIRT_CTRL_MU_REG_VALUE(val) NATIVE_SET_MMUREG(virt_ctrl_mu, (val)) + +#define READ_G_W_IMASK_MMU_CR_REG_VALUE() \ + NATIVE_GET_MMUREG(g_w_imask_mmu_cr) +#define WRITE_G_W_IMASK_MMU_CR_REG_VALUE(val) \ + NATIVE_SET_MMUREG(g_w_imask_mmu_cr, (val)) + +#define READ_GID_REG_VALUE() NATIVE_GET_MMUREG(gid) +#define WRITE_GID_REG_VALUE(val) NATIVE_SET_MMUREG(gid, (val)) + +#define READ_GP_VPTB_REG_VALUE() NATIVE_GET_MMUREG(gp_vptb) +#define WRITE_GP_VPTB_REG_VALUE(val) NATIVE_SET_MMUREG(gp_vptb, (val)) + +#define READ_GP_PPTB_REG_VALUE() NATIVE_GET_MMUREG(gp_pptb) +#define WRITE_GP_PPTB_REG_VALUE(val) NATIVE_SET_MMUREG(gp_pptb, (val)) + +#define READ_INTC_INFO_MU() NATIVE_GET_MMUREG(intc_info_mu) +#define WRITE_INTC_INFO_MU(x) NATIVE_SET_MMUREG(intc_info_mu, (x)) + +#define READ_INTC_PTR_MU() NATIVE_GET_MMUREG(intc_ptr_mu) + +static inline void __save_intc_info_mu(intc_info_mu_t *info, int *num) +{ + long info_ptr, i = 0; + + /* + * The read of INTC_PTR will clear the hardware pointer, + * but the subsequent reads for INTC_INFO will increase + * it again until it reaches the same value it had before. 
+ */ + info_ptr = READ_INTC_PTR_MU(); + if (!info_ptr) { + *num = -1; + return; + } + if (info_ptr % INTC_INFO_MU_ITEM_SIZE != 0) { + KVM_WARN_ON(true); + info_ptr = ((info_ptr + (INTC_INFO_MU_ITEM_SIZE - 1)) / + INTC_INFO_MU_ITEM_SIZE) * + INTC_INFO_MU_ITEM_SIZE; + } + + do { + info[i].no_restore = false; + info[i].modify_data = false; + info[i].hdr = (intc_info_mu_hdr_t) READ_INTC_INFO_MU(); + info[i].gpa = (u64) READ_INTC_INFO_MU(); + info[i].gva = (u64) READ_INTC_INFO_MU(); + NATIVE_STORE_TAGGED_MMUREG(&info[i].data, intc_info_mu); + info[i].condition = (tc_cond_t) READ_INTC_INFO_MU(); + NATIVE_STORE_TAGGED_MMUREG(&info[i].data_ext, intc_info_mu); + info[i].mask = (tc_mask_t) READ_INTC_INFO_MU(); + ++i; + info_ptr -= INTC_INFO_MU_ITEM_SIZE; + } while (info_ptr > 0); + + *num = i; +} + +#define fixup_intc_info_mu(info, num) \ +do { \ + int entries = *num; \ + if (entries > 0 && cpu_has(CPU_HWBUG_GUEST_ASYNC_PM)) { \ + e2k_mem_crs_t *frame; \ + e2k_pcsp_lo_t bu_pcsp_lo; \ + e2k_pcsp_hi_t bu_pcsp_hi; \ + \ + AW(bu_pcsp_lo) = READ_BU_PCSP_LO_REG_VALUE(); \ + AW(bu_pcsp_hi) = READ_BU_PCSP_HI_REG_VALUE(); \ + frame = (e2k_mem_crs_t *) (AS(bu_pcsp_lo).base + \ + AS(bu_pcsp_hi).ind); \ + --frame; \ + if (!AS(frame->cr1_lo).pm) { \ + int entry; \ + for (entry = 0; entry < entries; entry++) { \ + /* Protected mode accesses are always \ + * privileged, so keep "pm" for them. 
*/ \ + if (AS(info[entry].condition).npsp) \ + AS(info[entry].condition).pm = 0; \ + } \ + } \ + } \ +} while (0) + +/* Use macro magic to escape header hell */ +#define save_intc_info_mu(info, num) \ +do { \ + __save_intc_info_mu(info, num); \ + fixup_intc_info_mu(info, num); \ +} while (0) + + +static inline void +restore_intc_info_mu(const intc_info_mu_t *info, int num) +{ + int i; + + /* + * 1) Clear the hardware pointer + */ + READ_INTC_PTR_MU(); + if (num == -1) + return; + + /* + * 2) Write the registers + */ + for (i = 0; i < num; i++) { + if (!info[i].no_restore) { + WRITE_INTC_INFO_MU(AW(info[i].hdr)); + WRITE_INTC_INFO_MU(info[i].gpa); + WRITE_INTC_INFO_MU(info[i].gva); + NATIVE_TAGGED_LOAD_TO_MMUREG(intc_info_mu, + &info[i].data); + WRITE_INTC_INFO_MU(AW(info[i].condition)); + NATIVE_TAGGED_LOAD_TO_MMUREG(intc_info_mu, + &info[i].data_ext); + WRITE_INTC_INFO_MU(AW(info[i].mask)); + } + } +} + +static inline void +modify_intc_info_mu_data(intc_info_mu_t *info, int num) +{ + int i; + + for (i = 0; i < num; i++) { + if (unlikely(info[i].modify_data)) { + info[i].data = info[i].mod_data; + info[i].data_ext = info[i].mod_data_ext; + } + } +} + +static inline void +kvm_set_intc_info_mu_modified_data(intc_info_mu_t *info, unsigned long data, + unsigned long data_ext) +{ + info->mod_data = data; + info->mod_data_ext = data_ext; + info->modify_data = true; +} +static inline void +kvm_reset_intc_info_mu_is_updated(struct kvm_vcpu *vcpu) +{ + vcpu->arch.intc_ctxt.mu_updated = false; +} +static inline void +kvm_set_intc_info_mu_is_updated(struct kvm_vcpu *vcpu) +{ + vcpu->arch.intc_ctxt.mu_updated = true; +} +static inline bool +kvm_get_intc_info_mu_is_updated(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.intc_ctxt.mu_updated; +} + +#define READ_SH_OS_PPTB_REG_VALUE() NATIVE_GET_MMUREG(sh_os_pptb) +#define WRITE_SH_OS_PPTB_REG_VALUE(val) NATIVE_SET_MMUREG(sh_os_pptb, (val)) + +#define READ_SH_OS_VPTB_REG_VALUE() NATIVE_GET_MMUREG(sh_os_vptb) +#define 
WRITE_SH_OS_VPTB_REG_VALUE(val) NATIVE_SET_MMUREG(sh_os_vptb, (val)) + +#define READ_SH_OS_VAB_REG_VALUE() NATIVE_GET_MMUREG(sh_os_vab) +#define WRITE_SH_OS_VAB_REG_VALUE(val) NATIVE_SET_MMUREG(sh_os_vab, (val)) + +#define READ_SH_PID_REG_VALUE() NATIVE_GET_MMUREG(sh_pid) +#define WRITE_SH_PID_REG_VALUE(val) NATIVE_SET_MMUREG(sh_pid, (val)) + +#define READ_CR_G_W_IMASK_REG_VALUE() NATIVE_GET_MMUREG(g_w_imask_mmu_cr) +#define WRITE_CR_G_W_IMASK_REG_VALUE(val) \ + NATIVE_SET_MMUREG(g_w_imask_mmu_cr, (val)) + +#define READ_SH_MMU_CR_REG_VALUE() NATIVE_GET_MMUREG(sh_mmu_cr) +#define WRITE_SH_MMU_CR_REG_VALUE(val) NATIVE_SET_MMUREG(sh_mmu_cr, (val)) + +extern unsigned long read_VIRT_CTRL_MU_reg_value(void); +extern void write_VIRT_CTRL_MU_reg_value(unsigned long value); +extern unsigned long read_GID_reg_value(void); +extern void write_GID_reg_value(unsigned long value); +extern unsigned long read_GP_VPTB_reg_value(void); +extern void write_GP_VPTB_reg_value(unsigned long value); +extern unsigned long read_GP_PPTB_reg_value(void); +extern void write_GP_PPTB_reg_value(unsigned long value); +extern unsigned long read_SH_OS_PPTB_reg_value(void); +extern void write_SH_OS_PPTB_reg_value(unsigned long value); +extern unsigned long read_SH_OS_VPTB_reg_value(void); +extern void write_SH_OS_VPTB_reg_value(unsigned long value); +extern unsigned long read_SH_OS_VAB_reg_value(void); +extern void write_SH_OS_VAB_reg_value(unsigned long value); +extern unsigned long read_SH_PID_reg_value(void); +extern void write_SH_PID_reg_value(unsigned long value); +extern unsigned long read_SH_MMU_CR_reg_value(void); +extern void write_SH_MMU_CR_reg_value(unsigned long value); +extern unsigned long read_G_W_IMASK_MMU_CR_reg_value(void); +extern void write_G_W_IMASK_MMU_CR_reg_value(unsigned long value); + +static inline virt_ctrl_mu_t read_VIRT_CTRL_MU_reg(void) +{ + virt_ctrl_mu_t virt_ctrl; + + virt_ctrl.VIRT_CTRL_MU_reg = read_VIRT_CTRL_MU_reg_value(); + return virt_ctrl; +} +static inline 
void write_VIRT_CTRL_MU_reg(virt_ctrl_mu_t virt_ctrl) +{ + write_VIRT_CTRL_MU_reg_value(virt_ctrl.VIRT_CTRL_MU_reg); +} + +static inline unsigned int read_GID_reg(void) +{ + return read_GID_reg_value(); +} +static inline void write_GID_reg(unsigned int mmu_gid) +{ + write_GID_reg_value(MMU_GID(mmu_gid)); +} + +static inline mmu_reg_t read_SH_MMU_CR_reg(void) +{ + return __mmu_reg(read_SH_MMU_CR_reg_value()); +} +static inline void write_SH_MMU_CR_reg(mmu_reg_t mmu_cr) +{ + write_SH_MMU_CR_reg_value(mmu_reg_val(mmu_cr)); +} + +static inline mmu_reg_t read_G_W_IMASK_MMU_CR_reg(void) +{ + return __mmu_reg(read_G_W_IMASK_MMU_CR_reg_value()); +} +static inline void write_G_W_IMASK_MMU_CR_reg(mmu_reg_t mmu_cr_mask) +{ + write_G_W_IMASK_MMU_CR_reg_value(mmu_reg_val(mmu_cr_mask)); +} + +static inline unsigned int read_SH_PID_reg(void) +{ + return read_SH_PID_reg_value(); +} +static inline void write_SH_PID_reg(unsigned int mmu_pid) +{ + write_SH_PID_reg_value(MMU_PID(mmu_pid)); +} + +static inline e2k_addr_t read_SH_OS_PPTB_reg(void) +{ + return read_SH_OS_PPTB_reg_value(); +} +static inline void write_SH_OS_PPTB_reg(e2k_addr_t phys_addr) +{ + write_SH_OS_PPTB_reg_value(MMU_ADDR_TO_PPTB(phys_addr)); +} + +static inline e2k_addr_t read_SH_OS_VPTB_reg(void) +{ + return read_SH_OS_VPTB_reg_value(); +} +static inline void write_SH_OS_VPTB_reg(e2k_addr_t virt_addr) +{ + write_SH_OS_VPTB_reg_value(MMU_ADDR_TO_VPTB(virt_addr)); +} + +static inline e2k_addr_t read_GP_PPTB_reg(void) +{ + return read_GP_PPTB_reg_value(); +} +static inline void write_GP_PPTB_reg(e2k_addr_t phys_addr) +{ + write_GP_PPTB_reg_value(MMU_ADDR_TO_PPTB(phys_addr)); +} + +static inline e2k_addr_t read_GP_VPTB_reg(void) +{ + return read_GP_VPTB_reg_value(); +} +static inline void write_GP_VPTB_reg(e2k_addr_t virt_addr) +{ + write_GP_VPTB_reg_value(MMU_ADDR_TO_VPTB(virt_addr)); +} + +static inline e2k_addr_t read_SH_OS_VAB_reg(void) +{ + return read_SH_OS_VAB_reg_value(); +} +static inline void 
write_SH_OS_VAB_reg(e2k_addr_t virt_addr) +{ + write_SH_OS_VAB_reg_value(MMU_ADDR_TO_VAB(virt_addr)); +} +#endif /* ! __ASSEMBLY__ */ + +#endif /* _E2K_KVM_MMU_HV_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/kvm/mmu_hv_regs_types.h b/arch/e2k/include/asm/kvm/mmu_hv_regs_types.h new file mode 100644 index 000000000000..36e0d78b5f05 --- /dev/null +++ b/arch/e2k/include/asm/kvm/mmu_hv_regs_types.h @@ -0,0 +1,253 @@ +/* + * asm-e2k/mmu_regs.h: E2K MMU structures & registers. + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_MMU_HV_REGS_TYPES_H_ +#define _E2K_KVM_MMU_HV_REGS_TYPES_H_ + +#include +#include + +/* + * Structures of MMU registers for hardware virtualized extensions + */ + +/* MMU address to access to MMU internal registers */ + +#define _MMU_VIRT_CTRL_NO 0x40 /* MMU virtualization control */ +#define _MMU_GID_NO 0x41 /* guest machine ID */ +#define _MMU_GP_PPTB_NO 0x43 /* physical base of guest PTs */ +#define _MMU_INTC_INFO_NO 0x44 /* MMU intercept info */ +#define _MMU_INTC_PTR_NO 0x45 /* MMU intercept info pointer */ +#define _MMU_SH_OS_VPTB_NO 0x46 /* virtual base of guest shadow PTs */ +#define _MMU_SH_OS_PPTB_NO 0x47 /* physical base of guest shadow PTs */ +#define _MMU_CR_G_W_IMASK_NO 0x48 /* mask of MMU_CR bits access to */ + /* control intercepts */ +#define _MMU_SH_PID_NO 0x49 /* shadow register of process ID */ +#define _MMU_SH_MMU_CR_NO 0x4a /* shadow register of control reg. 
*/ + +#define MMU_ADDR_VIRT_CTRL MMU_REG_NO_TO_MMU_ADDR(_MMU_VIRT_CTRL_NO) +#define MMU_ADDR_GID MMU_REG_NO_TO_MMU_ADDR(_MMU_GID_NO) +#define MMU_ADDR_GP_PPTB MMU_REG_NO_TO_MMU_ADDR(_MMU_GP_PPTB_NO) +#define MMU_ADDR_INTC_INFO MMU_REG_NO_TO_MMU_ADDR(_MMU_INTC_INFO_NO) +#define MMU_ADDR_INTC_PTR MMU_REG_NO_TO_MMU_ADDR(_MMU_INTC_PTR_NO) +#define MMU_ADDR_SH_OS_VPTB MMU_REG_NO_TO_MMU_ADDR(_MMU_SH_OS_VPTB_NO) +#define MMU_ADDR_SH_OS_PPTB MMU_REG_NO_TO_MMU_ADDR(_MMU_SH_OS_PPTB_NO) +#define MMU_ADDR_CR_G_W_IMASK MMU_REG_NO_TO_MMU_ADDR(_MMU_CR_G_W_IMASK_NO) +#define MMU_ADDR_SH_PID MMU_REG_NO_TO_MMU_ADDR(_MMU_SH_PID_NO) +#define MMU_ADDR_SH_MMU_CR MMU_REG_NO_TO_MMU_ADDR(_MMU_SH_MMU_CR_NO) + +/* MMU internel register contents */ + +/* + * MMU Guest Process (machine #) ID MMU_GID + */ + +#define MMU_GID_SIZE MMU_PID_SIZE + +/* + * Kernel virtual memory context + */ +#define E2K_KERNEL_GID 0x000 /* defined by hardware */ + +#define MMU_GID(gid) MMU_PID(gid) +#define MMU_KERNEL_GID MMU_GID(E2K_KERNEL_GID) + +/* + * MMU Virtual Control register + */ + +typedef union virt_ctrl_mu { + struct { + u64 evn_c : 36; /* [35: 0] */ + u64 __resf : 28; /* [63:38] */ + }; + struct { + /* env_c: */ + u64 rr_mmu_cr : 1; /* [ 0] */ + u64 rr_pptb : 1; /* [ 1] */ + u64 rr_vptb : 1; /* [ 2] */ + u64 rr_apic_base : 1; /* [ 3] */ + u64 rr_mtrr_pat : 1; /* [ 4] */ + u64 rr_ph_pci_b : 1; /* [ 5] */ + u64 rr_dbg : 1; /* [ 6] */ + u64 rr_dbg1 : 1; /* [ 7] */ + u64 rw_mmu_cr : 1; /* [ 8] */ + u64 rw_pptb : 1; /* [ 9] */ + u64 rw_vptb : 1; /* [10] */ + u64 rw_apic_base : 1; /* [11] */ + u64 rw_mtrr_pat : 1; /* [12] */ + u64 rw_ph_pci_b : 1; /* [13] */ + u64 rw_dbg : 1; /* [14] */ + u64 rw_dbg1 : 1; /* [15] */ + u64 pma : 1; /* [16] */ + u64 fl_dc : 1; /* [17] */ + u64 fl_dcl : 1; /* [18] */ + u64 fl_ic : 1; /* [19] */ + u64 fl_icl_u : 1; /* [20] */ + u64 fl_icl_p : 1; /* [21] */ + u64 fl_tlb : 1; /* [22] */ + u64 fl_tlbpg : 1; /* [23] */ + u64 fl_tlb2pg : 1; /* [24] */ + u64 prb_entry : 1; /* [25] */ + 
u64 evn_c_res : 10; /* [35:26] */ + /* other fields */ + u64 gp_pt_en : 1; /* [36] */ + u64 sh_pt_en : 1; /* [37] */ + u64 __resb : 26; /* [63:38] */ + }; + u64 word; /* as entire register */ +} virt_ctrl_mu_t; +#define VIRT_CTRL_MU_evn_c evn_c /* events mask to intercept */ +#define VIRT_CTRL_MU_rr_mmu_cr rr_mmu_cr +#define VIRT_CTRL_MU_rr_u_pptb rr_pptb +#define VIRT_CTRL_MU_rr_u_vptb rr_vptb +#define VIRT_CTRL_MU_rr_apic_base rr_apic_base +#define VIRT_CTRL_MU_rr_mtrr_pat rr_mtrr_pat +#define VIRT_CTRL_MU_rr_ph_pci_b rr_ph_pci_b +#define VIRT_CTRL_MU_rr_dbg rr_dbg +#define VIRT_CTRL_MU_rr_dbg1 rr_dbg1 +#define VIRT_CTRL_MU_rw_mmu_cr rw_mmu_cr +#define VIRT_CTRL_MU_rw_u_pptb rw_pptb +#define VIRT_CTRL_MU_rw_u_vptb rw_vptb +#define VIRT_CTRL_MU_rw_apic_base rw_apic_base +#define VIRT_CTRL_MU_rw_mtrr_pat rw_mtrr_pat +#define VIRT_CTRL_MU_rw_ph_pci_b rw_ph_pci_b +#define VIRT_CTRL_MU_rw_dbg rw_dbg +#define VIRT_CTRL_MU_rw_dbg1 rw_dbg1 +#define VIRT_CTRL_MU_pma pma +#define VIRT_CTRL_MU_fl_dc fl_dc +#define VIRT_CTRL_MU_fl_dcl fl_dcl +#define VIRT_CTRL_MU_fl_ic fl_ic +#define VIRT_CTRL_MU_fl_icl_u fl_icl_u +#define VIRT_CTRL_MU_fl_icl_p fl_icl_p +#define VIRT_CTRL_MU_fl_tlb fl_tlb +#define VIRT_CTRL_MU_fl_tlbpg fl_tlbpg +#define VIRT_CTRL_MU_fl_tlb2pg fl_tlb2pg +#define VIRT_CTRL_MU_prb_entry prb_entry + /* GPA -> PA translation enable */ +#define VIRT_CTRL_MU_gp_pt_en gp_pt_en + /* shadow Page Tables enable */ +#define VIRT_CTRL_MU_sh_pt_en sh_pt_en +#define VIRT_CTRL_MU_reg word /* [63: 0] - entire register */ + +/* Bits mask of VIRT_CTRL_MU fields and flags */ +#define VIRT_CTRL_MU_ENV_C_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_evn_c = -1, }.word) +#define VIRT_CTRL_MU_RR_MMU_CR_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_mmu_cr = 1, }.word) +#define VIRT_CTRL_MU_RR_U_PPTB_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_pptb = 1, }.word) +#define VIRT_CTRL_MU_RR_U_VPTB_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_vptb = 1, }.word) +#define 
VIRT_CTRL_MU_RR_APIC_BASE_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_apic_base = 1, }.word) +#define VIRT_CTRL_MU_RR_MTRR_PAT_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_mtrr_pat = 1, }.word) +#define VIRT_CTRL_MU_RR_PH_PCI_B_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_ph_pci_b = 1, }.word) +#define VIRT_CTRL_MU_RR_DBG_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_dbg = 1, }.word) +#define VIRT_CTRL_MU_RR_DBG1_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_dbg1 = 1, }.word) +#define VIRT_CTRL_MU_RW_MMU_CR_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_mmu_cr = 1, }.word) +#define VIRT_CTRL_MU_RW_U_PPTB_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_pptb = 1, }.word) +#define VIRT_CTRL_MU_RW_U_VPTB_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_vptb = 1, }.word) +#define VIRT_CTRL_MU_RW_APIC_BASE_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_apic_base = 1, }.word) +#define VIRT_CTRL_MU_RW_MTRR_PAT_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_mtrr_pat = 1, }.word) +#define VIRT_CTRL_MU_RW_PH_PCI_B_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_ph_pci_b = 1, }.word) +#define VIRT_CTRL_MU_RW_DBG_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_dbg = 1, }.word) +#define VIRT_CTRL_MU_RW_DBG1_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_dbg1 = 1, }.word) +#define VIRT_CTRL_MU_PMA_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_pma = 1, }.word) +#define VIRT_CTRL_MU_FL_DC_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_fl_dc = 1, }.word) +#define VIRT_CTRL_MU_FL_DCL_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_fl_dcl = 1, }.word) +#define VIRT_CTRL_MU_FL_IC_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_fl_ic = 1, }.word) +#define VIRT_CTRL_MU_FL_ICL_U_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_fl_icl_u = 1, }.word) +#define VIRT_CTRL_MU_FL_ICL_P_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_fl_icl_p = 1, }.word) +#define VIRT_CTRL_MU_FL_TLB_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_fl_tlb = 1, }.word) +#define VIRT_CTRL_MU_FL_TLBPG_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_fl_tlbpg = 1, 
}.word) +#define VIRT_CTRL_MU_FL_TLB2PG_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_fl_tlb2pg = 1, }.word) +#define VIRT_CTRL_MU_PRB_ENTRY_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_prb_entry = 1, }.word) +#define VIRT_CTRL_MU_GP_PT_EN_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_gp_pt_en = 1, }.word) +#define VIRT_CTRL_MU_SH_PT_EN_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_sh_pt_en = 1, }.word) + +typedef union { + struct { + u64 event_code : 8; + u64 guest_pt_lev_fin : 1; + u64 guest_pt_lev : 3; + u64 ignore_wr_rights : 1; + u64 __reserved : 51; + }; + u64 word; +} intc_info_mu_hdr_t; + +#define intc_mu_info_lo_get_event_code(x) ((x) & 0xff) + +/* Possible values for `INTC_INFO_MU[2 * j].event_code' */ +typedef enum e2k_int_info_mu_event_code { + IME_FORCED = 0, + IME_FORCED_GVA = 1, + IME_SHADOW_DATA = 2, + IME_GPA_DATA = 3, + IME_GPA_INSTR = 4, + IME_GPA_AINSTR = 5, + IME_RESERVED_6 = 6, + IME_RESERVED_7 = 7, + IME_MAS_IOADDR = 8, + IME_READ_MU = 9, + IME_WRITE_MU = 10, + IME_CACHE_FLUSH = 11, + IME_CACHE_LINE_FLUSH = 12, + IME_ICACHE_FLUSH = 13, + IME_ICACHE_LINE_FLUSH_USER = 14, + IME_ICACHE_LINE_FLUSH_SYSTEM = 15, + IME_TLB_FLUSH = 16, + IME_TLB_PAGE_FLUSH_LAST = 17, + IME_TLB_PAGE_FLUSH_UPPER = 18, + IME_TLB_ENTRY_PROBE = 19, + MU_INTC_EVENTS_MAX +} intc_info_mu_event_code_t; + +typedef struct { + intc_info_mu_hdr_t hdr; + unsigned long gpa; + unsigned long gva; + unsigned long data; + tc_cond_t condition; + unsigned long data_ext; + tc_mask_t mask; + bool no_restore; + bool modify_data; + unsigned long mod_data; + unsigned long mod_data_ext; +} intc_info_mu_t; + +#define INTC_INFO_MU_MAX 77 +#define INTC_PTR_MU_SIZE 7 +#define INTC_INFO_MU_ITEM_SIZE 7 +#define INTC_INFO_MU_ITEM_MAX (INTC_INFO_MU_MAX / INTC_INFO_MU_ITEM_SIZE) + +#endif /* _E2K_KVM_MMU_HV_REGS_TYPES_H_ */ diff --git a/arch/e2k/include/asm/kvm/mmu_regs_access.h b/arch/e2k/include/asm/kvm/mmu_regs_access.h new file mode 100644 index 000000000000..3b78f6d990a5 --- /dev/null +++ 
b/arch/e2k/include/asm/kvm/mmu_regs_access.h @@ -0,0 +1,852 @@ +/* + * E2K MMU registers access virtualization for KVM guest + * + * Copyright 2011 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_MMU_REGS_ACCESS_H_ +#define _E2K_KVM_MMU_REGS_ACCESS_H_ + +#ifndef __ASSEMBLY__ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +/* + * Basic functions accessing MMU on guest. + */ +#define GUEST_MMU_REGS_BASE (offsetof(kvm_vcpu_state_t, mmu) + \ + offsetof(kvm_mmu_state_t, regs)) +#define GUEST_MMU_REG(reg_no) (GUEST_MMU_REGS_BASE + \ + ((reg_no) * sizeof(mmu_reg_t))) +#define GUEST_GET_MMU_REG(reg_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_MMU_REG(reg_no)) +#define GUEST_SET_MMU_REG(reg_no, value) \ + E2K_STORE_GUEST_VCPU_STATE_D(GUEST_MMU_REG(reg_no), value) +#define GUEST_TRAP_CELLAR_BASE (offsetof(kvm_vcpu_state_t, mmu) + \ + offsetof(kvm_mmu_state_t, tcellar)) +#define GUEST_TC_ENTRY(tc_no) (GUEST_TRAP_CELLAR_BASE + \ + ((tc_no) * sizeof(trap_cellar_t))) +#define GUEST_GET_TC_ADDRESS(tc_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_TC_ENTRY(tc_no) + \ + offsetof(trap_cellar_t, address)) +#define GUEST_GET_TC_CONDITION(tc_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_TC_ENTRY(tc_no) + \ + offsetof(trap_cellar_t, condition)) +#define GUEST_GET_TC_DATA(tc_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_TC_ENTRY(tc_no) + \ + offsetof(trap_cellar_t, data)) +#define GUEST_MMU_DEBUG_REGS_BASE \ + (offsetof(kvm_vcpu_state_t, mmu) + \ + offsetof(kvm_mmu_state_t, debug_regs)) +#define GUEST_MMU_DEBUG_REG(reg_no) \ + (GUEST_MMU_DEBUG_REGS_BASE + \ + ((reg_no) * sizeof(mmu_reg_t))) +#define GUEST_GET_MMU_DEBUG_REG(reg_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_MMU_DEBUG_REG(reg_no)) +#define GUEST_SET_MMU_DEBUG_REG(reg_no, 
value) \ + E2K_STORE_GUEST_VCPU_STATE_D(GUEST_MMU_DEBUG_REG(reg_no), value) + +/* + * Write/read MMU register + */ +static inline void KVM_WRITE_MMU_REG(mmu_addr_t mmu_addr, mmu_reg_t mmu_reg) +{ + unsigned long mmu_reg_no = MMU_REG_NO_FROM_MMU_ADDR(mmu_addr); + + GUEST_SET_MMU_REG(mmu_reg_no, mmu_reg); + if (IS_HV_GM()) + NATIVE_WRITE_MMU_REG(mmu_addr, mmu_reg); +} + +static inline mmu_reg_t KVM_READ_MMU_REG(mmu_addr_t mmu_addr) +{ + unsigned long mmu_reg_no = MMU_REG_NO_FROM_MMU_ADDR(mmu_addr); + + if (likely(IS_HV_GM())) { + return (mmu_reg_t)NATIVE_READ_MMU_REG(mmu_addr); + } else { + return (mmu_reg_t)GUEST_GET_MMU_REG(mmu_reg_no); + } +} +static inline void BOOT_KVM_WRITE_MMU_REG(mmu_addr_t mmu_addr, + mmu_reg_t mmu_reg) +{ + unsigned long mmu_reg_no = MMU_REG_NO_FROM_MMU_ADDR(mmu_addr); + + GUEST_SET_MMU_REG(mmu_reg_no, mmu_reg); + if (BOOT_IS_HV_GM()) + NATIVE_WRITE_MMU_REG(mmu_addr, mmu_reg); +} + +static inline mmu_reg_t BOOT_KVM_READ_MMU_REG(mmu_addr_t mmu_addr) +{ + unsigned long mmu_reg_no = MMU_REG_NO_FROM_MMU_ADDR(mmu_addr); + + if (likely(BOOT_IS_HV_GM())) { + return (mmu_reg_t)NATIVE_READ_MMU_REG(mmu_addr); + } else { + return (mmu_reg_t)GUEST_GET_MMU_REG(mmu_reg_no); + } +} + +static inline void KVM_WRITE_MMU_OS_PPTB_REG(mmu_reg_t reg_val) +{ + KVM_WRITE_MMU_REG(MMU_ADDR_OS_PPTB, reg_val); +} +static inline unsigned long KVM_READ_MMU_OS_PPTB_REG(void) +{ + return mmu_reg_val(KVM_READ_MMU_REG(MMU_ADDR_OS_PPTB)); +} +static inline void KVM_WRITE_MMU_OS_VPTB_REG(mmu_reg_t reg_val) +{ + KVM_WRITE_MMU_REG(MMU_ADDR_OS_VPTB, reg_val); +} +static inline unsigned long KVM_READ_MMU_OS_VPTB_REG(void) +{ + return mmu_reg_val(KVM_READ_MMU_REG(MMU_ADDR_OS_VPTB)); +} +static inline void KVM_WRITE_MMU_OS_VAB_REG(mmu_reg_t reg_val) +{ + KVM_WRITE_MMU_REG(MMU_ADDR_OS_VAB, reg_val); +} +static inline unsigned long KVM_READ_MMU_OS_VAB_REG(void) +{ + return mmu_reg_val(KVM_READ_MMU_REG(MMU_ADDR_OS_VAB)); +} +static inline void KVM_WRITE_MMU_PID_REG(mmu_reg_t reg_val) 
+{ + KVM_WRITE_MMU_REG(MMU_ADDR_PID, reg_val); + if (IS_HV_GM()) { + /* FIXME: guest should fully control own PTs including */ + /* all hardware MMU registers, but it is not so now, */ + /* for example PT roots and context registers are controled */ + /* by hypervisor as for paravirtualized kernels */ + native_flush_TLB_all(); + } +} +static inline unsigned long KVM_READ_MMU_PID_REG(void) +{ + return mmu_reg_val(KVM_READ_MMU_REG(MMU_ADDR_PID)); +} + +static inline void BOOT_KVM_WRITE_MMU_OS_PPTB_REG(mmu_reg_t reg_val) +{ + BOOT_KVM_WRITE_MMU_REG(MMU_ADDR_OS_PPTB, reg_val); +} +static inline unsigned long BOOT_KVM_READ_MMU_OS_PPTB_REG(void) +{ + return mmu_reg_val(BOOT_KVM_READ_MMU_REG(MMU_ADDR_OS_PPTB)); +} +static inline void BOOT_KVM_WRITE_MMU_OS_VPTB_REG(mmu_reg_t reg_val) +{ + BOOT_KVM_WRITE_MMU_REG(MMU_ADDR_OS_VPTB, reg_val); +} +static inline unsigned long BOOT_KVM_READ_MMU_OS_VPTB_REG(void) +{ + return mmu_reg_val(BOOT_KVM_READ_MMU_REG(MMU_ADDR_OS_VPTB)); +} +static inline void BOOT_KVM_WRITE_MMU_OS_VAB_REG(mmu_reg_t reg_val) +{ + BOOT_KVM_WRITE_MMU_REG(MMU_ADDR_OS_VAB, reg_val); +} +static inline unsigned long BOOT_KVM_READ_MMU_OS_VAB_REG(void) +{ + return mmu_reg_val(BOOT_KVM_READ_MMU_REG(MMU_ADDR_OS_VAB)); +} +static inline void BOOT_KVM_WRITE_MMU_PID_REG(mmu_reg_t reg_val) +{ + BOOT_KVM_WRITE_MMU_REG(MMU_ADDR_PID, reg_val); + if (BOOT_IS_HV_GM()) { + /* FIXME: guest should fully control own PTs including */ + /* all hardware MMU registers, but it is not so now, */ + /* for example PT roots and context registers are controled */ + /* by hypervisor as for paravirtualized kernels */ + NATIVE_FLUSH_TLB_ALL(_flush_op_tlb_all); + } +} +static inline unsigned long BOOT_KVM_READ_MMU_PID_REG(void) +{ + return mmu_reg_val(BOOT_KVM_READ_MMU_REG(MMU_ADDR_PID)); +} + +static inline unsigned int KVM_READ_MMU_TRAP_COUNT(void) +{ + return mmu_reg_val(KVM_READ_MMU_REG(MMU_ADDR_TRAP_COUNT)); +} + +static inline void KVM_RESET_MMU_TRAP_COUNT(void) +{ + 
KVM_WRITE_MMU_REG(MMU_ADDR_TRAP_COUNT, 0); +} + +static inline void BOOT_KVM_RESET_MMU_TRAP_COUNT(void) +{ + BOOT_KVM_WRITE_MMU_REG(MMU_ADDR_TRAP_COUNT, 0); +} + +static inline unsigned long KVM_READ_TC_ADDRESS(int tc_no) +{ + return GUEST_GET_TC_ADDRESS(tc_no); +} + +static inline tc_cond_t KVM_READ_TC_CONDITION(int tc_no) +{ + return (tc_cond_t)(u64)GUEST_GET_TC_CONDITION(tc_no); +} + +static inline unsigned long KVM_READ_TC_DATA(int tc_no) +{ + /* FIXME: data has tag, so here should be load tagged value */ + return GUEST_GET_TC_DATA(tc_no); +} + +/* + * Write/read Data TLB register + */ + +static inline void KVM_WRITE_DTLB_REG(tlb_addr_t tlb_addr, mmu_reg_t mmu_reg) +{ + if (IS_HV_GM()) { + NATIVE_WRITE_DTLB_REG(tlb_addr, mmu_reg); + } else { + panic("KVM_WRITE_DTLB_REG() is not yet implemented\n"); + } +} + +static inline mmu_reg_t KVM_READ_DTLB_REG(tlb_addr_t tlb_addr) +{ + if (IS_HV_GM()) { + return NATIVE_READ_DTLB_REG(tlb_addr); + } else { + return kvm_read_dtlb_reg(tlb_addr); + } +} + +/* + * Flush TLB page/entry + */ + +static inline void +KVM_FLUSH_TLB_ENTRY(flush_op_t flush_op, flush_addr_t flush_addr) +{ + if (IS_HV_GM()) { + /* FIXME: guest should fully control own PTs including */ + /* all hardware MMU registers, but it is not so now, */ + /* for example PT roots and context registers are controled */ + /* by hypervisor as for paravirtualized kernels */ + native_flush_TLB_all(); + } else if (IS_ENABLED(CONFIG_KVM_PARAVIRT_TLB_FLUSH)) { + HYPERVISOR_flush_tlb_range(flush_addr_get_va(flush_addr), + flush_addr_get_va(flush_addr)); + } +} + +/* + * Flush DCACHE line + */ + +static inline void +KVM_FLUSH_DCACHE_LINE(e2k_addr_t virt_addr) +{ + if (IS_HV_GM()) { + /* + * Prevent putting privilidged instruction strd from + * NATIVE_FLUSH_DCACHE_LINE under predicate IS_HV_GM(). + * Even with false value of predicate it can cause priv. + * action exception in guest kernel. 
+ */ + E2K_CMD_SEPARATOR; + NATIVE_FLUSH_DCACHE_LINE(virt_addr); + } else { + kvm_flush_dcache_line(virt_addr); + } +} + +/* + * Clear DCACHE L1 set + */ +static inline void +KVM_CLEAR_DCACHE_L1_SET(e2k_addr_t virt_addr, unsigned long set) +{ + if (IS_HV_GM()) { + NATIVE_CLEAR_DCACHE_L1_SET(virt_addr, set); + } else { + kvm_clear_dcache_l1_set(virt_addr, set); + } +} + +/* + * Write/read DCACHE L2 registers + */ +static inline void +KVM_WRITE_DCACHE_L2_REG(unsigned long reg_val, int reg_num, int bank_num) +{ + if (IS_HV_GM()) { + NATIVE_WRITE_L2_REG(reg_val, reg_num, bank_num); + } else { + kvm_write_dcache_l2_reg(reg_val, reg_num, bank_num); + } +} +static inline unsigned long +KVM_READ_DCACHE_L2_REG(int reg_num, int bank_num) +{ + if (IS_HV_GM()) { + return NATIVE_READ_L2_REG(reg_num, bank_num); + } else { + return kvm_read_dcache_l2_reg(reg_num, bank_num); + } +} + +/* + * Flush ICACHE line + */ + +static inline void +KVM_FLUSH_ICACHE_LINE(flush_op_t flush_op, flush_addr_t flush_addr) +{ + if (IS_HV_GM()) { + NATIVE_FLUSH_ICACHE_LINE(flush_op, flush_addr); + } else { + /* any switch to guest kernel now flush all TLB and caches */ + /* so precise flushing can be not implemented */ + pr_debug("KVM_FLUSH_ICACHE_LINE() is not yet implemented\n"); + } +} + +/* + * Flush and invalidate or write back CACHE(s) (invalidate all caches + * of the processor) + */ + +static inline void +KVM_FLUSH_CACHE_L12(flush_op_t flush_op) +{ + if (IS_HV_GM()) { + native_write_back_CACHE_L12(); + } else { + panic("KVM_FLUSH_CACHE_L12() is not yet implemented\n"); + } +} + +/* + * Flush TLB (invalidate all TLBs of the processor) + */ + +static inline void +KVM_FLUSH_TLB_ALL(flush_op_t flush_op) +{ + if (IS_HV_GM()) { + native_flush_TLB_all(); + } else if (IS_ENABLED(CONFIG_KVM_PARAVIRT_TLB_FLUSH)) { + HYPERVISOR_flush_tlb_range(0, E2K_VA_SIZE); + } +} + +/* + * Flush ICACHE (invalidate instruction caches of the processor) + */ + +static inline void +KVM_FLUSH_ICACHE_ALL(flush_op_t 
flush_op) +{ + if (IS_HV_GM()) { + native_flush_ICACHE_all(); + } else { + /* panic("KVM_FLUSH_ICACHE_ALL() is not yet implemented\n"); */ + } +} + +/* + * Get Entry probe for virtual address + */ + +static inline probe_entry_t +KVM_ENTRY_PROBE_MMU_OP(e2k_addr_t virt_addr) +{ + if (IS_HV_GM()) { + return NATIVE_ENTRY_PROBE_MMU_OP(virt_addr); + } else { + return kvm_mmu_entry_probe(virt_addr); + } +} + +/* + * Get physical address for virtual address + */ + +static inline probe_entry_t +KVM_ADDRESS_PROBE_MMU_OP(e2k_addr_t virt_addr) +{ + if (IS_HV_GM()) { + return NATIVE_ADDRESS_PROBE_MMU_OP(virt_addr); + } else { + return kvm_mmu_address_probe(virt_addr); + } +} + +/* + * Read CLW register + */ + +static inline clw_reg_t +KVM_READ_CLW_REG(clw_addr_t clw_addr) +{ + panic("KVM_READ_CLW_REG() is not yet implemented\n"); + return -1; +} + +/* + * Write CLW register + */ + +static inline void +KVM_WRITE_CLW_REG(clw_addr_t clw_addr, clw_reg_t val) +{ + panic("KVM_WRITE_CLW_REG() is not yet implemented\n"); +} + +/* + * KVM MMU DEBUG registers access + */ +static inline mmu_reg_t +KVM_READ_MMU_DEBUG_REG_VALUE(int reg_no) +{ + return GUEST_GET_MMU_DEBUG_REG(reg_no); +} +static inline void +KVM_WRITE_MMU_DEBUG_REG_VALUE(int reg_no, mmu_reg_t value) +{ + GUEST_SET_MMU_DEBUG_REG(reg_no, value); +} +static inline mmu_reg_t +KVM_READ_DDBAR0_REG_VALUE(void) +{ + return KVM_READ_MMU_DEBUG_REG_VALUE(MMU_DDBAR0_REG_NO); +} +static inline mmu_reg_t +KVM_READ_DDBAR1_REG_VALUE(void) +{ + return KVM_READ_MMU_DEBUG_REG_VALUE(MMU_DDBAR1_REG_NO); +} +static inline mmu_reg_t +KVM_READ_DDBAR2_REG_VALUE(void) +{ + return KVM_READ_MMU_DEBUG_REG_VALUE(MMU_DDBAR2_REG_NO); +} +static inline mmu_reg_t +KVM_READ_DDBAR3_REG_VALUE(void) +{ + return KVM_READ_MMU_DEBUG_REG_VALUE(MMU_DDBAR3_REG_NO); +} +static inline mmu_reg_t +KVM_READ_DDBCR_REG_VALUE(void) +{ + return KVM_READ_MMU_DEBUG_REG_VALUE(MMU_DDBCR_REG_NO); +} +static inline mmu_reg_t +KVM_READ_DDBSR_REG_VALUE(void) +{ + return 
KVM_READ_MMU_DEBUG_REG_VALUE(MMU_DDBSR_REG_NO); +} +static inline mmu_reg_t +KVM_READ_DDMAR0_REG_VALUE(void) +{ + return KVM_READ_MMU_DEBUG_REG_VALUE(MMU_DDMAR0_REG_NO); +} +static inline mmu_reg_t +KVM_READ_DDMAR1_REG_VALUE(void) +{ + return KVM_READ_MMU_DEBUG_REG_VALUE(MMU_DDMAR1_REG_NO); +} +static inline mmu_reg_t +KVM_READ_DDMCR_REG_VALUE(void) +{ + return KVM_READ_MMU_DEBUG_REG_VALUE(MMU_DDMCR_REG_NO); +} +static inline void +KVM_WRITE_DDBAR0_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBAR0_REG_NO, value); +} +static inline void +KVM_WRITE_DDBAR1_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBAR1_REG_NO, value); +} +static inline void +KVM_WRITE_DDBAR2_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBAR2_REG_NO, value); +} +static inline void +KVM_WRITE_DDBAR3_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBAR3_REG_NO, value); +} +static inline void +KVM_WRITE_DDBCR_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBCR_REG_NO, value); +} +static inline void +KVM_WRITE_DDBSR_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBSR_REG_NO, value); +} +static inline void +KVM_WRITE_DDMAR0_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDMAR0_REG_NO, value); +} +static inline void +KVM_WRITE_DDMAR1_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDMAR1_REG_NO, value); +} +static inline void +KVM_WRITE_DDMCR_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDMCR_REG_NO, value); +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* It is pure kvm kernel without paravirtualization based on pv_ops */ + +static inline void WRITE_MMU_REG(mmu_addr_t mmu_addr, mmu_reg_t mmu_reg) +{ + KVM_WRITE_MMU_REG(mmu_addr, mmu_reg); +} + +static inline mmu_reg_t READ_MMU_REG(mmu_addr_t mmu_addr) +{ + return (mmu_reg_t)KVM_READ_MMU_REG(mmu_addr); +} + +#define BOOT_WRITE_MMU_REG(addr_val, reg_val) \ + 
BOOT_KVM_WRITE_MMU_REG(addr_val, reg_val) +#define BOOT_READ_MMU_REG(addr_val) \ + BOOT_KVM_READ_MMU_REG(addr_val) + +static inline void WRITE_MMU_OS_PPTB(mmu_reg_t reg_val) +{ + KVM_WRITE_MMU_OS_PPTB_REG(reg_val); +} +static inline unsigned long READ_MMU_OS_PPTB(void) +{ + return KVM_READ_MMU_OS_PPTB_REG(); +} +static inline void WRITE_MMU_OS_VPTB(mmu_reg_t reg_val) +{ + KVM_WRITE_MMU_OS_VPTB_REG(reg_val); +} +static inline unsigned long READ_MMU_OS_VPTB(void) +{ + return KVM_READ_MMU_OS_VPTB_REG(); +} +static inline void WRITE_MMU_OS_VAB(mmu_reg_t reg_val) +{ + KVM_WRITE_MMU_OS_VAB_REG(reg_val); +} +static inline unsigned long READ_MMU_OS_VAB(void) +{ + return KVM_READ_MMU_OS_VAB_REG(); +} +static inline void WRITE_MMU_PID(mmu_reg_t reg_val) +{ + KVM_WRITE_MMU_PID_REG(reg_val); +} +static inline unsigned long READ_MMU_PID(void) +{ + return KVM_READ_MMU_PID_REG(); +} + +static inline void BOOT_WRITE_MMU_OS_PPTB(mmu_reg_t reg_val) +{ + BOOT_KVM_WRITE_MMU_OS_PPTB_REG(reg_val); +} +static inline unsigned long BOOT_READ_MMU_OS_PPTB(void) +{ + return BOOT_KVM_READ_MMU_OS_PPTB_REG(); +} +static inline void BOOT_WRITE_MMU_OS_VPTB(mmu_reg_t reg_val) +{ + BOOT_KVM_WRITE_MMU_OS_VPTB_REG(reg_val); +} +static inline unsigned long BOOT_READ_MMU_OS_VPTB(void) +{ + return BOOT_KVM_READ_MMU_OS_VPTB_REG(); +} +static inline void BOOT_WRITE_MMU_OS_VAB(mmu_reg_t reg_val) +{ + BOOT_KVM_WRITE_MMU_OS_VAB_REG(reg_val); +} +static inline unsigned long BOOT_READ_MMU_OS_VAB(void) +{ + return BOOT_KVM_READ_MMU_OS_VAB_REG(); +} +static inline void BOOT_WRITE_MMU_PID(mmu_reg_t reg_val) +{ + BOOT_KVM_WRITE_MMU_PID_REG(reg_val); +} +static inline unsigned long BOOT_READ_MMU_PID(void) +{ + return BOOT_KVM_READ_MMU_PID_REG(); +} + +/* + * Write/read Data TLB register + */ + +static inline void WRITE_DTLB_REG(tlb_addr_t tlb_addr, mmu_reg_t mmu_reg) +{ + KVM_WRITE_DTLB_REG(tlb_addr, mmu_reg); +} + +static inline mmu_reg_t READ_DTLB_REG(tlb_addr_t tlb_addr) +{ + return KVM_READ_DTLB_REG(tlb_addr); 
+} + +/* + * Flush TLB page/entry + */ + +static inline void +FLUSH_TLB_ENTRY(flush_op_t flush_op, flush_addr_t flush_addr) +{ + KVM_FLUSH_TLB_ENTRY(flush_op, flush_addr); +} + +/* + * Flush DCACHE line + */ + +static inline void FLUSH_DCACHE_LINE(e2k_addr_t virt_addr) +{ + KVM_FLUSH_DCACHE_LINE(virt_addr); +} +static inline void FLUSH_DCACHE_LINE_OFFSET(e2k_addr_t virt_addr, size_t offset) +{ + KVM_FLUSH_DCACHE_LINE(virt_addr + offset); +} + + +/* + * Clear DCACHE L1 set + */ +static inline void +CLEAR_DCACHE_L1_SET(e2k_addr_t virt_addr, unsigned long set) +{ + KVM_CLEAR_DCACHE_L1_SET(virt_addr, set); +} + +/* + * Write/read DCACHE L2 registers + */ +static inline void +WRITE_L2_REG(unsigned long reg_val, int reg_num, int bank_num) +{ + KVM_WRITE_DCACHE_L2_REG(reg_val, reg_num, bank_num); +} +static inline unsigned long +READ_L2_REG(int reg_num, int bank_num) +{ + return KVM_READ_DCACHE_L2_REG(reg_num, bank_num); +} + +/* + * Flush ICACHE line + */ + +static inline void +FLUSH_ICACHE_LINE(flush_op_t flush_op, flush_addr_t flush_addr) +{ + KVM_FLUSH_ICACHE_LINE(flush_op, flush_addr); +} + +/* + * Flush and invalidate or write back CACHE(s) (invalidate all caches + * of the processor) + */ + +static inline void +FLUSH_CACHE_L12(flush_op_t flush_op) +{ + KVM_FLUSH_CACHE_L12(flush_op); +} + +/* + * Flush TLB (invalidate all TLBs of the processor) + */ + +static inline void +FLUSH_TLB_ALL(flush_op_t flush_op) +{ + KVM_FLUSH_TLB_ALL(flush_op); +} + +/* + * Flush ICACHE (invalidate instruction caches of the processor) + */ + +static inline void +FLUSH_ICACHE_ALL(flush_op_t flush_op) +{ + KVM_FLUSH_ICACHE_ALL(flush_op); +} + +/* + * Get Entry probe for virtual address + */ + +static inline probe_entry_t +ENTRY_PROBE_MMU_OP(e2k_addr_t virt_addr) +{ + return KVM_ENTRY_PROBE_MMU_OP(virt_addr); +} + +/* + * Get physical address for virtual address + */ + +static inline probe_entry_t +ADDRESS_PROBE_MMU_OP(e2k_addr_t virt_addr) +{ + return KVM_ADDRESS_PROBE_MMU_OP(virt_addr); 
+} + +/* + * Read CLW register + */ + +static inline clw_reg_t +READ_CLW_REG(clw_addr_t clw_addr) +{ + return KVM_READ_CLW_REG(clw_addr); +} + +/* + * Write CLW register + */ + +static inline void +WRITE_CLW_REG(clw_addr_t clw_addr, clw_reg_t val) +{ + KVM_WRITE_CLW_REG(clw_addr, val); +} + +/* + * KVM MMU DEBUG registers access + */ +static inline mmu_reg_t +READ_DDBAR0_REG_VALUE(void) +{ + return KVM_READ_DDBAR0_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDBAR1_REG_VALUE(void) +{ + return KVM_READ_DDBAR1_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDBAR2_REG_VALUE(void) +{ + return KVM_READ_DDBAR2_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDBAR3_REG_VALUE(void) +{ + return KVM_READ_DDBAR3_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDBCR_REG_VALUE(void) +{ + return KVM_READ_DDBCR_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDBSR_REG_VALUE(void) +{ + return KVM_READ_DDBSR_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDMAR0_REG_VALUE(void) +{ + return KVM_READ_DDMAR0_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDMAR1_REG_VALUE(void) +{ + return KVM_READ_DDMAR1_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDMCR_REG_VALUE(void) +{ + return KVM_READ_DDMCR_REG_VALUE(); +} +static inline void +WRITE_DDBAR0_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_DDBAR0_REG_VALUE(value); +} +static inline void +WRITE_DDBAR1_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_DDBAR1_REG_VALUE(value); +} +static inline void +WRITE_DDBAR2_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_DDBAR2_REG_VALUE(value); +} +static inline void +WRITE_DDBAR3_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_DDBAR3_REG_VALUE(value); +} +static inline void +WRITE_DDBCR_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_DDBCR_REG_VALUE(value); +} +static inline void +WRITE_DDBSR_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_DDBSR_REG_VALUE(value); +} +static inline void +WRITE_DDMAR0_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_DDMAR0_REG_VALUE(value); +} +static inline void +WRITE_DDMAR1_REG_VALUE(mmu_reg_t value) +{ 
+ KVM_WRITE_DDMAR1_REG_VALUE(value); +} +static inline void +WRITE_DDMCR_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_DDMCR_REG_VALUE(value); +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! __ASSEMBLY__ */ + +#endif /* _E2K_KVM_MMU_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/kvm/nid.h b/arch/e2k/include/asm/kvm/nid.h new file mode 100644 index 000000000000..9b9a88088d29 --- /dev/null +++ b/arch/e2k/include/asm/kvm/nid.h @@ -0,0 +1,89 @@ +#ifndef _ASM_E2K_KVM_NID_H +#define _ASM_E2K_KVM_NID_H + +/* + * Numeric identifier (nid) allocator + */ + +#include +#include +#include +#include + +typedef struct kvm_nidmap { + atomic_t nr_free; /* number of free elements */ + void *page; +} kvm_nidmap_t; + +struct kvm_nid_table { + raw_spinlock_t nidmap_lock; + struct kmem_cache *nid_cachep; + char nid_cache_name[32]; + int nidmap_entries; + int nid_hash_bits; + int nid_hash_size; + kvm_nidmap_t *nidmap; + struct hlist_head *nid_hash; + int nid_max_limit; + int reserved_nids; + int last_nid; + unsigned int nidhash_shift; +}; + +#define NID_HASH_SIZE(NID_HASH_BITS) (1 << (NID_HASH_BITS)) + +typedef struct kvm_nid { + int nr; + struct hlist_node nid_chain; +} kvm_nid_t; + +#define nid_hashfn(nr, NID_HASH_BITS) \ + hash_long((unsigned long)(nr), NID_HASH_BITS) + +extern int kvm_alloc_nid(struct kvm_nid_table *nid_table, kvm_nid_t *nid); +extern void kvm_do_free_nid(kvm_nid_t *nid, struct kvm_nid_table *nid_table); +extern void kvm_free_nid(kvm_nid_t *nid, struct kvm_nid_table *nid_table); +extern int kvm_nidmap_init(struct kvm_nid_table *nid_table, + int nid_max_limit, int reserved_nids, int last_nid); +extern void kvm_nidmap_destroy(struct kvm_nid_table *nid_table); + +static inline kvm_nid_t * +kvm_find_nid(struct kvm_nid_table *nid_table, int nid_nr, int hash_index) +{ + kvm_nid_t *nid; + unsigned long flags; + + raw_spin_lock_irqsave(&nid_table->nidmap_lock, flags); + hlist_for_each_entry(nid, + &(nid_table->nid_hash[hash_index]), + nid_chain) { + if (nid->nr == 
nid_nr) { + raw_spin_unlock_irqrestore(&nid_table->nidmap_lock, + flags); + return nid; + } + } + raw_spin_unlock_irqrestore(&nid_table->nidmap_lock, flags); + return NULL; +} + +#define for_each_guest_nid_node(node, entry, next, nid_table, \ + nid_hlist_member) \ + for ((entry) = 0; (entry) < (nid_table)->nid_hash_size; (entry)++) \ + hlist_for_each_entry_safe(node, next, \ + &((nid_table)->nid_hash[entry]), \ + nid_hlist_member) +#define nid_table_lock(nid_table) \ + raw_spin_lock(&(nid_table)->nidmap_lock) +#define nid_table_unlock(nid_table) \ + raw_spin_unlock(&(nid_table)->nidmap_lock) +#define nid_table_lock_irq(nid_table) \ + raw_spin_lock_irq(&(nid_table)->nidmap_lock) +#define nid_table_unlock_irq(nid_table) \ + raw_spin_unlock_irq(&(nid_table)->nidmap_lock) +#define nid_table_lock_irqsave(nid_table, flags) \ + raw_spin_lock_irqsave(&(nid_table)->nidmap_lock, flags) +#define nid_table_unlock_irqrestore(nid_table, flags) \ + raw_spin_unlock_irqrestore(&(nid_table)->nidmap_lock, flags) + +#endif /* _ASM_E2K_KVM_NID_H */ diff --git a/arch/e2k/include/asm/kvm/page.h b/arch/e2k/include/asm/kvm/page.h new file mode 100644 index 000000000000..b76a1e585035 --- /dev/null +++ b/arch/e2k/include/asm/kvm/page.h @@ -0,0 +1,60 @@ +/* + * + * Copyright 2016 MCST, Salavat S. 
Guiliazov (atic@mcst.ru) + */ + +#ifndef _ASM_E2K_KVM_PAGE_H +#define _ASM_E2K_KVM_PAGE_H + +#ifdef __KERNEL__ + +#include + +#if defined(CONFIG_VIRTUALIZATION) +/* + * it can be: + * paravirtualized host/guest kernel + * native host kernel with virtualization support + * native guest kernel + * Shift up kernel virtual space and reserve area + * from 0x0000200000000000 + * to 0x0000400000000000 + * for guest kernel virtual space and it will be top of user virtual space + */ +/* #define NATIVE_PAGE_OFFSET 0x0000d00000000000 */ +#define HOST_PAGE_OFFSET NATIVE_PAGE_OFFSET /* 0x0000d00000000000 */ +#define GUEST_PAGE_OFFSET 0x0000200000000000 /* start and */ +#define GUEST_KERNEL_MEM_END 0x0000400000000000 /* end of guest */ + /* kernel virtual */ + /* space */ +#ifndef CONFIG_KVM_GUEST_KERNEL +/* it is host kernel with virtualization support or */ +/* paravirtualized host/guest kernel */ +#define __guest_pa(x) ((e2k_addr_t)(x) - GUEST_PAGE_OFFSET) +#define __guest_va(x) ((void *)((e2k_addr_t) (x) + GUEST_PAGE_OFFSET)) +#endif /* ! CONFIG_KVM_GUEST_KERNEL */ + +#endif /* CONFIG_VIRTUALIZATION */ + +#if !defined(CONFIG_VIRTUALIZATION) +/* it is native kernel without any virtualization */ + +#define guest_user_address_to_pva(task, addr) (-1) /* none guests */ + +#elif !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native host kernel with virtualization support */ +#define PAGE_OFFSET HOST_PAGE_OFFSET +#define BOOT_PAGE_OFFSET PAGE_OFFSET +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#else + #error "Unknown virtualization type */ +#endif /* ! CONFIG_VIRTUALIZATION */ + +#endif /* !(__KERNEL__) */ + +#endif /* ! 
_ASM_E2K_KVM_PAGE_H */ diff --git a/arch/e2k/include/asm/kvm/page_track.h b/arch/e2k/include/asm/kvm/page_track.h new file mode 100644 index 000000000000..8fd16ef0da63 --- /dev/null +++ b/arch/e2k/include/asm/kvm/page_track.h @@ -0,0 +1,85 @@ +#ifndef _ASM_E2K_KVM_PAGE_TRACK_H +#define _ASM_E2K_KVM_PAGE_TRACK_H + +#ifdef CONFIG_KVM_HV_MMU +enum kvm_page_track_mode { + KVM_PAGE_TRACK_WRITE, + KVM_PAGE_TRACK_MAX, +}; + +/* + * The notifier represented by @kvm_page_track_notifier_node is linked into + * the head which will be notified when guest is triggering the track event. + * + * Write access on the head is protected by kvm->mmu_lock, read access + * is protected by track_srcu. + */ +struct kvm_page_track_notifier_head { + struct srcu_struct track_srcu; + struct hlist_head track_notifier_list; +}; + +struct kvm_page_track_notifier_node { + struct hlist_node node; + + /* + * It is called when guest is writing the write-tracked page + * and write emulation is finished at that time. + * + * @vcpu: the vcpu where the write access happened. + * @gpa: the physical address written by guest. + * @new: the data was written to the address. + * @bytes: the written length. 
+ */ + void (*track_write)(struct kvm_vcpu *vcpu, struct gmm_struct *gmm, + gpa_t gpa, const u8 *new, int bytes); + /* + * It is called when memory slot is being moved or removed + * users can drop write-protection for the pages in that memory slot + * + * @kvm: the kvm where memory slot being moved or removed + * @slot: the memory slot being moved or removed + * @node: this node + */ + void (*track_flush_slot)(struct kvm *kvm, struct kvm_memory_slot *slot, + struct kvm_page_track_notifier_node *node); +}; + +void kvm_page_track_init(struct kvm *kvm); +void kvm_page_track_cleanup(struct kvm *kvm); + +void kvm_page_track_free_memslot(struct kvm_memory_slot *free, + struct kvm_memory_slot *dont); +int kvm_page_track_create_memslot(struct kvm_memory_slot *slot, + unsigned long npages); + +void kvm_slot_page_track_add_page(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn, + enum kvm_page_track_mode mode); +void kvm_slot_page_track_remove_page(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn, + enum kvm_page_track_mode mode); +bool kvm_page_track_is_active(struct kvm *kvm, struct kvm_memory_slot *slot, + gfn_t gfn, enum kvm_page_track_mode mode); + +void +kvm_page_track_register_notifier(struct kvm *kvm, + struct kvm_page_track_notifier_node *n); +void +kvm_page_track_unregister_notifier(struct kvm *kvm, + struct kvm_page_track_notifier_node *n); +void kvm_page_track_write(struct kvm_vcpu *vcpu, struct gmm_struct *gmm, + gpa_t gpa, const u8 *new, int bytes); +void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot); +#else /* ! 
CONFIG_KVM_HV_MMU */ +static inline void kvm_page_track_init(struct kvm *kvm) +{ + return; /* not used */ +} +static inline void kvm_page_track_cleanup(struct kvm *kvm) +{ + return; /* not used */ +} +#endif /* CONFIG_KVM_HV_MMU */ + +#endif /* _ASM_E2K_KVM_PAGE_TRACK_H */ diff --git a/arch/e2k/include/asm/kvm/pgtable-tdp.h b/arch/e2k/include/asm/kvm/pgtable-tdp.h new file mode 100644 index 000000000000..6bab8551160e --- /dev/null +++ b/arch/e2k/include/asm/kvm/pgtable-tdp.h @@ -0,0 +1,157 @@ +/* + * TDP - Two Dimensional Paging support + * GPA -> PA page table structure and common definitions. + * + * Second dimension page table to translate guest physical addresses (GPA) + * to host physical pages has same structure as native PT V6 + * + * Copyright 2018 MCST + */ + +#ifndef _ASM_E2K_KVM_PGTABLE_TDP_H +#define _ASM_E2K_KVM_PGTABLE_TDP_H + +/* + * NOTE: E2K TDP based on four levels of page tables. + */ + +#include +#include +#include + +/* max. number of physical address bits (architected) */ +#define E2K_MAX_PHYS_BITS_TDP E2K_MAX_PHYS_BITS_V6 + +#ifndef __ASSEMBLY__ + +/* + * TDP-PTE format + */ + +/* numbers of PTE's bits */ +#define _PAGE_P_BIT_TDP _PAGE_P_BIT_V6 /* Present */ +#define _PAGE_W_BIT_TDP _PAGE_W_BIT_V6 /* Writable */ +#define _PAGE_A_HW_BIT_TDP _PAGE_A_HW_BIT_V6 /* page Accessed */ +#define _PAGE_D_BIT_TDP _PAGE_D_BIT_V6 /* page Dirty */ +#define _PAGE_HUGE_BIT_TDP _PAGE_HUGE_BIT_V6 /* huge Page Size */ +#define _PAGE_MTCR_SHIFT_TDP 8 /* Memory Type */ +#define _PAGE_MTCR_BITS_NUM_TDP 2 /* Combination Rule */ +#define _PAGE_SW1_BIT_TDP _PAGE_SW1_BIT_V6 /* SoftWare bit #1 */ +#define _PAGE_SW2_BIT_TDP _PAGE_SW2_BIT_V6 /* SoftWare bit #2 */ +#define _PAGE_PFN_SHIFT_TDP _PAGE_PFN_SHIFT_V6 /* shift of Physical */ + /* Frame Number */ +#define _PAGE_MT_SHIFT_TDP _PAGE_MT_SHIFT_V6 /* shift of Memory */ + /* Type field */ +#define _PAGE_MT_BITS_NUM_TDP _PAGE_MT_BITS_NUM_V6 /* occupies 3 bits */ +#define _PAGE_NON_EX_BIT_TDP _PAGE_NON_EX_BIT_V6 /* NON 
EXecutable */ + +#define _PAGE_P_TDP (1ULL << _PAGE_P_BIT_TDP) +#define _PAGE_W_TDP (1ULL << _PAGE_W_BIT_TDP) +#define _PAGE_A_HW_TDP (1ULL << _PAGE_A_HW_BIT_TDP) +#define _PAGE_D_TDP (1ULL << _PAGE_D_BIT_TDP) +#define _PAGE_HUGE_TDP (1ULL << _PAGE_HUGE_BIT_TDP) +#define _PAGE_MTCR_TDP \ + (((1ULL << _PAGE_MTCR_BITS_NUM_TDP) - 1) << \ + _PAGE_MTCR_SHIFT_TDP) +#define _PAGE_SW1_TDP (1ULL << _PAGE_SW1_BIT_TDP) +#define _PAGE_SW2_TDP (1ULL << _PAGE_SW2_BIT_TDP) +#define _PAGE_PFN_TDP \ + ((((1ULL << E2K_MAX_PHYS_BITS_TDP) - 1) >> \ + PAGE_SHIFT) << \ + _PAGE_PFN_SHIFT_TDP) +#define _PAGE_MT_TDP \ + (((1ULL << _PAGE_MT_BITS_NUM_TDP) - 1) << _PAGE_MT_SHIFT_TDP) +#define _PAGE_NON_EX_TDP (1ULL << _PAGE_NON_EX_BIT_TDP) + +#define _PAGE_MT_GET_VAL_TDP(x) _PAGE_MT_GET_VAL(x) +#define _PAGE_MT_SET_VAL_TDP(x, mt) _PAGE_MT_SET_VAL(x, mt) + +/* convert physical address to page frame number for PTE */ +#define _PAGE_PADDR_TO_PFN_TDP(phys_addr) \ + _PAGE_PADDR_TO_PFN_V6(phys_addr) + +/* convert the page frame number from PTE to physical address */ +#define _PAGE_PFN_TO_PADDR_TDP(pte_val) _PAGE_PFN_TO_PADDR_V6(pte_val) + +static inline pteval_t +covert_uni_pte_flags_to_pte_val_tdp(const uni_pteval_t uni_flags) +{ + pteval_t pte_flags = 0; + + if (uni_flags & UNI_PAGE_PRESENT) + pte_flags |= (_PAGE_P_TDP); + if (uni_flags & UNI_PAGE_WRITE) + pte_flags |= (_PAGE_W_TDP); + if (uni_flags & UNI_PAGE_HW_ACCESS) + pte_flags |= (_PAGE_A_HW_TDP); + if (uni_flags & UNI_PAGE_DIRTY) + pte_flags |= (_PAGE_D_TDP); + if (uni_flags & UNI_PAGE_HUGE) + pte_flags |= (_PAGE_HUGE_TDP); + if (uni_flags & UNI_PAGE_NON_EX) + pte_flags |= (_PAGE_NON_EX_TDP); + if (uni_flags & UNI_PAGE_PFN) + pte_flags |= (_PAGE_PFN_TDP); + if (uni_flags & UNI_PAGE_MEM_TYPE) + pte_flags |= (_PAGE_MT_TDP); + if (uni_flags & UNI_PAGE_MEM_TYPE_RULE) + pte_flags |= (_PAGE_MTCR_TDP); + + BUG_ON(pte_flags == 0); + + return pte_flags; +} + +static inline pteval_t +fill_pte_val_tdp_flags(const uni_pteval_t uni_flags) +{ + return 
covert_uni_pte_flags_to_pte_val_tdp(uni_flags); +} +static inline pteval_t +get_pte_val_tdp_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return pte_val & covert_uni_pte_flags_to_pte_val_tdp(uni_flags); +} +static inline bool +test_pte_val_tdp_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return get_pte_val_tdp_flags(pte_val, uni_flags) != 0; +} +static inline pteval_t +set_pte_val_tdp_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return pte_val | covert_uni_pte_flags_to_pte_val_tdp(uni_flags); +} +static inline pteval_t +clear_pte_val_tdp_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return pte_val & ~covert_uni_pte_flags_to_pte_val_tdp(uni_flags); +} + +static inline unsigned int +get_pte_val_tdp_memory_type(pteval_t pte_val) +{ + return _PAGE_MT_GET_VAL_TDP(pte_val); +} +static inline pteval_t +set_pte_val_tdp_memory_type(pteval_t pte_val, unsigned int memory_type) +{ + BUG_ON(memory_type != GEN_CACHE_MT && + memory_type != GEN_NON_CACHE_MT && + memory_type != GEN_NON_CACHE_ORDERED_MT && + memory_type != EXT_PREFETCH_MT && + memory_type != EXT_NON_PREFETCH_MT && + memory_type != EXT_CONFIG_MT && + memory_type != EXT_CACHE_MT); + + return _PAGE_MT_SET_VAL_TDP(pte_val, memory_type); +} + +static inline int get_tdp_root_level(void) +{ + return E2K_PT_LEVELS_NUM; +} + +#endif /* ! __ASSEMBLY__ */ + +#endif /* ! _ASM_E2K_KVM_PGTABLE_TDP_H */ diff --git a/arch/e2k/include/asm/kvm/pgtable-x86.h b/arch/e2k/include/asm/kvm/pgtable-x86.h new file mode 100644 index 000000000000..359ba15e88f0 --- /dev/null +++ b/arch/e2k/include/asm/kvm/pgtable-x86.h @@ -0,0 +1,104 @@ +/* + * E2K ISET X86 emulation page table structure and common definitions. + * + * Copyright 2017 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _ASM_E2K_KVM_PGTABLE_X86_H +#define _ASM_E2K_KVM_PGTABLE_X86_H + +/* + * This file contains the functions and defines necessary to modify and + * use the E2K ISET V1-V5 page tables. 
+ * NOTE: E2K has four levels of page tables. + */ + +#include +#include + + +/* max. number of physical address bits (architected) */ +#define MAX_PHYS_BITS_X86_32 32 +#define MAX_PHYS_BITS_X86_64 48 + +/* virtual & physical page definitions */ +#define PAGE_SHIFT_X86 12 +#define PAGE_SIZE_X86 (1ULL << PAGE_SHIFT_X86) + +#ifndef __ASSEMBLY__ + +/* + * PTE format + */ + +#define _PAGE_BIT_PRESENT_X86 0 /* is present */ +#define _PAGE_W_BIT_X86 1 /* bit # of Writable */ +#define _PAGE_BIT_USER_X86 2 /* userspace addressable */ +#define _PAGE_BIT_PWT_X86 3 /* page write through */ +#define _PAGE_BIT_PCD_X86 4 /* page cache disabled */ +#define _PAGE_BIT_ACCESSED_X86 5 /* was accessed (raised by CPU) */ +#define _PAGE_BIT_DIRTY_X86 6 /* was written to (raised by CPU) */ +#define _PAGE_BIT_PSE_X86 7 /* 4 MB (or 2MB) page */ +#define _PAGE_BIT_PAT_X86 7 /* on 4KB pages */ +#define _PAGE_BIT_GLOBAL_X86 8 /* Global TLB entry */ +#define _PAGE_BIT_PAT_LARGE_X86 12 /* On 2MB or 1GB pages */ +#define _PAGE_PFN_SHIFT_X86 12 /* shift of PFN field */ +#define _PAGE_BIT_NX_X86 63 /* No execute: only valid after */ + /* cpuid check */ + +#define _PAGE_P_X86 (1ULL << _PAGE_BIT_PRESENT_X86) +#define _PAGE_W_X86 (1ULL << _PAGE_W_BIT_X86) +#define _PAGE_USER_X86 (1ULL << _PAGE_BIT_USER_X86) +#define _PAGE_PWT_X86 (1ULL << _PAGE_BIT_PWT_X86) +#define _PAGE_PCD_X86 (1ULL << _PAGE_BIT_PCD_X86) +#define _PAGE_A_X86 (1ULL << _PAGE_BIT_ACCESSED_X86) +#define _PAGE_D_X86 (1ULL << _PAGE_BIT_DIRTY_X86) +#define _PAGE_PSE_X86 (1ULL << _PAGE_BIT_PSE_X86) +#define _PAGE_PAT_X86 (1ULL << _PAGE_BIT_PAT_X86) +#define _PAGE_G_X86 (1ULL << _PAGE_BIT_GLOBAL_X86) +#define _PAGE_PAT_LARGE_X86 (1ULL << _PAGE_BIT_PAT_LARGE_X86) +#define _PAGE_NX_X86_32 (0ULL) /* has not such protection */ +#define _PAGE_NX_X86_PAE (1ULL << _PAGE_BIT_NX_X86) +#define _PAGE_NX_X86_64 (1ULL << _PAGE_BIT_NX_X86) + +#define _PAGE_PFN_X86_32 /* 0x0000_0000_ffff_f000 */ \ + ((((1ULL << MAX_PHYS_BITS_X86_32) - 1) >> \ + 
PAGE_SHIFT_X86) << \ + _PAGE_PFN_SHIFT_X86) +#define _PAGE_PFN_X86_64 /* 0x0000_ffff_ffff_f000 */ \ + ((((1ULL << MAX_PHYS_BITS_X86_64) - 1) >> \ + PAGE_SHIFT_X86) << \ + _PAGE_PFN_SHIFT_X86) + +/* Page table entries format */ +typedef u32 pt_element_x86_32_t; +typedef u64 pt_element_x86_64_t; + +#define X86_PTE_LEVEL_NUM 1 /* level number of native pte */ +#define X86_32_PGD_LEVEL_NUM 2 /* level number of pgd for 32 bits */ + /* physical & virtual addresses mode */ +#define X86_PAE_PGD_LEVEL_NUM 3 /* level number of pgd for 48 bits */ + /* physical & 32 bits virtual */ + /* addresses mode */ +#define X86_64_PGD_LEVEL_NUM 4 /* level number of pgd for 48 bits */ + /* physical & 48 bits virtual */ + /* addresses mode */ +#define X86_PDPE_LEVEL_NUM 3 /* pgd for PAE mode */ +#define X86_DIRECTORY_LEVEL_NUM 2 /* from this level starts direcrory */ + /* levels of PT */ + +#define MAX_HUGE_PAGES_LEVEL_X86_32 X86_32_PGD_LEVEL_NUM +#define MAX_HUGE_PAGES_LEVEL_X86_PAE (X86_PAE_PGD_LEVEL_NUM - 1) +#define MAX_HUGE_PAGES_LEVEL_X86_64 MAX_HUGE_PAGES_LEVEL + +/* one page table occupies on 4K page */ +#define PT_ENT_SHIFT_X86_32 2 /* 4 bytes, 2 bits */ +#define PT_ENT_SHIFT_X86_64 3 /* 8 bytes, 3 bits */ +#define PT_ENT_BITS_X86_32 (PAGE_SHIFT_X86 - PT_ENT_SHIFT_X86_32) +#define PT_ENT_BITS_X86_64 (PAGE_SHIFT_X86 - PT_ENT_SHIFT_X86_64) +#define PT_ENT_PER_PAGE_X86_32 (1 << PT_ENT_BITS_X86_32) +#define PT_ENT_PER_PAGE_X86_64 (1 << PT_ENT_BITS_X86_64) + +#endif /* ! __ASSEMBLY__ */ + +#endif /* ! _ASM_E2K_KVM_PGTABLE_X86_H */ diff --git a/arch/e2k/include/asm/kvm/pgtable.h b/arch/e2k/include/asm/kvm/pgtable.h new file mode 100644 index 000000000000..effd60c0347a --- /dev/null +++ b/arch/e2k/include/asm/kvm/pgtable.h @@ -0,0 +1,77 @@ +/* + * E2K page table operations. + * KVM virtualization support + * Copyright 2016 MCST, Salavat S. 
Gilyazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_PGTABLE_H +#define _E2K_KVM_PGTABLE_H + +/* + * This file contains the functions and defines necessary to modify and + * use the E2K page tables. + * NOTE: E2K has four levels of page tables, while Linux assumes that + * there are three levels of page tables. + */ + +#include + +#include +#include + +#ifdef CONFIG_VIRTUALIZATION +#define HOST_VMALLOC_START NATIVE_VMALLOC_START + /* 0x0000 e400 0000 0000 */ +#define HOST_VMALLOC_END NATIVE_VMALLOC_END + /* 0x0000 e500 0000 0000 */ +#define HOST_VMEMMAP_START NATIVE_VMEMMAP_START + /* 0x0000 e600 0000 0000 */ +#define HOST_VMEMMAP_END NATIVE_VMEMMAP_END + /*<0x0000 e700 0000 0000 */ +#define GUEST_VMALLOC_START (SHADOW_KERNEL_IMAGE_AREA_BASE + \ + 0x008000000000UL) + /* 0x0000 2e80 0000 0000 */ +#define GUEST_VMALLOC_END (GUEST_VMALLOC_START + \ + 0x001000000000UL) + /* 0x0000 2e90 0000 0000 */ +#define GUEST_VMEMMAP_START (GUEST_VMALLOC_END + \ + 0x001000000000UL) + /* 0x0000 2ea0 0000 0000 */ +#define GUEST_VMEMMAP_END (GUEST_VMEMMAP_START + \ + (1UL << (E2K_MAX_PHYS_BITS - \ + PAGE_SHIFT)) * \ + sizeof(struct page)) + /*<0x0000 2f00 0000 0000 */ +#endif /* CONFIG_VIRTUALIZATION */ + +#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native kernel without any virtualization */ +/* or native host with virtualization support */ +#ifndef CONFIG_VIRTUALIZATION +/* it is native kernel without any virtualization */ +#include +#else /* CONFIG_VIRTUALIZATION */ +/* it is native host with virtualization support */ +#define VMALLOC_START HOST_VMALLOC_START + /* 0x0000 e400 0000 0000 */ +#define VMALLOC_END HOST_VMALLOC_END + /* 0x0000 e500 0000 0000 */ +#define VMEMMAP_START HOST_VMEMMAP_START + /* 0x0000 e600 0000 0000 */ +#define VMEMMAP_END HOST_VMEMMAP_END + /*<0x0000 e700 0000 0000 */ +#endif /* ! 
CONFIG_VIRTUALIZATION */ +/* it is native kernel without any virtualization */ +/* or native host with virtualization support */ + +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#else + #error "Unknown virtualization type" +#endif /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! _E2K_KVM_PGTABLE_H */ diff --git a/arch/e2k/include/asm/kvm/process.h b/arch/e2k/include/asm/kvm/process.h new file mode 100644 index 000000000000..2c52152d6d43 --- /dev/null +++ b/arch/e2k/include/asm/kvm/process.h @@ -0,0 +1,495 @@ +/* + * KVM guest kernel processes support + * Copyright 2011 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_PROCESS_H +#define _E2K_KVM_PROCESS_H + +#include + +#include +#include +#include +#include +#include +#include + +extern void kvm_clear_host_thread_info(thread_info_t *ti); +extern gthread_info_t *create_guest_start_thread_info(struct kvm_vcpu *vcpu); +extern int kvm_resume_vm_thread(void); + +extern int kvm_correct_guest_trap_return_ip(unsigned long return_ip); + +extern long return_pv_vcpu_syscall_fork(void); + +/* + * Is the CPU at guest Hardware Virtualized mode + * CORE_MODE.gmi is true only at guest HV mode + */ +static inline bool is_CPU_at_guest_hv_vm_mode(void) +{ + e2k_core_mode_t CORE_MODE; + + CORE_MODE.CORE_MODE_reg = native_read_CORE_MODE_reg_value(); + if (CORE_MODE.CORE_MODE_gmi) { + return true; + } + return false; +} +#ifdef CONFIG_KVM_HOST_MODE +/* it is native host kernel with virtualization support */ +/* or it is paravirtualized host and guest kernel */ +static inline bool host_is_at_HV_GM_mode(void) +{ + if (unlikely(!IS_HV_GM() && is_CPU_at_guest_hv_vm_mode())) + return true; + return false; +} +#endif /* CONFIG_KVM_HOST_MODE */ + +static __always_inline bool +is_guest_user_hardware_stack(e2k_addr_t stack_base, e2k_size_t 
stack_size) +{ + if (stack_base < GUEST_TASK_SIZE) { + return true; + } + return false; +} + +/* host kernel support virtualization and should update VCPU thread context */ +/* see arch/e2k/include/asm/process.h for more details why and how */ +static __always_inline void +kvm_host_update_vcpu_thread_context(struct task_struct **task, + struct thread_info **ti, struct pt_regs **regs, + struct gthread_info **gti, struct kvm_vcpu **vcpu) +{ + if (!test_thread_flag(TIF_VIRTUALIZED_GUEST)) + /* ot is not VCPU thread */ + return; + if ((ti != NULL) && (*ti == current_thread_info())) + /* thread is not changed, so need not updates */ + return; + if (ti != NULL) + *ti = current_thread_info(); + if (task != NULL) + *task = current; + if (regs != NULL) + *regs = current_thread_info()->pt_regs; + if (gti != NULL) + *gti = current_thread_info()->gthread_info; + if (vcpu != NULL) + *vcpu = current_thread_info()->vcpu; +} +#define KVM_HOST_UPDATE_VCPU_THREAD_CONTEXT(__task, __ti, __regs, \ + __gti, __vcpu) \ + kvm_host_update_vcpu_thread_context(__task, __ti, __regs, \ + __gti, __vcpu) +#define KVM_HOST_CHECK_VCPU_THREAD_CONTEXT(__ti) \ +do { \ + GTI_BUG_ON((__ti) != current_thread_info()); \ +} while (false) + +/* + * In some case local data stack cannot be expanded, + * here should be all cases for kvm guest. 
+ * Guest kernel is user process of host and kernel threads are maintained by + * host, including all traps on guest kernel threads + */ +#define kvm_usd_cannot_be_expanded(regs) \ +({ \ + bool is; \ + \ + if (!test_thread_flag(TIF_VIRTUALIZED_GUEST) || \ + paravirt_enabled()) \ + /* Stack is not guest data stack */ \ + /* or it is guest and it cannot run own guest */ \ + is = false; \ + else if (user_stack_cannot_be_expanded() || \ + (regs->stacks.usd_lo.USD_lo_base >= GUEST_TASK_SIZE && \ + regs->stacks.usd_lo.USD_lo_base < HOST_TASK_SIZE)) \ + /* it is stack of guest kernel thread, kernel stacks */ \ + /* should not be expanded */ \ + is = true; \ + else { \ + /* it is not guest process or it is guest user */ \ + /* cannot be here */ \ + BUG_ON(true); \ + is = false; \ + } \ + is; \ +}) + +static inline void +kvm_clear_virt_thread_struct(thread_info_t *ti) +{ + if (likely(ti->vcpu == NULL)) { + /* it is not creation of host process */ + /* to support virtualization */ + return; + } + + /* + * Host VCPU thread can be only created by user process (for example + * by qemu) and only user process can clone the thread to handle + * some VCPU running exit reasons. + * But the new thread cannot be one more host VCPU thread, + * so clean up all about VCPU + */ + + /* VCPU thread should be only at host mode (handle exit reason), */ + /* not at running VCPU mode */ + KVM_BUG_ON(test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE)); + + ti->gthread_info = NULL; +} + +#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL) +/* host kernel with virtualization support */ + +#define UPDATE_VCPU_THREAD_CONTEXT(__task, __ti, __regs, __gti, __vcpu) \ + KVM_HOST_UPDATE_VCPU_THREAD_CONTEXT(__task, __ti, __regs, \ + __gti, __vcpu) +#define CHECK_VCPU_THREAD_CONTEXT(__ti) \ + KVM_HOST_CHECK_VCPU_THREAD_CONTEXT(__ti) + +#endif /* ! CONFIG_PARAVIRT_GUEST && ! 
CONFIG_KVM_GUEST_KERNEL */ + +#define KVM_GOTO_RETURN_TO_PARAVIRT_GUEST(ret_value) \ + E2K_GOTO_ARG1(return_to_paravirt_guest, ret_value) +#define KVM_COND_GOTO_RETURN_TO_PARAVIRT_GUEST(cond, ret_value) \ + DEF_COND_GOTO_ARG1(return_to_paravirt_guest, cond, ret_value) +#define KVM_GOTO_DONE_TO_PARAVIRT_GUEST() \ + E2K_GOTO(done_to_paravirt_guest) +#define KVM_COND_GOTO_DONE_TO_PARAVIRT_GUEST(cond) \ + DEF_COND_GOTO(done_to_paravirt_guest, cond) + +/* + * The function completes on host done to guest process after trap handling + */ +#define KVM_GET_PARAVIRT_GUEST_MODE(pv_guest, regs) \ +({ \ + bool pv_mode; \ + \ + pv_mode = test_thread_flag(TIF_PARAVIRT_GUEST); \ + /* trap can occur on light hypercall and handled as trap on user */ \ + /* but return will be on host kernel into light hypercall */ \ + /* In this case do not switch to guest shadow image */ \ + pv_mode &= from_host_user_mode((regs)->crs.cr1_lo); \ + \ + (pv_guest) = pv_mode; \ +}) + +/* + * Set global registers used by host to support virtualization + * Now only one (pair) register is used as pointer to VCPU state structure + */ +#ifndef CONFIG_USE_GD_TO_VCPU_ACCESS +#define SET_HOST_GREG(greg_no, value) NATIVE_SET_DGREG(greg_no, value) +#define GET_HOST_GREG(greg_no) NATIVE_GET_UNTEGGED_DGREG(greg_no) +#else /* CONFIG_USE_GD_TO_VCPU_ACCESS */ + #error "Global pointer to VCPU state can not be loadded to GD register" +#endif /* ! 
CONFIG_USE_GD_TO_VCPU_ACCESS */ + +extern noinline notrace __interrupt +void go2guest(long fn, bool priv_guest); + +#ifdef CONFIG_KVM_HOST_MODE +/* it is native host kernel with virtualization support */ +/* or it is paravirtualized host and guest kernel */ + +#define GET_GUEST_VCPU_STATE_POINTER(__vcpu) \ +({ \ + e2k_addr_t vs = (e2k_addr_t)((__vcpu)->arch.vcpu_state); \ + \ + vs = kvm_vcpu_hva_to_gpa(__vcpu, vs); \ + if (is_paging(__vcpu)) \ + vs = (e2k_addr_t)__guest_va(vs); \ + vs; \ +}) + +#define INIT_HOST_VCPU_STATE_GREG_COPY(__ti, __vcpu) \ +({ \ + e2k_addr_t vs = GET_GUEST_VCPU_STATE_POINTER(__vcpu); \ + \ + HOST_ONLY_COPY_TO_VCPU_STATE_GREG(&(__ti)->k_gregs, vs); \ +}) + +#define INIT_HOST_GREGS_COPY(__ti, __vcpu) \ +({ \ + /* Zeroing global registers used by kernel */ \ + CLEAR_KERNEL_GREGS_COPY(__ti); \ + /* Set pointer to VCPU state to enable interface with guest */ \ + INIT_HOST_VCPU_STATE_GREG_COPY(__ti, vcpu); \ +}) + +static __always_inline void +host_exit_to_usermode_loop(struct pt_regs *regs, bool syscall, bool has_signal) +{ + KVM_BUG_ON(!host_test_intc_emul_mode(regs)); + + WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_ENABLED)); + + /* Check for rescheduling first */ + if (need_resched()) { + schedule(); + } + + if (has_signal) { + /* + * This is guest VCPU interception emulation, but + * there is (are) pending signal for host VCPU mode, + * so it need switch to host VCPU mode to handle + * signal and probably to kill VM + */ + WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_DISABLED)); + pv_vcpu_switch_to_host_from_intc(current_thread_info()); + } else if (likely(guest_trap_pending(current_thread_info()))) { + /* + * This is guest VCPU interception emulation and + * there is (are) the guest trap(s) to handle + */ + insert_pv_vcpu_traps(current_thread_info(), regs); + } else { + /* + * This is just a return from VCPU interception + * emulation mode to the continue execution + * of the guest paravirtualized VCPU. 
+ * In such case: + * - the currents point to the host qemu-VCPU + * process structures; + * - the regs points to the host guest-VCPU + * process structure. + * So nothing works based on these non-interconnected + * structures cannot be running + */ + } + + WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_DISABLED)); +} + +#ifdef CONFIG_SMP +#define SAVE_GUEST_KERNEL_GREGS_COPY_TO(__k_gregs, __g_gregs, \ + only_kernel) \ +({ \ + kernel_gregs_t *kg = (__k_gregs); \ + kernel_gregs_t *gg = (__g_gregs); \ + unsigned long task__; \ + unsigned long cpu_id__; \ + unsigned long cpu_off__; \ + \ + if (likely(!(only_kernel))) { \ + unsigned long vs__; \ + \ + HOST_ONLY_SAVE_VCPU_STATE_GREG(vs__); \ + HOST_ONLY_COPY_TO_VCPU_STATE_GREG(gg, vs__); \ + } \ + ONLY_COPY_FROM_KERNEL_GREGS(kg, task__, cpu_id__, cpu_off__); \ + ONLY_COPY_TO_KERNEL_GREGS(gg, task__, cpu_id__, cpu_off__); \ +}) +#else /* ! CONFIG_SMP */ +#define SAVE_GUEST_KERNEL_GREGS_COPY_TO(__k_gregs, __g_gregs, \ + only_kernel) \ +({ \ + kernel_gregs_t *kg = (__k_gregs); \ + kernel_gregs_t *gg = (__g_gregs); \ + unsigned long task__; \ + \ + if (likely(!(only_kernel))) { \ + unsigned long vs__; \ + \ + HOST_ONLY_SAVE_VCPU_STATE_GREG(vs__); \ + HOST_ONLY_COPY_TO_VCPU_STATE_GREG(gg, vs__); \ + } \ + ONLY_COPY_FROM_KERNEL_CURRENT_GREGS(kg, task__); \ + ONLY_COPY_TO_KERNEL_CURRENT_GREGS(gg, task__); \ +}) +#endif /* CONFIG_SMP */ + +#define SAVE_GUEST_KERNEL_GREGS_COPY(__ti, __gti) \ +({ \ + kernel_gregs_t *k_gregs = &(__ti)->k_gregs_light; \ + kernel_gregs_t *g_gregs = &(__gti)->gk_gregs; \ + \ + SAVE_GUEST_KERNEL_GREGS_COPY_TO(k_gregs, g_gregs, false); \ +}) + +#ifdef CONFIG_SMP +#define RESTORE_GUEST_KERNEL_GREGS_COPY_FROM(__k_gregs, __g_gregs, \ + only_kernel) \ +({ \ + kernel_gregs_t *kg = (__k_gregs); \ + kernel_gregs_t *gg = (__g_gregs); \ + unsigned long task__; \ + unsigned long cpu_id__; \ + unsigned long cpu_off__; \ + \ + if (likely(!(only_kernel))) { \ + unsigned long vs__; \ + \ + 
HOST_ONLY_COPY_FROM_VCPU_STATE_GREG(k_gregs, vs__); \ + HOST_ONLY_RESTORE_VCPU_STATE_GREG(vs__); \ + } \ + ONLY_COPY_FROM_KERNEL_GREGS(gg, task__, cpu_id__, cpu_off__); \ + ONLY_COPY_TO_KERNEL_GREGS(kg, task__, cpu_id__, cpu_off__); \ +}) +#else /* ! CONFIG_SMP */ +#define RESTORE_GUEST_KERNEL_GREGS_COPY_FROM(__k_gregs, __g_gregs, \ + only_kernel) \ +({ \ + kernel_gregs_t *kg = (__k_gregs); \ + kernel_gregs_t *gg = (__g_gregs); \ + unsigned long task__; \ + \ + if (likely(!(only_kernel))) { \ + unsigned long vs__; \ + \ + HOST_ONLY_COPY_FROM_VCPU_STATE_GREG(k_gregs, vs__); \ + HOST_ONLY_RESTORE_VCPU_STATE_GREG(vs__); \ + } \ + ONLY_COPY_FROM_KERNEL_CURRENT_GREGS(gg, task__); \ + ONLY_COPY_TO_KERNEL_CURRENT_GREGS(kg, task__); \ +}) +#endif /* CONFIG_SMP */ + +#define RESTORE_GUEST_KERNEL_GREGS_COPY(__ti, __gti, __vcpu) \ +({ \ + kernel_gregs_t *k_gregs = &(__ti)->k_gregs; \ + kernel_gregs_t *g_gregs = &(__gti)->gu_gregs; \ + \ + RESTORE_GUEST_KERNEL_GREGS_COPY_FROM(k_gregs, g_gregs, true); \ + INIT_HOST_VCPU_STATE_GREG_COPY(__ti, __vcpu); \ +}) + +#define printk printk_fixed_args +#define __trace_bprintk __trace_bprintk_fixed_args +#define panic panic_fixed_args + +/* + * The function completes on host switch to new user process (sys_execve()) + * of guest kernel. 
+ */ +static __always_inline __interrupt void +kvm_complete_switch_to_user_func(void) +{ + thread_info_t *ti; + gthread_info_t *gti; + bool from_virt_guest; + bool from_pv_guest; + + /* current thread info/task pointer global registers were cleared */ + /* while all global registers were set to empty state */ + ti = NATIVE_READ_CURRENT_REG(); + gti = ti->gthread_info; + from_virt_guest = test_ti_thread_flag(ti, TIF_VIRTUALIZED_GUEST); + from_pv_guest = test_ti_thread_flag(ti, TIF_PARAVIRT_GUEST); + + /* the function should switch interrupt control from UPSR to */ + /* PSR and set initial state of user UPSR */ + if (!from_virt_guest) { + NATIVE_SET_USER_INITIAL_UPSR(E2K_USER_INITIAL_UPSR); + } else { + KVM_SET_GUEST_USER_INITIAL_UPSR(ti); + } + + if (unlikely(from_virt_guest)) { + /* structure gregs into guest thread info structure will */ + /* contain user global registers from now */ + gti->gregs_active = 1; + gti->gregs_valid = 0; + gti->gregs_for_currents_valid = 0; + KVM_COND_GOTO_RETURN_TO_PARAVIRT_GUEST(from_pv_guest, 0); + } +} + +/* + * The function completes return to guest user signal handler + */ +static __always_inline __interrupt void +kvm_complete_go2user(thread_info_t *ti, long fn) +{ + bool is_pv_guest; /* entry point fn is paravirtualized guest */ + /* kernel function */ + + if (!test_ti_thread_flag(ti, TIF_VIRTUALIZED_GUEST)) { + /* it is not guest process return to */ + /* but the function should restore user UPSR state */ + NATIVE_WRITE_UPSR_REG(ti->upsr); + return; + } + if ((e2k_addr_t)fn < GUEST_TASK_SIZE && + (ti->vcpu == NULL || is_paging(ti->vcpu))) { + /* it is guest user process return to */ + /* clear host global registers used for virtualization + CLEAR_HOST_GREGS(); + */ + /* the function should restore guest user UPSR state */ + KVM_RESTORE_GUEST_USER_UPSR(ti); + } else { + /* it is guest kernel process return to + ONLY_SET_HOST_GREGS(ti->vcpu, ti->vcpu->arch.vcpu_state); + */ + /* the function should restore guest kernel UPSR state
*/ + KVM_RESTORE_GUEST_KERNEL_UPSR(ti); + } + + is_pv_guest = ((e2k_addr_t)fn >= HOST_TASK_SIZE); + + KVM_COND_GOTO_RETURN_TO_PARAVIRT_GUEST(is_pv_guest, 0); +} + +#undef printk +#undef __trace_bprintk +#undef panic +#else /* ! CONFIG_KVM_HOST_MODE */ +/* it is native kernel without any virtualization or */ +/* pure guest kernel */ + +#define INIT_HOST_VCPU_STATE_GREG_COPY(__ti, __vcpu) +#define INIT_HOST_GREGS_COPY(__ti, __vcpu) + +#endif /* CONFIG_KVM_HOST_MODE */ + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native guest kernel */ +#include +#else /* CONFIG_VIRTUALIZATION && ! CONFIG_KVM_GUEST_KERNEL */ +/* it is native host kernel with virtualization support */ +/* or it is paravirtualized host and guest kernel */ +#define usd_cannot_be_expanded(regs) kvm_usd_cannot_be_expanded(regs) +#define clear_vm_thread_flags() \ +({ \ + /* \ + * If it is new process with new virtual space forked by QEMU \ + * after VM creation and created process execve() other code, \ + * then it cannot inherit VM features \ + */ \ + clear_thread_flag(TIF_VM_CREATED); \ +}) + +#define GET_PARAVIRT_GUEST_MODE(pv_guest, regs) \ + KVM_GET_PARAVIRT_GUEST_MODE(pv_guest, regs) + +static inline void +clear_virt_thread_struct(thread_info_t *thread_info) +{ + kvm_clear_virt_thread_struct(thread_info); +} + +static __always_inline __interrupt void +complete_switch_to_user_func(void) +{ + kvm_complete_switch_to_user_func(); +} +static __always_inline __interrupt void +complete_go2user(thread_info_t *ti, long fn) +{ + kvm_complete_go2user(ti, fn); +} +static inline void free_virt_task_struct(struct task_struct *task) +{ + /* nothing to free */ +} +#endif /* ! CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! 
_E2K_KVM_PROCESS_H */ diff --git a/arch/e2k/include/asm/kvm/ptrace.h b/arch/e2k/include/asm/kvm/ptrace.h new file mode 100644 index 000000000000..6471832b9edb --- /dev/null +++ b/arch/e2k/include/asm/kvm/ptrace.h @@ -0,0 +1,654 @@ +#ifndef _E2K_KVM_PTRACE_H +#define _E2K_KVM_PTRACE_H + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ +#include +#include +#endif /* __ASSEMBLY__ */ + +#include + +#ifndef __ASSEMBLY__ +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_USE_AAU +#include +#endif /* CONFIG_USE_AAU */ +#include +#include + +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +typedef enum inject_caller { + FROM_HOST_INJECT = 1 << 0, + FROM_PV_VCPU_TRAP_INJECT = 1 << 1, + FROM_PV_VCPU_SYSCALL_INJECT = 1 << 2, +} inject_caller_t; + +#ifdef CONFIG_VIRTUALIZATION + +#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native host kernel with virtualization support */ +#define BOOT_TASK_SIZE (BOOT_HOST_TASK_SIZE) +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel */ +#include +/* #define TASK_SIZE (GUEST_TASK_SIZE) */ +/* #define BOOT_TASK_SIZE (BOOT_GUEST_TASK_SIZE) */ +#else /* CONFIG_PARAVIRT_GUEST */ +/* it is paravirtualized host and guest kernel */ +#include +/* #define TASK_SIZE (PARAVIRT_TASK_SIZE) */ +/* #define BOOT_TASK_SIZE (BOOT_PARAVIRT_TASK_SIZE) */ +#endif /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ +#endif /* CONFIG_VIRTUALIZATION */ + +#ifdef __KERNEL__ + +/* some global registers are used to support virtualization mode */ +/* (see usage and real numbers at asm/glob_regs.h) */ +typedef struct host_gregs { + struct e2k_greg g[HOST_KERNEL_GREGS_PAIRS_SIZE]; +} host_gregs_t; + +/* + * We could check CR.pm and TIR.ip here, but that is not needed + * because whenever CR.pm = 1 or TIR.ip < TASK_SIZE, SBR points + * to user space. So checking SBR alone is enough. 
+ * + * Testing SBR is necessary because of HW bug #59886 - the 'ct' command + * (return to user) may be interrupted with closed interrupts. + * The result - kernel's ip, psr.pm=1, but SBR points to user space. + * This case should be detected as user mode. + * + * Checking via SBR is also useful for detecting fast system calls as + * user mode. + */ +#define is_user_mode(regs, __USER_SPACE_TOP__) \ + ((regs)->stacks.top < (__USER_SPACE_TOP__)) +#define is_kernel_mode(regs, __KERNEL_SPACE_BOTTOM__) \ + ((regs)->stacks.top >= (__KERNEL_SPACE_BOTTOM__)) + +#define from_kernel_mode(cr1_lo) ((cr1_lo).CR1_lo_pm) +#define from_user_mode(cr1_lo) (!((cr1_lo).CR1_lo_pm)) + +#define is_from_user_IP(cr0_hi, __USER_SPACE_TOP__) \ +({ \ + unsigned long IP; \ + bool ret; \ + IP = (cr0_hi).CR0_hi_IP; \ + ret = (IP < (__USER_SPACE_TOP__)); \ + ret; \ +}) +#define is_from_kernel_IP(cr0_hi, __KERNEL_SPACE_BOTTOM__) \ +({ \ + unsigned long IP; \ + bool ret; \ + IP = (cr0_hi).CR0_hi_IP; \ + ret = (IP >= (__KERNEL_SPACE_BOTTOM__)); \ + ret; \ +}) + +#define from_user_IP(cr0_hi) is_from_user_IP(cr0_hi, TASK_SIZE) +#define from_kernel_IP(cr0_hi) is_from_kernel_IP(cr0_hi, TASK_SIZE) + +#define is_trap_from_user(regs, __USER_SPACE_TOP__) \ +({ \ + e2k_tir_lo_t tir_lo; \ + tir_lo.TIR_lo_reg = (regs)->TIR_lo; \ + tir_lo.TIR_lo_ip < (__USER_SPACE_TOP__); \ +}) +#define is_trap_from_kernel(regs, __KERNEL_SPACE_BOTTOM__) \ +({ \ + e2k_tir_lo_t tir_lo; \ + tir_lo.TIR_lo_reg = (regs)->TIR_lo; \ + tir_lo.TIR_lo_ip >= (__KERNEL_SPACE_BOTTOM__); \ +}) + +#if !defined(CONFIG_KVM_GUEST_KERNEL) && !defined(CONFIG_PARAVIRT_GUEST) +/* it is native kernel without any virtualization */ +/* or host kernel with virtualization support */ + +static inline void atomic_load_osgd_to_gd(void) +{ + native_atomic_load_osgd_to_gd(); +} + +#define SAVE_DAM(__dam) NATIVE_SAVE_DAM(__dam) +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ + +#include +#elif 
defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ + +#include +#else + #error "Undefined type of virtualization" +#endif /* ! CONFIG_KVM_GUEST_KERNEL && ! CONFIG_PARAVIRT_GUEST */ + +#if defined(CONFIG_VIRTUALIZATION) +/* it is host kernel with virtualization support */ +/* or paravirtualized host and guest kernel */ +/* or pure guest kernel (not paravirtualized based on pv_ops) */ +#define guest_task_mode(task) \ + test_ti_thread_flag(task_thread_info(task), \ + TIF_VIRTUALIZED_GUEST) +#define native_user_mode(regs) is_user_mode(regs, NATIVE_TASK_SIZE) +#define guest_user_mode(regs) is_user_mode(regs, GUEST_TASK_SIZE) +#define native_kernel_mode(regs) is_kernel_mode(regs, NATIVE_TASK_SIZE) +#define guest_kernel_mode(regs) \ + (is_kernel_mode(regs, GUEST_TASK_SIZE) && \ + !native_kernel_mode(regs)) + +#define from_host_user_IP(cr0_hi) \ + is_from_user_IP(cr0_hi, NATIVE_TASK_SIZE) +#define from_host_kernel_IP(cr0_hi) \ + is_from_kernel_IP(cr0_hi, NATIVE_TASK_SIZE) +#define from_guest_user_IP(cr0_hi) \ + is_from_user_IP(cr0_hi, GUEST_TASK_SIZE) +#define from_guest_kernel_IP(cr0_hi) \ + (is_from_kernel_IP(cr0_hi, GUEST_TASK_SIZE) && \ + !from_host_kernel_IP(cr0_hi)) + +#define from_host_user_mode(cr1_lo) from_user_mode(cr1_lo) +#define from_host_kernel_mode(cr1_lo) from_kernel_mode(cr1_lo) +/* guest user is user of guest kernel, so USER MODE (pm = 0) */ +#define from_guest_user_mode(cr1_lo) from_user_mode(cr1_lo) + +#define is_call_from_user(cr0_hi, cr1_lo, __HOST__) \ + ((__HOST__) ? \ + is_call_from_host_user(cr0_hi, cr1_lo) : \ + is_call_from_guest_user(cr0_hi, cr1_lo)) +#define is_call_from_kernel(cr0_hi, cr1_lo, __HOST__) \ + ((__HOST__) ? \ + is_call_from_host_kernel(cr0_hi, cr1_lo) : \ + is_call_from_guest_kernel(cr0_hi, cr1_lo)) + +#ifndef CONFIG_KVM_GUEST_KERNEL +/* it is host kernel with virtualization support */ +/* or paravirtualized host and guest kernel */ + +#define user_mode(regs) \ + ((regs) ? 
is_user_mode(regs, TASK_SIZE) : false) +#define kernel_mode(regs) \ + ((regs) ? is_kernel_mode(regs, TASK_SIZE) : true) + +#ifdef CONFIG_KVM_HW_VIRTUALIZATION +/* guest kernel can be: */ +/* user of host kernel, so USER MODE (pm = 0) */ +/* hardware virtualized guest kernel, so KERNEL MODE (pm = 1) */ +#define from_guest_kernel_mode(cr1_lo) \ + (from_kernel_mode(cr1_lo) || from_user_mode(cr1_lo)) +#define from_guest_kernel(cr0_hi, cr1_lo) \ + (from_guest_kernel_mode(cr1_lo) && \ + from_guest_kernel_IP(cr0_hi)) +#else /* ! CONFIG_KVM_HW_VIRTUALIZATION */ +/* guest kernel is user of host kernel, so USER MODE (pm = 0) */ +#define from_guest_kernel_mode(cr1_lo) \ + from_user_mode(cr1_lo) +#define from_guest_kernel(cr0_hi, cr1_lo) \ + (from_guest_kernel_mode(cr1_lo) && \ + from_guest_kernel_IP(cr0_hi)) +#endif /* CONFIG_KVM_HW_VIRTUALIZATION */ + +#define is_trap_from_host_kernel(regs) \ + is_trap_from_kernel(regs, NATIVE_TASK_SIZE) + +#define is_call_from_host_user(cr0_hi, cr1_lo) \ + (from_host_user_IP(cr0_hi) && from_host_user_mode(cr1_lo)) +#define is_call_from_host_user_IP(cr0_hi, cr1_lo, ignore_IP) \ + ((!(ignore_IP)) ? is_call_from_host_user(cr0_hi, cr1_lo) : \ + from_host_user_mode(cr1_lo)) +#define is_call_from_guest_user(cr0_hi, cr1_lo) \ + (from_guest_user_IP(cr0_hi) && from_guest_user_mode(cr1_lo)) +#define is_call_from_guest_user_IP(cr0_hi, cr1_lo, ignore_IP) \ + ((!(ignore_IP)) ? is_call_from_guest_user(cr0_hi, cr1_lo) : \ + from_guest_user_mode(cr1_lo)) +#define is_call_from_host_kernel(cr0_hi, cr1_lo) \ + (from_host_kernel_IP(cr0_hi) && from_host_kernel_mode(cr1_lo)) +#define is_call_from_host_kernel_IP(cr0_hi, cr1_lo, ignore_IP) \ + ((!(ignore_IP)) ? is_call_from_host_kernel(cr0_hi, cr1_lo) : \ + from_host_kernel_mode(cr1_lo)) +#define is_call_from_guest_kernel(cr0_hi, cr1_lo) \ + from_guest_kernel(cr0_hi, cr1_lo) +#define is_call_from_guest_kernel_IP(cr0_hi, cr1_lo, ignore_IP) \ + ((!(ignore_IP)) ? 
is_call_from_guest_kernel(cr0_hi, cr1_lo) : \ + from_guest_kernel_mode(cr1_lo)) + +#define is_trap_on_user(regs, __HOST__) \ + ((__HOST__) ? \ + trap_from_host_user(regs) : \ + trap_from_guest_user(regs)) +#define is_trap_on_kernel(regs, __HOST__) \ + ((__HOST__) ? \ + trap_from_host_kernel(regs) : \ + (trap_from_guest_kernel(regs) || \ + is_trap_from_host_kernel(regs))) + +#define ON_HOST_KERNEL() (NATIVE_NV_READ_PSR_REG_VALUE() & PSR_PM) + +#define __trap_from_user(regs) \ + is_trap_on_user(regs, ON_HOST_KERNEL()) +#define __trap_from_kernel(regs) \ + is_trap_on_kernel(regs, ON_HOST_KERNEL()) +#define trap_on_user(regs) __trap_from_user(regs) +#define trap_on_kernel(regs) __trap_from_kernel(regs) + +#define call_from_user_mode(cr0_hi, cr1_lo) \ + is_call_from_user(cr0_hi, cr1_lo, ON_HOST_KERNEL()) +#define call_from_kernel_mode(cr0_hi, cr1_lo) \ + is_call_from_kernel(cr0_hi, cr1_lo, ON_HOST_KERNEL()) +#define call_from_user(regs) \ + call_from_user_mode((regs)->crs.cr0_hi, (regs)->crs.cr1_lo) +#define call_from_kernel(regs) \ + call_from_kernel_mode((regs)->crs.cr0_hi, (regs)->crs.cr1_lo) + +#define __trap_from_host_user(regs) native_user_mode(regs) +#define __trap_from_host_kernel(regs) native_kernel_mode(regs) +#define __trap_from_guest_user(regs) guest_user_mode(regs) +#define __trap_from_guest_kernel(regs) guest_kernel_mode(regs) + +#define __call_from_kernel(regs) call_from_kernel(regs) +#define __call_from_user(regs) call_from_user(regs) + +#define trap_on_guest_kernel_mode(regs) \ + from_guest_kernel_mode((regs)->crs.cr1_lo) +#define trap_on_guest_kernel_IP(regs) \ + (from_guest_kernel_IP((regs)->crs.cr0_hi) && \ + !from_host_kernel_IP((regs)->crs.cr0_hi)) +#define host_trap_guest_user_mode(regs) \ + (from_guest_user_mode((regs)->crs.cr1_lo) && \ + __trap_from_guest_user(regs)) +#define host_trap_guest_kernel_mode(regs) \ + (from_guest_kernel((regs)->crs.cr0_hi, \ + (regs)->crs.cr1_lo) && \ + __trap_from_guest_kernel(regs)) +#define 
guest_trap_user_mode(regs) \ + (from_guest_kernel((regs)->crs.cr0_hi, \ + (regs)->crs.cr1_lo) && \ + __trap_from_guest_user(regs)) +#define guest_trap_kernel_mode(regs) \ + (from_guest_kernel((regs)->crs.cr0_hi, \ + (regs)->crs.cr1_lo) && \ + __trap_from_guest_kernel(regs)) + +#define trap_from_host_kernel_mode(regs) \ + from_host_kernel_mode((regs)->crs.cr1_lo) +#define trap_from_host_kernel_IP(regs) \ + from_host_kernel_IP((regs)->crs.cr0_hi) +#define trap_from_host_kernel(regs) \ + (trap_from_host_kernel_mode(regs) && \ + __trap_from_host_kernel(regs)) +#define trap_from_host_user(regs) \ + (from_host_user_mode((regs)->crs.cr1_lo) && \ + __trap_from_host_user(regs)) +/* macros to detect guest kernel traps on guest and on host */ +/* trap only on guest kernel */ +#define trap_from_guest_kernel(regs) \ + (from_guest_kernel_mode((regs)->crs.cr1_lo) && \ + __trap_from_guest_kernel(regs)) +/* macros to detect guest traps on host, guest has not own guest, so */ +/* macros should always return 'false' for guest */ +/* trap occurred on guest user only */ +#define trap_from_guest_user(regs) \ +({ \ + bool is; \ + \ + if (paravirt_enabled() || \ + !test_thread_flag(TIF_VIRTUALIZED_GUEST)) \ + /* It is guest and it cannot run own guest */ \ + /* or trap is not on guest process */ \ + is = false; \ + else if (host_trap_guest_user_mode(regs)) \ + is = true; \ + else \ + is = false; \ + is; \ +}) +/* macroses to detect guest traps on host, guest has not own guest, so */ +/* macroses should always return 'false' for guest */ +/* trap occurred on guest process (guest user or guest kernel or on host */ +/* while running guest process (guest VCPU thread) */ +#define trap_on_guest(regs) \ + (!paravirt_enabled() && \ + test_thread_flag(TIF_VIRTUALIZED_GUEST)) +#define trap_on_pv_hv_guest(vcpu, regs) \ + ((vcpu) != NULL && \ + !((vcpu)->arch.is_hv) && trap_on_guest(regs)) +/* guest trap occurred on guest user or kernel */ +#define guest_trap_on_host(regs) \ + (trap_on_guest(regs) && 
user_mode(regs)) +#define guest_trap_on_pv_hv_host(vcpu, regs) \ + (trap_on_pv_hv_guest(vcpu, regs) && user_mode(regs)) +/* trap occurred on guest kernel or user, but in host mode */ +/* and the trap can be due to guest or not */ +#define host_trap_on_guest(regs) \ + (guest_trap_on_host(regs) && \ + trap_from_host_kernel_mode(regs) && \ + trap_from_host_kernel_IP(regs)) +/* guest trap occurred on guest user or kernel or on host but due to guest */ +/* for example guest kernel address in hypercalls */ +#define due_to_guest_trap_on_host(regs) \ + (trap_on_guest(regs) && \ + (user_mode(regs) || \ + LIGHT_HYPERCALL_MODE(regs) || \ + GENERIC_HYPERCALL_MODE())) +#define due_to_guest_trap_on_pv_hv_host(vcpu, regs) \ + (trap_on_pv_hv_guest(vcpu, regs) && \ + (user_mode(regs) || \ + LIGHT_HYPERCALL_MODE(regs) || \ + GENERIC_HYPERCALL_MODE())) +/* page fault is from intercept */ +#define due_to_intc_page_fault(vcpu, regs) \ + ((vcpu) != NULL && \ + (vcpu)->arch.is_hv && \ + (regs)->trap->is_intc) +/* trap occurred on guest user only */ +#define guest_user_trap_on_host(regs) \ + (trap_on_guest(regs) && guest_trap_user_mode(regs)) +/* trap occurred on guest kernel only */ +#define guest_kernel_trap_on_host(regs) \ + (trap_on_guest(regs) && guest_trap_kernel_mode(regs)) + +/* macros to detect guest traps on guest and on host */ +/* trap on guest user, kernel or on host kernel due to guest */ +#define __guest_trap(regs) \ + (paravirt_enabled() || \ + test_thread_flag(TIF_VIRTUALIZED_GUEST)) + +#define addr_from_guest_user(addr) ((addr) < GUEST_TASK_SIZE) +#define addr_from_guest_kernel(addr) \ + ((addr) >= GUEST_TASK_SIZE && (addr) < HOST_TASK_SIZE) + +#define guest_user_addr_mode_page_fault(regs, instr_page, addr) \ + ((instr_page) ? 
guest_user_mode(regs) : \ + guest_user_mode(regs) || \ + (addr_from_guest_user(addr) && \ + (!trap_from_host_kernel(regs) || \ + LIGHT_HYPERCALL_MODE(regs) || \ + GENERIC_HYPERCALL_MODE()))) +/* macros to detect guest user address on host, */ +/* guest has not own guest, so macros should always return 'false' for guest */ +/* faulted address is from guest user space */ +#define guest_mode_page_fault(regs, instr_page, addr) \ + (trap_on_guest(regs) && \ + guest_user_addr_mode_page_fault(regs, \ + instr_page, addr)) +/* macros to detect instruction page fault on guest kernel access */ +/* such traps should be handled by host because of guest kernel */ +/* is user of host */ +#define guest_kernel_instr_page_fault(regs) \ + (trap_on_guest(regs) && \ + guest_trap_kernel_mode(regs) && \ + trap_on_guest_kernel_IP(regs)) +/* macros to detect instruction page fault on guest user access */ +/* such traps should be handled by guest kernel */ +#define guest_user_instr_page_fault(regs) \ + (trap_on_guest(regs) && guest_user_mode(regs)) + +static inline e2k_addr_t +check_is_user_address(struct task_struct *task, e2k_addr_t address) +{ + if (likely(address < TASK_SIZE)) + return 0; + if (!paravirt_enabled()) { + pr_err("Address 0x%016lx is host kernel address\n", + address); + return -1; + } else if (address < NATIVE_TASK_SIZE) { + pr_err("Address 0x%016lx is guest kernel address\n", + address); + return -1; + } else { + pr_err("Address 0x%016lx is host kernel address\n", + address); + return -1; + } +} +#define IS_GUEST_USER_ADDRESS_TO_PVA(task, address) \ + (test_ti_thread_flag(task_thread_info(tsk), \ + TIF_VIRTUALIZED_GUEST) && \ + IS_GUEST_USER_ADDRESS(address)) +#define IS_GUEST_ADDRESS_TO_HOST(address) \ + (paravirt_enabled() && !IS_HV_GM() && \ + IS_HOST_KERNEL_ADDRESS(address)) + +#ifdef CONFIG_KVM_GUEST_HW_PV +/* FIXME Instead of ifdef, this should check for is_pv */ +#define print_host_user_address_ptes(mm, address) \ + native_print_host_user_address_ptes(mm, address) 
+#else +/* guest page table is pseudo PT and only host PT is used */ +/* to translate any guest addresses */ +#define print_host_user_address_ptes(mm, address) \ +({ \ + /* function is actual only for guest kernel */ \ + if (paravirt_enabled()) \ + HYPERVISOR_print_guest_user_address_ptes((mm)->gmmid_nr, \ + address); \ +}) +#endif /* CONFIG_KVM_GUEST_HW_PV */ +#else /* CONFIG_KVM_GUEST_KERNEL */ +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#endif /* ! CONFIG_KVM_GUEST_KERNEL */ + +#else /* ! CONFIG_VIRTUALIZATION */ +/* it is native kernel without any virtualization */ + +#define guest_task_mode(task) false /* only native tasks */ + +#define user_mode(regs) is_user_mode(regs, TASK_SIZE) +#define kernel_mode(regs) is_kernel_mode(regs, TASK_SIZE) + +#define is_call_from_host_user(cr0_hi, cr1_lo) \ + (from_user_IP(cr0_hi) && from_user_mode(cr1_lo)) +#define is_call_from_host_user_IP(cr0_hi, cr1_lo, ignore_IP) \ + ((!(ignore_IP)) ? is_call_from_host_user(cr0_hi, cr1_lo) : \ + from_user_mode(cr1_lo)) +#define is_call_from_guest_user(cr0_hi, cr1_lo) false +#define is_call_from_guest_user_IP(cr0_hi, cr1_lo, ignoreIP) false +#define is_call_from_host_kernel(cr0_hi, cr1_lo) \ + (from_kernel_IP(cr0_hi) && from_kernel_mode(cr1_lo)) +#define is_call_from_host_kernel_IP(cr0_hi, cr1_lo, ignore_IP) \ + ((!(ignore_IP)) ? 
is_call_from_host_kernel(cr0_hi, cr1_lo) : \ + from_kernel_mode(cr1_lo)) +#define is_call_from_guest_kernel(cr0_hi, cr1_lo) false +#define is_call_from_guest_kernel_IP(cr0_hi, cr1_lo, ignore_IP) false + +#define is_call_from_user(cr0_hi, cr1_lo, __HOST__) \ + is_call_from_host_user(cr0_hi, cr1_lo) +#define is_call_from_kernel(cr0_hi, cr1_lo, __HOST__) \ + is_call_from_host_kernel(cr0_hi, cr1_lo) + +#define __trap_from_user(regs) is_trap_from_user(regs, TASK_SIZE) +#define __trap_from_kernel(regs) is_trap_from_kernel(regs, TASK_SIZE) +#define trap_on_user(regs) user_mode(regs) +#define trap_on_kernel(regs) kernel_mode(regs) + +/* macros to detect guest traps on host */ +/* Virtualization is off, so no guests exist, */ +/* so macros should always return 'false' */ +#define trap_on_guest(regs) false +/* trap occurred on guest user or kernel */ +#define guest_trap_on_host(regs) \ + false /* guest is not supported */ +/* trap occurred on guest kernel or user, but in host mode */ +/* and the trap can be due to guest or not */ +#define host_trap_on_guest(regs) \ + false /* guest is not supported */ +/* trap occurred on guest user or kernel or on host but due to guest */ +#define due_to_guest_trap_on_host(regs) \ + false /* guest is not supported */ +/* page fault is from intercept */ +#define due_to_intc_page_fault(vcpu, regs) \ + false /* guest is not supported */ +/* trap occurred on guest user only */ +#define guest_user_trap_on_host(regs) \ + false /* guest is not supported */ +/* trap occurred on guest kernel only */ +#define guest_kernel_trap_on_host(regs) \ + false /* guest is not supported */ + +/* macros to detect guest traps on guest and on host */ +/* trap on guest user, kernel or on host kernel due to guest */ +#define __guest_trap(regs) \ + false /* guest is not supported */ +/* macros to detect guest kernel traps on guest and on host */ +/* trap only on guest kernel */ +#define trap_from_guest_kernel(regs) \ + false /* guest is not supported */ + 
+#define __call_from_kernel(regs) from_kernel_mode((regs)->crs.cr1_lo) +#define __call_from_user(regs) from_user_mode((regs)->crs.cr1_lo) + +#define ON_HOST_KERNEL() true +#define call_from_user_mode(cr0_hi, cr1_lo) \ + is_call_from_user(cr0_hi, cr1_lo, ON_HOST_KERNEL()) +#define call_from_kernel_mode(cr0_hi, cr1_lo) \ + is_call_from_kernel(cr0_hi, cr1_lo, ON_HOST_KERNEL()) +#define call_from_user(regs) \ + call_from_user_mode((regs)->crs.cr0_hi, (regs)->crs.cr1_lo) +#define call_from_kernel(regs) \ + call_from_kernel_mode((regs)->crs.cr0_hi, (regs)->crs.cr1_lo) + +static inline e2k_addr_t +check_is_user_address(struct task_struct *task, e2k_addr_t address) +{ + return native_check_is_user_address(task, address); +} +#define IS_GUEST_USER_ADDRESS_TO_PVA(task, address) \ + NATIVE_IS_GUEST_USER_ADDRESS_TO_PVA(task, address) +#define IS_GUEST_ADDRESS_TO_HOST(address) \ + NATIVE_IS_GUEST_ADDRESS_TO_HOST(address) +#define print_host_user_address_ptes(mm, address) \ + native_print_host_user_address_ptes(mm, address) + +#define guest_mode_page_fault(regs, instr_page, addr) false + +#endif /* CONFIG_VIRTUALIZATION */ + +#ifndef CONFIG_VIRTUALIZATION +/* it is native kernel without virtualization support */ +#define LIGHT_HYPERCALL_MODE(regs) 0 /* hypercalls not supported */ +#define TI_GENERIC_HYPERCALL_MODE(thread_info) 0 /* hypercalls not supported */ +#define GENERIC_HYPERCALL_MODE() 0 /* hypercalls not supported */ +#define IN_LIGHT_HYPERCALL() 0 /* hypercalls not supported */ +#define IN_GENERIC_HYPERCALL() 0 /* hypercalls not supported */ +#define IN_HYPERCALL() 0 /* hypercalls not supported */ +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized) */ +#include +#elif defined(CONFIG_VIRTUALIZATION) || defined(CONFIG_PARAVIRT_GUEST) +/* It is native host kernel with virtualization support on */ +/* or it is paravirtualized host and guest kernel */ + +#define LIGHT_HYPERCALL_MODE(pt_regs) \ +({ \ + pt_regs_t *__regs = (pt_regs); \ + 
bool is_ligh_hypercall; \ + \ + is_ligh_hypercall = __regs->flags.light_hypercall; \ + is_ligh_hypercall; \ +}) +#define TI_LIGHT_HYPERCALL_MODE(thread_info) \ +({ \ + thread_info_t *__ti = (thread_info); \ + test_ti_thread_flag(__ti, TIF_LIGHT_HYPERCALL); \ +}) +#define IN_LIGHT_HYPERCALL() TI_LIGHT_HYPERCALL_MODE(current_thread_info()) +#define TI_GENERIC_HYPERCALL_MODE(thread_info) \ +({ \ + thread_info_t *__ti = (thread_info); \ + test_ti_thread_flag(__ti, TIF_GENERIC_HYPERCALL); \ +}) +#define GENERIC_HYPERCALL_MODE() \ + TI_GENERIC_HYPERCALL_MODE(current_thread_info()) +#define IN_GENERIC_HYPERCALL() GENERIC_HYPERCALL_MODE() +#define IN_HYPERCALL() \ + (IN_LIGHT_HYPERCALL() || IN_GENERIC_HYPERCALL()) +#else /* ! CONFIG_VIRTUALIZATION && ! CONFIG_PARAVIRT_GUEST */ + #error "Unknown virtualization type" +#endif /* ! CONFIG_VIRTUALIZATION */ + +#ifdef CONFIG_KVM_HOST_MODE +/* It is native host kernel with virtualization support on */ + +/* + * Additional context for paravirtualized guest to save/restore at + * 'signal_stack_context' structure to handle traps/syscalls by guest + */ + +typedef struct pv_vcpu_ctxt { + inject_caller_t inject_from; /* reason of injection */ + int trap_no; /* number of recursive trap */ + u64 sys_rval; /* return value of guest system call */ + e2k_psr_t guest_psr; /* guest PSR state before trap */ + bool irq_under_upsr; /* is IRQ control under UPSR?
*/ + bool in_sig_handler; /* signal handler in progress */ + unsigned long sigreturn_entry; /* guest signal return start IP */ +} pv_vcpu_ctxt_t; + +#else /* !CONFIG_KVM_HOST_MODE */ +/* it is native kernel without any virtualization */ +/* or pure guest kernel (not paravirtualized) */ + +typedef struct pv_vcpu_ctxt { + /* empty structure */ +} pv_vcpu_ctxt_t; + +#endif /* CONFIG_KVM_HOST_MODE */ + +#ifdef CONFIG_VIRTUALIZATION + +static inline struct pt_regs *find_guest_user_regs(struct pt_regs *regs) +{ + struct pt_regs *guser_regs = regs; + do { + if (guest_user_mode(guser_regs)) + break; + if (guser_regs->next != NULL && + guser_regs->next <= guser_regs) { + /* pt_regs allocated only at the stack, stack grows */ + /* down, so next structure can be only above current */ + pr_err("%s(): invalid list of pt_regs structures: " + "next regs %px below current %px\n", + __func__, guser_regs->next, guser_regs); + WARN_ON(true); + guser_regs = NULL; + break; + } + guser_regs = guser_regs->next; + } while (guser_regs); + + return guser_regs; +} +#else /* ! 
CONFIG_VIRTUALIZATION */ +static inline struct pt_regs *find_guest_user_regs(struct pt_regs *regs) +{ + return NULL; +} +#endif /* CONFIG_VIRTUALIZATION */ + + +#if defined(CONFIG_SMP) +extern unsigned long profile_pc(struct pt_regs *regs); +#else +#define profile_pc(regs) instruction_pointer(regs) +#endif +extern void show_regs(struct pt_regs *); +extern int syscall_trace_entry(struct pt_regs *regs); +extern void syscall_trace_leave(struct pt_regs *regs); + +#endif /* __KERNEL__ */ +#endif /* _E2K_KVM_PTRACE_H */ + diff --git a/arch/e2k/include/asm/kvm/pv-emul.h b/arch/e2k/include/asm/kvm/pv-emul.h new file mode 100644 index 000000000000..02de00581fe8 --- /dev/null +++ b/arch/e2k/include/asm/kvm/pv-emul.h @@ -0,0 +1,287 @@ +#ifndef __KVM_E2K_PV_EMUL_H +#define __KVM_E2K_PV_EMUL_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include + +#include + +#ifdef CONFIG_VIRTUALIZATION +static __always_inline void +kvm_set_intc_emul_flag(pt_regs_t *regs) +{ + regs->flags.trap_as_intc_emul = 1; +} + +static __always_inline bool +kvm_test_intc_emul_flag(pt_regs_t *regs) +{ + return regs->flags.trap_as_intc_emul; +} + +static __always_inline void +kvm_clear_intc_emul_flag(pt_regs_t *regs) +{ + regs->flags.trap_as_intc_emul = 0; +} + +static __always_inline bool +kvm_test_and_clear_intc_emul_flag(pt_regs_t *regs) +{ + bool is_emul = kvm_test_intc_emul_flag(regs); + kvm_clear_intc_emul_flag(regs); + return is_emul; +} +#ifdef CONFIG_KVM_HOST_MODE +/* it is host kernel with virtualization support */ +static inline bool +host_test_intc_emul_mode(const struct pt_regs *regs) +{ + if (likely(native_current_thread_info()->vcpu == NULL)) { + return false; + } else if (regs == NULL) { + return false; + } else if (!kvm_test_intc_emul_flag((pt_regs_t *)regs)) { + /* host is not in interception emulation mode */ + return false; + } + + return true; +} + +extern void pv_vcpu_switch_to_host_from_intc(thread_info_t *ti); +extern void pv_vcpu_return_to_intc_mode(thread_info_t *ti, 
struct kvm_vcpu *vcpu); + +static inline void return_to_pv_vcpu_intc(struct kvm_vcpu *vcpu) +{ + pv_vcpu_return_to_intc_mode(current_thread_info(), vcpu); +} + +#else /* !CONFIG_KVM_HOST_MODE */ +/* it is not host kernel */ +static inline bool +host_test_intc_emul_mode(const pt_regs_t *regs) +{ + return false; +} + +static inline __interrupt void +pv_vcpu_switch_to_host_from_intc(thread_info_t *ti) +{ + /* nothing to do */ +} +#endif /* CONFIG_KVM_HOST_MODE */ + +static inline int kvm_get_vcpu_intc_TIRs_num(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.intc_ctxt.nr_TIRs; +} + +static inline bool kvm_check_is_vcpu_intc_TIRs_empty(struct kvm_vcpu *vcpu) +{ + if (kvm_get_vcpu_intc_TIRs_num(vcpu) < 0) + return true; + /* TIRs have traps */ + return false; +} + +static inline bool +kvm_check_is_vcpu_guest_stacks_empty(struct kvm_vcpu *vcpu, pt_regs_t *regs) +{ + if (!regs->g_stacks_valid) { + /* guest kernel stacks is not even inited */ + return true; + } + return !!(regs->g_stacks.psp_hi.PSP_hi_ind == 0 && + regs->g_stacks.pcsp_hi.PCSP_hi_ind == 0 && + !regs->need_inject); +} + +static inline bool +kvm_is_vcpu_guest_stacks_pending(struct kvm_vcpu *vcpu, pt_regs_t *regs) +{ + if (!kvm_check_is_vcpu_guest_stacks_empty(vcpu, regs)) { + if (!regs->g_stacks_active) { + return true; + } + } + return false; +} + +static inline void +kvm_clear_vcpu_guest_stacks_pending(struct kvm_vcpu *vcpu, pt_regs_t *regs) +{ + if (regs->g_stacks_valid) { + regs->g_stacks_active = true; + } +} + +extern noinline void insert_pv_vcpu_traps(thread_info_t *ti, pt_regs_t *regs); +extern void insert_pv_vcpu_sigreturn(struct kvm_vcpu *vcpu, + pv_vcpu_ctxt_t *vcpu_ctxt, pt_regs_t *regs); + +extern void kvm_emulate_pv_vcpu_intc(struct thread_info *ti, pt_regs_t *regs, + trap_pt_regs_t *trap); +extern void return_from_pv_vcpu_intc(struct thread_info *ti, pt_regs_t *regs); +extern bool pv_vcpu_syscall_intc(thread_info_t *ti, pt_regs_t *regs); + +static inline bool kvm_vcpu_in_hypercall(struct kvm_vcpu 
*vcpu) +{ + return vcpu->arch.sw_ctxt.in_hypercall; +} + +static inline gthread_info_t *pv_vcpu_get_gti(struct kvm_vcpu *vcpu) +{ + if (likely(!vcpu->arch.is_hv && vcpu->arch.is_pv)) { + return vcpu->arch.gti; + } + return NULL; +} + +static inline void pv_vcpu_set_gti(struct kvm_vcpu *vcpu, gthread_info_t *gti) +{ + if (likely(!vcpu->arch.is_hv && vcpu->arch.is_pv)) { + vcpu->arch.gti = gti; + } else { + KVM_BUG_ON(true); + } +} + +static inline int pv_vcpu_get_gpid_id(struct kvm_vcpu *vcpu) +{ + gthread_info_t *gti; + + gti = pv_vcpu_get_gti(vcpu); + if (likely(gti != NULL)) { + return gti->gpid->nid.nr; + } else { + return -EINVAL; + } +} + +static inline gmm_struct_t *pv_mmu_get_init_gmm(struct kvm *kvm) +{ + return kvm->arch.init_gmm; +} + +static inline gmm_struct_t *pv_vcpu_get_init_gmm(struct kvm_vcpu *vcpu) +{ + return pv_mmu_get_init_gmm(vcpu->kvm); +} + +static inline bool pv_vcpu_is_init_gmm(struct kvm_vcpu *vcpu, gmm_struct_t *gmm) +{ + if (likely(!vcpu->arch.is_hv && vcpu->arch.is_pv)) { + return gmm == pv_vcpu_get_init_gmm(vcpu); + } else { + KVM_BUG_ON(true); + } + return false; +} + +static inline void pv_vcpu_clear_gmm(struct kvm_vcpu *vcpu) +{ + if (likely(!vcpu->arch.is_hv && vcpu->arch.is_pv)) { + vcpu->arch.mmu.gmm = NULL; + } else { + KVM_BUG_ON(true); + } +} + +static inline gmm_struct_t *pv_vcpu_get_gmm(struct kvm_vcpu *vcpu) +{ + if (likely(!vcpu->arch.is_hv && vcpu->arch.is_pv)) { + if (vcpu->arch.mmu.gmm != NULL) { + return vcpu->arch.mmu.gmm; + } else { + return pv_vcpu_get_init_gmm(vcpu); + } + } else { + KVM_BUG_ON(true); + } + return NULL; +} + +static inline void pv_vcpu_set_gmm(struct kvm_vcpu *vcpu, gmm_struct_t *gmm) +{ + if (likely(!vcpu->arch.is_hv && vcpu->arch.is_pv)) { + if (gmm != NULL && !pv_vcpu_is_init_gmm(vcpu, gmm)) { + vcpu->arch.mmu.gmm = gmm; + } else { + pv_vcpu_clear_gmm(vcpu); + } + } else { + KVM_BUG_ON(true); + } +} + +static inline gmm_struct_t *pv_vcpu_get_active_gmm(struct kvm_vcpu *vcpu) +{ + if 
(likely(!vcpu->arch.is_hv && vcpu->arch.is_pv)) { + return vcpu->arch.mmu.active_gmm; + } else { + KVM_BUG_ON(true); + } + return NULL; +} + +static inline void +pv_vcpu_set_active_gmm(struct kvm_vcpu *vcpu, gmm_struct_t *gmm) +{ + if (likely(!vcpu->arch.is_hv && vcpu->arch.is_pv)) { + vcpu->arch.mmu.active_gmm = gmm; + } else { + KVM_BUG_ON(true); + } +} + +static inline mm_context_t *pv_vcpu_get_gmm_context(struct kvm_vcpu *vcpu) +{ + return &pv_vcpu_get_gmm(vcpu)->context; +} + +#else /* !CONFIG_VIRTUALIZATION */ +static __always_inline void +kvm_set_intc_emul_flag(pt_regs_t *regs) +{ +} + +static __always_inline bool +kvm_test_intc_emul_flag(pt_regs_t *regs) +{ + return false; +} + +static __always_inline void +kvm_clear_intc_emul_flag(pt_regs_t *regs) +{ +} + +static __always_inline bool +kvm_test_and_clear_intc_emul_flag(pt_regs_t *regs) +{ + return false; +} +static inline bool +host_test_intc_emul_mode(const pt_regs_t *regs) +{ + return false; +} +static inline void insert_pv_vcpu_traps(thread_info_t *ti, pt_regs_t *regs) +{ +} + +static inline bool kvm_vcpu_in_hypercall(struct kvm_vcpu *vcpu) +{ + return false; +} + +#endif /* CONFIG_VIRTUALIZATION */ + +#endif /* ! __ASSEMBLY__ */ + +#endif /* __KVM_E2K_PV_EMUL_H */ diff --git a/arch/e2k/include/asm/kvm/regs_state.h b/arch/e2k/include/asm/kvm/regs_state.h new file mode 100644 index 000000000000..01970e15dd53 --- /dev/null +++ b/arch/e2k/include/asm/kvm/regs_state.h @@ -0,0 +1,458 @@ +#ifndef _E2K_KVM_REGS_STATE_H +#define _E2K_KVM_REGS_STATE_H + +#include +#include +#include +#include +#include + +#ifndef __ASSEMBLY__ +#include +#include +#include +#include +#include +#include +#include +#endif /* __ASSEMBLY__ */ + +#ifdef CONFIG_VIRTUALIZATION + +#undef DEBUG_GREGS_MODE +#undef DebugGREGS +#define DEBUG_GREGS_MODE 0 /* global registers save/restore */ +#define DebugGREGS(fmt, args...) 
\ +({ \ + if (DEBUG_GREGS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#define DO_SAVE_GUEST_GREGS_EXCEPT_KERNEL_V2(gregs) \ + DO_SAVE_GREGS_ON_MASK(gregs, E2K_ISET_V2, GUEST_GREGS_MASK) +#define DO_SAVE_GUEST_GREGS_EXCEPT_KERNEL_V5(gregs) \ + DO_SAVE_GREGS_ON_MASK(gregs, E2K_ISET_V5, GUEST_GREGS_MASK) + +#define DO_SAVE_GREGS_EXCEPT_HOST_V2(gregs) \ + DO_SAVE_GREGS_ON_MASK(gregs, E2K_ISET_V2, GUEST_GREGS_MASK) +#define DO_SAVE_GREGS_EXCEPT_HOST_V5(gregs) \ + DO_SAVE_GREGS_ON_MASK(gregs, E2K_ISET_V5, GUEST_GREGS_MASK) + +#define DO_SAVE_GUEST_LOCAL_GREGS_EXCEPT_KERNEL_V2(gregs) \ + DO_SAVE_GREGS_ON_MASK(gregs, E2K_ISET_V2, \ + GLOBAL_GREGS_USER_MASK | GUEST_GREGS_MASK) +#define DO_SAVE_GUEST_LOCAL_GREGS_EXCEPT_KERNEL_V5(gregs) \ + DO_SAVE_GREGS_ON_MASK(gregs, E2K_ISET_V5, \ + GLOBAL_GREGS_USER_MASK | GUEST_GREGS_MASK) + +#define DO_RESTORE_GUEST_GREGS_EXCEPT_KERNEL_V2(gregs) \ + DO_RESTORE_GREGS_ON_MASK(gregs, E2K_ISET_V2, GUEST_GREGS_MASK) +#define DO_RESTORE_GUEST_GREGS_EXCEPT_KERNEL_V5(gregs) \ + DO_RESTORE_GREGS_ON_MASK(gregs, E2K_ISET_V5, GUEST_GREGS_MASK) + +#define DO_RESTORE_GREGS_EXCEPT_HOST_V2(gregs) \ + DO_RESTORE_GREGS_ON_MASK(gregs, E2K_ISET_V2, GUEST_GREGS_MASK) +#define DO_RESTORE_GREGS_EXCEPT_HOST_V5(gregs) \ + DO_RESTORE_GREGS_ON_MASK(gregs, E2K_ISET_V5, GUEST_GREGS_MASK) + +#define DO_RESTORE_GUEST_LOCAL_GREGS_EXCEPT_KERNEL_V2(gregs) \ + DO_RESTORE_GREGS_ON_MASK(gregs, E2K_ISET_V2, \ + GLOBAL_GREGS_USER_MASK | GUEST_GREGS_MASK) +#define DO_RESTORE_GUEST_LOCAL_GREGS_EXCEPT_KERNEL_V5(gregs) \ + DO_RESTORE_GREGS_ON_MASK(gregs, E2K_ISET_V5, \ + GLOBAL_GREGS_USER_MASK | GUEST_GREGS_MASK) + +#ifndef CONFIG_E2K_ISET_VER +#define SAVE_GUEST_GREGS_EXCEPT_KERNEL(gregs) \ + (machine.host.save_guest_gregs(gregs)) +#define RESTORE_GUEST_GREGS_EXCEPT_KERNEL(gregs) \ + (machine.host.restore_guest_gregs(gregs)) +#elif CONFIG_E2K_ISET_VER < 5 +#define SAVE_GUEST_GREGS_EXCEPT_KERNEL(gregs) \ + DO_SAVE_GUEST_GREGS_EXCEPT_KERNEL_V2((gregs)->g) +#define 
RESTORE_GUEST_GREGS_EXCEPT_KERNEL(gregs) \ + DO_RESTORE_GUEST_GREGS_EXCEPT_KERNEL_V2((gregs)->g) +#else /* CONFIG_E2K_ISET_VER >= 5 */ +#define SAVE_GUEST_GREGS_EXCEPT_KERNEL(gregs) \ + DO_SAVE_GUEST_GREGS_EXCEPT_KERNEL_V5((gregs)->g) +#define RESTORE_GUEST_GREGS_EXCEPT_KERNEL(gregs) \ + DO_RESTORE_GUEST_GREGS_EXCEPT_KERNEL_V5((gregs)->g) +#endif /* CONFIG_E2K_ISET_VER */ + +#define SET_GUEST_USER_GREG(gregs, gr_no, gr_main, gr_ext) \ +({ \ + global_regs_t *greg_to = (gregs); \ + \ + greg_to->g[(gr_no)].base = (gr_main); \ + greg_to->g[(gr_no)].ext = (gr_ext); \ +}) +#define COPY_GUEST_USER_GREG_ON_MASK(gr_from, gr_to, gr_mask) \ +({ \ + global_regs_t *greg_from = (gr_from); \ + global_regs_t *greg_to = (gr_to); \ + unsigned long cur_mask = (gr_mask); \ + int gr_no = 0; \ + \ + while (cur_mask) { \ + if (cur_mask & 0x3UL) { \ + SAVE_GUEST_USER_GREG_PAIR( \ + greg_from->g, gr_no, greg_to->g, gr_no); \ + } \ + gr_no += 2; \ + cur_mask >>= 2; \ + } \ +}) +#define SAVE_GUEST_USER_GREG_PAIR(__ti_gregs, ti_gr_no, \ + __gti_gregs, gti_gr_no) \ +({ \ + NATIVE_MOVE_TAGGED_QWORD( \ + (e2k_addr_t)&((__ti_gregs)[ti_gr_no + 0].base), \ + (e2k_addr_t)&((__ti_gregs)[ti_gr_no + 1].base), \ + (e2k_addr_t)&((__gti_gregs)[gti_gr_no + 0].base), \ + (e2k_addr_t)&((__gti_gregs)[gti_gr_no + 1].base)); \ + (__gti_gregs)[(gti_gr_no) + 0].ext = \ + (__ti_gregs)[(ti_gr_no) + 0].ext; \ + (__gti_gregs)[(gti_gr_no) + 1].ext = \ + (__ti_gregs)[(ti_gr_no) + 1].ext; \ +}) +#define RESTORE_GUEST_USER_GREG_PAIR(__ti_gregs, ti_gr_no, \ + __gti_gregs, gti_gr_no) \ +({ \ + NATIVE_MOVE_TAGGED_QWORD( \ + (e2k_addr_t)&((__gti_gregs)[gti_gr_no + 0].base), \ + (e2k_addr_t)&((__gti_gregs)[gti_gr_no + 1].base), \ + (e2k_addr_t)&((__ti_gregs)[ti_gr_no + 0].base), \ + (e2k_addr_t)&((__ti_gregs)[ti_gr_no + 1].base)); \ + (__ti_gregs)[(ti_gr_no) + 0].ext = \ + (__gti_gregs)[(gti_gr_no) + 0].ext; \ + (__ti_gregs)[(ti_gr_no) + 1].ext = \ + (__gti_gregs)[(gti_gr_no) + 1].ext; \ +}) +#define 
COPY_GUEST_KERNEL_GREGS_FROM_TI(__ti_gregs, __gti_regs) \ +({ \ + SAVE_GUEST_USER_GREG_PAIR( \ + __ti_gregs, CURRENT_GREGS_PAIRS_INDEX_LO, \ + __gti_regs, CURRENT_GREGS_PAIRS_INDEX_LO); \ + SAVE_GUEST_USER_GREG_PAIR( \ + __ti_gregs, CPU_GREGS_PAIRS_INDEX_LO, \ + __gti_regs, CPU_GREGS_PAIRS_INDEX_LO); \ +}) +#define COPY_GUEST_KERNEL_GREGS_TO_TI(__ti_gregs, __gti_regs) \ +({ \ + RESTORE_GUEST_USER_GREG_PAIR( \ + __ti_gregs, CURRENT_GREGS_PAIRS_INDEX_LO, \ + __gti_regs, CURRENT_GREGS_PAIRS_INDEX_LO); \ + RESTORE_GUEST_USER_GREG_PAIR( \ + __ti_gregs, CPU_GREGS_PAIRS_INDEX_LO, \ + __gti_regs, CPU_GREGS_PAIRS_INDEX_LO); \ +}) + +#define SAVE_GUEST_KERNEL_GREGS_AT_GTI(__ti, __gti, __gregs) \ +({ \ + DebugGREGS("now: gregs_active %d gregs_valid %d " \ + "gregs_for_currents_valid %d\n", \ + __gti->gregs_active, __gti->gregs_valid, \ + __gti->gregs_for_currents_valid); \ + WARN_ON(__gti->gregs_active && __gti->gregs_for_currents_valid);\ + SAVE_GUEST_USER_GREG_PAIR( \ + (__ti)->k_gregs.g, CURRENT_GREGS_PAIRS_INDEX_LO, \ + (__gregs)->g, CURRENT_GREGS_PAIR_LO); \ + SAVE_GUEST_USER_GREG_PAIR( \ + (__ti)->k_gregs.g, CPU_GREGS_PAIRS_INDEX_LO, \ + (__gregs)->g, CPU_GREGS_PAIR_LO); \ + __gti->gregs_for_currents_valid = 1; \ + DebugGREGS("set gregs_for_currents_valid %d\n", \ + __gti->gregs_for_currents_valid); \ +}) +#define RESTORE_GUEST_KERNEL_GREGS_AT_TI(__ti, __gti, __gregs) \ +({ \ + DebugGREGS("now: gregs_active %d gregs_valid %d " \ + "gregs_for_currents_valid %d\n", \ + __gti->gregs_active, __gti->gregs_valid, \ + __gti->gregs_for_currents_valid); \ + WARN_ON(__gti->gregs_active && !__gti->gregs_for_currents_valid); \ + RESTORE_GUEST_USER_GREG_PAIR( \ + (__ti)->k_gregs.g, CURRENT_GREGS_PAIRS_INDEX_LO, \ + (__gregs)->g, CURRENT_GREGS_PAIR_LO); \ + RESTORE_GUEST_USER_GREG_PAIR( \ + (__ti)->k_gregs.g, CPU_GREGS_PAIRS_INDEX_LO, \ + (__gregs)->g, CPU_GREGS_PAIR_LO); \ + __gti->gregs_for_currents_valid = 0; \ + DebugGREGS("clear gregs_for_currents_valid %d\n", \ + 
__gti->gregs_for_currents_valid); \ +}) + +#define DO_INIT_GUEST_USER_UPSR(__gti, __upsr) \ +({ \ + (__gti)->u_upsr = __upsr; \ + (__gti)->u_upsr_valid = true; \ +}) +#define DO_INIT_GUEST_KERNEL_UPSR(__gti, __upsr) \ +({ \ + (__gti)->k_upsr = __upsr; \ + (__gti)->k_upsr_valid = true; \ +}) +#define DO_SAVE_GUEST_USER_UPSR(__gti, __upsr) \ +({ \ + GTI_BUG_ON((__gti)->u_upsr_valid); \ + DO_INIT_GUEST_USER_UPSR(__gti, __upsr); \ +}) +#define DO_SAVE_GUEST_KERNEL_UPSR(__gti, __upsr) \ +({ \ + GTI_BUG_ON((__gti)->k_upsr_valid); \ + DO_INIT_GUEST_KERNEL_UPSR(__gti, __upsr); \ +}) +#define SAVE_GUEST_USER_UPSR_AT_GTI(__ti, __gti) \ + DO_SAVE_GUEST_USER_UPSR(__gti, (__ti)->upsr) +#define SAVE_GUEST_KERNEL_UPSR_AT_GTI(__ti, __gti) \ + DO_SAVE_GUEST_KERNEL_UPSR(__gti, (__ti)->upsr) +#define DO_RESTORE_GUEST_USER_UPSR(__gti, upsr_value) \ +({ \ + GTI_BUG_ON(!(__gti)->u_upsr_valid); \ + (upsr_value) = (__gti)->u_upsr; \ + (__gti)->u_upsr_valid = false; \ +}) +#define DO_RESTORE_GUEST_KERNEL_UPSR(__gti, upsr_value) \ +({ \ + GTI_BUG_ON(!(__gti)->k_upsr_valid); \ + (upsr_value) = (__gti)->k_upsr; \ + (__gti)->k_upsr_valid = false; \ +}) +#define RESTORE_GUEST_USER_UPSR_AT_TI(__ti, __gti) \ + DO_RESTORE_GUEST_USER_UPSR(__gti, (__ti)->upsr) +#define RESTORE_GUEST_KERNEL_UPSR_AT_TI(__ti, __gti) \ + DO_RESTORE_GUEST_KERNEL_UPSR(__gti, (__ti)->upsr) + +/* It is native host/guest kernel with virtualization support */ +/* or paravirtualized host and guest kernel */ +/* Save/restore global registers used by host to support guest */ +#define SAVE_GUEST_HOST_GREGS_AT_TI(__greg_pair, __gl_regs) \ +({ \ + SAVE_GUEST_USER_GREG_PAIR( \ + __greg_pair, HOST_VCPU_STATE_GREGS_PAIRS_INDEX_LO, \ + __gl_regs, VCPU_STATE_GREGS_PAIR_LO); \ +}) +#define RESTORE_GUEST_HOST_GREGS_AT_TI(__greg_pair, __gl_regs) \ +({ \ + RESTORE_GUEST_USER_GREG_PAIR( \ + __greg_pair, HOST_VCPU_STATE_GREGS_PAIRS_INDEX_LO, \ + __gl_regs, VCPU_STATE_GREGS_PAIR_LO); \ +}) + +#define SAVE_GUEST_USER_REGS_AT_GTI(thread_info, 
gthread_info, save_upsr) \ +({ \ + thread_info_t *__ti = (thread_info); \ + gthread_info_t *__gti = (gthread_info); \ + global_regs_t *__gregs = &__gti->gregs; \ + \ + if (test_ti_thread_flag(__ti, TIF_VIRTUALIZED_GUEST)) { \ + SAVE_GUEST_KERNEL_GREGS_AT_GTI(__ti, __gti, __gregs); \ + SAVE_GUEST_HOST_GREGS_AT_TI(__ti->h_gregs.g, \ + __gregs->g); \ + if (save_upsr) { \ + SAVE_GUEST_USER_UPSR_AT_GTI(__ti, __gti); \ + } \ + } \ +}) +#define RESTORE_GUEST_USER_REGS_AT_TI(thread_info, gthread_info, restore_upsr) \ +({ \ + thread_info_t *__ti = (thread_info); \ + gthread_info_t *__gti = (gthread_info); \ + host_gregs_t *__greg_pair = &__ti->h_gregs; \ + global_regs_t *__gregs = &__gti->gregs; \ + \ + if (test_ti_thread_flag(__ti, TIF_VIRTUALIZED_GUEST)) { \ + RESTORE_GUEST_KERNEL_GREGS_AT_TI(__ti, __gti, __gregs); \ + RESTORE_GUEST_HOST_GREGS_AT_TI(__greg_pair->g, \ + __gregs->g); \ + if (restore_upsr) { \ + RESTORE_GUEST_USER_UPSR_AT_TI(__ti, __gti); \ + } \ + } \ +}) +#define KVM_INIT_GUEST_USER_UPSR(thread_info, __upsr) \ +({ \ + thread_info_t *__ti = (thread_info); \ + gthread_info_t *__gti = __ti->gthread_info; \ + \ + DO_INIT_GUEST_USER_UPSR(__gti, __upsr); \ +}) +#define KVM_SAVE_GUEST_KERNEL_UPSR(thread_info, __upsr) \ +({ \ + thread_info_t *__ti = (thread_info); \ + gthread_info_t *__gti = __ti->gthread_info; \ + \ + DO_SAVE_GUEST_KERNEL_UPSR(__gti, __upsr); \ +}) +#define KVM_SAVE_GUEST_USER_UPSR(thread_info, __upsr) \ +({ \ + thread_info_t *__ti = (thread_info); \ + gthread_info_t *__gti = __ti->gthread_info; \ + \ + DO_SAVE_GUEST_USER_UPSR(__gti, __upsr); \ +}) +#define KVM_RESTORE_GUEST_KERNEL_UPSR(thread_info) \ +({ \ + thread_info_t *__ti = (thread_info); \ + gthread_info_t *__gti = __ti->gthread_info; \ + e2k_upsr_t __upsr; \ + \ + DO_RESTORE_GUEST_KERNEL_UPSR(__gti, __upsr); \ + NATIVE_WRITE_UPSR_REG(__upsr); \ +}) +#define KVM_RESTORE_GUEST_USER_UPSR(thread_info) \ +({ \ + thread_info_t *__ti = (thread_info); \ + gthread_info_t *__gti = 
__ti->gthread_info; \ + e2k_upsr_t __upsr; \ + \ + DO_RESTORE_GUEST_USER_UPSR(__gti, __upsr); \ + NATIVE_WRITE_UPSR_REG(__upsr); \ +}) +#define KVM_SET_GUEST_USER_INITIAL_UPSR(thread_info) \ +({ \ + NATIVE_WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_DISABLED)); \ + KVM_RESTORE_GUEST_USER_UPSR(thread_info); \ +}) + +#define SAVE_GUEST_USER_GLOBAL_REGISTERS(gthread_info) \ +({ \ + gthread_info_t *gti = (gthread_info); \ + global_regs_t *gregs = >i->gregs; \ + \ + DebugGREGS("now: gregs_active %d gregs_valid %d " \ + "gregs_for_currents_valid %d\n", \ + gti->gregs_active, gti->gregs_valid, \ + gti->gregs_for_currents_valid); \ + /* user state of gregs which now are under current pointers */ \ + /* should be saved into current guest thread info structure */ \ + WARN_ON(gti->gregs_active && !gti->gregs_for_currents_valid); \ + \ + /* save current state of global registers excluding gregs */ \ + /* used by kernel */ \ + gregs->bgr = NATIVE_READ_BGR_REG(); \ + init_BGR_reg(); /* enable whole GRF */ \ + SAVE_GUEST_GREGS_EXCEPT_KERNEL(gregs); \ + NATIVE_WRITE_BGR_REG(gregs->bgr); \ + gti->gregs_valid = 1; \ + DebugGREGS("set gregs_valid %d\n", \ + gti->gregs_valid); \ +}) +#define SAVE_GUEST_USER_ALL_GLOBAL_REGISTERS(gthread_info, gti_base) \ +({ \ + gthread_info_t *__gti = (gthread_info); \ + gthread_info_t *src_gti = (gti_base); \ + global_regs_t *__gregs = &__gti->gregs; \ + global_regs_t *src_gregs = &src_gti->gregs; \ + \ + SAVE_GUEST_USER_GLOBAL_REGISTERS(__gti); \ + WARN_ON(__gti->gregs_for_currents_valid); \ + WARN_ON(!src_gti->gregs_for_currents_valid); \ + /* global register which now used by kernel */ \ + /* saved from parent global registers state */ \ + COPY_GUEST_USER_GREG_ON_MASK(src_gregs, __gregs, \ + GUEST_GREGS_MASK); \ + __gti->gregs_for_currents_valid = 1; \ + DebugGREGS("set gregs_for_currents_valid %d\n", \ + __gti->gregs_for_currents_valid); \ +}) + +#define RESTORE_GUEST_USER_GLOBAL_REGISTERS(gthread_info) \ +({ \ + gthread_info_t *gti = (gthread_info); 
\ + global_regs_t *gregs = >i->gregs; \ + \ + DebugGREGS("now: gregs_active %d gregs_valid %d " \ + "gregs_for_currents_valid %d\n", \ + gti->gregs_active, gti->gregs_valid, \ + gti->gregs_for_currents_valid); \ + WARN_ON(gti->gregs_active && !gti->gregs_valid); \ + WARN_ON(gti->gregs_active && !gti->gregs_for_currents_valid); \ + \ + /* restore current state of global registers excluding gregs */ \ + /* used by kernel */ \ + init_BGR_reg(); /* enable whole GRF */ \ + RESTORE_GUEST_GREGS_EXCEPT_KERNEL(gregs); \ + NATIVE_WRITE_BGR_REG(gregs->bgr); \ + gti->gregs_valid = 0; \ + DebugGREGS("clear gregs_valid %d\n", \ + gti->gregs_valid); \ +}) +#define SAVE_PV_VCPU_GLOBAL_REGISTERS(gthread_info) \ +do { \ + gthread_info_t *gti = (gthread_info); \ + global_regs_t *gregs = >i->sw_regs.gregs; \ + \ + machine.save_gregs_dirty_bgr(gregs); \ +} while (false) + +#define RESTORE_PV_VCPU_GLOBAL_REGISTERS(gthread_info) \ +do { \ + gthread_info_t *gti = (gthread_info); \ + global_regs_t *gregs = >i->sw_regs.gregs; \ + \ + machine.restore_gregs(gregs); \ +} while (false) + +#endif /* CONFIG_VIRTUALIZATION */ + +#ifndef CONFIG_VIRTUALIZATION +/* it is native kernel without any virtualization */ +#define NATIVE_RESTORE_USER_CUT_REGS(ti, regs) /* CUTD is already set */ +#define RESTORE_USER_CUT_REGS(ti, regs, in_syscall) \ + NATIVE_RESTORE_USER_CUT_REGS(ti, regs) +#elif defined(CONFIG_KVM_HOST_MODE) +/* it is native host kernel with virtualization support */ +#define HOST_RESTORE_USER_CUT_REGS(ti, regs, in_syscall) \ +do { \ + e2k_cutd_t cutd; \ + struct kvm_vcpu *vcpu; \ +\ + if (likely(!test_ti_status_flag((ti), TS_HOST_AT_VCPU_MODE))) { \ + /* host at native or hypervisor mode */ \ + /* so CUT context is alredy set */ \ + break; \ + } \ + vcpu = (ti)->vcpu; \ + if (pv_vcpu_trap_on_guest_kernel(regs)) { \ + /* guest kernel return to kernel, need not switch context */ \ + break; \ + } else if ((in_syscall) ? 
\ + host_return_to_injected_guest_syscall((ti), (regs)) \ + : \ + host_return_to_injected_guest_trap(ti, (regs))) { \ + /* it need switch to guest kernel context */ \ + cutd = vcpu->arch.hw_ctxt.sh_oscutd; \ + } else { \ + /* it need switch to guest user context */ \ + cutd = pv_vcpu_get_gti(vcpu)->stack_regs.cutd; \ + } \ + NATIVE_NV_NOIRQ_WRITE_CUTD_REG_VALUE(cutd.CUTD_reg); \ +} while (false) +#define RESTORE_USER_CUT_REGS(ti, regs, in_syscall) \ + HOST_RESTORE_USER_CUT_REGS(ti, regs, in_syscall) +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravrtualized) */ +#include +#else + #error "Undefined virtualization mode" +#endif /* !CONFIG_VIRTUALIZATION */ + +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_GUEST_KERNEL) +#define SAVE_GUEST_VCPU_STATE_GREGS(regs) \ +({ \ + DO_SAVE_DGREG(GUEST_VCPU_STATE_GREG, \ + regs->guest_vcpu_state_greg); \ +}) +#define RESTORE_GUEST_VCPU_STATE_GREGS(regs) \ +({ \ + DO_RESTORE_DGREG(GUEST_VCPU_STATE_GREG, \ + regs->guest_vcpu_state_greg); \ +}) +#else /* ! CONFIG_KVM && ! CONFIG_KVM_GUEST_KERNEL */ +#define SAVE_GUEST_VCPU_STATE_GREGS(regs) +#define RESTORE_GUEST_VCPU_STATE_GREGS(regs) +#endif /* CONFIG_KVM || CONFIG_KVM_GUEST_KERNEL */ + +#endif /* _E2K_KVM_REGS_STATE_H */ + diff --git a/arch/e2k/include/asm/kvm/runstate.h b/arch/e2k/include/asm/kvm/runstate.h new file mode 100644 index 000000000000..e54dda6d7367 --- /dev/null +++ b/arch/e2k/include/asm/kvm/runstate.h @@ -0,0 +1,479 @@ +#ifndef __KVM_E2K_RUNSTATE_H +#define __KVM_E2K_RUNSTATE_H + +#if defined(CONFIG_VIRTUALIZATION) && !defined(CONFIG_KVM_GUEST_KERNEL) +/* it is host kernel with virtualization support */ +/* or paravirtualized host and guest kernel */ + +#include +#include + +/* + * VCPU state structure contains virtual CPU running state info. + * The structure is common for host and guest and can (and should) + * be accessed by both. 
+ * Guest access do through global pointer which should be load on some global + * register (GUEST_VCPU_STATE_GREG) or on special CPU register GD. + * But GD can be used only if guest kernel run as protected task + */ + +/* + * Basic accessing functions to/from virtual CPU running state info structure + * (see asm/kvm/guest.h) on host. + */ +static inline kvm_runstate_info_t * +kvm_get_vcpu_runstate(struct kvm_vcpu *vcpu) +{ + kvm_runstate_info_t *runstate; + + runstate = &(vcpu->arch.kmap_vcpu_state->runstate); + return runstate; +} +static inline int +kvm_get_guest_vcpu_runstate(struct kvm_vcpu *vcpu) +{ + return kvm_get_vcpu_runstate(vcpu)->state; +} +static inline void +kvm_set_guest_vcpu_runstate(struct kvm_vcpu *vcpu, int state) +{ + kvm_get_vcpu_runstate(vcpu)->state = state; +} +static inline uint64_t +kvm_get_guest_vcpu_runstate_entry_time(struct kvm_vcpu *vcpu) +{ + return kvm_get_vcpu_runstate(vcpu)->state_entry_time; +} +static inline void +kvm_set_guest_vcpu_runstate_entry_time(struct kvm_vcpu *vcpu, uint64_t time) +{ + kvm_get_vcpu_runstate(vcpu)->state_entry_time = time; +} +static inline uint64_t +kvm_get_guest_vcpu_runstate_time(struct kvm_vcpu *vcpu, int runstate_type) +{ + return kvm_get_vcpu_runstate(vcpu)->time[runstate_type]; +} +static inline void +kvm_set_guest_vcpu_runstate_time(struct kvm_vcpu *vcpu, + int runstate_type, uint64_t time) +{ + kvm_get_vcpu_runstate(vcpu)->time[runstate_type] = time; +} +static inline uint64_t +kvm_get_guest_vcpu_runstate_running_time(struct kvm_vcpu *vcpu) +{ + return kvm_get_guest_vcpu_runstate_time(vcpu, RUNSTATE_running); +} +static inline void +kvm_set_guest_vcpu_runstate_running_time(struct kvm_vcpu *vcpu, uint64_t time) +{ + kvm_set_guest_vcpu_runstate_time(vcpu, RUNSTATE_running, time); +} +static inline uint64_t +kvm_get_guest_vcpu_runstate_runnable_time(struct kvm_vcpu *vcpu) +{ + return kvm_get_guest_vcpu_runstate_time(vcpu, RUNSTATE_runnable); +} +static inline void 
+kvm_set_guest_vcpu_runstate_runnable_time(struct kvm_vcpu *vcpu, uint64_t time) +{ + kvm_set_guest_vcpu_runstate_time(vcpu, RUNSTATE_runnable, time); +} +static inline uint64_t +kvm_get_guest_vcpu_runstate_blocked_time(struct kvm_vcpu *vcpu) +{ + return kvm_get_guest_vcpu_runstate_time(vcpu, RUNSTATE_blocked); +} +static inline void +kvm_set_guest_vcpu_runstate_blocked_time(struct kvm_vcpu *vcpu, uint64_t time) +{ + kvm_set_guest_vcpu_runstate_time(vcpu, RUNSTATE_blocked, time); +} +static inline uint64_t +kvm_get_guest_vcpu_runstate_offline_time(struct kvm_vcpu *vcpu) +{ + return kvm_get_guest_vcpu_runstate_time(vcpu, RUNSTATE_offline); +} +static inline void +kvm_set_guest_vcpu_runstate_offline_time(struct kvm_vcpu *vcpu, uint64_t time) +{ + kvm_set_guest_vcpu_runstate_time(vcpu, RUNSTATE_offline, time); +} +static inline uint64_t +kvm_get_guest_vcpu_runstate_in_hcall_time(struct kvm_vcpu *vcpu) +{ + return kvm_get_guest_vcpu_runstate_time(vcpu, RUNSTATE_in_hcall); +} +static inline void +kvm_set_guest_vcpu_runstate_in_hcall_time(struct kvm_vcpu *vcpu, uint64_t time) +{ + kvm_set_guest_vcpu_runstate_time(vcpu, RUNSTATE_in_hcall, time); +} +static inline uint64_t +kvm_get_guest_vcpu_runstate_in_QEMU_time(struct kvm_vcpu *vcpu) +{ + return kvm_get_guest_vcpu_runstate_time(vcpu, RUNSTATE_in_QEMU); +} +static inline void +kvm_set_guest_vcpu_runstate_in_QEMU_time(struct kvm_vcpu *vcpu, uint64_t time) +{ + kvm_set_guest_vcpu_runstate_time(vcpu, RUNSTATE_in_QEMU, time); +} +static inline uint64_t +kvm_get_guest_vcpu_runstate_in_trap_time(struct kvm_vcpu *vcpu) +{ + return kvm_get_guest_vcpu_runstate_time(vcpu, RUNSTATE_in_trap); +} +static inline void +kvm_set_guest_vcpu_runstate_in_trap_time(struct kvm_vcpu *vcpu, uint64_t time) +{ + kvm_set_guest_vcpu_runstate_time(vcpu, RUNSTATE_in_trap, time); +} +static inline uint64_t +kvm_get_guest_vcpu_runstate_in_intercept_time(struct kvm_vcpu *vcpu) +{ + return kvm_get_guest_vcpu_runstate_time(vcpu, RUNSTATE_in_intercept); +} 
+static inline void +kvm_set_guest_vcpu_runstate_in_intercept_time(struct kvm_vcpu *vcpu, + uint64_t time) +{ + kvm_set_guest_vcpu_runstate_time(vcpu, RUNSTATE_in_intercept, time); +} + +/* + * Interrupts should be disabled by caller + */ +static inline void +kvm_do_update_guest_vcpu_runstate(struct kvm_vcpu *vcpu, + int new_state, uint64_t entry_time) +{ + int old_state = kvm_get_guest_vcpu_runstate(vcpu); + uint64_t old_entry_time = kvm_get_guest_vcpu_runstate_entry_time(vcpu); + uint64_t old_time; + uint64_t old_time_add; + + if (entry_time > old_entry_time) { + old_time = kvm_get_guest_vcpu_runstate_time(vcpu, old_state); + old_time_add = entry_time - old_entry_time; + old_time += old_time_add; + kvm_set_guest_vcpu_runstate_time(vcpu, old_state, old_time); + } + + kvm_set_guest_vcpu_runstate(vcpu, new_state); + kvm_set_guest_vcpu_runstate_entry_time(vcpu, entry_time); +} +static inline void +kvm_update_guest_vcpu_runstate(struct kvm_vcpu *vcpu, + int new_state, uint64_t entry_time) +{ + unsigned long flags; + + raw_local_irq_save(flags); + kvm_do_update_guest_vcpu_runstate(vcpu, new_state, entry_time); + raw_local_irq_restore(flags); +} +/* Interrupts should be disabled by caller */ +static inline void +kvm_do_update_guest_vcpu_current_runstate(struct kvm_vcpu *vcpu, int new_state) +{ + uint64_t cur_time; + + cur_time = nsecs_2cycles(ktime_to_ns(ktime_get())); + kvm_do_update_guest_vcpu_runstate(vcpu, new_state, cur_time); +} +static inline void +kvm_update_guest_vcpu_current_runstate(struct kvm_vcpu *vcpu, int new_state) +{ + unsigned long flags; + + raw_local_irq_save(flags); + kvm_do_update_guest_vcpu_current_runstate(vcpu, new_state); + raw_local_irq_restore(flags); +} + +/* Interrupts should be disabled by caller */ +static inline void +kvm_do_init_guest_vcpu_runstate(struct kvm_vcpu *vcpu, int init_state) +{ + uint64_t cur_time; + + cur_time = nsecs_2cycles(ktime_to_ns(ktime_get())); + kvm_set_guest_vcpu_runstate(vcpu, init_state); + 
kvm_set_guest_vcpu_runstate_entry_time(vcpu, cur_time); + kvm_set_guest_vcpu_runstate_time(vcpu, init_state, 0); +} +static inline void +kvm_init_guest_vcpu_runstate(struct kvm_vcpu *vcpu, int init_state) +{ + unsigned long flags; + + raw_local_irq_save(flags); + kvm_do_init_guest_vcpu_runstate(vcpu, init_state); + raw_local_irq_restore(flags); +} + +static inline long +kvm_do_get_guest_vcpu_stolen_time(struct kvm_vcpu *vcpu) +{ + s64 running, blocked, runnable, offline, stolen, in_hcall, in_intercept; + int runstate; + uint64_t entry_time; + s64 now; + + now = nsecs_2cycles(ktime_to_ns(ktime_get())); + entry_time = kvm_get_guest_vcpu_runstate_entry_time(vcpu); + BUG_ON(now < entry_time); + + runstate = kvm_get_guest_vcpu_runstate(vcpu); + + running = kvm_get_guest_vcpu_runstate_running_time(vcpu); + if (runstate == RUNSTATE_running) + running += (now - entry_time); + in_hcall = kvm_get_guest_vcpu_runstate_in_hcall_time(vcpu); + if (runstate == RUNSTATE_in_hcall) + in_hcall += (now - entry_time); + blocked = kvm_get_guest_vcpu_runstate_blocked_time(vcpu); + if (runstate == RUNSTATE_blocked) + blocked += (now - entry_time); + in_intercept = kvm_get_guest_vcpu_runstate_in_intercept_time(vcpu); + if (runstate == RUNSTATE_in_intercept) + in_intercept += (now - entry_time); + + /* work out how much time the VCPU has not been runn*ing* */ + runnable = kvm_get_guest_vcpu_runstate_runnable_time(vcpu) + + kvm_get_guest_vcpu_runstate_in_QEMU_time(vcpu) + + kvm_get_guest_vcpu_runstate_in_trap_time(vcpu); + if (runstate == RUNSTATE_runnable || runstate == RUNSTATE_in_trap || + runstate == RUNSTATE_in_QEMU) + runnable += (now - entry_time); + offline = kvm_get_guest_vcpu_runstate_offline_time(vcpu); + if (runstate == RUNSTATE_offline) + offline += (now - entry_time); + + stolen = runnable + offline; + + BUG_ON(now < stolen + running + in_hcall + blocked + in_intercept); + + return stolen; +} +static inline long +kvm_get_guest_vcpu_stolen_time(struct kvm_vcpu *vcpu) +{ + s64 
stolen_time; + unsigned long flags; + + raw_local_irq_save(flags); + stolen_time = kvm_do_get_guest_vcpu_stolen_time(vcpu); + raw_local_irq_restore(flags); + + return stolen_time; +} + +static inline long +kvm_do_get_guest_vcpu_running_time(struct kvm_vcpu *vcpu) +{ + s64 running, in_hcall, blocked, in_intercept; + int runstate; + uint64_t entry_time; + s64 now; + + do { + entry_time = kvm_get_guest_vcpu_runstate_entry_time(vcpu); + runstate = kvm_get_guest_vcpu_runstate(vcpu); + running = kvm_get_guest_vcpu_runstate_running_time(vcpu); + in_hcall = kvm_get_guest_vcpu_runstate_in_hcall_time(vcpu); + blocked = kvm_get_guest_vcpu_runstate_blocked_time(vcpu); + in_intercept = + kvm_get_guest_vcpu_runstate_in_intercept_time(vcpu); + now = nsecs_2cycles(ktime_to_ns(ktime_get())); + } while (entry_time != kvm_get_guest_vcpu_runstate_entry_time(vcpu)); + + BUG_ON(now < entry_time); + if (now > entry_time) { + if (runstate == RUNSTATE_running) + running += (now - entry_time); + if (runstate == RUNSTATE_in_hcall) + in_hcall += (now - entry_time); + if (runstate == RUNSTATE_blocked) + blocked += (now - entry_time); + if (runstate == RUNSTATE_in_intercept) + in_intercept += (now - entry_time); + } + + BUG_ON(now < in_hcall + blocked + running + in_intercept); + + return running + in_hcall + blocked + in_intercept; +} +static inline long +kvm_get_guest_vcpu_running_time(struct kvm_vcpu *vcpu) +{ + s64 running_time; + unsigned long flags; + + raw_local_irq_save(flags); + running_time = kvm_do_get_guest_vcpu_running_time(vcpu); + raw_local_irq_restore(flags); + + return running_time; +} + +/* Runstate time is measured with ktime_get() cycles, it has to be monotonic across all CPUs */ +static inline unsigned long +kvm_get_host_runstate_ktime(void) +{ + return nsecs_2cycles(ktime_to_ns(ktime_get())); +} + +/* + * IRQs should be disabled by caller + * It always is true while caller is light hypercall + */ +static inline unsigned long +kvm_get_guest_running_time(struct kvm_vcpu 
*vcpu) +{ + cycles_t running; + + running = kvm_do_get_guest_vcpu_running_time(vcpu); + return running; +} + +#define CONFIG_DEBUG_VCPU_RUNSTATE +#ifndef CONFIG_DEBUG_VCPU_RUNSTATE +/* guest VCPU run state should be updated in traps and interrupts */ +static inline void +kvm_set_guest_runstate_in_user_trap(void) +{ + thread_info_t *ti = current_thread_info(); + struct kvm_vcpu *vcpu; + + if (!test_ti_thread_flag(ti, TIF_VIRTUALIZED_GUEST)) + return; + vcpu = ti->vcpu; + BUG_ON(vcpu == NULL); + BUG_ON(!irqs_disabled()); + BUG_ON(kvm_get_guest_vcpu_runstate(vcpu) != RUNSTATE_running); + kvm_do_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_in_trap); +} +static inline void +kvm_set_guest_runstate_out_user_trap(void) +{ + thread_info_t *ti = current_thread_info(); + struct kvm_vcpu *vcpu; + + if (!test_ti_thread_flag(ti, TIF_VIRTUALIZED_GUEST)) + return; + vcpu = ti->vcpu; + BUG_ON(vcpu == NULL); + BUG_ON(!irqs_disabled()); + BUG_ON(kvm_get_guest_vcpu_runstate(vcpu) != RUNSTATE_in_trap); + kvm_do_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_running); +} +static inline int +kvm_set_guest_runstate_in_kernel_trap(void) +{ + thread_info_t *ti = current_thread_info(); + struct kvm_vcpu *vcpu; + int cur_runstate; + + if (!test_ti_thread_flag(ti, TIF_VIRTUALIZED_GUEST)) + return -1; + vcpu = ti->vcpu; + if (vcpu == NULL) + return -1; /* It is VIRQ VCPU: run state is unused */ + BUG_ON(!irqs_disabled()); + cur_runstate = kvm_get_guest_vcpu_runstate(vcpu); + BUG_ON(cur_runstate != RUNSTATE_running); + kvm_do_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_in_trap); + return cur_runstate; +} +static inline void +kvm_set_guest_runstate_out_kernel_trap(int saved_runstate) +{ + thread_info_t *ti = current_thread_info(); + struct kvm_vcpu *vcpu; + + if (!test_ti_thread_flag(ti, TIF_VIRTUALIZED_GUEST)) + return; + vcpu = ti->vcpu; + if (vcpu == NULL) + return; /* It is VIRQ VCPU: run state is unused */ + BUG_ON(!irqs_disabled()); + BUG_ON(kvm_get_guest_vcpu_runstate(vcpu) 
!= RUNSTATE_in_trap); + kvm_do_update_guest_vcpu_current_runstate(vcpu, saved_runstate); +} +#else /* CONFIG_DEBUG_VCPU_RUNSTATE */ +extern void kvm_set_guest_runstate_in_user_trap(void); +extern void kvm_set_guest_runstate_out_user_trap(void); +extern int kvm_set_guest_runstate_in_kernel_trap(void); +extern void kvm_set_guest_runstate_out_kernel_trap(int saved_runstate); +#endif /* ! CONFIG_DEBUG_VCPU_RUNSTATE */ + +#define SET_RUNSTATE_IN_USER_TRAP() kvm_set_guest_runstate_in_user_trap() +#define SET_RUNSTATE_OUT_USER_TRAP() kvm_set_guest_runstate_out_user_trap() +#define SET_RUNSTATE_IN_KERNEL_TRAP(cur_runstate) \ + (cur_runstate = kvm_set_guest_runstate_in_kernel_trap()) +#define SET_RUNSTATE_OUT_KERNEL_TRAP(cur_runstate) \ + kvm_set_guest_runstate_out_kernel_trap(cur_runstate) + +#else /* ! CONFIG_VIRTUALIZATION || CONFIG_KVM_GUEST_KERNEL */ +/* it is native kernel without virtualization support */ +/* or pure guest kernel (not paravirtualized based on pv_ops) */ +#define SET_RUNSTATE_IN_USER_TRAP() +#define SET_RUNSTATE_OUT_USER_TRAP() +#define SET_RUNSTATE_IN_KERNEL_TRAP(cur_runstate) +#define SET_RUNSTATE_OUT_KERNEL_TRAP(cur_runstate) +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ + +/* FIXME: follow function(s) should not be used on pure guest kernel mode */ +static inline int +kvm_get_guest_vcpu_runstate(struct kvm_vcpu *vcpu) +{ + return -1; /* guest can not support own guests */ +} +static inline long +kvm_do_get_guest_vcpu_running_time(struct kvm_vcpu *vcpu) +{ + return -1; /* guest can not support own guests */ +} +static inline long +kvm_get_guest_vcpu_running_time(struct kvm_vcpu *vcpu) +{ + return -1; /* guest can not support own guests */ +} +static inline unsigned long +kvm_get_guest_running_time(struct kvm_vcpu *vcpu) +{ + return -1; /* guest can not support own guests */ +} +static inline uint64_t +kvm_get_guest_vcpu_runstate_entry_time(struct kvm_vcpu *vcpu) +{ + return -1; /* guest can not 
support own guests */ +} +static inline void +kvm_do_update_guest_vcpu_current_runstate(struct kvm_vcpu *vcpu, int new_state) +{ + /* guest can not support own guests */ +} +static inline void +kvm_update_guest_vcpu_current_runstate(struct kvm_vcpu *vcpu, int new_state) +{ + /* guest can not support own guests */ +} +static inline void +kvm_do_init_guest_vcpu_runstate(struct kvm_vcpu *vcpu, int init_state) +{ + /* guest can not support own guests */ +} +static inline unsigned long +kvm_get_host_runstate_ktime(void) +{ + return -1; /* guest can not support own guests */ +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ +#endif /* CONFIG_VIRTUALIZATION && ! CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __KVM_E2K_RUNSTATE_H */ diff --git a/arch/e2k/include/asm/kvm/spinlock.h b/arch/e2k/include/asm/kvm/spinlock.h new file mode 100644 index 000000000000..86ada457eb02 --- /dev/null +++ b/arch/e2k/include/asm/kvm/spinlock.h @@ -0,0 +1,40 @@ +#ifndef __ASM_E2K_KVM_SPINLOCK_H +#define __ASM_E2K_KVM_SPINLOCK_H +/* + * This file implements the arch-dependent parts of kvm guest + * spin_lock()/spin_unlock() slow part + * + * Copyright 2014 Salavat S. 
Guiliazov (atic@mcst.ru) + */ + +#include +#include + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#else + #error "Unknown virtualization type" +#endif /* CONFIG_PARAVIRT_GUEST */ + +static inline void arch_spin_unlock(arch_spinlock_t *lock) +{ + arch_spinlock_t val; + u16 ticket, ready; + + wmb(); /* wait for all store completion */ + val.lock = __api_atomic16_add_return32_lock( + 1 << ARCH_SPINLOCK_HEAD_SHIFT, &lock->lock); + ticket = val.tail; + ready = val.head; + + if (unlikely(ticket != ready)) { + /* spinlock has more user(s): so activate it(s) */ + arch_spin_unlock_slow(lock); + } +} + +#endif /* __ASM_E2K_KVM_SPINLOCK_H */ diff --git a/arch/e2k/include/asm/kvm/spinlock_slow.h b/arch/e2k/include/asm/kvm/spinlock_slow.h new file mode 100644 index 000000000000..1a2f2d31cd48 --- /dev/null +++ b/arch/e2k/include/asm/kvm/spinlock_slow.h @@ -0,0 +1,44 @@ +#ifndef _ASM_E2K_KVM_SPINLOCK_SLOW_H +#define _ASM_E2K_KVM_SPINLOCK_SLOW_H +/* + * This file implements on host the arch-dependent parts of kvm guest + * spin_lock()/spin_unlock() slow part + * + * Copyright 2014 Salavat S. 
Guiliazov (atic@mcst.ru) + */ + +#include +#include +#include +#include + +typedef struct spinlock_unlocked { + struct list_head unlocked_list; + struct thread_info *ti; + struct gthread_info *gti; + struct list_head checked_unlocked; /* list of tasks */ + /* which already */ + /* checked spin */ + /* was unlocked */ + + void *lock; +} spinlock_unlocked_t; + +#define SPINLOCK_HASH_BITS 6 +#define SPINLOCK_HASH_SHIFT 4 /* [9:4] hash bits */ +#define SPINLOCK_HASH_SIZE (1 << SPINLOCK_HASH_BITS) +#define spinlock_hashfn(lockp) \ + hash_long(((unsigned long)(lockp)) >> SPINLOCK_HASH_SHIFT, \ + SPINLOCK_HASH_BITS) +#define SPINUNLOCKED_LIST_SIZE 32 + +extern int kvm_guest_spin_lock_slow(struct kvm *kvm, void *lock, + bool check_unlock); +extern int kvm_guest_spin_locked_slow(struct kvm *kvm, void *lock); +extern int kvm_guest_spin_unlock_slow(struct kvm *kvm, void *lock, + bool add_to_unlock); + +extern int kvm_guest_spinlock_init(struct kvm *kvm); +extern void kvm_guest_spinlock_destroy(struct kvm *kvm); + +#endif /* _ASM_E2K_KVM_SPINLOCK_SLOW_H */ \ No newline at end of file diff --git a/arch/e2k/include/asm/kvm/stacks.h b/arch/e2k/include/asm/kvm/stacks.h new file mode 100644 index 000000000000..6c237d47d6b4 --- /dev/null +++ b/arch/e2k/include/asm/kvm/stacks.h @@ -0,0 +1,43 @@ +/* + * KVM guest stacks support + * Copyright 2017 Salavat S. 
Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_STACKS_H +#define _E2K_KVM_STACKS_H + +#include + +#ifdef CONFIG_VIRTUALIZATION +#ifdef CONFIG_KVM_GUEST +/* + * Guest kernel thread stacks descriptions + */ +#define VIRT_KERNEL_C_STACK_SIZE KVM_GUEST_KERNEL_C_STACK_SIZE +#define VIRT_KERNEL_PS_SIZE KVM_GUEST_KERNEL_PS_SIZE +#define VIRT_KERNEL_PS_INIT_SIZE KVM_GUEST_KERNEL_PS_INIT_SIZE +#define VIRT_KERNEL_PCS_SIZE KVM_GUEST_KERNEL_PCS_SIZE +#define VIRT_KERNEL_PCS_INIT_SIZE KVM_GUEST_KERNEL_PCS_INIT_SIZE + +#define VIRT_KERNEL_P_STACK_PAGES (VIRT_KERNEL_PS_SIZE / PAGE_SIZE) +#define VIRT_KERNEL_PC_STACK_PAGES (VIRT_KERNEL_PCS_SIZE / PAGE_SIZE) + +/* + * Guest user task stacks descriptions + */ +#define VIRT_USER_C_STACK_SIZE KVM_GUEST_USER_C_STACK_SIZE +#define VIRT_USER_PS_SIZE KVM_GUEST_USER_PS_SIZE +#define VIRT_USER_PS_INIT_SIZE KVM_GUEST_USER_PS_INIT_SIZE +#define VIRT_USER_PCS_SIZE KVM_GUEST_USER_PCS_SIZE +#define VIRT_USER_PCS_INIT_SIZE KVM_GUEST_USER_PCS_INIT_SIZE + +#else /* ! CONFIG_KVM_GUEST */ + #error "Unknown virtualization type" +#endif /* CONFIG_KVM_GUEST*/ + +#else /* ! CONFIG_VIRTUALIZATION */ +#define VIRT_KERNEL_P_STACK_PAGES 0 +#define VIRT_KERNEL_PC_STACK_PAGES 0 +#endif /* CONFIG_VIRTUALIZATION */ + +#endif /* ! 
_E2K_KVM_STACKS_H */ diff --git a/arch/e2k/include/asm/kvm/string.h b/arch/e2k/include/asm/kvm/string.h new file mode 100644 index 000000000000..9f74649e1692 --- /dev/null +++ b/arch/e2k/include/asm/kvm/string.h @@ -0,0 +1,7 @@ +#ifndef _E2K_KVM_STRING_H_ +#define _E2K_KVM_STRING_H_ + +#include +#include + +#endif /* _E2K_KVM_STRING_H_ */ diff --git a/arch/e2k/include/asm/kvm/switch.h b/arch/e2k/include/asm/kvm/switch.h new file mode 100644 index 000000000000..a783b82ad2c3 --- /dev/null +++ b/arch/e2k/include/asm/kvm/switch.h @@ -0,0 +1,1374 @@ +#ifndef _E2K_KVM_SWITCH_H +#define _E2K_KVM_SWITCH_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEBUG_UPSR_FP_DISABLE + +/* + * See below the 'flags' argument of xxx_guest_enter()/xxx_guest_exit() + */ +#define FULL_CONTEXT_SWITCH 0x0001U /* save/restore full guest/host */ + /* context */ +#define FROM_HYPERCALL_SWITCH 0x0002U /* save/restore full guest/host */ + /* before/after hypercall */ +#define USD_CONTEXT_SWITCH 0x0004U /* save/restore local data stack */ +#define DEBUG_REGS_SWITCH 0x0008U /* save/restore debugging registers */ +#define DONT_CU_REGS_SWITCH 0x0010U /* do not save/restore CUT and CU */ + /* registers */ +#define DONT_MMU_CONTEXT_SWITCH 0x0020U /* do not switch MMU context */ +#define DONT_SAVE_KGREGS_SWITCH 0x0040U /* do not save and set kernel global */ + /* regs */ +#define DONT_AAU_CONTEXT_SWITCH 0x0080U /* do not switch AAU context */ +#define DONT_TRAP_MASK_SWITCH 0x0100U /* do not switch OSEM context */ +#define EXIT_FROM_INTC_SWITCH 0x1000U /* complete intercept emulation mode */ +#define EXIT_FROM_TRAP_SWITCH 0x2000U /* complete trap mode */ + +static inline void +native_trap_guest_enter(struct thread_info *ti, struct pt_regs *regs, + unsigned flags) +{ + /* nothing guests can be */ + + if (flags & EXIT_FROM_INTC_SWITCH) + return; + /* IMPORTANT: do NOT access current, current_thread_info() */ + /* and per-cpu 
variables after this point */ + if (flags & EXIT_FROM_TRAP_SWITCH) { + NATIVE_RESTORE_KERNEL_GREGS(&ti->k_gregs); + } +} +static inline void +native_trap_guest_exit(struct thread_info *ti, struct pt_regs *regs, + trap_pt_regs_t *trap, unsigned flags) +{ + /* nothing guests can be */ +} +static inline bool +native_guest_trap_pending(struct thread_info *ti) +{ + /* there is not any guest */ + return false; +} + +static inline bool +native_trap_from_guest_user(struct thread_info *ti) +{ + /* there is not any guest */ + return false; +} + +static inline bool +native_syscall_from_guest_user(struct thread_info *ti) +{ + /* there is not any guest */ + return false; +} + +static inline struct e2k_stacks * +native_trap_guest_get_restore_stacks(struct thread_info *ti, + struct pt_regs *regs) +{ + return &regs->stacks; +} + +static inline struct e2k_stacks * +native_syscall_guest_get_restore_stacks(struct pt_regs *regs) +{ + return &regs->stacks; +} + +/* + * The function should return bool is the system call from guest + */ +static inline bool +native_guest_syscall_enter(struct pt_regs *regs) +{ + /* nothing guests can be */ + + return false; /* it is not guest system call */ +} + +#ifdef CONFIG_VIRTUALIZATION + +/* + * For interceptions just switch actual registers with saved values in 'sw_ctxt'. + * + * For hypercalls: + * 1) Enter hypercall. + * 2) Save previous values from 'sw_ctxt' to 'sw_ctxt->saved'. + * 3) Switch actual registers with saved values in @sw_ctxt. + * 4) Allocate stack with getsp. + * 5) After hypercall completion switch registers back to guest values. + * 6) Restore 'sw_ctxt' from 'sw_ctxt->saved' + * (because 'getsp' above has changed registers we cannot use their values). 
+ */ +static inline void kvm_switch_stack_regs(struct kvm_sw_cpu_context *sw_ctxt, + bool ctxt_save, bool ctxt_restore) +{ + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + e2k_sbr_t sbr; + + KVM_BUG_ON(ctxt_save && ctxt_restore); + + if (!ctxt_restore) { + AW(usd_lo) = NATIVE_NV_READ_USD_LO_REG_VALUE(); + AW(usd_hi) = NATIVE_NV_READ_USD_HI_REG_VALUE(); + AW(sbr) = NATIVE_NV_READ_SBR_REG_VALUE(); + } + + NATIVE_NV_WRITE_USBR_USD_REG(sw_ctxt->sbr, sw_ctxt->usd_hi, sw_ctxt->usd_lo); + + if (ctxt_save) { + KVM_BUG_ON(sw_ctxt->saved.valid); + sw_ctxt->saved.sbr = sw_ctxt->sbr; + sw_ctxt->saved.usd_lo = sw_ctxt->usd_lo; + sw_ctxt->saved.usd_hi = sw_ctxt->usd_hi; + sw_ctxt->saved.valid = true; + } + if (!ctxt_restore) { + sw_ctxt->sbr = sbr; + sw_ctxt->usd_lo = usd_lo; + sw_ctxt->usd_hi = usd_hi; + } else { + KVM_BUG_ON(!sw_ctxt->saved.valid); + sw_ctxt->sbr = sw_ctxt->saved.sbr; + sw_ctxt->usd_lo = sw_ctxt->saved.usd_lo; + sw_ctxt->usd_hi = sw_ctxt->saved.usd_hi; + sw_ctxt->saved.valid = false; + } +} + +static inline void kvm_switch_fpu_regs(struct kvm_sw_cpu_context *sw_ctxt) +{ + e2k_fpcr_t fpcr; + e2k_fpsr_t fpsr; + e2k_pfpfr_t pfpfr; + e2k_upsr_t upsr; + + fpcr = NATIVE_NV_READ_FPCR_REG(); + fpsr = NATIVE_NV_READ_FPSR_REG(); + pfpfr = NATIVE_NV_READ_PFPFR_REG(); + upsr = NATIVE_NV_READ_UPSR_REG(); + + NATIVE_NV_WRITE_FPCR_REG(sw_ctxt->fpcr); + NATIVE_NV_WRITE_FPSR_REG(sw_ctxt->fpsr); + NATIVE_NV_WRITE_PFPFR_REG(sw_ctxt->pfpfr); + NATIVE_WRITE_UPSR_REG(sw_ctxt->upsr); + + sw_ctxt->fpcr = fpcr; + sw_ctxt->fpsr = fpsr; + sw_ctxt->pfpfr = pfpfr; + sw_ctxt->upsr = upsr; +} + +static inline void kvm_switch_cu_regs(struct kvm_sw_cpu_context *sw_ctxt) +{ + e2k_cutd_t cutd; + + AW(cutd) = NATIVE_NV_READ_CUTD_REG_VALUE(); + + NATIVE_NV_NOIRQ_WRITE_CUTD_REG_VALUE(AW(sw_ctxt->cutd)); + sw_ctxt->cutd = cutd; +} + +static inline void kvm_switch_mmu_pt_regs(struct kvm_sw_cpu_context *sw_ctxt) +{ + mmu_reg_t u_pptb; + mmu_reg_t u_vptb; + + u_pptb = NATIVE_READ_MMU_U_PPTB_REG(); + 
u_vptb = NATIVE_READ_MMU_U_VPTB_REG(); + + NATIVE_WRITE_MMU_U_PPTB_REG(sw_ctxt->sh_u_pptb); + NATIVE_WRITE_MMU_U_VPTB_REG(sw_ctxt->sh_u_vptb); + + sw_ctxt->sh_u_pptb = u_pptb; + sw_ctxt->sh_u_vptb = u_vptb; +} + +static inline void kvm_switch_mmu_tc_regs(struct kvm_sw_cpu_context *sw_ctxt) +{ + mmu_reg_t tc_hpa; + mmu_reg_t trap_count; + + tc_hpa = NATIVE_READ_MMU_TRAP_POINT(); + trap_count = NATIVE_READ_MMU_TRAP_COUNT(); + + NATIVE_WRITE_MMU_TRAP_POINT(sw_ctxt->tc_hpa); + NATIVE_WRITE_MMU_TRAP_COUNT(sw_ctxt->trap_count); + + sw_ctxt->tc_hpa = tc_hpa; + sw_ctxt->trap_count = trap_count; +} + +static inline void kvm_switch_mmu_regs(struct kvm_sw_cpu_context *sw_ctxt, + bool switch_tc) +{ + kvm_switch_mmu_pt_regs(sw_ctxt); + if (switch_tc) + kvm_switch_mmu_tc_regs(sw_ctxt); +} + +static inline void kvm_switch_to_guest_mmu_pid(struct kvm_vcpu *vcpu) +{ + mm_context_t *gmm_context; + unsigned long mask; + + gmm_context = pv_vcpu_get_gmm_context(vcpu); + mask = get_mmu_pid(gmm_context, smp_processor_id()); + reload_context_mask(mask); +} + +static inline unsigned long kvm_get_guest_mmu_pid(struct kvm_vcpu *vcpu) +{ + mm_context_t *gmm_context; + + gmm_context = pv_vcpu_get_gmm_context(vcpu); + return gmm_context->cpumsk[smp_processor_id()]; +} + +static inline void kvm_switch_to_host_mmu_pid(struct mm_struct *mm) +{ + unsigned long mask; + + mask = get_mmu_context(mm, smp_processor_id()); + reload_context_mask(mask); +} + +static inline void kvm_switch_debug_regs(struct kvm_sw_cpu_context *sw_ctxt, + int is_active) +{ + u64 b_dimar0, b_dimar1, b_ddmar0, b_ddmar1, b_dibar0, b_dibar1, + b_dibar2, b_dibar3, b_ddbar0, b_ddbar1, b_ddbar2, b_ddbar3, + a_dimar0, a_dimar1, a_ddmar0, a_ddmar1, a_dibar0, a_dibar1, + a_dibar2, a_dibar3, a_ddbar0, a_ddbar1, a_ddbar2, a_ddbar3; + e2k_dimcr_t b_dimcr, a_dimcr; + e2k_ddmcr_t b_ddmcr, a_ddmcr; + e2k_dibcr_t b_dibcr, a_dibcr; + e2k_dibsr_t b_dibsr, a_dibsr; + e2k_ddbcr_t b_ddbcr, a_ddbcr; + e2k_ddbsr_t b_ddbsr, a_ddbsr; + + b_dibcr = 
sw_ctxt->dibcr; + b_ddbcr = sw_ctxt->ddbcr; + b_dibsr = sw_ctxt->dibsr; + b_ddbsr = sw_ctxt->ddbsr; + b_dimcr = sw_ctxt->dimcr; + b_ddmcr = sw_ctxt->ddmcr; + b_dibar0 = sw_ctxt->dibar0; + b_dibar1 = sw_ctxt->dibar1; + b_dibar2 = sw_ctxt->dibar2; + b_dibar3 = sw_ctxt->dibar3; + b_ddbar0 = sw_ctxt->ddbar0; + b_ddbar1 = sw_ctxt->ddbar1; + b_ddbar2 = sw_ctxt->ddbar2; + b_ddbar3 = sw_ctxt->ddbar3; + b_dimar0 = sw_ctxt->dimar0; + b_dimar1 = sw_ctxt->dimar1; + b_ddmar0 = sw_ctxt->ddmar0; + b_ddmar1 = sw_ctxt->ddmar1; + + a_dibcr = NATIVE_READ_DIBCR_REG(); + a_ddbcr = NATIVE_READ_DDBCR_REG(); + a_dibsr = NATIVE_READ_DIBSR_REG(); + a_ddbsr = NATIVE_READ_DDBSR_REG(); + a_dimcr = NATIVE_READ_DIMCR_REG(); + a_ddmcr = NATIVE_READ_DDMCR_REG(); + a_dibar0 = NATIVE_READ_DIBAR0_REG_VALUE(); + a_dibar1 = NATIVE_READ_DIBAR1_REG_VALUE(); + a_dibar2 = NATIVE_READ_DIBAR2_REG_VALUE(); + a_dibar3 = NATIVE_READ_DIBAR3_REG_VALUE(); + a_ddbar0 = NATIVE_READ_DDBAR0_REG_VALUE(); + a_ddbar1 = NATIVE_READ_DDBAR1_REG_VALUE(); + a_ddbar2 = NATIVE_READ_DDBAR2_REG_VALUE(); + a_ddbar3 = NATIVE_READ_DDBAR3_REG_VALUE(); + a_ddmar0 = NATIVE_READ_DDMAR0_REG_VALUE(); + a_ddmar1 = NATIVE_READ_DDMAR1_REG_VALUE(); + a_dimar0 = NATIVE_READ_DIMAR0_REG_VALUE(); + a_dimar1 = NATIVE_READ_DIMAR1_REG_VALUE(); + + if (is_active) { + /* These two must be written first to disable monitoring */ + NATIVE_WRITE_DIBCR_REG(b_dibcr); + NATIVE_WRITE_DDBCR_REG(b_ddbcr); + } + NATIVE_WRITE_DIBAR0_REG_VALUE(b_dibar0); + NATIVE_WRITE_DIBAR1_REG_VALUE(b_dibar1); + NATIVE_WRITE_DIBAR2_REG_VALUE(b_dibar2); + NATIVE_WRITE_DIBAR3_REG_VALUE(b_dibar3); + NATIVE_WRITE_DDBAR0_REG_VALUE(b_ddbar0); + NATIVE_WRITE_DDBAR1_REG_VALUE(b_ddbar1); + NATIVE_WRITE_DDBAR2_REG_VALUE(b_ddbar2); + NATIVE_WRITE_DDBAR3_REG_VALUE(b_ddbar3); + NATIVE_WRITE_DDMAR0_REG_VALUE(b_ddmar0); + NATIVE_WRITE_DDMAR1_REG_VALUE(b_ddmar1); + NATIVE_WRITE_DIMAR0_REG_VALUE(b_dimar0); + NATIVE_WRITE_DIMAR1_REG_VALUE(b_dimar1); + NATIVE_WRITE_DIBSR_REG(b_dibsr); + 
NATIVE_WRITE_DDBSR_REG(b_ddbsr); + NATIVE_WRITE_DIMCR_REG(b_dimcr); + NATIVE_WRITE_DDMCR_REG(b_ddmcr); + if (!is_active) { + /* These two must be written last to enable monitoring */ + NATIVE_WRITE_DIBCR_REG(b_dibcr); + NATIVE_WRITE_DDBCR_REG(b_ddbcr); + } + + sw_ctxt->dibcr = a_dibcr; + sw_ctxt->ddbcr = a_ddbcr; + sw_ctxt->dibsr = a_dibsr; + sw_ctxt->ddbsr = a_ddbsr; + sw_ctxt->dimcr = a_dimcr; + sw_ctxt->ddmcr = a_ddmcr; + sw_ctxt->dibar0 = a_dibar0; + sw_ctxt->dibar1 = a_dibar1; + sw_ctxt->dibar2 = a_dibar2; + sw_ctxt->dibar3 = a_dibar3; + sw_ctxt->ddbar0 = a_ddbar0; + sw_ctxt->ddbar1 = a_ddbar1; + sw_ctxt->ddbar2 = a_ddbar2; + sw_ctxt->ddbar3 = a_ddbar3; + sw_ctxt->ddmar0 = a_ddmar0; + sw_ctxt->ddmar1 = a_ddmar1; + sw_ctxt->dimar0 = a_dimar0; + sw_ctxt->dimar1 = a_dimar1; + +} + +#ifdef CONFIG_CLW_ENABLE +static inline void kvm_switch_clw_regs(struct kvm_sw_cpu_context *sw_ctxt, bool guest_enter) +{ + if (guest_enter) { + native_write_US_CL_B(sw_ctxt->us_cl_b); + native_write_US_CL_UP(sw_ctxt->us_cl_up); + native_write_US_CL_M0(sw_ctxt->us_cl_m0); + native_write_US_CL_M1(sw_ctxt->us_cl_m1); + native_write_US_CL_M2(sw_ctxt->us_cl_m2); + native_write_US_CL_M3(sw_ctxt->us_cl_m3); + + NATIVE_WRITE_MMU_US_CL_D(sw_ctxt->us_cl_d); + } else { + sw_ctxt->us_cl_d = NATIVE_READ_MMU_US_CL_D(); + + DISABLE_US_CLW(); + + sw_ctxt->us_cl_b = native_read_US_CL_B(); + sw_ctxt->us_cl_up = native_read_US_CL_UP(); + sw_ctxt->us_cl_m0 = native_read_US_CL_M0(); + sw_ctxt->us_cl_m1 = native_read_US_CL_M1(); + sw_ctxt->us_cl_m2 = native_read_US_CL_M2(); + sw_ctxt->us_cl_m3 = native_read_US_CL_M3(); + } +} +#else +static inline void kvm_switch_clw_regs(struct kvm_sw_cpu_context *sw_ctxt, bool guest_enter) +{ + /* Nothing to do */ +} +#endif + + +static inline void +switch_ctxt_trap_enable_mask(struct kvm_sw_cpu_context *sw_ctxt) +{ + unsigned osem; + + osem = NATIVE_READ_OSEM_REG_VALUE(); + NATIVE_WRITE_OSEM_REG_VALUE(sw_ctxt->osem); + sw_ctxt->osem = osem; +} + +static inline void 
host_guest_enter(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, unsigned flags) +{ + struct kvm_sw_cpu_context *sw_ctxt = &vcpu->sw_ctxt; + + if (likely(!(flags & DONT_TRAP_MASK_SWITCH))) { + switch_ctxt_trap_enable_mask(sw_ctxt); + /* In full virtualization mode guest sets his own OSEM */ + /* in thread_init() */ + if (!vcpu->is_hv) { + KVM_BUG_ON((NATIVE_READ_OSEM_REG_VALUE() & + HYPERCALLS_TRAPS_MASK) != + HYPERCALLS_TRAPS_MASK); + } + } else { + /* In full virtualization mode guest sets his own OSEM */ + /* in thread_init() */ + if (!vcpu->is_hv) { + KVM_BUG_ON((NATIVE_READ_OSEM_REG_VALUE() & + HYPERCALLS_TRAPS_MASK) != 0); + } + } + + if (flags & FROM_HYPERCALL_SWITCH) { + /* + * Hypercalls - both hardware and software virtualization + */ + KVM_BUG_ON(!sw_ctxt->in_hypercall); + sw_ctxt->in_hypercall = false; + + /* For hypercalls skip the extended part. */ + HOST_RESTORE_HOST_GREGS(ti); + + /* compilation units context */ + if (!(flags & DONT_CU_REGS_SWITCH)) { + kvm_switch_cu_regs(sw_ctxt); + } + + /* restore guest PT context (U_PPTB/U_VPTB) */ + if (!(flags & DONT_MMU_CONTEXT_SWITCH)) { + kvm_switch_mmu_regs(sw_ctxt, vcpu->is_hv); + } + } else if (flags & FULL_CONTEXT_SWITCH) { + + /* + * Interceptions - hardware support is enabled + */ +#ifdef CONFIG_USE_AAU + if (!(flags & DONT_AAU_CONTEXT_SWITCH)) + machine.calculate_aau_aaldis_aaldas(NULL, ti, &sw_ctxt->aau_context); +#endif + + if (machine.flushts) + machine.flushts(); + + if (likely(!(flags & DONT_SAVE_KGREGS_SWITCH))) { + /* For interceptions restore extended part */ + NATIVE_RESTORE_KERNEL_GREGS(&ti->k_gregs); + } + + NATIVE_RESTORE_INTEL_REGS(sw_ctxt); + + /* Isolate from QEMU */ + /* TODO if we want to call QEMU from hypercalls,then + * we should switch more context in hypercalls - see + * the list in sw_ctxt definition */ + kvm_switch_fpu_regs(sw_ctxt); + kvm_switch_cu_regs(sw_ctxt); + if (likely(!(flags & DONT_MMU_CONTEXT_SWITCH))) { + kvm_switch_mmu_regs(sw_ctxt, vcpu->is_hv); + } + 
+#ifdef CONFIG_USE_AAU + if (!(flags & DONT_AAU_CONTEXT_SWITCH)) { + /* + * We cannot rely on %aasr value since interception could have + * happened in guest user before "bap" or in guest trap handler + * before restoring %aasr, so we must restore all AAU registers. + */ + native_clear_apb(); + native_set_aau_context(&sw_ctxt->aau_context); + + /* + * It's important to restore AAD after all return operations. + */ + NATIVE_RESTORE_AADS(&sw_ctxt->aau_context); + } +#endif + } else { + /* + * Return from emulation of interseption to paravirtualized + * vcpu + */ + + /* switch to guest MMU context to continue guest execution */ + kvm_switch_mmu_regs(sw_ctxt, false); + } + + if (flags & DEBUG_REGS_SWITCH) + kvm_switch_debug_regs(sw_ctxt, true); + + KVM_BUG_ON(vcpu->is_hv && !NATIVE_READ_MMU_US_CL_D()); + + /* Switch data stack after all function calls */ + if (flags & USD_CONTEXT_SWITCH) { + if (!(flags & FROM_HYPERCALL_SWITCH) || !vcpu->is_hv) { + kvm_switch_stack_regs(sw_ctxt, false, false); + } else { + /* restore saved source pointers of host stack */ + kvm_switch_stack_regs(sw_ctxt, false, true); + } + + if (vcpu->is_hv) + kvm_switch_clw_regs(sw_ctxt, true); + } +} + +static inline void host_guest_enter_light(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, + bool from_sdisp) +{ + struct kvm_sw_cpu_context *sw_ctxt = &vcpu->sw_ctxt; + + KVM_BUG_ON(!sw_ctxt->in_hypercall); + sw_ctxt->in_hypercall = false; + + HOST_RESTORE_KERNEL_GREGS_AS_LIGHT(ti); + + kvm_switch_cu_regs(sw_ctxt); + + KVM_BUG_ON(vcpu->is_hv && !NATIVE_READ_MMU_US_CL_D()); + + /* Switch data stack after all function calls */ + if (!from_sdisp) { + if (!vcpu->is_hv) { + kvm_switch_stack_regs(sw_ctxt, false, false); + } else { + /* restore saved source pointers of host stack */ + kvm_switch_stack_regs(sw_ctxt, false, true); + kvm_switch_clw_regs(sw_ctxt, true); + } + } +} + +static inline void host_guest_exit(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, unsigned flags) +{ + struct 
kvm_sw_cpu_context *sw_ctxt = &vcpu->sw_ctxt; + + if (likely(!(flags & DONT_TRAP_MASK_SWITCH))) { + switch_ctxt_trap_enable_mask(sw_ctxt); + } + KVM_BUG_ON(NATIVE_READ_OSEM_REG_VALUE() & HYPERCALLS_TRAPS_MASK); + + /* Switch data stack before all function calls */ + if (flags & USD_CONTEXT_SWITCH) { + if (vcpu->is_hv) + kvm_switch_clw_regs(sw_ctxt, false); + + if (!(flags & FROM_HYPERCALL_SWITCH) || !vcpu->is_hv) { + kvm_switch_stack_regs(sw_ctxt, false, false); + } else { + /* save source pointers of host stack */ + kvm_switch_stack_regs(sw_ctxt, true, false); + } + } + + KVM_BUG_ON(vcpu->is_hv && !NATIVE_READ_MMU_US_CL_D()); + + if (flags & FROM_HYPERCALL_SWITCH) { + /* + * Hypercalls - both hardware and software virtualization + */ + KVM_BUG_ON(sw_ctxt->in_hypercall); + sw_ctxt->in_hypercall = true; + + /* For hypercalls skip the extended part. */ + HOST_SAVE_HOST_GREGS(ti); + ONLY_SET_KERNEL_GREGS(ti); + + /* compilation units context */ + if (!(flags & DONT_CU_REGS_SWITCH)) { + kvm_switch_cu_regs(sw_ctxt); + } + /* save guest PT context (U_PPTB/U_VPTB) and restore host */ + /* user PT context */ + if (!(flags & DONT_MMU_CONTEXT_SWITCH)) { + kvm_switch_mmu_regs(sw_ctxt, vcpu->is_hv); + } + } else if (flags & FULL_CONTEXT_SWITCH) { + + /* + * Interceptions - hardware support is enabled + */ +#ifdef CONFIG_USE_AAU + if (!(flags & DONT_AAU_CONTEXT_SWITCH)) { + e2k_aasr_t aasr; + + /* + * We cannot rely on %aasr value since interception could have + * happened in guest user before "bap" or in guest trap handler + * before restoring %aasr, so we must save all AAU registers. 
+ * Several macroses use %aasr to determine, which registers to + * save/restore, so pass worst-case %aasr to them in + * sw_ctxt->aau_context, and save the actual guest value to + * sw_ctxt->aasr + */ + aasr = native_read_aasr_reg(); + SWITCH_GUEST_AAU_AASR(&aasr, &sw_ctxt->aau_context, 1); + /* + * This is placed before saving intc cellar since it is done + * with 'mmurr' instruction which requires AAU to be stopped. + * + * Do this before saving %sbbp as it uses 'alc' + * and thus zeroes %aaldm. + */ + NATIVE_SAVE_AAU_MASK_REGS(&sw_ctxt->aau_context, aasr); + + /* It's important to save AAD before all call operations. */ + NATIVE_SAVE_AADS(&sw_ctxt->aau_context); + /* + * Function calls are allowed from this point on, + * mark it with a compiler barrier. + */ + barrier(); + + /* Since iset v6 %aaldi must be saved too */ + NATIVE_SAVE_AALDIS(sw_ctxt->aau_context.aaldi); + + machine.get_aau_context(&sw_ctxt->aau_context); + + native_clear_apb(); + } +#endif + + /* No atomic/DAM operations are allowed before this point. + * Note that we cannot do this before saving AAU. */ + if (cpu_has(CPU_HWBUG_L1I_STOPS_WORKING)) + E2K_DISP_CTPRS(); + + if (likely(!(flags & DONT_SAVE_KGREGS_SWITCH))) { + /* For interceptions save extended part. 
*/ + machine.save_kernel_gregs(&ti->k_gregs); + ONLY_SET_KERNEL_GREGS(ti); + } + + NATIVE_SAVE_INTEL_REGS(sw_ctxt); +#ifdef CONFIG_MLT_STORAGE + machine.invalidate_MLT(); +#endif + + /* Isolate from QEMU */ + kvm_switch_fpu_regs(sw_ctxt); + kvm_switch_cu_regs(sw_ctxt); + if (likely(!(flags & DONT_MMU_CONTEXT_SWITCH))) { + kvm_switch_mmu_regs(sw_ctxt, vcpu->is_hv); + } + } else { + /* + * Starting emulation of interseption of paravirtualized vcpu + */ + + /* switch to hypervisor MMU context to emulate hw intercept */ + kvm_switch_mmu_regs(sw_ctxt, false); + } + + if (flags & DEBUG_REGS_SWITCH) + kvm_switch_debug_regs(sw_ctxt, false); +} + +static inline void host_guest_exit_light(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu) +{ + struct kvm_sw_cpu_context *sw_ctxt = &vcpu->sw_ctxt; + + KVM_BUG_ON(sw_ctxt->in_hypercall); + sw_ctxt->in_hypercall = true; + + KVM_BUG_ON(vcpu->is_hv && !NATIVE_READ_MMU_US_CL_D()); + + HOST_SAVE_KERNEL_GREGS_AS_LIGHT(ti); + ONLY_SET_KERNEL_GREGS(ti); + + kvm_switch_cu_regs(sw_ctxt); +} + +/* + * Some hypercalls return to guest from other exit point then + * usual hypercall return from. So it need some clearing hypercall track. + */ +static inline bool +host_hypercall_exit(struct kvm_vcpu *vcpu) +{ + struct kvm_sw_cpu_context *sw_ctxt = &vcpu->arch.sw_ctxt; + + if (sw_ctxt->in_hypercall) { + sw_ctxt->in_hypercall = false; + return true; + } + return false; +} + + +/* + * Some hypercalls return from hypercall to host. + * So it need some restore host context and some clearing hypercall track. 
+ */ +static inline void hypercall_exit_to_host(struct kvm_vcpu *vcpu) +{ + struct kvm_sw_cpu_context *sw_ctxt = &vcpu->arch.sw_ctxt; + + switch_ctxt_trap_enable_mask(sw_ctxt); + + KVM_BUG_ON(!sw_ctxt->in_hypercall); + + host_hypercall_exit(vcpu); +} + +/* + * Save/restore VCPU host kernel thread context during switching from + * one guest threads (current) to other guest thread (next) + * It need now save only signal context, because of host kernel stacks + * are the same for all guest threads (processes). + */ +static inline void +pv_vcpu_save_host_context(struct kvm_vcpu *vcpu, gthread_info_t *cur_gti) +{ + cur_gti->signal.stack.base = current_thread_info()->signal_stack.base; + cur_gti->signal.stack.size = current_thread_info()->signal_stack.size; + cur_gti->signal.stack.used = current_thread_info()->signal_stack.used; + cur_gti->signal.traps_num = vcpu->arch.host_ctxt.signal.traps_num; + cur_gti->signal.in_work = vcpu->arch.host_ctxt.signal.in_work; + cur_gti->signal.syscall_num = vcpu->arch.host_ctxt.signal.syscall_num; + cur_gti->signal.in_syscall = vcpu->arch.host_ctxt.signal.in_syscall; +} + +static inline void +pv_vcpu_restore_host_context(struct kvm_vcpu *vcpu, gthread_info_t *next_gti) +{ + current_thread_info()->signal_stack.base = next_gti->signal.stack.base; + current_thread_info()->signal_stack.size = next_gti->signal.stack.size; + current_thread_info()->signal_stack.used = next_gti->signal.stack.used; + vcpu->arch.host_ctxt.signal.traps_num = next_gti->signal.traps_num; + vcpu->arch.host_ctxt.signal.in_work = next_gti->signal.in_work; + vcpu->arch.host_ctxt.signal.syscall_num = next_gti->signal.syscall_num; + vcpu->arch.host_ctxt.signal.in_syscall = next_gti->signal.in_syscall; +} + +static inline void +pv_vcpu_switch_guest_host_context(struct kvm_vcpu *vcpu, + gthread_info_t *cur_gti, gthread_info_t *next_gti) +{ + pv_vcpu_save_host_context(vcpu, cur_gti); + pv_vcpu_restore_host_context(vcpu, next_gti); +} + +static inline void 
+pv_vcpu_switch_kernel_pgd_range(struct kvm_vcpu *vcpu, int cpu) +{ + hpa_t vcpu_root; + + if (is_sep_virt_spaces(vcpu)) { + vcpu_root = kvm_get_space_type_spt_os_root(vcpu); + } else { + vcpu_root = kvm_get_space_type_spt_u_root(vcpu); + } + + copy_kernel_pgd_range(__va(vcpu_root), cpu_kernel_root_pt); +} + +static inline void pv_vcpu_switch_host_context(struct kvm_vcpu *vcpu) +{ + kvm_host_context_t *host_ctxt = &vcpu->arch.host_ctxt; + struct kvm_sw_cpu_context *sw_ctxt = &vcpu->arch.sw_ctxt; + unsigned long *stack; + pt_regs_t *regs; + e2k_usd_hi_t k_usd_hi; + e2k_usd_lo_t k_usd_lo; + e2k_sbr_t k_sbr; + e2k_psp_lo_t k_psp_lo; + e2k_psp_hi_t k_psp_hi; + e2k_pcsp_lo_t k_pcsp_lo; + e2k_pcsp_hi_t k_pcsp_hi; + e2k_upsr_t upsr; + unsigned long base; + unsigned long size; + unsigned long used; + unsigned osem; + + /* keep current state of context */ + stack = current->stack; + regs = current_thread_info()->pt_regs; + upsr = current_thread_info()->upsr; + k_usd_lo = current_thread_info()->k_usd_lo; + k_usd_hi = current_thread_info()->k_usd_hi; + k_sbr.SBR_reg = (unsigned long)stack + KERNEL_C_STACK_SIZE + + KERNEL_C_STACK_OFFSET; + k_psp_lo = current_thread_info()->k_psp_lo; + k_psp_hi = current_thread_info()->k_psp_hi; + k_pcsp_lo = current_thread_info()->k_pcsp_lo; + k_pcsp_hi = current_thread_info()->k_pcsp_hi; + + /* restore VCPU thread context */ + current->stack = host_ctxt->stack; + current_thread_info()->pt_regs = host_ctxt->pt_regs; + current_thread_info()->upsr = host_ctxt->upsr; + current_thread_info()->k_usd_hi = host_ctxt->k_usd_hi; + current_thread_info()->k_usd_lo = host_ctxt->k_usd_lo; + current_thread_info()->k_psp_lo = host_ctxt->k_psp_lo; + current_thread_info()->k_psp_hi = host_ctxt->k_psp_hi; + current_thread_info()->k_pcsp_lo = host_ctxt->k_pcsp_lo; + current_thread_info()->k_pcsp_hi = host_ctxt->k_pcsp_hi; + + /* save VCPU thread context */ + host_ctxt->stack = stack; + host_ctxt->pt_regs = regs; + host_ctxt->upsr = upsr; + host_ctxt->k_usd_lo = 
k_usd_lo; + host_ctxt->k_usd_hi = k_usd_hi; + host_ctxt->k_sbr = k_sbr; + host_ctxt->k_psp_lo = k_psp_lo; + host_ctxt->k_psp_hi = k_psp_hi; + host_ctxt->k_pcsp_lo = k_pcsp_lo; + host_ctxt->k_pcsp_hi = k_pcsp_hi; + + /* remember host/guest OSEM registers state & restore guest/host state */ + osem = host_ctxt->osem; + host_ctxt->osem = sw_ctxt->osem; + sw_ctxt->osem = osem; + + /* keep current signal stack state */ + base = current_thread_info()->signal_stack.base; + size = current_thread_info()->signal_stack.size; + used = current_thread_info()->signal_stack.used; + /* atomic trap_num is not used for host thread, so keep it in place */ + + /* restote VCPU thread signal stack state */ + current_thread_info()->signal_stack.base = host_ctxt->signal.stack.base; + current_thread_info()->signal_stack.size = host_ctxt->signal.stack.size; + current_thread_info()->signal_stack.used = host_ctxt->signal.stack.used; + + /* save VCPU thread signal stack state */ + host_ctxt->signal.stack.base = base; + host_ctxt->signal.stack.size = size; + host_ctxt->signal.stack.used = used; + /* atomic trap_num & in_work & syscall_num & in_syscall will not be */ + /* used for host thread, so keep it in place for last guest thread */ +} + +static inline void pv_vcpu_exit_to_host(struct kvm_vcpu *vcpu) +{ + /* save VCPU guest thread context */ + /* restore VCPU host thread context */ + pv_vcpu_switch_host_context(vcpu); +#ifdef DEBUG_UPSR_FP_DISABLE + if (unlikely(!current_thread_info()->upsr.UPSR_fe)) { + pr_err("%s(): switch to host QEMU process with disabled " + "FloatPoint mask, UPSR 0x%x\n", + __func__, current_thread_info()->upsr.UPSR_reg); + /* correct UPSR to enable float pointing */ + current_thread_info()->upsr.UPSR_fe = 1; + } + if (unlikely(!vcpu->arch.host_ctxt.upsr.UPSR_fe)) { + pr_err("%s(): switch from host VCPU process where disabled " + "FloatPoint mask, UPSR 0x%x\n", + __func__, vcpu->arch.host_ctxt.upsr.UPSR_reg); + } +#endif /* DEBUG_UPSR_FP_DISABLE */ +} + +static inline 
void pv_vcpu_enter_to_guest(struct kvm_vcpu *vcpu) +{ + /* save VCPU host thread context */ + /* restore VCPU guest thread context */ + pv_vcpu_switch_host_context(vcpu); +#ifdef DEBUG_UPSR_FP_DISABLE + if (unlikely(!current_thread_info()->upsr.UPSR_fe)) { + pr_err("%s(): switch to host VCPU process with disabled " + "FloatPoint mask, UPSR 0x%x\n", + __func__, current_thread_info()->upsr.UPSR_reg); + /* do not correct UPSR, maybe it should be */ + } +#endif /* DEBUG_UPSR_FP_DISABLE */ +} + +static inline void +host_switch_trap_enable_mask(struct thread_info *ti, struct pt_regs *regs, + bool guest_enter) +{ + struct kvm_vcpu *vcpu; + struct kvm_sw_cpu_context *sw_ctxt; + + if (trap_on_guest(regs)) { + vcpu = ti->vcpu; + sw_ctxt = &vcpu->arch.sw_ctxt; + if (guest_enter) { + /* return from trap, restore hypercall flag */ + sw_ctxt->in_hypercall = regs->in_hypercall; + } else { /* guest exit */ + /* enter to trap, save hypercall flag because of */ + /* trap handler can pass traps to guest and run */ + /* guest trap handler with recursive hypercalls */ + regs->in_hypercall = sw_ctxt->in_hypercall; + } + if (sw_ctxt->in_hypercall) { + /* mask should be already switched or */ + /* will be switched by hypercall */ + return; + } + switch_ctxt_trap_enable_mask(sw_ctxt); + } +} + +static __always_inline bool +pv_vcpu_trap_on_guest_kernel(pt_regs_t *regs) +{ + if (regs && is_trap_pt_regs(regs) && guest_kernel_mode(regs)) + return true; + + return false; +} + +static inline bool +host_guest_trap_pending(struct thread_info *ti) +{ + struct pt_regs *regs = ti->pt_regs; + struct kvm_vcpu *vcpu; + + if (likely(!regs || !is_trap_pt_regs(regs) || + !kvm_test_intc_emul_flag(regs))) { + /* it is not paravirtualized guest VCPU intercepts */ + /* emulation mode, so nothing to do more */ + return false; + } + vcpu = ti->vcpu; + if (!kvm_check_is_vcpu_intc_TIRs_empty(vcpu)) { + /* there are some injected traps for guest */ + kvm_clear_vcpu_guest_stacks_pending(vcpu, regs); + return true; + 
} + if (kvm_is_vcpu_guest_stacks_pending(vcpu, regs)) { + /* guest user spilled stacks is not empty, */ + /* so it need rocover its */ + return true; + } + return false; +} + +static inline bool +host_trap_from_guest_user(struct thread_info *ti) +{ + struct pt_regs *regs = ti->pt_regs; + + if (likely(!host_guest_trap_pending(ti) && regs->traps_to_guest == 0)) + return false; + return !pv_vcpu_trap_on_guest_kernel(ti->pt_regs); +} + +static inline bool +host_syscall_from_guest_user(struct thread_info *ti) +{ + struct pt_regs *regs = ti->pt_regs; + + if (likely(!regs || is_trap_pt_regs(regs) || + !kvm_test_intc_emul_flag(regs))) { + /* it is not paravirtualized guest VCPU intercepts */ + /* emulation mode, so nothing system calls from guest */ + return false; + } + KVM_BUG_ON(ti->vcpu == NULL); + KVM_BUG_ON(guest_kernel_mode(regs)); + return true; +} + +static inline void +host_trap_guest_exit_intc(struct thread_info *ti, struct pt_regs *regs) +{ + if (likely(!kvm_test_intc_emul_flag(regs))) { + /* it is not paravirtualized guest VCPU intercepts*/ + /* emulation mode, so nothing to do more */ + return; + } + kvm_clear_intc_emul_flag(regs); + + /* + * Return from trap on paravirtualized guest VCPU which was + * interpreted as interception + */ + return_from_pv_vcpu_intc(ti, regs); +} + +static inline bool +host_return_to_injected_guest_syscall(struct thread_info *ti, pt_regs_t *regs) +{ + struct kvm_vcpu *vcpu; + int syscall_num, in_syscall; + + vcpu = ti->vcpu; + syscall_num = atomic_read(&vcpu->arch.host_ctxt.signal.syscall_num); + in_syscall = atomic_read(&vcpu->arch.host_ctxt.signal.in_syscall); + + if (likely(syscall_num > 0)) { + if (in_syscall == syscall_num) { + /* all injected system calls are already handling */ + return false; + } + /* it need return to start injected system call */ + return true; + } + return false; +} + +static inline bool +host_return_to_injected_guest_trap(struct thread_info *ti, pt_regs_t *regs) +{ + struct kvm_vcpu *vcpu; + 
gthread_info_t *gti; + int traps_num, in_work; + + vcpu = ti->vcpu; + gti = pv_vcpu_get_gti(vcpu); + traps_num = atomic_read(&vcpu->arch.host_ctxt.signal.traps_num); + in_work = atomic_read(&vcpu->arch.host_ctxt.signal.in_work); + + if (unlikely(traps_num == 0)) { + /* there are nothing injected traps */ + return false; + } + if (traps_num == in_work) { + /* there are/(is) some injected to guest traps */ + /* but all the traps are already handling */ + return false; + } + + /* it need return to start handling of new injected trap */ + if (test_gti_thread_flag(gti, GTIF_KERNEL_THREAD) || + pv_vcpu_trap_on_guest_kernel(regs)) { + /* return to recursive injected trap at guest kernel mode */ + /* so all guest stacks were already switched to */ + return false; + } + + /* return from host trap to injected trap at user mode */ + /* so it need switch all guest user's stacks to kernel */ + return true; +} + +static inline struct e2k_stacks * +host_trap_guest_get_pv_vcpu_restore_stacks(struct thread_info *ti, + struct pt_regs *regs) +{ + + if (host_return_to_injected_guest_trap(ti, regs)) { + /* it need switch to guest kernel context */ + return ®s->g_stacks; + } else { + /* it need switch to guest user context */ + return native_trap_guest_get_restore_stacks(ti, regs); + } +} + +static inline struct e2k_stacks * +host_syscall_guest_get_pv_vcpu_restore_stacks(struct thread_info *ti, + struct pt_regs *regs) +{ + + if (host_return_to_injected_guest_syscall(ti, regs)) { + /* it need switch to guest kernel context */ + return ®s->g_stacks; + } else { + /* it need switch to guest user context */ + return native_syscall_guest_get_restore_stacks(regs); + } +} + +static inline struct e2k_stacks * +host_trap_guest_get_restore_stacks(struct thread_info *ti, struct pt_regs *regs) +{ + if (test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE)) { + /* host return to paravirtualized guest (VCPU) mode */ + return host_trap_guest_get_pv_vcpu_restore_stacks(ti, regs); + } + return 
native_trap_guest_get_restore_stacks(ti, regs); +} + +static inline void +host_trap_pv_vcpu_exit_trap(struct thread_info *ti, struct pt_regs *regs) +{ + struct kvm_vcpu *vcpu = ti->vcpu; + int traps_num, in_work; + + traps_num = atomic_read(&vcpu->arch.host_ctxt.signal.traps_num); + in_work = atomic_read(&vcpu->arch.host_ctxt.signal.in_work); + if (likely(traps_num <= 0)) { + /* it is return from host trap to guest (VCPU) mode */ + return; + } else if (traps_num == in_work) { + /* there are/(is) some injected to guest traps */ + /* but all the traps are already handling */ + return; + } + + /* it need return to start handling of new injected trap */ + atomic_inc(&vcpu->arch.host_ctxt.signal.in_work); +} + +static inline void +host_trap_guest_exit_trap(struct thread_info *ti, struct pt_regs *regs) +{ + if (test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE)) { + /* host return to paravirtualized guest (VCPU) mode */ + host_trap_pv_vcpu_exit_trap(ti, regs); + } + + host_switch_trap_enable_mask(ti, regs, true); + + /* restore global regs of native kernel */ + native_trap_guest_enter(ti, regs, EXIT_FROM_TRAP_SWITCH); +} + +static inline void +host_trap_guest_enter(struct thread_info *ti, struct pt_regs *regs, + unsigned flags) +{ + if (flags & EXIT_FROM_INTC_SWITCH) { + host_trap_guest_exit_intc(ti, regs); + } + if (flags & EXIT_FROM_TRAP_SWITCH) { + host_trap_guest_exit_trap(ti, regs); + } +} + +static inline void +host_syscall_pv_vcpu_exit_trap(struct thread_info *ti, struct pt_regs *regs) +{ + struct kvm_vcpu *vcpu = ti->vcpu; + int syscall_num, in_syscall; + + syscall_num = atomic_read(&vcpu->arch.host_ctxt.signal.syscall_num); + in_syscall = atomic_read(&vcpu->arch.host_ctxt.signal.in_syscall); + if (likely(syscall_num == 0)) { + /* it is return from host syscall to guest (VCPU) mode */ + return; + } else if (in_syscall == syscall_num) { + /* there is some injected to guest system call */ + /* and all the call is already handling */ + return; + } + + /* it need return 
to start handling of new injected system call */ + atomic_inc(&vcpu->arch.host_ctxt.signal.in_syscall); +} + +extern void host_syscall_guest_exit_trap(struct thread_info *, struct pt_regs *); + +extern void kvm_init_pv_vcpu_intc_handling(struct kvm_vcpu *vcpu, pt_regs_t *regs); +extern int last_light_hcall; + +static inline void +host_trap_guest_exit(struct thread_info *ti, struct pt_regs *regs, + trap_pt_regs_t *trap, unsigned flags) +{ + if (likely(!test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE))) + return; + + clear_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE); + + /* + * Trap on paravirtualized guest VCPU is interpreted as intercept + */ + kvm_emulate_pv_vcpu_intc(ti, regs, trap); + + /* only after switch to host MMU context at previous function */ + host_switch_trap_enable_mask(ti, regs, false); +} + +/* + * The function should return bool 'is the system call from guest?' + */ +static inline bool host_guest_syscall_enter(struct pt_regs *regs, + bool ts_host_at_vcpu_mode) +{ + if (likely(!ts_host_at_vcpu_mode)) + return false; /* it is not guest system call */ + + clear_ts_flag(TS_HOST_AT_VCPU_MODE); + return pv_vcpu_syscall_intc(current_thread_info(), regs); +} +#endif /* CONFIG_VIRTUALIZATION */ + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest */ +# include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravrtualized) */ +# include +#else +/* it is native kernel without any virtualization or */ +/* host kernel with virtualization support */ +#ifndef CONFIG_VIRTUALIZATION +/* it is only native kernel without any virtualization */ +static inline void __guest_enter(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, unsigned flags) +{ +} + +static inline void __guest_enter_light(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, + bool from_sdisp) +{ +} + +static inline void __guest_exit(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, unsigned flags) +{ +} +static inline void 
__guest_exit_light(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu) +{ +} +static inline void +trap_guest_enter(struct thread_info *ti, struct pt_regs *regs, unsigned flags) +{ + native_trap_guest_enter(ti, regs, flags); +} +static inline void +trap_guest_exit(struct thread_info *ti, struct pt_regs *regs, + trap_pt_regs_t *trap, unsigned flags) +{ + native_trap_guest_exit(ti, regs, trap, flags); +} +static inline bool +guest_trap_pending(struct thread_info *ti) +{ + return native_guest_trap_pending(ti); +} + +static inline bool +guest_trap_from_user(struct thread_info *ti) +{ + return native_trap_from_guest_user(ti); +} + +static inline bool +guest_syscall_from_user(struct thread_info *ti) +{ + return native_syscall_from_guest_user(ti); +} + +static inline struct e2k_stacks * +trap_guest_get_restore_stacks(struct thread_info *ti, struct pt_regs *regs) +{ + return native_trap_guest_get_restore_stacks(ti, regs); +} + +static inline struct e2k_stacks * +syscall_guest_get_restore_stacks(bool ts_host_at_vcpu_mode, struct pt_regs *regs) +{ + return native_syscall_guest_get_restore_stacks(regs); +} + +#define ts_host_at_vcpu_mode() false + +/* + * The function should return bool is the system call from guest + */ +static inline bool guest_syscall_enter(struct pt_regs *regs, + bool ts_host_at_vcpu_mode) +{ + return native_guest_syscall_enter(regs); +} +static inline void guest_exit_intc(struct pt_regs *regs, + bool intc_emul_flag) { } +static inline void guest_syscall_exit_trap(struct pt_regs *regs, + bool ts_host_at_vcpu_mode) { } + +#else /* CONFIG_VIRTUALIZATION */ +/* it is only host kernel with virtualization support */ +static inline void __guest_enter(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, unsigned flags) +{ + host_guest_enter(ti, vcpu, flags); +} + +static inline void __guest_enter_light(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, + bool from_sdisp) +{ + host_guest_enter_light(ti, vcpu, from_sdisp); +} + +static inline void 
__guest_exit(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, unsigned flags) +{ + host_guest_exit(ti, vcpu, flags); +} +static inline void __guest_exit_light(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu) +{ + host_guest_exit_light(ti, vcpu); +} +static inline void +trap_guest_enter(struct thread_info *ti, struct pt_regs *regs, unsigned flags) +{ + host_trap_guest_enter(ti, regs, flags); +} +static inline void +trap_guest_exit(struct thread_info *ti, struct pt_regs *regs, + trap_pt_regs_t *trap, unsigned flags) +{ + host_trap_guest_exit(ti, regs, trap, flags); +} +static inline bool +guest_trap_pending(struct thread_info *ti) +{ + return host_guest_trap_pending(ti); +} + +static inline bool +guest_trap_from_user(struct thread_info *ti) +{ + return host_trap_from_guest_user(ti); +} + +static inline bool +guest_syscall_from_user(struct thread_info *ti) +{ + return host_syscall_from_guest_user(ti); +} + +static inline struct e2k_stacks * +trap_guest_get_restore_stacks(struct thread_info *ti, struct pt_regs *regs) +{ + return host_trap_guest_get_restore_stacks(ti, regs); +} + +static inline struct e2k_stacks * +syscall_guest_get_restore_stacks(bool ts_host_at_vcpu_mode, struct pt_regs *regs) +{ + if (unlikely(ts_host_at_vcpu_mode)) { + /* host return to paravirtualized guest (VCPU) mode */ + return host_syscall_guest_get_pv_vcpu_restore_stacks( + current_thread_info(), regs); + } + return native_syscall_guest_get_restore_stacks(regs); +} + +#define ts_host_at_vcpu_mode() unlikely(!!test_ts_flag(TS_HOST_AT_VCPU_MODE)) + +/* + * The function should return bool is the system call from guest + */ +static inline bool guest_syscall_enter(struct pt_regs *regs, + bool ts_host_at_vcpu_mode) +{ + return host_guest_syscall_enter(regs, ts_host_at_vcpu_mode); +} + +static inline void guest_exit_intc(struct pt_regs *regs, bool intc_emul_flag) +{ + if (unlikely(intc_emul_flag)) { + kvm_clear_intc_emul_flag(regs); + + /* + * Return from trap on paravirtualized guest VCPU 
which was + * interpreted as interception + */ + return_from_pv_vcpu_intc(current_thread_info(), regs); + } +} + +static inline void guest_syscall_exit_trap(struct pt_regs *regs, + bool ts_host_at_vcpu_mode) +{ + if (unlikely(ts_host_at_vcpu_mode)) + host_syscall_guest_exit_trap(current_thread_info(), regs); +} + +#endif /* ! CONFIG_VIRTUALIZATION */ +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* ! _E2K_KVM_SWITCH_H */ diff --git a/arch/e2k/include/asm/kvm/thread_info.h b/arch/e2k/include/asm/kvm/thread_info.h new file mode 100644 index 000000000000..943ad3324507 --- /dev/null +++ b/arch/e2k/include/asm/kvm/thread_info.h @@ -0,0 +1,555 @@ +/* + * kvm_thread_info.h: In-kernel KVM guest thread info related definitions + * Copyright (c) 2011, MCST. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __E2K_KVM_THREAD_INFO_H +#define __E2K_KVM_THREAD_INFO_H + +#include +#include +#include + +#include +#include +#include +#include + +#ifdef CONFIG_VIRTUALIZATION + +struct kvm; +struct kvm_vcpu; +struct gmm_struct; + +/* TODO a security hole, undef it - bad guest can cause host to panic() */ +#define GTI_DEBUG_MODE + +#ifdef GTI_DEBUG_MODE +#define GTI_BUG_ON(cond) BUG_ON(cond) +#else /* ! GTI_DEBUG_MODE */ +#define GTI_BUG_ON(cond) do { } while (0) +#endif /* GTI_DEBUG_MODE */ + +/* + * Hardware & local data stacks registers state to save/restore guest stacks + * while hypercalls under paravirtualization without hardware support. + * It allows to emulate switch (on HCALL) and restore (on HRET) hardware + * supported extensions. 
+ */ +typedef struct guest_hw_stack { + bool valid; /* stacks are valid */ + e2k_stacks_t stacks; /* pointers to local data & hardware */ + /* stacks */ + e2k_mem_crs_t crs; /* to startup & launch VCPU */ + e2k_cutd_t cutd; /* Compilation Unit table pointer */ +} guest_hw_stack_t; + +/* + * Guest kernel pt_regs structure to keep some info about guest state on host + * It needs to enable recursive traps, system calls, signal handlers + * (same as on host kernel) + */ +typedef enum gpt_regs_type { + undefined_regs_type, /* unknown or any type */ + start_regs_type, /* start activation of guest */ + trap_regs_type, /* beacause of trap */ + syscall_regs_type, /* system call */ + hypercall_regs_type, /* hypercall */ + sig_handler_regs_type, /* to run signal handler */ +} gpt_regs_type_t; + +typedef struct gpt_regs { + gpt_regs_type_t type; /* the reason to have been created */ + /* this instance of structure */ + + /* follow fields to save last guest and host activation state */ + /* to enable recursive kernel activations */ + int g_stk_frame_no; /* number of saved guest kernel */ + /* data stack activation (see below) */ + e2k_addr_t g_usd_size; /* guest kernel data stack size */ + int k_stk_frame_no; /* number of saved host kernel */ + /* data stack activation */ + e2k_addr_t k_usd_size; /* host kernel data stack size */ + e2k_size_t pcsp_ind; /* chain procedure stack index */ + struct pt_regs *pt_regs; /* head of pt_regs structure queue: */ + /* pointer to current pt_regs */ + + /* all structures type of this gpt_regs (same as pt_regs structures) */ + /* allocated into host kernel data stack because of recursive events */ + /* (traps, system calls, hypercalls, signale handler) handled by */ + /* kernel on one stack using stack discipline */ + /* So it can use index into this stack as reference to previous or */ + /* next same structure instead of real pointer (address) to avoid */ + /* complex addresses translation while copy/clone stacks (fork()) */ + e2k_addr_t 
next_index; /* index of next same structure, */ + /* into host kernel data stack */ + /* which keeps state of previous */ + /* guest kernel activation */ +} gpt_regs_t; + +typedef struct vcpu_l_gregs { + local_gregs_t gregs; /* guest user "local" global */ + /* registers to save updated on page */ + /* fault values */ + u64 updated; /* mask of updated registers "local" */ + /* global registers (see above) */ + bool valid; /* gregs is valid */ + int gener; /* generation of valid gregs */ +} vcpu_l_gregs_t; + +typedef struct kvm_signal_context { + /* signal stack area is used to store interrupted context */ + /* one for host thread and one for each guest threads */ + struct signal_stack stack; + atomic_t traps_num; /* counter of recursive traps at the */ + /* signal stack, actual only for guest */ + atomic_t in_work; /* counter of recursive traps in the */ + /* work, actual only for guest */ + atomic_t syscall_num; /* counter of recursive system calls at the */ + /* signal stack, actual only for guest */ + atomic_t in_syscall; /* counter of recursive system calls in the */ + /* work, actual only for guest */ +} kvm_signal_context_t; + +/* + * Guest kernel thread info structure + */ +typedef struct gthread_info { + gpid_t *gpid; + + /* guest kernel stacks pointers */ + e2k_usd_lo_t g_usd_lo; + e2k_usd_hi_t g_usd_hi; + e2k_sbr_t g_sbr; + e2k_psp_lo_t g_psp_lo; + e2k_psp_hi_t g_psp_hi; + e2k_pcsp_lo_t g_pcsp_lo; + e2k_pcsp_hi_t g_pcsp_hi; + + e2k_size_t us_size; /* guest local data size: */ + /* kernel stack for kernel thread, */ + /* user stack for user */ + guest_hw_stack_t stack_regs; /* state of pointers to local & */ + /* hardware stacks of guest */ + + data_stack_t data_stack; + void *stack; + hw_stack_t hw_stacks; + vcpu_l_gregs_t l_gregs; /* guest user "local" global */ + /* registers to save updated on page */ + /* fault values */ + kernel_gregs_t gk_gregs; /* guest kernel global resiters state */ + /* some registers can be updated only */ + /* after migration 
to other VCPU */ + kernel_gregs_t gu_gregs; /* guest user global resiters state */ + /* only for global registers which */ + /* used by the guest kernel for its */ + /* own purposes */ + + /* the following flags to mark event: */ + /* hardware stacks bounds trap occured, but 'sge' on guest */ + /* is disabled and the trap is not yet passed to guest kernel */ + bool proc_bounds_waiting; /* procedure stack bounds */ + bool chain_bounds_waiting; /* chain stack bounds */ + + int g_stk_frame_no; /* current number of guest kernel */ + /* data stack activation */ + /* each kernel activation from user */ + /* (trap, system call, signal */ + /* handler) runs on kernel data stack */ + /* Activations can be nested, */ + /* so for each new nested activation */ + /* need new free frame of data stack */ + /* above (for data stack below) */ + /* previous activation */ + int k_stk_frame_no; /* current number of host kernel */ + /* data stack activation */ + + unsigned long flags; /* flags (see below) */ + struct gmm_struct *gmm; /* pointer to current guest mm agent */ + /* on host */ + /* NULL for guest kernel threads */ + hpa_t nonp_root_hpa; /* physical base of nonpaging root PT */ + bool gmm_in_release; /* guest mm is releasing (exit_mm()) */ + + /* following fields should be updated for each multi-stack process */ + /* (task, thread) while switching */ + struct kvm_vcpu *vcpu; /* pointer to VCPU where the thread */ + /* is running now or was run last */ + /* time */ + struct pt_regs fork_regs; /* pt_regs structure of last host */ + /* sys_fork() system call */ + u32 intr_counter; /* number of traps are handling */ + atomic_t signal_counter; /* number of signals are handling */ + struct sw_regs sw_regs; /* user special registers state */ + /* to save/restore while guest */ + /* process switch */ + kvm_signal_context_t signal; /* the host kernel's signal/trap */ + /* stack of contexts */ + + /* follow pointer should be updated by each recursive traps, */ + /* system calls, 
signal handler running */ + gpt_regs_t *gpt_regs; /* pointer to current state of */ + /* guest kernel */ + + e2k_upsr_t u_upsr; /* guest user is different from */ + e2k_upsr_t k_upsr; /* guest kernel UPSR */ + + /* FIXME: follow fields is added only to debug correct save/restore */ + /* guest UPSR state, delete after debugging completion with some */ + /* debug statements into other source files of kvm */ + bool u_upsr_valid; /* guest user upsr value is valid */ + bool k_upsr_valid; /* guest kernel upsr value is valid */ + + global_regs_t gregs; /* structure to keep guest user */ + /* global registers state */ + /* FIXME: this structure duplicates */ + /* similar structure into sw_regs, */ + /* should use structure into sw_regs */ + /* only */ + bool gregs_active; /* gregs structure above is active */ + /* for user global registers */ + bool gregs_valid; /* gregs structure above contains */ + /* actual global registers state */ + bool gregs_for_currents_valid; /* gregs used for currents pointers */ + /* contain actual global registers */ + /* state, when real global registers */ + /* is set to currents pointers */ + bool task_is_binco; /* the guest task is binary */ + /* application compiler */ + bool task_is_protect; /* the guest task is running at */ + /* protected mode */ +} gthread_info_t; + +/* guest stacks flag (field at structure above) */ + +/* + * Guest thread flags + */ +#define GTIF_VCPU_START_THREAD 0 /* this thread is booting (start) */ + /* guest kernel thread */ +#define GTIF_KERNEL_THREAD 1 /* this thread is guest kernel */ + /* thread */ +#define GTIF_THREAD_MIGRATED 2 /* the thread was migrated from one */ + /* VCPU to other */ +#define GTIF_USER_THREAD 4 /* the process is user thread on */ + /* common virtual memory (gmm) */ +#define GTIF_HW_PS_LOCKED 16 /* hardware procedure stack */ + /* was locked by host */ +#define GTIF_HW_PCS_LOCKED 17 /* hardware chain stack */ + /* was locked by host */ +#define GTIF_HW_PS_PRESENTED 18 /* hardware 
procedure stack */ + /* was populated by host */ +#define GTIF_HW_PCS_PRESENTED 19 /* hardware chain stack */ + /* was populated by host */ +#define GTIF_DATA_STACK_HOSTED 20 /* local data stack of guest */ + /* was registered on host */ +#define GTIF_HW_PS_HOSTED 21 /* hardware procedure stack of guest */ + /* was registered on host */ +#define GTIF_HW_PCS_HOSTED 22 /* hardware chain stack of guest */ + /* was registered on host */ +#define GTIF_HW_PS_HAS_GUARD_PAGE 28 /* hardware procedure stack of guest */ + /* has extra guard page */ +#define GTIF_HW_PCS_HAS_GUARD_PAGE 29 /* hardware chain stack of guest */ + /* has extra guard page */ + +#define _GTIF_VCPU_START_THREAD (1UL << GTIF_VCPU_START_THREAD) +#define _GTIF_KERNEL_THREAD (1UL << GTIF_KERNEL_THREAD) +#define _GTIF_THREAD_MIGRATED (1UL << GTIF_THREAD_MIGRATED) +#define _GTIF_USER_THREAD (1UL << GTIF_USER_THREAD) +#define _GTIF_HW_PS_LOCKED (1UL << GTIF_HW_PS_LOCKED) +#define _GTIF_HW_PCS_LOCKED (1UL << GTIF_HW_PCS_LOCKED) +#define _GTIF_HW_PS_PRESENTED (1UL << GTIF_HW_PS_PRESENTED) +#define _GTIF_HW_PCS_PRESENTED (1UL << GTIF_HW_PCS_PRESENTED) +#define _GTIF_DATA_STACK_HOSTED (1UL << GTIF_DATA_STACK_HOSTED) +#define _GTIF_HW_PS_HOSTED (1UL << GTIF_HW_PS_HOSTED) +#define _GTIF_HW_PCS_HOSTED (1UL << GTIF_HW_PCS_HOSTED) +#define _GTIF_HW_PS_HAS_GUARD_PAGE (1UL << GTIF_HW_PS_HAS_GUARD_PAGE) +#define _GTIF_HW_PCS_HAS_GUARD_PAGE (1UL << GTIF_HW_PCS_HAS_GUARD_PAGE) + +#define GTIF_ALL_STACKS_MASK \ + (_GTIF_HW_PS_LOCKED | _GTIF_HW_PCS_LOCKED | \ + _GTIF_HW_PS_PRESENTED | _GTIF_HW_PCS_PRESENTED | \ + _GTIF_DATA_STACK_HOSTED | _GTIF_HW_PS_HOSTED | \ + _GTIF_HW_PCS_HOSTED) + +static inline void set_the_flag(unsigned long *flags, int the_flag) +{ + set_bit(the_flag, flags); +} + +static inline void clear_the_flag(unsigned long *flags, int the_flag) +{ + clear_bit(the_flag, flags); +} + +static inline int test_and_set_the_flag(unsigned long *flags, int the_flag) +{ + return test_and_set_bit(the_flag, flags); +} + 
+static inline int test_and_clear_the_flag(unsigned long *flags, int the_flag) +{ + return test_and_clear_bit(the_flag, flags); +} + +static inline int test_the_flag(unsigned long *flags, int the_flag) +{ + return test_bit(the_flag, flags); +} + +static inline void set_gti_thread_flag(gthread_info_t *gti, int flag) +{ + set_the_flag(>i->flags, flag); +} + +static inline void clear_gti_thread_flag(gthread_info_t *gti, int flag) +{ + clear_the_flag(>i->flags, flag); +} + +static inline int test_and_set_gti_thread_flag(gthread_info_t *gti, int flag) +{ + return test_and_set_the_flag(>i->flags, flag); +} + +static inline int test_and_clear_gti_thread_flag(gthread_info_t *gti, int flag) +{ + return test_and_clear_the_flag(>i->flags, flag); +} + +static inline int test_gti_thread_flag(gthread_info_t *gti, int flag) +{ + return test_the_flag(>i->flags, flag); +} + +/* + * Hardware stacks bounds control + */ +static inline bool test_guest_hw_stack_bounds_waiting(thread_info_t *ti, + unsigned long trap_mask) +{ + gthread_info_t *gti = ti->gthread_info; + bool is_waiting = false; + + if (gti == NULL) + return false; + if (trap_mask & exc_proc_stack_bounds_mask) + is_waiting |= gti->proc_bounds_waiting; + if (trap_mask & exc_chain_stack_bounds_mask) + is_waiting |= gti->chain_bounds_waiting; + return is_waiting; +} +static inline bool test_guest_proc_bounds_waiting(thread_info_t *ti) +{ + return test_guest_hw_stack_bounds_waiting(ti, + exc_proc_stack_bounds_mask); +} +static inline bool test_guest_chain_bounds_waiting(thread_info_t *ti) +{ + return test_guest_hw_stack_bounds_waiting(ti, + exc_chain_stack_bounds_mask); +} +static inline void set_guest_hw_stack_bounds_waiting(thread_info_t *ti, + unsigned long trap_mask) +{ + gthread_info_t *gti = ti->gthread_info; + + if (gti == NULL) + return; + if (trap_mask & exc_proc_stack_bounds_mask) { + gti->proc_bounds_waiting = true; + } else if (trap_mask & exc_chain_stack_bounds_mask) { + gti->chain_bounds_waiting = true; + } else { 
+ BUG_ON(true); + } +} +static inline void set_guest_proc_bounds_waiting(thread_info_t *ti) +{ + set_guest_hw_stack_bounds_waiting(ti, exc_proc_stack_bounds_mask); +} +static inline void set_guest_chain_bounds_waiting(thread_info_t *ti) +{ + set_guest_hw_stack_bounds_waiting(ti, exc_chain_stack_bounds_mask); +} +static inline bool +test_and_set_guest_hw_stack_bounds_waiting(thread_info_t *ti, + unsigned long trap_mask) +{ + bool waiting; + + waiting = test_guest_hw_stack_bounds_waiting(ti, trap_mask); + if (!waiting) + set_guest_hw_stack_bounds_waiting(ti, trap_mask); + return waiting; +} +static inline bool test_and_set_guest_proc_bounds_waiting(thread_info_t *ti) +{ + return test_and_set_guest_hw_stack_bounds_waiting(ti, + exc_proc_stack_bounds_mask); +} +static inline bool test_and_set_guest_chain_bounds_waiting(thread_info_t *ti) +{ + return test_and_set_guest_hw_stack_bounds_waiting(ti, + exc_chain_stack_bounds_mask); +} +static inline void clear_guest_hw_stack_bounds_waiting(thread_info_t *ti, + unsigned long trap_mask) +{ + gthread_info_t *gti = ti->gthread_info; + + if (gti == NULL) + return; + if (trap_mask & exc_proc_stack_bounds_mask) { + gti->proc_bounds_waiting = false; + } else if (trap_mask & exc_chain_stack_bounds_mask) { + gti->chain_bounds_waiting = false; + } else { + BUG_ON(true); + } +} +static inline void clear_guest_proc_bounds_waiting(thread_info_t *ti) +{ + clear_guest_hw_stack_bounds_waiting(ti, exc_proc_stack_bounds_mask); +} +static inline void clear_guest_chain_bounds_waiting(thread_info_t *ti) +{ + clear_guest_hw_stack_bounds_waiting(ti, exc_chain_stack_bounds_mask); +} +static inline bool +test_and_clear_guest_hw_stack_bounds_waiting(thread_info_t *ti, + unsigned long trap_mask) +{ + bool waiting; + + waiting = test_guest_hw_stack_bounds_waiting(ti, trap_mask); + if (waiting) + clear_guest_hw_stack_bounds_waiting(ti, trap_mask); + return waiting; +} +static inline bool test_and_clear_guest_proc_bounds_waiting(thread_info_t *ti) +{ + 
return test_and_clear_guest_hw_stack_bounds_waiting(ti, + exc_proc_stack_bounds_mask); +} +static inline bool test_and_clear_guest_chain_bounds_waiting(thread_info_t *ti) +{ + return test_and_clear_guest_hw_stack_bounds_waiting(ti, + exc_chain_stack_bounds_mask); +} + +/* + * Set of functions to manipulate with gpt_regs structures + */ + +#define ATOMIC_GET_HW_PCS_INDEX() \ +({ \ + unsigned long pcsp_hi_val; \ + unsigned int pcshtp_val; \ + e2k_pcsp_hi_t pcsp_hi; \ + \ + ATOMIC_READ_HW_PCS_SIZES(pcsp_hi_val, pcshtp_val); \ + pcsp_hi.PCSP_hi_half = pcsp_hi_val; \ + pcsp_hi.PCSP_hi_ind + pcshtp_val; \ +}) + +static inline e2k_size_t +gpt_regs_addr_to_index(thread_info_t *ti, gpt_regs_t *gregs) +{ + e2k_addr_t bottom; + e2k_addr_t addr; + + if (gregs == NULL) + return -1UL; + bottom = (e2k_addr_t)thread_info_task(ti)->stack; + addr = (e2k_addr_t)gregs; + GTI_BUG_ON(addr < bottom || + addr + sizeof(*gregs) > bottom + KERNEL_C_STACK_SIZE); + return addr - bottom; +} +static inline gpt_regs_t * +gpt_regs_index_to_addr(thread_info_t *ti, e2k_size_t gregs_index) +{ + e2k_addr_t bottom; + + if (gregs_index == -1UL) + return NULL; + bottom = (e2k_addr_t)thread_info_task(ti)->stack; + GTI_BUG_ON(gregs_index < 0 || + gregs_index + sizeof(gpt_regs_t) > KERNEL_C_STACK_SIZE); + return (gpt_regs_t *)(bottom + gregs_index); +} +static inline gpt_regs_t * +get_gpt_regs(thread_info_t *ti) +{ + gthread_info_t *gti = ti->gthread_info; + + GTI_BUG_ON(gti == NULL); + return gti->gpt_regs; +} +static inline void +set_gpt_regs(thread_info_t *ti, gpt_regs_t *gregs) +{ + gthread_info_t *gti = ti->gthread_info; + + GTI_BUG_ON(gti == NULL); + gti->gpt_regs = gregs; +} +static inline void +add_gpt_regs(thread_info_t *ti, gpt_regs_t *new_gregs, gpt_regs_type_t type) +{ + gpt_regs_t *cur_gregs; + + cur_gregs = get_gpt_regs(ti); + new_gregs->type = type; + new_gregs->next_index = gpt_regs_addr_to_index(ti, cur_gregs); + new_gregs->pcsp_ind = ATOMIC_GET_HW_PCS_INDEX(); + set_gpt_regs(ti, 
new_gregs); +} +static inline gpt_regs_t * +delete_gpt_regs(thread_info_t *ti) +{ + gpt_regs_t *cur_gregs; + + cur_gregs = get_gpt_regs(ti); + GTI_BUG_ON(cur_gregs == NULL); + set_gpt_regs(ti, gpt_regs_index_to_addr(ti, cur_gregs->next_index)); + return cur_gregs; +} +static inline gpt_regs_t * +get_next_gpt_regs(thread_info_t *ti, gpt_regs_t *gregs) +{ + BUG_ON(gregs == NULL); + return gpt_regs_index_to_addr(ti, gregs->next_index); +} + +extern int kvm_pv_guest_thread_info_init(struct kvm *kvm); +extern void kvm_pv_guest_thread_info_destroy(struct kvm *kvm); +extern void kvm_pv_clear_guest_thread_info(gthread_info_t *gthread_info); + +#else /* !CONFIG_VIRTUALIZATION */ + +typedef struct guest_hw_stack { + /* empty structure */ +} guest_hw_stack_t; + +typedef struct kvm_signal_context { + /* empty structure */ +} kvm_signal_context_t; +typedef struct gthread_info { + /* empty structure */ +} gthread_info_t; + +#endif /* CONFIG_VIRTUALIZATION */ + +#endif /* __E2K_KVM_THREAD_INFO_H */ diff --git a/arch/e2k/include/asm/kvm/threads.h b/arch/e2k/include/asm/kvm/threads.h new file mode 100644 index 000000000000..809d7d2e6f9c --- /dev/null +++ b/arch/e2k/include/asm/kvm/threads.h @@ -0,0 +1,23 @@ +#ifndef _ASM_E2K_THREADS_H +#define _ASM_E2K_THREADS_H + + +/* + * e2k arch-dependent limits for the nr of threads virtual CPUs + * virtual IRQs + */ + +/* KVM manage */ +#define KVM_VM_MAX_LIMIT 1024 /* max number of VM IDs at use */ + +/* VCPUs manage */ +#define KVM_MAX_VCPUS 64 + +/* VIRQ manage */ +#define KVM_VIRQ_VCPUS_BASE_NO 0x01000000 + +/* VIRQ VCPU: one for each Local APIC */ +/* one for erly timer interrupt */ +#define KVM_MAX_VIRQ_VCPUS (KVM_MAX_VCPUS + 1) + +#endif /* _ASM_E2K_THREADS_H */ diff --git a/arch/e2k/include/asm/kvm/tlbflush.h b/arch/e2k/include/asm/kvm/tlbflush.h new file mode 100644 index 000000000000..aa86001c0708 --- /dev/null +++ b/arch/e2k/include/asm/kvm/tlbflush.h @@ -0,0 +1,165 @@ +/* + * Guest MMU caches flushing on KVM host + * + * Copyright 
2016 Salavat S. Gilyazov (atic@mcst.ru) + */ +#ifndef _E2K_KVM_TLBFLUSH_H +#define _E2K_KVM_TLBFLUSH_H + +#include +#include + + +/* + * Guest VM support on host + * TLB flushing: + * + * - flush_tlb_mm(mm) flushes the specified mm context TLB's + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_range(mm, start, end) flushes a range of pages + * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables + */ + +#ifndef CONFIG_VIRTUALIZATION +/* it is native kernel without any virtualization */ +static __always_inline bool +__flush_guest_cpu_root_pt_page(struct vm_area_struct *vma, e2k_addr_t addr) +{ + return false; /* none any guests and guest addresses */ +} +static __always_inline bool +__flush_guest_cpu_root_pt_range(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end) +{ + return false; /* none any guests and guest addresses */ +} +static __always_inline bool +__flush_guest_cpu_root_pt_mm(struct mm_struct *mm) +{ + return false; /* none any guests and guest addresses */ +} +static __always_inline bool +__flush_guest_cpu_root_pt(void) +{ + return false; /* none any guests and guest addresses */ +} +#else /* CONFIG_VIRTUALIZATION */ +extern void kvm_flush_guest_tlb_mm(struct gmm_struct *gmm); +extern void kvm_flush_guest_tlb_page(struct gmm_struct *gmm, e2k_addr_t addr); +extern void kvm_flush_guest_tlb_range(struct gmm_struct *gmm, + e2k_addr_t start, e2k_addr_t end); +extern void kvm_flush_guest_tlb_pgtables(struct gmm_struct *gmm, + e2k_addr_t start, e2k_addr_t end); +extern void kvm_flush_guest_tlb_range_and_pgtables(struct gmm_struct *gmm, + e2k_addr_t start, e2k_addr_t end); + +/* + * Functions to flush guest CPU root PT on host should return boolean value: + * true if address or MM is from guest VM space and flushing was done + * false if address or MM is not from guest VM space or flushing cannot + * be done + */ +extern bool kvm_do_flush_guest_cpu_root_pt_page(struct vm_area_struct *vma, + e2k_addr_t addr); +extern bool 
kvm_do_flush_guest_cpu_root_pt_range(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end); +extern bool kvm_do_flush_guest_cpu_root_pt_mm(struct mm_struct *mm); +extern bool kvm_do_flush_guest_cpu_root_pt(void); + +static inline bool +kvm_flush_guest_cpu_root_pt_page(struct vm_area_struct *vma, e2k_addr_t addr) +{ + if (MMU_IS_SEPARATE_PT()) { + /* cannot be any CPU root PTs */ + return false; + } else if (!test_thread_flag(TIF_VIRTUALIZED_GUEST)) { + /* it is not guest VCPU process on host */ + /* so cannot have guest VM */ + return false; + } else if (paravirt_enabled()) { + /* it is guest process on guest and guest has not own guests */ + return false; + } + return kvm_do_flush_guest_cpu_root_pt_page(vma, addr); +} + +static inline bool +kvm_flush_guest_cpu_root_pt_range(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end) +{ + if (MMU_IS_SEPARATE_PT()) { + /* cannot be any CPU root PTs */ + return false; + } else if (!test_thread_flag(TIF_VIRTUALIZED_GUEST)) { + /* it is not guest VCPU process on host */ + /* so cannot have guest VM */ + return false; + } else if (paravirt_enabled()) { + /* it is guest process on guest and guest has not own guests */ + return false; + } + return kvm_do_flush_guest_cpu_root_pt_range(mm, start, end); +} + +static inline bool +kvm_flush_guest_cpu_root_pt_mm(struct mm_struct *mm) +{ + if (MMU_IS_SEPARATE_PT()) { + /* cannot be any CPU root PTs */ + return false; + } else if (!test_thread_flag(TIF_VIRTUALIZED_GUEST)) { + /* it is not guest VCPU process on host */ + /* so cannot have guest VM */ + return false; + } else if (paravirt_enabled()) { + /* it is guest process on guest and guest has not own guests */ + return false; + } + return kvm_do_flush_guest_cpu_root_pt_mm(mm); +} + +static inline bool +kvm_flush_guest_cpu_root_pt(void) +{ + if (MMU_IS_SEPARATE_PT()) { + /* cannot be any CPU root PTs */ + return false; + } else if (!test_thread_flag(TIF_VIRTUALIZED_GUEST)) { + /* it is not guest VCPU process on host */ + /* 
so cannot have guest VM */ + return false; + } else if (paravirt_enabled()) { + /* it is guest process on guest and guest has not own guests */ + return false; + } + return kvm_do_flush_guest_cpu_root_pt(); +} + +#ifndef CONFIG_KVM_GUEST_KERNEL +/* it is native host kernel with virtualization support */ +/* or it is paravirtualized host/guest kernel */ +static inline bool +__flush_guest_cpu_root_pt_page(struct vm_area_struct *vma, e2k_addr_t addr) +{ + return kvm_flush_guest_cpu_root_pt_page(vma, addr); +} +static inline bool +__flush_guest_cpu_root_pt_range(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end) +{ + return kvm_flush_guest_cpu_root_pt_range(mm, start, end); +} +static inline bool +__flush_guest_cpu_root_pt_mm(struct mm_struct *mm) +{ + return kvm_flush_guest_cpu_root_pt_mm(mm); +} +static inline bool +__flush_guest_cpu_root_pt(void) +{ + return kvm_flush_guest_cpu_root_pt(); +} + +#endif /* ! CONFIG_KVM_GUEST_KERNEL */ +#endif /* ! CONFIG_VIRTUALIZATION */ +#endif /* _E2K_KVM_TLBFLUSH_H */ diff --git a/arch/e2k/include/asm/kvm/trace-defs.h b/arch/e2k/include/asm/kvm/trace-defs.h new file mode 100644 index 000000000000..492280b07381 --- /dev/null +++ b/arch/e2k/include/asm/kvm/trace-defs.h @@ -0,0 +1,23 @@ +#ifndef _E2K_KVM_TRACE_DEFS_H_ +#define _E2K_KVM_TRACE_DEFS_H_ + +#include + +#include +#include +#include + +static inline void +trace_kvm_get_va_translation(struct kvm_vcpu *vcpu, e2k_addr_t address, + pgdval_t *pgd, pudval_t *pud, pmdval_t *pmd, pteval_t *pte, int *pt_level) +{ + kvm_get_spt_translation(vcpu, address, pgd, pud, pmd, pte, pt_level); +} + +static inline unsigned long +trace_kvm_get_gva_to_hva(struct kvm_vcpu *vcpu, gva_t gva) +{ + return kvm_get_gva_to_hva(vcpu, gva); +} + +#endif /* _E2K_KVM_TRACE_DEFS_H_ */ diff --git a/arch/e2k/include/asm/kvm/trace-hw-stacks.h b/arch/e2k/include/asm/kvm/trace-hw-stacks.h new file mode 100644 index 000000000000..8c8db3f4fef4 --- /dev/null +++ b/arch/e2k/include/asm/kvm/trace-hw-stacks.h @@ 
-0,0 +1,367 @@ +#if !defined(_KVM_TRACE_COPY_HW_STACKS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _KVM_TRACE_COPY_HW_STACKS_H + +#include +#include + +#include +#include +#include +#include +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM host + +#ifdef CONFIG_KVM_HOST_MODE + +TRACE_EVENT( + host_copy_hw_stack, + + TP_PROTO(void *dst, void *src, unsigned long size, bool is_chain), + + TP_ARGS(dst, src, size, is_chain), + + TP_STRUCT__entry( + __field_struct(struct kvm_vcpu *, vcpu ) + __field( void *, dst ) + __field( void *, src ) + __field( u64, size ) + __field( bool, is_chain ) + __field( pgdval_t, dst_pgd ) + __field( pudval_t, dst_pud ) + __field( pmdval_t, dst_pmd ) + __field( pteval_t, dst_pte ) + __field( int, dst_pt_level ) + __field( pgdval_t, hva_pgd ) + __field( pudval_t, hva_pud ) + __field( pmdval_t, hva_pmd ) + __field( pteval_t, hva_pte ) + __field( int, hva_pt_level ) + __field( unsigned long, hva ) + ), + + TP_fast_assign( + unsigned long hva; + + __entry->vcpu = current_thread_info()->vcpu; + __entry->dst = dst; + __entry->src = src; + __entry->size = size; + __entry->is_chain = is_chain; + + trace_kvm_get_va_translation(__entry->vcpu, (e2k_addr_t)dst, + &__entry->dst_pgd, &__entry->dst_pud, &__entry->dst_pmd, + &__entry->dst_pte, &__entry->dst_pt_level); + + hva = trace_kvm_get_gva_to_hva(__entry->vcpu, (gva_t)dst); + __entry->hva = hva; + + trace_get_va_translation(current->mm, (e2k_addr_t)hva, + &__entry->hva_pgd, &__entry->hva_pud, &__entry->hva_pmd, + &__entry->hva_pte, &__entry->hva_pt_level); + ), + + TP_printk("VCPU #%d copy %s stack kernel guest <- kernel host: dst %px " + "src %px size %llx\n" + " kernel guest dst GVA %px : pgd 0x%016lx : %s\n" + " Access mode: %s%s\n" + " pud 0x%016lx : %s\n" + " Access mode: %s%s\n" + " pmd 0x%016lx : %s\n" + " Access mode: %s%s\n" + " pte 0x%016lx : %s\n" + " Access mode: %s%s\n" + " kernel host dst HVA %px : pgd 0x%016lx : %s\n" + " Access mode: %s%s\n" + " pud 0x%016lx : %s\n" + " 
Access mode: %s%s\n" + " pmd 0x%016lx : %s\n" + " Access mode: %s%s\n" + " pte 0x%016lx : %s\n" + " Access mode: %s%s", + __entry->vcpu->vcpu_id, + (__entry->is_chain) ? "chain" : "procedure", + __entry->dst, + __entry->src, + __entry->size, + __entry->dst, + (__entry->dst_pt_level <= E2K_PGD_LEVEL_NUM) ? + __entry->dst_pgd : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->dst_pgd, + __entry->dst_pt_level <= E2K_PGD_LEVEL_NUM), + (__entry->dst_pt_level <= E2K_PUD_LEVEL_NUM) ? + __entry->dst_pud : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->dst_pud, + __entry->dst_pt_level <= E2K_PUD_LEVEL_NUM), + (__entry->dst_pt_level <= E2K_PMD_LEVEL_NUM) ? + __entry->dst_pmd : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->dst_pmd, + __entry->dst_pt_level <= E2K_PMD_LEVEL_NUM), + (__entry->dst_pt_level <= E2K_PTE_LEVEL_NUM) ? + __entry->dst_pte : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->dst_pte, + __entry->dst_pt_level <= E2K_PTE_LEVEL_NUM), + (void *)__entry->hva, + (__entry->hva_pt_level <= E2K_PGD_LEVEL_NUM) ? + __entry->hva_pgd : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->hva_pgd, + __entry->hva_pt_level <= E2K_PGD_LEVEL_NUM), + (__entry->hva_pt_level <= E2K_PUD_LEVEL_NUM) ? + __entry->hva_pud : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->hva_pud, + __entry->hva_pt_level <= E2K_PUD_LEVEL_NUM), + (__entry->hva_pt_level <= E2K_PMD_LEVEL_NUM) ? + __entry->hva_pmd : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->hva_pmd, + __entry->hva_pt_level <= E2K_PMD_LEVEL_NUM), + (__entry->hva_pt_level <= E2K_PTE_LEVEL_NUM) ? 
+ __entry->hva_pte : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->hva_pte, + __entry->hva_pt_level <= E2K_PTE_LEVEL_NUM) + ) +); + +TRACE_EVENT( + host_proc_stack_frame, + + TP_PROTO(kernel_mem_ps_t *ps_base, kernel_mem_ps_t *ps_frame), + + TP_ARGS(ps_base, ps_frame), + + TP_STRUCT__entry( + __field( kernel_mem_ps_t *, ps_base ) + __field_struct( kernel_mem_ps_t, ps_frame ) + __field( pgprotval_t, dtlb_entry ) + ), + + TP_fast_assign( + __entry->ps_base = ps_base; + __entry->ps_frame = *ps_frame; + __entry->dtlb_entry = get_MMU_DTLB_ENTRY((e2k_addr_t)ps_base); + ), + + TP_printk(" %px (dtlb 0x%016lx) : 0x%016lx 0x%016lx", + __entry->ps_base, __entry->dtlb_entry, + __entry->ps_frame.word_lo, __entry->ps_frame.word_hi) +); + +TRACE_EVENT( + host_chain_stack_frame, + + TP_PROTO(e2k_mem_crs_t *pcs_base, e2k_mem_crs_t *pcs_frame), + + TP_ARGS(pcs_base, pcs_frame), + + TP_STRUCT__entry( + __field( e2k_mem_crs_t *, pcs_base ) + __field_struct( e2k_mem_crs_t, pcs_frame ) + __field( pgprotval_t, dtlb_entry ) + ), + + TP_fast_assign( + __entry->pcs_base = pcs_base; + __entry->pcs_frame = *pcs_frame; + __entry->dtlb_entry = get_MMU_DTLB_ENTRY((e2k_addr_t)pcs_base); + ), + + TP_printk(" %px (dtlb 0x%016lx) : 0x%016llx 0x%016llx " + "0x%016llx 0x%016llx", + __entry->pcs_base, __entry->dtlb_entry, + __entry->pcs_frame.cr0_lo.CR0_lo_half, + __entry->pcs_frame.cr0_hi.CR0_hi_half, + __entry->pcs_frame.cr1_lo.CR1_lo_half, + __entry->pcs_frame.cr1_hi.CR1_hi_half) +); + +TRACE_EVENT( + host_copy_hva_area, + + TP_PROTO(void *dst, void *src, unsigned long size), + + TP_ARGS(dst, src, size), + + TP_STRUCT__entry( + __field( void *, dst ) + __field( void *, src ) + __field( u64, size ) + __field( pgdval_t, dst_pgd ) + __field( pudval_t, dst_pud ) + __field( pmdval_t, dst_pmd ) + __field( pteval_t, dst_pte ) + __field( int, dst_pt_level ) + __field( pgdval_t, src_pgd ) + __field( pudval_t, src_pud ) + __field( pmdval_t, src_pmd ) + __field( pteval_t, src_pte ) + __field( int, src_pt_level ) + 
), + + TP_fast_assign( + __entry->dst = dst; + __entry->src = src; + __entry->size = size; + + trace_get_va_translation(current->mm, (e2k_addr_t)dst, + &__entry->dst_pgd, &__entry->dst_pud, &__entry->dst_pmd, + &__entry->dst_pte, &__entry->dst_pt_level); + trace_get_va_translation(current->mm, (e2k_addr_t)src, + &__entry->src_pgd, &__entry->src_pud, &__entry->src_pmd, + &__entry->src_pte, &__entry->src_pt_level); + ), + + TP_printk("copy area user guest <- kernel guest: dst %px " + "src %px size %llx\n" + " kernel guest dst HVA %px : pgd 0x%016lx : %s\n" + " Access mode: %s%s\n" + " pud 0x%016lx : %s\n" + " Access mode: %s%s\n" + " pmd 0x%016lx : %s\n" + " Access mode: %s%s\n" + " pte 0x%016lx : %s\n" + " Access mode: %s%s\n" + " kernel guest dst HVA %px : pgd 0x%016lx : %s\n" + " Access mode: %s%s\n" + " pud 0x%016lx : %s\n" + " Access mode: %s%s\n" + " pmd 0x%016lx : %s\n" + " Access mode: %s%s\n" + " pte 0x%016lx : %s\n" + " Access mode: %s%s", + __entry->dst, + __entry->src, + __entry->size, + __entry->dst, + (__entry->dst_pt_level <= E2K_PGD_LEVEL_NUM) ? + __entry->dst_pgd : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->dst_pgd, + __entry->dst_pt_level <= E2K_PGD_LEVEL_NUM), + (__entry->dst_pt_level <= E2K_PUD_LEVEL_NUM) ? + __entry->dst_pud : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->dst_pud, + __entry->dst_pt_level <= E2K_PUD_LEVEL_NUM), + (__entry->dst_pt_level <= E2K_PMD_LEVEL_NUM) ? + __entry->dst_pmd : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->dst_pmd, + __entry->dst_pt_level <= E2K_PMD_LEVEL_NUM), + (__entry->dst_pt_level <= E2K_PTE_LEVEL_NUM) ? + __entry->dst_pte : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->dst_pte, + __entry->dst_pt_level <= E2K_PTE_LEVEL_NUM), + __entry->src, + (__entry->src_pt_level <= E2K_PGD_LEVEL_NUM) ? + __entry->src_pgd : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->src_pgd, + __entry->src_pt_level <= E2K_PGD_LEVEL_NUM), + (__entry->src_pt_level <= E2K_PUD_LEVEL_NUM) ? 
+ __entry->src_pud : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->src_pud, + __entry->src_pt_level <= E2K_PUD_LEVEL_NUM), + (__entry->src_pt_level <= E2K_PMD_LEVEL_NUM) ? + __entry->src_pmd : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->src_pmd, + __entry->src_pt_level <= E2K_PMD_LEVEL_NUM), + (__entry->src_pt_level <= E2K_PTE_LEVEL_NUM) ? + __entry->src_pte : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->src_pte, + __entry->src_pt_level <= E2K_PTE_LEVEL_NUM) + ) +); + +TRACE_EVENT( + host_hva_area_line, + + TP_PROTO(u64 *hva_base, u64 size), + + TP_ARGS(hva_base, size), + + TP_STRUCT__entry( + __field( u64 *, hva_base ) + __array( u64, hva_line, 4 ) + __field( u64, size ) + __field( pgprotval_t, dtlb_entry ) + ), + + TP_fast_assign( + __entry->hva_base = hva_base; + __entry->hva_line[0] = + (size >= 1 * sizeof(u64)) ? hva_base[0] : -1; + __entry->hva_line[1] = + (size >= 2 * sizeof(u64)) ? hva_base[1] : -1; + __entry->hva_line[2] = + (size >= 3 * sizeof(u64)) ? hva_base[2] : -1; + __entry->hva_line[3] = + (size >= 4 * sizeof(u64)) ? 
hva_base[3] : -1; + __entry->size = size; + __entry->dtlb_entry = get_MMU_DTLB_ENTRY((e2k_addr_t)hva_base); + ), + + TP_printk(" %px (dtlb 0x%016lx) : 0x%016llx 0x%016llx " + "0x%016llx 0x%016llx", + __entry->hva_base, __entry->dtlb_entry, + __entry->hva_line[0], + __entry->hva_line[1], + __entry->hva_line[2], + __entry->hva_line[3] + ) +); +#else /* !CONFIG_KVM_HOST_MODE */ + +static inline bool trace_host_copy_hw_stack_enabled(void) +{ + return false; +} +static inline void +trace_host_copy_hw_stack(void *dst, void *src, unsigned long size, bool is_chain) +{ +} + +static inline bool trace_host_proc_stack_frame_enabled(void) +{ + return false; +} +static inline void +trace_host_proc_stack_frame(kernel_mem_ps_t *ps_base, kernel_mem_ps_t *ps_frame) +{ +} + +static inline bool trace_host_chain_stack_frame_enabled(void) +{ + return false; +} +static inline void +trace_host_chain_stack_frame(e2k_mem_crs_t *pcs_base, e2k_mem_crs_t *pcs_frame) +{ +} + +static inline bool trace_host_copy_hva_area_enabled(void) +{ + return false; +} +static inline void +trace_host_copy_hva_area(void *dst, void *src, unsigned long size) +{ +} + +static inline bool trace_host_hva_area_line_enabled(void) +{ + return false; +} +static inline void +trace_host_hva_area_line(u64 *hva_base, u64 size) +{ +} + +#endif /* CONFIG_KVM_HOST_MODE */ + +#endif /* _KVM_TRACE_COPY_HW_STACKS_H */ + +#ifdef CONFIG_KVM_HOST_MODE +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../arch/e2k/include/asm/kvm +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace-hw-stacks + +/* This part must be outside protection */ +#include +#endif /* CONFIG_KVM_HOST_MODE */ diff --git a/arch/e2k/include/asm/kvm/trace_kvm.h b/arch/e2k/include/asm/kvm/trace_kvm.h new file mode 100644 index 000000000000..1f116caa60c7 --- /dev/null +++ b/arch/e2k/include/asm/kvm/trace_kvm.h @@ -0,0 +1,1247 @@ +#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_KVM_H + +#include +#include + +#undef 
TRACE_SYSTEM +#define TRACE_SYSTEM kvm + +/* + * Tracepoint for guest mode entry. + */ +TRACE_EVENT(kvm_entry, + TP_PROTO(unsigned int vcpu_id), + TP_ARGS(vcpu_id), + + TP_STRUCT__entry( + __field( unsigned int, vcpu_id ) + ), + + TP_fast_assign( + __entry->vcpu_id = vcpu_id; + ), + + TP_printk("vcpu %u", __entry->vcpu_id) +); + +/* + * Tracepoint for hypercall. + */ +TRACE_EVENT(kvm_hypercall, + TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3), + TP_ARGS(nr, a0, a1, a2, a3), + + TP_STRUCT__entry( + __field( unsigned long, nr ) + __field( unsigned long, a0 ) + __field( unsigned long, a1 ) + __field( unsigned long, a2 ) + __field( unsigned long, a3 ) + ), + + TP_fast_assign( + __entry->nr = nr; + __entry->a0 = a0; + __entry->a1 = a1; + __entry->a2 = a2; + __entry->a3 = a3; + ), + + TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx", + __entry->nr, __entry->a0, __entry->a1, __entry->a2, + __entry->a3) +); + +/* + * Tracepoint for PIO. + */ +TRACE_EVENT(kvm_pio, + TP_PROTO(unsigned int rw, unsigned int port, unsigned int size, + unsigned int count), + TP_ARGS(rw, port, size, count), + + TP_STRUCT__entry( + __field( unsigned int, rw ) + __field( unsigned int, port ) + __field( unsigned int, size ) + __field( unsigned int, count ) + ), + + TP_fast_assign( + __entry->rw = rw; + __entry->port = port; + __entry->size = size; + __entry->count = count; + ), + + TP_printk("pio_%s at 0x%x size %d count %d", + __entry->rw ? "write" : "read", + __entry->port, __entry->size, __entry->count) +); + +/* + * Tracepoint for cpuid. 
+ */ +TRACE_EVENT(kvm_cpuid, + TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx, + unsigned long rcx, unsigned long rdx), + TP_ARGS(function, rax, rbx, rcx, rdx), + + TP_STRUCT__entry( + __field( unsigned int, function ) + __field( unsigned long, rax ) + __field( unsigned long, rbx ) + __field( unsigned long, rcx ) + __field( unsigned long, rdx ) + ), + + TP_fast_assign( + __entry->function = function; + __entry->rax = rax; + __entry->rbx = rbx; + __entry->rcx = rcx; + __entry->rdx = rdx; + ), + + TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx", + __entry->function, __entry->rax, + __entry->rbx, __entry->rcx, __entry->rdx) +); + +#define AREG(x) { APIC_##x, "APIC_" #x } + +#define kvm_trace_symbol_apic \ + AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI), \ + AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR), \ + AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \ + AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR), \ + AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI) +/* + * Tracepoint for apic access. + */ +TRACE_EVENT(kvm_apic, + TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val), + TP_ARGS(rw, reg, val), + + TP_STRUCT__entry( + __field( unsigned int, rw ) + __field( unsigned int, reg ) + __field( unsigned int, val ) + ), + + TP_fast_assign( + __entry->rw = rw; + __entry->reg = reg; + __entry->val = val; + ), + + TP_printk("apic_%s %s = 0x%x", + __entry->rw ? 
"write" : "read", + __print_symbolic(__entry->reg, kvm_trace_symbol_apic), + __entry->val) +); + +#define trace_kvm_apic_read(reg, val) trace_kvm_apic(0, reg, val) +#define trace_kvm_apic_write(reg, val) trace_kvm_apic(1, reg, val) + +#define CREG(x) { CEPIC_##x, "CEPIC_" #x } + +#define kvm_trace_symbol_cepic \ + CREG(CTRL), CREG(ID), CREG(CTRL2), CREG(DAT), CREG(DAT2), \ + CREG(EPIC_INT), CREG(EPIC_INT2), CREG(EPIC_INT3), CREG(CPR), \ + CREG(ESR), CREG(ESR2), CREG(EOI), CREG(CIR), CREG(GSTBASE_LO), \ + CREG(GSTBASE_HI), CREG(GSTID), CREG(PNMIRR), CREG(ICR), \ + CREG(ICR2), CREG(TIMER_LVTT), CREG(TIMER_INIT), CREG(TIMER_CUR),\ + CREG(TIMER_DIV), CREG(SVR), CREG(PNMIRR_MASK), CREG(VECT_INTA) \ + +#define kvm_trace_symbol_epic kvm_trace_symbol_cepic + +/* + * Tracepoint for 32/64 epic access + */ +TRACE_EVENT(kvm_epic, + TP_PROTO(unsigned int rw, unsigned int dword, unsigned int reg, + unsigned long val), + TP_ARGS(rw, dword, reg, val), + + TP_STRUCT__entry( + __field(unsigned int, rw) + __field(unsigned int, dword) + __field(unsigned int, reg) + __field(unsigned long, val) + ), + + TP_fast_assign( + __entry->rw = rw; + __entry->dword = dword; + __entry->reg = reg; + __entry->val = val; + ), + + TP_printk("epic_%s%s %s = 0x%lx", + __entry->rw ? "write" : "read", + __entry->dword ? 
"64" : "32", + __print_symbolic(__entry->reg, kvm_trace_symbol_epic), + __entry->val) +); + +#define trace_kvm_epic_read_w(reg, val) trace_kvm_epic(0, 0, reg, val) +#define trace_kvm_epic_write_w(reg, val) trace_kvm_epic(1, 0, reg, val) +#define trace_kvm_epic_read_d(reg, val) trace_kvm_epic(0, 1, reg, val) +#define trace_kvm_epic_write_d(reg, val) trace_kvm_epic(1, 1, reg, val) + +/* + * Tracepoint for kvm guest exit: + */ +TRACE_EVENT(kvm_exit, + TP_PROTO(unsigned int exit_reason, unsigned long guest_rip), + TP_ARGS(exit_reason, guest_rip), + + TP_STRUCT__entry( + __field( unsigned int, exit_reason ) + __field( unsigned long, guest_rip ) + ), + + TP_fast_assign( + __entry->exit_reason = exit_reason; + __entry->guest_rip = guest_rip; + ), + + TP_printk("reason %d rip 0x%lx", + __entry->exit_reason, + __entry->guest_rip) +); + +/* + * Tracepoint for kvm interrupt injection: + */ +TRACE_EVENT(kvm_inj_virq, + TP_PROTO(unsigned int irq), + TP_ARGS(irq), + + TP_STRUCT__entry( + __field( unsigned int, irq ) + ), + + TP_fast_assign( + __entry->irq = irq; + ), + + TP_printk("irq %u", __entry->irq) +); + +/* + * Tracepoint for page fault. + */ +TRACE_EVENT(kvm_page_fault, + TP_PROTO(unsigned long fault_address, unsigned int error_code), + TP_ARGS(fault_address, error_code), + + TP_STRUCT__entry( + __field( unsigned long, fault_address ) + __field( unsigned int, error_code ) + ), + + TP_fast_assign( + __entry->fault_address = fault_address; + __entry->error_code = error_code; + ), + + TP_printk("address %lx error_code %x", + __entry->fault_address, __entry->error_code) +); + +/* + * Tracepoint for guest MSR access. 
+ */ +TRACE_EVENT(kvm_msr, + TP_PROTO(unsigned int rw, unsigned int ecx, unsigned long data), + TP_ARGS(rw, ecx, data), + + TP_STRUCT__entry( + __field( unsigned int, rw ) + __field( unsigned int, ecx ) + __field( unsigned long, data ) + ), + + TP_fast_assign( + __entry->rw = rw; + __entry->ecx = ecx; + __entry->data = data; + ), + + TP_printk("msr_%s %x = 0x%lx", + __entry->rw ? "write" : "read", + __entry->ecx, __entry->data) +); + +#define trace_kvm_msr_read(ecx, data) trace_kvm_msr(0, ecx, data) +#define trace_kvm_msr_write(ecx, data) trace_kvm_msr(1, ecx, data) + +/* + * Tracepoint for guest CR access. + */ +TRACE_EVENT(kvm_cr, + TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val), + TP_ARGS(rw, cr, val), + + TP_STRUCT__entry( + __field( unsigned int, rw ) + __field( unsigned int, cr ) + __field( unsigned long, val ) + ), + + TP_fast_assign( + __entry->rw = rw; + __entry->cr = cr; + __entry->val = val; + ), + + TP_printk("cr_%s %x = 0x%lx", + __entry->rw ? "write" : "read", + __entry->cr, __entry->val) +); + +#define trace_kvm_cr_read(cr, val) trace_kvm_cr(0, cr, val) +#define trace_kvm_cr_write(cr, val) trace_kvm_cr(1, cr, val) + +TRACE_EVENT(kvm_pic_set_irq, + TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced), + TP_ARGS(chip, pin, elcr, imr, coalesced), + + TP_STRUCT__entry( + __field( __u8, chip ) + __field( __u8, pin ) + __field( __u8, elcr ) + __field( __u8, imr ) + __field( bool, coalesced ) + ), + + TP_fast_assign( + __entry->chip = chip; + __entry->pin = pin; + __entry->elcr = elcr; + __entry->imr = imr; + __entry->coalesced = coalesced; + ), + + TP_printk("chip %u pin %u (%s%s)%s", + __entry->chip, __entry->pin, + (__entry->elcr & (1 << __entry->pin)) ? "level":"edge", + (__entry->imr & (1 << __entry->pin)) ? "|masked":"", + __entry->coalesced ? 
" (coalesced)" : "") +); + +#define kvm_apic_dst_shorthand \ + {0x0, "dst"}, \ + {0x1, "self"}, \ + {0x2, "all"}, \ + {0x3, "all-but-self"} + +#define kvm_deliver_mode \ + {0x0, "Fixed"}, \ + {0x1, "LowPrio"}, \ + {0x2, "SMI"}, \ + {0x3, "Res3"}, \ + {0x4, "NMI"}, \ + {0x5, "INIT"}, \ + {0x6, "SIPI"}, \ + {0x7, "ExtINT"} + +TRACE_EVENT(kvm_apic_ipi, + TP_PROTO(__u32 icr_low, __u32 dest_id), + TP_ARGS(icr_low, dest_id), + + TP_STRUCT__entry( + __field( __u32, icr_low ) + __field( __u32, dest_id ) + ), + + TP_fast_assign( + __entry->icr_low = icr_low; + __entry->dest_id = dest_id; + ), + + TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)", + __entry->dest_id, (u8)__entry->icr_low, + __print_symbolic((__entry->icr_low >> 8 & 0x7), + kvm_deliver_mode), + (__entry->icr_low & (1<<11)) ? "logical" : "physical", + (__entry->icr_low & (1<<14)) ? "assert" : "de-assert", + (__entry->icr_low & (1<<15)) ? "level" : "edge", + __print_symbolic((__entry->icr_low >> 18 & 0x3), + kvm_apic_dst_shorthand)) +); + +TRACE_EVENT(kvm_apic_accept_irq, + TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec, bool coalesced), + TP_ARGS(apicid, dm, tm, vec, coalesced), + + TP_STRUCT__entry( + __field( __u32, apicid ) + __field( __u16, dm ) + __field( __u8, tm ) + __field( __u8, vec ) + __field( bool, coalesced ) + ), + + TP_fast_assign( + __entry->apicid = apicid; + __entry->dm = dm; + __entry->tm = tm; + __entry->vec = vec; + __entry->coalesced = coalesced; + ), + + TP_printk("apicid %x vec %u (%s|%s)%s", + __entry->apicid, __entry->vec, + __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode), + __entry->tm ? "level" : "edge", + __entry->coalesced ? 
" (coalesced)" : "") +); + +TRACE_EVENT(kvm_epic_ipi, + TP_PROTO(__u32 dest_id, __u32 vector), + TP_ARGS(dest_id, vector), + + TP_STRUCT__entry( + __field(__u32, dest_id) + __field(__u32, vector) + ), + + TP_fast_assign( + __entry->dest_id = dest_id; + __entry->vector = vector; + ), + + TP_printk("dst %u vec %u", + __entry->dest_id, __entry->vector) +); + +#define kvm_epic_deliver_mode \ + {0x0, "fixed(0)"}, \ + {0x1, "fixed(1)"}, \ + {0x2, "smi"}, \ + {0x3, "nm_special"}, \ + {0x4, "nmi"}, \ + {0x5, "init"}, \ + {0x6, "startup"}, \ + {0x7, "reserved"} + +TRACE_EVENT(kvm_ioepic_set_irq, + TP_PROTO(__u32 dst, __u32 vector, __u32 dlvm, bool level, bool mask, + int pin, int pin_status, bool coalesced), + TP_ARGS(dst, vector, dlvm, level, mask, pin, pin_status, coalesced), + + TP_STRUCT__entry( + __field( __u32, dst ) + __field( __u32, vector ) + __field( __u32, dlvm ) + __field( bool, level ) + __field( bool, mask ) + __field( int, pin ) + __field( int, pin_status ) + __field( bool, coalesced ) + ), + + TP_fast_assign( + __entry->dst = dst; + __entry->vector = vector; + __entry->dlvm = dlvm; + __entry->level = level; + __entry->mask = mask; + __entry->pin = pin; + __entry->pin = pin_status; + __entry->coalesced = coalesced; + ), + + TP_printk("pin %u %s. dst %d vec 0x%x (%s|%s%s)%s", + __entry->pin, __entry->pin_status ? "set" : "unset", + __entry->dst, __entry->vector, + __print_symbolic(__entry->dlvm, kvm_epic_deliver_mode), + __entry->level ? "level" : "edge", + __entry->mask ? "|masked" : "", + __entry->coalesced ? 
" (coalesced)" : "") +); + +TRACE_EVENT(kvm_epic_accept_irq, + TP_PROTO(__u32 epicid, __u16 dm, __u8 tm, __u32 vec, + bool coalesced), + TP_ARGS(epicid, dm, tm, vec, coalesced), + + TP_STRUCT__entry( + __field(__u32, epicid) + __field(__u16, dm) + __field(__u8, tm) + __field(__u32, vec) + __field(bool, coalesced) + ), + + TP_fast_assign( + __entry->epicid = epicid; + __entry->dm = dm; + __entry->tm = tm; + __entry->vec = vec; + __entry->coalesced = coalesced; + ), + + TP_printk("epicid %x vec %u (%s|%s)%s", + __entry->epicid, __entry->vec, + __print_symbolic(__entry->dm, kvm_epic_deliver_mode), + __entry->tm ? "level" : "edge", + __entry->coalesced ? " (coalesced)" : "") +); + +TRACE_EVENT(kvm_epic_eoi, + TP_PROTO(__u32 vector), + TP_ARGS(vector), + + TP_STRUCT__entry( + __field(__u32, vector) + ), + + TP_fast_assign( + __entry->vector = vector; + ), + + TP_printk("vector %u", __entry->vector) +); + +/* + * Tracepoint for nested VMRUN + */ +TRACE_EVENT(kvm_nested_vmrun, + TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl, + __u32 event_inj, bool npt), + TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt), + + TP_STRUCT__entry( + __field( __u64, rip ) + __field( __u64, vmcb ) + __field( __u64, nested_rip ) + __field( __u32, int_ctl ) + __field( __u32, event_inj ) + __field( bool, npt ) + ), + + TP_fast_assign( + __entry->rip = rip; + __entry->vmcb = vmcb; + __entry->nested_rip = nested_rip; + __entry->int_ctl = int_ctl; + __entry->event_inj = event_inj; + __entry->npt = npt; + ), + + TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx " + "int_ctl: 0x%08x event_inj: 0x%08x npt: %s\n", + __entry->rip, __entry->vmcb, __entry->nested_rip, + __entry->int_ctl, __entry->event_inj, + __entry->npt ? 
"on" : "off") +); + +/* + * Tracepoint for #VMEXIT while nested + */ +TRACE_EVENT(kvm_nested_vmexit, + TP_PROTO(__u64 rip, __u32 exit_code, + __u64 exit_info1, __u64 exit_info2, + __u32 exit_int_info, __u32 exit_int_info_err), + TP_ARGS(rip, exit_code, exit_info1, exit_info2, + exit_int_info, exit_int_info_err), + + TP_STRUCT__entry( + __field( __u64, rip ) + __field( __u32, exit_code ) + __field( __u64, exit_info1 ) + __field( __u64, exit_info2 ) + __field( __u32, exit_int_info ) + __field( __u32, exit_int_info_err ) + ), + + TP_fast_assign( + __entry->rip = rip; + __entry->exit_code = exit_code; + __entry->exit_info1 = exit_info1; + __entry->exit_info2 = exit_info2; + __entry->exit_int_info = exit_int_info; + __entry->exit_int_info_err = exit_int_info_err; + ), + TP_printk("rip: 0x%016llx reason: %d ext_inf1: 0x%016llx " + "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x\n", + __entry->rip, + __entry->exit_code, + __entry->exit_info1, __entry->exit_info2, + __entry->exit_int_info, __entry->exit_int_info_err) +); + +/* + * Tracepoint for #VMEXIT reinjected to the guest + */ +TRACE_EVENT(kvm_nested_vmexit_inject, + TP_PROTO(__u32 exit_code, + __u64 exit_info1, __u64 exit_info2, + __u32 exit_int_info, __u32 exit_int_info_err), + TP_ARGS(exit_code, exit_info1, exit_info2, + exit_int_info, exit_int_info_err), + + TP_STRUCT__entry( + __field( __u32, exit_code ) + __field( __u64, exit_info1 ) + __field( __u64, exit_info2 ) + __field( __u32, exit_int_info ) + __field( __u32, exit_int_info_err ) + ), + + TP_fast_assign( + __entry->exit_code = exit_code; + __entry->exit_info1 = exit_info1; + __entry->exit_info2 = exit_info2; + __entry->exit_int_info = exit_int_info; + __entry->exit_int_info_err = exit_int_info_err; + ), + + TP_printk("reason: %d ext_inf1: 0x%016llx " + "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x\n", + __entry->exit_code, + __entry->exit_info1, __entry->exit_info2, + __entry->exit_int_info, __entry->exit_int_info_err) +); + +/* + * 
Tracepoint for nested #vmexit because of interrupt pending + */ +TRACE_EVENT(kvm_nested_intr_vmexit, + TP_PROTO(__u64 rip), + TP_ARGS(rip), + + TP_STRUCT__entry( + __field( __u64, rip ) + ), + + TP_fast_assign( + __entry->rip = rip + ), + + TP_printk("rip: 0x%016llx\n", __entry->rip) +); + +/* + * Tracepoint for nested #vmexit because of interrupt pending + */ +TRACE_EVENT(kvm_invlpga, + TP_PROTO(__u64 rip, int asid, u64 address), + TP_ARGS(rip, asid, address), + + TP_STRUCT__entry( + __field( __u64, rip ) + __field( int, asid ) + __field( __u64, address ) + ), + + TP_fast_assign( + __entry->rip = rip; + __entry->asid = asid; + __entry->address = address; + ), + + TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx\n", + __entry->rip, __entry->asid, __entry->address) +); + +/* + * Tracepoint for nested #vmexit because of interrupt pending + */ +TRACE_EVENT(kvm_skinit, + TP_PROTO(__u64 rip, __u32 slb), + TP_ARGS(rip, slb), + + TP_STRUCT__entry( + __field( __u64, rip ) + __field( __u32, slb ) + ), + + TP_fast_assign( + __entry->rip = rip; + __entry->slb = slb; + ), + + TP_printk("rip: 0x%016llx slb: 0x%08x\n", + __entry->rip, __entry->slb) +); + +#define TIRHI_EXC_MASK 0x00000fffffffffffULL +#define TIRHI_ALS_MASK 0x0003f00000000000ULL +#define TIRHI_ALS_SHIFT 44ULL +#define TIRHI_MOVA_MASK 0x00f0000000000000ULL +#define TIRHI_MOVA0_MASK 0x0010000000000000ULL +#define TIRHI_MOVA1_MASK 0x0020000000000000ULL +#define TIRHI_MOVA2_MASK 0x0040000000000000ULL +#define TIRHI_MOVA3_MASK 0x0080000000000000ULL + +#define E2K_TRACE_PRINT_TIR_HI(entry) \ + (entry & (TIRHI_EXC_MASK | TIRHI_MOVA_MASK)) ? 
\ + __print_flags(entry & (TIRHI_EXC_MASK | TIRHI_MOVA_MASK), "|", \ + { TIRHI_MOVA0_MASK, "mova0" }, \ + { TIRHI_MOVA1_MASK, "mova1" }, \ + { TIRHI_MOVA2_MASK, "mova2" }, \ + { TIRHI_MOVA3_MASK, "mova3" }, \ + { exc_illegal_opcode_mask, "illegal_opcode" }, \ + { exc_priv_action_mask, "priv_action" }, \ + { exc_fp_disabled_mask, "fp_disabled" }, \ + { exc_fp_stack_u_mask, "fp_stack_u" }, \ + { exc_d_interrupt_mask, "d_interrupt" }, \ + { exc_diag_ct_cond_mask, "diag_ct_cond" }, \ + { exc_diag_instr_addr_mask, "diag_instr_addr" }, \ + { exc_illegal_instr_addr_mask, "illegal_instr_addr" }, \ + { exc_instr_debug_mask, "instr_debug" }, \ + { exc_window_bounds_mask, "window_bounds" }, \ + { exc_user_stack_bounds_mask, "user_stack_bounds" }, \ + { exc_proc_stack_bounds_mask, "proc_stack_bounds" }, \ + { exc_chain_stack_bounds_mask, "chain_stack_bounds" }, \ + { exc_fp_stack_o_mask, "fp_stack_o" }, \ + { exc_diag_cond_mask, "diag_cond" }, \ + { exc_diag_operand_mask, "diag_operand" }, \ + { exc_illegal_operand_mask, "illegal_operand" }, \ + { exc_array_bounds_mask, "array_bounds" }, \ + { exc_access_rights_mask, "access_rights" }, \ + { exc_addr_not_aligned_mask, "addr_not_aligned" }, \ + { exc_instr_page_miss_mask, "instr_page_miss" }, \ + { exc_instr_page_prot_mask, "instr_page_prot" }, \ + { exc_ainstr_page_miss_mask, "ainstr_page_miss" }, \ + { exc_ainstr_page_prot_mask, "ainstr_page_prot" }, \ + { exc_last_wish_mask, "last_wish" }, \ + { exc_base_not_aligned_mask, "base_not_aligned" }, \ + { exc_software_trap_mask, "software_trap" }, \ + { exc_data_debug_mask, "data_debug" }, \ + { exc_data_page_mask, "data_page" }, \ + { exc_recovery_point_mask, "recovery_point" }, \ + { exc_interrupt_mask, "interrupt" }, \ + { exc_nm_interrupt_mask, "nm_interrupt" }, \ + { exc_div_mask, "div" }, \ + { exc_fp_mask, "fp" }, \ + { exc_mem_lock_mask, "mem_lock" }, \ + { exc_mem_lock_as_mask, "mem_lock_as" }, \ + { exc_mem_error_out_cpu_mask, "mem_error_out_cpu" }, \ + { 
exc_mem_error_MAU_mask, "mem_error_MAU" }, \ + { exc_mem_error_L2_mask, "mem_error_L2" }, \ + { exc_mem_error_L1_35_mask, "mem_error_L1_35" }, \ + { exc_mem_error_L1_02_mask, "mem_error_L1_02" }, \ + { exc_mem_error_ICACHE_mask, "mem_error_ICACHE" } \ + ) : "(none)" + +TRACE_EVENT( + intc_tir, + + TP_PROTO(u64 tir_lo, u64 tir_hi), + + TP_ARGS(tir_lo, tir_hi), + + TP_STRUCT__entry( + __field( u64, tir_lo ) + __field( u64, tir_hi ) + ), + + TP_fast_assign( + __entry->tir_lo = tir_lo; + __entry->tir_hi = tir_hi; + ), + + TP_printk("TIR%lld: ip 0x%llx, als 0x%llx\n" + " exceptions: %s\n" + , + __entry->tir_hi >> 56, + __entry->tir_lo & E2K_VA_MASK, + (__entry->tir_hi & TIRHI_ALS_MASK) >> TIRHI_ALS_SHIFT, + E2K_TRACE_PRINT_TIR_HI(__entry->tir_hi) + ) +); + +TRACE_EVENT( + intc_ctprs, + + TP_PROTO(u64 ctpr1, u64 ctpr1_hi, u64 ctpr2, u64 ctpr2_hi, u64 ctpr3, + u64 ctpr3_hi), + + TP_ARGS(ctpr1, ctpr1_hi, ctpr2, ctpr2_hi, ctpr3, ctpr3_hi), + + TP_STRUCT__entry( + __field( u64, ctpr1 ) + __field( u64, ctpr1_hi ) + __field( u64, ctpr2 ) + __field( u64, ctpr2_hi ) + __field( u64, ctpr3 ) + __field( u64, ctpr3_hi ) + ), + + TP_fast_assign( + __entry->ctpr1 = ctpr1; + __entry->ctpr1_hi = ctpr1_hi; + __entry->ctpr2 = ctpr2; + __entry->ctpr2_hi = ctpr2_hi; + __entry->ctpr3 = ctpr3; + __entry->ctpr3_hi = ctpr3_hi; + ), + + TP_printk("ctpr1 0x%llx, ctpr1_hi 0x%llx\n" + "ctpr2 0x%llx, ctpr2_hi 0x%llx\n" + "ctpr3 0x%llx, ctpr3_hi 0x%llx\n", + __entry->ctpr1, __entry->ctpr1_hi, + __entry->ctpr2, __entry->ctpr2_hi, + __entry->ctpr3, __entry->ctpr3_hi) +); + +TRACE_EVENT( + intc_aau, + + TP_PROTO(const e2k_aau_t *aau_ctxt, u64 lsr, u64 lsr1, u64 ilcr, u64 ilcr1), + + TP_ARGS(aau_ctxt, lsr, lsr1, ilcr, ilcr1), + + TP_STRUCT__entry( + __field( u32, aasr ) + __field( u64, lsr ) + __field( u64, lsr1 ) + __field( u64, ilcr ) + __field( u64, ilcr1 ) + __field( u32, aafstr ) + __field( u64, aaldm ) + __field( u64, aaldv ) + + /* Synchronous part */ + __array( u64, aastis, AASTIS_REGS_NUM); + 
__field( u32, aasti_tags ); + + /* Asynchronous part */ + __array( u64, aainds, AAINDS_REGS_NUM); + __field( u32, aaind_tags ); + __array( u64, aaincrs, AAINCRS_REGS_NUM); + __field( u32, aaincr_tags ); + __array( u64, aads, AADS_REGS_NUM * 2); + + /* Since iset v6 */ + __array( u64, aaldi, AALDIS_REGS_NUM); + + ), + + TP_fast_assign( + int i; + + __entry->aasr = AW(aau_ctxt->guest_aasr); + __entry->lsr = lsr; + __entry->lsr1 = lsr1; + __entry->ilcr = ilcr; + __entry->ilcr1 = ilcr1; + __entry->aafstr = aau_ctxt->aafstr; + __entry->aaldm = AW(aau_ctxt->aaldm); + __entry->aaldv = AW(aau_ctxt->aaldv); + + for (i = 0; i < AADS_REGS_NUM; i++) { + __entry->aads[2 * i] = AW(aau_ctxt->aads[i]).lo; + __entry->aads[2 * i + 1] = AW(aau_ctxt->aads[i]).hi; + } + + for (i = 0; i < AAINCRS_REGS_NUM; i++) + __entry->aaincrs[i] = aau_ctxt->aaincrs[i]; + + __entry->aaincr_tags = aau_ctxt->aaincr_tags; + + for (i = 0; i < AAINDS_REGS_NUM; i++) + __entry->aainds[i] = aau_ctxt->aainds[i]; + + __entry->aaind_tags = aau_ctxt->aaind_tags; + + for (i = 0; i < AASTIS_REGS_NUM; i++) + __entry->aastis[i] = aau_ctxt->aastis[i]; + + __entry->aasti_tags = aau_ctxt->aasti_tags; + + for (i = 0; i < AALDIS_REGS_NUM; i++) + __entry->aaldi[i] = aau_ctxt->aaldi[i]; + ), + + TP_printk("aasr 0x%x, lsr 0x%llx, lsr1 0x%llx, ilcr 0x%llx, ilcr1 0x%llx\n" + "aaldv 0x%llx, aaldm = 0x%llx\n" + "aads lo/hi 0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx\n" + "0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx\n" + "0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx\n" + "0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx\n" + "0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx\n" + "0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx\n" + "0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx\n" + "0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx\n" + "aaincrs 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n" + "aaincr_tags 0x%x\n" + "aainds 0x%llx 0x%llx 0x%llx 
0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n" + "0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n" + "aaind_tags 0x%x\n" + "aastis 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n" + "0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n" + "aasti_tags 0x%x\n" + "aaldis 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n" + "0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n" + "0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n" + "0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n", + __entry->aasr, __entry->lsr, __entry->lsr1, + __entry->ilcr, __entry->ilcr1, + __entry->aaldv, __entry->aaldm, + __entry->aads[0], __entry->aads[1], + __entry->aads[2], __entry->aads[3], + __entry->aads[4], __entry->aads[5], + __entry->aads[6], __entry->aads[7], + __entry->aads[8], __entry->aads[9], + __entry->aads[10], __entry->aads[11], + __entry->aads[12], __entry->aads[13], + __entry->aads[14], __entry->aads[15], + __entry->aads[16], __entry->aads[17], + __entry->aads[18], __entry->aads[19], + __entry->aads[20], __entry->aads[21], + __entry->aads[22], __entry->aads[23], + __entry->aads[24], __entry->aads[25], + __entry->aads[26], __entry->aads[27], + __entry->aads[28], __entry->aads[29], + __entry->aads[30], __entry->aads[31], + __entry->aads[32], __entry->aads[33], + __entry->aads[34], __entry->aads[35], + __entry->aads[36], __entry->aads[37], + __entry->aads[38], __entry->aads[39], + __entry->aads[40], __entry->aads[41], + __entry->aads[42], __entry->aads[43], + __entry->aads[44], __entry->aads[45], + __entry->aads[46], __entry->aads[47], + __entry->aads[48], __entry->aads[49], + __entry->aads[50], __entry->aads[51], + __entry->aads[52], __entry->aads[53], + __entry->aads[54], __entry->aads[55], + __entry->aads[56], __entry->aads[57], + __entry->aads[58], __entry->aads[59], + __entry->aads[60], __entry->aads[61], + __entry->aads[62], __entry->aads[63], + __entry->aaincrs[0], __entry->aaincrs[1], + __entry->aaincrs[2], 
__entry->aaincrs[3], + __entry->aaincrs[4], __entry->aaincrs[5], + __entry->aaincrs[6], __entry->aaincrs[7], + __entry->aaincr_tags, + __entry->aainds[0], __entry->aainds[1], + __entry->aainds[2], __entry->aainds[3], + __entry->aainds[4], __entry->aainds[5], + __entry->aainds[6], __entry->aainds[7], + __entry->aainds[8], __entry->aainds[9], + __entry->aainds[10], __entry->aainds[11], + __entry->aainds[12], __entry->aainds[13], + __entry->aainds[14], __entry->aainds[15], + __entry->aaind_tags, + __entry->aastis[0], __entry->aastis[1], + __entry->aastis[2], __entry->aastis[3], + __entry->aastis[4], __entry->aastis[5], + __entry->aastis[6], __entry->aastis[7], + __entry->aastis[8], __entry->aastis[9], + __entry->aastis[10], __entry->aastis[11], + __entry->aastis[12], __entry->aastis[13], + __entry->aastis[14], __entry->aastis[15], + __entry->aasti_tags, + __entry->aaldi[0], __entry->aaldi[1], + __entry->aaldi[2], __entry->aaldi[3], + __entry->aaldi[4], __entry->aaldi[5], + __entry->aaldi[6], __entry->aaldi[7], + __entry->aaldi[8], __entry->aaldi[9], + __entry->aaldi[10], __entry->aaldi[11], + __entry->aaldi[12], __entry->aaldi[13], + __entry->aaldi[14], __entry->aaldi[15], + __entry->aaldi[16], __entry->aaldi[17], + __entry->aaldi[18], __entry->aaldi[19], + __entry->aaldi[20], __entry->aaldi[21], + __entry->aaldi[22], __entry->aaldi[23], + __entry->aaldi[24], __entry->aaldi[25], + __entry->aaldi[26], __entry->aaldi[27], + __entry->aaldi[28], __entry->aaldi[29], + __entry->aaldi[30], __entry->aaldi[31]) +); + +TRACE_EVENT( + generic_hcall, + + TP_PROTO(unsigned long hcall_num, unsigned long arg1, + unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5, + unsigned long arg6, unsigned long gsbr, + unsigned long cpu), + + TP_ARGS(hcall_num, arg1, arg2, arg3, arg4, arg5, arg6, gsbr, cpu), + + TP_STRUCT__entry( + __field( u64, hcall_num ) + __field( u64, arg1 ) + __field( u64, arg2 ) + __field( u64, arg3 ) + __field( u64, arg4 ) + __field( u64, 
arg5 ) + __field( u64, arg6 ) + __field( u64, gsbr ) + __field( u64, cpu ) + ), + + TP_fast_assign( + __entry->hcall_num = hcall_num; + __entry->arg1 = arg1; + __entry->arg2 = arg2; + __entry->arg3 = arg3; + __entry->arg4 = arg4; + __entry->arg5 = arg5; + __entry->arg6 = arg6; + __entry->gsbr = gsbr; + __entry->cpu = cpu; + ), + + TP_printk("CPU#%llu, generic hypercall %llu\n" + "Args: 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx; gsbr: 0x%llx\n" + , + __entry->cpu, + __entry->hcall_num, + __entry->arg1, + __entry->arg2, + __entry->arg3, + __entry->arg4, + __entry->arg5, + __entry->arg6, + __entry->gsbr) +); + +TRACE_EVENT( + light_hcall, + + TP_PROTO(unsigned long hcall_num, unsigned long arg1, + unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5, + unsigned long arg6, unsigned long cpu), + + TP_ARGS(hcall_num, arg1, arg2, arg3, arg4, arg5, arg6, cpu), + + TP_STRUCT__entry( + __field( u64, hcall_num ) + __field( u64, arg1 ) + __field( u64, arg2 ) + __field( u64, arg3 ) + __field( u64, arg4 ) + __field( u64, arg5 ) + __field( u64, arg6 ) + __field( u64, cpu ) + ), + + TP_fast_assign( + __entry->hcall_num = hcall_num; + __entry->arg1 = arg1; + __entry->arg2 = arg2; + __entry->arg3 = arg3; + __entry->arg4 = arg4; + __entry->arg5 = arg5; + __entry->arg6 = arg6; + __entry->cpu = cpu; + ), + + TP_printk("CPU#%llu, light hypercall %llu\n" + "Args: 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + , + __entry->cpu, + __entry->hcall_num, + __entry->arg1, + __entry->arg2, + __entry->arg3, + __entry->arg4, + __entry->arg5, + __entry->arg6) +); + +TRACE_EVENT( + generic_hcall_exit, + + TP_PROTO(unsigned long ret), + + TP_ARGS(ret), + + TP_STRUCT__entry( + __field( u64, ret ) + ), + + TP_fast_assign( + __entry->ret = ret; + ), + + TP_printk("Generic hypercall exit: %llu\n", __entry->ret) +); + +TRACE_EVENT( + light_hcall_exit, + + TP_PROTO(unsigned long ret), + + TP_ARGS(ret), + + TP_STRUCT__entry( + __field( u64, ret ) + ), + + 
TP_fast_assign( + __entry->ret = ret; + ), + + TP_printk("Light hypercall exit: %llu\n", __entry->ret) +); + +TRACE_EVENT( + guest_switch_to, + + TP_PROTO(struct kvm_vcpu *vcpu, int gpid_from, int gmmid_from, + int gpid_to, int gmmid_to, struct sw_regs *next_gsw), + + TP_ARGS(vcpu, gpid_from, gmmid_from, gpid_to, gmmid_to, next_gsw), + + TP_STRUCT__entry( + __field( int, vcpu_id ) + __field( int, gpid_from ) + __field( int, gmmid_from ) + __field( int, gpid_to ) + __field( int, gmmid_to ) + __field( e2k_addr_t, top ) + __field_struct( e2k_usd_lo_t, usd_lo ) + __field_struct( e2k_usd_hi_t, usd_hi ) + __field_struct( e2k_psp_lo_t, psp_lo ) + __field_struct( e2k_psp_hi_t, psp_hi ) + __field_struct( e2k_pcsp_lo_t, pcsp_lo ) + __field_struct( e2k_pcsp_hi_t, pcsp_hi ) + __field( pgprotval_t, u_pptb ) + __field( gva_t, u_vptb ) + __field( hpa_t, root ) + __field( u64, mmu_pptb ) + __field( u64, mmu_pid ) + __field( u64, ctxt_pid ) + ), + + TP_fast_assign( + __entry->vcpu_id = vcpu->vcpu_id; + __entry->gpid_from = gpid_from; + __entry->gmmid_from = gmmid_from; + __entry->gpid_to = gpid_to; + __entry->gmmid_to = gmmid_to; + __entry->top = next_gsw->top; + __entry->usd_lo = next_gsw->usd_lo; + __entry->usd_hi = next_gsw->usd_hi; + __entry->psp_lo = next_gsw->psp_lo; + __entry->psp_hi = next_gsw->psp_hi; + __entry->pcsp_lo = next_gsw->pcsp_lo; + __entry->pcsp_hi = next_gsw->pcsp_hi; + __entry->u_pptb = vcpu->arch.mmu.get_vcpu_u_pptb(vcpu); + __entry->u_vptb = vcpu->arch.mmu.get_vcpu_sh_u_vptb(vcpu); + __entry->root = kvm_get_space_type_spt_u_root(vcpu); + __entry->mmu_pptb = get_mmu_u_pptb_reg(); + __entry->mmu_pid = get_mmu_pid_reg(); + __entry->ctxt_pid = kvm_get_guest_mmu_pid(vcpu); + ), + + TP_printk("VCPU #%d: switch from gpid #%d gmm #%d to gpid #%d gmm #%d\n" + " USD: base 0x%llx size 0x%x top at 0x%lx\n" + " PSP: base 0x%llx ind 0x%x size 0x%x\n" + " PCSP: base 0x%llx ind 0x%x size 0x%x\n" + " MMU: u_pptb 0x%lx u_vptb 0x%lx sh_pptb 0x%llx\n" + " mmu_upptb 0x%llx 
mmu_pid 0x%llx ctxt pid 0x%llx", + __entry->vcpu_id, + __entry->gpid_from, __entry->gmmid_from, + __entry->gpid_to, __entry->gmmid_to, + __entry->usd_lo.USD_lo_base, + __entry->usd_hi.USD_hi_size, + __entry->top, + __entry->psp_lo.PSP_lo_base, + __entry->psp_hi.PSP_hi_ind, + __entry->psp_hi.PSP_hi_size, + __entry->pcsp_lo.PCSP_lo_base, + __entry->pcsp_hi.PCSP_hi_ind, + __entry->pcsp_hi.PCSP_hi_size, + __entry->u_pptb, __entry->u_vptb, __entry->root, + __entry->mmu_pptb, __entry->mmu_pid, __entry->ctxt_pid + ) +); + +TRACE_EVENT( + vcpu_put, + + TP_PROTO(int vcpu, int cpu), + + TP_ARGS(vcpu, cpu), + + TP_STRUCT__entry( + __field( int, vcpu ) + __field( int, cpu ) + ), + + TP_fast_assign( + __entry->vcpu = vcpu; + __entry->cpu = cpu; + ), + + TP_printk("vcpu %d, cpu %d\n", __entry->vcpu, __entry->cpu) +); + +TRACE_EVENT( + vcpu_load, + + TP_PROTO(int vcpu, int last_cpu, int cpu), + + TP_ARGS(vcpu, last_cpu, cpu), + + TP_STRUCT__entry( + __field( int, vcpu ) + __field( int, last_cpu ) + __field( int, cpu ) + ), + + TP_fast_assign( + __entry->vcpu = vcpu; + __entry->last_cpu = last_cpu; + __entry->cpu = cpu; + ), + + TP_printk("vcpu %d, cpu %d, last_cpu %d\n", __entry->vcpu, __entry->cpu, + __entry->last_cpu) +); + +TRACE_EVENT( + kvm_handle_rmap_range, + + TP_PROTO(u64 hva_start, u64 hva_end, u64 gpa_start, u64 gpa_end, + void *handler), + + TP_ARGS(hva_start, hva_end, gpa_start, gpa_end, handler), + + TP_STRUCT__entry( + __field( u64, hva_start ) + __field( u64, hva_end ) + __field( u64, gpa_start ) + __field( u64, gpa_end ) + __field( void *, handler ) + ), + + TP_fast_assign( + __entry->hva_start = hva_start; + __entry->hva_end = hva_end; + __entry->gpa_start = gpa_start; + __entry->gpa_end = gpa_end; + __entry->handler = handler; + ), + + TP_printk("HVA 0x%llx - 0x%llx; GPA 0x%llx - 0x%llx; handler 0x%px\n", + __entry->hva_start, __entry->hva_end, + __entry->gpa_start, __entry->gpa_end, + __entry->handler) +); + +#endif /* _TRACE_KVM_H */ + +#undef 
TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../arch/e2k/include/asm/kvm +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace_kvm + +/* This part must be outside protection */ +#include diff --git a/arch/e2k/include/asm/kvm/trace_kvm_hv.h b/arch/e2k/include/asm/kvm/trace_kvm_hv.h new file mode 100644 index 000000000000..0a3de33b5978 --- /dev/null +++ b/arch/e2k/include/asm/kvm/trace_kvm_hv.h @@ -0,0 +1,525 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kvm_hv + +#if !defined(_TRACE_KVM_HV_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_KVM_HV_H + +#include +#include + +#define E2K_TRACE_PRINT_CU_HDR_LO(entry) \ + __print_flags(entry, "|", \ + { intc_cu_hdr_lo_rr_idr_mask, "rr_idr" }, \ + { intc_cu_hdr_lo_rr_clkr_mask, "rr_clkr" }, \ + { intc_cu_hdr_lo_rr_sclkr_mask, "rr_sclkr" }, \ + { intc_cu_hdr_lo_rr_dbg_mask, "rr_dbg" }, \ + { intc_cu_hdr_lo_rw_core_mode_mask, "rw_core_mode" }, \ + { intc_cu_hdr_lo_rw_clkr_mask, "rw_clkr" }, \ + { intc_cu_hdr_lo_rw_sclkr_mask, "rw_sclkr" }, \ + { intc_cu_hdr_lo_rw_sclkm3_mask, "rw_sclkm3" }, \ + { intc_cu_hdr_lo_rw_dbg_mask, "rw_dbg" }, \ + { intc_cu_hdr_lo_hcem_mask, "hcem" }, \ + { intc_cu_hdr_lo_virt_mask, "virt" }, \ + { intc_cu_hdr_lo_stop_mask, "stop" }, \ + { intc_cu_hdr_lo_hret_last_wish_mask, "hret_last_wish" }, \ + { intc_cu_hdr_lo_exc_instr_debug_mask, "exc_instr_debug" }, \ + { intc_cu_hdr_lo_exc_data_debug_mask, "exc_data_debug" }, \ + { intc_cu_hdr_lo_exc_instr_page_mask, "exc_instr_page" }, \ + { intc_cu_hdr_lo_exc_data_page_mask, "exc_data_page" }, \ + { intc_cu_hdr_lo_exc_mova_mask, "exc_mova" }, \ + { intc_cu_hdr_lo_exc_interrupt_mask, "exc_interrupt" }, \ + { intc_cu_hdr_lo_exc_nm_interrupt_mask, "exc_nm_interrupt" }, \ + { intc_cu_hdr_lo_hv_int_mask, "hv_int" }, \ + { intc_cu_hdr_lo_hv_nm_int_mask, "hv_nm_int" }, \ + { intc_cu_hdr_lo_g_tmr_mask, "g_tmr" }, \ + { intc_cu_hdr_lo_rr_mask, "rr" }, \ + { intc_cu_hdr_lo_rw_mask, "rw" }, \ + { intc_cu_hdr_lo_exc_mem_error_mask, "exc_mem_error" }, 
\ + { intc_cu_hdr_lo_wait_trap_mask, "wait_trap" }, \ + { intc_cu_hdr_lo_dbg_mask, "dbg" }, \ + { intc_cu_hdr_lo_tir_fz_mask, "tir_fz" }) + +#define E2K_TRACE_PRINT_CU_INFO_LO(entry) \ + __print_symbolic(intc_cu_info_lo_get_event_code(entry), \ + { ICE_FORCED, "FORCED" }, \ + { ICE_READ_CU, "READ_CU" }, \ + { ICE_WRITE_CU, "WRITE_CU" }, \ + { ICE_MASKED_HCALL, "MASKED_HCALL" }) + +#define E2K_TRACE_PRINT_MU_INFO_HDR(entry) \ + __print_symbolic(intc_mu_info_lo_get_event_code(entry), \ + { IME_FORCED, "FORCED" }, \ + { IME_FORCED_GVA, "FORCED_GVA" }, \ + { IME_SHADOW_DATA, "SHADOW_DATA" }, \ + { IME_GPA_DATA, "GPA_DATA" }, \ + { IME_GPA_INSTR, "GPA_INSTR" }, \ + { IME_GPA_AINSTR, "GPA_AINSTR" }, \ + { IME_MAS_IOADDR, "MAS_IOADDR" }, \ + { IME_READ_MU, "READ_MU" }, \ + { IME_WRITE_MU, "WRITE_MU" }, \ + { IME_CACHE_FLUSH, "CACHE_FLUSH" }, \ + { IME_CACHE_LINE_FLUSH, "CACHE_LINE_FLUSH" }, \ + { IME_ICACHE_FLUSH, "ICACHE_FLUSH" }, \ + { IME_ICACHE_LINE_FLUSH_USER, "ICACHE_LINE_FLUSH_USER" }, \ + { IME_ICACHE_LINE_FLUSH_SYSTEM, "ICACHE_LINE_FLUSH_SYSTEM" }, \ + { IME_TLB_FLUSH, "TLB_FLUSH" }, \ + { IME_TLB_PAGE_FLUSH_LAST, "TLB_PAGE_FLUSH_LAST" }, \ + { IME_TLB_PAGE_FLUSH_UPPER, "TLB_PAGE_FLUSH_UPPER" }, \ + { IME_TLB_ENTRY_PROBE, "TLB_ENTRY_PROBE" }) + +#define E2K_PRINT_INTC_CU_ENTRY(__entry, i) \ + (__entry->cu_num > i) ? \ + E2K_TRACE_PRINT_CU_INFO_LO(__entry->cu[2 * i]) : "(none)", \ + (__entry->cu_num > i) ? __entry->cu[2 * i] : 0ULL, \ + (__entry->cu_num > i) ? __entry->cu[2 * i + 1] : 0ULL + +#define E2K_PRINT_INTC_MU_ENTRY(__entry, mu_num, i) \ + (mu_num > i) ? \ + E2K_TRACE_PRINT_MU_INFO_HDR(__entry->mu[7 * i]) : "(none)", \ + (mu_num > i) ? __entry->mu[7 * i] : 0ULL, \ + (mu_num > i) ? __entry->mu[7 * i + 1] : 0ULL, \ + (mu_num > i) ? __entry->mu[7 * i + 2] : 0ULL, \ + (mu_num > i) ? __entry->mu[7 * i + 3] : 0ULL, \ + (mu_num > i) ? __entry->mu[7 * i + 4] : 0ULL, \ + (mu_num > i) ? __entry->mu[7 * i + 5] : 0ULL, \ + (mu_num > i) ? 
__entry->mu[7 * i + 6] : 0ULL + + +TRACE_EVENT( + intc, + + TP_PROTO(const struct kvm_intc_cpu_context *intc_ctxt, + u64 guest_ip, u64 cpu), + + TP_ARGS(intc_ctxt, guest_ip, cpu), + + TP_STRUCT__entry( + __field( int, cu_num ) + __field( int, mu_num ) + __field( u64, cu_hdr_lo ) + __array( u64, cu, INTC_INFO_CU_ENTRY_MAX ) + __array( u64, mu, INTC_INFO_MU_MAX ) + __field( u64, guest_ip ) + __field( u64, cpu ) + ), + + TP_fast_assign( + __entry->cu_num = intc_ctxt->cu_num; + __entry->mu_num = intc_ctxt->mu_num; + + if (__entry->cu_num >= 0) + __entry->cu_hdr_lo = AW(intc_ctxt->cu.header.lo); + + if (__entry->cu_num > 0) { + int i; + for (i = 0; i < __entry->cu_num; i++) { + __entry->cu[2 * i] = + AW(intc_ctxt->cu.entry[i].lo); + __entry->cu[2 * i + 1] = + intc_ctxt->cu.entry[i].hi; + } + } + + if (__entry->mu_num > 0) { + int i; + for (i = 0; i < __entry->mu_num; i++) { + __entry->mu[7 * i] = + AW(intc_ctxt->mu[i].hdr); + __entry->mu[7 * i + 1] = + intc_ctxt->mu[i].gpa; + __entry->mu[7 * i + 2] = + intc_ctxt->mu[i].gva; + __entry->mu[7 * i + 3] = + intc_ctxt->mu[i].data; + __entry->mu[7 * i + 4] = + AW(intc_ctxt->mu[i].condition); + __entry->mu[7 * i + 5] = + intc_ctxt->mu[i].data_ext; + __entry->mu[7 * i + 6] = + AW(intc_ctxt->mu[i].mask); + } + } + + __entry->guest_ip = guest_ip; + __entry->cpu = cpu; + ), + + TP_printk("CPU#%llu, guest IP 0x%llx, cu_num %d, mu_num %d\n" + "CU header: %s (0x%llx)\n" + "CU entry0: %s (0x%llx 0x%llx)\n" + "CU entry1: %s (0x%llx 0x%llx)\n" + "MU entry0: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + "MU entry1: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + "MU entry2: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + "MU entry3: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + "MU entry4: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + "MU entry5: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + "MU entry6: %s (0x%llx), 0x%llx, 
0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + "MU entry7: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + "MU entry8: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + "MU entry9: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + "MU entry10: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + , + __entry->cpu, __entry->guest_ip, + __entry->cu_num, __entry->mu_num, + (__entry->cu_num >= 0) ? + E2K_TRACE_PRINT_CU_HDR_LO(__entry->cu_hdr_lo) : "(none)", + (__entry->cu_num >= 0) ? __entry->cu_hdr_lo : 0, + E2K_PRINT_INTC_CU_ENTRY(__entry, 0), + E2K_PRINT_INTC_CU_ENTRY(__entry, 1), + E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 0), + E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 1), + E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 2), + E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 3), + E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 4), + E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 5), + E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 6), + E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 7), + E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 8), + E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 9), + E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 10)) +); + +TRACE_EVENT( + single_mu_intc, + + TP_PROTO(const intc_info_mu_t *mu, u64 guest_ip, u64 cpu), + + TP_ARGS(mu, guest_ip, cpu), + + TP_STRUCT__entry( + __array( u64, mu, INTC_INFO_MU_ITEM_SIZE ) + __field( u64, guest_ip ) + __field( u64, cpu ) + ), + + TP_fast_assign( + __entry->mu[0] = AW(mu[0].hdr); + __entry->mu[1] = mu[0].gpa; + __entry->mu[2] = mu[0].gva; + __entry->mu[3] = mu[0].data; + __entry->mu[4] = AW(mu[0].condition); + __entry->mu[5] = mu[0].data_ext; + __entry->mu[6] = AW(mu[0].mask); + __entry->guest_ip = guest_ip; + __entry->cpu = cpu; + ), + + TP_printk("CPU#%llu, guest IP: 0x%llx\n" + "MU entry0: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n", + __entry->cpu, __entry->guest_ip, + 
E2K_PRINT_INTC_MU_ENTRY(__entry, 1, 0)) +); + +TRACE_EVENT( + double_mu_intc, + + TP_PROTO(const intc_info_mu_t *mu, u64 guest_ip, u64 cpu), + + TP_ARGS(mu, guest_ip, cpu), + + TP_STRUCT__entry( + __array( u64, mu, 2 * INTC_INFO_MU_ITEM_SIZE ) + __field( u64, guest_ip ) + __field( u64, cpu ) + ), + + TP_fast_assign( + int i; + for (i = 0; i < 2; i++) { + __entry->mu[7 * i] = + AW(mu[i].hdr); + __entry->mu[7 * i + 1] = + mu[i].gpa; + __entry->mu[7 * i + 2] = + mu[i].gva; + __entry->mu[7 * i + 3] = + mu[i].data; + __entry->mu[7 * i + 4] = + AW(mu[i].condition); + __entry->mu[7 * i + 5] = + mu[i].data_ext; + __entry->mu[7 * i + 6] = + AW(mu[i].mask); + } + __entry->guest_ip = guest_ip; + __entry->cpu = cpu; + ), + + TP_printk("CPU#%llu, guest IP: 0x%llx\n" + "MU entry0: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + "MU entry1: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n", + __entry->cpu, __entry->guest_ip, + E2K_PRINT_INTC_MU_ENTRY(__entry, 2, 0), + E2K_PRINT_INTC_MU_ENTRY(__entry, 2, 1)) +); + +TRACE_EVENT( + single_cu_intc, + + TP_PROTO(const intc_info_cu_hdr_t cu_hdr, u64 guest_ip, u64 cpu), + + TP_ARGS(cu_hdr, guest_ip, cpu), + + TP_STRUCT__entry( + __field( u64, cu_hdr_lo ) + __field( u64, guest_ip ) + __field( u64, cpu ) + ), + + TP_fast_assign( + __entry->cu_hdr_lo = AW(cu_hdr.lo); + __entry->guest_ip = guest_ip; + __entry->cpu = cpu; + ), + + TP_printk("CPU#%llu, guest IP: 0x%llx\n" + "CU header: %s (0x%llx)\n", + __entry->cpu, __entry->guest_ip, + E2K_TRACE_PRINT_CU_HDR_LO(__entry->cu_hdr_lo), + __entry->cu_hdr_lo) + +); + +TRACE_EVENT( + intc_exit, + + TP_PROTO(int ret), + + TP_ARGS(ret), + + TP_STRUCT__entry( + __field( int, ret ) + ), + + TP_fast_assign( + __entry->ret = ret; + ), + + TP_printk("Intercept exit %s(%d)\n", + (__entry->ret) ? 
"to QEMU " : "", + __entry->ret) +); + +TRACE_EVENT( + irq_delivery, + + TP_PROTO(unsigned int vector, unsigned int dlvm, int vcpu, + bool dam_active), + + TP_ARGS(vector, dlvm, vcpu, dam_active), + + TP_STRUCT__entry( + __field( u32, vector ) + __field( u32, dlvm ) + __field( int, vcpu ) + __field( bool, dam_active ) + ), + + TP_fast_assign( + __entry->vector = vector; + __entry->dlvm = dlvm; + __entry->vcpu = vcpu; + __entry->dam_active = dam_active; + ), + + TP_printk("to vcpu %d via %s, vector 0x%x, dlvm %d\n", __entry->vcpu, + __entry->dam_active ? "icr" : "pmirr", + __entry->vector, __entry->dlvm) +); + +TRACE_EVENT( + save_pmirr, + + TP_PROTO(u32 pmirr, u64 val), + + TP_ARGS(pmirr, val), + + TP_STRUCT__entry( + __field( u32, pmirr ) + __field( u64, val ) + ), + + TP_fast_assign( + __entry->pmirr = pmirr; + __entry->val = val; + ), + + TP_printk("pmirr#%d val 0x%llx\n", __entry->pmirr, __entry->val) +); + +TRACE_EVENT( + restore_pmirr, + + TP_PROTO(u32 pmirr, u64 val), + + TP_ARGS(pmirr, val), + + TP_STRUCT__entry( + __field( u32, pmirr ) + __field( u64, val ) + ), + + TP_fast_assign( + __entry->pmirr = pmirr; + __entry->val = val; + ), + + TP_printk("pmirr#%d val 0x%llx\n", __entry->pmirr, __entry->val) +); + +TRACE_EVENT( + save_pnmirr, + + TP_PROTO(u32 val), + + TP_ARGS(val), + + TP_STRUCT__entry( + __field( u32, val ) + ), + + TP_fast_assign( + __entry->val = val; + ), + + TP_printk("pnmirr val 0x%x\n", __entry->val) +); + +TRACE_EVENT( + restore_pnmirr, + + TP_PROTO(u32 val), + + TP_ARGS(val), + + TP_STRUCT__entry( + __field( u32, val ) + ), + + TP_fast_assign( + __entry->val = val; + ), + + TP_printk("pnmirr val 0x%x\n", __entry->val) +); + +TRACE_EVENT( + save_cir, + + TP_PROTO(u32 cir), + + TP_ARGS(cir), + + TP_STRUCT__entry( + __field( u32, cir ) + ), + + TP_fast_assign( + __entry->cir = cir; + ), + + TP_printk("cir 0x%x\n", __entry->cir) +); + +TRACE_EVENT( + restore_cir, + + TP_PROTO(u32 cir), + + TP_ARGS(cir), + + TP_STRUCT__entry( + __field( u32, 
cir ) + ), + + TP_fast_assign( + __entry->cir = cir; + ), + + TP_printk("cir 0x%x\n", __entry->cir) +); + +TRACE_EVENT( + complete_intc_info_io_read, + + TP_PROTO(unsigned long gpa, unsigned long data), + + TP_ARGS(gpa, data), + + TP_STRUCT__entry( + __field( unsigned long, gpa ) + __field( unsigned long, data ) + ), + + TP_fast_assign( + __entry->gpa = gpa; + __entry->data = data; + ), + + TP_printk("gpa 0x%lx, data 0x%lx\n", __entry->gpa, __entry->data) +); + +TRACE_EVENT( + complete_intc_info_io_write, + + TP_PROTO(unsigned long gpa, unsigned long data), + + TP_ARGS(gpa, data), + + TP_STRUCT__entry( + __field( unsigned long, gpa ) + __field( unsigned long, data ) + ), + + TP_fast_assign( + __entry->gpa = gpa; + __entry->data = data; + ), + + TP_printk("gpa 0x%lx, data 0x%lx\n", __entry->gpa, __entry->data) +); + +TRACE_EVENT( + intc_clw, + + TP_PROTO(bool us_cl_d, unsigned long us_cl_b, unsigned long us_cl_up, + unsigned long us_cl_m0, unsigned long us_cl_m1, + unsigned long us_cl_m2, unsigned long us_cl_m3), + + TP_ARGS(us_cl_d, us_cl_b, us_cl_up, us_cl_m0, us_cl_m1, us_cl_m2, us_cl_m3), + + TP_STRUCT__entry( + __field( bool, us_cl_d ) + __field( unsigned long, us_cl_b ) + __field( unsigned long, us_cl_up ) + __field( unsigned long, us_cl_m0 ) + __field( unsigned long, us_cl_m1 ) + __field( unsigned long, us_cl_m2 ) + __field( unsigned long, us_cl_m3 ) + ), + + TP_fast_assign( + __entry->us_cl_d = us_cl_d; + __entry->us_cl_b = us_cl_b; + __entry->us_cl_up = us_cl_up; + __entry->us_cl_m0 = us_cl_m0; + __entry->us_cl_m1 = us_cl_m1; + __entry->us_cl_m2 = us_cl_m2; + __entry->us_cl_m3 = us_cl_m3; + ), + + TP_printk("us_cl_d %d, us_cl_b 0x%lx, us_cl_up 0x%lx\n" + "us_cl_m0 0x%lx us_cl_m1 0x%lx us_cl_m2 0x%lx, us_cl_m3 0x%lx\n", + __entry->us_cl_d, __entry->us_cl_b, __entry->us_cl_up, + __entry->us_cl_m0, __entry->us_cl_m1, __entry->us_cl_m2, __entry->us_cl_m3) +); + +#endif /* _TRACE_KVM_HV_H */ + +/* This part must be outside protection */ +#undef 
TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../arch/e2k/include/asm/kvm +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace_kvm_hv +#include diff --git a/arch/e2k/include/asm/kvm/trace_kvm_pv.h b/arch/e2k/include/asm/kvm/trace_kvm_pv.h new file mode 100644 index 000000000000..44da4592d3e7 --- /dev/null +++ b/arch/e2k/include/asm/kvm/trace_kvm_pv.h @@ -0,0 +1,207 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kvm_pv + +#if !defined(_TRACE_KVM_PV_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_KVM_PV_H + +#include +#include +#include +#include +#include + +#define E2K_TC_TYPE_STORE (1ULL << 17) +#define E2K_TC_TYPE_S_F (1ULL << 19) +#define E2K_TC_TYPE_ROOT (1ULL << 27) +#define E2K_TC_TYPE_SCAL (1ULL << 28) +#define E2K_TC_TYPE_SRU (1ULL << 29) +#define E2K_TC_TYPE_SPEC (1ULL << 30) +#define E2K_TC_TYPE_PM (1ULL << 31) +#define E2K_TC_TYPE_NUM_ALIGN (1ULL << 50) +#define E2K_TC_TYPE_EMPT (1ULL << 51) +#define E2K_TC_TYPE_CLW (1ULL << 52) + +#define E2K_TC_TYPE (E2K_TC_TYPE_STORE | E2K_TC_TYPE_S_F | E2K_TC_TYPE_ROOT | \ + E2K_TC_TYPE_SCAL | E2K_TC_TYPE_SRU | E2K_TC_TYPE_SPEC | \ + E2K_TC_TYPE_PM | E2K_TC_TYPE_NUM_ALIGN | \ + E2K_TC_TYPE_EMPT | E2K_TC_TYPE_CLW) + +#define E2K_FAULT_TYPE_GLOBAL_SP (1ULL << 0) +#define E2K_FAULT_TYPE_PAGE_BOUND (1ULL << 1) +#define E2K_FAULT_TYPE_EXC_MEM_LOCK (1ULL << 2) +#define E2K_FAULT_TYPE_PH_PR_PAGE (1ULL << 3) +#define E2K_FAULT_TYPE_IO_PAGE (1ULL << 4) +#define E2K_FAULT_TYPE_ISYS_PAGE (1ULL << 5) +#define E2K_FAULT_TYPE_PROT_PAGE (1ULL << 6) +#define E2K_FAULT_TYPE_PRIV_PAGE (1ULL << 7) +#define E2K_FAULT_TYPE_ILLEGAL_PAGE (1ULL << 8) +#define E2K_FAULT_TYPE_NWRITE_PAGE (1ULL << 9) +#define E2K_FAULT_TYPE_PAGE_MISS (1ULL << 10) +#define E2K_FAULT_TYPE_PH_BOUND (1ULL << 11) +#define E2K_FAULT_TYPE_INTL_RES_BITS (1ULL << 12) + +TRACE_EVENT( + intc_trap_cellar, + + TP_PROTO(const trap_cellar_t *tc, int nr), + + TP_ARGS(tc, nr), + + TP_STRUCT__entry( + __field( int, nr ) + __field( u64, address ) + __field( 
u64, data_val ) + __field( u64, data_ext_val ) + __field( u8, data_tag ) + __field( u8, data_ext_tag ) + __field( u64, condition ) + __field( u64, mask ) + ), + + TP_fast_assign( + __entry->nr = nr; + __entry->address = tc->address; + load_value_and_tagd(&tc->data, + &__entry->data_val, &__entry->data_tag); + load_value_and_tagd(&tc->data_ext, + &__entry->data_ext_val, &__entry->data_ext_tag); + __entry->condition = AW(tc->condition); + __entry->mask = AW(tc->mask); + ), + + TP_printk("\n" + "Entry %d: address 0x%llx data %hhx 0x%llx data_ext %hhx 0x%llx\n" + "Register: address=0x%02hhx, vl=%d, vr=%d\n" + "Opcode: fmt=%d, n_prot=%d, fmtc=%d\n" + "Info1: chan=%d, mas=0x%02hhx, miss_lvl=%d, rcv=%d, dst_rcv=0x%03x\n" + "Info2: %s\n" + "Ftype: %s" + , + __entry->nr, __entry->address, __entry->data_tag, + __entry->data_val, __entry->data_ext_tag, __entry->data_ext_val, + AS((tc_cond_t) __entry->condition).address, + AS((tc_cond_t) __entry->condition).vl, + AS((tc_cond_t) __entry->condition).vr, + AS((tc_cond_t) __entry->condition).fmt, + AS((tc_cond_t) __entry->condition).npsp, + AS((tc_cond_t) __entry->condition).fmtc, + AS((tc_cond_t) __entry->condition).chan, + AS((tc_cond_t) __entry->condition).mas, + AS((tc_cond_t) __entry->condition).miss_lvl, + AS((tc_cond_t) __entry->condition).rcv, + AS((tc_cond_t) __entry->condition).dst_rcv, + __print_flags(__entry->condition & E2K_TC_TYPE, "|", + { E2K_TC_TYPE_STORE, "store" }, + { E2K_TC_TYPE_S_F, "s_f" }, + { E2K_TC_TYPE_ROOT, "root" }, + { E2K_TC_TYPE_SCAL, "scal" }, + { E2K_TC_TYPE_SRU, "sru" }, + { E2K_TC_TYPE_SPEC, "spec" }, + { E2K_TC_TYPE_PM, "pm" }, + { E2K_TC_TYPE_NUM_ALIGN, "num_align" }, + { E2K_TC_TYPE_EMPT, "empt" }, + { E2K_TC_TYPE_CLW, "clw" } + ), + __print_flags(AS((tc_cond_t) __entry->condition).fault_type, "|", + { E2K_FAULT_TYPE_GLOBAL_SP, "global_sp" }, + { E2K_FAULT_TYPE_PAGE_BOUND, "page_bound" }, + { E2K_FAULT_TYPE_EXC_MEM_LOCK, "exc_mem_lock" }, + { E2K_FAULT_TYPE_PH_PR_PAGE, "ph_pr_page" }, + { 
E2K_FAULT_TYPE_IO_PAGE, "io_page" }, + { E2K_FAULT_TYPE_ISYS_PAGE, "isys_page" }, + { E2K_FAULT_TYPE_PROT_PAGE, "prot_page" }, + { E2K_FAULT_TYPE_PRIV_PAGE, "priv_page" }, + { E2K_FAULT_TYPE_ILLEGAL_PAGE, "illegal_page" }, + { E2K_FAULT_TYPE_NWRITE_PAGE, "nwrite_page" }, + { E2K_FAULT_TYPE_PAGE_MISS, "page_miss" }, + { E2K_FAULT_TYPE_PH_BOUND, "ph_bound" }, + { E2K_FAULT_TYPE_INTL_RES_BITS, "intl_res_bits" } + )) +); + +#define kvm_trace_pv_symbol_inject_caller \ + { FROM_HOST_INJECT, "From host" }, \ + { FROM_PV_VCPU_TRAP_INJECT, "From VCPU trap" }, \ + { FROM_PV_VCPU_SYSCALL_INJECT, "From VCPU syscall" } + +TRACE_EVENT( + pv_injection, + + TP_PROTO(inject_caller_t from, const e2k_stacks_t *stacks, const e2k_mem_crs_t *crs, + int traps_num, int syscall_num), + + TP_ARGS(from, stacks, crs, traps_num, syscall_num), + + TP_STRUCT__entry( + __field( int, from ) + /* Stacks */ + __field( unsigned long, u_top ) + __field( u64, u_usd_lo ) + __field( u64, u_usd_hi ) + __field( unsigned long, top ) + __field( u64, usd_lo ) + __field( u64, usd_hi ) + __field( u64, psp_lo ) + __field( u64, psp_hi ) + __field( u64, pcsp_lo ) + __field( u64, pcsp_hi ) + __field( u64, pshtp ) + __field( unsigned int, pcshtp ) + /* CRs */ + __field( u64, cr0_lo ) + __field( u64, cr0_hi ) + __field( u64, cr1_lo ) + __field( u64, cr1_hi ) + /* Recursion level */ + __field( int, traps_num ) + __field( int, syscall_num ) + ), + + TP_fast_assign( + __entry->from = from; + __entry->u_top = stacks->u_top; + __entry->u_usd_lo = AW(stacks->u_usd_lo); + __entry->u_usd_hi = AW(stacks->u_usd_hi); + __entry->top = stacks->top; + __entry->usd_lo = AW(stacks->usd_lo); + __entry->usd_hi = AW(stacks->usd_hi); + __entry->psp_lo = AW(stacks->psp_lo); + __entry->psp_hi = AW(stacks->psp_hi); + __entry->pcsp_lo = AW(stacks->pcsp_lo); + __entry->pcsp_hi = AW(stacks->pcsp_hi); + __entry->pshtp = AW(stacks->pshtp); + __entry->pcshtp = stacks->pcshtp; + __entry->cr0_lo = AW(crs->cr0_lo); + __entry->cr0_hi = 
AW(crs->cr0_hi); + __entry->cr1_lo = AW(crs->cr1_lo); + __entry->cr1_hi = AW(crs->cr1_hi); + __entry->traps_num = traps_num; + __entry->syscall_num = syscall_num; + ), + + TP_printk("\n" + "%s. traps_num %d, syscall_num %d. Stacks:\n" + "u_top 0x%lx, u_usd_lo 0x%llx, u_usd_hi 0x%llx\n" + "top 0x%lx, usd_lo 0x%llx, usd_hi 0x%llx\n" + "psp_lo 0x%llx, psp_hi 0x%llx, pcsp_lo 0x%llx, pcsp_hi 0x%llx\n" + "pshtp 0x%llx, pcshtp 0x%x\n" + "cr0_lo 0x%llx, cr0_hi 0x%llx, cr1_lo 0x%llx, cr1_hi 0x%llx\n" + , + __print_symbolic(__entry->from, kvm_trace_pv_symbol_inject_caller), + __entry->traps_num, __entry->syscall_num, + __entry->u_top, __entry->u_usd_lo, __entry->u_usd_hi, + __entry->top, __entry->usd_lo, __entry->usd_hi, + __entry->psp_lo, __entry->psp_hi, __entry->pcsp_lo, __entry->pcsp_hi, + __entry->pshtp, __entry->pcshtp, + __entry->cr0_lo, __entry->cr0_hi, __entry->cr1_lo, __entry->cr1_hi) + +); + +#endif /* _TRACE_KVM_PV_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../arch/e2k/include/asm/kvm +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace_kvm_pv +#include diff --git a/arch/e2k/include/asm/kvm/trap_table.S.h b/arch/e2k/include/asm/kvm/trap_table.S.h new file mode 100644 index 000000000000..1ccd5e4166ef --- /dev/null +++ b/arch/e2k/include/asm/kvm/trap_table.S.h @@ -0,0 +1,467 @@ +/* + * + * Copyright (C) 2015 MCST + * + * Defenition of traps handling routines. + */ + +#ifndef _E2K_KVM_TRAP_TABLE_ASM_H +#define _E2K_KVM_TRAP_TABLE_ASM_H + +#ifdef __ASSEMBLY__ + +#include +#include +#include +#include +#include + +#include + +#if defined CONFIG_SMP +# define SMP_ONLY(...) __VA_ARGS__ +#else +# define SMP_ONLY(...) 
+#endif + +#ifdef CONFIG_KVM_HOST_MODE +/* it is host kernel with virtualization support */ +/* or paravirtualized host and guest kernel */ + +.macro DO_SWITCH_TO_KERNEL_IMAGE_PGD rti, predN, rtmp0, rtmp1, rtmp2 + /* do switch to host kernel image */ + /* thread_info_t *ti = rti */ + /* %pred_no is true to switch to kernel image pgd */ + /* if guest and host kernel images are loaded to equal */ + /* addresses, then switch from one to another must */ + /* flush all caches, including TLB */ + /* if (%pred_no) { */ + /* native_flush_TLB_all(); */ + /* native_flush_CACHE_all(); */ + /* E2K_WAIT_FLUSH; */ + /* } */ +{ + ldd [\rti + TI_KERNEL_IMAGE_PGD_P], \rtmp1 ? \predN; + ldd [\rti + TI_KERNEL_IMAGE_PGD], \rtmp2 ? \predN; + addd 0, 0, \rtmp0 ? \predN; +} +{ + std,2 \rtmp0, [_FLUSH_TLB_ALL_OP] MAS_TLB_FLUSH ? \predN; + std \rtmp2, [\rtmp1] ? \predN; +} +{ + std,2 \rtmp0, [_FLUSH_CACHE_L12_OP] MAS_CACHE_FLUSH ? \predN; +} + wait fl_c = 1; +.endm /* DO_SWITCH_TO_KERNEL_IMAGE_PGD */ + +.macro SWITCH_TO_KERNEL_IMAGE_PGD rti predN rtmp0, rtmp1, rtmp2 + /* switch to host kernel image */ + /* thread_info_t *ti = rti; */ + /* if (ti->flags & _TIF_PARAVIRT_GUEST) { */ + /* *ti->kernel_image_pgd_p = ti->kernel_image_pgd; */ + /* native_flush_TLB_all(); */ + /* native_flush_CACHE_all(); */ + /* E2K_WAIT_FLUSH; */ + /* } */ + /* rti: current_thread_info */ + ldd [\rti + TI_FLAGS], \rtmp0; + cmpandedb \rtmp0, _TIF_PARAVIRT_GUEST, \predN; +{ + pass \predN, @p0; + landp ~@p0, ~@p0, @p4; + pass @p4, \predN; +} + DO_SWITCH_TO_KERNEL_IMAGE_PGD \rti, \predN, \rtmp0, \rtmp1, \rtmp2 +.endm + +/* + * goto guest kernel system call table entry, if system call is from guest user + * rti: register of current_thread_info() + * rtmp0 rtmp1 rtmp2: temporary registers + * ptmp0 ptmp1 ptmp2: temporary predicates + */ +.macro GOTO_GUEST_KERNEL_TTABLE entry_num rti rtmp0 rtmp1 rtmp2 \ + ptmp0 ptmp1 ptmp2 + /* thread_info_t *ti = %dr7 */ + /* e2k_cr1_lo_t cr1_lo = NATIVE_READ_CR1_LO_REG(); */ + /* */ + 
/* if ((ti->flags & _TIF_VIRTUALIZED_GUEST)) && { */ + /* !(cr1_lo.CR1_lo_psr & PSR_PM)) { */ + /* if (ti->flags & _TIF_PARAVIRT_GUEST) { */ + /* DO_SWITCH_TO_KERNEL_IMAGE_PGD() */ + /* } */ + /* goto goto_guest_kernel_ttable_C( */ + /* sys_num << 32 | entry, */ + /* arg1, arg2, arg3, arg4, */ + /* arg5, arg6); */ + /* } */ + +.global goto_guest_kernel_ttable_C; +{ + rrd %cr1.lo, \rtmp1; + ldd [\rti + TI_FLAGS], \rtmp0; + sxt 2, %r0, %dr0; +} +{ + cmpandedb \rtmp1, CR1_LO_PSR_PM_MASK, \ptmp1; + cmpandedb \rtmp0, _TIF_VIRTUALIZED_GUEST, \ptmp0; + cmpandedb \rtmp0, _TIF_PARAVIRT_GUEST, \ptmp2; +} +{ + pass \ptmp0, @p0; + pass \ptmp1, @p1; + landp ~@p0, @p1, @p4; + pass @p4, \ptmp0; + pass \ptmp2, @p2; + landp ~@p2, ~@p2, @p5; + pass @p5, \ptmp1; +} +{ + puttagd %dr1, 0, %dr1 ? \ptmp0; + puttagd %dr2, 0, %dr2 ? \ptmp0; + shld %dr0, 32, %dr0 ? \ptmp0; +} +{ + SMP_ONLY(shld,1 GCPUID_PREEMPT, 3, GCPUOFFSET ? \ptmp0) + puttagd %dr3, 0, %dr3 ? \ptmp0; + puttagd %dr4, 0, %dr4 ? \ptmp0; + ord %dr0, \entry_num, %dr0 ? \ptmp0; +} +{ + puttagd %dr5, 0, %dr5 ? \ptmp0; + puttagd %dr6, 0, %dr6 ? \ptmp0; +} + /* rti: current_thread_info */ + /* ptmp1 : predicate is true if need switch kernel image pgd */ + /* rtmp0, rtmp1, rtmp2: temporary registers */ + DO_SWITCH_TO_KERNEL_IMAGE_PGD \rti, \ptmp1, \rtmp0, \rtmp1, \rtmp2 +{ + SMP_ONLY(ldd,2 [ __per_cpu_offset + GCPUOFFSET ], GCPUOFFSET ? \ptmp0) + puttagd %dr7, 0, %dr7 ? \ptmp0; + ibranch goto_guest_kernel_ttable_C ? 
\ptmp0; +} +#ifdef CONFIG_CPU_HWBUG_IBRANCH + {nop} {nop} +#endif +.endm /* GOTO_GUEST_KERNEL_TTABLE */ + +# ifdef CONFIG_PARAVIRT_GUEST +/* + * goto guest kernel system call table entry, if system call is from guest user + * rti: register of current_thread_info() + * rtmp0 rtmp1 rtmp2: temporary registers + * ptmp0 ptmp1: temporary predicates + */ +.macro GOTO_PV_VCPU_KERNEL_TTABLE rti rtmp0 rtmp1 rtmp2 ptmp1 + /* thread_info_t *ti = %dr7 */ + /* e2k_cr1_lo_t cr1_lo = NATIVE_READ_CR1_LO_REG(); */ + /* */ + /* if (ti->flags & _TIF_PARAVIRT_GUEST) { */ + /* DO_SWITCH_TO_KERNEL_IMAGE_PGD() */ + /* } */ + +{ + ldd [\rti + TI_FLAGS], \rtmp0; + sxt 2, %r0, %dr0; +} +{ + cmpandedb \rtmp0, _TIF_PARAVIRT_GUEST, \ptmp1; +} +{ + pass \ptmp1, @p2; + landp ~@p2, ~@p2, @p5; + pass @p5, \ptmp1; +} + /* rti: current_thread_info */ + /* ptmp1 : predicate is true if need switch kernel image pgd */ + /* rtmp0, rtmp1, rtmp2: temporary registers */ + DO_SWITCH_TO_KERNEL_IMAGE_PGD \rti, \ptmp1, \rtmp0, \rtmp1, \rtmp2 +.endm /* GOTO_GUEST_KERNEL_TTABLE */ +# else +.macro GOTO_PV_VCPU_KERNEL_TTABLE rti rtmp0 rtmp1 rtmp2 ptmp1 +.endm +# endif /* CONFIG_PARAVIRT_GUEST */ + +/* + * goto guest kernel fast system call table entry, if system call is + * from guest user + * rtmpti: temporary register to read current_thread_info() + * rtmp0 rtmp1 rtmp2: temporary registers + * ptmp0 ptmp1 ptmp2: temporary predicates + */ +.macro GOTO_GUEST_KERNEL_FAST_TTABLE entry_num rtmpti rtmp0 rtmp1 rtmp2 \ + ptmp0 ptmp1 ptmp2 +{ + setwd wsz = 8, nfx = 1; + nop 1; + rrd %osr0, \rtmpti; + ipd 2; +} + GOTO_GUEST_KERNEL_TTABLE \entry_num, \rtmpti, \rtmp0, \rtmp1, \rtmp2, \ + \ptmp0, \ptmp1, \ptmp2 +.endm /* GOTO_GUEST_KERNEL_FAST_TTABLE */ + +/* + * goto guest kernel protected system call table entry, if system call is + * from guest user + * rti: register of current_thread_info() + * rtmp0 rtmp1 rtmp2: temporary registers + * ptmp0 ptmp1 ptmp2: temporary predicates + * FIXME: is not implemented + */ +.macro 
GOTO_GUEST_KERNEL_PROT_TTABLE entry_num rti rtmp0 rtmp1 rtmp2 \ + ptmp0 ptmp1 ptmp2 + GOTO_GUEST_KERNEL_TTABLE \entry_num, \rti, \rtmp0, \rtmp1, \rtmp2, \ + \ptmp0, \ptmp1, \ptmp2 +.endm /* GOTO_GUEST_KERNEL_PROT_TTABLE */ + +#else /* ! CONFIG_KVM_HOST_MODE */ +.macro SWITCH_TO_KERNEL_IMAGE_PGD rti predN rtmp0, rtmp1, rtmp2 + /* not used */ +.endm /* SWITCH_TO_KERNEL_IMAGE_PGD */ + +.macro GOTO_GUEST_KERNEL_TTABLE entry_num rti rtmp0 rtmp1 rtmp2 \ + ptmp0 ptmp1 ptmp2 + /* not used */ +.endm /* GOTO_GUEST_KERNEL_TTABLE */ +.macro GOTO_GUEST_KERNEL_FAST_TTABLE entry_num rtmpti rtmp0 rtmp1 rtmp2 \ + ptmp0 ptmp1 ptmp2 + /* not used */ +.endm /* GOTO_GUEST_KERNEL_FAST_TTABLE */ +.macro GOTO_GUEST_KERNEL_PROT_TTABLE entry_num rti rtmp0 rtmp1 rtmp2 \ + ptmp0 ptmp1 ptmp2 + /* not used */ +.endm /* GOTO_GUEST_KERNEL_PROT_TTABLE */ +.macro GOTO_PV_VCPU_KERNEL_TTABLE entry_num rti rtmp0 rtmp1 rtmp2 \ + ptmp0 ptmp1 + /* not used */ +.endm /* GOTO_PV_VCPU_KERNEL_TTABLE */ +#endif /* CONFIG_KVM_HOST_MODE */ + +/* + * Save current state of pair of global registers with tags and extensions + * gpair_lo/gpair_hi is pair of adjacent global registers, lo is even + * and hi is odd (for example GCURTI/GCURTASK) + * kreg_lo, kreg_hi is pair of indexes of global registers into structure + * to save these k_gregs.g[kregd_lo/kreg_hi] + * rbase is register containing base address to save global + * registers pair values (for example glob_regs_t structure + * or thread_info_t thread_info->k_gregs/h_gregs) + * predSAVE conditional save on this predicate + * rtmp0/rtmp1 two temporary registers (for example %dr20, %dr21) + */ + +.macro SAVE_GREGS_PAIR_COND_V2 gpair_lo, gpair_hi, kreg_lo, kreg_hi, rbase, \ + predSAVE, rtmp0, rtmp1 +{ + strd,2 %dg\gpair_lo, [\rbase + (TAGGED_MEM_STORE_REC_OPC + \ + \kreg_lo * GLOB_REG_SIZE + \ + GLOB_REG_BASE)] ? \predSAVE; + strd,5 %dg\gpair_hi, [\rbase + (TAGGED_MEM_STORE_REC_OPC + \ + \kreg_hi * GLOB_REG_SIZE + \ + GLOB_REG_BASE)] ? 
\predSAVE; + movfi,1 %dg\gpair_lo, \rtmp0 ? \predSAVE; + movfi,4 %dg\gpair_hi, \rtmp1 ? \predSAVE; +} +{ + sth \rtmp0, [\rbase + (\kreg_lo * GLOB_REG_SIZE + \ + GLOB_REG_EXT)] ? \predSAVE; + sth \rtmp1, [\rbase + (\kreg_hi * GLOB_REG_SIZE + \ + GLOB_REG_EXT)] ? \predSAVE; +} +.endm /* SAVE_GREGS_PAIR_COND_V2 */ + +/* Bug 116851 - all strqp must be speculative if dealing with tags */ +.macro SAVE_GREGS_PAIR_COND_V5 gpair_lo, gpair_hi, kreg_lo, kreg_hi, rbase, \ + predSAVE +{ + strqp,2,sm %dg\gpair_lo, [\rbase + (TAGGED_MEM_STORE_REC_OPC + \ + \kreg_lo * GLOB_REG_SIZE + \ + GLOB_REG_BASE)] ? \predSAVE; + strqp,5,sm %dg\gpair_hi, [\rbase + (TAGGED_MEM_STORE_REC_OPC + \ + \kreg_hi * GLOB_REG_SIZE + \ + GLOB_REG_BASE)] ? \predSAVE; +} +.endm /* SAVE_GREGS_PAIR_COND_V5 */ + +.macro SAVE_GREG_UNEXT greg, kreg, rbase + strqp,sm \greg, [\rbase + (TAGGED_MEM_STORE_REC_OPC + \ + \kreg * GLOB_REG_SIZE + \ + GLOB_REG_BASE)]; +.endm /* SAVE_GREG_UNEXT */ + +.macro SAVE_GREGS_PAIR_UNEXT greg1, greg2, kreg1, kreg2, rbase +{ + SAVE_GREG_UNEXT \greg1, kreg1, rbase + SAVE_GREG_UNEXT \greg2, kreg2, rbase +} +.endm /* SAVE_GREGS_PAIR_UNEXT */ + +.macro ASM_SET_KERNEL_GREGS_PAIR gpair_lo, gpair_hi, rval_lo, rval_hi +{ + addd \rval_lo, 0, %dg\gpair_lo; + addd \rval_hi, 0, %dg\gpair_hi; +} +.endm /* ASM_SET_CURRENTS_GREGS_PAIR */ + +.macro DO_ASM_SET_KERNEL_GREGS_PAIR gpair_lo, gpair_hi, rval_lo, rval_hi + ASM_SET_KERNEL_GREGS_PAIR \gpair_lo, \gpair_hi, \ + \rval_lo, \rval_hi +.endm /* DO_ASM_SET_KERNEL_GREGS_PAIR */ + +.macro SET_KERNEL_GREGS runused, rtask, rpercpu_off, rcpu + DO_ASM_SET_KERNEL_GREGS_PAIR \ + GUEST_VCPU_STATE_GREG, CURRENT_TASK_GREG, \ + \runused, \rtask + DO_ASM_SET_KERNEL_GREGS_PAIR \ + MY_CPU_OFFSET_GREG, SMP_CPU_ID_GREG, \ + \rpercpu_off, \rcpu +.endm /* SET_KERNEL_GREGS */ + +.macro ONLY_SET_KERNEL_GREGS runused, rtask, rpercpu_off, rcpu + SET_KERNEL_GREGS \runused, \rtask, \rpercpu_off, \rcpu +.endm /* ONLY_SET_KERNEL_GREGS */ + +#ifdef CONFIG_KVM_HOST_MODE +/* it 
is host kernel with virtualization support */ +/* or paravirtualized host and guest kernel */ +.macro DO_SAVE_HOST_GREGS_V2 gvcpu_lo, gvcpu_hi, hvcpu_lo, hvcpu_hi \ + drti, predSAVE, drtmp, rtmp0, rtmp1 + /* drtmp: thread_info->h_gregs.g */ + addd \drti, TI_HOST_GREGS_TO_VIRT, \drtmp ? \predSAVE; + SAVE_GREGS_PAIR_COND_V2 \gvcpu_lo, \gvcpu_hi, \hvcpu_lo, \hvcpu_hi, \ + \drtmp, /* thread_info->h_gregs.g base address */ \ + \predSAVE, \ + \rtmp0, \rtmp1 +.endm /* DO_SAVE_HOST_GREGS_V2 */ + +.macro DO_SAVE_HOST_GREGS_V5 gvcpu_lo, gvcpu_hi, hvcpu_lo, hvcpu_hi \ + drti, predSAVE, drtmp + /* drtmp: thread_info->h_gregs.g */ + addd \drti, TI_HOST_GREGS_TO_VIRT, \drtmp ? \predSAVE; + SAVE_GREGS_PAIR_COND_V5 \gvcpu_lo, \gvcpu_hi, \hvcpu_lo, \hvcpu_hi, \ + \drtmp, /* thread_info->h_gregs.g base address */ \ + \predSAVE +.endm /* DO_SAVE_HOST_GREGS_V5 */ + +.macro SAVE_HOST_GREGS_V2 drti, predSAVE, drtmp, rtmp0, rtmp1 + DO_SAVE_HOST_GREGS_V2 \ + GUEST_VCPU_STATE_GREG, GUEST_VCPU_STATE_UNUSED_GREG, \ + VCPU_STATE_GREGS_PAIRS_INDEX, VCPU_STATE_GREGS_PAIRS_HI_INDEX, \ + \drti, \predSAVE, \ + \drtmp, \rtmp0, \rtmp1 +.endm /* SAVE_HOST_GREGS_V2 */ + +.macro SAVE_HOST_GREGS_V5 drti, predSAVE, drtmp + DO_SAVE_HOST_GREGS_V5 \ + GUEST_VCPU_STATE_GREG, GUEST_VCPU_STATE_UNUSED_GREG, \ + VCPU_STATE_GREGS_PAIRS_INDEX, VCPU_STATE_GREGS_PAIRS_HI_INDEX, \ + \drti, \predSAVE, \ + \drtmp, +.endm /* SAVE_HOST_GREGS_V5 */ + +.macro SAVE_HOST_GREGS_TO_VIRT_V2 drti, predSAVE, drtmp, rtmp0, rtmp1 + SAVE_HOST_GREGS_V2 \drti, \predSAVE, \drtmp, \rtmp0, \rtmp1 +.endm /* SAVE_HOST_GREGS_TO_VIRT_V2 */ + +.macro SAVE_HOST_GREGS_TO_VIRT_V5 drti, predSAVE, drtmp + SAVE_HOST_GREGS_V5 \drti, \predSAVE, \drtmp +.endm /* SAVE_HOST_GREGS_TO_VIRT_V5 */ + +.macro SAVE_HOST_GREGS_TO_VIRT_UNEXT drti, drtmp + /* not used */ +.endm /* SAVE_HOST_GREGS_TO_VIRT_UNEXT */ + +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#else /* ! 
CONFIG_KVM_HOST_MODE && ! CONFIG_KVM_GUEST_KERNEL */ +/* It is native host kernel without any virtualization */ +.macro SAVE_HOST_GREGS_TO_VIRT_V2 drti, predSAVE, drtmp, rtmp0, rtmp1 + /* not used */ +.endm /* SAVE_VCPU_STATE_GREGS */ + +.macro SAVE_HOST_GREGS_TO_VIRT_V5 drti, predSAVE, drtmp + /* not used */ +.endm /* SAVE_GREGS_TO_VIRT */ + +.macro SAVE_HOST_GREGS_TO_VIRT_UNEXT drti, drtmp + /* not used */ +.endm /* SAVE_HOST_GREGS_TO_VIRT_UNEXT */ + +#endif /* CONFIG_KVM_HOST_MODE */ + +/* + * same as C function above but as result should be set %pred28 + * argument sys_num_reg_no is assembler register number containing # of + * system call (for example %r0) + * %pred28 == 1 if system call is generic and doesn use pt_regs structure + */ +.macro IS_SYS_CALL_GENERIC sys_num_reg_no + cmpesb sys_num_reg_no, __NR_clone, %pred0; + cmpesb sys_num_reg_no, __NR_clone_thread, %pred1; + cmpesb sys_num_reg_no, __NR_fork, %pred2; + cmpesb sys_num_reg_no, __NR_vfork, %pred3; +{ + pass %pred0, @p0; + pass %pred1, @p1; + pass %pred2, @p2; + pass %pred3, @p3; + andp ~@p0, ~@p1, @p4; + andp ~@p2, ~@p3, @p5; + andp @p4, @p5, @p6; + pass @p6, %pred27; +} + cmpesb sys_num_reg_no, __NR_execve, %pred4; + cmpesb sys_num_reg_no, __NR_rt_sigreturn, %pred5; + cmpesb sys_num_reg_no, __NR_e2k_sigsetjmp, %pred6; + cmpesb sys_num_reg_no, __NR_e2k_longjmp, %pred7; +{ + pass %pred4, @p0; + pass %pred5, @p1; + pass %pred6, @p2; + pass %pred7, @p3; + andp ~@p0, ~@p1, @p4; + andp ~@p2, ~@p3, @p5; + andp @p4, @p5, @p6; + pass @p6, %pred30; +} + cmpesb sys_num_reg_no, __NR_e2k_longjmp2, %pred8; + cmpesb sys_num_reg_no, __NR_sigaltstack, %pred9; + cmpesb sys_num_reg_no, __NR_rt_sigaction, %pred10; + cmpesb sys_num_reg_no, __NR_rt_sigsuspend, %pred11; +{ + pass %pred8, @p0; + pass %pred9, @p1; + pass %pred10, @p2; + pass %pred11, @p3; + andp ~@p0, ~@p1, @p4; + andp ~@p2, ~@p3, @p5; + andp @p4, @p5, @p6; + pass @p6, %pred29; +} +{ + nop 2; + pass %pred27, @p0; + pass %pred30, @p1; + pass %pred29, @p2; 
+ andp @p0, @p1, @p4; + andp @p2, @p4, @p5; + pass @p5, %pred28; +} + cmpesb sys_num_reg_no, __NR_ioctl, %pred13; + cmpesb sys_num_reg_no, __NR_ipc, %pred14; +{ + pass %pred13, @p0; + pass %pred14, @p1; + pass %pred28, @p2; + andp ~@p0, ~@p1, @p4; + andp @p2, @p4, @p5; + pass @p5, %pred28; +} +.endm /* IS_SYS_CALL_GENERIC */ + +#endif /* __ASSEMBLY__ */ + +#endif /* _E2K_KVM_TRAP_TABLE_ASM_H */ diff --git a/arch/e2k/include/asm/kvm/trap_table.h b/arch/e2k/include/asm/kvm/trap_table.h new file mode 100644 index 000000000000..4a59da84f978 --- /dev/null +++ b/arch/e2k/include/asm/kvm/trap_table.h @@ -0,0 +1,831 @@ +#ifndef __KVM_E2K_TRAP_TABLE_H +#define __KVM_E2K_TRAP_TABLE_H + +/* Does not include this header directly, include */ + +#ifndef __ASSEMBLY__ + +#include +#include + +#include +#include +#include +#include +#include + +#undef DEBUG_KVM_GUEST_TRAPS_MODE +#undef DebugKVMGT +#define DEBUG_KVM_GUEST_TRAPS_MODE 0 /* KVM guest trap debugging */ +#define DebugKVMGT(fmt, args...) \ +({ \ + if (DEBUG_KVM_GUEST_TRAPS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_VERBOSE_GUEST_TRAPS_MODE +#undef DebugKVMVGT +#define DEBUG_KVM_VERBOSE_GUEST_TRAPS_MODE 0 /* KVM verbose guest */ + /* trap debugging */ +#define DebugKVMVGT(fmt, args...) \ +({ \ + if (DEBUG_KVM_VERBOSE_GUEST_TRAPS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +# define TT_BUG_ON(cond) BUG_ON(cond) +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native guest kernel */ +# define TT_BUG_ON(cond) BUG_ON(cond) +#else /* ! CONFIG_PARAVIRT_GUEST && ! 
CONFIG_KVM_GUEST_KERNEL */ +/* it is native kernel or host kernel with virtualization support */ +# define TT_BUG_ON(cond) do { } while (0) +#endif /* CONFIG_PARAVIRT_GUEST */ + +/* structure of result of trap passing to guest functions */ +#define KVM_PASSED_TRAPS_MASK ((1ULL << (exc_max_num + 1)) - 1) +#define KVM_GUEST_KERNEL_ADDR_PF_BIT (exc_max_num + 1) +#define KVM_SHADOW_PT_PROT_PF_BIT (exc_max_num + 2) +#define KVM_PASS_RESULT_PF_BIT KVM_SHADOW_PT_PROT_PF_BIT +#define KVM_PASS_RESULT_PF_MASK \ + ((1ULL << (KVM_PASS_RESULT_PF_BIT + 1)) - 1) + +/* events to complete VCPU trap handling */ +#define KVM_PV_MMU_RESTORE_CONTEXT_PF_BIT (exc_max_num + 5) +#define KVM_SHADOW_NONP_PF_BIT (exc_max_num + 6) + +#define KVM_GUEST_KERNEL_ADDR_PF_MASK \ + (1ULL << KVM_GUEST_KERNEL_ADDR_PF_BIT) +#define KVM_SHADOW_PT_PROT_PF_MASK \ + (1ULL << KVM_SHADOW_PT_PROT_PF_BIT) +#define KVM_SHADOW_NONP_PF_MASK \ + (1ULL << KVM_SHADOW_NONP_PF_BIT) +#define KVM_PV_MMU_RESTORE_CONTEXT_PF_MASK \ + (1ULL << KVM_PV_MMU_RESTORE_CONTEXT_PF_BIT) + +#define KVM_NOT_GUEST_TRAP_RESULT 0ULL +#define KVM_TRAP_IS_PASSED(trap_no) (1ULL << (trap_no)) +#define KVM_GUEST_KERNEL_ADDR_PF KVM_GUEST_KERNEL_ADDR_PF_MASK +#define KVM_SHADOW_PT_PROT_PF KVM_SHADOW_PT_PROT_PF_MASK +#define KVM_SHADOW_NONP_PF KVM_SHADOW_NONP_PF_MASK +#define KVM_NEED_COMPLETE_PF_MASK \ + (KVM_PV_MMU_RESTORE_CONTEXT_PF_MASK) + +#define KVM_IS_ERROR_RESULT_PF(hret) ((long)(hret) < 0) +#define KVM_GET_PASS_RESULT_PF(hret) ((hret) & KVM_PASS_RESULT_PF_MASK) +#define KVM_IS_NOT_GUEST_TRAP(hret) \ + (KVM_GET_PASS_RESULT_PF(hret) == KVM_NOT_GUEST_TRAP_RESULT) +#define KVM_GET_PASSED_TRAPS(hret) \ + (KVM_GET_PASS_RESULT_PF(hret) & KVM_PASSED_TRAPS_MASK) +#define KVM_IS_TRAP_PASSED(hret) (KVM_GET_PASSED_TRAPS(hret) != 0) +#define KVM_IS_GUEST_KERNEL_ADDR_PF(hret) \ + (KVM_GET_PASS_RESULT_PF(hret) == KVM_GUEST_KERNEL_ADDR_PF) +#define KVM_IS_SHADOW_PT_PROT_PF(hret) \ + (KVM_GET_PASS_RESULT_PF(hret) == KVM_SHADOW_PT_PROT_PF) +#define 
KVM_IS_SHADOW_NONP_PF(hret) \ + ((hret) & KVM_SHADOW_NONP_PF_MASK) +#define KVM_GET_NEED_COMPLETE_PF(hret) \ + ((hret) & KVM_NEED_COMPLETE_PF_MASK) +#define KVM_IS_NEED_RESTORE_CONTEXT_PF(hret) \ + ((KVM_GET_NEED_COMPLETE_PF(hret) & \ + KVM_PV_MMU_RESTORE_CONTEXT_PF_MASK) != 0) +#define KVM_CLEAR_NEED_RESTORE_CONTEXT_PF(hret) \ + (KVM_GET_NEED_COMPLETE_PF(hret) & \ + ~KVM_PV_MMU_RESTORE_CONTEXT_PF_MASK) + +static inline unsigned int +kvm_host_is_kernel_data_stack_bounds(bool on_kernel, e2k_usd_lo_t usd_lo) +{ + return native_is_kernel_data_stack_bounds(true, usd_lo); +} + +#ifdef CONFIG_VIRTUALIZATION +/* It is native host guest kernel with virtualization support */ +/* or paravirtualized host and guest kernel */ +/* or pure guest kernel */ +static inline unsigned int +is_kernel_data_stack_bounds(bool on_kernel, e2k_usd_lo_t usd_lo) +{ + return kvm_host_is_kernel_data_stack_bounds(on_kernel, usd_lo); +} +#endif /* CONFIG_VIRTUALIZATION */ + +/* + * Hypervisor supports light hypercalls + * Lighte hypercalls does not: + * - switch to kernel stacks + * - use data stack + * - call any function wich can use stack + * So SBR does not switch to kernel stack, but we use + * SBR value to calculate user/kernel mode of trap/system call + * Light hypercals can be trapped (page fault on guest address, for example) + * In this case SBR value shows user trap mode, but trap occurs on hypervisor + * and we need know about it to do not save/restore global registers which + * used by kernel to optimaze access to current/current_thread_info() + */ + +#define CR1_LO_PSR_PM_SHIFT 57 /* privileged mode */ + +#ifndef CONFIG_VIRTUALIZATION +/* it is native kernel without any virtualization */ + +#define handle_guest_traps(regs) /* none any guests */ +static __always_inline void +init_guest_traps_handling(struct pt_regs *regs, bool user_mode_trap) +{ +} +static __always_inline void +init_guest_syscalls_handling(struct pt_regs *regs) +{ +} + +static inline bool 
+is_guest_proc_stack_bounds(struct pt_regs *regs) +{ + return false; /* none any guest */ +} +static inline bool +is_guest_chain_stack_bounds(struct pt_regs *regs) +{ + return false; /* none any guest */ +} +static inline bool +is_guest_TIRs_frozen(struct pt_regs *regs) +{ + return false; /* none any guest */ +} + +static inline bool +handle_guest_last_wish(struct pt_regs *regs) +{ + return false; /* none any guest and any wishes from */ +} + +/* + * Following functions run on host, check if traps occurred on guest user + * or kernel, so probably should be passed to guest kernel to handle. + * None any guests when virtualization is off + */ +static inline unsigned long +pass_aau_trap_to_guest(struct pt_regs *regs, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + return 0; +} +static inline unsigned long +pass_the_trap_to_guest(struct pt_regs *regs, int trap_no, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + return 0; +} +static inline unsigned long +pass_stack_bounds_trap_to_guest(struct pt_regs *regs, + bool proc_bounds, bool chain_bounds) +{ + return 0; +} +static inline unsigned long +pass_coredump_trap_to_guest(struct pt_regs *regs) +{ + return 0; +} +static inline unsigned long +pass_interrupt_to_guest(struct pt_regs *regs, int trap_no, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + return 0; +} +static inline unsigned long +pass_nm_interrupt_to_guest(struct pt_regs *regs, int trap_no, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + return 0; +} +static inline unsigned long +pass_virqs_to_guest(struct pt_regs *regs, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + return 0; +} +static inline unsigned long +pass_clw_fault_to_guest(struct pt_regs *regs, trap_cellar_t *tcellar) +{ + return 0; +} +static inline unsigned long +pass_page_fault_to_guest(struct pt_regs *regs, trap_cellar_t *tcellar) +{ + return 0; +} +static inline void +complete_page_fault_to_guest(unsigned long what_complete) +{ +} +#else /* CONFIG_VIRTUALIZATION */ + +/* + * Special 
section of kernel image to be common for host and guest kernel and + * to support paravirtualized host and guest running. + * Functions at this section have fixed addressees and offsets into both images + * Any function call from this section will run host image copy of this + * function because of only host image section mapped to both image - host and + * guest. + */ +#define __to_paravirt_guest \ + __attribute__((__section__(".to_paravirt_guest"))) +#define __to_guest \ + __attribute__((__section__(".to_guest"))) + +extern char __ptext_host_start[], __ptext_host_end[]; + +/* + * KVM guest kernel trap handling support + */ + +/* results of trap handling */ +typedef enum trap_hndl { + GUEST_TRAP_IMPOSSIBLE, /* guest kernel does not support */ + /* so guest trap cannot be occured */ + GUEST_TRAP_NOT_HANDLED, /* trap on guest, but guest kernel */ + /* cannot handle the trap */ + GUEST_TRAP_HANDLED, /* guest trap was successfully */ + /* handled */ + GUEST_TRAP_FAILED, /* guest trap handling failed */ +} trap_hndl_t; + +extern trap_hndl_t kvm_do_handle_guest_traps(struct pt_regs *regs); + +extern bool kvm_is_guest_TIRs_frozen(struct pt_regs *regs); +extern bool kvm_is_guest_proc_stack_bounds(struct pt_regs *regs); +extern bool kvm_is_guest_chain_stack_bounds(struct pt_regs *regs); +extern unsigned long kvm_host_aau_page_fault(struct kvm_vcpu *vcpu, + pt_regs_t *regs, + unsigned long TIR_hi, unsigned long TIR_lo); +extern unsigned long kvm_pass_the_trap_to_guest(struct kvm_vcpu *vcpu, + pt_regs_t *regs, + unsigned long TIR_hi, unsigned long TIR_lo, + int trap_no); +extern unsigned long kvm_pass_stack_bounds_trap_to_guest(struct pt_regs *regs, + bool proc_bounds, bool chain_bounds); +extern unsigned long kvm_pass_virqs_to_guest(struct pt_regs *regs, + unsigned long TIR_hi, unsigned long TIR_lo); +extern unsigned long kvm_pass_coredump_trap_to_guest(struct kvm_vcpu *vcpu, + struct pt_regs *regs); +extern unsigned long kvm_pass_clw_fault_to_guest(struct pt_regs *regs, + 
trap_cellar_t *tcellar); +extern unsigned long kvm_pass_page_fault_to_guest(struct pt_regs *regs, + trap_cellar_t *tcellar); +extern void kvm_complete_page_fault_to_guest(unsigned long what_complete); + +extern int do_hret_last_wish_intc(struct kvm_vcpu *vcpu, struct pt_regs *regs); + +extern void trap_handler_trampoline(void); +extern void syscall_handler_trampoline(void); +extern void trap_handler_trampoline_continue(void); +extern void syscall_handler_trampoline_continue(u64 sys_rval); +extern void syscall_fork_trampoline(void); +extern void syscall_fork_trampoline_continue(u64 sys_rval); +extern notrace long return_pv_vcpu_trap(void); +extern notrace long return_pv_vcpu_syscall(void); + +static __always_inline void +kvm_init_guest_traps_handling(struct pt_regs *regs, bool user_mode_trap) +{ + regs->traps_to_guest = 0; /* only for host */ + regs->is_guest_user = false; /* only for host */ + regs->g_stacks_valid = false; /* only for host */ + if (user_mode_trap && test_thread_flag(TIF_LIGHT_HYPERCALL) && + (NATIVE_NV_READ_CR1_LO_REG().CR1_lo_pm)) { + regs->flags.light_hypercall = 1; + } +} + +static __always_inline void +kvm_init_guest_syscalls_handling(struct pt_regs *regs) +{ + regs->traps_to_guest = 0; /* only for host */ + regs->is_guest_user = true; /* only for host */ + regs->g_stacks_valid = false; /* only for host */ +} + +static inline void +kvm_exit_handle_syscall(e2k_addr_t sbr, e2k_usd_hi_t usd_hi, + e2k_usd_lo_t usd_lo, e2k_upsr_t upsr) +{ + KVM_WRITE_UPSR_REG_VALUE(upsr.UPSR_reg); + KVM_WRITE_USD_REG(usd_hi, usd_lo); + KVM_WRITE_SBR_REG_VALUE(sbr); +} + +/* + * The function should return boolen value 'true' if the trap is wish + * of host to inject VIRQs interrupt and return 'false' if the wish is not + * from host to deal with guest + */ +static inline bool +kvm_handle_guest_last_wish(struct pt_regs *regs) +{ + struct kvm_vcpu *vcpu = current_thread_info()->vcpu; + + if (vcpu == NULL) { + /* it is not guest VCPU thread, or completed */ + return 
false; + } + if (vcpu->arch.trap_wish) { + /* some trap was injected, goto trap handling */ + regs->traps_to_guest |= vcpu->arch.trap_mask_wish; + vcpu->arch.trap_mask_wish = 0; + return true; + } + if (vcpu->arch.virq_wish) { + /* trap is only to interrupt guest kernel on guest mode */ + /* to provide injection of pending VIRQs on guest */ + if (!vcpu->arch.virq_injected) { + vcpu->arch.virq_injected = true; + vcpu->arch.virq_wish = false; + return true; + } /* else already injected */ + } + return false; +} + +/* + * Guest kernel (same as host) does not use AAU, so if trap occurred on + * guest kernel it is error. Do not pass the trap to guest, host will handle + * thr trap and kill the guest. + * Guest user can use AAU and if trap occurred on guest user, then this trap + * need pass to handle one by guest kernel + */ +static inline bool +kvm_should_pass_aau_kernel_trap_to_guest(struct pt_regs *regs) +{ + return false; +} +static inline bool +kvm_should_pass_aau_user_trap_to_guest(struct pt_regs *regs) +{ + return true; +} + +/* + * Some traps need not pass to guest, they can be handled by host only. 
+ */ + +#define kvm_needless_guest_exc_mask (0UL | \ + exc_interrupt_mask | \ + exc_nm_interrupt_mask | \ + exc_mem_error_mask | \ + exc_data_page_mask | \ + 0UL) +#define kvm_guest_exc_mask (exc_all_mask & \ + ~kvm_needless_guest_exc_mask) + +static inline bool +kvm_should_pass_the_trap_to_guest(struct pt_regs *regs, int trap_no) +{ + unsigned long trap_mask = (1UL << trap_no); + + if (trap_no == exc_last_wish_num) { + struct kvm_vcpu *vcpu = current_thread_info()->vcpu; + + if (vcpu->arch.is_hv) { + if (vcpu->arch.virq_wish || vcpu->arch.vm_exit_wish) { + /* it is last wish to support guest on host */ + /* do not pass to guest */ + return false; + } + } else if (vcpu->arch.is_pv) { + if (vcpu->arch.virq_wish) { + /* it is paravirtualized guest, pass */ + /* interrupt to guest, if it is enabled */ + ; + } else if (vcpu->arch.trap_wish) { + /* it is wish to inject some trap to guest */ + ; + } else { + /* there is not any wish for guest */ + return false; + } + } else { + KVM_BUG_ON(true); + } + } + if (trap_mask & kvm_guest_exc_mask) + return true; + return false; +} +/* + * Some traps will be passed to guest, but by host handler of the trap. + */ + +#define kvm_defer_guest_exc_mask (0UL | \ + exc_data_page_mask | \ + exc_mem_lock_mask | \ + exc_ainstr_page_miss_mask | \ + exc_ainstr_page_prot_mask | \ + 0UL) +#define kvm_pv_defer_guest_exc_mask (0UL) + +static inline bool +kvm_defered_pass_the_trap_to_guest(struct pt_regs *regs, int trap_no) +{ + unsigned long trap_mask = (1UL << trap_no); + + if (trap_mask & kvm_pv_defer_guest_exc_mask) + return true; + return false; +} + +/* + * The function controls traps handling by guest kernel. + * Traps were passed to guest kernel (set TIRs and trap cellar) before + * calling the function. 
+ * Result of function is bool 'traps were handled by guest' + * If the trap is trap of guest user and was handled by guest kernel + * (probably with fault), then the function return bool 'true' and handling + * of this trap can be completed. + * If the trap is not trap of guest user or cannot be handled by guest kernel, + * then the function return bool 'false' and handling of this trap should + * be continued by host. + * WARNING: The function can be called only on host kernel (guest cannot + * run own guests. + */ +static inline bool kvm_handle_guest_traps(struct pt_regs *regs) +{ + struct kvm_vcpu *vcpu; + int ret; + + vcpu = current_thread_info()->vcpu; + if (!due_to_guest_trap_on_pv_hv_host(vcpu, regs)) { + DebugKVMVGT("trap occurred outside of guest user and " + "kernel\n"); + return false; + } + if (regs->traps_to_guest == 0) { + DebugKVMVGT("it is recursive trap on host and can be handled " + "only by host\n"); + return false; + } + if (vcpu == NULL) { + DebugKVMVGT("it is not VCPU thread or VCPU is not yet " + "created\n"); + return false; + } + ret = kvm_do_handle_guest_traps(regs); + regs->traps_to_guest = 0; + + if (ret == GUEST_TRAP_HANDLED) { + DebugKVMGT("the guest trap handled\n"); + return true; + } else if (ret == GUEST_TRAP_FAILED) { + DebugKVMGT("the guest trap handled, but with fault\n"); + return true; + } else if (ret == GUEST_TRAP_NOT_HANDLED) { + DebugKVMGT("guest cannot handle the guest trap\n"); + return false; + } else if (ret == GUEST_TRAP_IMPOSSIBLE) { + DebugKVMGT("it is not guest user trap\n"); + return false; + } else { + BUG_ON(true); + } + return false; +} + +static inline bool +is_guest_proc_stack_bounds(struct pt_regs *regs) +{ + if (!kvm_test_intc_emul_flag(regs)) + return false; + + return kvm_is_guest_proc_stack_bounds(regs); +} +static inline bool +is_guest_chain_stack_bounds(struct pt_regs *regs) +{ + if (!kvm_test_intc_emul_flag(regs)) + return false; + + return kvm_is_guest_chain_stack_bounds(regs); +} + +#ifndef 
CONFIG_KVM_GUEST_KERNEL +/* It is native host kernel with virtualization support on */ +/* or it is paravirtualized host and guest kernel */ +/* guest cannot support hypervisor mode and create own virtual machines, */ +/* so in paravirtualized mode the following functions are called only */ +/* on host mode and should not be used on guest mode */ + +#define handle_guest_traps(regs) kvm_handle_guest_traps(regs) + +static __always_inline void +init_guest_traps_handling(struct pt_regs *regs, bool user_mode_trap) +{ + kvm_init_guest_traps_handling(regs, user_mode_trap); +} +static __always_inline void +init_guest_syscalls_handling(struct pt_regs *regs) +{ + kvm_init_guest_syscalls_handling(regs); +} + +static inline bool +is_guest_TIRs_frozen(struct pt_regs *regs) +{ + if (!kvm_test_intc_emul_flag(regs)) + return false; + + return kvm_is_guest_TIRs_frozen(regs); +} + +static inline bool +handle_guest_last_wish(struct pt_regs *regs) +{ + if (!kvm_test_intc_emul_flag(regs)) + return 0; + + return kvm_handle_guest_last_wish(regs); +} +static inline void +kvm_host_instr_page_fault(struct pt_regs *regs, tc_fault_type_t ftype, + const int async_instr) +{ + struct kvm_vcpu *vcpu = current_thread_info()->vcpu; + + if (!kvm_test_intc_emul_flag(regs)) { + native_instr_page_fault(regs, ftype, async_instr); + return; + } + + kvm_pv_mmu_instr_page_fault(vcpu, regs, ftype, async_instr); +} + +static inline int +kvm_host_do_aau_page_fault(struct pt_regs *const regs, e2k_addr_t address, + const tc_cond_t condition, const tc_mask_t mask, + const unsigned int aa_no) +{ + if (likely(!kvm_test_intc_emul_flag(regs))) { + return native_do_aau_page_fault(regs, address, condition, mask, + aa_no); + } + + return kvm_pv_mmu_aau_page_fault(current_thread_info()->vcpu, regs, + address, condition, aa_no); +} + +/* + * Following functions run on host, check if traps occurred on guest user + * or kernel, so probably sould be passed to guest kernel to handle. 
+ * In some cases traps should be passed to guest, but need be preliminary + * handled by host (for example hardware stack bounds). + * Functions return flag or mask of traps which passed to guest and + * should not be handled by host + */ +static inline unsigned long +pass_aau_trap_to_guest(struct pt_regs *regs, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + struct kvm_vcpu *vcpu; + + if (!kvm_test_intc_emul_flag(regs)) + return 0; + + vcpu = current_thread_info()->vcpu; + + return kvm_host_aau_page_fault(vcpu, regs, TIR_hi, TIR_lo); +} +static inline unsigned long +pass_stack_bounds_trap_to_guest(struct pt_regs *regs, + bool proc_bounds, bool chain_bounds) +{ + if (!kvm_test_intc_emul_flag(regs)) + return 0; + + return kvm_pass_stack_bounds_trap_to_guest(regs, + proc_bounds, chain_bounds); +} +static inline bool +pass_instr_page_fault_trap_to_guest(struct pt_regs *regs, int trap_no) +{ + if (!kvm_test_intc_emul_flag(regs)) + return false; + + return true; + +} +static inline unsigned long +pass_the_trap_to_guest(struct pt_regs *regs, + unsigned long TIR_hi, unsigned long TIR_lo, + int trap_no) +{ + struct kvm_vcpu *vcpu; + + if (!kvm_test_intc_emul_flag(regs)) + return 0; + + vcpu = current_thread_info()->vcpu; + +/* + if (trap_no == exc_proc_stack_bounds_num) + return pass_stack_bounds_trap_to_guest(regs, true, false); + if (trap_no == exc_chain_stack_bounds_num) + return pass_stack_bounds_trap_to_guest(regs, false, true); + */ + + if (!kvm_should_pass_the_trap_to_guest(regs, trap_no)) { + DebugKVMVGT("trap #%d needs not handled by guest\n", + trap_no); + return 0; + } + if (trap_no == exc_instr_page_miss_num) { + tc_fault_type_t ftype; + + AW(ftype) = 0; + AS(ftype).page_miss = 1; + kvm_pv_mmu_instr_page_fault(vcpu, regs, ftype, 0); + return 1; + } + if (trap_no == exc_instr_page_prot_num) { + tc_fault_type_t ftype; + + AW(ftype) = 0; + AS(ftype).illegal_page = 1; + kvm_pv_mmu_instr_page_fault(vcpu, regs, ftype, 0); + return 1; + } + if (trap_no == 
exc_ainstr_page_miss_num) { + tc_fault_type_t ftype; + + AW(ftype) = 0; + AS(ftype).page_miss = 1; + kvm_pv_mmu_instr_page_fault(vcpu, regs, ftype, 1); + return 1; + } + if (trap_no == exc_ainstr_page_prot_num) { + tc_fault_type_t ftype; + + AW(ftype) = 0; + AS(ftype).illegal_page = 1; + kvm_pv_mmu_instr_page_fault(vcpu, regs, ftype, 1); + return 1; + } + if (trap_no == exc_last_wish_num) { + int r; + + r = do_hret_last_wish_intc(vcpu, regs); + if (r == 0) { + return 1; + } else { + return 0; + } + } + if (kvm_vcpu_in_hypercall(vcpu)) { + /* the trap on host, so handles it by host */ + return 0; + } + if (kvm_defered_pass_the_trap_to_guest(regs, trap_no)) { + DebugKVMVGT("trap #%d will be passed later by host " + "handler of the trap\n", trap_no); + return 0; + } + return kvm_pass_the_trap_to_guest(vcpu, regs, TIR_hi, TIR_lo, trap_no); +} +static inline unsigned long +pass_coredump_trap_to_guest(struct pt_regs *regs) +{ + struct kvm_vcpu *vcpu; + + if (!kvm_test_intc_emul_flag(regs)) + return 0; + + vcpu = current_thread_info()->vcpu; + + + return kvm_pass_coredump_trap_to_guest(vcpu, regs); +} + +/* + * Now interrupts are handled by guest only in bottom half style + * Host pass interrupts to special virtual IRQ process (activate VIRQ VCPU) + * This process activates specified for this VIRQ guest kernel thread + * to handle interrupt. 
+ * So do not pass real interrupts to guest kernel + */ +static inline unsigned long +pass_interrupt_to_guest(struct pt_regs *regs, int trap_no, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + return 0; +} +static inline unsigned long +pass_nm_interrupt_to_guest(struct pt_regs *regs, int trap_no, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + return 0; +} +static inline unsigned long +pass_virqs_to_guest(struct pt_regs *regs, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + if (!kvm_test_intc_emul_flag(regs)) + return 0; + + if (test_thread_flag(TIF_PSEUDOTHREAD)) { + /* it is VIRQ VCPU thread, it cannot handle interrupts */ + return 0; + } + if (!test_thread_flag(TIF_VIRQS_ACTIVE)) { + /* VIRQ VCPU thread is not yet active */ + return 0; + } + return kvm_pass_virqs_to_guest(regs, TIR_hi, TIR_lo); +} +static inline unsigned long +pass_clw_fault_to_guest(struct pt_regs *regs, trap_cellar_t *tcellar) +{ + if (!kvm_test_intc_emul_flag(regs)) + return 0; + + return kvm_pass_clw_fault_to_guest(regs, tcellar); +} +static inline unsigned long +pass_page_fault_to_guest(struct pt_regs *regs, trap_cellar_t *tcellar) +{ + if (!kvm_test_intc_emul_flag(regs)) + return 0; + + return kvm_pass_page_fault_to_guest(regs, tcellar); +} +static inline void +complete_page_fault_to_guest(unsigned long what_complete) +{ + kvm_complete_page_fault_to_guest(what_complete); +} +#endif /* ! CONFIG_KVM_GUEST_KERNEL */ +#endif /* ! 
CONFIG_VIRTUALIZATION */ + +static inline bool +native_is_proc_stack_bounds(struct thread_info *ti, struct pt_regs *regs) +{ + return (user_mode(regs) && + regs->stacks.psp_hi.PSP_hi_ind >= + regs->stacks.psp_hi.PSP_hi_size) || + regs->stacks.psp_hi.PSP_hi_ind <= 0; +} +static inline bool +native_is_chain_stack_bounds(struct thread_info *ti, struct pt_regs *regs) +{ + return (user_mode(regs) && + regs->stacks.pcsp_hi.PCSP_hi_ind >= + regs->stacks.pcsp_hi.PCSP_hi_size) || + regs->stacks.pcsp_hi.PSP_hi_ind <= 0; +} + +#if defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native guest kernel */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host/guest kernel */ +#include +#else /* ! CONFIG_KVM_GUEST_KERNEL && ! CONFIG_PARAVIRT_GUEST */ +/* it is native kernel with or without virtualization support */ +static inline bool +is_proc_stack_bounds(struct thread_info *ti, struct pt_regs *regs) +{ + return native_is_proc_stack_bounds(ti, regs); +} +static inline bool +is_chain_stack_bounds(struct thread_info *ti, struct pt_regs *regs) +{ + return native_is_chain_stack_bounds(ti, regs); +} + +#ifdef CONFIG_VIRTUALIZATION +/* it is host kernel with virtualization support */ +static inline void +instr_page_fault(struct pt_regs *regs, tc_fault_type_t ftype, + const int async_instr) +{ + kvm_host_instr_page_fault(regs, ftype, async_instr); +} + +static inline int +do_aau_page_fault(struct pt_regs *const regs, e2k_addr_t address, + const tc_cond_t condition, const tc_mask_t mask, + const unsigned int aa_no) +{ + return kvm_host_do_aau_page_fault(regs, address, condition, mask, + aa_no); +} +#endif /* CONFIG_VIRTUALIZATION */ + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#else /* __ASSEMBLY__ */ +#include +#endif /* ! 
__ASSEMBLY__ */ + +#endif /* __KVM_E2K_TRAP_TABLE_H */ diff --git a/arch/e2k/include/asm/kvm/ttable-help.h b/arch/e2k/include/asm/kvm/ttable-help.h new file mode 100644 index 000000000000..f4704be96e6c --- /dev/null +++ b/arch/e2k/include/asm/kvm/ttable-help.h @@ -0,0 +1,51 @@ +/* + * + * Copyright (C) 2020 MCST + * + * Definitions of KVM traps handling routines. + */ + +#ifndef _E2K_KVM_TTABLE_HELP_H +#define _E2K_KVM_TTABLE_HELP_H + +#ifdef CONFIG_KVM_HOST_MODE +/* it is native kernel with virtualization support (hypervisor) */ + +#ifdef CONFIG_CPU_HW_CLEAR_RF + +# ifdef GENERATING_HEADER +# define RETURN_PV_VCPU_TRAP_SIZE 0x1 +# define HANDLE_PV_VCPU_SYS_CALL_SIZE 0x1 +# define HANDLE_PV_VCPU_SYS_FORK_SIZE 0x1 +# endif + +# define CLEAR_RETURN_PV_VCPU_TRAP_WINDOW() E2K_DONE() +# define CLEAR_HANDLE_PV_VCPU_SYS_CALL_WINDOW(r0) E2K_SYSCALL_RETURN(r0) +# define CLEAR_HANDLE_PV_VCPU_SYS_FORK_WINDOW(r0) E2K_SYSCALL_RETURN(r0) + +#else /* ! CONFIG_CPU_HW_CLEAR_RF */ + +# ifdef GENERATING_HEADER +# define CLEAR_RETURN_PV_VCPU_TRAP_WINDOW() E2K_EMPTY_CMD(: "ctpr3") +# define CLEAR_HANDLE_PV_VCPU_SYS_CALL_WINDOW(r0) \ + E2K_EMPTY_CMD([_r0] "ir" (r0) : "ctpr3") +# define CLEAR_HANDLE_PV_VCPU_SYS_FORK_WINDOW(r0) \ + E2K_EMPTY_CMD([_r0] "ir" (r0) : "ctpr3") +# define RETURN_PV_VCPU_TRAP_SIZE 0x1 +# define HANDLE_PV_VCPU_SYS_CALL_SIZE 0x1 +# define HANDLE_PV_VCPU_SYS_FORK_SIZE 0x1 +# endif + +#endif /* CONFIG_CPU_HW_CLEAR_RF */ + +#else /* !CONFIG_KVM_HOST_MODE */ +/* It is native guest kernel whithout virtualization support */ +/* Virtualiztion in guest mode cannot be supported */ + +# define CLEAR_RETURN_PV_VCPU_TRAP_WINDOW() +# define CLEAR_HANDLE_PV_VCPU_SYS_CALL_WINDOW(rval) +# define CLEAR_HANDLE_PV_VCPU_SYS_FORK_WINDOW(rval) + +#endif /* CONFIG_KVM_HOST_MODE */ + +#endif /* _E2K_KVM_TTABLE_HELP_H */ diff --git a/arch/e2k/include/asm/kvm/uaccess.h b/arch/e2k/include/asm/kvm/uaccess.h new file mode 100644 index 000000000000..2e86d1a1f4c6 --- /dev/null +++ 
b/arch/e2k/include/asm/kvm/uaccess.h @@ -0,0 +1,211 @@ +#ifndef _E2K_KVM_UACCESS_H_ +#define _E2K_KVM_UACCESS_H_ + +/* + * Host kernel access to User space memory, including guest user space + * Copyright (c) 2020, MCST. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include + +#include +#include +#include +#include +#include + +static inline unsigned long +native_copy_in_user_with_tags(void __user *to, const void __user *from, + unsigned long n) +{ + return __copy_in_user_with_tags(to, from, n); +} + +static inline unsigned long +native_copy_to_user_with_tags(void __user *to, const void *from, + unsigned long n) +{ + return __copy_to_user_with_tags(to, from, n); +} + +static inline unsigned long +native_copy_from_user_with_tags(void *to, const void __user *from, + unsigned long n) +{ + return __copy_from_user_with_tags(to, from, n); +} + +#define native_get_user(kval, uptr) get_user(kval, uptr) +#define native_put_user(kval, uptr) put_user(kval, uptr) + +#ifdef CONFIG_KVM_HOST_MODE +/* it is host kernel with virtualization support */ + +#define host_get_user(kval, uptr, hregs) \ +({ \ + __typeof__(*(uptr)) __user *___pu_ptr = (uptr); \ + int sz_uptr = sizeof(*(uptr)); \ + long res; \ + \ + ___pu_ptr = (!host_test_intc_emul_mode(hregs)) ? \ + (uptr) \ + : \ + kvm_guest_ptr_to_host_ptr((uptr), sz_uptr, \ + true); \ + if (PTR_ERR(___pu_ptr) == -EAGAIN) \ + res = -EAGAIN; \ + else \ + res = (___pu_ptr) ? 
native_get_user(kval, ___pu_ptr) : \ + -EFAULT; \ + (res); \ +}) + +#define host_put_user(kval, uptr, hregs) \ +({ \ + __typeof__(*(uptr)) __user *___pu_ptr = (uptr); \ + int sz_uptr = sizeof(*(uptr)); \ + long res; \ + \ + ___pu_ptr = (!host_test_intc_emul_mode(hregs)) ? \ + (uptr) \ + : \ + kvm_guest_ptr_to_host_ptr((uptr), sz_uptr, \ + true); \ + if (PTR_ERR(___pu_ptr) == -EAGAIN) \ + res = -EAGAIN; \ + else \ + res = (___pu_ptr) ? native_put_user(kval, ___pu_ptr) : \ + -EFAULT; \ + (res); \ +}) + +extern unsigned long kvm_copy_in_user_with_tags(void __user *to, + const void __user *from, unsigned long n); +extern unsigned long kvm_copy_to_user_with_tags(void __user *to, + const void *from, unsigned long n); +extern unsigned long kvm_copy_from_user_with_tags(void *to, + const void __user *from, unsigned long n); + +static inline unsigned long +host_copy_in_user_with_tags(void __user *to, const void __user *from, + unsigned long n, const struct pt_regs *regs) +{ + if (likely(!host_test_intc_emul_mode(regs))) { + return native_copy_in_user_with_tags(to, from, n); + } + return kvm_copy_in_user_with_tags(to, from, n); +} + +static inline unsigned long +host_copy_to_user_with_tags(void __user *to, const void *from, + unsigned long n, const struct pt_regs *regs) +{ + if (likely(!host_test_intc_emul_mode(regs))) { + return native_copy_to_user_with_tags(to, from, n); + } + return kvm_copy_to_user_with_tags(to, from, n); +} + +static inline unsigned long +host_copy_from_user_with_tags(void *to, const void __user *from, + unsigned long n, const struct pt_regs *regs) +{ + if (likely(!host_test_intc_emul_mode(regs))) { + return native_copy_from_user_with_tags(to, from, n); + } + return kvm_copy_from_user_with_tags(to, from, n); +} + +extern int kvm_vcpu_copy_host_to_guest(struct kvm_vcpu *vcpu, + const void *host, void __user *guest, size_t size, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch); +extern int kvm_vcpu_copy_host_from_guest(struct 
kvm_vcpu *vcpu, + void *host, const void __user *guest, size_t size, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch); + +static inline int +fast_tagged_memory_copy_to_user(void __user *dst, const void *src, + size_t len, const struct pt_regs *regs, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + struct kvm_vcpu *vcpu; + + if (likely(!host_test_intc_emul_mode(regs))) { + return native_fast_tagged_memory_copy_to_user(dst, src, + len, regs, + strd_opcode, ldrd_opcode, prefetch); + } + + vcpu = native_current_thread_info()->vcpu; + KVM_BUG_ON(vcpu == NULL); + return kvm_vcpu_copy_host_to_guest(vcpu, src, dst, len, + strd_opcode, ldrd_opcode, prefetch); +} + +static inline int +fast_tagged_memory_copy_from_user(void *dst, const void __user *src, + size_t len, const struct pt_regs *regs, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + struct kvm_vcpu *vcpu; + + if (likely(!host_test_intc_emul_mode(regs))) { + return native_fast_tagged_memory_copy_from_user(dst, src, + len, regs, + strd_opcode, ldrd_opcode, prefetch); + } + + vcpu = native_current_thread_info()->vcpu; + KVM_BUG_ON(vcpu == NULL); + return kvm_vcpu_copy_host_from_guest(vcpu, dst, src, len, + strd_opcode, ldrd_opcode, prefetch); +} +#else /* !CONFIG_KVM_HOST_MODE */ +/* it is not host kernel, it is native kernel without virtualization */ + +#define host_get_user(kval, uptr, hregs) native_get_user(kval, uptr) +#define host_put_user(kval, uptr, hregs) native_put_user(kval, uptr) + +static inline unsigned long +host_copy_in_user_with_tags(void __user *to, const void __user *from, + unsigned long n, const struct pt_regs *regs) +{ + return native_copy_in_user_with_tags(to, from, n); +} + +static inline unsigned long +host_copy_to_user_with_tags(void __user *to, const void *from, + unsigned long n, const struct pt_regs *regs) +{ + return native_copy_to_user_with_tags(to, from, n); +} + +static inline unsigned long 
+host_copy_from_user_with_tags(void *to, const void __user *from, + unsigned long n, const struct pt_regs *regs) +{ + return native_copy_from_user_with_tags(to, from, n); +} +#endif /* CONFIG_KVM_HOST_MODE */ + +static inline unsigned long +host_copy_to_user(void __user *to, const void *from, + unsigned long n, const struct pt_regs *regs) +{ + return host_copy_to_user_with_tags(to, from, n, regs); +} + +#endif /* _E2K_KVM_UACCESS_H_ */ diff --git a/arch/e2k/include/asm/kvm/vcpu-regs-debug-inline.h b/arch/e2k/include/asm/kvm/vcpu-regs-debug-inline.h new file mode 100644 index 000000000000..660a3568cbb2 --- /dev/null +++ b/arch/e2k/include/asm/kvm/vcpu-regs-debug-inline.h @@ -0,0 +1,86 @@ +/* + * KVM guest kernel processes support + * Copyright 2011 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_VCPU_REGS_DEBUG_INLINE_H +#define _E2K_KVM_VCPU_REGS_DEBUG_INLINE_H + +#include +#include +#include + +#ifdef VCPU_REGS_DEBUG + +static inline void init_vcpu_regs_trace(void) +{ + vcpu_regs_trace_t *trace; + + trace = get_vcpu_regs_trace_struct(); + atomic_set(&trace->count, 0); + vcpu_regs_trace_on = true; +} + +static inline void dump_vcpu_regs_entry(vcpu_regs_t *regs, int entry_no) +{ + u32 updated = regs->updated; + + pr_alert("[%02d] : PSR %02x\tUPSR %03x\tunder UPSR %d\n", + entry_no, regs->psr, regs->upsr, regs->under_upsr); + pr_alert(" updated : %s %s %s %s\n", + (updated & PSR_UPDATE_MASK) ? "PSR" : "", + (updated & UPSR_UPDATE_MASK) ? "UPSR" : "", + (updated & UNDER_UPSR_UPDATE_MASK) ? "under UPSR" : "", + (regs->injected) ? 
"injected IRQs" : ""); + pr_alert(" IP %pF called from IP %pF\n", + (void *)regs->IP, (void *)regs->IP_from); + pr_alert(" clock : start 0x%llx end 0x%llx delta 0x%llx\n", + regs->clock_start, regs->clock_end, + regs->clock_end - regs->clock_start); +} + +static inline void dump_vcpu_regs_trace(void) +{ + vcpu_regs_trace_t *trace; + vcpu_regs_t *regs; + int count, ent, num, entries; + + /* stop tracing */ + vcpu_regs_trace_on = false; + + trace = get_vcpu_regs_trace_struct(); + count = atomic_read(&trace->count); + pr_alert("CPU #%d : Trace of VCPU #%d some registers update history\n", + raw_smp_processor_id(), KVM_READ_VCPU_ID()); + if (count == 0) { + pr_alert(" trace is empty\n"); + return; + } + + entries = (count > MAX_VCPU_REGS_TRACE_NUM) ? + MAX_VCPU_REGS_TRACE_NUM : count; + for (ent = VCPU_REGS_TRACE_INDEX(count), num = 0; + num < entries; + ent = VCPU_REGS_TRACE_INDEX(ent - 1), num++) { + regs = &trace->regs[ent]; + dump_vcpu_regs_entry(regs, ent); + } +} +#else /* !VCPU_REGS_DEBUG */ + +#define vcpu_regs_trace_on false + +static inline void init_vcpu_regs_trace(void) +{ +} + +#define trace_vcpu_upsr_update(upsr_val, injected_irqs) +#define trace_vcpu_psr_update(psr_val, under_upsr) + +static inline void dump_vcpu_regs_trace(void) +{ +} + +#endif /* VCPU_REGS_DEBUG */ + +#endif /* ! _E2K_KVM_VCPU_REGS_DEBUG_INLINE_H */ diff --git a/arch/e2k/include/asm/kvm/vcpu-regs-debug.h b/arch/e2k/include/asm/kvm/vcpu-regs-debug.h new file mode 100644 index 000000000000..386e2263e0f3 --- /dev/null +++ b/arch/e2k/include/asm/kvm/vcpu-regs-debug.h @@ -0,0 +1,130 @@ +/* + * KVM guest kernel processes support + * Copyright 2011 Salavat S. 
Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_VCPU_REGS_DEBUG_H +#define _E2K_KVM_VCPU_REGS_DEBUG_H + +/* do not include this header directly, only through asm/kvm/guest.h */ + +#include + +#undef VCPU_REGS_DEBUG + +typedef struct vcpu_regs { + u64 IP; + u64 IP_from; + u64 clock_start; + u64 clock_end; + u32 psr; + u32 upsr; + bool under_upsr; + u8 updated; + bool injected; +} vcpu_regs_t; + +#define MAX_VCPU_REGS_TRACE_NUM 32 +#define VCPU_REGS_TRACE_MASK (MAX_VCPU_REGS_TRACE_NUM - 1) +#define VCPU_REGS_TRACE_INDEX(count) ((count) & VCPU_REGS_TRACE_MASK) + +typedef struct vcpu_regs_trace { + atomic_t count; + vcpu_regs_t regs[MAX_VCPU_REGS_TRACE_NUM]; +} vcpu_regs_trace_t; + +#define PSR_UPDATE_MASK 0x01U +#define UPSR_UPDATE_MASK 0x02U +#define UNDER_UPSR_UPDATE_MASK 0x04U + +#define GET_CLOCK_REG() NATIVE_READ_CLKR_REG_VALUE() + +#define GUEST_GET_IRQS_UNDER_UPSR() \ +({ \ + kvm_vcpu_state_t *vcpu_state; \ + bool under_upsr; \ + \ + KVM_GET_VCPU_STATE_BASE(vcpu_state); \ + under_upsr = vcpu_state->irqs_under_upsr; \ + under_upsr; \ +}) + +#ifdef VCPU_REGS_DEBUG + +#undef PSR +extern bool vcpu_regs_trace_on; +extern int vcpu_regs_trace_on_num; + +#define get_vcpu_regs_trace_struct() \ +({ \ + struct kvm_vcpu_state *vcpu_state; \ + \ + KVM_GET_VCPU_STATE_BASE(vcpu_state); \ + &vcpu_state->trace; \ +}) + +#define get_next_vcpu_regs_trace() \ +({ \ + vcpu_regs_trace_t *trace; \ + vcpu_regs_t *regs; \ + int count; \ + \ + if (likely(!vcpu_regs_trace_on)) { \ + regs = NULL; \ + } else { \ + trace = get_vcpu_regs_trace_struct(); \ + count = atomic_inc_return(&trace->count); \ + regs = &trace->regs[VCPU_REGS_TRACE_INDEX(count)]; \ + regs->clock_start = GET_CLOCK_REG(); \ + regs->IP = NATIVE_READ_IP_REG_VALUE(); \ + regs->IP_from = NATIVE_NV_READ_CR0_HI_REG_VALUE(); \ + regs->updated = 0; \ + regs->psr = GUEST_GET_CPU_SREG(E2K_PSR); \ + regs->upsr = GUEST_GET_CPU_SREG(UPSR); \ + regs->under_upsr = GUEST_GET_IRQS_UNDER_UPSR(); \ + regs->injected = false; \ + } \ + 
regs; \ +}) + +#define trace_vcpu_upsr_update(upsr_val, injected_irqs) \ +do { \ + vcpu_regs_t *regs; \ + if (likely(!vcpu_regs_trace_on)) \ + break; \ + regs = get_next_vcpu_regs_trace(); \ + if (unlikely(regs == NULL)) \ + break; \ + regs->upsr = (upsr_val); \ + regs->updated |= UPSR_UPDATE_MASK; \ + if (injected_irqs) { \ + regs->injected = (injected_irqs); \ + } \ + E2K_CMD_SEPARATOR; \ + regs->clock_end = GET_CLOCK_REG(); \ +} while (false) + +#define trace_vcpu_psr_update(psr_val, under_upsr_mode) \ +do { \ + vcpu_regs_t *regs; \ + if (likely(!vcpu_regs_trace_on)) \ + break; \ + regs = get_next_vcpu_regs_trace(); \ + if (unlikely(regs == NULL)) \ + break; \ + regs->psr = (psr_val); \ + regs->updated |= PSR_UPDATE_MASK | UNDER_UPSR_UPDATE_MASK; \ + regs->under_upsr = (under_upsr_mode); \ + E2K_CMD_SEPARATOR; \ + regs->clock_end = GET_CLOCK_REG(); \ +} while (false) + +#else /* !VCPU_REGS_DEBUG */ + +#define vcpu_regs_trace_on false +#define trace_vcpu_upsr_update(upsr_val, injected_irqs) +#define trace_vcpu_psr_update(psr_val, under_upsr) + +#endif /* VCPU_REGS_DEBUG */ + +#endif /* ! _E2K_KVM_VCPU_REGS_DEBUG_H */ diff --git a/arch/e2k/include/asm/kvm_host.h b/arch/e2k/include/asm/kvm_host.h new file mode 100644 index 000000000000..b1c8ac7fd4bc --- /dev/null +++ b/arch/e2k/include/asm/kvm_host.h @@ -0,0 +1,1475 @@ +/* + * Kernel-based Virtual Machine driver for Linux + * + * This header defines architecture specific interfaces, e2k version + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ * + */ + +#ifndef _ASM_E2K_KVM_HOST_H +#define _ASM_E2K_KVM_HOST_H + +#include +#include +#include /* irq_handler_t */ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define KVM_E2K_DEBUG + +#ifdef KVM_E2K_DEBUG +#define KVM_BUG_ON(__condition) BUG_ON(__condition) +#else /* ! KVM_E2K_DEBUG */ +#define KVM_BUG_ON(__condition) BUILD_BUG_ON_INVALID(__condition) +#endif /* KVM_E2K_DEBUG */ + +/* Avoid printk from arch-independent WARN_ON as + * KVM implementation uses __interrupt functions */ +#define KVM_WARN_ON(condition) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN(); \ + unlikely(__ret_warn_on); \ +}) + +#define KVM_E2K_SV_VM_TYPE_MASK (1 << KVM_E2K_SV_VM_TYPE) +#define KVM_E2K_SW_PV_VM_TYPE_MASK (1 << KVM_E2K_SW_PV_VM_TYPE) +#define KVM_E2K_HV_VM_TYPE_MASK (1 << KVM_E2K_HV_VM_TYPE) +#define KVM_E2K_HW_PV_VM_TYPE_MASK (1 << KVM_E2K_HW_PV_VM_TYPE) + +/* mask of available and supported by the hypervisor VM types */ +/* depends on hardware, CPU ISET, kernel & hypervisor configuration */ +extern unsigned int kvm_vm_types_available; + +/* atomic operations under mask values, for example kvm_vm_types_available */ +static inline void atomic_clear_mask(unsigned int mask, unsigned int *value) +{ + __api_atomic_op(mask, value, w, "andns", RELAXED_MB); +} +static inline void atomic_set_mask(unsigned int mask, unsigned int *value) +{ + __api_atomic_op(mask, value, w, "ors", RELAXED_MB); +} +static inline void atomic64_clear_mask(unsigned long mask, unsigned long *value) +{ + __api_atomic_op(mask, value, d, "andnd", RELAXED_MB); +} +static inline void atomic64_set_mask(unsigned long mask, unsigned long *value) +{ + __api_atomic_op(mask, value, d, "ord", RELAXED_MB); +} + +static inline bool +kvm_is_sv_vm_available(void) +{ + return kvm_vm_types_available & 
KVM_E2K_SV_VM_TYPE_MASK; +} +static inline bool +kvm_is_sw_pv_vm_available(void) +{ + return kvm_vm_types_available & KVM_E2K_SW_PV_VM_TYPE_MASK; +} +static inline bool +kvm_is_hv_vm_available(void) +{ + return kvm_vm_types_available & KVM_E2K_HV_VM_TYPE_MASK; +} +static inline bool +kvm_is_hw_pv_vm_available(void) +{ + return kvm_vm_types_available & KVM_E2K_HW_PV_VM_TYPE_MASK; +} + +#define KVM_USER_MEM_SLOTS 32 +/* memory slots that does not exposed to userspace */ +#define KVM_PRIVATE_MEM_SLOTS 4 + +#undef E2K_INVALID_PAGE +#define E2K_INVALID_PAGE (~(hpa_t)0) + +#define UNMAPPED_GVA (~(gpa_t)0) +#define arch_is_error_gpa(gpa) ((gpa_t)(gpa) == UNMAPPED_GVA) + +/* + * See include/linux/kvm_host.h + * For the normal pfn, the highest 12 bits should be zero, + * so we can mask bit 62 ~ bit 52 to indicate the error pfn, + * mask bit 63 to indicate the noslot pfn. + * Some arch e2k extensions: + * Bit 51 indicates NULL pagse + */ +#define KVM_PFN_NULL (1ULL << 51) + +/* number of page sizes supported by KVM */ +/* KVM can now support pages on each page table level, real MMU capabilities */ +/* are presenting by page table & page levels structures */ +#define KVM_NR_PAGE_SIZES MAX_HUGE_PAGES_LEVEL + +#define KVM_PT_LEVEL_HPAGE_SHIFT(ptl) get_pt_level_page_shift(ptl) +#define KVM_PT_LEVEL_HPAGE_SIZE(ptl) get_pt_level_page_size(ptl) +#define KVM_PT_LEVEL_HPAGE_MASK(ptl) get_pt_level_page_mask(ptl) +#define KVM_PT_LEVEL_PAGES_PER_HPAGE(ptl) \ + (KVM_PT_LEVEL_HPAGE_SIZE(ptl) / PAGE_SIZE) +#define KVM_PT_LEVEL_HPAGE_GFN_SHIFT(ptl) \ + (KVM_PT_LEVEL_HPAGE_SHIFT(ptl) - PAGE_SHIFT) + +static inline gfn_t +gfn_to_index(gfn_t gfn, gfn_t base_gfn, const pt_level_t *pt_level) +{ + /* KVM_PT_LEVEL_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. 
*/ + KVM_BUG_ON(!pt_level->is_pte && !pt_level->is_huge); + return (gfn >> KVM_PT_LEVEL_HPAGE_GFN_SHIFT(pt_level)) - + (base_gfn >> KVM_PT_LEVEL_HPAGE_GFN_SHIFT(pt_level)); +} + +#define E2K_INVALID_PAGE (~(hpa_t)0) +#define ERROR_PAGE(x) IS_ERR_VALUE(x) +#define TO_ERROR_PAGE(x) ((hpa_t)((long)(x))) +#define PAGE_TO_ERROR(x) ((long)(x)) +#define IS_E2K_INVALID_PAGE(x) ((x) == E2K_INVALID_PAGE) +#define VALID_PAGE(x) (!IS_E2K_INVALID_PAGE(x) && !ERROR_PAGE(x)) + +#define KVM_PERMILLE_MMU_PAGES 20 +#define KVM_MIN_ALLOC_MMU_PAGES 64 +#define KVM_MMU_HASH_SHIFT 10 +#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT) +#define KVM_MIN_FREE_MMU_PAGES 5 +#define KVM_REFILL_PAGES 25 + +#define KVM_ALIAS_SLOTS 4 + +#define KVM_SHADOW_SLOTS 8 + +#define KVM_HALT_POLL_NS_DEFAULT 400000 + +#define KVM_IRQCHIP_NUM_PINS KVM_IOEPIC_NUM_PINS + +#define KVM_MAX_EIOHUB_NUM MAX_NUMNODES + +#define ASYNC_PF_PER_VCPU 64 + +bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu); +void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, + struct kvm_async_pf *work); + +struct kvm_vcpu; +struct kvm; + +extern struct mutex kvm_lock; +extern struct list_head vm_list; + +enum kvm_reg { + VCPU_REGS_UPSR = 0, + NR_VCPU_REGS +}; + +enum { + VCPU_SREG_USD_lo, + VCPU_SREG_USD_hi, + VCPU_SREG_PSP_lo, + VCPU_SREG_PSP_hi, + VCPU_SREG_PCSP_lo, + VCPU_SREG_PCSP_hi, + NR_VCPU_SREG +}; + +struct kvm_vcpu_stat { + /* follow fields should have any arch and arch-independent updated */ + u32 halt_successful_poll; + u32 halt_attempted_poll; + u64 halt_poll_invalid; + u32 halt_wakeup; + /* MMU stat */ + u64 pf_fixed; + u64 pf_guest; + u64 flush_gva; + u64 mmio_exits; + u64 signal_exits; +}; + +/* + * backup hardware stacks: + * 1) to spill/fill guest stacks while intercept/glaunch + * 2) to support hypercalls + */ +typedef struct bu_hw_stack { + hw_stack_area_t ps; /* procedure stack area */ + hw_stack_area_t pcs; /* chain stack area */ + e2k_psp_lo_t psp_lo; /* Procedure stack pointer: */ + 
e2k_psp_hi_t psp_hi; /* base & index & size */ + e2k_pcsp_lo_t pcsp_lo; /* Procedure chain stack */ + e2k_pcsp_hi_t pcsp_hi; /* pointer: base & index & size */ + int users; /* number of active hypercalls, */ + /* handlers on these stacks */ +} bu_hw_stack_t; + +/* sizes as common kernel stacks */ +#define HYPV_BACKUP_PS_SIZE KERNEL_P_STACK_SIZE +#define HYPV_BACKUP_PCS_SIZE KERNEL_PC_STACK_SIZE + +#define GET_BACKUP_PS_SIZE(bu_stacks) ((bu_stacks)->ps.size) +#define GET_BACKUP_PCS_SIZE(bu_stacks) ((bu_stacks)->pcs.size) +#define SET_BACKUP_PS_SIZE(bu_stacks, val) \ + (GET_BACKUP_PS_SIZE(bu_stacks) = (val)) +#define SET_BACKUP_PCS_SIZE(bu_stacks, val) \ + (GET_BACKUP_PCS_SIZE(bu_stacks) = (val)) +#define GET_BACKUP_PS_BASE(bu_stacks) GET_PS_BASE(bu_stacks) +#define GET_BACKUP_PCS_BASE(bu_stacks) GET_PCS_BASE(bu_stacks) + +/* + * guest VCPU boot-time stacks + */ +typedef struct vcpu_boot_stack { + data_stack_t data; /* local data stack */ + void *data_stack; /* data stack pointer at user space */ + hw_stack_area_t ps; /* procedure stack area */ + hw_stack_area_t pcs; /* chain stack area */ + void *proc_stack; /* procedure stack pointer at user */ + /* space */ + void *chain_stack; /* procedure chain stack pointer at */ + /* user space */ + guest_hw_stack_t regs; /* current registers state */ +} vcpu_boot_stack_t; + +#define GET_VCPU_BOOT_CS_BASE(boot_stacks) ((boot_stacks)->data.bottom) +#define GET_VCPU_BOOT_CS_TOP(boot_stacks) ((boot_stacks)->data.top) +#define GET_VCPU_BOOT_CS_SIZE(boot_stacks) ((boot_stacks)->data.size) +#define GET_VCPU_BOOT_PS_BASE(boot_stacks) GET_PS_BASE(boot_stacks) +#define GET_VCPU_BOOT_PS_SIZE(boot_stacks) ((boot_stacks)->ps.size) +#define GET_VCPU_BOOT_PCS_BASE(boot_stacks) GET_PCS_BASE(boot_stacks) +#define GET_VCPU_BOOT_PCS_SIZE(boot_stacks) ((boot_stacks)->pcs.size) +#define SET_VCPU_BOOT_CS_BASE(boot_stacks, val) \ + (GET_VCPU_BOOT_CS_BASE(boot_stacks) = (e2k_addr_t)(val)) +#define SET_VCPU_BOOT_CS_TOP(boot_stacks, val) \ + 
(GET_VCPU_BOOT_CS_TOP(boot_stacks) = (e2k_addr_t)(val)) +#define SET_VCPU_BOOT_CS_SIZE(boot_stacks, val) \ + (GET_VCPU_BOOT_CS_SIZE(boot_stacks) = (val)) +#define SET_VCPU_BOOT_PS_BASE(boot_stacks, val) \ + (GET_VCPU_BOOT_PS_BASE(boot_stacks) = (val)) +#define SET_VCPU_BOOT_PS_SIZE(boot_stacks, val) \ + (GET_VCPU_BOOT_PS_SIZE(boot_stacks) = (val)) +#define SET_VCPU_BOOT_PCS_BASE(boot_stacks, val) \ + (GET_VCPU_BOOT_PCS_BASE(boot_stacks) = (val)) +#define SET_VCPU_BOOT_PCS_SIZE(boot_stacks, val) \ + (GET_VCPU_BOOT_PCS_SIZE(boot_stacks) = (val)) + +struct kvm_vm_stat { + ulong mmu_shadow_zapped; + ulong mmu_pte_write; + ulong mmu_pte_updated; + ulong mmu_pde_zapped; + ulong mmu_flooded; + ulong mmu_recycled; + ulong mmu_cache_miss; + ulong mmu_unsync; + ulong remote_tlb_flush; + ulong lpages; +}; + +/* + * Don't want allocation failures within the mmu code, so need preallocate + * enough memory for a single page fault in a cache. + */ +#define KVM_NR_MEM_OBJS 400 +#define KVM_NR_MIN_MEM_OBJS 40 + +typedef struct kvm_mmu_memory_cache { + struct kmem_cache *kmem_cache; + int nobjs; + void *objects[KVM_NR_MEM_OBJS]; +} kvm_mmu_memory_cache_t; + +/* + * the pages used as guest page table on soft mmu are tracked by + * kvm_memory_slot.arch.gfn_track which is 16 bits, so the role bits used + * by indirect shadow page can not be more than 15 bits. + * + * Currently, we used 14 bits that are @level, @cr4_pae, @quadrant, @access, + * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp. + */ +typedef union kvm_mmu_page_role { + unsigned word; + struct { + unsigned level:4; + unsigned cr4_pae:1; + unsigned quadrant:2; + unsigned direct:1; + unsigned access:3; + unsigned invalid:1; + unsigned nxe:1; + unsigned cr0_wp:1; + unsigned smep_andnot_wp:1; + unsigned smap_andnot_wp:1; + unsigned unused:8; + + /* + * This is left at the top of the word so that + * kvm_memslots_for_spte_role can extract it with a + * simple shift. 
While there is room, give it a whole + * byte so it is also faster to load it from memory. + */ + unsigned smm:8; + }; +} kvm_mmu_page_role_t; + +typedef union kvm_mmu_root_flags { + unsigned word; + struct { + unsigned has_host_pgds:1; + unsigned has_guest_pgds:1; + unsigned unused:30; + }; +} kvm_mmu_root_flags_t; + +typedef struct kvm_rmap_head { + unsigned long val; +} kvm_rmap_head_t; + +typedef struct kvm_mmu_page { + struct list_head link; + struct hlist_node hash_link; + + /* + * The following two entries are used to key the shadow page in the + * hash table. + */ + gfn_t gfn; + kvm_mmu_page_role_t role; + kvm_mmu_root_flags_t root_flags; + + pgprot_t *spt; + gva_t gva; /* the shadow PT map guest virtual addresses from */ + /* hold the gfn of each spte inside spt */ + gfn_t *gfns; + bool unsync; + bool host_synced; /* host kernel range already synced */ + bool guest_kernel_synced; /* guest kernel range already synced */ + bool guest_user_synced; /* guest user range synced */ + bool released; /* root already under release */ + atomic_t atomic_unsync; /* there are atomicaly updated of PT entries */ + int root_count; /* Currently serving as active root */ + unsigned int unsync_children; + struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */ + + /* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen. */ + unsigned long mmu_valid_gen; + + DECLARE_BITMAP(unsync_child_bitmap, 512); + +#ifdef CONFIG_X86_32 + /* + * Used out of the mmu-lock to avoid reading spte values while an + * update is in progress; see the comments in __get_spte_lockless(). + */ + int clear_spte_count; +#endif + + /* Number of writes since the last time traversal visited this page. 
*/ + atomic_t write_flooding_count; +#ifdef CONFIG_GUEST_MM_SPT_LIST + struct list_head gmm_entry; /* entry at the gmm list of SPs */ + gmm_struct_t *gmm; /* the gmm in whose list the entry */ +#endif /* CONFIG_GUEST_MM_SPT_LIST */ +} kvm_mmu_page_t; + +/* page fault handling results */ +typedef enum pf_res { + PFRES_NO_ERR = 0, + PFRES_WRITE_TRACK, + PFRES_INJECTED, + PFRES_TRY_MMIO, + PFRES_ERR, + PFRES_RETRY, /* page fault is not handled and can */ + /* be retried on guest or should be handled */ + /* from begining by hypervisor */ +} pf_res_t; + +struct kvm_arch_exception; + +/* + * e2k supports 2 types of virtual space: + * - primary (native); + * - secondary (x86 emulation). + * enable 3 paging modes: + * - 4-level 64-bit, primary and secondary; + * - 3-level 64-bit, only secondary; + * - 2-level 32-bit, only secondary. + * Primary (native) virtual space can be managed as: + * - common linux standard: user and OS share single virtual space. In this + * case there is one united PT to translate user and kernel addresses and + * one PT root pointed to by MMU registers U_PPTB/U_VPTB; + * - separate user and OS virtual spaces: there are two separate PTs + * to translate user and kernel addresses and two separate PTs roots + * pointed to by MMU registers U_PPTB/U_VPTB (for users) and + * OS_PPTB/OS_VPTB (for kernel). + * + * The kvm_mmu structure abstracts the details of the current mmu mode. 
+ */ +typedef struct kvm_mmu { + hpa_t sh_u_root_hpa; /* shadow PT root for user (and probably OS) */ + hpa_t sh_os_root_hpa; /* shadow PT root for OS (separate spoaces) */ + hpa_t gp_root_hpa; /* physical base of root PT to translate */ + /* guest physical addresses */ + gva_t sh_u_vptb; /* shadow base address to map user */ + /* (and probably OS) PTs to virtual space */ + gva_t sh_os_vptb; /* shadow base address to map kernel */ + /* (separate spaces) PTs to virtual space */ + gva_t sh_os_vab; /* common hypervisor and guest pointer to */ + /* border of user and OS spaces */ + hpa_t sh_root_hpa; + bool sep_virt_space; /* separate virtual spaces for OS and user */ + bool direct_map; + bool is_spae; /* extended physical memory is enabled */ + bool is_pse; /* 4 Mb pages is enable for secondary space */ + bool paging_on; /* virtual space translation mode is ON */ + bool shadow_pt_on; /* shadow PT mode is enabled */ + bool phys_pt_on; /* guest physical addresses PT is ON */ + bool tdp_on; /* two dimensional paging is ON */ + bool spt_gpa_fault; /* guest physical address access fault */ + /* at shodow paging mode */ + bool nx; /* not executable is ??? 
*/ + bool u_context_on; /* guest user MMU context created and is ON */ + int pid; /* guest process ID (mmu context) */ + int pid2; /* guest process ID at secondary space */ + int root_level; + int shadow_root_level; + kvm_mmu_page_role_t base_role; + u64 *pae_root; + u64 *lm_root; + /* guest PT roots pointers */ + pgprotval_t os_pptb; /* guest OS primary (native) */ + /* page table physical base of VCPU */ + gva_t os_vptb; /* guest OS primary (native) */ + /* page table virtual base of VCPU */ + pgprotval_t u_pptb; /* guest user primary (native) */ + /* page table physical base of VCPU */ + gva_t u_vptb; /* guest user primary (native) */ + /* page table virtual base of VCPU */ + pgprotval_t u2_pptb; /* guest secondary page table */ + /* physical base of VCPU */ + gpa_t mpt_b; /* guest protection table base */ + /* of VCPU */ + pgprotval_t pdptes[4]; /* current root level PTEs registers */ + /* of VCPU for extended physical */ + /* address mode (SPAE) */ + gpa_t tc_gpa; /* guest 'physical address' of */ + /* trap cellar (TRAP_POINT MMU reg.) */ + struct page *tc_page; /* host page of guest trap cellar */ + void *tc_kaddr; /* host virtual address of guest */ + /* trap cellar */ + int tc_num; /* number of entries at trap cellar */ + /* same as TRAP_COUNT / 3 */ + gmm_struct_t *gmm; /* host agent of current guest mm */ + gmm_struct_t *active_gmm; /* only on host: current active */ + /* guest mm agent on host (same as */ + /* active_mm at native mode) */ + + /* MMU interceptions control registers state */ + virt_ctrl_mu_t virt_ctrl_mu; + mmu_reg_t g_w_imask_mmu_cr; + + /* MMU shadow control registers initial state */ + mmu_reg_t init_sh_mmu_cr; + mmu_reg_t init_sh_pid; + + /* Can have large pages at levels 2..last_nonleaf_level-1. 
*/ + u8 last_nonleaf_level; + + /* MMU interface */ + bool (*is_paging)(struct kvm_vcpu *vcpu); + void (*set_vcpu_u_pptb)(struct kvm_vcpu *vcpu, pgprotval_t base); + void (*set_vcpu_sh_u_pptb)(struct kvm_vcpu *vcpu, hpa_t root); + void (*set_vcpu_os_pptb)(struct kvm_vcpu *vcpu, pgprotval_t base); + void (*set_vcpu_sh_os_pptb)(struct kvm_vcpu *vcpu, hpa_t root); + void (*set_vcpu_u_vptb)(struct kvm_vcpu *vcpu, gva_t base); + void (*set_vcpu_sh_u_vptb)(struct kvm_vcpu *vcpu, gva_t base); + void (*set_vcpu_os_vptb)(struct kvm_vcpu *vcpu, gva_t base); + void (*set_vcpu_sh_os_vptb)(struct kvm_vcpu *vcpu, gva_t base); + void (*set_vcpu_os_vab)(struct kvm_vcpu *vcpu, gva_t os_virt_base); + void (*set_vcpu_gp_pptb)(struct kvm_vcpu *vcpu, hpa_t root); + pgprotval_t (*get_vcpu_u_pptb)(struct kvm_vcpu *vcpu); + hpa_t (*get_vcpu_sh_u_pptb)(struct kvm_vcpu *vcpu); + pgprotval_t (*get_vcpu_os_pptb)(struct kvm_vcpu *vcpu); + hpa_t (*get_vcpu_sh_os_pptb)(struct kvm_vcpu *vcpu); + gva_t (*get_vcpu_u_vptb)(struct kvm_vcpu *vcpu); + gva_t (*get_vcpu_sh_u_vptb)(struct kvm_vcpu *vcpu); + gva_t (*get_vcpu_os_vptb)(struct kvm_vcpu *vcpu); + gva_t (*get_vcpu_sh_os_vptb)(struct kvm_vcpu *vcpu); + gva_t (*get_vcpu_os_vab)(struct kvm_vcpu *vcpu); + hpa_t (*get_vcpu_gp_pptb)(struct kvm_vcpu *vcpu); + void (*set_vcpu_pt_context)(struct kvm_vcpu *vcpu, unsigned flags); + void (*init_vcpu_ptb)(struct kvm_vcpu *vcpu); + pgprotval_t (*get_vcpu_context_u_pptb)(struct kvm_vcpu *vcpu); + gva_t (*get_vcpu_context_u_vptb)(struct kvm_vcpu *vcpu); + pgprotval_t (*get_vcpu_context_os_pptb)(struct kvm_vcpu *vcpu); + gva_t (*get_vcpu_context_os_vptb)(struct kvm_vcpu *vcpu); + gva_t (*get_vcpu_context_os_vab)(struct kvm_vcpu *vcpu); + hpa_t (*get_vcpu_context_gp_pptb)(struct kvm_vcpu *vcpu); + pgprotval_t (*get_vcpu_pdpte)(struct kvm_vcpu *vcpu, int index); + pf_res_t (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err, + bool prefault, gfn_t *gfn, kvm_pfn_t *pfn); + void (*inject_page_fault)(struct 
kvm_vcpu *vcpu, + struct kvm_arch_exception *fault); + gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access, + struct kvm_arch_exception *exception); + gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, + struct kvm_arch_exception *exception); + void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, + pgprot_t *spte, const void *pte); + void (*sync_gva)(struct kvm_vcpu *vcpu, gva_t gva); + void (*sync_gva_range)(struct kvm_vcpu *vcpu, gva_t gva_start, + gva_t gva_end, bool flush_tlb); + int (*sync_page)(struct kvm_vcpu *vcpu, kvm_mmu_page_t *sp); +} kvm_mmu_t; + +typedef struct intc_mu_state { + unsigned long notifier_seq; /* 'mmu_notifier_seq' state before */ + /* gfn->pfn translation */ + pf_res_t pfres; /* page fault handling result */ + bool may_be_retried; /* the MMU request can be retied */ + bool ignore_notifier; /* the MMU request should ignore the + * MMU notifier status */ +} intc_mu_state_t; + +/* define exit reasons (requests) from guest to host */ +#define EXIT_REASON_VM_PANIC 0 +#define EXIT_REASON_MMIO_REQ 1 +#define EXIT_REASON_IOPORT_REQ 2 + +#define EXIT_NOTIFY_IO 3 + +#define EXIT_SHUTDOWN 4 + +typedef struct kvm_ioport_req { + uint64_t port; + uint64_t data; /* single data or guest address of string */ + u32 __user *user_data; /* user pointer to data */ + uint32_t count; + uint32_t cur_count; + uint8_t size; + uint8_t is_out; + uint8_t string; + uint8_t needed; + uint8_t completed; +} kvm_ioport_req_t; + +typedef struct kvm_lapic_irq { + u32 vector; + u32 delivery_mode; + u32 dest_mode; + u32 level; + u32 trig_mode; + u32 shorthand; + u32 dest_id; +} kvm_lapic_irq_t; + +/* + * Unlike kvm_lapic_irq, CEPIC doesn't have dest_mode (physical/logical) + * and level (polarity) fields + */ +typedef struct kvm_cepic_irq { + u32 vector; + u32 delivery_mode; + u32 trig_mode; + u32 shorthand; + u32 dest_id; +} kvm_cepic_irq_t; + +typedef struct kvm_mem_alias { + unsigned long alias_start; + unsigned long 
target_start; + gfn_t alias_base_gfn; + unsigned long npages; + u32 target_slot; +} kvm_mem_alias_t; + +typedef struct kvm_kernel_shadow { + unsigned long kernel_start; + unsigned long shadow_start; + unsigned long area_size; + u32 alias_slot; +} kvm_kernel_shadow_t; + +struct user_area_t; + +typedef struct kvm_mem_guest { + struct user_area *area; + kvm_guest_mem_type_t type; /* type of memory: RAM, VRAM */ +} kvm_mem_guest_t; + +/* + * Delivery modes of Virtual IRQs (see field 'flags' below) + */ +#define DIRECT_INJ_VIRQ_FLAG 0x0010UL /* direct injection of VIRQ */ + /* to VCPU process */ +typedef struct kvm_guest_virq { + int virq_id; /* VIRQ number */ + atomic_t *count; /* pointer to atomic counter */ + /* of unhandled VIRQs */ + unsigned long flags; /* delivery mode and other flags of */ + /* virtual IRQ (see above) */ + struct kvm_vcpu *vcpu; /* Virtual guest CPU to handle VIRQ */ + struct task_struct *host_task; /* host task structure of VIRQ */ + int stop_handler; /* VIRQ handler should be stopped */ +} kvm_guest_virq_t; + +/* + * Context that is saved and restored by software when + * switching from hypervisor to guest or vice versa. + */ +typedef struct kvm_sw_cpu_context { + int osem; + bool in_hypercall; + + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + e2k_sbr_t sbr; + + struct to_save { + bool valid; + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + e2k_sbr_t sbr; + } saved; + + /* + * Host VCPU local data stack pointer registers state (to save/restore). + * It is relevant only for paravirtualization, since in this case + * there is one VCPU process, but there are two mode of its execution: + * as host part of VCPU (qemu) + * as guest part of VCPU + * and, accordingly, two stacks: host & guest. 
+ */ + e2k_usd_lo_t host_usd_lo; + e2k_usd_hi_t host_usd_hi; + e2k_sbr_t host_sbr; + + e2k_mem_crs_t crs; /* only for PV guest */ + + /* + * TODO here goes stuff that can be not switched + * on hypercalls if we do not support calling QEMU from them + */ + e2k_fpcr_t fpcr; + e2k_fpsr_t fpsr; + e2k_pfpfr_t pfpfr; + + e2k_upsr_t upsr; + +#ifdef CONFIG_GREGS_CONTEXT + /* + * Guest has own global registers context (vcpu) different + * from QEMU (host). + * The VCPU (guest) context should be restored while vcpu_load() + * after saving the host (QEMU) context and be saved while vcpu_put() + * before restoring the host (QEMU) context. + * Only one current VCPU or host context should be saved and restored + * while switch to/from other processes. + */ + global_regs_t vcpu_gregs; + global_regs_t host_gregs; + kernel_gregs_t vcpu_k_gregs; + kernel_gregs_t host_k_gregs; + host_gregs_t vcpu_h_gregs; + host_gregs_t host_h_gregs; +#endif /* CONFIG_GREGS_CONTEXT */ + + e2k_cutd_t cutd; + + /* guest (hypervisor shadow) user page table bases: */ + mmu_reg_t sh_u_pptb; /* physical */ + mmu_reg_t sh_u_vptb; /* and virtual */ + mmu_reg_t tc_hpa; /* host physical base of VCPU */ + /* trap cellar */ + mmu_reg_t trap_count; + + e2k_dibcr_t dibcr; + e2k_ddbcr_t ddbcr; + e2k_dibsr_t dibsr; + e2k_ddbsr_t ddbsr; + e2k_dimcr_t dimcr; + e2k_ddmcr_t ddmcr; + u64 dimar0; + u64 dimar1; + u64 ddmar0; + u64 ddmar1; + u64 dibar0; + u64 dibar1; + u64 dibar2; + u64 dibar3; + u64 ddbar0; + u64 ddbar1; + u64 ddbar2; + u64 ddbar3; + +#ifdef CONFIG_USE_AAU + e2k_aau_t aau_context; +#endif + + u64 cs_lo; + u64 cs_hi; + u64 ds_lo; + u64 ds_hi; + u64 es_lo; + u64 es_hi; + u64 fs_lo; + u64 fs_hi; + u64 gs_lo; + u64 gs_hi; + u64 ss_lo; + u64 ss_hi; + u64 rpr_lo; + u64 rpr_hi; + u64 tcd; + +#ifdef CONFIG_CLW_ENABLE + mmu_reg_t us_cl_d; + clw_reg_t us_cl_b; + clw_reg_t us_cl_up; + clw_reg_t us_cl_m0; + clw_reg_t us_cl_m1; + clw_reg_t us_cl_m2; + clw_reg_t us_cl_m3; +#endif +} kvm_sw_cpu_context_t; + +/* + * 
Context that is saved and restored by software _only_ for + * interceptions when switching from hypervisor to guest or vice versa. + */ +typedef struct kvm_intc_cpu_context { + e2k_ctpr_t ctpr1; + e2k_ctpr_t ctpr2; + e2k_ctpr_t ctpr3; + e2k_ctpr_hi_t ctpr1_hi; + e2k_ctpr_hi_t ctpr2_hi; + e2k_ctpr_hi_t ctpr3_hi; + u64 lsr; + u64 ilcr; + u64 lsr1; + u64 ilcr1; + + int cu_num, mu_num; + int cur_mu; /* # of current MMU intercept in handling */ + intc_info_cu_t cu; + intc_info_mu_t mu[INTC_INFO_MU_ITEM_MAX]; + intc_mu_state_t mu_state[INTC_INFO_MU_ITEM_MAX]; + bool mu_updated; /* the mu info was updated, so need restote */ + /* on the registers */ + bool cu_updated; /* the cu info was updated, so need restore */ + /* on the registers */ + s8 nr_TIRs; + u64 exceptions; /* source mask of all exceptions in the TIRs */ + /* at the interception moment */ + /* should be the same as INTC_INFO_CU.hi.exc */ + u64 exc_to_create; /* mask of the new exceptions to add at TIRs */ + u64 exc_to_delete; /* mask of the exceptions to delete from TIRs */ + u64 exc_to_pass; /* mask of the exceptions from source */ + /* TIRs exceptions to pass to the guest */ + gva_t exc_IP_to_create; /* IP to create exceptions like instruction */ + /* page faults */ + e2k_tir_t TIRs[TIR_NUM]; + u64 sbbp[SBBP_ENTRIES_NUM]; + u64 intc_mu_to_move; + u64 cu_entry_handled; +} kvm_intc_cpu_context_t; + +struct kvm_epic_page; + +/* + * here goes context that is: + * - saved and restored by hardware when switching + * from hypervisor to guest or vice versa. 
+ * - belongs only to guest and isn't used by hypervisor + * + * Then this context is: + * 1) Initialized by hypervisor in init_hw_ctxt() + * 2) Written to actual registers for the first time + * in write_hw_ctxt_to_registers() + * 3) Switched in save/restore_kvm_context_v6() + */ +typedef struct kvm_hw_cpu_context { + e2k_psp_lo_t sh_psp_lo; + e2k_psp_hi_t sh_psp_hi; + e2k_pcsp_lo_t sh_pcsp_lo; + e2k_pcsp_hi_t sh_pcsp_hi; + e2k_pshtp_t sh_pshtp; + e2k_pcshtp_t sh_pcshtp; + e2k_wd_t sh_wd; + e2k_psp_lo_t bu_psp_lo; + e2k_psp_hi_t bu_psp_hi; + e2k_pcsp_lo_t bu_pcsp_lo; + e2k_pcsp_hi_t bu_pcsp_hi; + + mmu_reg_t sh_mmu_cr; + mmu_reg_t sh_pid; + mmu_reg_t sh_os_pptb; + mmu_reg_t gp_pptb; + mmu_reg_t sh_os_vptb; + mmu_reg_t sh_os_vab; + mmu_reg_t gid; + + e2k_oscud_lo_t sh_oscud_lo; + e2k_oscud_hi_t sh_oscud_hi; + e2k_osgd_lo_t sh_osgd_lo; + e2k_osgd_hi_t sh_osgd_hi; + e2k_cutd_t sh_oscutd; + e2k_cuir_t sh_oscuir; + u64 sh_osr0; + e2k_core_mode_t sh_core_mode; + + virt_ctrl_cu_t virt_ctrl_cu; + virt_ctrl_mu_t virt_ctrl_mu; + mmu_reg_t g_w_imask_mmu_cr; + + struct kvm_epic_page *cepic; + + /* Secondary space registers */ + u64 u2_pptb; + u64 pid2; + u64 mpt_b; + u64 pci_l_b; + u64 ph_h_b; + u64 ph_hi_l_b; + u64 ph_hi_h_b; + u64 pat; + u64 pdpte0; + u64 pdpte1; + u64 pdpte2; + u64 pdpte3; +} kvm_hw_cpu_context_t; + +/* + * The structure needs only for paravitualized guest mode: + * !vcpu->arch.is_hv && vcpu->arch.is_pv + * + * Hypervisor context which should be different for host VCPU thread (qemu) + * and guest VCPUs threads to allow running the VCPU kernel thread as + * multi-threading. + * The one VCPY kernel thread is running the one of the following processes: + * the host application qemu (or some other to control VM); + * the guest kernel threads; + * the guest user threads; + * This structure is to save/restore and switch from/to the one host VCPU thread + * and many guest threads. 
+ * If the host thread is now active then the thread info contains this active + * context and vcpu->arch.host_ctxt contains context of last running guest + * thread. + * If the guest thread is active then the thread_info contains context of this + * active guest thread and the structure vcpu->arch.host_ctxt contains context + * of the host thread. + * Contexts of not active guest threads were saved/restored at the such + * structures into its gthread_info + */ +typedef struct kvm_host_context { + /* the task->stack ponter */ + unsigned long *stack; + /* the host kernel's stack of local data */ + /* one for host thread and one common for all guest threads */ + e2k_usd_hi_t k_usd_hi; + e2k_usd_lo_t k_usd_lo; + e2k_sbr_t k_sbr; + /* the host kernel's hardware stacks */ + /* one for host thread and one common for all guest threads */ + e2k_psp_lo_t k_psp_lo; + e2k_psp_hi_t k_psp_hi; + e2k_pcsp_lo_t k_pcsp_lo; + e2k_pcsp_hi_t k_pcsp_hi; + + /* pointer to the top of 'pt_regs' structures list */ + pt_regs_t *pt_regs; + /* some additional items of processes context */ + e2k_upsr_t upsr; /* user UPSR register state */ + unsigned osem; /* OSEM register state */ + /* the host kernel's signal/trap stack of contexts */ + kvm_signal_context_t signal; +} kvm_host_context_t; + +#ifdef CONFIG_KVM_ASYNC_PF +struct kvm_apf_ready { + struct list_head link; + u32 apf_id; +}; +#endif /* CONFIG_KVM_ASYNC_PF */ + +struct kvm_vcpu_arch { + kvm_sw_cpu_context_t sw_ctxt; + kvm_intc_cpu_context_t intc_ctxt; + kvm_hw_cpu_context_t hw_ctxt; + kvm_host_context_t host_ctxt; + + int launched; + int last_exit; + bool is_pv; /* VCPU is under paravirtualized */ + /* support */ + bool is_hv; /* VCPU is under hardware virtualized */ + /* support */ + /* host switch to vcpu-host mode from host interception emulation mode */ + /* (trap or system call on PV mode) */ + bool from_pv_intc; + + kvm_vcpu_state_t *vcpu_state; + kvm_vcpu_state_t *kmap_vcpu_state; /* alias of VCPU state */ + /* mapped into 
kernel VM */ + /* space */ + e2k_cute_t *guest_cut; + e2k_addr_t guest_phys_base; /* guest image (kernel) physical base */ + char *guest_base; /* guest image (kernel) virtual base */ + e2k_size_t guest_size; /* guest image (kernel) size */ + e2k_size_t trap_offset; /* guest trap table #0 entry offset */ + /* from guest image base */ + char *trap_entry; /* guest trap table #0 base as entry */ + /* to paravirtualized trap handler */ + + bu_hw_stack_t hypv_backup; /* backup hardware stacks */ + vcpu_boot_stack_t boot_stacks; /* guest boot-time stacks */ + guest_hw_stack_t guest_stacks; /* guest hardware stacks state */ + /* to emulate harware supported */ + /* HCALLs */ + gthread_info_t *gti; /* host agent of current active guest */ + /* thread/process */ + + /* + * Paging state of the vcpu + * This context is always used to handle faults. + */ + kvm_mmu_t mmu; + + /* + * Pointer to the mmu context currently used for + * gva_to_gpa translations. + */ + kvm_mmu_t *walk_mmu; + + kvm_mmu_memory_cache_t mmu_pte_list_desc_cache; + kvm_mmu_memory_cache_t mmu_page_cache; + kvm_mmu_memory_cache_t mmu_page_header_cache; + + /* FIXME: Cache MMIO info is not fully implemented */ + u64 mmio_gva; + unsigned access; + gfn_t mmio_gfn; + u64 mmio_gen; + u64 mmio_data[1]; + u64 __user *mmio_user_data; + intc_info_mu_t *io_intc_info; + + /* + * Indicate whether the access faults on its page table in guest + * which is set when fix page fault and used to detect unhandeable + * instruction. + * FIXME: it is field from x86 arch, so does it need for e2k??? 
+ */ + bool write_fault_to_shadow_pgtable; + + u64 apic_base; + struct kvm_lapic *apic; /* kernel irqchip context */ + int32_t apic_arb_prio; + + /* Software KVM CEPIC model */ + u64 epic_base; + struct kvm_cepic *epic; + + /* Hardware guest CEPIC support */ + raw_spinlock_t epic_dat_lock; /* lock to update dam_active */ + bool epic_dat_active; + struct hrtimer cepic_idle; + ktime_t cepic_idle_start_time; + + int mp_state; + int sipi_vector; + struct task_struct *guest_task; /* guest task */ + struct task_struct *host_task; /* host task: main VCPU host */ + /* or VIRQ VCPU host for VIRQ VCPUs */ + struct mutex lock; /* host and guest part of VCPU */ + /* including VIRQ VCPUs */ + /* synchronization */ + struct list_head vcpus_to_spin; /* list of VCPUs to support boot-time */ + /* spin lock/unlock */ + bool unhalted; /* VCPU was woken up by pv_kick */ + bool halted; /* VCPU is halted */ + bool on_idle; /* VCPU is on idle waiting for some */ + /* events for guest */ + bool on_spinlock; /* VCPU is on slow spinlock waiting */ + bool on_csd_lock; /* VCPU is waiting for csd unlocking */ + /* (IPI completion) */ + bool should_stop; /* guest VCPU thread should be */ + /* stopped and completed */ + bool virq_wish; /* trap 'last wish' is injection to */ + /* pass pending VIRQs to guest */ + bool virq_injected; /* interrupt is injected to handle */ + /* pending VIRQs by guest */ + bool on_virqs_handling; /* VCPU is handling pending VIRQs */ + bool vm_exit_wish; /* VCPU is need to VM exit and */ + /* exit reason handling */ + bool trap_wish; /* VCPU is need to inject traps */ + bool hcall_irqs_disabled; /* VCPU entered HCALL with disabled interrupts */ + unsigned long trap_mask_wish; /* mask of traps to wish */ + struct completion exited; /* guest VCPU thread completed */ + struct completion released; /* all VCPU threads completed and */ + /* VCPU can be freed */ + struct hrtimer hrt; /* local timer of VCPU */ + int hrt_virq_no; /* number of VIRQ of local timer */ + long 
hrt_period; /* period of hr timer */ + long hrt_running_start; /* VCPU running time when timer */ + /* was started */ + + char *entry_point; /* startup point of guest image */ + int args_num; /* arguments number to pass to guest */ + u64 args[KVM_VCPU_MAX_GUEST_ARGS]; /* arguments to pass */ + + /* Exit data for guest */ + uint32_t exit_reason; + kvm_ioport_req_t ioport; /* IO port access (in()/out()) */ + void *ioport_data; /* pointer to IO port data at */ + /* kvm_run page (now middle) */ + int64_t ioport_data_size; /* max size of IO port data area */ + uint32_t notifier_io; /* IO request notifier */ + + bool in_exit_req; /* VCPU is waiting for exit */ + /* request completion */ + /* exit request in progress */ + struct completion exit_req_done; /* exit request is completed */ + + struct list_head exit_reqs_list; /* exit requests list head */ + /* used only on main VCPU */ + struct list_head exit_req; /* the VCPU exit request */ + raw_spinlock_t exit_reqs_lock; /* to lock list of exit */ + /* requests */ + + struct work_struct dump_work; /* to schedule work to dump */ + /* guest VCPU state */ + + u8 event_exit_inst_len; + + uint32_t exit_shutdown_terminate; + +#ifdef CONFIG_KVM_ASYNC_PF + struct { + bool enabled; + struct gfn_to_hva_cache reason_gpa; /* hva of guest per-cpu */ + /* pv_apf_event.apf_reason */ + struct gfn_to_hva_cache id_gpa; /* hva of guest per-cpu */ + /* pv_apf_event.apf_id */ + u32 cnt; /* Counter of async pf */ + /* events on this vcpu. */ + u32 host_apf_reason; /* Reason for async pf: */ + /* page in swap or page ready. */ + u32 apf_ready_vector; /* Irq vector number to notify */ + /* that page is ready */ + u32 irq_controller; /* Type of irq controller to use */ + /* to notify guest that page is ready */ + bool in_pm; /* Is privilidged mode intercepted? 
*/ + } apf; +#endif /* CONFIG_KVM_ASYNC_PF */ + + int node_id; + int hard_cpu_id; +}; + +#ifdef CONFIG_KVM_HV_MMU +typedef struct kvm_lpage_info { + int disallow_lpage; +} kvm_lpage_info_t; + +typedef struct kvm_arch_memory_slot { + kvm_rmap_head_t *rmap[KVM_NR_PAGE_SIZES]; + kvm_lpage_info_t *lpage_info[KVM_NR_PAGE_SIZES - 1]; + kvm_mem_guest_t guest_areas; + unsigned short *gfn_track[KVM_PAGE_TRACK_MAX]; +} kvm_arch_memory_slot_t; +#else /* ! CONFIG_KVM_HV_MMU */ +struct kvm_lpage_info { + int write_count; +}; + +struct kvm_arch_memory_slot { + unsigned long *rmap; + struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1]; + kvm_mem_guest_t guest_areas; +}; + +extern struct file_operations kvm_vm_fops; +#endif /* CONFIG_KVM_HV_MMU */ + +/* + * e2k-arch vcpu->requests bit members + */ +#define KVM_REQ_TRIPLE_FAULT 10 /* FIXME: not implemented */ +#define KVM_REQ_MMU_SYNC 11 /* FIXME: not implemented */ +#define KVM_REQ_PENDING_VIRQS 16 /* there are unhandled VIRQs */ + /* to inject on VCPU */ +#define KVM_REG_SHOW_STATE 17 /* bit should be cleared */ + /* after show state of VCPU */ + /* completion */ +#define KVM_REQ_KICK 18 /* VCPU should be kicked */ +#define KVM_REQ_VIRQS_INJECTED 20 /* pending VIRQs injected */ +#define KVM_REQ_SCAN_IOAPIC 23 /* scan IO-APIC */ +#define KVM_REQ_SCAN_IOEPIC 24 /* scan IO-EPIC */ + +#define kvm_set_pending_virqs(vcpu) \ + set_bit(KVM_REQ_PENDING_VIRQS, (void *)&vcpu->requests) +#define kvm_test_and_clear_pending_virqs(vcpu) \ + test_and_clear_bit(KVM_REQ_PENDING_VIRQS, \ + (void *)&vcpu->requests) +#define kvm_clear_pending_virqs(vcpu) \ + clear_bit(KVM_REQ_PENDING_VIRQS, (void *)&vcpu->requests) +#define kvm_test_pending_virqs(vcpu) \ + test_bit(KVM_REQ_PENDING_VIRQS, (const void *)&vcpu->requests) +#define kvm_set_virqs_injected(vcpu) \ + set_bit(KVM_REQ_VIRQS_INJECTED, (void *)&vcpu->requests) +#define kvm_test_and_clear_virqs_injected(vcpu) \ + test_and_clear_bit(KVM_REQ_VIRQS_INJECTED, \ + (void *)&vcpu->requests) 
+#define kvm_clear_virqs_injected(vcpu) \ + clear_bit(KVM_REQ_VIRQS_INJECTED, (void *)&vcpu->requests) +#define kvm_test_virqs_injected(vcpu) \ + test_bit(KVM_REQ_VIRQS_INJECTED, (void *)&vcpu->requests) +#define kvm_start_vcpu_show_state(vcpu) \ + test_and_set_bit(KVM_REG_SHOW_STATE, (void *)&vcpu->requests) +#define kvm_complete_vcpu_show_state(vcpu) \ +do { \ + if (test_and_clear_bit(KVM_REG_SHOW_STATE, (void *)&vcpu->requests)) \ + wake_up_bit((void *)&vcpu->requests, KVM_REG_SHOW_STATE); \ +} while (false) + +struct kvm_irq_mask_notifier { + void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked); + int irq; + struct hlist_node link; +}; + +typedef const pt_struct_t * (*get_pt_struct_func_t)(struct kvm *kvm); +typedef const pt_struct_t * (*get_vcpu_pt_struct_func_t)(struct kvm_vcpu *vcpu); + +struct irq_remap_table { + bool enabled; + unsigned int host_pin; + unsigned int guest_pin; + int host_node; + int guest_node; + /* IOEPIC passthrough page start */ + hpa_t hpa; + gpa_t gpa; + struct pci_dev *vfio_dev; +}; + +struct kvm_arch { + unsigned long vm_type; /* virtual machine type */ + unsigned long flags; + int naliases; + int nshadows; + kvm_nid_t vmid; /* VM ID */ + unsigned int bsp_vcpu_id; + bool is_pv; /* paravirtualized VM */ + bool is_hv; /* hardware virtualized VM */ + bool shadow_pt_enable; /* shadow PT is supported and is base of */ + /* guest MMU emulation */ + bool phys_pt_enable; /* guest physical addresses PT is supported */ + /* by MMU and hypervisor */ + bool tdp_enable; /* two dimensional paging is supported */ + /* by hardware MMU and hypervisor */ + bool shadow_pt_set_up; /* shadow PT was set up, skip setup on other VCPUs */ + kvm_mem_alias_t aliases[KVM_ALIAS_SLOTS]; + kvm_kernel_shadow_t shadows[KVM_SHADOW_SLOTS]; + kvm_nidmap_t gpid_nidmap[GPIDMAP_ENTRIES]; + struct hlist_head gpid_hash[GPID_HASH_SIZE]; + kvm_gpid_table_t gpid_table; + struct kmem_cache *gti_cachep; + char gti_cache_name[32]; + kvm_nidmap_t 
gmmid_nidmap[GMMIDMAP_ENTRIES]; + struct hlist_head gmmid_hash[GMMID_HASH_SIZE]; + gmmid_table_t gmmid_table; + gmm_struct_t *init_gmm; /* host agent of guest kernel mm */ + + /* host page table structure to support guest MMU and PTs can be */ + /* different in common case */ + const pt_struct_t *host_pt_struct; /* abstractions for details */ + /* of the host page table structure */ + const pt_struct_t *guest_pt_struct; /* abstractions for details */ + /* of the guest page table structure */ + const pt_struct_t *gp_pt_struct; /* abstractions for details */ + /* of the guest physical page table */ + /* structure, if is enable */ + get_pt_struct_func_t get_host_pt_struct; + get_vcpu_pt_struct_func_t get_vcpu_pt_struct; + get_pt_struct_func_t get_gp_pt_struct; + +#ifdef CONFIG_KVM_HV_MMU + /* MMU nonpaging mode */ + hpa_t nonp_root_hpa; /* physical base of nonpaging root PT */ + /* MMU pages statistic */ + unsigned int n_used_mmu_pages; + unsigned int n_requested_mmu_pages; + unsigned int n_max_mmu_pages; + unsigned int indirect_shadow_pages; + unsigned long mmu_valid_gen; + struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; + + /* Hash table of struct kvm_mmu_page. 
*/ + struct list_head active_mmu_pages; + struct list_head zapped_obsolete_pages; + struct kvm_page_track_notifier_node mmu_sp_tracker; + struct kvm_page_track_notifier_head track_notifier_head; +#endif /* CONFIG_KVM_HV_MMU */ + + kvm_host_info_t *host_info; /* host machine and kernel INFO */ + kvm_host_info_t *kmap_host_info; /* host machine and kernel INFO */ + /* mapped to kernel space */ + kvm_guest_info_t guest_info; /* guest machine and kernel INFO */ + raw_spinlock_t time_state_lock; /* lock to update VM time */ + bool is_epic; /* 0 - paravirt LAPIC/IO-APIC model */ + /* 1 - paravirt CEPIC/IO-EPIC model */ + struct kvm_ioapic *vioapic[KVM_MAX_EIOHUB_NUM]; + struct kvm_ioepic *ioepic[KVM_MAX_EIOHUB_NUM]; + int vapics_in_nmi_mode; + struct page *epic_pages; /* HW CEPIC support */ + struct list_head assigned_dev_head; + struct iommu_domain *iommu_domain; + struct irq_remap_table *irt; + unsigned long irq_sources_bitmap; + struct kvm_nbsr *nbsr; + struct kvm_lt *lt[KVM_MAX_EIOHUB_NUM]; + struct kvm_spmc *spmc[KVM_MAX_EIOHUB_NUM]; + + /* boot-time spinlocks manage */ + struct hlist_head boot_spinlock_hash[BOOT_SPINLOCK_HASH_SIZE]; + boot_spinlock_unlocked_t boot_spinunlocked_list[ + BOOT_SPINUNLOCKED_LIST_SIZE]; + struct list_head boot_spinunlocked_head; + struct list_head boot_spinunlocked_free; + struct list_head boot_spinunlocked_wait; + raw_spinlock_t boot_spinlock_hash_lock; + bool boot_spinlock_hash_disable; + +/* TODO memory leak!!! 
*/ + /* spinlocks manage */ + struct hlist_head spinlock_hash[SPINLOCK_HASH_SIZE]; + spinlock_unlocked_t spinunlocked_list[SPINUNLOCKED_LIST_SIZE]; + struct list_head spinunlocked_head; + struct list_head spinunlocked_free; + struct list_head spinunlocked_wait; + raw_spinlock_t spinlock_hash_lock; + bool spinlock_hash_disable; + + /* VIRQ manage */ + raw_spinlock_t virq_lock; + kvm_guest_virq_t guest_virq[KVM_MAX_NR_VIRQS]; + int max_irq_no; /* max number of IRQ (from 0) */ +#ifdef CONFIG_SMP + /* guest CSD lock wait management on host */ + raw_spinlock_t csd_spinlock; + struct list_head csd_lock_wait_head; /* head of list of waiters */ + struct list_head csd_lock_free_head; /* head of list of free */ + /* structures */ + csd_lock_waiter_t csd_lock_free_list[KVM_MAX_CSD_LOCK_FREE_NUM]; +#endif /* CONFIG_SMP */ + + /* reads protected by irq_srcu, writes by irq_lock */ + struct hlist_head irq_ack_notifier_list; + + struct hlist_head mask_notifier_list; + + bool halted; /* VM is halted */ + /* sign of reboot VM, true - reboot */ + bool reboot; + + /* lock to update num_sclkr_run and common sh_sclkm3 + * for all vcpu-s of the guest */ + raw_spinlock_t sh_sclkr_lock; + int num_sclkr_run; + s64 sh_sclkm3; + unsigned int num_numa_nodes; + unsigned int max_nr_node_cpu; + + /* CEPIC timer frequency (Hz) */ + unsigned long cepic_freq; + + /* Multiplier for watchdog timer prescaler (allows to slow down + * its frequency) */ + unsigned long wd_prescaler_mult; + + /* Directly map legacy VGA area (0xa0000-0xbffff) to guest */ + bool legacy_vga_passthrough; +}; + +static inline bool kvm_has_passthrough_device(const struct kvm_arch *kvm) +{ + if (!kvm->irt) + return false; + return kvm->irt->vfio_dev != NULL; +} + +#ifdef CONFIG_KVM_ASYNC_PF + +/* Async page fault event descriptor */ +struct kvm_arch_async_pf { + u32 apf_id; /* Unique identifier of async page fault event */ +}; + +#endif /* CONFIG_KVM_ASYNC_PF */ + +#define arch_to_vcpu(arch_vcpu) container_of(arch_vcpu, struct 
kvm_vcpu, arch) + +/* + * KVM arch-dependent flags + */ +#define KVMF_PARAVIRT_GUEST 0 /* guest kernel is paravirtualized */ + /* and has shadow image address */ +#define KVMF_VCPU_STARTED 1 /* VCPUs (one or more) is started */ + /* VM real active */ +#define KVMF_IN_SHOW_STATE 8 /* show state of KVM (print all */ + /* stacks) is in progress */ +#define KVMF_NATIVE_KERNEL 32 /* guest is running native */ + /* e2k linux kernel */ +#define KVMF_PARAVIRT_KERNEL 33 /* guest is running paravirtualized */ + /* e2k linux kernel */ +#define KVMF_LINTEL 40 /* guest is running LIntel */ +#define KVMF_PARAVIRT_GUEST_MASK (1UL << KVMF_PARAVIRT_GUEST) +#define KVMF_VCPU_STARTED_MASK (1UL << KVMF_VCPU_STARTED) +#define KVMF_IN_SHOW_STATE_MASK (1UL << KVMF_IN_SHOW_STATE) +#define KVMF_NATIVE_KERNEL_MASK (1UL << KVMF_NATIVE_KERNEL) +#define KVMF_PARAVIRT_KERNEL_MASK (1UL << KVMF_PARAVIRT_KERNEL) +#define KVMF_LINTEL_MASK (1UL << KVMF_LINTEL) + +#define set_kvm_mode_flag(kvm, flag) \ +({ \ + set_bit(flag, (unsigned long *)&(kvm)->arch.flags); \ +}) + +#define clear_kvm_mode_flag(kvm, flag) \ +({ \ + clear_bit(flag, (unsigned long *)&(kvm)->arch.flags); \ +}) + +#define test_and_set_kvm_mode_flag(kvm, flag) \ +({ \ + test_and_set_bit(flag, (unsigned long *)&(kvm)->arch.flags); \ +}) + +#define test_and_clear_kvm_mode_flag(kvm, flag) \ +({ \ + test_and_clear_bit(flag, (unsigned long *)&(kvm)->arch.flags); \ +}) + +#define test_kvm_mode_flag(kvm, flag) \ +({ \ + test_bit(flag, (unsigned long *)&(kvm)->arch.flags); \ +}) + +#define kvm_clear_vcpu(kvm, vcpu_no) \ +({ \ + (kvm)->vcpus[vcpu_no] = NULL; \ + smp_wmb(); \ +}) + +struct kvm_e2k_info { + struct module *module; +}; + +static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) +{ + /* nothing to do */ +} + +/* + * Shadow page tables root flags to get/set/update/alloc/free + */ +#define U_ROOT_PT_BIT 0 /* user shadow PT root */ +#define OS_ROOT_PT_BIT 1 /* kernel (OS) PT root */ +#define GP_ROOT_PT_BIT 2 /* hypervisor 
PT root to translate */ + /* guest physical addresses */ +#define SEP_VIRT_ROOT_PT_BIT 3 /* separate virtual spaces mode */ +#define DONT_SYNC_ROOT_PT_BIT 4 /* do not sync shadow PT root */ + +#define U_ROOT_PT_FLAG (1U << U_ROOT_PT_BIT) +#define OS_ROOT_PT_FLAG (1U << OS_ROOT_PT_BIT) +#define GP_ROOT_PT_FLAG (1U << GP_ROOT_PT_BIT) +#define SEP_VIRT_ROOT_PT_FLAG (1U << SEP_VIRT_ROOT_PT_BIT) +#define DONT_SYNC_ROOT_PT_FLAG (1U << DONT_SYNC_ROOT_PT_BIT) + +#define KVM_ARCH_WANT_MMU_NOTIFIER + +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, unsigned flags); +int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); +#endif /* KVM_ARCH_WANT_MMU_NOTIFIER */ + +extern void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask); + +extern int kvm_wake_up_vcpu_host(struct kvm_vcpu *vcpu, int wait); +extern struct kvm_vcpu *kvm_get_vcpu_on_id(struct kvm *kvm, int vcpu_id); +extern struct kvm_vcpu *kvm_get_vcpu_on_hard_cpu_id(struct kvm *kvm, + int hard_cpu_id); +extern bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu); + +extern void kvm_make_scan_ioapic_request(struct kvm *kvm); + +#ifdef CONFIG_KVM_ASYNC_PF +extern void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, + struct kvm_async_pf *work); +extern void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, + struct kvm_async_pf *work); +#endif /* CONFIG_KVM_ASYNC_PF */ + +#if defined(CONFIG_KVM_HW_VIRTUALIZATION) && \ + !defined(CONFIG_KVM_GUEST_KERNEL) +/* it is hypervisor or host with virtualization support */ +extern void kvm_hv_epic_load(struct kvm_vcpu *vcpu); +extern void kvm_epic_invalidate_dat(struct kvm_vcpu_arch *vcpu); +extern void kvm_epic_enable_int(void); +extern void kvm_epic_timer_start(void); +extern void kvm_epic_timer_stop(bool skip_check); +extern void 
kvm_deliver_cepic_epic_interrupt(void); +extern void kvm_epic_vcpu_blocking(struct kvm_vcpu_arch *vcpu); +extern void kvm_epic_vcpu_unblocking(struct kvm_vcpu_arch *vcpu); +extern void kvm_init_cepic_idle_timer(struct kvm_vcpu *vcpu); + +#define VCPU_IDLE_TIMEOUT 1 +extern void kvm_epic_start_idle_timer(struct kvm_vcpu *vcpu); +extern void kvm_epic_stop_idle_timer(struct kvm_vcpu *vcpu); + +#else /* ! CONFIG_KVM_HW_VIRTUALIZATION || CONFIG_KVM_GUEST_KERNEL */ +/* it is host without virtualization support */ +/* or native paravirtualized guest */ +static inline void kvm_hv_epic_load(struct kvm_vcpu *vcpu) { } +static inline void kvm_epic_invalidate_dat(struct kvm_vcpu_arch *vcpu) { } +static inline void kvm_epic_enable_int(void) { } +static inline void kvm_epic_vcpu_blocking(struct kvm_vcpu_arch *vcpu) { } +static inline void kvm_epic_vcpu_unblocking(struct kvm_vcpu_arch *vcpu) { } +static inline void kvm_epic_timer_start(void) { } +static inline void kvm_epic_timer_stop(bool skip_check) { } +static inline void kvm_deliver_cepic_epic_interrupt(void) { } +static inline void kvm_init_cepic_idle_timer(struct kvm_vcpu *vcpu) { } +static inline void kvm_epic_start_idle_timer(struct kvm_vcpu *vcpu) { } +static inline void kvm_epic_stop_idle_timer(struct kvm_vcpu *vcpu) { } +#endif /* CONFIG_KVM_HW_VIRTUALIZATION && !CONFIG_KVM_GUEST_KERNEL */ + +extern struct work_struct kvm_dump_stacks; +extern void wait_for_print_all_guest_stacks(struct work_struct *work); + +#endif /* _ASM_E2K_KVM_HOST_H */ diff --git a/arch/e2k/include/asm/l-iommu.h b/arch/e2k/include/asm/l-iommu.h new file mode 100644 index 000000000000..a251ba41e13d --- /dev/null +++ b/arch/e2k/include/asm/l-iommu.h @@ -0,0 +1,160 @@ +#ifndef _E2K_IOMMU_H +#define _E2K_IOMMU_H + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + + +#ifdef CONFIG_PM_SLEEP +extern void l_iommu_stop_all(void); +#endif + +#define L_IOMMU_CTRL SIC_iommu_ctrl +#define 
L_IOMMU_FLUSH_ALL SIC_iommu_flush +#define L_IOMMU_FLUSH_ADDR SIC_iommu_flushP +#define L_IOMMU_ERROR SIC_iommu_err +#define L_IOMMU_ERROR1 SIC_iommu_err1 + +#define IO_PAGE_SHIFT 12 + +#define IOMMU_TABLES_NR 2 +#define IOMMU_LOW_TABLE 0 +#define IOMMU_HIGH_TABLE 1 + +typedef struct { unsigned iopte; } iopte_t; + +#define iopte_val(x) ((x).iopte) + + +#define MIN_IOMMU_WINSIZE (4*1024*1024*1024UL) +#define MAX_IOMMU_WINSIZE (512*1024*1024*1024UL) +#define DFLT_IOMMU_WINSIZE (4*1024*1024*1024UL) + +#define IOPTE_PAGE_MASK 0xfffffff0 +#define IOPTE_CACHE 0x00000004 /* Cached */ +#define IOPTE_STP_PREF_IOPTE 0x00000004 /* stop prefetch iopte */ +#define IOPTE_WRITE 0x00000001 /* Writeable */ +#define IOPTE_VALID 0x00000002 /* IOPTE is valid */ + +#define pa_to_iopte(addr) (((unsigned long)(addr) >> 8) & IOPTE_PAGE_MASK) +#define iopte_to_pa(iopte) (((unsigned long)(iopte) & IOPTE_PAGE_MASK) << 8) + + +#define addr_to_flush(__a) ((__a) >> IO_PAGE_SHIFT) + +static inline void __l_iommu_write(unsigned node, u32 val, unsigned long addr) +{ + sic_write_node_iolink_nbsr_reg(node, 0, addr, val); +} + +static inline u32 l_iommu_read(unsigned node, unsigned long addr) +{ + return sic_read_node_iolink_nbsr_reg(node, 0, addr); +} + +#define __l_iommu_set_ba __l_iommu_set_ba +static inline void __l_iommu_set_ba(unsigned node, unsigned long *ba) +{ + __l_iommu_write(node, pa_to_iopte(ba[IOMMU_LOW_TABLE]), + SIC_iommu_ba_lo); + __l_iommu_write(node, pa_to_iopte(ba[IOMMU_HIGH_TABLE]), + SIC_iommu_ba_hi); +} + +#define l_prefetch_iopte_supported l_prefetch_iopte_supported +static inline int l_prefetch_iopte_supported(void) +{ + return (int)machine.native_iset_ver >= ELBRUS_8C2_ISET; +} + +static inline void l_prefetch_iopte(iopte_t *iopte, int prefetch) +{ + if (prefetch) + iopte_val(iopte[0]) &= ~IOPTE_STP_PREF_IOPTE; + else + iopte_val(iopte[0]) |= IOPTE_STP_PREF_IOPTE; +} + +static inline void *l_iommu_map_table(unsigned long pa, unsigned long size) +{ + phys_addr_t start = pa; + 
pgprot_t prot = pgprot_writecombine(PAGE_KERNEL); + struct page **pages; + phys_addr_t page_start; + unsigned int page_count; + unsigned int i; + void *vaddr; + + if (!cpu_has(CPU_HWBUG_IOMMU)) + return __va(pa); + + page_start = start - offset_in_page(start); + page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE); + + pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL); + if (!pages) + return NULL; + + for (i = 0; i < page_count; i++) { + phys_addr_t addr = page_start + i * PAGE_SIZE; + pages[i] = pfn_to_page(addr >> PAGE_SHIFT); + } + + vaddr = vmap(pages, page_count, VM_MAP, prot); + kfree(pages); + + return vaddr; +} + +static inline void *l_iommu_unmap_table(void *va) +{ + void *p; + if (!cpu_has(CPU_HWBUG_IOMMU)) + return va; + p = page_address(vmalloc_to_page(va)) + offset_in_page(va); + vunmap(va); + return p; +} + +static inline int l_iommu_get_table(unsigned long iova) +{ + return iova & (~0UL << 32) ? IOMMU_HIGH_TABLE : IOMMU_LOW_TABLE; +} + +#define boot_l_iommu_supported() BOOT_HAS_MACHINE_L_IOMMU +#define l_iommu_supported() HAS_MACHINE_L_IOMMU + +extern int l_iommu_no_numa_bug; +extern int l_iommu_force_numa_bug_on; +extern unsigned long l_iommu_win_sz; + +#define l_iommu_has_numa_bug() (l_iommu_force_numa_bug_on || \ + (nr_online_nodes > 1 && l_iommu_no_numa_bug == 0 && \ + cpu_has(CPU_HWBUG_CANNOT_DO_DMA_IN_NEIGHBOUR_NODE))) + +#define L_PGSIZE_BITMAP SZ_4K + +/* software MMU support */ + +#define E2K_SWIOTLB_DEFAULT_SIZE (64 * 1024 * 1024) +#define E2K_SWIOTLB_MIN_SIZE (8 * 1024 * 1024) +#define L_SWIOTLB_DEFAULT_SIZE E2K_SWIOTLB_DEFAULT_SIZE +#define L_SWIOTLB_MIN_SIZE E2K_SWIOTLB_MIN_SIZE + +#define SWIOTLB_POOL_DEFAULT_NUM 64 +#define SWIOTLB_ISA_POOL_DEFAULT_NUM 16 +#define SWIOTLB_POOL_DEFAULT_SIZE (SWIOTLB_POOL_DEFAULT_NUM * PAGE_SIZE) +#define SWIOTLB_ISA_POOL_DEFAULT_SIZE \ + (SWIOTLB_ISA_POOL_DEFAULT_NUM * PAGE_SIZE) + +#endif /* !(_E2K_IOMMU_H) */ diff --git a/arch/e2k/include/asm/l-mcmonitor.h 
b/arch/e2k/include/asm/l-mcmonitor.h new file mode 100644 index 000000000000..e69eacff6637 --- /dev/null +++ b/arch/e2k/include/asm/l-mcmonitor.h @@ -0,0 +1,87 @@ +#ifndef _E2K_L_MCMONITOR_H_ +#define _E2K_L_MCMONITOR_H_ + +#include +#include + +typedef e2k_mc_ecc_struct_t l_mc_ecc_struct_t; + +static inline u32 l_mc_get_error_cnt(l_mc_ecc_struct_t *ecc, int node, + int nr) +{ + ecc->E2K_MC_ECC_reg = sic_get_mc_ecc(node, nr); + return ecc->E2K_MC_ECC_secnt; +} + +static inline char *l_mc_get_error_str(l_mc_ecc_struct_t *ecc, int nr, + char *error_msg, int error_msg_len) +{ + snprintf(error_msg, error_msg_len, + "MC%d_ECC=0x%x (ee=%d dmode=%d of=%d ue=%d secnt=%d)", + nr, + ecc->E2K_MC_ECC_reg, + ecc->E2K_MC_ECC_ee, + ecc->E2K_MC_ECC_dmode, + ecc->E2K_MC_ECC_of, + ecc->E2K_MC_ECC_ue, ecc->E2K_MC_ECC_secnt); + return error_msg; +} + +static inline bool l_mcmonitor_eec_enabled(void) +{ + l_mc_ecc_struct_t ecc; + ecc.E2K_MC_ECC_reg = sic_get_mc_ecc(0, 0); + return ecc.E2K_MC_ECC_ee; +} + + +#define l_mcmonitor_supported() HAS_MACHINE_L_SIC + +/* CC handles 32 bytes at a time */ +#define L_MC_ECC_WORDS_NR 4 +#define L_MCMONITOR_TEST_SIZE (256 * L_MC_ECC_WORDS_NR) + +static inline void l_mcmonitor_fill_data(u64 *a, bool make_error) +{ + int i, mc = SIC_MC_COUNT; + int sz = L_MCMONITOR_TEST_SIZE / L_MC_ECC_WORDS_NR / sizeof(*a); + e2k_mc_ecc_struct_t mc_ecc[SIC_MAX_MC_COUNT]; + a = (void *)__pa(a); + + for (i = 0; i < mc; i++) + mc_ecc[i].E2K_MC_ECC_reg = sic_get_mc_ecc(0, i); + + for (i = 0; i < mc; i++) { + l_mc_ecc_struct_t e = mc_ecc[i]; + e.E2K_MC_ECC_dmode = 1; + sic_set_mc_ecc(0, i, e.E2K_MC_ECC_reg); + } + mb(); + + for (i = 0; i < sz; i++, a += L_MC_ECC_WORDS_NR) { + int j; + u64 d = 0; + for (j = 0; j < L_MC_ECC_WORDS_NR; j++) { + u64 v = d; + if (j == 0 && make_error) + v |= (1UL << (i % 64)); + boot_writeq(v, a + j); + } + mb(); + } + + for (i = 0; i < mc; i++) + sic_set_mc_ecc(0, i, mc_ecc[i].E2K_MC_ECC_reg); + mb(); +} + +static inline int 
l_mcmonitor_cmp(u64 *a) +{ + int i; + for (i = 0; i < L_MCMONITOR_TEST_SIZE / sizeof(*a); i++) { + if (a[i] != 0) + return -EFAULT; + } + return 0; +} +#endif /* _E2K_L_MCMONITOR_H_ */ diff --git a/arch/e2k/include/asm/l_ide.h b/arch/e2k/include/asm/l_ide.h new file mode 100644 index 000000000000..bc61de7c53c8 --- /dev/null +++ b/arch/e2k/include/asm/l_ide.h @@ -0,0 +1,6 @@ +#ifndef _ARCH_IDE_H_ +#define _ARCH_IDE_H_ + +#include + +#endif /*_ARCH_IDE_H_*/ diff --git a/arch/e2k/include/asm/l_pmc.h b/arch/e2k/include/asm/l_pmc.h new file mode 100644 index 000000000000..4e1e78bfa0d4 --- /dev/null +++ b/arch/e2k/include/asm/l_pmc.h @@ -0,0 +1,18 @@ +#pragma once + +/* Available working frequencies (in kHz) */ +#define PMC_L_FREQUENCY_1 1000000 +#define PMC_L_FREQUENCY_2 400000 +#define PMC_L_FREQUENCY_3 200000 +#define PMC_L_FREQUENCY_4 143000 + +/* PMC registers */ +#define PMC_L_COVFID_STATUS_REG 0x0 +#define PMC_L_P_STATE_CNTRL_REG 0x8 +#define PMC_L_P_STATE_STATUS_REG 0xc +#define PMC_L_P_STATE_VALUE_0_REG 0x10 +#define PMC_L_P_STATE_VALUE_1_REG 0x14 +#define PMC_L_P_STATE_VALUE_2_REG 0x18 +#define PMC_L_P_STATE_VALUE_3_REG 0x1c + +#include diff --git a/arch/e2k/include/asm/l_spmc.h b/arch/e2k/include/asm/l_spmc.h new file mode 100644 index 000000000000..44346115f6e2 --- /dev/null +++ b/arch/e2k/include/asm/l_spmc.h @@ -0,0 +1,6 @@ +#ifndef _ARCH_SPMC_H_ +#define _ARCH_SPMC_H_ + +#include + +#endif /*_ARCH_SPMC_H_*/ diff --git a/arch/e2k/include/asm/l_timer.h b/arch/e2k/include/asm/l_timer.h new file mode 100644 index 000000000000..750e237fa713 --- /dev/null +++ b/arch/e2k/include/asm/l_timer.h @@ -0,0 +1,14 @@ +#ifndef _ASM_L_TIMER_H +#define _ASM_L_TIMER_H + +#ifdef __KERNEL__ + +#include +#include + +#define L_TIMER_IS_ALLOWED() (HAS_MACHINE_E2K_IOHUB || IS_HV_GM()) + +#include + +#endif /* __KERNEL__ */ +#endif /* _ASM_L_TIMER_H */ diff --git a/arch/e2k/include/asm/l_timer_regs.h b/arch/e2k/include/asm/l_timer_regs.h new file mode 100644 index 
000000000000..4c0f8cdcd7eb --- /dev/null +++ b/arch/e2k/include/asm/l_timer_regs.h @@ -0,0 +1,107 @@ +#ifndef _L_ASM_L_TIMER_REGS_H +#define _L_ASM_L_TIMER_REGS_H + +#include + +/* + * Elbrus System timer Registers (litlle endian) + */ + +typedef struct counter_limit_fields { + u32 unused : 9; /* [8:0] */ + u32 c_l : 22; /* [30:9] */ + u32 l : 1; /* [31] */ +} counter_limit_fields_t; +typedef union counter_limit { + u32 word; + counter_limit_fields_t fields; +} counter_limit_t; +typedef struct counter_st_v_fields { + u32 unused : 9; /* [8:0] */ + u32 c_st_v : 22; /* [30:9] */ + u32 l : 1; /* [31] */ +} counter_st_v_fields_t; +typedef union counter_st_v { + u32 word; + counter_st_v_fields_t fields; +} counter_st_v_t; +typedef struct counter_fields { + u32 unused : 9; /* [8:0] */ + u32 c : 22; /* [30:9] */ + u32 l : 1; /* [31] */ +} counter_fields_t; +typedef union counter { + u32 word; + counter_fields_t fields; +} counter_t; +typedef struct counter_control_fields { + u32 s_s : 1; /* [0] */ + u32 inv_l : 1; /* [1] */ + u32 l_ini : 1; /* [2] */ + u32 unused : 29; /* [31:3] */ +} counter_control_fields_t; +typedef union counter_control { + u32 word; + counter_control_fields_t fields; +} counter_control_t; +typedef struct wd_counter_l_fields { + u32 wd_c : 32; /* [31:0] */ +} wd_counter_l_fields_t; +typedef union wd_counter_l { + u32 word; + wd_counter_l_fields_t fields; +} wd_counter_l_t; +typedef struct wd_counter_h_fields { + u32 wd_c : 32; /* [31:0] */ +} wd_counter_h_fields_t; +typedef union wd_counter_h { + u32 word; + wd_counter_h_fields_t fields; +} wd_counter_h_t; +typedef struct wd_limit_fields { + u32 wd_l : 32; /* [31:0] */ +} wd_limit_fields_t; +typedef union wd_limit { + u32 word; + wd_limit_fields_t fields; +} wd_limit_t; +typedef struct power_counter_l_fields { + u32 pw_c : 32; /* [31:0] */ +} power_counter_l_fields_t; +typedef union power_counter_l { + u32 word; + power_counter_l_fields_t fields; +} power_counter_l_t; +typedef struct 
power_counter_h_fields { + u32 pw_c : 32; /* [31:0] */ +} power_counter_h_fields_t; +typedef union power_counter_h { + u32 word; + power_counter_h_fields_t fields; +} power_counter_h_t; +typedef struct wd_control_fields { + u32 w_m : 1; /* [0] */ + u32 w_out_e : 1; /* [1] */ + u32 w_evn : 1; /* [2] */ + u32 unused : 29; /* [31:3] */ +} wd_control_fields_t; +typedef union wd_control { + u32 word; + wd_control_fields_t fields; +} wd_control_t; +typedef struct reset_counter_l_fields { + u32 rst : 32; /* [31:0] */ +} reset_counter_l_fields_t; +typedef union reset_counter_l { + u32 word; + reset_counter_l_fields_t fields; +} reset_counter_l_t; +typedef struct reset_counter_h_fields { + u32 rst : 32; /* [31:0] */ +} reset_counter_h_fields_t; +typedef union reset_counter_h { + u32 word; + reset_counter_h_fields_t fields; +} reset_counter_h_t; + +#endif /* _L_ASM_L_TIMER_REGS_H */ diff --git a/arch/e2k/include/asm/linkage.h b/arch/e2k/include/asm/linkage.h new file mode 100644 index 000000000000..186440a868b9 --- /dev/null +++ b/arch/e2k/include/asm/linkage.h @@ -0,0 +1,7 @@ +#ifndef __ASM_LINKAGE_H +#define __ASM_LINKAGE_H + +#define __ALIGN .align 0 +#define __ALIGN_STR ".align 0" + +#endif diff --git a/arch/e2k/include/asm/linux_logo.h b/arch/e2k/include/asm/linux_logo.h new file mode 100644 index 000000000000..80315478820b --- /dev/null +++ b/arch/e2k/include/asm/linux_logo.h @@ -0,0 +1,48 @@ +/* $Id: linux_logo.h,v 1.1 2001/05/16 13:33:12 anonymous Exp $ + * include/asm-e2k/linux_logo.h: This is a linux logo + * to be displayed on boot. + * + * Copyright (C) 1996 Larry Ewing (lewing@isc.tamu.edu) + * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + * + * You can put anything here, but: + * LINUX_LOGO_COLORS has to be less than 224 + * image size has to be 80x80 + * values have to start from 0x20 + * (i.e. 
RGB(linux_logo_red[0], + * linux_logo_green[0], + * linux_logo_blue[0]) is color 0x20) + * BW image has to be 80x80 as well, with MS bit + * on the left + * Serial_console ascii image can be any size, + * but should contain %s to display the version + */ + +#include +#include + +#define linux_logo_banner "Linux/E2K version " UTS_RELEASE + +#define LINUX_LOGO_COLORS 214 + +#ifdef INCLUDE_LINUX_LOGO_DATA + +#define INCLUDE_LINUX_LOGOBW +#define INCLUDE_LINUX_LOGO16 + +#include + +#else + +/* prototypes only */ +extern unsigned char linux_logo_red[]; +extern unsigned char linux_logo_green[]; +extern unsigned char linux_logo_blue[]; +extern unsigned char linux_logo[]; +extern unsigned char linux_logo_bw[]; +extern unsigned char linux_logo16_red[]; +extern unsigned char linux_logo16_green[]; +extern unsigned char linux_logo16_blue[]; +extern unsigned char linux_logo16[]; + +#endif diff --git a/arch/e2k/include/asm/local.h b/arch/e2k/include/asm/local.h new file mode 100644 index 000000000000..003157c8182c --- /dev/null +++ b/arch/e2k/include/asm/local.h @@ -0,0 +1,175 @@ +#pragma once + +#include +#include +#include + +/* + * A signed long type for operations which are atomic for a single CPU. + * Usually used in combination with per-cpu variables. 
+ */ + +/* Use relaxed atomics if they are available */ +#if CONFIG_CPU_ISET >= 5 + +# include + +/* Default implementation uses READ_ONCE and WRITE_ONCE + * which are too slow on e2k because of "volatile" */ +# undef __local_inc +# undef __local_dec +# undef __local_add +# undef __local_sub + +#else + +typedef struct { long counter; } __attribute__ ((aligned)) local_t; + +# define LOCAL_INIT(i) { (i) } + +# define local_read(l) ((l)->counter) +# define local_set(l, i) (((l)->counter) = (i)) + +/* + * local_add - add long to local variable + * @i: long value to add + * @l: pointer of type local_t + */ +static inline void local_add(long i, local_t *l) +{ + unsigned long flags; + + raw_all_irq_save(flags); + l->counter += i; + raw_all_irq_restore(flags); +} + +/* + * local_sub - sub long from local variable + * @i: long value to sub + * @l: pointer of type local_t + */ +static inline void local_sub(long i, local_t *l) +{ + unsigned long flags; + + raw_all_irq_save(flags); + l->counter -= i; + raw_all_irq_restore(flags); +} + +# define local_inc(l) local_add(1,l) +# define local_dec(l) local_sub(1,l) + +/* + * local_add_return - add long to local variable and return the result + * @i: long value to add + * @l: pointer of type local_t + */ +static inline long local_add_return(long i, local_t *l) +{ + unsigned long flags; + register long result; + + raw_all_irq_save(flags); + l->counter += i; + result = l->counter; + raw_all_irq_restore(flags); + + return result; +} + +/* + * local_sub_return - sub long from local variable and return the result + * @i: long value to sub + * @l: pointer of type local_t + */ +static inline long local_sub_return(long i, local_t *l) +{ + unsigned long flags; + register long result; + + raw_all_irq_save(flags); + l->counter -= i; + result = l->counter; + raw_all_irq_restore(flags); + + return result; +} + +# define local_add_negative(i, l) (local_add_return(i,l) < 0) +# define local_sub_and_test(i, l) (local_sub_return(i,l) == 0) +# define 
local_inc_and_test(l) (local_add_return(1,l) == 0) +# define local_dec_and_test(l) (local_sub_return(1,l) == 0) + +static inline long local_cmpxchg(local_t *l, long o, long n) +{ + unsigned long flags; + register long result; + + raw_all_irq_save(flags); + result = l->counter; + if (result == o) + l->counter = n; + raw_all_irq_restore(flags); + + return result; +} + +static inline long local_xchg(local_t *l, long n) +{ + unsigned long flags; + register long result; + + raw_all_irq_save(flags); + result = l->counter; + l->counter = n; + raw_all_irq_restore(flags); + + return result; +} + +/** + * local_add_unless - add unless the number is a given value + * @l: pointer of type local_t + * @a: the amount to add to l... + * @u: ...unless l is equal to u. + * + * Atomically adds @a to @l, so long as it was not @u. + * Returns non-zero if @l was not @u, and zero otherwise. + */ +static inline int local_add_unless(local_t *l, long a, long u) +{ + unsigned long flags; + register long result; + + raw_all_irq_save(flags); + if (l->counter == u) { + result = 0; + } else { + l->counter += a; + result = 1; + } + raw_all_irq_restore(flags); + + return result; +} + +# define local_inc_return(l) local_add_return(1, l) +# define local_dec_return(l) local_sub_return(1, l) + +# define local_inc_not_zero(l) local_add_unless((l), 1, 0) + +#endif + +/* Non-atomic variants, ie. preemption disabled and won't be touched + * in interrupt, etc. 
*/ +#define __local_inc(l) ((l)->a.counter++) +#define __local_dec(l) ((l)->a.counter++) +#define __local_add(i, l) ((l)->a.counter += (i)) +#define __local_sub(i, l) ((l)->a.counter -= (i)) + +#ifdef CONFIG_HAVE_FTRACE_NMI_ENTER +# warning For the sake of atomicity nmie's must be disabled here along with ie's +#endif + diff --git a/arch/e2k/include/asm/machdep.h b/arch/e2k/include/asm/machdep.h new file mode 100644 index 000000000000..97a9ed91895f --- /dev/null +++ b/arch/e2k/include/asm/machdep.h @@ -0,0 +1,721 @@ +#ifndef _E2K_MACHDEP_H_ +#define _E2K_MACHDEP_H_ + +#include +#include + +#include + +#include +#include +#include +#include + +#ifdef __KERNEL__ + +struct cpuinfo_e2k; +struct pt_regs; +struct seq_file; +struct global_regs; +struct kernel_gregs; +struct local_gregs; +struct e2k_aau_context; +struct e2k_mlt; +struct kvm_vcpu_arch; +struct thread_info; +union e2k_dimtp; + +#include /* virtualization support */ + +typedef void (*restore_gregs_fn_t)(const struct global_regs *); +typedef void (*save_gregs_fn_t)(struct global_regs *); +typedef struct machdep { + int native_id; /* machine Id */ + int native_rev; /* cpu revision */ + e2k_iset_ver_t native_iset_ver; /* Instruction set version */ + bool cmdline_iset_ver; /* iset specified in cmdline */ + bool mmu_pt_v6; /* MMU is setting up to use */ + /* new page table structures */ + bool mmu_separate_pt; /* MMU was set to use */ + /* separate PTs for kernel */ + /* and users */ + bool L3_enable; /* cache L3 is enable */ + bool gmi; /* is hardware virtualized */ + /* guest VM */ + e2k_addr_t x86_io_area_base; + e2k_addr_t x86_io_area_size; + u8 max_nr_node_cpus; + u8 nr_node_cpus; + u8 node_iolinks; + e2k_addr_t pcicfg_area_phys_base; + e2k_size_t pcicfg_area_size; + e2k_addr_t nsr_area_phys_base; + e2k_size_t nbsr_area_offset; + e2k_size_t nbsr_area_size; + e2k_addr_t copsr_area_phys_base; + e2k_size_t copsr_area_size; + u8 mlt_size; + u8 tlb_lines_bits_num; + u64 tlb_addr_line_num; + u64 tlb_addr_line_num2; 
+ u8 tlb_addr_line_num_shift2; + u8 tlb_addr_set_num; + u8 tlb_addr_set_num_shift; + e2k_size_t sic_mc_size; + u8 sic_mc_count; + u32 sic_mc1_ecc; + u32 sic_io_str1; + + unsigned long cpu_features[(NR_CPU_FEATURES + 63) / 64]; + + e2k_addr_t (*get_nsr_area_phys_base)(void); + void (*setup_apic_vector_handlers)(void); +#ifdef CONFIG_SMP + void (*clk_off)(void); + void (*clk_on)(int); +#endif + void (*C1_enter)(void); + void (*C3_enter)(void); + + /* Often used pointers are placed close to each other */ + + void (*save_kernel_gregs)(struct kernel_gregs *); + void (*save_gregs)(struct global_regs *); + void (*save_local_gregs)(struct local_gregs *, bool is_signal); + save_gregs_fn_t save_gregs_dirty_bgr; + restore_gregs_fn_t restore_gregs; + void (*save_gregs_on_mask)(struct global_regs *, bool dirty_bgr, + unsigned long not_save_gregs_mask); + void (*restore_local_gregs)(const struct local_gregs *, bool is_signal); + void (*restore_gregs_on_mask)(struct global_regs *, bool dirty_bgr, + unsigned long not_restore_gregs_mask); + + void (*save_dimtp)(union e2k_dimtp *); + void (*restore_dimtp)(const union e2k_dimtp *); + void (*clear_dimtp)(void); + + void (*save_kvm_context)(struct kvm_vcpu_arch *); + void (*restore_kvm_context)(const struct kvm_vcpu_arch *); + + void (*calculate_aau_aaldis_aaldas)(const struct pt_regs *regs, + struct thread_info *ti, struct e2k_aau_context *context); + void (*do_aau_fault)(int aa_field, struct pt_regs *regs); + void (*save_aaldi)(u64 *aaldis); + void (*get_aau_context)(struct e2k_aau_context *); + + unsigned long (*rrd)(int reg); + void (*rwd)(int reg, unsigned long value); + unsigned long (*boot_rrd)(int reg); + void (*boot_rwd)(int reg, unsigned long value); + + u64 (*get_cu_hw1)(void); + void (*set_cu_hw1)(u64); + +#ifdef CONFIG_MLT_STORAGE + void (*invalidate_MLT)(void); + void (*get_and_invalidate_MLT_context)(struct e2k_mlt *mlt_state); +#endif + + void (*flushts)(void); + + void (*setup_arch)(void); + void 
(*setup_cpu_info)(struct cpuinfo_e2k *c); + int (*show_cpuinfo)(struct seq_file *m, void *v); + void (*init_IRQ)(void); + + int (*set_wallclock)(unsigned long nowtime); + unsigned long (*get_wallclock)(void); + + void (*restart)(char *cmd); + void (*power_off)(void); + void (*halt)(void); + void (*arch_reset)(char *cmd); + void (*arch_halt)(void); + + int (*get_irq_vector)(void); + + /* virtualization support: guest kernel and host/hypervisor */ + host_machdep_t host; /* host additional fields (used only by */ + /* host at arch/e2k/kvm/xxx) */ + guest_machdep_t guest; /* guest additional fields (used only by */ + /* guest at arch/e2k/kvm/guest/xxx) */ +} machdep_t; + + +/* + * When executing on pure guest kernel, guest_cpu will be set to + * 'machine.guest.id', i.e. to what hardware guest *thinks* it's + * being executed on. + */ +typedef void (*cpuhas_initcall_t)(int cpu, int revision, int iset_ver, + int guest_cpu, struct machdep *machine); +extern cpuhas_initcall_t __cpuhas_initcalls[], __cpuhas_initcalls_end[]; + +/* + * feature = + * if ('is_static') + * 'static_cond' checked at build time; + * else + * 'dynamic_cond' checked in runtime; + */ +#ifndef BUILD_CPUHAS_INITIALIZERS +# define CPUHAS(feat, is_static, static_cond, dynamic_cond) \ + static const char feat##_is_static = !!(is_static); \ + static const char feat##_is_set_statically = !!(static_cond); + +#else /* #ifdef BUILD_CPUHAS_INITIALIZERS */ +# include +# define CPUHAS(feat, _is_static, static_cond, dynamic_cond) \ + __init \ + static void feat##_initializer(const int cpu, const int revision, \ + const int iset_ver, const int guest_cpu, \ + struct machdep *const machine) { \ + bool is_static = (_is_static); \ + if (is_static && (static_cond) || !is_static && (dynamic_cond)) \ + set_bit(feat, (machine)->cpu_features); \ + } \ + static cpuhas_initcall_t __cpuhas_initcall_##feat __used \ + __section(".cpuhas_initcall") = &feat##_initializer; +#endif + + +/* Most of these bugs are not emulated on 
simulator but + * set them anyway to make kernel running on a simulator + * behave in the same way as on real hardware. */ + +/* #47176 - Large pages do not work. + * Workaround - do not use them. */ +CPUHAS(CPU_HWBUG_LARGE_PAGES, + !IS_ENABLED(CONFIG_CPU_ES2), + false, + cpu == IDR_ES2_DSP_MDL && revision < 1); +/* #56947 - lapic timer can lose interrupts. + * Workaround - do not use oneshot mode. */ +CPUHAS(CPU_HWBUG_LAPIC_TIMER, + !IS_ENABLED(CONFIG_CPU_ES2), + false, + cpu == IDR_ES2_DSP_MDL && revision < 1); +/* #69194 - PIO reads can hang processor. + * Workaround - serialize PIO reads on every CPU. */ +CPUHAS(CPU_HWBUG_PIO_READS, + !IS_ENABLED(CONFIG_CPU_ES2), + false, + cpu == IDR_ES2_DSP_MDL && (revision <= 1 || revision == 6) || + cpu == IDR_ES2_RU_MDL && revision <= 1); +/* #71610 - Atomic operations can be non-atomic + * Workaround - flush data cache line. + * This workaround increases the count of DCACHE flushes, + * Turmalin has hardware bug with flushes so don't use + * this workaround on it. */ +CPUHAS(CPU_HWBUG_ATOMIC, + !IS_ENABLED(CONFIG_CPU_ES2), + false, + cpu == IDR_ES2_DSP_MDL); +/* #58397, #76626 - CLW does not work. + * Workaround - do not use it. */ +CPUHAS(CPU_HWBUG_CLW, + !IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E2S), + false, + cpu == IDR_ES2_DSP_MDL && (revision <= 1 || revision == 6) || + cpu == IDR_ES2_RU_MDL && revision <= 1 || + cpu == IDR_E2S_MDL && revision == 0); +/* #76626 - "Page accessed" bit in PTE does not work. + * Workaround - always set it. */ +CPUHAS(CPU_HWBUG_PAGE_A, + !IS_ENABLED(CONFIG_CPU_ES2), + false, + cpu == IDR_ES2_DSP_MDL && (revision <= 1 || revision == 6) || + cpu == IDR_ES2_RU_MDL && revision <= 1); +/* #78411 - Sometimes exc_illegal_instr_addr is generated + * instead of exc_instr_page_miss. + * Workaround - always return to user from exc_illegal_instr_addr. 
*/ +CPUHAS(CPU_HWBUG_SPURIOUS_EXC_ILL_INSTR_ADDR, + !IS_ENABLED(CONFIG_CPU_E2S), + false, + cpu == IDR_E2S_MDL && revision <= 1); +/* #83160 - unaligned loads do not work + * Workaround - limit the stream of unaligned loads to less + * than 32 bytes per cycle and put "wait ld_c" before it. */ +CPUHAS(CPU_HWBUG_UNALIGNED_LOADS, + !IS_ENABLED(CONFIG_CPU_ES2), + false, + cpu == IDR_ES2_DSP_MDL && (revision <= 1 || revision == 6) || + cpu == IDR_ES2_RU_MDL && revision <= 1) +/* # 83884 - es2 deadlocks on DMA to neighbour node. + * #100984 - e8c: DMA to neighbour node slows down. + * #136177 - no DMA through the links B and C. + * Workaround - allocate DMA buffers only in the device node. */ +CPUHAS(CPU_HWBUG_CANNOT_DO_DMA_IN_NEIGHBOUR_NODE, + !IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E8C) && + !IS_ENABLED(CONFIG_CPU_E16C), + false, + cpu == IDR_ES2_DSP_MDL && (revision <= 1 || revision == 6) || + cpu == IDR_ES2_RU_MDL && revision <= 1 || + cpu == IDR_E8C_MDL && revision <= 2 || + cpu == IDR_E16C_MDL && revision == 0); +/* #83884 - es2 deadlock on DMA at + * (APIC_DEFAULT_PHYS_BASE & 0x7fffFFFF) address. + * Workaround - reserve the 4K page at this address. */ +CPUHAS(CPU_HWBUG_DMA_AT_APIC_ADDR, + !IS_ENABLED(CONFIG_CPU_ES2), + false, + cpu == IDR_ES2_DSP_MDL); +/* #88644 - data profiling events are lost if overflow happens + * under closed NM interrupts; also DDMCR writing does not clear + * pending exc_data_debug exceptions. + * Workaround - disable data monitor profiling in kernel. */ +CPUHAS(CPU_HWBUG_KERNEL_DATA_MONITOR, + IS_ENABLED(CONFIG_E2K_MACHINE), + IS_ENABLED(CONFIG_CPU_ES2) || IS_ENABLED(CONFIG_CPU_E2S) || + IS_ENABLED(CONFIG_CPU_E8C) || IS_ENABLED(CONFIG_CPU_E1CP), + cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL || + cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL || + cpu == IDR_E1CP_MDL); +/* #89495 - write barrier does not work (even for atomics). + * Workaround - special command sequence after every read-acquire. 
*/ +CPUHAS(CPU_HWBUG_WRITE_MEMORY_BARRIER, + !IS_ENABLED(CONFIG_CPU_E8C), + false, + cpu == IDR_E8C_MDL && revision <= 1); +/* #89653 - some hw counter won't reset, which may cause corruption of DMA. + * Workaround - reset machine until the counter sets in good value */ +CPUHAS(CPU_HWBUG_BAD_RESET, + !IS_ENABLED(CONFIG_CPU_E8C), + false, + cpu == IDR_E8C_MDL && revision <= 1); +/* #90514 - hardware hangs after modifying code with a breakpoint. + * Workaround - use HS.lng from the instruction being replaced. */ +CPUHAS(CPU_HWBUG_BREAKPOINT_INSTR, + IS_ENABLED(CONFIG_E2K_MACHINE), + IS_ENABLED(CONFIG_CPU_ES2) || IS_ENABLED(CONFIG_CPU_E2S) || + IS_ENABLED(CONFIG_CPU_E8C) || IS_ENABLED(CONFIG_CPU_E8C2), + cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL || + cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL || + cpu == IDR_E8C2_MDL); +/* #92834, #96516 - hang because of hardware problems. + * Workaround - boot activates watchdog, kernel should disable it */ +CPUHAS(CPU_HWBUG_E8C_WATCHDOG, + !IS_ENABLED(CONFIG_CPU_E8C), + false, + cpu == IDR_E8C_MDL && revision <= 1); +/* #94466 */ +CPUHAS(CPU_HWBUG_IOMMU, + !IS_ENABLED(CONFIG_CPU_E2S), + false, + cpu == IDR_E2S_MDL && revision <= 2); +/* #95860 - WC memory conflicts with DAM. 
+ * Workaround - "wait st_c" between WC writes and cacheable loads */ +CPUHAS(CPU_HWBUG_WC_DAM, + !IS_ENABLED(CONFIG_CPU_E2S) && !IS_ENABLED(CONFIG_CPU_E8C) && + !IS_ENABLED(CONFIG_CPU_E8C2), + false, + cpu == IDR_E2S_MDL && revision <= 2 || + cpu == IDR_E8C_MDL && revision <= 1 || + cpu == IDR_E8C2_MDL && revision == 0); +/* 96719 - combination of flags s_f=0, store=1, sru=1 is possible + * Workaround - treat it as s_f=1, store=1, sru=1 */ +CPUHAS(CPU_HWBUG_TRAP_CELLAR_S_F, + IS_ENABLED(CONFIG_E2K_MACHINE) && !IS_ENABLED(CONFIG_CPU_E8C2), + IS_ENABLED(CONFIG_CPU_ES2) || IS_ENABLED(CONFIG_CPU_E2S) || + IS_ENABLED(CONFIG_CPU_E8C) || IS_ENABLED(CONFIG_CPU_E1CP), + cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL || + cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL || + cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL && revision == 0); +/* #97594 - %cr1_lo.ss flag is lost if ext. interrupt arrives faster. + * Workaround - manually set %cr1_lo.ss again in interrupt handler */ +CPUHAS(CPU_HWBUG_SS, + !IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E2S) && + !IS_ENABLED(CONFIG_CPU_E8C) && !IS_ENABLED(CONFIG_CPU_E1CP) && + !IS_ENABLED(CONFIG_CPU_E8C2), + false, + cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL || + cpu == IDR_E2S_MDL && revision <= 2 || + cpu == IDR_E8C_MDL && revision <= 2 || + cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL && revision == 0); +/* #99302 - %aaldv sometimes is not restored properly. + * Workaround - insert 'wait ma_c' barrier */ +CPUHAS(CPU_HWBUG_AAU_AALDV, + IS_ENABLED(CONFIG_E2K_MACHINE) && !IS_ENABLED(CONFIG_CPU_E8C2), + IS_ENABLED(CONFIG_CPU_ES2) || IS_ENABLED(CONFIG_CPU_E2S) || + IS_ENABLED(CONFIG_CPU_E8C) || IS_ENABLED(CONFIG_CPU_E1CP), + cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL || + cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL || + cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL && revision == 0); +/* #103223 - LAPIC does not send EoI to IO_APIC for level interrupts. 
+ * Workaround - wait under closed interrupts until APIC_ISR clears */ +CPUHAS(CPU_HWBUG_LEVEL_EOI, + IS_ENABLED(CONFIG_E2K_MACHINE), + IS_ENABLED(CONFIG_CPU_ES2) || IS_ENABLED(CONFIG_CPU_E2S) || + IS_ENABLED(CONFIG_CPU_E8C) || IS_ENABLED(CONFIG_CPU_E1CP) || + IS_ENABLED(CONFIG_CPU_E8C2), + cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL || + cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL || + cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL); +/* #104865 - hardware might generate a false single step interrupt + * Workaround - clean frame 0 of PCS during the allocation */ +CPUHAS(CPU_HWBUG_FALSE_SS, + IS_ENABLED(CONFIG_E2K_MACHINE) && !IS_ENABLED(CONFIG_CPU_E2S) && + !IS_ENABLED(CONFIG_CPU_E8C), + IS_ENABLED(CONFIG_CPU_ES2) || IS_ENABLED(CONFIG_CPU_E1CP) || + IS_ENABLED(CONFIG_CPU_E8C2), + cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL || + cpu == IDR_E2S_MDL && revision <= 2 || + cpu == IDR_E8C_MDL && revision <= 2 || + cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL); +/* #117649 - false exc_data_debug are generated based on _previous_ + * values in ld/st address registers. + * Workaround - forbid data breakpoint on the first 31 bytes + * (hardware prefetch works with 32 bytes blocks). */ +CPUHAS(CPU_HWBUG_SPURIOUS_EXC_DATA_DEBUG, + IS_ENABLED(CONFIG_E2K_MACHINE) && !IS_ENABLED(CONFIG_CPU_E16C) && + !IS_ENABLED(CONFIG_CPU_E2C3), + IS_ENABLED(CONFIG_CPU_ES2) || IS_ENABLED(CONFIG_CPU_E2S) || + IS_ENABLED(CONFIG_CPU_E8C) || IS_ENABLED(CONFIG_CPU_E1CP) || + IS_ENABLED(CONFIG_CPU_E8C2), + cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL || + cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL || + cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL || + cpu == IDR_E16C_MDL && revision == 0 || + cpu == IDR_E2C3_MDL && revision == 0); +/* #119084 - several TLB flushes in a row might fail to flush L1D. 
+ * Workaround - insert "wait fl_c" immediately after every TLB flush */ +CPUHAS(CPU_HWBUG_TLB_FLUSH_L1D, + IS_ENABLED(CONFIG_E2K_MACHINE), + IS_ENABLED(CONFIG_CPU_E8C2), + cpu == IDR_E8C2_MDL); +/* #121311 - asynchronous entries in INTC_INFO_MU always have "pm" bit set. + * Workaround - use "pm" bit saved in guest's chain stack. */ +CPUHAS(CPU_HWBUG_GUEST_ASYNC_PM, + !IS_ENABLED(CONFIG_CPU_E16C) && !IS_ENABLED(CONFIG_CPU_E2C3), + false, + cpu == IDR_E16C_MDL && revision == 0 || + cpu == IDR_E2C3_MDL && revision == 0); +/* #122946 - conflict new interrupt while sync signal turning off. + * Workaround - waiting for C0 after "wait int=1" */ +CPUHAS(CPU_HWBUG_E16C_SLEEP, + !IS_ENABLED(CONFIG_CPU_E16C), + false, + cpu == IDR_E16C_MDL && revision == 0); +/* #124206 - instruction buffer stops working + * Workaround - prepare %ctpr's in glaunch/trap handler entry; + * avoid rbranch in glaunch/trap handler entry and exit. */ +CPUHAS(CPU_HWBUG_L1I_STOPS_WORKING, + IS_ENABLED(CONFIG_E2K_MACHINE) && !IS_ENABLED(CONFIG_CPU_E16C) && + !IS_ENABLED(CONFIG_CPU_E2C3), + IS_ENABLED(CONFIG_CPU_ES2) || IS_ENABLED(CONFIG_CPU_E2S) || + IS_ENABLED(CONFIG_CPU_E8C) || IS_ENABLED(CONFIG_CPU_E1CP) || + IS_ENABLED(CONFIG_CPU_E8C2), + cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL || + cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL || + cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL || + cpu == IDR_E16C_MDL && revision == 0 || + cpu == IDR_E2C3_MDL && revision == 0); +/* #124947 - CLW clearing by OS must be done on the same CPU that started the + * hardware clearing operation to avoid creating a stale L1 entry. + * Workaround - forbid migration until CLW clearing is finished in software. 
*/ +CPUHAS(CPU_HWBUG_CLW_STALE_L1_ENTRY, + IS_ENABLED(CONFIG_E2K_MACHINE) && !IS_ENABLED(CONFIG_CPU_E16C), + IS_ENABLED(CONFIG_CPU_E2S) || IS_ENABLED(CONFIG_CPU_E8C) || + IS_ENABLED(CONFIG_CPU_E8C2), + cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL || cpu == IDR_E8C2_MDL || + cpu == IDR_E16C_MDL && revision == 0); +/* #125405 - CPU pipeline freeze feature conflicts with performance monitoring. + * Workaround - disable pipeline freeze when monitoring is enabled. + * + * Note (#132311): disable workaround on e16c.rev0/e2c3.rev0 since it conflicts + * with #134929 workaround. */ +CPUHAS(CPU_HWBUG_PIPELINE_FREEZE_MONITORS, + IS_ENABLED(CONFIG_E2K_MACHINE) && !IS_ENABLED(CONFIG_CPU_E16C) && + !IS_ENABLED(CONFIG_CPU_E2C3), + IS_ENABLED(CONFIG_CPU_E8C2) || IS_ENABLED(CONFIG_CPU_E12C), + cpu == IDR_E8C2_MDL || cpu == IDR_E12C_MDL || + cpu == IDR_E16C_MDL && revision > 0 || + cpu == IDR_E2C3_MDL && revision > 0); +/* #126587 - "wait ma_c=1" does not wait for all L2$ writebacks to complete + * when disabling CPU core with "wait trap=1" algorithm. + * Workaround - manually insert 66 NOPs before "wait trap=1" */ +CPUHAS(CPU_HWBUG_C3_WAIT_MA_C, + IS_ENABLED(CONFIG_E2K_MACHINE), + IS_ENABLED(CONFIG_CPU_E2S) || IS_ENABLED(CONFIG_CPU_E8C) || + IS_ENABLED(CONFIG_CPU_E1CP), + cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL || cpu == IDR_E1CP_MDL); +/* #128127 - Intercepting SCLKM3 write does not prevent guest from writing it. + * Workaround - Update SH_SCLKM3 in intercept handler */ +CPUHAS(CPU_HWBUG_VIRT_SCLKM3_INTC, + !IS_ENABLED(CONFIG_CPU_E16C) && !IS_ENABLED(CONFIG_CPU_E2C3), + false, + cpu == IDR_E16C_MDL && revision == 0 || + cpu == IDR_E2C3_MDL && revision == 0); +/* #129848 - alignment of usd_hi write depends on current usd_lo.p + * Workaround - write usd_lo before usd_hi, while keeping 2 tact distance from sbr write. 
+ * Valid sequences are: sbr, nop, usd.lo, usd.hi OR sbr, usd.lo, usd.hi, usd.lo */ +CPUHAS(CPU_HWBUG_USD_ALIGNMENT, + IS_ENABLED(CONFIG_E2K_MACHINE), + IS_ENABLED(CONFIG_CPU_ES2) || IS_ENABLED(CONFIG_CPU_E2S) || + IS_ENABLED(CONFIG_CPU_E8C) || IS_ENABLED(CONFIG_CPU_E1CP) || + IS_ENABLED(CONFIG_CPU_E8C2) || IS_ENABLED(CONFIG_CPU_E16C) || + IS_ENABLED(CONFIG_CPU_E2C3) || IS_ENABLED(CONFIG_CPU_E12C), + cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL || + cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL || + cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL || + cpu == IDR_E16C_MDL || cpu == IDR_E2C3_MDL || + cpu == IDR_E12C_MDL); +/* #130039 - intercepting some specific sequences of call/return/setwd + * (that change WD.psize in a specific way) does not work. + * Workaround - avoid those sequences. */ +CPUHAS(CPU_HWBUG_VIRT_PSIZE_INTERCEPTION, + !IS_ENABLED(CONFIG_CPU_E16C) && !IS_ENABLED(CONFIG_CPU_E2C3), + false, + cpu == IDR_E16C_MDL && revision == 0 || + cpu == IDR_E2C3_MDL && revision == 0); + +/* #130066, #134351 - L1/L2 do not respect "lal"/"las"/"sas"/"st_rel" barriers. + * Workaround - do not use "las"/"sas"/"st_rel", and add 5 nops after "lal". + * #133605 - "lal"/"las"/"sas"/"sal" barriers do not work in certain conditions. + * Workaround - add {nop} before them. + * + * Note that #133605 workaround is split into two parts: + * CPU_NO_HWBUG_SOFT_WAIT - for e16c/e2c3 + * CPU_HWBUG_SOFT_WAIT_E8C2 - for e8c2 + * This is done because it is very convenient to merge #130066, #134351 + * and #133605 bugs workarounds together for e16c/e2c3. */ +CPUHAS(CPU_NO_HWBUG_SOFT_WAIT, + !IS_ENABLED(CONFIG_CPU_E16C) && !IS_ENABLED(CONFIG_CPU_E2C3), + true, + !(cpu == IDR_E16C_MDL && revision == 0 || + cpu == IDR_E2C3_MDL && revision == 0)); +CPUHAS(CPU_HWBUG_SOFT_WAIT_E8C2, + IS_ENABLED(CONFIG_E2K_MACHINE), + IS_ENABLED(CONFIG_CPU_E8C2), + cpu == IDR_E8C2_MDL); + +/* #132693 - C3 idle state does not work. + * Workaround - do not use it. 
*/ +CPUHAS(CPU_HWBUG_C3, + !IS_ENABLED(CONFIG_CPU_E16C), + false, + cpu == IDR_E16C_MDL && revision == 0); + +/* + * Not bugs but features go here + */ + +/* On some processor's revisions writecombine memory + * in prefetchable PCI area is not allowed. */ +CPUHAS(CPU_FEAT_WC_PCI_PREFETCH, + !IS_ENABLED(CONFIG_CPU_ES2), + true, + !((cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL) && + revision == 0)); +/* #82499 - Instruction Cache must be handled carefully + * when flush_dc_line also flushes IC by physical address. */ +CPUHAS(CPU_FEAT_FLUSH_DC_IC, + CONFIG_CPU_ISET != 0, + CONFIG_CPU_ISET >= 3, + iset_ver >= E2K_ISET_V3); +/* Rely on IDR instead of iset version to choose between APIC and EPIC. + * For guest we use it's own fake IDR so that we choose between APIC and + * EPIC based on what hardware guest *thinks* it's being executed on. */ +CPUHAS(CPU_FEAT_EPIC, + IS_ENABLED(CONFIG_E2K_MACHINE) && + !IS_ENABLED(CONFIG_KVM_GUEST_KERNEL), + !IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E2S) && + !IS_ENABLED(CONFIG_CPU_E8C) && !IS_ENABLED(CONFIG_CPU_E1CP) && + !IS_ENABLED(CONFIG_CPU_E8C2), + guest_cpu != IDR_ES2_DSP_MDL && guest_cpu != IDR_ES2_RU_MDL && + guest_cpu != IDR_E2S_MDL && guest_cpu != IDR_E8C_MDL && + guest_cpu != IDR_E1CP_MDL && guest_cpu != IDR_E8C2_MDL); +/* Shows which user registers must be saved upon trap entry/exit */ +CPUHAS(CPU_FEAT_TRAP_V5, + CONFIG_CPU_ISET != 0, + CONFIG_CPU_ISET == 5, + iset_ver == E2K_ISET_V5); +CPUHAS(CPU_FEAT_TRAP_V6, + CONFIG_CPU_ISET != 0, + CONFIG_CPU_ISET >= 6, + iset_ver >= E2K_ISET_V6); +/* QP registers: only since iset V5 */ +CPUHAS(CPU_FEAT_QPREG, + CONFIG_CPU_ISET != 0, + CONFIG_CPU_ISET >= 5, + iset_ver >= E2K_ISET_V5); +/* Hardware prefetcher that resides in L2 and works on phys. 
addresses */ +CPUHAS(CPU_FEAT_HW_PREFETCHER, + IS_ENABLED(CONFIG_E2K_MACHINE), + !IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E2S) && + !IS_ENABLED(CONFIG_CPU_E8C) && !IS_ENABLED(CONFIG_CPU_E1CP) && + !IS_ENABLED(CONFIG_CPU_E8C2) && !IS_ENABLED(CONFIG_CPU_E16C) && + !IS_ENABLED(CONFIG_CPU_E2C3), + cpu != IDR_ES2_DSP_MDL && cpu != IDR_ES2_RU_MDL && + cpu != IDR_E2S_MDL && cpu != IDR_E8C_MDL && + cpu != IDR_E1CP_MDL && cpu != IDR_E8C2_MDL && + cpu != IDR_E16C_MDL && cpu != IDR_E2C3_MDL); +/* Optimized version of machine.iset check */ +CPUHAS(CPU_FEAT_ISET_V3, + CONFIG_CPU_ISET != 0, + CONFIG_CPU_ISET >= 3, + iset_ver >= E2K_ISET_V3); +CPUHAS(CPU_FEAT_ISET_V5, + CONFIG_CPU_ISET != 0, + CONFIG_CPU_ISET >= 5, + iset_ver >= E2K_ISET_V5); +CPUHAS(CPU_FEAT_ISET_V6, + CONFIG_CPU_ISET != 0, + CONFIG_CPU_ISET >= 6, + iset_ver >= E2K_ISET_V6); + + +static inline unsigned long test_feature_dynamic(struct machdep *machine, int feature) +{ + unsigned long *addr = machine->cpu_features; + + return 1UL & (addr[feature / 64] >> (feature & 63)); +} + +#define test_feature(machine, feature) \ + ((feature##_is_static) ? \ + (feature##_is_set_statically) : \ + test_feature_dynamic(machine, feature)) + +#define boot_machine_has(machine_p, feature) \ + test_feature(machine_p, feature) +#define boot_cpu_has(feature) boot_machine_has(&boot_machine, feature) + +#ifdef CONFIG_BOOT_E2K +# define cpu_has(feature) test_feature(&machine, feature) +#elif defined(E2K_P2V) +# define cpu_has(feature) boot_cpu_has(feature) +#else +# define cpu_has(feature) test_feature(&machine, feature) +#endif + +/* Normally cpu_has() is passed symbolic name of feature (e.g. CPU_FEAT_*), + * use this one instead if only numeric value of feature is known. 
*/ +#define cpu_has_by_value(feature) test_feature_dynamic(&machine, feature) + +extern void cpu_set_feature(struct machdep *machine, int feature); +extern void cpu_clear_feature(struct machdep *machine, int feature); + + +extern __nodedata machdep_t machine; +extern __nodedata pt_struct_t pgtable_struct; + +#define boot_machine (boot_get_vo_value(machine)) +#define boot_pgtable_struct ((pt_struct_t)boot_get_vo_value(pgtable_struct)) +#define boot_pgtable_struct_p boot_vp_to_pp(&pgtable_struct) + +#if CONFIG_CPU_ISET >= 6 +# define IS_CPU_ISET_V6() true +#elif CONFIG_CPU_ISET >= 1 +# define IS_CPU_ISET_V6() false +#elif CONFIG_CPU_ISET == 0 +# ifdef E2K_P2V +# define IS_CPU_ISET_V6() \ + (boot_machine.native_iset_ver >= E2K_ISET_V6) +# else /* ! E2K_P2V */ +# define IS_CPU_ISET_V6() \ + (machine.native_iset_ver >= E2K_ISET_V6) +# endif /* E2K_P2V */ +#else /* CONFIG_CPU_ISET undefined or negative */ +# warning "Undefined CPU ISET VERSION #, IS_CPU_ISET_V6 is defined dinamicaly" +# ifdef E2K_P2V +# define IS_CPU_ISET_V6() \ + (boot_machine.native_iset_ver >= E2K_ISET_V6) +# else /* ! 
E2K_P2V */ +# define IS_CPU_ISET_V6() \ + (machine.native_iset_ver >= E2K_ISET_V6) +# endif /* E2K_P2V */ +#endif /* CONFIG_CPU_ISET 0-6 */ + +#define IS_HV_GM() (machine.gmi) + +extern void save_kernel_gregs_v2(struct kernel_gregs *); +extern void save_kernel_gregs_v5(struct kernel_gregs *); +extern void save_gregs_v2(struct global_regs *); +extern void save_gregs_v5(struct global_regs *); +extern void save_local_gregs_v2(struct local_gregs *, bool is_signal); +extern void save_local_gregs_v5(struct local_gregs *, bool is_signal); +extern void save_gregs_dirty_bgr_v2(struct global_regs *); +extern void save_gregs_dirty_bgr_v5(struct global_regs *); +extern void save_gregs_on_mask_v2(struct global_regs *, bool dirty_bgr, + unsigned long mask_not_save); +extern void save_gregs_on_mask_v5(struct global_regs *, bool dirty_bgr, + unsigned long mask_not_save); +extern void restore_gregs_v2(const struct global_regs *); +extern void restore_gregs_v5(const struct global_regs *); +extern void restore_local_gregs_v2(const struct local_gregs *, bool is_signal); +extern void restore_local_gregs_v5(const struct local_gregs *, bool is_signal); +extern void restore_gregs_on_mask_v2(struct global_regs *, bool dirty_bgr, + unsigned long mask_not_restore); +extern void restore_gregs_on_mask_v5(struct global_regs *, bool dirty_bgr, + unsigned long mask_not_restore); +extern void save_dimtp_v6(union e2k_dimtp *); +extern void restore_dimtp_v6(const union e2k_dimtp *); +extern void clear_dimtp_v6(void); +extern void save_kvm_context_v6(struct kvm_vcpu_arch *); +extern void restore_kvm_context_v6(const struct kvm_vcpu_arch *); +extern void qpswitchd_sm(int); + +extern void calculate_aau_aaldis_aaldas_v2(const struct pt_regs *regs, + struct thread_info *ti, struct e2k_aau_context *context); +extern void calculate_aau_aaldis_aaldas_v5(const struct pt_regs *regs, + struct thread_info *ti, struct e2k_aau_context *context); +extern void calculate_aau_aaldis_aaldas_v6(const struct pt_regs 
*regs, + struct thread_info *ti, struct e2k_aau_context *context); +extern void do_aau_fault_v2(int aa_field, struct pt_regs *regs); +extern void do_aau_fault_v5(int aa_field, struct pt_regs *regs); +extern void do_aau_fault_v6(int aa_field, struct pt_regs *regs); +extern void save_aaldi_v2(u64 *aaldis); +extern void save_aaldi_v5(u64 *aaldis); +extern void get_aau_context_v2(struct e2k_aau_context *); +extern void get_aau_context_v5(struct e2k_aau_context *); + +extern void flushts_v3(void); + +extern unsigned long boot_native_read_IDR_reg_value(void); + +unsigned long rrd_v2(int); +unsigned long rrd_v3(int); +unsigned long rrd_v6(int); +void rwd_v2(int reg, unsigned long value); +void rwd_v3(int reg, unsigned long value); +void rwd_v6(int reg, unsigned long value); +unsigned long boot_rrd_v2(int); +unsigned long boot_rrd_v3(int); +unsigned long boot_rrd_v6(int); +void boot_rwd_v2(int reg, unsigned long value); +void boot_rwd_v3(int reg, unsigned long value); +void boot_rwd_v6(int reg, unsigned long value); + +/* Supported registers for machine->rrd()/rwd() */ +enum { + E2K_REG_CORE_MODE, + E2K_REG_HCEM, + E2K_REG_HCEB, + E2K_REG_OSCUTD, + E2K_REG_OSCUIR, +}; + +u64 native_get_cu_hw1_v2(void); +u64 native_get_cu_hw1_v5(void); +void native_set_cu_hw1_v2(u64); +void native_set_cu_hw1_v5(u64); + +void invalidate_MLT_v2(void); +void invalidate_MLT_v3(void); +void get_and_invalidate_MLT_context_v2(struct e2k_mlt *mlt_state); +void get_and_invalidate_MLT_context_v3(struct e2k_mlt *mlt_state); +void get_and_invalidate_MLT_context_v6(struct e2k_mlt *mlt_state); + +#ifdef CONFIG_SMP +void clock_off_v3(void); +void clock_on_v3(int cpu); +#endif + +void C1_enter_v2(void); +void C1_enter_v6(void); +void C3_enter_v3(void); +void C3_enter_v6(void); +#endif /* __KERNEL__ */ + +#endif /* _E2K_MACHDEP_H_ */ diff --git a/arch/e2k/include/asm/machdep_numa.h b/arch/e2k/include/asm/machdep_numa.h new file mode 100644 index 000000000000..b36e707e28e7 --- /dev/null +++ 
b/arch/e2k/include/asm/machdep_numa.h @@ -0,0 +1,23 @@ +#ifndef _E2K_MACHDEP_NUMA_H_ +#define _E2K_MACHDEP_NUMA_H_ + +#include +#include + +#ifdef CONFIG_NUMA +#define the_node_machine(nid) \ + ((machdep_t *)__va(vpa_to_pa( \ + node_kernel_va_to_pa(nid, &machine)))) +#define node_machine the_node_machine(numa_node_id()) +#define the_node_pgtable_struct(nid) \ + ((pt_struct_t *)__va(vpa_to_pa(node_kernel_va_to_pa(nid, \ + &pgtable_struct)))) +#define node_pgtable_struct the_node_pgtable_struct(numa_node_id()) +#else /* ! CONFIG_NUMA */ +#define the_node_machine(nid) (&machine) +#define node_machine the_node_machine(0) +#define the_node_pgtable_struct(nid) (&pgtable_struct) +#define node_pgtable_struct the_node_pgtable_struct(0) +#endif /* CONFIG_NUMA */ + +#endif diff --git a/arch/e2k/include/asm/mas.h b/arch/e2k/include/asm/mas.h new file mode 100644 index 000000000000..2481fbb08123 --- /dev/null +++ b/arch/e2k/include/asm/mas.h @@ -0,0 +1,156 @@ +#ifndef _E2K_MAS_H_ +#define _E2K_MAS_H_ + +#include + +#include + +#ifndef __ASSEMBLY__ + +/* new in iset v6 */ +typedef union { + struct { + u8 mod : 3; + u8 opc : 4; + u8 : 1; + } masf1; + struct { + u8 mod : 3; + u8 be : 1; + u8 m1 : 1 /* == 0 */; + u8 dc_ch : 2; + u8 : 1; + } masf2; + struct { + u8 mod : 3 /* == 3,7 */; + u8 be : 1; + u8 m1 : 1 /* == 1 */; + u8 m3 : 1; + u8 mt : 1; + u8 : 1; + } masf3; + struct { + u8 m2 : 2; + u8 ch1 : 1; + u8 be : 1; + u8 m1 : 1 /* == 1 */; + u8 dc_ch : 2; + u8 : 1; + } masf4; + u8 word; +} e2k_v6_mas_t; + +# define MAS_MT_0 0 +# define MAS_MT_1 1 + +# define MAS_STORE_RELEASE_V6(_mt) \ +({ \ + e2k_v6_mas_t __mas = { \ + .masf3.mod = 3, \ + .masf3.be = 0, \ + .masf3.m1 = 1, \ + .masf3.m3 = 0, \ + .masf3.mt = (_mt) \ + }; \ + __mas.word; \ +}) + +# define MAS_LOAD_ACQUIRE_V6(_mt) \ +({ \ + e2k_v6_mas_t __mas = { \ + .masf3.mod = 3, \ + .masf3.be = 0, \ + .masf3.m1 = 1, \ + .masf3.m3 = 0, \ + .masf3.mt = (_mt) \ + }; \ + __mas.word; \ +}) + +# define MAS_LOAD_ACQUIRE_V6(_mt) \ +({ \ 
+ e2k_v6_mas_t __mas = { \ + .masf3.mod = 3, \ + .masf3.be = 0, \ + .masf3.m1 = 1, \ + .masf3.m3 = 0, \ + .masf3.mt = (_mt) \ + }; \ + __mas.word; \ +}) + +/* Only ALC0 or ALC0/ALC2 for quadro */ +# define MAS_WATCH_FOR_MODIFICATION_V6 \ +({ \ + e2k_v6_mas_t __mas = { \ + .masf4.m1 = 1, \ + .masf4.m2 = 1 \ + }; \ + __mas.word; \ +}) + +/* Note that 'root', 'spec' and 'store' must also be checked */ +static inline bool is_mas_secondary_lock_trap_on_store(unsigned int mas) +{ + return (mas & 3) == 1; +} + +/* Note that 'root', 'spec' and 'store' must also be checked */ +static inline bool is_mas_secondary_lock_trap_on_load_store(unsigned int mas) +{ + return (mas & 3) == 2; +} + +/* Note that 'chan', 'spec' and 'store' must also be checked */ +static inline bool is_mas_special_mmu_aau(unsigned int mas) +{ + return (mas & 7) == 7; +} + +/* mas is conflict check between ld and st */ +static inline bool is_mas_check(unsigned int mas) +{ + unsigned int m1 = (mas >> 3) & 0x2; + unsigned int big_endian = (mas >> 3) & 0x1; + unsigned int mod = mas & 0x7; + + return m1 == 0x0 && mod == 0x2 && !big_endian; +} + +/* mas is conflict check with unlock between ld and st */ +static inline bool is_mas_check_unlock(unsigned int mas) +{ + unsigned int m1 = (mas >> 3) & 0x2; + unsigned int big_endian = (mas >> 3) & 0x1; + unsigned int mod = mas & 0x7; + + return m1 == 0x0 && mod == 0x3 && !big_endian; +} + +/* mas is semi-speculative conflict lock check between ld and st */ +static inline bool is_mas_lock_check(unsigned int mas) +{ + unsigned int m1 = (mas >> 3) & 0x2; + unsigned int big_endian = (mas >> 3) & 0x1; + unsigned int mod = mas & 0x7; + unsigned int m2 = mas & 0x3; + + return (m1 == 0x0 && mod == 0x4 || m1 == 0x1 && m2 == 0x1) && + !big_endian; +} + +/* mas is speculative conflict lock check between ld and st */ +static inline bool is_mas_spec_lock_check(unsigned int mas) +{ + unsigned int m1 = (mas >> 3) & 0x2; + unsigned int big_endian = (mas >> 3) & 0x1; + unsigned int 
mod = mas & 0x7; + unsigned int m2 = mas & 0x3; + + return (m1 == 0x0 && mod == 0x7 || m1 == 0x1 && m2 == 0x3) && + !big_endian; +} + +#endif + +#endif /* _E2K_MAS_H_ */ diff --git a/arch/e2k/include/asm/mc146818rtc.h b/arch/e2k/include/asm/mc146818rtc.h new file mode 100644 index 000000000000..8e414ddc8a09 --- /dev/null +++ b/arch/e2k/include/asm/mc146818rtc.h @@ -0,0 +1,12 @@ +/* + * Machine dependent access functions for RTC registers. + */ +#ifndef _ASM_MC146818RTC_H +#define _ASM_MC146818RTC_H + +#include +#include + +#define RTC_IRQ 8 + +#endif /* _ASM_MC146818RTC_H */ diff --git a/arch/e2k/include/asm/mlt.h b/arch/e2k/include/asm/mlt.h new file mode 100644 index 000000000000..11f003dc0ce9 --- /dev/null +++ b/arch/e2k/include/asm/mlt.h @@ -0,0 +1,85 @@ +#ifndef _E2K_MLT_H_ +#define _E2K_MLT_H_ + +#include +#include +#include + + +#define NATIVE_MLT_SIZE (machine.mlt_size) +#define NATIVE_MAX_MLT_SIZE ES2_MLT_SIZE + +#define REG_MLT_N_SHIFT 7 +#define REG_MLT_DW_SHIFT 5 +#define REG_MLT_TYPE_SHIFT 0 + +#define REG_MLT_TYPE 5UL + + +typedef unsigned long e2k_mlt_line_t; + +typedef struct e2k_mlt_dw0_v2_fields +{ + e2k_mlt_line_t resc : 4; /* [3:0] */ + e2k_mlt_line_t mask : 8; /* [11:4] */ + e2k_mlt_line_t page : 28; /* [39:12]*/ + e2k_mlt_line_t opcod_size : 3; /* [42:40] */ + e2k_mlt_line_t rg : 8; /* [50:43] */ + e2k_mlt_line_t lock_mode : 1; /* [51] */ + e2k_mlt_line_t hit : 1; /* [52] */ + e2k_mlt_line_t val : 1; /* [53] */ + e2k_mlt_line_t unresolved : 10; /* [63:54] */ +} e2k_mlt_dw0_v2_fields_t; + +typedef struct e2k_mlt_dw0_v6_fields +{ + e2k_mlt_line_t val : 1; /* [0] */ + e2k_mlt_line_t hit : 1; /* [1] */ + e2k_mlt_line_t lock_mode : 1; /* [2] */ + e2k_mlt_line_t word_fst : 9; /* [11:3] */ + e2k_mlt_line_t page_fst : 36; /* [47:12]*/ + e2k_mlt_line_t mask_fst : 8; /* [55:48] */ + e2k_mlt_line_t rg : 8; /* [63:56] */ +} e2k_mlt_dw0_v6_fields_t; + +/* One reg (string) in MLT table */ +typedef struct e2k_mlt_entry { + union { + e2k_mlt_dw0_v2_fields_t 
v2_fields; + e2k_mlt_dw0_v6_fields_t v6_fields; + e2k_mlt_line_t word; + } dw0; + + union { + e2k_mlt_line_t word; + } dw1; + + union { + e2k_mlt_line_t word; + } dw2; +} e2k_mlt_entry_t; + +typedef struct e2k_mlt { + int num; /* number of entries in the MLT */ + e2k_mlt_entry_t mlt[NATIVE_MAX_MLT_SIZE]; /* valid MLT entries */ +} e2k_mlt_t; + +#define NATIVE_READ_MLT_REG(addr) \ + NATIVE_DO_READ_MAS(addr, MAS_MLT_REG, e2k_mlt_line_t, d, 2) + +typedef unsigned long e2k_dam_t; + +#define REG_DAM_N_SHIFT 7 +#define REG_DAM_TYPE_SHIFT 0 +#define REG_DAM_TYPE 4 + +#define NATIVE_READ_DAM_REG(addr) \ + NATIVE_DO_READ_MAS(addr, MAS_DAM_REG, e2k_dam_t, d, 2) + +#define NATIVE_SAVE_BINCO_REGS_FOR_PTRACE(regs) \ +do { \ + regs->rpr_lo = NATIVE_READ_RPR_LO_REG_VALUE(); \ + regs->rpr_hi = NATIVE_READ_RPR_HI_REG_VALUE(); \ +} while (0) + +#endif diff --git a/arch/e2k/include/asm/mm_hooks.h b/arch/e2k/include/asm/mm_hooks.h new file mode 100644 index 000000000000..d07df7b0ddce --- /dev/null +++ b/arch/e2k/include/asm/mm_hooks.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Define no-op hooks to be included in asm/mmu_context.h + * for arch e2k. 
+ */ +#ifndef _ASM_E2K_MM_HOOKS_H +#define _ASM_E2K_MM_HOOKS_H + +#include + +static inline void arch_unmap(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + +static inline void arch_bprm_mm_init(struct mm_struct *mm, + struct vm_area_struct *vma) +{ + get_mm_notifier_locked(mm); +} + +static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, + bool write, bool execute, bool foreign) +{ + if (vma->vm_flags & VM_PRIVILEGED) { + /* We have only hardware and signal + * stacks in VM_PRIVILEGED area */ + if (execute) + return false; + + if (!test_ts_flag(TS_KERNEL_SYSCALL)) + return false; + } + + return true; +} +#endif /* _ASM_E2K_MM_HOOKS_H */ diff --git a/arch/e2k/include/asm/mman.h b/arch/e2k/include/asm/mman.h new file mode 100644 index 000000000000..1b3fd29aff54 --- /dev/null +++ b/arch/e2k/include/asm/mman.h @@ -0,0 +1,132 @@ +#ifndef _E2K_MMAN_H_ +#define _E2K_MMAN_H_ + +#include + +#include +#include + +#define MV_FLUSH 0x00000001 + +struct vm_area_struct; +struct file; +struct mm_struct; + +int make_all_vma_pages_valid(struct vm_area_struct *vma, int flags); +int make_vma_pages_valid(struct vm_area_struct *vma, + unsigned long start_addr, unsigned long end_addr); + +pte_t *get_user_address_pte(struct vm_area_struct *vma, e2k_addr_t address); + +int vm_munmap_notkillable(unsigned long start, size_t len); +unsigned long vm_mmap_notkillable(struct file *file, unsigned long addr, + unsigned long len, unsigned long prot, + unsigned long flag, unsigned long offset); + +typedef enum sma_mode { + SMA_RO, + SMA_RW, + SMA_NX, + SMA_X, + SMA_P, + SMA_NP, + SMA_WB_MT, + SMA_WC_MT, + SMA_UC_MT, +} sma_mode_t; + +int e2k_set_vmm_cui(struct mm_struct *mm, int cui, + unsigned long code_base, unsigned long code_end); + +#define VM_HW_STACK_COMMON_FLAGS (VM_PRIVILEGED | VM_DONTEXPAND) +#define VM_HW_STACK_PS_FLAGS (VM_HW_STACK_COMMON_FLAGS | VM_HW_STACK_PS) +#define VM_HW_STACK_PCS_FLAGS (VM_HW_STACK_COMMON_FLAGS | VM_HW_STACK_PCS) + 
+static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, + unsigned long pkey) +{ + unsigned long vm_flags; + unsigned long cui; + + /* Order of checks is important since + * 32BIT flag is set in protected mode */ + if (TASK_IS_PROTECTED(current)) + cui = GET_CUI_FROM_INT_PROT(prot); + else + cui = USER_CODES_UNPROT_INDEX(current); + + vm_flags = cui << VM_CUI_SHIFT; + + if (current_thread_info()->status & TS_MMAP_PRIVILEGED) + vm_flags |= VM_PRIVILEGED; + + if (current_thread_info()->status & TS_MMAP_PS) + vm_flags |= VM_HW_STACK_PS; + + if (current_thread_info()->status & TS_MMAP_PCS) + vm_flags |= VM_HW_STACK_PCS; + + if (current_thread_info()->status & TS_MMAP_SIGNAL_STACK) + vm_flags |= VM_SIGNAL_STACK; + + return vm_flags; +} +#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) + +static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags) +{ + unsigned long page_prot; + + page_prot = vm_flags & VM_CUI; + + if (vm_flags & VM_PRIVILEGED) + page_prot = _PAGE_SET_PRIV(page_prot); + + return __pgprot(page_prot); +} +#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags) + +static inline int arch_validate_prot(unsigned long prot, unsigned long addr) +{ + if (prot & PROT_CUI) + return 0; + return 1; +} +#define arch_validate_prot(prot, addr) arch_validate_prot(prot, addr) + +static inline int arch_mmap_check(unsigned long addr, unsigned long len, + unsigned long flags) +{ + if (TASK_IS_BINCO(current) && + (!ADDR_IN_SS(addr) && ADDR_IN_SS(addr + len) || + ADDR_IN_SS(addr) && !ADDR_IN_SS(addr + len))) + return -EINVAL; + + return 0; +} +#define arch_mmap_check(addr, len, flags) arch_mmap_check(addr, len, flags) + +extern int no_writecombine; + +/* + * execute_mmu_operations() return values + */ +enum exec_mmu_ret { + /* Successfully executed, go to the next trap cellar record */ + EXEC_MMU_SUCCESS = 1, + /* Stop handling trap cellar and exit */ + EXEC_MMU_STOP, + /* Trap cellar record should be 
executed again */ + EXEC_MMU_REPEAT +}; +extern enum exec_mmu_ret execute_mmu_operations(trap_cellar_t *tcellar, + trap_cellar_t *next_tcellar, struct pt_regs *regs, + int zeroing, e2k_addr_t *addr, + bool (*is_spill_fill_recovery)(tc_cond_t cond, + e2k_addr_t address, bool s_f, + struct pt_regs *regs), + enum exec_mmu_ret (*calculate_rf_frame)(struct pt_regs *regs, + tc_cond_t cond, u64 **radr, + bool *load_to_rf)); + +#endif /* _E2K_MMAN_H_ */ diff --git a/arch/e2k/include/asm/mmu-regs-types-v2.h b/arch/e2k/include/asm/mmu-regs-types-v2.h new file mode 100644 index 000000000000..6bfbda13c2ba --- /dev/null +++ b/arch/e2k/include/asm/mmu-regs-types-v2.h @@ -0,0 +1,156 @@ +/* + * E2K ISET V2-V5 MMU structure and common definitions. + * + * Copyright 2018 (c) MCST, Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _ASM_E2K_MMU_REGS_TYPES_V2_H +#define _ASM_E2K_MMU_REGS_TYPES_V2_H + +/* + * This file contains the functions and defines necessary to modify and + * use the E2K ISET V2-V5 page tables. + * NOTE: E2K has four levels of page tables. 
+ */ + +#ifndef __ASSEMBLY__ + +#include +#include +#include + +/* + * DTLB entry probe format + */ + +#define DTLB_ENTRY_ERROR_MASK_V2 0xbe00000000000000ULL +#define DTLB_ENTRY_PH_BOUND_V2 0x8000000000000000ULL +#define DTLB_ENTRY_ILLEGAL_PAGE_V2 0x4000000000000000ULL +#define DTLB_ENTRY_PAGE_MISS_V2 0x2000000000000000ULL +#define DTLB_ENTRY_PROBE_DISABLED_V2 0x0400000000000000ULL +#define DTLB_ENTRY_RES_BITS_V2 0x0200000000000000ULL +#define DTLB_ENTRY_MISS_LEVEL_MASK_V2 0x1800000000000000ULL + +#define DTLB_ENTRY_WR_V2 0x0000000000000002ULL +#define DTLB_ENTRY_NON_EX_U_S_V2 0x0000000000000004ULL +#define DTLB_ENTRY_PWT_V2 0x0000000000000008ULL +#define DTLB_ENTRY_PCD1_V2 0x0000000000000010ULL +#define DTLB_ENTRY_D_V2 0x0000000000000040ULL +#define DTLB_ENTRY_G_V2 0x0000000000000100ULL +#define DTLB_ENTRY_PCD2_V2 0x0000000000000200ULL +#define DTLB_ENTRY_NWA_V2 0x0000000000000400ULL +#define DTLB_ENTRY_PHA_V2 0x000000fffffff000ULL /* phys address */ +#define DTLB_ENTRY_VVA_V2 0x0000010000000000ULL /* VVA bit */ +#define DTLB_ENTRY_PV_V2 0x0000020000000000ULL +#define DTLB_ENTRY_INT_PR_NON_EX_V2 0x0000040000000000ULL +#define DTLB_ENTRY_INTL_RD_V2 0x0000200000000000ULL +#define DTLB_ENTRY_INTL_WR_V2 0x0000400000000000ULL +#define DTLB_ENTRY_WP_V2 0x0000800000000000ULL +#define DTLB_ENTRY_UC_V2 0x0001000000000000ULL + +/* MPT flags for 2/4Mb & 1Gb pages [46:45] */ +#define DTLB_ENTRY_MPT_FLAGS_V2 0x0000600000000000ULL + +#define DTLB_EP_RES_V2 0x0001ffffffffffffULL +#define DTLB_EP_FAULT_RES_V2 (~DTLB_EP_RES_V2) + +/* + * DTLB address probe result format + */ +#define PH_ADDR_AP_RES_V2 0x000000ffffffffffULL /* Physical address */ + /* normal result of */ + /* AP [39: 0] */ +#define DISABLE_AP_RES_V2 DISABLE_EP_RES_V2 /* AP diasble result */ + /* [62] */ +#define ILLEGAL_PAGE_AP_RES_V2 ILLEGAL_PAGE_EP_RES_V2 /* illegal page */ + /* [58] */ + +/* convert physical address to page frame number for DTLB */ +#define PA_TO_DTLB_ENTRY_PHA_V2(phys_addr) \ + 
(((e2k_addr_t)phys_addr) & DTLB_ENTRY_PHA_V2) +/* convert the page frame number from DTLB entry to physical address */ +#define DTLB_ENTRY_PHA_TO_PA_V2(dtlb_entry) \ + ((e2k_addr_t)(dtlb_entry) & DTLB_ENTRY_PHA_V2) + +static inline probe_entry_t +covert_uni_dtlb_flags_to_dtlb_val_v2(const uni_dtlb_t uni_flags) +{ + probe_entry_t dtlb_flags = 0; + + if (uni_flags & UNI_PAGE_WRITE) + dtlb_flags |= (DTLB_ENTRY_WR_V2); + if (uni_flags & UNI_PAGE_PRIV) + dtlb_flags |= (DTLB_ENTRY_PV_V2); + if (uni_flags & UNI_PAGE_VALID) + dtlb_flags |= (DTLB_ENTRY_VVA_V2); + if (uni_flags & UNI_PAGE_PROTECT) + dtlb_flags |= (DTLB_ENTRY_INT_PR_NON_EX_V2); + if (uni_flags & UNI_PAGE_GLOBAL) + dtlb_flags |= (DTLB_ENTRY_G_V2); + if (uni_flags & UNI_PAGE_DIRTY) + dtlb_flags |= (DTLB_ENTRY_D_V2); + if (uni_flags & UNI_PAGE_NWA) + dtlb_flags |= (DTLB_ENTRY_NWA_V2); + if (uni_flags & UNI_PAGE_MEM_TYPE) + dtlb_flags |= (DTLB_ENTRY_PCD1_V2 | DTLB_ENTRY_PCD2_V2 | + DTLB_ENTRY_PWT_V2); + if (uni_flags & UNI_PAGE_NON_EX) + dtlb_flags |= (DTLB_ENTRY_NON_EX_U_S_V2); + if (uni_flags & UNI_PAGE_PFN) + dtlb_flags |= (DTLB_ENTRY_PHA_V2); + if (uni_flags & UNI_PAGE_MEM_TYPE_MA) + dtlb_flags |= (DTLB_ENTRY_PCD1_V2 | DTLB_ENTRY_PCD2_V2 | + DTLB_ENTRY_PWT_V2); + if (uni_flags & UNI_PAGE_WRITE_INT) + dtlb_flags |= (DTLB_ENTRY_WP_V2); + if (uni_flags & UNI_PAGE_INTL_RD) + dtlb_flags |= (DTLB_ENTRY_INTL_RD_V2); + if (uni_flags & UNI_PAGE_INTL_WR) + dtlb_flags |= (DTLB_ENTRY_INTL_WR_V2); + if (uni_flags & UNI_DTLB_EP_RES) + dtlb_flags |= (DTLB_EP_RES_V2); + if (uni_flags & UNI_DTLB_PH_ADDR_AP_RES) + dtlb_flags |= (PH_ADDR_AP_RES_V2); + if (uni_flags & UNI_DTLB_ERROR_MASK) + dtlb_flags |= (DTLB_ENTRY_ERROR_MASK_V2); + if (uni_flags & UNI_DTLB_MISS_LEVEL) + dtlb_flags |= (DTLB_ENTRY_MISS_LEVEL_MASK_V2); + if (uni_flags & UNI_DTLB_SUCCESSFUL) + dtlb_flags |= (DTLB_ENTRY_PROBE_DISABLED_V2); + if (uni_flags & UNI_DTLB_RES_BITS) + dtlb_flags |= (DTLB_ENTRY_RES_BITS_V2); + + BUILD_BUG_ON(dtlb_flags == 0); + + return 
dtlb_flags; +} + +static inline probe_entry_t +fill_dtlb_val_v2_flags(const uni_dtlb_t uni_flags) +{ + return covert_uni_dtlb_flags_to_dtlb_val_v2(uni_flags); +} +static inline probe_entry_t +get_dtlb_val_v2_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags) +{ + return dtlb_val & covert_uni_dtlb_flags_to_dtlb_val_v2(uni_flags); +} +static inline bool +test_dtlb_val_v2_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags) +{ + return get_dtlb_val_v2_flags(dtlb_val, uni_flags) != 0; +} +static inline probe_entry_t +set_dtlb_val_v2_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags) +{ + return dtlb_val | covert_uni_dtlb_flags_to_dtlb_val_v2(uni_flags); +} +static inline probe_entry_t +clear_dtlb_val_v2_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags) +{ + return dtlb_val & ~covert_uni_dtlb_flags_to_dtlb_val_v2(uni_flags); +} + +#endif /* ! __ASSEMBLY__ */ + +#endif /* ! _ASM_E2K_MMU_REGS_TYPES_V2_H */ diff --git a/arch/e2k/include/asm/mmu-regs-types-v6.h b/arch/e2k/include/asm/mmu-regs-types-v6.h new file mode 100644 index 000000000000..8dda520fe106 --- /dev/null +++ b/arch/e2k/include/asm/mmu-regs-types-v6.h @@ -0,0 +1,180 @@ +/* + * E2K ISET V6-... MMU structure and common definitions. + * + * Copyright 2018 (c) MCST, Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _ASM_E2K_MMU_REGS_TYPES_V6_H +#define _ASM_E2K_MMU_REGS_TYPES_V6_H + +/* + * This file contains the functions and defines necessary to modify and + * use the E2K ISET V2-V5 page tables. + * NOTE: E2K has four levels of page tables. 
+ */ + +#ifndef __ASSEMBLY__ + +#include +#include +#include + +/* + * DTLB entry probe format + */ +#define DTLB_ENTRY_MT_exc_SHIFT_V6 8 /* shift of Memory Type field */ + /* for exceptions */ +#define DTLB_ENTRY_MT_BITS_NUM_V6 3 /* occupies 3 bits */ +#define DTLB_ENTRY_MT_ma_SHIFT_V6 48 /* shift of Memory Type field */ + /* for memory access */ +#define DTLB_ENTRY_PFN_SHIFT_V6 12 /* shift of Physical Page */ + /* Number */ + +#define DTLB_ENTRY_ERROR_MASK_V6 0xe000000000000000ULL +#define DTLB_ENTRY_PH_BOUND_V6 0x8000000000000000ULL +#define DTLB_ENTRY_ILLEGAL_PAGE_V6 0x4000000000000000ULL +#define DTLB_ENTRY_PAGE_MISS_V6 0x2000000000000000ULL +#define DTLB_ENTRY_PROBE_SUCCESSFUL_V6 0x0000000000000080ULL +#define DTLB_ENTRY_RES_BITS_V6 0x1000000000000000ULL +#define DTLB_ENTRY_MISS_LEVEL_MASK_V6 0x0000000000000007ULL +#define DTLB_ENTRY_TLB_HIT_V6 0x0000000000000020ULL + +#define DTLB_ENTRY_WR_exc_V6 0x0000000000000002ULL +#define DTLB_ENTRY_PV_or_U_S_V6 0x0000000000000004ULL +#define DTLB_ENTRY_VVA_V6 0x0000000000000008ULL +#define DTLB_ENTRY_INT_PR_V6 0x0000000000000010ULL +#define DTLB_ENTRY_G_V6 0x0000000000000020ULL +#define DTLB_ENTRY_D_V6 0x0000000000000040ULL +#define DTLB_ENTRY_NWA_V6 0x0000000000000080ULL +#define DTLB_ENTRY_MT_exc_V6 /* 0x0000000000000700ULL */ \ + (((1ULL << DTLB_ENTRY_MT_BITS_NUM_V6) - 1) << \ + DTLB_ENTRY_MT_exc_SHIFT_V6) +#define DTLB_ENTRY_NON_EX_V6 0x8000000000000000ULL +#define DTLB_ENTRY_PHA_V6 /* 0x0000fffffffff000ULL */ \ + ((((1ULL << E2K_MAX_PHYS_BITS_V6) - 1) >> \ + DTLB_ENTRY_PFN_SHIFT_V6) << \ + DTLB_ENTRY_PFN_SHIFT_V6) +#define DTLB_ENTRY_MT_ma_V6 /* 0x0007000000000000ULL */ \ + (((1ULL << DTLB_ENTRY_MT_BITS_NUM_V6) - 1) << \ + DTLB_ENTRY_MT_ma_SHIFT_V6) +#define DTLB_ENTRY_WR_int_V6 0x0008000000000000ULL +#define DTLB_ENTRY_INTL_RD_V6 0x0010000000000000ULL +#define DTLB_ENTRY_INTL_WR_V6 0x0020000000000000ULL + +/* MPT flags for 2/4Mb & 1Gb pages [46:45] */ +#define DTLB_ENTRY_MPT_FLAGS_V6 0x0000600000000000ULL + 
+#define DTLB_EP_RES_V6 0x003fffffffffffffULL +#define DTLB_EP_FAULT_RES_V6 (~DTLB_EP_RES_V6) + +/* + * DTLB address probe result format + */ +#define PH_ADDR_AP_RES_V6 /* 0x0000ffffffffffffULL */ \ + ((1ULL << E2K_MAX_PHYS_BITS_V6) - 1) +#define DISABLE_AP_RES_V6 DISABLE_EP_RES_V6 /* AP diasble result */ + /* [62] */ +#define ILLEGAL_PAGE_AP_RES_V6 ILLEGAL_PAGE_EP_RES_V6 /* illegal page */ + /* [58] */ + +/* convert physical address to page frame number for DTLB */ +#define PA_TO_DTLB_ENTRY_PHA_V6(phys_addr) \ + (((e2k_addr_t)phys_addr) & DTLB_ENTRY_PHA_V6) +/* convert the page frame number from DTLB entry to physical address */ +#define DTLB_ENTRY_PHA_TO_PA_V6(dtlb_entry) \ + ((e2k_addr_t)(dtlb_entry) & DTLB_ENTRY_PHA_V6) + +/* get/set Memory Type field from/to DTLB entry */ +#define DTLB_ENTRY_MT_exc_GET_VAL(x) \ + (((x) & DTLB_ENTRY_MT_exc_V6) >> DTLB_ENTRY_MT_exc_SHIFT_V6) +#define DTLB_ENTRY_MT_exc_SET_VAL(x, mt) \ + (((x) & ~DTLB_ENTRY_MT_exc_V6) | \ + (((probe_entry_t)(mt) << DTLB_ENTRY_MT_exc_SHIFT_V6) & \ + DTLB_ENTRY_MT_exc_V6)) +#define DTLB_ENTRY_MT_ma_GET_VAL(x) \ + (((x) & DTLB_ENTRY_MT_ma_V6) >> DTLB_ENTRY_MT_ma_SHIFT_V6) +#define DTLB_ENTRY_MT_ma_SET_VAL(x, mt) \ + (((x) & ~DTLB_ENTRY_MT_ma_V6) | \ + (((probe_entry_t)(mt) << DTLB_ENTRY_MT_ma_SHIFT_V6) & \ + DTLB_ENTRY_MT_ma_V6)) + +static inline probe_entry_t +covert_uni_dtlb_flags_to_dtlb_val_v6(const uni_dtlb_t uni_flags) +{ + probe_entry_t dtlb_flags = 0; + + if (uni_flags & UNI_PAGE_WRITE) + dtlb_flags |= (DTLB_ENTRY_WR_exc_V6); + if (uni_flags & UNI_PAGE_PRIV) + dtlb_flags |= (DTLB_ENTRY_PV_or_U_S_V6); + if (uni_flags & UNI_PAGE_VALID) + dtlb_flags |= (DTLB_ENTRY_VVA_V6); + if (uni_flags & UNI_PAGE_PROTECT) + dtlb_flags |= (DTLB_ENTRY_INT_PR_V6); + if (uni_flags & UNI_PAGE_GLOBAL) + dtlb_flags |= (DTLB_ENTRY_G_V6); + if (uni_flags & UNI_PAGE_DIRTY) + dtlb_flags |= (DTLB_ENTRY_D_V6); + if (uni_flags & UNI_PAGE_NWA) + dtlb_flags |= (DTLB_ENTRY_NWA_V6); + if (uni_flags & UNI_PAGE_MEM_TYPE) + 
dtlb_flags |= (DTLB_ENTRY_MT_exc_V6); + if (uni_flags & UNI_PAGE_NON_EX) + dtlb_flags |= (DTLB_ENTRY_NON_EX_V6); + if (uni_flags & UNI_PAGE_PFN) + dtlb_flags |= (DTLB_ENTRY_PHA_V6); + if (uni_flags & UNI_PAGE_MEM_TYPE_MA) + dtlb_flags |= (DTLB_ENTRY_MT_ma_V6); + if (uni_flags & UNI_PAGE_WRITE_INT) + dtlb_flags |= (DTLB_ENTRY_WR_int_V6); + if (uni_flags & UNI_PAGE_INTL_RD) + dtlb_flags |= (DTLB_ENTRY_INTL_RD_V6); + if (uni_flags & UNI_PAGE_INTL_WR) + dtlb_flags |= (DTLB_ENTRY_INTL_WR_V6); + if (uni_flags & UNI_DTLB_EP_RES) + dtlb_flags |= (DTLB_EP_RES_V6); + if (uni_flags & UNI_DTLB_PH_ADDR_AP_RES) + dtlb_flags |= (PH_ADDR_AP_RES_V6); + if (uni_flags & UNI_DTLB_ERROR_MASK) + dtlb_flags |= (DTLB_ENTRY_ERROR_MASK_V6); + if (uni_flags & UNI_DTLB_MISS_LEVEL) + dtlb_flags |= (DTLB_ENTRY_MISS_LEVEL_MASK_V6); + if (uni_flags & UNI_DTLB_SUCCESSFUL) + dtlb_flags |= (DTLB_ENTRY_PROBE_SUCCESSFUL_V6); + if (uni_flags & UNI_DTLB_RES_BITS) + dtlb_flags |= (DTLB_ENTRY_RES_BITS_V6); + + BUILD_BUG_ON(dtlb_flags == 0); + + return dtlb_flags; +} + +static inline probe_entry_t +fill_dtlb_val_v6_flags(const uni_dtlb_t uni_flags) +{ + return covert_uni_dtlb_flags_to_dtlb_val_v6(uni_flags); +} +static inline probe_entry_t +get_dtlb_val_v6_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags) +{ + return dtlb_val & covert_uni_dtlb_flags_to_dtlb_val_v6(uni_flags); +} +static inline bool +test_dtlb_val_v6_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags) +{ + return get_dtlb_val_v6_flags(dtlb_val, uni_flags) != 0; +} +static inline probe_entry_t +set_dtlb_val_v6_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags) +{ + return dtlb_val | covert_uni_dtlb_flags_to_dtlb_val_v6(uni_flags); +} +static inline probe_entry_t +clear_dtlb_val_v6_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags) +{ + return dtlb_val & ~covert_uni_dtlb_flags_to_dtlb_val_v6(uni_flags); +} + +#endif /* ! __ASSEMBLY__ */ + +#endif /* ! 
_ASM_E2K_MMU_REGS_TYPES_V6_H */ diff --git a/arch/e2k/include/asm/mmu.h b/arch/e2k/include/asm/mmu.h new file mode 100644 index 000000000000..e9f4bcea39bc --- /dev/null +++ b/arch/e2k/include/asm/mmu.h @@ -0,0 +1,226 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#ifndef _E2K_MMU_H_ +#define _E2K_MMU_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + + +/* hw_context_lifetime.state possible values. + * Actual values are important because we use atomic_inc/dec to switch states. + * "state" field helps to avoid double use, and "alive" field helps to avoid + * double free. */ +enum { + HWC_STATE_READY = 0U, /* The context is free to take */ + HWC_STATE_BUSY = 1U, /* A thread is currently executing on the context */ + HWC_STATE_COPYING = 2U /* The context is being copied in fork() */ +}; +#define HWC_STATE_SHIFT 0 +#define HWC_ALIVE_BIAS (1U << 16) +union hw_context_lifetime { + refcount_t refcount; + struct { + u16 state; + u16 alive; + }; +}; + +enum hw_context_fmt { + CTX_32_BIT, + CTX_64_BIT, + CTX_128_BIT +}; + +struct hw_context { + u64 key; /* For finding this context in the hash table */ + struct rhash_head hash_entry; + union hw_context_lifetime lifetime; + + struct { + typeof_member(struct pt_regs, stacks) stacks; + typeof_member(struct pt_regs, crs) crs; + typeof_member(struct pt_regs, wd) wd; + typeof_member(struct pt_regs, kernel_entry) kernel_entry; + } regs; + + /* After user_hw_stacks_copy_full() there is one user frame + * left in kernel chain stack, it's contents are saved here. 
*/ + e2k_mem_crs_t prev_crs; + + /* Data from thread_info */ + struct { + data_stack_t u_stack; /* User data stack info */ + hw_stack_t u_hw_stack; /* User hardware stacks info */ + struct list_head getsp_adj; + struct list_head old_u_pcs_list; +#ifdef CONFIG_PROTECTED_MODE + global_store_t *g_list; + e2k_addr_t multithread_address; + struct rw_semaphore *lock; +#endif /* CONFIG_PROTECTED_MODE */ + struct signal_stack signal_stack; + } ti; + + /* Pointer to the corresponding user context */ + void __user *ucp; + /* Pointer to the next context to run */ + void __user *p_uc_link; + enum hw_context_fmt ptr_format; + + /* Used to free in a separate context (for better performance) */ + struct rcu_head rcu_head; + struct work_struct work; + struct mm_struct *mm; +} ____cacheline_aligned_in_smp; + +#ifdef CONFIG_PROTECTED_MODE + /* + * The list below is used to restore descriptors from pointers + * when kernel needs to pass a descriptor back to user protected space. + * The list stores pointer/descriptor pairs with some extra info if any. + * When kernel need to pass a descriptor to a signal handler operating + * in the protected mode, it looks for the given pointer in the list, + * and gets the descriptor it correcponds to if available. 
+ */ +struct sival_ptr_list { + struct list_head link; /* connects links into the list */ + void __user *kernel_ptr; + unsigned long long int user_ptr_lo; + unsigned long long int user_ptr_hi; + unsigned char user_tags; + unsigned int signum; /* mq_notify needs keeping a single pointer + * for a particular registered signal + */ +}; +#endif + +typedef struct { + unsigned long cpumsk[NR_CPUS]; + atomic_t cur_cui; /* first free cui */ + atomic_t tstart; /* first free type for TSD */ + int tcount; + + /* + * Bit array for saving the information about + * busy and free entries in cut + */ + DECLARE_BITMAP(cut_mask, USER_CUT_AREA_SIZE/sizeof(e2k_cute_t)); + /* + * Mutex lock for protecting of cut_mask + */ + struct mutex cut_mask_lock; + + /* + * For makecontext/swapcontext - a hash list of available contexts + */ + struct rhashtable hw_contexts; + + /* + * for multithreads coredump + * + * e2k arch has 3 stacks (2 hardware_stacks) + * for core file needed all stacks + * The threads must free pc & p stacks after finish_coredump + * The below structure are needed to delay free hardware_stacks + */ + struct list_head delay_free_stacks; + struct rw_semaphore core_lock; +#ifdef CONFIG_PROTECTED_MODE + allpools_t umpools; + struct list_head sival_ptr_list_head; + struct rw_semaphore sival_ptr_list_sem; + /* The field below controls different debug/error output + * purposed to support porting libraries to protected mode: + */ + unsigned long pm_sc_debug_mode; + /* Controls extra info and issues identified by kernel to journal. + * Use command 'dmesg' to display these messages. + * For particular controls see: + * arch/e2k/include/uapi/asm/protected_mode.h + */ +#endif /* CONFIG_PROTECTED_MODE */ + + /* List of cached user hardware stacks */ + struct list_head cached_stacks; + spinlock_t cached_stacks_lock; + size_t cached_stacks_size; +} mm_context_t; + + +/* Version for fast syscalls, so it must be inlined. + * Must be used only for current. 
*/ +static inline u64 context_ti_key_fast_syscall(const struct thread_info *ti) +{ + struct pt_regs __user *u_regs = __signal_pt_regs_last(ti); + + if (u_regs) + return u_regs->stacks.top; + + return ti->u_stack.top; +} + +extern long hw_context_lookup_pcsp_and_switch(e2k_pcsp_lo_t pcsp_lo, + e2k_usd_lo_t usd_lo); +extern int hw_contexts_init(struct task_struct *p, mm_context_t *mm_context, + bool is_fork); +extern void hw_contexts_destroy(mm_context_t *mm_context); +extern long swapcontext(const void __user *ucp, int format); +extern void makecontext_trampoline(void); +extern void makecontext_trampoline_protected(void); +extern void makecontext_trampoline_continue(void); +extern void hw_context_deactivate_mm(struct task_struct *dead_task); + +struct ucontext; +extern long sys_setcontext(const struct ucontext __user *ucp, + int sigsetsize); +extern long sys_makecontext(struct ucontext __user *ucp, void (*func)(void), + u64 args_size, void __user *args, int sigsetsize); +extern long sys_freecontext(struct ucontext __user *ucp); +extern long sys_swapcontext(struct ucontext __user *oucp, + const struct ucontext __user *ucp, int sigsetsize); +#ifdef CONFIG_COMPAT +struct ucontext_32; +extern long compat_sys_setcontext(const struct ucontext_32 __user *ucp, + int sigsetsize); +extern long compat_sys_makecontext(struct ucontext_32 __user *ucp, + void (*func)(void), u64 args_size, void __user *args, + int sigsetsize); +extern long compat_sys_freecontext(struct ucontext_32 __user *ucp); +extern long compat_sys_swapcontext(struct ucontext_32 __user *oucp, + const struct ucontext_32 __user *ucp, int sigsetsize); +#endif +#ifdef CONFIG_PROTECTED_MODE +struct ucontext_prot; +extern long protected_sys_setcontext( + const struct ucontext_prot __user *ucp, + int sigsetsize); +extern long protected_sys_makecontext(struct ucontext_prot __user *ucp, + void (*func)(void), u64 args_size, void __user *args, + int sigsetsize); +extern long protected_sys_freecontext(struct ucontext_prot 
__user *ucp); +extern long protected_sys_swapcontext(struct ucontext_prot __user *oucp, + const struct ucontext_prot __user *ucp, int sigsetsize); +#endif + +struct vm_userfaultfd_ctx; +extern unsigned long mremap_to(unsigned long addr, unsigned long old_len, + unsigned long new_addr, unsigned long new_len, bool *locked, + struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap_early, + struct list_head *uf_unmap); + +#endif /* _E2K_MMU_H_ */ diff --git a/arch/e2k/include/asm/mmu_context.h b/arch/e2k/include/asm/mmu_context.h new file mode 100644 index 000000000000..0def5a0f1adc --- /dev/null +++ b/arch/e2k/include/asm/mmu_context.h @@ -0,0 +1,484 @@ +/* + * asm-e2k/mmu_context.h + */ + +#ifndef _E2K_MMU_CONTEXT_H_ +#define _E2K_MMU_CONTEXT_H_ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* + * The high bits of the "context_cache" (and the "mm->context") are the + * CONTEXT _version_ code. A version of 0 is always considered invalid, + * so to invalidate another process only need to do "p->mm->context = 0". + * + * If more CONTEXT's than the processor has is needed, it invalidates all + * TLB's ('flush_tlb_all()') and starts a new CONTEXT version. + * That will automatically force a new CONTEXT for any other processes + * the next time they want to run. 
+ * + * cpu_last_context(cpuid): + * 63 0 + * +-------------------------------+------------------+ + * | asn version of this processor | hardware CONTEXT | + * +-------------------------------+------------------+ + */ + +#define CTX_HARDWARE_BITS 12 +#define CTX_HARDWARE_MASK ((1UL << CTX_HARDWARE_BITS) - 1) +#define CTX_HARDWARE_MAX CTX_HARDWARE_MASK +#define CTX_VERSION_SHIFT CTX_HARDWARE_BITS +#define CTX_VERSION_SIZE (1UL << CTX_VERSION_SHIFT) +#define CTX_VERSION_MASK (~(CTX_VERSION_SIZE - 1)) +#define CTX_FIRST_VERSION_NUM 1UL +#define CTX_FIRST_VERSION (CTX_FIRST_VERSION_NUM << CTX_VERSION_SHIFT) + +#define CTX_HARDWARE(ctx) ((ctx) & CTX_HARDWARE_MASK) +#define CTX_VERSION(ctx) ((ctx) & CTX_VERSION_MASK) + +#ifdef CONFIG_SMP +#include +//spin_lock is needed: #define cpu_last_context(cpuid) (cpu_data[cpuid].mmu_last_context) +#define my_cpu_last_context() (my_cpu_data.mmu_last_context) +#define my_cpu_last_context1(num_cpu) (my_cpu_data1(num_cpu).mmu_last_context) +#else +extern unsigned long mmu_last_context; +//#define cpu_last_context(cpuid) mmu_last_context +#define my_cpu_last_context() mmu_last_context +#define my_cpu_last_context1(num_cpu) mmu_last_context +#endif /* CONFIG_SMP */ + +extern int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm); +extern void arch_exit_mmap(struct mm_struct *mm); + +static inline void +reload_context_mask(unsigned long mask) +{ + set_MMU_CONT(CTX_HARDWARE(mask)); +} + +/* + * Get process new MMU context. This is needed when the page table + * pointer is changed or when the CONTEXT of the current process is updated + * This proc is called under closed interrupts or preempt_disable() + */ + +static inline unsigned long +get_new_mmu_pid(mm_context_t *context, int num_cpu) +{ + unsigned long ctx; + unsigned long next; + + /* Interrupts should be disabled to not bother about + * async-safety (calls to this function from the same + * CPU after it was interrupted). 
*/ + + WARN_ON_ONCE(!__raw_all_irqs_disabled()); + + ctx = my_cpu_last_context1(num_cpu); + next = ctx + 1; + if (CTX_HARDWARE(next) == E2K_KERNEL_CONTEXT) + next ++; + if (CTX_VERSION(ctx) != CTX_VERSION(next)) { + flush_TLB_all(); + flush_ICACHE_all(); + if (CTX_VERSION(next) < CTX_FIRST_VERSION) { + next = CTX_FIRST_VERSION; + if (CTX_HARDWARE(next) == E2K_KERNEL_CONTEXT) + next ++; + } + } + + /* Another CPU might have written 0 to our cpu's mm context + * while we were getting the next context. But it is OK since + * we are changing the context anyway, and if this happens we + * will just rewrite that 0 with the new context. */ + context->cpumsk[num_cpu] = next; + my_cpu_last_context1(num_cpu) = next; + + return next; +} + +static inline unsigned long +get_new_mmu_context(struct mm_struct *mm, int num_cpu) +{ + return get_new_mmu_pid(&mm->context, num_cpu); +} + +/* + * Get the process current MMU context. + */ +static inline unsigned long +get_mmu_pid(mm_context_t *context, int cpu) +{ + unsigned long next; + + /* check if our CPU MASK is of an older generation and thus invalid: */ + next = context->cpumsk[cpu]; + if (unlikely(next == 0 || CTX_VERSION(my_cpu_last_context1(cpu)) + != CTX_VERSION(next))) + next = get_new_mmu_pid(context, cpu); + + return next; +} + +static inline unsigned long +get_mmu_context(struct mm_struct *mm, int cpu) +{ + return get_mmu_pid(&mm->context, cpu); +} + +/* + * Get the process current MMU context. 
+ */ +static inline void +copy_mmu_pid(mm_context_t *pid_to, mm_context_t *pid_from) +{ + *pid_to = *pid_from; +} + +static inline void +reload_mmu_context(struct mm_struct *mm) +{ + unsigned long ctx, flags; + int cpu; + + raw_all_irq_save(flags); + cpu = smp_processor_id(); + ctx = get_new_mmu_context(mm, cpu); + reload_context_mask(ctx); + raw_all_irq_restore(flags); +} +static inline void +invalidate_mmu_context(struct mm_struct *mm) +{ + int cpu = raw_smp_processor_id(); +#ifdef CONFIG_SMP + /* + * Remove this cpu from mm_cpumask. This might be + * needed, for example, after sys_io_setup() if the + * kernel thread which was using this mm received + * flush ipi (unuse_mm() does not clear mm_cpumask). + * And maybe there are other such places where + * a kernel thread uses user mm. + */ + cpumask_clear_cpu(cpu, mm_cpumask(mm)); +#endif + mm->context.cpumsk[cpu] = 0; +} + +extern inline void +enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk) +{ +} + + +extern int __init_new_context(struct task_struct *p, struct mm_struct *mm, + mm_context_t *context); +static inline int init_new_context(struct task_struct *p, struct mm_struct *mm) +{ + return __init_new_context(p, mm, &mm->context); +} + +static inline int +init_new_mmu_pid(mm_context_t *context) +{ + return __init_new_context(NULL, NULL, context); +} + +extern void destroy_cached_stacks(mm_context_t *context); + +/* + * Destroy a dead context. This occurs when mmput drops the + * mm_users count to zero, the mmaps have been released, and + * all the page tables have been flushed. The function job + * is to destroy any remaining processor-specific state. + */ +static inline void destroy_context(struct mm_struct *mm) +{ + destroy_cached_stacks(&mm->context); +} + + +/* + * Force a context reload. 
This is needed when context is changed + */ +static inline void +reload_mmu_pid(mm_context_t *context, int num_cpu) +{ + unsigned long ctx = context->cpumsk[num_cpu]; + + if (!ctx) + ctx = get_new_mmu_pid(context, num_cpu); + set_MMU_CONT(CTX_HARDWARE(ctx)); +} +static inline void +reload_context(struct mm_struct *mm, int num_cpu) +{ + reload_mmu_pid(&mm->context, num_cpu); +} + +/* + * Force a root page table pointer reload. + */ +static inline void +reload_root_pgd(pgd_t *pgd) +{ + if (MMU_IS_SEPARATE_PT()) { + set_MMU_U_PPTB(__pa(pgd)); + } else { +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT + if (!THERE_IS_DUP_KERNEL) { + set_MMU_U_PPTB(__pa(pgd)); + } +#else /* ! CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + set_MMU_U_PPTB(__pa(pgd)); +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + } +} +extern inline void +reload_root_pt(struct mm_struct *mm) +{ + pgd_t *pgd; + + if (mm == &init_mm) { + pgd = cpu_kernel_root_pt; + if ((unsigned long) pgd >= KERNEL_BASE) + pgd = __va(kernel_va_to_pa(pgd)); + } else { + pgd = mm->pgd; + } + reload_root_pgd(pgd); +} +/* + * Force the kernel root page table pointer reload. + */ +static inline void +set_root_pt(pgd_t *root_pt) +{ + set_MMU_U_PPTB(__pa(root_pt)); + if (MMU_IS_SEPARATE_PT()) + set_MMU_OS_PPTB(__pa(root_pt)); +} + +/* + * Switch a root page table pointer and context. 
+ */ +static inline void +reload_thread(struct mm_struct *mm) +{ + unsigned long flags; + int num_cpu; + + preempt_disable(); + num_cpu = raw_smp_processor_id(); + if (!MMU_IS_SEPARATE_PT()) { +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT + if (THERE_IS_DUP_KERNEL) { + spin_lock(&mm->page_table_lock); + copy_user_pgd_to_kernel_root_pt(mm->pgd); + spin_unlock(&mm->page_table_lock); + } +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + } + raw_all_irq_save(flags); + reload_root_pt(mm); + reload_context(mm, num_cpu); + raw_all_irq_restore(flags); + preempt_enable(); +} + +static inline void +do_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, + struct task_struct *next, int switch_pgd); + +/* + * Activate a new MM instance for the current task. + */ +static inline void +native_activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) +{ + do_switch_mm(active_mm, mm, NULL, false); +} + +static inline void call_switch_mm(struct mm_struct *prev_mm, + struct mm_struct *next_mm, struct task_struct *next, + int switch_pgd, int switch_mm) +{ + if (switch_mm || switch_pgd) + do_switch_mm(prev_mm, next_mm, next, switch_pgd); +} + +/* Virtualization support */ + +extern void native_deactivate_mm(struct task_struct *dead_task, + struct mm_struct *mm); + +#include + +/* + * Switch from address space PREV to address space NEXT. 
+ * interrupt was disabled by caller + */ +static inline void +do_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, + struct task_struct *next, int switch_pgd) +{ + int cpu = raw_smp_processor_id(); + unsigned long flags, mask; + + if (likely(prev_mm != next_mm)) { + raw_all_irq_save(flags); + + if (likely(next_mm)) { +#ifdef CONFIG_SMP + /* Start receiving flush ipis for the next mm */ + cpumask_set_cpu(cpu, mm_cpumask(next_mm)); + + /* Without a memory barrier, a following race can happen + * (CPU0 executes switch_mm, CPU1 executes flush_tlb): + * + * -----------------------------+----------------------- + * CPU0 | CPU1 + * -----------------------------+----------------------- + * read next_mm->context | + * for CPU0 | + * | set next_mm->context + * | for CPU0 to 0 + * the loaded value has older | + * context version -> update it | + * with get_new_mmu_context() | + * -> 0 in next_mm->context | execute memory barrier + * is rewritten | + * | CPU0 is not set in + * | mm_cpumask(next_mm), + * | so ipi's not send + * set CPU0 bit in | + * mm_cpumask(next_mm) | + * -----------------------------+----------------------- + * + * To avoid the races both CPU1 and CPU0 execute memory + * barriers: + * -----------------------------+----------------------- + * CPU0 | CPU1 + * -----------------------------+----------------------- + * set CPU0 bit in | set next_mm->context + * mm_cpumask(next_mm) | for CPU0 to 0 + * | + * execute memory barrier | execute memory barrier + * | + * read next_mm->context | CPU0 is not set in + * for CPU0 | mm_cpumask(next_mm), + * | so ipi's not send + * -----------------------------+----------------------- + * This way either CPU0 will see 0 in next_mm or + * CPU1 will send the flush ipi to CPU0, or both. + * + * This barrier could be smp_mb__after_atomic(), but + * the membarrier syscall requires a full memory + * barrier after storing to rq->curr, before going + * back to user-space. 
+ */ + smp_mb(); +#endif + +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT + /* Load user page table */ + if (!MMU_IS_SEPARATE_PT() && THERE_IS_DUP_KERNEL) { + copy_user_pgd_to_kernel_root_pt(next_mm->pgd); + } +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + + /* Switch context */ + reload_root_pt(next_mm); + mask = get_mmu_context(next_mm, cpu); + reload_context_mask(mask); + } + +#ifdef CONFIG_SMP + /* Stop flush ipis for the previous mm */ + if (likely(prev_mm)) + cpumask_clear_cpu(cpu, mm_cpumask(prev_mm)); +#endif + raw_all_irq_restore(flags); + } else { + /* Switching between threads, nothing to do here */ + } +} + +static inline void need_switch_mm(struct task_struct *prev, + struct task_struct *next, struct mm_struct *oldmm, + struct mm_struct *mm, int *switch_pgd, int *switch_mm) +{ + *switch_pgd = false; + *switch_mm = mm != NULL; +} + +/* + * Switch from address space PREV to address space NEXT. + */ +static inline void +switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, + struct task_struct *next) +{ + int switch_pgd, switch_mm; + + need_switch_mm(current, next, prev_mm, next_mm, + &switch_pgd, &switch_mm); + BUG_ON(switch_mm && switch_pgd); + + call_switch_mm(prev_mm, next_mm, next, switch_pgd, switch_mm); +} + +/* + * Set kernel MMU state + */ +static inline void +set_kernel_MMU_state(void) +{ + e2k_addr_t root_base = kernel_va_to_pa(cpu_kernel_root_pt); + + E2K_WAIT_ALL; + if (MMU_IS_SEPARATE_PT()) { + BUILD_BUG_ON(MMU_SEPARATE_KERNEL_VAB != PAGE_OFFSET); + WRITE_MMU_OS_VPTB(MMU_SEPARATE_KERNEL_VPTB); + WRITE_MMU_OS_PPTB(root_base); + WRITE_MMU_OS_VAB(MMU_SEPARATE_KERNEL_VAB); + WRITE_MMU_CONT(MMU_KERNEL_CONTEXT); + } else { + WRITE_MMU_U_VPTB(MMU_UNITED_KERNEL_VPTB); + WRITE_MMU_U_PPTB(root_base); + WRITE_MMU_CONT(MMU_KERNEL_CONTEXT); + } + E2K_WAIT_ALL; +} + +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT +extern inline void +set_secondary_space_MMU_state(void) +{ + unsigned long mmu_cr; + + mmu_cr = get_MMU_CR(); + mmu_cr |= 
_MMU_CR_UPT_EN; + if (machine.native_iset_ver >= E2K_ISET_V5) + mmu_cr |= _MMU_CR_SNXE; + set_MMU_CR(mmu_cr); +} +#else /* ! CONFIG_SECONDARY_SPACE_SUPPORT */ +#define set_secondary_space_MMU_state() +#endif /* CONFIG_SECONDARY_SPACE_SUPPORT */ + +extern void makecontext_trampoline_switched(void); + +#endif /* _E2K_MMU_CONTEXT_H_ */ diff --git a/arch/e2k/include/asm/mmu_fault.h b/arch/e2k/include/asm/mmu_fault.h new file mode 100644 index 000000000000..a234ca253e60 --- /dev/null +++ b/arch/e2k/include/asm/mmu_fault.h @@ -0,0 +1,261 @@ +#ifndef _E2K_MMU_FAULT_H_ +#define _E2K_MMU_FAULT_H_ + +#include +#include +#include +#include +#include +#include +#include + +#undef DEBUG_PA_MODE +#undef DebugPA +#define DEBUG_PA_MODE 0 /* page table allocation */ +#define DebugPA(fmt, args...) \ +({ \ + if (DEBUG_PA_MODE) \ + pr_info(fmt, ##args); \ +}) + +static inline int +native_guest_addr_to_host(void **addr) +{ + /* there are not any guests, so nothing convertion */ + return 0; +} + +static inline void * +native_guest_ptr_to_host(void *ptr, int size) +{ + /* there are not any guests, so nothing convertion */ + return ptr; +} + +static inline void +native_recovery_faulted_tagged_store(e2k_addr_t address, u64 wr_data, + u32 data_tag, u64 st_rec_opc, u64 data_ext, u32 data_ext_tag, + u64 opc_ext, int chan, int qp_store, int atomic_store) +{ + if (atomic_store) { + NATIVE_RECOVERY_TAGGED_STORE_ATOMIC(address, wr_data, data_tag, + st_rec_opc, data_ext, data_ext_tag, opc_ext); + } else { + NATIVE_RECOVERY_TAGGED_STORE(address, wr_data, data_tag, + st_rec_opc, data_ext, data_ext_tag, opc_ext, + chan, qp_store); + } +} +static inline void +native_recovery_faulted_load(e2k_addr_t address, u64 *ld_val, u8 *data_tag, + u64 ld_rec_opc, int chan) +{ + u64 val; + u32 tag; + + NATIVE_RECOVERY_TAGGED_LOAD_TO(address, ld_rec_opc, val, tag, chan); + *ld_val = val; + *data_tag = tag; +} +static inline void +native_recovery_faulted_move(e2k_addr_t addr_from, e2k_addr_t addr_to, + e2k_addr_t 
addr_to_hi, int vr, u64 ld_rec_opc, int chan, + int qp_load, int atomic_load, u32 first_time) +{ + if (atomic_load) { + NATIVE_MOVE_TAGGED_DWORD_WITH_OPC_VR_ATOMIC(addr_from, addr_to, + addr_to_hi, vr, ld_rec_opc); + } else { + NATIVE_MOVE_TAGGED_DWORD_WITH_OPC_CH_VR(addr_from, addr_to, + addr_to_hi, vr, ld_rec_opc, chan, qp_load, + first_time); + } +} + +static inline void +native_recovery_faulted_load_to_cpu_greg(e2k_addr_t address, u32 greg_num_d, + int vr, u64 ld_rec_opc, int chan_opc, + int qp_load, int atomic_load) +{ + if (atomic_load) { + NATIVE_RECOVERY_LOAD_TO_A_GREG_VR_ATOMIC(address, + ld_rec_opc, greg_num_d, vr, qp_load); + } else { + NATIVE_RECOVERY_LOAD_TO_A_GREG_CH_VR(address, + ld_rec_opc, greg_num_d, chan_opc, vr, qp_load); + } +} + +static inline void +native_recovery_faulted_load_to_greg(e2k_addr_t address, u32 greg_num_d, + int vr, u64 ld_rec_opc, int chan_opc, + int qp_load, int atomic_load, u64 *saved_greg_lo, + u64 *saved_greg_hi) +{ + if (!saved_greg_lo) { + native_recovery_faulted_load_to_cpu_greg(address, + greg_num_d, vr, ld_rec_opc, chan_opc, qp_load, + atomic_load); + } else { + native_recovery_faulted_move(address, + (u64) saved_greg_lo, (u64) saved_greg_hi, + vr, ld_rec_opc, chan_opc, qp_load, + atomic_load, 1); + } +} + +static inline bool +native_is_guest_kernel_gregs(struct thread_info *ti, + unsigned greg_num_d, u64 **greg_copy) +{ + /* native kernel does not use such registers */ + /* host kernel save/restore such registers itself */ + return false; +} + +static inline void +native_move_tagged_word(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + NATIVE_MOVE_TAGGED_WORD(addr_from, addr_to); +} +static inline void +native_move_tagged_dword(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + NATIVE_MOVE_TAGGED_DWORD(addr_from, addr_to); +} +static inline void +native_move_tagged_qword(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + NATIVE_MOVE_TAGGED_QWORD(addr_from, addr_from + sizeof(long), + addr_to, addr_to + sizeof(long)); +} + 
+extern void native_handle_mpdma_fault(e2k_addr_t hva); + +extern e2k_addr_t print_address_ptes(pgd_t *pgdp, e2k_addr_t address, + int kernel); + + +/* + * Paravirtualization support + */ +#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native kernel without any virtualization */ +/* or it is native host kernel with virtualization support */ +static inline void +recovery_faulted_tagged_store(e2k_addr_t address, u64 wr_data, u32 data_tag, + u64 st_rec_opc, u64 data_ext, u32 data_ext_tag, u64 opc_ext, + int chan, int qp_store, int atomic_store) +{ + native_recovery_faulted_tagged_store(address, wr_data, data_tag, + st_rec_opc, data_ext, data_ext_tag, opc_ext, + chan, qp_store, atomic_store); +} +static inline void +recovery_faulted_load(e2k_addr_t address, u64 *ld_val, u8 *data_tag, + u64 ld_rec_opc, int chan, tc_cond_t cond) +{ + native_recovery_faulted_load(address, ld_val, data_tag, + ld_rec_opc, chan); +} +static inline void +recovery_faulted_load_to_greg(e2k_addr_t address, u32 greg_num_d, + int vr, u64 ld_rec_opc, int chan, + int qp_load, int atomic_load, u64 *saved_greg_lo, + u64 *saved_greg_hi, tc_cond_t cond) +{ + native_recovery_faulted_load_to_greg(address, greg_num_d, + vr, ld_rec_opc, chan, qp_load, atomic_load, + saved_greg_lo, saved_greg_hi); +} +static inline void +recovery_faulted_move(e2k_addr_t addr_from, e2k_addr_t addr_to, + e2k_addr_t addr_to_hi, int vr, u64 ld_rec_opc, int chan, + int qp_load, int atomic_load, u32 first_time, + tc_cond_t cond) +{ + native_recovery_faulted_move(addr_from, addr_to, addr_to_hi, vr, + ld_rec_opc, chan, qp_load, atomic_load, first_time); +} + +static inline bool +is_guest_kernel_gregs(struct thread_info *ti, + unsigned greg_num_d, u64 **greg_copy) +{ + return native_is_guest_kernel_gregs(ti, greg_num_d, greg_copy); +} +static inline void +move_tagged_word(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + native_move_tagged_word(addr_from, addr_to); +} +static inline void 
+move_tagged_dword(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + native_move_tagged_dword(addr_from, addr_to); +} +static inline void +move_tagged_qword(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + native_move_tagged_qword(addr_from, addr_to); +} +static inline void +handle_mpdma_fault(e2k_addr_t hva) +{ + native_handle_mpdma_fault(hva); +} + +# ifndef CONFIG_VIRTUALIZATION +/* it is native kernel without any virtualization */ +static inline int guest_addr_to_host(void **addr, const pt_regs_t *regs) +{ + return native_guest_addr_to_host(addr); +} + +static inline void *guest_ptr_to_host(void *ptr, int size, const pt_regs_t *regs) +{ + return native_guest_ptr_to_host(ptr, size); +} +# else /* CONFIG_VIRTUALIZATION */ +/* it is native host kernel with virtualization support */ +#include +# endif /* !CONFIG_VIRTUALIZATION */ + +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#else + #error "Unknown virtualization type" +#endif /* ! CONFIG_PARAVIRT_GUEST && ! 
CONFIG_KVM_GUEST_KERNEL */ + +static inline void +store_tagged_dword(void *address, u64 data, u32 tag) +{ + recovery_faulted_tagged_store((e2k_addr_t) address, data, tag, + TAGGED_MEM_STORE_REC_OPC, 0, 0, 0, 1, 0, 0); +} + +static inline void +load_value_and_tagd(const void *address, u64 *ld_val, u8 *ld_tag) +{ + recovery_faulted_load((e2k_addr_t) address, ld_val, ld_tag, + TAGGED_MEM_LOAD_REC_OPC, 0, + (tc_cond_t) {.word = 0}); +} + +static inline void +load_qvalue_and_tagq(e2k_addr_t address, u64 *val_lo, u64 *val_hi, + u8 *tag_lo, u8 *tag_hi) +{ + recovery_faulted_load(address, val_lo, tag_lo, + TAGGED_MEM_LOAD_REC_OPC, 0, + (tc_cond_t) {.word = 0}); + recovery_faulted_load(address + sizeof(long), val_hi, tag_hi, + TAGGED_MEM_LOAD_REC_OPC, 0, + (tc_cond_t) {.word = 0}); +} + +#endif /* _E2K_MMU_FAULT_H_ */ diff --git a/arch/e2k/include/asm/mmu_regs.h b/arch/e2k/include/asm/mmu_regs.h new file mode 100644 index 000000000000..cb51ad35911f --- /dev/null +++ b/arch/e2k/include/asm/mmu_regs.h @@ -0,0 +1,863 @@ +/* + * MMU structures & registers. + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_MMU_REGS_H_ +#define _E2K_MMU_REGS_H_ + +#ifndef __ASSEMBLY__ +#include +#include +#endif /* __ASSEMBLY__ */ + +#include +#include +#include +#include +#include + +#undef DEBUG_MR_MODE +#undef DebugMR +#define DEBUG_MR_MODE 0 /* MMU registers access */ +#define DebugMR(...) DebugPrint(DEBUG_MR_MODE, ##__VA_ARGS__) + +#undef DEBUG_MCR_MODE +#undef DebugMCR +#define DEBUG_MCR_MODE 0 /* MMU CONTEXT registers access */ +#define DebugMCR(...) DebugPrint(DEBUG_MCR_MODE, ##__VA_ARGS__) + +#undef DEBUG_CLW_MODE +#undef DebugCLW +#define DEBUG_CLW_MODE 0 /* CLW registers access */ +#define DebugCLW(...) DebugPrint(DEBUG_CLW_MODE, ##__VA_ARGS__) + +#undef DEBUG_TLB_MODE +#undef DebugTLB +#define DEBUG_TLB_MODE 0 /* TLB registers access */ +#define DebugTLB(...) 
DebugPrint(DEBUG_TLB_MODE, ##__VA_ARGS__) + +/* + * MMU registers operations + */ + +#ifndef __ASSEMBLY__ +/* + * Write MMU register + */ +static inline void +write_MMU_reg(mmu_addr_t mmu_addr, mmu_reg_t mmu_reg) +{ + DebugMR("Write MMU reg 0x%lx value 0x%llx\n", + MMU_REG_NO_FROM_MMU_ADDR(mmu_addr), mmu_reg_val(mmu_reg)); + WRITE_MMU_REG(mmu_addr_val(mmu_addr), mmu_reg_val(mmu_reg)); +} + +static inline void +boot_write_MMU_reg(mmu_addr_t mmu_addr, mmu_reg_t mmu_reg) +{ + BOOT_WRITE_MMU_REG(mmu_addr_val(mmu_addr), mmu_reg_val(mmu_reg)); +} + +/* + * Read MMU register + */ + +static inline mmu_reg_t +read_MMU_reg(mmu_addr_t mmu_addr) +{ + DebugMR("Read MMU reg 0x%lx\n", + MMU_REG_NO_FROM_MMU_ADDR(mmu_addr)); + return __mmu_reg(READ_MMU_REG(mmu_addr_val(mmu_addr))); +} + +static inline mmu_reg_t +boot_read_MMU_reg(mmu_addr_t mmu_addr) +{ + return __mmu_reg(BOOT_READ_MMU_REG(mmu_addr_val(mmu_addr))); +} + +/* + * Read MMU Control register + */ +#define read_MMU_CR() read_MMU_reg(MMU_ADDR_CR) +#define READ_MMU_CR() \ + READ_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_CR_NO)) +static inline unsigned long +get_MMU_CR(void) +{ + unsigned long mmu_cr; + + DebugMR("Get MMU Control Register\n"); + mmu_cr = READ_MMU_CR(); + DebugMR("MMU Control Register state : 0x%lx\n", mmu_cr); + return mmu_cr; +} + +/* + * Write MMU Control register + */ +#define write_MMU_CR(mmu_cr) write_MMU_reg(MMU_ADDR_CR, mmu_cr) +#define WRITE_MMU_CR(mmu_cr) \ + WRITE_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_CR_NO), \ + mmu_reg_val(mmu_cr)) +static inline void +set_MMU_CR(unsigned long mmu_cr) +{ + DebugMR("Set MMU Control Register to 0x%lx\n", mmu_cr); + WRITE_MMU_CR(mmu_cr); + DebugMR("Read MMU Control Register : 0x%llx\n", + READ_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_CR_NO))); +} +#define BOOT_WRITE_MMU_CR(mmu_cr) \ + BOOT_WRITE_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_CR_NO), \ + mmu_reg_val(mmu_cr)) +static inline void +boot_set_MMU_CR(unsigned long mmu_cr) +{ + BOOT_WRITE_MMU_CR(mmu_cr); +} + 
+/* + * Write MMU Context register + */ +#define write_MMU_CONT(mmu_cont) \ + WRITE_MMU_PID(mmu_cont) +#define WRITE_MMU_CONT(mmu_cont) \ + WRITE_MMU_PID(mmu_reg_val(mmu_cont)) +static inline void +set_MMU_CONT(unsigned long context) +{ + DebugMCR("Set MMU CONTEXT register to 0x%lx\n", context); + WRITE_MMU_PID(context); +} +#define BOOT_WRITE_MMU_CONT(mmu_cont) \ + BOOT_WRITE_MMU_PID(mmu_reg_val(mmu_cont)) +static inline void +boot_set_MMU_CONT(unsigned long context) +{ + BOOT_WRITE_MMU_CONT(context); +} + +/* + * Write MMU Control Register of secondary space table + */ +#define write_MMU_CR3_RG(mmu_page_dir) \ + write_MMU_reg(MMU_ADDR_CR3_RG, mmu_page_dir) +#define WRITE_MMU_CR3_RG(mmu_page_dir) \ + WRITE_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_CR3_RG_NO), \ + mmu_reg_val(mmu_page_dir)) +static inline void +set_MMU_CR3_RG(unsigned long mmu_page_dir) +{ + DebugMR("Set MMU INTEL page table base register to 0x%lx\n", + mmu_page_dir); + WRITE_MMU_CR3_RG(mmu_page_dir); +} + +#define get_MMU_CR3_RG() \ + (unsigned long)mmu_reg_val(read_MMU_reg(MMU_ADDR_CR3_RG)) +/* + * Write MMU page tables virtual base register + */ +#define WRITE_MMU_U_VPTB(mmu_virt_ptb) \ + WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_U_VPTB_NO), \ + mmu_reg_val(mmu_virt_ptb)) +static inline void +set_MMU_U_VPTB(unsigned long mmu_virt_ptb) +{ + DebugMR("Set MMU page table virtual base register to 0x%lx\n", + mmu_virt_ptb); + WRITE_MMU_U_VPTB(mmu_virt_ptb); +} +#define BOOT_WRITE_MMU_U_VPTB(mmu_virt_ptb) \ + BOOT_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_U_VPTB_NO), \ + mmu_reg_val(mmu_virt_ptb)) +static inline void +boot_set_MMU_U_VPTB(unsigned long mmu_virt_ptb) +{ + BOOT_WRITE_MMU_U_VPTB(mmu_virt_ptb); +} + +static inline void +set_MMU_OS_VPTB(unsigned long mmu_virt_ptb) +{ + DebugMR("Set MMU OS page table virtual base register to 0x%lx\n", + mmu_virt_ptb); + WRITE_MMU_OS_VPTB(mmu_virt_ptb); +} +static inline void +boot_set_MMU_OS_VPTB(unsigned long mmu_virt_ptb) +{ + 
BOOT_WRITE_MMU_OS_VPTB(mmu_virt_ptb); +} + +/* + * Write/read MMU root page table physical base register + */ +#define WRITE_MMU_U_PPTB(mmu_phys_ptb) \ + WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_U_PPTB_NO), \ + mmu_reg_val(mmu_phys_ptb)) +#define READ_MMU_U_PPTB() \ + READ_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_U_PPTB_NO)) +static inline void +set_MMU_U_PPTB(unsigned long mmu_phys_ptb) +{ + DebugMR("Set MMU USER page table physical base register to 0x%lx\n", + mmu_phys_ptb); + WRITE_MMU_U_PPTB(mmu_phys_ptb); +} +#define BOOT_WRITE_MMU_U_PPTB(mmu_phys_ptb) \ + BOOT_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_U_PPTB_NO), \ + mmu_reg_val(mmu_phys_ptb)) +static inline void +boot_set_MMU_U_PPTB(unsigned long mmu_phys_ptb) +{ + BOOT_WRITE_MMU_U_PPTB(mmu_phys_ptb); +} +static inline unsigned long +get_MMU_U_PPTB(void) +{ + return READ_MMU_U_PPTB(); +} + +static inline void +set_MMU_OS_PPTB(unsigned long mmu_phys_ptb) +{ + DebugMR("Set MMU OS root page table physical base register to 0x%lx\n", + mmu_phys_ptb); + WRITE_MMU_OS_PPTB(mmu_phys_ptb); +} +static inline void +boot_set_MMU_OS_PPTB(unsigned long mmu_phys_ptb) +{ + BOOT_WRITE_MMU_OS_PPTB(mmu_phys_ptb); +} + +/* + * Read MMU Trap Point register + */ +#define read_MMU_TRAP_POINT() read_MMU_reg(MMU_ADDR_TRAP_POINT) +#define READ_MMU_TRAP_POINT() \ + READ_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL( \ + _MMU_TRAP_POINT_NO)) + +/* + * Set MMU Trap Point register + */ +#define write_MMU_TRAP_POINT(trap_cellar) \ + write_MMU_reg(MMU_ADDR_TRAP_POINT, \ + MMU_TRAP_POINT((e2k_addr_t)trap_cellar)) +#define WRITE_MMU_TRAP_POINT(trap_cellar) \ + WRITE_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL( \ + _MMU_TRAP_POINT_NO), \ + _MMU_TRAP_POINT((e2k_addr_t)trap_cellar)) +static inline void +set_MMU_TRAP_POINT(void *trap_cellar) +{ + DebugMR("Set MMU Trap Point register to %px\n", trap_cellar); + WRITE_MMU_TRAP_POINT(trap_cellar); +} +#define BOOT_WRITE_MMU_TRAP_POINT(trap_cellar) \ + 
BOOT_WRITE_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL( \ + _MMU_TRAP_POINT_NO), \ + _MMU_TRAP_POINT((e2k_addr_t)trap_cellar)) +static inline void +boot_set_MMU_TRAP_POINT(void *trap_cellar) +{ + BOOT_WRITE_MMU_TRAP_POINT(trap_cellar); +} + +/* + * Set MMU Trap Counter register + */ +#define NATIVE_WRITE_MMU_TRAP_COUNT(counter) \ + NATIVE_WRITE_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL( \ + _MMU_TRAP_COUNT_NO), \ + (unsigned long)_MMU_TRAP_COUNT(counter)) +#define write_MMU_TRAP_COUNT(counter) \ + write_MMU_reg(MMU_ADDR_TRAP_COUNT, \ + (unsigned long)_MMU_TRAP_COUNT(counter)) +#define WRITE_MMU_TRAP_COUNT(counter) \ + WRITE_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL( \ + _MMU_TRAP_COUNT_NO), \ + (unsigned long)_MMU_TRAP_COUNT(counter)) +#define RESET_MMU_TRAP_COUNT() WRITE_MMU_TRAP_COUNT(0) +static inline void +set_MMU_TRAP_COUNT(unsigned int counter) +{ + DebugMR("Set MMU Trap Counter register to %d\n", counter); + WRITE_MMU_TRAP_COUNT(counter); +} +static inline void +reset_MMU_TRAP_COUNT(void) +{ + RESET_MMU_TRAP_COUNT(); +} +#define BOOT_WRITE_MMU_TRAP_COUNT(counter) \ + BOOT_WRITE_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL( \ + _MMU_TRAP_COUNT_NO), \ + (unsigned long)_MMU_TRAP_COUNT(counter)) +#define BOOT_RESET_MMU_TRAP_COUNT() BOOT_WRITE_MMU_TRAP_COUNT(0) +static inline void +boot_reset_MMU_TRAP_COUNT(void) +{ + BOOT_RESET_MMU_TRAP_COUNT(); +} + +/* + * Read MMU Trap Counter register + */ +#define NATIVE_get_MMU_TRAP_COUNT() \ + ((unsigned int)mmu_reg_val(native_read_MMU_reg( \ + MMU_ADDR_TRAP_COUNT))) +#define NATIVE_READ_MMU_TRAP_COUNT() \ + ((unsigned int)(NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL( \ + _MMU_TRAP_COUNT_NO)))) +#define get_MMU_TRAP_COUNT() \ + ((unsigned int)mmu_reg_val(read_MMU_reg(MMU_ADDR_TRAP_COUNT))) +#define READ_MMU_TRAP_COUNT() \ + ((unsigned int)(READ_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL( \ + _MMU_TRAP_COUNT_NO)))) +static inline unsigned int +native_read_MMU_TRAP_COUNT(void) +{ + DebugMR("Read MMU Trap Counter register\n"); + return 
NATIVE_READ_MMU_TRAP_COUNT(); +} +static inline unsigned int +read_MMU_TRAP_COUNT(void) +{ + DebugMR("Read MMU Trap Counter register\n"); + return READ_MMU_TRAP_COUNT(); +} + +/* + * Set MMU Memory Protection Table Base register + */ +#define write_MMU_MPT_B(base) \ + write_MMU_reg(MMU_ADDR_MPT_B, base) +#define WRITE_MMU_MPT_B(base) \ + WRITE_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_MPT_B_NO), \ + mmu_reg_val(base)) +#define get_MMU_MPT_B() \ + read_MMU_reg(MMU_ADDR_MPT_B) +static inline void +set_MMU_MPT_B(unsigned long base) +{ + DebugMR("Set MMU Memory Protection Table Base register to 0x%lx\n", + base); + WRITE_MMU_MPT_B(base); +} + +/* + * Set MMU PCI Low Bound register + */ +#define write_MMU_PCI_L_B(bound) \ + write_MMU_reg(MMU_ADDR_PCI_L_B, bound) +#define WRITE_MMU_PCI_L_B(bound) \ + WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_PCI_L_B_NO), \ + mmu_reg_val(bound)) +static inline void +set_MMU_PCI_L_B(unsigned long bound) +{ + DebugMR("Set MMU PCI low bound register to 0x%lx\n", bound); + WRITE_MMU_PCI_L_B(bound); +} + +/* + * Set MMU Phys High Bound register + */ +#define write_MMU_PH_H_B(bound) \ + write_MMU_reg(MMU_ADDR_PH_H_B, bound) +#define WRITE_MMU_PH_H_B(bound) \ + WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_PH_H_B_NO), \ + mmu_reg_val(bound)) +static inline void +set_MMU_PH_H_B(unsigned long bound) +{ + DebugMR("Set MMU Physical memory high bound register to 0x%lx\n", + bound); + WRITE_MMU_PH_H_B(bound); +} + +/* + * Write User Stack Clean Window Disable register + */ +#define set_MMU_US_CL_D(val) \ + write_MMU_reg(MMU_ADDR_US_CL_D, val) +#define WRITE_MMU_US_CL_D(val) \ + WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_US_CL_D_NO), \ + mmu_reg_val(val)) +static inline void +write_MMU_US_CL_D(unsigned int disable_flag) +{ + DebugCLW("Set MMU US CLW Disable register to %d\n", disable_flag); + WRITE_MMU_US_CL_D(disable_flag); +} + +/* + * Read User Stack Clean Window Disable register + */ +#define get_MMU_US_CL_D() \ + 
read_MMU_reg(MMU_ADDR_US_CL_D) +#define READ_MMU_US_CL_D() \ + (unsigned int)READ_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL( \ + _MMU_US_CL_D_NO)) + +/* + * Set Memory Type Range Registers ( MTRRS ) + */ + +#define WRITE_MTRR_REG(no, val) \ + WRITE_MMU_REG(MMU_ADDR_MTRR(no), mmu_reg_val(val)) + +static inline void +set_MMU_MTRR_REG(unsigned long no, long long value) +{ + DebugCLW("Set MTRR#%ld register to ox%llx\n", no, value); + WRITE_MTRR_REG(no, value); +} + +/* + * Get Memory Type Range Registers ( MTRRS ) + */ +#define get_MMU_MTRR_REG(no) \ + (unsigned long)READ_MMU_REG(MMU_ADDR_MTRR(no)) + +static inline unsigned int +read_MMU_US_CL_D(void) +{ + DebugCLW("Read MMU US CLW Disable register\n"); + return (unsigned int)READ_MMU_US_CL_D(); +} + + +/* + * Flush TLB page + */ +static inline void +____flush_TLB_page(flush_op_t flush_op, flush_addr_t flush_addr) +{ + unsigned long flags; + bool fl_c_needed = cpu_has(CPU_HWBUG_TLB_FLUSH_L1D); + + DebugTLB("Flush TLB page : op 0x%lx extended virtual addr 0x%lx\n", + flush_op_val(flush_op), flush_addr_val(flush_addr)); + + raw_all_irq_save(flags); + FLUSH_TLB_ENTRY(flush_op_val(flush_op), flush_addr_val(flush_addr)); + if (fl_c_needed) + __E2K_WAIT(_fl_c); + raw_all_irq_restore(flags); +} + +#define flush_TLB_page_begin() +#define flush_TLB_page_end() \ +do { \ + __E2K_WAIT(_fl_c | _ma_c); \ +} while (0) + +static inline void +__flush_TLB_page(e2k_addr_t virt_addr, unsigned long context) +{ + ____flush_TLB_page(flush_op_tlb_page_sys, + flush_addr_make_sys(virt_addr, context)); +} + +static inline void +flush_TLB_page(e2k_addr_t virt_addr, unsigned long context) +{ + flush_TLB_page_begin(); + __flush_TLB_page(virt_addr, context); + flush_TLB_page_end(); +} + +static inline void +__flush_TLB_kernel_page(e2k_addr_t virt_addr) +{ + __flush_TLB_page(virt_addr, E2K_KERNEL_CONTEXT); +} + +static inline void +flush_TLB_kernel_page(e2k_addr_t virt_addr) +{ + flush_TLB_page_begin(); + __flush_TLB_kernel_page(virt_addr); + 
flush_TLB_page_end(); +} + +static inline void +__flush_TLB_ss_page(e2k_addr_t virt_addr, unsigned long context) +{ + ____flush_TLB_page(flush_op_tlb_page_sys, + flush_addr_make_ss(virt_addr, context)); +} + +static inline void +flush_TLB_ss_page(e2k_addr_t virt_addr, unsigned long context) +{ + flush_TLB_page_begin(); + __flush_TLB_ss_page(virt_addr, context); + flush_TLB_page_end(); +} + +/* + * Flush DCACHE line + */ +#define flush_DCACHE_line_begin() \ +do { \ + E2K_WAIT_ST; \ +} while (0) + +#define flush_DCACHE_line_end() \ +do { \ + E2K_WAIT_FLUSH; \ +} while (0) + +static inline void __flush_DCACHE_line(e2k_addr_t virt_addr) +{ + FLUSH_DCACHE_LINE(virt_addr); +} +static inline void __flush_DCACHE_line_offset(e2k_addr_t virt_addr, size_t offset) +{ + FLUSH_DCACHE_LINE_OFFSET(virt_addr, offset); +} +static inline void +flush_DCACHE_line(e2k_addr_t virt_addr) +{ + DebugMR("Flush DCACHE line : virtual addr 0x%lx\n", virt_addr); + + flush_DCACHE_line_begin(); + __flush_DCACHE_line(virt_addr); + flush_DCACHE_line_end(); +} + +/* + * Clear DCACHE L1 set + */ +static inline void +clear_DCACHE_L1_set(e2k_addr_t virt_addr, unsigned long set) +{ + E2K_WAIT_ALL; + CLEAR_DCACHE_L1_SET(virt_addr, set); + E2K_WAIT_ST; +} + +/* + * Clear DCACHE L1 line + */ +static inline void +clear_DCACHE_L1_line(e2k_addr_t virt_addr) +{ + unsigned long set; + for (set = 0; set < E2K_DCACHE_L1_SETS_NUM; set++) + clear_DCACHE_L1_set(virt_addr, set); +} +/* + * Write DCACHE L2 registers + */ +static inline void +native_write_DCACHE_L2_reg(unsigned long reg_val, int reg_num, int bank_num) +{ + NATIVE_WRITE_L2_REG(reg_val, reg_num, bank_num); +} +static inline void +native_write_DCACHE_L2_CNTR_reg(unsigned long reg_val, int bank_num) +{ + native_write_DCACHE_L2_reg(reg_val, _E2K_DCACHE_L2_CTRL_REG, bank_num); +} +static inline void +write_DCACHE_L2_reg(unsigned long reg_val, int reg_num, int bank_num) +{ + WRITE_L2_REG(reg_val, reg_num, bank_num); +} +static inline void 
+write_DCACHE_L2_CNTR_reg(unsigned long reg_val, int bank_num) +{ + write_DCACHE_L2_reg(reg_val, _E2K_DCACHE_L2_CTRL_REG, bank_num); +} + +/* + * Read DCACHE L2 registers + */ +static inline unsigned long +native_read_DCACHE_L2_reg(int reg_num, int bank_num) +{ + return NATIVE_READ_L2_REG(reg_num, bank_num); +} +static inline unsigned long +native_read_DCACHE_L2_CNTR_reg(int bank_num) +{ + return native_read_DCACHE_L2_reg(_E2K_DCACHE_L2_CTRL_REG, bank_num); +} +static inline unsigned long +native_read_DCACHE_L2_ERR_reg(int bank_num) +{ + return native_read_DCACHE_L2_reg(_E2K_DCACHE_L2_ERR_REG, bank_num); +} +static inline unsigned long +read_DCACHE_L2_reg(int reg_num, int bank_num) +{ + return READ_L2_REG(reg_num, bank_num); +} +static inline unsigned long +read_DCACHE_L2_CNTR_reg(int bank_num) +{ + return read_DCACHE_L2_reg(_E2K_DCACHE_L2_CTRL_REG, bank_num); +} +static inline unsigned long +read_DCACHE_L2_ERR_reg(int bank_num) +{ + return read_DCACHE_L2_reg(_E2K_DCACHE_L2_ERR_REG, bank_num); +} + +/* + * Flush ICACHE line + */ +static inline void +__flush_ICACHE_line(flush_op_t flush_op, flush_addr_t flush_addr) +{ + DebugMR("Flush ICACHE line : op 0x%lx extended virtual addr 0x%lx\n", + flush_op_val(flush_op), flush_addr_val(flush_addr)); + FLUSH_ICACHE_LINE(flush_op_val(flush_op), flush_addr_val(flush_addr)); +} + +#define flush_ICACHE_line_begin() +#define flush_ICACHE_line_end() \ +do { \ + E2K_WAIT_FLUSH; \ +} while (0) + +static inline void +__flush_ICACHE_line_user(e2k_addr_t virt_addr) +{ + __flush_ICACHE_line(flush_op_icache_line_user, + flush_addr_make_user(virt_addr)); +} + +static inline void +flush_ICACHE_line_user(e2k_addr_t virt_addr) +{ + flush_ICACHE_line_begin(); + __flush_ICACHE_line_user(virt_addr); + flush_ICACHE_line_end(); +} + +static inline void +__flush_ICACHE_line_sys(e2k_addr_t virt_addr, unsigned long context) +{ + __flush_ICACHE_line(flush_op_icache_line_sys, + flush_addr_make_sys(virt_addr, context)); +} + +static inline void 
+flush_ICACHE_line_sys(e2k_addr_t virt_addr, unsigned long context) +{ + flush_ICACHE_line_begin(); + __flush_ICACHE_line_sys(virt_addr, context); + flush_ICACHE_line_end(); +} + +static inline void +flush_ICACHE_kernel_line(e2k_addr_t virt_addr) +{ + flush_ICACHE_line_sys(virt_addr, E2K_KERNEL_CONTEXT); +} + +/* + * Flush and invalidate CACHE(s) (invalidate all caches of the processor) + * WARNING: operation was deleted from instruction set begining V3-iset + */ + +static inline void +boot_native_invalidate_CACHE_L12(void) +{ + int invalidate_supported; + unsigned long flags; + + /* Invalidate operation was removed in E2S */ + invalidate_supported = BOOT_NATIVE_IS_MACHINE_ES2; + + raw_all_irq_save(flags); + E2K_WAIT_MA; + if (invalidate_supported) + NATIVE_FLUSH_CACHE_L12(_flush_op_invalidate_cache_L12); + else + NATIVE_FLUSH_CACHE_L12(_flush_op_write_back_cache_L12); + E2K_WAIT_FLUSH; + raw_all_irq_restore(flags); +} + +/* + * Flush and write back CACHE(s) (write back and invalidate all caches + * of the processor) + * Flush cache is the same as write back + */ + +static inline void +native_raw_write_back_CACHE_L12(void) +{ + __E2K_WAIT(_ma_c); + NATIVE_FLUSH_CACHE_L12(_flush_op_write_back_cache_L12); + __E2K_WAIT(_fl_c | _ma_c); +} + +static inline void +write_back_CACHE_L12(void) +{ + DebugMR("Flush : Write back all CACHEs (op 0x%lx)\n", + _flush_op_write_back_cache_L12); + FLUSH_CACHE_L12(_flush_op_write_back_cache_L12); +} + +/* + * Flush TLB (invalidate all TLBs of the processor) + */ + +static inline void +native_raw_flush_TLB_all(void) +{ + __E2K_WAIT(_st_c); + NATIVE_FLUSH_TLB_ALL(_flush_op_tlb_all); + __E2K_WAIT(_fl_c | _ma_c); +} + +static inline void +flush_TLB_all(void) +{ + DebugMR("Flush all TLBs (op 0x%lx)\n", _flush_op_tlb_all); + FLUSH_TLB_ALL(_flush_op_tlb_all); +} + +/* + * Flush ICACHE (invalidate instruction caches of the processor) + */ +static inline void +flush_ICACHE_all(void) +{ + DebugMR("Flush all ICACHE op 0x%lx\n", 
_flush_op_icache_all); + FLUSH_ICACHE_ALL(_flush_op_icache_all); +} + +/* + * Read CLW register + */ + +static inline clw_reg_t +read_CLW_reg(clw_addr_t clw_addr) +{ + DebugCLW("Read CLW reg 0x%lx\n", clw_addr); + return READ_CLW_REG(clw_addr); +} + +static inline clw_reg_t +native_read_CLW_reg(clw_addr_t clw_addr) +{ + DebugCLW("Read CLW reg 0x%lx\n", clw_addr); + return NATIVE_READ_CLW_REG(clw_addr); +} + +/* + * Read CLW bottom register + */ +#define read_US_CL_B() read_CLW_reg(ADDR_US_CL_B) +#define READ_US_CL_B() READ_CLW_REG(ADDR_US_CL_B) +#define native_read_US_CL_B() native_read_CLW_reg(ADDR_US_CL_B) +#define NATIVE_READ_US_CL_B() NATIVE_READ_CLW_REG(ADDR_US_CL_B) + +/* + * Read CLW up register + */ +#define read_US_CL_UP() read_CLW_reg(ADDR_US_CL_UP) +#define READ_US_CL_UP() READ_CLW_REG(ADDR_US_CL_UP) +#define native_read_US_CL_UP() native_read_CLW_reg(ADDR_US_CL_UP) +#define NATIVE_READ_US_CL_UP() NATIVE_READ_CLW_REG(ADDR_US_CL_UP) + +/* + * Read CLW bit-mask registers + */ +#define read_US_CL_M0() read_CLW_reg(ADDR_US_CL_M0) +#define READ_US_CL_M0() READ_CLW_REG(ADDR_US_CL_M0) +#define read_US_CL_M1() read_CLW_reg(ADDR_US_CL_M1) +#define READ_US_CL_M1() READ_CLW_REG(ADDR_US_CL_M1) +#define read_US_CL_M2() read_CLW_reg(ADDR_US_CL_M2) +#define READ_US_CL_M2() READ_CLW_REG(ADDR_US_CL_M2) +#define read_US_CL_M3() read_CLW_reg(ADDR_US_CL_M3) +#define READ_US_CL_M3() READ_CLW_REG(ADDR_US_CL_M3) +#define native_read_US_CL_M0() native_read_CLW_reg(ADDR_US_CL_M0) +#define NATIVE_READ_US_CL_M0() NATIVE_READ_CLW_REG(ADDR_US_CL_M0) +#define native_read_US_CL_M1() native_read_CLW_reg(ADDR_US_CL_M1) +#define NATIVE_READ_US_CL_M1() NATIVE_READ_CLW_REG(ADDR_US_CL_M1) +#define native_read_US_CL_M2() native_read_CLW_reg(ADDR_US_CL_M2) +#define NATIVE_READ_US_CL_M2() NATIVE_READ_CLW_REG(ADDR_US_CL_M2) +#define native_read_US_CL_M3() native_read_CLW_reg(ADDR_US_CL_M3) +#define NATIVE_READ_US_CL_M3() NATIVE_READ_CLW_REG(ADDR_US_CL_M3) + +/* + * Write CLW register + */ + 
+static inline void +write_CLW_reg(clw_addr_t clw_addr, clw_reg_t val) +{ + DebugCLW("Write CLW reg 0x%lx value 0x%lx\n", clw_addr, val); + WRITE_CLW_REG(clw_addr, val); +} + +static inline void +native_write_CLW_reg(clw_addr_t clw_addr, clw_reg_t val) +{ + DebugCLW("Write CLW reg 0x%lx value 0x%lx\n", clw_addr, val); + NATIVE_WRITE_CLW_REG(clw_addr, val); +} + +/* + * Write CLW bottom register + */ +#define write_US_CL_B(val) write_CLW_reg(ADDR_US_CL_B, val) +#define WRITE_US_CL_B(val) WRITE_CLW_REG(ADDR_US_CL_B, val) +#define native_write_US_CL_B(val) native_write_CLW_reg(ADDR_US_CL_B, val) +#define NATIVE_WRITE_US_CL_B(val) NATIVE_WRITE_CLW_REG(ADDR_US_CL_B, val) + +/* + * Write CLW up register + */ +#define write_US_CL_UP(val) write_CLW_reg(ADDR_US_CL_UP, val) +#define WRITE_US_CL_UP(val) WRITE_CLW_REG(ADDR_US_CL_UP, val) +#define native_write_US_CL_UP(val) native_write_CLW_reg(ADDR_US_CL_UP, val) +#define NATIVE_WRITE_US_CL_UP(val) NATIVE_WRITE_CLW_REG(ADDR_US_CL_UP, val) + +/* + * Write CLW bit-mask registers + */ +#define write_US_CL_M0(val) write_CLW_reg(ADDR_US_CL_M0, val) +#define WRITE_US_CL_M0(val) WRITE_CLW_REG(ADDR_US_CL_M0, val) +#define write_US_CL_M1(val) write_CLW_reg(ADDR_US_CL_M1, val) +#define WRITE_US_CL_M1(val) WRITE_CLW_REG(ADDR_US_CL_M1, val) +#define write_US_CL_M2(val) write_CLW_reg(ADDR_US_CL_M2, val) +#define WRITE_US_CL_M2(val) WRITE_CLW_REG(ADDR_US_CL_M2, val) +#define write_US_CL_M3(val) write_CLW_reg(ADDR_US_CL_M3, val) +#define WRITE_US_CL_M3(val) WRITE_CLW_REG(ADDR_US_CL_M3, val) +#define native_write_US_CL_M0(val) native_write_CLW_reg(ADDR_US_CL_M0, val) +#define NATIVE_WRITE_US_CL_M0(val) NATIVE_WRITE_CLW_REG(ADDR_US_CL_M0, val) +#define native_write_US_CL_M1(val) native_write_CLW_reg(ADDR_US_CL_M1, val) +#define NATIVE_WRITE_US_CL_M1(val) NATIVE_WRITE_CLW_REG(ADDR_US_CL_M1, val) +#define native_write_US_CL_M2(val) native_write_CLW_reg(ADDR_US_CL_M2, val) +#define NATIVE_WRITE_US_CL_M2(val) NATIVE_WRITE_CLW_REG(ADDR_US_CL_M2, 
val) +#define native_write_US_CL_M3(val) native_write_CLW_reg(ADDR_US_CL_M3, val) +#define NATIVE_WRITE_US_CL_M3(val) NATIVE_WRITE_CLW_REG(ADDR_US_CL_M3, val) + + +#endif /* ! __ASSEMBLY__ */ + +#endif /* _E2K_MMU_REGS_H_ */ diff --git a/arch/e2k/include/asm/mmu_regs_access.h b/arch/e2k/include/asm/mmu_regs_access.h new file mode 100644 index 000000000000..946ce5f032fe --- /dev/null +++ b/arch/e2k/include/asm/mmu_regs_access.h @@ -0,0 +1,318 @@ +/* + * asm-e2k/mmu_regs_access.h: E2K MMU structures & registers. + * + * Copyright 2014 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_MMU_REGS_ACCESS_H_ +#define _E2K_MMU_REGS_ACCESS_H_ + +#ifndef __ASSEMBLY__ +#include +#include +#endif /* __ASSEMBLY__ */ + +#include +#include + +#include + +extern unsigned long native_read_MMU_OS_PPTB_reg_value(void); +extern void native_write_MMU_OS_PPTB_reg_value(unsigned long value); +extern unsigned long native_read_MMU_OS_VPTB_reg_value(void); +extern void native_write_MMU_OS_VPTB_reg_value(unsigned long value); +extern unsigned long native_read_MMU_OS_VAB_reg_value(void); +extern void native_write_MMU_OS_VAB_reg_value(unsigned long value); + +extern unsigned long boot_native_read_MMU_OS_PPTB_reg_value(void); +extern void boot_native_write_MMU_OS_PPTB_reg_value(unsigned long value); +extern unsigned long boot_native_read_MMU_OS_VPTB_reg_value(void); +extern void boot_native_write_MMU_OS_VPTB_reg_value(unsigned long value); +extern unsigned long boot_native_read_MMU_OS_VAB_reg_value(void); +extern void boot_native_write_MMU_OS_VAB_reg_value(unsigned long value); + +#define NATIVE_WRITE_MMU_OS_PPTB_REG(reg_val) \ + native_write_MMU_OS_PPTB_reg_value(reg_val) +#define NATIVE_READ_MMU_OS_PPTB_REG() \ + native_read_MMU_OS_PPTB_reg_value() +#define NATIVE_WRITE_MMU_OS_VPTB_REG(reg_val) \ + native_write_MMU_OS_VPTB_reg_value(reg_val) +#define NATIVE_READ_MMU_OS_VPTB_REG() \ + native_read_MMU_OS_VPTB_reg_value() +#define NATIVE_WRITE_MMU_OS_VAB_REG(reg_val) \ + 
native_write_MMU_OS_VAB_reg_value(reg_val) +#define NATIVE_READ_MMU_OS_VAB_REG() \ + native_read_MMU_OS_VAB_reg_value() +#define NATIVE_READ_MMU_PID_REG() \ + NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_PID_NO)) +#define NATIVE_WRITE_MMU_PID_REG(reg_val) \ + NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_PID_NO), \ + mmu_reg_val(reg_val)) +#define NATIVE_READ_MMU_U_PPTB_REG() \ + NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_U_PPTB_NO)) +#define NATIVE_WRITE_MMU_U_PPTB_REG(reg_val) \ + NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_U_PPTB_NO), \ + mmu_reg_val(reg_val)) +#define NATIVE_READ_MMU_U_VPTB_REG() \ + NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_U_VPTB_NO)) +#define NATIVE_WRITE_MMU_U_VPTB_REG(reg_val) \ + NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_U_VPTB_NO), \ + mmu_reg_val(reg_val)) + +#define BOOT_NATIVE_WRITE_MMU_OS_PPTB_REG(reg_val) \ + boot_native_write_MMU_OS_PPTB_reg_value(reg_val) +#define BOOT_NATIVE_READ_MMU_OS_PPTB_REG() \ + boot_native_read_MMU_OS_PPTB_reg_value() +#define BOOT_NATIVE_WRITE_MMU_OS_VPTB_REG(reg_val) \ + boot_native_write_MMU_OS_VPTB_reg_value(reg_val) +#define BOOT_NATIVE_READ_MMU_OS_VPTB_REG() \ + boot_native_read_MMU_OS_VPTB_reg_value() +#define BOOT_NATIVE_WRITE_MMU_OS_VAB_REG(reg_val) \ + boot_native_write_MMU_OS_VAB_reg_value(reg_val) +#define BOOT_NATIVE_READ_MMU_OS_VAB_REG() \ + boot_native_read_MMU_OS_VAB_reg_value() +#define BOOT_NATIVE_WRITE_MMU_PID_REG(reg_val) \ + NATIVE_WRITE_MMU_PID_REG(reg_val) +#define BOOT_NATIVE_READ_MMU_PID_REG() \ + NATIVE_READ_MMU_PID_REG() + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native guest kernel (not paravirtualized based on pv_ops) */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#else /* ! CONFIG_KVM_GUEST_KERNEL && ! 
CONFIG_PARAVIRT_GUEST */ +/* it is native kernel without any virtualization */ +/* or host kernel with virtualization support */ + +/* + * MMU registers operations + */ + +#ifndef __ASSEMBLY__ +/* + * Write/read MMU register + */ +#define WRITE_MMU_REG(addr_val, reg_val) \ + NATIVE_WRITE_MMU_REG(addr_val, reg_val) +#define READ_MMU_REG(addr_val) \ + NATIVE_READ_MMU_REG(addr_val) + +#define BOOT_WRITE_MMU_REG(addr_val, reg_val) \ + BOOT_NATIVE_WRITE_MMU_REG(addr_val, reg_val) +#define BOOT_READ_MMU_REG(addr_val) \ + BOOT_NATIVE_READ_MMU_REG(addr_val) + +#define WRITE_MMU_OS_PPTB(reg_val) \ + NATIVE_WRITE_MMU_OS_PPTB_REG(reg_val) +#define READ_MMU_OS_PPTB() \ + NATIVE_READ_MMU_OS_PPTB_REG() +#define WRITE_MMU_OS_VPTB(reg_val) \ + NATIVE_WRITE_MMU_OS_VPTB_REG(reg_val) +#define READ_MMU_OS_VPTB() \ + NATIVE_READ_MMU_OS_VPTB_REG() +#define WRITE_MMU_OS_VAB(reg_val) \ + NATIVE_WRITE_MMU_OS_VAB_REG(reg_val) +#define READ_MMU_OS_VAB() \ + NATIVE_READ_MMU_OS_VAB_REG() +#define WRITE_MMU_PID(reg_val) \ + NATIVE_WRITE_MMU_PID_REG(reg_val) +#define READ_MMU_PID() \ + NATIVE_READ_MMU_PID_REG() + +#define BOOT_WRITE_MMU_OS_PPTB(reg_val) \ + BOOT_NATIVE_WRITE_MMU_OS_PPTB_REG(reg_val) +#define BOOT_READ_MMU_OS_PPTB() \ + BOOT_NATIVE_READ_MMU_OS_PPTB_REG() +#define BOOT_WRITE_MMU_OS_VPTB(reg_val) \ + BOOT_NATIVE_WRITE_MMU_OS_VPTB_REG(reg_val) +#define BOOT_READ_MMU_OS_VPTB() \ + BOOT_NATIVE_READ_MMU_OS_VPTB_REG() +#define BOOT_WRITE_MMU_OS_VAB(reg_val) \ + BOOT_NATIVE_WRITE_MMU_OS_VAB_REG(reg_val) +#define BOOT_READ_MMU_OS_VAB() \ + BOOT_NATIVE_READ_MMU_OS_VAB_REG() +#define BOOT_WRITE_MMU_PID(reg_val) \ + BOOT_NATIVE_WRITE_MMU_PID_REG(reg_val) +#define BOOT_READ_MMU_PID() \ + BOOT_NATIVE_READ_MMU_PID_REG() + +/* + * Write/read Data TLB register + */ +#define WRITE_DTLB_REG(tlb_addr, tlb_value) \ + NATIVE_WRITE_DTLB_REG(tlb_addr, tlb_value) + +#define READ_DTLB_REG(tlb_addr) \ + NATIVE_READ_DTLB_REG(tlb_addr) + +/* + * Flush TLB page/entry + */ +#define FLUSH_TLB_ENTRY(flush_op, 
addr) \ + NATIVE_FLUSH_TLB_ENTRY(flush_op, addr) + +/* + * Flush DCACHE line + */ +#define FLUSH_DCACHE_LINE(virt_addr) \ + NATIVE_FLUSH_DCACHE_LINE(virt_addr) +#define FLUSH_DCACHE_LINE_OFFSET(virt_addr, offset) \ + NATIVE_FLUSH_DCACHE_LINE_OFFSET((virt_addr), (offset)) + +/* + * Clear DCACHE L1 set + */ +#define CLEAR_DCACHE_L1_SET(virt_addr, set) \ + NATIVE_CLEAR_DCACHE_L1_SET(virt_addr, set) + +/* + * Write DCACHE L2 registers + */ +#define WRITE_L2_REG(reg_val, reg_num, bank_num) \ + NATIVE_WRITE_L2_REG(reg_val, reg_num, bank_num) + +/* + * Read DCACHE L2 registers + */ +#define READ_L2_REG(reg_num, bank_num) \ + NATIVE_READ_L2_REG(reg_num, bank_num) + +/* + * Flush ICACHE line + */ +#define FLUSH_ICACHE_LINE(flush_op, addr) \ + NATIVE_FLUSH_ICACHE_LINE(flush_op, addr) + +/* + * Flush and invalidate or write back L1/L2 CACHE(s) + */ +#define FLUSH_CACHE_L12(flush_op) \ + native_write_back_CACHE_L12() + +/* + * Flush TLB (invalidate all TLBs of the processor) + */ +#define FLUSH_TLB_ALL(flush_op) \ + native_flush_TLB_all() + +/* + * Flush ICACHE (invalidate instruction caches of the processor) + */ +#define FLUSH_ICACHE_ALL(flush_op) \ + native_flush_ICACHE_all() + +/* + * Get Entry probe for virtual address + */ +#define ENTRY_PROBE_MMU_OP(addr_val) \ + NATIVE_ENTRY_PROBE_MMU_OP(addr_val) + +/* + * Get physical address for virtual address + */ +#define ADDRESS_PROBE_MMU_OP(addr_val) \ + NATIVE_ADDRESS_PROBE_MMU_OP(addr_val) + +/* + * Read CLW register + */ +#define READ_CLW_REG(clw_addr) \ + NATIVE_READ_CLW_REG(clw_addr) + +/* + * Write CLW register + */ +#define WRITE_CLW_REG(clw_addr, val) \ + NATIVE_WRITE_CLW_REG(clw_addr, val) + +/* + * MMU DEBUG registers access + */ +#define READ_DDBAR0_REG_VALUE() NATIVE_READ_DDBAR0_REG_VALUE() +#define READ_DDBAR1_REG_VALUE() NATIVE_READ_DDBAR1_REG_VALUE() +#define READ_DDBAR2_REG_VALUE() NATIVE_READ_DDBAR2_REG_VALUE() +#define READ_DDBAR3_REG_VALUE() NATIVE_READ_DDBAR3_REG_VALUE() +#define READ_DDBCR_REG_VALUE() 
NATIVE_READ_DDBCR_REG_VALUE() +#define READ_DDBSR_REG_VALUE() NATIVE_READ_DDBSR_REG_VALUE() +#define READ_DDMAR0_REG_VALUE() NATIVE_READ_DDMAR0_REG_VALUE() +#define READ_DDMAR1_REG_VALUE() NATIVE_READ_DDMAR1_REG_VALUE() +#define READ_DDMCR_REG_VALUE() NATIVE_READ_DDMCR_REG_VALUE() +#define WRITE_DDBAR0_REG_VALUE(value) NATIVE_WRITE_DDBAR0_REG_VALUE(value) +#define WRITE_DDBAR1_REG_VALUE(value) NATIVE_WRITE_DDBAR1_REG_VALUE(value) +#define WRITE_DDBAR2_REG_VALUE(value) NATIVE_WRITE_DDBAR2_REG_VALUE(value) +#define WRITE_DDBAR3_REG_VALUE(value) NATIVE_WRITE_DDBAR3_REG_VALUE(value) +#define WRITE_DDBCR_REG_VALUE(value) NATIVE_WRITE_DDBCR_REG_VALUE(value) +#define WRITE_DDBSR_REG_VALUE(value) NATIVE_WRITE_DDBSR_REG_VALUE(value) +#define WRITE_DDMAR0_REG_VALUE(value) NATIVE_WRITE_DDMAR0_REG_VALUE(value) +#define WRITE_DDMAR1_REG_VALUE(value) NATIVE_WRITE_DDMAR1_REG_VALUE(value) +#define WRITE_DDMCR_REG_VALUE(value) NATIVE_WRITE_DDMCR_REG_VALUE(value) + +#endif /* ! __ASSEMBLY__ */ + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#ifndef __ASSEMBLY__ + +#define READ_DDBAR0_REG() \ + READ_DDBAR0_REG_VALUE() +#define READ_DDBAR1_REG() \ + READ_DDBAR1_REG_VALUE() +#define READ_DDBAR2_REG() \ + READ_DDBAR2_REG_VALUE() +#define READ_DDBAR3_REG() \ + READ_DDBAR3_REG_VALUE() +#define READ_DDBCR_REG() \ +({ \ + e2k_ddbcr_t ddbcr; \ + \ + ddbcr.DDBCR_reg = READ_DDBCR_REG_VALUE(); \ + ddbcr; \ +}) +#define READ_DDBSR_REG() \ +({ \ + e2k_ddbsr_t ddbsr; \ + \ + ddbsr.DDBSR_reg = READ_DDBSR_REG_VALUE(); \ + ddbsr; \ +}) +#define READ_DDMAR0_REG() \ + READ_DDMAR0_REG_VALUE() +#define READ_DDMAR1_REG() \ + READ_DDMAR1_REG_VALUE() +#define READ_DDMCR_REG() \ +({ \ + e2k_ddmcr_t ddmcr; \ + \ + ddmcr.DDMCR_reg = READ_DDMCR_REG_VALUE(); \ + ddmcr; \ +}) +#define WRITE_DDBAR0_REG(value) \ + WRITE_DDBAR0_REG_VALUE(value) +#define WRITE_DDBAR1_REG(value) \ + WRITE_DDBAR1_REG_VALUE(value) +#define WRITE_DDBAR2_REG(value) \ + WRITE_DDBAR2_REG_VALUE(value) +#define WRITE_DDBAR3_REG(value) \ + 
WRITE_DDBAR3_REG_VALUE(value) +#define WRITE_DDBCR_REG(value) \ + WRITE_DDBCR_REG_VALUE(value.DDBCR_reg) +#define WRITE_DDBSR_REG(value) \ + WRITE_DDBSR_REG_VALUE(value.DDBSR_reg) +#define WRITE_DDMAR0_REG(value) \ + WRITE_DDMAR0_REG_VALUE(value) +#define WRITE_DDMAR1_REG(value) \ + WRITE_DDMAR1_REG_VALUE(value) +#define WRITE_DDMCR_REG(value) \ + WRITE_DDMCR_REG_VALUE(value.DDMCR_reg) + +#endif /* ! __ASSEMBLY__ */ + +#endif /* _E2K_MMU_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/mmu_regs_types.h b/arch/e2k/include/asm/mmu_regs_types.h new file mode 100644 index 000000000000..85e1ca7213d8 --- /dev/null +++ b/arch/e2k/include/asm/mmu_regs_types.h @@ -0,0 +1,932 @@ +/* + * asm-e2k/mmu_regs.h: E2K MMU structures & registers. + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_MMU_REGS_TYPES_H_ +#define _E2K_MMU_REGS_TYPES_H_ + +#include + +/* + * MMU registers structures + */ + +/* MMU address to access to MMU internal registers */ + +#ifndef __ASSEMBLY__ +typedef e2k_addr_t mmu_addr_t; +#define mmu_addr_val(mmu_addr) (mmu_addr) +#define __mmu_addr(mmu_addr_val) (mmu_addr_val) +#endif /* __ASSEMBLY__ */ + +#define _MMU_ADDR_REG_NO_SHIFT 4 /* [ 9: 4] */ + +#define _MMU_ADDR_REG_NO 0x0000000000000ff0 /* # of register */ + +#define _MMU_CR_NO 0x00 /* Control register */ +#define _MMU_CONT_NO 0x01 /* Context register */ +#define _MMU_PID_NO _MMU_CONT_NO /* renamed name of CONT */ +#define _MMU_CR3_RG_NO 0x02 /* CR3 register for secondary space */ +#define _MMU_U2_PPTB_NO _MMU_CR3_RG_NO /* renamed name of CR3 */ +#define _MMU_ELB_PTB_NO 0x03 /* ELBRUS page table virtual base */ +#define _MMU_U_VPTB_NO _MMU_ELB_PTB_NO /* renamed name of ELB_PTB */ +#define _MMU_ROOT_PTB_NO 0x04 /* Root Page Table Base register */ +#define _MMU_U_PPTB_NO _MMU_ROOT_PTB_NO /* renamed name of ROOT_PTB */ +#define _MMU_TRAP_POINT_NO 0x05 /* Trap Pointer register */ +#define _MMU_TRAP_COUNT_NO 0x06 /* Trap Counter register */ +#define _MMU_MPT_B_NO 0x07 /* 
Phys Protection Table Base */ + /* register for secondary space */ +#define _MMU_PCI_L_B_NO 0x08 /* PCI Low Bound register */ + /* for secondary space */ +#define _MMU_US_CL_D_NO 0x09 /* User Stack Clearing Disable */ + /* register */ +#define _MMU_PH_H_B_NO 0x0a /* Low Phys memory High Bound */ + /* for secondary space */ +#define _MMU_USED_4KB_DTLB_NO 0x0b /* ??? */ +#define _MMU_USED_DTLB_MPT_DW_NO 0x0c /* ??? */ +#define _MMU_APIC_BASE_NO 0x0d /* local APIC/EPIC registers base */ +#define _MMU_HW0_NO 0x0e /* MMU hardware register #0 */ +#define _MMU_PID2_NO 0x0f /* secondary process ID */ +#define _MMU_MTRR_START_NO 0x10 /* Memory Type Range Register */ + /* (first register) */ + /* for secondary space */ +#define _MMU_MTRR_PAIRS_END_NO 0x1f /* Memory Type Range Register */ + /* (mtrr15 - last pairs register) */ + /* for secondary space */ +#define _MMU_MTRR_END_NO 0x30 /* Memory Type Range Register */ + /* (last register) */ + /* for secondary space */ +#define _MMU_PAT_NO 0x31 /* page attribute table */ +#define _MMU_PH_HI_L_B_NO 0x32 /* High Phys memory Low Bound */ + /* for secondary space */ +#define _MMU_PH_HI_H_B_NO 0x33 /* High Phys memory High Bound */ + /* for secondary space */ +#define _MMU_OS_VPTB_NO 0x34 /* virtual base of kernel PTs */ +#define _MMU_OS_PPTB_NO 0x35 /* physical base of kernel PTs */ +#define _MMU_PDPTE0_NO 0x38 /* pud[0] 3-th level PT entry #0 */ +#define _MMU_PDPTE1_NO 0x39 /* pud[1] 3-th level PT entry #1 */ +#define _MMU_PDPTE2_NO 0x3a /* pud[2] 3-th level PT entry #2 */ +#define _MMU_PDPTE3_NO 0x3b /* pud[3] 3-th level PT entry #3 */ +#define _MMU_TLU_CACHE_NO 0x3c /* TLU cache ??? 
*/ +#define _MMU_OS_VAB_NO 0x4c /* kernel virtual space base */ + /* PAGE_OFFSET */ +#define MMU_REGS_NUM (_MMU_MTRR_END_NO + 1) +#define MTRR_LAST_DEFAULT 0x806 /* Default value of last MTRR */ + +#define _MMU_REG_NO_TO_MMU_ADDR_VAL(reg_no) \ + (((reg_no) << _MMU_ADDR_REG_NO_SHIFT) & _MMU_ADDR_REG_NO) +#define MMU_REG_NO_TO_MMU_ADDR(reg_no) \ + __mmu_addr(_MMU_REG_NO_TO_MMU_ADDR_VAL(reg_no)) +#define MMU_REG_NO_FROM_MMU_ADDR(mmu_addr) \ + ((mmu_addr_val(mmu_addr) & _MMU_ADDR_REG_NO) >> \ + _MMU_ADDR_REG_NO_SHIFT) + +#define MMU_ADDR_CR MMU_REG_NO_TO_MMU_ADDR(_MMU_CR_NO) +#define MMU_ADDR_CONT MMU_REG_NO_TO_MMU_ADDR(_MMU_CONT_NO) +#define MMU_ADDR_PID MMU_ADDR_CONT /* renamed name */ +#define MMU_ADDR_CR3_RG MMU_REG_NO_TO_MMU_ADDR(_MMU_CR3_RG_NO) +#define MMU_ADDR_U2_PGTB MMU_ADDR_CR3_RG /* renamed name */ +#define MMU_ADDR_ELB_PTB MMU_REG_NO_TO_MMU_ADDR(_MMU_ELB_PTB_NO) +#define MMU_ADDR_U_VPTB MMU_ADDR_ELB_PTB /* rename name */ +#define MMU_ADDR_ROOT_PTB MMU_REG_NO_TO_MMU_ADDR(_MMU_ROOT_PTB_NO) +#define MMU_ADDR_U_PPTB MMU_ADDR_ROOT_PTB /* rename name */ +#define MMU_ADDR_TRAP_POINT MMU_REG_NO_TO_MMU_ADDR(_MMU_TRAP_POINT_NO) +#define MMU_ADDR_TRAP_COUNT MMU_REG_NO_TO_MMU_ADDR(_MMU_TRAP_COUNT_NO) +#define MMU_ADDR_MPT_B MMU_REG_NO_TO_MMU_ADDR(_MMU_MPT_B_NO) +#define MMU_ADDR_PCI_L_B MMU_REG_NO_TO_MMU_ADDR(_MMU_PCI_L_B_NO) +#define MMU_ADDR_US_CL_D MMU_REG_NO_TO_MMU_ADDR(_MMU_US_CL_D_NO) +#define MMU_ADDR_PH_H_B MMU_REG_NO_TO_MMU_ADDR(_MMU_PH_H_B_NO) +#define MMU_ADDR_WATCH_POINT MMU_REG_NO_TO_MMU_ADDR(_MMU_WATCH_POINT_NO) +#define MMU_ADDR_MTRR_START MMU_REG_NO_TO_MMU_ADDR(_MMU_MTRR_START_NO) +#define MMU_ADDR_MTRR_END MMU_REG_NO_TO_MMU_ADDR(_MMU_MTRR_END_NO) +#define MMU_ADDR_MTRR(no) MMU_REG_NO_TO_MMU_ADDR(no) +#define MMU_ADDR_OS_VPTB MMU_REG_NO_TO_MMU_ADDR(_MMU_OS_VPTB_NO) +#define MMU_ADDR_OS_PPTB MMU_REG_NO_TO_MMU_ADDR(_MMU_OS_PPTB_NO) +#define MMU_ADDR_SH_OS_VPTB MMU_REG_NO_TO_MMU_ADDR(_MMU_SH_OS_VPTB_NO) +#define MMU_ADDR_SH_OS_PPTB 
MMU_REG_NO_TO_MMU_ADDR(_MMU_SH_OS_PPTB_NO) +#define MMU_ADDR_OS_VAB MMU_REG_NO_TO_MMU_ADDR(_MMU_OS_VAB_NO) + +/* MMU internel register contents */ + +#ifndef __ASSEMBLY__ +typedef unsigned long long mmu_reg_t; +#define mmu_reg_val(mmu_reg) (mmu_reg) +#define __mmu_reg(mmu_reg_val) (mmu_reg_val) +#endif /* __ASSEMBLY__ */ + +/* Size of trap_cellar */ + +/* while bug for size of env for setjmp use TC_INTO_PT_REGS == 0 */ +#define MAX_TC_SIZE 10 + +/* + * MMU Control Register MMU_CR + */ + +#define _MMU_CR_CD_SHIFT 1 +#define _MMU_CR_IPD_SHIFT 11 + +#define _MMU_CR_TLB_EN 0x0000000000000001 /* translation enable */ +#define _MMU_CR_CD_MASK 0x0000000000000006 /* cache disable bits */ +#define _MMU_CR_SET1 0x0000000000000008 /* set #1 enable for */ + /* 4 MB pages */ +#define _MMU_CR_SET2 0x0000000000000010 /* set #2 enable for */ + /* 4 MB pages */ +#define _MMU_CR_SET3 0x0000000000000020 /* set #3 enable for */ + /* 4 MB pages */ +#define _MMU_CR_CR0_PG 0x0000000000000040 /* paging enable for */ + /* second space INTEL */ +#define _MMU_CR_CR4_PSE 0x0000000000000080 /* page size 4Mb */ + /* enable for second */ + /* space INTEL */ +#define _MMU_CR_CR0_CD 0x0000000000000100 /* cache disable for */ + /* secondary space */ + /* INTEL */ +#define _MMU_CR_TLU2_EN 0x0000000000000200 /* TLU enable for */ + /* secondary space */ + /* INTEL */ +#define _MMU_CR_LD_MPT 0x0000000000000400 /* memory protection */ + /* table enable for */ + /* LD from secondary */ + /* space INTEL */ +#define _MMU_CR_IPD_MASK 0x0000000000000800 /* Instruction */ + /* Prefetch Depth */ +#define _MMU_CR_UPT_EN 0x0000000000001000 /* enable UPT */ +#define _MMU_CR_SNXE 0x0000000000008000 /* enable SNXE */ + +#define _MMU_CR_CD_VAL(x) (((x) << _MMU_CR_CD_SHIFT) & _MMU_CR_CD_MASK) +#define _MMU_CD_EN _MMU_CR_CD_VAL(0UL) /* all caches enabled */ +#define _MMU_CD_D1_DIS _MMU_CR_CD_VAL(1UL) /* DCACHE1 disabled */ +#define _MMU_CD_D_DIS _MMU_CR_CD_VAL(2UL) /* DCACHE1, DCACHE2 disabled */ +#define _MMU_CD_DIS 
_MMU_CR_CD_VAL(3UL) /* DCACHE1, DCACHE2, ECACHE */ + /* disabled */ +#define _MMU_CR_IPD_VAL(x) (((x) << _MMU_CR_IPD_SHIFT) & _MMU_CR_IPD_MASK) +#define _MMU_IPD_DIS _MMU_CR_IPD_VAL(0UL) /* none prefetch */ +#define _MMU_IPD_2_LINE _MMU_CR_IPD_VAL(1UL) /* 2 line of ICACHE prefetch */ + +#ifdef CONFIG_IPD_DISABLE +#define KERNEL_MMU_IPD _MMU_IPD_DIS /* none prefetch */ +#else +#define KERNEL_MMU_IPD _MMU_IPD_2_LINE /* 2 line of ICACHE prefetch */ +#endif /* CONFIG_IPD_DISABLE */ + +#ifndef CONFIG_SECONDARY_SPACE_SUPPORT +#define _MMU_CR_SEC_SPACE_EN +#define _MMU_CR_SEC_SPACE_DIS +#else /* CONFIG_SECONDARY_SPACE_SUPPORT */ +#define _MMU_CR_SEC_SPACE_EN (_MMU_CR_CR0_PG | _MMU_CR_TLU2_EN) +#define _MMU_CR_SEC_SPACE_DIS (_MMU_CR_CR0_CD) +#endif /* ! CONFIG_SECONDARY_SPACE_SUPPORT */ + +#define __MMU_CR_KERNEL (_MMU_CR_TLB_EN | _MMU_CD_EN | KERNEL_MMU_IPD) +#define __MMU_CR_KERNEL_OFF (_MMU_CD_DIS | _MMU_IPD_DIS) + +#ifdef CONFIG_HUGETLB_PAGE +# define _MMU_CR_KERNEL (__MMU_CR_KERNEL | _MMU_CR_SET3) +#else +# define _MMU_CR_KERNEL (boot_cpu_has(CPU_HWBUG_LARGE_PAGES) ? 
\ + (__MMU_CR_KERNEL) : (__MMU_CR_KERNEL | _MMU_CR_SET3)) +#endif /* CONFIG_HUGETLB_PAGE */ + +#define MMU_CR_KERNEL __mmu_reg(_MMU_CR_KERNEL) +#define MMU_CR_KERNEL_OFF __mmu_reg(__MMU_CR_KERNEL_OFF) + +#define mmu_cr_set_tlb_enable(mmu_reg) \ + (mmu_reg_val(mmu_reg) | _MMU_CR_TLB_EN) + +#define mmu_cr_set_vaddr_enable(mmu_reg) \ + (mmu_reg_val(mmu_reg) | _MMU_CR_TLB_EN) + +#define mmu_cr_reset_tlb_enable(mmu_reg) \ + (mmu_reg_val(mmu_reg) & ~(_MMU_CR_TLB_EN)) + +#define mmu_cr_reset_vaddr_enable(mmu_reg) \ + (mmu_reg_val(mmu_reg) & ~(_MMU_CR_TLB_EN)) + +# define mmu_cr_set_large_pages(mmu_reg) \ + (mmu_reg_val(mmu_reg) | _MMU_CR_SET3) +# define mmu_cr_reset_large_pages(mmu_reg) \ + (mmu_reg_val(mmu_reg) & ~_MMU_CR_SET3) + +/* + * MMU Process ID Register MMU_PID (renamed name from MMU_CONT) + */ + +#define _MMU_CONTEXT 0x0000000000000fff +#define _MMU_CONTEXT_SIZE (_MMU_CONTEXT + 1) +#define MMU_PID_SIZE _MMU_CONTEXT_SIZE + +/* + * Kernel virtual memory context + */ +#define E2K_KERNEL_CONTEXT 0x000 +#define E2K_KERNEL_PID E2K_KERNEL_CONTEXT /* renamed name */ + +#define MMU_CONTEXT(context) __mmu_reg(context) +#define MMU_KERNEL_CONTEXT MMU_CONTEXT(E2K_KERNEL_CONTEXT) +#define MMU_PID(pid) MMU_CONTEXT(pid) /* renamed name */ +#define MMU_KERNEL_PID MMU_KERNEL_CONTEXT /* renamed name */ + +/* + * MMU Control Register of secondary space table MMU_CR3_RG + * The physical address of the INTEL page directory base, + * aligned to table size + */ + +#define _MMU_CR3_PAGE_DIR 0x0000000fffff000UL +#define _MMU_CR3_PCD 0x000000000000010UL +#define _MMU_CR3_PWT 0x000000000000008UL + +#define MMU_CR3_KERNEL(page_dir) \ + (((e2k_addr_t)(page_dir)) & _MMU_CR3_PAGE_DIR) + +/* + * MMU Page Table virtual Base Registers MMU_OS_VPTB & MMU_U_VPTB + * (renamed MMU_ELB_PTB) + * The virtual address of the page table beginning, aligned to table size + */ +/* OS page table virtual base for separate virtual spaces */ +#define _MMU_VPTB_MASK (PGDIR_MASK & E2K_VA_PAGE_MASK) +#define 
MMU_ADDR_TO_VPTB(virt_addr) \ + __mmu_reg((virt_addr) & _MMU_VPTB_MASK) + +/* Separate Page Tables virtual bases */ +#define MMU_SEPARATE_KERNEL_VPTB MMU_ADDR_TO_VPTB(KERNEL_VPTB_BASE_ADDR) +#define MMU_SEPARATE_USER_VPTB MMU_ADDR_TO_VPTB(USER_VPTB_BASE_ADDR) +/* United Page Tables virtual base */ +#define MMU_UNITED_KERNEL_VPTB MMU_ADDR_TO_VPTB(KERNEL_VPTB_BASE_ADDR) +#define MMU_UNITED_USER_VPTB MMU_UNITED_KERNEL_VPTB + +/* + * MMU Root Page Table physical Bases register MMU_OS_PPTB & MMU_U_PPTB + * (renamed MMU_ROOT_PTB) + * The physical address of the root elbrus page table beginning, + * aligned to table size + */ + +#define _MMU_PPTB_MASK (MAX_PA_MASK & PAGE_MASK) +#define MMU_ADDR_TO_PPTB(phys_addr) __mmu_reg((phys_addr) & _MMU_PPTB_MASK) +#define MMU_KERNEL_PPTB MMU_ADDR_TO_PPTB(KERNEL_PPTB_BASE_ADDR) + +/* Separate Page Tables physical bases */ +#define MMU_SEPARATE_KERNEL_PPTB MMU_KERNEL_PPTB +#define MMU_SEPARATE_USER_PPTB(phys_addr) MMU_ADDR_TO_PPTB(phys_addr) +/* United Page Tables virtual base */ +#define MMU_UNITED_KERNEL_PPTB MMU_KERNEL_PPTB +#define MMU_UNITED_USER_PPTB(phys_addr) MMU_ADDR_TO_PPTB(phys_addr) + +/* + * MMU Base address of virtual space of kernel MMU_OS_VAB + * The virtual address of the kernel space should be aligned to 2**44 + */ +#define _MMU_VAB_MASK 0x0000f00000000000 +#define MMU_ADDR_TO_VAB(virt_addr) __mmu_reg((virt_addr) & _MMU_VAB_MASK) +#define MMU_SEPARATE_KERNEL_VAB MMU_ADDR_TO_VAB(PAGE_OFFSET) + +/* + * MMU Trap Pointer register MMU_TRAP_POINT + * The physical address of the beginning of an area, where the attributes + * of nonexecuted requests to memory are stored in case of the exception + * arising on it ("cellar") + */ + +#define MMU_ALIGN_TRAP_POINT_BASE_V2 9 +#define MMU_ALIGN_TRAP_POINT_BASE_MASK_V2 \ + ((1UL << MMU_ALIGN_TRAP_POINT_BASE_V2) - 1) +#define MMU_TRAP_POINT_MASK_V2 ~MMU_ALIGN_TRAP_POINT_BASE_MASK_V2 +#define MMU_TRAP_CELLAR_MAX_SIZE_V2 64 /* double-words */ + +#define MMU_ALIGN_TRAP_POINT_BASE 10 
+#define MMU_ALIGN_TRAP_POINT_BASE_MASK ((1UL << MMU_ALIGN_TRAP_POINT_BASE) - 1) +#define MMU_TRAP_POINT_MASK ~MMU_ALIGN_TRAP_POINT_BASE_MASK +#define MMU_TRAP_CELLAR_MAX_SIZE \ + ((TC_EXT_OFFSET + 7)/8 + 64) /* double-words */ + +#define _MMU_TRAP_POINT(phys_addr) ((phys_addr) & MMU_TRAP_POINT_MASK) +#define MMU_TRAP_POINT(phys_addr) __mmu_reg(_MMU_TRAP_POINT(phys_addr)) +#define MMU_KERNEL_TRAP_POINT MMU_TRAP_POINT(KERNEL_TRAP_CELLAR) + +/* + * MMU Trap Counter register MMU_TRAP_COUNT + * Number of double-words in the "cellar" of the trap + */ + +#define _MMU_TRAP_COUNT_MASK 0x000000000000002f +#define _MMU_TRAP_COUNT(counter) (counter & _MMU_TRAP_COUNT_MASK) +#define MMU_TRAP_COUNT(counter) __mmu_reg(_MMU_TRAP_COUNT(counter)) +#define MMU_TRAP_COUNT_GET(mmu_reg) _MMU_TRAP_COUNT(mmu_reg_val(mmu_reg)) + +#define mmu_trap_count_get(mmu_reg) MMU_TRAP_COUNT_GET(mmu_reg) + +/* + * MMU PCI Low Bound MMU_PCI_L_B + * Fix the boundary between PCIand main memory addresses + * for Intel accesses + */ + +#define _MMU_PCI_L_B 0x00000000ffc00000UL +#define _MMU_PCI_L_B_ALIGN_MASK 0x00000000003fffffUL + +/* + * MMU Phys High Bound MMU_PH_H_B + * Fix the high boundary Intel physical memory + * for Intel accesses + */ + +#define _MMU_PH_H_B 0x00000000ffc00000UL +#define _MMU_PH_H_B_ALIGN_MASK 0x00000000003fffffUL + +/* + * CACHEs (DCACHE & ICACHE) structure + */ + +#define E2K_DCACHE_L1_LINES_BITS_NUM 9 +#define E2K_DCACHE_L1_LINES_NUM (1 << E2K_DCACHE_L1_LINES_BITS_NUM) +#define E2K_DCACHE_L1_SETS_BITS_NUM 2 +#define E2K_DCACHE_L1_SETS_NUM (1 << E2K_DCACHE_L1_SETS_BITS_NUM) + +#define E2K_DCACHE_L2_LINES_BITS_NUM 10 +#define E2K_DCACHE_L2_LINES_NUM (1 << E2K_DCACHE_L2_LINES_BITS_NUM) +#define E2K_DCACHE_L2_SETS_BITS_NUM 2 +#define E2K_DCACHE_L2_SETS_NUM (1 << E2K_DCACHE_L2_SETS_BITS_NUM) + +#define E2K_ICACHE_SETS_NUM 4 +#define E2K_ICACHE_SET_SIZE 256 +#define E2K_ICACHE_SET_MASK (E2K_ICACHE_SET_SIZE - 1) +#define E2K_ICACHE_LINES_NUM 64 + +/* + * CACHEs (DCACHE & ICACHE) 
registers operations + */ + +/* CACHEs (DCACHE & ICACHE) registers access operations address */ + +#ifndef __ASSEMBLY__ +typedef e2k_addr_t dcache_addr_t; +typedef dcache_addr_t dcache_l1_addr_t; +typedef dcache_addr_t dcache_l2_addr_t; +#endif /* ! __ASSEMBLY__ */ + +#define dcache_addr_val(dcache_addr) (dcache_addr) +#define dcache_l1_addr_val(dcache_l1_addr) dcache_addr_val(dcache_l1_addr) +#define dcache_l2_addr_val(dcache_l2_addr) dcache_addr_val(dcache_l2_addr) + +#define __dcache_addr(dcache_addr_val) (dcache_addr_val) +#define __dcache_l1_addr(dcache_l1_addr_val) __dcache_addr(dcache_l1_addr_val) +#define __dcache_l2_addr(dcache_l2_addr_val) __dcache_addr(dcache_l2_addr_val) + +#define _E2K_DCACHE_L1_SET 0x00000000C0000000 +#define _E2K_DCACHE_L1_TYPE 0x0000000020000000 +#define _E2K_DCACHE_L1_LINE 0x0000000000003FE0 +#define _E2K_DCACHE_L1_WORD 0x0000000000000018 + +#define _E2K_DCACHE_L1_SET_SHIFT 30 +#define _E2K_DCACHE_L1_TYPE_SHIFT 29 +#define _E2K_DCACHE_L1_LINE_SHIFT 5 +#define _E2K_DCACHE_L1_WORD_SHIFT 3 + +#define DCACHE_L1_VADDR_TO_ADDR(virt_addr) \ + ((virt_addr) & _E2K_DCACHE_L1_LINE) + +#define dcache_l1_set_set(addr, set) \ + (__dcache_l1_addr( \ + (dcache_l1_addr_val(addr) & ~_E2K_DCACHE_L1_SET) | \ + ((set) << _E2K_DCACHE_L1_SET_SHIFT) & \ + _E2K_DCACHE_L1_SET)) +#define dcache_l1_get_set(addr) \ + (dcache_l1_addr_val(addr) & _E2K_DCACHE_L1_SET) + +#define dcache_l1_set_type(addr, type) \ + (__dcache_l1_addr( \ + (dcache_l1_addr_val(addr) & ~_E2K_DCACHE_L1_TYPE) | \ + ((type) << _E2K_DCACHE_L1_TYPE_SHIFT) & \ + _E2K_DCACHE_L1_TYPE)) +#define dcache_l1_get_type(addr) \ + (dcache_l1_addr_val(addr) & _E2K_DCACHE_L1_TYPE) + +#define dcache_l1_set_line(addr, line) \ + (__dcache_l1_addr( \ + (dcache_l1_addr_val(addr) & ~_E2K_DCACHE_L1_LINE) | \ + ((line) << _E2K_DCACHE_L1_LINE_SHIFT) & \ + _E2K_DCACHE_L1_LINE)) +#define dcache_l1_get_line(addr) \ + (dcache_l1_addr_val(addr) & _E2K_DCACHE_L1_LINE) + +#define dcache_l1_set_word(addr, word) \ + 
(__dcache_l1_addr( \ + (dcache_l1_addr_val(addr) & ~_E2K_DCACHE_L1_WORD) | \ + ((word) << _E2K_DCACHE_L1_WORD_SHIFT) & \ + _E2K_DCACHE_L1_WORD)) +#define dcache_l1_get_word(addr) \ + (dcache_l1_addr_val(addr) & _E2K_DCACHE_L1_WORD) + +#define mk_dcache_l1_addr(virt_addr, set, type, word) \ +({ \ + dcache_l1_addr_t addr; \ + addr = __dcache_l1_addr(DCACHE_L1_VADDR_TO_ADDR(virt_addr)); \ + addr = dcache_l1_set_set(addr, set); \ + addr = dcache_l1_set_type(addr, type); \ + addr = dcache_l1_set_word(addr, word); \ + addr; \ +}) + +#define _E2K_DCACHE_L2_TYPE 0x0000000030000000 + #define _E2K_DCACHE_L2_DATA_TYPE 0x0 + #define _E2K_DCACHE_L2_REGS_TYPE 0x1 + #define _E2K_DCACHE_L2_TAG_TYPE 0x2 + #define _E2K_DCACHE_L2_REGS_TYPE2 0x3 +#define _E2K_DCACHE_L2_LINE 0x000000000007ffc0 +#define _E2K_DCACHE_L2_REG_NUM 0x000000000000ff00 + #define _E2K_DCACHE_L2_BIST_SIG1_REG 0x00 + #define _E2K_DCACHE_L2_BIST_SIG2_REG 0x01 + #define _E2K_DCACHE_L2_BISR_CTRL_REG 0x02 + #define _E2K_DCACHE_L2_CTRL_REG 0x03 + #define _E2K_DCACHE_L2_ECC_DBG_REG 0x04 + #define _E2K_DCACHE_L2_ERR_REG 0x05 + #define _E2K_DCACHE_L2_CNT_ERR1_REG 0x06 + #define _E2K_DCACHE_L2_CNT_ERR2_REG 0x07 + #define _E2K_DCACHE_L2_CTRL_EXT_REG 0x08 +#define _E2K_DCACHE_L2_BANK_NUM 0x00000000000000c0 +#define _E2K_DCACHE_L2_WORD 0x0000000000000038 + +#define _E2K_DCACHE_L2_TYPE_SHIFT 28 +#define _E2K_DCACHE_L2_LINE_SHIFT 6 +#define _E2K_DCACHE_L2_REG_NUM_SHIFT 8 +#define _E2K_DCACHE_L2_BANK_NUM_SHIFT 6 +#define _E2K_DCACHE_L2_WORD_SHIFT 3 + +#define E2K_L2_BANK_NUM 4 + +#define E2K_L2_CNTR_EN_CORR 0x0000000000000001 +#define E2K_L2_CNTR_EN_DET 0x0000000000000002 +#define E2K_L2_CNTR_EN_CINT 0x0000000000000004 + +#define DCACHE_L2_PADDR_TO_ADDR(phys_addr) \ + ((phys_addr) & _E2K_DCACHE_L2_LINE) + +#define dcache_l2_set_type(addr, type) \ + (__dcache_l2_addr( \ + (dcache_l2_addr_val(addr) & ~_E2K_DCACHE_L2_TYPE) | \ + ((type) << _E2K_DCACHE_L2_TYPE_SHIFT) & \ + _E2K_DCACHE_L2_TYPE)) +#define dcache_l2_get_type(addr) \ + 
(dcache_l2_addr_val(addr) & _E2K_DCACHE_L2_TYPE) + +#define dcache_l2_set_line(addr, line) \ + (__dcache_l2_addr( \ + (dcache_l2_addr_val(addr) & ~_E2K_DCACHE_L2_LINE) | \ + ((line) << _E2K_DCACHE_L2_LINE_SHIFT) & \ + _E2K_DCACHE_L2_LINE)) +#define dcache_l2_get_line(addr) \ + (dcache_l2_addr_val(addr) & _E2K_DCACHE_L2_LINE) + +#define dcache_l2_set_reg_num(addr, reg_num) \ + (__dcache_l2_addr( \ + (dcache_l2_addr_val(addr) & \ + ~_E2K_DCACHE_L2_REG_NUM) | \ + ((reg_num) << _E2K_DCACHE_L2_REG_NUM_SHIFT) & \ + _E2K_DCACHE_L2_REG_NUM)) +#define dcache_l2_get_reg_num(addr) \ + (dcache_l2_addr_val(addr) & _E2K_DCACHE_L2_REG_NUM) + +#define dcache_l2_set_bank_num(addr, bank_num) \ + (__dcache_l2_addr( \ + (dcache_l2_addr_val(addr) & \ + ~_E2K_DCACHE_L2_BANK_NUM) | \ + ((bank_num) << _E2K_DCACHE_L2_BANK_NUM_SHIFT) & \ + _E2K_DCACHE_L2_BANK_NUM)) +#define dcache_l2_get_bank_num(addr) \ + (dcache_l2_addr_val(addr) & _E2K_DCACHE_L2_BANK_NUM) + +#define dcache_l2_set_word(addr, word) \ + (__dcache_l2_addr( \ + (dcache_l2_addr_val(addr) & ~_E2K_DCACHE_L2_WORD) | \ + ((word) << _E2K_DCACHE_L2_WORD_SHIFT) & \ + _E2K_DCACHE_L2_WORD)) +#define dcache_l2_get_word(addr) \ + (dcache_l2_addr_val(addr) & _E2K_DCACHE_L2_WORD) + +#define mk_dcache_l2_addr(phys_addr, type, word) \ +({ \ + dcache_l2_addr_t addr = 0; \ + addr = __dcache_l2_addr(DCACHE_L2_PADDR_TO_ADDR(phys_addr)); \ + addr = dcache_l2_set_type(addr, type); \ + addr = dcache_l2_set_word(addr, word); \ + addr; \ +}) + +#define mk_dcache_l2_reg_addr(reg_num, bank_num) \ +({ \ + dcache_l2_addr_t addr = 0; \ + addr = dcache_l2_set_type(addr, _E2K_DCACHE_L2_REGS_TYPE); \ + addr = dcache_l2_set_reg_num(addr, reg_num); \ + addr = dcache_l2_set_bank_num(addr, bank_num); \ + addr; \ +}) + +/* + * ICACHE/DTLB/ITLB line flush operations + */ + +/* ICACHE/DTLB/ITLB line flush operations address */ + +#ifndef __ASSEMBLY__ +typedef e2k_addr_t flush_op_t; +#endif /* ! 
__ASSEMBLY__ */ + +#define flush_op_val(flush_op) (flush_op) + +#define __flush_op(flush_op_val) (flush_op_val) + +#define _FLUSH_OP_TYPE 0x0000000000000007 /* type of */ + /* operation */ +#define _FLUSH_ICACHE_LINE_USER_OP 0x0000000000000000 +#define _FLUSH_TLB_PAGE_SYS_OP 0x0000000000000001 +#define _FLUSH_ICACHE_LINE_SYS_OP 0x0000000000000002 + +#define flush_op_get_type(flush_op) \ + (flush_op_val(flush_op) & _FLUSH_OP_TYPE) +#define flush_op_set_type(flush_op, type) \ + (__flush_op((flush_op_val(flush_op) & ~_FLUSH_OP_TYPE) | \ + ((type) & _FLUSH_OP_TYPE))) +#define flush_op_set_icache_line_user(flush_op) \ + flush_op_set_type(flush_op, _FLUSH_ICACHE_LINE_USER_OP) +#define flush_op_set_icache_line_sys(flush_op) \ + flush_op_set_type(flush_op, _FLUSH_ICACHE_LINE_SYS_OP) +#define flush_op_set_tlb_page_sys(flush_op) \ + flush_op_set_type(flush_op, _FLUSH_TLB_PAGE_SYS_OP) +#define _flush_op_icache_line_user ((long)_FLUSH_ICACHE_LINE_USER_OP) +#define _flush_op_icache_line_sys ((long)_FLUSH_ICACHE_LINE_SYS_OP) +#define _flush_op_tlb_page_sys ((long)_FLUSH_TLB_PAGE_SYS_OP) +#define flush_op_icache_line_user __flush_op(_flush_op_icache_line_user) +#define flush_op_icache_line_sys __flush_op(_flush_op_icache_line_sys) +#define flush_op_tlb_page_sys __flush_op(_flush_op_tlb_page_sys) + +/* ICACHE/DTLB/ITLB line flush extended virtual address structure */ + +#ifndef __ASSEMBLY__ +typedef e2k_addr_t flush_addr_t; +#endif /* ! 
__ASSEMBLY__ */ + +#define flush_addr_val(flush_addr) (flush_addr) + +#define __flush_addr(flush_addr_val) (flush_addr_val) + +#define _FLUSH_ADDR_CONTEXT_SHIFT 50 /* [61:50] */ + +#define _FLUSH_ADDR_VA 0x0000ffffffffffff /* virtual address */ + /* [47: 0] */ +#define _FLUSH_ADDR_CONTEXT 0x3ffc000000000000 /* context # */ +#define _FLUSH_ADDR_ROOT 0x4000000000000000 /* should be 0 */ +#define _FLUSH_ADDR_PHYS 0x8000000000000000 /* should be 0 */ + +#define FLUSH_VADDR_TO_VA(virt_addr) ((virt_addr) & _FLUSH_ADDR_VA) + +#define _FLUSH_ADDR_KERNEL(virt_addr) (FLUSH_VADDR_TO_VA(virt_addr) | \ + ((long)E2K_KERNEL_CONTEXT << _FLUSH_ADDR_CONTEXT_SHIFT)) + +#define FLUSH_ADDR_KERNEL(virt_addr) \ + __flush_addr(_FLUSH_ADDR_KERNEL(virt_addr)) + +#define flush_addr_get_va(flush_addr) \ + (flush_addr_val(flush_addr) & _FLUSH_ADDR_VA) +#define flush_addr_set_va(flush_addr, virt_addr) \ + (__flush_addr((flush_addr_val(flush_addr) & ~_FLUSH_ADDR_VA) | \ + ((virt_addr) & _FLUSH_ADDR_VA))) + +#define flush_addr_get_pid(flush_addr) \ + ((flush_addr_val(flush_addr) & _FLUSH_ADDR_CONTEXT) >> \ + _FLUSH_ADDR_CONTEXT_SHIFT) +#define flush_addr_get_context(flush_addr) \ + (flush_addr_val(flush_addr) & _FLUSH_ADDR_CONTEXT) +#define flush_addr_set_context(flush_addr, context) \ + (__flush_addr((flush_addr_val(flush_addr) & \ + ~_FLUSH_ADDR_CONTEXT) | \ + ((long)(context) << _FLUSH_ADDR_CONTEXT_SHIFT) & \ + _FLUSH_ADDR_CONTEXT)) +#define _flush_addr_make_sys(virt_addr, context, root) \ +({ \ + e2k_addr_t __addr_val = FLUSH_VADDR_TO_VA(virt_addr); \ + __addr_val |= (((long)(context) << \ + _FLUSH_ADDR_CONTEXT_SHIFT) & \ + _FLUSH_ADDR_CONTEXT); \ + if (root) \ + __addr_val |= _FLUSH_ADDR_ROOT; \ + __addr_val; \ +}) +#define _flush_addr_make_user(virt_addr) \ + FLUSH_VADDR_TO_VA(virt_addr) +#define flush_addr_make_sys(virt_addr, context) \ + __flush_addr(_flush_addr_make_sys(virt_addr, context, 0)) +#define flush_addr_make_user(virt_addr) \ + __flush_addr(_flush_addr_make_user(virt_addr)) 
+#define flush_addr_make_ss(virt_addr, context) \ + __flush_addr(_flush_addr_make_sys(virt_addr, context, 1)) + +/* + * CACHE(s) flush operations + */ + +/* CACHE(s) flush operations address */ + +#define _FLUSH_INVALIDATE_CACHE_L12_OP 0x0000000000000000 +#define _FLUSH_WRITE_BACK_CACHE_L12_OP 0x0000000000000001 + +/* instruction set begining V3 has not invalidate operation */ +/* only flush all caches (same as write back) */ +#define _FLUSH_CACHE_L12_OP _FLUSH_WRITE_BACK_CACHE_L12_OP + +#define flush_op_set_invalidate_cache_L12(flush_op) \ + flush_op_set_type(flush_op, _FLUSH_INVALIDATE_CACHE_L12_OP) +#define flush_op_set_write_back_cache_L12(flush_op) \ + flush_op_set_type(flush_op, _FLUSH_WRITE_BACK_CACHE_L12_OP) +#define flush_op_set_cache_all(flush_op) \ + flush_op_set_write_back_cache_L12(flush_op) +#define _flush_op_invalidate_cache_L12 ((long)_FLUSH_INVALIDATE_CACHE_L12_OP) +#define _flush_op_write_back_cache_L12 ((long)_FLUSH_WRITE_BACK_CACHE_L12_OP) +#define _flush_op_cache_all _flush_op_write_back_cache_L12 +#define flush_op_invalidate_cache_L12 \ + __flush_op(_flush_op_invalidate_cache_L12) +#define flush_op_write_back_cache_L12 \ + __flush_op(_flush_op_write_back_cache_L12) +#define flush_op_cache_all flush_op_write_back_cache_L12 + +/* + * ICACHE/TLB flush operations + */ + +/* ICACHE/TLB flush operations address */ + +#define _FLUSH_ICACHE_ALL_OP 0x0000000000000000 +#define _FLUSH_TLB_ALL_OP 0x0000000000000001 + +#define flush_op_set_icache_all(flush_op) \ + flush_op_set_type(flush_op, _FLUSH_ICACHE_ALL_OP) +#define flush_op_set_tlb_all(flush_op) \ + flush_op_set_type(flush_op, _FLUSH_TLB_ALL_OP) +#define _flush_op_icache_all ((long)_FLUSH_ICACHE_ALL_OP) +#define _flush_op_tlb_all ((long)_FLUSH_TLB_ALL_OP) +#define flush_op_icache_all __flush_op(_flush_op_icache_all) +#define flush_op_tlb_all __flush_op(_flush_op_tlb_all) + + +/* + * MU address to access to CLW internal registers + */ + +#ifndef __ASSEMBLY__ +typedef e2k_addr_t clw_addr_t; +#endif /* 
__ASSEMBLY__ */ + +#define ADDR_US_CL_B 0x024 /* User stack bottom to clean */ +#define ADDR_US_CL_UP 0x124 /* User stack up to clean */ +#define ADDR_US_CL_M0 0x004 /* User stack bit-mask [0:63] */ +#define ADDR_US_CL_M1 0x084 /* User stack bit-mask [64:127] */ +#define ADDR_US_CL_M2 0x104 /* User stack bit-mask [128:195] */ +#define ADDR_US_CL_M3 0x184 /* User stack bit-mask [196:255] */ + +/* CLW internel register contents */ + +#ifndef __ASSEMBLY__ +typedef unsigned long clw_reg_t; +#endif /* __ASSEMBLY__ */ + +/* + * User Stack Window clean bit-mask structure + */ + +#define CLW_MASK_WORD_NUM 4 /* number of words in bit-mask */ +#define CLW_BITS_PER_MASK_WORD 64 /* number of bits in one bit-mask */ + /* word */ +#define CLW_BYTES_PER_BIT 32 /* one bit describes 32 bytes of */ + /* stack area */ +#define CLW_BYTES_PER_MASK /* number of bytes in full bit-mask */ \ + (CLW_BYTES_PER_BIT * CLW_MASK_WORD_NUM * CLW_BITS_PER_MASK_WORD) + +/* + * MMU DEBUG registers + */ + +#define _MMU_ADDR_REGS_TYPE 0x0000000000000007 /* [2:0] type of op. */ +#define _MMU_ADDR_DEBUG_REG_NO 0x00000000000001e0 /* [8:5] # of reg. 
*/ +#define _MMU_ADDR_DEBUG_REG_NO_SHIFT 5 /* [8:5] */ +#define _MMU_ADDR_DEBUG_REG_TYPE 7 /* access to DEBUG */ + /* registers */ + +#define MMU_DDBAR0_REG_NO 0 /* Data access breakpoint address */ +#define MMU_DDBAR1_REG_NO 1 /* registers # 0 - 3 */ +#define MMU_DDBAR2_REG_NO 2 +#define MMU_DDBAR3_REG_NO 3 +#define MMU_DDBCR_REG_NO 4 /* Data access breakpoint control */ +#define MMU_DDBSR_REG_NO 5 /* Data access breakpoint status */ +#define MMU_DDMAR0_REG_NO 6 /* Data monitor accumulator */ +#define MMU_DDMAR1_REG_NO 7 /* registers # 0 - 1 */ +#define MMU_DDMCR_REG_NO 8 /* Data monitor control register */ +#define MMU_DEBUG_REGS_NUM (MMU_DDMCR_REG_NO + 1) + +#define _DEBUG_REG_NO_TO_MMU_ADDR(reg_no) \ + ((((reg_no) << _MMU_ADDR_DEBUG_REG_NO_SHIFT) & \ + _MMU_ADDR_DEBUG_REG_NO) | _MMU_ADDR_DEBUG_REG_TYPE) + +#ifndef __ASSEMBLY__ + +typedef union { + u32 half_word[2]; + struct { /* structure of register */ + u32 user : 1; /* [ 0: 0] */ + u32 system : 1; /* [ 1: 1] */ + u32 trap : 1; /* [ 2: 2] */ + u32 unused : 13; /* [15: 3] */ + u32 event : 7; /* [22:16] */ + u32 unused2 : 9; /* [31:23] */ + } fields[2]; + u64 word; +} e2k_ddmcr_t; +#define DDMCR_reg word + +typedef union { + struct { + u64 b0 : 8; + u64 unus7 : 4; + u64 b1 : 8; + u64 unus8 : 4; + u64 b2 : 8; + u64 unus9 : 4; + u64 b3 : 8; + u64 unu10 : 4; + u64 unu11 : 1; + u64 m0 : 1; + u64 m1 : 1; + u64 unu12 : 13; + }; + union { + struct { + u64 sprg0 : 1; + u64 spec0 : 1; + u64 aprg0 : 1; + u64 psf0 : 1; + u64 csf0 : 1; + u64 cut0 : 1; + u64 pt0 : 1; + u64 clw0 : 1; + u64 unus1 : 4; + + u64 sprg1 : 1; + u64 spec1 : 1; + u64 aprg1 : 1; + u64 psf1 : 1; + u64 csf1 : 1; + u64 cut1 : 1; + u64 pt1 : 1; + u64 clw1 : 1; + u64 unus2 : 4; + + u64 sprg2 : 1; + u64 spec2 : 1; + u64 aprg2 : 1; + u64 psf2 : 1; + u64 csf2 : 1; + u64 cut2 : 1; + u64 pt2 : 1; + u64 clw2 : 1; + u64 unus3 : 4; + + u64 sprg3 : 1; + u64 spec3 : 1; + u64 aprg3 : 1; + u64 psf3 : 1; + u64 csf3 : 1; + u64 cut3 : 1; + u64 pt3 : 1; + u64 clw3 : 1; + 
u64 unus4 : 4; + + u64 unus5 : 1; + u64 m0 : 1; /* [49] */ + u64 m1 : 1; /* [50] */ + u64 unus6 : 13; + }; + struct { + u64 b0 : 8; + u64 unus7 : 4; + u64 b1 : 8; + u64 unus8 : 4; + u64 b2 : 8; + u64 unus9 : 4; + u64 b3 : 8; + u64 unu10 : 4; + u64 unu11 : 16; + }; + } fields; + u64 word; +} e2k_ddbsr_t; +#define DDBSR_reg word + +#define E2K_DDBSR_MASK(cp_num) (0xffULL << ((cp_num) * 12)) +#define E2K_DDBSR_MASK_ALL_BP 0xff0ff0ff0ffULL + +typedef union { + struct { + u64 v0 : 1; + u64 root0 : 1; + u64 rw0 : 2; + u64 lng0 : 3; + u64 sync0 : 1; + u64 spec0 : 1; + u64 ap0 : 1; + u64 sf0 : 1; + u64 hw0 : 1; + u64 t0 : 1; + u64 __x0 : 1; + u64 v1 : 1; + u64 root1 : 1; + u64 rw1 : 2; + u64 lng1 : 3; + u64 sync1 : 1; + u64 spec1 : 1; + u64 ap1 : 1; + u64 sf1 : 1; + u64 hw1 : 1; + u64 t1 : 1; + u64 __x1 : 1; + u64 v2 : 1; + u64 root2 : 1; + u64 rw2 : 2; + u64 lng2 : 3; + u64 sync2 : 1; + u64 spec2 : 1; + u64 ap2 : 1; + u64 sf2 : 1; + u64 hw2 : 1; + u64 t2 : 1; + u64 __x2 : 1; + u64 v3 : 1; + u64 root3 : 1; + u64 rw3 : 2; + u64 lng3 : 3; + u64 sync3 : 1; + u64 spec3 : 1; + u64 ap3 : 1; + u64 sf3 : 1; + u64 hw3 : 1; + u64 t3 : 1; + u64 __x3 : 1; + u64 gm : 1; + }; + struct { + u64 v0 : 1; + u64 root0 : 1; + u64 rw0 : 2; + u64 lng0 : 3; + u64 sync0 : 1; + u64 spec0 : 1; + u64 ap0 : 1; + u64 sf0 : 1; + u64 hw0 : 1; + u64 t0 : 1; + u64 __x0 : 1; + u64 v1 : 1; + u64 root1 : 1; + u64 rw1 : 2; + u64 lng1 : 3; + u64 sync1 : 1; + u64 spec1 : 1; + u64 ap1 : 1; + u64 sf1 : 1; + u64 hw1 : 1; + u64 t1 : 1; + u64 __x1 : 1; + u64 v2 : 1; + u64 root2 : 1; + u64 rw2 : 2; + u64 lng2 : 3; + u64 sync2 : 1; + u64 spec2 : 1; + u64 ap2 : 1; + u64 sf2 : 1; + u64 hw2 : 1; + u64 t2 : 1; + u64 __x2 : 1; + u64 v3 : 1; + u64 root3 : 1; + u64 rw3 : 2; + u64 lng3 : 3; + u64 sync3 : 1; + u64 spec3 : 1; + u64 ap3 : 1; + u64 sf3 : 1; + u64 hw3 : 1; + u64 t3 : 1; + u64 __x3 : 1; + u64 gm : 1; + } fields; + u64 word; +} e2k_ddbcr_t; +#define DDBCR_reg word + +#define E2K_DDBCR_MASK(cp_num) (0x3FFFULL << 
((cp_num) * 14)) +#endif /* ! __ASSEMBLY__ */ + +#endif /* _E2K_MMU_REGS_TYPES_H_ */ diff --git a/arch/e2k/include/asm/mmu_types.h b/arch/e2k/include/asm/mmu_types.h new file mode 100644 index 000000000000..cb77e4bbd0a9 --- /dev/null +++ b/arch/e2k/include/asm/mmu_types.h @@ -0,0 +1,873 @@ +#ifndef _E2K_MMU_TYPES_H_ +#define _E2K_MMU_TYPES_H_ + +#include +#include +#include +#include + +#ifndef __ASSEMBLY__ + +/* + * These are used to make use of C type-checking.. + */ +typedef unsigned long pteval_t; +typedef unsigned long pmdval_t; +typedef unsigned long pudval_t; +typedef unsigned long pgdval_t; +typedef unsigned long pgprotval_t; + +typedef struct { pteval_t pte; } pte_t; +typedef struct { pmdval_t pmd; } pmd_t; +typedef struct { pudval_t pud; } pud_t; +typedef struct { pgdval_t pgd; } pgd_t; +typedef struct { pgprotval_t pgprot; } pgprot_t; + +#define pte_val(x) ((x).pte) +#define pmd_val(x) ((x).pmd) +#define pud_val(x) ((x).pud) +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) } ) +#define __pud(x) ((pud_t) { (x) } ) +#define __pmd(x) ((pmd_t) { (x) } ) +#define __pgd(x) ((pgd_t) { (x) } ) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +#endif /* ! __ASSEMBLY__ */ + +/* one page table occupies one 4K page and has 512 entries */ +#define PT_ENTRIES_SHIFT 3 /* 8 bytes, 3 bits */ +#define PT_ENTRIES_BITS (PAGE_SHIFT - PT_ENTRIES_SHIFT) /* 9 bits */ +#define PT_ENTRIES_PER_PAGE (1 << PT_ENTRIES_BITS) /* 512 ptes */ + +/* + * Definitions for 4-th (root) level: + * + * PGDIR_SHIFT determines what a root-level page table entry + * can map: + * pages of 3-th level page table entries + * + * Cannot use the top 0xffff ff00 0000 0000 - 0xffff ffff ffff ffff addresses + * because virtual page table lives there. 
+ */ +#define PGDIR_SHIFT (PAGE_SHIFT + 3 * PT_ENTRIES_BITS) +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) +#define PAGE_PGD_SIZE PGDIR_SIZE +#define PTRS_PER_PGD (1UL << PT_ENTRIES_BITS) +#define PGD_TABLE_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) +#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) +#define FIRST_USER_ADDRESS 0 + +/* + * Definitions for 3-th level: + * + * PUD_SHIFT determines the size of the area a 3-th level page tables + * can map: + * pages of second-level page table entries + */ +#define PUD_SHIFT (PAGE_SHIFT + 2 * PT_ENTRIES_BITS) +#define PUD_SIZE (1UL << PUD_SHIFT) +#define PUD_MASK (~(PUD_SIZE-1)) +#define PAGE_PUD_SIZE PUD_SIZE +#define PTRS_PER_PUD (1UL << PT_ENTRIES_BITS) +#define PUD_TABLE_SIZE (PTRS_PER_PUD * sizeof(pud_t)) + +/* + * Definitions for 2-nd level: + * + * PMD_SHIFT determines the size of the area a middle level page tables + * can map: + * pages of first level page table entries + */ +#define PMD_SHIFT (PAGE_SHIFT + 1 * PT_ENTRIES_BITS) +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) +#define PAGE_PMD_SIZE PMD_SIZE +#define PTRS_PER_PMD (1UL << PT_ENTRIES_BITS) +#define PMD_TABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t)) + +/* + * Definitions for first (page table entries) level: + * + * PTE - Entries per user pages. + */ +#define PTE_SHIFT (PAGE_SHIFT) /* PAGE_SHIFT */ +#define PTE_SIZE (1UL << PTE_SHIFT) /* PAGE_SIZE */ +#define PTE_MASK (~(PTE_SIZE-1)) /* PAGE_MASK */ +#define PTRS_PER_PTE (1UL << PT_ENTRIES_BITS) +#define PTE_TABLE_SIZE (PTRS_PER_PTE * sizeof(pte_t)) + +/* + * The index in the 4-th (root-level) page table directory. 
+ */ +#define pgd_index(virt_addr) (((virt_addr) >> PGDIR_SHIFT) & \ + (PTRS_PER_PGD - 1)) + +/* Additional trap cellar fields are located at this offset */ +#define TC_EXT_OFFSET 512 + +#ifndef __ASSEMBLY__ + +/* + * Hardware MMUs page tables have some differences from one ISET to other + * moreover each MMU supports a few different page tables: + * native (primary) + * secondary page tables for sevral modes (VA32, VA48, PA32, PA48 ...) + * The follow structures and tables present these differences for each instance + * to manage page tables as common item + */ + +/* max possible number of page table levels for all ISETs, types, modes */ +/* to can describe any supported by MMUs type of page tables */ +#define ARCH_MAX_PT_LEVELS 4 /* for 48 bits virtual address */ + /* and 48 bits physical address */ + +#define E2K_PT_LEVELS_NUM 4 /* native mode page tables have */ + /* equal number of levels up to now */ +#define E2K_PAGES_LEVEL_NUM 0 /* level number of physical pages */ +#define E2K_PTE_LEVEL_NUM 1 /* level number of native pte */ +#define E2K_PMD_LEVEL_NUM 2 /* level number of native pmd */ +#define E2K_PUD_LEVEL_NUM 3 /* level number of native pud */ +#define E2K_PGD_LEVEL_NUM 4 /* level number of native pgd */ + +/* max level # on which can map huge pages: pmd, pud */ +#define MAX_HUGE_PAGES_LEVEL E2K_PUD_LEVEL_NUM + +#define MAX_NUM_HUGE_PTES 2 /* max number of page table entries */ + /* to present one huge page on any */ + /* PT level */ + /* Now e2c+ MMU need 2 pmd entries */ + /* to present 4 Mb page */ + +typedef enum dtlb_type { + UNUSED_DTLB_TYPE, /* DTLB unused for page table entries */ + COMMON_DTLB_TYPE, /* common DTLB N-lines x M-sets */ + FULL_ASSOCIATIVE_DTLB_TYPE, /* full associative buffer */ + /* used for 1 Gb pages */ +} dtlb_type_t; + +typedef enum pt_type { + UNDEFINED_PT_TYPE, /* undefined (not known) PT type */ + E2K_PT_TYPE, /* arch native 4-th level PT */ + X86_2_LEVELS_PT_TYPE, /* secondary PT VA32 - PA32 */ + X86_3_LEVELS_PT_TYPE, /* 
secondary PT VA32 - PA48 */ + X86_4_LEVELS_PT_TYPE, /* secondary PT VA48 - PA48 */ +} pt_type_t; + +typedef struct pt_level { + int id; /* level ID (now is level #) */ + e2k_size_t pt_size; /* page size, which the level one */ + /* entry can map */ + e2k_size_t page_size; /* page or huge page size, which */ + /* the level real map, this size can */ + /* differ from pt_size above, because */ + /* of a few entries should present */ + /* one huge page */ + int pt_shift; /* youngest bit of page address, */ + /* which the level one entry can map */ + int page_shift; /* youngest bit of huge page address, */ + /* which the level is mapped */ + e2k_addr_t pt_mask; /* mask of page address, which */ + /* the level one entry can map */ + e2k_addr_t pt_offset; /* mask of offset into the level */ + /* page address */ + e2k_addr_t pt_index_mask; /* mask of index bits into the level */ + /* page address */ + e2k_addr_t page_mask; /* mask of huge page address, which */ + /* the level is mapped */ + e2k_addr_t page_offset; /* mask of offset into the level */ + /* huge page address */ + int ptrs_per_pt; /* number of entries in one table of */ + /* the level */ + bool is_pte; /* page table entries level */ + bool is_huge; /* the level can be as huge page */ + /* table entries */ + unsigned char huge_ptes; /* number of the level page table */ + /* entries to present one huge page */ + dtlb_type_t dtlb_type; /* DTLB cache type for page tables */ + /* entries on the level */ + + /* interface function to handle some things on the level */ + pte_t * (*boot_get_huge_pte)(e2k_addr_t virt_addr, pgprot_t *ptp); + pte_t * (*init_get_huge_pte)(e2k_addr_t virt_addr, pgprot_t *ptp); + pte_t * (*get_huge_pte)(e2k_addr_t virt_addr, pgprot_t *ptp); + void (*boot_set_pte)(e2k_addr_t addr, pte_t *ptep, pte_t pte, + bool host_map); + void (*init_pte_clear)(pte_t *ptep); + void (*split_pt_page)(pgprot_t *ptp, pte_t *ptes[MAX_NUM_HUGE_PTES]); + void (*map_pt_huge_page_to_prev_level)(pgprot_t *pt_page, + 
e2k_addr_t phys_page, pgprot_t pgprot); + +} pt_level_t; + +typedef struct pt_struct { + pt_type_t type; /* PT type */ + const char *name; /* PT type name */ + bool pt_v6; /* PT entry structure V6 or above */ + int levels_num; /* real number of page table levels */ + + /* some useful PT entries structure bit mask */ + /* can be different for some PT types (depend on fields type, pt_v6) */ + pgprotval_t pfn_mask; /* pfn # */ + pgprotval_t accessed_mask; /* page accessed flags */ + pgprotval_t dirty_mask; /* page was updated */ + pgprotval_t present_mask; /* page is present */ + pgprotval_t valid_mask; /* page is present */ + pgprotval_t user_mask; /* page of user */ + pgprotval_t priv_mask; /* page is privileged */ + pgprotval_t non_exec_mask; /* page is not executable */ + pgprotval_t exec_mask; /* page is executable */ + + /* mask of bits available for software */ + pgprotval_t sw_bit1_mask; /* # 1 */ + pgprotval_t sw_bit2_mask; /* # 2 */ + pgprotval_t sw_mmio_mask; /* shadow pte is for MMIO */ + + /* some useful PT entries page protection base values */ + pgprotval_t ptd_kernel_prot; /* kernel PT directories protection */ + pgprotval_t ptd_user_prot; /* user PT directories protection */ + + /* interface function to get/set some protections */ + unsigned int (*get_pte_val_memory_type)(pgprot_t pte_val); + pgprot_t (*set_pte_val_memory_type)(pgprot_t pte_val, + unsigned int memory_type); + unsigned int (*get_pte_val_memory_type_rule)(pgprot_t pte_val); + pgprot_t (*set_pte_val_memory_type_rule)(pgprot_t pte_val, + unsigned int mtcr); + + /* level #0 is always physical page */ + pt_level_t levels[ARCH_MAX_PT_LEVELS + 1]; +} pt_struct_t; + +static inline int +get_pt_level_id(const pt_level_t *pt_level) +{ + /* now PT level is number of the level */ + return pt_level->id; +} + +static inline bool +is_page_pt_level(const pt_level_t *pt_level) +{ + return pt_level->is_pte; +} + +static inline bool +is_huge_pt_level(const pt_level_t *pt_level) +{ + return 
pt_level->is_huge; +} + +static inline e2k_size_t +get_pt_level_size(const pt_level_t *pt_level) +{ + return pt_level->pt_size; +} + +static inline e2k_size_t +get_pt_level_page_size(const pt_level_t *pt_level) +{ + return pt_level->page_size; +} + +static inline int +get_pt_level_shift(const pt_level_t *pt_level) +{ + return pt_level->pt_shift; +} + +static inline int +get_pt_level_page_shift(const pt_level_t *pt_level) +{ + return pt_level->page_shift; +} + +static inline e2k_addr_t +get_pt_level_mask(const pt_level_t *pt_level) +{ + return pt_level->pt_mask; +} +static inline e2k_addr_t +get_pt_level_offset(const pt_level_t *pt_level) +{ + return pt_level->pt_offset; +} +static inline e2k_addr_t +get_pt_level_addr_index(e2k_addr_t addr, const pt_level_t *pt_level) +{ + return (addr & pt_level->pt_index_mask) >> + get_pt_level_shift(pt_level); +} +static inline e2k_addr_t +set_pt_level_addr_index(e2k_addr_t addr, e2k_addr_t index, const pt_level_t *pt_level) +{ + return (addr & ~pt_level->pt_index_mask) | + ((index << get_pt_level_shift(pt_level)) & + pt_level->pt_index_mask); +} + +static inline e2k_addr_t +get_pt_level_page_mask(const pt_level_t *pt_level) +{ + return pt_level->page_mask; +} + +static inline e2k_addr_t +get_pt_level_page_offset(const pt_level_t *pt_level) +{ + return pt_level->page_offset; +} + +static inline int +get_ptrs_per_pt_level(const pt_level_t *pt_level) +{ + return pt_level->ptrs_per_pt; +} + +static inline int +get_pt_level_huge_ptes_num(const pt_level_t *pt_level) +{ + return pt_level->huge_ptes; +} + +static inline const pt_level_t * +get_pt_struct_level_on_id(const pt_struct_t *pt_struct, int level_id) +{ + /* now PT level is number of the level */ + return &pt_struct->levels[level_id]; +} + +static inline bool +is_page_pt_struct_level(const pt_struct_t *pt_struct, int level_id) +{ + return is_page_pt_level(&pt_struct->levels[level_id]); +} + +static inline bool +is_huge_pt_struct_level(const pt_struct_t *pt_struct, int level_id) 
+{ + return is_huge_pt_level(&pt_struct->levels[level_id]); +} + +static inline e2k_size_t +get_pt_struct_level_size(const pt_struct_t *pt_struct, int level_id) +{ + return get_pt_level_size(&pt_struct->levels[level_id]); +} + +static inline e2k_size_t +get_pt_struct_level_page_size(const pt_struct_t *pt_struct, int level_id) +{ + return get_pt_level_page_size(&pt_struct->levels[level_id]); +} + +static inline int +get_pt_struct_level_huge_ptes_num(const pt_struct_t *pt_struct, int level_id) +{ + return get_pt_level_huge_ptes_num(&pt_struct->levels[level_id]); +} + +/* This is definition of MMU TRAP_CELLAR types */ + +struct mmu_tc_dst { + unsigned address :8; /* [0-7] */ + unsigned vr :1; /* [8] */ + unsigned vl :1; /* [9] */ +}; + +typedef union { + unsigned word; + struct mmu_tc_dst fields; +} tc_dst_t; + +/* Maximum size for memory access from single channel is 8 + * (16 since e8c2) */ +#define E2K_MAX_FORMAT 16 + +struct mmu_tc_opcode { + unsigned fmt :3; /* [0-2] */ + unsigned npsp :1; /* [3] */ + unsigned fmtc :2; /* [4-5] */ +}; + +#endif /* ! 
__ASSEMBLY__ */ + +#define LDST_BYTE_FMT 1UL /* load/store byte (8 bits) */ +#define LDST_HALF_FMT 2UL /* load/store halfword (16 bits) */ +#define LDST_WORD_FMT 3UL /* load/store word (32 bits) */ +#define LDST_DWORD_FMT 4UL /* load/store double-word (64 bits) */ +#define LDST_QWORD_FMT 5UL /* load/store quad-word (128 bits) */ +#define LDST_QP_FMT 7UL /* load/store qpacked word (128 bits) */ + +#define LDRD_FMT_QWORD_A 0xdUL +#define LDRD_FMT_QPWORD_A 0xfUL + +#define TC_FMT_QPWORD_Q 0xdUL /* Single 16 position, tags as for Q */ +#define TC_FMT_QWORD_QP 0xfUL /* Two 16 positions, tags as for QP */ +#define TC_FMT_DWORD_Q 0x15UL /* Single 8 position, tags as for Q */ +#define TC_FMT_DWORD_QP 0x1fUL /* Single 8 position, tag as for QP */ + + +#ifndef __ASSEMBLY__ +static inline bool tc_fmt_has_valid_mask(int fmt) +{ + return fmt == LDST_QP_FMT || fmt == TC_FMT_QWORD_QP || fmt == TC_FMT_DWORD_QP; +} + + +typedef union { + unsigned word; + struct mmu_tc_opcode fields; +} tc_opcode_t; + +#define TC_OPCODE_FMT_FULL(opcode) (AS(opcode).fmt || (AS(opcode).fmtc << 3)) + +struct mmu_tc_fault_type { + unsigned global_sp :1; /* [35] */ + unsigned page_bound :1; /* [36] */ + unsigned exc_mem_lock :1; /* [37] */ + unsigned ph_pr_page :1; /* [38] */ + unsigned io_page :1; /* [39] */ + unsigned isys_page :1; /* [40] */ + unsigned prot_page :1; /* [41] */ + unsigned priv_page :1; /* [42] */ + unsigned illegal_page :1; /* [43] */ + unsigned nwrite_page :1; /* [44] */ + unsigned page_miss :1; /* [45] */ + unsigned ph_bound :1; /* [46] */ + unsigned intl_res_bits :1; /* [47] */ +}; + +typedef union { + unsigned word; + struct mmu_tc_fault_type fields; +} tc_fault_type_t; + +typedef union mmu_tc_cond_dword { + struct { + u64 dst :10; // [0-9] + u64 opcode :6; // [10-15] + u64 r0 :1; // [16] + u64 store :1; // [17] + u64 mode_80 :1; // [18] + u64 s_f :1; // [19] + u64 mas :7; // [20-26] + u64 root :1; // [27] + u64 scal :1; // [28] + u64 sru :1; // [29] + u64 spec :1; // [30] + u64 pm 
:1; // [31] + u64 chan :2; // [32-33] + u64 r1 :1; // [34] + u64 fault_type :13; // [35-47] + u64 miss_lvl :2; // [48-49] + u64 num_align :1; // [50] + u64 empt :1; // [51] + u64 clw :1; // [52] + u64 dst_rcv :10; // [53-62] + u64 rcv :1; // [63] + }; + struct { + u64 address :8; // [0-7] + u64 vr :1; // [8] + u64 vl :1; // [9] + u64 fmt :3; // [10-12] + /* Be careful: npsp=1 => access is not protected, + * but npsp=0 does not mean that access is protected. */ + u64 npsp :1; // [13] + u64 fmtc :2; // [14-15] + u64 ___x1 :19; // [34-16] + u64 global_sp :1; /* [35] */ + u64 page_bound :1; /* [36] */ + u64 exc_mem_lock :1; /* [37] */ + u64 ph_pr_page :1; /* [38] */ + u64 io_page :1; /* [39] */ + u64 isys_page :1; /* [40] */ + u64 prot_page :1; /* [41] */ + u64 priv_page :1; /* [42] */ + u64 illegal_page :1; /* [43] */ + u64 nwrite_page :1; /* [44] */ + u64 page_miss :1; /* [45] */ + u64 ph_bound :1; /* [46] */ + u64 intl_res_bits :1; /* [47] */ + u64 ___x0 :5; /* [52:48] */ + u64 dst_ind :8; /* [60:53] */ + u64 ___x2 :3; /* [63-61] */ + }; +} mmu_tc_cond_dword_t; + +typedef union { + struct { + u64 dst :10; // [0-9] + u64 opcode :6; // [10-15] + u64 r0 :1; // [16] + u64 store :1; // [17] + u64 mode_80 :1; // [18] + u64 s_f :1; // [19] + u64 mas :7; // [20-26] + u64 root :1; // [27] + u64 scal :1; // [28] + u64 sru :1; // [29] + u64 spec :1; // [30] + u64 pm :1; // [31] + u64 chan :2; // [32-33] + u64 r1 :1; // [34] + u64 fault_type :13; // [35-47] + u64 miss_lvl :2; // [48-49] + u64 num_align :1; // [50] + u64 empt :1; // [51] + u64 clw :1; // [52] + u64 dst_rcv :10; // [53-62] + u64 rcv :1; // [63] + }; + struct { + u64 address :8; // [0-7] + u64 vr :1; // [8] + u64 vl :1; // [9] + u64 fmt :3; // [10-12] + /* Be careful: npsp=1 => access is not protected, + * but npsp=0 does not mean that access is protected. 
*/ + u64 npsp :1; // [13] + u64 fmtc :2; // [14-15] + u64 ___x1 :19; // [34-16] + u64 global_sp :1; /* [35] */ + u64 page_bound :1; /* [36] */ + u64 exc_mem_lock :1; /* [37] */ + u64 ph_pr_page :1; /* [38] */ + u64 io_page :1; /* [39] */ + u64 isys_page :1; /* [40] */ + u64 prot_page :1; /* [41] */ + u64 priv_page :1; /* [42] */ + u64 illegal_page :1; /* [43] */ + u64 nwrite_page :1; /* [44] */ + u64 page_miss :1; /* [45] */ + u64 ph_bound :1; /* [46] */ + u64 intl_res_bits :1; /* [47] */ + u64 ___x0 :5; /* [52:48] */ + u64 dst_ind :8; /* [60:53] */ + u64 ___x2 :3; /* [63-61] */ + }; + u64 word; + union mmu_tc_cond_dword fields; +} tc_cond_t; + +#define TC_COND_FMT_FULL(cond) (cond.fmt | (cond.fmtc << 3)) + +static inline bool tc_cond_is_special_mmu_aau(tc_cond_t cond) +{ + unsigned int mas = cond.mas; + int chan = cond.chan; + int store = cond.store; + int spec_mode = cond.spec; + + if (unlikely(is_mas_special_mmu_aau(mas) && (store || + !store && !spec_mode && (chan == 1 || chan == 3)))) + return true; + + return false; +} + +static inline bool tc_cond_is_check_ld(tc_cond_t cond) +{ + unsigned int mas = cond.mas; + int store = cond.store; + int spec_mode = cond.spec; + + return is_mas_check(mas) && !spec_mode && !store; +} + +static inline bool tc_cond_is_check_unlock_ld(tc_cond_t cond) +{ + unsigned int mas = cond.mas; + int store = cond.store; + int spec_mode = cond.spec; + + return is_mas_check_unlock(mas) && !spec_mode && !store; +} + +static inline bool tc_cond_is_lock_check_ld(tc_cond_t cond) +{ + unsigned int mas = cond.mas; + int store = cond.store; + int spec_mode = cond.spec; + + return is_mas_lock_check(mas) && spec_mode && !store; +} + +static inline bool tc_cond_is_spec_lock_check_ld(tc_cond_t cond) +{ + unsigned int mas = cond.mas; + int store = cond.store; + int spec_mode = cond.spec; + + return is_mas_spec_lock_check(mas) && spec_mode && !store; +} + +/* + * Caveat: for qword accesses this will return 16 bytes for + * the first entry in trap 
/*
 * Convert the 2-bit channel opcode stored in the trap cellar condition
 * word into the hardware channel number (0, 2, 3 or 5).
 * Returns -1 for an out-of-range opcode.
 */
static inline int
ldst_chan_opc_to_chan_num(int chan_opc)
{
	switch (chan_opc) {
	case 0: return 0;
	case 1: return 2;
	case 2: return 3;
	case 3: return 5;
	default: return -1;
	}
}

/*
 * Inverse of ldst_chan_opc_to_chan_num(): convert a hardware channel
 * number back into the 2-bit channel opcode.
 * Returns -1 when the number is not a valid load/store channel.
 *
 * Fix: the parameter was misleadingly named "chan_opc" although this
 * function takes a channel *number*; renamed to chan_num (no ABI change).
 */
static inline int
ldst_chan_num_to_chan_opc(int chan_num)
{
	switch (chan_num) {
	case 0: return 0;
	case 2: return 1;
	case 3: return 2;
	case 5: return 3;
	default: return -1;
	}
}
(condition.store && (mas != MAS_DCACHE_LINE_FLUSH)) + return true; + return tc_cond_load_has_store_semantics(condition, iset_ver); +} + +/* Trap cellar flags */ + +#define TC_DONE_FLAG 0x01 +#define TC_NESTED_EXC_FLAG 0x02 +#define TC_IS_HVA_FLAG 0x10 /* address at trap cellar is already */ + /* converted GVA->HVA */ + +/* + * Trap cellar as it is in hardware plus additional fields + */ +typedef struct { + unsigned long address; + unsigned long data; + tc_cond_t condition; + unsigned long data_ext; + tc_mask_t mask; + unsigned char flags; +} trap_cellar_t; + +/* bug 96719 - combination s_f = 0, store=1, sru =1 conside + * as s_f = 1, store=1, sru =1 + * */ +#define IS_SPILL(tc) (cpu_has(CPU_HWBUG_TRAP_CELLAR_S_F) && \ + AS((tc).condition).store && AS((tc).condition).sru) + +/* + * Trap cellar as it is in hardware + */ +typedef struct { + unsigned long address; + unsigned long data; + tc_cond_t condition; +} kernel_trap_cellar_t; + +typedef struct { + unsigned long __reserved; + unsigned long data; + tc_mask_t mask; +} kernel_trap_cellar_ext_t; + +/** + * is_record_asynchronous - return true if the record is asynchronous + * @cond: cond mask of record + * + * Asynchronous records are the ones that did not originate from wide + * instruction in user code, i.e. hardware-generated records. + * + * In current processor models (and probably in all future ones) only + * CLW records can mix with synchronous ones. + */ +static inline bool is_record_asynchronous(tc_cond_t cond) +{ + /* We use bitwise OR for performance */ + return cond.mode_80 | cond.s_f | cond.sru | cond.clw; +} + +/** + * tc_record_asynchronous - return true if the record + * in tcellar is asynchronous + * @tcellar: record in question + */ +static inline int tc_record_asynchronous(trap_cellar_t *tcellar) +{ + tc_cond_t cond = tcellar->condition; + + return is_record_asynchronous(cond); +} + +#endif /* ! 
__ASSEMBLY__ */ + +/* + * Second operand of Load and Store recovery instruction (LDRD & STRD): + * + * operation code and MAS flags + */ + +#define LDST_REC_OPC_INDEX_SHIFT 0 +#define LDST_REC_OPC_INDEX_SIZE 32 /* [31- 0] byte index */ +#define LDST_REC_OPC_MAS_SHIFT 32 +#define LDST_REC_OPC_MAS_SIZE 7 /* [38-32] MAS */ +#define LDST_REC_OPC_PROT_SHIFT 39 +#define LDST_REC_OPC_PROT_SIZE 1 /* [39] protected access */ +#define LDST_REC_OPC_FMT_SHIFT 40 +#define LDST_REC_OPC_FMT_SIZE 3 /* [42-40] format of access */ +#define LDST_REC_OPC_ROOT_SHIFT 43 +#define LDST_REC_OPC_ROOT_SIZE 1 /* [43] virtual space */ + /* type flag */ +#define LDST_REC_OPC_RG_SHIFT 44 +#define LDST_REC_OPC_RG_SIZE 8 /* [51-44] physical address */ + /* of an NR (in terms */ + /* of single-NR) used */ + /* for handling */ + /* memory locks */ + /* conflicts */ +#define LDST_REC_OPC_FMT_H_SHIFT 52 +#define LDST_REC_OPC_FMT_H_SIZE 1 /* [52] format of access */ +#define LDST_REC_OPC_MODE_H_SHIFT 53 +#define LDST_REC_OPC_MODE_H_SIZE 1 /* [53] mode (hi) of access */ +#define LDST_REC_OPC_UNUSED_SHIFT 54 +#define LDST_REC_OPC_UNUZED_SIZE 2 /* [55-54] unused bits */ + +#define LDST_REC_OPC_MASK_SHIFT 56 +#define LDST_REC_OPC_MASK_SIZE 8 + +#define LDST_REC_OPC_PROT (1UL << LDST_REC_OPC_PROT_SHIFT) +#define LDST_REC_OPC_ROOT (1UL << LDST_REC_OPC_ROOT_SHIFT) +#define LDST_REC_OPC_MODE_H (1UL << LDST_REC_OPC_MODE_H_SHIFT) +#define LDST_REC_OPC_MODE_MASK \ + (LDST_REC_OPC_PROT | LDST_REC_OPC_ROOT | LDST_REC_OPC_MODE_H) + +#define LDST_REC_OPC_GET_MODE(ldst_rec_opc) \ + ((((ldst_rec_opc) & LDST_REC_OPC_PROT) >> \ + (LDST_REC_OPC_PROT_SHIFT - 0)) | \ + (((ldst_rec_opc) & LDST_REC_OPC_ROOT) >> \ + (LDST_REC_OPC_ROOT_SHIFT - 1)) | \ + (((ldst_rec_opc) & LDST_REC_OPC_MODE_H) >> \ + (LDST_REC_OPC_MODE_H_SHIFT - 2))) +#define LDST_REC_OPC_SET_MODE(ldst_rec_opc, mode) \ + (((ldst_rec_opc) & ~LDST_REC_OPC_MODE_MASK) | \ + (((mode) & 0x01) << (LDST_REC_OPC_PROT_SHIFT - 0)) | \ + (((mode) & 0x02) << 
(LDST_REC_OPC_ROOT_SHIFT - 1)) | \ + (((mode) & 0x04) << (LDST_REC_OPC_MODE_H_SHIFT - 2))) + +#ifndef __ASSEMBLY__ +typedef union { + struct { + u64 index : 32; // [31- 0] + u64 mas : 7; // [38-32] + u64 prot : 1; // [39] + u64 fmt : 3; // [42-40] + u64 root : 1; // [43] + /* Used only on ES2 (E2K_ISET_V2), deprecated everywhere else. */ + u64 rg_deprecated : 8; // [51-44] + u64 fmt_h : 1; // [52] + u64 mode_h : 1; // [53] + u64 unused : 2; // [55-54] + u64 mask : 8; // [63-56] + }; + unsigned long word; +} ldst_rec_op_t; +#define LD_ST_REC_OPC_index(ld_st_rec) (ld_st_rec.index) +#define LD_ST_REC_OPC_mas(ld_st_rec) (ld_st_rec.mas) +#define LD_ST_REC_OPC_prot(ld_st_rec) (ld_st_rec.prot) +#define LD_ST_REC_OPC_fmt(ld_st_rec) (ld_st_rec.fmt) +#define LD_ST_REC_OPC_root(ld_st_rec) (ld_st_rec.root) +#define LD_ST_REC_OPC_fmt_h(ld_st_rec) (ld_st_rec.fmt_h) +#define LD_ST_REC_OPC_mode_h(ld_st_rec) (ld_st_rec.mode_h) +#define LD_ST_REC_OPC_mask(ld_st_rec) (ld_st_rec.mask) +#define LD_ST_REC_OPC_reg(ld_st_rec) (ld_st_rec.word) + +#endif /* ! 
__ASSEMBLY__ */ + +#define LDST_REC_OPC_BYPASS_L1 (MAS_BYPASS_L1_CACHE << \ + LDST_REC_OPC_MAS_SHIFT) +#define LDST_REC_OPC_BYPASS_CACHE (MAS_BYPASS_ALL_CACHES << \ + LDST_REC_OPC_MAS_SHIFT) + +#define TAGGED_MEM_LOAD_REC_OPC (0UL | \ + LDST_QWORD_FMT << LDST_REC_OPC_FMT_SHIFT | \ + MAS_FILL_OPERATION << LDST_REC_OPC_MAS_SHIFT) +#define TAGGED_MEM_LOAD_REC_OPC_W (0UL | \ + LDST_WORD_FMT << LDST_REC_OPC_FMT_SHIFT | \ + MAS_FILL_OPERATION << LDST_REC_OPC_MAS_SHIFT) +#define TAGGED_MEM_STORE_REC_OPC (LDST_QWORD_FMT << LDST_REC_OPC_FMT_SHIFT) +#define TAGGED_MEM_STORE_REC_OPC_W (LDST_WORD_FMT << LDST_REC_OPC_FMT_SHIFT) +#define MEM_STORE_REC_OPC_B (LDST_BYTE_FMT << LDST_REC_OPC_FMT_SHIFT) + + +#endif /* _E2K_MMU_TYPES_H_ */ diff --git a/arch/e2k/include/asm/mmzone.h b/arch/e2k/include/asm/mmzone.h new file mode 100644 index 000000000000..6758e221c9aa --- /dev/null +++ b/arch/e2k/include/asm/mmzone.h @@ -0,0 +1,19 @@ +#ifndef _E2K_MMZONE_H_ +#define _E2K_MMZONE_H_ + +#include +#include + +#include +#include + +#define __NODE_DATA(ndata, nid) ((ndata)[(nid)]) + +#ifdef CONFIG_NEED_MULTIPLE_NODES +extern pg_data_t *node_data[]; +#define NODE_DATA(nid) __NODE_DATA(node_data, nid) +#endif + +#define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT) + +#endif /* _E2K_MMZONE_H_ */ diff --git a/arch/e2k/include/asm/module.h b/arch/e2k/include/asm/module.h new file mode 100644 index 000000000000..7cac067b62a0 --- /dev/null +++ b/arch/e2k/include/asm/module.h @@ -0,0 +1,13 @@ +#ifndef _E2K_MODULE_H_ +#define _E2K_MODULE_H_ +/* + * This file contains the E2K architecture specific module code. 
+ */ + +struct mod_arch_specific { }; + +#define Elf_Shdr Elf64_Shdr +#define Elf_Sym Elf64_Sym +#define Elf_Ehdr Elf64_Ehdr + +#endif /* _E2K_MODULE_H_ */ diff --git a/arch/e2k/include/asm/monitors.h b/arch/e2k/include/asm/monitors.h new file mode 100644 index 000000000000..2193755b7841 --- /dev/null +++ b/arch/e2k/include/asm/monitors.h @@ -0,0 +1,49 @@ +/* + * arch/e2k/kernel/monitors.h + * + * This file contains declarations of interface functions for working with + * monitors. + * + * Copyright (C) 2009-2013 Pavel V. Panteleev (panteleev_p@mcst.ru) + */ + +#ifndef _MONITORS_H_ +#define _MONITORS_H_ + +#ifdef CONFIG_MONITORS + +#define MONITORS_COUNT 4 + +#define SICMONITORS_COUNT_PER_NODE 2 +#define SICMONITORS_COUNT \ + (MAX_NUMNODES * SICMONITORS_COUNT_PER_NODE) + +#define IPCCMONITORS_COUNT 1 +#define IOCCMONITORS_COUNT 1 + +typedef struct { + unsigned char cpu_num; + unsigned long dim0; + unsigned long dim1; + unsigned long ddm0; + unsigned long ddm1; +} monitor_registers_delta_t; + +extern const struct file_operations proc_pid_monitors_events_operations; +extern int monitors_used; + +void process_monitors(struct task_struct *task); +void init_monitors(struct task_struct *task); +void store_monitors_delta(struct task_struct *task); +void add_dead_proc_events(struct task_struct *task); +unsigned char get_monitors_mask(char *title); + +#define MONITORING_IS_ACTIVE unlikely(monitors_used) + +#else /* !CONFIG_MONITORS*/ +#define MONITORING_IS_ACTIVE false +static inline void init_monitors(struct task_struct *task) { }; +#endif /* CONFIG_MONITORS */ + +#endif /* _MONITORS_H_ */ + diff --git a/arch/e2k/include/asm/mpspec.h b/arch/e2k/include/asm/mpspec.h new file mode 100644 index 000000000000..74cd0567d561 --- /dev/null +++ b/arch/e2k/include/asm/mpspec.h @@ -0,0 +1,35 @@ +#ifndef __ASM_MPSPEC_H +#define __ASM_MPSPEC_H + +#ifdef __KERNEL__ + +#include +#include + +#include /* For __LITTLE_ENDIAN definition */ + +/* all addresses in MP table is physical so do 
/*
 * Compute the MP floating pointer structure checksum byte: the value
 * that makes the byte sum of the whole structure equal 0 modulo 256.
 *
 * NOTE(review): when the low byte of the sum is already 0 this returns
 * 0x100, not 0; callers appear to truncate the result to a byte when
 * storing it — confirm no caller compares the int directly.
 */
static inline int
boot_mpf_do_checksum(unsigned char *mp, int len)
{
	unsigned int total = 0;
	int i;

	for (i = 0; i < len; i++)
		total += mp[i];

	return 0x100 - (total & 0xFF);
}
+ * + * Pad space is left for: + * - 2 miscellaneous 64-bit values + */ + +struct msqid64_ds { + struct ipc64_perm msg_perm; + __kernel_time_t msg_stime; /* last msgsnd time */ + __kernel_time_t msg_rtime; /* last msgrcv time */ + __kernel_time_t msg_ctime; /* last change time */ + unsigned long msg_cbytes; /* current number of bytes on queue */ + unsigned long msg_qnum; /* number of messages in queue */ + unsigned long msg_qbytes; /* max number of bytes on queue */ + __kernel_pid_t msg_lspid; /* pid of last msgsnd */ + __kernel_pid_t msg_lrpid; /* last receive pid */ + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* _E2K_MSGBUF_H_ */ diff --git a/arch/e2k/include/asm/msidef.h b/arch/e2k/include/asm/msidef.h new file mode 100644 index 000000000000..dba1666a6a03 --- /dev/null +++ b/arch/e2k/include/asm/msidef.h @@ -0,0 +1,6 @@ +#ifndef __ASM_MSIDEF_H +#define __ASM_MSIDEF_H + +#include + +#endif diff --git a/arch/e2k/include/asm/namei.h b/arch/e2k/include/asm/namei.h new file mode 100644 index 000000000000..eaad75d608c6 --- /dev/null +++ b/arch/e2k/include/asm/namei.h @@ -0,0 +1,17 @@ +/* $Id: namei.h,v 1.1 2001/05/16 13:33:16 anonymous Exp $ + * linux/include/asm-e2k/namei.h + * + * Included from linux/fs/namei.c + */ + +#ifndef _E2K_NAMEI_H_ +#define _E2K_NAMEI_H_ + +/* This dummy routine maybe changed to something useful + * for /usr/gnemul/ emulation stuff. + * Look at asm-sparc/namei.h for details. + */ + +#define __emul_prefix() NULL + +#endif /* _E2K_NAMEI_H_ */ diff --git a/arch/e2k/include/asm/native_aau_regs_access.h b/arch/e2k/include/asm/native_aau_regs_access.h new file mode 100644 index 000000000000..eb8e06af49a0 --- /dev/null +++ b/arch/e2k/include/asm/native_aau_regs_access.h @@ -0,0 +1,1487 @@ +/* + * Native hardware AAU registers access + * + * array access descriptors (AAD0, ... , AAD31); + * initial indices (AIND0, ... , AAIND15); + * indices increment values (AAINCR0, ... 
, AAINCR7); + * current values of "prefetch" indices (AALDI0, ... , AALDI63); + * array prefetch initialization mask (AALDV); + * prefetch attributes (AALDA0, ... , AALDA63); + * array prefetch advance mask (AALDM); + * array access status register (AASR); + * array access fault status register (AAFSTR); + * current values of "store" indices (AASTI0, ... , AASTI15); + * store attributes (AASTA0, ... , AASTA15); + */ + +#ifndef _NATIVE_AAU_REGS_ACCESS_H_ +#define _NATIVE_AAU_REGS_ACCESS_H_ + +#include +#include +#include +#include +#include + +#ifdef DEBUG_AAU_REG_MODE + #define BUG_AAU() BUG_ON(true) +#else /* ! DEBUG_AAU_REG_MODE */ + #define BUG_AAU() +#endif /* DEBUG_AAU_REG_MODE */ + +/* macros to access to native hardware E2K AAU context */ + +#define NATIVE_GET_AAU_AAD(reg_mn, mem_p) \ + NATIVE_GET_AAUQREG(&AWP(mem_p), reg_mn) +#define NATIVE_GET_AAU_AADS(reg1, reg2, reg3, reg4, mem_p) \ + NATIVE_GET_AAUQREGS(&AWP(mem_p), reg1, reg2, reg3, reg4) +#define NATIVE_GET_AAU_AAIND_V2(reg_mnemonic) \ + NATIVE_GET_AAUREG(reg_mnemonic, 2) +#define NATIVE_GET_AAU_AAIND_V5(reg_mnemonic) \ + NATIVE_GET_AAUDREG(reg_mnemonic, 2) +#define NATIVE_GET_AAU_AAINDS_V2(reg1, reg2, val1, val2) \ + NATIVE_GET_AAUREGS(reg1, reg2, val1, val2) +#define NATIVE_GET_AAU_AAINDS_V5(reg1, reg2, val1, val2) \ + NATIVE_GET_AAUDREGS(reg1, reg2, val1, val2) +#define NATIVE_GET_AAU_AAIND_TAG() \ + NATIVE_GET_AAUREG(aaind_tag, 2) +#define NATIVE_GET_AAU_AAINCR_V2(reg_mnemonic) \ + NATIVE_GET_AAUREG(reg_mnemonic, 2) +#define NATIVE_GET_AAU_AAINCR_V5(reg_mnemonic) \ + NATIVE_GET_AAUDREG(reg_mnemonic, 2) +#define NATIVE_GET_AAU_AAINCRS_V2(reg1, reg2, val1, val2) \ + NATIVE_GET_AAUREGS(reg1, reg2, val1, val2) +#define NATIVE_GET_AAU_AAINCRS_V5(reg1, reg2, val1, val2) \ + NATIVE_GET_AAUDREGS(reg1, reg2, val1, val2) +#define NATIVE_GET_AAU_AAINCR_TAG() \ + NATIVE_GET_AAUREG(aaincr_tag, 2) +#define NATIVE_GET_AAU_AASTI_V2(reg_mnemonic) \ + NATIVE_GET_AAUREG(reg_mnemonic, 2) +#define 
NATIVE_GET_AAU_AASTI_V5(reg_mnemonic) \ + NATIVE_GET_AAUDREG(reg_mnemonic, 2) +#define NATIVE_GET_AAU_AASTIS_V2(reg1, reg2, val1, val2) \ + NATIVE_GET_AAUREGS(reg1, reg2, val1, val2) +#define NATIVE_GET_AAU_AASTIS_V5(reg1, reg2, val1, val2) \ + NATIVE_GET_AAUDREGS(reg1, reg2, val1, val2) +#define NATIVE_GET_AAU_AASTI_TAG() \ + NATIVE_GET_AAUREG(aasti_tag, 2) +#define NATIVE_GET_AAU_AASR() \ + NATIVE_GET_AAUREG(aasr, 2) +#define NATIVE_GET_AAU_AAFSTR() \ + NATIVE_GET_AAUREG(aafstr, 5) +#define NATIVE_GET_AAU_AALDI_V2(reg_mn, lval, rval) \ + NATIVE_GET_AAUREGS(reg_mn, reg_mn, lval, rval) +#define NATIVE_GET_AAU_AALDI_V5(reg_mn, lval, rval) \ + NATIVE_GET_AAUDREGS(reg_mn, reg_mn, lval, rval) +#define NATIVE_GET_AAU_AALDA(reg_mn, lval, rval) \ + NATIVE_GET_AAUREGS(reg_mn, reg_mn, lval, rval) +#define NATIVE_GET_AAU_AALDV(lo, hi) \ + NATIVE_GET_AAUREGS(aaldv, aaldv, lo, hi) +#define NATIVE_GET_AAU_AALDM(lo, hi) \ + NATIVE_GET_AAUREGS(aaldm, aaldm, lo, hi) + +#define NATIVE_SET_AAU_AAD(reg_mn, mem_p) \ + NATIVE_SET_AAUQREG(reg_mn, &AWP(mem_p)) +#define NATIVE_SET_AAU_AADS(reg1, reg2, reg3, reg4, mem_p) \ + NATIVE_SET_AAUQREGS(&AWP(mem_p), reg1, reg2, reg3, reg4) +#define NATIVE_SET_AAU_AAIND(reg_mn, val) \ + NATIVE_SET_AAUDREG(reg_mn, val, 2) +#define NATIVE_SET_AAU_AAINDS(reg1, reg2, val1, val2) \ + NATIVE_SET_AAUDREGS(reg1, reg2, val1, val2) +#define NATIVE_SET_AAU_AAIND_TAG(val) \ + NATIVE_SET_AAUREG(aaind_tag, val, 2) +#define NATIVE_SET_AAU_AAIND_REG_AND_TAGS(reg, reg_tag, val, tags_val) \ + NATIVE_SET_AAUREGS(reg, reg_tag, val, tags_val) +#define NATIVE_SET_AAU_AAIND_AAINCR_TAGS(aaind, aaincr) \ + NATIVE_SET_AAUREGS(aaind_tag, aaincr_tag, (aaind), (aaincr)) +#define NATIVE_SET_AAU_AAINCR(reg_mn, val) \ + NATIVE_SET_AAUDREG(reg_mn, val, 5) +#define NATIVE_SET_AAU_AAINCRS(reg1, reg2, val1, val2) \ + NATIVE_SET_AAUDREGS(reg1, reg2, val1, val2) +#define NATIVE_SET_AAU_AAINCR_TAG(val) \ + NATIVE_SET_AAUREG(aaincr_tag, val, 5) +#define 
NATIVE_SET_AAU_AAINCR_REG_AND_TAGS(reg, reg_tag, val, tags_val) \ + NATIVE_SET_AAUREGS(reg, reg_tag, val, tags_val) +#define NATIVE_SET_AAU_AASTI(reg_mn, val) \ + NATIVE_SET_AAUDREG(reg_mn, val, 2) +#define NATIVE_SET_AAU_AASTIS(reg1, reg2, val1, val2) \ + NATIVE_SET_AAUDREGS(reg1, reg2, val1, val2) +#define NATIVE_SET_AAU_AASTI_TAG(val) \ + NATIVE_SET_AAUREG(aasti_tag, val, 2) +#define NATIVE_SET_AAU_AASR(val) \ + NATIVE_SET_AAUREG(aasr, val, 2) +#define NATIVE_SET_AAU_AAFSTR(val) \ + NATIVE_SET_AAUREG(aafstr, val, 5) +#define NATIVE_SET_AAU_AALDI(reg_mn, lval, rval) \ + NATIVE_SET_AAUDREGS(reg_mn, reg_mn, lval, rval) +#define NATIVE_SET_AAU_AALDA(reg_mn, lval, rval) \ + NATIVE_SET_AAUREGS(reg_mn, reg_mn, lval, rval) +#define NATIVE_SET_AAU_AALDV(lo, hi) \ + NATIVE_SET_AAUREGS(aaldv, aaldv, lo, hi) +#define NATIVE_SET_AAU_AALDM(lo, hi) \ + NATIVE_SET_AAUREGS(aaldm, aaldm, lo, hi) + +/* + * Native hardware AAU registers access function (can be paravirtualized) + * WARNING: please use only following functions to access to AAU context, + * do not use macroses above directly, because of macroses cannot be + * paravirtualized + */ + +static __always_inline u32 native_read_aasr_reg_value(void) +{ + return NATIVE_GET_AAU_AASR(); +} +static __always_inline void native_write_aasr_reg_value(u32 reg_value) +{ + NATIVE_SET_AAU_AASR(reg_value); +} +static inline u32 native_read_aafstr_reg_value(void) +{ + return NATIVE_GET_AAU_AAFSTR(); +} +static __always_inline void native_write_aafstr_reg_value(u32 reg_value) +{ + NATIVE_SET_AAU_AAFSTR(reg_value); +} + +static __always_inline e2k_aasr_t native_read_aasr_reg(void) +{ + e2k_aasr_t aasr; + + AW(aasr) = native_read_aasr_reg_value(); + return aasr; +} +static __always_inline void native_write_aasr_reg(e2k_aasr_t aasr) +{ + NATIVE_SET_AAU_AASR(AW(aasr)); +} + +static inline u32 native_read_aaind_reg_value_v2(int AAIND_no) +{ + switch (AAIND_no) { + case 0: return NATIVE_GET_AAU_AAIND_V2(aaind0); + case 1: return 
NATIVE_GET_AAU_AAIND_V2(aaind1); + case 2: return NATIVE_GET_AAU_AAIND_V2(aaind2); + case 3: return NATIVE_GET_AAU_AAIND_V2(aaind3); + case 4: return NATIVE_GET_AAU_AAIND_V2(aaind4); + case 5: return NATIVE_GET_AAU_AAIND_V2(aaind5); + case 6: return NATIVE_GET_AAU_AAIND_V2(aaind6); + case 7: return NATIVE_GET_AAU_AAIND_V2(aaind7); + case 8: return NATIVE_GET_AAU_AAIND_V2(aaind8); + case 9: return NATIVE_GET_AAU_AAIND_V2(aaind9); + case 10: return NATIVE_GET_AAU_AAIND_V2(aaind10); + case 11: return NATIVE_GET_AAU_AAIND_V2(aaind11); + case 12: return NATIVE_GET_AAU_AAIND_V2(aaind12); + case 13: return NATIVE_GET_AAU_AAIND_V2(aaind13); + case 14: return NATIVE_GET_AAU_AAIND_V2(aaind14); + case 15: return NATIVE_GET_AAU_AAIND_V2(aaind15); + default: + BUG_AAU(); + return 0; + } +} + +static inline u64 native_read_aaind_reg_value_v5(int AAIND_no) +{ + switch (AAIND_no) { + case 0: return NATIVE_GET_AAU_AAIND_V5(aaind0); + case 1: return NATIVE_GET_AAU_AAIND_V5(aaind1); + case 2: return NATIVE_GET_AAU_AAIND_V5(aaind2); + case 3: return NATIVE_GET_AAU_AAIND_V5(aaind3); + case 4: return NATIVE_GET_AAU_AAIND_V5(aaind4); + case 5: return NATIVE_GET_AAU_AAIND_V5(aaind5); + case 6: return NATIVE_GET_AAU_AAIND_V5(aaind6); + case 7: return NATIVE_GET_AAU_AAIND_V5(aaind7); + case 8: return NATIVE_GET_AAU_AAIND_V5(aaind8); + case 9: return NATIVE_GET_AAU_AAIND_V5(aaind9); + case 10: return NATIVE_GET_AAU_AAIND_V5(aaind10); + case 11: return NATIVE_GET_AAU_AAIND_V5(aaind11); + case 12: return NATIVE_GET_AAU_AAIND_V5(aaind12); + case 13: return NATIVE_GET_AAU_AAIND_V5(aaind13); + case 14: return NATIVE_GET_AAU_AAIND_V5(aaind14); + case 15: return NATIVE_GET_AAU_AAIND_V5(aaind15); + default: + BUG_AAU(); + return 0; + } +} +static inline void native_write_aaind_reg_value(int AAIND_no, u64 reg_value) +{ + switch (AAIND_no) { + case 0: + NATIVE_SET_AAU_AAIND(aaind0, reg_value); + break; + case 1: + NATIVE_SET_AAU_AAIND(aaind1, reg_value); + break; + case 2: + 
NATIVE_SET_AAU_AAIND(aaind2, reg_value); + break; + case 3: + NATIVE_SET_AAU_AAIND(aaind3, reg_value); + break; + case 4: + NATIVE_SET_AAU_AAIND(aaind4, reg_value); + break; + case 5: + NATIVE_SET_AAU_AAIND(aaind5, reg_value); + break; + case 6: + NATIVE_SET_AAU_AAIND(aaind6, reg_value); + break; + case 7: + NATIVE_SET_AAU_AAIND(aaind7, reg_value); + break; + case 8: + NATIVE_SET_AAU_AAIND(aaind8, reg_value); + break; + case 9: + NATIVE_SET_AAU_AAIND(aaind9, reg_value); + break; + case 10: + NATIVE_SET_AAU_AAIND(aaind10, reg_value); + break; + case 11: + NATIVE_SET_AAU_AAIND(aaind11, reg_value); + break; + case 12: + NATIVE_SET_AAU_AAIND(aaind12, reg_value); + break; + case 13: + NATIVE_SET_AAU_AAIND(aaind13, reg_value); + break; + case 14: + NATIVE_SET_AAU_AAIND(aaind14, reg_value); + break; + case 15: + NATIVE_SET_AAU_AAIND(aaind15, reg_value); + break; + default: + BUG_AAU(); + } +} + +#define PREFIX_READ_AAINDS_PAIR_VALUE(PV_TYPE, pv_type, ISET, iset, \ + AAINDs_pair, value1, value2) \ +({ \ + switch (AAINDs_pair) { \ + case 0: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind0, aaind1, \ + value1, value2); \ + break; \ + case 1: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind1, aaind2, \ + value1, value2); \ + break; \ + case 2: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind2, aaind3, \ + value1, value2); \ + break; \ + case 3: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind3, aaind4, \ + value1, value2); \ + break; \ + case 4: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind4, aaind5, \ + value1, value2); \ + break; \ + case 5: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind5, aaind6, \ + value1, value2); \ + break; \ + case 6: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind6, aaind7, \ + value1, value2); \ + break; \ + case 7: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind7, aaind8, \ + value1, value2); \ + break; \ + case 8: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind8, aaind9, \ + value1, value2); \ + break; \ + case 9: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind9, aaind10, \ + value1, value2); \ + break; \ 
+ case 10: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind10, aaind11, \ + value1, value2); \ + break; \ + case 11: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind11, aaind12, \ + value1, value2); \ + break; \ + case 12: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind12, aaind13, \ + value1, value2); \ + break; \ + case 13: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind13, aaind14, \ + value1, value2); \ + break; \ + case 14: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind14, aaind15, \ + value1, value2); \ + break; \ + case 15: \ + value1 = PV_TYPE##_GET_AAU_AAIND_##ISET(aaind15); \ + value2 = PV_TYPE##_GET_AAU_AAIND_TAG(); \ + break; \ + default: \ + BUG_AAU(); \ + value1 = 0; \ + value2 = 0; \ + } \ +}) +#define NATIVE_READ_AAINDS_PAIR_VALUE_V2(AAINDs_pair, lo_value, hi_value) \ + PREFIX_READ_AAINDS_PAIR_VALUE(NATIVE, native, V2, v2, \ + AAINDs_pair, lo_value, hi_value) +#define NATIVE_READ_AAINDS_PAIR_VALUE_V5(AAINDs_pair, lo_value, hi_value) \ + PREFIX_READ_AAINDS_PAIR_VALUE(NATIVE, native, V5, v5, \ + AAINDs_pair, lo_value, hi_value) +#define PREFIX_READ_AAIND_REG15_AND_TAGS_VALUE_V2(PV_TYPE, pv_type, \ + reg_value, tags_value) \ +({ \ + PV_TYPE##_GET_AAU_AAINDS_V2(aaind15, aaind_tag, \ + reg_value, tags_value); \ +}) +#define NATIVE_READ_AAIND_REG15_AND_TAGS_VALUE_V2(reg_value, tags_value) \ + PREFIX_READ_AAIND_REG15_AND_TAGS_VALUE_V2(NATIVE, native, \ + reg_value, tags_value) +#define PREFIX_READ_AAIND_REG15_AND_TAGS_VALUE_V5(PV_TYPE, pv_type, \ + reg_value, tags_value) \ +({ \ + reg_value = PV_TYPE##_GET_AAU_AAIND_V5(aaind15); \ + tags_value = PV_TYPE##_GET_AAU_AAIND_TAG(); \ +}) +#define NATIVE_READ_AAIND_REG15_AND_TAGS_VALUE_V5(reg_value, tags_value) \ + PREFIX_READ_AAIND_REG15_AND_TAGS_VALUE_V5(NATIVE, native, \ + reg_value, tags_value) + +static __always_inline void +native_write_aainds_pair_value(int AAINDs_pair, u64 lo_value, u64 hi_value) +{ + switch (AAINDs_pair) { + case 0: + NATIVE_SET_AAU_AAINDS(aaind0, aaind1, lo_value, hi_value); + break; + case 1: + 
NATIVE_SET_AAU_AAINDS(aaind1, aaind2, lo_value, hi_value); + break; + case 2: + NATIVE_SET_AAU_AAINDS(aaind2, aaind3, lo_value, hi_value); + break; + case 3: + NATIVE_SET_AAU_AAINDS(aaind3, aaind4, lo_value, hi_value); + break; + case 4: + NATIVE_SET_AAU_AAINDS(aaind4, aaind5, lo_value, hi_value); + break; + case 5: + NATIVE_SET_AAU_AAINDS(aaind5, aaind6, lo_value, hi_value); + break; + case 6: + NATIVE_SET_AAU_AAINDS(aaind6, aaind7, lo_value, hi_value); + break; + case 7: + NATIVE_SET_AAU_AAINDS(aaind7, aaind8, lo_value, hi_value); + break; + case 8: + NATIVE_SET_AAU_AAINDS(aaind8, aaind9, lo_value, hi_value); + break; + case 9: + NATIVE_SET_AAU_AAINDS(aaind9, aaind10, lo_value, hi_value); + break; + case 10: + NATIVE_SET_AAU_AAINDS(aaind10, aaind11, lo_value, hi_value); + break; + case 11: + NATIVE_SET_AAU_AAINDS(aaind11, aaind12, lo_value, hi_value); + break; + case 12: + NATIVE_SET_AAU_AAINDS(aaind12, aaind13, lo_value, hi_value); + break; + case 13: + NATIVE_SET_AAU_AAINDS(aaind13, aaind14, lo_value, hi_value); + break; + case 14: + NATIVE_SET_AAU_AAINDS(aaind14, aaind15, lo_value, hi_value); + break; + default: + BUG_AAU(); + } +} + +static inline u32 native_read_aaind_tags_reg_value(void) +{ + return NATIVE_GET_AAU_AAIND_TAG(); +} +static inline void native_write_aaind_tags_reg_value(u32 reg_value) +{ + NATIVE_SET_AAU_AAIND_TAG(reg_value); +} +static inline u32 native_read_aaincr_reg_value_v2(int AAINCR_no) +{ + switch (AAINCR_no) { + case 0: return NATIVE_GET_AAU_AAINCR_V2(aaincr0); + case 1: return NATIVE_GET_AAU_AAINCR_V2(aaincr1); + case 2: return NATIVE_GET_AAU_AAINCR_V2(aaincr2); + case 3: return NATIVE_GET_AAU_AAINCR_V2(aaincr3); + case 4: return NATIVE_GET_AAU_AAINCR_V2(aaincr4); + case 5: return NATIVE_GET_AAU_AAINCR_V2(aaincr5); + case 6: return NATIVE_GET_AAU_AAINCR_V2(aaincr6); + case 7: return NATIVE_GET_AAU_AAINCR_V2(aaincr7); + default: + BUG_AAU(); + return 0; + } +} +static inline u64 native_read_aaincr_reg_value_v5(int AAINCR_no) +{ + 
switch (AAINCR_no) { + case 0: return NATIVE_GET_AAU_AAINCR_V5(aaincr0); + case 1: return NATIVE_GET_AAU_AAINCR_V5(aaincr1); + case 2: return NATIVE_GET_AAU_AAINCR_V5(aaincr2); + case 3: return NATIVE_GET_AAU_AAINCR_V5(aaincr3); + case 4: return NATIVE_GET_AAU_AAINCR_V5(aaincr4); + case 5: return NATIVE_GET_AAU_AAINCR_V5(aaincr5); + case 6: return NATIVE_GET_AAU_AAINCR_V5(aaincr6); + case 7: return NATIVE_GET_AAU_AAINCR_V5(aaincr7); + default: + BUG_AAU(); + return 0; + } +} +static inline void native_write_aaincr_reg_value(int AAINCR_no, u64 reg_value) +{ + switch (AAINCR_no) { + case 0: + NATIVE_SET_AAU_AAINCR(aaincr0, reg_value); + break; + case 1: + NATIVE_SET_AAU_AAINCR(aaincr1, reg_value); + break; + case 2: + NATIVE_SET_AAU_AAINCR(aaincr2, reg_value); + break; + case 3: + NATIVE_SET_AAU_AAINCR(aaincr3, reg_value); + break; + case 4: + NATIVE_SET_AAU_AAINCR(aaincr4, reg_value); + break; + case 5: + NATIVE_SET_AAU_AAINCR(aaincr5, reg_value); + break; + case 6: + NATIVE_SET_AAU_AAINCR(aaincr6, reg_value); + break; + case 7: + NATIVE_SET_AAU_AAINCR(aaincr7, reg_value); + break; + default: + BUG_AAU(); + } +} +static inline u32 native_read_aaincr_tags_reg_value(void) +{ + return NATIVE_GET_AAU_AAINCR_TAG(); +} +static inline void native_write_aaincr_tags_reg_value(u32 reg_value) +{ + NATIVE_SET_AAU_AAINCR_TAG(reg_value); +} + +#define PREFIX_READ_AAINCRS_PAIR_VALUE(PV_TYPE, pv_type, ISET, iset, \ + AAINCRs_pair, value1, value2) \ +({ \ + switch (AAINCRs_pair) { \ + case 0: \ + PV_TYPE##_GET_AAU_AAINCRS_##ISET(aaincr0, aaincr1, \ + value1, value2); \ + break; \ + case 1: \ + PV_TYPE##_GET_AAU_AAINCRS_##ISET(aaincr1, aaincr2, \ + value1, value2); \ + break; \ + case 2: \ + PV_TYPE##_GET_AAU_AAINCRS_##ISET(aaincr2, aaincr3, \ + value1, value2); \ + break; \ + case 3: \ + PV_TYPE##_GET_AAU_AAINCRS_##ISET(aaincr3, aaincr4, \ + value1, value2); \ + break; \ + case 4: \ + PV_TYPE##_GET_AAU_AAINCRS_##ISET(aaincr4, aaincr5, \ + value1, value2); \ + break; \ + case 5: \ + 
PV_TYPE##_GET_AAU_AAINCRS_##ISET(aaincr5, aaincr6, \ + value1, value2); \ + break; \ + case 6: \ + PV_TYPE##_GET_AAU_AAINCRS_##ISET(aaincr6, aaincr7, \ + value1, value2); \ + break; \ + case 7: \ + value1 = PV_TYPE##_GET_AAU_AAINCR_##ISET(aaind15); \ + value2 = PV_TYPE##_GET_AAU_AAINCR_TAG(); \ + break; \ + default: \ + BUG_AAU(); \ + value1 = 0; \ + value2 = 0; \ + } \ +}) +#define NATIVE_READ_AAINCRS_PAIR_VALUE_V2(AAINCRs_pair, lo_value, hi_value) \ + PREFIX_READ_AAINCRS_PAIR_VALUE(NATIVE, native, V2, v2, \ + AAINCRs_pair, lo_value, hi_value) +#define NATIVE_READ_AAINCRS_PAIR_VALUE_V5(AAINCRs_pair, lo_value, hi_value) \ + PREFIX_READ_AAINCRS_PAIR_VALUE(NATIVE, native, V5, v5, \ + AAINCRs_pair, lo_value, hi_value) +#define PREFIX_READ_AAINCR_REG7_AND_TAGS_VALUE_V2(PV_TYPE, pv_type, \ + reg_value, tags_value) \ +({ \ + PV_TYPE##_GET_AAU_AAINCRS_V2(aaincr7, aaincr_tag, \ + reg_value, tags_value); \ +}) +#define NATIVE_READ_AAINCR_REG7_AND_TAGS_VALUE_V2(reg_value, tags_value) \ + PREFIX_READ_AAINCR_REG7_AND_TAGS_VALUE_V2(NATIVE, native, \ + reg_value, tags_value) +#define PREFIX_READ_AAINCR_REG7_AND_TAGS_VALUE_V5(PV_TYPE, pv_type, \ + reg_value, tags_value) \ +({ \ + reg_value = PV_TYPE##_GET_AAU_AAINCR_V5(aaincr7); \ + tags_value = PV_TYPE##_GET_AAU_AAINCR_TAG(); \ +}) +#define NATIVE_READ_AAINCR_REG7_AND_TAGS_VALUE_V5(reg_value, tags_value) \ + PREFIX_READ_AAINCR_REG7_AND_TAGS_VALUE_V5(NATIVE, native, \ + reg_value, tags_value) + +static __always_inline void +native_write_aaincrs_pair_value(int AAINCRs_pair, u64 lo_value, u64 hi_value) +{ + switch (AAINCRs_pair) { + case 0: + NATIVE_SET_AAU_AAINCRS(aaincr0, aaincr1, lo_value, hi_value); + break; + case 1: + NATIVE_SET_AAU_AAINCRS(aaincr1, aaincr2, lo_value, hi_value); + break; + case 2: + NATIVE_SET_AAU_AAINCRS(aaincr2, aaincr3, lo_value, hi_value); + break; + case 3: + NATIVE_SET_AAU_AAINCRS(aaincr3, aaincr4, lo_value, hi_value); + break; + case 4: + NATIVE_SET_AAU_AAINCRS(aaincr4, aaincr5, lo_value, hi_value); + 
break; + case 5: + NATIVE_SET_AAU_AAINCRS(aaincr5, aaincr6, lo_value, hi_value); + break; + case 6: + NATIVE_SET_AAU_AAINCRS(aaincr6, aaincr7, lo_value, hi_value); + break; + default: + BUG_AAU(); + } +} + +static inline u32 native_read_aasti_reg_value_v2(int AASTI_no) +{ + switch (AASTI_no) { + case 0: return NATIVE_GET_AAU_AASTI_V2(aasti0); + case 1: return NATIVE_GET_AAU_AASTI_V2(aasti1); + case 2: return NATIVE_GET_AAU_AASTI_V2(aasti2); + case 3: return NATIVE_GET_AAU_AASTI_V2(aasti3); + case 4: return NATIVE_GET_AAU_AASTI_V2(aasti4); + case 5: return NATIVE_GET_AAU_AASTI_V2(aasti5); + case 6: return NATIVE_GET_AAU_AASTI_V2(aasti6); + case 7: return NATIVE_GET_AAU_AASTI_V2(aasti7); + case 8: return NATIVE_GET_AAU_AASTI_V2(aasti8); + case 9: return NATIVE_GET_AAU_AASTI_V2(aasti9); + case 10: return NATIVE_GET_AAU_AASTI_V2(aasti10); + case 11: return NATIVE_GET_AAU_AASTI_V2(aasti11); + case 12: return NATIVE_GET_AAU_AASTI_V2(aasti12); + case 13: return NATIVE_GET_AAU_AASTI_V2(aasti13); + case 14: return NATIVE_GET_AAU_AASTI_V2(aasti14); + case 15: return NATIVE_GET_AAU_AASTI_V2(aasti15); + default: + BUG_AAU(); + return 0; + } +} +static inline u64 native_read_aasti_reg_value_v5(int AASTI_no) +{ + switch (AASTI_no) { + case 0: return NATIVE_GET_AAU_AASTI_V5(aasti0); + case 1: return NATIVE_GET_AAU_AASTI_V5(aasti1); + case 2: return NATIVE_GET_AAU_AASTI_V5(aasti2); + case 3: return NATIVE_GET_AAU_AASTI_V5(aasti3); + case 4: return NATIVE_GET_AAU_AASTI_V5(aasti4); + case 5: return NATIVE_GET_AAU_AASTI_V5(aasti5); + case 6: return NATIVE_GET_AAU_AASTI_V5(aasti6); + case 7: return NATIVE_GET_AAU_AASTI_V5(aasti7); + case 8: return NATIVE_GET_AAU_AASTI_V5(aasti8); + case 9: return NATIVE_GET_AAU_AASTI_V5(aasti9); + case 10: return NATIVE_GET_AAU_AASTI_V5(aasti10); + case 11: return NATIVE_GET_AAU_AASTI_V5(aasti11); + case 12: return NATIVE_GET_AAU_AASTI_V5(aasti12); + case 13: return NATIVE_GET_AAU_AASTI_V5(aasti13); + case 14: return NATIVE_GET_AAU_AASTI_V5(aasti14); 
+ case 15: return NATIVE_GET_AAU_AASTI_V5(aasti15); + default: + BUG_AAU(); + return 0; + } +} +static inline void native_write_aasti_reg_value(int AASTI_no, u64 reg_value) +{ + switch (AASTI_no) { + case 0: + NATIVE_SET_AAU_AASTI(aasti0, reg_value); + break; + case 1: + NATIVE_SET_AAU_AASTI(aasti1, reg_value); + break; + case 2: + NATIVE_SET_AAU_AASTI(aasti2, reg_value); + break; + case 3: + NATIVE_SET_AAU_AASTI(aasti3, reg_value); + break; + case 4: + NATIVE_SET_AAU_AASTI(aasti4, reg_value); + break; + case 5: + NATIVE_SET_AAU_AASTI(aasti5, reg_value); + break; + case 6: + NATIVE_SET_AAU_AASTI(aasti6, reg_value); + break; + case 7: + NATIVE_SET_AAU_AASTI(aasti7, reg_value); + break; + case 8: + NATIVE_SET_AAU_AASTI(aasti8, reg_value); + break; + case 9: + NATIVE_SET_AAU_AASTI(aasti9, reg_value); + break; + case 10: + NATIVE_SET_AAU_AASTI(aasti10, reg_value); + break; + case 11: + NATIVE_SET_AAU_AASTI(aasti11, reg_value); + break; + case 12: + NATIVE_SET_AAU_AASTI(aasti12, reg_value); + break; + case 13: + NATIVE_SET_AAU_AASTI(aasti13, reg_value); + break; + case 14: + NATIVE_SET_AAU_AASTI(aasti14, reg_value); + break; + case 15: + NATIVE_SET_AAU_AASTI(aasti15, reg_value); + break; + default: + BUG_AAU(); + } +} +static inline u32 native_read_aasti_tags_reg_value(void) +{ + return NATIVE_GET_AAU_AASTI_TAG(); +} +static inline void native_write_aasti_tags_reg_value(u32 reg_value) +{ + NATIVE_SET_AAU_AASTI_TAG(reg_value); +} + +#define PREFIX_READ_AASTIS_PAIR_VALUE(PV_TYPE, pv_type, ISET, iset, \ + AASTIs_pair, value1, value2) \ +({ \ + switch (AASTIs_pair) { \ + case 0: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti0, aasti1, \ + value1, value2); \ + break; \ + case 1: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti1, aasti2, \ + value1, value2); \ + break; \ + case 2: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti2, aasti3, \ + value1, value2); \ + break; \ + case 3: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti3, aasti4, \ + value1, value2);\ + break; \ + case 4: \ + 
PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti4, aasti5, \ + value1, value2); \ + break; \ + case 5: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti5, aasti6, \ + value1, value2); \ + break; \ + case 6: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti6, aasti7, \ + value1, value2); \ + break; \ + case 7: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti7, aasti8, \ + value1, value2); \ + break; \ + case 8: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti8, aasti9, \ + value1, value2); \ + break; \ + case 9: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti9, aasti10, \ + value1, value2); \ + break; \ + case 10: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti10, aasti11, \ + value1, value2); \ + break; \ + case 11: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti11, aasti12, \ + value1, value2); \ + break; \ + case 12: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti12, aasti13, \ + value1, value2); \ + break; \ + case 13: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti13, aasti14, \ + value1, value2); \ + break; \ + case 14: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti14, aasti15, \ + value1, value2); \ + break; \ + case 15: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti15, aasti_tag, \ + value1, value2); \ + break; \ + default: \ + BUG_AAU(); \ + value1 = 0; \ + value2 = 0; \ + } \ +}) +#define NATIVE_READ_AASTIS_PAIR_VALUE_V2(AASTIs_pair, lo_value, hi_value) \ + PREFIX_READ_AASTIS_PAIR_VALUE(NATIVE, native, V2, v2, \ + AASTIs_pair, lo_value, hi_value) +#define NATIVE_READ_AASTIS_PAIR_VALUE_V5(AASTIs_pair, lo_value, hi_value) \ + PREFIX_READ_AASTIS_PAIR_VALUE(NATIVE, native, V5, v5, \ + AASTIs_pair, lo_value, hi_value) + +static __always_inline void +native_write_aastis_pair_value(int AASTIs_pair, u64 lo_value, u64 hi_value) +{ + switch (AASTIs_pair) { + case 0: + NATIVE_SET_AAU_AASTIS(aasti0, aasti1, lo_value, hi_value); + break; + case 1: + NATIVE_SET_AAU_AASTIS(aasti1, aasti2, lo_value, hi_value); + break; + case 2: + NATIVE_SET_AAU_AASTIS(aasti2, aasti3, lo_value, hi_value); + break; + case 3: + NATIVE_SET_AAU_AASTIS(aasti3, aasti4, 
lo_value, hi_value); + break; + case 4: + NATIVE_SET_AAU_AASTIS(aasti4, aasti5, lo_value, hi_value); + break; + case 5: + NATIVE_SET_AAU_AASTIS(aasti5, aasti6, lo_value, hi_value); + break; + case 6: + NATIVE_SET_AAU_AASTIS(aasti6, aasti7, lo_value, hi_value); + break; + case 7: + NATIVE_SET_AAU_AASTIS(aasti7, aasti8, lo_value, hi_value); + break; + case 8: + NATIVE_SET_AAU_AASTIS(aasti8, aasti9, lo_value, hi_value); + break; + case 9: + NATIVE_SET_AAU_AASTIS(aasti9, aasti10, lo_value, hi_value); + break; + case 10: + NATIVE_SET_AAU_AASTIS(aasti10, aasti11, lo_value, hi_value); + break; + case 11: + NATIVE_SET_AAU_AASTIS(aasti11, aasti12, lo_value, hi_value); + break; + case 12: + NATIVE_SET_AAU_AASTIS(aasti12, aasti13, lo_value, hi_value); + break; + case 13: + NATIVE_SET_AAU_AASTIS(aasti13, aasti14, lo_value, hi_value); + break; + case 14: + NATIVE_SET_AAU_AASTIS(aasti14, aasti15, lo_value, hi_value); + break; + case 15: + NATIVE_SET_AAU_AASTIS(aasti15, aasti_tag, lo_value, hi_value); + break; + default: + BUG_AAU(); + } +} + +#define PREFIX_READ_AALDI_REG_VALUE(PV_TYPE, pv_type, ISET, iset, \ + AALDI_no, value1, value2) \ +({ \ + switch (AALDI_no) { \ + case 0: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi0, value1, value2); \ + break; \ + case 1: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi1, value1, value2); \ + break; \ + case 2: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi2, value1, value2); \ + break; \ + case 3: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi3, value1, value2); \ + break; \ + case 4: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi4, value1, value2); \ + break; \ + case 5: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi5, value1, value2); \ + break; \ + case 6: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi6, value1, value2); \ + break; \ + case 7: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi7, value1, value2); \ + break; \ + case 8: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi8, value1, value2); \ + break; \ + case 9: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi9, value1, value2); \ + 
break; \ + case 10: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi10, value1, value2); \ + break; \ + case 11: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi11, value1, value2); \ + break; \ + case 12: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi12, value1, value2); \ + break; \ + case 13: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi13, value1, value2); \ + break; \ + case 14: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi14, value1, value2); \ + break; \ + case 15: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi15, value1, value2); \ + break; \ + case 16: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi16, value1, value2); \ + break; \ + case 17: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi17, value1, value2); \ + break; \ + case 18: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi18, value1, value2); \ + break; \ + case 19: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi19, value1, value2); \ + break; \ + case 20: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi20, value1, value2); \ + break; \ + case 21: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi21, value1, value2); \ + break; \ + case 22: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi22, value1, value2); \ + break; \ + case 23: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi23, value1, value2); \ + break; \ + case 24: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi24, value1, value2); \ + break; \ + case 25: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi25, value1, value2); \ + break; \ + case 26: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi26, value1, value2); \ + break; \ + case 27: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi27, value1, value2); \ + break; \ + case 28: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi28, value1, value2); \ + break; \ + case 29: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi29, value1, value2); \ + break; \ + case 30: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi30, value1, value2); \ + break; \ + case 31: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi31, value1, value2); \ + break; \ + default: \ + BUG_AAU(); \ + value1 = 0; \ + value2 = 0; \ + } \ +}) +#define NATIVE_READ_AALDI_REG_VALUE_V2(AALDI_no, 
value1, value2) \ + PREFIX_READ_AALDI_REG_VALUE(NATIVE, native, V2, v2, \ + AALDI_no, value1, value2) +#define NATIVE_READ_AALDI_REG_VALUE_V5(AALDI_no, value1, value2) \ + PREFIX_READ_AALDI_REG_VALUE(NATIVE, native, V5, v5, \ + AALDI_no, value1, value2) + +static inline void +native_read_aaldi_reg_value_v2(int AALDI_no, u64 *l_value, u64 *r_value) +{ + u32 value1, value2; + + NATIVE_READ_AALDI_REG_VALUE_V2(AALDI_no, value1, value2); + *l_value = value1; + *r_value = value2; +} + +static inline void +native_read_aaldi_reg_value_v5(int AALDI_no, u64 *l_value, u64 *r_value) +{ + u64 value1, value2; + + NATIVE_READ_AALDI_REG_VALUE_V5(AALDI_no, value1, value2); + *l_value = value1; + *r_value = value2; +} + +static inline void +native_write_aaldi_reg_value(int AALDI_no, u64 l_value, u64 r_value) +{ + switch (AALDI_no) { + case 0: + NATIVE_SET_AAU_AALDI(aaldi0, l_value, r_value); + break; + case 1: + NATIVE_SET_AAU_AALDI(aaldi1, l_value, r_value); + break; + case 2: + NATIVE_SET_AAU_AALDI(aaldi2, l_value, r_value); + break; + case 3: + NATIVE_SET_AAU_AALDI(aaldi3, l_value, r_value); + break; + case 4: + NATIVE_SET_AAU_AALDI(aaldi4, l_value, r_value); + break; + case 5: + NATIVE_SET_AAU_AALDI(aaldi5, l_value, r_value); + break; + case 6: + NATIVE_SET_AAU_AALDI(aaldi6, l_value, r_value); + break; + case 7: + NATIVE_SET_AAU_AALDI(aaldi7, l_value, r_value); + break; + case 8: + NATIVE_SET_AAU_AALDI(aaldi8, l_value, r_value); + break; + case 9: + NATIVE_SET_AAU_AALDI(aaldi9, l_value, r_value); + break; + case 10: + NATIVE_SET_AAU_AALDI(aaldi10, l_value, r_value); + break; + case 11: + NATIVE_SET_AAU_AALDI(aaldi11, l_value, r_value); + break; + case 12: + NATIVE_SET_AAU_AALDI(aaldi12, l_value, r_value); + break; + case 13: + NATIVE_SET_AAU_AALDI(aaldi13, l_value, r_value); + break; + case 14: + NATIVE_SET_AAU_AALDI(aaldi14, l_value, r_value); + break; + case 15: + NATIVE_SET_AAU_AALDI(aaldi15, l_value, r_value); + break; + case 16: + NATIVE_SET_AAU_AALDI(aaldi16, l_value, 
r_value); + break; + case 17: + NATIVE_SET_AAU_AALDI(aaldi17, l_value, r_value); + break; + case 18: + NATIVE_SET_AAU_AALDI(aaldi18, l_value, r_value); + break; + case 19: + NATIVE_SET_AAU_AALDI(aaldi19, l_value, r_value); + break; + case 20: + NATIVE_SET_AAU_AALDI(aaldi20, l_value, r_value); + break; + case 21: + NATIVE_SET_AAU_AALDI(aaldi21, l_value, r_value); + break; + case 22: + NATIVE_SET_AAU_AALDI(aaldi22, l_value, r_value); + break; + case 23: + NATIVE_SET_AAU_AALDI(aaldi23, l_value, r_value); + break; + case 24: + NATIVE_SET_AAU_AALDI(aaldi24, l_value, r_value); + break; + case 25: + NATIVE_SET_AAU_AALDI(aaldi25, l_value, r_value); + break; + case 26: + NATIVE_SET_AAU_AALDI(aaldi26, l_value, r_value); + break; + case 27: + NATIVE_SET_AAU_AALDI(aaldi27, l_value, r_value); + break; + case 28: + NATIVE_SET_AAU_AALDI(aaldi28, l_value, r_value); + break; + case 29: + NATIVE_SET_AAU_AALDI(aaldi29, l_value, r_value); + break; + case 30: + NATIVE_SET_AAU_AALDI(aaldi30, l_value, r_value); + break; + case 31: + NATIVE_SET_AAU_AALDI(aaldi31, l_value, r_value); + break; + default: + BUG_AAU(); + l_value = 0; + r_value = 0; + } +} + +#define PREFIX_READ_AALDAS_REG_VALUE(PV_TYPE, pv_type, \ + AALDAs_no, value1, value2) \ +({ \ + switch (AALDAs_no) { \ + case 0: \ + PV_TYPE##_GET_AAU_AALDA(aalda0, value1, value2); \ + break; \ + case 4: \ + PV_TYPE##_GET_AAU_AALDA(aalda4, value1, value2); \ + break; \ + case 8: \ + PV_TYPE##_GET_AAU_AALDA(aalda8, value1, value2); \ + break; \ + case 12: \ + PV_TYPE##_GET_AAU_AALDA(aalda12, value1, value2); \ + break; \ + case 16: \ + PV_TYPE##_GET_AAU_AALDA(aalda16, value1, value2); \ + break; \ + case 20: \ + PV_TYPE##_GET_AAU_AALDA(aalda20, value1, value2); \ + break; \ + case 24: \ + PV_TYPE##_GET_AAU_AALDA(aalda24, value1, value2); \ + break; \ + case 28: \ + PV_TYPE##_GET_AAU_AALDA(aalda28, value1, value2); \ + break; \ + default: \ + BUG_AAU(); \ + value1 = 0; \ + value2 = 0; \ + } \ +}) +#define 
NATIVE_READ_AALDAS_REG_VALUE(AALDAs_no, value1, value2) \ + PREFIX_READ_AALDAS_REG_VALUE(NATIVE, native, \ + AALDAs_no, value1, value2) + +static inline void +native_read_aaldas_reg_value(int AALDAs_no, u32 *l_value, u32 *r_value) +{ + u32 value1, value2; + + NATIVE_READ_AALDAS_REG_VALUE(AALDAs_no, value1, value2); + *l_value = value1; + *r_value = value2; +} + +static inline void +native_write_aaldas_reg_value(int AALDAs_no, u32 l_value, u32 r_value) +{ + switch (AALDAs_no) { + case 0: + NATIVE_SET_AAU_AALDA(aalda0, l_value, r_value); + break; + case 4: + NATIVE_SET_AAU_AALDA(aalda4, l_value, r_value); + break; + case 8: + NATIVE_SET_AAU_AALDA(aalda8, l_value, r_value); + break; + case 12: + NATIVE_SET_AAU_AALDA(aalda12, l_value, r_value); + break; + case 16: + NATIVE_SET_AAU_AALDA(aalda16, l_value, r_value); + break; + case 20: + NATIVE_SET_AAU_AALDA(aalda20, l_value, r_value); + break; + case 24: + NATIVE_SET_AAU_AALDA(aalda24, l_value, r_value); + break; + case 28: + NATIVE_SET_AAU_AALDA(aalda28, l_value, r_value); + break; + default: + BUG_AAU(); + l_value = 0; + r_value = 0; + } +} +static inline void native_read_aaldm_reg_value(u32 *lo_value, u32 *hi_value) +{ + u32 value1, value2; + + NATIVE_GET_AAU_AALDM(value1, value2); + *lo_value = value1; + *hi_value = value2; +} +static __always_inline void native_write_aaldm_reg_value(u32 lo_value, + u32 hi_value) +{ + NATIVE_SET_AAU_AALDM(lo_value, hi_value); +} +static inline void native_read_aaldm_reg(e2k_aaldm_t *aaldm) +{ + native_read_aaldm_reg_value(&aaldm->lo, &aaldm->hi); +} +static __always_inline void native_write_aaldm_reg(e2k_aaldm_t *aaldm) +{ + native_write_aaldm_reg_value(aaldm->lo, aaldm->hi); +} +static inline void native_read_aaldv_reg_value(u32 *lo_value, u32 *hi_value) +{ + u32 value1, value2; + + NATIVE_GET_AAU_AALDV(value1, value2); + *lo_value = value1; + *hi_value = value2; +} +static __always_inline void native_write_aaldv_reg_value(u32 lo_value, + u32 hi_value) +{ + 
NATIVE_SET_AAU_AALDV(lo_value, hi_value); +} +static inline void native_read_aaldv_reg(e2k_aaldv_t *aaldv) +{ + native_read_aaldv_reg_value(&aaldv->lo, &aaldv->hi); +} +static __always_inline void native_write_aaldv_reg(e2k_aaldv_t *aaldv) +{ + native_write_aaldv_reg_value(aaldv->lo, aaldv->hi); +} + +static inline void native_read_aad_reg(int AAD_no, e2k_aadj_t *mem_p) +{ + switch (AAD_no) { + case 0: + NATIVE_GET_AAU_AAD(aadr0, mem_p); + break; + case 1: + NATIVE_GET_AAU_AAD(aadr1, mem_p); + break; + case 2: + NATIVE_GET_AAU_AAD(aadr2, mem_p); + break; + case 3: + NATIVE_GET_AAU_AAD(aadr3, mem_p); + break; + case 4: + NATIVE_GET_AAU_AAD(aadr4, mem_p); + break; + case 5: + NATIVE_GET_AAU_AAD(aadr5, mem_p); + break; + case 6: + NATIVE_GET_AAU_AAD(aadr6, mem_p); + break; + case 7: + NATIVE_GET_AAU_AAD(aadr7, mem_p); + break; + case 8: + NATIVE_GET_AAU_AAD(aadr8, mem_p); + break; + case 9: + NATIVE_GET_AAU_AAD(aadr9, mem_p); + break; + case 10: + NATIVE_GET_AAU_AAD(aadr10, mem_p); + break; + case 11: + NATIVE_GET_AAU_AAD(aadr11, mem_p); + break; + case 12: + NATIVE_GET_AAU_AAD(aadr12, mem_p); + break; + case 13: + NATIVE_GET_AAU_AAD(aadr13, mem_p); + break; + case 14: + NATIVE_GET_AAU_AAD(aadr14, mem_p); + break; + case 15: + NATIVE_GET_AAU_AAD(aadr15, mem_p); + break; + case 16: + NATIVE_GET_AAU_AAD(aadr16, mem_p); + break; + case 17: + NATIVE_GET_AAU_AAD(aadr17, mem_p); + break; + case 18: + NATIVE_GET_AAU_AAD(aadr18, mem_p); + break; + case 19: + NATIVE_GET_AAU_AAD(aadr19, mem_p); + break; + case 20: + NATIVE_GET_AAU_AAD(aadr20, mem_p); + break; + case 21: + NATIVE_GET_AAU_AAD(aadr21, mem_p); + break; + case 22: + NATIVE_GET_AAU_AAD(aadr22, mem_p); + break; + case 23: + NATIVE_GET_AAU_AAD(aadr23, mem_p); + break; + case 24: + NATIVE_GET_AAU_AAD(aadr24, mem_p); + break; + case 25: + NATIVE_GET_AAU_AAD(aadr25, mem_p); + break; + case 26: + NATIVE_GET_AAU_AAD(aadr26, mem_p); + break; + case 27: + NATIVE_GET_AAU_AAD(aadr27, mem_p); + break; + case 28: + 
NATIVE_GET_AAU_AAD(aadr28, mem_p); + break; + case 29: + NATIVE_GET_AAU_AAD(aadr29, mem_p); + break; + case 30: + NATIVE_GET_AAU_AAD(aadr30, mem_p); + break; + case 31: + NATIVE_GET_AAU_AAD(aadr31, mem_p); + break; + default: + BUG_AAU(); + } +} + +static inline void native_write_aad_reg(int AAD_no, e2k_aadj_t *mem_p) +{ + switch (AAD_no) { + case 0: + NATIVE_SET_AAU_AAD(aadr0, mem_p); + break; + case 1: + NATIVE_SET_AAU_AAD(aadr1, mem_p); + break; + case 2: + NATIVE_SET_AAU_AAD(aadr2, mem_p); + break; + case 3: + NATIVE_SET_AAU_AAD(aadr3, mem_p); + break; + case 4: + NATIVE_SET_AAU_AAD(aadr4, mem_p); + break; + case 5: + NATIVE_SET_AAU_AAD(aadr5, mem_p); + break; + case 6: + NATIVE_SET_AAU_AAD(aadr6, mem_p); + break; + case 7: + NATIVE_SET_AAU_AAD(aadr7, mem_p); + break; + case 8: + NATIVE_SET_AAU_AAD(aadr8, mem_p); + break; + case 9: + NATIVE_SET_AAU_AAD(aadr9, mem_p); + break; + case 10: + NATIVE_SET_AAU_AAD(aadr10, mem_p); + break; + case 11: + NATIVE_SET_AAU_AAD(aadr11, mem_p); + break; + case 12: + NATIVE_SET_AAU_AAD(aadr12, mem_p); + break; + case 13: + NATIVE_SET_AAU_AAD(aadr13, mem_p); + break; + case 14: + NATIVE_SET_AAU_AAD(aadr14, mem_p); + break; + case 15: + NATIVE_SET_AAU_AAD(aadr15, mem_p); + break; + case 16: + NATIVE_SET_AAU_AAD(aadr16, mem_p); + break; + case 17: + NATIVE_SET_AAU_AAD(aadr17, mem_p); + break; + case 18: + NATIVE_SET_AAU_AAD(aadr18, mem_p); + break; + case 19: + NATIVE_SET_AAU_AAD(aadr19, mem_p); + break; + case 20: + NATIVE_SET_AAU_AAD(aadr20, mem_p); + break; + case 21: + NATIVE_SET_AAU_AAD(aadr21, mem_p); + break; + case 22: + NATIVE_SET_AAU_AAD(aadr22, mem_p); + break; + case 23: + NATIVE_SET_AAU_AAD(aadr23, mem_p); + break; + case 24: + NATIVE_SET_AAU_AAD(aadr24, mem_p); + break; + case 25: + NATIVE_SET_AAU_AAD(aadr25, mem_p); + break; + case 26: + NATIVE_SET_AAU_AAD(aadr26, mem_p); + break; + case 27: + NATIVE_SET_AAU_AAD(aadr27, mem_p); + break; + case 28: + NATIVE_SET_AAU_AAD(aadr28, mem_p); + break; + case 29: + 
NATIVE_SET_AAU_AAD(aadr29, mem_p); + break; + case 30: + NATIVE_SET_AAU_AAD(aadr30, mem_p); + break; + case 31: + NATIVE_SET_AAU_AAD(aadr31, mem_p); + break; + default: + BUG_AAU(); + } +} + +static __always_inline void native_read_aads_4_reg(int AADs_no, e2k_aadj_t *mem_p) +{ + switch (AADs_no) { + case 0: + NATIVE_GET_AAU_AADS(aadr0, aadr1, aadr2, aadr3, mem_p); + break; + case 4: + NATIVE_GET_AAU_AADS(aadr4, aadr5, aadr6, aadr7, mem_p); + break; + case 8: + NATIVE_GET_AAU_AADS(aadr8, aadr9, aadr10, aadr11, mem_p); + break; + case 12: + NATIVE_GET_AAU_AADS(aadr12, aadr13, aadr14, aadr15, mem_p); + break; + case 16: + NATIVE_GET_AAU_AADS(aadr16, aadr17, aadr18, aadr19, mem_p); + break; + case 20: + NATIVE_GET_AAU_AADS(aadr20, aadr21, aadr22, aadr23, mem_p); + break; + case 24: + NATIVE_GET_AAU_AADS(aadr24, aadr25, aadr26, aadr27, mem_p); + break; + case 28: + NATIVE_GET_AAU_AADS(aadr28, aadr29, aadr30, aadr31, mem_p); + break; + default: + BUG_AAU(); + } +} + +static __always_inline void native_write_aads_4_reg(int AADs_no, + e2k_aadj_t *mem_p) +{ + switch (AADs_no) { + case 0: + NATIVE_SET_AAU_AADS(aadr0, aadr1, aadr2, aadr3, mem_p); + break; + case 4: + NATIVE_SET_AAU_AADS(aadr4, aadr5, aadr6, aadr7, mem_p); + break; + case 8: + NATIVE_SET_AAU_AADS(aadr8, aadr9, aadr10, aadr11, mem_p); + break; + case 12: + NATIVE_SET_AAU_AADS(aadr12, aadr13, aadr14, aadr15, mem_p); + break; + case 16: + NATIVE_SET_AAU_AADS(aadr16, aadr17, aadr18, aadr19, mem_p); + break; + case 20: + NATIVE_SET_AAU_AADS(aadr20, aadr21, aadr22, aadr23, mem_p); + break; + case 24: + NATIVE_SET_AAU_AADS(aadr24, aadr25, aadr26, aadr27, mem_p); + break; + case 28: + NATIVE_SET_AAU_AADS(aadr28, aadr29, aadr30, aadr31, mem_p); + break; + default: + BUG_AAU(); + } +} + +/* Clear AAU to prepare it for restoring. + * Make this a macro to avoid include hell - it uses cpu_has() inside... 
*/ +#define native_clear_apb() NATIVE_CLEAR_APB() + +#endif /* _NATIVE_AAU_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/native_cpu_regs_access.h b/arch/e2k/include/asm/native_cpu_regs_access.h new file mode 100644 index 000000000000..9a6f2ef5dd11 --- /dev/null +++ b/arch/e2k/include/asm/native_cpu_regs_access.h @@ -0,0 +1,596 @@ + +#ifndef _E2K_NATIVE_CPU_REGS_ACCESS_H_ +#define _E2K_NATIVE_CPU_REGS_ACCESS_H_ + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ +#include +#include + +/* + * Read/write low/high double-word OS Compilation Unit Descriptor (OSCUD) + */ + +#define NATIVE_READ_OSCUD_LO_REG_VALUE() NATIVE_GET_DSREG_CLOSED(oscud.lo) +#define NATIVE_READ_OSCUD_HI_REG_VALUE() NATIVE_GET_DSREG_CLOSED(oscud.hi) + +#define NATIVE_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(oscud.lo, OSCUD_lo_value, 5) +#define NATIVE_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(oscud.hi, OSCUD_hi_value, 5) +#define NATIVE_WRITE_OSCUD_LO_REG(OSCUD_lo) \ +({ \ + NATIVE_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo.OSCUD_lo_half); \ +}) +#define NATIVE_NV_NOIRQ_WRITE_OSCUD_HI_REG(OSCUD_hi) \ +({ \ + NATIVE_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi.OSCUD_hi_half); \ +}) +#define NATIVE_WRITE_OSCUD_REG_VALUE(OSCUD_hi_value, OSCUD_lo_value) \ +({ \ + NATIVE_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value); \ + NATIVE_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value); \ +}) +#define NATIVE_WRITE_OSCUD_REG(OSCUD_hi, OSCUD_lo) \ +({ \ + NATIVE_WRITE_OSCUD_REG_VALUE(OSCUD_hi.OSCUD_hi_half, \ + OSCUD_lo.OSCUD_lo_half); \ +}) + +/* + * Read/write low/hgh double-word OS Globals Register (OSGD) + */ + +#define NATIVE_READ_OSGD_LO_REG_VALUE() NATIVE_GET_DSREG_CLOSED(osgd.lo) +#define NATIVE_READ_OSGD_HI_REG_VALUE() NATIVE_GET_DSREG_CLOSED(osgd.hi) + +#define NATIVE_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(osgd.lo, OSGD_lo_value, 5) +#define NATIVE_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(osgd.hi, 
OSGD_hi_value, 5) +#define NATIVE_WRITE_OSGD_LO_REG(OSGD_lo) \ +({ \ + NATIVE_WRITE_OSGD_LO_REG_VALUE(OSGD_lo.OSGD_lo_half); \ +}) +#define NATIVE_WRITE_OSGD_HI_REG(OSGD_hi) \ +({ \ + NATIVE_WRITE_OSGD_HI_REG_VALUE(OSGD_hi.OSGD_hi_half); \ +}) +#define NATIVE_WRITE_OSGD_REG_VALUE(OSGD_hi_value, OSGD_lo_value) \ + NATIVE_SET_DSREGS_CLOSED_NOEXC(osgd.lo, osgd.hi, \ + OSGD_lo_value, OSGD_hi_value, 5) +#define NATIVE_WRITE_OSGD_REG(OSGD_hi, OSGD_lo) \ +({ \ + NATIVE_WRITE_OSGD_REG_VALUE(OSGD_hi.OSGD_hi_half, \ + OSGD_lo.OSGD_lo_half); \ +}) + +/* + * Read/write low/high double-word Compilation Unit Register (CUD) + */ + +#define NATIVE_READ_CUD_LO_REG_VALUE() NATIVE_GET_DSREG_CLOSED(cud.lo) +#define NATIVE_READ_CUD_HI_REG_VALUE() NATIVE_GET_DSREG_CLOSED(cud.hi) + +#define NATIVE_WRITE_CUD_LO_REG_VALUE(CUD_lo_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(cud.lo, CUD_lo_value, 4) +#define NATIVE_WRITE_CUD_HI_REG_VALUE(CUD_hi_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(cud.hi, CUD_hi_value, 4) +#define NATIVE_WRITE_CUD_LO_REG(CUD_lo) \ +({ \ + NATIVE_WRITE_CUD_LO_REG_VALUE(CUD_lo.CUD_lo_half); \ +}) +#define NATIVE_WRITE_CUD_HI_REG(CUD_hi) \ +({ \ + NATIVE_WRITE_CUD_HI_REG_VALUE(CUD_hi.CUD_hi_half); \ +}) +#define NATIVE_WRITE_CUD_REG_VALUE(CUD_hi_value, CUD_lo_value) \ +({ \ + NATIVE_WRITE_CUD_HI_REG_VALUE(CUD_hi_value); \ + NATIVE_WRITE_CUD_LO_REG_VALUE(CUD_lo_value); \ +}) +#define NATIVE_WRITE_CUD_REG(CUD_hi, CUD_lo) \ +({ \ + NATIVE_WRITE_CUD_REG_VALUE(CUD_hi.CUD_hi_half, CUD_lo.CUD_lo_half); \ +}) + +/* + * Read/write low/high double-word Globals Register (GD) + */ + +#define NATIVE_READ_GD_LO_REG_VALUE() NATIVE_GET_DSREG_CLOSED(gd.lo) +#define NATIVE_READ_GD_HI_REG_VALUE() NATIVE_GET_DSREG_CLOSED(gd.hi) + +#define NATIVE_WRITE_GD_LO_REG_VALUE(GD_lo_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(gd.lo, GD_lo_value, 4) +#define NATIVE_WRITE_GD_HI_REG_VALUE(GD_hi_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(gd.hi, GD_hi_value, 4) +#define NATIVE_WRITE_GD_LO_REG(GD_lo) \ + 
NATIVE_WRITE_GD_LO_REG_VALUE(GD_lo.GD_lo_half) +#define NATIVE_WRITE_GD_HI_REG(GD_hi) \ + NATIVE_WRITE_GD_HI_REG_VALUE(GD_hi.GD_hi_half) +#define NATIVE_WRITE_GD_REG_VALUE(GD_hi_value, GD_lo_value) \ +({ \ + NATIVE_WRITE_GD_HI_REG_VALUE(GD_hi_value); \ + NATIVE_WRITE_GD_LO_REG_VALUE(GD_lo_value); \ +}) +#define NATIVE_WRITE_GD_REG(GD_hi, GD_lo) \ +({ \ + NATIVE_WRITE_GD_REG_VALUE(GD_hi.GD_hi_half, GD_lo.GD_lo_half); \ +}) + +/* + * Read/write low/high quad-word Procedure Stack Pointer Register (PSP) + */ + +#define NATIVE_NV_READ_PSP_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(psp.lo) +#define NATIVE_NV_READ_PSP_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(psp.hi) + +#define NATIVE_NV_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) \ + NATIVE_SET_DSREG_OPEN(psp.lo, PSP_lo_value) +#define NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) \ + NATIVE_SET_DSREG_OPEN_NOIRQ(psp.hi, PSP_hi_value) + +/* + * Read/write word Procedure Stack Harware Top Pointer (PSHTP) + */ +#define NATIVE_NV_READ_PSHTP_REG_VALUE() NATIVE_GET_DSREG_OPEN(pshtp) + +#define NATIVE_WRITE_PSHTP_REG_VALUE(PSHTP_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(pshtp, PSHTP_value, 5) + +/* + * Read/write low/high quad-word Procedure Chain Stack Pointer Register (PCSP) + */ +#define NATIVE_NV_READ_PCSP_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(pcsp.lo) +#define NATIVE_NV_READ_PCSP_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(pcsp.hi) + +#define NATIVE_NV_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) \ + NATIVE_SET_DSREG_OPEN(pcsp.lo, PCSP_lo_value) +#define NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) \ + NATIVE_SET_DSREG_OPEN_NOIRQ(pcsp.hi, PCSP_hi_value) + +/* + * Read/write low/high quad-word Current Chain Register (CR0/CR1) + */ +#define NATIVE_NV_READ_CR0_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(cr0.lo) +#define NATIVE_NV_READ_CR0_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(cr0.hi) +#define NATIVE_NV_READ_CR1_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(cr1.lo) +#define NATIVE_NV_READ_CR1_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(cr1.hi) + +#define 
NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG_VALUE(CR0_lo_value) \ + NATIVE_SET_DSREG_OPEN_NOIRQ(cr0.lo, CR0_lo_value) +#define NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG_VALUE(CR0_hi_value) \ + NATIVE_SET_DSREG_OPEN_NOIRQ(cr0.hi, CR0_hi_value) +#define NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG_VALUE(CR1_lo_value) \ + NATIVE_SET_DSREG_OPEN_NOIRQ(cr1.lo, CR1_lo_value) +#define NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG_VALUE(CR1_hi_value) \ + NATIVE_SET_DSREG_OPEN_NOIRQ(cr1.hi, CR1_hi_value) + +/* + * Read/write word Procedure Chain Stack Harware Top Pointer (PCSHTP) + */ +#define NATIVE_READ_PCSHTP_REG_SVALUE() \ + PCSHTP_SIGN_EXTEND(NATIVE_GET_SREG_OPEN(pcshtp)) + +#define NATIVE_WRITE_PCSHTP_REG_SVALUE(PCSHTP_svalue) \ + NATIVE_SET_SREG_CLOSED_NOEXC(pcshtp, PCSHTP_svalue, 5) + +/* + * Read/write double-word Control Transfer Preparation Registers + * (CTPR1/CTPR2/CTPR3) + */ +#define NATIVE_NV_READ_CTPR_REG_VALUE(reg_no) \ + NATIVE_GET_DSREG_OPEN(ctpr##reg_no) + +#define NATIVE_READ_CTPR_HI_REG_VALUE(reg_no) \ + NATIVE_GET_DSREG_CLOSED_CLOBBERS(ctpr##reg_no.hi, \ + __stringify(ctpr##reg_no)) + +#define NATIVE_WRITE_CTPR_REG_VALUE(reg_no, CTPR_value) \ + NATIVE_SET_DSREG_OPEN(ctpr##reg_no, CTPR_value) + +#define NATIVE_WRITE_CTPR_HI_REG_VALUE(reg_no, value) \ + NATIVE_SET_DSREG_CLOSED_EXC_CLOBBERS(ctpr##reg_no.hi, value, \ + 4, __stringify(ctpr##reg_no)) + +/* + * Read/write low/high double-word Trap Info Registers (TIRs) + */ +#define NATIVE_READ_TIR_LO_REG_VALUE() NATIVE_GET_DSREG_CLOSED(tir.lo) +#define NATIVE_READ_TIR_HI_REG_VALUE() NATIVE_GET_DSREG_CLOSED(tir.hi) + +#define NATIVE_WRITE_TIR_LO_REG_VALUE(TIR_lo_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(tir.lo, TIR_lo_value, 4) +#define NATIVE_WRITE_TIR_HI_REG_VALUE(TIR_hi_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(tir.hi, TIR_hi_value, 4) + +/* + * Read/write low/high double-word Trap Info Registers (TIRs) + * as the low/high word structure + */ +#define NATIVE_READ_TIR_LO_REG() \ +({ \ + e2k_tir_lo_t TIR_lo; \ + TIR_lo.TIR_lo_reg = 
NATIVE_READ_TIR_LO_REG_VALUE(); \ + TIR_lo; \ +}) +#define NATIVE_READ_TIR_HI_REG() \ +({ \ + e2k_tir_hi_t TIR_hi; \ + TIR_hi.TIR_hi_reg = NATIVE_READ_TIR_HI_REG_VALUE(); \ + TIR_hi; \ +}) +static inline e2k_tir_lo_t +native_read_TIR_lo_reg(void) +{ + return NATIVE_READ_TIR_LO_REG(); +} +static inline e2k_tir_hi_t +native_read_TIR_hi_reg(void) +{ + return NATIVE_READ_TIR_HI_REG(); +} + +/* + * Read/write low/high double-word Non-Protected User Stack Descriptor + * Register (USD) + */ +#define NATIVE_NV_READ_USD_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(usd.lo) +#define NATIVE_NV_READ_USD_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(usd.hi) + +#define NATIVE_NV_WRITE_USD_LO_REG_VALUE(USD_lo_value) \ + NATIVE_SET_DSREG_OPEN(usd.lo, USD_lo_value) +#define NATIVE_NV_WRITE_USD_HI_REG_VALUE(USD_hi_value) \ + NATIVE_SET_DSREG_OPEN(usd.hi, USD_hi_value) + +#define NATIVE_NV_WRITE_USBR_USD_REG_VALUE(usbr, usd_hi, usd_lo) \ +do { \ + NATIVE_NV_WRITE_USBR_REG_VALUE(usbr); \ + if (cpu_has(CPU_HWBUG_USD_ALIGNMENT)) \ + NATIVE_NV_WRITE_USD_LO_REG_VALUE(usd_lo); \ + NATIVE_NV_WRITE_USD_HI_REG_VALUE(usd_hi); \ + NATIVE_NV_WRITE_USD_LO_REG_VALUE(usd_lo); \ +} while (0) + +#define NATIVE_NV_WRITE_USBR_USD_REG(usbr, usd_hi, usd_lo) \ +do { \ + NATIVE_NV_WRITE_USBR_REG(usbr); \ + if (cpu_has(CPU_HWBUG_USD_ALIGNMENT)) \ + NATIVE_NV_WRITE_USD_LO_REG(usd_lo); \ + NATIVE_NV_WRITE_USD_HI_REG(usd_hi); \ + NATIVE_NV_WRITE_USD_LO_REG(usd_lo); \ +} while (0) + +/* + * Read/write low/high double-word Protected User Stack Descriptor + * Register (PUSD) + */ +#define NATIVE_NV_READ_PUSD_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(usd.lo) +#define NATIVE_NV_READ_PUSD_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(usd.hi) + +#define NATIVE_NV_WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value) \ + NATIVE_SET_DSREG_OPEN(usd.lo, PUSD_lo_value) +#define NATIVE_NV_WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value) \ + NATIVE_SET_DSREG_OPEN(usd.hi, PUSD_hi_value) + +/* + * Read/write double-word User Stacks Base Register (USBR) + */ +#define 
NATIVE_NV_READ_USBR_REG_VALUE() NATIVE_GET_DSREG_OPEN(sbr) +#define NATIVE_NV_READ_SBR_REG_VALUE() NATIVE_GET_DSREG_OPEN(sbr) + +#define NATIVE_NV_WRITE_USBR_REG_VALUE(USBR_value) \ + NATIVE_SET_DSREG_OPEN(sbr, USBR_value) +#define NATIVE_NV_WRITE_SBR_REG_VALUE(SBR_value) \ + NATIVE_SET_DSREG_OPEN(sbr, SBR_value) +#define NATIVE_NV_WRITE_USBR_REG(USBR) \ + NATIVE_NV_WRITE_USBR_REG_VALUE(USBR.USBR_reg) + +/* + * Read/write double-word Window Descriptor Register (WD) + */ +#define NATIVE_READ_WD_REG_VALUE() NATIVE_GET_DSREG_OPEN(wd) + +#define NATIVE_WRITE_WD_REG_VALUE(WD_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(wd, WD_value, 5) + +/* + * Read/write double-word Loop Status Register (LSR/LSR1) + */ +#define NATIVE_READ_LSR_REG_VALUE() NATIVE_GET_DSREG_OPEN(lsr) +#define NATIVE_READ_LSR1_REG_VALUE() NATIVE_GET_DSREG_OPEN(lsr1) + +#define NATIVE_WRITE_LSR_REG_VALUE(LSR_value) \ + NATIVE_SET_DSREG_OPEN(lsr, LSR_value) +#define NATIVE_WRITE_LSR1_REG_VALUE(LSR1_value) \ + NATIVE_SET_DSREG_OPEN(lsr1, LSR1_value) + +/* + * Read/write double-word Loop Status Register (ILCR/ILCR1) + */ +#define NATIVE_READ_ILCR_REG_VALUE() NATIVE_GET_DSREG_OPEN(ilcr) +#define NATIVE_READ_ILCR1_REG_VALUE() NATIVE_GET_DSREG_CLOSED(ilcr1) + +#define NATIVE_WRITE_ILCR_REG_VALUE(ILCR_value) \ + NATIVE_SET_DSREG_OPEN(ilcr, ILCR_value) +#define NATIVE_WRITE_ILCR1_REG_VALUE(ILCR1_value) \ + NATIVE_SET_DSREG_CLOSED_EXC(ilcr1, ILCR1_value, 4) + +/* + * Read/write OS register which point to current process thread info + * structure (OSR0) + */ +#define NATIVE_NV_READ_OSR0_REG_VALUE() NATIVE_GET_DSREG_OPEN(osr0) + +#define NATIVE_NV_WRITE_OSR0_REG_VALUE(osr0_value) \ + NATIVE_SET_DSREG_OPEN(osr0, osr0_value) + +/* + * Read/write OS Entries Mask (OSEM) + */ +#define NATIVE_READ_OSEM_REG_VALUE() NATIVE_GET_SREG_CLOSED(osem) + +#define NATIVE_WRITE_OSEM_REG_VALUE(osem_value) \ + NATIVE_SET_SREG_CLOSED_NOEXC(osem, osem_value, 5) + +/* + * Read/write word Base Global Register (BGR) + */ +#define 
NATIVE_READ_BGR_REG_VALUE() NATIVE_GET_SREG_OPEN(bgr) + +#if __LCC__ > 126 || __LCC__ == 126 && __LCC_MINOR__ >= 7 +#define NATIVE_WRITE_BGR_REG_VALUE(BGR_value) \ + NATIVE_SET_SREG_OPEN(bgr, BGR_value) +#else +#define NATIVE_WRITE_BGR_REG_VALUE(BGR_value) \ + NATIVE_SET_SREG_CLOSED_NOEXC(bgr, BGR_value, 5) +#endif + +/* + * Read CPU current clock register (CLKR) + */ +#define NATIVE_READ_CLKR_REG_VALUE() NATIVE_GET_DSREG_CLOSED(clkr) +#define NATIVE_WRITE_CLKR_REG_VALUE() \ + NATIVE_SET_DSREG_CLOSED_NOEXC(clkr, 0, 4) + +/* + * Read/Write system clock registers (SCLKM) + */ +#define NATIVE_READ_SCLKR_REG_VALUE() NATIVE_GET_DSREG_OPEN(sclkr) +#define NATIVE_READ_SCLKM1_REG_VALUE() NATIVE_GET_DSREG_OPEN(sclkm1) +#define NATIVE_READ_SCLKM2_REG_VALUE() NATIVE_GET_DSREG_OPEN(sclkm2) +#define NATIVE_READ_SCLKM3_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sclkm3) + +#define NATIVE_WRITE_SCLKR_REG_VALUE(reg_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(sclkr, reg_value, 4) +#define NATIVE_WRITE_SCLKM1_REG_VALUE(reg_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(sclkm1, reg_value, 4) +#define NATIVE_WRITE_SCLKM2_REG_VALUE(reg_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(sclkm2, reg_value, 4) +#define NATIVE_WRITE_SCLKM3_REG_VALUE(reg_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(sclkm3, reg_value, 4) + +extern unsigned long native_read_SCLKR_reg_value(void); +extern unsigned long native_read_SCLKM1_reg_value(void); +extern unsigned long native_read_SCLKM2_reg_value(void); +extern void native_write_SCLKR_reg_value(unsigned long reg_value); +extern void native_write_SCLKM1_reg_value(unsigned long reg_value); +extern void native_write_SCLKM2_reg_value(unsigned long reg_value); + +/* + * Read/Write Control Unit HardWare registers (CU_HW0/CU_HW1) + */ +#define NATIVE_READ_CU_HW0_REG_VALUE() NATIVE_GET_DSREG_CLOSED(cu_hw0) +#define NATIVE_READ_CU_HW1_REG_VALUE() NATIVE_GET_DSREG_CLOSED(cu_hw1) + +#define NATIVE_WRITE_CU_HW0_REG_VALUE(reg) \ + NATIVE_SET_DSREG_CLOSED_EXC(cu_hw0, reg, 5) +#define 
NATIVE_WRITE_CU_HW1_REG_VALUE(reg) \ + NATIVE_SET_DSREG_CLOSED_EXC(cu_hw1, reg, 5) + +/* + * Read/write low/high double-word Recovery point register (RPR) + */ +#define NATIVE_READ_RPR_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(rpr.lo) +#define NATIVE_READ_RPR_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(rpr.hi) +#define NATIVE_READ_SBBP_REG_VALUE() NATIVE_GET_DSREG_OPEN(sbbp) +#define NATIVE_WRITE_SBBP_REG_VALUE(x) \ + NATIVE_SET_DSREG_CLOSED_EXC(sbbp, (x), 0) + +#define NATIVE_WRITE_RPR_LO_REG_VALUE(RPR_lo_value) \ + NATIVE_SET_DSREG_OPEN(rpr.lo, RPR_lo_value) +#define NATIVE_WRITE_RPR_HI_REG_VALUE(RPR_hi_value) \ + NATIVE_SET_DSREG_OPEN(rpr.hi, RPR_hi_value) + +/* + * Read double-word CPU current Instruction Pointer register (IP) + */ +#define NATIVE_READ_IP_REG_VALUE() NATIVE_GET_DSREG_OPEN(ip) + +/* + * Read debug and monitors registers + */ +#define NATIVE_READ_DIBCR_REG_VALUE() NATIVE_GET_SREG_CLOSED(dibcr) +#define NATIVE_READ_DIBSR_REG_VALUE() NATIVE_GET_SREG_CLOSED(dibsr) +#define NATIVE_READ_DIMCR_REG_VALUE() NATIVE_GET_DSREG_CLOSED(dimcr) +#define NATIVE_READ_DIBAR0_REG_VALUE() NATIVE_GET_DSREG_OPEN(dibar0) +#define NATIVE_READ_DIBAR1_REG_VALUE() NATIVE_GET_DSREG_OPEN(dibar1) +#define NATIVE_READ_DIBAR2_REG_VALUE() NATIVE_GET_DSREG_OPEN(dibar2) +#define NATIVE_READ_DIBAR3_REG_VALUE() NATIVE_GET_DSREG_OPEN(dibar3) +#define NATIVE_READ_DIMAR0_REG_VALUE() NATIVE_GET_DSREG_OPEN(dimar0) +#define NATIVE_READ_DIMAR1_REG_VALUE() NATIVE_GET_DSREG_OPEN(dimar1) + +#define NATIVE_WRITE_DIBCR_REG_VALUE(DIBCR_value) \ + NATIVE_SET_SREG_CLOSED_NOEXC(dibcr, DIBCR_value, 4) +#define NATIVE_WRITE_DIBSR_REG_VALUE(DIBSR_value) \ + NATIVE_SET_SREG_CLOSED_NOEXC(dibsr, DIBSR_value, 4) + +static inline bool is_event_pipe_frz_sensitive(int event) +{ + return event == 0x2e || + event >= 0x30 && event <= 0x3d || + event >= 0x48 && event <= 0x4a || + event >= 0x58 && event <= 0x5a || + event >= 0x68 && event <= 0x69; +} + +static inline bool is_dimcr_pipe_frz_sensitive(e2k_dimcr_t dimcr) +{ + 
return dimcr_enabled(dimcr, 0) && + is_event_pipe_frz_sensitive(AS(dimcr)[0].event) || + dimcr_enabled(dimcr, 1) && + is_event_pipe_frz_sensitive(AS(dimcr)[1].event); +} + +#define NATIVE_WRITE_DIMCR_REG_VALUE(DIMCR_value) \ +do { \ + e2k_dimcr_t __new_value = { .word = (DIMCR_value) }; \ + \ + if (cpu_has(CPU_HWBUG_PIPELINE_FREEZE_MONITORS)) { \ + e2k_dimcr_t __old_value = { .word = NATIVE_READ_DIMCR_REG_VALUE() }; \ + bool __old_sensitive = is_dimcr_pipe_frz_sensitive(__old_value); \ + bool __new_sensitive = is_dimcr_pipe_frz_sensitive(__new_value); \ + \ + if (__old_sensitive != __new_sensitive) { \ + unsigned long flags; \ + \ + raw_all_irq_save(flags); \ + \ + e2k_cu_hw0_t cu_hw0 = { .word = NATIVE_READ_CU_HW0_REG_VALUE() }; \ + cu_hw0.pipe_frz_dsbl = (__new_sensitive) ? 1 : 0; \ + NATIVE_WRITE_CU_HW0_REG_VALUE(cu_hw0.word); \ + \ + raw_all_irq_restore(flags); \ + } \ + } \ + \ + /* 6 cycles delay guarantess that all counting \ + * is stopped and %dibsr is updated accordingly. */ \ + NATIVE_SET_DSREG_CLOSED_NOEXC(dimcr, AW(__new_value), 5); \ +} while (0) +#define NATIVE_WRITE_DIBAR0_REG_VALUE(DIBAR0_value) \ + NATIVE_SET_DSREG_OPEN(dibar0, DIBAR0_value) +#define NATIVE_WRITE_DIBAR1_REG_VALUE(DIBAR1_value) \ + NATIVE_SET_DSREG_OPEN(dibar1, DIBAR1_value) +#define NATIVE_WRITE_DIBAR2_REG_VALUE(DIBAR2_value) \ + NATIVE_SET_DSREG_OPEN(dibar2, DIBAR2_value) +#define NATIVE_WRITE_DIBAR3_REG_VALUE(DIBAR3_value) \ + NATIVE_SET_DSREG_OPEN(dibar3, DIBAR3_value) +#define NATIVE_WRITE_DIMAR0_REG_VALUE(DIMAR0_value) \ + NATIVE_SET_DSREG_OPEN(dimar0, DIMAR0_value) +#define NATIVE_WRITE_DIMAR1_REG_VALUE(DIMAR1_value) \ + NATIVE_SET_DSREG_OPEN(dimar1, DIMAR1_value) + +/* + * Read/write double-word Compilation Unit Table Register (CUTD/OSCUTD) + */ +#define NATIVE_NV_READ_CUTD_REG_VALUE() NATIVE_GET_DSREG_OPEN(cutd) +#define NATIVE_READ_OSCUTD_REG_VALUE() NATIVE_GET_DSREG_CLOSED(oscutd) + +#define NATIVE_NV_NOIRQ_WRITE_CUTD_REG_VALUE(CUTD_value) \ + 
NATIVE_SET_DSREG_OPEN_NOIRQ(cutd, CUTD_value) +#define NATIVE_WRITE_OSCUTD_REG_VALUE(CUTD_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(oscutd, CUTD_value, 5) + +/* + * Read/write word Compilation Unit Index Register (CUIR/OSCUIR) + */ +#define NATIVE_READ_CUIR_REG_VALUE() NATIVE_GET_SREG_CLOSED(cuir) +#define NATIVE_READ_OSCUIR_REG_VALUE() NATIVE_GET_SREG_CLOSED(oscuir) +#define NATIVE_WRITE_CUIR_REG_VALUE(x) \ + NATIVE_SET_SREG_CLOSED_NOEXC(cuir, (x), 5) +#define NATIVE_WRITE_OSCUIR_REG_VALUE(x) \ + NATIVE_SET_SREG_CLOSED_NOEXC(oscuir, (x), 5) + +/* + * Read/write word Processor State Register (PSR) + */ +#define NATIVE_NV_READ_PSR_REG_VALUE() NATIVE_GET_SREG_OPEN(psr) + +#define NATIVE_WRITE_PSR_REG_VALUE(PSR_value) \ + NATIVE_SET_SREG_OPEN(psr, PSR_value) +#define NATIVE_WRITE_PSR_IRQ_BARRIER(psr_val) \ + NATIVE_SET_PSR_IRQ_BARRIER(psr_val) + + +/* + * Read/write word User Processor State Register (UPSR) + */ +/* upsr reg - byte register, but linux used long flag + * to save arch_local_irq_save.To avoid casting to long(redundant stx command) + * we can used read long register.Suprisingly, but size of image.boot decreased + * 4096 byte + */ +#define NATIVE_NV_READ_UPSR_REG_VALUE() NATIVE_GET_DSREG_OPEN(upsr) +#define NATIVE_WRITE_UPSR_REG_VALUE(UPSR_value) \ + NATIVE_SET_SREG_OPEN(upsr, UPSR_value) +#define NATIVE_WRITE_UPSR_IRQ_BARRIER(upsr_val) \ + NATIVE_SET_UPSR_IRQ_BARRIER(upsr_val) + +/* + * Read/write word floating point control registers (PFPFR/FPCR/FPSR) + */ +#define NATIVE_NV_READ_PFPFR_REG_VALUE() NATIVE_GET_SREG_OPEN(pfpfr) +#define NATIVE_NV_READ_FPCR_REG_VALUE() NATIVE_GET_SREG_OPEN(fpcr) +#define NATIVE_NV_READ_FPSR_REG_VALUE() NATIVE_GET_SREG_OPEN(fpsr) + +#define NATIVE_NV_WRITE_PFPFR_REG_VALUE(PFPFR_value) \ + NATIVE_SET_SREG_OPEN(pfpfr, PFPFR_value) +#define NATIVE_NV_WRITE_FPCR_REG_VALUE(FPCR_value) \ + NATIVE_SET_SREG_OPEN(fpcr, FPCR_value) +#define NATIVE_NV_WRITE_FPSR_REG_VALUE(FPSR_value) \ + NATIVE_SET_SREG_OPEN(fpsr, FPSR_value) + +/* + * 
Read/write low/high double-word Intel segments registers (xS) + */ + +#define NATIVE_READ_CS_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(cs.lo) +#define NATIVE_READ_CS_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(cs.hi) +#define NATIVE_READ_DS_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(ds.lo) +#define NATIVE_READ_DS_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(ds.hi) +#define NATIVE_READ_ES_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(es.lo) +#define NATIVE_READ_ES_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(es.hi) +#define NATIVE_READ_FS_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(fs.lo) +#define NATIVE_READ_FS_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(fs.hi) +#define NATIVE_READ_GS_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(gs.lo) +#define NATIVE_READ_GS_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(gs.hi) +#define NATIVE_READ_SS_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(ss.lo) +#define NATIVE_READ_SS_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(ss.hi) + +#define NATIVE_CL_WRITE_CS_LO_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(cs.lo, sd) +#define NATIVE_CL_WRITE_CS_HI_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(cs.hi, sd) +#define NATIVE_CL_WRITE_DS_LO_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(ds.lo, sd) +#define NATIVE_CL_WRITE_DS_HI_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(ds.hi, sd) +#define NATIVE_CL_WRITE_ES_LO_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(es.lo, sd) +#define NATIVE_CL_WRITE_ES_HI_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(es.hi, sd) +#define NATIVE_CL_WRITE_FS_LO_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(fs.lo, sd) +#define NATIVE_CL_WRITE_FS_HI_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(fs.hi, sd) +#define NATIVE_CL_WRITE_GS_LO_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(gs.lo, sd) +#define NATIVE_CL_WRITE_GS_HI_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(gs.hi, sd) +#define NATIVE_CL_WRITE_SS_LO_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(ss.lo, sd) +#define NATIVE_CL_WRITE_SS_HI_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(ss.hi, sd) + +/* + * Read doubleword User Processor Identification Register (IDR) + */ +#define NATIVE_READ_IDR_REG_VALUE() NATIVE_GET_DSREG_OPEN(idr) + +/* + * Read/Write Processor Core 
Mode Register (CORE_MODE) + */ +#if __LCC__ > 125 || __LCC__ == 125 && __LCC_MINOR__ >= 8 +# define NATIVE_READ_CORE_MODE_REG_VALUE() NATIVE_GET_SREG_OPEN(core_mode) +#else +# define NATIVE_READ_CORE_MODE_REG_VALUE() NATIVE_GET_SREG_CLOSED(core_mode) +#endif +#define NATIVE_WRITE_CORE_MODE_REG_VALUE(modes) \ + NATIVE_SET_SREG_CLOSED_NOEXC(core_mode, modes, 5) + +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* _E2K_NATIVE_CPU_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/native_dcache_regs_access.h b/arch/e2k/include/asm/native_dcache_regs_access.h new file mode 100644 index 000000000000..383478d09a7f --- /dev/null +++ b/arch/e2k/include/asm/native_dcache_regs_access.h @@ -0,0 +1,78 @@ +/* + * native E2K MMU structures & registers. + * + * Copyright 2014 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_NATIVE_DCACHE_REGS_ACCESS_H_ +#define _E2K_NATIVE_DCACHE_REGS_ACCESS_H_ + +#ifndef __ASSEMBLY__ +#include + +#endif /* __ASSEMBLY__ */ + +#include +#include + +#ifndef __ASSEMBLY__ + +/* + * Flush DCACHE line + */ +static inline void NATIVE_FLUSH_DCACHE_LINE(unsigned long addr) +{ + ldst_rec_op_t opc = { + .fmt = 4, + .mas = MAS_DCACHE_LINE_FLUSH, + .prot = 1 + }; + + NATIVE_RECOVERY_STORE(addr, 0x0, AW(opc), 2); +} + +static inline void NATIVE_FLUSH_DCACHE_LINE_OFFSET(unsigned long addr, size_t offset) +{ + ldst_rec_op_t opc = { + .fmt = 4, + .mas = MAS_DCACHE_LINE_FLUSH, + .prot = 1 + }; + + NATIVE_RECOVERY_STORE(addr, 0x0, AW(opc) | offset, 2); +} + +/* This can be used in non-privileged mode (e.g. 
guest kernel) but + * must not be used on user addresses (this does not have .prot = 1) */ +#define NATIVE_FLUSH_DCACHE_LINE_UNPRIV(virt_addr) \ + NATIVE_WRITE_MAS_D((virt_addr), 0, MAS_DCACHE_LINE_FLUSH) + + +/* + * Clear DCACHE L1 set + */ +#define NATIVE_CLEAR_DCACHE_L1_SET(virt_addr, set) \ + NATIVE_WRITE_MAS_D( \ + mk_dcache_l1_addr((virt_addr), set, 1, 0), \ + 0, MAS_DCACHE_L1_REG) + +/* + * Write DCACHE L2 registers + */ +#define NATIVE_WRITE_L2_REG(reg_val, reg_num, bank_num) \ + NATIVE_WRITE_MAS_D( \ + mk_dcache_l2_reg_addr(reg_num, bank_num), \ + (reg_val), \ + MAS_DCACHE_L2_REG) + +/* + * Read DCACHE L2 registers + */ +#define NATIVE_READ_L2_REG(reg_num, bank_num) \ + NATIVE_READ_MAS_D( \ + mk_dcache_l2_reg_addr(reg_num, bank_num), \ + MAS_DCACHE_L2_REG) + +#endif /* ! __ASSEMBLY__ */ + +#endif /* _E2K_NATIVE_MMU_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/native_mmu_regs_access.h b/arch/e2k/include/asm/native_mmu_regs_access.h new file mode 100644 index 000000000000..c3dbdbd29bf0 --- /dev/null +++ b/arch/e2k/include/asm/native_mmu_regs_access.h @@ -0,0 +1,338 @@ +/* + * native E2K MMU structures & registers. + * + * Copyright 2014 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_NATIVE_MMU_REGS_ACCESS_H_ +#define _E2K_NATIVE_MMU_REGS_ACCESS_H_ + +#ifndef __ASSEMBLY__ +#include +#include +#include +#include +#include +#endif /* __ASSEMBLY__ */ + +#include +#include +#include + + +#undef DEBUG_MR_MODE +#undef DebugMR +#define DEBUG_MR_MODE 0 /* MMU registers access */ +#define DebugMR(...) 
DebugPrint(DEBUG_MR_MODE, ##__VA_ARGS__) + + +#ifndef __ASSEMBLY__ + +/* + * Write/read MMU register + */ +#define NATIVE_WRITE_MMU_REG(addr_val, reg_val) \ + NATIVE_WRITE_MAS_D((addr_val), (reg_val), MAS_MMU_REG) + +#define NATIVE_READ_MMU_REG(addr_val) \ + NATIVE_READ_MAS_D((addr_val), MAS_MMU_REG) +#define NATIVE_WRITE_MMU_CR(mmu_cr) \ + NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_CR_NO), \ + mmu_reg_val(mmu_cr)) +#define NATIVE_WRITE_MMU_TRAP_POINT(mmu_tc) \ + NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_TRAP_POINT_NO), \ + mmu_reg_val(mmu_tc)) +#define NATIVE_READ_MMU_TRAP_POINT() \ + NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_TRAP_POINT_NO)) +#define NATIVE_WRITE_MMU_US_CL_D(us_cl_d) \ + NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_US_CL_D_NO), \ + mmu_reg_val(us_cl_d)) +#define NATIVE_READ_MMU_US_CL_D() \ + NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_US_CL_D_NO)) +#define NATIVE_WRITE_MMU_OS_PPTB_REG_VALUE(mmu_phys_ptb) \ + NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_PPTB_NO), \ + mmu_reg_val(mmu_phys_ptb)) +#define NATIVE_READ_MMU_OS_PPTB_REG_VALUE() \ + NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_PPTB_NO)) +#define NATIVE_WRITE_MMU_OS_VPTB_REG_VALUE(mmu_virt_ptb) \ + NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_VPTB_NO), \ + mmu_reg_val(mmu_virt_ptb)) +#define NATIVE_READ_MMU_OS_VPTB_REG_VALUE() \ + NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_VPTB_NO)) +#define NATIVE_WRITE_MMU_OS_VAB_REG_VALUE(kernel_offset) \ + NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_VAB_NO), \ + mmu_reg_val(kernel_offset)) +#define NATIVE_READ_MMU_OS_VAB_REG_VALUE() \ + NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_VAB_NO)) + +#define BOOT_NATIVE_WRITE_MMU_REG(addr_val, reg_val) \ + NATIVE_WRITE_MMU_REG(addr_val, reg_val) +#define BOOT_NATIVE_READ_MMU_REG(addr_val) \ + NATIVE_READ_MMU_REG(addr_val) + +#define 
BOOT_NATIVE_WRITE_MMU_CR(mmu_cr) \ + BOOT_NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_CR_NO), \ + mmu_reg_val(mmu_cr)) +#define BOOT_NATIVE_WRITE_MMU_TRAP_POINT(mmu_tc) \ + BOOT_NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_TRAP_POINT_NO), \ + mmu_reg_val(mmu_tc)) +#define BOOT_NATIVE_READ_MMU_TRAP_POINT() \ + BOOT_NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_TRAP_POINT_NO)) +#define BOOT_NATIVE_WRITE_MMU_OS_PPTB_REG_VALUE(mmu_phys_ptb) \ + BOOT_NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_PPTB_NO), \ + mmu_reg_val(mmu_phys_ptb)) +#define BOOT_NATIVE_READ_MMU_OS_PPTB_REG_VALUE() \ + BOOT_NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_PPTB_NO)) +#define BOOT_NATIVE_WRITE_MMU_OS_VPTB_REG_VALUE(mmu_virt_ptb) \ + BOOT_NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_VPTB_NO), \ + mmu_reg_val(mmu_virt_ptb)) +#define BOOT_NATIVE_READ_MMU_OS_VPTB_REG_VALUE() \ + BOOT_NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_VPTB_NO)) +#define BOOT_NATIVE_WRITE_MMU_OS_VAB_REG_VALUE(kernel_offset) \ + BOOT_NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_VAB_NO), \ + mmu_reg_val(kernel_offset)) +#define BOOT_NATIVE_READ_MMU_OS_VAB_REG_VALUE() \ + BOOT_NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_VAB_NO)) + +/* + * Write/read Data TLB register + */ +#define NATIVE_WRITE_DTLB_REG(tlb_addr, tlb_value) \ + NATIVE_WRITE_MAS_D((tlb_addr), (tlb_value), MAS_DTLB_REG) + +#define NATIVE_READ_DTLB_REG(tlb_addr) \ + NATIVE_READ_MAS_D((tlb_addr), MAS_DTLB_REG) + +/* + * Flush TLB page/entry + */ +#define NATIVE_FLUSH_TLB_ENTRY(flush_op, addr) \ + NATIVE_WRITE_MAS_D((flush_op), (addr), MAS_TLB_PAGE_FLUSH) + +/* + * Flush ICACHE line + */ +#define NATIVE_FLUSH_ICACHE_LINE(flush_op, addr) \ + NATIVE_WRITE_MAS_D((flush_op), (addr), MAS_ICACHE_LINE_FLUSH) + +/* + * Flush and invalidate or write back CACHE(s) (invalidate all caches + * of the processor) + */ + +#define 
NATIVE_FLUSH_CACHE_L12(flush_op) \
	NATIVE_WRITE_MAS_D((flush_op), (0), MAS_CACHE_FLUSH)

/*
 * Invalidate both levels of the data cache (L1 + L2).
 *
 * The dedicated "invalidate" flush operation was removed from the
 * hardware starting with E2S, so on newer machines a write-back flush
 * is issued instead (see the NATIVE_IS_MACHINE_ES2 check below).
 * The flush is bracketed by E2K_WAIT_MA / E2K_WAIT_FLUSH with all
 * interrupts disabled — presumably to drain outstanding memory
 * accesses before the flush and wait for its completion after
 * (TODO confirm exact wait semantics against the E2K ISA manual).
 */
static inline void
native_invalidate_CACHE_L12(void)
{
	int invalidate_supported;
	unsigned long flags;

	DebugMR("Flush : Invalidate all CACHEs (op 0x%lx)\n",
		_flush_op_invalidate_cache_L12);

	/* Invalidate operation was removed in E2S */
	invalidate_supported = NATIVE_IS_MACHINE_ES2;

	raw_all_irq_save(flags);
	E2K_WAIT_MA;
	if (invalidate_supported)
		NATIVE_FLUSH_CACHE_L12(_flush_op_invalidate_cache_L12);
	else
		NATIVE_FLUSH_CACHE_L12(_flush_op_write_back_cache_L12);
	E2K_WAIT_FLUSH;
	raw_all_irq_restore(flags);
}

/*
 * Write back (without invalidating) both levels of the data cache.
 * Same barrier/IRQ discipline as native_invalidate_CACHE_L12().
 */
static inline void
native_write_back_CACHE_L12(void)
{
	unsigned long flags;

	DebugMR("Flush : Write back all CACHEs (op 0x%lx)\n",
		_flush_op_write_back_cache_L12);
	raw_all_irq_save(flags);
	E2K_WAIT_MA;
	NATIVE_FLUSH_CACHE_L12(_flush_op_write_back_cache_L12);
	E2K_WAIT_FLUSH;
	raw_all_irq_restore(flags);
}

/*
 * Flush TLB (invalidate all TLBs of the processor)
 */

#define NATIVE_FLUSH_TLB_ALL(flush_op) \
	NATIVE_WRITE_MAS_D((flush_op), (0), MAS_TLB_FLUSH)

/*
 * Invalidate every TLB entry of the local processor.
 * Note: unlike the cache flushes above, completion here is awaited
 * with E2K_WAIT(_fl_c | _ma_c) rather than E2K_WAIT_FLUSH.
 */
static inline void
native_flush_TLB_all(void)
{
	unsigned long flags;

	DebugMR("Flush all TLBs (op 0x%lx)\n", _flush_op_tlb_all);
	raw_all_irq_save(flags);
	E2K_WAIT_ST;
	NATIVE_FLUSH_TLB_ALL(_flush_op_tlb_all);
	E2K_WAIT(_fl_c | _ma_c);
	raw_all_irq_restore(flags);
}

/*
 * Flush ICACHE (invalidate instruction caches of the processor)
 */

#define NATIVE_FLUSH_ICACHE_ALL(flush_op) \
	NATIVE_WRITE_MAS_D((flush_op), (0), MAS_ICACHE_FLUSH)

/*
 * Invalidate the whole instruction cache.
 * Runs with interrupts left enabled — unlike the data-cache/TLB
 * flushes above no raw_all_irq_save() is taken here
 * (NOTE(review): presumably safe because ICACHE flush does not
 * race with the NMI path — confirm).
 */
static inline void
native_flush_ICACHE_all(void)
{
	DebugMR("Flush all ICACHE op 0x%lx\n", _flush_op_icache_all);
	E2K_WAIT_ST;
	NATIVE_FLUSH_ICACHE_ALL(_flush_op_icache_all);
	E2K_WAIT_FLUSH;
}

/*
 * Get Entry probe for virtual address
 */
#define NATIVE_ENTRY_PROBE_MMU_OP(addr_val) \
	NATIVE_READ_MAS_D((addr_val), MAS_ENTRY_PROBE)

/*
 * Get physical address for virtual address
 */
#define NATIVE_ADDRESS_PROBE_MMU_OP(addr_val) \
	NATIVE_READ_MAS_D((addr_val), MAS_VA_PROBE)

/*
 * Read CLW register
 */
#define NATIVE_READ_CLW_REG(clw_addr) \
	NATIVE_READ_MAS_D_5((clw_addr), MAS_CLW_REG)

/*
 * Write CLW register
 */
#define NATIVE_WRITE_CLW_REG(clw_addr, val) \
	NATIVE_WRITE_MAS_D((clw_addr), (val), MAS_CLW_REG)

/*
 * native MMU DEBUG registers access
 *
 * Raw *_REG_VALUE accessors read/write the hardware debug registers
 * (%ddbar0-3 breakpoint addresses, %ddbcr/%ddbsr control/status,
 * %ddmar0-1/%ddmcr monitor registers) via NATIVE_GET/SET_MMUREG.
 */
#define NATIVE_READ_DDBAR0_REG_VALUE() \
	NATIVE_GET_MMUREG(ddbar0)
#define NATIVE_READ_DDBAR1_REG_VALUE() \
	NATIVE_GET_MMUREG(ddbar1)
#define NATIVE_READ_DDBAR2_REG_VALUE() \
	NATIVE_GET_MMUREG(ddbar2)
#define NATIVE_READ_DDBAR3_REG_VALUE() \
	NATIVE_GET_MMUREG(ddbar3)
#define NATIVE_READ_DDBCR_REG_VALUE() \
	NATIVE_GET_MMUREG(ddbcr)
#define NATIVE_READ_DDBSR_REG_VALUE() \
	NATIVE_GET_MMUREG(ddbsr)
#define NATIVE_READ_DDMAR0_REG_VALUE() \
	NATIVE_GET_MMUREG(ddmar0)
#define NATIVE_READ_DDMAR1_REG_VALUE() \
	NATIVE_GET_MMUREG(ddmar1)
#define NATIVE_READ_DDMCR_REG_VALUE() \
	NATIVE_GET_MMUREG(ddmcr)
#define NATIVE_WRITE_DDBAR0_REG_VALUE(value) \
	NATIVE_SET_MMUREG(ddbar0, value)
#define NATIVE_WRITE_DDBAR1_REG_VALUE(value) \
	NATIVE_SET_MMUREG(ddbar1, value)
#define NATIVE_WRITE_DDBAR2_REG_VALUE(value) \
	NATIVE_SET_MMUREG(ddbar2, value)
#define NATIVE_WRITE_DDBAR3_REG_VALUE(value) \
	NATIVE_SET_MMUREG(ddbar3, value)
#define NATIVE_WRITE_DDBCR_REG_VALUE(value) \
	NATIVE_SET_MMUREG(ddbcr, value)
#define NATIVE_WRITE_DDBSR_REG_VALUE(value) \
	NATIVE_SET_MMUREG(ddbsr, value)
#define NATIVE_WRITE_DDMAR0_REG_VALUE(value) \
	NATIVE_SET_MMUREG(ddmar0, value)
#define NATIVE_WRITE_DDMAR1_REG_VALUE(value) \
	NATIVE_SET_MMUREG(ddmar1, value)
/* 4 cycles delay guarantees that all counting
 * is stopped and %ddbsr is updated accordingly.
*/ +#define NATIVE_WRITE_DDMCR_REG_VALUE(value) \ + NATIVE_SET_MMUREG_CLOSED(ddmcr, value, 3) + +#define NATIVE_READ_DDBAR0_REG() \ + NATIVE_READ_DDBAR0_REG_VALUE() +#define NATIVE_READ_DDBAR1_REG() \ + NATIVE_READ_DDBAR1_REG_VALUE() +#define NATIVE_READ_DDBAR2_REG() \ + NATIVE_READ_DDBAR2_REG_VALUE() +#define NATIVE_READ_DDBAR3_REG() \ + NATIVE_READ_DDBAR3_REG_VALUE() +#define NATIVE_READ_DDBCR_REG() \ +({ \ + e2k_ddbcr_t ddbcr; \ + \ + ddbcr.DDBCR_reg = NATIVE_READ_DDBCR_REG_VALUE(); \ + ddbcr; \ +}) +#define NATIVE_READ_DDBSR_REG() \ +({ \ + e2k_ddbsr_t ddbsr; \ + \ + ddbsr.DDBSR_reg = NATIVE_READ_DDBSR_REG_VALUE(); \ + ddbsr; \ +}) +#define NATIVE_READ_DDMAR0_REG() \ + NATIVE_READ_DDMAR0_REG_VALUE() +#define NATIVE_READ_DDMAR1_REG() \ + NATIVE_READ_DDMAR1_REG_VALUE() +#define NATIVE_READ_DDMCR_REG() \ +({ \ + e2k_ddmcr_t ddmcr; \ + \ + ddmcr.DDMCR_reg = NATIVE_READ_DDMCR_REG_VALUE(); \ + ddmcr; \ +}) +#define NATIVE_WRITE_DDBAR0_REG(value) \ + NATIVE_WRITE_DDBAR0_REG_VALUE(value) +#define NATIVE_WRITE_DDBAR1_REG(value) \ + NATIVE_WRITE_DDBAR1_REG_VALUE(value) +#define NATIVE_WRITE_DDBAR2_REG(value) \ + NATIVE_WRITE_DDBAR2_REG_VALUE(value) +#define NATIVE_WRITE_DDBAR3_REG(value) \ + NATIVE_WRITE_DDBAR3_REG_VALUE(value) +#define NATIVE_WRITE_DDBCR_REG(value) \ + NATIVE_WRITE_DDBCR_REG_VALUE(value.DDBCR_reg) +#define NATIVE_WRITE_DDBSR_REG(value) \ + NATIVE_WRITE_DDBSR_REG_VALUE(value.DDBSR_reg) +#define NATIVE_WRITE_DDMAR0_REG(value) \ + NATIVE_WRITE_DDMAR0_REG_VALUE(value) +#define NATIVE_WRITE_DDMAR1_REG(value) \ + NATIVE_WRITE_DDMAR1_REG_VALUE(value) +#define NATIVE_WRITE_DDMCR_REG(value) \ + NATIVE_WRITE_DDMCR_REG_VALUE(value.DDMCR_reg) + +#endif /* ! 
__ASSEMBLY__ */ + +#endif /* _E2K_NATIVE_MMU_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/nbsr_v6_regs.h b/arch/e2k/include/asm/nbsr_v6_regs.h new file mode 100644 index 000000000000..e94c5071d510 --- /dev/null +++ b/arch/e2k/include/asm/nbsr_v6_regs.h @@ -0,0 +1,301 @@ +#pragma once + +/* HC monitors */ +#define HC_MCR 0x360 +#define HC_MID 0x364 +#define HC_MAR0_LO 0x368 +#define HC_MAR0_HI 0x36c +#define HC_MAR1_LO 0x370 +#define HC_MAR1_HI 0x374 + +/* IOMMU monitors - all processors */ +#define IOMMU_MCR 0x3c0 +#define IOMMU_MID 0x3c4 +#define IOMMU_MAR0_LO 0x3c8 +#define IOMMU_MAR0_HI 0x3cc +#define IOMMU_MAR1_LO 0x3d0 +#define IOMMU_MAR1_HI 0x3d4 + +/* Additional IOMMU monitors - e2c3 only. + * EDBC_IOMMU_* registers are used only to broadcast + * writing into ED{26-31}_IOMMU_* registers. */ +#define EDBC_IOMMU_MCR 0x50c0 +#define EDBC_IOMMU_MID 0x50c4 +#define EDBC_IOMMU_MAR0_LO 0x50c8 +#define EDBC_IOMMU_MAR0_HI 0x50cc +#define EDBC_IOMMU_MAR1_LO 0x50d0 +#define EDBC_IOMMU_MAR1_HI 0x50d4 +#define ED26_IOMMU_MCR 0x5d40 +#define ED26_IOMMU_MID 0x5d44 +#define ED26_IOMMU_MAR0_LO 0x5d48 +#define ED26_IOMMU_MAR0_HI 0x5d4c +#define ED26_IOMMU_MAR1_LO 0x5d50 +#define ED26_IOMMU_MAR1_HI 0x5d54 +#define ED27_IOMMU_MCR 0x5dc0 +#define ED27_IOMMU_MID 0x5dc4 +#define ED27_IOMMU_MAR0_LO 0x5dc8 +#define ED27_IOMMU_MAR0_HI 0x5dcc +#define ED27_IOMMU_MAR1_LO 0x5dd0 +#define ED27_IOMMU_MAR1_HI 0x5dd4 +#define ED28_IOMMU_MCR 0x5e40 +#define ED28_IOMMU_MID 0x5e44 +#define ED28_IOMMU_MAR0_LO 0x5e48 +#define ED28_IOMMU_MAR0_HI 0x5e4c +#define ED28_IOMMU_MAR1_LO 0x5e50 +#define ED28_IOMMU_MAR1_HI 0x5e54 +#define ED29_IOMMU_MCR 0x5ec0 +#define ED29_IOMMU_MID 0x5ec4 +#define ED29_IOMMU_MAR0_LO 0x5ec8 +#define ED29_IOMMU_MAR0_HI 0x5ecc +#define ED29_IOMMU_MAR1_LO 0x5ed0 +#define ED29_IOMMU_MAR1_HI 0x5ed4 +#define ED30_IOMMU_MCR 0x5f40 +#define ED30_IOMMU_MID 0x5f44 +#define ED30_IOMMU_MAR0_LO 0x5f48 +#define ED30_IOMMU_MAR0_HI 0x5f4c +#define ED30_IOMMU_MAR1_LO 0x5f50 
+#define ED30_IOMMU_MAR1_HI 0x5f54 +#define ED31_IOMMU_MCR 0x5fc0 +#define ED31_IOMMU_MID 0x5fc4 +#define ED31_IOMMU_MAR0_LO 0x5fc8 +#define ED31_IOMMU_MAR0_HI 0x5fcc +#define ED31_IOMMU_MAR1_LO 0x5fd0 +#define ED31_IOMMU_MAR1_HI 0x5fd4 + +/* MC monitors */ +#define MC_CH 0x400 +#define MC_CFG 0x418 +#define MC_OPMB 0x424 +#define MC_ECC 0x440 +#define MC_STATUS 0x44c +#define MC_MON_CTL 0x450 +#define MC_MON_CTR0 0x454 +#define MC_MON_CTR1 0x458 +#define MC_MON_CTRext 0x45c + +/* HMU monitors */ +#define HMU_MIC 0xd00 +#define HMU_MCR 0xd14 +#define HMU0_INT 0xd40 +#define HMU0_MAR0_LO 0xd44 +#define HMU0_MAR0_HI 0xd48 +#define HMU0_MAR1_LO 0xd4c +#define HMU0_MAR1_HI 0xd50 +#define HMU1_INT 0xd70 +#define HMU1_MAR0_LO 0xd74 +#define HMU1_MAR0_HI 0xd78 +#define HMU1_MAR1_LO 0xd7c +#define HMU1_MAR1_HI 0xd80 +#define HMU2_INT 0xda0 +#define HMU2_MAR0_LO 0xda4 +#define HMU2_MAR0_HI 0xda8 +#define HMU2_MAR1_LO 0xdac +#define HMU2_MAR1_HI 0xdb0 +#define HMU3_INT 0xdd0 +#define HMU3_MAR0_LO 0xdd4 +#define HMU3_MAR0_HI 0xdd8 +#define HMU3_MAR1_LO 0xddc +#define HMU3_MAR1_HI 0xde0 + +/* PREPIC monitors */ +#define PREPIC_MCR 0x8070 +#define PREPIC_MID 0x8074 +#define PREPIC_MAR0_LO 0x8080 +#define PREPIC_MAR0_HI 0x8084 +#define PREPIC_MAR1_LO 0x8090 +#define PREPIC_MAR1_HI 0x8094 + +/* + * HC monitor control register (HC_MCR) + */ +typedef union { + struct { + u32 v0 : 1; + u32 __unused1 : 1; + u32 es0 : 6; + u32 v1 : 1; + u32 __unused2 : 1; + u32 es1 : 6; + u32 __unused3 : 16; + }; + u32 word; +} e2k_hc_mcr_t; + +/* + * HC monitor ID register (HC_MID) + */ +typedef union { + struct { + u32 id0 : 16; + u32 id1 : 16; + }; + u32 word; +} e2k_hc_mid_t; + +/* + * IOMMU monitor control register (IOMMU_MCR) + */ +typedef union { + struct { + u32 v0 : 1; + u32 __unused1 : 1; + u32 es0 : 6; + u32 v1 : 1; + u32 __unused2 : 1; + u32 es1 : 6; + u32 __unused3 : 16; + }; + u32 word; +} e2k_iommu_mcr_t; + +/* + * IOMMU monitor ID register (IOMMU_MID) + */ +typedef union { + struct { + 
u32 id0 : 16; + u32 id1 : 16; + }; + u32 word; +} e2k_iommu_mid_t; + +/* + * MC status register (MC_STATUS) + */ +typedef union { + struct { + u32 ecc_err : 1; + u32 ddrint_err : 1; + u32 phyccm_par_err : 1; + u32 dmem_par_err : 1; + u32 bridge_par_err : 1; + u32 phy_interrupt : 1; + u32 phy_init_complete : 1; + u32 dfi_par_err : 1; + u32 meminit_finish : 1; + u32 mon0_of : 1; + u32 mon1_of : 1; + u32 dfi_err : 1; + u32 dfi_err_info : 1; + u32 par_alert_delay : 6; + u32 rst_done : 1; + u32 wrcrc_aleert_delay : 6; + u32 __unused : 6; + }; + u32 word; +} e2k_mc_status_t; + +/* + * MC channel select register (MC_CH) + */ +typedef union { + struct { + u32 n : 4; + u32 __unused : 28; + }; + u32 word; +} e2k_mc_ch_t; + +/* + * MC monitor control register (MC_MON_CTL) + */ +typedef union { + struct { + u32 rst0 : 1; + u32 rst1 : 1; + u32 frz0 : 1; + u32 frz1 : 1; + u32 ld0 : 1; + u32 ld1 : 1; + u32 es0 : 5; + u32 es1 : 5; + u32 lb0 : 8; + u32 lb1 : 8; + }; + struct { + u32 __pad : 16; + u32 ba0 : 2; + u32 bg0 : 2; + u32 cid0 : 3; + u32 all0 : 1; + u32 ba1 : 2; + u32 bg1 : 2; + u32 cid1 : 3; + u32 all1 : 1; + }; + u32 word; +} e2k_mc_mon_ctl_t; + +/* + * MC monitor #0,1 counter high (MC_MON_CTRext) + */ +typedef union { + u16 cnt[2]; + u32 word; +} e2k_mc_mon_ctrext_t; + + +/* + * HMU memory interleaving control register (HMU_MIC) + */ +typedef union { + struct { + u32 mcil_bit0 : 6; + u32 mcil_bit1 : 6; + u32 mcil_bit2 : 6; + u32 mcil_bit3 : 6; + u32 mcen : 8; + }; + u32 word; +} e2k_hmu_mic_t; + +/* + * HMU monitor control register (HMU_MCR) + */ +typedef union { + struct { + u32 v0 : 1; + u32 __unused1 : 1; + u32 es0 : 6; + u32 v1 : 1; + u32 __unused2 : 1; + u32 es1 : 6; + u32 flt0_off : 1; + u32 flt0_rqid : 7; + u32 flt0_cid : 1; + u32 flt0_bid : 1; + u32 flt0_xid : 1; + u32 flt1_off : 1; + u32 flt1_node : 2; + u32 flt1_rnode : 1; + u32 __unused3 : 1; + }; + u32 word; +} e2k_hmu_mcr_t; + +/* + * PREPIC monitor control register (PREPIC_MCR) + */ +typedef union { + 
struct { + u32 vc0 : 1; + u32 __unused1 : 1; + u32 es0 : 6; + u32 vc1 : 1; + u32 __unused2 : 1; + u32 es1 : 6; + u32 __unused3 : 16; + }; + u32 word; +} e2k_prepic_mcr_t; + +/* + * PREPIC monitor ID register (PREPIC_MID) + */ +typedef union { + struct { + u32 id0 : 16; + u32 id1 : 16; + }; + u32 word; +} e2k_prepic_mid_t; diff --git a/arch/e2k/include/asm/nmi.h b/arch/e2k/include/asm/nmi.h new file mode 100644 index 000000000000..0984dbad1d23 --- /dev/null +++ b/arch/e2k/include/asm/nmi.h @@ -0,0 +1,95 @@ +#ifndef _ASM_E2K_NMI_H +#define _ASM_E2K_NMI_H + +#include + +/* + * ATTENTION nmi_call_function_xxx() are actually more limited + * than smp_call_function_xxx(). + * + * 1) You cannot use ANY drivers (since they are usually NOT async-safe). + * + * 2) You cannot use printk() (as a consequence of 1). + * + * 3) Function must be fast and non-blocking. + * + * So instead of using printk() it is better to save your message + * into a temporary buffer and later print that buffer from the function + * which called nmi_call_function_xxx(). 
+ */ +extern void nmi_call_function_init(void); +extern void nmi_call_function_interrupt(void); +#ifdef CONFIG_SMP +extern int nmi_call_function(void (*func)(void *), void *info, int wait, + int timeout_msec); +extern int nmi_call_function_mask(const cpumask_t *mask, void (*func)(void *), + void *info, int wait, int timeout_msec); +extern int nmi_call_function_single(int cpu, void (*func)(void *), + void *info, int wait, int timeout_msec); +extern int nmi_call_function_single_offline(int cpu, void (*func)(void *), + void *info, int wait, int timeout_msec); +# define nmi_on_each_cpu(func, info, wait, timeout_msec) \ + ({ \ + unsigned long __flags; \ + WARN_ON_ONCE(raw_nmi_irqs_disabled()); \ + raw_local_irq_save(__flags); \ + nmi_call_function(func, info, wait, timeout_msec); \ + raw_all_irq_disable(); \ + func(info); \ + if (!raw_irqs_disabled_flags(__flags)) \ + trace_hardirqs_on(); \ + raw_all_irq_restore(__flags); \ + 0; \ + }) +#else +static inline int nmi_call_function_mask(const cpumask_t *mask, + void (*func)(void *), void *info, int wait, int timeout_msec) +{ + unsigned long flags; + + if (cpumask_test_cpu(0, mask)) { + raw_all_irq_save(flags); + func(info); + raw_all_irq_restore(flags); + } + + return 0; +} + +static inline int nmi_call_function_single(int cpu, void (*func)(void *), + void *info, int wait, int timeout_msec) +{ + unsigned long flags; + + WARN_ON(cpu != 0); + + raw_all_irq_save(flags); + func(info); + raw_all_irq_restore(flags); + + return 0; +} + +static inline int nmi_call_function_single_offline(int cpu, void (*func)(void *), + void *info, int wait, int timeout_msec) +{ + BUG(); +} + +static inline int up_nmi_call_function(void (*func)(void *), void *info) +{ + return 0; +} +# define nmi_call_function(func, info, wait, timeout) \ + (up_nmi_call_function(func, info)) +# define nmi_on_each_cpu(func, info, wait, timeout) \ + ({ \ + unsigned long __flags; \ + raw_all_irq_save(__flags); \ + func(info); \ + raw_all_irq_restore(__flags); \ + 0; 
\ + }) +#endif + +#endif /* _ASM_E2K_NMI_H */ diff --git a/arch/e2k/include/asm/numnodes.h b/arch/e2k/include/asm/numnodes.h new file mode 100644 index 000000000000..1a06803f2f6d --- /dev/null +++ b/arch/e2k/include/asm/numnodes.h @@ -0,0 +1,124 @@ +#ifndef _E2K_NUMNODES_H +#define _E2K_NUMNODES_H + +#include +#include + +#if NODES_SHIFT > 0 + +#ifndef DEBUG_NODES_MODE +#define DEBUG_NODES_MODE 0 /* for each nodes from */ +#endif /* ! DEBUG_NODES_MODE */ +#define DebugN(...) DebugPrintCont(DEBUG_NODES_MODE ,##__VA_ARGS__) + +/* + * The define can be used only for preempt disable mode + * or argument 'from' is not dinamicaly recalculated expression + * as numa_node_id() + */ +#define for_each_node_mask_from_not_preempt(node, from, mask) \ + for ((node) = (from), \ + ({DebugN("for node %d = from %d, mask " \ + "0x%lx\n", node, from, mask.bits[0]); }), \ + ((!node_isset((node), (mask))) ? \ + ({DebugN(" node is not set "); \ + (node) = next_node((node), (mask)) \ + ; DebugN("so node is next %d\n", node);}) \ + : \ + ({DebugN(" node is set\n"); \ + (node);})); \ + ( ({DebugN("while node %d >= from %d ", node, from); \ + ((node) >= (from));}) ? ({DebugN(" ? node %d < " \ + "MAX_NUMNODES %d\n", \ + node, MAX_NUMNODES); \ + ((node) < MAX_NUMNODES);}) \ + : \ + ({DebugN(" : node < from\n"); \ + ((node) < (from));})); \ + (({DebugN("next: node %d >= from %d", node, from); \ + ((node) >= (from));}) ? \ + ({DebugN(" ? "); \ + ((node) = next_node((node), (mask))); \ + DebugN("node = next %d", node); \ + DebugN(" node %d >= MAX_NUMNODES %d ", \ + node, MAX_NUMNODES); \ + (node) >= MAX_NUMNODES;}) ? \ + ({DebugN(" ? "); \ + (({((node) = first_node((mask))); \ + DebugN("node = first %d >= from %d", \ + node, from); \ + (node) >= (from);}) ? \ + ({DebugN(" ? 
node = MAX_NUMNODES " \ + "%d\n", MAX_NUMNODES); \ + (node) = MAX_NUMNODES;}) \ + : \ + ({ DebugN(" : node %d\n", node); \ + (node);}));}) \ + : \ + ({DebugN(" : node %d\n", node); \ + (node);}) \ + : \ + ({DebugN(" : "); \ + ({((node) = next_node((node), (mask))); \ + DebugN("node = next %d", node); \ + (node) >= (from);}) ? \ + ({DebugN(" ? node = MAX_NUMNODES %d\n", \ + MAX_NUMNODES); \ + (node) = MAX_NUMNODES;}) \ + : \ + ({DebugN(" : node %d\n", node); \ + (node);});}))) +/* + * The define can be used at preempt enable mode, but you should pass + * additional temporary variable to keep 'from' value + */ +#define for_each_node_mask_from_preempt(node, from, mask, tmp_from) \ + tmp_from = (from); \ + for_each_node_mask_from_not_preempt((node), tmp_from, (mask)) +#else /* NODES_SHIFT == 0 */ +#define for_each_node_mask_from_not_preempt(node, from, mask) \ + for ((node) = (from); (node) < 1; (node)++) +#define for_each_node_mask_from_preempt(node, from, mask, tmp_from) \ + for_each_node_mask_from_not_preempt((node), (from), (mask)) +#endif /* NODES_SHIFT > 0 */ + +#define for_each_cpu_of_node(node, cpu, cpu_mask) \ + cpu_mask = node_to_present_cpumask(node); \ + for_each_cpu(cpu, &cpu_mask) + +#define for_each_node_from_not_preempt(node, from) \ + for_each_node_mask_from_not_preempt((node), (from), \ + node_possible_map) +#define for_each_node_from_preempt(node, from, tmp_from) \ + for_each_node_mask_from_preempt((node), (from), \ + node_possible_map, (tmp_from)) +#define for_each_online_node_from_not_preempt(node, from) \ + for_each_node_mask_from_not_preempt((node), (from), \ + node_online_map) +#define for_each_online_node_from_preempt(node, from, tmp_from) \ + for_each_node_mask_from_preempt((node), (from), \ + node_online_map, (tmp_from)) +#ifdef CONFIG_NUMA +#define calculate_node_has_not_dup_kernel_map(node_mask) \ +({ \ + nodemask_t node_present_map; \ + nodes_clear(node_present_map); \ + memcpy(node_present_map.bits, &phys_nodes_map, \ + 
sizeof(phys_nodes_map)); \ + nodes_andnot((node_mask), node_present_map, \ + node_has_dup_kernel_map); \ +}) +#define node_has_dup_kernel(nid) \ + node_isset((nid), node_has_dup_kernel_map) +#define for_each_node_has_dup_kernel(node) \ + for_each_node_mask((node), node_has_dup_kernel_map) +#define for_each_node_has_not_dup_kernel(node, node_mask) \ + calculate_node_has_not_dup_kernel_map((node_mask)); \ + for_each_node_mask((node), (node_mask)) +#else /* ! CONFIG_NUMA */ +#define node_has_dup_kernel(nid) ((nid) == 0) +#define for_each_node_has_dup_kernel(node) \ + for ((node) = 0; (node) < 1; (node)++) +#endif /* CONFIG_NUMA */ + +#endif /* _E2K_NUMNODES_H */ diff --git a/arch/e2k/include/asm/of_device.h b/arch/e2k/include/asm/of_device.h new file mode 100644 index 000000000000..342357ad03a7 --- /dev/null +++ b/arch/e2k/include/asm/of_device.h @@ -0,0 +1,8 @@ + +#ifndef E2K_OF_DEVICE_H +#define E2K_OF_DEVICE_H + +#include + +#endif + diff --git a/arch/e2k/include/asm/of_platform.h b/arch/e2k/include/asm/of_platform.h new file mode 100644 index 000000000000..d0abb27aae8d --- /dev/null +++ b/arch/e2k/include/asm/of_platform.h @@ -0,0 +1,4 @@ + +/* + * It is empty file just because it required to be included + */ diff --git a/arch/e2k/include/asm/openprom.h b/arch/e2k/include/asm/openprom.h new file mode 100644 index 000000000000..e7f6988110b5 --- /dev/null +++ b/arch/e2k/include/asm/openprom.h @@ -0,0 +1,47 @@ +/* $Id: openprom.h,v 1.1 2005/12/22 16:14:19 alexmipt Exp $ + * openprom.h: Prom structures and defines for access to the OPENBOOT + * prom routines and data areas. + * + * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 2005 Alexander Shmelev (ashmelev@task.sun.mcst.ru) + */ + +#ifndef __OPENPROM_H +#define __OPENPROM_H + +/* Routines for traversing the prom device tree. 
*/ +struct linux_nodeops { + int (*no_nextnode)(int node); + int (*no_child)(int node); + int (*no_proplen)(int node, char *name); + int (*no_getprop)(int node, char *name, char *val); + int (*no_setprop)(int node, char *name, char *val, int len); + char * (*no_nextprop)(int node, char *name); +}; + +/* More fun PROM structures for device probing. */ +#define PROMREG_MAX 16 +#define PROMVADDR_MAX 16 +#define PROMINTR_MAX 15 + +struct linux_prom_registers { + u32 which_io; /* Registers space */ + u32 phys_addr; /* The physical address of this register */ + u32 reg_size; /* How many bytes does this register take up? */ +}; + +/* Element of the "ranges" vector */ +struct linux_prom_ranges { + u32 ot_child_space; + u32 ot_child_base; /* Bus feels this */ + u32 ot_parent_space; + u32 ot_parent_base; /* CPU looks from here */ + u32 or_size; +}; + +struct linux_prom_irqs { + u32 pri; /* IRQ priority */ + u32 vector; /* This is foobar, what does it do? */ +}; + +#endif /* !(__OPENPROM_H) */ diff --git a/arch/e2k/include/asm/oplib.h b/arch/e2k/include/asm/oplib.h new file mode 100644 index 000000000000..b9b49c8d7c73 --- /dev/null +++ b/arch/e2k/include/asm/oplib.h @@ -0,0 +1,90 @@ +/* $Id: oplib.h,v 1.2 2007/09/05 12:05:52 kostin Exp $ + * oplib.h: Describes the interface and available routines in the + * Linux Prom library. + * + * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 2005 Alexander Shmelev (ashmelev@task.sun.mcst.ru) + */ + +#ifndef __OPLIB_H +#define __OPLIB_H + +#include "openprom.h" +#include + +/* Root node of the prom device tree, this stays constant after + * initialization is complete. + */ +extern int prom_root_node; + +/* The functions... */ + +/* PROM device tree traversal functions... */ + +/* Get the child node of the given node, or zero if no child exists. */ +extern int prom_getchild(int parent_node); + +/* Get the next sibling node of the given node, or zero if no further + * siblings exist. 
+ */ +extern int prom_getsibling(int node); + +/* Get the length, at the passed node, of the given property type. + * Returns -1 on error (ie. no such property at this node). + */ +extern int prom_getproplen(int thisnode, const char *property); + +/* Fetch the requested property using the given buffer. Returns + * the number of bytes the prom put into your buffer or -1 on error. + */ +extern int prom_getproperty(int thisnode, const char *property, + char *prop_buffer, int propbuf_size); + +/* Acquire an integer property. */ +extern int prom_getint(int node, char *property); + +/* Acquire an integer property, with a default value. */ +extern int prom_getintdefault(int node, char *property, int defval); + +/* Acquire a boolean property, 0=FALSE 1=TRUE. */ +extern int prom_getbool(int node, char *prop); + +/* Acquire a string property, null string on error. */ +extern void prom_getstring(int node, char *prop, char *buf, int bufsize); + +/* Does the passed node have the given "name"? YES=1 NO=0 */ +extern int prom_nodematch(int thisnode, char *name); + +/* Puts in buffer a prom name in the form name@x,y or name (x for which_io + * and y for first regs phys address + */ +extern int prom_getname(int node, char *buf, int buflen); + +/* Search all siblings starting at the passed node for "name" matching + * the given string. Returns the node on success, zero on failure. + */ +extern int prom_searchsiblings(int node_start, char *name); + +/* Return the first property type, as a string, for the given node. + * Returns a null string on error. + */ +extern char *prom_firstprop(int node, char *buffer); + +/* Returns the next property after the passed property for the given + * node. Returns null string on failure. + */ +extern char *prom_nextprop(int node, char *prev_property, char *buffer); + +/* Returns phandle of the path specified */ +extern int prom_finddevice(char *name); + +/* Returns 1 if the specified node has given property. 
*/ +extern int prom_node_has_property(int node, char *property); + +/* Set the indicated property at the given node with the passed value. + * Returns the number of bytes of your value that the prom took. + */ +extern int prom_setprop(int node, const char *prop_name, char *prop_value, + int value_size); + +#endif /* !(__OPLIB_H) */ diff --git a/arch/e2k/include/asm/ord_rwlock.h b/arch/e2k/include/asm/ord_rwlock.h new file mode 100644 index 000000000000..6c31a80b5975 --- /dev/null +++ b/arch/e2k/include/asm/ord_rwlock.h @@ -0,0 +1,403 @@ +/* + * New SMP ordered read/write spinlock mechanism. + * Locking is ordered and later readers cannot outrun former writers. + * Locking order based on coupons (tickets) received while first try to get + * lock, if lock is already taken by other. + * + * read/write spinlocks initial state allowing 2^32 active readers and + * only one active writer. But coupon discipline allows simultaniously + * have only 2^16 registered users of the lock: active + waiters +*/ + + +#ifndef __ASM_ORD_RWLOCK_H +#define __ASM_ORD_RWLOCK_H + +#ifndef __LINUX_SPINLOCK_TYPES_H +# error "please don't include this file directly" +#endif + +#ifndef __ASSEMBLY__ + +#include +#include +#include + +#undef DEBUG_RWLOCK_MODE +#undef DebugRW +#define DEBUG_RWLOCK_MODE 0 /* RW spinlocks debugging */ +#define DebugRW(fmt, args...) \ +({ \ + if (DEBUG_RWLOCK_MODE) \ + host_printk("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_SLOW_RWLOCK_MODE +#undef DebugSLRW +#define DEBUG_SLOW_RWLOCK_MODE 0 /* RW spinlocks slow path debugging */ +#define DebugSLRW(fmt, args...) \ +({ \ + if (DEBUG_SLOW_RWLOCK_MODE) \ + host_printk("%s(): " fmt, __func__, ##args); \ +}) + +/* + * Read-write spinlocks, allowing multiple readers but only one writer. 
 */

/*
 * Native slow-path hooks for the ordered (ticket) rwlock.
 *
 * On bare hardware a waiter always spins on the CPU, so every hook
 * below is an intentional no-op.  They exist so that a paravirtualized
 * guest can substitute hypercall-based wait/wake implementations via
 * the dispatch block further down.
 */
static inline void
native_ord_wait_read_lock_slow(arch_rwlock_t *rw)
{
	/* waiting always on CPU, so nothing more to do */
}
static inline void
native_ord_wait_write_lock_slow(arch_rwlock_t *rw)
{
	/* waiting always on CPU, so nothing more to do */
}
static inline void
native_ord_arch_read_locked_slow(arch_rwlock_t *rw)
{
	/* waiting always on CPU, so nothing more to do */
}
static inline void
native_ord_arch_write_locked_slow(arch_rwlock_t *rw)
{
	/* waiting always on CPU, so nothing more to do */
}
static inline void
native_ord_arch_read_unlock_slow(arch_rwlock_t *rw)
{
	/* waiting always on CPU, so nothing more to do */
}
static inline void
native_ord_arch_write_unlock_slow(arch_rwlock_t *rw)
{
	/* waiting always on CPU, so nothing more to do */
}

#if defined(CONFIG_PARAVIRT_GUEST) || defined(CONFIG_KVM_GUEST_KERNEL)
/* it is paravirtualized host and guest kernel */
/* or native guest kernel */
#include	/* NOTE(review): include target lost in patch extraction — restore */
#else	/* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */
/* it is native kernel */
/* or native kernel with virtualization support */

/*
 * Thin dispatch wrappers: on a native kernel the ord_* slow-path
 * entry points simply forward to the native no-op hooks above.
 */
static inline void
ord_wait_read_lock_slow(arch_rwlock_t *rw)
{
	native_ord_wait_read_lock_slow(rw);
}
static inline void
ord_wait_write_lock_slow(arch_rwlock_t *rw)
{
	native_ord_wait_write_lock_slow(rw);
}
static inline void
ord_arch_read_locked_slow(arch_rwlock_t *rw)
{
	native_ord_arch_read_locked_slow(rw);
}
static inline void
ord_arch_write_locked_slow(arch_rwlock_t *rw)
{
	native_ord_arch_write_locked_slow(rw);
}
static inline void
ord_arch_read_unlock_slow(arch_rwlock_t *rw)
{
	native_ord_arch_read_unlock_slow(rw);
}
static inline void
ord_arch_write_unlock_slow(arch_rwlock_t *rw)
{
	native_ord_arch_write_unlock_slow(rw);
}

#endif	/* CONFIG_PARAVIRT_GUEST || CONFIG_KVM_GUEST_KERNEL */

/*
 * would read_trylock() succeed?
 * @rw: the rwlock in question.
 */
static inline bool
ord_arch_read_can_lock(arch_rwlock_t *rw)
{
	arch_rwlock_t rwlock;
	bool success;

	/* Atomically probe the lock word; 'success' is set by the helper */
	rwlock.lock = __api_atomic_can_lock_reader(rw, success);

	DebugRW("source lock value 0x%lx: ticket 0x%x head 0x%x count %d\n",
		rwlock.lock, rwlock.ticket, rwlock.head, rwlock.count);
	if (likely(success)) {
		DebugRW("lock can be taken\n");
	} else {
		DebugRW("lock can not be taken\n");
	}
	return success;
}

/*
 * would write_trylock() succeed?
 * @rw: the rwlock in question.
 */
static inline bool
ord_arch_write_can_lock(arch_rwlock_t *rw)
{
	arch_rwlock_t rwlock;
	bool success;

	rwlock.lock = __api_atomic_can_lock_writer(rw, success);

	DebugRW("source lock value 0x%lx: ticket 0x%x head 0x%x count %d\n",
		rwlock.lock, rwlock.ticket, rwlock.head, rwlock.count);
	if (likely(success)) {
		DebugRW("lock can be taken\n");
	} else {
		DebugRW("lock can not be taken\n");
	}
	return success;
}

/*
 * Read-lock slow path: the fast path failed and handed us a 'coupon'
 * (the ticket taken at first attempt).  Spin, retrying with the same
 * ticket, until the ticket reaches the head of the queue.
 * NOTE(review): '!likely(success)' reads oddly — 'unlikely(!success)'
 * is the conventional spelling, behavior is the same.
 */
static inline void
ord_arch_read_lock_slow(arch_rwlock_t *rw, rwlock_val_t coupon)
{
	arch_rwlock_t rwcoupon;
	arch_rwlock_t rwlock;
	u16 ticket;
	bool success;

	rwcoupon.lock = coupon;
	ticket = rwcoupon.ticket;
	DebugSLRW("coupon value 0x%lx: ticket 0x%x head 0x%x count %d\n",
		rwcoupon.lock, ticket, rwcoupon.head, rwcoupon.count);

	do {
		/* wait for waking up after some unlocking */
		ord_wait_read_lock_slow(rw);
		/* try lock again */
		rwlock.lock = __api_atomic_add_slow_reader(rw, ticket, success);
		DebugSLRW("current lock value 0x%lx: ticket 0x%x head 0x%x "
			"count %d\n",
			rwlock.lock, rwlock.ticket, rwlock.head, rwlock.count);
		if (!likely(success)) {
			DebugSLRW("lock is not taken again\n");
		}
	} while (!success);
	DebugSLRW("lock is taken\n");
	if (rwlock.ticket != rwlock.head) {
		/* there are other waiters to take the lock */
		/* probably the next on queue is reader and it can */
		/* take lock too */
		DebugSLRW("ticket 0x%x head 0x%x there are other waiters "
			"to wake up\n",
			rwlock.ticket,
rwlock.head); + } + if (rwlock.count < -1) { + /* there is previous active reader and it wake up alredy */ + /* the folowing readers and does not wait for notification */ + /* from activated readers */ + DebugSLRW("count %d there is previous active reader, so do " + "not wake once again, enter to critical immediately\n", + rwlock.count); + return; + } + ord_arch_read_locked_slow(rw); + DebugSLRW("enter to critical section\n"); +} + +static inline void +ord_arch_read_lock(arch_rwlock_t *rw) +{ + arch_rwlock_t rwlock; + bool success; + + rwlock.lock = __api_atomic_add_new_reader(rw, success); + + DebugRW("source lock value 0x%lx: ticket 0x%x head 0x%x count %d\n", + rwlock.lock, rwlock.ticket, rwlock.head, rwlock.count); + if (likely(success)) { + DebugRW("lock is taken\n"); + return; + } + DebugRW("lock is not taken, goto slow path\n"); + + /* slow path to take read spinlock (as mutex) */ + ord_arch_read_lock_slow(rw, rwlock.lock); +} + +static inline bool +ord_arch_read_trylock(arch_rwlock_t *rw) +{ + arch_rwlock_t rwlock; + bool success; + + rwlock.lock = __api_atomic_try_add_new_reader(rw, success); + + DebugRW("source lock value 0x%lx: ticket 0x%x head 0x%x count %d\n", + rwlock.lock, rwlock.ticket, rwlock.head, rwlock.count); + if (likely(success)) { + DebugRW("lock is taken\n"); + } else { + DebugRW("lock is not taken\n"); + } + return success; +} + +static inline void +ord_arch_read_unlock(arch_rwlock_t *rw) +{ + arch_rwlock_t rwlock; + u16 ticket, head; + int count; + + rwlock.lock = __api_atomic_free_lock_reader(rw); + + ticket = rwlock.ticket; + head = rwlock.head; + count = rwlock.count; + DebugRW("current lock value 0x%lx: ticket 0x%x head 0x%x count %d\n", + rwlock.lock, ticket, head, count); + if (count < 0) { + DebugRW("there are other %d readers, do not wake up now\n", + -count); + return; + } else if (count != 0) { + pr_err("%s(): not zero readers lock counter %d\n", + __func__, count); + BUG_ON(true); + return; + } + if (ticket == head) { + 
DebugRW("there are not other waiters, nothing to wake up\n"); + return; + } + DebugSLRW("there are other waiters, wake up now\n"); + + /* slow path to unlock read spinlock */ + /* need wake up other threads waiting for unlocking */ + ord_arch_read_unlock_slow(rw); +} + +static inline void +ord_arch_write_lock_slow(arch_rwlock_t *rw, rwlock_val_t coupon) +{ + arch_rwlock_t rwcoupon; + arch_rwlock_t rwlock; + u16 ticket; + bool success; + + rwcoupon.lock = coupon; + ticket = rwcoupon.ticket; + DebugSLRW("coupon value 0x%lx: ticket 0x%x head 0x%x count %d\n", + rwcoupon.lock, ticket, rwcoupon.head, rwcoupon.count); + + do { + /* wait for waking up after some unlocking */ + ord_wait_write_lock_slow(rw); + /* try lock again */ + rwlock.lock = __api_atomic_add_slow_writer(rw, ticket, success); + DebugSLRW("current lock value 0x%lx: ticket 0x%x " + "head 0x%x count %d\n", + rwlock.lock, rwlock.ticket, rwlock.head, rwlock.count); + if (!likely(success)) { + DebugSLRW("lock is not taken again\n"); + } + } while (!success); + DebugSLRW("lock is taken\n"); + if (rwlock.ticket != rwlock.head) { + /* there are other waiters to take the lock */ + DebugSLRW("there are other waiters to wake up\n"); + } + ord_arch_write_locked_slow(rw); + DebugSLRW("enter to critical section\n"); +} + +static inline void +ord_arch_write_lock(arch_rwlock_t *rw) +{ + arch_rwlock_t rwlock; + bool success; + + rwlock.lock = __api_atomic_add_new_writer(rw, success); + + DebugRW("source lock value 0x%lx: ticket 0x%x head 0x%x count %d\n", + rwlock.lock, rwlock.ticket, rwlock.head, rwlock.count); + if (likely(success)) { + DebugRW("lock is taken\n"); + return; + } + DebugRW("lock is not taken, goto slow path\n"); + + /* slow path to take read spinlock (as mutex) */ + ord_arch_write_lock_slow(rw, rwlock.lock); +} + +static inline bool +ord_arch_write_trylock(arch_rwlock_t *rw) +{ + arch_rwlock_t rwlock; + bool success; + + rwlock.lock = __api_atomic_try_add_new_writer(rw, success); + + DebugRW("source lock 
value 0x%lx: ticket 0x%x head 0x%x count %d\n", + rwlock.lock, rwlock.ticket, rwlock.head, rwlock.count); + if (likely(success)) { + DebugRW("lock is taken\n"); + } else { + DebugRW("lock is not taken\n"); + } + return success; +} + +static inline void +ord_arch_write_unlock(arch_rwlock_t *rw) +{ + arch_rwlock_t rwlock; + u16 ticket, head; + int count; + + rwlock.lock = __api_atomic_free_lock_writer(rw); + + ticket = rwlock.ticket; + head = rwlock.head; + count = rwlock.count; + DebugRW("current lock value 0x%lx: ticket 0x%x head 0x%x count %d\n", + rwlock.lock, ticket, head, count); + if (count != 0) { + pr_err("%s(): not zero writers counters %d\n", + __func__, count); + BUG_ON(true); + return; + } + if (ticket == head) { + DebugRW("ticket 0x%x head 0x%x there are not other waiters, " + "nothing to wake up\n", + ticket, head); + return; + } + DebugSLRW("ticket 0x%x head 0x%x there are other waiters, " + "wake up its now\n", + ticket, head); + + /* slow path to unlock read spinlock */ + /* need wake up other threads waiting for unlocking */ + ord_arch_write_unlock_slow(rw); +} + +#define arch_read_can_lock(rw) ord_arch_read_can_lock(rw) +#define arch_write_can_lock(rw) ord_arch_write_can_lock(rw) +#define arch_read_lock(rw) ord_arch_read_lock(rw) +#define arch_write_lock(rw) ord_arch_write_lock(rw) + +#define arch_read_unlock(rw) ord_arch_read_unlock(rw) +#define arch_write_unlock(rw) ord_arch_write_unlock(rw) +#define arch_read_trylock(rw) ord_arch_read_trylock(rw) +#define arch_write_trylock(rw) ord_arch_write_trylock(rw) + +#endif /* ! __ASSEMBLY__ */ + +#endif /* ! 
__ASM_ORD_RWLOCK_H */ diff --git a/arch/e2k/include/asm/ord_rwlock_types.h b/arch/e2k/include/asm/ord_rwlock_types.h new file mode 100644 index 000000000000..b9747174a9b2 --- /dev/null +++ b/arch/e2k/include/asm/ord_rwlock_types.h @@ -0,0 +1,34 @@ +#ifndef __ASM_ORD_RWLOCK_TYPES_H +#define __ASM_ORD_RWLOCK_TYPES_H + +#ifndef __LINUX_SPINLOCK_TYPES_H +# error "please don't include this file directly" +#endif + +#include + +typedef unsigned long rwlock_val_t; +typedef struct { + union { + rwlock_val_t lock; /* as a single whole atomic value */ + struct { /* fields of lock value; */ + /* all or each of fields should be */ + /* updated only in atomic style: */ + u16 head; /* current # of active user of lock */ + u16 ticket; /* last # of potential (active or */ + /* waiting) user of the lock */ + s32 count; /* current counter of active users */ + /* readers is negative value and can */ + /* be from 0, -1, -2, ... max */ + /* negative value */ + /* writers can be only 0 or 1 */ + }; + }; +} arch_rwlock_t; +#define __ARCH_RW_LOCK_UNLOCKED { \ + { \ + .lock = 0 \ + } \ +} + +#endif /* __ASM_ORD_RWLOCK_TYPES_H */ diff --git a/arch/e2k/include/asm/override-lcc-warnings.h b/arch/e2k/include/asm/override-lcc-warnings.h new file mode 100644 index 000000000000..d973752ab8df --- /dev/null +++ b/arch/e2k/include/asm/override-lcc-warnings.h @@ -0,0 +1,13 @@ +/* identifier-list parameters may only be used in a function definition */ +#pragma diag_suppress 92 + +#pragma diag_suppress 1717 + +/* in 'goto *expr', expr must have type 'void *' (lcc bug #121409) */ +#pragma diag_suppress 1101 + +/* array of elements containing a flexible array member is nonstandard */ +#pragma diag_suppress 1717 + +/* a reduction in alignment without the 'packed' attribute is ignored */ +#pragma diag_suppress 1160 diff --git a/arch/e2k/include/asm/p2v/boot_bitops.h b/arch/e2k/include/asm/p2v/boot_bitops.h new file mode 100644 index 000000000000..6670feb97df2 --- /dev/null +++ 
b/arch/e2k/include/asm/p2v/boot_bitops.h @@ -0,0 +1,71 @@ +#ifndef _E2K_P2V_BOOT_BITOPS_H_ +#define _E2K_P2V_BOOT_BITOPS_H_ + +#include + +#include +#include +#include + +#define bitops_get_mask(nr) (1UL << (nr & 63)); + +static inline void boot_set_bit(int nr, volatile void * addr) +{ + unsigned long *m = ((unsigned long *)addr) + (nr >> 6); + unsigned long mask = bitops_get_mask(nr); + __api_atomic_op(mask, m, d, "ord", RELAXED_MB); +} + +static inline void boot_clear_bit(int nr, volatile void * addr) +{ + unsigned long *m = ((unsigned long *)addr) + (nr >> 6); + unsigned long mask = bitops_get_mask(nr); + __api_atomic_op(mask, m, d, "andnd", RELAXED_MB); +} + +static inline void boot_change_bit(int nr, volatile void * addr) +{ + unsigned long *m = ((unsigned long *)addr) + (nr >> 6); + unsigned long mask = bitops_get_mask(nr); + __api_atomic_op(mask, m, d, "xord", RELAXED_MB); +} + +static inline int boot_test_and_set_bit(int nr, volatile void * addr) +{ + long retval; + unsigned long *m = ((unsigned long *)addr) + (nr >> 6); + unsigned long mask = bitops_get_mask(nr); + + retval = __api_atomic_fetch_op(mask, m, d, "ord", STRONG_MB); + + return (retval & mask) != 0; +} + +static inline int boot_test_and_clear_bit(int nr, volatile void * addr) +{ + long retval; + unsigned long *m = ((unsigned long *)addr) + (nr >> 6); + unsigned long mask = bitops_get_mask(nr); + + retval = __api_atomic_fetch_op(mask, m, d, "andnd", STRONG_MB); + + return (retval & mask) != 0; +} + +static inline int boot_test_and_change_bit(int nr, volatile void * addr) +{ + long retval; + unsigned long *m = ((unsigned long *)addr) + (nr >> 6); + unsigned long mask = bitops_get_mask(nr); + + retval = __api_atomic_fetch_op(mask, m, d, "xord", STRONG_MB); + + return (retval & mask) != 0; +} + +static inline int boot_test_bit(int nr, const volatile void *addr) +{ + return (1UL & (((unsigned long *)addr)[nr >> 6] >> (nr & 63))) != 0UL; +} + +#endif /* _E2K_P2V_BOOT_BITOPS_H_ */ diff --git 
a/arch/e2k/include/asm/p2v/boot_cacheflush.h b/arch/e2k/include/asm/p2v/boot_cacheflush.h new file mode 100644 index 000000000000..fa89713b664d --- /dev/null +++ b/arch/e2k/include/asm/p2v/boot_cacheflush.h @@ -0,0 +1,57 @@ +#pragma once + +#include +#include + +static __always_inline void boot_wait_for_flush_v5_L3(unsigned char *node_nbsr) +{ + l3_ctrl_t l3_ctrl; + + /* waiting for flush completion */ + do { + boot_cpu_relax(); + l3_ctrl.E2K_L3_CTRL_reg = + boot_do_sic_read_node_nbsr_reg(node_nbsr, SIC_l3_ctrl); + } while (l3_ctrl.E2K_L3_CTRL_fl != 0); +} + +static __always_inline void boot_wait_for_flush_v4_L3(unsigned char *node_nbsr) +{ + l3_reg_t l3_diag; + + /* waiting for flush completion */ + l3_diag = boot_do_sic_read_node_nbsr_reg(node_nbsr, SIC_l3_b0_diag_dw); + l3_diag = boot_do_sic_read_node_nbsr_reg(node_nbsr, SIC_l3_b1_diag_dw); + l3_diag = boot_do_sic_read_node_nbsr_reg(node_nbsr, SIC_l3_b2_diag_dw); + l3_diag = boot_do_sic_read_node_nbsr_reg(node_nbsr, SIC_l3_b3_diag_dw); + l3_diag = boot_do_sic_read_node_nbsr_reg(node_nbsr, SIC_l3_b4_diag_dw); + l3_diag = boot_do_sic_read_node_nbsr_reg(node_nbsr, SIC_l3_b5_diag_dw); + l3_diag = boot_do_sic_read_node_nbsr_reg(node_nbsr, SIC_l3_b6_diag_dw); + l3_diag = boot_do_sic_read_node_nbsr_reg(node_nbsr, SIC_l3_b7_diag_dw); + + __E2K_WAIT_ALL; +} + +static __always_inline void boot_native_flush_L3(int iset_ver, + unsigned char *node_nbsr) +{ + l3_ctrl_t l3_ctrl; + + if (iset_ver < E2K_ISET_V4) + /* cache L3 is absent */ + return; + + /* set bit of L3 control register to flush L3 */ + l3_ctrl.E2K_L3_CTRL_reg = + boot_do_sic_read_node_nbsr_reg(node_nbsr, SIC_l3_ctrl); + l3_ctrl.E2K_L3_CTRL_fl = 1; + boot_do_sic_write_node_nbsr_reg(node_nbsr, SIC_l3_ctrl, + l3_ctrl.E2K_L3_CTRL_reg); + + /* waiting for flush completion */ + if (iset_ver > E2K_ISET_V4) + boot_wait_for_flush_v5_L3(node_nbsr); + else + boot_wait_for_flush_v4_L3(node_nbsr); +} + diff --git a/arch/e2k/include/asm/p2v/boot_console.h 
b/arch/e2k/include/asm/p2v/boot_console.h new file mode 100644 index 000000000000..e46238754d91 --- /dev/null +++ b/arch/e2k/include/asm/p2v/boot_console.h @@ -0,0 +1,67 @@ +#ifndef _E2K_P2V_BOOT_CONSOLE_H_ +#define _E2K_P2V_BOOT_CONSOLE_H_ + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ +#include +#include +#include +#include + +#ifdef CONFIG_SERIAL_BOOT_PRINTK + +# define boot_serial_boot_console_opts \ + boot_get_vo_value(serial_boot_console_opts) +# define boot_opts_entry(opts, member) \ +({ \ + serial_console_opts_t *opts_p = boot_vp_to_pp(opts); \ + typeof(opts_p->member) entry; \ + entry = opts_p->member; \ + ((typeof(opts_p->member))boot_vp_to_pp(entry)); \ +}) +# define boot_opts_func_entry(opts, func) \ +({ \ + serial_console_opts_t *opts_p = boot_vp_to_pp(opts); \ + typeof(opts_p->func) entry; \ + entry = opts_p->func; \ + ((typeof(opts_p->func))boot_func_to_pp(entry)); \ +}) +# define boot_serial_boot_console_opts_entry(entry) \ + boot_opts_entry(boot_serial_boot_console_opts, entry) +# define boot_serial_boot_console_opts_func_entry(func) \ + boot_opts_func_entry(boot_serial_boot_console_opts, func) + +extern unsigned char serial_dump_console_num; +#define boot_serial_boot_console_num boot_get_vo_value(serial_dump_console_num) + +extern void __init_recv boot_setup_serial_console(bool bsp, boot_info_t *); +#endif /* CONFIG_SERIAL_BOOT_PRINTK */ + +# ifdef CONFIG_SERIAL_AM85C30_BOOT_CONSOLE +extern serial_console_opts_t am85c30_serial_boot_console; +# endif + +#ifdef CONFIG_BOOT_PRINTK +extern void do_boot_printk(char const *fmt_v, ...); +extern void boot_vprintk(char const *fmt_v, va_list ap_v); +extern void boot_bug(const char *fmt_v, ...); +extern void boot_warning(const char *fmt_v, ...); + +#else /* !CONFIG_BOOT_PRINTK */ +# define do_boot_printk(...) +# define boot_vprintk(...) +static inline void boot_bug(const char *fmt_v, ...) +{ +} +static inline void boot_warning(const char *fmt_v, ...) 
+{ +} +#endif /* CONFIG_BOOT_PRINTK */ + +#define boot_printk if (DEBUG_BOOT_MODE) do_boot_printk + +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ +#endif /* _E2K_P2V_BOOT_CONSOLE_H_ */ diff --git a/arch/e2k/include/asm/p2v/boot_head.h b/arch/e2k/include/asm/p2v/boot_head.h new file mode 100644 index 000000000000..76ba796720cf --- /dev/null +++ b/arch/e2k/include/asm/p2v/boot_head.h @@ -0,0 +1,321 @@ +/* $Id: boot_head.h,v 1.21 2009/06/29 11:53:53 atic Exp $ + * + * Heading of boot-time initialization. + * + * Copyright (C) 2001 Salavat Guiliazov + */ + +#ifndef _E2K_P2V_BOOT_HEAD_H +#define _E2K_P2V_BOOT_HEAD_H + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef __ASSEMBLY__ + +#ifndef CONFIG_SMP +extern unsigned char boot_init_started; /* boot-time initialization */ + /* has been started */ +extern unsigned char _va_support_on; /* virtual addressing support */ + /* has turned on */ +#define boot_boot_init_started boot_get_vo_value(boot_init_started) +#define boot_va_support_on boot_get_vo_value(_va_support_on) +#define va_support_on _va_support_on +#else +extern unsigned char boot_init_started[NR_CPUS]; + /* boot-time initialization */ + /* has been started */ +extern unsigned char _va_support_on[NR_CPUS]; + /* virtual addressing support */ + /* has turned on */ +#define boot_boot_init_started \ + (boot_vp_to_pp((unsigned char *)boot_init_started)) \ + [boot_smp_processor_id()] +#define boot_va_support_on \ + (boot_vp_to_pp((unsigned char *)_va_support_on)) \ + [boot_smp_processor_id()] +#define va_support_on _va_support_on[boot_smp_processor_id()] +#endif /* CONFIG_SMP */ + +extern bootblock_struct_t *bootblock_phys; /* bootblock structure */ + /* physical pointer */ +extern bootblock_struct_t *bootblock_virt; /* bootblock structure */ + /* virtual pointer */ +#define boot_bootblock_phys boot_get_vo_value(bootblock_phys) +#define boot_bootblock_virt boot_get_vo_value(bootblock_virt) + +#ifdef 
CONFIG_E2K_MACHINE +# define boot_native_machine_id (native_machine_id) +#else +# if defined(CONFIG_ES2) || defined(CONFIG_E2S) || \ + defined(CONFIG_E8C) || defined(CONFIG_E1CP) || \ + defined(CONFIG_E8C2) || defined(CONFIG_E12C) || \ + defined(CONFIG_E16C) || defined(CONFIG_E2C3) +# define boot_native_machine_id (native_machine_id) +# else +# define boot_native_machine_id boot_get_vo_value(native_machine_id) +# endif + +# ifdef CONFIG_NUMA +# define boot_the_node_machine_id(nid) \ + boot_the_node_get_vo_value(nid, machine_id) +# define boot_node_machine_id \ + boot_the_node_machine_id(boot_numa_node_id()) +# endif +#endif + +#define boot_machine (boot_get_vo_value(machine)) + +#ifdef CONFIG_NUMA +#define boot_the_node_machine(nid) \ + ((machdep_t *)boot_the_node_vp_to_pp(nid, &machine)) +#define boot_node_machine(nid) \ + boot_the_node_machine(boot_numa_node_id()) +#else /* ! CONFIG_NUMA */ +#define boot_the_node_machine(nid) \ + ((machdep_t *)boot_vp_to_pp(&machine)) +#define boot_node_machine(nid) \ + boot_the_node_machine(0) +#endif /* CONFIG_NUMA */ + +extern e2k_addr_t start_of_phys_memory; /* start address of physical memory */ +extern e2k_addr_t end_of_phys_memory; /* end address + 1 of physical memory */ +extern e2k_size_t pages_of_phys_memory; /* number of pages of physical memory */ +extern e2k_addr_t kernel_image_size; /* size of full kernel image in the */ + /* memory ("text" + "data" + "bss") */ +#define boot_start_of_phys_memory boot_get_vo_value(start_of_phys_memory) +#define boot_end_of_phys_memory boot_get_vo_value(end_of_phys_memory) +#define boot_pages_of_phys_memory boot_get_vo_value(pages_of_phys_memory) +#define boot_kernel_image_size boot_get_vo_value(kernel_image_size) + +extern int phys_nodes_num; /* total number of online */ + /* nodes */ +extern unsigned long phys_nodes_map; /* map of all online nodes */ +extern int phys_mem_nodes_num; /* number of online nodes */ + /* only with memory */ +extern unsigned long phys_mem_nodes_map; /* map 
of online nodes */ + /* only with memory */ +#define boot_phys_nodes_num boot_get_vo_value(phys_nodes_num) +#define boot_phys_nodes_map boot_get_vo_value(phys_nodes_map) +#define boot_phys_mem_nodes_num boot_get_vo_value(phys_mem_nodes_num) +#define boot_phys_mem_nodes_map boot_get_vo_value(phys_mem_nodes_map) + +#ifdef CONFIG_NUMA +extern e2k_addr_t node_kernel_phys_base[MAX_NUMNODES]; +#define boot_node_kernel_phys_base(node_id) \ + boot_get_vo_value(node_kernel_phys_base[(node_id)]) +#define boot_kernel_phys_base \ + boot_node_kernel_phys_base(boot_numa_node_id()) +#define init_node_kernel_phys_base(node_id) \ + (node_kernel_phys_base[(node_id)]) +#define BOOT_EARLY_THE_NODE_HAS_DUP_KERNEL(node_id) \ + ((unsigned long)(boot_node_kernel_phys_base(node_id)) != \ + (unsigned long)-1) +#define BOOT_EARLY_NODE_HAS_DUP_KERNEL() \ + BOOT_EARLY_THE_NODE_HAS_DUP_KERNEL(boot_numa_node_id()) + +#define BOOT_TEST_AND_SET_NODE_LOCK(node_lock, node_done) \ +({ \ + int was_done; \ + boot_node_spin_lock((node_lock)); \ + was_done = (node_done); \ + if ((was_done)) { \ + boot_node_spin_unlock((node_lock)); \ + } \ + was_done; \ +}) +#define BOOT_NODE_UNLOCK(node_lock, node_done) \ +({ \ + (node_done) = 1; \ + boot_node_spin_unlock((node_lock)); \ +}) +#else /* ! 
CONFIG_NUMA */ +extern e2k_addr_t kernel_phys_base; /* physical address of kernel Image */ + /* beginning */ +#define BOOT_IS_BSP_ID (boot_smp_processor_id() == 0) +#define boot_kernel_phys_base boot_get_vo_value(kernel_phys_base) +#define BOOT_TEST_AND_SET_NODE_LOCK(node_lock, node_done) (!BOOT_IS_BSP_ID) +#define BOOT_NODE_UNLOCK(node_lock, node_done) +#endif /* CONFIG_NUMA */ + +/* + * MMU Trap Cellar + */ +#ifndef CONFIG_SMP +extern unsigned long kernel_trap_cellar[MMU_TRAP_CELLAR_MAX_SIZE]; + +#define KERNEL_TRAP_CELLAR kernel_trap_cellar + +#define boot_kernel_trap_cellar boot_vp_to_pp((u64 *)kernel_trap_cellar) +#define boot_trap_cellar boot_kernel_trap_cellar +#define BOOT_KERNEL_TRAP_CELLAR boot_kernel_trap_cellar +#else /* CONFIG_SMP */ +extern unsigned long kernel_trap_cellar; + +/* + * Don't use hard_smp_processor_id() here to avoid function call in + * NATIVE_SAVE_TRAP_CELLAR(). + */ +#define KERNEL_TRAP_CELLAR \ + ((&kernel_trap_cellar) + MMU_TRAP_CELLAR_MAX_SIZE * \ + cpu_to_cpuid(raw_smp_processor_id())) + +#define boot_trap_cellar \ + boot_vp_to_pp((u64 *)(&kernel_trap_cellar) + \ + MMU_TRAP_CELLAR_MAX_SIZE * boot_smp_processor_id()) +#define boot_kernel_trap_cellar \ + boot_node_vp_to_pp((u64 *)(&kernel_trap_cellar) + \ + MMU_TRAP_CELLAR_MAX_SIZE * boot_smp_processor_id()) +#define BOOT_KERNEL_TRAP_CELLAR \ + ((&kernel_trap_cellar) + \ + MMU_TRAP_CELLAR_MAX_SIZE * boot_smp_processor_id()) +#endif /* ! CONFIG_SMP */ + +/* + * Native/guest VM indicator + */ +#define BOOT_IS_HV_GM() (boot_machine.gmi) + +/* + * Kernel Compilation units table + */ +extern e2k_cute_t kernel_CUT[MAX_KERNEL_CODES_UNITS]; +#define boot_kernel_CUT boot_node_vp_to_pp((e2k_cute_t *)kernel_CUT) + +/* + * Control process of boot-time initialization. 
+ */ + +extern void boot_native_setup_machine_id(bootblock_struct_t *bootblock); +extern void boot_startup(bool bsp, bootblock_struct_t *bootblock); +extern void boot_native_clear_bss(void); +extern void __init boot_native_check_bootblock(bool bsp, + bootblock_struct_t *bootblock); +extern void boot_setup_iset_features(struct machdep *machine); +extern void boot_common_setup_arch_mmu(struct machdep *machine, + pt_struct_t *pt_struct); +extern void init_native_terminate_boot_init(bool bsp, int cpuid); +extern void init_start_kernel_init(bool bsp, int cpuid); + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops */ +#include +#else /* native kernel */ +/* it is native kernel without any virtualization */ +/* or it is native host kernel with virtualization support */ +static inline void boot_setup_machine_id(bootblock_struct_t *bootblock) +{ + boot_native_setup_machine_id(bootblock); +} +static inline void boot_clear_bss(void) +{ + boot_native_clear_bss(); +} +static inline void __init +boot_check_bootblock(bool bsp, bootblock_struct_t *bootblock) +{ + boot_native_check_bootblock(bsp, bootblock); +} + +static inline void init_terminate_boot_init(bool bsp, int cpuid) +{ + init_native_terminate_boot_init(bsp, cpuid); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +/* + * Convert virtual address of kernel item to the consistent physical address, + * while booting process is continued into virtual memory space. + */ + +#ifndef CONFIG_NUMA +#define kernel_va_to_pa(virt_addr) \ + ((e2k_addr_t)(virt_addr) - KERNEL_BASE + kernel_phys_base) +#else /* CONFIG_NUMA */ +#define kernel_va_to_pa(virt_addr) \ + node_kernel_va_to_pa(numa_node_id(), virt_addr) +#endif /* ! 
CONFIG_NUMA */ + +/* + * Convert virtual address of kernel item to the consistent physical address + * on the given node, while booting process is continued into virtual memory + * space. + */ + +#ifndef CONFIG_NUMA +#define node_kernel_va_to_pa(node_id, virt_addr) \ + ((e2k_addr_t)(virt_addr) - KERNEL_BASE + kernel_phys_base) +#else /* CONFIG_NUMA */ +#define node_kernel_va_to_pa(node_id, virt_addr) \ +({ \ + unsigned long virt_offset = (e2k_addr_t)(virt_addr) - \ + KERNEL_BASE; \ + unsigned long kernel_base; \ + if ((e2k_addr_t)(virt_addr) >= (e2k_addr_t)__node_data_end) { \ + kernel_base = node_kernel_phys_base[BOOT_BS_NODE_ID]; \ + } else if (node_has_dup_kernel(node_id)) { \ + kernel_base = node_kernel_phys_base[node_id]; \ + } else { \ + kernel_base = node_kernel_phys_base[ \ + node_dup_kernel_nid(node_id)]; \ + } \ + kernel_base + virt_offset; \ +}) +#endif /* ! CONFIG_NUMA */ + +#ifdef CONFIG_NUMA +/* + * The next macroses should be used for NUMA mode to convert addresses on + * the current node + */ +static inline void * +boot_node_kernel_va_to_pa(int node_id, void *virt_pnt) +{ + unsigned long node_base; + + node_base = boot_node_kernel_phys_base(node_id); + if (node_base == (unsigned long)-1) { + node_base = boot_node_kernel_phys_base(BOOT_BS_NODE_ID); + } + return boot_kernel_va_to_pa(virt_pnt, node_base); +} +#define boot_the_node_vp_to_pp(node_id, virt_pnt) \ + boot_node_kernel_va_to_pa((node_id), (void *)(virt_pnt)) +#define boot_the_node_get_vo_value(node_id, virt_value_name) \ + *(typeof ( virt_value_name)*) \ + boot_the_node_vp_to_pp((node_id), \ + &(virt_value_name)) +#define boot_the_node_get_vo_name(node_id, virt_value_name) \ + *(typeof ( virt_value_name)*) \ + boot_the_node_vp_to_pp((node_id), \ + &(virt_value_name)) +#define boot_node_vp_to_pp(virt_pnt) \ + boot_the_node_vp_to_pp(boot_numa_node_id(), virt_pnt) +#define boot_node_get_vo_value(virt_value_name) \ + boot_the_node_get_vo_value(boot_numa_node_id(), \ + virt_value_name) +#define 
boot_node_get_vo_name(virt_value_name) \ + boot_the_node_get_vo_name(boot_numa_node_id(), \ + virt_value_name) +#else /* ! CONFIG_NUMA */ +#define boot_node_vp_to_pp(virt_pnt) boot_vp_to_pp(virt_pnt) +#define boot_node_get_vo_value(virt_value_name) \ + boot_get_vo_value(virt_value_name) +#define boot_node_get_vo_name(virt_value_name) \ + boot_node_get_vo_name(virt_value_name) +#endif /* CONFIG_NUMA */ + +#endif /* !(__ASSEMBLY__) */ + +#endif /* !(_E2K_P2V_BOOT_HEAD_H) */ diff --git a/arch/e2k/include/asm/p2v/boot_init.h b/arch/e2k/include/asm/p2v/boot_init.h new file mode 100644 index 000000000000..07320e3ac716 --- /dev/null +++ b/arch/e2k/include/asm/p2v/boot_init.h @@ -0,0 +1,372 @@ +/* $Id: boot_init.h,v 1.18 2009/06/29 11:53:32 atic Exp $ + * + * Boot-time initialization of Virtual memory support and switch + * from boot execution on physical memory to boot continuation + * on virtual memory + */ + +#ifndef _E2K_P2V_BOOT_INIT_H +#define _E2K_P2V_BOOT_INIT_H + +#include +#include + +#include +#include +#include + +#ifndef __ASSEMBLY__ + +/* + * The next structures describe list of the memory areas used by boot-time + * initialization. The item 'phys' points to physical base address of + * area, when the item 'virt' points to virtual base address of same area. + * All the used memory areas enumerate below. If a some new area will be used, + * then it should be added to the list of already known ones. 
+ */ + +typedef struct mem_area_desc { /* an area descriptor */ + e2k_addr_t phys; /* physical base address area */ + e2k_addr_t virt; /* virtual base address of same area */ + e2k_size_t size; /* bytes size of the area */ + e2k_size_t phys_offset; /* physical offset of the area */ + e2k_size_t virt_offset; /* virtual offset of the area */ +} mem_area_desc_t; + +typedef struct node_mem_area_desc { /* node an area descriptor */ + mem_area_desc_t nodes[L_MAX_MEM_NUMNODES]; +} node_mem_area_desc_t; + +typedef struct bootmem_areas { /* list of all areas */ +#ifndef CONFIG_NUMA + mem_area_desc_t text; /* segment 'text' of kernel */ + mem_area_desc_t data; /* segment 'data' of kernel */ +#else /* CONFIG_NUMA */ + node_mem_area_desc_t text; /* nodes segment 'text' of kernel */ + node_mem_area_desc_t dup_data; /* nodes duplicated 'data' segment */ + node_mem_area_desc_t data; /* node segment 'data' of kernel */ +#endif /* ! CONFIG_NUMA */ +#ifndef CONFIG_SMP + /* + * Boot-time stacks to switch from physical memory to virtual memory + */ + mem_area_desc_t boot_ps; /* procedure stack of kernel */ + mem_area_desc_t boot_pcs; /* procedure chain stack of kernel */ + mem_area_desc_t boot_stack; /* kernel procedure local data stack */ +#else + /* + * Boot-time stacks to switch from physical memory to virtual memory + */ + mem_area_desc_t boot_ps[NR_CPUS]; + mem_area_desc_t boot_pcs[NR_CPUS]; + mem_area_desc_t boot_stack[NR_CPUS]; +#endif /* CONFIG_SMP */ + mem_area_desc_t bootinfo; /* boot-time information from loader */ +#ifdef CONFIG_BLK_DEV_INITRD + mem_area_desc_t initrd; /* initial disk info */ +#endif /* CONFIG_BLK_DEV_INITRD */ + +#ifdef CONFIG_L_IO_APIC + mem_area_desc_t mpf; /* MP floating table */ + mem_area_desc_t mpc; /* MP configuration table */ +#endif /* CONFIG_L_IO_APIC */ + mem_area_desc_t symtab; /* kernel symbols table */ + mem_area_desc_t strtab; /* kernel strings table */ + mem_area_desc_t x86_hw; /* PA 640K - 1M are reserved for PC's */ + /* integrated 
hardware: BIOS, VGA,... */ +} bootmem_areas_t; + +extern unsigned long totalram_real_pages; +#define boot_totalram_real_pages boot_get_vo_value(totalram_real_pages) + +extern bootmem_areas_t kernel_bootmem; +#define boot_kernel_bootmem boot_vp_to_pp(&kernel_bootmem) + +#ifndef CONFIG_NUMA +#define boot_text_phys_base boot_get_vo_value(kernel_bootmem.text.phys) +#define boot_text_virt_base boot_get_vo_value(kernel_bootmem.text.virt) +#define boot_text_size boot_get_vo_value(kernel_bootmem.text.size) + +#define boot_data_phys_base boot_get_vo_value(kernel_bootmem.data.phys) +#define boot_data_virt_base boot_get_vo_value(kernel_bootmem.data.virt) +#define boot_data_size boot_get_vo_value(kernel_bootmem.data.size) +#else /* CONFIG_NUMA */ +#define boot_node_text_phys_base(nid) \ + boot_get_vo_value(kernel_bootmem.text.nodes[(nid)].phys) +#define boot_node_text_virt_base(nid) \ + boot_get_vo_value(kernel_bootmem.text.nodes[(nid)].virt) +#define boot_node_text_size(nid) \ + boot_get_vo_value(kernel_bootmem.text.nodes[(nid)].size) + +#define boot_node_dup_data_phys_base(nid) \ + boot_get_vo_value(kernel_bootmem.dup_data.nodes[(nid)].phys) +#define boot_node_dup_data_virt_base(nid) \ + boot_get_vo_value(kernel_bootmem.dup_data.nodes[(nid)].virt) +#define boot_node_dup_data_size(nid) \ + boot_get_vo_value(kernel_bootmem.dup_data.nodes[(nid)].size) +#define boot_node_data_phys_base(nid) \ + boot_get_vo_value(kernel_bootmem.data.nodes[(nid)].phys) +#define boot_node_data_virt_base(nid) \ + boot_get_vo_value(kernel_bootmem.data.nodes[(nid)].virt) +#define boot_node_data_size(nid) \ + boot_get_vo_value(kernel_bootmem.data.nodes[(nid)].size) + +#define boot_text_phys_base boot_node_text_phys_base(boot_numa_node_id()) +#define boot_text_virt_base boot_node_text_virt_base(boot_numa_node_id()) +#define boot_text_size boot_node_text_size(boot_numa_node_id()) + +#define boot_dup_data_phys_base \ + boot_node_dup_data_phys_base(boot_numa_node_id()) +#define boot_dup_data_virt_base \ + 
boot_node_dup_data_virt_base(boot_numa_node_id()) +#define boot_dup_data_size \ + boot_node_dup_data_size(boot_numa_node_id()) +#define boot_data_phys_base boot_node_data_phys_base(boot_numa_node_id()) +#define boot_data_virt_base boot_node_data_virt_base(boot_numa_node_id()) +#define boot_data_size boot_node_data_size(boot_numa_node_id()) +#endif /* ! CONFIG_NUMA */ + +#ifndef CONFIG_SMP +#define boot_boot_ps_phys_base boot_get_vo_value(kernel_bootmem.boot_ps.phys) +#define boot_boot_ps_virt_base boot_get_vo_value(kernel_bootmem.boot_ps.virt) +#define boot_boot_ps_size boot_get_vo_value(kernel_bootmem.boot_ps.size) +#define kernel_boot_ps_phys_base(cpuid) kernel_bootmem.boot_ps.phys +#define kernel_boot_ps_virt_base(cpuid) kernel_bootmem.boot_ps.virt +#define kernel_boot_ps_size(cpuid) kernel_bootmem.boot_ps.size +#else +#define boot_boot_ps_phys_base \ + boot_get_vo_value(kernel_bootmem.boot_ps[boot_smp_processor_id()].phys) +#define boot_boot_ps_virt_base \ + boot_get_vo_value(kernel_bootmem.boot_ps[boot_smp_processor_id()].virt) +#define boot_boot_ps_size \ + boot_get_vo_value(kernel_bootmem.boot_ps[boot_smp_processor_id()].size) +#define kernel_boot_ps_phys_base(cpuid) kernel_bootmem.boot_ps[cpuid].phys +#define kernel_boot_ps_virt_base(cpuid) kernel_bootmem.boot_ps[cpuid].virt +#define kernel_boot_ps_size(cpuid) kernel_bootmem.boot_ps[cpuid].size +#endif /* CONFIG_SMP */ + +#ifndef CONFIG_SMP +#define boot_boot_pcs_phys_base boot_get_vo_value(kernel_bootmem.boot_pcs.phys) +#define boot_boot_pcs_virt_base boot_get_vo_value(kernel_bootmem.boot_pcs.virt) +#define boot_boot_pcs_size boot_get_vo_value(kernel_bootmem.boot_pcs.size) +#define kernel_boot_pcs_phys_base(cpuid) kernel_bootmem.boot_pcs.phys +#define kernel_boot_pcs_virt_base(cpuid) kernel_bootmem.boot_pcs.virt +#define kernel_boot_pcs_size(cpuid) kernel_bootmem.boot_pcs.size +#else +#define boot_boot_pcs_phys_base \ + boot_get_vo_value(kernel_bootmem.boot_pcs[boot_smp_processor_id()].phys) +#define 
boot_boot_pcs_virt_base \ + boot_get_vo_value(kernel_bootmem.boot_pcs[boot_smp_processor_id()].virt) +#define boot_boot_pcs_size \ + boot_get_vo_value(kernel_bootmem.boot_pcs[boot_smp_processor_id()].size) +#define kernel_boot_pcs_phys_base(cpuid) \ + kernel_bootmem.boot_pcs[cpuid].phys +#define kernel_boot_pcs_virt_base(cpuid) \ + kernel_bootmem.boot_pcs[cpuid].virt +#define kernel_boot_pcs_size(cpuid) \ + kernel_bootmem.boot_pcs[cpuid].size +#endif /* CONFIG_SMP */ + +#ifndef CONFIG_SMP +#define boot_boot_stack_phys_base \ + boot_get_vo_value(kernel_bootmem.boot_stack.phys) +#define boot_boot_stack_virt_base \ + boot_get_vo_value(kernel_bootmem.boot_stack.virt) +#define boot_boot_stack_size \ + boot_get_vo_value(kernel_bootmem.boot_stack.size) +#define boot_boot_stack_phys_offset \ + boot_get_vo_value(kernel_bootmem.boot_stack.phys_offset) +#define boot_boot_stack_virt_offset \ + boot_get_vo_value(kernel_bootmem.boot_stack.virt_offset) + +#define kernel_boot_stack_phys_base(cpuid) kernel_bootmem.boot_stack.phys +#define kernel_boot_stack_virt_base(cpuid) kernel_bootmem.boot_stack.virt +#define kernel_boot_stack_virt_offset(cpuid) \ + kernel_bootmem.boot_stack.virt_offset +#define kernel_boot_stack_size(cpuid) kernel_bootmem.boot_stack.size +#else +#define boot_boot_stack_phys_base \ + boot_get_vo_value(kernel_bootmem.boot_stack[boot_smp_processor_id()]. \ + phys) +#define boot_boot_stack_virt_base \ + boot_get_vo_value(kernel_bootmem.boot_stack[boot_smp_processor_id()]. \ + virt) +#define boot_boot_stack_size \ + boot_get_vo_value(kernel_bootmem.boot_stack[boot_smp_processor_id()]. \ + size) +#define boot_boot_stack_phys_offset \ + boot_get_vo_value(kernel_bootmem.boot_stack[boot_smp_processor_id()]. \ + phys_offset) +#define boot_boot_stack_virt_offset \ + boot_get_vo_value(kernel_bootmem.boot_stack[boot_smp_processor_id()]. 
\ + virt_offset) +#define kernel_boot_stack_phys_base(cpuid) \ + kernel_bootmem.boot_stack[cpuid].phys +#define kernel_boot_stack_virt_base(cpuid) \ + kernel_bootmem.boot_stack[cpuid].virt +#define kernel_boot_stack_virt_offset(cpuid) \ + kernel_bootmem.boot_stack[cpuid].virt_offset +#define kernel_boot_stack_size(cpuid) \ + kernel_bootmem.boot_stack[cpuid].size +#endif /* CONFIG_SMP */ + +#define boot_bootinfo_phys_base boot_get_vo_value(kernel_bootmem.bootinfo.phys) +#define boot_bootinfo_virt_base boot_get_vo_value(kernel_bootmem.bootinfo.virt) +#define boot_bootinfo_size boot_get_vo_value(kernel_bootmem.bootinfo.size) + +#define init_bootinfo_phys_base kernel_bootmem.bootinfo.phys +#define init_bootinfo_virt_base kernel_bootmem.bootinfo.virt +#define init_bootinfo_size kernel_bootmem.bootinfo.size + +#ifdef CONFIG_BLK_DEV_INITRD +#define boot_initrd_phys_base boot_get_vo_value(kernel_bootmem.initrd.phys) +#define boot_initrd_virt_base boot_get_vo_value(kernel_bootmem.initrd.virt) +#define boot_initrd_size boot_get_vo_value(kernel_bootmem.initrd.size) + +#define init_initrd_phys_base kernel_bootmem.initrd.phys +#define init_initrd_virt_base kernel_bootmem.initrd.virt +#define init_initrd_size kernel_bootmem.initrd.size +#endif /* CONFIG_BLK_DEV_INITRD */ + +#ifdef CONFIG_L_IO_APIC +#define boot_mpf_phys_base boot_get_vo_value(kernel_bootmem.mpf.phys) +#define boot_mpf_virt_base boot_get_vo_value(kernel_bootmem.mpf.virt) +#define boot_mpf_size boot_get_vo_value(kernel_bootmem.mpf.size) + +#define init_mpf_phys_base kernel_bootmem.mpf.phys +#define init_mpf_virt_base kernel_bootmem.mpf.virt +#define init_mpf_size kernel_bootmem.mpf.size + +#define boot_mpc_phys_base boot_get_vo_value(kernel_bootmem.mpc.phys) +#define boot_mpc_virt_base boot_get_vo_value(kernel_bootmem.mpc.virt) +#define boot_mpc_size boot_get_vo_value(kernel_bootmem.mpc.size) + +#define init_mpc_phys_base kernel_bootmem.mpc.phys +#define init_mpc_virt_base kernel_bootmem.mpc.virt +#define 
init_mpc_size kernel_bootmem.mpc.size +#endif /* CONFIG_L_IO_APIC */ + +#define boot_symtab_phys_base boot_get_vo_value(kernel_bootmem.symtab.phys) +#define boot_symtab_virt_base boot_get_vo_value(kernel_bootmem.symtab.virt) +#define boot_symtab_size boot_get_vo_value(kernel_bootmem.symtab.size) + +#define init_symtab_phys_base kernel_bootmem.symtab.phys +#define init_symtab_virt_base kernel_bootmem.symtab.virt +#define init_symtab_size kernel_bootmem.symtab.size + +#define boot_strtab_phys_base boot_get_vo_value(kernel_bootmem.strtab.phys) +#define boot_strtab_virt_base boot_get_vo_value(kernel_bootmem.strtab.virt) +#define boot_strtab_size boot_get_vo_value(kernel_bootmem.strtab.size) + +#define init_strtab_phys_base kernel_bootmem.strtab.phys +#define init_strtab_virt_base kernel_bootmem.strtab.virt +#define init_strtab_size kernel_bootmem.strtab.size + +#define boot_x86_hw_phys_base boot_get_vo_value(kernel_bootmem.x86_hw.phys) +#define boot_x86_hw_size boot_get_vo_value(kernel_bootmem.x86_hw.size) + +#define init_x86_hw_phys_base kernel_bootmem.x86_hw.phys +#define init_x86_hw_size kernel_bootmem.x86_hw.size + +extern unsigned long disable_caches; +extern unsigned long disable_secondary_caches; +extern unsigned long disable_IP; + +#ifdef CONFIG_NUMA +extern boot_spinlock_t __initdata boot_node_map_lock[MAX_NUMNODES]; +extern int __initdata node_mem_mapped[MAX_NUMNODES]; +#define boot_node_mem_mapped \ + boot_get_vo_value(node_mem_mapped[boot_numa_node_id()]) +#else /* ! 
CONFIG_NUMA */ +#define boot_node_map_lock SPIN_LOCK_UNLOCKED; +#define boot_node_mem_mapped 0 +#endif /* CONFIG_NUMA */ + +/* + * Forwards of functions of Virtual memory support initialization + */ + +extern void boot_mem_init(bool bsp, int cpuid, boot_info_t *boot_info, + void (*boot_init_sequel_func)(bool bsp, int cpuid, int cpus)); +extern int boot_native_loader_probe_memory( + node_phys_mem_t *nodes_phys_mem, + boot_info_t *bootblock); +extern int boot_biosx86_probe_memory( + node_phys_mem_t *nodes_phys_mem, + boot_info_t *bootblock); +extern e2k_size_t boot_native_get_bootblock_size(boot_info_t *bblock); +extern void boot_native_reserve_all_bootmem(bool bsp, boot_info_t *boot_info); +extern void boot_reserve_stacks(boot_info_t *boot_info); +extern void boot_reserve_kernel_image(bool bsp, boot_info_t *boot_info); +extern void boot_reserve_bootblock(bool bsp, boot_info_t *boot_info); +extern void boot_native_map_all_bootmem(bool bsp, boot_info_t *boot_info); +extern void boot_map_kernel_image(bool populate_on_host); +extern void boot_map_kernel_boot_stacks(void); +extern void boot_map_all_phys_memory(void); +extern void boot_map_all_bootinfo_areas(boot_info_t *boot_info); +extern void init_mem_term(int cpuid); +extern void boot_native_map_needful_to_equal_virt_area( + e2k_addr_t stack_top_addr); +extern void boot_native_switch_to_virt(bool bsp, int cpuid, + void (*boot_init_sequel_func)(bool bsp, int cpuid, int cpus)); +extern void __init_recv switch_to_phys(void (*restart_sequel_func)(int)); +extern void __init_recv switch_to_phys_end(void); + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops */ +#include +#else /* native kernel */ +/* it is native kernel without any virtualization */ +/* or it is native host kernel with virtualization support */ +static inline int __init 
+boot_loader_probe_memory(node_phys_mem_t *nodes_phys_mem, + boot_info_t *bootblock) +{ + return boot_native_loader_probe_memory(nodes_phys_mem, bootblock); +} + +static inline e2k_size_t __init +boot_get_bootblock_size(boot_info_t *bootblock) +{ + return boot_native_get_bootblock_size(bootblock); +} + +static inline void __init +boot_reserve_all_bootmem(bool bsp, boot_info_t *boot_info) +{ + boot_native_reserve_all_bootmem(bsp, boot_info); +} + +static inline void __init +boot_map_all_bootmem(bool bsp, boot_info_t *boot_info) +{ + boot_native_map_all_bootmem(bsp, boot_info); +} + +static inline void __init_recv +boot_map_needful_to_equal_virt_area(e2k_addr_t stack_top_addr) +{ + boot_native_map_needful_to_equal_virt_area(stack_top_addr); +} + +static inline void __init_recv +boot_kernel_switch_to_virt(bool bsp, int cpuid, + void (*boot_init_sequel_func)(bool bsp, int cpuid, int cpus_to_sync)) +{ + boot_native_switch_to_virt(bsp, cpuid, boot_init_sequel_func); +} + +/* pv_ops does not used in native host/guest mode */ +static inline void native_pv_ops_to_boot_ops(void) +{ +} +static inline void native_boot_pv_ops_to_ops(void) +{ +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* !(__ASSEMBLY__) */ +#endif /* _E2K_P2V_BOOT_INIT_H */ diff --git a/arch/e2k/include/asm/p2v/boot_map.h b/arch/e2k/include/asm/p2v/boot_map.h new file mode 100644 index 000000000000..174715c4f96c --- /dev/null +++ b/arch/e2k/include/asm/p2v/boot_map.h @@ -0,0 +1,95 @@ +/* $Id: boot_map.h,v 1.5 2008/12/19 12:57:15 atic Exp $ + * + * boot-time mappings physical memory areas to viertual kernel space. 
+ */ +#ifndef _E2K_P2V_BOOT_MAP_H +#define _E2K_P2V_BOOT_MAP_H + +#include +#include +#include +#include +#include +#include + +/* + * Structures to simulate TLB contents + */ + +typedef struct e2k_tlb_item { /* an item of TLB */ + e2k_addr_t virt_addr; /* virtual address - tag */ + int valid_bit; /* the item valid flag */ + int pt_level_id; /* page table level # to calculate */ + /* page size */ +} e2k_tlb_item_t; +typedef struct e2k_tlb_line { /* a line of TLB */ + e2k_tlb_item_t sets[NATIVE_TLB_SETS_NUM]; + int sets_num; /* number of valid entries in */ + /* the line */ +} e2k_tlb_line_t; +typedef struct e2k_tlb { /* all TLB */ + e2k_tlb_line_t lines[NATIVE_MAX_TLB_LINES_NUM]; + int entries_num; /* number of valid entries in */ + /* the TLB */ +} e2k_tlb_t; + +#ifndef CONFIG_SMP +extern e2k_tlb_t dtlb_contents; +extern e2k_tlb_t itlb_contents; +#define boot_dtlb_contents boot_vp_to_pp(&dtlb_contents) +#define boot_itlb_contents boot_vp_to_pp(&itlb_contents) +#else +extern e2k_tlb_t dtlb_contents[NR_CPUS]; +extern e2k_tlb_t itlb_contents[NR_CPUS]; +#define boot_dtlb_contents \ + boot_vp_to_pp(&dtlb_contents[boot_smp_processor_id()]) +#define boot_itlb_contents \ + boot_vp_to_pp(&itlb_contents[boot_smp_processor_id()]) +#endif /* CONFIG_SMP */ + +#define DTLB_ACCESS_MASK 0x01 +#define ITLB_ACCESS_MASK 0x02 +#define ALL_TLB_ACCESS_MASK (DTLB_ACCESS_MASK | ITLB_ACCESS_MASK) + +/* + * Forwards of boot-time functions to map physical areas to kernel virtual space + */ + +extern void boot_init_mapping(void); +#ifdef CONFIG_NUMA +extern void boot_node_init_mapping(void); +#endif /* CONFIG_NUMA */ + +/* Page Tables common structure interface's functions */ +extern pte_t * __init_recv +boot_get_double_huge_pte(e2k_addr_t addr, pgprot_t *ptp); +extern pte_t * __init_recv +boot_get_common_huge_pte(e2k_addr_t addr, pgprot_t *ptp); +extern void __init_recv +boot_set_double_pte(e2k_addr_t addr, pte_t *ptep, pte_t pte, bool host_map); +extern void __init_recv 
+boot_set_common_pte(e2k_addr_t addr, pte_t *ptep, pte_t pte, bool host_map); +extern pte_t * __init_recv +init_get_double_huge_pte(e2k_addr_t addr, pgprot_t *ptp); +extern pte_t * __init_recv +init_get_common_huge_pte(e2k_addr_t addr, pgprot_t *ptp); +extern void __init_recv +init_double_pte_clear(pte_t *ptep); +extern void __init_recv +init_common_pte_clear(pte_t *ptep); + +extern long boot_map_phys_area(e2k_addr_t phys_area_addr, + e2k_size_t phys_area_size, e2k_addr_t area_virt_addr, + pgprot_t prot_flags, e2k_size_t page_size, + bool ignore_busy, bool host_map); +extern long boot_do_map_phys_area(e2k_addr_t phys_area_addr, + e2k_size_t phys_area_size, e2k_addr_t area_virt_addr, + pgprot_t prot_flags, const pt_level_t *pt_level, + bool ignore_busy, bool host_map); +extern int boot_map_to_equal_virt_area(e2k_addr_t area_addr, + e2k_size_t area_size, + pgprot_t prot_flags, tlb_tag_t tlb_prot_flags, + e2k_size_t max_page_size, int tlb_mask, int va); +extern int init_clear_temporary_ptes(int tlb_mask, int cpuid); + +#endif /* _E2K_P2V_BOOT_MAP_H */ diff --git a/arch/e2k/include/asm/p2v/boot_mmu_context.h b/arch/e2k/include/asm/p2v/boot_mmu_context.h new file mode 100644 index 000000000000..0c22b129f16f --- /dev/null +++ b/arch/e2k/include/asm/p2v/boot_mmu_context.h @@ -0,0 +1,91 @@ +/* + * boot-time mmu_context.h support + */ + +#ifndef _E2K_P2V_BOOT_MMU_CONTEXT_H_ +#define _E2K_P2V_BOOT_MMU_CONTEXT_H_ + +#include + +#include +#include +#include +#include + +/* + * Set kernel MMU state + */ + +static inline void boot_native_set_kernel_MMU_state_before(void) +{ + e2k_addr_t root_base = MMU_KERNEL_PPTB; + + E2K_WAIT_ALL; + + BOOT_WRITE_OSCUIR_REG_VALUE(0); + BOOT_WRITE_OSCUTD_REG_VALUE((unsigned long) boot_kernel_CUT); + + if (MMU_IS_SEPARATE_PT()) { + e2k_core_mode_t core_mode; + +#ifndef CONFIG_PARAVIRT_GUEST + BUILD_BUG_ON(MMU_SEPARATE_KERNEL_VAB != PAGE_OFFSET); +#endif /* ! 
CONFIG_PARAVIRT_GUEST */ + BOOT_WRITE_MMU_OS_VPTB(MMU_SEPARATE_KERNEL_VPTB); + BOOT_WRITE_MMU_OS_PPTB(root_base); + BOOT_WRITE_MMU_CONT(MMU_KERNEL_CONTEXT); + /* set user PT to kernel PT too/ as initial state */ + BOOT_WRITE_MMU_U_VPTB(MMU_SEPARATE_USER_VPTB); + BOOT_WRITE_MMU_U_PPTB(root_base); + + /* + * How to enable separate virt spaces: + * 1) On phys. memory set OS_VAB = 0 + * 2) Set CORE_MODE.sep_virt_space = 1 + * 3) Enable virtual memory in MMU_CR + * 4) Jump out from short address by calling any function + * by its absolute virtual address + * 5) Set proper virtual OS_VAB + */ + BOOT_WRITE_MMU_OS_VAB(0UL); + core_mode.CORE_MODE_reg = BOOT_READ_CORE_MODE_REG_VALUE(); + core_mode.CORE_MODE_sep_virt_space = 1; + BOOT_WRITE_CORE_MODE_REG_VALUE(core_mode.CORE_MODE_reg); + } else { + BOOT_WRITE_MMU_U_VPTB(MMU_UNITED_KERNEL_VPTB); + BOOT_WRITE_MMU_U_PPTB(root_base); + BOOT_WRITE_MMU_CONT(MMU_KERNEL_CONTEXT); + } + E2K_WAIT_ALL; +} + +static inline void boot_native_set_kernel_MMU_state_after(void) +{ + E2K_WAIT_ALL; + BOOT_WRITE_OSCUTD_REG_VALUE((unsigned long) kernel_CUT); + if (MMU_IS_SEPARATE_PT()) { + BOOT_WRITE_MMU_OS_VAB(MMU_SEPARATE_KERNEL_VAB); + } + E2K_WAIT_ALL; +} + +#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native kernel without any virtualization */ +/* it is native host kernel with virtualization support */ + +static inline void boot_set_kernel_MMU_state_before(void) +{ + boot_native_set_kernel_MMU_state_before(); +} + +static inline void boot_set_kernel_MMU_state_after(void) +{ + boot_native_set_kernel_MMU_state_after(); +} + +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#endif /* ! CONFIG_PARAVIRT_GUEST && ! 
CONFIG_KVM_GUEST_KERNEL */ + +#endif /* _E2K_P2V_BOOT_MMU_CONTEXT_H_ */ diff --git a/arch/e2k/include/asm/p2v/boot_param.h b/arch/e2k/include/asm/p2v/boot_param.h new file mode 100644 index 000000000000..c56be02867ec --- /dev/null +++ b/arch/e2k/include/asm/p2v/boot_param.h @@ -0,0 +1,107 @@ +/* + * Boot-time command line parsing. + * + * Copyright (C) 2011-2013 Pavel V. Panteleev (panteleev_p@mcst.ru) + */ + +#ifndef __E2K_P2V_BOOT_PARAM_H +#define __E2K_P2V_BOOT_PARAM_H + +#include + +#include + +#define _boot_ctype (boot_vp_to_pp((unsigned char *)_ctype)) +#define __boot_ismask(x) (_boot_ctype[(int)(unsigned char)(x)]) + +#define boot_isalnum(c) ((__boot_ismask(c)&(_U|_L|_D)) != 0) +#define boot_isalpha(c) ((__boot_ismask(c)&(_U|_L)) != 0) +#define boot_iscntrl(c) ((__boot_ismask(c)&(_C)) != 0) +#define boot_isdigit(c) ((__boot_ismask(c)&(_D)) != 0) +#define boot_isgraph(c) ((__boot_ismask(c)&(_P|_U|_L|_D)) != 0) +#define boot_islower(c) ((__boot_ismask(c)&(_L)) != 0) +#define boot_isprint(c) ((__boot_ismask(c)&(_P|_U|_L|_D|_SP)) != 0) +#define boot_ispunct(c) ((__boot_ismask(c)&(_P)) != 0) +/* Note: isspace() must return false for %NUL-terminator */ +#define boot_isspace(c) ((__boot_ismask(c)&(_S)) != 0) +#define boot_isupper(c) ((__boot_ismask(c)&(_U)) != 0) +#define boot_isxdigit(c) ((__boot_ismask(c)&(_D|_X)) != 0) + +/* Works only for digits and letters, but small and fast */ +#define BOOT_TOLOWER(x) ((x) | 0x20) + +/* + * Example of usage: + * + * int test = 0; + * ..... + * int boot_test(char *str) + * { + * boot_get_option(&str, boot_vp_to_pp(&test)); + * return 0; + * } + * + * boot_param("test", boot_test); + * ..... + * Function 'boot_test' would be called in case of kernel command line + * contains parameter 'test'. Input argument 'str' would point to the + * value of 'test' parameter. 
+ */ + +typedef struct boot_kernel_param { + const char *str; + int (*setup_func)(char *); +} boot_kernel_param_t; + +extern boot_kernel_param_t __boot_setup_start[], __boot_setup_end[]; + +/* + * Only for really core code. See moduleparam.h for the normal way. + * + * Force the alignment so the compiler doesn't space elements of the + * boot_kernel_param "array" too far apart in .boot.setup. + */ +#define __boot_setup_param(str, unique_id, fn) \ + static const char __boot_setup_str_##unique_id[] __initconst \ + __aligned(1) = str; \ + static struct boot_kernel_param __boot_setup_##unique_id \ + __used __section(.boot.setup) \ + __attribute__((aligned((sizeof(long))))) \ + = { __boot_setup_str_##unique_id, fn } + +#define boot_param(str, fn) \ + __boot_setup_param(str, fn, fn) + +char* boot_skip_spaces(const char *str); +int boot_get_option(char **str, int *pint); +long long boot_simple_strtoll(const char *cp, char **endp, unsigned int base); +long boot_simple_strtol(const char *cp, char **endp, unsigned int base); +unsigned long boot_simple_strtoul( + const char *cp, char **endp, unsigned int base); +unsigned long long boot_simple_strtoull( + const char *cp, char **endp, unsigned int base); +void boot_native_parse_param(bootblock_struct_t *bootblock); + +struct kernel_param; + +extern char saved_boot_cmdline[]; +#define boot_saved_boot_cmdline \ + boot_vp_to_pp((char *)saved_boot_cmdline) + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops */ +#include +#else /* native kernel */ +/* it is native kernel without any virtualization */ +/* or it is native host kernel with virtualization support */ +static inline void +boot_parse_param(bootblock_struct_t *bootblock) +{ + boot_native_parse_param(bootblock); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __E2K_P2V_BOOT_PARAM_H */ diff --git 
a/arch/e2k/include/asm/p2v/boot_phys.h b/arch/e2k/include/asm/p2v/boot_phys.h new file mode 100644 index 000000000000..80192251a44b --- /dev/null +++ b/arch/e2k/include/asm/p2v/boot_phys.h @@ -0,0 +1,275 @@ +/* $Id: boot_phys.h,v 1.5 2009/06/29 11:53:06 atic Exp $ + * + * Simple boot-time physical memory accounting and allocator. + * Discontiguous memory supports on physical memory banks level. + */ + +#ifndef _E2K_P2V_BOOT_PHYS_H +#define _E2K_P2V_BOOT_PHYS_H + +#include +#include +#include + +#include +#include +#include + +/* + * The structure 'boot_phys_bank_t' is the same as common kernel structure + * 'e2k_phys_bank_t' (see 'page.h' header). This structure is physical memory + * bank specifier and is used to hold the boot-time physical memory + * configuration of the machine. + * The array 'boot_phys_banks[]' contains base addresses and sizes of all + * physical memory banks. + * To reduce the boot-time map size, the boot map represents only needed + * to boot tasks first 'BOOT_MAX_PHYS_MEM_SIZE' bytes of real physical memory + * configuration. Creation of full physical memory map can be completed later, + * when virtual memory support will be ready. 
+ */ + +typedef e2k_mem_map_t boot_mem_map_t; /* The same as common map */ + /* item : double-word */ + /* (64 bits == 64 pages) */ +typedef e2k_phys_bank_t boot_phys_bank_t; /* the same as common */ + /* memory bank structure */ +typedef node_phys_mem_t boot_phys_mem_t; /* The same as common */ + /* structure */ + +#define boot_phys_mem nodes_phys_mem /* The same as common banks */ + /* array */ + +#ifndef CONFIG_NUMA +#define BOOT_MAX_CPU_PHYS_MEM_SIZE (16UL * (1024 * 1024)) /* 16 Mbytes */ +/* some memory reserved by BIOS */ +#define BOOT_MAX_BIOS_PHYS_MEM_SIZE (16UL * (1024 * 1024)) /* 16 Mbytes */ + +#ifndef CONFIG_RECOVERY +#define BOOT_MAX_PHYS_MEM_SIZE (BOOT_MAX_CPU_PHYS_MEM_SIZE * NR_CPUS) +#else /* CONFIG_RECOVERY */ +#define BOOT_MAX_PHYS_MEM_SIZE (BOOT_MAX_CPU_PHYS_MEM_SIZE * NR_CPUS + \ + BOOT_MAX_BIOS_PHYS_MEM_SIZE) +#endif /* ! CONFIG_RECOVERY */ + +#else /* CONFIG_NUMA */ + +#define BOOT_MAX_CPU_PHYS_MEM_SIZE (16 * (1024 * 1024)) /* 16 Mbytes */ +/* some memory reserved by BIOS */ +#define BOOT_MAX_BIOS_PHYS_MEM_SIZE (16 * (1024 * 1024)) /* 16 Mbytes */ + +#ifndef CONFIG_RECOVERY +#define BOOT_MAX_NODE_MEM_SIZE (BOOT_MAX_CPU_PHYS_MEM_SIZE * MAX_NODE_CPUS) +#else /* CONFIG_RECOVERY */ +#define BOOT_MAX_NODE_MEM_SIZE (BOOT_MAX_CPU_PHYS_MEM_SIZE * MAX_NODE_CPUS + \ + BOOT_MAX_BIOS_PHYS_MEM_SIZE) +#endif /* ! CONFIG_RECOVERY */ + +typedef struct boot_node_mem_map { + boot_mem_map_t bitmap[(1UL * BOOT_MAX_NODE_MEM_SIZE * + L_MAX_NODE_PHYS_BANKS / PAGE_SIZE + + (sizeof(boot_mem_map_t) * 8 - 1)) / + (sizeof(boot_mem_map_t) * 8) + + L_MAX_NODE_PHYS_BANKS]; +} boot_node_mem_map_t; + +#endif /* ! 
CONFIG_NUMA */ + +#ifdef CONFIG_ONLY_HIGH_PHYS_MEM + +# define LOW_MEMORY_ENABLED_DEFAULT true + +extern bool low_memory_enabled; +# define boot_low_memory_enabled boot_get_vo_value(low_memory_enabled) +# define LOW_MEMORY_ENABLED() low_memory_enabled +# define BOOT_LOW_MEMORY_ENABLED() boot_low_memory_enabled +# define BOOT_SET_LOW_MEMORY_ENABLED() (BOOT_LOW_MEMORY_ENABLED() = true) +# define BOOT_LOW_MEMORY_DISABLED() !boot_low_memory_enabled +# define BOOT_SET_LOW_MEMORY_DISABLED() (BOOT_LOW_MEMORY_ENABLED() = false) + +extern void * __init_recv boot_pa_to_high_pa(void *lo_pa, + boot_info_t *boot_info); +extern bool __init boot_has_lo_bank_remap_to_hi(boot_phys_bank_t *phys_bank, + boot_info_t *bootblock); +extern void __init boot_remap_low_memory(bool bsp, boot_info_t *boot_info); + +#else /* ! CONFIG_ONLY_HIGH_PHYS_MEM */ + +# define LOW_MEMORY_ENABLED() true +# define BOOT_LOW_MEMORY_ENABLED() true +# define BOOT_SET_LOW_MEMORY_ENABLED() +# define BOOT_SET_LOW_MEMORY_DISABLED() + +static inline __init_recv void * +boot_pa_to_high_pa(void *lo_pa, boot_info_t *boot_info) +{ + /* nothing convertion of physical addresses is need */ + return lo_pa; +} +static inline __init bool +boot_has_lo_bank_remap_to_hi(boot_phys_bank_t *phys_bank, + boot_info_t *bootblock) +{ + return false; +} + +static inline __init void +boot_reserve_low_memory(boot_info_t *boot_info) +{ + /* low physical memory can be used in full measure, */ + /* so it does not need to reserve preliminarily some areas */ +} + +static inline __init void +boot_remap_low_memory(bool bsp, boot_info_t *boot_info) +{ + /* low and high memory can be used */ + /* so nothing remapping from one to other need */ +} +#endif /* CONFIG_ONLY_HIGH_PHYS_MEM */ + +static inline __init void * +boot_pa_end_to_high(void *lo_pa_end, boot_info_t *boot_info) +{ + /* end address of bank can be start address of next, */ + /* so transform end address to last address into a bank */ + return boot_pa_to_high_pa(lo_pa_end - 1, 
boot_info) + 1; +} + +/* + * Forwards of functions to allocate boot-time physical memory + */ + +extern e2k_size_t boot_do_create_physmem_maps(boot_info_t *bootblock, + bool create); +static inline __init e2k_size_t +boot_create_physmem_maps(boot_info_t *bootblock) +{ + return boot_do_create_physmem_maps(bootblock, true); +} +static inline __init e2k_size_t +boot_update_physmem_maps(boot_info_t *bootblock) +{ + return boot_do_create_physmem_maps(bootblock, false); +} +extern short __init_recv boot_init_new_phys_bank(int node, + node_phys_mem_t *node_mem, + e2k_addr_t bank_start, e2k_size_t bank_size); +extern short __init boot_create_new_phys_bank(int node, + node_phys_mem_t *node_mem, + e2k_addr_t bank_start, e2k_size_t bank_size); +extern void __init_recv boot_add_new_phys_bank(int node, + node_phys_mem_t *node_mem, + e2k_phys_bank_t *new_phys_bank, + short new_bank_ind); +extern short __init_recv boot_delete_phys_bank_part(int node_id, + node_phys_mem_t *node_mem, + short bank, e2k_phys_bank_t *phys_bank, + e2k_addr_t from_addr, e2k_addr_t to_addr); +extern short __init boot_create_phys_bank_part(int node_id, + node_phys_mem_t *node_mem, + short bank, e2k_phys_bank_t *phys_bank, + e2k_addr_t from_addr, e2k_addr_t to_addr); + +/* reserved memory flags (see following function arguments) */ +#define BOOT_NOT_IGNORE_BUSY_BANK 0x0000 /* area cannot intersect with */ + /* any other areas while */ + /* is reserving */ +#define BOOT_IGNORE_BUSY_BANK 0x0001 /* area can intersect with */ + /* other such areas while */ + /* is reserving */ +#define BOOT_IGNORE_BANK_NOT_FOUND 0x0002 /* area can point to pages */ + /* outside of present banks */ +#define BOOT_CAN_BE_INTERSECTIONS 0x0004 /* area can intersect with */ + /* other such areas after */ + /* was reserved */ +#define BOOT_DELETE_PHYS_MEM 0x0008 /* delete area from available */ + /* physical memory */ +#define BOOT_ONLY_LOW_PHYS_MEM 0x0010 /* area can be always only */ + /* at low memory ( < 2**32) */ +#define 
BOOT_IGNORE_AT_HIGH_PHYS_MEM 0x0020 /* it does not need remap */ + /* area from low to high */ + /* physical memory */ +#define BOOT_EXCLUDE_AT_HIGH_PHYS_MEM 0x0040 /* area should be mapped */ + /* only at low memory */ + /* and excluded from high */ + /* physical memory */ +#define BOOT_RESERVED_TO_FREE_PHYS_MEM 0x0080 /* area is reserved to free */ + /* while bootmem freeing */ +#define BOOT_ONLY_HIGH_PHYS_MEM 0x0100 /* area should be always */ + /* only at high memory */ +#define BOOT_FIRST_HIGH_PHYS_MEM 0x0200 /* area should be preferably */ + /* at high memory */ +#define BOOT_ONLY_ON_NODE_ALLOC_MEM 0x1000 /* allocate memory only on */ + /* the specified node */ +#define BOOT_IS_TRY_ALLOC_MEM 0x2000 /* it is try to allocate, */ + /* so miss is not BUG */ +#define BOOT_MERGEABLE_ALLOC_MEM 0x4000 /* allocated area can be */ + /* merged with other areas */ + /* if memory type is equal */ + +extern bool __init_recv boot_has_node_low_memory(int node, + boot_info_t *bootblock); +extern bool __init boot_has_node_high_memory(int node, boot_info_t *bootblock); +extern bool __init_recv boot_has_high_memory(boot_info_t *bootblock); + +extern int boot_reserve_physmem(e2k_addr_t virt_phys_addr, + e2k_size_t mem_size, busy_mem_type_t mem_type, + unsigned short flags); +extern int boot_delete_physmem(e2k_addr_t virt_phys_addr, + e2k_size_t mem_size); +extern void __init boot_rereserve_bank_area(int node_id, + boot_phys_mem_t *node_mem, + short bank, short new_bank, + short area, e2k_busy_mem_t *busy_area); +extern void *boot_alloc_node_mem(int node_id, e2k_size_t mem_size, + e2k_size_t align, e2k_size_t page_size, + busy_mem_type_t mem_type, unsigned short flags); + +#ifndef CONFIG_NUMA +#define boot_alloc_phys_mem(mem_size, align, type) \ + boot_alloc_node_mem(0, (mem_size), (align), \ + PAGE_SIZE, (type), \ + BOOT_NOT_IGNORE_BUSY_BANK | \ + BOOT_MERGEABLE_ALLOC_MEM | \ + BOOT_FIRST_HIGH_PHYS_MEM) +#define boot_node_alloc_physmem(node_id, mem_size, align, type) \ + 
boot_alloc_phys_mem(mem_size, align, type) + +#else /* CONFIG_NUMA */ +#define boot_node_alloc_physmem(node_id, mem_size, align, type) \ + boot_alloc_node_mem((node_id), (mem_size), (align), \ + PAGE_SIZE, (type), \ + BOOT_NOT_IGNORE_BUSY_BANK | \ + BOOT_MERGEABLE_ALLOC_MEM | \ + BOOT_FIRST_HIGH_PHYS_MEM) +#define boot_node_alloc_large_physpages(node_id, mem_size, align, type, flags) \ + boot_alloc_node_mem((node_id), (mem_size), (align), \ + BOOT_E2K_LARGE_PAGE_SIZE, (type), (flags)) +#define boot_alloc_phys_mem(mem_size, align, type) \ + boot_node_alloc_physmem(boot_numa_node_id(), \ + (mem_size), (align), (type)) +#endif /* ! CONFIG_NUMA */ + +#define boot_node_try_alloc_low_mem(mem_size, align, page_size, type) \ + boot_alloc_node_mem(boot_numa_node_id(), (mem_size), \ + (align), (page_size), (type), \ + BOOT_NOT_IGNORE_BUSY_BANK | \ + BOOT_ONLY_ON_NODE_ALLOC_MEM | \ + BOOT_ONLY_LOW_PHYS_MEM | \ + BOOT_EXCLUDE_AT_HIGH_PHYS_MEM | \ + BOOT_RESERVED_TO_FREE_PHYS_MEM | \ + BOOT_IS_TRY_ALLOC_MEM) + +#define boot_the_node_try_alloc_pages(node_id, mem_size, page_size, type) \ + boot_alloc_node_mem((node_id), (mem_size), (page_size), \ + (page_size), (type), \ + BOOT_NOT_IGNORE_BUSY_BANK | \ + BOOT_ONLY_ON_NODE_ALLOC_MEM | \ + BOOT_FIRST_HIGH_PHYS_MEM | \ + BOOT_MERGEABLE_ALLOC_MEM | \ + BOOT_IS_TRY_ALLOC_MEM) + +extern long boot_map_physmem(pgprot_t prot_flags, e2k_size_t max_page_size); +extern void boot_expand_phys_banks_reserved_areas(void); + +#endif /* _E2K_P2V_BOOT_PHYS_H */ diff --git a/arch/e2k/include/asm/p2v/boot_smp.h b/arch/e2k/include/asm/p2v/boot_smp.h new file mode 100644 index 000000000000..4a861ee63579 --- /dev/null +++ b/arch/e2k/include/asm/p2v/boot_smp.h @@ -0,0 +1,528 @@ +/* $Id: boot_smp.h,v 1.11 2008/06/11 20:02:07 atic Exp $ + * + * Heading of SMP boot-time initialization. 
+ * + * Copyright (C) 2001 Salavat Guiliazov + */ + +#ifndef _E2K_P2V_BOOT_SMP_H +#define _E2K_P2V_BOOT_SMP_H + +#include + +#include +#include +#include +#include +#include +#include + +#ifndef __ASSEMBLY__ + +/* + * Atomic operations for boot-time initialization + */ + +#define boot_mb() mb() + +#define boot_atomic_read(value_p) \ + atomic_read((atomic_t *)boot_vp_to_pp(value_p)) +#define boot_atomic_set(value_p, count) \ + atomic_set((atomic_t *)boot_vp_to_pp(value_p), count) +#define boot_atomic_inc(value_p) \ + atomic_inc((atomic_t *)boot_vp_to_pp(value_p)) +#define boot_atomic_dec(value_p) \ + atomic_dec((atomic_t *)boot_vp_to_pp(value_p)) +#define boot_atomic_inc_return(value_p) \ + atomic_inc_return((atomic_t *)boot_vp_to_pp(value_p)) + +/* + * Current CPU logical # and total number of active CPUs + */ +extern atomic_t boot_cpucount; +#define boot_smp_get_processor_id() \ +({ \ + int cpu_id = boot_early_pic_read_id(); \ + boot_atomic_inc(&boot_cpucount); \ + cpu_id; \ +}) +#define boot_smp_processors_num() \ + boot_atomic_read(&boot_cpucount) +#define init_smp_processors_num() \ + atomic_read(&boot_cpucount) +#define boot_reset_smp_processors_num() \ + boot_atomic_set(&boot_cpucount, 0) +#define init_reset_smp_processors_num() \ + atomic_set(&boot_cpucount, 0) +#define boot_set_smp_processors_num(num) \ + boot_atomic_set(&boot_cpucount, num) +#define init_set_smp_processors_num(num) \ + atomic_set(&boot_cpucount, num) + +/* + * Special system register 'OSR0' is used to hold logical processor number + * while boot-time initialization. 
+ * Later this register will be used to hold pointer to 'current' task structure
+ */
+
+#define boot_smp_set_processor_id(cpuid)	\
+		boot_set_current_thread_info(cpuid)
+#define boot_smp_processor_id()	\
+({ \
+	long cpuid = (long)boot_current_thread_info(); \
+ \
+	if (cpuid >= BOOT_TASK_SIZE) \
+		cpuid = raw_smp_processor_id(); \
+	cpuid; \
+})
+
+#ifdef CONFIG_SMP
+#define BOOT_IS_BSP(__bsp)	(__bsp)
+#define INIT_IS_BSP(__bsp)	(__bsp)
+#else /* ! CONFIG_SMP */
+#define BOOT_IS_BSP(__bsp)	true
+#define INIT_IS_BSP(__bsp)	true
+#endif /* CONFIG_SMP */
+
+/*
+ * Simple IRQ save/restore operations for SMP boot-time initialization
+ */
+
+#define boot_raw_local_irq_restore(x)	BOOT_UPSR_RESTORE(x)
+#define boot_raw_local_irq_disable()	BOOT_UPSR_ALL_CLI()
+#define boot_raw_local_save_flags(x)	BOOT_UPSR_SAVE(x)
+#define boot_raw_irqs_disabled_flags(x)	__raw_irqs_disabled_flags(x)
+#define boot_raw_local_irq_enable()	BOOT_UPSR_ALL_STI()
+#define boot_raw_local_irq_save(x)	BOOT_UPSR_ALL_SAVE_AND_CLI(x)
+
+/*
+ * Simple spin lock operations for SMP boot-time initialization
+ */
+
+#ifdef CONFIG_SMP
+
+# include
+
+/* boot_* variants take a virtual pointer and convert it to its boot-time
+ * physical alias; init_* variants take an already-usable pointer.
+ * Fix vs. original: boot_spin_trylock was missing its closing parenthesis,
+ * so any expansion of it failed to compile. */
+# define boot_spin_trylock(lock)	arch_boot_spin_trylock(boot_vp_to_pp(lock))
+# define boot_spin_lock(lock)		arch_boot_spin_lock(boot_vp_to_pp(lock))
+# define boot_spin_unlock(lock)		arch_boot_spin_unlock(boot_vp_to_pp(lock))
+# define init_spin_trylock(lock)	arch_boot_spin_trylock(lock)
+# define init_spin_lock(lock)		arch_boot_spin_lock(lock)
+# define init_spin_unlock(lock)		arch_boot_spin_unlock(lock)
+#else /* !
CONFIG_SMP */ +# define boot_spin_trylock(lock_p) +# define boot_spin_lock(lock_p) +# define boot_spin_unlock(lock_p) +# define init_spin_trylock(lock_p) +# define init_spin_lock(lock_p) +# define init_spin_unlock(lock_p) +#endif /* CONFIG_SMP */ + +#define boot_spin_lock_irqsave(lock_p, flags) \ +({ \ + boot_raw_local_irq_save(flags); \ + boot_spin_lock(lock_p); \ +}) + +#define boot_spin_unlock_irqrestore(lock_p, flags) \ +({ \ + boot_spin_unlock(lock_p); \ + boot_raw_local_irq_restore(flags); \ +}) + +/* + * Simple spin lock operations for the CPU node boot-time initialization + */ + +#define boot_node_spin_trylock(lock_p) \ + boot_spin_trylock(&lock_p[boot_numa_node_id()]) +#define boot_node_spin_lock(lock_p) \ + boot_spin_lock(&lock_p[boot_numa_node_id()]) +#define boot_node_spin_unlock(lock_p) \ + boot_spin_unlock(&lock_p[boot_numa_node_id()]) + +#define boot_dup_node_spin_trylock(lock_p) \ + boot_spin_trylock(&lock_p[boot_my_node_dup_kernel_nid]) +#define boot_dup_node_spin_lock(lock_p) \ + boot_spin_lock(&lock_p[boot_my_node_dup_kernel_nid]) +#define boot_dup_node_spin_unlock(lock_p) \ + boot_spin_unlock(&lock_p[boot_my_node_dup_kernel_nid]) + +#define init_dup_node_spin_trylock(lock_p) \ + init_spin_trylock(&lock_p[init_my_node_dup_kernel_nid]) +#define init_dup_node_spin_lock(lock_p) \ + init_spin_lock(&lock_p[init_my_node_dup_kernel_nid]) +#define init_dup_node_spin_unlock(lock_p) \ + init_spin_unlock(&lock_p[init_my_node_dup_kernel_nid]) + +/* + * Simple event maintenance for boot-time initialization + */ + +#define boot_wait_for_event(event_p) \ +({ \ + atomic_t *error_flag_p = boot_vp_to_pp(&boot_error_flag); \ + while (!boot_atomic_read(event_p)) { \ + if (unlikely(atomic_read(error_flag_p))) { \ + BOOT_BUG_POINT(__func__); \ + BOOT_BUG("detected BOOT ERROR FLAG while " \ + "wait for event\n"); \ + } \ + boot_mb(); \ + } \ +}) +#define boot_read_event(event_p) \ + boot_atomic_read(event_p) +#define boot_set_event(event_p) \ + boot_atomic_set(event_p, 
1) +#define boot_reset_event(event_p) \ + boot_atomic_set(event_p, 0) +#define boot_wait_for_boot_event(boot_event_p, error_flag_p) \ +({ \ + while (!atomic_read(boot_event_p)) { \ + if (unlikely(atomic_read(error_flag_p))) { \ + BOOT_BUG_POINT(__func__); \ + BOOT_BUG("detected BOOT ERROR FLAG while " \ + "wait for event\n"); \ + } \ + boot_mb(); \ + } \ +}) +#define boot_read_boot_event(boot_event_p) atomic_read(boot_event_p) +#define boot_set_boot_event(boot_event_p) atomic_set(boot_event_p, 1) +#define boot_reset_boot_event(boot_event_p) atomic_set(boot_event_p, 0) + +/* + * Physical number and map of live CPUs passed by loader/BIOS through + * bootinfo structure + */ + +extern int phys_cpu_present_num; /* number of present CPUs */ + /* (passed by BIOS thru */ + /* MP table) */ +extern int cpu_to_sync_num; /* real number of CPUs to make */ + /* sinchronization */ + +#define boot_set_phys_cpu(cpuid, mask) physid_set(cpuid, mask) +#define boot_test_phys_cpu(cpuid, mask) physid_isset(cpuid, mask) + +#define boot_phys_cpu_present_map_p boot_vp_to_pp(&phys_cpu_present_map) + +#define boot_set_phys_cpu_present(cpu) \ + boot_set_phys_cpu(cpu, *boot_phys_cpu_present_map_p) +#define boot_phys_cpu_present(cpu) \ + boot_test_phys_cpu(cpu, *boot_phys_cpu_present_map_p) + +#define boot_phys_cpu_present_num boot_get_vo_value(phys_cpu_present_num) +#ifdef CONFIG_SMP +# define boot_cpu_to_sync_num boot_get_vo_value(cpu_to_sync_num) +#else +# define boot_cpu_to_sync_num 0 +#endif + +#ifdef CONFIG_NUMA +extern atomic_t early_node_has_dup_kernel_num; + +#define boot_physid_to_cpu_mask(physid_mask_p) \ +({ \ + cpumask_t cpu_mask; \ + bitmap_copy(cpumask_bits(&cpu_mask), physid_mask_p->bits, \ + nr_cpumask_bits); \ + cpu_mask; \ +}) + +#define boot_node_to_cpumask(node) \ +({ \ + cpumask_t cpumask; \ + cpumask_t node_cpumask; \ + cpumask_t boot_main_cpu_mask = boot_physid_to_cpu_mask( \ + boot_phys_cpu_present_map_p); \ + bitmap_fill(cpumask_bits(&cpumask), 
boot_machine.nr_node_cpus); \ + cpumask_shift_left(&node_cpumask, (const cpumask_t *)&cpumask, \ + node * boot_machine.max_nr_node_cpus); \ + cpumask_and(&cpumask, &node_cpumask, &boot_main_cpu_mask); \ + cpumask; \ +}) + +#define boot___apicid_to_node boot_get_vo_value(__apicid_to_node) + +#define boot_cpu_to_node(cpu) ((cpu) / boot_machine.max_nr_node_cpus) +#define boot_numa_node_id() boot_cpu_to_node(boot_smp_processor_id()) +#define BOOT_BS_NODE_ID (0) +#define BOOT_IS_BS_NODE (boot_numa_node_id() == BOOT_BS_NODE_ID) + +#define boot_node_is_online(node) \ + (boot_phys_nodes_map & (1 << (node))) +#define boot_node_has_online_mem(nid) \ + (boot_nodes_phys_mem[nid].pfns_num != 0) + +#define boot_early_node_has_dup_kernel_from(node_from) \ +({ \ + int node = (node_from); \ + while (node < MAX_NUMNODES && \ + !BOOT_EARLY_THE_NODE_HAS_DUP_KERNEL(node)) { \ + node ++; \ + } \ + node; \ +}) +#define boot_early_next_node_has_dup_kernel(node_prev) \ + boot_early_node_has_dup_kernel_from((node_prev) + 1) +#define boot_node_has_dup_kernel_from(node_from) \ +({ \ + int node = (node_from); \ + while (node < MAX_NUMNODES && \ + !boot_the_node_has_dup_kernel(boot_numa_node_id(), \ + node)) { \ + node ++; \ + } \ + node; \ +}) +#define boot_next_node_has_dup_kernel(node_prev) \ +({ \ + int node_from = (node_prev) + 1; \ + boot_node_has_dup_kernel_from(node_from); \ +}) +#define boot_node_has_not_dup_kernel_from(node_from) \ +({ \ + int node = (node_from); \ + while (node < MAX_NUMNODES && (!boot_node_is_online(node) || \ + boot_the_node_has_dup_kernel(boot_numa_node_id(), \ + node))) { \ + node ++; \ + } \ + node; \ +}) +#define boot_next_node_has_not_dup_kernel(node_prev) \ +({ \ + int node_from = (node_prev) + 1; \ + boot_node_has_not_dup_kernel_from(node_from); \ +}) + +/* + * Get a next node which has own duplicated kernel image + * We start from the follow node and search in direct of increasing + * node number. 
If there are no more nodes, we start a new search from
+#define boot_the_node_set_has_dup_kernel(nid_where, nid_which) \ + boot_set_bit(nid_which, \ + boot_the_node_vp_to_pp(nid_where, \ + &(node_has_dup_kernel_map))) + +/* number of nodes which have duplicated kernel image and own page tables */ +#define boot_early_node_has_dup_kernel_num \ + boot_get_vo_value(early_node_has_dup_kernel_num) +#define boot_the_node_has_dup_kernel_num(nid) \ + boot_the_node_get_vo_value(nid, node_has_dup_kernel_num) +#define BOOT_EARLY_THERE_IS_DUP_KERNEL \ + boot_atomic_read(&boot_early_node_has_dup_kernel_num) +#define BOOT_EARLY_DUP_KERNEL_NUM \ + (boot_atomic_read(&boot_early_node_has_dup_kernel_num) + 1) +#define BOOT_THE_NODE_THERE_IS_DUP_KERNEL(nid) \ + boot_atomic_read(&boot_the_node_has_dup_kernel_num(nid)) +#define BOOT_THE_NODE_DUP_KERNEL_NUM(nid) \ + (boot_atomic_read(&boot_the_node_has_dup_kernel_num(nid)) + 1) +#define BOOT_NODE_THERE_IS_DUP_KERNEL() \ + BOOT_THE_NODE_THERE_IS_DUP_KERNEL(boot_numa_node_id()) +#define BOOT_NODE_DUP_KERNEL_NUM() \ + BOOT_THE_NODE_DUP_KERNEL_NUM(boot_numa_node_id()) + +/* array of node ID on which this node has kernel image and use page table */ +/* if the node has own copy of the kernel then node ID is own ID */ +/* if the node has not own copy of image and page table then node ID is */ +/* ID of node on which it use kernel image and page table (now in this case */ +/* node ID of BS NODE) */ +#define boot_the_node_dup_kernel_nid(nid) \ + ((int *)(boot_the_node_vp_to_pp(nid, \ + all_nodes_dup_kernel_nid))) +#define boot_dup_kernel_nid \ + boot_the_node_dup_kernel_nid(boot_numa_node_id()) +#define boot_node_dup_kernel_nid(node) \ + (boot_dup_kernel_nid[node]) +#define boot_my_node_dup_kernel_nid \ + boot_node_dup_kernel_nid(boot_numa_node_id()) +#define init_my_node_dup_kernel_nid \ + node_dup_kernel_nid(init_numa_node_id()) + +/* array of pointers to pg_dir (root page table) on each node */ +#define boot_the_node_pg_dir(nid) \ + ((pgd_t **)(boot_the_node_vp_to_pp(nid, \ + all_nodes_pg_dir))) 
+#define boot_node_pg_dir \ + boot_the_node_pg_dir(boot_numa_node_id()) + +#define init_cpu_to_node(cpu) ((cpu) / machine.max_nr_node_cpus) +#define init_numa_node_id() init_cpu_to_node(boot_early_pic_read_id()) +#else /* ! CONFIG_NUMA */ +#define BOOT_IS_BS_NODE 1 +#define boot_numa_node_id() 0 +#define boot_for_each_node_has_dup_kernel(node) \ + for ((node) = 0, (node) < 1; (node) ++) +#define boot_node_has_online_mem(nid) 1 + +#define init_numa_node_id() 0 +#endif /* CONFIG_NUMA */ + +extern void boot_setup_smp_cpu_config(boot_info_t *boot_info); + +/* + * Flag of error occured while boot-time initialization + */ + +extern atomic_t boot_error_flag; + +/* + * Synchronize all active processors at the specified point while boot-time + * initialization + */ + +#define BOOT_NO_ERROR_FLAG 0 + +#ifdef CONFIG_VIRTUALIZATION +#include /* to redefine synchronization times */ +#endif /* CONFIG_VIRTUALIZATION */ + +/* + * number of iterations of waiting for completion of synchronization + */ +#ifndef BOOT_WAITING_FOR_SYNC_ITER +#define BOOT_WAITING_FOR_SYNC_ITER (1000 * NR_CPUS) +#endif /* ! BOOT_WAITING_FOR_SYNC_ITER */ + +/* + * number of loops in each iteration of waiting for + * synchronization completion + */ + +#ifndef BOOT_WAITING_FOR_SYNC_LOOPS +#if defined(CONFIG_MEMLIMIT) && defined(CONFIG_EXT_MEMLIMIT) +#define BOOT_WAITING_FOR_SYNC_LOOPS (NR_CPUS * 64 * \ + (CONFIG_MEMLIMIT+CONFIG_EXT_MEMLIMIT)) +#else +#define BOOT_WAITING_FOR_SYNC_LOOPS (NR_CPUS * 16000) +#endif +#endif /* ! 
BOOT_WAITING_FOR_SYNC_LOOPS */ + +#ifdef CONFIG_SMP +typedef union cpu_sync_count { + atomic_t num_arrived; + u64 pad; +} cpu_sync_count_t; + +extern void __boot_sync_all_processors(atomic_t *num_arrived); +extern void __init_sync_all_processors(atomic_t *num_arrived, int cpus_to_sync); + +extern cpu_sync_count_t __cacheline_aligned_in_smp num_arrived; +# define boot_sync_all_processors() \ +do { \ + __boot_sync_all_processors(&num_arrived.num_arrived); \ +} while (0) + +/* number of CPUs arrived to sync while boot-time init completion */ +extern cpu_sync_count_t __cacheline_aligned_in_smp init_num_arrived; +# define init_sync_all_processors(cpus) \ +do { \ + __init_sync_all_processors(&init_num_arrived.num_arrived, cpus); \ +} while (0) +#else +# define boot_sync_all_processors() do { } while (0) +# define init_sync_all_processors(cpus) do { } while (0) +#endif + +extern int boot_native_smp_cpu_config(boot_info_t *bootblock); +extern int boot_biosx86_smp_cpu_config(boot_info_t *bootblock); +extern void boot_native_smp_node_config(boot_info_t *bootblock); +extern void boot_biosx86_smp_node_config(boot_info_t *bootblock); + +static inline void boot_native_cpu_relax(void) +{ + /* nothing to do */ +} + +#ifdef CONFIG_RECOVERY +extern void boot_recover_smp_cpu_config(boot_info_t *boot_info); +#endif /* CONFIG_RECOVERY */ + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops */ +#include +#else /* native kernel */ +/* it is native kernel without any virtualization */ +/* or it is native host kernel with virtualization support */ +static inline e2k_size_t __init +boot_smp_cpu_config(boot_info_t *bootblock) +{ + return boot_native_smp_cpu_config(bootblock); +} +static inline void __init +boot_smp_node_config(boot_info_t *bootblock) +{ + boot_native_smp_node_config(bootblock); +} +static inline void +boot_cpu_relax(void) +{ + 
boot_native_cpu_relax(); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* !(__ASSEMBLY__) */ +#endif /* !(_E2K_P2V_BOOT_SMP_H) */ diff --git a/arch/e2k/include/asm/p2v/boot_spinlock.h b/arch/e2k/include/asm/p2v/boot_spinlock.h new file mode 100644 index 000000000000..199e7be4b6bc --- /dev/null +++ b/arch/e2k/include/asm/p2v/boot_spinlock.h @@ -0,0 +1,97 @@ +#pragma once + +#include +#include + + +# if defined(CONFIG_PARAVIRT_GUEST) || defined(CONFIG_KVM_GUEST_KERNEL) +/* it is paravirtualized host and guest kernel */ +/* or native guest kernel */ +# include +# define arch_boot_spin_lock_slow(lock) \ + kvm_arch_boot_spin_lock_slow((lock)) +# define arch_boot_spin_locked_slow(lock) \ + kvm_arch_boot_spin_locked_slow((lock)) +# else /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ +/* it is native kernel without any virtualization */ +/* or it is native host kernel with virtualization support */ +# define arch_boot_spin_lock_slow(lock) do { } while (0) +# define arch_boot_spin_locked_slow(lock) do { } while (0) +# endif /* CONFIG_PARAVIRT_GUEST || CONFIG_KVM_GUEST_KERNEL */ + + +static inline void boot_native_spin_unlock_wait(boot_spinlock_t *lock) +{ + boot_spinlock_t val; + u16 next; + + val.lock = READ_ONCE(lock->lock); + + if (likely(val.head == val.tail)) + return; + + next = val.tail; + + do { + val.lock = READ_ONCE(lock->lock); + } while (val.head != val.tail && ((s16) (next - val.head) > 0)); +} + +static inline int boot_native_spin_is_locked(boot_spinlock_t *lock) +{ + boot_spinlock_t val; + + val.lock = READ_ONCE(lock->lock); + + return val.head != val.tail; +} + +static __always_inline int boot_native_spin_value_unlocked(boot_spinlock_t lock) +{ + return lock.head == lock.tail; +} + +static inline int boot_native_spin_is_contended(boot_spinlock_t *lock) +{ + boot_spinlock_t val; + + val.lock = READ_ONCE(lock->lock); + + return val.tail - val.head > 1; +} + +static inline int arch_boot_spin_trylock(boot_spinlock_t *lock) +{ + return 
__api_atomic_ticket_trylock(&lock->lock, + BOOT_SPINLOCK_TAIL_SHIFT); +} + +static inline void arch_boot_spin_lock(boot_spinlock_t *lock) +{ + boot_spinlock_t val; + u16 ticket, ready; + + /* Tail must be in the high 16 bits, otherwise this atomic + * addition will corrupt head. */ + val.lock = __api_atomic32_add_oldval_lock(1 << BOOT_SPINLOCK_TAIL_SHIFT, + &lock->lock); + ticket = val.tail; + ready = val.head; + + if (likely(ticket == ready)) + return; + + do { + arch_boot_spin_lock_slow(lock); + } while (unlikely(ticket != (ready = READ_ONCE(lock->head)))); + + arch_boot_spin_locked_slow(lock); +} + +#ifndef arch_boot_spin_unlock +#define arch_boot_spin_unlock arch_boot_spin_unlock +static inline void arch_boot_spin_unlock(boot_spinlock_t *lock) +{ + smp_store_release(&lock->head, lock->head + 1); +} +#endif diff --git a/arch/e2k/include/asm/p2v/boot_spinlock_types.h b/arch/e2k/include/asm/p2v/boot_spinlock_types.h new file mode 100644 index 000000000000..f2115c2759d8 --- /dev/null +++ b/arch/e2k/include/asm/p2v/boot_spinlock_types.h @@ -0,0 +1,15 @@ +#pragma once + +#include + +#define BOOT_SPINLOCK_HEAD_SHIFT 0 +#define BOOT_SPINLOCK_TAIL_SHIFT 16 +typedef union { + u32 lock; + struct { + u16 head; + u16 tail; + }; +} boot_spinlock_t; + +#define __BOOT_SPIN_LOCK_UNLOCKED (boot_spinlock_t) { .lock = 0 } diff --git a/arch/e2k/include/asm/p2v/boot_v2p.h b/arch/e2k/include/asm/p2v/boot_v2p.h new file mode 100644 index 000000000000..ca7f5381a5fd --- /dev/null +++ b/arch/e2k/include/asm/p2v/boot_v2p.h @@ -0,0 +1,297 @@ +/* + * + * Heading of boot-time initialization. 
+ * + * Copyright (C) 2001 Salavat Guiliazov + */ + +#ifndef _E2K_P2V_BOOT_V2P_H +#define _E2K_P2V_BOOT_V2P_H + +#include +#include + +#include +#include +#include + +#define EOS_RAM_BASE_LABEL _data +#define KERNEL_START_LABEL _start /* start label of Linux Image */ +#define KERNEL_END_LABEL _end /* end label of Linux Image */ + +#ifdef __ASSEMBLY__ + +#define KERNEL_BASE [KERNEL_START_LABEL] /* virtual address of Linux */ + /* Image begining */ +#define KERNEL_END [KERNEL_END_LABEL] /* virtual address of Linux */ + /* Image end */ +#define EOS_RAM_BASE [EOS_RAM_BASE_LABEL] + +#else /* !(__ASSEMBLY__) */ + +#define EOS_RAM_BASE ((e2k_addr_t)&EOS_RAM_BASE_LABEL) + +#define KERNEL_BASE ((e2k_addr_t)&KERNEL_START_LABEL) +#define KERNEL_END ((e2k_addr_t)&KERNEL_END_LABEL) + +#define HIGH_PHYS_MEM_SHIFT 32 /* above 2**32 */ +#define HIGH_PHYS_MEM_BASE (1UL << HIGH_PHYS_MEM_SHIFT) +#define LOW_PHYS_MEM_MASK ((1UL << HIGH_PHYS_MEM_SHIFT) - 1) + +static inline bool +is_addr_from_low_memory(e2k_addr_t addr) +{ + return (addr < HIGH_PHYS_MEM_BASE) ? true : false; +} + +static inline bool +is_addr_from_high_memory(e2k_addr_t addr) +{ + return (addr >= HIGH_PHYS_MEM_BASE) ? true : false; +} + +/* + * Convert virtual address of pointer of global or static variable, array, + * structure, string or other item of linux image to the consistent physical + * address of one, while booting process is in the progress and virtual memory + * support is not yet ready. + * Linker loads Linux image to a virtual space and all enumerated above items + * have virtual addresses into the image. BIOS loader loads image to the + * some existing area of physical memory, virtual addressing is off and direct + * access to the items is impossible. 
+ * Loader should write pointer of image text segment location in the physical + * memory to the 'OSCUD' register: + * OSCUD.OSCUD_base + * OSCUD.OSCUD_size + * and pointer of image data & bss segments location in the physical memory + * to the 'OSGD' register: + * OSGD.OSGD_base + * OSGD.OSGD_size + * These areas can intersect. + * If some item of the image (see above) is located into the text, data or + * bss segment, then to access it on absolute address (pointer) you should + * call this function to convert absolute virtual address to real physical + * address. + * + * Example: + * + * char boot_buf[81]; + * int boot_buf_size = 80; + * ....... + * void + * xxx_func() + * { + * char *buf = (char *)boot_va_to_pa((void *)boot_buf); + * int buf_size = *((int *)boot_va_to_pa( + * (e2k_addr_t)&boot_buf_size)); + * ....... + * } + * + * NOTE !!!!! It is rather to use the macroses defined below to access image + * objects instead of this function. The mocroses have more convenient + * interfaces + */ + +static inline void * +boot_native_kernel_va_to_pa(void *virt_pnt, unsigned long kernel_base) +{ + unsigned long os_base; + + os_base = NATIVE_READ_OSCUD_LO_REG_VALUE() & OSCUD_lo_base_mask; + if (os_base >= NATIVE_KERNEL_VIRTUAL_SPACE_BASE) { + return virt_pnt; + } else if ((e2k_addr_t)virt_pnt >= KERNEL_BASE) { + if (kernel_base == -1) + kernel_base = os_base; + return (void *)(kernel_base + + ((e2k_addr_t)virt_pnt - KERNEL_BASE)); + } else { + return virt_pnt; + } +} + +static inline void * +boot_native_va_to_pa(void *virt_pnt) +{ + return boot_native_kernel_va_to_pa(virt_pnt, -1); +} + +static inline void * +boot_native_func_to_pa(void *virt_pnt) +{ + return boot_native_va_to_pa(virt_pnt); +} + +/* + * In some case kernel boot-time physical address can be appropriate virtual + * one. For example KVM guest kernel booting on physical memory mapped + * to virtual space with PAGE_OFFSET + * So it needs convert a virtual physical address to real physical. 
+ * Native kernel booting on real physical memory, so convertion does not need + */ +static inline e2k_addr_t +boot_native_vpa_to_pa(e2k_addr_t vpa) +{ + return vpa; +} +static inline e2k_addr_t +boot_native_pa_to_vpa(e2k_addr_t pa) +{ + return pa; +} +static inline e2k_addr_t +native_vpa_to_pa(e2k_addr_t vpa) +{ + return vpa; +} +static inline e2k_addr_t +native_pa_to_vpa(e2k_addr_t pa) +{ + return pa; +} + +/* + * Convert pointer of global or static variable, array, structure, string or + * other item of linux image, which is located into the virtual linux text, + * data or bss segment to the consistent pointer with physical address of + * object, while booting process is in the progress and virtual memory + * support is not yet ready. + * See comments above ('boot_va_to_pa()' function declaration). + * + * Example of usage: + * + * char boot_buf[81]; + * + * ....... + * void + * xxx_func() + * { + * char *buf = boot_vp_to_pp(boot_buf); + * + * ....... + * } + */ + +#define boot_native_vp_to_pp(virt_pnt) \ + ((typeof(virt_pnt))boot_native_va_to_pa((void *)(virt_pnt))) +#define boot_native_func_to_pp(virt_pnt) \ + ((typeof(virt_pnt))boot_native_va_to_pa((void *)(virt_pnt))) +#define boot_vp_to_pp(virt_pnt) \ + ((typeof(virt_pnt))boot_va_to_pa((void *)(virt_pnt))) +#define boot_func_to_pp(virt_pnt) \ + ((typeof(virt_pnt))boot_func_to_pa((void *)(virt_pnt))) + +/* + * Get value of object (variable, array, structure, string or other item of + * linux image) which is located into the virtual linux text, data or bss + * segment, while booting process is in the progress and virtual memory support + * is not yet ready. + * See comments above ('boot_va_to_pa()' function declaration). + * + * Example of usage: + * + * static long *boot_long_p; + * int boot_buf_size = 80; + * + * ....... + * void + * xxx_func() + * { + * int buf_size = boot_get_vo_value(boot_buf_size); + * long *long_p = boot_get_vo_value(boot_long_p); + * + * long_p[0] = buf_size; + * ....... 
+ * } + */ + +#define boot_native_get_vo_value(virt_value_name) \ + (*(boot_native_vp_to_pp(&virt_value_name))) +#define boot_get_vo_value(virt_value_name) \ + (*(boot_vp_to_pp(&virt_value_name))) + +/* + * Get name of object (variable, array, structure, string or other item of + * linux image) which is located into the virtual linux text, data or bss + * segment, while booting process is in the progress and virtual memory support + * is not yet ready. This name can be used to assign a value to the object. + * See comments above ('boot_va_to_pa()' function declaration). + * + * Example of usage: + * + * static int boot_memory_size; + * + * ....... + * void + * xxx_func() + * { + * int total_memory_size = 0; + * + * ....... + * boot_get_vo_name(boot_memory_size) = total_memory_size; + * ....... + * } + */ + +#define boot_native_get_vo_name(virt_value_name) \ + (*(typeof(virt_value_name) *)boot_native_vp_to_pp( \ + &virt_value_name)) +#define boot_get_vo_name(virt_value_name) \ + (*(typeof(virt_value_name) *)boot_vp_to_pp( \ + &virt_value_name)) + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops */ +#include +#else /* native kernel */ +/* it is native kernel without any virtualization */ +/* or it is native host kernel with virtualization support */ + +static inline void * +boot_kernel_va_to_pa(void *virt_pnt, unsigned long kernel_base) +{ + return boot_native_kernel_va_to_pa(virt_pnt, kernel_base); +} + +static inline void * +boot_func_to_pa(void *virt_pnt) +{ + return boot_native_va_to_pa(virt_pnt); +} + +static inline void * +boot_va_to_pa(void *virt_pnt) +{ + return boot_native_va_to_pa(virt_pnt); +} + +static inline e2k_addr_t +boot_vpa_to_pa(e2k_addr_t vpa) +{ + return boot_native_vpa_to_pa(vpa); +} + +static inline e2k_addr_t +boot_pa_to_vpa(e2k_addr_t pa) +{ + return boot_native_pa_to_vpa(pa); +} + +static 
inline e2k_addr_t +vpa_to_pa(e2k_addr_t vpa) +{ + return native_vpa_to_pa(vpa); +} + +static inline e2k_addr_t +pa_to_vpa(e2k_addr_t pa) +{ + return native_pa_to_vpa(pa); +} +#endif /* CONFIG_PARAVIRT_GUEST */ +#endif /* __ASSEMBLY__ */ + +#endif /* !(_E2K_P2V_BOOT_V2P_H) */ diff --git a/arch/e2k/include/asm/p2v/io.h b/arch/e2k/include/asm/p2v/io.h new file mode 100644 index 000000000000..67a00363d8ee --- /dev/null +++ b/arch/e2k/include/asm/p2v/io.h @@ -0,0 +1,64 @@ +#pragma once + +#include +#include + +#define BOOT_E2K_X86_IO_PAGE_SIZE (boot_cpu_has(CPU_HWBUG_LARGE_PAGES) ? \ + E2K_SMALL_PAGE_SIZE : BOOT_E2K_LARGE_PAGE_SIZE) +#define BOOT_X86_IO_AREA_PHYS_BASE (boot_machine.x86_io_area_base) +#define BOOT_X86_IO_AREA_PHYS_SIZE (boot_machine.x86_io_area_size) + +static inline void boot_native_writeb(u8 b, void __iomem *addr) +{ + NATIVE_WRITE_MAS_B((e2k_addr_t)addr, b, MAS_IOADDR); +} + +static inline void boot_native_writew(u16 w, void __iomem *addr) +{ + NATIVE_WRITE_MAS_H((e2k_addr_t)addr, w, MAS_IOADDR); +} + +static inline void boot_native_writel(u32 l, void __iomem *addr) +{ + NATIVE_WRITE_MAS_W((e2k_addr_t)addr, l, MAS_IOADDR); +} + +static inline void boot_native_writeq(u64 q, void __iomem *addr) +{ + NATIVE_WRITE_MAS_D((e2k_addr_t)addr, q, MAS_IOADDR); +} + +static inline u8 boot_native_readb(void __iomem *addr) +{ + return NATIVE_READ_MAS_B((e2k_addr_t)addr, MAS_IOADDR); +} + +static inline u16 boot_native_readw(void __iomem *addr) +{ + return NATIVE_READ_MAS_H((e2k_addr_t)addr, MAS_IOADDR); +} + +static inline u32 boot_native_readl(void __iomem *addr) +{ + return NATIVE_READ_MAS_W((e2k_addr_t)addr, MAS_IOADDR); +} + +static inline u64 boot_native_readq(void __iomem *addr) +{ + return NATIVE_READ_MAS_D((e2k_addr_t)addr, MAS_IOADDR); +} + +//TODO seems like these are unused, probably should delete them + +static inline void boot_native_outb(u8 byte, u16 port) +{ + NATIVE_WRITE_MAS_B(BOOT_X86_IO_AREA_PHYS_BASE + port, byte, MAS_IOADDR); +} +static inline 
u8 boot_native_inb(u16 port) +{ + return (u8) NATIVE_READ_MAS_B(BOOT_X86_IO_AREA_PHYS_BASE + port, MAS_IOADDR); +} +static inline u32 boot_native_inl(u16 port) +{ + return (u32) NATIVE_READ_MAS_W(BOOT_X86_IO_AREA_PHYS_BASE + port, MAS_IOADDR); +} diff --git a/arch/e2k/include/asm/page.h b/arch/e2k/include/asm/page.h new file mode 100644 index 000000000000..27f44a426047 --- /dev/null +++ b/arch/e2k/include/asm/page.h @@ -0,0 +1,431 @@ +/* $Id: page.h,v 1.41 2009/07/24 12:02:54 kravtsunov_e Exp $ + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_PAGE_H +#define _E2K_PAGE_H + +#include + +#define IOREMAP_MAX_ORDER PMD_SHIFT +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA + +#define E2K_4K_PAGE_SHIFT 12 /* 4 KBytes page */ +#define E2K_2M_PAGE_SHIFT 21 /* 2 MBytes page */ +#define E2K_4M_PAGE_SHIFT 22 /* 4 MBytes page */ + +#define E2K_SMALL_PAGE_SHIFT E2K_4K_PAGE_SHIFT /* 4 KBytes page */ + +/* + * large page: 4MBytes for E2C+ and 2MBytes for others + */ + +#define E2K_LARGE_PAGE_SHIFT \ + (!cpu_has(CPU_FEAT_ISET_V3) ? E2K_4M_PAGE_SHIFT : E2K_2M_PAGE_SHIFT) + +#define BOOT_E2K_LARGE_PAGE_SHIFT \ + (!boot_cpu_has(CPU_FEAT_ISET_V3) ? 
E2K_4M_PAGE_SHIFT : E2K_2M_PAGE_SHIFT) + +/* 4 KBytes page */ +#define PAGE_SHIFT E2K_SMALL_PAGE_SHIFT + +/* large page */ +#define LARGE_PAGE_SHIFT E2K_LARGE_PAGE_SHIFT + +#ifndef __ASSEMBLY__ +#define E2K_2M_PAGE_SIZE (1UL << E2K_2M_PAGE_SHIFT) +#define E2K_4M_PAGE_SIZE (1UL << E2K_4M_PAGE_SHIFT) +#endif /* !(__ASSEMBLY__) */ + +#define E2K_SMALL_PAGE_SIZE (1 << E2K_SMALL_PAGE_SHIFT) + +#define E2K_LARGE_PAGE_SIZE (1 << E2K_LARGE_PAGE_SHIFT) +#define BOOT_E2K_LARGE_PAGE_SIZE (1 << BOOT_E2K_LARGE_PAGE_SHIFT) + +#define PAGE_SIZE _BITUL(PAGE_SHIFT) +#define LARGE_PAGE_SIZE E2K_LARGE_PAGE_SIZE + +#if defined(CONFIG_CPU_ES2) +# define E2K_MAX_PAGE_SIZE (1 << E2K_4M_PAGE_SHIFT) +#else +# define E2K_MAX_PAGE_SIZE (1 << E2K_2M_PAGE_SHIFT) +#endif + +#define E2K_SMALL_PAGE_MASK (~(E2K_SMALL_PAGE_SIZE - 1)) + +#define E2K_LARGE_PAGE_MASK (~(E2K_LARGE_PAGE_SIZE - 1)) +#define BOOT_E2K_LARGE_PAGE_MASK (~(BOOT_E2K_LARGE_PAGE_SIZE - 1)) + +#define PAGE_MASK (~(PAGE_SIZE - 1)) +#define LARGE_PAGE_MASK E2K_LARGE_PAGE_MASK + +#define HPAGE_SHIFT E2K_LARGE_PAGE_SHIFT +#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) +#define HPAGE_MASK (~(HPAGE_SIZE - 1)) +#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) + +#define HPAGE_PMD_MAX_ORDER (E2K_4M_PAGE_SHIFT - PAGE_SHIFT) + + +#ifdef __KERNEL__ + +#include +#include + +#include +#include +#ifndef __ASSEMBLY__ +#include +#include +#include +#include +#include +#endif /* !(__ASSEMBLY__) */ + + +/* + * To align the pointer to the (next) page boundary or to the beginning of + * the page + */ + +/* + * Round up the beginning of the address. + */ +#define _PAGE_ALIGN_UP(addr, page_size) \ + ((e2k_addr_t)(addr) & ~(page_size-1)) + +/* + * Round down the end of the address. 
+ */ +#define _PAGE_ALIGN_DOWN(addr, page_size) \ + (((e2k_addr_t)(addr) + (page_size-1)) & ~(page_size-1)) + +#define E2K_SMALL_PAGE_ALIGN_DOWN(addr) \ + _PAGE_ALIGN_DOWN(addr, E2K_SMALL_PAGE_SIZE) +#define E2K_SMALL_PAGE_ALIGN_UP(addr) \ + _PAGE_ALIGN_UP(addr, E2K_SMALL_PAGE_SIZE) +#define E2K_SMALL_PAGE_ALIGN(addr) E2K_SMALL_PAGE_ALIGN_DOWN(addr) + +#define E2K_LARGE_PAGE_ALIGN_DOWN(addr) \ + _PAGE_ALIGN_DOWN(addr, E2K_LARGE_PAGE_SIZE) +#define E2K_LARGE_PAGE_ALIGN_UP(addr) \ + _PAGE_ALIGN_UP(addr, E2K_LARGE_PAGE_SIZE) +#define E2K_LARGE_PAGE_ALIGN(addr) E2K_LARGE_PAGE_ALIGN_DOWN(addr) + + +#define PAGE_ALIGN_DOWN(addr) _PAGE_ALIGN_DOWN(addr, PAGE_SIZE) +#define PAGE_ALIGN_UP(addr) _PAGE_ALIGN_UP(addr, PAGE_SIZE) +#define LARGE_PAGE_ALIGN_DOWN(addr) _PAGE_ALIGN_DOWN(addr, LARGE_PAGE_SIZE) +#define LARGE_PAGE_ALIGN_UP(addr) _PAGE_ALIGN_UP(addr, LARGE_PAGE_SIZE) + +#define E2K_ALIGN_SIZE_UP(addr, size) \ + (((size) == 0) ? (addr) : ALIGN_MASK_UP(addr, ((size)-1))) +#define E2K_ALIGN_SIZE_DOWN(addr, size) \ + (((size) == 0) ? (addr) : ALIGN_MASK_DOWN(addr, ((size)-1))) +#define E2K_ALIGN_SIZE(addr, size) \ + (((size) == 0) ? (addr) : ALIGN_MASK(addr, ((size)-1))) + +#define ALIGN_MASK_UP(addr, mask) ((addr) & ~(mask)) +#define ALIGN_MASK_DOWN(addr, mask) (((addr) + (mask)) & ~(mask)) +#define ALIGN_TO_MASK(addr, mask) ALIGN_MASK_DOWN(addr, mask) +#define ALIGN_SIZE_UP(addr, size) \ + (((size) == 0) ? (addr) : ALIGN_MASK_UP(addr, ((size)-1))) +#define ALIGN_SIZE_DOWN(addr, size) \ + (((size) == 0) ? (addr) : ALIGN_MASK_DOWN(addr, ((size)-1))) +#define ALIGN_TO_SIZE(addr, size) \ + (((size) == 0) ? (addr) : ALIGN_TO_MASK(addr, ((size)-1))) + +#ifndef __ASSEMBLY__ + +#define CLEAR_MEMORY_TAG ETAGNUM /* memory filling mode: zeroing */ +/* #define CLEAR_MEMORY_TAG ETAGEWD / * memory filling mode: emptying */ + +/* + * A _lot_ of the kernel time is spent clearing pages, so + * do this as fast as it possibly can. 
+ * + * #95931: try to keep small pages in cache, but avoid cache trashing + * when clearing huge pages. + */ + +#define clear_memory_8(addr, size, tag) \ + fast_tagged_memory_set(addr, 0, tag, size, \ + LDST_QWORD_FMT << LDST_REC_OPC_FMT_SHIFT | \ + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT) + +#define clear_page(addr) clear_memory_8((addr), PAGE_SIZE, CLEAR_MEMORY_TAG) + +#define clear_user_page(addr, vaddr, page) \ +do { \ + u64 strd_opcode; \ + /* Use WC stores to clear huge pages. \ + * e4c does not have shared L3 so cacheable stores are not _that_ \ + * bad and it also has hardware bug which forces to issue memory \ + * barrier after WC stores, so we avoid WC there. */ \ + if (!IS_MACHINE_E2S && PageCompound(page)) { \ + strd_opcode = LDST_QWORD_FMT << LDST_REC_OPC_FMT_SHIFT | \ + MAS_BYPASS_ALL_CACHES << LDST_REC_OPC_MAS_SHIFT; \ + } else { \ + strd_opcode = LDST_QWORD_FMT << LDST_REC_OPC_FMT_SHIFT | \ + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT; \ + } \ + fast_tagged_memory_set((addr), 0, CLEAR_MEMORY_TAG, \ + PAGE_SIZE, strd_opcode); \ +} while (0) + +#define clear_user_highpage(page, vaddr) \ + clear_user_page(page_address(page), (vaddr), (page)) + +#define __HAVE_ARCH_COPY_USER_HIGHPAGE +#define copy_user_highpage(to, from, vaddr, vma) \ + copy_page(page_address(to), page_address(from)) + +#define copy_tagged_page(to, from) __tagged_memcpy_8(to, from, PAGE_SIZE) + +#define copy_page(to, from) copy_tagged_page(to, from) +#define copy_user_page(to, from, vaddr, page) copy_page(to, from) + +typedef struct page *pgtable_t; + +#define __pa(x) ((e2k_addr_t)(x) - PAGE_OFFSET) +#define __va(x) ((void *)((e2k_addr_t) (x) + PAGE_OFFSET)) +#define __boot_pa(x) ((e2k_addr_t)(x) - BOOT_PAGE_OFFSET) +#define __boot_va(x) ((void *)((e2k_addr_t) (x) + BOOT_PAGE_OFFSET)) + +#define __pa_symbol(x) vpa_to_pa(kernel_va_to_pa((unsigned long) (x))) + +/* + * PFNs are real physical page numbers. 
However, mem_map only begins to record + * per-page information starting at pfn_base. + * This is to handle systems where the first physical page in the machine + * is not 0. + */ + +struct page; + +extern struct page *e2k_virt_to_page(const void *kaddr); + +#define phys_to_page(kaddr) pfn_to_page((kaddr) >> PAGE_SHIFT) +#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) + +#define virt_to_page(kaddr) \ + (((e2k_addr_t)(kaddr) >= PAGE_OFFSET && \ + (e2k_addr_t)(kaddr) < PAGE_OFFSET + MAX_PM_SIZE) ? \ + phys_to_page(__pa(kaddr)) \ + : \ + e2k_virt_to_page((void *) (kaddr))) + + +#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) + +#define virt_to_phys __pa +#define phys_to_virt __va + +#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) + +#define page_valid(page) pfn_valid(page_to_pfn(page)) +#define phys_addr_valid(addr) pfn_valid((addr) >> PAGE_SHIFT) +#define virt_addr_valid(kaddr) ((e2k_addr_t)(kaddr) >= PAGE_OFFSET && \ + pfn_valid(virt_to_pfn(kaddr))) +#define kern_addr_valid(kaddr) page_valid(virt_to_page(kaddr)) + +#define boot_pa(x) ((BOOT_READ_OSCUD_LO_REG().OSCUD_lo_base >= \ + PAGE_OFFSET) \ + ? \ + __pa(x) : (void *)(boot_pa_to_vpa(x))) +#define boot_va(x) ((BOOT_READ_OSCUD_LO_REG().OSCUD_lo_base >= \ + PAGE_OFFSET) \ + ? 
\ + __va(x) : (void *)(boot_pa_to_vpa(x))) + +/* + * E2K architecture additional vm_flags + */ + +#define VM_HW_STACK_PS 0x00100000000UL /* procedure stack area */ +#define VM_TAGMAPPED 0x00200000000UL /* the tags area appropriate */ + /* to this data VM area was mapped */ +#define VM_HW_STACK_PCS 0x00400000000UL /* chain stack area */ +#define VM_WRITECOMBINED 0x00800000000UL +#define VM_PRIVILEGED 0x04000000000UL /* pages are privileged */ +#define VM_MPDMA 0x10000000000UL /* pages are under MPDMA */ + /* hardware protection */ +#define VM_SIGNAL_STACK 0x20000000000UL /* Signal stack area */ +#define VM_CUI 0xffff000000000000UL /* CUI for pages in VMA */ +#define VM_CUI_SHIFT 48 + +/* + * E2K architecture additional gup_flags + */ +#define FOLL_MPDMA 0x01000000 /* page is not writable only for DMA */ + +/* + * We don't set the valid bit for PROT_NONE areas, otherwise + * semi-speculative loads will cause page faults which is bad + * for performance when such loads come from an unrolled loop. + */ +#define VM_PAGESVALID (VM_READ | VM_WRITE | VM_EXEC) + +/* + * The following structure is used to hold the physical memory configuration + * of the machine. This is filled in 'boot_probe_memory()' and is later + * used by 'boot_mem_init()' to setup boot-time memory map and by 'mem_init()' + * to set up 'mem_map[]'. + * A base address of a bank should be page aligned. + * The structure item 'mem_bitmap' is a map pointer. The map bits represent + * the physical memory on the bank in terms of small pages (4 KB). + * To reduce the boot-time map size, the boot map represents only needed + * to boot tasks first 'BOOT_MAX_PHYS_MEM_SIZE' bytes of real physical memory + * configuration. Some of memory areas are prereserved ('text' & 'data' + * segments, stack, boot information etc) and have been allocated by BIOS or + * boot loaders. 
All these areas are known and listed in the header + * 'asm/boot_init.h' Such area can be allocated on any physical address and + * can be out of the boot map, which represents reserved memory + * The structure 'e2k_busy_mem_t' represents occupied memory areas in a bank, + * which can not be described by the boot map. + * Array of 'E2K_MAX_PHYS_BANKS' of these structures is statically allocated + * into the kernel image. + * The entry after the last valid one has 'pages_num == 0'. + */ + +typedef unsigned long e2k_mem_map_t; /* double-word (64 bits == 64 pages) */ + +typedef enum busy_mem_type { + undefined_mem_type = 0, /* unknown data area */ + boot_loader_mem_type, /* data and binary of boot loader */ + kernel_image_mem_type, /* kernel image (text & data) */ + data_stack_mem_type, /* local data stack */ + hw_stack_mem_type, /* hardware procedure or chain stack */ + page_table_mem_type, /* page tables */ + kernel_data_mem_type, /* kernel data/structures/tables */ + /* and other areas of memory */ + bootblock_mem_type, /* boot block (common data with boot */ + /* loader) */ + boot_time_data_mem_type, /* kernel data & structures allocated */ + /* while boot-time init */ + hw_reserved_mem_type, /* reserved for hardware purposes */ + /* (for example: bugs workaround) */ + hw_stripped_mem_type, /* hardware stripped out physical */ + /* memory (IO, ROM, VGA ...) 
*/ + dma32_mem_type, /* low memory reserved for DMA and */ + /* bounce buffers */ +} busy_mem_type_t; + +typedef struct e2k_busy_mem { + e2k_size_t start_page; /* start page # of an area in a bank */ + e2k_size_t pages_num; /* number of occupied pages by the */ + /* area in the bank */ + short next; /* index of next area (last = -1) */ + /* busy areas is ordered list */ + /* on increase of addresses */ + unsigned short flags; /* boot-time busy area flags */ + /* (see asm/boot_phys.h) */ + busy_mem_type_t type; /* memory type */ +} e2k_busy_mem_t; + +#define BOOT_RESERVED_AREAS_SIZE (2 * PAGE_SIZE) + +/* max number of prereserved areas at boot-time */ +#define BOOT_MAX_PRERESERVED_AREAS \ + (1 + /* 0-page (hardware bug workaround) */ \ + 3 * NR_CPUS + /* 3 stacks (data/procedure/chain) */ \ + /* on each CPU */ \ + 1 + /* kernel image 'text' section */ \ + 1 + /* kernel image 'protexted text' */ \ + 1 + /* kernel '.data..ro_after_init' */ \ + 1 + /* kernel 'data/bss' */ \ + 1 + /* kernel 'init' */ \ + 1 + /* low IO memory (VGA memory) */ \ + 1 + /* bootblock */ \ + 1 + /* INITRD */ \ + 1 + /* mp table */ \ + 1 + /* MP floating table */ \ + 1 * L_MAX_BUSY_AREAS + /* boot loader busy memory */ \ + 1 + /* list of all occupied areas */ \ + 0) + +#ifdef CONFIG_ONLY_HIGH_PHYS_MEM +/* max number of prereserved areas at low physical memory */ +#define MAX_PRERESERVED_LOW_AREAS 2 +#else /* ! 
CONFIG_ONLY_HIGH_PHYS_MEM */ +#define MAX_PRERESERVED_LOW_AREAS 0 /* none such areas */ +#endif /* CONFIG_ONLY_HIGH_PHYS_MEM */ + +#define E2K_MAX_PRERESERVED_AREAS \ + (BOOT_MAX_PRERESERVED_AREAS + MAX_PRERESERVED_LOW_AREAS) + +typedef struct e2k_phys_bank { + e2k_addr_t base_addr; /* base physical address of the start */ + /* page of the bank */ + e2k_size_t pages_num; /* total number of pages in the bank */ + bool maybe_remapped_to_hi; /* the low bank can be remapped to */ + /* high range bank */ + short next; /* index of next bank (last = -1) */ + /* node bnaks is ordered list */ + /* on increase of addresses */ + atomic64_t free_pages_num; /* current number of free pages */ + /* in the map */ + short busy_areas_num; /* number of areas in the list of */ + /* occupied areas in the bank */ + short first_area; /* index of first busy area */ + e2k_busy_mem_t busy_areas_prereserved[E2K_MAX_PRERESERVED_AREAS]; + /* list of all occupied areas in the */ + /* bank, which not included to the */ + /* memory bitmap */ + e2k_busy_mem_t *busy_areas; /* pointer to list of all occupied */ + /* areas in the bank */ + bool mapped[L_MAX_MEM_NUMNODES]; + /* the bank was already mapped */ + /* on node [node_id] */ +} e2k_phys_bank_t; + +typedef struct node_phys_mem { + e2k_size_t start_pfn; /* start page number on the node */ + e2k_size_t pfns_num; /* number of pages on the node */ + /* including holes between banks */ + short banks_num; /* total number of banks in the list */ + short first_bank; /* index of first bank on the list */ + /* (starts from start_pfn) */ + e2k_phys_bank_t banks[L_MAX_NODE_PHYS_BANKS]; +} node_phys_mem_t; + +extern node_phys_mem_t nodes_phys_mem[L_MAX_MEM_NUMNODES]; + +#define boot_nodes_phys_mem \ + (boot_vp_to_pp((node_phys_mem_t *)nodes_phys_mem)) + +#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#ifndef CONFIG_E2K_HAS_OPT_BITOPS +#include +#else +#include +static inline int get_order(unsigned long 
size) +{ + int lz = 0; + + size = (size - 1) >> PAGE_SHIFT; + lz = E2K_LZCNTD(size); + + return BITS_PER_LONG - lz; +} +#endif /* CONFIG_E2K_HAS_OPT_BITOPS */ + +struct task_struct; + +extern e2k_addr_t node_kernel_address_to_phys(int node, e2k_addr_t address); +extern e2k_addr_t user_address_to_pva(struct task_struct *tsk, + e2k_addr_t address); +extern e2k_addr_t kernel_address_to_pva(e2k_addr_t address); + +#define ARCH_ZONE_DMA_BITS 32 + +#endif /* !(__ASSEMBLY__) */ + +#endif /* !(__KERNEL__) */ + +#include + +#endif /* !(_E2K_PAGE_H) */ diff --git a/arch/e2k/include/asm/page_io.h b/arch/e2k/include/asm/page_io.h new file mode 100644 index 000000000000..75d88f4a2b52 --- /dev/null +++ b/arch/e2k/include/asm/page_io.h @@ -0,0 +1,53 @@ +/* $Id: page_io.h,v 1.6 2007/09/05 12:05:52 kostin Exp $ + * + */ + +#ifndef _E2K_PAGE_IO_H +#define _E2K_PAGE_IO_H + +#include +#include +#include +#include +#include + +#include +#include +#include + +#undef DEBUG_TAG_MODE +#undef DebugTM +#define DEBUG_TAG_MODE 0 /* Tag memory */ +#define DebugTM(...) 
DebugPrint(DEBUG_TAG_MODE ,##__VA_ARGS__) + + +#define TAGS_BITS_PER_LONG 4 +#define TAGS_BYTES_PER_PAGE (PAGE_SIZE / sizeof(long) * \ + TAGS_BITS_PER_LONG / 8) + +#define TAGS_PAGES 0xfff +#define TAGS_READ_PAGES 0xff + +#define TAGS_PAGES_FOR_COMPRESS 0xff +#define TAGS_READ_PAGES_FOR_COMPRESS 0xff + +struct tags_swap_page_table { + struct page **pages; + struct page **read_pages; + int index; /* last used page */ + int index_read; /* last used page for readpage */ + int size[2]; + spinlock_t lock_pages; + spinlock_t lock_read_pages; +}; + +extern void tags_swap_init(unsigned type, unsigned long *map); +extern void e2k_remove_swap(struct swap_info_struct *sis); +extern void restore_tags_for_data(u64 *, u8 *); +extern u32 save_tags_from_data(u64 *, u8 *); +extern void get_page_with_tags(u8 *, u8 *, int *); +extern u8 *alloc_page_with_tags(void); +extern void free_page_with_tags(u8 *); +extern int check_tags(unsigned type, unsigned long beg, unsigned long end); + +#endif //_E2K_PAGE_IO_H diff --git a/arch/e2k/include/asm/paravirt.h b/arch/e2k/include/asm/paravirt.h new file mode 100644 index 000000000000..77ebeb07c90e --- /dev/null +++ b/arch/e2k/include/asm/paravirt.h @@ -0,0 +1,8 @@ + +#ifndef __ASM_E2K_PARAVIRT_H +#define __ASM_E2K_PARAVIRT_H + +#include +#include + +#endif /* __ASM_E2K_PARAVIRT_H */ diff --git a/arch/e2k/include/asm/paravirt/aau_context.h b/arch/e2k/include/asm/paravirt/aau_context.h new file mode 100644 index 000000000000..1cabf8b7a2bf --- /dev/null +++ b/arch/e2k/include/asm/paravirt/aau_context.h @@ -0,0 +1,255 @@ +/* + * AAU registers description, macroses for load/store AAU context + * paravirtualized case + * + * array access descriptors (AAD0, ... , AAD31); + * initial indices (AIND0, ... , AAIND15); + * indices increment values (AAINCR0, ... , AAINCR7); + * current values of "prefetch" indices (AALDI0, ... , AALDI63); + * array prefetch initialization mask (AALDV); + * prefetch attributes (AALDA0, ... 
, AALDA63); + * array prefetch advance mask (AALDM); + * array access status register (AASR); + * array access fault status register (AAFSTR); + * current values of "store" indices (AASTI0, ... , AASTI15); + * store attributes (AASTA0, ... , AASTA15); + */ + +#ifndef _E2K_ASM_PARAVIRT_AAU_CONTEXT_H_ +#define _E2K_ASM_PARAVIRT_AAU_CONTEXT_H_ + +#include +#include + +#ifdef CONFIG_KVM_GUEST +#include +#include + +#define PV_SAVE_AAU_MASK_REGS(aau_context, aasr) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_SAVE_AAU_MASK_REGS(aau_context, aasr); \ + } else { \ + KVM_SAVE_AAU_MASK_REGS(aau_context, aasr); \ + } \ +}) + +#define PV_RESTORE_AAU_MASK_REGS(aau_context) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_RESTORE_AAU_MASK_REGS(aau_context); \ + } else { \ + KVM_RESTORE_AAU_MASK_REGS(aau_context); \ + } \ +}) + +#define PV_SAVE_AADS(aau_regs) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_SAVE_AADS(aau_regs); \ + } else { \ + KVM_SAVE_AADS(aau_regs); \ + } \ +}) + +#define PV_RESTORE_AADS(aau_regs) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_RESTORE_AADS(aau_regs); \ + } else { \ + KVM_RESTORE_AADS(aau_regs); \ + } \ +}) + +#define PV_SAVE_AALDIS_V2(regs) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_SAVE_AALDIS_V2(regs); \ + } else { \ + KVM_SAVE_AALDIS(regs); \ + } \ +}) +#define PV_SAVE_AALDIS_V5(regs) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_SAVE_AALDIS_V5(regs); \ + } else { \ + KVM_SAVE_AALDIS(regs); \ + } \ +}) + +#define PV_SAVE_AALDAS(aaldas_p) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_SAVE_AALDAS(aaldas_p); \ + } else { \ + KVM_SAVE_AALDAS(aaldas_p); \ + } \ +}) + +#define PV_SAVE_AAFSTR(aau_context) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_SAVE_AAFSTR(aau_context); \ + } else { \ + KVM_SAVE_AAFSTR(aau_context); \ + } \ +}) + +#define PV_SAVE_AAU_REGS_FOR_PTRACE(regs, ti) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_SAVE_AAU_REGS_FOR_PTRACE(regs, ti); \ + } else { \ + KVM_SAVE_AAU_REGS_FOR_PTRACE(regs, ti); \ + } \ +}) + 
+#define PV_GET_ARRAY_DESCRIPTORS_V2(aau_context) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_GET_ARRAY_DESCRIPTORS_V2(aau_context); \ + } else { \ + kvm_get_array_descriptors(aau_context); \ + } \ +}) +#define PV_GET_ARRAY_DESCRIPTORS_V5(aau_context) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_GET_ARRAY_DESCRIPTORS_V5(aau_context); \ + } else { \ + kvm_get_array_descriptors(aau_context); \ + } \ +}) + +#define PV_SET_ARRAY_DESCRIPTORS(aau_context) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_SET_ARRAY_DESCRIPTORS(aau_context); \ + } else { \ + kvm_set_array_descriptors(aau_context); \ + } \ +}) + +#define PV_GET_SYNCHRONOUS_PART_V2(aau_context) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_GET_SYNCHRONOUS_PART_V2(aau_context); \ + } else { \ + kvm_get_synchronous_part(aau_context); \ + } \ +}) +#define PV_GET_SYNCHRONOUS_PART_V5(aau_context) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_GET_SYNCHRONOUS_PART_V5(aau_context); \ + } else { \ + kvm_get_synchronous_part(aau_context); \ + } \ +}) + +#define PV_GET_AAU_CONTEXT_V2(aau_context) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_GET_AAU_CONTEXT_V2(aau_context); \ + } else { \ + kvm_get_aau_context(aau_context); \ + } \ +}) +#define PV_GET_AAU_CONTEXT_V5(aau_context) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_GET_AAU_CONTEXT_V5(aau_context); \ + } else { \ + kvm_get_aau_context(aau_context); \ + } \ +}) + +static inline void +pv_get_array_descriptors_v2(e2k_aau_t *context) +{ + PV_GET_ARRAY_DESCRIPTORS_V2(context); +} +static inline void +pv_get_array_descriptors_v5(e2k_aau_t *context) +{ + PV_GET_ARRAY_DESCRIPTORS_V5(context); +} + +static inline void +pv_get_synchronous_part_v2(e2k_aau_t *context) +{ + PV_GET_SYNCHRONOUS_PART_V2(context); +} +static inline void +pv_get_synchronous_part_v5(e2k_aau_t *context) +{ + PV_GET_SYNCHRONOUS_PART_V5(context); +} + +/* + * It's taken that aasr was get earlier(from get_aau_context caller) + * and comparison with aasr.iab was taken. 
+ */ +static inline void +pv_get_aau_context_v2(e2k_aau_t *context) +{ + PV_GET_AAU_CONTEXT_V2(context); +} +static inline void +pv_get_aau_context_v5(e2k_aau_t *context) +{ + PV_GET_AAU_CONTEXT_V5(context); +} + +#else /* ! CONFIG_KVM_GUEST */ + #error "Unknown virtualization type" +#endif /* CONFIG_KVM_GUEST */ + +#ifdef CONFIG_PARAVIRT_GUEST +/* It is paravirtualized host and guest kernel */ + +#define SAVE_AAU_MASK_REGS(aau_context, aasr) \ + PV_SAVE_AAU_MASK_REGS(aau_context, aasr) + +#define RESTORE_AAU_MASK_REGS(aau_context) \ + PV_RESTORE_AAU_MASK_REGS(aau_context) + +#define SAVE_AADS(aau_regs) \ + PV_SAVE_AADS(aau_regs) + +#define RESTORE_AADS(aau_regs) \ + PV_RESTORE_AADS(aau_regs) + +#define SAVE_AALDIS_V2(regs) \ + PV_SAVE_AALDIS_V2(regs) +#define SAVE_AALDIS_V5(regs) \ + PV_SAVE_AALDIS_V5(regs) + +#define SAVE_AALDA(aaldas) \ + PV_SAVE_AALDAS(aaldas) + +#define SAVE_AAFSTR(regs) \ + PV_SAVE_AAFSTR_REG(regs) + +#define SAVE_AAU_REGS_FOR_PTRACE(regs, ti) \ + PV_SAVE_AAU_REGS_FOR_PTRACE(regs, ti) + +#define GET_ARRAY_DESCRIPTORS_V2(aau_context) \ + PV_GET_ARRAY_DESCRIPTORS_V2(aau_context) +#define GET_ARRAY_DESCRIPTORS_V5(aau_context) \ + PV_GET_ARRAY_DESCRIPTORS_V5(aau_context) + +#define GET_SYNCHRONOUS_PART_V2(aau_context) \ + PV_GET_SYNCHRONOUS_PART_V2(aau_context) +#define GET_SYNCHRONOUS_PART_V5(aau_context) \ + PV_GET_SYNCHRONOUS_PART_V5(aau_context) + +#define GET_AAU_CONTEXT_V2(context) \ + PV_GET_AAU_CONTEXT_V2(context) +#define GET_AAU_CONTEXT_V5(context) \ + PV_GET_AAU_CONTEXT_V5(context) + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* _E2K_ASM_PARAVIRT_AAU_CONTEXT_H_ */ diff --git a/arch/e2k/include/asm/paravirt/aau_regs_access.h b/arch/e2k/include/asm/paravirt/aau_regs_access.h new file mode 100644 index 000000000000..bb135ede625f --- /dev/null +++ b/arch/e2k/include/asm/paravirt/aau_regs_access.h @@ -0,0 +1,511 @@ +/* + * AAU registers description, macroses for load/store AAU context + * paravirtualized case + * + * array access 
descriptors (AAD0, ... , AAD31); + * initial indices (AIND0, ... , AAIND15); + * indices increment values (AAINCR0, ... , AAINCR7); + * current values of "prefetch" indices (AALDI0, ... , AALDI63); + * array prefetch initialization mask (AALDV); + * prefetch attributes (AALDA0, ... , AALDA63); + * array prefetch advance mask (AALDM); + * array access status register (AASR); + * array access fault status register (AAFSTR); + * current values of "store" indices (AASTI0, ... , AASTI15); + * store attributes (AASTA0, ... , AASTA15); + */ + +/* do not include this header directly, only through */ + +#ifndef _E2K_ASM_PARAVIRT_AAU_REGS_ACCESS_H_ +#define _E2K_ASM_PARAVIRT_AAU_REGS_ACCESS_H_ + +#include +#include + +static inline unsigned int pv_read_aasr_reg_value(void) +{ + if (!paravirt_enabled()) + return native_read_aasr_reg_value(); + else + return pv_cpu_ops.read_aasr_reg_value(); +} +static inline void pv_write_aasr_reg_value(unsigned int reg_value) +{ + if (!paravirt_enabled()) + native_write_aasr_reg_value(reg_value); + else + pv_cpu_ops.write_aasr_reg_value(reg_value); +} +static inline unsigned int pv_read_aafstr_reg_value(void) +{ + if (!paravirt_enabled()) + return native_read_aafstr_reg_value(); + else + return pv_cpu_ops.read_aafstr_reg_value(); +} +static inline void pv_write_aafstr_reg_value(unsigned int reg_value) +{ + if (!paravirt_enabled()) + native_write_aafstr_reg_value(reg_value); + else + pv_cpu_ops.write_aafstr_reg_value(reg_value); +} + +static __always_inline e2k_aasr_t +pv_read_aasr_reg(void) +{ + e2k_aasr_t aasr; + + AW(aasr) = pv_read_aasr_reg_value(); + return aasr; +} +static __always_inline void +pv_write_aasr_reg(e2k_aasr_t aasr) +{ + pv_write_aasr_reg_value(AW(aasr)); +} + +#ifdef CONFIG_KVM_GUEST +#include + +static inline u32 +pv_read_aaind_reg_value_v2(int AAIND_no) +{ + if (!paravirt_enabled()) + return native_read_aaind_reg_value_v2(AAIND_no); + else + return kvm_read_aaind_reg_value(AAIND_no); +} +static inline u64 
+pv_read_aaind_reg_value_v5(int AAIND_no) +{ + if (!paravirt_enabled()) + return native_read_aaind_reg_value_v5(AAIND_no); + else + return kvm_read_aaind_reg_value(AAIND_no); +} +static inline void +pv_write_aaind_reg_value(int AAIND_no, u64 reg_value) +{ + if (!paravirt_enabled()) + native_write_aaind_reg_value(AAIND_no, reg_value); + else + kvm_write_aaind_reg_value(AAIND_no, reg_value); +} + +static inline void +pv_read_aainds_pair_value_v2(int AAINDs_pair, u64 *lo_value, u64 *hi_value) +{ + if (!paravirt_enabled()) { + u32 value1, value2; + + NATIVE_READ_AAINDS_PAIR_VALUE_V2(AAINDs_pair, value1, value2); + *lo_value = value1; + *hi_value = value2; + } else { + kvm_read_aainds_pair_value(AAINDs_pair, lo_value, hi_value); + } +} +static inline void +pv_read_aainds_pair_value_v5(int AAINDs_pair, u64 *lo_value, u64 *hi_value) +{ + if (!paravirt_enabled()) { + u64 value1, value2; + + NATIVE_READ_AAINDS_PAIR_VALUE_V5(AAINDs_pair, value1, value2); + *lo_value = value1; + *hi_value = value2; + } else { + kvm_read_aainds_pair_value(AAINDs_pair, lo_value, hi_value); + } +} + +static inline void +pv_write_aainds_pair_value(int AAINDs_pair, u64 lo_value, u64 hi_value) +{ + if (!paravirt_enabled()) + native_write_aainds_pair_value(AAINDs_pair, lo_value, hi_value); + else + kvm_write_aainds_pair_value(AAINDs_pair, lo_value, hi_value); +} +static inline u32 +pv_read_aaind_tags_reg_value(void) +{ + if (!paravirt_enabled()) + return native_read_aaind_tags_reg_value(); + else + return kvm_read_aaind_tags_reg_value(); +} +static inline void +pv_write_aaind_tags_reg_value(u32 reg_value) +{ + if (!paravirt_enabled()) + native_write_aaind_tags_reg_value(reg_value); + else + kvm_write_aaind_tags_reg_value(reg_value); +} +static inline u32 +pv_read_aaincr_reg_value(int AAINCR_no) +{ + if (!paravirt_enabled()) + return native_read_aaincr_reg_value_v2(AAINCR_no); + else + return kvm_read_aaincr_reg_value(AAINCR_no); +} +static inline u64 +pv_read_aaincr_reg_value_v5(int AAINCR_no) +{ + 
if (!paravirt_enabled()) + return native_read_aaincr_reg_value_v5(AAINCR_no); + else + return kvm_read_aaincr_reg_value(AAINCR_no); +} +static inline void +pv_write_aaincr_reg_value(int AAINCR_no, u64 reg_value) +{ + if (!paravirt_enabled()) + native_write_aaincr_reg_value(AAINCR_no, reg_value); + else + kvm_write_aaincr_reg_value(AAINCR_no, reg_value); +} +static inline u32 +pv_read_aaincr_tags_reg_value(void) +{ + if (!paravirt_enabled()) + return native_read_aaincr_tags_reg_value(); + else + return kvm_read_aaincr_tags_reg_value(); +} +static inline void +pv_write_aaincr_tags_reg_value(u32 reg_value) +{ + if (!paravirt_enabled()) + native_write_aaincr_tags_reg_value(reg_value); + else + kvm_write_aaincr_tags_reg_value(reg_value); +} + +static inline void +pv_read_aaincrs_pair_value_v2(int AAINCRs_pair, u64 *lo_value, u64 *hi_value) +{ + if (!paravirt_enabled()) { + u32 value1, value2; + + NATIVE_READ_AAINCRS_PAIR_VALUE_V2(AAINCRs_pair, value1, value2); + *lo_value = value1; + *hi_value = value2; + } else { + kvm_read_aaincrs_pair_value(AAINCRs_pair, + lo_value, hi_value); + } +} +static inline void +pv_read_aaincrs_pair_value_v5(int AAINCRs_pair, u64 *lo_value, u64 *hi_value) +{ + if (!paravirt_enabled()) { + u64 value1, value2; + + NATIVE_READ_AAINCRS_PAIR_VALUE_V5(AAINCRs_pair, value1, value2); + *lo_value = value1; + *hi_value = value2; + } else { + kvm_read_aaincrs_pair_value(AAINCRs_pair, + lo_value, hi_value); + } +} + +static inline void +pv_write_aaincrs_pair_value(int AAINCRs_pair, u64 lo_value, u64 hi_value) +{ + if (!paravirt_enabled()) + native_write_aaincrs_pair_value(AAINCRs_pair, + lo_value, hi_value); + else + kvm_write_aaincrs_pair_value(AAINCRs_pair, + lo_value, hi_value); +} +static inline u32 +pv_read_aasti_reg_value_v2(int AASTI_no) +{ + if (!paravirt_enabled()) + return native_read_aasti_reg_value_v2(AASTI_no); + else + return kvm_read_aasti_reg_value(AASTI_no); +} +static inline u64 +pv_read_aasti_reg_value_v5(int AASTI_no) +{ + if 
(!paravirt_enabled()) + return native_read_aasti_reg_value_v5(AASTI_no); + else + return kvm_read_aasti_reg_value(AASTI_no); +} +static inline void +pv_write_aasti_reg_value(int AASTI_no, u32 reg_value) +{ + if (!paravirt_enabled()) + native_write_aasti_reg_value(AASTI_no, reg_value); + else + kvm_write_aasti_reg_value(AASTI_no, reg_value); +} +static inline u32 +pv_read_aasti_tags_reg_value(void) +{ + if (!paravirt_enabled()) + return native_read_aasti_tags_reg_value(); + else + return kvm_read_aasti_tags_reg_value(); +} +static inline void +pv_write_aasti_tags_reg_value(u32 reg_value) +{ + if (!paravirt_enabled()) + native_write_aasti_tags_reg_value(reg_value); + else + kvm_write_aasti_tags_reg_value(reg_value); +} + +static inline void +pv_read_aastis_pair_value_v2(int AASTIs_pair, u64 *lo_value, u64 *hi_value) +{ + if (!paravirt_enabled()) { + u32 value1, value2; + + NATIVE_READ_AASTIS_PAIR_VALUE_V2(AASTIs_pair, value1, value2); + *lo_value = value1; + *hi_value = value2; + } else { + kvm_read_aastis_pair_value(AASTIs_pair, lo_value, hi_value); + } +} +static inline void +pv_read_aastis_pair_value_v5(int AASTIs_pair, u64 *lo_value, u64 *hi_value) +{ + if (!paravirt_enabled()) { + u64 value1, value2; + + NATIVE_READ_AASTIS_PAIR_VALUE_V5(AASTIs_pair, value1, value2); + *lo_value = value1; + *hi_value = value2; + } else { + kvm_read_aastis_pair_value(AASTIs_pair, lo_value, hi_value); + } +} + +static inline void +pv_write_aastis_pair_value(int AASTIs_pair, u64 lo_value, u64 hi_value) +{ + if (!paravirt_enabled()) + native_write_aastis_pair_value(AASTIs_pair, lo_value, hi_value); + else + kvm_write_aastis_pair_value(AASTIs_pair, lo_value, hi_value); +} + +static inline void +pv_read_aaldi_reg_value_v2(int AALDI_no, u64 *l_value, u64 *r_value) +{ + if (!paravirt_enabled()) + native_read_aaldi_reg_value_v2(AALDI_no, l_value, r_value); + else + kvm_read_aaldi_reg_value(AALDI_no, l_value, r_value); +} +static inline void +pv_read_aaldi_reg_value_v5(int AALDI_no, u64 
*l_value, u64 *r_value) +{ + if (!paravirt_enabled()) + native_read_aaldi_reg_value_v5(AALDI_no, l_value, r_value); + else + kvm_read_aaldi_reg_value(AALDI_no, l_value, r_value); +} + +static inline void +pv_write_aaldi_reg_value(int AALDI_no, u64 l_value, u64 r_value) +{ + if (!paravirt_enabled()) + native_write_aaldi_reg_value(AALDI_no, l_value, r_value); + else + kvm_write_aaldi_reg_value(AALDI_no, l_value, r_value); +} + +static inline void +pv_read_aaldas_reg_value(int AALDAs_no, u32 *l_value, u32 *r_value) +{ + if (!paravirt_enabled()) + native_read_aaldas_reg_value(AALDAs_no, l_value, r_value); + else + kvm_read_aaldas_reg_value(AALDAs_no, l_value, r_value); +} + +static inline void +pv_write_aaldas_reg_value(int AALDAs_no, u32 l_value, u32 r_value) +{ + if (!paravirt_enabled()) + native_write_aaldas_reg_value(AALDAs_no, l_value, r_value); + else + kvm_write_aaldas_reg_value(AALDAs_no, l_value, r_value); +} +static inline void +pv_read_aaldm_reg_value(u32 *lo_value, u32 *hi_value) +{ + if (!paravirt_enabled()) + native_read_aaldm_reg_value(lo_value, hi_value); + else + kvm_read_aaldm_reg_value(lo_value, hi_value); +} +static inline void +pv_write_aaldm_reg_value(u32 lo_value, u32 hi_value) +{ + if (!paravirt_enabled()) + native_write_aaldm_reg_value(lo_value, hi_value); + else + kvm_write_aaldm_reg_value(lo_value, hi_value); +} +static inline void +pv_read_aaldm_reg(e2k_aaldm_t *aaldm) +{ + if (!paravirt_enabled()) + native_read_aaldm_reg(aaldm); + else + kvm_read_aaldm_reg(aaldm); +} +static inline void +pv_write_aaldm_reg(e2k_aaldm_t *aaldm) +{ + if (!paravirt_enabled()) + native_write_aaldm_reg(aaldm); + else + kvm_write_aaldm_reg(aaldm); +} +static inline void +pv_read_aaldv_reg_value(u32 *lo_value, u32 *hi_value) +{ + if (!paravirt_enabled()) + native_read_aaldv_reg_value(lo_value, hi_value); + else + kvm_read_aaldv_reg_value(lo_value, hi_value); +} +static inline void +pv_write_aaldv_reg_value(u32 lo_value, u32 hi_value) +{ + if (!paravirt_enabled()) + 
native_write_aaldv_reg_value(lo_value, hi_value); + else + kvm_write_aaldv_reg_value(lo_value, hi_value); +} +static inline void +pv_read_aaldv_reg(e2k_aaldv_t *aaldv) +{ + if (!paravirt_enabled()) + native_read_aaldv_reg(aaldv); + else + kvm_read_aaldv_reg(aaldv); +} +static inline void +pv_write_aaldv_reg(e2k_aaldv_t *aaldv) +{ + if (!paravirt_enabled()) + native_write_aaldv_reg(aaldv); + else + kvm_write_aaldv_reg(aaldv); +} + +static inline void +pv_read_aad_reg(int AAD_no, e2k_aadj_t *mem_p) +{ + if (!paravirt_enabled()) + native_read_aad_reg(AAD_no, mem_p); + else + kvm_read_aad_reg(AAD_no, mem_p); +} + +static inline void +pv_write_aad_reg(int AAD_no, e2k_aadj_t *mem_p) +{ + if (!paravirt_enabled()) + native_write_aad_reg(AAD_no, mem_p); + else + kvm_write_aad_reg(AAD_no, mem_p); +} + +static inline void +pv_read_aads_4_reg(int AADs_no, e2k_aadj_t *mem_p) +{ + if (!paravirt_enabled()) + native_read_aads_4_reg(AADs_no, mem_p); + else + kvm_read_aads_4_reg(AADs_no, mem_p); +} + +static inline void +pv_write_aads_4_reg(int AADs_no, e2k_aadj_t *mem_p) +{ + if (!paravirt_enabled()) + native_write_aads_4_reg(AADs_no, mem_p); + else + kvm_write_aads_4_reg(AADs_no, mem_p); +} + +#else /* ! 
CONFIG_KVM_GUEST */ + #error "Unknown virtualization type" +#endif /* CONFIG_KVM_GUEST */ + +#ifdef CONFIG_PARAVIRT_GUEST +/* It is paravirtualized host and guest kernel */ + +#include + +static __always_inline u32 +read_aasr_reg_value(void) +{ + return pv_read_aasr_reg_value(); +} +static __always_inline void +write_aasr_reg_value(u32 reg_value) +{ + pv_write_aasr_reg_value(reg_value); +} +static __always_inline e2k_aasr_t +read_aasr_reg(void) +{ + return pv_read_aasr_reg(); +} +static __always_inline void +write_aasr_reg(e2k_aasr_t aasr) +{ + pv_write_aasr_reg(aasr); +} +static inline u32 +read_aafstr_reg_value(void) +{ + return pv_read_aafstr_reg_value(); +} +static inline void +write_aafstr_reg_value(u32 reg_value) +{ + pv_write_aafstr_reg_value(reg_value); +} +static inline void +read_aaldm_reg(e2k_aaldm_t *aaldm) +{ + pv_read_aaldm_reg_value(&aaldm->lo, &aaldm->hi); +} +static inline void +write_aaldm_reg(e2k_aaldm_t *aaldm) +{ + pv_write_aaldm_reg_value(aaldm->lo, aaldm->hi); +} +static inline void +read_aaldv_reg(e2k_aaldv_t *aaldv) +{ + pv_read_aaldv_reg_value(&aaldv->lo, &aaldv->hi); +} +static inline void +write_aaldv_reg(e2k_aaldv_t *aaldv) +{ + pv_write_aaldv_reg_value(aaldv->lo, aaldv->hi); /* fix: was pv_write_aaldm_reg_value (copy-paste) - this accessor must write AALDV, not AALDM */ +} + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* _E2K_ASM_PARAVIRT_AAU_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/paravirt/apic.h b/arch/e2k/include/asm/paravirt/apic.h new file mode 100644 index 000000000000..39131a4e6eee --- /dev/null +++ b/arch/e2k/include/asm/paravirt/apic.h @@ -0,0 +1,56 @@ +#ifndef __ASM_PARAVIRT_APIC_H +#define __ASM_PARAVIRT_APIC_H + +#ifdef __KERNEL__ +#include +#include + +/* + * Basic functions accessing virtual Local APICs on guest. 
+ */ + +static inline unsigned int pv_arch_apic_read(unsigned int reg) +{ + return pv_apic_ops.apic_read(reg); +} + +static inline void pv_arch_apic_write(unsigned int reg, unsigned int v) +{ + pv_apic_ops.apic_write(reg, v); +} + +static inline unsigned int boot_pv_arch_apic_read(unsigned int reg) +{ + return BOOT_PARAVIRT_APIC_READ(reg); +} + +static inline void boot_pv_arch_apic_write(unsigned int reg, unsigned int v) +{ + BOOT_PARAVIRT_APIC_WRITE(reg, v); +} + +#ifdef CONFIG_PARAVIRT_GUEST + +static inline void arch_apic_write(unsigned int reg, unsigned int v) +{ + pv_arch_apic_write(reg, v); +} + +static inline unsigned int arch_apic_read(unsigned int reg) +{ + return pv_arch_apic_read(reg); +} +static inline void boot_arch_apic_write(unsigned int reg, unsigned int v) +{ + boot_pv_arch_apic_write(reg, v); +} + +static inline unsigned int boot_arch_apic_read(unsigned int reg) +{ + return boot_pv_arch_apic_read(reg); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_PARAVIRT_APIC_H */ diff --git a/arch/e2k/include/asm/paravirt/area_alloc.h b/arch/e2k/include/asm/paravirt/area_alloc.h new file mode 100644 index 000000000000..271220bc6ae1 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/area_alloc.h @@ -0,0 +1,152 @@ +#ifndef __ASM_PARAVIRT_AREA_ALLOC_H +#define __ASM_PARAVIRT_AREA_ALLOC_H + +#ifdef __KERNEL__ + +#include +#include + +static inline int +pv_register_kernel_hw_stack(e2k_addr_t stack_base, e2k_size_t stack_size) +{ + if (pv_cpu_ops.register_kernel_hw_stack == NULL) + return 0; + return pv_cpu_ops.register_kernel_hw_stack(stack_base, stack_size); +} + +static inline int +boot_pv_register_kernel_hw_stack(e2k_addr_t stack_base, e2k_size_t stack_size) +{ + if (BOOT_PARAVIRT_GET_CPU_FUNC(register_kernel_hw_stack) == NULL) + return 0; + return BOOT_PARAVIRT_GET_CPU_FUNC( + register_kernel_hw_stack)(stack_base, stack_size); +} + +static inline int +pv_register_kernel_data_stack(e2k_addr_t stack_base, e2k_size_t 
stack_size) +{ + if (pv_cpu_ops.register_kernel_data_stack == NULL) + return 0; + return pv_cpu_ops.register_kernel_data_stack(stack_base, stack_size); +} + +static inline int +boot_pv_register_kernel_data_stack(e2k_addr_t stack_base, e2k_size_t stack_size) +{ + if (BOOT_PARAVIRT_GET_CPU_FUNC(register_kernel_data_stack) == NULL) + return 0; + return BOOT_PARAVIRT_GET_CPU_FUNC( + register_kernel_data_stack)(stack_base, stack_size); +} + +static inline void +pv_unregister_kernel_hw_stack(e2k_addr_t stack_base, e2k_size_t stack_size) +{ + if (pv_cpu_ops.unregister_kernel_hw_stack == NULL) + return; + pv_cpu_ops.unregister_kernel_hw_stack(stack_base, stack_size); +} + +static inline void +boot_pv_unregister_kernel_hw_stack(e2k_addr_t stack_base, e2k_size_t stack_size) +{ + if (BOOT_PARAVIRT_GET_CPU_FUNC(unregister_kernel_hw_stack) == NULL) + return; + BOOT_PARAVIRT_GET_CPU_FUNC( + unregister_kernel_hw_stack)(stack_base, stack_size); +} + +static inline void +pv_unregister_kernel_data_stack(e2k_addr_t stack_base, e2k_size_t stack_size) +{ + if (pv_cpu_ops.unregister_kernel_data_stack == NULL) + return; + pv_cpu_ops.unregister_kernel_data_stack(stack_base, stack_size); /* fix: was unregister_kernel_hw_stack (copy-paste) - must use the data-stack op, as boot_pv_unregister_kernel_data_stack does */ +} + +static inline void +boot_pv_unregister_kernel_data_stack(e2k_addr_t stack_base, + e2k_size_t stack_size) +{ + if (BOOT_PARAVIRT_GET_CPU_FUNC(unregister_kernel_data_stack) == NULL) + return; + BOOT_PARAVIRT_GET_CPU_FUNC( + unregister_kernel_data_stack)(stack_base, stack_size); +} + +static inline int +pv_kmem_area_host_chunk(e2k_addr_t stack_base, e2k_size_t stack_size, + int hw_stack) +{ + if (pv_cpu_ops.kmem_area_host_chunk == NULL) + return 0; + return pv_cpu_ops.kmem_area_host_chunk(stack_base, stack_size, + hw_stack); +} + +static inline void +pv_kmem_area_unhost_chunk(e2k_addr_t stack_base, e2k_size_t stack_size) +{ + if (pv_cpu_ops.kmem_area_unhost_chunk == NULL) + return; + pv_cpu_ops.kmem_area_unhost_chunk(stack_base, stack_size); +} + +#ifdef CONFIG_PARAVIRT_GUEST +static inline int 
register_kernel_hw_stack(e2k_addr_t stack_base, + e2k_size_t stack_size) +{ + return pv_register_kernel_hw_stack(stack_base, stack_size); +} +static inline int register_kernel_data_stack(e2k_addr_t stack_base, + e2k_size_t stack_size) +{ + return pv_register_kernel_data_stack(stack_base, stack_size); +} +static inline int boot_register_kernel_hw_stack(e2k_addr_t stack_base, + e2k_size_t stack_size) +{ + return boot_pv_register_kernel_hw_stack(stack_base, stack_size); +} +static inline int boot_register_kernel_data_stack(e2k_addr_t stack_base, + e2k_size_t stack_size) +{ + return boot_pv_register_kernel_data_stack(stack_base, stack_size); +} +static inline void unregister_kernel_hw_stack(e2k_addr_t stack_base, + e2k_size_t stack_size) +{ + pv_unregister_kernel_hw_stack(stack_base, stack_size); +} +static inline void unregister_kernel_data_stack(e2k_addr_t stack_base, + e2k_size_t stack_size) +{ + pv_unregister_kernel_data_stack(stack_base, stack_size); +} +static inline void boot_unregister_kernel_hw_stack(e2k_addr_t stack_base, + e2k_size_t stack_size) +{ + boot_pv_unregister_kernel_hw_stack(stack_base, stack_size); +} +static inline void boot_unregister_kernel_data_stack(e2k_addr_t stack_base, + e2k_size_t stack_size) +{ + boot_pv_unregister_kernel_data_stack(stack_base, stack_size); +} +static inline int +kmem_area_host_chunk(e2k_addr_t stack_base, + e2k_size_t stack_size, int hw_flag) +{ + return pv_kmem_area_host_chunk(stack_base, stack_size, hw_flag); +} +static inline void +kmem_area_unhost_chunk(e2k_addr_t stack_base, e2k_size_t stack_size) +{ + pv_kmem_area_unhost_chunk(stack_base, stack_size); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_PARAVIRT_AREA_ALLOC_H */ diff --git a/arch/e2k/include/asm/paravirt/atomic_api.h b/arch/e2k/include/asm/paravirt/atomic_api.h new file mode 100644 index 000000000000..a80fe0275913 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/atomic_api.h @@ -0,0 +1,38 @@ +#ifndef 
_ASM_E2K_PARAVIRT_ATOMIC_API_H_ +#define _ASM_E2K_PARAVIRT_ATOMIC_API_H_ + +#include +#include + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +/* FIXME: on guest it is not implemented hardware bugs workarounds, */ +/* because of such workarounds contain privileged actions and */ +/* can be done only on host using appropriate hypercalls */ + +#ifdef CONFIG_KVM_GUEST +#include + +#define PV_HWBUG_AFTER_LD_ACQ() NATIVE_HWBUG_AFTER_LD_ACQ() +#else /* ! CONFIG_KVM_GUEST */ + #error "Unknown virtualization type" +#endif /* CONFIG_KVM_GUEST */ + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ + +/* FIXME: examine bare hardware bugs only for host */ +/* guest virtual machine should examine host machine bugs too, but now */ +/* it is not implemented */ +#define virt_cpu_has(hwbug) ((!paravirt_enabled()) && cpu_has(hwbug)) + +#define VIRT_HWBUG_AFTER_LD_ACQ() PV_HWBUG_AFTER_LD_ACQ() +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* ! __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* _ASM_E2K_PARAVIRT_ATOMIC_API_H_ */ diff --git a/arch/e2k/include/asm/paravirt/boot.h b/arch/e2k/include/asm/paravirt/boot.h new file mode 100644 index 000000000000..3e3cd3603064 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/boot.h @@ -0,0 +1,201 @@ +/* + * E2K boot-time initializtion virtualization for paravirtualized kernel + * + * Copyright 2016 Salavat S. 
Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_ASM_PARAVIRT_BOOT_H_ +#define _E2K_ASM_PARAVIRT_BOOT_H_ + +#ifndef __ASSEMBLY__ + +#include +#include +#include + +static inline void +boot_pv_setup_machine_id(bootblock_struct_t *bootblock) +{ + BOOT_PARAVIRT_GET_BOOT_FUNC(boot_setup_machine_id)(bootblock); +} +static inline int +boot_pv_loader_probe_memory(struct node_phys_mem *nodes_phys_mem, + boot_info_t *bootblock) +{ + return BOOT_PARAVIRT_GET_BOOT_FUNC(boot_loader_probe_memory) + (nodes_phys_mem, bootblock); +} + +static inline e2k_size_t +boot_pv_get_bootblock_size(boot_info_t *bootblock) +{ + return BOOT_PARAVIRT_GET_BOOT_FUNC(boot_get_bootblock_size) + (bootblock); +} + +static inline void +boot_pv_reserve_all_bootmem(bool bsp, boot_info_t *boot_info) +{ + BOOT_PARAVIRT_GET_BOOT_FUNC(boot_reserve_all_bootmem)(bsp, boot_info); +} + +static inline void +boot_pv_map_all_bootmem(bool bsp, boot_info_t *boot_info) +{ + BOOT_PARAVIRT_GET_BOOT_FUNC(boot_map_all_bootmem)(bsp, boot_info); +} +static inline void +boot_pv_map_needful_to_equal_virt_area(e2k_addr_t stack_top_addr) +{ + BOOT_PARAVIRT_GET_BOOT_FUNC(boot_map_needful_to_equal_virt_area) + (stack_top_addr); +} +static inline void +boot_pv_kernel_switch_to_virt(bool bsp, int cpuid, + void (*boot_init_sequel_func)(bool bsp, int cpuid, int cpus_to_sync)) +{ + BOOT_PARAVIRT_GET_BOOT_FUNC(boot_kernel_switch_to_virt) + (bsp, cpuid, boot_init_sequel_func); +} +static inline void +boot_pv_cpu_relax(void) +{ + BOOT_PARAVIRT_GET_BOOT_FUNC(boot_cpu_relax)(); +} + +#ifdef CONFIG_SMP +static inline int +boot_pv_smp_cpu_config(boot_info_t *bootblock) +{ + return BOOT_PARAVIRT_GET_BOOT_FUNC(boot_smp_cpu_config)(bootblock); +} +static inline void +boot_pv_smp_node_config(boot_info_t *bootblock) +{ + BOOT_PARAVIRT_GET_BOOT_FUNC(boot_smp_node_config)(bootblock); +} +#endif /* CONFIG_SMP */ + +static inline void +boot_pv_clear_bss(void) +{ + BOOT_PARAVIRT_GET_BOOT_FUNC(boot_clear_bss)(); +} +static inline void __init 
+boot_pv_check_bootblock(bool bsp, bootblock_struct_t *bootblock) +{ + BOOT_PARAVIRT_GET_BOOT_FUNC(boot_check_bootblock)(bsp, bootblock); +} +static inline void +pv_init_terminate_boot_init(bool bsp, int cpuid) +{ + pv_boot_ops.init_terminate_boot_init(bsp, cpuid); +} + +static inline void +boot_pv_parse_param(bootblock_struct_t *bootblock) +{ + BOOT_PARAVIRT_GET_BOOT_FUNC(boot_parse_param)(bootblock); +} + +#define boot_pv_panic(fmt, args...) \ + BOOT_PARAVIRT_GET_BOOT_FUNC(do_boot_panic)(fmt, ##args); + +extern void native_pv_ops_to_boot_ops(void); +extern void native_boot_pv_ops_to_ops(void); + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +static inline void +boot_setup_machine_id(bootblock_struct_t *bootblock) +{ + boot_pv_setup_machine_id(bootblock); +} +static inline int __init +boot_loader_probe_memory(node_phys_mem_t *nodes_phys_mem, + boot_info_t *bootblock) +{ + return boot_pv_loader_probe_memory(nodes_phys_mem, bootblock); +} + +static inline e2k_size_t __init +boot_get_bootblock_size(boot_info_t *bootblock) +{ + return boot_pv_get_bootblock_size(bootblock); +} + +#define boot_panic(fmt, args...) 
boot_pv_panic(fmt, ##args) + +static inline void +boot_cpu_relax(void) +{ + boot_pv_cpu_relax(); +} + +#ifdef CONFIG_SMP +static inline e2k_size_t __init +boot_smp_cpu_config(boot_info_t *bootblock) +{ + return boot_pv_smp_cpu_config(bootblock); +} + +static inline void __init +boot_smp_node_config(boot_info_t *bootblock) +{ + boot_pv_smp_node_config(bootblock); +} +#endif /* CONFIG_SMP */ + +static inline void __init +boot_reserve_all_bootmem(bool bsp, boot_info_t *boot_info) +{ + boot_pv_reserve_all_bootmem(bsp, boot_info); +} + +static inline void __init +boot_map_all_bootmem(bool bsp, boot_info_t *boot_info) +{ + boot_pv_map_all_bootmem(bsp, boot_info); +} + +static inline void __init_recv +boot_map_needful_to_equal_virt_area(e2k_addr_t stack_top_addr) +{ + boot_pv_map_needful_to_equal_virt_area(stack_top_addr); +} + +static inline void __init_recv +boot_kernel_switch_to_virt(bool bsp, int cpuid, + void (*boot_init_sequel_func)(bool bsp, int cpuid, int cpus_to_sync)) +{ + boot_pv_kernel_switch_to_virt(bsp, cpuid, boot_init_sequel_func); +} + +static inline void __init +init_terminate_boot_init(bool bsp, int cpuid) +{ + pv_init_terminate_boot_init(bsp, cpuid); +} + +static inline void __init +boot_parse_param(bootblock_struct_t *bootblock) +{ + boot_pv_parse_param(bootblock); +} + +static inline void __init +boot_clear_bss(void) +{ + boot_pv_clear_bss(); +} +static inline void __init +boot_check_bootblock(bool bsp, bootblock_struct_t *bootblock) +{ + boot_pv_check_bootblock(bsp, bootblock); +} + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* ! __ASSEMBLY__ */ + +#endif /* _E2K_ASM_PARAVIRT_BOOT_H_ */ diff --git a/arch/e2k/include/asm/paravirt/boot_flags.h b/arch/e2k/include/asm/paravirt/boot_flags.h new file mode 100644 index 000000000000..a7219350dc19 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/boot_flags.h @@ -0,0 +1,52 @@ +/* + * E2K boot info flags support on paravirtualized kernel + * + * Copyright 2016 Salavat S. 
Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_ASM_PARAVIRT_BOOT_FLAGS_H_ +#define _E2K_ASM_PARAVIRT_BOOT_FLAGS_H_ + +#ifndef __ASSEMBLY__ + +#include +#include + +#ifdef CONFIG_KVM_GUEST +#include +#else /* ! CONFIG_KVM_GUEST */ + #error "Unknown virtualization type" +#endif /* CONFIG_KVM_GUEST */ + +/* + * bootblock manipulations (read/write/set/reset) in virtual kernel mode + * on physical level: + * write through and uncachable access on virtual "physical" address + * bootblock virtual address can be only read + */ + +#define PV_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) \ + ((!paravirt_enabled()) ? \ + NATIVE_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) \ + : \ + GUEST_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field)) +#define PV_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, field_value) \ + ((!paravirt_enabled()) ? \ + NATIVE_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, \ + field_value) \ + : \ + GUEST_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, \ + field_value)) + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +#define READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) \ + PV_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) +#define WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, field_value) \ + PV_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, \ + field_value) +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* ! __ASSEMBLY__ */ + +#endif /* ! 
_E2K_ASM_PARAVIRT_BOOT_FLAGS_H_ */ diff --git a/arch/e2k/include/asm/paravirt/cacheflush.h b/arch/e2k/include/asm/paravirt/cacheflush.h new file mode 100644 index 000000000000..98023398eb73 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/cacheflush.h @@ -0,0 +1,88 @@ +#ifndef __ASM_PARAVIRT_CACHEFLUSH_H +#define __ASM_PARAVIRT_CACHEFLUSH_H + +#include +#include + +#ifdef CONFIG_SMP +static inline void +pv_smp_flush_icache_range(e2k_addr_t start, e2k_addr_t end) +{ + pv_cpu_ops.smp_flush_icache_range(start, end); +} +static inline void +pv_smp_flush_icache_range_array(void *icache_range_arr) +{ + pv_cpu_ops.smp_flush_icache_range_array(icache_range_arr); +} +static inline void +pv_smp_flush_icache_page(struct vm_area_struct *vma, struct page *page) +{ + pv_cpu_ops.smp_flush_icache_page(vma, page); +} +static inline void +pv_smp_flush_icache_all(void) +{ + pv_cpu_ops.smp_flush_icache_all(); +} +static inline void +pv_smp_flush_icache_kernel_line(e2k_addr_t addr) +{ + pv_cpu_ops.smp_flush_icache_kernel_line(addr); +} +#endif /* CONFIG_SMP */ + +static inline void +pv_flush_DCACHE_range(void *addr, size_t len) +{ + pv_mmu_ops.flush_dcache_range(addr, len); +} +static inline void +pv_clear_DCACHE_L1_range(void *virt_addr, size_t len) +{ + pv_mmu_ops.clear_dcache_l1_range(virt_addr, len); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +#ifdef CONFIG_SMP +static inline void +smp_flush_icache_range(e2k_addr_t start, e2k_addr_t end) +{ + pv_smp_flush_icache_range(start, end); +} +static inline void +smp_flush_icache_range_array(struct icache_range_array *icache_range_arr) +{ + pv_smp_flush_icache_range_array(icache_range_arr); +} +static inline void +smp_flush_icache_page(struct vm_area_struct *vma, struct page *page) +{ + pv_smp_flush_icache_page(vma, page); +} +static inline void +smp_flush_icache_all(void) +{ + pv_smp_flush_icache_all(); +} +static inline void +smp_flush_icache_kernel_line(e2k_addr_t addr) +{ + 
pv_smp_flush_icache_kernel_line(addr); +} +#endif /* CONFIG_SMP */ + +static inline void +flush_DCACHE_range(void *addr, size_t len) +{ + pv_flush_DCACHE_range(addr, len); +} +static inline void +clear_DCACHE_L1_range(void *virt_addr, size_t len) +{ + pv_clear_DCACHE_L1_range(virt_addr, len); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __ASM_PARAVIRT_CACHEFLUSH_H */ diff --git a/arch/e2k/include/asm/paravirt/clkr.h b/arch/e2k/include/asm/paravirt/clkr.h new file mode 100644 index 000000000000..0e4d54c19ff6 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/clkr.h @@ -0,0 +1,20 @@ +#ifndef _ASM_E2K_PARAVIRT_CLKR_H +#define _ASM_E2K_PARAVIRT_CLKR_H + +#include +#include + +static inline unsigned long long pv_do_sched_clock(void) +{ + return pv_time_ops.do_sched_clock(); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized guest and host kernel */ +static inline unsigned long long do_sched_clock(void) +{ + return pv_do_sched_clock(); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* _ASM_E2K_PARAVIRT_CLKR_H */ diff --git a/arch/e2k/include/asm/paravirt/console.h b/arch/e2k/include/asm/paravirt/console.h new file mode 100644 index 000000000000..85d476820a52 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/console.h @@ -0,0 +1,39 @@ + +#ifndef _ASM_E2K_PARAVIRT_CONSOLE_H_ +#define _ASM_E2K_PARAVIRT_CONSOLE_H_ + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +#include + +#ifdef CONFIG_KVM_GUEST +#include +#include + +static inline void +pv_virt_console_dump_putc(char c) +{ + if (!paravirt_enabled()) + native_virt_console_dump_putc(c); + else + kvm_virt_console_dump_putc(c); +} +#else /* ! 
CONFIG_KVM_GUEST */ + #error "Unknown virtualization type" +#endif /* CONFIG_KVM_GUEST */ + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized guest and host kernel */ +static inline void +virt_console_dump_putc(char c) +{ + pv_virt_console_dump_putc(c); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ +#endif /* _ASM_E2K_PARAVIRT_CONSOLE_H_ */ diff --git a/arch/e2k/include/asm/paravirt/cpu.h b/arch/e2k/include/asm/paravirt/cpu.h new file mode 100644 index 000000000000..1da54e450baf --- /dev/null +++ b/arch/e2k/include/asm/paravirt/cpu.h @@ -0,0 +1,99 @@ +#ifndef __ASM_E2K_PARAVIRT_CPU_H +#define __ASM_E2K_PARAVIRT_CPU_H + +#ifdef __KERNEL__ +#include +#include +#include + +#ifdef CONFIG_KVM_GUEST +#include + +#define PV_VIRQ_VCPU_NO_TO_VCPU_ID(virq_vcpu_id) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_VIRQ_VCPU_NO_TO_VCPU_ID(virq_vcpu_id); \ + } else { \ + KVM_VIRQ_VCPU_NO_TO_VCPU_ID(virq_vcpu_id); \ + } \ +}) +#define PV_VCPU_ID_TO_VIRQ_VCPU_NO(vcpu_id) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_VCPU_ID_TO_VIRQ_VCPU_NO(vcpu_id); \ + } else { \ + KVM_VCPU_ID_TO_VIRQ_VCPU_NO(vcpu_id); \ + } \ +}) +#define PV_IS_ID_VIRQ_VCPU_NO(cpu_id) \ +({ \ + bool pv_is; \ + if (!paravirt_enabled()) { \ + pv_is = NATIVE_IS_ID_VIRQ_VCPU_NO(cpu_id); \ + } else { \ + pv_is = KVM_IS_ID_VIRQ_VCPU_NO(cpu_id); \ + } \ + pv_is; \ +}) +#define PV_IS_ID_VCPU_ID(cpu_id) \ +({ \ + bool pv_is; \ + if (!paravirt_enabled()) { \ + pv_is = NATIVE_IS_ID_VCPU_ID(cpu_id); \ + } else { \ + pv_is = KVM_IS_ID_VCPU_ID(cpu_id); \ + } \ + pv_is; \ +}) +#define PV_CONVERT_VIRQ_VCPU_NO_TO_VCPU_ID(virq_vcpu_id) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_CONVERT_VIRQ_VCPU_NO_TO_VCPU_ID(virq_vcpu_id); \ + } else { \ + KVM_CONVERT_VIRQ_VCPU_NO_TO_VCPU_ID(virq_vcpu_id); \ + } \ +}) +#define PV_CONVERT_VCPU_ID_TO_VIRQ_VCPU_NO(vcpu_id) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_CONVERT_VCPU_ID_TO_VIRQ_VCPU_NO(vcpu_id); \ + } else { \ + 
KVM_CONVERT_VCPU_ID_TO_VIRQ_VCPU_NO(vcpu_id); \ + } \ +}) +#else /* ! CONFIG_KVM_GUEST */ + #error "Unknown virtualization type" +#endif /* CONFIG_KVM_GUEST */ + +static inline unsigned long +pv_get_cpu_running_cycles(void) +{ + return pv_time_ops.get_cpu_running_cycles(); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized guest and host kernel */ + +#define VIRQ_VCPU_NO_TO_VCPU_ID(virq_vcpu_id) \ + PV_VIRQ_VCPU_NO_TO_VCPU_ID(virq_vcpu_id) +#define VCPU_ID_TO_VIRQ_VCPU_NO(vcpu_id) \ + PV_VCPU_ID_TO_VIRQ_VCPU_NO(vcpu_id) +#define IS_ID_VIRQ_VCPU_NO(cpu_id) \ + PV_IS_ID_VIRQ_VCPU_NO(cpu_id) +#define IS_ID_VCPU_ID(cpu_id) \ + PV_IS_ID_VCPU_ID(cpu_id) +#define CONVERT_VIRQ_VCPU_NO_TO_VCPU_ID(virq_vcpu_id) \ + PV_CONVERT_VIRQ_VCPU_NO_TO_VCPU_ID(virq_vcpu_id) +#define CONVERT_VCPU_ID_TO_VIRQ_VCPU_NO(vcpu_id) \ + PV_CONVERT_VCPU_ID_TO_VIRQ_VCPU_NO(vcpu_id) + +static inline unsigned long +get_cpu_running_cycles(void) +{ + return pv_get_cpu_running_cycles(); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_E2K_PARAVIRT_CPU_H */ diff --git a/arch/e2k/include/asm/paravirt/cpu_regs_access.h b/arch/e2k/include/asm/paravirt/cpu_regs_access.h new file mode 100644 index 000000000000..d4a4e10a3991 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/cpu_regs_access.h @@ -0,0 +1,1833 @@ + +#ifndef _E2K_PARAVIRT_CPU_REGS_ACCESS_H_ +#define _E2K_PARAVIRT_CPU_REGS_ACCESS_H_ + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include + +static inline void PV_PUT_UPDATED_CPU_REGS_FLAGS(unsigned long flags) +{ + if (pv_cpu_ops.put_updated_cpu_regs_flags == NULL) + return; + pv_cpu_ops.put_updated_cpu_regs_flags(flags); +} + +static inline unsigned long long PV_READ_OSCUD_LO_REG_VALUE(void) +{ + return pv_cpu_ops.read_OSCUD_lo_reg_value(); +} + +static inline unsigned long long PV_READ_OSCUD_HI_REG_VALUE(void) +{ + return pv_cpu_ops.read_OSCUD_hi_reg_value(); +} + +static inline void PV_WRITE_OSCUD_LO_REG_VALUE(unsigned 
long reg_value) +{ + pv_cpu_ops.write_OSCUD_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_OSCUD_HI_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_OSCUD_hi_reg_value(reg_value); +} + +static inline unsigned long long BOOT_PV_READ_OSCUD_LO_REG_VALUE(void) +{ + return BOOT_PARAVIRT_READ_REG(read_OSCUD_lo_reg_value); +} + +static inline unsigned long long BOOT_PV_READ_OSCUD_HI_REG_VALUE(void) +{ + return BOOT_PARAVIRT_READ_REG(read_OSCUD_hi_reg_value); +} + +static inline void BOOT_PV_WRITE_OSCUD_LO_REG_VALUE(unsigned long reg_value) +{ + BOOT_PARAVIRT_WRITE_REG(write_OSCUD_lo_reg_value, reg_value); +} + +static inline void BOOT_PV_WRITE_OSCUD_HI_REG_VALUE(unsigned long reg_value) +{ + BOOT_PARAVIRT_WRITE_REG(write_OSCUD_hi_reg_value, reg_value); +} + +static inline unsigned long long PV_READ_OSGD_LO_REG_VALUE(void) +{ + return pv_cpu_ops.read_OSGD_lo_reg_value(); +} + +static inline unsigned long long PV_READ_OSGD_HI_REG_VALUE(void) +{ + return pv_cpu_ops.read_OSGD_hi_reg_value(); +} +static inline unsigned long long BOOT_PV_READ_OSGD_LO_REG_VALUE(void) +{ + return BOOT_PARAVIRT_READ_REG(read_OSGD_lo_reg_value); +} + +static inline unsigned long long BOOT_PV_READ_OSGD_HI_REG_VALUE(void) +{ + return BOOT_PARAVIRT_READ_REG(read_OSGD_hi_reg_value); +} + +static inline void PV_WRITE_OSGD_LO_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_OSGD_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_OSGD_HI_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_OSGD_hi_reg_value(reg_value); +} + +static inline void BOOT_PV_WRITE_OSGD_LO_REG_VALUE(unsigned long reg_value) +{ + BOOT_PARAVIRT_WRITE_REG(write_OSGD_lo_reg_value, reg_value); +} + +static inline void BOOT_PV_WRITE_OSGD_HI_REG_VALUE(unsigned long reg_value) +{ + BOOT_PARAVIRT_WRITE_REG(write_OSGD_hi_reg_value, reg_value); +} + +static inline unsigned long long PV_READ_CUD_LO_REG_VALUE(void) +{ + return pv_cpu_ops.read_CUD_lo_reg_value(); +} + +static inline unsigned long long 
PV_READ_CUD_HI_REG_VALUE(void) +{ + return pv_cpu_ops.read_CUD_hi_reg_value(); +} + +static inline unsigned long long BOOT_PV_READ_CUD_LO_REG_VALUE(void) +{ + return BOOT_PARAVIRT_READ_REG(read_CUD_lo_reg_value); +} + +static inline unsigned long long BOOT_PV_READ_CUD_HI_REG_VALUE(void) +{ + return BOOT_PARAVIRT_READ_REG(read_CUD_hi_reg_value); +} + +static inline void PV_WRITE_CUD_LO_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_CUD_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_CUD_HI_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_CUD_hi_reg_value(reg_value); +} + +static inline void BOOT_PV_WRITE_CUD_LO_REG_VALUE(unsigned long reg_value) +{ + BOOT_PARAVIRT_WRITE_REG(write_CUD_lo_reg_value, reg_value); +} + +static inline void BOOT_PV_WRITE_CUD_HI_REG_VALUE(unsigned long reg_value) +{ + BOOT_PARAVIRT_WRITE_REG(write_CUD_hi_reg_value, reg_value); +} + +static inline unsigned long long PV_READ_GD_LO_REG_VALUE(void) +{ + return pv_cpu_ops.read_GD_lo_reg_value(); +} + +static inline unsigned long long PV_READ_GD_HI_REG_VALUE(void) +{ + return pv_cpu_ops.read_GD_hi_reg_value(); +} + +static inline unsigned long long BOOT_PV_READ_GD_LO_REG_VALUE(void) +{ + return BOOT_PARAVIRT_READ_REG(read_GD_lo_reg_value); +} + +static inline unsigned long long BOOT_PV_READ_GD_HI_REG_VALUE(void) +{ + return BOOT_PARAVIRT_READ_REG(read_GD_hi_reg_value); +} + +static inline void PV_WRITE_GD_LO_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_GD_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_GD_HI_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_GD_hi_reg_value(reg_value); +} + +static inline void BOOT_PV_WRITE_GD_LO_REG_VALUE(unsigned long reg_value) +{ + BOOT_PARAVIRT_WRITE_REG(write_GD_lo_reg_value, reg_value); +} + +static inline void BOOT_PV_WRITE_GD_HI_REG_VALUE(unsigned long reg_value) +{ + BOOT_PARAVIRT_WRITE_REG(write_GD_hi_reg_value, reg_value); +} + +static inline unsigned long long PV_READ_PSP_LO_REG_VALUE(void) +{ 
+ if (!paravirt_enabled()) + return NATIVE_NV_READ_PSP_LO_REG_VALUE(); + else + return pv_cpu_ops.read_PSP_lo_reg_value(); +} + +static inline unsigned long long PV_READ_PSP_HI_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_PSP_HI_REG_VALUE(); + else + return pv_cpu_ops.read_PSP_hi_reg_value(); +} + +static inline unsigned long long BOOT_PV_READ_PSP_LO_REG_VALUE(void) +{ + if (!boot_paravirt_enabled()) + return NATIVE_NV_READ_PSP_LO_REG_VALUE(); + else + return BOOT_PARAVIRT_READ_REG(read_PSP_lo_reg_value); +} + +static inline unsigned long long BOOT_PV_READ_PSP_HI_REG_VALUE(void) +{ + if (!boot_paravirt_enabled()) + return NATIVE_NV_READ_PSP_HI_REG_VALUE(); + else + return BOOT_PARAVIRT_READ_REG(read_PSP_hi_reg_value); +} + +static inline void PV_WRITE_PSP_LO_REG_VALUE(unsigned long reg_value) +{ + if (!paravirt_enabled()) + NATIVE_NV_WRITE_PSP_LO_REG_VALUE(reg_value); + else + pv_cpu_ops.write_PSP_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_PSP_HI_REG_VALUE(unsigned long reg_value) +{ + if (!paravirt_enabled()) + NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG_VALUE(reg_value); + else + pv_cpu_ops.write_PSP_hi_reg_value(reg_value); +} + +static inline void BOOT_PV_WRITE_PSP_LO_REG_VALUE(unsigned long reg_value) +{ + if (!boot_paravirt_enabled()) + NATIVE_NV_WRITE_PSP_LO_REG_VALUE(reg_value); + else + BOOT_PARAVIRT_WRITE_REG(write_PSP_lo_reg_value, reg_value); +} + +static inline void BOOT_PV_WRITE_PSP_HI_REG_VALUE(unsigned long reg_value) +{ + if (!boot_paravirt_enabled()) + NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG_VALUE(reg_value); + else + BOOT_PARAVIRT_WRITE_REG(write_PSP_hi_reg_value, reg_value); +} +#define PV_NV_READ_PSP_LO_REG_VALUE() PV_READ_PSP_LO_REG_VALUE() +#define PV_NV_READ_PSP_HI_REG_VALUE() PV_READ_PSP_HI_REG_VALUE() +#define PV_READ_PSP_LO_REG() \ +({ \ + e2k_psp_lo_t PSP_lo; \ + PSP_lo.PSP_lo_half = PV_READ_PSP_LO_REG_VALUE(); \ + PSP_lo; \ +}) +#define PV_READ_PSP_HI_REG() \ +({ \ + e2k_psp_hi_t PSP_hi; \ + PSP_hi.PSP_hi_half = 
PV_READ_PSP_HI_REG_VALUE(); \ + PSP_hi; \ +}) +#define PV_READ_PSP_REG() \ +({ \ + psp_struct_t PSP; \ + PSP.PSP_hi_struct = PV_READ_PSP_HI_REG(); \ + PSP.PSP_lo_struct = PV_READ_PSP_LO_REG(); \ + PSP; \ +}) +#define PV_NV_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) \ + PV_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) +#define PV_NV_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) \ + PV_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) +#define PV_WRITE_PSP_REG_VALUE(PSP_hi_value, PSP_lo_value) \ +({ \ + PV_WRITE_PSP_HI_REG_VALUE(PSP_hi_value); \ + PV_WRITE_PSP_LO_REG_VALUE(PSP_lo_value); \ +}) +#define PV_NV_WRITE_PSP_REG_VALUE(PSP_hi_value, PSP_lo_value) \ + PV_WRITE_PSP_REG_VALUE(PSP_hi_value, PSP_lo_value) +#define PV_WRITE_PSP_REG(PSP_hi, PSP_lo) \ +({ \ + PV_WRITE_PSP_REG_VALUE(PSP_hi.PSP_hi_half, PSP_lo.PSP_lo_half); \ +}) + +static inline unsigned long long PV_READ_PSHTP_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_PSHTP_REG_VALUE(); + else + return pv_cpu_ops.read_PSHTP_reg_value(); +} + +static inline void PV_WRITE_PSHTP_REG_VALUE(unsigned long reg_value) +{ + if (!paravirt_enabled()) + NATIVE_WRITE_PSHTP_REG_VALUE(reg_value); + else + pv_cpu_ops.write_PSHTP_reg_value(reg_value); +} +#define PV_NV_READ_PSHTP_REG_VALUE() PV_READ_PSHTP_REG_VALUE() +#define PV_READ_PSHTP_REG() \ +({ \ + e2k_pshtp_t PSHTP_reg; \ + PSHTP_reg.word = PV_READ_PSHTP_REG_VALUE(); \ + PSHTP_reg; \ +}) +#define PV_WRITE_PSHTP_REG(PSHTP_reg) \ +({ \ + PV_WRITE_PSHTP_REG_VALUE(AS_WORD(PSHTP_reg)); \ +}) +#define PV_STRIP_PSHTP_WINDOW() PV_WRITE_PSHTP_REG_VALUE(0) + +static inline unsigned long long PV_READ_PCSP_LO_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_PCSP_LO_REG_VALUE(); + else + return pv_cpu_ops.read_PCSP_lo_reg_value(); +} + +static inline unsigned long long PV_READ_PCSP_HI_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_PCSP_HI_REG_VALUE(); + else + return pv_cpu_ops.read_PCSP_hi_reg_value(); +} + +static inline unsigned long long 
BOOT_PV_READ_PCSP_LO_REG_VALUE(void) +{ + if (!boot_paravirt_enabled()) + return NATIVE_NV_READ_PCSP_LO_REG_VALUE(); + else + return BOOT_PARAVIRT_READ_REG(read_PCSP_lo_reg_value); +} + +static inline unsigned long long BOOT_PV_READ_PCSP_HI_REG_VALUE(void) +{ + if (!boot_paravirt_enabled()) + return NATIVE_NV_READ_PCSP_HI_REG_VALUE(); + else + return BOOT_PARAVIRT_READ_REG(read_PCSP_hi_reg_value); +} + +static inline void PV_WRITE_PCSP_LO_REG_VALUE(unsigned long reg_value) +{ + if (!paravirt_enabled()) + NATIVE_NV_WRITE_PCSP_LO_REG_VALUE(reg_value); + else + pv_cpu_ops.write_PCSP_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_PCSP_HI_REG_VALUE(unsigned long reg_value) +{ + if (!paravirt_enabled()) + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG_VALUE(reg_value); + else + pv_cpu_ops.write_PCSP_hi_reg_value(reg_value); +} + +static inline void BOOT_PV_WRITE_PCSP_LO_REG_VALUE(unsigned long reg_value) +{ + if (!boot_paravirt_enabled()) + NATIVE_NV_WRITE_PCSP_LO_REG_VALUE(reg_value); + else + BOOT_PARAVIRT_WRITE_REG(write_PCSP_lo_reg_value, reg_value); +} + +static inline void BOOT_PV_WRITE_PCSP_HI_REG_VALUE(unsigned long reg_value) +{ + if (!boot_paravirt_enabled()) + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG_VALUE(reg_value); + else + BOOT_PARAVIRT_WRITE_REG(write_PCSP_hi_reg_value, reg_value); +} +#define PV_NV_READ_PCSP_LO_REG_VALUE() PV_READ_PCSP_LO_REG_VALUE() +#define PV_NV_READ_PCSP_HI_REG_VALUE() PV_READ_PCSP_HI_REG_VALUE() +#define PV_READ_PCSP_LO_REG() \ +({ \ + e2k_pcsp_lo_t PCSP_lo; \ + PCSP_lo.PCSP_lo_half = PV_READ_PCSP_LO_REG_VALUE(); \ + PCSP_lo; \ +}) +#define PV_READ_PCSP_HI_REG() \ +({ \ + e2k_pcsp_hi_t PCSP_hi; \ + PCSP_hi.PCSP_hi_half = PV_READ_PCSP_HI_REG_VALUE(); \ + PCSP_hi; \ +}) +#define PV_READ_PCSP_REG() \ +({ \ + pcsp_struct_t PCSP; \ + PCSP.PCSP_hi_struct = PV_READ_PCSP_HI_REG(); \ + PCSP.PCSP_lo_struct = PV_READ_PCSP_LO_REG(); \ + PCSP; \ +}) +#define PV_READ_PCSP_REG_TO(PCSP) \ +({ \ + *PCSP = PV_READ_PCSP_REG(); \ +}) +#define 
PV_NV_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) \ + PV_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) +#define PV_NV_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) \ + PV_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) +#define PV_WRITE_PCSP_REG_VALUE(PCSP_hi_value, PCSP_lo_value) \ +({ \ + PV_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value); \ + PV_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value); \ +}) +#define PV_NV_WRITE_PCSP_REG_VALUE(PCSP_hi_value, PCSP_lo_value) \ + PV_WRITE_PCSP_REG_VALUE(PCSP_hi_value, PCSP_lo_value) +#define PV_WRITE_PCSP_REG(PCSP_hi, PCSP_lo) \ + PV_WRITE_PCSP_REG_VALUE(PCSP_hi.PCSP_hi_half, \ + PCSP_lo.PCSP_lo_half) + +static inline int PV_READ_PCSHTP_REG_SVALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_READ_PCSHTP_REG_SVALUE(); + else + return pv_cpu_ops.read_PCSHTP_reg_value(); +} + +static inline void PV_WRITE_PCSHTP_REG_SVALUE(int reg_value) +{ + if (!paravirt_enabled()) + NATIVE_WRITE_PCSHTP_REG_SVALUE(reg_value); + else + pv_cpu_ops.write_PCSHTP_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_CR0_LO_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_CR0_LO_REG_VALUE(); + else + return pv_cpu_ops.read_CR0_lo_reg_value(); +} + +static inline unsigned long long PV_READ_CR0_HI_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_CR0_HI_REG_VALUE(); + else + return pv_cpu_ops.read_CR0_hi_reg_value(); +} + +static inline unsigned long long PV_READ_CR1_LO_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_CR1_LO_REG_VALUE(); + else + return pv_cpu_ops.read_CR1_lo_reg_value(); +} + +static inline unsigned long long PV_READ_CR1_HI_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_CR1_HI_REG_VALUE(); + else + return pv_cpu_ops.read_CR1_hi_reg_value(); +} + +static inline void PV_WRITE_CR0_LO_REG_VALUE(unsigned long reg_value) +{ + if (!paravirt_enabled()) + NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG_VALUE(reg_value); + else + pv_cpu_ops.write_CR0_lo_reg_value(reg_value); +} + +static inline 
void PV_WRITE_CR0_HI_REG_VALUE(unsigned long reg_value) +{ + if (!paravirt_enabled()) + NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG_VALUE(reg_value); + else + pv_cpu_ops.write_CR0_hi_reg_value(reg_value); +} + +static inline void PV_WRITE_CR1_LO_REG_VALUE(unsigned long reg_value) +{ + if (!paravirt_enabled()) + NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG_VALUE(reg_value); + else + pv_cpu_ops.write_CR1_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_CR1_HI_REG_VALUE(unsigned long reg_value) +{ + if (!paravirt_enabled()) + NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG_VALUE(reg_value); + else + pv_cpu_ops.write_CR1_hi_reg_value(reg_value); +} +#define PV_NV_READ_CR0_LO_REG_VALUE() PV_READ_CR0_LO_REG_VALUE() +#define PV_NV_READ_CR0_HI_REG_VALUE() PV_READ_CR0_HI_REG_VALUE() +#define PV_NV_READ_CR1_LO_REG_VALUE() PV_READ_CR1_LO_REG_VALUE() +#define PV_NV_READ_CR1_HI_REG_VALUE() PV_READ_CR1_HI_REG_VALUE() +#define PV_READ_CR0_LO_REG() \ +({ \ + e2k_cr0_lo_t CR0_lo; \ + CR0_lo.CR0_lo_half = PV_READ_CR0_LO_REG_VALUE(); \ + CR0_lo; \ +}) +#define PV_READ_CR0_HI_REG() \ +({ \ + e2k_cr0_hi_t CR0_hi; \ + CR0_hi.CR0_hi_half = PV_READ_CR0_HI_REG_VALUE(); \ + CR0_hi; \ +}) +#define PV_READ_CR1_LO_REG() \ +({ \ + e2k_cr1_lo_t CR1_lo; \ + CR1_lo.CR1_lo_half = PV_READ_CR1_LO_REG_VALUE(); \ + CR1_lo; \ +}) +#define PV_READ_CR1_HI_REG() \ +({ \ + e2k_cr1_hi_t CR1_hi; \ + CR1_hi.CR1_hi_half = PV_READ_CR1_HI_REG_VALUE(); \ + CR1_hi; \ +}) +#define PV_NV_NOIRQ_WRITE_CR0_LO_REG_VALUE(CR0_lo_value) \ + PV_WRITE_CR0_LO_REG_VALUE(CR0_lo_value) +#define PV_NV_NOIRQ_WRITE_CR0_HI_REG_VALUE(CR0_hi_value) \ + PV_WRITE_CR0_HI_REG_VALUE(CR0_hi_value) +#define PV_NV_NOIRQ_WRITE_CR1_LO_REG_VALUE(CR1_lo_value) \ + PV_WRITE_CR1_LO_REG_VALUE(CR1_lo_value) +#define PV_NV_NOIRQ_WRITE_CR1_HI_REG_VALUE(CR1_hi_value) \ + PV_WRITE_CR1_HI_REG_VALUE(CR1_hi_value) + +static inline unsigned long long PV_READ_CTPR_REG_VALUE(int reg_no) +{ + return pv_cpu_ops.read_CTPR_reg_value(reg_no); +} + +static inline void 
PV_WRITE_CTPR_REG_VALUE(int reg_no, unsigned long reg_value) +{ + pv_cpu_ops.write_CTPR_reg_value(reg_no, reg_value); +} + +static inline unsigned long long PV_READ_USD_LO_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_USD_LO_REG_VALUE(); + else + return pv_cpu_ops.read_USD_lo_reg_value(); +} + +static inline unsigned long long PV_READ_USD_HI_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_USD_HI_REG_VALUE(); + else + return pv_cpu_ops.read_USD_hi_reg_value(); +} + +static inline unsigned long long BOOT_PV_READ_USD_LO_REG_VALUE(void) +{ + if (!boot_paravirt_enabled()) + return NATIVE_NV_READ_USD_LO_REG_VALUE(); + else + return BOOT_PARAVIRT_READ_REG(read_USD_lo_reg_value); +} + +static inline unsigned long long BOOT_PV_READ_USD_HI_REG_VALUE(void) +{ + if (!boot_paravirt_enabled()) + return NATIVE_NV_READ_USD_HI_REG_VALUE(); + else + return BOOT_PARAVIRT_READ_REG(read_USD_hi_reg_value); +} + +static inline void PV_WRITE_USD_LO_REG_VALUE(unsigned long reg_value) +{ + if (!paravirt_enabled()) + NATIVE_NV_WRITE_USD_LO_REG_VALUE(reg_value); + else + pv_cpu_ops.write_USD_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_USD_HI_REG_VALUE(unsigned long reg_value) +{ + if (!paravirt_enabled()) + NATIVE_NV_WRITE_USD_HI_REG_VALUE(reg_value); + else + pv_cpu_ops.write_USD_hi_reg_value(reg_value); +} + +static inline void BOOT_PV_WRITE_USD_LO_REG_VALUE(unsigned long reg_value) +{ + if (!boot_paravirt_enabled()) + NATIVE_NV_WRITE_USD_LO_REG_VALUE(reg_value); + else + BOOT_PARAVIRT_WRITE_REG(write_USD_lo_reg_value, reg_value); +} + +static inline void BOOT_PV_WRITE_USD_HI_REG_VALUE(unsigned long reg_value) +{ + if (!boot_paravirt_enabled()) + NATIVE_NV_WRITE_USD_HI_REG_VALUE(reg_value); + else + BOOT_PARAVIRT_WRITE_REG(write_USD_hi_reg_value, reg_value); +} +#define PV_READ_USD_LO_REG() \ +({ \ + e2k_usd_lo_t USD_lo; \ + USD_lo.USD_lo_half = PV_READ_USD_LO_REG_VALUE(); \ + USD_lo; \ +}) +#define PV_READ_USD_HI_REG() \ +({ \ + 
e2k_usd_hi_t USD_hi; \ + USD_hi.USD_hi_half = PV_READ_USD_HI_REG_VALUE(); \ + USD_hi; \ +}) +#define PV_READ_USD_REG() \ +({ \ + usd_struct_t USD; \ + USD.USD_hi_struct = PV_READ_USD_HI_REG(); \ + USD.USD_lo_struct = PV_READ_USD_LO_REG(); \ + USD; \ +}) +#define PV_NV_WRITE_USD_LO_REG_VALUE(usd_lo_value) \ + PV_WRITE_USD_LO_REG_VALUE(usd_lo_value) +#define PV_NV_WRITE_USD_HI_REG_VALUE(usd_hi_value) \ + PV_WRITE_USD_HI_REG_VALUE(usd_hi_value) +#define PV_WRITE_USD_LO_REG(USD_lo) \ + PV_WRITE_USD_LO_REG_VALUE(USD_lo.USD_lo_half) +#define PV_WRITE_USD_HI_REG(USD_hi) \ + PV_WRITE_USD_HI_REG_VALUE(USD_hi.USD_hi_half) +#define PV_WRITE_USD_REG_VALUE(USD_hi_value, USD_lo_value) \ +({ \ + PV_WRITE_USD_HI_REG_VALUE(USD_hi_value); \ + PV_WRITE_USD_LO_REG_VALUE(USD_lo_value); \ +}) +#define PV_NV_WRITE_USD_REG_VALUE(USD_hi_value, USD_lo_value) \ + PV_WRITE_USD_REG_VALUE(USD_hi_value, USD_lo_value) +#define PV_WRITE_USD_REG(USD_hi, USD_lo) \ +({ \ + PV_WRITE_USD_REG_VALUE(USD_hi.USD_hi_half, USD_lo.USD_lo_half); \ +}) + +static inline unsigned long long PV_READ_PUSD_LO_REG_VALUE(void) +{ + return PV_READ_USD_LO_REG_VALUE(); +} + +static inline unsigned long long PV_READ_PUSD_HI_REG_VALUE(void) +{ + return PV_READ_USD_HI_REG_VALUE(); +} + +static inline void PV_WRITE_PUSD_LO_REG_VALUE(unsigned long reg_value) +{ + PV_WRITE_USD_LO_REG_VALUE(reg_value); +} + +static inline void PV_WRITE_PUSD_HI_REG_VALUE(unsigned long reg_value) +{ + PV_WRITE_USD_HI_REG_VALUE(reg_value); +} +#define PV_READ_PUSD_LO_REG() \ +({ \ + e2k_pusd_lo_t PUSD_lo; \ + PUSD_lo.PUSD_lo_half = PV_READ_PUSD_LO_REG_VALUE(); \ + PUSD_lo; \ +}) +#define PV_READ_PUSD_HI_REG() \ +({ \ + e2k_pusd_hi_t PUSD_hi; \ + PUSD_hi.PUSD_hi_half = PV_READ_PUSD_HI_REG_VALUE(); \ + PUSD_hi; \ +}) +#define PV_READ_PUSD_REG() \ +({ \ + pusd_struct_t PUSD; \ + PUSD.PUSD_hi_struct = PV_READ_PUSD_HI_REG(); \ + PUSD.PUSD_lo_struct = PV_READ_PUSD_LO_REG(); \ + PUSD; \ +}) +#define PV_READ_PUSD_REG_TO(PUSD) \ +({ \ + *PUSD = 
PV_READ_PUSD_REG(); \ +}) +#define PV_WRITE_PUSD_LO_REG(PUSD_lo) \ + PV_WRITE_PUSD_LO_REG_VALUE(PUSD_lo.PUSD_lo_half) +#define PV_WRITE_PUSD_HI_REG(PUSD_hi) \ + PV_WRITE_PUSD_HI_REG_VALUE(PUSD_hi.PUSD_hi_half) +#define PV_WRITE_PUSD_REG_VALUE(PUSD_hi_value, PUSD_lo_value) \ +({ \ + PV_WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value); \ + PV_WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value); \ +}) +#define PV_WRITE_PUSD_REG(PUSD_hi, PUSD_lo) \ +({ \ + PV_WRITE_PUSD_REG_VALUE(PUSD_hi.PUSD_hi_half, \ + PUSD_lo.PUSD_lo_half); \ +}) + +static inline unsigned long long PV_READ_SBR_REG_VALUE(void) +{ + return pv_cpu_ops.read_SBR_reg_value(); +} + +static inline unsigned long long BOOT_PV_READ_SBR_REG_VALUE(void) +{ + return BOOT_PARAVIRT_READ_REG(read_SBR_reg_value); +} + +static inline void PV_WRITE_SBR_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_SBR_reg_value(reg_value); +} + +static inline void BOOT_PV_WRITE_SBR_REG_VALUE(unsigned long reg_value) +{ + BOOT_PARAVIRT_WRITE_REG(write_SBR_reg_value, reg_value); +} +#define PV_NV_READ_SBR_REG_VALUE() PV_READ_SBR_REG_VALUE() +#define PV_READ_SBR_REG() \ +({ \ + e2k_sbr_t SBR; \ + SBR.SBR_reg = PV_READ_SBR_REG_VALUE(); \ + SBR; \ +}) +#define PV_NV_WRITE_SBR_REG_VALUE(SBR_value) \ + PV_WRITE_SBR_REG_VALUE(SBR_value) + +static inline unsigned long long PV_READ_USBR_REG_VALUE(void) +{ + return PV_READ_SBR_REG_VALUE(); +} + +static inline unsigned long long BOOT_PV_READ_USBR_REG_VALUE(void) +{ + return BOOT_PV_READ_SBR_REG_VALUE(); +} + +static inline void PV_WRITE_USBR_REG_VALUE(unsigned long reg_value) +{ + PV_WRITE_SBR_REG_VALUE(reg_value); +} + +static inline void BOOT_PV_WRITE_USBR_REG_VALUE(unsigned long reg_value) +{ + BOOT_PV_WRITE_SBR_REG_VALUE(reg_value); +} +#define PV_READ_USBR_REG() \ +({ \ + e2k_usbr_t USBR; \ + USBR.USBR_reg = PV_READ_USBR_REG_VALUE(); \ + USBR; \ +}) +#define PV_WRITE_USBR_REG(USBR) \ + PV_WRITE_USBR_REG_VALUE(USBR.USBR_reg) + +static inline unsigned long long PV_READ_WD_REG_VALUE(void) +{ + if 
(!paravirt_enabled()) + return NATIVE_READ_WD_REG_VALUE(); + else + return pv_cpu_ops.read_WD_reg_value(); +} + +static inline void PV_WRITE_WD_REG_VALUE(unsigned long reg_value) +{ + if (!paravirt_enabled()) + NATIVE_WRITE_WD_REG_VALUE(reg_value); + else + pv_cpu_ops.write_WD_reg_value(reg_value); +} +#define PV_READ_WD_REG() \ +({ \ + e2k_wd_t WD; \ + WD.WD_reg = PV_READ_WD_REG_VALUE(); \ + WD; \ +}) +#define PV_WRITE_WD_REG(WD) \ + PV_WRITE_WD_REG_VALUE(WD.WD_reg) + +#ifdef NEED_PARAVIRT_LOOP_REGISTERS +static inline unsigned long long PV_READ_LSR_REG_VALUE(void) +{ + return pv_cpu_ops.read_LSR_reg_value(); +} + +static inline void PV_WRITE_LSR_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_LSR_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_ILCR_REG_VALUE(void) +{ + return pv_cpu_ops.read_ILCR_reg_value(); +} + +static inline void PV_WRITE_ILCR_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_ILCR_reg_value(reg_value); +} +#endif /* NEED_PARAVIRT_LOOP_REGISTERS */ + +static inline unsigned long long PV_READ_OSR0_REG_VALUE(void) +{ + return pv_cpu_ops.read_OSR0_reg_value(); +} + +static inline unsigned long long BOOT_PV_READ_OSR0_REG_VALUE(void) +{ + return BOOT_PARAVIRT_READ_REG(read_OSR0_reg_value); +} + +static inline void PV_WRITE_OSR0_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_OSR0_reg_value(reg_value); +} + +static inline void BOOT_PV_WRITE_OSR0_REG_VALUE(unsigned long reg_value) +{ + BOOT_PARAVIRT_WRITE_REG(write_OSR0_reg_value, reg_value); +} + +static inline unsigned int PV_READ_OSEM_REG_VALUE(void) +{ + return pv_cpu_ops.read_OSEM_reg_value(); +} + +static inline void PV_WRITE_OSEM_REG_VALUE(unsigned int reg_value) +{ + pv_cpu_ops.write_OSEM_reg_value(reg_value); +} + +static inline unsigned int PV_READ_BGR_REG_VALUE(void) +{ + return pv_cpu_ops.read_BGR_reg_value(); +} + +static inline unsigned int BOOT_PV_READ_BGR_REG_VALUE(void) +{ + return BOOT_PARAVIRT_READ_REG(read_BGR_reg_value); +} + +static 
inline void PV_WRITE_BGR_REG_VALUE(unsigned int reg_value) +{ + pv_cpu_ops.write_BGR_reg_value(reg_value); +} + +static inline void BOOT_PV_WRITE_BGR_REG_VALUE(unsigned int reg_value) +{ + BOOT_PARAVIRT_WRITE_REG(write_BGR_reg_value, reg_value); +} +#define PV_READ_BGR_REG() \ +({ \ + e2k_bgr_t BGR; \ + BGR.BGR_reg = PV_READ_BGR_REG_VALUE(); \ + BGR; \ +}) +#define PV_WRITE_BGR_REG(BGR) \ + PV_WRITE_BGR_REG_VALUE(BGR.BGR_reg) + +static inline unsigned long long PV_READ_CLKR_REG_VALUE(void) +{ + return pv_cpu_ops.read_CLKR_reg_value(); +} + +static inline void PV_WRITE_CLKR_REG_VALUE(void) +{ + pv_cpu_ops.write_CLKR_reg_value(); +} + +static inline unsigned long long PV_READ_SCLKR_REG_VALUE(void) +{ + return pv_cpu_ops.read_SCLKR_reg_value(); +} + +static inline void PV_WRITE_SCLKR_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_SCLKR_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_SCLKM1_REG_VALUE(void) +{ + return pv_cpu_ops.read_SCLKM1_reg_value(); +} + +static inline void PV_WRITE_SCLKM1_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_SCLKM1_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_SCLKM2_REG_VALUE(void) +{ + return pv_cpu_ops.read_SCLKM2_reg_value(); +} + +static inline unsigned long long PV_READ_SCLKM3_REG_VALUE(void) +{ + return pv_cpu_ops.read_SCLKM3_reg_value(); +} + +static inline void PV_WRITE_SCLKM2_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_SCLKM2_reg_value(reg_value); +} + +/* + * Read/Write Control Unit HardWare registers (CU_HW0/CU_HW1) + */ +static inline unsigned long long PV_READ_CU_HW0_REG_VALUE(void) +{ + return pv_cpu_ops.read_CU_HW0_reg_value(); +} +static inline unsigned long long PV_READ_CU_HW1_REG_VALUE(void) +{ + return pv_cpu_ops.read_CU_HW1_reg_value(); +} +static inline void PV_WRITE_CU_HW0_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_CU_HW0_reg_value(reg_value); +} +static inline void PV_WRITE_CU_HW1_REG_VALUE(unsigned long reg_value) +{ + 
pv_cpu_ops.write_CU_HW1_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_RPR_LO_REG_VALUE(void) +{ + return pv_cpu_ops.read_RPR_lo_reg_value(); +} + +static inline unsigned long long PV_READ_RPR_HI_REG_VALUE(void) +{ + return pv_cpu_ops.read_RPR_hi_reg_value(); +} + +static inline void PV_WRITE_RPR_LO_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_RPR_lo_reg_value(reg_value); +} +static inline void PV_CL_WRITE_RPR_LO_REG_VALUE(unsigned long reg_value) +{ + PV_WRITE_RPR_LO_REG_VALUE(reg_value); +} + +static inline void PV_WRITE_RPR_HI_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_RPR_hi_reg_value(reg_value); +} +static inline void PV_CL_WRITE_RPR_HI_REG_VALUE(unsigned long reg_value) +{ + PV_WRITE_RPR_HI_REG_VALUE(reg_value); +} +#define PV_READ_RPR_LO_REG() \ +({ \ + e2k_rpr_lo_t RPR_lo; \ + RPR_lo.RPR_lo_half = PV_READ_RPR_LO_REG_VALUE(); \ + RPR_lo; \ +}) +#define PV_READ_RPR_HI_REG() \ +({ \ + e2k_rpr_hi_t RPR_hi; \ + RPR_hi.RPR_hi_half = PV_READ_RPR_HI_REG_VALUE(); \ + RPR_hi; \ +}) +#define PV_WRITE_RPR_LO_REG(RPR_lo) \ + PV_WRITE_RPR_LO_REG_VALUE(RPR_lo.RPR_lo_half) +#define PV_CL_WRITE_RPR_LO_REG(RPR_lo) \ + PV_CL_WRITE_RPR_LO_REG_VALUE(RPR_lo.RPR_lo_half) +#define PV_WRITE_RPR_HI_REG(RPR_hi) \ + PV_WRITE_RPR_HI_REG_VALUE(RPR_hi.RPR_hi_half) +#define PV_CL_WRITE_RPR_HI_REG(RPR_hi) \ + PV_CL_WRITE_RPR_HI_REG_VALUE(RPR_hi.RPR_hi_half) + +static inline unsigned long long PV_READ_SBBP_REG_VALUE(void) +{ + return pv_cpu_ops.read_SBBP_reg_value(); +} + +static inline unsigned long long PV_READ_IP_REG_VALUE(void) +{ + return pv_cpu_ops.read_IP_reg_value(); +} + +static inline unsigned int PV_READ_DIBCR_REG_VALUE(void) +{ + return pv_cpu_ops.read_DIBCR_reg_value(); +} + +static inline void PV_WRITE_DIBCR_REG_VALUE(unsigned int reg_value) +{ + pv_cpu_ops.write_DIBCR_reg_value(reg_value); +} + +static inline unsigned int PV_READ_DIBSR_REG_VALUE(void) +{ + return pv_cpu_ops.read_DIBSR_reg_value(); +} + +static inline void 
PV_WRITE_DIBSR_REG_VALUE(unsigned int reg_value) +{ + pv_cpu_ops.write_DIBSR_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_DIMCR_REG_VALUE(void) +{ + return pv_cpu_ops.read_DIMCR_reg_value(); +} + +static inline void PV_WRITE_DIMCR_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_DIMCR_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_DIBAR0_REG_VALUE(void) +{ + return pv_cpu_ops.read_DIBAR0_reg_value(); +} + +static inline void PV_WRITE_DIBAR0_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_DIBAR0_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_DIBAR1_REG_VALUE(void) +{ + return pv_cpu_ops.read_DIBAR1_reg_value(); +} + +static inline void PV_WRITE_DIBAR1_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_DIBAR1_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_DIBAR2_REG_VALUE(void) +{ + return pv_cpu_ops.read_DIBAR2_reg_value(); +} + +static inline void PV_WRITE_DIBAR2_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_DIBAR2_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_DIBAR3_REG_VALUE(void) +{ + return pv_cpu_ops.read_DIBAR3_reg_value(); +} + +static inline void PV_WRITE_DIBAR3_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_DIBAR3_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_DIMAR0_REG_VALUE(void) +{ + return pv_cpu_ops.read_DIMAR0_reg_value(); +} + +static inline void PV_WRITE_DIMAR0_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_DIMAR0_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_DIMAR1_REG_VALUE(void) +{ + return pv_cpu_ops.read_DIMAR1_reg_value(); +} + +static inline void PV_WRITE_DIMAR1_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_DIMAR1_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_CUTD_REG_VALUE(void) +{ + return pv_cpu_ops.read_CUTD_reg_value(); +} + +static inline void PV_NV_NOIRQ_WRITE_CUTD_REG_VALUE(unsigned long 
reg_value) +{ + pv_cpu_ops.write_CUTD_reg_value(reg_value); +} + +static inline unsigned int PV_READ_CUIR_REG_VALUE(void) +{ + return pv_cpu_ops.read_CUIR_reg_value(); +} + +static inline unsigned int PV_READ_UPSR_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_UPSR_REG_VALUE(); + else + return pv_cpu_ops.read_UPSR_reg_value(); +} + +static inline unsigned int BOOT_PV_READ_UPSR_REG_VALUE(void) +{ + if (!boot_paravirt_enabled()) + return NATIVE_NV_READ_UPSR_REG_VALUE(); + else + return BOOT_PARAVIRT_READ_REG(read_UPSR_reg_value); +} + +static inline void PV_WRITE_UPSR_REG_VALUE(unsigned int reg_value) +{ + if (!paravirt_enabled()) + NATIVE_WRITE_UPSR_REG_VALUE(reg_value); + else + pv_cpu_ops.write_UPSR_reg_value(reg_value); +} +static inline void PV_WRITE_UPSR_REG(e2k_upsr_t UPSR) +{ + PV_WRITE_UPSR_REG_VALUE(UPSR.UPSR_reg); +} + +static inline void BOOT_PV_WRITE_UPSR_REG_VALUE(unsigned int reg_value) +{ + if (!boot_paravirt_enabled()) + NATIVE_WRITE_UPSR_REG_VALUE(reg_value); + else + BOOT_PARAVIRT_WRITE_REG(write_UPSR_reg_value, reg_value); +} +static inline void BOOT_PV_WRITE_UPSR_REG(e2k_upsr_t UPSR) +{ + BOOT_PV_WRITE_UPSR_REG_VALUE(UPSR.UPSR_reg); +} + +static inline void PV_WRITE_UPSR_IRQ_BARRIER(unsigned int reg_value) +{ + if (!paravirt_enabled()) + NATIVE_WRITE_UPSR_IRQ_BARRIER(reg_value); + else + pv_cpu_ops.write_UPSR_irq_barrier(reg_value); +} + +static inline unsigned int PV_READ_PSR_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_PSR_REG_VALUE(); + else + return pv_cpu_ops.read_PSR_reg_value(); +} + +static inline unsigned int BOOT_PV_READ_PSR_REG_VALUE(void) +{ + if (!boot_paravirt_enabled()) + return NATIVE_NV_READ_PSR_REG_VALUE(); + else + return BOOT_PARAVIRT_READ_REG(read_PSR_reg_value); +} + +static inline void PV_WRITE_PSR_REG_VALUE(unsigned int reg_value) +{ + if (!paravirt_enabled()) + NATIVE_WRITE_PSR_REG_VALUE(reg_value); + else + pv_cpu_ops.write_PSR_reg_value(reg_value); +} + +static inline void 
BOOT_PV_WRITE_PSR_REG_VALUE(unsigned int reg_value) +{ + if (!boot_paravirt_enabled()) + NATIVE_WRITE_PSR_REG_VALUE(reg_value); + else + BOOT_PARAVIRT_WRITE_REG(write_PSR_reg_value, reg_value); +} + +static inline void PV_WRITE_PSR_IRQ_BARRIER(unsigned int reg_value) +{ + if (!paravirt_enabled()) + NATIVE_WRITE_PSR_IRQ_BARRIER(reg_value); + else + pv_cpu_ops.write_PSR_irq_barrier(reg_value); +} + +static inline unsigned int PV_READ_PFPFR_REG_VALUE(void) +{ + return pv_cpu_ops.read_PFPFR_reg_value(); +} + +static inline void PV_WRITE_PFPFR_REG_VALUE(unsigned int reg_value) +{ + pv_cpu_ops.write_PFPFR_reg_value(reg_value); +} + +static inline unsigned int PV_READ_FPCR_REG_VALUE(void) +{ + return pv_cpu_ops.read_FPCR_reg_value(); +} + +static inline void PV_WRITE_FPCR_REG_VALUE(unsigned int reg_value) +{ + pv_cpu_ops.write_FPCR_reg_value(reg_value); +} + +static inline unsigned int PV_READ_FPSR_REG_VALUE(void) +{ + return pv_cpu_ops.read_FPSR_reg_value(); +} + +static inline void PV_WRITE_FPSR_REG_VALUE(unsigned int reg_value) +{ + pv_cpu_ops.write_FPSR_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_CS_LO_REG_VALUE(void) +{ + return pv_cpu_ops.read_CS_lo_reg_value(); +} + +static inline unsigned long long PV_READ_CS_HI_REG_VALUE(void) +{ + return pv_cpu_ops.read_CS_hi_reg_value(); +} + +static inline unsigned long long PV_READ_DS_LO_REG_VALUE(void) +{ + return pv_cpu_ops.read_DS_lo_reg_value(); +} + +static inline unsigned long long PV_READ_DS_HI_REG_VALUE(void) +{ + return pv_cpu_ops.read_DS_hi_reg_value(); +} + +static inline unsigned long long PV_READ_ES_LO_REG_VALUE(void) +{ + return pv_cpu_ops.read_ES_lo_reg_value(); +} + +static inline unsigned long long PV_READ_ES_HI_REG_VALUE(void) +{ + return pv_cpu_ops.read_ES_hi_reg_value(); +} + +static inline unsigned long long PV_READ_FS_LO_REG_VALUE(void) +{ + return pv_cpu_ops.read_FS_lo_reg_value(); +} + +static inline unsigned long long PV_READ_FS_HI_REG_VALUE(void) +{ + return 
pv_cpu_ops.read_FS_hi_reg_value(); +} + +static inline unsigned long long PV_READ_GS_LO_REG_VALUE(void) +{ + return pv_cpu_ops.read_GS_lo_reg_value(); +} + +static inline unsigned long long PV_READ_GS_HI_REG_VALUE(void) +{ + return pv_cpu_ops.read_GS_hi_reg_value(); +} + +static inline unsigned long long PV_READ_SS_LO_REG_VALUE(void) +{ + return pv_cpu_ops.read_SS_lo_reg_value(); +} + +static inline unsigned long long PV_READ_SS_HI_REG_VALUE(void) +{ + return pv_cpu_ops.read_SS_hi_reg_value(); +} + +static inline void PV_WRITE_CS_LO_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_CS_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_CS_HI_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_CS_hi_reg_value(reg_value); +} + +static inline void PV_WRITE_DS_LO_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_DS_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_DS_HI_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_DS_hi_reg_value(reg_value); +} + +static inline void PV_WRITE_ES_LO_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_ES_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_ES_HI_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_ES_hi_reg_value(reg_value); +} + +static inline void PV_WRITE_FS_LO_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_FS_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_FS_HI_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_FS_hi_reg_value(reg_value); +} + +static inline void PV_WRITE_GS_LO_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_GS_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_GS_HI_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_GS_hi_reg_value(reg_value); +} + +static inline void PV_WRITE_SS_LO_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_SS_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_SS_HI_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_SS_hi_reg_value(reg_value); 
+} + +static inline unsigned long long PV_READ_IDR_REG_VALUE(void) +{ + if (pv_cpu_ops.read_IDR_reg_value) + return pv_cpu_ops.read_IDR_reg_value(); + return 0; +} +static inline unsigned long long BOOT_PV_READ_IDR_REG_VALUE(void) +{ + if (BOOT_PARAVIRT_GET_CPU_FUNC(boot_read_IDR_reg_value)) { + return BOOT_PARAVIRT_READ_REG(boot_read_IDR_reg_value); + } + return 0; +} + +static inline unsigned int PV_READ_CORE_MODE_REG_VALUE(void) +{ + if (pv_cpu_ops.read_CORE_MODE_reg_value) + return pv_cpu_ops.read_CORE_MODE_reg_value(); + return 0; +} +static inline unsigned int BOOT_PV_READ_CORE_MODE_REG_VALUE(void) +{ + if (BOOT_PARAVIRT_GET_CPU_FUNC(boot_read_CORE_MODE_reg_value)) { + return BOOT_PARAVIRT_READ_REG(boot_read_CORE_MODE_reg_value); + } + return 0; +} +static inline void PV_WRITE_CORE_MODE_REG_VALUE(unsigned int modes) +{ + if (pv_cpu_ops.write_CORE_MODE_reg_value) + pv_cpu_ops.write_CORE_MODE_reg_value(modes); +} +static inline void BOOT_PV_WRITE_CORE_MODE_REG_VALUE(unsigned int modes) +{ + if (BOOT_PARAVIRT_GET_CPU_FUNC(boot_write_CORE_MODE_reg_value)) + BOOT_PARAVIRT_WRITE_REG(boot_write_CORE_MODE_reg_value, modes); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ + +/* + * Set flags of updated VCPU registers + */ + +static inline void PUT_UPDATED_CPU_REGS_FLAGS(unsigned long flags) +{ + PV_PUT_UPDATED_CPU_REGS_FLAGS(flags); +} + +/* + * Read/write word Procedure Stack Harware Top Pointer (PSHTP) + */ +#define READ_PSHTP_REG_VALUE() PV_READ_PSHTP_REG_VALUE() + +#define WRITE_PSHTP_REG_VALUE(PSHTP_value) \ + PV_WRITE_PSHTP_REG_VALUE(PSHTP_value) + +/* + * Read/write word Procedure Chain Stack Harware Top Pointer (PCSHTP) + */ +#define READ_PCSHTP_REG_SVALUE() PV_READ_PCSHTP_REG_SVALUE() + +#define WRITE_PCSHTP_REG_SVALUE(PCSHTP_svalue) \ + PV_WRITE_PCSHTP_REG_SVALUE(PCSHTP_svalue) + +/* + * Read/write low/high double-word OS Compilation Unit Descriptor (OSCUD) + */ + +#define READ_OSCUD_LO_REG_VALUE() 
PV_READ_OSCUD_LO_REG_VALUE() +#define READ_OSCUD_HI_REG_VALUE() PV_READ_OSCUD_HI_REG_VALUE() +#define BOOT_READ_OSCUD_LO_REG_VALUE() BOOT_PV_READ_OSCUD_LO_REG_VALUE() +#define BOOT_READ_OSCUD_HI_REG_VALUE() BOOT_PV_READ_OSCUD_HI_REG_VALUE() + +#define WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \ + PV_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) +#define WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \ + PV_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) +#define BOOT_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \ + BOOT_PV_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) +#define BOOT_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \ + BOOT_PV_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) + +/* + * Read/write low/hgh double-word OS Globals Register (OSGD) + */ + +#define READ_OSGD_LO_REG_VALUE() PV_READ_OSGD_LO_REG_VALUE() +#define READ_OSGD_HI_REG_VALUE() PV_READ_OSGD_HI_REG_VALUE() +#define BOOT_READ_OSGD_LO_REG_VALUE() BOOT_PV_READ_OSGD_LO_REG_VALUE() +#define BOOT_READ_OSGD_HI_REG_VALUE() BOOT_PV_READ_OSGD_HI_REG_VALUE() + +#define WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \ + PV_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) +#define WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \ + PV_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) +#define BOOT_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \ + BOOT_PV_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) +#define BOOT_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \ + BOOT_PV_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) +#define WRITE_OSGD_REG_VALUE(OSGD_hi_value, OSGD_lo_value) \ +do { \ + PV_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value); \ + PV_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value); \ +} while (0) + + /* + * Read/write low/high double-word Compilation Unit Register (CUD) + */ + +#define READ_CUD_LO_REG_VALUE() PV_READ_CUD_LO_REG_VALUE() +#define READ_CUD_HI_REG_VALUE() PV_READ_CUD_HI_REG_VALUE() +#define BOOT_READ_CUD_LO_REG_VALUE() BOOT_PV_READ_CUD_LO_REG_VALUE() +#define BOOT_READ_CUD_HI_REG_VALUE() BOOT_PV_READ_CUD_HI_REG_VALUE() + +#define WRITE_CUD_LO_REG_VALUE(CUD_lo_value) \ + 
PV_WRITE_CUD_LO_REG_VALUE(CUD_lo_value) +#define WRITE_CUD_HI_REG_VALUE(CUD_hi_value) \ + PV_WRITE_CUD_HI_REG_VALUE(CUD_hi_value) +#define BOOT_WRITE_CUD_LO_REG_VALUE(CUD_lo_value) \ + BOOT_PV_WRITE_CUD_LO_REG_VALUE(CUD_lo_value) +#define BOOT_WRITE_CUD_HI_REG_VALUE(CUD_hi_value) \ + BOOT_PV_WRITE_CUD_HI_REG_VALUE(CUD_hi_value) + +/* + * Read/write low/high double-word Globals Register (GD) + */ + +#define READ_GD_LO_REG_VALUE() PV_READ_GD_LO_REG_VALUE() +#define READ_GD_HI_REG_VALUE() PV_READ_GD_HI_REG_VALUE() +#define BOOT_READ_GD_LO_REG_VALUE() BOOT_PV_READ_GD_LO_REG_VALUE() +#define BOOT_READ_GD_HI_REG_VALUE() BOOT_PV_READ_GD_HI_REG_VALUE() + +#define WRITE_GD_LO_REG_VALUE(GD_lo_value) \ + PV_WRITE_GD_LO_REG_VALUE(GD_lo_value) +#define WRITE_GD_HI_REG_VALUE(GD_hi_value) \ + PV_WRITE_GD_HI_REG_VALUE(GD_hi_value) +#define BOOT_WRITE_GD_LO_REG_VALUE(GD_lo_value) \ + BOOT_PV_WRITE_GD_LO_REG_VALUE(GD_lo_value) +#define BOOT_WRITE_GD_HI_REG_VALUE(GD_hi_value) \ + BOOT_PV_WRITE_GD_HI_REG_VALUE(GD_hi_value) + +/* + * Read/write low/high quad-word Procedure Stack Pointer Register (PSP) + */ + +#define READ_PSP_LO_REG_VALUE() PV_READ_PSP_LO_REG_VALUE() +#define READ_PSP_HI_REG_VALUE() PV_READ_PSP_HI_REG_VALUE() +#define BOOT_READ_PSP_LO_REG_VALUE() BOOT_PV_READ_PSP_LO_REG_VALUE() +#define BOOT_READ_PSP_HI_REG_VALUE() BOOT_PV_READ_PSP_HI_REG_VALUE() + +#define WRITE_PSP_LO_REG_VALUE(PSP_lo_value) \ + PV_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) +#define WRITE_PSP_HI_REG_VALUE(PSP_hi_value) \ + PV_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) +#define BOOT_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) \ + BOOT_PV_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) +#define BOOT_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) \ + BOOT_PV_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) + +/* + * Read/write low/high quad-word Procedure Chain Stack Pointer Register (PCSP) + */ +#define READ_PCSP_LO_REG_VALUE() PV_READ_PCSP_LO_REG_VALUE() +#define READ_PCSP_HI_REG_VALUE() PV_READ_PCSP_HI_REG_VALUE() +#define 
BOOT_READ_PCSP_LO_REG_VALUE() BOOT_PV_READ_PCSP_LO_REG_VALUE() +#define BOOT_READ_PCSP_HI_REG_VALUE() BOOT_PV_READ_PCSP_HI_REG_VALUE() + +#define WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) \ + PV_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) +#define WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) \ + PV_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) +#define BOOT_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) \ + BOOT_PV_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) +#define BOOT_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) \ + BOOT_PV_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) + +/* + * Read/write low/high quad-word Current Chain Register (CR0/CR1) + */ +#define READ_CR0_LO_REG_VALUE() PV_READ_CR0_LO_REG_VALUE() +#define READ_CR0_HI_REG_VALUE() PV_READ_CR0_HI_REG_VALUE() +#define READ_CR1_LO_REG_VALUE() PV_READ_CR1_LO_REG_VALUE() +#define READ_CR1_HI_REG_VALUE() PV_READ_CR1_HI_REG_VALUE() + +#define WRITE_CR0_LO_REG_VALUE(CR0_lo_value) \ + PV_WRITE_CR0_LO_REG_VALUE(CR0_lo_value) +#define WRITE_CR0_HI_REG_VALUE(CR0_hi_value) \ + PV_WRITE_CR0_HI_REG_VALUE(CR0_hi_value) +#define WRITE_CR1_LO_REG_VALUE(CR1_lo_value) \ + PV_WRITE_CR1_LO_REG_VALUE(CR1_lo_value) +#define WRITE_CR1_HI_REG_VALUE(CR1_hi_value) \ + PV_WRITE_CR1_HI_REG_VALUE(CR1_hi_value) + +/* + * Read/write double-word Control Transfer Preparation Registers + * (CTPR1/CTPR2/CTPR3) + */ +#define READ_CTPR_REG_VALUE(reg_no) PV_READ_CTPR_REG_VALUE(reg_no) + +#define WRITE_CTPR_REG_VALUE(reg_no, CTPR_value) \ + PV_WRITE_CTPR_REG_VALUE(reg_no, CTPR_value) + +/* + * Read/write low/high double-word Trap Info Registers (TIRs) + */ +#define READ_TIR_LO_REG_VALUE() PV_READ_TIR_LO_REG_VALUE() +#define READ_TIR_HI_REG_VALUE() PV_READ_TIR_HI_REG_VALUE() + +#define WRITE_TIR_LO_REG_VALUE(TIR_lo_value) \ + PV_WRITE_TIR_LO_REG_VALUE(TIR_lo_value) +#define WRITE_TIR_HI_REG_VALUE(TIR_hi_value) \ + PV_WRITE_TIR_HI_REG_VALUE(TIR_hi_value) + +/* + * Read/write low/high double-word Non-Protected User Stack Descriptor + * Register (USD) + */ +#define READ_USD_LO_REG_VALUE() 
PV_READ_USD_LO_REG_VALUE() +#define READ_USD_HI_REG_VALUE() PV_READ_USD_HI_REG_VALUE() +#define BOOT_READ_USD_LO_REG_VALUE() BOOT_PV_READ_USD_LO_REG_VALUE() +#define BOOT_READ_USD_HI_REG_VALUE() BOOT_PV_READ_USD_HI_REG_VALUE() + +#define WRITE_USD_LO_REG_VALUE(USD_lo_value) \ + PV_WRITE_USD_LO_REG_VALUE(USD_lo_value) +#define WRITE_USD_HI_REG_VALUE(USD_hi_value) \ + PV_WRITE_USD_HI_REG_VALUE(USD_hi_value) +#define BOOT_WRITE_USD_LO_REG_VALUE(USD_lo_value) \ + BOOT_PV_WRITE_USD_LO_REG_VALUE(USD_lo_value) +#define BOOT_WRITE_USD_HI_REG_VALUE(USD_hi_value) \ + BOOT_PV_WRITE_USD_HI_REG_VALUE(USD_hi_value) + +/* + * Read/write low/high double-word Protected User Stack Descriptor + * Register (PUSD) + */ +#define READ_PUSD_LO_REG_VALUE() PV_READ_PUSD_LO_REG_VALUE() +#define READ_PUSD_HI_REG_VALUE() PV_READ_PUSD_HI_REG_VALUE() + +#define WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value) \ + PV_WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value) +#define WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value) \ + PV_WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value) + +/* + * Read/write double-word User Stacks Base Register (USBR) + */ +#define READ_SBR_REG_VALUE() PV_READ_SBR_REG_VALUE() +#define READ_USBR_REG_VALUE() PV_READ_USBR_REG_VALUE() +#define BOOT_READ_USBR_REG_VALUE() BOOT_PV_READ_USBR_REG_VALUE() +#define BOOT_READ_SBR_REG_VALUE() BOOT_PV_READ_SBR_REG_VALUE() + +#define WRITE_SBR_REG_VALUE(SBR_value) \ + PV_WRITE_SBR_REG_VALUE(SBR_value) +#define WRITE_USBR_REG_VALUE(USBR_value) \ + PV_WRITE_USBR_REG_VALUE(USBR_value) +#define BOOT_WRITE_USBR_REG_VALUE(USBR_value) \ + BOOT_PV_WRITE_USBR_REG_VALUE(USBR_value) +#define BOOT_WRITE_SBR_REG_VALUE(SBR_value) \ + BOOT_PV_WRITE_SBR_REG_VALUE(SBR_value) + +/* + * Read/write double-word Window Descriptor Register (WD) + */ +#define READ_WD_REG_VALUE() PV_READ_WD_REG_VALUE() +#define WRITE_WD_REG_VALUE(WD_value) PV_WRITE_WD_REG_VALUE(WD_value) + +/* + * Read/write double-word Loop Status Register (LSR) + */ +#define READ_LSR_REG_VALUE() \ + PV_READ_LSR_REG_VALUE() +#define 
WRITE_LSR_REG_VALUE(LSR_value) \ + PV_WRITE_LSR_REG_VALUE(LSR_value) + +/* + * Read/write double-word Initial Loop Counters Register (ILCR) + */ +#define READ_ILCR_REG_VALUE() \ + PV_READ_ILCR_REG_VALUE() +#define WRITE_ILCR_REG_VALUE(ILCR_value) \ + PV_WRITE_ILCR_REG_VALUE(ILCR_value) + +/* + * Read/write OS register which point to current process thread info + * structure (OSR0) + */ +#define READ_OSR0_REG_VALUE() PV_READ_OSR0_REG_VALUE() +#define BOOT_READ_OSR0_REG_VALUE() BOOT_PV_READ_OSR0_REG_VALUE() + +#define WRITE_OSR0_REG_VALUE(osr0_value) \ + PV_WRITE_OSR0_REG_VALUE(osr0_value) +#define BOOT_WRITE_OSR0_REG_VALUE(osr0_value) \ + BOOT_PV_WRITE_OSR0_REG_VALUE(osr0_value) + +/* + * Read/write OS Entries Mask (OSEM) + */ +#define READ_OSEM_REG_VALUE() \ + PV_READ_OSEM_REG_VALUE() +#define WRITE_OSEM_REG_VALUE(OSEM_value) \ + PV_WRITE_OSEM_REG_VALUE(OSEM_value) + +/* + * Read/write word Base Global Register (BGR) + */ +#define READ_BGR_REG_VALUE() PV_READ_BGR_REG_VALUE() +#define BOOT_READ_BGR_REG_VALUE() BOOT_PV_READ_BGR_REG_VALUE() + +#define WRITE_BGR_REG_VALUE(BGR_value) \ + PV_WRITE_BGR_REG_VALUE(BGR_value) +#define BOOT_WRITE_BGR_REG_VALUE(BGR_value) \ + BOOT_PV_WRITE_BGR_REG_VALUE(BGR_value) + +/* + * Read CPU current clock regigister (CLKR) + */ +#define READ_CLKR_REG_VALUE() PV_READ_CLKR_REG_VALUE() + +/* + * Read/Write system clock registers (SCLKM) + */ +#define READ_SCLKR_REG_VALUE() PV_READ_SCLKR_REG_VALUE() +#define READ_SCLKM1_REG_VALUE() PV_READ_SCLKM1_REG_VALUE() +#define READ_SCLKM2_REG_VALUE() PV_READ_SCLKM2_REG_VALUE() +#define READ_SCLKM3_REG_VALUE() PV_READ_SCLKM3_REG_VALUE() + +#define WRITE_SCLKR_REG_VALUE(reg_value) \ + PV_WRITE_SCLKR_REG_VALUE(reg_value) +#define WRITE_SCLKM1_REG_VALUE(reg_value) \ + PV_WRITE_SCLKM1_REG_VALUE(reg_value) +#define WRITE_SCLKM2_REG_VALUE(reg_value) \ + PV_WRITE_SCLKM2_REG_VALUE(reg_value) +#define WRITE_SCLKM3_REG_VALUE(reg_value) \ + PV_WRITE_SCLKM3_REG_VALUE(reg_value) + +/* + * Read/Write Control Unit 
HardWare registers (CU_HW0/CU_HW1) + */ +#define READ_CU_HW0_REG_VALUE() PV_READ_CU_HW0_REG_VALUE() +#define READ_CU_HW1_REG_VALUE() PV_READ_CU_HW1_REG_VALUE() +#define WRITE_CU_HW0_REG_VALUE(reg_value) \ + PV_WRITE_CU_HW0_REG_VALUE(reg_value) +#define WRITE_CU_HW1_REG_VALUE(reg_value) \ + PV_WRITE_CU_HW1_REG_VALUE(reg_value) + +/* + * Read/write low/high double-word Recovery point register (RPR) + */ +#define READ_RPR_LO_REG_VALUE() PV_READ_RPR_LO_REG_VALUE() +#define READ_RPR_HI_REG_VALUE() PV_READ_RPR_HI_REG_VALUE() +#define READ_SBBP_REG_VALUE() PV_READ_SBBP_REG_VALUE() + +#define WRITE_RPR_LO_REG_VALUE(RPR_lo_value) \ + PV_WRITE_RPR_LO_REG_VALUE(RPR_lo_value) +#define WRITE_RPR_HI_REG_VALUE(RPR_hi_value) \ + PV_WRITE_RPR_HI_REG_VALUE(RPR_hi_value) + +/* + * Read double-word CPU current Instruction Pointer register (IP) + */ +#define READ_IP_REG_VALUE() PV_READ_IP_REG_VALUE() + +/* + * Read debug and monitors regigisters + */ +#define READ_DIBCR_REG_VALUE() PV_READ_DIBCR_REG_VALUE() +#define READ_DIBSR_REG_VALUE() PV_READ_DIBSR_REG_VALUE() +#define READ_DIMCR_REG_VALUE() PV_READ_DIMCR_REG_VALUE() +#define READ_DIBAR0_REG_VALUE() PV_READ_DIBAR0_REG_VALUE() +#define READ_DIBAR1_REG_VALUE() PV_READ_DIBAR1_REG_VALUE() +#define READ_DIBAR2_REG_VALUE() PV_READ_DIBAR2_REG_VALUE() +#define READ_DIBAR3_REG_VALUE() PV_READ_DIBAR3_REG_VALUE() +#define READ_DIMAR0_REG_VALUE() PV_READ_DIMAR0_REG_VALUE() +#define READ_DIMAR1_REG_VALUE() PV_READ_DIMAR1_REG_VALUE() + +#define WRITE_DIBCR_REG_VALUE(DIBCR_value) \ + PV_WRITE_DIBCR_REG_VALUE(DIBCR_value) +#define WRITE_DIBSR_REG_VALUE(DIBSR_value) \ + PV_WRITE_DIBSR_REG_VALUE(DIBSR_value) +#define WRITE_DIMCR_REG_VALUE(DIMCR_value) \ + PV_WRITE_DIMCR_REG_VALUE(DIMCR_value) +#define WRITE_DIBAR0_REG_VALUE(DIBAR0_value) \ + PV_WRITE_DIBAR0_REG_VALUE(DIBAR0_value) +#define WRITE_DIBAR1_REG_VALUE(DIBAR1_value) \ + PV_WRITE_DIBAR1_REG_VALUE(DIBAR1_value) +#define WRITE_DIBAR2_REG_VALUE(DIBAR2_value) \ + 
PV_WRITE_DIBAR2_REG_VALUE(DIBAR2_value) +#define WRITE_DIBAR3_REG_VALUE(DIBAR3_value) \ + PV_WRITE_DIBAR3_REG_VALUE(DIBAR3_value) +#define WRITE_DIMAR0_REG_VALUE(DIMAR0_value) \ + PV_WRITE_DIMAR0_REG_VALUE(DIMAR0_value) +#define WRITE_DIMAR1_REG_VALUE(DIMAR1_value) \ + PV_WRITE_DIMAR1_REG_VALUE(DIMAR1_value) + +/* + * Read/write double-word Compilation Unit Table Register (CUTD) + */ +#define READ_CUTD_REG_VALUE() \ + PV_READ_CUTD_REG_VALUE() +#define WRITE_CUTD_REG_VALUE(CUTD_value) \ + PV_NV_NOIRQ_WRITE_CUTD_REG_VALUE(CUTD_value) + +/* + * Read/write word Compilation Unit Index Register (CUIR) + */ +#define READ_CUIR_REG_VALUE() \ + PV_READ_CUIR_REG_VALUE() + +/* + * Read/write double-word Compilation Unit Types Descriptor (TSD) + */ +#define READ_TSD_REG_VALUE() \ + PV_READ_TSD_REG_VALUE() +#define WRITE_TSD_REG_VALUE(TSD_value) \ + PV_WRITE_TSD_REG_VALUE(TSD_value) + +/* + * Read/write word Processor State Register (PSR) + */ +#define READ_PSR_REG_VALUE() PV_READ_PSR_REG_VALUE() +#define BOOT_READ_PSR_REG_VALUE() BOOT_PV_READ_PSR_REG_VALUE() + +#define WRITE_PSR_REG_VALUE(PSR_value) \ + PV_WRITE_PSR_REG_VALUE(PSR_value) +#define BOOT_WRITE_PSR_REG_VALUE(PSR_value) \ + BOOT_PV_WRITE_PSR_REG_VALUE(PSR_value) +#define WRITE_PSR_IRQ_BARRIER(PSR_value) \ + PV_WRITE_PSR_IRQ_BARRIER(PSR_value) + +/* + * Read/write word User Processor State Register (UPSR) + */ +#define READ_UPSR_REG_VALUE() PV_READ_UPSR_REG_VALUE() +#define BOOT_READ_UPSR_REG_VALUE() BOOT_PV_READ_UPSR_REG_VALUE() + +#define WRITE_UPSR_REG_VALUE(UPSR_value) \ + PV_WRITE_UPSR_REG_VALUE(UPSR_value) +#define BOOT_WRITE_UPSR_REG_VALUE(UPSR_value) \ + BOOT_PV_WRITE_UPSR_REG_VALUE(UPSR_value) +#define WRITE_UPSR_IRQ_BARRIER(PSR_value) \ + PV_WRITE_UPSR_IRQ_BARRIER(PSR_value) + +/* + * Read/write word floating point control registers (PFPFR/FPCR/FPSR) + */ +#define READ_PFPFR_REG_VALUE() PV_READ_PFPFR_REG_VALUE() +#define READ_FPCR_REG_VALUE() PV_READ_FPCR_REG_VALUE() +#define READ_FPSR_REG_VALUE() 
PV_READ_FPSR_REG_VALUE() + +#define WRITE_PFPFR_REG_VALUE(PFPFR_value) \ + PV_WRITE_PFPFR_REG_VALUE(PFPFR_value) +#define WRITE_FPCR_REG_VALUE(FPCR_value) \ + PV_WRITE_FPCR_REG_VALUE(FPCR_value) +#define WRITE_FPSR_REG_VALUE(FPSR_value) \ + PV_WRITE_FPSR_REG_VALUE(FPSR_value) + +/* + * Read/write low/high double-word Intel segments registers (xS) + */ + +#define READ_CS_LO_REG_VALUE() PV_READ_CS_LO_REG_VALUE() +#define READ_CS_HI_REG_VALUE() PV_READ_CS_HI_REG_VALUE() +#define READ_DS_LO_REG_VALUE() PV_READ_DS_LO_REG_VALUE() +#define READ_DS_HI_REG_VALUE() PV_READ_DS_HI_REG_VALUE() +#define READ_ES_LO_REG_VALUE() PV_READ_ES_LO_REG_VALUE() +#define READ_ES_HI_REG_VALUE() PV_READ_ES_HI_REG_VALUE() +#define READ_FS_LO_REG_VALUE() PV_READ_FS_LO_REG_VALUE() +#define READ_FS_HI_REG_VALUE() PV_READ_FS_HI_REG_VALUE() +#define READ_GS_LO_REG_VALUE() PV_READ_GS_LO_REG_VALUE() +#define READ_GS_HI_REG_VALUE() PV_READ_GS_HI_REG_VALUE() +#define READ_SS_LO_REG_VALUE() PV_READ_SS_LO_REG_VALUE() +#define READ_SS_HI_REG_VALUE() PV_READ_SS_HI_REG_VALUE() + +#define WRITE_CS_LO_REG_VALUE(sd) PV_WRITE_CS_LO_REG_VALUE(sd) +#define WRITE_CS_HI_REG_VALUE(sd) PV_WRITE_CS_HI_REG_VALUE(sd) +#define WRITE_DS_LO_REG_VALUE(sd) PV_WRITE_DS_LO_REG_VALUE(sd) +#define WRITE_DS_HI_REG_VALUE(sd) PV_WRITE_DS_HI_REG_VALUE(sd) +#define WRITE_ES_LO_REG_VALUE(sd) PV_WRITE_ES_LO_REG_VALUE(sd) +#define WRITE_ES_HI_REG_VALUE(sd) PV_WRITE_ES_HI_REG_VALUE(sd) +#define WRITE_FS_LO_REG_VALUE(sd) PV_WRITE_FS_LO_REG_VALUE(sd) +#define WRITE_FS_HI_REG_VALUE(sd) PV_WRITE_FS_HI_REG_VALUE(sd) +#define WRITE_GS_LO_REG_VALUE(sd) PV_WRITE_GS_LO_REG_VALUE(sd) +#define WRITE_GS_HI_REG_VALUE(sd) PV_WRITE_GS_HI_REG_VALUE(sd) +#define WRITE_SS_LO_REG_VALUE(sd) PV_WRITE_SS_LO_REG_VALUE(sd) +#define WRITE_SS_HI_REG_VALUE(sd) PV_WRITE_SS_HI_REG_VALUE(sd) + +/* + * Read doubleword User Processor Identification Register (IDR) + */ +#define READ_IDR_REG_VALUE() PV_READ_IDR_REG_VALUE() +#define BOOT_READ_IDR_REG_VALUE() 
BOOT_PV_READ_IDR_REG_VALUE() + +/* + * Processor Core Mode Register (CORE_MODE) and + */ +#define READ_CORE_MODE_REG_VALUE() PV_READ_CORE_MODE_REG_VALUE() +#define BOOT_READ_CORE_MODE_REG_VALUE() BOOT_PV_READ_CORE_MODE_REG_VALUE() +#define WRITE_CORE_MODE_REG_VALUE(modes) \ + PV_WRITE_CORE_MODE_REG_VALUE(modes) +#define BOOT_WRITE_CORE_MODE_REG_VALUE(modes) \ + BOOT_PV_WRITE_CORE_MODE_REG_VALUE(modes) + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* ! __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* _E2K_PARAVIRT_CPU_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/paravirt/e2k.h b/arch/e2k/include/asm/paravirt/e2k.h new file mode 100644 index 000000000000..aff4671f4236 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/e2k.h @@ -0,0 +1,53 @@ +#ifndef _ASM_PARAVIRT_E2K_H_ +#define _ASM_PARAVIRT_E2K_H_ + +/* Do not include the header directly, only through asm/e2k.h */ + + +#include +#include +#include +#include + +#define pv_get_machine_id() \ + ((paravirt_enabled()) ? guest_machine_id : native_machine_id) +#define boot_pv_get_machine_id() \ + ((boot_paravirt_enabled()) ? 
\ + guest_machine_id : boot_native_machine_id) +#define pv_set_machine_id(mach_id) \ +({ \ + if (paravirt_enabled()) \ + guest_machine_id = (mach_id); \ + else \ + native_machine_id = (mach_id); \ +}) +#define boot_pv_set_machine_id(mach_id) \ +({ \ + if (boot_paravirt_enabled()) \ + guest_machine_id = (mach_id); \ + else \ + boot_native_machine_id = (mach_id); \ +}) + +static inline void +pv_set_mach_type_id(void) +{ + pv_init_ops.set_mach_type_id(); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ + +#define get_machine_id() pv_get_machine_id() +#define boot_get_machine_id() boot_pv_get_machine_id() +#define set_machine_id(mach_id) pv_set_machine_id(mach_id) +#define boot_set_machine_id(mach_id) boot_pv_set_machine_id(mach_id) + +static inline void set_mach_type_id(void) +{ + pv_set_mach_type_id(); +} + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* _ASM_PARAVIRT_E2K_H_ */ diff --git a/arch/e2k/include/asm/paravirt/epic.h b/arch/e2k/include/asm/paravirt/epic.h new file mode 100644 index 000000000000..2f891fbc1011 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/epic.h @@ -0,0 +1,77 @@ +#ifndef __ASM_PARAVIRT_EPIC_H +#define __ASM_PARAVIRT_EPIC_H + +#ifdef __KERNEL__ +#include +#include + +/* + * Basic functions accessing virtual CEPICs on guest. 
+ */ + +static inline unsigned int pv_epic_read_w(unsigned int reg) +{ + return pv_epic_ops.epic_read_w(reg); +} + +static inline void pv_epic_write_w(unsigned int reg, unsigned int v) +{ + pv_epic_ops.epic_write_w(reg, v); +} + +static inline unsigned long pv_epic_read_d(unsigned int reg) +{ + return pv_epic_ops.epic_read_d(reg); +} + +static inline void pv_epic_write_d(unsigned int reg, unsigned long v) +{ + pv_epic_ops.epic_write_d(reg, v); +} + +static inline unsigned int boot_pv_epic_read_w(unsigned int reg) +{ + return BOOT_PARAVIRT_EPIC_READ_W(reg); +} + +static inline void boot_pv_epic_write_w(unsigned int reg, unsigned int v) +{ + BOOT_PARAVIRT_EPIC_WRITE_W(reg, v); +} + +#ifdef CONFIG_PARAVIRT_GUEST + +static inline void epic_write_w(unsigned int reg, unsigned int v) +{ + pv_epic_write_w(reg, v); +} + +static inline unsigned int epic_read_w(unsigned int reg) +{ + return pv_epic_read_w(reg); +} + +static inline void epic_write_d(unsigned int reg, unsigned long v) +{ + pv_epic_write_d(reg, v); +} + +static inline unsigned long epic_read_d(unsigned int reg) +{ + return pv_epic_read_w(reg); +} + +static inline void boot_epic_write_w(unsigned int reg, unsigned int v) +{ + boot_pv_epic_write_w(reg, v); +} + +static inline unsigned int boot_epic_read_w(unsigned int reg) +{ + return boot_pv_epic_read_w(reg); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_PARAVIRT_EPIC_H */ diff --git a/arch/e2k/include/asm/paravirt/fast_syscalls.h b/arch/e2k/include/asm/paravirt/fast_syscalls.h new file mode 100644 index 000000000000..85bb758d16a9 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/fast_syscalls.h @@ -0,0 +1,59 @@ +#ifndef _ASM_E2K_PARAVIRT_FAST_SYSCALLS_H +#define _ASM_E2K_PARAVIRT_FAST_SYSCALLS_H + +#include +#include + +static inline int +pv_do_fast_clock_gettime(const clockid_t which_clock, struct timespec *tp) +{ + return pv_cpu_ops.do_fast_clock_gettime(which_clock, tp); +} + +static inline int 
+pv_fast_sys_clock_gettime(const clockid_t which_clock, + struct timespec __user *tp) +{ + return pv_cpu_ops.fast_sys_clock_gettime(which_clock, tp); +} + +static inline int +pv_do_fast_gettimeofday(struct timeval *tv) +{ + return pv_cpu_ops.do_fast_gettimeofday(tv); +} + +static inline int +pv_fast_sys_siggetmask(u64 __user *oset, size_t sigsetsize) +{ + return pv_cpu_ops.fast_sys_siggetmask(oset, sigsetsize); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is native guest kernel (not paravirtualized based on pv_ops) */ +static inline int +do_fast_clock_gettime(const clockid_t which_clock, struct timespec *tp) +{ + return pv_do_fast_clock_gettime(which_clock, tp); +} + +static inline int +fast_sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp) +{ + return pv_fast_sys_clock_gettime(which_clock, tp); +} + +static inline int +do_fast_gettimeofday(struct timeval *tv) +{ + return pv_do_fast_gettimeofday(tv); +} +static inline int +fast_sys_siggetmask(u64 __user *oset, size_t sigsetsize) +{ + return pv_fast_sys_siggetmask(oset, sigsetsize); +} +#endif /* ! CONFIG_PARAVIRT_GUEST */ + +#endif /* _ASM_E2K_PARAVIRT_FAST_SYSCALLS_H */ + diff --git a/arch/e2k/include/asm/paravirt/host_printk.h b/arch/e2k/include/asm/paravirt/host_printk.h new file mode 100644 index 000000000000..d10d6f7ae718 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/host_printk.h @@ -0,0 +1,21 @@ +/* + * KVM guest printk() on host support + * + * Copyright 2015 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_PARAVIRT_HOST_PRINTK_H +#define _E2K_PARAVIRT_HOST_PRINTK_H + +#include +#include +#include + +#define pv_host_printk(fmt, args...) (pv_cpu_ops.host_printk(fmt, ##args)) + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +#define host_printk(fmt, args...) pv_host_printk(fmt, ##args) +#endif /* ! CONFIG_PARAVIRT_GUEST */ + +#endif /* ! 
_E2K_PARAVIRT_HOST_PRINTK_H */ diff --git a/arch/e2k/include/asm/paravirt/hw_stacks.h b/arch/e2k/include/asm/paravirt/hw_stacks.h new file mode 100644 index 000000000000..5bc6a846fad2 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/hw_stacks.h @@ -0,0 +1,141 @@ +/* + * KVM guest hardware stacks access support + * + * Copyright (C) 2016 MCST + */ + +#ifndef _E2K_PARAVIRT_HW_STACKS_H_ +#define _E2K_PARAVIRT_HW_STACKS_H_ + +#ifndef __ASSEMBLY__ + +#include +#include + +/* procedure chain stack items access */ + +static inline unsigned long +pv_get_active_cr0_lo_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + if (!paravirt_enabled()) + return native_get_active_cr0_lo_value(base, cr_ind); + else + return pv_cpu_ops.get_active_cr0_lo_value(base, cr_ind); +} +static inline unsigned long +pv_get_active_cr0_hi_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + if (!paravirt_enabled()) + return native_get_active_cr0_hi_value(base, cr_ind); + else + return pv_cpu_ops.get_active_cr0_hi_value(base, cr_ind); +} +static inline unsigned long +pv_get_active_cr1_lo_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + if (!paravirt_enabled()) + return native_get_active_cr1_lo_value(base, cr_ind); + else + return pv_cpu_ops.get_active_cr1_lo_value(base, cr_ind); +} +static inline unsigned long +pv_get_active_cr1_hi_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + if (!paravirt_enabled()) + return native_get_active_cr1_hi_value(base, cr_ind); + else + return pv_cpu_ops.get_active_cr1_hi_value(base, cr_ind); +} +static inline void +pv_put_active_cr0_lo_value(unsigned long cr_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + if (!paravirt_enabled()) + native_put_active_cr0_lo_value(cr_value, base, cr_ind); + else + pv_cpu_ops.put_active_cr0_lo_value(cr_value, base, cr_ind); +} +static inline void +pv_put_active_cr0_hi_value(unsigned long cr_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + if (!paravirt_enabled()) + native_put_active_cr0_hi_value(cr_value, base, cr_ind); + else + 
pv_cpu_ops.put_active_cr0_hi_value(cr_value, base, cr_ind); +} +static inline void +pv_put_active_cr1_lo_value(unsigned long cr_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + if (!paravirt_enabled()) + native_put_active_cr1_lo_value(cr_value, base, cr_ind); + else + pv_cpu_ops.put_active_cr1_lo_value(cr_value, base, cr_ind); +} +static inline void +pv_put_active_cr1_hi_value(unsigned long cr_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + if (!paravirt_enabled()) + native_put_active_cr1_hi_value(cr_value, base, cr_ind); + else + pv_cpu_ops.put_active_cr1_hi_value(cr_value, base, cr_ind); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* pure guest kernel (not paravirtualized based on pv_ops) */ + +/* + * Procedure chain stack items access + */ +static inline unsigned long +get_active_cr0_lo_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return pv_get_active_cr0_lo_value(base, cr_ind); +} +static inline unsigned long +get_active_cr0_hi_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return pv_get_active_cr0_hi_value(base, cr_ind); +} +static inline unsigned long +get_active_cr1_lo_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return pv_get_active_cr1_lo_value(base, cr_ind); +} +static inline unsigned long +get_active_cr1_hi_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return pv_get_active_cr1_hi_value(base, cr_ind); +} +static inline void +put_active_cr0_lo_value(unsigned long cr_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + pv_put_active_cr0_lo_value(cr_value, base, cr_ind); +} +static inline void +put_active_cr0_hi_value(unsigned long cr_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + pv_put_active_cr0_hi_value(cr_value, base, cr_ind); +} +static inline void +put_active_cr1_lo_value(unsigned long cr_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + pv_put_active_cr1_lo_value(cr_value, base, cr_ind); +} +static inline void +put_active_cr1_hi_value(unsigned long cr_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + pv_put_active_cr1_hi_value(cr_value, base, 
cr_ind); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* ! __ASSEMBLY__ */ +#endif /* _E2K_PARAVIRT_HW_STACKS_H_ */ + + diff --git a/arch/e2k/include/asm/paravirt/io.h b/arch/e2k/include/asm/paravirt/io.h new file mode 100644 index 000000000000..c8904c7c1d44 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/io.h @@ -0,0 +1,294 @@ + +#ifndef _E2K_ASM_PARAVIRT_IO_H_ +#define _E2K_ASM_PARAVIRT_IO_H_ + +#include +#include + + +static inline u8 boot_pv_readb(void __iomem *addr) +{ + return BOOT_PARAVIRT_IO_READ(boot_readb, addr); +} +static inline u16 boot_pv_readw(void __iomem *addr) +{ + return BOOT_PARAVIRT_IO_READ(boot_readw, addr); +} +static inline u32 boot_pv_readl(void __iomem *addr) +{ + return BOOT_PARAVIRT_IO_READ(boot_readl, addr); +} +static inline u64 boot_pv_readll(void __iomem *addr) +{ + return BOOT_PARAVIRT_IO_READ(boot_readll, addr); +} +static inline void boot_pv_writeb(u8 b, void __iomem *addr) +{ + BOOT_PARAVIRT_IO_WRITE(boot_writeb, b, addr); +} +static inline void boot_pv_writew(u16 w, void __iomem *addr) +{ + BOOT_PARAVIRT_IO_WRITE(boot_writew, w, addr); +} +static inline void boot_pv_writel(u32 l, void __iomem *addr) +{ + BOOT_PARAVIRT_IO_WRITE(boot_writel, l, addr); +} +static inline void boot_pv_writell(u64 q, void __iomem *addr) +{ + BOOT_PARAVIRT_IO_WRITE(boot_writell, q, addr); +} + +static inline u8 pv_readb(void __iomem *addr) +{ + return pv_io_ops.readb(addr); +} +static inline u16 pv_readw(void __iomem *addr) +{ + return pv_io_ops.readw(addr); +} +static inline u32 pv_readl(void __iomem *addr) +{ + return pv_io_ops.readl(addr); +} +static inline u64 pv_readll(void __iomem *addr) +{ + return pv_io_ops.readll(addr); +} +static inline void pv_writeb(u8 b, void __iomem *addr) +{ + pv_io_ops.writeb(b, addr); +} +static inline void pv_writew(u16 w, void __iomem *addr) +{ + pv_io_ops.writew(w, addr); +} +static inline void pv_writel(u32 l, void __iomem *addr) +{ + pv_io_ops.writel(l, addr); +} +static inline void pv_writeq(u64 q, void 
__iomem *addr) +{ + pv_io_ops.writell(q, addr); +} + +static inline u8 pv_inb(unsigned long port) +{ + return pv_io_ops.inb(port); +} +static inline void pv_outb(unsigned char byte, unsigned long port) +{ + pv_io_ops.outb(byte, port); +} +static inline void pv_outw(u16 halfword, unsigned long port) +{ + pv_io_ops.outw(halfword, port); +} +static inline u16 pv_inw(unsigned long port) +{ + return pv_io_ops.inw(port); +} +static inline void pv_outl(u32 word, unsigned long port) +{ + pv_io_ops.outl(word, port); +} +static inline u32 pv_inl(unsigned long port) +{ + return pv_io_ops.inl(port); +} + +static inline void pv_outsb(unsigned long port, const void *src, unsigned long count) +{ + pv_io_ops.outsb(port, src, count); +} +static inline void pv_outsw(unsigned long port, const void *src, unsigned long count) +{ + pv_io_ops.outsw(port, src, count); +} +static inline void pv_outsl(unsigned long port, const void *src, unsigned long count) +{ + pv_io_ops.outsl(port, src, count); +} +static inline void pv_insb(unsigned long port, void *dst, unsigned long count) +{ + pv_io_ops.insb(port, dst, count); +} +static inline void pv_insw(unsigned long port, void *dst, unsigned long count) +{ + pv_io_ops.insw(port, dst, count); +} +static inline void pv_insl(unsigned long port, void *dst, unsigned long count) +{ + pv_io_ops.insl(port, dst, count); +} + +static inline void +pv_conf_inb(unsigned int domain, unsigned int bus, unsigned long port, u8 *byte) +{ + pv_io_ops.conf_inb(domain, bus, port, byte); +} +static inline void +pv_conf_inw(unsigned int domain, unsigned int bus, unsigned long port, + u16 *hword) +{ + pv_io_ops.conf_inw(domain, bus, port, hword); +} +static inline void +pv_conf_inl(unsigned int domain, unsigned int bus, unsigned long port, + u32 *word) +{ + pv_io_ops.conf_inl(domain, bus, port, word); +} +static inline void +pv_conf_outb(unsigned int domain, unsigned int bus, unsigned long port, + u8 byte) +{ + pv_io_ops.conf_outb(domain, bus, port, byte); +} +static 
inline void +pv_conf_outw(unsigned int domain, unsigned int bus, unsigned long port, + u16 hword) +{ + pv_io_ops.conf_outw(domain, bus, port, hword); +} +static inline void +pv_conf_outl(unsigned int domain, unsigned int bus, unsigned long port, + u32 word) +{ + pv_io_ops.conf_outl(domain, bus, port, word); +} + +static inline void boot_pv_debug_cons_outb(u8 byte, u16 port) +{ + BOOT_PARAVIRT_OUT_OP(boot_debug_cons_outb, byte, port); +} + +static inline u8 boot_pv_debug_cons_inb(u16 port) +{ + return BOOT_PARAVIRT_IN_OP(boot_debug_cons_inb, port); +} + +static inline u32 boot_pv_debug_cons_inl(u16 port) +{ + return BOOT_PARAVIRT_IN_OP(boot_debug_cons_inl, port); +} + +static inline void pv_debug_cons_outb(u8 byte, u16 port) +{ + pv_boot_ops.debug_cons_outb(byte, port); +} + +static inline void pv_debug_cons_outb_p(u8 byte, u16 port) +{ + pv_boot_ops.debug_cons_outb(byte, port); +} + +static inline u8 pv_debug_cons_inb(u16 port) +{ + return pv_boot_ops.debug_cons_inb(port); +} + +static inline u32 pv_debug_cons_inl(u16 port) +{ + return pv_boot_ops.debug_cons_inl(port); +} + +static inline int __init pv_arch_pci_init(void) +{ + return pv_io_ops.pci_init(); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +static inline void boot_writeb(u8 b, void __iomem *addr) +{ + boot_pv_writeb(b, addr); +} +static inline void boot_writew(u16 w, void __iomem *addr) +{ + boot_pv_writew(w, addr); +} +static inline void boot_writel(u32 l, void __iomem *addr) +{ + boot_pv_writel(l, addr); +} +static inline u8 boot_readb(void __iomem *addr) +{ + return boot_pv_readb(addr); +} +static inline u16 boot_readw(void __iomem *addr) +{ + return boot_pv_readw(addr); +} +static inline u32 boot_readl(void __iomem *addr) +{ + return boot_pv_readl(addr); +} + +static inline void +conf_inb(unsigned int domain, unsigned int bus, unsigned long port, u8 *byte) +{ + pv_conf_inb(domain, bus, port, byte); +} +static inline void +conf_inw(unsigned int domain, unsigned 
int bus, unsigned long port, u16 *hword) +{ + pv_conf_inw(domain, bus, port, hword); +} +static inline void +conf_inl(unsigned int domain, unsigned int bus, unsigned long port, u32 *word) +{ + pv_conf_inl(domain, bus, port, word); +} +static inline void +conf_outb(unsigned int domain, unsigned int bus, unsigned long port, u8 byte) +{ + pv_conf_outb(domain, bus, port, byte); +} +static inline void +conf_outw(unsigned int domain, unsigned int bus, unsigned long port, u16 hword) +{ + pv_conf_outw(domain, bus, port, hword); +} +static inline void +conf_outl(unsigned int domain, unsigned int bus, unsigned long port, u32 word) +{ + pv_conf_outl(domain, bus, port, word); +} + +static inline void boot_debug_cons_outb(u8 byte, u16 port) +{ + boot_pv_debug_cons_outb(byte, port); +} +static inline u8 boot_debug_cons_inb(u16 port) +{ + return boot_pv_debug_cons_inb(port); +} +static inline u32 boot_debug_cons_inl(u16 port) +{ + return boot_pv_debug_cons_inl(port); +} +static inline void debug_cons_outb(u8 byte, u16 port) +{ + pv_debug_cons_outb(byte, port); +} +static inline void debug_cons_outb_p(u8 byte, u16 port) +{ + pv_debug_cons_outb(byte, port); +} +static inline u8 debug_cons_inb(u16 port) +{ + return pv_debug_cons_inb(port); +} +static inline u32 debug_cons_inl(u16 port) +{ + return pv_debug_cons_inl(port); +} + +static inline int __init arch_pci_init(void) +{ + return pv_arch_pci_init(); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* _E2K_ASM_PARAVIRT_IO_H_ */ diff --git a/arch/e2k/include/asm/paravirt/mm_hooks.h b/arch/e2k/include/asm/paravirt/mm_hooks.h new file mode 100644 index 000000000000..f9cb001c4898 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/mm_hooks.h @@ -0,0 +1,14 @@ +#ifndef __ASM_PARAVIRT_GUEST_MM_HOOKS_H +#define __ASM_PARAVIRT_GUEST_MM_HOOKS_H + +#ifdef __KERNEL__ + +#ifdef CONFIG_PARAVIRT_GUEST +static inline void +get_mm_notifier_locked(struct mm_struct *mm) +{ +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __KERNEL__ */ +#endif /* 
__ASM_PARAVIRT_GUEST_MM_HOOKS_H */ diff --git a/arch/e2k/include/asm/paravirt/mman.h b/arch/e2k/include/asm/paravirt/mman.h new file mode 100644 index 000000000000..c713735a4c03 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/mman.h @@ -0,0 +1,203 @@ +#ifndef __ASM_PARAVIRT_MMAN_H +#define __ASM_PARAVIRT_MMAN_H + +#ifdef __KERNEL__ + +#include +#include + +struct mm_struct; +struct vm_area_struct; +struct task_struct; +struct vmap_area; + +/* Memory management mman */ + +static inline int +pv_remap_area_pages(unsigned long address, unsigned long phys_addr, + unsigned long size, unsigned long flags) +{ + return pv_mmu_ops.remap_area_pages(address, phys_addr, size, flags); +} + +static inline int +pv_host_guest_vmap_area(e2k_addr_t start, e2k_addr_t end) +{ + if (pv_mmu_ops.host_guest_vmap_area != NULL) + return pv_mmu_ops.host_guest_vmap_area(start, end); + else + return 0; +} +static inline int +pv_unhost_guest_vmap_area(e2k_addr_t start, e2k_addr_t end) +{ + if (pv_mmu_ops.unhost_guest_vmap_area != NULL) + return pv_mmu_ops.unhost_guest_vmap_area(start, end); + else + return 0; +} + +/* + * Memory management mman + */ +extern inline void pv_free_mm(struct mm_struct *mm) +{ + pv_mmu_ops.free_mm(mm); +} +extern inline struct mm_struct *pv_mm_init(struct mm_struct *mm, + struct task_struct *p, + struct user_namespace *user_ns) +{ + return pv_mmu_ops.mm_init(mm, p, user_ns); +} +static inline int pv_make_host_pages_valid(struct vm_area_struct *vma, + e2k_addr_t start_addr, e2k_addr_t end_addr, + bool chprot, bool flush) +{ + return pv_mmu_ops.make_host_pages_valid(vma, start_addr, end_addr, + chprot, flush); +} +static inline int +pv_set_memory_attr_on_host(e2k_addr_t start, e2k_addr_t end, + enum sma_mode mode) +{ + return pv_mmu_ops.set_memory_attr_on_host(start, end, mode); +} +static inline int pv_access_process_vm(struct task_struct *tsk, + unsigned long addr, void *buf, int len, + unsigned int gup_flags) +{ + return pv_mmu_ops.access_process_vm(tsk, addr, buf, 
len, gup_flags); +} + +extern inline struct vmap_area *pv_alloc_vmap_area(unsigned long size, + unsigned long align, + unsigned long vstart, unsigned long vend, + int node, gfp_t gfp_mask) +{ + return pv_mmu_ops.alloc_vmap_area(size, align, vstart, vend, + node, gfp_mask); +} +extern inline void pv__free_vmap_area(struct vmap_area *va) +{ + pv_mmu_ops.__free_vmap_area(va); +} +#ifdef CONFIG_SMP +extern inline struct vm_struct ** +pv_pcpu_get_vm_areas(const unsigned long *offsets, + const size_t *sizes, int nr_vms, + size_t align) +{ + return pv_mmu_ops.pcpu_get_vm_areas(offsets, sizes, nr_vms, align); +} +#endif /* CONFIG_SMP */ +static inline void +pv_free_pgd_range(struct mmu_gather *tlb, + unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) +{ + pv_mmu_ops.free_pgd_range(tlb, addr, end, floor, ceiling); +} +extern inline void pv_free_unmap_vmap_area(struct vmap_area *va) +{ + pv_mmu_ops.free_unmap_vmap_area(va); +} + +extern inline void pv_unmap_initmem(void *start, void *end) +{ + if (pv_mmu_ops.unmap_initmem) { + pv_mmu_ops.unmap_initmem(start, end); + } +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* Memory management mman */ +static inline void free_mm(struct mm_struct *mm) +{ + pv_free_mm(mm); +} +static inline struct mm_struct *mm_init(struct mm_struct *mm, + struct task_struct *p, + struct user_namespace *user_ns) +{ + return pv_mm_init(mm, p, user_ns); +} +static inline int make_host_pages_valid(struct vm_area_struct *vma, + e2k_addr_t start_addr, e2k_addr_t end_addr, + bool chprot, bool flush) +{ + return pv_make_host_pages_valid(vma, start_addr, end_addr, + chprot, flush); +} +static inline int +set_memory_attr_on_host(e2k_addr_t start, e2k_addr_t end, + enum sma_mode mode) +{ + return pv_set_memory_attr_on_host(start, end, mode); +} +static inline int access_process_vm(struct task_struct *tsk, + unsigned long addr, void *buf, int len, + unsigned int gup_flags) +{ + return pv_access_process_vm(tsk, addr, buf, len, gup_flags); +} 
+ +static inline struct vmap_area *alloc_vmap_area(unsigned long size, + unsigned long align, + unsigned long vstart, unsigned long vend, + int node, gfp_t gfp_mask) +{ + return pv_alloc_vmap_area(size, align, vstart, vend, + node, gfp_mask); +} +static inline void __free_vmap_area(struct vmap_area *va) +{ + pv__free_vmap_area(va); +} +#ifdef CONFIG_SMP +static inline struct vm_struct ** +pcpu_get_vm_areas(const unsigned long *offsets, + const size_t *sizes, int nr_vms, + size_t align) +{ + return pv_pcpu_get_vm_areas(offsets, sizes, nr_vms, align); +} +#endif /* CONFIG_SMP */ +static inline void +free_pgd_range(struct mmu_gather *tlb, + unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) +{ + pv_free_pgd_range(tlb, addr, end, floor, ceiling); +} +static inline void free_unmap_vmap_area(struct vmap_area *va) +{ + pv_free_unmap_vmap_area(va); +} +static inline void unmap_initmem(void *start, void *end) +{ + pv_unmap_initmem(start, end); +} +static inline int +remap_area_pages(unsigned long address, unsigned long phys_addr, + unsigned long size, unsigned long flags) +{ + return pv_remap_area_pages(address, phys_addr, size, flags); +} + +static inline int +host_guest_vmap_area(e2k_addr_t start, e2k_addr_t end) +{ + return pv_host_guest_vmap_area(start, end); +} +static inline int +unhost_guest_vmap_area(e2k_addr_t start, e2k_addr_t end) +{ + return pv_unhost_guest_vmap_area(start, end); +} + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_PARAVIRT_MMAN_H */ diff --git a/arch/e2k/include/asm/paravirt/mmu.h b/arch/e2k/include/asm/paravirt/mmu.h new file mode 100644 index 000000000000..084834691222 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/mmu.h @@ -0,0 +1,130 @@ +#ifndef __ASM_PARAVIRT_MMU_H +#define __ASM_PARAVIRT_MMU_H + +#ifdef __KERNEL__ + +#include +#include + +static inline long +pv_recovery_faulted_tagged_store(e2k_addr_t address, u64 wr_data, u32 data_tag, + u64 st_rec_opc, int chan) +{ + 
return pv_mmu_ops.recovery_faulted_tagged_store(address, wr_data, + data_tag, st_rec_opc, chan); +} +static inline long +pv_recovery_faulted_load(e2k_addr_t address, u64 *ld_val, u8 *data_tag, + u64 ld_rec_opc, int chan) +{ + return pv_mmu_ops.recovery_faulted_load(address, ld_val, data_tag, + ld_rec_opc, chan); +} +static inline long +pv_recovery_faulted_move(e2k_addr_t addr_from, e2k_addr_t addr_to, + int format, int vr, u64 ld_rec_opc, int chan) +{ + return pv_mmu_ops.recovery_faulted_move(addr_from, addr_to, + format, vr, ld_rec_opc, chan); +} +static inline long +pv_recovery_faulted_load_to_greg(e2k_addr_t address, u32 greg_num_d, int format, + int vr, u64 ld_rec_opc, int chan, void *saved_greg) +{ + return pv_mmu_ops.recovery_faulted_load_to_greg(address, greg_num_d, + format, vr, ld_rec_opc, chan, saved_greg); +} + +static inline bool +pv_is_guest_kernel_gregs(struct thread_info *ti, + unsigned greg_num_d, u64 **greg_copy) +{ + if (!paravirt_enabled()) + return native_is_guest_kernel_gregs(ti, greg_num_d, greg_copy); + else + return kvm_is_guest_kernel_gregs(ti, greg_num_d, greg_copy); +} +static inline void +pv_move_tagged_word(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + pv_mmu_ops.move_tagged_word(addr_from, addr_to); +} +static inline void +pv_move_tagged_dword(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + pv_mmu_ops.move_tagged_dword(addr_from, addr_to); +} +static inline void +pv_move_tagged_qword(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + pv_mmu_ops.move_tagged_qword(addr_from, addr_to); +} +static inline void +pv_save_DAM(unsigned long long dam[DAM_ENTRIES_NUM]) +{ + pv_mmu_ops.save_DAM(dam); +} + +#ifdef CONFIG_PARAVIRT_GUEST +static inline long +recovery_faulted_tagged_store(e2k_addr_t address, u64 wr_data, + u32 data_tag, u64 st_rec_opc, int chan) +{ + return pv_recovery_faulted_tagged_store(address, wr_data, data_tag, + st_rec_opc, chan); +} +static inline long +recovery_faulted_load(e2k_addr_t address, u64 *ld_val, u8 *data_tag, + u64 
ld_rec_opc, int chan) +{ + return pv_recovery_faulted_load(address, ld_val, data_tag, + ld_rec_opc, chan); +} +static inline long +recovery_faulted_move(e2k_addr_t addr_from, e2k_addr_t addr_to, + int format, int vr, u64 ld_rec_opc, int chan) +{ + return pv_recovery_faulted_move(addr_from, addr_to, format, vr, + ld_rec_opc, chan); +} +static inline long +recovery_faulted_load_to_greg(e2k_addr_t address, u32 greg_num_d, + int format, int vr, u64 ld_rec_opc, int chan, void *saved_greg) +{ + return pv_recovery_faulted_load_to_greg(address, greg_num_d, + format, vr, ld_rec_opc, chan, saved_greg); +} +static inline bool +is_guest_kernel_gregs(struct thread_info *ti, + unsigned greg_num_d, u64 **greg_copy) +{ + return pv_is_guest_kernel_gregs(ti, greg_num_d, greg_copy); +} +static inline void +move_tagged_word(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + pv_move_tagged_word(addr_from, addr_to); +} +static inline void +move_tagged_dword(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + pv_move_tagged_dword(addr_from, addr_to); +} +static inline void +move_tagged_qword(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + pv_move_tagged_qword(addr_from, addr_to); +} + +static inline void +save_DAM(unsigned long long dam[DAM_ENTRIES_NUM]) +{ + pv_save_DAM(dam); +} +#define SAVE_DAM(dam) save_DAM(dam) + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_PARAVIRT_MMU_H */ diff --git a/arch/e2k/include/asm/paravirt/mmu_context.h b/arch/e2k/include/asm/paravirt/mmu_context.h new file mode 100644 index 000000000000..9203ea13cbc6 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/mmu_context.h @@ -0,0 +1,37 @@ +#ifndef __ASM_PARAVIRT_MMU_CONTEXT_H +#define __ASM_PARAVIRT_MMU_CONTEXT_H + +#ifdef __KERNEL__ + +#include +#include + +static inline void +pv_activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) +{ + pv_mmu_ops.activate_mm(active_mm, mm); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +static inline 
void +activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) +{ + pv_activate_mm(active_mm, mm); +} + +static inline void call_switch_mm(struct mm_struct *prev_mm, + struct mm_struct *next_mm, struct task_struct *next, + int switch_pgd, int switch_mm) +{ + if (!paravirt_enabled() || IS_HV_GM()) + native_call_switch_mm(prev_mm, next_mm, next, + switch_pgd, switch_mm); + else + kvm_call_switch_mm(prev_mm, next_mm, next, + switch_pgd, switch_mm); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __KERNEL__ */ +#endif /* __ASM_PARAVIRT_MMU_CONTEXT_H */ diff --git a/arch/e2k/include/asm/paravirt/mmu_regs_access.h b/arch/e2k/include/asm/paravirt/mmu_regs_access.h new file mode 100644 index 000000000000..67d76d33f6c0 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/mmu_regs_access.h @@ -0,0 +1,651 @@ +/* + * E2K MMU registers access paravirtualization + * + * Copyright 2017 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_PARAVIRT_MMU_REGS_ACCESS_H_ +#define _E2K_PARAVIRT_MMU_REGS_ACCESS_H_ + +#ifndef __ASSEMBLY__ + +#include +#include + +/* + * Write/read MMU register + */ +#ifdef CONFIG_KVM_GUEST +#include + +static inline void +PV_WRITE_MMU_REG(mmu_addr_t mmu_addr, mmu_reg_t mmu_reg) +{ + pv_mmu_ops.write_mmu_reg(mmu_addr, mmu_reg); +} + +static inline mmu_reg_t +PV_READ_MMU_REG(mmu_addr_t mmu_addr) +{ + return pv_mmu_ops.read_mmu_reg(mmu_addr); +} + +static inline void PV_WRITE_MMU_OS_PPTB_REG(mmu_reg_t reg_val) +{ + PV_WRITE_MMU_REG(MMU_ADDR_OS_PPTB, reg_val); +} +static inline unsigned long PV_READ_MMU_OS_PPTB_REG(void) +{ + return mmu_reg_val(PV_READ_MMU_REG(MMU_ADDR_OS_PPTB)); +} +static inline void PV_WRITE_MMU_OS_VPTB_REG(mmu_reg_t reg_val) +{ + PV_WRITE_MMU_REG(MMU_ADDR_OS_VPTB, reg_val); +} +static inline unsigned long PV_READ_MMU_OS_VPTB_REG(void) +{ + return mmu_reg_val(PV_READ_MMU_REG(MMU_ADDR_OS_VPTB)); +} +static inline void PV_WRITE_MMU_OS_VAB_REG(mmu_reg_t reg_val) +{ + PV_WRITE_MMU_REG(MMU_ADDR_OS_VAB, reg_val); +} +static inline 
unsigned long PV_READ_MMU_OS_VAB_REG(void) +{ + return mmu_reg_val(PV_READ_MMU_REG(MMU_ADDR_OS_VAB)); +} + +static inline void +BOOT_PV_WRITE_MMU_REG(mmu_addr_t mmu_addr, mmu_reg_t mmu_reg) +{ + BOOT_PARAVIRT_WRITE_MMU_REG(mmu_addr, mmu_reg); +} + +static inline mmu_reg_t +BOOT_PV_READ_MMU_REG(mmu_addr_t mmu_addr) +{ + return BOOT_PARAVIRT_READ_MMU_REG(mmu_addr); +} + +static inline void BOOT_PV_WRITE_MMU_OS_PPTB_REG(mmu_reg_t reg_val) +{ + BOOT_PV_WRITE_MMU_REG(MMU_ADDR_OS_PPTB, reg_val); +} +static inline unsigned long BOOT_PV_READ_MMU_OS_PPTB_REG(void) +{ + return mmu_reg_val(BOOT_PV_READ_MMU_REG(MMU_ADDR_OS_PPTB)); +} +static inline void BOOT_PV_WRITE_MMU_OS_VPTB_REG(mmu_reg_t reg_val) +{ + BOOT_PV_WRITE_MMU_REG(MMU_ADDR_OS_VPTB, reg_val); +} +static inline unsigned long BOOT_PV_READ_MMU_OS_VPTB_REG(void) +{ + return mmu_reg_val(BOOT_PV_READ_MMU_REG(MMU_ADDR_OS_VPTB)); +} +static inline void BOOT_PV_WRITE_MMU_OS_VAB_REG(mmu_reg_t reg_val) +{ + BOOT_PV_WRITE_MMU_REG(MMU_ADDR_OS_VAB, reg_val); +} +static inline unsigned long BOOT_PV_READ_MMU_OS_VAB_REG(void) +{ + return mmu_reg_val(BOOT_PV_READ_MMU_REG(MMU_ADDR_OS_VAB)); +} + +/* + * Write/read Data TLB register + */ + +static inline void +PV_WRITE_DTLB_REG(tlb_addr_t tlb_addr, mmu_reg_t mmu_reg) +{ + pv_mmu_ops.write_dtlb_reg(tlb_addr, mmu_reg); +} + +static inline mmu_reg_t +PV_READ_DTLB_REG(tlb_addr_t tlb_addr) +{ + return pv_mmu_ops.read_dtlb_reg(tlb_addr); +} + +/* + * Flush TLB page/entry + */ + +static inline void +PV_FLUSH_TLB_ENTRY(flush_op_t flush_op, flush_addr_t flush_addr) +{ + pv_mmu_ops.flush_tlb_entry(flush_op, flush_addr); +} + +/* + * Flush DCACHE line + */ + +static inline void +PV_FLUSH_DCACHE_LINE(e2k_addr_t virt_addr) +{ + pv_mmu_ops.flush_dcache_line(virt_addr); +} + +/* + * Clear DCACHE L1 set + */ +static inline void +PV_CLEAR_DCACHE_L1_SET(e2k_addr_t virt_addr, unsigned long set) +{ + pv_mmu_ops.clear_dcache_l1_set(virt_addr, set); +} + +/* + * Write/read DCACHE L2 registers + */ +static 
inline void +PV_WRITE_L2_REG(unsigned long reg_val, int reg_num, int bank_num) +{ + pv_mmu_ops.write_dcache_l2_reg(reg_val, reg_num, bank_num); +} +static inline unsigned long +PV_READ_L2_REG(int reg_num, int bank_num) +{ + return pv_mmu_ops.read_dcache_l2_reg(reg_num, bank_num); +} + +/* + * Flush ICACHE line + */ +static inline void +PV_FLUSH_ICACHE_LINE(flush_op_t flush_op, flush_addr_t flush_addr) +{ + pv_mmu_ops.flush_icache_line(flush_op, flush_addr); +} + +/* + * Flush and invalidate or write back CACHE(s) (invalidate all caches + * of the processor) + */ +static inline void +PV_FLUSH_CACHE_L12(flush_op_t flush_op) +{ + pv_mmu_ops.flush_cache_all(flush_op); +} + +/* + * Flush TLB (invalidate all TLBs of the processor) + */ +static inline void +PV_FLUSH_TLB_ALL(flush_op_t flush_op) +{ + pv_mmu_ops.do_flush_tlb_all(flush_op); +} + +/* + * Flush ICACHE (invalidate instruction caches of the processor) + */ +static inline void +PV_FLUSH_ICACHE_ALL(flush_op_t flush_op) +{ + pv_mmu_ops.flush_icache_all(flush_op); +} + +/* + * Get Entry probe for virtual address + */ +static inline probe_entry_t +PV_ENTRY_PROBE_MMU_OP(e2k_addr_t virt_addr) +{ + return pv_mmu_ops.entry_probe_mmu_op(virt_addr); +} + +/* + * Get physical address for virtual address + */ +static inline probe_entry_t +PV_ADDRESS_PROBE_MMU_OP(e2k_addr_t virt_addr) +{ + return pv_mmu_ops.address_probe_mmu_op(virt_addr); +} + +/* + * Read CLW register + */ +static inline clw_reg_t +PV_READ_CLW_REG(clw_addr_t clw_addr) +{ + return pv_mmu_ops.read_clw_reg(clw_addr); +} + +/* + * Write CLW register + */ +static inline void +PV_WRITE_CLW_REG(clw_addr_t clw_addr, clw_reg_t val) +{ + pv_mmu_ops.write_clw_reg(clw_addr, val); +} + +/* + * MMU DEBUG registers access + */ +static inline void +PV_WRITE_MMU_DEBUG_REG_VALUE(int reg_no, mmu_reg_t mmu_reg) +{ + pv_mmu_ops.write_mmu_debug_reg(reg_no, mmu_reg); +} +static inline mmu_reg_t +PV_READ_MMU_DEBUG_REG_VALUE(int reg_no) +{ + return 
pv_mmu_ops.read_mmu_debug_reg(reg_no); +} + +static inline mmu_reg_t +PV_READ_DDBAR0_REG_VALUE(void) +{ + return PV_READ_MMU_DEBUG_REG_VALUE(MMU_DDBAR0_REG_NO); +} +static inline mmu_reg_t +PV_READ_DDBAR1_REG_VALUE(void) +{ + return PV_READ_MMU_DEBUG_REG_VALUE(MMU_DDBAR1_REG_NO); +} +static inline mmu_reg_t +PV_READ_DDBAR2_REG_VALUE(void) +{ + return PV_READ_MMU_DEBUG_REG_VALUE(MMU_DDBAR2_REG_NO); +} +static inline mmu_reg_t +PV_READ_DDBAR3_REG_VALUE(void) +{ + return PV_READ_MMU_DEBUG_REG_VALUE(MMU_DDBAR3_REG_NO); +} +static inline mmu_reg_t +PV_READ_DDBCR_REG_VALUE(void) +{ + return PV_READ_MMU_DEBUG_REG_VALUE(MMU_DDBCR_REG_NO); +} +static inline mmu_reg_t +PV_READ_DDBSR_REG_VALUE(void) +{ + return PV_READ_MMU_DEBUG_REG_VALUE(MMU_DDBSR_REG_NO); +} +static inline mmu_reg_t +PV_READ_DDMAR0_REG_VALUE(void) +{ + return PV_READ_MMU_DEBUG_REG_VALUE(MMU_DDMAR0_REG_NO); +} +static inline mmu_reg_t +PV_READ_DDMAR1_REG_VALUE(void) +{ + return PV_READ_MMU_DEBUG_REG_VALUE(MMU_DDMAR1_REG_NO); +} +static inline mmu_reg_t +PV_READ_DDMCR_REG_VALUE(void) +{ + return PV_READ_MMU_DEBUG_REG_VALUE(MMU_DDMCR_REG_NO); +} +static inline void +PV_WRITE_DDBAR0_REG_VALUE(mmu_reg_t mmu_reg) +{ + PV_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBAR0_REG_NO, mmu_reg); +} +static inline void +PV_WRITE_DDBAR1_REG_VALUE(mmu_reg_t mmu_reg) +{ + PV_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBAR1_REG_NO, mmu_reg); +} +static inline void +PV_WRITE_DDBAR2_REG_VALUE(mmu_reg_t mmu_reg) +{ + PV_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBAR2_REG_NO, mmu_reg); +} +static inline void +PV_WRITE_DDBAR3_REG_VALUE(mmu_reg_t mmu_reg) +{ + PV_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBAR3_REG_NO, mmu_reg); +} +static inline void +PV_WRITE_DDBCR_REG_VALUE(mmu_reg_t mmu_reg) +{ + PV_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBCR_REG_NO, mmu_reg); +} +static inline void +PV_WRITE_DDBSR_REG_VALUE(mmu_reg_t mmu_reg) +{ + PV_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBSR_REG_NO, mmu_reg); +} +static inline void +PV_WRITE_DDMAR0_REG_VALUE(mmu_reg_t mmu_reg) +{ + 
PV_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDMAR0_REG_NO, mmu_reg); +} +static inline void +PV_WRITE_DDMAR1_REG_VALUE(mmu_reg_t mmu_reg) +{ + PV_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDMAR1_REG_NO, mmu_reg); +} +static inline void +PV_WRITE_DDMCR_REG_VALUE(mmu_reg_t mmu_reg) +{ + PV_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDMCR_REG_NO, mmu_reg); +} +#else /* ! CONFIG_KVM_GUEST */ + #error "Unknown virtualization type" +#endif /* CONFIG_KVM_GUEST */ + +#ifdef CONFIG_PARAVIRT_GUEST +/* It is paravirtualized host and guest kernel */ + +static inline void WRITE_MMU_REG(mmu_addr_t mmu_addr, mmu_reg_t mmu_reg) +{ + PV_WRITE_MMU_REG(mmu_addr, mmu_reg); +} + +static inline mmu_reg_t READ_MMU_REG(mmu_addr_t mmu_addr) +{ + return (mmu_reg_t)PV_READ_MMU_REG(mmu_addr); +} + +static inline void WRITE_MMU_OS_PPTB(mmu_reg_t reg_val) +{ + PV_WRITE_MMU_OS_PPTB_REG(reg_val); +} +static inline unsigned long READ_MMU_OS_PPTB(void) +{ + return PV_READ_MMU_OS_PPTB_REG(); +} +static inline void WRITE_MMU_OS_VPTB(mmu_reg_t reg_val) +{ + PV_WRITE_MMU_OS_VPTB_REG(reg_val); +} +static inline unsigned long READ_MMU_OS_VPTB(void) +{ + return PV_READ_MMU_OS_VPTB_REG(); +} +static inline void WRITE_MMU_OS_VAB(mmu_reg_t reg_val) +{ + PV_WRITE_MMU_OS_VAB_REG(reg_val); +} +static inline unsigned long READ_MMU_OS_VAB(void) +{ + return PV_READ_MMU_OS_VAB_REG(); +} + +static inline void +BOOT_WRITE_MMU_REG(mmu_addr_t mmu_addr, mmu_reg_t mmu_reg) +{ + BOOT_PV_WRITE_MMU_REG(mmu_addr, mmu_reg); +} + +static inline mmu_reg_t +BOOT_READ_MMU_REG(mmu_addr_t mmu_addr) +{ + return BOOT_PV_READ_MMU_REG(mmu_addr); +} + +static inline void BOOT_WRITE_MMU_OS_PPTB(mmu_reg_t reg_val) +{ + BOOT_PV_WRITE_MMU_OS_PPTB_REG(reg_val); +} +static inline unsigned long BOOT_READ_MMU_OS_PPTB(void) +{ + return BOOT_PV_READ_MMU_OS_PPTB_REG(); +} +static inline void BOOT_WRITE_MMU_OS_VPTB(mmu_reg_t reg_val) +{ + BOOT_PV_WRITE_MMU_OS_VPTB_REG(reg_val); +} +static inline unsigned long BOOT_READ_MMU_OS_VPTB(void) +{ + return BOOT_PV_READ_MMU_OS_VPTB_REG(); +} 
+static inline void BOOT_WRITE_MMU_OS_VAB(mmu_reg_t reg_val) +{ + BOOT_PV_WRITE_MMU_OS_VAB_REG(reg_val); +} +static inline unsigned long BOOT_READ_MMU_OS_VAB(void) +{ + return BOOT_PV_READ_MMU_OS_VAB_REG(); +} + +/* + * Write/read Data TLB register + */ + +static inline void +WRITE_DTLB_REG(tlb_addr_t tlb_addr, mmu_reg_t mmu_reg) +{ + PV_WRITE_DTLB_REG(tlb_addr, mmu_reg); +} + +static inline mmu_reg_t +READ_DTLB_REG(tlb_addr_t tlb_addr) +{ + return PV_READ_DTLB_REG(tlb_addr); +} + +/* + * Flush TLB page/entry + */ + +static inline void +FLUSH_TLB_ENTRY(flush_op_t flush_op, flush_addr_t flush_addr) +{ + PV_FLUSH_TLB_ENTRY(flush_op, flush_addr); +} + +/* + * Flush DCACHE line + */ + +static inline void FLUSH_DCACHE_LINE(e2k_addr_t virt_addr) +{ + PV_FLUSH_DCACHE_LINE(virt_addr); +} +static inline void FLUSH_DCACHE_LINE_OFFSET(e2k_addr_t virt_addr, size_t offset) +{ + PV_FLUSH_DCACHE_LINE(virt_addr + offset); +} + +/* + * Clear DCACHE L1 set + */ +static inline void +CLEAR_DCACHE_L1_SET(e2k_addr_t virt_addr, unsigned long set) +{ + PV_CLEAR_DCACHE_L1_SET(virt_addr, set); +} + +/* + * Write/read DCACHE L2 registers + */ +static inline void +WRITE_L2_REG(unsigned long reg_val, int reg_num, int bank_num) +{ + PV_WRITE_L2_REG(reg_val, reg_num, bank_num); +} +static inline unsigned long +READ_L2_REG(int reg_num, int bank_num) +{ + return PV_READ_L2_REG(reg_num, bank_num); +} + +/* + * Flush ICACHE line + */ + +static inline void +FLUSH_ICACHE_LINE(flush_op_t flush_op, flush_addr_t flush_addr) +{ + PV_FLUSH_ICACHE_LINE(flush_op, flush_addr); +} + +/* + * Flush and invalidate or write back CACHE(s) (invalidate all caches + * of the processor) + */ + +static inline void +FLUSH_CACHE_L12(flush_op_t flush_op) +{ + PV_FLUSH_CACHE_L12(flush_op); +} + +/* + * Flush TLB (invalidate all TLBs of the processor) + */ + +static inline void +FLUSH_TLB_ALL(flush_op_t flush_op) +{ + PV_FLUSH_TLB_ALL(flush_op); +} + +/* + * Flush ICACHE (invalidate instruction caches of the processor) + */ 
+ +static inline void +FLUSH_ICACHE_ALL(flush_op_t flush_op) +{ + PV_FLUSH_ICACHE_ALL(flush_op); +} + +/* + * Get Entry probe for virtual address + */ + +static inline probe_entry_t +ENTRY_PROBE_MMU_OP(e2k_addr_t virt_addr) +{ + return PV_ENTRY_PROBE_MMU_OP(virt_addr); +} + +/* + * Get physical address for virtual address + */ + +static inline probe_entry_t +ADDRESS_PROBE_MMU_OP(e2k_addr_t virt_addr) +{ + return PV_ADDRESS_PROBE_MMU_OP(virt_addr); +} + +/* + * Read CLW register + */ + +static inline clw_reg_t +READ_CLW_REG(clw_addr_t clw_addr) +{ + return PV_READ_CLW_REG(clw_addr); +} + +/* + * Write CLW register + */ + +static inline void +WRITE_CLW_REG(clw_addr_t clw_addr, clw_reg_t val) +{ + PV_WRITE_CLW_REG(clw_addr, val); +} + +/* + * KVM MMU DEBUG registers access + */ +static inline mmu_reg_t +READ_DDBAR0_REG_VALUE(void) +{ + return PV_READ_DDBAR0_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDBAR1_REG_VALUE(void) +{ + return PV_READ_DDBAR1_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDBAR2_REG_VALUE(void) +{ + return PV_READ_DDBAR2_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDBAR3_REG_VALUE(void) +{ + return PV_READ_DDBAR3_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDBCR_REG_VALUE(void) +{ + return PV_READ_DDBCR_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDBSR_REG_VALUE(void) +{ + return PV_READ_DDBSR_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDMAR0_REG_VALUE(void) +{ + return PV_READ_DDMAR0_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDMAR1_REG_VALUE(void) +{ + return PV_READ_DDMAR1_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDMCR_REG_VALUE(void) +{ + return PV_READ_DDMCR_REG_VALUE(); +} +static inline void +WRITE_DDBAR0_REG_VALUE(mmu_reg_t value) +{ + PV_WRITE_DDBAR0_REG_VALUE(value); +} +static inline void +WRITE_DDBAR1_REG_VALUE(mmu_reg_t value) +{ + PV_WRITE_DDBAR1_REG_VALUE(value); +} +static inline void +WRITE_DDBAR2_REG_VALUE(mmu_reg_t value) +{ + PV_WRITE_DDBAR2_REG_VALUE(value); +} +static inline void 
+WRITE_DDBAR3_REG_VALUE(mmu_reg_t value) +{ + PV_WRITE_DDBAR3_REG_VALUE(value); +} +static inline void +WRITE_DDBCR_REG_VALUE(mmu_reg_t value) +{ + PV_WRITE_DDBCR_REG_VALUE(value); +} +static inline void +WRITE_DDBSR_REG_VALUE(mmu_reg_t value) +{ + PV_WRITE_DDBSR_REG_VALUE(value); +} +static inline void +WRITE_DDMAR0_REG_VALUE(mmu_reg_t value) +{ + PV_WRITE_DDMAR0_REG_VALUE(value); +} +static inline void +WRITE_DDMAR1_REG_VALUE(mmu_reg_t value) +{ + PV_WRITE_DDMAR1_REG_VALUE(value); +} +static inline void +WRITE_DDMCR_REG_VALUE(mmu_reg_t value) +{ + PV_WRITE_DDMCR_REG_VALUE(value); +} + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* ! __ASSEMBLY__ */ + +#endif /* _E2K_PARAVIRT_MMU_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/paravirt/pgatomic.h b/arch/e2k/include/asm/paravirt/pgatomic.h new file mode 100644 index 000000000000..fa018d73dda4 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/pgatomic.h @@ -0,0 +1,109 @@ +/* + * E2K paravirtualized page table atomic operations. + * + * Copyright 2018 Salavat S. 
Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_PARAVIRT_PGATOMIC_H +#define _E2K_PARAVIRT_PGATOMIC_H + +#include + +#include +#include +#include +#include + +static inline pgprotval_t +pv_pt_set_wrprotect_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + if (paravirt_enabled()) { + return kvm_pt_set_wrprotect_atomic(mm, addr, pgprot); + } else { + return native_pt_set_wrprotect_atomic(&pgprot->pgprot); + } +} + +static inline pgprotval_t +pv_pt_get_and_clear_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + if (paravirt_enabled()) { + return kvm_pt_get_and_clear_atomic(mm, addr, pgprot); + } else { + return native_pt_get_and_clear_atomic(&pgprot->pgprot); + } +} + +static inline pgprotval_t +pv_pt_get_and_xchg_atomic(struct mm_struct *mm, unsigned long addr, + pgprotval_t newval, pgprot_t *pgprot) +{ + if (paravirt_enabled()) { + return kvm_pt_get_and_xchg_atomic(mm, addr, newval, pgprot); + } else { + return native_pt_get_and_xchg_atomic(newval, &pgprot->pgprot); + } +} + +static inline pgprotval_t +pv_pt_clear_relaxed_atomic(pgprotval_t mask, pgprot_t *pgprot) +{ + if (paravirt_enabled()) { + return kvm_pt_clear_relaxed_atomic(mask, pgprot); + } else { + return native_pt_clear_relaxed_atomic(mask, &pgprot->pgprot); + } +} + +static inline pgprotval_t +pv_pt_clear_young_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + if (paravirt_enabled()) { + return kvm_pt_clear_young_atomic(mm, addr, pgprot); + } else { + return native_pt_clear_young_atomic(&pgprot->pgprot); + } +} + +#if defined(CONFIG_PARAVIRT_GUEST) +/* It is paravirtualized host and guest kernel */ + +static inline pgprotval_t +pt_set_wrprotect_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + return pv_pt_set_wrprotect_atomic(mm, addr, pgprot); +} + +static inline pgprotval_t +pt_get_and_clear_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + return pv_pt_get_and_clear_atomic(mm, addr, 
pgprot); +} + +static inline pgprotval_t +pt_get_and_xchg_atomic(struct mm_struct *mm, unsigned long addr, + pgprotval_t newval, pgprot_t *pgprot) +{ + return pv_pt_get_and_xchg_atomic(mm, addr, newval, pgprot); +} + +static inline pgprotval_t +pt_clear_relaxed_atomic(pgprotval_t mask, pgprot_t *pgprot) +{ + return pv_pt_clear_relaxed_atomic(mask, pgprot); +} + +static inline pgprotval_t +pt_clear_young_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + return pv_pt_clear_young_atomic(mm, addr, pgprot); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* ! _E2K_PARAVIRT_PGATOMIC_H */ diff --git a/arch/e2k/include/asm/paravirt/pgtable.h b/arch/e2k/include/asm/paravirt/pgtable.h new file mode 100644 index 000000000000..c90401b5508b --- /dev/null +++ b/arch/e2k/include/asm/paravirt/pgtable.h @@ -0,0 +1,212 @@ +#ifndef __ASM_PARAVIRT_PGTABLE_H +#define __ASM_PARAVIRT_PGTABLE_H + +#ifdef __KERNEL__ + +#include +#include + +static inline void +pv_write_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval, + bool only_validate, bool to_move) +{ + pv_mmu_ops.write_pte_at(mm, addr, ptep, pteval, only_validate, to_move); +} +static inline void +pv_set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval) +{ + pv_write_pte_at(mm, addr, ptep, pteval, false, false); +} +static inline void +pv_validate_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval) +{ + pv_write_pte_at(mm, addr, ptep, pteval, true, false); +} +static inline void boot_pv_set_pte_at(unsigned long addr, + pte_t *ptep, pte_t pteval) +{ + BOOT_PARAVIRT_GET_MMU_FUNC(boot_set_pte_at)(addr, ptep, pteval); +} +/* private case set pte for guest kernel address */ +static inline void pv_set_pte(pte_t *ptep, pte_t pteval) +{ + pv_mmu_ops.set_pte(ptep, pteval); +} + +static inline void +pv_write_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmdval, + bool only_validate) +{ + pv_mmu_ops.write_pmd_at(mm, 
addr, pmdp, pmdval, only_validate); +} +static inline void +pv_set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmdval) +{ + pv_write_pmd_at(mm, addr, pmdp, pmdval, false); +} +static inline void +pv_validate_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmdval) +{ + pv_write_pmd_at(mm, addr, pmdp, pmdval, true); +} + +static inline void +pv_write_pud_at(struct mm_struct *mm, unsigned long addr, + pud_t *pudp, pud_t pudval, + bool only_validate) +{ + pv_mmu_ops.write_pud_at(mm, addr, pudp, pudval, only_validate); +} +static inline void +pv_set_pud_at(struct mm_struct *mm, unsigned long addr, + pud_t *pudp, pud_t pudval) +{ + pv_write_pud_at(mm, addr, pudp, pudval, false); +} +static inline void +pv_validate_pud_at(struct mm_struct *mm, unsigned long addr, + pud_t *pudp, pud_t pudval) +{ + pv_write_pud_at(mm, addr, pudp, pudval, true); +} + +static inline void +pv_write_pgd_at(struct mm_struct *mm, unsigned long addr, + pgd_t *pgdp, pgd_t pgdval, + bool only_validate) +{ + pv_mmu_ops.write_pgd_at(mm, addr, pgdp, pgdval, only_validate); +} +static inline void +pv_set_pgd_at(struct mm_struct *mm, unsigned long addr, + pgd_t *pgdp, pgd_t pgdval) +{ + pv_write_pgd_at(mm, addr, pgdp, pgdval, false); +} +static inline void +pv_validate_pgd_at(struct mm_struct *mm, unsigned long addr, + pgd_t *pgdp, pgd_t pgdval) +{ + pv_write_pgd_at(mm, addr, pgdp, pgdval, true); +} + +static inline pte_t pv_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + return pv_mmu_ops.ptep_get_and_clear(mm, addr, ptep, false); +} +static inline void pv_ptep_wrprotect_atomic(struct mm_struct *mm, + e2k_addr_t addr, pte_t *ptep) +{ + if (pv_mmu_ops.ptep_wrprotect_atomic) + pv_mmu_ops.ptep_wrprotect_atomic(mm, addr, ptep); +} + +static inline pte_t pv_get_pte_for_address(struct vm_area_struct *vma, + e2k_addr_t address) +{ + return pv_mmu_ops.get_pte_for_address(vma, address); +} + +#ifdef CONFIG_PARAVIRT_GUEST + +#include 
+ +#define set_pte_not_present_at set_pte_at +static inline void +set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval) +{ + pv_set_pte_at(mm, addr, ptep, pteval); +} +static inline void +validate_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval) +{ + pv_validate_pte_at(mm, addr, ptep, pteval); +} +static inline void boot_set_pte_at(unsigned long addr, + pte_t *ptep, pte_t pteval) +{ + boot_pv_set_pte_at(addr, ptep, pteval); +} +static inline void set_pte(pte_t *ptep, pte_t pteval) +{ + pv_set_pte(ptep, pteval); +} + +static inline void +set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmdval) +{ + pv_set_pmd_at(mm, addr, pmdp, pmdval); +} +static inline void +validate_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, + pmd_t pmdval) +{ + pv_validate_pmd_at(mm, addr, pmdp, pmdval); +} + +static inline void +set_pud_at(struct mm_struct *mm, unsigned long addr, + pud_t *pudp, pud_t pudval) +{ + pv_set_pud_at(mm, addr, pudp, pudval); +} +static inline void +validate_pud_at(struct mm_struct *mm, unsigned long addr, pud_t *pudp) +{ + pv_validate_pud_at(mm, addr, pudp, __pud(_PAGE_INIT_VALID)); +} +static inline void +invalidate_pud_at(struct mm_struct *mm, unsigned long addr, pud_t *pudp) +{ + pv_validate_pud_at(mm, addr, pudp, __pud(0)); +} + +static inline void +set_pgd_at(struct mm_struct *mm, unsigned long addr, + pgd_t *pgdp, pgd_t pgdval) +{ + pv_set_pgd_at(mm, addr, pgdp, pgdval); +} +static inline void +validate_pgd_at(struct mm_struct *mm, unsigned long addr, pgd_t *pgdp) +{ + pv_validate_pgd_at(mm, addr, pgdp, __pgd(_PAGE_INIT_VALID)); +} +static inline void +invalidate_pgd_at(struct mm_struct *mm, unsigned long addr, pgd_t *pgdp) +{ + pv_validate_pgd_at(mm, addr, pgdp, __pgd(0)); +} + +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + return pv_ptep_get_and_clear(mm, addr, ptep); +} +static inline void 
ptep_wrprotect_atomic(struct mm_struct *mm, + e2k_addr_t addr, pte_t *ptep) +{ + pv_ptep_wrprotect_atomic(mm, addr, ptep); +} + +static inline pte_t get_pte_for_address(struct vm_area_struct *vma, + e2k_addr_t address) +{ + return pv_get_pte_for_address(vma, address); +} + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_PARAVIRT_PGTABLE_H */ diff --git a/arch/e2k/include/asm/paravirt/process.h b/arch/e2k/include/asm/paravirt/process.h new file mode 100644 index 000000000000..a78adf749a38 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/process.h @@ -0,0 +1,344 @@ +/* + * KVM paravirtualized kernel processes support + * Copyright 2011 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_PARAVIRT_PROCESS_H +#define _E2K_PARAVIRT_PROCESS_H + +#include +#include +#include + +static inline void PV_FLUSH_STACKS(void) +{ + if (!paravirt_enabled()) + native_flush_stacks(); + else + pv_cpu_ops.flush_stacks(); +} + +static inline void PV_FLUSH_REGS_STACK(void) +{ + if (!paravirt_enabled()) + native_flush_regs_stack(); + else + pv_cpu_ops.flush_regs_stack(); +} + +static inline void PV_FLUSH_CHAIN_STACK(void) +{ + if (!paravirt_enabled()) + native_flush_chain_stack(); + else + pv_cpu_ops.flush_chain_stack(); +} + +static inline void BOOT_PV_FLUSH_STACKS(void) +{ + if (!boot_paravirt_enabled()) + native_flush_stacks(); + else + BOOT_PARAVIRT_FLUSH(flush_stacks); +} + +static inline void BOOT_PV_FLUSH_REGS_STACK(void) +{ + if (!boot_paravirt_enabled()) + native_flush_regs_stack(); + else + BOOT_PARAVIRT_FLUSH(flush_regs_stack); +} + +static inline void BOOT_PV_FLUSH_CHAIN_STACK(void) +{ + if (!boot_paravirt_enabled()) + native_flush_chain_stack(); + else + BOOT_PARAVIRT_FLUSH(flush_chain_stack); +} + +static inline void PV_COPY_STACKS_TO_MEMORY(void) +{ + pv_cpu_ops.copy_stacks_to_memory(); +} + +#define PV_FLUSHCPU PV_FLUSH_STACKS() +#define PV_FLUSHR PV_FLUSH_REGS_STACK() +#define PV_FLUSHC PV_FLUSH_CHAIN_STACK() + +/* see 
arch/e2k/include/asm/process.h for more details why and how */ +#define PV_UPDATE_VCPU_THREAD_CONTEXT(__task, __ti, __regs, __gti, __vcpu) \ +({ \ + if (paravirt_enabled()) { \ + KVM_GUEST_UPDATE_VCPU_THREAD_CONTEXT(__task, __ti, __regs, \ + __gti, __vcpu); \ + } else { \ + KVM_HOST_UPDATE_VCPU_THREAD_CONTEXT(__task, __ti, __regs, \ + __gti, __vcpu); \ + } \ +}) +#define PV_CHECK_VCPU_THREAD_CONTEXT(__ti) \ +({ \ + if (paravirt_enabled()) { \ + KVM_GUEST_CHECK_VCPU_THREAD_CONTEXT(__ti); \ + } else { \ + KVM_HOST_CHECK_VCPU_THREAD_CONTEXT(__ti); \ + } \ +}) + +#define PV_ONLY_SET_GUEST_GREGS(__ti) \ +({ \ + if (paravirt_enabled()) { \ + KVM_ONLY_SET_GUEST_GREGS(__ti); \ + } else { \ + NATIVE_ONLY_SET_GUEST_GREGS(__ti); \ + } \ +}) + +static inline void +pv_virt_cpu_thread_init(struct task_struct *boot_task) +{ + if (pv_cpu_ops.virt_cpu_thread_init) + pv_cpu_ops.virt_cpu_thread_init(boot_task); +} + +static inline int +pv_copy_kernel_stacks(struct task_struct *new_task, + unsigned long fn, unsigned long arg) +{ + return pv_cpu_ops.copy_kernel_stacks(new_task, fn, arg); +} + +static inline int +pv_copy_user_stacks(unsigned long clone_flags, + e2k_addr_t new_stk_base, e2k_size_t new_stk_sz, + struct task_struct *new_task, struct pt_regs *regs) +{ + return pv_cpu_ops.copy_user_stacks(clone_flags, + new_stk_base, new_stk_sz, new_task, regs); +} + +static inline void +pv_define_kernel_hw_stacks_sizes(struct hw_stack *hw_stacks) +{ + pv_cpu_ops.define_kernel_hw_stacks_sizes(hw_stacks); +} +static inline void +boot_pv_define_kernel_hw_stacks_sizes(struct hw_stack *hw_stacks) +{ + BOOT_PARAVIRT_GET_CPU_FUNC(define_kernel_hw_stacks_sizes)(hw_stacks); +} + +static inline void +pv_define_user_hw_stacks_sizes(struct hw_stack *hw_stacks) +{ + pv_cpu_ops.define_user_hw_stacks_sizes(hw_stacks); +} + +static inline void +pv_release_hw_stacks(struct thread_info *dead_ti) +{ + pv_cpu_ops.release_hw_stacks(dead_ti); +} + +static inline void +pv_release_kernel_stacks(struct thread_info 
*dead_ti) +{ + pv_cpu_ops.release_kernel_stacks(dead_ti); +} + +static inline int +pv_switch_to_new_user(struct e2k_stacks *stacks, struct hw_stack *hw_stacks, + e2k_addr_t cut_base, e2k_size_t cut_size, + e2k_addr_t entry_point, int cui, + unsigned long flags, bool kernel) +{ + return pv_cpu_ops.switch_to_new_user(stacks, hw_stacks, + cut_base, cut_size, entry_point, cui, flags, kernel); +} + +static inline int +pv_do_map_user_hard_stack_to_kernel(int nid, + e2k_addr_t kernel_start, e2k_addr_t user_stack_base, + e2k_size_t kernel_size) +{ + return pv_cpu_ops.do_map_user_hard_stack_to_kernel(nid, + kernel_start, user_stack_base, kernel_size); +} +static __always_inline int +pv_complete_switch_to_kernel_hardware_stacks(e2k_addr_t ps, e2k_addr_t cs, + unsigned long *delta_proc, unsigned long *delta_chain, + bool to_exit) +{ + if (!paravirt_enabled()) + return native_complete_switch_to_kernel_hardware_stacks(ps, cs, + delta_proc, delta_chain, to_exit); + else + return kvm_complete_switch_to_kernel_hardware_stacks(ps, cs, + delta_proc, delta_chain, to_exit); +} +static __always_inline int +pv_complete_switch_from_kernel_hardware_stacks( + unsigned long delta_proc, unsigned long delta_chain) +{ + if (!paravirt_enabled()) { + native_complete_switch_from_kernel_hardware_stacks( + delta_proc, delta_chain); + return 0; + } else { + return kvm_complete_switch_from_kernel_hardware_stacks( + delta_proc, delta_chain); + } +} +static inline void +pv_free_old_kernel_hardware_stacks(void) +{ + pv_cpu_ops.free_old_kernel_hardware_stacks(); +} + +static inline void +pv_fix_process_pt_regs(struct thread_info *ti, struct e2k_stacks *stacks, + struct pt_regs *regs, struct pt_regs *old_regs) +{ + if (pv_cpu_ops.fix_process_pt_regs) + pv_cpu_ops.fix_process_pt_regs(ti, stacks, regs, old_regs); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* It is paravirtualized host and guest kernel */ + +#define E2K_FLUSHCPU PV_FLUSHCPU +#define E2K_FLUSHR PV_FLUSHR +#define E2K_FLUSHC PV_FLUSHC +#define 
BOOT_FLUSHCPU BOOT_PV_FLUSH_STACKS() +#define BOOT_FLUSHR BOOT_PV_FLUSH_REGS_STACK() +#define BOOT_FLUSHC BOOT_PV_FLUSH_CHAIN_STACK() + +static inline void COPY_STACKS_TO_MEMORY(void) +{ + PV_COPY_STACKS_TO_MEMORY(); +} + +#define UPDATE_VCPU_THREAD_CONTEXT(__task, __ti, __regs, __gti, __vcpu) \ + PV_UPDATE_VCPU_THREAD_CONTEXT(__task, __ti, __regs, \ + __gti, __vcpu) +#define CHECK_VCPU_THREAD_CONTEXT(__ti) \ + PV_CHECK_VCPU_THREAD_CONTEXT(__ti) +#define ONLY_SET_GUEST_GREGS(ti) PV_ONLY_SET_GUEST_GREGS(ti) + +#ifdef CONFIG_KVM_GUEST + #define GOTO_RETURN_TO_PARAVIRT_GUEST(ret_value) \ + KVM_GOTO_RETURN_TO_PARAVIRT_GUEST(ret_value) + #define COND_GOTO_RETURN_TO_PARAVIRT_GUEST(cond, ret_value) \ + KVM_COND_GOTO_RETURN_TO_PARAVIRT_GUEST(cond, ret_value) + #define GOTO_DONE_TO_PARAVIRT_GUEST() \ + KVM_GOTO_DONE_TO_PARAVIRT_GUEST() + #define COND_GOTO_DONE_TO_PARAVIRT_GUEST(cond) \ + KVM_COND_GOTO_DONE_TO_PARAVIRT_GUEST(cond) +#else + #error "undefined guest kernel type" +#endif /* CONFIG_KVM_GUEST */ + +static inline int +do_map_user_hard_stack_to_kernel(int nid, + e2k_addr_t kernel_start, e2k_addr_t user_stack_base, + e2k_size_t kernel_size) +{ + return pv_do_map_user_hard_stack_to_kernel(nid, + kernel_start, user_stack_base, kernel_size); +} + +static inline void +virt_cpu_thread_init(struct task_struct *boot_task) +{ + pv_virt_cpu_thread_init(boot_task); +} + +static inline int +copy_kernel_stacks(struct task_struct *new_task, + unsigned long fn, unsigned long arg) +{ + return pv_copy_kernel_stacks(new_task, fn, arg); +} +static inline int +copy_user_stacks(unsigned long clone_flags, + e2k_addr_t new_stk_base, e2k_size_t new_stk_sz, + struct task_struct *new_task, pt_regs_t *regs) +{ + return pv_copy_user_stacks(clone_flags, new_stk_base, new_stk_sz, + new_task, regs); +} + +static inline void +define_kernel_hw_stacks_sizes(hw_stack_t *hw_stacks) +{ + pv_define_kernel_hw_stacks_sizes(hw_stacks); +} + +static inline void +boot_define_kernel_hw_stacks_sizes(hw_stack_t 
*hw_stacks) +{ + boot_pv_define_kernel_hw_stacks_sizes(hw_stacks); +} + +static inline void +define_user_hw_stacks_sizes(hw_stack_t *hw_stacks) +{ + pv_define_user_hw_stacks_sizes(hw_stacks); +} + +static inline void +release_hw_stacks(struct thread_info *dead_ti) +{ + pv_release_hw_stacks(dead_ti); +} +static inline void +release_kernel_stacks(struct thread_info *dead_ti) +{ + pv_release_kernel_stacks(dead_ti); +} + +static inline int +switch_to_new_user(e2k_stacks_t *stacks, hw_stack_t *hw_stacks, + e2k_addr_t cut_base, e2k_size_t cut_size, + e2k_addr_t entry_point, int cui, + unsigned long flags, bool kernel) +{ + return pv_switch_to_new_user(stacks, hw_stacks, + cut_base, cut_size, entry_point, cui, flags, kernel); +} + +static __always_inline int +complete_switch_to_kernel_hardware_stacks(e2k_addr_t ps, e2k_addr_t cs, + unsigned long *delta_proc, unsigned long *delta_chain, + bool to_exit) +{ + return pv_complete_switch_to_kernel_hardware_stacks(ps, cs, + delta_proc, delta_chain, to_exit); +} + +static __always_inline int +complete_switch_from_kernel_hardware_stacks( + unsigned long delta_proc, unsigned long delta_chain) +{ + return pv_complete_switch_from_kernel_hardware_stacks( + delta_proc, delta_chain); +} + +static inline void +free_old_kernel_hardware_stacks(void) +{ + pv_free_old_kernel_hardware_stacks(); +} + +static inline void +fix_process_pt_regs(thread_info_t *ti, e2k_stacks_t *stacks, + pt_regs_t *regs, pt_regs_t *old_regs) +{ + pv_fix_process_pt_regs(ti, stacks, regs, old_regs); +} + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* !_E2K_PARAVIRT_PROCESS_H */ diff --git a/arch/e2k/include/asm/paravirt/processor.h b/arch/e2k/include/asm/paravirt/processor.h new file mode 100644 index 000000000000..63cd4844f17d --- /dev/null +++ b/arch/e2k/include/asm/paravirt/processor.h @@ -0,0 +1,72 @@ +/* + * KVM guest processor and processes support + * + * Copyright (C) 2014 MCST + */ + +#ifndef _E2K_PARAVIRT_PROCESSOR_H_ +#define _E2K_PARAVIRT_PROCESSOR_H_ + 
+#ifndef __ASSEMBLY__ + +#include +#include + +static inline int +pv_prepare_start_thread_frames(unsigned long entry, unsigned long sp) +{ + return pv_cpu_ops.prepare_start_thread_frames(entry, sp); +} + +#define pv_default_idle (pv_cpu_ops.cpu_default_idle) +#define pv_cpu_relax() (pv_cpu_ops.cpu_relax()) +#define pv_cpu_relax_no_resched() (pv_cpu_ops.cpu_relax_no_resched()) + +static inline void +pv_print_machine_type_info(void) +{ + pv_init_ops.print_machine_type_info(); +} + +static inline void +pv_paravirt_banner(void) +{ + if (pv_init_ops.banner) + pv_init_ops.banner(); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* pure guest kernel (not paravirtualized based on pv_ops) */ + +static inline int +prepare_start_thread_frames(unsigned long entry, unsigned long sp) +{ + return pv_prepare_start_thread_frames(entry, sp); +} + +#define default_idle pv_default_idle + +static inline void cpu_relax(void) +{ + pv_cpu_relax(); +} +static inline void cpu_relax_no_resched(void) +{ + pv_cpu_relax_no_resched(); +} + +static inline void print_machine_type_info(void) +{ + pv_print_machine_type_info(); +} + +static inline void paravirt_banner(void) +{ + pv_paravirt_banner(); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* ! __ASSEMBLY__ */ +#endif /* _E2K_PARAVIRT_PROCESSOR_H_ */ + + diff --git a/arch/e2k/include/asm/paravirt/pv_info.h b/arch/e2k/include/asm/paravirt/pv_info.h new file mode 100644 index 000000000000..b47fee7e6f11 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/pv_info.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2016 MCST, Salavat Gilyazov atic@mcst.ru + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + + +#ifndef __ASM_E2K_PARAVIRT_INFO_H +#define __ASM_E2K_PARAVIRT_INFO_H + +#ifndef __ASSEMBLY__ + +#include + +/* + * general info + */ +typedef struct pv_info { + int paravirt_enabled; + unsigned long page_offset; + unsigned long vmalloc_start; + unsigned long vmalloc_end; + unsigned long vmemmap_start; + unsigned long vmemmap_end; + const char *name; +} pv_info_t; + +extern pv_info_t pv_info; + +/* + * general info + */ +#define PARAVIRT_ENABLED (pv_info.paravirt_enabled) +#define BOOT_PARAVIRT_ENABLED boot_get_vo_value(PARAVIRT_ENABLED) + +#ifndef CONFIG_BOOT_E2K +#define pv_paravirt_enabled() PARAVIRT_ENABLED +#define boot_pv_paravirt_enabled() BOOT_PARAVIRT_ENABLED +#else /* CONFIG_BOOT_E2K */ +#define pv_paravirt_enabled() false +#define boot_pv_paravirt_enabled() false +#endif /* ! 
CONFIG_BOOT_E2K */ + +#define PV_PAGE_OFFSET (pv_info.page_offset) +#define PV_TASK_SIZE PV_PAGE_OFFSET +#define PV_VMALLOC_START (pv_info.vmalloc_start) +#define PV_VMALLOC_END (pv_info.vmalloc_end) +#define PV_VMEMMAP_START (pv_info.vmemmap_start) +#define PV_VMEMMAP_END (pv_info.vmemmap_end) + +#define BOOT_PV_PAGE_OFFSET \ + boot_get_vo_value(PV_PAGE_OFFSET) +#define BOOT_PV_TASK_SIZE BOOT_PV_PAGE_OFFSET + +#ifdef CONFIG_PARAVIRT_GUEST + +static inline int paravirt_enabled(void) +{ + return pv_paravirt_enabled(); +} +#define boot_paravirt_enabled() boot_pv_paravirt_enabled() + +#define PAGE_OFFSET PV_PAGE_OFFSET +#define TASK_SIZE PV_TASK_SIZE +#define VMALLOC_START PV_VMALLOC_START +#define VMALLOC_END PV_VMALLOC_END +#define VMEMMAP_START PV_VMEMMAP_START +#define VMEMMAP_END PV_VMEMMAP_END + +#define BOOT_PAGE_OFFSET BOOT_PV_PAGE_OFFSET +#define BOOT_TASK_SIZE BOOT_PV_TASK_SIZE +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* ! __ASSEMBLY__ */ + +#endif /* __ASM_E2K_PARAVIRT_INFO_H */ diff --git a/arch/e2k/include/asm/paravirt/pv_ops.h b/arch/e2k/include/asm/paravirt/pv_ops.h new file mode 100644 index 000000000000..fc454dd05928 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/pv_ops.h @@ -0,0 +1,731 @@ +/****************************************************************************** + * Copyright (c) 2008 MCST (C) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + + +#ifndef __ASM_E2K_PARAVIRT_PV_OPS_H +#define __ASM_E2K_PARAVIRT_PV_OPS_H + +#ifdef CONFIG_PARAVIRT + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * Should use call_single_data_t instead, but there is a problem with forward + * declaration of typedef. + */ +struct __call_single_data; + +struct pt_regs; +struct e2k_stacks; +struct hw_stack; +struct hw_stack_area; +struct thread_info; +struct as_sa_handler_arg; +struct global_regs; +struct local_gregs; +struct page; +struct trap_cellar; +struct user_namespace; +enum pte_mem_type; +struct mmu_gather; + +#define INLINE_FUNC_CALL ((void *)-1UL) /* inline function should be */ + /* called here */ + +/* + * booting hooks. 
+ */ + +struct node_phys_mem; +struct mm_struct; +struct vm_area_struct; +struct thread_info; +struct timespec; +struct timeval; + +typedef struct pv_boot_ops { + void (*boot_setup_machine_id)(bootblock_struct_t *bootblock); + int (*boot_loader_probe_memory)( + struct node_phys_mem *nodes_phys_mem, + boot_info_t *bootblock); + e2k_size_t (*boot_get_bootblock_size)(boot_info_t *bblock); + void (*boot_cpu_relax)(void); +#ifdef CONFIG_SMP + int (*boot_smp_cpu_config)(boot_info_t *bootblock); + void (*boot_smp_node_config)(boot_info_t *bootblock); +#endif /* CONFIG_SMP */ + void (*boot_reserve_all_bootmem)(bool bsp, boot_info_t *boot_info); + void (*boot_map_all_bootmem)(bool bsp, boot_info_t *boot_info); + void (*boot_map_needful_to_equal_virt_area) + (e2k_addr_t stack_top_addr); + void (*boot_kernel_switch_to_virt)(bool bsp, int cpuid, + void (*boot_init_sequel_func)(bool bsp, int cpuid, + int cpus_to_sync)); + void (*boot_clear_bss)(void); + void (*boot_check_bootblock)(bool bsp, + bootblock_struct_t *bootblock); + + void (*init_terminate_boot_init)(bool bsp, int cpuid); + void (*boot_parse_param)(bootblock_struct_t *bootblock); + void (*boot_debug_cons_outb)(u8 byte, u16 port); + u8 (*boot_debug_cons_inb)(u16 port); + u32 (*boot_debug_cons_inl)(u16 port); + void (*debug_cons_outb)(u8 byte, u16 port); + u8 (*debug_cons_inb)(u16 port); + u32 (*debug_cons_inl)(u16 port); + void (*do_boot_panic)(const char *fmt_v, ...); +} pv_boot_ops_t; + +extern pv_boot_ops_t pv_boot_ops; +extern pv_boot_ops_t *cur_pv_boot_ops; /* pointer to boot-time tables of OPs */ + +#define BOOT_PARAVIRT_GET_BOOT_FUNC(func_name) \ + BOOT_PARAVIRT_GET_BOOT_OPS_FUNC(cur_pv_boot_ops, func_name) + +#define BOOT_PARAVIRT_OUT_OP(op_func, value, port) \ +({ \ + BOOT_PARAVIRT_GET_BOOT_FUNC(op_func)(value, port); \ +}) +#define BOOT_PARAVIRT_IN_OP(op_func, port) \ +({ \ + BOOT_PARAVIRT_GET_BOOT_FUNC(op_func)(port); \ +}) + +/* + * initialization hooks. 
+ */ + +typedef struct pv_init_ops { + void (*banner)(void); + void (*set_mach_type_id)(void); + void (*print_machine_type_info)(void); +} pv_init_ops_t; + +extern pv_init_ops_t pv_init_ops; + +extern unsigned long return_to_paravirt_guest(unsigned long ret_value); +extern void done_to_paravirt_guest(void); +extern void __init kvm_init_paravirt_guest(void); + +typedef struct pv_cpu_ops { + unsigned long (*read_OSCUD_lo_reg_value)(void); + unsigned long (*read_OSCUD_hi_reg_value)(void); + void (*write_OSCUD_lo_reg_value)(unsigned long reg_value); + void (*write_OSCUD_hi_reg_value)(unsigned long reg_value); + unsigned long long (*read_OSGD_lo_reg_value)(void); + unsigned long long (*read_OSGD_hi_reg_value)(void); + void (*write_OSGD_lo_reg_value)(unsigned long reg_value); + void (*write_OSGD_hi_reg_value)(unsigned long reg_value); + unsigned long long (*read_CUD_lo_reg_value)(void); + unsigned long long (*read_CUD_hi_reg_value)(void); + void (*write_CUD_lo_reg_value)(unsigned long reg_value); + void (*write_CUD_hi_reg_value)(unsigned long reg_value); + unsigned long long (*read_GD_lo_reg_value)(void); + unsigned long long (*read_GD_hi_reg_value)(void); + void (*write_GD_lo_reg_value)(unsigned long reg_value); + void (*write_GD_hi_reg_value)(unsigned long reg_value); + unsigned long long (*read_PSP_lo_reg_value)(void); + unsigned long long (*read_PSP_hi_reg_value)(void); + void (*write_PSP_lo_reg_value)(unsigned long reg_value); + void (*write_PSP_hi_reg_value)(unsigned long reg_value); + unsigned long long (*read_PSHTP_reg_value)(void); + void (*write_PSHTP_reg_value)(unsigned long reg_value); + unsigned long long (*read_PCSP_lo_reg_value)(void); + unsigned long long (*read_PCSP_hi_reg_value)(void); + void (*write_PCSP_lo_reg_value)(unsigned long reg_value); + void (*write_PCSP_hi_reg_value)(unsigned long reg_value); + int (*read_PCSHTP_reg_value)(void); + void (*write_PCSHTP_reg_value)(int reg_value); + unsigned long long (*read_CR0_lo_reg_value)(void); + unsigned 
long long (*read_CR0_hi_reg_value)(void); + unsigned long long (*read_CR1_lo_reg_value)(void); + unsigned long long (*read_CR1_hi_reg_value)(void); + void (*write_CR0_lo_reg_value)(unsigned long reg_value); + void (*write_CR0_hi_reg_value)(unsigned long reg_value); + void (*write_CR1_lo_reg_value)(unsigned long reg_value); + void (*write_CR1_hi_reg_value)(unsigned long reg_value); + unsigned long long (*read_CTPR_reg_value)(int reg_no); + void (*write_CTPR_reg_value)(int reg_no, unsigned long reg_value); + unsigned long long (*read_USD_lo_reg_value)(void); + unsigned long long (*read_USD_hi_reg_value)(void); + void (*write_USD_lo_reg_value)(unsigned long reg_value); + void (*write_USD_hi_reg_value)(unsigned long reg_value); + unsigned long long (*read_SBR_reg_value)(void); + void (*write_SBR_reg_value)(unsigned long reg_value); + unsigned long long (*read_WD_reg_value)(void); + void (*write_WD_reg_value)(unsigned long reg_value); +#ifdef NEED_PARAVIRT_LOOP_REGISTERS + unsigned long long (*read_LSR_reg_value)(void); + void (*write_LSR_reg_value)(unsigned long reg_value); + unsigned long long (*read_ILCR_reg_value)(void); + void (*write_ILCR_reg_value)(unsigned long reg_value); +#endif /* NEED_PARAVIRT_LOOP_REGISTERS */ + unsigned long long (*read_OSR0_reg_value)(void); + void (*write_OSR0_reg_value)(unsigned long reg_value); + unsigned int (*read_OSEM_reg_value)(void); + void (*write_OSEM_reg_value)(unsigned int reg_value); + unsigned int (*read_BGR_reg_value)(void); + void (*write_BGR_reg_value)(unsigned int reg_value); + unsigned long long (*read_CLKR_reg_value)(void); + void (*write_CLKR_reg_value)(void); + unsigned long long (*read_SCLKR_reg_value)(void); + void (*write_SCLKR_reg_value)(unsigned long reg_value); + unsigned long long (*read_SCLKM1_reg_value)(void); + void (*write_SCLKM1_reg_value)(unsigned long reg_value); + unsigned long long (*read_SCLKM2_reg_value)(void); + unsigned long long (*read_SCLKM3_reg_value)(void); + void 
(*write_SCLKM2_reg_value)(unsigned long reg_value); + unsigned long long (*read_CU_HW0_reg_value)(void); + void (*write_CU_HW0_reg_value)(unsigned long reg_value); + unsigned long long (*read_CU_HW1_reg_value)(void); + void (*write_CU_HW1_reg_value)(unsigned long reg_value); + unsigned long long (*read_RPR_lo_reg_value)(void); + unsigned long long (*read_RPR_hi_reg_value)(void); + void (*write_RPR_lo_reg_value)(unsigned long reg_value); + void (*write_RPR_hi_reg_value)(unsigned long reg_value); + unsigned long long (*read_SBBP_reg_value)(void); + unsigned long long (*read_IP_reg_value)(void); + unsigned int (*read_DIBCR_reg_value)(void); + unsigned int (*read_DIBSR_reg_value)(void); + unsigned long long (*read_DIMCR_reg_value)(void); + unsigned long long (*read_DIBAR0_reg_value)(void); + unsigned long long (*read_DIBAR1_reg_value)(void); + unsigned long long (*read_DIBAR2_reg_value)(void); + unsigned long long (*read_DIBAR3_reg_value)(void); + unsigned long long (*read_DIMAR0_reg_value)(void); + unsigned long long (*read_DIMAR1_reg_value)(void); + void (*write_DIBCR_reg_value)(unsigned int reg_value); + void (*write_DIBSR_reg_value)(unsigned int reg_value); + void (*write_DIMCR_reg_value)(unsigned long reg_value); + void (*write_DIBAR0_reg_value)(unsigned long reg_value); + void (*write_DIBAR1_reg_value)(unsigned long reg_value); + void (*write_DIBAR2_reg_value)(unsigned long reg_value); + void (*write_DIBAR3_reg_value)(unsigned long reg_value); + void (*write_DIMAR0_reg_value)(unsigned long reg_value); + void (*write_DIMAR1_reg_value)(unsigned long reg_value); + unsigned long long (*read_CUTD_reg_value)(void); + void (*write_CUTD_reg_value)(unsigned long reg_value); + unsigned int (*read_CUIR_reg_value)(void); + unsigned int (*read_PSR_reg_value)(void); + void (*write_PSR_reg_value)(unsigned int reg_value); + unsigned int (*read_UPSR_reg_value)(void); + void (*write_UPSR_reg_value)(unsigned int reg_value); + void (*write_PSR_irq_barrier)(unsigned int reg_value); + 
void (*write_UPSR_irq_barrier)(unsigned int reg_value); + unsigned int (*read_PFPFR_reg_value)(void); + void (*write_PFPFR_reg_value)(unsigned int reg_value); + unsigned int (*read_FPCR_reg_value)(void); + void (*write_FPCR_reg_value)(unsigned int reg_value); + unsigned int (*read_FPSR_reg_value)(void); + void (*write_FPSR_reg_value)(unsigned int reg_value); + unsigned long long (*read_CS_lo_reg_value)(void); + unsigned long long (*read_CS_hi_reg_value)(void); + unsigned long long (*read_DS_lo_reg_value)(void); + unsigned long long (*read_DS_hi_reg_value)(void); + unsigned long long (*read_ES_lo_reg_value)(void); + unsigned long long (*read_ES_hi_reg_value)(void); + unsigned long long (*read_FS_lo_reg_value)(void); + unsigned long long (*read_FS_hi_reg_value)(void); + unsigned long long (*read_GS_lo_reg_value)(void); + unsigned long long (*read_GS_hi_reg_value)(void); + unsigned long long (*read_SS_lo_reg_value)(void); + unsigned long long (*read_SS_hi_reg_value)(void); + void (*write_CS_lo_reg_value)(unsigned long reg_value); + void (*write_CS_hi_reg_value)(unsigned long reg_value); + void (*write_DS_lo_reg_value)(unsigned long reg_value); + void (*write_DS_hi_reg_value)(unsigned long reg_value); + void (*write_ES_lo_reg_value)(unsigned long reg_value); + void (*write_ES_hi_reg_value)(unsigned long reg_value); + void (*write_FS_lo_reg_value)(unsigned long reg_value); + void (*write_FS_hi_reg_value)(unsigned long reg_value); + void (*write_GS_lo_reg_value)(unsigned long reg_value); + void (*write_GS_hi_reg_value)(unsigned long reg_value); + void (*write_SS_lo_reg_value)(unsigned long reg_value); + void (*write_SS_hi_reg_value)(unsigned long reg_value); + unsigned long long (*read_IDR_reg_value)(void); + unsigned long long (*boot_read_IDR_reg_value)(void); + unsigned int (*read_CORE_MODE_reg_value)(void); + unsigned int (*boot_read_CORE_MODE_reg_value)(void); + void (*write_CORE_MODE_reg_value)(unsigned int modes); + void (*boot_write_CORE_MODE_reg_value)(unsigned 
int modes); + + void (*put_updated_cpu_regs_flags)(unsigned long flags); + + unsigned int (*read_aasr_reg_value)(void); + void (*write_aasr_reg_value)(unsigned int reg_value); + unsigned int (*read_aafstr_reg_value)(void); + void (*write_aafstr_reg_value)(unsigned int reg_value); + + void (*flush_stacks)(void); + void (*flush_regs_stack)(void); + void (*flush_chain_stack)(void); + void (*copy_stacks_to_memory)(void); + unsigned long long (*get_active_cr0_lo_value)(e2k_addr_t base, + e2k_addr_t cr_ind); + unsigned long long (*get_active_cr0_hi_value)(e2k_addr_t base, + e2k_addr_t cr_ind); + unsigned long long (*get_active_cr1_lo_value)(e2k_addr_t base, + e2k_addr_t cr_ind); + unsigned long long (*get_active_cr1_hi_value)(e2k_addr_t base, + e2k_addr_t cr_ind); + void (*put_active_cr0_lo_value)(unsigned long cr0_lo_value, + e2k_addr_t base, e2k_addr_t cr_ind); + void (*put_active_cr0_hi_value)(unsigned long cr0_hi_value, + e2k_addr_t base, e2k_addr_t cr_ind); + void (*put_active_cr1_lo_value)(unsigned long cr1_lo_value, + e2k_addr_t base, e2k_addr_t cr_ind); + void (*put_active_cr1_hi_value)(unsigned long cr1_hi_value, + e2k_addr_t base, e2k_addr_t cr_ind); + void (*correct_trap_psp_pcsp)(struct pt_regs *regs, + struct thread_info *thread_info); + void (*correct_scall_psp_pcsp)(struct pt_regs *regs, + struct thread_info *thread_info); + void (*correct_trap_return_ip)(struct pt_regs *regs, + unsigned long return_ip); + void *(*nested_kernel_return_address)(int n); + void (*virt_cpu_thread_init)(struct task_struct *boot_task); + int (*prepare_start_thread_frames)(unsigned long entry, + unsigned long sp); + int (*copy_kernel_stacks)(struct task_struct *new_task, + unsigned long fn, unsigned long arg); + int (*copy_user_stacks)(unsigned long clone_flags, + e2k_addr_t new_stk_base, e2k_size_t new_stk_sz, + struct task_struct *new_task, + struct pt_regs *regs); + void (*define_kernel_hw_stacks_sizes)(struct hw_stack *hw_stacks); + void (*define_user_hw_stacks_sizes)(struct 
hw_stack *hw_stacks); + void (*switch_to_expanded_proc_stack)(long delta_size, + long delta_offset, bool decr_k_ps); + void (*switch_to_expanded_chain_stack)(long delta_size, + long delta_offset, bool decr_k_pcs); + void (*stack_bounds_trap_enable)(void); + bool (*is_proc_stack_bounds)(struct thread_info *ti, + struct pt_regs *regs); + bool (*is_chain_stack_bounds)(struct thread_info *ti, + struct pt_regs *regs); + + void (*release_hw_stacks)(struct thread_info *dead_ti); + void (*release_kernel_stacks)(struct thread_info *dead_ti); + int (*register_kernel_hw_stack)(e2k_addr_t stack_base, + e2k_size_t stack_size); + int (*register_kernel_data_stack)(e2k_addr_t stack_base, + e2k_size_t stack_size); + void (*unregister_kernel_hw_stack)(e2k_addr_t stack_base, + e2k_size_t stack_size); + void (*unregister_kernel_data_stack)(e2k_addr_t stack_base, + e2k_size_t stack_size); + int (*kmem_area_host_chunk)(e2k_addr_t stack_base, + e2k_size_t stack_size, int hw_stack); + void (*kmem_area_unhost_chunk)(e2k_addr_t stack_base, + e2k_size_t stack_size); + int (*switch_to_new_user)(struct e2k_stacks *stacks, + struct hw_stack *hw_stacks, + e2k_addr_t cut_base, e2k_size_t cut_size, + e2k_addr_t entry_point, int cui, + unsigned long flags, bool kernel); + int (*do_map_user_hard_stack_to_kernel)(int nid, + e2k_addr_t kernel_start, e2k_addr_t user_stack_base, + e2k_size_t kernel_size); + int (*do_switch_to_kernel_hardware_stacks)(void); + void (*free_old_kernel_hardware_stacks)(void); + void (*instr_page_fault)(struct pt_regs *regs, tc_fault_type_t ftype, + const int async_instr); + unsigned long (*mmio_page_fault)(struct pt_regs *regs, + struct trap_cellar *tcellar); + int (*do_hw_stack_bounds)(struct pt_regs *regs, + bool proc_bounds, bool chain_bounds); + irqreturn_t (*handle_interrupt)(struct pt_regs *regs); + void (*init_guest_system_handlers_table)(void); + void (*fix_process_pt_regs)(struct thread_info *ti, + struct e2k_stacks *stacks, struct pt_regs *regs, + struct pt_regs 
*old_regs); + int (*run_user_handler)(struct as_sa_handler_arg *arg); + long (*trap_table_entry1)(int sys_num, ...); + long (*trap_table_entry3)(int sys_num, ...); + long (*trap_table_entry4)(int sys_num, ...); + + int (*do_fast_clock_gettime)(const clockid_t which_clock, + struct timespec *tp); + int (*fast_sys_clock_gettime)(const clockid_t which_clock, + struct timespec __user *tp); + int (*do_fast_gettimeofday)(struct timeval *tv); + int (*fast_sys_siggetmask)(u64 __user *oset, size_t sigsetsize); + + unsigned long (*fast_tagged_memory_copy)(void *dst, const void *src, + size_t len, unsigned long strd_opcode, + unsigned long ldrd_opcode, int prefetch); + unsigned long (*fast_tagged_memory_set)(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode); + unsigned long (*extract_tags_32)(u16 *dst, const void *src); + void (*save_local_glob_regs)(struct local_gregs *l_gregs, + bool is_signal); + void (*restore_local_glob_regs)(struct local_gregs *l_gregs, + bool is_signal); + void (*restore_kernel_gregs_in_syscall)(struct thread_info *ti); + void (*get_all_user_glob_regs)(struct global_regs *gregs); + void (*arch_setup_machine)(void); + void (*cpu_default_idle)(void); + void (*cpu_relax)(void); + void (*cpu_relax_no_resched)(void); +#ifdef CONFIG_SMP + void (*wait_for_cpu_booting)(void); + void (*wait_for_cpu_wake_up)(void); + int (*activate_cpu)(int cpu_id); + int (*activate_all_cpus)(void); + void (*csd_lock_wait)(struct __call_single_data *data); + void (*csd_lock)(struct __call_single_data *data); + void (*arch_csd_lock_async)(struct __call_single_data *data); + void (*csd_unlock)(struct __call_single_data *data); + void (*setup_local_pic_virq)(unsigned int cpuid); + void (*startup_local_pic_virq)(unsigned int cpuid); + void (*smp_flush_tlb_all)(void); + void (*smp_flush_tlb_mm)(struct mm_struct *mm); + void (*smp_flush_tlb_page)(struct vm_area_struct *vma, + e2k_addr_t addr); + void (*smp_flush_tlb_range)(struct mm_struct *mm, + e2k_addr_t start, 
e2k_addr_t end); + void (*smp_flush_pmd_tlb_range)(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end); + void (*smp_flush_tlb_range_and_pgtables)(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end); + void (*smp_flush_icache_range)(e2k_addr_t start, e2k_addr_t end); + void (*smp_flush_icache_range_array)( + void *icache_range_arr); + void (*smp_flush_icache_page)(struct vm_area_struct *vma, + struct page *page); + void (*smp_flush_icache_all)(void); + void (*smp_flush_icache_kernel_line)(e2k_addr_t addr); +#endif /* CONFIG_SMP */ + int (*host_printk)(const char *fmt, ...); + + void (*arch_spin_lock_slow)(void *lock); + void (*arch_spin_locked_slow)(void *lock); + void (*arch_spin_unlock_slow)(void *lock); + + void (*ord_wait_read_lock_slow)(arch_rwlock_t *rw); + void (*ord_wait_write_lock_slow)(arch_rwlock_t *rw); + void (*ord_arch_read_locked_slow)(arch_rwlock_t *rw); + void (*ord_arch_write_locked_slow)(arch_rwlock_t *rw); + void (*ord_arch_read_unlock_slow)(arch_rwlock_t *rw); + void (*ord_arch_write_unlock_slow)(arch_rwlock_t *rw); +} pv_cpu_ops_t; + +extern pv_cpu_ops_t pv_cpu_ops; +extern pv_cpu_ops_t *cur_pv_cpu_ops; + +/* FIXME: this include should be deleted, paravirt_enable() should migrate */ +/* from processor.h to some other common include */ +#include + +#define PATCH_GOTO_AND_RETURN_CTPR_NO 1 /* ctpr1 */ + +#define GOTO_FUNC_PATCH(patch_addr, return_addr_greg_no, \ + ctpr_no, PRE, POST) \ + E2K_GOTO_PATCH_AND_RETURN(patch_addr, return_addr_greg_no, \ + ctpr_no, PRE, POST) +#define GOTO_CPU_FUNC_PATCH(patch_addr, return_addr_greg_no, \ + ctpr_no, PRE, POST) \ + GOTO_FUNC_PATCH(pv_cpu_ops.patch_addr, return_addr_greg_no, \ + ctpr_no, PRE, POST) +#define DEF_GOTO_CPU_FUNC_PATCH(patch_addr) \ + GOTO_CPU_FUNC_PATCH(patch_addr, \ + PATCH_RETURN_ADDR_GREG, \ + PATCH_GOTO_AND_RETURN_CTPR_NO, \ + "", "") + +#define GOTO_READ_SREG_PATCH(patch_addr, return_addr_greg_no, \ + ctpr_no, POST) \ + E2K_GOTO_PATCH_AND_RETURN(patch_addr, 
return_addr_greg_no, \ + ctpr_no, "", POST) +#define GOTO_WRITE_SREG_PATCH(patch_addr, return_addr_greg_no, \ + ctpr_no, PRE) \ + E2K_GOTO_PATCH_AND_RETURN(patch_addr, return_addr_greg_no, \ + ctpr_no, PRE, "") +#define GOTO_SET_UPDATED_FLAGS_PATCH(patch_addr, return_addr_greg_no, \ + ctpr_no, PRE) \ + E2K_GOTO_PATCH_AND_RETURN(patch_addr, return_addr_greg_no, \ + ctpr_no, PRE, "") +#define READ_SREG_PATCH_POST(reg_value_greg_no, res) \ + E2K_MOVE_GREG_TO_REG(reg_value_greg_no, res) +#define READ_DSREG_PATCH_POST(reg_value_greg_no, res) \ + E2K_MOVE_DGREG_TO_DREG(reg_value_greg_no, res) +#define DEF_READ_SREG_PATCH_POST(res) \ + READ_SREG_PATCH_POST(PATCH_RES_GREG, res) +#define DEF_READ_DSREG_PATCH_POST(res) \ + READ_DSREG_PATCH_POST(PATCH_RES_GREG, res) + +#define PV_WRITE_SREG_PATCH_PRE(reg_value_greg_no, reg_value) \ + E2K_MOVE_REG_TO_GREG(reg_value_greg_no, reg_value) +#define PV_WRITE_DSREG_PATCH_PRE(reg_value_greg_no, reg_value) \ + E2K_MOVE_DREG_TO_DGREG(reg_value_greg_no, reg_value) +#define DEF_WRITE_SREG_PATCH_PRE(reg_value) \ + PV_WRITE_SREG_PATCH_PRE(PATCH_ARG_GREG, reg_value) +#define DEF_WRITE_DSREG_PATCH_PRE(reg_value) \ + PV_WRITE_DSREG_PATCH_PRE(PATCH_ARG_GREG, reg_value) + +#define BOOT_PARAVIRT_GET_CPU_FUNC(func_name) \ + BOOT_PARAVIRT_GET_BOOT_OPS_FUNC(cur_pv_cpu_ops, func_name) +#define BOOT_PARAVIRT_READ_REG(reg_func) \ + (BOOT_PARAVIRT_GET_CPU_FUNC(reg_func)()) +#define BOOT_PARAVIRT_WRITE_REG(reg_func, reg_value) \ + (BOOT_PARAVIRT_GET_CPU_FUNC(reg_func)(reg_value)) +#define BOOT_GET_CPU_PATCH_LABEL(patch_addr) \ + BOOT_PARAVIRT_GET_BOOT_OPS_FUNC(cur_pv_cpu_ops, patch_addr) + +#define BOOT_PARAVIRT_FLUSH(reg_func) \ + (BOOT_PARAVIRT_GET_CPU_FUNC(reg_func)()) + +typedef struct pv_apic_ops { + void (*apic_write)(unsigned int reg, unsigned int v); + unsigned int (*apic_read) (unsigned int reg); + void (*boot_apic_write)(unsigned int reg, unsigned int v); + unsigned int (*boot_apic_read) (unsigned int reg); +} pv_apic_ops_t; + +extern 
pv_apic_ops_t pv_apic_ops; +extern pv_apic_ops_t *cur_pv_apic_ops; + +#define BOOT_PARAVIRT_GET_APIC_FUNC(func_name) \ + BOOT_PARAVIRT_GET_BOOT_OPS_FUNC(cur_pv_apic_ops, func_name) +#define BOOT_PARAVIRT_APIC_READ(reg) \ + (BOOT_PARAVIRT_GET_APIC_FUNC(boot_apic_read)(reg)) +#define BOOT_PARAVIRT_APIC_WRITE(reg, reg_value) \ + (BOOT_PARAVIRT_GET_APIC_FUNC(boot_apic_write)(reg, reg_value)) + +typedef struct pv_epic_ops { + void (*epic_write_w)(unsigned int reg, unsigned int v); + unsigned int (*epic_read_w)(unsigned int reg); + void (*epic_write_d)(unsigned int reg, unsigned long v); + unsigned long (*epic_read_d)(unsigned int reg); + void (*boot_epic_write_w)(unsigned int reg, unsigned int v); + unsigned int (*boot_epic_read_w)(unsigned int reg); +} pv_epic_ops_t; + +extern pv_epic_ops_t pv_epic_ops; +extern pv_epic_ops_t *cur_pv_epic_ops; + +#define BOOT_PARAVIRT_GET_EPIC_FUNC(func_name) \ + BOOT_PARAVIRT_GET_BOOT_OPS_FUNC(cur_pv_epic_ops, func_name) +#define BOOT_PARAVIRT_EPIC_READ_W(reg) \ + (BOOT_PARAVIRT_GET_EPIC_FUNC(boot_epic_read_w)(reg)) +#define BOOT_PARAVIRT_EPIC_WRITE_W(reg, reg_value) \ + (BOOT_PARAVIRT_GET_EPIC_FUNC(boot_epic_write_w)(reg, reg_value)) + +typedef struct pv_mmu_ops { + long (*recovery_faulted_tagged_store)(e2k_addr_t address, u64 wr_data, + u32 data_tag, u64 st_rec_opc, int chan); + long (*recovery_faulted_load)(e2k_addr_t address, u64 *ld_val, + u8 *data_tag, u64 ld_rec_opc, int chan); + long (*recovery_faulted_move)(e2k_addr_t addr_from, e2k_addr_t addr_to, + int format, int vr, u64 ld_rec_opc, int chan); + long (*recovery_faulted_load_to_greg)(e2k_addr_t address, + u32 greg_num_d, int format, int vr, + u64 ld_rec_opc, int chan, void *saved_greg); + void (*move_tagged_word)(e2k_addr_t addr_from, e2k_addr_t addr_to); + void (*move_tagged_dword)(e2k_addr_t addr_from, e2k_addr_t addr_to); + void (*move_tagged_qword)(e2k_addr_t addr_from, e2k_addr_t addr_to); + void (*write_mmu_reg)(mmu_addr_t mmu_addr, mmu_reg_t mmu_reg); + mmu_reg_t 
(*read_mmu_reg)(mmu_addr_t mmu_addr); + void (*write_dtlb_reg)(tlb_addr_t tlb_addr, mmu_reg_t mmu_reg); + mmu_reg_t (*read_dtlb_reg)(tlb_addr_t tlb_addr); + void (*flush_tlb_entry)(flush_op_t flush_op, flush_addr_t flush_addr); + void (*flush_dcache_line)(e2k_addr_t virt_addr); + void (*clear_dcache_l1_set)(e2k_addr_t virt_addr, unsigned long set); + void (*flush_dcache_range)(void *addr, size_t len); + void (*clear_dcache_l1_range)(void *virt_addr, size_t len); + void (*flush_icache_line)(flush_op_t flush_op, flush_addr_t flush_addr); + void (*write_dcache_l2_reg)(unsigned long reg_val, + int reg_num, int bank_num); + unsigned long (*read_dcache_l2_reg)(int reg_num, int bank_num); + void (*flush_cache_all)(flush_op_t flush_op); + void (*do_flush_tlb_all)(flush_op_t flush_op); + void (*flush_icache_all)(flush_op_t flush_op); + probe_entry_t (*entry_probe_mmu_op)(e2k_addr_t virt_addr); + probe_entry_t (*address_probe_mmu_op)(e2k_addr_t virt_addr); + clw_reg_t (*read_clw_reg)(clw_addr_t clw_addr); + void (*write_clw_reg)(clw_addr_t clw_addr, clw_reg_t val); + void (*save_DAM)(unsigned long long *dam); + void (*write_mmu_debug_reg)(int reg_no, mmu_reg_t mmu_reg); + mmu_reg_t (*read_mmu_debug_reg)(int reg_no); + void (*boot_set_pte_at)(unsigned long addr, + pte_t *ptep, pte_t pteval); + void (*write_pte_at)(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval, + bool only_validate, bool to_move); + void (*set_pte)(pte_t *ptep, pte_t pteval); + void (*write_pmd_at)(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmdval, bool only_validate); + void (*write_pud_at)(struct mm_struct *mm, unsigned long addr, + pud_t *pudp, pud_t pudval, bool only_validate); + void (*write_pgd_at)(struct mm_struct *mm, unsigned long addr, + pgd_t *pgdp, pgd_t pgdval, bool only_validate); + pte_t (*ptep_get_and_clear)(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, bool to_move); + void (*ptep_wrprotect_atomic)(struct mm_struct *mm, + e2k_addr_t addr, 
pte_t *ptep); + pte_t (*get_pte_for_address)(struct vm_area_struct *vma, + e2k_addr_t address); + int (*remap_area_pages)(unsigned long address, unsigned long phys_addr, + unsigned long size, + enum pte_mem_type memory_type); + int (*host_guest_vmap_area)(e2k_addr_t start, e2k_addr_t end); + int (*unhost_guest_vmap_area)(e2k_addr_t start, e2k_addr_t end); + + /* memory management - mman.h */ + void (*free_mm)(struct mm_struct *); + struct mm_struct *(*mm_init)(struct mm_struct *mm, + struct task_struct *p, + struct user_namespace *user_ns); + void (*activate_mm)(struct mm_struct *active_mm, struct mm_struct *mm); + int (*make_host_pages_valid)(struct vm_area_struct *vma, + e2k_addr_t start_addr, e2k_addr_t end_addr, + bool chprot, bool flush); + int (*set_memory_attr_on_host)(e2k_addr_t start, e2k_addr_t end, + int mode); + int (*access_process_vm)(struct task_struct *task, + unsigned long addr, void *buf, int len, + unsigned int gup_flags); + + /* memory management - mm.h */ + void (*free_pgd_range)(struct mmu_gather *tlb, unsigned long addr, + unsigned long end, unsigned long floor, unsigned long ceiling); + + /* kernel virtual memory allocation */ + struct vmap_area *(*alloc_vmap_area)(unsigned long size, + unsigned long align, + unsigned long vstart, unsigned long vend, + int node, gfp_t gfp_mask); + void (*__free_vmap_area)(struct vmap_area *va); +#ifdef CONFIG_SMP + struct vm_struct **(*pcpu_get_vm_areas)(const unsigned long *offsets, + const size_t *sizes, int nr_vms, + size_t align); +#endif /* CONFIG_SMP */ + void (*free_unmap_vmap_area)(struct vmap_area *va); + + /* unmap __init areas */ + void (*unmap_initmem)(void *start, void *end); + +} pv_mmu_ops_t; + +extern pv_mmu_ops_t pv_mmu_ops; +extern pv_mmu_ops_t *cur_pv_mmu_ops; + +#define BOOT_PARAVIRT_GET_MMU_FUNC(func_name) \ + BOOT_PARAVIRT_GET_BOOT_OPS_FUNC(cur_pv_mmu_ops, func_name) +#define BOOT_PARAVIRT_READ_MMU_REG(mmu_addr) \ + BOOT_PARAVIRT_GET_MMU_FUNC(read_mmu_reg)(mmu_addr) +#define 
BOOT_PARAVIRT_WRITE_MMU_REG(mmu_addr, mmu_reg) \ + BOOT_PARAVIRT_GET_MMU_FUNC(write_mmu_reg)(mmu_addr, mmu_reg) + +typedef struct pv_irq_ops { +} pv_irq_ops_t; + +extern pv_irq_ops_t pv_irq_ops; + +typedef struct pv_time_ops { + void (*time_init)(void); + void (*clock_init)(void); + int (*read_current_timer)(unsigned long *timer_val); + unsigned long (*get_cpu_running_cycles)(void); + unsigned long long (*do_sched_clock)(void); + unsigned long (*steal_clock)(int cpu); +} pv_time_ops_t; + +extern pv_time_ops_t pv_time_ops; + +typedef struct pv_io_ops { + void (*boot_writeb)(u8 b, void __iomem *addr); + void (*boot_writew)(u16 w, void __iomem *addr); + void (*boot_writel)(u32 l, void __iomem *addr); + void (*boot_writell)(u64 q, void __iomem *addr); + u8 (*boot_readb)(void __iomem *addr); + u16 (*boot_readw)(void __iomem *addr); + u32 (*boot_readl)(void __iomem *addr); + u64 (*boot_readll)(void __iomem *addr); + void (*writeb)(u8 b, void __iomem *addr); + void (*writew)(u16 w, void __iomem *addr); + void (*writel)(u32 l, void __iomem *addr); + void (*writell)(u64 q, void __iomem *addr); + u8 (*readb)(void __iomem *addr); + u16 (*readw)(void __iomem *addr); + u32 (*readl)(void __iomem *addr); + u64 (*readll)(void __iomem *addr); + u8 (*inb)(unsigned short port); + void (*outb)(unsigned char byte, unsigned short port); + void (*outw)(u16 halfword, unsigned short port); + u16 (*inw)(unsigned short port); + void (*outl)(u32 word, unsigned short port); + u32 (*inl)(unsigned short port); + void (*outsb)(unsigned short port, const void *src, unsigned long count); + void (*outsw)(unsigned short port, const void *src, unsigned long count); + void (*outsl)(unsigned short port, const void *src, unsigned long count); + void (*insb)(unsigned short port, void *dst, unsigned long count); + void (*insw)(unsigned short port, void *dst, unsigned long count); + void (*insl)(unsigned short port, void *dst, unsigned long count); + void (*conf_inb)(unsigned int domain, unsigned int bus, + 
unsigned long port, u8 *byte); + void (*conf_inw)(unsigned int domain, unsigned int bus, + unsigned long port, u16 *hword); + void (*conf_inl)(unsigned int domain, unsigned int bus, + unsigned long port, u32 *word); + void (*conf_outb)(unsigned int domain, unsigned int bus, + unsigned long port, u8 byte); + void (*conf_outw)(unsigned int domain, unsigned int bus, + unsigned long port, u16 hword); + void (*conf_outl)(unsigned int domain, unsigned int bus, + unsigned long port, u32 word); + + void (*scr_writew)(u16 val, volatile u16 *addr); + u16 (*scr_readw)(volatile const u16 *addr); + void (*vga_writeb)(u8 val, volatile u8 *addr); + u8 (*vga_readb)(volatile const u8 *addr); + + int (*pci_init)(void); +} pv_io_ops_t; + +extern pv_io_ops_t pv_io_ops; +extern pv_io_ops_t *cur_pv_io_ops; + +#define BOOT_PARAVIRT_GET_IO_OPS_FUNC(func_name) \ + BOOT_PARAVIRT_GET_BOOT_OPS_FUNC(cur_pv_io_ops, func_name) +#define BOOT_PARAVIRT_IO_READ(io_func, io_addr) \ + (BOOT_PARAVIRT_GET_IO_OPS_FUNC(io_func)(io_addr)) +#define BOOT_PARAVIRT_IO_WRITE(io_func, io_value, io_addr) \ + (BOOT_PARAVIRT_GET_IO_OPS_FUNC(io_func)(io_value, io_addr)) + +#endif /* ! 
__ASSEMBLY__ */ +#endif /* CONFIG_PARAVIRT */ + +#endif /* __ASM_E2K_PARAVIRT_PV_OPS_H */ diff --git a/arch/e2k/include/asm/paravirt/regs_state.h b/arch/e2k/include/asm/paravirt/regs_state.h new file mode 100644 index 000000000000..b7294b96f078 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/regs_state.h @@ -0,0 +1,87 @@ +#ifndef _E2K_PARAVIRT_REGS_STATE_H +#define _E2K_PARAVIRT_REGS_STATE_H + +#include +#include +#include + +#define PV_INIT_G_REGS() \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_INIT_G_REGS(regs); \ + } else { \ + KVM_INIT_G_REGS(regs); \ + } \ +}) + +static inline void +pv_save_local_glob_regs(local_gregs_t *l_gregs, bool is_signal) +{ + pv_cpu_ops.save_local_glob_regs(l_gregs, is_signal); +} +static inline void +pv_restore_local_glob_regs(local_gregs_t *l_gregs, bool is_signal) +{ + pv_cpu_ops.restore_local_glob_regs(l_gregs, is_signal); +} +static inline void +pv_get_all_user_glob_regs(struct global_regs *gregs) +{ + pv_cpu_ops.get_all_user_glob_regs(gregs); +} +static inline void +pv_restore_kernel_gregs_in_syscall(struct thread_info *ti) +{ + pv_cpu_ops.restore_kernel_gregs_in_syscall(ti); +} + +/* Save stack registers on guest kernel mode */ + +#define PV_SAVE_STACK_REGS(regs, ti, user, trap) \ + PREFIX_SAVE_STACK_REGS(PV, regs, ti, user, trap) + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ + +#define INIT_G_REGS() PV_INIT_G_REGS() + +static inline void +save_local_glob_regs(local_gregs_t *l_gregs, bool is_signal) +{ + pv_save_local_glob_regs(l_gregs, is_signal); +} +static inline void +restore_local_glob_regs(local_gregs_t *l_gregs, bool is_signal) +{ + pv_restore_local_glob_regs(l_gregs, is_signal); +} +static inline void +get_all_user_glob_regs(struct global_regs *gregs) +{ + pv_get_all_user_glob_regs(gregs); +} + +#define RESTORE_KERNEL_GREGS_IN_SYSCALL(thread_info) \ + pv_restore_kernel_gregs_in_syscall(thread_info) + +#define SAVE_STACK_REGS(regs, ti, user, trap) \ + PV_SAVE_STACK_REGS(regs, ti, user, 
trap) +#define RESTORE_HS_REGS(regs) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_RESTORE_HS_REGS(regs); \ + } else { \ + KVM_RESTORE_HS_REGS(regs); \ + } \ +}) +#define RESTORE_USER_STACK_REGS(regs, restore_hs) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_RESTORE_USER_STACK_REGS(regs, restore_hs); \ + } else { \ + KVM_RESTORE_USER_STACK_REGS(regs, restore_hs); \ + } \ +}) +#endif /* CONFIG_PARAVIRT_GUEST */ +#endif /* _E2K_PARAVIRT_REGS_STATE_H */ + diff --git a/arch/e2k/include/asm/paravirt/secondary_space.h b/arch/e2k/include/asm/paravirt/secondary_space.h new file mode 100644 index 000000000000..8e91e74b809e --- /dev/null +++ b/arch/e2k/include/asm/paravirt/secondary_space.h @@ -0,0 +1,35 @@ +/* + * Secondary space support for E2K binary compiler + * Paravirtualized host and guest kernel support + */ +#ifndef _ASM_PARAVIRT_SECONDARY_SPACE_H +#define _ASM_PARAVIRT_SECONDARY_SPACE_H + +/* do not include the header directly, use asm/secondary_space.h include */ +#include + +#ifdef CONFIG_KVM_GUEST +#include + +#define PV_IS_NEXT_ELBRUS_2S \ + ((!paravirt_enabled()) ? \ + NATIVE_IS_NEXT_ELBRUS_2S : KVM_IS_NEXT_ELBRUS_2S) +#define PV_SS_SIZE \ + ((!paravirt_enabled()) ? \ + NATIVE_SS_SIZE : KVM_SS_SIZE) +#define PV_SS_ADDR_START \ + ((!paravirt_enabled()) ? \ + NATIVE_SS_ADDR_START : KVM_SS_ADDR_START) +#else /* ! 
CONFIG_KVM_GUEST */ + #error "Unknown virtualization type" +#endif /* CONFIG_KVM_GUEST */ + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ + +#define IS_NEXT_ELBRUS_2S PV_IS_NEXT_ELBRUS_2S +#define SS_SIZE PV_SS_SIZE +#define SS_ADDR_START PV_SS_ADDR_START +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* _ASM_PARAVIRT_SECONDARY_SPACE_H */ diff --git a/arch/e2k/include/asm/paravirt/setup.h b/arch/e2k/include/asm/paravirt/setup.h new file mode 100644 index 000000000000..a8f72b68e443 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/setup.h @@ -0,0 +1,21 @@ +#ifndef _ASM_PARAVIRT_MACHDEP_H_ +#define _ASM_PARAVIRT_MACHDEP_H_ + +#include +#include + +static inline void +pv_arch_setup_machine(void) +{ + pv_cpu_ops.arch_setup_machine(); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +static inline void arch_setup_machine(void) +{ + pv_arch_setup_machine(); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* _ASM_PARAVIRT_MACHDEP_H_ */ diff --git a/arch/e2k/include/asm/paravirt/sge.h b/arch/e2k/include/asm/paravirt/sge.h new file mode 100644 index 000000000000..1ad3f2e1f090 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/sge.h @@ -0,0 +1,46 @@ +#ifndef _E2K_ASM_PARAVIRT_SGE_H +#define _E2K_ASM_PARAVIRT_SGE_H + +#ifdef __KERNEL__ + +#include +#include + +static inline void +pv_switch_to_expanded_proc_stack(long delta_size, long delta_offset, + bool decr_k_ps) +{ + if (pv_cpu_ops.switch_to_expanded_proc_stack) + pv_cpu_ops.switch_to_expanded_proc_stack(delta_size, + delta_offset, decr_k_ps); +} +static inline void +pv_switch_to_expanded_chain_stack(long delta_size, long delta_offset, + bool decr_k_pcs) +{ + if (pv_cpu_ops.switch_to_expanded_chain_stack) + pv_cpu_ops.switch_to_expanded_chain_stack(delta_size, + delta_offset, decr_k_pcs); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* It is paravirtualized host and guest kernel */ + +static inline void +switch_to_expanded_proc_stack(long 
delta_size, long delta_offset, + bool decr_k_ps) +{ + pv_switch_to_expanded_proc_stack(delta_size, delta_offset, + decr_k_ps); +} +static inline void +switch_to_expanded_chain_stack(long delta_size, long delta_offset, + bool decr_k_pcs) +{ + pv_switch_to_expanded_chain_stack(delta_size, delta_offset, + decr_k_pcs); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __KERNEL__ */ +#endif /* _E2K_ASM_PARAVIRT_SGE_H */ diff --git a/arch/e2k/include/asm/paravirt/smp.h b/arch/e2k/include/asm/paravirt/smp.h new file mode 100644 index 000000000000..11ecedb7be43 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/smp.h @@ -0,0 +1,121 @@ +#ifndef __ASM_PARAVIRT_SMP_H +#define __ASM_PARAVIRT_SMP_H + +#include +#include + +#ifdef CONFIG_SMP +static inline void +pv_wait_for_cpu_booting(void) +{ + pv_cpu_ops.wait_for_cpu_booting(); +} +static inline void +pv_wait_for_cpu_wake_up(void) +{ + pv_cpu_ops.wait_for_cpu_wake_up(); +} +static inline int +pv_activate_cpu(int cpu_id) +{ + return pv_cpu_ops.activate_cpu(cpu_id); +} +static inline int +pv_activate_all_cpus(void) +{ + return pv_cpu_ops.activate_all_cpus(); +} + +static inline void +pv_csd_lock_wait(call_single_data_t *data) +{ + pv_cpu_ops.csd_lock_wait(data); +} +static inline void +pv_csd_lock(call_single_data_t *data) +{ + pv_cpu_ops.csd_lock(data); +} +static inline void +pv_arch_csd_lock_async(call_single_data_t *data) +{ + pv_cpu_ops.arch_csd_lock_async(data); +} +static inline void +pv_csd_unlock(call_single_data_t *data) +{ + pv_cpu_ops.csd_unlock(data); +} + +static inline void +pv_setup_local_pic_virq(unsigned int cpuid) +{ + if (pv_cpu_ops.setup_local_pic_virq != NULL) + pv_cpu_ops.setup_local_pic_virq(cpuid); +} +static inline void +pv_startup_local_pic_virq(unsigned int cpuid) +{ + if (pv_cpu_ops.startup_local_pic_virq != NULL) + pv_cpu_ops.startup_local_pic_virq(cpuid); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +static inline void +wait_for_cpu_booting(void) +{ + 
pv_wait_for_cpu_booting(); +} +static inline void +wait_for_cpu_wake_up(void) +{ + pv_wait_for_cpu_wake_up(); +} +static inline int +activate_cpu(int cpu_id) +{ + return pv_activate_cpu(cpu_id); +} +static inline int +activate_all_cpus(void) +{ + return pv_activate_all_cpus(); +} + +static inline void +csd_lock_wait(call_single_data_t *data) +{ + pv_csd_lock_wait(data); +} +static inline void +csd_lock(call_single_data_t *data) +{ + pv_csd_lock(data); +} +static inline void +arch_csd_lock_async(call_single_data_t *data) +{ + pv_arch_csd_lock_async(data); +} +static inline void +csd_unlock(call_single_data_t *data) +{ + pv_csd_unlock(data); +} + +static inline void +setup_local_pic_virq(unsigned int cpuid) +{ + pv_setup_local_pic_virq(cpuid); +} + +static inline void +startup_local_pic_virq(unsigned int cpuid) +{ + pv_startup_local_pic_virq(cpuid); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* CONFIG_SMP */ +#endif /* __ASM_PARAVIRT_SMP_H */ diff --git a/arch/e2k/include/asm/paravirt/spinlock.h b/arch/e2k/include/asm/paravirt/spinlock.h new file mode 100644 index 000000000000..777ea8e9f110 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/spinlock.h @@ -0,0 +1,151 @@ +#ifndef __ASM_PARAVIRT_SPINLOCK_H +#define __ASM_PARAVIRT_SPINLOCK_H +/* + * This file implements the arch-dependent parts of kvm guest + * spin_lock()/spin_unlock() fast and slow part + * + * Copyright 2014 Salavat S. 
Guiliazov (atic@mcst.ru) + */ + +#include +#include + +static inline void +pv_arch_spin_lock_slow(arch_spinlock_t *lock) +{ + pv_cpu_ops.arch_spin_lock_slow(lock); +} +static inline void +pv_arch_spin_locked_slow(arch_spinlock_t *lock) +{ + pv_cpu_ops.arch_spin_locked_slow(lock); +} +static inline void +pv_arch_spin_unlock_slow(arch_spinlock_t *lock) +{ + pv_cpu_ops.arch_spin_unlock_slow(lock); +} + +static inline void +pv_ord_wait_read_lock_slow(arch_rwlock_t *rw) +{ + if (pv_cpu_ops.ord_wait_read_lock_slow) + pv_cpu_ops.ord_wait_read_lock_slow(rw); +} +static inline void +pv_ord_wait_write_lock_slow(arch_rwlock_t *rw) +{ + if (pv_cpu_ops.ord_wait_write_lock_slow) + pv_cpu_ops.ord_wait_write_lock_slow(rw); +} +static inline void +pv_ord_arch_read_locked_slow(arch_rwlock_t *rw) +{ + if (pv_cpu_ops.ord_arch_read_locked_slow) + pv_cpu_ops.ord_arch_read_locked_slow(rw); +} +static inline void +pv_ord_arch_write_locked_slow(arch_rwlock_t *rw) +{ + if (pv_cpu_ops.ord_arch_write_locked_slow) + pv_cpu_ops.ord_arch_write_locked_slow(rw); +} +static inline void +pv_ord_arch_read_unlock_slow(arch_rwlock_t *rw) +{ + if (pv_cpu_ops.ord_arch_read_unlock_slow) + pv_cpu_ops.ord_arch_read_unlock_slow(rw); +} +static inline void +pv_ord_arch_write_unlock_slow(arch_rwlock_t *rw) +{ + if (pv_cpu_ops.ord_arch_write_unlock_slow) + pv_cpu_ops.ord_arch_write_unlock_slow(rw); +} + +static inline void +boot_pv_arch_spin_lock_slow(arch_spinlock_t *lock) +{ + BOOT_PARAVIRT_GET_CPU_FUNC(arch_spin_lock_slow)(lock); +} +static inline void +boot_pv_arch_spin_locked_slow(arch_spinlock_t *lock) +{ + BOOT_PARAVIRT_GET_CPU_FUNC(arch_spin_locked_slow)(lock); +} +static inline void +boot_pv_arch_spin_unlock_slow(arch_spinlock_t *lock) +{ + BOOT_PARAVIRT_GET_CPU_FUNC(arch_spin_unlock_slow)(lock); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ + +#define arch_spin_relax(lock) pv_cpu_relax() +#define arch_read_relax(lock) pv_cpu_relax() +#define 
arch_write_relax(lock) pv_cpu_relax() + +static inline void +arch_spin_lock_slow(arch_spinlock_t *lock) +{ + pv_arch_spin_lock_slow(lock); +} +static inline void +arch_spin_locked_slow(arch_spinlock_t *lock) +{ + pv_arch_spin_locked_slow(lock); +} +static inline void +arch_spin_unlock_slow(arch_spinlock_t *lock) +{ + pv_arch_spin_unlock_slow(lock); +} +/* boot-time functions */ +static inline void boot_arch_spin_lock_slow(arch_spinlock_t *lock) +{ + boot_pv_arch_spin_lock_slow(lock); +} +static inline void boot_arch_spin_locked_slow(arch_spinlock_t *lock) +{ + boot_pv_arch_spin_locked_slow(lock); +} +static inline void boot_arch_spin_unlock_slow(arch_spinlock_t *lock) +{ + boot_pv_arch_spin_unlock_slow(lock); +} + +static inline void +ord_wait_read_lock_slow(arch_rwlock_t *rw) +{ + pv_ord_wait_read_lock_slow(rw); +} +static inline void +ord_wait_write_lock_slow(arch_rwlock_t *rw) +{ + pv_ord_wait_write_lock_slow(rw); +} +static inline void +ord_arch_read_locked_slow(arch_rwlock_t *rw) +{ + pv_ord_arch_read_locked_slow(rw); +} +static inline void +ord_arch_write_locked_slow(arch_rwlock_t *rw) +{ + pv_ord_arch_write_locked_slow(rw); +} +static inline void +ord_arch_read_unlock_slow(arch_rwlock_t *rw) +{ + pv_ord_arch_read_unlock_slow(rw); +} +static inline void +ord_arch_write_unlock_slow(arch_rwlock_t *rw) +{ + pv_ord_arch_write_unlock_slow(rw); +} + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __ASM_PARAVIRT_SPINLOCK_H */ diff --git a/arch/e2k/include/asm/paravirt/string.h b/arch/e2k/include/asm/paravirt/string.h new file mode 100644 index 000000000000..6eafc537a6aa --- /dev/null +++ b/arch/e2k/include/asm/paravirt/string.h @@ -0,0 +1,88 @@ +#ifndef _ASM_E2K_PARAVIRT_STRING_H +#define _ASM_E2K_PARAVIRT_STRING_H + +#include +#include + +#ifndef CONFIG_BOOT_E2K +static inline unsigned long +pv_fast_tagged_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return 
pv_cpu_ops.fast_tagged_memory_copy(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); +} +static inline unsigned long +boot_pv_fast_tagged_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return BOOT_PARAVIRT_GET_CPU_FUNC(fast_tagged_memory_copy)(dst, src, + len, strd_opcode, ldrd_opcode, prefetch); +} +static inline unsigned long +pv_fast_tagged_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + return pv_cpu_ops.fast_tagged_memory_set(addr, val, + tag, len, strd_opcode); +} +static inline void +boot_pv_fast_tagged_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + BOOT_PARAVIRT_GET_CPU_FUNC(fast_tagged_memory_set)(addr, val, tag, len, + strd_opcode); +} +#endif /* ! CONFIG_BOOT_E2K */ + +static inline unsigned long +pv_extract_tags_32(u16 *dst, const void *src) +{ + return pv_cpu_ops.extract_tags_32(dst, src); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is native guest kernel (not paravirtualized based on pv_ops) */ + +#ifndef CONFIG_BOOT_E2K +static inline unsigned long +fast_tagged_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return pv_fast_tagged_memory_copy(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); +} +static inline unsigned long +boot_fast_tagged_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return boot_pv_fast_tagged_memory_copy(dst, src, + len, strd_opcode, ldrd_opcode, prefetch); +} +static inline unsigned long +fast_tagged_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + return pv_fast_tagged_memory_set(addr, val, tag, len, strd_opcode); +} +static inline void +boot_fast_tagged_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + boot_pv_fast_tagged_memory_set(addr, val, tag, len, strd_opcode); +} 
+#endif /* CONFIG_BOOT_E2K */ + +static inline unsigned long +extract_tags_32(u16 *dst, const void *src) +{ + return pv_extract_tags_32(dst, src); +} +#endif /* ! CONFIG_PARAVIRT_GUEST */ + +#endif /* _ASM_E2K_PARAVIRT_STRING_H */ + diff --git a/arch/e2k/include/asm/paravirt/switch.h b/arch/e2k/include/asm/paravirt/switch.h new file mode 100644 index 000000000000..d64fba853e9c --- /dev/null +++ b/arch/e2k/include/asm/paravirt/switch.h @@ -0,0 +1,54 @@ +#ifndef _E2K_PARAVIRT_SWITCH_H +#define _E2K_PARAVIRT_SWITCH_H + +static inline void pv_guest_enter(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, int switch_usd, int hypercall) +{ + if (!paravirt_enabled()) + host_guest_enter(ti, vcpu, switch_usd, hypercall); +} + +static inline void pv_guest_exit(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, int switch_usd, int hypercall) +{ + if (!paravirt_enabled()) + host_guest_exit(ti, vcpu, switch_usd, hypercall); +} + +static inline void +pv_trap_guest_enter(struct thread_info *ti, struct pt_regs *regs) +{ + host_trap_guest_enter(ti, regs); +} +static inline void +pv_trap_guest_exit(struct thread_info *ti, struct pt_regs *regs) +{ + host_trap_guest_exit(ti, regs); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest */ + +static inline void __guest_enter(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, int switch_usd, int hypercall) +{ + pv_guest_enter(ti, vcpu, switch_usd, hypercall); +} + +static inline void __guest_exit(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, int switch_usd, int hypercall) +{ + pv_guest_exit(ti, vcpu, switch_usd, hypercall); +} +static inline void +trap_guest_enter(struct thread_info *ti, struct pt_regs *regs) +{ + pv_trap_guest_enter(ti, regs); +} +static inline void +trap_guest_exit(struct thread_info *ti, struct pt_regs *regs) +{ + pv_trap_guest_exit(ti, regs); +} +#endif /* CONFIG_PARAVIRT_GUEST */ +#endif /* ! 
_E2K_PARAVIRT_SWITCH_H */ diff --git a/arch/e2k/include/asm/paravirt/switch_to.h b/arch/e2k/include/asm/paravirt/switch_to.h new file mode 100644 index 000000000000..36f1099ddb3a --- /dev/null +++ b/arch/e2k/include/asm/paravirt/switch_to.h @@ -0,0 +1,28 @@ +#ifndef _ASM_PARAVIRT_SWITCH_TO_H +#define _ASM_PARAVIRT_SWITCH_TO_H + +#ifdef __KERNEL__ + +#include +#include + +/* switch_to() should be only macros to update pointer 'prev' at */ +/* __schedule() function. It is important for guest kernel */ +#define pv_switch_to(prev, next, last) \ +do { \ + if (!paravirt_enabled()) \ + native_switch_to(prev, next, last); \ + else \ + kvm_switch_to(prev, next, last); \ +} while (false) + +#ifdef CONFIG_PARAVIRT_GUEST +/* It is paravirtualized host and guest kernel */ + +#define switch_to(prev, next, last) pv_switch_to(prev, next, last) + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __KERNEL__ */ + +#endif /* _ASM_PARAVIRT_SWITCH_TO_H */ diff --git a/arch/e2k/include/asm/paravirt/system.h b/arch/e2k/include/asm/paravirt/system.h new file mode 100644 index 000000000000..b89004da320f --- /dev/null +++ b/arch/e2k/include/asm/paravirt/system.h @@ -0,0 +1,69 @@ +/* + * Paravirtualized PV_OPs support + * + * Copyright (C) 2016 MCST + */ + +#ifndef _E2K_PARAVIRT_SYSTEM_H_ +#define _E2K_PARAVIRT_SYSTEM_H_ + +#ifndef __ASSEMBLY__ + +#include +#include +#include + +#define PV_SWITCH_IRQ_TO_UPSR(disable_sge) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_SWITCH_IRQ_TO_UPSR(disable_sge); \ + } else { \ + KVM_SWITCH_IRQ_TO_UPSR(disable_sge); \ + } \ +}) +#define BOOT_PV_SWITCH_IRQ_TO_UPSR(disable_sge) \ +({ \ + if (!boot_paravirt_enabled()) { \ + BOOT_NATIVE_WRITE_PSR_REG_VALUE(AW(E2K_KERNEL_PSR_ENABLED)); \ + } else { \ + BOOT_KVM_WRITE_PSR_REG_VALUE(AW(E2K_KERNEL_PSR_ENABLED)); \ + } \ +}) +#define PV_INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis) \ + PREFIX_INIT_KERNEL_UPSR_REG(PV, irq_en, nmirq_dis) +#define BOOT_PV_INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis) \ + 
BOOT_PREFIX_INIT_KERNEL_UPSR_REG(PV, irq_en, nmirq_dis) +#define PV_SET_KERNEL_UPSR_WITH_DISABLED_NMI(disable_sge) \ + PREFIX_SET_KERNEL_UPSR_WITH_DISABLED_NMI(PV, disable_sge) +#define BOOT_PV_SET_KERNEL_UPSR() \ + BOOT_PREFIX_SET_KERNEL_UPSR(PV) + +static inline void * +pv_nested_kernel_return_address(int n) +{ + return pv_cpu_ops.nested_kernel_return_address(n); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ + +#define INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis) \ + PV_INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis) +#define BOOT_INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis) \ + BOOT_PV_INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis) +#define SET_KERNEL_UPSR_WITH_DISABLED_NMI(disable_sge) \ + PV_SET_KERNEL_UPSR_WITH_DISABLED_NMI(disable_sge) +#define BOOT_SET_KERNEL_UPSR() BOOT_PV_SET_KERNEL_UPSR() + +static inline void * +nested_kernel_return_address(int n) +{ + return pv_nested_kernel_return_address(n); +} + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* ! __ASSEMBLY__ */ +#endif /* _E2K_PARAVIRT_SYSTEM_H_ */ + + diff --git a/arch/e2k/include/asm/paravirt/time.h b/arch/e2k/include/asm/paravirt/time.h new file mode 100644 index 000000000000..3772546824a5 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/time.h @@ -0,0 +1,37 @@ +#ifndef __ASM_E2K_PARAVIRT_TIME_H +#define __ASM_E2K_PARAVIRT_TIME_H + +#ifdef __KERNEL__ +#include +#include + +extern struct static_key paravirt_steal_enabled; + +static inline void +pv_arch_clock_init(void) +{ + return pv_time_ops.clock_init(); +} +static inline unsigned long +pv_steal_clock(int cpu) +{ + return pv_time_ops.steal_clock(cpu); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized guest and host kernel */ +static inline void +arch_clock_init(void) +{ + pv_arch_clock_init(); +} +static inline u64 +paravirt_steal_clock(int cpu) +{ + return pv_steal_clock(cpu); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_E2K_PARAVIRT_TIME_H */ diff --git 
a/arch/e2k/include/asm/paravirt/timex.h b/arch/e2k/include/asm/paravirt/timex.h new file mode 100644 index 000000000000..db375c60a7da --- /dev/null +++ b/arch/e2k/include/asm/paravirt/timex.h @@ -0,0 +1,36 @@ +#ifndef __ASM_E2K_PARAVIRT_TIMEX_H +#define __ASM_E2K_PARAVIRT_TIMEX_H + +#ifdef __KERNEL__ +#include +#include + +static inline void +pv_time_init(void) +{ + pv_time_ops.time_init(); +} + +static inline int +pv_read_current_timer(unsigned long *timer_val) +{ + return pv_time_ops.read_current_timer(timer_val); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized guest and host kernel */ +static inline void +time_init(void) +{ + pv_time_init(); +} +static inline int +read_current_timer(unsigned long *timer_val) +{ + return pv_read_current_timer(timer_val); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_E2K_PARAVIRT_TIMEX_H */ diff --git a/arch/e2k/include/asm/paravirt/tlbflush.h b/arch/e2k/include/asm/paravirt/tlbflush.h new file mode 100644 index 000000000000..daf9c9d87246 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/tlbflush.h @@ -0,0 +1,85 @@ +/* + * Host and guest MMU caches flushing on paravirtualized kernel + * + * Copyright 2016 Salavat S. 
Gilyazov (atic@mcst.ru) + */ +#ifndef _E2K_PARAVIRT_TLBFLUSH_H +#define _E2K_PARAVIRT_TLBFLUSH_H + +#include +#include + +#ifdef CONFIG_SMP +static inline void +pv_smp_flush_tlb_all(void) +{ + pv_cpu_ops.smp_flush_tlb_all(); +} +static inline void +pv_smp_flush_tlb_mm(struct mm_struct *mm) +{ + pv_cpu_ops.smp_flush_tlb_mm(mm); +} +static inline void +pv_smp_flush_tlb_page(struct vm_area_struct *vma, e2k_addr_t addr) +{ + pv_cpu_ops.smp_flush_tlb_page(vma, addr); +} +static inline void +pv_smp_flush_tlb_range(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end) +{ + pv_cpu_ops.smp_flush_tlb_range(mm, start, end); +} +static inline void +pv_smp_flush_pmd_tlb_range(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end) +{ + pv_cpu_ops.smp_flush_pmd_tlb_range(mm, start, end); +} +static inline void +pv_smp_flush_tlb_range_and_pgtables(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end) +{ + pv_cpu_ops.smp_flush_tlb_range_and_pgtables(mm, start, end); +} +#endif /* CONFIG_SMP */ + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +#ifdef CONFIG_SMP +static inline void +smp_flush_tlb_all(void) +{ + pv_smp_flush_tlb_all(); +} +static inline void +smp_flush_tlb_mm(struct mm_struct *mm) +{ + pv_smp_flush_tlb_mm(mm); +} +static inline void +smp_flush_tlb_page(struct vm_area_struct *vma, e2k_addr_t addr) +{ + pv_smp_flush_tlb_page(vma, addr); +} +static inline void +smp_flush_tlb_range(struct mm_struct *mm, e2k_addr_t start, e2k_addr_t end) +{ + pv_smp_flush_tlb_range(mm, start, end); +} +static inline void +smp_flush_pmd_tlb_range(struct mm_struct *mm, e2k_addr_t start, e2k_addr_t end) +{ + pv_smp_flush_pmd_tlb_range(mm, start, end); +} +static inline void +smp_flush_tlb_range_and_pgtables(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end) +{ + pv_smp_flush_tlb_range_and_pgtables(mm, start, end); +} +#endif /* CONFIG_SMP */ +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* _E2K_PARAVIRT_TLBFLUSH_H */ diff --git 
a/arch/e2k/include/asm/paravirt/trap_table.h b/arch/e2k/include/asm/paravirt/trap_table.h new file mode 100644 index 000000000000..11539c06e14e --- /dev/null +++ b/arch/e2k/include/asm/paravirt/trap_table.h @@ -0,0 +1,106 @@ +#ifndef __E2K_PARAVIRT_TRAP_TABLE_H +#define __E2K_PARAVIRT_TRAP_TABLE_H + +#include +#include +#include + +#define pv_ttable_entry1 (pv_cpu_ops.trap_table_entry1) +#define pv_ttable_entry3 (pv_cpu_ops.trap_table_entry3) +#define pv_ttable_entry4 (pv_cpu_ops.trap_table_entry4) + +static inline void +pv_exit_handle_syscall(e2k_addr_t sbr, e2k_usd_hi_t usd_hi, + e2k_usd_lo_t usd_lo, e2k_upsr_t upsr) +{ + if (!paravirt_enabled()) + native_exit_handle_syscall(sbr, usd_hi, usd_lo, upsr); + else + kvm_exit_handle_syscall(sbr, usd_hi, usd_lo, upsr); +} + +static inline void pv_stack_bounds_trap_enable(void) +{ + if (pv_cpu_ops.stack_bounds_trap_enable) + pv_cpu_ops.stack_bounds_trap_enable(); +} +static inline bool +pv_is_proc_stack_bounds(struct thread_info *ti, struct pt_regs *regs) +{ + return pv_cpu_ops.is_proc_stack_bounds(ti, regs); +} +static inline bool +pv_is_chain_stack_bounds(struct thread_info *ti, struct pt_regs *regs) +{ + return pv_cpu_ops.is_chain_stack_bounds(ti, regs); +} + +#define PV_RETURN_TO_USER_PSP_PCSP(thread_info) \ + PREFIX_RETURN_TO_USER_PSP_PCSP(PV, thread_info) + +static inline void +pv_correct_trap_psp_pcsp(struct pt_regs *regs, struct thread_info *thread_info) +{ + pv_cpu_ops.correct_trap_psp_pcsp(regs, thread_info); +} +static inline void +pv_correct_scall_psp_pcsp(struct pt_regs *regs, struct thread_info *thread_info) +{ + pv_cpu_ops.correct_scall_psp_pcsp(regs, thread_info); +} +static inline void +pv_correct_trap_return_ip(struct pt_regs *regs, unsigned long return_ip) +{ + pv_cpu_ops.correct_trap_return_ip(regs, return_ip); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +/* It is paravirtualized host/guest kernel */ +#define RETURN_TO_USER_PSP_PCSP(thread_info) \ + 
PV_RETURN_TO_USER_PSP_PCSP(thread_info) + +#define ttable_entry1 pv_ttable_entry1 +#define ttable_entry3 pv_ttable_entry3 +#define ttable_entry4 pv_ttable_entry4 + +static inline void +exit_handle_syscall(e2k_addr_t sbr, e2k_usd_hi_t usd_hi, + e2k_usd_lo_t usd_lo, e2k_upsr_t upsr) +{ + pv_exit_handle_syscall(sbr, usd_hi, usd_lo, upsr); +} + +static inline bool +is_proc_stack_bounds(struct thread_info *ti, struct pt_regs *regs) +{ + return pv_is_proc_stack_bounds(ti, regs); +} +static inline bool +is_chain_stack_bounds(struct thread_info *ti, struct pt_regs *regs) +{ + return pv_is_chain_stack_bounds(ti, regs); +} +static inline void +stack_bounds_trap_enable(void) +{ + pv_stack_bounds_trap_enable(); +} +static inline void +correct_trap_psp_pcsp(struct pt_regs *regs, thread_info_t *thread_info) +{ + pv_correct_trap_psp_pcsp(regs, thread_info); +} +static inline void +correct_scall_psp_pcsp(struct pt_regs *regs, thread_info_t *thread_info) +{ + pv_correct_scall_psp_pcsp(regs, thread_info); +} +static inline void +correct_trap_return_ip(struct pt_regs *regs, unsigned long return_ip) +{ + pv_correct_trap_return_ip(regs, return_ip); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __E2K_PARAVIRT_TRAP_TABLE_H */ diff --git a/arch/e2k/include/asm/paravirt/traps.h b/arch/e2k/include/asm/paravirt/traps.h new file mode 100644 index 000000000000..ba371779616e --- /dev/null +++ b/arch/e2k/include/asm/paravirt/traps.h @@ -0,0 +1,87 @@ +/* + * + * Copyright (C) 2016 MCST + * + * Defenition of paravirtualized kernel traps handling routines. 
+ */ + +#ifndef _E2K_ASM_PARAVIRT_TRAPS_H +#define _E2K_ASM_PARAVIRT_TRAPS_H + +#include +#include + +#define pv_TIR0_clear_false_exceptions(__TIR_hi, __nr_TIRs) \ +({ \ + u64 TIR; \ + \ + if (!paravirt_enabled()) { \ + TIR = native_TIR0_clear_false_exceptions(__TIR_hi, __nr_TIRs); \ + } else { \ + TIR = kvm_TIR0_clear_false_exceptions(__TIR_hi, __nr_TIRs); \ + } \ + TIR; \ +}) + +static inline void +pv_instr_page_fault(struct pt_regs *regs, tc_fault_type_t ftype, + const int async_instr) +{ + pv_cpu_ops.instr_page_fault(regs, ftype, async_instr); +} + +static inline unsigned long +pv_mmio_page_fault(struct pt_regs *regs, trap_cellar_t *tcellar) +{ + return pv_cpu_ops.mmio_page_fault(regs, (struct trap_cellar *)tcellar); +} +static inline int +pv_do_hw_stack_bounds(struct pt_regs *regs, bool proc_bounds, bool chain_bounds) +{ + return pv_cpu_ops.do_hw_stack_bounds(regs, proc_bounds, chain_bounds); +} +static inline irqreturn_t +pv_handle_interrupt(struct pt_regs *regs) +{ + return pv_cpu_ops.handle_interrupt(regs); +} +static inline void +pv_init_guest_system_handlers_table(void) +{ + pv_cpu_ops.init_guest_system_handlers_table(); +} + +#ifdef CONFIG_PARAVIRT_GUEST + +#define TIR0_clear_false_exceptions(__TIR_hi, __nr_TIRs) \ + pv_TIR0_clear_false_exceptions(__TIR_hi, __nr_TIRs) + +static inline void +instr_page_fault(struct pt_regs *regs, tc_fault_type_t ftype, + const int async_instr) +{ + pv_instr_page_fault(regs, ftype, async_instr); +} +static inline int +do_hw_stack_bounds(struct pt_regs *regs, bool proc_bounds, bool chain_bounds) +{ + return pv_do_hw_stack_bounds(regs, proc_bounds, chain_bounds); +} +static inline void +handle_interrupt(struct pt_regs *regs) +{ + pv_handle_interrupt(regs); +} +static inline unsigned long +mmio_page_fault(struct pt_regs *regs, trap_cellar_t *tcellar) +{ + return pv_mmio_page_fault(regs, tcellar); +} +static inline void +init_guest_system_handlers_table(void) +{ + pv_init_guest_system_handlers_table(); +} +#endif /* 
CONFIG_PARAVIRT_GUEST */ + +#endif /* _E2K_ASM_PARAVIRT_TRAPS_H */ diff --git a/arch/e2k/include/asm/paravirt/v2p.h b/arch/e2k/include/asm/paravirt/v2p.h new file mode 100644 index 000000000000..9d63662ec28b --- /dev/null +++ b/arch/e2k/include/asm/paravirt/v2p.h @@ -0,0 +1,129 @@ +/* + * + * Heading of boot-time initialization. + * + * Copyright (C) 2011 Salavat Guiliazov + */ + +#ifndef _E2K_ASM_PARAVIRT_V2P_H +#define _E2K_ASM_PARAVIRT_V2P_H + +#include +#include + +#ifndef __ASSEMBLY__ + +/* + * boot-time virtual to physical conversions hooks. + */ + +typedef struct pv_v2p_ops { + void *(*boot_kernel_va_to_pa)(void *virt_pnt, + unsigned long kernel_base); + void *(*boot_func_to_pa)(void *virt_pnt); + e2k_addr_t (*boot_vpa_to_pa)(e2k_addr_t vpa); + e2k_addr_t (*boot_pa_to_vpa)(e2k_addr_t pa); + e2k_addr_t (*vpa_to_pa)(e2k_addr_t vpa); + e2k_addr_t (*pa_to_vpa)(e2k_addr_t pa); +} pv_v2p_ops_t; + +extern pv_v2p_ops_t pv_v2p_ops; +extern pv_v2p_ops_t *cur_pv_v2p_ops; /* pointer to boot-time tables of OPs */ + +#define BOOT_PARAVIRT_GET_BOOT_OPS_FUNC(boot_ops, func_name) \ +({ \ + typeof(boot_ops) ops = boot_native_get_vo_value(boot_ops); \ + typeof(boot_ops->func_name) func; \ + func = boot_native_vp_to_pp(ops)->func_name; \ + func; \ +}) + +#define BOOT_PARAVIRT_GET_V2P_FUNC(func_name) \ + BOOT_PARAVIRT_GET_BOOT_OPS_FUNC(cur_pv_v2p_ops, func_name) + +static inline void * +boot_pv_kernel_va_to_pa(void *virt_pnt, unsigned long kernel_base) +{ + return BOOT_PARAVIRT_GET_V2P_FUNC(boot_kernel_va_to_pa) + (virt_pnt, kernel_base); +} + +static inline void * +boot_pv_func_to_pa(void *virt_pnt) +{ + return BOOT_PARAVIRT_GET_V2P_FUNC(boot_func_to_pa)(virt_pnt); +} + +static inline e2k_addr_t +boot_pv_vpa_to_pa(e2k_addr_t vpa) +{ + return BOOT_PARAVIRT_GET_V2P_FUNC(boot_vpa_to_pa)(vpa); +} +static inline e2k_addr_t +boot_pv_pa_to_vpa(e2k_addr_t pa) +{ + return BOOT_PARAVIRT_GET_V2P_FUNC(boot_pa_to_vpa)(pa); +} + +static inline void * +boot_pv_va_to_pa(void *virt_pnt) +{ + 
return boot_pv_kernel_va_to_pa(virt_pnt, -1); +} + +static inline e2k_addr_t +pv_vpa_to_pa(e2k_addr_t vpa) +{ + return pv_v2p_ops.vpa_to_pa(vpa); +} +static inline e2k_addr_t +pv_pa_to_vpa(e2k_addr_t pa) +{ + return pv_v2p_ops.pa_to_vpa(pa); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +static inline void * +boot_kernel_va_to_pa(void *virt_pnt, unsigned long kernel_base) +{ + return boot_pv_kernel_va_to_pa(virt_pnt, kernel_base); +} +static inline void * +boot_func_to_pa(void *virt_pnt) +{ + return boot_pv_func_to_pa(virt_pnt); +} + +static inline void * +boot_va_to_pa(void *virt_pnt) +{ + return boot_pv_va_to_pa(virt_pnt); +} + +static inline e2k_addr_t +boot_vpa_to_pa(e2k_addr_t vpa) +{ + return boot_pv_vpa_to_pa(vpa); +} +static inline e2k_addr_t +boot_pa_to_vpa(e2k_addr_t pa) +{ + return boot_pv_pa_to_vpa(pa); +} + +static inline e2k_addr_t +vpa_to_pa(e2k_addr_t vpa) +{ + return pv_vpa_to_pa(vpa); +} +static inline e2k_addr_t +pa_to_vpa(e2k_addr_t pa) +{ + return pv_pa_to_vpa(pa); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __ASSEMBLY__ */ + +#endif /* ! 
_E2K_ASM_PARAVIRT_V2P_H */ diff --git a/arch/e2k/include/asm/paravirt/vga.h b/arch/e2k/include/asm/paravirt/vga.h new file mode 100644 index 000000000000..e523b7e7b529 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/vga.h @@ -0,0 +1,52 @@ + +#ifndef _E2K_PARAVIRT_VGA_H_ +#define _E2K_PARAVIRT_VGA_H_ + +#include +#include +#include + +/* + * VGA screen support + * VGA Video Memory emulated as part of common guest VCPUs virtual memory + */ + +static inline void pv_scr_writew(u16 val, volatile u16 *addr) +{ + pv_io_ops.scr_writew(val, addr); +} +static inline u16 pv_scr_readw(volatile const u16 *addr) +{ + return pv_io_ops.scr_readw(addr); +} +static inline void pv_vga_writeb(u8 val, volatile u8 *addr) +{ + pv_io_ops.vga_writeb(val, addr); +} +static inline u8 pv_vga_readb(volatile const u8 *addr) +{ + return pv_io_ops.vga_readb(addr); +} + +#ifdef CONFIG_PARAVIRT_GUEST +static inline void scr_writew(u16 val, volatile u16 *addr) +{ + pv_scr_writew(val, addr); +} + +static inline u16 scr_readw(volatile const u16 *addr) +{ + return pv_scr_readw(addr); +} +static inline void vga_writeb(u8 val, volatile u8 *addr) +{ + pv_vga_writeb(val, addr); +} + +static inline u8 vga_readb(volatile const u8 *addr) +{ + return pv_vga_readb(addr); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* _E2K_PARAVIRT_VGA_H_ */ diff --git a/arch/e2k/include/asm/parport.h b/arch/e2k/include/asm/parport.h new file mode 100644 index 000000000000..b1d86815d37e --- /dev/null +++ b/arch/e2k/include/asm/parport.h @@ -0,0 +1,18 @@ +/* + * parport.h: ia32-specific parport initialisation + * + * Copyright (C) 1999, 2000 Tim Waugh + * + * This file should only be included by drivers/parport/parport_pc.c. 
+ */ + +#ifndef _ASM_I386_PARPORT_H +#define _ASM_I386_PARPORT_H 1 + +static int parport_pc_find_isa_ports (int autoirq, int autodma); +static int parport_pc_find_nonpci_ports (int autoirq, int autodma) +{ + return parport_pc_find_isa_ports (autoirq, autodma); +} + +#endif /* !(_ASM_I386_PARPORT_H) */ diff --git a/arch/e2k/include/asm/pci.h b/arch/e2k/include/asm/pci.h new file mode 100644 index 000000000000..94d29a69caff --- /dev/null +++ b/arch/e2k/include/asm/pci.h @@ -0,0 +1,53 @@ +#ifndef _E2K_PCI_H +#define _E2K_PCI_H + +#ifdef __KERNEL__ + +#define HAVE_PCI_LEGACY 1 +#define HAVE_MULTIROOT_BUS_PCI_DOMAINS 1 /* each IOHUB has own */ + /* config space */ + +extern unsigned long pci_mem_start; +#define PCIBIOS_MIN_IO 0x1000 +#define PCIBIOS_MIN_MEM (pci_mem_start) +#define PCIBIOS_MAX_MEM_32 0xffffffffUL + +#define PCIBIOS_MIN_CARDBUS_IO 0x4000 + +#define PCI_ARCH_CACHE_LINE_SIZE 32 +/* Dynamic DMA mapping stuff. + * i386 has everything mapped statically. + */ + +#include +#include +#include +#include +#include + +/* The PCI address space does equal the physical memory + * address space. The networking and block device layers use + * this boolean for bounce buffer decisions. 
+ */ +#define PCI_DMA_BUS_IS_PHYS (1) + +struct pci_raw_ops { + int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, + int reg, int len, u32 *val); + int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn, + int reg, int len, u32 val); +}; + +extern struct pci_raw_ops *raw_pci_ops; + +#define HAVE_PCI_MMAP +#define arch_can_pci_mmap_wc() 1 + +/* generic elbrus pci stuff */ +#include + +/* generic pci stuff */ +#include +#endif /* __KERNEL__ */ + +#endif /* _E2K_PCI_H */ diff --git a/arch/e2k/include/asm/percpu.h b/arch/e2k/include/asm/percpu.h new file mode 100644 index 000000000000..9e99b81ba860 --- /dev/null +++ b/arch/e2k/include/asm/percpu.h @@ -0,0 +1,74 @@ +#ifndef _E2K_PERCPU_H_ +#define _E2K_PERCPU_H_ + +#ifndef CONFIG_SMP +# define set_my_cpu_offset(off) +#else + +# include +# include + +# define __my_cpu_offset __my_cpu_offset +register unsigned long __my_cpu_offset DO_ASM_GET_GREG_MEMONIC( + MY_CPU_OFFSET_GREG); + +# define set_my_cpu_offset(off) do {__my_cpu_offset = (off); } while (0) + +# define this_cpu_read_1(pcp) __arch_this_cpu_read((pcp), "b") +# define this_cpu_read_2(pcp) __arch_this_cpu_read((pcp), "h") +# define this_cpu_read_4(pcp) __arch_this_cpu_read((pcp), "w") +# define this_cpu_read_8(pcp) __arch_this_cpu_read((pcp), "d") + +# define this_cpu_write_1(pcp, val) __arch_this_cpu_write((pcp), (val), "b") +# define this_cpu_write_2(pcp, val) __arch_this_cpu_write((pcp), (val), "h") +# define this_cpu_write_4(pcp, val) __arch_this_cpu_write((pcp), (val), "w") +# define this_cpu_write_8(pcp, val) __arch_this_cpu_write((pcp), (val), "d") + +/* Use relaxed atomics if they are available */ +# if CONFIG_CPU_ISET >= 5 +# define this_cpu_xchg_1(pcp, nval) __arch_pcpu_atomic_xchg((nval), (pcp), "b") +# define this_cpu_xchg_2(pcp, nval) __arch_pcpu_atomic_xchg((nval), (pcp), "h") +# define this_cpu_xchg_4(pcp, nval) __arch_pcpu_atomic_xchg((nval), (pcp), "w") +# define this_cpu_xchg_8(pcp, nval) 
__arch_pcpu_atomic_xchg((nval), (pcp), "d") + +# define this_cpu_cmpxchg_1(pcp, oval, nval) \ + __arch_pcpu_atomic_cmpxchg((oval), (nval), (pcp), "b", 0x4) +# define this_cpu_cmpxchg_2(pcp, oval, nval) \ + __arch_pcpu_atomic_cmpxchg((oval), (nval), (pcp), "h", 0x5) +# define this_cpu_cmpxchg_4(pcp, oval, nval) \ + __arch_pcpu_atomic_cmpxchg_word((oval), (nval), (pcp)) +# define this_cpu_cmpxchg_8(pcp, oval, nval) \ + __arch_pcpu_atomic_cmpxchg_dword((oval), (nval), (pcp)) + +# define this_cpu_and_1(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "b", "ands")) +# define this_cpu_and_2(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "h", "ands")) +# define this_cpu_and_4(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "w", "ands")) +# define this_cpu_and_8(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "d", "andd")) + +# define this_cpu_or_1(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "b", "ors")) +# define this_cpu_or_2(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "h", "ors")) +# define this_cpu_or_4(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "w", "ors")) +# define this_cpu_or_8(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "d", "ord")) + +# define this_cpu_add_1(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "b", "adds")) +# define this_cpu_add_2(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "h", "adds")) +# define this_cpu_add_4(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "w", "adds")) +# define this_cpu_add_8(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "d", "addd")) + +# define this_cpu_add_return_1(pcp, val) __arch_pcpu_atomic_op((val), (pcp), "b", "adds") +# define this_cpu_add_return_2(pcp, val) __arch_pcpu_atomic_op((val), (pcp), "h", "adds") +# define this_cpu_add_return_4(pcp, val) __arch_pcpu_atomic_op((val), (pcp), "w", "adds") +# define this_cpu_add_return_8(pcp, val) __arch_pcpu_atomic_op((val), (pcp), "d", "addd") +# endif + +#endif + +#include + +/* For 
EARLY_PER_CPU_* definitions */ +#include + +DECLARE_PER_CPU(unsigned long, cpu_loops_per_jiffy); + +#endif /* _E2K_PERCPU_H_ */ + diff --git a/arch/e2k/include/asm/perf_event.h b/arch/e2k/include/asm/perf_event.h new file mode 100644 index 000000000000..a450604a6bda --- /dev/null +++ b/arch/e2k/include/asm/perf_event.h @@ -0,0 +1,101 @@ +#pragma once + +#include +#include +#include +#include +#include + +static inline void set_perf_event_pending(void) {} +static inline void clear_perf_event_pending(void) {} + +void perf_data_overflow_handle(struct pt_regs *); +void perf_instr_overflow_handle(struct pt_regs *); +void dimtp_overflow(struct perf_event *event); + +#define perf_arch_fetch_caller_regs perf_arch_fetch_caller_regs +static __always_inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, + unsigned long ip) +{ + SAVE_STACK_REGS(regs, current_thread_info(), false, false); + WARN_ON_ONCE(instruction_pointer(regs) != ip); +} + +static inline e2k_dimcr_t dimcr_pause(void) +{ + e2k_dimcr_t dimcr, dimcr_old; + + /* + * Stop counting for more precise group counting and also + * to avoid races when one counter overflows while another + * is being handled. + * + * Writing %dimcr also clears other pending exc_instr_debug + */ + dimcr = READ_DIMCR_REG(); + dimcr_old = dimcr; + AS(dimcr)[0].user = 0; + AS(dimcr)[0].system = 0; + AS(dimcr)[1].user = 0; + AS(dimcr)[1].system = 0; + WRITE_DIMCR_REG(dimcr); + + return dimcr_old; +} + +static inline e2k_ddmcr_t ddmcr_pause(void) +{ + e2k_ddmcr_t ddmcr, ddmcr_old; + + /* + * Stop counting for more precise group counting and also + * to avoid races when one counter overflows while another + * is being handled. 
+ * + * Writing %ddmcr also clears other pending exc_data_debug + */ + ddmcr = READ_DDMCR_REG(); + ddmcr_old = ddmcr; + AS(ddmcr)[0].user = 0; + AS(ddmcr)[0].system = 0; + AS(ddmcr)[1].user = 0; + AS(ddmcr)[1].system = 0; + WRITE_DDMCR_REG(ddmcr); + + return ddmcr_old; +} + +#ifdef CONFIG_PERF_EVENTS +extern void dimcr_continue(e2k_dimcr_t dimcr_old); +extern void ddmcr_continue(e2k_ddmcr_t ddmcr_old); +#else +static inline void dimcr_continue(e2k_dimcr_t dimcr_old) +{ + e2k_dimcr_t dimcr; + + /* + * Restart counting + */ + dimcr = READ_DIMCR_REG(); + AS(dimcr)[0].user = AS(dimcr_old)[0].user; + AS(dimcr)[0].system = AS(dimcr_old)[0].system; + AS(dimcr)[1].user = AS(dimcr_old)[1].user; + AS(dimcr)[1].system = AS(dimcr_old)[1].system; + WRITE_DIMCR_REG(dimcr); +} + +static inline void ddmcr_continue(e2k_ddmcr_t ddmcr_old) +{ + e2k_ddmcr_t ddmcr; + + /* + * Restart counting + */ + ddmcr = READ_DDMCR_REG(); + AS(ddmcr)[0].user = AS(ddmcr_old)[0].user; + AS(ddmcr)[0].system = AS(ddmcr_old)[0].system; + AS(ddmcr)[1].user = AS(ddmcr_old)[1].user; + AS(ddmcr)[1].system = AS(ddmcr_old)[1].system; + WRITE_DDMCR_REG(ddmcr); +} +#endif diff --git a/arch/e2k/include/asm/perf_event_types.h b/arch/e2k/include/asm/perf_event_types.h new file mode 100644 index 000000000000..c689c6c9a8d5 --- /dev/null +++ b/arch/e2k/include/asm/perf_event_types.h @@ -0,0 +1,42 @@ +#pragma once + +#include +#include + +#define ARCH_PERFMON_OS (1 << 16) +#define ARCH_PERFMON_USR (1 << 17) + +DECLARE_PER_CPU(struct perf_event * [4], cpu_events); + +#ifdef CONFIG_PERF_EVENTS +DECLARE_PER_CPU(u8, perf_monitors_used); +DECLARE_PER_CPU(u8, perf_bps_used); +# define perf_read_monitors_used() __this_cpu_read(perf_monitors_used) +# define perf_read_bps_used() __this_cpu_read(perf_bps_used) +#else /* ! 
CONFIG_PERF_EVENTS */ +# define perf_read_monitors_used() 0 +# define perf_read_bps_used() 0 +#endif /* CONFIG_PERF_EVENTS */ + +/* + * Bitmask for perf_monitors_used + * + * DIM0 has all counters from DIM1 and some more. So events for + * DIM1 are marked with DIM0_DIM1, and the actual used monitor + * will be determined at runtime. + */ +enum { + _DDM0 = 0, + _DDM1, + _DIM0, + _DIM1, + _DDM0_DDM1, + _DIM0_DIM1, + MAX_HW_MONITORS +}; +#define DDM0 (1 << _DDM0) +#define DDM1 (1 << _DDM1) +#define DIM0 (1 << _DIM0) +#define DIM1 (1 << _DIM1) +#define DDM0_DDM1 (1 << _DDM0_DDM1) +#define DIM0_DIM1 (1 << _DIM0_DIM1) diff --git a/arch/e2k/include/asm/perf_event_uncore.h b/arch/e2k/include/asm/perf_event_uncore.h new file mode 100644 index 000000000000..a660cf0d3373 --- /dev/null +++ b/arch/e2k/include/asm/perf_event_uncore.h @@ -0,0 +1,116 @@ +#ifndef _ASM_E2K_PERF_EVENT_UNCORE_H +#define _ASM_E2K_PERF_EVENT_UNCORE_H + +#include +#include + +#define UNCORE_PMU_NAME_LEN 32 + +#define E2K_UNCORE_HAS_IPCC \ + (IS_MACHINE_E2S || IS_MACHINE_E8C) + +#define E2K_UNCORE_HAS_IOCC \ + (IS_MACHINE_E2S || IS_MACHINE_ES2 || IS_MACHINE_E1CP) + +#define E2K_UNCORE_HAS_SIC (HAS_MACHINE_L_SIC && \ + (IS_MACHINE_E2S || IS_MACHINE_E8C || IS_MACHINE_E8C2)) + +#define E2K_UNCORE_HAS_SIC_L3 (E2K_UNCORE_HAS_SIC && \ + (IS_MACHINE_E8C || IS_MACHINE_E8C2)) + +#define E2K_UNCORE_HAS_HMU \ + (IS_MACHINE_E2C3 || IS_MACHINE_E12C || IS_MACHINE_E16C) + +#define E2K_UNCORE_HAS_IOMMU \ + (IS_MACHINE_E2C3 || IS_MACHINE_E12C || IS_MACHINE_E16C) + +#define E2K_UNCORE_HAS_HC \ + (IS_MACHINE_E2C3 || IS_MACHINE_E12C || IS_MACHINE_E16C) + +#define E2K_UNCORE_HAS_MC \ + (IS_MACHINE_E2C3 || IS_MACHINE_E12C || IS_MACHINE_E16C) + +#define E2K_UNCORE_HAS_PREPIC \ + (IS_MACHINE_E2C3 || IS_MACHINE_E12C || IS_MACHINE_E16C) + +enum { + E2K_UNCORE_IOCC = 1, + E2K_UNCORE_IPCC, + E2K_UNCORE_SIC, + E2K_UNCORE_HMU, + E2K_UNCORE_IOMMU, + E2K_UNCORE_HC, + E2K_UNCORE_MC, + E2K_UNCORE_PREPIC +}; + +extern int __init 
register_iocc_pmus(void); +extern int __init register_ipcc_pmus(void); +extern int __init register_sic_pmus(void); +extern int __init register_hmu_pmus(void); +extern int __init register_iommu_pmus(void); +extern int __init register_hc_pmus(void); +extern int __init register_mc_pmus(void); +extern int __init register_prepic_pmus(void); + +extern int e2k_uncore_event_init(struct perf_event *event); + +extern int e2k_uncore_add(struct perf_event *event, int flags); +extern void e2k_uncore_del(struct perf_event *event, int flags); +extern void e2k_uncore_start(struct perf_event *event, int flags); +extern void e2k_uncore_stop(struct perf_event *event, int flags); +extern void e2k_uncore_read(struct perf_event *event); + +extern const struct attribute_group e2k_cpumask_attr_group; + +struct e2k_uncore_valid_events { + int first; + int last; +}; + +struct e2k_uncore { + char name[UNCORE_PMU_NAME_LEN]; + int type; + int num_counters; + int node; + int idx_at_node; + + /* + * Array of valid event numbers. + * Must be terminated with { -1, -1 } + */ + struct e2k_uncore_valid_events *valid_events; + int (*validate_event)(struct e2k_uncore *, struct hw_perf_event *); + u64 (*get_event)(struct hw_perf_event *); + int (*add_event)(struct e2k_uncore *, struct perf_event *); + + struct e2k_uncore_reg_ops *reg_ops; + struct pmu pmu; + struct perf_event *events[0]; +}; + +/* + * We implement this functions to generalize access to + * monitor registers. (void *) arguments for flexibility. 
+ */ +struct e2k_uncore_reg_ops { + u64 (*get_cnt)(struct e2k_uncore *uncore, struct hw_perf_event *); + void (*set_cfg)(struct e2k_uncore *uncore, + struct hw_perf_event *, bool); + void (*set_cnt)(struct e2k_uncore *uncore, struct hw_perf_event *, u64); +}; + +struct e2k_uncore_event_desc { + struct kobj_attribute attr; + const char *config; +}; + +extern ssize_t e2k_uncore_event_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf); +#define E2K_UNCORE_EVENT_DESC(_name, _config) \ +{ \ + .attr = __ATTR(_name, 0444, e2k_uncore_event_show, NULL), \ + .config = _config, \ +} + +#endif /* _ASM_E2K_PERF_EVENT_UNCORE_H */ diff --git a/arch/e2k/include/asm/pgalloc.h b/arch/e2k/include/asm/pgalloc.h new file mode 100644 index 000000000000..148ebeec040c --- /dev/null +++ b/arch/e2k/include/asm/pgalloc.h @@ -0,0 +1,598 @@ +/* + * pgalloc.h: the functions and defines necessary to allocate + * page tables. + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ +#ifndef _E2K_PGALLOC_H +#define _E2K_PGALLOC_H + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef DEBUG_PT_MODE +#undef DebugPT +#define DEBUG_PT_MODE 0 /* page table */ +#define DebugPT(...) DebugPrint(DEBUG_PT_MODE ,##__VA_ARGS__) + +#undef DEBUG_PA_MODE +#undef DebugPA +#define DEBUG_PA_MODE 0 /* page table allocation */ +#define DebugPA(...) DebugPrint(DEBUG_PA_MODE ,##__VA_ARGS__) + +extern struct cpuinfo_e2k cpu_data[NR_CPUS]; + +extern void __init *node_early_get_zeroed_page(int nid); + +extern int mem_init_done; + +static inline void pgd_ctor(pgd_t *pgd) +{ + int root_pt_index; + pgd_t *init_pgd; + + init_pgd = node_pgd_offset_kernel(numa_node_id(), 0UL); + + if (MMU_IS_SEPARATE_PT()) { + root_pt_index = pgd_index(USER_VPTB_BASE_ADDR); + } else { + /* + * Check for whether we use mm->pgd to store kernel's pgd. 
+ * If (COPY_USER_PGD_TO_KERNEL_ROOT_PT && THERE_IS_DUP_KERNEL), + * then kernel's pgd are kept in per-cpu pagetables. + */ + if (!IS_ENABLED(CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT) || + !THERE_IS_DUP_KERNEL) + copy_kernel_pgd_range(pgd, init_pgd); + root_pt_index = pgd_index(MMU_UNITED_USER_VPTB); + } + /* One PGD entry is the VPTB self-map. */ + vmlpt_pgd_set(&pgd[root_pt_index], pgd); +} + +static inline pgd_t *pgd_alloc(struct mm_struct *mm) +{ + pgd_t *pgd; + gfp_t gfp = GFP_KERNEL_ACCOUNT; + + if (mm == &init_mm) + gfp &= ~__GFP_ACCOUNT; + + pgd = (pgd_t *) get_zeroed_page(gfp); + if (!pgd) + return NULL; + + pgd_ctor(pgd); + + return pgd; +} + +static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + BUILD_BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE); + free_page((unsigned long) pgd); +} + +static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + gfp_t gfp = GFP_KERNEL_ACCOUNT; + + if (mm == &init_mm) + gfp &= ~__GFP_ACCOUNT; + + return (pud_t *) get_zeroed_page(gfp); +} + +static inline void pud_free(struct mm_struct *mm, pud_t *pud) +{ + BUILD_BUG_ON(PTRS_PER_PUD * sizeof(pud_t) != PAGE_SIZE); + free_page((unsigned long) pud); +} + +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + struct page *page; + gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO; + + if (mm == &init_mm) + gfp &= ~__GFP_ACCOUNT; + page = alloc_page(gfp); + if (unlikely(!page)) + return NULL; + if (unlikely(!pgtable_pmd_page_ctor(page))) { + __free_page(page); + return NULL; + } + + return (pmd_t *) page_address(page); +} + +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) +{ + struct page *page = phys_to_page(__pa(pmd)); + + BUILD_BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE); + pgtable_pmd_page_dtor(page); + __free_page(page); +} + +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) +{ + struct page *page; + + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (unlikely(!page)) + return 
NULL; + + return (pte_t *) page_address(page); +} + +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +{ + __free_page(phys_to_page(__pa(pte))); +} + +static inline pgtable_t pte_alloc_one(struct mm_struct *mm) +{ + struct page *page; + + page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); + if (unlikely(!page)) + return NULL; + + if (unlikely(!pgtable_pte_page_ctor(page))) { + __free_page(page); + return NULL; + } + + return page; +} + +static inline void pte_free(struct mm_struct *mm, pgtable_t pte_page) +{ + BUILD_BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE); + pgtable_pte_page_dtor(pte_page); + __free_page(pte_page); +} + +#ifdef CONFIG_MAKE_ALL_PAGES_VALID +static inline void +pud_page_validate(pgd_t *pgdp, pud_t *pudp) +{ + int i; + + if (pgd_val(*pgdp) != _PAGE_INIT_VALID) + return; + trace_pt_update("Validating pud page at 0x%lx (pgd at 0x%lx = 0x%lx)\n", + pudp, pgdp, pgd_val(*pgdp)); + for (i = 0; i < PTRS_PER_PUD; i++, pudp++) { + WARN_ON(pud_val(*pudp)); + *pudp = __pud(_PAGE_INIT_VALID); + } +} +#else /* ! 
CONFIG_MAKE_ALL_PAGES_VALID */ +static inline void +pud_page_validate(pgd_t *pgdp, pud_t *pudp) +{ + /* nothing to do */ +} +#endif /* CONFIG_MAKE_ALL_PAGES_VALID */ + +static inline void +pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) +{ + BUG_ON(mm != &init_mm); + pgd_set_k(pgd, pud); + virt_kernel_pgd_populate(mm, pgd); +} + +static inline void +pgd_populate_user(pgd_t *pgd, pud_t *pud) +{ + pud_page_validate(pgd, pud); + pgd_set_u(pgd, pud); +} + +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT +static inline bool +pgd_populate_cpu_root_pt(struct mm_struct *mm, pgd_t *pgd) +{ + unsigned long pgd_ind; + pgd_t *cpu_pgd; + bool only_populate; + + if (MMU_IS_SEPARATE_PT()) + return false; + if (!THERE_IS_DUP_KERNEL) + return false; + if (current->active_mm != mm) + return false; + + preempt_disable(); + pgd_ind = pgd_to_index(pgd); + cpu_pgd = &cpu_kernel_root_pt[pgd_ind]; + only_populate = (pgd_none(*cpu_pgd) && !pgd_none(*pgd)); + /* + * FIXME: follow two IFs only for debug purpose to detect + * case of user PGD updating + */ + if (!pgd_none(*cpu_pgd) && + (_PAGE_CLEAR_ACCESSED(pgd_val(*pgd)) != + _PAGE_CLEAR_ACCESSED(pgd_val(*cpu_pgd)))) { + pr_err("%s(): updated CPU #%d kernel root pgd %px " + "from 0x%lx to 0x%lx\n", + __func__, raw_smp_processor_id(), + cpu_pgd, pgd_val(*cpu_pgd), pgd_val(*pgd)); + dump_stack(); + } + if (pgd_none_full(*pgd)) { + pr_err("%s(): cleared CPU #%d kernel root pgd %px " + "from 0x%lx to 0x%lx\n", + __func__, raw_smp_processor_id(), + cpu_pgd, pgd_val(*cpu_pgd), pgd_val(*pgd)); + dump_stack(); + } + if (pgd_val(*cpu_pgd) != pgd_val(*pgd)) { + *cpu_pgd = *pgd; + __flush_tlb_page(mm, (e2k_addr_t) cpu_pgd); + } + DebugPT("CPU #%d set kernel root pgd %px to 0x%lx\n", + smp_processor_id(), cpu_pgd, pgd_val(*cpu_pgd)); + preempt_enable(); + + return only_populate; +} +#else /* ! 
CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ +#define pgd_populate_cpu_root_pt(mm, pgd) false +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT +static inline void +pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) +{ + unsigned long mask; + + BUG_ON(mm == NULL); + if (unlikely(mm == &init_mm)) { + pgd_populate_kernel(mm, pgd, pud); + return; + } + + /* + * PGD should be set into two root page tables (main and + * CPU's) and in atomic style, so close interrupts to preserve + * from smp call for flush_tlb_all() between two settings, + * while the CPU restore CPU's root PGD from main. In this case + * CPU's PGD will be restored as populated when we wait for not + * yet populated state (see above pgd_populate_cpu_root_pt()) + */ + raw_local_irq_save(mask); + pgd_populate_user(pgd, pud); /* order of setting is */ + pgd_populate_cpu_root_pt(mm, pgd); /* significant, if IRQs */ + virt_kernel_pgd_populate(mm, pgd); /* do not close and flush */ + /* of TLB can restore */ + /* second PGD from first */ + raw_local_irq_restore(mask); +} + +static inline void +pgd_populate_not_present(struct mm_struct *mm, e2k_addr_t addr, pgd_t *pgd) +{ + unsigned long mask; + + /* See comment in pgd_populate() */ + raw_local_irq_save(mask); + validate_pgd_at(mm, addr, pgd); + pgd_populate_cpu_root_pt(mm, pgd); + raw_local_irq_restore(mask); +} + +#else /* ! 
CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + +static inline void +pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) +{ +#ifdef CONFIG_VIRTUALIZATION + unsigned long mask; +#endif /* CONFIG_VIRTUALIZATION */ + + BUG_ON(mm == NULL); + if (unlikely(mm == &init_mm)) { + pgd_populate_kernel(mm, pgd, pud); + return; + } + +#ifdef CONFIG_VIRTUALIZATION + /* see comment above: pgd_populate() */ + /* for CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + raw_local_irq_save(mask); +#endif /* CONFIG_VIRTUALIZATION */ + + pgd_populate_user(pgd, pud); + +#ifdef CONFIG_VIRTUALIZATION + virt_kernel_pgd_populate(mm, pgd); + raw_local_irq_restore(mask); +#endif /* CONFIG_VIRTUALIZATION */ +} + +static inline void +pgd_populate_not_present(struct mm_struct *mm, e2k_addr_t addr, pgd_t *pgd) +{ + validate_pgd_at(mm, addr, pgd); +} + +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + +static inline void +node_pgd_populate_kernel(int nid, struct mm_struct *mm, pgd_t *pgd, pud_t *pud) +{ + node_pgd_set_k(nid, pgd, pud); +} + +extern pud_t *node_pud_alloc_kernel(int nid, pgd_t *pgd, e2k_addr_t address); +extern pmd_t *node_pmd_alloc_kernel(int nid, pud_t *pud, e2k_addr_t address); +extern pte_t *node_pte_alloc_kernel(int nid, pmd_t *pmd, e2k_addr_t address); + +static inline pud_t * +node_early_pud_alloc(int nid, pgd_t *pgd, unsigned long address) +{ + pud_t *pud; + + if (!pgd_none(*pgd)) { + DebugPT("pud was allocated already " + "at addr 0x%lx\n", pgd_val(*pgd)); + return pud_offset(pgd, address); + } + pud = (pud_t *) node_early_get_zeroed_page(nid); + DebugPT("allocated pud at addr 0x%px\n", pud); + node_pgd_populate_kernel(nid, (&init_mm), pgd, pud); + return pud_offset(pgd, address); +} + +static inline pud_t * +early_pud_alloc(pgd_t *pgd, unsigned long address) +{ + return node_early_pud_alloc(numa_node_id(), pgd, address); +} + +static inline void +pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + pud_set_k(pud, pmd); +} + +#ifdef 
CONFIG_MAKE_ALL_PAGES_VALID +static inline void +pmd_page_validate(pud_t *pudp, pmd_t *pmdp) +{ + int i; + + if (pud_val(*pudp) != _PAGE_INIT_VALID) + return; + + trace_pt_update("Validating pmd page at 0x%lx (pud at 0x%lx = 0x%lx)\n", + pmdp, pudp, pud_val(*pudp)); + for (i = 0; i < PTRS_PER_PMD; i++, pmdp++) { + WARN_ON(pmd_val(*pmdp)); + *pmdp = __pmd(_PAGE_INIT_VALID); + } +} +#else /* ! CONFIG_MAKE_ALL_PAGES_VALID */ +static inline void +pmd_page_validate(pud_t *pudp, pmd_t *pmdp) +{ + /* nothing to do */ +} +#endif /* CONFIG_MAKE_ALL_PAGES_VALID */ + +static inline void +pud_populate_user(pud_t *pud, pmd_t *pmd) +{ + pmd_page_validate(pud, pmd); + pud_set_u(pud, pmd); +} + +static inline void +pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + BUG_ON(mm == NULL); + if (unlikely(mm == &init_mm)) { + pud_set_k(pud, pmd); + return; + } + pud_populate_user(pud, pmd); +} + +static inline pmd_t * +node_early_pmd_alloc(int nid, pud_t *pud, unsigned long address) +{ + pmd_t *pmd; + + if (!pud_none(*pud)) { + DebugPT("pmd was allocated already " + "at addr 0x%lx\n", pud_val(*pud)); + return pmd_offset(pud, address); + } + pmd = (pmd_t *) node_early_get_zeroed_page(nid); + DebugPT("allocated pmd at addr 0x%px\n", pmd); + pud_populate_kernel((&init_mm), pud, pmd); + return pmd_offset(pud, address); +} + +static inline pmd_t * +early_pmd_alloc(pud_t *pud, unsigned long address) +{ + return node_early_pmd_alloc(numa_node_id(), pud, address); +} + +static inline void +pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) +{ + pmd_set_k(pmd, pte); +} + +#ifdef CONFIG_MAKE_ALL_PAGES_VALID +static inline void +pte_page_validate(pmd_t *pmdp, pte_t *ptep) +{ + int i; + + if (pmd_val(*pmdp) != _PAGE_INIT_VALID) + return; + + trace_pt_update("Validating pte page at 0x%lx (pmd at 0x%lx = 0x%lx)\n", + ptep, pmdp, pmd_val(*pmdp)); + for (i = 0; i < PTRS_PER_PTE; i++, ptep++) + *ptep = pte_mkvalid(*ptep); +} +#else /* ! 
CONFIG_MAKE_ALL_PAGES_VALID */ +static inline void +pte_page_validate(pmd_t *pmdp, pte_t *ptep) +{ + /* nothing to do */ +} +#endif /* CONFIG_MAKE_ALL_PAGES_VALID */ + +#define pmd_pgtable(pmd) pmd_page(pmd) + +static inline void +pmd_populate_user(pmd_t *pmd, pte_t *pte) +{ + pte_page_validate(pmd, pte); + pmd_set_u(pmd, pte); +} +static inline void +pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte) +{ + BUG_ON(mm == NULL); + + if (unlikely(mm == &init_mm)) { + pmd_set_k(pmd, (pte_t *)page_address(pte)); + return; + } + pmd_populate_user(pmd, page_address(pte)); +} + +static inline pte_t * +node_early_pte_alloc(int nid, pmd_t *pmd, unsigned long address) +{ + pte_t *pte = (pte_t *) node_early_get_zeroed_page(nid); + + if (!pmd_none(*pmd)) { + DebugPT("pte was allocated already " + "at addr 0x%lx\n", pmd_val(*pmd)); + return pte_offset_kernel(pmd, address); + } + pte = (pte_t *) node_early_get_zeroed_page(nid); + DebugPT("allocated pte at addr 0x%px\n", pte); + pmd_populate_kernel(&init_mm, pmd, pte); + return pte_offset_kernel(pmd, address); +} + +static inline pte_t * +early_pte_alloc(pmd_t *pmd, unsigned long address) +{ + return node_early_pte_alloc(numa_node_id(), pmd, address); +} + +#ifdef CONFIG_NUMA +extern int node_map_vm_area(int nid_from, nodemask_t nodes_to, + unsigned long address, unsigned long size); + +static inline int +all_nodes_map_vm_area(int nid_from, unsigned long address, unsigned long size) +{ + return node_map_vm_area(nid_from, node_has_dup_kernel_map, + address, size); +} + +static inline int +all_other_nodes_map_vm_area(int nid_from, unsigned long address, + unsigned long size) +{ + return node_map_vm_area(nid_from, node_has_dup_kernel_map, + address, size); +} + +extern void node_unmap_kernel_vm_area_noflush(nodemask_t nodes, + unsigned long address, unsigned long end); +extern void node_unmap_vm_area_noflush(nodemask_t nodes, + struct vm_struct *area); + +static inline void +all_nodes_unmap_kernel_vm_area_noflush(unsigned 
long start, unsigned long end) +{ + node_unmap_kernel_vm_area_noflush(node_has_dup_kernel_map, start, end); +} + +static inline void +all_nodes_unmap_vm_area_noflush(struct vm_struct *area) +{ + node_unmap_vm_area_noflush(node_has_dup_kernel_map, area); +} + +static inline nodemask_t +get_node_has_dup_kernel_map(int nid_to_clear) +{ + nodemask_t nodes_map = node_has_dup_kernel_map; + int dup_nid = node_dup_kernel_nid(nid_to_clear); + + if (nid_to_clear != dup_nid) { + node_clear(dup_nid, nodes_map); + } else { + node_clear(nid_to_clear, nodes_map); + } + return nodes_map; +} + +static inline void +all_other_nodes_unmap_vm_area_noflush(int the_nid, struct vm_struct *area) +{ + nodemask_t nodes_map = get_node_has_dup_kernel_map(the_nid); + + node_unmap_vm_area_noflush(nodes_map, area); +} +extern void node_unmap_kmem_area(nodemask_t nodes, + unsigned long address, unsigned long size); + +static inline void +all_nodes_unmap_kmem_area(unsigned long address, unsigned long size) +{ + node_unmap_kmem_area(node_has_dup_kernel_map, address, size); +} + +static inline void +all_other_nodes_unmap_kmem_area(int the_nid, unsigned long address, + unsigned long size) +{ + nodemask_t nodes_map = get_node_has_dup_kernel_map(the_nid); + + node_unmap_kmem_area(nodes_map, address, size); +} +#else /* ! CONFIG_NUMA */ +static inline int +all_other_nodes_map_vm_area(int nid_from, unsigned long address, + unsigned long size) +{ + return 0; +} +#endif /* CONFIG_NUMA */ + +#endif /* _E2K_PGALLOC_H */ diff --git a/arch/e2k/include/asm/pgatomic.h b/arch/e2k/include/asm/pgatomic.h new file mode 100644 index 000000000000..7b3b7a1c2b15 --- /dev/null +++ b/arch/e2k/include/asm/pgatomic.h @@ -0,0 +1,131 @@ +/* + * E2K page table atomic update operations. + * + * Copyright 2001-2018 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_PGATOMIC_H +#define _E2K_PGATOMIC_H + +/* + * This file contains the functions and defines necessary to modify and + * use the E2K page tables. 
+ * NOTE: E2K has four levels of page tables, while Linux assumes that + * there are three levels of page tables. + */ + +#include + +#include +#include +#include + +/* + * Atomic operations under page table items. + * WARNING: these values should be agreed with guest kernel because of + * are used by hypercall to support atomic modifications on guest. + * So do not change/delete the values, only can add new operations and values. + */ +typedef enum pt_atomic_op { + ATOMIC_GET_AND_XCHG, + ATOMIC_GET_AND_CLEAR, + ATOMIC_SET_WRPROTECT, + ATOMIC_TEST_AND_CLEAR_YOUNG, + ATOMIC_TEST_AND_CLEAR_RELAXED, +} pt_atomic_op_t; + +static inline pgprotval_t +native_pt_set_wrprotect_atomic(pgprotval_t *pgprot) +{ + pgprotval_t newval = __api_atomic_op(_PAGE_INIT_WRITEABLE, pgprot, + d, "andnd", RELAXED_MB); + trace_pt_update("pt_set_wrprotect: entry at 0x%lx: -> 0x%lx\n", + pgprot, newval); + return newval; +} + +static inline pgprotval_t +native_pt_get_and_clear_atomic(pgprotval_t *pgprot) +{ + pgprotval_t oldval = __api_atomic_fetch_op(_PAGE_INIT_VALID, pgprot, + d, "andd", RELAXED_MB); + trace_pt_update("pt_get_and_clear: entry at 0x%lx: 0x%lx -> 0x%lx\n", + pgprot, oldval, oldval & _PAGE_INIT_VALID); + return oldval; +} + +static inline pgprotval_t +native_pt_get_and_xchg_atomic(pgprotval_t newval, pgprotval_t *pgprot) +{ + pgprotval_t oldval = __api_xchg_return(newval, pgprot, d, RELAXED_MB); + trace_pt_update("pt_get_and_xchg: entry at 0x%lx: 0x%lx -> 0x%lx\n", + pgprot, oldval, newval); + return oldval; +} + +static inline pgprotval_t +native_pt_clear_relaxed_atomic(pgprotval_t mask, pgprotval_t *pgprot) +{ + pgprotval_t oldval = __api_atomic_fetch_op(mask, pgprot, d, + "andnd", RELAXED_MB); + trace_pt_update("pt_clear: entry at 0x%lx: 0x%lx -> 0x%lx\n", + pgprot, oldval, oldval & ~mask); + return oldval; +} + +static inline pgprotval_t +native_pt_clear_young_atomic(pgprotval_t *pgprot) +{ + pgprotval_t oldval = __api_atomic_fetch_op(_PAGE_INIT_ACCESSED, pgprot, + d, 
"andnd", RELAXED_MB); + trace_pt_update("pt_clear_young: entry at 0x%lx: 0x%lx -> 0x%lx\n", + pgprot, oldval, oldval & ~_PAGE_INIT_ACCESSED); + return oldval; +} + +#if defined(CONFIG_KVM_GUEST_KERNEL) +/* It is native guest kernel (without paravirtualization on pv_ops) */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* paravirtualized host and guest kernel on pv_ops */ +#include +#else /* ! CONFIG_KVM_GUEST_KERNEL && ! CONFIG_PARAVIRT_GUEST */ +/* native kernel without virtualization support */ +/* host kernel with virtualization support */ + +static inline pgprotval_t +pt_set_wrprotect_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + return native_pt_set_wrprotect_atomic(&pgprot->pgprot); +} + +static inline pgprotval_t +pt_get_and_clear_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + return native_pt_get_and_clear_atomic(&pgprot->pgprot); +} + +static inline pgprotval_t +pt_get_and_xchg_atomic(struct mm_struct *mm, unsigned long addr, + pgprotval_t newval, pgprot_t *pgprot) +{ + return native_pt_get_and_xchg_atomic(newval, &pgprot->pgprot); +} + +static inline pgprotval_t +pt_clear_relaxed_atomic(pgprotval_t mask, pgprot_t *pgprot) +{ + return native_pt_clear_relaxed_atomic(mask, &pgprot->pgprot); +} + +static inline pgprotval_t +pt_clear_young_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + return native_pt_clear_young_atomic(&pgprot->pgprot); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! _E2K_PGATOMIC_H */ diff --git a/arch/e2k/include/asm/pgd.h b/arch/e2k/include/asm/pgd.h new file mode 100644 index 000000000000..c9e659337631 --- /dev/null +++ b/arch/e2k/include/asm/pgd.h @@ -0,0 +1,293 @@ +/* + * pgd.h: the functions and defines necessary to manage + * root level of page tables - pgd + * + * Copyright 2013 Salavat S. 
Guiliazov (atic@mcst.ru) + */ +#ifndef _E2K_PGD_H +#define _E2K_PGD_H + +#include + +#include +#include + +#undef DEBUG_PA_MODE +#undef DebugPA +#define DEBUG_PA_MODE 0 /* page table allocation */ +#define DebugPA(fmt, args...) \ +({ \ + if (DEBUG_PA_MODE) \ + pr_info(fmt, ##args); \ +}) + +/* + * The pointer of kernel root-level page table directory + * The Page table directory is allocated and created at boot-time + */ + +#ifndef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; +#else /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ +typedef struct pg_dir { + pgd_t pg_dir[PTRS_PER_PGD]; +} pg_dir_t; +extern pg_dir_t all_cpus_swapper_pg_dir[NR_CPUS]; +#define swapper_pg_dir (all_cpus_swapper_pg_dir[0].pg_dir) +#endif /* ! CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + +#ifndef CONFIG_NUMA +#define kernel_root_pt ((pgd_t *)swapper_pg_dir) +#define boot_root_pt boot_vp_to_pp(kernel_root_pt) +#define node_pg_dir(nid) ((nid), &swapper_pg_dir) +#define cpu_pg_dir(cpu) kernel_root_pt +#define the_cpu_pg_dir cpu_pg_dir +#define cpu_kernel_root_pt cpu_pg_dir(dummy) +#else /* CONFIG_NUMA */ +#ifndef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT +extern pgd_t __nodedata *all_nodes_pg_dir[MAX_NUMNODES]; +#else /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ +extern pg_dir_t __nodedata *all_nodes_pg_dir[MAX_NUMNODES]; +#endif /* ! 
CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + +#define node_pg_dir(nid)	(all_nodes_pg_dir[nid]) + +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT +#define my_cpu_pg_dir \ +({ \ + pgd_t *pgdp; \ + int my_cpu = hard_smp_processor_id(); \ + \ + if (!MMU_IS_SEPARATE_PT() && THERE_IS_DUP_KERNEL) { \ + pgdp = all_cpus_swapper_pg_dir[my_cpu].pg_dir; \ + } else { \ + pgdp = swapper_pg_dir; \ + } \ + pgdp; \ +}) +#define the_node_pg_dir(nid) \ +({ \ + pg_dir_t *node_pgds; \ + pg_dir_t *pg_dir; \ + \ + node_pgds = node_pg_dir(nid); \ + pg_dir = &node_pgds[0]; \ + pg_dir; \ +}) +#define the_node_kernel_root_pt(nid) \ +({ \ + pg_dir_t *pg_dir; \ + pgd_t *pgdp; \ + \ + pg_dir = the_node_pg_dir(nid); \ + pgdp = pg_dir->pg_dir; \ + pgdp; \ +}) +#define the_cpu_pg_dir(cpu) \ +({ \ + int the_cpu = (cpu); \ + int nid = cpu_to_node(the_cpu); \ + pg_dir_t *pg_dir; \ + pgd_t *pgdp; \ + \ + pg_dir = the_node_pg_dir(nid); \ + if (!MMU_IS_SEPARATE_PT() && THERE_IS_DUP_KERNEL) { \ + pgdp = pg_dir[cpu_to_cpuid(the_cpu)].pg_dir; \ + } else { \ + pgdp = pg_dir->pg_dir; \ + } \ + pgdp; \ +}) +#define boot_the_node_root_pg_dir(nid) \ +({ \ + pg_dir_t *pg_dir; \ + \ + pg_dir = boot_the_node_vp_to_pp(nid, all_cpus_swapper_pg_dir); \ + pg_dir; \ +}) +#define boot_the_node_root_pt(nid) \ +({ \ + pg_dir_t *pg_dir; \ + pgd_t *pgdp; \ + \ + pg_dir = boot_the_node_root_pg_dir(nid); \ + pgdp = pg_dir->pg_dir; \ + pgdp; \ +}) +#define boot_node_cpu_pg_dir(nid, cpuid) \ +({ \ + pg_dir_t *pg_dir; \ + pgd_t *pgdp; \ + \ + pg_dir = boot_the_node_root_pg_dir(nid); \ + if (!MMU_IS_SEPARATE_PT() && BOOT_NODE_THERE_IS_DUP_KERNEL()) { \ + pgdp = pg_dir[cpuid].pg_dir; \ + } else { \ + pgdp = pg_dir->pg_dir; \ + } \ + pgdp; \ +}) +#define boot_cpu_pg_dir(cpuid) \ + boot_node_cpu_pg_dir(boot_numa_node_id(), cpuid) +#define cpu_kernel_root_pt my_cpu_pg_dir +#define boot_cpu_kernel_root_pt boot_cpu_pg_dir(boot_smp_processor_id()) +#define boot_node_root_pt boot_the_node_root_pt(boot_numa_node_id()) +#define boot_root_pt 
boot_cpu_kernel_root_pt +#define kernel_root_pt cpu_kernel_root_pt +#else /* ! CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ +#define boot_the_node_root_pt(nid) \ + boot_the_node_vp_to_pp(nid, swapper_pg_dir) +#define boot_node_root_pt boot_node_vp_to_pp(swapper_pg_dir) +#define boot_root_pt boot_node_root_pt +#define kernel_root_pt node_pg_dir(numa_node_id()) +#define cpu_pg_dir(cpu) kernel_root_pt +#define cpu_kernel_root_pt cpu_pg_dir(dummy) +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ +#endif /* ! CONFIG_NUMA */ + +static inline void +clear_pgd_range(pgd_t *dst_pgd, int start_index, int end_index) +{ + int index; + + BUG_ON(start_index > PTRS_PER_PGD); + BUG_ON(end_index > PTRS_PER_PGD); + BUG_ON(start_index >= end_index); + BUG_ON(MMU_IS_SEPARATE_PT() && end_index > USER_PTRS_PER_PGD); + for (index = start_index; index < end_index; index++) { + DebugPA("clear_pgd_range() clear pgd #%d 0x%px = 0x%lx\n", + index, + &dst_pgd[index], pgd_val(dst_pgd[index])); + dst_pgd[index] = __pgd(0); + } +} + +static inline void +copy_pgd_range(pgd_t *dst_pgd, pgd_t *src_pgd, int start_index, int end_index) +{ + int index; + + BUG_ON(start_index > PTRS_PER_PGD); + BUG_ON(end_index > PTRS_PER_PGD); + BUG_ON(start_index >= end_index); + BUG_ON(MMU_IS_SEPARATE_PT() && end_index > USER_PTRS_PER_PGD); + for (index = start_index; index < end_index; index++) { + dst_pgd[index] = src_pgd[index]; + DebugPA("copy_pgd_range() copy pgd #%d 0x%px = 0x%lx to " + "pgd 0x%px\n", + index, + &src_pgd[index], pgd_val(src_pgd[index]), + &dst_pgd[index]); + } +} + +static inline void +copy_kernel_pgd_range(pgd_t *dst_pgd, pgd_t *src_pgd) +{ + copy_pgd_range(dst_pgd, src_pgd, USER_PTRS_PER_PGD, PTRS_PER_PGD); +} + +static inline void +set_pgd_range(pgd_t *dst_pgd, pgd_t pgd_to_set, int start_index, int end_index) +{ + int index; + + BUG_ON(start_index > PTRS_PER_PGD); + BUG_ON(end_index > PTRS_PER_PGD); + BUG_ON(start_index >= end_index); + BUG_ON(MMU_IS_SEPARATE_PT() && end_index > 
USER_PTRS_PER_PGD); + for (index = start_index; index < end_index; index++) { + dst_pgd[index] = pgd_to_set; + DebugPA("set_pgd_range() set pgd #%d 0x%px to 0x%lx\n", + index, + &dst_pgd[index], pgd_val(pgd_to_set)); + } +} + +static inline void +set_kernel_pgd_range(pgd_t *dst_pgd, pgd_t pgd_to_set) +{ + set_pgd_range(dst_pgd, pgd_to_set, USER_PTRS_PER_PGD, PTRS_PER_PGD); +} + +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT +static inline void +copy_one_user_pgd_to_kernel_pgd(pgd_t *kernel_pgd, pgd_t *user_pgd, int index) +{ + BUG_ON(index >= USER_PTRS_PER_PGD); + BUG_ON(MMU_IS_SEPARATE_PT()); + kernel_pgd[index] = user_pgd[index]; + DebugPA("copy_one_user_pgd_to_kernel_pgd() CPU #%d copy one user pgd " + "#%d 0x%px = 0x%lx to kernel root pt 0x%px\n", + raw_smp_processor_id(), index, + &user_pgd[index], pgd_val(user_pgd[index]), + &kernel_pgd[index]); +} +static inline void +copy_user_pgd_to_kernel_pgd_addr(pgd_t *kernel_pgd, pgd_t *user_pgd, + e2k_addr_t addr) +{ + copy_one_user_pgd_to_kernel_pgd(kernel_pgd, user_pgd, + pgd_index(addr)); +} +static inline void +copy_user_pgd_to_kernel_root_pt_addr(pgd_t *user_pgd, e2k_addr_t addr) +{ + copy_user_pgd_to_kernel_pgd_addr(cpu_kernel_root_pt, user_pgd, + addr); +} + +static inline void +copy_user_pgd_to_kernel_pgd_range(pgd_t *kernel_pgd, pgd_t *user_pgd, + int start_index, int end_index) +{ +#if DEBUG_PA_MODE + int index; +#endif + BUG_ON(start_index >= USER_PTRS_PER_PGD); + BUG_ON(end_index > USER_PTRS_PER_PGD); + BUG_ON(start_index >= end_index); + BUG_ON(MMU_IS_SEPARATE_PT()); +#if DEBUG_PA_MODE + for (index = start_index; index < end_index; index++) + DebugPA("copy_user_pgd_to_kernel_pgd_range() CPU #%d copy " + "user pgd #%d 0x%px = 0x%lx to kernel root pt 0x%px\n", + raw_smp_processor_id(), index, + &user_pgd[index], pgd_val(user_pgd[index]), + &kernel_pgd[index]); +#endif + memcpy(&kernel_pgd[start_index], &user_pgd[start_index], + sizeof(pgd_t) * (end_index - start_index)); +} + +static inline void 
+copy_user_pgd_to_kernel_pgd_addr_range(pgd_t *kernel_pgd, pgd_t *user_pgd, + e2k_addr_t start_addr, e2k_addr_t end_addr) +{ + copy_user_pgd_to_kernel_pgd_range(kernel_pgd, user_pgd, + pgd_index(start_addr), + pgd_index(_PAGE_ALIGN_DOWN(end_addr, PGDIR_SIZE))); +} +static inline void +copy_user_pgd_to_kernel_root_pt_addr_range(pgd_t *user_pgd, + e2k_addr_t start_addr, e2k_addr_t end_addr) +{ + copy_user_pgd_to_kernel_pgd_addr_range(cpu_kernel_root_pt, user_pgd, + start_addr, end_addr); +} + +static inline void +copy_user_pgd_to_kernel_pgd(pgd_t *kernel_pgd, pgd_t *user_pgd) +{ + copy_user_pgd_to_kernel_pgd_range(kernel_pgd, user_pgd, + 0, USER_PTRS_PER_PGD); +} + +static inline void +copy_user_pgd_to_kernel_root_pt(pgd_t *user_pgd) +{ + copy_user_pgd_to_kernel_pgd(cpu_kernel_root_pt, user_pgd); +} +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + +#endif /* _E2K_PGD_H */ diff --git a/arch/e2k/include/asm/pgtable-v2.h b/arch/e2k/include/asm/pgtable-v2.h new file mode 100644 index 000000000000..cdfc6d991cb4 --- /dev/null +++ b/arch/e2k/include/asm/pgtable-v2.h @@ -0,0 +1,412 @@ +/* + * E2K ISET V2-V5 page table structure and common definitions. + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _ASM_E2K_PGTABLE_V2_H +#define _ASM_E2K_PGTABLE_V2_H + +/* + * This file contains the functions and defines necessary to modify and + * use the E2K ISET V2-V5 page tables. + * NOTE: E2K has four levels of page tables. + */ + +#include +#include + +#define E2K_MAX_PHYS_BITS_V2 40 /* max. 
number of physical address */ + /* bits (architected) */ + +#ifndef __ASSEMBLY__ + +/* + * PTE format + */ + +#define _PAGE_W_BIT_V2 1 /* bit # of Writable */ +#define _PAGE_CD1_BIT_V2 4 /* right bit of Cache disable */ +#define _PAGE_CD2_BIT_V2 9 /* left bit of Cache disable */ +#define _PAGE_A_HW_BIT_V2 5 /* bit # of Accessed Page */ +#define _PAGE_D_BIT_V2 6 /* bit # of Page Dirty */ +#define _PAGE_HUGE_BIT_V2 7 /* bit # of Page Size */ +#define _PAGE_AVAIL_BIT_V2 11 /* prog bit Page Available */ +#define _PAGE_PFN_SHIFT_V2 12 /* shift of PFN field */ +#define _PAGE_CU_BITS_V2 48 /* bits # of Compilation Unit */ + +#define _PAGE_P_V2 0x0000000000000001ULL /* Page Present bit */ +#define _PAGE_W_V2 0x0000000000000002ULL /* Writable (0 - only read) */ +#define _PAGE_UU2_V2 0x0000000000000004ULL /* unused bit # 2 */ +#define _PAGE_PWT_V2 0x0000000000000008ULL /* Write Through */ +#define _PAGE_CD1_V2 (1UL << _PAGE_CD1_BIT_V2) /* 0x0000000000000010 */ + /* Cache disable (right bit) */ +#define _PAGE_A_HW_V2 (1UL << _PAGE_A_HW_BIT_V2) /* Accessed Page */ +#define _PAGE_D_V2 (1UL << _PAGE_D_BIT_V2) /* Page Dirty */ +#define _PAGE_HUGE_V2 0x0000000000000080ULL /* Page Size */ +#define _PAGE_G_V2 0x0000000000000100ULL /* Global Page */ +#define _PAGE_CD2_V2 (1UL << _PAGE_CD2_BIT_V2) /* 0x0000000000000200 */ + /* Cache disable (left bit) */ +#define _PAGE_NWA_V2 0x0000000000000400ULL /* Prohibit address writing */ +/* + * The _PAGE_PROTNONE bit is set only when the _PAGE_PRESENT bit + * is cleared, so we can use almost any bits for it. Must make + * sure though that pte_modify() will work with _PAGE_PROTNONE. 
+ */ +#define _PAGE_PROTNONE_V2 _PAGE_NWA_V2 +#define _PAGE_AVAIL_V2 (1UL << _PAGE_AVAIL_BIT_V2) +#define _PAGE_SPECIAL_V2 _PAGE_AVAIL_V2 +#define _PAGE_GFN_V2 _PAGE_AVAIL_V2 /* Page is mapped to guest */ + /* physical memory */ +#define _PAGE_PFN_V2 0x000000fffffff000ULL /* Physical Page Number */ +#define _PAGE_VALID_V2 0x0000010000000000ULL /* Valid Page */ +#define _PAGE_PV_V2 0x0000020000000000ULL /* PriVileged Page */ +#define _PAGE_INT_PR_V2 0x0000040000000000ULL /* Integer address access */ + /* Protection */ +#define _PAGE_NON_EX_V2 0x0000080000000000ULL /* Non Executable Page */ +#define _PAGE_RES_V2 0x0000f00000000000ULL /* Reserved bits */ +#define _PAGE_RES_44_V2 0x0000100000000000ULL /* SG bit was previously */ +#define _PAGE_SEC_MAP_V2 0x0000200000000000ULL /* Secondary space mapping */ + /* Software only bit */ +#define _PAGE_INTL_PR_V2 0x0000400000000000ULL /* used as Intel PR in TLB */ + /* should be 1 for Elbrus */ +#define _PAGE_INTL_WP_V2 0x0000800000000000ULL /* used as Intel WR in TLB */ + /* should be 0 for Elbrus */ +#define _PAGE_A_SW_V2 (1UL << _PAGE_A_SW_BIT_V2) /* Accessed Page */ + /* (software emulation) */ +#define _PAGE_C_UNIT_V2 0xffff000000000000ULL /* Compilation Unit */ +#define _PAGE_MMIO_SW_V2 0x0c00000000000000ULL /* pte is MMIO software flag */ + +/* + * #76626 - hardware access bit should always be set. So we do not + * touch it and use software bit for things like pte_mkyoung(). + */ +#if !defined(CONFIG_BOOT_E2K) && !defined(E2K_P2V) && \ + defined(CONFIG_CPU_ES2) +# define _PAGE_A_SW_BIT_V2 47 /* bit # of Accessed Page */ + /* (software emulated) */ +# define _PAGE_A_BIT_V2 (cpu_has(CPU_HWBUG_PAGE_A) ? 
\ + _PAGE_A_SW_BIT_V2 : _PAGE_A_HW_BIT_V2) +#else +# define _PAGE_A_SW_BIT_V2 _PAGE_A_HW_BIT_V2 +# define _PAGE_A_BIT_V2 _PAGE_A_HW_BIT_V2 +#endif +#define _PAGE_A_V2 (1UL << _PAGE_A_BIT_V2) /* Accessed Page */ + +/* Cache disable flags */ +#define _PAGE_CD_MASK_V2 (_PAGE_CD1_V2 | _PAGE_CD2_V2) +#define _PAGE_CD_VAL_V2(x) ((x & 0x1ULL) << _PAGE_CD1_BIT_V2 | \ + (x & 0x2ULL) << (_PAGE_CD2_BIT_V2 - 1)) +#define _PAGE_CD_EN_V2 _PAGE_CD_VAL_V2(0UL) /* all caches enabled */ +#define _PAGE_CD_D1_DIS_V2 _PAGE_CD_VAL_V2(1UL) /* DCACHE1 disabled */ +#define _PAGE_CD_D_DIS_V2 _PAGE_CD_VAL_V2(2UL) /* DCACHE1, DCACHE2 disabled */ +#define _PAGE_CD_DIS_V2 _PAGE_CD_VAL_V2(3UL) /* DCACHE1, DCACHE2, ECACHE */ + /* disabled */ +#define _PAGE_PWT_DIS_V2 0UL /* Page Write Through */ + /* disabled */ +#define _PAGE_PWT_EN_V2 _PAGE_PWT_V2 /* Page Write Through */ + /* enabled */ + +/* some useful PT entries protection basis values */ +#define _PAGE_KERNEL_RX_NOT_GLOB_V2 \ + (_PAGE_P_V2 | _PAGE_VALID_V2 | \ + _PAGE_PV_V2 | _PAGE_A_HW_V2) +#define _PAGE_KERNEL_RO_NOT_GLOB_V2 \ + (_PAGE_KERNEL_RX_NOT_GLOB_V2 | _PAGE_NON_EX_V2) +#define _PAGE_KERNEL_RWX_NOT_GLOB_V2 \ + (_PAGE_KERNEL_RX_NOT_GLOB_V2 | \ + _PAGE_W_V2 | _PAGE_D_V2) +#define _PAGE_KERNEL_RW_NOT_GLOB_V2 \ + (_PAGE_KERNEL_RWX_NOT_GLOB_V2 | _PAGE_NON_EX_V2) +#define _PAGE_KERNEL_HUGE_RW_NOT_GLOB_V2 \ + (_PAGE_KERNEL_RW_NOT_GLOB_V2 | _PAGE_HUGE_V2) +#define _PAGE_KERNEL_RX_GLOB_V2 \ + (_PAGE_KERNEL_RX_NOT_GLOB_V2 | _PAGE_G_V2) +#define _PAGE_KERNEL_RO_GLOB_V2 \ + (_PAGE_KERNEL_RO_NOT_GLOB_V2 | _PAGE_G_V2) +#define _PAGE_KERNEL_RWX_GLOB_V2 \ + (_PAGE_KERNEL_RWX_NOT_GLOB_V2 | _PAGE_G_V2) +#define _PAGE_KERNEL_RW_GLOB_V2 \ + (_PAGE_KERNEL_RW_NOT_GLOB_V2 | _PAGE_G_V2) +#define _PAGE_KERNEL_HUGE_RW_GLOB_V2 \ + (_PAGE_KERNEL_HUGE_RW_NOT_GLOB_V2 | _PAGE_G_V2) +#ifdef CONFIG_GLOBAL_CONTEXT +#define _PAGE_KERNEL_RX_V2 _PAGE_KERNEL_RX_GLOB_V2 +#define _PAGE_KERNEL_RO_V2 _PAGE_KERNEL_RO_GLOB_V2 +#define _PAGE_KERNEL_RWX_V2 
_PAGE_KERNEL_RWX_GLOB_V2 +#define _PAGE_KERNEL_RW_V2 _PAGE_KERNEL_RW_GLOB_V2 +#define _PAGE_KERNEL_HUGE_RW_V2 _PAGE_KERNEL_HUGE_RW_GLOB_V2 +#else /* ! CONFIG_GLOBAL_CONTEXT */ +#define _PAGE_KERNEL_RX_V2 _PAGE_KERNEL_RX_NOT_GLOB_V2 +#define _PAGE_KERNEL_RO_V2 _PAGE_KERNEL_RO_NOT_GLOB_V2 +#define _PAGE_KERNEL_RWX_V2 _PAGE_KERNEL_RWX_NOT_GLOB_V2 +#define _PAGE_KERNEL_RW_V2 _PAGE_KERNEL_RW_NOT_GLOB_V2 +#define _PAGE_KERNEL_HUGE_RW_V2 _PAGE_KERNEL_HUGE_RW_NOT_GLOB_V2 +#endif /* CONFIG_GLOBAL_CONTEXT */ +#define _PAGE_KERNEL_V2 _PAGE_KERNEL_RW_V2 +#define _PAGE_KERNEL_HUGE_V2 _PAGE_KERNEL_HUGE_RW_V2 +#define _PAGE_KERNEL_IMAGE_V2 _PAGE_KERNEL_RX_V2 +#define _PAGE_KERNEL_MODULE_V2 _PAGE_KERNEL_RWX_V2 +#define _PAGE_KERNEL_PT_V2 _PAGE_KERNEL_V2 +#define _PAGE_USER_PT_V2 _PAGE_KERNEL_RW_NOT_GLOB_V2 + +/* convert physical address to page frame number for PTE */ +#define _PAGE_PADDR_TO_PFN_V2(phys_addr) \ + (((e2k_addr_t)phys_addr) & _PAGE_PFN_V2) + +/* convert the page frame number from PTE to physical address */ +#define _PAGE_PFN_TO_PADDR_V2(pte_val) \ + (((e2k_addr_t)(pte_val) & _PAGE_PFN_V2)) + +/* get/set pte Compilation Unit Index field */ +#define _PAGE_INDEX_TO_CUNIT_V2(index) \ + (((pteval_t)(index) << _PAGE_CU_BITS_V2) & _PAGE_C_UNIT_V2) +#define _PAGE_INDEX_FROM_CUNIT_V2(prot) \ + (((prot) & _PAGE_C_UNIT_V2) >> _PAGE_CU_BITS_V2) +#define SET_PAGE_CUI_V2(pte_val, cui) \ + (((pte_val) & ~_PAGE_C_UNIT_V2) | _PAGE_INDEX_TO_CUNIT_V2(cui)) + +/* PTE flags mask to can update/reduce and restricted to update */ +#define _PAGE_CHG_MASK_V2 (_PAGE_PFN_V2 | _PAGE_A_HW_V2 | _PAGE_A_V2 | \ + _PAGE_D_V2 | _PAGE_SPECIAL_V2 | \ + _PAGE_CD1_V2 | _PAGE_CD2_V2 | _PAGE_PWT_V2) +#define _HPAGE_CHG_MASK_V2 (_PAGE_CHG_MASK_V2 | _PAGE_HUGE_V2) +#define _PROT_REDUCE_MASK_V2 (_PAGE_P_V2 | _PAGE_W_V2 | _PAGE_A_HW_V2 | \ + _PAGE_A_V2 | _PAGE_D_V2 | _PAGE_VALID_V2 | \ + _PAGE_G_V2 | \ + _PAGE_CD_MASK_V2 | _PAGE_PWT_V2) +#define _PROT_RESTRICT_MASK_V2 (_PAGE_PV_V2 | _PAGE_NON_EX_V2 | \ + 
_PAGE_INT_PR_V2) +static inline pteval_t +get_pte_val_v2_changeable_mask(void) +{ + return _PAGE_CHG_MASK_V2; +} +static inline pteval_t +get_huge_pte_val_v2_changeable_mask(void) +{ + return _HPAGE_CHG_MASK_V2; +} +static inline pteval_t +get_pte_val_v2_reduceable_mask(void) +{ + return _PROT_REDUCE_MASK_V2; +} +static inline pteval_t +get_pte_val_v2_restricted_mask(void) +{ + return _PROT_RESTRICT_MASK_V2; +} + +static inline pteval_t +covert_uni_pte_flags_to_pte_val_v2(const uni_pteval_t uni_flags) +{ + pteval_t pte_flags = 0; + + if (uni_flags & UNI_PAGE_PRESENT) + pte_flags |= (_PAGE_P_V2); + if (uni_flags & UNI_PAGE_WRITE) + pte_flags |= (_PAGE_W_V2); + if (uni_flags & UNI_PAGE_PRIV) + pte_flags |= (_PAGE_PV_V2); + if (uni_flags & UNI_PAGE_VALID) + pte_flags |= (_PAGE_VALID_V2); + if (uni_flags & UNI_PAGE_PROTECT) + pte_flags |= (_PAGE_INT_PR_V2); + if (uni_flags & UNI_PAGE_HW_ACCESS) + pte_flags |= (_PAGE_A_HW_V2); + if (uni_flags & UNI_PAGE_DIRTY) + pte_flags |= (_PAGE_D_V2); + if (uni_flags & UNI_PAGE_HUGE) + pte_flags |= (_PAGE_HUGE_V2); + if (uni_flags & UNI_PAGE_GLOBAL) + pte_flags |= (_PAGE_G_V2); + if (uni_flags & UNI_PAGE_NWA) + pte_flags |= (_PAGE_NWA_V2); + if (uni_flags & UNI_PAGE_NON_EX) + pte_flags |= (_PAGE_NON_EX_V2); + if (uni_flags & UNI_PAGE_PROTNONE) + pte_flags |= (_PAGE_PROTNONE_V2); + if (uni_flags & UNI_PAGE_AVAIL) + pte_flags |= (_PAGE_AVAIL_V2); + if (uni_flags & UNI_PAGE_SW_ACCESS) + pte_flags |= (_PAGE_A_SW_V2); + if (uni_flags & UNI_PAGE_SPECIAL) + pte_flags |= (_PAGE_SPECIAL_V2); + if (uni_flags & UNI_PAGE_GFN) + pte_flags |= (_PAGE_GFN_V2); + if (uni_flags & UNI_PAGE_ACCESSED) + pte_flags |= (_PAGE_A_V2); + if (uni_flags & UNI_PAGE_PFN) + pte_flags |= (_PAGE_PFN_V2); + if (uni_flags & UNI_PAGE_MEM_TYPE) + pte_flags |= (_PAGE_CD_MASK_V2 | _PAGE_PWT_V2); + + BUG_ON(pte_flags == 0); + + return pte_flags; +} + +static inline pteval_t +fill_pte_val_v2_flags(const uni_pteval_t uni_flags) +{ + return 
covert_uni_pte_flags_to_pte_val_v2(uni_flags); +} +static inline pteval_t +get_pte_val_v2_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return pte_val & covert_uni_pte_flags_to_pte_val_v2(uni_flags); +} +static inline bool +test_pte_val_v2_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return get_pte_val_v2_flags(pte_val, uni_flags) != 0; +} +static inline pteval_t +set_pte_val_v2_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return pte_val | covert_uni_pte_flags_to_pte_val_v2(uni_flags); +} +static inline pteval_t +clear_pte_val_v2_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return pte_val & ~covert_uni_pte_flags_to_pte_val_v2(uni_flags); +} + +static inline pteval_t +convert_kernel_text_pte_val_v2_to_protected(pteval_t kernel_text_pte_val, + e2k_addr_t cui) +{ + return SET_PAGE_CUI_V2(kernel_text_pte_val, cui); +} +static inline pte_mem_type_t get_pte_val_v2_memory_type(pteval_t pte_val) +{ + pteval_t caches_mask; + + caches_mask = pte_val & (_PAGE_CD_MASK_V2 | _PAGE_PWT_V2); + + /* convert old PTE style fields to new PTE memory type + * see iset 8.2.4. 2) + * + * Use the same default values as what was used in older + * kernels in pgprot_noncached()/pgprot_writecombine(). */ + if (caches_mask & _PAGE_PWT_V2) + return EXT_CONFIG_MT; + else if (caches_mask & _PAGE_CD_MASK_V2) + return GEN_NON_CACHE_MT; + else + return GEN_CACHE_MT; +} + +static inline pteval_t +set_pte_val_v2_memory_type(pteval_t pte_val, pte_mem_type_t memory_type) +{ + pteval_t caches_mask; + + /* convert new PTE style memory type to old PTE caches mask */ + /* see iset 8.2.4. 
2) */ + if (memory_type == GEN_CACHE_MT || memory_type == EXT_CACHE_MT) + caches_mask = _PAGE_CD_EN_V2 | _PAGE_PWT_DIS_V2; + else if (memory_type == GEN_NON_CACHE_MT || memory_type == EXT_PREFETCH_MT) + caches_mask = _PAGE_CD_DIS_V2 | _PAGE_PWT_DIS_V2; + else if (memory_type == EXT_NON_PREFETCH_MT || memory_type == EXT_CONFIG_MT || + memory_type == GEN_NON_CACHE_ORDERED_MT) + caches_mask = _PAGE_CD_DIS_V2 | _PAGE_PWT_EN_V2; + else + BUG(); + pte_val &= ~(_PAGE_CD_MASK_V2 | _PAGE_PWT_V2); + pte_val |= caches_mask; + return pte_val; +} + +/* + * Encode and de-code a swap entry + * + * Format of swap offset: + * if ! (CONFIG_MAKE_ALL_PAGES_VALID): + * bits 20-63: swap offset + * else if (CONFIG_MAKE_ALL_PAGES_VALID) + * bits 20-39: low part of swap offset + * bit 40 : _PAGE_VALID (must be one) + * bits 41-63: hi part of swap offset + */ +#ifndef CONFIG_MAKE_ALL_PAGES_VALID +static inline unsigned long +get_swap_offset_v2(swp_entry_t swap_entry) +{ + return swap_entry.val >> __SWP_OFFSET_SHIFT; +} +static inline swp_entry_t +create_swap_entry_v2(unsigned long type, unsigned long offset) +{ + swp_entry_t swap_entry; + + swap_entry.val = type << __SWP_TYPE_SHIFT; + swap_entry.val |= (offset << __SWP_OFFSET_SHIFT); + + return swap_entry; +} +static inline pte_t +convert_swap_entry_to_pte_v2(swp_entry_t swap_entry) +{ + pte_t pte; + + pte_val(pte) = swap_entry.val; + return pte; +} +#else /* CONFIG_MAKE_ALL_PAGES_VALID */ +# define INSERT_VALID(off) (((off) & (_PAGE_VALID_V2 - 1UL)) | \ + (((off) & ~(_PAGE_VALID_V2 - 1UL)) << 1)) +# define REMOVE_VALID(off) (((off) & (_PAGE_VALID_V2 - 1UL)) | \ + (((off >> 1) & ~(_PAGE_VALID_V2 - 1UL)))) +static inline unsigned long +insert_valid_bit_to_offset(unsigned long offset) +{ + return (offset & (_PAGE_VALID_V2 - 1UL)) | + ((offset & ~(_PAGE_VALID_V2 - 1UL)) << 1); +} +static inline unsigned long +remove_valid_bit_from_entry(swp_entry_t swap_entry) +{ + unsigned long entry = swap_entry.val; + + return (entry & (_PAGE_VALID_V2 - 
1UL)) | + ((entry >> 1) & ~(_PAGE_VALID_V2 - 1UL)); +} +# define __swp_offset_v2(entry) (REMOVE_VALID((entry).val) >> \ + __SWP_OFFSET_SHIFT) +# define __swp_entry_v2(type, off) ((swp_entry_t) { \ + (((type) << __SWP_TYPE_SHIFT) | \ + INSERT_VALID(((off) << __SWP_OFFSET_SHIFT))) }) +# define __swp_entry_to_pte_v2(entry) ((pte_t) { (entry).val | _PAGE_VALID }) +static inline unsigned long +get_swap_offset_v2(swp_entry_t swap_entry) +{ + unsigned long entry = remove_valid_bit_from_entry(swap_entry); + + return entry >> __SWP_OFFSET_SHIFT; +} +static inline swp_entry_t +create_swap_entry_v2(unsigned long type, unsigned long offset) +{ + swp_entry_t swap_entry; + + swap_entry.val = type << __SWP_TYPE_SHIFT; + swap_entry.val |= + insert_valid_bit_to_offset(offset << __SWP_OFFSET_SHIFT); + + return swap_entry; +} +static inline pte_t +convert_swap_entry_to_pte_v2(swp_entry_t swap_entry) +{ + pte_t pte; + + pte_val(pte) = swap_entry.val | _PAGE_VALID_V2; + return pte; +} +#endif /* ! CONFIG_MAKE_ALL_PAGES_VALID */ + +#endif /* ! __ASSEMBLY__ */ + +#endif /* ! _ASM_E2K_PGTABLE_V2_H */ diff --git a/arch/e2k/include/asm/pgtable-v6.h b/arch/e2k/include/asm/pgtable-v6.h new file mode 100644 index 000000000000..e4174c2c33ba --- /dev/null +++ b/arch/e2k/include/asm/pgtable-v6.h @@ -0,0 +1,339 @@ +/* + * E2K ISET V6 page table structure and common definitions. + * + * Copyright 2017 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _ASM_E2K_PGTABLE_V6_H +#define _ASM_E2K_PGTABLE_V6_H + +/* + * This file contains the functions and defines necessary to modify and + * use the E2K ISET V6 page tables. + * NOTE: E2K has four levels of page tables. 
+ */ + +#include +#include + +#ifndef __ASSEMBLY__ + +/* + * PTE-V6 format + */ + +/* numbers of PTE's bits */ +#define _PAGE_P_BIT_V6 0 /* Present */ +#define _PAGE_W_BIT_V6 1 /* Writable */ +#define _PAGE_PV_BIT_V6 2 /* PriVileged */ +#define _PAGE_VALID_BIT_V6 3 /* Valid */ +#define _PAGE_INT_PR_BIT_V6 4 /* PRotected */ +#define _PAGE_A_HW_BIT_V6 5 /* page Accessed */ +#define _PAGE_D_BIT_V6 6 /* page Dirty */ +#define _PAGE_HUGE_BIT_V6 7 /* huge Page Size */ +#define _PAGE_G_BIT_V6 8 /* Global page */ +#define _PAGE_NWA_BIT_V6 9 /* No Writable Address */ +#define _PAGE_SW1_BIT_V6 10 /* SoftWare bit #1 */ +#define _PAGE_SW2_BIT_V6 11 /* SoftWare bit #2 */ +#define _PAGE_PFN_SHIFT_V6 12 /* shift of Physical Page Number */ +#define _PAGE_MT_SHIFT_V6 60 /* shift of Memory Type field */ +#define _PAGE_MT_BITS_NUM_V6 3 /* occupies 3 bits */ +#define _PAGE_NON_EX_BIT_V6 63 /* NON EXecutable */ + +#define _PAGE_P_V6 (1ULL << _PAGE_P_BIT_V6) +#define _PAGE_W_V6 (1ULL << _PAGE_W_BIT_V6) +#define _PAGE_PV_V6 (1ULL << _PAGE_PV_BIT_V6) +#define _PAGE_VALID_V6 (1ULL << _PAGE_VALID_BIT_V6) +#define _PAGE_INT_PR_V6 (1ULL << _PAGE_INT_PR_BIT_V6) +#define _PAGE_A_HW_V6 (1ULL << _PAGE_A_HW_BIT_V6) +#define _PAGE_D_V6 (1ULL << _PAGE_D_BIT_V6) +#define _PAGE_HUGE_V6 (1ULL << _PAGE_HUGE_BIT_V6) +#define _PAGE_G_V6 (1ULL << _PAGE_G_BIT_V6) +#define _PAGE_NWA_V6 (1ULL << _PAGE_NWA_BIT_V6) +#define _PAGE_SW1_V6 (1ULL << _PAGE_SW1_BIT_V6) +#define _PAGE_SW2_V6 (1ULL << _PAGE_SW2_BIT_V6) +#define _PAGE_MMIO_SW_V6 0x0c00000000000000ULL /* pte is MMIO */ + /* software flag */ +#define _PAGE_PFN_V6 \ + ((((1ULL << E2K_MAX_PHYS_BITS_V6) - 1) >> \ + PAGE_SHIFT) << \ + _PAGE_PFN_SHIFT_V6) +#define _PAGE_MT_V6 \ + (((1ULL << _PAGE_MT_BITS_NUM_V6) - 1) << _PAGE_MT_SHIFT_V6) +#define _PAGE_NON_EX_V6 (1ULL << _PAGE_NON_EX_BIT_V6) + +/* + * The _PAGE_PROTNONE bit is set only when the _PAGE_PRESENT bit + * is cleared, so we can use almost any bits for it. 
Must make + * sure though that pte_modify() will work with _PAGE_PROTNONE. + */ +#define _PAGE_PROTNONE_V6 _PAGE_NWA_V6 +#define _PAGE_SOFTWARE_MT _PAGE_SW1_V6 +#define _PAGE_AVAIL_V6 _PAGE_SW2_V6 +#define _PAGE_SPECIAL_V6 _PAGE_SW2_V6 +#define _PAGE_GFN_V6 _PAGE_SW2_V6 /* Page is mapped to guest */ + /* physical memory */ + +/* + * Bug #76626 - hardware access bit should always be set. + * This bug is actual for e2c/e2c+ only CPUs, + * so CPUs based on iset v6 cannot have the bug all the more + * and software bit can be identical to hardware one. + * Software bit is not used in the case of V6, but is not deleted + * for compatibility. + */ +#define _PAGE_A_SW_BIT_V6 _PAGE_A_HW_BIT_V6 +#define _PAGE_A_BIT_V6 _PAGE_A_HW_BIT_V6 +#define _PAGE_A_SW_V6 _PAGE_A_HW_V6 +#define _PAGE_A_V6 _PAGE_A_HW_V6 + +#define _PAGE_MT_GET_VAL(x) (((x) & _PAGE_MT_V6) >> _PAGE_MT_SHIFT_V6) +#define _PAGE_MT_SET_VAL(x, mt) \ + (((x) & ~_PAGE_MT_V6) | \ + (((pteval_t)(mt) << _PAGE_MT_SHIFT_V6) & _PAGE_MT_V6)) + +/* some useful PT entries protection basis values */ +#define _PAGE_KERNEL_RX_NOT_GLOB_V6 \ + (_PAGE_P_V6 | _PAGE_VALID_V6 | \ + _PAGE_PV_V6 | _PAGE_A_HW_V6) +#define _PAGE_KERNEL_RO_NOT_GLOB_V6 \ + (_PAGE_KERNEL_RX_NOT_GLOB_V6 | _PAGE_NON_EX_V6) +#define _PAGE_KERNEL_RWX_NOT_GLOB_V6 \ + (_PAGE_KERNEL_RX_NOT_GLOB_V6 | \ + _PAGE_W_V6 | _PAGE_D_V6) +#define _PAGE_KERNEL_RW_NOT_GLOB_V6 \ + (_PAGE_KERNEL_RWX_NOT_GLOB_V6 | _PAGE_NON_EX_V6) +#define _PAGE_KERNEL_HUGE_RW_NOT_GLOB_V6 \ + (_PAGE_KERNEL_RW_NOT_GLOB_V6 | _PAGE_HUGE_V6) +#define _PAGE_KERNEL_RX_GLOB_V6 \ + (_PAGE_KERNEL_RX_NOT_GLOB_V6 | _PAGE_G_V6) +#define _PAGE_KERNEL_RO_GLOB_V6 \ + (_PAGE_KERNEL_RO_NOT_GLOB_V6 | _PAGE_G_V6) +#define _PAGE_KERNEL_RWX_GLOB_V6 \ + (_PAGE_KERNEL_RWX_NOT_GLOB_V6 | _PAGE_G_V6) +#define _PAGE_KERNEL_RW_GLOB_V6 \ + (_PAGE_KERNEL_RW_NOT_GLOB_V6 | _PAGE_G_V6) +#define _PAGE_KERNEL_HUGE_RW_GLOB_V6 \ + (_PAGE_KERNEL_HUGE_RW_NOT_GLOB_V6 | _PAGE_G_V6) +#ifdef CONFIG_GLOBAL_CONTEXT +#define 
_PAGE_KERNEL_RX_V6 _PAGE_KERNEL_RX_GLOB_V6 +#define _PAGE_KERNEL_RO_V6 _PAGE_KERNEL_RO_GLOB_V6 +#define _PAGE_KERNEL_RWX_V6 _PAGE_KERNEL_RWX_GLOB_V6 +#define _PAGE_KERNEL_RW_V6 _PAGE_KERNEL_RW_GLOB_V6 +#define _PAGE_KERNEL_HUGE_RW_V6 _PAGE_KERNEL_HUGE_RW_GLOB_V6 +#else /* ! CONFIG_GLOBAL_CONTEXT */ +#define _PAGE_KERNEL_RX_V6 _PAGE_KERNEL_RX_NOT_GLOB_V6 +#define _PAGE_KERNEL_RO_V6 _PAGE_KERNEL_RO_NOT_GLOB_V6 +#define _PAGE_KERNEL_RWX_V6 _PAGE_KERNEL_RWX_NOT_GLOB_V6 +#define _PAGE_KERNEL_RW_V6 _PAGE_KERNEL_RW_NOT_GLOB_V6 +#define _PAGE_KERNEL_HUGE_RW_V6 _PAGE_KERNEL_HUGE_RW_NOT_GLOB_V6 +#endif /* CONFIG_GLOBAL_CONTEXT */ +#define _PAGE_KERNEL_V6 _PAGE_KERNEL_RW_V6 +#define _PAGE_KERNEL_HUGE_V6 _PAGE_KERNEL_HUGE_RW_V6 +#define _PAGE_KERNEL_IMAGE_V6 _PAGE_KERNEL_RX_V6 +#define _PAGE_KERNEL_MODULE_V6 _PAGE_KERNEL_RWX_V6 +#define _PAGE_KERNEL_PT_V6 _PAGE_KERNEL_V6 +#define _PAGE_USER_PT_V6 _PAGE_KERNEL_RW_NOT_GLOB_V6 + +/* convert physical address to page frame number for PTE */ +#define _PAGE_PADDR_TO_PFN_V6(phys_addr) \ + (((e2k_addr_t)phys_addr) & _PAGE_PFN_V6) + +/* convert the page frame number from PTE to physical address */ +#define _PAGE_PFN_TO_PADDR_V6(pte_val) \ + ((e2k_addr_t)(pte_val) & _PAGE_PFN_V6) + +/* get/set pte Compilation Unit Index field */ +/* PTE V6 has not more field CUI, so fix error */ +#define _PAGE_INDEX_TO_CUNIT_V6(index) BUILD_BUG_ON(true) +#define _PAGE_INDEX_FROM_CUNIT_V6(prot) BUILD_BUG_ON(true) +#define SET_PAGE_CUI_V6(pte_val, cui) BUILD_BUG_ON(true) + +/* PTE flags mask to can update/reduce and restricted to update */ +#define _PAGE_CHG_MASK_V6 (_PAGE_PFN_V6 | _PAGE_A_V6 | _PAGE_D_V6 | \ + _PAGE_SW1_V6 | _PAGE_SW2_V6 | \ + _PAGE_MT_V6) +#define _HPAGE_CHG_MASK_V6 (_PAGE_CHG_MASK_V6 | _PAGE_HUGE_V6) +#define _PROT_REDUCE_MASK_V6 (_PAGE_P_V6 | _PAGE_W_V6 | _PAGE_A_V6 | \ + _PAGE_D_V6 | _PAGE_VALID_V6 | _PAGE_G_V6 | \ + _PAGE_MT_V6) +#define _PROT_RESTRICT_MASK_V6 (_PAGE_PV_V6 | _PAGE_NON_EX_V6 | \ + _PAGE_INT_PR_V6) +static inline 
pteval_t +get_pte_val_v6_changeable_mask(void) +{ + return _PAGE_CHG_MASK_V6; +} +static inline pteval_t +get_huge_pte_val_v6_changeable_mask(void) +{ + return _HPAGE_CHG_MASK_V6; +} +static inline pteval_t +get_pte_val_v6_reduceable_mask(void) +{ + return _PROT_REDUCE_MASK_V6; +} +static inline pteval_t +get_pte_val_v6_restricted_mask(void) +{ + return _PROT_RESTRICT_MASK_V6; +} + +static inline pteval_t +covert_uni_pte_flags_to_pte_val_v6(const uni_pteval_t uni_flags) +{ + pteval_t pte_flags = 0; + + if (uni_flags & UNI_PAGE_PRESENT) + pte_flags |= (_PAGE_P_V6); + if (uni_flags & UNI_PAGE_WRITE) + pte_flags |= (_PAGE_W_V6); + if (uni_flags & UNI_PAGE_PRIV) + pte_flags |= (_PAGE_PV_V6); + if (uni_flags & UNI_PAGE_VALID) + pte_flags |= (_PAGE_VALID_V6); + if (uni_flags & UNI_PAGE_PROTECT) + pte_flags |= (_PAGE_INT_PR_V6); + if (uni_flags & UNI_PAGE_HW_ACCESS) + pte_flags |= (_PAGE_A_HW_V6); + if (uni_flags & UNI_PAGE_DIRTY) + pte_flags |= (_PAGE_D_V6); + if (uni_flags & UNI_PAGE_HUGE) + pte_flags |= (_PAGE_HUGE_V6); + if (uni_flags & UNI_PAGE_GLOBAL) + pte_flags |= (_PAGE_G_V6); + if (uni_flags & UNI_PAGE_NWA) + pte_flags |= (_PAGE_NWA_V6); + if (uni_flags & UNI_PAGE_NON_EX) + pte_flags |= (_PAGE_NON_EX_V6); + if (uni_flags & UNI_PAGE_PROTNONE) + pte_flags |= (_PAGE_PROTNONE_V6); + if (uni_flags & UNI_PAGE_AVAIL) + pte_flags |= (_PAGE_SW2_V6); + if (uni_flags & UNI_PAGE_SW_ACCESS) + pte_flags |= (_PAGE_A_SW_V6); + if (uni_flags & UNI_PAGE_SPECIAL) + pte_flags |= (_PAGE_SPECIAL_V6); + if (uni_flags & UNI_PAGE_GFN) + pte_flags |= (_PAGE_GFN_V6); + if (uni_flags & UNI_PAGE_ACCESSED) + pte_flags |= (_PAGE_A_V6); + if (uni_flags & UNI_PAGE_PFN) + pte_flags |= (_PAGE_PFN_V6); + if (uni_flags & UNI_PAGE_MEM_TYPE) + pte_flags |= (_PAGE_MT_V6); + + BUG_ON(pte_flags == 0); + + return pte_flags; +} + +static inline pteval_t +fill_pte_val_v6_flags(const uni_pteval_t uni_flags) +{ + return covert_uni_pte_flags_to_pte_val_v6(uni_flags); +} +static inline pteval_t 
+get_pte_val_v6_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return pte_val & covert_uni_pte_flags_to_pte_val_v6(uni_flags); +} +static inline bool +test_pte_val_v6_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return get_pte_val_v6_flags(pte_val, uni_flags) != 0; +} +static inline pteval_t +set_pte_val_v6_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return pte_val | covert_uni_pte_flags_to_pte_val_v6(uni_flags); +} +static inline pteval_t +clear_pte_val_v6_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return pte_val & ~covert_uni_pte_flags_to_pte_val_v6(uni_flags); +} + +static inline pteval_t +convert_kernel_text_pte_val_v6_to_protected(pteval_t kernel_text_pte_val) +{ + return kernel_text_pte_val; +} + +static inline pte_mem_type_t get_pte_val_v6_memory_type(pteval_t pte_val) +{ + int hardware_mt = _PAGE_MT_GET_VAL(pte_val); + + if (!(pte_val & _PAGE_SOFTWARE_MT)) + return hardware_mt; + + if (hardware_mt == GEN_CACHE_MT) { + return EXT_CACHE_MT; + } else if (hardware_mt == GEN_NON_CACHE_MT) { + return GEN_NON_CACHE_ORDERED_MT; + } else { + WARN_ON_ONCE(1); + return EXT_CACHE_MT; + } +} + +static inline pteval_t +set_pte_val_v6_memory_type(pteval_t pte_val, pte_mem_type_t memory_type) +{ + BUG_ON(memory_type != GEN_CACHE_MT && + memory_type != GEN_NON_CACHE_MT && + memory_type != GEN_NON_CACHE_ORDERED_MT && + memory_type != EXT_PREFETCH_MT && + memory_type != EXT_NON_PREFETCH_MT && + memory_type != EXT_CONFIG_MT && + memory_type != EXT_CACHE_MT); + + if (memory_type == EXT_CACHE_MT) { + pte_val |= _PAGE_SOFTWARE_MT; + memory_type = GEN_CACHE_MT; + } else if (memory_type == GEN_NON_CACHE_ORDERED_MT) { + pte_val |= _PAGE_SOFTWARE_MT; + memory_type = GEN_NON_CACHE_MT; + } else { + pte_val &= ~_PAGE_SOFTWARE_MT; + } + return _PAGE_MT_SET_VAL(pte_val, memory_type); +} + +/* + * Encode and de-code a swap entry + * + * Format of swap offset: + * bits 20-63: swap offset + */ +static inline unsigned long 
+get_swap_offset_v6(swp_entry_t swap_entry) +{ + return swap_entry.val >> __SWP_OFFSET_SHIFT; +} +static inline swp_entry_t +create_swap_entry_v6(unsigned long type, unsigned long offset) +{ + swp_entry_t swap_entry; + + swap_entry.val = type << __SWP_TYPE_SHIFT; + swap_entry.val |= (offset << __SWP_OFFSET_SHIFT); + + return swap_entry; +} +static inline pte_t +convert_swap_entry_to_pte_v6(swp_entry_t swap_entry) +{ + pte_t pte; + + pte_val(pte) = swap_entry.val; +#ifdef CONFIG_MAKE_ALL_PAGES_VALID + pte_val(pte) |= _PAGE_VALID_V6; +#endif /* CONFIG_MAKE_ALL_PAGES_VALID */ + return pte; +} + +#endif /* ! __ASSEMBLY__ */ + +#endif /* ! _ASM_E2K_PGTABLE_V6_H */ diff --git a/arch/e2k/include/asm/pgtable.h b/arch/e2k/include/asm/pgtable.h new file mode 100644 index 000000000000..9423d74c0c39 --- /dev/null +++ b/arch/e2k/include/asm/pgtable.h @@ -0,0 +1,831 @@ +/* + * pgtable.h: E2K page table operations. + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_PGTABLE_H +#define _E2K_PGTABLE_H + +/* + * This file contains the functions and defines necessary to modify and + * use the E2K page tables. + * NOTE: E2K has four levels of page tables, while Linux assumes that + * there are three levels of page tables. + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * The defines and routines to manage and access the four-level + * page table. 
+ */ + +#define set_pte(ptep, pteval) \ + native_set_pte(ptep, pteval, false) +#define set_pte_at(mm, addr, ptep, pteval) \ +do { \ + trace_pt_update("set_pte_at: mm 0x%lx, addr 0x%lx, ptep 0x%lx, value 0x%lx\n", \ + (mm), (addr), (ptep), pte_val(pteval)); \ + native_set_pte(ptep, pteval, false); \ +} while (0) +#define set_pte_not_present_at(mm, addr, ptep, pteval) \ +do { \ + trace_pt_update("set_pte_not_present_at: mm 0x%lx, addr 0x%lx, ptep 0x%lx, value 0x%lx\n", \ + (mm), (addr), (ptep), pte_val(pteval)); \ + native_set_pte(ptep, pteval, true); \ +} while (0) +#define validate_pte_at(mm, addr, ptep, pteval) \ +do { \ + trace_pt_update("validate_pte_at: mm 0x%lx, addr 0x%lx, ptep 0x%lx, value 0x%lx\n", \ + (mm), (addr), (ptep), pte_val(pteval)); \ + native_set_pte_noflush(ptep, pteval); \ +} while (0) +#define boot_set_pte_at(addr, ptep, pteval) \ + native_set_pte(ptep, pteval, false) +#define boot_set_pte_kernel(addr, ptep, pteval) \ + boot_set_pte_at(addr, ptep, pteval) + +#define set_pmd(pmdp, pmdval) \ + native_set_pmd(pmdp, pmdval) +#define set_pmd_at(mm, addr, pmdp, pmdval) \ +({ \ + (void)(mm); \ + (void)(addr); \ + trace_pt_update("set_pmd_at: mm 0x%lx, addr 0x%lx, pmdp 0x%lx, value 0x%lx\n", \ + (mm), (addr), (pmdp), pmd_val(pmdval)); \ + native_set_pmd(pmdp, pmdval); \ +}) +#define validate_pmd_at(mm, addr, pmdp, pmdval) \ +do { \ + trace_pt_update("validate_pmd_at: mm 0x%lx, addr 0x%lx, pmdp 0x%lx, value 0x%lx\n", \ + (mm), (addr), (pmdp), pmd_val(pmdval)); \ + native_set_pmd_noflush(pmdp, pmdval); \ +} while (0) + +#define set_pud(pudp, pudval) \ + native_set_pud(pudp, pudval) +#define set_pud_at(mm, addr, pudp, pudval) \ +do { \ + trace_pt_update("set_pud_at: mm 0x%lx, addr 0x%lx, pudp 0x%lx, value 0x%lx\n", \ + (mm), (addr), (pudp), pud_val(pudval)); \ + native_set_pud(pudp, pudval); \ +} while (0) +#define validate_pud_at(mm, addr, pudp) \ + set_pud_at(mm, addr, pudp, __pud(_PAGE_INIT_VALID)) +#define invalidate_pud_at(mm, addr, pudp) \ + 
set_pud_at(mm, addr, pudp, __pud(0)) + +#define set_pgd(pgdp, pgdval) \ + native_set_pgd(pgdp, pgdval) +#define set_pgd_at(mm, addr, pgdp, pgdval) \ +do { \ + trace_pt_update("set_pgd_at: mm 0x%lx, addr 0x%lx, pgdp 0x%lx, value 0x%lx\n", \ + (mm), (addr), (pgdp), pgd_val(pgdval)); \ + native_set_pgd(pgdp, pgdval); \ +} while (0) +#define validate_pgd_at(mm, addr, pgdp) \ + set_pgd_at(mm, addr, pgdp, __pgd(_PAGE_INIT_VALID)) +#define invalidate_pgd_at(mm, addr, pgdp) \ + set_pgd_at(mm, addr, pgdp, __pgd(0)) + +#define get_pte_for_address(vma, address) \ + native_do_get_pte_for_address(vma, address) + +#define pgd_clear_kernel(pgdp) (pgd_val(*(pgdp)) = 0UL) +#define pud_clear_kernel(pudp) (pud_val(*(pudp)) = 0UL) +#define pmd_clear_kernel(pmdp) (pmd_val(*(pmdp)) = 0UL) +#define pte_clear_kernel(ptep) (pte_val(*(ptep)) = 0UL) + +/* pte_page() returns the 'struct page *' corresponding to the PTE: */ +#define pte_page(pte) pfn_to_page(pte_pfn(pte)) +#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd)) +#define pud_page(pud) pfn_to_page(pud_pfn(pud)) +#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd)) + + +#define boot_pte_page(pte) \ + (e2k_addr_t)boot_va(_PAGE_PFN_TO_PADDR(pte_val(pte))) + +#define pmd_set_k(pmdp, ptep) (*(pmdp) = mk_pmd_addr(ptep, \ + PAGE_KERNEL_PTE)) +#define pmd_set_u(pmdp, ptep) (*(pmdp) = mk_pmd_addr(ptep, \ + PAGE_USER_PTE)) +#define pmd_clear(pmdp) \ +do { \ + u64 __pmdval = _PAGE_INIT_VALID; \ + trace_pt_update("pmd_clear: pmdp 0x%lx, value 0x%lx\n", \ + (pmdp), _PAGE_INIT_VALID); \ + native_set_pmd(pmdp, __pmd(__pmdval)); \ +} while (0) + +static inline unsigned long pmd_page_vaddr(pmd_t pmd) +{ + return (unsigned long)__va(_PAGE_PFN_TO_PADDR(pmd_val(pmd))); +} + +static inline unsigned long pud_page_vaddr(pud_t pud) +{ + return (unsigned long)__va(_PAGE_PFN_TO_PADDR(pud_val(pud))); +} + +static inline unsigned long pgd_page_vaddr(pgd_t pgd) +{ + return (unsigned long)__va(_PAGE_PFN_TO_PADDR(pgd_val(pgd))); +} + + +#define boot_pmd_set_k(pmdp, ptep) 
\ + (*(pmdp) = mk_pmd_phys(boot_vpa_to_pa((e2k_addr_t)(ptep)), \ + PAGE_KERNEL_PTE)) +#define boot_pmd_set_u(pmdp, ptep) \ + (*(pmdp) = mk_pmd_phys(boot_vpa_to_pa((e2k_addr_t)(ptep)), \ + PAGE_USER_PTE)) +#define boot_pmd_page(pmd) \ + (e2k_addr_t)boot_va(_PAGE_PFN_TO_PADDR(pmd_val(pmd))) + +#define pud_set_k(pudp, pmdp) (*(pudp) = mk_pud_addr(pmdp, \ + PAGE_KERNEL_PMD)) +#define pud_set_u(pudp, pmdp) (*(pudp) = mk_pud_addr(pmdp, \ + PAGE_USER_PMD)) +static inline void pud_clear(pud_t *pud) +{ + pud_val(*pud) = _PAGE_INIT_VALID; +} + +#define boot_pud_set_k(pudp, pmdp) \ + (*(pudp) = mk_pud_phys(boot_vpa_to_pa((e2k_addr_t)(pmdp)), \ + PAGE_KERNEL_PMD)) +#define boot_pud_set_u(pudp, pmdp) \ + (*(pudp) = mk_pud_phys(boot_vpa_to_pa((e2k_addr_t)(pmdp)), \ + PAGE_USER_PMD)) +#define boot_pud_page(pud) \ + (e2k_addr_t)boot_va(_PAGE_PFN_TO_PADDR(pud_val(pud))) + +#define mk_pgd_phys_k(pudp) mk_pgd_addr(pudp, PAGE_KERNEL_PUD) + +#ifndef CONFIG_NUMA +#define pgd_set_k(pgdp, pudp) (*(pgdp) = mk_pgd_phys_k(pudp)) +#define node_pgd_set_k(nid, pgdp, pudp) pgd_set_k(pgdp, pudp) +#else /* CONFIG_NUMA */ +extern void node_pgd_set_k(int nid, pgd_t *pgdp, pud_t *pudp); +static void inline pgd_set_k(pgd_t *pgdp, pud_t *pudp) +{ + node_pgd_set_k(numa_node_id(), pgdp, pudp); +} +#endif /* ! 
CONFIG_NUMA */ + +#define vmlpt_pgd_set(pgdp, lpt) pgd_set_u(pgdp, (pud_t *)(lpt)) +#define pgd_set_u(pgdp, pudp) (*(pgdp) = mk_pgd_addr(pudp, \ + PAGE_USER_PUD)) +static inline void pgd_clear_one(pgd_t *pgd) +{ + pgd_val(*pgd) = _PAGE_INIT_VALID; +} + + +#define boot_mk_pgd_phys_k(pudp) \ + mk_pgd_phys(boot_vpa_to_pa((e2k_addr_t)(pudp)), PAGE_KERNEL_PUD) +#define boot_mk_pgd_phys_u(pudp) \ + mk_pgd_phys(boot_vpa_to_pa((e2k_addr_t)(pudp)), PAGE_USER_PUD) +#ifndef CONFIG_NUMA +#define boot_pgd_set_k(pgdp, pudp) (*(pgdp) = boot_mk_pgd_phys_k(pudp)) +#define boot_pgd_set_u(pgdp, pudp) (*(pgdp) = boot_mk_pgd_phys_u(pudp)) + +#else /* CONFIG_NUMA */ +extern void boot_pgd_set(pgd_t *my_pgdp, pud_t *pudp, int user); +#define boot_pgd_set_k(pgdp, pudp) boot_pgd_set(pgdp, pudp, 0) +#define boot_pgd_set_u(pgdp, pudp) boot_pgd_set(pgdp, pudp, 1) +#endif /* ! CONFIG_NUMA */ + +#define boot_vmlpt_pgd_set(pgdp, lpt) (*(pgdp) = boot_mk_pgd_phys_k( \ + (pud_t *)(lpt))) +#define boot_pgd_page(pgd) \ + (e2k_addr_t)boot_va(_PAGE_PFN_TO_PADDR(pgd_val(pgd))) + +static inline void native_set_pte_noflush(pte_t *ptep, pte_t pteval) +{ + prefetch_offset(ptep, PREFETCH_STRIDE); + *ptep = pteval; +} + +static inline void native_set_pmd_noflush(pmd_t *pmdp, pmd_t pmdval) +{ + *pmdp = pmdval; +} + +#if !defined(CONFIG_BOOT_E2K) && !defined(E2K_P2V) +#include + +/* + * When instruction page changes its physical address, we must + * flush old physical address from Instruction Cache, otherwise + * it could be accessed by its virtual address. + * + * Since we do not know whether the instruction page will change + * its address in the future, we have to be conservative here. 
+ */ +static inline void flush_pte_from_ic(pte_t val) +{ + unsigned long address; + + address = (unsigned long) __va(_PAGE_PFN_TO_PADDR(pte_val(val))); + __flush_icache_range(address, address + PTE_SIZE); +} + +static inline void flush_pmd_from_ic(pmd_t val) +{ + unsigned long address; + + address = (unsigned long) __va(_PAGE_PFN_TO_PADDR(pmd_val(val))); + __flush_icache_range(address, address + PMD_SIZE); +} + +static inline void flush_pud_from_ic(pud_t val) +{ + /* pud is too large to step through it, so flush everything at once */ + __flush_icache_all(); +} + +static __always_inline void native_set_pte(pte_t *ptep, pte_t pteval, + bool known_not_present) +{ + prefetch_offset(ptep, PREFETCH_STRIDE); + + BUILD_BUG_ON(!__builtin_constant_p(known_not_present)); + /* If we know that pte is not present, then this means + * that instruction buffer has been flushed already + * and we can avoid the check altogether. */ + if (known_not_present) { + *ptep = pteval; + } else { + int have_flush_dc_ic = cpu_has(CPU_FEAT_FLUSH_DC_IC); + pte_t oldpte = *ptep; + + *ptep = pteval; + + if (have_flush_dc_ic && pte_present_and_exec(oldpte) && + (!pte_present_and_exec(pteval) || + pte_pfn(oldpte) != pte_pfn(pteval))) + flush_pte_from_ic(oldpte); + } +} + +static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmdval) +{ + int have_flush_dc_ic = cpu_has(CPU_FEAT_FLUSH_DC_IC); + pmd_t oldpmd = *pmdp; + + *pmdp = pmdval; + + if (have_flush_dc_ic && pmd_present_and_exec_and_huge(oldpmd) && + (!pmd_present_and_exec_and_huge(pmdval) || + pmd_pfn(oldpmd) != pmd_pfn(pmdval))) + flush_pmd_from_ic(oldpmd); +} + +static inline void native_set_pud(pud_t *pudp, pud_t pudval) +{ + int have_flush_dc_ic = cpu_has(CPU_FEAT_FLUSH_DC_IC); + pud_t oldpud = *pudp; + + *pudp = pudval; + + if (have_flush_dc_ic && pud_present_and_exec_and_huge(oldpud) && + (!pud_present_and_exec_and_huge(pudval) || + pud_pfn(oldpud) != pud_pfn(pudval))) + flush_pud_from_ic(oldpud); +} + +static inline void 
native_set_pgd(pgd_t *pgdp, pgd_t pgdval) +{ + *pgdp = pgdval; +} +#else +# define native_set_pte(ptep, pteval, known_not_present) (*(ptep) = (pteval)) +# define native_set_pmd(pmdp, pmdval) (*(pmdp) = (pmdval)) +# define native_set_pud(pudp, pudval) (*(pudp) = (pudval)) +# define native_set_pgd(pgdp, pgdval) (*(pgdp) = (pgdval)) +#endif + +/* + * Remap I/O pages at `pfn' of size `size' with page protection + * `prot' into virtual address `from'. + * + * This function is used only on device memory and track_pfn_remap() + * will explicitly set "External" memory type. + * + * And remap_pfn_range() should be used on RAM only and not on device + * memory, but in practice many drivers violate API and just use + * remap_pfn_range() everywhere. In this case track_pfn_remap() will + * determine the required type. */ +#define io_remap_pfn_range(vma, addr, pfn, size, prot) \ +({ \ + unsigned long __irp_pfn = (pfn); \ + VM_WARN_ON_ONCE(pfn_valid(__irp_pfn)); \ + remap_pfn_range((vma), (addr), (pfn), (size), (prot)); \ +}) + +/* + * track_pfn_remap is called when a _new_ pfn mapping is being established + * by remap_pfn_range() for physical range indicated by pfn and size. + */ +extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, + unsigned long pfn, unsigned long addr, unsigned long size); + +/* + * track_pfn_insert is called when a _new_ single pfn is established + * by vm_insert_pfn(). + * + * This does not cover vm_insert_page so if some bad driver decides + * to use it on I/O memory we could get into trouble. + */ +extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn); + +/* + * track_pfn_copy is called when vma that is covering the pfnmap gets + * copied through copy_page_range(). + */ +static inline int track_pfn_copy(struct vm_area_struct *vma) +{ + return 0; +} + +/* + * untrack_pfn is called while unmapping a pfnmap for a region. 
+ * untrack can be called for a specific region indicated by pfn and size or + * can be for the entire vma (in which case pfn, size are zero). + */ +static inline void untrack_pfn(struct vm_area_struct *vma, + unsigned long pfn, unsigned long size) +{ +} + +/* + * untrack_pfn_moved is called while mremapping a pfnmap for a new region. + */ +static inline void untrack_pfn_moved(struct vm_area_struct *vma) +{ +} +#define MK_IOSPACE_PFN(space, pfn) (pfn) +#define GET_IOSPACE(pfn) 0 +#define GET_PFN(pfn) (pfn) + +#ifndef __ASSEMBLY__ + +#define NATIVE_VMALLOC_START (NATIVE_KERNEL_IMAGE_AREA_BASE + \ + 0x020000000000UL) + /* 0x0000 e400 0000 0000 */ +/* We need big enough vmalloc area since usage of pcpu_embed_first_chunk() + * on e2k leads to having pcpu area span large ranges, and vmalloc area + * should be able to span those same ranges (see pcpu_embed_first_chunk()). */ +#define NATIVE_VMALLOC_END (NATIVE_VMALLOC_START + 0x100000000000UL) + /* 0x0000 f400 0000 0000 */ +#define NATIVE_VMEMMAP_START NATIVE_VMALLOC_END + /* 0x0000 f400 0000 0000 */ +#define NATIVE_VMEMMAP_END (NATIVE_VMEMMAP_START + \ + (1ULL << (E2K_MAX_PHYS_BITS - PAGE_SHIFT)) * \ + sizeof(struct page)) + /* 0x0000 f800 0000 0000 - for 64 bytes struct page */ + /* 0x0000 fc00 0000 0000 - for 128 bytes struct page */ + +/* + * The module space starts from end of resident kernel image and + * both areas should be within 2 ** 30 bits of the virtual addresses. 
+ */ +#define MODULES_VADDR E2K_MODULES_START /* 0x0000 e200 0xxx x000 */ +#define MODULES_END E2K_MODULES_END /* 0x0000 e200 4000 0000 */ + +/* virtualization support */ +#include + +#define pte_clear_not_present_full(mm, addr, ptep, fullmm) \ +do { \ + u64 __pteval; \ + __pteval = _PAGE_INIT_VALID; \ + set_pte_not_present_at(mm, addr, ptep, __pte(__pteval)); \ +} while (0) + + +#define pte_clear(mm, addr, ptep) \ +do { \ + u64 __pteval; \ + __pteval = _PAGE_INIT_VALID; \ + set_pte_at(mm, addr, ptep, __pte(__pteval)); \ +} while (0) + +#if !defined(CONFIG_BOOT_E2K) && !defined(E2K_P2V) +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + int have_flush_dc_ic = cpu_has(CPU_FEAT_FLUSH_DC_IC); + int mm_users = atomic_read(&mm->mm_users); + pte_t oldpte; + + prefetch_offset(ptep, PREFETCH_STRIDE); + if (mm == &init_mm) { + /* In kernel there is no swap or thp, valid page + * is always mapped, so do not keep the valid bit. + * This is important because in kernel we cannot + * tolerate spurious page faults from h.-s. loads. */ + oldpte = __pte(pt_get_and_xchg_atomic(mm, addr, 0ull, (pgprot_t *) ptep)); + } else { + oldpte = __pte(pt_get_and_clear_atomic(mm, addr, (pgprot_t *) ptep)); + } + + /* mm_users check is for the fork() case: we do not + * want to spend time flushing when we are exiting. */ + if (have_flush_dc_ic && mm_users != 0 && pte_present_and_exec(oldpte)) + flush_pte_from_ic(oldpte); + + return oldpte; +} +#else +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + prefetch_offset(ptep, PREFETCH_STRIDE); + if (mm == &init_mm) { + /* In kernel there is no swap or thp, valid page + * is always mapped, so do not keep the valid bit. 
*/ + return __pte(pt_get_and_xchg_atomic(mm, addr, 0ull, (pgprot_t *) ptep)); + } else { + return __pte(pt_get_and_clear_atomic(mm, addr, (pgprot_t *) ptep)); + } +} +#endif + +#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP) +# define vmemmap ((struct page *)VMEMMAP_START) +#endif + +#include + +/* + * ZERO_PAGE is a global shared page that is always zero: used + * for zero-mapped memory areas etc.. + */ +extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; +extern struct page *zeroed_page; +extern u64 zero_page_nid_to_pfn[MAX_NUMNODES]; +extern struct page *zero_page_nid_to_page[MAX_NUMNODES]; + +#define ZERO_PAGE(vaddr) zeroed_page + +#define is_zero_pfn is_zero_pfn +static inline int is_zero_pfn(unsigned long pfn) +{ + int node; + + for_each_node_has_dup_kernel(node) + if (zero_page_nid_to_pfn[node] == pfn) + return 1; + + return 0; +} + +#define my_zero_pfn my_zero_pfn +static inline u64 my_zero_pfn(unsigned long addr) +{ + u64 pfn = 0; + int node = numa_node_id(); + + if (node_has_dup_kernel(node)) { + pfn = zero_page_nid_to_pfn[node]; + } else { + for_each_node_has_dup_kernel(node) { + pfn = zero_page_nid_to_pfn[node]; + break; + } + } + + return pfn; +} + +static inline int is_zero_page(struct page *page) +{ + int node; + + for_each_node_has_dup_kernel(node) + if (zero_page_nid_to_page[node] == page) + return 1; + + return 0; +} + +extern void paging_init(void); + +/* + * The index and offset in the root-level page table directory. + */ +/* to find an entry in a kernel root page-table-directory */ +#define pgd_offset_k(virt_addr) ((pgd_t *)kernel_root_pt + \ + pgd_index(virt_addr)) +#define boot_pgd_offset_k(virt_addr) ((pgd_t *)boot_root_pt + \ + boot_pgd_index(virt_addr)) +#ifdef CONFIG_NUMA +extern pgd_t *node_pgd_offset_kernel(int nid, e2k_addr_t virt_addr); +#else /* ! 
CONFIG_NUMA */
+#define node_pgd_offset_kernel(nid, virt_addr) \
+({ \
+ (nid); \
+ pgd_offset_k(virt_addr); \
+})
+#endif /* CONFIG_NUMA */
+
+#ifndef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT
+#define pgd_clear(pgdp) pgd_clear_one(pgdp)
+#else /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */
+/* Clear the copy of a user pgd held in the per-CPU kernel root page table.
+ * Returns 1 if the per-CPU copy was cleared, 0 if there was nothing to do. */
+static inline int
+pgd_clear_cpu_root_pt(pgd_t *pgd)
+{
+ pgd_t *pgd_table = pgd_to_page(pgd);
+ unsigned long pgd_ind;
+ pgd_t *cpu_pgd;
+
+ if (MMU_IS_SEPARATE_PT())
+ /* the CPU pgd is not a user pgd, and user pgds are not */
+ /* copied into any other PGDs */
+ return 0;
+ if (!THERE_IS_DUP_KERNEL)
+ return 0;
+ if (!current->active_mm || current->active_mm->pgd != pgd_table)
+ return 0;
+ pgd_ind = pgd_to_index(pgd);
+ cpu_pgd = &cpu_kernel_root_pt[pgd_ind];
+ if (pgd_none(*cpu_pgd)) {
+ pr_err("%s(): CPU #%u kernel root pgd %px already clean 0x%lx\n",
+ __func__, raw_smp_processor_id(),
+ cpu_pgd, pgd_val(*cpu_pgd));
+ }
+ pgd_clear_one(cpu_pgd);
+ return 1;
+}
+static inline void
+pgd_clear(pgd_t *pgd)
+{
+ unsigned long mask;
+
+ /*
+ * The PGD must be cleared in both root page tables (the main one
+ * and the per-CPU copy) atomically, so disable interrupts to
+ * protect against an SMP call for flush_tlb_all() arriving between
+ * the two clears while a CPU restores its per-CPU root PGD from
+ * the main one.  Otherwise the per-CPU PGD could be restored as
+ * clean while we wait for the not-yet-cleared state (see
+ * pgd_clear_cpu_root_pt() above).
+ */
+ raw_local_irq_save(mask);
+ pgd_clear_one(pgd); /* order of clearing is significant */
+ pgd_clear_cpu_root_pt(pgd); /* if interrupts were not disabled, */
+ /* a TLB flush could restore the */
+ /* second PGD from the first one */
+ raw_local_irq_restore(mask);
+}
+#endif /* ! CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */
+
+/*
+ * The index and offset in the upper page table directory.
+ */ +#define pud_offset(dir, address) ((pud_t *)pgd_page_vaddr(*(dir)) + \ + pud_index(address)) + +/* + * The index and offset in the middle page table directory + */ +#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \ + pmd_index(address)) + +/* + * The index and offset in the third-level page table. + */ +#define pte_offset_kernel(pmd, address) \ + ((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address)) +#define pte_offset_map(pmd, address) \ +({ \ + pte_t *__pom_pte = pte_offset_kernel((pmd), (address)); \ + prefetch_nospec(__pom_pte); \ + __pom_pte; \ +}) + +#define pte_unmap(pte) do { } while (0) + +#define boot_pte_index(virt_addr) pte_index(virt_addr) +#define boot_pte_offset(pmdp, addr) ((pte_t *)boot_pmd_page(*(pmdp)) + \ + boot_pte_index(addr)) + +/* + * Encode and de-code a swap entry + */ +static inline unsigned long +mmu_get_swap_offset(swp_entry_t swap_entry, bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return get_swap_offset_v6(swap_entry); + else + return get_swap_offset_v2(swap_entry); +} +static inline swp_entry_t +mmu_create_swap_entry(unsigned long type, unsigned long offset, bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return create_swap_entry_v6(type, offset); + else + return create_swap_entry_v2(type, offset); +} +static inline pte_t +mmu_convert_swap_entry_to_pte(swp_entry_t swap_entry, bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return convert_swap_entry_to_pte_v6(swap_entry); + else + return convert_swap_entry_to_pte_v2(swap_entry); +} +static inline unsigned long __swp_offset(swp_entry_t swap_entry) +{ + return mmu_get_swap_offset(swap_entry, MMU_IS_PT_V6()); +} +static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset) +{ + return mmu_create_swap_entry(type, offset, MMU_IS_PT_V6()); +} +static inline pte_t __swp_entry_to_pte(swp_entry_t swap_entry) +{ + return mmu_convert_swap_entry_to_pte(swap_entry, MMU_IS_PT_V6()); +} +static inline pmd_t __swp_entry_to_pmd(swp_entry_t swap_entry) +{ + return 
__pmd(pte_val(__swp_entry_to_pte(swap_entry))); +} + +/* + * atomic versions of the some PTE manipulations: + */ +static inline pte_t +native_do_get_pte_for_address(struct vm_area_struct *vma, e2k_addr_t address) +{ + probe_entry_t probe_pte; + + probe_pte = get_MMU_DTLB_ENTRY(address); + if (DTLB_ENTRY_TEST_SUCCESSFUL(probe_entry_val(probe_pte)) && + DTLB_ENTRY_TEST_VVA(probe_entry_val(probe_pte))) { + return __pte(_PAGE_SET_PRESENT(probe_entry_val(probe_pte))); + } else if (!DTLB_ENTRY_TEST_SUCCESSFUL(probe_entry_val(probe_pte))) { + return __pte(0); + } else { + return __pte(probe_entry_val(probe_pte)); + } +} + +static inline int +ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep) +{ + pte_t pte; + + prefetch_offset(ptep, PREFETCH_STRIDE); + pte_val(pte) = pt_clear_young_atomic(vma->vm_mm, addr, + (pgprot_t *)ptep); + return pte_young(pte); +} + +static inline void +ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + prefetch_offset(ptep, PREFETCH_STRIDE); + pt_set_wrprotect_atomic(mm, addr, (pgprot_t *) ptep); +} + +extern int ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, + pte_t entry, int dirty); + +#define pgd_addr_bound(addr) (((addr) + PGDIR_SIZE) & PGDIR_MASK) +#define pud_addr_bound(addr) (((addr) + PUD_SIZE) & PUD_MASK) +#define pmd_addr_bound(addr) (((addr) + PMD_SIZE) & PMD_MASK) + +#if defined CONFIG_TRANSPARENT_HUGEPAGE +# if !defined(CONFIG_BOOT_E2K) && !defined(E2K_P2V) +static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp) +{ + int have_flush_dc_ic = cpu_has(CPU_FEAT_FLUSH_DC_IC); + int mm_users = atomic_read(&mm->mm_users); + pmd_t oldpmd; + + if (mm == &init_mm) { + /* See comment in ptep_get_and_clear() */ + oldpmd = __pmd(pt_get_and_xchg_atomic(mm, addr, 0ull, (pgprot_t *) pmdp)); + } else { + oldpmd = __pmd(pt_get_and_clear_atomic(mm, addr, (pgprot_t *) pmdp)); + } + + /* mm_users check is 
for the fork() case: we do not + * want to spend time flushing when we are exiting. */ + if (have_flush_dc_ic && mm_users != 0 && + pmd_present_and_exec_and_huge(oldpmd)) + flush_pmd_from_ic(oldpmd); + + return oldpmd; +} +# else +static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp) +{ + if (mm == &init_mm) { + /* See comment in ptep_get_and_clear() */ + return __pmd(pt_get_and_xchg_atomic(mm, addr, 0ull, (pgprot_t *) pmdp)); + } else { + return __pmd(pt_get_and_clear_atomic(mm, addr, (pgprot_t *) pmdp)); + } +} +# endif +#endif + +/* interface functions to handle some things on the PT level */ +void split_simple_pmd_page(pgprot_t *ptp, pte_t *ptes[MAX_NUM_HUGE_PTES]); +void split_multiple_pmd_page(pgprot_t *ptp, pte_t *ptes[MAX_NUM_HUGE_PTES]); +void map_pud_huge_page_to_simple_pmds(pgprot_t *pmd_page, e2k_addr_t phys_page, + pgprot_t pgprot); +void map_pud_huge_page_to_multiple_pmds(pgprot_t *pmd_page, + e2k_addr_t phys_page, pgprot_t pgprot); + +#endif /* !(__ASSEMBLY__) */ + +extern void memmap_init(unsigned long size, int nid, unsigned long zone, + unsigned long start_pfn); + +#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0) + +#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE +#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS +#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS +#define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +#define __HAVE_ARCH_PTEP_SET_WRPROTECT +#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG +#define __HAVE_ARCH_PMDP_SET_WRPROTECT +#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR +#define __HAVE_PFNMAP_TRACKING +#include + + +static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp) +{ + pmd_t pmd; + + pmd_val(pmd) = pt_clear_young_atomic(vma->vm_mm, addr, + (pgprot_t *)pmdp); + return pmd_young(pmd); +} + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline void 
pmdp_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp) +{ + pt_set_wrprotect_atomic(mm, addr, (pgprot_t *)pmdp); +} + +#define pmdp_establish pmdp_establish +static inline pmd_t pmdp_establish(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, pmd_t pmd) +{ + return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd))); +} + +extern int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty); +#else +static inline void pmdp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ + BUILD_BUG(); +} + +static inline int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty) +{ + BUILD_BUG(); + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#endif /* !(_E2K_PGTABLE_H) */ diff --git a/arch/e2k/include/asm/pgtable_def.h b/arch/e2k/include/asm/pgtable_def.h new file mode 100644 index 000000000000..5583cce0eec6 --- /dev/null +++ b/arch/e2k/include/asm/pgtable_def.h @@ -0,0 +1,1321 @@ +/* + * pgtable_def.h: E2K page table common definitions. + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _ASM_E2K_PGTABLE_DEF_H +#define _ASM_E2K_PGTABLE_DEF_H + +/* + * This file contains the functions and defines necessary to modify and + * use the E2K page tables. + * NOTE: E2K has four levels of page tables. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TRACE_PT_UPDATES 0 +#if TRACE_PT_UPDATES +# define trace_pt_update(...) \ +do { \ + if (system_state == SYSTEM_RUNNING) \ + trace_printk(__VA_ARGS__); \ +} while (0) +#else +# define trace_pt_update(...) do { } while (0) +#endif + +#ifndef __ASSEMBLY__ + +/* max. number of physical address bits (architected) */ +static inline int +mmu_max_phys_addr_bits(bool mmu_pt_v6) +{ + return (mmu_pt_v6) ? 
E2K_MAX_PHYS_BITS_V6 : E2K_MAX_PHYS_BITS_V2;
+}
+static inline int
+e2k_max_phys_addr_bits(void)
+{
+ return mmu_max_phys_addr_bits(MMU_IS_PT_V6());
+}
+#define E2K_PHYS_BITS_NUM e2k_max_phys_addr_bits()
+#define E2K_MAX_PHYS_BITS E2K_MAX_PHYS_BITS_V6
+
+/*
+ * Hardware MMU page tables differ somewhat from one ISET to another;
+ * moreover, each MMU supports a few different page table formats:
+ * native (primary)
+ * secondary page tables for several modes (VA32, VA48, PA32, PA48 ...)
+ * The following interface manages the page tables as a common object.
+ */
+
+static inline const pt_level_t *
+get_pt_level_on_id(int level_id)
+{
+ /* for now the PT level id is simply the level number */
+ return get_pt_struct_level_on_id(&pgtable_struct, level_id);
+}
+
+static inline bool
+is_huge_pmd_level(void)
+{
+ return is_huge_pt_struct_level(&pgtable_struct, E2K_PMD_LEVEL_NUM);
+}
+
+static inline bool
+is_huge_pud_level(void)
+{
+ return is_huge_pt_struct_level(&pgtable_struct, E2K_PUD_LEVEL_NUM);
+}
+
+static inline bool
+is_huge_pgd_level(void)
+{
+ return is_huge_pt_struct_level(&pgtable_struct, E2K_PGD_LEVEL_NUM);
+}
+
+static inline e2k_size_t
+get_e2k_pt_level_size(int level_id)
+{
+ return get_pt_struct_level_size(&pgtable_struct, level_id);
+}
+static inline e2k_size_t
+get_pgd_level_size(void)
+{
+ return get_e2k_pt_level_size(E2K_PGD_LEVEL_NUM);
+}
+static inline e2k_size_t
+get_pud_level_size(void)
+{
+ return get_e2k_pt_level_size(E2K_PUD_LEVEL_NUM);
+}
+static inline e2k_size_t
+get_pmd_level_size(void)
+{
+ return get_e2k_pt_level_size(E2K_PMD_LEVEL_NUM);
+}
+static inline e2k_size_t
+get_pte_level_size(void)
+{
+ return get_e2k_pt_level_size(E2K_PTE_LEVEL_NUM);
+}
+
+static inline e2k_size_t
+get_e2k_pt_level_page_size(int level_id)
+{
+ return get_pt_struct_level_page_size(&pgtable_struct, level_id);
+}
+static inline e2k_size_t
+get_pgd_level_page_size(void)
+{
+ return get_e2k_pt_level_page_size(E2K_PGD_LEVEL_NUM);
+}
+static inline e2k_size_t
+get_pud_level_page_size(void)
+{
+ 
return get_e2k_pt_level_page_size(E2K_PUD_LEVEL_NUM); +} +static inline e2k_size_t +get_pmd_level_page_size(void) +{ + return get_e2k_pt_level_page_size(E2K_PMD_LEVEL_NUM); +} +static inline e2k_size_t +get_pte_level_page_size(void) +{ + return get_e2k_pt_level_page_size(E2K_PTE_LEVEL_NUM); +} + +static inline int +get_e2k_pt_level_huge_ptes_num(int level_id) +{ + return get_pt_struct_level_huge_ptes_num(&pgtable_struct, level_id); +} +static inline int +get_pgd_level_huge_ptes_num(void) +{ + return get_e2k_pt_level_huge_ptes_num(E2K_PGD_LEVEL_NUM); +} +static inline int +get_pud_level_huge_ptes_num(void) +{ + return get_e2k_pt_level_huge_ptes_num(E2K_PUD_LEVEL_NUM); +} +static inline int +get_pmd_level_huge_ptes_num(void) +{ + return get_e2k_pt_level_huge_ptes_num(E2K_PMD_LEVEL_NUM); +} + +/* + * PTE format + */ + +static inline pteval_t +mmu_phys_addr_to_pte_pfn(e2k_addr_t phys_addr, bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return _PAGE_PADDR_TO_PFN_V6(phys_addr); + else + return _PAGE_PADDR_TO_PFN_V2(phys_addr); +} +static inline e2k_addr_t +mmu_pte_pfn_to_phys_addr(pteval_t pte_val, bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return _PAGE_PFN_TO_PADDR_V6(pte_val); + else + return _PAGE_PFN_TO_PADDR_V2(pte_val); +} + +static inline pteval_t +phys_addr_to_pte_pfn(e2k_addr_t phys_addr) +{ + return mmu_phys_addr_to_pte_pfn(phys_addr, MMU_IS_PT_V6()); +} +static inline e2k_addr_t +pte_pfn_to_phys_addr(pteval_t pte_val) +{ + return mmu_pte_pfn_to_phys_addr(pte_val, MMU_IS_PT_V6()); +} +#define _PAGE_PADDR_TO_PFN(phys_addr) phys_addr_to_pte_pfn(phys_addr) +#define _PAGE_PFN_TO_PADDR(pte_val) pte_pfn_to_phys_addr(pte_val) + +static inline pteval_t +mmu_kernel_protected_text_pte_val(pteval_t kernel_text_pte_val, e2k_addr_t cui, + bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return convert_kernel_text_pte_val_v6_to_protected( + kernel_text_pte_val); + else + return convert_kernel_text_pte_val_v2_to_protected( + kernel_text_pte_val, cui); +} +static inline pteval_t 
+kernel_protected_text_pte_val(pteval_t kernel_text_pte_val, e2k_addr_t cui) +{ + return mmu_kernel_protected_text_pte_val(kernel_text_pte_val, cui, + MMU_IS_PT_V6()); +} +#define _PAGE_KERNEL_PROT_TEXT(kernel_text_pte_val, cui) \ + kernel_protected_text_pte_val(kernel_text_pte_val, cui) + +/* PTE Memory Type */ +static inline enum pte_mem_type get_pte_val_memory_type(pteval_t pte_val) +{ + if (MMU_IS_PT_V6()) + return get_pte_val_v6_memory_type(pte_val); + else + return get_pte_val_v2_memory_type(pte_val); +} +static inline pteval_t set_pte_val_memory_type(pteval_t pte_val, + pte_mem_type_t memory_type) +{ + if (MMU_IS_PT_V6()) + return set_pte_val_v6_memory_type(pte_val, memory_type); + else + return set_pte_val_v2_memory_type(pte_val, memory_type); +} +#define _PAGE_GET_MEM_TYPE(pte_val) \ + get_pte_val_memory_type(pte_val) +#define _PAGE_SET_MEM_TYPE(pte_val, memory_type) \ + set_pte_val_memory_type(pte_val, memory_type) + +static inline pteval_t +mmu_fill_pte_val_flags(const uni_pteval_t uni_flags, bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return fill_pte_val_v6_flags(uni_flags); + else + return fill_pte_val_v2_flags(uni_flags); +} +static inline pteval_t +mmu_get_pte_val_flags(pteval_t pte_val, const uni_pteval_t uni_flags, + bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return get_pte_val_v6_flags(pte_val, uni_flags); + else + return get_pte_val_v2_flags(pte_val, uni_flags); +} +static inline bool +mmu_test_pte_val_flags(pteval_t pte_val, const uni_pteval_t uni_flags, + bool mmu_pt_v6) +{ + return mmu_get_pte_val_flags(pte_val, uni_flags, mmu_pt_v6) != 0; +} +static inline pteval_t +mmu_set_pte_val_flags(pteval_t pte_val, const uni_pteval_t uni_flags, + bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return set_pte_val_v6_flags(pte_val, uni_flags); + else + return set_pte_val_v2_flags(pte_val, uni_flags); +} +static inline pteval_t +mmu_clear_pte_val_flags(pteval_t pte_val, const uni_pteval_t uni_flags, + bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return 
clear_pte_val_v6_flags(pte_val, uni_flags); + else + return clear_pte_val_v2_flags(pte_val, uni_flags); +} +static __must_check inline pteval_t +fill_pte_val_flags(const uni_pteval_t uni_flags) +{ + return mmu_fill_pte_val_flags(uni_flags, MMU_IS_PT_V6()); +} +static __must_check inline pteval_t +get_pte_val_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return mmu_get_pte_val_flags(pte_val, uni_flags, MMU_IS_PT_V6()); +} +static __must_check inline bool +test_pte_val_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return mmu_test_pte_val_flags(pte_val, uni_flags, MMU_IS_PT_V6()); +} +static __must_check inline pteval_t +set_pte_val_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return mmu_set_pte_val_flags(pte_val, uni_flags, MMU_IS_PT_V6()); +} +static __must_check inline pteval_t +clear_pte_val_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return mmu_clear_pte_val_flags(pte_val, uni_flags, MMU_IS_PT_V6()); +} +#define _PAGE_INIT(uni_flags) fill_pte_val_flags(uni_flags) +#define _PAGE_GET(pte_val, uni_flags) get_pte_val_flags(pte_val, uni_flags) +#define _PAGE_TEST(pte_val, uni_flags) test_pte_val_flags(pte_val, uni_flags) +#define _PAGE_SET(pte_val, uni_flags) set_pte_val_flags(pte_val, uni_flags) +#define _PAGE_CLEAR(pte_val, uni_flags) clear_pte_val_flags(pte_val, uni_flags) + +static inline pteval_t +mmu_get_pte_val_changeable_mask(bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return get_pte_val_v6_changeable_mask(); + else + return get_pte_val_v2_changeable_mask(); +} +static inline pteval_t +mmu_get_huge_pte_val_changeable_mask(bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return get_huge_pte_val_v6_changeable_mask(); + else + return get_huge_pte_val_v2_changeable_mask(); +} +static inline pteval_t +mmu_get_pte_val_reduceable_mask(bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return get_pte_val_v6_reduceable_mask(); + else + return get_pte_val_v2_reduceable_mask(); +} +static inline pteval_t +mmu_get_pte_val_restricted_mask(bool 
mmu_pt_v6) +{ + if (mmu_pt_v6) + return get_pte_val_v6_restricted_mask(); + else + return get_pte_val_v2_restricted_mask(); +} +static inline pteval_t +get_pte_val_changeable_mask(void) +{ + return mmu_get_pte_val_changeable_mask(MMU_IS_PT_V6()); +} +static inline pteval_t +get_huge_pte_val_changeable_mask(void) +{ + return mmu_get_huge_pte_val_changeable_mask(MMU_IS_PT_V6()); +} +static inline pteval_t +get_pte_val_reduceable_mask(void) +{ + return mmu_get_pte_val_reduceable_mask(MMU_IS_PT_V6()); +} +static inline pteval_t +get_pte_val_restricted_mask(void) +{ + return mmu_get_pte_val_restricted_mask(MMU_IS_PT_V6()); +} + +#define _PAGE_CHG_MASK get_pte_val_changeable_mask() +#define _HPAGE_CHG_MASK get_huge_pte_val_changeable_mask() +#define _PROT_REDUCE_MASK get_pte_val_reduceable_mask() +#define _PROT_RESTRICT_MASK get_pte_val_restricted_mask() + +/* some the most popular PTEs */ +#define _PAGE_INIT_VALID _PAGE_INIT(UNI_PAGE_VALID) +#define _PAGE_GET_VALID(pte_val) _PAGE_GET(pte_val, UNI_PAGE_VALID) +#define _PAGE_TEST_VALID(pte_val) _PAGE_TEST(pte_val, UNI_PAGE_VALID) +#define _PAGE_SET_VALID(pte_val) _PAGE_SET(pte_val, UNI_PAGE_VALID) +#define _PAGE_CLEAR_VALID(pte_val) _PAGE_CLEAR(pte_val, UNI_PAGE_VALID) + +#define _PAGE_INIT_PRESENT _PAGE_INIT(UNI_PAGE_PRESENT) +#define _PAGE_GET_PRESENT(pte_val) _PAGE_GET(pte_val, UNI_PAGE_PRESENT) +#define _PAGE_TEST_PRESENT(pte_val) _PAGE_TEST(pte_val, UNI_PAGE_PRESENT) +#define _PAGE_SET_PRESENT(pte_val) _PAGE_SET(pte_val, UNI_PAGE_PRESENT) +#define _PAGE_CLEAR_PRESENT(pte_val) _PAGE_CLEAR(pte_val, UNI_PAGE_PRESENT) + +#define _PAGE_INIT_PROTNONE _PAGE_INIT(UNI_PAGE_PROTNONE) +#define _PAGE_GET_PROTNONE(pte_val) _PAGE_GET(pte_val, UNI_PAGE_PROTNONE) +#define _PAGE_TEST_PROTNONE(pte_val) _PAGE_TEST(pte_val, UNI_PAGE_PROTNONE) +#define _PAGE_SET_PROTNONE(pte_val) _PAGE_SET(pte_val, UNI_PAGE_PROTNONE) +#define _PAGE_CLEAR_PROTNONE(pte_val) _PAGE_CLEAR(pte_val, UNI_PAGE_PROTNONE) + +#define _PAGE_INIT_WRITEABLE 
_PAGE_INIT(UNI_PAGE_WRITE) +#define _PAGE_GET_WRITEABLE(pte_val) _PAGE_GET(pte_val, UNI_PAGE_WRITE) +#define _PAGE_TEST_WRITEABLE(pte_val) _PAGE_TEST(pte_val, UNI_PAGE_WRITE) +#define _PAGE_SET_WRITEABLE(pte_val) _PAGE_SET(pte_val, UNI_PAGE_WRITE) +#define _PAGE_CLEAR_WRITEABLE(pte_val) _PAGE_CLEAR(pte_val, UNI_PAGE_WRITE) + +#define _PAGE_INIT_PRIV _PAGE_INIT(UNI_PAGE_PRIV) +#define _PAGE_GET_PRIV(pte_val) _PAGE_GET(pte_val, UNI_PAGE_PRIV) +#define _PAGE_TEST_PRIV(pte_val) _PAGE_TEST(pte_val, UNI_PAGE_PRIV) +#define _PAGE_SET_PRIV(pte_val) _PAGE_SET(pte_val, UNI_PAGE_PRIV) +#define _PAGE_CLEAR_PRIV(pte_val) _PAGE_CLEAR(pte_val, UNI_PAGE_PRIV) + +#define _PAGE_INIT_ACCESSED _PAGE_INIT(UNI_PAGE_ACCESSED) +#define _PAGE_GET_ACCESSED(pte_val) _PAGE_GET(pte_val, UNI_PAGE_ACCESSED) +#define _PAGE_TEST_ACCESSED(pte_val) _PAGE_TEST(pte_val, UNI_PAGE_ACCESSED) +#define _PAGE_SET_ACCESSED(pte_val) _PAGE_SET(pte_val, UNI_PAGE_ACCESSED) +#define _PAGE_CLEAR_ACCESSED(pte_val) _PAGE_CLEAR(pte_val, UNI_PAGE_ACCESSED) + +#define _PAGE_INIT_DIRTY _PAGE_INIT(UNI_PAGE_DIRTY) +#define _PAGE_GET_DIRTY(pte_val) _PAGE_GET(pte_val, UNI_PAGE_DIRTY) +#define _PAGE_TEST_DIRTY(pte_val) _PAGE_TEST(pte_val, UNI_PAGE_DIRTY) +#define _PAGE_SET_DIRTY(pte_val) _PAGE_SET(pte_val, UNI_PAGE_DIRTY) +#define _PAGE_CLEAR_DIRTY(pte_val) _PAGE_CLEAR(pte_val, UNI_PAGE_DIRTY) + +#define _PAGE_INIT_HUGE _PAGE_INIT(UNI_PAGE_HUGE) +#define _PAGE_GET_HUGE(pte_val) _PAGE_GET(pte_val, UNI_PAGE_HUGE) +#define _PAGE_TEST_HUGE(pte_val) _PAGE_TEST(pte_val, UNI_PAGE_HUGE) +#define _PAGE_SET_HUGE(pte_val) _PAGE_SET(pte_val, UNI_PAGE_HUGE) +#define _PAGE_CLEAR_HUGE(pte_val) _PAGE_CLEAR(pte_val, UNI_PAGE_HUGE) + +#define _PAGE_INIT_NOT_EXEC _PAGE_INIT(UNI_PAGE_NON_EX) +#define _PAGE_GET_NOT_EXEC(pte_val) _PAGE_GET(pte_val, UNI_PAGE_NON_EX) +#define _PAGE_TEST_NOT_EXEC(pte_val) _PAGE_TEST(pte_val, UNI_PAGE_NON_EX) +#define _PAGE_SET_NOT_EXEC(pte_val) _PAGE_SET(pte_val, UNI_PAGE_NON_EX) +#define 
_PAGE_CLEAR_NOT_EXEC(pte_val) _PAGE_CLEAR(pte_val, UNI_PAGE_NON_EX) + +#define _PAGE_INIT_EXECUTEABLE ((pteval_t)0ULL) +#define _PAGE_TEST_EXECUTEABLE(pte_val) (!_PAGE_TEST_NOT_EXEC(pte_val)) +#define _PAGE_SET_EXECUTEABLE(pte_val) _PAGE_CLEAR_NOT_EXEC(pte_val) +#define _PAGE_CLEAR_EXECUTEABLE(pte_val) _PAGE_SET_NOT_EXEC(pte_val) + +#define _PAGE_INIT_SPECIAL _PAGE_INIT(UNI_PAGE_SPECIAL) +#define _PAGE_GET_SPECIAL(pte_val) _PAGE_GET(pte_val, UNI_PAGE_SPECIAL) +#define _PAGE_TEST_SPECIAL(pte_val) _PAGE_TEST(pte_val, UNI_PAGE_SPECIAL) +#define _PAGE_SET_SPECIAL(pte_val) _PAGE_SET(pte_val, UNI_PAGE_SPECIAL) +#define _PAGE_CLEAR_SPECIAL(pte_val) _PAGE_CLEAR(pte_val, UNI_PAGE_SPECIAL) + +#define _PAGE_PFN_MASK _PAGE_INIT(UNI_PAGE_PFN) + +#define _PAGE_KERNEL_RX_NOT_GLOB \ + _PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_VALID | \ + UNI_PAGE_PRIV | UNI_PAGE_HW_ACCESS) +#define _PAGE_KERNEL_RO_NOT_GLOB \ + _PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_VALID | \ + UNI_PAGE_PRIV | UNI_PAGE_HW_ACCESS | \ + UNI_PAGE_NON_EX) +#define _PAGE_KERNEL_RWX_NOT_GLOB \ + _PAGE_SET(_PAGE_KERNEL_RX_NOT_GLOB, \ + UNI_PAGE_WRITE | UNI_PAGE_DIRTY) +#define _PAGE_KERNEL_RW_NOT_GLOB \ + _PAGE_SET(_PAGE_KERNEL_RWX_NOT_GLOB, UNI_PAGE_NON_EX) +#define _PAGE_KERNEL_HUGE_RW_NOT_GLOB \ + _PAGE_SET(_PAGE_KERNEL_RW_NOT_GLOB, UNI_PAGE_HUGE) +#ifdef CONFIG_GLOBAL_CONTEXT +#define _PAGE_KERNEL_RX \ + _PAGE_SET(_PAGE_KERNEL_RX_NOT_GLOB, UNI_PAGE_GLOBAL) +#define _PAGE_KERNEL_RO \ + _PAGE_SET(_PAGE_KERNEL_RO_NOT_GLOB, UNI_PAGE_GLOBAL) +#define _PAGE_KERNEL_RWX \ + _PAGE_SET(_PAGE_KERNEL_RWX_NOT_GLOB, UNI_PAGE_GLOBAL) +#define _PAGE_KERNEL_RW \ + _PAGE_SET(_PAGE_KERNEL_RW_NOT_GLOB, UNI_PAGE_GLOBAL) +#define _PAGE_KERNEL_HUGE_RW \ + _PAGE_SET(_PAGE_KERNEL_HUGE_RW_NOT_GLOB, UNI_PAGE_GLOBAL) +#else /* ! 
CONFIG_GLOBAL_CONTEXT */ +#define _PAGE_KERNEL_RX _PAGE_KERNEL_RX_NOT_GLOB +#define _PAGE_KERNEL_RO _PAGE_KERNEL_RO_NOT_GLOB +#define _PAGE_KERNEL_RWX _PAGE_KERNEL_RWX_NOT_GLOB +#define _PAGE_KERNEL_RW _PAGE_KERNEL_RW_NOT_GLOB +#define _PAGE_KERNEL_HUGE_RW _PAGE_KERNEL_HUGE_RW_NOT_GLOB +#endif /* CONFIG_GLOBAL_CONTEXT */ + +#define _PAGE_KERNEL _PAGE_KERNEL_RW +#define _PAGE_KERNEL_HUGE _PAGE_KERNEL_HUGE_RW +#define _PAGE_KERNEL_IMAGE _PAGE_KERNEL_RX +#define _PAGE_KERNEL_PT _PAGE_KERNEL +#define _PAGE_USER_PT _PAGE_KERNEL_RW_NOT_GLOB +#define _PAGE_KERNEL_PTE _PAGE_KERNEL_PT +#define _PAGE_KERNEL_PMD _PAGE_KERNEL_PT +#define _PAGE_KERNEL_PUD _PAGE_KERNEL_PT +#define _PAGE_USER_PTE _PAGE_USER_PT +#define _PAGE_USER_PMD _PAGE_USER_PT +#define _PAGE_USER_PUD _PAGE_USER_PT + +#define _PAGE_IO_MAP_BASE _PAGE_KERNEL_RW +#define _PAGE_IO_MAP \ + _PAGE_SET_MEM_TYPE(_PAGE_IO_MAP_BASE, EXT_NON_PREFETCH_MT) +#define _PAGE_IO_PORTS \ + _PAGE_SET_MEM_TYPE(_PAGE_IO_MAP_BASE, EXT_NON_PREFETCH_MT) + +#define _PAGE_KERNEL_SWITCHING_IMAGE \ + _PAGE_SET_MEM_TYPE(_PAGE_KERNEL_RX_NOT_GLOB, EXT_CONFIG_MT) + +#define _PAGE_PROT_MASK \ + _PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_VALID | \ + UNI_PAGE_WRITE | UNI_PAGE_NON_EX) + +#define _PAGE_USER \ + _PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_VALID | \ + UNI_PAGE_WRITE | UNI_PAGE_DIRTY | UNI_PAGE_NON_EX) +#define _PAGE_USER_RO_ACCESSED \ + _PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_VALID | \ + UNI_PAGE_HW_ACCESS | UNI_PAGE_NON_EX) + +#define PAGE_KERNEL __pgprot(_PAGE_KERNEL) +#define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL_RO) +#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_RWX) +#define PAGE_KERNEL_LARGE __pgprot(_PAGE_KERNEL_HUGE) +#define PAGE_KERNEL_PTE __pgprot(_PAGE_KERNEL_PTE) +#define PAGE_KERNEL_PMD __pgprot(_PAGE_KERNEL_PMD) +#define PAGE_KERNEL_PUD __pgprot(_PAGE_KERNEL_PUD) +#define PAGE_USER_PTE __pgprot(_PAGE_USER_PTE) +#define PAGE_USER_PMD __pgprot(_PAGE_USER_PMD) +#define PAGE_USER_PUD __pgprot(_PAGE_USER_PUD) + +#define 
PAGE_KERNEL_NOCACHE PAGE_IO_MAP + +#define PAGE_USER __pgprot(_PAGE_USER) +#define PAGE_USER_RO_ACCESSED __pgprot(_PAGE_USER_RO_ACCESSED) + +#define PAGE_KERNEL_TEXT __pgprot(_PAGE_KERNEL_IMAGE) + +#define PAGE_KERNEL_DATA \ + __pgprot(_PAGE_SET(_PAGE_KERNEL_IMAGE, \ + UNI_PAGE_WRITE | UNI_PAGE_DIRTY | \ + UNI_PAGE_NON_EX)) +#define PAGE_KERNEL_STACK \ + __pgprot(_PAGE_SET(_PAGE_KERNEL, UNI_PAGE_NON_EX)) + +#define PAGE_USER_HWS __pgprot(_PAGE_KERNEL_RW_NOT_GLOB) +#define PAGE_USER_PS PAGE_USER_HWS +#define PAGE_USER_PCS PAGE_USER_HWS +#define PAGE_USER_STACK \ + __pgprot(_PAGE_SET(_PAGE_USER, UNI_PAGE_NON_EX)) + +#define PAGE_TAG_MEMORY __pgprot(_PAGE_KERNEL_RW_NOT_GLOB) + +#define PAGE_BOOTINFO \ + __pgprot(_PAGE_SET(_PAGE_KERNEL_IMAGE, UNI_PAGE_NON_EX)) + +#define PAGE_INITRD \ + __pgprot(_PAGE_SET(_PAGE_KERNEL_IMAGE, UNI_PAGE_NON_EX)) + +#define PAGE_MPT \ + __pgprot(_PAGE_SET(_PAGE_KERNEL_IMAGE, UNI_PAGE_NON_EX)) + +#define PAGE_KERNEL_NAMETAB \ + __pgprot(_PAGE_SET(_PAGE_KERNEL_IMAGE, UNI_PAGE_NON_EX)) + +#define PAGE_MAPPED_PHYS_MEM __pgprot(_PAGE_KERNEL) + +#define PAGE_CNTP_MAPPED_MEM \ + __pgprot(_PAGE_SET(_PAGE_KERNEL_IMAGE, UNI_PAGE_NON_EX)) + +#define PAGE_X86_IO_PORTS __pgprot(_PAGE_IO_PORTS) + +#define PAGE_IO_MAP __pgprot(_PAGE_IO_MAP) + +#define PAGE_KERNEL_SWITCHING_TEXT __pgprot(_PAGE_KERNEL_SWITCHING_IMAGE) +#define PAGE_KERNEL_SWITCHING_DATA \ + __pgprot(_PAGE_SET(_PAGE_KERNEL_SWITCHING_IMAGE, \ + UNI_PAGE_WRITE | UNI_PAGE_NON_EX)) +#define PAGE_KERNEL_SWITCHING_US_STACK \ + __pgprot(_PAGE_SET_MEM_TYPE(_PAGE_KERNEL_RW_NOT_GLOB, \ + EXT_CONFIG_MT)) + +#define PAGE_SHARED \ + __pgprot(_PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_VALID | \ + UNI_PAGE_HW_ACCESS | UNI_PAGE_SW_ACCESS | \ + UNI_PAGE_WRITE | UNI_PAGE_NON_EX)) +#define PAGE_SHARED_EX \ + __pgprot(_PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_VALID | \ + UNI_PAGE_HW_ACCESS | UNI_PAGE_SW_ACCESS | \ + UNI_PAGE_WRITE)) +#define PAGE_COPY_NEX \ + __pgprot(_PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_VALID | \ 
+ UNI_PAGE_HW_ACCESS | UNI_PAGE_SW_ACCESS | \ + UNI_PAGE_NON_EX)) +#define PAGE_COPY_EX \ + __pgprot(_PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_VALID | \ + UNI_PAGE_HW_ACCESS | UNI_PAGE_SW_ACCESS)) + +#define PAGE_COPY PAGE_COPY_NEX + +#define PAGE_READONLY \ + __pgprot(_PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_VALID | \ + UNI_PAGE_HW_ACCESS | UNI_PAGE_SW_ACCESS | \ + UNI_PAGE_NON_EX)) +#define PAGE_EXECUTABLE \ + __pgprot(_PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_VALID | \ + UNI_PAGE_HW_ACCESS | UNI_PAGE_SW_ACCESS)) + +/* + * PAGE_NONE is used for NUMA hinting faults and should be valid. + * __P000/__S000 are used for mmap(PROT_NONE) mapping and should be not valid. + */ +#define PAGE_ENPTY __pgprot(0ULL) +#define PAGE_NONE \ + __pgprot(_PAGE_INIT(UNI_PAGE_PROTNONE | UNI_PAGE_HW_ACCESS | \ + UNI_PAGE_VALID)) +#define PAGE_NONE_INVALID \ + __pgprot(_PAGE_INIT(UNI_PAGE_PROTNONE | UNI_PAGE_HW_ACCESS)) + +#define PAGE_INT_PR \ + __pgprot(_PAGE_INIT(UNI_PAGE_INT_PR)) + +/* + * Next come the mappings that determine how mmap() protection bits + * (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented. The + * _P version gets used for a private shared memory segment, the _S + * version gets used for a shared memory segment with MAP_SHARED on. + * In a private shared memory segment, we do a copy-on-write if a task + * attempts to write to the page. 
+ */ + /* initial boot-time page protections are not used, */ + /* so can be set to empty state */ + /* xwr */ +#define __P000 PAGE_ENPTY /* PAGE_NONE_INVALID */ +#define __P001 PAGE_ENPTY /* PAGE_READONLY */ +#define __P010 PAGE_ENPTY /* PAGE_COPY_NEX */ +#define __P011 PAGE_ENPTY /* PAGE_COPY_NEX */ +#define __P100 PAGE_ENPTY /* PAGE_EXECUTABLE */ +#define __P101 PAGE_ENPTY /* PAGE_EXECUTABLE */ +#define __P110 PAGE_ENPTY /* PAGE_COPY_EX */ +#define __P111 PAGE_ENPTY /* PAGE_COPY_EX */ + +#define __S000 PAGE_ENPTY /* PAGE_NONE_INVALID */ +#define __S001 PAGE_ENPTY /* PAGE_READONLY */ +#define __S010 PAGE_ENPTY /* PAGE_SHARED */ +#define __S011 PAGE_ENPTY /* PAGE_SHARED */ +#define __S100 PAGE_ENPTY /* PAGE_EXECUTABLE */ +#define __S101 PAGE_ENPTY /* PAGE_EXECUTABLE */ +#define __S110 PAGE_ENPTY /* PAGE_SHARED_EX */ +#define __S111 PAGE_ENPTY /* PAGE_SHARED_EX */ + +/* real protection map */ + /* xwr */ +#define PROT_MAP_P000 PAGE_NONE_INVALID +#define PROT_MAP_P001 PAGE_READONLY +#define PROT_MAP_P010 PAGE_COPY_NEX +#define PROT_MAP_P011 PAGE_COPY_NEX +#define PROT_MAP_P100 PAGE_EXECUTABLE +#define PROT_MAP_P101 PAGE_EXECUTABLE +#define PROT_MAP_P110 PAGE_COPY_EX +#define PROT_MAP_P111 PAGE_COPY_EX + +#define PROT_MAP_S000 PAGE_NONE_INVALID +#define PROT_MAP_S001 PAGE_READONLY +#define PROT_MAP_S010 PAGE_SHARED +#define PROT_MAP_S011 PAGE_SHARED +#define PROT_MAP_S100 PAGE_EXECUTABLE +#define PROT_MAP_S101 PAGE_EXECUTABLE +#define PROT_MAP_S110 PAGE_SHARED_EX +#define PROT_MAP_S111 PAGE_SHARED_EX + +static inline void +create_protection_map(pgprot_t prot_map[16]) +{ + prot_map[0] = PROT_MAP_P000; + prot_map[1] = PROT_MAP_P001; + prot_map[2] = PROT_MAP_P010; + prot_map[3] = PROT_MAP_P011; + prot_map[4] = PROT_MAP_P100; + prot_map[5] = PROT_MAP_P101; + prot_map[6] = PROT_MAP_P110; + prot_map[7] = PROT_MAP_P111; + + prot_map[8 + 0] = PROT_MAP_S000; + prot_map[8 + 1] = PROT_MAP_S001; + prot_map[8 + 2] = PROT_MAP_S010; + prot_map[8 + 3] = PROT_MAP_S011; + prot_map[8 + 
4] = PROT_MAP_S100; + prot_map[8 + 5] = PROT_MAP_S101; + prot_map[8 + 6] = PROT_MAP_S110; + prot_map[8 + 7] = PROT_MAP_S111; +} + +#define pgd_ERROR(e) \ + do { \ + pr_warn("%s:%d: bad pgd 0x%016lx.\n", \ + __FILE__, __LINE__, pgd_val(e)); \ + dump_stack(); \ + } while (0) +#define pud_ERROR(e) \ + do { \ + pr_warn("%s:%d: bad pud 0x%016lx.\n", \ + __FILE__, __LINE__, pud_val(e)); \ + dump_stack(); \ + } while (0) +#define pmd_ERROR(e) \ + do { \ + pr_warn("%s:%d: bad pmd 0x%016lx.\n", \ + __FILE__, __LINE__, pmd_val(e)); \ + dump_stack(); \ + } while (0) +#define pte_ERROR(e) \ + do { \ + pr_warn("%s:%d: bad pte 0x%016lx.\n", \ + __FILE__, __LINE__, pte_val(e)); \ + dump_stack(); \ + } while (0) + +/* + * This takes a physical page address and protection bits to make + * pte/pmd/pud/pgd + */ +#define mk_pte_phys(phys_addr, pgprot) \ + (__pte(_PAGE_PADDR_TO_PFN(phys_addr) | pgprot_val(pgprot))) +#define mk_pmd_phys(phys_addr, pgprot) \ + (__pmd(_PAGE_PADDR_TO_PFN(phys_addr) | pgprot_val(pgprot))) +#define mk_pud_phys(phys_addr, pgprot) \ + (__pud(_PAGE_PADDR_TO_PFN(phys_addr) | pgprot_val(pgprot))) +#define mk_pgd_phys(phys_addr, pgprot) \ + (__pgd(_PAGE_PADDR_TO_PFN(phys_addr) | pgprot_val(pgprot))) + +#define mk_pmd_addr(virt_addr, pgprot) \ + (__pmd(_PAGE_PADDR_TO_PFN(__pa(virt_addr)) | pgprot_val(pgprot))) +#define mk_pud_addr(virt_addr, pgprot) \ + (__pud(_PAGE_PADDR_TO_PFN(__pa(virt_addr)) | pgprot_val(pgprot))) +#define mk_pgd_addr(virt_addr, pgprot) \ + (__pgd(_PAGE_PADDR_TO_PFN(__pa(virt_addr)) | pgprot_val(pgprot))) + +/* + * Conversion functions: convert page frame number (pfn) and + * a protection value to a page table entry (pte). + */ +#define pfn_pte(pfn, pgprot) mk_pte_phys((pfn) << PAGE_SHIFT, pgprot) +#define pfn_pmd(pfn, pgprot) mk_pmd_phys((pfn) << PAGE_SHIFT, pgprot) + +/* + * Currently all these mappings correlate to what arm64 uses + * and there must be a good reason to use anything else. 
+ * + * Any changes here should take into account set_general_mt() + * and set_external_mt(). + */ +#define pgprot_device(prot) \ + (__pgprot(_PAGE_SET_MEM_TYPE(pgprot_val(prot), EXT_NON_PREFETCH_MT))) +#define pgprot_noncached(prot) \ + (__pgprot(_PAGE_SET_MEM_TYPE(pgprot_val(prot), GEN_NON_CACHE_ORDERED_MT))) +/* pgprot_writecombine() can be used both for RAM and devices, and while + * "general" memory type can be used for devices using "external" type + * for RAM is prohibited as it disables cache snooping. So by default + * use "general" memory type for it. */ +#define pgprot_writecombine(prot) \ + __pgprot(_PAGE_SET_MEM_TYPE(pgprot_val(prot), GEN_NON_CACHE_MT)) + +#define pgprot_writethrough pgprot_writecombine + +/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */ +#define PTE_PFN_MASK _PAGE_PFN_MASK + +/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */ +#define PTE_FLAGS_MASK (~PTE_PFN_MASK) + +/* PTE_PROT_MASK extracts the protection flags from a (pte|pmd|pud|pgd)val_t */ +#define PTE_PROTECTS_MASK _PAGE_INIT(UNI_PAGE_WRITE | UNI_PAGE_NON_EX) + +/* PT_USER_FLAGS_MASK extracts the flags from a user (pgd|pud|pmd)val_t */ +#define PMD_USER_FLAGS_MASK _PAGE_SET(_PAGE_USER_PTE, UNI_PAGE_HUGE) +#define PUD_USER_FLAGS_MASK (_PAGE_USER_PMD) +#define PGD_USER_FLAGS_MASK (_PAGE_USER_PUD) + +static inline pteval_t pte_flags(pte_t pte) +{ + return pte_val(pte) & PTE_FLAGS_MASK; +} +static inline pteval_t pte_only_flags(pte_t pte) +{ + return pte_flags(pte) & ~PTE_PROTECTS_MASK; +} +static inline pteval_t pte_only_protects(pte_t pte) +{ + return pte_val(pte) & PTE_PROTECTS_MASK; +} + +static inline pmdval_t pmd_user_flags(pmd_t pmd) +{ + return pmd_val(pmd) & PMD_USER_FLAGS_MASK; +} + +static inline pudval_t pud_user_flags(pud_t pud) +{ + return pud_val(pud) & PUD_USER_FLAGS_MASK; +} + +static inline pgdval_t pgd_user_flags(pgd_t pgd) +{ + return pgd_val(pgd) & PGD_USER_FLAGS_MASK; +} + +/* + * Extract pfn from pte. 
+ */ +#define pte_pfn(pte) (_PAGE_PFN_TO_PADDR(pte_val(pte)) >> PAGE_SHIFT) +#define pmd_pfn(pmd) (_PAGE_PFN_TO_PADDR(pmd_val(pmd)) >> PAGE_SHIFT) +#define pud_pfn(pud) (_PAGE_PFN_TO_PADDR(pud_val(pud)) >> PAGE_SHIFT) +#define pgd_pfn(pgd) (_PAGE_PFN_TO_PADDR(pgd_val(pgd)) >> PAGE_SHIFT) + +#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) +#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) + +#define mk_pfn_pte(pfn, pte) \ + pfn_pte(pfn, __pgprot(pte_val(pte) & ~_PAGE_PFN_MASK)) +#define mk_clone_pte(page, pte) \ + pfn_pte(page_to_pfn(page), \ + __pgprot(pte_val(pte) & ~_PAGE_PFN_MASK)) +#define mk_not_present_pte(pgprot) \ + __pte(_PAGE_CLEAR_PRESENT(pgprot_val(pgprot))) +#define mk_guest_pfn_prot(pgprot) \ + __pgprot(_PAGE_SET(pgprot_val(pgprot), UNI_PAGE_GFN)) +#define mk_pte_pgprot(pte, pgprot) \ + __pte(pte_val(pte) | pgprot_val(pgprot)) +#define mk_pmd_pgprot(pmd, pgprot) \ + __pmd(pmd_val(pmd) | pgprot_val(pgprot)) +#define page_pte_prot(page, prot) mk_pte(page, prot) +#define page_pte(page) page_pte_prot(page, __pgprot(0)) + +#define pgprot_modify_mask(old_prot, newprot_val, prot_mask) \ + (__pgprot(((pgprot_val(old_prot) & ~(prot_mask)) | \ + ((newprot_val) & (prot_mask))))) + +#define pgprot_large_size_set(prot) \ + __pgprot(_PAGE_SET_HUGE(pgprot_val(prot))) +#define pgprot_small_size_set(prot) \ + __pgprot(_PAGE_CLEAR_HUGE(pgprot_val(prot))) +#define pgprot_present_flag_set(prot) \ + pgprot_modify_mask(prot, _PAGE_INIT_PRESENT, _PAGE_INIT_PRESENT) +#define pgprot_present_flag_reset(prot) \ + pgprot_modify_mask(prot, 0UL, \ + _PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_PFN)) +#define _pgprot_reduce(src_prot_val, reduced_prot_val) \ + (((src_prot_val) & ~(_PROT_REDUCE_MASK)) | \ + (((src_prot_val) & (_PROT_REDUCE_MASK)) | \ + ((reduced_prot_val) & (_PROT_REDUCE_MASK)))) +#define _pgprot_restrict(src_prot_val, restricted_prot_val) \ + (((src_prot_val) & ~(_PROT_RESTRICT_MASK)) | \ + (((src_prot_val) & (_PROT_RESTRICT_MASK)) & \ + 
((restricted_prot_val) & (_PROT_RESTRICT_MASK)))) +#define pgprot_reduce(src_prot, reduced_prot) \ + (__pgprot(_pgprot_reduce(pgprot_val(src_prot), \ + pgprot_val(reduced_prot)))) +#define pgprot_restrict(src_prot, restricted_prot) \ + (__pgprot(_pgprot_restrict(pgprot_val(src_prot), \ + pgprot_val(restricted_prot)))) +#define pte_reduce_prot(src_pte, reduced_prot) \ + (__pte(_pgprot_reduce(pte_val(src_pte), \ + pgprot_val(reduced_prot)))) +#define pte_restrict_prot(src_pte, restricted_prot) \ + (__pte(_pgprot_restrict(pte_val(src_pte), \ + pgprot_val(restricted_prot)))) +#define pgprot_priv(pgprot) _PAGE_TEST_PRIV(pgprot_val(pgprot)) +#define pgprot_present(pgprot) _PAGE_TEST_PRESENT(pgprot_val(pgprot)) +#define pgprot_valid(pgprot) _PAGE_TEST_VALID(pgprot_val(pgprot)) +#define pgprot_write(pgprot) _PAGE_TEST_WRITEABLE(pgprot_val(pgprot)) +#define pgprot_huge(pgprot) _PAGE_TEST_HUGE(pgprot_val(pgprot)) +#define pgprot_special(pgprot) _PAGE_TEST_SPECIAL(pgprot_val(pgprot)) + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + pteval_t val = pte_val(pte); + + val &= _PAGE_CHG_MASK; + val |= pgprot_val(newprot) & ~_PAGE_CHG_MASK; + + return __pte(val); +} + +static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) +{ + pmdval_t val = pmd_val(pmd); + + val &= _HPAGE_CHG_MASK; + val |= pgprot_val(newprot) & ~_HPAGE_CHG_MASK; + + return __pmd(val); +} + +#ifndef CONFIG_MAKE_ALL_PAGES_VALID +# define pte_none(pte) (!pte_val(pte)) +#else +# define pte_none(pte) (_PAGE_CLEAR_VALID(pte_val(pte)) == 0) +#endif + +#define pte_valid(pte) _PAGE_TEST_VALID(pte_val(pte)) +#define pte_present(pte) \ + _PAGE_TEST(pte_val(pte), UNI_PAGE_PRESENT | UNI_PAGE_PROTNONE) +#define pte_secondary(pte) (pte_val(pte) & _PAGE_SEC_MAP) +#define pte_priv(pte) _PAGE_TEST_PRIV(pte_val(pte)) +#define pte_clear_guest(pte) \ + (__pte(_PAGE_CLEAR(pte_val(pte), \ + UNI_PAGE_PRIV | UNI_PAGE_GLOBAL))) +#define pte_set_priv(pte) __pte(_PAGE_SET_PRIV(pte_val(pte))) +#define 
pte_large_page(pte) _PAGE_TEST_HUGE(pte_val(pte)) +#define pte_set_small_size(pte) __pte(_PAGE_CLEAR_HUGE(pte_val(pte))) +#define pte_set_large_size(pte) __pte(_PAGE_SET_HUGE(pte_val(pte))) + +#define pte_accessible(mm, pte) \ + (mm_tlb_flush_pending(mm) ? pte_present(pte) : \ + _PAGE_TEST_PRESENT(pte_val(pte))) + +#ifdef CONFIG_NUMA_BALANCING +/* + * These return true for PAGE_NONE too but the kernel does not care. + * See the comment in include/asm-generic/pgtable.h + */ +static inline int pte_protnone(pte_t pte) +{ + return _PAGE_GET(pte_val(pte), UNI_PAGE_PRESENT | UNI_PAGE_PROTNONE) == + _PAGE_INIT_PROTNONE; +} +static inline int pmd_protnone(pmd_t pmd) +{ + return _PAGE_GET(pmd_val(pmd), UNI_PAGE_PRESENT | UNI_PAGE_PROTNONE) == + _PAGE_INIT_PROTNONE; +} +# define pte_present_and_exec(pte) (pte_present(pte) && pte_exec(pte)) +#else /* ! CONFIG_NUMA_BALANCING */ +# define pte_present_and_exec(pte) \ + (_PAGE_GET(pte_val(pte), \ + UNI_PAGE_PRESENT | UNI_PAGE_NON_EX) == \ + _PAGE_INIT_PRESENT) +#endif /* CONFIG_NUMA_BALANCING */ + +/* Since x86 uses Write Combine both for external devices + * (meaning optimization if CPU accesses) and for RAM + * (meaning avoid cache allocation) we do the same here + * as that is what drivers expect. 
*/ +#define is_mt_wb(mt) \ +({ \ + u64 __im_mt = (mt); \ + (__im_mt == GEN_CACHE_MT || __im_mt == EXT_CACHE_MT); \ +}) +#define is_mt_wc(mt) \ +({ \ + u64 __im_mt = (mt); \ + (__im_mt == GEN_NON_CACHE_MT || __im_mt == EXT_PREFETCH_MT); \ +}) +#define is_mt_uc(mt) \ +({ \ + u64 __im_mt = (mt); \ + (__im_mt == GEN_NON_CACHE_ORDERED_MT || \ + __im_mt == EXT_NON_PREFETCH_MT || __im_mt == EXT_CONFIG_MT); \ +}) +#define is_mt_general(mt) \ +({ \ + u64 __im_mt = (mt); \ + (__im_mt == GEN_NON_CACHE_MT || __im_mt == GEN_CACHE_MT); \ +}) +#define is_mt_external(mt) (!is_mt_general(mt)) + +static inline pgprot_t set_general_mt(pgprot_t prot) +{ + pte_mem_type_t mt = get_pte_val_memory_type(pgprot_val(prot)); + + switch (mt) { + case EXT_CACHE_MT: + prot = __pgprot(set_pte_val_memory_type(pgprot_val(prot), + GEN_CACHE_MT)); + break; + case EXT_NON_PREFETCH_MT: + /* pgprot_device() case */ + case EXT_PREFETCH_MT: + case EXT_CONFIG_MT: + mt = GEN_NON_CACHE_MT; + prot = __pgprot(set_pte_val_memory_type(pgprot_val(prot), + GEN_NON_CACHE_MT)); + break; + case GEN_NON_CACHE_ORDERED_MT: + /* pgprot_noncached() case */ + case GEN_NON_CACHE_MT: + /* pgprot_writecombine() and pgprot_writethrough() case */ + case GEN_CACHE_MT: + break; + default: + WARN_ON_ONCE(1); + prot = __pgprot(set_pte_val_memory_type(pgprot_val(prot), + GEN_NON_CACHE_MT)); + break; + } + + return prot; +} + +static inline pgprot_t set_external_mt(pgprot_t prot) +{ + pte_mem_type_t mt = get_pte_val_memory_type(pgprot_val(prot)); + + switch (mt) { + case GEN_CACHE_MT: + prot = __pgprot(set_pte_val_memory_type(pgprot_val(prot), + EXT_CACHE_MT)); + break; + case GEN_NON_CACHE_MT: + /* pgprot_writecombine() and pgprot_writethrough() case */ + prot = __pgprot(set_pte_val_memory_type(pgprot_val(prot), + EXT_PREFETCH_MT)); + break; + case GEN_NON_CACHE_ORDERED_MT: + /* pgprot_noncached() case */ + prot = __pgprot(set_pte_val_memory_type(pgprot_val(prot), + EXT_NON_PREFETCH_MT)); + break; + case EXT_NON_PREFETCH_MT: + /* 
pgprot_device() case */ + case EXT_PREFETCH_MT: + case EXT_CONFIG_MT: + case EXT_CACHE_MT: + break; + default: + WARN_ON_ONCE(1); + prot = __pgprot(set_pte_val_memory_type(pgprot_val(prot), + EXT_CONFIG_MT)); + break; + } + + return prot; +} + +/* + * See comment in pmd_present() - since _PAGE_HUGE bit stays on at all times + * (both during split_huge_page and when the _PAGE_PROTNONE bit gets set) + * we can check only the _PAGE_HUGE bit. + */ +#define pmd_present_and_exec_and_huge(pmd) \ + (_PAGE_GET(pmd_val(pmd), UNI_PAGE_NON_EX | \ + UNI_PAGE_HUGE) == \ + _PAGE_INIT_HUGE) + +#define pud_present_and_exec_and_huge(pud) \ + (_PAGE_GET(pud_val(pud), UNI_PAGE_PRESENT | \ + UNI_PAGE_NON_EX | UNI_PAGE_HUGE) == \ + _PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_HUGE)) + +/* + * See comment in pmd_present() - since _PAGE_HUGE bit stays on at all times + * (both during split_huge_page and when the _PAGE_PROTNONE bit gets set) we + * should not return "pmd_none() == true" when the _PAGE_HUGE bit is set. 
+ */ +#ifndef CONFIG_MAKE_ALL_PAGES_VALID +# define pmd_none(pmd) (pmd_val(pmd) == 0) +#else +# define pmd_none(pmd) (_PAGE_CLEAR_VALID(pmd_val(pmd)) == 0) +#endif + +#define pmd_valid(pmd) _PAGE_TEST_VALID(pmd_val(pmd)) + +/* This will return true for huge pages as expected by arch-independent part */ +static inline int pmd_bad(pmd_t pmd) +{ + return unlikely(_PAGE_CLEAR(pmd_val(pmd) & PTE_FLAGS_MASK, + UNI_PAGE_GLOBAL) != _PAGE_USER_PTE); +} + +#define user_pmd_huge(pmd) _PAGE_TEST_HUGE(pmd_val(pmd)) +#define kernel_pmd_huge(pmd) \ + (is_huge_pmd_level() && _PAGE_TEST_HUGE(pmd_val(pmd))) + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define has_transparent_hugepage has_transparent_hugepage +static inline int has_transparent_hugepage(void) +{ + return cpu_has(CPU_FEAT_ISET_V3); +} + +#define pmd_trans_huge(pmd) user_pmd_huge(pmd) +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +/* + * Checking for _PAGE_HUGE is needed too because + * split_huge_page will temporarily clear the present bit (but + * the _PAGE_HUGE flag will remain set at all times while the + * _PAGE_PRESENT bit is clear). 
+ */ +#define pmd_present(pmd) \ + _PAGE_TEST(pmd_val(pmd), UNI_PAGE_PRESENT | \ + UNI_PAGE_PROTNONE | UNI_PAGE_HUGE) +#define pmd_write(pmd) _PAGE_TEST_WRITEABLE(pmd_val(pmd)) +#define pmd_exec(pmd) _PAGE_TEST_EXECUTEABLE(pmd_val(pmd)) +#define pmd_dirty(pmd) _PAGE_TEST_DIRTY(pmd_val(pmd)) +#define pmd_young(pmd) _PAGE_TEST_ACCESSED(pmd_val(pmd)) +#define pmd_wb(pmd) is_mt_wb(_PAGE_GET_MEM_TYPE(pmd_val(pmd))) +#define pmd_wc(pmd) is_mt_wc(_PAGE_GET_MEM_TYPE(pmd_val(pmd))) +#define pmd_uc(pmd) is_mt_uc(_PAGE_GET_MEM_TYPE(pmd_val(pmd))) + +#define pmd_wrprotect(pmd) (__pmd(_PAGE_CLEAR_WRITEABLE(pmd_val(pmd)))) +#define pmd_mkwrite(pmd) (__pmd(_PAGE_SET_WRITEABLE(pmd_val(pmd)))) +#define pmd_mkexec(pmd) (__pmd(_PAGE_SET_EXECUTEABLE(pmd_val(pmd)))) +#define pmd_mknotexec(pmd) (__pmd(_PAGE_CLEAR_EXECUTEABLE(pmd_val(pmd)))) +#define pmd_mkpresent(pmd) (__pmd(_PAGE_SET_PRESENT(pmd_val(pmd)))) +#define pmd_mk_present_valid(pmd) (__pmd(_PAGE_SET(pmd_val(pmd), \ + UNI_PAGE_PRESENT | UNI_PAGE_VALID))) +#define pmd_mknotpresent(pmd) \ + (__pmd(_PAGE_CLEAR(pmd_val(pmd), \ + UNI_PAGE_PRESENT | UNI_PAGE_PROTNONE))) +#define pmd_mknot_present_valid(pmd) (__pmd(_PAGE_CLEAR(pmd_val(pmd), \ + UNI_PAGE_PRESENT | UNI_PAGE_PROTNONE | UNI_PAGE_VALID))) +#define pmd_mknotvalid(pmd) (__pmd(_PAGE_CLEAR_VALID(pmd_val(pmd)))) +#define pmd_mkold(pmd) (__pmd(_PAGE_CLEAR_ACCESSED(pmd_val(pmd)))) +#define pmd_mkyoung(pmd) (__pmd(_PAGE_SET_ACCESSED(pmd_val(pmd)))) +#define pmd_mkclean(pmd) (__pmd(_PAGE_CLEAR_DIRTY(pmd_val(pmd)))) +#define pmd_mkdirty(pmd) (__pmd(_PAGE_SET_DIRTY(pmd_val(pmd)))) +#define pmd_mkhuge(pmd) (__pmd(_PAGE_SET_HUGE(pmd_val(pmd)))) +#define pmd_clear_guest(pmd) \ + (__pmd(_PAGE_CLEAR(pmd_val(pmd), \ + UNI_PAGE_PRIV | UNI_PAGE_GLOBAL))) +static inline pmd_t pmd_mk_wb(pmd_t pmd) +{ + if (is_mt_external(_PAGE_GET_MEM_TYPE(pmd_val(pmd)))) + return __pmd(_PAGE_SET_MEM_TYPE(pmd_val(pmd), EXT_CACHE_MT)); + else + return __pmd(_PAGE_SET_MEM_TYPE(pmd_val(pmd), GEN_CACHE_MT)); +} 
+static inline pmd_t pmd_mk_wc(pmd_t pmd) +{ + if (is_mt_external(_PAGE_GET_MEM_TYPE(pmd_val(pmd)))) + return __pmd(_PAGE_SET_MEM_TYPE(pmd_val(pmd), EXT_PREFETCH_MT)); + else + return __pmd(_PAGE_SET_MEM_TYPE(pmd_val(pmd), GEN_NON_CACHE_MT)); +} +static inline pmd_t pmd_mk_uc(pmd_t pmd) +{ + if (is_mt_external(_PAGE_GET_MEM_TYPE(pmd_val(pmd)))) + return __pmd(_PAGE_SET_MEM_TYPE(pmd_val(pmd), EXT_NON_PREFETCH_MT)); + else + return __pmd(_PAGE_SET_MEM_TYPE(pmd_val(pmd), GEN_NON_CACHE_ORDERED_MT)); +} + +#ifndef CONFIG_MAKE_ALL_PAGES_VALID +# define pud_none(pud) (pud_val(pud) == 0) +#else +# define pud_none(pud) (_PAGE_CLEAR_VALID(pud_val(pud)) == 0) +#endif + +#define pud_valid(pud) _PAGE_TEST_VALID(pud_val(pud)) + +/* This will return true for huge pages as expected by arch-independent part */ +static inline int pud_bad(pud_t pud) +{ + return unlikely(_PAGE_CLEAR(pud_val(pud) & PTE_FLAGS_MASK, + UNI_PAGE_GLOBAL) != _PAGE_USER_PMD); +} + +#define pud_present(pud) _PAGE_TEST_PRESENT(pud_val(pud)) +#define pud_write(pud) _PAGE_TEST_WRITEABLE(pud_val(pud)) +#define pud_exec(pud) _PAGE_TEST_EXECUTEABLE(pud_val(pud)) +#define user_pud_huge(pud) _PAGE_TEST_HUGE(pud_val(pud)) +#define kernel_pud_huge(pud) \ + (is_huge_pud_level() && _PAGE_TEST_HUGE(pud_val(pud))) +#define pud_wb(pud) is_mt_wb(_PAGE_GET_MEM_TYPE(pud_val(pud))) +#define pud_wc(pud) is_mt_wc(_PAGE_GET_MEM_TYPE(pud_val(pud))) +#define pud_uc(pud) is_mt_uc(_PAGE_GET_MEM_TYPE(pud_val(pud))) + +#define pud_clear_guest(pud) \ + (__pud(_PAGE_CLEAR(pud_val(pud), \ + UNI_PAGE_PRIV | UNI_PAGE_GLOBAL))) +#define pud_wrprotect(pud) (__pud(_PAGE_CLEAR_WRITEABLE(pud_val(pud)))) +#define pud_mkwrite(pud) (__pud(_PAGE_SET_WRITEABLE(pud_val(pud)))) +#define pud_mkexec(pud) (__pud(_PAGE_SET_EXECUTEABLE(pud_val(pud)))) +#define pud_mknotexec(pud) (__pud(_PAGE_CLEAR_EXECUTEABLE(pud_val(pud)))) +#define pud_mkpresent(pud) (__pud(_PAGE_SET_PRESENT(pud_val(pud)))) +#define pud_mk_present_valid(pud) (__pud(_PAGE_SET(pud_val(pud), \ 
+ UNI_PAGE_PRESENT | UNI_PAGE_VALID))) +#define pud_mknotpresent(pud) (__pud(_PAGE_CLEAR_PRESENT(pud_val(pud)))) +#define pud_mknot_present_valid(pud) (__pud(_PAGE_CLEAR(pud_val(pud), \ + UNI_PAGE_PRESENT | UNI_PAGE_VALID))) +#define pud_mknotvalid(pud) (__pud(_PAGE_CLEAR_VALID(pud_val(pud)))) +static inline pud_t pud_mk_wb(pud_t pud) +{ + if (is_mt_external(_PAGE_GET_MEM_TYPE(pud_val(pud)))) + return __pud(_PAGE_SET_MEM_TYPE(pud_val(pud), EXT_CACHE_MT)); + else + return __pud(_PAGE_SET_MEM_TYPE(pud_val(pud), GEN_CACHE_MT)); +} +static inline pud_t pud_mk_wc(pud_t pud) +{ + if (is_mt_external(_PAGE_GET_MEM_TYPE(pud_val(pud)))) + return __pud(_PAGE_SET_MEM_TYPE(pud_val(pud), EXT_PREFETCH_MT)); + else + return __pud(_PAGE_SET_MEM_TYPE(pud_val(pud), GEN_NON_CACHE_MT)); +} +static inline pud_t pud_mk_uc(pud_t pud) +{ + if (is_mt_external(_PAGE_GET_MEM_TYPE(pud_val(pud)))) + return __pud(_PAGE_SET_MEM_TYPE(pud_val(pud), EXT_NON_PREFETCH_MT)); + else + return __pud(_PAGE_SET_MEM_TYPE(pud_val(pud), GEN_NON_CACHE_ORDERED_MT)); +} + +#ifndef CONFIG_MAKE_ALL_PAGES_VALID +#define pgd_none(pgd) (!pgd_val(pgd)) +#else +#define pgd_none(pgd) (_PAGE_CLEAR_VALID(pgd_val(pgd)) == 0) +#endif +#define pgd_none_full(pgd) (!pgd_val(pgd)) +#define pgd_valid(pgd) _PAGE_TEST_VALID(pgd_val(pgd)) +#define pgd_mknotvalid(pgd) (__pgd(_PAGE_CLEAR_VALID(pgd_val(pgd)))) + +static inline int pgd_bad(pgd_t pgd) +{ + return unlikely(_PAGE_CLEAR(pgd_val(pgd) & PTE_FLAGS_MASK, + UNI_PAGE_GLOBAL) != _PAGE_USER_PUD); +} +#define pgd_present(pgd) _PAGE_TEST_PRESENT(pgd_val(pgd)) +#define user_pgd_huge(pgd) _PAGE_TEST_HUGE(pgd_val(pgd)) +#define kernel_pgd_huge(pgd) \ + (is_huge_pgd_level() && _PAGE_TEST_HUGE(pgd_val(pgd))) +#define pgd_clear_guest(pgd) \ + (__pgd(_PAGE_CLEAR(pgd_val(pgd), \ + UNI_PAGE_PRIV | UNI_PAGE_GLOBAL))) + +/* + * The following have defined behavior only work if pte_present() is true. 
+ */ +#define pte_read(pte) (true) +#define pte_write(pte) _PAGE_TEST_WRITEABLE(pte_val(pte)) +#define pte_exec(pte) _PAGE_TEST_EXECUTEABLE(pte_val(pte)) +#define pte_dirty(pte) _PAGE_TEST_DIRTY(pte_val(pte)) +#define pte_young(pte) _PAGE_TEST_ACCESSED(pte_val(pte)) +#define pte_huge(pte) _PAGE_TEST_HUGE(pte_val(pte)) +#define pte_special(pte) _PAGE_TEST_SPECIAL(pte_val(pte)) +#define pte_wb(pte) is_mt_wb(_PAGE_GET_MEM_TYPE(pte_val(pte))) +#define pte_wc(pte) is_mt_wc(_PAGE_GET_MEM_TYPE(pte_val(pte))) +#define pte_uc(pte) is_mt_uc(_PAGE_GET_MEM_TYPE(pte_val(pte))) + +#define pte_wrprotect(pte) (__pte(_PAGE_CLEAR_WRITEABLE(pte_val(pte)))) +#define pte_mkwrite(pte) (__pte(_PAGE_SET_WRITEABLE(pte_val(pte)))) +#define pte_mkexec(pte) (__pte(_PAGE_SET_EXECUTEABLE(pte_val(pte)))) +#define pte_mknotexec(pte) (__pte(_PAGE_CLEAR_EXECUTEABLE(pte_val(pte)))) +#define pte_mkpresent(pte) (__pte(_PAGE_SET_PRESENT(pte_val(pte)))) +#define pte_mk_present_valid(pte) (__pte(_PAGE_SET(pte_val(pte), \ + UNI_PAGE_PRESENT | UNI_PAGE_VALID))) +#define pte_mknotpresent(pte) \ + (__pte(_PAGE_CLEAR(pte_val(pte), \ + UNI_PAGE_PRESENT | UNI_PAGE_PROTNONE))) +#define pte_mknot_present_valid(pte) (__pte(_PAGE_CLEAR(pte_val(pte), \ + UNI_PAGE_PRESENT | UNI_PAGE_PROTNONE | UNI_PAGE_VALID))) +#define pte_mkold(pte) (__pte(_PAGE_CLEAR_ACCESSED(pte_val(pte)))) +#define pte_mkyoung(pte) (__pte(_PAGE_SET_ACCESSED(pte_val(pte)))) +#define pte_mkclean(pte) (__pte(_PAGE_CLEAR_DIRTY(pte_val(pte)))) +#define pte_mkdirty(pte) (__pte(_PAGE_SET_DIRTY(pte_val(pte)))) +#define pte_mkvalid(pte) (__pte(_PAGE_SET_VALID(pte_val(pte)))) +#define pte_mkhuge(pte) \ + (__pte(_PAGE_SET(pte_val(pte), \ + UNI_PAGE_PRESENT | UNI_PAGE_HUGE))) +#define pte_mkspecial(pte) (__pte(_PAGE_SET_SPECIAL(pte_val(pte)))) +#define pte_mknotvalid(pte) (__pte(_PAGE_CLEAR_VALID(pte_val(pte)))) +static inline pte_t pte_mk_wb(pte_t pte) +{ + if (is_mt_external(_PAGE_GET_MEM_TYPE(pte_val(pte)))) + return 
__pte(_PAGE_SET_MEM_TYPE(pte_val(pte), EXT_CACHE_MT)); + else + return __pte(_PAGE_SET_MEM_TYPE(pte_val(pte), GEN_CACHE_MT)); +} +static inline pte_t pte_mk_wc(pte_t pte) +{ + if (is_mt_external(_PAGE_GET_MEM_TYPE(pte_val(pte)))) + return __pte(_PAGE_SET_MEM_TYPE(pte_val(pte), EXT_PREFETCH_MT)); + else + return __pte(_PAGE_SET_MEM_TYPE(pte_val(pte), GEN_NON_CACHE_MT)); +} +static inline pte_t pte_mk_uc(pte_t pte) +{ + if (is_mt_external(_PAGE_GET_MEM_TYPE(pte_val(pte)))) + return __pte(_PAGE_SET_MEM_TYPE(pte_val(pte), EXT_NON_PREFETCH_MT)); + else + return __pte(_PAGE_SET_MEM_TYPE(pte_val(pte), GEN_NON_CACHE_ORDERED_MT)); +} + +/* + * The index and offset in the root-level page table directory. + */ +#define pgd_index(virt_addr) (((virt_addr) >> PGDIR_SHIFT) & \ + (PTRS_PER_PGD - 1)) +#define pgd_offset(mm, virt_addr) ((mm)->pgd + pgd_index(virt_addr)) +#define pgd_to_index(pgdp) ((((unsigned long)(pgdp)) / \ + (sizeof(pgd_t))) & \ + (PTRS_PER_PGD - 1)) +#define pgd_to_page(pgdp) ((pgdp) - pgd_to_index(pgdp)) +#define boot_pgd_index(virt_addr) pgd_index(virt_addr) + +#define VIRT_ADDR_VPTB_BASE(va) \ + ((MMU_IS_SEPARATE_PT()) ? \ + (((va) >= MMU_SEPARATE_KERNEL_VAB) ? \ + KERNEL_VPTB_BASE_ADDR : USER_VPTB_BASE_ADDR) \ + : \ + MMU_UNITED_KERNEL_VPTB) +/* + * The index and offset in the upper page table directory. 
+ */
+/* Arguments are fully parenthesized so expression arguments
+ * (e.g. base + off) bind correctly, matching pgd_index() above. */
+#define pud_index(virt_addr)	(((virt_addr) >> PUD_SHIFT) & \
+					(PTRS_PER_PUD - 1))
+#define pud_virt_offset(virt_addr)	(VIRT_ADDR_VPTB_BASE(virt_addr) | \
+						((pmd_virt_offset(virt_addr) & \
+							PTE_MASK) >> \
+						(E2K_VA_SIZE - PGDIR_SHIFT)))
+#define boot_pud_index(virt_addr)	pud_index(virt_addr)
+#define boot_pud_offset(pgdp, addr)	((pud_t *)boot_pgd_page(*(pgdp)) + \
+						boot_pud_index(addr))
+
+/*
+ * The index and offset in the middle page table directory
+ */
+#define pmd_index(virt_addr)	(((virt_addr) >> PMD_SHIFT) & \
+					(PTRS_PER_PMD - 1))
+#define pmd_virt_offset(virt_addr)	(VIRT_ADDR_VPTB_BASE(virt_addr) | \
+						((pte_virt_offset(virt_addr) & \
+							PTE_MASK) >> \
+						(E2K_VA_SIZE - PGDIR_SHIFT)))
+#define boot_pmd_index(virt_addr)	pmd_index(virt_addr)
+#define boot_pmd_offset(pudp, addr)	((pmd_t *)boot_pud_page(*(pudp)) + \
+						boot_pmd_index(addr))
+
+/*
+ * The index and offset in the third-level page table.
+ */
+#define pte_index(virt_addr)	(((virt_addr) >> PAGE_SHIFT) & \
+					(PTRS_PER_PTE - 1))
+#define pte_virt_offset(virt_addr)	(VIRT_ADDR_VPTB_BASE(virt_addr) | \
+						(((virt_addr) & PTE_MASK) >> \
+						(E2K_VA_SIZE - PGDIR_SHIFT)))
+
+#define boot_pte_index(virt_addr)	pte_index(virt_addr)
+#define boot_pte_offset(pmdp, addr)	((pte_t *)boot_pmd_page(*(pmdp)) + \
+						boot_pte_index(addr))
+
+#endif /* !(__ASSEMBLY__) */
+
+#endif /* !(_ASM_E2K_PGTABLE_DEF_H) */
diff --git a/arch/e2k/include/asm/pgtable_types.h b/arch/e2k/include/asm/pgtable_types.h
new file mode 100644
index 000000000000..557fd3952d86
--- /dev/null
+++ b/arch/e2k/include/asm/pgtable_types.h
@@ -0,0 +1,194 @@
+#ifndef _E2K_PGTABLE_TYPES_H_
+#define _E2K_PGTABLE_TYPES_H_
+
+#ifndef __ASSEMBLY__
+
+#include
+#include
+
+#if CONFIG_CPU_ISET >= 6
+# ifdef CONFIG_MMU_PT_V6
+# define MMU_IS_PT_V6() true
+# else /* !
CONFIG_MMU_PT_V6 */ +# define MMU_IS_PT_V6() false +# endif /* CONFIG_MMU_PT_V6 */ +# ifdef CONFIG_MMU_SEP_VIRT_SPACE +# ifndef CONFIG_DYNAMIC_SEP_VIRT_SPACE +# define MMU_IS_SEPARATE_PT() true +# else /* CONFIG_DYNAMIC_SEP_VIRT_SPACE */ +# ifdef E2K_P2V +# define MMU_IS_SEPARATE_PT() (boot_machine.mmu_separate_pt) +# else /* ! E2K_P2V */ +# define MMU_IS_SEPARATE_PT() (machine.mmu_separate_pt) +# endif /* E2K_P2V */ +# endif /* ! CONFIG_DYNAMIC_SEP_VIRT_SPACE */ +# else /* ! MMU_SEP_VIRT_SPACE */ +# define MMU_IS_SEPARATE_PT() false +# endif /* MMU_SEP_VIRT_SPACE */ +#elif CONFIG_CPU_ISET >= 2 +# define MMU_IS_PT_V6() false +# define MMU_IS_SEPARATE_PT() false +#elif CONFIG_CPU_ISET == 0 +# ifdef E2K_P2V +# define MMU_IS_PT_V6() (boot_machine.mmu_pt_v6) +# define MMU_IS_SEPARATE_PT() (boot_machine.mmu_separate_pt) +# else /* ! E2K_P2V */ +# define MMU_IS_PT_V6() (machine.mmu_pt_v6) +# define MMU_IS_SEPARATE_PT() (machine.mmu_separate_pt) +# endif /* E2K_P2V */ +#else /* CONFIG_CPU_ISET undefined or negative */ +# warning "Undefined CPU ISET VERSION #, MMU pt_v6 mode is defined dinamicaly" +# warning "Undefined CPU ISET VERSION #, MMU sep_pt mode is defined dinamicaly" +# ifdef E2K_P2V +# define MMU_IS_PT_V6() (boot_machine.mmu_pt_v6) +# define MMU_IS_SEPARATE_PT() (boot_machine.mmu_separate_pt) +# else /* ! E2K_P2V */ +# define MMU_IS_PT_V6() (machine.mmu_pt_v6) +# define MMU_IS_SEPARATE_PT() (machine.mmu_separate_pt) +# endif /* E2K_P2V */ +#endif /* CONFIG_CPU_ISET 0-6 */ + +/* max. number of physical address bits (architected) */ +#define E2K_MAX_PHYS_BITS_V2 40 /* on V1-V5 */ +#define E2K_MAX_PHYS_BITS_V6 48 /* from V6-... */ + +/* + * Memory types, the same as PTE.MT field values, + * see iset 8.2.3. 
1) + */ +typedef enum pte_mem_type { + GEN_CACHE_MT = 0, + GEN_NON_CACHE_MT = 1, + EXT_PREFETCH_MT = 4, + EXT_NON_PREFETCH_MT = 6, + EXT_CONFIG_MT = 7, + /* See comment in ioremap_cache() */ + EXT_CACHE_MT = 8, + /* This is the same as GEN_NON_CACHE_MT but with additional bit + * set so that track_pfn_*() functions can understand if this + * is EXT_PREFETCH_MT (i.e. came from pgprot_writecombine()) + * or EXT_NON_PREFETCH_MT (i.e. came from pgprot_noncached()). + * + * This is needed to distinguish between the following cases: + * 1) pgprot_noncached() + vm_insert_page() + * 2) pgprot_writecombine() + vm_insert_page() + * 3) pgprot_noncached() + some other mapping function + * 4) pgprot_writecombine() + some other mapping function + * + * If we are mapping device ("External") then track_pfn_insert() + * and track_pfn_remap() functions will convert the type (cases + * 3 and 4). And by default set hardware "General" type (cases 1 + * and 2) because vm_insert_page() does not call track_pfn_*() + * functions, and "General" type has cache coherency properly + * enabled unlike "External" type. 
*/
+	GEN_NON_CACHE_ORDERED_MT = 9,
+} pte_mem_type_t;
+
+typedef enum pte_mem_type_rule {
+	MOST_STRONG_MTCR = 0,
+	FROM_HYPERVISOR_MTCR = 2,
+	FROM_GUEST_MTCR = 3,
+} pte_mem_type_rule_t;
+
+/* arch-independent structure of page table entries */
+typedef enum uni_page_bits {
+	UNI_PAGE_PRESENT_BIT,		/* Present */
+	UNI_PAGE_WRITE_BIT,		/* Writable */
+	UNI_PAGE_PRIV_BIT,		/* PriVileged */
+	UNI_PAGE_VALID_BIT,		/* Valid */
+	UNI_PAGE_PROTECT_BIT,		/* PRotected */
+	UNI_PAGE_HW_ACCESS_BIT,		/* page hardware Accessed */
+	UNI_PAGE_DIRTY_BIT,		/* page Dirty */
+	UNI_PAGE_HUGE_BIT,		/* huge Page Size */
+	UNI_PAGE_GLOBAL_BIT,		/* Global page */
+	UNI_PAGE_NWA_BIT,		/* No Writable Address */
+	UNI_PAGE_NON_EX_BIT,		/* NON EXecutable */
+	UNI_PAGE_PROTNONE_BIT,		/* software PROTection NONE */
+	UNI_PAGE_AVAIL_BIT,		/* software AVAILable */
+	UNI_PAGE_SW_ACCESS_BIT,		/* page software Accessed */
+	UNI_PAGE_SPECIAL_BIT,		/* software SPECIAL */
+	UNI_PAGE_GFN_BIT,		/* software Guest page Frame Number */
+	UNI_PAGE_ACCESSED_BIT,		/* page hardware/software Accessed */
+	UNI_PAGE_PFN_BIT,		/* Physical Page Number field */
+	UNI_PAGE_MEM_TYPE_BIT,		/* Memory Type field */
+	UNI_PAGE_MEM_TYPE_RULE_BIT,	/* Memory Type Combination Rule field */
+	UNI_PAGE_MEM_TYPE_MA_BIT,	/* Memory Type to memory access */
+					/* DTLB field */
+	UNI_PAGE_WRITE_INT_BIT,		/* Write protected physical address */
+					/* DTLB field */
+	UNI_PAGE_INTL_RD_BIT,		/* Intel Read protection */
+					/* DTLB field */
+	UNI_PAGE_INTL_WR_BIT,		/* Intel Write protection */
+					/* DTLB field */
+	UNI_DTLB_EP_RES_BIT,		/* DTLB entry probe result field */
+					/* for successful probe completion */
+	UNI_DTLB_PH_ADDR_AP_RES_BIT,	/* physical address for successful */
+					/* DTLB address probe result */
+	UNI_DTLB_ERROR_MASK_BIT,	/* DTLB entry probe faults mask */
+					/* for unsuccessful probe completion */
+	UNI_DTLB_MISS_LEVEL_BIT,	/* miss level DTLB field */
+	UNI_DTLB_SUCCESSFUL_BIT,	/* successful translation flag */
+					/* for DTLB probe operation */
+	
UNI_DTLB_RES_BITS_BIT, /* reserved bits of DTLB probe */ + /* result */ +} uni_page_bits_t; + +typedef const unsigned long uni_pteval_t; +typedef const unsigned long uni_dtlb_t; + +#define UNI_PAGE_PRESENT (uni_pteval_t)(1ULL << UNI_PAGE_PRESENT_BIT) +#define UNI_PAGE_WRITE (uni_pteval_t)(1ULL << UNI_PAGE_WRITE_BIT) +#define UNI_PAGE_PRIV (uni_pteval_t)(1ULL << UNI_PAGE_PRIV_BIT) +#define UNI_PAGE_VALID (uni_pteval_t)(1ULL << UNI_PAGE_VALID_BIT) +#define UNI_PAGE_PROTECT (uni_pteval_t)(1ULL << UNI_PAGE_PROTECT_BIT) +#define UNI_PAGE_HW_ACCESS (uni_pteval_t)(1ULL << UNI_PAGE_HW_ACCESS_BIT) +#define UNI_PAGE_DIRTY (uni_pteval_t)(1ULL << UNI_PAGE_DIRTY_BIT) +#define UNI_PAGE_HUGE (uni_pteval_t)(1ULL << UNI_PAGE_HUGE_BIT) +#define UNI_PAGE_GLOBAL (uni_pteval_t)(1ULL << UNI_PAGE_GLOBAL_BIT) +#define UNI_PAGE_NWA (uni_pteval_t)(1ULL << UNI_PAGE_NWA_BIT) +#define UNI_PAGE_NON_EX (uni_pteval_t)(1ULL << UNI_PAGE_NON_EX_BIT) +#define UNI_PAGE_PROTNONE (uni_pteval_t)(1ULL << UNI_PAGE_PROTNONE_BIT) +#define UNI_PAGE_AVAIL (uni_pteval_t)(1ULL << UNI_PAGE_AVAIL_BIT) +#define UNI_PAGE_SW_ACCESS (uni_pteval_t)(1ULL << UNI_PAGE_SW_ACCESS_BIT) +#define UNI_PAGE_SPECIAL (uni_pteval_t)(1ULL << UNI_PAGE_SPECIAL_BIT) +#define UNI_PAGE_GFN (uni_pteval_t)(1ULL << UNI_PAGE_GFN_BIT) +#define UNI_PAGE_ACCESSED (uni_pteval_t)(1ULL << UNI_PAGE_ACCESSED_BIT) +#define UNI_PAGE_PFN (uni_pteval_t)(1ULL << UNI_PAGE_PFN_BIT) +#define UNI_PAGE_MEM_TYPE (uni_pteval_t)(1ULL << UNI_PAGE_MEM_TYPE_BIT) +#define UNI_PAGE_MEM_TYPE_RULE \ + (uni_pteval_t)(1ULL << UNI_PAGE_MEM_TYPE_RULE_BIT) +#define UNI_PAGE_MEM_TYPE_MA (uni_dtlb_t)(1ULL << UNI_PAGE_MEM_TYPE_MA_BIT) +#define UNI_PAGE_WRITE_INT (uni_dtlb_t)(1ULL << UNI_PAGE_WRITE_INT_BIT) +#define UNI_PAGE_INTL_RD (uni_dtlb_t)(1ULL << UNI_PAGE_INTL_RD_BIT) +#define UNI_PAGE_INTL_WR (uni_dtlb_t)(1ULL << UNI_PAGE_INTL_WR_BIT) +#define UNI_DTLB_EP_RES (uni_dtlb_t)(1ULL << UNI_DTLB_EP_RES_BIT) +#define UNI_DTLB_PH_ADDR_AP_RES \ + (uni_dtlb_t)(1ULL << 
UNI_DTLB_PH_ADDR_AP_RES_BIT)
+#define UNI_DTLB_ERROR_MASK	(uni_dtlb_t)(1ULL << UNI_DTLB_ERROR_MASK_BIT)
+#define UNI_DTLB_MISS_LEVEL	(uni_dtlb_t)(1ULL << UNI_DTLB_MISS_LEVEL_BIT)
+#define UNI_DTLB_SUCCESSFUL	(uni_dtlb_t)(1ULL << UNI_DTLB_SUCCESSFUL_BIT)
+#define UNI_DTLB_RES_BITS	(uni_dtlb_t)(1ULL << UNI_DTLB_RES_BITS_BIT)
+
+/*
+ * Encode and de-code a swap entry
+ *
+ * Format of swap pte:
+ *	bits 0, _PAGE_PROTNONE : present bits (must be zero)
+ *	bits 13-19: swap-type
+ *	bits 20-63: swap offset (MMU PTE version dependent, see pgtable-v*.h)
+ */
+#define __SWP_TYPE_BITS		7
+#define MAX_SWAPFILES_CHECK()	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > \
+					__SWP_TYPE_BITS)
+#define __SWP_TYPE_SHIFT	(PAGE_SHIFT + 1)
+#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_SHIFT + __SWP_TYPE_BITS)
+#define __FILE_PGOFF_SHIFT	(PAGE_SHIFT + 1)
+
+#define __swp_type(entry)	(((entry).val >> __SWP_TYPE_SHIFT) & \
+					((1U << __SWP_TYPE_BITS) - 1))
+#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
+/* Parameter must be named after what it is (a pmd): the original
+ * spelled the parameter "pte" while the body expanded pmd_val(pmd),
+ * which only compiled when the caller's argument happened to be a
+ * variable literally named "pmd". */
+#define __pmd_to_swp_entry(pmd)	((swp_entry_t) { pmd_val(pmd) })
+
+#endif /* !
__ASSEMBLY__ */ + +#endif /* _E2K_PGTABLE_TYPES_H_ */ diff --git a/arch/e2k/include/asm/pic.h b/arch/e2k/include/asm/pic.h new file mode 100644 index 000000000000..2c2683d627b3 --- /dev/null +++ b/arch/e2k/include/asm/pic.h @@ -0,0 +1,150 @@ +#ifndef __ASM_E2K_PIC_H +#define __ASM_E2K_PIC_H + +/* + * Choose between APIC and EPIC basic functions + */ + +#include +#include +#include + +/* P2V */ +static inline unsigned int boot_epic_is_bsp(void) +{ + union cepic_ctrl reg; + + reg.raw = boot_epic_read_w(CEPIC_CTRL); + return reg.bits.bsp_core; +} + +static inline unsigned int boot_apic_is_bsp(void) +{ + return BootStrap(boot_arch_apic_read(APIC_BSP)); +} + +static inline unsigned int boot_epic_read_id(void) +{ + return cepic_id_full_to_short(boot_epic_read_w(CEPIC_ID)); +} + +static inline unsigned int boot_apic_read_id(void) +{ + return GET_APIC_ID(boot_arch_apic_read(APIC_ID)); +} + +/* + * Reading PIC registers is mainly done in early P2V: before and slightly + * after the initialization of boot_machine structure. Unfortunately, the + * structure is initalized by BSP, and AP may proceed to read PIC in the mean + * time. The barrier can't be used that early either. As such, PIC reads in P2V + * can't rely on CPU_FEAT_EPIC. We manually read the IDR register instead. 
+ */ +#ifdef CONFIG_EPIC +static inline bool boot_early_pic_is_bsp(void) +{ + e2k_idr_t idr; + unsigned int reg; + + idr = boot_read_IDR_reg(); + if (idr.IDR_mdl >= IDR_E12C_MDL) + reg = boot_epic_is_bsp(); + else + reg = boot_apic_is_bsp(); + + return !!reg; +} + +static inline unsigned int boot_early_pic_read_id(void) +{ + e2k_idr_t idr; + + idr = boot_read_IDR_reg(); + if (idr.IDR_mdl >= IDR_E12C_MDL) + return boot_epic_read_id(); + else + return boot_apic_read_id(); +} +#else +static inline bool boot_early_pic_is_bsp(void) +{ + return !!boot_apic_is_bsp(); +} + +static inline unsigned int boot_early_pic_read_id(void) +{ + return boot_apic_read_id(); +} +#endif + +/* Kernel */ +#ifndef E2K_P2V +#include + +#ifdef CONFIG_EPIC +static inline bool read_pic_bsp(void) +{ + if (cpu_has_epic()) + return read_epic_bsp(); + else + return !!BootStrap(arch_apic_read(APIC_BSP)); +} + +extern void __init_recv e2k_setup_secondary_apic(void); +static inline void __init_recv e2k_setup_secondary_pic(void) +{ + if (cpu_has_epic()) + setup_cepic(); + else + e2k_setup_secondary_apic(); +} + +extern void __init_recv setup_prepic(void); +static inline void __init_recv setup_processor_pic(void) +{ + if (cpu_has_epic()) + setup_prepic(); +} +#else /* CONFIG_EPIC */ +static inline bool read_pic_bsp(void) +{ + return !!BootStrap(arch_apic_read(APIC_BSP)); +} + +extern void __init_recv e2k_setup_secondary_apic(void); +static inline void __init_recv e2k_setup_secondary_pic(void) +{ + e2k_setup_secondary_apic(); +} + +extern void __init_recv e2k_apic_map_logical_id(int recovery); +static inline void __init_recv e2k_pic_map_logical_id(int recovery) +{ + e2k_apic_map_logical_id(recovery); +} + +static inline void __init_recv setup_processor_pic(void) +{ + /* Nothing to do */ +} + +extern void setup_secondary_APIC_clock(void); +static inline void __init_recv setup_secondary_pic_clock(void) +{ + setup_secondary_APIC_clock(); +} + +static inline int pic_get_vector(void) +{ + return 
apic_get_vector(); +} + +struct pci_dev; +int ioepic_pin_to_msi_ioapic_irq(unsigned int pin, struct pci_dev *dev); +static inline int ioepic_pin_to_irq_pic(unsigned int pin, struct pci_dev *dev) +{ + return ioepic_pin_to_msi_ioapic_irq(pin, dev); +} +#endif /* CONFIG_EPIC */ +#endif /* E2K_P2V */ +#endif /* __ASM_E2K_PIC_H */ diff --git a/arch/e2k/include/asm/poll.h b/arch/e2k/include/asm/poll.h new file mode 100644 index 000000000000..c98509d3149e --- /dev/null +++ b/arch/e2k/include/asm/poll.h @@ -0,0 +1 @@ +#include diff --git a/arch/e2k/include/asm/posix_types.h b/arch/e2k/include/asm/posix_types.h new file mode 100644 index 000000000000..575deb1e035d --- /dev/null +++ b/arch/e2k/include/asm/posix_types.h @@ -0,0 +1,11 @@ +#ifndef _E2K_POSIX_TYPES_H_ +#define _E2K_POSIX_TYPES_H_ + +/* + * This file is generally used by user-level software, so you need to + * be a little careful about namespace pollution etc. + */ + +#include + +#endif /* _E2K_POSIX_TYPES_H_ */ diff --git a/arch/e2k/include/asm/preempt.h b/arch/e2k/include/asm/preempt.h new file mode 100644 index 000000000000..6269381761f5 --- /dev/null +++ b/arch/e2k/include/asm/preempt.h @@ -0,0 +1,124 @@ +#pragma once + +#include + +register unsigned long long __cpu_preempt_reg DO_ASM_GET_GREG_MEMONIC( + SMP_CPU_ID_GREG); + +#define PREEMPT_COUNTER_SHIFT 33ull + +#define PREEMPT_NEED_RESCHED 0x100000000ull + +#define PREEMPT_ENABLED (0) + +static __always_inline int preempt_count(void) +{ + return __cpu_preempt_reg >> PREEMPT_COUNTER_SHIFT; +} + +/* + * must be macros to avoid header recursion hell + */ +#define init_task_preempt_count(p) do { } while (0) + +#define init_idle_preempt_count(p, cpu) do { \ + __cpu_preempt_reg = (u64) (u32) __cpu_preempt_reg; \ +} while (0) + +static __always_inline void set_preempt_need_resched(void) +{ + __cpu_preempt_reg |= PREEMPT_NEED_RESCHED; +} + +static __always_inline void clear_preempt_need_resched(void) +{ + __cpu_preempt_reg &= ~PREEMPT_NEED_RESCHED; +} + +static 
__always_inline bool test_preempt_need_resched(void) +{ + return __cpu_preempt_reg & PREEMPT_NEED_RESCHED; +} + +/* + * The various preempt_count set/add/sub methods + * + * Careful here: an interrupt can arrive at any moment and set or clear + * the PREEMPT_NEED_RESCHED flag. We want to change preempt_count + * in a safe way so that the flag set in interrupt is not lost. + */ + +static __always_inline void preempt_count_set(int pc) +{ + E2K_INSFD_ATOMIC(pc, + 31 /*shift*/ | (33 /*size*/ << 6) | (1 /*me1hi*/ << 13), + __cpu_preempt_reg); +} + + +static __always_inline void __preempt_count_add(int val) +{ + E2K_ADDD_ATOMIC(__cpu_preempt_reg, (u64) (u32) val << PREEMPT_COUNTER_SHIFT); +} + +static __always_inline void __preempt_count_sub(int val) +{ + E2K_SUBD_ATOMIC(__cpu_preempt_reg, (u64) (u32) val << PREEMPT_COUNTER_SHIFT); +} + +static __always_inline bool __preempt_count_dec_and_test(void) +{ + u64 old; + + E2K_SUBD_ATOMIC__SHRD32(__cpu_preempt_reg, 1ull << PREEMPT_COUNTER_SHIFT, old); +#ifdef CONFIG_PREEMPT_LAZY + if ((__cpu_preempt_reg >> 32ull) & ~1ull) /* as in arm64 */ + return false; + if (current_thread_info()->preempt_lazy_count) + return false; + return test_thread_flag(TIF_NEED_RESCHED_LAZY); +#else + return unlikely(old == 1); +#endif +} + +static __always_inline void init_preempt_count_resched(int pc, int resched) +{ + __cpu_preempt_reg = (u64) (u32) __cpu_preempt_reg; + preempt_count_set(pc); + if (resched) + set_preempt_need_resched(); +} + +/* + * Returns true when we need to resched and can (barring IRQ state). + */ +static __always_inline bool should_resched(int preempt_offset) +{ +#ifdef CONFIG_PREEMPT_LAZY + u64 tmp_par = (u64) (u32) preempt_offset << 1; + u64 tmp = __cpu_preempt_reg >> 32ull; + + if (tmp == tmp_par) + return true; + + /* preempt count == 0 ? 
*/ + tmp &= ~1ull; /* ~PREEMPT_NEED_RESCHED */ + if (tmp != tmp_par) + return false; + if (current_thread_info()->preempt_lazy_count) + return false; + return test_thread_flag(TIF_NEED_RESCHED_LAZY); +#else + return unlikely((__cpu_preempt_reg >> 32ull) == + (((u64) (u32) preempt_offset << 1) | 1)); +#endif +} + +#ifdef CONFIG_PREEMPTION +extern asmlinkage void preempt_schedule(void); +#define __preempt_schedule() preempt_schedule() +extern asmlinkage void preempt_schedule_notrace(void); +#define __preempt_schedule_notrace() preempt_schedule_notrace() +#endif /* CONFIG_PREEMPTION */ + diff --git a/arch/e2k/include/asm/process.h b/arch/e2k/include/asm/process.h new file mode 100644 index 000000000000..a238ca10d275 --- /dev/null +++ b/arch/e2k/include/asm/process.h @@ -0,0 +1,1065 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * include/asm-e2k/process.h + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_PROCESS_H +#define _E2K_PROCESS_H + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* host mode support */ + +#undef DEBUG_SS_MODE +#undef DebugSS +#define DEBUG_SS_MODE 0 /* stack switching */ +#define DebugSS(fmt, args...) \ +({ \ + if (DEBUG_SS_MODE) \ + pr_info(fmt, ##args); \ +}) + +#undef DEBUG_US_MODE +#undef DebugUS +#define DEBUG_US_MODE 0 /* user stacks */ +#define DebugUS(fmt, args...) \ +({ \ + if (DEBUG_US_MODE) \ + pr_info(fmt, ##args); \ +}) + +#undef DEBUG_HS_MODE +#undef DebugHS +#define DEBUG_HS_MODE 0 /* hardware stacks */ +#define DebugHS(...) DebugPrint(DEBUG_HS_MODE ,##__VA_ARGS__) + +#undef DEBUG_KS_MODE +#undef DebugKS +#define DEBUG_KS_MODE 0 /* kernel stacks */ +#define DebugKS(fmt, args...) 
\ +({ \ + if (DEBUG_KS_MODE) \ + pr_info(fmt, ##args); \ +}) + +#undef DEBUG_EXECVE_MODE +#undef DebugEX +#define DEBUG_EXECVE_MODE 0 /* execve and exit */ +#define DebugEX(...) DebugPrint(DEBUG_EXECVE_MODE, ##__VA_ARGS__) + +#undef DEBUG_KVM_OLD_MODE +#undef DebugOLD +#define DEBUG_KVM_OLD_MODE 0 +#define DebugOLD(fmt, args...) \ +({ \ + if (DEBUG_KVM_OLD_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PROCESS_MODE +#undef DbgP +#define DEBUG_PROCESS_MODE 0 /* processes */ +#define DbgP(fmt, args...) \ +({ \ + if (DEBUG_PROCESS_MODE) \ + pr_info(fmt, ##args); \ +}) + +/* + * additional arch-dep flags for clone() + */ +#define CLONE_GUEST_KERNEL 0x0000010000000000 /* guest kernel */ + /* thread creation */ + +/* + * Stack frame types (determinate based on chain registers state): + * host kernel frame - PSR.pm == 1 + * user frame (host and guest) - PSR.pm == 0 && IP < TASK_SIZE + * guest kernel frame - PSP.pm == 0 && IP >= GUEST_TASK_SIZE + */ +typedef enum stack_frame { + kernel_frame_type = 0, /* host kernel frame */ + user_frame_type = 1, /* user (host or guest) frame */ + guest_kernel_frame_type, /* guest kernel frame type */ + undefined_frame_type, +} stack_frame_t; + +static inline stack_frame_t +get_kernel_stack_frame_type(void) +{ + if (!paravirt_enabled() || IS_HV_GM()) + return kernel_frame_type; + return guest_kernel_frame_type; +} + +static inline stack_frame_t +get_user_stack_frame_type(void) +{ + return user_frame_type; +} + +static inline stack_frame_t +get_the_stack_frame_type(e2k_cr0_hi_t cr0_hi, e2k_cr1_lo_t cr1_lo, + bool guest, bool ignore_IP) +{ + if (likely(!guest)) { + /* host kernel: guest kernel is host user */ + if (is_call_from_host_kernel_IP(cr0_hi, cr1_lo, ignore_IP)) { + return kernel_frame_type; + } else if (is_call_from_host_user_IP(cr0_hi, cr1_lo, + true)) { + return user_frame_type; + } else { + pr_err("%s(): unknown host stack frame type: " + "CR0_hi 0x%llx CR1_lo 0x%llx\n", + __func__, cr0_hi.CR0_hi_half, 
+ cr1_lo.CR1_lo_half); + } + } else { + /* guest kernel: frames can be host kernel, guest kernel */ + /* or guest user */ + if (is_call_from_host_kernel_IP(cr0_hi, cr1_lo, ignore_IP)) { + return kernel_frame_type; + } else if (is_call_from_guest_kernel_IP(cr0_hi, cr1_lo, + ignore_IP)) { + return guest_kernel_frame_type; + } else if (is_call_from_guest_user_IP(cr0_hi, cr1_lo, + ignore_IP)) { + return user_frame_type; + } else { + pr_err("%s(): unknown guest stack frame type: " + "CR0_hi 0x%llx CR1_lo 0x%llx\n", + __func__, cr0_hi.CR0_hi_half, + cr1_lo.CR1_lo_half); + } + } + return undefined_frame_type; +} +static inline stack_frame_t +get_stack_frame_type(e2k_cr0_hi_t cr0_hi, e2k_cr1_lo_t cr1_lo) +{ + return get_the_stack_frame_type(cr0_hi, cr1_lo, + paravirt_enabled() && !IS_HV_GM(), false); +} + +static inline stack_frame_t +get_stack_frame_type_IP(e2k_cr0_hi_t cr0_hi, e2k_cr1_lo_t cr1_lo, + bool ignore_IP) +{ + return get_the_stack_frame_type(cr0_hi, cr1_lo, + paravirt_enabled() && !IS_HV_GM(), ignore_IP); +} + +static inline stack_frame_t +get_task_stack_frame_type_IP(struct task_struct *task, + e2k_cr0_hi_t cr0_hi, e2k_cr1_lo_t cr1_lo, bool ignore_IP) +{ + return get_the_stack_frame_type(cr0_hi, cr1_lo, + paravirt_enabled() && !IS_HV_GM() || + guest_task_mode(task), + ignore_IP); +} + +static __always_inline int +is_kernel_hardware_stacks(e2k_addr_t p_stack_base, e2k_addr_t pc_stack_base) +{ + if (p_stack_base >= TASK_SIZE || pc_stack_base >= TASK_SIZE) { + return 1; + } + return 0; +} + +extern int native_copy_kernel_stacks(struct task_struct *new_task, + unsigned long fn, unsigned long arg); + +static __always_inline e2k_size_t +get_hw_ps_area_user_size(hw_stack_area_t *ps) +{ + return ps->size; +} +static __always_inline e2k_size_t +get_hw_pcs_area_user_size(hw_stack_area_t *pcs) +{ + return pcs->size; +} +static __always_inline void +set_hw_ps_area_user_size(hw_stack_area_t *ps, e2k_size_t u_ps_size) +{ + ps->size = u_ps_size; +} +static __always_inline 
void +set_hw_pcs_area_user_size(hw_stack_area_t *pcs, e2k_size_t u_pcs_size) +{ + pcs->size = u_pcs_size; +} + +static __always_inline e2k_size_t +get_hw_ps_user_size(hw_stack_t *hw_stacks) +{ + return get_hw_ps_area_user_size(&hw_stacks->ps); +} +static __always_inline e2k_size_t +get_hw_pcs_user_size(hw_stack_t *hw_stacks) +{ + return get_hw_pcs_area_user_size(&hw_stacks->pcs); +} +static __always_inline void +set_hw_ps_user_size(hw_stack_t *hw_stacks, e2k_size_t u_ps_size) +{ + set_hw_ps_area_user_size(&hw_stacks->ps, u_ps_size); +} +static __always_inline void +set_hw_pcs_user_size(hw_stack_t *hw_stacks, e2k_size_t u_pcs_size) +{ + set_hw_pcs_area_user_size(&hw_stacks->pcs, u_pcs_size); +} + +#define NATIVE_ONLY_SET_GUEST_GREGS(ti) \ + /* it is native or host kernel, nothing to set */ + +static __always_inline int +native_switch_to_new_user(e2k_stacks_t *stacks, hw_stack_t *hw_stacks, + e2k_addr_t cut_base, e2k_size_t cut_size, + e2k_addr_t entry_point, int cui, + unsigned long flags, bool kernel) +{ + return 0; /* to continue switching at native mode */ +} + +extern int native_clone_prepare_spilled_user_stacks(e2k_stacks_t *child_stacks, + const e2k_mem_crs_t *child_crs, const struct pt_regs *regs, + struct sw_regs *new_sw_regs, struct thread_info *new_ti, + unsigned long clone_flags); +extern void native_copy_spilled_user_stacks(e2k_stacks_t *child_stacks, + e2k_mem_crs_t *child_crs, struct sw_regs *new_sw_regs, + const struct thread_info *new_ti); + +/* + * Functions create all kernel hardware stacks(PS & PCS) + */ +static inline void +native_do_define_kernel_hw_stacks_sizes(hw_stack_t *hw_stacks) +{ + set_hw_ps_user_size(hw_stacks, KERNEL_P_STACK_SIZE); + set_hw_pcs_user_size(hw_stacks, KERNEL_PC_STACK_SIZE); +} + +extern void native_define_kernel_hw_stacks_sizes(hw_stack_t *hw_stacks); +extern void native_define_user_hw_stacks_sizes(hw_stack_t *hw_stacks); + +extern void init_sw_user_regs(struct sw_regs *sw_regs, + bool save_gregs, bool save_binco_regs); + 
+/* + * The function should be only in-line nad cannot call any other not in-line + * functions, including printk() or other debugging print + */ +static __always_inline void +goto_new_user_hard_stk(e2k_stacks_t *stacks) +{ + /* + * Optimization to do not flush chain stack. + * + * Old stacks are not needed anymore, do not flush procedure + * registers and chain registers - only strip sizes + */ + NATIVE_STRIP_PSHTP_WINDOW(); + NATIVE_STRIP_PCSHTP_WINDOW(); + + /* + * There might be a FILL operation still going right now. + * Wait for it's completion before going further - otherwise + * the next FILL on the new PSP/PCSP registers will race + * with the previous one. + * + * The first and the second FILL operations will use different + * addresses because we will change PSP/PCSP registers, and + * thus loads/stores from these two FILLs can race with each + * other leading to bad register file (containing values from + * both stacks).. + */ + E2K_WAIT(_ma_c); + + /* + * Since we are switching to user stacks their sizes + * have been stripped already, so use RAW_* writes. + */ + NATIVE_NV_WRITE_PSP_REG(stacks->psp_hi, stacks->psp_lo); + NATIVE_NV_WRITE_PCSP_REG(stacks->pcsp_hi, stacks->pcsp_lo); +} + +typedef void (*start_fn)(u64 __start); + +extern void switch_to_user_func(long dummy, + start_fn start_func, e2k_size_t us_size, int cui); +#ifdef CONFIG_PROTECTED_MODE +extern void pm_exit_robust_list(struct task_struct *curr); +extern void protected_switch_to_user_func(long r0, long r1, + start_fn start_func, e2k_size_t us_size, int cui); +#endif + +static inline void native_flush_stacks(void) +{ + NATIVE_FLUSHCPU; +} +static inline void native_flush_regs_stack(void) +{ + NATIVE_FLUSHR; +} +static inline void native_flush_chain_stack(void) +{ + NATIVE_FLUSHC; +} +#define NATIVE_COPY_STACKS_TO_MEMORY() \ +({ \ + NATIVE_FLUSHCPU; \ +}) + +/* + * Native threads can be scheduled and migrate to other CPUs, + * but never can change task and thread info structures. 
+ * Host threads on which guest VCPUs are implemented cannot change + * task and thread info structures either. But these threads are multithreaded: + * it means there are many guest kernel and user processes which are running + * under that host thread. Each guest process has its own stacks and context, and + * switching from one guest process to another causes switching of context and + * stacks, including the host kernel stacks allocated for every such process and + * used to run it. Guest process stacks and other context are kept in the + * guest thread info structure. Guest process switching means a change of + * guest thread info, but the host thread info and task structure stay the same. + * However there is a complication because of guest process migration from one + * VCPU to another. In this case some local variables in the dynamic chain of + * host function calls (local and procedure register stacks) can keep values + * of the old host VCPU thread. + * For example, a guest process was trapped on VCPU #0 and after trap handling + * completion migrated to VCPU #1. The trap on VCPU #0 can set some variables to + * pointers to current and current_thread_info() structures: + * VCPU #0 ti = current_thread_info(); + * task = current; + * Further the host trap handler starts the guest trap handler: + * VCPU #0 user_trap_handler() + * kvm_start_guest_trap_handler() + * kvm_trap_handler() + * schedule() + * ...... + * switch_to() VCPU #1 + * VCPU #1 is another host thread and has its own current & current_thread_info + * other than on VCPU #0 + * schedule() + * kvm_trap_handler() + * kvm_start_guest_trap_handler() + * user_trap_handler() + * At this point the variables ti and task contain pointers to the old + * structures, i.e. to the task & thread info structures of VCPU #0, so they + * should be updated to point to the new structures of VCPU #1. + * Good style is to use only the direct current & current_thread_info() macros, + * but there are some derived values: regs, vcpu, gti ... and they should be + * updated too. 
+ * Follow macroses to help to solve this problem + */ +/* native kernel does not support virtualization and cannot be run as host */ +/* so has not the problem - nothing to do */ +#define NATIVE_UPDATE_VCPU_THREAD_CONTEXT(task, ti, regs, gti, vcpu) +#define NATIVE_CHECK_VCPU_THREAD_CONTEXT(__ti) + +static inline int __is_privileged_range(struct vm_area_struct *vma, + e2k_addr_t start, e2k_addr_t end) +{ + while (vma && vma->vm_start < end) { + if (vma->vm_flags & VM_PRIVILEGED) + return 1; + vma = vma->vm_next; + } + + return 0; +} + +static inline int is_privileged_range(e2k_addr_t start, e2k_addr_t end) +{ + return start >= USER_HW_STACKS_BASE || end >= USER_HW_STACKS_BASE; +} + +extern int do_update_vm_area_flags(e2k_addr_t start, e2k_size_t len, + vm_flags_t flags_to_set, vm_flags_t flags_to_clear); +extern int create_cut_entry(int tcount, + unsigned long code_base, unsigned code_sz, + unsigned long glob_base, unsigned glob_sz); +extern int free_cut_entry(unsigned long glob_base, size_t glob_sz, + unsigned long *code_base, size_t *code_sz); +extern void fill_cut_entry(e2k_cute_t *cute_p, + unsigned long code_base, unsigned code_sz, + unsigned long glob_base, unsigned glob_sz); + +extern int alloc_user_hw_stacks(hw_stack_t *hw_stacks, size_t p_size, size_t pc_size); +extern void free_user_hw_stacks(hw_stack_t *hw_stacks); +extern void free_user_old_pc_stack_areas(struct list_head *old_u_pcs_list); +extern int free_user_hardware_stacks(hw_stack_t *u_hw_stacks); + +#define ATOMIC_GET_HW_STACK_INDEXES(ps_ind, pcs_ind) \ +({ \ + unsigned long psp_hi_val; \ + unsigned long pshtp_val; \ + unsigned long pcsp_hi_val; \ + unsigned int pcshtp_val; \ + e2k_psp_hi_t psp_hi; \ + e2k_pcsp_hi_t pcsp_hi; \ + e2k_pshtp_t pshtp; \ + \ + ATOMIC_READ_HW_STACKS_SIZES(psp_hi_val, pshtp_val, \ + pcsp_hi_val, pcshtp_val); \ + psp_hi.PSP_hi_half = psp_hi_val; \ + pcsp_hi.PCSP_hi_half = pcsp_hi_val; \ + pshtp.PSHTP_reg = pshtp_val; \ + ps_ind = psp_hi.PSP_hi_ind + 
GET_PSHTP_MEM_INDEX(pshtp); \ + pcs_ind = pcsp_hi.PCSP_hi_ind + PCSHTP_SIGN_EXTEND(pcshtp_val); \ +}) + +#define ATOMIC_GET_HW_STACKS_IND_AND_TOP(ps_ind, pshtop, pcs_ind, pcshtop) \ +({ \ + unsigned long psp_hi_val; \ + unsigned long pshtp_val; \ + unsigned long pcsp_hi_val; \ + e2k_psp_hi_t psp_hi; \ + e2k_pcsp_hi_t pcsp_hi; \ + e2k_pshtp_t pshtp; \ + \ + ATOMIC_READ_HW_STACKS_SIZES(psp_hi_val, pshtp_val, \ + pcsp_hi_val, pcshtop); \ + psp_hi.PSP_hi_half = psp_hi_val; \ + pcsp_hi.PCSP_hi_half = pcsp_hi_val; \ + pshtp.PSHTP_reg = pshtp_val; \ + ps_ind = psp_hi.PSP_hi_ind; \ + pshtop = GET_PSHTP_MEM_INDEX(pshtp); \ + pcs_ind = pcsp_hi.PCSP_hi_ind; \ +}) + +#define ATOMIC_GET_HW_PS_SIZES(ps_ind, ps_size) \ +({ \ + unsigned long psp_hi_val; \ + unsigned long pshtp_val; \ + e2k_psp_hi_t psp_hi; \ + e2k_pshtp_t pshtp; \ + \ + ATOMIC_READ_HW_PS_SIZES(psp_hi_val, pshtp_val); \ + psp_hi.PSP_hi_half = psp_hi_val; \ + pshtp.PSHTP_reg = pshtp_val; \ + ps_size = psp_hi.PSP_hi_size; \ + ps_ind = psp_hi.PSP_hi_ind + GET_PSHTP_MEM_INDEX(pshtp); \ +}) + +#define ATOMIC_GET_HW_PS_SIZES_AND_BASE(ps_ind, ps_size, ps_base) \ +({ \ + unsigned long psp_hi_val; \ + unsigned long psp_lo_val; \ + unsigned long pshtp_val; \ + e2k_psp_hi_t psp_hi; \ + e2k_psp_lo_t psp_lo; \ + e2k_pshtp_t pshtp; \ + \ + ATOMIC_READ_P_STACK_REGS(psp_lo_val, psp_hi_val, pshtp_val); \ + psp_hi.PSP_hi_half = psp_hi_val; \ + psp_lo.PSP_lo_half = psp_lo_val; \ + pshtp.PSHTP_reg = pshtp_val; \ + ps_size = psp_hi.PSP_hi_size; \ + ps_ind = psp_hi.PSP_hi_ind + GET_PSHTP_MEM_INDEX(pshtp); \ + ps_base = psp_lo.PSP_lo_base; \ +}) + +#define ATOMIC_GET_HW_PS_SIZES_BASE_TOP(ps_ind, ps_size, ps_base, pshtop) \ +({ \ + unsigned long psp_hi_val; \ + unsigned long psp_lo_val; \ + unsigned long pshtp_val; \ + e2k_psp_hi_t psp_hi; \ + e2k_psp_lo_t psp_lo; \ + e2k_pshtp_t pshtp; \ + \ + ATOMIC_READ_P_STACK_REGS(psp_lo_val, psp_hi_val, pshtp_val); \ + psp_hi.PSP_hi_half = psp_hi_val; \ + psp_lo.PSP_lo_half = psp_lo_val; \ + 
pshtp.PSHTP_reg = pshtp_val; \ + ps_size = psp_hi.PSP_hi_size; \ + ps_ind = psp_hi.PSP_hi_ind; \ + pshtop = GET_PSHTP_MEM_INDEX(pshtp); \ + ps_base = psp_lo.PSP_lo_base; \ +}) + +#define ATOMIC_GET_HW_PCS_SIZES(pcs_ind, pcs_size) \ +({ \ + unsigned long pcsp_hi_val; \ + unsigned int pcshtp_val; \ + e2k_pcsp_hi_t pcsp_hi; \ + \ + ATOMIC_READ_HW_PCS_SIZES(pcsp_hi_val, pcshtp_val); \ + pcsp_hi.PCSP_hi_half = pcsp_hi_val; \ + pcs_size = pcsp_hi.PCSP_hi_size; \ + pcs_ind = pcsp_hi.PCSP_hi_ind + PCSHTP_SIGN_EXTEND(pcshtp_val); \ +}) + +#define ATOMIC_GET_HW_PCS_SIZES_AND_BASE(pcs_ind, pcs_size, pcs_base) \ +({ \ + unsigned long pcsp_hi_val; \ + unsigned long pcsp_lo_val; \ + unsigned int pcshtp_val; \ + e2k_pcsp_hi_t pcsp_hi; \ + e2k_pcsp_lo_t pcsp_lo; \ + \ + ATOMIC_READ_PC_STACK_REGS(pcsp_lo_val, pcsp_hi_val, pcshtp_val); \ + pcsp_hi.PCSP_hi_half = pcsp_hi_val; \ + pcsp_lo.PCSP_lo_half = pcsp_lo_val; \ + pcs_size = pcsp_hi.PCSP_hi_size; \ + pcs_ind = pcsp_hi.PCSP_hi_ind + PCSHTP_SIGN_EXTEND(pcshtp_val); \ + pcs_base = pcsp_lo.PCSP_lo_base; \ +}) + +#define ATOMIC_GET_HW_PCS_SIZES_BASE_TOP(pcs_ind, pcs_size, pcs_base, pcshtp) \ +({ \ + unsigned long pcsp_hi_val; \ + unsigned long pcsp_lo_val; \ + e2k_pcsp_hi_t pcsp_hi; \ + e2k_pcsp_lo_t pcsp_lo; \ + e2k_pcshtp_t pcshtp_val; \ + \ + ATOMIC_READ_PC_STACK_REGS(pcsp_lo_val, pcsp_hi_val, pcshtp_val); \ + pcsp_hi.PCSP_hi_half = pcsp_hi_val; \ + pcsp_lo.PCSP_lo_half = pcsp_lo_val; \ + pcs_size = pcsp_hi.PCSP_hi_size; \ + pcs_ind = pcsp_hi.PCSP_hi_ind; \ + pcs_base = pcsp_lo.PCSP_lo_base; \ + pcshtp = PCSHTP_SIGN_EXTEND(pcshtp_val); \ +}) + +#define ATOMIC_GET_HW_PCS_IND_AND_BASE(pcs_ind, pcs_base) \ +({ \ + unsigned long pcsp_hi_val; \ + unsigned long pcsp_lo_val; \ + unsigned int pcshtp_val; \ + e2k_pcsp_hi_t pcsp_hi; \ + e2k_pcsp_lo_t pcsp_lo; \ + \ + ATOMIC_READ_PC_STACK_REGS(pcsp_lo_val, pcsp_hi_val, pcshtp_val); \ + pcsp_hi.PCSP_hi_half = pcsp_hi_val; \ + pcsp_lo.PCSP_lo_half = pcsp_lo_val; \ + pcs_ind = 
pcsp_hi.PCSP_hi_ind + PCSHTP_SIGN_EXTEND(pcshtp_val); \ + pcs_base = pcsp_lo.PCSP_lo_base; \ +}) + +#define ATOMIC_GET_HW_STACKS_SIZES_AND_BASE(ps_ind, ps_size, ps_base, \ + pcs_ind, pcs_size, pcs_base) \ +({ \ + unsigned long psp_hi_val; \ + unsigned long psp_lo_val; \ + unsigned long pshtp_val; \ + unsigned long pcsp_hi_val; \ + unsigned long pcsp_lo_val; \ + unsigned int pcshtp; \ + e2k_pshtp_t pshtp; \ + e2k_psp_hi_t psp_hi; \ + e2k_psp_lo_t psp_lo; \ + e2k_pcsp_hi_t pcsp_hi; \ + e2k_pcsp_lo_t pcsp_lo; \ + \ + ATOMIC_READ_HW_STACKS_REGS(psp_lo, psp_hi, pshtp_val, \ + pcsp_lo, pcsp_hi, pcshtp); \ + psp_hi.PSP_hi_half = psp_hi_val; \ + psp_lo.PSP_lo_half = psp_lo_val; \ + pshtp.PSHTP_reg = pshtp_val; \ + ps_size = psp_hi.PSP_hi_size; \ + ps_ind = psp_hi.PSP_hi_ind + GET_PSHTP_MEM_INDEX(pshtp); \ + ps_base = psp_lo.PSP_lo_base; \ + pcsp_hi.PCSP_hi_half = pcsp_hi_val; \ + pcsp_lo.PCSP_lo_half = pcsp_lo_val; \ + pcs_size = pcsp_hi.PCSP_hi_size; \ + pcs_ind = pcsp_hi.PCSP_hi_ind + PCSHTP_SIGN_EXTEND(pcshtp_val); \ + pcs_base = pcsp_lo.PCSP_lo_base; \ +}) + +#define ATOMIC_GET_HW_STACKS_IND_AND_BASE(ps_ind, ps_base, \ + pcs_ind, pcs_base) \ +({ \ + unsigned long psp_hi_val; \ + unsigned long psp_lo_val; \ + unsigned long pshtp_val; \ + unsigned long pcsp_hi_val; \ + unsigned long pcsp_lo_val; \ + unsigned int pcshtp; \ + e2k_pshtp_t pshtp; \ + e2k_psp_hi_t psp_hi; \ + e2k_psp_lo_t psp_lo; \ + e2k_pcsp_hi_t pcsp_hi; \ + e2k_pcsp_lo_t pcsp_lo; \ + \ + ATOMIC_READ_HW_STACKS_REGS(psp_lo_val, psp_hi_val, pshtp_val, \ + pcsp_lo_val, pcsp_hi_val, pcshtp); \ + psp_hi.PSP_hi_half = psp_hi_val; \ + psp_lo.PSP_lo_half = psp_lo_val; \ + pshtp.PSHTP_reg = pshtp_val; \ + ps_ind = psp_hi.PSP_hi_ind + GET_PSHTP_MEM_INDEX(pshtp); \ + ps_base = psp_lo.PSP_lo_base; \ + pcsp_hi.PCSP_hi_half = pcsp_hi_val; \ + pcsp_lo.PCSP_lo_half = pcsp_lo_val; \ + pcs_ind = pcsp_hi.PCSP_hi_ind + PCSHTP_SIGN_EXTEND(pcshtp); \ + pcs_base = pcsp_lo.PCSP_lo_base; \ +}) + +#define 
ATOMIC_GET_HW_STACK_SIZES(ps_ind, ps_size, pcs_ind, pcs_size) \ +({ \ + unsigned long psp_hi_val; \ + unsigned long pshtp_val; \ + unsigned long pcsp_hi_val; \ + unsigned int pcshtp_val; \ + e2k_psp_hi_t psp_hi; \ + e2k_pcsp_hi_t pcsp_hi; \ + e2k_pshtp_t pshtp; \ + \ + ATOMIC_READ_HW_STACKS_SIZES(psp_hi_val, pshtp_val, \ + pcsp_hi_val, pcshtp_val); \ + psp_hi.PSP_hi_half = psp_hi_val; \ + pcsp_hi.PCSP_hi_half = pcsp_hi_val; \ + pshtp.PSHTP_reg = pshtp_val; \ + ps_size = psp_hi.PSP_hi_size; \ + pcs_size = pcsp_hi.PCSP_hi_size; \ + ps_ind = psp_hi.PSP_hi_ind + GET_PSHTP_MEM_INDEX(pshtp); \ + pcs_ind = pcsp_hi.PCSP_hi_ind + PCSHTP_SIGN_EXTEND(pcshtp_val); \ +}) +#define ATOMIC_GET_HW_HWS_SIZES(hws_ind, hws_size, is_pcs) \ +({ \ + if (is_pcs) { \ + ATOMIC_GET_HW_PCS_SIZES(hws_ind, hws_size); \ + } else { \ + ATOMIC_GET_HW_PS_SIZES(hws_ind, hws_size); \ + } \ +}) + +#define ATOMIC_DO_SAVE_HW_STACKS_REGS(st_regs) \ +({ \ + unsigned long psp_lo; \ + unsigned long psp_hi; \ + unsigned long pshtp_val; \ + unsigned long pcsp_lo; \ + unsigned long pcsp_hi; \ + unsigned int pcshtp; \ + e2k_pshtp_t pshtp; \ + \ + ATOMIC_READ_HW_STACKS_REGS(psp_lo, psp_hi, pshtp_val, \ + pcsp_lo, pcsp_hi, pcshtp); \ + (st_regs)->psp_hi.PSP_hi_half = psp_hi; \ + (st_regs)->psp_lo.PSP_lo_half = psp_lo; \ + pshtp.PSHTP_reg = pshtp_val; \ + (st_regs)->psp_hi.PSP_hi_ind += GET_PSHTP_MEM_INDEX(pshtp); \ + (st_regs)->pcsp_hi.PCSP_hi_half = pcsp_hi; \ + (st_regs)->pcsp_lo.PCSP_lo_half = pcsp_lo; \ + (st_regs)->pcsp_hi.PCSP_hi_ind += PCSHTP_SIGN_EXTEND(pcshtp); \ +}) + +static inline void +atomic_save_hw_stacks_regs(e2k_stacks_t *stacks) +{ + ATOMIC_DO_SAVE_HW_STACKS_REGS(stacks); +} + +#define ATOMIC_DO_SAVE_ALL_STACKS_REGS(st_regs, cr1_hi_p, usd_lo, usd_hi) \ +({ \ + unsigned long psp_lo; \ + unsigned long psp_hi; \ + unsigned long pshtp_val; \ + unsigned long pcsp_lo; \ + unsigned long pcsp_hi; \ + unsigned int pcshtp; \ + unsigned long cr1_hi_val; /* cotains ussz field for data stack */ \ + 
e2k_pshtp_t pshtp; \ + e2k_cr1_hi_t cr1_hi; \ + \ + ATOMIC_READ_ALL_STACKS_REGS(psp_lo, psp_hi, pshtp_val, \ + pcsp_lo, pcsp_hi, pcshtp, \ + (usd_lo), (usd_hi), cr1_hi_val); \ + (st_regs)->psp_hi.PSP_hi_half = psp_hi; \ + (st_regs)->psp_lo.PSP_lo_half = psp_lo; \ + pshtp.PSHTP_reg = pshtp_val; \ + (st_regs)->psp_hi.PSP_hi_ind += GET_PSHTP_MEM_INDEX(pshtp); \ + (st_regs)->pcsp_hi.PCSP_hi_half = pcsp_hi; \ + (st_regs)->pcsp_lo.PCSP_lo_half = pcsp_lo; \ + (st_regs)->pcsp_hi.PCSP_hi_ind += PCSHTP_SIGN_EXTEND(pcshtp); \ + cr1_hi.CR1_hi_half = cr1_hi_val; \ + *(cr1_hi_p) = cr1_hi; \ +}) +#define ATOMIC_SAVE_ALL_STACKS_REGS(st_regs, cr1_hi_p) \ +({ \ + unsigned long usd_lo; \ + unsigned long usd_hi; \ + \ + ATOMIC_DO_SAVE_ALL_STACKS_REGS(st_regs, cr1_hi_p, \ + usd_lo, usd_hi); \ + (st_regs)->usd_hi.USD_hi_half = usd_hi; \ + (st_regs)->usd_lo.USD_lo_half = usd_lo; \ +}) + +static inline void +atomic_save_all_stacks_regs(e2k_stacks_t *stacks, e2k_cr1_hi_t *cr1_hi_p) +{ + ATOMIC_SAVE_ALL_STACKS_REGS(stacks, cr1_hi_p); +} + +#define user_stack_cannot_be_expanded() test_thread_flag(TIF_USD_NOT_EXPANDED) + +#define set_user_stack_cannot_be_expanded() \ +({ \ + if (TASK_IS_PROTECTED(current)) { \ + set_thread_flag(TIF_USD_NOT_EXPANDED);\ + } \ +}) + +typedef enum restore_caller { + FROM_SYSCALL_N_PROT = 1 << 1, + FROM_SYSCALL_PROT_8 = 1 << 2, + FROM_SYSCALL_PROT_10 = 1 << 3, + FROM_USER_TRAP = 1 << 4, + FROM_SIGRETURN = 1 << 5, + FROM_RET_FROM_FORK = 1 << 6, + FROM_MAKECONTEXT = 1 << 7, + FROM_RETURN_PV_VCPU_TRAP = 1 << 8, + FROM_PV_VCPU_SYSCALL = 1 << 10, + FROM_PV_VCPU_SYSFORK = 1 << 11, +} restore_caller_t; + +#define FROM_PV_VCPU_MODE (FROM_RETURN_PV_VCPU_TRAP | \ + FROM_PV_VCPU_SYSCALL | \ + FROM_PV_VCPU_SYSFORK) + +#ifndef CONFIG_VIRTUALIZATION +/* it native kernel without virtualization support */ + +/* + * Is the CPU at guest Hardware Virtualized mode + * CORE_MODE.gmi is true only at guest HV mode + */ +static inline bool host_is_at_HV_GM_mode(void) +{ + /* native 
kernel does not support VMs and cannot be at guest mode */ + return false; +} +#define usd_cannot_be_expanded(regs) user_stack_cannot_be_expanded() + /* all user stacks can be */ + /* expanded if it possible */ +#define clear_vm_thread_flags() /* virtual machines is not supported */ + /* nothing to clear */ + +#define GET_PARAVIRT_GUEST_MODE(pv_guest, regs) /* nothing to do */ + +#define UPDATE_VCPU_THREAD_CONTEXT(__task, __ti, __regs, __gti, __vcpu) \ + NATIVE_UPDATE_VCPU_THREAD_CONTEXT(__task, __ti, __regs, \ + __gti, __vcpu) +#define CHECK_VCPU_THREAD_CONTEXT(__ti) \ + NATIVE_CHECK_VCPU_THREAD_CONTEXT(__ti) + +static inline void +clear_virt_thread_struct(thread_info_t *thread_info) +{ + /* virtual machines is not supported */ +} + +static __always_inline void +host_exit_to_usermode_loop(struct pt_regs *regs, bool syscall, bool has_signal) +{ + /* native & guest kernels cannot be as host */ +} + +static __always_inline __interrupt void +complete_switch_to_user_func(void) +{ + /* virtualization not supported, so nothing to do */ + /* but the function should switch interrupt control from UPSR to */ + /* PSR and set initial state of user UPSR */ + NATIVE_SET_USER_INITIAL_UPSR(E2K_USER_INITIAL_UPSR); +} +static __always_inline __interrupt void +complete_go2user(thread_info_t *ti, long fn) +{ + /* virtualization not supported, so nothing to do */ + /* but the function should restore user UPSR state */ + NATIVE_WRITE_UPSR_REG(ti->upsr); +} +static inline void free_virt_task_struct(struct task_struct *task) +{ + /* virtual machines is not supported */ +} +#elif defined(CONFIG_KVM_HOST_MODE) +/* It is native host kernel with virtualization support */ +/* or paravirtualized host and guest */ + #include +#endif /* ! 
CONFIG_VIRTUALIZATION */ + +/* + * Restore proper psize field of WD register + */ +static inline void +native_restore_wd_register_psize(e2k_wd_t wd_from) +{ + e2k_wd_t wd; + + raw_all_irq_disable(); + wd = NATIVE_READ_WD_REG(); + wd.psize = wd_from.WD_psize; + NATIVE_WRITE_WD_REG(wd); + raw_all_irq_enable(); +} + +/* + * Preserve current p[c]shtp as they indicate how much to FILL when returning + */ +static inline void +native_preserve_user_hw_stacks_to_copy(e2k_stacks_t *u_stacks, + e2k_stacks_t *cur_stacks) +{ + u_stacks->pshtp = cur_stacks->pshtp; + u_stacks->pcshtp = cur_stacks->pcshtp; +} + +/** + * find_in_u_pcs_list - find frame offset from old_u_pcs_list + * @frame - frame to search + * @delta - chain stack offset will be returned here + * + * Returns 0 on success. + */ +static inline int __find_in_old_u_pcs_list(unsigned long frame, + unsigned long *delta, unsigned long pcs_base, + unsigned long pcs_top, struct list_head *old_u_pcs_list) +{ + struct old_pcs_area *u_pcs; + int ret = -ESRCH; + + if (frame >= pcs_base && frame < pcs_top) { + *delta = 0; + return 0; + } + + list_for_each_entry(u_pcs, old_u_pcs_list, list_entry) { + if (frame >= (unsigned long) u_pcs->base && + frame < (unsigned long) u_pcs->base + + u_pcs->size) { + *delta = pcs_base - (unsigned long) u_pcs->base; + ret = 0; + break; + } + } + + return ret; +} + +static inline int find_in_old_u_pcs_list(unsigned long frame, + unsigned long *delta) +{ + unsigned long pcs_base, pcs_top; + + pcs_base = (unsigned long) CURRENT_PCS_BASE(); + pcs_top = pcs_base + + get_hw_pcs_user_size(¤t_thread_info()->u_hw_stack); + + return __find_in_old_u_pcs_list(frame, delta, pcs_base, pcs_top, + ¤t_thread_info()->old_u_pcs_list); +} + +static inline int __copy_old_u_pcs_list(struct list_head *to, + const struct list_head *from) +{ + const struct old_pcs_area *u_pcs_from; + struct old_pcs_area *u_pcs_to; + + list_for_each_entry(u_pcs_from, from, list_entry) { + u_pcs_to = kmalloc(sizeof(struct old_pcs_area), 
GFP_KERNEL); + if (unlikely(!u_pcs_to)) + return -ENOMEM; + + u_pcs_to->base = u_pcs_from->base; + u_pcs_to->size = u_pcs_from->size; + + list_add_tail(&u_pcs_to->list_entry, to); + } + + return 0; +} + +static inline int copy_old_u_pcs_list(struct thread_info *to, + const struct thread_info *from) +{ + return __copy_old_u_pcs_list(&to->old_u_pcs_list, &from->old_u_pcs_list); +} + +static inline int +update_vm_area_flags(e2k_addr_t start, e2k_size_t len, + vm_flags_t flags_to_set, vm_flags_t flags_to_clear) +{ + int error = 0; + + down_write(¤t->mm->mmap_sem); + len = PAGE_ALIGN(len + (start & ~PAGE_MASK)); + start &= PAGE_MASK; + + error = do_update_vm_area_flags(start, len, flags_to_set, + flags_to_clear); + + up_write(¤t->mm->mmap_sem); + return error; +} + +extern unsigned long *__alloc_thread_stack_node(int node); +extern void __free_thread_stack(void *address); + +extern struct task_struct *init_tasks[]; + +extern e2k_addr_t get_nested_kernel_IP(pt_regs_t *regs, int n); + +#if defined(CONFIG_PARAVIRT_GUEST) +/* paravirtualized kernel (host and guest) */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* It is native guest kernel (without paravirtualization) */ +#include +#elif defined(CONFIG_VIRTUALIZATION) || !defined(CONFIG_VIRTUALIZATION) +/* native kernel with virtualization support */ +/* native kernel without virtualization support */ +#define E2K_FLUSHCPU NATIVE_FLUSHCPU +#define E2K_FLUSHR NATIVE_FLUSHR +#define E2K_FLUSHC NATIVE_FLUSHC +#define COPY_STACKS_TO_MEMORY() NATIVE_COPY_STACKS_TO_MEMORY() +#define GOTO_RETURN_TO_PARAVIRT_GUEST(ret_value) +#define COND_GOTO_RETURN_TO_PARAVIRT_GUEST(cond, ret_value) +#define GOTO_DONE_TO_PARAVIRT_GUEST() +#define COND_GOTO_DONE_TO_PARAVIRT_GUEST(cond) + +#define ONLY_SET_GUEST_GREGS(ti) NATIVE_ONLY_SET_GUEST_GREGS(ti) + +static inline void +restore_wd_register_psize(e2k_wd_t wd_from) +{ + native_restore_wd_register_psize(wd_from); +} + +static inline void +preserve_user_hw_stacks_to_copy(e2k_stacks_t 
*u_stacks, + e2k_stacks_t *cur_stacks) +{ + native_preserve_user_hw_stacks_to_copy(u_stacks, cur_stacks); +} + +#if !defined(CONFIG_VIRTUALIZATION) +/* native kernel without virtualization support */ +#define do_map_user_hard_stack_to_kernel(node, kstart, ubase, size) \ + do_map_native_user_hard_stack_to_kernel(node, kstart, \ + ubase, size) +#define resume_vm_thread() /* none any virtual machines and threads */ +#endif /* ! CONFIG_VIRTUALIZATION */ + +static inline void +virt_cpu_thread_init(struct task_struct *boot_task) +{ + /* nothing to do */ +} + +static inline int +copy_kernel_stacks(struct task_struct *new_task, + unsigned long fn, unsigned long arg) +{ + return native_copy_kernel_stacks(new_task, fn, arg); +} + +#define define_user_hw_stacks_sizes(hw_stacks) \ + native_define_user_hw_stacks_sizes(hw_stacks) + +static __always_inline int +switch_to_new_user(e2k_stacks_t *stacks, hw_stack_t *hw_stacks, + e2k_addr_t cut_base, e2k_size_t cut_size, + e2k_addr_t entry_point, int cui, + unsigned long flags, bool kernel) +{ + return native_switch_to_new_user(stacks, hw_stacks, + cut_base, cut_size, entry_point, cui, flags, kernel); +} +static inline int clone_prepare_spilled_user_stacks(e2k_stacks_t *child_stacks, + const e2k_mem_crs_t *child_crs, const struct pt_regs *regs, + struct sw_regs *new_sw_regs, struct thread_info *new_ti, + unsigned long clone_flags) +{ + return native_clone_prepare_spilled_user_stacks(child_stacks, + child_crs, regs, new_sw_regs, new_ti, clone_flags); +} +static inline int +copy_spilled_user_stacks(struct e2k_stacks *child_stacks, + e2k_mem_crs_t *child_crs, struct sw_regs *new_sw_regs, + const struct thread_info *new_ti) +{ + native_copy_spilled_user_stacks(child_stacks, child_crs, + new_sw_regs, new_ti); + return 0; +} + +#else /* ??? 
*/ + #error "Undefined virtualization mode" +#endif /* CONFIG_PARAVIRT_GUEST */ + +DECLARE_PER_CPU(void *, reserve_hw_stacks); +static inline int on_reserve_stacks(void) +{ + e2k_pcsp_lo_t pcsp_lo; + unsigned long res_base; + + WARN_ON_ONCE(!psr_and_upsr_irqs_disabled()); + + pcsp_lo = READ_PCSP_LO_REG(); + res_base = (unsigned long) raw_cpu_read(reserve_hw_stacks); + + return AS(pcsp_lo).base >= res_base + KERNEL_PC_STACK_OFFSET && + AS(pcsp_lo).base < res_base + KERNEL_PC_STACK_OFFSET + + KERNEL_PC_STACK_SIZE; +} + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +/* This function is used to fixup ret_stack, so make sure it itself + * does not rely on correct values in ret_stack by using "notrace". */ +notrace +static inline void apply_graph_tracer_delta(unsigned long delta) +{ + int i, last; + + if (likely(!current->ret_stack)) + return; + + last = current->curr_ret_stack; + for (i = min(last, FTRACE_RETFUNC_DEPTH - 1); i >= 0; i--) + current->ret_stack[i].fp += delta; +} +#else +static inline void apply_graph_tracer_delta(unsigned long delta) +{ +} +#endif + +extern int user_hw_stacks_copy_full(struct e2k_stacks *stacks, + pt_regs_t *regs, e2k_mem_crs_t *crs); + +extern e2k_addr_t get_nested_kernel_IP(pt_regs_t *regs, int n); +extern unsigned long remap_e2k_stack(unsigned long addr, + unsigned long old_size, unsigned long new_size, bool after); + +extern int find_cui_by_ip(unsigned long ip); + +#endif /* _E2K_PROCESS_H */ + diff --git a/arch/e2k/include/asm/processor.h b/arch/e2k/include/asm/processor.h new file mode 100644 index 000000000000..71cdd2a1fcdc --- /dev/null +++ b/arch/e2k/include/asm/processor.h @@ -0,0 +1,460 @@ +/* + * include/asm-e2k/processor.h + * + * Copyright (C) 2001 MCST + */ + +#ifndef _E2K_PROCESSOR_H_ +#define _E2K_PROCESSOR_H_ +#ifndef __ASSEMBLY__ + +#include +#include + +#include +#include +#include +#include +#include +#include + + +/* We want to use OSGD for fast access to task_struct */ +#define ARCH_MIN_TASKALIGN E2K_ALIGN_GLOBALS_SZ + +/* + 
* CPU type, hardware bug flags, and per-CPU state. + */ +typedef struct cpuinfo_e2k { + __u8 family; + __u8 model; + __u8 revision; + char vendor[16]; + __u64 proc_freq; /* frequency of processor */ +#ifdef CONFIG_SMP + int cpu; + __u64 mmu_last_context; + __u64 ipi_count; +#endif +} cpuinfo_e2k_t; + +extern cpuinfo_e2k_t cpu_data[NR_CPUS]; + +#define my_cpu_data1(num_cpu) cpu_data[num_cpu] + +#define my_cpu_data cpu_data[smp_processor_id()] +#define raw_my_cpu_data cpu_data[raw_smp_processor_id()] + +#define STACK_TOP (current->thread.flags & E2K_FLAG_32BIT ? \ + USER32_STACK_TOP : USER64_STACK_TOP) +#define STACK_TOP_MAX USER64_STACK_TOP + +#define HAVE_ARCH_PICK_MMAP_LAYOUT +#define HAVE_ARCH_UNMAPPED_AREA + +/* This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ +#define TASK_UNMAPPED_BASE \ + PAGE_ALIGN((current->thread.flags & \ + (E2K_FLAG_32BIT | E2K_FLAG_PROTECTED_MODE)) ? \ + (TASK32_SIZE / 3) : (TASK_SIZE / 3)) + +/* + * Size of io_bitmap in longwords: 32 is ports 0-0x3ff. 
+ */ +#define IO_BITMAP_SIZE 32 +#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap) +#define INVALID_IO_BITMAP_OFFSET 0x8000 + +typedef struct thread_struct { +#ifndef CONFIG_CPU_HAS_FILL_INSTRUCTION + /* Used as a temporary area */ + struct { + e2k_cr0_hi_t cr0_hi; + e2k_cr1_lo_t cr1_lo; + e2k_cr1_hi_t cr1_hi; + e2k_psp_lo_t u_psp_lo; + e2k_psp_hi_t u_psp_hi; + e2k_pcsp_lo_t u_pcsp_lo; + e2k_pcsp_hi_t u_pcsp_hi; + int from; + bool return_to_user; +# if defined(CONFIG_VIRTUALIZATION) && !defined(CONFIG_KVM_GUEST_KERNEL) + bool from_paravirt_guest; +# endif + } fill; +#endif + u32 context; /* context of running process */ + struct sw_regs sw_regs; /* switch regs */ + + struct { + struct { + e2k_dibcr_t dibcr; + u64 dibar0; + u64 dibar1; + u64 dibar2; + u64 dibar3; + e2k_ddbcr_t ddbcr; + u64 ddbar0; + u64 ddbar1; + u64 ddbar2; + u64 ddbar3; + } regs; + /* user breakpoints set via ptrace */ + struct perf_event *hbp_data[HBP_NUM]; + struct perf_event *hbp_instr[HBP_NUM]; + } debug; + + /* Used by an old implementation of C3 sleep state */ + struct { + e2k_dibcr_t dibcr; + e2k_ddbcr_t ddbcr; + e2k_dimcr_t dimcr; + e2k_ddmcr_t ddmcr; + } C3; + + unsigned long flags; /* various flags (e.g. 
for mmap) */ +} thread_t; +#endif /* !__ASSEMBLY__ */ + +/* + * Thread flags + */ +#define E2K_FLAG_32BIT 0x01 /* task is older 32-bit binary */ +#define E2K_FLAG_PROTECTED_MODE 0x02 /* task is running in protected mode */ +#define E2K_FLAG_BIN_COMP_CODE 0x04 /* task is binary compiler code */ +#define PRINT_FUNCY_STACK_WORKS_BIT 3 /* do print_stack */ +#define PRINT_FUNCY_STACK_WORKS \ + (1UL << PRINT_FUNCY_STACK_WORKS_BIT) /* 0x08 */ +#define E2K_FLAG_PRINT_ALL_TASK 0x10 /* do print_stack */ +#define PRINT_STACK_WORKS 0x20 /* do print_stack */ +#define E2K_FLAG_64BIT_BINCO 0x40 /* 32-bit binco is running 64-bit x86 */ +#define E2K_FLAG_3P_ELF32 0x80 /* can be removed when only elf64 3P */ + /* is supported */ + +/* + * Various task info flags (is common for host and guest task) + * See last 'flags' argument of function switch_to_new_user() and + * same as field 'flags' of structure kvm_task_info_t (asm/kvm/hypervisor.h) + */ +#define BIN_32_CODE_TASK_FLAG_BIT 2 /* task is 32-bit binary */ + /* application */ +#define PROTECTED_CODE_TASK_FLAG_BIT 3 /* task is running in */ + /* protected mode */ +#define BIN_COMP_CODE_TASK_FLAG_BIT 4 /* task is binary application */ + /* compiler code */ +#define CLONE_SETTLS_TASK_FLAG_BIT 5 /* set new TLS for thread */ +#define DO_PRESENT_HW_STACKS_TASK_FLAG_BIT 8 /* hardware stacks should be */ + /* made present (populated) */ +#define DO_LOCK_HW_STACKS_TASK_FLAG_BIT 9 /* hardware stacks should be */ + /* locked */ +#define PS_HAS_NOT_GUARD_PAGE_TASK_BIT 12 /* hardware procedure stack */ + /* has not extra guard page */ +#define PCS_HAS_NOT_GUARD_PAGE_TASK_BIT 13 /* hardware chain stack */ + /* has not extra guard page */ +#define SWITCH_TO_COMPLETE_TASK_BIT 14 /* switch to kernel hardware */ + /* stacks to complete task */ + /* else to change user stacks */ + /* and return to them */ +#define RETURN_TO_USER_STACKS_TASK_BIT 15 /* return to user hardware */ + /* stacks, else switch to */ + /* kernel stacks */ + +#define 
BIN_32_CODE_TASK_FLAG (1UL << BIN_32_CODE_TASK_FLAG_BIT) +#define BIN_COMP_CODE_TASK_FLAG (1UL << BIN_COMP_CODE_TASK_FLAG_BIT) +#define PROTECTED_CODE_TASK_FLAG (1UL << PROTECTED_CODE_TASK_FLAG_BIT) +#define CLONE_SETTLS_TASK_FLAG (1UL << CLONE_SETTLS_TASK_FLAG_BIT) +#define DO_PRESENT_HW_STACKS_TASK_FLAG \ + (1UL << DO_PRESENT_HW_STACKS_TASK_FLAG_BIT) +#define DO_LOCK_HW_STACKS_TASK_FLAG \ + (1UL << DO_LOCK_HW_STACKS_TASK_FLAG_BIT) +#define PS_HAS_NOT_GUARD_PAGE_TASK_FLAG \ + (1UL << PS_HAS_NOT_GUARD_PAGE_TASK_BIT) +#define PCS_HAS_NOT_GUARD_PAGE_TASK_FLAG \ + (1UL << PCS_HAS_NOT_GUARD_PAGE_TASK_BIT) +#define SWITCH_TO_COMPLETE_TASK_FLAG (1UL << SWITCH_TO_COMPLETE_TASK_BIT) +#define RETURN_TO_USER_STACKS_TASK_FLAG (1UL << RETURN_TO_USER_STACKS_TASK_BIT) + +#ifndef __ASSEMBLY__ + +#define K_STK_BASE(thr) ((thr)->k_stk_base) +#define K_STK_TOP(thr) ((thr)->k_stk_base + KERNEL_C_STACK_SIZE) + + +#define INIT_THREAD { 0 } + +#define INIT_MMAP \ +{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL } + +extern void start_thread(struct pt_regs *regs, + unsigned long entry, unsigned long sp); +extern int native_do_prepare_start_thread_frames(unsigned long entry, + unsigned long sp); +extern long do_sys_execve(unsigned long entry, unsigned long sp, int kernel); + +/* Forward declaration, a strange C thing */ +struct task_struct; +struct mm_struct; + +/* Free all resources held by a thread. 
*/ +extern void release_thread(struct task_struct *); +/* + * create a kernel thread without removing it from tasklists + */ +extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); + +extern void thread_init(void); + +/* + * Prepare to copy thread state - unlazy all lazy status + */ +#define prepare_to_copy(tsk) do { } while (0) + +/* Copy and release all segment info associated with a VM */ + +#define copy_segments(tsk, mm) do { } while (0) /* We don't have */ +#define release_segments(mm) do { } while (0) /* segments on E2K */ + +extern unsigned long boot_option_idle_override; +extern unsigned long idle_halt; +extern void native_default_idle(void); +extern bool idle_nomwait; + +unsigned long get_wchan(struct task_struct *p); +#define KSTK_EIP(tsk) \ +({ \ + struct pt_regs *pt_regs = task_thread_info(tsk)->pt_regs; \ + (pt_regs) ? \ + (unsigned long)AS_STRUCT(pt_regs->crs.cr0_hi).ip << 3 : \ + 0UL; \ +}) +#define KSTK_ESP(tsk) \ +({ \ + struct pt_regs *pt_regs = task_thread_info(tsk)->pt_regs; \ + (pt_regs) ? 
AS_STRUCT(pt_regs->stacks.usd_lo).base : \ + task_thread_info(tsk)->u_stack.top; \ +}) + +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT +# define TASK_IS_BINCO(tsk) (tsk->thread.flags & E2K_FLAG_BIN_COMP_CODE) +#else +# define TASK_IS_BINCO(tsk) 0UL +#endif + +#ifdef CONFIG_PROTECTED_MODE +# define TASK_IS_PROTECTED(tsk) (tsk->thread.flags & E2K_FLAG_PROTECTED_MODE) +#else +# define TASK_IS_PROTECTED(tsk) 0UL +#endif + +#define native_cpu_relax() \ + __asm__ __volatile__("{nop 7}" ::: "memory", PREEMPTION_CLOBBERS) +#define cpu_relax_lowlatency() cpu_relax() +#define native_cpu_relax_no_resched() native_cpu_relax() + +#define ARCH_HAS_PREFETCH +static inline void prefetch(const void *ptr) +{ + E2K_PREFETCH_L1_SPEC(ptr); +} + +#define ARCH_HAS_PREFETCHW +static inline void prefetchw(const void *ptr) +{ + E2K_PREFETCH_L1_SPEC(ptr); +} + +static inline void prefetch_nospec(const void *ptr) +{ + E2K_PREFETCH_L1_NOSPEC(ptr); +} + +#define prefetch_offset(ptr, offset) \ +do { \ + /* Use fully speculative load since ptr could be bad */ \ + E2K_PREFETCH_L1_SPEC_OFFSET((ptr), (offset)); \ +} while (0) + +#define prefetch_nospec_offset(ptr, offset) \ +do { \ + E2K_PREFETCH_L2_NOSPEC_OFFSET((ptr), (offset)); \ +} while (0) + +/* Use L2 cache line size since we are prefetching to L2 */ +#define PREFETCH_STRIDE 64 + +static __always_inline void prefetch_nospec_range(const void *addr, size_t len) +{ + s64 i, rem, prefetched; + + if (__builtin_constant_p(len) && len < 24 * PREFETCH_STRIDE) { + if (len > 0) + prefetch_nospec(addr); + if (len > PREFETCH_STRIDE) + prefetch_nospec_offset(addr, PREFETCH_STRIDE); + if (len > 2 * PREFETCH_STRIDE) + prefetch_nospec_offset(addr, 2 * PREFETCH_STRIDE); + if (len > 3 * PREFETCH_STRIDE) + prefetch_nospec_offset(addr, 3 * PREFETCH_STRIDE); + if (len > 4 * PREFETCH_STRIDE) + prefetch_nospec_offset(addr, 4 * PREFETCH_STRIDE); + if (len > 5 * PREFETCH_STRIDE) + prefetch_nospec_offset(addr, 5 * PREFETCH_STRIDE); + if (len > 6 * PREFETCH_STRIDE) + 
prefetch_nospec_offset(addr, 6 * PREFETCH_STRIDE); + if (len > 7 * PREFETCH_STRIDE) + prefetch_nospec_offset(addr, 7 * PREFETCH_STRIDE); + if (len > 8 * PREFETCH_STRIDE) + prefetch_nospec_offset(addr, 8 * PREFETCH_STRIDE); + if (len > 9 * PREFETCH_STRIDE) + prefetch_nospec_offset(addr, 9 * PREFETCH_STRIDE); + if (len > 10 * PREFETCH_STRIDE) + prefetch_nospec_offset(addr, 10 * PREFETCH_STRIDE); + if (len > 11 * PREFETCH_STRIDE) + prefetch_nospec_offset(addr, 11 * PREFETCH_STRIDE); + if (len > 12 * PREFETCH_STRIDE) + prefetch_nospec_offset(addr, 12 * PREFETCH_STRIDE); + if (len > 13 * PREFETCH_STRIDE) + prefetch_nospec_offset(addr, 13 * PREFETCH_STRIDE); + if (len > 14 * PREFETCH_STRIDE) + prefetch_nospec_offset(addr, 14 * PREFETCH_STRIDE); + if (len > 15 * PREFETCH_STRIDE) + prefetch_nospec_offset(addr, 15 * PREFETCH_STRIDE); + if (len > 16 * PREFETCH_STRIDE) + prefetch_nospec_offset(addr, 16 * PREFETCH_STRIDE); + if (len > 17 * PREFETCH_STRIDE) + prefetch_nospec_offset(addr, 17 * PREFETCH_STRIDE); + if (len > 18 * PREFETCH_STRIDE) + prefetch_nospec_offset(addr, 18 * PREFETCH_STRIDE); + if (len > 19 * PREFETCH_STRIDE) + prefetch_nospec_offset(addr, 19 * PREFETCH_STRIDE); + if (len > 20 * PREFETCH_STRIDE) + prefetch_nospec_offset(addr, 20 * PREFETCH_STRIDE); + if (len > 21 * PREFETCH_STRIDE) + prefetch_nospec_offset(addr, 21 * PREFETCH_STRIDE); + if (len > 22 * PREFETCH_STRIDE) + prefetch_nospec_offset(addr, 22 * PREFETCH_STRIDE); + if (len > 23 * PREFETCH_STRIDE) + prefetch_nospec_offset(addr, 23 * PREFETCH_STRIDE); + + return; + } + + rem = len % (4 * PREFETCH_STRIDE); + prefetched = len / (4 * PREFETCH_STRIDE); + + for (i = 0; i <= (s64) len - 256; i += 256) + E2K_PREFETCH_L2_NOSPEC_256(addr + i); + + if (rem > 0) + prefetch_nospec(addr + prefetched); + if (rem > PREFETCH_STRIDE) + prefetch_nospec_offset(addr + prefetched, PREFETCH_STRIDE); + if (rem > 2 * PREFETCH_STRIDE) + prefetch_nospec_offset(addr + prefetched, 2 * PREFETCH_STRIDE); + if (rem > 3 * 
PREFETCH_STRIDE) + prefetch_nospec_offset(addr + prefetched, 3 * PREFETCH_STRIDE); +} + +extern u64 cacheinfo_get_l1d_line_size(void); +extern void show_cacheinfo(struct seq_file *m); +extern int get_cpuinfo(char *buffer); +extern void native_print_machine_type_info(void); + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* pure guest kernel (not paravirtualized) */ +#include +#else /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ +/* it is native kernel with or without virtualization support */ + +#define prepare_start_thread_frames(entry, sp) \ + native_prepare_start_thread_frames(entry, sp) + +#define default_idle() native_default_idle() +#define cpu_relax() native_cpu_relax() +#define cpu_relax_no_resched() native_cpu_relax_no_resched() + +static inline void +print_machine_type_info(void) +{ + native_print_machine_type_info(); +} + +#ifdef CONFIG_VIRTUALIZATION +/* it is host kernel with virtualization support */ +static inline void +paravirt_banner(void) +{ + printk(KERN_INFO "Booting host kernel with virtualization support\n"); +} +#else /* ! CONFIG_VIRTUALIZATION */ +/* it is native kernel without any virtualization */ +static inline void +paravirt_banner(void) +{ + printk(KERN_INFO "Booting native kernel without any virtualization " + "support\n"); +} +#endif /* CONFIG_VIRTUALIZATION */ +#endif /* CONFIG_PARAVIRT */ + +#endif /* !__ASSEMBLY__ */ + +/* + * If there are user pt_regs, return them. + * Return the first kernel pt_regs otherwise. + * + * This way it should be compatible with all other architectures + * which always return the first pt_regs structure. 
+ */ +#define current_pt_regs() \ +({ \ + struct pt_regs *__cpr_pt_regs = current_thread_info()->pt_regs; \ + if (__cpr_pt_regs) \ + __cpr_pt_regs = find_entry_regs(__cpr_pt_regs); \ + __cpr_pt_regs; \ +}) + +#define task_pt_regs(task) \ +({ \ + struct pt_regs *__tpr_pt_regs = task_thread_info(task)->pt_regs; \ + if (__tpr_pt_regs) \ + __tpr_pt_regs = find_entry_regs(__tpr_pt_regs); \ + __tpr_pt_regs; \ +}) + +static inline int cpu_max_cores_num(void) +{ + if (IS_MACHINE_E1CP) + return 1; + else if (IS_MACHINE_ES2 || IS_MACHINE_E2C3) + return 2; + else if (IS_MACHINE_E2S) + return 4; + else if (IS_MACHINE_E8C || IS_MACHINE_E8C2) + return 8; + else if (IS_MACHINE_E12C) + return 12; + else if (IS_MACHINE_E16C) + return 16; + else + BUG(); +} + +static inline bool range_includes(unsigned long addr1, size_t size1, + unsigned long addr2, size_t size2) +{ + return addr2 >= addr1 && addr2 + size2 <= addr1 + size1; +} + +static inline bool range_intersects(unsigned long addr1, size_t size1, + unsigned long addr2, size_t size2) +{ + return addr1 + size1 > addr2 && addr2 + size2 > addr1; +} + +#endif /* _E2K_PROCESSOR_H_ */ diff --git a/arch/e2k/include/asm/prom.h b/arch/e2k/include/asm/prom.h new file mode 100644 index 000000000000..a79f45ee2ec8 --- /dev/null +++ b/arch/e2k/include/asm/prom.h @@ -0,0 +1,17 @@ +#ifndef __E2K_PROM_H +#define __E2K_PROM_H + +#ifdef CONFIG_OF +#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 2 +#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1 + +#define of_compat_cmp(s1, s2, l) strncmp((s1), (s2), (l)) +#define of_prop_cmp(s1, s2) strcasecmp((s1), (s2)) +#define of_node_cmp(s1, s2) strcmp((s1), (s2)) + +int of_getintprop_default(struct device_node *np, const char *name, int def); + +#define of_node_to_nid(dp) (NUMA_NO_NODE) +#endif + +#endif /* __E2K_PROM_H */ diff --git a/arch/e2k/include/asm/prot_loader.h b/arch/e2k/include/asm/prot_loader.h new file mode 100644 index 000000000000..a9aae0e13d53 --- /dev/null +++ b/arch/e2k/include/asm/prot_loader.h @@ -0,0 
+1,176 @@ +#ifndef _E2K_PROT_LOADER_H_ +#define _E2K_PROT_LOADER_H_ + +#include + +#define USE_ELF64 0 + +#define ARGS_AS_ONE_ARRAY + +#define E2k_ELF_ARGV_IND 0 +#define E2k_ELF_ENVP_IND 1 +#define E2k_ELF_AUX_IND 2 +#define E2k_ELF_ARG_NUM_AP 3 + +#define DT_PLTGOTSZ 0x7000101b +#define DT_INIT_GOT 0x7000101c + +#ifdef CONFIG_HAVE_FUTEX_CMPXCHG +#define futex_cmpxchg_enabled 1 +#else +extern int __read_mostly futex_cmpxchg_enabled; +#endif + +typedef struct { + e2k_pl_t mdd_init_got; + e2k_ptr_t mdd_got; +} umdd_t; +#define MDD_PROT_SIZE ((sizeof(umdd_t) + 15) & ~15) + +typedef struct { + e2k_pl_lo_t mdd_init_got; + e2k_pl_lo_t mdd_init; + e2k_pl_lo_t mdd_fini; + e2k_pl_lo_t mdd_start; + e2k_ptr_t mdd_got; + /* By a call descriptors of the areas of memory containing preparations + * are located here (without external tags) for formation of the tagged + * values placed in sections .gott (OT), .gctt (CT) and .gompt (OMP) + * of the loaded module. */ + e2k_ptr_t mdd_gtt[3]; +} umdd_old_t; +#define MDD_OLD_PROT_SIZE ((sizeof(umdd_old_t) + 15) & ~15) + +typedef struct { + u64 got_addr; + u64 got_len; + u32 cui; + u64 init_got_point; + u64 entry_point; + u64 init_point; + u64 fini_point; +} kmdd_t; + + /* It's here for compatibility with old loader */ + +typedef enum { + RTL_FT_NONE, /* The type isn't defined */ + RTL_FT_EXE, /* Loading file */ + RTL_FT_LIB, /* Dynamic library */ + RTL_FT_DRV /* System driver */ +} rtl_FileType_t; + +typedef struct rtl_Unit_s rtl_Unit_t; + +struct rtl_Unit_s { + char *u_code; /* The pointer on code sector */ + char *u_data; /* The pointer on data sector */ + char *u_name; /* Module name */ + char *u_fullname; /* Module full name */ + char *u_type_map; + char *u_type_structs; /* The pointer on the array of structures + * for construction of templates */ + char *u_type_structs_end; /* The pionter for the end of the array + * of structures */ + rtl_Unit_t *u_next; /* The pointer on the following module */ + rtl_Unit_t *u_prev; /* The 
pointer on the previous module */ + char *u_init; /* The pointer on function of initialization + * of the module */ + char *u_fini; /* The pointer on function of a finalization + * of the module */ + unsigned long long u_entry; /* Entry point */ + rtl_FileType_t u_mtype; /* Module type */ + unsigned int u_num; /* Number of the module */ + unsigned int u_tnum; /* Number of the first class of the + * module */ + unsigned int u_tcount; /* Quantity of classes in the module */ + + struct { + unsigned long long ub_code; /* Base address of a code */ + unsigned long long ub_data; /* Base address of data */ + unsigned long long ub_bss; + unsigned long long ub_brk; /* Address brk */ + } base; + + struct { + unsigned long long uc_start; /* Code start address */ + unsigned long long uc_dataend; /* End address data file */ + unsigned long long uc_allocend; /* End address all data */ + unsigned long long uc_mapend; /* End address mapped memory */ + unsigned long long uc_mapoff; /* Initial offset of a code in the + * file */ + unsigned int uc_prot; /* Flags of protection of code pages */ + } code; + + struct { + unsigned long long ud_start; /* Initial address of data */ + unsigned long long ud_dataend; /* End address of file data */ + unsigned long long ud_allocend; /* End address of all data */ + unsigned long long ud_mapend; /* End address of mapped memory */ + unsigned long long ud_mapoff; /* Initial offset of data on the file */ + unsigned int ud_prot; /* Pprtection data data pages */ + } data; + +/* ELF file */ + char *u_eheader; /* The pointer on file title */ + char *u_pheader; /* The pointer on program title */ + char *u_symtab; /* The pointer on the dynamic character table */ + char *u_symtab_st; /* The pointer on the static character table */ + char *u_strtab; /* The pointer on the dynamic strings table */ + char *u_strtab_st; /* The pointer on the static strings table */ + unsigned int *u_hash; /* The pointer on hash the table of symbolic + * names */ + char *u_got; /* 
The pointer on the global table of offsets */ + char *u_gtt; /* The pointer on global tables of types */ + char *u_type; /* The pointer on the table of types for Xi ++ */ + char *u_dynrel; /* The pointer on the table of dynamic + * relocation */ + char *u_gttrel; /* The pointer on the table of relocation for + * GTT */ + char *u_typerel; /* The pointer on the table of relocation for + * types */ + char *u_dyn; /* The pointer on dynamic section */ + char *u_tobj; /* The pointer on section of the description of + * classes */ + char *u_tcast; /* The pointer on section of coercions */ + char *u_typed; /* The pointer on section of descriptors of + * types */ +struct { + unsigned long long ul_code; /* Code segment size */ + unsigned long long ul_data; /* Data segment size */ + unsigned long long ul_strtab; /* Size of the strings table */ + unsigned long long ul_strtab_st; /* Size of the strings table */ + unsigned long long ul_type; /* The size of the table of types + * for Xi ++ */ + unsigned long long ul_dynrel; /* Size of the table of dynamic + * relocation */ + unsigned long long ul_gttrel; /* The size of the table of + * relocation for GTT */ + unsigned long long ul_typerel; /* The size of the table of relocation + * for types */ + unsigned int ul_symtab; /* Quantity of elements of the dynamic + * character table */ + unsigned int ul_symtab_st; /* Quantity of elements of the static + * character table */ + unsigned int ul_hash; /* Size table hash */ + unsigned int ul_gtt; /* Size of the global table of types */ + unsigned int ul_tobj; /* Size of section of member data */ + unsigned int ul_typed; /* Size of section of types */ + unsigned int ul_tcast; /* Size of section of reductions */ +} len; + +}; + +/* Global Type Table (GTT) correction. C++ stuff hadling. 
*/ +extern void rtl32_CorrectionType( rtl_Unit_t *unit_p ); + +extern long sys_load_cu_elf32_3P(char *name, kmdd_t *mdd); +extern long sys_load_cu_elf64_3P(char *name, kmdd_t *mdd); + +extern long sys_unload_cu_elf32_3P(unsigned long glob_base, + size_t glob_size); +extern long sys_unload_cu_elf64_3P(unsigned long glob_base, + size_t glob_size); + +#endif /* _E2K_PROT_LOADER_H_ */ + diff --git a/arch/e2k/include/asm/protected_syscalls.h b/arch/e2k/include/asm/protected_syscalls.h new file mode 100644 index 000000000000..867563bec625 --- /dev/null +++ b/arch/e2k/include/asm/protected_syscalls.h @@ -0,0 +1,257 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/e2k/include/asm/protected_syscalls.h, v 1.0 25/12/2019. + * + * Copyright (C) 2019 MCST + */ + +/****************** PROTECTED SYSTEM CALL DEBUG DEFINES *******************/ + +#ifndef _E2K_PROTECTED_SYSCALLS_H_ +#define _E2K_PROTECTED_SYSCALLS_H_ + +#ifdef CONFIG_PROTECTED_MODE + +#include +#include + +#undef DYNAMIC_DEBUG_SYSCALLP_ENABLED +#define DYNAMIC_DEBUG_SYSCALLP_ENABLED 1 /* Dynamic prot. syscalls control */ + +#if (!DYNAMIC_DEBUG_SYSCALLP_ENABLED) + +/* Static debug defines (old style): */ + +#undef DEBUG_SYSCALLP +#define DEBUG_SYSCALLP 0 /* System Calls trace */ +#undef DEBUG_SYSCALLP_CHECK +#define DEBUG_SYSCALLP_CHECK 1 /* Protected System Call args checks/warnings */ +#define PM_SYSCALL_WARN_ONLY 1 + +#if DEBUG_SYSCALLP +#define DbgSCP printk +#else +#define DbgSCP(...) +#endif /* DEBUG_SYSCALLP */ + +#if DEBUG_SYSCALLP_CHECK +#define DbgSCP_ERR(fmt, ...) pr_err(fmt, ##__VA_ARGS__) +#define DbgSCP_WARN(fmt, ...) pr_warn(fmt, ##__VA_ARGS__) +#define DbgSCP_ALERT(fmt, ...) pr_alert(fmt, ##__VA_ARGS__) +#else +#define DbgSCP_ERR(...) /* no-op: arg checks disabled; name must match the DbgSCP_* used at call sites */ +#define DbgSCP_WARN(...) /* no-op: arg checks disabled */ +#define DbgSCP_ALERT(...) /* no-op: arg checks disabled */ +#endif /* DEBUG_SYSCALLP_CHECK */ + +#else /* DYNAMIC_DEBUG_SYSCALLP_ENABLED */ + +/* Dynamic debug defines (new style): + * When enabled, environment variables control syscall + * debug/diagnostic output.
+ * To enable particular control: export <option>=1 + * To disable particular control: export <option>=0 + * + * The options are as follows: + * + * PM_SC_DBG_MODE_DEBUG - Output basic debug info on system calls to journal; + * + * PM_SC_DBG_MODE_COMPLEX_WRAPPERS - Output debug info on protected + * complex syscall wrappers to journal; + * PM_SC_DBG_MODE_CHECK - Report issue to journal if syscall arg + * mismatches expected format; + * PM_SC_DBG_MODE_WARN_ONLY - If error in arg format detected, + * don't block syscall but run it anyway; + * PM_SC_DBG_MODE_CONV_STRUCT - Output to journal debug info on converting + * structures in syscall args; + * PM_SC_DBG_MODE_SIGNALS - Output to system journal debug info related + * to signal manipulation; + * PM_SC_DBG_MODE_NO_ERR_MESSAGES - Blocks diagnostic messages to journal + * (may be useful when running latency-sensitive tests/applications); + * + * PM_MM_CHECK_4_DANGLING_POINTERS - Enable check for dangling descriptors + * allocated with 'malloc' (libc specific); + * + * PM_SC_DBG_MODE_ALL - Enable all debug/diagnostic output to system journal; + * + * PM_SC_DBG_MODE_DISABLED - Disable debug/diagnostic output to system journal. + */ + +#include "asm/syscalls.h" + +#define DbgSCP(fmt, ...) \ +do { \ + if (arch_init_pm_sc_debug_mode(PM_SC_DBG_MODE_DEBUG)) \ + pr_info("%s: " fmt, __func__, ##__VA_ARGS__); \ +} while (0) + +#define DbgSCP_ERR(fmt, ...) \ +do { \ + if (arch_init_pm_sc_debug_mode(PM_SC_DBG_MODE_CHECK) \ + && !(current->mm->context.pm_sc_debug_mode \ + & PM_SC_DBG_MODE_NO_ERR_MESSAGES)) \ + pr_err("%s: " fmt, __func__, ##__VA_ARGS__); \ +} while (0) +#define DbgSCP_ALERT(fmt, ...) \ +do { \ + if (arch_init_pm_sc_debug_mode(PM_SC_DBG_MODE_CHECK) \ + && !(current->mm->context.pm_sc_debug_mode \ + & PM_SC_DBG_MODE_NO_ERR_MESSAGES)) \ + pr_alert("%s: " fmt, __func__, ##__VA_ARGS__); \ +} while (0) +#define DbgSCP_WARN(fmt, ...)
\ +do { \ + if (arch_init_pm_sc_debug_mode(PM_SC_DBG_MODE_CHECK) \ + && !(current->mm->context.pm_sc_debug_mode \ + & PM_SC_DBG_MODE_NO_ERR_MESSAGES)) \ + pr_warn("%s: " fmt, __func__, ##__VA_ARGS__); \ +} while (0) + +#define PM_SYSCALL_WARN_ONLY \ + (arch_init_pm_sc_debug_mode(PM_SC_DBG_MODE_WARN_ONLY)) + /* Backward compatibility with syscalls */ + /* NB> Legacy s/w may exist that was written incompatibly with + * context protection principles. + * For example, tests for syscalls may be of that kind + * to intentionally pass bad arguments to syscalls to check + * if behavior is correct in that case. + * This define, being activated, eases argument check control + * when doing system calls in the protected execution mode: + * - a warning still gets reported to the journal, but + * - system call is not blocked as it is normally done. + */ + +#define DEBUG_SYSCALLP_CHECK 1 /* protected syscall args checks enabled */ + +#endif /* DYNAMIC_DEBUG_SYSCALLP_ENABLED */ + +/* If running in the orthodox protected mode, deliver exception to break execution: */ +#define PM_EXCEPTION_IF_ORTH_MODE(signo, code, errno) \ +do { \ + if (!PM_SYSCALL_WARN_ONLY) \ + pm_deliver_exception(signo, code, errno); \ +} while (0) + +/**************************** END of DEBUG DEFINES ***********************/ + + +static inline +long make_ap_lo(e2k_addr_t base, long size, long offset, int access) +{ + return MAKE_AP_LO(base, size, offset, access); +} + +static inline +long make_ap_hi(e2k_addr_t base, long size, long offset, int access) +{ + return MAKE_AP_HI(base, size, offset, access); +} + +static inline +int e2k_ptr_itag(long low) +{ + e2k_ptr_t ptr; + + AW(ptr).lo = low; + + return AS(ptr).itag; +} + +static inline +int e2k_ptr_rw(long low) +{ + e2k_ptr_t ptr; + + AW(ptr).lo = low; + + return AS(ptr).rw; +} + +static inline +unsigned long e2k_ptr_ptr(long low, long hiw, unsigned int min_size) +{ + e2k_ptr_t ptr; + unsigned int ptr_size; + + AW(ptr).lo = low; + AW(ptr).hi = hiw; + ptr_size =
AS(ptr).size - AS(ptr).curptr; + + if (ptr_size < min_size) { + DbgSCP_ALERT(" Pointer is too small: %d < %d\n", + ptr_size, min_size); + return 0; + } else { + return E2K_PTR_PTR(ptr, GET_SBR_HI()); + } +} + +static inline +unsigned long e2k_ptr_curptr(long low, long hiw) +{ + e2k_ptr_t ptr; + + AW(ptr).lo = low; + AW(ptr).hi = hiw; + + return AS(ptr).curptr; +} + +static inline +unsigned int e2k_ptr_size(long low, long hiw, unsigned int min_size) +{ + e2k_ptr_hi_t hi; + unsigned int ptr_size; + + AW(hi) = hiw; + ptr_size = AS(hi).size - AS(hi).curptr; + + if (ptr_size < min_size) { + DbgSCP_ALERT(" Pointer is too small: %d < %d\n", + ptr_size, min_size); + return 0; + } else { + return ptr_size; + } +} + +static inline int e2k_ptr_str_check(char __user *str, u64 max_size) +{ + long slen; + + slen = strnlen_user(str, max_size); + + if (unlikely(!slen || slen > max_size)) + return 1; + + return 0; +} + +static inline char __user *e2k_ptr_str(long low, long hiw, u64 sbr_hi) +{ + char __user *str; + e2k_ptr_hi_t hi = { .word = hiw }; + + str = (char __user *) __E2K_PTR_PTR(low, hiw, sbr_hi); + + if (!e2k_ptr_str_check(str, AS(hi).size - AS(hi).curptr)) + return str; + + return NULL; +} + +extern void pm_deliver_exception(int signo, int code, int errno); + + +#else /* #ifndef CONFIG_PROTECTED_MODE */ + +#define DbgSCP(...) +#define DbgSC_ERR(...) +#define DbgSC_WARN(...) +#define DbgSC_ALERT(...) 
+ +#endif /* CONFIG_PROTECTED_MODE */ + + +#endif /* _E2K_PROTECTED_SYSCALLS_H_ */ + diff --git a/arch/e2k/include/asm/ptrace-abi.h b/arch/e2k/include/asm/ptrace-abi.h new file mode 100644 index 000000000000..3043bd7faecc --- /dev/null +++ b/arch/e2k/include/asm/ptrace-abi.h @@ -0,0 +1,78 @@ +#ifndef _ASM_PTRACE_ABI_H +#define _ASM_PTRACE_ABI_H + +#define PTRACE_OLDSETOPTIONS 21 + +/* only useful for access 32bit programs / kernels */ +#define PTRACE_GET_THREAD_AREA 25 +#define PTRACE_SET_THREAD_AREA 26 + +#ifdef __x86_64__ +# define PTRACE_ARCH_PRCTL 30 +#endif + +#define PTRACE_SYSEMU 31 +#define PTRACE_SYSEMU_SINGLESTEP 32 + +#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */ + +#ifndef __ASSEMBLY__ +#include + +/* configuration/status structure used in PTRACE_BTS_CONFIG and + PTRACE_BTS_STATUS commands. +*/ +struct ptrace_bts_config { + /* requested or actual size of BTS buffer in bytes */ + __u32 size; + /* bitmask of below flags */ + __u32 flags; + /* buffer overflow signal */ + __u32 signal; + /* actual size of bts_struct in bytes */ + __u32 bts_size; +}; +#endif /* __ASSEMBLY__ */ + +#define PTRACE_BTS_O_TRACE 0x1 /* branch trace */ +#define PTRACE_BTS_O_SCHED 0x2 /* scheduling events w/ jiffies */ +#define PTRACE_BTS_O_SIGNAL 0x4 /* send SIG on buffer overflow + instead of wrapping around */ +#define PTRACE_BTS_O_ALLOC 0x8 /* (re)allocate buffer */ + +#define PTRACE_BTS_CONFIG 40 +/* Configure branch trace recording. + ADDR points to a struct ptrace_bts_config. + DATA gives the size of that buffer. + A new buffer is allocated, if requested in the flags. + An overflow signal may only be requested for new buffers. + Returns the number of bytes read. +*/ +#define PTRACE_BTS_STATUS 41 +/* Return the current configuration in a struct ptrace_bts_config + pointed to by ADDR; DATA gives the size of that buffer. + Returns the number of bytes written. +*/ +#define PTRACE_BTS_SIZE 42 +/* Return the number of available BTS records for draining. 
+ DATA and ADDR are ignored. +*/ +#define PTRACE_BTS_GET 43 +/* Get a single BTS record. + DATA defines the index into the BTS array, where 0 is the newest + entry, and higher indices refer to older entries. + ADDR is pointing to struct bts_struct (see asm/ds.h). +*/ +#define PTRACE_BTS_CLEAR 44 +/* Clear the BTS buffer. + DATA and ADDR are ignored. +*/ +#define PTRACE_BTS_DRAIN 45 +/* Read all available BTS records and clear the buffer. + ADDR points to an array of struct bts_struct. + DATA gives the size of that buffer. + BTS records are read from oldest to newest. + Returns number of BTS records drained. +*/ + +#endif /* _ASM_PTRACE_ABI_H */ diff --git a/arch/e2k/include/asm/ptrace.h b/arch/e2k/include/asm/ptrace.h new file mode 100644 index 000000000000..45929d68b8ee --- /dev/null +++ b/arch/e2k/include/asm/ptrace.h @@ -0,0 +1,828 @@ +#ifndef _E2K_PTRACE_H +#define _E2K_PTRACE_H + + +#ifndef __ASSEMBLY__ +#include +#include + +#include +#endif /* __ASSEMBLY__ */ + +#include + +#ifndef __ASSEMBLY__ +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_USE_AAU +#include +#endif /* CONFIG_USE_AAU */ +#include +#include + +#endif /* __ASSEMBLY__ */ +#include +#include + +#define TASK_TOP TASK_SIZE + +/* + * User process size in MA32 mode. 
+ */ +#define TASK32_SIZE (0xf0000000UL) + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT +#include +#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */ +#include + +#include + +struct mm_struct; + +typedef struct pt_regs ptregs_t; +typedef struct sw_regs sw_regs_t; + +struct e2k_greg { + union { + u64 xreg[2]; /* extended register */ + struct { + u64 base; /* main part of value */ + u64 ext; /* extended part of floating point */ + /* value */ + }; + }; +} __aligned(16); /* must be aligned for stgdq/stqp/ldqp to work */ + +#ifdef CONFIG_GREGS_CONTEXT + +typedef struct global_regs { + struct e2k_greg g[E2K_GLOBAL_REGS_NUM]; + e2k_bgr_t bgr; +} global_regs_t; + +/* Sometimes we only want to save %g16-%g31 (so called "local" gregs) */ +typedef struct local_gregs { + struct e2k_greg g[LOCAL_GREGS_NUM]; + e2k_bgr_t bgr; +} local_gregs_t; + +/* gN and gN+1 global registers hold pointers to current in kernel, */ +/* gN+2 and gN+3 are used for per-cpu data pointer and current cpu id. */ +/* Now N = 16 (see real numbers at asm/glob_regs.h) */ +typedef struct kernel_gregs { + struct e2k_greg g[KERNEL_GREGS_NUM]; +} kernel_gregs_t; +#endif /* CONFIG_GREGS_CONTEXT */ + +#define HW_TC_SIZE 7 + +/* trap_pt_regs->flags */ +#define TRAP_PCSP_FILL_ADJUSTED 0x0001 +#define TRAP_PSP_FILL_ADJUSTED 0x0002 +#define TRAP_SRP_FLAG 0x0004 +#define TRAP_RP_FLAG 0x0008 + +typedef struct trap_pt_regs { + u64 TIR_hi; /* Trap info registers */ + u64 TIR_lo; + int TIR_no; /* current handled TIRs # */ + s8 nr_TIRs; + s8 tc_count; + s8 curr_cnt; + char ignore_user_tc; + char tc_called; + char from_sigreturn; + bool is_intc; /* intercept page fault */ + u8 nr_trap; /* number of trap */ + u8 nr_page_fault_exc; /* number of page fault trap */ + int prev_state; + int flags; + e2k_addr_t srp_ip; + e2k_tir_t TIRs[TIR_NUM]; + trap_cellar_t tcellar[HW_TC_SIZE]; + u64 *sbbp; +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + e2k_mlt_t mlt_state; /* MLT state for binco */ +#endif +} trap_pt_regs_t; + +union 
pt_regs_flags { + struct { + /* execute_mmu_operations() is working */ + u32 exec_mmu_op : 1; + /* nested exception appeared while + * execute_mmu_operations() was working */ + u32 exec_mmu_op_nested : 1; + /* A signal's handler will be called upon return to userspace */ + u32 sig_call_handler : 1; + /* System call should be restarted after signal's handler */ + u32 sig_restart_syscall : 1; + /* Used to distinguish between entry8 and entry10 for protected syscalls */ + u32 protected_entry10 : 1; + /* From hardware guest interception */ + u32 kvm_hw_intercept : 1; + /* trap or system call is on or from guest */ + u32 trap_as_intc_emul : 1; + /* Trap occurred in light hypercall */ + u32 light_hypercall : 1; + }; + u32 word; +}; + +typedef struct pt_regs { + struct pt_regs *next; /* the previous regs structure */ + struct trap_pt_regs *trap; +#ifdef CONFIG_USE_AAU + e2k_aau_t *aau_context; /* aau registers */ +#endif + e2k_stacks_t stacks; /* current state of all stacks */ + /* registers */ + e2k_mem_crs_t crs; /* current chain window regs state */ + e2k_wd_t wd; /* current window descriptor */ + int sys_num; /* to restart sys_call */ + int kernel_entry; + union pt_regs_flags flags; + e2k_ctpr_t ctpr1; /* CTPRj for control transfer */ + e2k_ctpr_t ctpr2; + e2k_ctpr_t ctpr3; + e2k_ctpr_hi_t ctpr1_hi; + e2k_ctpr_hi_t ctpr2_hi; + e2k_ctpr_hi_t ctpr3_hi; + u64 lsr; /* loops */ + u64 ilcr; /* initial loop value */ + u64 lsr1; + u64 ilcr1; + int interrupt_vector; +#ifdef CONFIG_EPIC + unsigned int epic_core_priority; +#endif + long sys_rval; + long args[13]; /* unused, arg1, ... 
arg12 */ + long tags; + long rval1; + long rval2; + int return_desk; + int rv1_tag; + int rv2_tag; +#ifdef CONFIG_CLW_ENABLE + int clw_cpu; + int clw_count; + int clw_first; + clw_reg_t us_cl_m[CLW_MASK_WORD_NUM]; + clw_reg_t us_cl_up; + clw_reg_t us_cl_b; +#endif /* CONFIG_CLW_ENABLE */ + /* for bin_comp */ + u64 rpr_lo; + u64 rpr_hi; +#ifdef CONFIG_VIRTUALIZATION + u64 sys_func; /* need only for guest */ + e2k_stacks_t g_stacks; /* current state of guest kernel */ + /* stacks registers */ + bool g_stacks_valid; /* the state of guest kernel stacks */ + /* registers is valid */ + bool g_stacks_active; /* the guest kernel stacks */ + /* registers is in active work */ + bool stack_regs_saved; /* stack state regs was already */ + /* saved */ + bool need_inject; /* flag for unconditional injection */ + /* trap to guest to avoid acces to */ + /* guest user space in trap context */ + bool in_hypercall; /* trap is occured in hypercall */ + bool is_guest_user; /* trap/system call on/from guest */ + /* user */ + unsigned long traps_to_guest; /* mask of traps passed to guest */ + /* and are not yet handled by guest */ + /* need only for host */ +#ifdef CONFIG_KVM_GUEST_KERNEL +/* only for guest kernel */ + /* already copyed back part of guest user hardware stacks */ + /* spilled to guest kernel stacks */ + struct { + e2k_size_t ps_size; /* procedure stack copyed size */ + e2k_size_t pcs_size; /* chain stack copyesd size */ + /* The frames injected to support 'signal stack' */ + /* and trampolines to return from user to kernel */ + e2k_size_t pcs_injected_frames_size; + } copyed; +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* CONFIG_VIRTUALIZATION */ + +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_GUEST_KERNEL) + e2k_svd_gregs_t guest_vcpu_state_greg; +#endif /* CONFIG_KVM || CONFIG_KVM_GUEST_KERNEL */ + +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT + scall_times_t *scall_times; +#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */ +} pt_regs_t; + +static inline struct trap_pt_regs * 
+pt_regs_to_trap_regs(struct pt_regs *regs) +{ + return PTR_ALIGN((void *) regs + sizeof(*regs), 8); +} + +#ifdef CONFIG_USE_AAU +static inline e2k_aau_t * +pt_regs_to_aau_regs(struct pt_regs *regs) +{ + struct trap_pt_regs *trap; + + trap = pt_regs_to_trap_regs(regs); + + return PTR_ALIGN((void *) trap + sizeof(*trap), 8); +} +#else /* ! CONFIG_USE_AAU */ +static inline e2k_aau_t * +pt_regs_to_aau_regs(struct pt_regs *regs) +{ + return NULL; +} +#endif +static inline bool +is_sys_call_pt_regs(struct pt_regs *regs) +{ + return regs->trap == NULL && regs->kernel_entry != 0; +} +static inline bool +is_trap_pt_regs(struct pt_regs *regs) +{ + return regs->trap != NULL && regs->kernel_entry == 0; +} + +typedef struct sw_regs { + e2k_mem_crs_t crs; + e2k_addr_t top; /* top of all user data stacks */ + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + e2k_psp_lo_t psp_lo; /* procedure stack pointer(as empty)*/ + e2k_psp_hi_t psp_hi; + e2k_pcsp_lo_t pcsp_lo; /* procedure chaine stack pointer */ + e2k_pcsp_hi_t pcsp_hi; /* (as empty) */ + e2k_upsr_t upsr; + e2k_fpcr_t fpcr; + e2k_fpsr_t fpsr; + e2k_pfpfr_t pfpfr; + e2k_cutd_t cutd; + +#ifdef CONFIG_VIRTUALIZATION + struct task_struct *prev_task; /* task switch to current from */ +#endif /* CONFIG_VIRTUALIZATION */ + +#ifdef CONFIG_GREGS_CONTEXT + struct global_regs gregs; +#endif + + /* + * These two are shared by monitors and breakpoints. Monitors + * are accessed by userspace directly through sys_ptrace and + * breakpoints are accessed through CONFIG_HW_BREAKPOINT layer + * (i.e. ptrace does not write directly to breakpoint registers). + * + * For this reason breakpoints related registers are moved out + * from sw_regs as they are managed by arch-independent layer + * instead of arch-dependent switch_to() function. For dibsr and + * ddbsr only monitors-related fields are accessed in switch_to(). 
+ */ + e2k_dibsr_t dibsr; + e2k_ddbsr_t ddbsr; + + u64 dimar0; + u64 dimar1; + e2k_dimcr_t dimcr; + u64 ddmar0; + u64 ddmar1; + e2k_ddmcr_t ddmcr; + e2k_dimtp_t dimtp; + + /* + * in the case we switch from/to a BINCO task, we + * need to backup/restore these registers in task switching + */ + u64 cs_lo; + u64 cs_hi; + u64 ds_lo; + u64 ds_hi; + u64 es_lo; + u64 es_hi; + u64 fs_lo; + u64 fs_hi; + u64 gs_lo; + u64 gs_hi; + u64 ss_lo; + u64 ss_hi; + + /* Additional registers for BINCO */ + u64 rpr_lo; + u64 rpr_hi; +#ifdef CONFIG_TC_STORAGE + u64 tcd; +#endif +} sw_regs_t; + +typedef struct jmp_info { + u64 sigmask; + u64 ip; + u64 cr1lo; + u64 pcsplo; + u64 pcsphi; + u32 pcshtp; + u32 br; + u64 usd_lo; + u32 reserved; + u32 wd_hi32; +} e2k_jmp_info_t; + +#define __HAVE_ARCH_KSTACK_END + +static inline int kstack_end(void *addr) +{ + return (e2k_addr_t)addr >= READ_SBR_REG_VALUE(); +} + +#define NATIVE_SAVE_DAM(__dam) \ +do { \ + int i; \ + e2k_addr_t addr = (REG_DAM_TYPE << REG_DAM_TYPE_SHIFT); \ + for (i = 0; i < DAM_ENTRIES_NUM; i++) \ + (__dam)[i] = NATIVE_READ_DAM_REG(addr | \ + (i << REG_DAM_N_SHIFT)); \ +} while (0) + +/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. 
*/ +#define PTRACE_GETREGS 12 +#define PTRACE_SETREGS 13 + +/* e2k extentions */ +#define PTRACE_PEEKPTR 0x100 +#define PTRACE_POKEPTR 0x101 +#define PTRACE_PEEKTAG 0x120 +#define PTRACE_POKETAG 0x121 +#define PTRACE_EXPAND_STACK 0x130 + +#define from_trap(regs) ((regs)->trap != NULL) +#define from_syscall(regs) (!from_trap(regs)) + +static inline u64 user_stack_pointer(struct pt_regs *regs) +{ + e2k_usd_lo_t usd_lo = regs->stacks.usd_lo; + u64 sp; + + if (!AS(usd_lo).p) { + sp = AS(usd_lo).base; + } else { + e2k_pusd_lo_t pusd_lo; + AW(pusd_lo) = AW(usd_lo); + sp = AS(pusd_lo).base + (regs->stacks.top & ~0xffffffffULL); + } + + return sp; +} + +static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ + return AS(regs->stacks.usd_lo).base; +} + +static inline void native_atomic_load_osgd_to_gd(void) +{ + E2K_LOAD_OSGD_TO_GD(); +} + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs: pt_regs which contains kernel stack pointer. + * @n: stack entry number. + * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which + * is specified by @regs. If the @n th entry is NOT in the kernel stack, + * this returns 0. 
+ */ +static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, + unsigned int n) + +{ + unsigned long addr = kernel_stack_pointer(regs); + + addr += n * sizeof(unsigned long); + + if (addr >= kernel_stack_pointer(regs) && addr < regs->stacks.top) + return *(unsigned long *) addr; + else + return 0; +} + +/* Query offset/name of register from its name/offset */ +extern int regs_query_register_offset(const char *name); +extern const char *regs_query_register_name(unsigned int offset); + +#define REGS_B_REGISTER_FLAG (1 << 30) +#define REGS_PRED_REGISTER_FLAG (1 << 29) +#define REGS_TIR1_REGISTER_FLAG (1 << 28) + +extern unsigned long regs_get_register(const struct pt_regs *regs, + unsigned int offset); + +#define from_trap(regs) ((regs)->trap != NULL) +#define from_syscall(regs) (!from_trap(regs)) + +static inline unsigned long regs_return_value(struct pt_regs *regs) +{ + /* System call audit case: %b[0] is not ready yet */ + if (from_syscall(regs)) + return regs->sys_rval; + + /* kretprobe case - get %b[0] */ + return regs_get_register(regs, 0 | REGS_B_REGISTER_FLAG); +} + +static inline e2k_addr_t +native_check_is_user_address(struct task_struct *task, e2k_addr_t address) +{ + if (likely(address < NATIVE_TASK_SIZE)) + return 0; + pr_err("Address 0x%016lx is native kernel address\n", + address); + return -1; +} +#define NATIVE_IS_GUEST_USER_ADDRESS_TO_PVA(task, address) \ + false /* native kernel has not guests */ +#define NATIVE_IS_GUEST_ADDRESS_TO_HOST(address) \ + false /* native kernel has not guests */ + +/* guest page table is pseudo PT and only host PT is used */ +/* to translate any guest addresses */ +static inline void +native_print_host_user_address_ptes(struct mm_struct *mm, e2k_addr_t address) +{ + /* this function is actual only for guest */ + /* native kernel can not be guest kernel */ +} + +/** + * calculate_e2k_dstack_parameters - get user data stack free area parameters + * @stacks: stack registers + * @sp: stack pointer will be 
returned here + * @stack_size: free area size will be returned here + * @top: stack area top will be returned here + */ +static inline void calculate_e2k_dstack_parameters( + const struct e2k_stacks *stacks, + u64 *sp, u64 *stack_size, u64 *top) +{ + e2k_usd_lo_t usd_lo = stacks->usd_lo; + e2k_usd_hi_t usd_hi = stacks->usd_hi; + unsigned long sbr = stacks->top; + + if (top) + *top = sbr; + + if (AS(usd_lo).p) { + e2k_pusd_lo_t pusd_lo; + e2k_pusd_hi_t pusd_hi; + unsigned long usbr; + + usbr = sbr & ~E2K_PROTECTED_STACK_BASE_MASK; + AW(pusd_lo) = AW(usd_lo); + AW(pusd_hi) = AW(usd_hi); + *sp = usbr + (AS(pusd_lo).base & ~E2K_ALIGN_PUSTACK_MASK); + *stack_size = AS(pusd_hi).size & ~E2K_ALIGN_PUSTACK_MASK; + } else { + *sp = AS(usd_lo).base; + *stack_size = AS(usd_hi).size; + } +} + +/* virtualization support */ +#include + +struct signal_stack_context { + struct pt_regs regs; + struct trap_pt_regs trap; + struct k_sigaction sigact; + e2k_aau_t aau_regs; +#ifdef CONFIG_GREGS_CONTEXT + struct local_gregs l_gregs; +#endif + u64 sbbp[SBBP_ENTRIES_NUM]; + struct pv_vcpu_ctxt vcpu_ctxt; +}; + +#define __signal_pt_regs_last(ti) \ +({ \ + struct pt_regs __user *__sig_regs; \ + if (ti->signal_stack.used) { \ + __sig_regs = &((struct signal_stack_context __user *) \ + (ti->signal_stack.base))->regs; \ + } else { \ + __sig_regs = NULL; \ + } \ + __sig_regs; \ +}) +#define signal_pt_regs_last() __signal_pt_regs_last(current_thread_info()) + +#define signal_pt_regs_first() \ +({ \ + struct pt_regs __user *__sig_regs; \ + if (current_thread_info()->signal_stack.used) { \ + __sig_regs = &((struct signal_stack_context __user *) \ + (current_thread_info()->signal_stack.base + \ + current_thread_info()->signal_stack.used - \ + sizeof(struct signal_stack_context)))->regs; \ + } else { \ + __sig_regs = NULL; \ + } \ + __sig_regs; \ +}) + +#define signal_pt_regs_for_each(__regs) \ + for (__regs = signal_pt_regs_first(); \ + __regs && (u64) __regs >= \ + 
current_thread_info()->signal_stack.base; \ + __regs = (struct pt_regs __user *) ((void *) __regs - \ + sizeof(struct signal_stack_context))) + +/** + * signal_pt_regs_to_trap - to be used inside of signal_pt_regs_for_each(); + * will return trap_pt_regs pointer corresponding + * to the passed pt_regs structure. + * @__u_regs: pt_regs pointer returned by signal_pt_regs_for_each() + * + * EXAMPLE: + * signal_pt_regs_for_each(u_regs) { + * struct trap_pt_regs __user *u_trap = signal_pt_regs_to_trap(u_regs); + * if (IS_ERR(u_trap)) + * ;// Caught -EFAULT from get_user() + * if (IS_NULL(u_trap)) + * ;// Not interrupt pt_regs + */ +#define signal_pt_regs_to_trap(__u_regs) \ +({ \ + struct pt_regs __user *__spr_u_regs = (__u_regs); \ + struct trap_pt_regs __user *u_trap; \ + \ + if (__get_user(u_trap, &__spr_u_regs->trap)) {\ + u_trap = ERR_PTR(-EFAULT); \ + } else if (u_trap) { \ + u_trap = (struct trap_pt_regs __user *) \ + ((void __user *) __spr_u_regs - \ + offsetof(struct signal_stack_context, regs) + \ + offsetof(struct signal_stack_context, trap)); \ + } \ + u_trap; \ +}) + +#define arch_ptrace_stop_needed(...) (true) +/* current->thread_info->pt_regs may be zero if ptrace_stop() + * was called from load_elf_binary() (it happens if gdb has + * set PTRACE_O_TRACEEXEC flag). */ +#define arch_ptrace_stop(...) 
\ +do { \ + struct pt_regs *__pt_regs = current_thread_info()->pt_regs; \ + if (__pt_regs) { \ + if (!test_ts_flag(TS_USER_EXECVE)) \ + user_hw_stacks_copy_full(&__pt_regs->stacks, \ + __pt_regs, NULL); \ + SAVE_AAU_REGS_FOR_PTRACE(__pt_regs, current_thread_info()); \ + if (!paravirt_enabled()) { \ + /* FIXME: it need implement for guest kernel */ \ + NATIVE_SAVE_BINCO_REGS_FOR_PTRACE(__pt_regs); \ + } \ + } \ +} while (0) + +static inline int syscall_from_kernel(const struct pt_regs *regs) +{ + return from_syscall(regs) && !user_mode(regs); +} + +static inline int syscall_from_user(const struct pt_regs *regs) +{ + return from_syscall(regs) && user_mode(regs); +} + +static inline int trap_from_kernel(const struct pt_regs *regs) +{ + return from_trap(regs) && !user_mode(regs); +} + +static inline int trap_from_user(const struct pt_regs *regs) +{ + return from_trap(regs) && user_mode(regs); +} + +static inline void instruction_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + AS(regs->crs.cr0_hi).ip = val >> 3; +} + +/* IMPORTANT: this only works after parse_TIR_registers() + * has set trap->TIR_lo. So this doesn't work for NMIs. 
*/ +static inline unsigned long get_trap_ip(const struct pt_regs *regs) +{ + e2k_tir_lo_t tir_lo; + + tir_lo.TIR_lo_reg = regs->trap->TIR_lo; + return tir_lo.TIR_lo_ip; +} + +static inline unsigned long get_return_ip(const struct pt_regs *regs) +{ + return (unsigned long) (AS(regs->crs.cr0_hi).ip << 3); +} + +static inline unsigned long instruction_pointer(const struct pt_regs *regs) +{ + return get_return_ip(regs); +} + + +#ifdef CONFIG_DEBUG_PT_REGS +#define CHECK_PT_REGS_LOOP(regs) \ +({ \ + if ((regs) != NULL) { \ + if ((regs)->next == (regs)) { \ + pr_err("LOOP in regs list: regs 0x%px next 0x%px\n", \ + (regs), (regs)->next); \ + dump_stack(); \ + } \ + } \ +}) +#define CHECK_PT_REGS_CHAIN(regs, bottom, top) \ +({ \ + pt_regs_t *next_regs = (regs); \ + pt_regs_t *prev_regs = (pt_regs_t *)(bottom); \ + while ((next_regs) != NULL) { \ + if ((bottom) < TASK_SIZE) \ + break; \ + if ((e2k_addr_t)next_regs > (e2k_addr_t)((top) - sizeof(pt_regs_t))) { \ + pr_err("%s(): next regs %px above top 0x%llx\n", \ + __func__, next_regs, \ + (top) - sizeof(pt_regs_t)); \ + print_pt_regs(next_regs); \ + WARN_ON(true); \ + } else if ((e2k_addr_t)next_regs == (e2k_addr_t)prev_regs) { \ + pr_err("%s(): next regs %px is same as previous %px\n", \ + __func__, next_regs, prev_regs); \ + print_pt_regs(next_regs); \ + BUG_ON(true); \ + } else if ((e2k_addr_t)next_regs < (e2k_addr_t)prev_regs) { \ + pr_err("%s(): next regs %px below previous %px\n", \ + __func__, next_regs, prev_regs); \ + print_pt_regs(next_regs); \ + BUG_ON(true); \ + } \ + prev_regs = next_regs; \ + next_regs = next_regs->next; \ + } \ +}) + +/* + * The hook to find 'ct' command ( return to user) + * be interrapted with cloused interrupt / HARDWARE problem #59886/ + */ +#define CHECK_CT_INTERRUPTED(regs) \ +({ \ + struct pt_regs *__regs = regs; \ + do { \ + if (__call_from_user(__regs) || __trap_from_user(__regs)) \ + break; \ + __regs = __regs->next; \ + } while (__regs); \ + if (!__regs) { \ + printk(" signal 
delivery started on kernel instruction" \ + " top = 0x%lx TIR_lo=0x%lx " \ + " crs.cr0_hi.ip << 3 = 0x%lx\n", \ + (regs)->stacks.top, (regs)->TIR_lo, \ + instruction_pointer(regs)); \ + dump_stack(); \ + } \ +}) +#else /* ! CONFIG_DEBUG_PT_REGS */ +#define CHECK_PT_REGS_LOOP(regs) /* nothing */ +#define CHECK_PT_REGS_CHAIN(regs, bottom, top) +#define CHECK_CT_INTERRUPTED(regs) +#endif /* CONFIG_DEBUG_PT_REGS */ + +static inline struct pt_regs *find_user_regs(const struct pt_regs *regs) +{ + do { + CHECK_PT_REGS_LOOP(regs); + + if (user_mode(regs) && !regs->flags.kvm_hw_intercept) + break; + + regs = regs->next; + } while (regs); + + return (struct pt_regs *) regs; +} + +/* + * Finds the first pt_regs corresponding to the kernel entry + * (i.e. user mode pt_regs) if this is a user thread. + * + * Finds the first pt_regs structure if this is a kernel thread. + */ +static inline struct pt_regs *find_entry_regs(const struct pt_regs *regs) +{ + const struct pt_regs *prev_regs; + + do { + CHECK_PT_REGS_LOOP(regs); + + if (user_mode(regs) && !regs->flags.kvm_hw_intercept) + goto found; + + prev_regs = regs; + regs = regs->next; + } while (regs); + + /* Return the first pt_regs structure for kernel threads */ + regs = prev_regs; + +found: + return (struct pt_regs *) regs; +} + +static inline struct pt_regs *find_host_regs(const struct pt_regs *regs) +{ + while (regs) { + CHECK_PT_REGS_LOOP(regs); + + if (likely(!regs->flags.kvm_hw_intercept)) + break; + + regs = regs->next; + }; + + return (struct pt_regs *) regs; +} + +static inline struct pt_regs *find_trap_host_regs(const struct pt_regs *regs) +{ + while (regs) { + CHECK_PT_REGS_LOOP(regs); + + if (from_trap(regs) && !regs->flags.kvm_hw_intercept) + break; + + regs = regs->next; + }; + + return (struct pt_regs *) regs; +} + +#define count_trap_regs(regs) \ +({ \ + struct pt_regs *__regs = regs; \ + int traps = 0; \ + while (__regs) { \ + if (from_trap(__regs)) \ + traps++; \ + __regs = __regs->next; \ + } \ + traps; \ +})
+#define current_is_in_trap() \ + (count_trap_regs(current_thread_info()->pt_regs) > 0) + +#define count_user_regs(regs) \ +({ \ + struct pt_regs *__regs = regs; \ + int regs_num = 0; \ + while (__regs) { \ + CHECK_PT_REGS_LOOP(__regs); \ + if (user_mode(__regs)) \ + regs_num++; \ + __regs = __regs->next; \ + } \ + regs_num; \ +}) + +#if defined(CONFIG_SMP) +extern unsigned long profile_pc(struct pt_regs *regs); +#else +#define profile_pc(regs) instruction_pointer(regs) +#endif +extern void show_regs(struct pt_regs *); +extern int syscall_trace_entry(struct pt_regs *regs); +extern void syscall_trace_leave(struct pt_regs *regs); + +#define arch_has_single_step() (1) + +extern long common_ptrace(struct task_struct *child, long request, unsigned long addr, + unsigned long data, bool compat); + +#endif /* __ASSEMBLY__ */ +#endif /* _E2K_PTRACE_H */ diff --git a/arch/e2k/include/asm/pv_info.h b/arch/e2k/include/asm/pv_info.h new file mode 100644 index 000000000000..58f634cd71c6 --- /dev/null +++ b/arch/e2k/include/asm/pv_info.h @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2016 MCST, Salavat Gilyazov atic@mcst.ru + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + + +#ifndef __ASM_E2K_PV_INFO_H +#define __ASM_E2K_PV_INFO_H + +#include + +/* + * e2k kernel general info + */ + +/* + * Even 32-bit applications must have big TASK_SIZE since hardware + * stacks are placed behind the 4Gb boundary. + */ +/* Virtual space is splitted into two parts: user and kernel spaces. */ +/* Kernel virtual space takes high area and starts from the following base */ +#define NATIVE_KERNEL_VIRTUAL_SPACE_BASE 0x0000d00000000000 + +/* direct mapping of all physical memory starts now from kernel virtual */ +/* space beginning, but cannot map all possible 2**48 bytes */ +#define NATIVE_PAGE_OFFSET NATIVE_KERNEL_VIRTUAL_SPACE_BASE + +/* Users virtual spaces take low area from 0 right up to kernel base */ +#define NATIVE_TASK_SIZE NATIVE_KERNEL_VIRTUAL_SPACE_BASE + +#ifdef CONFIG_MMU_SEP_VIRT_SPACE +/* Users Separate Page Tables virtual base at the top of user space */ +/* 0x0000 cf80 0000 0000 */ +#define USER_VPTB_BASE_SIZE PGDIR_SIZE +#define USER_VPTB_BASE_ADDR (NATIVE_TASK_SIZE - USER_VPTB_BASE_SIZE) +#else /* ! 
CONFIG_MMU_SEP_VIRT_SPACE */ +#define USER_VPTB_BASE_SIZE 0 +#define USER_VPTB_BASE_ADDR KERNEL_VPTB_BASE_ADDR +#endif /* CONFIG_MMU_SEP_VIRT_SPACE */ + +/* virtualization support */ +#include + +#ifndef CONFIG_VIRTUALIZATION +/* it is native kernel without any virtualization */ +#define IS_HOST_KERNEL_ADDRESS(addr) ((addr) >= NATIVE_TASK_SIZE) +#define IS_HOST_USER_ADDRESS(addr) ((addr) < NATIVE_TASK_SIZE) +#define IS_GUEST_KERNEL_ADDRESS(addr) false +#define IS_GUEST_USER_ADDRESS(addr) false +#define IS_GUEST_PHYS_ADDRESS(addr) false +#else /* CONFIG_VIRTUALIZATION */ +/* it is host kernel with virtualization support */ +/* or paravirtualized host and guest kernel */ +/* or pure guest kernel (not paravirtualized based on pv_ops) */ +#define HOST_TASK_SIZE (HOST_PAGE_OFFSET) +#define GUEST_TASK_SIZE (GUEST_PAGE_OFFSET) +#define HOST_TASK_TOP HOST_TASK_SIZE +#define GUEST_TASK_TOP GUEST_TASK_SIZE +#define BOOT_HOST_TASK_SIZE HOST_TASK_SIZE +#define BOOT_GUEST_TASK_SIZE GUEST_TASK_SIZE + +#define IS_HOST_KERNEL_ADDRESS(addr) ((addr) >= HOST_TASK_SIZE) +#define IS_HOST_USER_ADDRESS(addr) ((addr) < HOST_TASK_SIZE) +#define IS_GUEST_KERNEL_ADDRESS(addr) ((addr) >= GUEST_TASK_SIZE && \ + (addr) < HOST_TASK_SIZE) +#define IS_GUEST_USER_ADDRESS(addr) ((addr) < GUEST_TASK_SIZE) +#define IS_GUEST_PHYS_ADDRESS(addr) \ + ((e2k_addr_t)(addr) >= GUEST_PAGE_OFFSET && \ + (e2k_addr_t)(addr) < GUEST_PAGE_OFFSET + MAX_PM_SIZE) +#endif /* ! CONFIG_VIRTUALIZATION */ + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +#define is_paravirt_kernel() true /* it is paravirtualized */ + /* host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* pure guest kernel (not paravirtualized) */ +#define is_paravirt_kernel() false +#include +#else /* ! CONFIG_PARAVIRT_GUEST && ! 
CONFIG_KVM_GUEST_KERNEL */ +/* it is native kernel without virtualization support */ +/* or host kernel with virtualization support */ +#define TASK_SIZE NATIVE_TASK_SIZE + +#define paravirt_enabled() (IS_HV_GM() || false) +#define boot_paravirt_enabled() (BOOT_IS_HV_GM() || false) +#define is_paravirt_kernel() false + +#ifndef CONFIG_VIRTUALIZATION +/* it is native kernel without virtualization support */ +#define KERNEL_VIRTUAL_SPACE_BASE NATIVE_KERNEL_VIRTUAL_SPACE_BASE +#define PAGE_OFFSET NATIVE_PAGE_OFFSET +#define VMALLOC_START NATIVE_VMALLOC_START +#define VMALLOC_END NATIVE_VMALLOC_END +#define VMEMMAP_START NATIVE_VMEMMAP_START +#define VMEMMAP_END NATIVE_VMEMMAP_END + +#define BOOT_KERNEL_VIRTUAL_SPACE_BASE KERNEL_VIRTUAL_SPACE_BASE +#define BOOT_PAGE_OFFSET PAGE_OFFSET +#define BOOT_TASK_SIZE TASK_SIZE +#endif /* ! CONFIG_VIRTUALIZATION */ + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __ASM_E2K_PV_INFO_H */ diff --git a/arch/e2k/include/asm/qspinlock.h b/arch/e2k/include/asm/qspinlock.h new file mode 100644 index 000000000000..6ae0bda69cfd --- /dev/null +++ b/arch/e2k/include/asm/qspinlock.h @@ -0,0 +1,92 @@ +#ifndef _ASM_E2K_QSPINLOCK_H +#define _ASM_E2K_QSPINLOCK_H + +#include +#include + +/* + * Ideally, the spinning time should be at least a few times + * the typical cacheline load time from memory (~100 cycles on e2k), + * and atomic_cond_read_relaxed() iteration takes ~20 cycles. + */ +#define _Q_PENDING_LOOPS (1 << 5) + +#ifndef CONFIG_PARAVIRT_SPINLOCKS + +# define queued_spin_unlock queued_spin_unlock +/** + * queued_spin_unlock - release a queued spinlock + * @lock : Pointer to queued spinlock structure + * + * A store_release() on the least-significant byte that also + * acts as a hardware memory barrier on device writes (in place + * of dropped mmiowb()). 
+ */ +static inline void queued_spin_unlock(struct qspinlock *lock) +{ + store_release(&lock->locked, 0); +} + +#else + +#include +#include + +#include +#include + +static __always_inline void pv_wait(u8 *ptr, u8 val) +{ + if (cpu_has(CPU_FEAT_ISET_V6) && READ_CORE_MODE_REG().gmi && + READ_ONCE(*ptr) == val) + HYPERVISOR_pv_wait(); +} + + +static __always_inline void pv_kick(int cpu) +{ + if (cpu_has(CPU_FEAT_ISET_V6) && READ_CORE_MODE_REG().gmi) + HYPERVISOR_pv_kick(cpu); +} + +extern void __pv_init_lock_hash(void); + +/** + * queued_spin_unlock - release a queued spinlock + * @lock : Pointer to queued spinlock structure + * + * A store_release() on the least-significant byte that also + * acts as a hardware memory barrier on device writes (in place + * of dropped mmiowb()). + */ +static inline void native_queued_spin_unlock(struct qspinlock *lock) +{ + store_release(&lock->locked, 0); +} + +extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) +{ + if (cpu_has(CPU_FEAT_ISET_V6) && READ_CORE_MODE_REG().gmi) + __pv_queued_spin_lock_slowpath(lock, val); + else + native_queued_spin_lock_slowpath(lock, val); +} + +extern void __pv_queued_spin_unlock(struct qspinlock *lock); + +# define queued_spin_unlock queued_spin_unlock +static inline void queued_spin_unlock(struct qspinlock *lock) +{ + if (cpu_has(CPU_FEAT_ISET_V6) && READ_CORE_MODE_REG().gmi) + __pv_queued_spin_unlock(lock); + else + native_queued_spin_unlock(lock); +} + +#endif /* !CONFIG_PARAVIRT_SPINLOCKS */ + +#include + +#endif /* _ASM_E2K_QSPINLOCK_H */ diff --git a/arch/e2k/include/asm/qspinlock_paravirt.h b/arch/e2k/include/asm/qspinlock_paravirt.h new file mode 100644 index 000000000000..98a931cec474 --- /dev/null +++ b/arch/e2k/include/asm/qspinlock_paravirt.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef 
__ASM_QSPINLOCK_PARAVIRT_H +#define __ASM_QSPINLOCK_PARAVIRT_H + +#endif diff --git a/arch/e2k/include/asm/regs_state.h b/arch/e2k/include/asm/regs_state.h new file mode 100644 index 000000000000..a8fd2004d30b --- /dev/null +++ b/arch/e2k/include/asm/regs_state.h @@ -0,0 +1,1381 @@ +#ifndef _E2K_REGS_STATE_H +#define _E2K_REGS_STATE_H + +/* + * Some macroses (start with PREFIX_) can use in three modes and can operate + * with virtualized or paravirtualized functions, resorces, other macroses. + * Such macroses should not be used directly instead of it need use: + * NATIVE_XXX macroses for native, host and hypervisor kernel mode + * in all functions which can be called only on native + * running mode; + * KVM_XXX macroses for guest virtualized kernel in all functions, + * which can be called only on guest running mode; + * PV_XXX macroses for paravirtualized kernel and such macroses use + * pv_ops structures to call paravirtualized actions. + * These macroses can be used in all functions, which can + * be called only on paravirtualized running mode and + * structures pv_ops_yyy exist. + * XXX (pure macros without prefix) macroses for host and guest virtualized + * kernel mode in all functions, which can be called + * both running mode host and guest. These macroses depend + * on configuration (compilation) mode and turn into one + * of above three macroses type + * If kernel configured and compiled as native with or + * without virtualization support/ then XXX turn into + * NATIVE_XXX. 
+ * if kernel configured and compiled as pure guest, then + * XXX turn into KVM_XXX + * if kernel configured and compiled as paravirtualized and + * can be run both mode as host and as guest, then + * XXX turn into PV_XXX + * PV_TYPE argument in macroses is prefix and can be as above: + * NATIVE native kernel with or without virtualization support + * KVM guest kernel (can be run only as paravirtualized + * guest kernel) + * PV paravirtualized kernel (can be run as host and as guest + * paravirtualized kernels) + */ + +#include +#include +#include +#include + +#ifndef __ASSEMBLY__ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_MLT_STORAGE +#include +#endif + +#endif /* __ASSEMBLY__ */ + +#include + +//#define CONTROL_USD_BASE_SIZE + +#ifdef CONTROL_USD_BASE_SIZE +#define CHECK_USD_BASE_SIZE(regs) \ +({ \ + u64 base = (regs)->stacks.usd_lo.USD_lo_base; \ + u64 size = (regs)->stacks.usd_hi.USD_hi_size; \ + if ((base - size) & ~PAGE_MASK) { \ + printk("Not page size aligned USD_base 0x%lx - " \ + "USD_size 0x%lx = 0x%lx\n", \ + base, size, base - size); \ + dump_stack(); \ + } \ +}) +#else +#define CHECK_USD_BASE_SIZE(regs) +#endif + +/* set/restore some kernel state registers to initial state */ + +static inline void native_set_kernel_CUTD(void) +{ + e2k_cutd_t k_cutd; + + k_cutd.CUTD_reg = 0; + k_cutd.CUTD_base = (e2k_addr_t)kernel_CUT; + NATIVE_NV_NOIRQ_WRITE_CUTD_REG(k_cutd); +} + +#define NATIVE_CLEAR_DAM \ +({ \ + NATIVE_SET_MMUREG(dam_inv, 0); \ +}) + +/* + * Macros to save and restore registers. 
+ */ + +#define COPY_U_HW_STACKS_FROM_TI(__stacks, ti) \ +do { \ + e2k_psp_lo_t __psp_lo = ti->tmp_user_stacks.psp_lo; \ + e2k_psp_hi_t __psp_hi = ti->tmp_user_stacks.psp_hi; \ + e2k_pshtp_t __pshtp = ti->tmp_user_stacks.pshtp; \ + e2k_pcsp_lo_t __pcsp_lo = ti->tmp_user_stacks.pcsp_lo; \ + e2k_pcsp_hi_t __pcsp_hi = ti->tmp_user_stacks.pcsp_hi; \ + e2k_pcshtp_t __pcshtp = ti->tmp_user_stacks.pcshtp; \ +\ + (__stacks)->psp_lo = __psp_lo; \ + (__stacks)->psp_hi = __psp_hi; \ + (__stacks)->psp_hi.PSP_hi_ind += GET_PSHTP_MEM_INDEX(__pshtp); \ + (__stacks)->pcsp_lo = __pcsp_lo; \ + (__stacks)->pcsp_hi = __pcsp_hi; \ + (__stacks)->pcsp_hi.PCSP_hi_ind += PCSHTP_SIGN_EXTEND(__pcshtp); \ + (__stacks)->pshtp = __pshtp; \ + (__stacks)->pcshtp = __pcshtp; \ +} while (0) + +#define COPY_U_HW_STACKS_TO_STACKS(__stacks_to, __stacks_from) \ +do { \ + e2k_stacks_t *stacks_to = (__stacks_to); \ + e2k_stacks_t *stacks_from = (__stacks_from); \ +\ + stacks_to->psp_lo = stacks_from->psp_lo; \ + stacks_to->psp_hi = stacks_from->psp_hi; \ + stacks_to->pcsp_lo = stacks_from->pcsp_lo; \ + stacks_to->pcsp_hi = stacks_from->pcsp_hi; \ + stacks_to->pshtp = stacks_from->pshtp; \ + stacks_to->pcshtp = stacks_from->pcshtp; \ +} while (0) + +/* usd regs are saved already */ +#define PREFIX_SAVE_STACK_REGS(PV_TYPE, regs, ti, from_ti, flushc) \ +do { \ + /* This flush reserves space for the next trap. 
*/ \ + if (flushc) \ + PV_TYPE##_FLUSHC; \ + if (from_ti) { \ + COPY_U_HW_STACKS_FROM_TI(&(regs)->stacks, ti); \ + } else { \ + u64 pshtp; \ + u32 pcshtp; \ + u64 psp_hi; \ + u64 pcsp_hi; \ + pshtp = PV_TYPE##_NV_READ_PSHTP_REG_VALUE(); \ + pcshtp = PV_TYPE##_READ_PCSHTP_REG_SVALUE(); \ + (regs)->stacks.psp_lo.PSP_lo_half = \ + PV_TYPE##_NV_READ_PSP_LO_REG_VALUE(); \ + psp_hi = PV_TYPE##_NV_READ_PSP_HI_REG_VALUE(); \ + pcsp_hi = PV_TYPE##_NV_READ_PCSP_HI_REG_VALUE(); \ + (regs)->stacks.pcsp_lo.PCSP_lo_half = \ + PV_TYPE##_NV_READ_PCSP_LO_REG_VALUE();\ + if (!flushc) \ + pcsp_hi += pcshtp; \ + psp_hi += GET_PSHTP_MEM_INDEX((e2k_pshtp_t)pshtp); \ + AW((regs)->stacks.pshtp) = pshtp; \ + (regs)->stacks.pcshtp = pcshtp; \ + AW((regs)->stacks.psp_hi) = psp_hi; \ + AW((regs)->stacks.pcsp_hi) = pcsp_hi; \ + } \ + AW((regs)->crs.cr0_lo) = PV_TYPE##_NV_READ_CR0_LO_REG_VALUE(); \ + AW((regs)->crs.cr0_hi) = PV_TYPE##_NV_READ_CR0_HI_REG_VALUE(); \ + AW((regs)->crs.cr1_lo) = PV_TYPE##_NV_READ_CR1_LO_REG_VALUE(); \ + AW((regs)->crs.cr1_hi) = PV_TYPE##_NV_READ_CR1_HI_REG_VALUE(); \ + AW((regs)->wd) = PV_TYPE##_READ_WD_REG_VALUE(); \ + CHECK_USD_BASE_SIZE(regs); \ +} while (0) + +/* Save stack registers on kernel native/host/hypervisor mode */ +#define NATIVE_SAVE_STACK_REGS(regs, ti, from_ti, flushc) \ + PREFIX_SAVE_STACK_REGS(NATIVE, regs, ti, from_ti, flushc) + +#define STORE_USER_REGS_TO_THREAD_INFO(thread_info, \ + stk_bottom, stk_top, stk_sz) \ +({ \ + (thread_info)->u_stack.bottom = stk_bottom; \ + (thread_info)->u_stack.top = stk_top; \ + (thread_info)->u_stack.size = stk_sz; \ +}) + +/* + * Interrupts should be disabled by caller to read all hardware + * stacks registers in coordinated state + * Hardware stacks do not copy or flush to memory + */ +#define ATOMIC_SAVE_CURRENT_STACK_REGS(stacks, crs) \ +({ \ + ATOMIC_SAVE_ALL_STACKS_REGS(stacks, &(crs)->cr1_hi); \ + \ + (stacks)->top = NATIVE_NV_READ_SBR_REG_VALUE(); \ + (crs)->cr0_lo = NATIVE_NV_READ_CR0_LO_REG(); \ + 
(crs)->cr0_hi = NATIVE_NV_READ_CR0_HI_REG(); \ + (crs)->cr1_lo = NATIVE_NV_READ_CR1_LO_REG(); \ + \ + /* \ + * Do not copy copy_user_stacks()'s kernel data stack frame \ + */ \ + (stacks)->usd_lo.USD_lo_base += \ + (((crs)->cr1_hi.CR1_hi_ussz << 4) - \ + (stacks)->usd_hi.USD_hi_size); \ + (stacks)->usd_hi.USD_hi_size = \ + ((crs)->cr1_hi.CR1_hi_ussz << 4); \ +}) + +#define NATIVE_DO_SAVE_MONITOR_COUNTERS(sw_regs) \ +do { \ + sw_regs->ddmar0 = NATIVE_READ_DDMAR0_REG_VALUE(); \ + sw_regs->ddmar1 = NATIVE_READ_DDMAR1_REG_VALUE(); \ + sw_regs->dimar0 = NATIVE_READ_DIMAR0_REG_VALUE(); \ + sw_regs->dimar1 = NATIVE_READ_DIMAR1_REG_VALUE(); \ +} while (0) +#define NATIVE_SAVE_MONITOR_COUNTERS(task) \ +do { \ + struct sw_regs *sw_regs = &((task)->thread.sw_regs); \ + NATIVE_DO_SAVE_MONITOR_COUNTERS(sw_regs); \ +} while (0) + +static inline void native_save_user_only_regs(struct sw_regs *sw_regs) +{ + if (machine.save_dimtp) + machine.save_dimtp(&sw_regs->dimtp); + + /* Skip breakpoints-related fields handled by + * ptrace_hbp_triggered() and arch-independent + * hardware breakpoints support */ + AW(sw_regs->dibsr) &= E2K_DIBSR_MASK_ALL_BP; + AW(sw_regs->dibsr) |= NATIVE_READ_DIBSR_REG_VALUE() & + ~E2K_DIBSR_MASK_ALL_BP; + AW(sw_regs->ddbsr) &= E2K_DDBSR_MASK_ALL_BP; + AW(sw_regs->ddbsr) |= NATIVE_READ_DDBSR_REG_VALUE() & + ~E2K_DDBSR_MASK_ALL_BP; + + sw_regs->ddmcr = NATIVE_READ_DDMCR_REG(); + sw_regs->dimcr = NATIVE_READ_DIMCR_REG(); + if (!MONITORING_IS_ACTIVE) + NATIVE_DO_SAVE_MONITOR_COUNTERS(sw_regs); +} + +#if (E2K_MAXGR_d == 32) + +/* Save/Restore global registers */ +#define SAVE_GREGS_PAIR(gregs, nolo_save, nohi_save, \ + nolo_greg, nohi_greg, iset) \ + NATIVE_SAVE_GREG(&(gregs)[nolo_save], \ + &(gregs)[nohi_save], \ + nolo_greg, \ + nohi_greg, \ + iset) +#define SAVE_GREGS_PAIR_V2(gregs, nolo_save, nohi_save, \ + nolo_greg, nohi_greg) \ + NATIVE_SAVE_GREG_V2(&(gregs)[nolo_save], \ + &(gregs)[nohi_save], \ + nolo_greg, \ + nohi_greg) +#define 
SAVE_GREGS_PAIR_V5(gregs, nolo_save, nohi_save, \ + nolo_greg, nohi_greg) \ + NATIVE_SAVE_GREG_V5(&(gregs)[nolo_save], \ + &(gregs)[nohi_save], \ + nolo_greg, \ + nohi_greg) + +/* + * Registers gN-g(N+3) are reserved by ABI. Now N=16. + * These registers hold pointers to current, so we can skip saving and + * restoring them on context switch and upon entering/exiting signal handlers + * (they are stored in thread_info) + */ +#define DO_SAVE_GREGS_ON_MASK(gregs, iset, PAIR_MASK_NOT_SAVE) \ +do { \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 0) | (1 << 1))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 0, 1, 0, 1, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 2) | (1 << 3))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 2, 3, 2, 3, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 4) | (1 << 5))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 4, 5, 4, 5, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 6) | (1 << 7))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 6, 7, 6, 7, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 8) | (1 << 9))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 8, 9, 8, 9, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 10) | (1 << 11))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 10, 11, 10, 11, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 12) | (1 << 13))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 12, 13, 12, 13, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 14) | (1 << 15))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 14, 15, 14, 15, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 16) | (1 << 17))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 16, 17, 16, 17, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 18) | (1 << 19))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 18, 19, 18, 19, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 20) | (1 << 21))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 20, 21, 20, 21, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 22) | (1 << 23))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 22, 23, 22, 23, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 24) | (1 << 25))) == 0) { \ + 
SAVE_GREGS_PAIR(gregs, 24, 25, 24, 25, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 26) | (1 << 27))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 26, 27, 26, 27, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 28) | (1 << 29))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 28, 29, 28, 29, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 30) | (1 << 31))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 30, 31, 30, 31, iset); \ + } \ +} while (0) +#define DO_SAVE_LOCAL_GREGS_ON_MASK(gregs, iset, PAIR_MASK_NOT_SAVE) \ +do { \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 16) | (1 << 17))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 0, 1, 16, 17, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 18) | (1 << 19))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 2, 3, 18, 19, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 20) | (1 << 21))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 4, 5, 20, 21, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 22) | (1 << 23))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 6, 7, 22, 23, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 24) | (1 << 25))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 8, 9, 24, 25, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 26) | (1 << 27))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 10, 11, 26, 27, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 28) | (1 << 29))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 12, 13, 28, 29, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 30) | (1 << 31))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 14, 15, 30, 31, iset); \ + } \ +} while (0) + +#define SAVE_ALL_GREGS(gregs, iset) \ + DO_SAVE_GREGS_ON_MASK(gregs, iset, 0UL) +#define SAVE_GREGS_EXCEPT_NO(gregs, iset, GREGS_PAIR_NO_NOT_SAVE) \ + DO_SAVE_GREGS_ON_MASK(gregs, iset, \ + (1 << GREGS_PAIR_NO_NOT_SAVE)) +#define SAVE_GREGS_EXCEPT_KERNEL(gregs, iset) \ + DO_SAVE_GREGS_ON_MASK(gregs, iset, KERNEL_GREGS_MASK) +#define SAVE_GREGS_EXCEPT_GLOBAL_AND_KERNEL(gregs, iset) \ + DO_SAVE_GREGS_ON_MASK(gregs, iset, \ + (GLOBAL_GREGS_USER_MASK | KERNEL_GREGS_MASK)) + +# define SAVE_GREGS(gregs, save_global, 
iset) \ +do { \ + if (save_global) { \ + SAVE_GREGS_EXCEPT_KERNEL(gregs, iset); \ + } else { \ + SAVE_GREGS_EXCEPT_GLOBAL_AND_KERNEL(gregs, iset); \ + } \ +} while (false) + +/* Same as SAVE_GREGS but saves %g16-%g31 registers only */ +# define SAVE_GREGS_SIGNAL(gregs, iset) \ +do { \ + DO_SAVE_LOCAL_GREGS_ON_MASK(gregs, iset, \ + (GLOBAL_GREGS_USER_MASK | KERNEL_GREGS_MASK)); \ +} while (false) + +#define RESTORE_GREGS_PAIR(gregs, nolo_save, nohi_save, \ + nolo_greg, nohi_greg, iset) \ + NATIVE_RESTORE_GREG(&(gregs)[nolo_save], \ + &(gregs)[nohi_save], \ + nolo_greg, \ + nohi_greg, \ + iset) +#define RESTORE_GREGS_PAIR_V2(gregs, nolo_save, nohi_save, \ + nolo_greg, nohi_greg) \ + NATIVE_RESTORE_GREG_V2(&(gregs)[nolo_save], \ + &(gregs)[nohi_save], \ + nolo_greg, \ + nohi_greg) +#define RESTORE_GREGS_PAIR_V5(gregs, nolo_save, nohi_save, \ + nolo_greg, nohi_greg) \ + NATIVE_RESTORE_GREG_V5(&(gregs)[nolo_save], \ + &(gregs)[nohi_save], \ + nolo_greg, \ + nohi_greg) + +#define DO_RESTORE_GREGS_ON_MASK(gregs, iset, PAIR_MASK_NOT_RESTORE) \ +do { \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 0) | (1 << 1))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 0, 1, 0, 1, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 2) | (1 << 3))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 2, 3, 2, 3, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 4) | (1 << 5))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 4, 5, 4, 5, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 6) | (1 << 7))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 6, 7, 6, 7, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 8) | (1 << 9))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 8, 9, 8, 9, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 10) | (1 << 11))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 10, 11, 10, 11, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 12) | (1 << 13))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 12, 13, 12, 13, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 14) | (1 << 15))) == 0) { \ + 
RESTORE_GREGS_PAIR(gregs, 14, 15, 14, 15, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 16) | (1 << 17))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 16, 17, 16, 17, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 18) | (1 << 19))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 18, 19, 18, 19, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 20) | (1 << 21))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 20, 21, 20, 21, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 22) | (1 << 23))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 22, 23, 22, 23, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 24) | (1 << 25))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 24, 25, 24, 25, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 26) | (1 << 27))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 26, 27, 26, 27, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 28) | (1 << 29))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 28, 29, 28, 29, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 30) | (1 << 31))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 30, 31, 30, 31, iset); \ + } \ +} while (0) + +#define DO_RESTORE_LOCAL_GREGS_ON_MASK(gregs, iset, PAIR_MASK_NOT_RESTORE) \ +do { \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 16) | (1 << 17))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 0, 1, 16, 17, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 18) | (1 << 19))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 2, 3, 18, 19, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 20) | (1 << 21))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 4, 5, 20, 21, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 22) | (1 << 23))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 6, 7, 22, 23, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 24) | (1 << 25))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 8, 9, 24, 25, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 26) | (1 << 27))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 10, 11, 26, 27, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 28) | (1 << 29))) == 0) { \ + 
RESTORE_GREGS_PAIR(gregs, 12, 13, 28, 29, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 30) | (1 << 31))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 14, 15, 30, 31, iset); \ + } \ +} while (0) + +#define RESTORE_ALL_GREGS(gregs, iset) \ + DO_RESTORE_GREGS_ON_MASK(gregs, iset, 0UL) +#define RESTORE_GREGS_EXCEPT_NO(gregs, iset, GREGS_PAIR_NO_NOT_RESTORE) \ + DO_RESTORE_GREGS_ON_MASK(gregs, iset, \ + (1 << GREGS_PAIR_NO_NOT_RESTORE)) +#define RESTORE_GREGS_EXCEPT_KERNEL(gregs, iset) \ + DO_RESTORE_GREGS_ON_MASK(gregs, iset, KERNEL_GREGS_MASK) +#define RESTORE_GREGS_EXCEPT_GLOBAL_AND_KERNEL(gregs, iset) \ + DO_RESTORE_GREGS_ON_MASK(gregs, iset, \ + (GLOBAL_GREGS_USER_MASK | KERNEL_GREGS_MASK)) + +# define RESTORE_GREGS(gregs, restore_global, iset) \ +do { \ + if (restore_global) { \ + RESTORE_GREGS_EXCEPT_KERNEL(gregs, iset); \ + } else { \ + RESTORE_GREGS_EXCEPT_GLOBAL_AND_KERNEL(gregs, iset); \ + } \ +} while (false) + +/* Same as RESTORE_GREGS but restores %g16-%g31 registers only */ +# define RESTORE_GREGS_SIGNAL(gregs, iset) \ +do { \ + DO_RESTORE_LOCAL_GREGS_ON_MASK(gregs, iset, \ + (GLOBAL_GREGS_USER_MASK | KERNEL_GREGS_MASK)); \ +} while (false) + +#ifdef CONFIG_GREGS_CONTEXT +#define NATIVE_INIT_G_REGS() \ +({ \ + init_BGR_reg(); \ + NATIVE_GREGS_SET_EMPTY(); \ + clear_memory_8(¤t_thread_info()->k_gregs, \ + sizeof(current_thread_info()->k_gregs), ETAGEWD); \ +}) +#else /* ! CONFIG_GREGS_CONTEXT */ +#define NATIVE_INIT_G_REGS() +#endif /* CONFIG_GREGS_CONTEXT */ + +#define NATIVE_BOOT_INIT_G_REGS() \ +({ \ + native_boot_init_BGR_reg(); \ + E2K_ALL_GREGS_SET_EMPTY(); \ +}) + +/* ptrace related guys: we do not use them on switching. 
*/ +# define NATIVE_GET_GREGS_FROM_THREAD(g_user, gtag_user, gbase) \ +({ \ + void * g_u = g_user; \ + void * gt_u = gtag_user; \ + \ + E2K_GET_GREGS_FROM_THREAD(g_u, gt_u, gbase); \ +}) + +# define NATIVE_SET_GREGS_TO_THREAD(gbase, g_user, gtag_user) \ +({ \ + void * g_u = g_user; \ + void * gt_u = gtag_user; \ + \ + E2K_SET_GREGS_TO_THREAD(gbase, g_u, gt_u); \ +}) + +#if defined(CONFIG_PARAVIRT_GUEST) +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +#include +#else /* !CONFIG_PARAVIRT_GUEST && !CONFIG_KVM_GUEST_KERNEL */ + +#define GET_GREGS_FROM_THREAD(g_user, gtag_user, gbase) \ + NATIVE_GET_GREGS_FROM_THREAD(g_user, gtag_user, gbase) + +#define SET_GREGS_TO_THREAD(gbase, g_user, gtag_user) \ + NATIVE_SET_GREGS_TO_THREAD(gbase, g_user, gtag_user) + +#endif /* !CONFIG_PARAVIRT_GUEST && !CONFIG_KVM_GUEST_KERNEL */ + +#else /* E2K_MAXGR_d != 32 */ + +# error "Unsupported E2K_MAXGR_d value" + +#endif /* E2K_MAXGR_d */ + +static inline void +native_save_local_glob_regs(local_gregs_t *l_gregs, bool is_signal) +{ + void (*save_local_gregs)(struct local_gregs *, bool is_signal); + + save_local_gregs = machine.save_local_gregs; + + copy_k_gregs_to_l_gregs(l_gregs, ¤t_thread_info()->k_gregs); + save_local_gregs(l_gregs, is_signal); +} +static inline void +native_restore_local_glob_regs(local_gregs_t *l_gregs, bool is_signal) +{ + void (*restore_local_gregs)(const struct local_gregs *, bool is_signal); + + restore_local_gregs = machine.restore_local_gregs; + + get_k_gregs_from_l_regs(¤t_thread_info()->k_gregs, l_gregs); + restore_local_gregs(l_gregs, is_signal); +} + +static inline void +native_get_all_user_glob_regs(global_regs_t *gregs) +{ + machine.save_gregs(gregs); + copy_k_gregs_to_gregs(gregs, ¤t_thread_info()->k_gregs); +} + +#define DO_SAVE_UPSR_REG_VALUE(upsr_reg, upsr_reg_value) \ + { AS_WORD(upsr_reg) = (upsr_reg_value); } + +#define NATIVE_DO_SAVE_UPSR_REG(upsr_reg) \ + DO_SAVE_UPSR_REG_VALUE((upsr_reg), \ + NATIVE_NV_READ_UPSR_REG_VALUE()) +#define 
DO_SAVE_UPSR_REG(upsr_reg) \ + DO_SAVE_UPSR_REG_VALUE((upsr_reg), READ_UPSR_REG_VALUE()) + +#define NATIVE_SAVE_UPSR_REG(regs) NATIVE_DO_SAVE_UPSR_REG((regs)->upsr) +#define SAVE_UPSR_REG(regs) DO_SAVE_UPSR_REG((regs)->upsr) + +#define DO_RESTORE_UPSR_REG(upsr_reg) \ + { WRITE_UPSR_REG(upsr_reg); } +#define NATIVE_DO_RESTORE_UPSR_REG(upsr_reg) \ + { NATIVE_WRITE_UPSR_REG(upsr_reg); } + +#define NATIVE_RESTORE_UPSR_REG(regs) NATIVE_DO_RESTORE_UPSR_REG((regs)->upsr) +#define RESTORE_UPSR_REG(regs) DO_RESTORE_UPSR_REG((regs)->upsr) + +#define NATIVE_SAVE_RPR_REGS(regs) \ +({ \ + regs->rpr_lo = NATIVE_READ_RPR_LO_REG_VALUE(); \ + regs->rpr_hi = NATIVE_READ_RPR_HI_REG_VALUE(); \ +}) + +#define NATIVE_SAVE_INTEL_REGS(regs) \ +do { \ + regs->cs_lo = NATIVE_READ_CS_LO_REG_VALUE(); \ + regs->cs_hi = NATIVE_READ_CS_HI_REG_VALUE(); \ + regs->ds_lo = NATIVE_READ_DS_LO_REG_VALUE(); \ + regs->ds_hi = NATIVE_READ_DS_HI_REG_VALUE(); \ + regs->es_lo = NATIVE_READ_ES_LO_REG_VALUE(); \ + regs->es_hi = NATIVE_READ_ES_HI_REG_VALUE(); \ + regs->fs_lo = NATIVE_READ_FS_LO_REG_VALUE(); \ + regs->fs_hi = NATIVE_READ_FS_HI_REG_VALUE(); \ + regs->gs_lo = NATIVE_READ_GS_LO_REG_VALUE(); \ + regs->gs_hi = NATIVE_READ_GS_HI_REG_VALUE(); \ + regs->ss_lo = NATIVE_READ_SS_LO_REG_VALUE(); \ + regs->ss_hi = NATIVE_READ_SS_HI_REG_VALUE(); \ + NATIVE_SAVE_RPR_REGS(regs); \ + if (IS_ENABLED(CONFIG_TC_STORAGE)) { \ + NATIVE_FLUSH_ALL_TC; \ + regs->tcd = NATIVE_GET_TCD(); \ + } \ +} while (0) + +#define NATIVE_RESTORE_INTEL_REGS(regs) \ +do { \ + u64 cs_lo = regs->cs_lo; \ + u64 cs_hi = regs->cs_hi; \ + u64 ds_lo = regs->ds_lo; \ + u64 ds_hi = regs->ds_hi; \ + u64 es_lo = regs->es_lo; \ + u64 es_hi = regs->es_hi; \ + u64 fs_lo = regs->fs_lo; \ + u64 fs_hi = regs->fs_hi; \ + u64 gs_lo = regs->gs_lo; \ + u64 gs_hi = regs->gs_hi; \ + u64 ss_lo = regs->ss_lo; \ + u64 ss_hi = regs->ss_hi; \ + u64 rpr_lo = regs->rpr_lo; \ + u64 rpr_hi = regs->rpr_hi; \ + u64 tcd = regs->tcd; \ + 
NATIVE_CL_WRITE_CS_LO_REG_VALUE(cs_lo); \ + NATIVE_CL_WRITE_CS_HI_REG_VALUE(cs_hi); \ + NATIVE_CL_WRITE_DS_LO_REG_VALUE(ds_lo); \ + NATIVE_CL_WRITE_DS_HI_REG_VALUE(ds_hi); \ + NATIVE_CL_WRITE_ES_LO_REG_VALUE(es_lo); \ + NATIVE_CL_WRITE_ES_HI_REG_VALUE(es_hi); \ + NATIVE_CL_WRITE_FS_LO_REG_VALUE(fs_lo); \ + NATIVE_CL_WRITE_FS_HI_REG_VALUE(fs_hi); \ + NATIVE_CL_WRITE_GS_LO_REG_VALUE(gs_lo); \ + NATIVE_CL_WRITE_GS_HI_REG_VALUE(gs_hi); \ + NATIVE_CL_WRITE_SS_LO_REG_VALUE(ss_lo); \ + NATIVE_CL_WRITE_SS_HI_REG_VALUE(ss_hi); \ + NATIVE_WRITE_RPR_LO_REG_VALUE(rpr_lo); \ + NATIVE_WRITE_RPR_HI_REG_VALUE(rpr_hi); \ + if (IS_ENABLED(CONFIG_TC_STORAGE)) \ + NATIVE_SET_TCD(tcd); \ +} while (0) + +/* + * Procedure stack (PS) and procedure chain stack (PCS) hardware filling and + * spilling is asynchronous process. Page fault traps can overlay to this + * asynchronous process and some filling and spilling requests can be not + * completed. These requests were dropped by MMU to trap cellar. + * We should save not completed filling data before starting of spilling + * current procedure chain stack to preserve from filling data loss + */ + +#define NATIVE_SAVE_TRAP_CELLAR(regs, trap) \ +({ \ + kernel_trap_cellar_t *kernel_tcellar = \ + (kernel_trap_cellar_t *)KERNEL_TRAP_CELLAR; \ + kernel_trap_cellar_ext_t *kernel_tcellar_ext = \ + (kernel_trap_cellar_ext_t *) \ + ((void *) KERNEL_TRAP_CELLAR + TC_EXT_OFFSET); \ + trap_cellar_t *tcellar = (trap)->tcellar; \ + int cnt, cs_req_num = 0, cs_a4 = 0, off, max_cnt; \ + u64 kstack_pf_addr = 0, stack = (u64) current->stack; \ + bool end_flag = false, is_qp; \ + \ + max_cnt = NATIVE_READ_MMU_TRAP_COUNT(); \ + if (max_cnt < 3) { \ + max_cnt = 3 * HW_TC_SIZE; \ + end_flag = true; \ + } \ + (trap)->curr_cnt = -1; \ + (trap)->ignore_user_tc = 0; \ + (trap)->tc_called = 0; \ + (trap)->is_intc = false; \ + (trap)->from_sigreturn = 0; \ + CLEAR_CLW_REQUEST_COUNT(regs); \ + BUG_ON(max_cnt > 3 * HW_TC_SIZE); \ + for (cnt = 0; 3 * cnt < max_cnt; cnt++) 
{ \ + tc_opcode_t opcode; \ + tc_cond_t condition; \ + \ + if (end_flag) \ + if (AW(kernel_tcellar[cnt].condition) == -1) \ + break; \ + \ + tcellar[cnt].address = kernel_tcellar[cnt].address; \ + condition = kernel_tcellar[cnt].condition; \ + tcellar[cnt].condition = condition; \ + AW(opcode) = AS(condition).opcode; \ + is_qp = (AS(opcode).fmt == LDST_QP_FMT || \ + cpu_has(CPU_FEAT_QPREG) && AS(condition).fmtc && \ + AS(opcode).fmt == LDST_QWORD_FMT); \ + if (AS(condition).clw) { \ + if (GET_CLW_REQUEST_COUNT(regs) == 0) { \ + SET_CLW_FIRST_REQUEST(regs, cnt); \ + } \ + INC_CLW_REQUEST_COUNT(regs); \ + } \ + if (is_qp) \ + tcellar[cnt].mask = kernel_tcellar_ext[cnt].mask; \ + if (AS(condition).store) { \ + NATIVE_MOVE_TAGGED_DWORD( \ + &(kernel_tcellar[cnt].data), \ + &(tcellar[cnt].data)); \ + if (is_qp) { \ + NATIVE_MOVE_TAGGED_DWORD( \ + &(kernel_tcellar_ext[cnt].data), \ + &(tcellar[cnt].data_ext)); \ + } \ + } else if (AS(condition).s_f && AS(condition).sru) { \ + if (cs_req_num == 0) \ + cs_a4 = tcellar[cnt].address & (1 << 4); \ + cs_req_num++; \ + } \ + if (unlikely((AS(condition).s_f || IS_SPILL(tcellar[cnt])) && \ + tcellar[cnt].address >= stack && \ + tcellar[cnt].address < stack + KERNEL_STACKS_SIZE)) \ + kstack_pf_addr = tcellar[cnt].address; \ + tcellar[cnt].flags = 0; \ + } \ + (trap)->tc_count = cnt * 3; \ + if (unlikely(GET_CLW_REQUEST_COUNT(regs) && \ + cpu_has(CPU_HWBUG_CLW_STALE_L1_ENTRY))) \ + SET_CLW_CPU(regs, raw_smp_processor_id()); \ + if (cs_req_num > 0) { \ + /* recover chain stack pointers to repeat FILL */ \ + e2k_pcshtp_t pcshtp = NATIVE_READ_PCSHTP_REG_SVALUE(); \ + s64 pcshtp_ext = PCSHTP_SIGN_EXTEND(pcshtp); \ + e2k_pcsp_hi_t PCSP_hi = NATIVE_NV_READ_PCSP_HI_REG(); \ + if (!cs_a4) { \ + off = cs_req_num * 32; \ + } else { \ + off = (cs_req_num - 1) * 32 + 16; \ + } \ + pcshtp_ext -= off; \ + PCSP_hi.PCSP_hi_ind += off; \ + NATIVE_WRITE_PCSHTP_REG_SVALUE(pcshtp_ext); \ + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG(PCSP_hi); \ + } \ + 
kstack_pf_addr; \ +}) + +#ifdef CONFIG_CLW_ENABLE +/* + * If requests from CLW unit (user stack window clearing) were not + * completed, and they were droped to the kernel trap cellar, + * then we should save CLW unit state before switch to other stack + * and restore CLW state after return to the user stack + */ +# define CLEAR_CLW_REQUEST_COUNT(regs) ((regs)->clw_count = 0) +# define INC_CLW_REQUEST_COUNT(regs) ((regs)->clw_count++) +# define GET_CLW_REQUEST_COUNT(regs) ((regs)->clw_count) +# define SET_CLW_FIRST_REQUEST(regs, cnt) ((regs)->clw_first = (cnt)) +# define GET_CLW_FIRST_REQUEST(regs) ((regs)->clw_first) +# define SET_CLW_CPU(regs, cpu) ((regs)->clw_cpu = (cpu)) +#define ENABLE_US_CLW() \ +do { \ + if (!cpu_has(CPU_HWBUG_CLW)) \ + write_MMU_US_CL_D(0); \ +} while (0) +# define DISABLE_US_CLW() write_MMU_US_CL_D(1) +#else /* !CONFIG_CLW_ENABLE */ +# define CLEAR_CLW_REQUEST_COUNT(regs) +# define INC_CLW_REQUEST_COUNT(regs) +# define GET_CLW_REQUEST_COUNT(regs) (0) +# define SET_CLW_FIRST_REQUEST(regs, cnt) +# define GET_CLW_FIRST_REQUEST(regs) (0) +# define SET_CLW_CPU(regs, cpu) +# define ENABLE_US_CLW() +# define DISABLE_US_CLW() +#endif /* CONFIG_CLW_ENABLE */ + +#define NATIVE_RESTORE_COMMON_REGS(regs) \ +do { \ + u64 ctpr1 = AW(regs->ctpr1), ctpr2 = AW(regs->ctpr2), \ + ctpr3 = AW(regs->ctpr3), ctpr1_hi = AW(regs->ctpr1_hi), \ + ctpr2_hi = AW(regs->ctpr2_hi), ctpr3_hi = AW(regs->ctpr3_hi), \ + lsr = regs->lsr, lsr1 = regs->lsr1, \ + ilcr = regs->ilcr, ilcr1 = regs->ilcr1; \ + \ + NATIVE_RESTORE_COMMON_REGS_VALUES(ctpr1, ctpr2, ctpr3, ctpr1_hi, \ + ctpr2_hi, ctpr3_hi, lsr, lsr1, ilcr, ilcr1); \ +} while (0) + +#define PREFIX_RESTORE_USER_CRs(PV_TYPE, regs) \ +({ \ + u64 cr0_hi = AS_WORD((regs)->crs.cr0_hi); \ + u64 cr0_lo = AS_WORD((regs)->crs.cr0_lo); \ + u64 cr1_hi = AS_WORD((regs)->crs.cr1_hi); \ + u64 cr1_lo = AS_WORD((regs)->crs.cr1_lo); \ + PV_TYPE##_NV_NOIRQ_WRITE_CR0_HI_REG_VALUE(cr0_hi); \ + 
PV_TYPE##_NV_NOIRQ_WRITE_CR0_LO_REG_VALUE(cr0_lo); \ + PV_TYPE##_NV_NOIRQ_WRITE_CR1_HI_REG_VALUE(cr1_hi); \ + PV_TYPE##_NV_NOIRQ_WRITE_CR1_LO_REG_VALUE(cr1_lo); \ +}) + +#define PREFIX_RESTORE_USER_STACK_REGS(PV_TYPE, regs, in_syscall) \ +({ \ + thread_info_t *ti = current_thread_info(); \ + e2k_stacks_t *stacks; \ + u64 usd_lo; \ + u64 usd_hi; \ + u64 top; \ + \ + stacks = (in_syscall) ? \ + syscall_guest_get_restore_stacks(ti, regs) \ + : \ + trap_guest_get_restore_stacks(ti, regs); \ + usd_lo = AS_WORD(stacks->usd_lo); \ + usd_hi = AS_WORD(stacks->usd_hi); \ + top = stacks->top; \ + PREFIX_RESTORE_USER_CRs(PV_TYPE, regs); \ + CHECK_USD_BASE_SIZE(regs); \ + PV_TYPE##_NV_WRITE_USBR_USD_REG_VALUE(top, usd_hi, usd_lo); \ + RESTORE_USER_CUT_REGS(ti, regs, in_syscall); \ +}) +#define NATIVE_RESTORE_USER_CRs(regs) \ + PREFIX_RESTORE_USER_CRs(NATIVE, regs) +#define NATIVE_RESTORE_USER_STACK_REGS(regs, insyscall) \ + PREFIX_RESTORE_USER_STACK_REGS(NATIVE, regs, insyscall) + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravrtualized) */ +#include +#else /* ! CONFIG_PARAVIRT_GUEST && ! 
CONFIG_KVM_GUEST_KERNEL */ +/* it is native kernel without any virtualization */ +/* or native host kernel with virtualization support */ + +/* Save stack registers on kernel native/host/hypervisor mode */ +#define SAVE_STACK_REGS(regs, ti, user, trap) \ + NATIVE_SAVE_STACK_REGS(regs, ti, user, trap) + +#define RESTORE_USER_STACK_REGS(regs, in_syscall) \ + NATIVE_RESTORE_USER_STACK_REGS(regs, in_syscall) +#define RESTORE_USER_TRAP_STACK_REGS(regs) \ + RESTORE_USER_STACK_REGS(regs, false) +#define RESTORE_USER_SYSCALL_STACK_REGS(regs) \ + RESTORE_USER_STACK_REGS(regs, true) +#define RESTORE_COMMON_REGS(regs) \ + NATIVE_RESTORE_COMMON_REGS(regs) + +#define INIT_G_REGS() NATIVE_INIT_G_REGS() +#define BOOT_INIT_G_REGS() NATIVE_BOOT_INIT_G_REGS() + +static inline void +save_local_glob_regs(local_gregs_t *l_gregs, bool is_signal) +{ + native_save_local_glob_regs(l_gregs, is_signal); +} +static inline void +restore_local_glob_regs(local_gregs_t *l_gregs, bool is_signal) +{ + native_restore_local_glob_regs(l_gregs, is_signal); +} + +static inline void +get_all_user_glob_regs(global_regs_t *gregs) +{ + native_get_all_user_glob_regs(gregs); +} + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#if CONFIG_CPU_ISET >= 6 +static inline void restore_dimtp(const e2k_dimtp_t *dimtp) +{ + NATIVE_SET_DSREGS_CLOSED_NOEXC(dimtp.lo, dimtp.hi, + dimtp->lo, dimtp->hi, 4); +} +static inline void clear_dimtp(void) +{ + NATIVE_SET_DSREGS_CLOSED_NOEXC(dimtp.lo, dimtp.hi, 0ull, 0ull, 4); +} +#elif CONFIG_CPU_ISET == 0 +static inline void restore_dimtp(const e2k_dimtp_t *dimtp) +{ + if (machine.restore_dimtp) + machine.restore_dimtp(dimtp); +} +static inline void clear_dimtp(void) +{ + if (machine.restore_dimtp) { + e2k_dimtp_t dimtp = {{ 0 }}; + machine.restore_dimtp(&dimtp); + } +} +#else +static inline void restore_dimtp(const e2k_dimtp_t *dimtp) { } +static inline void clear_dimtp(void) { } +#endif + +#define NATIVE_RESTORE_MONITOR_COUNTERS(sw_regs) \ +do { \ + e2k_ddmcr_t ddmcr = sw_regs->ddmcr; \ 
+ u64 ddmar0 = sw_regs->ddmar0; \ + u64 ddmar1 = sw_regs->ddmar1; \ + e2k_dimcr_t dimcr = sw_regs->dimcr; \ + u64 dimar0 = sw_regs->dimar0; \ + u64 dimar1 = sw_regs->dimar1; \ + \ + restore_dimtp(&sw_regs->dimtp); \ + NATIVE_WRITE_DDMAR0_REG_VALUE(ddmar0); \ + NATIVE_WRITE_DDMAR1_REG_VALUE(ddmar1); \ + NATIVE_WRITE_DIMAR0_REG_VALUE(dimar0); \ + NATIVE_WRITE_DIMAR1_REG_VALUE(dimar1); \ + NATIVE_WRITE_DDMCR_REG(ddmcr); \ + NATIVE_WRITE_DIMCR_REG(dimcr); \ +} while (0) + +/* + * When we use monitor registers, we count monitor events for the whole system, + * so DIMCR, DDMCR, DIMAR0, DIMAR1, DDMAR0, DDMAR1, DIBSR, DDBSR registers are + * not dependent on process and should not be restored while process switching. + */ +static inline void native_restore_user_only_regs(struct sw_regs *sw_regs) +{ + e2k_dibsr_t dibsr = sw_regs->dibsr; + e2k_ddbsr_t ddbsr = sw_regs->ddbsr; + + /* Skip breakpoints-related fields handled by + * ptrace_hbp_triggered() and arch-independent + * hardware breakpoints support */ + AW(dibsr) &= ~E2K_DIBSR_MASK_ALL_BP; + AW(dibsr) |= NATIVE_READ_DIBSR_REG_VALUE() & E2K_DIBSR_MASK_ALL_BP; + AW(ddbsr) &= ~E2K_DDBSR_MASK_ALL_BP; + AW(ddbsr) |= NATIVE_READ_DDBSR_REG_VALUE() & E2K_DDBSR_MASK_ALL_BP; + + if (!MONITORING_IS_ACTIVE) { + NATIVE_WRITE_DIBSR_REG(dibsr); + NATIVE_WRITE_DDBSR_REG(ddbsr); + NATIVE_RESTORE_MONITOR_COUNTERS(sw_regs); + } +} + +static inline void native_clear_user_only_regs(void) +{ + u8 monitors_used = perf_read_monitors_used(); + u8 bps_used = perf_read_bps_used(); + if (!bps_used) { + NATIVE_WRITE_DIBCR_REG_VALUE(0); + NATIVE_WRITE_DDBCR_REG_VALUE(0); + } + if (!MONITORING_IS_ACTIVE) { + if (!monitors_used) { + NATIVE_WRITE_DIMCR_REG_VALUE(0); + NATIVE_WRITE_DIBSR_REG_VALUE(0); + NATIVE_WRITE_DDMCR_REG_VALUE(0); + NATIVE_WRITE_DDBSR_REG_VALUE(0); + } else { + e2k_dimcr_t dimcr = NATIVE_READ_DIMCR_REG(); + e2k_ddmcr_t ddmcr = NATIVE_READ_DDMCR_REG(); + e2k_dibsr_t dibsr = NATIVE_READ_DIBSR_REG(); + e2k_ddbsr_t ddbsr = 
NATIVE_READ_DDBSR_REG(); + if (!(monitors_used & DIM0)) { + dimcr.half_word[0] = 0; + dibsr.m0 = 0; + } + if (!(monitors_used & DIM1)) { + dimcr.half_word[1] = 0; + dibsr.m1 = 0; + } + if (!(monitors_used & DDM0)) { + ddmcr.half_word[0] = 0; + ddbsr.m0 = 0; + } + if (!(monitors_used & DDM1)) { + ddmcr.half_word[1] = 0; + ddbsr.m1 = 0; + } + NATIVE_WRITE_DIMCR_REG(dimcr); + NATIVE_WRITE_DDMCR_REG(ddmcr); + NATIVE_WRITE_DIBSR_REG(dibsr); + NATIVE_WRITE_DDBSR_REG(ddbsr); + } + } + clear_dimtp(); +} + + +/* + * Set some special registers in accordance with + * E2K API specifications. + */ +#define GET_FPU_DEFAULTS(fpsr, fpcr, pfpfr) \ +({ \ + AW(fpsr) = 0; \ + AW(pfpfr) = 0; \ + AW(fpcr) = 32; \ + \ + /* masks */ \ + AS_STRUCT(pfpfr).im = 1; \ + AS_STRUCT(pfpfr).dm = 1; \ + AS_STRUCT(pfpfr).zm = 1; \ + AS_STRUCT(pfpfr).om = 1; \ + AS_STRUCT(pfpfr).um = 1; \ + AS_STRUCT(pfpfr).pm = 1; \ + \ + /* flags ! NEEDSWORK ! */ \ + AS_STRUCT(pfpfr).pe = 1; \ + AS_STRUCT(pfpfr).ue = 1; \ + AS_STRUCT(pfpfr).oe = 1; \ + AS_STRUCT(pfpfr).ze = 1; \ + AS_STRUCT(pfpfr).de = 1; \ + AS_STRUCT(pfpfr).ie = 1; \ + /* rounding */ \ + AS_STRUCT(pfpfr).rc = 0; \ + \ + AS_STRUCT(pfpfr).fz = 0; \ + AS_STRUCT(pfpfr).dpe = 0; \ + AS_STRUCT(pfpfr).due = 0; \ + AS_STRUCT(pfpfr).doe = 0; \ + AS_STRUCT(pfpfr).dze = 0; \ + AS_STRUCT(pfpfr).dde = 0; \ + AS_STRUCT(pfpfr).die = 0; \ + \ + AS_STRUCT(fpcr).im = 1; \ + AS_STRUCT(fpcr).dm = 1; \ + AS_STRUCT(fpcr).zm = 1; \ + AS_STRUCT(fpcr).om = 1; \ + AS_STRUCT(fpcr).um = 1; \ + AS_STRUCT(fpcr).pm = 1; \ + /* rounding */ \ + AS_STRUCT(fpcr).rc = 0; \ + AS_STRUCT(fpcr).pc = 3; \ + \ + /* flags ! NEEDSWORK ! 
*/ \ + AS_STRUCT(fpsr).pe = 1; \ + AS_STRUCT(fpsr).ue = 1; \ + AS_STRUCT(fpsr).oe = 1; \ + AS_STRUCT(fpsr).ze = 1; \ + AS_STRUCT(fpsr).de = 1; \ + AS_STRUCT(fpsr).ie = 1; \ + \ + AS_STRUCT(fpsr).es = 0; \ + AS_STRUCT(fpsr).c1 = 0; \ +}) +#define INIT_SPECIAL_REGISTERS() \ +({ \ + e2k_fpsr_t fpsr; \ + e2k_pfpfr_t pfpfr; \ + e2k_fpcr_t fpcr; \ + \ + GET_FPU_DEFAULTS(fpsr, fpcr, pfpfr); \ + \ + NATIVE_NV_WRITE_PFPFR_REG(pfpfr); \ + NATIVE_NV_WRITE_FPCR_REG(fpcr); \ + NATIVE_NV_WRITE_FPSR_REG(fpsr); \ +}) + +/* Declarate here to prevent loop #include. */ +#define PT_PTRACED 0x00000001 + +#ifdef CONFIG_MLT_STORAGE +static inline void invalidate_MLT(void) +{ +# if CONFIG_CPU_ISET >= 3 + NATIVE_SET_MMUREG(mlt_inv, 0); +# else + machine.invalidate_MLT(); +# endif +} +#else +static inline void invalidate_MLT(void) { } +#endif + +static inline void +NATIVE_DO_SAVE_TASK_USER_REGS_TO_SWITCH(struct sw_regs *sw_regs, + bool task_is_binco, bool task_traced) +{ + if (unlikely(task_is_binco)) + NATIVE_SAVE_INTEL_REGS((sw_regs)); + + invalidate_MLT(); + + sw_regs->cutd = NATIVE_NV_READ_CUTD_REG(); + + if (unlikely(task_traced)) + native_save_user_only_regs(sw_regs); +} + +static inline void +NATIVE_SAVE_TASK_REGS_TO_SWITCH(struct task_struct *task) +{ +#ifdef CONFIG_VIRTUALIZATION + const int task_is_binco = TASK_IS_BINCO(task) || task_thread_info(task)->virt_machine; +#else + const int task_is_binco = TASK_IS_BINCO(task); +#endif + struct mm_struct *mm = task->mm; + struct sw_regs *sw_regs = &task->thread.sw_regs; + save_gregs_fn_t save_gregs_dirty_bgr_fn = machine.save_gregs_dirty_bgr; + e2k_fpcr_t fpcr = NATIVE_NV_READ_FPCR_REG(); + e2k_fpsr_t fpsr = NATIVE_NV_READ_FPSR_REG(); + e2k_pfpfr_t pfpfr = NATIVE_NV_READ_PFPFR_REG(); + + /* Make sure we do not call scheduler from NMI context */ + WARN_ONCE(!AS(sw_regs->upsr).nmie, + "Non-maskable interrupts are disabled\n"); + + /* Kernel does not use MLT so skip invalidation for kernel threads */ + 
NATIVE_DO_SAVE_TASK_USER_REGS_TO_SWITCH(sw_regs, task_is_binco, + !!(task->ptrace & PT_PTRACED)); + + if (mm) { + sw_regs->fpcr = fpcr; + sw_regs->fpsr = fpsr; + sw_regs->pfpfr = pfpfr; +#ifdef CONFIG_GREGS_CONTEXT + save_gregs_dirty_bgr_fn(&task->thread.sw_regs.gregs); +#endif + + /* + * If AAU was not cleared then at a trap exit of next user + * AAU will start working, so clear it explicitly here. + */ + native_clear_apb(); + } + + NATIVE_FLUSHCPU; + + sw_regs->top = NATIVE_NV_READ_SBR_REG_VALUE(); + sw_regs->usd_hi = NATIVE_NV_READ_USD_HI_REG(); + sw_regs->usd_lo = NATIVE_NV_READ_USD_LO_REG(); + + sw_regs->crs.cr1_lo = NATIVE_NV_READ_CR1_LO_REG(); + sw_regs->crs.cr1_hi = NATIVE_NV_READ_CR1_HI_REG(); + sw_regs->crs.cr0_lo = NATIVE_NV_READ_CR0_LO_REG(); + sw_regs->crs.cr0_hi = NATIVE_NV_READ_CR0_HI_REG(); + + /* These will wait for the flush so we give + * the flush some time to finish. */ + sw_regs->psp_hi = NATIVE_NV_READ_PSP_HI_REG(); + sw_regs->psp_lo = NATIVE_NV_READ_PSP_LO_REG(); + sw_regs->pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG(); + sw_regs->pcsp_lo = NATIVE_NV_READ_PCSP_LO_REG(); +} + +/* + * now lcc has problem with structure on registers + * (It moves these structures in stack memory) + */ +static inline void +NATIVE_DO_RESTORE_TASK_USER_REGS_TO_SWITCH(struct sw_regs *sw_regs, + bool task_is_binco, bool task_traced) +{ + e2k_cutd_t cutd = sw_regs->cutd; + + if (unlikely(task_traced)) + native_restore_user_only_regs(sw_regs); + else /* Do this always when we don't test prev_task->ptrace */ + native_clear_user_only_regs(); + + NATIVE_NV_NOIRQ_WRITE_CUTD_REG(cutd); + + NATIVE_CLEAR_DAM; + + if (unlikely(task_is_binco)) { + if (machine.flushts) + machine.flushts(); + NATIVE_RESTORE_INTEL_REGS(sw_regs); + } +} + +static inline void +NATIVE_RESTORE_TASK_REGS_TO_SWITCH(struct task_struct *task, + struct thread_info *ti) +{ + struct sw_regs *sw_regs = &task->thread.sw_regs; + u64 top = sw_regs->top; + u64 usd_lo = AS_WORD(sw_regs->usd_lo); + u64 usd_hi = 
AS_WORD(sw_regs->usd_hi); + u64 psp_lo = AS_WORD(sw_regs->psp_lo); + u64 psp_hi = AS_WORD(sw_regs->psp_hi); + u64 pcsp_lo = AS_WORD(sw_regs->pcsp_lo); + u64 pcsp_hi = AS_WORD(sw_regs->pcsp_hi); + e2k_cr0_lo_t cr0_lo = sw_regs->crs.cr0_lo; + e2k_cr0_hi_t cr0_hi = sw_regs->crs.cr0_hi; + e2k_cr1_lo_t cr1_lo = sw_regs->crs.cr1_lo; + e2k_cr1_hi_t cr1_hi = sw_regs->crs.cr1_hi; +#ifdef CONFIG_VIRTUALIZATION + const int task_is_binco = TASK_IS_BINCO(task) || ti->virt_machine; +#else + const int task_is_binco = TASK_IS_BINCO(task); +#endif + struct mm_struct *mm = task->mm; + restore_gregs_fn_t restore_gregs_fn = machine.restore_gregs; + + NATIVE_FLUSHCPU; + + NATIVE_NV_WRITE_USBR_USD_REG_VALUE(top, usd_hi, usd_lo); + NATIVE_NV_WRITE_PSP_REG_VALUE(psp_hi, psp_lo); + NATIVE_NV_WRITE_PCSP_REG_VALUE(pcsp_hi, pcsp_lo); + + NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG(cr0_lo); + NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG(cr0_hi); + NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG(cr1_lo); + NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG(cr1_hi); + + NATIVE_DO_RESTORE_TASK_USER_REGS_TO_SWITCH(sw_regs, task_is_binco, + !!(task->ptrace & PT_PTRACED)); + + if (mm) { + e2k_fpcr_t fpcr = sw_regs->fpcr; + e2k_fpsr_t fpsr = sw_regs->fpsr; + e2k_pfpfr_t pfpfr = sw_regs->pfpfr; + +#ifdef CONFIG_GREGS_CONTEXT + restore_gregs_fn(&task->thread.sw_regs.gregs); +#endif + NATIVE_NV_WRITE_FPCR_REG(fpcr); + NATIVE_NV_WRITE_FPSR_REG(fpsr); + NATIVE_NV_WRITE_PFPFR_REG(pfpfr); + } +} + +static inline void +NATIVE_SWITCH_TO_KERNEL_STACK(e2k_addr_t ps_base, e2k_size_t ps_size, + e2k_addr_t pcs_base, e2k_size_t pcs_size, + e2k_addr_t ds_base, e2k_size_t ds_size) +{ + e2k_rwap_lo_struct_t reg_lo; + e2k_rwap_hi_struct_t reg_hi; + e2k_rwap_lo_struct_t stack_reg_lo; + e2k_rwap_hi_struct_t stack_reg_hi; + e2k_usbr_t usbr; + + /* + * Set Procedure Stack and Procedure Chain stack registers + * to the begining of initial PS and PCS stacks + */ + NATIVE_FLUSHCPU; + reg_lo.PSP_lo_half = 0; + reg_lo.PSP_lo_base = ps_base; + reg_lo._PSP_lo_rw = 
E2K_PSP_RW_PROTECTIONS; + reg_hi.PSP_hi_half = 0; + reg_hi.PSP_hi_size = ps_size; + reg_hi.PSP_hi_ind = 0; + NATIVE_NV_WRITE_PSP_REG(reg_hi, reg_lo); + reg_lo.PCSP_lo_half = 0; + reg_lo.PCSP_lo_base = pcs_base; + reg_lo._PCSP_lo_rw = E2K_PCSR_RW_PROTECTIONS; + reg_hi.PCSP_hi_half = 0; + reg_hi.PCSP_hi_size = pcs_size; + reg_hi.PCSP_hi_ind = 0; + NATIVE_NV_WRITE_PCSP_REG(reg_hi, reg_lo); + + + /* + * Set stack pointers to the begining of kernel initial data stack + */ + + usbr.USBR_base = ds_base + ds_size; + + /* + * Reserve additional 64 bytes for parameters area. + * Compiler might use it to temporarily store the function's parameters + */ + + stack_reg_lo.USD_lo_half = 0; + stack_reg_lo.USD_lo_p = 0; + stack_reg_lo.USD_lo_base = ds_base + ds_size - 64; + + stack_reg_hi.USD_hi_half = 0; + stack_reg_hi.USD_hi_size = ds_size - 64; + + NATIVE_NV_WRITE_USBR_USD_REG(usbr, stack_reg_hi, stack_reg_lo); +} + +/* + * There are TIR_NUM(19) tir regs. Bits 64 - 56 is current tir nr + * After each E2K_GET_DSREG(tir.lo) we will read next tir. + * For more info see instruction set doc. 
+ * Read tir regs order is significant + */ +#define SAVE_TIRS(TIRs, TIRs_num, from_intc) \ +({ \ + unsigned long nr_TIRs = -1, TIR_hi, TIR_lo = 0; \ + unsigned long all_interrupts = 0; \ + do { \ + TIR_hi = NATIVE_READ_TIR_HI_REG_VALUE(); \ + if (unlikely(from_intc && GET_NR_TIRS(TIR_hi) >= TIR_NUM)) \ + break; \ + TIR_lo = NATIVE_READ_TIR_LO_REG_VALUE(); \ + ++nr_TIRs; \ + TIRs[GET_NR_TIRS(TIR_hi)].TIR_lo.TIR_lo_reg = TIR_lo; \ + TIRs[GET_NR_TIRS(TIR_hi)].TIR_hi.TIR_hi_reg = TIR_hi; \ + all_interrupts |= TIR_hi; \ + } while(GET_NR_TIRS(TIR_hi)); \ + TIRs_num = nr_TIRs; \ + \ + all_interrupts & (exc_all_mask | aau_exc_mask); \ +}) +#define UNFREEZE_TIRs() NATIVE_WRITE_TIR_LO_REG_VALUE(0) +#define SAVE_SBBP(sbbp) \ +do { \ + int i; \ + for (i = 0; i < SBBP_ENTRIES_NUM; i++) \ + (sbbp)[i] = NATIVE_READ_SBBP_REG_VALUE(); \ +} while (0) + +static inline void set_osgd_task_struct(struct task_struct *task) +{ + e2k_gd_lo_t gd_lo; + e2k_gd_hi_t gd_hi; + + AW(gd_lo) = 0; + AW(gd_hi) = 0; + AS(gd_lo).base = (u64) task; + AS(gd_lo).rw = E2K_GD_RW_PROTECTIONS; + AS(gd_hi).size = round_up(sizeof(struct task_struct), + E2K_ALIGN_GLOBALS_SZ); + + BUG_ON(!IS_ALIGNED((u64) task, E2K_ALIGN_GLOBALS_SZ)); + WRITE_OSGD_REG(gd_hi, gd_lo); + atomic_load_osgd_to_gd(); +} + +static inline void +native_set_current_thread_info(struct thread_info *thread, + struct task_struct *task) +{ + NATIVE_WRITE_CURRENT_REG(thread); + E2K_SET_DGREG_NV(CURRENT_TASK_GREG, task); + set_osgd_task_struct(task); +} + +static inline void +set_current_thread_info(struct thread_info *thread, struct task_struct *task) +{ + WRITE_CURRENT_REG(thread); + E2K_SET_DGREG_NV(CURRENT_TASK_GREG, task); + set_osgd_task_struct(task); +} + +#define SAVE_PSYSCALL_RVAL(regs, _rval, _rval1, _rval2, _rv1_tag, \ + _rv2_tag, _return_desk) \ +({ \ + (regs)->sys_rval = (_rval); \ + (regs)->rval1 = (_rval1); \ + (regs)->rval2 = (_rval2); \ + (regs)->rv1_tag = (_rv1_tag); \ + (regs)->rv2_tag = (_rv2_tag); \ + (regs)->return_desk = 
(_return_desk); \ +}) + +#define SAVE_SYSCALL_RVAL(regs, rval) \ +({ \ + (regs)->sys_rval = (rval); \ +}) + +#endif /* _E2K_REGS_STATE_H */ + diff --git a/arch/e2k/include/asm/rlimits.h b/arch/e2k/include/asm/rlimits.h new file mode 100644 index 000000000000..71b4efb93a95 --- /dev/null +++ b/arch/e2k/include/asm/rlimits.h @@ -0,0 +1,13 @@ +#ifndef _E2K_RLIMITS_H_ +#define _E2K_RLIMITS_H_ + +#define PS_RLIM_CUR (128*1024*1024) +#define PCS_RLIM_CUR (8*1024*1024) + +/* + * Hard stacks rlimits numbers + */ +#define RLIMIT_P_STACK_EXT 16 +#define RLIMIT_PC_STACK_EXT 17 + +#endif /* _E2K_RLIMITS_H_ */ diff --git a/arch/e2k/include/asm/rtc.h b/arch/e2k/include/asm/rtc.h new file mode 100644 index 000000000000..cd445acb1bfa --- /dev/null +++ b/arch/e2k/include/asm/rtc.h @@ -0,0 +1,6 @@ +#ifndef _E2K_RTC_H +#define _E2K_RTC_H + +extern noinline int sclk_register(void *); + +#endif diff --git a/arch/e2k/include/asm/rwsem.h b/arch/e2k/include/asm/rwsem.h new file mode 100644 index 000000000000..728a334d4df5 --- /dev/null +++ b/arch/e2k/include/asm/rwsem.h @@ -0,0 +1,212 @@ +/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG + * + * Written by David Howells (dhowells@redhat.com). + * + * Derived from asm-x86/semaphore.h + * + * + * The MSW of the count is the negated number of active writers and waiting + * lockers, and the LSW is the total number of active locks + * + * The lock count is initialized to 0 (no active and no waiting lockers). + * + * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an + * uncontended lock. This can be determined because XADD returns the old value. + * Readers increment by 1 and see a positive value when uncontended, negative + * if there are writers (and maybe) readers waiting (in which case it goes to + * sleep). + * + * The value of WAITING_BIAS supports up to 32766 waiting processes. This can + * be extended to 65534 by manually checking the whole MSW rather than relying + * on the S flag. 
+ * + * The value of ACTIVE_BIAS supports up to 65535 active processes. + * + * This should be totally fair - if anything is waiting, a process that wants a + * lock will go to the back of the queue. When the currently active lock is + * released, if there's a writer at the front of the queue, then that and only + * that will be woken up; if there's a bunch of consecutive readers at the + * front, then they'll all be woken up, but no other readers will be. + */ + +#ifndef _E2K_RWSEM_H +#define _E2K_RWSEM_H + +#ifndef _LINUX_RWSEM_H +#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" +#endif + +#ifdef __KERNEL__ + +#include +#include + +#define RWSEM_UNLOCKED_VALUE 0L +#define RWSEM_ACTIVE_BIAS 1L +#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL +#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1) +#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS +#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) + +/* + * lock for reading + */ + +static inline int ___down_read(struct rw_semaphore *sem) +{ + long newcount; + +#ifndef CONFIG_SMP + newcount = sem->count.counter + RWSEM_ACTIVE_READ_BIAS; + sem->count.counter = newcount; +#else + newcount = __api_atomic_op(RWSEM_ACTIVE_READ_BIAS, + &sem->count, d, "addd", LOCK_MB); +#endif + + return (newcount <= 0); +} + +static inline void __down_read(struct rw_semaphore *sem) +{ + if (unlikely(___down_read(sem))) + rwsem_down_read_failed(sem); +} + +static inline int __down_read_killable(struct rw_semaphore *sem) +{ + if (unlikely(___down_read(sem))) + if (IS_ERR(rwsem_down_read_failed_killable(sem))) + return -EINTR; + + return 0; +} + +/* + * trylock for reading -- returns 1 if successful, 0 if contention + */ +static inline int __down_read_trylock(struct rw_semaphore *sem) +{ + long newcount; + +#ifndef CONFIG_SMP + if (sem->count.counter >= 0) + sem->count.counter += RWSEM_ACTIVE_READ_BIAS; + newcount = sem->count.counter; +#else + newcount = 
__api_atomic64_add_if_not_negative(RWSEM_ACTIVE_READ_BIAS, + &sem->count, LOCK_MB); +#endif + + return newcount > 0; +} + +/* + * lock for writing + */ +static inline long ___down_write(struct rw_semaphore *sem) +{ + long newcount; + +#ifndef CONFIG_SMP + newcount = sem->count.counter + RWSEM_ACTIVE_WRITE_BIAS; + sem->count.counter = newcount; +#else + newcount = __api_atomic_op(RWSEM_ACTIVE_WRITE_BIAS, + &sem->count, d, "addd", LOCK_MB); +#endif + return newcount; +} + +static inline void __down_write(struct rw_semaphore *sem) +{ + if (unlikely(___down_write(sem) != RWSEM_ACTIVE_WRITE_BIAS)) + rwsem_down_write_failed(sem); +} + +static inline int __down_write_killable(struct rw_semaphore *sem) +{ + if (unlikely(___down_write(sem) != RWSEM_ACTIVE_WRITE_BIAS)) + if (IS_ERR(rwsem_down_write_failed_killable(sem))) + return -EINTR; + return 0; +} + +/* + * trylock for writing -- returns 1 if successful, 0 if contention + */ +static inline int __down_write_trylock(struct rw_semaphore *sem) +{ + long oldcount; + +#ifndef CONFIG_SMP + oldcount = sem->count.counter; + if (oldcount == RWSEM_UNLOCKED_VALUE) + sem->count.counter = RWSEM_ACTIVE_WRITE_BIAS; +#else + oldcount = atomic_long_cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, + RWSEM_ACTIVE_WRITE_BIAS); +#endif + + return oldcount == RWSEM_UNLOCKED_VALUE; +} + +/* + * unlock after reading + */ +static inline void __up_read(struct rw_semaphore *sem) +{ + long newcount; + +#ifndef CONFIG_SMP + sem->count.counter -= RWSEM_ACTIVE_READ_BIAS; + newcount = sem->count.counter; +#else + newcount = __api_atomic_op(RWSEM_ACTIVE_READ_BIAS, + &sem->count, d, "subd", RELEASE_MB); +#endif + + if (unlikely(newcount < -1)) + if ((newcount & RWSEM_ACTIVE_MASK) == 0) + rwsem_wake(sem); +} + +/* + * unlock after writing + */ +static inline void __up_write(struct rw_semaphore *sem) +{ + long newcount; + +#ifndef CONFIG_SMP + sem->count.counter -= RWSEM_ACTIVE_WRITE_BIAS; + newcount = sem->count.counter; +#else + newcount = 
__api_atomic_op(RWSEM_ACTIVE_WRITE_BIAS, + &sem->count, d, "subd", RELEASE_MB); +#endif + if (unlikely(newcount < 0)) + rwsem_wake(sem); +} + +/* + * downgrade write lock to read lock + */ +static inline void __downgrade_write(struct rw_semaphore *sem) +{ + long newcount; + +#ifndef CONFIG_SMP + newcount = sem->count.counter - RWSEM_WAITING_BIAS; + sem->count.counter = newcount; +#else + newcount = __api_atomic_op(RWSEM_WAITING_BIAS, + &sem->count, d, "subd", RELEASE_MB); +#endif + + if (unlikely(newcount < 0)) + rwsem_downgrade_wake(sem); +} + +#endif /* __KERNEL__ */ +#endif /* _E2K_RWSEM_H */ diff --git a/arch/e2k/include/asm/sbus.h b/arch/e2k/include/asm/sbus.h new file mode 100644 index 000000000000..8061c7acd92e --- /dev/null +++ b/arch/e2k/include/asm/sbus.h @@ -0,0 +1,59 @@ +#ifndef E2K_SBUS_H +#define E2K_SBUS_H + +#include +#include + + +/** + * SBus accessors. + */ + +#define _ALIGN_MASK (~(uint64_t)0x3) + +static __inline__ u8 _sbus_readb(unsigned long addr) +{ + /* PCI2SBUS doesn't receive 1-byte read good. 
It's hardware bug */ + + return (*(volatile uint32_t *)(addr & _ALIGN_MASK) >> (addr & (0x3))*0x8) & 0xFF; +} + +static __inline__ u16 _sbus_readw(unsigned long addr) +{ + return be16_to_cpu((readw(addr))); +} + +static __inline__ u32 _sbus_readl(unsigned long addr) +{ + return be32_to_cpu((readl(addr))); +} + +static __inline__ void _sbus_writeb(u8 b, unsigned long addr) +{ + writeb(b, addr); +} + +static __inline__ void _sbus_writew(u16 w, unsigned long addr) +{ + writew(cpu_to_be16(w), addr); +} + +static __inline__ void _sbus_writel(u32 l, unsigned long addr) +{ + writel(cpu_to_be32(l), addr); +} + +#define sbus_readb(a) _sbus_readb((unsigned long)(a)) +#define sbus_readw(a) _sbus_readw((unsigned long)(a)) +#define sbus_readl(a) _sbus_readl((unsigned long)(a)) +#define sbus_writeb(v, a) _sbus_writeb(v, (unsigned long)(a)) +#define sbus_writew(v, a) _sbus_writew(v, (unsigned long)(a)) +#define sbus_writel(v, a) _sbus_writel(v, (unsigned long)(a)) + +static inline int sbus_addr_is_valid(unsigned long ba) +{ + u8 value = sbus_readb(ba); + return (value == 0xFD) || (value == 0xF1); +} + +#endif diff --git a/arch/e2k/include/asm/sclkr.h b/arch/e2k/include/asm/sclkr.h new file mode 100644 index 000000000000..424724b2db7c --- /dev/null +++ b/arch/e2k/include/asm/sclkr.h @@ -0,0 +1,105 @@ +#ifndef _ASM_E2K_SCLKR_H +#define _ASM_E2K_SCLKR_H + +#include +#include +#include +#include + +#include +#include + +extern __interrupt u64 fast_syscall_read_sclkr(void); + +/* SCLKR/SCLKM1/SCLKM2 implemented only on machine from e2s */ +extern unsigned long native_read_SCLKR_reg_value(void); +extern unsigned long native_read_SCLKM1_reg_value(void); +extern unsigned long native_read_SCLKM2_reg_value(void); +extern void native_write_SCLKR_reg_value(unsigned long reg_value); +extern void native_write_SCLKM1_reg_value(unsigned long reg_value); +extern void native_write_SCLKM2_reg_value(unsigned long reg_value); + +struct prev_sclkr { + atomic64_t res; +} ____cacheline_aligned_in_smp; 
+extern struct prev_sclkr prev_sclkr; + +#define SCLKR_NO 0 +#define SCLKR_INT 1 +#define SCLKR_RTC 2 +#define SCLKR_EXT 3 + +extern struct clocksource clocksource_sclkr; +extern long long sclkr_sched_offset; +extern int sclkr_initialized; + + +#define SCLKR_SRC_LEN 4 +extern char sclkr_src[SCLKR_SRC_LEN]; +extern int sclkr_mode; +extern int sclk_register(void *); +extern struct clocksource clocksource_sclkr; +extern int proc_sclkr(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern void sclk_set_deviat(int dev); +extern u64 raw_read_sclkr(void); +DECLARE_PER_CPU(int, ema_freq); + +extern __interrupt u64 fast_syscall_read_sclkr(void); +extern struct clocksource *curr_clocksource; +extern int redpill; + +#define xchg_prev_sclkr_res(res) \ + __api_atomic64_fetch_xchg_if_below(res, &prev_sclkr.res.counter, RELAXED_MB) + +#define SHF_ALPHA 2 +static __always_inline u64 sclkr_to_ns(u64 sclkr, u64 freq) +{ + u64 sclkr_sec, sclkr_lo, res, before; + e2k_sclkm1_t sclkm1 = READ_SCLKM1_REG(); +#ifdef CONFIG_SMP + struct thread_info *ti = READ_CURRENT_REG(); + struct task_struct *task = (void *) ti + - offsetof(struct task_struct, thread_info); + typeof(ema_freq) *pema_freq = per_cpu_ptr(&ema_freq, task->cpu); +#else + typeof(ema_freq) *pema_freq = + (typeof(ema_freq) *)per_cpu_ptr(&ema_freq, 0); +#endif + /* we can not use __this_cpu_read/write(ema_freq) in fast syscall : */ + + sclkr_sec = sclkr >> 32; + sclkr_lo = (u64) (u32) sclkr; + + if (sclkr_lo >= freq) + sclkr_lo = freq - 1; + + /* Using exponential moving average (ema) of frequency + * ema = alpha * cur_freq + (1 - alpha) * ema; + * makes got time more smooth but belated frequency is used + * shorter: ema = ema + (cur_freq - ema) * alpha; + * alpha = 2 / (period + 1) + * if moving average period = 3 alpha = 1/2 or use SHF_ALPHA = 1 + * if moving average period = 7 alpha = 1/4 or use SHF_ALPHA = 2 + * + * 1 << (SHF_ALPHA - 1) is added for rounding. 
+ */ + *pema_freq += (freq - *pema_freq + (1 << (SHF_ALPHA - 1))) >> SHF_ALPHA; + res = sclkr_sec * NSEC_PER_SEC + sclkr_lo * NSEC_PER_SEC / *pema_freq; + + /* sclkm3 has a summary time when guest was out of cpu */ + if (!redpill && sclkm1.sclkm3) + res -= READ_SCLKM3_REG(); + before = xchg_prev_sclkr_res(res); + if (before > res) + res = before; + + return res; +} + +static inline bool use_sclkr_sched_clock(void) +{ + return machine.native_iset_ver >= E2K_ISET_V3 && sclkr_initialized; +} + +#endif /* _ASM_E2K_SCLKR_H */ diff --git a/arch/e2k/include/asm/seccomp.h b/arch/e2k/include/asm/seccomp.h new file mode 100644 index 000000000000..af4633e0cd4b --- /dev/null +++ b/arch/e2k/include/asm/seccomp.h @@ -0,0 +1,18 @@ +/* + * copy from arch/arm64/include/asm/seccomp.h + * + * Copyright (C) 2014 Linaro Limited + * Author: AKASHI Takahiro + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _ASM_SECCOMP_H +#define _ASM_SECCOMP_H + +#include + +#include + +#endif /* _ASM_SECCOMP_H */ diff --git a/arch/e2k/include/asm/secondary_space.h b/arch/e2k/include/asm/secondary_space.h new file mode 100644 index 000000000000..2877cb692897 --- /dev/null +++ b/arch/e2k/include/asm/secondary_space.h @@ -0,0 +1,108 @@ +/* + * Secondary space support for E2K binary compiler + * asm/secondary_space.h + */ +#ifndef _SECONDARY_SPACE_H +#define _SECONDARY_SPACE_H + +#ifndef __ASSEMBLY__ +#include +#include +#include +#include +#include +#include +#include +#endif /* !__ASSEMBLY__ */ + +#define BINCO_PROTOCOL_VERSION 4 + +#define NATIVE_IS_NEXT_ELBRUS_2S \ + ((int)machine.native_iset_ver >= ELBRUS_2S_ISET) +#define NATIVE_SS_SIZE \ + ((NATIVE_IS_NEXT_ELBRUS_2S) ? \ + (0x800000000000UL) : (0x100000000UL)) +#define NATIVE_SS_ADDR_START \ + (NATIVE_IS_NEXT_ELBRUS_2S ? 
\ + 0x0000400000000000L : 0x0000100000000000L + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native guest kernel (not paravirtualized based on pv_ops) */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#else /* ! CONFIG_KVM_GUEST_KERNEL && ! CONFIG_PARAVIRT_GUEST */ +/* it is native kernel without any virtualization */ +/* or host kernel with virtualization support */ + +#define IS_NEXT_ELBRUS_2S NATIVE_IS_NEXT_ELBRUS_2S +#define SS_SIZE NATIVE_SS_SIZE +#define SS_ADDR_START NATIVE_SS_ADDR_START +#endif /* ! CONFIG_KVM_GUEST_KERNEL */ + +/* + * If updating this value - do not forget to update E2K_ARG3_MASK - + * mask for 63-45 bits and PAGE_OFFSET. + */ +#define SS_ADDR_END (SS_ADDR_START + SS_SIZE) + +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT +#define ADDR_IN_SS(a) ((a >= SS_ADDR_START) && (a < SS_ADDR_END)) +#else +#define ADDR_IN_SS(a) 0 +#endif + +#define DEBUG_SS_MODE 0 /* Secondary Space Debug */ +#define DebugSS(...) DebugPrint(DEBUG_SS_MODE, ##__VA_ARGS__) + +#ifndef __ASSEMBLY__ + +extern +s64 sys_el_binary(s64 work, s64 arg2, s64 arg3, s64 arg4); + +/* + * Interface of el_binary() syscall + * Work argument(arg1) values: + */ +#define GET_SECONDARY_SPACE_OFFSET 0 +#define SET_SECONDARY_REMAP_BOUND 1 +#define SET_SECONDARY_DESCRIPTOR 2 +#define SET_SECONDARY_MTRR_DEPRECATED 3 +#define GET_SECONDARY_MTRR_DEPRECATED 4 +#define GET_SNXE_USAGE 5 +#define TGKILL_INFO_DEPRECATED 6 +#define SIG_EXIT_GROUP 7 +#define FLUSH_CMD_CACHES_DEPRECATED 8 +#define SET_SC_RSTRT_IGNORE_DEPRECATED 9 +#define SET_RP_BOUNDS_AND_IP 10 +#define SET_SECONDARY_64BIT_MODE 11 +#define GET_PROTOCOL_VERSION 12 +#define SET_IC_NEED_FLUSH_ON_SWITCH 13 +#define GET_UPT_SEC_AD_SHIFT_DSBL 14 +#define SET_UPT_SEC_AD_SHIFT_DSBL 15 + +/* Selector numbers for GET_SECONDARY_SPACE_OFFSET */ +enum sel_num { + CS_SELECTOR = 0, + DS_SELECTOR = 1, + ES_SELECTOR = 2, + SS_SELECTOR = 3, + FS_SELECTOR = 4, + GS_SELECTOR = 5, +}; + +#define 
E2K_ARG3_MASK (0xffffe000ffffffffLL) +#define I32_ADDR_TO_E2K(arg) \ +({ \ + s64 argm; \ + argm = arg; \ + if (machine.native_iset_ver < E2K_ISET_V3) { \ + argm &= E2K_ARG3_MASK; \ + argm |= SS_ADDR_START; \ + } \ + argm; \ +}) + +#endif /* !__ASSEMBLY__ */ +#endif /* _SECONDARY_SPACE_H */ diff --git a/arch/e2k/include/asm/sections.h b/arch/e2k/include/asm/sections.h new file mode 100644 index 000000000000..05917ef523fd --- /dev/null +++ b/arch/e2k/include/asm/sections.h @@ -0,0 +1,54 @@ +#ifndef _E2K_SECTIONS_H +#define _E2K_SECTIONS_H + +/* nothing to see, move along */ +#ifndef __ASSEMBLY__ +#include +#endif /* ! __ASSEMBLY__ */ + +#if (defined __e2k__) && (defined __LCC__) +#define __interrupt __attribute__((__check_stack__)) +#else +#define __interrupt __attribute__((__interrupt__)) +#endif + +#ifndef CONFIG_RECOVERY +#define __init_recv __init +#define __initdata_recv __initdata +#else +#define __init_recv +#define __initdata_recv +#endif /* ! (CONFIG_RECOVERY) */ + +#if !defined(CONFIG_RECOVERY) && !defined(CONFIG_SERIAL_PRINTK) && \ + !defined(CONFIG_LMS_CONSOLE) && !defined(CONFIG_E2K_KEXEC) +#define __init_cons __init +#else +#define __init_cons +#endif /* boot console used after init completion */ + +#ifndef __ASSEMBLY__ +extern char _start[]; +extern char __ttable_start[]; +extern char __ttable_end[]; +extern char __C1_wait_trap_start[], __C1_wait_trap_end[]; +extern char __C3_wait_trap_start[], __C3_wait_trap_end[]; +extern char __init_text_begin[], __init_text_end[]; +extern char __init_data_begin[], __init_data_end[]; +extern char __node_data_start[], __node_data_end[]; +extern char __common_data_begin[], __common_data_end[]; +extern char _edata_bss[]; +extern char _t_entry[], _t_entry_end[]; +extern char __entry_handlers_start[], __entry_handlers_end[]; +extern char __start_ro_after_init[], __end_ro_after_init[]; +#endif /* ! 
__ASSEMBLY__ */ + +#ifdef CONFIG_NUMA +#define __nodedata __section(.node.data) +#define __NODEDATA .section ".node.data","aw" +#else /* ! CONFIG_NUMA */ +#define __nodedata __section(data) +#define __NODEDATA .data +#endif /* CONFIG_NUMA */ + +#endif /* _E2K_SECTIONS_H */ diff --git a/arch/e2k/include/asm/sembuf.h b/arch/e2k/include/asm/sembuf.h new file mode 100644 index 000000000000..8a95f0994263 --- /dev/null +++ b/arch/e2k/include/asm/sembuf.h @@ -0,0 +1,22 @@ +#ifndef _E2K_SEMBUF_H_ +#define _E2K_SEMBUF_H_ + +/* + * The semid64_ds structure for E2K architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 2 miscellaneous 64-bit values + */ + +struct semid64_ds { + struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ + __kernel_time_t sem_otime; /* last semop time */ + __kernel_time_t sem_ctime; /* last change time */ + unsigned long sem_nsems; /* no. of semaphores in array */ + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* _E2K_SEMBUF_H_ */ diff --git a/arch/e2k/include/asm/serial.h b/arch/e2k/include/asm/serial.h new file mode 100644 index 000000000000..d08ffe7d9587 --- /dev/null +++ b/arch/e2k/include/asm/serial.h @@ -0,0 +1,2 @@ +#pragma once +#include diff --git a/arch/e2k/include/asm/set_memory.h b/arch/e2k/include/asm/set_memory.h new file mode 100644 index 000000000000..638fb7b2aa59 --- /dev/null +++ b/arch/e2k/include/asm/set_memory.h @@ -0,0 +1,24 @@ +#ifndef _ASM_E2K_SET_MEMORY_H +#define _ASM_E2K_SET_MEMORY_H + +#include +#include + +extern int set_memory_ro(unsigned long addr, int numpages); +extern int set_memory_rw(unsigned long addr, int numpages); +extern int set_memory_x(unsigned long addr, int numpages); +extern int set_memory_nx(unsigned long addr, int numpages); + +extern int set_memory_uc(unsigned long addr, int numpages); +extern int set_memory_wc(unsigned long addr, int numpages); +extern int 
set_memory_wb(unsigned long addr, int numpages); + +extern int set_pages_array_uc(struct page **pages, int addrinarray); +extern int set_pages_array_wc(struct page **pages, int addrinarray); +extern int set_pages_array_wb(struct page **pages, int addrinarray); + +int set_pages_uc(struct page *page, int numpages); +int set_pages_wc(struct page *page, int numpages); +int set_pages_wb(struct page *page, int numpages); + +#endif diff --git a/arch/e2k/include/asm/setup.h b/arch/e2k/include/asm/setup.h new file mode 100644 index 000000000000..b7b7f842223e --- /dev/null +++ b/arch/e2k/include/asm/setup.h @@ -0,0 +1,55 @@ +#ifndef __E2K_SETUP_H +#define __E2K_SETUP_H + +#include +#include +#include +#include + +extern void __init e2k_start_kernel(void); +extern void __init native_setup_machine(void); +extern void __init e2k_start_kernel_switched_stacks(void); +extern void e2k_start_secondary_switched_stacks(int cpuid, int cpu); + +static inline void native_bsp_switch_to_init_stack(void) +{ + unsigned long stack_base = (unsigned long) &init_stack; + + NATIVE_SWITCH_TO_KERNEL_STACK( + stack_base + KERNEL_P_STACK_OFFSET, KERNEL_P_STACK_SIZE, + stack_base + KERNEL_PC_STACK_OFFSET, KERNEL_PC_STACK_SIZE, + stack_base + KERNEL_C_STACK_OFFSET, KERNEL_C_STACK_SIZE); +} + +static inline void native_setup_bsp_idle_task(int cpu) +{ + /* + * Set pointer of current task structure to kernel initial task + */ + set_current_thread_info(&init_task.thread_info, &init_task); +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native guest kernel */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#else /* !CONFIG_KVM_GUEST_KERNEL && !CONFIG_PARAVIRT_GUEST */ +/* It is native host or host with virtualization support */ +static inline void arch_setup_machine(void) +{ + native_setup_machine(); +} +static inline void bsp_switch_to_init_stack(void) +{ + native_bsp_switch_to_init_stack(); +} +static inline void setup_bsp_idle_task(int cpu) +{ 
+ native_setup_bsp_idle_task(cpu); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#include +#endif /* __E2K_SETUP_H */ diff --git a/arch/e2k/include/asm/shmbuf.h b/arch/e2k/include/asm/shmbuf.h new file mode 100644 index 000000000000..a31068840143 --- /dev/null +++ b/arch/e2k/include/asm/shmbuf.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _E2K_ASM_SHMBUF_H_ +#define _E2K_ASM_SHMBUF_H_ + +#include + +#if defined(CONFIG_PROTECTED_MODE) +/* Outputs shared segment size for the given ID: */ +unsigned long get_shm_segm_size(int shmid); +#endif + +#endif /* _E2K_ASM_SHMBUF_H_ */ diff --git a/arch/e2k/include/asm/shmparam.h b/arch/e2k/include/asm/shmparam.h new file mode 100644 index 000000000000..b19db208e082 --- /dev/null +++ b/arch/e2k/include/asm/shmparam.h @@ -0,0 +1,11 @@ +#ifndef _E2K_SHMPARAM_H_ +#define _E2K_SHMPARAM_H_ + +/* + * SHMLBA controls minimum alignment at which shared memory segments + * get attached. + */ + +#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ + +#endif /* _E2K_SHMPARAM_H_ */ diff --git a/arch/e2k/include/asm/sic_regs.h b/arch/e2k/include/asm/sic_regs.h new file mode 100644 index 000000000000..ddb92ea71173 --- /dev/null +++ b/arch/e2k/include/asm/sic_regs.h @@ -0,0 +1,1405 @@ +#ifndef _E2K_SIC_REGS_H_ +#define _E2K_SIC_REGS_H_ + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +#ifndef __ASSEMBLY__ +#include +#endif /* __ASSEMBLY__ */ + +#define SIC_IO_LINKS_COUNT 2 + +/* + * NBSR registers addresses (offsets in NBSR area) + */ + +#define SIC_st_p 0x00 + +#define SIC_st_core0 0x100 +#define SIC_st_core1 0x104 +#define SIC_st_core2 0x108 +#define SIC_st_core3 0x10c +#define SIC_st_core4 0x110 +#define SIC_st_core5 0x114 +#define SIC_st_core6 0x118 +#define SIC_st_core7 0x11c +#define SIC_st_core8 0x120 +#define SIC_st_core9 0x124 +#define SIC_st_core10 0x128 +#define SIC_st_core11 0x12c +#define SIC_st_core12 0x130 +#define SIC_st_core13 0x134 +#define SIC_st_core14 0x138 +#define 
SIC_st_core15 0x13c + +#define SIC_st_core(num) (0x100 + (num) * 4) + +#define SIC_rt_ln 0x08 +#define SIC_rt_lcfg0 0x10 +#define SIC_rt_lcfg1 0x14 +#define SIC_rt_lcfg2 0x18 +#define SIC_rt_lcfg3 0x1c + +#define SIC_rt_mhi0 0x20 +#define SIC_rt_mhi1 0x24 +#define SIC_rt_mhi2 0x28 +#define SIC_rt_mhi3 0x2c + +#define SIC_rt_mlo0 0x30 +#define SIC_rt_mlo1 0x34 +#define SIC_rt_mlo2 0x38 +#define SIC_rt_mlo3 0x3c + +#define SIC_rt_pcim0 0x40 +#define SIC_rt_pcim1 0x44 +#define SIC_rt_pcim2 0x48 +#define SIC_rt_pcim3 0x4c + +#define SIC_rt_pciio0 0x50 +#define SIC_rt_pciio1 0x54 +#define SIC_rt_pciio2 0x58 +#define SIC_rt_pciio3 0x5c + +#define SIC_rt_ioapic0 0x60 +#define SIC_rt_ioapic1 0x64 +#define SIC_rt_ioapic2 0x68 +#define SIC_rt_ioapic3 0x6c + +#define SIC_rt_pcimp_b0 0x70 +#define SIC_rt_pcimp_b1 0x74 +#define SIC_rt_pcimp_b2 0x78 +#define SIC_rt_pcimp_b3 0x7c + +#define SIC_rt_pcimp_e0 0x80 +#define SIC_rt_pcimp_e1 0x84 +#define SIC_rt_pcimp_e2 0x88 +#define SIC_rt_pcimp_e3 0x8c + +#define SIC_rt_ioapic10 0x1060 +#define SIC_rt_ioapic11 0x1064 +#define SIC_rt_ioapic12 0x1068 +#define SIC_rt_ioapic13 0x106c + +#define SIC_rt_ioapicintb 0x94 +#define SIC_rt_lapicintb 0xa0 + +#define SIC_rt_msi 0xb0 +#define SIC_rt_msi_h 0xb4 + +#define SIC_rt_pcicfgb 0x90 +#define SIC_rt_pcicfged 0x98 + +/* PREPIC */ +#define SIC_prepic_version 0x8000 +#define SIC_prepic_ctrl 0x8010 +#define SIC_prepic_id 0x8020 +#define SIC_prepic_ctrl2 0x8030 +#define SIC_prepic_err_stat 0x8040 +#define SIC_prepic_err_msg_lo 0x8050 +#define SIC_prepic_err_msg_hi 0x8054 +#define SIC_prepic_err_int 0x8060 +#define SIC_prepic_mcr 0x8070 +#define SIC_prepic_mid 0x8074 +#define SIC_prepic_mar0_lo 0x8080 +#define SIC_prepic_mar0_hi 0x8084 +#define SIC_prepic_mar1_lo 0x8090 +#define SIC_prepic_mar1_hi 0x8094 +#define SIC_prepic_linp0 0x8c00 +#define SIC_prepic_linp1 0x8c04 +#define SIC_prepic_linp2 0x8c08 +#define SIC_prepic_linp3 0x8c0c +#define SIC_prepic_linp4 0x8c10 +#define SIC_prepic_linp5 
0x8c14 + +/* Host Controller */ +#define SIC_hc_ctrl 0x0340 + +/* IOMMU */ +#define SIC_iommu_ctrl 0x0380 +#define SIC_iommu_ba_lo 0x0390 +#define SIC_iommu_ba_hi 0x0394 +#define SIC_iommu_dtba_lo 0x0398 +#define SIC_iommu_dtba_hi 0x039c +#define SIC_iommu_flush 0x03a0 +#define SIC_iommu_flushP 0x03a4 +#define SIC_iommu_cmd_c_lo 0x03a0 +#define SIC_iommu_cmd_c_hi 0x03a4 +#define SIC_iommu_cmd_d_lo 0x03a8 +#define SIC_iommu_cmd_d_hi 0x03ac +#define SIC_iommu_err 0x03b0 +#define SIC_iommu_err1 0x03b4 +#define SIC_iommu_err_info_lo 0x03b8 +#define SIC_iommu_err_info_hi 0x03bc + +#define SIC_edbc_iommu_ctrl 0x5080 +#define SIC_edbc_iommu_ba_lo 0x5090 +#define SIC_edbc_iommu_ba_hi 0x5094 +#define SIC_edbc_iommu_dtba_lo 0x5098 +#define SIC_edbc_iommu_dtba_hi 0x509c +#define SIC_edbc_iommu_cmd_c_lo 0x50a0 +#define SIC_edbc_iommu_cmd_c_hi 0x50a4 +#define SIC_edbc_iommu_err 0x50b0 +#define SIC_edbc_iommu_err1 0x50b4 +#define SIC_edbc_iommu_err_info_lo 0x50b8 +#define SIC_edbc_iommu_err_info_hi 0x50bc + +#define SIC_iommu_reg_base SIC_iommu_ctrl +#define SIC_iommu_reg_size 0x0080 +#define SIC_e2c3_iommu_nr 0x0007 +#define SIC_embedded_iommu_base 0x5d00 +#define SIC_embedded_iommu_size SIC_iommu_reg_size + +/* IO link & RDMA */ +#define SIC_iol_csr 0x900 +#define SIC_io_vid 0x700 +#define SIC_io_csr 0x704 +#define SIC_io_str 0x70c +#define SIC_io_str_hi 0x72c +#define SIC_rdma_vid 0x880 +#define SIC_rdma_cs 0x888 + +/* Second IO link */ +#define SIC_iol_csr1 0x1900 +#define SIC_io_vid1 0x1700 +#define SIC_io_csr1 0x1704 +#define SIC_io_str1 0x170c +#define SIC_rdma_vid1 0x1880 +#define SIC_rdma_cs1 0x1888 + +/* DSP */ +#define SIC_ic_ir0 0x2004 +#define SIC_ic_ir1 0x2008 +#define SIC_ic_mr0 0x2010 +#define SIC_ic_mr1 0x2014 + +/* Monitors */ +#define SIC_sic_mcr 0xc30 +#define SIC_sic_mar0_lo 0xc40 +#define SIC_sic_mar0_hi 0xc44 +#define SIC_sic_mar1_lo 0xc48 +#define SIC_sic_mar1_hi 0xc4c + +/* Interrupt register */ +#define SIC_sic_int 0xc60 + +/* MC */ + +#define 
SIC_MAX_MC_COUNT E16C_SIC_MC_COUNT +#define SIC_MC_COUNT (machine.sic_mc_count) + +#define SIC_MC_BASE 0x400 +#define SIC_MC_SIZE (machine.sic_mc_size) + +#define SIC_mc0_ecc 0x400 +#define SIC_mc1_ecc (machine.sic_mc1_ecc) +#define SIC_mc2_ecc 0x480 +#define SIC_mc3_ecc 0x4c0 + +#define SIC_mc0_opmb 0x414 +#define SIC_mc1_opmb 0x454 +#define SIC_mc2_opmb 0x494 +#define SIC_mc3_opmb 0x4d4 + +#define SIC_mc0_cfg 0x418 +#define SIC_mc1_cfg 0x458 +#define SIC_mc2_cfg 0x498 +#define SIC_mc3_cfg 0x4d8 + +/* IPCC */ +#define SIC_IPCC_LINKS_COUNT 3 +#define SIC_ipcc_csr1 0x604 +#define SIC_ipcc_csr2 0x644 +#define SIC_ipcc_csr3 0x684 +#define SIC_ipcc_str1 0x60c +#define SIC_ipcc_str2 0x64c +#define SIC_ipcc_str3 0x68c + +/* Power management */ +#define SIC_pwr_mgr 0x280 + +/* E12C/E16C/E2C3 Power Control System (PCS) registers + * PMC base =0x1000 is added */ +#define PMC_FREQ_CFG 0x1100 +#define PMC_FREQ_STEPS 0x1104 +#define PMC_FREQ_C2 0x1108 +#define PMC_FREQ_CORE_0_MON 0x1200 +#define PMC_FREQ_CORE_0_CTRL 0x1204 +#define PMC_FREQ_CORE_0_SLEEP 0x1208 +#define PMC_FREQ_CORE_N_MON(n) (PMC_FREQ_CORE_0_MON + n * 16) +#define PMC_FREQ_CORE_N_CTRL(n) (PMC_FREQ_CORE_0_CTRL + n * 16) +#define PMC_FREQ_CORE_N_SLEEP(n) ((PMC_FREQ_CORE_0_SLEEP) + n * 16) +#define PMC_SYS_MON_1 0x1504 +/* PMC_FREQ_CORE_0_SLEEP fields: */ +typedef union { + struct { + u32 cmd : 3; + u32 pad1 : 13; + u32 status : 3; + u32 pad2 : 8; + u32 ctrl_enable : 1; + u32 alter_disable : 1; + u32 bfs_bypass : 1; + u32 pin_en : 1; + u32 pad3 : 1; + }; + u32 word; +} freq_core_sleep_t; + +/* PMC_FREQ_CORE_0_MON fields: */ +typedef union { + struct { + u32 divF_curr : 6; + u32 divF_target : 6; + u32 divF_limit_hi : 6; + u32 divF_limit_lo : 6; + u32 divF_init : 6; + u32 bfs_bypass : 1; + u32 : 1; + }; + u32 word; +} freq_core_mon_t; + +/* PMC_FREQ_CORE_0_CTRL fields: */ +typedef union { + struct { + u32 enable : 1; + u32 mode : 3; + u32 progr_divF : 6; + u32 progr_divF_max : 6; + u32 decr_dsbl : 1; + u32 pin_en : 
1; + u32 clk_en : 1; + u32 log_en : 1; + u32 sleep_c2 : 1; + u32 w_trap : 1; + u32 ev_term : 1; + u32 mon_Fmax : 1; + u32 divF_curr : 6; + u32 bfs_bypass : 1; + u32 rmwen : 1; + }; + u32 word; +} freq_core_ctrl_t; + +/* PMC_SYS_MON_1 fields: */ +typedef union { + struct { + u32 machine_gen_alert : 1; + u32 machine_pwr_alert : 1; + u32 cpu_pwr_alert : 1; + u32 mc47_pwr_alert : 1; + u32 mc03_pwr_alert : 1; + u32 mc47_dimm_event : 1; + u32 mc03_dimm_event : 1; + u32 reserved : 2; + u32 mc7_fault : 1; + u32 mc6_fault : 1; + u32 mc5_fault : 1; + u32 mc4_fault : 1; + u32 mc3_fault : 1; + u32 mc2_fault : 1; + u32 mc1_fault : 1; + u32 mc0_fault : 1; + u32 cpu_fault : 1; + u32 pin_sataeth_config : 1; + u32 pin_iplc_pe_pre_det : 2; + u32 pin_iplc_pe_config : 2; + u32 pin_ipla_flip_en : 1; + u32 pin_iowl_pe_pre_det : 4; + u32 pin_iowl_pe_config : 2; + u32 pin_efuse_mode : 2; + }; + u32 word; +} sys_mon_1_t; + +/* E8C2 Power Control System (PCS) registers */ + +#define SIC_pcs_ctrl0 0x0cb0 +#define SIC_pcs_ctrl1 0x0cb4 +#define SIC_pcs_ctrl2 0x0cb8 +#define SIC_pcs_ctrl3 0x0cbc +#define SIC_pcs_ctrl4 0x0cc0 +#define SIC_pcs_ctrl5 0x0cc4 +#define SIC_pcs_ctrl6 0x0cc8 +#define SIC_pcs_ctrl7 0x0ccc +#define SIC_pcs_ctrl8 0x0cd0 +#define SIC_pcs_ctrl9 0x0cd4 + +/* PCS_CTRL1 fields: */ +typedef union { + struct { + u32 pcs_mode : 4; + u32 n_fprogr : 6; + u32 n_fmin : 6; + u32 n_fminmc : 6; + u32 n : 6; + u32 : 4; + }; + u32 word; +} pcs_ctrl1_t; + +/* PCS_CTRL3 fields: */ +typedef union { + struct { + u32 n_fpin : 6; + u32 : 2; + u32 bfs_freq : 4; + u32 pll_bw : 3; + u32 : 1; + u32 pll_mode : 3; + u32 : 1; + u32 iol_bitrate : 3; + u32 : 1; + u32 ipl_bitrate : 3; + u32 : 1; + u32 l_equaliz : 1; + u32 l_preemph : 1; + u32 bfs_adj_dsbl : 1; + u32 : 1; + }; + u32 word; +} pcs_ctrl3_t; + +/* Cache L3 */ +#define SIC_l3_ctrl 0x3000 +#define SIC_l3_serv 0x3004 +#define SIC_l3_diag_ac 0x3008 +#define SIC_l3_bnda 0x300c +#define SIC_l3_bndb 0x3010 +#define SIC_l3_bndc 0x3014 +#define 
SIC_l3_seal 0x3018 +#define SIC_l3_l3tl 0x301c +#define SIC_l3_emrg 0x3020 +/* bank #0 */ +#define SIC_l3_b0_diag_dw 0x3100 +#define SIC_l3_b0_eccd_ld 0x3108 +#define SIC_l3_b0_eccd_dm 0x310c +#define SIC_l3_b0_eerr 0x3110 +#define SIC_l3_b0_bist0 0x3114 +#define SIC_l3_b0_bist1 0x3118 +#define SIC_l3_b0_bist2 0x311c +#define SIC_l3_b0_emrg_r0 0x3120 +#define SIC_l3_b0_emrg_r1 0x3124 +/* bank #1 */ +#define SIC_l3_b1_diag_dw 0x3140 +#define SIC_l3_b1_eccd_ld 0x3148 +#define SIC_l3_b1_eccd_dm 0x314c +#define SIC_l3_b1_eerr 0x3150 +#define SIC_l3_b1_bist0 0x3154 +#define SIC_l3_b1_bist1 0x3158 +#define SIC_l3_b1_bist2 0x315c +#define SIC_l3_b1_emrg_r0 0x3160 +#define SIC_l3_b1_emrg_r1 0x3164 +/* bank #2 */ +#define SIC_l3_b2_diag_dw 0x3180 +#define SIC_l3_b2_eccd_ld 0x3188 +#define SIC_l3_b2_eccd_dm 0x318c +#define SIC_l3_b2_eerr 0x3190 +#define SIC_l3_b2_bist0 0x3194 +#define SIC_l3_b2_bist1 0x3198 +#define SIC_l3_b2_bist2 0x319c +#define SIC_l3_b2_emrg_r0 0x31a0 +#define SIC_l3_b2_emrg_r1 0x31a4 +/* bank #3 */ +#define SIC_l3_b3_diag_dw 0x31c0 +#define SIC_l3_b3_eccd_ld 0x31c8 +#define SIC_l3_b3_eccd_dm 0x31cc +#define SIC_l3_b3_eerr 0x31d0 +#define SIC_l3_b3_bist0 0x31d4 +#define SIC_l3_b3_bist1 0x31d8 +#define SIC_l3_b3_bist2 0x31dc +#define SIC_l3_b3_emrg_r0 0x31e0 +#define SIC_l3_b3_emrg_r1 0x31e4 +/* bank #4 */ +#define SIC_l3_b4_diag_dw 0x3200 +#define SIC_l3_b4_eccd_ld 0x3208 +#define SIC_l3_b4_eccd_dm 0x320c +#define SIC_l3_b4_eerr 0x3210 +#define SIC_l3_b4_bist0 0x3214 +#define SIC_l3_b4_bist1 0x3218 +#define SIC_l3_b4_bist2 0x321c +#define SIC_l3_b4_emrg_r0 0x3220 +#define SIC_l3_b4_emrg_r1 0x3224 +/* bank #5 */ +#define SIC_l3_b5_diag_dw 0x3240 +#define SIC_l3_b5_eccd_ld 0x3248 +#define SIC_l3_b5_eccd_dm 0x324c +#define SIC_l3_b5_eerr 0x3250 +#define SIC_l3_b5_bist0 0x3254 +#define SIC_l3_b5_bist1 0x3258 +#define SIC_l3_b5_bist2 0x325c +#define SIC_l3_b5_emrg_r0 0x3260 +#define SIC_l3_b5_emrg_r1 0x3264 +/* bank #6 */ +#define SIC_l3_b6_diag_dw 0x3280 
+#define SIC_l3_b6_eccd_ld 0x3288 +#define SIC_l3_b6_eccd_dm 0x328c +#define SIC_l3_b6_eerr 0x3290 +#define SIC_l3_b6_bist0 0x3294 +#define SIC_l3_b6_bist1 0x3298 +#define SIC_l3_b6_bist2 0x329c +#define SIC_l3_b6_emrg_r0 0x32a0 +#define SIC_l3_b6_emrg_r1 0x32a4 +/* bank #7 */ +#define SIC_l3_b7_diag_dw 0x32c0 +#define SIC_l3_b7_eccd_ld 0x32c8 +#define SIC_l3_b7_eccd_dm 0x32cc +#define SIC_l3_b7_eerr 0x32d0 +#define SIC_l3_b7_bist0 0x32d4 +#define SIC_l3_b7_bist1 0x32d8 +#define SIC_l3_b7_bist2 0x32dc +#define SIC_l3_b7_emrg_r0 0x32e0 +#define SIC_l3_b7_emrg_r1 0x32e4 + +/* Host Controller */ +#define SIC_hc_mcr 0x360 +#define SIC_hc_mid 0x364 +#define SIC_hc_mar0_lo 0x368 +#define SIC_hc_mar0_hi 0x36c +#define SIC_hc_mar1_lo 0x370 +#define SIC_hc_mar1_hi 0x374 +#define SIC_hc_ioapic_eoi 0x37c + +/* Binary compiler Memory protection registers */ +#define BC_MM_CTRL 0x0800 +#define BC_MM_MLO_LB 0x0808 +#define BC_MM_MLO_HB 0x080c +#define BC_MM_MHI_BASE 0x0810 +#define BC_MM_MHI_BASE_H 0x0814 +#define BC_MM_MHI_MASK 0x0818 +#define BC_MM_MHI_MASK_H 0x081c +#define BC_MM_MHI_LB 0x0820 +#define BC_MM_MHI_LB_H 0x0824 +#define BC_MM_MHI_HB 0x0828 +#define BC_MM_MHI_HB_H 0x082c +#define BC_MP_CTRL 0x0830 +#define BC_MP_STAT 0x0834 +#define BC_MP_T_BASE 0x0838 +#define BC_MP_T_BASE_H 0x083c +#define BC_MP_T_H_BASE 0x0840 +#define BC_MP_T_H_BASE_H 0x0844 +#define BC_MP_T_HB 0x0848 +#define BC_MP_T_H_LB 0x0850 +#define BC_MP_T_H_LB_H 0x0854 +#define BC_MP_T_H_HB 0x0858 +#define BC_MP_T_H_HB_H 0x085c +#define BC_MP_T_CORR 0x0860 +#define BC_MP_T_CORR_H 0x0864 +#define BC_MP_B_BASE 0x0868 +#define BC_MP_B_BASE_H 0x086c +#define BC_MP_B_HB 0x0870 +#define BC_MP_B_PUT 0x0874 +#define BC_MP_B_GET 0x0878 +#define BC_MM_REG_END (BC_MP_B_GET + 4) + +#define BC_MM_REG_BASE BC_MM_CTRL +#define BC_MM_REG_SIZE (BC_MM_REG_END - BC_MM_REG_BASE) +#define BC_MM_REG_NUM (BC_MM_REG_SIZE / 4) + +#define EFUSE_RAM_ADDR 0x0cc0 +#define EFUSE_RAM_DATA 0x0cc4 + +#ifndef __ASSEMBLY__ +/* + * 
Read/Write RT_LCFGj Regs + */ +#define ES2_CLN_BITS 4 /* 4 bits - cluster # */ +#define E8C_CLN_BITS 2 /* 2 bits - cluster # */ +#if defined(CONFIG_ES2) || defined(CONFIG_E2S) +#define E2K_MAX_CL_NUM ((1 << ES2_CLN_BITS) - 1) +#elif defined(CONFIG_E8C) || defined(CONFIG_E8C2) +#define E2K_MAX_CL_NUM ((1 << E8C_CLN_BITS) - 1) +#elif defined(CONFIG_E12C) || defined(CONFIG_E16C) || defined(CONFIG_E2C3) +#define E2K_MAX_CL_NUM 0 /* Cluster number field was deleted */ +#endif /* CONFIG_ES2 || CONFIG_E2S */ + +/* SCCFG */ +#define SIC_sccfg 0xc00 + +typedef unsigned int e2k_rt_lcfg_t; /* Read/write pointer (32 bits) */ +typedef struct es2_rt_lcfg_fields { + e2k_rt_lcfg_t vp : 1; /* [0] */ + e2k_rt_lcfg_t vb : 1; /* [1] */ + e2k_rt_lcfg_t vics : 1; /* [2] */ + e2k_rt_lcfg_t vio : 1; /* [3] */ + e2k_rt_lcfg_t pln : 2; /* [5:4] */ + e2k_rt_lcfg_t cln : 4; /* [9:6] */ + e2k_rt_lcfg_t unused : 22; /* [31:10] */ +} es2_rt_lcfg_fields_t; +typedef struct e8c_rt_lcfg_fields { + e2k_rt_lcfg_t vp : 1; /* [0] */ + e2k_rt_lcfg_t vb : 1; /* [1] */ + e2k_rt_lcfg_t vics : 1; /* [2] */ + e2k_rt_lcfg_t vio : 1; /* [3] */ + e2k_rt_lcfg_t pln : 2; /* [5:4] */ + e2k_rt_lcfg_t cln : 2; /* [7:6] */ + e2k_rt_lcfg_t unused : 24; /* [31:8] */ +} e8c_rt_lcfg_fields_t; +typedef es2_rt_lcfg_fields_t e2s_rt_lcfg_fields_t; +typedef union e2k_rt_lcfg_struct { /* Structure of lower word */ + es2_rt_lcfg_fields_t es2_fields; /* as fields */ + e8c_rt_lcfg_fields_t e8c_fields; /* as fields */ + e2k_rt_lcfg_t word; /* as entire register */ +} e2k_rt_lcfg_struct_t; + +#define ES2_RT_LCFG_vp(__reg) ((__reg).es2_fields.vp) +#define ES2_RT_LCFG_vb(__reg) ((__reg).es2_fields.vb) +#define ES2_RT_LCFG_vics(__reg) ((__reg).es2_fields.vics) +#define ES2_RT_LCFG_vio(__reg) ((__reg).es2_fields.vio) +#define ES2_RT_LCFG_pln(__reg) ((__reg).es2_fields.pln) +#define ES2_RT_LCFG_cln(__reg) ((__reg).es2_fields.cln) +#define ES2_RT_LCFG_reg(__reg) ((__reg).word) + +#define E2S_RT_LCFG_vp ES2_RT_LCFG_vp +#define 
E2S_RT_LCFG_vb ES2_RT_LCFG_vb +#define E2S_RT_LCFG_vics ES2_RT_LCFG_vics +#define E2S_RT_LCFG_vio ES2_RT_LCFG_vio +#define E2S_RT_LCFG_pln ES2_RT_LCFG_pln +#define E2S_RT_LCFG_cln ES2_RT_LCFG_cln +#define E2S_RT_LCFG_reg ES2_RT_LCFG_reg + +#define E8C_RT_LCFG_vp(__reg) ((__reg).e8c_fields.vp) +#define E8C_RT_LCFG_vb(__reg) ((__reg).e8c_fields.vb) +#define E8C_RT_LCFG_vics(__reg) ((__reg).e8c_fields.vics) +#define E8C_RT_LCFG_vio(__reg) ((__reg).e8c_fields.vio) +#define E8C_RT_LCFG_pln(__reg) ((__reg).e8c_fields.pln) +#define E8C_RT_LCFG_cln(__reg) ((__reg).e8c_fields.cln) +#define E8C_RT_LCFG_reg(__reg) ((__reg).word) + +/* FIXME: now as on e8c, but can be changed need DOCs */ +#define E12C_RT_LCFG_vp(__reg) E8C_RT_LCFG_vp(__reg) +#define E12C_RT_LCFG_vb(__reg) E8C_RT_LCFG_vb(__reg) +#define E12C_RT_LCFG_vics(__reg) E8C_RT_LCFG_vics(__reg) +#define E12C_RT_LCFG_vio(__reg) E8C_RT_LCFG_vio(__reg) +#define E12C_RT_LCFG_pln(__reg) E8C_RT_LCFG_pln(__reg) +#define E12C_RT_LCFG_cln(__reg) E8C_RT_LCFG_cln(__reg) +#define E12C_RT_LCFG_reg(__reg) E8C_RT_LCFG_reg(__reg) + +#define E2K_RT_LCFG_vp ES2_RT_LCFG_vp +#define E2K_RT_LCFG_vb ES2_RT_LCFG_vb +#define E2K_RT_LCFG_vics ES2_RT_LCFG_vics +#define E2K_RT_LCFG_vio ES2_RT_LCFG_vio +#if defined(CONFIG_ES2) || defined(CONFIG_E2S) +#define E2K_RT_LCFG_pln ES2_RT_LCFG_pln +#define E2K_RT_LCFG_cln ES2_RT_LCFG_cln +#elif defined(CONFIG_E8C) || defined(CONFIG_E8C2) +#define E2K_RT_LCFG_pln E8C_RT_LCFG_pln +#define E2K_RT_LCFG_cln E8C_RT_LCFG_cln +#elif defined(CONFIG_E12C) || defined(CONFIG_E16C) || defined(CONFIG_E2C3) +#define E2K_RT_LCFG_pln E12C_RT_LCFG_pln +#define E2K_RT_LCFG_cln E12C_RT_LCFG_cln +#endif /* CONFIG_ES2 || CONFIG_E2S */ +#define E2K_RT_LCFG_reg ES2_RT_LCFG_reg + +/* + * Read/Write RT_PCIIOj Regs + */ +typedef unsigned int e2k_rt_pciio_t; /* Read/write pointer (32 bits) */ +typedef struct e2k_rt_pciio_fields { + e2k_rt_pciio_t unused1 : 12; /* [11:0] */ + e2k_rt_pciio_t bgn : 4; /* [15:12] */ + e2k_rt_pciio_t 
unused2 : 12; /* [27:16] */ + e2k_rt_pciio_t end : 4; /* [31:28] */ +} e2k_rt_pciio_fields_t; +typedef union e2k_rt_pciio_struct { /* Structure of lower word */ + e2k_rt_pciio_fields_t fields; /* as fields */ + e2k_rt_pciio_t word; /* as entire register */ +} e2k_rt_pciio_struct_t; + +#define E2K_SIC_ALIGN_RT_PCIIO 12 /* 4 Kb */ +#define E2K_SIC_SIZE_RT_PCIIO (1 << E2K_SIC_ALIGN_RT_PCIIO) +#define E2K_RT_PCIIO_bgn fields.bgn +#define E2K_RT_PCIIO_end fields.end +#define E2K_RT_PCIIO_reg word + +/* + * Read/Write RT_PCIMj Regs + */ +typedef unsigned int e2k_rt_pcim_t; /* Read/write pointer (32 bits) */ +typedef struct e2k_rt_pcim_fields { + e2k_rt_pcim_t unused1 : 11; /* [10:0] */ + e2k_rt_pcim_t bgn : 5; /* [15:11] */ + e2k_rt_pcim_t unused2 : 11; /* [26:16] */ + e2k_rt_pcim_t end : 5; /* [31:27] */ +} e2k_rt_pcim_fields_t; +typedef union e2k_rt_pcim_struct { /* Structure of lower word */ + e2k_rt_pcim_fields_t fields; /* as fields */ + e2k_rt_pcim_t word; /* as entire register */ +} e2k_rt_pcim_struct_t; + +#define E2K_SIC_ALIGN_RT_PCIM 27 /* 128 Mb */ +#define E2K_SIC_SIZE_RT_PCIM (1 << E2K_SIC_ALIGN_RT_PCIM) +#define E2K_RT_PCIM_bgn fields.bgn +#define E2K_RT_PCIM_end fields.end +#define E2K_RT_PCIM_reg word + +/* + * Read/Write RT_PCIMPj Regs + */ +typedef unsigned int e2k_rt_pcimp_t; /* Read/write pointer (32 bits) */ +typedef struct e2k_rt_pcimp_struct { + e2k_rt_pcimp_t addr; /* [PA_MSB: 0] */ +} e2k_rt_pcimp_struct_t; + +#define E2K_SIC_ALIGN_RT_PCIMP 27 /* 128 Mb */ +#define E2K_SIC_SIZE_RT_PCIMP (1 << E2K_SIC_ALIGN_RT_PCIMP) +#define E2K_RT_PCIMP_bgn addr +#define E2K_RT_PCIMP_end addr +#define E2K_RT_PCIMP_reg addr + +/* + * Read/Write RT_PCICFGB Reg + */ +typedef unsigned int e2k_rt_pcicfgb_t; /* Read/write pointer (32 bits) */ +typedef struct e2k_rt_pcicfgb_fields { + e2k_rt_pcicfgb_t unused1 : 3; /* [2:0] */ + e2k_rt_pcicfgb_t bgn : 18; /* [20:3] */ + e2k_rt_pcicfgb_t unused2 : 11; /* [31:21] */ +} e2k_rt_pcicfgb_fields_t; +typedef union 
e2k_rt_pcicfgb_struct { /* Structure of lower word */ + e2k_rt_pcicfgb_fields_t fields; /* as fields */ + e2k_rt_pcicfgb_t word; /* as entire register */ +} e2k_rt_pcicfgb_struct_t; + +#define E2K_SIC_ALIGN_RT_PCICFGB 28 /* 256 Mb */ +#define E2K_SIC_SIZE_RT_PCICFGB (1 << E2K_SIC_ALIGN_RT_PCICFGB) +#define E2K_RT_PCICFGB_bgn fields.bgn +#define E2K_RT_PCICFGB_reg word + +/* + * Read/Write RT_MLOj Regs + */ +typedef unsigned int e2k_rt_mlo_t; /* Read/write pointer (32 bits) */ +typedef struct e2k_rt_mlo_fields { + e2k_rt_mlo_t unused1 : 11; /* [10:0] */ + e2k_rt_mlo_t bgn : 5; /* [15:11] */ + e2k_rt_mlo_t unused2 : 11; /* [26:16] */ + e2k_rt_mlo_t end : 5; /* [31:27] */ +} e2k_rt_mlo_fields_t; +typedef union e2k_rt_mlo_struct { /* Structure of lower word */ + e2k_rt_mlo_fields_t fields; /* as fields */ + e2k_rt_mlo_t word; /* as entire register */ +} e2k_rt_mlo_struct_t; + +#define E2K_SIC_ALIGN_RT_MLO 27 /* 128 Mb */ +#define E2K_SIC_SIZE_RT_MLO (1 << E2K_SIC_ALIGN_RT_MLO) +#define E2K_RT_MLO_bgn fields.bgn +#define E2K_RT_MLO_end fields.end +#define E2K_RT_MLO_reg word + +/* memory *bank minimum size, so base address of bank align */ +#define E2K_SIC_MIN_MEMORY_BANK (256 * 1024 * 1024) /* 256 Mb */ + +/* + * Read/Write RT_MHIj Regs + */ +typedef unsigned int e2k_rt_mhi_t; /* Read/write pointer (32 bits) */ +typedef struct e2k_rt_mhi_fields { + e2k_rt_mhi_t bgn : 16; /* [15: 0] */ + e2k_rt_mhi_t end : 16; /* [31:16] */ +} e2k_rt_mhi_fields_t; +typedef union e2k_rt_mhi_struct { /* Structure of lower word */ + e2k_rt_mhi_fields_t fields; /* as fields */ + e2k_rt_mhi_t word; /* as entire register */ +} e2k_rt_mhi_struct_t; + +#define E2K_SIC_ALIGN_RT_MHI 32 /* 4 Gb */ +#define E2K_SIC_SIZE_RT_MHI (1UL << E2K_SIC_ALIGN_RT_MHI) +#define E2K_RT_MHI_bgn fields.bgn +#define E2K_RT_MHI_end fields.end +#define E2K_RT_MHI_reg word + +/* + * Read/Write RT_IOAPICj Regs + */ +typedef unsigned int e2k_rt_ioapic_t; /* Read/write pointer (32 bits) */ +typedef struct 
e2k_rt_ioapic_fields { + e2k_rt_ioapic_t unused1 : 12; /* [11:0] */ + e2k_rt_ioapic_t bgn : 9; /* [20:12] */ + e2k_rt_ioapic_t unused2 : 11; /* [31:21] */ +} e2k_rt_ioapic_fields_t; +typedef union e2k_rt_ioapic_struct { /* Structure of lower word */ + e2k_rt_ioapic_fields_t fields; /* as fields */ + e2k_rt_ioapic_t word; /* as entire register */ +} e2k_rt_ioapic_struct_t; + +#define E2K_SIC_ALIGN_RT_IOAPIC 12 /* 4 Kb */ +#define E2K_SIC_SIZE_RT_IOAPIC (1 << E2K_SIC_ALIGN_RT_IOAPIC) +#define E2K_SIC_IOAPIC_SIZE E2K_SIC_SIZE_RT_IOAPIC +#define E2K_SIC_IOAPIC_FIX_ADDR_SHIFT 21 +#define E2K_SIC_IOAPIC_FIX_ADDR_MASK \ + ~((1UL << E2K_SIC_IOAPIC_FIX_ADDR_SHIFT) - 1) +#define E2K_RT_IOAPIC_bgn fields.bgn +#define E2K_RT_IOAPIC_reg word + +/* + * Read/Write RT_MSI Regs + */ +#define E2K_SIC_ALIGN_RT_MSI 20 /* 1 Mb */ + +typedef unsigned int e2k_rt_msi_t; /* Read/write pointer (low 32 bits) */ +typedef unsigned int e2k_rt_msi_h_t; /* Read/write pointer (high 32 bits) */ +typedef struct e2k_rt_msi_fields { + e2k_rt_msi_t unused : E2K_SIC_ALIGN_RT_MSI; /* [19:0] */ + e2k_rt_msi_t bgn : (32 - E2K_SIC_ALIGN_RT_MSI); /* [31:20] */ +} e2k_rt_msi_fields_t; +typedef struct e2k_rt_msi_h_fields { + e2k_rt_msi_h_t bgn : 32; /* [63:32] */ +} e2k_rt_msi_h_fields_t; +typedef union e2k_rt_msi_struct { /* Structure of lower word */ + e2k_rt_msi_fields_t fields; /* as fields */ + e2k_rt_msi_t word; /* as entire register */ +} e2k_rt_msi_struct_t; +typedef union e2k_rt_msi_h_struct { /* Structure of higher word */ + e2k_rt_msi_h_fields_t fields; /* as fields */ + e2k_rt_msi_h_t word; /* as entire register */ +} e2k_rt_msi_h_struct_t; + +#define E2K_SIC_SIZE_RT_MSI (1 << E2K_SIC_ALIGN_RT_MSI) +#define E2K_RT_MSI_bgn fields.bgn +#define E2K_RT_MSI_end E2K_RT_MSI_bgn +#define E2K_RT_MSI_reg word +#define E2K_RT_MSI_H_bgn fields.bgn +#define E2K_RT_MSI_H_end E2K_RT_MSI_H_bgn +#define E2K_RT_MSI_H_reg word +#define E2K_RT_MSI_DEFAULT_BASE 0x120000000UL + +/* + * Read/Write ST_P Regs + */ +typedef 
unsigned int e2k_st_p_t; /* Read/write pointer (32 bits) */ +typedef struct es2_st_p_fields { + e2k_st_p_t type : 4; /* [3:0] */ + e2k_st_p_t id : 8; /* [11:4] */ + e2k_st_p_t pn : 8; /* [19:12] */ + e2k_st_p_t coh_on : 1; /* [20] */ + e2k_st_p_t pl_val : 3; /* [23:21] */ + e2k_st_p_t mlc : 1; /* [24] */ + e2k_st_p_t unused : 7; /* [31:25] */ +} es2_st_p_fields_t; +typedef es2_st_p_fields_t e2s_st_p_fields_t; +typedef es2_st_p_fields_t e8c_st_p_fields_t; +typedef union e2k_st_p_struct { /* Structure of lower word */ + es2_st_p_fields_t es2_fields; /* as fields for es2 */ + e2k_st_p_t word; /* as entire register */ +} e2k_st_p_struct_t; + +#define ES2_ST_P_type es2_fields.type +#define ES2_ST_P_id es2_fields.id +#define ES2_ST_P_coh_on es2_fields.coh_on +#define ES2_ST_P_pl_val es2_fields.pl_val +#define ES2_ST_P_mlc es2_fields.mlc +#define ES2_ST_P_pn es2_fields.pn +#define ES2_ST_P_reg word + +#define E2S_ST_P_type ES2_ST_P_type +#define E2S_ST_P_id ES2_ST_P_id +#define E2S_ST_P_coh_on ES2_ST_P_coh_on +#define E2S_ST_P_pl_val ES2_ST_P_pl_val +#define E2S_ST_P_mlc ES2_ST_P_mlc +#define E2S_ST_P_pn ES2_ST_P_pn +#define E2S_ST_P_reg ES2_ST_P_reg + +#define E8C_ST_P_type ES2_ST_P_type +#define E8C_ST_P_id ES2_ST_P_id +#define E8C_ST_P_coh_on ES2_ST_P_coh_on +#define E8C_ST_P_pl_val ES2_ST_P_pl_val +#define E8C_ST_P_mlc ES2_ST_P_mlc +#define E8C_ST_P_pn ES2_ST_P_pn +#define E8C_ST_P_reg ES2_ST_P_reg + +#define E2K_ST_P_type ES2_ST_P_type +#define E2K_ST_P_reg ES2_ST_P_reg + +#define E2K_ST_P_pl_val ES2_ST_P_pl_val +#define E2K_ST_P_mlc ES2_ST_P_mlc +#define E2K_ST_P_pn ES2_ST_P_pn + +/* + * ST_CORE core state register + */ +struct e2k_st_core_fields { + u32 val : 1; /* [0] */ + u32 wait_init : 1; /* [1] */ + u32 wait_trap : 1; /* [2] */ + u32 stop_dbg : 1; /* [3] */ + u32 clk_off : 1; /* [4] */ + u32 unused : 27; /* [31:5] */ +}; +typedef union { + struct { + u32 : 5; + u32 pmc_rst : 1; /* [5] */ + u32 : 26; + } e1cp; + struct { + u32 val : 1; /* [0] */ + u32 wait_init 
: 1; /* [1] */ + u32 wait_trap : 1; /* [2] */ + u32 stop_dbg : 1; /* [3] */ + u32 clk_off : 1; /* [4] */ + u32 unused : 27; /* [31:5] */ + }; + struct e2k_st_core_fields fields; /* as fields for e2k */ + u32 word; /* as entire register */ +} e2k_st_core_t; + +#define E2K_ST_CORE_val(__reg) ((__reg).fields.val) +#define E2K_ST_CORE_wait_init(__reg) ((__reg).fields.wait_init) +#define E2K_ST_CORE_wait_trap(__reg) ((__reg).fields.wait_trap) +#define E2K_ST_CORE_stop_dbg(__reg) ((__reg).fields.stop_dbg) +#define E2K_ST_CORE_clk_off(__reg) ((__reg).fields.clk_off) +#define E2K_ST_CORE_reg(__reg) ((__reg).word) + +#define ES2_ST_CORE_val E2K_ST_CORE_val +#define ES2_ST_CORE_wait_init E2K_ST_CORE_wait_init +#define ES2_ST_CORE_wait_trap E2K_ST_CORE_wait_trap +#define ES2_ST_CORE_stop_dbg E2K_ST_CORE_stop_dbg +#define ES2_ST_CORE_clk_off E2K_ST_CORE_clk_off +#define ES2_ST_CORE_reg E2K_ST_CORE_reg + +#define E2S_ST_CORE_val E2K_ST_CORE_val +#define E2S_ST_CORE_wait_init E2K_ST_CORE_wait_init +#define E2S_ST_CORE_wait_trap E2K_ST_CORE_wait_trap +#define E2S_ST_CORE_stop_dbg E2K_ST_CORE_stop_dbg +#define E2S_ST_CORE_clk_off E2K_ST_CORE_clk_off +#define E2S_ST_CORE_reg E2K_ST_CORE_reg + +#define E8C_ST_CORE_val E2K_ST_CORE_val +#define E8C_ST_CORE_wait_init E2K_ST_CORE_wait_init +#define E8C_ST_CORE_wait_trap E2K_ST_CORE_wait_trap +#define E8C_ST_CORE_stop_dbg E2K_ST_CORE_stop_dbg +#define E8C_ST_CORE_clk_off E2K_ST_CORE_clk_off +#define E8C_ST_CORE_reg E2K_ST_CORE_reg + +#define E1CP_ST_CORE_val E2K_ST_CORE_val +#define E1CP_ST_CORE_wait_init E2K_ST_CORE_wait_init +#define E1CP_ST_CORE_wait_trap E2K_ST_CORE_wait_trap +#define E1CP_ST_CORE_stop_dbg E2K_ST_CORE_stop_dbg +#define E1CP_ST_CORE_clk_off E2K_ST_CORE_clk_off +#define E1CP_ST_CORE_pmc_rst(__reg) ((__reg).fields.pmc_rst) +#define E1CP_ST_CORE_reg E2K_ST_CORE_reg + +/* + * IO Link control state register + */ +typedef unsigned int e2k_iol_csr_t; /* single word (32 bits) */ +typedef struct e2k_iol_csr_fields { + 
e2k_iol_csr_t mode : 1; /* [0] */ + e2k_iol_csr_t abtype : 7; /* [7:1] */ + e2k_iol_csr_t unused : 24; /* [31:8] */ +} e2k_iol_csr_fields_t; +typedef union e2k_iol_csr_struct { /* Structure of word */ + e2k_iol_csr_fields_t fields; /* as fields */ + e2k_iol_csr_t word; /* as entire register */ +} e2k_iol_csr_struct_t; + +#define E2K_IOL_CSR_mode fields.mode /* type of controller */ + /* on the link */ +#define E2K_IOL_CSR_abtype fields.abtype /* type of abonent */ + /* on the link */ +#define E2K_IOL_CSR_reg word +#define IOHUB_IOL_MODE 1 /* controller is IO HUB */ +#define RDMA_IOL_MODE 0 /* controller is RDMA */ +#define IOHUB_ONLY_IOL_ABTYPE 1 /* abonent has only IO HUB */ + /* controller */ +#define RDMA_ONLY_IOL_ABTYPE 2 /* abonent has only RDMA */ + /* controller */ +#define RDMA_IOHUB_IOL_ABTYPE 3 /* abonent has RDMA and */ + /* IO HUB controller */ + +/* + * IO channel control/status register + */ +typedef unsigned int e2k_io_csr_t; /* single word (32 bits) */ +typedef struct e2k_io_csr_fields { + e2k_io_csr_t srst : 1; /* [0] */ + e2k_io_csr_t unused1 : 3; /* [3:1] */ + e2k_io_csr_t bsy_ie : 1; /* [4] */ + e2k_io_csr_t err_ie : 1; /* [5] */ + e2k_io_csr_t to_ie : 1; /* [6] */ + e2k_io_csr_t lsc_ie : 1; /* [7] */ + e2k_io_csr_t unused2 : 4; /* [11:8] */ + e2k_io_csr_t bsy_ev : 1; /* [12] */ + e2k_io_csr_t err_ev : 1; /* [13] */ + e2k_io_csr_t to_ev : 1; /* [14] */ + e2k_io_csr_t lsc_ev : 1; /* [15] */ + e2k_io_csr_t unused3 : 14; /* [29:16] */ + e2k_io_csr_t link_tu : 1; /* [30] */ + e2k_io_csr_t ch_on : 1; /* [31] */ +} e2k_io_csr_fields_t; +typedef union e2k_io_csr_struct { /* Structure of word */ + e2k_io_csr_fields_t fields; /* as fields */ + e2k_io_csr_t word; /* as entire register */ +} e2k_io_csr_struct_t; + +#define E2K_IO_CSR_srst fields.srst /* sofrware reset flag */ +#define E2K_IO_CSR_bsy_ie fields.bsy_ie /* flag of interrupt enable */ + /* on receiver busy */ +#define E2K_IO_CSR_err_ie fields.err_ie /* flag of interrupt enable */ + /* on 
CRC-error */ +#define E2K_IO_CSR_to_ie fields.to_ie /* flag of interrupt enable */ + /* on timeout */ +#define E2K_IO_CSR_lsc_ie fields.lsc_ie /* flag of interrupt enable */ + /* on link state changed */ +#define E2K_IO_CSR_bsy_ev fields.bsy_ev /* flag of interrupt */ + /* on receiver busy */ +#define E2K_IO_CSR_err_ev fields.err_ev /* flag of interrupt */ + /* on CRC-error */ +#define E2K_IO_CSR_to_ev fields.to_ev /* flag of interrupt */ + /* on timeout */ +#define E2K_IO_CSR_lsc_ev fields.lsc_ev /* flag of interrupt */ + /* on link state changed */ +#define E2K_IO_CSR_link_tu fields.link_tu /* flag of trening */ + /* in progress */ +#define E2K_IO_CSR_ch_on fields.ch_on /* flag of chanel */ + /* is ready and online */ +#define E2K_IO_CSR_reg word +#define IO_IS_ON_IO_CSR 1 /* IO controller is ready */ + /* and online */ +/* + * IO channel statistic register + */ +typedef unsigned int e2k_io_str_t; /* single word (32 bits) */ +typedef struct e2k_io_str_fields { + e2k_io_str_t rc : 24; /* [23:0] */ + e2k_io_str_t rcol : 1; /* [24] */ + e2k_io_str_t reserved : 4; /* [28:25] */ + e2k_io_str_t bsy_rce : 1; /* [29] */ + e2k_io_str_t err_rce : 1; /* [30] */ + e2k_io_str_t to_rce : 1; /* [31] */ +} e2k_io_str_fields_t; +typedef union e2k_io_str_struct { /* Structure of word */ + e2k_io_str_fields_t fields; /* as fields */ + e2k_io_str_t word; /* as entire register */ +} e2k_io_str_struct_t; + +#define E2K_IO_STR_rc fields.rc /* repeat counter */ +#define E2K_IO_STR_rcol fields.rcol /* repeat counter overload */ +#define E2K_IO_STR_bsy_rce fields.bsy_rce /* busy repeat count enable */ +#define E2K_IO_STR_err_rce fields.err_rce /* CRC-error repeat count */ + /* enable */ +#define E2K_IO_STR_to_rce fields.to_rce /* TO repeat count enable */ +#define E2K_IO_STR_reg word + +/* + * RDMA controller state register + */ +typedef unsigned int e2k_rdma_cs_t; /* single word (32 bits) */ +typedef struct e2k_rdma_cs_fields { + e2k_rdma_cs_t ptocl : 16; /* [15:0] */ + e2k_rdma_cs_t 
unused1 : 10; /* [25:16] */ + e2k_rdma_cs_t srst : 1; /* [26] */ + e2k_rdma_cs_t mor : 1; /* [27] */ + e2k_rdma_cs_t mow : 1; /* [28] */ + e2k_rdma_cs_t fch_on : 1; /* [29] */ + e2k_rdma_cs_t link_tu : 1; /* [30] */ + e2k_rdma_cs_t ch_on : 1; /* [31] */ +} e2k_rdma_cs_fields_t; +typedef union e2k_rdma_cs_struct { /* Structure of word */ + e2k_rdma_cs_fields_t fields; /* as fields */ + e2k_rdma_cs_t word; /* as entire register */ +} e2k_rdma_cs_struct_t; + +#define E2K_RDMA_CS_ptocl fields.ptocl /* timeout clock */ +#define E2K_RDMA_CS_srst fields.srst /* sofrware reset flag */ +#define E2K_RDMA_CS_mor fields.mor /* flag of not completed */ + /* readings */ +#define E2K_RDMA_CS_mow fields.mow /* flag of not completed */ + /* writings */ +#define E2K_RDMA_CS_fch_on fields.fch_on /* flag of chanel */ + /* forced set on */ +#define E2K_RDMA_CS_link_tu fields.link_tu /* flag of trenning */ + /* in progress */ +#define E2K_RDMA_CS_ch_on fields.ch_on /* flag of chanel */ + /* is ready and online */ +#define E2K_RDMA_CS_reg word + +/* + * Read/Write PWR_MGR0 register + */ +struct e2k_pwr_mgr_fields { + u32 core0_clk : 1; /* [0] */ + u32 core1_clk : 1; /* [1] */ + u32 ic_clk : 1; /* [2] */ + u32 unused1 : 13; /* [15:3] */ + u32 snoop_wait : 2; /* [17:16] */ + u32 unused2 : 14; /* [31:18] */ +}; +typedef union { + struct { + u32 core0_clk : 1; /* [0] */ + u32 core1_clk : 1; /* [1] */ + u32 ic_clk : 1; /* [2] */ + u32 unused1 : 13; /* [15:3] */ + u32 snoop_wait : 2; /* [17:16] */ + u32 unused2 : 14; /* [31:18] */ + }; + struct e2k_pwr_mgr_fields fields; /* as fields */ + u32 word; /* as entire register */ +} e2k_pwr_mgr_t; + +#define E2K_PWR_MGR0_core0_clk fields.core0_clk /* core #0 clock on/off */ +#define E2K_PWR_MGR0_core1_clk fields.core1_clk /* core #1 clock on/off */ +#define E2K_PWR_MGR0_ic_clk fields.ic_clk /* dsp clock on/off */ +#define E2K_PWR_MGR0_snoop_wait fields.snoop_wait /* delay before off */ + /* for snoop-requests */ + /* handling */ +#define 
E2K_PWR_MGR0_reg word + +/* + * Monitor control register (SIC_MCR) + */ +typedef unsigned int e2k_sic_mcr_t; /* single word (32 bits) */ +typedef struct e2k_sic_mcr_fields { + e2k_sic_mcr_t v0 : 1; /* [0] */ + e2k_sic_mcr_t unused1 : 1; /* [1] */ + e2k_sic_mcr_t es0 : 6; /* [7:2] */ + e2k_sic_mcr_t v1 : 1; /* [8] */ + e2k_sic_mcr_t unused2 : 1; /* [9] */ + e2k_sic_mcr_t es1 : 6; /* [15:10] */ + e2k_sic_mcr_t mcn : 5; /* [20:16] */ + e2k_sic_mcr_t mcnmo : 1; /* [21:21] */ + e2k_sic_mcr_t unused3 : 10; /* [31:22] */ +} e2k_sic_mcr_fields_t; +typedef union e2k_sic_mcr_struct { /* Structure of word */ + e2k_sic_mcr_fields_t fields; /* as fields */ + e2k_sic_mcr_t word; /* as entire register */ +} e2k_sic_mcr_struct_t; + +#define E2K_SIC_MCR_v0 fields.v0 /* monitor #0 valid */ +#define E2K_SIC_MCR_es0 fields.es0 /* monitor #0 event */ + /* specifier */ +#define E2K_SIC_MCR_v1 fields.v1 /* monitor #1 valid */ +#define E2K_SIC_MCR_es1 fields.es1 /* monitor #1 event */ + /* specifier */ +#define E2K_SIC_MCR_reg word + +/* + * Monitor accumulator register hi part (SIC_MAR0_hi, SIC_MAR1_hi) + */ +typedef unsigned int e2k_sic_mar_hi_t; /* single word (32 bits) */ +typedef struct e2k_sic_mar_hi_fields { + e2k_sic_mar_hi_t val : 31; /* [30:0] */ + e2k_sic_mar_hi_t of : 1; /* [31] */ +} e2k_sic_mar_hi_fields_t; +typedef union e2k_sic_mar_hi_struct { /* Structure of word */ + e2k_sic_mar_hi_fields_t fields; /* as fields */ + e2k_sic_mar_hi_t word; /* as entire register */ +} e2k_sic_mar_hi_struct_t; + +#define E2K_SIC_MAR_HI_val fields.val /* high part of events */ + /* counter */ +#define E2K_SIC_MAR_HI_of fields.of /* overflow flag */ +#define E2K_SIC_MAR_HI_reg word + +/* + * Monitor accumulator register lo part (SIC_MAR0_lo, SIC_MAR1_lo) + */ +typedef unsigned int e2k_sic_mar_lo_t; /* single word (32 bits) */ + +#define E2K_SIC_MAR_LO_val fields.val /* low part of events */ + /* counter */ +#define E2K_SIC_MAR_LO_reg word + +/* + * Read/Write MCX_ECC (X={0, 1, 2, 3}) 
registers + */ +typedef unsigned int e2k_mc_ecc_t; /* single word (32 bits) */ +typedef struct e2k_mc_ecc_fields { + e2k_mc_ecc_t ee : 1; /* [0] */ + e2k_mc_ecc_t dmode : 1; /* [1] */ + e2k_mc_ecc_t of : 1; /* [2] */ + e2k_mc_ecc_t ue : 1; /* [3] */ + e2k_mc_ecc_t reserved : 12; /* [15:4] */ + e2k_mc_ecc_t secnt : 16; /* [31:16] */ +} e2k_mc_ecc_fields_t; +typedef union e2k_mc_ecc_struct { /* Structure word */ + e2k_mc_ecc_fields_t fields; /* as fields */ + e2k_mc_ecc_t word; /* as entire register */ +} e2k_mc_ecc_struct_t; + +#define E2K_MC_ECC_ee fields.ee /* ECC mode on/off */ +#define E2K_MC_ECC_dmode fields.dmode /* diagnostic mode on/off */ +#define E2K_MC_ECC_of fields.of /* single error counter */ + /* overflow flag */ +#define E2K_MC_ECC_ue fields.ue /* multiple-error flag */ +#define E2K_MC_ECC_secnt fields.secnt /* single error counter */ +#define E2K_MC_ECC_reg word + + +/* + * Read/Write MCX_OPMb (X={0, 1, 2, 3}) registers + * ! only for P1 processor type ! + */ +typedef unsigned int e2k_mc_opmb_t; /* single word (32 bits) */ +typedef struct e2k_mc_opmb_fields { + e2k_mc_opmb_t ct0 : 3; /* [0:2] */ + e2k_mc_opmb_t ct1 : 3; /* [3:5] */ + e2k_mc_opmb_t pbm0 : 2; /* [6:7] */ + e2k_mc_opmb_t pbm1 : 2; /* [8:9] */ + e2k_mc_opmb_t rm : 1; /* [10] */ + e2k_mc_opmb_t rdodt : 1; /* [11] */ + e2k_mc_opmb_t wrodt : 1; /* [12] */ + e2k_mc_opmb_t bl8int : 1; /* [13] */ + e2k_mc_opmb_t mi_fast : 1; /* [14] */ + e2k_mc_opmb_t mt : 1; /* [15] */ + e2k_mc_opmb_t il : 1; /* [16] */ + e2k_mc_opmb_t rcven_del : 2; /* [17:18] */ + e2k_mc_opmb_t mc_ps : 1; /* [19] */ + e2k_mc_opmb_t arp_en : 1; /* [20] */ + e2k_mc_opmb_t flt_brop : 1; /* [21] */ + e2k_mc_opmb_t flt_rdpr : 1; /* [22] */ + e2k_mc_opmb_t flt_blk : 1; /* [23] */ + e2k_mc_opmb_t parerr : 1; /* [24] */ + e2k_mc_opmb_t cmdpack : 1; /* [25] */ + e2k_mc_opmb_t sldwr : 1; /* [26] */ + e2k_mc_opmb_t sldrd : 1; /* [27] */ + e2k_mc_opmb_t mirr : 1; /* [28] */ + e2k_mc_opmb_t twrwr : 2; /* [29:30] */ + e2k_mc_opmb_t mcln 
: 1; /* [31] */ +} e2k_mc_opmb_fields_t; +typedef union e2k_mc_opmb_struct { /* Structure word */ + e2k_mc_opmb_fields_t fields; /* as fields */ + e2k_mc_opmb_t word; /* as entire register */ +} e2k_mc_opmb_struct_t; + +#define E2K_MC_OPMB_pbm0 fields.pbm0 /* physycal bank map slot 0 */ +#define E2K_MC_OPMB_pbm1 fields.pbm1 /* physycal bank map slot 1 */ +#define E2K_MC_OPMB_reg word + +/* + * Read/Write MCX_CFG (X={0, 1, 2, 3}) registers + * P9, E2C3, E12 and E16 processor type + */ +typedef unsigned int e2k_mc_cfg_t; /* single word (32 bits) */ +typedef struct e2k_mc_cfg_fields { + e2k_mc_cfg_t ct0 : 3; /* [0:2] */ + e2k_mc_cfg_t ct1 : 3; /* [3:5] */ + e2k_mc_cfg_t pbm0 : 2; /* [6:7] */ + e2k_mc_cfg_t pbm1 : 2; /* [8:9] */ + e2k_mc_cfg_t rm : 1; /* [10] */ + e2k_mc_cfg_t reserve1 : 2; /* [11:12] */ + e2k_mc_cfg_t mirr : 1; /* [13] */ + e2k_mc_cfg_t sf : 3; /* [14:16] */ + e2k_mc_cfg_t mt : 1; /* [17] */ + e2k_mc_cfg_t cs8 : 1; /* [18] */ + e2k_mc_cfg_t ptrr_mode : 2; /* [19:20] */ + e2k_mc_cfg_t mcrc : 1; /* [21] */ + e2k_mc_cfg_t odt_ext : 2; /* [22:23] */ + e2k_mc_cfg_t pbswap : 1; /* [24] */ + e2k_mc_cfg_t dqw : 2; /* [25:26] */ + e2k_mc_cfg_t pda_sel : 5; /* [27:31] */ +} e2k_mc_cfg_fields_t; +typedef union e2k_mc_cfg_struct { /* Structure word */ + e2k_mc_cfg_fields_t fields; /* as fields */ + e2k_mc_cfg_t word; /* as entire register */ +} e2k_mc_cfg_struct_t; + +#define E2K_MC_CFG_pbm0 fields.pbm0 /* physycal bank map slot 0 */ +#define E2K_MC_CFG_pbm1 fields.pbm1 /* physycal bank map slot 1 */ +#define E2K_MC_CFG_reg word + +/* + * Read/Write IPCC_CSRX (X={1, 2, 3}) registers + */ +typedef unsigned int e2k_ipcc_csr_t; /* single word (32 bits) */ +typedef struct e2k_ipcc_csr_fields { + e2k_ipcc_csr_t link_scale : 4; /* [3:0] */ + e2k_ipcc_csr_t cmd_code : 3; /* [6:4] */ + e2k_ipcc_csr_t cmd_active : 1; /* [7] */ + e2k_ipcc_csr_t reserved : 1; /* [8] */ + e2k_ipcc_csr_t terr_vc_num : 3; /* [11:9] */ + e2k_ipcc_csr_t rx_oflw_uflw : 1; /* [12] */ + 
e2k_ipcc_csr_t event_imsk : 3; /* [15:13] */ + e2k_ipcc_csr_t ltssm_state : 5; /* [20:16] */ + e2k_ipcc_csr_t cmd_cmpl_sts : 3; /* [23:21] */ + e2k_ipcc_csr_t link_width : 4; /* [27:24] */ + e2k_ipcc_csr_t event_sts : 3; /* [30:28] */ + e2k_ipcc_csr_t link_state : 1; /* [31] */ +} e2k_ipcc_csr_fields_t; +typedef union e2k_ipcc_csr_struct { /* Structure word */ + e2k_ipcc_csr_fields_t fields; /* as fields */ + e2k_ipcc_csr_t word; /* as entire register */ +} e2k_ipcc_csr_struct_t; + +#define E2K_IPCC_CSR_link_scale fields.link_scale +#define E2K_IPCC_CSR_cmd_code fields.cmd_code +#define E2K_IPCC_CSR_cmd_active fields.cmd_active +#define E2K_IPCC_CSR_terr_vc_num fields.terr_vc_num +#define E2K_IPCC_CSR_rx_oflw_uflw fields.rx_oflw_uflw +#define E2K_IPCC_CSR_event_imsk fields.event_imsk +#define E2K_IPCC_CSR_ltssm_state fields.ltssm_state +#define E2K_IPCC_CSR_cmd_cmpl_sts fields.cmd_cmpl_sts +#define E2K_IPCC_CSR_link_width fields.link_width +#define E2K_IPCC_CSR_event_sts fields.event_sts +#define E2K_IPCC_CSR_link_state fields.link_state +#define E2K_IPCC_CSR_reg word + +/* + * Read/Write IPCC_STRX (X={1, 2, 3}) registers + */ +typedef unsigned int e2k_ipcc_str_t; /* single word (32 bits) */ +typedef struct e2k_ipcc_str_fields { + e2k_ipcc_str_t ecnt : 29; /* [28:0] */ + e2k_ipcc_str_t eco : 1; /* [29] */ + e2k_ipcc_str_t ecf : 2; /* [31:30] */ +} e2k_ipcc_str_fields_t; +typedef union e2k_ipcc_str_struct { /* Structure word */ + e2k_ipcc_str_fields_t fields; /* as fields */ + e2k_ipcc_str_t word; /* as entire register */ +} e2k_ipcc_str_struct_t; + +#define E2K_IPCC_STR_ecnt fields.ecnt /* event counter */ +#define E2K_IPCC_STR_eco fields.eco /* event counter overflow */ +#define E2K_IPCC_STR_ecf fields.ecf /* event counter filter */ +#define E2K_IPCC_STR_reg word + +/* + * Read/Write SIC_SCCFG register + */ +typedef unsigned int e2k_sic_sccfg_t; /* single word (32 bits) */ +typedef struct e2k_sic_sccfg_fields { + e2k_sic_sccfg_t diren : 1; /* [0] */ + 
e2k_sic_sccfg_t dircacheen : 1; /* [1] */ + e2k_sic_sccfg_t unused : 30; /* [31:2] */ +} e2k_sic_sccfg_fields_t; +typedef union e2k_sic_sccfg_struct { /* Structure word */ + e2k_sic_sccfg_fields_t fields; /* as fields */ + e2k_sic_sccfg_t word; /* as entire register */ +} e2k_sic_sccfg_struct_t; + +#define E2K_SIC_SCCFG_diren fields.diren /* directory enabled */ +#define E2K_SIC_SCCFG_dircacheen fields.dircacheen /* directory cache enabled */ +#define E2K_SIC_SCCFG_reg word + +/* + * Cache L3 registers structures + */ +/* Control register */ +typedef unsigned int l3_reg_t; /* Read/write register (32 bits) */ +typedef struct l3_ctrl_fields { + l3_reg_t fl : 1; /* [0] flush L3 */ + l3_reg_t cl : 1; /* [1] clear L3 */ + l3_reg_t rdque : 1; /* [2] read queues */ + l3_reg_t rnc_rrel : 1; /* [3] wait RREL for Rnc */ + l3_reg_t lru_separate : 1; /* [4] LRU separate */ + l3_reg_t pipe_ablk_s1 : 1; /* [5] pipe address block S1 */ + l3_reg_t pipe_ablk_s2 : 1; /* [6] pipe address block S2 */ + l3_reg_t sleep_blk : 1; /* [7] sleep block */ + l3_reg_t wbb_forced : 1; /* [8] WBB forced */ + l3_reg_t wbb_refill : 1; /* [9] WBB refill */ + l3_reg_t wbb_timeron : 1; /* [10] WBB release on timer */ + l3_reg_t wbb_timer : 7; /* [17:11] WBB timer set */ + l3_reg_t wbb_tfullon : 1; /* [18] WBB release on timer */ + /* at full state */ + l3_reg_t wbb_tfull : 7; /* [25:19] WBB timer at full */ + /* state set */ + l3_reg_t seal_gblk : 1; /* [26] sealed global block */ + l3_reg_t seal_lblk : 1; /* [27] sealed local block */ + l3_reg_t reserved : 4; /* [31:28] reserved bits */ +} l3_ctrl_fields_t; +typedef union l3_ctrl { /* Structure word */ + l3_ctrl_fields_t fields; /* as fields */ + l3_reg_t word; /* as entire register */ +} l3_ctrl_t; + +#define E2K_L3_CTRL_fl fields.fl +#define E2K_L3_CTRL_cl fields.cl +#define E2K_L3_CTRL_rdque fields.rdque +#define E2K_L3_CTRL_rnc_rrel fields.rnc_rrel +#define E2K_L3_CTRL_lru_separate fields.lru_separate +#define E2K_L3_CTRL_pipe_ablk_s1 
fields.pipe_ablk_s1 +#define E2K_L3_CTRL_pipe_ablk_s2 fields.pipe_ablk_s2 +#define E2K_L3_CTRL_sleep_blk fields.sleep_blk +#define E2K_L3_CTRL_wbb_forced fields.wbb_forced +#define E2K_L3_CTRL_wbb_refill fields.wbb_refill +#define E2K_L3_CTRL_wbb_timeron fields.wbb_timeron +#define E2K_L3_CTRL_wbb_timer fields.wbb_timer +#define E2K_L3_CTRL_wbb_tfullon fields.wbb_tfullon +#define E2K_L3_CTRL_wbb_tfull fields.wbb_tfull +#define E2K_L3_CTRL_seal_gblk fields.seal_gblk +#define E2K_L3_CTRL_seal_lblk fields.seal_lblk +#define E2K_L3_CTRL_reg word + +/* + * Read/Write BC_MP_T_CORR register + */ +typedef unsigned int bc_mp_t_corr_t; /* single word (32 bits) */ +typedef struct bc_mp_t_corr_fields { + bc_mp_t_corr_t corr : 1; /* [0] */ + bc_mp_t_corr_t value : 1; /* [1] */ + bc_mp_t_corr_t unused : 10; /* [11:2] */ + bc_mp_t_corr_t addr : 20; /* [31:12] */ +} bc_mp_t_corr_fields_t; +typedef union bc_mp_t_corr_struct { /* Structure word */ + bc_mp_t_corr_fields_t fields; /* as fields */ + bc_mp_t_corr_t word; /* as entire register */ +} bc_mp_t_corr_struct_t; + +#define E2K_MP_T_CORR_corr fields.corr +#define E2K_MP_T_CORR_value fields.value +#define E2K_MP_T_CORR_addr fields.addr +#define E2K_MP_T_CORR_reg word + +/* + * Read/Write BC_MP_T_CORR_H + */ +typedef unsigned int bc_mp_t_corr_h_t; /* single word (32 bits) */ +typedef struct bc_mp_t_corr_h_fields { + bc_mp_t_corr_h_t addr; /* [31:0]*/ +} bc_mp_t_corr_h_fields_t; +typedef union bc_mp_t_corr_h_struct { /* Structure word */ + bc_mp_t_corr_h_fields_t fields; /* as fields */ + bc_mp_t_corr_h_t word; /* as entire register */ +} bc_mp_t_corr_h_struct_t; + +#define E2K_MP_T_CORR_H_addr fields.addr +#define E2K_MP_T_CORR_H_reg word + +/* + * Read/Write BC_MP_CTRL register + */ +typedef unsigned int bc_mp_ctrl_t; /* single word (32 bits) */ +typedef struct bc_mp_ctrl_fields { + bc_mp_ctrl_t unused0 : 12; /* [11:0] */ + bc_mp_ctrl_t mp_en : 1; /* [12] */ + bc_mp_ctrl_t b_en : 1; /* [13] */ + bc_mp_ctrl_t unused1 : 18; /* 
[31:14] */ +} bc_mp_ctrl_fields_t; +typedef union bc_mp_ctrl_struct { /* Structure word */ + bc_mp_ctrl_fields_t fields; /* as fields */ + bc_mp_ctrl_t word; /* as entire register */ +} bc_mp_ctrl_struct_t; + +#define E2K_MP_CTRL_mp_en fields.mp_en +#define E2K_MP_CTRL_b_en fields.b_en +#define E2K_MP_CTRL_reg word + +/* + * Read/Write BC_MP_STAT register + */ +typedef unsigned int bc_mp_stat_t; /* single word (32 bits) */ +typedef struct bc_mp_stat_fields { + bc_mp_stat_t unused0 : 12; /* [11:0] */ + bc_mp_stat_t b_ne : 1; /* [12] */ + bc_mp_stat_t b_of : 1; /* [13] */ + bc_mp_stat_t unused1 : 18; /* [31:14] */ +} bc_mp_stat_fields_t; +typedef union bc_mp_stat_struct { /* Structure word */ + bc_mp_stat_fields_t fields; /* as fields */ + bc_mp_stat_t word; /* as entire register */ +} bc_mp_stat_struct_t; + +#define E2K_MP_STAT_b_ne fields.b_ne +#define E2K_MP_STAT_b_of fields.b_of +#define E2K_MP_STAT_reg word + +#endif /* ! __ASSEMBLY__ */ +#endif /* __KERNEL__ */ +#endif /* _E2K_SIC_REGS_H_ */ diff --git a/arch/e2k/include/asm/sic_regs_access.h b/arch/e2k/include/asm/sic_regs_access.h new file mode 100644 index 000000000000..601762ce71c5 --- /dev/null +++ b/arch/e2k/include/asm/sic_regs_access.h @@ -0,0 +1,95 @@ +#ifndef _E2K_SIC_REGS_ACCESS_H_ +#define _E2K_SIC_REGS_ACCESS_H_ + +#ifdef __KERNEL__ + +#include +#include + +#undef DEBUG_BOOT_NBSR_MODE +#undef DebugBNBSR +#define DEBUG_BOOT_NBSR_MODE 0 /* early NBSR access */ +#define DebugBNBSR(fmt, args...) \ + ({ if (DEBUG_BOOT_NBSR_MODE) \ + do_boot_printk(fmt, ##args); }) + +#define SIC_io_reg_offset(io_link, reg) ((reg) + 0x1000 * (io_link)) + +#ifndef CONFIG_BOOT_E2K +#define nbsr_early_read(addr) boot_readl((addr)) +#define nbsr_early_write(value, addr) boot_writel((value), (addr)) +#else /* CONFIG_BOOT_E2K */ +#define nbsr_early_read(addr) boot_native_readl((addr)) +#define nbsr_early_write(value, addr) boot_native_writel((value), (addr)) +#endif /* ! 
CONFIG_BOOT_E2K */ + +static inline unsigned int +boot_do_sic_read_node_nbsr_reg(unsigned char *node_nbsr, int reg_offset) +{ + unsigned char *addr; + unsigned int reg_value; + + addr = node_nbsr + reg_offset; + reg_value = nbsr_early_read(addr); + DebugBNBSR("boot_sic_read_node_nbsr_reg() the node reg 0x%x read 0x%x " + "from 0x%lx\n", + reg_offset, reg_value, addr); + return reg_value; +} + +static inline void +boot_do_sic_write_node_nbsr_reg(unsigned char *node_nbsr, int reg_offset, + unsigned int reg_val) +{ + unsigned char *addr; + + addr = node_nbsr + reg_offset; + nbsr_early_write(reg_val, addr); + DebugBNBSR("boot_sic_write_node_nbsr_reg() the node reg 0x%x write " + "0x%x to 0x%lx\n", + reg_offset, reg_val, addr); +} +static inline unsigned int +boot_sic_read_node_nbsr_reg(int node_id, int reg_offset) +{ + unsigned char *node_nbsr; + + node_nbsr = BOOT_THE_NODE_NBSR_PHYS_BASE(node_id); + return boot_do_sic_read_node_nbsr_reg(node_nbsr, reg_offset); +} +static inline void +boot_sic_write_node_nbsr_reg(int node_id, int reg_offset, unsigned int reg_val) +{ + unsigned char *node_nbsr; + + node_nbsr = BOOT_THE_NODE_NBSR_PHYS_BASE(node_id); + boot_do_sic_write_node_nbsr_reg(node_nbsr, reg_offset, reg_val); +} + +#define nbsr_read(addr) readl((addr)) +#define nbsr_readll(addr) readq((addr)) +#define nbsr_readw(addr) readw((addr)) +#define nbsr_write(value, addr) writel((value), (addr)) +#define nbsr_writell(value, addr) writeq((value), (addr)) +#define nbsr_writew(value, addr) writew((value), (addr)) +#define nbsr_write_relaxed(value, addr) writel_relaxed((value), (addr)) + +unsigned int sic_get_mc_ecc(int node, int num); +void sic_set_mc_ecc(int node, int num, unsigned int reg_value); + +unsigned int sic_get_mc_opmb(int node, int num); +unsigned int sic_get_mc_cfg(int node, int num); + +unsigned int sic_get_ipcc_csr(int node, int num); +void sic_set_ipcc_csr(int node, int num, unsigned int val); + +unsigned int sic_get_ipcc_str(int node, int num); +void 
sic_set_ipcc_str(int node, int num, unsigned int val); + +unsigned int sic_get_io_str(int node, int num); +void sic_set_io_str(int node, int num, unsigned int val); +#endif /* __KERNEL__ */ + +#include + +#endif /* _E2K_SIC_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/sigcontext.h b/arch/e2k/include/asm/sigcontext.h new file mode 100644 index 000000000000..faf1647b8ce0 --- /dev/null +++ b/arch/e2k/include/asm/sigcontext.h @@ -0,0 +1,22 @@ +#ifndef _E2K_SIGCONTEXT_H_ +#define _E2K_SIGCONTEXT_H_ + +#include + +#ifdef CONFIG_PROTECTED_MODE +struct sigcontext_prot { + unsigned long long cr0_lo; + unsigned long long cr0_hi; + unsigned long long cr1_lo; + unsigned long long cr1_hi; + unsigned long long sbr; /* 21 Stack base register: top of */ + /* local data (user) stack */ + unsigned long long usd_lo; /* 22 Local data (user) stack */ + unsigned long long usd_hi; /* 23 descriptor: base & size */ + unsigned long long psp_lo; /* 24 Procedure stack pointer: */ + unsigned long long psp_hi; /* 25 base & index & size */ + unsigned long long pcsp_lo; /* 26 Procedure chain stack */ + unsigned long long pcsp_hi; /* 27 pointer: base & index & size */ +}; +#endif +#endif /* _E2K_SIGCONTEXT_H_ */ diff --git a/arch/e2k/include/asm/signal.h b/arch/e2k/include/asm/signal.h new file mode 100644 index 000000000000..2cd7749a7013 --- /dev/null +++ b/arch/e2k/include/asm/signal.h @@ -0,0 +1,212 @@ +#ifndef _E2K_SIGNAL_H_ +#define _E2K_SIGNAL_H_ + +#include + + +#undef DEBUG_SIG_MODE +#undef DebugSig +#define DEBUG_SIG_MODE 0 /* Signal handling */ +#if DEBUG_SIG_MODE +# define DebugSig printk +#else +# define DebugSig(...) +#endif + +#undef DEBUG_SLJ_MODE +#undef DebugSLJ +#define DEBUG_SLJ_MODE 0 /* Signal long jump handling */ +#define DebugSLJ(...) 
DebugPrint(DEBUG_SLJ_MODE ,##__VA_ARGS__) + + +#define __ARCH_HAS_SA_RESTORER + +/* + * exc_mem_lock_as can arrive at inside of a critical section since + * it uses non-maskable interrupts, + * + * But in PREEMPT_RT force_sig_info() must be called with + * enabled preemption because spinlocks are mutexes + * + * Fix this by delaying signal sending. + */ +#ifdef CONFIG_PREEMPT_RT +# define ARCH_RT_DELAYS_SIGNAL_SEND +#endif + + +#define _NSIG 64 +#define _NSIG_BPW 64 +#define _NSIG_WORDS (_NSIG / _NSIG_BPW) + +#define _NSIG_BPW32 32 +#define _NSIG_WORDS32 (_NSIG / _NSIG_BPW32) + +#include +#include + +#include +#include + +# ifndef __ASSEMBLY__ + +typedef struct { + e2k_ptr_t ss_sp; + int ss_flags; + size_t ss_size; +} stack_prot_t; + +/* Most things should be clean enough to redefine this at will, if care + is taken to make libc match. */ + +typedef unsigned long old_sigset_t; + +typedef struct { + unsigned long sig[_NSIG_WORDS]; +} sigset_t; + +typedef struct prot_sigaction_old { + e2k_pl_lo_t sa_handler; + u64 sa_flags; + e2k_pl_lo_t sa_restorer; + sigset_t sa_mask; +} prot_sigaction_old_t; + +typedef struct prot_sigaction { + e2k_pl_t sa_handler; + u64 sa_flags; + u64 _unused; + e2k_pl_t sa_restorer; + sigset_t sa_mask; +} prot_sigaction_t; + +#include + +struct pt_regs; +struct siginfo; +struct ucontext; +struct as_sa_handler_arg; + +#define ptrace_signal_deliver() do { } while (0) + + +#define DO_SDBGPRINT(message) \ +do { \ + e2k_tir_lo_t tir_lo; \ + void *cr_ip, *tir_ip; \ + \ + tir_lo.TIR_lo_reg = (regs)->trap->TIR_lo; \ + \ + tir_ip = (void *)tir_lo.TIR_lo_ip; \ + cr_ip = (void *)GET_IP_CR0_HI((regs)->crs.cr0_hi); \ + \ + if (tir_ip == cr_ip) \ + pr_info("%s: IP=%px %s(pid=%d)\n", \ + message, tir_ip, current->comm, \ + current->pid); \ + else \ + pr_info("%s: IP=%px(interrupt IP=%px) %s(pid=%d)\n", \ + message, tir_ip, cr_ip, current->comm, \ + current->pid); \ +} while (false) + +#define SDBGPRINT(message) \ +do { \ + if (debug_signal) \ + 
DO_SDBGPRINT(message); \ +} while (0) + +#define SDBGPRINT_WITH_STACK(message) \ +do { \ + if (debug_signal) { \ + DO_SDBGPRINT(message); \ + dump_stack(); \ + } \ +} while (0) + +struct signal_stack; +extern void free_signal_stack(struct signal_stack *signal_stack); +extern struct signal_stack_context __user *pop_signal_stack(void); +extern struct signal_stack_context __user *get_signal_stack(void); +extern int setup_signal_stack(struct pt_regs *regs, bool is_signal); + +#define GET_SIG_RESTORE_STACK(ti, sbr, usd_lo, usd_hi) \ +do { \ + /* Reserve 64 bytes for kernel per C calling convention */ \ + u64 used_dstack_size = round_up(64, E2K_ALIGN_STACK); \ + \ + sbr = (u64)thread_info_task(ti)->stack + KERNEL_C_STACK_SIZE + \ + KERNEL_C_STACK_OFFSET; \ + AW(usd_lo) = AW((ti)->k_usd_lo); \ + AW(usd_hi) = AW((ti)->k_usd_hi); \ + AS(usd_lo).base -= used_dstack_size; \ + AS(usd_hi).size -= used_dstack_size; \ +} while (false) + +/* The topmost dispatcher for any signals. */ +/* Implemented in arch/e2k/kernel/signal.c */ +extern void do_signal(struct pt_regs *); +extern int signal_rt_frame_setup(struct pt_regs *regs); +extern int prepare_sighandler_frame(struct e2k_stacks *stacks, + u64 pframe[32], e2k_mem_crs_t *crs); + +extern int native_signal_setup(struct pt_regs *regs); + +static inline int native_complete_long_jump(struct pt_regs *regs) +{ + /* nithing to do for native kernel & host */ + return 0; +} + +extern long do_sigreturn(void); +extern void sighandler_trampoline(void); +extern void sighandler_trampoline_continue(void); + +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT +extern long sys_tgkill_info(int pid, int tgid, struct siginfo __user *uinfo); + +#define set_delayed_signal_handling(ti) \ +do { \ + set_ti_status_flag(ti, TS_DELAYED_SIG_HANDLING); \ +} while (0) + +#define clear_delayed_signal_handling(ti) \ +do { \ + clear_ti_status_flag(ti, TS_DELAYED_SIG_HANDLING); \ +} while (0) + +#define test_delayed_signal_handling(p, ti) \ + (unlikely(test_ti_status_flag(ti, 
TS_DELAYED_SIG_HANDLING)) && \ + !__fatal_signal_pending(p)) +#else +#define set_delayed_signal_handling(ti) +#define clear_delayed_signal_handling(ti) +#define test_delayed_signal_handling(p, ti) (false) +#endif + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* It is native paravirtualized guest kernel */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* It is paravirtualized kernel (host and guest) */ +#include +#else /* !CONFIG_KVM_GUEST_KERNEL && !CONFIG_PARAVIRT_GUEST */ +/* native kernel with virtualization support */ +/* native kernel without virtualization support */ + +static inline int signal_setup(struct pt_regs *regs) +{ + return native_signal_setup(regs); +} + +static inline int complete_long_jump(struct pt_regs *regs) +{ + return native_complete_long_jump(regs); +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + + +# endif /* __ASSEMBLY__ */ + +#endif /* _E2K_SIGNAL_H_ */ diff --git a/arch/e2k/include/asm/simul.h b/arch/e2k/include/asm/simul.h new file mode 100644 index 000000000000..56082f5836a9 --- /dev/null +++ b/arch/e2k/include/asm/simul.h @@ -0,0 +1,60 @@ +#ifndef _E2K_SIMUL_H_ +#define _E2K_SIMUL_H_ + +#include + + +#ifdef CONFIG_E2K_MACHINE +# ifdef CONFIG_E2K_SIMULATOR +# define E2K_HALT_OK() E2K_LMS_HALT_OK +# define E2K_HALT_ERROR(err_no) \ +({ \ + dump_stack(); \ + console_flush_on_panic(CONSOLE_REPLAY_ALL); \ + E2K_LMS_HALT_ERROR(err_no); \ +}) +# define BOOT_E2K_HALT_OK() E2K_LMS_HALT_OK +# define BOOT_E2K_HALT_ERROR(err_no) E2K_LMS_HALT_ERROR(err_no) +# else +# define E2K_HALT_OK() {while(1);} +# define E2K_HALT_ERROR(err_no) panic("HALT_ERROR(%d)\n", err_no) +# define BOOT_E2K_HALT_OK() {while(1);} +# define BOOT_E2K_HALT_ERROR(err_no) boot_panic("HALT_ERROR(%d)\n", err_no) +# endif +#else /* ! 
CONFIG_E2K_MACHINE */ +# define E2K_HALT_OK() \ +({ \ + if (NATIVE_IS_MACHINE_SIM) { \ + E2K_LMS_HALT_OK; \ + } \ + while (1) { \ + } \ +}) +# define E2K_HALT_ERROR(err_no) \ +({ \ + if (NATIVE_IS_MACHINE_SIM) { \ + dump_stack(); \ + console_flush_on_panic(CONSOLE_REPLAY_ALL); \ + E2K_LMS_HALT_ERROR(err_no); \ + } \ + panic("HALT_ERROR(%d)\n", err_no); \ +}) +# define BOOT_E2K_HALT_OK() \ +({ \ + if (BOOT_NATIVE_IS_MACHINE_SIM) { \ + E2K_LMS_HALT_OK; \ + } \ + while (1) { \ + } \ +}) +# define BOOT_E2K_HALT_ERROR(err_no) \ +({ \ + if (BOOT_NATIVE_IS_MACHINE_SIM) { \ + E2K_LMS_HALT_ERROR(err_no); \ + } else { \ + boot_panic("HALT_ERROR(%d)\n", err_no); \ + } \ +}) +#endif /* CONFIG_E2K_MACHINE */ + +#endif /* _E2K_SIMUL_H_ */ diff --git a/arch/e2k/include/asm/smp-boot.h b/arch/e2k/include/asm/smp-boot.h new file mode 100644 index 000000000000..155b93682cd1 --- /dev/null +++ b/arch/e2k/include/asm/smp-boot.h @@ -0,0 +1,52 @@ +#ifndef __ASM_SMP_BOOT_H +#define __ASM_SMP_BOOT_H + +#include + +#include + +#ifndef ASSEMBLY + +#ifdef CONFIG_SMP + +extern struct task_struct *idle_tasks[NR_CPUS]; + +extern void e2k_start_secondary_switched_stacks(int cpuid, int cpu); +extern void native_setup_secondary_task(int cpu); + +static inline void +native_ap_switch_to_init_stack(e2k_addr_t stack_base, int cpuid, int cpu) +{ + NATIVE_SWITCH_TO_KERNEL_STACK( + stack_base + KERNEL_P_STACK_OFFSET, KERNEL_P_STACK_SIZE, + stack_base + KERNEL_PC_STACK_OFFSET, KERNEL_PC_STACK_SIZE, + stack_base + KERNEL_C_STACK_OFFSET, KERNEL_C_STACK_SIZE); + + E2K_JUMP_WITH_ARGUMENTS(e2k_start_secondary_switched_stacks, 2, + cpuid, cpu); +} + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#else /* ! CONFIG_PARAVIRT_GUEST && ! 
CONFIG_KVM_GUEST_KERNEL */ +/* it is native kernel without virtualization support */ +/* or native kernel with virtualization support */ +static inline void +ap_switch_to_init_stack(e2k_addr_t stack_base, int cpuid, int cpu) +{ + native_ap_switch_to_init_stack(stack_base, cpuid, cpu); +} + +static inline void setup_secondary_task(int cpu) +{ + native_setup_secondary_task(cpu); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* CONFIG_SMP */ +#endif /* !ASSEMBLY */ +#endif /* __ASM_SMP_BOOT_H */ diff --git a/arch/e2k/include/asm/smp.h b/arch/e2k/include/asm/smp.h new file mode 100644 index 000000000000..a70d82c5e3e6 --- /dev/null +++ b/arch/e2k/include/asm/smp.h @@ -0,0 +1,240 @@ +#ifndef __ASM_SMP_H +#define __ASM_SMP_H + +/* + * We need the APIC definitions automatically as part of 'smp.h' + */ +#ifndef ASSEMBLY +#include +#include +#include +#include +#endif + +#ifdef CONFIG_L_LOCAL_APIC +#ifndef ASSEMBLY +#include +#include +#include +#include +#include +#include +#include +#endif /* !ASSEMBLY */ +#endif /* CONFIG_L_LOCAL_APIC */ + +#ifdef CONFIG_SMP +#ifndef ASSEMBLY + +typedef struct tlb_page { + struct vm_area_struct *vma; + e2k_addr_t addr; +} tlb_page_t; + +typedef struct tlb_range { + struct mm_struct *mm; + e2k_addr_t start; + e2k_addr_t end; +} tlb_range_t; + +typedef struct icache_page { + struct vm_area_struct *vma; + struct page *page; +} icache_page_t; + +struct call_data_struct { + void (*func) (void *info); + void *info; + atomic_t started; + atomic_t finished; + int wait; +}; + +/* + * Private routines/data + */ + +extern atomic_t cpu_present_num; +extern unsigned long smp_invalidate_needed; +extern int pic_mode; +extern cpumask_t callin_go; + +extern void e2k_start_secondary(int cpuid); +extern void start_secondary_resume(int cpuid, int cpu); +extern void wait_for_startup(int cpuid, int hotplug); +extern void smp_send_reschedule(int cpu); +extern void arch_send_call_function_single_ipi(int cpu); +extern void 
arch_send_call_function_ipi_mask(const struct cpumask *mask); +extern void smp_send_refresh(void); + +#ifdef CONFIG_DATA_BREAKPOINT +typedef struct hw_data_bp { + void *address; + int size; + bool write; + bool read; + bool stop; + int cp_num; +} hw_data_bp_t; +extern atomic_t hw_data_breakpoint_num; +#define DATA_BREAKPOINT_ON (atomic_read(&hw_data_breakpoint_num) >= 0) + +extern void smp_set_data_breakpoint(void *address, u64 size, + bool write, bool read, bool stop, const int cp_num); +extern int smp_reset_data_breakpoint(void *address); +#else /* ! CONFIG_DATA_BREAKPOINT */ +#define DATA_BREAKPOINT_ON false +#endif /* CONFIG_DATA_BREAKPOINT */ + +extern void native_wait_for_cpu_booting(void); +extern void native_wait_for_cpu_wake_up(void); +extern int native_activate_cpu(int vcpu_id); +extern int native_activate_all_cpus(void); + +#ifdef CONFIG_RECOVERY +extern int cpu_recover(unsigned int cpu); +extern void smp_prepare_boot_cpu_to_recover(void); +extern void smp_prepare_cpus_to_recover(unsigned int max_cpus); +extern void smp_cpus_recovery_done(unsigned int max_cpus); +#endif /* CONFIG_RECOVERY */ + +/* + * General functions that each host system must provide. + */ + +/* + * This function is needed by all SMP systems. It must _always_ be valid + * from the initial startup. + */ +register unsigned long long __cpu_preempt_reg DO_ASM_GET_GREG_MEMONIC( + SMP_CPU_ID_GREG); +#define raw_smp_processor_id() ((unsigned int) __cpu_preempt_reg) + +#define set_smp_processor_id(cpu) \ +do { \ + __cpu_preempt_reg = (__cpu_preempt_reg & 0xffffffff00000000ull) | \ + ((u64) (u32) (cpu)); \ +} while (0) + +#endif /* !ASSEMBLY */ + +#define NO_PROC_ID 0xFF /* No processor magic marker */ + +/* + * This magic constant controls our willingness to transfer + * a process across CPUs. Such a transfer incurs misses on the L1 + * cache, and on a P6 or P5 with multiple L2 caches L2 hits. My + * gut feeling is this will vary by board in value. 
For a board + * with separate L2 cache it probably depends also on the RSS, and + * for a board with shared L2 cache it ought to decay fast as other + * processes are run. + */ + +#define PROC_CHANGE_PENALTY 15 /* Schedule penalty */ + +#else /* ! CONFIG_SMP */ +static inline void e2k_start_secondary(int cpuid) { } + +#define native_wait_for_cpu_booting() +#define native_wait_for_cpu_wake_up() +#define native_activate_cpu(vcpu_id) 0 +#define native_activate_all_cpus(void) 0 + +#define DATA_BREAKPOINT_ON false + +#endif /* CONFIG_SMP */ + +#ifndef ASSEMBLY + +extern int hard_smp_processor_id(void); + +#endif /* ! ASSEMBLY */ + +#ifdef CONFIG_HOTPLUG_CPU +/* Upping and downing of CPUs */ +extern int __cpu_disable (void); +extern void __cpu_die (unsigned int cpu); +#endif /* CONFIG_HOTPLUG_CPU */ + +/* this description from include/linux/smp.h */ +/* do not forgot update here, if will be updated there */ +enum { + CSD_FLAG_LOCK = 0x01, + CSD_FLAG_SYNCHRONOUS = 0x02, + CSD_FLAG_LOCK_ASYNC = 0x10, +}; + +#if defined(CONFIG_VIRTUALIZATION) +#include + +extern void native_csd_lock_wait(call_single_data_t *csd); +extern void native_csd_lock(call_single_data_t *csd); +extern void native_arch_csd_lock_async(call_single_data_t *csd); +extern void native_csd_unlock(call_single_data_t *csd); +#endif /* CONFIG_VIRTUALIZATION */ + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#else /* ! CONFIG_PARAVIRT_GUEST && ! 
CONFIG_KVM_GUEST_KERNEL */ +/* it is native kernel without virtualization support */ +/* or native kernel with virtualization support */ +static inline void +wait_for_cpu_booting(void) +{ + native_wait_for_cpu_booting(); +} +static inline void +wait_for_cpu_wake_up(void) +{ + native_wait_for_cpu_wake_up(); +} +static inline int +activate_cpu(int cpu_id) +{ + return native_activate_cpu(cpu_id); +} +static inline int +activate_all_cpus(void) +{ + return native_activate_all_cpus(); +} + +#if defined(CONFIG_VIRTUALIZATION) +static inline void csd_lock_wait(call_single_data_t *data) +{ + native_csd_lock_wait(data); +} +static inline void csd_lock(call_single_data_t *data) +{ + native_csd_lock(data); +} +static inline void arch_csd_lock_async(call_single_data_t *csd) +{ + native_arch_csd_lock_async(csd); +} +static inline void csd_unlock(call_single_data_t *data) +{ + native_csd_unlock(data); +} +#endif /* CONFIG_VIRTUALIZATION */ + +static inline void +setup_local_pic_virq(unsigned int cpu) +{ + /* native and host kernel does not use virtual IRQs */ + /* and its handlers */ +} +static inline void +startup_local_pic_virq(unsigned int cpuid) +{ + /* native and host kernel does not use virtual IRQs */ + /* and its handlers */ +} + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __ASM_SMP_H */ diff --git a/arch/e2k/include/asm/socket.h b/arch/e2k/include/asm/socket.h new file mode 100644 index 000000000000..6b71384b9d8b --- /dev/null +++ b/arch/e2k/include/asm/socket.h @@ -0,0 +1 @@ +#include diff --git a/arch/e2k/include/asm/sockios.h b/arch/e2k/include/asm/sockios.h new file mode 100644 index 000000000000..def6d4746ee7 --- /dev/null +++ b/arch/e2k/include/asm/sockios.h @@ -0,0 +1 @@ +#include diff --git a/arch/e2k/include/asm/sparsemem.h b/arch/e2k/include/asm/sparsemem.h new file mode 100644 index 000000000000..63144ee52d89 --- /dev/null +++ b/arch/e2k/include/asm/sparsemem.h @@ -0,0 +1,10 @@ +#ifndef _ASM_E2K_SPARSEMEM_H +#define _ASM_E2K_SPARSEMEM_H + +#ifdef 
CONFIG_SPARSEMEM + +# define SECTION_SIZE_BITS 28 +# define MAX_PHYSMEM_BITS CONFIG_E2K_PA_BITS + +#endif /* CONFIG_SPARSEMEM */ +#endif /* _ASM_E2K_SPARSEMEM_H */ diff --git a/arch/e2k/include/asm/spinlock.h b/arch/e2k/include/asm/spinlock.h new file mode 100644 index 000000000000..b9900f68c06e --- /dev/null +++ b/arch/e2k/include/asm/spinlock.h @@ -0,0 +1,22 @@ +#ifndef _ASM_E2K_SPINLOCK_H +#define _ASM_E2K_SPINLOCK_H + +/* How long a lock should spin before we consider blocking */ +#define SPIN_THRESHOLD (1 << 11) + +#include + +/* + * Read-write spinlocks, allowing multiple readers + * but only one writer. + * + * NOTE! it is quite common to have readers in interrupts + * but no interrupt writers. For those circumstances we + * can "mix" irq-safe locks - any writer needs to get a + * irq-safe write-lock, but readers can get non-irqsafe + * read-locks. + */ + +#include + +#endif /* _ASM_E2K_SPINLOCK_H */ diff --git a/arch/e2k/include/asm/spinlock_types.h b/arch/e2k/include/asm/spinlock_types.h new file mode 100644 index 000000000000..e4100ac34159 --- /dev/null +++ b/arch/e2k/include/asm/spinlock_types.h @@ -0,0 +1,10 @@ +#ifndef _ASM_E2K_SPINLOCK_TYPES_H +#define _ASM_E2K_SPINLOCK_TYPES_H + +#include + +#include + +#include + +#endif /* _ASM_E2K_SPINLOCK_TYPES_H */ diff --git a/arch/e2k/include/asm/stacks.h b/arch/e2k/include/asm/stacks.h new file mode 100644 index 000000000000..0f1a4cc64a7e --- /dev/null +++ b/arch/e2k/include/asm/stacks.h @@ -0,0 +1,177 @@ +/* + * include/asm-e2k/stack.h + * + * Copyright 2004 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_STACKS_H +#define _E2K_STACKS_H + +#include +#include /* virtualization support */ + +/* + * User's high address space is reserved for tag memory mapping. 
+ * Tags of all user virtual pages are mapped to user virtual space + * To each quad-word of data (16 bytes) corresponds 1 byte of tag + * Virtual pages of tags live at the end of virtual user space + * + * 0x0000 0000 0000 1000 - 0x0000 0100 0000 0000 All user virtula space from + * 'text' start to 'TASK_SIZE' + * 0x0000 00f0 0000 0000 - 0x0000 00ff ffff ffff Tags memory virtual space + */ +#define USER_TAG_MEM_SIZE (TASK_SIZE / 16) /* 1/16 of */ + /* total user */ + /* memory */ + /* size */ +#define USER_TAG_MEM_BASE \ + (TASK_SIZE - USER_VPTB_BASE_SIZE - USER_TAG_MEM_SIZE) + +/* + * User's high address below tags memory space is reserved for CUT. + */ + +#define USER_CUT_AREA_SIZE (PAGE_SIZE) +#define USER_CUT_AREA_BASE (USER_TAG_MEM_BASE - USER_CUT_AREA_SIZE) + +#ifndef __ASSEMBLY__ +/* + * The structure define state of all e2k stacks: + * hardware pointers and registers + */ + +typedef struct e2k_stacks { +#ifdef CONFIG_KVM_HOST_MODE + /* gthread_info uses these fields */ + e2k_addr_t u_top; + e2k_usd_lo_t u_usd_lo; + e2k_usd_hi_t u_usd_hi; +#endif + e2k_addr_t top; /* top address (same as SBR pointer) */ + e2k_usd_lo_t usd_lo; /* curent state of stack pointer */ + e2k_usd_hi_t usd_hi; /* register: base & size */ + e2k_psp_lo_t psp_lo; /* Procedure stack pointer: */ + e2k_psp_hi_t psp_hi; /* base & index & size */ + e2k_pcsp_lo_t pcsp_lo; /* Procedure chain stack */ + e2k_pcsp_hi_t pcsp_hi; /* pointer: base & index & size */ + /* %px[c]sp.ind in this structure holds includes %px[c]shtp part, + * and saved %px[c]shtp values show how much of user stack has + * been SPILLed to kernel. This is done for convenience - add + * %px[c]shtp just once instead of pretty much always. 
*/ + e2k_pshtp_t pshtp; + e2k_pcshtp_t pcshtp; +} e2k_stacks_t; + +typedef struct data_stack { + e2k_addr_t bottom; /* data stack bottom */ + e2k_size_t size; /* data stack size */ + e2k_addr_t top; /* Top of the stack (as SBR) */ +} data_stack_t; + +/* + * Hardware stacks desription: procedure and chain stacks + * Both stacks have resident part at current top of the stack to ensure + * kernel function execution while trap and system calls handling + */ +typedef struct hw_stack_area { + void *base; /* Hardware stack base pointer */ + e2k_size_t size; /* Hardware stack total size */ +} hw_stack_area_t; + +typedef struct hw_stack { + hw_stack_area_t ps; /* Current procedure stack area */ + hw_stack_area_t pcs; /* Current chain stack area */ +} hw_stack_t; + +typedef struct old_pcs_area { + void *base; /* Hardware stack base pointer */ + long size; /* Hardware stack total size */ + struct list_head list_entry; +} old_pcs_area_t; + +#define GET_PS_BASE(hw_stacks) ((hw_stacks)->ps.base) +#define GET_PCS_BASE(hw_stacks) ((hw_stacks)->pcs.base) + +#define CURRENT_PS_BASE() (current_thread_info()->u_hw_stack.ps.base) +#define CURRENT_PCS_BASE() (current_thread_info()->u_hw_stack.pcs.base) + +#define SET_PS_BASE(hw_stacks, val) (GET_PS_BASE(hw_stacks) = (val)) +#define SET_PCS_BASE(hw_stacks, val) (GET_PCS_BASE(hw_stacks) = (val)) + +#endif /* ! __ASSEMBLY__ */ + +/* + * Data and hardware user stacks descriptions. + */ +#define USER_P_STACKS_MAX_SIZE E2K_ALL_STACKS_MAX_SIZE /* 128 Gbytes */ +#define USER_PC_STACKS_MAX_SIZE USER_P_STACKS_MAX_SIZE + +#define _min_(a, b) ((a) < (b) ? (a) : (b)) +#define USER_P_STACKS_BASE (USER_CUT_AREA_BASE - USER_P_STACKS_MAX_SIZE) +#define USER_PC_STACKS_BASE USER_P_STACKS_BASE +#define USER_HW_STACKS_BASE _min_(USER_P_STACKS_BASE, USER_PC_STACKS_BASE) + +#define USER_P_STACK_INIT_SIZE (4 * PAGE_SIZE) +#define USER_PC_STACK_INIT_SIZE PAGE_SIZE + +#define USER_C_STACK_BYTE_INCR (4 * PAGE_SIZE) +/* Software user stack for 64-bit mode. 
*/ +#define USER64_STACK_TOP USER_PC_STACKS_BASE +/* Software user stack for 32-bit mode. */ +#define USER32_STACK_TOP TASK32_SIZE + +/* + * This macro definition is to limit deafault user stack size + * (see asm/resource.h) + */ +#define E2K_STK_LIM USER64_MAIN_C_STACK_SIZE + +/* + * Kernel stack ((software & hardware) descriptions + */ +#define K_DATA_GAP_SIZE E2K_ALIGN_STACK +#define KERNEL_C_STACK_SIZE (5 * PAGE_SIZE - K_DATA_GAP_SIZE) + +/* Maybe implement do_softirq_own_stack() and reduce this to 7 pages + * Having separate stack for hardware interrupts IRQ handling will allow to + * reduce this further - prbly to ~4 pages. */ +#define KERNEL_P_STACK_SIZE (9 * PAGE_SIZE) +#define NATIVE_KERNEL_P_STACK_PAGES (KERNEL_P_STACK_SIZE / PAGE_SIZE) + +#define KERNEL_PC_STACK_SIZE \ + (2 * PAGE_SIZE) /* 8 Kbytes (256 functions calls) */ +#define NATIVE_KERNEL_PC_STACK_PAGES (KERNEL_PC_STACK_SIZE / PAGE_SIZE) + +#define MAX_KERNEL_P_STACK_PAGES \ + (NATIVE_KERNEL_P_STACK_PAGES + VIRT_KERNEL_P_STACK_PAGES) +#define MAX_KERNEL_PC_STACK_PAGES \ + (NATIVE_KERNEL_PC_STACK_PAGES + VIRT_KERNEL_PC_STACK_PAGES) +#define MAX_KERNEL_HW_STACK_PAGES \ + (MAX_KERNEL_P_STACK_PAGES + MAX_KERNEL_PC_STACK_PAGES) + +/* + * 3 kernel stacks are allocated together and lie in memory + * in the following order: + * + * ------------------------------------------------------------------> higher + * K_DATA_GAP_SIZE | DATA | PROCEDURE | PAGE_SIZE | CHAIN | PAGE_SIZE + * + * Two unused pages are needed to properly handle hardware + * stack overflow: on overflow PSR.sge checking is disabled + * and stack will be spilled after its own boundary and then + * kernel_hw_stack_fatal_error() will print full stack. + * + * Arch-independent part expects data stack to be the first + * one (see end_of_stack()), that's also the reason to skip + * the first E2K_ALIGN_STACK bytes to keep the magic value + * intact. 
+ */ +#define KERNEL_STACKS_SIZE (K_DATA_GAP_SIZE + KERNEL_C_STACK_SIZE + \ + KERNEL_P_STACK_SIZE + KERNEL_PC_STACK_SIZE + 2 * PAGE_SIZE) +#define KERNEL_C_STACK_OFFSET K_DATA_GAP_SIZE +#define KERNEL_P_STACK_OFFSET (KERNEL_C_STACK_OFFSET + KERNEL_C_STACK_SIZE) +#define KERNEL_PC_STACK_OFFSET (KERNEL_P_STACK_OFFSET + \ + KERNEL_P_STACK_SIZE + PAGE_SIZE) + +#endif /* _E2K_STACKS_H */ + diff --git a/arch/e2k/include/asm/stacktrace.h b/arch/e2k/include/asm/stacktrace.h new file mode 100644 index 000000000000..7aa5cb522635 --- /dev/null +++ b/arch/e2k/include/asm/stacktrace.h @@ -0,0 +1,4 @@ +#ifndef _ASM_E2K_STACKTRACE_H +#define _ASM_E2K_STACKTRACE_H + +#endif /* _ASM_E2K_STACKTRACE_H */ diff --git a/arch/e2k/include/asm/stat.h b/arch/e2k/include/asm/stat.h new file mode 100644 index 000000000000..36e5dac79e7c --- /dev/null +++ b/arch/e2k/include/asm/stat.h @@ -0,0 +1,30 @@ +#ifndef _E2K_STAT_H_ +#define _E2K_STAT_H_ + +#include + + +/* + * "struct stat64" should be the same as glibc "struct stat64" + */ +struct stat64 { + unsigned long st_dev; + unsigned long st_ino; + unsigned int st_mode; + unsigned int st_nlink; + unsigned int st_uid; + unsigned int st_gid; + unsigned long st_rdev; + unsigned long st_size; + unsigned int st_blksize; + unsigned int __unused1; + unsigned long st_blocks; + int st_atime; + unsigned int st_atime_nsec; + int st_mtime; + unsigned int st_mtime_nsec; + int st_ctime; + unsigned int st_ctime_nsec; +}; + +#endif /* _E2K_STAT_H_ */ diff --git a/arch/e2k/include/asm/statfs.h b/arch/e2k/include/asm/statfs.h new file mode 100644 index 000000000000..8f2a792d2841 --- /dev/null +++ b/arch/e2k/include/asm/statfs.h @@ -0,0 +1,6 @@ +#ifndef _E2K_STATFS_H_ +#define _E2K_STATFS_H_ + +#include + +#endif /* _E2K_STATFS_H_ */ diff --git a/arch/e2k/include/asm/string.h b/arch/e2k/include/asm/string.h new file mode 100644 index 000000000000..a38a953d3dad --- /dev/null +++ b/arch/e2k/include/asm/string.h @@ -0,0 +1,522 @@ +#ifndef _E2K_STRING_H_ +#define 
_E2K_STRING_H_ + +#include + +#include + +#define __HAVE_ARCH_STRNLEN +extern size_t strnlen(const char *s, size_t count) __pure; + +#define __HAVE_ARCH_STRLEN +extern size_t strlen(const char *s) __pure; + +#define __HAVE_ARCH_MEMMOVE +extern void *memmove(void *dst, const void *src, size_t count); + +#define __HAVE_ARCH_MEMCMP +extern int __memcmp(const void *cs, const void *ct, size_t count) __pure; +#define memcmp(dst, src, n) _memcmp((dst), (src), (n)) +static inline int _memcmp(const void *s1, const void *s2, size_t n) +{ + if (__builtin_constant_p(n)) { + u64 v1, v2; + if (n == 16) { + v1 = *(u64 *) s1; + v2 = *(u64 *) s2; + u64 v21 = *(u64 *) (s1 + 8); + u64 v22 = *(u64 *) (s2 + 8); + if (v1 != v2) + return (__swab64(v1) > __swab64(v2)) ? 1 : -1; + if (v21 == v22) + return 0; + return (__swab64(v21) > __swab64(v22)) ? 1 : -1; + } + if (n == 8) { + v1 = *(u64 *) s1; + v2 = *(u64 *) s2; + if (v1 == v2) + return 0; + return (__swab64(v1) > __swab64(v2)) ? 1 : -1; + } + if (n == 4) { + v1 = *(u32 *) s1; + v2 = *(u32 *) s2; + if (v1 == v2) + return 0; + return (__swab32(v1) > __swab32(v2)) ? 
1 : -1; + } + if (n == 2) { + v1 = *(u16 *) s1; + v2 = *(u16 *) s2; + return (u32) __swab16(v1) - (u32) __swab16(v2); + } + if (n == 1) { + v1 = *(u8 *) s1; + v2 = *(u8 *) s2; + return v1 - v2; + } + } + + E2K_PREFETCH_L1_SPEC(s1); + E2K_PREFETCH_L1_SPEC(s1); + return __memcmp(s1, s2, n); +} + +#define __HAVE_ARCH_MEMSET +#ifdef __HAVE_ARCH_MEMSET +extern void __memset(void *, long, size_t); +#if defined E2K_P2V && !defined CONFIG_BOOT_E2K +extern void *boot_memset(void *s_va, int c, size_t count); +# define memset boot_memset +#else +# define memset(dst, c, n) _memset(dst, c, n, __alignof(*(dst))) +#endif +static inline void *_memset(void *dst, int c, size_t n, + const unsigned long dst_align) +{ + u64 cc; + + if (__builtin_constant_p(c)) { + cc = (u8) c; + cc |= cc << 8; + cc |= cc << 16; + cc |= cc << 32; + } else { + cc = __builtin_e2k_pshufb(c, c, 0); + } + + if (__builtin_constant_p(n) && dst_align >= 8 && n < 136) { + /* Inline small aligned memset's */ + u64 *l_dst = dst; + + if (n >= 8) + l_dst[0] = cc; + if (n >= 16) + l_dst[1] = cc; + if (n >= 24) + l_dst[2] = cc; + if (n >= 32) + l_dst[3] = cc; + if (n >= 40) + l_dst[4] = cc; + if (n >= 48) + l_dst[5] = cc; + if (n >= 56) + l_dst[6] = cc; + if (n >= 64) + l_dst[7] = cc; + if (n >= 72) + l_dst[8] = cc; + if (n >= 80) + l_dst[9] = cc; + if (n >= 88) + l_dst[10] = cc; + if (n >= 96) + l_dst[11] = cc; + if (n >= 104) + l_dst[12] = cc; + if (n >= 112) + l_dst[13] = cc; + if (n >= 120) + l_dst[14] = cc; + if (n >= 128) + l_dst[15] = cc; + + /* Set the tail */ + if (n & 4) + *(u32 *) (dst + (n & ~0x7UL)) = cc; + if (n & 2) + *(u16 *) (dst + (n & ~0x3UL)) = cc; + if (n & 1) + *(u8 *) (dst + (n & ~0x1UL)) = cc; + } else if (__builtin_constant_p(n) && n <= 24) { + int i; + /* Inline small memset's */ + char *c_dst = dst; + for (i = 0; i < n; i++) + c_dst[i] = c; + } else { + __memset(dst, cc, n); + } + + return dst; +} +#endif /* __HAVE_ARCH_MEMSET */ + +#define __HAVE_ARCH_MEMCPY +#ifdef __HAVE_ARCH_MEMCPY 
+#define memcpy_nocache memcpy_nocache +extern void memcpy_nocache(void *dst, const void *src, size_t n); +extern void *__memcpy(void *dst, const void *src, size_t n); +#if defined E2K_P2V && !defined CONFIG_BOOT_E2K +extern void *boot_memcpy(void *dest_va, const void *src_va, size_t count); +# define memcpy boot_memcpy +#else +# define memcpy(dst, src, n) _memcpy(dst, src, n, __alignof(*(dst))) +#endif +static inline void *_memcpy(void *__restrict dst, + const void *__restrict src, + size_t n, const unsigned long dst_align) +{ + /* + * As measurements show, an unaligned dst causes a 20x slowdown, + * but unaligned src causes only a 2x slowdown. + * + * We can manually assure dst's alignment, but what about src? + * + * Consider the following situations: + * 1) src is 8 bytes aligned. Just do the copy. + * 2) src is 4 bytes aligned. Copying with unaligned loads will cause + * a 100% slowdown, the same as copying with 4-bytes words. So we can + * treat this case the same way as the previous one. + * 3) src is 2-bytes aligned or unaligned. Copying with 2-bytes + * (1-byte for unaligned) will cause a 4x slowdown (8x slowdown for + * unaligned), so copying with unaligned doublewords is preferred + * as it causes only 2x slowdown. + * + * To sum it up: the best way to copy is to assure dst's 8-bytes + * alignment and do the copy with 8-bytes words. 
+ */ + + if (__builtin_constant_p(n) && dst_align >= 8 && n < 136) { + /* Inline small aligned memcpy's */ + const u64 *__restrict l_src = src; + u64 *__restrict l_dst = dst; + + if (n >= 8) + l_dst[0] = l_src[0]; + if (n >= 16) + l_dst[1] = l_src[1]; + if (n >= 24) + l_dst[2] = l_src[2]; + if (n >= 32) + l_dst[3] = l_src[3]; + if (n >= 40) + l_dst[4] = l_src[4]; + if (n >= 48) + l_dst[5] = l_src[5]; + if (n >= 56) + l_dst[6] = l_src[6]; + if (n >= 64) + l_dst[7] = l_src[7]; + if (n >= 72) + l_dst[8] = l_src[8]; + if (n >= 80) + l_dst[9] = l_src[9]; + if (n >= 88) + l_dst[10] = l_src[10]; + if (n >= 96) + l_dst[11] = l_src[11]; + if (n >= 104) + l_dst[12] = l_src[12]; + if (n >= 112) + l_dst[13] = l_src[13]; + if (n >= 120) + l_dst[14] = l_src[14]; + if (n >= 128) + l_dst[15] = l_src[15]; + + /* Copy the tail */ + if (n & 4) + *(u32 *) (dst + (n & ~0x7UL)) = + *(u32 *) (src + (n & ~0x7UL)); + if (n & 2) + *(u16 *) (dst + (n & ~0x3UL)) = + *(u16 *) (src + (n & ~0x3UL)); + if (n & 1) + *(u8 *) (dst + (n & ~0x1UL)) = + *(u8 *) (src + (n & ~0x1UL)); + } else { + E2K_PREFETCH_L2_SPEC(src); + __memcpy(dst, src, n); + } + + return dst; +} +#endif /* __HAVE_ARCH_MEMCPY */ + +extern unsigned long __recovery_memset_8(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode); +/* Since iset v5 we can use this with 16-bytes aligned addr and len */ +extern unsigned long __recovery_memset_16(void *addr, u64 val, u64 tag, + size_t len, u64 strqp_opcode); +#define recovery_memset_8(addr, val, tag, len, strd_opcode) \ +({ \ + u64 ___strd_opcode = (strd_opcode); \ + unsigned long __ret; \ + __ret = __recovery_memset_8((addr), (val), (tag), (len), \ + ___strd_opcode); \ + if (HAS_HWBUG_WC_DAM && \ + (((___strd_opcode >> LDST_REC_OPC_MAS_SHIFT) & \ + MAS_BYPASS_ALL_CACHES) == MAS_BYPASS_ALL_CACHES)) \ + __E2K_WAIT(_st_c); \ + __ret; \ +}) + +extern void __tagged_memcpy_8(void *dst, const void *src, size_t len); + +/* + * recovery_memcpy_8() - optimized memory copy using strd/ldrd 
instructions + * + * Maximum allowed size is 8 Kb (it can copy bigger blocks, but performance + * will hurt because of bad prefetching policy). + * + * All parameters must be 8-bytes aligned (but if tags are not copied + * then dst and src can be unaligned). + * + * For the best performance it is recommended to copy memory with 8192 + * bytes blocks. + * + * 'strd_opcode' can be used to specify cache policy: usually L1 cache + * is disabled to avoid its pollution (disabling L2 cache slows copying + * of blocks larger than the size of the memory buffers). + * + * When copying from/to physical/IO memory, disable prefetch through the + * last argument. + * + * On success returns len. On error returns the number of bytes actually + * copied, which can be a little less than the actual copied size. + * (For error returns to work the page fault handler should be set up + * with SET_USR_PFAULT("recovery_memcpy_fault")). + */ +extern unsigned long __recovery_memcpy_8(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch); +/* Since iset v5 we can use this with 16-bytes aligned src, dst and len */ +extern unsigned long __recovery_memcpy_16(void *dst, const void *src, size_t len, + unsigned long strqp_opcode, unsigned long ldrqp_opcode, + int prefetch); +#ifdef E2K_P2V +# define HAS_HWBUG_WC_DAM (IS_ENABLED(CONFIG_CPU_E2S) || \ + IS_ENABLED(CONFIG_CPU_E8C) || IS_ENABLED(CONFIG_CPU_E8C2)) +#else +# define HAS_HWBUG_WC_DAM cpu_has(CPU_HWBUG_WC_DAM) +#endif +#define recovery_memcpy_8(dst, src, len, strd_opcode, ldrd_opcode, prefetch) \ +({ \ + unsigned long __ret; \ + u64 ___strd_opcode = (strd_opcode); \ + __ret = __recovery_memcpy_8((dst), (src), (len), ___strd_opcode, \ + (ldrd_opcode), (prefetch)); \ + if (HAS_HWBUG_WC_DAM && \ + (((___strd_opcode >> LDST_REC_OPC_MAS_SHIFT) & \ + MAS_BYPASS_ALL_CACHES) == MAS_BYPASS_ALL_CACHES)) \ + __E2K_WAIT(_st_c); \ + __ret; \ +}) + +/** + * optimized copy memory along with tags + 
* using privileged LD/ST recovery operations + */ +static inline unsigned long native_fast_tagged_memory_copy( + void *dst, const void *src, size_t len, + unsigned long strd_opcode, + unsigned long ldrd_opcode, int prefetch) +{ + unsigned long ret; + ldst_rec_op_t st_op, ld_op; + + AW(st_op) = strd_opcode; + AW(ld_op) = ldrd_opcode; + + if (CONFIG_CPU_ISET >= 5 && !st_op.fmt_h && !ld_op.fmt_h && + st_op.fmt == LDST_QWORD_FMT && ld_op.fmt == LDST_QWORD_FMT && + !((u64) dst & 0xf) && !((u64) src & 0xf) && !(len & 0xf)) { + ret = __recovery_memcpy_16(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); + } else { + ret = __recovery_memcpy_8(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); + } + + if (HAS_HWBUG_WC_DAM && + ((strd_opcode >> LDST_REC_OPC_MAS_SHIFT) & + MAS_BYPASS_ALL_CACHES) == MAS_BYPASS_ALL_CACHES) + __E2K_WAIT(_st_c); + + return ret; +} + +static inline unsigned long native_fast_tagged_memory_set( + void *addr, u64 val, u64 tag, size_t len, u64 strd_opcode) +{ + unsigned long ret; + ldst_rec_op_t st_op; + + AW(st_op) = strd_opcode; + + if (CONFIG_CPU_ISET >= 5 && !((u64) addr & 0xf) && !(len & 0xf) && + !st_op.fmt_h && st_op.fmt == LDST_QWORD_FMT) { + ret = __recovery_memset_16(addr, val, tag, len, strd_opcode); + } else { + ret = __recovery_memset_8(addr, val, tag, len, strd_opcode); + } + + if (HAS_HWBUG_WC_DAM && + ((strd_opcode >> LDST_REC_OPC_MAS_SHIFT) & + MAS_BYPASS_ALL_CACHES) == MAS_BYPASS_ALL_CACHES) + __E2K_WAIT(_st_c); + + return ret; +} + +#define boot_native_fast_tagged_memory_copy(...) recovery_memcpy_8(__VA_ARGS__) + +#define boot_native_fast_tagged_memory_set(...) 
recovery_memset_8(__VA_ARGS__) + +static inline int +native_fast_tagged_memory_copy_to_user(void __user *dst, const void *src, + size_t len, const struct pt_regs *regs, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + /* native kernel does not support any guests */ + return native_fast_tagged_memory_copy((void *)dst, src, len, + strd_opcode, ldrd_opcode, prefetch); +} + +static inline int +native_fast_tagged_memory_copy_from_user(void *dst, const void __user *src, + size_t len, const struct pt_regs *regs, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + /* native kernel does not support any guests */ + return native_fast_tagged_memory_copy(dst, (const void *)src, len, + strd_opcode, ldrd_opcode, prefetch); +} + +static inline unsigned long +native_extract_tags_32(u16 *dst, const void *src) +{ + NATIVE_EXTRACT_TAGS_32(dst, src); + return 0; +} + +static inline void native_tagged_memcpy_8(void *__restrict dst, + const void *__restrict src, size_t n, + const unsigned long dst_align, + const unsigned long src_align) +{ + if (__builtin_constant_p(n) && src_align >= 8 && dst_align >= 8 && + (n == 64 || n == 56 || n == 48 || n == 40 || + n == 32 || n == 24 || n == 16 || n == 8)) { + /* Inline small aligned memcpy's */ + if (n == 64) + E2K_TAGGED_MEMMOVE_64(dst, src); + else if (n == 56) + E2K_TAGGED_MEMMOVE_56(dst, src); + else if (n == 48) + E2K_TAGGED_MEMMOVE_48(dst, src); + else if (n == 40) + E2K_TAGGED_MEMMOVE_40(dst, src); + else if (n == 32) + E2K_TAGGED_MEMMOVE_32(dst, src); + else if (n == 24) + E2K_TAGGED_MEMMOVE_24(dst, src); + else if (n == 16) + E2K_TAGGED_MEMMOVE_16(dst, src); + else + E2K_TAGGED_MEMMOVE_8(dst, src); + } else { + E2K_PREFETCH_L2_SPEC(src); + + __tagged_memcpy_8(dst, src, n); + } +} + +/** + * tagged_memcpy_8() - copy memory along with tags + * + * All parameters must be 8-bytes aligned. 
+ */ +#if defined(CONFIG_PARAVIRT_GUEST) +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +#include +#else /* !CONFIG_KVM_GUEST_KERNEL && !CONFIG_PARAVIRT_GUEST */ +#define tagged_memcpy_8(dst, src, n) \ +({ \ + native_tagged_memcpy_8(dst, src, n, \ + __alignof(*(dst)), __alignof(*(src))); \ +}) +#endif /* !CONFIG_KVM_GUEST_KERNEL && !CONFIG_PARAVIRT_GUEST */ + +extern void boot_fast_memcpy(void *, const void *, size_t); +extern notrace void boot_fast_memset(void *s_va, long c, size_t count); + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host/guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native guest kernel */ +#include +#else /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ +/* it is native kernel with or without virtualization support */ +/** + * optimized copy memory along with tags + * using privileged LD/ST recovery operations + */ +static inline unsigned long +fast_tagged_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return native_fast_tagged_memory_copy(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); +} +static inline unsigned long +fast_tagged_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + return native_fast_tagged_memory_set(addr, val, tag, len, strd_opcode); +} + +static inline unsigned long +boot_fast_tagged_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return boot_native_fast_tagged_memory_copy(dst, src, len, strd_opcode, + ldrd_opcode, prefetch); +} +static inline void +boot_fast_tagged_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + boot_native_fast_tagged_memory_set(addr, val, tag, len, strd_opcode); +} +static inline unsigned long +extract_tags_32(u16 *dst, const void *src) +{ + return native_extract_tags_32(dst, src); +} + +#ifndef CONFIG_KVM_HOST_MODE +/* it is 
native kernel without virtualization support */ +static inline int +fast_tagged_memory_copy_to_user(void __user *dst, const void *src, + size_t len, const struct pt_regs *regs, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return native_fast_tagged_memory_copy_to_user(dst, src, len, regs, + strd_opcode, ldrd_opcode, prefetch); +} + +static inline int +fast_tagged_memory_copy_from_user(void *dst, const void __user *src, + size_t len, const struct pt_regs *regs, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return native_fast_tagged_memory_copy_from_user(dst, src, len, regs, + strd_opcode, ldrd_opcode, prefetch); +} +#endif /* !CONFIG_KVM_HOST_MODE */ + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* _E2K_STRING_H_ */ diff --git a/arch/e2k/include/asm/swap_info.h b/arch/e2k/include/asm/swap_info.h new file mode 100644 index 000000000000..c7f3662318b9 --- /dev/null +++ b/arch/e2k/include/asm/swap_info.h @@ -0,0 +1,16 @@ +#ifndef _E2K_SWAP_INFO_H +#define _E2K_SWAP_INFO_H + +#include + +typedef struct swap_page_info { + struct swap_page_info *next; + struct mm_struct* mm; + e2k_addr_t addr; +} swap_page_info_t; + +#define PageSwapInfo(page) (page)->swap_info +#define PageWithSwapInfo(page) (PageSwapInfo(page) != NULL) +#define ClearPageSwapInfo(page) PageSwapInfo(page) = NULL + +#endif diff --git a/arch/e2k/include/asm/switch_to.h b/arch/e2k/include/asm/switch_to.h new file mode 100644 index 000000000000..c8111b842768 --- /dev/null +++ b/arch/e2k/include/asm/switch_to.h @@ -0,0 +1,106 @@ +#ifndef _ASM_L_SWITCH_TO_H +#define _ASM_L_SWITCH_TO_H + +#ifdef __KERNEL__ + +#include +#include +#include + +extern void preempt_schedule_irq(void); + +extern long __ret_from_fork(struct task_struct *prev); + +static inline struct task_struct * +native_ret_from_fork_get_prev_task(struct task_struct *prev) +{ + return prev; +} + +static inline int +native_ret_from_fork_prepare_hv_stacks(struct pt_regs *regs) +{ + return 
0; /* nothing to do */ +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#else +/* it is native kernel without any virtualization */ +/* or it is host kernel with virtualization support */ +/* or paravirtualized host and guest kernel */ + +static inline struct task_struct * +ret_from_fork_get_prev_task(struct task_struct *prev) +{ + return native_ret_from_fork_get_prev_task(prev); +} + +static inline int +ret_from_fork_prepare_hv_stacks(struct pt_regs *regs) +{ + return native_ret_from_fork_prepare_hv_stacks(regs); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +extern struct task_struct *__switch_to(struct task_struct *prev, + struct task_struct *next); + +#define native_switch_to(prev, next, last) \ +do { \ + last = __switch_to(prev, next); \ + e2k_finish_switch(last); \ +} while (0) + +#define prepare_arch_switch(next) \ +do { \ + prefetch_nospec_range(&next->thread.sw_regs, \ + offsetof(struct sw_regs, cs_lo)); \ + /* It works under CONFIG_MCST_RT */ \ + SAVE_CURR_TIME_SWITCH_TO; \ + prepare_monitor_regs(next); \ +} while (0) + +#define e2k_finish_switch(prev) \ +do { \ + CALCULATE_TIME_SWITCH_TO; \ + finish_monitor_regs(prev); \ +} while (0) + +#ifdef CONFIG_MONITORS +#define prepare_monitor_regs(next) \ +do { \ + if (MONITORING_IS_ACTIVE) \ + store_monitors_delta(current); \ +} while (0) +#define finish_monitor_regs(prev) \ +do { \ + if (MONITORING_IS_ACTIVE) { \ + prev->thread.sw_regs.ddmcr = NATIVE_READ_DDMCR_REG(); \ + prev->thread.sw_regs.dimcr = NATIVE_READ_DIMCR_REG(); \ + process_monitors(current); \ + } \ +} while (0) +#else /* !CONFIG_MONITORS */ +#define prepare_monitor_regs(next) +#define finish_monitor_regs(next) +#endif /* CONFIG_MONITORS */ + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#else /* ! 
CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ +/* it is native kernel without virtualization support */ +/* or native kernel with virtualization support */ + +#define switch_to(prev, next, last) native_switch_to(prev, next, last) + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __KERNEL__ */ + +#endif /* _ASM_L_SWITCH_TO_H */ diff --git a/arch/e2k/include/asm/sync_pg_tables.h b/arch/e2k/include/asm/sync_pg_tables.h new file mode 100644 index 000000000000..08dfc51c7f31 --- /dev/null +++ b/arch/e2k/include/asm/sync_pg_tables.h @@ -0,0 +1,18 @@ +/* Functions to sync shadow page tables with guest page tables + * without flushing tlb. Used only by guest kernels + * + * Copyright 2021 Andrey Alekhin (alekhin_amcst.ru) + */ +#ifndef _E2K_SYNC_PG_TABLES_H +#define _E2K_SYNC_PG_TABLES_H + +#if defined(CONFIG_KVM_GUEST_KERNEL) + +#include + +#define sync_addr_range kvm_sync_addr_range +#else +#define sync_addr_range +#endif /* !CONFIG_KVM_GUEST_KERNEL */ + +#endif diff --git a/arch/e2k/include/asm/syscall.h b/arch/e2k/include/asm/syscall.h new file mode 100644 index 000000000000..11263785fc29 --- /dev/null +++ b/arch/e2k/include/asm/syscall.h @@ -0,0 +1,62 @@ +#ifndef _E2K_SYSCALLS_H +#define _E2K_SYSCALLS_H + +#include + +/* The system call number is given by the user in 1 */ +static inline int syscall_get_nr(struct task_struct *task, + struct pt_regs *regs) +{ + + return (regs && from_syscall(regs)) ? 
regs->sys_num : -1; +} + +static inline long syscall_get_return_value(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->sys_rval; +} + +static inline void syscall_set_return_value(struct task_struct *task, + struct pt_regs *regs, + int error, long val) +{ + regs->sys_rval = val; +} + +static inline void syscall_get_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned long *args) +{ + unsigned int n = 6, j; + unsigned long *p = ®s->args[1]; + + for (j = 0; j < n; j++) { + args[j] = p[j]; + } +} + +static inline void syscall_set_arguments(struct task_struct *task, + struct pt_regs *regs, + const unsigned long *args) +{ + unsigned int n = 6, j; + unsigned long *p = ®s->args[1]; + + for (j = 0; j < n; j++) { + p[j] = args[j]; + } +} + +static inline int syscall_get_arch(struct task_struct *task) +{ + return AUDIT_ARCH_E2K; +} + +static inline void syscall_rollback(struct task_struct *task, + struct pt_regs *regs) +{ + /* Do nothing */ +} + +#endif /* _E2K_SYSCALLS_H */ diff --git a/arch/e2k/include/asm/syscalls.h b/arch/e2k/include/asm/syscalls.h new file mode 100644 index 000000000000..3cfa53e5460f --- /dev/null +++ b/arch/e2k/include/asm/syscalls.h @@ -0,0 +1,526 @@ +/* + * syscalls.h - Linux syscall interfaces (arch-specific) + * + * Copyright (c) 2008 Jaswinder Singh Rajput + * + * This file is released under the GPLv2. + * See the file COPYING for more details. 
+ */ + +#ifndef _ASM_E2K_SYSCALLS_H +#define _ASM_E2K_SYSCALLS_H + +#include +#include +#include +#include + +extern unsigned long sys_mmap(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long off); +extern unsigned long sys_mmap2(unsigned long addr, unsigned long len, + int prot, int flags, int fd, long pgoff); +extern pid_t sys_clone_thread(unsigned long flags, unsigned long arg2, + unsigned long long arg3, int __user *parent_tidptr, + int __user *child_tidptr, unsigned long tls); +extern long sys_e2k_longjmp2(struct jmp_info *regs, u64 retval); +extern long sys_e2k_syswork(long syswork, long arg2, + long arg3, long arg4, long arg5); +extern long e2k_sys_execve(const char __user *filename, + const char __user *const __user *argv, + const char __user *const __user *envp); +extern long e2k_sys_execveat(int fd, const char __user *filename, + const char __user *const __user *argv, + const char __user *const __user *envp, + int flags); + +extern long sys_stat64(const char __user *filename, + struct stat64 __user *statbuf); +extern long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf); +extern long sys_lstat64(const char __user *filename, + struct stat64 __user *statbuf); + +extern asmlinkage long sys_set_backtrace(unsigned long *__user buf, + size_t count, size_t skip, unsigned long flags); +extern asmlinkage long sys_get_backtrace(unsigned long *__user buf, + size_t count, size_t skip, unsigned long flags); +extern long sys_access_hw_stacks(unsigned long mode, + unsigned long long __user *frame_ptr, char __user *buf, + unsigned long buf_size, void __user *real_size); + +extern long e2k_sys_prlimit64(pid_t pid, unsigned int resource, + const struct rlimit64 __user *new_rlim, + struct rlimit64 __user *old_rlim); +extern long e2k_sys_getrlimit(unsigned int resource, + struct rlimit __user *rlim); +#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT +extern long e2k_sys_old_getrlimit(unsigned int resource, + 
struct rlimit __user *rlim); +#endif +extern long e2k_sys_setrlimit(unsigned int resource, + struct rlimit __user *rlim); + +#ifdef CONFIG_PROTECTED_MODE +extern long protected_sys_clean_descriptors(void __user *addr, + unsigned long size, + const unsigned long flags, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + struct pt_regs *regs); +/* Flags for the function above see in arch/include/uapi/asm/protected_mode.h */ +/* 0 - clean freed descriptor list */ + +extern long protected_sys_rt_sigaction(int sig, + const void __user *ptr, void __user *ptr2, + const size_t sigsetsize); +extern long protected_sys_rt_sigaction_ex(int sig, + const void __user *ptr, void __user *ptr2, + const size_t sigsetsize); +extern long protected_sys_mq_notify(const long a1, + const unsigned long __user a2); +extern long protected_sys_timer_create(const long a1, /* clockid */ + const unsigned long __user a2, /* sevp */ + const unsigned long __user a3, /* timerid */ + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_rt_sigtimedwait(const unsigned long __user a1, + const unsigned long __user a2, const unsigned long __user a3, + const unsigned long a4); +extern long protected_sys_sysctl(const unsigned long __user a1); +extern long protected_sys_clone(const unsigned long a1, /* flags */ + const unsigned long a2, /* new_stackptr */ + const unsigned long __user a3,/* parent_tidptr */ + const unsigned long __user a4,/* child_tidptr */ + const unsigned long __user a5,/* tls */ + const unsigned long unused6, + struct pt_regs *regs); +extern long protected_sys_execve(const unsigned long __user a1,/* filename*/ + const unsigned long __user a2,/* argv[] */ + const unsigned long __user a3,/* envp[] */ + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long 
protected_sys_execveat(const unsigned long dirfd, /*a1 */ + const unsigned long __user pathname,/* a2 */ + const unsigned long __user argv, /* a3 */ + const unsigned long __user envp, /* a4 */ + const unsigned long flags, /* a5 */ + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_futex(const unsigned long __user uaddr, + const unsigned long futex_op, + const unsigned long val, + const unsigned long a4, /* timeout/val2 */ + const unsigned long __user uaddr2, + const unsigned long val3, + const struct pt_regs *regs); +extern long protected_sys_getgroups(const long a1, /* size */ + const unsigned long __user a2, /* list[] */ + const unsigned long unused3, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_setgroups(const long a1, /* size */ + const unsigned long __user a2, /* list[] */ + const unsigned long unused3, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_ipc(const unsigned long call, /* a1 */ + const long first, /* a2 */ + const unsigned long second, /* a3 */ + const unsigned long third, /* a4 */ + const unsigned long __user ptr, /* a5 */ + const long fifth, /* a6 */ + const struct pt_regs *regs); +extern long protected_sys_mmap(const unsigned long a1, /* start */ + const unsigned long a2, /* length */ + const unsigned long a3, /* prot */ + const unsigned long a4, /* flags */ + const unsigned long a5, /* fd */ + const unsigned long a6, /* offset/bytes */ + struct pt_regs *regs); +extern long protected_sys_mmap2(const unsigned long a1, /* start */ + const unsigned long a2, /* length */ + const unsigned long a3, /* prot */ + const unsigned long a4, /* flags */ + const unsigned long a5, /* fd */ + const unsigned long a6, /* offset/pages */ + struct pt_regs *regs); +extern long protected_sys_munmap(const unsigned long __user 
a1, /* addr */ + const unsigned long a2, /* length */ + const unsigned long unused3, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + struct pt_regs *regs); +extern long protected_sys_mremap(const unsigned long __user old_address, + const unsigned long old_size, + const unsigned long new_size, + const unsigned long flags, + const unsigned long new_address, + const unsigned long a6, /* unused */ + struct pt_regs *regs); +extern long protected_sys_readv(const unsigned long a1, /* fd */ + const unsigned long __user a2, /* iov */ + const unsigned long a3, /* iovcnt */ + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_semctl(const long semid, /* a1 */ + const long semnum, /* a2 */ + const long cmd, /* a3 */ + const unsigned long __user ptr, /* a4 */ + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_shmat(const long shmid, /* a1 */ + const unsigned long __user shmaddr, /* a2 */ + const long shmflg, /* a3 */ + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + struct pt_regs *regs); +extern long protected_sys_writev(const unsigned long a1, /* fd */ + const unsigned long __user a2, /* iov */ + const unsigned long a3, /* iovcnt */ + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_preadv(const unsigned long a1, /* fd */ + const unsigned long __user a2, /* iov */ + const unsigned long a3, /* iovcnt */ + const unsigned long a4, /* offset_l */ + const unsigned long a5, /* offset_h */ + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_pwritev(const unsigned long a1, /* fd */ + const unsigned long __user a2, /* iov */ + const unsigned long a3, /* iovcnt */ + const unsigned long a4, /* 
offset_l */ + const unsigned long a5, /* offset_h */ + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_preadv2(const unsigned long a1, /* fd */ + const unsigned long __user a2, /* iov */ + const unsigned long a3, /* iovcnt */ + const unsigned long a4, /* offset_l */ + const unsigned long a5, /* offset_h*/ + const unsigned long a6, /* flags */ + const struct pt_regs *regs); +extern long protected_sys_pwritev2(const unsigned long a1, /* fd */ + const unsigned long __user a2, /* iov */ + const unsigned long a3, /* iovcnt */ + const unsigned long a4, /* offset_l */ + const unsigned long a5, /* offset_h*/ + const unsigned long a6, /* flags */ + const struct pt_regs *regs); +extern long protected_sys_socketcall(const unsigned long a1, /* call */ + const unsigned long __user a2, /* args */ + const unsigned long unused3, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_sendmsg(const unsigned long sockfd, + const unsigned long __user msg, + const unsigned long flags, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_sendmmsg(const unsigned long sockfd, + const unsigned long __user msgvec, + const unsigned long vlen, + const unsigned long flags, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_recvmsg(const unsigned long socket, + const unsigned long __user message, + const unsigned long flags, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_recvmmsg(const unsigned long socket, + const unsigned long __user message, + const unsigned long vlen, + const unsigned long flags, + const unsigned long __user timeout, + const unsigned long unused6, + const struct pt_regs 
*regs); +extern long protected_sys_olduselib(const unsigned long __user a1, /* library */ + const unsigned long __user a2); /* umdd */ + /* NB> 'olduselib' is obsolete syscall; unsupported in CPU ISET V6 */ +extern long protected_sys_uselib(const unsigned long __user a1, /* library */ + const unsigned long __user a2); /* umdd */ +extern long protected_sys_sigaltstack(const stack_prot_t __user *ss_128, + stack_prot_t __user *old_ss_128, + const unsigned long unused3, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_unuselib(const unsigned long __user a1, /* addr */ + const unsigned long a2, + const unsigned long a3, + const unsigned long a4, + const unsigned long a5, + const unsigned long a6, + struct pt_regs *regs); +extern long protected_sys_get_backtrace(const unsigned long __user buf, + size_t count, size_t skip, + unsigned long flags, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_set_backtrace(const unsigned long __user buf, + size_t count, size_t skip, + unsigned long flags, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_set_robust_list( + const unsigned long __user listhead, /* a1 */ + const size_t len, /* a2 */ + const unsigned long unused3, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_get_robust_list(const unsigned long pid, + unsigned long __user head_ptr, + unsigned long __user len_ptr); +extern long protected_sys_process_vm_readv(const unsigned long pid, /*a1*/ + const struct iovec __user *lvec, /* a2 */ + unsigned long liovcnt, /* a3 */ + const struct iovec __user *rvec, /* a4 */ + unsigned long riovcnt, /* a5 */ + unsigned long flags, /* a6 */ + const struct pt_regs *regs); +extern long 
protected_sys_process_vm_writev(const unsigned long pid, /*a1*/ + const struct iovec __user *lvec, /* a2 */ + unsigned long liovcnt, /* a3 */ + const struct iovec __user *rvec, /* a4 */ + unsigned long riovcnt, /* a5 */ + unsigned long flags, /* a6 */ + const struct pt_regs *regs); +extern long protected_sys_vmsplice(int fd, /* a1 */ + const struct iovec __user *iov, /* a2 */ + unsigned long nr_segs, /*a3 */ + unsigned int flags, /* a4 */ + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_keyctl(const int operation, /* a1 */ + const unsigned long arg2, + const unsigned long arg3, + const unsigned long arg4, + const unsigned long arg5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_prctl(const int option, /* a1 */ + const unsigned long arg2, + const unsigned long arg3, + const unsigned long arg4, + const unsigned long arg5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_ioctl(const int fd, /* a1 */ + const unsigned long request, /* a2 */ + const unsigned long __user argp, /* a3 */ + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_bpf(const int cmd, /* a1 */ + const unsigned long __user attr, /* a2 */ + const unsigned int size, /* a3 */ + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_epoll_ctl(const unsigned long epfd, /* a1 */ + const unsigned long op, /* a2 */ + const unsigned long fd, /* a3 */ + void __user *event, /* a4 */ + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_epoll_wait(const unsigned long epfd, /* a1 */ + void __user *event, /* a2 */ + const long maxevents, /* a3 */ + const long timeout, /* a4 */ + const unsigned long unused5, + 
const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_epoll_pwait(const unsigned long epfd, /* a1 */ + void __user *event, /* a2 */ + const long maxevents, /* a3 */ + const long timeout, /* a4 */ + const unsigned long sigmask, /* a5 */ + const unsigned long sigsetsize, /* a6 */ + const struct pt_regs *regs); +extern long protected_sys_pselect6(const long nfds, /* a1 */ + const unsigned long readfds, /* a2 */ + const unsigned long writefds, /* a3 */ + const unsigned long exceptfds, /* a4 */ + const unsigned long timeout, /* a5 */ + const unsigned long sigmask, /* a6 */ + const struct pt_regs *regs); +extern long protected_sys_rt_sigqueueinfo(const long tgid, /* a1 */ + const long sig, /* a2 */ + const unsigned long __user uinfo, /* a3 */ + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_rt_tgsigqueueinfo(const long tgid, /* a1 */ + const long tid, /* a2 */ + const long sig, /* a3 */ + const unsigned long __user uinfo, /* a4 */ + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_pidfd_send_signal(const long pidfd, /* a1 */ + const long sig, /* a2 */ + const unsigned long __user info, /* a3 */ + unsigned long flags, /* a4 */ + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_waitid(const long which, /* a1 */ + const long pid, /* a2 */ + const unsigned long __user *infop, /* a3 */ + const long options, /* a4 */ + const unsigned long __user *ru, /* a5 */ + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_io_uring_register(const unsigned long fd, /* a1 */ + const unsigned long opcode, /* a2 */ + const unsigned long __user arg, /* a3 */ + const unsigned long nr_args, /* a4 */ + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); 
+extern long protected_sys_kexec_load(const unsigned long entry, /* a1 */ + const unsigned long nr_segments, /* a2 */ + const unsigned long __user segments, /* a3 */ + const unsigned long flags, /* a4 */ + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); + +extern int arch_init_pm_sc_debug_mode(const int debug_mask); + +/* + * Storing descriptor attributes in the sival_ptr_list list. + * The descriptor may be restored with the function that follows. + */ +static inline +void store_descriptor_attrs(void *kernel_ptr, + const long user_ptr_lo, const long user_ptr_hi, + const int ptr_tags, const int signum) +{ + struct sival_ptr_list *new; + + /* Saving kernel_ptr in sival_ptr_list: */ + new = kmalloc(sizeof(*new), GFP_KERNEL); + new->kernel_ptr = kernel_ptr; + new->user_ptr_lo = user_ptr_lo; + new->user_ptr_hi = user_ptr_hi; + new->user_tags = ptr_tags; + new->signum = signum; + + /* Saving sival_ptr in sival_ptr_list: */ + down_write(&(current->mm->context.sival_ptr_list_sem)); + /* Add new element as head of the list */ + list_add(&new->link, &(current->mm->context.sival_ptr_list_head)); + up_write(&(current->mm->context.sival_ptr_list_sem)); +} + +/* + * Returns record with saved descriptor attributes (in sival_ptr_list) + * for the given pointer/signal number couple or NULL. 
+ */ +static inline +struct sival_ptr_list *get_descriptor_attrs(const void *kernel_ptr, + const int signum) +{ + struct sival_ptr_list *ret = NULL, *siptrl_ln; + + /* + * We look thru sival_ptr_list to find a record with the same + * kernel pointer and signum if specified: + */ + down_read(¤t->mm->context.sival_ptr_list_sem); + list_for_each_entry(siptrl_ln, + ¤t->mm->context.sival_ptr_list_head, link) { + if (siptrl_ln->kernel_ptr == kernel_ptr) { + if (!signum || (siptrl_ln->signum == signum)) { + ret = siptrl_ln; + break; + } + } + } + up_read(¤t->mm->context.sival_ptr_list_sem); + return ret; +} +#endif /* CONFIG_PROTECTED_MODE */ + +#ifdef CONFIG_COMPAT +extern long compat_sys_lseek(unsigned int fd, int offset, unsigned int whence); +extern long compat_sys_sigpending(u32 *); +extern long compat_sys_sigprocmask(int, u32 *, u32 *); +extern long sys32_pread64(unsigned int fd, char __user *ubuf, + compat_size_t count, unsigned long poslo, unsigned long poshi); +extern long sys32_pwrite64(unsigned int fd, char __user *ubuf, + compat_size_t count, unsigned long poslo, unsigned long poshi); +extern long sys32_readahead(int fd, unsigned long offlo, + unsigned long offhi, compat_size_t count); +extern long sys32_fadvise64(int fd, unsigned long offlo, + unsigned long offhi, compat_size_t len, int advice); +extern long sys32_fadvise64_64(int fd, + unsigned long offlo, unsigned long offhi, + unsigned long lenlo, unsigned long lenhi, int advice); +extern long sys32_sync_file_range(int fd, + unsigned long off_low, unsigned long off_high, + unsigned long nb_low, unsigned long nb_high, int flags); +extern long sys32_fallocate(int fd, int mode, + unsigned long offlo, unsigned long offhi, + unsigned long lenlo, unsigned long lenhi); +extern long sys32_truncate64(const char __user *path, + unsigned long low, unsigned long high); +extern long sys32_ftruncate64(unsigned int fd, + unsigned long low, unsigned long high); +extern long compat_e2k_sys_execve(const char __user 
*filename, + const compat_uptr_t __user *argv, + const compat_uptr_t __user *envp); +extern long compat_e2k_sys_execveat(int fd, const char __user *filename, + const compat_uptr_t __user *argv, + const compat_uptr_t __user *envp, + int flags); +extern asmlinkage long compat_sys_set_backtrace(unsigned int *__user buf, + size_t count, size_t skip, unsigned long flags); +extern asmlinkage long compat_sys_get_backtrace(unsigned int *__user buf, + size_t count, size_t skip, unsigned long flags); +extern long compat_sys_access_hw_stacks(unsigned long mode, + unsigned long long __user *frame_ptr, char __user *buf, + unsigned long buf_size, void __user *real_size); +extern long compat_e2k_sys_getrlimit(unsigned int resource, + struct compat_rlimit __user *rlim); +extern long compat_e2k_sys_setrlimit(unsigned int resource, + struct compat_rlimit __user *rlim); +#endif + + +#endif /* _ASM_E2K_SYSCALLS_H */ diff --git a/arch/e2k/include/asm/system.h b/arch/e2k/include/asm/system.h new file mode 100644 index 000000000000..f6a574b4073a --- /dev/null +++ b/arch/e2k/include/asm/system.h @@ -0,0 +1,664 @@ +/* + * asm-e2k/system.h + */ +#ifndef _E2K_SYSTEM_H_ +#define _E2K_SYSTEM_H_ + +#ifndef __ASSEMBLY__ +#include +#endif /* !(__ASSEMBLY__) */ +#include +#include +#include +#include +#include +#include +#include +#include + +#define set_mb(var, value) do { var = value; smp_mb(); } while (0) +#define set_wmb(var, value) do { var = value; smp_wmb(); } while (0) + +#define NATIVE_PSR_SET_LAST_WISH() \ +do { \ + unsigned long __psr = NATIVE_NV_READ_PSR_REG_VALUE(); \ + __psr |= PSR_LW; \ + NATIVE_WRITE_PSR_REG_VALUE(__psr); \ +} while (0) + +#define PSR_SET_LAST_WISH() \ +do { \ + unsigned long __psr = READ_PSR_REG_VALUE(); \ + __psr |= PSR_LW; \ + WRITE_PSR_REG_VALUE(__psr); \ +} while (0) + +#define boot_native_set_sge() \ +({ \ + NATIVE_WRITE_PSR_REG_VALUE( \ + NATIVE_NV_READ_PSR_REG_VALUE() | PSR_SGE); \ +}) + +static inline bool native_sge_is_set(void) +{ + e2k_psr_t psr = 
NATIVE_NV_READ_PSR_REG(); + + return psr.PSR_sge; +} + +#ifdef CONFIG_E2K_PROFILING +typedef struct { + // FIRST ELEMENT + long max_disable_interrupt; // max #ticks of disable_interrupt + long sum_disable_interrupt; // all #ticks of disable_interrupt + long number_interrupt; // number of interrupts + long number_irqs; // number of closed irq + long number_system_call; // number of system_call + + long max_disable_interrupt_after_dump; // max #ticks of disable_interrupt + // after last read or write profile file + long interrupts[exc_max_num]; // interrupt table + long interrupts_time[exc_max_num]; // interrupt time (in ticks) + long max_interrupts_time[exc_max_num]; // max interrupt time (in ticks) + long syscall[NR_syscalls]; // syscall table + long syscall_time[NR_syscalls]; // syscall time (in ticks) + long max_syscall_time[NR_syscalls]; // max syscall time + long clk; // time of interrupt's begining + // NR_VECTORS 256 + long max_do_irq_time[256]; // max DO_IRQ's time + long do_irq[256]; // number of DO_IRQ + long do_irq_time[256]; // time of DO_IRQ + long clk_of_do_irq; // time of DO_IRQ's begining + long last_element; +} disable_interrupt_t ; + +extern unsigned long get_cmos_time(void); +extern disable_interrupt_t disable_interrupt[NR_CPUS]; + +#define read_ticks(n) (n = NATIVE_READ_CLKR_REG_VALUE()) + +#define add_info_interrupt(n, ticks) \ +({ long t; int cpu; \ + t = NATIVE_READ_CLKR_REG_VALUE() - ticks; \ + cpu = boot_smp_processor_id(); \ + disable_interrupt[cpu].interrupts[n]++; \ + disable_interrupt[cpu].interrupts_time[n] += t; \ + if (t > disable_interrupt[cpu].max_interrupts_time[n]) { \ + disable_interrupt[cpu].max_interrupts_time[n] = t; \ + } \ +}) + +#define add_info_syscall(n, ticks) \ +({ long t; int cpu; \ + cpu = boot_smp_processor_id(); \ + t = NATIVE_READ_CLKR_REG_VALUE() - ticks; \ + disable_interrupt[cpu].syscall[n]++; \ + disable_interrupt[cpu].syscall_time[n] += t; \ + if (t > disable_interrupt[cpu].max_syscall_time[n]) { \ + 
disable_interrupt[cpu].max_syscall_time[n] = t; \ + } \ +}) + +typedef struct { + long max_time; + long full_time; + long begin_time; + long number; + long beg_ip; + long beg_parent_ip; + long end_ip; + long end_parent_ip; + long max_beg_ip; + long max_beg_parent_ip; + long max_end_ip; + long max_begin_time; + long max_end_parent_ip; +} time_info_t; + +/* + * For adding new element you need do following things: + * - add new "time_info_t" element after last one + * - add name of your elemen in system_info_name (file e2k_sysworks.c) + * - create two new define similar below: + * #define info_save_mmu_reg(tick) \ + * store_max_time_in_system_info(tick,max_mmu_reg) + * #define info_save_mmu_reg(tick) - null for not CONFIG_E2K_PROFILING + * - used your new define for merging what you want + */ +typedef struct { + time_info_t max_disabled_interrupt; // max time of disabled inerrupts + time_info_t max_stack_reg; // max time of saving of stack_registers + time_info_t max_tir_reg; // max time for storing TIR + time_info_t max_mmu_reg; // max time for storing mmu registers + time_info_t max_restore_stack_reg; // max time for restoring of stack_registers + time_info_t max_restoring_reg; // max time for restoring all registers + time_info_t max_restore_mmu_reg; // max time for restoring mmu registers + time_info_t max_cpu_idle; // max time for cpu_idle +} system_info_t ; + +extern char* system_info_name[]; +extern system_info_t system_info[NR_CPUS]; +extern int enable_collect_interrupt_ticks; +#define collect_disable_interrupt_ticks() \ +({ int cpu; \ + cpu = boot_smp_processor_id(); \ + if (system_info[cpu].max_disabled_interrupt.begin_time >0){ \ + store_max_time_in_system_info( \ + system_info[cpu].max_disabled_interrupt.begin_time, \ + max_disabled_interrupt); \ + system_info[cpu].max_disabled_interrupt.begin_time = 0; \ + } \ +}) + +#define mark_disable_interrupt_ticks() \ + store_begin_ip_in_system_info(max_disabled_interrupt) + +#define store_do_irq_ticks() \ +({ int 
cpu = boot_smp_processor_id(); \ + disable_interrupt[cpu].clk_of_do_irq = NATIVE_READ_CLKR_REG_VALUE(); \ +}) + +#define define_time_of_do_irq(N) \ +({ long t; int cpu; \ + cpu = boot_smp_processor_id(); \ + t = NATIVE_READ_CLKR_REG_VALUE() - \ + disable_interrupt[cpu].clk_of_do_irq; \ + disable_interrupt[cpu].do_irq_time[N] += t; \ + disable_interrupt[cpu].do_irq[N]++; \ + if (disable_interrupt[cpu].max_do_irq_time[N] < t) { \ + disable_interrupt[cpu].max_do_irq_time[N] = t; \ + } \ +}) +#define info_save_stack_reg(tick) \ + store_max_time_in_system_info(tick,max_stack_reg) +#define info_restore_stack_reg(tick) \ + store_max_time_in_system_info(tick,max_restore_stack_reg) + +#define info_save_mmu_reg(tick) \ + store_max_time_in_system_info(tick,max_mmu_reg) + +#define info_restore_mmu_reg(tick) \ + store_max_time_in_system_info(tick,max_restore_mmu_reg) + +#define info_save_tir_reg(tick) \ + store_max_time_in_system_info(tick,max_tir_reg) + +#define info_restore_all_reg(tick) \ + store_max_time_in_system_info(tick,max_restoring_reg); \ + +#define cpu_idle_time() \ + store_begin_time_in_system_info(max_cpu_idle) +#define calculate_cpu_idle_time() \ + calculate_max_time_in_system_info(max_cpu_idle) + +#define store_begin_time_in_system_info(FIELD) \ +({ long t; int cpu; \ + register e2k_cr0_hi_t cr0_hi; \ + if (enable_collect_interrupt_ticks) { \ + cpu = boot_smp_processor_id(); \ + t = NATIVE_READ_CLKR_REG_VALUE(); \ + AS_WORD(cr0_hi) = NATIVE_NV_READ_CR0_HI_REG_VALUE(); \ + system_info[cpu].FIELD.begin_time = tick; \ + system_info[cpu].FIELD.beg_ip = NATIVE_READ_IP_REG_VALUE(); \ + system_info[cpu].FIELD.beg_parent_ip = \ + (AS_STRUCT(cr0_hi)).ip<<3; \ + } \ +}) +#define store_begin_ip_in_system_info(FIELD) \ +({ \ + int cpu; \ + register e2k_cr0_hi_t cr0_hi; \ + cpu = boot_smp_processor_id(); \ + disable_interrupt[cpu].clk = NATIVE_READ_CLKR_REG_VALUE(); \ + cr0_hi = NATIVE_NV_READ_CR0_HI_REG_VALUE(); \ + system_info[cpu].FIELD.beg_ip = 
NATIVE_READ_IP_REG_VALUE(); \ + system_info[cpu].FIELD.beg_parent_ip = \ + (AS_STRUCT(cr0_hi)).ip<<3; \ +}) + +#define store_begin_time_ip_in_system_info(cpu, tick, FIELD) \ +({ \ + register e2k_cr0_hi_t cr0_hi; \ + if (enable_collect_interrupt_ticks) { \ + system_info[cpu].FIELD.begin_time = tick; \ + cr0_hi = NATIVE_NV_READ_CR0_HI_REG_VALUE(); \ + system_info[cpu].FIELD.beg_ip = NATIVE_READ_IP_REG_VALUE(); \ + system_info[cpu].FIELD.beg_parent_ip = \ + (AS_STRUCT(cr0_hi)).ip<<3; \ + } \ +}) + +#define store_end_ip_in_system_info(mutex, FIELD) \ +({ \ + int cpu; \ + register e2k_cr0_hi_t cr0_hi; \ + cpu = boot_smp_processor_id(); \ + cr0_hi = NATIVE_NV_READ_CR0_HI_REG_VALUE(); \ + system_info[cpu].FIELD.beg_ip = mutex->ip; \ + system_info[cpu].FIELD.beg_parent_ip = mutex->caller; \ + system_info[cpu].FIELD.end_ip = NATIVE_READ_IP_REG_VALUE(); \ + system_info[cpu].FIELD.end_parent_ip = \ + (AS_STRUCT(cr0_hi)).ip<<3; \ +}) + +#define calculate_max_time_in_system_info(FIELD) \ +({ \ + long t; int cpu; \ + register e2k_cr0_hi_t cr0_hi; \ + cpu = boot_smp_processor_id(); \ + if (enable_collect_interrupt_ticks) { \ + t = NATIVE_READ_CLKR_REG_VALUE()-system_info[cpu]. 
\ + FIELD.begin_time; \ + system_info[cpu].FIELD.number++; \ + system_info[cpu].FIELD.full_time += t; \ + if (system_info[cpu].FIELD.max_time < t) { \ + system_info[cpu].FIELD.max_time = t; \ + system_info[cpu].FIELD.max_beg_ip = \ + system_info[cpu].FIELD.beg_ip; \ + system_info[cpu].FIELD.max_beg_parent_ip = \ + system_info[cpu].FIELD.beg_parent_ip; \ + system_info[cpu].FIELD.max_end_ip = \ + NATIVE_READ_IP_REG_VALUE(); \ + cr0_hi = NATIVE_NV_READ_CR0_HI_REG_VALUE(); \ + system_info[cpu].FIELD.max_end_parent_ip = \ + (AS_STRUCT(cr0_hi)).ip<<3; \ + system_info[cpu].FIELD.max_begin_time = \ + system_info[cpu].FIELD.begin_time; \ + } \ + system_info[cpu].FIELD.begin_time = 0; \ + } \ +}) + +extern long TIME; +#define store_max_time_in_system_info(tick, FIELD) \ +({ \ + long t; int cpu; \ + register e2k_cr0_hi_t cr0_hi; \ + cpu = boot_smp_processor_id(); \ + t = NATIVE_READ_CLKR_REG_VALUE()-tick; \ + if (enable_collect_interrupt_ticks) { \ + system_info[cpu].FIELD.number++; \ + system_info[cpu].FIELD.full_time += t; \ + if (system_info[cpu].FIELD.max_time < t) { \ + system_info[cpu].FIELD.max_time = t; \ + system_info[cpu].FIELD.max_beg_ip = \ + system_info[cpu].FIELD.beg_ip; \ + system_info[cpu].FIELD.max_beg_parent_ip = \ + system_info[cpu].FIELD.beg_parent_ip; \ + system_info[cpu].FIELD.max_end_ip = \ + NATIVE_READ_IP_REG_VALUE(); \ + cr0_hi = NATIVE_NV_READ_CR0_HI_REG_VALUE(); \ + system_info[cpu].FIELD.max_end_parent_ip = \ + (AS_STRUCT(cr0_hi)).ip<<3; \ + system_info[cpu].FIELD.max_begin_time = \ + system_info[cpu].FIELD.begin_time; \ + } \ + system_info[cpu].FIELD.begin_time = 0; \ + } \ +}) + +#define UPSR_RESTORE(__src_upsr) \ +({ \ + unsigned long upsr1 = READ_UPSR_REG_VALUE(); \ + int _cond_ = (upsr1 & UPSR_IE) != ((__src_upsr) & UPSR_IE); \ + if (enable_collect_interrupt_ticks && _cond_) { \ + if (__src_upsr & UPSR_IE) { \ + collect_disable_interrupt_ticks(); \ + } else { \ + mark_disable_interrupt_ticks(); \ + } \ + } \ + 
WRITE_UPSR_IRQ_BARRIER(__src_upsr); \ +}) + +#define condition_mark_disable_interrupt_ticks(_cond_) \ +({ \ + if (enable_collect_interrupt_ticks) { \ + mark_disable_interrupt_ticks(); \ + } \ +}) + +#define condition_collect_disable_interrupt_ticks(_cond_) \ +({ \ + if (enable_collect_interrupt_ticks && _cond_) { \ + collect_disable_interrupt_ticks(); \ + } \ +}) + +# else /* !CONFIG_E2K_PROFILING */ + +#define store_max_time_in_system_info(tick,FIELD) +#define calculate_max_time_in_system_info(FIELD) +#define store_begin_time_in_system_info(FIELD) +#define store_begin_ip_in_system_info(FIELD) +#define info_save_tir_reg(tick) +#define info_restore_all_reg(tick) +#define info_save_stack_reg(tick) +#define info_restore_stack_reg(tick) +#define info_save_mmu_reg(tick) +#define info_restore_mmu_reg(tick) +#define cpu_idle_time() +#define calculate_cpu_idle_time() +#define store_do_irq_ticks() +#define define_time_of_do_irq(N) +#define condition_collect_disable_interrupt_ticks(_cond_) +#define condition_mark_disable_interrupt_ticks(_cond_) +#define collect_disable_interrupt_ticks() +#define mark_disable_interrupt_ticks() +#define add_info_syscall(n, ticks) +#define add_info_interrupt(n, ticks) +#define read_ticks(n) +#define UPSR_RESTORE(__src_upsr) (WRITE_UPSR_IRQ_BARRIER(__src_upsr)) +#endif /* CONFIG_E2K_PROFILING */ + +#define E2K_KERNEL_PSR_ENABLED ((e2k_psr_t) { { \ + pm : 1, \ + ie : 1, \ + sge : 1, \ + lw : 0, \ + uie : 1, \ + nmie : 1, \ + unmie : 1, \ +} }) + +#define E2K_KERNEL_PSR_DISABLED ((e2k_psr_t) { { \ + pm : 1, \ + ie : 0, \ + sge : 1, \ + lw : 0, \ + uie : 0, \ + nmie : 0, \ + unmie : 0, \ +} }) + +#define E2K_KERNEL_PSR_DIS_LWISH_EN ((e2k_psr_t) { { \ + pm : 1, \ + ie : 0, \ + sge : 1, \ + lw : 1, \ + uie : 0, \ + nmie : 0, \ + unmie : 0, \ +} }) + +#define E2K_KERNEL_PSR_LWISH_DIS ((e2k_psr_t) { { \ + pm : 1, \ + ie : 0, \ + sge : 1, \ + lw : 0, \ + uie : 0, \ + nmie : 0, \ + unmie : 0, \ +} }) + +#ifndef CONFIG_ACCESS_CONTROL +#define 
E2K_KERNEL_UPSR_ENABLED_ASM 0xa1 +#define E2K_KERNEL_UPSR_DISABLED_ALL_ASM 0x01 +#define E2K_KERNEL_UPSR_DISABLED ((e2k_upsr_t) { { \ + fe : 1, \ + se : 0, \ + ac : 0, \ + a20 : 0, \ + ie : 0, \ + nmie : 1 \ +} }) +#define E2K_KERNEL_UPSR_ENABLED ((e2k_upsr_t) { { \ + fe : 1, \ + se : 0, \ + ac : 0, \ + a20 : 0, \ + ie : 1, \ + nmie : 1 \ +} }) +#define E2K_KERNEL_UPSR_DISABLED_ALL ((e2k_upsr_t) { { \ + fe : 1, \ + se : 0, \ + ac : 0, \ + a20 : 0, \ + ie : 0, \ + nmie : 0 \ +} }) +#else +#define E2K_KERNEL_UPSR_ENABLED_ASM 0xa5 +#define E2K_KERNEL_UPSR_DISABLED_ALL_ASM 0x05 +#define E2K_KERNEL_UPSR_DISABLED ((e2k_upsr_t) { { \ + fe : 1, \ + se : 0, \ + ac : 1, \ + a20 : 0, \ + ie : 0, \ + nmie : 1 \ +} }) +#define E2K_KERNEL_UPSR_ENABLED ((e2k_upsr_t) { { \ + fe : 1, \ + se : 0, \ + ac : 1, \ + a20 : 0, \ + ie : 1, \ + nmie : 1 \ +} }) +#define E2K_KERNEL_UPSR_DISABLED_ALL ((e2k_upsr_t) { { \ + fe : 1, \ + se : 0, \ + ac : 1, \ + a20 : 0, \ + ie : 0, \ + nmie : 0 \ +} }) +#endif /* ! (CONFIG_ACCESS_CONTROL) */ + +#define E2K_KERNEL_INITIAL_UPSR E2K_KERNEL_UPSR_DISABLED +#define E2K_KERNEL_INITIAL_UPSR_WITH_DISABLED_NMI \ + E2K_KERNEL_UPSR_DISABLED_ALL + +#define E2K_USER_INITIAL_UPSR ((e2k_upsr_t) { { \ + fe : 1, \ + se : 0, \ + ac : 0, \ + di : 0, \ + wp : 0, \ + ie : 0, \ + a20 : 0, \ + nmie : 0 \ +} }) + +#define E2K_USER_INITIAL_PSR ((e2k_psr_t) { { \ + pm : 0, \ + ie : 1, \ + sge : 1, \ + lw : 0, \ + uie : 0, \ + nmie : 1, \ + unmie : 0 \ +} }) + +#define PREFIX_INIT_KERNEL_UPSR_REG(PV_TYPE, irq_en, nmirq_dis) \ +do { \ + e2k_upsr_t upsr = E2K_KERNEL_UPSR_DISABLED; \ + if (irq_en) \ + AS(upsr).ie = 1; \ + if (nmirq_dis) \ + AS(upsr).nmie = 0; \ + PV_TYPE##_WRITE_UPSR_REG(upsr); \ +} while (0) +#define BOOT_PREFIX_INIT_KERNEL_UPSR_REG(PV_TYPE, irq_en, nmirq_dis) \ +do { \ + e2k_upsr_t upsr = E2K_KERNEL_UPSR_DISABLED; \ + if (irq_en) \ + AS(upsr).ie = 1; \ + if (nmirq_dis) \ + AS(upsr).nmie = 0; \ + BOOT_##PV_TYPE##_WRITE_UPSR_REG(upsr); \ +} while (0) + 
+#define NATIVE_INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis) \ + PREFIX_INIT_KERNEL_UPSR_REG(NATIVE, irq_en, nmirq_dis) + +#define BOOT_NATIVE_INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis) \ + BOOT_PREFIX_INIT_KERNEL_UPSR_REG(NATIVE, irq_en, nmirq_dis) + +#define NATIVE_INIT_USER_UPSR_REG() \ + NATIVE_WRITE_UPSR_REG(E2K_USER_INITIAL_UPSR) +#define INIT_USER_UPSR_REG() WRITE_UPSR_REG(E2K_USER_INITIAL_UPSR) + +#define PREFIX_SET_KERNEL_UPSR(PV_TYPE) \ +({ \ + PV_TYPE##_INIT_KERNEL_UPSR_REG(false, false); \ + PV_TYPE##_SWITCH_IRQ_TO_UPSR(); \ +}) + +#define PREFIX_SET_KERNEL_UPSR_WITH_DISABLED_NMI(PV_TYPE) \ +({ \ + PV_TYPE##_INIT_KERNEL_UPSR_REG(false, true); \ + PV_TYPE##_SWITCH_IRQ_TO_UPSR(); \ +}) + +#define BOOT_PREFIX_SET_KERNEL_UPSR(PV_TYPE) \ +({ \ + BOOT_##PV_TYPE##_INIT_KERNEL_UPSR_REG(false, false); \ + BOOT_##PV_TYPE##_SWITCH_IRQ_TO_UPSR(); \ +}) + +#define NATIVE_SET_KERNEL_UPSR_WITH_DISABLED_NMI() \ + PREFIX_SET_KERNEL_UPSR_WITH_DISABLED_NMI(NATIVE) +#define BOOT_NATIVE_SET_KERNEL_UPSR() \ + BOOT_PREFIX_SET_KERNEL_UPSR(NATIVE) + +/* + * UPSR should be saved and set to kernel initial state (where interrupts + * are disabled) independently of trap or interrupt occurred on user + * or kernel process. + * In user process case it is as above. + * In kernel process case: + * Kernel process can be interrupted (so UPSR enable interrupts) + * Hardware trap or system call operation disables interrupts mask + * in PSR and PSR becomes main register to control interrupts. + * Trap handler should switch interrupts control from PSR to UPSR + * previously it should set UPSR to initial state for kernel with disabled + * interrupts (so UPSR disable interrupts) + * If trap handler returns to trap point without UPSR restore, then + * interrupted kernel process will have UPSR with disabled interrupts. 
+ * So UPSR should be saved and restored in any case + * + * Trap can occur on light hypercall, where switch of user data stack + * to kernel stack is not executed, so these traps handle as user traps + * (SBR < TASK_SIZE) Light hypercall already switch control from PSR to UPSR + * so it need save current UPSR state (enable/disable interrupts) + * to restore this state before return to trapped light hypercall. + * Interrupt control by UPSR will be restored from PSR saved into CR1_LO + */ + +#define PREFIX_DO_SWITCH_TO_KERNEL_UPSR(PV_TYPE, pv_type, \ + irq_en, nmirq_dis) \ +({ \ + PV_TYPE##_INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis); \ + PV_TYPE##_SWITCH_IRQ_TO_UPSR(); \ + if (!irq_en) \ + trace_hardirqs_off(); \ +}) +#define PREFIX_SWITCH_TO_KERNEL_UPSR(PV_TYPE, pv_type, \ + upsr_reg, irq_en, nmirq_dis) \ +({ \ + PV_TYPE##_DO_SAVE_UPSR_REG((upsr_reg)); \ + PREFIX_DO_SWITCH_TO_KERNEL_UPSR(PV_TYPE, pv_type, \ + irq_en, nmirq_dis); \ +}) +#define BOOT_PREFIX_SWITCH_TO_KERNEL_UPSR(PV_TYPE, pv_type) \ +({ \ + unsigned long cur_upsr; \ + BOOT_##PV_TYPE##_DO_SAVE_UPSR_REG((cur_upsr)); \ + BOOT_##PV_TYPE##_INIT_KERNEL_UPSR_REG(false, false); \ + BOOT_##PV_TYPE##_SWITCH_IRQ_TO_UPSR(false); \ +}) +/* Native version of macroses (all read/write from/to real registers) */ +#define NATIVE_DO_SWITCH_TO_KERNEL_UPSR(irq_en, nmirq_dis) \ + PREFIX_DO_SWITCH_TO_KERNEL_UPSR(NATIVE, native, \ + irq_en, nmirq_dis) +#define NATIVE_SWITCH_TO_KERNEL_UPSR(upsr_reg, irq_en, nmirq_dis) \ + PREFIX_SWITCH_TO_KERNEL_UPSR(NATIVE, native, \ + upsr_reg, irq_en, nmirq_dis) + +#define NATIVE_RETURN_TO_KERNEL_UPSR(upsr_reg) \ +do { \ + NATIVE_WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_DISABLED)); \ + NATIVE_DO_RESTORE_UPSR_REG(upsr_reg); \ +} while (false) + +#define PREFIX_RETURN_TO_USER_UPSR(PV_TYPE, pv_type, upsr_reg, under_upsr) \ +do { \ + PV_TYPE##_WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_DISABLED)); \ + PV_TYPE##_DO_RESTORE_UPSR_REG(upsr_reg); \ +} while (false) +#define 
NATIVE_RETURN_TO_USER_UPSR(upsr_reg) \ + PREFIX_RETURN_TO_USER_UPSR(NATIVE, native, upsr_reg, true) +#define NATIVE_RETURN_PSR_IRQ_TO_USER_UPSR(upsr_reg, lwish_en) \ +do { \ + if (lwish_en) { \ + NATIVE_WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_DIS_LWISH_EN)); \ + } else { \ + NATIVE_WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_LWISH_DIS)); \ + } \ + NATIVE_DO_RESTORE_UPSR_REG(upsr_reg); \ +} while (false) + +#ifdef CONFIG_ACCESS_CONTROL +#define ACCESS_CONTROL_DISABLE_AND_SAVE(upsr_to_save) \ +({ \ + e2k_upsr_t upsr; \ + upsr_to_save = read_UPSR_reg(); \ + upsr = upsr_to_save; \ + AS_STRUCT(upsr).ac = 0; \ + write_UPSR_reg(upsr); \ +}) + +#define ACCESS_CONTROL_RESTORE(upsr_to_restore) \ +({ \ + write_UPSR_reg(upsr_to_restore); \ +}) +#else /* !CONFIG_ACCESS_CONTROL */ +#define ACCESS_CONTROL_DISABLE_AND_SAVE(upsr_to_save) do { } while (0) +#define ACCESS_CONTROL_RESTORE(upsr_to_restore) do { } while (0) +#endif /* CONFIG_ACCESS_CONTROL */ + +extern void * __e2k_read_kernel_return_address(int n); +/* If n == 0 we can read return address directly from cr0.hi */ +#define __e2k_kernel_return_address(n) \ + ({ (n == 0) ? 
\ + ((void *) (NATIVE_NV_READ_CR0_HI_REG_VALUE() & ~7UL)) \ + : \ + __e2k_read_kernel_return_address(n); }) + +#ifndef CONFIG_CPU_HW_CLEAR_RF +typedef void (*clear_rf_t)(void); +extern const clear_rf_t clear_rf_fn[]; + +static __always_inline void clear_rf_kernel_except_current(u64 num_q) +{ + clear_rf_fn[num_q](); +} +#endif + +#define SWITCH_TO_KERNEL_UPSR(upsr_reg, irq_en, nmirq_dis) \ + NATIVE_SWITCH_TO_KERNEL_UPSR(upsr_reg, irq_en, nmirq_dis) +#define RETURN_TO_USER_UPSR(upsr_reg) \ + NATIVE_RETURN_TO_USER_UPSR(upsr_reg) + +#if defined(CONFIG_PARAVIRT_GUEST) +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +#include +#else /* native kernel */ +#define INIT_KERNEL_UPSR_REG NATIVE_INIT_KERNEL_UPSR_REG +#define BOOT_SET_KERNEL_UPSR() BOOT_NATIVE_SET_KERNEL_UPSR() +#define RETURN_TO_KERNEL_UPSR(upsr_reg) \ + NATIVE_RETURN_TO_KERNEL_UPSR(upsr_reg) +#define SET_KERNEL_UPSR_WITH_DISABLED_NMI() \ + NATIVE_SET_KERNEL_UPSR_WITH_DISABLED_NMI() + +static inline void *nested_kernel_return_address(int n) +{ + return __e2k_read_kernel_return_address(n); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* _E2K_SYSTEM_H_ */ diff --git a/arch/e2k/include/asm/tag_mem.h b/arch/e2k/include/asm/tag_mem.h new file mode 100644 index 000000000000..f95503957098 --- /dev/null +++ b/arch/e2k/include/asm/tag_mem.h @@ -0,0 +1,388 @@ +/* $Id: tag_mem.h,v 1.6 2008/09/18 14:57:23 atic Exp $ + * + * Heading of tag's memory management. + * + * Copyright (C) 2003 Salavat Guiliazov + */ + +#ifndef _E2K_TAG_MEM_H +#define _E2K_TAG_MEM_H + +#include +#include +#include + +#include +#include +#include +#include +#include + +#undef DEBUG_TAG_MODE +#undef DebugTM +#define DEBUG_TAG_MODE 0 /* Tag memory */ +#define DebugTM(...) 
DebugPrint(DEBUG_TAG_MODE ,##__VA_ARGS__) + +#ifndef __ASSEMBLY__ + + +/* + * Definition to convert data virtual address to appropriate virtual addresses + * of tags and on the contrary: + * + * to each quad-word of data (16 bytes) corresponds 1 byte of tag + * + * 0x0000 00f0 0000 0000 - 0x0000 00ff ffff ffff USER_TAG_MEM_BASE - + * USER_TAG_MEM_SIZE + * + * Data page 0x0000 0000 0000 0000 <-> Tag page 0x0000 00f0 0000 0000 + * 0x0000 0000 0000 1000 <-> 0x0000 00f0 0000 0100 + * ..................... ..................... + * 0x0000 0000 0001 0000 <-> 0x0000 00f0 0000 1000 + * ..................... ..................... + * 0x0000 00e0 0000 0000 <-> 0x0000 00fe 0000 0000 + * ..................... ..................... + * 0x0000 00ef ffff f000 <-> 0x0000 00fe ffff ff00 + */ + +#define DATA_PAGES_PER_TAGS_PAGE(block_size) 16 +#define ONE_PAGE_TAGS_AREA_SIZE (PAGE_SIZE / DATA_PAGES_PER_TAGS_PAGE(0)) +#define virt_to_tag(data_addr) (USER_TAG_MEM_BASE + ((data_addr) >> 4)) +#define tag_to_virt(tag_addr) (((tag_addr) - USER_TAG_MEM_BASE) << 4) + +/* + * Is the specified address from tags virtual space + */ +#define is_tags_area_addr(addr) ((addr) >= USER_TAG_MEM_BASE && \ + (addr) < (USER_TAG_MEM_BASE+USER_TAG_MEM_SIZE)) + +/* + * vma->vm_flags for tags area + */ + +#define TAG_VM_FLAGS (VM_READ | VM_WRITE) + +/* + * Structure of tags into the memory + * One byte contains tag of one quad-word data, which consists of + * two double-word (low and high) and/or + * four single-word (0, 1, 2 and 3) + */ + +typedef struct mem_tag_s { + u8 w0 : 2; /* [1:0] tag of single word # 0 */ + u8 w1 : 2; /* [3:2] tag of single word # 1 */ + u8 w2 : 2; /* [5:4] tag of single word # 2 */ + u8 w3 : 2; /* [7:6] tag of single word # 3 */ +} mem_tag_s_t; + +typedef struct mem_tag_d { + u8 lo : 4; /* [3:0] tag of low double word */ + u8 hi : 4; /* [7:4] tag of high double word */ +} mem_tag_d_t; + +typedef struct mem_tag_q { + u8 qw : 8; /* [7:0] tag of quad-word */ +} mem_tag_q_t; + +typedef 
union mem_tag { + mem_tag_s_t sing; /* as fields of single word tags */ + mem_tag_d_t doub; /* as fields of double word tags */ + mem_tag_q_t quad; /* as fields of double word tags */ + u8 tags; /* as entire value */ +} mem_tag_t; +#define dw_lo_mem_tag doub.lo +#define dw_hi_mem_tag doub.hi +#define qw_mem_tag quad.qw + +extern void __init swap_info_cache_init(void); + +extern int add_swap_info_to_page(struct mm_struct* mm, struct page* page, e2k_addr_t addr); + +extern int add_swap_info_to_page_next(struct mm_struct* mm, struct page* page, e2k_addr_t addr); + +extern swap_page_info_t* get_swap_info_from_page(struct page* page); + +extern void free_swap_info_struct(swap_page_info_t* info); + +#ifdef CONFIG_SOFTWARE_SWAP_TAGS +extern inline void +remove_swap_info_from_page(struct page* page) { + swap_page_info_t *info; + do { + info = get_swap_info_from_page(page); + free_swap_info_struct(info); + } while (PageWithSwapInfo(page)); +} +#endif /* CONFIG_SOFTWARE_SWAP_TAGS */ + +/* + * Forwards of tags memory management functions + */ + +extern int load_page_tags(struct mm_struct* mm, e2k_addr_t page_addr); + +extern struct vm_area_struct *create_tags_vma(struct mm_struct *mm, + e2k_addr_t data_addr); +extern e2k_addr_t get_tags_address(struct mm_struct *mm, e2k_addr_t data_addr, + int write); +extern struct page *get_tags_page(struct mm_struct *mm, e2k_addr_t data_addr, + int write_access); + +extern int do_tag_munmap(struct mm_struct *mm, e2k_addr_t data_addr, + e2k_size_t data_len); + +/* + * Save tags of data memory area started from address 'data_addr' + * The starting address of data area and area size should be quad-word * 8 + * aligned: + * 1 quad-word (16 bytes) occupies 1 byte of tags + * 8 tags packed into 1 double-word + * Argument 'len' specifies saved area size in bytes, so it should be + * quad-word * 8 multiple + */ +extern inline int +do_save_mem_area_tags(e2k_addr_t data_addr, e2k_addr_t tags_addr, e2k_size_t len, + int copy_data, e2k_addr_t 
copy_addr) +{ + int tags_in_dw = sizeof (u64) / sizeof (mem_tag_t); + int dw_data_in_dw_tags = tags_in_dw * (sizeof (u64) * 2); + u64 *data_area = (u64 *)(_PAGE_ALIGN_DOWN(data_addr, + dw_data_in_dw_tags)); + u64 *tags_area = (u64 *)(_PAGE_ALIGN_DOWN(tags_addr, tags_in_dw)); + u64 *copy_area = (u64 *)(_PAGE_ALIGN_DOWN(copy_addr, + dw_data_in_dw_tags)); + register u64 q0_dw_lo; + register u64 q0_dw_hi; + register u64 q1_dw_lo; + register u64 q1_dw_hi; + register mem_tag_t q0_tag = {tags : 0}; + register mem_tag_t q1_tag = {tags : 0}; + int all_tags_is_numeric = 1; + u64 tags_dw = 0; + int tag_num, dw_num; + int tag_shift; + int i = 0; + + DebugTM("started for data addr 0x%lx tag addr 0x%lx and size 0x%lx bytes\n", + data_addr, tags_addr, len); + len -= (((e2k_addr_t)data_area) - data_addr); + len = _PAGE_ALIGN_UP(len, tags_in_dw); + + for (tag_num = 0; tag_num < len / dw_data_in_dw_tags; tag_num ++) { + tags_dw = 0; + tag_shift = 0; + for (dw_num = 0; dw_num < dw_data_in_dw_tags / sizeof (u64); + dw_num += 4) { + E2K_LOAD_TAGGED_QWORD_AND_TAGS(&data_area[0], q0_dw_lo, + q0_dw_hi, q0_tag.dw_lo_mem_tag, + q0_tag.dw_hi_mem_tag); + E2K_LOAD_TAGGED_QWORD_AND_TAGS(&data_area[2], q1_dw_lo, + q1_dw_hi, q1_tag.dw_lo_mem_tag, + q1_tag.dw_hi_mem_tag); + data_area += 4; + tags_dw |= (((u64)(q0_tag.qw_mem_tag)) << tag_shift); + tag_shift += (sizeof (mem_tag_t) * 8); + tags_dw |= (((u64)(q1_tag.qw_mem_tag)) << tag_shift); + tag_shift += (sizeof (mem_tag_t) * 8); + if (DEBUG_TAG_MODE && i < 16) { + DebugTM(" data[0x%03x] = 0x%016lx\n", + (u32) (tag_num * dw_data_in_dw_tags + + sizeof (*data_area) * (dw_num + 0)), + q0_dw_lo); + DebugTM(" data[0x%03x] = 0x%016lx\n", + (u32) (tag_num * dw_data_in_dw_tags + + sizeof (*data_area) * (dw_num + 1)), + q0_dw_hi); + DebugTM(" data[0x%03x] = 0x%016lx\n", + (u32) (tag_num * dw_data_in_dw_tags + + sizeof (*data_area) * (dw_num + 2)), + q1_dw_lo); + DebugTM(" data[0x%03x] = 0x%016lx\n", + (u32) (tag_num * dw_data_in_dw_tags + + sizeof 
(*data_area) * (dw_num + 3)), + q1_dw_hi); + } + if (copy_data) { + E2K_STORE_VALUE_WITH_TAG(©_area[0], + q0_dw_lo, ETAGNVD); + E2K_STORE_VALUE_WITH_TAG(©_area[1], + q0_dw_hi, ETAGNVD); + E2K_STORE_VALUE_WITH_TAG(©_area[2], + q1_dw_lo, ETAGNVD); + E2K_STORE_VALUE_WITH_TAG(©_area[3], + q1_dw_hi, ETAGNVD); + + copy_area += 4; + } + } + tags_area[0] = tags_dw; + if (tags_dw != ETAGNVQ) + all_tags_is_numeric = 0; + if (DEBUG_TAG_MODE && i < 16) { + DebugTM(" tags[0x%03x] = 0x%016lx\n", + (u32)(sizeof (*tags_area) * tag_num), tags_dw); + i += dw_num; + } + tags_area ++; + } + DebugTM("finished with data addr 0x%px tag addr 0x%px\n", + data_area, tags_area); + return all_tags_is_numeric; +} + +extern inline int +save_mem_area_tags(e2k_addr_t data_addr, e2k_addr_t tags_addr, + e2k_size_t len) +{ + return do_save_mem_area_tags(data_addr, tags_addr, len, 0, 0); +} + +extern inline int +save_mem_page_tags(e2k_addr_t data_addr, e2k_addr_t tags_addr) +{ + return save_mem_area_tags(data_addr, tags_addr, PAGE_SIZE); +} + +extern inline int +save_mem_data_page_tags(struct mm_struct *mm, e2k_addr_t data_addr) +{ + e2k_addr_t tags_addr; + + tags_addr = get_tags_address(mm, data_addr, 1); + if (tags_addr == (e2k_addr_t)0) + return -1; + save_mem_page_tags(data_addr, tags_addr); + return 0; +} + +/* + * Restore tags of data memory area started from address 'data_addr' + * The starting address of data area and area size should be quad-word * 8 + * aligned: + * 1 quad-word (16 bytes) occupies 1 byte of tags + * 8 tags packed into 1 double-word + * Argument 'len' specifies restored area size in bytes, so it should be + * quad-word * 8 multiple + */ +extern inline void +do_restore_mem_area_tags(e2k_addr_t data_addr, e2k_addr_t tags_addr, + e2k_size_t len, int copy_data, e2k_addr_t copy_addr) +{ + int tags_in_dw = sizeof (u64) / sizeof (mem_tag_t); + int dw_data_in_dw_tags = tags_in_dw * (sizeof (u64) * 2); + u64 *data_area = (u64 *)(_PAGE_ALIGN_DOWN(data_addr, + dw_data_in_dw_tags)); + 
u64 *tags_area = (u64 *)(_PAGE_ALIGN_DOWN(tags_addr, tags_in_dw)); + u64 *copy_area = (u64 *)(_PAGE_ALIGN_DOWN(copy_addr, + dw_data_in_dw_tags)); + register u64 q_dw_lo; + register u64 q_dw_hi; + register mem_tag_t q_tag = {tags : 0}; + register u64 tags_dw = 0; + int tag_num, dw_num; + int i = 0; + + DebugTM("started for data addr 0x%lx tag " + "addr 0x%lx and size 0x%lx bytes\n", + data_addr, tags_addr, len); + len -= (((e2k_addr_t)data_area) - data_addr); + len = _PAGE_ALIGN_UP(len, tags_in_dw); + + for (tag_num = 0; tag_num < len / dw_data_in_dw_tags; tag_num ++) { + tags_dw = tags_area[0]; + if (DEBUG_TAG_MODE && i < 16) { + DebugTM(" tags[0x%03x] = 0x%016lx\n", + (u32)(sizeof (*tags_area) * tag_num), tags_dw); + } + for (dw_num = 0; dw_num < dw_data_in_dw_tags / sizeof (u64); + dw_num += 2) { + E2K_LOAD_TAGGED_QWORD(&data_area[0], q_dw_lo, q_dw_hi); + q_tag.qw_mem_tag = tags_dw; + tags_dw >>= (sizeof (mem_tag_t) * 8); + if (!copy_data) { + /* After E2K_PUTTAGD must STRONGLY follow STORE_TAG asm + * to avoid compiler's problems */ + E2K_STORE_TAGGED_QWORD(&data_area[0], + q_dw_lo, q_dw_hi, + q_tag.dw_lo_mem_tag, q_tag.dw_hi_mem_tag); + + } else { + E2K_STORE_TAGGED_QWORD(©_area[0], + q_dw_lo, q_dw_hi, + q_tag.dw_lo_mem_tag, q_tag.dw_hi_mem_tag); + copy_area += 2; + } + data_area += 2; + if (DEBUG_TAG_MODE && i < 16) { + DebugTM(" data[0x%03x] = 0x%016lx\n", + (u32) (tag_num * dw_data_in_dw_tags + + sizeof (*data_area) * (dw_num + 0)), + q_dw_lo); + DebugTM(" data[0x%03x] = 0x%016lx\n", + (u32) (tag_num * dw_data_in_dw_tags + + sizeof (*data_area) * (dw_num + 1)), + q_dw_hi); + i += 2; + } + } + tags_area ++; + } + DebugTM("finished with data addr 0x%px tag " + "addr 0x%px\n", data_area, tags_area); +} +extern inline void +restore_mem_area_tags(e2k_addr_t data_addr, e2k_addr_t tags_addr, + e2k_size_t len) +{ + do_restore_mem_area_tags(data_addr, tags_addr, len, 0, 0); +} + +extern inline void +restore_mem_page_tags(e2k_addr_t data_addr, e2k_addr_t tags_addr) +{ 
+ restore_mem_area_tags(data_addr, tags_addr, PAGE_SIZE); +} + +extern inline int +restore_mem_data_page_tags(struct mm_struct *mm, e2k_addr_t data_addr) +{ + e2k_addr_t tags_addr; + + data_addr &= PAGE_MASK; + tags_addr = get_tags_address(mm, data_addr, 0); + if (tags_addr == (e2k_addr_t)0) + return -1; + restore_mem_page_tags(data_addr, tags_addr); + return 0; +} + +extern int save_swapped_page_tags(struct mm_struct *mm, + struct page *swapped_page, e2k_addr_t data_addr); + +extern int restore_swapped_page_tags(struct mm_struct *mm, + struct page *swapped_page, e2k_addr_t data_addr); + +extern inline int +save_swapped_page_tags2(swap_page_info_t* info, struct page* page) { + return save_swapped_page_tags(info->mm, page, info->addr); +} + +#ifdef CONFIG_SOFTWARE_SWAP_TAGS +extern inline int +save_swapped_page_tags_from_page(struct page* page) { + int ret = 0; + swap_page_info_t* info; + do { + info = get_swap_info_from_page(page); + if (!is_tags_area_addr(info->addr)) + ret = save_swapped_page_tags2(info, page); +// free_swap_info_struct(info); + } while (PageWithSwapInfo(page)); + return ret; +} +#endif /* CONFIG_SOFTWARE_SWAP_TAGS */ + +#endif /* !(__ASSEMBLY__) */ + +#endif /* !(_E2K_TAG_MEM_H) */ diff --git a/arch/e2k/include/asm/tags.h b/arch/e2k/include/asm/tags.h new file mode 100644 index 000000000000..960cca910577 --- /dev/null +++ b/arch/e2k/include/asm/tags.h @@ -0,0 +1,60 @@ +/* + * + * Definitions of most popular hardware tags. + * + */ + +#ifndef _E2K_TAGS_H_ +#define _E2K_TAGS_H_ + +#ifndef __ASSEMBLY__ + +/* Internal tags list. Old style */ +#define E2K_AP_ITAG 0x0 +#define E2K_PL_ITAG 0x1 +#define E2K_PL_V2_ITAG 0x0 +#define E2K_SAP_ITAG 0x4 + +#define ITAG_MASK ULL(0x4000000040000000) +#define ITAGDWD_DEBUG (ITAG_MASK | ULL(0xdead0000dead)) +#define ITAGDWD_IO_DEBUG (ITAG_MASK | ULL(0x10dead0010dead)) + +/* + * Definition of basic external tags. Old style. 
+ */ +#define E2K_NUMERIC_ETAG 0x0 +#define E2K_NULLPTR_ETAG 0x0 +#define E2K_PL_ETAG 0xA +#define E2K_PLLO_ETAG 0xF +#define E2K_PLHI_ETAG 0xF +#define E2K_AP_HI_ETAG 0xC +#define E2K_AP_LO_ETAG 0xF +#define E2K_SAP_HI_ETAG 0xC +#define E2K_SAP_LO_ETAG 0xF + +/* External tags. New style */ + +#define ETAGNUM 0x00 /* Num. value. generic */ + +#define ETAGNVS 0x00 /* Num. value single */ +#define ETAGEWS 0x01 /* Empty value. single */ +#define ETAGDWS 0x01 /* Diagnostic value. single */ + +#define ETAGNVD 0x00 /* Num. value double */ +#define ETAGNPD 0x00 /* Null pointer */ +#define ETAGEWD 0x05 /* Empty value. double */ +#define ETAGDWD 0x05 /* Diagnostic value. double */ +#define ETAGPLD 0x0A /* Procedure label (v1-v5) */ + +#define ETAGNVQ 0x00 /* Num. value quadro */ +#define ETAGNPQ 0x00 /* Null pointer */ +#define ETAGDWQ 0x55 /* Empty quadro */ +#define ETAGAPQ 0xCF /* Array pointer */ +#define ETAGSAP 0xCF /* Stack array pointer */ +#define ETAGPLQ 0xFF /* Procedure label (v6-...) 
*/ + +#define ETAGBADQ 0xee /* Invalid tag for quadro object */ + +#endif /* !(__ASSEMBLY__) */ + +#endif /* !(_E2K_TAGS_H_) */ diff --git a/arch/e2k/include/asm/termbits.h b/arch/e2k/include/asm/termbits.h new file mode 100644 index 000000000000..8484205b4842 --- /dev/null +++ b/arch/e2k/include/asm/termbits.h @@ -0,0 +1,6 @@ +#ifndef _E2K_TERMBITS_H_ +#define _E2K_TERMBITS_H_ + +#include + +#endif /* _E2K_TERMBITS_H_ */ diff --git a/arch/e2k/include/asm/termios.h b/arch/e2k/include/asm/termios.h new file mode 100644 index 000000000000..8b3d2b070e48 --- /dev/null +++ b/arch/e2k/include/asm/termios.h @@ -0,0 +1,6 @@ +#ifndef _E2K_TERMIOS_H_ +#define _E2K_TERMIOS_H_ + +#include + +#endif /* _E2K_TERMIOS_H_ */ diff --git a/arch/e2k/include/asm/thread_info.h b/arch/e2k/include/asm/thread_info.h new file mode 100644 index 000000000000..eef7c47f379d --- /dev/null +++ b/arch/e2k/include/asm/thread_info.h @@ -0,0 +1,442 @@ +/* + * $Id: thread_info.h,v 1.29 2009/08/19 07:47:20 panteleev_p Exp $ + * thread_info.h: E2K low-level thread information + * + */ +#ifndef _E2K_THREAD_INFO_H +#define _E2K_THREAD_INFO_H + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT +#include +#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */ +#ifdef CONFIG_MONITORS +#include +#endif /* CONFIG_MONITORS */ +#endif /* __ASSEMBLY__ */ + + +#ifndef __ASSEMBLY__ + +typedef struct { + unsigned long seg; +} mm_segment_t; + +struct signal_stack { + unsigned long base; + unsigned long size; + unsigned long used; +}; + +#ifdef CONFIG_VIRTUALIZATION +struct gthread_info; +#endif /* CONFIG_VIRTUALIZATION */ + +typedef struct thread_info { + unsigned long flags; /* low level flags */ + + unsigned long status; /* thread synchronous flags */ + int preempt_lazy_count; /* 0 => lazy preemptable + <0 => BUG */ + long long irq_enter_clk; /* CPU clock when irq enter */ + /* 
occured */ + mm_segment_t addr_limit; /* thread address space */ + struct pt_regs *pt_regs; /* head of pt_regs */ + /* structure queue: */ + /* pointer to current */ + /* pt_regs */ + e2k_usd_hi_t k_usd_hi; /* Kernel current data */ + /* stack size */ + e2k_usd_lo_t k_usd_lo; /* Kernel current data */ + /* stack base */ + + /* Kernel's hardware stacks */ + e2k_psp_lo_t k_psp_lo; + e2k_psp_hi_t k_psp_hi; + e2k_pcsp_lo_t k_pcsp_lo; + e2k_pcsp_hi_t k_pcsp_hi; + + /* Because we don't have pt_regs ready upon kernel entry we + * temporarily save stack registers here, then copy to pt_regs */ + struct hw_stacks tmp_user_stacks; + /* Because we have to use the same kernel entry for both user + * and kernel interrupts, we have to save user's global registers + * to some temporary area, only after we copy them to pt_regs if + * this was user interrupt. */ + struct kernel_gregs tmp_k_gregs; + + struct kernel_gregs k_gregs; + + struct kernel_gregs k_gregs_light; + + struct restart_block restart_block; + e2k_upsr_t upsr; /* kernel upsr */ + + data_stack_t u_stack; /* User data stack info */ + hw_stack_t u_hw_stack; /* User hardware stacks info */ + + /* These fields are needed only for uhws_mode = UHWS_MODE_PSEUDO */ + struct list_head old_u_pcs_list; /* chain stack old areas list */ + + struct list_head getsp_adj; + + long usr_pfault_jump; /* where to jump if */ + /* copy_*_user has bad addr */ + e2k_cutd_t u_cutd; /* Compilation Unit Table */ + /* base (register) */ +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT + int times_index; + long times_num; + kernel_times_t times[MAX_KERNEL_TIMES_NUM]; + scall_times_t *fork_scall_times; +#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */ +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + u64 ss_rmp_bottom; /* lower mremap addr for + * secondary space area */ + u64 rp_start; /* recovery point range start */ + u64 rp_end; /* recovery point range end */ + u64 rp_ret_ip; /* recovery point return IP */ + int last_ic_flush_cpu; /* last cpu, where IC was */ + /* flushed on 
migration */ +#endif /* CONFIG_SECONDARY_SPACE_SUPPORT */ + +#ifdef CONFIG_PROTECTED_MODE + global_store_t *g_list; + e2k_addr_t multithread_address; /* It needs to interpretate globals + * pointed to stack */ + struct rw_semaphore *lock; /* can't include linux/rt_lock.h*/ + long __user *pm_robust_list; +#endif /* CONFIG_PROTECTED_MODE */ +#ifdef CONFIG_MONITORS + monitor_registers_delta_t monitors_delta; + atomic64_t monitors_count[NR_CPUS][MONITORS_COUNT]; +#endif /* CONFIG_MONITORS */ + + /* follow fields to save guest kernel hardware stacks info to free */ + /* them after sys_execve() and switch to new process */ + void *old_ps_base; /* old kernel procedure stack base */ + e2k_size_t old_ps_size; /* and size */ + void *old_pcs_base; /* old kernel chain stack base */ + e2k_size_t old_pcs_size; /* and size */ + + /* Support {make/get/set}context: current context goes here. + * On thread creation this is NULL and will be allocated as + * needed. */ + struct hw_context *this_hw_context; + + struct { + unsigned long entry; + unsigned long sp; + } execve; + + /* registers for ptrace */ + unsigned long long dam[DAM_ENTRIES_NUM]; + e2k_aalda_t aalda[AALDAS_REGS_NUM]; + + bool last_wish; + struct ksignal ksig; + + /* signal stack area is used to store interrupted context */ + struct signal_stack signal_stack; + +#ifdef CONFIG_VIRTUALIZATION + pgd_t *kernel_image_pgd_p; /* pointer to host kernel image pgd */ + pgd_t kernel_image_pgd; /* host kernel image pgd value */ + pgd_t shadow_image_pgd; /* paravirtualized guest image shadow */ + /* pgd value */ + pgd_t *vcpu_pgd; /* root PGD for the VCPU */ + /* (can be NULL if need not) */ + pgd_t *host_pgd; /* root PGD for the host thread */ + /* (VCPU host thread mm->pgd) */ + void *virt_machine; /* pointer to main structure of */ + /* virtual machine for */ + /* paravirtualized guest */ + struct kvm_vcpu *vcpu; /* KVM VCPU state for host */ + unsigned long vcpu_state_base; /* base of VCPU state fo guest */ + int 
(*paravirt_page_prefault) /* paravirtualized guest page */ + /* prefault handler */ + (pt_regs_t *regs, trap_cellar_t *tcellar); + struct gthread_info *gthread_info; /* only on host: current */ + /* guest kernel thread info */ + int gpid_nr; /* only on guest: the guest */ + /* kernel thread ID number */ + /* on host kernel */ + int gmmid_nr; /* only on guest: the guest */ + /* thread mm ID number on */ + /* the host */ + struct list_head tasks_to_spin; /* only on host: list of tasks */ + /* to support spin lock/unlock */ + struct gthread_info *gti_to_spin; /* guest thread waitin for the */ + /* spin lock/unlock */ + int should_stop; /* on host: guest kernel thread */ + /* should be stopped */ + /* structure to save state of user global registers, which are */ + /* used to support virtualization and PV OPs by kernel */ + host_gregs_t h_gregs; /* state of user global registers */ + /* used by host to support guest */ + /* kernel */ +#endif /* CONFIG_VIRTUALIZATION */ +} __aligned(SMP_CACHE_BYTES) thread_info_t; + +#endif /* !__ASSEMBLY__ */ + +/* + * Thread information flags: + * + * TIF_SYSCALL_TRACE is known to be 0 via blbs. 
+ */ +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ +#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */ +#define TIF_SIGPENDING 2 /* signal pending */ +#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ +#define TIF_POLLING_NRFLAG 4 /* poll_idle is polling NEED_RESCHED */ +#define TIF_32BIT 5 /* 32-bit binary */ +#define TIF_MEMDIE 6 +#define TIF_KERNEL_TRACE 7 /* kernel trace active */ +#define TIF_NOHZ 8 +#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ +#define TIF_SECCOMP 10 /* secure computing */ +#define TIF_NEED_RESCHED_LAZY 11 /* lazy rescheduling necessary */ +#define TIF_USD_NOT_EXPANDED 14 /* local data stack cannot be */ + /* expanded (fixed size) */ + /* not used yet */ +#define TIF_BAD_USD_SIZE 15 /* checker detected kernel USD size */ + /* is wrong */ +#define TIF_USR_CONTROL_INTERRUPTS 16 /* user can control interrupts */ +#define TIF_WILL_RESCHED 17 /* task will be rescheduled soon */ +/* Following flags only for virtualization support */ +#define TIF_VM_CREATED 19 /* task is running as virtual kernel */ + /* and created virtual machine */ +#define TIF_MULTITHREADING 20 /* task is running as multithreading */ + /* for example host/guest kernel main */ + /* threads */ +#define TIF_VIRTUALIZED_HOST 21 /* thread is host part of VCPU to run */ + /* virtualized kernel */ +#define TIF_VIRTUALIZED_GUEST 22 /* thread is guest part of VCPU */ + /* to run virtualized kernel */ +#define TIF_PARAVIRT_GUEST 23 /* user is paravitualized guest */ + /* kernel */ +#define TIF_PSEUDOTHREAD 24 /* the thread is pseudo only to run */ + /* on VIRQ VCPU as starter of VIRQ */ + /* handler */ +#define TIF_VIRQS_ACTIVE 26 /* the thread is ready to inject */ + /* VIRQS interrupt */ +#define TIF_LIGHT_HYPERCALL 28 /* hypervisor is executing light */ + /* hypercall */ +#define TIF_GENERIC_HYPERCALL 29 /* hypervisor is executing generic */ + /* hypercall */ +/* End of flags only for virtualization support */ +#define 
TIF_SYSCALL_TRACEPOINT 30 /* syscall tracepoint instrumentation */ +#define TIF_NAPI_WORK 31 /* napi_wq_worker() is running */ + +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) +#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) +#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_32BIT (1 << TIF_32BIT) +#define _TIF_KERNEL_TRACE (1 << TIF_KERNEL_TRACE) +#define _TIF_NOHZ (1 << TIF_NOHZ) +#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) +#define _TIF_SECCOMP (1 << TIF_SECCOMP) +#define _TIF_USD_NOT_EXPANDED (1 << TIF_USD_NOT_EXPANDED) +#define _TIF_BAD_USD_SIZE (1 << TIF_BAD_USD_SIZE) +#define _TIF_USR_CONTROL_INTERRUPTS (1 << TIF_USR_CONTROL_INTERRUPTS) +#define _TIF_WILL_RESCHED (1 << TIF_WILL_RESCHED) +#define _TIF_VM_CREATED (1 << TIF_VM_CREATED) +#define _TIF_MULTITHREADING (1 << TIF_MULTITHREADING) +#define _TIF_VIRTUALIZED_HOST (1 << TIF_VIRTUALIZED_HOST) +#define _TIF_VIRTUALIZED_GUEST (1 << TIF_VIRTUALIZED_GUEST) +#define _TIF_PARAVIRT_GUEST (1 << TIF_PARAVIRT_GUEST) +#define _TIF_PSEUDOTHREAD (1 << TIF_PSEUDOTHREAD) +#define _TIF_VIRQS_ACTIVE (1 << TIF_VIRQS_ACTIVE) +#define _TIF_LIGHT_HYPERCALL (1 << TIF_LIGHT_HYPERCALL) +#define _TIF_GENERIC_HYPERCALL (1 << TIF_GENERIC_HYPERCALL) +#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) +#define _TIF_NAPI_WORK (1 << TIF_NAPI_WORK) + +#define _TIF_WORK_SYSCALL_TRACE (_TIF_SYSCALL_TRACE | \ + _TIF_KERNEL_TRACE | \ + _TIF_SYSCALL_TRACEPOINT | \ + _TIF_SYSCALL_AUDIT | \ + _TIF_SECCOMP | \ + _TIF_NOHZ) + +/* Work to do on return to userspace. */ +#define _TIF_WORK_MASK (_TIF_NOTIFY_RESUME | \ + _TIF_SIGPENDING | \ + _TIF_NEED_RESCHED) + +/* Work to do on return to userspace with exception of signals. + * This is used when it is not enough to check _TIF_SIGPENDING. */ +#define _TIF_WORK_MASK_NOSIG (_TIF_NOTIFY_RESUME | \ + _TIF_NEED_RESCHED) + +/* + * Thread-synchronous status. 
+ * + * This is different from the flags in that nobody else + * ever touches our thread-synchronous status, so we don't + * have to worry about atomic accesses. + */ +#define TS_DELAYED_SIG_HANDLING 0x00000001 +#define TS_MMAP_PRIVILEGED 0x00000004 +#define TS_MMAP_PS 0x00000008 +#define TS_MMAP_PCS 0x00000010 +#define TS_MMAP_SIGNAL_STACK 0x00000020 +#define TS_KERNEL_SYSCALL 0x00000040 +#define TS_USER_EXECVE 0x00000080 +#define TS_SINGLESTEP_KERNEL 0x00000100 +#define TS_SINGLESTEP_USER 0x00000200 +/* the host thread is switching to VCPU running mode + * and wait for interception (trap on PV mode) */ +#define TS_HOST_AT_VCPU_MODE 0x00001000 + +#define THREAD_SIZE KERNEL_STACKS_SIZE +#define THREAD_SIZE_ORDER order_base_2(KERNEL_STACKS_SIZE / PAGE_SIZE) + +#ifndef __ASSEMBLY__ + +/* + * flag set/clear/test wrappers + * - pass TS_xxxx constants to these functions + */ + +static inline unsigned long set_ti_status_flag(struct thread_info *ti, + unsigned long flag) +{ + unsigned long old_flags; + + old_flags = ti->status; + ti->status = old_flags | flag; + + return ~old_flags & flag; +} + +static inline void clear_ti_status_flag(struct thread_info *ti, + unsigned long flag) +{ + ti->status &= ~flag; +} + +static inline unsigned long test_ti_status_flag(struct thread_info *ti, + unsigned long flag) +{ + return ti->status & flag; +} + +static inline unsigned long test_and_clear_ti_status_flag( + struct thread_info *ti, int flag) +{ + typeof(ti->status) status = ti->status; + ti->status = status & ~flag; + return status & flag; +} + +#define set_ts_flag(flag) \ + set_ti_status_flag(current_thread_info(), flag) +#define clear_ts_flag(flag) \ + clear_ti_status_flag(current_thread_info(), flag) +#define test_ts_flag(flag) \ + test_ti_status_flag(current_thread_info(), flag) +#define test_and_clear_ts_flag(flag) \ + test_and_clear_ti_status_flag(current_thread_info(), flag) + +#define native_current_thread_info() current_thread_info() +#define boot_current_thread_info() 
BOOT_READ_CURRENT_REG() + +/* + * Registers (%osr0 & %gdN) usually hold pointer to current thread info + * structure. But these registers used to hold CPU # while boot-time + * initialization process + */ +#define boot_set_current_thread_info(cpu_id) \ +({ \ + BOOT_WRITE_CURRENT_REG_VALUE(cpu_id); \ + E2K_SET_DGREG_NV(CURRENT_TASK_GREG, NULL); \ +}) + + /* support multithreading for protected mode */ +#define NUM_THREAD(x) ((x)->orig_psr_lw) /* number of threads (type = TYPE_INIT) */ +#define WAS_MULTITHREADING (current_thread_info()->g_list \ + && NUM_THREAD(current_thread_info()->g_list) >= 1) + +#ifdef CONFIG_PROTECTED_MODE +static inline void clear_g_list(struct thread_info *thread_info) +{ + /* These are initialized from interrupt handler when a thread + * writes SAP to a global variable or when creating a new thread + * (for details see comment in arch/e2k/3p/global_sp.c) */ + thread_info->g_list = NULL; + thread_info->multithread_address = 0; + thread_info->lock = NULL; +} +#else /* CONFIG_PROTECTED_MODE */ +void clear_g_list(struct thread_info *thread_info) { } +#endif + +#define thread_info_task(ti) \ + container_of(ti, struct task_struct, thread_info) + +#define INIT_OLD_U_HW_STACKS \ + .old_u_pcs_list = LIST_HEAD_INIT(init_task.thread_info.old_u_pcs_list), + +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT +# define INIT_LAST_IC_FLUSH_CPU .last_ic_flush_cpu = -1, +#else +# define INIT_LAST_IC_FLUSH_CPU +#endif + +/* + * Macros/functions for gaining access to the thread information structure. 
+ */ +#define INIT_THREAD_INFO(tsk) \ +{ \ + .addr_limit = KERNEL_DS, \ + .k_usd_lo = (e2k_usd_lo_t) { \ + .word = (unsigned long) init_stack + \ + KERNEL_C_STACK_OFFSET + KERNEL_C_STACK_SIZE, \ + }, \ + .k_usd_hi = (e2k_usd_hi_t) { \ + .fields.size = KERNEL_C_STACK_SIZE \ + }, \ + INIT_OLD_U_HW_STACKS \ + INIT_LAST_IC_FLUSH_CPU \ + .preempt_lazy_count = 0, \ +} + +extern void arch_task_cache_init(void); + +/* Hardware stacks must be aligned on page boundary */ +#define THREAD_ALIGN PAGE_SIZE + +#ifndef ASM_OFFSETS_C +extern void clear_thread_info(struct task_struct *task); +#endif /* ASM_OFFSETS_C */ + +/* + * Thread information allocation. + */ + +extern unsigned long *alloc_thread_stack_node(struct task_struct *, int); +extern void free_thread_stack(struct task_struct *tsk); +extern int free_vm_stack_cache(unsigned int cpu); +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ +#endif /* _E2K_THREAD_INFO_H */ diff --git a/arch/e2k/include/asm/time.h b/arch/e2k/include/asm/time.h new file mode 100644 index 000000000000..8ba575d37dd9 --- /dev/null +++ b/arch/e2k/include/asm/time.h @@ -0,0 +1,42 @@ +/* + * based on include/asm-i386/mach-default/mach_time.h + * + * Machine specific set RTC function for generic. + * Split out from time.c by Osamu Tomita + */ +#ifndef _E2K_TIME_H +#define _E2K_TIME_H + +#include +#include + +#define mach_set_wallclock(nowtime) (machine.set_wallclock(nowtime)) +#define mach_get_wallclock() (machine.get_wallclock()) + +extern void native_clock_init(void); + +#ifdef CONFIG_PARAVIRT +/* It need only to account stolen time by guest */ + +struct static_key; +extern struct static_key paravirt_steal_enabled; +extern unsigned long native_steal_clock(int cpu); +#endif /* CONFIG_PARAVIRT */ + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized guest and host kernel */ +#include +#else /* ! CONFIG_PARAVIRT_GUEST && ! 
CONFIG_KVM_GUEST_KERNEL */ +/* native kernel with or without virtualization support */ +static inline void arch_clock_init(void) +{ + native_clock_init(); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +extern void arch_clock_setup(void); + +#endif /* !_E2K_TIME_H */ diff --git a/arch/e2k/include/asm/timer.h b/arch/e2k/include/asm/timer.h new file mode 100644 index 000000000000..7e5cfd6f648e --- /dev/null +++ b/arch/e2k/include/asm/timer.h @@ -0,0 +1,12 @@ +#ifndef _ASM_TIMER_H +#define _ASM_TIMER_H + +#include +#include + +#define TICK_SIZE (tick_nsec / 1000) + +extern bool disable_apic_timer; +extern bool disable_epic_timer; + +#endif diff --git a/arch/e2k/include/asm/timex.h b/arch/e2k/include/asm/timex.h new file mode 100644 index 000000000000..a43f2f98dfca --- /dev/null +++ b/arch/e2k/include/asm/timex.h @@ -0,0 +1,77 @@ +/* + * linux/include/asm-e2k/timex.h + * + * E2K architecture timex specifications + */ +#ifndef _E2K_TIMEX_H_ +#define _E2K_TIMEX_H_ + +#include + +typedef unsigned long cycles_t; + +#define ARCH_HAS_READ_CURRENT_TIMER +static inline cycles_t get_cycles(void) +{ + return NATIVE_READ_CLKR_REG_VALUE(); +} +#define UNSET_CPU_FREQ ((u32)(-1)) +extern u32 cpu_freq_hz; +extern u64 cpu_clock_psec; /* number of pikoseconds in one CPU clock */ + +static inline long long cycles_2_psec(cycles_t cycles) +{ + return cycles * cpu_clock_psec; +} + +static inline long long cycles_2nsec(cycles_t cycles) +{ + return cycles_2_psec(cycles) / 1000; +} + +static inline long long cycles_2usec(cycles_t cycles) +{ + return cycles_2_psec(cycles) / 1000000; +} + +static inline cycles_t psecs_2_cycles(long long psecs) +{ + return psecs / cpu_clock_psec; +} + +static inline cycles_t nsecs_2cycles(long long nsecs) +{ + return psecs_2_cycles(nsecs * 1000); +} + +static inline cycles_t usecs_2cycles(long long usecs) +{ + return psecs_2_cycles(usecs * 1000000); +} + +static inline cycles_t get_cycles_rate(void) +{ + return (cycles_t)cpu_freq_hz; +} + +extern void __init 
native_time_init(void); +extern int native_read_current_timer(unsigned long *timer_val); + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized guest and host kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#else /* native kernel or native kernel with virtualization support */ +static inline void time_init(void) +{ + native_time_init(); +} +static inline int read_current_timer(unsigned long *timer_val) +{ + return native_read_current_timer(timer_val); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* _E2K_TIMEX_H_ */ diff --git a/arch/e2k/include/asm/tlb.h b/arch/e2k/include/asm/tlb.h new file mode 100644 index 000000000000..a18351aec0d9 --- /dev/null +++ b/arch/e2k/include/asm/tlb.h @@ -0,0 +1,29 @@ +#ifndef _E2K_TLB_H +#define _E2K_TLB_H + +#define tlb_flush(tlb) \ +{ \ + if (!(tlb)->fullmm && !(tlb)->need_flush_all) \ + flush_tlb_mm_range((tlb)->mm, (tlb)->start, (tlb)->end); \ + else \ + flush_tlb_mm((tlb)->mm); \ +} + +#define tlb_start_vma(tlb, vma) \ +do { \ +} while (0) + +#define tlb_end_vma(tlb, vma) \ +do { \ +} while (0) + +#define __tlb_remove_tlb_entry(tlb, ptep, address) \ + do { } while (0) + +#include + +#define __pud_free_tlb(tlb, pudp, start) pud_free((tlb)->mm, pudp) +#define __pmd_free_tlb(tlb, pmdp, start) pmd_free((tlb)->mm, pmdp) +#define __pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep) + +#endif /* _E2K_TLB_H */ diff --git a/arch/e2k/include/asm/tlb_regs_access.h b/arch/e2k/include/asm/tlb_regs_access.h new file mode 100644 index 000000000000..1d20f93c8a93 --- /dev/null +++ b/arch/e2k/include/asm/tlb_regs_access.h @@ -0,0 +1,153 @@ +#ifndef _E2K_TLB_REGS_ACCESS_H_ +#define _E2K_TLB_REGS_ACCESS_H_ + +#include +#include + +/* + * DTLB/ITLB registers operations + */ + +/* + * Write Data TLB tag register + */ +static inline void +write_DTLB_tag_reg(tlb_addr_t tlb_addr, tlb_tag_t tlb_tag) +{ + DebugMR("Write DTLB addr 0x%lx tag 
0x%lx\n", + tlb_addr_val(tlb_addr), tlb_tag_val(tlb_tag)); + WRITE_DTLB_REG(tlb_addr_val(tlb_addr), tlb_tag_val(tlb_tag)); +} + +/* + * Write Data TLB entry register + */ +static inline void +write_DTLB_entry_reg(tlb_addr_t tlb_addr, mmu_reg_t pte) +{ + DebugMR("Write DTLB addr 0x%lx entry 0x%llx\n", + tlb_addr_val(tlb_addr), pte); + WRITE_DTLB_REG(tlb_addr_val(tlb_addr), pte); +} + +/* + * Read Data TLB tag register + */ +static inline tlb_tag_t +read_DTLB_tag_reg(tlb_addr_t tlb_addr) +{ + tlb_tag_t tlb_tag; + tlb_tag_val(tlb_tag) = READ_DTLB_REG(tlb_addr_val(tlb_addr)); + DebugTLB("Read DTLB tag 0x%lx for addr 0x%lx\n", + tlb_tag_val(tlb_tag), tlb_addr_val(tlb_addr)); + return tlb_tag; +} +static inline tlb_tag_t +read_DTLB_va_tag_reg(e2k_addr_t virt_addr, int set_num, int large_page) +{ + tlb_addr_t tlb_addr; + tlb_addr = tlb_addr_tag_access; + tlb_addr = tlb_addr_set_vaddr_line_num(tlb_addr, virt_addr, + large_page); + tlb_addr = tlb_addr_set_set_num(tlb_addr, set_num); + return read_DTLB_tag_reg(tlb_addr); +} + +/* + * Read Data TLB entry register + */ +static inline mmu_reg_t +read_DTLB_entry_reg(tlb_addr_t tlb_addr) +{ + mmu_reg_t pte; + pte = READ_DTLB_REG(tlb_addr_val(tlb_addr)); + DebugTLB("Read DTLB entry 0x%llx for addr 0x%lx\n", + pte, tlb_addr_val(tlb_addr)); + return pte; +} +static inline mmu_reg_t +read_DTLB_va_entry_reg(e2k_addr_t virt_addr, int set_num, int large_page) +{ + tlb_addr_t tlb_addr; + tlb_addr = tlb_addr_entry_access; + tlb_addr = tlb_addr_set_vaddr_line_num(tlb_addr, virt_addr, + large_page); + tlb_addr = tlb_addr_set_set_num(tlb_addr, set_num); + return read_DTLB_entry_reg(tlb_addr); +} + +/* + * Get Entry probe for virtual address + */ + +#define GET_MMU_DTLB_ENTRY(virt_addr) \ + (unsigned long)ENTRY_PROBE_MMU_OP(probe_addr_val(virt_addr)) +static inline probe_entry_t +get_MMU_DTLB_ENTRY(e2k_addr_t virt_addr) +{ + DebugMR("Get DTLB entry probe for virtual address 0x%lx\n", + virt_addr); + return 
__probe_entry(GET_MMU_DTLB_ENTRY(virt_addr)); +} + +/* + * Get physical address for virtual address + */ + +#define GET_MMU_PHYS_ADDR(virt_addr) \ + ((unsigned long)ADDRESS_PROBE_MMU_OP(probe_addr_val(virt_addr))) +static inline probe_entry_t +get_MMU_phys_addr(e2k_addr_t virt_addr) +{ + DebugMR("Get physical address for virtual address 0x%lx\n", + virt_addr); + return __probe_entry(GET_MMU_PHYS_ADDR(virt_addr)); +} + +typedef struct tlb_set_state { + tlb_tag_t tlb_tag; + pte_t tlb_entry; +} tlb_set_state_t; + +typedef struct tlb_line_state { + e2k_addr_t va; + bool huge; + tlb_set_state_t sets[NATIVE_TLB_SETS_NUM]; +} tlb_line_state_t; + +static inline tlb_tag_t +get_va_tlb_set_tag(e2k_addr_t addr, int set_no, bool large_page) +{ + return read_DTLB_va_tag_reg(addr, set_no, large_page); +} + +static inline pte_t +get_va_tlb_set_entry(e2k_addr_t addr, int set_no, bool large_page) +{ + pte_t tlb_entry; + + pte_val(tlb_entry) = read_DTLB_va_entry_reg(addr, set_no, large_page); + return tlb_entry; +} + +static inline void +get_va_tlb_state(tlb_line_state_t *tlb, e2k_addr_t addr, bool large_page) +{ + tlb_set_state_t *set_state; + int set_no; + + tlb->va = addr; + tlb->huge = large_page; + + for (set_no = 0; set_no < NATIVE_TLB_SETS_NUM; set_no++) { + set_state = &tlb->sets[set_no]; + tlb_tag_t tlb_tag; + pte_t tlb_entry; + tlb_tag = get_va_tlb_set_tag(addr, set_no, large_page); + tlb_entry = get_va_tlb_set_entry(addr, set_no, large_page); + set_state->tlb_tag = tlb_tag; + set_state->tlb_entry; + } +} + +#endif /* !_E2K_TLB_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/tlb_regs_types.h b/arch/e2k/include/asm/tlb_regs_types.h new file mode 100644 index 000000000000..da9f27e55730 --- /dev/null +++ b/arch/e2k/include/asm/tlb_regs_types.h @@ -0,0 +1,428 @@ +#ifndef _E2K_TLB_REGS_TYPES_H_ +#define _E2K_TLB_REGS_TYPES_H_ + +#include +#include + + +/* now DTLB entry format is different on iset V6 vs V2-V5 */ +#if CONFIG_CPU_ISET >= 6 +# ifdef CONFIG_MMU_PT_V6 +# define 
MMU_IS_DTLB_V6() true +# else /* ! CONFIG_MMU_PT_V6 */ +# define MMU_IS_DTLB_V6() false +# endif /* CONFIG_MMU_PT_V6 */ +#elif CONFIG_CPU_ISET >= 2 +# define MMU_IS_DTLB_V6() false +#elif CONFIG_CPU_ISET == 0 +# ifdef E2K_P2V +# define MMU_IS_DTLB_V6() \ + (boot_machine.mmu_pt_v6) +# else /* ! E2K_P2V */ +# define MMU_IS_DTLB_V6() \ + (machine.mmu_pt_v6) +# endif /* E2K_P2V */ +#else /* CONFIG_CPU_ISET undefined or negative */ +# warning "Undefined CPU ISET VERSION #, MMU pt_v6 mode is defined dinamicaly" +# ifdef E2K_P2V +# define MMU_IS_DTLB_V6() \ + (boot_machine.mmu_pt_v6) +# else /* ! E2K_P2V */ +# define MMU_IS_DTLB_V6() \ + (machine.mmu_pt_v6) +# endif /* E2K_P2V */ +#endif /* CONFIG_CPU_ISET 0-6 */ + + +/* + * TLB (DTLB & ITLB) structure + */ + +#define NATIVE_TLB_LINES_BITS_NUM (machine.tlb_lines_bits_num) +#define BOOT_NATIVE_TLB_LINES_BITS_NUM (boot_machine.tlb_lines_bits_num) + +#define NATIVE_TLB_LINES_NUM (1 << NATIVE_TLB_LINES_BITS_NUM) +#define BOOT_NATIVE_TLB_LINES_NUM (1 << BOOT_NATIVE_TLB_LINES_BITS_NUM) + +#define NATIVE_MAX_TLB_LINES_NUM (1 << ES2_TLB_LINES_BITS_NUM) + +#define NATIVE_TLB_SETS_NUM 4 +#define BOOT_NATIVE_TLB_SETS_NUM NATIVE_TLB_SETS_NUM +#define NATIVE_TLB_LARGE_PAGE_SET_NO 3 /* large page entries */ + /* occupied this set in each */ + /* line */ + +/* + * DTLB/ITLB registers operations + */ + +/* DTLB/ITLB registers access operations address */ + +#ifndef __ASSEMBLY__ +typedef e2k_addr_t tlb_addr_t; +typedef tlb_addr_t dtlb_addr_t; +typedef tlb_addr_t itlb_addr_t; +#endif /* ! 
__ASSEMBLY__ */ + +#define tlb_addr_val(tlb_addr) (tlb_addr) +#define dtlb_addr_val(dtlb_addr) tlb_addr_val(dtlb_addr) +#define itlb_addr_val(itlb_addr) tlb_addr_val(itlb_addr) + +#define __tlb_addr(tlb_addr_val) (tlb_addr_val) +#define __dtlb_addr(dtlb_addr_val) __tlb_addr(dtlb_addr_val) +#define __itlb_addr(dtlb_addr_val) __tlb_addr(itlb_addr_val) + + +/* Virtual page address translation to TLB line & set */ + +#define _TLB_ADDR_LINE_NUM_SHIFT 12 /* [19:12] */ + +#define E2K_PG_4K_TO_TLB_LINE_NUM(virt_addr) \ + (((virt_addr) & (machine.tlb_addr_line_num)) >> \ + _TLB_ADDR_LINE_NUM_SHIFT) +#define BOOT_E2K_PG_4K_TO_TLB_LINE_NUM(virt_addr) \ + (((virt_addr) & (boot_machine.tlb_addr_line_num)) >> \ + _TLB_ADDR_LINE_NUM_SHIFT) + +#define E2K_PG_LARGE_TO_TLB_LINE_NUM(virt_addr) \ + (((virt_addr) & (machine.tlb_addr_line_num2)) >> \ + (machine.tlb_addr_line_num_shift2)) +#define BOOT_E2K_PG_LARGE_TO_TLB_LINE_NUM(virt_addr) \ + (((virt_addr) & (boot_machine.tlb_addr_line_num2)) >> \ + (boot_machine.tlb_addr_line_num_shift2)) + +#define VADDR_TO_TLB_LINE_NUM(virt_addr, large_page) \ + ((large_page) ? E2K_PG_LARGE_TO_TLB_LINE_NUM(virt_addr) : \ + E2K_PG_4K_TO_TLB_LINE_NUM(virt_addr)) +#define BOOT_VADDR_TO_TLB_LINE_NUM(virt_addr, large_page) \ + ((large_page) ? BOOT_E2K_PG_LARGE_TO_TLB_LINE_NUM(virt_addr) : \ + BOOT_E2K_PG_4K_TO_TLB_LINE_NUM(virt_addr)) + +#define _TLB_ADDR_TYPE 0x0000000000000007 /* type of operation */ +#define _TLB_ADDR_TAG_ACCESS 0x0000000000000000 /* tag access oper. */ + /* type */ +#define _TLB_ADDR_ENTRY_ACCESS 0x0000000000000001 /* entry access oper. 
*/ + /* type */ + +#define tlb_addr_set_type(tlb_addr, type) \ + (__tlb_addr((tlb_addr_val(tlb_addr) & ~_TLB_ADDR_TYPE) | \ + ((type) & _TLB_ADDR_TYPE))) +#define tlb_addr_set_tag_access(tlb_addr) \ + tlb_addr_set_type(tlb_addr, _TLB_ADDR_TAG_ACCESS) +#define tlb_addr_set_entry_access(tlb_addr) \ + tlb_addr_set_type(tlb_addr, _TLB_ADDR_ENTRY_ACCESS) +#define tlb_addr_tag_access _TLB_ADDR_TAG_ACCESS +#define tlb_addr_entry_access _TLB_ADDR_ENTRY_ACCESS + +#define tlb_addr_set_vaddr_line_num(tlb_addr, virt_addr, large_page) \ + (__tlb_addr((tlb_addr_val(tlb_addr) & \ + ~((machine.tlb_addr_line_num) | \ + (machine.tlb_addr_line_num2))) | \ + (VADDR_TO_TLB_LINE_NUM(virt_addr, large_page) << \ + _TLB_ADDR_LINE_NUM_SHIFT) | \ + (VADDR_TO_TLB_LINE_NUM(virt_addr, large_page) << \ + (machine.tlb_addr_line_num_shift2)))) +#define boot_tlb_addr_set_vaddr_line_num(tlb_addr, virt_addr, large_page) \ + (__tlb_addr((tlb_addr_val(tlb_addr) & \ + ~((boot_machine.tlb_addr_line_num) | \ + (boot_machine.tlb_addr_line_num2))) | \ + (BOOT_VADDR_TO_TLB_LINE_NUM(virt_addr, large_page) << \ + _TLB_ADDR_LINE_NUM_SHIFT) | \ + (BOOT_VADDR_TO_TLB_LINE_NUM(virt_addr, large_page) << \ + (boot_machine.tlb_addr_line_num_shift2)))) + +#define tlb_addr_set_set_num(tlb_addr, set_num) \ + (__tlb_addr((tlb_addr_val(tlb_addr) & \ + ~(machine.tlb_addr_set_num)) | \ + (((set_num) << (machine.tlb_addr_set_num_shift)) & \ + (machine.tlb_addr_set_num)))) +#define boot_tlb_addr_set_set_num(tlb_addr, set_num) \ + (__tlb_addr((tlb_addr_val(tlb_addr) & \ + ~(boot_machine.tlb_addr_set_num)) | \ + (((set_num) << (boot_machine.tlb_addr_set_num_shift)) & \ + (boot_machine.tlb_addr_set_num)))) + +/* DTLB/ITLB tag structure */ + +#ifndef __ASSEMBLY__ +typedef e2k_addr_t tlb_tag_t; +typedef tlb_tag_t dtlb_tag_t; +typedef tlb_tag_t itlb_tag_t; +#endif /* ! 
__ASSEMBLY__ */ + +#define tlb_tag_val(tlb_tag) (tlb_tag) +#define dtlb_tag_val(dtlb_tag) tlb_tag_val(dtlb_tag) +#define itlb_tag_val(itlb_tag) tlb_tag_val(itlb_tag) + +#define __tlb_tag(tlb_tag_val) (tlb_tag_val) +#define __dtlb_tag(dtlb_tag_val) __tlb_tag(dtlb_tag_val) +#define __itlb_tag(dtlb_tag_val) __tlb_tag(itlb_tag_val) + +#define _TLB_TAG_VA_TAG_SHIFT 7 /* [35: 7] */ +#define _TLB_TAG_CONTEXT_SHIFT 36 /* [47:36] */ + +#define _TLB_TAG_VA_TAG 0x0000000fffffff80 /* tag of virtual */ + /* address [47:19] */ + /* [18:12] - line # */ +#define _TLB_TAG_CONTEXT 0x0000fff000000000 /* context # */ +#define _TLB_TAG_ROOT 0x0001000000000000 /* should be 0 */ +#define _TLB_TAG_PHYS 0x0002000000000000 /* should be 0 */ +#define _TLB_TAG_G 0x0004000000000000 /* global page */ +#define _TLB_TAG_USED 0x0008000000000000 /* used flag */ +#define _TLB_TAG_VALID 0x0010000000000000 /* valid bit */ + +#define TLB_VADDR_TO_VA_TAG(virt_addr) \ + ((((virt_addr) >> PAGE_SHIFT) & _TLB_TAG_VA_TAG) << \ + _TLB_TAG_VA_TAG_SHIFT) + +#define _TLB_TAG_KERNEL_IMAGE (_TLB_TAG_VALID | _TLB_TAG_USED | \ + ((long)E2K_KERNEL_CONTEXT << _TLB_TAG_CONTEXT_SHIFT)) +#define _TLB_KERNEL_SWITCHING_IMAGE _TLB_TAG_KERNEL_IMAGE +#define _TLB_KERNEL_US_STACK (_TLB_TAG_VALID | _TLB_TAG_USED | \ + ((long)E2K_KERNEL_CONTEXT << _TLB_TAG_CONTEXT_SHIFT)) + +#define TLB_KERNEL_SWITCHING_TEXT __tlb_tag(_TLB_KERNEL_SWITCHING_IMAGE) +#define TLB_KERNEL_SWITCHING_DATA __tlb_tag(_TLB_KERNEL_SWITCHING_IMAGE) +#define TLB_KERNEL_SWITCHING_US_STACK __tlb_tag(_TLB_KERNEL_US_STACK) + +#define tlb_tag_get_va_tag(tlb_tag) \ + (tlb_tag_val(tlb_tag) & _TLB_TAG_VA_TAG) +#define tlb_tag_set_va_tag(tlb_tag, va_page) \ + (__tlb_tag((tlb_tag_val(tlb_tag) & ~_TLB_TAG_VA_TAG) | \ + ((va_page) & _TLB_TAG_VA_TAG))) +#define tlb_tag_set_vaddr_va_tag(tlb_tag, virt_addr) \ + (__tlb_tag((tlb_tag_val(tlb_tag) & ~_TLB_TAG_VA_TAG) | \ + TLB_VADDR_TO_VA_TAG(virt_addr))) + +#define tlb_tag_get_context(tlb_tag) \ + (tlb_tag_val(tlb_tag) & 
_TLB_TAG_CONTEXT) +#define tlb_tag_set_context(tlb_tag, context) \ + (__tlb_tag((tlb_tag_val(tlb_tag) & ~_TLB_TAG_CONTEXT) | \ + ((context) << _TLB_TAG_CONTEXT_SHIFT) & _TLB_TAG_CONTEXT)) + +/* + * This takes a virtual page address and protection bits to make + * TLB tag: tlb_tag_t + */ +#define mk_tlb_tag_vaddr(virt_addr, tag_pgprot) \ + (__tlb_tag(TLB_VADDR_TO_VA_TAG(virt_addr) | tlb_tag_val(tag_pgprot))) + +/* DTLB/ITLB entry structure is the same as PTE structure of page tables */ + +/* + * TLB address probe operations , TLB Entry_probe operations + */ + +/* Virtual address for TLB address probe & Entry probe operations */ +#ifndef __ASSEMBLY__ +typedef e2k_addr_t probe_addr_t; + +#define probe_addr_val(probe_addr) (probe_addr) + +#define __probe_addr(probe_addr_val) (probe_addr_val) +#endif /* __ASSEMBLY__ */ + +#define _PROBE_ADDR_VA 0x0000ffffffffffff /* virtual address */ + /* [47: 0] */ + +/* Result of TLB Entry probe operation */ +#ifndef __ASSEMBLY__ +typedef unsigned long probe_entry_t; + +#define probe_entry_val(probe_entry) (probe_entry) + +#define __probe_entry(probe_entry_val) (probe_entry_val) + +#include +#include +#include + +#if DTLB_ENTRY_PH_BOUND_V2 == DTLB_ENTRY_PH_BOUND_V6 +# define DTLB_ENTRY_PH_BOUND DTLB_ENTRY_PH_BOUND_V6 +#else +# error "Page table PH_BOUND bit is different for V2 vs V6" +#endif +#if DTLB_ENTRY_ILLEGAL_PAGE_V2 == DTLB_ENTRY_ILLEGAL_PAGE_V6 +# define DTLB_ENTRY_ILLEGAL_PAGE DTLB_ENTRY_ILLEGAL_PAGE_V6 +#else +# error "Page table ILLEGAL_PAGE bit is different for V2 vs V6" +#endif +#if DTLB_ENTRY_PAGE_MISS_V2 == DTLB_ENTRY_PAGE_MISS_V6 +# define DTLB_ENTRY_PAGE_MISS DTLB_ENTRY_PAGE_MISS_V6 +#else +# error "Page table PAGE_MISS bit is different for V2 vs V6" +#endif + +static inline probe_entry_t +mmu_fill_dtlb_val_flags(const uni_dtlb_t uni_flags, bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return fill_dtlb_val_v6_flags(uni_flags); + else + return fill_dtlb_val_v2_flags(uni_flags); +} +static inline probe_entry_t 
+mmu_get_dtlb_val_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags, + bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return get_dtlb_val_v6_flags(dtlb_val, uni_flags); + else + return get_dtlb_val_v2_flags(dtlb_val, uni_flags); +} +static inline bool +mmu_test_dtlb_val_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags, + bool mmu_pt_v6) +{ + return mmu_get_dtlb_val_flags(dtlb_val, uni_flags, mmu_pt_v6) != 0; +} +static inline probe_entry_t +mmu_set_dtlb_val_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags, + bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return set_dtlb_val_v6_flags(dtlb_val, uni_flags); + else + return set_dtlb_val_v2_flags(dtlb_val, uni_flags); +} +static inline probe_entry_t +mmu_clear_dtlb_val_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags, + bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return clear_dtlb_val_v6_flags(dtlb_val, uni_flags); + else + return clear_dtlb_val_v2_flags(dtlb_val, uni_flags); +} +static inline probe_entry_t +fill_dtlb_val_flags(const uni_dtlb_t uni_flags) +{ + return mmu_fill_dtlb_val_flags(uni_flags, MMU_IS_DTLB_V6()); +} +static inline probe_entry_t +get_dtlb_val_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags) +{ + return mmu_get_dtlb_val_flags(dtlb_val, uni_flags, MMU_IS_DTLB_V6()); +} +static inline bool +test_dtlb_val_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags) +{ + return mmu_test_dtlb_val_flags(dtlb_val, uni_flags, MMU_IS_DTLB_V6()); +} +static inline probe_entry_t +set_dtlb_val_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags) +{ + return mmu_set_dtlb_val_flags(dtlb_val, uni_flags, MMU_IS_DTLB_V6()); +} +static inline probe_entry_t +clear_dtlb_val_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags) +{ + return mmu_clear_dtlb_val_flags(dtlb_val, uni_flags, MMU_IS_DTLB_V6()); +} +#define DTLB_ENTRY_INIT(uni_flags) fill_dtlb_val_flags(uni_flags) +#define DTLB_ENTRY_GET(dtlb_val, uni_flags) \ + get_dtlb_val_flags(dtlb_val, uni_flags) +#define DTLB_ENTRY_TEST(dtlb_val, 
uni_flags) \ + test_dtlb_val_flags(dtlb_val, uni_flags) +#define DTLB_ENTRY_SET(dtlb_val, uni_flags) \ + set_dtlb_val_flags(dtlb_val, uni_flags) +#define DTLB_ENTRY_CLEAR(dtlb_val, uni_flags) \ + clear_dtlb_val_flags(dtlb_val, uni_flags) + +#define DTLB_ENTRY_ERROR_MASK DTLB_ENTRY_INIT(UNI_DTLB_ERROR_MASK) +#define DTLB_ENTRY_MISS_LEVEL_MASK \ + DTLB_ENTRY_INIT(UNI_DTLB_MISS_LEVEL) +#define DTLB_ENTRY_PROBE_SUCCESSFUL \ + DTLB_ENTRY_INIT(UNI_DTLB_SUCCESSFUL) +#define DTLB_ENTRY_RES_BITS DTLB_ENTRY_INIT(UNI_DTLB_RES_BITS) +#define DTLB_ENTRY_WR DTLB_ENTRY_INIT(UNI_PAGE_WRITE) +#define DTLB_ENTRY_PV DTLB_ENTRY_INIT(UNI_PAGE_PRIV) +#define DTLB_ENTRY_VVA DTLB_ENTRY_INIT(UNI_PAGE_VALID) +#define DTLB_EP_RES DTLB_ENTRY_INIT(UNI_DTLB_EP_RES) +#define DTLB_EP_FAULT_RES (~DTLB_EP_RES) +#define DTLB_ENTRY_TEST_WRITEABLE(dtlb_val) \ + DTLB_ENTRY_TEST(dtlb_val, UNI_PAGE_WRITE) +#define DTLB_ENTRY_TEST_VVA(dtlb_val) \ + DTLB_ENTRY_TEST(dtlb_val, UNI_PAGE_VALID) +#define DTLB_ENTRY_TEST_SUCCESSFUL(dtlb_val) \ + ((MMU_IS_DTLB_V6()) ? 
\ + DTLB_ENTRY_TEST(dtlb_val, UNI_DTLB_SUCCESSFUL) \ + : \ + !DTLB_ENTRY_TEST(dtlb_val, UNI_DTLB_SUCCESSFUL)) + +static inline probe_entry_t +mmu_phys_addr_to_dtlb_pha(e2k_addr_t phys_addr, bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return PA_TO_DTLB_ENTRY_PHA_V6(phys_addr); + else + return PA_TO_DTLB_ENTRY_PHA_V2(phys_addr); +} +static inline e2k_addr_t +mmu_dtlb_pha_to_phys_addr(probe_entry_t dtlb_val, bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return DTLB_ENTRY_PHA_TO_PA_V6(dtlb_val); + else + return DTLB_ENTRY_PHA_TO_PA_V2(dtlb_val); +} + +static inline probe_entry_t +phys_addr_to_dtlb_pha(e2k_addr_t phys_addr) +{ + return mmu_phys_addr_to_dtlb_pha(phys_addr, MMU_IS_DTLB_V6()); +} +static inline e2k_addr_t +dtlb_pha_to_phys_addr(probe_entry_t dtlb_val) +{ + return mmu_dtlb_pha_to_phys_addr(dtlb_val, MMU_IS_DTLB_V6()); +} +#define PA_TO_DTLB_ENTRY_PHA(phys_addr) phys_addr_to_dtlb_pha(phys_addr) +#define DTLB_ENTRY_PHA_TO_PA(dtlb_val) dtlb_pha_to_phys_addr(dtlb_val) + +/* physical memory bound (x86) [63] */ +#define PH_BOUND_EP_RES DTLB_ENTRY_PH_BOUND +/* illegal page [62] */ +#define ILLEGAL_PAGE_EP_RES DTLB_ENTRY_ILLEGAL_PAGE +/* page miss [61] */ +#define PAGE_MISS_EP_RES DTLB_ENTRY_PAGE_MISS +/* miss level [60:59] */ +#define MISS_LEVEL_EP_RES DTLB_ENTRY_MISS_LEVEL_MASK +/* reserved bits [57] */ +#define RES_BITS_EP_RES DTLB_ENTRY_RES_BITS + +/* + * DTLB address probe result format + */ +/* Physical address of successfull DTLB address probe [39: 0]/[47:0] */ +#define PH_ADDR_AP_RES DTLB_ENTRY_INIT(UNI_DTLB_PH_ADDR_AP_RES) +/* AP disable result [62] */ +#define DISABLE_AP_RES ILLEGAL_PAGE_EP_RES +/* page miss [61] */ +#define PAGE_MISS_AP_RES PAGE_MISS_EP_RES +/* illegal page [58] */ +#define ILLEGAL_PAGE_AP_RES ILLEGAL_PAGE_EP_RES + +#define PH_ADDR_IS_PRESENT(ap_res) (((ap_res) & ~PH_ADDR_AP_RES) == 0) +#define PH_ADDR_IS_MISS(ap_res) ((ap_res) & PAGE_MISS_AP_RES) +#define PH_ADDR_IS_VALID(ap_res) ((PH_ADDR_IS_PRESENT(ap_res) || \ + PH_ADDR_IS_MISS(ap_res))) +#define 
PH_ADDR_IS_INVALID(ap_res) ((ap_res) & ILLEGAL_PAGE_AP_RES) +#define GET_PROBE_PH_ADDR(ap_res) ((ap_res) & PH_ADDR_AP_RES)\ + +#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native kernel without any virtualization */ +/* or it is native host kernel with virtualization support */ +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#else + #error "Unknown virtualization type" +#endif /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __ASSEMBLY__ */ + +#endif diff --git a/arch/e2k/include/asm/tlbflush.h b/arch/e2k/include/asm/tlbflush.h new file mode 100644 index 000000000000..fabcd4422739 --- /dev/null +++ b/arch/e2k/include/asm/tlbflush.h @@ -0,0 +1,131 @@ +/* $Id: tlbflush.h,v 1.3 2006/09/12 13:12:54 tokot Exp $ + * pgalloc.h: the functions and defines necessary to allocate + * page tables. + * + * Copyright 2001 Salavat S. 
Guiliazov (atic@mcst.ru) + */ +#ifndef _E2K_TLBFLUSH_H +#define _E2K_TLBFLUSH_H + +#include + + +/* + * TLB flushing: + * + * - flush_tlb_all() flushes all processes TLBs + * - flush_tlb_mm(mm) flushes the specified mm context TLB's + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_range(mm, start, end) flushes a range of pages + */ + +extern void __flush_tlb_all(void); +extern void __flush_tlb_mm(struct mm_struct *mm); +extern void __flush_tlb_page(struct mm_struct *mm, e2k_addr_t addr); +extern void __flush_tlb_range(struct mm_struct *mm, e2k_addr_t start, + e2k_addr_t end); +extern void __flush_pmd_tlb_range(struct mm_struct *mm, + unsigned long start, unsigned long end); +extern void __flush_tlb_pgtables(struct mm_struct *mm, e2k_addr_t start, + e2k_addr_t end); +extern void __flush_tlb_page_and_pgtables(struct mm_struct *mm, + unsigned long addr); + +extern void __flush_tlb_range_and_pgtables(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end); +extern void __flush_tlb_address(e2k_addr_t addr); +extern void __flush_tlb_address_pgtables(e2k_addr_t addr); + +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT +extern void __flush_cpu_root_pt_mm(struct mm_struct *mm); +extern void __flush_cpu_root_pt(void); +#else /* ! 
CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ +#define __flush_cpu_root_pt_mm(mm) +#define __flush_cpu_root_pt() +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native guest kernel */ +#include +#define flush_tlb_all kvm_flush_tlb_all +#define flush_tlb_mm kvm_flush_tlb_mm +#define flush_tlb_page(vma, addr) kvm_flush_tlb_page(vma, addr) +#define flush_tlb_range(vma, start, end) \ + kvm_flush_tlb_range(vma->vm_mm, start, end) +#define flush_tlb_kernel_range(start, end) \ + kvm_flush_tlb_kernel_range(start, end) +#define flush_pmd_tlb_range(vma, start, end) \ + kvm_flush_pmd_tlb_range(vma->vm_mm, start, end) +#define flush_tlb_mm_range(mm, start, end) \ + kvm_flush_tlb_range(mm, start, end) +#define flush_tlb_range_and_pgtables(mm, start, end) \ + kvm_flush_tlb_range_and_pgtables(mm, start, end) +#else /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ +/* it is native kernel without virtualization support */ +/* or native kernel with virtualization support */ + +#ifndef CONFIG_SMP + +#define flush_tlb_all __flush_tlb_all +#define flush_tlb_mm __flush_tlb_mm +#define flush_tlb_page(vma, addr) __flush_tlb_page(vma->vm_mm, addr) +#define flush_tlb_range(vma, start, end) \ + __flush_tlb_range(vma->vm_mm, start, end) +#define flush_pmd_tlb_range(vma, start, end) \ + __flush_pmd_tlb_range(vma->vm_mm, start, end) +#define flush_tlb_mm_range(mm, start, end) \ + __flush_tlb_range(mm, start, end) +#define flush_tlb_range_and_pgtables(mm, start, end) \ + __flush_tlb_range_and_pgtables(mm, start, end) + +#else /* CONFIG_SMP */ + +#include + +extern void native_smp_flush_tlb_all(void); +extern void native_smp_flush_tlb_mm(struct mm_struct *mm); +extern void native_smp_flush_tlb_page(struct vm_area_struct *vma, + e2k_addr_t addr); +extern void native_smp_flush_tlb_range(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t 
end); +extern void native_smp_flush_pmd_tlb_range(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end); +extern void native_smp_flush_tlb_range_and_pgtables(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end); + +#define flush_tlb_all native_smp_flush_tlb_all +#define flush_tlb_mm native_smp_flush_tlb_mm +#define flush_tlb_page(vma, addr) native_smp_flush_tlb_page(vma, addr) +#define flush_tlb_range(vma, start, end) \ + native_smp_flush_tlb_range(vma->vm_mm, start, end) +#define flush_pmd_tlb_range(vma, start, end) \ + native_smp_flush_pmd_tlb_range(vma->vm_mm, start, end) +#define flush_tlb_mm_range(mm, start, end) \ + native_smp_flush_tlb_range(mm, start, end) +#define flush_tlb_range_and_pgtables(mm, start, end) \ + native_smp_flush_tlb_range_and_pgtables(mm, start, end) + +#endif /* CONFIG_SMP */ + +#define flush_tlb_kernel_range(start, end) flush_tlb_all() + +#endif /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ + + +static inline void update_mmu_cache(struct vm_area_struct *vma, + unsigned long address, pte_t *pte) +{ +} + +static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmd) +{ +} + +#include + +#endif /* _E2K_TLBFLUSH_H */ diff --git a/arch/e2k/include/asm/topology.h b/arch/e2k/include/asm/topology.h new file mode 100644 index 000000000000..2353ab22fdc1 --- /dev/null +++ b/arch/e2k/include/asm/topology.h @@ -0,0 +1,173 @@ +#ifndef _E2K_TOPOLOGY_H_ +#define _E2K_TOPOLOGY_H_ + +#include +#ifdef CONFIG_NUMA +#include +#endif /* CONFIG_NUMA */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Max CPUS needs to allocate static array of structures */ +#define MAX_NR_CPUS CONFIG_NR_CPUS + +/* + * IO links/controllers/buses topology: + * each node of e2k machines can have from 1 to MAX_NODE_IOLINKS IO links + * which can be connected to IOHUB or RDMA + * Real possible number of IO links on node is described by following + * macroses for 
every type of machines + */ + +#define MAX_NODE_IOLINKS E2K_MAX_NODE_IOLINKS +#define E2K_NODE_IOLINKS (machine.node_iolinks) +#define MACH_NODE_NUMIOLINKS E2K_NODE_IOLINKS + +/* + * IOLINK can be represented by global domain number (unique at system and + * corresponds to bit number at iolinkmask_t bit map structure) + * and as pair: node # and local link number on the node. + * It needs convert from one presentation to other + */ + +#define node_iolink_to_domain(node, link) \ + ((node) * (E2K_NODE_IOLINKS) + (link)) +#define node_iohub_to_domain(node, link) \ + node_iolink_to_domain((node), (link)) +#define node_rdma_to_domain(node, link) \ + node_iolink_to_domain((node), (link)) +#define iolink_domain_to_node(domain) \ + ((domain) / (E2K_NODE_IOLINKS)) +#define iolink_domain_to_link(domain) \ + ((domain) % (E2K_NODE_IOLINKS)) +#define iohub_domain_to_node(domain) iolink_domain_to_node(domain) +#define iohub_domain_to_link(domain) iolink_domain_to_link(domain) + +#define for_each_iolink_of_node(link) \ + for ((link) = 0; (link) < E2K_NODE_IOLINKS; (link) ++) + +#define pcibus_to_node(bus) __pcibus_to_node(bus) +#define pcibus_to_link(bus) __pcibus_to_link(bus) + +#define mach_early_iohub_online(node, link) \ + e2k_early_iohub_online((node), (link)) +#define mach_early_sic_init() + +extern int __init_recv cpuid_to_cpu(int cpuid); + +#ifdef CONFIG_L_LOCAL_APIC +DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid); +#define cpu_to_cpuid(cpu) early_per_cpu(x86_cpu_to_apicid, cpu) +#else +/* + * That case wouldn't work, we should delete CONFIG_L_LOCAL_APIC in future + */ +#define cpu_to_cpuid(cpu) BUILD_BUG() +#endif + +#ifdef CONFIG_NUMA +extern void __init numa_init(void); + +extern s16 __apicid_to_node[NR_CPUS]; + +extern int __nodedata __cpu_to_node[NR_CPUS]; +#define cpu_to_node(cpu) __cpu_to_node[cpu] + +extern cpumask_t __nodedata __node_to_cpu_mask[MAX_NUMNODES]; +#define node_to_cpu_mask(node) __node_to_cpu_mask[node] + +#define numa_node_id() 
(cpu_to_node(raw_smp_processor_id())) + +#define __node_to_cpumask_and(node, cpu_mask) \ +({ \ + cpumask_t cpumask = node_to_cpu_mask(node); \ + cpumask_and(&cpumask, &cpumask, &cpu_mask); \ + cpumask; \ +}) + +#define node_to_cpumask(node) \ + __node_to_cpumask_and(node, *cpu_online_mask) +#define node_to_present_cpumask(node) \ + __node_to_cpumask_and(node, *cpu_present_mask) + +#define __node_to_first_cpu(node, cpu_mask) \ +({ \ + cpumask_t node_cpumask; \ + node_cpumask = __node_to_cpumask_and(node, cpu_mask); \ + cpumask_first((const struct cpumask *)&node_cpumask); \ +}) + +#define node_to_first_cpu(node) \ + __node_to_first_cpu(node, *cpu_online_mask) +#define node_to_first_present_cpu(node) \ + __node_to_first_cpu(node, *cpu_present_mask) + +#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == NUMA_NO_NODE ? \ + cpu_online_mask : \ + cpumask_of_node(pcibus_to_node(bus))) + +/* Mappings between node number and cpus on that node. */ +extern struct cpumask node_to_cpumask_map[MAX_NUMNODES]; + +/* Returns a pointer to the cpumask of CPUs on Node 'node'. */ +static inline const struct cpumask *cpumask_of_node(int node) +{ + return &node_to_cpumask_map[node]; +} + +extern void setup_node_to_cpumask_map(void); + +extern nodemask_t __nodedata node_has_dup_kernel_map; +extern atomic_t __nodedata node_has_dup_kernel_num; +extern int __nodedata all_nodes_dup_kernel_nid[/*MAX_NUMNODES*/]; + +#define node_dup_kernel_nid(nid) (all_nodes_dup_kernel_nid[nid]) +#define THERE_IS_DUP_KERNEL atomic_read(&node_has_dup_kernel_num) +#define DUP_KERNEL_NUM \ + (atomic_read(&node_has_dup_kernel_num) + 1) + +#define topology_physical_package_id(cpu) cpu_to_node(cpu) +#else /* ! 
CONFIG_NUMA */ + +#define numa_node_id() 0 + +static inline void numa_init(void) { } + +#define node_has_dup_kernel_map nodemask_of_node(0) +#define node_has_dup_kernel_num 0 +#define node_dup_kernel_nid(nid) 0 +#define THERE_IS_DUP_KERNEL 0 + +#define node_to_first_cpu(node) 0 +#define node_to_first_present_cpu(node) 0 +#define node_to_present_cpumask(node) (*cpu_present_mask) +#define node_to_possible_cpumask(node) cpumask_of_cpu(0) + +#define topology_physical_package_id(cpu) 0 +#endif /* CONFIG_NUMA */ + +#define node_has_online_mem(nid) (nodes_phys_mem[nid].pfns_num != 0) + +#define topology_core_id(cpu) (cpu) +#define topology_core_cpumask(cpu) cpumask_of_node(cpu_to_node(cpu)) + +#include + +static inline void arch_fix_phys_package_id(int num, u32 slot) +{ +} + +static inline int is_duplicated_code(unsigned long ip) +{ + return ip >= (unsigned long) _stext && ip < (unsigned long) _etext; +} +extern const struct cpumask *cpu_coregroup_mask(int cpu); +#endif /* _E2K_TOPOLOGY_H_ */ diff --git a/arch/e2k/include/asm/trace-clock.h b/arch/e2k/include/asm/trace-clock.h new file mode 100644 index 000000000000..b5576cad51cf --- /dev/null +++ b/arch/e2k/include/asm/trace-clock.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2008, Mathieu Desnoyers + * + * Trace clock definitions for Sparc64. 
+ */ + +#ifndef _ASM_E2K_TRACE_CLOCK_H +#define _ASM_E2K_TRACE_CLOCK_H + +#include + +static inline u32 trace_clock_read32(void) +{ + return get_cycles(); +} + +static inline u64 trace_clock_read64(void) +{ + return get_cycles(); +} + +static inline unsigned int trace_clock_frequency(void) +{ + return get_cycles_rate(); +} + +static inline u32 trace_clock_freq_scale(void) +{ + return 1; +} + +static inline void get_trace_clock(void) +{ +} + +static inline void put_trace_clock(void) +{ +} + +static inline void set_trace_clock_is_sync(int state) +{ +} +#endif /* _ASM_E2K_TRACE_CLOCK_H */ diff --git a/arch/e2k/include/asm/trace-defs.h b/arch/e2k/include/asm/trace-defs.h new file mode 100644 index 000000000000..89d25896714e --- /dev/null +++ b/arch/e2k/include/asm/trace-defs.h @@ -0,0 +1,115 @@ +#ifndef _E2K_TRACE_DEFS_H_ +#define _E2K_TRACE_DEFS_H_ + +#include + +#include +#include + +static inline void +trace_get_va_translation(struct mm_struct *mm, e2k_addr_t address, + pgdval_t *pgd, pudval_t *pud, pmdval_t *pmd, pteval_t *pte, int *pt_level) +{ + pgd_t *pgdp; + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep; + + if (likely(address < TASK_SIZE)) { + pgdp = pgd_offset(mm, address); + + *pgd = pgd_val(*pgdp); + *pt_level = E2K_PGD_LEVEL_NUM; + + if (!pgd_huge(*pgdp) && !pgd_none(*pgdp) && !pgd_bad(*pgdp)) { + pudp = pud_offset(pgdp, address); + + *pud = pud_val(*pudp); + *pt_level = E2K_PUD_LEVEL_NUM; + + if (!pud_huge(*pudp) && !pud_none(*pudp) && + !pud_bad(*pudp)) { + pmdp = pmd_offset(pudp, address); + + *pmd = pmd_val(*pmdp); + *pt_level = E2K_PMD_LEVEL_NUM; + + if (!pmd_huge(*pmdp) && !pmd_none(*pmdp) && + !pmd_bad(*pmdp)) { + ptep = pte_offset_map(pmdp, address); + + *pte = pte_val(*ptep); + *pt_level = E2K_PTE_LEVEL_NUM; + } + } + } + return; + } + + pgdp = pgd_offset_k(address); + *pgd = pgd_val(*pgdp); + *pt_level = E2K_PGD_LEVEL_NUM; + + if (!kernel_pgd_huge(*pgdp) && !pgd_none(*pgdp) && !pgd_bad(*pgdp)) { + pudp = pud_offset(pgdp, address); + *pud = 
pud_val(*pudp); + *pt_level = E2K_PUD_LEVEL_NUM; + + if (!kernel_pud_huge(*pudp) && !pud_none(*pudp) && + !pud_bad(*pudp)) { + pmdp = pmd_offset(pudp, address); + *pmd = pmd_val(*pmdp); + *pt_level = E2K_PMD_LEVEL_NUM; + + if (!kernel_pmd_huge(*pmdp) && !pmd_none(*pmdp) && + !pmd_bad(*pmdp)) { + ptep = pte_offset_kernel(pmdp, address); + *pte = pte_val(*ptep); + *pt_level = E2K_PTE_LEVEL_NUM; + } + } + } +} + +/* + * Save DTLB entries. + * + * Do not access not existing entries to avoid + * creating "empty" records in DTLB for no reason. + */ +static inline void +trace_get_dtlb_translation(struct mm_struct *mm, e2k_addr_t address, + u64 *dtlb_pgd, u64 *dtlb_pud, u64 *dtlb_pmd, u64 *dtlb_pte, int pt_level) +{ + *dtlb_pgd = get_MMU_DTLB_ENTRY(address); + + if (pt_level <= E2K_PUD_LEVEL_NUM) + *dtlb_pud = get_MMU_DTLB_ENTRY(pud_virt_offset(address)); + + if (pt_level <= E2K_PMD_LEVEL_NUM) + *dtlb_pmd = get_MMU_DTLB_ENTRY(pmd_virt_offset(address)); + + if (pt_level <= E2K_PTE_LEVEL_NUM) + *dtlb_pte = get_MMU_DTLB_ENTRY(pte_virt_offset(address)); +} + +#define mmu_print_pt_flags(entry, print, mmu_pt_v6) \ + (mmu_pt_v6) ? E2K_TRACE_PRINT_PT_V6_FLAGS(entry, print) \ + : \ + E2K_TRACE_PRINT_PT_V2_FLAGS(entry, print) +#define print_pt_flags(entry, print) \ + mmu_print_pt_flags(entry, print, MMU_IS_PT_V6()) + +#define E2K_TRACE_PRINT_PT_FLAGS(entry, print) print_pt_flags(entry, print) + + +#define mmu_print_dtlb_entry(entry, mmu_dtlb_v6) \ + ((mmu_dtlb_v6) ? 
E2K_TRACE_PRINT_DTLB_ENTRY_V6(entry) \ + : \ + E2K_TRACE_PRINT_DTLB_ENTRY_V2(entry)) +#define print_dtlb_entry(entry) \ + mmu_print_dtlb_entry(entry, MMU_IS_DTLB_V6()) + +#define E2K_TRACE_PRINT_DTLB(entry) print_dtlb_entry(entry) + +#endif /* _E2K_TRACE_DEFS_H_ */ diff --git a/arch/e2k/include/asm/trace-mmu-dtlb-v2.h b/arch/e2k/include/asm/trace-mmu-dtlb-v2.h new file mode 100644 index 000000000000..bcee44ac1e2a --- /dev/null +++ b/arch/e2k/include/asm/trace-mmu-dtlb-v2.h @@ -0,0 +1,37 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM e2k + +#if !defined(_TRACE_E2K_MMU_DTLB_V2_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_E2K_MMU_DTLB_V2_H + +#include + +#define E2K_TRACE_PRINT_DTLB_ENTRY_V2(entry) \ + ((entry & DTLB_ENTRY_ERROR_MASK_V2) ? \ + __print_flags(entry & DTLB_ENTRY_ERROR_MASK_V2, "|", \ + { DTLB_ENTRY_PH_BOUND_V2, "ph_bound" }, \ + { DTLB_ENTRY_PAGE_MISS_V2, "page_miss" }, \ + { DTLB_ENTRY_PROBE_DISABLED_V2, \ + "DTLB probe disabled" }, \ + { DTLB_ENTRY_RES_BITS_V2, "res_bits" } \ + ) : \ + __print_flags(entry & ~DTLB_ENTRY_PHA_V2, "|", \ + { DTLB_ENTRY_WR_V2, "writable" }, \ + { DTLB_ENTRY_NON_EX_U_S_V2, "Non_ex-U_S" }, \ + { DTLB_ENTRY_PWT_V2, "PWT" }, \ + { DTLB_ENTRY_PCD1_V2, "CD1" }, \ + { DTLB_ENTRY_PCD2_V2, "CD2" }, \ + { DTLB_ENTRY_D_V2, "dirty" }, \ + { DTLB_ENTRY_G_V2, "global" }, \ + { DTLB_ENTRY_NWA_V2, "NWA" }, \ + { DTLB_ENTRY_VVA_V2, "valid" }, \ + { DTLB_ENTRY_PV_V2, "privileged" }, \ + { DTLB_ENTRY_INT_PR_NON_EX_V2, \ + "int_pr-non_ex" }, \ + { DTLB_ENTRY_INTL_RD_V2, "intl_rd" }, \ + { DTLB_ENTRY_INTL_WR_V2, "intl_wr" }, \ + { DTLB_ENTRY_WP_V2, "WP" }, \ + { DTLB_ENTRY_UC_V2, "UC" } \ + )) + +#endif /* _TRACE_E2K_MMU_DTLB_V2_H */ diff --git a/arch/e2k/include/asm/trace-mmu-dtlb-v6.h b/arch/e2k/include/asm/trace-mmu-dtlb-v6.h new file mode 100644 index 000000000000..8dc7f19bfe29 --- /dev/null +++ b/arch/e2k/include/asm/trace-mmu-dtlb-v6.h @@ -0,0 +1,42 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM e2k + +#if 
!defined(_TRACE_E2K_MMU_DTLB_V6_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_E2K_MMU_DTLB_V6_H + +#include + +#define E2K_TRACE_PRINT_DTLB_ENTRY_V6(entry) \ + ((entry & DTLB_ENTRY_ERROR_MASK_V6) ? \ + __print_flags(entry & DTLB_ENTRY_ERROR_MASK_V6, "|", \ + { DTLB_ENTRY_PH_BOUND_V6, "ph_bound" }, \ + { DTLB_ENTRY_PAGE_MISS_V6, "page_miss" }, \ + { DTLB_ENTRY_PROBE_SUCCESSFUL_V6, \ + "DTLB probe successful" }, \ + { DTLB_ENTRY_RES_BITS_V6, "res_bits" } \ + ) : \ + ({ __print_flags(entry & ~DTLB_ENTRY_PHA_V6, "|", \ + { DTLB_ENTRY_WR_exc_V6, "writable" }, \ + { DTLB_ENTRY_PV_or_U_S_V6, "Priv/U_S" }, \ + { DTLB_ENTRY_D_V6, "dirty" }, \ + { DTLB_ENTRY_G_V6, "global" }, \ + { DTLB_ENTRY_NWA_V6, "NWA" }, \ + { DTLB_ENTRY_VVA_V6, "valid" }, \ + { DTLB_ENTRY_NON_EX_V6, "non_ex" }, \ + { DTLB_ENTRY_INT_PR_V6, "int_pr" }, \ + { DTLB_ENTRY_INTL_RD_V6, "intl_rd" }, \ + { DTLB_ENTRY_INTL_WR_V6, "intl_wr" }, \ + { DTLB_ENTRY_WR_int_V6, "WR_int" } \ + ); \ + __print_symbolic(DTLB_ENTRY_MT_exc_GET_VAL(entry), \ + { GEN_CACHE_MT, "General Cacheable" }, \ + { GEN_NON_CACHE_MT, "General nonCacheable" }, \ + { EXT_PREFETCH_MT, "External Prefetchable" }, \ + { EXT_NON_PREFETCH_MT, "External nonPrefetchable" }, \ + { EXT_CONFIG_MT, "External Configuration" }, \ + { 2, "Reserved-2" }, \ + { 3, "Reserved-3" }, \ + { 5, "Reserved-5" }); \ + })) + +#endif /* _TRACE_E2K_MMU_DTLB_V6_H */ diff --git a/arch/e2k/include/asm/trace.h b/arch/e2k/include/asm/trace.h new file mode 100644 index 000000000000..533bb79f5a25 --- /dev/null +++ b/arch/e2k/include/asm/trace.h @@ -0,0 +1,364 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM e2k + +#if !defined(_TRACE_E2K_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_E2K_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define E2K_TC_TYPE_STORE (1ULL << 17) +#define E2K_TC_TYPE_S_F (1ULL << 19) +#define E2K_TC_TYPE_ROOT (1ULL << 27) +#define E2K_TC_TYPE_SCAL (1ULL << 
28) +#define E2K_TC_TYPE_SRU (1ULL << 29) +#define E2K_TC_TYPE_SPEC (1ULL << 30) +#define E2K_TC_TYPE_PM (1ULL << 31) +#define E2K_TC_TYPE_NUM_ALIGN (1ULL << 50) +#define E2K_TC_TYPE_EMPT (1ULL << 51) +#define E2K_TC_TYPE_CLW (1ULL << 52) + +#define E2K_TC_TYPE (E2K_TC_TYPE_STORE | E2K_TC_TYPE_S_F | E2K_TC_TYPE_ROOT | \ + E2K_TC_TYPE_SCAL | E2K_TC_TYPE_SRU | E2K_TC_TYPE_SPEC | \ + E2K_TC_TYPE_PM | E2K_TC_TYPE_NUM_ALIGN | \ + E2K_TC_TYPE_EMPT | E2K_TC_TYPE_CLW) + +#define E2K_FAULT_TYPE_GLOBAL_SP (1ULL << 0) +#define E2K_FAULT_TYPE_PAGE_BOUND (1ULL << 1) +#define E2K_FAULT_TYPE_EXC_MEM_LOCK (1ULL << 2) +#define E2K_FAULT_TYPE_PH_PR_PAGE (1ULL << 3) +#define E2K_FAULT_TYPE_IO_PAGE (1ULL << 4) +#define E2K_FAULT_TYPE_ISYS_PAGE (1ULL << 5) +#define E2K_FAULT_TYPE_PROT_PAGE (1ULL << 6) +#define E2K_FAULT_TYPE_PRIV_PAGE (1ULL << 7) +#define E2K_FAULT_TYPE_ILLEGAL_PAGE (1ULL << 8) +#define E2K_FAULT_TYPE_NWRITE_PAGE (1ULL << 9) +#define E2K_FAULT_TYPE_PAGE_MISS (1ULL << 10) +#define E2K_FAULT_TYPE_PH_BOUND (1ULL << 11) +#define E2K_FAULT_TYPE_INTL_RES_BITS (1ULL << 12) + +TRACE_EVENT( + trap_cellar, + + TP_PROTO(const trap_cellar_t *tc, int nr), + + TP_ARGS(tc, nr), + + TP_STRUCT__entry( + __field( int, nr ) + __field( u64, address ) + __field( u64, data_val ) + __field( u64, data_ext_val ) + __field( u8, data_tag ) + __field( u8, data_ext_tag ) + __field( u64, condition ) + __field( u64, mask ) + ), + + TP_fast_assign( + __entry->nr = nr; + __entry->address = tc->address; + load_value_and_tagd(&tc->data, + &__entry->data_val, &__entry->data_tag); + load_value_and_tagd(&tc->data_ext, + &__entry->data_ext_val, &__entry->data_ext_tag); + __entry->condition = AW(tc->condition); + __entry->mask = AW(tc->mask); + ), + + TP_printk("\n" + "Entry %d: address 0x%llx data %hhx 0x%llx data_ext %hhx 0x%llx\n" + "Register: address=0x%02hhx, vl=%d, vr=%d\n" + "Opcode: fmt=%d, n_prot=%d, fmtc=%d\n" + "Info1: chan=%d, mas=0x%02hhx, miss_lvl=%d, rcv=%d, dst_rcv=0x%03x\n" + "Info2: %s\n" + 
"Ftype: %s" + , + __entry->nr, __entry->address, __entry->data_tag, + __entry->data_val, __entry->data_ext_tag, __entry->data_ext_val, + AS((tc_cond_t) __entry->condition).address, + AS((tc_cond_t) __entry->condition).vl, + AS((tc_cond_t) __entry->condition).vr, + AS((tc_cond_t) __entry->condition).fmt, + AS((tc_cond_t) __entry->condition).npsp, + AS((tc_cond_t) __entry->condition).fmtc, + AS((tc_cond_t) __entry->condition).chan, + AS((tc_cond_t) __entry->condition).mas, + AS((tc_cond_t) __entry->condition).miss_lvl, + AS((tc_cond_t) __entry->condition).rcv, + AS((tc_cond_t) __entry->condition).dst_rcv, + __print_flags(__entry->condition & E2K_TC_TYPE, "|", + { E2K_TC_TYPE_STORE, "store" }, + { E2K_TC_TYPE_S_F, "s_f" }, + { E2K_TC_TYPE_ROOT, "root" }, + { E2K_TC_TYPE_SCAL, "scal" }, + { E2K_TC_TYPE_SRU, "sru" }, + { E2K_TC_TYPE_SPEC, "spec" }, + { E2K_TC_TYPE_PM, "pm" }, + { E2K_TC_TYPE_NUM_ALIGN, "num_align" }, + { E2K_TC_TYPE_EMPT, "empt" }, + { E2K_TC_TYPE_CLW, "clw" } + ), + __print_flags(AS((tc_cond_t) __entry->condition).fault_type, "|", + { E2K_FAULT_TYPE_GLOBAL_SP, "global_sp" }, + { E2K_FAULT_TYPE_PAGE_BOUND, "page_bound" }, + { E2K_FAULT_TYPE_EXC_MEM_LOCK, "exc_mem_lock" }, + { E2K_FAULT_TYPE_PH_PR_PAGE, "ph_pr_page" }, + { E2K_FAULT_TYPE_IO_PAGE, "io_page" }, + { E2K_FAULT_TYPE_ISYS_PAGE, "isys_page" }, + { E2K_FAULT_TYPE_PROT_PAGE, "prot_page" }, + { E2K_FAULT_TYPE_PRIV_PAGE, "priv_page" }, + { E2K_FAULT_TYPE_ILLEGAL_PAGE, "illegal_page" }, + { E2K_FAULT_TYPE_NWRITE_PAGE, "nwrite_page" }, + { E2K_FAULT_TYPE_PAGE_MISS, "page_miss" }, + { E2K_FAULT_TYPE_PH_BOUND, "ph_bound" }, + { E2K_FAULT_TYPE_INTL_RES_BITS, "intl_res_bits" } + )) +); + +TRACE_EVENT( + unhandled_page_fault, + + TP_PROTO(unsigned long address), + + TP_ARGS(address), + + TP_STRUCT__entry( + __field( unsigned long, address ) + __field( u64, dtlb_entry ) + __field( u64, dtlb_pud ) + __field( u64, dtlb_pmd ) + __field( u64, dtlb_pte ) + __field( pgdval_t, pgd ) + __field( pudval_t, pud ) + 
__field( pmdval_t, pmd ) + __field( pteval_t, pte ) + __field( int, pt_level ) + ), + + TP_fast_assign( + __entry->address = address; + + trace_get_va_translation(current->mm, address, + &__entry->pgd, &__entry->pud, &__entry->pmd, + &__entry->pte, &__entry->pt_level); + + /* + * Save DTLB entries. + * + * Do not access not existing entries to avoid + * creating "empty" records in DTLB for no reason. + */ + trace_get_dtlb_translation(current->mm, address, + &__entry->dtlb_entry, &__entry->dtlb_pud, + &__entry->dtlb_pmd, &__entry->dtlb_pte, + __entry->pt_level); + ), + + TP_printk("\n" + "Page table for address 0x%lx (all f's are returned if the entry has not been read)\n" + " pgd 0x%lx: %s\n" + " Access mode: %s%s\n" + " pud 0x%lx: %s\n" + " Access mode: %s%s\n" + " pmd 0x%lx: %s\n" + " Access mode: %s%s\n" + " pte 0x%lx: %s\n" + " Access mode: %s%s\n" + "Probed DTLB entries:\n" + " pud address entry 0x%llx: %s\n" + " pmd address entry 0x%llx: %s\n" + " pte address entry 0x%llx: %s\n" + " address entry 0x%llx: %s" + , + __entry->address, + (__entry->pt_level <= E2K_PGD_LEVEL_NUM) ? __entry->pgd : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->pgd, + __entry->pt_level <= E2K_PGD_LEVEL_NUM), + (__entry->pt_level <= E2K_PUD_LEVEL_NUM) ? __entry->pud : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->pud, + __entry->pt_level <= E2K_PUD_LEVEL_NUM), + (__entry->pt_level <= E2K_PMD_LEVEL_NUM) ? __entry->pmd : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->pmd, + __entry->pt_level <= E2K_PMD_LEVEL_NUM), + (__entry->pt_level <= E2K_PTE_LEVEL_NUM) ? __entry->pte : -1UL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->pte, + __entry->pt_level <= E2K_PTE_LEVEL_NUM), + (__entry->pt_level <= E2K_PUD_LEVEL_NUM) ? __entry->dtlb_pud : -1ULL, + (__entry->pt_level <= E2K_PUD_LEVEL_NUM) ? + E2K_TRACE_PRINT_DTLB(__entry->dtlb_pud) : "(not read)", + (__entry->pt_level <= E2K_PMD_LEVEL_NUM) ? __entry->dtlb_pmd : -1ULL, + (__entry->pt_level <= E2K_PMD_LEVEL_NUM) ? 
+ E2K_TRACE_PRINT_DTLB(__entry->dtlb_pmd) : "(not read)", + (__entry->pt_level <= E2K_PTE_LEVEL_NUM) ? __entry->dtlb_pte : -1ULL, + (__entry->pt_level <= E2K_PTE_LEVEL_NUM) ? + E2K_TRACE_PRINT_DTLB(__entry->dtlb_pte) : "(not read)", + __entry->dtlb_entry, + E2K_TRACE_PRINT_DTLB(__entry->dtlb_entry)) +); + +#define TIRHI_EXC_MASK 0x00000fffffffffffULL +#define TIRHI_ALS_MASK 0x0003f00000000000ULL +#define TIRHI_ALS_SHIFT 44ULL +#define TIRHI_MOVA_MASK 0x00f0000000000000ULL +#define TIRHI_MOVA0_MASK 0x0010000000000000ULL +#define TIRHI_MOVA1_MASK 0x0020000000000000ULL +#define TIRHI_MOVA2_MASK 0x0040000000000000ULL +#define TIRHI_MOVA3_MASK 0x0080000000000000ULL + +#define E2K_TRACE_PRINT_TIR_HI(entry) \ + (entry & (TIRHI_EXC_MASK | TIRHI_MOVA_MASK)) ? \ + __print_flags(entry & (TIRHI_EXC_MASK | TIRHI_MOVA_MASK), "|", \ + { TIRHI_MOVA0_MASK, "mova0" }, \ + { TIRHI_MOVA1_MASK, "mova1" }, \ + { TIRHI_MOVA2_MASK, "mova2" }, \ + { TIRHI_MOVA3_MASK, "mova3" }, \ + { exc_illegal_opcode_mask, "illegal_opcode" }, \ + { exc_priv_action_mask, "priv_action" }, \ + { exc_fp_disabled_mask, "fp_disabled" }, \ + { exc_fp_stack_u_mask, "fp_stack_u" }, \ + { exc_d_interrupt_mask, "d_interrupt" }, \ + { exc_diag_ct_cond_mask, "diag_ct_cond" }, \ + { exc_diag_instr_addr_mask, "diag_instr_addr" }, \ + { exc_illegal_instr_addr_mask, "illegal_instr_addr" }, \ + { exc_instr_debug_mask, "instr_debug" }, \ + { exc_window_bounds_mask, "window_bounds" }, \ + { exc_user_stack_bounds_mask, "user_stack_bounds" }, \ + { exc_proc_stack_bounds_mask, "proc_stack_bounds" }, \ + { exc_chain_stack_bounds_mask, "chain_stack_bounds" }, \ + { exc_fp_stack_o_mask, "fp_stack_o" }, \ + { exc_diag_cond_mask, "diag_cond" }, \ + { exc_diag_operand_mask, "diag_operand" }, \ + { exc_illegal_operand_mask, "illegal_operand" }, \ + { exc_array_bounds_mask, "array_bounds" }, \ + { exc_access_rights_mask, "access_rights" }, \ + { exc_addr_not_aligned_mask, "addr_not_aligned" }, \ + { exc_instr_page_miss_mask, 
"instr_page_miss" }, \ + { exc_instr_page_prot_mask, "instr_page_prot" }, \ + { exc_ainstr_page_miss_mask, "ainstr_page_miss" }, \ + { exc_ainstr_page_prot_mask, "ainstr_page_prot" }, \ + { exc_last_wish_mask, "last_wish" }, \ + { exc_base_not_aligned_mask, "base_not_aligned" }, \ + { exc_software_trap_mask, "software_trap" }, \ + { exc_data_debug_mask, "data_debug" }, \ + { exc_data_page_mask, "data_page" }, \ + { exc_recovery_point_mask, "recovery_point" }, \ + { exc_interrupt_mask, "interrupt" }, \ + { exc_nm_interrupt_mask, "nm_interrupt" }, \ + { exc_div_mask, "div" }, \ + { exc_fp_mask, "fp" }, \ + { exc_mem_lock_mask, "mem_lock" }, \ + { exc_mem_lock_as_mask, "mem_lock_as" }, \ + { exc_mem_error_out_cpu_mask, "mem_error_out_cpu" }, \ + { exc_mem_error_MAU_mask, "mem_error_MAU" }, \ + { exc_mem_error_L2_mask, "mem_error_L2" }, \ + { exc_mem_error_L1_35_mask, "mem_error_L1_35" }, \ + { exc_mem_error_L1_02_mask, "mem_error_L1_02" }, \ + { exc_mem_error_ICACHE_mask, "mem_error_ICACHE" } \ + ) : "(none)" + +TRACE_EVENT( + tir, + + TP_PROTO(u64 tir_lo, u64 tir_hi), + + TP_ARGS(tir_lo, tir_hi), + + TP_STRUCT__entry( + __field( u64, tir_lo ) + __field( u64, tir_hi ) + ), + + TP_fast_assign( + __entry->tir_lo = tir_lo; + __entry->tir_hi = tir_hi; + ), + + TP_printk("\n" + "TIR%lld: ip 0x%llx, als 0x%llx\n" + " exceptions: %s" + , + __entry->tir_hi >> 56, + __entry->tir_lo & E2K_VA_MASK, + (__entry->tir_hi & TIRHI_ALS_MASK) >> TIRHI_ALS_SHIFT, + E2K_TRACE_PRINT_TIR_HI(__entry->tir_hi) + ) +); + +/* How many last IPs are saved in hardware TIR_lo trace for debugging */ +#define TIR_HW_TRACE_LENGTH 512 +/* How many IPs to save to ring buffer in one event. Limited because: + * 1) It is assumed by ring buffer internals that events are small. + * 2) When dumping events with [ftrace_dump_on_oops] we are limited + * by printk() which outputs ~1000 symbols (LOG_LINE_MAX) at maximum. 
*/ +#define TIR_TRACE_LENGTH 16 +#define TIR_TRACE_PARTS 32 + +/* Output last IPs executed before a trap _without_ + * regions that executed with frozen TIRs (i.e. + * without trap entry up to UNFREEZE_TIRs() call). */ +TRACE_EVENT( + tir_ip_trace, + + TP_PROTO(int part), + + TP_ARGS(part), + + TP_STRUCT__entry( + __field(int, part) + __array(void *, ip, TIR_TRACE_LENGTH) + ), + + TP_fast_assign( + int i; + + BUILD_BUG_ON(TIR_TRACE_PARTS * TIR_TRACE_LENGTH != TIR_HW_TRACE_LENGTH); + BUG_ON(part < 1 || part > TIR_TRACE_PARTS); + __entry->part = part; + + for (i = 0; i < TIR_TRACE_LENGTH; i++) { + e2k_tir_lo_t tir_lo; + + /* Read additional debug TIRs */ + NATIVE_READ_TIR_HI_REG(); + tir_lo = NATIVE_READ_TIR_LO_REG(); + + __entry->ip[i] = (void *) tir_lo.TIR_lo_ip; + } + + /* For TP_printk below */ + BUILD_BUG_ON(TIR_TRACE_LENGTH != 16); + ), + + TP_printk("last %d IPs (part %d/%d):\n" + " %pS %pS %pS %pS\n" + " %pS %pS %pS %pS\n" + " %pS %pS %pS %pS\n" + " %pS %pS %pS %pS\n", + TIR_TRACE_LENGTH * TIR_TRACE_PARTS, __entry->part, TIR_TRACE_PARTS, + __entry->ip[0], __entry->ip[1], __entry->ip[2], __entry->ip[3], + __entry->ip[4], __entry->ip[5], __entry->ip[6], __entry->ip[7], + __entry->ip[8], __entry->ip[9], __entry->ip[10], __entry->ip[11], + __entry->ip[12], __entry->ip[13], __entry->ip[14], __entry->ip[15] + ) +); + + +#endif /* _TRACE_E2K_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../arch/e2k/include/asm +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace +#include diff --git a/arch/e2k/include/asm/trace_clock.h b/arch/e2k/include/asm/trace_clock.h new file mode 100644 index 000000000000..4bdc71bb133b --- /dev/null +++ b/arch/e2k/include/asm/trace_clock.h @@ -0,0 +1,12 @@ +#ifndef _ASM_E2K_TRACE_CLOCK_H +#define _ASM_E2K_TRACE_CLOCK_H + +#include +#include + +extern notrace u64 trace_clock_e2k_clkr(void); + +#define ARCH_TRACE_CLOCKS \ + { trace_clock_e2k_clkr, "e2k-clkr", .in_ns = 0 }, + 
+#endif /* _ASM_E2K_TRACE_CLOCK_H */ diff --git a/arch/e2k/include/asm/trace_pgtable-v2.h b/arch/e2k/include/asm/trace_pgtable-v2.h new file mode 100644 index 000000000000..49930ffacf07 --- /dev/null +++ b/arch/e2k/include/asm/trace_pgtable-v2.h @@ -0,0 +1,35 @@ +#if !defined(_TRACE_E2K_PGTABLE_V2_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_E2K_PGTABLE_V2_H + +#include + +#define E2K_TRACE_PRINT_PT_V2_FLAGS(entry, print) \ + (print) ? (__print_flags(entry & (_PAGE_P_V2 | _PAGE_VALID_V2 | \ + _PAGE_HUGE_V2 | _PAGE_G_V2 | \ + _PAGE_NWA_V2 | _PAGE_AVAIL_V2 | \ + _PAGE_INT_PR_V2), "|", \ + { _PAGE_P_V2 , "present" }, \ + { _PAGE_VALID_V2 , "valid" }, \ + { _PAGE_HUGE_V2, "large" }, \ + { _PAGE_G_V2, "global" }, \ + { _PAGE_NWA_V2, "not_write_address" }, \ + { _PAGE_AVAIL_V2, "OS" }, \ + { _PAGE_INT_PR_V2, "integer_protect" } \ + )) : "(none)", \ + (print) ? (__print_flags(entry & (_PAGE_PV_V2 | _PAGE_NON_EX_V2 | \ + _PAGE_W_V2 | _PAGE_D_V2 | \ + _PAGE_A_HW_V2), "|", \ + { _PAGE_PV_V2, "privileged" }, \ + { _PAGE_NON_EX_V2, "non_executable" }, \ + { _PAGE_W_V2, "writable" }, \ + { _PAGE_D_V2, "dirty" }, \ + { _PAGE_A_HW_V2, "accessed" } \ + )) : "(none)", \ + (print && entry != -1ULL && (entry & ~_PAGE_VALID_V2)) ? \ + (((entry & _PAGE_CD_MASK_V2) != _PAGE_CD_MASK_V2) ? \ + "|cacheable" \ + : ((entry & _PAGE_PWT_V2) ? \ + "|uncacheable" : "|write_combine")) \ + : "" \ + +#endif /* _TRACE_E2K_PGTABLE_V2_H */ diff --git a/arch/e2k/include/asm/trace_pgtable-v6.h b/arch/e2k/include/asm/trace_pgtable-v6.h new file mode 100644 index 000000000000..3f600924e2e8 --- /dev/null +++ b/arch/e2k/include/asm/trace_pgtable-v6.h @@ -0,0 +1,45 @@ +#if !defined(_TRACE_E2K_PGTABLE_V6_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_E2K_PGTABLE_V6_H + +#include + +#define E2K_TRACE_PRINT_PT_V6_FLAGS(entry, print) \ + (print) ? 
(__print_flags(entry & (_PAGE_P_V6 | _PAGE_VALID_V6 | \ + _PAGE_HUGE_V6 | _PAGE_G_V6 | \ + _PAGE_NWA_V6 | \ + _PAGE_SW1_V6 | _PAGE_SW2_V6 | \ + _PAGE_INT_PR_V6), "|", \ + { _PAGE_P_V6 , "present" }, \ + { _PAGE_VALID_V6 , "valid" }, \ + { _PAGE_HUGE_V6, "large" }, \ + { _PAGE_G_V6, "global" }, \ + { _PAGE_NWA_V6, "not_write_address" }, \ + { _PAGE_SW1_V6, "OS-1" }, \ + { _PAGE_SW2_V6, "OS-2" }, \ + { _PAGE_INT_PR_V6, "integer_protect" } \ + )) : "(none)", \ + (print) ? (__print_flags(entry & (_PAGE_PV_V6 | _PAGE_NON_EX_V6 | \ + _PAGE_W_V6 | _PAGE_D_V6 | \ + _PAGE_A_HW_V6), "|", \ + { _PAGE_PV_V6, "privileged" }, \ + { _PAGE_NON_EX_V6, "non_executable" }, \ + { _PAGE_W_V6, "writable" }, \ + { _PAGE_D_V6, "dirty" }, \ + { _PAGE_A_HW_V6, "accessed" } \ + )) : "(none)", \ + (print && entry != -1ULL && (entry & ~_PAGE_VALID_V6)) ? \ + (__print_symbolic(_PAGE_MT_GET_VAL(entry), \ + { GEN_CACHE_MT, "General Cacheable" }, \ + { GEN_NON_CACHE_MT, "General nonCacheable" }, \ + { GEN_NON_CACHE_ORDERED_MT, \ + "General nonCacheable Ordered (same as GnC in hardware)" }, \ + { EXT_PREFETCH_MT, "External Prefetchable" }, \ + { EXT_NON_PREFETCH_MT, "External nonPrefetchable" }, \ + { EXT_CONFIG_MT, "External Configuration" }, \ + { EXT_CACHE_MT, "External Cached (same as GC in hardware)" }, \ + { 2, "Reserved-2" }, \ + { 3, "Reserved-3" }, \ + { 5, "Reserved-5" })) \ + : "" \ + +#endif /* _TRACE_E2K_PGTABLE_V6_H */ diff --git a/arch/e2k/include/asm/trap_def.h b/arch/e2k/include/asm/trap_def.h new file mode 100644 index 000000000000..9b00bd7c8b61 --- /dev/null +++ b/arch/e2k/include/asm/trap_def.h @@ -0,0 +1,275 @@ +/* + * + * Copyright (C) 2001 MCST + * + * Defenition of traps handling routines. 
+ */ + +#ifndef _E2K_TRAP_DEF_H +#define _E2K_TRAP_DEF_H + +#include + +#define GET_NR_TIRS(tir_hi) ((tir_hi >> 56) & 0xff) +#define GET_CLEAR_TIR_HI(tir_no) (((tir_no) & 0xffUL) << 56) +#define GET_CLEAR_TIR_LO(tir_no) 0UL + +/* get aa field of tir_hi register */ +#define GET_AA_TIRS(tir_hi) ((tir_hi >> 52) & 0x0f) +#define SET_AA_TIRS(tir_hi, aa_field) ((tir_hi) | (((aa_field) & 0x0f) << 52)) +/* get IP field of tir_lo register */ +#define GET_IP_TIRS(tir_lo) ((tir_lo) & E2K_VA_MASK) +/* get IP field of cr0_hi register */ +#define GET_IP_CR0_HI(cr0_hi) ((cr0_hi).CR0_hi_ip << E2K_ALIGN_INS) + +/* + * Trap Info Register: the numbers of exceptions + */ + +#define exc_illegal_opcode_num 0 /* 00 */ +#define exc_priv_action_num 1 /* 01 */ +#define exc_fp_disabled_num 2 /* 02 */ +#define exc_fp_stack_u_num 3 /* 03 */ +#define exc_d_interrupt_num 4 /* 04 */ +#define exc_diag_ct_cond_num 5 /* 05 */ +#define exc_diag_instr_addr_num 6 /* 06 */ +#define exc_illegal_instr_addr_num 7 /* 07 */ +#define exc_instr_debug_num 8 /* 08 */ +#define exc_window_bounds_num 9 /* 09 */ +#define exc_user_stack_bounds_num 10 /* 10 */ +#define exc_proc_stack_bounds_num 11 /* 11 */ +#define exc_chain_stack_bounds_num 12 /* 12 */ +#define exc_fp_stack_o_num 13 /* 13 */ +#define exc_diag_cond_num 14 /* 14 */ +#define exc_diag_operand_num 15 /* 15 */ +#define exc_illegal_operand_num 16 /* 16 */ +#define exc_array_bounds_num 17 /* 17 */ +#define exc_access_rights_num 18 /* 18 */ +#define exc_addr_not_aligned_num 19 /* 19 */ +#define exc_instr_page_miss_num 20 /* 20 */ +#define exc_instr_page_prot_num 21 /* 21 */ +#define exc_ainstr_page_miss_num 22 /* 22 */ +#define exc_ainstr_page_prot_num 23 /* 23 */ +#define exc_last_wish_num 24 /* 24 */ +#define exc_base_not_aligned_num 25 /* 25 */ +#define exc_software_trap_num 26 /* 26 */ + +#define exc_data_debug_num 28 /* 28 */ +#define exc_data_page_num 29 /* 29 */ + +#define exc_recovery_point_num 31 /* 31 */ +#define exc_interrupt_num 32 /* 32 */ 
+#define exc_nm_interrupt_num 33 /* 33 */ +#define exc_div_num 34 /* 34 */ +#define exc_fp_num 35 /* 35 */ +#define exc_mem_lock_num 36 /* 36 */ +#define exc_mem_lock_as_num 37 /* 37 */ +#define exc_data_error_num 38 /* 38 */ +#define exc_mem_error_out_cpu_num 38 /* 38 */ +#define exc_mem_error_MAU_num 39 /* 39 */ +#define exc_mem_error_L2_num 40 /* 40 */ +#define exc_mem_error_L1_35_num 41 /* 41 */ +#define exc_mem_error_L1_02_num 42 /* 42 */ +#define exc_mem_error_ICACHE_num 43 /* 43 */ + +#define exc_max_num 43 + +#define exc_mova_ch_0_num 52 /* [52] TIR.aa.[0] */ +#define exc_mova_ch_1_num 53 /* [53] TIR.aa.[1] */ +#define exc_mova_ch_2_num 54 /* [54] TIR.aa.[2] */ +#define exc_mova_ch_3_num 55 /* [55] TIR.aa.[3] */ + +#define core_dump_num 38 /* 38 */ +#define masked_hw_stack_bounds_num 60 /* hardware stacks bounds */ + /* trap is occured but masked */ + +/* + * Trap Info Register: the bit mask of exceptions + */ + +#define exc_illegal_opcode_mask (1UL << exc_illegal_opcode_num) +#define exc_priv_action_mask (1UL << exc_priv_action_num) +#define exc_fp_disabled_mask (1UL << exc_fp_disabled_num) +#define exc_fp_stack_u_mask (1UL << exc_fp_stack_u_num) +#define exc_d_interrupt_mask (1UL << exc_d_interrupt_num) +#define exc_diag_ct_cond_mask (1UL << exc_diag_ct_cond_num) +#define exc_diag_instr_addr_mask (1UL << exc_diag_instr_addr_num) +#define exc_illegal_instr_addr_mask (1UL << exc_illegal_instr_addr_num) +#define exc_instr_debug_mask (1UL << exc_instr_debug_num) +#define exc_window_bounds_mask (1UL << exc_window_bounds_num) +#define exc_user_stack_bounds_mask (1UL << exc_user_stack_bounds_num) +#define exc_proc_stack_bounds_mask (1UL << exc_proc_stack_bounds_num) +#define exc_chain_stack_bounds_mask (1UL << exc_chain_stack_bounds_num) +#define exc_fp_stack_o_mask (1UL << exc_fp_stack_o_num) +#define exc_diag_cond_mask (1UL << exc_diag_cond_num) +#define exc_diag_operand_mask (1UL << exc_diag_operand_num) +#define exc_illegal_operand_mask (1UL << 
exc_illegal_operand_num) +#define exc_array_bounds_mask (1UL << exc_array_bounds_num) +#define exc_access_rights_mask (1UL << exc_access_rights_num) +#define exc_addr_not_aligned_mask (1UL << exc_addr_not_aligned_num) +#define exc_instr_page_miss_mask (1UL << exc_instr_page_miss_num) +#define exc_instr_page_prot_mask (1UL << exc_instr_page_prot_num) +#define exc_ainstr_page_miss_mask (1UL << exc_ainstr_page_miss_num) +#define exc_ainstr_page_prot_mask (1UL << exc_ainstr_page_prot_num) +#define exc_last_wish_mask (1UL << exc_last_wish_num) +#define exc_base_not_aligned_mask (1UL << exc_base_not_aligned_num) +#define exc_software_trap_mask (1UL << exc_software_trap_num) + +#define exc_data_debug_mask (1UL << exc_data_debug_num) +#define exc_data_page_mask (1UL << exc_data_page_num) + +#define exc_recovery_point_mask (1UL << exc_recovery_point_num) +#define exc_interrupt_mask (1UL << exc_interrupt_num) +#define exc_nm_interrupt_mask (1UL << exc_nm_interrupt_num) +#define exc_div_mask (1UL << exc_div_num) +#define exc_fp_mask (1UL << exc_fp_num) +#define exc_mem_lock_mask (1UL << exc_mem_lock_num) +#define exc_mem_lock_as_mask (1UL << exc_mem_lock_as_num) +#define exc_data_error_mask (1UL << exc_data_error_num) +#define exc_mem_error_out_cpu_mask (1UL << exc_mem_error_out_cpu_num) +#define exc_mem_error_MAU_mask (1UL << exc_mem_error_MAU_num) +#define exc_mem_error_L2_mask (1UL << exc_mem_error_L2_num) +#define exc_mem_error_L1_35_mask (1UL << exc_mem_error_L1_35_num) +#define exc_mem_error_L1_02_mask (1UL << exc_mem_error_L1_02_num) +#define exc_mem_error_ICACHE_mask (1UL << exc_mem_error_ICACHE_num) +#define exc_mem_error_mask (exc_mem_error_out_cpu_mask | \ + exc_mem_error_MAU_mask | \ + exc_mem_error_L2_mask | \ + exc_mem_error_L1_35_mask | \ + exc_mem_error_L1_02_mask | \ + exc_mem_error_ICACHE_mask) + +#define exc_mova_ch_0_mask (1UL << exc_mova_ch_0_num) +#define exc_mova_ch_1_mask (1UL << exc_mova_ch_1_num) +#define exc_mova_ch_2_mask (1UL << exc_mova_ch_2_num) 
+#define exc_mova_ch_3_mask (1UL << exc_mova_ch_3_num) + +#define exc_all_mask ((1UL << (exc_max_num + 1)) - 1UL) +#define aau_exc_mask (exc_mova_ch_0_mask | \ + exc_mova_ch_1_mask | \ + exc_mova_ch_2_mask | \ + exc_mova_ch_3_mask) + +#define core_dump_mask (1UL << core_dump_num) +#define masked_hw_stack_bounds_mask (1UL << masked_hw_stack_bounds_num) + +#define sync_exc_mask (exc_illegal_opcode_mask | \ + exc_priv_action_mask | \ + exc_fp_disabled_mask | \ + exc_fp_stack_u_mask | \ + exc_diag_ct_cond_mask | \ + exc_diag_instr_addr_mask | \ + exc_illegal_instr_addr_mask | \ + exc_window_bounds_mask | \ + exc_user_stack_bounds_mask | \ + exc_fp_stack_o_mask | \ + exc_diag_cond_mask | \ + exc_diag_operand_mask | \ + exc_illegal_operand_mask | \ + exc_array_bounds_mask | \ + exc_access_rights_mask | \ + exc_addr_not_aligned_mask | \ + exc_instr_page_miss_mask | \ + exc_instr_page_prot_mask | \ + exc_base_not_aligned_mask | \ + exc_software_trap_mask) + +#define async_exc_mask (exc_proc_stack_bounds_mask | \ + exc_chain_stack_bounds_mask | \ + exc_instr_debug_mask | \ + exc_ainstr_page_miss_mask | \ + exc_ainstr_page_prot_mask | \ + exc_interrupt_mask | \ + exc_nm_interrupt_mask | \ + exc_mem_lock_as_mask | \ + exc_data_error_mask | \ + exc_mem_error_mask) + +#define defer_exc_mask (exc_data_page_mask | \ + exc_mem_lock_mask | \ + exc_d_interrupt_mask | \ + exc_last_wish_mask) + +/* Mask of non-maskable interrupts. "exc_data_debug" and "exc_instr_debug" + * actually can be either maskable or non-maskable depending on the watched + * event, but we assume the worst case (non-maskable). 
*/ +#define non_maskable_exc_mask (exc_nm_interrupt_mask | \ + exc_data_debug_mask | \ + exc_instr_debug_mask | \ + exc_mem_lock_as_mask) + +#define have_tc_exc_mask (exc_data_page_mask | \ + exc_mem_lock_mask) + +#define fp_es (1UL << 7) /* - error summary status; es set if anyone */ + /* of unmasked exception flags is set; */ + +#define fp_pe (1UL << 5) /* - precision exception flag; */ +#define fp_ue (1UL << 4) /* - underflow exception flag; */ +#define fp_oe (1UL << 3) /* - overflow exception flag; */ +#define fp_ze (1UL << 2) /* - divide by zero exception flag; */ +#define fp_de (1UL << 1) /* - denormalized operand exception flag; */ +#define fp_ie (1UL << 0) /* - invalid operation exception flag; */ + +#ifndef __ASSEMBLY__ +/* + * do_page_fault() return values + */ +enum pf_ret { + /* Could not handle fault, must return to handle signals */ + PFR_SIGPENDING = 1, + /* The page fault was handled */ + PFR_SUCCESS, + /* In some cases kernel addresses can be in Trap Cellar if VLIW command + * consisted of a several load/store operations and one of them caused + * page fault trap */ + PFR_KERNEL_ADDRESS, + /* Do not handle speculative access */ + PFR_IGNORE, + /* Controlled access from kernel to user memory */ + PFR_CONTROLLED_ACCESS, + /* needs to change SAP to AP for multi_threading of protected mode */ + PFR_AP_THREAD_READ, + /* trap on paravirtualized guest kernel and is handled by host: */ + /* such result means the trap was handled by hypervisor and it need */ + /* recover faulted operation */ + PFR_KVM_KERNEL_ADDRESS, +}; +#endif /* ! 
__ASSEMBLY__ */ + +/* + * Common system calls (trap table entries numbers) + */ +#define LINUX_SYSCALL32_TRAPNUM 1 /* Use E2K trap entry #1 */ +#define LINUX_SYSCALL64_TRAPNUM 3 /* Use E2K trap entry #3 */ +#define LINUX_SYSCALL_TRAPNUM_OLD 4 /* Deprecated */ +#define LINUX_FAST_SYSCALL32_TRAPNUM 5 +#define LINUX_FAST_SYSCALL64_TRAPNUM 6 +#define LINUX_FAST_SYSCALL128_TRAPNUM 7 + +/* + * Hypercalls + */ +#define LINUX_HCALL_GENERIC_TRAPNUM 0 /* guest hardware hypercalls */ +#define LINUX_HCALL_LIGHT_TRAPNUM 1 /* guest light hardware hypercalls */ + +/* + * Definition of ttable entry number used for protected system calls. + * This is under agreement with protected mode compiler/plib team. + */ +#define PMODE_SYSCALL_TRAPNUM 8 +#define PMODE_NEW_SYSCALL_TRAPNUM 10 + +#define GENERIC_HYPERCALL_TRAPNUM 16 /* guest software hypercalls */ +#define LIGHT_HYPERCALL_TRAPNUM 17 /* guest light software hypercalls */ + +#define HYPERCALLS_TRAPS_MASK ((1U << GENERIC_HYPERCALL_TRAPNUM) | \ + (1U << LIGHT_HYPERCALL_TRAPNUM)) + +/* + * One trap table entry byte size + */ +#define LINUX_SYSCALL_ENTRY_SIZE 0x800 /* one entry max size is */ + /* PAGE_SIZE / 2 */ + +#endif /* _E2K_TRAP_DEF_H */ diff --git a/arch/e2k/include/asm/trap_table.S.h b/arch/e2k/include/asm/trap_table.S.h new file mode 100644 index 000000000000..ae06353ce191 --- /dev/null +++ b/arch/e2k/include/asm/trap_table.S.h @@ -0,0 +1,167 @@ +/* + * + * Copyright (C) 2020 MCST + * + * Defenition of traps handling routines. + */ + +#ifndef _E2K_TRAP_TABLE_ASM_H +#define _E2K_TRAP_TABLE_ASM_H + +#ifdef __ASSEMBLY__ + +#include +#include +#include +#include +#include + +#include + +#if defined CONFIG_SMP +# define SMP_ONLY(...) __VA_ARGS__ +#else +# define SMP_ONLY(...) +#endif + +/* + * Important: the first memory access in kernel is store, not load. + * This is needed to flush SLT before trying to load anything. + */ +#define SWITCH_HW_STACKS_FROM_USER(...) 
\ + { \ + /* Wait for FPU exceptions before switching stacks */ \ + wait all_e = 1; \ + rrd %osr0, GVCPUSTATE; \ + stgdq,sm %qg16, 0, TSK_TI_G_VCPU_STATE; \ + } \ + { \ + rrd %psp.hi, GCURTASK; \ + stgdq,sm %qg18, 0, TSK_TI_G_MY_CPU_OFFSET; \ + cmpesb,1 0, 0, %pred0; \ + /* Do restore %rpr (it's clobbered by "crp" below) */ \ + cmpesb,3 0, 0, %pred1; \ + } \ + { \ + /* 'crp' instruction also clears %rpr besides the generations \ + * table, so make sure we preserve %rpr value. */ \ + rrd %rpr.lo, GCPUOFFSET; \ + } \ + { \ + rrd %rpr.hi, GCPUID_PREEMPT; \ + /* Disable load/store generations */ \ + crp; \ + } \ + SWITCH_HW_STACKS(TSK_TI_, ##__VA_ARGS__) + +/* + * This assumes that GVCPUSTATE points to current_thread_info() + * and %psp.hi has been read into GCURTASK. + * %pred0 == true if this is a kernel trap handler. + * + * %pred0 - set to "true" if PSP/PCSP should be switched. + * %pred1 - set to "true" if RPR should be restored. + * + * Does the following: + * + * 1) Saves global registers either to 'thread_info.tmp_k_gregs' or to + * 'thread_info.k_gregs'. The first area is used for trap handler since + * we do not know whether it is from user or from kernel and whether + * global registers have been saved already to 'thread_info.k_gregs'. + * + * 2) Saves stack registers to 'thread_info.tmp_user_stacks'. If this is + * not a kernel trap then these values will be copied to pt_regs later. + * + * 3) Updates global and stack registers with kernel values (if not in + * kernel trap where it's been done already) + */ +#define SWITCH_HW_STACKS(PREFIX, ...) \ + { \ + rwd GCPUOFFSET, %rpr.lo ? %pred1; \ + ldd,2 GVCPUSTATE, TI_K_PSP_LO, GCPUOFFSET; \ + __VA_ARGS__ \ + } \ + { \ + rwd GCPUID_PREEMPT, %rpr.hi ? %pred1; \ + ldd,2 GVCPUSTATE, TI_K_PCSP_LO, GCPUID_PREEMPT; \ + } \ + { \ + rrd %psp.lo, GCURTASK ? %pred0; \ + stgdd,2 GCURTASK, 0, TSK_TI_TMP_U_PSP_HI ? %pred0; \ + SMP_ONLY(ldgdd,5 0, TSK_TI_TMP_G_MY_CPU_OFFSET_EXT, GCPUID_PREEMPT ? 
~ %pred0;) \ + } \ + { \ + rrd %pcsp.hi, GCURTASK ? %pred0; \ + stgdd,2 GCURTASK, 0, TSK_TI_TMP_U_PSP_LO ? %pred0; \ + /* Nothing to do if this is kernel (%pred0 == false) */ \ + subd,1 GVCPUSTATE, TSK_TI, GCURTASK ? ~ %pred0; \ + SMP_ONLY(ldgdd,5 0, TSK_TI_TMP_G_MY_CPU_OFFSET, GCPUOFFSET ? ~ %pred0;) \ + ibranch trap_handler_switched_stacks ? ~ %pred0; \ + } \ + { \ + rrd %pcsp.lo, GCURTASK; \ + stgdd,2 GCURTASK, 0, TSK_TI_TMP_U_PCSP_HI; \ + } \ + { \ + rwd GCPUOFFSET, %psp.lo; \ + stgdd,2 GCURTASK, 0, TSK_TI_TMP_U_PCSP_LO; \ + ldrd,5 GVCPUSTATE, TAGGED_MEM_LOAD_REC_OPC | PREFIX##G_MY_CPU_OFFSET_EXT, GCPUOFFSET; \ + } \ + { \ + rwd GCPUID_PREEMPT, %pcsp.lo; \ + ldd,2 GVCPUSTATE, TI_K_PSP_HI, GCURTASK; \ + ldrd,5 GVCPUSTATE, TAGGED_MEM_LOAD_REC_OPC | PREFIX##G_VCPU_STATE_EXT, GCPUID_PREEMPT; \ + } \ + { \ + rwd GCURTASK, %psp.hi; \ + ldd,2 GVCPUSTATE, TI_K_PCSP_HI, GCURTASK; \ + } \ + { \ + rwd GCURTASK, %pcsp.hi; \ + } \ + { \ + rrd %pshtp, GCURTASK; \ + } \ + { \ + ldrd,3 GVCPUSTATE, TAGGED_MEM_LOAD_REC_OPC | PREFIX##G_CPU_ID_PREEMPT, GCPUOFFSET; \ + strd,5 GCPUOFFSET, GVCPUSTATE, TAGGED_MEM_STORE_REC_OPC | PREFIX##G_CPU_ID_PREEMPT; \ + } \ + { \ + ldrd,3 GVCPUSTATE, TAGGED_MEM_LOAD_REC_OPC | PREFIX##G_TASK, GCPUID_PREEMPT; \ + strd,5 GCPUID_PREEMPT, GVCPUSTATE, TAGGED_MEM_STORE_REC_OPC | PREFIX##G_TASK; \ + } \ + { \ + nop 2; /* ld -> use */ \ + rrd %pcshtp, GCURTASK; \ + stgdd,2 GCURTASK, 0, TSK_TI_TMP_U_PSHTP; \ + } \ + { \ + subd,1 GVCPUSTATE, TSK_TI, GCURTASK; \ + stgdd,2 GCURTASK, 0, TSK_TI_TMP_U_PCSHTP; \ + strd,5 GCPUOFFSET, GVCPUSTATE, TAGGED_MEM_STORE_REC_OPC | PREFIX##G_MY_CPU_OFFSET_EXT; \ + } \ + { \ + SMP_ONLY(ldw,3 GVCPUSTATE, TSK_TI_CPU_DELTA, GCPUID_PREEMPT;) \ + strd,5 GCPUID_PREEMPT, GVCPUSTATE, TAGGED_MEM_STORE_REC_OPC | PREFIX##G_VCPU_STATE_EXT; \ + } + +.macro HANDLER_TRAMPOLINE ctprN, scallN, fn, wbsL +/* Force load OSGD->GD. 
Alternative is to use non-0 CUI for kernel */ +{ + sdisp \ctprN, \scallN +} + /* CPU_HWBUG_VIRT_PSIZE_INTERCEPTION */ + { nop } { nop } { nop } { nop } + call \ctprN, wbs=\wbsL + disp \ctprN, \fn + SWITCH_HW_STACKS_FROM_USER() + SMP_ONLY(shld,1 GCPUID_PREEMPT, 3, GCPUOFFSET) +{ + SMP_ONLY(ldd,2 [ __per_cpu_offset + GCPUOFFSET ], GCPUOFFSET) + ct \ctprN +} +.endm /* HANDLER_TRAMPOLINE */ + +#endif /* __ASSEMBLY__ */ + +#endif /* _E2K_TRAP_TABLE_ASM_H */ diff --git a/arch/e2k/include/asm/trap_table.h b/arch/e2k/include/asm/trap_table.h new file mode 100644 index 000000000000..cb16cebcfa9b --- /dev/null +++ b/arch/e2k/include/asm/trap_table.h @@ -0,0 +1,267 @@ +/* + * + * Copyright (C) 2001 MCST + * + * Defenition of traps handling routines. + */ + +#ifndef _E2K_TRAP_TABLE_H +#define _E2K_TRAP_TABLE_H + +#include +#include +#include +#include +#include +#include +#include + +#ifndef __ASSEMBLY__ +#include +#endif /* __ASSEMBLY__ */ + +#ifdef __ASSEMBLY__ +#include +#endif /* __ASSEMBLY__ */ + +#ifndef __ASSEMBLY__ + +#define GDB_BREAKPOINT_STUB_MASK 0xffffffffffffff8fUL +#define GDB_BREAKPOINT_STUB 0x0dc0c08004000001UL + +typedef long (*ttable_entry_args_t)(int sys_num, ...); + +static inline bool +is_gdb_breakpoint_trap(struct pt_regs *regs) +{ + u64 *instr = (u64 *)GET_IP_CR0_HI(regs->crs.cr0_hi); + u64 sylab; + + host_get_user(sylab, instr, regs); + return (sylab & GDB_BREAKPOINT_STUB_MASK) == GDB_BREAKPOINT_STUB; +} + +extern void kernel_stack_overflow(unsigned int overflows); + +static inline void native_clear_fork_child_pt_regs(struct pt_regs *childregs) +{ + childregs->sys_rval = 0; + /* + * Remove all pointers to parent's data stack + * (these are not needed anyway for system calls) + */ + childregs->trap = NULL; + childregs->aau_context = NULL; +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT + childregs->scall_times = NULL; +#endif + childregs->next = NULL; +} + +static inline unsigned int +native_is_kernel_data_stack_bounds(bool trap_on_kernel, e2k_usd_lo_t usd_lo) +{ + 
/* In native case this check is done in assembler in ttable_entry0 */ + return false; +} + +static inline void +native_stack_bounds_trap_enable(void) +{ + /* 'sge' flag unused while trap/system calls handling */ + /* so nithing to do */ +} +static inline void +native_correct_trap_return_ip(struct pt_regs *regs, unsigned long return_ip) +{ + if (regs == NULL) { + regs = current_thread_info()->pt_regs; + BUG_ON(regs == NULL); + } + regs->crs.cr0_hi.CR0_hi_IP = return_ip; +} + +static inline int +native_do_aau_page_fault(struct pt_regs *const regs, e2k_addr_t address, + const tc_cond_t condition, const tc_mask_t mask, + const unsigned int aa_no) +{ + (void)do_page_fault(regs, address, condition, mask, 0); + return 0; +} + +extern long native_ttable_entry1(int sys_num, ...); +extern long native_ttable_entry3(int sys_num, ...); +extern long native_ttable_entry4(int sys_num, ...); + +#define do_ttable_entry_name(entry) # entry +#define ttable_entry_name(entry) do_ttable_entry_name(entry) + +#define ttable_entry1_name ttable_entry_name(ttable_entry1) +#define ttable_entry3_name ttable_entry_name(ttable_entry3) +#define ttable_entry4_name ttable_entry_name(ttable_entry4) + +#define ttable_entry1_func(sys_num, ...) \ +({ \ + long rval; \ + rval = ttable_entry1(sys_num, ##__VA_ARGS__); \ + rval; \ +}) +#define ttable_entry3_func(sys_num, ...) \ +({ \ + long rval; \ + rval = ttable_entry3(sys_num, ##__VA_ARGS__); \ + rval; \ +}) +#define ttable_entry4_func(sys_num, ...) \ +({ \ + long rval; \ + rval = ttable_entry4(sys_num, ##__VA_ARGS__); \ + rval; \ +}) + +#ifndef CONFIG_VIRTUALIZATION +#if CONFIG_CPU_ISET >= 5 +# define SYS_RET_TYPE long +#else /* ! CONFIG_CPU_ISET < 5 */ +# define SYS_RET_TYPE void +#endif /* CONFIG_CPU_ISET >= 5 */ +#else /* CONFIG_VIRTUALIZATION */ +# define SYS_RET_TYPE long +#endif /* ! 
CONFIG_VIRTUALIZATION */ + +typedef unsigned long (*system_call_func)(unsigned long arg1, + unsigned long arg2, + unsigned long arg3, + unsigned long arg4, + unsigned long arg5, + unsigned long arg6); + +typedef unsigned long (*protected_system_call_func)(unsigned long arg1, + unsigned long arg2, + unsigned long arg3, + unsigned long arg4, + unsigned long arg5, + unsigned long arg6, + struct pt_regs *regs); +static inline void +native_exit_handle_syscall(e2k_addr_t sbr, e2k_usd_hi_t usd_hi, + e2k_usd_lo_t usd_lo, e2k_upsr_t upsr) +{ + NATIVE_EXIT_HANDLE_SYSCALL(sbr, usd_hi.USD_hi_half, usd_lo.USD_lo_half, + upsr.UPSR_reg); +} + +extern SYS_RET_TYPE notrace handle_sys_call(system_call_func sys_call, + long arg1, long arg2, long arg3, long arg4, + long arg5, long arg6, struct pt_regs *regs); + +extern const system_call_func sys_call_table[NR_syscalls]; +extern const system_call_func sys_call_table_32[NR_syscalls]; +extern const protected_system_call_func sys_call_table_entry8[NR_syscalls]; +extern const system_call_func sys_protcall_table[NR_syscalls]; +extern const system_call_func sys_call_table_deprecated[NR_syscalls]; + +#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native kernel without any virtualization */ +/* or it is host kernel with virtualization support */ + +#define FILL_HARDWARE_STACKS() NATIVE_FILL_HARDWARE_STACKS() + +static inline void clear_fork_child_pt_regs(struct pt_regs *childregs) +{ + native_clear_fork_child_pt_regs(childregs); +} + +static inline void +correct_trap_return_ip(struct pt_regs *regs, unsigned long return_ip) +{ + native_correct_trap_return_ip(regs, return_ip); +} +static inline void +stack_bounds_trap_enable(void) +{ + native_stack_bounds_trap_enable(); +} + +#define ttable_entry1 native_ttable_entry1 +#define ttable_entry3 native_ttable_entry3 +#define ttable_entry4 native_ttable_entry4 + +#define get_ttable_entry1 ((ttable_entry_args_t)native_ttable_entry1) +#define get_ttable_entry3 
((ttable_entry_args_t)native_ttable_entry3) +#define get_ttable_entry4 ((ttable_entry_args_t)native_ttable_entry4) + +static inline void +exit_handle_syscall(e2k_addr_t sbr, e2k_usd_hi_t usd_hi, + e2k_usd_lo_t usd_lo, e2k_upsr_t upsr) +{ + native_exit_handle_syscall(sbr, usd_hi, usd_lo, upsr); +} + +static inline unsigned long +kvm_mmio_page_fault(struct pt_regs *regs, trap_cellar_t *tcellar) +{ + return 0; +} + +#ifndef CONFIG_VIRTUALIZATION +/* it is native kernel without any virtualization */ + +#define instr_page_fault(__regs, __ftype, __async) \ + native_do_instr_page_fault(__regs, __ftype, __async) + +static inline int +do_aau_page_fault(struct pt_regs *const regs, e2k_addr_t address, + const tc_cond_t condition, const tc_mask_t mask, + const unsigned int aa_no) +{ + return native_do_aau_page_fault(regs, address, condition, mask, aa_no); +} + +static inline unsigned int +is_kernel_data_stack_bounds(bool on_kernel, e2k_usd_lo_t usd_lo) +{ + return native_is_kernel_data_stack_bounds(on_kernel, usd_lo); +} +#endif /* ! CONFIG_VIRTUALIZATION */ + +#endif /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ + +#else /* __ASSEMBLY__ */ + +/* + * Global registers map used by kernel + * Numbers of used global registers see at arch/e2k/include/asm/glob_regs.h + */ + +#define GET_GREG_MEMONIC(greg_no) %dg ## greg_no +#define DO_GET_GREG_MEMONIC(greg_no) GET_GREG_MEMONIC(greg_no) + +#define GCURTASK DO_GET_GREG_MEMONIC(CURRENT_TASK_GREG) +#define GCPUOFFSET DO_GET_GREG_MEMONIC(MY_CPU_OFFSET_GREG) +#define GCPUID_PREEMPT DO_GET_GREG_MEMONIC(SMP_CPU_ID_GREG) +/* Macroses for virtualization support on assembler */ +#define GVCPUSTATE DO_GET_GREG_MEMONIC(GUEST_VCPU_STATE_GREG) + +#endif /* ! 
__ASSEMBLY__ */ + +#include + +#ifndef __ASSEMBLY__ +__always_inline /* For CPU_HWBUG_VIRT_PSIZE_INTERCEPTION */ +static void init_pt_regs_for_syscall(struct pt_regs *regs) +{ + regs->next = NULL; + regs->trap = NULL; + +#ifdef CONFIG_USE_AAU + regs->aau_context = NULL; +#endif + + AW(regs->flags) = 0; + init_guest_syscalls_handling(regs); +} +#endif + +#endif /* _E2K_TRAP_TABLE_H */ diff --git a/arch/e2k/include/asm/traps.h b/arch/e2k/include/asm/traps.h new file mode 100644 index 000000000000..065c01b25ee7 --- /dev/null +++ b/arch/e2k/include/asm/traps.h @@ -0,0 +1,244 @@ +/* linux/include/asm-e2k/traps.h, v 1.0 03/07/2001. + * + * Copyright (C) 2001 MCST + * + * Defenition of traps handling routines. + */ + +#ifndef _E2K_TRAPS_H +#define _E2K_TRAPS_H + +#include +#include +#include +#include + +typedef void (*exc_function)(struct pt_regs *regs); +extern const exc_function exc_tbl[]; +extern const char *exc_tbl_name[]; + +#define S_SIG(regs, signo, trapno, code) \ +do { \ + int nr_TIRs; \ + e2k_tir_hi_t tir_hi; \ + struct trap_pt_regs *trap = (regs)->trap; \ + void __user *addr; \ + \ + if (trap) { \ + AW(tir_hi) = trap->TIR_hi; \ + nr_TIRs = GET_NR_TIRS(AW(tir_hi)); \ + addr = (void __user *) (trap->TIRs[nr_TIRs].TIR_lo.TIR_lo_ip); \ + } else { \ + addr = 0; \ + } \ + \ + force_sig_fault(signo, code, addr, trapno); \ +} while (0) + +extern int pf_on_page_boundary(unsigned long address, tc_cond_t cond); +extern bool is_spurious_qp_store(bool store, unsigned long address, + int fmt, tc_mask_t mask, unsigned long *pf_address); +extern void parse_TIR_registers(struct pt_regs *regs, u64 exceptions); +extern void do_aau_fault(int aa_field, struct pt_regs *regs); +extern int handle_proc_stack_bounds(struct e2k_stacks *stacks, + struct trap_pt_regs *trap); +extern int handle_chain_stack_bounds(struct e2k_stacks *stacks, + struct trap_pt_regs *trap); +extern int do_page_fault(struct pt_regs *const regs, e2k_addr_t address, + tc_cond_t condition, tc_mask_t mask, const int 
instr_page); +#ifdef CONFIG_KVM_ASYNC_PF +extern void do_pv_apf_wake(struct pt_regs *regs); +#endif /* */ +extern void do_trap_cellar(struct pt_regs *regs, int only_system_tc); + +extern irqreturn_t native_do_interrupt(struct pt_regs *regs); +extern void do_nm_interrupt(struct pt_regs *regs); +extern void native_instr_page_fault(struct pt_regs *regs, tc_fault_type_t ftype, + const int async_instr); + +extern void do_trap_cellar(struct pt_regs *regs, int only_system_tc); + +extern int constrict_user_data_stack(struct pt_regs *regs, unsigned long incr); +extern int expand_user_data_stack(struct pt_regs *regs, unsigned long incr); +extern void do_notify_resume(struct pt_regs *regs); + +extern void coredump_in_future(void); + +enum getsp_action { + GETSP_OP_FAIL = 1, + GETSP_OP_SIGSEGV, + GETSP_OP_INCREMENT, + GETSP_OP_DECREMENT +}; +extern enum getsp_action parse_getsp_operation(const struct pt_regs *regs, + int *incr, void __user **fault_addr); + +static inline unsigned int user_trap_init(void) +{ + /* Enable system calls for user's processes. */ + unsigned int linux_osem = 0; + + /* Enable deprecated generic ttable2 syscall entry. */ + linux_osem = 1 << LINUX_SYSCALL_TRAPNUM_OLD; + + /* Enable ttable1 syscall entry - 32-bit syscalls only */ + linux_osem |= 1 << LINUX_SYSCALL32_TRAPNUM; + /* Enable ttable3 syscall entry - 64-bit syscalls only */ + linux_osem |= 1 << LINUX_SYSCALL64_TRAPNUM; + + /* Enable fast syscalls entries. */ + linux_osem |= 1 << LINUX_FAST_SYSCALL32_TRAPNUM; + linux_osem |= 1 << LINUX_FAST_SYSCALL64_TRAPNUM; + linux_osem |= 1 << LINUX_FAST_SYSCALL128_TRAPNUM; + +#ifdef CONFIG_PROTECTED_MODE + linux_osem |= (1 << PMODE_SYSCALL_TRAPNUM); + linux_osem |= (1 << PMODE_NEW_SYSCALL_TRAPNUM); +#endif /* CONFIG_PROTECTED_MODE */ + + return linux_osem; +} + +static inline unsigned int guest_trap_init(void) +{ + /* Enable system calls for user's processes. 
*/ + unsigned int linux_osem = user_trap_init(); + +#ifdef CONFIG_KVM_HOST_MODE + linux_osem |= HYPERCALLS_TRAPS_MASK; +#endif + + return linux_osem; +} + +static inline unsigned int user_hcall_init(void) +{ + unsigned int linux_hcem = 0; + + linux_hcem = 1 << LINUX_HCALL_GENERIC_TRAPNUM; + linux_hcem |= 1 << LINUX_HCALL_LIGHT_TRAPNUM; + + return linux_hcem; +} + +extern char __hypercalls_begin[]; +static inline void kernel_trap_mask_init(void) +{ + WRITE_OSEM_REG(user_trap_init()); +#ifdef CONFIG_KVM_HOST_MODE + machine.rwd(E2K_REG_HCEM, user_hcall_init()); + machine.rwd(E2K_REG_HCEB, (unsigned long) __hypercalls_begin); +#endif +} + +static inline int +native_host_apply_psp_delta_to_signal_stack(unsigned long base, + unsigned long size, unsigned long start, + unsigned long end, unsigned long delta) +{ + /* native & host kernel cannot be paravirtualized guest */ + return 0; +} + +static inline int +native_host_apply_pcsp_delta_to_signal_stack(unsigned long base, + unsigned long size, unsigned long start, + unsigned long end, unsigned long delta) +{ + /* native & host kernel cannot be paravirtualized guest */ + return 0; +} + +/* + * MMIO page fault cannot occur on native or host mode, + * so ignore such traps + */ +static inline unsigned long +native_mmio_page_fault(struct pt_regs *regs, trap_cellar_t *tcellar) +{ + return 0; +} + +#ifdef CONFIG_VIRTUALIZATION +extern void native_sysrq_showstate_interrupt(struct pt_regs *regs); +static inline void +native_init_guest_system_handlers_table(void) +{ + if (paravirt_enabled()) { + /* It is native guest */ + setup_PIC_vector_handler(SYSRQ_SHOWSTATE_EPIC_VECTOR, + native_sysrq_showstate_interrupt, 1, + "native_sysrq_showstate_interrupt"); + } +} +# ifndef CONFIG_KVM +# error "Undefined guest virtualization type" +# endif /* CONFIG_KVM */ +#else /* ! 
CONFIG_VIRTUALIZATION */ +/* it is native host kernel without virtualization support */ +static inline void +native_init_guest_system_handlers_table(void) +{ + /* Nothing to do */ +} +# define SET_RUNSTATE_IN_USER_TRAP() +# define SET_RUNSTATE_OUT_USER_TRAP() +# define SET_RUNSTATE_IN_KERNEL_TRAP(cur_runstate) +# define SET_RUNSTATE_OUT_KERNEL_TRAP(cur_runstate) +#endif /* CONFIG_VIRTUALIZATION */ + +#if defined(CONFIG_KVM_GUEST_KERNEL) +/* It is pure guest kernel (not paravirtualized) */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* It is paravirtualized host and guest kernel */ +#include +#else /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ +/* it is native kernel without any virtualization */ +/* or it is native host kernel with virtualization support */ + +#define TIR0_clear_false_exceptions(TIR_hi, nr_TIRs) \ + native_TIR0_clear_false_exceptions(TIR_hi, nr_TIRs) + +static inline void +handle_interrupt(struct pt_regs *regs) +{ + native_do_interrupt(regs); +} + +extern int apply_psp_delta_to_signal_stack(unsigned long base, + unsigned long size, unsigned long start, unsigned long end, + unsigned long delta); +extern int apply_pcsp_delta_to_signal_stack(unsigned long base, + unsigned long size, unsigned long start, unsigned long end, + unsigned long delta); + +static inline int host_apply_psp_delta_to_signal_stack(unsigned long base, + unsigned long size, unsigned long start, + unsigned long end, unsigned long delta) +{ + return native_host_apply_psp_delta_to_signal_stack(base, size, + start, end, delta); +} + +static inline int host_apply_pcsp_delta_to_signal_stack(unsigned long base, + unsigned long size, unsigned long start, + unsigned long end, unsigned long delta) +{ + return native_host_apply_pcsp_delta_to_signal_stack(base, size, + start, end, delta); +} + +static inline unsigned long +mmio_page_fault(struct pt_regs *regs, trap_cellar_t *tcellar) +{ + return native_mmio_page_fault(regs, tcellar); +} +static inline void 
+init_guest_system_handlers_table(void) +{ + native_init_guest_system_handlers_table(); +} +#endif /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ + +#endif /* _E2K_TRAPS_H */ diff --git a/arch/e2k/include/asm/types.h b/arch/e2k/include/asm/types.h new file mode 100644 index 000000000000..c574c9faff43 --- /dev/null +++ b/arch/e2k/include/asm/types.h @@ -0,0 +1,11 @@ +#ifndef _E2K_TYPES_H_ +#define _E2K_TYPES_H_ + +#include + +#ifndef __ASSEMBLY__ + +typedef struct linux_binprm linux_binprm_t; + +#endif /* !(__ASSEMBLY__) */ +#endif /* _E2K_TYPES_H_ */ diff --git a/arch/e2k/include/asm/uaccess.h b/arch/e2k/include/asm/uaccess.h new file mode 100644 index 000000000000..948ed7b3ee46 --- /dev/null +++ b/arch/e2k/include/asm/uaccess.h @@ -0,0 +1,448 @@ +#ifndef _E2K_UACCESS_H_ +#define _E2K_UACCESS_H_ + +/* + * User space memory access functions + * asm/uaccess.h + */ +#include + +#include +#include +#include +#include +#include +#ifdef CONFIG_PROTECTED_MODE +#include +#endif + +#undef DEBUG_UACCESS_MODE +#undef DEBUG_UACCESS_FAULT +#undef DebugUA +#undef DebugUAF +#define DEBUG_UACCESS_MODE 0 +#define DEBUG_UACCESS_FAULT 0 +#define DebugUA \ + if (DEBUG_UACCESS_MODE) printk +#if DEBUG_UACCESS_MODE || DEBUG_UACCESS_FAULT +# define DebugUAF printk +#else +# define DebugUAF(...) +#endif + + + +/* + * The fs value determines whether argument validity checking should be + * performed or not. If get_fs() == USER_DS, checking is performed, with + * get_fs() == KERNEL_DS, checking is bypassed. + * + * For historical reasons, these macros are grossly misnamed. + */ + +#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) + +/* Even kernel should not access page tables with get_user()/put_user() */ +#define KERNEL_DS MAKE_MM_SEG(KERNEL_VPTB_BASE_ADDR) + +#define USER_ADDR_MAX USER_HW_STACKS_BASE +#define USER_DS MAKE_MM_SEG(USER_ADDR_MAX) + +/* + * Sometimes kernel wants to access hardware stacks, + * in which case we can use this limit. 
+ * + * IMPORTANT: in this case kernel must check that it accesses + * only the stacks of the current thread. Writing another + * thread's hardware stacks shall not be possible. + */ +#define K_USER_DS MAKE_MM_SEG(PAGE_OFFSET) + +#define get_ds() (KERNEL_DS) +#define get_fs() (current_thread_info()->addr_limit) +#define set_fs(x) (current_thread_info()->addr_limit = (x)) + +#define segment_eq(a,b) ((a).seg == (b).seg) + +#define user_addr_max() (current_thread_info()->addr_limit.seg) + +extern int __verify_write(const void *addr, unsigned long size); +extern int __verify_read(const void *addr, unsigned long size); + +static inline bool __range_ok(unsigned long addr, unsigned long size, + unsigned long limit) +{ + BUILD_BUG_ON(!__builtin_constant_p(TASK32_SIZE)); + + if (__builtin_constant_p(size) && size <= TASK32_SIZE) + return likely(addr <= limit - size); + + /* Arbitrary sizes? Be careful about overflow */ + return likely(addr + size >= size && addr + size <= limit); +} + +#define access_ok(addr, size) \ +({ \ + __chk_user_ptr(addr); \ + likely(__range_ok((unsigned long __force) (addr), (size), \ + user_addr_max())); \ +}) + +struct exception_table_entry +{ + unsigned long insn; + unsigned long fixup; +}; + + +/* + * The macros to work safely in kernel with user memory: + * + * TRY_USR_PFAULT { + * ... // "try" code which accesses user memory + * } CATCH_USR_PFAULT { + * ... // "catch" code after page fault (bad user's address) + * } END_USR_PFAULT + * + * NOTE1: these macros can be used inside of "catch" code, + * but can *not* be used inside of "try" code + * NOTE2: the compiler believes that after global label we CAN'T use + * local context so __result__ must be initialized after the label. + * NOTE3: any variable that is used in both "if" and "else" blocks must + * be marked with "volatile" keyword. 
Caveat: mark the variable + * itself and not the memory it is pointing to, i.e: + * int *volatile ptr <=== RIGHT + * volatile int *ptr <=== WRONG + */ + +#define TRY_USR_PFAULT \ + might_fault(); \ + __TRY_USR_PFAULT + +#pragma unknown_control_flow(set_usr_pfault_jump) +static __always_inline void set_usr_pfault_jump(void) +{ + SAVE_CURRENT_ADDR(¤t_thread_info()->usr_pfault_jump); +} + +#define __TRY_USR_PFAULT \ + unsigned long _usr_pfault_jmp = current_thread_info()->usr_pfault_jump; \ + set_usr_pfault_jump(); \ + if (likely(current_thread_info()->usr_pfault_jump)) { + +#define CATCH_USR_PFAULT \ + E2K_CMD_SEPARATOR; \ + current_thread_info()->usr_pfault_jump = _usr_pfault_jmp; \ + } else { \ + current_thread_info()->usr_pfault_jump = _usr_pfault_jmp; + +#define END_USR_PFAULT \ + E2K_CMD_SEPARATOR; \ + } + +#define SET_USR_PFAULT(name) \ + unsigned long _usr_pfault_jmp = \ + current_thread_info()->usr_pfault_jump; \ + GET_LBL_ADDR(name, current_thread_info()->usr_pfault_jump) + +#define RESTORE_USR_PFAULT \ +({ \ + unsigned long __pfault_result = current_thread_info()->usr_pfault_jump;\ + current_thread_info()->usr_pfault_jump = _usr_pfault_jmp; \ + unlikely(!__pfault_result); \ +}) + +/* + * These are the main single-value transfer routines. They automatically + * use the right size if we just have the right pointer type. + * + * This gets kind of ugly. We want to return _two_ values in "get_user()" + * and yet we don't want to do any pointers, because that is too much + * of a performance impact. Thus we have a few rather ugly macros here, + * and hide all the uglyness from the user. + * + * The "__xxx" versions of the user access functions are versions that + * do not verify the address space, that must have been done previously + * with a separate "access_ok()" call (this is used when we do multiple + * accesses to the same area of user memory). 
+ */ + + /** + * get user + */ + +extern int __get_user_bad(void) __attribute__((noreturn)); + +#define __get_user(x, ptr) \ +({ \ + const __typeof__(*(ptr)) __user *___gu_ptr = (ptr); \ + int __ret_gu; \ + switch (sizeof(*___gu_ptr)) { \ + case 1: \ + GET_USER_ASM(x, ___gu_ptr, b, __ret_gu); break; \ + case 2: \ + GET_USER_ASM(x, ___gu_ptr, h, __ret_gu); break; \ + case 4: \ + GET_USER_ASM(x, ___gu_ptr, w, __ret_gu); break; \ + case 8: \ + GET_USER_ASM(x, ___gu_ptr, d, __ret_gu); break; \ + default: \ + __ret_gu = -EFAULT; __get_user_bad(); break; \ + } \ + (int) builtin_expect_wrapper(__ret_gu, 0); \ +}) + +#define get_user(x, ptr) \ +({ \ + const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ + might_fault(); \ + access_ok(__gu_ptr, sizeof(*__gu_ptr)) ? \ + __get_user((x), __gu_ptr) : \ + ((x) = (__typeof__(x)) 0, -EFAULT); \ +}) + + + /** + * put user + */ + +extern int __put_user_bad(void) __attribute__((noreturn)); + +#define __put_user(x, ptr) \ +({ \ + __typeof__(*(ptr)) __user *___pu_ptr = (ptr); \ + __typeof__(*(ptr)) __pu_val = (x); \ + int __ret_pu; \ + switch (sizeof(*___pu_ptr)) { \ + case 1: \ + PUT_USER_ASM(__pu_val, ___pu_ptr, b, __ret_pu); break; \ + case 2: \ + PUT_USER_ASM(__pu_val, ___pu_ptr, h, __ret_pu); break; \ + case 4: \ + PUT_USER_ASM(__pu_val, ___pu_ptr, w, __ret_pu); break; \ + case 8: \ + PUT_USER_ASM(__pu_val, ___pu_ptr, d, __ret_pu); break; \ + default: \ + __ret_pu = -EFAULT; __put_user_bad(); break; \ + } \ + (int) builtin_expect_wrapper(__ret_pu, 0); \ +}) + +#define put_user(x, ptr) \ +({ \ + __typeof__(*(ptr)) *__pu_ptr = (ptr); \ + might_fault(); \ + (access_ok(__pu_ptr, sizeof(*__pu_ptr))) ? 
\ + __put_user((x), __pu_ptr) : -EFAULT; \ +}) + +#define raw_copy_to_user raw_copy_in_user + +extern unsigned long raw_copy_from_user(void *to, const void *from, + unsigned long n); +extern unsigned long raw_copy_in_user(void *to, const void *from, + unsigned long n); + +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER + +extern unsigned long __copy_user_with_tags(void *to, const void *from, + unsigned long n); + +#define __copy_in_user_with_tags __copy_user_with_tags +#define __copy_to_user_with_tags __copy_user_with_tags +#define __copy_from_user_with_tags __copy_user_with_tags + +static inline +unsigned long copy_in_user_with_tags(void __user *to, const void __user *from, + unsigned long n) +{ + if (likely(access_ok(from, n) && access_ok(to, n))) + n = __copy_in_user_with_tags(to, from, n); + + return n; +} + +static inline +unsigned long copy_to_user_with_tags(void __user *to, const void *from, + unsigned long n) +{ + if (access_ok(to, n)) + n = __copy_to_user_with_tags(to, from, n); + + return n; +} + +static inline +unsigned long copy_from_user_with_tags(void *to, const void __user *from, + unsigned long n) +{ + if (access_ok(from, n)) + n = __copy_from_user_with_tags(to, from, n); + + return n; +} + +#define strlen_user(str) strnlen_user(str, ~0UL >> 1) +long strnlen_user(const char __user *str, long count) __pure; + +long __strncpy_from_user(char *dst, const char *src, long count); + +static inline long +strncpy_from_user(char *dst, const char __user *src, long count) +{ + if (!access_ok(src, 1)) + return -EFAULT; + return __strncpy_from_user(dst, src, count); +} + + +unsigned long __fill_user(void __user *mem, unsigned long len, const u8 b); + +static inline __must_check unsigned long +fill_user(void __user *to, unsigned long n, const u8 b) +{ + if (!access_ok(to, n)) + return n; + + return __fill_user(to, n, b); +} + +#define __clear_user(mem, len) __fill_user(mem, len, 0) +#define clear_user(to, n) fill_user(to, n, 0) + + +unsigned long 
__fill_user_with_tags(void *, unsigned long, unsigned long, unsigned long); + +/* Filling aligned user pointer 'to' with 'n' bytes of 'dw' double words: */ +static inline __must_check unsigned long +fill_user_with_tags(void __user *to, unsigned long n, unsigned long tag, unsigned long dw) +{ + if (!access_ok(to, n)) + return n; + + return __fill_user_with_tags(to, n, tag, dw); +} + +static inline __must_check unsigned long +clear_user_with_tags(void __user *ptr, unsigned long length, unsigned long tag) +{ + return fill_user_with_tags(ptr, length, tag, 0); +} + +#ifdef CONFIG_PROTECTED_MODE + +static inline int PUT_USER_AP(e2k_ptr_t *ptr, u64 base, + u64 len, u64 off, u64 rw) +{ + if ((long)ptr & 0xf) { + /* not aligned */ + return -EFAULT; + } + + TRY_USR_PFAULT { + if (base == 0) { + E2K_STORE_NULLPTR_QWORD(&AWP(ptr).lo); + } else { + u64 tmp; + + tmp = MAKE_AP_HI(base, len, off, rw); + /* FIXME: need implement for guest kernel + * to support virtualization */ + NATIVE_STORE_VALUE_WITH_TAG(&AWP(ptr).hi, tmp, + E2K_AP_HI_ETAG); + tmp = MAKE_AP_LO(base, len, off, rw); + /* FIXME: need implement for guest kernel + * to support virtualization */ + NATIVE_STORE_VALUE_WITH_TAG(&AWP(ptr).lo, tmp, + E2K_AP_LO_ETAG); + } + } CATCH_USR_PFAULT { + return -EFAULT; + } END_USR_PFAULT + + return 0; +} + +static inline int PUT_USER_PL_V2(e2k_pl_lo_t *plp, u64 entry) +{ + e2k_pl_lo_t tmp = MAKE_PL_V2(entry).lo; + + if ((long)plp & (sizeof(e2k_pl_lo_t) - 1)) { + /* not aligned */ + return -EFAULT; + } + + TRY_USR_PFAULT { + /* FIXME: need implement for guest kernel + * to support virtualization */ + NATIVE_STORE_VALUE_WITH_TAG(plp, tmp.PL_lo_value, E2K_PL_ETAG); + } CATCH_USR_PFAULT { + return -EFAULT; + } END_USR_PFAULT + + return 0; +} + +static inline int PUT_USER_PL_V6(e2k_pl_t *plp, u64 entry, u32 cui) +{ + e2k_pl_t tmp = MAKE_PL_V6(entry, cui); + + if ((long)plp & (sizeof(e2k_pl_t) - 1)) { + /* not aligned */ + return -EFAULT; + } + + TRY_USR_PFAULT { + /* FIXME: need 
implement for guest kernel + * to support virtualization */ + NATIVE_STORE_TAGGED_QWORD(plp, tmp.PLLO_value, tmp.PLHI_value, + E2K_PLLO_ETAG, E2K_PLHI_ETAG); + } CATCH_USR_PFAULT { + return -EFAULT; + } END_USR_PFAULT + + return 0; +} + +static inline int CPU_PUT_USER_PL(e2k_pl_t *plp, u64 entry, u32 cui, + bool is_cpu_iset_v6) +{ + if (is_cpu_iset_v6) { + return PUT_USER_PL_V6(plp, entry, cui); + } else { + int ret = put_user(0UL, &plp->PLHI_value); + if (ret) + return ret; + return PUT_USER_PL_V2(&plp->PLLO_item, entry); + } +} + +static inline int PUT_USER_PL(e2k_pl_t *plp, u64 entry, u32 cui) +{ + return CPU_PUT_USER_PL(plp, entry, cui, IS_CPU_ISET_V6()); +} + +#define __GET_USER_VAL_TAGD(val, tag, ptr) \ +({ \ + int res; \ + TRY_USR_PFAULT { \ + /* FIXME: should be paravirtualized */ \ + NATIVE_LOAD_VAL_AND_TAGD((ptr), (val), (tag)); \ + res = 0; \ + } CATCH_USR_PFAULT { \ + res = -EFAULT; \ + } END_USR_PFAULT \ + res; \ +}) + +#define GET_USER_VAL_TAGD(val, tag, ptr) \ +({ \ + const __typeof__(*(ptr)) __user *__guvt_ptr = (ptr); \ + access_ok(__guvt_ptr, sizeof(*__guvt_ptr)) ? 
\ + __GET_USER_VAL_TAGD((val), (tag), __guvt_ptr) : -EFAULT; \ +}) + +#endif /* CONFIG_PROTECTED_MODE */ + +#endif /* _E2K_UACCESS_H_ */ diff --git a/arch/e2k/include/asm/ucontext.h b/arch/e2k/include/asm/ucontext.h new file mode 100644 index 000000000000..46144b2bebee --- /dev/null +++ b/arch/e2k/include/asm/ucontext.h @@ -0,0 +1,48 @@ +#ifndef _E2K_UCONTEXT_H +#define _E2K_UCONTEXT_H + +#include +#include + +struct ucontext_32 { + unsigned int uc_flags; + unsigned int uc_link; + compat_stack_t uc_stack; + struct sigcontext uc_mcontext; + union { + compat_sigset_t uc_sigmask;/* mask last for extensibility */ + unsigned long long pad[16]; + }; + struct extra_ucontext uc_extra; /* for compatibility */ +}; + +#ifdef CONFIG_PROTECTED_MODE +struct ucontext_prot { + unsigned long uc_flags; + unsigned long __align; + e2k_ptr_t uc_link; + stack_prot_t uc_stack; + struct sigcontext_prot uc_mcontext; + union { + sigset_t uc_sigmask; + unsigned long long pad[16]; + }; + struct extra_ucontext uc_extra; /* for compatibility */ +}; +#endif /* CONFIG_PROTECTED_MODE */ + +typedef struct rt_sigframe { + u64 __pad_args[8]; /* Reserve space in data stack for the handler */ + union { + siginfo_t info; + compat_siginfo_t compat_info; + }; + union { + struct ucontext uc; + struct ucontext_32 uc_32; + struct ucontext_prot uc_prot; + }; +} rt_sigframe_t; + +extern int restore_rt_frame(rt_sigframe_t *, struct k_sigaction *); +#endif /* ! 
_E2K_UCONTEXT_H */ diff --git a/arch/e2k/include/asm/umalloc.h b/arch/e2k/include/asm/umalloc.h new file mode 100644 index 000000000000..ae7335fc0176 --- /dev/null +++ b/arch/e2k/include/asm/umalloc.h @@ -0,0 +1,70 @@ + +#ifndef _E2K_UMALLOC_H_ +#define _E2K_UMALLOC_H_ + + +#include + + + +typedef struct { + u32 m_size; + u32 m_alloced; + u32 m_used; + u32 m_real; +} mallocstat_t; + + + // Small chunk pools + +struct subpoolhdr; +typedef struct subpoolhdr subpoolhdr_t; + +struct mem_moved_poolhdr; +typedef struct mem_moved_poolhdr mem_moved_poolhdr_t; + +// we can't use include/list.h !!! +struct list_head_um{ + struct list_head_um *next, *prev; +}; + +// we can't use linux/rt_lock.h !!! +struct rt_mutex_um{ + u64 tmp[30]; +}; +typedef struct { + struct list_head_um head; + struct rt_mutex_um lock; + u32 mainp; + u32 size; // size of chunk +} umlc_pool_t; + + +#define MAX_CHUNKS 10 +// index of big chunk +#define BIG_CHUNK_IND (MAX_CHUNKS-1) + + +// heap descriptor +typedef struct { + umlc_pool_t pools[MAX_CHUNKS]; + atomic_t gc_lock; // lock for garber collection + mem_moved_poolhdr_t *mem_moved; // reference to mem_moved hdr + u32 allused; // common size of valid arrays to the moment + u32 allreal; // sum real sizes of mallocs + u32 allsize; // common size occupied by heap +} allpools_t; + + +extern void dump_malloc_cart(void); +extern e2k_addr_t sys_malloc(size_t size); +extern void sys_free(e2k_addr_t addr, size_t size); +extern void init_sem_malloc(allpools_t *allpools); +extern int mem_set_empty_tagged_dw(void __user *ptr, s64 size, u64 dw); +struct task_struct; +extern void init_pool_malloc(struct task_struct *, struct task_struct *); + +extern int clean_descriptors(void __user *list, unsigned long list_size); +extern int clean_single_descriptor(e2k_ptr_t descriptor); +#endif /* _E2K_UMALLOC_H_ */ + diff --git a/arch/e2k/include/asm/unaligned.h b/arch/e2k/include/asm/unaligned.h new file mode 100644 index 000000000000..e203c29c41f6 --- /dev/null +++ 
b/arch/e2k/include/asm/unaligned.h @@ -0,0 +1,25 @@ +#ifndef _E2K_UNALIGNED_H_ +#define _E2K_UNALIGNED_H_ + +/* + * The e2K arch can do unaligned accesses itself as i386. + * + * The strange macros are there to make sure these can't + * be misused in a way that makes them not work on other + * architectures where unaligned accesses aren't as simple. + * + * BUT there is a hardware bug which forbids usage of + * unaligned accesses and DAM together. + */ + +#ifdef CONFIG_ACCESS_CONTROL +# include +#else +# include +# include + +# define get_unaligned __get_unaligned_le +# define put_unaligned __put_unaligned_le +#endif + +#endif /* _E2K_UNALIGNED_H_ */ diff --git a/arch/e2k/include/asm/unistd.h b/arch/e2k/include/asm/unistd.h new file mode 100644 index 000000000000..b545a73d90d4 --- /dev/null +++ b/arch/e2k/include/asm/unistd.h @@ -0,0 +1,60 @@ +#ifndef _E2K_UNISTD_H_ +#define _E2K_UNISTD_H_ + +#include +#ifndef __ASSEMBLY__ +#include +#endif /* __ASSEMBLY__ */ +#include + +#define NR_fast_syscalls_mask 0x7 +/* Must be a power of 2 (for simpler checks in assembler entry) */ +#define NR_fast_syscalls 8 + + +#define __NR__brk __NR_brk +#define __NR_newstat __NR_stat +#define __NR_newlstat __NR_lstat +#define __NR_newfstat __NR_fstat +#define __NR_olduselib __NR_uselib + +/* On e2k these are called "pread" and "pwrite" */ +#define __IGNORE_pread64 +#define __IGNORE_pwrite64 + +#define __IGNORE_semget +#define __IGNORE_semctl + +#define __ARCH_WANT_OLD_READDIR +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_OLD_STAT +#define __ARCH_WANT_NEW_STAT +#define __ARCH_WANT_SYS_ALARM +#define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SYS_CLONE3 +#define __ARCH_WANT_SYS_FORK +#define __ARCH_WANT_SYS_GETHOSTNAME +#define __ARCH_WANT_SYS_IPC +#define __ARCH_WANT_SYS_PAUSE +#define __ARCH_WANT_SYS_SGETMASK +#define __ARCH_WANT_SYS_SIGNAL +#define __ARCH_WANT_SYS_TIME +#define __ARCH_WANT_SYS_TIME32 +#define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_UTIME32 +#define 
__ARCH_WANT_SYS_WAITPID +#define __ARCH_WANT_SYS_SOCKETCALL +#define __ARCH_WANT_SYS_FADVISE64 +#define __ARCH_WANT_SYS_GETPGRP +#define __ARCH_WANT_SYS_LLSEEK +#define __ARCH_WANT_SYS_NEWFSTATAT +#define __ARCH_WANT_SYS_NICE +#define __ARCH_WANT_SYS_OLD_GETRLIMIT +#define __ARCH_WANT_SYS_OLDUMOUNT +#define __ARCH_WANT_SYS_SIGPENDING +#define __ARCH_WANT_SYS_SIGPROCMASK +#define __ARCH_WANT_SYS_VFORK +#define __ARCH_WANT_COMPAT_SYS_TIME +#define __ARCH_WANT_COMPAT_SYS_GETDENTS64 + +#endif /* _E2K_UNISTD_H_ */ diff --git a/arch/e2k/include/asm/user.h b/arch/e2k/include/asm/user.h new file mode 100644 index 000000000000..4cebe8eb097e --- /dev/null +++ b/arch/e2k/include/asm/user.h @@ -0,0 +1,225 @@ +#ifndef _E2K_USER_H_ +#define _E2K_USER_H_ + +#include +#include + + +/* When the kernel dumps core, it starts by dumping the user struct - + * this will be used by gdb to figure out where the data and stack segments + * are within the file, and what virtual addresses to use. + */ + +#define __CHECK_USER 0x1034567887654301ULL +#define __CHECK_KERNEL 0x1234567887654321ULL +#define MLT_NUM (16 * 3) + +/* FIXME comments + * ATTENTION!!! 
Any change should be sumited to debuger + */ + +struct user_regs_struct { + unsigned long long sizeof_struct; // interface with debuger + unsigned long long g[32]; + + unsigned long long psr; + unsigned long long upsr; + + unsigned long long oscud_lo; + unsigned long long oscud_hi; + unsigned long long osgd_lo; + unsigned long long osgd_hi; + unsigned long long osem; + unsigned long long osr0; + + unsigned long long pfpfr; + unsigned long long fpcr; + unsigned long long fpsr; + + unsigned long long usbr; + unsigned long long usd_lo; + unsigned long long usd_hi; + + unsigned long long psp_lo; + unsigned long long psp_hi; + unsigned long long pshtp; + + unsigned long long cr0_lo; + unsigned long long cr0_hi; + unsigned long long cr1_lo; + unsigned long long cr1_hi; + + unsigned long long cwd; + + unsigned long long pcsp_lo; + unsigned long long pcsp_hi; + unsigned long long pcshtp; + + unsigned long long cud_lo; + unsigned long long cud_hi; + unsigned long long gd_lo; + unsigned long long gd_hi; + + unsigned long long cs_lo; + unsigned long long cs_hi; + unsigned long long ds_lo; + unsigned long long ds_hi; + unsigned long long es_lo; + unsigned long long es_hi; + unsigned long long fs_lo; + unsigned long long fs_hi; + unsigned long long gs_lo; + unsigned long long gs_hi; + unsigned long long ss_lo; + unsigned long long ss_hi; + + unsigned long long aad[32*2]; /* %aad0.lo, %aad0.hi, %aad1.lo ... 
*/ + unsigned long long aaind[16]; + unsigned long long aaincr[8]; + unsigned long long aaldi[64]; + unsigned long long aaldv; + unsigned long long aalda[64]; + unsigned long long aaldm; + unsigned long long aasr; + unsigned long long aafstr; + unsigned long long aasti[16]; + + unsigned long long clkr; + unsigned long long dibcr; + unsigned long long ddbcr; + unsigned long long dibar[4]; + unsigned long long ddbar[4]; + unsigned long long dimcr; + unsigned long long ddmcr; + unsigned long long dimar[2]; + unsigned long long ddmar[2]; + unsigned long long dibsr; + unsigned long long ddbsr; + unsigned long long dtcr; + unsigned long long dtarf; + unsigned long long dtart; + + unsigned long long wd; + + unsigned long long br; + unsigned long long bgr; + + unsigned long long ip; + unsigned long long nip; + unsigned long long ctpr1; + unsigned long long ctpr2; + unsigned long long ctpr3; + + unsigned long long eir; + + unsigned long long tr; /* unused */ + + unsigned long long cutd; + unsigned long long cuir; + unsigned long long tsd; /* unused */ + + unsigned long long lsr; + unsigned long long ilcr; + + long long sys_rval; + long long sys_num; + long long arg1; + long long arg2; + long long arg3; + long long arg4; + long long arg5; + long long arg6; + +/* + * Some space for backup/restore of extensions and tags of global registers. 
+ * now places in the end of structure + */ + unsigned char gtag[32]; + unsigned short gext[32]; +/* + * additional part (for binary compiler) + */ + unsigned long long rpr_hi; + unsigned long long rpr_lo; + + unsigned long long tir_lo [TIR_NUM]; + unsigned long long tir_hi [TIR_NUM]; + + unsigned long long trap_cell_addr [MAX_TC_SIZE]; + unsigned long long trap_cell_val [MAX_TC_SIZE]; + unsigned char trap_cell_tag [MAX_TC_SIZE]; + unsigned long long trap_cell_info [MAX_TC_SIZE]; + + unsigned long long dam [DAM_ENTRIES_NUM]; + + unsigned long long sbbp [SBBP_ENTRIES_NUM]; + + unsigned long long mlt [MLT_NUM]; + +/* + * CPU info + */ + unsigned long long idr; + unsigned long long core_mode; + +/* + * iset v5 additions + */ + unsigned long long lsr1; + unsigned long long ilcr1; + + unsigned long long gext_v5[32]; + unsigned char gext_tag_v5[32]; +/* + * Not actual registers, but still useful information + */ + unsigned long long chain_stack_base; + unsigned long long proc_stack_base; + +/* + * iset v6 additions + */ + unsigned long long dimtp_lo; + unsigned long long dimtp_hi; + unsigned long long ctpr1_hi; + unsigned long long ctpr2_hi; + unsigned long long ctpr3_hi; +/* + * Please, include new fields below + */ +}; + +struct user_pt_regs { + /* empty */ +}; + +struct user{ +/* + * We start with the registers, to mimic the way that "memory" is returned + * from the ptrace(3,...) function. + */ + struct user_regs_struct regs; /* Where the registers */ + /* are actually stored */ + +/* The rest of this junk is to help gdb figure out what goes where */ + + unsigned long int u_tsize; /* Text segment size (pages). */ + unsigned long int u_dsize; /* Data segment size (pages). */ + unsigned long int u_ssize; /* Stack segment size (pages). */ + unsigned long start_code; /* text starting address */ + unsigned long start_data; /* data starting address */ + unsigned long start_stack; /* stack starting address */ + long int signal; /* Signal that caused the core dump. 
*/ + int reserved; /* No longer used */ + struct user_pt_regs * u_ar0; /* Used by gdb to help find the */ + /* values for the registers. */ + unsigned long magic; /* To uniquely identify a core file */ + char u_comm[32]; /* User command that was responsible */ +}; + +#define NBPG PAGE_SIZE +#define UPAGES 1 +#define HOST_TEXT_START_ADDR (u.start_code) +#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) + +#endif /* _E2K_USER_H_ */ diff --git a/arch/e2k/include/asm/vga.h b/arch/e2k/include/asm/vga.h new file mode 100644 index 000000000000..7c0324f3aa3c --- /dev/null +++ b/arch/e2k/include/asm/vga.h @@ -0,0 +1,86 @@ +/* + * Access to VGA videoram + * + * (c) 1998 Martin Mares + */ + +#ifndef _LINUX_ASM_VGA_H_ +#define _LINUX_ASM_VGA_H_ + +#include +#include + +/* + * On the PC, we can just recalculate addresses and then + * access the videoram directly without any black magic. + */ + +#define E2K_VGA_DIRECT_IOMEM + +#define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x) + +#define VGA_VRAM_PHYS_BASE 0x00000a0000UL /* VGA video RAM low memory */ +#define VGA_VRAM_SIZE 0x0000020000UL /* a0000 - c0000 */ + +#ifdef E2K_VGA_DIRECT_IOMEM + +#define native_scr_writew(val, addr) (*(addr) = (val)) +#define native_scr_readw(addr) (*(addr)) + +#define native_vga_readb(addr) (*(addr)) +#define native_vga_writeb(val, addr) (*(addr) = (val)) + +#else + +#define VT_BUF_HAVE_RW + +static inline void native_scr_writew(u16 val, volatile u16 *addr) +{ + native_writew(val, addr); +} +static inline u16 native_scr_readw(volatile const u16 *addr) +{ + return native_readw(addr); +} +static inline void native_vga_writeb(u8 val, volatile u8 *addr) +{ + native_writeb(val, addr); +} + +static inline u8 native_vga_readb(volatile const u8 *addr) +{ + return native_readb(addr); +} + +#endif /* E2K_VGA_DIRECT_IOMEM */ + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* native guest kernel */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* paravirtualized host and guest kernel */ +#include 
+#else /* ! CONFIG_KVM_GUEST_KERNEL && ! CONFIG_PARAVIRT_GUEST */ +/* native host kernel with or whithout visrtualizaton */ + +static inline void scr_writew(u16 val, volatile u16 *addr) +{ + native_scr_writew(val, addr); +} + +static inline u16 scr_readw(volatile const u16 *addr) +{ + return native_scr_readw(addr); +} +static inline void vga_writeb(u8 val, volatile u8 *addr) +{ + native_vga_writeb(val, addr); +} + +static inline u8 vga_readb(volatile const u8 *addr) +{ + return native_vga_readb(addr); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif diff --git a/arch/e2k/include/asm/vmlinux.lds.h b/arch/e2k/include/asm/vmlinux.lds.h new file mode 100644 index 000000000000..b6b1d96b5322 --- /dev/null +++ b/arch/e2k/include/asm/vmlinux.lds.h @@ -0,0 +1,14 @@ +#ifndef __ASM_E2K_VMLINUX_LDS_H +#define __ASM_E2K_VMLINUX_LDS_H + +#include + +#define E2K_BOOT_SETUP(bootsetup_align) \ + .boot.data : AT(ADDR(.boot.data) - LOAD_OFFSET) { \ + . = ALIGN(bootsetup_align); \ + __boot_setup_start = .; \ + *(.boot.setup) \ + __boot_setup_end = .; \ + } + +#endif /* __ASM_E2K_VMLINUX_LDS_H */ diff --git a/arch/e2k/include/asm/word-at-a-time.h b/arch/e2k/include/asm/word-at-a-time.h new file mode 100644 index 000000000000..be22d42ba4cf --- /dev/null +++ b/arch/e2k/include/asm/word-at-a-time.h @@ -0,0 +1,50 @@ +#ifndef _ASM_WORD_AT_A_TIME_H +#define _ASM_WORD_AT_A_TIME_H + +#include + +/* Unused */ +struct word_at_a_time { }; +#define WORD_AT_A_TIME_CONSTANTS { } + +/* This will give us 0xff for a zero char and 0x00 elsewhere */ +static inline unsigned long has_zero(unsigned long a, unsigned long *bits, + const struct word_at_a_time *c) +{ + *bits = __builtin_e2k_pcmpeqb(a, 0); + return *bits; +} + +static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, + const struct word_at_a_time *c) +{ + return bits; +} + +/* This will give us 0xff until the first zero char (excluding it) */ +static inline unsigned long create_zero_mask(unsigned long bits) +{ + return 
(bits - 1) & ~bits; +} + +/* The mask we created is directly usable as a bytemask */ +#define zero_bytemask(mask) (mask) + +static inline unsigned long find_zero(unsigned long mask) +{ + return __builtin_e2k_popcntd(mask) >> 3; +} + +/* + * Load an unaligned word from kernel space. + * + * In the (very unlikely) case of the word being a page-crosser + * and the next page not being mapped, take the exception and + * return zeroes in the non-existing part. + */ +static inline unsigned long load_unaligned_zeropad(const void *addr) +{ + return LOAD_UNALIGNED_ZEROPAD(addr); +} + +#endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/arch/e2k/include/uapi/asm/Kbuild b/arch/e2k/include/uapi/asm/Kbuild new file mode 100644 index 000000000000..f66554cd5c45 --- /dev/null +++ b/arch/e2k/include/uapi/asm/Kbuild @@ -0,0 +1 @@ +# SPDX-License-Identifier: GPL-2.0 diff --git a/arch/e2k/include/uapi/asm/a.out.h b/arch/e2k/include/uapi/asm/a.out.h new file mode 100644 index 000000000000..15284b3c73ab --- /dev/null +++ b/arch/e2k/include/uapi/asm/a.out.h @@ -0,0 +1,35 @@ +#ifndef __E2K_A_OUT_H__ +#define __E2K_A_OUT_H__ + +#ifndef __ASSEMBLY__ + +struct exec { + unsigned long a_info; /* Use macros N_MAGIC, etc for access */ + unsigned int a_text; /* length of text, in bytes */ + unsigned int a_data; /* length of data, in bytes */ + + /* length of uninitialized data area for file, in bytes */ + unsigned int a_bss; + + /* length of symbol table data in file, in bytes */ + unsigned int a_syms; + unsigned int a_entry; /* start address */ + + /* length of relocation info for text, in bytes */ + unsigned int a_trsize; + + /* length of relocation info for data, in bytes */ + unsigned int a_drsize; +}; + +#endif /* __ASSEMBLY__ */ + +#define N_TRSIZE(a) ((a).a_trsize) +#define N_DRSIZE(a) ((a).a_drsize) +#define N_SYMSIZE(a) ((a).a_syms) + +#ifdef __KERNEL__ + +#endif + +#endif /* __E2K_A_OUT_H__ */ diff --git a/arch/e2k/include/uapi/asm/auxvec.h b/arch/e2k/include/uapi/asm/auxvec.h new file mode 
100644 index 000000000000..777896c8f811 --- /dev/null +++ b/arch/e2k/include/uapi/asm/auxvec.h @@ -0,0 +1,4 @@ +#ifndef _E2K_AUXVEC_H +#define _E2K_AUXVEC_H + +#endif /* _E2K_AUXVEC_H */ diff --git a/arch/e2k/include/uapi/asm/bitsperlong.h b/arch/e2k/include/uapi/asm/bitsperlong.h new file mode 100644 index 000000000000..0697e907d859 --- /dev/null +++ b/arch/e2k/include/uapi/asm/bitsperlong.h @@ -0,0 +1,8 @@ +#ifndef __ASM_E2K_BITSPERLONG_H +#define __ASM_E2K_BITSPERLONG_H + +#define __BITS_PER_LONG 64 + +#include + +#endif /* __ASM_E2K_BITSPERLONG_H */ diff --git a/arch/e2k/include/uapi/asm/bootinfo.h b/arch/e2k/include/uapi/asm/bootinfo.h new file mode 100644 index 000000000000..cddc2ee29656 --- /dev/null +++ b/arch/e2k/include/uapi/asm/bootinfo.h @@ -0,0 +1,211 @@ +#ifndef _UAPI_E2K_BOOTINFO_H_ +#define _UAPI_E2K_BOOTINFO_H_ + +/* + * The mother board types + */ + +#define MB_TYPE_MIN 0 +#define MB_TYPE_E2K_BASE 0x00 +#define MB_TYPE_ES2_BASE (MB_TYPE_E2K_BASE + 20) +#define MB_TYPE_E1CP_BASE (MB_TYPE_E2K_BASE + 50) +#define MB_TYPE_ES4_BASE (MB_TYPE_E2K_BASE + 70) +#define MB_TYPE_E8C_BASE (MB_TYPE_E2K_BASE + 80) +#define MB_TYPE_MAX a0 + +#define MB_TYPE_ES2_PLATO1 (MB_TYPE_ES2_BASE + 0) +#define MB_TYPE_ES2_BUTTERFLY (MB_TYPE_ES2_BASE + 1) +#define MB_TYPE_ES2_RTC_FM33256 (MB_TYPE_ES2_BASE + 2) /* FM332aa56 rtc */ +#define MB_TYPE_ES2_RTC_CY14B101P (MB_TYPE_ES2_BASE + 3) /* CY14B101P rtc */ +#define MB_TYPE_ES2_APORIA (MB_TYPE_ES2_BASE + 5) /* APORIA */ +#define MB_TYPE_ES2_NT (MB_TYPE_ES2_BASE + 6) /* Nosimyi terminal */ +/* Use this when CLKRs are not synchronized across the system */ +#define MB_TYPE_ES2_RTC_CY14B101P_MULTICLOCK (MB_TYPE_ES2_BASE + 7) +#define MB_TYPE_ES2_CUB_COM (MB_TYPE_ES2_BASE + 8) +#define MB_TYPE_ES2_MBCUB_C (MB_TYPE_ES2_BASE + 11) +#define MB_TYPE_ES2_MB3S1_C (MB_TYPE_ES2_BUTTERFLY) +#define MB_TYPE_ES2_MB3S_C_K (MB_TYPE_ES2_BASE + 14) +#define MB_TYPE_ES2_MGA3D (MB_TYPE_ES2_BASE + 15) +#define MB_TYPE_ES2_BC_M4211 (MB_TYPE_ES2_BASE 
+ 16) +#define MB_TYPE_ES2_EL2S4 (MB_TYPE_ES2_BASE + 17) +/* By default all mb_versions > MB_TYPE_ES2_EL2S4 + * have cy14b101p rt clock. If no - fix is_cy14b101p_exist() + * in arch/l/kernel/i2c-spi/core.c + */ + +#define MB_TYPE_E1CP_PMC (MB_TYPE_E1CP_BASE + 0) /* E1CP with PMC */ +#define MB_TYPE_E1CP_IOHUB2_RAZBRAKOVSCHIK /* IOHUB2 razbrakovschik */ \ + (MB_TYPE_E1CP_BASE + 1) +#define MB_TYPE_MBE1C_PC (MB_TYPE_E1CP_BASE + 2) /* E1CP with PMC */ + +#define MB_TYPE_ES4_MBE2S_PC (MB_TYPE_ES4_BASE + 0) +#define MB_TYPE_ES4_PC401 (MB_TYPE_ES4_BASE + 1) + +#define MB_TYPE_E8C (MB_TYPE_E8C_BASE + 0) + + +/* + * The cpu types + */ + +#define CPU_TYPE_E2S 0x03 /* E2S */ +#define CPU_TYPE_ES2_DSP 0x04 /* E2C+ */ +#define CPU_TYPE_ES2_RU 0x06 /* E2C Micron */ +#define CPU_TYPE_E8C 0x07 /* E8C */ +#define CPU_TYPE_E1CP 0x08 /* E1C+ */ +#define CPU_TYPE_E8C2 0x09 /* E8C */ +#define CPU_TYPE_E12C 0xa /* E12C */ +#define CPU_TYPE_E16C 0xb /* E16C */ +#define CPU_TYPE_E2C3 0xc /* E2C3 */ + +#define CPU_TYPE_SIMUL 0x3e /* simulator */ + +#define CPU_TYPE_MASK 0x3f /* mask of CPU type */ +#define PROC_TYPE_MASK 0xc0 /* mask of MicroProcessor type */ + +#define GET_CPU_TYPE(type) (((type) & CPU_TYPE_MASK) >> 0) + +/* + * The cpu types names + */ + +#define GET_CPU_TYPE_NAME(type_field) \ +({ \ + unsigned char type = GET_CPU_TYPE(type_field); \ + char *name; \ + \ + switch (type) { \ + case CPU_TYPE_E2S: \ + name = "E2S"; \ + break; \ + case CPU_TYPE_ES2_DSP: \ + name = "E2C+DSP"; \ + break; \ + case CPU_TYPE_ES2_RU: \ + name = "E1C"; \ + break; \ + case CPU_TYPE_E8C: \ + name = "E8C"; \ + break; \ + case CPU_TYPE_E1CP: \ + name = "E1C+"; \ + break; \ + case CPU_TYPE_E8C2: \ + name = "E8C2"; \ + break; \ + case CPU_TYPE_E12C: \ + name = "E12C"; \ + break; \ + case CPU_TYPE_E16C: \ + name = "E16C"; \ + break; \ + case CPU_TYPE_E2C3: \ + name = "E2C3"; \ + break; \ + case CPU_TYPE_SIMUL: \ + name = "SIMUL"; \ + break; \ + default: \ + name = "unknown"; \ + } \ + \ + name; \ +}) + 
+/* + * The mother board types names + */ + +#define GET_MB_TYPE_NAME(type) \ +({ \ + char *name; \ + \ + switch (type) { \ + case MB_TYPE_ES2_MB3S1_C: \ + name = "MB3S1/C"; \ + break; \ + case MB_TYPE_ES2_MBCUB_C: \ + case MB_TYPE_ES2_PLATO1: \ + name = "MBKUB/C"; \ + break; \ + case MB_TYPE_ES2_MB3S_C_K: \ + name = "MB3S/C-K"; \ + break; \ + case MB_TYPE_ES2_NT: \ + name = "NT-ELBRUS-S"; \ + break; \ + case MB_TYPE_ES2_CUB_COM: \ + name = "CUB-COM"; \ + break; \ + case MB_TYPE_ES2_RTC_FM33256: \ + name = "MONOCUB+FM33256"; \ + break; \ + case MB_TYPE_ES2_RTC_CY14B101P: \ + name = "MONOCUB"; \ + break; \ + case MB_TYPE_ES2_RTC_CY14B101P_MULTICLOCK: \ + name = "MP1C1/V"; \ + break; \ + case MB_TYPE_ES2_EL2S4: \ + name = "EL2S4"; \ + break; \ + case MB_TYPE_ES2_MGA3D: \ + name = "MGA3D"; \ + break; \ + case MB_TYPE_ES2_BC_M4211: \ + name = "BC-M4211"; \ + break; \ + case MB_TYPE_E1CP_PMC: \ + name = "E1C+ PMC"; \ + break; \ + case MB_TYPE_E1CP_IOHUB2_RAZBRAKOVSCHIK: \ + name = "IOHUB2 razbrakovschik"; \ + break; \ + case MB_TYPE_MBE1C_PC: \ + name = "MBE1C-PC"; \ + break; \ + case MB_TYPE_ES4_MBE2S_PC: \ + name = "MBE2S-PC"; \ + break; \ + case MB_TYPE_ES4_PC401: \ + name = "PC-401"; \ + break; \ + case MB_TYPE_E8C: \ + name = "E8C"; \ + break; \ + default: \ + name = "unknown"; \ + } \ + \ + name; \ +}) + +#define GET_MB_USED_IN(type) \ +({ \ + char *name; \ + \ + switch (type) { \ + case MB_TYPE_ES2_PLATO1: \ + name = "Plato with softreset error"; \ + break; \ + case MB_TYPE_ES2_MBCUB_C: \ + name = "APM VK-2, APM VK-120, BV632, BV631"; \ + break; \ + case MB_TYPE_ES2_MB3S1_C: \ + name = "ELBRUS-3C-CVS, ELBRUS-3C"; \ + break; \ + case MB_TYPE_ES2_RTC_FM33256: \ + name = "MONOCUB+FM33256"; \ + break; \ + case MB_TYPE_ES2_RTC_CY14B101P: \ + name = "MONOCUB-M, MONOCUB-PC"; \ + break; \ + default: \ + name = NULL; \ + } \ + \ + name; \ +}) + + +#endif /* _UAPI_E2K_BOOTINFO_H_ */ diff --git a/arch/e2k/include/uapi/asm/byteorder.h b/arch/e2k/include/uapi/asm/byteorder.h 
new file mode 100644 index 000000000000..77bdb7990745 --- /dev/null +++ b/arch/e2k/include/uapi/asm/byteorder.h @@ -0,0 +1,10 @@ +#ifndef _E2K_BYTEORDER_H_ +#define _E2K_BYTEORDER_H_ + +#include + +#define __BYTEORDER_HAS_U64__ + +#include + +#endif /* _E2K_BYTEORDER_H_ */ diff --git a/arch/e2k/include/uapi/asm/e2k_api.h b/arch/e2k/include/uapi/asm/e2k_api.h new file mode 100644 index 000000000000..7da16194e227 --- /dev/null +++ b/arch/e2k/include/uapi/asm/e2k_api.h @@ -0,0 +1,353 @@ +#ifndef _UAPI_E2K_API_H_ +#define _UAPI_E2K_API_H_ + + +#ifndef __ASSEMBLY__ +typedef unsigned char __e2k_u8_t; +typedef unsigned short int __e2k_u16_t; +typedef unsigned int __e2k_u32_t; +typedef unsigned long long __e2k_u64_t; +typedef void *__e2k_ptr_t; +#endif /* __ASSEMBLY__ */ + +#ifndef __KERNEL__ + +#define E2K_SET_REG(reg_no, val) \ +({ \ + asm volatile ("adds \t0x0, %0, %%r" #reg_no \ + : \ + : "ri" ((__e2k_u32_t) (val))); \ +}) + +#define E2K_SET_DREG(reg_no, val) \ +({ \ + asm volatile ("addd \t0x0, %0, %%dr" #reg_no \ + : \ + : "ri" ((__e2k_u64_t) (val))); \ +}) + +#define E2K_SET_DGREG(reg_no, val) \ +({ \ + asm volatile ("addd \t0x0, %0, %%dg" #reg_no \ + : \ + : "ri" ((__e2k_u64_t) (val))); \ +}) +#define E2K_SET_DGREG_NV(reg_no, val) \ +({ \ + asm ("addd \t%0, 0, %%dg" #reg_no \ + : \ + : "ri" ((__e2k_u64_t) (val))); \ +}) + + +#define E2K_GET_BREG(reg_no) \ +({ \ + register __e2k_u32_t res; \ + asm volatile ("adds \t0x0, %%b[" #reg_no "], %0" \ + : "=r" (res)); \ + res; \ +}) + +#define E2K_GET_DBREG(reg_no) \ +({ \ + register __e2k_u64_t res; \ + asm volatile ("addd \t0x0, %%db[" #reg_no "], %0" \ + : "=r" (res)); \ + res; \ +}) + +#define E2K_SET_BREG(reg_no, val) \ +({ \ + asm volatile ("adds \t0x0, %0, %%b[" #reg_no "]" \ + : \ + : "ri" ((__e2k_u32_t) (val))); \ +}) + +#define E2K_SET_DBREG(reg_no, val) \ +({ \ + asm volatile ("addd \t0x0, %0, %%db[" #reg_no "]" \ + : \ + : "ri" ((__e2k_u64_t) (val))); \ +}) + +#define E2K_GET_SREG(reg_mnemonic) \ +({ \ + 
register __e2k_u32_t res; \ + asm volatile ("rrs \t%%" #reg_mnemonic ", %0" \ + : "=r" (res)); \ + res; \ +}) + +#define E2K_GET_DSREG(reg_mnemonic) \ +({ \ + register __e2k_u64_t res; \ + asm volatile ("rrd \t%%" #reg_mnemonic ", %0" \ + : "=r" (res)); \ + res; \ +}) + +#define E2K_SET_SREG(reg_mnemonic, val) \ +({ \ + asm volatile ("rws \t%0, %%" #reg_mnemonic \ + : \ + : "ri" ((__e2k_u32_t) (val))); \ +}) + +#define E2K_SET_DSREG(reg_mnemonic, val) \ +({ \ + asm volatile ("rwd \t%0, %%" #reg_mnemonic \ + : \ + : "ri" ((__e2k_u64_t) (val))); \ +}) + +#endif /* __KERNEL__ */ + +#ifndef __ASSEMBLY__ + +typedef unsigned long __e2k_syscall_arg_t; + +#define E2K_SYSCALL_CLOBBERS \ + "ctpr1", "ctpr2", "ctpr3", \ + "b[0]", "b[1]", "b[2]", "b[3]", \ + "b[4]", "b[5]", "b[6]", "b[7]" + +/* Transaction operation transaction of argument type + * __e2k_syscall_arg_t */ +#ifdef __ptr64__ +#define __E2K_SYSCAL_ARG_ADD "addd,s" +#else +#define __E2K_SYSCAL_ARG_ADD "adds,s" +#endif + +#define __E2K_SYSCALL_0(_trap, _sys_num, _arg1) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + "}\n" \ + "call %%ctpr1, wbs = %#\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((__e2k_syscall_arg_t) (_sys_num)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_SYSCALL_1(_trap, _sys_num, _arg1) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + "}\n" \ + "call %%ctpr1, wbs = %#\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((__e2k_syscall_arg_t) (_sys_num)), \ + [arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ 
+}) + +#define __E2K_SYSCALL_2(_trap, _sys_num, _arg1, _arg2) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg2], %%b[2]\n\t" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + "}\n" \ + "call %%ctpr1, wbs = %#\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((__e2k_syscall_arg_t) (_sys_num)), \ + [arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)), \ + [arg2] "ri" ((__e2k_syscall_arg_t) (_arg2)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_SYSCALL_3(_trap, _sys_num, _arg1, _arg2, _arg3) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg2], %%b[2]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg3], %%b[3]\n\t" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + "}\n" \ + "call %%ctpr1, wbs = %#\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((__e2k_syscall_arg_t) (_sys_num)), \ + [arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)), \ + [arg2] "ri" ((__e2k_syscall_arg_t) (_arg2)), \ + [arg3] "ri" ((__e2k_syscall_arg_t) (_arg3)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_SYSCALL_4(_trap, _sys_num, _arg1, _arg2, _arg3, _arg4) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg2], %%b[2]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg3], %%b[3]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg4], %%b[4]\n\t" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + "}\n" \ + "call %%ctpr1, wbs = %#\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : 
[res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((__e2k_syscall_arg_t) (_sys_num)), \ + [arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)), \ + [arg2] "ri" ((__e2k_syscall_arg_t) (_arg2)), \ + [arg3] "ri" ((__e2k_syscall_arg_t) (_arg3)), \ + [arg4] "ri" ((__e2k_syscall_arg_t) (_arg4)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_SYSCALL_5(_trap, _sys_num, _arg1, _arg2, _arg3, _arg4, _arg5) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg2], %%b[2]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg3], %%b[3]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg4], %%b[4]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg5], %%b[5]\n\t" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + "}\n" \ + "call %%ctpr1, wbs = %#\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((__e2k_syscall_arg_t) (_sys_num)), \ + [arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)), \ + [arg2] "ri" ((__e2k_syscall_arg_t) (_arg2)), \ + [arg3] "ri" ((__e2k_syscall_arg_t) (_arg3)), \ + [arg4] "ri" ((__e2k_syscall_arg_t) (_arg4)), \ + [arg5] "ri" ((__e2k_syscall_arg_t) (_arg5)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_SYSCALL_6(_trap, _sys_num, _arg1, \ + _arg2, _arg3, _arg4, _arg5, _arg6) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg2], %%b[2]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg3], %%b[3]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg4], %%b[4]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg5], %%b[5]\n\t" \ + "}\n" \ + "{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg6], %%b[6]\n\t" \ + "call %%ctpr1, wbs = %#\n\t" \ + "}\n" \ + 
__E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((__e2k_syscall_arg_t) (_sys_num)), \ + [arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)), \ + [arg2] "ri" ((__e2k_syscall_arg_t) (_arg2)), \ + [arg3] "ri" ((__e2k_syscall_arg_t) (_arg3)), \ + [arg4] "ri" ((__e2k_syscall_arg_t) (_arg4)), \ + [arg5] "ri" ((__e2k_syscall_arg_t) (_arg5)), \ + [arg6] "ri" ((__e2k_syscall_arg_t) (_arg6)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_SYSCALL_7(_trap, _sys_num, _arg1, \ + _arg2, _arg3, _arg4, _arg5, _arg6, _arg7) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg2], %%b[2]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg3], %%b[3]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg4], %%b[4]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg5], %%b[5]\n\t" \ + "}\n" \ + "{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg6], %%b[6]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg7], %%b[7]\n\t" \ + "call %%ctpr1, wbs = %#\n\t" \ + "}\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((__e2k_syscall_arg_t) (_sys_num)), \ + [arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)), \ + [arg2] "ri" ((__e2k_syscall_arg_t) (_arg2)), \ + [arg3] "ri" ((__e2k_syscall_arg_t) (_arg3)), \ + [arg4] "ri" ((__e2k_syscall_arg_t) (_arg4)), \ + [arg5] "ri" ((__e2k_syscall_arg_t) (_arg5)), \ + [arg6] "ri" ((__e2k_syscall_arg_t) (_arg6)), \ + [arg7] "ri" ((__e2k_syscall_arg_t) (_arg7)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define E2K_SYSCALL(trap, sys_num, num_args, args...) 
\ + __E2K_SYSCALL_##num_args(trap, sys_num, args) + +#define ASM_CALL_8_ARGS(func_name_to_call, _arg0, _arg1, _arg2, _arg3, \ + _arg4, _arg5, _arg6, _arg7) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ( \ + "{\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg0], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg2], %%b[2]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg3], %%b[3]\n\t" \ + "disp %%ctpr1, " #func_name_to_call "\n\t" \ + "}\n\t" \ + "{\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg4], %%b[4]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg5], %%b[5]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg6], %%b[6]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg7], %%b[7]\n\t" \ + "call %%ctpr1, wbs = %#\n\t" \ + "}\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]\n\t" \ + : \ + [res] "=r" (__res) \ + : \ + [arg0] "ri" ((__e2k_syscall_arg_t) (_arg0)), \ + [arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)), \ + [arg2] "ri" ((__e2k_syscall_arg_t) (_arg2)), \ + [arg3] "ri" ((__e2k_syscall_arg_t) (_arg3)), \ + [arg4] "ri" ((__e2k_syscall_arg_t) (_arg4)), \ + [arg5] "ri" ((__e2k_syscall_arg_t) (_arg5)), \ + [arg6] "ri" ((__e2k_syscall_arg_t) (_arg6)), \ + [arg7] "ri" ((__e2k_syscall_arg_t) (_arg7)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#endif /* !__ASSEMBLY__ */ + + +#endif /* _UAPI_E2K_API_H_ */ diff --git a/arch/e2k/include/uapi/asm/e2k_syswork.h b/arch/e2k/include/uapi/asm/e2k_syswork.h new file mode 100644 index 000000000000..00bb50a03f44 --- /dev/null +++ b/arch/e2k/include/uapi/asm/e2k_syswork.h @@ -0,0 +1,96 @@ +#ifndef _UAPI_E2K_SYSWORK_H_ +#define _UAPI_E2K_SYSWORK_H_ + +#include +#include + +/* + * IDE info + */ +#define ALL_IDE 1 +#define USING_DMA 2 + +/* + * works for e2k_syswork + */ +#define PRINT_MMAP 1 +#define PRINT_STACK 2 +#define PRINT_TASKS 3 +#define GET_ADDR_PROT 4 +#define PRINT_REGS 6 +#define PRINT_ALL_MMAP 7 +#define FLUSH_CMD_CACHES 8 +#define SET_TAGS 17 +#define CHECK_TAGS 18 +#define 
IDE_INFO 20 +#define INSTR_EXEC 21 +#define IREQ_SET_TO_CPU 22 +#define PRINT_T_TRUSS 23 +#define START_CLI_INFO 24 +#define PRINT_CLI_INFO 25 +#define SYS_MKNOD 26 +#define SET_DBG_MODE 27 +#define E2K_SC_RESTART 28 +#define PRINT_PIDS 29 +#define ADD_END_OF_WORK 30 +#define GET_END_OF_WORK 31 +#define START_OF_WORK 32 +#define DO_E2K_HALT 33 +#ifdef CONFIG_PROC_FS +#define PRINT_STATM 35 +#endif +#define READ_ECMOS 36 +#define WRITE_ECMOS 37 +#define KERNEL_TRACE_BEGIN 38 +#define KERNEL_TRACE_END 39 +#define PRINT_INTERRUPT_INFO 40 +#define CLEAR_INTERRUPT_INFO 41 +#define STOP_INTERRUPT_INFO 42 +#define START_TRACE_EVENT 45 +#define STOP_TRACE_EVENT 46 +#define START_TRACE_LAST_EVENT 48 +#define STOP_TRACE_LAST_EVENT 49 +#define READ_BOOT 53 +#define WRITE_BOOT 54 +#define READ_PTRACE_REGS 55 +#define WRITE_PTRACE_REGS 56 +#define GET_CONTEXT 57 +#define FAST_RETURN 58 /* Using to estimate time needed */ + /* for entering to OS */ +#define TEST_OVERFLOW 59 /* To test kernel procedure/chain */ + /* stack overflow */ +#define E2K_ACCESS_VM 60 /* Read/write current procedure */ + /* stack */ +#define KERNEL_FTRACE 61 +#define USER_CONTROL_INTERRUPT 62 /* user can control all interrupts */ + /* (for degugging hardware) */ + + +/* modes for sys_access_hw_stacks */ +enum { + E2K_READ_CHAIN_STACK, + E2K_READ_PROCEDURE_STACK, + E2K_WRITE_PROCEDURE_STACK, + E2K_GET_CHAIN_STACK_OFFSET, + E2K_GET_CHAIN_STACK_SIZE, + E2K_GET_PROCEDURE_STACK_SIZE, + E2K_READ_CHAIN_STACK_EX, + E2K_READ_PROCEDURE_STACK_EX, + E2K_WRITE_PROCEDURE_STACK_EX, + E2K_WRITE_CHAIN_STACK_EX, +}; + +typedef struct icache_range { + unsigned long long start; + unsigned long long end; +} icache_range_t; + +#define e2k_syswork(arg1, arg2, arg3) \ +({ \ + long __res; \ + __res = E2K_SYSCALL(LINUX_SYSCALL_TRAPNUM, __NR_e2k_syswork, 3, \ + arg1, arg2, arg3); \ + (int)__res; \ +}) + +#endif /* _UAPI_E2K_SYSWORK_H_ */ diff --git a/arch/e2k/include/uapi/asm/errno.h b/arch/e2k/include/uapi/asm/errno.h new file 
mode 100644 index 000000000000..969b34374728 --- /dev/null +++ b/arch/e2k/include/uapi/asm/errno.h @@ -0,0 +1,6 @@ +#ifndef _I386_ERRNO_H +#define _I386_ERRNO_H + +#include + +#endif diff --git a/arch/e2k/include/uapi/asm/fcntl.h b/arch/e2k/include/uapi/asm/fcntl.h new file mode 100644 index 000000000000..27fa498f5288 --- /dev/null +++ b/arch/e2k/include/uapi/asm/fcntl.h @@ -0,0 +1,2 @@ +#include +#include diff --git a/arch/e2k/include/uapi/asm/ioctl.h b/arch/e2k/include/uapi/asm/ioctl.h new file mode 100644 index 000000000000..b279fe06dfe5 --- /dev/null +++ b/arch/e2k/include/uapi/asm/ioctl.h @@ -0,0 +1 @@ +#include diff --git a/arch/e2k/include/uapi/asm/ioctls.h b/arch/e2k/include/uapi/asm/ioctls.h new file mode 100644 index 000000000000..dd1229114d10 --- /dev/null +++ b/arch/e2k/include/uapi/asm/ioctls.h @@ -0,0 +1,17 @@ +#ifndef _E2K_IOCTLS_H_ +#define _E2K_IOCTLS_H_ + +/* + * We are too far from real ioctl handling and it is difficult to predict + * any errors now. So I accept i386(ia64) ioctl's stuff as the basis. + */ + + +#include +#include + +#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ +#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ +#define TIOCGDB 0x547F /* enable GDB stub mode on this tty */ + +#endif /* _E2K_IOCTLS_H_ */ diff --git a/arch/e2k/include/uapi/asm/ipcbuf.h b/arch/e2k/include/uapi/asm/ipcbuf.h new file mode 100644 index 000000000000..61689ce5eb3a --- /dev/null +++ b/arch/e2k/include/uapi/asm/ipcbuf.h @@ -0,0 +1,27 @@ +#ifndef _E2K_IPCBUF_H_ +#define _E2K_IPCBUF_H_ + +/* + * The ipc64_perm structure for E2K architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. 
+ * + * Pad space is left for: + * - 32-bit seq + * - 2 miscellaneous 64-bit values + */ + +struct ipc64_perm { + __kernel_key_t key; + __kernel_uid_t uid; + __kernel_gid_t gid; + __kernel_uid_t cuid; + __kernel_gid_t cgid; + __kernel_mode_t mode; + unsigned short seq; + unsigned short __pad1; + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* _E2K_IPCBUF_H_ */ diff --git a/arch/e2k/include/uapi/asm/iset_ver.h b/arch/e2k/include/uapi/asm/iset_ver.h new file mode 100644 index 000000000000..e288fcd16785 --- /dev/null +++ b/arch/e2k/include/uapi/asm/iset_ver.h @@ -0,0 +1,55 @@ +#ifndef _E2K_UAPI_ISET_VER_H_ +#define _E2K_UAPI_ISET_VER_H_ + +#ifndef __ASSEMBLY__ + +/* + * IMPORTANT: instruction sets are numbered in increasing order, + * each next iset being backwards compatible with all the + * previous ones. + */ +typedef enum e2k_iset_ver { + E2K_ISET_GENERIC, + E2K_ISET_V2 = 2, + E2K_ISET_V3 = 3, + E2K_ISET_V4 = 4, + E2K_ISET_V5 = 5, + E2K_ISET_V6 = 6, +} e2k_iset_ver_t; + +#define E2K_ISET_V2_MASK (1 << E2K_ISET_V2) +#define E2K_ISET_V3_MASK (1 << E2K_ISET_V3) +#define E2K_ISET_V4_MASK (1 << E2K_ISET_V4) +#define E2K_ISET_V5_MASK (1 << E2K_ISET_V5) +#define E2K_ISET_V6_MASK (1 << E2K_ISET_V6) + +#define E2K_ISET_SINCE_V2_MASK (-1) +#define E2K_ISET_SINCE_V3_MASK (E2K_ISET_SINCE_V2_MASK & ~E2K_ISET_V2_MASK) +#define E2K_ISET_SINCE_V4_MASK (E2K_ISET_SINCE_V3_MASK & ~E2K_ISET_V3_MASK) +#define E2K_ISET_SINCE_V5_MASK (E2K_ISET_SINCE_V4_MASK & ~E2K_ISET_V4_MASK) +#define E2K_ISET_SINCE_V6_MASK (E2K_ISET_SINCE_V5_MASK & ~E2K_ISET_V5_MASK) + +enum { + /* generic e2k iset */ + ELBRUS_GENERIC_ISET = E2K_ISET_GENERIC, + /* Cubic, Turmalin */ + ELBRUS_S_ISET = E2K_ISET_V2, + /* E2S (E4C) */ + ELBRUS_2S_ISET = E2K_ISET_V3, + /* E8C */ + ELBRUS_8C_ISET = E2K_ISET_V4, + /* E1C+ */ + ELBRUS_1CP_ISET = E2K_ISET_V4, + /* E8C2 */ + ELBRUS_8C2_ISET = E2K_ISET_V5, + /* E12C */ + ELBRUS_12C_ISET = E2K_ISET_V6, + /* E16C */ + ELBRUS_16C_ISET = E2K_ISET_V6, + /* E2C3 
*/ + ELBRUS_2C3_ISET = E2K_ISET_V6, +}; + +#endif /* !__ASSEMBLY__ */ + +#endif /* !_E2K_UAPI_ISET_VER_H_ */ diff --git a/arch/e2k/include/uapi/asm/kexec.h b/arch/e2k/include/uapi/asm/kexec.h new file mode 100644 index 000000000000..9f80db8c1a6d --- /dev/null +++ b/arch/e2k/include/uapi/asm/kexec.h @@ -0,0 +1,26 @@ +#ifndef _UAPI_E2K_KEXEC_H_ +#define _UAPI_E2K_KEXEC_H_ + +#include +#include + +#define E2K_KEXEC_IOCTL_BASE 'E' + +struct kexec_reboot_param { + char *cmdline; + int cmdline_size; + void *image; + u64 image_size; + void *initrd; + u64 initrd_size; +}; + +struct lintel_reboot_param { + void *image; + u64 image_size; +}; + +#define KEXEC_REBOOT _IOR(E2K_KEXEC_IOCTL_BASE, 0, struct kexec_reboot_param) +#define LINTEL_REBOOT _IOR(E2K_KEXEC_IOCTL_BASE, 0, struct lintel_reboot_param) + +#endif diff --git a/arch/e2k/include/uapi/asm/kvm.h b/arch/e2k/include/uapi/asm/kvm.h new file mode 100644 index 000000000000..2147e9afee1a --- /dev/null +++ b/arch/e2k/include/uapi/asm/kvm.h @@ -0,0 +1,488 @@ +#ifndef _UAPI_ASM_E2K_KVM_H +#define _UAPI_ASM_E2K_KVM_H + +/* + * KVM e2k specific structures and definitions + * + */ + +#ifndef __ASSEMBLY__ + +#include +#include + +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 + +/* Select e2k specific features in */ +#define __KVM_HAVE_IOAPIC +#define __KVM_HAVE_IRQ_LINE +#define __KVM_HAVE_PIT +#define __KVM_HAVE_DEVICE_ASSIGNMENT +#define __KVM_HAVE_USER_NMI +#define __KVM_HAVE_GUEST_DEBUG +#define __KVM_HAVE_MSIX +#define __KVM_HAVE_MCE +#define __KVM_HAVE_VCPU_EVENTS + +/* KVM (for /dev/kvm fds) capabilities (especially for e2k arch) */ +/* number of CAPs is advisedly very big to don't intersect with other arch'es */ +/* Other arch'es CAPs can be amplified and appended in future */ +/* The better place for these defines should be at arch-indep header */ +/* include/uapi/linux/kvm.h as for all other arch'es */ +#define KVM_CAP_E2K_SV_VM 300 /* paravirtualized guest without any */ + /* hardware support */ +#define 
KVM_CAP_E2K_SW_PV_VM 301 /* paravirtualized kernel without any */ + /* hardware support and can be run */ + /* as host (hypervisor) and as guest */ + /* especial case to debug purposes */ +#define KVM_CAP_E2K_HV_VM 302 /* fully virtualized guest machines */ + /* using hardware extensions */ +#define KVM_CAP_E2K_HW_PV_VM 303 /* paravirtualized guest machines */ + /* using hardware extensions */ + +/* Flags are bits 63:32 of KVM_CREATE_VM argument */ +#define KVM_E2K_VM_TYPE_MASK 0x00000000ffffffffULL +#define KVM_E2K_VM_FLAG_MASK 0xffffffff00000000ULL +/* VM types, to be used as argument to KVM_CREATE_VM */ +#define KVM_E2K_SV_VM_TYPE 0 /* software virtualized guest without */ + /* any hardware support */ + /* now it is default VM type because */ + /* of can be available for all CPUs */ +#define KVM_E2K_SW_PV_VM_TYPE 1 /* paravirtualized kernel without any */ + /* hardware support and can be run */ + /* as host (hypervisor) and as guest */ + /* especial case to debug purposes */ +#define KVM_E2K_HV_VM_TYPE 2 /* fully virtualized guest machines */ + /* using hardware extensions */ +#define KVM_E2K_HW_PV_VM_TYPE 3 /* paravirtualized guest machines */ + /* using hardware extensions */ + +#define KVM_E2K_EPIC_VM_FLAG 0x100000000ULL /* choose between paravirt */ + /* APIC and EPIC models */ + +/* KVM MMU capabilities */ +#define KVM_CAP_E2K_SHADOW_PT_MMU 310 /* is shadow PT enabled */ +#define KVM_CAP_E2K_TDP_MMU 311 /* is Two Dimensial Paging */ + /* mode enabled */ + +/* Architectural interrupt line count. 
*/ +#define KVM_NR_INTERRUPTS 256 + +typedef struct kvm_memory_alias { + __u32 slot; /* this has a different namespace than memory slots */ + __u32 flags; + __u64 guest_alias_addr; + __u64 memory_size; + __u64 target_addr; +} kvm_memory_alias_t; + +/* arch e2k additions flags for kvm_memory_region::flags */ +#define KVM_MEM_ADD_TYPE 0x0010 /* region should be added with */ + /* type of memory */ +#define KVM_MEM_VCPU_RAM 0x0020 /* memory region is common RAM */ +#define KVM_MEM_VCPU_VRAM 0x0040 /* memory region is virtual */ + /* registers emulation memory */ +#define KVM_MEM_IO_VRAM 0x0080 /* memory region is virtual IO memory */ + /* to emulate ISA, VGA VRAM (low) */ +#define KVM_MEM_USER_RAM 0x0100 /* memory is mapped to user space of */ + /* host application */ + /* (in our case QEMU) */ +#define KVM_MEM_TYPE_MASK (KVM_MEM_VCPU_RAM | KVM_MEM_VCPU_VRAM | \ + KVM_MEM_IO_VRAM | KVM_MEM_USER_RAM) + +typedef enum { + guest_ram_mem_type = 0x01, /* memory is common RAM (low & high) */ + guest_vram_mem_type = 0x02, /* memory is virtual registers */ + /* memory (VCPU, VMMU ... 
emulation) */ + guest_io_vram_mem_type = 0x03, /* memory is virtual IO memory */ + /* to emulate ISA, VGA-VRAM (low) */ + guest_user_ram_mem_type = 0x04, /* memory is mapped to user space of */ + /* host application (QEMU) */ +} kvm_guest_mem_type_t; + +#define KVM_MAX_VRAM_AREAS 4 /* max number of VRAM areas */ + +/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */ +struct kvm_pic_state { + __u8 last_irr; /* edge detection */ + __u8 irr; /* interrupt request register */ + __u8 imr; /* interrupt mask register */ + __u8 isr; /* interrupt service register */ + __u8 priority_add; /* highest irq priority */ + __u8 irq_base; + __u8 read_reg_select; + __u8 poll; + __u8 special_mask; + __u8 init_state; + __u8 auto_eoi; + __u8 rotate_on_auto_eoi; + __u8 special_fully_nested_mode; + __u8 init4; /* true if 4 byte init */ + __u8 elcr; /* PIIX edge/trigger selection */ + __u8 elcr_mask; +}; + +typedef struct kvm_kernel_area_shadow { + __u32 slot; /* this has a different namespace than memory and */ + /* alias slots */ + __u32 flags; + __u64 kernel_addr; /* host kernel area base address */ + __u64 area_size; + __u64 guest_shadow_addr; /* guest kernel base address */ + /* should be alias of memory region */ +} kvm_kernel_area_shadow_t; + + +#define KVM_IOAPIC_NUM_PINS 24 + +struct kvm_ioapic_state { + __u64 base_address; + __u32 ioregsel; + __u32 id; + __u32 irr; + __u32 pad; + union { + __u64 bits; + struct { + __u8 vector; + __u8 delivery_mode:3; + __u8 dest_mode:1; + __u8 delivery_status:1; + __u8 polarity:1; + __u8 remote_irr:1; + __u8 trig_mode:1; + __u8 mask:1; + __u8 reserve:7; + __u8 reserved[4]; + __u8 dest_id; + } fields; + } redirtbl[KVM_IOAPIC_NUM_PINS]; + __u32 node_id; +}; + +#define KVM_IOEPIC_NUM_PINS 64 + +#define KVM_IRQCHIP_PIC_MASTER 0 +#define KVM_IRQCHIP_PIC_SLAVE 1 +#define KVM_IRQCHIP_IOAPIC 2 +#define KVM_IRQCHIP_IOEPIC_NODE0 3 +#define KVM_IRQCHIP_IOEPIC_NODE1 4 +#define KVM_IRQCHIP_IOEPIC_NODE2 5 +#define KVM_IRQCHIP_IOEPIC_NODE3 6 +#define KVM_NR_IRQCHIPS 7 
+ +/* for KVM_GET_REGS and KVM_SET_REGS */ +struct kvm_regs { + /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */ + __u64 upsr; +}; + +/* for KVM_GET_LAPIC and KVM_SET_LAPIC */ +#define KVM_APIC_REG_SIZE 0x400 +struct kvm_lapic_state { + char regs[KVM_APIC_REG_SIZE]; +}; + +/* for KVM_GET_SREGS and KVM_SET_SREGS */ +struct kvm_sregs { + /* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */ + __u64 USD_lo, USD_hi; + __u64 PSP_lo, PSP_hi; + __u64 PCSP_lo, PCSP_hi; + __u64 apic_base; + __u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64]; +}; + +/* for KVM_GET_FPU and KVM_SET_FPU */ +struct kvm_fpu { +}; + +struct kvm_debug_exit_arch { + __u32 exception; + __u32 pad; + __u64 pc; + __u64 dr6; + __u64 dr7; +}; + +#define KVM_GUESTDBG_USE_SW_BP 0x00010000 +#define KVM_GUESTDBG_USE_HW_BP 0x00020000 +#define KVM_GUESTDBG_INJECT_DB 0x00040000 +#define KVM_GUESTDBG_INJECT_BP 0x00080000 + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { + __u64 debugreg[8]; +}; + +/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */ +#define KVM_VCPUEVENT_VALID_NMI_PENDING 0x00000001 +#define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002 + +/* for KVM_GET/SET_VCPU_EVENTS */ +struct kvm_vcpu_events { + struct { + __u8 injected; + __u8 nr; + __u8 has_error_code; + __u8 pad; + __u32 error_code; + } exception; + struct { + __u8 injected; + __u8 nr; + __u8 soft; + __u8 pad; + } interrupt; + struct { + __u8 injected; + __u8 pending; + __u8 masked; + __u8 pad; + } nmi; + __u32 sipi_vector; + __u32 flags; + __u32 reserved[10]; +}; + +/* for KVM_GET_PIT and KVM_SET_PIT */ +struct kvm_pit_channel_state { + __u32 count; /* can be 65536 */ + __u16 latched_count; + __u8 count_latched; + __u8 status_latched; + __u8 status; + __u8 read_state; + __u8 write_state; + __u8 write_latch; + __u8 rw_mode; + __u8 mode; + __u8 bcd; + __u8 gate; + __s64 count_load_time; +}; + +struct kvm_pit_state { + struct kvm_pit_channel_state channels[3]; +}; +#define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001 
+struct kvm_pit_state2 { + struct kvm_pit_channel_state channels[3]; + __u32 flags; + __u32 reserved[9]; +}; + +struct kvm_reinject_control { + __u8 pit_reinject; + __u8 reserved[31]; +}; +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + +/* + * e2k arch specific kvm dev/vm/vcpu ioctl's + */ + +/* + * Guest machine info + */ +typedef struct kvm_guest_info { + int cpu_mdl; /* guest CPU model (as at IDR) */ + int cpu_rev; /* guest CPU revision (as at IDR) */ + int cpu_iset; /* guest CPU instruction set version */ + _Bool is_stranger; /* guest is stranger type */ + /* of CPU/machine */ + _Bool mmu_support_pt_v6; /* guest MMU support new MMU Page */ + /* Tables structures V6 */ + _Bool is_pv; /* guest is paravirtualized */ + /* and should not be run as bare */ + unsigned long features; /* guest features */ + /* see details */ +} kvm_guest_info_t; + +typedef struct kvm_guest_area_alloc { + void *region; /* guest memory region to allocate area */ + /* or NULL if any region */ + void *area; /* allocated area - result of ioctl() */ + unsigned long start; /* start address to allocate */ + /* or 0 if any address */ + unsigned long size; /* area size (bytes) */ + unsigned long align; /* the area beginning align */ + unsigned long flags; /* allocation flags and modes */ + kvm_guest_mem_type_t type; /* type of memory: RAM, VRAM */ +} kvm_guest_area_alloc_t; + +typedef struct kvm_guest_area_reserve { + unsigned long start; /* start address to reserve */ + unsigned long size; /* area size (bytes) */ + kvm_guest_mem_type_t type; /* type of memory: RAM, VRAM */ +} kvm_guest_area_reserve_t; + +/* guest area allocation flags */ +#define KVM_ALLOC_AREA_PRESENT 0x00000001ULL +#define KVM_ALLOC_AREA_LOCKED 0x00000002ULL +#define KVM_ALLOC_AREA_ZEROED 0x00000004ULL +/* protections as part of flags */ +#define KVM_ALLOC_AREA_PROT_READ 0x00010000ULL +#define KVM_ALLOC_AREA_PROT_WRITE 0x00020000ULL +#define KVM_ALLOC_AREA_PROT_EXEC 0x00040000ULL +/* some additional 
features */ +#define KVM_ALLOC_AREA_HUGE 0x00100000ULL /* prefered mapping */ + /* to huge pages */ +#define KVM_ALLOC_AREA_MAP_FLAGS \ + (KVM_ALLOC_AREA_PROT_READ | KVM_ALLOC_AREA_PROT_WRITE | \ + KVM_ALLOC_AREA_PROT_EXEC | \ + KVM_ALLOC_AREA_HUGE) + +/* guest addresses map */ +#define KVM_GUEST_PAGE_OFFSET 0x00000010 +#define KVM_GUEST_KERNEL_IMAGE_BASE 0x00000020 +#define KVM_GUEST_VCPU_VRAM_PHYS_BASE 0x00000040 +#define KVM_GUEST_VCPU_VRAM_VIRT_BASE 0x00000080 +#define KVM_GUEST_VCPU_VRAM_SIZE 0x00000100 +#define KVM_GUEST_IO_VRAM_PHYS_BASE 0x00000200 +#define KVM_GUEST_IO_VRAM_VIRT_BASE 0x00000400 +#define KVM_GUEST_IO_VRAM_SIZE 0x00000800 +#define KVM_HOST_PAGE_OFFSET 0x00001000 +#define KVM_HOST_KERNEL_IMAGE_BASE 0x00002000 +#define KVM_KERNEL_AREAS_SIZE 0x00004000 +#define KVM_SHADOW_KERNEL_IMAGE_BASE 0x00008000 +#define KVM_GUEST_IO_PORTS_BASE 0x00010000 +#define KVM_GUEST_NBSR_BASE_NODE_0 0x00020000 +#define KVM_GUEST_NBSR_BASE_NODE_1 0x00040000 +#define KVM_GUEST_NBSR_BASE_NODE_2 0x00080000 +#define KVM_GUEST_NBSR_BASE_NODE_3 0x00100000 + +/* flags of IO ports area mapping for guest */ +#define KVM_IO_PORTS_MMAP 0x1ff00000000 /* > max physical memory */ + +#define KVM_VCPU_MAX_GUEST_ARGS 4 + +typedef struct kvm_vcpu_guest_startup { + char *kernel_base; /* base address of guest kernel image */ + long kernel_size; /* guest kernel image size */ + char *entry_point; /* entry point to startup guest image */ + int args_num; /* number of additional arguments to pass */ + /* to guest image */ + unsigned long args[KVM_VCPU_MAX_GUEST_ARGS]; /* arguments */ + unsigned long flags; /* flags specifying guest properties */ + /* see details below */ + unsigned long trap_off; /* trap entry (ttable #0) offset from guest */ + /* kernel image base */ +} kvm_vcpu_guest_startup_t; + +typedef enum kvm_pci_region_type { + kvm_pci_undef_region_type, + kvm_pci_io_type, /* PCI IO ports region */ + kvm_pci_mem_type, /* PCI memory region */ + kvm_pci_pref_mem_type, /* PCI 
prefetchable memory region */ +} kvm_pci_region_type_t; + +typedef struct kvm_pci_region { + int node_id; /* the node # */ + kvm_pci_region_type_t type; /* the region type: IO/MEM/PMEM */ + unsigned long base; /* the base address of the region */ + unsigned long size; /* the region size */ +} kvm_pci_region_t; + +typedef struct kvm_base_addr_node { + int node_id; /* the node # */ + unsigned long base; /* the base address */ +} kvm_base_addr_node_t; + +typedef struct kvm_guest_nbsr_state { + int node_id; + unsigned int rt_pcim0; + unsigned int rt_pcim1; + unsigned int rt_pcim2; + unsigned int rt_pcim3; + unsigned int rt_pciio0; + unsigned int rt_pciio1; + unsigned int rt_pciio2; + unsigned int rt_pciio3; + unsigned int rt_pcimp_b0; + unsigned int rt_pcimp_b1; + unsigned int rt_pcimp_b2; + unsigned int rt_pcimp_b3; + unsigned int rt_pcimp_e0; + unsigned int rt_pcimp_e1; + unsigned int rt_pcimp_e2; + unsigned int rt_pcimp_e3; + unsigned int rt_pcicfgb; + unsigned long rt_msi; + unsigned int iommu_ctrl; + unsigned long iommu_ptbar; + unsigned long iommu_dtbar; + unsigned long iommu_err; + unsigned long iommu_err_info; + unsigned int prepic_ctrl2; + unsigned int prepic_err_stat; + unsigned int prepic_err_int; + unsigned int prepic_linp0; + unsigned int prepic_linp1; + unsigned int prepic_linp2; + unsigned int prepic_linp3; + unsigned int prepic_linp4; + unsigned int prepic_linp5; +} kvm_guest_nbsr_state_t; + +#endif /* __ASSEMBLY__ */ + +/* + * Flags specifying guest properties (see field flags above) + */ +#define NATIVE_KERNEL_IMAGE_GUEST_FLAG 0x0000000000000001UL +#define PARAVIRT_KERNEL_IMAGE_GUEST_FLAG 0x0000000000000002UL +#define LINTEL_IMAGE_GUEST_FLAG 0x0000000000000100UL + +#define E2K_SYSCALL_TRAP_ENTRY_SIZE (2 * 1024) /* 2Kb */ +#define KVM_GUEST_STARTUP_SYSCALL_NUM 12 /* # of system call to launch */ + /* guest using such method */ +#define KVM_GUEST_STARTUP_ENTRY_NUM 32 /* # of trap table entry to */ + /* launch guest using direct */ + /* control transfer 
(call or */ + /* return) */ +#define KVM_PV_VCPU_TRAP_ENTRY_NUM 36 /* # of trap table entry to */ + /* launch trap handler of */ + /* paravirtualized guest */ + /* (same as ttable #0) */ + +#ifndef __ASSEMBLY__ + +#define KVM_GET_GUEST_ADDRESS _IOWR(KVMIO, 0xe2, unsigned long *) +#define KVM_SETUP_VCPU _IO(KVMIO, 0xe3) +#define KVM_ALLOC_GUEST_AREA _IOWR(KVMIO, 0xe4, \ + kvm_guest_area_alloc_t) +#define KVM_VCPU_GUEST_STARTUP _IOW(KVMIO, 0xe5, \ + kvm_vcpu_guest_startup_t) +#define KVM_SET_KERNEL_IMAGE_SHADOW _IOW(KVMIO, 0xe6, \ + kvm_kernel_area_shadow_t) +#define KVM_SET_IRQCHIP_BASE _IOW(KVMIO, 0xe7, unsigned long) +#define KVM_SET_SYS_TIMER_BASE _IOW(KVMIO, 0xe8, unsigned long) +#define KVM_SET_SPMC_CONF_BASE _IOW(KVMIO, 0xe9, unsigned long) +#define KVM_RESERVE_GUEST_AREA _IOWR(KVMIO, 0xea, \ + kvm_guest_area_reserve_t) +#define KVM_SET_SPMC_CONF_BASE_SPMC_IN_QEMU \ + _IOW(KVMIO, 0xeb, unsigned long) +#define KVM_SET_I2C_SPI_CONF_BASE _IOW(KVMIO, 0xec, unsigned long) +#define KVM_SET_GUEST_INFO _IOW(KVMIO, 0xed, unsigned long) +#define KVM_GET_NBSR_STATE _IOR(KVMIO, 0xee, \ + kvm_guest_nbsr_state_t) +#define KVM_CREATE_SIC_NBSR _IO(KVMIO, 0xef) +#define KVM_SET_PCI_REGION _IOW(KVMIO, 0xf0, kvm_pci_region_t) +#define KVM_SET_COUNT_NUMA_NODES _IOW(KVMIO, 0xf1, unsigned long) +#define KVM_SET_MAX_NR_NODE_CPU _IOW(KVMIO, 0xf2, unsigned long) +#define KVM_SET_CEPIC_FREQUENCY _IOW(KVMIO, 0xf3, unsigned long) +#define KVM_SET_WD_PRESCALER_MULT _IOW(KVMIO, 0xf4, unsigned long) +/* IOCTL 0xf5 reserved for Imagination GPU passthrough */ +#define KVM_SET_LEGACY_VGA_PASSTHROUGH _IOW(KVMIO, 0xf6, unsigned long) + +/* e2k-specific exit reasons from KVM to userspace assistance */ +#define KVM_EXIT_E2K_NOTIFY_IO 33 +#define KVM_EXIT_E2K_RESTART 37 +#define KVM_EXIT_E2K_PANIC 38 + +#endif /* __ASSEMBLY__ */ + +#endif /* _UAPI_ASM_E2K_KVM_H */ diff --git a/arch/e2k/include/uapi/asm/kvm_para.h b/arch/e2k/include/uapi/asm/kvm_para.h new file mode 100644 index 
000000000000..daae6cde50bb --- /dev/null +++ b/arch/e2k/include/uapi/asm/kvm_para.h @@ -0,0 +1,57 @@ +#ifndef _ASM_E2K_KVM_PARA_H +#define _ASM_E2K_KVM_PARA_H + +#include + +#define KVM_FEATURE_CLOCKSOURCE 0 +#define KVM_FEATURE_NOP_IO_DELAY 1 +#define KVM_FEATURE_MMU_OP 2 + +#define MSR_KVM_WALL_CLOCK 0x11 +#define MSR_KVM_SYSTEM_TIME 0x12 + +#define KVM_MAX_MMU_OP_BATCH 32 + +/* Operations for KVM_HC_MMU_OP */ +#define KVM_MMU_OP_WRITE_PTE 1 +#define KVM_MMU_OP_FLUSH_TLB 2 +#define KVM_MMU_OP_RELEASE_PT 3 + +/* Payload for KVM_HC_MMU_OP */ +struct kvm_mmu_op_header { + __u32 op; + __u32 pad; +}; + +struct kvm_mmu_op_write_pte { + struct kvm_mmu_op_header header; + __u64 pte_phys; + __u64 pte_val; +}; + +struct kvm_mmu_op_flush_tlb { + struct kvm_mmu_op_header header; +}; + +struct kvm_mmu_op_release_pt { + struct kvm_mmu_op_header header; + __u64 pt_phys; +}; + +#ifdef __KERNEL__ + +static inline unsigned int kvm_arch_para_features(void) +{ + return 0; +} + +static inline unsigned int kvm_arch_para_hints(void) +{ + return 0; +} + +extern void kvmclock_init(void); + +#endif + +#endif /* _ASM_E2K_KVM_PARA_H */ diff --git a/arch/e2k/include/uapi/asm/mas.h b/arch/e2k/include/uapi/asm/mas.h new file mode 100644 index 000000000000..93bc317065e9 --- /dev/null +++ b/arch/e2k/include/uapi/asm/mas.h @@ -0,0 +1,411 @@ +#ifndef _E2K_UAPI_MAS_H_ +#define _E2K_UAPI_MAS_H_ + +/* + * Memory Address Specifier. 
+ * + * +-----------------------+-------+ + * | opc | | + * +-------+-------+-------+ mod + + * | dc | na | be | | + * +-------+-------+-------+-------+ + * 6-----5 4 3 2-----0 + * + * be - big endian flag + * na - non-aligned access flag + * dc - DCACHEs disable flag + * opc - special MMU or AAU operation opcode + * mod - operation modifier + * + */ + + + +/* MAS masks */ + +#define MAS_MOD_MASK 0x07 +#define MAS_OPC_MASK 0x78 +#define MAS_ENDIAN_MASK 0x08 +#define MAS_NONALIGNED_MASK 0x10 +#define MAS_DCACHE_MASK 0x60 + +/* MAS bits */ + +#define MAS_MOD_BITS 3 +#define MAS_OPC_BITS 4 +#define MAS_ENDIAN_BITS 1 +#define MAS_NONALIGNED_BITS 1 +#define MAS_DCACHE_BITS 2 + +/* MAS shifts */ + +#define MAS_MOD_SHIFT 0x00 +#define MAS_OPC_SHIFT 0x03 +#define MAS_ENDIAN_SHIFT 0x03 +#define MAS_NONALIGNED_SHIFT 0x04 +#define MAS_DCACHE_SHIFT 0x05 + +#define MAS_MOD_DEFAULT 0x0UL +#define MAS_ENDIAN_DEFAULT 0x0UL +#define MAS_NONALIGNED_DEFAULT 0x0UL +#define MAS_DCACHE_DEFAULT 0x0UL + +/* LOAD (non-speculative) MAS modes for channels 0 and 3 */ + +#define _MAS_MODE_LOAD_OPERATION 0UL /* an operation */ +#define _MAS_MODE_LOAD_PA 1UL /* reading by physical */ + /* address */ +#define _MAS_MODE_LOAD_OP_CHECK 2UL /* conditional operation */ + /* depending on the "check" */ + /* lock state */ +#define _MAS_MODE_LOAD_OP_UNLOCK 3UL /* an operation with memory */ + /* location depending on the */ + /* "check" lock state and */ + /* memory location unlocking */ +#define _MAS_MODE_LOAD_OP_SPEC 3UL /* speculative load */ +#define _MAS_MODE_FILL_OP 4UL /* fill operation */ +#define _MAS_MODE_LOAD_OP_LOCK_CHECK 4UL /* semi-speculative operation */ + /* with memory lock check */ +#define _MAS_MODE_LOAD_OP_TRAP_ON_STORE 5UL /* an operation with locking */ + /* of the memory location by */ + /* the "trap on store" lock - */ + /* until v2 */ +#define _MAS_MODE_LOAD_OP_TRAP_ON_LD 6UL /* an operation with locking */ + /* of the memory location by */ + /* the "trap on load/store" */ 
+#define _MAS_MODE_LOAD_OP_WAIT 7UL /* an operation with locking */ + /* of the memory location by */ + /* the "wait" lock */ +#define _MAS_MODE_LOAD_OP_SPEC_LOCK_CHECK 7UL /* speculative operation */ + /* with memory lock check */ +#define _MAS_MODE_DAM_LOAD 7UL /* an operation with looking */ + /* of the disambiguation*/ + /* memory table */ + +/* LOAD (non-speculative) MAS modes for channels 0 and 2 */ +#define _MAS_MODE_LOAD_OP_WAIT_1 5UL /* an operation with locking */ + /* of the memory location by */ + /* the "wait" lock - since V5 */ + +/* LOAD (non-speculative) MAS modes for channels 2 and 5. */ + +#define MAS_MODE_LOAD_OPERATION 0UL /* an operation */ +#define MAS_MODE_LOAD_PA 1UL /* reading by physical */ + /* address */ +#define MAS_MODE_LOAD_OP_CHECK 2UL /* conditional operation */ + /* depending on the "check" */ + /* lock state */ +#define MAS_MODE_LOAD_OP_UNLOCK 3UL /* an operation with memory */ + /* location depending on the */ + /* "check" lock state and */ + /* memory location unlocking */ +#define MAS_MODE_LOAD_OP_SPEC 3UL /* speculative load */ +#define MAS_MODE_FILL_OP 4UL /* fill operation */ +#define MAS_MODE_LOAD_OP_LOCK_CHECK 4UL /* semi-speculative operation */ + /* with memory lock check */ +#define MAS_MODE_LOAD_RESERVED3 5UL /* reserved */ +#define MAS_MODE_LOAD_IOPAGE 6UL /* I/O page access operation */ +#define MAS_MODE_LOAD_MMU_AAU_SPEC 7UL /* special MMU or AAU */ + /* operation */ +#define MAS_MODE_LOAD_OP_SPEC_LOCK_CHECK 7UL /* speculative operation */ + /* with memory lock check */ + + +/* STORE MAS modes for channels 2 and 5. 
*/ + +#ifndef __ASSEMBLY__ +#define MAS_MODE_STORE_OPERATION 0UL /* an operation */ +#define MAS_MODE_STORE_PA 1UL /* writing by physical */ + /* address */ +#define MAS_MODE_STORE_OP_WAIT 2UL /* conditional operation */ + /* depending on the "wait" */ + /* lock state with memory */ + /* location unlocking */ +#define MAS_MODE_STORE_OP_UNLOCK 3UL /* an operation with memory */ + /* location unlocking */ +#define MAS_MODE_STORE_RESERVED2 4UL +#define MAS_MODE_STORE_NOP_UNLOCK 5UL /* same as */ + /* MAS_MODE_STORE_OP_UNLOCK */ + /* but no operation performed */ +#define MAS_MODE_STORE_IOPAGE 6UL /* I/O page access operation */ +#define MAS_MODE_STORE_MMU_AAU_SPEC 7UL /* special MMU or AAU */ + /* operation */ +#else /* __ASSEMBLY__ */ +#define MAS_MODE_STORE_OPERATION 0 /* an operation */ +#define MAS_MODE_STORE_PA 1 /* writing by physical */ + /* address */ +#define MAS_MODE_STORE_OP_WAIT 2 /* conditional operation */ + /* depending on the "wait" */ + /* lock state with memory */ + /* location unlocking */ +#define MAS_MODE_STORE_OP_UNLOCK 3 /* an operation with memory */ + /* location unlocking */ +#define MAS_MODE_STORE_RESERVED2 4 +#define MAS_MODE_STORE_NOP_UNLOCK 5 /* same as */ + /* MAS_MODE_STORE_OP_UNLOCK */ + /* but no operation performed */ +#define MAS_MODE_STORE_IOPAGE 6 /* I/O page access operation */ +#define MAS_MODE_STORE_MMU_AAU_SPEC 7 /* special MMU or AAU */ + /* operation */ +#endif /* ! 
__ASSEMBLY__ */ + + +/* "Secondary" MAS'es (appeared in e2s) */ + +/* LOAD */ + +#define MAS_TRAP_ON_STORE_MASK 0x3 +#define MAS_LOAD_SEC_TRAP_ON_STORE 1UL /* secondary trap on store */ + +#define MAS_TRAP_ON_LD_ST_MASK 0x3 /* secondary trap */ +#define MAS_LOAD_SEC_TRAP_ON_LD_ST 2UL /* on load/store */ + +/* STORE */ + +#define MAS_SEC_NOP_UNLOCK_MASK 0x3 +#define MAS_MODE_STORE_SEC_NOP_UNLOCK 2UL /* secondary unlock */ + +/* BOTH */ + +/* Secondary SLT operation - both ld and st + * ld: channels 0 and 2 + * st: channels 2 and 5 */ +#define MAS_SEC_SLT 0x78UL + + +/* MAS "endian"-ness */ + +#define MAS_ENDIAN_LITTLE 0UL +#define MAS_ENDIAN_BIG 1UL + +/* MAS non-aligned access switch */ + +#define MAS_ALIGNED_ADDR 0UL +#define MAS_NONALIGNED_ADDR 1UL + + +/* MAS cache enablers */ + +#define MAS_CACHE_12E_V_E 0UL /* virtual, all caches enabled */ +#define MAS_CACHE_2E_P_E 0UL /* physical, DCACHE2,ECACHE */ + /* enabled only */ +#define MAS_CACHE_2E_E 1UL /* DCACHE2 and ECACHE enabled only*/ +#define MAS_CACHE_E_E 2UL /* ECACHE enabled only */ +#define MAS_CACHE_N_E 3UL /* nothing enabled */ + +/* MAS cache disablers */ + +#define MAS_CACHE_N_V_D 0UL /* virtual, nothing disabled */ +#define MAS_CACHE_1_P_D 0UL /* physical, DCACHE1 disabled only */ +#define MAS_CACHE_1_D 1UL /* DCACHE1 disabled only */ +#define MAS_CACHE_12_D 2UL /* DCACHE1 and DCACHE2 disabled */ +#define MAS_CACHE_ALL_D 3UL /* all caches disabled */ + +/* + * MAS OPCs + */ + +#ifndef __ASSEMBLY__ +/* mandatory group */ +#define MAS_OPC_CACHE_FLUSH 0UL /* Cache(s) flush operations */ +#define MAS_OPC_DCACHE_LINE_FLUSH 1UL /* Data cache(s) line flush */ + /* operations */ + +#define MAS_OPC_ICACHE_LINE_FLUSH 2UL /* Instruction cache(s) line */ + /* flush */ + /* operations */ +#define MAS_OPC_TLB_PAGE_FLUSH 2UL /* TLB page flush operations */ + +#define MAS_OPC_RESERVED1 3UL + +#define MAS_OPC_ICACHE_FLUSH 4UL /* Instruction cache(s) flush */ + /* operations */ +#define MAS_OPC_TLB_FLUSH 4UL /* TLB 
flush operations */ + +#define MAS_OPC_TLB_ADDR_PROBE 5UL /* TLB address probe */ + /* operations */ +#define MAS_OPC_TLB_ENTRY_PROBE 6UL /* TLB entry probe operations */ +#define MAS_OPC_AAU_REG 7UL /* AAU registers access */ + +/* optional group */ +#define MAS_OPC_MMU_REG 8UL /* MMU registers access */ +#define MAS_OPC_DTLB_REG 9UL /* DTLB registers access */ +#define MAS_OPC_L1_REG 10UL /* L1 cache registers access */ +#define MAS_OPC_L2_REG 11UL /* L2 cache registers access */ + +#define MAS_OPC_ICACHE_REG 12UL /* ICACHE registers access */ +#define MAS_OPC_ITLB_REG 12UL /* ITLB registers access */ + +#define MAS_OPC_DAM_REG 13UL /* DAM register(s) access */ +#define MAS_OPC_MLT_REG 13UL /* MLT register(s) access */ +#define MAS_OPC_CLW_REG 13UL /* CLW register(s) access */ +#define MAS_OPC_SNOOP_REG 13UL /* SNOOP register(s) access */ +#define MAS_OPC_MMU_DEBUG_REG 13UL /* MMU DEBUG registers access */ + +#define MAS_OPC_PCS_REG 14UL /* PCS (Procedure Chain Stack)*/ + /* registers */ + /* operations */ +#define MAS_OPC_RESERVED2 15UL +#else /* __ASSEMBLY__ */ +/* mandatory group */ +#define MAS_OPC_CACHE_FLUSH 0 /* Cache(s) flush operations */ +#define MAS_OPC_DCACHE_LINE_FLUSH 1 /* Data cache(s) line flush */ + /* operations */ + +#define MAS_OPC_ICACHE_LINE_FLUSH 2 /* Instruction cache(s) line */ + /* flush */ + /* operations */ +#define MAS_OPC_TLB_PAGE_FLUSH 2 /* TLB page flush operations */ + +#define MAS_OPC_RESERVED1 3 + +#define MAS_OPC_ICACHE_FLUSH 4 /* Instruction cache(s) flush */ + /* operations */ +#define MAS_OPC_TLB_FLUSH 4 /* TLB flush operations */ + +#define MAS_OPC_TLB_ADDR_PROBE 5 /* TLB address probe */ + /* operations */ +#define MAS_OPC_TLB_ENTRY_PROBE 6 /* TLB entry probe operations */ +#define MAS_OPC_AAU_REG 7 /* AAU registers access */ + +/* optional group */ +#define MAS_OPC_MMU_REG 8 /* MMU registers access */ +#define MAS_OPC_DTLB_REG 9 /* DTLB registers access */ +#define MAS_OPC_L1_REG 10 /* L1 cache registers access */ 
+#define MAS_OPC_L2_REG 11 /* L2 cache registers access */ + +#define MAS_OPC_ICACHE_REG 12 /* ICACHE registers access */ +#define MAS_OPC_ITLB_REG 12 /* ITLB registers access */ + +#define MAS_OPC_DAM_REG 13 /* DAM register(s) access */ +#define MAS_OPC_MLT_REG 13 /* MLT register(s) access */ +#define MAS_OPC_CLW_REG 13 /* CLW register(s) access */ +#define MAS_OPC_SNOOP_REG 13 /* SNOOP register(s) access */ +#define MAS_OPC_MMU_DEBUG_REG 13 /* MMU DEBUG registers access */ + +#define MAS_OPC_PCS_REG 14 /* PCS (Procedure Chain Stack)*/ + /* registers */ + /* operations */ +#define MAS_OPC_RESERVED2 15 +#endif /* ! __ASSEMBLY__ */ + +/* Popular complex MASes for some special Linux/E2K situations */ + +#define MAS_LOAD_OPERATION (MAS_MODE_LOAD_OPERATION << MAS_MOD_SHIFT) +#define MAS_STORE_OPERATION (MAS_MODE_STORE_OPERATION << MAS_MOD_SHIFT) +#define MAS_BYPASS_L1_CACHE (MAS_CACHE_1_D << MAS_DCACHE_SHIFT) +#define MAS_BYPASS_L12_CACHES (MAS_CACHE_12_D << MAS_DCACHE_SHIFT) +#define MAS_BYPASS_ALL_CACHES (MAS_CACHE_ALL_D << MAS_DCACHE_SHIFT) +#define MAS_NONALIGNED (MAS_NONALIGNED_ADDR << MAS_NONALIGNED_SHIFT) +#define MAS_IOADDR (MAS_MODE_STORE_IOPAGE << MAS_MOD_SHIFT) +#define MAS_BIGENDIAN (MAS_ENDIAN_BIG << MAS_ENDIAN_SHIFT) +#define MAS_FILL_OPERATION (MAS_MODE_FILL_OP << MAS_MOD_SHIFT) +#define MAS_LOAD_PA (MAS_MODE_LOAD_PA << MAS_MOD_SHIFT) +#define MAS_LOAD_SPEC (MAS_MODE_LOAD_OP_SPEC << MAS_MOD_SHIFT) +#define MAS_STORE_PA (MAS_MODE_STORE_PA << MAS_MOD_SHIFT) + +/* CACHE(s) flush */ +#define MAS_CACHE_FLUSH ((MAS_MODE_STORE_MMU_AAU_SPEC << \ + MAS_MOD_SHIFT) | \ + ((MAS_OPC_CACHE_FLUSH << \ + MAS_OPC_SHIFT) & MAS_OPC_MASK)) + +/* DCACHE line flush */ +#define MAS_DCACHE_LINE_FLUSH ((MAS_MODE_STORE_MMU_AAU_SPEC << \ + MAS_MOD_SHIFT) | \ + ((MAS_OPC_DCACHE_LINE_FLUSH << \ + MAS_OPC_SHIFT) & MAS_OPC_MASK)) + +/* DCACHE L1 registers access */ +#define MAS_DCACHE_L1_REG ((MAS_MODE_STORE_MMU_AAU_SPEC << \ + MAS_MOD_SHIFT) | \ + ((MAS_OPC_L1_REG << \ + 
MAS_OPC_SHIFT) & MAS_OPC_MASK)) + +/* DCACHE L2 registers access */ +#define MAS_DCACHE_L2_REG ((MAS_MODE_STORE_MMU_AAU_SPEC << \ + MAS_MOD_SHIFT) | \ + ((MAS_OPC_L2_REG << \ + MAS_OPC_SHIFT) & MAS_OPC_MASK)) + +/* ICACHE line and DTLB page flush */ +#define MAS_ICACHE_LINE_FLUSH ((MAS_MODE_STORE_MMU_AAU_SPEC << \ + MAS_MOD_SHIFT) | \ + ((MAS_OPC_ICACHE_LINE_FLUSH << \ + MAS_OPC_SHIFT) & MAS_OPC_MASK)) +#define MAS_TLB_PAGE_FLUSH ((MAS_MODE_STORE_MMU_AAU_SPEC << \ + MAS_MOD_SHIFT) | \ + ((MAS_OPC_TLB_PAGE_FLUSH << \ + MAS_OPC_SHIFT) & MAS_OPC_MASK)) + +/* ICACHE and TLB flush */ +#define MAS_ICACHE_FLUSH ((MAS_MODE_STORE_MMU_AAU_SPEC << \ + MAS_MOD_SHIFT) | \ + ((MAS_OPC_ICACHE_FLUSH << \ + MAS_OPC_SHIFT) & MAS_OPC_MASK)) +#define MAS_TLB_FLUSH ((MAS_MODE_STORE_MMU_AAU_SPEC << \ + MAS_MOD_SHIFT) | \ + ((MAS_OPC_TLB_FLUSH << \ + MAS_OPC_SHIFT) & MAS_OPC_MASK)) + +/* MMU registers access */ +#define MAS_MMU_REG ((MAS_MODE_STORE_MMU_AAU_SPEC << MAS_MOD_SHIFT) | \ + ((MAS_OPC_MMU_REG << MAS_OPC_SHIFT) & MAS_OPC_MASK)) + +/* Data TLB registers access */ +#define MAS_DTLB_REG ((MAS_MODE_STORE_MMU_AAU_SPEC << MAS_MOD_SHIFT) | \ + ((MAS_OPC_DTLB_REG << MAS_OPC_SHIFT) & MAS_OPC_MASK)) + +/* CLW registers access */ +#define MAS_CLW_REG ((MAS_MODE_STORE_MMU_AAU_SPEC << MAS_MOD_SHIFT) | \ + ((MAS_OPC_CLW_REG << MAS_OPC_SHIFT) & MAS_OPC_MASK)) + +/* MMU DEBUG registers access */ +#define MAS_MMU_DEBUG_REG \ + ((MAS_MODE_STORE_MMU_AAU_SPEC << MAS_MOD_SHIFT) | \ + ((MAS_OPC_MMU_DEBUG_REG << MAS_OPC_SHIFT) & \ + MAS_OPC_MASK)) + +/* VA probe */ +#define MAS_VA_PROBE ((MAS_MODE_STORE_MMU_AAU_SPEC << MAS_MOD_SHIFT) | \ + ((MAS_OPC_TLB_ADDR_PROBE << MAS_OPC_SHIFT) & \ + MAS_OPC_MASK)) + +/* DTLB entry probe */ +#define MAS_ENTRY_PROBE ((MAS_MODE_STORE_MMU_AAU_SPEC << MAS_MOD_SHIFT) | \ + ((MAS_OPC_TLB_ENTRY_PROBE << MAS_OPC_SHIFT) & \ + MAS_OPC_MASK)) + +/* Locking "wait" */ +#define MAS_WAIT_LOCK (_MAS_MODE_LOAD_OP_WAIT << MAS_MOD_SHIFT) +#define MAS_WAIT_LOCK_Q 
(_MAS_MODE_LOAD_OP_WAIT_1 << MAS_MOD_SHIFT) + +/* Ulocking "wait" */ +#define MAS_WAIT_UNLOCK (MAS_MODE_STORE_OP_WAIT << MAS_MOD_SHIFT) + +/* Locking trap on store */ +#define MAS_MLT_STORE_LOCK (_MAS_MODE_LOAD_OP_TRAP_ON_STORE << \ + MAS_MOD_SHIFT) +#define MAS_MLT_SEC_STORE_LOCK (MAS_LOAD_SEC_TRAP_ON_STORE) + +/* Locking trap on load/store */ +#define MAS_MLT_LOAD_LOCK (_MAS_MODE_LOAD_OP_TRAP_ON_LD << MAS_MOD_SHIFT) +#define MAS_MLT_SEC_LD_ST_LOCK (MAS_LOAD_SEC_TRAP_ON_LD_ST) + +#define MAS_MLT_STORE_UNLOCK (MAS_MODE_STORE_OP_UNLOCK << MAS_MOD_SHIFT) + +#define MAS_MLT_NOP_UNLOCK (MAS_MODE_STORE_NOP_UNLOCK << MAS_MOD_SHIFT) + +#define MAS_MLT_SEC_NOP_UNLOCK (MAS_MODE_STORE_SEC_NOP_UNLOCK << MAS_MOD_SHIFT) + +#define MAS_MLT_REG ((MAS_MODE_STORE_MMU_AAU_SPEC << MAS_MOD_SHIFT) | \ + (((MAS_OPC_MMU_REG | MAS_OPC_ICACHE_FLUSH | \ + MAS_OPC_DCACHE_LINE_FLUSH) << MAS_OPC_SHIFT) & \ + MAS_OPC_MASK)) + +/* DAM table */ +#define MAS_DAM_REG ((_MAS_MODE_DAM_LOAD << MAS_MOD_SHIFT) | \ + (((MAS_OPC_DAM_REG) << MAS_OPC_SHIFT) & \ + MAS_OPC_MASK)) + +#endif /* _E2K_UAPI_MAS_H_ */ diff --git a/arch/e2k/include/uapi/asm/mman.h b/arch/e2k/include/uapi/asm/mman.h new file mode 100644 index 000000000000..4a0c90e66775 --- /dev/null +++ b/arch/e2k/include/uapi/asm/mman.h @@ -0,0 +1,101 @@ +#ifndef _UAPI_E2K_MMAN_H_ +#define _UAPI_E2K_MMAN_H_ + + +/* + * Copyright (C) 1998-2000 Hewlett-Packard Co + * Copyright (C) 1998-2000 David Mosberger-Tang + * + * Adopted for Linux/E2K. To be extended for proper E2K mem. management. 
+ */ + +#define PROT_NONE 0x0 /* page can not be accessed */ +#define PROT_READ 0x1 /* page can be read */ +#define PROT_WRITE 0x2 /* page can be written */ +#define PROT_EXEC 0x4 /* page can be executed */ +#define PROT_SEM 0x8 /* page may be used for atomic ops */ +#define PROT_GROWSDOWN 0x20 /* mprotect flag: extend change */ + /* to start of growsdown vma */ +#define PROT_GROWSUP 0x40 /* mprotect flag: extend change */ + /* to end of growsup vma */ +#define PROT_CUI 0xffff00 +#define PROT_CUI_SHIFT 8 +#define PROT_CUI_MASK 0xFFFF + +#define GET_CUI_FROM_INT_PROT(prot) (((prot) >> PROT_CUI_SHIFT) & \ + PROT_CUI_MASK) +#define PUT_CUI_TO_INT_PROT(prot, cui) ((((cui) & PROT_CUI_MASK) << \ + PROT_CUI_SHIFT) | prot) + +/* 0x01 - 0x03 are defined in linux/mman.h */ +#define MAP_TYPE 0x00000f /* Mask for type of mapping */ +#define MAP_ANONYMOUS 0x000010 /* don't use a file */ +#define MAP_FIXED 0x000100 /* Interpret addr exactly */ +#define MAP_DENYWRITE 0x000800 /* ETXTBSY */ +#define MAP_GROWSDOWN 0x001000 /* stack-like segment */ +#define MAP_GROWSUP 0x002000 /* register stack-like segment */ +#define MAP_EXECUTABLE 0x004000 /* mark it as an executable */ +#define MAP_LOCKED 0x008000 /* pages are locked */ +#define MAP_NORESERVE 0x010000 /* don't check for reservations */ +#define MAP_POPULATE 0x020000 /* populate (prefault) pagetables */ +#define MAP_NONBLOCK 0x040000 /* do not block on IO */ +#define MAP_FIRST32 0x080000 /* in protected mode map in */ + /* first 2 ** 32 area */ +#define MAP_WRITECOMBINED 0x100000 /* Write combine */ +#define MAP_HUGETLB 0x200000 /* create a huge page mapping */ +#define MAP_FIXED_NOREPLACE 0x400000 /* MAP_FIXED which doesn't unmap */ + /* underlying mapping */ + +#define MAP_STACK MAP_GROWSDOWN + +#define MLOCK_ONFAULT 0x01 /* Lock pages in range after they are faulted in, do not prefault */ + +#define MS_ASYNC 1 /* sync memory asynchronously */ +#define MS_INVALIDATE 2 /* invalidate the caches */ +#define MS_SYNC 4 /* 
synchronous memory sync */ + +#define MCL_CURRENT 1 /* lock all current mappings */ +#define MCL_FUTURE 2 /* lock all future mappings */ +#define MCL_ONFAULT 4 /* lock all pages that are faulted in */ + + +#define MADV_NORMAL 0 /* no further special treatment */ +#define MADV_RANDOM 1 /* expect random page references */ +#define MADV_SEQUENTIAL 2 /* expect sequential page references */ +#define MADV_WILLNEED 3 /* will need these pages */ +#define MADV_DONTNEED 4 /* don't need these pages */ + +/* common parameters: try to keep these consistent across architectures */ +#define MADV_FREE 8 /* free pages only if memory pressure */ +#define MADV_REMOVE 9 /* remove these pages & resources */ +#define MADV_DONTFORK 10 /* don't inherit across fork */ +#define MADV_DOFORK 11 /* do inherit across fork */ +#define MADV_HWPOISON 100 /* poison a page for testing */ +#define MADV_SOFT_OFFLINE 101 /* soft offline page for testing */ + +#define MADV_MERGEABLE 12 /* KSM may merge identical pages */ +#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ + +#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */ +#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */ + +#define MADV_DONTDUMP 16 /* Explicity exclude from the core dump, + overrides the coredump filter bits */ +#define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */ + +#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */ +#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */ + +#define MADV_COLD 20 /* deactivate these pages */ +#define MADV_PAGEOUT 21 /* reclaim these pages */ + +/* compatibility flags */ +#define MAP_ANON MAP_ANONYMOUS +#define MAP_FILE 0 + +#define PKEY_DISABLE_ACCESS 0x1 +#define PKEY_DISABLE_WRITE 0x2 +#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\ + PKEY_DISABLE_WRITE) + +#endif /* _UAPI_E2K_MMAN_H_ */ diff --git a/arch/e2k/include/uapi/asm/msgbuf.h b/arch/e2k/include/uapi/asm/msgbuf.h new file mode 100644 index 000000000000..d8d912d8b2f4 --- /dev/null 
+++ b/arch/e2k/include/uapi/asm/msgbuf.h @@ -0,0 +1,27 @@ +#ifndef _E2K_MSGBUF_H_ +#define _E2K_MSGBUF_H_ + +/* + * The msqid64_ds structure for E2K architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 2 miscellaneous 64-bit values + */ + +struct msqid64_ds { + struct ipc64_perm msg_perm; + __kernel_time_t msg_stime; /* last msgsnd time */ + __kernel_time_t msg_rtime; /* last msgrcv time */ + __kernel_time_t msg_ctime; /* last change time */ + unsigned long msg_cbytes; /* current number of bytes on queue */ + unsigned long msg_qnum; /* number of messages in queue */ + unsigned long msg_qbytes; /* max number of bytes on queue */ + __kernel_pid_t msg_lspid; /* pid of last msgsnd */ + __kernel_pid_t msg_lrpid; /* last receive pid */ + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* _E2K_MSGBUF_H_ */ diff --git a/arch/e2k/include/uapi/asm/poll.h b/arch/e2k/include/uapi/asm/poll.h new file mode 100644 index 000000000000..c98509d3149e --- /dev/null +++ b/arch/e2k/include/uapi/asm/poll.h @@ -0,0 +1 @@ +#include diff --git a/arch/e2k/include/uapi/asm/posix_types.h b/arch/e2k/include/uapi/asm/posix_types.h new file mode 100644 index 000000000000..575deb1e035d --- /dev/null +++ b/arch/e2k/include/uapi/asm/posix_types.h @@ -0,0 +1,11 @@ +#ifndef _E2K_POSIX_TYPES_H_ +#define _E2K_POSIX_TYPES_H_ + +/* + * This file is generally used by user-level software, so you need to + * be a little careful about namespace pollution etc. + */ + +#include + +#endif /* _E2K_POSIX_TYPES_H_ */ diff --git a/arch/e2k/include/uapi/asm/protected_mode.h b/arch/e2k/include/uapi/asm/protected_mode.h new file mode 100644 index 000000000000..61b0d2f48378 --- /dev/null +++ b/arch/e2k/include/uapi/asm/protected_mode.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * arch/e2k/include/asm/protected_mode.h, v 1.0 07/04/2020. 
+ * + * Copyright (C) 2020 MCST + */ + +/****************** E2K PROTECTED MODE SPECIFIC STUFF *******************/ + +#ifndef _E2K_PROTECTED_MODE_H_ +#define _E2K_PROTECTED_MODE_H_ + +/* + * PROTECTED MODE DEBUG CONTROLS: + * When control below is set, kernel reports extra info and issues + * identified to journal. Use command 'dmesg' to display these messages. + * Set up corresponding env vars to 0/1 to control particular checks + * or use arch_prctl() syscall to setup debug mode. + */ +/* Protected syscall debug mode initialized: */ +#define PM_SC_DBG_MODE_INIT 0x0001 +/* Output debug info on system calls to system journal: */ +#define PM_SC_DBG_MODE_DEBUG 0x0002 +/* Output debug info on protected complex syscall wrappers to system journal: */ +#define PM_SC_DBG_MODE_COMPLEX_WRAPPERS 0x0004 +/* Report issue to journal if syscall arg doesn't match expected format: */ +#define PM_SC_DBG_MODE_CHECK 0x0008 +/* If error in arg format detected, don't block syscall and proceed with execution: */ +#define PROTECTED_MODE_SOFT 0x0010 +/* Output to journal debug info on converting structures in syscall args: */ +#define PM_SC_DBG_MODE_CONV_STRUCT 0x0020 +/* Output to journal debug info related to signal manipulation: */ +#define PM_SC_DBG_MODE_SIGNALS 0x0040 +/* Don't output to journal warnings/alerts/errors (for better performance): */ +#define PM_SC_DBG_MODE_NO_ERR_MESSAGES 0x0080 + +/* libc specific mmu control stuff: */ + +/* Enable check for dangling descriptors: */ +#define PM_MM_CHECK_4_DANGLING_POINTERS 0x1000 +/* Zeroing freed descriptor contents: */ +#define PM_MM_ZEROING_FREED_POINTERS 0x2000 +/* Emptying freed descriptor contents / light check for dangling descriptors: */ +#define PM_MM_EMPTYING_FREED_POINTERS 0x4000 + +/* Enable all debug/diagnostic output to system journal: */ +#define PM_SC_DBG_MODE_ALL 0xffff406f +/* Disable debug/diagnostic output to system journal: */ +#define PM_SC_DBG_MODE_DISABLED PM_SC_DBG_MODE_INIT + +#define IF_PM_DBG_MODE(mask) \ + 
(current->mm->context.pm_sc_debug_mode & (mask)) + +#define PM_SC_DBG_MODE_DEFAULT (PM_SC_DBG_MODE_CHECK \ + | PM_MM_EMPTYING_FREED_POINTERS) + +/* For backward compatibility: */ +#define PM_SC_DBG_MODE_WARN_ONLY PROTECTED_MODE_SOFT + +/* + * Arch-specific options for arch_prctl() syscall: + */ + +/* PM debug mode controls */ +# define PR_PM_DBG_MODE_SET 8192 +# define PR_PM_DBG_MODE_GET 8193 +# define PR_PM_DBG_MODE_RESET 8194 +# define PR_PM_DBG_MODE_ADD 8195 /* adds to existing debug mode */ +# define PR_PM_DBG_MODE_DEL 8196 /* removes from existing mode */ + + +/* + * Flags for the protected_sys_clean_descriptors() function: + */ +/* 0 - clean freed descriptor list */ +#define CLEAN_DESCRIPTORS_SINGLE 1 /* clean single descriptor 'addr' */ +#define CLEAN_DESCRIPTORS_NO_GARB_COLL 2 /* No garbidge collection */ + +#endif /* _E2K_PROTECTED_MODE_H_ */ diff --git a/arch/e2k/include/uapi/asm/ptrace-abi.h b/arch/e2k/include/uapi/asm/ptrace-abi.h new file mode 100644 index 000000000000..3043bd7faecc --- /dev/null +++ b/arch/e2k/include/uapi/asm/ptrace-abi.h @@ -0,0 +1,78 @@ +#ifndef _ASM_PTRACE_ABI_H +#define _ASM_PTRACE_ABI_H + +#define PTRACE_OLDSETOPTIONS 21 + +/* only useful for access 32bit programs / kernels */ +#define PTRACE_GET_THREAD_AREA 25 +#define PTRACE_SET_THREAD_AREA 26 + +#ifdef __x86_64__ +# define PTRACE_ARCH_PRCTL 30 +#endif + +#define PTRACE_SYSEMU 31 +#define PTRACE_SYSEMU_SINGLESTEP 32 + +#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */ + +#ifndef __ASSEMBLY__ +#include + +/* configuration/status structure used in PTRACE_BTS_CONFIG and + PTRACE_BTS_STATUS commands. 
+*/ +struct ptrace_bts_config { + /* requested or actual size of BTS buffer in bytes */ + __u32 size; + /* bitmask of below flags */ + __u32 flags; + /* buffer overflow signal */ + __u32 signal; + /* actual size of bts_struct in bytes */ + __u32 bts_size; +}; +#endif /* __ASSEMBLY__ */ + +#define PTRACE_BTS_O_TRACE 0x1 /* branch trace */ +#define PTRACE_BTS_O_SCHED 0x2 /* scheduling events w/ jiffies */ +#define PTRACE_BTS_O_SIGNAL 0x4 /* send SIG on buffer overflow + instead of wrapping around */ +#define PTRACE_BTS_O_ALLOC 0x8 /* (re)allocate buffer */ + +#define PTRACE_BTS_CONFIG 40 +/* Configure branch trace recording. + ADDR points to a struct ptrace_bts_config. + DATA gives the size of that buffer. + A new buffer is allocated, if requested in the flags. + An overflow signal may only be requested for new buffers. + Returns the number of bytes read. +*/ +#define PTRACE_BTS_STATUS 41 +/* Return the current configuration in a struct ptrace_bts_config + pointed to by ADDR; DATA gives the size of that buffer. + Returns the number of bytes written. +*/ +#define PTRACE_BTS_SIZE 42 +/* Return the number of available BTS records for draining. + DATA and ADDR are ignored. +*/ +#define PTRACE_BTS_GET 43 +/* Get a single BTS record. + DATA defines the index into the BTS array, where 0 is the newest + entry, and higher indices refer to older entries. + ADDR is pointing to struct bts_struct (see asm/ds.h). +*/ +#define PTRACE_BTS_CLEAR 44 +/* Clear the BTS buffer. + DATA and ADDR are ignored. +*/ +#define PTRACE_BTS_DRAIN 45 +/* Read all available BTS records and clear the buffer. + ADDR points to an array of struct bts_struct. + DATA gives the size of that buffer. + BTS records are read from oldest to newest. + Returns number of BTS records drained. 
+*/ + +#endif /* _ASM_PTRACE_ABI_H */ diff --git a/arch/e2k/include/uapi/asm/ptrace.h b/arch/e2k/include/uapi/asm/ptrace.h new file mode 100644 index 000000000000..7b4654262c98 --- /dev/null +++ b/arch/e2k/include/uapi/asm/ptrace.h @@ -0,0 +1,11 @@ +#ifndef _UAPI_E2K_PTRACE_H +#define _UAPI_E2K_PTRACE_H + + +#ifndef __ASSEMBLY__ + +/* 0x4200-0x4300 are reserved for architecture-independent additions. */ +#define PTRACE_SETOPTIONS 0x4200 + +#endif /* __ASSEMBLY__ */ +#endif /* _UAPI_E2K_PTRACE_H */ diff --git a/arch/e2k/include/uapi/asm/resource.h b/arch/e2k/include/uapi/asm/resource.h new file mode 100644 index 000000000000..5b5656873aee --- /dev/null +++ b/arch/e2k/include/uapi/asm/resource.h @@ -0,0 +1,12 @@ +#ifndef _E2K_RESOURCE_H_ +#define _E2K_RESOURCE_H_ + +#include + +/* + * Redefine resource limits for e2k + */ +#undef _STK_LIM +#define _STK_LIM (16*1024*1024) + +#endif /* _E2K_RESOURCE_H_ */ diff --git a/arch/e2k/include/uapi/asm/sembuf.h b/arch/e2k/include/uapi/asm/sembuf.h new file mode 100644 index 000000000000..816081cd29ba --- /dev/null +++ b/arch/e2k/include/uapi/asm/sembuf.h @@ -0,0 +1,22 @@ +#ifndef _E2K_SEMBUF_H_ +#define _E2K_SEMBUF_H_ + +/* + * The semid64_ds structure for E2K architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 2 miscellaneous 64-bit values + */ + +struct semid64_ds { + struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ + __kernel_time_t sem_otime; /* last semop time */ + __kernel_time_t sem_ctime; /* last change time */ + unsigned long sem_nsems; /* no. 
of semaphores in array */ + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* _E2K_SEMBUF_H_ */ diff --git a/arch/e2k/include/uapi/asm/setup.h b/arch/e2k/include/uapi/asm/setup.h new file mode 100644 index 000000000000..7b4ae516d6dd --- /dev/null +++ b/arch/e2k/include/uapi/asm/setup.h @@ -0,0 +1,6 @@ +#ifndef _UAPI_ASM_E2K_SETUP_H +#define _UAPI_ASM_E2K_SETUP_H + +#define COMMAND_LINE_SIZE 512 + +#endif /* _UAPI_ASM_E2K_SETUP_H */ diff --git a/arch/e2k/include/uapi/asm/shmbuf.h b/arch/e2k/include/uapi/asm/shmbuf.h new file mode 100644 index 000000000000..c4bbaf613e3a --- /dev/null +++ b/arch/e2k/include/uapi/asm/shmbuf.h @@ -0,0 +1,38 @@ +#ifndef _E2K_SHMBUF_H_ +#define _E2K_SHMBUF_H_ + +/* + * The shmid64_ds structure for E2K architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 2 miscellaneous 64-bit values + */ + +struct shmid64_ds { + struct ipc64_perm shm_perm; /* operation perms */ + size_t shm_segsz; /* size of segment (bytes) */ + __kernel_time_t shm_atime; /* last attach time */ + __kernel_time_t shm_dtime; /* last detach time */ + __kernel_time_t shm_ctime; /* last change time */ + __kernel_pid_t shm_cpid; /* pid of creator */ + __kernel_pid_t shm_lpid; /* pid of last operator */ + unsigned long shm_nattch; /* no. 
of current attaches */ + unsigned long __unused1; + unsigned long __unused2; +}; + +struct shminfo64 { + unsigned long shmmax; + unsigned long shmmin; + unsigned long shmmni; + unsigned long shmseg; + unsigned long shmall; + unsigned long __unused1; + unsigned long __unused2; + unsigned long __unused3; + unsigned long __unused4; +}; + +#endif /* _E2K_SHMBUF_H_ */ diff --git a/arch/e2k/include/uapi/asm/sigcontext.h b/arch/e2k/include/uapi/asm/sigcontext.h new file mode 100644 index 000000000000..b129843d0370 --- /dev/null +++ b/arch/e2k/include/uapi/asm/sigcontext.h @@ -0,0 +1,72 @@ +#ifndef _UAPI_E2K_SIGCONTEXT_H_ +#define _UAPI_E2K_SIGCONTEXT_H_ + +#define MAX_TC_SIZE 10 + +#define TIR_NUM 19 +#define DAM_ENTRIES_NUM 32 +#define SBBP_ENTRIES_NUM 32 + +/* from user.h !!! */ +#define MLT_NUM (16 * 3) + +struct sigcontext { + unsigned long long cr0_lo; + unsigned long long cr0_hi; + unsigned long long cr1_lo; + unsigned long long cr1_hi; + unsigned long long sbr; /* 21 Stack base register: top of */ + /* local data (user) stack */ + unsigned long long usd_lo; /* 22 Local data (user) stack */ + unsigned long long usd_hi; /* 23 descriptor: base & size */ + unsigned long long psp_lo; /* 24 Procedure stack pointer: */ + unsigned long long psp_hi; /* 25 base & index & size */ + unsigned long long pcsp_lo; /* 26 Procedure chain stack */ + unsigned long long pcsp_hi; /* 27 pointer: base & index & size */ +/* + * additional part (for binary compiler) + */ + unsigned long long rpr_hi; + unsigned long long rpr_lo; + + unsigned long long nr_TIRs; + unsigned long long tir_lo[TIR_NUM]; + unsigned long long tir_hi[TIR_NUM]; + unsigned long long trap_cell_addr[MAX_TC_SIZE]; + unsigned long long trap_cell_val[MAX_TC_SIZE]; + unsigned char trap_cell_tag[MAX_TC_SIZE]; + unsigned long long trap_cell_info[MAX_TC_SIZE]; + + unsigned long long dam[DAM_ENTRIES_NUM]; + + unsigned long long sbbp[SBBP_ENTRIES_NUM]; + + unsigned long long mlt[MLT_NUM]; + + unsigned long long upsr; +}; +/* + * 
This structure is used for compatibility + * All new fields must be added in this structure + */ +struct extra_ucontext { + int sizeof_extra_uc; /* size of used fields(in bytes) */ + int curr_cnt; /* current index into trap_celler */ + int tc_count; /* trap_celler records count */ + + /* + * For getcontext() + */ + int fpcr; + int fpsr; + int pfpfr; + + unsigned long long ctpr1; + unsigned long long ctpr2; + unsigned long long ctpr3; + + int sc_need_rstrt; +}; + + +#endif /* _UAPI_E2K_SIGCONTEXT_H_ */ diff --git a/arch/e2k/include/uapi/asm/siginfo.h b/arch/e2k/include/uapi/asm/siginfo.h new file mode 100644 index 000000000000..ddc8b3f3f62c --- /dev/null +++ b/arch/e2k/include/uapi/asm/siginfo.h @@ -0,0 +1,34 @@ +#ifndef _E2K_SIGINFO_H_ +#define _E2K_SIGINFO_H_ + +#include +#include + +#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) +#define __ARCH_SI_TRAPNO +#define __ARCH_SI_BAND_T int + +#include + +#define SI_PAD_SIZE32 ((SI_MAX_SIZE/sizeof(int)) - 3) +#define SIGEV_PAD_SIZE32 ((SIGEV_MAX_SIZE/sizeof(int)) - 3) + +/* + * SIGSEGV si_codes + */ +#define SEGV_BOUNDS 3 /* Bounds overflow */ +#undef NSIGSEGV +#define NSIGSEGV 3 + +/* + * SIGTRAP si_codes + */ +#define DIAG_CT 3 /* Diagnostic CT condition */ +#define DIAG_ADDR 4 /* Diagnostic address */ +#define DIAG_PRED 5 /* Diagnostic predicate */ +#define DIAG_OP 6 /* Diagnostic operand */ +#define MEM_LOCK 7 /* Memory lock */ +#undef NSIGTRAP +#define NSIGTRAP 6 + +#endif /* _E2K_SIGINFO_H_ */ diff --git a/arch/e2k/include/uapi/asm/signal.h b/arch/e2k/include/uapi/asm/signal.h new file mode 100644 index 000000000000..c8c9ce79c8de --- /dev/null +++ b/arch/e2k/include/uapi/asm/signal.h @@ -0,0 +1,97 @@ +#ifndef _UAPI_E2K_SIGNAL_H_ +#define _UAPI_E2K_SIGNAL_H_ + +#include + +/* Avoid too many header ordering problems. 
*/ +struct siginfo; + +#define SIGHUP 1 +#define SIGINT 2 +#define SIGQUIT 3 +#define SIGILL 4 +#define SIGTRAP 5 +#define SIGABRT 6 +#define SIGIOT 6 +#define SIGBUS 7 +#define SIGFPE 8 +#define SIGKILL 9 +#define SIGUSR1 10 +#define SIGSEGV 11 +#define SIGUSR2 12 +#define SIGPIPE 13 +#define SIGALRM 14 +#define SIGTERM 15 +#define SIGSTKFLT 16 +#define SIGCHLD 17 +#define SIGCONT 18 +#define SIGSTOP 19 +#define SIGTSTP 20 +#define SIGTTIN 21 +#define SIGTTOU 22 +#define SIGURG 23 +#define SIGXCPU 24 +#define SIGXFSZ 25 +#define SIGVTALRM 26 +#define SIGPROF 27 +#define SIGWINCH 28 +#define SIGIO 29 +#define SIGPOLL SIGIO +/* +#define SIGLOST 29 +*/ +#define SIGPWR 30 +#define SIGSYS 31 +/* signal 31 is no longer "unused", but the SIGUNUSED macro remains for + * backwards compatibility */ +#define SIGUNUSED 31 + +#define SIGRESTART 33 + +/* These should not be considered constants from userland. */ +#define SIGRTMIN 32 +#define SIGRTMAX _NSIG + +/* + * SA_FLAGS values: + * + * SA_ONSTACK indicates that a registered stack_t will be used. + * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the + * SA_RESTART flag to get restarting signals (which were the default long ago) + * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. + * SA_RESETHAND clears the handler when the signal is delivered. + * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. + * SA_NODEFER prevents the current signal from being masked in the handler. + * + * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single + * Unix names RESETHAND and NODEFER respectively. 
+ */ +#define SA_NOCLDSTOP 0x00000001 +#define SA_NOCLDWAIT 0x00000002 /* not supported yet */ +#define SA_SIGINFO 0x00000004 +#define SA_ONSTACK 0x08000000 +#define SA_RESTART 0x10000000 +#define SA_NODEFER 0x40000000 +#define SA_RESETHAND 0x80000000 + +#define SA_NOMASK SA_NODEFER +#define SA_ONESHOT SA_RESETHAND +#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ + +#define SA_RESTORER 0x04000000 + +#define MINSIGSTKSZ 4096 +#define SIGSTKSZ 8192 + + +# ifndef __ASSEMBLY__ +typedef struct sigaltstack { + void __user *ss_sp; + int ss_flags; + size_t ss_size; +} stack_t; + + +# endif /* __ASSEMBLY__ */ + +#endif /* _UAPI_E2K_SIGNAL_H_ */ diff --git a/arch/e2k/include/uapi/asm/socket.h b/arch/e2k/include/uapi/asm/socket.h new file mode 100644 index 000000000000..6b71384b9d8b --- /dev/null +++ b/arch/e2k/include/uapi/asm/socket.h @@ -0,0 +1 @@ +#include diff --git a/arch/e2k/include/uapi/asm/sockios.h b/arch/e2k/include/uapi/asm/sockios.h new file mode 100644 index 000000000000..def6d4746ee7 --- /dev/null +++ b/arch/e2k/include/uapi/asm/sockios.h @@ -0,0 +1 @@ +#include diff --git a/arch/e2k/include/uapi/asm/stat.h b/arch/e2k/include/uapi/asm/stat.h new file mode 100644 index 000000000000..2d430b55c671 --- /dev/null +++ b/arch/e2k/include/uapi/asm/stat.h @@ -0,0 +1,46 @@ +#ifndef _UAPI_E2K_STAT_H_ +#define _UAPI_E2K_STAT_H_ + +/* + * Tuned up to match GNU libc defaults. 
+ */ + +#include + +#define STAT_HAVE_NSEC 1 + +struct __old_kernel_stat { + unsigned short st_dev; + unsigned short st_ino; + unsigned short st_mode; + unsigned short st_nlink; + unsigned short st_uid; + unsigned short st_gid; + unsigned short st_rdev; + unsigned long st_size; + unsigned long st_atime; + unsigned long st_mtime; + unsigned long st_ctime; +}; + +struct stat { + dev_t st_dev; + ino_t st_ino; + mode_t st_mode; + nlink_t st_nlink; + uid_t st_uid; + gid_t st_gid; + dev_t st_rdev; + off_t st_size; + off_t st_blksize; + off_t st_blocks; + time_t st_atime; + unsigned long st_atime_nsec; + time_t st_mtime; + unsigned long st_mtime_nsec; + time_t st_ctime; + unsigned long st_ctime_nsec; +}; + + +#endif /* _UAPI_E2K_STAT_H_ */ diff --git a/arch/e2k/include/uapi/asm/statfs.h b/arch/e2k/include/uapi/asm/statfs.h new file mode 100644 index 000000000000..8f2a792d2841 --- /dev/null +++ b/arch/e2k/include/uapi/asm/statfs.h @@ -0,0 +1,6 @@ +#ifndef _E2K_STATFS_H_ +#define _E2K_STATFS_H_ + +#include + +#endif /* _E2K_STATFS_H_ */ diff --git a/arch/e2k/include/uapi/asm/termbits.h b/arch/e2k/include/uapi/asm/termbits.h new file mode 100644 index 000000000000..8484205b4842 --- /dev/null +++ b/arch/e2k/include/uapi/asm/termbits.h @@ -0,0 +1,6 @@ +#ifndef _E2K_TERMBITS_H_ +#define _E2K_TERMBITS_H_ + +#include + +#endif /* _E2K_TERMBITS_H_ */ diff --git a/arch/e2k/include/uapi/asm/termios.h b/arch/e2k/include/uapi/asm/termios.h new file mode 100644 index 000000000000..8b3d2b070e48 --- /dev/null +++ b/arch/e2k/include/uapi/asm/termios.h @@ -0,0 +1,6 @@ +#ifndef _E2K_TERMIOS_H_ +#define _E2K_TERMIOS_H_ + +#include + +#endif /* _E2K_TERMIOS_H_ */ diff --git a/arch/e2k/include/uapi/asm/types.h b/arch/e2k/include/uapi/asm/types.h new file mode 100644 index 000000000000..4b9667b62b1c --- /dev/null +++ b/arch/e2k/include/uapi/asm/types.h @@ -0,0 +1,28 @@ +#ifndef _UAPI_E2K_TYPES_H_ +#define _UAPI_E2K_TYPES_H_ + +#include + +/* + * This file is never included by application 
software unless + * explicitly requested (e.g., via linux/types.h) in which case the + * application is Linux specific so (user-) name space pollution is + * not a major issue. However, for interoperability, libraries still + * need to be careful to avoid a name clashes. + */ + +#ifndef __ASSEMBLY__ + +typedef unsigned long e2k_addr_t; /* phys & virt address (64 bits) */ +typedef unsigned long e2k_size_t; /* size of objects (64 bits) */ + /* what should it be ????? */ + +/* + * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the + * header files exported to user space + */ + + +#endif /* !(__ASSEMBLY__) */ + +#endif /* _UAPI_E2K_TYPES_H_ */ diff --git a/arch/e2k/include/uapi/asm/ucontext.h b/arch/e2k/include/uapi/asm/ucontext.h new file mode 100644 index 000000000000..51f3af16b178 --- /dev/null +++ b/arch/e2k/include/uapi/asm/ucontext.h @@ -0,0 +1,17 @@ +#ifndef _UAPI_E2K_UCONTEXT_H +#define _UAPI_E2K_UCONTEXT_H + +struct ucontext { + unsigned long uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + struct sigcontext uc_mcontext; + union { + sigset_t uc_sigmask;/* mask last for extensibility */ + unsigned long long pad[16]; + }; + struct extra_ucontext uc_extra; /* for compatibility */ +}; + + +#endif /* _UAPI_E2K_UCONTEXT_H */ diff --git a/arch/e2k/include/uapi/asm/unistd.h b/arch/e2k/include/uapi/asm/unistd.h new file mode 100644 index 000000000000..7d0ba046afab --- /dev/null +++ b/arch/e2k/include/uapi/asm/unistd.h @@ -0,0 +1,470 @@ +#ifndef _UAPI_E2K_UNISTD_H_ +#define _UAPI_E2K_UNISTD_H_ + +/* + * Taken from i386 sub-tree. + * Migration to E2K is still in progress. Please, be patient. 
+ */ + + +#ifdef __ptr64__ +#define LINUX_SYSCALL_TRAPNUM LINUX_SYSCALL64_TRAPNUM +#else /* !__ptr64__ */ +#define LINUX_SYSCALL_TRAPNUM LINUX_SYSCALL32_TRAPNUM +#endif /* __ptr64__ */ + +#define LINUX_SYSCALL_TRAPNUM_OLD 4 /* Deprecated */ +#define LINUX_SYSCALL32_TRAPNUM 1 /* Use E2K trap entry #1 */ +#define LINUX_SYSCALL64_TRAPNUM 3 /* Use E2K trap entry #3 */ +#define LINUX_FAST_SYSCALL32_TRAPNUM 5 +#define LINUX_FAST_SYSCALL64_TRAPNUM 6 +#define LINUX_FAST_SYSCALL128_TRAPNUM 7 + +/* + * This file contains the system call numbers. + */ + +#define __NR_restart_syscall 0 +#define __NR_exit 1 +#define __NR_fork 2 +#define __NR_read 3 +#define __NR_write 4 +#define __NR_open 5 +#define __NR_close 6 +#define __NR_waitpid 7 +#define __NR_creat 8 +#define __NR_link 9 +#define __NR_unlink 10 +#define __NR_execve 11 +#define __NR_chdir 12 +#define __NR_time 13 +#define __NR_mknod 14 +#define __NR_chmod 15 +#define __NR_lchown 16 +#define __NR_break 17 +#define __NR_oldstat 18 +#define __NR_lseek 19 +#define __NR_getpid 20 +#define __NR_mount 21 +#define __NR_umount 22 +#define __NR_setuid 23 +#define __NR_getuid 24 +#define __NR_stime 25 +#define __NR_ptrace 26 +#define __NR_alarm 27 +#define __NR_oldfstat 28 +#define __NR_pause 29 +#define __NR_utime 30 +#define __NR_stty 31 +#define __NR_gtty 32 +#define __NR_access 33 +#define __NR_nice 34 +#define __NR_ftime 35 +#define __NR_sync 36 +#define __NR_kill 37 +#define __NR_rename 38 +#define __NR_mkdir 39 +#define __NR_rmdir 40 +#define __NR_dup 41 +#define __NR_pipe 42 +#define __NR_times 43 +#define __NR_prof 44 +#define __NR_brk 45 +#define __NR_setgid 46 +#define __NR_getgid 47 +#define __NR_signal 48 +#define __NR_geteuid 49 +#define __NR_getegid 50 +#define __NR_acct 51 +#define __NR_umount2 52 +#define __NR_lock 53 +#define __NR_ioctl 54 +#define __NR_fcntl 55 +#define __NR_mpx 56 +#define __NR_setpgid 57 +#define __NR_ulimit 58 +#define __NR_oldolduname 59 +#define __NR_umask 60 +#define __NR_chroot 61 +#define 
__NR_ustat 62 +#define __NR_dup2 63 +#define __NR_getppid 64 +#define __NR_getpgrp 65 +#define __NR_setsid 66 +#define __NR_sigaction 67 +#define __NR_sgetmask 68 +#define __NR_ssetmask 69 +#define __NR_setreuid 70 +#define __NR_setregid 71 +#define __NR_sigsuspend 72 +#define __NR_sigpending 73 +#define __NR_sethostname 74 +#define __NR_setrlimit 75 +#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */ +#define __NR_getrusage 77 +#define __NR_gettimeofday 78 +#define __NR_settimeofday 79 +#define __NR_getgroups 80 +#define __NR_setgroups 81 +#define __NR_select 82 +#define __NR_symlink 83 +#define __NR_oldlstat 84 +#define __NR_readlink 85 +#define __NR_uselib 86 +#define __NR_swapon 87 +#define __NR_reboot 88 +#define __NR_readdir 89 +#define __NR_mmap 90 +#define __NR_munmap 91 +#define __NR_truncate 92 +#define __NR_ftruncate 93 +#define __NR_fchmod 94 +#define __NR_fchown 95 +#define __NR_getpriority 96 +#define __NR_setpriority 97 +#define __NR_profil 98 +#define __NR_statfs 99 +#define __NR_fstatfs 100 +#define __NR_ioperm 101 +#define __NR_socketcall 102 +#define __NR_syslog 103 +#define __NR_setitimer 104 +#define __NR_getitimer 105 +#define __NR_stat 106 +#define __NR_lstat 107 +#define __NR_fstat 108 +#define __NR_olduname 109 +#define __NR_iopl 110 +#define __NR_vhangup 111 +#define __NR_idle 112 +#define __NR_vm86old 113 +#define __NR_wait4 114 +#define __NR_swapoff 115 +#define __NR_sysinfo 116 +#define __NR_ipc 117 +#define __NR_fsync 118 +#define __NR_sigreturn 119 +#define __NR_clone 120 +#define __NR_setdomainname 121 +#define __NR_uname 122 +#define __NR_modify_ldt 123 +#define __NR_adjtimex 124 +#define __NR_mprotect 125 +#define __NR_sigprocmask 126 +#define __NR_create_module 127 +#define __NR_init_module 128 +#define __NR_delete_module 129 +#define __NR_get_kernel_syms 130 +#define __NR_quotactl 131 +#define __NR_getpgid 132 +#define __NR_fchdir 133 +#define __NR_bdflush 134 +#define __NR_sysfs 135 +#define __NR_personality 136 
+#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ +#define __NR_setfsuid 138 +#define __NR_setfsgid 139 +#define __NR__llseek 140 +#define __NR_getdents 141 +#define __NR__newselect 142 +#define __NR_flock 143 +#define __NR_msync 144 +#define __NR_readv 145 +#define __NR_writev 146 +#define __NR_getsid 147 +#define __NR_fdatasync 148 +#define __NR__sysctl 149 +#define __NR_mlock 150 +#define __NR_munlock 151 +#define __NR_mlockall 152 +#define __NR_munlockall 153 +#define __NR_sched_setparam 154 +#define __NR_sched_getparam 155 +#define __NR_sched_setscheduler 156 +#define __NR_sched_getscheduler 157 +#define __NR_sched_yield 158 +#define __NR_sched_get_priority_max 159 +#define __NR_sched_get_priority_min 160 +#define __NR_sched_rr_get_interval 161 +#define __NR_nanosleep 162 +#define __NR_mremap 163 +#define __NR_setresuid 164 +#define __NR_getresuid 165 +#define __NR_vm86 166 +#define __NR_query_module 167 +#define __NR_poll 168 +#define __NR_nfsservctl 169 +#define __NR_setresgid 170 +#define __NR_getresgid 171 +#define __NR_prctl 172 +#define __NR_rt_sigreturn 173 +#define __NR_rt_sigaction 174 +#define __NR_rt_sigprocmask 175 +#define __NR_rt_sigpending 176 +#define __NR_rt_sigtimedwait 177 +#define __NR_rt_sigqueueinfo 178 +#define __NR_rt_sigsuspend 179 +#define __NR_pread 180 +#define __NR_pwrite 181 +#define __NR_chown 182 +#define __NR_getcwd 183 +#define __NR_capget 184 +#define __NR_capset 185 +#define __NR_sigaltstack 186 +#define __NR_sendfile 187 +#define __NR_getpmsg 188 /* some people actually want streams */ +#define __NR_putpmsg 189 /* some people actually want streams */ +#define __NR_vfork 190 +#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */ +#define __NR_mmap2 192 +#define __NR_truncate64 193 +#define __NR_ftruncate64 194 +#define __NR_stat64 195 +#define __NR_lstat64 196 +#define __NR_fstat64 197 + +#define __NR_pidfd_send_signal 205 +#define __NR_pidfd_open 206 + +#define __NR_pivot_root 217 +#define __NR_mincore 
218 +#define __NR_madvise 219 +#define __NR_madvise1 219 /* delete when C lib stub is removed */ +#define __NR_getdents64 220 +#define __NR_fcntl64 221 +#define __NR_core 222 /* for analys kernel core */ +#define __NR_macctl 223 /* MCST trust linux */ +#define __NR_newfstatat 224 +#define __NR_emergency 225 +#define __NR_e2k_sigsetjmp 226 /* setjmp e2k specific */ +#define __NR_e2k_longjmp 227 /* longjmp e2k specific */ +#define __NR_e2k_syswork 228 /* e2k_syswork */ +#define __NR_clone_thread 229 +#define __NR_clone2 __NR_clone_thread /* don't delete old name */ +#define __NR_e2k_longjmp2 230 /* Second Edition */ +#define __NR_soft_debug 231 +#define __NR_setxattr 232 +#define __NR_lsetxattr 233 +#define __NR_fsetxattr 234 +#define __NR_getxattr 235 +#define __NR_lgetxattr 236 +#define __NR_fgetxattr 237 +#define __NR_listxattr 238 +#define __NR_llistxattr 239 +#define __NR_flistxattr 240 +#define __NR_removexattr 241 +#define __NR_lremovexattr 242 +#define __NR_fremovexattr 243 +#define __NR_gettid 244 +#define __NR_readahead 245 +#define __NR_tkill 246 +#define __NR_sendfile64 247 +#define __NR_futex 248 +#define __NR_sched_setaffinity 249 +#define __NR_sched_getaffinity 250 +#define __NR_pipe2 251 +#define __NR_set_backtrace 252 +#define __NR_get_backtrace 253 +#define __NR_access_hw_stacks 254 +#define __NR_el_posix 255 +#define __NR_io_uring_setup 256 +#define __NR_io_uring_enter 257 +#define __NR_io_uring_register 258 +#define __NR_set_tid_address 259 +#define __NR_el_binary 260 +#define __NR_timer_create 261 +#define __NR_timer_settime 262 +#define __NR_timer_gettime 263 +#define __NR_timer_getoverrun 264 +#define __NR_timer_delete 265 +#define __NR_clock_settime 266 +#define __NR_clock_gettime 267 +#define __NR_clock_getres 268 +#define __NR_clock_nanosleep 269 + +/* added for compatibility with x86_64 */ +#define __NR_msgget 270 +#define __NR_msgctl 271 +#define __NR_msgrcv 272 +#define __NR_msgsnd 273 +#define __NR_semget 274 +#define __NR_semctl 275 
+#define __NR_semtimedop 276 +#define __NR_semop 277 +#define __NR_shmget 278 +#define __NR_shmctl 279 +#define __NR_shmat 280 +#define __NR_shmdt 281 + +#define __NR_open_tree 282 +#define __NR_move_mount 283 + +#define __NR_rseq 284 +#define __NR_io_pgetevents 285 +#define __NR_accept4 286 + +#define __NR_sched_setattr 287 +#define __NR_sched_getattr 288 + +#define __NR_ioprio_set 289 +#define __NR_ioprio_get 290 +#define __NR_inotify_init 291 +#define __NR_inotify_add_watch 292 +#define __NR_inotify_rm_watch 293 + +#define __NR_io_setup 294 +#define __NR_io_destroy 295 +#define __NR_io_getevents 296 +#define __NR_io_submit 297 +#define __NR_io_cancel 298 +#define __NR_fadvise64 299 + +#define __NR_exit_group 300 +#define __NR_lookup_dcookie 301 +#define __NR_epoll_create 302 +#define __NR_epoll_ctl 303 +#define __NR_epoll_wait 304 +#define __NR_remap_file_pages 305 +#define __NR_statfs64 306 +#define __NR_fstatfs64 307 +#define __NR_tgkill 308 +#define __NR_utimes 309 +#define __NR_fadvise64_64 310 +#define __NR_vserver 311 +#define __NR_mbind 312 +#define __NR_get_mempolicy 313 +#define __NR_set_mempolicy 314 +#define __NR_mq_open 315 +#define __NR_mq_unlink 316 +#define __NR_mq_timedsend 317 +#define __NR_mq_timedreceive 318 +#define __NR_mq_notify 319 +#define __NR_mq_getsetattr 320 +#define __NR_kexec_load 321 +#define __NR_waitid 322 +#define __NR_add_key 323 +#define __NR_request_key 324 +#define __NR_keyctl 325 +#define __NR_mcst_rt 326 +#define __NR_getcpu 327 +#define __NR_move_pages 328 +#define __NR_splice 329 +#define __NR_vmsplice 330 +#define __NR_tee 331 +#define __NR_migrate_pages 332 +#define __NR_utimensat 333 +#define __NR_rt_tgsigqueueinfo 334 +#define __NR_openat 335 +#define __NR_mkdirat 336 +#define __NR_mknodat 337 +#define __NR_fchownat 338 +#define __NR_unlinkat 339 +#define __NR_renameat 340 +#define __NR_linkat 341 +#define __NR_symlinkat 342 +#define __NR_readlinkat 343 +#define __NR_fchmodat 344 +#define __NR_faccessat 345 +#define 
__NR_epoll_pwait 346 +#define __NR_signalfd4 347 +#define __NR_eventfd2 348 +#define __NR_recvmmsg 349 +#define __NR_cnt_point 350 +#define __NR_timerfd_create 351 +#define __NR_timerfd_settime 352 +#define __NR_timerfd_gettime 353 +#define __NR_preadv 354 +#define __NR_pwritev 355 +#define __NR_fallocate 356 +#define __NR_sync_file_range 357 +#define __NR_dup3 358 +#define __NR_inotify_init1 359 +#define __NR_epoll_create1 360 +#define __NR_fstatat64 361 +#define __NR_futimesat 362 +#define __NR_perf_event_open 363 +#define __NR_unshare 364 +#define __NR_get_robust_list 365 +#define __NR_set_robust_list 366 +#define __NR_pselect6 367 +#define __NR_ppoll 368 +#define __NR_setcontext 369 +#define __NR_makecontext 370 +#define __NR_swapcontext 371 +#define __NR_freecontext 372 +#define __NR_fanotify_init 373 +#define __NR_fanotify_mark 374 +#define __NR_prlimit64 375 +#define __NR_clock_adjtime 376 +#define __NR_syncfs 377 +#define __NR_sendmmsg 378 +#define __NR_setns 379 +#define __NR_process_vm_readv 380 +#define __NR_process_vm_writev 381 +#define __NR_kcmp 382 +#define __NR_finit_module 383 +#define __NR_renameat2 384 +#define __NR_getrandom 385 +#define __NR_memfd_create 386 +#define __NR_bpf 387 +#define __NR_execveat 388 +#define __NR_userfaultfd 389 +#define __NR_membarrier 390 +#define __NR_mlock2 391 +#define __NR_seccomp 392 +#define __NR_shutdown 393 +#define __NR_copy_file_range 394 +#define __NR_preadv2 395 +#define __NR_pwritev2 396 +/* free (unused) entries - reserve 397 - 399 */ +#define __NR_name_to_handle_at 400 +#define __NR_open_by_handle_at 401 +#define __NR_statx 402 +/* added for compatibility with x86_64 */ +#define __NR_socket 403 +#define __NR_connect 404 +#define __NR_accept 405 +#define __NR_sendto 406 +#define __NR_recvfrom 407 +#define __NR_sendmsg 408 +#define __NR_recvmsg 409 +#define __NR_bind 410 +#define __NR_listen 411 +#define __NR_getsockname 412 +#define __NR_getpeername 413 +#define __NR_socketpair 414 +#define 
__NR_setsockopt 415 +#define __NR_getsockopt 416 +/* free (unused) entries - reserve 417 - 418 */ +#define __NR_arch_prctl 419 +/* added for combability of protected system calls v1-v5 & v6 */ +#define __NR_newuselib 420 +#define __NR_rt_sigaction_ex 421 +/* protected Mode specific memory allocation syscall number */ +#define __NR_get_mem 422 +#define __NR_free_mem 423 +/* protected mode specific clean memory from old invalid descriptors */ +#define __NR_clean_descriptors 424 +/* protected mode specific unloading module from memory */ +#define __NR_unuselib 425 +#define __NR_clone3 426 +#define __NR_fsopen 427 +#define __NR_fsconfig 428 +#define __NR_fsmount 429 +#define __NR_fspick 430 + +#define NR_syscalls 431 + +/* compatibility with x86_64 */ +#define __NR_pread64 __NR_pread +#define __NR_pwrite64 __NR_pwrite + +/* Fast system calls */ +#define __NR_fast_sys_gettimeofday 0 +#define __NR_fast_sys_clock_gettime 1 +#define __NR_fast_sys_getcpu 2 +#define __NR_fast_sys_siggetmask 3 +#define __NR_fast_sys_getcontext 4 +#define __NR_fast_sys_set_return 5 + +#endif /* _UAPI_E2K_UNISTD_H_ */ diff --git a/arch/e2k/kernel/.gitignore b/arch/e2k/kernel/.gitignore new file mode 100644 index 000000000000..27a182861b63 --- /dev/null +++ b/arch/e2k/kernel/.gitignore @@ -0,0 +1,4 @@ +mkclearwindow +ttable_asm.h +ttable_wbs.h +vmlinux.lds \ No newline at end of file diff --git a/arch/e2k/kernel/Makefile b/arch/e2k/kernel/Makefile new file mode 100644 index 000000000000..2627e6340eeb --- /dev/null +++ b/arch/e2k/kernel/Makefile @@ -0,0 +1,148 @@ +# +# Makefile for the linux kernel. +# /arch/e2k/kernel +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). 
+# + +subdir-ccflags-y := -Werror -Wswitch -Wenum-compare + +extra-y := vmlinux.lds +EXTRA_AFLAGS := $(CFLAGS) +EXTRA_CFLAGS := $(CFLAGS) -Ifs + +ifdef CONFIG_VIRTUALIZATION + EXTRA_CFLAGS += -Wframe-larger-than=6144 +endif + +ifneq ($(CONFIG_CPU_HW_CLEAR_RF),y) +obj-y += clear_rf.o +extra-y += ttable_asm.h +hostprogs-y := mkclearwindow +endif +extra-y += ttable_wbs.h ttable_tmp.o + +quiet_cmd_gen_ttable_wbs_h = GEN $@ +cmd_gen_ttable_wbs_h = rm -f $(obj)/ttable_wbs.h; \ + touch $(obj)/ttable_wbs.h; \ + $(OBJDUMP) -me2k -d $(obj)/ttable_tmp.o | sed -n -e '//,/<*>:/ s/.*setwd wsz = \(0x[0-9a-f][0-9a-f]*\).*/\#define USER_TRAP_HANDLER_SIZE \1/p' | sort -k 3 -g | tail -1 >> $(obj)/ttable_wbs.h; \ + $(OBJDUMP) -me2k -d $(obj)/ttable_tmp.o | sed -n -e '//,/<*>:/ s/.*setwd wsz = \(0x[0-9a-f][0-9a-f]*\).*/\#define TTABLE_ENTRY_8_SIZE \1/p' | sort -k 3 -g | tail -1 >> $(obj)/ttable_wbs.h; \ + $(OBJDUMP) -me2k -d $(obj)/ttable_tmp.o | sed -n -e '//,/<*>:/ s/.*setwd wsz = \(0x[0-9a-f][0-9a-f]*\).*/\#define TTABLE_ENTRY_10_SIZE \1/p' | sort -k 3 -g | tail -1 >> $(obj)/ttable_wbs.h; \ + $(OBJDUMP) -me2k -d $(obj)/ttable_tmp.o | sed -n -e '/<__ret_from_fork>/,/<*>:/ s/.*setwd wsz = \(0x[0-9a-f][0-9a-f]*\).*/\#define RET_FROM_FORK_SIZE \1/p' | sort -k 3 -g | tail -1 >> $(obj)/ttable_wbs.h; \ + $(OBJDUMP) -me2k -d $(obj)/ttable_tmp.o | sed -n -e '//,/<*>:/ s/.*setwd wsz = \(0x[0-9a-f][0-9a-f]*\).*/\#define MAKECONTEXT_SIZE \1/p' | sort -k 3 -g | tail -1 >> $(obj)/ttable_wbs.h; \ + $(OBJDUMP) -me2k -d $(obj)/ttable_tmp.o | sed -n -e '//,/<*>:/ s/.*setwd wsz = \(0x[0-9a-f][0-9a-f]*\).*/\#define HANDLE_SYS_CALL_SIZE \1/p' | sort -k 3 -g | tail -1 >> $(obj)/ttable_wbs.h; \ + $(OBJDUMP) -me2k -d $(obj)/ttable_tmp.o | sed -n -e '//,/<*>:/ s/.*setwd wsz = \(0x[0-9a-f][0-9a-f]*\).*/\#define DO_SIGRETURN_SIZE \1/p' | sort -k 3 -g | tail -1 >> $(obj)/ttable_wbs.h; \ + $(OBJDUMP) -me2k -d $(obj)/ttable_tmp.o | sed -n -e '//,/<*>:/ s/.*setwd wsz = \(0x[0-9a-f][0-9a-f]*\).*/\#define 
RETURN_PV_VCPU_TRAP_SIZE \1/p' | sort -k 3 -g | tail -1 >> $(obj)/ttable_wbs.h; \ + $(OBJDUMP) -me2k -d $(obj)/ttable_tmp.o | sed -n -e '//,/<*>:/ s/.*setwd wsz = \(0x[0-9a-f][0-9a-f]*\).*/\#define HANDLE_PV_VCPU_SYS_FORK_SIZE \1/p' | sort -k 3 -g | tail -1 >> $(obj)/ttable_wbs.h; \ + $(OBJDUMP) -me2k -d $(obj)/ttable_tmp.o | sed -n -e '//,/<*>:/ s/.*setwd wsz = \(0x[0-9a-f][0-9a-f]*\).*/\#define HANDLE_PV_VCPU_SYS_CALL_SIZE \1/p' | sort -k 3 -g | tail -1 >> $(obj)/ttable_wbs.h + +$(obj)/ttable_tmp.o: $(src)/ttable_tmp.c FORCE + +$(obj)/ttable_wbs.h: $(obj)/ttable_tmp.o FORCE + $(call if_changed,gen_ttable_wbs_h) + +ifneq ($(CONFIG_CPU_HW_CLEAR_RF),y) +quiet_cmd_mkclearwindow = GEN $@ +cmd_mkclearwindow = $(obj)/mkclearwindow > $@ + +$(obj)/mkclearwindow: $(obj)/ttable_wbs.h + +$(obj)/ttable_asm.h: $(obj)/ttable_wbs.h $(obj)/mkclearwindow FORCE + $(call if_changed,mkclearwindow) + +$(obj)/ttable.o: $(obj)/ttable_asm.h +else +$(obj)/ttable.o: $(obj)/ttable_wbs.h +endif + +# -fexclude-ctpr2 - to make sure that AAU is not zeroed before we get to it +# -fno-dam - hardware bug 124206 workaround (CPU_HWBUG_L1I_STOPS_WORKING) +CFLAGS_ttable.o := -fno-dam -fexclude-ctpr2 +CFLAGS_ttable_tmp.o := -fno-dam -fexclude-ctpr2 -DGENERATING_HEADER + +# To compile gregs and ctpr saving for iset v6 +AFLAGS_REMOVE_trap_table.o = $(CFLAGS_ALL_CPUS) +AFLAGS_trap_table.o += -march=elbrus-v6 + +# We should no instrument these files +CFLAGS_REMOVE_ttable.o := -fprofile-generate-kernel +CFLAGS_REMOVE_ttable_tmp.o := -fprofile-generate-kernel + +CFLAGS_ptrace.o += -Wframe-larger-than=12288 + +obj-y += trap_table.o ttable.o page_tables.o process.o copy-hw-stacks.o \ + entry_user.o signal.o io.o setup.o time.o sys_e2k.o traps.o \ + systable.o ptrace.o e2k_syswork.o sys_32.o $(GDBSTART) \ + proc_context.o backtrace.o trace_clock.o topology.o \ + rtc.o e2k.o trace.o nmi.o getsp.o e2k-iommu.o alternative.o \ + convert_array.o protected_mq_notify.o protected_timer_create.o \ + protected_syscalls.o 
+ +ifneq ($(CONFIG_CPU_HAS_FILL_INSTRUCTION),y) +obj-y += fill_handler_entry.o +endif +obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o + +CFLAGS_process.o += -fno-tail-calls + +obj-y += cpu/ + +ifeq ($(CONFIG_E2K_MACHINE),y) +else +#CFLAGS_e2k_sic.o = $(CFLAGS_E2K_SIC) +endif +obj-y += e2k_sic.o + +ifdef CONFIG_FUNCTION_TRACER +# Do not profile debug and lowlevel utilities +CFLAGS_REMOVE_trace_stack.o = -pg +CFLAGS_REMOVE_time.o = -pg +CFLAGS_REMOVE_smpboot.o = -pg +CFLAGS_REMOVE_ftrace.o = -pg +CFLAGS_REMOVE_e2k_sic.o = -pg +CFLAGS_REMOVE_hw_breakpoint.o = -pg +endif + +ifdef CONFIG_CPU_ES2 +CFLAGS_REMOVE_sclkr.o = $(CFLAGS_ALL_CPUS) +CFLAGS_sclkr.o += -march=elbrus-v3 +endif + +obj-$(CONFIG_SCLKR_CLOCKSOURCE) += sclkr.o +obj-$(CONFIG_SCLKR_CLOCKSOURCE) += proc_sclkr.o + +obj-$(CONFIG_ELF_CORE) += elfcore.o +obj-$(CONFIG_PERF_EVENTS) += perf_event/ +obj-$(CONFIG_MONITORS) += monitors.o +obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_SMP) += smpboot.o +obj-$(CONFIG_IOCTL_ELF32) += ioctl32.o +obj-$(CONFIG_RECOVERY) += recovery.o +obj-$(CONFIG_MODULES) += module.o ksyms.o +obj-$(CONFIG_KPROBES) += kprobes.o +obj-$(CONFIG_SECONDARY_SPACE_SUPPORT) += sec_space.o +obj-$(CONFIG_PRECISE_TIME) += precise_time.o +obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o +obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o +obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace_graph_entry.o +obj-$(CONFIG_STACKTRACE) += stacktrace.o +obj-$(CONFIG_E2K_STACKS_TRACER) += trace_stack.o +obj-$(CONFIG_HOTPLUG_CPU) += hotplug-cpu.o +ifeq ($(PROFILE_GENERATE), 1) +obj-$(CONFIG_EPROF_KERNEL) += libeprof/ +endif +obj-$(CONFIG_OF) += devtree.o +obj-$(CONFIG_E2K_KEXEC) += kexec.o + +# For E2K_WAIT_ALL and atomic operations support on phys memory +CFLAGS_kexec.o += -DE2K_P2V +ifeq ($(call cc-option-yn,-fno-semi-spec-ld -fno-spec-ld),y) + CFLAGS_kexec.o += -fno-semi-spec-ld -fno-spec-ld +else + CFLAGS_kexec.o += -fno-ld-spec +endif + +GCOV_PROFILE_kexec.o := n # Since it disables virtual memory + +kernelclean: dummy diff 
--git a/arch/e2k/kernel/alternative.c b/arch/e2k/kernel/alternative.c new file mode 100644 index 000000000000..48f33c8c807b --- /dev/null +++ b/arch/e2k/kernel/alternative.c @@ -0,0 +1,96 @@ +#include +#include +#include + +static void __init_or_module add_padding(void *insns, + unsigned int len, void *ip) +{ + memset(insns, 0, len); + + while (len >= 64) { + *(u32 *) insns = 0x00000070; + insns += 64; + len -= 64; + } + + switch (len) { + case 56: + *(u32 *) insns = 0x00000060; + break; + case 48: + *(u32 *) insns = 0x00000050; + break; + case 40: + *(u32 *) insns = 0x00000040; + break; + case 32: + *(u32 *) insns = 0x00000030; + break; + case 24: + *(u32 *) insns = 0x00000020; + break; + case 16: + *(u32 *) insns = 0x00000010; + break; + case 8: + case 0: + break; + default: + panic("Bad altinstr padding length %d at %px\n", len, ip); + } +} + +void __init_or_module apply_alternatives(struct alt_instr *start, + struct alt_instr *end) +{ + struct alt_instr *a; + u8 *instr, *replacement; + + /* + * The scan order should be from start to end. A later scanned + * alternative code can overwrite previously scanned alternative code. 
+ */ + for (a = start; a < end; a++) { + int node; + + if (!cpu_has_by_value(a->facility)) + continue; + + instr = (u8 *) &a->instr_offset + a->instr_offset; + replacement = (u8 *) &a->repl_offset + a->repl_offset; + + if (unlikely(a->instrlen % 8 || a->replacementlen % 8)) { + WARN_ONCE(1, "alternative instructions length is not divisible by 8, skipping patching\n"); + continue; + } + + for_each_node_has_dup_kernel(node) { + unsigned long instr_phys; + u8 *instr_va; + + instr_phys = node_kernel_address_to_phys(node, + (unsigned long) instr); + if (IS_ERR_VALUE(instr_phys)) { + WARN_ONCE(1, "could not apply alternative instruction on node %d, skipping patching\n", node); + continue; + } + + instr_va = (u8 *) __va(instr_phys); + memcpy(instr_va, replacement, a->replacementlen); + + if (a->instrlen > a->replacementlen) + add_padding(instr_va + a->replacementlen, + a->instrlen - a->replacementlen, instr); + + /* Modules are not duplicated */ + if (!is_duplicated_code((unsigned long) instr)) + break; + } + } +} + +extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; +void __init apply_alternative_instructions(void) +{ + apply_alternatives(__alt_instructions, __alt_instructions_end); +} diff --git a/arch/e2k/kernel/asm-offsets.c b/arch/e2k/kernel/asm-offsets.c new file mode 100644 index 000000000000..257078f62d45 --- /dev/null +++ b/arch/e2k/kernel/asm-offsets.c @@ -0,0 +1,238 @@ +/* + * Generate definitions needed by assembly language modules. + * This code generates raw asm output which is post-processed to extract + * and format the required data. 
+ */ + +#define ASM_OFFSETS_C 1 + +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_VIRTUALIZATION +#include +#endif /* CONFIG_VIRTUALIZATION */ + +void common(void) { + +OFFSET(TSK_TI_FLAGS, task_struct, thread_info.flags); +OFFSET(TSK_U_STACK_TOP, task_struct, thread_info.u_stack.top); +OFFSET(TSK_K_USD_LO, task_struct, thread_info.k_usd_lo); +OFFSET(TSK_K_USD_HI, task_struct, thread_info.k_usd_hi); +OFFSET(TSK_IRQ_ENTER_CLK, task_struct, thread_info.irq_enter_clk); +OFFSET(TSK_UPSR, task_struct, thread_info.upsr); + +OFFSET(TI_FLAGS, thread_info, flags); +OFFSET(TI_K_USD_LO, thread_info, k_usd_lo); +OFFSET(TI_K_USD_HI, thread_info, k_usd_hi); +OFFSET(TI_K_PSP_LO, thread_info, k_psp_lo); +OFFSET(TI_K_PSP_HI, thread_info, k_psp_hi); +OFFSET(TI_K_PCSP_LO, thread_info, k_pcsp_lo); +OFFSET(TI_K_PCSP_HI, thread_info, k_pcsp_hi); + +OFFSET(TSK_TI_TMP_U_PSP_LO, task_struct, thread_info.tmp_user_stacks.psp_lo); +OFFSET(TSK_TI_TMP_U_PSP_HI, task_struct, thread_info.tmp_user_stacks.psp_hi); +OFFSET(TSK_TI_TMP_U_PCSP_LO, task_struct, thread_info.tmp_user_stacks.pcsp_lo); +OFFSET(TSK_TI_TMP_U_PCSP_HI, task_struct, thread_info.tmp_user_stacks.pcsp_hi); +OFFSET(TSK_TI_TMP_U_PSHTP, task_struct, thread_info.tmp_user_stacks.pshtp); +OFFSET(TSK_TI_TMP_U_PCSHTP, task_struct, thread_info.tmp_user_stacks.pcshtp); + +OFFSET(TSK_TI_G_VCPU_STATE, task_struct, + thread_info.k_gregs.g[GUEST_VCPU_STATE_GREGS_PAIRS_INDEX].base); +OFFSET(TSK_TI_G_TASK, task_struct, + thread_info.k_gregs.g[CURRENT_TASK_GREGS_PAIRS_INDEX].base); +OFFSET(TSK_TI_G_MY_CPU_OFFSET, task_struct, + thread_info.k_gregs.g[MY_CPU_OFFSET_GREGS_PAIRS_INDEX].base); +OFFSET(TSK_TI_G_CPU_ID_PREEMPT, task_struct, + thread_info.k_gregs.g[SMP_CPU_ID_GREGS_PAIRS_INDEX].base); +OFFSET(TSK_TI_G_VCPU_STATE_EXT, task_struct, + thread_info.k_gregs.g[GUEST_VCPU_STATE_GREGS_PAIRS_INDEX].ext); +OFFSET(TSK_TI_G_TASK_EXT, task_struct, + 
thread_info.k_gregs.g[CURRENT_TASK_GREGS_PAIRS_INDEX].ext); +OFFSET(TSK_TI_G_MY_CPU_OFFSET_EXT, task_struct, + thread_info.k_gregs.g[MY_CPU_OFFSET_GREGS_PAIRS_INDEX].ext); +OFFSET(TSK_TI_G_CPU_ID_PREEMPT_EXT, task_struct, + thread_info.k_gregs.g[SMP_CPU_ID_GREGS_PAIRS_INDEX].ext); + +OFFSET(TSK_TI_TMP_G_VCPU_STATE, task_struct, + thread_info.tmp_k_gregs.g[GUEST_VCPU_STATE_GREGS_PAIRS_INDEX].base); +OFFSET(TSK_TI_TMP_G_TASK, task_struct, + thread_info.tmp_k_gregs.g[CURRENT_TASK_GREGS_PAIRS_INDEX].base); +OFFSET(TSK_TI_TMP_G_MY_CPU_OFFSET, task_struct, + thread_info.tmp_k_gregs.g[MY_CPU_OFFSET_GREGS_PAIRS_INDEX].base); +OFFSET(TSK_TI_TMP_G_CPU_ID_PREEMPT, task_struct, + thread_info.tmp_k_gregs.g[SMP_CPU_ID_GREGS_PAIRS_INDEX].base); +OFFSET(TSK_TI_TMP_G_VCPU_STATE_EXT, task_struct, + thread_info.tmp_k_gregs.g[GUEST_VCPU_STATE_GREGS_PAIRS_INDEX].ext); +OFFSET(TSK_TI_TMP_G_TASK_EXT, task_struct, + thread_info.tmp_k_gregs.g[CURRENT_TASK_GREGS_PAIRS_INDEX].ext); +OFFSET(TSK_TI_TMP_G_MY_CPU_OFFSET_EXT, task_struct, + thread_info.tmp_k_gregs.g[MY_CPU_OFFSET_GREGS_PAIRS_INDEX].ext); +OFFSET(TSK_TI_TMP_G_CPU_ID_PREEMPT_EXT, task_struct, + thread_info.tmp_k_gregs.g[SMP_CPU_ID_GREGS_PAIRS_INDEX].ext); +#ifdef CONFIG_VIRTUALIZATION +OFFSET(TI_VCPU, thread_info, vcpu); +#endif /* CONFIG_VIRTUALIZATION */ +OFFSET(TI_KERNEL_GREGS, thread_info, k_gregs.g); +OFFSET(TI_G_VCPU_STATE, thread_info, + k_gregs.g[GUEST_VCPU_STATE_GREGS_PAIRS_INDEX].base); +OFFSET(TI_G_TASK, thread_info, k_gregs.g[CURRENT_TASK_GREGS_PAIRS_INDEX].base); +OFFSET(TI_G_MY_CPU_OFFSET, thread_info, + k_gregs.g[MY_CPU_OFFSET_GREGS_PAIRS_INDEX].base); +OFFSET(TI_G_CPU_ID_PREEMPT, thread_info, k_gregs.g[SMP_CPU_ID_GREGS_PAIRS_INDEX].base); +OFFSET(TI_G_VCPU_STATE_EXT, thread_info, + k_gregs.g[GUEST_VCPU_STATE_GREGS_PAIRS_INDEX].ext); +OFFSET(TI_G_TASK_EXT, thread_info, + k_gregs.g[CURRENT_TASK_GREGS_PAIRS_INDEX].ext); +OFFSET(TI_G_MY_CPU_OFFSET_EXT, thread_info, + k_gregs.g[MY_CPU_OFFSET_GREGS_PAIRS_INDEX].ext); 
+OFFSET(TI_G_CPU_ID_PREEMPT_EXT, thread_info, + k_gregs.g[SMP_CPU_ID_GREGS_PAIRS_INDEX].ext); + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +OFFSET(TSK_CURR_RET_STACK, task_struct, curr_ret_stack); +#endif +OFFSET(TSK_PTRACE, task_struct, ptrace); +OFFSET(TSK_THREAD, task_struct, thread); +OFFSET(TSK_STACK, task_struct, stack); +OFFSET(TSK_FLAGS, task_struct, flags); +OFFSET(TSK_THREAD_FLAGS, task_struct, thread.flags); +OFFSET(TSK_TI, task_struct, thread_info); + +OFFSET(TT_FLAGS, thread_struct, flags); + +#ifdef CONFIG_CLW_ENABLE +OFFSET(PT_US_CL_M0, pt_regs, us_cl_m[0]); +OFFSET(PT_US_CL_M1, pt_regs, us_cl_m[1]); +OFFSET(PT_US_CL_M2, pt_regs, us_cl_m[2]); +OFFSET(PT_US_CL_M3, pt_regs, us_cl_m[3]); +OFFSET(PT_US_CL_UP, pt_regs, us_cl_up); +OFFSET(PT_US_CL_B, pt_regs, us_cl_b); +#endif + +#ifdef CONFIG_VIRTUALIZATION +OFFSET(TI_KERNEL_IMAGE_PGD_P, thread_info, kernel_image_pgd_p); +OFFSET(TI_KERNEL_IMAGE_PGD, thread_info, kernel_image_pgd); +OFFSET(TI_SHADOW_IMAGE_PGD, thread_info, shadow_image_pgd); +OFFSET(TI_GTHREAD_INFO, thread_info, gthread_info); +OFFSET(TI_VCPU, thread_info, vcpu); +OFFSET(TI_HOST_GREGS_TO_VIRT, thread_info, h_gregs.g); + +OFFSET(GLOB_REG_BASE, e2k_greg, base); +OFFSET(GLOB_REG_EXT, e2k_greg, ext); +DEFINE(GLOB_REG_SIZE, sizeof(struct e2k_greg)); + +OFFSET(VCPU_ARCH_VCPU_STATE, kvm_vcpu, arch.vcpu_state); +OFFSET(VCPU_STATE_CPU_REGS, kvm_vcpu_state, cpu.regs); +OFFSET(VCPU_ARCH_CTXT_SBR, kvm_vcpu, arch.sw_ctxt.sbr); +OFFSET(VCPU_ARCH_CTXT_USD_HI, kvm_vcpu, arch.sw_ctxt.usd_hi); +OFFSET(VCPU_ARCH_CTXT_USD_LO, kvm_vcpu, arch.sw_ctxt.usd_lo); +OFFSET(VCPU_ARCH_CTXT_SAVED_VALID, kvm_vcpu, arch.sw_ctxt.saved.valid); +OFFSET(VCPU_ARCH_CTXT_SAVED_SBR, kvm_vcpu, arch.sw_ctxt.saved.sbr); +OFFSET(VCPU_ARCH_CTXT_SAVED_USD_HI, kvm_vcpu, arch.sw_ctxt.saved.usd_hi); +OFFSET(VCPU_ARCH_CTXT_SAVED_USD_LO, kvm_vcpu, arch.sw_ctxt.saved.usd_lo); + +#ifdef CONFIG_CLW_ENABLE +OFFSET(VCPU_ARCH_CTXT_US_CL_D, kvm_vcpu, arch.sw_ctxt.us_cl_d); 
+OFFSET(VCPU_ARCH_CTXT_US_CL_B, kvm_vcpu, arch.sw_ctxt.us_cl_b); +OFFSET(VCPU_ARCH_CTXT_US_CL_UP, kvm_vcpu, arch.sw_ctxt.us_cl_up); +OFFSET(VCPU_ARCH_CTXT_US_CL_M0, kvm_vcpu, arch.sw_ctxt.us_cl_m0); +OFFSET(VCPU_ARCH_CTXT_US_CL_M1, kvm_vcpu, arch.sw_ctxt.us_cl_m1); +OFFSET(VCPU_ARCH_CTXT_US_CL_M2, kvm_vcpu, arch.sw_ctxt.us_cl_m2); +OFFSET(VCPU_ARCH_CTXT_US_CL_M3, kvm_vcpu, arch.sw_ctxt.us_cl_m3); +#endif +#endif /* CONFIG_VIRTUALIZATION */ + +OFFSET(PT_TRAP, pt_regs, trap); +OFFSET(PT_CTRP1, pt_regs, ctpr1); +OFFSET(PT_CTRP2, pt_regs, ctpr2); +OFFSET(PT_CTRP3, pt_regs, ctpr3); +OFFSET(PT_CTPR1_HI, pt_regs, ctpr1_hi); +OFFSET(PT_CTPR2_HI, pt_regs, ctpr2_hi); +OFFSET(PT_CTPR3_HI, pt_regs, ctpr3_hi); +OFFSET(PT_LSR, pt_regs, lsr); +OFFSET(PT_ILCR, pt_regs, ilcr); +OFFSET(PT_LSR1, pt_regs, lsr1); +OFFSET(PT_ILCR1, pt_regs, ilcr1); +OFFSET(PT_STACK, pt_regs, stacks); +OFFSET(PT_SYS_NUM, pt_regs, sys_num); +OFFSET(PT_KERNEL_ENTRY, pt_regs, kernel_entry); +OFFSET(PT_ARG_5, pt_regs, args[5]); +OFFSET(PT_ARG_6, pt_regs, args[6]); +OFFSET(PT_ARG_7, pt_regs, args[7]); +OFFSET(PT_ARG_8, pt_regs, args[8]); +OFFSET(PT_ARG_9, pt_regs, args[9]); +OFFSET(PT_ARG_10, pt_regs, args[10]); +OFFSET(PT_ARG_11, pt_regs, args[11]); +OFFSET(PT_ARG_12, pt_regs, args[12]); + +OFFSET(ST_USD_HI, e2k_stacks, usd_hi); +OFFSET(ST_USD_LO, e2k_stacks, usd_lo); +OFFSET(ST_TOP, e2k_stacks, top); +OFFSET(PT_NEXT, pt_regs, next); +BLANK(); + +#ifdef CONFIG_VIRTUALIZATION +OFFSET(PT_G_STACK, pt_regs, g_stacks); +OFFSET(G_ST_TOP, e2k_stacks, top); +OFFSET(G_ST_SBR, e2k_stacks, top); +#endif /* CONFIG_VIRTUALIZATION */ + +/*DEFINE(NATIVE_TASK_SIZE, NATIVE_TASK_SIZE);*/ /* defined at asm/pv_info.h */ + +#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_VIRTUALIZATION) +/*DEFINE(TASK_SIZE, NATIVE_TASK_SIZE);*/ +#else /* CONFIG_PARAVIRT_GUEST || CONFIG_VIRTUALIZATION */ +/*DEFINE(HOST_TASK_SIZE, HOST_TASK_SIZE);*/ /* defined at asm/pv_info.h */ +/*DEFINE(GUEST_TASK_SIZE, GUEST_TASK_SIZE);*/ /* defined at 
asm/pv_info.h */ +#if !defined(CONFIG_PARAVIRT_GUEST) +#ifndef CONFIG_KVM_GUEST_KERNEL +/*DEFINE(TASK_SIZE, NATIVE_TASK_SIZE);*/ +#else /* only guest kernel */ +/*DEFINE(TASK_SIZE, GUEST_TASK_SIZE);*/ +#endif /* ! CONFIG_GUEST_KERNEL */ +#else /* CONFIG_PARAVIRT_GUEST */ +/*DEFINE(TASK_SIZE, TASK_SIZE);*/ +#endif /* ! CONFIG_PARAVIRT_GUEST */ +#endif /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_VIRTUALIZATION */ + +DEFINE(PTRACE_SZOF, sizeof (struct pt_regs)); +DEFINE(TRAP_PTREGS_SZOF, sizeof(struct trap_pt_regs)); +#ifdef CONFIG_USE_AAU +DEFINE(AAU_SZOF, sizeof(e2k_aau_t)); +#endif +DEFINE(PT_PTRACED, PT_PTRACED); +DEFINE(E2K_FLAG_32BIT, E2K_FLAG_32BIT); +DEFINE(MAX_NR_CPUS, NR_CPUS); +DEFINE(E2K_KERNEL_UPSR_ENABLED, E2K_KERNEL_UPSR_ENABLED_ASM); +DEFINE(E2K_KERNEL_UPSR_DISABLED_ALL, E2K_KERNEL_UPSR_DISABLED_ALL_ASM); +DEFINE(E2K_LSR_VLC, E2K_LSR_VLC); +DEFINE(KERNEL_C_STACK_OFFSET, KERNEL_C_STACK_OFFSET); +DEFINE(KERNEL_C_STACK_SIZE, KERNEL_C_STACK_SIZE); +DEFINE(KERNEL_P_STACK_SIZE, KERNEL_P_STACK_SIZE); +DEFINE(KERNEL_PC_STACK_SIZE, KERNEL_PC_STACK_SIZE); +DEFINE(KERNEL_STACKS_SIZE, KERNEL_STACKS_SIZE); +DEFINE(CPU_HWBUG_USD_ALIGNMENT, CPU_HWBUG_USD_ALIGNMENT); +DEFINE(CPU_FEAT_TRAP_V5, CPU_FEAT_TRAP_V5); +DEFINE(CPU_FEAT_TRAP_V6, CPU_FEAT_TRAP_V6); +DEFINE(CPU_FEAT_QPREG, CPU_FEAT_QPREG); +DEFINE(USER_ADDR_MAX, USER_ADDR_MAX); + +DEFINE(KERNEL_CUT_BYTE_SIZE, sizeof (kernel_CUT)); +DEFINE(TSK_TI_STACK_DELTA, offsetof(struct task_struct, stack) - + offsetof(struct task_struct, thread_info)); +#ifdef CONFIG_SMP +DEFINE(TSK_TI_CPU_DELTA, offsetof(struct task_struct, cpu) - + offsetof(struct task_struct, thread_info)); +#endif +#ifdef CONFIG_PREEMPT_LAZY +OFFSET(TASK_TI_flags, task_struct, thread_info.flags); +OFFSET(TASK_TI_preempt_lazy_count, task_struct, thread_info.preempt_lazy_count); +#endif + +} diff --git a/arch/e2k/kernel/backtrace.c b/arch/e2k/kernel/backtrace.c new file mode 100644 index 000000000000..1fb6fef6c454 --- /dev/null +++ 
b/arch/e2k/kernel/backtrace.c @@ -0,0 +1,282 @@ +#include +#include +#include +#include +#include + +/* Does nothing, just return */ +extern void sys_backtrace_return(void); + +static int is_privileged_return(u64 ip) +{ + return ip == (u64) &sys_backtrace_return; +} + +struct get_backtrace_args { + int skip; + int nr_read; + int count; + int step; + void __user *buf; +}; + +static int get_backtrace_fn(e2k_mem_crs_t *frame, unsigned long real_frame_addr, + unsigned long corrected_frame_addr, int flags, void *arg) +{ + struct get_backtrace_args *args = (struct get_backtrace_args *) arg; + void __user *buf = args->buf; + int step = args->step; + u64 ip; + + if (args->nr_read >= args->count) + return 1; + + ip = AS(frame->cr0_hi).ip << 3; + + /* Skip kernel frames */ + if (!is_privileged_return(ip) && ip >= TASK_SIZE) + return 0; + + if (args->skip) { + --(args->skip); + return 0; + } + + if (!is_privileged_return(ip) && !access_ok(ip, 8)) + return -EFAULT; + + /* Special case of "just return" function */ + if (is_privileged_return(ip)) + ip = -1ULL; + + if ((step == 8) ? 
__put_user(ip, (u64 *) buf) : + __put_user(ip, (u32 *) buf)) + return -EFAULT; + + args->buf += step; + ++(args->nr_read); + + return 0; +} + +static long do_get_backtrace(void __user *buf, size_t count, size_t skip, + unsigned long flags, int step) +{ + long ret; + struct get_backtrace_args args; + + args.buf = buf; + args.step = step; + args.count = count; + args.nr_read = 0; + args.skip = skip + 1; /* Skip caller's frame */ + + if (flags) + return -EINVAL; + + if (!access_ok(buf, count * step)) + return -EFAULT; + + ret = parse_chain_stack(PCS_USER, NULL, get_backtrace_fn, &args); + + if (args.nr_read) + ret = args.nr_read; + + return ret; +} + +SYSCALL_DEFINE4(get_backtrace, unsigned long *__user, buf, + size_t, count, size_t, skip, unsigned long, flags) +{ + return do_get_backtrace(buf, count, skip, flags, 8); +} + +asmlinkage long compat_sys_get_backtrace(unsigned int *__user buf, + size_t count, size_t skip, unsigned long flags) +{ + return do_get_backtrace(buf, count, skip, flags, 4); +} + +struct set_backtrace_args { + int skip; + int nr_written; + int count; + int step; + void __user *buf; + struct vm_area_struct *vma; + struct vm_area_struct *pvma; +}; + +static int set_backtrace_fn(e2k_mem_crs_t *frame, unsigned long real_frame_addr, + unsigned long corrected_frame_addr, int flags, void *arg) +{ + struct set_backtrace_args *args = (struct set_backtrace_args *) arg; + void __user *buf = args->buf; + int step = args->step; + struct vm_area_struct *vma = args->vma; + struct vm_area_struct *pvma = args->pvma; + u64 prev_ip, ip; + e2k_cr0_hi_t cr0_hi; + e2k_cr1_lo_t cr1_lo; + int ret; + + if (args->nr_written >= args->count) + return 1; + + prev_ip = AS(frame->cr0_hi).ip << 3; + + /* Skip kernel frames */ + if (!is_privileged_return(prev_ip) && prev_ip >= TASK_SIZE) + return 0; + + if (args->skip) { + --(args->skip); + return 0; + } + + if ((step == 8) ? 
__get_user(ip, (u64 *) buf) : + __get_user(ip, (u32 *) buf)) + return -EFAULT; + + /* Special case of "just return" function */ + if (step == 8 && ip == -1ULL || step != 8 && ip == 0xffffffffULL) + ip = (u64) &sys_backtrace_return; + + if (!is_privileged_return(prev_ip) && (!pvma || + pvma->vm_start > prev_ip || pvma->vm_end <= prev_ip)) { + pvma = find_vma(current->mm, prev_ip); + if (!pvma || prev_ip < pvma->vm_start) + return -ESRCH; + args->pvma = pvma; + } + + if (!is_privileged_return(ip)) { + if (!access_ok(ip, 8)) + return -EFAULT; + + if (!vma || vma->vm_start > ip || vma->vm_end <= ip) { + if (ip >= pvma->vm_start && ip < pvma->vm_end) { + vma = pvma; + } else { + vma = find_vma(current->mm, ip); + if (!vma || ip < vma->vm_start) + return -ESRCH; + } + args->vma = vma; + } + + /* Forbid changing of special return value into normal + * one - to avoid cases when user changes to special and + * back to normal function to avoid security checks. */ + if (is_privileged_return(prev_ip)) + return -EPERM; + + /* Check that the permissions are the same - i.e. if + * the original was not writable then the new instruction + * is not writable too. */ + if ((pvma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC)) ^ + (vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC))) + return -EPERM; + + /* Check that the exception handling code + * resides in the same executable. 
*/ + if (pvma->vm_file != vma->vm_file) + return -EPERM; + } + + cr0_hi = frame->cr0_hi; + cr1_lo = frame->cr1_lo; + + if (is_privileged_return(ip)) { + AS(cr1_lo).pm = 1; + if (machine.native_iset_ver < E2K_ISET_V6) + AS(cr1_lo).ic = 0; + else + AS(cr1_lo).cui = KERNEL_CODES_INDEX; + } else { + AS(cr1_lo).pm = 0; + if (machine.native_iset_ver < E2K_ISET_V6) { + AS(cr1_lo).ic = 1; + } else { + int cui = find_cui_by_ip(ip); + + if (cui < 0) + return cui; + + AS(cr1_lo).cui = cui; + } + } + AS(cr0_hi).ip = ip >> 3; + + if (flags & PCF_FLUSH_NEEDED) + E2K_FLUSHC; + if (is_privileged_return(ip)) + ret = put_cr0_hi(cr0_hi, real_frame_addr, 0); + else + ret = put_cr1_lo(cr1_lo, real_frame_addr, 0); + if (ret) + return ret; + + if (flags & PCF_FLUSH_NEEDED) + E2K_FLUSHC; + if (is_privileged_return(ip)) + ret = put_cr1_lo(cr1_lo, real_frame_addr, 0); + else + ret = put_cr0_hi(cr0_hi, real_frame_addr, 0); + if (ret) { + /* Stack is not consistent anymore */ + force_sig(SIGBUS); + return ret; + } + + args->buf += step; + ++(args->nr_written); + + return 0; +} + +static long do_set_backtrace(void *__user buf, size_t count, size_t skip, + unsigned long flags, int step) +{ + struct mm_struct *mm = current->mm; + struct set_backtrace_args args; + long ret; + + if (flags) + return -EINVAL; + + if (!access_ok(buf, count * step)) + return -EFAULT; + + down_read(&mm->mmap_sem); + + args.skip = skip + 1; /* Skip caller's frame */ + args.nr_written = 0; + args.count = count; + args.step = step; + args.buf = buf; + args.vma = NULL; + args.pvma = NULL; + ret = parse_chain_stack(PCS_USER, NULL, set_backtrace_fn, &args); + + up_read(&mm->mmap_sem); + + if (args.nr_written) + ret = args.nr_written; + + return ret; +} + + +SYSCALL_DEFINE4(set_backtrace, unsigned long *__user, buf, + size_t, count, size_t, skip, unsigned long, flags) +{ + return do_set_backtrace(buf, count, skip, flags, 8); +} + +asmlinkage long compat_sys_set_backtrace(unsigned int *__user buf, + size_t count, size_t skip, 
unsigned long flags) +{ + return do_set_backtrace(buf, count, skip, flags, 4); +} + diff --git a/arch/e2k/kernel/clear_rf.S b/arch/e2k/kernel/clear_rf.S new file mode 100644 index 000000000000..fd3031ce673b --- /dev/null +++ b/arch/e2k/kernel/clear_rf.S @@ -0,0 +1,491 @@ +#include + +.global clear_rf_6 +.type clear_rf_6,@function +clear_rf_6: + { + nop 5 + return %ctpr3 + setwd wsz=6 + addd 0, 0, %dr0 + addd 0, 0, %dr1 + addd 0, 0, %dr2 + addd 0, 0, %dr3 + addd 0, 0, %dr4 + addd 0, 0, %dr5 + } + { + addd 0, 0, %dr6 + addd 0, 0, %dr7 + addd 0, 0, %dr8 + addd 0, 0, %dr9 + addd 0, 0, %dr10 + addd 0, 0, %dr11 + ct %ctpr3 + } +.size clear_rf_6, .-clear_rf_6 + +.global clear_rf_9 +.type clear_rf_9,@function +clear_rf_9: + { + nop 4 + return %ctpr3 + setwd wsz=9 + addd 0, 0, %dr0 + addd 0, 0, %dr1 + addd 0, 0, %dr2 + addd 0, 0, %dr3 + addd 0, 0, %dr4 + addd 0, 0, %dr5 + } + { + addd 0, 0, %dr6 + addd 0, 0, %dr7 + addd 0, 0, %dr8 + addd 0, 0, %dr9 + addd 0, 0, %dr10 + addd 0, 0, %dr11 + } + { + addd 0, 0, %dr12 + addd 0, 0, %dr13 + addd 0, 0, %dr14 + addd 0, 0, %dr15 + addd 0, 0, %dr16 + addd 0, 0, %dr17 + ct %ctpr3 + } +.size clear_rf_9, .-clear_rf_9 + +.global clear_rf_18 +.type clear_rf_18,@function +clear_rf_18: + { + nop 2 + disp %ctpr1, clear_rf_18_loop + setwd wsz=18 + setbn rbs=0, rsz=17, rcur=0 + rwd 6UL | E2K_LSR_VLC, %lsr + } + { + return %ctpr3 + } +clear_rf_18_loop: + { + loop_mode + addd 0, 0, %db[0] + addd 0, 0, %db[1] + addd 0, 0, %db[12] + addd 0, 0, %db[13] + addd 0, 0, %db[24] + addd 0, 0, %db[25] + alc alcf = 1, alct = 1 + abn abnf = 1, abnt = 1 + ct %ctpr1 ? 
%NOT_LOOP_END + } + ct %ctpr3 +.size clear_rf_18, .-clear_rf_18 + +.global clear_rf_21 +.type clear_rf_21,@function +clear_rf_21: + { + nop 2 + disp %ctpr1, clear_rf_21_loop + setwd wsz=21 + setbn rbs=0, rsz=20, rcur=0 + rwd 7UL | E2K_LSR_VLC, %lsr + } + { + return %ctpr3 + } +clear_rf_21_loop: + { + loop_mode + addd 0, 0, %db[0] + addd 0, 0, %db[1] + addd 0, 0, %db[18] + addd 0, 0, %db[19] + addd 0, 0, %db[36] + addd 0, 0, %db[37] + alc alcf = 1, alct = 1 + abn abnf = 1, abnt = 1 + ct %ctpr1 ? %NOT_LOOP_END + } + ct %ctpr3 +.size clear_rf_21, .-clear_rf_21 + +.global clear_rf_24 +.type clear_rf_24,@function +clear_rf_24: + { + nop 2 + disp %ctpr1, clear_rf_24_loop + setwd wsz=24 + setbn rbs=0, rsz=23, rcur=0 + rwd 8UL | E2K_LSR_VLC, %lsr + } + { + return %ctpr3 + } +clear_rf_24_loop: + { + loop_mode + addd 0, 0, %db[0] + addd 0, 0, %db[1] + addd 0, 0, %db[18] + addd 0, 0, %db[19] + addd 0, 0, %db[36] + addd 0, 0, %db[37] + alc alcf = 1, alct = 1 + abn abnf = 1, abnt = 1 + ct %ctpr1 ? %NOT_LOOP_END + } + ct %ctpr3 +.size clear_rf_24, .-clear_rf_24 + +.global clear_rf_27 +.type clear_rf_27,@function +clear_rf_27: + { + nop 2 + disp %ctpr1, clear_rf_27_loop + setwd wsz=27 + setbn rbs=0, rsz=26, rcur=0 + rwd 9UL | E2K_LSR_VLC, %lsr + } + { + return %ctpr3 + } +clear_rf_27_loop: + { + loop_mode + addd 0, 0, %db[0] + addd 0, 0, %db[1] + addd 0, 0, %db[18] + addd 0, 0, %db[19] + addd 0, 0, %db[36] + addd 0, 0, %db[37] + alc alcf = 1, alct = 1 + abn abnf = 1, abnt = 1 + ct %ctpr1 ? 
%NOT_LOOP_END + } + ct %ctpr3 +.size clear_rf_27, .-clear_rf_27 + +.global clear_rf_36 +.type clear_rf_36,@function +clear_rf_36: + { + nop 2 + disp %ctpr1, clear_rf_36_loop + setwd wsz=36 + setbn rbs=0, rsz=35, rcur=0 + rwd 12UL | E2K_LSR_VLC, %lsr + } + { + return %ctpr3 + } +clear_rf_36_loop: + { + loop_mode + addd 0, 0, %db[0] + addd 0, 0, %db[1] + addd 0, 0, %db[24] + addd 0, 0, %db[25] + addd 0, 0, %db[48] + addd 0, 0, %db[49] + alc alcf = 1, alct = 1 + abn abnf = 1, abnt = 1 + ct %ctpr1 ? %NOT_LOOP_END + } + ct %ctpr3 +.size clear_rf_36, .-clear_rf_36 + +.global clear_rf_45 +.type clear_rf_45,@function +clear_rf_45: + { + nop 2 + disp %ctpr1, clear_rf_45_loop + setwd wsz=45 + setbn rbs=0, rsz=44, rcur=0 + rwd 15UL | E2K_LSR_VLC, %lsr + } + { + return %ctpr3 + } +clear_rf_45_loop: + { + loop_mode + addd 0, 0, %db[0] + addd 0, 0, %db[1] + addd 0, 0, %db[30] + addd 0, 0, %db[31] + addd 0, 0, %db[60] + addd 0, 0, %db[61] + alc alcf = 1, alct = 1 + abn abnf = 1, abnt = 1 + ct %ctpr1 ? %NOT_LOOP_END + } + ct %ctpr3 +.size clear_rf_45, .-clear_rf_45 + +.global clear_rf_54 +.type clear_rf_54,@function +clear_rf_54: + { + nop 2 + disp %ctpr1, clear_rf_54_loop + setwd wsz=54 + setbn rbs=0, rsz=53, rcur=0 + rwd 18UL | E2K_LSR_VLC, %lsr + } + { + return %ctpr3 + } +clear_rf_54_loop: + { + loop_mode + addd 0, 0, %db[0] + addd 0, 0, %db[1] + addd 0, 0, %db[36] + addd 0, 0, %db[37] + addd 0, 0, %db[72] + addd 0, 0, %db[73] + alc alcf = 1, alct = 1 + abn abnf = 1, abnt = 1 + ct %ctpr1 ? 
%NOT_LOOP_END + } + ct %ctpr3 +.size clear_rf_54, .-clear_rf_54 + +.global clear_rf_63 +.type clear_rf_63,@function +clear_rf_63: + { + nop 2 + disp %ctpr1, clear_rf_63_loop + setwd wsz=63 + setbn rbs=0, rsz=62, rcur=0 + rwd 21UL | E2K_LSR_VLC, %lsr + } + { + return %ctpr3 + } +clear_rf_63_loop: + { + loop_mode + addd 0, 0, %db[0] + addd 0, 0, %db[1] + addd 0, 0, %db[42] + addd 0, 0, %db[43] + addd 0, 0, %db[84] + addd 0, 0, %db[85] + alc alcf = 1, alct = 1 + abn abnf = 1, abnt = 1 + ct %ctpr1 ? %NOT_LOOP_END + } + ct %ctpr3 +.size clear_rf_63, .-clear_rf_63 + +.global clear_rf_78 +.type clear_rf_78,@function +clear_rf_78: + { + nop 1 + disp %ctpr1, clear_rf_78_loop_1 + setwd wsz=78 + setbn rbs=0, rsz=62, rcur=0 + rwd 21UL | E2K_LSR_VLC, %lsr + } + { + disp %ctpr2, clear_rf_78_loop_2 + } + { + return %ctpr3 + } +clear_rf_78_loop_1: + { + loop_mode + addd 0, 0, %db[0] + addd 0, 0, %db[1] + addd 0, 0, %db[42] + addd 0, 0, %db[43] + addd 0, 0, %db[84] + addd 0, 0, %db[85] + alc alcf = 1, alct = 1 + abn abnf = 1, abnt = 1 + ct %ctpr1 ? %NOT_LOOP_END + } + { + nop 3 + setbn rbs=63, rsz=14, rcur=0 + rwd 5UL | E2K_LSR_VLC, %lsr + } +clear_rf_78_loop_2: + { + loop_mode + addd 0, 0, %db[0] + addd 0, 0, %db[1] + addd 0, 0, %db[10] + addd 0, 0, %db[11] + addd 0, 0, %db[20] + addd 0, 0, %db[21] + alc alcf = 1, alct = 1 + abn abnf = 1, abnt = 1 + ct %ctpr2 ? %NOT_LOOP_END + } + ct %ctpr3 +.size clear_rf_78, .-clear_rf_78 + +.global clear_rf_90 +.type clear_rf_90,@function +clear_rf_90: + { + nop 1 + disp %ctpr1, clear_rf_90_loop_1 + setwd wsz=90 + setbn rbs=0, rsz=62, rcur=0 + rwd 21UL | E2K_LSR_VLC, %lsr + } + { + disp %ctpr2, clear_rf_90_loop_2 + } + { + return %ctpr3 + } +clear_rf_90_loop_1: + { + loop_mode + addd 0, 0, %db[0] + addd 0, 0, %db[1] + addd 0, 0, %db[42] + addd 0, 0, %db[43] + addd 0, 0, %db[84] + addd 0, 0, %db[85] + alc alcf = 1, alct = 1 + abn abnf = 1, abnt = 1 + ct %ctpr1 ? 
%NOT_LOOP_END + } + { + nop 3 + setbn rbs=63, rsz=26, rcur=0 + rwd 9UL | E2K_LSR_VLC, %lsr + } +clear_rf_90_loop_2: + { + loop_mode + addd 0, 0, %db[0] + addd 0, 0, %db[1] + addd 0, 0, %db[18] + addd 0, 0, %db[19] + addd 0, 0, %db[36] + addd 0, 0, %db[37] + alc alcf = 1, alct = 1 + abn abnf = 1, abnt = 1 + ct %ctpr2 ? %NOT_LOOP_END + } + ct %ctpr3 +.size clear_rf_90, .-clear_rf_90 + +.global clear_rf_99 +.type clear_rf_99,@function +clear_rf_99: + { + nop 1 + disp %ctpr1, clear_rf_99_loop_1 + setwd wsz=99 + setbn rbs=0, rsz=62, rcur=0 + rwd 21UL | E2K_LSR_VLC, %lsr + } + { + disp %ctpr2, clear_rf_99_loop_2 + } + { + return %ctpr3 + } +clear_rf_99_loop_1: + { + loop_mode + addd 0, 0, %db[0] + addd 0, 0, %db[1] + addd 0, 0, %db[42] + addd 0, 0, %db[43] + addd 0, 0, %db[84] + addd 0, 0, %db[85] + alc alcf = 1, alct = 1 + abn abnf = 1, abnt = 1 + ct %ctpr1 ? %NOT_LOOP_END + } + { + nop 3 + setbn rbs=63, rsz=35, rcur=0 + rwd 12UL | E2K_LSR_VLC, %lsr + } +clear_rf_99_loop_2: + { + loop_mode + addd 0, 0, %db[0] + addd 0, 0, %db[1] + addd 0, 0, %db[24] + addd 0, 0, %db[25] + addd 0, 0, %db[48] + addd 0, 0, %db[49] + alc alcf = 1, alct = 1 + abn abnf = 1, abnt = 1 + ct %ctpr2 ? %NOT_LOOP_END + } + ct %ctpr3 +.size clear_rf_99, .-clear_rf_99 + +.global clear_rf_108 +.type clear_rf_108,@function +clear_rf_108: + { + nop 1 + disp %ctpr1, clear_rf_108_loop_1 + setwd wsz=108 + setbn rbs=0, rsz=62, rcur=0 + rwd 21UL | E2K_LSR_VLC, %lsr + } + { + disp %ctpr2, clear_rf_108_loop_2 + } + { + return %ctpr3 + } +clear_rf_108_loop_1: + { + loop_mode + addd 0, 0, %db[0] + addd 0, 0, %db[1] + addd 0, 0, %db[42] + addd 0, 0, %db[43] + addd 0, 0, %db[84] + addd 0, 0, %db[85] + alc alcf = 1, alct = 1 + abn abnf = 1, abnt = 1 + ct %ctpr1 ? 
%NOT_LOOP_END + } + { + nop 3 + setbn rbs=63, rsz=44, rcur=0 + rwd 15UL | E2K_LSR_VLC, %lsr + } +clear_rf_108_loop_2: + { + loop_mode + addd 0, 0, %db[0] + addd 0, 0, %db[1] + addd 0, 0, %db[30] + addd 0, 0, %db[31] + addd 0, 0, %db[60] + addd 0, 0, %db[61] + alc alcf = 1, alct = 1 + abn abnf = 1, abnt = 1 + ct %ctpr2 ? %NOT_LOOP_END + } + ct %ctpr3 +.size clear_rf_108, .-clear_rf_108 diff --git a/arch/e2k/kernel/convert_array.c b/arch/e2k/kernel/convert_array.c new file mode 100644 index 000000000000..f069ef7e19e3 --- /dev/null +++ b/arch/e2k/kernel/convert_array.c @@ -0,0 +1,851 @@ +/* linux/arch/e2k/kernel/convert_array.c, v 1.0 02/11/2019. + * + * This is a utility to support interactions between kernel and + * E2K protected mode user layer. + * + * The function 'convert_array' converts complex protected area structures, + * which can contain protected user pointers to memory (descriptors), + * and/or function pointers (descriptors), into regular C-structures + * (non-protected). + * + * Copyright (C) 2019 MCST + */ + +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_PROTECTED_MODE + +#if (DYNAMIC_DEBUG_SYSCALLP_ENABLED) + /* NB> PM debug module must have been initialized by + * the moment of 'convert_array' invocation; + * we can use simple defines over here. + * For full ones see . + */ +#undef DbgSCP +#if defined(CONFIG_THREAD_INFO_IN_TASK) && defined(CONFIG_SMP) +#define DbgSCP(fmt, ...) \ +do { \ + if (pm_sc_debug_mode & PM_SC_DBG_MODE_CONV_STRUCT) \ + pr_info("%s [%.3d#%d]: %s: " fmt, current->comm, \ + current->cpu, current->pid, \ + __func__, ##__VA_ARGS__); \ +} while (0) +#else /* no 'cpu' field in 'struct task_struct' */ +#define DbgSCP(fmt, ...) 
\ +do { \ + if (pm_sc_debug_mode & PM_SC_DBG_MODE_CONV_STRUCT) \ + pr_info("%s [#%d]: %s: " fmt, current->comm, \ + current->pid, \ + __func__, ##__VA_ARGS__); \ +} while (0) +#endif /* no 'cpu' field in 'struct task_struct' */ + +#undef DbgSCP_ERRMSG +#define DbgSCP_ERRMSG(ErrMsgHeader, fmt, ...) \ +do { \ + if (pm_sc_debug_mode & PM_SC_DBG_MODE_CHECK \ + && !(current->mm->context.pm_sc_debug_mode \ + & PM_SC_DBG_MODE_NO_ERR_MESSAGES)) \ + pr_err("%s [%d]:: %s: " fmt, current->comm, current->pid, \ + ErrMsgHeader, ##__VA_ARGS__); \ +} while (0) + +#undef DbgSCP_ERR +#define DbgSCP_ERR(fmt, ...) DbgSCP_ERRMSG(__func__, fmt, ##__VA_ARGS__) + +#undef DbgSCP_ALERT +#define DbgSCP_ALERT(fmt, ...) \ +do { \ + if (pm_sc_debug_mode & PM_SC_DBG_MODE_CHECK \ + && !(current->mm->context.pm_sc_debug_mode \ + & PM_SC_DBG_MODE_NO_ERR_MESSAGES)) \ + pr_alert("%s [%d]:: %s(): " fmt, current->comm, current->pid, \ + __func__, ##__VA_ARGS__); \ +} while (0) + +#undef DbgSCP_WARN +#define DbgSCP_WARN(fmt, ...) \ +do { \ + if (pm_sc_debug_mode & PM_SC_DBG_MODE_CHECK \ + && !(current->mm->context.pm_sc_debug_mode \ + & PM_SC_DBG_MODE_NO_ERR_MESSAGES)) \ + pr_alert("%s [%d]:: %s(): " fmt, current->comm, current->pid, \ + __func__, ##__VA_ARGS__); \ +} while (0) + +#undef PM_SYSCALL_WARN_ONLY +#define PM_SYSCALL_WARN_ONLY \ + (pm_sc_debug_mode & PM_SC_DBG_MODE_WARN_ONLY) + +#endif /* DYNAMIC_DEBUG_SYSCALLP_ENABLED */ + +#define CONVERT_WARN_ONLY PM_SYSCALL_WARN_ONLY + /* Backward compatibility execution mode. + * When legacy soft delivers to convert_array data to process, + * it may contain data that don't match specified masks. + * This is mainly the case when descriptor expected by + * mask specified is not found in the array structure. + * Normally convert_array reports error to journal and exits + * with corresponding error code. If this var is set, + * convert_array still reports error message but leaves input + * data intact, and returns as if everything were OK (i.e. 0). 
+ */ + +/* Aligning pointer to the upper bound: */ + +static inline +int *align_ptr_up(int *ptr, const int alignment) +{ + if (((unsigned long) ptr) % alignment) + ptr = (int *) (((unsigned long) ptr + + alignment - 1) & ~(alignment - 1)); + + return ptr; +} + +/* TODO: Get rid of PUT_USER_OR_KERNEL when entry #10 is gone and + * Replace 'PUT_USER_OR_KERNEL' with 'put_user' + */ +#define PUT_USER_OR_KERNEL(_mode, ptr, val) \ +do { \ + if (_mode) { \ + if (put_user(val, ptr)) { \ + failed_2_write = 1; \ + goto out; \ + } \ + } else { \ + *ptr = val; \ + } \ +} while (0) + +/* + * This function converts the array of structures, which can contain + * protected user pointers to memory, function descriptors, and int values. + * prot_array - pointer to original (user-type) array + * new_array - pointer for putting of converted array + * max_prot_array_size - the maximum size, which protected array should take + * fields - number of enries in each element + * items - number of identical elements in the array to convert + * mask_type - mask for encoding of field type in each element + * 4 bits per each entry: + * --- 0000 (0x0) - int + * --- 0001 (0x1) - long + * --- 0010 (0x2) - pointer to function + * --- 0011 (0x3) - descriptor (pointer to memory) + * --- 0100 (0x4) - descriptor or int + * --- 0101 (0x5) - descriptor or long + * --- 0110 (0x6) - descriptor or Fptr + * --- 0111 (0x7) - everything is possible + * --- 1*** (0x8) - may be uninitialized (empty tag allowed) + * mask_align - mask for encoding of alignment of the NEXT! 
field + * 4 bits per each entry: + * --- 00 (0x0) - next field aligned as int (to 4 bytes) + * --- 01 (0x1) - next field aligned as long (to 8 bytes) + * --- 10 (0x2) - not used yet + * --- 11 (0x3) - next field aligned as pointer (to 16 bytes) + * mask_rw - mask for encoding access type of the structure elements + * 4 bits per each entry: + * --- 01 (0x1) - the field's content gets read by syscall (READ-able) + * --- 10 (0x2) - the field's content gets updated by syscall (WRITE-able) + * --- 11 (0x3) - the field is both READ-able and WRITE-able + * --- 00 (0x0) - default type; the same as (READ-able) + * rval_mode - error (return value) reporting mode mask: + * 0 - report only critical problems in prot_array structure; + * 1 - return with -EFAULT if wrong tag in 'int' field; + * 2 - --'-- --'-- 'long' field; + * 4 - --'-- --'-- 'func' field; + * 8 - --'-- --'-- 'descr' field; + * 16 - ignore errors in 'int' field; + * 32 - --'-- --'-- 'long' field; + * 64 - --'-- --'-- 'func' field; + * 128 - --'-- --'-- 'descr' field. + * Returns: 0 - if converted OK; + * error number - otherwise. + */ + +extern int get_pm_struct(long __user *prot_array, + long *new_array, + const int max_prot_array_size, const int fields, + const int items, const long mask_type, + const long mask_align, const long mask_rw, + const int rval_mode) +{ +/* Field type, 4 bits: (mask_type & 0xf) */ +#define _INT_FIELD 0x0 /* int value */ +#define _LONG_FIELD 0x1 /* long value */ +#define _FUNC_FIELD 0x2 /* pointer to function */ +#define _PTR_FIELD 0x3 /* pointer to memory */ +#define _INT_PTR_FIELD 0x4 /* int or pointer value */ +#define _LONG_PTR_FIELD 0x5 /* long or pointer value */ +#define _PTR__FUNC_FIELD 0x6 /* descriptor or func.ptr */ +#define _TAG_DEFINED_FIELD 0x7 /* everything is possible */ +#define _UNINITIALIZED_FIELD 0x8 /* tag may be ETAGEWS or ETAGEWD */ + +/* Alignment of the NEXT! 
field, 2 bits: mask_align & 0x3 */ +#define _INT_ALIGN 0x0 /* next field aligned as int (to 4 bytes) */ +#define _LONG_ALIGN 0x1 /* next field aligned as long (to 8 bytes) */ +#define _PTR_ALIGN 0x3 /* next field aligned as pointer (to 16 bytes) */ + +/* Access field type, 2 bits: mask_rw & 0x3 */ +#define _READABLE 0x1 /* field gets read by syscall */ +#define _WRITEABLE 0x2 /* field gets updated by syscall */ + + int struct_len, prot_len; + int elem_type, alignment; + long pat_type, pat_align, pat_rw, val_long; + int __user *ptr_from; + int __user *ptr_to; + int user_mode = 0; + int may_be_uninitialized; + unsigned long pm_sc_debug_mode = current->mm->context.pm_sc_debug_mode; + int misaligned_ptr_from = 0; /* normally ptr_from must be aligned */ + int rval = 0; /* result of the function */ + int failed_2_write = 0; + int val_int, tag; + int i, j; + + DbgSCP("struct128 = 0x%lx, struct64 = 0x%lx, size = %d\n", + prot_array, new_array, max_prot_array_size); + DbgSCP("fields = %d, items = %d, mask_t = 0x%lx, mask_a = 0x%lx, mask_rw = 0x%lx\n", + fields, items, mask_type, mask_align, mask_rw); + + if (!prot_array) { + DbgSCP("Empty prot.array to convert\n"); + return -EINVAL; + } + + /* Check main parameters for validity */ + if (!new_array || !fields || (items <= 0)) { + DbgSCP_ERR("pid#%d: Wrong parameters for convert_array\n", + current->pid); + return -EINVAL; + } + + if ((uintptr_t)prot_array & 0x7) { + DbgSCP_ERR("pid#%d: Unaligned input protected array (0x%lx)\n", + current->pid, prot_array); + return -EINVAL; + } + + /* Counting protected structure size: */ + pat_align = mask_align; + struct_len = 0; + ptr_from = (int *) prot_array; + + for (i = 0; i < fields; i++) { + alignment = ((pat_align & 0xf) + 1) * sizeof(int); + if (((unsigned long) ptr_from) % alignment) { + misaligned_ptr_from = 1; + struct_len += ((((unsigned long) ptr_from + + alignment - 1) & ~(alignment - 1)) + - ((unsigned long) ptr_from)); + ptr_from = (int *) (((unsigned long) ptr_from + + 
alignment - 1) & ~(alignment - 1)); + } else { + struct_len += alignment; + ptr_from += (alignment / sizeof(int)); + } + pat_align >>= 4; + } + + if (struct_len & 0x7) { + /* Input structure size must be factor of 8. */ + /* Extending size (required by NATIVE_LOAD_VAL_AND_TAGW): */ + struct_len = (struct_len + 8) & ~0x7; + } + + /* Count the size of the whole array */ + prot_len = struct_len * items; + + /* Nothing to be converted if real size of the array is zero */ + if (prot_len == 0) { + DbgSCP("prot_len == 0; returning 0\n"); + return 0; + } + + /* + * Real size of the array can't be more than maximum + * size of this array in user space + */ + if (prot_len > max_prot_array_size) { + DbgSCP_ERR("prot_len (%d) > max_prot_array_size(%d)\n", + prot_len, max_prot_array_size); + if (misaligned_ptr_from) + DbgSCP_ERR("prot_array (0x%lx) must be properly aligned\n", + prot_array); + return -EFAULT; + } + + if (arch_init_pm_sc_debug_mode(PM_SC_DBG_MODE_CONV_STRUCT)) { + /* Displaying input (protected) array: */ + long *lptr = kmalloc(prot_len + 16, GFP_KERNEL); + long *mem_lptr = lptr; + + /* NB> Alignment required not to lose tags */ + lptr = (long *) (((uintptr_t) lptr + 15) & ~0xf); + /* Copy original array with tags to tmp array for converting */ + if (copy_from_user_with_tags(lptr, prot_array, prot_len)) { + DbgSCP_ERR("pid#%d Copying original array (0x%lx : %d) failed\n", + current->pid, (long) lptr, prot_len); + kfree(mem_lptr); + rval = -EFAULT; + goto out; + } + pr_info("get_pm_struct: sizeof(struct128) = %zd (words):\n", + items * (struct_len / sizeof(int))); + if (items > 1) + pr_info("[array size: %d]\n", items); + for (j = 0; j < items; j++) { + if (items > 1) + pr_info("[element #%d]\n", j); + for (i = 0; i < (struct_len / sizeof(long)); i++) { + NATIVE_LOAD_VAL_AND_TAGD((long *) lptr, + val_long, tag); + pr_info("\t[0x%x] 0x%.8x.%.8x\n", tag, + (int)(*lptr >> 32), (int)*lptr); + lptr++; + } + } + kfree(mem_lptr); + } + + /* Check ptr_to: user or kernel 
address */ + ptr_to = (int *) new_array; + if ((long) ptr_to < TASK_SIZE) { + user_mode = 1; + } + +TRY_USR_PFAULT { + + /* Detailed analysis of data encoded in the input structure(s): */ + for (i = 0; i < items; i++) { + ptr_from = (int *)((uintptr_t) prot_array + struct_len * i); + pat_type = mask_type; + pat_align = mask_align; + pat_rw = mask_rw; + + /* Handle each entry in the strcut */ + for (j = 0; j < fields; j++) { + + elem_type = pat_type & 0x7; + may_be_uninitialized = pat_type & _UNINITIALIZED_FIELD; +/* + DbgSCP("round %d: type=%d from=0x%lx to=0x%lx\n", + j, elem_type, (long)ptr_from, (long)ptr_to); +*/ + /* Load the field by type specified in mask_type */ +load_current_element: + switch (elem_type) { + case _INT_FIELD: + /* Load word (4 bytes) with tags */ + NATIVE_LOAD_VAL_AND_TAGW((int *) ptr_from, + val_int, tag); + + if ((tag == ETAGEWS) && may_be_uninitialized) + val_int = 0; /* we don't copy trash */ + /* Check for valid 'int' field */ + else if (likely((pat_rw & 0xf) != _WRITEABLE) + && (tag != ETAGNUM) + && !(rval_mode + & CONV_ARR_IGNORE_INT_FLD_ERR)) { +#define ERROR_UNINIT_FLDI \ + "uninitialized value (tag=0x%x) at struct128[%d]: 0x%x\n" +#define ERROR_UNEXPECTED_VALI \ + "unexpected value (tag=0x%x) at struct128[%d]: 0x%x\n" + if (tag == ETAGEWS) + DbgSCP_ALERT(ERROR_UNINIT_FLDI, + tag, j, val_int); + else + DbgSCP_ALERT(ERROR_UNEXPECTED_VALI, + tag, j, val_int); +#define IGNORING_ARR_ELEM \ + "ignoring struct128[%d]; replaced with zero\n" + /* Don't copy field of another type */ + if (val_int && (!CONVERT_WARN_ONLY)) { + DbgSCP_ALERT(IGNORING_ARR_ELEM, + i); + val_int = 0; + } + if (rval_mode & CONV_ARR_WRONG_INT_FLD) + rval = -EFAULT; + } + if ((long)ptr_to & 1) { /* write at higher word */ + PUT_USER_OR_KERNEL(user_mode, + (int *) ptr_to, val_int); + } else { /* write at lower word */ + /* NB> To avoid trash in higher word, + * we save it as long val. 
+ */ + val_long = (long) val_int; + PUT_USER_OR_KERNEL(user_mode, + (long *) ptr_to, val_long); + } + + /* Move on ptr_from and ptr_to: */ + ptr_from++; + ptr_to++; + + break; + case _LONG_FIELD: + /* Load dword (8 bytes) with tags */ + NATIVE_LOAD_VAL_AND_TAGD((long *) ptr_from, + val_long, tag); + + if ((tag == ETAGEWD) && may_be_uninitialized) + val_long = 0; /* we don't copy trash */ + /* Check for valid 'long' field */ + else if (likely((pat_rw & 0xf) != _WRITEABLE) + && (tag != ETAGNUM) + && !(rval_mode + & CONV_ARR_IGNORE_LONG_FLD_ERR)) { +#define ERROR_UNINIT_FLDL \ + "uninitialized value (tag=0x%x) at struct128[%d]: 0x%lx\n" +#define ERROR_UNEXPECTED_VALL \ + "unexpected value (tag=0x%x) at struct128[%d]: 0x%lx\n" + if (tag == ETAGEWD) + DbgSCP_ALERT(ERROR_UNINIT_FLDL, + tag, j, val_long); + else + DbgSCP_ALERT(ERROR_UNEXPECTED_VALL, + tag, j, val_long); + /* Don't copy field of another type */ + if (val_long && (!CONVERT_WARN_ONLY)) { + DbgSCP_ALERT(IGNORING_ARR_ELEM, + i); + val_long = 0; + } + if (rval_mode & CONV_ARR_WRONG_LONG_FLD) + rval = -EFAULT; + } + PUT_USER_OR_KERNEL(user_mode, + (long *) ptr_to, val_long); + + /* Move on ptr_from and ptr_to: */ + ptr_from += 2; + ptr_to += 2; + + break; + case _FUNC_FIELD: + /* Load dword (8 bytes) with tags */ + NATIVE_LOAD_VAL_AND_TAGD((long *) ptr_from, + val_long, tag); + + if ((tag == ETAGEWD) && may_be_uninitialized) + val_long = 0; /* we don't copy trash */ + /* Check for valid func field */ + else if (likely((pat_rw & 0xf) != _WRITEABLE) + && (tag != ETAGPLD) && val_long + && (!(rval_mode + & CONV_ARR_IGNORE_FUNC_FLD_ERR)) + || tag) { +#define ERROR_UNEXPECTED_ELEMENTF \ + "not function pointer (tag=0x%x) at struct128[%d]: 0x%lx\n" + DbgSCP_ALERT(ERROR_UNEXPECTED_ELEMENTF, + tag, j, val_long); + if (rval_mode & CONV_ARR_WRONG_FUNC_FLD) + rval = -EFAULT; + if (!CONVERT_WARN_ONLY) + goto out; + } + PUT_USER_OR_KERNEL(user_mode, + (long *) ptr_to, val_long); + + /* Move on ptr_from and ptr_to: */ + 
ptr_from += 4; + ptr_to += 2; + + break; + case _PTR_FIELD: { + /* + * Load dword (8 bytes) with tags + * (the first half of descriptor) + */ + NATIVE_LOAD_VAL_AND_TAGD((long *) ptr_from, + val_long, tag); + + long next_val_long; + int dtag; + e2k_ptr_t __ptr__; + + /* + * The next dword (8 bytes) is + * the second half of descriptor + */ + ptr_from += 2; + NATIVE_LOAD_VAL_AND_TAGD((long *) ptr_from, + next_val_long, dtag); + dtag = tag | (dtag << 4); + + /* Copy valid pointer field */ + if ((dtag == ETAGAPQ) || + (pat_rw & 0xf) == _WRITEABLE) { + AW(__ptr__).lo = val_long; + AW(__ptr__).hi = next_val_long; + PUT_USER_OR_KERNEL( + user_mode, (long *) ptr_to, + E2K_PTR_PTR(__ptr__, + GET_SBR_HI())); + goto eo_ptr_field; + } + if ((tag == ETAGEWD) && may_be_uninitialized) + val_long = 0; /* we don't copy trash */ + /* Something different found: */ + else if ((val_long || next_val_long) + && (!(rval_mode + & CONV_ARR_IGNORE_DSCR_FLD_ERR)) + || tag) { +#define ERR_NOT_DSCR \ + "not descriptor (tag=0x%x) at struct128[%d]: 0x%lx : 0x%lx\n" + DbgSCP_ALERT(ERR_NOT_DSCR, dtag, j, + val_long, next_val_long); + if (rval_mode & CONV_ARR_WRONG_DSCR_FLD) + rval = -EFAULT; + if (!CONVERT_WARN_ONLY) + goto out; + } + PUT_USER_OR_KERNEL(user_mode, + (long *) ptr_to, val_long); +eo_ptr_field: + /* Move on ptr_from and ptr_to: */ + ptr_from += 2; + ptr_to += 2; + + break; + } + case _INT_PTR_FIELD: + case _LONG_PTR_FIELD: { + /* Check for descriptor tag in the field: */ + NATIVE_LOAD_VAL_AND_TAGD((long *) ptr_from, + val_long, tag); + if (tag == E2K_AP_LO_ETAG) + /* This must be descriptor: */ + elem_type = _PTR_FIELD; + else /* This is 'int' or 'long' */ + elem_type &= 0x3; + /* _INT_PTR_FIELD -> _INT_FIELD */ + /* _LONG_PTR_FIELD -> _LONG_FIELD */ + goto load_current_element; + } + case _PTR__FUNC_FIELD: { + /* Check for descriptor tag in the field: */ + NATIVE_LOAD_VAL_AND_TAGD((long *) ptr_from, + val_long, tag); + elem_type = (tag == E2K_AP_LO_ETAG) ? 
_PTR_FIELD + : _FUNC_FIELD; + goto load_current_element; + } + case _TAG_DEFINED_FIELD: { + /* Check for tag in the field: */ + NATIVE_LOAD_VAL_AND_TAGD((long *) ptr_from, + val_long, tag); + if (tag == E2K_AP_LO_ETAG) + elem_type = _PTR_FIELD; + else if (tag == E2K_PL_ETAG) + elem_type = _FUNC_FIELD; + else /* This is 'int' or 'long' */ + elem_type = _INT_FIELD; + goto load_current_element; + } + default: + /* Otherwise it is something invalid. */ + return -EFAULT; + } + + /* Fixing ptr_from/ptr_to alignment: */ + alignment = pat_align & 0xf; + ptr_from = align_ptr_up(ptr_from, /* 128 bit */ + (alignment + 1) * sizeof(int)); + if (alignment) + ptr_to = align_ptr_up(ptr_to, 8); /* 64 bit */ +/* + DbgSCP("alignment=%d from->0x%lx to->0x%lx\n", + alignment, (long)ptr_from, (long)ptr_to); +*/ + /* Moving on structure field masks: */ + pat_type >>= 4; + pat_align >>= 4; + pat_rw >>= 4; + } + } + +} CATCH_USR_PFAULT { +#define ERR_FATAL "FATAL ERROR: failed to read from 0x%lx (field %d) !!!\n" + DbgSCP_ALERT(ERR_FATAL, (long) ptr_from, j); + return -EFAULT; +} END_USR_PFAULT + + DbgSCP("The array was converted successfully\n"); + + if (!arch_init_pm_sc_debug_mode(PM_SC_DBG_MODE_CONV_STRUCT)) + goto out; + /* Printing out the converted array content: */ + ptr_to = (int *) new_array; + for (i = 0; i < items; i++) { + pat_type = mask_type; + pat_align = mask_align; + for (j = 0; j < fields; j++) { + pr_info("convert_array struct128[%d]=", + i * fields + j); + /* Outputs a field based upon mask_type */ + switch (pat_type & 0x7) { + case _INT_FIELD: { + pr_info("[INT] \t%d / 0x%x\n", + *(int *)ptr_to, *(int *)ptr_to); + ptr_to++; + break; + } + case _LONG_FIELD: { + pr_info("[LONG]\t%ld / 0x%lx\n", + *(long *)ptr_to, *(long *)ptr_to); + ptr_to += 2; + break; + } + case _FUNC_FIELD: { + pr_info("[FPTR]\t0x%lx\n", + *(unsigned long *)ptr_to); + ptr_to += 2; + break; + } + case _INT_PTR_FIELD: + case _LONG_PTR_FIELD: + case _PTR_FIELD: { + pr_info("[PTR] \t0x%lx\n", + 
*(unsigned long *)ptr_to); + ptr_to += 2; + break; + } + default: + /* Otherwise it is something invalid. */ + pr_err("Error in %s print:\n", __func__); + pr_err("\t\titem=%d field=%d pat_type=%d\n", + i, j, (int)pat_type & 0xf); + } + /* Check for correct alignment: */ + if ((pat_align & 0xf) != _INT_ALIGN) + if ((unsigned long)ptr_to & 0x7) + ptr_to++; /* even address */ + pat_type >>= 4; + pat_align >>= 4; + } + } + struct_len = ((unsigned long) ptr_to - (unsigned long) new_array) + / sizeof(int); /* in words */ + ptr_to = (int *)new_array; + pr_info("%s: sizeof(ptr_to=0x%lx) = %d (words):\n", + __func__, (long) ptr_to, struct_len); + for (i = 0; i < struct_len; i++) { + pr_info("\t0x%.8x\n", *ptr_to); + ptr_to++; + } + +out: + if (failed_2_write) { +#define ERR_FATAL_WRITE "FATAL ERROR: failed to write at 0x%lx (field %d) !!!\n" + DbgSCP_ALERT(ERR_FATAL_WRITE, (long) ptr_to, j /*field*/); + + } + + return rval; +} + + +/* + * This function checks protected syscall arguments on correspondence with + * the given mask: + * args_array - pointer to argument array (tag-less) + * tags - argument tags (4 bits per arg; lower to higher bits ordered) + * arg_num - number of arguments + * mask_type - mask for encoding of field type in each element + * 4 bits per each entry: + * --- 00 (0x0) - int + * --- 01 (0x1) - long + * --- 10 (0x2) - pointer to function + * --- 11 (0x3) - pointer to memory + * --- 0100 (0x4) - descriptor or int + * --- 0101 (0x5) - descriptor or long + * rval_mode - error (return value) reporting mode mask: + * 0 - report only critical problems; + * 1 - return with -EFAULT if wrong tag in 'int' field; + * 2 - --'-- --'-- 'long' field; + * 4 - --'-- --'-- 'func' field; + * 8 - --'-- --'-- 'descr' field; + * 16 - ignore errors in 'int' field; + * 32 - --'-- --'-- 'long' field; + * 64 - --'-- --'-- 'func' field; + * 128 - --'-- --'-- 'descr' field. + * Returns: 0 - if converted OK; + * error number - otherwise. 
+ */ +extern int check_args_array4(const long *args_array, + const long arg_tags, + const int arg_num, + const long mask_type, + const int rval_mode, + const char *ErrMsgHeader) +{ + long arg_types; + long *argument; + long tag; + unsigned long pm_sc_debug_mode = current->mm->context.pm_sc_debug_mode; + int rval = 0; /* result of the function */ + int j, t, uninit; + + DbgSCP("args_array=0x%lx, tags=0x%lx, arg_num=%d, mask_type=x%lx\n", + args_array, arg_tags, arg_num, mask_type); + + /* Check main parameters for validity */ + if (!args_array || !arg_num) { + DbgSCP_ERR("Wrong parameters for %s\n", __func__); + return -EINVAL; + } + + /* Checking for correctness of each argument type: */ + argument = (long *) args_array; + tag = arg_tags; + arg_types = mask_type; + for (j = 0; j < arg_num; + j++, argument += 2, tag >>= 8, arg_types >>= 4) { + + uninit = arg_types & _UNINITIALIZED_FIELD; + t = tag & 0xf; + switch (arg_types & 0x7) { + case _INT_FIELD: + if ((t == ETAGNUM) || ((t == ETAGEWS) && uninit)) + break; + if (!(rval_mode & CONV_ARR_IGNORE_INT_FLD_ERR)) { +#define ERROR_UNEXPECTED_ARG_TYPE_I \ + "unexpected value (tag=0x%lx) at arg #%d: %d\n" + DbgSCP_ERRMSG(ErrMsgHeader, + ERROR_UNEXPECTED_ARG_TYPE_I, + (tag & 0xf), j + 1, (int) *argument); + if (rval_mode & CONV_ARR_WRONG_INT_FLD) + rval = -EFAULT; + } + break; + case _LONG_FIELD: + if ((t == ETAGNUM) || ((t == ETAGEWD) && uninit)) + break; + if (!(rval_mode & CONV_ARR_IGNORE_LONG_FLD_ERR)) { +#define ERROR_UNEXPECTED_ARG_TYPE_L \ + "unexpected value (tag=0x%lx) at arg #%d: %ld\n" + DbgSCP_ERRMSG(ErrMsgHeader, + ERROR_UNEXPECTED_ARG_TYPE_L, + (tag & 0xf), j + 1, *argument); + if (rval_mode & CONV_ARR_WRONG_LONG_FLD) + rval = -EFAULT; + } + break; + case _FUNC_FIELD: + if ((t == ETAGPLD) || ((t == ETAGEWD) && uninit)) + break; + if (*argument + && (!(rval_mode & CONV_ARR_IGNORE_FUNC_FLD_ERR)) + || (tag & 0xf)) { +#define ERROR_UNEXPECTED_ARG_TYPE_F \ + "not function pointer (tag=0x%lx) at prot_array[%d]: 
%ld\n" + DbgSCP_ERRMSG(ErrMsgHeader, + ERROR_UNEXPECTED_ARG_TYPE_F, + (tag & 0xf), j + 1, *argument); + if (rval_mode & CONV_ARR_WRONG_FUNC_FLD) + rval = -EFAULT; + if (!CONVERT_WARN_ONLY) + goto out; + } + break; + case _PTR_FIELD: { + long val_long, next_val_long; + int dtag; + + dtag = tag & 0xff; + + if ((dtag == ETAGAPQ) || ((t == ETAGEWD) && uninit)) + break; + /* Something different found: */ + val_long = *argument; + next_val_long = *(argument + 1); + if ((val_long || next_val_long) + && (!(rval_mode + & CONV_ARR_IGNORE_DSCR_FLD_ERR)) + || dtag) { +#define ERR_NOT_DSCR_P \ + "not descriptor (tag=0x%x) at arg #%d: 0x%lx : 0x%lx\n" + DbgSCP_ERRMSG(ErrMsgHeader, + ERR_NOT_DSCR_P, dtag, j + 1, + val_long, next_val_long); + if (rval_mode & CONV_ARR_WRONG_DSCR_FLD) + rval = -EFAULT; + if (!CONVERT_WARN_ONLY) + goto out; + } + break; + case _INT_PTR_FIELD: + case _LONG_PTR_FIELD: + /* any type may be over here */ + break; + } + default: + /* Otherwise it is something invalid. */ + return -EFAULT; + } + } +out: + return rval; +} + + +static inline +unsigned long get_mask4_from_mask2(unsigned long mask2) +{ + unsigned long mask4 = 0; + int i; + + for (i = 0; mask2; i++, mask2 >>= 2) + mask4 |= (mask2 & 0x3) << (i * 4); + if (current->mm->context.pm_sc_debug_mode & PM_SC_DBG_MODE_CONV_STRUCT) + pr_info("%s : mask4 = 0x%lx\n", __func__, mask4); + return mask4; +} + +/* This function realizes old mask format with 2 bits per structure field */ + +extern int convert_array_3(long __user *prot_array, long *new_array, + const int max_prot_array_size, + const int fields, const int items, + unsigned long mask_type, unsigned long mask_align, + unsigned long mask_rw, const int rval_mode) +{ + long mask_type4, mask_align4, mask_rw4; + + mask_type4 = get_mask4_from_mask2(mask_type); + mask_align4 = get_mask4_from_mask2(mask_align); + mask_rw4 = get_mask4_from_mask2(mask_rw); + + return get_pm_struct(prot_array, new_array, + max_prot_array_size, fields, items, + mask_type4, 
mask_align4, mask_rw4, rval_mode); +} + +/* This function realizes old mask format with 2 bits per structure field */ + +extern int check_args_array(const long *args_array, + const long arg_tags, + const int arg_num, + unsigned long mask_type, + const int rval_mode, + const char *ErrMsgHeader) +{ + long mask_type4; + + mask_type4 = get_mask4_from_mask2(mask_type); + + return check_args_array4(args_array, arg_tags, arg_num, + mask_type4, rval_mode, ErrMsgHeader); +} + +#endif /* CONFIG_PROTECTED_MODE */ diff --git a/arch/e2k/kernel/copy-hw-stacks.c b/arch/e2k/kernel/copy-hw-stacks.c new file mode 100644 index 000000000000..f43ec9cde03f --- /dev/null +++ b/arch/e2k/kernel/copy-hw-stacks.c @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright 2021 mcst.ru + */ + +#include + +#include +#include + +#define CREATE_TRACE_POINTS +#include + +/** + * user_hw_stacks_copy_full - copy part of user stacks that was SPILLed + * into kernel back to user stacks. + * @stacks - saved user stack registers + * @regs - pt_regs pointer + * @crs - last frame to copy + * + * If @crs is not NULL then the frame pointed to by it will also be copied + * to userspace. Note that 'stacks->pcsp_hi.ind' is _not_ updated after + * copying since it would leave stack in inconsistent state (with two + * copies of the same @crs frame), this is left to the caller. * + * + * Inlining this reduces the amount of memory to copy in + * collapse_kernel_hw_stacks(). 
+ */ +int user_hw_stacks_copy_full(struct e2k_stacks *stacks, + pt_regs_t *regs, e2k_mem_crs_t *crs) +{ + return do_user_hw_stacks_copy_full(stacks, regs, crs); +} + diff --git a/arch/e2k/kernel/cpu/Makefile b/arch/e2k/kernel/cpu/Makefile new file mode 100644 index 000000000000..76692412d5bc --- /dev/null +++ b/arch/e2k/kernel/cpu/Makefile @@ -0,0 +1,55 @@ +obj-y := iset_v2.o iset_v3.o iset_v5.o iset_v6.o cacheinfo.o recovery_string_v5.o + +CFLAGS_REMOVE_iset_v2.o = $(CFLAGS_ALL_CPUS) +CFLAGS_REMOVE_iset_v3.o = $(CFLAGS_ALL_CPUS) +CFLAGS_REMOVE_iset_v5.o = $(CFLAGS_ALL_CPUS) +CFLAGS_REMOVE_iset_v6.o = $(CFLAGS_ALL_CPUS) +AFLAGS_REMOVE_recovery_string_v5.o = $(CFLAGS_ALL_CPUS) + +CFLAGS_iset_v2.o := -march=elbrus-v2 +CFLAGS_iset_v3.o := -march=elbrus-v3 +CFLAGS_iset_v5.o := -march=elbrus-v5 +CFLAGS_iset_v6.o := -march=elbrus-v6 +AFLAGS_recovery_string_v5.o := -march=elbrus-v5 + +ifdef CONFIG_FUNCTION_TRACER +# Do not profile debug and lowlevel utilities +CFLAGS_REMOVE_es2.o = -pg +CFLAGS_REMOVE_e2s.o = -pg +CFLAGS_REMOVE_e8c.o = -pg +CFLAGS_REMOVE_e1cp.o = -pg +CFLAGS_REMOVE_e8c2.o = -pg +CFLAGS_REMOVE_e12c.o = -pg +CFLAGS_REMOVE_e16c.o = -pg +CFLAGS_REMOVE_e2c3.o = -pg +endif + +ifeq ($(CONFIG_E2K_MACHINE),y) +obj-$(CONFIG_E2K_ES2_DSP) += es2.o +obj-$(CONFIG_E2K_ES2_RU) += es2.o +obj-$(CONFIG_E2K_E2S) += e2s.o +obj-$(CONFIG_E2K_E8C) += e8c.o +obj-$(CONFIG_E2K_E1CP) += e1cp.o +obj-$(CONFIG_E2K_E8C2) += e8c2.o +obj-$(CONFIG_E2K_E12C) += e12c.o +obj-$(CONFIG_E2K_E16C) += e16c.o +obj-$(CONFIG_E2K_E2C3) += e2c3.o +else +CFLAGS_es2.o = $(CFLAGS_ES2) +CFLAGS_e2s.o = $(CFLAGS_E2S) +CFLAGS_e8c.o = $(CFLAGS_E8C) +CFLAGS_e1cp.o = $(CFLAGS_E1CP) +CFLAGS_e8c2.o = $(CFLAGS_E8C2) +CFLAGS_e12c.o = $(CFLAGS_E12C) +CFLAGS_e16c.o = $(CFLAGS_E16C) +CFLAGS_e2c3.o = $(CFLAGS_E2C3) +obj-$(CONFIG_CPU_E2S) += e2s.o +obj-$(CONFIG_CPU_ES2) += es2.o +obj-$(CONFIG_CPU_E8C) += e8c.o +obj-$(CONFIG_CPU_E1CP) += e1cp.o +obj-$(CONFIG_CPU_E8C2) += e8c2.o +obj-$(CONFIG_CPU_E12C) += e12c.o 
+obj-$(CONFIG_CPU_E16C) += e16c.o +obj-$(CONFIG_CPU_E2C3) += e2c3.o +endif + diff --git a/arch/e2k/kernel/cpu/cacheinfo.c b/arch/e2k/kernel/cpu/cacheinfo.c new file mode 100644 index 000000000000..74e9de9e4c78 --- /dev/null +++ b/arch/e2k/kernel/cpu/cacheinfo.c @@ -0,0 +1,216 @@ +/* + * Extract CPU cache information and expose it via sysfs. + */ + +#include +#include +#include + +struct e2k_cache_info { + u32 cpu_mdl : 8; + u32 level : 3; + u32 type : 3; + u32 private : 1; + u32 associativity : 8; + u32 physical_line_partition : 8; + unsigned int cache_size; + unsigned int line_size; + unsigned int attributes; +}; + +static const struct e2k_cache_info e2k_caches[] = { + { IDR_ES2_DSP_MDL, 1, CACHE_TYPE_INST, 1, 4, 1, 64 * 1024, 256, + CACHE_READ_ALLOCATE | CACHE_WRITE_THROUGH }, + { IDR_ES2_DSP_MDL, 1, CACHE_TYPE_DATA, 1, 4, 1, 64 * 1024, 32, + CACHE_READ_ALLOCATE | CACHE_WRITE_THROUGH }, + { IDR_ES2_DSP_MDL, 2, CACHE_TYPE_UNIFIED, 1, 4, 4, 1024 * 1024, 64, + CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE | CACHE_WRITE_BACK }, + { IDR_ES2_RU_MDL, 1, CACHE_TYPE_INST, 1, 4, 1, 64 * 1024, 256, + CACHE_READ_ALLOCATE | CACHE_WRITE_THROUGH }, + { IDR_ES2_RU_MDL, 1, CACHE_TYPE_DATA, 1, 4, 1, 64 * 1024, 32, + CACHE_READ_ALLOCATE | CACHE_WRITE_THROUGH }, + { IDR_ES2_RU_MDL, 2, CACHE_TYPE_UNIFIED, 1, 4, 4, 1024 * 1024, 64, + CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE | CACHE_WRITE_BACK }, + { IDR_E2S_MDL, 1, CACHE_TYPE_INST, 1, 4, 1, 128 * 1024, 256, + CACHE_READ_ALLOCATE | CACHE_WRITE_THROUGH }, + { IDR_E2S_MDL, 1, CACHE_TYPE_DATA, 1, 4, 1, 64 * 1024, 32, + CACHE_READ_ALLOCATE | CACHE_WRITE_THROUGH }, + { IDR_E2S_MDL, 2, CACHE_TYPE_UNIFIED, 1, 4, 4, 2 * 1024 * 1024, 64, + CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE | CACHE_WRITE_BACK }, + { IDR_E8C_MDL, 1, CACHE_TYPE_INST, 1, 4, 1, 128 * 1024, 256, + CACHE_READ_ALLOCATE | CACHE_WRITE_THROUGH }, + { IDR_E8C_MDL, 1, CACHE_TYPE_DATA, 1, 4, 1, 64 * 1024, 32, + CACHE_READ_ALLOCATE | CACHE_WRITE_THROUGH }, + { IDR_E8C_MDL, 2, 
CACHE_TYPE_UNIFIED, 1, 4, 4, 512 * 1024, 64, + CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE | CACHE_WRITE_BACK }, + { IDR_E8C_MDL, 3, CACHE_TYPE_UNIFIED, 0, 16, 8, 16 * 1024 * 1024, 64, + CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE | CACHE_WRITE_BACK }, + { IDR_E1CP_MDL, 1, CACHE_TYPE_INST, 1, 4, 1, 128 * 1024, 256, + CACHE_READ_ALLOCATE | CACHE_WRITE_THROUGH }, + { IDR_E1CP_MDL, 1, CACHE_TYPE_DATA, 1, 4, 1, 64 * 1024, 32, + CACHE_READ_ALLOCATE | CACHE_WRITE_THROUGH }, + { IDR_E1CP_MDL, 2, CACHE_TYPE_UNIFIED, 1, 4, 4, 2 * 1024 * 1024, 64, + CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE | CACHE_WRITE_BACK }, + { IDR_E8C2_MDL, 1, CACHE_TYPE_INST, 1, 4, 1, 128 * 1024, 256, + CACHE_READ_ALLOCATE | CACHE_WRITE_THROUGH }, + { IDR_E8C2_MDL, 1, CACHE_TYPE_DATA, 1, 4, 1, 64 * 1024, 32, + CACHE_READ_ALLOCATE | CACHE_WRITE_THROUGH }, + { IDR_E8C2_MDL, 2, CACHE_TYPE_UNIFIED, 1, 4, 4, 512 * 1024, 64, + CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE | CACHE_WRITE_BACK }, + { IDR_E8C2_MDL, 3, CACHE_TYPE_UNIFIED, 0, 16, 8, 16 * 1024 * 1024, 64, + CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE | CACHE_WRITE_BACK }, + { IDR_E12C_MDL, 1, CACHE_TYPE_INST, 1, 4, 1, 128 * 1024, 256, + CACHE_READ_ALLOCATE | CACHE_WRITE_THROUGH }, + { IDR_E12C_MDL, 1, CACHE_TYPE_DATA, 1, 4, 1, 64 * 1024, 32, + CACHE_READ_ALLOCATE | CACHE_WRITE_THROUGH }, + { IDR_E12C_MDL, 2, CACHE_TYPE_UNIFIED, 1, 4, 4, 1024 * 1024, 64, + CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE | CACHE_WRITE_BACK }, + { IDR_E12C_MDL, 3, CACHE_TYPE_UNIFIED, 1, 16, 16, 24 * 1024 * 1024, 64, + CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE | CACHE_WRITE_BACK }, + { IDR_E16C_MDL, 1, CACHE_TYPE_INST, 1, 4, 1, 128 * 1024, 256, + CACHE_READ_ALLOCATE | CACHE_WRITE_THROUGH }, + { IDR_E16C_MDL, 1, CACHE_TYPE_DATA, 1, 4, 1, 64 * 1024, 32, + CACHE_READ_ALLOCATE | CACHE_WRITE_THROUGH }, + { IDR_E16C_MDL, 2, CACHE_TYPE_UNIFIED, 1, 4, 4, 1024 * 1024, 64, + CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE | CACHE_WRITE_BACK }, + { IDR_E16C_MDL, 3, CACHE_TYPE_UNIFIED, 0, 16, 
16, 32 * 1024 * 1024, 64, + CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE | CACHE_WRITE_BACK }, + { IDR_E2C3_MDL, 1, CACHE_TYPE_INST, 1, 4, 1, 128 * 1024, 256, + CACHE_READ_ALLOCATE | CACHE_WRITE_THROUGH }, + { IDR_E2C3_MDL, 1, CACHE_TYPE_DATA, 1, 4, 1, 64 * 1024, 32, + CACHE_READ_ALLOCATE | CACHE_WRITE_THROUGH }, + { IDR_E2C3_MDL, 2, CACHE_TYPE_UNIFIED, 1, 4, 4, 2 * 1024 * 1024, 64, + CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE | CACHE_WRITE_BACK }, + { IDR_NONE, 0, 0, 0, 0, 0, 0, 0, 0 } +}; + +static void ci_leaf_init(struct cacheinfo *this_leaf, + const struct e2k_cache_info *ci, int cpu) +{ + int num_sets; + + this_leaf->level = ci->level; + this_leaf->type = ci->type; + this_leaf->coherency_line_size = ci->line_size; + this_leaf->ways_of_associativity = ci->associativity; + this_leaf->size = ci->cache_size; + this_leaf->physical_line_partition = ci->physical_line_partition; + this_leaf->attributes = ci->attributes; + + num_sets = this_leaf->size / this_leaf->coherency_line_size; + num_sets /= this_leaf->ways_of_associativity; + this_leaf->number_of_sets = num_sets; + + if (ci->private) + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); + else + cpumask_copy(&this_leaf->shared_cpu_map, cpu_cpu_mask(cpu)); + + /* Unlike s390, we do not disable sysfs for shared caches */ + this_leaf->disable_sysfs = false; + this_leaf->priv = (void *)ci; +} + +int init_cache_level(unsigned int cpu) +{ + int cpu_mdl = machine.native_id & MACHINE_ID_CPU_TYPE_MASK; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + unsigned int max_level = 0, leaves = 0; + const struct e2k_cache_info *ci; + + if (!this_cpu_ci) + return -EINVAL; + + for (ci = e2k_caches; ci->cpu_mdl != IDR_NONE; ci++) { + if (ci->cpu_mdl != cpu_mdl) + continue; + + ++leaves; + if (ci->level > max_level) + max_level = ci->level; + } + + if (WARN_ONCE(!leaves, "Provide cache info for the new processor\n")) + return -EINVAL; + + this_cpu_ci->num_levels = max_level; + this_cpu_ci->num_leaves = leaves; + + 
return 0; +} + +int populate_cache_leaves(unsigned int cpu) +{ + int cpu_mdl = machine.native_id & MACHINE_ID_CPU_TYPE_MASK; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + struct cacheinfo *this_leaf = this_cpu_ci->info_list; + const struct e2k_cache_info *ci; + + if (!this_leaf) + return -EINVAL; + + for (ci = e2k_caches; ci->cpu_mdl != IDR_NONE; ci++) { + if (ci->cpu_mdl != cpu_mdl) + continue; + + ci_leaf_init(this_leaf, ci, cpu); + ++this_leaf; + } + + this_cpu_ci->cpu_map_populated = true; + + return 0; +} + + +static const char * const cache_type_string[] = { + "", + "Instruction", + "Data", + "", + "Unified", +}; + +u64 cacheinfo_get_l1d_line_size() +{ + struct cpu_cacheinfo *this_cpu_ci; + struct cacheinfo *cache; + int idx; + + this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask)); + for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) { + cache = this_cpu_ci->info_list + idx; + if (cache->level != 1 || + cache->type != CACHE_TYPE_DATA && + cache->type != CACHE_TYPE_UNIFIED) + continue; + + return cache->coherency_line_size; + } + + WARN_ON_ONCE(system_state == SYSTEM_RUNNING); + + return 32; +} + +void show_cacheinfo(struct seq_file *m) +{ + struct cpu_cacheinfo *this_cpu_ci; + struct cacheinfo *cache; + const struct e2k_cache_info *ci; + int idx; + + this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask)); + for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) { + cache = this_cpu_ci->info_list + idx; + ci = cache->priv; + seq_printf(m, "cache%-11d: ", idx); + seq_printf(m, "level=%d ", cache->level); + seq_printf(m, "type=%s ", cache_type_string[cache->type]); + seq_printf(m, "scope=%s ", ci->private ? 
"Private" : "Shared"); + seq_printf(m, "size=%dK ", cache->size >> 10); + seq_printf(m, "line_size=%u ", cache->coherency_line_size); + seq_printf(m, "associativity=%d", cache->ways_of_associativity); + seq_puts(m, "\n"); + } +} diff --git a/arch/e2k/kernel/cpu/e12c.c b/arch/e2k/kernel/cpu/e12c.c new file mode 100644 index 000000000000..267e4b9b3f94 --- /dev/null +++ b/arch/e2k/kernel/cpu/e12c.c @@ -0,0 +1,51 @@ +#include +#include +#include +#include +#include +#include + +#include + +static e2k_addr_t e12c_get_nsr_area_phys_base(void) +{ + return E12C_NSR_AREA_PHYS_BASE; +} + +static void __init_recv +e12c_setup_cpu_info(cpuinfo_e2k_t *cpu_info) +{ + e2k_idr_t IDR; + + IDR = read_IDR_reg(); + strncpy(cpu_info->vendor, ELBRUS_CPU_VENDOR, 16); + cpu_info->family = ELBRUS_12C_ISET; + cpu_info->model = IDR.IDR_mdl; + cpu_info->revision = IDR.IDR_rev; +} + +static void __init +e12c_setup_arch(void) +{ + int nid; + + for_each_node_has_dup_kernel(nid) { + the_node_machine(nid)->setup_cpu_info = e12c_setup_cpu_info; + } +} + +void __init +e12c_setup_machine(void) +{ + int nid; + + for_each_node_has_dup_kernel(nid) { + the_node_machine(nid)->setup_arch = e12c_setup_arch; + the_node_machine(nid)->arch_reset = NULL; + the_node_machine(nid)->arch_halt = NULL; + the_node_machine(nid)->get_irq_vector = pic_get_vector; + the_node_machine(nid)->get_nsr_area_phys_base = + e12c_get_nsr_area_phys_base; + the_node_machine(nid)->setup_apic_vector_handlers = NULL; + } +} diff --git a/arch/e2k/kernel/cpu/e16c.c b/arch/e2k/kernel/cpu/e16c.c new file mode 100644 index 000000000000..7dbb4ec8dca5 --- /dev/null +++ b/arch/e2k/kernel/cpu/e16c.c @@ -0,0 +1,51 @@ +#include +#include +#include +#include +#include +#include + +#include + +static e2k_addr_t e16c_get_nsr_area_phys_base(void) +{ + return E16C_NSR_AREA_PHYS_BASE; +} + +static void __init_recv +e16c_setup_cpu_info(cpuinfo_e2k_t *cpu_info) +{ + e2k_idr_t IDR; + + IDR = read_IDR_reg(); + strncpy(cpu_info->vendor, ELBRUS_CPU_VENDOR, 
16); + cpu_info->family = ELBRUS_16C_ISET; + cpu_info->model = IDR.IDR_mdl; + cpu_info->revision = IDR.IDR_rev; +} + +static void __init +e16c_setup_arch(void) +{ + int nid; + + for_each_node_has_dup_kernel(nid) { + the_node_machine(nid)->setup_cpu_info = e16c_setup_cpu_info; + } +} + +void __init +e16c_setup_machine(void) +{ + int nid; + + for_each_node_has_dup_kernel(nid) { + the_node_machine(nid)->setup_arch = e16c_setup_arch; + the_node_machine(nid)->arch_reset = NULL; + the_node_machine(nid)->arch_halt = NULL; + the_node_machine(nid)->get_irq_vector = pic_get_vector; + the_node_machine(nid)->get_nsr_area_phys_base = + e16c_get_nsr_area_phys_base; + the_node_machine(nid)->setup_apic_vector_handlers = NULL; + } +} diff --git a/arch/e2k/kernel/cpu/e1cp.c b/arch/e2k/kernel/cpu/e1cp.c new file mode 100644 index 000000000000..b66be1ef481b --- /dev/null +++ b/arch/e2k/kernel/cpu/e1cp.c @@ -0,0 +1,53 @@ +#include +#include +#include +#include +#include +#include + +#include + +static void e1cp_setup_apic_vector_handlers(void) +{ + setup_PIC_vector_handler(LVT4_INTERRUPT_VECTOR, sic_error_interrupt, 1, + "sic_error_interrupt"); +} + +static void __init_recv +e1cp_setup_cpu_info(cpuinfo_e2k_t *cpu_info) +{ + e2k_idr_t IDR; + + IDR = read_IDR_reg(); + strncpy(cpu_info->vendor, ELBRUS_CPU_VENDOR, 16); + cpu_info->family = ELBRUS_1CP_ISET; + cpu_info->model = IDR.IDR_mdl; + cpu_info->revision = IDR.IDR_rev; +} + +static void __init +e1cp_setup_arch(void) +{ + int nid; + + for_each_node_has_dup_kernel(nid) { + the_node_machine(nid)->setup_cpu_info = e1cp_setup_cpu_info; + } +} + +void __init +e1cp_setup_machine(void) +{ + int nid; + + for_each_node_has_dup_kernel(nid) { + the_node_machine(nid)->setup_arch = e1cp_setup_arch; + the_node_machine(nid)->arch_reset = NULL; + the_node_machine(nid)->arch_halt = NULL; + the_node_machine(nid)->get_irq_vector = apic_get_vector; + the_node_machine(nid)->get_nsr_area_phys_base = + early_get_legacy_nbsr_base; + 
the_node_machine(nid)->setup_apic_vector_handlers = + e1cp_setup_apic_vector_handlers; + } +} diff --git a/arch/e2k/kernel/cpu/e2c3.c b/arch/e2k/kernel/cpu/e2c3.c new file mode 100644 index 000000000000..23b0068107ba --- /dev/null +++ b/arch/e2k/kernel/cpu/e2c3.c @@ -0,0 +1,49 @@ +#include +#include +#include +#include + +#include + +static e2k_addr_t e2c3_get_nsr_area_phys_base(void) +{ + return E2C3_NSR_AREA_PHYS_BASE; +} + +static void __init_recv +e2c3_setup_cpu_info(cpuinfo_e2k_t *cpu_info) +{ + e2k_idr_t IDR; + + IDR = read_IDR_reg(); + strncpy(cpu_info->vendor, ELBRUS_CPU_VENDOR, 16); + cpu_info->family = ELBRUS_2C3_ISET; + cpu_info->model = IDR.IDR_mdl; + cpu_info->revision = IDR.IDR_rev; +} + +static void __init +e2c3_setup_arch(void) +{ + int nid; + + for_each_node_has_dup_kernel(nid) { + the_node_machine(nid)->setup_cpu_info = e2c3_setup_cpu_info; + } +} + +void __init +e2c3_setup_machine(void) +{ + int nid; + + for_each_node_has_dup_kernel(nid) { + the_node_machine(nid)->setup_arch = e2c3_setup_arch; + the_node_machine(nid)->arch_reset = NULL; + the_node_machine(nid)->arch_halt = NULL; + the_node_machine(nid)->get_irq_vector = pic_get_vector; + the_node_machine(nid)->get_nsr_area_phys_base = + e2c3_get_nsr_area_phys_base; + the_node_machine(nid)->setup_apic_vector_handlers = NULL; + } +} diff --git a/arch/e2k/kernel/cpu/e2s.c b/arch/e2k/kernel/cpu/e2s.c new file mode 100644 index 000000000000..e1d1a57f0ce3 --- /dev/null +++ b/arch/e2k/kernel/cpu/e2s.c @@ -0,0 +1,58 @@ +#include +#include +#include +#include +#include +#include + +#include + +static e2k_addr_t e2s_get_nsr_area_phys_base(void) +{ + return E2S_NSR_AREA_PHYS_BASE; +} + +static void e2s_setup_apic_vector_handlers(void) +{ + setup_PIC_vector_handler(LVT4_INTERRUPT_VECTOR, sic_error_interrupt, 1, + "sic_error_interrupt"); +} + +static void __init_recv +e2s_setup_cpu_info(cpuinfo_e2k_t *cpu_info) +{ + e2k_idr_t IDR; + + IDR = read_IDR_reg(); + strncpy(cpu_info->vendor, ELBRUS_CPU_VENDOR, 16); + 
cpu_info->family = ELBRUS_2S_ISET; + cpu_info->model = IDR.IDR_mdl; + cpu_info->revision = IDR.IDR_rev; +} + +static void __init +e2s_setup_arch(void) +{ + int nid; + + for_each_node_has_dup_kernel(nid) { + the_node_machine(nid)->setup_cpu_info = e2s_setup_cpu_info; + } +} + +void __init +e2s_setup_machine(void) +{ + int nid; + + for_each_node_has_dup_kernel(nid) { + the_node_machine(nid)->setup_arch = e2s_setup_arch; + the_node_machine(nid)->arch_reset = NULL; + the_node_machine(nid)->arch_halt = NULL; + the_node_machine(nid)->get_irq_vector = apic_get_vector; + the_node_machine(nid)->get_nsr_area_phys_base = + e2s_get_nsr_area_phys_base; + the_node_machine(nid)->setup_apic_vector_handlers = + e2s_setup_apic_vector_handlers; + } +} diff --git a/arch/e2k/kernel/cpu/e8c.c b/arch/e2k/kernel/cpu/e8c.c new file mode 100644 index 000000000000..4f5ecad2aaec --- /dev/null +++ b/arch/e2k/kernel/cpu/e8c.c @@ -0,0 +1,56 @@ +#include +#include +#include +#include + +#include + +static e2k_addr_t e8c_get_nsr_area_phys_base(void) +{ + return E8C_NSR_AREA_PHYS_BASE; +} + +static void e8c_setup_apic_vector_handlers(void) +{ + setup_PIC_vector_handler(LVT4_INTERRUPT_VECTOR, sic_error_interrupt, 1, + "sic_error_interrupt"); +} + +static void __init_recv +e8c_setup_cpu_info(cpuinfo_e2k_t *cpu_info) +{ + e2k_idr_t IDR; + + IDR = read_IDR_reg(); + strncpy(cpu_info->vendor, ELBRUS_CPU_VENDOR, 16); + cpu_info->family = ELBRUS_8C_ISET; + cpu_info->model = IDR.IDR_mdl; + cpu_info->revision = IDR.IDR_rev; +} + +static void __init +e8c_setup_arch(void) +{ + int nid; + + for_each_node_has_dup_kernel(nid) { + the_node_machine(nid)->setup_cpu_info = e8c_setup_cpu_info; + } +} + +void __init +e8c_setup_machine(void) +{ + int nid; + + for_each_node_has_dup_kernel(nid) { + the_node_machine(nid)->setup_arch = e8c_setup_arch; + the_node_machine(nid)->arch_reset = NULL; + the_node_machine(nid)->arch_halt = NULL; + the_node_machine(nid)->get_irq_vector = apic_get_vector; + 
the_node_machine(nid)->get_nsr_area_phys_base = + e8c_get_nsr_area_phys_base; + the_node_machine(nid)->setup_apic_vector_handlers = + e8c_setup_apic_vector_handlers; + } +} diff --git a/arch/e2k/kernel/cpu/e8c2.c b/arch/e2k/kernel/cpu/e8c2.c new file mode 100644 index 000000000000..d772d77e6a5d --- /dev/null +++ b/arch/e2k/kernel/cpu/e8c2.c @@ -0,0 +1,56 @@ +#include +#include +#include +#include + +#include + +static e2k_addr_t e8c2_get_nsr_area_phys_base(void) +{ + return E8C2_NSR_AREA_PHYS_BASE; +} + +static void e8c2_setup_apic_vector_handlers(void) +{ + setup_PIC_vector_handler(LVT4_INTERRUPT_VECTOR, sic_error_interrupt, 1, + "sic_error_interrupt"); +} + +static void __init_recv +e8c2_setup_cpu_info(cpuinfo_e2k_t *cpu_info) +{ + e2k_idr_t IDR; + + IDR = read_IDR_reg(); + strncpy(cpu_info->vendor, ELBRUS_CPU_VENDOR, 16); + cpu_info->family = ELBRUS_8C2_ISET; + cpu_info->model = IDR.IDR_mdl; + cpu_info->revision = IDR.IDR_rev; +} + +static void __init +e8c2_setup_arch(void) +{ + int nid; + + for_each_node_has_dup_kernel(nid) { + the_node_machine(nid)->setup_cpu_info = e8c2_setup_cpu_info; + } +} + +void __init +e8c2_setup_machine(void) +{ + int nid; + + for_each_node_has_dup_kernel(nid) { + the_node_machine(nid)->setup_arch = e8c2_setup_arch; + the_node_machine(nid)->arch_reset = NULL; + the_node_machine(nid)->arch_halt = NULL; + the_node_machine(nid)->get_irq_vector = apic_get_vector; + the_node_machine(nid)->get_nsr_area_phys_base = + e8c2_get_nsr_area_phys_base; + the_node_machine(nid)->setup_apic_vector_handlers = + e8c2_setup_apic_vector_handlers; + } +} diff --git a/arch/e2k/kernel/cpu/es2.c b/arch/e2k/kernel/cpu/es2.c new file mode 100644 index 000000000000..77806b273b4e --- /dev/null +++ b/arch/e2k/kernel/cpu/es2.c @@ -0,0 +1,59 @@ +#include +#include +#include +#include +#include + +#include + +static e2k_addr_t es2_get_nsr_area_phys_base(void) +{ + return ES2_NSR_AREA_PHYS_BASE; +} + +static void es2_setup_apic_vector_handlers(void) +{ +#if 
defined(CONFIG_ELDSP) || defined(CONFIG_ELDSP_MODULE) + setup_PIC_vector_handler(LVT3_INTERRUPT_VECTOR, eldsp_interrupt, 1, + "eldsp_interrupt"); +#endif +} + +static void __init_recv +es2_setup_cpu_info(cpuinfo_e2k_t *cpu_info) +{ + e2k_idr_t IDR; + + IDR = read_IDR_reg(); + strncpy(cpu_info->vendor, ELBRUS_CPU_VENDOR, 16); + cpu_info->family = ELBRUS_S_ISET; + cpu_info->model = IDR.IDR_mdl; + cpu_info->revision = IDR.IDR_rev; +} + +static void __init +es2_setup_arch(void) +{ + int nid; + + for_each_node_has_dup_kernel(nid) { + the_node_machine(nid)->setup_cpu_info = es2_setup_cpu_info; + } +} + +void __init +es2_setup_machine(void) +{ + int nid; + + for_each_node_has_dup_kernel(nid) { + the_node_machine(nid)->setup_arch = es2_setup_arch; + the_node_machine(nid)->arch_reset = NULL; + the_node_machine(nid)->arch_halt = NULL; + the_node_machine(nid)->get_irq_vector = apic_get_vector; + the_node_machine(nid)->get_nsr_area_phys_base = + es2_get_nsr_area_phys_base; + the_node_machine(nid)->setup_apic_vector_handlers = + es2_setup_apic_vector_handlers; + } +} diff --git a/arch/e2k/kernel/cpu/iset_v2.c b/arch/e2k/kernel/cpu/iset_v2.c new file mode 100644 index 000000000000..e1b3849f06f9 --- /dev/null +++ b/arch/e2k/kernel/cpu/iset_v2.c @@ -0,0 +1,558 @@ +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/******************************* DEBUG DEFINES ********************************/ +#undef DEBUG_PF_MODE +#define DEBUG_PF_MODE 0 /* Page fault */ +#define DebugPF(...) 
DebugPrint(DEBUG_PF_MODE ,##__VA_ARGS__) +/******************************************************************************/ + +unsigned long rrd_v2(int reg) +{ + return 0; +} + +void rwd_v2(int reg, unsigned long value) +{ +} + +u64 native_get_cu_hw1_v2() +{ + panic("No %%cu_hw1 in instruction set v2\n"); +} + +void native_set_cu_hw1_v2(u64 cu_hw1) +{ + panic("No %%cu_hw1 in instruction set v2\n"); +} + +__section(".entry.text") +notrace __interrupt +void save_local_gregs_v2(struct local_gregs *gregs, bool is_signal) +{ + gregs->bgr = NATIVE_READ_BGR_REG(); + init_BGR_reg(); /* enable whole GRF */ + if (is_signal) + SAVE_GREGS_SIGNAL(gregs->g, E2K_ISET_V2); + NATIVE_WRITE_BGR_REG(gregs->bgr); +} + +__section(".entry.text") +notrace __interrupt +void save_kernel_gregs_v2(struct kernel_gregs *gregs) +{ + NATIVE_SAVE_GREG(&gregs->g[GUEST_VCPU_STATE_GREGS_PAIRS_INDEX], + &gregs->g[CURRENT_TASK_GREGS_PAIRS_INDEX], + GUEST_VCPU_STATE_GREG, CURRENT_TASK_GREG, E2K_ISET_V2); + NATIVE_SAVE_GREG(&gregs->g[MY_CPU_OFFSET_GREGS_PAIRS_INDEX], + &gregs->g[SMP_CPU_ID_GREGS_PAIRS_INDEX], + MY_CPU_OFFSET_GREG, SMP_CPU_ID_GREG, E2K_ISET_V2); +} + +notrace __interrupt +void save_gregs_on_mask_v2(struct global_regs *gregs, bool dirty_bgr, + unsigned long mask_not_save) +{ + gregs->bgr = NATIVE_READ_BGR_REG(); + init_BGR_reg(); /* enable whole GRF */ + if (mask_not_save == (GLOBAL_GREGS_USER_MASK | KERNEL_GREGS_MASK)) { + /* it is same case as save all excluding global register */ + /* %g0 - %g15 and registers used by kernel %gN - %gN+3 */ + /* now N=16 see asm/glob_regs.h */ + SAVE_GREGS_EXCEPT_GLOBAL_AND_KERNEL(gregs->g, E2K_ISET_V2); + } else if (mask_not_save == KERNEL_GREGS_MASK) { + /* it is same case as save all excluding registers used */ + /* by kernel %gN - %gN+3 now N=16 see asm/glob_regs.h */ + SAVE_GREGS_EXCEPT_KERNEL(gregs->g, E2K_ISET_V2); + } else if (mask_not_save == 0) { + /* save all registers */ + SAVE_ALL_GREGS(gregs->g, E2K_ISET_V2); + } else { + /* common case with 
original mask */ + DO_SAVE_GREGS_ON_MASK(gregs->g, E2K_ISET_V2, mask_not_save); + } + if (!dirty_bgr) + NATIVE_WRITE_BGR_REG(gregs->bgr); +} + +__section(".entry.text") +notrace __interrupt +void save_gregs_v2(struct global_regs *gregs) +{ + gregs->bgr = NATIVE_READ_BGR_REG(); + init_BGR_reg(); /* enable whole GRF */ + SAVE_GREGS_EXCEPT_KERNEL(gregs->g, E2K_ISET_V2); + NATIVE_WRITE_BGR_REG(gregs->bgr); +} + +__section(".entry.text") +notrace __interrupt +void save_gregs_dirty_bgr_v2(struct global_regs *gregs) +{ + gregs->bgr = NATIVE_READ_BGR_REG(); + init_BGR_reg(); /* enable whole GRF */ + SAVE_GREGS(gregs->g, true, E2K_ISET_V2); +} + +__section(".entry.text") +notrace __interrupt +void restore_local_gregs_v2(const struct local_gregs *gregs, bool is_signal) +{ + init_BGR_reg(); + if (is_signal) + RESTORE_GREGS_SIGNAL(gregs->g, E2K_ISET_V2); + NATIVE_WRITE_BGR_REG(gregs->bgr); +} + +notrace __interrupt +void restore_gregs_on_mask_v2(struct global_regs *gregs, bool dirty_bgr, + unsigned long mask_not_restore) +{ + init_BGR_reg(); /* enable whole GRF */ + if (mask_not_restore == (GLOBAL_GREGS_USER_MASK | KERNEL_GREGS_MASK)) { + /* it is same case as restore all excluding global register */ + /* %g0 - %g15 and registers used by kernel %gN - %gN+3 */ + /* now N=16 see asm/glob_regs.h */ + RESTORE_GREGS_EXCEPT_GLOBAL_AND_KERNEL(gregs->g, E2K_ISET_V2); + } else if (mask_not_restore == KERNEL_GREGS_MASK) { + /* it is same case as restore all excluding registers used */ + /* by kernel %gN - %gN+3 now N=16 see asm/glob_regs.h */ + RESTORE_GREGS_EXCEPT_KERNEL(gregs->g, E2K_ISET_V2); + } else if (mask_not_restore == 0) { + /* restore all registers */ + RESTORE_ALL_GREGS(gregs->g, E2K_ISET_V2); + } else { + /* common case with original mask */ + DO_RESTORE_GREGS_ON_MASK(gregs->g, E2K_ISET_V2, + mask_not_restore); + } + if (!dirty_bgr) + NATIVE_WRITE_BGR_REG(gregs->bgr); +} + +__section(".entry.text") +notrace __interrupt +void restore_gregs_v2(const struct global_regs *gregs) 
+{ + init_BGR_reg(); /* enable whole GRF */ + RESTORE_GREGS(gregs->g, true, E2K_ISET_V2); + NATIVE_WRITE_BGR_REG(gregs->bgr); +} + +#ifdef CONFIG_USE_AAU +/* calculate current array prefetch buffer indices values + * (see chapter 1.10.2 in "Scheduling") */ +void calculate_aau_aaldis_aaldas_v2(const struct pt_regs *regs, + struct thread_info *ti, e2k_aau_t *context) +{ + u64 areas, area_num, iter_count; + e2k_aalda_t *aaldas = ti->aalda; + u64 *aaldis = context->aaldi; + /* get_user() is used here */ + WARN_ON_ONCE(regs && __raw_irqs_disabled()); + + DebugPF("started for aasr 0x%x, aafstr 0x%x\n", + context->aasr.word, context->aafstr); + + memset(aaldas, 0, AALDAS_REGS_NUM * sizeof(aaldas[0])); + memset(aaldis, 0, AALDIS_REGS_NUM * sizeof(aaldis[0])); + + /* It is first guest run to set initial state of AAU */ + if (unlikely(!regs)) + return; + + /* See bug 33621 comment 2 and bug 52350 comment 29 */ + iter_count = get_lcnt(regs->ilcr) - get_lcnt(regs->lsr); + if (get_ldmc(regs->lsr) && !get_lcnt(regs->lsr)) + iter_count += get_ecnt(regs->ilcr) - get_ecnt(regs->lsr) - 1; + + /* + * Calculate areas in the following order: + * + * 0 -> 32 -> 1 -> 33 -> 2 -> ... -> 62 -> 31 -> 63 + * + * until all the set bits in aaldv are checked. + */ + for (area_num = 0, areas = AW(context->aaldv); areas != 0; + areas &= ~(1UL << area_num), + area_num = (area_num < 32) ? 
(area_num + 32) + : (area_num - 31)) { + e2k_fapb_instr_t *fapb_addr; + e2k_fapb_instr_t fapb; + e2k_aalda_t tmp_aalda; + u64 step, ind, iter; + + if (!(AW(context->aaldv) & (1UL << area_num))) + continue; + + iter = iter_count + ((AW(context->aaldm) & (1UL << area_num)) + >> area_num); + DebugPF("current area #%lld iter count %lld, iter %lld\n", + area_num, iter_count, iter); + + if (iter == 0) { + AW(context->aaldv) &= ~(1UL << area_num); + continue; + } + + if (area_num < 32) + fapb_addr = (e2k_fapb_instr_t *) + (AS(regs->ctpr2).ta_base + 16 * area_num); + else + fapb_addr = (e2k_fapb_instr_t *) + (AS(regs->ctpr2).ta_base + 8 + + 16 * (area_num - 32)); + +# if __LCC__ >= 120 + /* + * tmp is used to avoid compiler issue with passing + * union's fields into inline asm. Bug 76907. + */ + u64 tmp; + long ret_get_user; + + ret_get_user = host_get_user(tmp, (u64 *)fapb_addr, regs); + if (ret_get_user) { + if (ret_get_user == -EAGAIN) + break; + else + goto die; + } + fapb.word = tmp; +# else + long ret_get_user; + + ret_get_user = host_get_user(AW(fapb), (u64 *)fapb_addr, regs); + if (ret_get_user) { + if (ret_get_user == -EAGAIN) + break; + else + goto die; + } +# endif + DebugPF("FAPB at %px instruction 0x%llx, fmt %d, si %d\n", + fapb_addr, AW(fapb), AS(fapb).fmt, AS(fapb).si); + + if (area_num >= 32 && AS(fapb).dpl) { + /* See bug #53880 */ + pr_info_once("%s [%d]: AAU is working in dpl mode " + "(FAPB at %px)\n", + current->comm, current->pid, fapb_addr); + + aaldas[area_num] = aaldas[area_num - 32]; + aaldis[area_num] = aaldis[area_num - 32]; + continue; + } + + if (!AS(fapb).fmt) + continue; + + AS(tmp_aalda).root = (AS(context->aads[AS(fapb).d]).lo.tag == + AAD_AAUDS); + + if (AS(fapb).si) { + AS(tmp_aalda).cincr = 0; + AS(tmp_aalda).exc = 0; + aaldas[area_num] = tmp_aalda; + DebugPF("calculated aalda[%lld] is 0x%x\n", + area_num, tmp_aalda.word); + continue; + } + + ind = (context->aainds[AS(fapb).ind] + AS(fapb).disp) + & 0xffffffffULL; + step = 
(context->aaincrs[AS(fapb).incr] << (AS(fapb).fmt - 1)) + & 0xffffffffULL; + if (context->aaincrs[AS(fapb).incr] >> 31) + step = step | 0xffffffff00000000ULL; + ind += step * iter; + DebugPF("calculated ind 0x%llx step 0x%llx iter 0x%llx\n", + ind, step, iter); + if (ind >> 32) { + AS(tmp_aalda).cincr = 1; + AS(tmp_aalda).exc = AALDA_EIO; + } else { + AS(tmp_aalda).cincr = 0; + AS(tmp_aalda).exc = 0; + } + + aaldas[area_num] = tmp_aalda; + DebugPF("calculated aalda[%lld] is 0x%x\n", + area_num, tmp_aalda.word); + + aaldis[area_num] = ind & 0xffffffffULL; + DebugPF("calculated aaldi[%lld] is 0x%llx\n", + area_num, aaldis[area_num]); + } + + return; + +die: + force_sig(SIGSEGV); +} + +/* See chapter 1.10.3 in "Scheduling" */ +void do_aau_fault_v2(int aa_field, struct pt_regs *regs) +{ + const e2k_aau_t *const aau_regs = regs->aau_context; + u32 aafstr = aau_regs->aafstr; + unsigned int aa_bit = 0; + u64 iter_count; + tc_cond_t condition; + tc_mask_t mask; + int ret; + long ret_get_user; + + regs->trap->nr_page_fault_exc = exc_data_page_num; + + /* See bug 33621 comment 2 and bug 52350 comment 29 */ + iter_count = get_lcnt(regs->ilcr) - get_lcnt(regs->lsr); + if (get_ldmc(regs->lsr) && !get_lcnt(regs->lsr)) + iter_count += get_ecnt(regs->ilcr) - get_ecnt(regs->lsr) - 1; + + DebugPF("enter, aa_field 0x%x, aasr 0x%x, aafstr = 0x%x\n", + aa_field, aau_regs->aasr.word, aafstr); + + /* condition.store = 0 + * condition.fault_type = 0 */ + AW(condition) = 0; + AS(condition).fmt = LDST_BYTE_FMT; + AS(condition).spec = 1; + AW(mask) = 0; + + while (aa_bit < 4) { + u64 area_num, mrng, d_num, addr1, addr2; + e2k_fapb_instr_t *fapb_addr; + e2k_fapb_instr_t fapb; + u32 step, ind, disp; + + if (!(aa_field & 0x1) || !(aafstr & 0x1)) + goto next_area; + + area_num = (aafstr >> 1) & 0x3f; + DebugPF("got interrupt on %d mova channel, area %lld\n", + aa_bit, area_num); + + if (area_num < 32) + fapb_addr = (e2k_fapb_instr_t *)(AS(regs->ctpr2).ta_base + + 16 * area_num); + else + 
fapb_addr = (e2k_fapb_instr_t *)(AS(regs->ctpr2).ta_base + + 16 * (area_num - 32) + 8); + + ret_get_user = host_get_user(AW(fapb), (u64 *)fapb_addr, regs); + if (ret_get_user) { + if (ret_get_user == -EAGAIN) + break; + else + goto die; + } + + DebugPF("FAPB at %px instruction 0x%llx\n", + fapb_addr, AW(fapb)); + + if (area_num >= 32 && AS(fapb).dpl) { + /* See bug #53880 */ + pr_notice_once("%s [%d]: AAU is working in dpl mode (FAPB at %px)\n", + current->comm, current->pid, fapb_addr); + area_num -= 32; + fapb_addr -= 1; + ret_get_user = host_get_user(AW(fapb), + (u64 *)fapb_addr, regs); + if (ret_get_user) { + if (ret_get_user == -EAGAIN) + break; + else + goto die; + } + } + + if (!AS(aau_regs->aasr).iab) { + WARN_ONCE(1, "%s [%d]: AAU fault happened but iab in AASR register was not set\n", + current->comm, current->pid); + goto die; + } + step = aau_regs->aaincrs[AS(fapb).incr] << (AS(fapb).fmt - 1); + disp = AS(fapb).disp + step * iter_count; + d_num = AS(fapb).d; + if (unlikely(AS(fapb).si)) + pr_notice_once("WARNING: %s (%d): uses secondary " + "indexes at IP 0x%lx, ignoring\n", + current->comm, current->pid, fapb_addr); + + ind = aau_regs->aainds[AS(fapb).ind] + disp; + mrng = AS(fapb).mrng ?: 32; + + if (AS(aau_regs->aads[d_num]).lo.tag == AAD_AAUSAP) { + addr1 = AS(aau_regs->aads[d_num]).lo.sap_base + ind + + (regs->stacks.top & ~0xffffffffULL); + } else { + addr1 = AS(aau_regs->aads[d_num]).lo.ap_base + ind; + } + addr2 = addr1 + mrng - 1; + DebugPF("AAD #%lld addr 0x%llx index 0x%x mrng" + " 0x%llx, disp 0x%x, step 0x%x\n", + d_num, addr1, ind, mrng, disp, step); + if (unlikely((addr1 & ~E2K_VA_MASK) || (addr2 & ~E2K_VA_MASK))){ + pr_notice_once("Bad address: addr 0x%llx," + " ind 0x%x, mrng 0x%llx," + " disp 0x%x, step 0x%x, fapb 0x%lx\n", + addr1, ind, mrng, disp, + step, (unsigned long)AW(fapb)); + + addr1 &= E2K_VA_MASK; + addr2 &= E2K_VA_MASK; + } + DebugPF("address1 = 0x%llx, address2 = 0x%llx, mrng=%lld\n", + addr1, addr2, mrng); + + ret = 
do_aau_page_fault(regs, addr1, condition, mask, aa_bit); + if (ret) { + if (ret == 2) { + /* + * Special case of trap handling on host: + * host inject the trap to guest + */ + return; + } + goto die; + } + if ((addr1 & 0xfffUL) > (addr2 & 0xfffUL)) { + ret = do_aau_page_fault(regs, addr2, condition, mask, + aa_bit); + if (ret) { + if (ret == 2) { + /* + * Special case of trap handling on host: + * host inject the trap to guest + */ + return; + } + goto die; + } + } + +next_area: + aa_bit++; + aafstr >>= 8; + aa_field >>= 1; + } + + DebugPF("exit aau fault handler\n"); + + return; + +die: + force_sig(SIGSEGV); +} + +notrace void save_aaldi_v2(u64 *aaldis) +{ + SAVE_AALDIS_V2(aaldis); +} + +/* + * It's taken that aasr was get earlier(from get_aau_context caller) + * and comparison with aasr.iab was taken. + */ +notrace void get_aau_context_v2(e2k_aau_t *context) +{ + GET_AAU_CONTEXT_V2(context); +} +#endif /* CONFIG_USE_AAU */ + +#ifdef CONFIG_MLT_STORAGE +static bool read_MLT_entry_v2(e2k_mlt_entry_t *mlt, int entry_num) +{ + AS_WORD(mlt->dw0) = NATIVE_READ_MLT_REG( + (REG_MLT_TYPE << REG_MLT_TYPE_SHIFT) | + (entry_num << REG_MLT_N_SHIFT)); + + if (!AS_V2_STRUCT(mlt->dw0).val) + return false; + + AS_WORD(mlt->dw1) = NATIVE_READ_MLT_REG( + (1 << REG_MLT_DW_SHIFT) | + (REG_MLT_TYPE << REG_MLT_TYPE_SHIFT) | + (entry_num << REG_MLT_N_SHIFT)); + AS_WORD(mlt->dw2) = NATIVE_READ_MLT_REG( + (2 << REG_MLT_DW_SHIFT) | + (REG_MLT_TYPE << REG_MLT_TYPE_SHIFT) | + (entry_num << REG_MLT_N_SHIFT)); + + return true; +} + +static void invalidate_MLT_entry_v2(e2k_mlt_entry_t *mlt) +{ + ldst_rec_op_t opc = { + .fmt = 1, + .mas = MAS_MLT_NOP_UNLOCK, + }; + + opc.rg_deprecated = AS_V2_STRUCT(mlt->dw0).rg; + + NATIVE_RECOVERY_STORE(&opc, 0x0, AW(opc), 2); +} + +void invalidate_MLT_v2() +{ + int i; + + for (i = 0; i < NATIVE_MLT_SIZE; i++) { + e2k_mlt_entry_t mlt; + + if (read_MLT_entry_v2(&mlt, i)) + invalidate_MLT_entry_v2(&mlt); + } +} + +void 
get_and_invalidate_MLT_context_v2(e2k_mlt_t *mlt_state) +{ + int i; + + mlt_state->num = 0; + + for (i = 0; i < NATIVE_MLT_SIZE; i++) { + e2k_mlt_entry_t *mlt = &mlt_state->mlt[mlt_state->num]; + + if (read_MLT_entry_v2(mlt, i)) { + invalidate_MLT_entry_v2(mlt); + mlt_state->num++; + } + } +} +#endif + +__section(".C1_wait_trap.text") +static noinline notrace void C1_wait_trap(void) +{ + /* Interrupts must be enabled in the ".wait_trap.text" section + * so that the wakeup IRQ is not missed by handle_wtrap(). */ + local_irq_enable(); + + C1_WAIT_TRAP_V3(); + /* Will not get here */ +} + +void __cpuidle C1_enter_v2(void) +{ + /* C1 state: just stop until a trap wakes us */ + WARN_ON_ONCE(!irqs_disabled()); + C1_wait_trap(); + local_irq_disable(); +} diff --git a/arch/e2k/kernel/cpu/iset_v3.c b/arch/e2k/kernel/cpu/iset_v3.c new file mode 100644 index 000000000000..44b8a5bbd14d --- /dev/null +++ b/arch/e2k/kernel/cpu/iset_v3.c @@ -0,0 +1,183 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +unsigned long rrd_v3(int reg) +{ + switch (reg) { + case E2K_REG_CORE_MODE: + return NATIVE_READ_CORE_MODE_REG_VALUE(); + } + + return 0; +} + +void rwd_v3(int reg, unsigned long value) +{ + switch (reg) { + case E2K_REG_CORE_MODE: + NATIVE_WRITE_CORE_MODE_REG_VALUE(value); + return; + } +} + +void flushts_v3(void) +{ + E2K_FLUSHTS; +} + +#ifdef CONFIG_MLT_STORAGE +void invalidate_MLT_v3() +{ + NATIVE_SET_MMUREG(mlt_inv, 0); +} + +static bool read_MLT_entry_v3(e2k_mlt_entry_t *mlt, int entry_num) +{ + AW(mlt->dw0) = NATIVE_READ_MLT_REG(REG_MLT_TYPE << REG_MLT_TYPE_SHIFT | + entry_num << REG_MLT_N_SHIFT); + + if (!AS_V2_STRUCT(mlt->dw0).val) + return false; + + AW(mlt->dw1) = NATIVE_READ_MLT_REG(1 << REG_MLT_DW_SHIFT | + REG_MLT_TYPE << REG_MLT_TYPE_SHIFT | + entry_num << REG_MLT_N_SHIFT); + AW(mlt->dw2) = NATIVE_READ_MLT_REG(2 << REG_MLT_DW_SHIFT | + REG_MLT_TYPE << REG_MLT_TYPE_SHIFT | + entry_num << REG_MLT_N_SHIFT); + + return true; +} + +void 
get_and_invalidate_MLT_context_v3(e2k_mlt_t *mlt_state) +{ + int i; + + mlt_state->num = 0; + + for (i = 0; i < NATIVE_MLT_SIZE; i++) { + e2k_mlt_entry_t *mlt = &mlt_state->mlt[mlt_state->num]; + + if (read_MLT_entry_v3(mlt, i)) + mlt_state->num++; + } + + NATIVE_SET_MMUREG(mlt_inv, 0); +} +#endif + +/* SCLKR/SCLKM1/SCLKM2 implemented only on machine from e2s */ + +unsigned long native_read_SCLKR_reg_value(void) +{ + return NATIVE_READ_SCLKR_REG_VALUE(); +} + +unsigned long native_read_SCLKM1_reg_value(void) +{ + return NATIVE_READ_SCLKM1_REG_VALUE(); +} + +unsigned long native_read_SCLKM2_reg_value(void) +{ + return NATIVE_READ_SCLKM2_REG_VALUE(); +} + +void native_write_SCLKR_reg_value(unsigned long reg_value) +{ + NATIVE_WRITE_SCLKR_REG_VALUE(reg_value); +} + +void native_write_SCLKM1_reg_value(unsigned long reg_value) +{ + NATIVE_WRITE_SCLKM1_REG_VALUE(reg_value); +} + +void native_write_SCLKM2_reg_value(unsigned long reg_value) +{ + NATIVE_WRITE_SCLKM2_REG_VALUE(reg_value); +} + +__section(".C3_wait_trap.text") +static noinline notrace void C3_wait_trap(bool nmi_only) +{ + e2k_st_core_t st_core; + int cpuid = read_pic_id(); + int reg = SIC_st_core(cpuid % cpu_max_cores_num()); + int node = numa_node_id(); + phys_addr_t nbsr_phys = sic_get_node_nbsr_phys_base(node); + + /* Only NMIs that go through APIC are allowed: if we receive local + * NMI (or just a local exception) hardware will block. So here we + * disable all other sources (and reenable them in handle_wtrap()); + * it must be done under all closed interrupts so that handle_wtrap() + * does not try to read uninitalized values from [current->thread.C3]. + * + * Newer processors have a much better "wait int" interface that + * doesn't have this problem (and some others) and should be used + * instead. 
*/ + WARN_ON_ONCE(!raw_all_irqs_disabled()); + NATIVE_SET_MMUREG(mlt_inv, 0); + current->thread.C3.ddbcr = READ_DDBCR_REG(); + current->thread.C3.dibcr = READ_DIBCR_REG(); + current->thread.C3.ddmcr = READ_DDMCR_REG(); + current->thread.C3.dimcr = READ_DIMCR_REG(); + + WRITE_DDBCR_REG_VALUE(0); + WRITE_DIBCR_REG_VALUE(0); + WRITE_DDMCR_REG_VALUE(0); + WRITE_DIMCR_REG_VALUE(0); + + AW(st_core) = sic_read_node_nbsr_reg(node, reg); + st_core.val = 0; + if (IS_MACHINE_E1CP) + st_core.e1cp.pmc_rst = 1; + + /* Interrupts must be enabled in the ".wait_trap.text" section + * so that the wakeup IRQ is not missed by handle_wtrap(). */ + if (nmi_only) + raw_local_irq_disable(); + else + local_irq_enable(); + + C3_WAIT_TRAP_V3(AW(st_core), nbsr_phys + reg); + /* Will not get here */ +} + +void __cpuidle C3_enter_v3(void) +{ + WARN_ON_ONCE(!irqs_disabled()); + raw_all_irq_disable(); + C3_wait_trap(false); + local_irq_disable(); +} + +#ifdef CONFIG_SMP +void clock_off_v3(void) +{ + unsigned long flags; + + /* Make sure we do not race with `callin_go` write */ + raw_all_irq_save(flags); + if (!cpumask_test_cpu(read_pic_id(), &callin_go)) + C3_wait_trap(true); + raw_all_irq_restore(flags); +} + +static void clock_on_v3_ipi(void *unused) +{ + /* Handling is done in handle_wtrap() */ +} + +void clock_on_v3(int cpu) +{ + /* Wake CPU disabled by clk_off(CPU_HOTPLUG_CLOCK_OFF) */ + nmi_call_function_single_offline(cpu, clock_on_v3_ipi, NULL, true, 0); +} +#endif diff --git a/arch/e2k/kernel/cpu/iset_v5.c b/arch/e2k/kernel/cpu/iset_v5.c new file mode 100644 index 000000000000..ae82056e3700 --- /dev/null +++ b/arch/e2k/kernel/cpu/iset_v5.c @@ -0,0 +1,454 @@ +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/******************************* DEBUG DEFINES ********************************/ +#undef DEBUG_PF_MODE +#define DEBUG_PF_MODE 0 /* Page fault */ +#define DebugPF(...) 
DebugPrint(DEBUG_PF_MODE ,##__VA_ARGS__) +/******************************************************************************/ + +u64 native_get_cu_hw1_v5() +{ + return NATIVE_READ_CU_HW1_REG_VALUE(); +} + +void native_set_cu_hw1_v5(u64 cu_hw1) +{ + NATIVE_WRITE_CU_HW1_REG_VALUE(cu_hw1); + E2K_WAIT_ALL; +} + +__section(".entry.text") +notrace __interrupt +void save_local_gregs_v5(struct local_gregs *gregs, bool is_signal) +{ + gregs->bgr = NATIVE_READ_BGR_REG(); + init_BGR_reg(); /* enable whole GRF */ + if (is_signal) + SAVE_GREGS_SIGNAL(gregs->g, E2K_ISET_V5); + NATIVE_WRITE_BGR_REG(gregs->bgr); +} + +__section(".entry.text") +notrace __interrupt +void save_kernel_gregs_v5(struct kernel_gregs *gregs) +{ + NATIVE_SAVE_GREG(&gregs->g[GUEST_VCPU_STATE_GREGS_PAIRS_INDEX], + &gregs->g[CURRENT_TASK_GREGS_PAIRS_INDEX], + GUEST_VCPU_STATE_GREG, CURRENT_TASK_GREG, E2K_ISET_V5); + NATIVE_SAVE_GREG(&gregs->g[MY_CPU_OFFSET_GREGS_PAIRS_INDEX], + &gregs->g[SMP_CPU_ID_GREGS_PAIRS_INDEX], + MY_CPU_OFFSET_GREG, SMP_CPU_ID_GREG, E2K_ISET_V5); +} + +notrace __interrupt +void save_gregs_on_mask_v5(struct global_regs *gregs, bool dirty_bgr, + unsigned long mask_not_save) +{ + gregs->bgr = NATIVE_READ_BGR_REG(); + init_BGR_reg(); /* enable whole GRF */ + if (mask_not_save == (GLOBAL_GREGS_USER_MASK | KERNEL_GREGS_MASK)) { + /* it is same case as save all excluding global register */ + /* %g0 - %g15 and registers used by kernel %gN - %gN+3 */ + /* now N=16 see asm/glob_regs.h */ + SAVE_GREGS_EXCEPT_GLOBAL_AND_KERNEL(gregs->g, E2K_ISET_V5); + } else if (mask_not_save == KERNEL_GREGS_MASK) { + /* it is same case as save all excluding registers used */ + /* by kernel %gN - %gN+3 now N=16 see asm/glob_regs.h */ + SAVE_GREGS_EXCEPT_KERNEL(gregs->g, E2K_ISET_V5); + } else if (mask_not_save == 0) { + /* save all registers */ + SAVE_ALL_GREGS(gregs->g, E2K_ISET_V5); + } else { + /* common case with original mask */ + DO_SAVE_GREGS_ON_MASK(gregs->g, E2K_ISET_V5, mask_not_save); + } + if 
(!dirty_bgr) + NATIVE_WRITE_BGR_REG(gregs->bgr); +} + +__section(".entry.text") +notrace __interrupt +void save_gregs_v5(struct global_regs *gregs) +{ + gregs->bgr = NATIVE_READ_BGR_REG(); + init_BGR_reg(); /* enable whole GRF */ + SAVE_GREGS(gregs->g, true, E2K_ISET_V5); + NATIVE_WRITE_BGR_REG(gregs->bgr); +} + +__section(".entry.text") +notrace __interrupt +void save_gregs_dirty_bgr_v5(struct global_regs *gregs) +{ + gregs->bgr = NATIVE_READ_BGR_REG(); + init_BGR_reg(); /* enable whole GRF */ + SAVE_GREGS(gregs->g, true, E2K_ISET_V5); +} + +__section(".entry.text") +notrace __interrupt +void restore_local_gregs_v5(const struct local_gregs *gregs, bool is_signal) +{ + init_BGR_reg(); + if (is_signal) + RESTORE_GREGS_SIGNAL(gregs->g, E2K_ISET_V5); + NATIVE_WRITE_BGR_REG(gregs->bgr); +} + +notrace __interrupt +void restore_gregs_on_mask_v5(struct global_regs *gregs, bool dirty_bgr, + unsigned long mask_not_restore) +{ + init_BGR_reg(); /* enable whole GRF */ + if (mask_not_restore == (GLOBAL_GREGS_USER_MASK | KERNEL_GREGS_MASK)) { + /* it is same case as restore all excluding global register */ + /* %g0 - %g15 and registers used by kernel %gN - %gN+3 */ + /* now N=16 see asm/glob_regs.h */ + RESTORE_GREGS_EXCEPT_GLOBAL_AND_KERNEL(gregs->g, E2K_ISET_V5); + } else if (mask_not_restore == KERNEL_GREGS_MASK) { + /* it is same case as restore all excluding registers used */ + /* by kernel %gN - %gN+3 now N=16 see asm/glob_regs.h */ + RESTORE_GREGS_EXCEPT_KERNEL(gregs->g, E2K_ISET_V5); + } else if (mask_not_restore == 0) { + /* restore all registers */ + RESTORE_ALL_GREGS(gregs->g, E2K_ISET_V5); + } else { + /* common case with original mask */ + DO_RESTORE_GREGS_ON_MASK(gregs->g, E2K_ISET_V5, + mask_not_restore); + } + if (!dirty_bgr) + NATIVE_WRITE_BGR_REG(gregs->bgr); +} + +__section(".entry.text") +notrace __interrupt +void restore_gregs_v5(const struct global_regs *gregs) +{ + init_BGR_reg(); /* enable whole GRF */ + RESTORE_GREGS(gregs->g, true, E2K_ISET_V5); + 
NATIVE_WRITE_BGR_REG(gregs->bgr); +} + +notrace +void qpswitchd_sm(int greg) +{ + E2K_QPSWITCHD_SM_GREG(greg); +} + +#ifdef CONFIG_USE_AAU +static inline u64 signext(u64 val, int nr) +{ + s64 sval = (s64) val; + + return (u64) ((sval << (63 - nr)) >> (63 - nr)); +} + +/* calculate current array prefetch buffer indices values + * (see chapter 1.10.2 in "Scheduling") */ +void calculate_aau_aaldis_aaldas_v5(const struct pt_regs *regs, + struct thread_info *ti, e2k_aau_t *context) +{ + u64 areas, area_num, iter_count; + e2k_aalda_t *aaldas = ti->aalda; + u64 *aaldis = context->aaldi; + /* get_user() is used here */ + WARN_ON_ONCE(regs && __raw_irqs_disabled()); + + memset(aaldas, 0, AALDAS_REGS_NUM * sizeof(aaldas[0])); + memset(aaldis, 0, AALDIS_REGS_NUM * sizeof(aaldis[0])); + + /* It is first guest run to set initial state of AAU */ + if (unlikely(!regs)) + return; + + /* See bug 33621 comment 2 and bug 52350 comment 29 */ + iter_count = regs->ilcr1 - regs->lsr1; + if (get_ldmc(regs->lsr) && !regs->lsr1) + iter_count += get_ecnt(regs->ilcr) - get_ecnt(regs->lsr) - 1; + + /* + * Calculate areas in the following order: + * + * 0 -> 32 -> 1 -> 33 -> 2 -> ... -> 62 -> 31 -> 63 + * + * until all the set bits in aaldv are checked. + */ + for (area_num = 0, areas = AW(context->aaldv); areas != 0; + areas &= ~(1UL << area_num), + area_num = (area_num < 32) ? 
(area_num + 32) + : (area_num - 31)) { + e2k_fapb_instr_t *fapb_addr; + e2k_fapb_instr_t fapb; + e2k_aalda_t tmp_aalda; + u64 step, ind, iter; + + if (!(AW(context->aaldv) & (1UL << area_num))) + continue; + + iter = iter_count + ((AW(context->aaldm) & (1UL << area_num)) + >> area_num); + + if (iter == 0) { + AW(context->aaldv) &= ~(1UL << area_num); + continue; + } + + if (area_num < 32) + fapb_addr = (e2k_fapb_instr_t *) + (AS(regs->ctpr2).ta_base + 16 * area_num); + else + fapb_addr = (e2k_fapb_instr_t *) + (AS(regs->ctpr2).ta_base + 8 + + 16 * (area_num - 32)); + +# if __LCC__ >= 120 + /* + * tmp is used to avoid compiler issue with passing + * union's fields into inline asm. Bug 76907. + */ + u64 tmp; + long ret_get_user; + + ret_get_user = host_get_user(tmp, (u64 *)fapb_addr, regs); + + if (ret_get_user) { + if (ret_get_user == -EAGAIN) + break; + else + goto die; + } + fapb.word = tmp; +# else + long ret_get_user; + + ret_get_user = host_get_user(AW(fapb), (u64 *)fapb_addr, regs); + if (ret_get_user) { + if (ret_get_user == -EAGAIN) + break; + else + goto die; + } +# endif + + if (area_num >= 32 && AS(fapb).dpl) { + /* See bug #53880 */ + pr_info_once("%s [%d]: AAU is working in dpl mode " + "(FAPB at %px)\n", + current->comm, current->pid, fapb_addr); + + aaldas[area_num] = aaldas[area_num - 32]; + aaldis[area_num] = aaldis[area_num - 32]; + continue; + } + + if (!AS(fapb).fmt) + continue; + + step = context->aaincrs[AS(fapb).incr]; + step = signext(step, 48); + step = step << (AS(fapb).fmt - 1); + + ind = context->aainds[AS(fapb).ind]; + ind = signext(ind, 48); + ind += AS(fapb).disp + step * iter; + + AS(tmp_aalda).exc = 0; + AS(tmp_aalda).root = (AS(context->aads[AS(fapb).d]).lo.tag == + AAD_AAUDS); + + aaldas[area_num] = tmp_aalda; + + aaldis[area_num] = ind; + } + + return; + +die: + force_sig(SIGSEGV); +} + +/* See chapter 1.10.3 in "Scheduling" */ +void do_aau_fault_v5(int aa_field, struct pt_regs *regs) +{ + const e2k_aau_t *const aau_regs = 
regs->aau_context; + u32 aafstr = aau_regs->aafstr; + unsigned int aa_bit = 0; + u64 iter_count; + tc_cond_t condition; + tc_mask_t mask; + long ret_get_user; + + regs->trap->nr_page_fault_exc = exc_data_page_num; + + /* See bug 33621 comment 2 and bug 52350 comment 29 */ + iter_count = regs->ilcr1 - regs->lsr1; + if (get_ldmc(regs->lsr) && !regs->lsr1) + iter_count += get_ecnt(regs->ilcr) - get_ecnt(regs->lsr) - 1; + + DebugPF("do_aau_fault: enter aau fault handler, TICKS = %ld\n" + "aa_field = 0x%x\ndo_aau_fault: aafstr = 0x%x\n", + get_cycles(), aa_field, aafstr); + + /* condition.store = 0 + * condition.fault_type = 0 */ + AW(condition) = 0; + AS(condition).fmt = LDST_BYTE_FMT; + AS(condition).spec = 1; + AW(mask) = 0; + + while (aa_bit < 4) { + u64 area_num, mrng, addr1, addr2, step, ind, d_num; + e2k_fapb_instr_t *fapb_addr; + e2k_fapb_instr_t fapb; + int ret; + + if (!(aa_field & 0x1) || !(aafstr & 0x1)) + goto next_area; + + area_num = (aafstr >> 1) & 0x3f; + DebugPF("do_aau_fault: got interrupt on %d mova channel, " + "area %lld\n", + aa_bit, area_num); + + if (area_num < 32) + fapb_addr = (e2k_fapb_instr_t *)(AS(regs->ctpr2).ta_base + + 16 * area_num); + else + fapb_addr = (e2k_fapb_instr_t *)(AS(regs->ctpr2).ta_base + + 16 * (area_num - 32) + 8); + + ret_get_user = host_get_user(AW(fapb), (u64 *)fapb_addr, regs); + if (ret_get_user) { + if (ret_get_user == -EAGAIN) + break; + else + goto die; + } + + if (area_num >= 32 && AS(fapb).dpl) { + /* See bug #53880 */ + pr_notice_once("%s [%d]: AAU is working in dpl mode " + "(FAPB at %px)\n", + current->comm, current->pid, fapb_addr); + area_num -= 32; + fapb_addr -= 1; + ret_get_user = host_get_user(AW(fapb), + (u64 *)fapb_addr, regs); + if (ret_get_user) { + if (ret_get_user == -EAGAIN) + break; + else + goto die; + } + } + + if (!AS(aau_regs->aasr).iab) { + WARN_ONCE(1, "%s [%d]: AAU fault happened but iab in " + "AASR register was not set\n", + current->comm, current->pid); + goto die; + } + + step = 
aau_regs->aaincrs[AS(fapb).incr]; + step = signext(step, 48); + step = step << (AS(fapb).fmt - 1); + + ind = aau_regs->aainds[AS(fapb).ind]; + ind = signext(ind, 48); + ind += AS(fapb).disp + step * iter_count; + + mrng = AS(fapb).mrng ?: 32; + + d_num = AS(fapb).d; + if (AS(aau_regs->aads[d_num]).lo.tag == AAD_AAUSAP) { + addr1 = AS(aau_regs->aads[d_num]).lo.sap_base + ind + + (regs->stacks.top & ~0xffffffffULL); + } else { + addr1 = AS(aau_regs->aads[d_num]).lo.ap_base + ind; + } + addr2 = addr1 + mrng - 1; + if (unlikely((addr1 & ~E2K_VA_MASK) || (addr2 & ~E2K_VA_MASK))){ + pr_notice_once("Bad address: addr 0x%llx, " + "ind 0x%llx, mrng 0x%llx," + " step 0x%llx, fapb 0x%llx\n", + addr1, ind, mrng, step, + (unsigned long long)AW(fapb)); + + addr1 &= E2K_VA_MASK; + addr2 &= E2K_VA_MASK; + } + DebugPF("do_aau_fault: address1 = 0x%llx, address2 = 0x%llx, " + "mrng=%lld\n", + addr1, addr2, mrng); + + ret = do_aau_page_fault(regs, addr1, condition, mask, aa_bit); + if (ret) { + if (ret == 2) { + /* + * Special case of trap handling on host: + * host inject the trap to guest + */ + return; + } + goto die; + } + if ((addr1 & PAGE_MASK) != (addr2 & PAGE_MASK)) { + ret = do_aau_page_fault(regs, addr2, condition, mask, + aa_bit); + if (ret) { + if (ret == 2) { + /* + * Special case of trap handling on host: + * host inject the trap to guest + */ + return; + } + goto die; + } + } + +next_area: + aa_bit++; + aafstr >>= 8; + aa_field >>= 1; + } + + DebugPF("do_aau_fault: exit aau fault handler, TICKS = %ld\n", + get_cycles()); + + return; + +die: + force_sig(SIGSEGV); +} + +notrace void save_aaldi_v5(u64 *aaldis) +{ + SAVE_AALDIS_V5(aaldis); +} + +/* + * It's taken that aasr was get earlier(from get_aau_context caller) + * and comparison with aasr.iab was taken. 
+ */ +notrace void get_aau_context_v5(e2k_aau_t *context) +{ + GET_AAU_CONTEXT_V5(context); +} +#endif /* CONFIG_USE_AAU */ + diff --git a/arch/e2k/kernel/cpu/iset_v6.c b/arch/e2k/kernel/cpu/iset_v6.c new file mode 100644 index 000000000000..5647f3b4bacd --- /dev/null +++ b/arch/e2k/kernel/cpu/iset_v6.c @@ -0,0 +1,688 @@ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/******************************* DEBUG DEFINES ********************************/ +#undef DEBUG_PF_MODE +#define DEBUG_PF_MODE 0 /* Page fault */ +#define DebugPF(...) DebugPrint(DEBUG_PF_MODE ,##__VA_ARGS__) +/******************************************************************************/ + +unsigned long rrd_v6(int reg) +{ + switch (reg) { + case E2K_REG_CORE_MODE: + return NATIVE_READ_CORE_MODE_REG_VALUE(); + case E2K_REG_HCEM: + return READ_HCEM_REG(); + case E2K_REG_HCEB: + return READ_HCEB_REG(); + case E2K_REG_OSCUTD: + return NATIVE_READ_OSCUTD_REG_VALUE(); + case E2K_REG_OSCUIR: + return NATIVE_READ_OSCUIR_REG_VALUE(); + } + + return 0; +} + +void rwd_v6(int reg, unsigned long value) +{ + switch (reg) { + case E2K_REG_CORE_MODE: + NATIVE_WRITE_CORE_MODE_REG_VALUE(value); + return; + case E2K_REG_HCEM: + WRITE_HCEM_REG(value); + return; + case E2K_REG_HCEB: + WRITE_HCEB_REG(value); + return; + case E2K_REG_OSCUTD: + NATIVE_WRITE_OSCUTD_REG_VALUE(value); + return; + case E2K_REG_OSCUIR: + NATIVE_WRITE_OSCUIR_REG_VALUE(value); + return; + } +} + +void save_dimtp_v6(e2k_dimtp_t *dimtp) +{ + dimtp->lo = NATIVE_GET_DSREG_CLOSED(dimtp.lo); + dimtp->hi = NATIVE_GET_DSREG_CLOSED(dimtp.hi); +} + +void restore_dimtp_v6(const e2k_dimtp_t *dimtp) +{ + NATIVE_SET_DSREGS_CLOSED_NOEXC(dimtp.lo, dimtp.hi, dimtp->lo, dimtp->hi, 4); +} + +void clear_dimtp_v6(void) +{ + NATIVE_SET_DSREGS_CLOSED_NOEXC(dimtp.lo, dimtp.hi, 0ull, 0ull, 4); +} + +#ifdef 
CONFIG_MLT_STORAGE +static bool read_MLT_entry_v6(e2k_mlt_entry_t *mlt, int entry_num) +{ + AW(mlt->dw0) = NATIVE_READ_MLT_REG( + (REG_MLT_TYPE << REG_MLT_TYPE_SHIFT) | + (entry_num << REG_MLT_N_SHIFT)); + + if (!AS_V6_STRUCT(mlt->dw0).val) + return false; + + AW(mlt->dw1) = NATIVE_READ_MLT_REG(1 << REG_MLT_DW_SHIFT | + REG_MLT_TYPE << REG_MLT_TYPE_SHIFT | + entry_num << REG_MLT_N_SHIFT); + AW(mlt->dw2) = NATIVE_READ_MLT_REG(2 << REG_MLT_DW_SHIFT | + REG_MLT_TYPE << REG_MLT_TYPE_SHIFT | + entry_num << REG_MLT_N_SHIFT); + + return true; +} + +void get_and_invalidate_MLT_context_v6(e2k_mlt_t *mlt_state) +{ + int i; + + mlt_state->num = 0; + + for (i = 0; i < NATIVE_MLT_SIZE; i++) { + e2k_mlt_entry_t *mlt = &mlt_state->mlt[mlt_state->num]; + + if (read_MLT_entry_v6(mlt, i)) + mlt_state->num++; + } + + NATIVE_SET_MMUREG(mlt_inv, 0); +} +#endif + +unsigned long native_read_MMU_OS_PPTB_reg_value(void) +{ + return NATIVE_READ_MMU_OS_PPTB_REG_VALUE(); +} +void native_write_MMU_OS_PPTB_reg_value(unsigned long value) +{ + NATIVE_WRITE_MMU_OS_PPTB_REG_VALUE(value); +} + +unsigned long native_read_MMU_OS_VPTB_reg_value(void) +{ + return NATIVE_READ_MMU_OS_VPTB_REG_VALUE(); +} +void native_write_MMU_OS_VPTB_reg_value(unsigned long value) +{ + NATIVE_WRITE_MMU_OS_VPTB_REG_VALUE(value); +} + +unsigned long native_read_MMU_OS_VAB_reg_value(void) +{ + return NATIVE_READ_MMU_OS_VAB_REG_VALUE(); +} +void native_write_MMU_OS_VAB_reg_value(unsigned long value) +{ + NATIVE_WRITE_MMU_OS_VAB_REG_VALUE(value); +} + +#if defined(CONFIG_KVM_HW_VIRTUALIZATION) && \ + !defined(CONFIG_KVM_GUEST_KERNEL) +static void clear_guest_epic(void) +{ + union cepic_ctrl2 reg; + + reg.raw = epic_read_w(CEPIC_CTRL2); + reg.bits.clear_gst = 1; + epic_write_w(CEPIC_CTRL2, reg.raw); +} + +static void save_epic_context(struct kvm_vcpu_arch *vcpu) +{ + epic_page_t *cepic = vcpu->hw_ctxt.cepic; + union cepic_epic_int reg_epic_int; + unsigned int i; + + /* Shuld not happen: scheduler is always called with open 
interrupts + * so CEPIC_EPIC_INT must have been delivered before calling vcpu_put + * (and in case we are in kvm_arch_vcpu_blocking() - it is also called + * with open interrupts). */ + reg_epic_int.raw = epic_read_w(CEPIC_EPIC_INT); + WARN_ON_ONCE(reg_epic_int.bits.stat); + + kvm_epic_timer_stop(false); + kvm_epic_invalidate_dat(vcpu); + + cepic->ctrl = epic_read_guest_w(CEPIC_CTRL); + cepic->id = epic_read_guest_w(CEPIC_ID); + cepic->cpr = epic_read_guest_w(CEPIC_CPR); + cepic->esr = epic_read_guest_w(CEPIC_ESR); + cepic->esr2.raw = epic_read_guest_w(CEPIC_ESR2); + cepic->icr.raw = epic_read_guest_d(CEPIC_ICR); + cepic->timer_lvtt.raw = epic_read_guest_w(CEPIC_TIMER_LVTT); + cepic->timer_init = epic_read_guest_w(CEPIC_TIMER_INIT); + cepic->timer_cur = epic_read_guest_w(CEPIC_TIMER_CUR); + cepic->timer_div = epic_read_guest_w(CEPIC_TIMER_DIV); + cepic->svr = epic_read_guest_w(CEPIC_SVR); + cepic->pnmirr_mask = epic_read_guest_w(CEPIC_PNMIRR_MASK); + + /* Save PMIRR, PNMIRR, ESR_NEW and CIR, and clear them in hardware */ + for (i = 0; i < CEPIC_PMIRR_NR_DREGS; i++) { + u64 pmirr_reg = epic_read_guest_d(CEPIC_PMIRR + i * 8); + u64 pmirr_old = atomic64_fetch_or(pmirr_reg, &cepic->pmirr[i]); + u64 pmirr_new = pmirr_old | pmirr_reg; + if (pmirr_new) + trace_save_pmirr(i, pmirr_new); + } + atomic_or(epic_read_guest_w(CEPIC_PNMIRR), &cepic->pnmirr); + if (cepic->pnmirr.counter) + trace_save_pnmirr(cepic->pnmirr.counter); + + atomic_or(epic_read_guest_w(CEPIC_ESR_NEW), &cepic->esr_new); + cepic->cir.raw = epic_read_guest_w(CEPIC_CIR); + if (cepic->cir.bits.stat) + trace_save_cir(cepic->cir.raw); + + WARN_ONCE(cepic->icr.bits.stat || cepic->esr2.bits.stat || + cepic->timer_lvtt.bits.stat, + "CEPIC stat bit is set upon guest saving: icr 0x%llx, esr2 0x%x, timer_lvtt 0x%x", + cepic->icr.raw, cepic->esr2.raw, cepic->timer_lvtt.raw); + + clear_guest_epic(); +} + +static void restore_epic_context(const struct kvm_vcpu_arch *vcpu) +{ + epic_page_t *cepic = vcpu->hw_ctxt.cepic; + 
unsigned int i, j, epic_pnmirr; + unsigned long epic_pmirr; + + kvm_hv_epic_load(arch_to_vcpu(vcpu)); + + /* + * If cir.stat = 1, then cir.vect should be raised in PMIRR instead + * CEPIC_CIR is not restored here to avoid overwriting another interrupt + */ + if (cepic->cir.bits.stat) { + unsigned int vector = cepic->cir.bits.vect; + + trace_restore_cir(cepic->cir.raw); + set_bit(vector & 0x3f, + (void *)&cepic->pmirr[vector >> 6].counter); + cepic->cir.raw = 0; + } + epic_write_guest_w(CEPIC_CTRL, cepic->ctrl); + epic_write_guest_w(CEPIC_ID, cepic->id); + epic_write_guest_w(CEPIC_CPR, cepic->cpr); + epic_write_guest_w(CEPIC_ESR, cepic->esr); + epic_write_guest_w(CEPIC_ESR2, cepic->esr2.raw); + epic_write_guest_d(CEPIC_ICR, cepic->icr.raw); + epic_write_guest_w(CEPIC_TIMER_LVTT, cepic->timer_lvtt.raw); + epic_write_guest_w(CEPIC_TIMER_INIT, cepic->timer_init); + epic_write_guest_w(CEPIC_TIMER_CUR, cepic->timer_cur); + epic_write_guest_w(CEPIC_TIMER_DIV, cepic->timer_div); + epic_write_guest_w(CEPIC_SVR, cepic->svr); + epic_write_guest_w(CEPIC_PNMIRR_MASK, cepic->pnmirr_mask); + for (i = 0; i < CEPIC_PMIRR_NR_DREGS; i++) { + epic_pmirr = cepic->pmirr[i].counter; + if (epic_pmirr) + cepic->pmirr[i].counter = 0; + if (epic_bgi_mode) { + for (j = 0; j < 64; j++) + if (cepic->pmirr_byte[64 * i + j]) { + epic_pmirr |= 1UL << j; + cepic->pmirr_byte[64 * i + j] = 0; + } + } + + if (epic_pmirr) { + epic_write_d(CEPIC_PMIRR_OR + i * 8, epic_pmirr); + trace_restore_pmirr(i, epic_pmirr); + } + } + epic_pnmirr = cepic->pnmirr.counter; + if (epic_pnmirr) { + atomic_set(&cepic->pnmirr, 0); + trace_restore_pnmirr(epic_pnmirr); + } + if (epic_bgi_mode) { + for (j = 5; j < 14; j++) + if (cepic->pnmirr_byte[j]) { + epic_pnmirr |= 1UL << (j + 4); + cepic->pnmirr_byte[j] = 0; + } + } + epic_write_w(CEPIC_PNMIRR_OR, epic_pnmirr); + epic_write_w(CEPIC_ESR_NEW_OR, cepic->esr_new.counter); + cepic->esr_new.counter = 0; + + kvm_epic_timer_start(); + kvm_epic_enable_int(); +} + +void 
kvm_epic_vcpu_blocking(struct kvm_vcpu_arch *vcpu) +{ + save_epic_context(vcpu); +} + +void kvm_epic_vcpu_unblocking(struct kvm_vcpu_arch *vcpu) +{ + restore_epic_context(vcpu); +} + +void save_kvm_context_v6(struct kvm_vcpu_arch *vcpu) +{ + struct kvm_hw_cpu_context *hw_ctxt = &vcpu->hw_ctxt; + unsigned long flags; + struct kvm_arch *ka = &arch_to_vcpu(vcpu)->kvm->arch; + + /* + * Stack registers + */ + AW(hw_ctxt->sh_psp_lo) = READ_SH_PSP_LO_REG_VALUE(); + AW(hw_ctxt->sh_psp_hi) = READ_SH_PSP_HI_REG_VALUE(); + AW(hw_ctxt->sh_pcsp_lo) = READ_SH_PCSP_LO_REG_VALUE(); + AW(hw_ctxt->sh_pcsp_hi) = READ_SH_PCSP_HI_REG_VALUE(); + AW(hw_ctxt->sh_pshtp) = READ_SH_PSHTP_REG_VALUE(); + hw_ctxt->sh_pcshtp = READ_SH_PCSHTP_REG_VALUE(); + AW(hw_ctxt->sh_wd) = READ_SH_WD_REG_VALUE(); + AW(hw_ctxt->bu_psp_lo) = READ_BU_PSP_LO_REG_VALUE(); + AW(hw_ctxt->bu_psp_hi) = READ_BU_PSP_HI_REG_VALUE(); + AW(hw_ctxt->bu_pcsp_lo) = READ_BU_PCSP_LO_REG_VALUE(); + AW(hw_ctxt->bu_pcsp_hi) = READ_BU_PCSP_HI_REG_VALUE(); + + /* + * MMU shadow context + */ + hw_ctxt->sh_mmu_cr = READ_SH_MMU_CR_REG_VALUE(); + hw_ctxt->sh_pid = READ_SH_PID_REG_VALUE(); + hw_ctxt->sh_os_pptb = READ_SH_OS_PPTB_REG_VALUE(); + hw_ctxt->gp_pptb = READ_GP_PPTB_REG_VALUE(); + hw_ctxt->sh_os_vptb = READ_SH_OS_VPTB_REG_VALUE(); + hw_ctxt->sh_os_vab = READ_SH_OS_VAB_REG_VALUE(); + hw_ctxt->gid = READ_GID_REG_VALUE(); + + /* + * CPU shadow context + */ + AW(hw_ctxt->sh_oscud_lo) = READ_SH_OSCUD_LO_REG_VALUE(); + AW(hw_ctxt->sh_oscud_hi) = READ_SH_OSCUD_HI_REG_VALUE(); + AW(hw_ctxt->sh_osgd_lo) = READ_SH_OSGD_LO_REG_VALUE(); + AW(hw_ctxt->sh_osgd_hi) = READ_SH_OSGD_HI_REG_VALUE(); + AW(hw_ctxt->sh_oscutd) = READ_SH_OSCUTD_REG_VALUE(); + AW(hw_ctxt->sh_oscuir) = READ_SH_OSCUIR_REG_VALUE(); + + hw_ctxt->sh_osr0 = READ_SH_OSR0_REG_VALUE(); + + raw_spin_lock_irqsave(&ka->sh_sclkr_lock, flags); + /* The last still runnig vcpu saves it sclkm3. 
+ * Guest time run paused */ + if (ka->num_sclkr_run-- == 1) { + ka->sh_sclkm3 = (long long) READ_SH_SCLKM3_REG_VALUE() - + (long long) raw_read_sclkr(); + } + raw_spin_unlock_irqrestore(&ka->sh_sclkr_lock, flags); + + AW(hw_ctxt->sh_core_mode) = READ_SH_CORE_MODE_REG_VALUE(); + + /* + * VIRT_CTRL_* registers + */ + AW(hw_ctxt->virt_ctrl_cu) = READ_VIRT_CTRL_CU_REG_VALUE(); + AW(hw_ctxt->virt_ctrl_mu) = READ_VIRT_CTRL_MU_REG_VALUE(); + hw_ctxt->g_w_imask_mmu_cr = READ_G_W_IMASK_MMU_CR_REG_VALUE(); + + /* + * INTC_INFO_* registers have to be saved immediately upon + * interception to handle it, so they are not saved here, + * but the hardware pointers should be cleared and VCPU marked as + * updated to recover ones if need. + */ + READ_INTC_PTR_CU(); + kvm_set_intc_info_cu_is_updated(arch_to_vcpu(vcpu)); + READ_INTC_PTR_MU(); + kvm_set_intc_info_mu_is_updated(arch_to_vcpu(vcpu)); + + /* + * CEPIC context + * See comment before kvm_arch_vcpu_blocking() for details + * about KVM_MP_STATE_HALTED + */ + if (cpu_has(CPU_FEAT_EPIC) && vcpu->mp_state != KVM_MP_STATE_HALTED) + save_epic_context(vcpu); + + /* + * Binco context + */ + hw_ctxt->u2_pptb = NATIVE_GET_MMUREG(u2_pptb); + hw_ctxt->pid2 = NATIVE_GET_MMUREG(pid2); + hw_ctxt->mpt_b = NATIVE_GET_MMUREG(mpt_b); + hw_ctxt->pci_l_b = NATIVE_GET_MMUREG(pci_l_b); + hw_ctxt->ph_h_b = NATIVE_GET_MMUREG(ph_h_b); + hw_ctxt->ph_hi_l_b = NATIVE_GET_MMUREG(ph_hi_l_b); + hw_ctxt->ph_hi_h_b = NATIVE_GET_MMUREG(ph_hi_h_b); + hw_ctxt->pat = NATIVE_GET_MMUREG(pat); + hw_ctxt->pdpte0 = NATIVE_GET_MMUREG(pdpte0); + hw_ctxt->pdpte1 = NATIVE_GET_MMUREG(pdpte1); + hw_ctxt->pdpte2 = NATIVE_GET_MMUREG(pdpte2); + hw_ctxt->pdpte3 = NATIVE_GET_MMUREG(pdpte3); +} + +void restore_kvm_context_v6(const struct kvm_vcpu_arch *vcpu) +{ + const struct kvm_hw_cpu_context *hw_ctxt = &vcpu->hw_ctxt; + unsigned long flags; + struct kvm_arch *ka = &arch_to_vcpu(vcpu)->kvm->arch; + + /* + * Stack registers + */ + 
WRITE_SH_PSP_LO_REG_VALUE(AW(hw_ctxt->sh_psp_lo)); + WRITE_SH_PSP_HI_REG_VALUE(AW(hw_ctxt->sh_psp_hi)); + WRITE_SH_PCSP_LO_REG_VALUE(AW(hw_ctxt->sh_pcsp_lo)); + WRITE_SH_PCSP_HI_REG_VALUE(AW(hw_ctxt->sh_pcsp_hi)); + WRITE_SH_PSHTP_REG_VALUE(AW(hw_ctxt->sh_pshtp)); + WRITE_SH_PCSHTP_REG_SVALUE(hw_ctxt->sh_pcshtp); + WRITE_SH_WD_REG_VALUE(AW(hw_ctxt->sh_wd)); + WRITE_BU_PSP_LO_REG_VALUE(AW(hw_ctxt->bu_psp_lo)); + WRITE_BU_PSP_HI_REG_VALUE(AW(hw_ctxt->bu_psp_hi)); + WRITE_BU_PCSP_LO_REG_VALUE(AW(hw_ctxt->bu_pcsp_lo)); + WRITE_BU_PCSP_HI_REG_VALUE(AW(hw_ctxt->bu_pcsp_hi)); + + /* + * MMU shadow context + */ + WRITE_SH_MMU_CR_REG_VALUE(hw_ctxt->sh_mmu_cr); + WRITE_SH_PID_REG_VALUE(hw_ctxt->sh_pid); + WRITE_SH_OS_PPTB_REG_VALUE(hw_ctxt->sh_os_pptb); + WRITE_GP_PPTB_REG_VALUE(hw_ctxt->gp_pptb); + WRITE_SH_OS_VPTB_REG_VALUE(hw_ctxt->sh_os_vptb); + WRITE_SH_OS_VAB_REG_VALUE(hw_ctxt->sh_os_vab); + WRITE_GID_REG_VALUE(hw_ctxt->gid); + + /* + * CPU shadow context + */ + WRITE_SH_OSCUD_LO_REG_VALUE(AW(hw_ctxt->sh_oscud_lo)); + WRITE_SH_OSCUD_HI_REG_VALUE(AW(hw_ctxt->sh_oscud_hi)); + WRITE_SH_OSGD_LO_REG_VALUE(AW(hw_ctxt->sh_osgd_lo)); + WRITE_SH_OSGD_HI_REG_VALUE(AW(hw_ctxt->sh_osgd_hi)); + WRITE_SH_OSCUTD_REG_VALUE(AW(hw_ctxt->sh_oscutd)); + WRITE_SH_OSCUIR_REG_VALUE(AW(hw_ctxt->sh_oscuir)); + + WRITE_SH_OSR0_REG_VALUE(hw_ctxt->sh_osr0); + /* sclkm3 = sclkm3 + ("current raw_read_sclkr()" - + * "raw_read_sclkr() when last vcpu leaves cpu") + * sclkm3 has a summary time when each vcpu of guest was out of cpu + */ + raw_spin_lock_irqsave(&ka->sh_sclkr_lock, flags); + /* The first activated vcpu calculates sclkm3 for + * itself and all subsequent activated vcpu-s.*/ + if (ka->num_sclkr_run++ == 0) { + ka->sh_sclkm3 = (long long)raw_read_sclkr() + + ka->sh_sclkm3; + /* Guest time run resumed + * (including still sleeping vcpu-s) */ + } + WRITE_SH_SCLKM3_REG_VALUE(ka->sh_sclkm3); + raw_spin_unlock_irqrestore(&ka->sh_sclkr_lock, flags); + + 
WRITE_SH_CORE_MODE_REG_VALUE(AW(hw_ctxt->sh_core_mode)); + + /* + * VIRT_CTRL_* registers + */ + WRITE_VIRT_CTRL_CU_REG_VALUE(AW(hw_ctxt->virt_ctrl_cu)); + WRITE_VIRT_CTRL_MU_REG_VALUE(AW(hw_ctxt->virt_ctrl_mu)); + WRITE_G_W_IMASK_MMU_CR_REG_VALUE(hw_ctxt->g_w_imask_mmu_cr); + + /* + * INTC_INFO_* registers were saved immediately upon + * interception to handle it, and will be restored + * by interceptions handler if it need. + */ + + /* + * CEPIC context + */ + if (cpu_has(CPU_FEAT_EPIC) && vcpu->mp_state != KVM_MP_STATE_HALTED) + restore_epic_context(vcpu); + + /* + * Binco context + */ + NATIVE_SET_MMUREG(u2_pptb, hw_ctxt->u2_pptb); + NATIVE_SET_MMUREG(pid2, hw_ctxt->pid2); + NATIVE_SET_MMUREG(mpt_b, hw_ctxt->mpt_b); + NATIVE_SET_MMUREG(pci_l_b, hw_ctxt->pci_l_b); + NATIVE_SET_MMUREG(ph_h_b, hw_ctxt->ph_h_b); + NATIVE_SET_MMUREG(ph_hi_l_b, hw_ctxt->ph_hi_l_b); + NATIVE_SET_MMUREG(ph_hi_h_b, hw_ctxt->ph_hi_h_b); + NATIVE_SET_MMUREG(pat, hw_ctxt->pat); + NATIVE_SET_MMUREG(pdpte0, hw_ctxt->pdpte0); + NATIVE_SET_MMUREG(pdpte1, hw_ctxt->pdpte1); + NATIVE_SET_MMUREG(pdpte2, hw_ctxt->pdpte2); + NATIVE_SET_MMUREG(pdpte3, hw_ctxt->pdpte3); +} +#else /* !CONFIG_KVM_HW_VIRTUALIZATION || CONFIG_KVM_GUEST_KERNEL */ +void restore_kvm_context_v6(const struct kvm_vcpu_arch *vcpu) +{ +} + +void save_kvm_context_v6(struct kvm_vcpu_arch *vcpu) +{ +} +#endif /* CONFIG_KVM_HW_VIRTUALIZATION && !CONFIG_KVM_GUEST_KERNEL */ + +#ifdef CONFIG_USE_AAU +/* calculate current array prefetch buffer indices values + * (see chapter 1.10.2 in "Scheduling") */ +void calculate_aau_aaldis_aaldas_v6(const struct pt_regs *regs, + struct thread_info *ti, struct e2k_aau_context *context) +{ + memset(ti->aalda, 0, AALDAS_REGS_NUM * sizeof(ti->aalda[0])); +} + +/* See chapter 1.10.3 in "Scheduling" */ +void do_aau_fault_v6(int aa_field, struct pt_regs *regs) +{ + const e2k_aau_t *const aau_regs = regs->aau_context; + u32 aafstr = aau_regs->aafstr; + unsigned int aa_bit = 0; + tc_cond_t condition; + 
tc_mask_t mask; + long ret_get_user; + + regs->trap->nr_page_fault_exc = exc_data_page_num; + + DebugPF("do_aau_fault: enter aau fault handler, TICKS = %ld\n" + "aa_field = 0x%x\ndo_aau_fault: aafstr = 0x%x\n", + get_cycles(), aa_field, aafstr); + + /* condition.store = 0 + * condition.fault_type = 0 */ + AW(condition) = 0; + AS(condition).fmt = LDST_BYTE_FMT; + AS(condition).spec = 1; + AW(mask) = 0; + + while (aa_bit < 4) { + u64 area_num, mrng, addr1, addr2, d_num; + e2k_fapb_instr_t *fapb_addr; + e2k_fapb_instr_t fapb; + int ret; + + if (!(aa_field & 0x1) || !(aafstr & 0x1)) + goto next_area; + + area_num = (aafstr >> 1) & 0x3f; + DebugPF("do_aau_fault: got interrupt on %d mova channel, area %lld\n", + aa_bit, area_num); + + if (area_num < 32) + fapb_addr = (e2k_fapb_instr_t *)(AS(regs->ctpr2).ta_base + + 16 * area_num); + else + fapb_addr = (e2k_fapb_instr_t *)(AS(regs->ctpr2).ta_base + + 16 * (area_num - 32) + 8); + + ret_get_user = host_get_user(AW(fapb), (u64 *)fapb_addr, regs); + if (ret_get_user) { + if (ret_get_user == -EAGAIN) + break; + else + goto die; + } + + if (area_num >= 32 && AS(fapb).dpl) { + /* See bug #53880 */ + pr_notice_once("%s [%d]: AAU is working in dpl mode (FAPB at %px)\n", + current->comm, current->pid, fapb_addr); + area_num -= 32; + fapb_addr -= 1; + ret_get_user = host_get_user(AW(fapb), + (u64 *)fapb_addr, regs); + if (ret_get_user) { + if (ret_get_user == -EAGAIN) + break; + else + goto die; + } + } + + if (!AS(aau_regs->aasr).iab) { + WARN_ONCE(1, "%s [%d]: AAU fault happened but iab in AASR register was not set\n", + current->comm, current->pid); + goto die; + } + + mrng = AS(fapb).mrng ?: 32; + + d_num = AS(fapb).d; + if (AS(aau_regs->aads[d_num]).lo.tag == AAD_AAUSAP) { + addr1 = AS(aau_regs->aads[d_num]).lo.sap_base + + (regs->stacks.top & ~0xffffffffULL); + } else { + addr1 = AS(aau_regs->aads[d_num]).lo.ap_base; + } + addr1 += aau_regs->aaldi[area_num]; + addr2 = addr1 + mrng - 1; + if (unlikely((addr1 & ~E2K_VA_MASK) || 
(addr2 & ~E2K_VA_MASK))) { + pr_notice_once("Bad address: addr 0x%llx, ind 0x%llx, mrng 0x%llx, fapb 0x%llx\n", + addr1, aau_regs->aaldi[area_num], mrng, + (unsigned long long) AW(fapb)); + + addr1 &= E2K_VA_MASK; + addr2 &= E2K_VA_MASK; + } + DebugPF("do_aau_fault: address1 = 0x%llx, address2 = 0x%llx, mrng=%lld\n", + addr1, addr2, mrng); + + do_aau_page_fault(regs, addr1, condition, mask, aa_bit); + if (ret) { + if (ret == 2) { + /* + * Special case of trap handling on host: + * host inject the trap to guest + */ + return; + } + goto die; + } + if ((addr1 & PAGE_MASK) != (addr2 & PAGE_MASK)) { + ret = do_aau_page_fault(regs, addr2, condition, mask, + aa_bit); + if (ret) { + if (ret == 2) { + /* + * Special case of trap handling on host: + * host inject the trap to guest + */ + return; + } + goto die; + } + } + +next_area: + aa_bit++; + aafstr >>= 8; + aa_field >>= 1; + } + + DebugPF("do_aau_fault: exit aau fault handler, TICKS = %ld\n", + get_cycles()); + + return; + +die: + force_sig(SIGSEGV); +} +#endif /* CONFIG_USE_AAU */ + +/* mem_wait_idle() waits for interrupt or modification + * of need_resched. Note that there can be spurious + * wakeups as only whole cache lines can be watched. */ +static void __cpuidle mem_wait_idle(void) +{ + NATIVE_READ_MAS_D_CH(¤t_thread_info()->flags, + MAS_WATCH_FOR_MODIFICATION_V6, 0); + if (!need_resched()) + E2K_WAIT(_mem_mod | _int); +} + +void __cpuidle C1_enter_v6(void) +{ + if (IS_HV_GM()) { + /* Do not set TIF_POLLING_NRFLAG in guest since + * "wait int" here will be intercepted and guest + * will be put to sleep. 
*/ + mem_wait_idle(); + } else { + if (!current_set_polling_and_test()) + mem_wait_idle(); + current_clr_polling(); + } +} + +void __cpuidle C3_enter_v6(void) +{ + unsigned long flags; + unsigned int node = numa_node_id(); + phys_addr_t nbsr_phys = sic_get_node_nbsr_phys_base(node); + int core = read_pic_id() % cpu_max_cores_num(); + int reg = PMC_FREQ_CORE_N_SLEEP(core); + freq_core_sleep_t C3 = { .cmd = 3 }; + + raw_all_irq_save(flags); + + C3_WAIT_INT_V6(AW(C3), nbsr_phys + reg); + + if (cpu_has(CPU_HWBUG_E16C_SLEEP)) { + freq_core_sleep_t fr_state; + do { + fr_state.word = sic_read_node_nbsr_reg(node, reg); + } while (fr_state.status != 0 /* C0 */); + } + + raw_all_irq_restore(flags); +} diff --git a/arch/e2k/kernel/cpu/recovery_string_v5.S b/arch/e2k/kernel/cpu/recovery_string_v5.S new file mode 100644 index 000000000000..67dafcb1fce7 --- /dev/null +++ b/arch/e2k/kernel/cpu/recovery_string_v5.S @@ -0,0 +1,468 @@ +#include + +.text +.global $__recovery_memcpy_16 +.type __recovery_memcpy_16,@function +$__recovery_memcpy_16: +.ignore ld_st_style +/* + * dr0 - dst + * dr1 - src + * dr2 - len + * dr3 - strqp opcode + * dr4 - ldrqp opcode + * r5 - enable prefetching + * + * Does not return a value. + */ + { + setwd wsz = 0x14, nfx = 0 + ipd 1 + disp %ctpr2, 5f /* very_small_size */ + + setbn rsz = 0xb, rbs = 0x8, rcur = 0x0 + setbp psz = 0x1 + + /* dr14 holds the number of copied bytes + * in case pagefault happens */ + addd,4 0x0, 0x0, %dr14 + + addd,3 %dr4, 0x0, %dr10 + addd,5 %dr4, 0x10, %dr11 + } + { + ipd 1 + disp %ctpr1, 6f /* small_size */ + + addd,5 %dr4, 0x20, %dr12 + + /* %pred26 == 'true' if 'size' is zero (i.e. 
'size' < 16) */ + cmpbdb,0 %dr2, 0x10, %pred26 + /* %pred27 == 'false' if 'size' >= 32 bytes */ + cmpbdb,1 %dr2, 0x20, %pred27 + + /* %pred28 == 'false' if 'size' >= 48 bytes */ + cmpbdb,3 %dr2, 0x30, %pred28 + /* %pred25 == 'true' if 'size' <= 64 bytes */ + cmpledb,4 %dr2, 0x40, %pred25 + } + { + return %ctpr3 + + addd,5 %dr4, 0x30, %dr13 + + cmpbdb,0 %dr2, 0x80, %pred12 + /* %pred29 == 'false' if 'size' >= 64 bytes */ + cmpbdb,1 %dr2, 0x40, %pred29 + + /* If %pred6 is 'false' then the remaining 32-bytes + * tail has to be copied after the main copying loop + * which copies data in 64-bytes blocks. */ + cmpandedb,3 %dr2, 0x20, %pred6 + /* %pred7 == 'size' < 384 (minimum allowed size + * for the optimized copying algorythm - 12 cachelines + * for unrolling) */ + cmpbdb,4 %dr2, 0x180, %pred7 + } + { + /* %pred8 == 'true' if 'size' is a multiple of 32 */ + cmpandedb,1 %dr2, 0x10, %pred8 + + addd,0 %dr3, 0x0, %dr6 + addd,2 %dr3, 0x20, %dr8 + + addd,4 %dr3, 0x10, %dr7 + + ldrqp,3 [ %dr1 + %dr10 ], %db[10] ? ~ %pred26 + ldrqp,5 [ %dr1 + %dr11 ], %db[11] ? ~ %pred27 + } + { + addd,4 %dr3, 0x30, %dr9 + + ldrqp,3 [ %dr1 + %dr12 ], %db[22] ? ~ %pred28 + ldrqp,5 [ %dr1 + %dr13 ], %db[23] ? ~ %pred29 + + ct %ctpr2 ? %pred25 + } + { + ipd 0 + disp %ctpr2, 8f /* copy_tail_small */ + + addd,1 %dr1, 0x1a0, %db[0] + + /* Check whether prefetching is disabled */ + cmpesb,4 %r5, 0, %pred15 + + addd,0 %dr1, 0x40, %dr5 + addd,2 %dr1, 0x80, %dr4 + + addd,5 %dr1, %dr2, %dr3 + + /* If the block is small, use simple loop without unrolling */ + ct %ctpr1 ? 
%pred7 + } + { + ipd 0 + disp %ctpr3, 2f /* skip_prefetch_loop */ + + cmpbedb,4 %dr2, 0x360, %pred2 + + ldrqp,0 [ %dr5 + %dr10 ], %db[8] + ldrqp,2 [ %dr5 + %dr11 ], %db[9] + ldrqp,3 [ %dr5 + %dr12 ], %db[20] + ldrqp,5 [ %dr5 + %dr13 ], %db[21] + addd %dr5, 0x80, %dr5 + } + { + ipd 0 + disp %ctpr1, 1f /* prefetch */ + + cmpbedb,4 %dr2, 0x180, %pred3 + + ldrqp,0 [ %dr4 + %dr10 ], %db[6] + ldrqp,2 [ %dr4 + %dr11 ], %db[7] + ldrqp,3 [ %dr4 + %dr12 ], %db[18] + ldrqp,5 [ %dr4 + %dr13 ], %db[19] + addd %dr4, 0x80, %dr4 + } + { + ipd 1 + disp %ctpr2, 3f /* copy */ + + subd,4 %dr14, 0x40, %dr14 + + ldrqp,0 [ %dr5 + %dr10 ], %db[4] + ldrqp,2 [ %dr5 + %dr11 ], %db[5] + ldrqp,3 [ %dr5 + %dr12 ], %db[16] + ldrqp,5 [ %dr5 + %dr13 ], %db[17] + addd %dr5, 0x80, %dr5 + } + { + cmpbdb,4 %dr2, 0x560, %pred0 + + addd,1 %dr1, 0x1a0, %db[1] + + ldrqp,0 [ %dr4 + %dr10 ], %db[2] + ldrqp,2 [ %dr4 + %dr11 ], %db[3] + ldrqp,3 [ %dr4 + %dr12 ], %db[14] + ldrqp,5 [ %dr4 + %dr13 ], %db[15] + + ct %ctpr3 ? %pred15 + } + + /* Load the src block into the L2 cache - prefetching to L1 + * is neither practical (only 1 line is fetched per cycle) + * nor needed (this loop is unrolled enough to do not worry + * about latency). */ + { + subd,4 %dr3, 0x660, %dr4 + ldb,sm [ %dr1 + 0x160 ] MAS_BYPASS_L1_CACHE, %empty ? ~ %pred3 + ldb,sm [ %dr1 + 0x180 ] MAS_BYPASS_L1_CACHE, %empty ? ~ %pred3 + + ct %ctpr3 ? 
%pred2 + } +1: /* prefetch */ + { + /* pred1 = dr4 <= db[0] = + * = dr1 + dr2 - 0x660 <= dr1 + prefetched = + * = dr2 - prefetched <= 0x660 = + * = size - prefetched <= 0x660 */ + cmpbedb,4 %dr4, %db[0], %pred1 + ldb,0,sm [ %db[0] + 0 ] MAS_BYPASS_L1_CACHE, %empty + ldb,2,sm [ %db[0] + 0x40 ] MAS_BYPASS_L1_CACHE, %empty + ldb,3,sm [ %db[0] + 0x80 ] MAS_BYPASS_L1_CACHE, %empty + ldb,5,sm [ %db[0] + 0xc0 ] MAS_BYPASS_L1_CACHE, %empty + addd %db[0], 0x200, %db[0] + } + { + ldb,0,sm [ %db[1] + 0x100 ] MAS_BYPASS_L1_CACHE, %empty + ldb,2,sm [ %db[1] + 0x140 ] MAS_BYPASS_L1_CACHE, %empty + ldb,3,sm [ %db[1] + 0x180 ] MAS_BYPASS_L1_CACHE, %empty + ldb,5,sm [ %db[1] + 0x1c0 ] MAS_BYPASS_L1_CACHE, %empty + addd %db[1], 0x200, %db[1] + abp abpf = 1, abpt = 1 + ct %ctpr1 ? ~ %pred0 + } + +2: /* skip_prefetch_loop */ + /* Copy the page */ + { + ipd 1 + disp %ctpr1, 4f /* copy_tail */ + + ldb,0,sm [ %db[0] + 0 ] (MAS_LOAD_SPEC | MAS_BYPASS_L1_CACHE), %empty ? ~ %pred15 + ldb,2,sm [ %db[0] + 0x40 ] (MAS_LOAD_SPEC | MAS_BYPASS_L1_CACHE), %empty ? ~ %pred15 + ldb,3,sm [ %db[0] + 0x80 ] (MAS_LOAD_SPEC | MAS_BYPASS_L1_CACHE), %empty ? ~ %pred15 + ldb,5,sm [ %db[0] + 0xc0 ] (MAS_LOAD_SPEC | MAS_BYPASS_L1_CACHE), %empty ? ~ %pred15 + + cmpbdb,1 %dr2, 0x1c0, %pred0 + + /* dr3 = dr1 + dr2 - 0xc0 */ + subd,4 %dr3, 0xc0, %dr3 + } +3: /* copy */ + { + cmpldb,4 %dr3, %dr5, %pred1 + + ldrqp,0 [ %dr5 + %dr10 ], %db[0] + ldrqp,2 [ %dr5 + %dr11 ], %db[1] + ldrqp,3 [ %dr5 + %dr12 ], %db[12] + ldrqp,5 [ %dr5 + %dr13 ], %db[13] + addd %dr5, 0x40, %dr5 + } + { + /* If trap happens on previous instruction %dr14 + * will be negative, so we check for that in trap + * handler. 
*/ + addd,3 %dr14, 0x40, %dr14 + + /* Bug 116851 - all strqp must be speculative + * if dealing with tags */ + strqp,2,sm [ %dr0 + %dr6 ], %db[10] + strqp,5,sm [ %dr0 + %dr7 ], %db[11] + addd,1 %dr6, 0x40, %dr6 + addd,4 %dr7, 0x40, %dr7 + } + { + strqp,2,sm [ %dr0 + %dr8 ], %db[22] + strqp,5,sm [ %dr0 + %dr9 ], %db[23] + addd,1 %dr8, 0x40, %dr8 + addd,4 %dr9, 0x40, %dr9 + abn abnf = 1, abnt = 1 + abp abpf = 1, abpt = 1 + ct %ctpr2 ? ~ %pred0 + } + /* Copy the remaining tail */ + { + subd,1 %dr2, 0xc0, %dr3 + ldrqp,0 [ %dr5 + %dr10 ], %db[0] ? ~ %pred6 + ldrqp,2 [ %dr5 + %dr11 ], %db[1] ? ~ %pred6 + addd,3 %dr10, 0x20, %dr10 ? ~ %pred6 + cmpedb 0x0, 0x0, %pred0 + return %ctpr3 + } + { + ldrqp,3 [ %dr5 + %dr10 ], %dr13 ? ~ %pred8 + } +4: /* copy_tail */ + { + addd,3 %dr14, 0x40, %dr14 + cmpbesb %r6, %r3, %pred1 + strqp,2,sm [ %dr0 + %dr6 ], %db[10] + strqp,5,sm [ %dr0 + %dr7 ], %db[11] + addd,1 %dr6, 0x40, %dr6 + addd,4 %dr7, 0x40, %dr7 + } + { + strqp,2,sm [ %dr0 + %dr8 ], %db[22] + strqp,5,sm [ %dr0 + %dr9 ], %db[23] + addd,1 %dr8, 0x40, %dr8 + addd,4 %dr9, 0x40, %dr9 + abn abnf = 1, abnt = 1 + abp abpf = 1, abpt = 1 + ct %ctpr1 ? %pred0 + } + { + addd,3 %dr14, 0x40, %dr14 + strqp,2,sm [ %dr0 + %dr6 ], %db[10] ? ~ %pred6 + strqp,5,sm [ %dr0 + %dr7 ], %db[11] ? ~ %pred6 + addd,1 %dr6, 0x20, %dr6 ? ~ %pred6 + } + { + addd,3 %dr14, 0x20, %dr14 ? ~ %pred6 + strqp,sm [ %dr0 + %dr6 ], %dr13 ? ~ %pred8 + } + { + addd,3 %dr2, 0x0, %dr0 + ct %ctpr3 + } + + +5: /* very_small_size */ + { + strqp,sm [ %dr0 + %dr6 ], %db[10] ? ~ %pred26 + strqp,sm [ %dr0 + %dr7 ], %db[11] ? ~ %pred27 + } + { + addd,0 %dr14, 0x20, %dr14 ? ~ %pred27 + strqp,sm [ %dr0 + %dr8 ], %db[22] ? ~ %pred28 + strqp,sm [ %dr0 + %dr9 ], %db[23] ? ~ %pred29 + } + { + /* Return should not be in the same instruction + * with memory access, otherwise we will return + * on page fault and page fault handler will + * return from our caller. 
*/ + addd,3 %dr2, 0x0, %dr0 + ct %ctpr3 + } + + +6: /* small_size */ + { + ipd 0 + disp %ctpr1, 7f /* copy_small */ + + cmpbdb %dr2, 0xc0, %pred0 + subd,4 %dr3, 0xc0, %dr3 + + subd,3 %dr14, 0x40, %dr14 ? ~ %pred12 + + ct %ctpr2 ? %pred12 + } +7: /* copy_small */ + { + cmpldb,4 %dr3, %dr5, %pred1 + + ldrqp,0 [ %dr5 + %dr10 ], %db[8] + ldrqp,3 [ %dr5 + %dr11 ], %db[9] + ldrqp,2 [ %dr5 + %dr12 ], %db[20] + ldrqp,5 [ %dr5 + %dr13 ], %db[21] + addd %dr5, 0x40, %dr5 + } + { + /* If trap happens on previous instruction %dr14 + * will be negative, so we check for that in trap + * handler. */ + addd,3 %dr14, 0x40, %dr14 + + strqp,2,sm [ %dr0 + %dr6 ], %db[10] + strqp,5,sm [ %dr0 + %dr7 ], %db[11] + addd,1 %dr6, 0x40, %dr6 + addd,4 %dr7, 0x40, %dr7 + } + { + strqp,2,sm [ %dr0 + %dr8 ], %db[22] + strqp,5,sm [ %dr0 + %dr9 ], %db[23] + addd,1 %dr8, 0x40, %dr8 + addd,4 %dr9, 0x40, %dr9 + + abn abnf = 1, abnt = 1 + abp abpf = 1, abpt = 1 + ct %ctpr1 ? ~ %pred0 + } +8: /* copy_tail_small */ + { + addd,4 %dr14, 0x40, %dr14 ? ~ %pred12 + + ldrqp,0 [ %dr5 + %dr10 ], %db[8] ? ~ %pred6 + ldrqp,3 [ %dr5 + %dr11 ], %db[9] ? ~ %pred6 + addd,1 %dr10, 0x20, %dr10 ? ~ %pred6 + } + { + ldrqp,2 [ %dr5 + %dr10 ], %dr13 ? ~ %pred8 + } + { + strqp,2,sm [ %dr0 + %dr6 ], %db[10] + strqp,5,sm [ %dr0 + %dr7 ], %db[11] + addd,1 %dr6, 0x40, %dr6 + addd,4 %dr7, 0x40, %dr7 + } + { + addd,3 %dr14, 0x20, %dr14 + strqp,2,sm [ %dr0 + %dr8 ], %db[22] + strqp,5,sm [ %dr0 + %dr9 ], %db[23] + } + { + addd,3 %dr14, 0x20, %dr14 + strqp,2,sm [ %dr0 + %dr6 ], %db[8] ? ~ %pred6 + strqp,5,sm [ %dr0 + %dr7 ], %db[9] ? ~ %pred6 + addd,1 %dr6, 0x20, %dr6 ? ~ %pred6 + } + { + addd,3 %dr14, 0x20, %dr14 ? ~ %pred6 + strqp,2,sm [ %dr0 + %dr6 ], %dr13 ? ~ %pred8 + } + { + addd,3 %dr2, 0x0, %dr0 + ct %ctpr3 + } +.size $__recovery_memcpy_16, . 
- $__recovery_memcpy_16 + +.global $__recovery_memset_16 +.type __recovery_memset_16,@function +$__recovery_memset_16: +.ignore ld_st_style +/* + * dr0 - dst + * dr1 - value + * dr2 - tag + * dr3 - size + * dr4 - strqp opcode + */ + { + setwd wsz = 0x5, nfx = 0 + setbp psz = 0x1 + + ipd 0 + disp %ctpr2, 2f /* store_tail */ + } + { + ipd 0 + disp %ctpr1, 1f /* store */ + + cmpbesb,0 %r3, 0x30, %pred4 + cmpandesb,1 %r3, 0x20, %pred2 + + puttagd,2 %dr1, %dr2, %dr1 + + /* dr9 holds the number of cleared bytes in case + * pagefault happens. */ + subd,3 0x0, 0x40, %dr9 + } + { + return %ctpr3 + + cmpbsb,0 %r3, 0x80, %pred0 + + qppackdl,sm,1 %dr1, %dr1, %dr1 + } + { + cmpandesb,1 %r3, 0x10, %pred3 + + subs,0 %r3, 0xc0, %r3 + + addd,5 %dr4, 0x10, %dr5 + } + { + addd,1 %dr4, 0x20, %dr6 + addd,4 %dr4, 0x30, %dr7 + + ct %ctpr2 ? %pred4 + } + +1: /* store */ + { + cmplsb %r3, %r4, %pred1 + strqp,2,sm [ %dr0 + %dr4 ], %dr1 + strqp,5,sm [ %dr0 + %dr5 ], %dr1 + addd,1 %dr4, 0x40, %dr4 + addd,4 %dr5, 0x40, %dr5 + addd,3 %dr9, 0x40, %dr9 + } + { + strqp,2,sm [ %dr0 + %dr6 ], %dr1 + strqp,5,sm [ %dr0 + %dr7 ], %dr1 + addd,1 %dr6, 0x40, %dr6 + addd,4 %dr7, 0x40, %dr7 + abp abpf = 1, abpt = 1 + ct %ctpr1 ? ~ %pred0 + } + +2: /* store_tail */ + { + strqp,2,sm [ %dr0 + %dr4 ], %dr1 ? ~ %pred2 + strqp,5,sm [ %dr0 + %dr5 ], %dr1 ? ~ %pred2 + addd,3 %dr9, 0x40, %dr9 + addd,1 %dr4, 0x20, %dr4 ? ~ %pred2 + } + { + strqp,2,sm [ %dr0 + %dr4 ], %dr1 ? ~ %pred3 + addd,3 %dr9, 0x20, %dr9 ? ~ %pred2 + } + { + addd,3 %dr9, 0x10, %dr0 ? ~ %pred3 + addd,4 %dr9, 0, %dr0 ? %pred3 + ct %ctpr3 + } +.size $__recovery_memset_16, . 
- $__recovery_memset_16 diff --git a/arch/e2k/kernel/devtree.c b/arch/e2k/kernel/devtree.c new file mode 100644 index 000000000000..51dfb0ba8cfe --- /dev/null +++ b/arch/e2k/kernel/devtree.c @@ -0,0 +1,103 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +int devtree_detected = 1; + + +void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) +{ + return memblock_alloc(size, align); +} + +/* + * This function will create device nodes in sysfs coresponding to nodes + * described in dtb. + */ +int __init e2k_publish_devices(void) +{ + if (!of_have_populated_dt()) { + return 0; + } + return of_platform_populate(NULL, of_default_bus_match_table, + NULL, NULL); +} +device_initcall(e2k_publish_devices); + +static u64 get_device_tree_addr(void) +{ + return bootblock_virt->info.bios.devtree; +} + +u32 get_dtb_size(void) +{ + u32 *blob_addr = (u32 *)get_device_tree_addr(); + u32 dtb_data = boot_readl((void __iomem *)blob_addr); + u32 magic = OF_DT_HEADER; + + if (be32_to_cpu(dtb_data) != magic) { + printk(KERN_ERR "DevTree: disabled (incorrect magic): %x\n", + dtb_data); + return 0; + } + dtb_data = boot_readl((void __iomem *)(blob_addr + 1)); + + return __be32_to_cpu(dtb_data); +} + +void get_dtb_from_boot(u8 *blob, u32 len) +{ + u32 *blob_addr = (u32 *)get_device_tree_addr(); + u8 *dtb_ptr = (u8 *)blob_addr; + int i; + + for (i = 0; i < len; i++) { + u8 dt = boot_readb((void __iomem *)dtb_ptr); + blob[i] = dt; + dtb_ptr++; + } + + return; +} + +int __init device_tree_init(void) +{ +#ifdef CONFIG_DTB_L_TEST + initial_boot_params = (struct boot_param_header *)test_blob; +#else + u32 sz = get_dtb_size(); + if (sz == 0) { + printk(KERN_ERR "DevTree: device tree size is 0\n"); + devtree_detected = 0; + return -1; + } else { + printk(KERN_INFO "DevTree: device tree size is %d\n", sz); + } + + u8 *dt = memblock_alloc(sz, SMP_CACHE_BYTES); + if (dt == NULL) { + 
printk(KERN_ERR "DevTree: not enough memory\n"); + devtree_detected = 0; + return -2; + } + + get_dtb_from_boot(dt, sz); + + initial_boot_params = (struct boot_param_header *)dt; +#endif + unflatten_device_tree(); + return 0; +} diff --git a/arch/e2k/kernel/e2k-iommu.c b/arch/e2k/kernel/e2k-iommu.c new file mode 100644 index 000000000000..328dff298646 --- /dev/null +++ b/arch/e2k/kernel/e2k-iommu.c @@ -0,0 +1,1522 @@ +#define DEBUG +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#undef DEBUG_PASSTHROUGH_MODE +#undef DebugPT +#define DEBUG_PASSTHROUGH_MODE 0 /* IOMMU Passthrough debugging */ +#define DebugPT(fmt, args...) \ +({ \ + if (DEBUG_PASSTHROUGH_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#define E2K_DTE_MAX_BUS_NR (1 << 8) +#define E2K_DTE_ENTRIES_NR (E2K_DTE_MAX_BUS_NR * 256) +#define E2K_MAX_DOMAIN_ID (1 << 12) + +struct dte { +/* P - DTE tanslation validity */ + unsigned long h_present : 1; +/* + * Host Translation Enable - whether to allow DMA request translation + * to HPA space, when HTE = 0 all DMA requests from this device will + * use physical addresses + */ + unsigned long h_enable : 1; +/* + * Host Translation Caching Enable + */ + unsigned long h_cached : 1; +/* + * Prefetch Buffer Enable - prefetch enabled for this device, see chapter 5.7.1. 
+ */ + unsigned long h_prefetch : 1; + long reserved1 : 5; +/* + * Host Virtual Address Width - VA width in native mode and GPA width in + * guest virtual mode: + */ +#define E2K_DTE_HVAW_30_BITS 1 +#define E2K_DTE_HVAW_39_BITS 2 +#define E2K_DTE_HVAW_48_BITS 3 + long h_addr_width : 3; +/* + * Host Page Table Pointer - page table root in native mode, + * and also PT second level root in guest virtual mode + */ + long h_page_table : 36; + long reserved2 : 16; + + unsigned long reserved3 : 1; +/* + * Guest Translation Enable - whether to allow translation of DMA requests, + * when GTE = 0 all DMA requests of this device will be blocked + */ + unsigned long g_enable : 1; +/* + * Guest Translation Caching Enable + */ + unsigned long g_cached : 1; + long reserved4 : 6; + +/* + * Guest Virtual Address Width - VA width in native mode and GPA width + * in guest virtual mode: + */ + long g_addr_width : 3; + +/* + * Guest Page Table Pointer - page table root in native mode + * and also PT second level root in guest virtual mode + */ + long g_page_table : 36; + long reserved5 : 16; + +/* + * Interrupt Enable - enable receiving interrupts from this device + */ + unsigned long int_enable : 1; + long reserved6 : 11; +/* + * Guest Interrupt Table Pointer - address of structure for guest interrupts bookkeeping + */ + long int_table : 36; + long reserved7 : 16; + + long reserved8 : 16; +/* + * EDID.did: + * Domain ID in native mode + * or + * Guest ID in passthrough mode + */ + long id : 12; +/* + * EDID.g: + * 0 - native mode + * 1 - passthrough mode + */ + unsigned long guest : 1; + long reserved9 : 3; + +/* + * Guest Emulated Version Number - version of IOMMU that guest OS uses + */ +#define E2K_DTE_E8C_VERSION 1 +#define E2K_DTE_E8C2_VERSION 2 +#define E2K_DTE_E8C16_VERSION 3 + long g_iommu_version : 4; + long reserved10 : 28; +} __packed; + +#define IO_PTE_PRESENT (1 << 0) +#define IO_PTE_WRITE (1 << 1) +#define IO_PTE_READ (1 << 2) +#define IO_PTE_PREFETCH (1 << 3) +#define 
IO_PTE_PAGE_SIZE (1 << 7) + + +#define IO_PAGE_SHIFT 12 +#define IO_PAGE_SIZE (1UL << IO_PAGE_SHIFT) +#define IO_PAGE_MASK (~(IO_PAGE_SIZE-1)) +#define IO_PAGE_ALIGN(addr) ALIGN(addr, IO_PAGE_SIZE) + + +#define E2K_IOMMU_MAX_LEVELS 4 +#define E2K_IOMMU_START_LVL() 0 + +#define E2K_IOMMU_GRANULE() IO_PAGE_SIZE + +static int e2k_iommu_no_domains = 0; +static int e2k_iommu_direct_map = 0; +static struct iommu_ops e2k_iommu_ops; +typedef u64 io_pte; +static int e2k_iommu_map(struct iommu_domain *iommu_domain, + unsigned long iova, phys_addr_t paddr, size_t size, + int iommu_prot); + +/* IOPTE accessors */ +#define iopte_deref(pte) __va(iopte_to_pa(pte)) + +#define iopte_leaf(pte) (pte & IO_PTE_PAGE_SIZE) +#define iopte_present(pte) (pte & IO_PTE_PRESENT) + +#define pa_to_iopte(addr) ((io_pte)(addr & IO_PAGE_MASK)) +#define iopte_to_pa(iopte) ((phys_addr_t)(iopte) & IO_PAGE_MASK) + +#define E2K_IOMMU_PER_LEVEL_SHIFT ilog2(E2K_IOMMU_GRANULE() / sizeof(io_pte)) +#define E2K_IOMMU_PER_LEVEL_MASK (~((1 << E2K_IOMMU_PER_LEVEL_SHIFT) - 1)) + +#define E2K_IOMMU_LVL_SHIFT(lvl) \ + ((E2K_IOMMU_MAX_LEVELS - (lvl + 1)) * E2K_IOMMU_PER_LEVEL_SHIFT) + +#define E2K_IOMMU_LEVEL_SHIFT(lvl) (IO_PAGE_SHIFT + E2K_IOMMU_LVL_SHIFT(lvl)) + +#define E2K_IOMMU_LVL_IDX(addr,lvl) \ + ((addr >> E2K_IOMMU_LEVEL_SHIFT(lvl)) & ~E2K_IOMMU_PER_LEVEL_MASK) + +#define E2K_IOMMU_LEVEL_MASK(lvl) \ + (~((1UL << E2K_IOMMU_LEVEL_SHIFT(lvl)) - 1)) + +const long e2k_iommu_page_sizes[] = { + -1, SZ_1G, SZ_2M, SZ_4K +}; +#define E2K_IOMMU_PGSIZE(lvl) e2k_iommu_page_sizes[lvl] + +static io_pte e2k_iommu_prot_to_pte(int prot) +{ + io_pte pte = 0; + if (prot & IOMMU_READ) + pte |= IO_PTE_READ; + + if (prot & IOMMU_WRITE) + pte |= IO_PTE_WRITE; + return pte; +} + +struct e2k_iommu { + struct dte *dtable; + io_pte *default_pgtable; + spinlock_t lock; + int node; + struct iommu_group *default_group; + struct iommu_device iommu; /* IOMMU core handle */ +}; + +struct e2k_iommu_domain { + struct mutex mutex; + io_pte 
*pgtable; + struct e2k_iommu *e2k_iommu; + int id; + + struct iommu_domain domain; /* generic domain data structure */ +}; + +static struct e2k_iommu_domain *to_e2k_domain(struct iommu_domain *dom) +{ + return container_of(dom, struct e2k_iommu_domain, domain); +} + +static struct pci_dev *e2k_dev_to_parent_pcidev(struct device *dev) +{ + while (dev && !dev_is_pci(dev)) + dev = dev->parent; + BUG_ON(!dev); + BUG_ON(!dev_is_pci(dev)); + return to_pci_dev(dev); +} + +static struct e2k_iommu *dev_to_iommu(struct device *dev) +{ + struct pci_dev *pdev = e2k_dev_to_parent_pcidev(dev); + struct iohub_sysdata *sd = pdev->bus->sysdata; + return sd->l_iommu; +} + +static u16 to_sid(int bus, int slot, int func) +{ + return (bus << 8) | (slot << 3) | func; +} + +static u16 dev_to_sid(struct device *dev) +{ + struct pci_dev *pdev = e2k_dev_to_parent_pcidev(dev); + int bus = pdev->bus->number; + int devfn = pdev->devfn; + return to_sid(bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); +} + +static struct dte *dev_to_dte(struct e2k_iommu *i, struct device *dev) +{ + if (i->dtable) + return i->dtable + dev_to_sid(dev); + return NULL; +} + +#define E2K_IOMMU_CTRL SIC_iommu_ctrl +# define IOMMU_CTRL_GT_EN (1 << 13) +# define IOMMU_CTRL_DEV_TABLE_EN (1 << 12) +# define IOMMU_CTRL_NEW_VERS (3 << 8) +# define IOMMU_CTRL_PREFETCH_EN 0x00000040 /* enable prefeth TTE */ +# define IOMMU_CTRL_CASHABLE_TTE 0x00000020 /* Cachable TTE */ +# define IOMMU_CTRL_ENAB 0x00000001 /* IOMMU Enable */ +#define E2K_IOMMU_PTBAR SIC_iommu_ba_lo +# define IOMMU_PTBAR_DFLT_SZ (3 << 9) /* 48-bit */ +# define IOMMU_PTBAR_PREFETCH_EN (1 << 2) /* enable prefeth TTE */ +# define IOMMU_PTBAR_CASHABLE_TTE (1 << 1) +# define IOMMU_PTBAR_PRESENT (1 << 0) + +#define E2K_IOMMU_DTBAR SIC_iommu_dtba_lo +# define IOMMU_DTBAR_DFLT_SZ (E2K_DTE_MAX_BUS_NR << 2) +# define IOMMU_DTBAR_CASHABLE_DTE (1 << 1) +# define IOMMU_DTBAR_PRESENT (1 << 0) + +#define E2K_IOMMU_CMD SIC_iommu_cmd_c_lo +# define E2K_IOMMU_CMD_ERR 2 +# define 
E2K_IOMMU_CMD_RUN 1 +# define FL_ALL 0 /*Flush All*/ +# define FL_ID 1 /*Flush Guest/Domain*/ +# define FL_SID 3 /*Flush Device*/ +# define FL_PTE 4 /* Flush virtual page of guest or hypervisor */ +# define FL_PDE 5 /* Flush intermediate translations */ +# define FL_SLPDE 0x06 /* Flush GPA -> HPA translation */ +# define FL_DTE 0x07 /* Flush Device Table row */ +# define FL_TLB 0x08 /*Flush TLB*/ +# define FL_TLU 0x09 /*Flush TLU*/ +# define FL_SLTLU 0x0a /*Flush SLTLU*/ +# define FL_DTLB 0x0b /*Flush DTLB*/ +# define DRA_PTE 0x10 /* Associative diagnostic read of PTE from cache */ +# define DRA_PDE 0x11 /* Associative diagnostic read of */ + /* intermediate translations from cache */ +# define DRA_SLPDE 0x12 /* Associative diagnostic read of */ + /* GPA -> HPA translations from cache */ +# define DRA_DTE 0x13 /* Associative diagnostic read of */ + /* Device Table row from cache */ +# define DRND_PTE 0x14 /* Diagnostic read of TLB line data */ +# define DRND_PDE 0x15 /* Diagnostic read of TLU line data */ +# define DRND_SLPDE 0x16 /* Diagnostic read of SLTLU line data */ +# define DRND_DTE 0x17 /* Diagnostic read of DTLB line data */ +# define DRNT_PTE 0x18 /* Diagnostic read of TLB line tags */ +# define DRNT_PDE 0x19 /* Diagnostic read of TLU line tags */ +# define DRNT_SLPDE 0x1a /* Diagnostic read of SLTLU line tags */ +# define DRNT_DTE 0x1b /* Diagnostic read of DTLB line tags */ + + +#define E2K_IOMMU_DATA SIC_iommu_cmd_d_lo +#define E2K_IOMMU_ERR SIC_iommu_err + +#define E2K_IOMMU_MMU_MISS (1 << 0) +#define E2K_IOMMU_PROT_VIOL_WR (1 << 1) +#define E2K_IOMMU_PROT_VIOL_RD (1 << 2) +#define E2K_IOMMU_MLT_HIT (1 << 3) +#define E2K_IOMMU_PTE_ERR (1 << 4) +#define E2K_IOMMU_ADDR_RANGE (1 << 5) +#define E2K_IOMMU_BUS_RANGE (1 << 6) +#define E2K_IOMMU_MSI (1 << 7) +#define E2K_IOMMU_CEP_OVERFLOW (1 << 8) + +#define E2K_IOMMU_ERR_INFO SIC_iommu_err_info_lo +#define E2K_IOMMU_EDBC_OFFSET (SIC_edbc_iommu_ctrl - SIC_iommu_ctrl) +#define E2K_IOMMU_EMBEDDED_OFFSET 
(SIC_embedded_iommu_base - SIC_iommu_ctrl) +#define E2K_IOMMU_NR SIC_e2c3_iommu_nr +#define E2K_IOMMU_EDID_GUEST_MASK (1 << 12) + +union iommu_cmd_c { + u64 raw; + struct { + u64 rs : 1, + cs : 1, + __reserved1 : 2, + code : 5, + __reserved2 : 3, + addr : 36, + id : 16; + } __packed bits; +}; + +static u32 e2k_iommu_read(unsigned int node, int iommu, unsigned long addr) +{ + if (iommu) /* for embedded devices */ + addr += E2K_IOMMU_EMBEDDED_OFFSET + + (iommu - 1) * SIC_iommu_reg_size; + return sic_read_node_iolink_nbsr_reg(node, 0, addr); +} + +static u64 e2k_iommu_readll(unsigned node, int iommu, unsigned long addr) +{ + if (iommu) /* for embedded devices */ + addr += E2K_IOMMU_EMBEDDED_OFFSET + + (iommu - 1) * SIC_iommu_reg_size; + return sic_readll_node_iolink_nbsr_reg(node, 0, addr); +} + +static void e2k_iommu_write(unsigned node, u32 val, unsigned long addr) +{ + sic_write_node_iolink_nbsr_reg(node, 0, addr, val); + /* for embedded devices */ + sic_write_node_iolink_nbsr_reg(node, 0, + addr + E2K_IOMMU_EDBC_OFFSET, val); +} + +static void e2k_iommu_writell(unsigned node, u64 val, unsigned long addr) +{ + sic_writell_node_iolink_nbsr_reg(node, 0, addr, val); + /* for embedded devices */ + sic_writell_node_iolink_nbsr_reg(node, 0, + addr + E2K_IOMMU_EDBC_OFFSET, val); +} + +static void e2k_iommu_flush(struct e2k_iommu *i, u64 iova, u64 id, u64 cmd) +{ + u64 v = (iova & IO_PAGE_MASK) | (cmd << 4) | + (id << 48) | E2K_IOMMU_CMD_RUN; + e2k_iommu_writell(i->node, v, E2K_IOMMU_CMD); +} + +static void e2k_iommu_flush_dev(struct e2k_iommu *i, struct device *dev) +{ + e2k_iommu_flush(i, 0, dev_to_sid(dev), FL_SID); +} + +static void e2k_iommu_flush_pte(struct e2k_iommu_domain *d, u64 iova) +{ + e2k_iommu_flush(d->e2k_iommu, iova, d->id, FL_PTE); + trace_flush(d->id, iova); +} + +static void e2k_iommu_flush_pde(struct e2k_iommu_domain *d, u64 iova, int lvl) +{ + iova &= E2K_IOMMU_LEVEL_MASK(lvl); + iova |= (~E2K_IOMMU_LEVEL_MASK(lvl)) >> 1; + 
e2k_iommu_flush(d->e2k_iommu, iova, d->id, FL_PDE); +} + +static void e2k_iommu_flush_domain(struct e2k_iommu_domain *d) +{ + e2k_iommu_flush(d->e2k_iommu, 0, d->id, FL_ID); +} + +static void e2k_iommu_flush_all(struct e2k_iommu *i) +{ + e2k_iommu_flush(i, 0, 0, FL_ALL); +} + +void e2k_iommu_flush_page(struct device *dev, + const void *virt, phys_addr_t phys) +{ + struct e2k_iommu_domain *d = + to_e2k_domain(iommu_get_domain_for_dev(dev)); + e2k_iommu_flush_pte(d, (unsigned long)virt); +} +static void __e2k_iommu_free_pgtable(struct e2k_iommu_domain *d, + unsigned long iova, int lvl, io_pte *ptep); + +static void e2k_iommu_init_hw(struct e2k_iommu *i) +{ + int node = i->node; + u64 d = __pa(i->dtable) | IOMMU_DTBAR_PRESENT | + IOMMU_DTBAR_CASHABLE_DTE | IOMMU_DTBAR_DFLT_SZ; + u64 p = __pa(i->default_pgtable) | IOMMU_PTBAR_PRESENT | + IOMMU_PTBAR_CASHABLE_TTE | IOMMU_PTBAR_DFLT_SZ; + u32 c = IOMMU_CTRL_NEW_VERS | IOMMU_CTRL_PREFETCH_EN | + IOMMU_CTRL_CASHABLE_TTE | IOMMU_CTRL_ENAB; + + if (i->dtable) { + p = 0; + c |= IOMMU_CTRL_DEV_TABLE_EN; + } else { + d = 0; + } + e2k_iommu_write(node, 0, E2K_IOMMU_CTRL); + /* clear errors & unmask interrupts */ + e2k_iommu_writell(node, 0, E2K_IOMMU_ERR); + e2k_iommu_flush_all(i); + + e2k_iommu_writell(node, p, E2K_IOMMU_PTBAR); + e2k_iommu_writell(node, d, E2K_IOMMU_DTBAR); + e2k_iommu_write (node, c, E2K_IOMMU_CTRL); + /* enable error sending to device */ + c = sic_read_node_nbsr_reg(node, SIC_hc_ctrl); + sic_write_node_nbsr_reg(node, SIC_hc_ctrl, c | 1); +} + +static void *__e2k_iommu_alloc_pages(size_t size, gfp_t gfp, int node) +{ + int order = get_order(size); + struct page *page = alloc_pages_node(node, + GFP_ATOMIC | __GFP_ZERO, order); + if (!page) + return NULL; + + return page_address(page); +} + +static void __e2k_iommu_free_pages(void *pages, size_t size) +{ + free_pages((unsigned long)pages, get_order(size)); +} + +static int __e2k_iommu_direct_map(struct iommu_domain *d, + unsigned long start, + unsigned long 
end) +{ + int ret = 0; + unsigned long s = rounddown(start, SZ_2M), a; + unsigned long e = roundup(end, SZ_2M); + + for (a = s; a <= e && !ret; a += SZ_2M) { + /*FIXME: check the result*/ + e2k_iommu_map(d, a, a, SZ_2M, + IOMMU_READ | IOMMU_WRITE); + } + + return ret; +} +/*TODO: iommu_default_passthrough()*/ +static int _e2k_iommu_direct_map(struct e2k_iommu *iommu) +{ + int ret = 0, nid; + struct e2k_iommu_domain d = { + .pgtable = iommu->default_pgtable, + .e2k_iommu = iommu, + }; + for_each_online_node(nid) { + unsigned long start_pfn, end_pfn; + int i; + + for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { + ret = __e2k_iommu_direct_map(&d.domain, + PFN_PHYS(start_pfn), PFN_PHYS(end_pfn)); + if (ret) + return ret; + } + } + return ret; +} + +static struct e2k_iommu *__e2k_iommu_init(int node, struct device *parent) +{ + struct e2k_iommu *i; + if (node < 0) + node = 0; + + i = kzalloc_node(sizeof(*i), GFP_KERNEL, node); + if (!i) + return i; + + if (e2k_iommu_no_domains) { + i->default_pgtable = + __e2k_iommu_alloc_pages(E2K_IOMMU_GRANULE(), + GFP_KERNEL, i->node); + } else { + i->dtable = __e2k_iommu_alloc_pages(E2K_DTE_ENTRIES_NR * + sizeof(struct dte), GFP_KERNEL, node); + } + if (!i->default_pgtable && !i->dtable) + goto fail; + + i->node = node; + spin_lock_init(&i->lock); + if (e2k_iommu_direct_map) + _e2k_iommu_direct_map(i); + + iommu_device_sysfs_add(&i->iommu, parent, NULL, "iommu%d", node); + iommu_device_set_ops(&i->iommu, &e2k_iommu_ops); + iommu_device_register(&i->iommu); + e2k_iommu_init_hw(i); + return i; +fail: + kfree(i); + return NULL; +} + +static void __e2k_iommu_set_pte(io_pte *ptep, io_pte pte) +{ + *ptep = pte; +} + +static int e2k_iommu_init_pte(struct e2k_iommu_domain *d, unsigned long iova, + phys_addr_t paddr, io_pte prot, int lvl, io_pte *ptep) +{ + io_pte pte = pa_to_iopte(paddr) | prot | + IO_PTE_PRESENT | IO_PTE_PAGE_SIZE; + io_pte old = *ptep; + if (iopte_leaf(old)) { + /* We require an unmap first */ + WARN_ON(1); + 
return -EEXIST; + } else if (iopte_present(old)) { + /* + * We need to unmap and free the old table before + * overwriting it with a block entry. + */ + __e2k_iommu_set_pte(ptep, 0); + __e2k_iommu_free_pgtable(d, iova, lvl + 1, iopte_deref(old)); + } + __e2k_iommu_set_pte(ptep, pte); + return 0; +} + +static int __e2k_iommu_map(struct e2k_iommu_domain *d, unsigned long iova, + phys_addr_t paddr, size_t size, io_pte prot, + int lvl, io_pte *ptep) +{ + io_pte *cptep, pte; + size_t page_size = E2K_IOMMU_PGSIZE(lvl); + + /* Find our entry at the current level */ + ptep += E2K_IOMMU_LVL_IDX(iova, lvl); + + /* If we can install a leaf entry at this level, then do so */ + if (size == page_size) + return e2k_iommu_init_pte(d, iova, paddr, prot, lvl, ptep); + + /* We can't allocate tables at the final level */ + if (WARN_ON(lvl >= E2K_IOMMU_MAX_LEVELS - 1)) + return -EINVAL; + + /* Grab a pointer to the next level */ + pte = *ptep; + if (!pte) { + io_pte oldpte; + cptep = __e2k_iommu_alloc_pages(E2K_IOMMU_GRANULE(), + GFP_ATOMIC, d->e2k_iommu->node); + if (!cptep) + return -ENOMEM; + + pte = pa_to_iopte(__pa(cptep)) | IO_PTE_READ | IO_PTE_PRESENT; + oldpte = cmpxchg64(ptep, 0ULL, pte); + if (oldpte) { /* Someone else set it while we were thinking; + use theirs. 
*/ + __e2k_iommu_free_pages(cptep, E2K_IOMMU_GRANULE()); + pte = oldpte; + } + } else if (iopte_leaf(pte)) { + /* We require an unmap first */ + WARN_ON(1); + return -EEXIST; + } + cptep = iopte_deref(pte); + + return __e2k_iommu_map(d, iova, paddr, size, prot, lvl + 1, cptep); +} + +static void __e2k_iommu_free_pgtable(struct e2k_iommu_domain *d, + unsigned long iova, int lvl, io_pte *ptep) +{ + io_pte *start, *end; + unsigned long table_size = E2K_IOMMU_GRANULE(); + + start = ptep; + + /* Only leaf entries at the last level */ + if (lvl == E2K_IOMMU_MAX_LEVELS - 1) + end = ptep; + else + end = (void *)ptep + table_size; + + while (ptep != end) { + io_pte pte = *ptep++; + + if (!pte || WARN_ON(iopte_leaf(pte))) + continue; + + __e2k_iommu_free_pgtable(d, iova, lvl + 1, iopte_deref(pte)); + } + e2k_iommu_flush_pde(d, iova, lvl); + __e2k_iommu_free_pages(start, table_size); +} + +static size_t __e2k_iommu_unmap(struct e2k_iommu_domain *d, unsigned long iova, + size_t size, int lvl, io_pte *ptep) +{ + io_pte pte; + size_t page_size = E2K_IOMMU_PGSIZE(lvl); + + /* Something went horribly wrong and we ran out of page table */ + if (WARN_ON(lvl == E2K_IOMMU_MAX_LEVELS)) + return 0; + + ptep += E2K_IOMMU_LVL_IDX(iova, lvl); + pte = *ptep; + if (WARN_ON(!pte)) + return 0; + + /* If the size matches this level, we're in the right place */ + if (size == page_size) { + __e2k_iommu_set_pte(ptep, 0); + if (!iopte_leaf(pte)) { + __e2k_iommu_free_pgtable(d, iova, + lvl + 1, iopte_deref(pte)); + e2k_iommu_flush_domain(d); + } + return size; + } else if (iopte_leaf(pte)) { + WARN_ON(1); + return 0; + } + + /* Keep on walkin' */ + ptep = iopte_deref(pte); + return __e2k_iommu_unmap(d, iova, size, lvl + 1, ptep); +} + +static phys_addr_t __e2k_iommu_iova_to_phys(io_pte *pgtable, dma_addr_t iova, + io_pte *pout) +{ + io_pte pte, *ptep = pgtable; + int lvl = E2K_IOMMU_START_LVL(); + if (pout) + *pout = 0; + + do { + /* Valid IOPTE pointer? 
*/ + if (!ptep) + return 0; + + /* Grab the IOPTE we're interested in */ + pte = *(ptep + E2K_IOMMU_LVL_IDX(iova, lvl)); + + /* Valid entry? */ + if (!pte) + return 0; + + /* Leaf entry? */ + if (iopte_leaf(pte)) + goto found_translation; + + /* Take it to the next level */ + ptep = iopte_deref(pte); + } while (++lvl < E2K_IOMMU_MAX_LEVELS); + + /* Ran out of page tables to walk */ + return 0; + +found_translation: + if (pout) + *pout = pte; + return iopte_to_pa(pte) + iova % E2K_IOMMU_PGSIZE(lvl); +} + +static void __e2k_iommu_error_interrupt(char *str, int len, int iommu, + u64 err, u64 err_i, struct dte *dte, io_pte *pgtable) +{ + int node = numa_node_id(); + io_pte pte; + int cpu = smp_processor_id(), s; + char *e; + dma_addr_t iova; + int bus, slot, func; + + if (node < 0) + node = 0; + + iova = err_i & IO_PAGE_MASK & ((1UL << 48) - 1); + bus = (err_i >> (8 + 48)) & 0xff; + slot = (err_i >> (3 + 48)) & 0x1f, + func = (err_i >> (0 + 48)) & 0x07; + + e = err & E2K_IOMMU_MMU_MISS ? "Page miss" + : err & E2K_IOMMU_PROT_VIOL_WR ? "Write protection error" + : err & E2K_IOMMU_PROT_VIOL_RD ? "Read protection error" + : err & E2K_IOMMU_PTE_ERR ? "PTE Error" + : err & E2K_IOMMU_ADDR_RANGE ? "Address Range Violation" + : err & E2K_IOMMU_BUS_RANGE ? "Bus Range Violation" + : err & E2K_IOMMU_MSI ? "MSI Protection" + : err & E2K_IOMMU_CEP_OVERFLOW ? "CEP overflow" + : "Unknown error"; + + s = snprintf(str, len, "IOMMU:%d:%d: error on cpu %d:\n" + "\t%s at address 0x%llx " + "(device: %02x:%02x.%d, error regs:%llx,%llx).\n", + node, iommu, cpu, + e, iova, + bus, slot, func, + err, err_i); + if (dte) { + dte += to_sid(bus, slot, func); + pgtable = dte->h_page_table ? 
+ __va(dte->h_page_table << IO_PAGE_SHIFT) : + NULL; + } + if (pgtable) { + __e2k_iommu_iova_to_phys(pgtable, iova, &pte); + s += snprintf(str + s, len - s, "\t pte:%08llx -> pa:%08llx\n", + (u64)(pte), iopte_to_pa(pte)); + } +} + +void e2k_iommu_error_interrupt(void) +{ + int node = numa_node_id(), i; + char str[1024]; + + if (node < 0) + node = 0; + + for (i = 0; i < E2K_IOMMU_NR; i++) { + u64 err = e2k_iommu_readll(node, i, E2K_IOMMU_ERR); + u64 err_i = e2k_iommu_readll(node, i, E2K_IOMMU_ERR_INFO); + struct dte *dte = __va(e2k_iommu_readll(node, i, + E2K_IOMMU_DTBAR) & IO_PAGE_MASK); + io_pte *pte = __va(e2k_iommu_readll(node, i, E2K_IOMMU_PTBAR) & + IO_PAGE_MASK); + if (err == 0 || err == ~0ULL) + continue; + if (e2k_iommu_no_domains) + dte = NULL; + __e2k_iommu_error_interrupt(str, sizeof(str), i, err, err_i, + dte, pte); + e2k_iommu_writell(node, err, E2K_IOMMU_ERR); + break; + } + + debug_dma_dump_mappings(NULL); + + if (iommu_panic_off) + pr_emerg("%s", str); + else + panic(str); +} +/* + * This function checks if the driver got a valid device from the caller to + * avoid dereferencing invalid pointers. 
+ */ +static bool e2k_iommu_check_device(struct device *dev) +{ + if (!dev || !dev->dma_mask) + return false; + + while (dev && !dev_is_pci(dev)) + dev = dev->parent; + + if (!dev || !dev_is_pci(dev)) + return false; + return true; +} + +static const struct pci_device_id e2c3_devices[] = { + { PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_MGA25)}, + { PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_3D_IMAGINATION_GX6650)}, + { PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_IMAGINATION_VXE)}, + { PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_IMAGINATION_VXD)}, + { PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_VP9_BIGEV2)}, + { PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_VP9_G2)}, + { } /* terminate list */ +}; + +static bool e2c3_disable_iommu_for_devices[ARRAY_SIZE(e2c3_devices)] = { + false, + false, /*PCI_DEVICE_ID_MCST_3D_IMAGINATION_GX6650*/ +}; + +static int e2k_iommu_attach_device_with_iommu_disabled(struct device *dev) +{ + unsigned long flags; + struct dte *dte; + struct dte dteval = { + .h_present = 1, + .int_enable = 1, /* enable interruts */ + .id = 0, /* FIXME: */ + }; + struct e2k_iommu *i = dev_to_iommu(dev); + + if (!dev_is_pci(dev)) + goto out; + + dte = dev_to_dte(i, dev); + + if (dte) { + spin_lock_irqsave(&i->lock, flags); + memcpy(dte, &dteval, sizeof(struct dte)); + spin_unlock_irqrestore(&i->lock, flags); + } +out: + return 0; +} + +#define PCI_E2C3_HW_NCTRL 0x40 +#define PCI_E2C3_DISABLE_IOMMU (1 << 5) +static bool e2k_iommu_disable_for_device(struct device *dev) +{ + u32 v = 0; + struct pci_dev *pdev = e2k_dev_to_parent_pcidev(dev); + const struct pci_device_id *id = pci_match_id(e2c3_devices, pdev); + int i = id - e2c3_devices; + bool disabled, disable; + + if (!id) + return false; + + pci_read_config_dword(pdev, PCI_E2C3_HW_NCTRL, &v); + disabled = v & PCI_E2C3_DISABLE_IOMMU; + disable = e2c3_disable_iommu_for_devices[i]; + + if (disabled && disable) + return true; + if (!disabled && !disable) + return 
false; + if (pdev->device == PCI_DEVICE_ID_MCST_MGA25) + return false; /* driver mga2 handles iommu itself */ + + if (disable) { + v |= PCI_E2C3_DISABLE_IOMMU; + dev_info(dev, "disabling iommu\n"); + e2k_iommu_attach_device_with_iommu_disabled(dev); + } else { + v &= ~PCI_E2C3_DISABLE_IOMMU; + dev_info(dev, "enabling iommu\n"); + } + pci_write_config_dword(pdev, PCI_E2C3_HW_NCTRL, v); + return disable; +} + +static char *e2k_iommu_cfg_for_device(char *str, bool disable) +{ + while (*str) { + int vendor, device, i; + int ret = sscanf(str, "%x:%x", &vendor, &device); + if (ret != 2) + return str; + for (i = 0; !(e2c3_devices[i].vendor == vendor && + e2c3_devices[i].device == device) && + i < ARRAY_SIZE(e2c3_devices) - 1; i++); + + if (i < ARRAY_SIZE(e2c3_devices) - 1) + e2c3_disable_iommu_for_devices[i] = disable; + + str += strcspn(str, ","); + while (*str == ',') + str++; + } + return str; +} + +void e2k_iommu_virt_enable(int node) +{ + unsigned int val; + + DebugPT("e2k_iommu: enabling virtualization support (node %d)\n", node); + + val = e2k_iommu_read(node, 0, E2K_IOMMU_CTRL); + if (!(val & IOMMU_CTRL_GT_EN)) { + val |= IOMMU_CTRL_GT_EN; + e2k_iommu_write(node, val, E2K_IOMMU_CTRL); + } + +} + +/* Handle intercepted guest writes and reads */ +void e2k_iommu_guest_write_ctrl(u32 reg_value) +{ + if (reg_value & IOMMU_CTRL_ENAB) + DebugPT("e2k_iommu: guest enabled IOMMU support %s\n", + reg_value & IOMMU_CTRL_DEV_TABLE_EN ? 
+ "with device table enabled: passthrough not supported" : + "with device table disabled: passthrough supported"); +} + +/* Enable second level of DMA translation */ +void e2k_iommu_setup_guest_2d_dte(struct kvm *kvm, u64 g_page_table) +{ + struct irq_remap_table *irt = kvm->arch.irt; + struct device *dev; + struct e2k_iommu *iommu; + struct e2k_iommu_domain *domain; + struct dte *dte_old, dte_new; + unsigned long flags; + + dev = &irt->vfio_dev->dev; + iommu = dev_to_iommu(dev); + domain = to_e2k_domain(iommu_get_domain_for_dev(dev)); + dte_old = dev_to_dte(iommu, dev); + + memcpy(&dte_new, dte_old, sizeof(struct dte)); + + dte_new.g_enable = 1; + dte_new.g_cached = 1; + dte_new.g_addr_width = E2K_DTE_HVAW_48_BITS; + dte_new.g_page_table = g_page_table >> IO_PAGE_SHIFT; + + spin_lock_irqsave(&iommu->lock, flags); + + memcpy(dte_old, &dte_new, sizeof(struct dte)); + + spin_unlock_irqrestore(&iommu->lock, flags); + + e2k_iommu_flush_domain(domain); +} + +void e2k_iommu_flush_guest(struct kvm *kvm, u64 command) +{ + struct irq_remap_table *irt = kvm->arch.irt; + u32 edid = (u32) kvm->arch.vmid.nr | E2K_IOMMU_EDID_GUEST_MASK; + struct device *dev; + struct e2k_iommu *iommu; + union iommu_cmd_c reg; + + dev = &irt->vfio_dev->dev; + iommu = dev_to_iommu(dev); + + reg.raw = command; + + if (!reg.bits.rs) { + pr_err("e2k_iommu: ignore guests's command without cmd_c.rs\n"); + return; + } + + switch (reg.bits.code) { + case FL_PTE: + e2k_iommu_flush(iommu, reg.bits.addr << IO_PAGE_SHIFT, edid, + FL_PTE); + break; + case FL_ALL: + e2k_iommu_flush(iommu, 0, edid, FL_ID); + break; + default: + pr_err("e2k_iommu: ignore unsupported guest's command %d\n", + reg.bits.code); + break; + } +} + +#ifdef CONFIG_PM +static int e2k_iommu_suspend(void) +{ + struct pci_bus *bus; + + list_for_each_entry(bus, &pci_root_buses, node) { + struct iohub_sysdata *sd = bus->sysdata; + struct e2k_iommu *i = sd->l_iommu; + if (i) { + /* Just stop the IOMMU. 
All the necessary flushing is + * done when re-initializing it in e2k_iommu_init_hw() */ + e2k_iommu_write(i->node, 0, E2K_IOMMU_CTRL); + } + } + + return 0; +} + +static void e2k_iommu_resume(void) +{ + struct pci_bus *bus; + + list_for_each_entry(bus, &pci_root_buses, node) { + struct iohub_sysdata *sd = bus->sysdata; + struct e2k_iommu *i = sd->l_iommu; + if (i) + e2k_iommu_init_hw(i); + } +} + +static struct syscore_ops e2k_iommu_syscore_ops = { + .resume = e2k_iommu_resume, + .suspend = e2k_iommu_suspend, +}; + +static void __init e2k_iommu_init_pm_ops(void) +{ + register_syscore_ops(&e2k_iommu_syscore_ops); +} + +#else +static void e2k_iommu_init_pm_ops(void) {} +#endif /* CONFIG_PM */ +#if defined CONFIG_IOMMU_DEBUGFS + +#include +#include + + +static void e2k_iommu_wr(int node, u64 iova, u64 id, u64 cmd) +{ + u64 v = (iova & IO_PAGE_MASK) | (cmd << 4) | + (id << 48) | E2K_IOMMU_CMD_RUN; + e2k_iommu_writell(node, v, E2K_IOMMU_CMD); +} + +static void e2k_iommu_read_tag_and_data(int node, int iommu, int line, + u64 *tag, u64 *data) +{ + u64 v; + e2k_iommu_wr(node, line, 0, DRNT_PTE); + v = e2k_iommu_readll(node, iommu, E2K_IOMMU_CMD); + if (v & E2K_IOMMU_CMD_ERR) { /*tag is not valid */ + *tag = 0; + *data = 0; + return; + } + *tag = e2k_iommu_readll(node, iommu, E2K_IOMMU_DATA); + e2k_iommu_wr(node, line, 0, DRND_PTE); + *data = e2k_iommu_readll(node, iommu, E2K_IOMMU_DATA); +} + +static int e2k_iommu_debugfs_show(struct seq_file *s, void *null) +{ + int n, i, l; + for_each_online_node(n) { + for (i = 0; i < E2K_IOMMU_NR; i++) { + seq_printf(s, "iommu[%d][%d]: line, tag, data:\n", n, i); + for (l = 0; l < 256; l++) { + u64 tag, data; + e2k_iommu_read_tag_and_data(n, i, l << (12 + 4), &tag, &data); + if (tag & 0xff && !(data & 1)) + continue; + seq_printf(s, "% 2x: %016llx %016llx\n", l, tag, data); + } + } + } + return 0; +} + +static int e2k_iommu_debugfs_open(struct inode *inode, struct file *file) +{ + return single_open(file, e2k_iommu_debugfs_show, 
NULL); +} + +static const struct file_operations e2k_iommu_debugfs_operations = { + .open = e2k_iommu_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif /* CONFIG_IOMMU_DEBUGFS */ + +static int __init e2k_iommu_debugfs_init(void) +{ +#if defined CONFIG_IOMMU_DEBUGFS + struct dentry *dentry = debugfs_create_file("tlb", S_IFREG | S_IRUGO, + iommu_debugfs_dir, NULL, + &e2k_iommu_debugfs_operations); + return IS_ERR(dentry) ? PTR_ERR(dentry) : 0; +#else /* CONFIG_IOMMU_DEBUGFS */ + return 0; +#endif /* CONFIG_IOMMU_DEBUGFS */ +} + +/* IOMMU API */ +static int e2k_iommu_map(struct iommu_domain *iommu_domain, + unsigned long iova, phys_addr_t paddr, size_t size, + int iommu_prot) +{ + struct e2k_iommu_domain *d = to_e2k_domain(iommu_domain); + + io_pte *ptep = d->pgtable; + int lvl = E2K_IOMMU_START_LVL(); + io_pte prot; + + if(WARN_ON(!IS_ALIGNED(paddr, size))) + return -EINVAL; + if(WARN_ON(!IS_ALIGNED(iova, size))) + return -EINVAL; + if(WARN_ON(size != SZ_4K && size != SZ_2M && size != SZ_1G)) + return -EINVAL; + + /* If no access, then nothing to do */ + if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) + return 0; + + prot = e2k_iommu_prot_to_pte(iommu_prot); + return __e2k_iommu_map(d, iova, paddr, size, prot, lvl, ptep); +} + +static size_t e2k_iommu_unmap(struct iommu_domain *iommu_domain, + unsigned long iova, size_t size, + struct iommu_iotlb_gather *gather) +{ + struct e2k_iommu_domain *d = to_e2k_domain(iommu_domain); + size_t unmapped; + io_pte *ptep = d->pgtable; + int lvl = E2K_IOMMU_START_LVL(); + + if(WARN_ON(!IS_ALIGNED(iova, size))) + return 0; + if(WARN_ON(size != SZ_4K && size != SZ_2M && size != SZ_1G)) + return 0; + + unmapped = __e2k_iommu_unmap(d, iova, size, lvl, ptep); + if (unmapped > 0) + e2k_iommu_flush_pte(d, iova); + + return unmapped; +} + +static phys_addr_t e2k_iommu_iova_to_phys(struct iommu_domain *iommu_domain, + dma_addr_t iova) +{ + struct e2k_iommu_domain *d = 
to_e2k_domain(iommu_domain); + return __e2k_iommu_iova_to_phys(d->pgtable, iova, NULL); +} + +static void e2k_iommu_detach_device(struct iommu_domain *iommu_domain, + struct device *dev) +{ + struct e2k_iommu *i; + struct e2k_iommu_domain *d = to_e2k_domain(iommu_domain); + unsigned long flags; + struct dte *dte; + if (!e2k_iommu_check_device(dev)) + return; + i = dev_to_iommu(dev); + + if (!dev_is_pci(dev)) + goto out; + dev->archdata.domain = NULL; + dte = dev_to_dte(i, dev); + if (dte) { + spin_lock_irqsave(&i->lock, flags); + memset(dte, 0, sizeof(*dte)); + spin_unlock_irqrestore(&i->lock, flags); + } +out: + e2k_iommu_flush_dev(i, dev); +} + +static int e2k_iommu_attach_device(struct iommu_domain *iommu_domain, + struct device *dev) +{ + struct e2k_iommu *i; + struct e2k_iommu_domain *d = to_e2k_domain(iommu_domain); + unsigned long flags; + struct dte *dte; + struct dte dteval = { + .h_present = 1, + .h_enable = 1, + .h_cached = 1, + .h_prefetch = 1, + .h_addr_width = E2K_DTE_HVAW_48_BITS, + .h_page_table = __pa(d->pgtable) >> IO_PAGE_SHIFT, + .int_enable = 1, + .id = iommu_group_id(dev->iommu_group), + }; + + if (!e2k_iommu_check_device(dev)) + return -EINVAL; + i = dev_to_iommu(dev); + + if (dev->archdata.domain) + e2k_iommu_detach_device(&dev->archdata.domain->domain, + dev); + + dev->archdata.domain = d; + + if (!dev_is_pci(dev)) + goto out; + + dte = dev_to_dte(i, dev); + + mutex_lock(&d->mutex); + if (!d->e2k_iommu) { + d->e2k_iommu = i; + d->pgtable = i->default_pgtable; + + if (iommu_domain->type == IOMMU_DOMAIN_UNMANAGED) { + struct kvm *kvm = dev->archdata.kvm; + unsigned long int_table; + u32 edid; + + /* Should be initialized in kvm_setup_passthrough() */ + BUG_ON(!kvm); + + int_table = __pa(page_address(kvm->arch.epic_pages)); + edid = (u32) kvm->arch.vmid.nr | + E2K_IOMMU_EDID_GUEST_MASK; + + e2k_iommu_virt_enable(i->node); + dteval.int_table = int_table >> IO_PAGE_SHIFT; + dteval.id = kvm->arch.vmid.nr; + dteval.guest = 1; + + d->id = edid; + } 
else { + d->id = iommu_group_id(dev->iommu_group); + } + } else if (WARN_ON(d->e2k_iommu != i)) { + mutex_unlock(&d->mutex); + return -EINVAL; + } + + if (!d->pgtable) + d->pgtable = __e2k_iommu_alloc_pages(E2K_IOMMU_GRANULE(), + GFP_KERNEL, i->node); + + if (!d->pgtable) { + mutex_unlock(&d->mutex); + return -ENOMEM; + } + mutex_unlock(&d->mutex); + + dteval.h_page_table = __pa(d->pgtable) >> IO_PAGE_SHIFT; + if (dte) { + spin_lock_irqsave(&i->lock, flags); + memcpy(dte, &dteval, sizeof(struct dte)); + spin_unlock_irqrestore(&i->lock, flags); + } +out: + return 0; +} + +static struct iommu_domain *__e2k_iommu_domain_alloc(unsigned type, int node) +{ + struct e2k_iommu_domain *d = kzalloc_node(sizeof(*d), GFP_KERNEL, node); + if (!d) + return NULL; + + if (type == IOMMU_DOMAIN_DMA) { + if (iommu_get_dma_cookie(&d->domain) != 0) + goto err_pgtable; + } else if (type != IOMMU_DOMAIN_UNMANAGED) { + goto err_pgtable; + } + mutex_init(&d->mutex); + d->domain.geometry.aperture_start = 0; + d->domain.geometry.aperture_end = (1UL << 48) - 1; + d->domain.geometry.force_aperture = true; + + return &d->domain; + +err_pgtable: + kfree(d); + return NULL; +} + +static struct iommu_domain *e2k_iommu_domain_alloc(unsigned type) +{ + return __e2k_iommu_domain_alloc(type, -1); +} + +static void e2k_iommu_domain_free(struct iommu_domain *iommu_domain) +{ + struct e2k_iommu_domain *d = to_e2k_domain(iommu_domain); + io_pte *ptep = d->pgtable; + + iommu_put_dma_cookie(iommu_domain); + __e2k_iommu_free_pgtable(d, 0, E2K_IOMMU_START_LVL(), ptep); + + if (!d->e2k_iommu || (d->e2k_iommu->default_pgtable != ptep)) + __e2k_iommu_free_pages(ptep, E2K_IOMMU_GRANULE()); + + if (d->e2k_iommu) + e2k_iommu_flush_domain(d); + + kfree(d); +} + +static int e2k_iommu_add_device(struct device *dev) +{ + struct iommu_group *group; + struct e2k_iommu *i; + + if (!e2k_iommu_check_device(dev)) + return -ENODEV; + if (e2k_iommu_disable_for_device(dev)) + return -ENODEV; + group = 
iommu_group_get_for_dev(dev); + if (IS_ERR(group)) + return PTR_ERR(group); + + i = dev_to_iommu(dev); + + iommu_group_put(group); + iommu_device_link(&i->iommu, dev); + iommu_setup_dma_ops(dev, 0, dma_get_mask(dev) + 1); + + return 0; +} + +static void e2k_iommu_remove_device(struct device *dev) +{ + struct e2k_iommu *i; + + if (!e2k_iommu_check_device(dev)) + return; + i = dev_to_iommu(dev); + iommu_device_unlink(&i->iommu, dev); + iommu_group_remove_device(dev); +} + +static struct iommu_group *e2k_iommu_device_group(struct device *dev) +{ + struct pci_dev *p; + struct e2k_iommu *i; + + if (!e2k_iommu_check_device(dev)) + return NULL; + i = dev_to_iommu(dev); + + if (i->default_group) + return iommu_group_ref_get(i->default_group); + if (e2k_iommu_no_domains) { + unsigned long flags; + spin_lock_irqsave(&i->lock, flags); + i->default_group = generic_device_group(dev); + spin_unlock_irqrestore(&i->lock, flags); + return i->default_group; + } + if (!dev_is_pci(dev)) { + p = e2k_dev_to_parent_pcidev(dev); + return iommu_group_ref_get(p->dev.iommu_group); + } + /* hw bug: ohci uses ehci device-id, so put them to one group */ + p = to_pci_dev(dev); + if (p->vendor == PCI_VENDOR_ID_MCST_TMP && + (p->device == PCI_DEVICE_ID_MCST_OHCI || + p->device == PCI_DEVICE_ID_MCST_EHCI)) { + struct pci_dev *pdev = pci_get_domain_bus_and_slot( + pci_domain_nr(p->bus), + p->bus->number, + PCI_DEVFN(PCI_SLOT(p->devfn), + PCI_FUNC(p->devfn) ^ 1)); + if (!pdev) + return NULL; + if (pdev->dev.iommu_group) + return iommu_group_ref_get(pdev->dev.iommu_group); + else + generic_device_group(dev); + } + return generic_device_group(dev); +} + +static bool e2k_iommu_capable(enum iommu_cap cap) +{ + switch (cap) { + case IOMMU_CAP_CACHE_COHERENCY: + return true; + case IOMMU_CAP_INTR_REMAP: + return true; /* MSIs are just memory writes */ + case IOMMU_CAP_NOEXEC: + return true; + default: + return false; + } +} + +#define VGA_MEMORY_OFFSET 0x000A0000 +#define VGA_MEMORY_SIZE 0x00020000 
+#define RT_MSI_MEMORY_SIZE 0x100000 /* 1 Mb */ +static void e2k_iommu_get_resv_regions(struct device *dev, + struct list_head *head) +{ + struct pci_dev *pdev; + struct iohub_sysdata *sd; + struct iommu_resv_region *region; + int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; + if (!e2k_iommu_check_device(dev)) + return; + + pdev = e2k_dev_to_parent_pcidev(dev); + sd = pdev->bus->sysdata; + + if (!sd->pci_msi_addr_lo) + return; + + region = iommu_alloc_resv_region(((u64)sd->pci_msi_addr_hi) + << 32 | sd->pci_msi_addr_lo, RT_MSI_MEMORY_SIZE, + prot, IOMMU_RESV_MSI); + if (!region) + return; + list_add_tail(®ion->list, head); + + region = iommu_alloc_resv_region(VGA_MEMORY_OFFSET, VGA_MEMORY_SIZE, + prot, IOMMU_RESV_RESERVED); + if (!region) + return; + list_add_tail(®ion->list, head); + + iommu_dma_get_resv_regions(dev, head); +} + +static void e2k_iommu_put_resv_regions(struct device *dev, + struct list_head *head) +{ + struct iommu_resv_region *entry, *next; + + list_for_each_entry_safe(entry, next, head, list) + kfree(entry); +} + +static struct iommu_ops e2k_iommu_ops = { + .map = e2k_iommu_map, + .unmap = e2k_iommu_unmap, + .iova_to_phys = e2k_iommu_iova_to_phys, + + .domain_alloc = e2k_iommu_domain_alloc, + .domain_free = e2k_iommu_domain_free, + .attach_dev = e2k_iommu_attach_device, + .detach_dev = e2k_iommu_detach_device, + .add_device = e2k_iommu_add_device, + .remove_device = e2k_iommu_remove_device, + .device_group = e2k_iommu_device_group, + .capable = e2k_iommu_capable, + + .get_resv_regions = e2k_iommu_get_resv_regions, + .put_resv_regions = e2k_iommu_put_resv_regions, + + .pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G, +}; + +static int __init e2k_iommu_setup(char *str) +{ + while (*str) { + if (!strncmp(str, "no-domains", 10)) { + e2k_iommu_no_domains = 1; + } else if (!strncmp(str, "direct-map", 10)) { + e2k_iommu_direct_map = 1; + e2k_iommu_no_domains = 1; + } else if (!strncmp(str, "disable:", 8)) { + e2k_iommu_cfg_for_device(str + 8, true); + } else 
if (!strncmp(str, "enable:", 7)) { + e2k_iommu_cfg_for_device(str + 7, false); + } + str += strcspn(str, ","); + while (*str == ',') + str++; + } + return 1; +} +__setup("e2k-iommu=", e2k_iommu_setup); + +static int __init e2k_iommu_init(void) +{ + int ret; + struct pci_bus *b; + + if (!HAS_MACHINE_E2K_IOMMU || (l_use_swiotlb && !e2k_iommu_direct_map)) + return 0; + + BUILD_BUG_ON(sizeof(struct dte) != 32); + + list_for_each_entry(b, &pci_root_buses, node) { + struct iohub_sysdata *sd; + struct e2k_iommu *i; + int node = 0; + + sd = b->sysdata; +#ifdef CONFIG_IOHUB_DOMAINS + node = sd->node; +#endif + i = __e2k_iommu_init(node, &b->dev); + if (!i) + return -ENOMEM; + sd->l_iommu = i; + } + if (e2k_iommu_direct_map) + return 0; + + ret = bus_set_iommu(&pci_bus_type, &e2k_iommu_ops); + if (ret) + return ret; + ret = bus_set_iommu(&platform_bus_type, &e2k_iommu_ops); + if (ret) + return ret; + e2k_iommu_init_pm_ops(); + e2k_iommu_debugfs_init(); + return ret; +} + +/* + * Needs to be done after pci initialization which are subsys_initcall. + */ +subsys_initcall_sync(e2k_iommu_init); diff --git a/arch/e2k/kernel/e2k.c b/arch/e2k/kernel/e2k.c new file mode 100644 index 000000000000..df393ab9e0e2 --- /dev/null +++ b/arch/e2k/kernel/e2k.c @@ -0,0 +1,438 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifdef CONFIG_PROTECTED_MODE +#include +#endif /* CONFIG_PROTECTED_MODE */ + + +#undef DEBUG_IRQ_MODE +#undef DebugIRQ +#define DEBUG_IRQ_MODE 0 /* interrupts */ +#define DebugIRQ(...) 
DebugPrint(DEBUG_IRQ_MODE ,##__VA_ARGS__) + + +extern char *get_mach_type_name(void); + + +static int soft_reset_off = 0; + +static int __init +disable_soft_reset(char *str) +{ + soft_reset_off = 1; + return 1; +} +__setup("softresetoff", disable_soft_reset); + +static int __init +enable_soft_reset(char *str) +{ + soft_reset_off = 0; + return 1; +} +__setup("softreseton", enable_soft_reset); + +static int __init +setup_soft_reset(char *str) +{ + if (strcmp(str, "on") == 0) { + soft_reset_off = 0; + } else if (strcmp(str, "off") == 0) { + soft_reset_off = 1; + } else { + printk("SOFT RESET enable/disable is not changed and is %s\n", + (soft_reset_off) ? "off" : "on"); + } + return 1; +} +__setup("softreset=", setup_soft_reset); + +int native_show_cpuinfo(struct seq_file *m, void *v) +{ + struct cpuinfo_e2k *c = v; + unsigned long last = cpumask_last(cpu_online_mask); + u64 freq; + int cpu; + +#ifdef CONFIG_SMP + cpu = c->cpu; + if (!cpu_online(cpu)) + return 0; +#else + cpu = 0; +#endif + freq = (measure_cpu_freq(cpu) + 500000) / 1000000; + + seq_printf(m, + "processor\t: %d\n" + "vendor_id\t: %s\n" + "cpu family\t: %d\n" + "model\t\t: %d\n" + "model name\t: %s\n" + "revision\t: %u\n" + "cpu MHz\t\t: %llu\n" + "bogomips\t: %llu.%02u\n\n", + cpu, c->family >= 5 ? 
ELBRUS_CPU_VENDOR : mcst_mb_name, + c->family, c->model, GET_CPU_TYPE_NAME(c->model), + c->revision, freq, 2 * freq, 0); + + + if (last == cpu) + show_cacheinfo(m); + + return 0; +} + +/* Add for rdma_sic module */ +int rdma_present = 0; +EXPORT_SYMBOL(rdma_present); + +#if IS_ENABLED(CONFIG_ELDSP) +void (*eldsp_interrupt_p)(struct pt_regs *regs) = NULL; +EXPORT_SYMBOL(eldsp_interrupt_p); + +void eldsp_interrupt(struct pt_regs *regs) +{ + static int int_eldsp_error = 0; + + ack_APIC_irq(); + irq_enter(); + if (eldsp_interrupt_p) { + eldsp_interrupt_p(regs); + } else { + if (!int_eldsp_error) + printk("eldsp: attempt calling null handler\n"); + int_eldsp_error++; + } + inc_irq_stat(irq_eldsp_count); + irq_exit(); +} +#endif + + +int iommu_panic_off = 0; + +static int __init +disable_iommu_panic(char *str) +{ + iommu_panic_off = 1; + return 1; +} +__setup("iommupanicoff", disable_iommu_panic); + + +#define L_IOMMU_MLT_HIT 0x8 +#define L_IOMMU_PROT_VIOL_RD 0x4 +#define L_IOMMU_PROT_VIOL_WR 0x2 +#define L_IOMMU_MMU_ERR_ADDR 0x1 + +static void iommu_interrupt(struct pt_regs *regs) +{ + int node = numa_node_id(), n; + int cpu = smp_processor_id(); + unsigned long fsr = 0, fsr2 = 0, addr; + char *err; + char str[1024]; + + ack_pic_irq(); + irq_enter(); + + fsr = l_iommu_read(node, L_IOMMU_ERROR); + fsr2 = l_iommu_read(node, L_IOMMU_ERROR1); + + addr = (fsr & (~0xf)) << (IO_PAGE_SHIFT - 4); + + err = fsr & L_IOMMU_MLT_HIT ? "Multihit" + : fsr & L_IOMMU_PROT_VIOL_WR ? "Write protection error" + : fsr & L_IOMMU_MMU_ERR_ADDR ? "Page miss" + : fsr & L_IOMMU_PROT_VIOL_RD ? 
"Read protection error" + : "Unknown error"; + n = snprintf(str, sizeof(str), + "IOMMU:%d: error on cpu %d:\n" + "\t%s at address 0x%lx " + "(device: %lx:%lx:%lx, error regs:%lx,%lx).\n", + node, cpu, + err, addr, + (fsr2 >> 8) & 0xff, (fsr2 >> 3) & 0x1f, + (fsr2 >> 0) & 0x7, + fsr, fsr2); + + debug_dma_dump_mappings(NULL); + if (iommu_panic_off) + pr_emerg("%s", str); + else + panic(str); + + irq_exit(); +} + +#ifdef CONFIG_EPIC +void __init e2k_init_IRQ_epic(void) +{ + int ret; + + /* + * Initialize interrupt[] array of system interrupts' handlers. + */ + epic_init_system_handlers_table(); + + /* guest IRQs additional handlers */ + init_guest_system_handlers_table(); + + setup_bsp_epic(); + + /* SIC access should be initialized before IOEPIC (for RT_MSI) */ + ret = e2k_sic_init(); + if (ret) + panic("e2k_sic_init() failed, error %d\n", ret); + + /* + * Initialize both IO-APICs and IO-EPICs + */ + if (nr_ioapics) + setup_IO_APIC(); + if (nr_ioepics) + setup_io_epic(); + +} +#endif + +void __init e2k_init_IRQ_apic(void) +{ + init_bsp_APIC(); + + /* + * Initialize interrupt[] array of system interrupts' handlers. + */ + l_init_system_handlers_table(); + + if (l_iommu_supported()) + setup_PIC_vector_handler(LVT3_INTERRUPT_VECTOR, + iommu_interrupt, 1, "iommu_interrupt"); + + if (machine.setup_apic_vector_handlers) + machine.setup_apic_vector_handlers(); + + /* guest IRQs additional handlers */ + init_guest_system_handlers_table(); + + default_setup_apic_routing(); + + if (!verify_local_APIC()) + pr_emerg("LAPIC is broken, trying to continue...\n"); + + connect_bsp_APIC(); + setup_local_APIC(); + + /* + * Enable IO APIC before setting up error vector. 
+ */ + enable_IO_APIC(); + + bsp_end_local_APIC_setup(); + + if (apic->setup_portio_remap) + apic->setup_portio_remap(); + + /* SIC access should be initialized before IOAPIC (for EPIC EOI) */ + if (HAS_MACHINE_L_SIC) { + int ret = e2k_sic_init(); + if (ret != 0) { + panic("e2k_sic_init() failed, error %d\n", ret); + } + } + + setup_IO_APIC(); +} + +#ifdef CONFIG_EPIC +void __init e2k_init_IRQ(void) +{ + if (cpu_has_epic()) + return e2k_init_IRQ_epic(); + else + return e2k_init_IRQ_apic(); +} +#else +void __init e2k_init_IRQ(void) +{ + return e2k_init_IRQ_apic(); +} +#endif + +void e2k_restart(char *cmd) +{ + while (soft_reset_off) { + E2K_CMD_SEPARATOR; + } + + if (machine.arch_reset) + machine.arch_reset(cmd); + + /* Never reached */ + printk("System did not restart, so it can be done only by hands\n"); +} + +static void do_halt(void) +{ + while (soft_reset_off) { + E2K_CMD_SEPARATOR; + } + if (machine.arch_halt) { + machine.arch_halt(); + } + + E2K_HALT_OK(); +} + +void e2k_power_off(void) +{ + printk("System power off...\n"); + do_halt(); +} + +void e2k_halt(void) +{ + printk("System halted.\n"); + do_halt(); +} + +/* + * Power off function, if any + */ +void (*pm_power_off)(void) = e2k_power_off; +EXPORT_SYMBOL(pm_power_off); + +/* + * machine structure is constant structure so can has own copy + * on each node in the case of NUMA + * Copy the structure to all nodes + */ +static void __init +native_e2k_setup_machine(void) +{ + int nid; + + for_each_node_has_dup_kernel(nid) { + the_node_machine(nid)->show_cpuinfo = native_show_cpuinfo; + the_node_machine(nid)->init_IRQ = e2k_init_IRQ; + the_node_machine(nid)->restart = e2k_restart; + the_node_machine(nid)->power_off = e2k_power_off; + the_node_machine(nid)->halt = e2k_halt; + } +} + +void __init +native_setup_machine(void) +{ +#ifdef CONFIG_E2K_MACHINE +# if defined(CONFIG_E2K_ES2_DSP) || defined(CONFIG_E2K_ES2_RU) + es2_setup_machine(); +# elif defined(CONFIG_E2K_E2S) + e2s_setup_machine(); +# elif 
defined(CONFIG_E2K_E8C) + e8c_setup_machine(); +# elif defined(CONFIG_E2K_E1CP) + e1cp_setup_machine(); +# elif defined(CONFIG_E2K_E8C2) + e8c2_setup_machine(); +# elif defined(CONFIG_E2K_E12C) + e12c_setup_machine(); +# elif defined(CONFIG_E2K_E16C) + e16c_setup_machine(); +# elif defined(CONFIG_E2K_E2C3) + e2c3_setup_machine(); +# else +# error "E2K MACHINE type does not defined" +# endif +#else /* ! CONFIG_E2K_MACHINE */ + switch (machine.native_id) + { +#if CONFIG_E2K_MINVER == 2 + case MACHINE_ID_ES2_DSP_LMS: + case MACHINE_ID_ES2_RU_LMS: + case MACHINE_ID_ES2_DSP: + case MACHINE_ID_ES2_RU: + es2_setup_machine(); + break; +#endif +#if CONFIG_E2K_MINVER <= 3 + case MACHINE_ID_E2S_LMS: + case MACHINE_ID_E2S: + e2s_setup_machine(); + break; +#endif +#if CONFIG_E2K_MINVER <= 4 + case MACHINE_ID_E8C_LMS: + case MACHINE_ID_E8C: + e8c_setup_machine(); + break; + case MACHINE_ID_E1CP_LMS: + case MACHINE_ID_E1CP: + e1cp_setup_machine(); + break; +#endif +#if CONFIG_E2K_MINVER <= 5 + case MACHINE_ID_E8C2_LMS: + case MACHINE_ID_E8C2: + e8c2_setup_machine(); + break; +#endif +#if CONFIG_E2K_MINVER <= 6 + case MACHINE_ID_E12C_LMS: + case MACHINE_ID_E12C: + e12c_setup_machine(); + break; + case MACHINE_ID_E16C_LMS: + case MACHINE_ID_E16C: + e16c_setup_machine(); + break; + case MACHINE_ID_E2C3_LMS: + case MACHINE_ID_E2C3: + e2c3_setup_machine(); + break; +#endif /* CONFIG_E2K_MINVER */ + default: + panic("setup_arch(): !!! 
UNKNOWN MACHINE TYPE !!!\n"); + machine.setup_arch = NULL; + break; + } +#endif /* CONFIG_E2K_MACHINE */ + + native_e2k_setup_machine(); +} diff --git a/arch/e2k/kernel/e2k_sic.c b/arch/e2k/kernel/e2k_sic.c new file mode 100644 index 000000000000..97dbf3e9b815 --- /dev/null +++ b/arch/e2k/kernel/e2k_sic.c @@ -0,0 +1,674 @@ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#undef DEBUG_SIC_MODE +#undef DebugSIC +#define DEBUG_SIC_MODE 0 /* SIC mapping & init */ +#define DebugSIC(fmt, args...) \ + ({ if (DEBUG_SIC_MODE) \ + pr_debug(fmt, ##args); }) + +#undef DEBUG_ERALY_NBSR_MODE +#undef DebugENBSR +#define DEBUG_ERALY_NBSR_MODE 0 /* early NBSR access */ +#define DebugENBSR(...) DebugPrint(DEBUG_ERALY_NBSR_MODE ,##__VA_ARGS__) + + +extern int __initdata max_iolinks; +extern int __initdata max_node_iolinks; + +e2k_addr_t sic_get_io_area_max_size(void) +{ + if (E2K_FULL_SIC_IO_AREA_SIZE >= E2K_LEGACY_SIC_IO_AREA_SIZE) + return E2K_FULL_SIC_IO_AREA_SIZE; + else + return E2K_LEGACY_SIC_IO_AREA_SIZE; +} + +static DEFINE_RAW_SPINLOCK(sic_mc_reg_lock); + +static unsigned int +sic_read_node_mc_nbsr_reg(int node, int channel, int reg_offset) +{ + unsigned int reg_val; + unsigned long flags; + + if (machine.native_iset_ver >= E2K_ISET_V6) { + raw_spin_lock_irqsave(&sic_mc_reg_lock, flags); + sic_write_node_nbsr_reg(node, MC_CH, channel); + } + + reg_val = sic_read_node_nbsr_reg(node, reg_offset); + + if (machine.native_iset_ver >= E2K_ISET_V6) + raw_spin_unlock_irqrestore(&sic_mc_reg_lock, flags); + + return reg_val; +} + +static void +sic_write_node_mc_nbsr_reg(int node, int channel, int reg_offset, unsigned int reg_value) +{ + unsigned long flags; + + if (machine.native_iset_ver >= E2K_ISET_V6) { + raw_spin_lock_irqsave(&sic_mc_reg_lock, flags); + sic_write_node_nbsr_reg(node, MC_CH, channel); + } + + sic_write_node_nbsr_reg(node, 
reg_offset, reg_value); + + if (machine.native_iset_ver >= E2K_ISET_V6) + raw_spin_unlock_irqrestore(&sic_mc_reg_lock, flags); +} + +static int sic_mc_ecc_reg_offset(int node, int num) +{ + if (machine.native_iset_ver < E2K_ISET_V6) { + switch (num) { + case 0: + return SIC_mc0_ecc; + case 1: + return SIC_mc1_ecc; + case 2: + return SIC_mc2_ecc; + case 3: + return SIC_mc3_ecc; + }; + } else { + return MC_ECC; + } + + return 0; +} + +unsigned int sic_get_mc_ecc(int node, int num) +{ + int reg_offset; + + if (reg_offset = sic_mc_ecc_reg_offset(node, num)) + return sic_read_node_mc_nbsr_reg(node, num, reg_offset); + + return 0; +} +EXPORT_SYMBOL(sic_get_mc_ecc); + +void sic_set_mc_ecc(int node, int num, unsigned int reg_value) +{ + int reg_offset; + + if (reg_offset = sic_mc_ecc_reg_offset(node, num)) + sic_write_node_mc_nbsr_reg(node, num, reg_offset, reg_value); +} + + +static int sic_mc_opmb_reg_offset(int node, int num) +{ + if (machine.native_iset_ver < E2K_ISET_V6) { + switch (num) { + case 0: + return SIC_mc0_opmb; + case 1: + return SIC_mc1_opmb; + case 2: + return SIC_mc2_opmb; + case 3: + return SIC_mc3_opmb; + }; + } else { + return MC_OPMB; + } + + return 0; +} + +unsigned int sic_get_mc_opmb(int node, int num) +{ + int reg_offset; + + if (reg_offset = sic_mc_opmb_reg_offset(node, num)) + return sic_read_node_mc_nbsr_reg(node, num, reg_offset); + + return 0; +} +EXPORT_SYMBOL(sic_get_mc_opmb); + +static int sic_mc_cfg_reg_offset(int node, int num) +{ + if (machine.native_iset_ver < E2K_ISET_V6) { + switch (num) { + case 0: + return SIC_mc0_cfg; + case 1: + return SIC_mc1_cfg; + case 2: + return SIC_mc2_cfg; + case 3: + return SIC_mc3_cfg; + }; + } else { + return MC_CFG; + } + + return 0; +} + +unsigned int sic_get_mc_cfg(int node, int num) +{ + int reg_offset; + + if (reg_offset = sic_mc_cfg_reg_offset(node, num)) + return sic_read_node_mc_nbsr_reg(node, num, reg_offset); + + return 0; +} +EXPORT_SYMBOL(sic_get_mc_cfg); + +static int 
sic_ipcc_csr_reg_offset(int num) +{ + switch (num) { + case 1: + return SIC_ipcc_csr1; + case 2: + return SIC_ipcc_csr2; + case 3: + return SIC_ipcc_csr3; + }; + + return 0; +} + +unsigned int sic_get_ipcc_csr(int node, int num) +{ + int reg_offset; + + if (reg_offset = sic_ipcc_csr_reg_offset(num)) + return sic_read_node_nbsr_reg(node, reg_offset); + + return 0; +} + +void sic_set_ipcc_csr(int node, int num, unsigned int reg_value) +{ + int reg_offset; + + if (reg_offset = sic_ipcc_csr_reg_offset(num)) + sic_write_node_nbsr_reg(node, reg_offset, reg_value); +} + +static int sic_ipcc_str_reg_offset(int num) +{ + switch (num) { + case 1: + return SIC_ipcc_str1; + case 2: + return SIC_ipcc_str2; + case 3: + return SIC_ipcc_str3; + }; + + return 0; +} + +unsigned int sic_get_ipcc_str(int node, int num) +{ + int reg_offset; + + if (reg_offset = sic_ipcc_str_reg_offset(num)) + return sic_read_node_nbsr_reg(node, reg_offset); + + return 0; +} + +void sic_set_ipcc_str(int node, int num, unsigned int val) +{ + int reg_offset; + + if (reg_offset = sic_ipcc_str_reg_offset(num)) + sic_write_node_nbsr_reg(node, reg_offset, val); +} + +static int sic_io_str_reg_offset(int num) +{ + switch (num) { + case 0: + return SIC_io_str; + case 1: + return machine.sic_io_str1; + }; + + return 0; +} + +unsigned int sic_get_io_str(int node, int num) +{ + int reg_offset; + + if (reg_offset = sic_io_str_reg_offset(num)) + return sic_read_node_nbsr_reg(node, reg_offset); + + return 0; +} + +void sic_set_io_str(int node, int num, unsigned int val) +{ + int reg_offset; + + if (reg_offset = sic_io_str_reg_offset(num)) + sic_write_node_nbsr_reg(node, reg_offset, val); +} + +static void create_nodes_io_config(void); + +int __init +e2k_early_iohub_online(int node, int link) +{ + e2k_iol_csr_struct_t io_link; + e2k_io_csr_struct_t io_hub; + int domain = node_iolink_to_domain(node, link); + int iohub_on = 0; + + DebugENBSR("started on node %d link %d\n", + node, link); + if (!node_online(node)) + 
return 0; + if (domain >= max_iolinks) + return 0; + if (link >= max_node_iolinks) + return 0; + /* FIXME: IO link registers of SIC mutate to WLCC registers */ + /* on legacy SIC */ + /* now we assume IO link on node #0 connected to IOHUB online */ + if (HAS_MACHINE_E2K_LEGACY_SIC) { + iohub_on = 1; + } else { + io_link.E2K_IOL_CSR_reg = early_sic_read_node_iolink_nbsr_reg( + node, link, SIC_iol_csr); + if (io_link.E2K_IOL_CSR_mode != IOHUB_IOL_MODE) + return 0; + io_hub.E2K_IO_CSR_reg = early_sic_read_node_iolink_nbsr_reg( + node, link, SIC_io_csr); + if (io_hub.E2K_IO_CSR_ch_on) { + iohub_on = 1; + } + } + DebugENBSR("IOHUB of node %d link %d %s\n", + node, link, (iohub_on) ? "ON" : "OFF"); + return iohub_on; +} + +/* + * SIC area mapping and init + */ +unsigned char *nodes_nbsr_base[MAX_NUMNODES]; +EXPORT_SYMBOL_GPL(nodes_nbsr_base); + +phys_addr_t nodes_nbsr_phys_base[MAX_NUMNODES]; + +int __init +e2k_sic_init(void) +{ + unsigned char *nbsr_base; + unsigned long long phys_base; + int node; + int ret = 0; + + if (!HAS_MACHINE_L_SIC) { + printk("e2k_sic_init() the arch has not SIC\n"); + return -ENODEV; + } + for_each_online_node(node) { + phys_base = (unsigned long long)THE_NODE_NBSR_PHYS_BASE(node); + nbsr_base = ioremap(phys_base, NODE_NBSR_SIZE); + if (nbsr_base == NULL) { + printk("e2k_sic_init() could not map NBSR registers " + "of node #%d, phys base 0x%llx, size 0x%lx\n", + node, phys_base, NODE_NBSR_SIZE); + ret =-ENOMEM; + } + DebugSIC("map NBSR of node #%d phys base " + "0x%llx, size 0x%lx to virtual addr 0x%px\n", + node, phys_base, NODE_NBSR_SIZE, nbsr_base); + nodes_nbsr_base[node] = nbsr_base; + nodes_nbsr_phys_base[node] = phys_base; + } + create_nodes_io_config(); + return ret; +} + +unsigned long domain_to_pci_conf_base[MAX_NUMIOLINKS] = { + [ 0 ... 
(MAX_NUMIOLINKS-1) ] = 0 + }; + +#ifdef CONFIG_IOHUB_DOMAINS +static void create_nodes_pci_conf(void) +{ + + int domain; + + for_each_iohub(domain) { + domain_to_pci_conf_base[domain] = + sic_domain_pci_conf_base(domain); + DebugSIC("IOHUB domain #%d (node %d, " + "IO link %d) PCI CFG base 0x%lx\n", + domain, iohub_domain_to_node(domain), + iohub_domain_to_link(domain), + domain_to_pci_conf_base[domain]); + } + +} +#else /* !CONFIG_IOHUB_DOMAINS: */ +static void create_nodes_pci_conf(void) +{ + domain_to_pci_conf_base[0] = + sic_domain_pci_conf_base(0); +} +#endif /* !CONFIG_IOHUB_DOMAINS */ + +#ifdef CONFIG_IOHUB_DOMAINS +/* + * IO Links of all nodes configuration + */ + +static void create_iolink_config(int node, int link) +{ + e2k_iol_csr_struct_t io_link; + e2k_io_csr_struct_t io_hub; + e2k_rdma_cs_struct_t rdma; + int link_on; + + link_on = 0; + + /* FIXME: IO link registers of SIC mutate to WLCC registers */ + /* on legacy SIC */ + /* now we assume IO link on node #0 connected to IOHUB online */ + if (HAS_MACHINE_E2K_LEGACY_SIC) { + io_link.E2K_IOL_CSR_reg = 0; + io_link.E2K_IOL_CSR_mode = IOHUB_IOL_MODE; + io_link.E2K_IOL_CSR_abtype = IOHUB_ONLY_IOL_ABTYPE; + } else { + io_link.E2K_IOL_CSR_reg = sic_read_node_iolink_nbsr_reg( + node, link, SIC_iol_csr); + } + printk(KERN_INFO "Node #%d IO LINK #%d is", node, link); + if (io_link.E2K_IOL_CSR_mode == IOHUB_IOL_MODE) { + node_iohub_set(node, link, iolink_iohub_map); + iolink_iohub_num ++; + printk(" IO HUB controller"); + /* FIXME: IO link registers of SIC mutate to WLCC registers */ + /* on legacy SIC */ + /* now we assume IO link on node #0 connected to IOHUB online */ + if (HAS_MACHINE_E2K_LEGACY_SIC) { + io_hub.E2K_IO_CSR_reg = 0; + io_hub.E2K_IO_CSR_ch_on = 1; + } else { + io_hub.E2K_IO_CSR_reg = sic_read_node_iolink_nbsr_reg( + node, link, SIC_io_csr); + } + if (io_hub.E2K_IO_CSR_ch_on) { + node_iohub_set(node, link, iolink_online_iohub_map); + iolink_online_iohub_num ++; + link_on = 1; + printk(" ON"); + 
} else { + printk(" OFF"); + } + } else { + if (machine.native_iset_ver <= E2K_ISET_V3) { + node_rdma_set(node, link, iolink_rdma_map); + iolink_rdma_num++; + printk(" RDMA controller"); + rdma.E2K_RDMA_CS_reg = sic_read_node_iolink_nbsr_reg( + node, link, SIC_rdma_cs); + if (rdma.E2K_RDMA_CS_ch_on) { + node_rdma_set(node, link, + iolink_online_rdma_map); + iolink_online_rdma_num++; + link_on = 1; + printk(" ON 0x%08x", rdma.E2K_RDMA_CS_reg); + } else { + printk(" OFF 0x%08x", rdma.E2K_RDMA_CS_reg); + } + } else { + printk(" not connected"); + } + } + if (link_on) { + int ab_type = io_link.E2K_IOL_CSR_abtype; + printk(" connected to"); + switch (ab_type) { + case IOHUB_ONLY_IOL_ABTYPE: + printk(" IO HUB controller"); + break; + case RDMA_ONLY_IOL_ABTYPE: + printk(" RDMA controller"); + break; + case RDMA_IOHUB_IOL_ABTYPE: + printk(" IO HUB/RDMA controller"); + break; + default: + printk(" unknown controller"); + break; + } + } + printk("\n"); +} + +static void __init create_nodes_io_config(void) +{ + int node; + int link; + + for_each_online_node(node) { + for_each_iolink_of_node(link) { + if (iolinks_num >= max_iolinks) + break; + if (link >= max_node_iolinks) + break; + iolinks_num ++; + create_iolink_config(node, link); + } + if (iolinks_num >= max_iolinks) + break; + } + if (iolinks_num > 1) { + printk(KERN_INFO "Total IO links %d: IOHUBs %d, RDMAs %d\n", + iolinks_num, iolink_iohub_num, iolink_rdma_num); + } + create_nodes_pci_conf(); +} +#else /* !CONFIG_IOHUB_DOMAINS */ + + /* + * IO Link of nodes configuration + */ +nodemask_t node_iohub_map = NODE_MASK_NONE; +nodemask_t node_online_iohub_map = NODE_MASK_NONE; +int node_iohub_num = 0; +int node_online_iohub_num = 0; +nodemask_t node_rdma_map = NODE_MASK_NONE; +nodemask_t node_online_rdma_map = NODE_MASK_NONE; +int node_rdma_num = 0; +int node_online_rdma_num = 0; + +static void __init create_nodes_io_config(void) +{ + int node; + e2k_iol_csr_struct_t io_link; + e2k_io_csr_struct_t io_hub; + 
e2k_rdma_cs_struct_t rdma; + int link_on; + + for_each_online_node(node) { + link_on = 0; + /* FIXME: IO link registers of SIC mutate to WLCC registers */ + /* on legacy SIC */ + /* now we assume IO link on node #0 connected to IOHUB online */ + if (HAS_MACHINE_E2K_LEGACY_SIC) { + io_link.E2K_IOL_CSR_reg = 0; + io_link.E2K_IOL_CSR_mode = IOHUB_IOL_MODE; + io_link.E2K_IOL_CSR_abtype = IOHUB_ONLY_IOL_ABTYPE; + } else { + io_link.E2K_IOL_CSR_reg = sic_read_node_nbsr_reg(node, + SIC_iol_csr); + } + printk("Node #%d IO LINK is", node); + if (io_link.E2K_IOL_CSR_mode == IOHUB_IOL_MODE) { + node_set(node, node_iohub_map); + node_iohub_num ++; + printk(" IO HUB controller"); + /* FIXME: IO link registers of SIC mutate to WLCC */ + /* registers on legacy SIC */ + /* now we assume IO link on node #0 connected to */ + /* IOHUB online */ + if (HAS_MACHINE_E2K_LEGACY_SIC) { + io_hub.E2K_IO_CSR_reg = 0; + io_hub.E2K_IO_CSR_ch_on = 1; + } else { + io_hub.E2K_IO_CSR_reg = + sic_read_node_nbsr_reg(node, + SIC_io_csr); + } + if (io_hub.E2K_IO_CSR_ch_on) { + node_set(node, node_online_iohub_map); + node_online_iohub_num ++; + link_on = 1; + printk(" ON"); + } else { + printk(" OFF"); + } + } else { + node_set(node, node_rdma_map); + node_rdma_num ++; + printk(" RDMA controller"); + rdma.E2K_RDMA_CS_reg = + sic_read_node_nbsr_reg(node, SIC_rdma_cs); + if (rdma.E2K_RDMA_CS_ch_on) { + node_set(node, node_online_rdma_map); + node_online_rdma_num ++; + link_on = 1; + printk(" ON 0x%08x", rdma.E2K_RDMA_CS_reg); + } else { + printk(" OFF 0x%08x", rdma.E2K_RDMA_CS_reg); + } + } + if (link_on) { + int ab_type = io_link.E2K_IOL_CSR_abtype; + printk(" connected to"); + switch (ab_type) { + case IOHUB_ONLY_IOL_ABTYPE: + printk(" IO HUB controller"); + break; + case RDMA_ONLY_IOL_ABTYPE: + printk(" RDMA controller"); + break; + case RDMA_IOHUB_IOL_ABTYPE: + printk(" IO HUB/RDMA controller"); + break; + default: + printk(" unknown controller"); + break; + } + } + printk("\n"); + } + 
create_nodes_pci_conf(); +} +/* Add for rdma_sic module */ +EXPORT_SYMBOL(node_rdma_num); +EXPORT_SYMBOL(node_online_rdma_map); +EXPORT_SYMBOL(node_online_rdma_num); + +#endif /* !CONFIG_IOHUB_DOMAINS */ + +static DEFINE_RAW_SPINLOCK(sic_error_lock); + +static void sic_mc_regs_dump(int node) +{ + if (machine.native_iset_ver < E2K_ISET_V6) { + int offset = SIC_MC_BASE; + + pr_emerg("MC registers dump:\n"); + for (; offset < SIC_MC_BASE + SIC_MC_SIZE; offset += 4) + pr_emerg("%x ", sic_read_node_nbsr_reg(node, offset)); + pr_emerg("\n"); + } else { + u32 hmu_mic = sic_read_node_nbsr_reg(node, HMU_MIC); + int i = 0; + + pr_emerg("HMU_MIC 0x%x\n", hmu_mic); + + hmu_mic = (hmu_mic & 0xff000000) >> 24; + + for (; i < SIC_MAX_MC_COUNT; i++) { + if (hmu_mic & (1 << i)) { + pr_emerg("MC_STATUS[%d] 0x%x", i, + sic_read_node_mc_nbsr_reg(node, i, MC_STATUS)); + } + } + } +} + +static void sic_hmu_regs_dump(int node) +{ + pr_emerg("HMU0_INT 0x%x HMU1_INT 0x%x HMU2_INT 0x%x HMU3_INT 0x%x\n", + sic_read_node_nbsr_reg(node, HMU0_INT), + sic_read_node_nbsr_reg(node, HMU1_INT), + sic_read_node_nbsr_reg(node, HMU2_INT), + sic_read_node_nbsr_reg(node, HMU3_INT)); +} + +void do_sic_error_interrupt(void) +{ + int node; + unsigned long flags; + + if (!raw_spin_trylock_irqsave(&sic_error_lock, flags)) + return; + + for_each_online_node(node) { + pr_emerg("----- NODE%d -----\n", node); + + pr_emerg("%s_INT=0x%x\n", + (machine.native_iset_ver < E2K_ISET_V6) ? 
"SIC" : "XMU", + sic_read_node_nbsr_reg(node, SIC_sic_int)); + + if (machine.native_iset_ver >= E2K_ISET_V6) + sic_hmu_regs_dump(node); + + sic_mc_regs_dump(node); + } + + raw_spin_unlock_irqrestore(&sic_error_lock, flags); +} + +void sic_error_interrupt(struct pt_regs *regs) +{ + ack_pic_irq(); + irq_enter(); + + do_sic_error_interrupt(); + + panic("SIC error interrupt received on CPU%d:\n", + smp_processor_id()); + + irq_exit(); +} diff --git a/arch/e2k/kernel/e2k_syswork.c b/arch/e2k/kernel/e2k_syswork.c new file mode 100644 index 000000000000..28cf19dae5d8 --- /dev/null +++ b/arch/e2k/kernel/e2k_syswork.c @@ -0,0 +1,3808 @@ +/* + * This file contains various syswork to run them from user. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define DEBUG_ACCVM 0 +#if DEBUG_ACCVM +# define DebugACCVM(...) pr_info(__VA_ARGS__) +#else +# define DebugACCVM(...) +#endif + +#define DEBUG_E2K_SYS_WORK 0 +#define DbgESW(...) DebugPrint(DEBUG_E2K_SYS_WORK, ##__VA_ARGS__) + +#define DEBUG_GETCONTEXT 0 +#define DebugGC(...) DebugPrint(DEBUG_GETCONTEXT, ##__VA_ARGS__) + +#undef DEBUG_DUMP_STACK_MODE +#undef DebugDS +#define DEBUG_DUMP_STACK_MODE 0 +#define DebugDS(...) DebugPrint(DEBUG_DUMP_STACK_MODE, ##__VA_ARGS__) + +#undef DebugCM +#undef DEBUG_CORE_MODE +#define DEBUG_CORE_MODE 0 +#define DebugCM(...) 
DebugPrint(DEBUG_CORE_MODE, ##__VA_ARGS__) + +extern int task_statm(struct mm_struct *, int *, int *, int *, int *); + +long e2k_lx_dbg = 0; +int end_of_work = 0; + +int jtag_stop_var; +EXPORT_SYMBOL(jtag_stop_var); + +void *kernel_symtab; +long kernel_symtab_size; +void *kernel_strtab; +long kernel_strtab_size; + +e2k_addr_t print_kernel_address_ptes(e2k_addr_t address); + +int debug_userstack = 0; +static int __init userstack_setup(char *str) +{ + debug_userstack = 1; + return 1; +} +__setup("debug_userstack", userstack_setup); + +#ifdef CONFIG_DATA_STACK_WINDOW +int debug_datastack = 0; +static int __init datastack_setup(char *str) +{ + debug_datastack = 1; + return 1; +} +__setup("debug_datastack", datastack_setup); +#endif + + +#ifdef CONFIG_E2K_PROFILING +disable_interrupt_t disable_interrupt[NR_CPUS]; +EXPORT_SYMBOL(disable_interrupt); +char *system_info_name[] = { + "disabled interrupts ", + "storing stack_register", + "storing TIR", + "storing all registers", + "storing debug registers", + "storing aau registers", + "restoring of stack_registers", + "restoring all registers", + "restoring debug registers", + "restoring aau registers", + "cpu_idle", + "spin_lock", + "mutex_lock", + "hard_irq", + "switch", + "soft_irq", + "preempt_count", +}; +system_info_t system_info[NR_CPUS]; +int enable_collect_interrupt_ticks = 0; +EXPORT_SYMBOL( system_info); +EXPORT_SYMBOL(enable_collect_interrupt_ticks); + +long TIME=0; +long TIME1=0; +extern const char *exc_tbl_name[]; +extern unsigned long sys_call_table[NR_syscalls]; + +static void clear_interrupt_info(void) +{ + memset(&system_info[0], 0, sizeof(system_info)); + memset(&disable_interrupt[0], 0, sizeof(disable_interrupt)); + + TIME = READ_CLKR_REG(); + enable_collect_interrupt_ticks = 1; +} + +static void print_interrupt_info(void) +{ + int i, j; + time_info_t* pnt; + long freq; + int print_ip = 0; // !!!! 
tmp + + enable_collect_interrupt_ticks = 0; + freq = cpu_data[0].proc_freq /1000000 ; + + pr_info("\t\t ==============PROFILE INFO=(%ld(ticks) /%ld(mks)/)" + "==============\n", + TIME1 - TIME, (TIME1 - TIME) / freq); + + for_each_possible_cpu(j) { + + pr_info("\t\t\t CPU%d\n", j); + pnt = (time_info_t*) &system_info[j].max_disabled_interrupt; + for (i = 0; i < sizeof(system_info_name)/sizeof(void *); i++) { + pr_info(" %30s max time=%10ld average=%10ld " + "number=%10ld\n", + system_info_name[i], + pnt->max_time / freq, + pnt->full_time/freq / + ((pnt->number == 0) ? 1 : pnt->number), + pnt->number); + if (print_ip) { + pr_info(" time=%10lx ", pnt->max_begin_time); + printk("\t\t\t %pS", pnt->max_beg_ip); + printk(" (%pS) ---\n", pnt->max_beg_parent_ip); + printk("\t\t\t\t %pS", pnt->max_end_ip); + printk("( %pS)\n", pnt->max_end_parent_ip); + } + pnt++; + } + pr_info("\n\t\t\t\t system calls\n"); + for (i = 0; i < NR_syscalls; i++) { + if (disable_interrupt[j].syscall[i]) { + printk(" %30pS ", sys_call_table[i]); + printk("average=%5ld number=%10ld \n", + disable_interrupt[j].syscall_time[i]/freq/ + ((disable_interrupt[j].syscall[i] == 0)? 1 + : disable_interrupt[j].syscall[i]), + disable_interrupt[j].syscall[i]); + } + } + + printk("\n\t\t\t\t interrupts \n"); + for (i = 0; i < exc_max_num; i++) { + if (disable_interrupt[j].interrupts[i]) { + printk(" %30s max time=%5ld average=%5ld number=%10ld \n", + exc_tbl_name[i], + disable_interrupt[j].max_interrupts_time[i]/freq , + disable_interrupt[j].interrupts_time[i]/freq/ + ((disable_interrupt[j].interrupts[i] == 0) ?1 + : disable_interrupt[j].interrupts[i]), + disable_interrupt[j].interrupts[i]); + } + + } + printk("\n\t\t\t\t DO_IRQ \n"); + for (i = 0; i < NR_VECTORS; i++) { + if (disable_interrupt[j].do_irq[i]) { + printk(" %5d max time=%5ld average=%5ld number=%10ld \n", + i, + disable_interrupt[j].max_do_irq_time[i]/freq , + disable_interrupt[j].do_irq_time[i]/freq/ + ((disable_interrupt[j].do_irq[i] ==0)? 
1 + : disable_interrupt[j].do_irq[i]), + disable_interrupt[j].do_irq[i]); + } + } + + } + +}; +static void stop_interrupt_info(void) +{ + TIME1 = READ_CLKR_REG(); + enable_collect_interrupt_ticks = 0; + + printk(" start =%lx stop_interrupt_info =%lx " + " begin_time(max_disabled_interrupt 0) =%lx" + " end_time =%lx max_time =%lx " + " begin_time(max_disabled_interrupt 1) =%lx " + " end_time =%lx max_time =%lx \n", + TIME, TIME1, system_info[0].max_disabled_interrupt.begin_time, + system_info[0].max_disabled_interrupt.begin_time + +system_info[0].max_disabled_interrupt.max_time, + system_info[0].max_disabled_interrupt.max_time, + system_info[1].max_disabled_interrupt.begin_time, + system_info[1].max_disabled_interrupt.begin_time + +system_info[1].max_disabled_interrupt.max_time, + system_info[1].max_disabled_interrupt.max_time); + + }; + +#else /* !CONFIG_E2K_PROFILING */ +static void print_interrupt_info(void) {}; +static void clear_interrupt_info(void) {}; +static void stop_interrupt_info(void) {}; +#endif /* CONFIG_E2K_PROFILING */ + +/* Preallocate psp and data stack cache for the boot CPU so that + * it can print full stacks from other CPUs as early as possible + * (at boot time sysrq is handled by the boot CPU). */ +char psp_stack_cache[SIZE_PSP_STACK]; +#ifdef CONFIG_DATA_STACK_WINDOW +char k_data_stack_cache[SIZE_DATA_STACK]; +#endif + +__initdata +static char chain_stack_cache[NR_CPUS][SIZE_CHAIN_STACK]; + + +/* Initially 'chain_stack_cache' array is used but later + * it is discarded together with .init.data and replaced + * with kmalloc()'ed memory (see print_stack_init()). 
*/ +__refdata struct stack_regs stack_regs_cache[NR_CPUS] = { + [0].psp_stack_cache = psp_stack_cache, +#ifdef CONFIG_DATA_STACK_WINDOW + [0].k_data_stack_cache = k_data_stack_cache, +#endif + [0].chain_stack_cache = chain_stack_cache[0], +}; + +__init +void setup_stack_print() +{ + int i; + + for (i = 0; i < NR_CPUS; i++) + stack_regs_cache[i].chain_stack_cache = chain_stack_cache[i]; +} + +static int careful_tagged_copy(void *dst, void *src, unsigned long sz) +{ + SET_USR_PFAULT("$.recovery_memcpy_fault"); + fast_tagged_memory_copy(dst, src, sz, + TAGGED_MEM_STORE_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + TAGGED_MEM_LOAD_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + 0); /* prefetch */ + if (RESTORE_USR_PFAULT) + return -EFAULT; + + return 0; +} + +#ifdef CONFIG_SMP +static int get_cpu_regs_nmi(int cpu, struct task_struct *task, + struct stack_regs *const regs); +#endif + +#ifdef CONFIG_DATA_STACK_WINDOW +static void copy_k_data_stack_regs(const struct pt_regs *limit_regs, + struct stack_regs *regs) +{ + struct pt_regs *pt_regs; + int i; + + if (!regs->show_k_data_stack) + return; + + if (!limit_regs || kernel_mode(limit_regs)) { + e2k_usd_lo_t usd_lo; + e2k_pusd_lo_t pusd_lo; + e2k_usd_hi_t usd_hi; + e2k_addr_t sbr; + + if (!limit_regs) { + usd_lo = NATIVE_NV_READ_USD_LO_REG(); + usd_hi = NATIVE_NV_READ_USD_HI_REG(); + sbr = NATIVE_NV_READ_SBR_REG_VALUE(); + } else { + usd_lo = limit_regs->stacks.usd_lo; + usd_hi = limit_regs->stacks.usd_hi; + sbr = limit_regs->stacks.top; + } + AW(pusd_lo) = AW(usd_lo); + regs->base_k_data_stack = (void *) ((AS(usd_lo).p) ? + (sbr + AS(pusd_lo).base) : AS(usd_lo).base); + /* We found the current data stack frame, but + * of intereset is our parent's frame. Move up. 
*/ + regs->base_k_data_stack = regs->base_k_data_stack - + AS(usd_hi).size + 16 * AS(regs->crs.cr1_hi).ussz; + regs->real_k_data_stack_addr = regs->base_k_data_stack; + regs->size_k_data_stack = sbr - + (unsigned long) regs->base_k_data_stack; + } else { + regs->base_k_data_stack = NULL; + } + + pt_regs = find_host_regs(current_thread_info()->pt_regs); + + for (i = 0; i < MAX_PT_REGS_SHOWN; i++) { + if (!pt_regs) + break; + + regs->pt_regs[i].valid = 1; + regs->pt_regs[i].addr = (unsigned long) pt_regs; + pt_regs = find_host_regs(pt_regs->next); + } +} +#else +static void copy_k_data_stack_regs(const struct pt_regs *limit_regs, + struct stack_regs *regs) +{ +} +#endif + +static void copy_trap_stack_regs(const struct pt_regs *limit_regs, + struct stack_regs *regs) +{ + struct pt_regs *trap_pt_regs; + int i; + + if (!regs->show_trap_regs) + return; + + trap_pt_regs = find_trap_host_regs(current_thread_info()->pt_regs); + + while (trap_pt_regs && limit_regs && + (AS(trap_pt_regs->stacks.pcsp_lo).base + + AS(trap_pt_regs->stacks.pcsp_hi).ind) > + (AS(limit_regs->stacks.pcsp_lo).base + + AS(limit_regs->stacks.pcsp_hi).ind)) + trap_pt_regs = find_trap_host_regs(trap_pt_regs->next); + + for (i = 0; i < MAX_USER_TRAPS; i++) { + if (!trap_pt_regs) + break; + + regs->trap[i].frame = AS(trap_pt_regs->stacks.pcsp_lo).base + + AS(trap_pt_regs->stacks.pcsp_hi).ind; + regs->trap[i].ctpr1 = trap_pt_regs->ctpr1; + regs->trap[i].ctpr1_hi = trap_pt_regs->ctpr1_hi; + regs->trap[i].ctpr2 = trap_pt_regs->ctpr2; + regs->trap[i].ctpr2_hi = trap_pt_regs->ctpr2_hi; + regs->trap[i].ctpr3 = trap_pt_regs->ctpr3; + regs->trap[i].ctpr3_hi = trap_pt_regs->ctpr3_hi; + regs->trap[i].lsr = trap_pt_regs->lsr; + regs->trap[i].ilcr = trap_pt_regs->ilcr; + if (machine.native_iset_ver >= E2K_ISET_V5) { + regs->trap[i].lsr1 = trap_pt_regs->lsr1; + regs->trap[i].ilcr1 = trap_pt_regs->ilcr1; + } + if (trap_pt_regs->trap && trap_pt_regs->trap->sbbp) { + memcpy(regs->trap[i].sbbp, trap_pt_regs->trap->sbbp, + 
sizeof(regs->trap[i].sbbp)); + } else { + memset(regs->trap[i].sbbp, 0, + sizeof(regs->trap[i].sbbp)); + } + regs->trap[i].valid = 1; + + trap_pt_regs = find_trap_host_regs(trap_pt_regs->next); + } +} + +static unsigned long copy_user_hardware_stack(void *dst, void *src, u64 sz) +{ + unsigned long n; + int ret; + + /* We are currently on reserve stacks which means + * that this function is trying to access kernel's stacks */ + if (on_reserve_stacks()) + if (careful_tagged_copy(dst, src, sz)) + return sz; + + n = (u64)src + sz - PAGE_ALIGN_UP((u64)src + sz); + if (n == 0) + n = PAGE_SIZE; + n = min(n, sz); + + src = src + sz - n; + dst = dst + sz - n; + + while (sz > 0) { + /* Trying to handle page fault for user hardware stack + * might lead to accessing swap which is not a good idea + * if we want to reliably print stack */ + pagefault_disable(); + ret = copy_e2k_stack_from_user(dst, src, n, NULL); + pagefault_enable(); + + if (ret) + return sz; + + sz -= n; + n = min(sz, PAGE_SIZE); + src -= n; + dst -= n; + } + + WARN_ON(sz); + + return 0; +} + +static void copy_proc_stack_regs(const struct pt_regs *limit_regs, + struct stack_regs *regs) +{ + const struct pt_regs *pt_regs; + void *dst, *src; + u64 sz; + + if (!regs->psp_stack_cache) { + regs->base_psp_stack = NULL; + return; + } + + if (limit_regs && on_reserve_stacks()) { + /* Hardware stack overflow happened. First we copy + * SPILLed to reserve stacks part of kernel stacks. */ + src = (void *) AS(READ_PSP_LO_REG()).base; + if (limit_regs) { + sz = GET_PSHTP_MEM_INDEX(limit_regs->stacks.pshtp); + regs->orig_base_psp_stack_k = + AS(limit_regs->stacks.psp_lo).base + + AS(limit_regs->stacks.psp_hi).ind - sz; + } else { + sz = min(AS(regs->psp_hi).ind, + (u64) SIZE_PSP_STACK); + regs->orig_base_psp_stack_k = (u64) src; + } + } else if (limit_regs && + AS(limit_regs->stacks.psp_lo).base < PAGE_OFFSET) { + /* Trying to get user stacks through NMI. First we + * copy SPILLed to kernel part of user stacks. 
*/ + sz = GET_PSHTP_MEM_INDEX(limit_regs->stacks.pshtp); + src = (void *) AS(current_thread_info()->k_psp_lo).base; + regs->orig_base_psp_stack_k = (u64) src; + } else { + /* Trying to get all stacks; start with kernel. */ + sz = min((int) AS(regs->psp_hi).ind, SIZE_PSP_STACK); + src = (void *) AS(regs->psp_lo).base; + regs->orig_base_psp_stack_k = (u64) src; + } + dst = regs->psp_stack_cache + SIZE_PSP_STACK - sz; + + if (careful_tagged_copy(dst, src, sz)) { + pr_alert("WARNING current procedure stack not available at %px\n", + src); + /* We can still print chain stack */ + regs->base_psp_stack = NULL; + return; + } + + regs->base_psp_stack = dst; + regs->size_psp_stack = sz; + + if (on_reserve_stacks()) { + pt_regs = limit_regs; + regs->user_size_psp_stack = 0; + } else { + pt_regs = current_pt_regs(); + if (pt_regs && !user_mode(pt_regs)) + pt_regs = NULL; + regs->user_size_psp_stack = (pt_regs) ? + GET_PSHTP_MEM_INDEX(pt_regs->stacks.pshtp) : 0; + } + if (pt_regs) { + unsigned long copied; + + src = (void *) AS(pt_regs->stacks.psp_lo).base; + sz = AS(pt_regs->stacks.psp_hi).ind - + GET_PSHTP_MEM_INDEX(pt_regs->stacks.pshtp); + if (sz > regs->base_psp_stack - regs->psp_stack_cache) { + s64 delta = sz - (u64) (regs->base_psp_stack - + regs->psp_stack_cache); + sz -= delta; + src += delta; + } + dst = regs->base_psp_stack - sz; + + copied = sz - copy_user_hardware_stack(dst, src, sz); + + if (copied) { + regs->base_psp_stack -= copied; + regs->size_psp_stack += copied; + regs->orig_base_psp_stack_k -= copied; + regs->orig_base_psp_stack_u = (u64) src + sz - copied; + regs->user_size_psp_stack += (on_reserve_stacks()) ? 
0 : copied; + } else { + regs->orig_base_psp_stack_u = 0; + } + } else { + regs->orig_base_psp_stack_u = 0; + } +} + +static int copy_chain_stack_regs(const struct pt_regs *limit_regs, + struct stack_regs *regs) +{ + const struct pt_regs *pt_regs; + void *dst, *src; + u64 sz, free_dst_sz = SIZE_CHAIN_STACK; + + if (!regs->chain_stack_cache) { + regs->base_chain_stack = NULL; + return -ENOMEM; + } + + if (on_reserve_stacks()) { + /* Hardware stack overflow happened. First we copy + * SPILLed to reserve stacks part of kernel stacks. */ + src = (void *) AS(READ_PCSP_LO_REG()).base; + if (limit_regs) { + sz = PCSHTP_SIGN_EXTEND(limit_regs->stacks.pcshtp); + regs->orig_base_chain_stack_k = + AS(limit_regs->stacks.pcsp_lo).base + + AS(limit_regs->stacks.pcsp_hi).ind - sz; + } else { + sz = min((u64) AS(regs->pcsp_hi).ind, free_dst_sz); + regs->orig_base_chain_stack_k = (u64) src; + } + } else if (limit_regs && + AS(limit_regs->stacks.pcsp_lo).base < PAGE_OFFSET) { + /* Trying to get user stacks through NMI. First we + * copy SPILLed to kernel part of user stacks. */ + sz = PCSHTP_SIGN_EXTEND(limit_regs->stacks.pcshtp); + src = (void *) AS(current_thread_info()->k_pcsp_lo).base; + regs->orig_base_chain_stack_k = (u64) src; + } else { + /* Trying to get all stacks; start with kernel. 
*/ + sz = min((u64) AS(regs->pcsp_hi).ind, free_dst_sz); + src = (void *) AS(regs->pcsp_lo).base; + regs->orig_base_chain_stack_k = (u64) src; + } + dst = regs->chain_stack_cache + free_dst_sz - sz; + + if (careful_tagged_copy(dst, src, sz)) { + pr_alert("WARNING current chain stack not available at %px\n", + src); + regs->base_chain_stack = NULL; + return -EFAULT; + } + + regs->base_chain_stack = dst; + regs->size_chain_stack = sz; + + if (on_reserve_stacks()) { + pt_regs = limit_regs; + regs->user_size_chain_stack = 0; + } else { + pt_regs = current_pt_regs(); + if (!pt_regs || !user_mode(pt_regs)) { + pt_regs = NULL; + regs->user_size_chain_stack = 0; + } else { + regs->user_size_chain_stack = + PCSHTP_SIGN_EXTEND(pt_regs->stacks.pcshtp); + if (limit_regs && + AS(limit_regs->stacks.pcsp_lo).base < PAGE_OFFSET) + regs->user_size_chain_stack += SZ_OF_CR; + } + } + + if (pt_regs) { + src = (void *) AS(pt_regs->stacks.pcsp_lo).base; + sz = AS(pt_regs->stacks.pcsp_hi).ind - + PCSHTP_SIGN_EXTEND(pt_regs->stacks.pcshtp); + if (sz > regs->base_chain_stack - regs->chain_stack_cache) { + s64 delta = sz - (u64) (regs->base_chain_stack - + regs->chain_stack_cache); + sz -= delta; + src += delta; + } + dst = regs->base_chain_stack - sz; + + if (!copy_user_hardware_stack(dst, src, sz)) { + regs->base_chain_stack -= sz; + regs->size_chain_stack += sz; + regs->orig_base_chain_stack_k -= sz; + regs->orig_base_chain_stack_u = (u64) src; + regs->user_size_chain_stack += (on_reserve_stacks()) ? 0 : sz; + } else { + regs->orig_base_chain_stack_u = 0; + } + } else { + regs->orig_base_chain_stack_u = 0; + } + + return 0; +} + +/* + * Copy full or last part of a process stack to a buffer. + * Is inlined to make sure that the return will not flush stack. + * + * Must be called with maskable interrupts disabled because + * stack registers are protected by IRQ-disable. 
+ */ +noinline void copy_stack_regs(struct task_struct *task, + const struct pt_regs *limit_regs, struct stack_regs *regs) +{ + struct sw_regs *sw_regs; + int i; + void *dst, *src; + u64 sz; + + if (unlikely(!raw_irqs_disabled())) + printk("copy_stack_regs called with enabled interrupts!\n"); + + regs->valid = 0; +#ifdef CONFIG_GREGS_CONTEXT + regs->gregs_valid = 0; +#endif + regs->ignore_banner = false; + for (i = 0; i < MAX_USER_TRAPS; i++) + regs->trap[i].valid = 0; + for (i = 0; i < MAX_PT_REGS_SHOWN; i++) + regs->pt_regs[i].valid = 0; + + if (task == current) { + unsigned long flags; + + raw_all_irq_save(flags); + COPY_STACKS_TO_MEMORY(); + + if (limit_regs) { + regs->pcsp_lo = limit_regs->stacks.pcsp_lo; + regs->pcsp_hi = limit_regs->stacks.pcsp_hi; + regs->psp_lo = limit_regs->stacks.psp_lo; + regs->psp_hi = limit_regs->stacks.psp_hi; + regs->crs = limit_regs->crs; + } else { + e2k_mem_crs_t *frame; + unsigned int pcshtp; + e2k_pshtp_t pshtp; + + ATOMIC_READ_PC_STACK_REGS(AW(regs->pcsp_lo), + AW(regs->pcsp_hi), pcshtp); + AS(regs->pcsp_hi).ind += PCSHTP_SIGN_EXTEND(pcshtp); + ATOMIC_READ_P_STACK_REGS(AW(regs->psp_lo), + AW(regs->psp_hi), AW(pshtp)); + AS(regs->psp_hi).ind += GET_PSHTP_MEM_INDEX(pshtp); + + frame = (e2k_mem_crs_t *) (AS(regs->pcsp_lo).base + + AS(regs->pcsp_hi).ind); + regs->crs = *(frame - 1); + AS(regs->pcsp_hi).ind -= SZ_OF_CR; + AS(regs->psp_hi).ind -= + AS(NATIVE_NV_READ_CR1_LO_REG()).wbs * EXT_4_NR_SZ; + } + +#ifdef CONFIG_GREGS_CONTEXT + get_all_user_glob_regs(®s->gregs); + regs->gregs_valid = 1; +#endif + + copy_trap_stack_regs(limit_regs, regs); + + copy_k_data_stack_regs(limit_regs, regs); + + copy_proc_stack_regs(limit_regs, regs); + + if (!copy_chain_stack_regs(limit_regs, regs)) { + regs->task = current; + regs->valid = 1; + } + + raw_all_irq_restore(flags); + + return; + } + +#ifdef CONFIG_SMP + while (task_curr(task)) { + /* get regs using NMI */ + if (-ESRCH == get_cpu_regs_nmi(task_cpu(task), task, regs)) + continue; + if 
(regs->valid) + return; + + /* Still no luck, fall back to sw_regs */ + pr_alert(" * * * * * * * * * ATTENTION * * * * * * * * *\n" + "Could not get %s[%d] stack using NMI,\n" + "used sw_regs instead. The stack is unreliable!\n" + " * * * * * * * * * * * * * * * * * * * * * * *\n", + task->comm, task->pid); + + break; + } +#endif + + sw_regs = &task->thread.sw_regs; + + regs->crs = sw_regs->crs; + regs->pcsp_lo = sw_regs->pcsp_lo; + regs->pcsp_hi = sw_regs->pcsp_hi; + regs->psp_lo = sw_regs->psp_lo; + regs->psp_hi = sw_regs->psp_hi; +#ifdef CONFIG_DATA_STACK_WINDOW + regs->base_k_data_stack = NULL; +#endif + + /* + * We are here. This means that NMI failed and we will be + * printing stack using sw_regs. Copy another task's + * registers accessing them directly at physical address. + */ + + /* + * Copy a part (or all) of the chain stack. + * If it fails then leave regs->valid set to 0. + */ + regs->base_chain_stack = (void *) regs->chain_stack_cache; + if (!regs->base_chain_stack) + goto out; + + regs->size_chain_stack = min(AS(regs->pcsp_hi).ind, + (u64) SIZE_CHAIN_STACK); + + sz = regs->size_chain_stack; + dst = regs->base_chain_stack; + src = (void *) (AS(regs->pcsp_lo).base + AS(regs->pcsp_hi).ind - sz); + if (unlikely(((long) dst & 0x7) || ((long) src & 0x7) || + ((long) sz & 0x7) || (u64) src < PAGE_OFFSET)) { + pr_alert("Bad chain registers: src %lx, dst %lx, sz %llx\n", + src, dst, sz); + goto out; + } + + regs->orig_base_chain_stack_k = (u64) src; + regs->orig_base_chain_stack_u = 0; + + /* Do the copy */ + if (careful_tagged_copy(dst, src, sz)) { + pr_alert("WARNING chain stack not available at %px\n", src); + goto out; + } + + /* Copy a part (or all) of the procedure stack. 
+ * Do _not_ set regs->valid to 0 if it fails + * (we can still print stack albeit without register windows) */ + regs->base_psp_stack = (void *) regs->psp_stack_cache; + if (!regs->base_psp_stack) + goto finish_copying_psp_stack; + + regs->size_psp_stack = min(AS(regs->psp_hi).ind, (u64) SIZE_PSP_STACK); + + sz = regs->size_psp_stack; + dst = regs->base_psp_stack; + src = (void *) (AS(regs->psp_lo).base + AS(regs->psp_hi).ind - sz); + if (unlikely(((long) dst & 0x7) || ((long) src & 0x7) || + ((long) sz & 0x7) || (u64) src < PAGE_OFFSET)) { + pr_alert("Bad psp registers: src %lx, dst %lx, sz %llx\n", + src, dst, sz); + /* We can still print chain stack */ + regs->base_psp_stack = NULL; + goto finish_copying_psp_stack; + } + + regs->orig_base_psp_stack_k = (u64) src; + regs->orig_base_psp_stack_u = 0; + + if (careful_tagged_copy(dst, src, sz)) { + pr_alert("WARNING procedure stack not available at %px\n", src); + /* We can still print chain stack */ + regs->base_psp_stack = NULL; + goto finish_copying_psp_stack; + } +finish_copying_psp_stack: + + regs->task = task; + regs->valid = 1; + +out: + return; +} + + +#ifdef CONFIG_SMP +struct nmi_copy_stack_args { + struct task_struct *task; + struct stack_regs *regs; + int ret; +}; + +static void nmi_copy_current_stack_regs(void *arg) +{ + struct nmi_copy_stack_args *args = arg; + + if (args->task && args->task != current) { + /* + * Race: needed task is no longer running + */ + args->ret = -ESRCH; + return; + } + + copy_stack_regs(current, find_host_regs(current_thread_info()->pt_regs), args->regs); +} + +static int get_cpu_regs_nmi(int cpu, struct task_struct *task, + struct stack_regs *const regs) +{ + struct nmi_copy_stack_args args; + int attempt; + + /* get regs using NMI, try several times + * waiting for a total of 30 seconds. 
*/ + regs->valid = 0; + + /* Paravirt guest does not use nmi IPI to dump stacks */ + if (paravirt_enabled() && !IS_HV_GM()) + return 0; + + args.task = task; + args.regs = regs; + args.ret = 0; + for (attempt = 0; attempt < 3; attempt++) { + nmi_call_function_single(cpu, nmi_copy_current_stack_regs, + &args, 1, 10000); + if (args.ret) + return args.ret; + + if (regs->valid) { + if (task && regs->task != task) + return -ESRCH; + + break; + } + } + + return 0; +} +#endif /* CONFIG_SMP */ + + +#ifdef CONFIG_CLI_CHECK_TIME +cli_info_t cli_info[2]; +tt0_info_t tt0_info[2]; +int cli_info_needed = 0; + +void +start_cli_info(void) +{ + pr_info("start_cli_info: clock %ld\n", READ_CLKR_REG()); + memset(cli_info, 0, sizeof(cli_info)); + cli_info_needed = 1; +} + +void tt0_prolog_ticks(long ticks) +{ + if (cli_info_needed && Max_tt0_prolog < ticks) { + Max_tt0_prolog = ticks; + } +} + +void +print_cli_info(void) +{ + + printk("print_cli_info: for CPU 0\n"); + printk("Max_tt0_prolog %ld\n", tt0_info[0].max_tt0_prolog); + printk("max_cli %ld max_cli_ip 0x%lx max_cli_cl %ld end_cl %ld\n", + cli_info[0].max_cli, + cli_info[0].max_cli_ip, + cli_info[0].max_cli_cl, + cli_info[0].max_cli_cl + cli_info[0].max_cli); + + printk("max_gcli %ld max_gcli_ip 0x%lx max_gcli_cl %ld\n", + cli_info[0].max_gcli, + cli_info[0].max_gcli_ip, + cli_info[0].max_gcli_cl); + printk("\n"); + if (num_online_cpus() == 1) return; + + printk("print_cli_info: for CPU 1\n"); + printk("Max_tt0_prolog %ld\n", tt0_info[1].max_tt0_prolog); + printk("max_cli %ld max_cli_ip 0x%lx max_cli_cl %ld end_cl %ld\n", + cli_info[1].max_cli, + cli_info[1].max_cli_ip, + cli_info[1].max_cli_cl, + cli_info[1].max_cli_cl + cli_info[1].max_cli); + + printk("max_gcli %ld max_gcli_ip 0x%lx max_gcli_cl %ld\n", + cli_info[1].max_gcli, + cli_info[1].max_gcli_ip, + cli_info[1].max_gcli_cl); +} +#else // CONFIG_CLI_CHECK_TIME +void +print_cli_info(void) {} +#endif + +void print_mmap(struct task_struct *task) +{ + char path[256]; + struct 
mm_struct *mm = task->mm; + struct vm_area_struct *vma; + struct file *vm_file; + int locked; + long all_sz = 0; + + if (!mm) { + pr_alert(" There aren't mmap areas for pid %d \n", task->pid); + return; + } + + /* + * This function is used when everything goes south + * so do not try too hard to lock mmap_sem + */ + locked = down_read_trylock(&mm->mmap_sem); + + pr_alert("============ MMAP AREAS for pid %d =============\n", task->pid); + for (vma = mm->mmap; vma; vma = vma->vm_next) { + vm_file = vma->vm_file; + pr_cont("ADDR 0x%-10lx END 0x%-10lx ", + vma->vm_start, vma->vm_end); + all_sz += vma->vm_end - vma->vm_start; + if (vma->vm_flags & VM_WRITE) + pr_cont(" WR "); + if (vma->vm_flags & VM_READ) + pr_cont(" RD "); + if (vma->vm_flags & VM_EXEC) + pr_cont(" EX "); + pr_cont(" PROT 0x%lx FLAGS 0x%lx", + pgprot_val(vma->vm_page_prot), vma->vm_flags); + if (vm_file) { + struct seq_buf s; + + seq_buf_init(&s, path, sizeof(path)); + seq_buf_path(&s, &vm_file->f_path, "\n"); + if (seq_buf_used(&s) < sizeof(path)) + path[seq_buf_used(&s)] = 0; + else + path[sizeof(path) - 1] = 0; + + pr_cont(" %s\n", path); + } else { + pr_cont("\n"); + } + } + printk("============ END OF MMAP AREAS all_sz %ld ======\n", all_sz); + + if (locked) + up_read(&mm->mmap_sem); +} + +/* + * print_reg_window - print local registers from psp stack + * @window_base - pointer to the window in psp stack + * @window_size - size of the window in psp stack (in quadro registers) + * @fx - do print extensions? 
+ */ +static void print_reg_window(u64 window_base, int window_size, + int fx, e2k_cr1_hi_t cr1_hi) +{ + int qreg, dreg, dreg_ind; + u64 *rw = (u64 *)window_base; + u64 qreg_lo, qreg_hi, ext_lo, ext_hi; + u8 tag_lo, tag_hi, tag_ext_lo, tag_ext_hi; + char brX0_name[6], brX1_name[6]; + u64 rbs, rsz, rcur; + + rbs = AS(cr1_hi).rbs; + rsz = AS(cr1_hi).rsz; + rcur = AS(cr1_hi).rcur; + + for (qreg = window_size - 1; qreg >= 0; qreg --) { + dreg_ind = qreg * (EXT_4_NR_SZ / sizeof (*rw)); + + load_value_and_tagd(&rw[dreg_ind + 0], &qreg_lo, &tag_lo); + if (machine.native_iset_ver < E2K_ISET_V5) { + load_value_and_tagd(&rw[dreg_ind + 1], + &qreg_hi, &tag_hi); + if (fx) { + ext_lo = rw[dreg_ind + 2]; + ext_hi = rw[dreg_ind + 3]; + } + } else { + load_value_and_tagd(&rw[dreg_ind + 2], + &qreg_hi, &tag_hi); + if (fx) { + load_value_and_tagd(&rw[dreg_ind + 1], + &ext_lo, &tag_ext_lo); + load_value_and_tagd(&rw[dreg_ind + 3], + &ext_hi, &tag_ext_hi); + } + } + + dreg = qreg * 2; + + /* Calculate %br[] register number */ + if (qreg >= rbs && qreg <= (rbs + rsz) && rsz >= rcur) { + int qbr, brX0, brX1; + + qbr = (qreg - rbs) + ((rsz + 1) - rcur); + + while (qbr > rsz) + qbr -= rsz + 1; + + brX0 = 2 * qbr; + brX1 = 2 * qbr + 1; + + snprintf(brX0_name, 7, "%sb%d/", (brX0 < 10) ? " " : + ((brX0 < 100) ? " " : ""), brX0); + snprintf(brX1_name, 7, "%sb%d/", (brX0 < 10) ? " " : + ((brX0 < 100) ? 
" " : ""), brX1); + } else { + memset(brX0_name, ' ', 5); + memset(brX1_name, ' ', 5); + brX0_name[5] = 0; + brX1_name[5] = 0; + } + + if (fx) { + if (machine.native_iset_ver < E2K_ISET_V5) { + pr_alert(" %sr%-3d: %hhx 0x%016llx %04hx %sr%-3d: %hhx 0x%016llx %04hx\n", + brX0_name, dreg, tag_lo, qreg_lo, + (u16) ext_lo, brX1_name, dreg + 1, + tag_hi, qreg_hi, (u16) ext_hi); + } else { + pr_alert(" %sr%-3d: %hhx 0x%016llx ext: %hhx %016llx\n", + brX1_name, dreg + 1, tag_hi, qreg_hi, + tag_ext_hi, ext_hi); + pr_alert(" %sr%-3d: %hhx 0x%016llx ext: %hhx %016llx\n", + brX0_name, dreg, tag_lo, qreg_lo, + tag_ext_lo, ext_lo); + } + } else { + pr_alert(" %sr%-3d: %hhx 0x%016llx %sr%-3d: %hhx 0x%016llx\n", + brX0_name, dreg, tag_lo, qreg_lo, + brX1_name, dreg + 1, tag_hi, qreg_hi); + } + } +} + +static inline void print_predicates(e2k_cr0_lo_t cr0_lo, e2k_cr1_hi_t cr1_hi) +{ + u64 pf = AS(cr0_lo).pf; + u64 i, values = 0, tags = 0; + + for (i = 0; i < 32; i++) { + values |= (pf & (1ULL << 2 * i)) >> i; + tags |= (pf & (1ULL << (2 * i + 1))) >> (i + 1); + } + pr_info(" predicates[31:0] %08x ptags[31:0] %08x " + "psz %d pcur %d\n", + (u32) values, (u32) tags, + cr1_hi.CR1_hi_psz, cr1_hi.CR1_hi_pcur); +} + +u64 print_all_TIRs(const e2k_tir_t *TIRs, u64 nr_TIRs) +{ + e2k_tir_hi_t tir_hi; + e2k_tir_lo_t tir_lo; + u64 all_interrupts = 0; + int i; + + printk("TIR all registers:\n"); + for (i = nr_TIRs; i >= 0; i --) { + tir_hi = TIRs[i].TIR_hi; + tir_lo = TIRs[i].TIR_lo; + + all_interrupts |= AW(tir_hi); + + pr_alert("TIR.hi[%d]: 0x%016llx : exc 0x%011llx al 0x%x aa 0x%x #%d\n", + i, AW(tir_hi), tir_hi.exc, tir_hi.al, tir_hi.aa, tir_hi.j); + + if (tir_hi.exc) { + u64 exc = tir_hi.exc; + int nr_intrpt; + + pr_alert(" "); + for (nr_intrpt = __ffs64(exc); exc != 0; + exc &= ~(1UL << nr_intrpt), + nr_intrpt = __ffs64(exc)) + pr_cont(" %s", exc_tbl_name[nr_intrpt]); + pr_cont("\n"); + } + + pr_alert("TIR.lo[%d]: 0x%016llx : IP 0x%012llx\n", + i, tir_lo.TIR_lo_reg, tir_lo.TIR_lo_ip); + } 
+ + return all_interrupts & (exc_all_mask | aau_exc_mask); +} + +void print_tc_record(const trap_cellar_t *tcellar, int num) +{ + tc_fault_type_t ftype; + tc_dst_t dst ; + tc_opcode_t opcode; + u64 data; + u8 data_tag; + + AW(dst) = AS(tcellar->condition).dst; + AW(opcode) = AS(tcellar->condition).opcode; + AW(ftype) = AS(tcellar->condition).fault_type; + + load_value_and_tagd(&tcellar->data, &data, &data_tag); + /* FIXME: data has tag, but E2K_LOAD_TAGGED_DWORD() is privileged */ + /* action? guest will be trapped */ + if (!paravirt_enabled()) + load_value_and_tagd(&tcellar->data, &data, &data_tag); + else { + data = tcellar->data; + data_tag = 0; + } + printk(" record #%d: address 0x%016llx data 0x%016llx tag 0x%x\n" + " condition 0x%016llx:\n" + " dst 0x%05x: address 0x%04x, vl %d, vr %d\n" + " opcode 0x%03x: fmt 0x%02x, npsp 0x%x\n" + " store 0x%x, s_f 0x%x, mas 0x%x\n" + " root 0x%x, scal 0x%x, sru 0x%x\n" + " chan 0x%x, se 0x%x, pm 0x%x\n" + " fault_type 0x%x:\n" + " intl_res_bits = %d MLT_trap = %d\n" + " ph_pr_page = %d page_bound = %d\n" + " io_page = %d isys_page = %d\n" + " prot_page = %d priv_page = %d\n" + " illegal_page = %d nwrite_page = %d\n" + " page_miss = %d ph_bound = %d\n" + " global_sp = %d\n" + " miss_lvl 0x%x, num_align 0x%x, empt 0x%x\n" + " clw 0x%x, rcv 0x%x dst_rcv 0x%x\n", + num, + (u64) tcellar->address, data, data_tag, + (u64) AW(tcellar->condition), + (u32)AW(dst), (u32)(AS(dst).address), (u32)(AS(dst).vl), + (u32)(AS(dst).vr), + (u32)AW(opcode), (u32)(AS(opcode).fmt),(u32)(AS(opcode).npsp), + (u32)AS(tcellar->condition).store, + (u32)AS(tcellar->condition).s_f, + (u32)AS(tcellar->condition).mas, + (u32)AS(tcellar->condition).root, + (u32)AS(tcellar->condition).scal, + (u32)AS(tcellar->condition).sru, + (u32)AS(tcellar->condition).chan, + (u32)AS(tcellar->condition).spec, + (u32)AS(tcellar->condition).pm, + (u32)AS(tcellar->condition).fault_type, + (u32)AS(ftype).intl_res_bits, (u32)(AS(ftype).exc_mem_lock), + 
(u32)AS(ftype).ph_pr_page, (u32)AS(ftype).page_bound, + (u32)AS(ftype).io_page, (u32)AS(ftype).isys_page, + (u32)AS(ftype).prot_page, (u32)AS(ftype).priv_page, + (u32)AS(ftype).illegal_page, (u32)AS(ftype).nwrite_page, + (u32)AS(ftype).page_miss, (u32)AS(ftype).ph_bound, + (u32)AS(ftype).global_sp, + (u32)AS(tcellar->condition).miss_lvl, + (u32)AS(tcellar->condition).num_align, + (u32)AS(tcellar->condition).empt, + (u32)AS(tcellar->condition).clw, + (u32)AS(tcellar->condition).rcv, + (u32)AS(tcellar->condition).dst_rcv); +} + +void print_all_TC(const trap_cellar_t *TC, int TC_count) +{ + int i; + + if (!TC_count) + return; + + printk("TRAP CELLAR all %d records:\n", TC_count / 3); + for (i = 0; i < TC_count / 3; i++) + print_tc_record(&TC[i], i); +} + +/* + * Print pt_regs + */ +void print_pt_regs(const pt_regs_t *regs) +{ + const e2k_mem_crs_t *crs = ®s->crs; + const e2k_upsr_t upsr = current_thread_info()->upsr; + + if (!regs) + return; + + pr_info(" PT_REGS value:\n"); + + pr_info("usd: base 0x%llx, size 0x%x, p %d, sbr: 0x%lx\n", regs->stacks.usd_lo.USD_lo_base, + regs->stacks.usd_hi.USD_hi_size, regs->stacks.usd_lo.USD_lo_p, regs->stacks.top); + + pr_info("psp: base %llx, ind %x, size %x PSHTP ind 0x%llx\n", + AS(regs->stacks.psp_lo).base, + AS(regs->stacks.psp_hi).ind, AS(regs->stacks.psp_hi).size, + GET_PSHTP_MEM_INDEX(regs->stacks.pshtp)); + pr_info("pcsp: base %llx, ind %x, size %x PCSHTP ind 0x%llx\n", + AS(regs->stacks.pcsp_lo).base, + AS(regs->stacks.pcsp_hi).ind, AS(regs->stacks.pcsp_hi).size, + PCSHTP_SIGN_EXTEND(regs->stacks.pcshtp)); + + pr_info("cr0.lo: pf 0x%llx, cr0.hi: ip 0x%llx\n", + AS(crs->cr0_lo).pf, AS(crs->cr0_hi).ip << 3); + pr_info("cr1.lo: unmie %d, nmie %d, uie %d, lw %d, sge %d, ie %d, pm %d\n" + " cuir 0x%x, wbs 0x%x, wpsz 0x%x, wfx %d, ss %d, ein %d\n", + AS(crs->cr1_lo).unmie, AS(crs->cr1_lo).nmie, AS(crs->cr1_lo).uie, + AS(crs->cr1_lo).lw, AS(crs->cr1_lo).sge, AS(crs->cr1_lo).ie, + AS(crs->cr1_lo).pm, AS(crs->cr1_lo).cuir, 
AS(crs->cr1_lo).wbs, + AS(crs->cr1_lo).wpsz, AS(crs->cr1_lo).wfx, AS(crs->cr1_lo).ss, + AS(crs->cr1_lo).ein); + pr_info("cr1.hi: ussz 0x%x, wdbl %d\n" + " rbs 0x%x, rsz 0x%x, rcur 0x%x, psz 0x%x, pcur 0x%x\n", + AS(crs->cr1_hi).ussz, AS(crs->cr1_hi).wdbl, AS(crs->cr1_hi).rbs, + AS(crs->cr1_hi).rsz, AS(crs->cr1_hi).rcur, AS(crs->cr1_hi).psz, + AS(crs->cr1_hi).pcur); + pr_info("WD: base 0x%x, size 0x%x, psize 0x%x, fx %d, dbl %d\n", + regs->wd.base, regs->wd.size, regs->wd.psize, regs->wd.fx, regs->wd.dbl); + if (from_syscall(regs)) { + pr_info("regs->kernel_entry: %d, syscall #%d\n", + regs->kernel_entry, regs->sys_num); + } else { + const struct trap_pt_regs *trap = regs->trap; + u64 exceptions; + + pr_info("ctpr1: base 0x%llx, tag 0x%x, opc 0x%x, ipd 0x%x\n", + AS(regs->ctpr1).ta_base, AS(regs->ctpr1).ta_tag, + AS(regs->ctpr1).opc, AS(regs->ctpr1).ipd); + pr_info("ctpr2: base 0x%llx, tag 0x%x, opcode 0x%x, prefetch 0x%x\n", + AS(regs->ctpr2).ta_base, AS(regs->ctpr2).ta_tag, + AS(regs->ctpr2).opc, AS(regs->ctpr2).ipd); + pr_info("ctpr3: base 0x%llx, tag 0x%x, opcode 0x%x, prefetch 0x%x\n", + AS(regs->ctpr3).ta_base, AS(regs->ctpr3).ta_tag, + AS(regs->ctpr3).opc, AS(regs->ctpr3).ipd); + pr_info("regs->trap: 0x%px, AAU context at 0x%px\n", + regs->trap, regs->aau_context); + + exceptions = print_all_TIRs(trap->TIRs, trap->nr_TIRs); + print_all_TC(trap->tcellar, trap->tc_count); + if (exceptions & exc_data_debug_mask) { + pr_info("ddbcr 0x%llx, ddmcr 0x%llx, ddbsr 0x%llx\n", + READ_DDBCR_REG_VALUE(), READ_DDMCR_REG_VALUE(), + READ_DDBSR_REG_VALUE()); + pr_info("ddbar0 0x%llx, ddbar1 0x%llx, ddbar2 0x%llx, ddbar3 0x%llx\n", + READ_DDBAR0_REG_VALUE(), READ_DDBAR1_REG_VALUE(), + READ_DDBAR2_REG_VALUE(), READ_DDBAR3_REG_VALUE()); + pr_info("ddmar0 0x%llx, ddmar1 0x%llx\n", + READ_DDMAR0_REG_VALUE(), READ_DDMAR1_REG_VALUE()); + } + if (exceptions & exc_instr_debug_mask) { + pr_info("dibcr 0x%x, dimcr 0x%llx, dibsr 0x%x\n", + READ_DIBCR_REG_VALUE(), READ_DIMCR_REG_VALUE(), 
+ READ_DIBSR_REG_VALUE()); + pr_info("dibar0 0x%llx, dibar1 0x%llx, dibar2 0x%llx, dibar3 0x%llx\n", + READ_DIBAR0_REG_VALUE(), READ_DIBAR1_REG_VALUE(), + READ_DIBAR2_REG_VALUE(), READ_DIBAR3_REG_VALUE()); + pr_info("dimar0 0x%llx, dimar1 0x%llx\n", + READ_DIMAR0_REG_VALUE(), READ_DIMAR1_REG_VALUE()); + } + } + pr_info("UPSR: 0x%x : fe %d\n", + upsr.UPSR_reg, upsr.UPSR_fe); +} + + +#ifdef CONFIG_PROC_FS +int +print_statm(task_pages_info_t *tmm, pid_t pid) +{ + struct task_struct *tsk = current; + struct mm_struct *mm = get_task_mm(tsk); + task_pages_info_t umm; + + if (!pid || (pid == current->pid)) + goto get_mm; + + do { + tsk = next_task(tsk); + if (tsk->pid == pid) { + mm = get_task_mm(tsk); + break; + } + } while(tsk != current); + + if (tsk == current) + return -1; + +get_mm: + if (tsk->mm) { + umm.size = task_statm(mm, &umm.shared, &umm.text, &umm.data, + &umm.resident); + if (!copy_to_user((void *)tmm, (void *)&umm, + sizeof(task_pages_info_t))) + return 0; + } + + return -1; +} +#endif + +void +print_pids(void) +{ + struct task_struct *g = NULL, *p = NULL; + + do_each_thread(g, p) { + if (!p) { + pr_info("print_pids: task pointer == NULL\n"); + } else { + pr_info("print_pids: pid %d state 0x%lx policy %d name %s\n", + p->pid, p->state, p->policy, p->comm); + } + } while_each_thread(g, p); +} + +void notrace arch_trigger_cpumask_backtrace(const cpumask_t *mask, + bool exclude_self) +{ +#ifdef CONFIG_SMP + struct stack_regs *stack_regs; + int cpu, this_cpu; +#endif + unsigned long flags; + + /* stack_regs_cache[] is protected by IRQ-disable + * (we assume that NMI handlers will not call print_stack() and + * do not disable NMIs here as they are used by copy_stack_regs()) */ + raw_local_irq_save(flags); + + if (!exclude_self) + print_stack_frames(current, NULL, 0); + +#ifdef CONFIG_SMP + this_cpu = raw_smp_processor_id(); + stack_regs = &stack_regs_cache[this_cpu]; + + for_each_cpu(cpu, mask) { + if (cpu == this_cpu) + continue; + + stack_regs->show_trap_regs 
= debug_trap; + stack_regs->show_user_regs = debug_userstack; +# ifdef CONFIG_DATA_STACK_WINDOW + stack_regs->show_k_data_stack = debug_datastack; +# endif + get_cpu_regs_nmi(cpu, NULL, stack_regs); + if (stack_regs->valid == 0) { + pr_alert("WARNING could not get stack from CPU #%d, stack will not be printed\n", + cpu); + continue; + } + + pr_alert("NMI backtrace for cpu %d\n", cpu); + print_chain_stack(stack_regs, 0); + } +#endif + raw_local_irq_restore(flags); +} + +void +print_all_mmap(void) +{ + struct task_struct *g = NULL, *p = NULL; + + read_lock(&tasklist_lock); + do_each_thread(g, p) { + print_mmap(p); + } while_each_thread(g, p); + read_unlock(&tasklist_lock); +} + +notrace +static int get_chain_frame(e2k_mem_crs_t *dst, e2k_mem_crs_t *src, + int flags, struct task_struct *p) +{ + if (p != current || (flags & PCS_USER)) { + unsigned long ts_flag; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + /* + * Can be under closed interrupts here. If page fault + * happens we just bail out, otherwise we would have + * to SPILL chain stack again. 
+ */ + __TRY_USR_PFAULT { + *dst = *src; + } CATCH_USR_PFAULT { + clear_ts_flag(ts_flag); + return -EFAULT; + } END_USR_PFAULT + clear_ts_flag(ts_flag); + + return 0; + } + + if (WARN_ON_ONCE(!(flags & PCS_USER) && (unsigned long) src < TASK_SIZE)) + return -EFAULT; + + *dst = *src; + + return 0; +} + +notrace +static int ____parse_chain_stack(int flags, struct task_struct *p, + parse_chain_fn_t func, void *arg, unsigned long delta_user, + unsigned long top, unsigned long bottom, + bool *interrupts_enabled, unsigned long *irq_flags) +{ + int ret = 0, func_flags = 0; + e2k_mem_crs_t *frame; + unsigned long chain_stack_bytes_fixed = 0; + + if (flags & PCS_OPEN_IRQS) + func_flags |= PCF_IRQS_CLOSE_NEEDED; + if (*interrupts_enabled == false) { + func_flags |= PCF_FLUSH_NEEDED; + if (flags & PCS_OPEN_IRQS) { + raw_all_irq_restore(*irq_flags); + *interrupts_enabled = true; + } + } + + for (frame = ((e2k_mem_crs_t *) top) - 1; + (unsigned long) frame >= bottom; frame--) { + e2k_mem_crs_t copied_frame; + + ret = get_chain_frame(&copied_frame, frame, flags, p); + if (unlikely(ret)) + break; + + ret = func(&copied_frame, (unsigned long) frame, + (unsigned long) frame + delta_user, + func_flags, arg); + if (ret) + break; + + if (p == current) { + chain_stack_bytes_fixed += SZ_OF_CR; + /* Use '>' instead of '>=' because in some cases + * we need to write previous frame (for example + * when setting last wish exception). 
*/ + if (chain_stack_bytes_fixed > E2K_MAXCR * 16) { + if (!(*interrupts_enabled)) { + raw_all_irq_restore(*irq_flags); + *interrupts_enabled = true; + } + func_flags &= ~PCF_FLUSH_NEEDED; + } + } + } + + if (p == current && !(*interrupts_enabled)) { + raw_all_irq_restore(*irq_flags); + *interrupts_enabled = true; + } + + return ret; +} + +notrace noinline +static int __parse_chain_stack(int flags, struct task_struct *p, + parse_chain_fn_t func, void *arg) +{ + bool interrupts_enabled = true; + unsigned long irq_flags; + u64 pcs_base, pcs_ind; + u64 actual_base; + int ret; + + if (p == current) { + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + + raw_all_irq_save(irq_flags); + interrupts_enabled = false; + + NATIVE_FLUSHC; + pcsp_lo = NATIVE_NV_READ_PCSP_LO_REG(); + pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG(); + pcs_base = AS(pcsp_lo).base; + pcs_ind = AS(pcsp_hi).ind; + + if (flags & PCS_USER) { + struct pt_regs *regs = current_pt_regs(); + unsigned long spilled_to_kernel, delta_user, user_size; + + if (WARN_ON(!regs)) + return -EINVAL; + + /* + * In this case part of the user's stack was + * spilled into kernel so go over it first, + * then check user's stack. + * + * First pass: part of user stack spilled to kernel. 
+ */ + spilled_to_kernel = PCSHTP_SIGN_EXTEND( + regs->stacks.pcshtp); + if (spilled_to_kernel < 0) + spilled_to_kernel = 0; + + /* + * The last user frame is not accounted for in %pcshtp + */ + user_size = spilled_to_kernel + SZ_OF_CR; + + /* + * The very last frame does not have any useful information + */ + if (AS(regs->stacks.pcsp_hi).ind == spilled_to_kernel) { + user_size -= SZ_OF_CR; + pcs_base += SZ_OF_CR; + } + + delta_user = AS(regs->stacks.pcsp_lo).base + + AS(regs->stacks.pcsp_hi).ind - + spilled_to_kernel - + AS(current_thread_info()->k_pcsp_lo).base; + ret = ____parse_chain_stack(flags, p, func, arg, + delta_user, pcs_base + user_size, + pcs_base, &interrupts_enabled, &irq_flags); + if (ret) + return ret; + + /* + * Second pass: stack in user + */ + pcsp_lo = regs->stacks.pcsp_lo; + pcsp_hi = regs->stacks.pcsp_hi; + AS(pcsp_hi).ind -= spilled_to_kernel; + pcs_base = AS(pcsp_lo).base; + pcs_ind = AS(pcsp_hi).ind; + } else if (!(current->flags & PF_KTHREAD)) { + struct pt_regs *regs = current_pt_regs(); + unsigned long spilled_to_kernel; + + /* 'regs' can be zero when kernel thread + * tries to execve() user binary. */ + if (regs) { + spilled_to_kernel = PCSHTP_SIGN_EXTEND( + regs->stacks.pcshtp); + + if (WARN_ON_ONCE(spilled_to_kernel > pcs_ind)) + return -EINVAL; + + /* + * Skip part of user's stack + * that was spilled to kernel + */ + pcs_base += spilled_to_kernel; + pcs_ind -= spilled_to_kernel; + } + } + + actual_base = (pcs_base >= PAGE_OFFSET) ? 
pcs_base : + (u64) CURRENT_PCS_BASE(); + } else { + pcs_base = p->thread.sw_regs.pcsp_lo.PCSP_lo_base; + pcs_ind = p->thread.sw_regs.pcsp_hi.PCSP_hi_ind; + + actual_base = pcs_base; + } + + /* The very last frame does not have any useful information */ + actual_base += SZ_OF_CR; + return ____parse_chain_stack(flags, p, func, arg, 0, pcs_base + pcs_ind, + actual_base, &interrupts_enabled, &irq_flags); +} + +/** + * parse_chain_stack - parse chain stack backwards starting from the last frame + * @flags: set PCS_USER for user stack parsing and unset for kernel stack, + * set PCS_OPEN_IRQS if @func should be called with open interrupts + * @p: process to parse + * @func: function to call + * @arg: function argument + * + * See comment before parse_chain_fn_t for other arguments explanation. + * + * IMPORTANT: if @func wants to modify frame contents it must flush + * chain stack if "flush_needed" is set. + */ +notrace noinline long parse_chain_stack(int flags, struct task_struct *p, + parse_chain_fn_t func, void *arg) +{ + int ret; + + if (!p) + p = current; + + /* + * Too much hassle to support when no one needs this + */ + if (p != current && (flags & PCS_USER)) + return -ENOTSUPP; + + if (p != current) { + if (!try_get_task_stack(p)) + return -ESRCH; + } + + ret = __parse_chain_stack(flags, p, func, arg); + + if (p != current) + put_task_stack(p); + + return ret; +} + + +#ifdef CONFIG_USR_CONTROL_INTERRUPTS +static notrace int correct_psr_register(e2k_mem_crs_t *frame, + unsigned long real_frame_addr, + unsigned long corrected_frame_addr, int flags, void *arg) +{ + u64 maska = (u64) arg1; + u64 cr_ip = AS_WORD(frame->cr0_hi); + e2k_cr1_lo_t cr1_lo; + + if ((cr_ip < TASK_SIZE)) { + AS_WORD(cr1_lo) = AS_WORD(frame->cr1_lo); + AS_STRUCT(cr1_lo).psr = maska; + + if (flags & PCF_FLUSH_NEEDED) + NATIVE_FLUSHC; + if (put_cr1_lo(cr1_lo, real_frame_addr, 0)) + return -1; + } + return 0; +} +#endif /* CONFIG_USR_CONTROL_INTERRUPTS */ + +static int get_addr_name(u64 addr, char 
*buf, size_t len, + unsigned long *start_addr_p, struct mm_struct *mm) +{ + struct vm_area_struct *vma; + int ret = 0, locked; + + if (addr >= TASK_SIZE || !mm) + return -ENOENT; + + /* + * This function is used when everything goes south + * so do not try too hard to lock mmap_sem + */ + locked = down_read_trylock(&mm->mmap_sem); + + vma = find_vma(mm, addr); + if (!vma || vma->vm_start > addr || !vma->vm_file) { + ret = -ENOENT; + goto out_unlock; + } + + /* seq_buf_path() locks init_fs.seq which is normally + * locked with enabled interrupts, so we cannot reliably + * call it if we are in interrupt */ + if (!in_irq()) { + struct seq_buf s; + + seq_buf_init(&s, buf, len); + seq_buf_path(&s, &vma->vm_file->f_path, "\n"); + + if (seq_buf_used(&s) < len) + buf[seq_buf_used(&s)] = 0; + else + buf[len - 1] = 0; + } else { + buf[0] = 0; + } + + /* Assume that load_base == vm_start */ + if (start_addr_p) + *start_addr_p = vma->vm_start; + +out_unlock: + if (locked) + up_read(&mm->mmap_sem); + + return ret; +} + + +static DEFINE_RAW_SPINLOCK(print_stack_lock); + +/** + * print_stack_frames - print task's stack to console + * @task: which task's stack to print? + * @pt_regs: skip stack on top of this pt_regs structure + * @show_reg_window: print local registers? 
+ */ +noinline void +print_stack_frames(struct task_struct *task, struct pt_regs *pt_regs, + int show_reg_window) +{ + unsigned long flags; + int cpu; + bool used; + struct stack_regs *stack_regs; + + /* if this is guest, stop tracing in host to avoid buffer overwrite */ + host_ftrace_stop(); + + if (!task) + task = current; + + if (test_and_set_bit(PRINT_FUNCY_STACK_WORKS_BIT, + &task->thread.flags)) { + pr_alert(" %d: print_stack: works already on pid %d\n", + current->pid, task->pid); + if (task != current) + return; + } + + /* + * stack_regs_cache[] is protected by IRQ-disable + * (we assume that NMI handlers will not call dump_stack() and + * do not disable NMIs here as they are used by copy_stack_regs()) + */ + raw_local_irq_save(flags); + + if (task == current) { + pr_alert("%s", linux_banner); + } + + cpu = raw_smp_processor_id(); + stack_regs = &stack_regs_cache[cpu]; + + used = xchg(&stack_regs->used, 1); + if (used) { + pr_alert(" %d: print stack: works already on cpu %d\n", + current->pid, cpu); + } else { + stack_regs->show_trap_regs = debug_trap; + stack_regs->show_user_regs = debug_userstack; +#ifdef CONFIG_DATA_STACK_WINDOW + stack_regs->show_k_data_stack = debug_datastack; +#endif + copy_stack_regs(task, pt_regs, stack_regs); + + /* All checks of stacks validity are + * performed in print_chain_stack() */ + + print_chain_stack(stack_regs, show_reg_window); + } + + /* if task is host of guest VM or VCPU, then print guest stacks */ + print_guest_stack(task, stack_regs, show_reg_window); + + stack_regs->used = 0; + + raw_local_irq_restore(flags); + + clear_bit(PRINT_FUNCY_STACK_WORKS_BIT, &task->thread.flags); +} + +static inline void print_funcy_ip(u64 addr, u64 cr_base, u64 cr_ind, + struct task_struct *task, u64 orig_base) +{ + unsigned long start_addr; + char buf[64]; + int traced = 0; + + if (addr < TASK_SIZE) { + if (!get_addr_name(addr, buf, sizeof(buf), + &start_addr, task->mm)) { + pr_alert(" 0x%-12llx %s (@0x%lx)\n", addr, + buf, 
start_addr); + } else { + pr_alert(" 0x%-12llx \n", addr); + } + + return; + } + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + if (task->ret_stack) { + int index; + for (index = 0; index <= task->curr_ret_stack; index++) + if (task->ret_stack[index].fp == orig_base + cr_ind) { + addr = task->ret_stack[index].ret; + traced = 1; + break; + } + } +#endif + + pr_alert(" 0x%-12llx %pF%s", addr, (void *) addr, + (traced) ? " (traced)" : ""); +} + +/* This function allocates memory necessary to print + * procedure stack registers from other CPUs */ +static int __init print_stack_init(void) +{ + int cpu; + + for (cpu = 0; cpu < NR_CPUS; cpu++) + stack_regs_cache[cpu].chain_stack_cache = NULL; + + for_each_possible_cpu(cpu) { + stack_regs_cache[cpu].chain_stack_cache = + kmalloc(SIZE_CHAIN_STACK, GFP_KERNEL); + BUG_ON(stack_regs_cache[cpu].chain_stack_cache == NULL); + + if (cpu == 0) + continue; + + stack_regs_cache[cpu].psp_stack_cache = kmalloc(SIZE_PSP_STACK, + GFP_KERNEL); + if (stack_regs_cache[cpu].psp_stack_cache == NULL) { + printk("WARNING print_stack_init: no memory, printing " + "running tasks' register stacks from " + "CPU #%d will not be done\n", cpu); + continue; + } +#ifdef CONFIG_DATA_STACK_WINDOW + stack_regs_cache[cpu].k_data_stack_cache = kmalloc( + SIZE_DATA_STACK, GFP_KERNEL); + if (stack_regs_cache[cpu].k_data_stack_cache == NULL) { + printk("WARNING print_stack_init: no memory, printing " + "running tasks' kernel data stacks from" + " CPU #%d will not be done\n", cpu); + continue; + } +#endif + } + + return 0; +} +/* Initialize printing stacks from other CPUs before initializing those CPUs */ +early_initcall(print_stack_init); + +int print_window_regs = 0; +static int __init print_window_regs_setup(char *str) +{ + print_window_regs = 1; + return 1; +} +__setup("print_window_regs", print_window_regs_setup); + +int debug_protected_mode = 0; +static int __init init_debug_protected_mode(void) +{ + e2k_core_mode_t core_mode; + + if (machine.native_iset_ver < 
E2K_ISET_V3) + return 0; + core_mode.CORE_MODE_reg = machine.rrd(E2K_REG_CORE_MODE); + debug_protected_mode = core_mode.CORE_MODE_no_stack_prot; + DebugCM(" init debug_protected_mode =%d\n", debug_protected_mode); + return 1; +} +early_initcall(init_debug_protected_mode); + +static void nmi_set_no_stack_prot(void *arg) +{ + e2k_core_mode_t core_mode; + if (machine.native_iset_ver < E2K_ISET_V3) { + return; + } + core_mode.CORE_MODE_reg = READ_CORE_MODE_REG_VALUE(); + core_mode.CORE_MODE_no_stack_prot = !!arg; + WRITE_CORE_MODE_REG_VALUE(core_mode.CORE_MODE_reg); + + DebugCM(" debug_protected_mode =%d cpu=%d\n", debug_protected_mode, + raw_smp_processor_id()); +} + +void set_protected_mode_flags(void) +{ + on_each_cpu(nmi_set_no_stack_prot, + (void *) (long) debug_protected_mode, 1); +} + +/* bug 115090: always set %core_mode.no_stack_prot */ +static int initialize_no_stack_prot(void) +{ + debug_protected_mode = 1; + set_protected_mode_flags(); + + return 0; +} +arch_initcall(initialize_no_stack_prot); + +#ifdef CONFIG_DATA_STACK_WINDOW +static void print_k_data_stack(struct stack_regs *regs, int *pt_regs_num, + unsigned long base, u64 size) +{ + unsigned long delta = regs->real_k_data_stack_addr - + regs->base_k_data_stack; + bool pt_regs_valid = regs->pt_regs[*pt_regs_num].valid; + unsigned long pt_regs_addr = regs->pt_regs[*pt_regs_num].addr; + unsigned long addr; + bool show_pt_regs; + + if (!size) + return; + + if (pt_regs_valid && pt_regs_addr >= (unsigned long) base + delta && + pt_regs_addr < (unsigned long) base + delta + size) { + show_pt_regs = 1; + (*pt_regs_num)++; + } else { + show_pt_regs = 0; + } + + printk(" DATA STACK from %lx to %llx\n", base + delta, + base + delta + size); + for (addr = base; addr < base + size; addr += 16) { + u8 tag_lo, tag_hi; + u64 value_lo, value_hi; + bool is_pt_regs_addr = show_pt_regs + && (addr + delta) >= pt_regs_addr + && (addr + delta) < (pt_regs_addr + + sizeof(struct pt_regs)); + + load_qvalue_and_tagq(addr, 
&value_lo, &value_hi, + &tag_lo, &tag_hi); + printk(" %lx (%s+0x%-3lx): %x %016llx %x %016llx\n", + addr + delta, + (is_pt_regs_addr) ? "pt_regs" : "", + (is_pt_regs_addr) ? (addr + delta - pt_regs_addr) : + (addr - base), + tag_lo, value_lo, tag_hi, value_hi); + } +} +#endif + + +/* + * Must be called with disabled interrupts + */ +void print_chain_stack(struct stack_regs *regs, int show_reg_window) +{ + unsigned long flags; + bool disable_nmis; + struct task_struct *task = regs->task; + u32 attempt, locked = 0; + u64 new_chain_base = (u64) regs->base_chain_stack; + u64 orig_chain_base, orig_psp_base; + s64 cr_ind = regs->size_chain_stack; + s64 kernel_size_chain_stack = regs->size_chain_stack - + regs->user_size_chain_stack; + e2k_mem_crs_t crs = regs->crs; + u64 new_psp_base = (u64) regs->base_psp_stack; + s64 psp_ind = regs->size_psp_stack; + s64 kernel_size_psp_stack = regs->size_psp_stack - + regs->user_size_psp_stack; + stack_frame_t cur_frame; + bool ignore_ip = false; + int trap_num = 0; +#ifdef CONFIG_DATA_STACK_WINDOW + e2k_cr1_lo_t prev_cr1_lo; + e2k_cr1_hi_t prev_k_cr1_hi; + bool show_k_data_stack = !!regs->base_k_data_stack; + int pt_regs_num = 0; + void *base_k_data_stack = regs->base_k_data_stack; + u64 size_k_data_stack = regs->size_k_data_stack; +#endif + int last_user_windows = 2; + int i; + int timeout = is_prototype() ? 150000 : 30000; + + if (!regs->valid) { + pr_alert(" BUG print_chain_stack pid=%d valid=0\n", + (task) ? 
task->pid : -1); + return; + } + if (!regs->base_chain_stack) { + pr_alert(" BUG could not get task %s (%d) stack registers, " + "stack will not be printed\n", + task->comm, task->pid); + return; + } + + if (unlikely(!raw_irqs_disabled())) + pr_alert("WARNING: print_chain_stack called with enabled interrupts\n"); + + /* If task is current, disable NMIs so that interrupts handlers + * will not spill our stacks.*/ + disable_nmis = (task == current); + if (disable_nmis) + raw_all_irq_save(flags); + /* Try locking the spinlock (with 30 seconds timeout) */ + attempt = 0; + do { + if (raw_spin_trylock(&print_stack_lock)) { + locked = 1; + break; + } + + /* Wait for 0.001 second. */ + if (disable_nmis) + raw_all_irq_restore(flags); + udelay(1000); + if (disable_nmis) + raw_all_irq_save(flags); + } while (attempt++ < timeout); + if (disable_nmis) { + COPY_STACKS_TO_MEMORY(); + } + + debug_userstack |= (print_window_regs && debug_guest_regs(task)); + + if (!regs->ignore_banner) { + if (IS_KERNEL_THREAD(task, task->mm)) { + pr_info("Task %s (%d) is Kernel Thread\n", + task->comm, task->pid); + } else { + pr_info("Task %s (%d) is User Thread\n", + task->comm, task->pid); + } + + pr_alert("PROCESS: %s, PID: %d, %s: %d, state: %c %s " + "(0x%lx), flags: 0x%x\n", + task->comm == NULL ? "NULL" : task->comm, + task->pid, + get_cpu_type_name(), + task_cpu(task), task_state_to_char(task), +#ifdef CONFIG_SMP + task_curr(task) ? 
"oncpu" : "", +#else + "", +#endif + task->state, task->flags); + } + + if (!regs->base_psp_stack) { + pr_alert(" WARNING could not get task %s (%d) procedure stack " + "registers, register windows will not be printed\n", + task->comm, task->pid); + show_reg_window = 0; + } else { + show_reg_window = show_reg_window && (task == current || + print_window_regs || task_curr(task) || + debug_guest_regs(task)); + } + + /* Print header */ + if (show_reg_window) { + pr_alert(" PSP: base 0x%016llx ind 0x%08x size 0x%08x\n", + AS_STRUCT(regs->psp_lo).base, + AS_STRUCT(regs->psp_hi).ind, + AS_STRUCT(regs->psp_hi).size); + pr_alert(" PCSP: base 0x%016llx ind 0x%08x size 0x%08x\n", + AS_STRUCT(regs->pcsp_lo).base, + AS_STRUCT(regs->pcsp_hi).ind, + AS_STRUCT(regs->pcsp_hi).size); + pr_alert(" ---------------------------------------------------------------------\n" + " IP (hex) PROCEDURE/FILE(@ Library load address)\n" + " ---------------------------------------------------------------------\n"); + } + + for (;;) { + if (kernel_size_chain_stack > 0) { + orig_chain_base = regs->orig_base_chain_stack_k; + kernel_size_chain_stack -= SZ_OF_CR; + } else { + orig_chain_base = regs->orig_base_chain_stack_u; + } + print_funcy_ip(AS(crs.cr0_hi).ip << 3, new_chain_base, cr_ind, + task, orig_chain_base); + + if (show_reg_window) { + psp_ind -= AS(crs.cr1_lo).wbs * EXT_4_NR_SZ; + + if (regs->show_trap_regs && trap_num < MAX_USER_TRAPS && + regs->trap[trap_num].valid && + regs->trap[trap_num].frame == + orig_chain_base + cr_ind) { + if (machine.native_iset_ver >= E2K_ISET_V6) { + pr_alert(" ctpr1 %llx:%llx ctpr2 %llx:%llx ctpr3 %llx:%llx\n" + " lsr %llx ilcr %llx lsr1 %llx ilcr1 %llx\n", + AW(regs->trap[trap_num].ctpr1_hi), + AW(regs->trap[trap_num].ctpr1), + AW(regs->trap[trap_num].ctpr2_hi), + AW(regs->trap[trap_num].ctpr2), + AW(regs->trap[trap_num].ctpr3_hi), + AW(regs->trap[trap_num].ctpr3), + regs->trap[trap_num].lsr, + regs->trap[trap_num].ilcr, + regs->trap[trap_num].lsr1, + 
regs->trap[trap_num].ilcr1); + } else if (machine.native_iset_ver == E2K_ISET_V5) { + pr_alert(" ctpr1 %llx ctpr2 %llx ctpr3 %llx\n" + " lsr %llx ilcr %llx lsr1 %llx ilcr1 %llx\n", + AW(regs->trap[trap_num].ctpr1), + AW(regs->trap[trap_num].ctpr2), + AW(regs->trap[trap_num].ctpr3), + regs->trap[trap_num].lsr, + regs->trap[trap_num].ilcr, + regs->trap[trap_num].lsr1, + regs->trap[trap_num].ilcr1); + } else { + pr_alert(" ctpr1 %llx ctpr2 %llx ctpr3 %llx\n" + " lsr %llx ilcr %llx\n", + AW(regs->trap[trap_num].ctpr1), + AW(regs->trap[trap_num].ctpr2), + AW(regs->trap[trap_num].ctpr3), + regs->trap[trap_num].lsr, + regs->trap[trap_num].ilcr); + } + for (i = 0; i < SBBP_ENTRIES_NUM; i += 4) { + pr_alert(" sbbp%-2d 0x%-12llx 0x%-12llx 0x%-12llx 0x%-12llx\n", + i, regs->trap[trap_num].sbbp[i], + regs->trap[trap_num].sbbp[i + 1], + regs->trap[trap_num].sbbp[i + 2], + regs->trap[trap_num].sbbp[i + 3]); + } + ++trap_num; + } + cur_frame = get_task_stack_frame_type_IP(task, + crs.cr0_hi, crs.cr1_lo, ignore_ip); + if (cur_frame != user_frame_type || + regs->show_user_regs || last_user_windows) { + /* Show a couple of last user windows - usually + * there is something useful there */ + if ((cur_frame == user_frame_type) && + last_user_windows) + --last_user_windows; + + if (kernel_size_psp_stack > 0) { + orig_psp_base = regs->orig_base_psp_stack_k; + kernel_size_psp_stack -= AS(crs.cr1_lo).wbs * EXT_4_NR_SZ; + } else { + orig_psp_base = regs->orig_base_psp_stack_u; + } + + pr_alert(" PCSP: 0x%llx, PSP: 0x%llx/0x%x\n", + orig_chain_base + cr_ind, + orig_psp_base + psp_ind, + AS(crs.cr1_lo).wbs * EXT_4_NR_SZ); + + print_predicates(crs.cr0_lo, crs.cr1_hi); + + if (psp_ind < 0 && cr_ind > 0) { + pr_alert("! 
Invalid Register Window index (psp.ind) 0x%llx", + psp_ind); + } else if (psp_ind >= 0) { + print_reg_window(new_psp_base + psp_ind, + AS(crs.cr1_lo).wbs, + AS(crs.cr1_lo).wfx, crs.cr1_hi); + } + } + } +#ifdef CONFIG_DATA_STACK_WINDOW + if (show_k_data_stack && + call_from_kernel_mode(crs.cr0_hi, crs.cr1_lo)) { + u64 k_window_size; + s64 cur_chain_index; + + /* To find data stack window size we have to + * read cr1.hi from current *and* previous frames */ + cur_chain_index = cr_ind; + do { + cur_chain_index -= SZ_OF_CR; + if (cur_chain_index < 0) + /* This is a thread created with clone + * and we have reached the last kernel + * frame. */ + break; + + get_kernel_cr1_lo(&prev_cr1_lo, new_chain_base, + cur_chain_index); + } while (!AS(prev_cr1_lo).pm); + + if (cur_chain_index < 0) { + k_window_size = size_k_data_stack; + } else { + get_kernel_cr1_hi(&prev_k_cr1_hi, + new_chain_base, cur_chain_index); + + k_window_size = 16 * AS(prev_k_cr1_hi).ussz - + 16 * AS(crs.cr1_hi).ussz; + if (k_window_size > size_k_data_stack) { + /* The stack is suspiciously large */ + k_window_size = size_k_data_stack; + pr_alert(" This is the last frame or it was not copied fully\n" + "The stack is suspiciously large (0x%llx)\n", + k_window_size); + show_k_data_stack = 0; + } + } + print_k_data_stack(regs, &pt_regs_num, (unsigned long) + base_k_data_stack, k_window_size); + base_k_data_stack += k_window_size; + size_k_data_stack -= k_window_size; + if (!size_k_data_stack) + show_k_data_stack = 0; + } +#endif + + if (cr_ind < SZ_OF_CR) + break; + + cr_ind -= SZ_OF_CR; + + /* + * Last frame is bogus (from execve or clone), skip it. 
+ * + * For kernel threads there is one more reserved frame + * (for start_thread()) + */ + if ((cr_ind == 0 || + cr_ind == SZ_OF_CR && (task->flags & PF_KTHREAD)) && + (task == current || + regs->size_chain_stack < SIZE_CHAIN_STACK)) + break; + + crs = *(e2k_mem_crs_t *) (new_chain_base + cr_ind); + } + + if (cr_ind < 0) + pr_alert("INVALID cr_ind SHOULD BE 0\n"); + +#ifdef CONFIG_GREGS_CONTEXT + if (show_reg_window && regs->show_user_regs && regs->gregs_valid) { + int i; + + pr_alert(" Global registers: bgr.cur = %d, bgr.val = 0x%x\n", + AS(regs->gregs.bgr).cur, AS(regs->gregs.bgr).val); + for (i = 0; i < 32; i += 2) { + u64 val_lo, val_hi; + u8 tag_lo, tag_hi; + + load_value_and_tagd(®s->gregs.g[i + 0].base, + &val_lo, &tag_lo); + load_value_and_tagd(®s->gregs.g[i + 1].base, + &val_hi, &tag_hi); + + if (machine.native_iset_ver < E2K_ISET_V5) { + pr_alert(" g%-3d: %hhx %016llx %04hx " + "g%-3d: %hhx %016llx %04hx\n", + i, tag_lo, val_lo, + (u16) regs->gregs.g[i].ext, + i + 1, tag_hi, val_hi, + (u16) regs->gregs.g[i+1].ext); + } else { + u64 ext_lo_val, ext_hi_val; + u8 ext_lo_tag, ext_hi_tag; + + load_value_and_tagd(®s->gregs.g[i + 0].ext, + &ext_lo_val, &ext_lo_tag); + load_value_and_tagd(®s->gregs.g[i + 1].ext, + &ext_hi_val, &ext_hi_tag); + + pr_alert(" g%-3d: %hhx %016llx ext: %hhx %016llx\n", + i, tag_lo, val_lo, + ext_lo_tag, ext_lo_val); + pr_alert(" g%-3d: %hhx %016llx ext: %hhx %016llx\n", + i + 1, tag_hi, val_hi, + ext_hi_tag, ext_hi_val); + } + } + } +#endif + + if (locked) + raw_spin_unlock(&print_stack_lock); + if (disable_nmis) + raw_all_irq_restore(flags); +} + +static int sim_panic(struct notifier_block *this, unsigned long ev, void *ptr) +{ + if (NATIVE_IS_MACHINE_SIM) { + kmsg_dump(KMSG_DUMP_PANIC); + bust_spinlocks(0); + debug_locks_off(); + console_flush_on_panic(CONSOLE_REPLAY_ALL); + E2K_LMS_HALT_ERROR(100); + } + return 0; +} + +static struct notifier_block sim_panic_block = { + .notifier_call = sim_panic, +}; + +static int __init 
sim_panic_init(void)
+{
+	atomic_notifier_chain_register(&panic_notifier_list, &sim_panic_block);
+	return 0;
+}
+early_initcall(sim_panic_init);
+
+/* Create @dir and a char-device node @node (major/minor 254:254). */
+static int
+el_sys_mknod(char *dir, char *node)
+{
+	long rval;
+	mode_t mode;
+	int maj = 254;
+	int min = 254;
+	dev_t dev;
+
+	mode = (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
+	rval = sys_mkdir(dir, mode);
+	printk("el_sys_mknod: sys_mkdir %s rval %ld\n", dir, rval);
+	dev = (maj << 8) | min;
+	rval = sys_mknod(node, mode | S_IFCHR, dev);
+	/* BUGFIX: "0x%ulx" consumed the argument as '%u' and printed a
+	 * literal "lx"; dev_t is a 32-bit value, so "0x%x" is correct. */
+	printk("el_sys_mknod: sys_mknod %s rval %ld dev 0x%x\n",
+		node, rval, dev);
+	return rval;
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+	print_stack_frames(task, NULL, 1);
+}
+
+/* Fill a user page with a tagged fill pattern encoded in @params:
+ * bits 12.. = size, bits 4..11 = byte pattern, bits 0..3 = tag. */
+void
+set_tags(__e2k_u64_t* addr, long params) {
+	long size,i;
+	register __e2k_u64_t dword;
+	register __e2k_u32_t tag;
+
+	size = params >> 12; // page size
+	if ((size < 0) || ((size % sizeof(__e2k_u64_t)) != 0)) {
+		printk("set_tags() Wrong parameter size in 'set_tags' - 0x%lx\n",size);
+		return;
+	}
+	dword = ((params & (0xff0)) >> 4) | ((params & 0xff0) << 4);
+	dword = dword | (dword << 16);
+	dword = dword | (dword << 32);
+	tag = (params & 0xf);
+
+	/* After E2K_PUTTAGD must STRONGLY follow STORE_TAG asm
+	 * to avoid compiler's problems */
+	for(i = 0; i < size/sizeof(__e2k_u64_t); i++) {
+		NATIVE_STORE_VALUE_WITH_TAG(addr, dword, tag);
+		addr++;
+	}
+}
+
+/* Verify the tagged pattern written by set_tags(); returns 0 on match,
+ * -1 on mismatch/bad size, -EFAULT on an unreadable address. */
+static long
+check_tags(__e2k_u64_t* addr, long params) {
+	long size,i;
+	long res = 0;
+	u64 dword, dval;
+	u8 tag, tval;
+
+	size = params >> 12;
+	if ((size < 0) || ((size % sizeof(__e2k_u64_t)) != 0)) {
+		/* BUGFIX: the message named 'set_tags'; this is check_tags() */
+		printk("check_tags() Wrong parameter size in 'check_tags' - 0x%lx\n",
+			size);
+		return -1;
+	}
+
+	dword = ((params & (0xff0)) >> 4) | ((params & 0xff0) << 4);
+	dword = dword | (dword << 16);
+	dword = dword | (dword << 32);
+	tag = (params & 0xf);
+
+	for(i = 0; i < size/sizeof(__e2k_u64_t); i++) {
+		TRY_USR_PFAULT {
+			load_value_and_tagd(addr, &dval, &tval);
+		} CATCH_USR_PFAULT {
+
return -EFAULT; + } END_USR_PFAULT + + if (dword != dval) { + printk("check_tags() DWORD 0x%llx differs from expected value 0x%llx, address 0x%px\n", dval, dword, addr); + return -1; + res = -1; + } + if (tag != tval) { + printk("check_tags() TAG 0x%d differs from expected value 0x%d, address 0x%px\n", tval, tag, addr); + return -1; + res = -1; + } + addr++; + } + + return res; +} + +extern e2k_addr_t print_kernel_address_ptes(e2k_addr_t address); +extern void print_vma_and_ptes(struct vm_area_struct *vma, e2k_addr_t address); + +long +get_addr_prot(long addr) +{ + struct task_struct *tsk; + struct mm_struct *mm; + struct vm_area_struct *vma; + long pgprot; + + tsk = current; + mm = tsk->mm; + down_read(&mm->mmap_sem); + vma = find_vma(mm, addr); + if (vma == NULL) { + pgprot = pmd_virt_offset(addr); + goto end; + } + pgprot = pgprot_val(vma->vm_page_prot); + +end: up_read(&mm->mmap_sem); + if (vma != NULL) + print_vma_and_ptes(vma, addr); + else + print_kernel_address_ptes(addr); + return pgprot; +} +static void +print_ide_info(int hwif_nr, int unit_nr, ide_drive_t *drive) +{ + + printk("hwif %d unit %d\n", hwif_nr, unit_nr); + printk("drive->name %s\n", drive->name); + printk("drive->media %d\n", drive->media); + printk("drive->using_dma %d\n", + (drive->dev_flags & IDE_DFLAG_USING_DMA) == 1); + +} +static ide_hwif_t ide_hwifs[MAX_HWIFS]; /* FIXME : ide_hwifs deleted */ +static void +all_ide(int what) +{ + unsigned int i, unit; + ide_hwif_t *hwif; + ide_drive_t *drive; + + for (i = 0; i < MAX_HWIFS; ++i) { + hwif = &ide_hwifs[i]; + if (!hwif->present) continue; + for (unit = 0; unit < MAX_DRIVES; ++unit) { + drive = hwif->devices[unit]; + if (!(drive->dev_flags & IDE_DFLAG_PRESENT)) continue; + if (what == ALL_IDE) { + print_ide_info(i, unit, drive); + continue; + } + if (what == USING_DMA) { + if (drive->dev_flags & IDE_DFLAG_USING_DMA) { + printk("IDE %s WITH USING_DMA\n", + drive->name); + } else { + printk("IDE %s WITHOUT USING_DMA\n", + drive->name); + } + 
break; + } + } + } +} +long +ide_info(long what) +{ + switch(what) { + case ALL_IDE: + all_ide(ALL_IDE); + break; + case USING_DMA: + all_ide(USING_DMA); + break; + default: + printk("Unknowing ide_info\n"); + break; + } + return 0; + +} + +static long val_1; +static long val_2; +static caddr_t addr1; +static caddr_t addr2; +long +instr_exec(info_instr_exec_t *info) +{ + long la[4]; + long rval = -1; + + switch(info->instr_type) { + case PAR_WRITE: + + if (info->addr1 < 0 && info->addr2 < TASK_SIZE) { + addr1 = (void *)&la[0]; + addr2 = (void *)info->addr2; + val_1 = info->val_1; + val_2 = info->val_2; + printk("instr_exec:\n"); + E2K_PARALLEL_WRITE(addr1, val_1, addr2, val_2); + rval = la[0]; + } + break; + default: + printk("Unknowing instr_exec\n"); + break; + } + return rval; +} + +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT +static void +sys_e2k_print_syscall_times(scall_times_t *times) +{ + e2k_clock_t clock_time, func_time; + int syscall_num = times->syscall_num; + + printk(" System call # %d execution time info:\n", syscall_num); + clock_time = CALCULATE_CLOCK_TIME(times->start, times->end); + printk(" total execution time \t\t\t\t% 8ld\n", clock_time); + func_time = CALCULATE_CLOCK_TIME(times->scall_switch, + times->scall_done); + printk(" execution time without syscall function \t\t% 8ld\n", + clock_time - func_time ); + clock_time = CALCULATE_CLOCK_TIME(times->start, times->pt_regs_set); + printk(" pt_regs structure calculation \t\t\t% 8ld\n", clock_time); + clock_time = CALCULATE_CLOCK_TIME(times->pt_regs_set, + times->save_stack_regs); + printk(" stacks registers saving \t\t\t\t% 8ld\n", clock_time); + clock_time = CALCULATE_CLOCK_TIME(times->save_stack_regs, + times->save_sys_regs); + printk(" system registers saving \t\t\t\t% 8ld\n", clock_time); + clock_time = CALCULATE_CLOCK_TIME(times->save_sys_regs, + times->save_stacks_state); + printk(" stacks state saving \t\t\t\t% 8ld\n", clock_time); + clock_time = CALCULATE_CLOCK_TIME(times->save_stacks_state, + 
times->save_thread_state); + printk(" thread info state saving \t\t\t\t% 8ld\n", clock_time); + clock_time = CALCULATE_CLOCK_TIME(times->save_thread_state, + times->scall_switch); + printk(" time after all savings and before sys " + "func. \t% 8ld\n", clock_time); + printk(" syscall function execution time \t\t\t% 8ld\n", + func_time); + if (syscall_num == __NR_exit || syscall_num == __NR_execve) + return; + clock_time = CALCULATE_CLOCK_TIME(times->scall_done, + times->restore_thread_state); + printk(" thread info state restoring \t\t\t% 8ld\n", + clock_time); + clock_time = CALCULATE_CLOCK_TIME(times->restore_thread_state, + times->check_pt_regs); + printk(" pt_regs structure checking \t\t\t% 8ld\n", + clock_time); + if (times->signals_num != 0) { + clock_time = CALCULATE_CLOCK_TIME(times->check_pt_regs, + times->do_signal_start); + printk(" time between checking and start " + "signal handling \t% 8ld\n", + clock_time); + clock_time = CALCULATE_CLOCK_TIME(times->do_signal_start, + times->do_signal_done); + printk(" signal handling time \t\t\t\t% 8ld\n", + clock_time); + clock_time = CALCULATE_CLOCK_TIME(times->do_signal_done, + times->restore_start); + printk(" time between signal handling and " + "start restoring \t% 8ld\n", + clock_time); + } else { + clock_time = CALCULATE_CLOCK_TIME(times->check_pt_regs, + times->restore_start); + printk(" time after checking and before " + "start restoring \t% 8ld\n", + clock_time); + } + clock_time = CALCULATE_CLOCK_TIME(times->restore_start, + times->restore_user_regs); + printk(" time of user registers restoring \t\t% 8ld\n", + clock_time); + clock_time = CALCULATE_CLOCK_TIME(times->restore_user_regs, + times->end); + printk(" time after restoring and before return \t\t% 8ld\n", + clock_time); + printk(" after sys call PSP.ind 0x%lx + PSHTP 0x%lx\n", + times->psp_ind, + GET_PSHTP_MEM_INDEX(times->pshtp)); + printk(" before done: PSP.ind 0x%lx + PSHTP 0x%lx\n", + times->psp_ind_to_done, + 
GET_PSHTP_MEM_INDEX(times->pshtp_to_done)); +} +static void +sys_e2k_print_trap_times(trap_times_t *times) +{ + e2k_clock_t clock_time; + int tir; + + printk(" Trap info start clock 0x%016lx end clock 0x%016lx\n", + times->start, times->end); + clock_time = CALCULATE_CLOCK_TIME(times->start, times->end); + printk(" total execution time \t\t\t\t% 8ld\n", clock_time); + clock_time = CALCULATE_CLOCK_TIME(times->start, times->pt_regs_set); + printk(" pt_regs structure calculation \t\t\t% 8ld\n", clock_time); + tir = times->nr_TIRs; + printk(" TIRs number %d\n", tir); + for ( ; tir >= 0; tir --) { + printk(" TIR[%02d].hi 0x%016lx .lo 0x%016lx\n", + tir, + times->TIRs[tir].TIR_hi.TIR_hi_reg, + times->TIRs[tir].TIR_lo.TIR_lo_reg); + } + printk(" Total handled trap number %d\n", times->trap_num); + printk(" Procedure stack bounds %s handled: PSP.ind 0x%lx " + "size 0x%lx (PSP.ind 0x%lx + PSHTP 0x%lx)\n", + (times->ps_bounds) ? "WAS" : "was NOT", + times->psp_hi.PSP_hi_ind, + times->psp_hi.PSP_hi_size, + times->psp_ind, + GET_PSHTP_MEM_INDEX(times->pshtp)); + printk(" Chain procedure stack bounds %s handled: PCSP.ind " + "0x%lx size 0x%lx\n", + (times->pcs_bounds) ? 
"WAS" : "was NOT", + times->pcsp_hi.PCSP_hi_ind, + times->pcsp_hi.PCSP_hi_size); + printk(" PSP to done ind 0x%lx size 0x%lx PSHTP 0x%lx\n", + times->psp_hi_to_done.PSP_hi_ind, + times->psp_hi_to_done.PSP_hi_size, + GET_PSHTP_MEM_INDEX(times->pshtp_to_done)); + printk(" PCSP to done ind 0x%lx size 0x%lx\n", + times->pcsp_hi_to_done.PCSP_hi_ind, + times->pcsp_hi_to_done.PCSP_hi_size); + printk(" CTPRs saved 1 : 0x%016lx 2 : 0x%016lx 3 : 0x%016lx\n", + times->ctpr1, times->ctpr2, times->ctpr3); + printk(" CTPRs to done 1 : 0x%016lx 2 : 0x%016lx 3 : 0x%016lx\n", + times->ctpr1_to_done, times->ctpr2_to_done, times->ctpr3_to_done); +} + +void +sys_e2k_print_kernel_times(struct task_struct *task, + kernel_times_t *times, long times_num, int times_index) +{ + kernel_times_t *cur_times; + times_type_t type; + int count; + int times_count; + int cur_index; + unsigned long flags; + + raw_local_irq_save(flags); + if (times_num >= MAX_KERNEL_TIMES_NUM) { + times_count = MAX_KERNEL_TIMES_NUM; + cur_index = times_index; + } else { + times_count = times_num; + cur_index = times_index - times_num; + if (cur_index < 0) + cur_index += MAX_KERNEL_TIMES_NUM; + } + printk("Kernel execution time info, process %d (\"%s\"), records " + "# %d, total events %ld\n", + task->pid, task->comm == NULL ? 
"NULL": task->comm, + times_count, times_num); + for (count = 0; count < times_count; count ++) { + cur_times = ×[cur_index]; + type = cur_times->type; + switch (type) { + case SYSTEM_CALL_TT: + sys_e2k_print_syscall_times(&cur_times->of.syscall); + break; + case TRAP_TT: + sys_e2k_print_trap_times(&cur_times->of.trap); + break; + default: + printk("Unknown kernel times structure type\n"); + } + cur_index ++; + if (cur_index >= MAX_KERNEL_TIMES_NUM) + cur_index = 0; + } + raw_local_irq_restore(flags); +} +#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */ + +#define CMOS_CSUM_ENABLE +#undef CHECK_CSUM + +static unsigned long ecmos_read(unsigned long port) +{ +#ifdef CHECK_CSUM + int i; + u32 sss = 0; + for (i = CMOS_BASE; i < CMOS_BASE + CMOS_SIZE - 2; i++) { + sss += (long) bios_read(i); + } + printk(" --- sum = %x\n", sss); + printk(" --- csum = %x\n", (bios_read(CMOS_BASE + BIOS_CSUM + 1) << 8) + | bios_read(CMOS_BASE + BIOS_CSUM)); +#endif + + if (port < CMOS_BASE || port > (CMOS_BASE + CMOS_SIZE - 1)) + return -1; + return (unsigned long)bios_read(port); +} + +static long ecmos_write(unsigned long port, unsigned char val) +{ +#ifdef CMOS_CSUM_ENABLE + unsigned int sum; + unsigned char byte; +#endif + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (port < BIOS_BOOT_KNAME || port > (BIOS_BOOT_KNAME + name_length)) + return -1; +#ifdef CMOS_CSUM_ENABLE + sum = ecmos_read(BIOS_CSUM2) << 8; + sum |= ecmos_read(BIOS_CSUM); + byte = ecmos_read(port); + sum = sum - byte + val; + bios_write(sum & 0xff, BIOS_CSUM); + bios_write((sum >> 8) & 0xff, BIOS_CSUM2); +#endif + bios_write(val, port); + return 0; +} + +static struct e2k_bios_param new_bios_sets; +static char cached_cmdline [cmdline_length + 1] = "\0"; + +static long read_boot_settings(struct e2k_bios_param *bios_settings) +{ + + int i, fd; + long rval; + mode_t mode; + mm_segment_t fs; + + /* kernel_name */ + for(i = 0; i < name_length; i++) + new_bios_sets.kernel_name[i] = bios_read(i + BIOS_BOOT_KNAME); + 
new_bios_sets.kernel_name[i] = '\0'; + /* cmd_line */ + if (!cached_cmdline[0]) { + fs = get_fs(); + set_fs(get_ds()); + + mode = (S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); + + rval = sys_mkdir("/boot", mode); + rval = sys_mount("/dev/hda1", "/boot", "ext3", 0, NULL); + if (rval < 0) { + rval = sys_mount("/dev/hda1", "/boot", "ext2", 0, NULL); + } + fd = sys_open("/boot/boot/cmdline", O_RDONLY, 0); + new_bios_sets.command_line[0] = '\0'; + if (fd >= 0) { + rval = sys_read(fd, new_bios_sets.command_line, + cmdline_length + 1); + memcpy(cached_cmdline, new_bios_sets.command_line, + cmdline_length + 1); + rval = sys_close(fd); + } + rval = sys_umount("/boot", 0); + set_fs(fs); + } + memcpy(new_bios_sets.command_line, cached_cmdline, cmdline_length + 1); + /* booting_item */ + new_bios_sets.booting_item = bios_read(BIOS_BOOT_ITEM); + /* device number(0 - 3) */ + new_bios_sets.dev_num = bios_read(BIOS_DEV_NUM); + /* 3 - 38400 other - 115200 */ + new_bios_sets.serial_rate = bios_read(BIOS_SERIAL_RATE); + if (new_bios_sets.serial_rate == 3) + new_bios_sets.serial_rate = 38400; + else + new_bios_sets.serial_rate = 115200; + /* boot waiting seconds */ + new_bios_sets.autoboot_timer = bios_read(BIOS_AUTOBOOT_TIMER); + /* architecture type */ + new_bios_sets.machine_type = bios_read(BIOS_MACHINE_TYPE); + + if (copy_to_user(bios_settings, &new_bios_sets, sizeof(e2k_bios_param_t))) + return -EFAULT; + return 0; +} + +static long write_boot_settings(struct e2k_bios_param *bios_settings) +{ + int i, fd; + long rval; + mode_t mode; + mm_segment_t fs; +#ifdef CMOS_CSUM_ENABLE + unsigned int checksum; +#endif + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (copy_from_user(&new_bios_sets, bios_settings, sizeof(e2k_bios_param_t))) + return -EFAULT; + /* kernel_name */ + if (new_bios_sets.kernel_name[0]) { + for(i = 0; i < name_length; i++) + bios_write(new_bios_sets.kernel_name[i],i+BIOS_BOOT_KNAME); + } + /* cmd_line */ + if (new_bios_sets.command_line[0]) { + fs = 
get_fs(); + set_fs(get_ds()); + + mode = (S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); + + rval = sys_mkdir("/boot", mode); + rval = sys_mount("/dev/hda1", "/boot", "ext3", 0, NULL); + if (rval < 0) { + rval = sys_mount("/dev/hda1", "/boot", "ext2", 0, NULL); + } + fd = sys_open("/boot/boot/cmdline", O_WRONLY, 0); + if (fd < 0) { + cached_cmdline[0] = '\0'; + } else { + rval = sys_write(fd, new_bios_sets.command_line, cmdline_length + 1); + if (rval < 0) cached_cmdline[0] = '\0'; + else memcpy(cached_cmdline, new_bios_sets.command_line, + cmdline_length + 1); + rval = sys_close(fd); + } + rval = sys_umount("/boot", 0); + set_fs(fs); + } + /* booting_item */ + bios_write(BIOS_TEST_FLAG, 0); // reset test flag + if (new_bios_sets.booting_item != BIOS_UNSET_ONE) + bios_write(new_bios_sets.booting_item, BIOS_BOOT_ITEM); + /* device number(0 - 3) */ + if (new_bios_sets.dev_num != BIOS_UNSET_ONE) + bios_write(new_bios_sets.dev_num, BIOS_DEV_NUM); + /* 3 - 38400 other - 115200 */ + if (new_bios_sets.serial_rate != BIOS_UNSET_ONE) { + if (new_bios_sets.serial_rate == 38400) + bios_write(3, BIOS_SERIAL_RATE); + else + bios_write(1, BIOS_SERIAL_RATE); + } + /* boot waiting seconds */ + if (new_bios_sets.autoboot_timer != BIOS_UNSET_ONE) + bios_write(new_bios_sets.autoboot_timer, BIOS_AUTOBOOT_TIMER); + /* architecture type */ + if (new_bios_sets.machine_type != BIOS_UNSET_ONE) + bios_write(new_bios_sets.machine_type, BIOS_MACHINE_TYPE); + + /* checksum */ +#ifdef CMOS_CSUM_ENABLE + checksum = _bios_checksum(); + bios_write((checksum) & 0xff, BIOS_CSUM); + bios_write((checksum >> 8) & 0xff, BIOS_CSUM2); +#endif + return 0; +} + +#ifdef CONFIG_DEBUG_KERNEL +/* + * Bellow procedures are using for testing kernel procedure/chain/data stacks oferflow. 
+ * Launch: e2k_syswork(TEST_OVERFLOW, 0, 0); + */ + +noinline static int overflow(int recur, u64 x) +{ + psp_struct_t PSP_my = {{{0}}, {{0}}}; + pcsp_struct_t PCSP_my = {{{0}}, {{0}}}; + u64 psp_base, psp_size, psp_ind, rval; + u64 pcsp_base, pcsp_size, pcsp_ind; + u64 t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10; + /* u64 a[512]; So we can get exc_array_bounds - data stack oferflow */ + + PSP_my = READ_PSP_REG(); + psp_base = PSP_my.PSP_base; + psp_size = PSP_my.PSP_size; + psp_ind = PSP_my.PSP_ind; + + PCSP_my = READ_PCSP_REG(); + pcsp_base = PCSP_my.PCSP_base; + pcsp_size = PCSP_my.PCSP_size; + pcsp_ind = PCSP_my.PCSP_ind; + + t0 = recur + 1; + pr_info("overflow recur:%d psp_base:0x%llx psp_size:0x%llx psp_ind:0x%llx " + "tick:%lld\n", + recur, psp_base, psp_size, psp_ind, READ_CLKR_REG()); + pr_info("\tpcsp_base:0x%llx pcsp_size:0x%llx pcsp_ind:0x%llx\n", + pcsp_base, pcsp_size, pcsp_ind); + t1 = psp_base + 1; t2 = psp_base + 2; t3 = psp_base + 3; + t4 = psp_size + 4; t5 = psp_size + 5; t6 = psp_size + 6; t7 = psp_size + 7; + t8 = psp_ind + 8; t9 = psp_ind + 9; t10 = psp_ind + 10; + + if ((t0 % 10) > 5) + rval = overflow(t0, t1+t2+t3+t4+t5); + else + rval = overflow(t0, t6+t7+t8+t9+t10); + return rval; +} + +static int over_thread(void *__unused) +{ + struct task_struct *cur = current; + psp_struct_t PSP_my = {{{0}}, {{0}}}; + int psp_base, psp_size, psp_ind, rval; + + pr_info("over_thread start mm:%px name:%s pid:%d\n", + cur->mm, cur->comm, cur->pid); + PSP_my = READ_PSP_REG(); + psp_base = PSP_my.PSP_base; + psp_size = PSP_my.PSP_size; + psp_ind = PSP_my.PSP_ind; + pr_info("over_thread psp_base:0x%x psp_size:0x%x psp_ind:0x%x\n", + psp_base, psp_size, psp_ind); + rval = overflow(0, 1); + + pr_info("over_thread exiting\n"); + return 0; +} +#endif + +struct get_cr_args { + long __user *cr_storage; + long num; +}; + +static int __get_cr(e2k_mem_crs_t *frame, unsigned long real_frame_addr, + unsigned long corrected_frame_addr, int flags, void *arg) +{ + struct 
get_cr_args *args = (struct get_cr_args *) arg; + + if (args->num) { + args->num--; + return 0; + } + + if (copy_to_user(args->cr_storage, frame, sizeof(e2k_mem_crs_t))) { + DebugGC("Unhandled page fault\n"); + return -EFAULT; + } + + return 1; +} + +/* + * libunwind support. + * num - user section number in user Procedure Chain stack; + * cr_storage - storage to put cr0.lo, cr0.hi, cr1.lo, cr1.hi. + */ +static long get_cr(long num, long __user *cr_storage) +{ + struct get_cr_args args; + long ret; + + DebugGC("get_cr num:0x%lx cr_storage:%px\n", num, cr_storage); + + args.cr_storage = cr_storage; + args.num = num; + + ret = parse_chain_stack(PCS_USER, NULL, __get_cr, &args); + if (IS_ERR_VALUE(ret)) + return ret; + + return 0; +} + +static inline void copy_chain_frame_unpriv(e2k_mem_crs_t *to, + e2k_mem_crs_t *from, u8 cr_mask) +{ + if (cr_mask & 0x1) + to->cr0_lo = from->cr0_lo; + + if (cr_mask & 0x2) + AS(to->cr0_hi).ip = AS(from->cr0_hi).ip; + + if (cr_mask & 0x4) { + AS(to->cr1_lo).cui = AS(from->cr1_lo).cui; + if (machine.native_iset_ver < E2K_ISET_V6) + AS(to->cr1_lo).ic = AS(from->cr1_lo).ic; + AS(to->cr1_lo).ss = AS(from->cr1_lo).ss; + } + + if (cr_mask & 0x8) { + AS(to->cr1_hi).ussz = AS(from->cr1_hi).ussz; + AS(to->cr1_hi).wdbl = AS(from->cr1_hi).wdbl; + AS(to->cr1_hi).br = AS(from->cr1_hi).br; + } +} + +struct copy_chain_args { + void __user *buf; + unsigned long start; + unsigned long end; +}; + +static u8 calculate_cr_mask(unsigned long start, unsigned long end, + unsigned long corrected_frame_addr) +{ + u8 cr_mask = 0; + + if (range_includes(start, end - start, corrected_frame_addr, 8)) + cr_mask |= 0x1; + if (range_includes(start, end - start, corrected_frame_addr + 8, 8)) + cr_mask |= 0x2; + if (range_includes(start, end - start, corrected_frame_addr + 16, 8)) + cr_mask |= 0x4; + if (range_includes(start, end - start, corrected_frame_addr + 24, 8)) + cr_mask |= 0x8; + + return cr_mask; +} + +static int __read_current_chain_stack(e2k_mem_crs_t 
*frame, + unsigned long real_frame_addr, + unsigned long corrected_frame_addr, int flags, void *arg) +{ + struct copy_chain_args *args = (struct copy_chain_args *) arg; + unsigned long ts_flag, start = args->start, end = args->end; + e2k_mem_crs_t read_frame; + size_t size, offset; + u8 cr_mask; + int ret; + + if (corrected_frame_addr + SZ_OF_CR <= start) + return 1; + + if (corrected_frame_addr >= end) + return 0; + + DebugACCVM("Reading frame 0x%lx to 0x%lx (pm %d, wbs 0x%x)\n", + corrected_frame_addr, (unsigned long) buf, + AS(frame->cr1_lo).pm, AS(frame->cr1_lo).wbs); + + if (start <= corrected_frame_addr && + end >= corrected_frame_addr + SZ_OF_CR) { + cr_mask = 0xf; + size = 32; + offset = 0; + } else { + cr_mask = calculate_cr_mask(start, end, corrected_frame_addr); + size = hweight8(cr_mask) * 8; + offset = (ffs((u32) cr_mask) - 1) * 8; + } + + args->buf -= size; + + memset(&read_frame, 0, sizeof(read_frame)); + if (!AS(frame->cr1_lo).pm) + copy_chain_frame_unpriv(&read_frame, frame, cr_mask); + AS(read_frame.cr1_lo).wbs = AS(frame->cr1_lo).wbs; + AS(read_frame.cr1_lo).wpsz = AS(frame->cr1_lo).wpsz; + AS(read_frame.cr1_lo).pm = AS(frame->cr1_lo).pm; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __copy_to_user(args->buf, (void *) &read_frame + offset, + sizeof(read_frame)); + clear_ts_flag(ts_flag); + if (ret) { + DebugACCVM("Unhandled page fault\n"); + return -EFAULT; + } + + return 0; +} + +static int __write_current_chain_stack(e2k_mem_crs_t *frame, + unsigned long real_frame_addr, + unsigned long corrected_frame_addr, int flags, void *arg) +{ + struct copy_chain_args *args = (struct copy_chain_args *) arg; + unsigned long ts_flag, start = args->start, end = args->end; + e2k_mem_crs_t write_frame, user_frame; + size_t size, offset; + u8 cr_mask; + int ret; + + if (corrected_frame_addr + SZ_OF_CR <= start) + return 1; + + if (corrected_frame_addr >= end || AS(frame->cr1_lo).pm) + return 0; + + DebugACCVM("Writing frame 0x%lx to 0x%lx (pm %d, wbs 
0x%x)\n", + (unsigned long) buf, corrected_frame_addr, + AS(frame->cr1_lo).pm, AS(frame->cr1_lo).wbs); + + if (start <= corrected_frame_addr && + end >= corrected_frame_addr + SZ_OF_CR) { + cr_mask = 0xf; + size = 32; + offset = 0; + } else { + cr_mask = calculate_cr_mask(start, end, corrected_frame_addr); + size = hweight8(cr_mask) * 8; + offset = (ffs((u32) cr_mask) - 1) * 8; + } + + args->buf -= size; + + if (__copy_from_user((void *) &user_frame + offset, args->buf, size)) { + DebugACCVM("Unhandled page fault\n"); + return -EFAULT; + } + + write_frame = *frame; + copy_chain_frame_unpriv(&write_frame, &user_frame, cr_mask); + + if (flags & PCF_FLUSH_NEEDED) + NATIVE_FLUSHC; + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __copy_to_user((void __user *) real_frame_addr + offset, + (void *) &write_frame + offset, size); + clear_ts_flag(ts_flag); + if (ret) { + DebugACCVM("Unhandled page fault\n"); + return -EFAULT; + } + + return 0; +} + +static long read_current_chain_stack(void __user *buf, + unsigned long src, unsigned long size) +{ + struct copy_chain_args args; + long ret; + + if (!IS_ALIGNED(src, SZ_OF_CR) || !IS_ALIGNED(size, SZ_OF_CR)) { + DebugACCVM("src or size is not aligned\n"); + return -EINVAL; + } + + args.buf = buf + size; + args.start = src; + args.end = src + size; + + ret = parse_chain_stack(PCS_USER, NULL, __read_current_chain_stack, &args); + if (IS_ERR_VALUE(ret)) + return ret; + + return 0; +} + +long write_current_chain_stack(unsigned long dst, void __user *buf, + unsigned long size) +{ + struct copy_chain_args args; + long ret; + + if (!IS_ALIGNED(dst, 8) || !IS_ALIGNED(size, 8)) { + DebugACCVM("dst or size is not aligned\n"); + return -EINVAL; + } + + args.buf = buf + size; + args.start = dst; + args.end = dst + size; + + ret = parse_chain_stack(PCS_USER, NULL, __write_current_chain_stack, &args); + if (IS_ERR_VALUE(ret)) + return ret; + + return 0; +} + +struct copy_proc_args { + void __user *buf; + void __user *p_stack; + s64 size; + 
s64 spilled_size; + unsigned long ps_frame_top; + int kernel_mode; + int write; +}; + +static int __copy_current_proc_stack(e2k_mem_crs_t *frame, + unsigned long real_frame_addr, + unsigned long corrected_frame_addr, int flags, void *arg) +{ + struct copy_proc_args *args = (struct copy_proc_args *) arg; + unsigned long ps_frame_top = args->ps_frame_top; + void __user *p_stack = args->p_stack; + void __user *buf = args->buf; + int kernel_mode = args->kernel_mode; + s64 spilled_size = args->spilled_size, size = args->size; + unsigned long ps_frame_size, copy_bottom, copy_top, len; + int ret; + + if ((s64) size <= 0) + return 1; + + ps_frame_size = AS(frame->cr1_lo).wbs * EXT_4_NR_SZ; + + DebugACCVM("Considering frame under 0x%lx (prev pm %d, pm %d), frame size 0x%lx, size 0x%llx\n", + ps_frame_top, kernel_mode, AS(frame->cr1_lo).pm, + ps_frame_size, size); + + if (corrected_frame_addr == (u64) CURRENT_PCS_BASE() + SZ_OF_CR) { + /* We have reached the end of stack, do the copy */ + ps_frame_top -= ps_frame_size; + } else if (!kernel_mode ^ !AS(frame->cr1_lo).pm) { + /* Boundary crossed, do the copy */ + args->kernel_mode = !kernel_mode; + } else { + if (ps_frame_top > (u64) p_stack) { + /* Continue batching frames... */ + goto next_frame; + } + /* Reached the end of requested area, do the copy */ + } + + copy_top = (u64) p_stack + size; + copy_bottom = max3((u64) ps_frame_top, (u64) p_stack, + (u64) CURRENT_PS_BASE()); + if (copy_top <= copy_bottom) { + /* Have not reached requested frame yet */ + goto next_frame; + } + + len = copy_top - copy_bottom; + if (!args->write) { + DebugACCVM("%s 0x%lx bytes from 0x%lx to 0x%lx\n", + (kernel_mode) ? 
"Clearing" : "Reading", len, + p_stack + size - len, buf + size - len); + if (kernel_mode) { + if (__clear_user((void *) (buf + size - len), len)) + return -EFAULT; + } else { + unsigned long ts_flag; + + if (spilled_size) { + s64 copy_size = min((s64) len, spilled_size); + + ret = copy_e2k_stack_to_user(buf + size - copy_size, + (void *) (AS(current_thread_info()->k_psp_lo).base + + spilled_size - copy_size), + copy_size, NULL); + if (ret) + return ret; + + args->spilled_size -= copy_size; + len -= copy_size; + size -= copy_size; + } + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __copy_in_user_with_tags(buf + size - len, + p_stack + size - len, len); + clear_ts_flag(ts_flag); + if (ret) + return -EFAULT; + } + } else if (!kernel_mode) { + /* Writing user frames to stack */ + unsigned long ts_flag; + + DebugACCVM("Writing 0x%lx bytes from 0x%lx to 0x%lx\n", + len, buf + size - len, p_stack + size - len); + if (spilled_size) { + s64 copy_size = min((s64) len, spilled_size); + + ret = copy_user_to_current_hw_stack((void *) + (AS(current_thread_info()->k_psp_lo).base + + spilled_size - copy_size), + buf + size - copy_size, copy_size, NULL, false); + if (ret) + return ret; + + args->spilled_size -= copy_size; + len -= copy_size; + size -= copy_size; + } + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __copy_in_user_with_tags(p_stack + size - len, + buf + size - len, len); + clear_ts_flag(ts_flag); + if (ret) + return -EFAULT; + } + + args->size = size - len; + +next_frame: + args->ps_frame_top -= ps_frame_size; + + return 0; +} + + +long copy_current_proc_stack(void __user *buf, void __user *p_stack, + unsigned long size, int write, unsigned long ps_used_top) +{ + struct copy_proc_args args; + unsigned long ps_spilled_size; + long ret; + + raw_all_irq_disable(); + /* Dump procedure stack frames to memory */ + if (!write) + COPY_STACKS_TO_MEMORY(); + raw_all_irq_enable(); + + args.buf = buf; + args.p_stack = p_stack; + args.size = size; + args.ps_frame_top 
= ps_used_top; + args.kernel_mode = true; + args.write = write; + + ps_spilled_size = GET_PSHTP_MEM_INDEX(current_pt_regs()->stacks.pshtp); + if (ps_used_top - ps_spilled_size < (unsigned long) p_stack + size) { + args.spilled_size = (unsigned long) p_stack + size - + (ps_used_top - ps_spilled_size); + } else { + args.spilled_size = 0; + } + + ret = parse_chain_stack(PCS_USER, NULL, __copy_current_proc_stack, &args); + if (IS_ERR_VALUE(ret)) + return ret; + + return 0; +} + +static long do_access_hw_stacks(unsigned long mode, + unsigned long long __user *frame_ptr, char __user *buf, + unsigned long buf_size, void __user *real_size, int compat) +{ + struct pt_regs *regs = current_pt_regs(); + unsigned long pcs_base, pcs_used_top, ps_base, ps_used_top; + unsigned long long frame; + long ret; + + /* Filter out illegal requests immediately. */ + if ((unsigned int) mode > E2K_WRITE_CHAIN_STACK_EX) + return -EINVAL; + + if (mode == E2K_READ_CHAIN_STACK || mode == E2K_READ_PROCEDURE_STACK || + mode == E2K_READ_CHAIN_STACK_EX || + mode == E2K_READ_PROCEDURE_STACK_EX || + mode == E2K_WRITE_PROCEDURE_STACK_EX || + mode == E2K_WRITE_CHAIN_STACK_EX || + mode == E2K_GET_CHAIN_STACK_OFFSET) { + if (get_user(frame, frame_ptr)) + return -EFAULT; + } + + if (mode == E2K_GET_CHAIN_STACK_OFFSET) { + unsigned long delta, offset; + + /* + * IMPORTANT: frame should *not* include pcshtp value, + * because then it might point outside of chain stack + * window (when setjmp is executing outside of allocated + * chain stack window and the next SPILL will cause + * a stack overflow exception). 
+ */ + if (find_in_old_u_pcs_list(frame, &delta)) + return -ESRCH; + + offset = frame + delta - (u64) CURRENT_PCS_BASE(); + + return put_user(offset, (u64 __user *) real_size); + } + + /* + * Calculate stack frame addresses + */ + pcs_base = (unsigned long) CURRENT_PCS_BASE(); + ps_base = (unsigned long) CURRENT_PS_BASE(); + + pcs_used_top = AS(regs->stacks.pcsp_lo).base + + AS(regs->stacks.pcsp_hi).ind; + ps_used_top = AS(regs->stacks.psp_lo).base + AS(regs->stacks.psp_hi).ind; + + if (real_size && (mode == E2K_READ_CHAIN_STACK || + mode == E2K_READ_PROCEDURE_STACK)) { + unsigned long used_size; + + if (mode == E2K_READ_CHAIN_STACK) + used_size = frame - pcs_base; + else /* mode == E2K_READ_PROCEDURE_STACK */ + used_size = frame - ps_base; + + if (compat) + ret = put_user(used_size, (u32 __user *) real_size); + else + ret = put_user(used_size, (u64 __user *) real_size); + if (ret) + return -EFAULT; + } + + switch (mode) { + case E2K_GET_CHAIN_STACK_SIZE: + /* + * To start unwinding procedure stack obtained by + * E2K_READ_PROC_STACK_EX from its top, the user + * needs one extra chain stack frame containing + * `%cr's related the top function. 
+ */ + ret = put_user(pcs_used_top - pcs_base + SZ_OF_CR, + (u64 __user *) real_size); + break; + case E2K_GET_PROCEDURE_STACK_SIZE: + ret = put_user(ps_used_top - ps_base, (u64 __user *) real_size); + break; + case E2K_READ_CHAIN_STACK: + case E2K_READ_CHAIN_STACK_EX: + if (!access_ok(buf, buf_size)) + return -EFAULT; + + if (mode == E2K_READ_CHAIN_STACK) { + if (frame < pcs_base || frame > pcs_used_top) + return -EAGAIN; + + if (frame - pcs_base > buf_size) + return -ENOMEM; + + ret = read_current_chain_stack(buf, pcs_base, + frame - pcs_base); + } else { /* mode == E2K_READ_CHAIN_STACK_EX */ + if ((pcs_used_top + SZ_OF_CR) - (pcs_base + frame) < + buf_size) + return -EINVAL; + + ret = read_current_chain_stack(buf, pcs_base + frame, + buf_size); + } + break; + case E2K_READ_PROCEDURE_STACK: + case E2K_READ_PROCEDURE_STACK_EX: + if (!access_ok(buf, buf_size)) + return -EFAULT; + +#if DEBUG_ACCVM + dump_stack(); +#endif + + if (mode == E2K_READ_PROCEDURE_STACK) { + if (frame < ps_base || frame > ps_used_top) + return -EAGAIN; + + if (frame - ps_base > buf_size) + return -ENOMEM; + + ret = copy_current_proc_stack(buf, + (void __user *) ps_base, + frame - ps_base, false, ps_used_top); + } else { /* mode == E2K_READ_PROCEDURE_STACK_EX */ + if (ps_used_top < (ps_base + frame) + buf_size) + return -EINVAL; + + ret = copy_current_proc_stack(buf, + (void __user *) (ps_base + frame), + buf_size, false, ps_used_top); + } + break; + case E2K_WRITE_CHAIN_STACK_EX: + if (!access_ok(buf, buf_size)) + return -EFAULT; + + if ((pcs_used_top + SZ_OF_CR) - (pcs_base + frame) < buf_size) + return -EINVAL; + + ret = write_current_chain_stack(pcs_base + frame, + buf, buf_size); + break; + case E2K_WRITE_PROCEDURE_STACK: + case E2K_WRITE_PROCEDURE_STACK_EX: + if (!access_ok(buf, buf_size)) + return -EFAULT; + + if (mode == E2K_WRITE_PROCEDURE_STACK) { + if (buf_size > ps_used_top - ps_base) + return -EINVAL; + + ret = copy_current_proc_stack(buf, + (void __user *) ps_base, + buf_size, 
true, ps_used_top); + } else { /* mode == E2K_WRITE_PROCEDURE_STACK_EX */ + if (ps_used_top < (ps_base + frame) + buf_size) + return -EINVAL; + + ret = copy_current_proc_stack(buf, + (void __user *) (ps_base + frame), + buf_size, true, ps_used_top); + } + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +long sys_access_hw_stacks(unsigned long mode, + unsigned long long __user *frame_ptr, char __user *buf, + unsigned long buf_size, void __user *real_size) +{ + return do_access_hw_stacks(mode, frame_ptr, buf, buf_size, + real_size, false); +} + +long compat_sys_access_hw_stacks(unsigned long mode, + unsigned long long __user *frame_ptr, char __user *buf, + unsigned long buf_size, void __user *real_size) +{ + return do_access_hw_stacks(mode, frame_ptr, buf, buf_size, + real_size, true); +} + + +static long +flush_cmd_caches(e2k_addr_t user_range_arr, e2k_size_t len) +{ + icache_range_array_t icache_range_arr; + + /* + * Snooping is done by hardware since V3 + */ + if (machine.native_iset_ver >= E2K_ISET_V3) + return -EPERM; + + icache_range_arr.ranges = + kmalloc(sizeof(icache_range_t) * len, GFP_KERNEL); + icache_range_arr.count = len; + icache_range_arr.mm = current->mm; + if (copy_from_user(icache_range_arr.ranges, + (const void *)user_range_arr, + sizeof(icache_range_t) * len)) { + kfree(icache_range_arr.ranges); + return -EFAULT; + } + + flush_icache_range_array(&icache_range_arr); + kfree(icache_range_arr.ranges); + return 0; +} + +/* + * sys_e2k_syswork() is to run different system work from user + */ +static int sc_restart = 0; +asmlinkage long +sys_e2k_syswork(long syswork, long arg2, long arg3, long arg4, long arg5) +{ + + long rval = 0; + + if (syswork == FAST_RETURN) /* Using to estimate time needed for entering to OS */ + return rval; + + if (syswork == E2K_ACCESS_VM) + /* DEPRECATED, DON'T USE */ + return -ENOSYS; + + if (!capable(CAP_SYS_ADMIN)) + goto user_syswork; + + switch(syswork) { + case PRINT_MMAP: + print_mmap(current); + 
break; + case PRINT_ALL_MMAP: + print_all_mmap(); + break; + case PRINT_STACK: + dump_stack(); + break; +#ifdef CONFIG_PROC_FS + case PRINT_STATM: + rval = print_statm((task_pages_info_t *)arg2, (pid_t) arg3); + break; +#endif + case GET_ADDR_PROT: + rval = get_addr_prot(arg2); + break; + case PRINT_TASKS: + /* + * Force stacks dump kernel thread to run as soon as we yield: + * to do core dump all stacks + */ + show_state(); + break; + case PRINT_REGS: + DbgESW("PRINT_PT_REGS\n"); + print_cpu_regs((char *) arg2); + break; + case SET_TAGS: + DbgESW("setting tags: address 0x%lx params 0x%lx\n", + arg2, arg3); + set_tags((__e2k_u64_t*)arg2, arg3); + break; + case CHECK_TAGS: + DbgESW("checking tags: address 0x%lx params 0x%lx\n", + arg2, arg3); + rval = check_tags((__e2k_u64_t*)arg2, arg3); + break; + case IDE_INFO: + rval = ide_info(arg2); + break; + case INSTR_EXEC: + rval = instr_exec((info_instr_exec_t *)arg2); + break; + case START_CLI_INFO: + #ifdef CONFIG_CLI_CHECK_TIME + start_cli_info(); + #endif + rval = 0; + break; + case PRINT_CLI_INFO: + print_cli_info(); + rval = 0; + break; + case SYS_MKNOD: + rval = el_sys_mknod((char *) arg2, (char *) arg3); + break; + case SET_DBG_MODE: + e2k_lx_dbg = arg2; + rval = 0; + break; + case START_OF_WORK: + end_of_work = 0; + rval = 0; + break; + case ADD_END_OF_WORK: + end_of_work++; + rval = 0; + break; + case GET_END_OF_WORK: + rval = end_of_work; + break; + case DO_E2K_HALT: + rval = end_of_work; + printk("sys_e2k_syswork: E2K_HALT_OK\n"); + E2K_HALT_OK(); + break; + case READ_ECMOS: + rval = ecmos_read((u64)arg2); + break; + case WRITE_ECMOS: + rval = ecmos_write((u64)arg2, (u8)arg3); + break; + case READ_BOOT: + rval = read_boot_settings((e2k_bios_param_t *)arg2); + break; + case WRITE_BOOT: + rval = write_boot_settings((e2k_bios_param_t *)arg2); + break; + case E2K_SC_RESTART: + if (sc_restart) { + DbgESW("restart\n"); + sc_restart = 0; + return 0; + } + DbgESW("start\n"); + sc_restart = 1; + force_sig(SIGUSR1); + 
rval = -ERESTARTNOINTR; + break; + case PRINT_PIDS: + print_pids(); + rval = 0; + break; + case PRINT_INTERRUPT_INFO: + print_interrupt_info(); + rval = 0; + break; + case CLEAR_INTERRUPT_INFO: + clear_interrupt_info(); + rval = 0; + break; + case STOP_INTERRUPT_INFO: + stop_interrupt_info(); + rval = 0; + break; + case TEST_OVERFLOW: +#ifdef CONFIG_DEBUG_KERNEL + { + struct task_struct *over; + + over = kthread_run(over_thread, NULL, "over_thr"); + if (IS_ERR(over)) + printk(" ============IS_ERR============\n"); + printk(" ============over_thread OK============\n"); + } +#endif /* CONFIG_DEBUG_KERNEL */ + break; + case USER_CONTROL_INTERRUPT: +#ifndef CONFIG_USR_CONTROL_INTERRUPTS + printk("The kernel was compiled w/o " + " CONFIG_USR_CONTROL_INTERRUPTS\n"); +# else /* CONFIG_USR_CONTROL_INTERRUPTS */ + { + unsigned long psr; + arg2 = !!arg2; + current_thread_info()->flags &= ~_TIF_USR_CONTROL_INTERRUPTS; + current_thread_info()->flags |= + arg2 << TIF_USR_CONTROL_INTERRUPTS; + if (arg2) { + psr = (PSR_UIE | PSR_UNMIE | PSR_NMIE | PSR_IE | PSR_SGE); + } else { + psr = (PSR_NMIE | PSR_IE | PSR_SGE); + } + parse_chain_stack(PCS_USER, current, correct_psr_register, + (void *) psr); + } +#endif /* CONFIG_USR_CONTROL_INTERRUPTS */ + break; + default: + rval = -1; + goto user_syswork; + } + return rval; + +user_syswork: + switch(syswork) { + case GET_CONTEXT: + rval = get_cr((long)arg2, (long *)arg3); + break; + case FLUSH_CMD_CACHES: + rval = flush_cmd_caches(arg2, arg3); + break; + default: + rval = -1; + pr_info_ratelimited("Unknown e2k_syswork %ld\n", syswork); + break; + } + return rval; +} + + +void nmi_set_hardware_data_breakpoint(struct data_breakpoint_params *params) +{ + set_hardware_data_breakpoint((unsigned long) params->address, + params->size, params->write, params->read, + params->stop, params->cp_num, 1); +} + + +/* Special versions of printk() and panic() to use inside of body_of_entry2.c + * and ttable_entry10_C()i and other functions with disabled 
data stack. + * + * We cannot use functions with variable number of arguments in functions with + * __interrupt attribute. The attribute makes compiler put all local variables + * in registers and do not use stack, but these functions pass their parameters + * through stack and thus conflict with the attribute. So we deceive the + * compiler: put functions that use stack inside of functions that do not + * use it. + * + * Why use the __interrupt attribute? It is needed because there are calls to + * clone() and fork() inside of body_of_entry2.c. The stack and frame pointers + * are cached in local registers, and since the child inherits those registers' + * contents from its parent, it has pointers to parent's stack in them. Since + * there is no way to make compiler re-read all those pointers, we use this + * workaround: add __interrupt attribute and stop using the stack altogether. + * Functions with constant number of arguments re-read stack and frame pointers + * in the beginning so they can be called safely. 
*/ +notrace void __printk_fixed_args(char *fmt, + u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6, u64 a7) +{ + printk(fmt, a1, a2, a3, a4, a5, a6, a7); +} + +__noreturn notrace void __panic_fixed_args(char *fmt, + u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6, u64 a7) +{ + panic(fmt, a1, a2, a3, a4, a5, a6, a7); +} + +#ifdef CONFIG_TRACING +notrace void ____trace_bprintk_fixed_args(unsigned long ip, + char *fmt, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6) +{ + __trace_bprintk(ip, fmt, a1, a2, a3, a4, a5, a6); +} +#endif + +notrace void __do_boot_printk_fixed_args(char *fmt, + u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6, u64 a7) +{ + do_boot_printk(fmt, a1, a2, a3, a4, a5, a6, a7); +} +notrace int __snprintf_fixed_args(char *buf, size_t size, const char *fmt, + u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) +{ + return snprintf(buf, size, fmt, a1, a2, a3, a4, a5); +} diff --git a/arch/e2k/kernel/elfcore.c b/arch/e2k/kernel/elfcore.c new file mode 100644 index 000000000000..647b7f5b69ff --- /dev/null +++ b/arch/e2k/kernel/elfcore.c @@ -0,0 +1,150 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +/* + * from file binfmt_elf.c + */ + +static struct vm_area_struct *first_vma(struct task_struct *tsk, + struct vm_area_struct *gate_vma) +{ + struct vm_area_struct *ret = tsk->mm->mmap; + + if (ret) + return ret; + return gate_vma; +} +/* + * Helper function for iterating across a vma list. It ensures that the caller + * will visit `gate_vma' prior to terminating the search. 
+ */ +static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma, + struct vm_area_struct *gate_vma) +{ + struct vm_area_struct *ret; + + ret = this_vma->vm_next; + if (ret) + return ret; + if (this_vma == gate_vma) + return NULL; + return gate_vma; +} + +/* + * Support for tags dumping + */ + +Elf64_Half elf_core_extra_phdrs(void) +{ + struct pt_regs *regs = find_host_regs(current_thread_info()->pt_regs); + + /* + * Dump all user registers + */ + if (regs) + do_user_hw_stacks_copy_full(®s->stacks, regs, NULL); + + return current->mm->map_count; +} + +int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) +{ + struct elf_phdr phdr; + struct vm_area_struct *vma; + unsigned long mm_flags = cprm->mm_flags; + struct vm_area_struct *gate_vma = get_gate_vma(current->mm); + + for (vma = first_vma(current, gate_vma); vma != NULL; + vma = next_vma(vma, gate_vma)) { + phdr.p_type = PT_E2K_TAGS; + phdr.p_offset = offset; + phdr.p_vaddr = vma->vm_start; + phdr.p_paddr = 0; + phdr.p_filesz = vma_dump_size(vma, mm_flags) / 16; + phdr.p_memsz = 0; + offset += phdr.p_filesz; + phdr.p_flags = 0; + phdr.p_align = 1; + if (!dump_emit(cprm, &phdr, sizeof(phdr))) + return 0; + } + return 1; +} + +int elf_core_write_extra_data(struct coredump_params *cprm) +{ + struct vm_area_struct *vma; + unsigned long mm_flags = cprm->mm_flags; + struct vm_area_struct *gate_vma = get_gate_vma(current->mm); + unsigned long addr; + unsigned long end; + struct page *page; + int stop; + + for (vma = first_vma(current, gate_vma); vma != NULL; + vma = next_vma(vma, gate_vma)) { + end = vma->vm_start + vma_dump_size(vma, mm_flags); + for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) { + page = get_dump_page(addr); + if (page) { + /* 2 bytes of tags correspond + * to 32 bytes of data */ + u16 tags[PAGE_SIZE / 32]; + void *kaddr = kmap(page); + int i; + + for (i = 0; i < PAGE_SIZE / 32; i++) { + extract_tags_32(&tags[i], + kaddr + 32 * i); + } + stop = !dump_emit(cprm, 
tags, sizeof(tags)); + kunmap(page); + put_page(page); + } else { + /* The last pages of CUT are not allocated + * and they might be skipped in tags section + * of core file, so we have to write the very + * last page to make sure that core file size + * is the same as declared in ELF headers. */ + if (addr == end - PAGE_SIZE) { + stop = !dump_emit(cprm, + (void *)empty_zero_page, + PAGE_SIZE / 16); + } else { + stop = !dump_skip(cprm, PAGE_SIZE / 16); + } + } + if (stop) + return 0; + } + } + return 1; +} + +size_t elf_core_extra_data_size(struct coredump_params *cprm) +{ + struct vm_area_struct *vma; + unsigned long mm_flags = cprm->mm_flags; + struct vm_area_struct *gate_vma = get_gate_vma(current->mm); + unsigned long addr; + unsigned long end; + size_t size = 0; + + for (vma = first_vma(current, gate_vma); vma != NULL; + vma = next_vma(vma, gate_vma)) { + end = vma->vm_start + vma_dump_size(vma, mm_flags); + for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) { + size += PAGE_SIZE / 16; + } + } + return size; +} diff --git a/arch/e2k/kernel/entry_user.S b/arch/e2k/kernel/entry_user.S new file mode 100644 index 000000000000..da2a206c3d07 --- /dev/null +++ b/arch/e2k/kernel/entry_user.S @@ -0,0 +1,127 @@ +#include +#include +#include +#include +#include +#include + +/* + * Functions to deal with execve() + * + * void switch_to_user_func(long dummy, + * start_fn start_func, e2k_size_t us_size, int cui); + * + */ + +.global $switch_to_user_func +.type switch_to_user_func,@function +$switch_to_user_func: + { + setwd wsz = 0x8, nfx = 0x1 + setbn rsz = 0x3, rbs = 0x4, rcur = 0x0 + setbp psz = 0x0 + + rrd %cr1.hi, %dr4 + ldd [ GCURTASK + TSK_TI + TI_K_USD_HI ], %dr5 + } + { + disp %ctpr1, $do_switch_to_user_func + + andd %dr4, 0xfffffffff, %dr4 + andd %dr5, 0xfffffff000000000, %dr5 + } + { + ord %dr4, %dr5, %dr4 + } + { + nop 4 + + /* Correct value in %cr1.hi (currently it holds + * user data stack size while cr0.hi holds kernel IP) */ + rwd %dr4, %cr1.hi + + 
addd 0, %dr1, %db[0] /* start_func */ + addd 0, %dr2, %db[1] /* us_size */ + addd 0, %dr3, %db[2] /* cui */ + } + { + /* Clear kernel information from user's registers */ + addd 0, 0, %db[3] + addd 0, 0, %db[4] + addd 0, 0, %db[5] + addd 0, 0, %db[6] + addd 0, 0, %db[7] + call %ctpr1, wbs = 0x4 + } +.size $switch_to_user_func, . - $switch_to_user_func + +#ifdef CONFIG_PROTECTED_MODE +/* + * void protected_switch_to_user_func(long r0, long r1, + * start_fn start_func, e2k_size_t us_size, int cui); + * + */ +.global $protected_switch_to_user_func +.type protected_switch_to_user_func,@function +$protected_switch_to_user_func: + { + setwd wsz = 0x9, nfx = 0x1 + setbn rsz = 0x3, rbs = 0x5, rcur = 0x0 + setbp psz = 0x0 + + rrd %cr1.hi, %dr5 + ldd [ GCURTASK + TSK_TI + TI_K_USD_HI ], %dr6 + } + { + disp %ctpr1, $do_switch_to_user_func + + andd %dr5, 0xfffffffff, %dr5 + andd %dr6, 0xfffffff000000000, %dr6 + + puttagd %dr0, 15, %dr0 + puttagd %dr1, 12, %dr1 + } + { + ord %dr5, %dr6, %dr5 + } + { + nop 4 + + /* Correct value in %cr1.hi (currently it holds + * user data stack size while cr0.hi holds kernel IP) */ + rwd %dr5, %cr1.hi + + addd 0, %dr2, %db[0] /* start_func */ + addd 0, %dr3, %db[1] /* us_size */ + addd 0, %dr4, %db[2] /* cui */ + } + + /* + * Set `rtld_fini' parameter to NULL so that the main executable's + * `_start ()' can distinguish between the cases when it's invoked + * directly by the kernel or ld.so. + */ + { + movtd 0x0, %dr2 + movtd 0x0, %dr3 + } + { + call %ctpr1, wbs = 0x5 + } +.size $protected_switch_to_user_func, . - $protected_switch_to_user_func +#endif /* CONFIG_PROTECTED_MODE */ + +/* + * Functions to deal with sys_{get/set}_backtrace() + */ + +.text +.global $sys_backtrace_return +.type sys_backtrace_return,@function +$sys_backtrace_return: + { + nop 3 + return %ctpr3 + } + ct %ctpr3 +.size $sys_backtrace_return, . 
- $sys_backtrace_return diff --git a/arch/e2k/kernel/fill_handler_entry.S b/arch/e2k/kernel/fill_handler_entry.S new file mode 100644 index 000000000000..5443f936d78b --- /dev/null +++ b/arch/e2k/kernel/fill_handler_entry.S @@ -0,0 +1,1348 @@ +#include + +.section ".entry.text", "ax" + +.global fill_handler_0 +fill_handler_0: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=0 + } +.global fill_handler_1 +fill_handler_1: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=1 + } +.global fill_handler_2 +fill_handler_2: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=2 + } +.global fill_handler_3 +fill_handler_3: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=3 + } +.global fill_handler_4 +fill_handler_4: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=4 + } +.global fill_handler_5 +fill_handler_5: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=5 + } +.global fill_handler_6 +fill_handler_6: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=6 + } +.global fill_handler_7 +fill_handler_7: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=7 + } +.global fill_handler_8 +fill_handler_8: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=8 + } +.global fill_handler_9 +fill_handler_9: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=9 + } +.global fill_handler_10 +fill_handler_10: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=10 + } +.global fill_handler_11 +fill_handler_11: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE 
+ } + { + call %ctpr1, wbs=11 + } +.global fill_handler_12 +fill_handler_12: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=12 + } +.global fill_handler_13 +fill_handler_13: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=13 + } +.global fill_handler_14 +fill_handler_14: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=14 + } +.global fill_handler_15 +fill_handler_15: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=15 + } +.global fill_handler_16 +fill_handler_16: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=16 + } +.global fill_handler_17 +fill_handler_17: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=17 + } +.global fill_handler_18 +fill_handler_18: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=18 + } +.global fill_handler_19 +fill_handler_19: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=19 + } +.global fill_handler_20 +fill_handler_20: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=20 + } +.global fill_handler_21 +fill_handler_21: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=21 + } +.global fill_handler_22 +fill_handler_22: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=22 + } +.global fill_handler_23 +fill_handler_23: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=23 + } +.global fill_handler_24 +fill_handler_24: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=24 + } +.global fill_handler_25 
+fill_handler_25: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=25 + } +.global fill_handler_26 +fill_handler_26: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=26 + } +.global fill_handler_27 +fill_handler_27: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=27 + } +.global fill_handler_28 +fill_handler_28: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=28 + } +.global fill_handler_29 +fill_handler_29: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=29 + } +.global fill_handler_30 +fill_handler_30: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=30 + } +.global fill_handler_31 +fill_handler_31: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=31 + } +.global fill_handler_32 +fill_handler_32: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=32 + } +.global fill_handler_33 +fill_handler_33: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=33 + } +.global fill_handler_34 +fill_handler_34: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=34 + } +.global fill_handler_35 +fill_handler_35: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=35 + } +.global fill_handler_36 +fill_handler_36: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=36 + } +.global fill_handler_37 +fill_handler_37: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=37 + } +.global fill_handler_38 +fill_handler_38: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd 
%osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=38 + } +.global fill_handler_39 +fill_handler_39: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=39 + } +.global fill_handler_40 +fill_handler_40: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=40 + } +.global fill_handler_41 +fill_handler_41: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=41 + } +.global fill_handler_42 +fill_handler_42: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=42 + } +.global fill_handler_43 +fill_handler_43: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=43 + } +.global fill_handler_44 +fill_handler_44: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=44 + } +.global fill_handler_45 +fill_handler_45: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=45 + } +.global fill_handler_46 +fill_handler_46: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=46 + } +.global fill_handler_47 +fill_handler_47: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=47 + } +.global fill_handler_48 +fill_handler_48: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=48 + } +.global fill_handler_49 +fill_handler_49: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=49 + } +.global fill_handler_50 +fill_handler_50: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=50 + } +.global fill_handler_51 +fill_handler_51: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=51 + } +.global 
fill_handler_52 +fill_handler_52: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=52 + } +.global fill_handler_53 +fill_handler_53: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=53 + } +.global fill_handler_54 +fill_handler_54: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=54 + } +.global fill_handler_55 +fill_handler_55: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=55 + } +.global fill_handler_56 +fill_handler_56: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=56 + } +.global fill_handler_57 +fill_handler_57: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=57 + } +.global fill_handler_58 +fill_handler_58: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=58 + } +.global fill_handler_59 +fill_handler_59: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=59 + } +.global fill_handler_60 +fill_handler_60: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=60 + } +.global fill_handler_61 +fill_handler_61: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=61 + } +.global fill_handler_62 +fill_handler_62: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=62 + } +.global fill_handler_63 +fill_handler_63: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=63 + } +.global fill_handler_64 +fill_handler_64: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=64 + } +.global fill_handler_65 +fill_handler_65: + { + movtd GVCPUSTATE, %ctpr1 + } + { + 
nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=65 + } +.global fill_handler_66 +fill_handler_66: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=66 + } +.global fill_handler_67 +fill_handler_67: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=67 + } +.global fill_handler_68 +fill_handler_68: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=68 + } +.global fill_handler_69 +fill_handler_69: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=69 + } +.global fill_handler_70 +fill_handler_70: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=70 + } +.global fill_handler_71 +fill_handler_71: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=71 + } +.global fill_handler_72 +fill_handler_72: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=72 + } +.global fill_handler_73 +fill_handler_73: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=73 + } +.global fill_handler_74 +fill_handler_74: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=74 + } +.global fill_handler_75 +fill_handler_75: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=75 + } +.global fill_handler_76 +fill_handler_76: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=76 + } +.global fill_handler_77 +fill_handler_77: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=77 + } +.global fill_handler_78 +fill_handler_78: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=78 + } +.global 
fill_handler_79 +fill_handler_79: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=79 + } +.global fill_handler_80 +fill_handler_80: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=80 + } +.global fill_handler_81 +fill_handler_81: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=81 + } +.global fill_handler_82 +fill_handler_82: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=82 + } +.global fill_handler_83 +fill_handler_83: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=83 + } +.global fill_handler_84 +fill_handler_84: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=84 + } +.global fill_handler_85 +fill_handler_85: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=85 + } +.global fill_handler_86 +fill_handler_86: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=86 + } +.global fill_handler_87 +fill_handler_87: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=87 + } +.global fill_handler_88 +fill_handler_88: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=88 + } +.global fill_handler_89 +fill_handler_89: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=89 + } +.global fill_handler_90 +fill_handler_90: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=90 + } +.global fill_handler_91 +fill_handler_91: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=91 + } +.global fill_handler_92 +fill_handler_92: + { + movtd GVCPUSTATE, %ctpr1 + } + { + 
nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=92 + } +.global fill_handler_93 +fill_handler_93: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=93 + } +.global fill_handler_94 +fill_handler_94: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=94 + } +.global fill_handler_95 +fill_handler_95: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=95 + } +.global fill_handler_96 +fill_handler_96: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=96 + } +.global fill_handler_97 +fill_handler_97: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=97 + } +.global fill_handler_98 +fill_handler_98: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=98 + } +.global fill_handler_99 +fill_handler_99: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=99 + } +.global fill_handler_100 +fill_handler_100: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=100 + } +.global fill_handler_101 +fill_handler_101: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=101 + } +.global fill_handler_102 +fill_handler_102: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=102 + } +.global fill_handler_103 +fill_handler_103: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=103 + } +.global fill_handler_104 +fill_handler_104: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=104 + } +.global fill_handler_105 +fill_handler_105: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, 
wbs=105 + } +.global fill_handler_106 +fill_handler_106: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=106 + } +.global fill_handler_107 +fill_handler_107: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=107 + } +.global fill_handler_108 +fill_handler_108: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=108 + } +.global fill_handler_109 +fill_handler_109: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=109 + } +.global fill_handler_110 +fill_handler_110: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=110 + } +.global fill_handler_111 +fill_handler_111: + { + movtd GVCPUSTATE, %ctpr1 + } + { + nop 3 + rrd %osr0, GVCPUSTATE + } + { + call %ctpr1, wbs=111 + } diff --git a/arch/e2k/kernel/ftrace.c b/arch/e2k/kernel/ftrace.c new file mode 100644 index 000000000000..a504e02462f4 --- /dev/null +++ b/arch/e2k/kernel/ftrace.c @@ -0,0 +1,581 @@ +/* + * Code for replacing ftrace calls with jumps. + * + * Copyright (C) 2007-2008 Steven Rostedt + * + * Thanks goes to Ingo Molnar, for suggesting the idea. + * Mathieu Desnoyers, for suggesting postponing the modifications. + * Arjan van de Ven, for keeping me straight, and explaining to me + * the dangers of modifying code on the run. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include + +#ifdef CONFIG_STACKTRACE + #include +#endif /* CONFIG_STACKTRACE */ + +#undef DEBUG_FTRACE_MODE +#undef DebugFTRACE +#define DEBUG_FTRACE_MODE 0 +#if DEBUG_FTRACE_MODE +#define DebugFTRACE(...) printk(__VA_ARGS__) +#else +#define DebugFTRACE(...) 
+#endif + +#define NO_ANY_USER 0 +#define ONLY_THIS_USER 1 +#define ALL_CLONE_USER 2 +#define ONLY_THIS_USER_WO_FP_REG 3 +#define ALL_CLONE_WO_FP_REG 4 + +#undef DBG +#undef DEBUG_STACK_TRACE +//#define DEBUG_STACK_TRACE + +#ifdef DEBUG_STACK_TRACE +# define DBG(fmt, args...) printk(fmt, ##args) +# define CHECK_STACK(x, reg) check_last_wish(x, reg) +# define CHECK_FLAGS(x) check_flags(x) +#else /* !DEBUG_STACK_TRACE */ +# define DBG(fmt, args...) +# define CHECK_STACK(x, reg) +# define CHECK_FLAGS(x) +#endif /* DEBUG_STACK_TRACE */ + +/* for debugging */ +#define MAX_ONCE 10 +#define MAX_1 0 +#define MAX_2 0 +#define MAX_3 0 +#define MAX_4 0 + +int ONCE[MAX_ONCE]; + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +extern void (*ftrace_graph_return)(struct ftrace_graph_ret *); +extern int (*ftrace_graph_entry)(struct ftrace_graph_ent *); +extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *); +#endif + +void __interrupt ftrace_stub(unsigned long selfpc, unsigned long frompc, + struct ftrace_ops *op, struct pt_regs *regs) +{ + return; +} + +extern ftrace_func_t ftrace_trace_function; + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +# if E2K_MAXSR > 112 +# error Bad configuration +# endif +extern void return_to_handler_0(void); +extern void return_to_handler_1(void); +extern void return_to_handler_2(void); +extern void return_to_handler_3(void); +extern void return_to_handler_4(void); +extern void return_to_handler_5(void); +extern void return_to_handler_6(void); +extern void return_to_handler_7(void); +extern void return_to_handler_8(void); +extern void return_to_handler_9(void); +extern void return_to_handler_10(void); +extern void return_to_handler_11(void); +extern void return_to_handler_12(void); +extern void return_to_handler_13(void); +extern void return_to_handler_14(void); +extern void return_to_handler_15(void); +extern void return_to_handler_16(void); +extern void return_to_handler_17(void); +extern void return_to_handler_18(void); +extern void 
return_to_handler_19(void); +extern void return_to_handler_20(void); +extern void return_to_handler_21(void); +extern void return_to_handler_22(void); +extern void return_to_handler_23(void); +extern void return_to_handler_24(void); +extern void return_to_handler_25(void); +extern void return_to_handler_26(void); +extern void return_to_handler_27(void); +extern void return_to_handler_28(void); +extern void return_to_handler_29(void); +extern void return_to_handler_30(void); +extern void return_to_handler_31(void); +extern void return_to_handler_32(void); +extern void return_to_handler_33(void); +extern void return_to_handler_34(void); +extern void return_to_handler_35(void); +extern void return_to_handler_36(void); +extern void return_to_handler_37(void); +extern void return_to_handler_38(void); +extern void return_to_handler_39(void); +extern void return_to_handler_40(void); +extern void return_to_handler_41(void); +extern void return_to_handler_42(void); +extern void return_to_handler_43(void); +extern void return_to_handler_44(void); +extern void return_to_handler_45(void); +extern void return_to_handler_46(void); +extern void return_to_handler_47(void); +extern void return_to_handler_48(void); +extern void return_to_handler_49(void); +extern void return_to_handler_50(void); +extern void return_to_handler_51(void); +extern void return_to_handler_52(void); +extern void return_to_handler_53(void); +extern void return_to_handler_54(void); +extern void return_to_handler_55(void); +extern void return_to_handler_56(void); +extern void return_to_handler_57(void); +extern void return_to_handler_58(void); +extern void return_to_handler_59(void); +extern void return_to_handler_60(void); +extern void return_to_handler_61(void); +extern void return_to_handler_62(void); +extern void return_to_handler_63(void); +extern void return_to_handler_64(void); +extern void return_to_handler_65(void); +extern void return_to_handler_66(void); +extern void return_to_handler_67(void); 
+extern void return_to_handler_68(void); +extern void return_to_handler_69(void); +extern void return_to_handler_70(void); +extern void return_to_handler_71(void); +extern void return_to_handler_72(void); +extern void return_to_handler_73(void); +extern void return_to_handler_74(void); +extern void return_to_handler_75(void); +extern void return_to_handler_76(void); +extern void return_to_handler_77(void); +extern void return_to_handler_78(void); +extern void return_to_handler_79(void); +extern void return_to_handler_80(void); +extern void return_to_handler_81(void); +extern void return_to_handler_82(void); +extern void return_to_handler_83(void); +extern void return_to_handler_84(void); +extern void return_to_handler_85(void); +extern void return_to_handler_86(void); +extern void return_to_handler_87(void); +extern void return_to_handler_88(void); +extern void return_to_handler_89(void); +extern void return_to_handler_90(void); +extern void return_to_handler_91(void); +extern void return_to_handler_92(void); +extern void return_to_handler_93(void); +extern void return_to_handler_94(void); +extern void return_to_handler_95(void); +extern void return_to_handler_96(void); +extern void return_to_handler_97(void); +extern void return_to_handler_98(void); +extern void return_to_handler_99(void); +extern void return_to_handler_100(void); +extern void return_to_handler_101(void); +extern void return_to_handler_102(void); +extern void return_to_handler_103(void); +extern void return_to_handler_104(void); +extern void return_to_handler_105(void); +extern void return_to_handler_106(void); +extern void return_to_handler_107(void); +extern void return_to_handler_108(void); +extern void return_to_handler_109(void); +extern void return_to_handler_110(void); +extern void return_to_handler_111(void); + +typedef void (*ftrace_return_handler_t)(void); + +static const ftrace_return_handler_t return_to_handlers_table[E2K_MAXSR] = { + &return_to_handler_0, &return_to_handler_1, 
&return_to_handler_2, + &return_to_handler_3, &return_to_handler_4, &return_to_handler_5, + &return_to_handler_6, &return_to_handler_7, &return_to_handler_8, + &return_to_handler_9, &return_to_handler_10, &return_to_handler_11, + &return_to_handler_12, &return_to_handler_13, &return_to_handler_14, + &return_to_handler_15, &return_to_handler_16, &return_to_handler_17, + &return_to_handler_18, &return_to_handler_19, &return_to_handler_20, + &return_to_handler_21, &return_to_handler_22, &return_to_handler_23, + &return_to_handler_24, &return_to_handler_25, &return_to_handler_26, + &return_to_handler_27, &return_to_handler_28, &return_to_handler_29, + &return_to_handler_30, &return_to_handler_31, &return_to_handler_32, + &return_to_handler_33, &return_to_handler_34, &return_to_handler_35, + &return_to_handler_36, &return_to_handler_37, &return_to_handler_38, + &return_to_handler_39, &return_to_handler_40, &return_to_handler_41, + &return_to_handler_42, &return_to_handler_43, &return_to_handler_44, + &return_to_handler_45, &return_to_handler_46, &return_to_handler_47, + &return_to_handler_48, &return_to_handler_49, &return_to_handler_50, + &return_to_handler_51, &return_to_handler_52, &return_to_handler_53, + &return_to_handler_54, &return_to_handler_55, &return_to_handler_56, + &return_to_handler_57, &return_to_handler_58, &return_to_handler_59, + &return_to_handler_60, &return_to_handler_61, &return_to_handler_62, + &return_to_handler_63, &return_to_handler_64, &return_to_handler_65, + &return_to_handler_66, &return_to_handler_67, &return_to_handler_68, + &return_to_handler_69, &return_to_handler_70, &return_to_handler_71, + &return_to_handler_72, &return_to_handler_73, &return_to_handler_74, + &return_to_handler_75, &return_to_handler_76, &return_to_handler_77, + &return_to_handler_78, &return_to_handler_79, &return_to_handler_80, + &return_to_handler_81, &return_to_handler_82, &return_to_handler_83, + &return_to_handler_84, &return_to_handler_85, 
&return_to_handler_86, + &return_to_handler_87, &return_to_handler_88, &return_to_handler_89, + &return_to_handler_90, &return_to_handler_91, &return_to_handler_92, + &return_to_handler_93, &return_to_handler_94, &return_to_handler_95, + &return_to_handler_96, &return_to_handler_97, &return_to_handler_98, + &return_to_handler_99, &return_to_handler_100, &return_to_handler_101, + &return_to_handler_102, &return_to_handler_103, &return_to_handler_104, + &return_to_handler_105, &return_to_handler_106, &return_to_handler_107, + &return_to_handler_108, &return_to_handler_109, &return_to_handler_110, + &return_to_handler_111 +}; + + +__noreturn void panic_ftrace_graph_cr(void) +{ + int i; + + if (current->ret_stack) { + for (i = 0; i <= current->curr_ret_stack; i++) + pr_emerg("%d: %pS -> %pS\n", i, + (void *) current->ret_stack[i].ret, + (void *) current->ret_stack[i].func); + } else { + pr_emerg("no ret_stack in return_to_handler\n"); + } + + panic("BUG in ftrace - returned to return_to_handler!\n"); +} +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + + +#ifdef CONFIG_DYNAMIC_FTRACE +int __init ftrace_dyn_arch_init(void) +{ + return 0; +} + +/* + * Since we modify code with one atomic store, there is no need for + * stop_machine() call: at any given point of time the code being + * modified is correct. 
+ */ +void arch_ftrace_update_code(int command) +{ + ftrace_modify_all_code(command); + + /* Use IPI if icache flush works only on local cpu */ + if (!cpu_has(CPU_FEAT_FLUSH_DC_IC)) + flush_icache_all(); +} + +# define SS_CT_SHIFT 5 +# define SS_CT_MASK (0xf << SS_CT_SHIFT) + +#ifdef CONFIG_STACKTRACE +static unsigned long init_trace_data_enabled[100]; + +static struct stack_trace saved_trace_enabled = { + .nr_entries = 0, + .max_entries = ARRAY_SIZE(init_trace_data_enabled), + .entries = init_trace_data_enabled, + .skip = 0, +}; + +static unsigned long init_trace_data_disabled[100]; + +static struct stack_trace saved_trace_disabled = { + .nr_entries = 0, + .max_entries = ARRAY_SIZE(init_trace_data_disabled), + .entries = init_trace_data_disabled, + .skip = 0, +}; +#endif /* CONFIG_STACKTRACE */ + +static inline int e2k_modify_call(const unsigned long addr, + const unsigned long ip, + const unsigned long phys_ip, + const int enable) +{ + union { + struct { + instr_hs_t HS; + instr_ss_t SS; + }; + unsigned long instr_word; + } instruction; + +# if DEBUG_FTRACE_MODE + if (enable) + DebugFTRACE("Enabling _mcount at %lx (phys %lx)\n", + ip, phys_ip); + else + DebugFTRACE("Disabling _mcount at %lx (phys %lx)\n", + ip, phys_ip); +# endif + + if (addr != FTRACE_ADDR) { + pr_info("Passed addr is %lx instead of %lx\n", + addr, FTRACE_ADDR); + return -EINVAL; + } + + /* Read the header and stubs syllables. */ + instruction.instr_word = read_instr_on_IP(ip, phys_ip); + + /* Check that the stubs syllable is present. */ + if (!instruction.HS.s) { + pr_info("Instruction at %lx does not have stubs syllable!\n" + "Code: %llx\n", ip, *((u64 *) &instruction)); + return -EINVAL; + } + + /* Sanity check: test that the CS1 syllable + * (which contains actual call) is present. 
*/ + if (!(instruction.HS.c & 2)) { + pr_info("Instruction at %lx does not have CS1 syllable!\n" + "Code: %llx\n", ip, *((u64 *) &instruction)); + return -EINVAL; + } + + if (enable) { + /* Check that the condition is not 1 already. */ + if (((instruction.SS.ctcond & SS_CT_MASK) >> SS_CT_SHIFT) == 1) { +#ifdef CONFIG_STACKTRACE + /* FIXME */ + extern struct stack_trace saved_trace_enabled; + printk(" stack_trace_print saved_trace_enabled\n"); + stack_trace_print(saved_trace_enabled.entries, + saved_trace_enabled.nr_entries, 0); +#endif /* CONFIG_STACKTRACE */ + printk("Enabling _mcount at %lx (phys %lx)\n", + ip, phys_ip); + WARN_ON(1); + return -EINVAL; + } + + /* Set the condition to 1. */ + instruction.SS.ctcond &= ~SS_CT_MASK; + instruction.SS.ctcond |= 1 << SS_CT_SHIFT; + } else { + /* Check that the condition is not 0 already. */ + if ((instruction.SS.ctcond & SS_CT_MASK) == 0) { +#ifdef CONFIG_STACKTRACE + /* FIXME */ + extern struct stack_trace saved_trace_enabled; + extern struct stack_trace saved_trace_disabled; + printk(" stack_trace_print saved_trace_disabled\n"); + stack_trace_print(saved_trace_disabled.entries, + saved_trace_disabled.nr_entries, 0); + printk(" stack_trace_print saved_trace_enabled\n"); + stack_trace_print(saved_trace_enabled.entries, + saved_trace_enabled.nr_entries, 0); +#endif /* CONFIG_STACKTRACE */ + printk("Disabling _mcount at %lx (phys %lx)\n", + ip, phys_ip); + WARN_ON(1); + return -EINVAL; + } + + /* Set the condition to 0. */ + instruction.SS.ctcond &= ~SS_CT_MASK; + } + + /* Write the modified syllable. 
*/ + modify_instr_on_IP(ip, phys_ip, instruction.instr_word); + + /* Flush the instruction cache for the node being modified */ + if (cpu_has(CPU_FEAT_FLUSH_DC_IC)) { + unsigned long flush_addr = (unsigned long) __va(phys_ip); + flush_icache_range(flush_addr, flush_addr + 8); + } + + return 0; +} + +int ftrace_make_nop(struct module *mod, + struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned long ip = rec->ip, phys_ip; + int node, ret = 0; + + for_each_node_has_dup_kernel(node) { + phys_ip = node_kernel_address_to_phys(node, ip); + if (phys_ip == -EINVAL) { + ret = -EFAULT; + WARN_ON_ONCE(1); + break; + } + + ret = e2k_modify_call(addr, ip, phys_ip, 0); + if (ret) + return ret; + + if (!THERE_IS_DUP_KERNEL) + break; + + /* Modules are not duplicated */ + if (!is_duplicated_code(ip)) + break; + } + + return ret; +} + +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned long ip = rec->ip, phys_ip; + int node, ret = 0; + + for_each_node_has_dup_kernel(node) { + phys_ip = node_kernel_address_to_phys(node, ip); + if (phys_ip == -EINVAL) { + ret = -EFAULT; + WARN_ON_ONCE(1); + break; + } + + ret = e2k_modify_call(addr, ip, phys_ip, 1); + if (ret) + return ret; + + if (!THERE_IS_DUP_KERNEL) + break; + + /* Modules are not duplicated */ + if (!is_duplicated_code(ip)) + break; + } + + return ret; +} + +static ftrace_func_t current_tracer_function = &ftrace_stub; + +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + current_tracer_function = func; + + return 0; +} + +# ifdef CONFIG_FUNCTION_GRAPH_TRACER +static int ftrace_graph_caller_enabled = 0; + +int ftrace_enable_ftrace_graph_caller(void) +{ + ftrace_graph_caller_enabled = 1; + + return 0; +} + +int ftrace_disable_ftrace_graph_caller(void) +{ + ftrace_graph_caller_enabled = 0; + + return 0; +} +# endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + +__kprobes +void _mcount(const e2k_cr0_hi_t frompc) +{ + unsigned long selfpc; + + selfpc = NATIVE_NV_READ_CR0_HI_REG_VALUE(); + + if 
(current_tracer_function != ftrace_stub) + current_tracer_function(selfpc, AW(frompc), + function_trace_op, NULL); + +# ifdef CONFIG_FUNCTION_GRAPH_TRACER + if (ftrace_graph_caller_enabled) { + unsigned long flags; + e2k_mem_crs_t *frame; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_cr0_hi_t *parent; + u64 wbs; + int index; + + if (unlikely(atomic_read(¤t->tracing_graph_pause))) + return; + + raw_all_irq_save(flags); + E2K_FLUSHC; + pcsp_hi = READ_PCSP_HI_REG(); + pcsp_lo = READ_PCSP_LO_REG(); + + /* Find frame of the function being traced. */ + frame = ((e2k_mem_crs_t *) (AS(pcsp_lo).base + + AS(pcsp_hi).ind)) - 1; + parent = &frame->cr0_hi; + + wbs = frame->cr1_lo.fields.wbs; + + if (unlikely(frame->cr1_lo.fields.wpsz > 4 || + wbs >= E2K_MAXSR)) { + static int once = 1; + /* return_to_hook() currently assumes that the + * parameters area is 8 registers long. This is + * in accordance with existing conventions. */ + if (once) { + once = 0; + pr_err("Bug in ftrace - psize(%d) is not 4 or wbs(%lld) is too big!\n", + frame->cr1_lo.fields.wpsz, wbs); + WARN_ON(1); + goto out; + } + } + + /* Leaf call optimization leads to having two calls on the same + * level and only one return. So we skip the second call. 
*/ + index = current->curr_ret_stack; + if (current->ret_stack && index >= 0 && + index < FTRACE_RETFUNC_DEPTH && + current->ret_stack[index].fp == (u64) frame) + goto out; + + ASP(parent).ip = ((u64) return_to_handlers_table[wbs]) >> 3; + + if (unlikely(function_graph_enter(AW(frompc), selfpc, + (unsigned long)frame, + (unsigned long *)parent) == -EBUSY)) { + E2K_FLUSHC; + *parent = frompc; + } + +out: + raw_all_irq_restore(flags); + } +# endif /* CONFIG_FUNCTION_GRAPH_TRACER */ +} +EXPORT_SYMBOL(_mcount); +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_FTRACE_SYSCALLS +extern system_call_func sys_call_table[]; + +unsigned long __init arch_syscall_addr(int nr) +{ + return (unsigned long)sys_call_table[nr]; +} +#endif + diff --git a/arch/e2k/kernel/ftrace_graph_entry.S b/arch/e2k/kernel/ftrace_graph_entry.S new file mode 100644 index 000000000000..026b9a4c3b11 --- /dev/null +++ b/arch/e2k/kernel/ftrace_graph_entry.S @@ -0,0 +1,802 @@ +#include + +/* + * This function cannot be implemented in C because we don't know whether + * the original function that was replaced by this hook should return + * any value. If it returns value, then it is in dr0-dr7 and these + * registers should not be changed. There is no way to make compiler + * obey this in C, so the only solution is to use assembler (assembler + * is also better performance-wise). 
+ * + * dr0-dr7 - original return value, should not be changed + * db[0] - used to load current->curr_ret_stack and compare it with 0 + * r8 - original value of upsr + * r9 - new value of upsr + * db[1] - stores base filed from pcsp.lo + * db[2] - stores cr_ind field from pcsp.hi + * db[3] - stores pcshtp + * + * (db[1], db[2] and db[3] are used this way only when + * CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is set) + */ + +/* This is what return_to_hook does: +u64 return_to_hook(u64 dr0, u64 dr1, u64 dr2, u64 dr3, + u64 dr4, u64 dr5, u64 dr6, u64 dr7) +{ + u64 original_return_point; + unsigned long flags; +#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_pcshtp_t pcshtp; + u64 cr_ind; + u64 base; +#endif + + if (current->curr_ret_stack < 0) + return; + + raw_all_irq_save(flags); +#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST + pcsp_lo = READ_PCSP_LO_REG(); + pcsp_hi = READ_PCSP_HI_REG(); + base = AS(pcsp_lo).base; + cr_ind = AS_STRUCT(pcsp_hi).ind; + pcshtp = READ_PCSHTP_REG_SVALUE(); + + original_return_point = ftrace_return_to_handler(base + + cr_ind + pcshtp); +#else + original_return_point = ftrace_return_to_handler(0); +#endif + E2K_SET_DSREG(cr0.hi, original_return_point); + raw_all_irq_restore(flags); +}*/ + +.global return_to_hook +.type return_to_hook,@function +return_to_hook: +{ + setwd wsz = 0x9, nfx = 0x1 + /* Allocate space for the first argument + * of ftrace_return_to_handler(). */ + getsp -16, %empty + setbn rsz = 0x3, rbs = 0x5, rcur = 0x0 +} +{ + rrs %upsr, %r8 + + disp %ctpr1, $ftrace_return_to_handler + + /* Avoid touching dr0-dr7, use r8, r9 and db[] instead */ + ldw [ %g17 + TSK_CURR_RET_STACK ], %b[0] +} +{ + nop 2 + + cmplsb %b[0], 0, %pred0 + + return %ctpr3 + ipd 0 +} +{ + ands %r8, _f16s,_lts0lo 0xff5f, %r9 + + ct %ctpr3 ? 
%pred0 +} +{ + nop 4 + rws %r9, %upsr +} +#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST + rrd %pcshtp, %db[3] + rrd %pcsp.lo, %db[1] +{ + rrd %pcsp.hi, %db[2] + shld %db[3], 53, %db[3] +} +{ + andd %db[1], 0x0000ffffffffffff, %db[1] + sard %db[3], 53, %db[3] +} +{ + addd %db[3], %db[1], %db[0] + getfd %db[2], _f16s,_lts0hi 0x6800, %db[2] +} +{ + /* There must be three commands between reading pcsp and a call */ + addd %db[0], %db[2], %db[0] +} +{ + call %ctpr1, wbs = 0x5 +} +#else + /* db[0] is initialized already and it's + * value in this case does not matter. */ + call %ctpr1, wbs = 0x5 +#endif + /* Order is important: first we change cr0.hi + * and then use it in return. */ + rwd %db[0], %cr0.hi +{ + nop 4 + ipd 2 + return %ctpr3 + rws %r8, %upsr +} + ct %ctpr3 +.size return_to_hook, .-return_to_hook +.global panic_ftrace_graph_cr +.global return_to_handler_0 +return_to_handler_0: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=0 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=0 +.global return_to_handler_1 +return_to_handler_1: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=1 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=1 +.global return_to_handler_2 +return_to_handler_2: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=2 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=2 +.global return_to_handler_3 +return_to_handler_3: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=3 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=3 +.global return_to_handler_4 +return_to_handler_4: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=4 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=4 +.global return_to_handler_5 +return_to_handler_5: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=5 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=5 +.global return_to_handler_6 +return_to_handler_6: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=6 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=6 +.global return_to_handler_7 
+return_to_handler_7: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=7 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=7 +.global return_to_handler_8 +return_to_handler_8: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=8 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=8 +.global return_to_handler_9 +return_to_handler_9: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=9 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=9 +.global return_to_handler_10 +return_to_handler_10: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=10 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=10 +.global return_to_handler_11 +return_to_handler_11: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=11 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=11 +.global return_to_handler_12 +return_to_handler_12: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=12 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=12 +.global return_to_handler_13 +return_to_handler_13: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=13 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=13 +.global return_to_handler_14 +return_to_handler_14: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=14 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=14 +.global return_to_handler_15 +return_to_handler_15: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=15 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=15 +.global return_to_handler_16 +return_to_handler_16: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=16 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=16 +.global return_to_handler_17 +return_to_handler_17: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=17 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=17 +.global return_to_handler_18 +return_to_handler_18: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=18 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=18 +.global return_to_handler_19 +return_to_handler_19: + disp %ctpr1, 
return_to_hook + call %ctpr1, wbs=19 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=19 +.global return_to_handler_20 +return_to_handler_20: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=20 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=20 +.global return_to_handler_21 +return_to_handler_21: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=21 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=21 +.global return_to_handler_22 +return_to_handler_22: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=22 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=22 +.global return_to_handler_23 +return_to_handler_23: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=23 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=23 +.global return_to_handler_24 +return_to_handler_24: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=24 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=24 +.global return_to_handler_25 +return_to_handler_25: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=25 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=25 +.global return_to_handler_26 +return_to_handler_26: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=26 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=26 +.global return_to_handler_27 +return_to_handler_27: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=27 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=27 +.global return_to_handler_28 +return_to_handler_28: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=28 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=28 +.global return_to_handler_29 +return_to_handler_29: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=29 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=29 +.global return_to_handler_30 +return_to_handler_30: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=30 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=30 +.global return_to_handler_31 +return_to_handler_31: + disp %ctpr1, return_to_hook + call %ctpr1, 
wbs=31 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=31 +.global return_to_handler_32 +return_to_handler_32: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=32 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=32 +.global return_to_handler_33 +return_to_handler_33: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=33 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=33 +.global return_to_handler_34 +return_to_handler_34: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=34 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=34 +.global return_to_handler_35 +return_to_handler_35: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=35 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=35 +.global return_to_handler_36 +return_to_handler_36: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=36 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=36 +.global return_to_handler_37 +return_to_handler_37: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=37 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=37 +.global return_to_handler_38 +return_to_handler_38: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=38 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=38 +.global return_to_handler_39 +return_to_handler_39: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=39 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=39 +.global return_to_handler_40 +return_to_handler_40: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=40 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=40 +.global return_to_handler_41 +return_to_handler_41: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=41 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=41 +.global return_to_handler_42 +return_to_handler_42: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=42 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=42 +.global return_to_handler_43 +return_to_handler_43: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=43 + disp %ctpr1, 
panic_ftrace_graph_cr + call %ctpr1, wbs=43 +.global return_to_handler_44 +return_to_handler_44: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=44 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=44 +.global return_to_handler_45 +return_to_handler_45: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=45 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=45 +.global return_to_handler_46 +return_to_handler_46: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=46 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=46 +.global return_to_handler_47 +return_to_handler_47: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=47 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=47 +.global return_to_handler_48 +return_to_handler_48: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=48 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=48 +.global return_to_handler_49 +return_to_handler_49: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=49 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=49 +.global return_to_handler_50 +return_to_handler_50: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=50 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=50 +.global return_to_handler_51 +return_to_handler_51: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=51 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=51 +.global return_to_handler_52 +return_to_handler_52: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=52 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=52 +.global return_to_handler_53 +return_to_handler_53: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=53 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=53 +.global return_to_handler_54 +return_to_handler_54: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=54 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=54 +.global return_to_handler_55 +return_to_handler_55: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=55 + disp %ctpr1, panic_ftrace_graph_cr + call 
%ctpr1, wbs=55 +.global return_to_handler_56 +return_to_handler_56: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=56 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=56 +.global return_to_handler_57 +return_to_handler_57: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=57 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=57 +.global return_to_handler_58 +return_to_handler_58: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=58 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=58 +.global return_to_handler_59 +return_to_handler_59: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=59 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=59 +.global return_to_handler_60 +return_to_handler_60: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=60 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=60 +.global return_to_handler_61 +return_to_handler_61: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=61 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=61 +.global return_to_handler_62 +return_to_handler_62: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=62 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=62 +.global return_to_handler_63 +return_to_handler_63: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=63 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=63 +.global return_to_handler_64 +return_to_handler_64: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=64 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=64 +.global return_to_handler_65 +return_to_handler_65: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=65 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=65 +.global return_to_handler_66 +return_to_handler_66: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=66 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=66 +.global return_to_handler_67 +return_to_handler_67: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=67 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=67 +.global 
return_to_handler_68 +return_to_handler_68: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=68 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=68 +.global return_to_handler_69 +return_to_handler_69: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=69 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=69 +.global return_to_handler_70 +return_to_handler_70: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=70 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=70 +.global return_to_handler_71 +return_to_handler_71: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=71 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=71 +.global return_to_handler_72 +return_to_handler_72: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=72 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=72 +.global return_to_handler_73 +return_to_handler_73: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=73 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=73 +.global return_to_handler_74 +return_to_handler_74: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=74 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=74 +.global return_to_handler_75 +return_to_handler_75: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=75 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=75 +.global return_to_handler_76 +return_to_handler_76: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=76 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=76 +.global return_to_handler_77 +return_to_handler_77: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=77 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=77 +.global return_to_handler_78 +return_to_handler_78: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=78 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=78 +.global return_to_handler_79 +return_to_handler_79: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=79 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=79 +.global return_to_handler_80 
+return_to_handler_80: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=80 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=80 +.global return_to_handler_81 +return_to_handler_81: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=81 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=81 +.global return_to_handler_82 +return_to_handler_82: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=82 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=82 +.global return_to_handler_83 +return_to_handler_83: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=83 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=83 +.global return_to_handler_84 +return_to_handler_84: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=84 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=84 +.global return_to_handler_85 +return_to_handler_85: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=85 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=85 +.global return_to_handler_86 +return_to_handler_86: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=86 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=86 +.global return_to_handler_87 +return_to_handler_87: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=87 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=87 +.global return_to_handler_88 +return_to_handler_88: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=88 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=88 +.global return_to_handler_89 +return_to_handler_89: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=89 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=89 +.global return_to_handler_90 +return_to_handler_90: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=90 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=90 +.global return_to_handler_91 +return_to_handler_91: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=91 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=91 +.global return_to_handler_92 +return_to_handler_92: + disp 
%ctpr1, return_to_hook + call %ctpr1, wbs=92 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=92 +.global return_to_handler_93 +return_to_handler_93: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=93 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=93 +.global return_to_handler_94 +return_to_handler_94: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=94 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=94 +.global return_to_handler_95 +return_to_handler_95: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=95 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=95 +.global return_to_handler_96 +return_to_handler_96: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=96 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=96 +.global return_to_handler_97 +return_to_handler_97: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=97 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=97 +.global return_to_handler_98 +return_to_handler_98: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=98 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=98 +.global return_to_handler_99 +return_to_handler_99: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=99 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=99 +.global return_to_handler_100 +return_to_handler_100: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=100 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=100 +.global return_to_handler_101 +return_to_handler_101: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=101 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=101 +.global return_to_handler_102 +return_to_handler_102: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=102 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=102 +.global return_to_handler_103 +return_to_handler_103: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=103 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=103 +.global return_to_handler_104 +return_to_handler_104: + disp %ctpr1, 
return_to_hook + call %ctpr1, wbs=104 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=104 +.global return_to_handler_105 +return_to_handler_105: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=105 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=105 +.global return_to_handler_106 +return_to_handler_106: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=106 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=106 +.global return_to_handler_107 +return_to_handler_107: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=107 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=107 +.global return_to_handler_108 +return_to_handler_108: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=108 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=108 +.global return_to_handler_109 +return_to_handler_109: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=109 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=109 +.global return_to_handler_110 +return_to_handler_110: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=110 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=110 +.global return_to_handler_111 +return_to_handler_111: + disp %ctpr1, return_to_hook + call %ctpr1, wbs=111 + disp %ctpr1, panic_ftrace_graph_cr + call %ctpr1, wbs=111 diff --git a/arch/e2k/kernel/getsp.c b/arch/e2k/kernel/getsp.c new file mode 100644 index 000000000000..c5006d4408d8 --- /dev/null +++ b/arch/e2k/kernel/getsp.c @@ -0,0 +1,292 @@ +/* + * arch/e2k/kernel/getsp.c + * + * GETSP operation parser + * + * Copyright 2017 Pavel V. Panteleev (panteleev_p@mcst.ru) + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + + +#undef DEBUG_US_EXPAND +#undef DebugUS +#define DEBUG_US_EXPAND 0 /* User stacks */ +#define DebugUS(...) 
DebugPrint(DEBUG_US_EXPAND ,##__VA_ARGS__) + + +static int parse_getsp_literal_operand(e2k_addr_t trap_ip, instr_hs_t hs, + instr_als_t als0, int *incr, void __user **fault_addr) +{ + instr_syl_t *lts; + int lts_num, lts_mask = 0, lts_shift = 0; + bool lts_sign_ext = false; + + lts_num = als0.alf2.src2 & INSTR_SRC2_LTS_NUM_MASK; + + if ((als0.alf2.src2 & INSTR_SRC2_BIT_MASK) == INSTR_SRC2_16BIT_VALUE) { + WARN_ON_ONCE(lts_num > 1); + if (als0.alf2.src2 & INSTR_SRC2_LTS_SHIFT_MASK) { + lts_shift = INSTR_LTS_16BIT_SHIFT; + lts_mask = INSTR_LTS_16BIT_SHIFT_MASK; + lts_sign_ext = false; + } else { + lts_shift = INSTR_LTS_16BIT_NOSHIFT; + lts_mask = INSTR_LTS_16BIT_NOSHIFT_MASK; + lts_sign_ext = true; + } + } else if ((als0.alf2.src2 & INSTR_SRC2_BIT_MASK) == + INSTR_SRC2_32BIT_VALUE) { + lts_mask = INSTR_LTS_32BIT_MASK; + lts_shift = INSTR_LTS_32BIT_SHIFT; + lts_sign_ext = false; + } else { + DebugUS("not known literal operand\n"); + return -EINVAL; + } + + lts = &E2K_GET_INSTR_SYL(trap_ip, (hs.lng + 1) * 2 - hs.pl - hs.cd - lts_num - 1); + if (get_user(*incr, lts)) { + *fault_addr = lts; + return -EFAULT; + } + + DebugUS("LTS%d=0x%x lng=%d pl=%d cd=%d lts_shift=0x%x lts_mask=0x%x\n", + lts_num, *incr, hs.lng, hs.pl, hs.cd, + lts_shift, lts_mask); + + *incr = ((s32) ((*incr) & lts_mask)) >> lts_shift; + if (lts_sign_ext) + *incr = (((s32) *incr) << INSTR_LTS_16BIT_SHIFT) >> + INSTR_LTS_16BIT_SHIFT; + + return 0; +} + +static int get_getsp_greg(int greg_num, int *greg) +{ + struct thread_info* ti = current_thread_info(); + register u32 gr; + u32 tag; + + switch (greg_num) { + case 16: + E2K_LOAD_VAL_AND_TAG(&ti->k_gregs.g[0].base, gr, tag); + break; + case 17: + E2K_LOAD_VAL_AND_TAG(&ti->k_gregs.g[1].base, gr, tag); + break; + case 18: + E2K_LOAD_VAL_AND_TAG(&ti->k_gregs.g[2].base, gr, tag); + break; + case 19: + E2K_LOAD_VAL_AND_TAG(&ti->k_gregs.g[3].base, gr, tag); + break; + case 20: + E2K_GET_GREG_VAL_AND_TAG(20, gr, tag); + break; + case 21: + 
E2K_GET_GREG_VAL_AND_TAG(21, gr, tag); + break; + case 22: + E2K_GET_GREG_VAL_AND_TAG(22, gr, tag); + break; + case 23: + E2K_GET_GREG_VAL_AND_TAG(23, gr, tag); + break; + case 24: + E2K_GET_GREG_VAL_AND_TAG(24, gr, tag); + break; + case 25: + E2K_GET_GREG_VAL_AND_TAG(25, gr, tag); + break; + case 26: + E2K_GET_GREG_VAL_AND_TAG(26, gr, tag); + break; + case 27: + E2K_GET_GREG_VAL_AND_TAG(27 , gr, tag); + break; + case 28: + E2K_GET_GREG_VAL_AND_TAG(28, gr, tag); + break; + case 29: + E2K_GET_GREG_VAL_AND_TAG(29, gr, tag); + break; + case 30: + E2K_GET_GREG_VAL_AND_TAG(30, gr, tag); + break; + case 31: + E2K_GET_GREG_VAL_AND_TAG(31, gr, tag); + break; + default: + DebugUS("Invalid greg_num %d\n", greg_num); + return -ESRCH; + } + + if (tag) { + DebugUS("Invalid tag 0x%x for greg num %d with greg val %u\n", + tag, greg_num, gr); + return -EINVAL; + } + + *greg = gr; + + return 0; +} + +static int parse_getsp_greg_operand(instr_als_t als0, int *incr) +{ + int greg_num; + e2k_bgr_t bgr, oldbgr; + unsigned long flags; + int ret; + + greg_num = als0.alf2.src2 & INSTR_SRC2_GREG_NUM_MASK; + + raw_local_irq_save(flags); + + bgr = READ_BGR_REG(); + oldbgr = bgr; + bgr.BGR_val = E2K_INITIAL_BGR_VAL; + WRITE_BGR_REG_VALUE(AW(bgr)); + + if ((ret = get_getsp_greg(greg_num, incr))) + DebugUS("greg num %d, greg val 0x%x\n", greg_num, *incr); + + WRITE_BGR_REG_VALUE(AW(oldbgr)); + + raw_local_irq_restore(flags); + + return ret; +} + +static int parse_getsp_reg_operand(instr_src_t src2, const struct pt_regs *regs, + int *incr, void __user **fault_addr) +{ + unsigned long ps_top = AS(regs->stacks.psp_lo).base + AS(regs->stacks.psp_hi).ind; + unsigned long u_ps_top = ps_top - GET_PSHTP_MEM_INDEX(regs->stacks.pshtp); + int ind_d, offset_d, ret; + unsigned long raddr, flags; + + if (!src2.rt7) { + /* Instruction set 6.3.1.1 */ + e2k_br_t br = { .word = AS(regs->crs.cr1_hi).br }; + int rnum_d = AW(src2) & ~0x80; + ind_d = 2 * br.rbs + (2 * br.rcur + rnum_d) % br_rsz_full_d(br); + } else 
{
+		/* Instruction set 6.3.1.2 */
+		ind_d = AW(src2) & ~0xc0;
+	}
+
+	offset_d = 2 * AS(regs->crs.cr1_lo).wbs - ind_d;
+	raddr = ps_top - ((offset_d + 1) / 2) * 32;
+	if (offset_d % 2)
+		raddr += ((machine.native_iset_ver < E2K_ISET_V5) ? 8 : 16);
+	if (raddr < PAGE_OFFSET && raddr >= u_ps_top)
+		raddr += AS(current_thread_info()->k_psp_lo).base - u_ps_top;
+
+	raw_all_irq_save(flags);
+	COPY_STACKS_TO_MEMORY();
+	raw_all_irq_restore(flags);
+	ret = __get_user(*incr, (int __user *) raddr);
+	if (ret)
+		*fault_addr = (int __user *) raddr;
+	return ret;
+}
+
+enum getsp_action parse_getsp_operation(const struct pt_regs *regs, int *incr,
+		void __user **fault_addr)
+{
+	instr_syl_t __user *syl;
+	instr_hs_t hs;
+	instr_als_t als0 = { .word = 0 };
+	instr_ales_t ales0 = { .word = 0 };
+	instr_src_t src2;
+	e2k_tir_hi_t tir_hi = { .word = regs->trap->TIR_hi };
+	e2k_tir_lo_t tir_lo = { .word = regs->trap->TIR_lo };
+	unsigned long trap_ip = tir_lo.TIR_lo_ip;
+	int ret;
+
+	*incr = USER_C_STACK_BYTE_INCR;
+
+	DebugUS("started for IP 0x%lx, TIR_hi_al 0x%x\n", trap_ip, tir_hi.TIR_hi_al);
+	if (!(tir_hi.TIR_hi_al & ALS0_mask)) {
+		DebugUS("exception is not for ALS0\n");
+		return GETSP_OP_FAIL;
+	}
+
+	syl = &E2K_GET_INSTR_HS(trap_ip);
+	if (get_user(AW(hs), syl)) {
+		*fault_addr = syl;
+		return GETSP_OP_SIGSEGV;
+	}
+	if (!hs.al0) {
+		DebugUS("missing ALS0 Syllable: 0x%08x\n", AW(hs));
+		return GETSP_OP_FAIL;
+	}
+
+	syl = &E2K_GET_INSTR_ALS0(trap_ip, hs.s);
+	if (get_user(AW(als0), syl)) {
+		*fault_addr = syl;
+		return GETSP_OP_SIGSEGV;
+	}
+	DebugUS("ALS0 syllable 0x%08x get from addr 0x%px\n", AW(als0), syl);
+
+	/* Reject anything that is not GETSP (which must announce its EXT
+	 * half-syllable via HS.ale0) or GETSAP operating on USD. */
+	if ((als0.alf2.cop != GETSP_ALS_COP && als0.alf2.cop != DRTOAP_ALS_COP) ||
+			(als0.alf2.cop == GETSP_ALS_COP && !hs.ale0) ||
+			als0.alf2.opce != USD_ALS_OPCE) {
+		DebugUS("ALS0 0x%x is neither GETSP nor GETSAP\n", AW(als0));
+		return GETSP_OP_FAIL;
+	}
+	/* Was "als0.alf2.opc": typo for the .cop field checked above; the
+	 * ALES0 half-syllable exists only for GETSP (guaranteed by hs.ale0
+	 * check in the guard), never for DRTOAP/GETSAP. */
+	if (als0.alf2.cop == GETSP_ALS_COP) {
+		instr_semisyl_t __user *half_syl = &E2K_GET_INSTR_ALES0(trap_ip, hs.mdl);
+ if (get_user(AW(ales0), half_syl)) { + *fault_addr = half_syl; + return GETSP_OP_SIGSEGV; + } + DebugUS("ALES0 syllable 0x%04x get from addr 0x%px\n", AW(ales0), half_syl); + if (ales0.alef2.opc2 != EXT_ALES_OPC2) { + DebugUS("ALES0 opcode #2 0x%02x is not EXT, so it is not GETSP\n", + ales0.alef2.opc2); + return GETSP_OP_FAIL; + } + } + + AW(src2) = als0.alf2.src2; + if ((als0.alf2.src2 & INSTR_SRC2_BIT_MASK) == INSTR_SRC2_16BIT_VALUE || + (als0.alf2.src2 & INSTR_SRC2_BIT_MASK) == INSTR_SRC2_32BIT_VALUE) { + ret = parse_getsp_literal_operand(trap_ip, hs, als0, incr, fault_addr); + } else if ((als0.alf2.src2 & INSTR_SRC2_GREG_MASK) == INSTR_SRC2_GREG_VALUE) { + ret = parse_getsp_greg_operand(als0, incr); + } else if (!src2.rt7 || src2.rt7 && !src2.rt6) { + ret = parse_getsp_reg_operand(src2, regs, incr, fault_addr); + } else { + ret = -EINVAL; + } + if (ret == -EFAULT) { + return GETSP_OP_SIGSEGV; + } else if (ret) { + DebugUS("Parsing getsp operation at 0x%lx (HS 0x%x, ALS0 0x%x, ALES0 0x%hx) failed with %d", + trap_ip, AW(hs), AW(als0), AW(ales0), ret); + return GETSP_OP_FAIL; + } + + if (*incr < 0) { + *incr = round_up(-(*incr), PAGE_SIZE); + *incr = max(USER_C_STACK_BYTE_INCR, (unsigned long)*incr); + DebugUS("expand on %d bytes detected\n", *incr); + return GETSP_OP_INCREMENT; + } + + DebugUS("constrict on %d bytes detected\n", *incr); + return GETSP_OP_DECREMENT; +} + diff --git a/arch/e2k/kernel/hotplug-cpu.c b/arch/e2k/kernel/hotplug-cpu.c new file mode 100644 index 000000000000..75a9d744f7e2 --- /dev/null +++ b/arch/e2k/kernel/hotplug-cpu.c @@ -0,0 +1,61 @@ +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +void arch_cpu_idle_dead(void) +{ + unsigned int cpu = raw_smp_processor_id(); + unsigned int cpuid = hard_smp_processor_id(); + + /* Make sure idle task is using init_mm */ + idle_task_exit(); + + /* Tell __cpu_die() that this CPU is now safe to dispose of */ + (void)cpu_report_death(); + + /* Unplug cpu 
and wait for a plug */ + wait_for_startup(cpuid, true); + WARN_ON_ONCE(!physid_isset(cpuid, phys_cpu_present_map)); + + /* If we return, we re-enter start_secondary */ + start_secondary_resume(cpuid, cpu); +} + +/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ +static void fixup_irqs(void) +{ + irq_migrate_all_off_this_cpu(); + fixup_irqs_pic(); +} + +/* + * __cpu_disable runs on the processor to be shutdown. + */ +int __cpu_disable(void) +{ + lock_vector_lock(); + set_cpu_online(raw_smp_processor_id(), false); + unlock_vector_lock(); + + fixup_irqs(); + + return 0; +} + +void __cpu_die(unsigned int cpu) +{ + if (!cpu_wait_death(cpu, 5)) { + pr_err("CPU %u didn't die...\n", cpu); + return; + } + + pr_info("CPU %u is now offline\n", cpu); +} diff --git a/arch/e2k/kernel/hw_breakpoint.c b/arch/e2k/kernel/hw_breakpoint.c new file mode 100644 index 000000000000..3438a9c2d9a3 --- /dev/null +++ b/arch/e2k/kernel/hw_breakpoint.c @@ -0,0 +1,436 @@ +#include +#include +#include +#include + +#include +#include +#include +#include + + +int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) +{ + if (cpu_has(CPU_HWBUG_SPURIOUS_EXC_DATA_DEBUG) && + attr->bp_type != HW_BREAKPOINT_X) { + if (attr->bp_addr >= 0 && attr->bp_addr <= 30) + return -EINVAL; + } + + /* Type */ + switch (attr->bp_type) { + case HW_BREAKPOINT_W: + case HW_BREAKPOINT_R: + case HW_BREAKPOINT_W | HW_BREAKPOINT_R: + break; + case HW_BREAKPOINT_X: + /* + * We don't allow kernel breakpoints in places that are not + * acceptable for kprobes. On non-kprobes kernels, we don't + * allow kernel breakpoints at all. 
+ */ + if (attr->bp_addr >= TASK_SIZE) { +#ifdef CONFIG_KPROBES + if (within_kprobe_blacklist(attr->bp_addr)) + return -EINVAL; +#else + return -EINVAL; +#endif + } + break; + default: + return -EINVAL; + } + + switch (attr->bp_len) { + case HW_BREAKPOINT_LEN_1: + case HW_BREAKPOINT_LEN_2: + case HW_BREAKPOINT_LEN_4: + case HW_BREAKPOINT_LEN_8: + case HW_BREAKPOINT_LEN_16: + break; + default: + return -EINVAL; + } + + if (attr->bp_addr & (attr->bp_len - 1)) + return -EINVAL; + + if (attr->bp_addr > E2K_VA_MASK) + return -EINVAL; + + hw->address = attr->bp_addr; + hw->type = attr->bp_type; + hw->len = attr->bp_len; + hw->ss = 0; + + return 0; +} + +/* + * Check for virtual address in kernel space. + */ +int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw) +{ + unsigned long max_addr = user_addr_max(); + + /* + * We don't need to worry about (addr + len - 1) overflowing: + * we already require that va is aligned to a multiple of len. + */ + return hw->address >= max_addr || hw->address + hw->len - 1 >= max_addr; +} + + +/* + * Stores the breakpoints currently in use on each breakpoint address + * register for each cpus + */ +static DEFINE_PER_CPU(struct perf_event *, bp_instr_slot[HBP_NUM]); +static DEFINE_PER_CPU(struct perf_event *, bp_data_slot[HBP_NUM]); + +DEFINE_PER_CPU(u8, perf_bps_used); + +static inline struct perf_event **get_bp_slot_ptr(int bp_num, int is_data_bp) +{ + struct perf_event **slot; + + if (is_data_bp) + slot = this_cpu_ptr(&bp_data_slot[bp_num]); + else + slot = this_cpu_ptr(&bp_instr_slot[bp_num]); + + return slot; +} + +static inline u32 get_bp_mask(int is_data_bp, int bp_num) +{ + return 1 << (4 * !!is_data_bp + bp_num); +} + +static inline int __arch_install_hw_breakpoint(int bp_num, int is_data_bp, + struct arch_hw_breakpoint *info) +{ + unsigned long flags; + int ret; + + raw_all_irq_save(flags); + if (is_data_bp) + ret = set_hardware_data_breakpoint(info->address, info->len, + !!(info->type & HW_BREAKPOINT_W), + !!(info->type 
& HW_BREAKPOINT_R), + 0, bp_num, 1); + else + ret = set_hardware_instr_breakpoint(info->address, + 0, bp_num, 1); + + if (!ret) + __this_cpu_or(perf_bps_used, get_bp_mask(is_data_bp, bp_num)); + raw_all_irq_restore(flags); + + return ret; +} + +/* + * Install a perf counter breakpoint. + * + * We seek a free debug address register and use it for this + * breakpoint. Eventually we enable it in the debug control register. + * + * Atomic: we hold the counter->ctx->lock and we only handle variables + * and registers local to this cpu. + */ +int arch_install_hw_breakpoint(struct perf_event *bp) +{ + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + int i, is_data_bp; + + is_data_bp = info->type & (HW_BREAKPOINT_R|HW_BREAKPOINT_W); + + for (i = 0; i < HBP_NUM; i++) { + struct perf_event **slot = get_bp_slot_ptr(i, is_data_bp); + + if (!*slot) { + *slot = bp; + break; + } + } + + if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot")) + return -EBUSY; + + return __arch_install_hw_breakpoint(i, is_data_bp, info); +} + + +static inline void __arch_uninstall_hw_breakpoint(int bp_num, int is_data_bp) +{ + unsigned long flags; + int ret; + + raw_all_irq_save(flags); + if (is_data_bp) + ret = set_hardware_data_breakpoint(0, 1, 0, 0, 0, bp_num, 0); + else + ret = set_hardware_instr_breakpoint(0, 0, bp_num, 0); + + if (!ret) + __this_cpu_and(perf_bps_used, ~get_bp_mask(is_data_bp, bp_num)); + raw_all_irq_restore(flags); +} + +/* + * Uninstall the breakpoint contained in the given counter. + * + * First we search the debug address register it uses and then we disable + * it. 
+ */ +void arch_uninstall_hw_breakpoint(struct perf_event *bp) +{ + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + int i, is_data_bp; + + is_data_bp = info->type & (HW_BREAKPOINT_R|HW_BREAKPOINT_W); + + for (i = 0; i < HBP_NUM; i++) { + struct perf_event **slot = get_bp_slot_ptr(i, is_data_bp); + + if (*slot == bp) { + *slot = NULL; + break; + } + } + + if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot")) + return; + + __arch_uninstall_hw_breakpoint(i, is_data_bp); +} + + +void bp_data_overflow_handle(struct pt_regs *regs) +{ + e2k_ddbsr_t ddbsr; + int bp_num, handled = 0; + + ddbsr = READ_DDBSR_REG(); + + for (bp_num = 0; bp_num < HBP_NUM; bp_num++) { + struct perf_event *bp; + + if ((AW(ddbsr) & E2K_DDBSR_MASK(bp_num)) == 0) + continue; + + /* + * The counter may be concurrently released but that can only + * occur from a call_rcu() path. We can then safely fetch + * the breakpoint, use its callback, touch its counter + * while we are in an rcu_read_lock() path. + */ + rcu_read_lock(); + + bp = *get_bp_slot_ptr(bp_num, 1); + if (!bp) { + rcu_read_unlock(); + continue; + } + + ++handled; + + perf_bp_event(bp, regs); + + AW(ddbsr) &= ~E2K_DDBSR_MASK(bp_num); + + rcu_read_unlock(); + } + + if (handled) + WRITE_DDBSR_REG(ddbsr); +} + +static int __set_single_step_breakpoint(e2k_mem_crs_t *frame, + unsigned long real_frame_addr, + unsigned long corrected_frame_addr, int flags, void *arg) +{ + u64 target_frame = (u64) arg; + e2k_cr1_lo_t cr1_lo = frame->cr1_lo; + + if (target_frame != corrected_frame_addr) + return 0; + + AS(cr1_lo).ss = 1; + + if (flags & PCF_FLUSH_NEEDED) + NATIVE_FLUSHC; + + /* Set exc_instr_debug exception to fire on return */ + if (put_cr1_lo(cr1_lo, real_frame_addr, 0)) + return -EFAULT; + + return 1; +} + +static void set_single_step_breakpoint(struct pt_regs *regs) +{ + u64 target_frame; + long ret; + + target_frame = AS(regs->stacks.pcsp_lo).base + + AS(regs->stacks.pcsp_hi).ind; + ret = parse_chain_stack((user_mode(regs)) 
? PCS_USER : 0, NULL, + __set_single_step_breakpoint, (void *) target_frame); + if (ret == 0) + ret = -ESRCH; + + if (IS_ERR_VALUE(ret)) { + pr_info("Could not set single step breakpoint in current chain stack, PCSP: 0x%llx 0x%llx\n", + AW(regs->stacks.pcsp_lo), AW(regs->stacks.pcsp_hi)); + force_sig(SIGKILL); + } +} + +void bp_instr_overflow_handle(struct pt_regs *regs) +{ + e2k_dibsr_t dibsr; + int bp_num, handled = 0, set_singlestep = 0; + + dibsr = READ_DIBSR_REG(); + + /* + * Re-arm handled breakpoints if needed + */ + if (dibsr.ss) { + if (test_ts_flag(TS_SINGLESTEP_USER)) { + /* User set this singlestep, rearm it since on + * e2k 'ss' bit is cleared by hardware after + * delivering interrupt. */ + set_singlestep = 1; + dibsr.ss = 0; + ++handled; + /* If user does a system call then exc_instr_debug will + * arrive on the first instruction of kernel entry, + * do not send signal in this case since gdb is not + * expecting a kernel IP */ + if (call_from_user(regs)) + S_SIG(regs, SIGTRAP, exc_instr_debug_num, + TRAP_HWBKPT); + } + + for (bp_num = 0; bp_num < HBP_NUM; bp_num++) { + struct arch_hw_breakpoint *info; + struct perf_event *bp; + + /* + * The counter may be concurrently released but + * that can only occur from a call_rcu() path. + */ + rcu_read_lock(); + bp = *get_bp_slot_ptr(bp_num, 0); + if (!bp) { + rcu_read_unlock(); + continue; + } + + info = counter_arch_bp(bp); + if (info->ss) { + __arch_install_hw_breakpoint(bp_num, 1, info); + info->ss = 0; + + ++handled; + dibsr.ss = 0; + } + rcu_read_unlock(); + } + } + + for (bp_num = 0; bp_num < HBP_NUM; bp_num++) { + struct arch_hw_breakpoint *info; + struct perf_event *bp; + + if ((AW(dibsr) & E2K_DIBSR_MASK(bp_num)) == 0) + continue; + + /* + * The counter may be concurrently released but that can only + * occur from a call_rcu() path. We can then safely fetch + * the breakpoint, use its callback, touch its counter + * while we are in an rcu_read_lock() path. 
+ */ + rcu_read_lock(); + + bp = *get_bp_slot_ptr(bp_num, 0); + if (!bp) { + rcu_read_unlock(); + continue; + } + + ++handled; + + perf_bp_event(bp, regs); + + info = counter_arch_bp(bp); + __arch_uninstall_hw_breakpoint(bp_num, 0); + info->ss = 1; + set_singlestep = 1; + + AW(dibsr) &= ~E2K_DIBSR_MASK(bp_num); + + rcu_read_unlock(); + } + + if (handled) + WRITE_DIBSR_REG(dibsr); + + /* + * Set "single step" breakpoint - we cannot just return because + * instruction breakpoint generates a _synchronous_ exception. + */ + if (set_singlestep) + set_single_step_breakpoint(regs); +} + +int hw_breakpoint_exceptions_notify( + struct notifier_block *unused, unsigned long val, void *data) +{ + return NOTIFY_DONE; +} + +void hw_breakpoint_pmu_read(struct perf_event *bp) +{ +} + +/* + * Unregister breakpoints from this task and reset the pointers in + * the thread_struct. + */ +void flush_ptrace_hw_breakpoint(struct task_struct *tsk) +{ + struct thread_struct *thread = &tsk->thread; + int i; + + for (i = 0; i < HBP_NUM; i++) { + if (thread->debug.hbp_data[i]) { + unregister_hw_breakpoint(thread->debug.hbp_data[i]); + thread->debug.hbp_data[i] = NULL; + } + if (thread->debug.hbp_instr[i]) { + unregister_hw_breakpoint(thread->debug.hbp_instr[i]); + thread->debug.hbp_instr[i] = NULL; + } + } +} + +/* + * Set ptrace breakpoint pointers to zero for this task. + * This is required in order to prevent child processes from unregistering + * breakpoints held by their parent. 
+ */ +void clear_ptrace_hw_breakpoint(struct task_struct *tsk) +{ + struct thread_struct *thread = &tsk->thread; + + memset(thread->debug.hbp_data, 0, sizeof(thread->debug.hbp_data)); + memset(thread->debug.hbp_instr, 0, sizeof(thread->debug.hbp_instr)); +} diff --git a/arch/e2k/kernel/io.c b/arch/e2k/kernel/io.c new file mode 100644 index 000000000000..bda5bc2ae53e --- /dev/null +++ b/arch/e2k/kernel/io.c @@ -0,0 +1,198 @@ +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#undef DEBUG_CIO_MODE +#undef DebugCIO +#define DEBUG_CIO_MODE 0 /* configuration space */ + /* input/output functions */ +#define DebugCIO(...) DebugPrint(DEBUG_CIO_MODE, ##__VA_ARGS__) + +asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int turn_on) +{ + return 0; +} + +/* + * E2C+/E2C/E2S/E8C/E1C+ configuration area access + */ + +static inline unsigned long do_get_domain_pci_conf_base(unsigned int domain) +{ + unsigned long conf_base; + + if (!HAS_MACHINE_L_SIC) { + pr_err("%s(): machine has not NBSR to calculate " + "PCI CFG base\n", + __func__); + return -1; + } + if (!iohub_online(domain)) { + pr_err("%s(): IOHUB domain # %d (node %d, link %d) " + "is not online\n", + __func__, domain, iohub_domain_to_node(domain), + iohub_domain_to_link(domain)); + return -1; + } + conf_base = domain_pci_conf_base(domain); + if (conf_base == 0) { + pr_err("%s(): IOHUB domain # %d (node %d, link %d) " + "PCI CFG base did not set\n", + __func__, domain, iohub_domain_to_node(domain), + iohub_domain_to_link(domain)); + return -1; + } + return conf_base; +} + +static inline unsigned long do_get_domain_pci_conf_size(unsigned int domain) +{ + unsigned long conf_size; + + if (!HAS_MACHINE_L_SIC) { + pr_err("%s(): machine has not NBSR to calculate " + "PCI CFG base\n", + __func__); + return -1; + } + if (!iohub_online(domain)) { + pr_err("%s(): IOHUB domain # %d (node %d, link %d) " + "is not online\n", + __func__, domain, 
iohub_domain_to_node(domain), + iohub_domain_to_link(domain)); + return -1; + } + conf_size = domain_pci_conf_size(domain); + if (conf_size == 0) { + pr_err("%s(): IOHUB domain # %d (node %d, link %d) " + "PCI CFG base did not set\n", + __func__, domain, iohub_domain_to_node(domain), + iohub_domain_to_link(domain)); + return -1; + } + return conf_size; +} + +unsigned long get_domain_pci_conf_base(unsigned int domain) +{ + return do_get_domain_pci_conf_base(domain); +} + +unsigned long get_domain_pci_conf_size(unsigned int domain) +{ + return do_get_domain_pci_conf_size(domain); +} + +void +native_conf_inb(unsigned int domain, unsigned int bus, unsigned long port, + u8 *byte) +{ + unsigned long conf_base; + unsigned long conf_port; + + conf_base = do_get_domain_pci_conf_base(domain); + conf_port = conf_base + port; + *byte = NATIVE_READ_MAS_B(conf_port, MAS_IOADDR); + __io_par(); + DebugCIO("value %x read from port %lx, domain #%d " + "(node %d, IO link %d)\n", + (u32) *byte, conf_port, domain, + iohub_domain_to_node(domain), iohub_domain_to_link(domain)); +} + +void +native_conf_inw(unsigned int domain, unsigned int bus, unsigned long port, + u16 *hword) +{ + unsigned long conf_base; + unsigned long conf_port; + + conf_base = do_get_domain_pci_conf_base(domain); + conf_port = conf_base + port; + *hword = NATIVE_READ_MAS_H(conf_port, MAS_IOADDR); + __io_par(); + DebugCIO("value %x read from port %lx, domain #%d " + "(node %d, IO link %d)\n", + (u32) *hword, conf_port, domain, + iohub_domain_to_node(domain), iohub_domain_to_link(domain)); +} + +void +native_conf_inl(unsigned int domain, unsigned int bus, unsigned long port, + u32 *word) +{ + unsigned long conf_base; + unsigned long conf_port; + + conf_base = do_get_domain_pci_conf_base(domain); + conf_port = conf_base + port; + *word = NATIVE_READ_MAS_W(conf_port, MAS_IOADDR); + __io_par(); + DebugCIO("value %x read from port %lx, domain #%d " + "(node %d, IO link %d)\n", + (u32) *word, conf_port, domain, + 
iohub_domain_to_node(domain), iohub_domain_to_link(domain)); +} + +void +native_conf_outb(unsigned int domain, unsigned int bus, unsigned long port, + u8 byte) +{ + unsigned long conf_base; + unsigned long conf_port; + + conf_base = do_get_domain_pci_conf_base(domain); + conf_port = conf_base + port; + DebugCIO("value %x write to port %lx, domain #%d " + "(node %d, IO link %d)\n", + (u32) byte, conf_port, domain, + iohub_domain_to_node(domain), iohub_domain_to_link(domain)); + __io_pbw(); + NATIVE_WRITE_MAS_B(conf_port, byte, MAS_IOADDR); + __io_paw(); +} + +void +native_conf_outw(unsigned int domain, unsigned int bus, unsigned long port, + u16 hword) +{ + unsigned long conf_base; + unsigned long conf_port; + + conf_base = do_get_domain_pci_conf_base(domain); + conf_port = conf_base + port; + DebugCIO("value %x write to port %lx, domain #%d " + "(node %d, IO link %d)\n", + (u32) hword, conf_port, domain, + iohub_domain_to_node(domain), iohub_domain_to_link(domain)); + __io_pbw(); + NATIVE_WRITE_MAS_H(conf_port, hword, MAS_IOADDR); + __io_paw(); +} + +void +native_conf_outl(unsigned int domain, unsigned int bus, unsigned long port, + u32 word) +{ + unsigned long conf_base; + unsigned long conf_port; + + conf_base = do_get_domain_pci_conf_base(domain); + conf_port = conf_base + port; + DebugCIO("value %x write to port %lx, domain #%d " + "(node %d, IO link %d)\n", + (u32) word, conf_port, domain, + iohub_domain_to_node(domain), iohub_domain_to_link(domain)); + __io_pbw(); + NATIVE_WRITE_MAS_W(conf_port, word, MAS_IOADDR); + __io_paw(); +} diff --git a/arch/e2k/kernel/ioctl32.c b/arch/e2k/kernel/ioctl32.c new file mode 100644 index 000000000000..2c6455b90269 --- /dev/null +++ b/arch/e2k/kernel/ioctl32.c @@ -0,0 +1,30 @@ +/* + * linux/arch/e2k/kernel/ioctl32.c + * Conversion between 32bit and 64bit native e2k ioctls. 
+ */ + +#include +#include + +#define INCLUDES +#include "compat_ioctl.c" + +#define CODE +#include "compat_ioctl.c" + +typedef int (* ioctl32_handler_t)(unsigned int, unsigned int, unsigned long, + struct file *); +#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL((cmd),sys_ioctl) +#define HANDLE_IOCTL(cmd,handler) { (cmd), (ioctl32_handler_t)(handler), NULL }, +#define IOCTL_TABLE_START \ + struct ioctl_trans ioctl_start[] = { +#define IOCTL_TABLE_END \ + }; + +IOCTL_TABLE_START +#define DECLARES +#include "compat_ioctl.c" +#include +IOCTL_TABLE_END + +int ioctl_table_size = ARRAY_SIZE(ioctl_start); diff --git a/arch/e2k/kernel/kexec.c b/arch/e2k/kernel/kexec.c new file mode 100644 index 000000000000..2159465e97a1 --- /dev/null +++ b/arch/e2k/kernel/kexec.c @@ -0,0 +1,1038 @@ +/* + * drivers/mcst/kexec.c + * + * Elbrus kexec pseudo driver. + * + * Copyright (C) 2015-2020 Pavel V. Panteleev (panteleev_p@mcst.ru) + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define IMAGE_KERNEL_CODE_OFFSET 0x10000 +#define IMAGE_BOOTBLOCK_OFFSET 0x100 + +#define IMAGE_LINTEL_ENTRY_OFFSET 0x800 + +#define KEXEC_CHUNKS_COUNT_MAX 16 + +#define __switch_to_phys__ __attribute__((__section__(".switch_to_phys"))) + +#undef DEBUG_KEXEC_MODE +#undef DebugKE +#define DEBUG_KEXEC_MODE 0 +#define DebugBootKE if (DEBUG_KEXEC_MODE) do_boot_printk +#define DebugKE(fmt, ...) 
\ + if (DEBUG_KEXEC_MODE) \ + pr_err("%d %d %s: " fmt, raw_smp_processor_id(), \ + current->pid, __func__, ##__VA_ARGS__) + +struct smp_kexec_reboot_param; +typedef void (*kexec_reboot_func_ptr)(struct smp_kexec_reboot_param *); + + +struct kexec_mem_chunk { + void *start; + int size; +}; + +struct kexec_mem_ptr { + struct kexec_mem_chunk chunks[KEXEC_CHUNKS_COUNT_MAX]; + int chunks_count; + u64 size; + u64 valid_size; + u64 phys_addr; +}; + +struct smp_kexec_reboot_param { + struct bootblock_struct *bootblock; + struct kexec_mem_ptr *image; + struct kexec_mem_ptr *initrd; + kexec_reboot_func_ptr reboot; +}; + +static DEFINE_MUTEX(kexec_mutex); + + +/* + * kexec memory block + */ + +static void free_kexec_mem(struct kexec_mem_ptr *mem) +{ + struct kexec_mem_chunk *chunk; + int i; + + if (!mem->size) + return; + + for (i = 0, chunk = mem->chunks; i < mem->chunks_count; i++, chunk++) { + DebugKE("free memory from 0x%llx of 0x%x bytes\n", + chunk->start, chunk->size); + free_pages_exact(chunk->start, chunk->size); + + if (memblock_free(virt_to_phys(chunk->start), chunk->size)) + DebugKE("remove memory chunk %d from memblock failed\n", + i); + else + DebugKE("remove memory chunk %d from memblock succeeded\n", + i); + } +} + +static int alloc_kexec_mem(struct kexec_mem_ptr *mem, u64 size) +{ + int chunk_size, ret; + u64 max_chunk_size = (PAGE_SIZE << (MAX_ORDER - 1)); + struct kexec_mem_chunk *chunk; + + DebugKE("allocating kexec memory started for size 0x%llx\n", + size); + + if (!size) { + mem->size = 0; + return 0; + } + + mem->valid_size = size; + + size = PAGE_ALIGN_DOWN(size); + + mem->size = size; + mem->chunks_count = 0; + + chunk = mem->chunks; + chunk_size = (size < max_chunk_size) ? 
size : max_chunk_size;
+
+	while ((chunk_size >= PAGE_SIZE) && size &&
+			(mem->chunks_count < KEXEC_CHUNKS_COUNT_MAX)) {
+		DebugKE("allocating memory of size 0x%x bytes from 0x%llx bytes\n",
+			chunk_size, size);
+
+		/* extra parentheses: assignment intentionally used as the
+		 * loop-body condition (silences -Wparentheses) */
+		if ((chunk->start = alloc_pages_exact(chunk_size, GFP_ATOMIC))) {
+			DebugKE("memory of chunk %d allocated from 0x%llx\n",
+				mem->chunks_count, chunk->start);
+
+			chunk->size = chunk_size;
+			mem->chunks_count++;
+
+			if ((ret = memblock_reserve(virt_to_phys(chunk->start),
+					chunk_size))) {
+				DebugKE("adding memory chunk %d to memblock failed\n",
+					mem->chunks_count - 1);
+				goto out;
+			} else {
+				DebugKE("adding memory chunk %d to memblock succeeded\n",
+					mem->chunks_count - 1);
+			}
+
+			chunk++;
+
+			size -= chunk_size;
+
+			if (chunk_size > size)
+				chunk_size = size;
+		} else {
+			DebugKE("allocating memory failed\n");
+
+			chunk_size /= 2;
+		}
+	}
+
+	if (!size) {
+		DebugKE("allocating kexec memory succeed\n");
+		return 0;
+	}
+
+	ret = -ENOMEM;
+
+out:
+	DebugKE("allocating kexec memory failed\n");
+
+	free_kexec_mem(mem);
+
+	/* propagate the real cause: memblock_reserve() error via the
+	 * goto path, -ENOMEM via fall-through (was hard-coded -ENOMEM,
+	 * discarding ret) */
+	return ret;
+}
+
+static int copy_kexec_mem_from_user(struct kexec_mem_ptr *to,
+		const void __user *from)
+{
+	u64 offset = 0;
+	int i;
+
+	if (!to->size)
+		return 0;
+
+	for (i = 0; i < to->chunks_count; i++) {
+		u64 copy_size;
+
+		copy_size = (i == to->chunks_count - 1) ?
+ to->chunks[i].size + to->valid_size - to->size : + to->chunks[i].size; + + DebugKE("copy 0x%llx bytes from 0x%llx to 0x%llx\n", + copy_size, from + offset, to->chunks[i].start); + if (copy_from_user(to->chunks[i].start, from + offset, + copy_size)) { + DebugKE("failed to copy memory from user\n"); + return -EFAULT; + } + + offset += to->chunks[i].size; + } + + return 0; +} + +static void unreserve_continuous_kexec_mem(struct kexec_mem_ptr *mem) +{ + DebugKE("free memblock memory from 0x%llx size 0x%llx\n", + mem->phys_addr, mem->size); + if (memblock_free(mem->phys_addr, mem->size)) + DebugKE("memblock memory free failed\n"); +} + +static int find_continuous_kexec_mem(struct kexec_mem_ptr *mem, bool huge_align, + bool lowmem) +{ + u64 align = (huge_align) ? HPAGE_SIZE : PAGE_SIZE; + u64 base = 0; + u64 end = memblock.current_limit; + int ret = 0; + + if (!mem->size) + return 0; + + if (lowmem) { + e2k_rt_mlo_struct_t mlo; + + mlo.E2K_RT_MLO_reg = sic_read_node_nbsr_reg(0, SIC_rt_mlo0); + + base = mlo.E2K_RT_MLO_bgn << E2K_SIC_ALIGN_RT_MLO; + end = mlo.E2K_RT_MLO_end << E2K_SIC_ALIGN_RT_MLO; + } + + DebugKE("find continuous address for kexec memory in range from 0x%llx to 0x%llx\n", + base, end); + mem->phys_addr = + memblock_find_in_range(base, end, mem->size, align); + + if (!mem->phys_addr) { + DebugKE("failed to find continuous address for kexec memory 0x%llx\n", + mem); + return -ENOMEM; + } else { + DebugKE("continuous address for kexec memory 0x%llx is 0x%llx\n", + mem, mem->phys_addr); + + DebugKE("reserve memblock memory from 0x%llx size 0x%llx\n", + mem->phys_addr, mem->size); + if (ret = memblock_reserve(mem->phys_addr, mem->size)) + DebugKE("memblock memory reserve failed\n"); + } + + return 0; +} + +static void kexec_mem_to_phys(struct kexec_mem_ptr *mem) +{ + int i; + + if (!mem->size) + return; + + for (i = 0; i < mem->chunks_count; i++) { + DebugKE("converting chunk %d virt address 0x%llx\n", + i, mem->chunks[i].start); + mem->chunks[i].start = + 
(void *)virt_to_phys(mem->chunks[i].start); + DebugKE("chunk %d phys address 0x%llx\n", + i, mem->chunks[i].start); + } +} + +static void boot_merge_kexec_mem(struct kexec_mem_ptr *mem) +{ + u64 offset = 0; + int i; + + if (!mem->size) + return; + + for (i = 0; i < mem->chunks_count; i++) { + DebugBootKE("copy 0x%x bytes from 0x%llx to 0x%llx\n", + mem->chunks[i].size, mem->chunks[i].start, + mem->phys_addr + offset); + boot_fast_memcpy((void *)(mem->phys_addr + offset), + mem->chunks[i].start, mem->chunks[i].size); + + offset += mem->chunks[i].size; + } +} + + +/* + * bootblock block + */ + +static void free_bootblock_mem(bootblock_struct_t *bootblock) +{ + DebugKE("free bootblock memory from 0x%llx\n", bootblock); + kfree(bootblock); + + if (memblock_free(virt_to_phys(bootblock), PAGE_SIZE)) + DebugKE("remove bootblock memory from memblock failed\n"); + else + DebugKE("remove bootblock memory from memblock succeeded\n"); +} + +static int alloc_bootblock_mem(struct bootblock_struct **bootblock) +{ + int ret; + + DebugKE("allocating bootblock memory of size %d bytes\n", + BOOTBLOCK_SIZE); + if (!(*bootblock = kmalloc(BOOTBLOCK_SIZE, GFP_ATOMIC))) { + DebugKE("allocating bootblock memory failed\n"); + return -ENOMEM; + } + DebugKE("bootblock memory allocated from 0x%llx\n", *bootblock); + + if (ret = memblock_reserve(virt_to_phys(*bootblock), PAGE_SIZE)) + DebugKE("adding bootblock memory to memblock failed\n"); + else + DebugKE("adding bootblock memory to memblock succeeded\n"); + + return 0; +} + +static int copy_bootblock_mem(struct bootblock_struct *to, + struct bootblock_struct *from) +{ + DebugKE("copy %d bytes of bootblock from 0x%llx to 0x%llx\n", + BOOTBLOCK_SIZE, from, to); + memcpy(to, from, BOOTBLOCK_SIZE); + return 0; +} + + +/* + * initrd block + */ + +static void free_initrd_mem(struct kexec_mem_ptr *initrd) +{ + DebugKE("free initrd memory 0x%llx\n", initrd); + free_kexec_mem(initrd); +} + +static int alloc_initrd_mem(u64 initrd_size, struct 
kexec_mem_ptr *initrd) +{ + DebugKE("allocating initrd memory 0x%llx of size 0x%llx\n", + initrd, initrd_size); + return alloc_kexec_mem(initrd, initrd_size); +} + +static int copy_initrd_mem(struct kexec_mem_ptr *to, const void __user *from) +{ + DebugKE("copy initrd memory 0x%llx from user 0x%llx\n", to, from); + return copy_kexec_mem_from_user(to, from); +} + +static void unreserve_continuous_initrd_mem(struct kexec_mem_ptr *initrd) +{ + DebugKE("unreserve continuous memory for initrd 0x%llx\n", initrd); + unreserve_continuous_kexec_mem(initrd); +} + +static int find_continuous_initrd_mem(struct kexec_mem_ptr *initrd) +{ + DebugKE("try to find continuous memory for initrd 0x%llx\n", initrd); + return find_continuous_kexec_mem(initrd, 0, 0); +} + +static void boot_merge_initrd_mem(struct kexec_mem_ptr *initrd) +{ + DebugBootKE("merge chunks of initrd memory 0x%llx\n", initrd); + return boot_merge_kexec_mem(initrd); +} + + +/* + * kernel image block + */ + +static void free_kernel_code_mem(struct kexec_mem_ptr *image) +{ + DebugKE("free kernel code memory 0x%llx\n", image); + free_kexec_mem(image); +} + +static int alloc_kernel_code_mem(u64 image_size, struct kexec_mem_ptr *image) +{ + DebugKE("allocating kernel code memory 0x%llx of size 0x%llx\n", + image, image_size); + return alloc_kexec_mem(image, image_size); +} + +static int +copy_kernel_code_mem(struct kexec_mem_ptr *to, const void __user *from) +{ + DebugKE("copy kernel code memory 0x%llx from user 0x%llx\n", to, from); + return copy_kexec_mem_from_user(to, from); +} + +static void unreserve_continuous_kernel_code_mem(struct kexec_mem_ptr *image) +{ + DebugKE("unreserve continuous memory for kernel code 0x%llx\n", image); + unreserve_continuous_kexec_mem(image); +} + +static int find_continuous_kernel_code_mem(struct kexec_mem_ptr *image) +{ + DebugKE("try to find continuous memory for kernel code 0x%llx\n", + image); + return find_continuous_kexec_mem(image, 1, 0); +} + +static void 
boot_merge_kernel_code_mem(struct kexec_mem_ptr *image) +{ + DebugBootKE("merge chunks of kernel code memory 0x%llx\n", image); + return boot_merge_kexec_mem(image); +} + + +/* + * lintel code block + */ + +static void free_lintel_code_mem(struct kexec_mem_ptr *image) +{ + DebugKE("free lintel code memory 0x%llx\n", image); + free_kexec_mem(image); +} + +static int alloc_lintel_code_mem(u64 image_size, struct kexec_mem_ptr *image) +{ + DebugKE("allocating lintel code memory 0x%llx of size 0x%llx\n", + image, image_size); + return alloc_kexec_mem(image, image_size); +} + +static int +copy_lintel_code_mem(struct kexec_mem_ptr *to, const void __user *from) +{ + DebugKE("copy lintel code memory 0x%llx from user 0x%llx\n", to, from); + return copy_kexec_mem_from_user(to, from); +} + +static int find_continuous_lintel_code_mem(struct kexec_mem_ptr *image) +{ + DebugKE("try to find continuous memory for lintel code 0x%llx\n", + image); + return find_continuous_kexec_mem(image, 0, 1); +} + +static void boot_merge_lintel_code_mem(struct kexec_mem_ptr *image) +{ + DebugBootKE("merge chunks of lintel code memory 0x%llx\n", image); + return boot_merge_kexec_mem(image); +} + + +/* + * smp and switch to phys block + */ + +static void smp_kexec_reboot_param_to_phys(struct smp_kexec_reboot_param *p) +{ + DebugKE("converting smp param 0x%llx to phys\n", p); + + DebugKE("converting bootblock virt address 0x%llx\n", p->bootblock); + p->bootblock = (void *)virt_to_phys(p->bootblock); + DebugKE("bootblock phys address 0x%llx\n", p->bootblock); + + DebugKE("converting image virt address 0x%llx\n", p->image); + kexec_mem_to_phys(p->image); + p->image = (void *)virt_to_phys(p->image); + DebugKE("image phys address 0x%llx\n", p->image); + + DebugKE("converting reboot virt address 0x%llx\n", p->reboot); + p->reboot = (kexec_reboot_func_ptr)kernel_va_to_pa(p->reboot); + DebugKE("reboot phys address 0x%llx\n", p->reboot); + + if (!p->initrd) + return; + + DebugKE("converting initrd virt 
address 0x%llx\n", p->initrd); + kexec_mem_to_phys(p->initrd); + p->initrd = (void *)virt_to_phys(p->initrd); + DebugKE("initrd phys address 0x%llx\n", p->initrd); +} + +static noinline void __switch_to_phys__ +kexec_switch_to_phys(struct smp_kexec_reboot_param *p) +{ + bootmem_areas_t *bootmem = &kernel_bootmem; + e2k_rwap_lo_struct_t reg_lo; + e2k_rwap_hi_struct_t reg_hi; + e2k_rwap_lo_struct_t stack_reg_lo; + e2k_rwap_hi_struct_t stack_reg_hi; + e2k_usbr_t usbr = { {0} }; + int cpuid = hard_smp_processor_id(); + + NATIVE_FLUSHCPU; + + /* + * Take into account PS guard page from ttable_entry12 + */ + reg_lo.PSP_lo_half = 0; +#ifndef CONFIG_SMP + reg_lo.PSP_lo_base = bootmem->boot_ps.phys; +#else + reg_lo.PSP_lo_base = bootmem->boot_ps[cpuid].phys; +#endif + reg_lo._PSP_lo_rw = E2K_PSP_RW_PROTECTIONS; + reg_hi.PSP_hi_half = 0; +#ifndef CONFIG_SMP + reg_hi.PSP_hi_size = bootmem->boot_ps.size + PAGE_SIZE; +#else + reg_hi.PSP_hi_size = bootmem->boot_ps[cpuid].size + PAGE_SIZE; +#endif + reg_hi.PSP_hi_ind = 0; + NATIVE_NV_WRITE_PSP_REG(reg_hi, reg_lo); + + /* + * Take into account PCS guard page from ttable_entry12 + */ + reg_lo.PCSP_lo_half = 0; +#ifndef CONFIG_SMP + reg_lo.PCSP_lo_base = bootmem->boot_pcs.phys; +#else + reg_lo.PCSP_lo_base = bootmem->boot_pcs[cpuid].phys; +#endif + reg_lo._PCSP_lo_rw = E2K_PCSR_RW_PROTECTIONS; + reg_hi.PCSP_hi_half = 0; +#ifndef CONFIG_SMP + reg_hi.PCSP_hi_size = bootmem->boot_pcs.size + PAGE_SIZE; +#else + reg_hi.PCSP_hi_size = bootmem->boot_pcs[cpuid].size + PAGE_SIZE; +#endif + reg_hi.PCSP_hi_ind = 0; + NATIVE_NV_WRITE_PCSP_REG(reg_hi, reg_lo); + +#ifndef CONFIG_SMP + bootmem->boot_stack.phys_offset = bootmem->boot_stack.size; +#else + bootmem->boot_stack[cpuid].phys_offset = + bootmem->boot_stack[cpuid].size; +#endif + + stack_reg_lo.USD_lo_half = 0; + stack_reg_hi.USD_hi_half = 0; +#ifndef CONFIG_SMP + usbr.USBR_base = bootmem->boot_stack.phys + bootmem->boot_stack.size; + stack_reg_lo.USD_lo_base = bootmem->boot_stack.phys + + 
bootmem->boot_stack.phys_offset; + stack_reg_hi.USD_hi_size = bootmem->boot_stack.phys_offset; +#else + usbr.USBR_base = bootmem->boot_stack[cpuid].phys + + bootmem->boot_stack[cpuid].size; + stack_reg_lo.USD_lo_base = bootmem->boot_stack[cpuid].phys + + bootmem->boot_stack[cpuid].phys_offset; + stack_reg_hi.USD_hi_size = bootmem->boot_stack[cpuid].phys_offset; +#endif + stack_reg_lo.USD_lo_p = 0; + NATIVE_NV_WRITE_USBR_USD_REG(usbr, stack_reg_hi, stack_reg_lo); + +#ifndef CONFIG_NUMA + reg_lo.CUD_lo_base = bootmem->text.phys; +#else + reg_lo.CUD_lo_base = bootmem->text.nodes[BOOT_BS_NODE_ID].phys; +#endif + reg_lo._CUD_lo_rw = E2K_CUD_RW_PROTECTIONS; + reg_lo.CUD_lo_c = CUD_CFLAG_SET; + NATIVE_WRITE_CUD_LO_REG(reg_lo); + NATIVE_WRITE_OSCUD_LO_REG(reg_lo); + +#ifndef CONFIG_NUMA + reg_lo.GD_lo_base = bootmem->data.phys; +#else + reg_lo.GD_lo_base = bootmem->data.nodes[BOOT_BS_NODE_ID].phys; +#endif + reg_lo._GD_lo_rw = E2K_GD_RW_PROTECTIONS; + NATIVE_WRITE_GD_LO_REG(reg_lo); + NATIVE_WRITE_OSGD_LO_REG(reg_lo); + + WRITE_CURRENT_REG_VALUE(cpuid); + + E2K_CLEAR_CTPRS(); + __E2K_WAIT_ALL; + + NATIVE_WRITE_MMU_CR(MMU_CR_KERNEL_OFF); + __E2K_WAIT_ALL; + + E2K_JUMP_ABSOLUTE_WITH_ARGUMENTS_1(p->reboot, p); +} + +static void do_kexec_reboot(void *info) +{ + struct smp_kexec_reboot_param *param = info; + + all_irq_disable(); + disable_local_APIC(); + + DebugKE("switch to phys memory started for smp param 0x%llx\n", param); + kexec_switch_to_phys( + (struct smp_kexec_reboot_param *)virt_to_phys(param)); +} + + +/* + * common helpful block + */ + +static void unreserve_stack_mem(u64 stack) +{ + DebugKE("unreserve stack memory from 0x%llx size 0x%lx\n", + stack - PAGE_SIZE, 2 * PAGE_SIZE); + if (memblock_free(stack - PAGE_SIZE, 2 * PAGE_SIZE)) + DebugKE("stack memory unreserve failed\n"); +} + +static int reserve_stack_mem(u64 stack) +{ + int ret = 0; + + stack = PAGE_ALIGN_UP(stack); + + DebugKE("reserve stack memory from 0x%llx size 0x%lx\n", + stack - 2 * PAGE_SIZE, 4 * 
PAGE_SIZE); + if (ret = memblock_reserve(stack - 2 * PAGE_SIZE, 4 * PAGE_SIZE)) + DebugKE("stack memory reserve failed\n"); + + return ret; +} + + +/* + * kernel exec block + */ + +static void boot_scall2(bootblock_struct_t *bootblock) +{ + E2K_SYSCALL(START_KERNEL_SYSCALL, 0, 1, bootblock); +} + +static void +boot_kexec_setup_image_regs(bootblock_struct_t *bootblock, u64 image) +{ + e2k_rwap_lo_struct_t reg_lo; + e2k_rwap_hi_struct_t reg_hi; + u64 base, size; + + base = image; + size = bootblock->info.kernel_size; + + reg_lo.CUD_lo_base = base; + reg_lo.CUD_lo_c = E2K_CUD_CHECKED_FLAG; + reg_lo._CUD_lo_rw = E2K_CUD_RW_PROTECTIONS; + reg_hi.CUD_hi_size = size; + reg_hi._CUD_hi_curptr = 0; + WRITE_CUD_REG(reg_hi, reg_lo); + WRITE_OSCUD_REG(reg_hi, reg_lo); + + reg_lo.GD_lo_base = base; + reg_lo._GD_lo_rw = E2K_GD_RW_PROTECTIONS; + reg_hi.GD_hi_size = size; + reg_hi._GD_hi_curptr = 0; + WRITE_GD_REG(reg_hi, reg_lo); + WRITE_OSGD_REG(reg_hi, reg_lo); +} + +static void boot_kexec_reboot_sequel(struct smp_kexec_reboot_param *p) +{ + struct bootblock_struct *bootblock = p->bootblock; + + boot_sync_all_processors(); + + /* + * Be sure, these functions are properly working on phys memory + */ + flush_TLB_all(); + flush_ICACHE_all(); + + boot_native_invalidate_CACHE_L12(); + + if (boot_early_pic_is_bsp()) { + boot_merge_initrd_mem(p->initrd); + boot_merge_kernel_code_mem(p->image); + } + + boot_sync_all_processors(); + + DebugBootKE("Jumping to ttable_entry12 of kernel base 0x%llx on cpu %ld\n", + p->image->phys_addr, boot_smp_processor_id()); + + boot_kexec_setup_image_regs(bootblock, p->image->phys_addr); + boot_scall2(bootblock); +} + +static int kexec_setup_bootblock(bootblock_struct_t *bootblock, + bootblock_struct_t *image_bootblock, + struct kexec_mem_ptr *image, struct kexec_mem_ptr *initrd, + char *cmdline) +{ + int ret = 0; + u64 kernel_size; + u32 kernel_csum; + int cmdline_len; + + DebugKE("image_bootblock is 0x%llx\n", image_bootblock); + + DebugKE("get %ld 
bytes of image_bootblock from 0x%llx to 0x%llx\n", + sizeof(kernel_size), + &image_bootblock->info.kernel_size, + &kernel_size); + if (ret = get_user(kernel_size, &image_bootblock->info.kernel_size)) { + DebugKE("failed to get kernel_size from image_bootblock\n"); + return ret; + } + DebugKE("kernel_size is 0x%llx\n", kernel_size); + + DebugKE("get %ld bytes of image_bootblock from 0x%llx to 0x%llx\n", + sizeof(kernel_csum), + &image_bootblock->info.kernel_csum, + &kernel_csum); + if (ret = get_user(kernel_csum, &image_bootblock->info.kernel_csum)) { + DebugKE("failed to get kernel_csum from image_bootblock\n"); + return ret; + } + DebugKE("kernel_csum is 0x%x\n", kernel_csum); + + DebugKE("setup bootblock variables\n"); + + bootblock->info.kernel_size = kernel_size; + bootblock->info.kernel_csum = kernel_csum; + + cmdline_len = strlen(cmdline); + if (cmdline_len >= KSTRMAX_SIZE) { + strcpy(bootblock->info.kernel_args_string, + KERNEL_ARGS_STRING_EX_SIGNATURE); + strcpy(bootblock->info.bios.kernel_args_string_ex, cmdline); + } else { + strcpy(bootblock->info.kernel_args_string, cmdline); + } + + bootblock->info.kernel_base = image->phys_addr; + + bootblock->info.ramdisk_base = initrd->phys_addr; + bootblock->info.ramdisk_size = initrd->size; + + return ret; +} + +static long kexec_reboot(struct kexec_reboot_param __user *param) +{ + struct kexec_reboot_param p; + char cmdline[KSTRMAX_SIZE_EX]; + struct kexec_mem_ptr image, initrd; + bootblock_struct_t *bootblock; + bootblock_struct_t *image_bootblock; + usd_struct_t usd; + struct smp_kexec_reboot_param smp_param; + int ret = 0; + + DebugKE("copy %ld bytes of kexec_reboot_param struct from 0x%llx to 0x%llx\n", + sizeof(struct kexec_reboot_param), param, &p); + if (copy_from_user(&p, param, sizeof(struct kexec_reboot_param))) { + DebugKE("failed to copy kexec_reboot_param struct from user\n"); + return -EFAULT; + } + DebugKE("cmdline=0x%llx cmdline_size=%d image=0x%llx image_size=0x%llx\n", + p.cmdline, 
p.cmdline_size, p.image, p.image_size); + + if (p.cmdline_size >= KSTRMAX_SIZE_EX) { + DebugKE("cmdline_size %d > %d\n", + p.cmdline_size, KSTRMAX_SIZE_EX); + return -EINVAL; + } + + DebugKE("copy %d bytes of cmdline from 0x%llx to 0x%llx\n", + p.cmdline_size, p.cmdline, cmdline); + if (copy_from_user(cmdline, p.cmdline, p.cmdline_size)) { + DebugKE("failed to copy cmdline from user\n"); + return -EFAULT; + } + cmdline[p.cmdline_size] = 0; + DebugKE("cmdline is '%s'\n", cmdline); + + image_bootblock = + (bootblock_struct_t *)(p.image + IMAGE_BOOTBLOCK_OFFSET); + + read_USD_reg(&usd); + + if (ret = alloc_bootblock_mem(&bootblock)) + return ret; + + if (ret = alloc_kernel_code_mem( + p.image_size - IMAGE_KERNEL_CODE_OFFSET, &image)) + goto out_bootblock; + + if (ret = alloc_initrd_mem(p.initrd_size, &initrd)) + goto out_code; + + if (ret = copy_bootblock_mem(bootblock, bootblock_virt)) + goto out_initrd; + + if (ret = copy_kernel_code_mem(&image, + p.image + IMAGE_KERNEL_CODE_OFFSET)) + goto out_initrd; + + if (ret = copy_initrd_mem(&initrd, p.initrd)) + goto out_initrd; + + if (ret = reserve_stack_mem(usd.USD_base)) + goto out_initrd; + + if (ret = find_continuous_kernel_code_mem(&image)) + goto out_stack; + + if (ret = find_continuous_initrd_mem(&initrd)) + goto out_code_cont; + + if (DEBUG_KEXEC_MODE) + __memblock_dump_all(); + + if (ret = kexec_setup_bootblock(bootblock, image_bootblock, &image, + &initrd, cmdline)) + goto out_initrd_cont; + + DebugKE("shutdown devices, point of noreturn\n"); + kernel_restart_prepare(NULL); + l_iommu_stop_all(); + disable_IO_APIC(); + + smp_param.bootblock = bootblock; + smp_param.image = ℑ + smp_param.initrd = &initrd; + smp_param.reboot = boot_kexec_reboot_sequel; + + smp_kexec_reboot_param_to_phys(&smp_param); + + smp_call_function(do_kexec_reboot, &smp_param, 0); + do_kexec_reboot(&smp_param); + + BUG(); + +out_initrd_cont: + unreserve_continuous_initrd_mem(&initrd); +out_code_cont: + 
unreserve_continuous_kernel_code_mem(&image); +out_stack: + unreserve_stack_mem(usd.USD_base); +out_initrd: + free_initrd_mem(&initrd); +out_code: + free_kernel_code_mem(&image); +out_bootblock: + free_bootblock_mem(bootblock); + + return ret; +} + + +/* + * lintel exec block + */ + +static void boot_lintel_reboot_sequel(struct smp_kexec_reboot_param *p) +{ + struct bootblock_struct *bootblock = p->bootblock; + u64 jmp_addr = p->image->phys_addr + p->image->valid_size - + IMAGE_LINTEL_ENTRY_OFFSET; + + boot_sync_all_processors(); + + /* + * Be sure, these functions are properly working on phys memory + */ + flush_TLB_all(); + flush_ICACHE_all(); + + boot_native_invalidate_CACHE_L12(); + + if (boot_early_pic_is_bsp()) + boot_merge_lintel_code_mem(p->image); + + boot_sync_all_processors(); + + DebugKE("Jumping to lintel entry 0x%llx on cpu %d\n", + jmp_addr, boot_early_pic_read_id()); + E2K_MOVE_DREG_TO_DGREG(1, bootblock); + ((void (*)(void))jmp_addr)(); +} + +static long lintel_reboot(struct lintel_reboot_param __user *param) +{ + struct lintel_reboot_param p; + struct kexec_mem_ptr image; + bootblock_struct_t *bootblock; + usd_struct_t usd; + struct smp_kexec_reboot_param smp_param; + int ret = 0; + + DebugKE("copy %ld bytes of lintel_reboot_param struct from 0x%llx to 0x%llx\n", + sizeof(struct lintel_reboot_param), param, &p); + if (copy_from_user(&p, param, sizeof(struct lintel_reboot_param))) { + DebugKE("failed to copy lintel_reboot_param struct from user\n"); + return -EFAULT; + } + + DebugKE("image=0x%llx image_size=0x%llx\n", p.image, p.image_size); + + if (!PAGE_ALIGNED(p.image_size)) + return -EINVAL; + + read_USD_reg(&usd); + + if (ret = alloc_bootblock_mem(&bootblock)) + return ret; + + if (ret = alloc_lintel_code_mem(p.image_size, &image)) + goto out_bootblock; + + if (ret = copy_bootblock_mem(bootblock, bootblock_virt)) + goto out_code; + + if (ret = copy_lintel_code_mem(&image, p.image)) + goto out_code; + + if (ret = 
reserve_stack_mem(usd.USD_base)) + goto out_code; + + if (ret = find_continuous_lintel_code_mem(&image)) + goto out_stack; + + if (DEBUG_KEXEC_MODE) + __memblock_dump_all(); + + DebugKE("shutdown devices, point of noreturn\n"); + kernel_restart_prepare(NULL); + disable_IO_APIC(); + + smp_param.bootblock = bootblock; + smp_param.image = ℑ + smp_param.initrd = 0; + smp_param.reboot = boot_lintel_reboot_sequel; + + smp_kexec_reboot_param_to_phys(&smp_param); + + smp_call_function(do_kexec_reboot, &smp_param, 0); + do_kexec_reboot(&smp_param); + + BUG(); + +out_stack: + unreserve_stack_mem(usd.USD_base); +out_code: + free_lintel_code_mem(&image); +out_bootblock: + free_bootblock_mem(bootblock); + + return ret; +} + + +/* + * common init block + */ + +static long kexec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct pid_namespace *pid_ns = task_active_pid_ns(current); + long ret = 0; + + if (!ns_capable(pid_ns->user_ns, CAP_SYS_BOOT)) + return -EPERM; + + if (!mutex_trylock(&kexec_mutex)) + return -EBUSY; + + DebugKE("kexec ioctl started: cmd=0x%x, arg=0x%lx\n", cmd, arg); + + switch (cmd) { + case KEXEC_REBOOT: + ret = kexec_reboot((struct kexec_reboot_param *)arg); + break; + case LINTEL_REBOOT: + ret = lintel_reboot((struct lintel_reboot_param *)arg); + break; + default: + ret = -EINVAL; + } + + mutex_unlock(&kexec_mutex); + + return ret; +} + +static const struct file_operations kexec_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = kexec_ioctl, +}; + +static struct miscdevice kexec_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "kexec", + .fops = &kexec_fops, +}; + +static int __init kexec_init(void) +{ + int rval = 0; + + rval = misc_register(&kexec_miscdev); + if (rval) { + pr_info("kexec: cannot register miscdev on minor %d (err %d)\n", + kexec_miscdev.minor, rval); + return rval; + } + + DebugKE("kexec driver registered on minor %d: KEXEC_REBOOT=0x%lx LINTEL_REBOOT=0x%lx\n", + kexec_miscdev.minor, KEXEC_REBOOT, 
LINTEL_REBOOT); + + return rval; +} + +static void __exit kexec_exit(void) +{ + misc_deregister(&kexec_miscdev); + DebugKE("kexec driver deregistered\n"); +} + +module_init(kexec_init); +module_exit(kexec_exit); + +MODULE_AUTHOR("Pavel V. Panteleev"); +MODULE_DESCRIPTION("Elbrus kernel and lintel exec driver"); +MODULE_LICENSE("GPL"); diff --git a/arch/e2k/kernel/kprobes.c b/arch/e2k/kernel/kprobes.c new file mode 100644 index 000000000000..48a94d0701e9 --- /dev/null +++ b/arch/e2k/kernel/kprobes.c @@ -0,0 +1,488 @@ +/* + * Kernel Probes (KProbes) + * arch/e2k/kernel/kprobes.c + */ + +#include +#include +#include +#include +#include + +#include +#include + +DEFINE_PER_CPU(struct kprobe *, current_kprobe); +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); + +static void replace_instruction(unsigned long *src, unsigned long phys_dst, + int instr_size) +{ + int i; + + for (i = 0; i < instr_size / 8; i++) + NATIVE_WRITE_MAS_D(phys_dst + 8 * i, src[i], MAS_STORE_PA); +} + +static unsigned long copy_instr(unsigned long *src, unsigned long *dst, + int duplicated_dst) +{ + unsigned long phys_ip_dst; + int node; + int instr_size; + instr_cs0_t *cs0; + void *instr; + + /* + * Copy instruction to a local variable + */ + instr_size = get_instr_size_by_vaddr((unsigned long) src); + instr = __builtin_alloca(instr_size); + memcpy(instr, src, instr_size); + + /* + * Jump values must be corrected when they are relative to %ip + */ + cs0 = find_cs0(instr); + if (cs0) { + instr_hs_t *hs = (instr_hs_t *) &E2K_GET_INSTR_HS(instr); + instr_ss_t *ss; + signed long delta; + + if (hs->s) + ss = (instr_ss_t *) &E2K_GET_INSTR_SS(instr); + else + ss = NULL; + + if (cs0->ctp_opc == CS0_CTP_OPC_DISP && cs0->ctpr || + cs0->ctp_opc == CS0_CTP_OPC_LDISP && cs0->ctpr == 2 || + cs0->ctp_opc == CS0_CTP_OPC_PUTTSD && cs0->ctpr == 0 || + cs0->ctp_opc == CS0_CTP_OPC_IBRANCH && !cs0->ctpr && + ss && !ss->ctop) { + delta = (signed long) src - (signed long) dst; + cs0->cof2.disp += delta >> 3L; + } 
else if (cs0->ctp_opc == CS0_CTP_OPC_PREF && !cs0->ctpr) { + signed long pref_dst = (signed long) src + + ((signed long) cs0->pref.pdisp << 40L) >> 33L; + + delta = pref_dst - (signed long) dst; + cs0->pref.pdisp = delta >> 7L; + } + } + + for_each_node_has_dup_kernel(node) { + phys_ip_dst = node_kernel_address_to_phys(node, + (e2k_addr_t) dst); + if (phys_ip_dst == -EINVAL) { + printk(KERN_ALERT"kprobes: can't find phys_ip\n"); + return -EFAULT; + } + + replace_instruction(instr, phys_ip_dst, instr_size); + + if (!duplicated_dst || !THERE_IS_DUP_KERNEL) + break; + + /* Modules are not duplicated */ + if (!is_duplicated_code((unsigned long) dst)) + break; + } + + return instr_size; +} + +int __kprobes arch_prepare_kprobe(struct kprobe *p) +{ + int instr_size; + + p->ainsn.insn = get_insn_slot(); + if (!p->ainsn.insn) + return -ENOMEM; + + instr_size = copy_instr((unsigned long *)p->addr, + (unsigned long *)p->ainsn.insn, false); + if (instr_size < 0) { + printk(KERN_ALERT"kprobes: can't get instruction size\n"); + return -EFAULT; + } + + /* + * We need to store one additional instruction after the copied one + * to make sure processor won't generate exc_illegal_opcode instead + * of exc_last_wish/exc_instr_debug (exc_illegal_opcode has priority). 
+ */ + *(unsigned long *) &p->ainsn.insn[instr_size] = 0UL; + + return 0; +} + +static void arch_replace_insn_all_nodes(unsigned long insn, unsigned long ip) +{ + unsigned long phys_ip; + int node; + + for_each_node_has_dup_kernel(node) { + phys_ip = node_kernel_address_to_phys(node, ip); + if (phys_ip == -EINVAL) { + printk(KERN_ALERT"kprobes: can't find phys_ip\n"); + WARN_ON_ONCE(1); + break; + } + + NATIVE_WRITE_MAS_D(phys_ip, insn, MAS_STORE_PA); + + if (!THERE_IS_DUP_KERNEL) + break; + } +} + +static void flush_instruction(struct kprobe *p) +{ + unsigned long addr = (unsigned long) p->addr; + + flush_icache_range(addr, addr + + MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); +} + +void __kprobes arch_arm_kprobe(struct kprobe *p) +{ + unsigned long break_instr; + + break_instr = KPROBE_BREAK_1; + + if (cpu_has(CPU_HWBUG_BREAKPOINT_INSTR)) { + instr_hs_t *hs, *break_hs; + + hs = (instr_hs_t *) p->addr; + break_hs = (instr_hs_t *) &break_instr; + + break_hs->lng = hs->lng; + } + + arch_replace_insn_all_nodes(break_instr, (unsigned long)p->addr); + flush_instruction(p); +} + +void __kprobes arch_disarm_kprobe(struct kprobe *p) +{ + copy_instr((unsigned long *) p->ainsn.insn, + (unsigned long *) p->addr, true); + flush_instruction(p); +} + +static unsigned long __kprobes +install_interrupt(struct pt_regs *regs, int single_step) +{ + unsigned long flags, base, index, spilled, ret_addr; + e2k_mem_crs_t *frame; + + raw_all_irq_save(flags); + + base = AS(regs->stacks.pcsp_lo).base; + index = AS(regs->stacks.pcsp_hi).ind; + spilled = AS(READ_PCSP_HI_REG()).ind; + + if (spilled <= index) + E2K_FLUSHC; + + frame = (e2k_mem_crs_t *) (base + index); + + /* Set exception to fire on return */ + if (single_step) { + if (cpu_has(CPU_HWBUG_SS)) + set_ts_flag(TS_SINGLESTEP_KERNEL); + AS(frame->cr1_lo).ss = 1; + AS(frame->cr1_lo).ie = 0; + AS(frame->cr1_lo).nmie = 0; + } else { + AS(frame->cr1_lo).lw = 1; + } + + /* Save return ip */ + --frame; + ret_addr = AS(frame->cr0_hi).ip << 3; + 
+ raw_all_irq_restore(flags); + + return ret_addr; +} + +static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) +{ + install_interrupt(regs, true); + + regs->crs.cr0_hi.fields.ip = (u64)(p->ainsn.insn) >> 3; +} + +static int maybe_is_call(void *instr) +{ + instr_cs1_t *cs1; + instr_hs_t *hs; + instr_ss_t *ss; + + hs = (instr_hs_t *) &E2K_GET_INSTR_HS(instr); + if (hs->s) + ss = (instr_ss_t *) &E2K_GET_INSTR_SS(instr); + else + ss = NULL; + + cs1 = find_cs1(instr); + if (cs1 && ss && ss->ctop && cs1->opc == CS1_OPC_CALL) + return true; + + return false; +} + +static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) +{ + unsigned long slot_addr = (unsigned long) p->ainsn.insn; + int instr_size = get_instr_size_by_vaddr(slot_addr); + + if ((AS(regs->crs.cr0_hi).ip << 3) == slot_addr + instr_size) { + /* + * Instruction did not jump so set the next %ip + * to point after the kprobed instruction. + */ + AS(regs->crs.cr0_hi).ip = (u64)(p->addr + instr_size) >> 3; + } else if (maybe_is_call(p->ainsn.insn)) { + /* + * Instruction could be a call. In this case + * check _previous_ chain stack frame. 
+ */ + unsigned long flags, base, index, spilled; + e2k_mem_crs_t *frame; + + raw_all_irq_save(flags); + + base = AS(regs->stacks.pcsp_lo).base; + index = AS(regs->stacks.pcsp_hi).ind; + spilled = AS(READ_PCSP_HI_REG()).ind; + + if (spilled < index) + E2K_FLUSHC; + + frame = (e2k_mem_crs_t *) (base + index); + --frame; + + if ((AS(frame->cr0_hi).ip << 3) == slot_addr + instr_size) + AS(frame->cr0_hi).ip = (u64)(p->addr + instr_size) >> 3; + + raw_all_irq_restore(flags); + } +} + +static void __kprobes set_current_kprobe(struct kprobe *p) +{ + __this_cpu_write(current_kprobe, p); +} + +static int __kprobes kprobe_handler(struct pt_regs *regs) +{ + struct kprobe *p; + kprobe_opcode_t *addr; + struct kprobe_ctlblk *kcb; + + addr = (kprobe_opcode_t *)instruction_pointer(regs); + + /* + * We don't want to be preempted for the entire + * duration of kprobe processing. + */ + preempt_disable(); + + /* Check we're not actually recursing */ + if (kprobe_running()) + goto no_kprobe; + + p = get_kprobe(addr); + if (!p) + goto no_kprobe; + + set_current_kprobe(p); + if (p->pre_handler && p->pre_handler(p, regs)) + /* handler has already set things up, so skip ss setup */ + return 1; + + prepare_singlestep(p, regs); + + kcb = get_kprobe_ctlblk(); + + kcb->kprobe_status = KPROBE_HIT_SS; + + return 1; + +no_kprobe: + preempt_enable_no_resched(); + + return 0; +} + +void __kprobes kprobe_instr_debug_handle(struct pt_regs *regs) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + unsigned long flags, base, spilled, index; + e2k_mem_crs_t *frame; + e2k_dibsr_t dibsr; + bool singlestep; + + if (!cur || kcb->kprobe_status != KPROBE_HIT_SS) + return; + + /* + * Make sure another overflow does not happen while + * we are handling this one to avoid races. 
+ */ + raw_all_irq_save(flags); + dibsr = READ_DIBSR_REG(); + singlestep = AS(dibsr).ss; + if (singlestep) { + if (cpu_has(CPU_HWBUG_SS)) + clear_ts_flag(TS_SINGLESTEP_KERNEL); + AS(dibsr).ss = 0; + WRITE_DIBSR_REG(dibsr); + } + + /* + * Re-enable interrupts in %psr + */ + base = AS(regs->stacks.pcsp_lo).base; + index = AS(regs->stacks.pcsp_hi).ind; + spilled = AS(READ_PCSP_HI_REG()).ind; + if (spilled <= index) + E2K_FLUSHC; + frame = (e2k_mem_crs_t *) (base + index); + if (AS(frame->cr1_lo).uie) + AS(frame->cr1_lo).ie = 1; + if (AS(frame->cr1_lo).unmie) + AS(frame->cr1_lo).nmie = 1; + + raw_all_irq_restore(flags); + + if (!singlestep) + return; + + if (cur->post_handler) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + cur->post_handler(cur, regs, 0); + } + + resume_execution(cur, regs); + reset_current_kprobe(); + preempt_enable_no_resched(); +} + +int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) + return 1; + + /* + * No need to call resume_execution() or do anything else - + * exc_instr_debug will still be delivered. 
+ */ + + return 0; +} +NOKPROBE_SYMBOL(kprobe_fault_handler); + +/* + * Handling exceptions + */ +int __kprobes kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + struct die_args *args = (struct die_args *)data; + int ret = NOTIFY_DONE; + + switch (val) { + case DIE_BREAKPOINT: + if (kprobe_handler(args->regs)) + ret = NOTIFY_STOP; + break; + default: + break; + } + + return ret; +} + +int __init arch_init_kprobes(void) +{ + return 0; +} + + +bool arch_within_kprobe_blacklist(unsigned long addr) +{ + return (addr >= (unsigned long) __kprobes_text_start && + addr < (unsigned long) __kprobes_text_end) || + (addr >= (unsigned long) _t_entry && + addr < (unsigned long) _t_entry_end) || + (addr >= (unsigned long) __entry_handlers_start && + addr < (unsigned long) __entry_handlers_end); +} + +#ifdef CONFIG_KRETPROBES +void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, + struct pt_regs *regs) +{ + ri->ret_addr = (kprobe_opcode_t *) install_interrupt(regs, false); +} + +int arch_trampoline_kprobe(struct kprobe *p) +{ + /* We don't use trampoline */ + return 0; +} +NOKPROBE_SYMBOL(arch_trampoline_kprobe); + +int kretprobe_last_wish_handle(struct pt_regs *regs) +{ + struct kretprobe_instance *ri = NULL; + struct hlist_head *head, empty_rp; + struct hlist_node *tmp; + unsigned long flags, ret_address; + int handled = 0; + + INIT_HLIST_HEAD(&empty_rp); + + /* + * It is possible to have multiple instances associated with a given + * exc_last_wish because more than one return probe was registered + * for a target function. + */ + kretprobe_hash_lock(current, &head, &flags); + hlist_for_each_entry_safe(ri, tmp, head, hlist) { + if (ri->task != current) + /* another task is sharing our hash bucket */ + continue; + + ret_address = (unsigned long) ri->ret_addr; + if (ret_address != instruction_pointer(regs)) + /* + * Any other instances associated with this task + * are for other calls deeper on the call stack. 
+ */ + break; + + if (ri->rp && ri->rp->handler) { + handled = 1; + ri->rp->handler(ri, regs); + } + + recycle_rp_inst(ri, &empty_rp); + } + kretprobe_hash_unlock(current, &flags); + + hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { + hlist_del(&ri->hlist); + kfree(ri); + } + + return handled; +} +#endif diff --git a/arch/e2k/kernel/ksyms.c b/arch/e2k/kernel/ksyms.c new file mode 100644 index 000000000000..bc5e9f567211 --- /dev/null +++ b/arch/e2k/kernel/ksyms.c @@ -0,0 +1,7 @@ +#ifdef CONFIG_PARAVIRT_SPINLOCKS + +#include + +EXPORT_SYMBOL(__pv_queued_spin_unlock); + +#endif /* CONFIG_PARAVIRT_SPINLOCKS */ diff --git a/arch/e2k/kernel/libeprof/Makefile b/arch/e2k/kernel/libeprof/Makefile new file mode 100644 index 000000000000..d8f1095f629b --- /dev/null +++ b/arch/e2k/kernel/libeprof/Makefile @@ -0,0 +1,19 @@ + +ifeq ($(LCC_124),y) +obj-y := libkeprof_24.o +else +obj-y := libkeprof_cur.o +endif + +empty:= +space:= $(empty) $(empty) +ORIG_CFLAGS := $(KBUILD_CFLAGS) +ORIG_CFLAGS := $(subst $(space)-fprofile-generate-kernel$(space),$(space),$(ORIG_CFLAGS)) +ORIG_CFLAGS := $(subst $(space)-fprofile-use="$(PROFILE_USE)"$(space),$(space),$(ORIG_CFLAGS)) +KBUILD_CFLAGS = $(ORIG_CFLAGS) + +ifeq ($(LCC_124),y) +CFLAGS_REMOVE_libkeprof_24.o := -pg +else +CFLAGS_REMOVE_libkeprof_cur.o := -pg +endif diff --git a/arch/e2k/kernel/libeprof/libkeprof_24.c b/arch/e2k/kernel/libeprof/libkeprof_24.c new file mode 100644 index 000000000000..0d8a2bd49796 --- /dev/null +++ b/arch/e2k/kernel/libeprof/libkeprof_24.c @@ -0,0 +1,1781 @@ + +#include +#include +#include +#include +#include +#include + +/*****************************************************************************/ +/********************************* prof_defs.h *******************************/ +/*****************************************************************************/ + +#ifdef ECOMP_SUPPORT_INTERNAL_CHECK +#define PROF_CHECK +#endif /* ECOMP_SUPPORT_INTERNAL_CHECK */ + +#define PROF_FILE_IDEN_LEN (5 + 1) /* 
Profile identification sign length */ + +#define PROF_FILE_IDEN_STR_C "VER.C" + +#define PROF_BYTES_IN_UINT 4 /*!< @brief prof_UInt_t type size */ +#define PROF_BYTES_IN_UINT64 8 /*!< @brief prof_UInt64_t type size */ +#define PROF_BITS_IN_BYTE 8 /*!< @brief Bits in byte */ + +/********************************* prof_defs.h *******************************/ + + +#define PROFILE_FILENAME "kernel_profile" + +const int prof_BuffSize = 20 * 1024 * 1024; + +/** + * @brief Buffer for profile content + * + * Allocation of 20 Mb + */ +static char prof_ProfileBuff[prof_BuffSize] = {0}; + +/** + * @brief Flag is enabled if saving to buffer launched at first time + */ +static bool save_first_time = true; + +/** + * @brief Number of times the profile dump proc is called + */ +static int profile_counters_num = 1; + +/** + * @brief The increment value. While preparing dumps we set it to zero so that + * our counters should not be broken + */ +unsigned long ecomp_ProfileIncrement = 0; + +/*****************************************************************************/ +/******************************** prof_types.h *******************************/ +/*****************************************************************************/ + +typedef unsigned int prof_UInt_t; + +typedef unsigned long long prof_UInt64_t; + +/** + * @brief Profile mode + */ +typedef enum { +PROF_FUNC_ATTR_EDGES = 0x2, /*!< Edges */ +PROF_FUNC_ATTR_LOOPS = 0x4, /*!< Loop profile */ +PROF_FUNC_ATTR_VALUE_PROF = 0x40, /*!< Value profiling */ +PROF_FUNC_ATTR_LOOPSOUTER = 0x100 /*!< Outer loop profile */ +} prof_FuncAttr_t; + +/******************************** prof_types.h *******************************/ + +/*****************************************************************************/ +/******************************** prof_utils.h *******************************/ +/*****************************************************************************/ + 
+/****************************************************************************** + *********************************** Macross ********************************** + *****************************************************************************/ + +#define PROF_ERROR_MSG_LENGTH 1024 + +#define prof_IsStrStartEQ(str1, str2) \ +(\ +strncmp(str1, str2, strlen(str2)) == 0 \ +) + +#ifdef ECOMP_SUPPORT_INTERNAL_CHECK + +#define PROF_ASSERT(cond) \ +do { if (!(cond)) panic("assertion failed"); } while (0); + +#else /* ECOMP_SUPPORT_INTERNAL_CHECK */ + +#define PROF_ASSERT(cond) + +#endif /* ECOMP_SUPPORT_INTERNAL_CHECK */ + +/********************************** Macross **********************************/ + +/****************************************************************************** + ************* ********************* Types ************************************ + *****************************************************************************/ + +/** + * @brief Custom bool + */ +typedef enum { +PROF_FALSE = 0, +PROF_TRUE = 1 +} prof_Bool_t; + +/************************************ Types **********************************/ + +/******************************** prof_utils.h *******************************/ + +/*****************************************************************************/ +/******************************** prof_utils.c *******************************/ +/*****************************************************************************/ + +/** + * @brief Memory allocation + * + * @returns Pointer to allocated memory + */ +static void * +prof_Malloc(size_t size) { /* [in] Size of allocation */ + void *ptr; /* Pointer to allocated area */ + + ptr = kmalloc(size, /*GFP_USER*/GFP_KERNEL); + + if (ptr == NULL) { + /* Break if can't allocate memory */ + panic("Not enough memory for profiling\n"); + } + + return ptr; +} /* prof_Malloc */ + +/** + * @brief Memory allocation and initialization + * + * @returns Pointer to allocated memory + */ +static void * +prof_Calloc(size_t 
len, /* [in] Number of elements */ + size_t size) { /* [in] Element size */ + void *ptr; + + ptr = kcalloc(len, size, GFP_KERNEL); + + if (ptr == NULL) { + /* Break if can't allocate memory */ + panic("Not enough memory for profiling\n"); + } + + return ptr; +} /* prof_Calloc */ + +/** + * @brief Free allocated memory + */ +static void +prof_Free(void *ptr) { /* [in,out] Allocated memory */ +} /* prof_Free */ + +/******************************** prof_utils.c *******************************/ + + + +/*****************************************************************************/ +/******************************** prof_hashrt.h ******************************/ +/*****************************************************************************/ + +#define PROF_HASH_ALL_ENTRIES(entry, table) \ +((entry) = prof_HashGetFirstEntry((table)); \ +(entry) != NULL; \ +(entry) = prof_HashGetNextEntry((entry))) + +/******************************************************************************* + ************************************ Types ************************************ + ******************************************************************************/ + +/** + * @brief Key and value types for hash table + */ +typedef enum { +PROF_HASH_VOID_PTR_TYPE, +PROF_HASH_STRING_TYPE, +PROF_HASH_UINT64_TYPE +} prof_HashType_t; + +/** + * @brief Hash table entry + */ +typedef struct prof_HashEntry_r { + union { + const char *s_key;/*!< String key value */ + prof_UInt64_t uint64_key; /*!< Integer key value */ + }; + + struct prof_HashEntry_r *next;/*!< Next entry with current index */ + struct prof_HashEntry_r *prev;/*!< Previous entry with current index */ + struct prof_HashEntry_r *next_in_table;/*!< Next index in the table */ +/*< Previous index in the table */ + struct prof_HashEntry_r *prev_in_table; + + union { + void *v_value;/*!< User type value in hash */ + char *s_value;/*!< String value in hash */ + prof_UInt64_t uint64_value;/*!< Int value in hash */ + }; +} prof_HashEntry_t; + +/** + 
* @brief Hash table + */ +typedef struct { + prof_HashEntry_t **hash_table;/*!< Hash element array */ + prof_UInt_t table_dimension;/*!< Array size for hash */ + prof_UInt_t size;/*!< Entry number */ + prof_HashEntry_t *first;/*!< First entry in hash */ + prof_HashEntry_t *last;/*!< Last entry in hash */ + prof_HashType_t key_type;/*!< Key type */ + prof_HashType_t val_type;/*!< Value type */ +} prof_HashTable_t; + +/******************************** prof_hashrt.h ******************************/ + + + +/*****************************************************************************/ +/******************************** prof_hashrt.c ******************************/ +/*****************************************************************************/ + +/******************************************************************************* + *********************************** Macross *********************************** + ******************************************************************************/ + +#define PROF_HASH_ARRAY_SIZE_LN 10 + +#define PROF_HASH_ARRAY_SIZE (1 << PROF_HASH_ARRAY_SIZE_LN) + +#define PROF_HASH_MASK (PROF_HASH_ARRAY_SIZE - 1) + +#define PROF_HASH_DOWN_SHIFT (sizeof(prof_UInt_t) * 8 - PROF_HASH_ARRAY_SIZE_LN) + +#define PROF_HASH_RANDOM_INDEX(i) /* hash function */ \ +(\ +(((((prof_UInt_t) (i))*1103515245) >> PROF_HASH_DOWN_SHIFT) & \ +((unsigned)PROF_HASH_MASK))\ +) + +/*********************************** Macross **********************************/ + +/******************************************************************************* + ************************ Work with type prof_HashTable_t ********************** + ******************************************************************************/ + +/** + * @brief Create new table + * + * @returns Created table or NULL + */ +prof_HashTable_t * +prof_HashCreate(prof_HashType_t key_type,/* [in] Key type */ +prof_HashType_t value_type) {/* [in] Record type */ + prof_HashTable_t *self; /* Creating table */ + + self = 
(prof_HashTable_t *) prof_Malloc(sizeof(prof_HashTable_t)); + + if (self == NULL) { + return NULL; + } + + memset(self, 0, sizeof(prof_HashTable_t)); + + self->table_dimension = PROF_HASH_ARRAY_SIZE; + self->hash_table = (prof_HashEntry_t **) +prof_Calloc(PROF_HASH_ARRAY_SIZE, sizeof(prof_HashEntry_t *)); + self->key_type = key_type; + self->val_type = value_type; + + if (self->hash_table == NULL) { + prof_Free(self); + return NULL; + } + + return self; +} /* prof_HashCreate */ + +/** + * @brief Delete table with records (dat ain records should be deleted separately) + */ +void +prof_HashDestroy(prof_HashTable_t *table) { /* [in,out] Deleting table */ + prof_Free(table->hash_table); + prof_Free(table); +} /* prof_HashDestroy */ + +/** + * @brief Create new record + */ +static prof_HashEntry_t * +prof_HashCreateEntryByString(const char *key, +void *value) { + prof_HashEntry_t *self; + + self = (prof_HashEntry_t *) prof_Malloc(sizeof(prof_HashEntry_t)); + + if (!self) { + return NULL; + } + + memset(self, 0, sizeof(prof_HashEntry_t)); + + self->s_key = key; + self->v_value = value; + + return self; +} /* prof_HashCreateEntryByString */ + +/** + * @brief Create new record + */ +static prof_HashEntry_t * +prof_HashCreateVoidPtrEntryByUInt64(prof_UInt64_t key, +void *value) { + prof_HashEntry_t *self; + + self = (prof_HashEntry_t *) prof_Malloc(sizeof(prof_HashEntry_t)); + + if (!self) { + return NULL; + } + + memset(self, 0, sizeof(prof_HashEntry_t)); + + self->uint64_key = key; + self->v_value = value; + + return self; +} /* prof_HashCreateVoidPtrEntryByUInt64 */ + +/** + * @brief Create new record + */ +static prof_HashEntry_t * +prof_HashCreateUInt64EntryByUInt64(prof_UInt64_t key, +prof_UInt64_t value) { + prof_HashEntry_t *self; + + self = (prof_HashEntry_t *) prof_Malloc(sizeof(prof_HashEntry_t)); + + if (!self) { + return NULL; + } + + memset(self, 0, sizeof(prof_HashEntry_t)); + + self->uint64_key = key; + self->uint64_value = value; + + return self; +} /* 
prof_HashCreateUInt64EntryByUInt64 */ + +/** + * @brief Generate hash value by key (string) + */ +static prof_UInt_t +prof_HashStringFunc(const char *key) +{ + int i; + prof_UInt_t table_index, + summ, + length; + + summ = 0; + length = strlen(key); + + for (i = 0; i < length; i++) { + summ += (unsigned char)key[i]; + } + + table_index = PROF_HASH_RANDOM_INDEX(summ); + + return table_index; +} /* prof_HashStringFunc */ + +/** + * @brief Generate hash value by key (string) + */ +static prof_UInt_t +prof_HashUint64Func(prof_UInt64_t key) { + prof_UInt_t table_index; + + table_index = PROF_HASH_RANDOM_INDEX(key); + + return table_index; +} /* prof_HashUint64Func */ + +prof_UInt64_t +prof_HashGetEntryUInt64Key(const prof_HashEntry_t *entry) { + return entry->uint64_key; +} /* prof_HashGetEntryUInt64Key */ + +void * +prof_HashGetEntryVoidPtrVal(const prof_HashEntry_t *entry) { + return entry->v_value; +} /* prof_HashGetEntryVal */ + +char * +prof_HashGetEntryStringVal(const prof_HashEntry_t *entry) { + return entry->s_value; +} /* prof_HashGetEntryStringVal */ + +prof_UInt64_t +prof_HashGetEntryUInt64Val(const prof_HashEntry_t *entry) { + return entry->uint64_value; +} /* prof_HashGetEntryUInt64Val */ + +void +prof_HashSetEntryUInt64Val(prof_HashEntry_t *entry, + prof_UInt64_t val) { + entry->uint64_value = val; +} /* prof_HashGetEntryUInt64Val */ + +static prof_HashEntry_t * +prof_HashFindByString(const prof_HashTable_t *self, +const char *key) { + prof_UInt_t index; + prof_HashEntry_t *entry; + +PROF_ASSERT(self->key_type == PROF_HASH_STRING_TYPE); + + index = prof_HashStringFunc(key); + entry = self->hash_table[index]; + + while (entry != NULL) { + if (strcmp(entry->s_key, key) == 0) { + return entry; + } + + entry = entry->next; + } + + return NULL; +} /* prof_HashFindByString */ + +void * +prof_HashFindByStringAndGetVoidPtrValue(const prof_HashTable_t *self, +const char *key) { + prof_HashEntry_t *entry; + +PROF_ASSERT(self->key_type == PROF_HASH_STRING_TYPE); 
+PROF_ASSERT(self->val_type == PROF_HASH_VOID_PTR_TYPE); + + entry = prof_HashFindByString(self, key); + + if (entry == NULL) { + return NULL; + } + + return prof_HashGetEntryVoidPtrVal(entry); +} /* prof_HashFindByStringAndGetVoidPtrValue */ + +prof_HashEntry_t * +prof_HashFindByUInt64(const prof_HashTable_t *self, +prof_UInt64_t key) { + prof_UInt_t index; + prof_HashEntry_t *entry; + +PROF_ASSERT(self->key_type == PROF_HASH_UINT64_TYPE); + + index = prof_HashUint64Func(key); + entry = self->hash_table[index]; + + while (entry != NULL) { + if (entry->uint64_key == key) { + return entry; + } + + entry = entry->next; + } + + return NULL; +} /* prof_HashFindByUInt64 */ + +char * +prof_HashFindByUInt64AndGetCharValue(const prof_HashTable_t *self, +prof_UInt64_t key) { + prof_HashEntry_t *entry; + +PROF_ASSERT(self->key_type == PROF_HASH_UINT64_TYPE); + + entry = prof_HashFindByUInt64(self, key); + + if (entry == NULL) { + return NULL; + } + + return prof_HashGetEntryVoidPtrVal(entry); +} /* prof_HashFindByUInt64AndGetCharValue */ + +static void +prof_AddEntryByIndex(prof_HashTable_t *self, + prof_UInt_t index, + prof_HashEntry_t *new_entry) { + prof_HashEntry_t *place; + + place = self->hash_table[index]; + self->size++; + + if (place != NULL) { + while (place->next != NULL) { + place = place->next; + } + + place->next = new_entry; + new_entry->prev = place; + } else { + self->hash_table[index] = new_entry; + } + + if (self->first == NULL) { + self->first = new_entry; + self->last = new_entry; + } else { + new_entry->prev_in_table = self->last; + self->last->next_in_table = new_entry; + self->last = new_entry; + } +} /* prof_AddEntryByIndex */ + +/** + * @brief Add entry to the table with given key + * + * @note If an element with given key exist, a function does nothing + * + * @warning Key should be in a heap because it is not copied + */ +void +prof_HashAddVoidPtrValueByString(prof_HashTable_t *self, +const char *key, +void *value) { + prof_UInt_t index; + 
prof_HashEntry_t *new_entry; + +PROF_ASSERT(self->key_type == PROF_HASH_STRING_TYPE); +PROF_ASSERT(self->val_type == PROF_HASH_VOID_PTR_TYPE); + + if (prof_HashFindByStringAndGetVoidPtrValue(self, key) != NULL) { + return; + } + + index = prof_HashStringFunc(key); + new_entry = prof_HashCreateEntryByString(key, value); + + prof_AddEntryByIndex(self, index, new_entry); +} /* prof_HashAddVoidPtrValueByString */ + +/** + * @brief Add entry to the table with given key + * + * @note If an element with given key exist, a function does nothing + * + * @warning Key should be in a heap because it is not copied + */ +void +prof_HashAddStringValueByUInt64(prof_HashTable_t *self, +prof_UInt64_t key, +char *value) { + prof_UInt_t index; + prof_HashEntry_t *new_entry; + +PROF_ASSERT(self->key_type == PROF_HASH_UINT64_TYPE); +PROF_ASSERT(self->val_type == PROF_HASH_STRING_TYPE); + + if (prof_HashFindByUInt64AndGetCharValue(self, key) != NULL) { + return; + } + + index = prof_HashUint64Func(key); + new_entry = prof_HashCreateVoidPtrEntryByUInt64(key, value); + + prof_AddEntryByIndex(self, index, new_entry); +} /* prof_HashAddStringValueByUInt64 */ + +/** + * @brief Add entry to the table with given key + * + * @note If an element with given key exist, a function does nothing + * + * @warning Key should be in a heap because it is not copied + */ +void +prof_HashAddUInt64ValueByUInt64(prof_HashTable_t *self, +prof_UInt64_t key, +prof_UInt64_t value) { + prof_UInt_t index; + prof_HashEntry_t *new_entry; + +PROF_ASSERT(self->key_type == PROF_HASH_UINT64_TYPE); +PROF_ASSERT(self->val_type == PROF_HASH_UINT64_TYPE); + + if (prof_HashFindByUInt64AndGetCharValue(self, key) != NULL) { + return; + } + + index = prof_HashUint64Func(key); + new_entry = prof_HashCreateUInt64EntryByUInt64(key, value); + + prof_AddEntryByIndex(self, index, new_entry); +} /* prof_HashAddStringValueByUInt64 */ + +void +prof_HashDeleteEntryByString(prof_HashTable_t *self, + const char *key) { + prof_HashEntry_t 
*entry; + int index; + +PROF_ASSERT(self->key_type == PROF_HASH_STRING_TYPE); + + entry = prof_HashFindByString(self, key); + + if (entry == NULL) { + return; + } + + if (self->first == entry) { + self->first = entry->next_in_table; + } + + if (self->last == entry) { + self->last = entry->prev_in_table; + } + + if (entry->prev != NULL) { + entry->prev->next = entry->next; + } + + if (entry->next != NULL) { + entry->next->prev = entry->prev; + } + + if (entry->prev_in_table != NULL) { + entry->prev_in_table->next_in_table = entry->next_in_table; + } + + if (entry->next_in_table != NULL) { + entry->next_in_table->prev_in_table = entry->prev_in_table; + } + + index = prof_HashStringFunc(key); + + if (self->hash_table[index] == entry) { + if (entry->next != NULL) { + self->hash_table[index] = entry->next; + } else { + self->hash_table[index] = NULL; + } + } + + prof_Free(entry); +} /* prof_HashDeleteEntryByString */ + +void +prof_HashForEachEntry(prof_HashTable_t *self_p, + void (*user_func)(void *)) { + int cur_entry_num; + +PROF_ASSERT(self_p->hash_table != NULL); + + for (cur_entry_num = 0; + cur_entry_num < self_p->table_dimension; + cur_entry_num++) { + prof_HashEntry_t *cur_entry; + + cur_entry = self_p->hash_table[cur_entry_num]; + + while (cur_entry != NULL) { + user_func((void *)cur_entry->v_value); + cur_entry = cur_entry->next; + } + } +} /* prof_HashForEachEntry */ + +prof_HashEntry_t * +prof_HashGetFirstEntry(const prof_HashTable_t *table) { + return table->first; +} /* prof_HashGetFirstEntry */ + +prof_HashEntry_t * +prof_HashGetNextEntry(const prof_HashEntry_t *entry) { + return entry->next_in_table; +} /* prof_HashGetFirstEntry */ + +prof_UInt_t +prof_HashGetElementNum(const prof_HashTable_t *self) { + return self->size; +} /* prof_HashGetElementNum */ + +/****************************** prof_HashTable_t *****************************/ + +/******************************** prof_hashrt.c ******************************/ + + + + 
+/*****************************************************************************/ +/******************************* prof_profilert.h ****************************/ +/*****************************************************************************/ + +/** + * @brief Container with procedure profile data + */ +typedef struct { + /* General data */ + char *name;/*!< Procedure name */ + prof_FuncAttr_t attr;/*!< Options of profile dumps */ + prof_UInt_t cfg_checksum;/*!< Procedure checksum */ + + /* Edge profile */ + prof_UInt64_t *edge_counters;/*!< Edge counter array */ + prof_UInt_t num_edges;/*!< Number of edges inside procedure */ + + off_t edges_in_file;/*!< Offset for edges array in file */ +/*< Number of effectively dumped edge counters */ + prof_UInt_t dumped_edges_number; +} prof_Func_t; + +/** + * @brief Module profile + */ +typedef struct { + char *name;/*!< Module name */ +/*! Table with procedures of module */ + prof_HashTable_t *procedures; +/*! Offset for module info in file */ + off_t module_offset; +} prof_Module_t; + +/** + * @struct prof_ModulesEntry_t + * + * @brief Program modules table entry. Key is modules name value is pointer to an object + */ +typedef prof_HashEntry_t prof_ModulesEntry_t; + +/** + * @struct prof_ModulesTable_t + * @brief Program modules table + */ +typedef prof_HashTable_t prof_ModulesTable_t; + +typedef enum { + PROF_COUNT_OFFSETS = 0, /*!< Only count offsets. No write to buffer */ + PROF_WRITE_OFFSETS /*!< Only write to buffer. 
no offset count */ +} prof_FileWriteMode_t; + +/** + * @brief Program profile + */ +typedef struct { + prof_ModulesTable_t *modules; /*!< Table with modules */ +/*!< Buffer to dump profile info */ + char *prof_buff; +/*!< Current position in file */ + prof_UInt_t cur_buf_pos; +/*!< Offset of data as if it has been writen in file */ + prof_UInt_t offset; +/*!< Current file write mode */ + prof_FileWriteMode_t write_mode; +} prof_Program_t; + +/******************************************************************************/ + +#define PROF_ADDR_TO_NAME_ALL_ENTRIES(entry, table) \ +PROF_HASH_ALL_ENTRIES((entry), (table)) + +/******************************* prof_profilert.h ****************************/ + + + +/*****************************************************************************/ +/******************************* prof_profilert.c ****************************/ +/*****************************************************************************/ + +/******************************************************************************* + ******************************* prof_Func_t *********************************** + ******************************************************************************/ + +void +prof_FuncSetAttr(prof_Func_t *func, + prof_FuncAttr_t attr) { + func->attr = attr; +} /* prof_FuncSetAttr */ + +/** + * @brief Create object for function profile + */ +static prof_Func_t * +prof_FuncCreate(const char *name, + prof_FuncAttr_t attr) { + prof_Func_t *func; + + func = (prof_Func_t *) prof_Malloc(sizeof(prof_Func_t)); + memset(func, 0, sizeof(prof_Func_t)); + + func->name = kstrdup(name, GFP_KERNEL); + prof_FuncSetAttr(func, attr); + + return func; +} /* prof_FuncCreate */ + +const char * +prof_FuncGetName(const prof_Func_t *func) { + return func->name; +} /* prof_FuncGetName */ + +prof_FuncAttr_t +prof_FuncGetAttr(const prof_Func_t *func) { + return func->attr; +} /* prof_FuncGetAttr */ + +static void +prof_FuncSetEdgesNum(prof_Func_t *func, + unsigned 
num_edges, + prof_Bool_t create_arrays) { + func->num_edges = num_edges; + + if (create_arrays) { + func->edge_counters = +(prof_UInt64_t *)prof_Malloc(sizeof(prof_UInt64_t) * num_edges); + memset(func->edge_counters, +-1, +sizeof(prof_UInt64_t) * num_edges); + } +} /* prof_FuncSetEdgesNum */ + +prof_UInt_t +prof_FuncGetNumEdges(const prof_Func_t *func) { + return func->num_edges; +} /* prof_FuncGetNumEdges */ + +prof_UInt64_t +prof_FuncGetEdgeCounter(const prof_Func_t *func, +prof_UInt_t edge_num) { + return func->edge_counters[edge_num]; +} /* prof_FuncGetEdgeCounter */ + +void +prof_FuncSetEdgeCounter(const prof_Func_t *func, +prof_UInt_t edge_num, +prof_UInt64_t counter) { + func->edge_counters[edge_num] = counter; +} /* prof_FuncSetEdgeCounter */ + +prof_UInt_t +prof_FuncGetDumpedEdgesNum(const prof_Func_t *func) { + return func->dumped_edges_number; +} /* prof_FuncGetDumpedEdgesNum */ + +void +prof_FuncSetDumpedEdgesNum(prof_Func_t *func, +prof_UInt_t number) { + func->dumped_edges_number = number; +} /* prof_FuncSetDumpedEdgesNum */ + +void +prof_FuncIncrDumpedEdges(prof_Func_t *func) { + func->dumped_edges_number++; +} /* prof_FuncIncrDumpedEdges */ + +prof_UInt_t +prof_FuncGetChecksum(const prof_Func_t *func) { + return func->cfg_checksum; +} /* prof_FuncGetChecksum */ + +void +prof_FuncSetChecksum(prof_Func_t *func, +prof_UInt_t sum) { + func->cfg_checksum = sum; +} /* prof_FuncGetChecksum */ + +static void +prof_FuncDestroy(prof_Func_t *func) { + if (prof_FuncGetNumEdges(func) != 0) { + prof_Free(func->edge_counters); + } + + prof_Free(func->name); + prof_Free(func); +} /* prof_FuncDestroy */ + +/********************************** prof_Func_t *******************************/ + +/******************************************************************************* + ********************************** prof_Module_t ****************************** + ******************************************************************************/ + +/** + * @brief Create object with 
module profile + */ +static prof_Module_t * +prof_ModuleCreate(const char *name) { + prof_Module_t *module; + + module = (prof_Module_t *) prof_Malloc(sizeof(prof_Module_t)); + memset(module, 0, sizeof(prof_Module_t)); + + module->procedures = prof_HashCreate(PROF_HASH_STRING_TYPE, +PROF_HASH_VOID_PTR_TYPE); + module->name = kstrdup(name, GFP_KERNEL); + + return module; +} /* prof_ModuleCreate */ + +static void +prof_ModuleDestroy(prof_Module_t *module) { + prof_HashForEachEntry(module->procedures, +(void (*)(void *)) prof_FuncDestroy); + prof_HashDestroy(module->procedures); + prof_Free(module->name); + + prof_Free(module); +} /* prof_ModuleDestroy */ + +static void +prof_ModuleAddFunction(prof_Module_t *module, +prof_Func_t *function) { + prof_HashAddVoidPtrValueByString(module->procedures, +prof_FuncGetName(function), +function); +} /* prof_ModuleAddFunction */ + +void +prof_ModuleRemoveFunction(prof_Module_t *module, +const char *func_name) { + prof_HashDeleteEntryByString(module->procedures, func_name); +} /* prof_ModuleRemoveFunction*/ + +prof_Func_t * +prof_ModuleFindFunction(prof_Module_t *module, +const char *function_name) { + prof_Func_t *function; + + function = prof_HashFindByStringAndGetVoidPtrValue( +module->procedures, +function_name); + + return function; +} /* prof_ModuleFindFunction */ + +prof_HashTable_t * +prof_ModuleFunctions(prof_Module_t *module) { + return module->procedures; +} /* prof_ModuleFunctions */ + +/** + * @brief Find or create procedure inside module + */ +prof_Func_t * +prof_ModuleFindOrCreateFunction(prof_Module_t *module, +const char *function_name, +prof_FuncAttr_t func_attr, +unsigned edge_max_num, +unsigned loop_max_num, +unsigned loop_outer_max_num, +unsigned loop_outer_outer_max_num, +unsigned crc, +prof_UInt_t vprof_opers) { + prof_Func_t *function; + + function = prof_HashFindByStringAndGetVoidPtrValue(module->procedures, +function_name); + + if (function == NULL) { + function = prof_FuncCreate(function_name, 
+func_attr); + prof_FuncSetEdgesNum(function, +edge_max_num, +PROF_FALSE); + +PROF_ASSERT(crc != 0); + prof_HashAddVoidPtrValueByString(module->procedures, +function_name, +function); + } + + return function; +} /* prof_ModuleFindOrCreateFunction */ + +char * +prof_ModuleGetName(const prof_Module_t *module) { + return module->name; +} /* prof_ModuleGetName */ + +prof_UInt_t +prof_ModuleGetNumFunctions(const prof_Module_t *module) { + return prof_HashGetElementNum(module->procedures); +} /* prof_ModuleGetNumFunctions */ + +off_t +prof_ModuleGetOffset(const prof_Module_t *module) { + return module->module_offset; +} /* prof_ModuleGetOffset */ + +void +prof_ModuleSetOffset(prof_Module_t *module, +off_t offset) { + module->module_offset = offset; +} /* prof_ModuleSetOffset */ + +/********************************* prof_Module_t ******************************/ + +/******************************************************************************* + ********************************* prof_Program_t ****************************** + ******************************************************************************/ + +prof_Program_t * +prof_ProgCreate(prof_Bool_t is_vprof) { +prof_Program_t *profile; + + profile = (prof_Program_t *) prof_Malloc(sizeof(prof_Program_t)); + memset(profile, 0, sizeof(prof_Program_t)); + + profile->modules = prof_HashCreate(PROF_HASH_STRING_TYPE, +PROF_HASH_VOID_PTR_TYPE); + + profile->prof_buff = (char *)&prof_ProfileBuff; + profile->offset = 0; + profile->write_mode = PROF_COUNT_OFFSETS; + profile->cur_buf_pos = 0; + + return profile; +} /* prof_ProgCreate */ + +prof_ModulesTable_t * +prof_ProgGetModules(const prof_Program_t *profile) { + return profile->modules; +} /* prof_ProgGetModules */ + +void +prof_ProgDestroy(prof_Program_t *profile) { + prof_HashForEachEntry(prof_ProgGetModules(profile), +(void (*)(void *)) prof_ModuleDestroy); + + prof_HashDestroy(prof_ProgGetModules(profile)); + + prof_Free(profile); +} /* prof_ProgDestroy */ + 
+prof_Module_t * +prof_ProgFindModule(const prof_Program_t *profile, +const char *module_name) { + prof_Module_t *module; + + module = prof_HashFindByStringAndGetVoidPtrValue( + prof_ProgGetModules(profile), + module_name); + + return module; +} /* prof_ProgFindModule */ + +prof_Module_t * +prof_ProgFindOrCreateModule(prof_Program_t *profile, +const char *module_name) { + prof_Module_t *module; + + module = prof_ProgFindModule(profile, + module_name); + + if (module == NULL) { + + module = prof_ModuleCreate(module_name); + + prof_HashAddVoidPtrValueByString(prof_ProgGetModules(profile), +prof_ModuleGetName(module), +module); + } + + return module; +} /* prof_ProgFindOrCreateModule */ + +prof_UInt_t +prof_ProgGetNumModules(const prof_Program_t *profile) { + return prof_HashGetElementNum(profile->modules); +} /* prof_ProgGetNumModules */ + + +/*********************************** prof_Program_t **************************/ + +/******************************* prof_profilert.c ****************************/ + + + + +/*****************************************************************************/ +/********************************* prof_librt.h ******************************/ +/*****************************************************************************/ + +void +__BUILTIN_ecomp_prof_IncrLoopCounters(int loop_num, +int loop_oter_num, +int outer_loop_num, +int *current_iters, +int *iter_counters, +int *iter_outer_counters); + +void +__BUILTIN_ecomp_prof_RegProcSTDN_lib(const char *module_name, +const char *proc_name, +unsigned edges, +unsigned loops, +unsigned outer_loops, +unsigned outer_outer_loops, +unsigned stub6, +unsigned stub1, +unsigned stub2, +unsigned prof_opers_num, +prof_UInt_t stub9, +prof_UInt64_t *edge_counters, +prof_UInt64_t **loop_numbers, +prof_UInt64_t **loop_counters, +prof_UInt64_t *loop_outer_counters, +prof_UInt64_t *loop_outer_outer_counters, +prof_UInt64_t *outer_loop, +prof_UInt64_t *outer_outer_loop, +void *vprof_counters, +void *stub7, +void 
*stub8, +prof_FuncAttr_t func_attr, +unsigned cfg_checksum, +unsigned stub4, +unsigned stub5, +unsigned stub10); + + +/********************************* prof_librt.h ******************************/ + + + +/*****************************************************************************/ +/********************************* prof_librt.c ******************************/ +/*****************************************************************************/ + +#ifdef ECOMP_SUPPORT_INTERNAL_CHECK +#define PROF_DEBUG +#endif /* ECOMP_SUPPORT_INTERNAL_CHECK */ + +static prof_Program_t *prof_ProgramProfile = NULL; + + +/* #define PROF_DEBUG */ + +#ifdef PROF_DEBUG + +static prof_Bool_t prof_IsDebugSave = PROF_FALSE; + +static prof_Bool_t prof_IsDebugRuntime = PROF_FALSE; + +#if 0 +static prof_Bool_t prof_IsDebugOffset = PROF_FALSE; +#endif /* 0 */ + +#define prof_DebugRuntime(actions) \ +{ \ + if (prof_IsDebugRuntime) { \ + actions; \ + } \ +} /* prof_DebugRuntime */ + +#define prof_DebugSave(actions) \ +{ \ + if (prof_IsDebugSave) { \ + actions; \ + } \ +} /* prof_DebugSave */ + +#if 0 +#define prof_DebugOffset(actions) \ +{ \ + if (prof_IsDebugOffset) { \ + actions; \ + } \ +} /* prof_DebugOffset */ +#else /* 0 */ +#define prof_DebugOffset(actions) +#endif /* 0 */ + +#else /* PROF_DEBUG */ + +#define prof_DebugSave(action) +#define prof_DebugRuntime(actions) +#define prof_DebugOffset(actions) + +#endif /* PROF_DEBUG */ + +static void +prof_IncrOffset(prof_UInt_t off) { + prof_ProgramProfile->offset += off; +} /* prof_IncrOffset */ + +static prof_UInt_t +prof_GetCurrentOffset(void) { + return prof_ProgramProfile->offset; +} /* prof_GetCurrentOffset */ + +static void +prof_IncrCurBufPos(void) { + prof_ProgramProfile->cur_buf_pos++; +} /* prof_IncrCurBufPos */ + +static prof_UInt_t +prof_GetCurrentBufPos(void) { + return prof_ProgramProfile->cur_buf_pos; +} /* prof_GetCurrentBufPos */ + +static void +prof_Write(const char *buf, + size_t nbyte) { + int i; + + for (i = 0; i < nbyte; 
i++) { + prof_ProgramProfile->prof_buff[prof_GetCurrentBufPos()] = +buf[i]; + prof_IncrCurBufPos(); + } +} /* prof_Write */ + +static void +prof_DumpUInt(int file_descr, + prof_UInt_t val) { + if (prof_ProgramProfile->write_mode == PROF_WRITE_OFFSETS) { + prof_UInt_t temp; + unsigned char uint_arr[PROF_BYTES_IN_UINT]; + int i; + + temp = val; + for (i = 0; i < PROF_BYTES_IN_UINT; i++) { + val >>= PROF_BITS_IN_BYTE; + uint_arr[i] = temp - (val << PROF_BITS_IN_BYTE); + temp = val; + } + + prof_Write((char *)uint_arr, +PROF_BYTES_IN_UINT); + } else { + prof_IncrOffset(PROF_BYTES_IN_UINT); + + } +} /* prof_DumpUInt */ + +static void +prof_DumpUInt64(int file_descr, + prof_UInt64_t val) { + if (prof_ProgramProfile->write_mode == PROF_WRITE_OFFSETS) { + prof_UInt64_t temp; + unsigned char uint64_arr[PROF_BYTES_IN_UINT64]; + int i; + + temp = val; + for (i = 0; i < PROF_BYTES_IN_UINT64; i++) { + val = val >> PROF_BITS_IN_BYTE; + uint64_arr[i] = temp - (val << PROF_BITS_IN_BYTE); + temp = val; + } + + prof_Write((char *)uint64_arr, +PROF_BYTES_IN_UINT64); + } else { + prof_IncrOffset(PROF_BYTES_IN_UINT64); + } +} /* prof_DumpUInt64 */ + +static void +prof_DumpString(int file_descr, + const char *out_string, + size_t size) { + if (prof_ProgramProfile->write_mode == PROF_WRITE_OFFSETS) { + prof_Write(out_string, size); + } else { + prof_IncrOffset(size); + } +} /* prof_DumpString */ + +static void +prof_DumpProgramHeader(prof_Program_t *program_profile, +int file_descript) { + prof_DumpString(file_descript, +PROF_FILE_IDEN_STR_C, +PROF_FILE_IDEN_LEN); + + prof_DumpUInt(file_descript, prof_ProgGetNumModules(program_profile)); + prof_DumpUInt(file_descript, 0); + prof_DumpUInt(file_descript, 0); +} /* prof_DumpProgramHeader */ + +static void +prof_DumpModuleHeader(prof_Module_t *module_p, + int fd) { + int name_len; + name_len = strlen(prof_ModuleGetName(module_p)) + 1; + prof_DumpUInt(fd, name_len); + prof_DumpString(fd, prof_ModuleGetName(module_p), name_len); + 
prof_DumpUInt(fd, prof_ModuleGetNumFunctions(module_p)); + prof_DumpUInt(fd, (prof_UInt_t)prof_ModuleGetOffset(module_p)); +} /* prof_DumpModuleHeader */ + +static void +prof_DumpFuncHeader(prof_Func_t *func_p, +int fd) { + int name_len; + prof_DebugSave(pr_info( +"\n\n . prof_debug_save: Start writing proc '%s' header:\n", +prof_FuncGetName(func_p));); + + name_len = strlen(prof_FuncGetName(func_p)) + 1; + prof_DebugSave(pr_info( +" . prof_debug_save: Saving name len: %d\n", +name_len);); + prof_DumpUInt(fd, name_len); + prof_DumpString(fd, prof_FuncGetName(func_p), name_len); + + prof_DebugSave(pr_info(" . prof_debug_save: Saving attr:"); +pr_info("%s", (prof_FuncGetAttr(func_p) & PROF_FUNC_ATTR_EDGES) ? +" PROF_FUNC_ATTR_EDGES" : ""); +pr_info("%s", (prof_FuncGetAttr(func_p) & PROF_FUNC_ATTR_LOOPS) ? +" PROF_FUNC_ATTR_LOOPS" : ""); +pr_info("%s", (prof_FuncGetAttr(func_p) & PROF_FUNC_ATTR_VALUE_PROF) ? +" PROF_FUNC_ATTR_VALUE_PROF" : ""); +pr_info("%s", (prof_FuncGetAttr(func_p) & PROF_FUNC_ATTR_LOOPSOUTER) ? +" PROF_FUNC_ATTR_LOOPSOUTER" : ""); +pr_info("\n");); + prof_DumpUInt(fd, prof_FuncGetAttr(func_p)); + + prof_DebugSave(pr_info( +" . prof_debug_save: Saving edges_in_file: %ld\n", +(long)func_p->edges_in_file);); + prof_DumpUInt(fd, func_p->edges_in_file); + + /* out max number of edges */ + prof_DebugSave(pr_info( +" . prof_debug_save: Saving num_edges: %u\n", +prof_FuncGetNumEdges(func_p));); + prof_DumpUInt(fd, prof_FuncGetNumEdges(func_p)); + + /* out real number of edges */ + prof_DebugSave(pr_info( +" . prof_debug_save: Saving out_num_edges: %u\n", +prof_FuncGetDumpedEdgesNum(func_p));); + prof_DumpUInt(fd, prof_FuncGetDumpedEdgesNum(func_p)); + + prof_DebugSave(pr_info( +" . 
prof_debug_save: Saving loops_in_file: %ld\n", +(long)0);); + prof_DumpUInt(fd, 0); /* stub for loops profile */ + + /* out max number of loops */ + prof_DumpUInt(fd, 0); /* stub for loop profile */ + + prof_DumpUInt(fd, 0); + prof_DumpUInt(fd, 0); + + prof_DumpUInt(fd, 0); + prof_DumpUInt(fd, 0); + { + prof_DebugSave(pr_info( +" . prof_debug_save: Saving cfg_checksum: %u\n", +prof_FuncGetChecksum(func_p));); + prof_DumpUInt(fd, prof_FuncGetChecksum(func_p)); + } + + prof_DumpUInt(fd, 0); + prof_DumpUInt(fd, 0); + + prof_DumpUInt(fd, 0); + prof_DumpUInt(fd, 0); + prof_DumpUInt(fd, 0); + + prof_DebugSave(pr_info( +" . prof_debug_save: Finish writing proc '%s' header:\n\n\n", + func_p->name);); +} /* prof_DumpFuncHeader */ + +static void +prof_DumpEdgeProfile(prof_Func_t *func_p, + int fd) { + prof_UInt_t edge_number, + index; + + prof_FuncSetDumpedEdgesNum(func_p, 0); + edge_number = prof_FuncGetNumEdges(func_p); + + for (index = 0; index < edge_number; index++) { + prof_UInt64_t counter; + + counter = prof_FuncGetEdgeCounter(func_p, index); + + if (counter == 0 || + counter == -1) { + continue; + } + + prof_DebugSave(pr_info( +" . prof_debug_save: Saving edge %d counter %llu\n", + index, + counter);); + + prof_DumpUInt(fd, index); + prof_DumpUInt64(fd, counter); + prof_FuncIncrDumpedEdges(func_p); + } + +PROF_ASSERT(prof_FuncGetNumEdges(func_p) >= prof_FuncGetDumpedEdgesNum(func_p)); +} /* prof_DumpEdgeProfile */ + +static void +prof_DumpFuncProfile(prof_Func_t *func_p, + int fd) { + prof_FuncAttr_t func_attr = prof_FuncGetAttr(func_p); + + prof_DebugSave(pr_info( +"\n\n . prof_debug_save: Start writing proc '%s' profile:\n", +prof_FuncGetName(func_p));); + + func_p->edges_in_file = prof_GetCurrentOffset(); + + if (func_attr & PROF_FUNC_ATTR_EDGES) { + prof_DebugSave(pr_info( +" . prof_debug_save: Saving edge counters\n");); + + prof_DumpEdgeProfile(func_p, fd); + } + + prof_DebugSave(pr_info( +" . 
prof_debug_save: Finish writing proc '%s' profile:\n\n\n", +prof_FuncGetName(func_p));); +} /* prof_DumpFuncProfile */ + +static void +prof_DumpModuleProfile(prof_Module_t *module_p, + int fd) { + prof_HashEntry_t *proc_entry; + + prof_ModuleSetOffset(module_p, prof_GetCurrentOffset()); + +PROF_ASSERT(module_p->procedures != 0); + + for PROF_HASH_ALL_ENTRIES(proc_entry, prof_ModuleFunctions(module_p)) + { + prof_DumpFuncHeader(prof_HashGetEntryVoidPtrVal(proc_entry), + fd); + } + + for PROF_HASH_ALL_ENTRIES(proc_entry, prof_ModuleFunctions(module_p)) + { + prof_DumpFuncProfile(prof_HashGetEntryVoidPtrVal(proc_entry), + fd); + } + +} /* prof_DumpModuleProfile */ + +static void +prof_DumpModuleFuncHeader(prof_Module_t *module_p, + int fd) { + prof_HashEntry_t *proc_entry; + + if (prof_ProgramProfile->write_mode == PROF_COUNT_OFFSETS) + module_p->module_offset = prof_GetCurrentOffset(); + + for PROF_HASH_ALL_ENTRIES(proc_entry, prof_ModuleFunctions(module_p)) + { + prof_DumpFuncHeader(prof_HashGetEntryVoidPtrVal(proc_entry), + fd); + } +} /* prof_DumpModuleFuncHeader */ + +/** + * @brief Set counter increment to 0 + */ +static void +prof_MakeIncrStepZero(void) { + ecomp_ProfileIncrement = 0; +} /* prof_MakeIncrStepZero */ + +/** + * @brief Set counter increment to 1 + */ +void +prof_MakeIncrStepOne(void) { + ecomp_ProfileIncrement = 1; +} /* prof_MakeIncrStepOne */ + +/** + * @brief Clear buffer and variables for profile repeated dump + */ +static void +prof_ClearBuffer(void) { + int i; + + save_first_time = true; + + prof_ProgramProfile->offset = 0; + prof_ProgramProfile->write_mode = PROF_COUNT_OFFSETS; + prof_ProgramProfile->cur_buf_pos = 0; + + for (i = 0; i < prof_BuffSize; i++) + prof_ProfileBuff[i] = 0; +} /* prof_ClearBuffer */ + +void +prof_SaveFile(prof_Program_t *program_profile) { + int fd = 0; + prof_ModulesEntry_t *entry; + + if (!save_first_time) { + /* Fixed disagreeable effect of multiple entrer in this + * function. 
It should be entered only once */ + return; + } + + save_first_time = false; + prof_MakeIncrStepZero(); + prof_ProgramProfile->offset = 0; + prof_ProgramProfile->write_mode = PROF_COUNT_OFFSETS; + prof_ProgramProfile->cur_buf_pos = 0; + prof_ClearBuffer(); + + prof_DebugSave(pr_info( +" . prof_debug_save: Start saving program header\n");); + prof_DumpProgramHeader(program_profile, fd); + prof_DebugSave(pr_info( +" . prof_debug_save: Finish saving program header\n");); + + for PROF_HASH_ALL_ENTRIES(entry, prof_ProgGetModules(program_profile)) + { + prof_DumpModuleHeader(prof_HashGetEntryVoidPtrVal(entry), + fd); + } + + for PROF_HASH_ALL_ENTRIES(entry, prof_ProgGetModules(program_profile)) + { + prof_DumpModuleProfile(prof_HashGetEntryVoidPtrVal(entry), + fd); + } + + prof_ProgramProfile->write_mode = PROF_WRITE_OFFSETS; + + prof_DebugSave(pr_info( +" . prof_debug_save: Start saving program header\n");); + prof_DumpProgramHeader(program_profile, fd); + prof_DebugSave(pr_info( +" . prof_debug_save: Finish saving program header\n");); + + prof_DebugSave(pr_info( +" . prof_debug_save: Start saving module headers\n");); + for PROF_HASH_ALL_ENTRIES(entry, prof_ProgGetModules(program_profile)) + { + prof_DumpModuleHeader(prof_HashGetEntryVoidPtrVal(entry), +fd); + } + prof_DebugSave(pr_info( +" . prof_debug_save: Finish saving module headers\n");); + + prof_DebugSave(pr_info( +" . prof_debug_save: Start saving counters\n");); + /* TODO save pointers in file for functions */ + for PROF_HASH_ALL_ENTRIES(entry, prof_ProgGetModules(program_profile)) + { + prof_DumpModuleProfile(prof_HashGetEntryVoidPtrVal(entry), +fd); + } + prof_DebugSave(pr_info( +" . 
prof_debug_save: Finish saving counters\n");); +} /* prof_SaveFile */ + +/** + * @brief Dump of buffer into file + */ +static void +prof_DumpFile(struct seq_file *s) { /* [in] File descriptor */ + seq_write(s, prof_ProgramProfile->prof_buff, prof_GetCurrentBufPos()); +} /* prof_DumpFile */ + +void +__BUILTIN_ecomp_prof_RegProcSTDN_lib( +const char *module_name, +const char *proc_name, +unsigned edges, +unsigned loops, +unsigned outer_loops, +unsigned outer_outer_loops, +unsigned stub6, +unsigned stub1, +unsigned stub2, +unsigned prof_opers_num, +prof_UInt_t stub9, +prof_UInt64_t *edge_counters, +prof_UInt64_t **loop_numbers, +prof_UInt64_t **loop_counters, +prof_UInt64_t *loop_outer_counters, +prof_UInt64_t *loop_outer_outer_counters, +prof_UInt64_t *outer_loop, +prof_UInt64_t *outer_outer_loop, +void *vprof_counters, +void *stub7, +void *stub8, +prof_FuncAttr_t func_attr, +unsigned int cfg_checksum, +unsigned int stub4, +unsigned int stub5, +unsigned int stub10) +{ + prof_Module_t *module_p; + prof_Func_t *func_p; + + prof_DebugRuntime(pr_info( +"\n\n . prof_IsDebugRuntime: Start proc `%s' from module `%s' registration\n", + proc_name, + module_name);); + + module_p = prof_ProgFindOrCreateModule(prof_ProgramProfile, +module_name); + + prof_DebugRuntime(pr_info( +" . prof_IsDebugRuntime: Found or created object for module `%s'\n", +module_name);); + + func_p = prof_HashFindByStringAndGetVoidPtrValue(module_p->procedures, +proc_name); + if (func_p == NULL) { + int i = 0; + prof_DebugRuntime(pr_info( +" . prof_IsDebugRuntime: Creating new object for proc `%s' with checksum %u\n", +proc_name, +cfg_checksum); + pr_info( +" . prof_IsDebugRuntime: Proc attr"); + pr_info( +"%s", (func_attr & PROF_FUNC_ATTR_EDGES) ? " PROF_FUNC_ATTR_EDGES" : ""); + pr_info( +"%s", (func_attr & PROF_FUNC_ATTR_LOOPS) ? " PROF_FUNC_ATTR_LOOPS" : ""); + pr_info( +"%s", (func_attr & PROF_FUNC_ATTR_VALUE_PROF) ? 
+" PROF_FUNC_ATTR_VALUE_PROF" : ""); + pr_info( +"%s", (func_attr & PROF_FUNC_ATTR_LOOPSOUTER) ? +" PROF_FUNC_ATTR_LOOPSOUTER" : ""); + pr_info( +"\n");); + + func_p = prof_ModuleFindOrCreateFunction(module_p, +proc_name, +func_attr, +edges, +loops, +outer_loops, +outer_outer_loops, +cfg_checksum, +prof_opers_num); + + func_p->edge_counters = (prof_UInt64_t *) edge_counters; + prof_FuncSetChecksum(func_p, cfg_checksum); + + } else { + panic( +" . eprof_init_print: Multiple proc `%s' definition in module `%s'\n", +proc_name, +module_name); + } + + prof_DebugRuntime(pr_info( +"\n . prof_IsDebugRuntime: Finish proc `%s' from module `%s' registration\n\n", +proc_name, +module_name);); + +} /* __BUILTIN_ecomp_prof_RegProcSTDN_lib */ + +void +__BUILTIN_ecomp_prof_CreateProfileObj(prof_Bool_t is_vprof, +prof_Bool_t is_parallel, /* STUB */ +const char *path) { + if (prof_ProgramProfile != NULL) { + return; + } + + prof_DebugRuntime(pr_info( +" . prof_IsDebugRuntime: Using value profile: %s\n", +(is_vprof) ? "YES" : "NO");); + + prof_ProgramProfile = prof_ProgCreate(is_vprof); +} /* __BUILTIN_ecomp_prof_CreateProfileObj */ + +void +__BUILTIN_prof_PrintModuleInited(const char *module_name) { + + prof_DebugRuntime(pr_info(" . 
Module inited %s!!!\n", module_name);); +} /* __BUILTIN_prof_PrintModuleInited */ + +void +__BUILTIN_ecomp_prof_AtomicAdd64(prof_UInt64_t *res) { + atomic64_add(ecomp_ProfileIncrement, res); +} /* __BUILTIN_ecomp_prof_AtomicAdd64 */ + +/********************************* prof_librt.c ******************************/ + + +static int profile_seq_show(struct seq_file *s, void *v) +{ + int num = *((loff_t *) v); + + if (num >= profile_counters_num) + return 0; + + prof_DumpFile(s); + + return 0; +} + +static void *profile_seq_start(struct seq_file *s, loff_t *pos) +{ + if (*pos >= profile_counters_num) + return 0; + + prof_SaveFile(prof_ProgramProfile); + + return (void *) pos; +} + +static void *profile_seq_next(struct seq_file *s, void *v, + loff_t *pos) { + if ((*pos)++ >= profile_counters_num) + return 0; + return (void *) pos; +} + +static void profile_seq_stop(struct seq_file *s, void *v) +{ +} + +static const struct seq_operations profile_seq_ops = { + .start = profile_seq_start, + .next = profile_seq_next, + .stop = profile_seq_stop, + .show = profile_seq_show +}; + +static int profile_proc_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &profile_seq_ops); +} + +static ssize_t profile_write(struct file *file, const char __user *buf, + size_t count, loff_t *data) { + prof_ClearBuffer(); + + return count; +} + +static const struct file_operations profile_proc_fops = { + .owner = THIS_MODULE, + .open = profile_proc_open, + .write = profile_write, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + +static int __init kernel_profile_init(void) +{ + proc_create(PROFILE_FILENAME, S_IRUGO | S_IWUSR, + NULL, &profile_proc_fops); + return 0; +} + +module_init(kernel_profile_init); + diff --git a/arch/e2k/kernel/libeprof/libkeprof_cur.c b/arch/e2k/kernel/libeprof/libkeprof_cur.c new file mode 100644 index 000000000000..d15c91dcdf33 --- /dev/null +++ b/arch/e2k/kernel/libeprof/libkeprof_cur.c @@ -0,0 +1,1736 @@ + +#include 
+#include +#include +#include +#include +#include + +/*****************************************************************************/ +/********************************* prof_defs.h *******************************/ +/*****************************************************************************/ + +#ifdef ECOMP_SUPPORT_INTERNAL_CHECK +#define PROF_CHECK +#endif /* ECOMP_SUPPORT_INTERNAL_CHECK */ + +#define PROF_FILE_IDEN_LEN (5 + 1) /* Profile identification sign length */ + +#define PROF_FILE_IDEN_STR_C "VER.C" + +#define PROF_BYTES_IN_UINT 4 /*!< @brief prof_UInt_t type size */ +#define PROF_BYTES_IN_UINT64 8 /*!< @brief prof_UInt64_t type size */ +#define PROF_BITS_IN_BYTE 8 /*!< @brief Bits in byte */ + +/********************************* prof_defs.h *******************************/ + + +#define PROFILE_FILENAME "kernel_profile" + +const int prof_BuffSize = 20 * 1024 * 1024; + +/** + * @brief Buffer for profile content + * + * Allocation of 20 Mb + */ +static char prof_ProfileBuff[prof_BuffSize] = {0}; + +/** + * @brief Flag is enabled if saving to buffer launched at first time + */ +static bool save_first_time = true; + +/** + * @brief Number of times the profile dump proc is called + */ +static int profile_counters_num = 1; + +/** + * @brief The increment value. 
While preparing dumps we set it to zero so that + * our counters should not be broken + */ +unsigned long ecomp_ProfileIncrement = 0; + +/*****************************************************************************/ +/******************************** prof_types.h *******************************/ +/*****************************************************************************/ + +typedef unsigned int prof_UInt_t; + +typedef unsigned long long prof_UInt64_t; + +/** + * @brief Profile mode + */ +typedef enum { +PROF_FUNC_ATTR_EDGES = 0x2, /*!< Edges */ +PROF_FUNC_ATTR_LOOPS = 0x4, /*!< Loop profile */ +PROF_FUNC_ATTR_VALUE_PROF = 0x40, /*!< Value profiling */ +PROF_FUNC_ATTR_LOOPSOUTER = 0x100 /*!< Outer loop profile */ +} prof_FuncAttr_t; + +/******************************** prof_types.h *******************************/ + +/*****************************************************************************/ +/******************************** prof_utils.h *******************************/ +/*****************************************************************************/ + +/****************************************************************************** + *********************************** Macross ********************************** + *****************************************************************************/ + +#define PROF_ERROR_MSG_LENGTH 1024 + +#define prof_IsStrStartEQ(str1, str2) \ +(\ +strncmp(str1, str2, strlen(str2)) == 0 \ +) + +#ifdef ECOMP_SUPPORT_INTERNAL_CHECK + +#define PROF_ASSERT(cond) \ +do { if (!(cond)) panic("assertion failed"); } while (0); + +#else /* ECOMP_SUPPORT_INTERNAL_CHECK */ + +#define PROF_ASSERT(cond) + +#endif /* ECOMP_SUPPORT_INTERNAL_CHECK */ + +/*********************************** Macross *********************************/ + +/****************************************************************************** + ************************************ Types *********************************** + 
*****************************************************************************/ + +/** + * @brief Custom bool + */ +typedef enum { +PROF_FALSE = 0, +PROF_TRUE = 1 +} prof_Bool_t; + +/************************************ Types **********************************/ + +/******************************** prof_utils.h *******************************/ + +/*****************************************************************************/ +/******************************** prof_utils.c *******************************/ +/*****************************************************************************/ + +/** + * @brief Memory allocation + * + * @returns Pointer to allocated memory + */ +void * +prof_Malloc(size_t size) { /* [in] Size of allocation */ + void *ptr; /* Pointer to allocated area */ + + ptr = kmalloc(size, /*GFP_USER*/GFP_KERNEL); + + if (ptr == NULL) { + /* Break if can't allocate memory */ + panic("Not enough memory for profiling\n"); + } + + return ptr; +} /* prof_Malloc */ + +/** + * @brief Memory allocation and initialization + * + * @returns Pointer to allocated memory + */ +void * +prof_Calloc(size_t len, /* [in] Number of elements */ + size_t size) { /* [in] Element size */ + void *ptr; + + ptr = kcalloc(len, size, GFP_KERNEL); + + if (ptr == NULL) { + /* Break if can't allocate memory */ + panic("Not enough memory for profiling\n"); + } + + return ptr; +} /* prof_Calloc */ + +/** + * @brief Free allocated memory + */ +void +prof_Free(void *ptr) { /* [in,out] Allocated memory */ +} /* prof_Free */ + +/******************************** prof_utils.c *******************************/ + + + +/*****************************************************************************/ +/******************************** prof_hashrt.h ******************************/ +/*****************************************************************************/ + +#define PROF_HASH_ALL_ENTRIES(entry, table) \ + ((entry) = prof_HashGetFirstEntry((table)); \ + (entry) != NULL; \ + (entry) = 
prof_HashGetNextEntry((entry))) + +/******************************************************************************* + ************************************ Types ************************************ + ******************************************************************************/ + +/** + * @brief Key and value types for hash table + */ +typedef enum { +PROF_HASH_VOID_PTR_TYPE, +PROF_HASH_STRING_TYPE, +PROF_HASH_UINT64_TYPE +} prof_HashType_t; + +/** + * @brief Hash table entry + */ +typedef struct prof_HashEntry_r { + union { + const char *s_key; /*!< String key value */ + prof_UInt64_t uint64_key; /*!< Integer key value */ + }; + +/*!< Next entry with current index */ + struct prof_HashEntry_r *next; +/*!< Previous entry with current index */ + struct prof_HashEntry_r *prev; +/*!< Next index in the table */ + struct prof_HashEntry_r *next_in_table; +/*!< Previous index in the table */ + struct prof_HashEntry_r *prev_in_table; + + union { + void *v_value;/*!< User type value in hash */ + char *s_value;/*!< String value in hash */ + prof_UInt64_t uint64_value;/*!< Int value in hash */ + }; +} prof_HashEntry_t; + +/** + * @brief Hash table + */ +typedef struct { + prof_HashEntry_t **hash_table; /*!< Hash element array */ + prof_UInt_t table_dimension; /*!< Array size for hash */ + prof_UInt_t size; /*!< Entry number */ + prof_HashEntry_t *first; /*!< First entry in hash */ + prof_HashEntry_t *last; /*!< Last entry in hash */ + prof_HashType_t key_type; /*!< Key type */ + prof_HashType_t val_type; /*!< Value type */ +} prof_HashTable_t; + +/******************************** prof_hashrt.h ******************************/ + + + +/*****************************************************************************/ +/******************************** prof_hashrt.c ******************************/ +/*****************************************************************************/ + +/******************************************************************************* + 
*********************************** Macross *********************************** + ******************************************************************************/ + +#define PROF_HASH_ARRAY_SIZE_LN 10 + +#define PROF_HASH_ARRAY_SIZE (1 << PROF_HASH_ARRAY_SIZE_LN) + +#define PROF_HASH_MASK (PROF_HASH_ARRAY_SIZE - 1) + +#define PROF_HASH_DOWN_SHIFT (sizeof(prof_UInt_t) * 8 - PROF_HASH_ARRAY_SIZE_LN) + +#define PROF_HASH_RANDOM_INDEX(i) \ +(\ +(((((prof_UInt_t) (i))*1103515245) >> PROF_HASH_DOWN_SHIFT) & \ +((unsigned)PROF_HASH_MASK))\ +) + +/*********************************** Macross **********************************/ + +/******************************************************************************* + ************************ Work with type prof_HashTable_t ********************** + ******************************************************************************/ + +/** + * @brief Create new table + * + * @returns Created table or NULL + */ +prof_HashTable_t * +prof_HashCreate(prof_HashType_t key_type, /* [in] Key type */ + prof_HashType_t value_type) { /* [in] Record type */ + prof_HashTable_t *self; /* Creating table */ + + self = (prof_HashTable_t *) prof_Malloc(sizeof(prof_HashTable_t)); + + if (self == NULL) { + return NULL; + } + + memset(self, 0, sizeof(prof_HashTable_t)); + + self->table_dimension = PROF_HASH_ARRAY_SIZE; + self->hash_table = (prof_HashEntry_t **) +prof_Calloc(PROF_HASH_ARRAY_SIZE, sizeof(prof_HashEntry_t *)); + self->key_type = key_type; + self->val_type = value_type; + + if (self->hash_table == NULL) { + prof_Free(self); + return NULL; + } + + return self; +} /* prof_HashCreate */ + +/** + * @brief Delete table with records (data in records should be deleted separately) + */ +void +prof_HashDestroy(prof_HashTable_t *table) /* [in,out] Deleting table */ +{ + prof_Free(table->hash_table); + prof_Free(table); +} /* prof_HashDestroy */ + +/** + * @brief Create new record + */ +static prof_HashEntry_t * +prof_HashCreateEntryByString(const char
*key, + void *value) { + prof_HashEntry_t *self; + + self = (prof_HashEntry_t *) prof_Malloc(sizeof(prof_HashEntry_t)); + + if (!self) { + return NULL; + } + + memset(self, 0, sizeof(prof_HashEntry_t)); + + self->s_key = key; + self->v_value = value; + + return self; +} /* prof_HashCreateEntryByString */ + +/** + * @brief Create new record + */ +static prof_HashEntry_t * +prof_HashCreateVoidPtrEntryByUInt64(prof_UInt64_t key, + void *value) { + prof_HashEntry_t *self; + + self = (prof_HashEntry_t *) prof_Malloc(sizeof(prof_HashEntry_t)); + + if (!self) { + return NULL; + } + + memset(self, 0, sizeof(prof_HashEntry_t)); + + self->uint64_key = key; + self->v_value = value; + + return self; +} /* prof_HashCreateVoidPtrEntryByUInt64 */ + +/** + * @brief Create new record + */ +static prof_HashEntry_t * +prof_HashCreateUInt64EntryByUInt64(prof_UInt64_t key, + prof_UInt64_t value) { + prof_HashEntry_t *self; + + self = (prof_HashEntry_t *) prof_Malloc(sizeof(prof_HashEntry_t)); + + if (!self) { + return NULL; + } + + memset(self, 0, sizeof(prof_HashEntry_t)); + + self->uint64_key = key; + self->uint64_value = value; + + return self; +} /* prof_HashCreateUInt64EntryByUInt64 */ + +/** + * @brief Generate hash value by key (string) + */ +static prof_UInt_t +prof_HashStringFunc(const char *key) { + int i; + prof_UInt_t table_index, + summ, + length; + + summ = 0; + length = strlen(key); + + for (i = 0; i < length; i++) { + summ += (unsigned char)key[i]; + } + + table_index = PROF_HASH_RANDOM_INDEX(summ); + + return table_index; +} /* prof_HashStringFunc */ + +/** + * @brief Generate hash value by key (string) + */ +static prof_UInt_t +prof_HashUint64Func(prof_UInt64_t key) { + prof_UInt_t table_index; + + table_index = PROF_HASH_RANDOM_INDEX(key); + + return table_index; +} /* prof_HashUint64Func */ + +prof_UInt64_t +prof_HashGetEntryUInt64Key(const prof_HashEntry_t *entry) { + return entry->uint64_key; +} /* prof_HashGetEntryUInt64Key */ + +void * 
+prof_HashGetEntryVoidPtrVal(const prof_HashEntry_t *entry) { + return entry->v_value; +} /* prof_HashGetEntryVal */ + +char * +prof_HashGetEntryStringVal(const prof_HashEntry_t *entry) { + return entry->s_value; +} /* prof_HashGetEntryStringVal */ + +prof_UInt64_t +prof_HashGetEntryUInt64Val(const prof_HashEntry_t *entry) { + return entry->uint64_value; +} /* prof_HashGetEntryUInt64Val */ + +void +prof_HashSetEntryUInt64Val(prof_HashEntry_t *entry, + prof_UInt64_t val) { + entry->uint64_value = val; +} /* prof_HashGetEntryUInt64Val */ + +static prof_HashEntry_t * +prof_HashFindByString(const prof_HashTable_t *self, + const char *key) { + prof_UInt_t index; + prof_HashEntry_t *entry; + +PROF_ASSERT(self->key_type == PROF_HASH_STRING_TYPE); + + index = prof_HashStringFunc(key); + entry = self->hash_table[index]; + + while (entry != NULL) { + if (strcmp(entry->s_key, key) == 0) { + return entry; + } + + entry = entry->next; + } + + return NULL; +} /* prof_HashFindByString */ + +void * +prof_HashFindByStringAndGetVoidPtrValue(const prof_HashTable_t *self, + const char *key) { + prof_HashEntry_t *entry; + +PROF_ASSERT(self->key_type == PROF_HASH_STRING_TYPE); +PROF_ASSERT(self->val_type == PROF_HASH_VOID_PTR_TYPE); + + entry = prof_HashFindByString(self, key); + + if (entry == NULL) { + return NULL; + } + + return prof_HashGetEntryVoidPtrVal(entry); +} /* prof_HashFindByStringAndGetVoidPtrValue */ + +prof_HashEntry_t * +prof_HashFindByUInt64(const prof_HashTable_t *self, + prof_UInt64_t key) { + prof_UInt_t index; + prof_HashEntry_t *entry; + +PROF_ASSERT(self->key_type == PROF_HASH_UINT64_TYPE); + + index = prof_HashUint64Func(key); + entry = self->hash_table[index]; + + while (entry != NULL) { + if (entry->uint64_key == key) { + return entry; + } + + entry = entry->next; + } + + return NULL; +} /* prof_HashFindByUInt64 */ + +char * +prof_HashFindByUInt64AndGetCharValue(const prof_HashTable_t *self, + prof_UInt64_t key) { + prof_HashEntry_t *entry; + 
+PROF_ASSERT(self->key_type == PROF_HASH_UINT64_TYPE); + + entry = prof_HashFindByUInt64(self, key); + + if (entry == NULL) { + return NULL; + } + + return prof_HashGetEntryVoidPtrVal(entry); +} /* prof_HashFindByUInt64AndGetCharValue */ + +static void +prof_AddEntryByIndex(prof_HashTable_t *self, + prof_UInt_t index, + prof_HashEntry_t *new_entry) { + prof_HashEntry_t *place; + + place = self->hash_table[index]; + self->size++; + + if (place != NULL) { + while (place->next != NULL) { + place = place->next; + } + + place->next = new_entry; + new_entry->prev = place; + } else { + self->hash_table[index] = new_entry; + } + + if (self->first == NULL) { + self->first = new_entry; + self->last = new_entry; + } else { + new_entry->prev_in_table = self->last; + self->last->next_in_table = new_entry; + self->last = new_entry; + } +} /* prof_AddEntryByIndex */ + +/** + * @brief Add entry to the table with given key + * + * @note If an element with given key exist, a function does nothing + * + * @warning Key should be in a heap because it is not copied + */ +void +prof_HashAddVoidPtrValueByString(prof_HashTable_t *self, + const char *key, + void *value) { + prof_UInt_t index; + prof_HashEntry_t *new_entry; + +PROF_ASSERT(self->key_type == PROF_HASH_STRING_TYPE); +PROF_ASSERT(self->val_type == PROF_HASH_VOID_PTR_TYPE); + + if (prof_HashFindByStringAndGetVoidPtrValue(self, key) != NULL) { + return; + } + + index = prof_HashStringFunc(key); + new_entry = prof_HashCreateEntryByString(key, value); + + prof_AddEntryByIndex(self, index, new_entry); +} /* prof_HashAddVoidPtrValueByString */ + +/** + * @brief Add entry to the table with given key + * + * @note If an element with given key exist, a function does nothing + * + * @warning Key should be in a heap because it is not copied + */ +void +prof_HashAddStringValueByUInt64(prof_HashTable_t *self, + prof_UInt64_t key, + char *value) { + prof_UInt_t index; + prof_HashEntry_t *new_entry; + +PROF_ASSERT(self->key_type == 
PROF_HASH_UINT64_TYPE); +PROF_ASSERT(self->val_type == PROF_HASH_STRING_TYPE); + + if (prof_HashFindByUInt64AndGetCharValue(self, key) != NULL) { + return; + } + + index = prof_HashUint64Func(key); + new_entry = prof_HashCreateVoidPtrEntryByUInt64(key, value); + + prof_AddEntryByIndex(self, index, new_entry); +} /* prof_HashAddStringValueByUInt64 */ + +/** + * @brief Add entry to the table with given key + * + * @note If an element with given key exist, a function does nothing + * + * @warning Key should be in a heap because it is not copied + */ +void +prof_HashAddUInt64ValueByUInt64(prof_HashTable_t *self, + prof_UInt64_t key, + prof_UInt64_t value) { + prof_UInt_t index; + prof_HashEntry_t *new_entry; + +PROF_ASSERT(self->key_type == PROF_HASH_UINT64_TYPE); +PROF_ASSERT(self->val_type == PROF_HASH_UINT64_TYPE); + + if (prof_HashFindByUInt64AndGetCharValue(self, key) != NULL) { + return; + } + + index = prof_HashUint64Func(key); + new_entry = prof_HashCreateUInt64EntryByUInt64(key, value); + + prof_AddEntryByIndex(self, index, new_entry); +} /* prof_HashAddStringValueByUInt64 */ + +void +prof_HashDeleteEntryByString(prof_HashTable_t *self, + const char *key) { + prof_HashEntry_t *entry; + int index; + +PROF_ASSERT(self->key_type == PROF_HASH_STRING_TYPE); + + entry = prof_HashFindByString(self, key); + + if (entry == NULL) { + return; + } + + if (self->first == entry) { + self->first = entry->next_in_table; + } + + if (self->last == entry) { + self->last = entry->prev_in_table; + } + + if (entry->prev != NULL) { + entry->prev->next = entry->next; + } + + if (entry->next != NULL) { + entry->next->prev = entry->prev; + } + + if (entry->prev_in_table != NULL) { + entry->prev_in_table->next_in_table = entry->next_in_table; + } + + if (entry->next_in_table != NULL) { + entry->next_in_table->prev_in_table = entry->prev_in_table; + } + + index = prof_HashStringFunc(key); + + if (self->hash_table[index] == entry) { + if (entry->next != NULL) { + self->hash_table[index] = 
entry->next; + } else { + self->hash_table[index] = NULL; + } + } + + prof_Free(entry); +} /* prof_HashDeleteEntryByString */ + +void +prof_HashForEachEntry(prof_HashTable_t *self_p, + void (*user_func)(void *)) { + int cur_entry_num; + +PROF_ASSERT(self_p->hash_table != NULL); + + for (cur_entry_num = 0; + cur_entry_num < self_p->table_dimension; + cur_entry_num++) { + prof_HashEntry_t *cur_entry; + + cur_entry = self_p->hash_table[cur_entry_num]; + + while (cur_entry != NULL) { + user_func((void *)cur_entry->v_value); + cur_entry = cur_entry->next; + } + } +} /* prof_HashForEachEntry */ + +prof_HashEntry_t * +prof_HashGetFirstEntry(const prof_HashTable_t *table) { + return table->first; +} /* prof_HashGetFirstEntry */ + +prof_HashEntry_t * +prof_HashGetNextEntry(const prof_HashEntry_t *entry) { + return entry->next_in_table; +} /* prof_HashGetFirstEntry */ + +prof_UInt_t +prof_HashGetElementNum(const prof_HashTable_t *self) { + return self->size; +} /* prof_HashGetElementNum */ + +/****************************** prof_HashTable_t *****************************/ + +/******************************** prof_hashrt.c ******************************/ + + + + +/*****************************************************************************/ +/******************************* prof_profilert.h ****************************/ +/*****************************************************************************/ + +/** + * @brief Container with procedure profile data + */ +typedef struct { + /* General data */ + char *name; /*!< Procedure name */ + prof_FuncAttr_t attr; /*!< Options of profile dumps */ + prof_UInt_t cfg_checksum; /*!< Procedure checksum */ + prof_Bool_t is_excluded; + + /* Edge profile */ + prof_UInt64_t *edge_counters; /*!< Edge counter array */ + prof_UInt_t num_edges;/*!< Number of edges inside procedure */ + + off_t edges_in_file;/*!< Offset for edges array in file */ +/*!< Number of effectively dumped edge counters */ + prof_UInt_t dumped_edges_number; +} 
prof_Func_t; + +/** + * @brief Module profile + */ +typedef struct { + char *name; /*!< Module name */ + /*!< Table with procedures of module */ + prof_HashTable_t *procedures; +/*!< Offset for module info in file */ + off_t module_offset; +} prof_Module_t; + +/** + * @struct prof_ModulesEntry_t + * + * @brief Program modules table entry. Key is the module name; value is a pointer to the module object + */ +typedef prof_HashEntry_t prof_ModulesEntry_t; + +/** + * @struct prof_ModulesTable_t + * @brief Program modules table + */ +typedef prof_HashTable_t prof_ModulesTable_t; + +typedef enum { + PROF_COUNT_OFFSETS = 0, /*!< Only count offsets. No write to buffer */ + PROF_WRITE_OFFSETS /*!< Only write to buffer. No offset count */ +} prof_FileWriteMode_t; + +/** + * @brief Program profile + */ +typedef struct { + prof_ModulesTable_t *modules; /*!< Table with modules */ + + char *prof_buff; /*!< Buffer to dump profile info */ + prof_UInt_t cur_buf_pos; /*!< Current position in file */ +/*!< Offset of data as if it had been written to file */ + prof_UInt_t offset; + prof_FileWriteMode_t write_mode; /*!< Current file write mode */ +} prof_Program_t; + +/******************************************************************************/ + +#define PROF_ADDR_TO_NAME_ALL_ENTRIES(entry, table) \ +PROF_HASH_ALL_ENTRIES((entry), (table)) + +/******************************* prof_profilert.h ****************************/ + + + +/*****************************************************************************/ +/******************************* prof_profilert.c ****************************/ +/*****************************************************************************/ + +/******************************************************************************* + ******************************* prof_Func_t *********************************** + ******************************************************************************/ + +void +prof_FuncSetAttr(prof_Func_t *func, + prof_FuncAttr_t attr) { + func->attr =
attr; +} /* prof_FuncSetAttr */ + +/** + * @brief Create object for function profile + */ +static prof_Func_t * +prof_FuncCreate(const char *name, + prof_FuncAttr_t attr) { + prof_Func_t *func; + + func = (prof_Func_t *) prof_Malloc(sizeof(prof_Func_t)); + memset(func, 0, sizeof(prof_Func_t)); + + func->name = kstrdup(name, GFP_KERNEL); + prof_FuncSetAttr(func, attr); + + return func; +} /* prof_FuncCreate */ + +const char * +prof_FuncGetName(const prof_Func_t *func) { + return func->name; +} /* prof_FuncGetName */ + +prof_FuncAttr_t +prof_FuncGetAttr(const prof_Func_t *func) { + return func->attr; +} /* prof_FuncGetAttr */ + +static void +prof_FuncSetEdgesNum(prof_Func_t *func, + unsigned num_edges, + prof_Bool_t create_arrays) { + func->num_edges = num_edges; + + if (create_arrays) { + func->edge_counters = (prof_UInt64_t *) +prof_Malloc(sizeof(prof_UInt64_t) * num_edges); + memset(func->edge_counters, +-1, +sizeof(prof_UInt64_t) * num_edges); + } +} /* prof_FuncSetEdgesNum */ + +prof_UInt_t +prof_FuncGetNumEdges(const prof_Func_t *func) { + return func->num_edges; +} /* prof_FuncGetNumEdges */ + +prof_UInt64_t +prof_FuncGetEdgeCounter(const prof_Func_t *func, + prof_UInt_t edge_num) { + return func->edge_counters[edge_num]; +} /* prof_FuncGetEdgeCounter */ + +void +prof_FuncSetEdgeCounter(const prof_Func_t *func, + prof_UInt_t edge_num, + prof_UInt64_t counter) { + func->edge_counters[edge_num] = counter; +} /* prof_FuncSetEdgeCounter */ + +prof_UInt_t +prof_FuncGetDumpedEdgesNum(const prof_Func_t *func) { + return func->dumped_edges_number; +} /* prof_FuncGetDumpedEdgesNum */ + +void +prof_FuncSetDumpedEdgesNum(prof_Func_t *func, + prof_UInt_t number) { + func->dumped_edges_number = number; +} /* prof_FuncSetDumpedEdgesNum */ + +void +prof_FuncIncrDumpedEdges(prof_Func_t *func) { + func->dumped_edges_number++; +} /* prof_FuncIncrDumpedEdges */ + +prof_UInt_t +prof_FuncGetChecksum(const prof_Func_t *func) { + return func->cfg_checksum; +} /* prof_FuncGetChecksum 
*/ + +void +prof_FuncSetChecksum(prof_Func_t *func, + prof_UInt_t sum) { + func->cfg_checksum = sum; +} /* prof_FuncGetChecksum */ + +static void +prof_FuncDestroy(prof_Func_t *func) { + if (prof_FuncGetNumEdges(func) != 0) { + prof_Free(func->edge_counters); + } + + prof_Free(func->name); + prof_Free(func); +} /* prof_FuncDestroy */ + +/********************************** prof_Func_t *******************************/ + +/******************************************************************************* + ********************************** prof_Module_t ****************************** + ******************************************************************************/ + +/** + * @brief Create object with module profile + */ +static prof_Module_t * +prof_ModuleCreate(const char *name) { + prof_Module_t *module; + + module = (prof_Module_t *) prof_Malloc(sizeof(prof_Module_t)); + memset(module, 0, sizeof(prof_Module_t)); + + module->procedures = prof_HashCreate(PROF_HASH_STRING_TYPE, +PROF_HASH_VOID_PTR_TYPE); + module->name = kstrdup(name, GFP_KERNEL); + + return module; +} /* prof_ModuleCreate */ + +static void +prof_ModuleDestroy(prof_Module_t *module) { + prof_HashForEachEntry(module->procedures, +(void (*)(void *)) prof_FuncDestroy); + prof_HashDestroy(module->procedures); + prof_Free(module->name); + + prof_Free(module); +} /* prof_ModuleDestroy */ + +void +prof_ModuleAddFunction(prof_Module_t *module, + prof_Func_t *function) { + prof_HashAddVoidPtrValueByString(module->procedures, + prof_FuncGetName(function), + function); +} /* prof_ModuleAddFunction */ + +void +prof_ModuleRemoveFunction(prof_Module_t *module, + const char *func_name) { + prof_HashDeleteEntryByString(module->procedures, func_name); +} /* prof_ModuleRemoveFunction*/ + +prof_Func_t * +prof_ModuleFindFunction(prof_Module_t *module, + const char *function_name) { + prof_Func_t *function; + + function = prof_HashFindByStringAndGetVoidPtrValue(module->procedures, + function_name); + + return function; +} 
/* prof_ModuleFindFunction */ + +prof_HashTable_t * +prof_ModuleFunctions(prof_Module_t *module) { + return module->procedures; +} /* prof_ModuleFunctions */ + +/** + * @brief Find or create procedure inside module + */ +prof_Func_t * +prof_ModuleFindOrCreateFunction(prof_Module_t *module, +const char *function_name, +prof_FuncAttr_t func_attr, +unsigned edge_max_num, +unsigned loop_max_num, +unsigned loop_outer_max_num, +unsigned loop_outer_outer_max_num, +unsigned crc, +prof_UInt_t vprof_opers) { + prof_Func_t *function; + + function = prof_HashFindByStringAndGetVoidPtrValue(module->procedures, +function_name); + + if (function == NULL) { + function = prof_FuncCreate(function_name, +func_attr); + prof_FuncSetEdgesNum(function, +edge_max_num, +PROF_FALSE); + +PROF_ASSERT(crc != 0); + prof_HashAddVoidPtrValueByString(module->procedures, +function_name, +function); + } + + return function; +} /* prof_ModuleFindOrCreateFunction */ + +char * +prof_ModuleGetName(const prof_Module_t *module) { + return module->name; +} /* prof_ModuleGetName */ + +prof_UInt_t +prof_ModuleGetNumFunctions(const prof_Module_t *module) { + return prof_HashGetElementNum(module->procedures); +} /* prof_ModuleGetNumFunctions */ + +off_t +prof_ModuleGetOffset(const prof_Module_t *module) { + return module->module_offset; +} /* prof_ModuleGetOffset */ + +void +prof_ModuleSetOffset(prof_Module_t *module, + off_t offset) { + module->module_offset = offset; +} /* prof_ModuleSetOffset */ + +/********************************* prof_Module_t ******************************/ + +/******************************************************************************* + ********************************* prof_Program_t ****************************** + ******************************************************************************/ + +prof_Program_t * +prof_ProgCreate(prof_Bool_t is_vprof) { + prof_Program_t *profile; + + profile = (prof_Program_t *) prof_Malloc(sizeof(prof_Program_t)); + memset(profile, 0, 
sizeof(prof_Program_t)); + + profile->modules = prof_HashCreate(PROF_HASH_STRING_TYPE, +PROF_HASH_VOID_PTR_TYPE); + + profile->prof_buff = (char *)&prof_ProfileBuff; + profile->offset = 0; + profile->write_mode = PROF_COUNT_OFFSETS; + profile->cur_buf_pos = 0; + + return profile; +} /* prof_ProgCreate */ + +prof_ModulesTable_t * +prof_ProgGetModules(const prof_Program_t *profile) { + return profile->modules; +} /* prof_ProgGetModules */ + +void +prof_ProgDestroy(prof_Program_t *profile) { + prof_HashForEachEntry(prof_ProgGetModules(profile), + (void (*)(void *)) prof_ModuleDestroy); + + prof_HashDestroy(prof_ProgGetModules(profile)); + + prof_Free(profile); +} /* prof_ProgDestroy */ + +prof_Module_t * +prof_ProgFindModule(const prof_Program_t *profile, + const char *module_name) { + prof_Module_t *module; + + module = prof_HashFindByStringAndGetVoidPtrValue( + prof_ProgGetModules(profile), + module_name); + + return module; +} /* prof_ProgFindModule */ + +prof_Module_t * +prof_ProgFindOrCreateModule(prof_Program_t *profile, + const char *module_name) { + prof_Module_t *module; + + module = prof_ProgFindModule(profile, + module_name); + + if (module == NULL) { + module = prof_ModuleCreate(module_name); + + prof_HashAddVoidPtrValueByString(prof_ProgGetModules(profile), + prof_ModuleGetName(module), + module); + } + + return module; +} /* prof_ProgFindOrCreateModule */ + +prof_UInt_t +prof_ProgGetNumModules(const prof_Program_t *profile) { + return prof_HashGetElementNum(profile->modules); +} /* prof_ProgGetNumModules */ + + +/*********************************** prof_Program_t **************************/ + +/******************************* prof_profilert.c ****************************/ + + + + +/*****************************************************************************/ +/********************************* prof_librt.h ******************************/ +/*****************************************************************************/ + +void 
+__BUILTIN_ecomp_prof_IncrLoopCounters(int loop_num, + int loop_oter_num, + int outer_loop_num, + int *current_iters, + int *iter_counters, + int *iter_outer_counters); + +void +__BUILTIN_ecomp_prof_RegProcSTDN(const char *module_name, +const char *proc_name, +prof_FuncAttr_t func_attr, +prof_UInt_t cfg_checksum, +prof_UInt_t edges, +prof_UInt_t edges_comdat, +prof_UInt64_t *edge_counters, +prof_UInt_t loops, +prof_UInt_t outer_loops, +prof_UInt_t outer_outer_loops, +prof_UInt64_t **loop_numbers, +prof_UInt64_t **loop_counters, +void *loop_outer_counters, +void *loop_outer_outer_counters, +prof_UInt_t prof_opers_num, +void *vprof_counters, +prof_Bool_t is_excluded); + + + +/********************************* prof_librt.h ******************************/ + + + +/*****************************************************************************/ +/********************************* prof_librt.c ******************************/ +/*****************************************************************************/ + +#ifdef ECOMP_SUPPORT_INTERNAL_CHECK +#define PROF_DEBUG +#endif /* ECOMP_SUPPORT_INTERNAL_CHECK */ + +static prof_Program_t *prof_ProgramProfile = NULL; + + +/* #define PROF_DEBUG */ + +#ifdef PROF_DEBUG + +static prof_Bool_t prof_IsDebugSave = PROF_FALSE; + +static prof_Bool_t prof_IsDebugRuntime = PROF_FALSE; + +#if 0 +static prof_Bool_t prof_IsDebugOffset = PROF_FALSE; +#endif /* 0 */ + +#define prof_DebugRuntime(actions) \ +{ \ + if (prof_IsDebugRuntime) { \ + actions; \ + } \ +} /* prof_DebugRuntime */ + +#define prof_DebugSave(actions) \ +{ \ + if (prof_IsDebugSave) { \ + actions; \ + } \ +} /* prof_DebugSave */ + +#if 0 +#define prof_DebugOffset(actions) \ +{ \ + if (prof_IsDebugOffset) { \ + actions; \ + } \ +} /* prof_DebugOffset */ +#else /* 0 */ +#define prof_DebugOffset(actions) +#endif /* 0 */ + +#else /* PROF_DEBUG */ + +#define prof_DebugSave(action) +#define prof_DebugRuntime(actions) +#define prof_DebugOffset(actions) + +#endif /* PROF_DEBUG */ + +static 
void +prof_IncrOffset(prof_UInt_t off) { + prof_ProgramProfile->offset += off; +} /* prof_IncrOffset */ + +static prof_UInt_t +prof_GetCurrentOffset(void) { + return prof_ProgramProfile->offset; +} /* prof_GetCurrentOffset */ + +static void +prof_IncrCurBufPos(void) { + prof_ProgramProfile->cur_buf_pos++; +} /* prof_IncrCurBufPos */ + +static prof_UInt_t +prof_GetCurrentBufPos(void) { + return prof_ProgramProfile->cur_buf_pos; +} /* prof_GetCurrentBufPos */ + +static void +prof_Write(const char *buf, + size_t nbyte) { + int i; + + for (i = 0; i < nbyte; i++) { + prof_ProgramProfile->prof_buff[prof_GetCurrentBufPos()] = +buf[i]; + prof_IncrCurBufPos(); + } +} /* prof_Write */ + +static void +prof_DumpUInt(int file_descr, + prof_UInt_t val) { + if (prof_ProgramProfile->write_mode == PROF_WRITE_OFFSETS) { + prof_UInt_t temp; + unsigned char uint_arr[PROF_BYTES_IN_UINT]; + int i; + + temp = val; + for (i = 0; i < PROF_BYTES_IN_UINT; i++) { + val >>= PROF_BITS_IN_BYTE; + uint_arr[i] = temp - (val << PROF_BITS_IN_BYTE); + temp = val; + } + + prof_Write((char *)uint_arr, +PROF_BYTES_IN_UINT); + } else { + prof_IncrOffset(PROF_BYTES_IN_UINT); + + } +} /* prof_DumpUInt */ + +static void +prof_DumpUInt64(int file_descr, + prof_UInt64_t val) { + if (prof_ProgramProfile->write_mode == PROF_WRITE_OFFSETS) { + prof_UInt64_t temp; + unsigned char uint64_arr[PROF_BYTES_IN_UINT64]; + int i; + + temp = val; + for (i = 0; i < PROF_BYTES_IN_UINT64; i++) { + val = val >> PROF_BITS_IN_BYTE; + uint64_arr[i] = temp - (val << PROF_BITS_IN_BYTE); + temp = val; + } + + prof_Write((char *)uint64_arr, +PROF_BYTES_IN_UINT64); + } else { + prof_IncrOffset(PROF_BYTES_IN_UINT64); + } +} /* prof_DumpUInt64 */ + +static void +prof_DumpString(int file_descr, + const char *out_string, + size_t size) { + if (prof_ProgramProfile->write_mode == PROF_WRITE_OFFSETS) { + prof_Write(out_string, size); + } else { + prof_IncrOffset(size); + } +} /* prof_DumpString */ + +static void 
+prof_DumpProgramHeader(prof_Program_t *program_profile, + int file_descript) { + prof_DumpString(file_descript, +PROF_FILE_IDEN_STR_C, +PROF_FILE_IDEN_LEN); + + prof_DumpUInt(file_descript, prof_ProgGetNumModules(program_profile)); + prof_DumpUInt(file_descript, 0); + prof_DumpUInt(file_descript, 0); +} /* prof_DumpProgramHeader */ + +static void +prof_DumpModuleHeader(prof_Module_t *module_p, + int fd) { + int name_len; + name_len = strlen(prof_ModuleGetName(module_p)) + 1; + prof_DumpUInt(fd, name_len); + prof_DumpString(fd, prof_ModuleGetName(module_p), name_len); + prof_DumpUInt(fd, prof_ModuleGetNumFunctions(module_p)); + prof_DumpUInt(fd, (prof_UInt_t)prof_ModuleGetOffset(module_p)); +} /* prof_DumpModuleHeader */ + +static void +prof_DumpFuncHeader(prof_Func_t *func_p, + int fd) { + int name_len; + prof_DebugSave(pr_info("\n\n . prof_debug_save: Start writing proc '%s' header:\n", + prof_FuncGetName(func_p));); + + name_len = strlen(prof_FuncGetName(func_p)) + 1; + prof_DebugSave(pr_info(" . prof_debug_save: Saving name len: %d\n", + name_len);); + prof_DumpUInt(fd, name_len); + prof_DumpString(fd, prof_FuncGetName(func_p), name_len); + + prof_DebugSave( + pr_info(" . prof_debug_save: Saving attr:"); + pr_info("%s", (prof_FuncGetAttr(func_p) & PROF_FUNC_ATTR_EDGES) ? + " PROF_FUNC_ATTR_EDGES" : ""); + pr_info("%s", (prof_FuncGetAttr(func_p) & PROF_FUNC_ATTR_LOOPS) ? + " PROF_FUNC_ATTR_LOOPS" : ""); + pr_info("%s", (prof_FuncGetAttr(func_p) & PROF_FUNC_ATTR_VALUE_PROF) ? + " PROF_FUNC_ATTR_VALUE_PROF" : ""); + pr_info("%s", (prof_FuncGetAttr(func_p) & PROF_FUNC_ATTR_LOOPSOUTER) ? + " PROF_FUNC_ATTR_LOOPSOUTER" : ""); + pr_info("\n");); + prof_DumpUInt(fd, prof_FuncGetAttr(func_p)); + + prof_DebugSave(pr_info(" . prof_debug_save: Saving edges_in_file: %ld\n", + (long)func_p->edges_in_file);); + prof_DumpUInt(fd, func_p->edges_in_file); + + /* out max number of edges */ + prof_DebugSave(pr_info(" . 
prof_debug_save: Saving num_edges: %u\n", + prof_FuncGetNumEdges(func_p));); + prof_DumpUInt(fd, prof_FuncGetNumEdges(func_p)); + + /* out real number of edges */ + prof_DebugSave(pr_info(" . prof_debug_save: Saving out_num_edges: %u\n", + prof_FuncGetDumpedEdgesNum(func_p));); + prof_DumpUInt(fd, prof_FuncGetDumpedEdgesNum(func_p)); + + prof_DebugSave(pr_info(" . prof_debug_save: Saving loops_in_file: %ld\n", + (long)0);); + prof_DumpUInt(fd, 0); /* stub for loops profile */ + + /* out max number of loops */ + prof_DumpUInt(fd, 0); /* stub for loop profile */ + + prof_DumpUInt(fd, 0); + prof_DumpUInt(fd, 0); + + prof_DumpUInt(fd, 0); + prof_DumpUInt(fd, 0); + { + prof_DebugSave(pr_info(" . prof_debug_save: Saving cfg_checksum: %u\n", + prof_FuncGetChecksum(func_p));); + prof_DumpUInt(fd, prof_FuncGetChecksum(func_p)); + } + + prof_DebugSave(pr_info( " . prof_debug_save: Saving is_excluded: %u\n", + func_p->is_excluded);); + prof_DumpUInt(fd, func_p->is_excluded); + + prof_DumpUInt(fd, 0); + + prof_DumpUInt(fd, 0); + prof_DumpUInt(fd, 0); + prof_DumpUInt(fd, 0); + + prof_DebugSave(pr_info(" . prof_debug_save: Finish writing proc '%s' header:\n\n\n", + func_p->name);); +} /* prof_DumpFuncHeader */ + +static void +prof_DumpEdgeProfile(prof_Func_t *func_p, + int fd) { + prof_UInt_t edge_number, + index; + + prof_FuncSetDumpedEdgesNum(func_p, 0); + edge_number = prof_FuncGetNumEdges(func_p); + + for (index = 0; index < edge_number; index++) { + prof_UInt64_t counter; + + counter = prof_FuncGetEdgeCounter(func_p, index); + + if (counter == 0 || + counter == -1) { + continue; + } + + prof_DebugSave(pr_info(" . 
prof_debug_save: Saving edge %d counter %llu\n", + index, counter);); + + prof_DumpUInt(fd, index); + prof_DumpUInt64(fd, counter); + prof_FuncIncrDumpedEdges(func_p); + } + +PROF_ASSERT(prof_FuncGetNumEdges(func_p) >= prof_FuncGetDumpedEdgesNum(func_p)); +} /* prof_DumpEdgeProfile */ + +static void +prof_DumpFuncProfile(prof_Func_t *func_p, + int fd) { + prof_FuncAttr_t func_attr = prof_FuncGetAttr(func_p); + + prof_DebugSave(pr_info("\n\n . prof_debug_save: Start writing proc '%s' profile:\n", + prof_FuncGetName(func_p));); + + func_p->edges_in_file = prof_GetCurrentOffset(); + + if (func_attr & PROF_FUNC_ATTR_EDGES) { + prof_DebugSave(pr_info(" . prof_debug_save: Saving edge counters\n");); + + prof_DumpEdgeProfile(func_p, fd); + } + + prof_DebugSave(pr_info(" . prof_debug_save: Finish writing proc '%s' profile:\n\n\n", + prof_FuncGetName(func_p));); +} /* prof_DumpFuncProfile */ + +static void +prof_DumpModuleProfile(prof_Module_t *module_p, + int fd) { + prof_HashEntry_t *proc_entry; + + prof_ModuleSetOffset(module_p, prof_GetCurrentOffset()); + +PROF_ASSERT(module_p->procedures != 0); + + for PROF_HASH_ALL_ENTRIES(proc_entry, prof_ModuleFunctions(module_p)) + { + prof_DumpFuncHeader(prof_HashGetEntryVoidPtrVal(proc_entry), + fd); + } + + for PROF_HASH_ALL_ENTRIES(proc_entry, prof_ModuleFunctions(module_p)) + { + prof_DumpFuncProfile(prof_HashGetEntryVoidPtrVal(proc_entry), + fd); + } + +} /* prof_DumpModuleProfile */ + +static void +prof_DumpModuleFuncHeader(prof_Module_t *module_p, + int fd) { + prof_HashEntry_t *proc_entry; + + if (prof_ProgramProfile->write_mode == PROF_COUNT_OFFSETS) + module_p->module_offset = prof_GetCurrentOffset(); + + for PROF_HASH_ALL_ENTRIES(proc_entry, prof_ModuleFunctions(module_p)) + { + prof_DumpFuncHeader(prof_HashGetEntryVoidPtrVal(proc_entry), + fd); + } +} /* prof_DumpModuleFuncHeader */ + +/** + * @brief Set counter increment to 0 + */ +static void +prof_MakeIncrStepZero(void) { + ecomp_ProfileIncrement = 0; +} /* 
prof_MakeIncrStepZero */ + +/** + * @brief Set counter increment to 1 + */ +void +prof_MakeIncrStepOne(void) { + ecomp_ProfileIncrement = 1; +} /* prof_MakeIncrStepOne */ + +/** + * @brief Clear buffer and variables for profile repeated dump + */ +static void +prof_ClearBuffer(void) { + int i; + + save_first_time = true; + + prof_ProgramProfile->offset = 0; + prof_ProgramProfile->write_mode = PROF_COUNT_OFFSETS; + prof_ProgramProfile->cur_buf_pos = 0; + + for (i = 0; i < prof_BuffSize; i++) + prof_ProfileBuff[i] = 0; +} /* prof_ClearBuffer */ + +void +__BUILTIN_ecomp_prof_SaveFile(prof_Program_t *program_profile) { + int fd = 0; + prof_ModulesEntry_t *entry; + + if (!save_first_time) { + /* Fixed disagreeable effect of multiple entrer in this + * function. It should be entered only once */ + return; + } + + save_first_time = false; + prof_MakeIncrStepZero(); + prof_ProgramProfile->offset = 0; + prof_ProgramProfile->write_mode = PROF_COUNT_OFFSETS; + prof_ProgramProfile->cur_buf_pos = 0; + prof_ClearBuffer(); + + prof_DebugSave(pr_info(" . prof_debug_save: Start saving program header\n");); + prof_DumpProgramHeader(program_profile, fd); + prof_DebugSave(pr_info(" . prof_debug_save: Finish saving program header\n");); + + for PROF_HASH_ALL_ENTRIES(entry, prof_ProgGetModules(program_profile)) + { + prof_DumpModuleHeader(prof_HashGetEntryVoidPtrVal(entry), + fd); + } + + for PROF_HASH_ALL_ENTRIES(entry, prof_ProgGetModules(program_profile)) + { + prof_DumpModuleProfile(prof_HashGetEntryVoidPtrVal(entry), + fd); + } + + prof_ProgramProfile->write_mode = PROF_WRITE_OFFSETS; + + prof_DebugSave(pr_info(" . prof_debug_save: Start saving program header\n");); + prof_DumpProgramHeader(program_profile, fd); + prof_DebugSave(pr_info(" . prof_debug_save: Finish saving program header\n");); + + prof_DebugSave(pr_info(" . 
prof_debug_save: Start saving module headers\n");); + for PROF_HASH_ALL_ENTRIES(entry, prof_ProgGetModules(program_profile)) + { + prof_DumpModuleHeader(prof_HashGetEntryVoidPtrVal(entry), + fd); + } + prof_DebugSave(pr_info(" . prof_debug_save: Finish saving module headers\n");); + + prof_DebugSave(pr_info(" . prof_debug_save: Start saving counters\n");); + /* TODO save pointers in file for functions */ + for PROF_HASH_ALL_ENTRIES(entry, prof_ProgGetModules(program_profile)) + { + prof_DumpModuleProfile(prof_HashGetEntryVoidPtrVal(entry), + fd); + } + prof_DebugSave(pr_info(" . prof_debug_save: Finish saving counters\n");); +} /* __BUILTIN_ecomp_prof_SaveFile */ + +/** + * @brief Dump of buffer into file + */ +static void +prof_DumpFile(struct seq_file *s) { /* [in] File descriptor */ + seq_write(s, prof_ProgramProfile->prof_buff, prof_GetCurrentBufPos()); +} /* prof_DumpFile */ + +void +__BUILTIN_ecomp_prof_RegProcSTDN(const char *module_name, + const char *proc_name, + prof_FuncAttr_t func_attr, + prof_UInt_t cfg_checksum, + prof_UInt_t edges, + prof_UInt_t edges_comdat, + prof_UInt64_t *edge_counters, + prof_UInt_t loops, + prof_UInt_t outer_loops, + prof_UInt_t outer_outer_loops, + prof_UInt64_t **loop_numbers, + prof_UInt64_t **loop_counters, + void *loop_outer_counters, + void *loop_outer_outer_counters, + prof_UInt_t prof_opers_num, + void *vprof_counters, + prof_Bool_t is_excluded) +{ + prof_Module_t *module_p; + prof_Func_t *func_p; + + prof_DebugRuntime(pr_info("\n\n . prof_IsDebugRuntime: Start proc `%s' from module `%s' registration\n", + proc_name, module_name);); + + module_p = prof_ProgFindOrCreateModule(prof_ProgramProfile, + module_name); + + prof_DebugRuntime(pr_info(" . prof_IsDebugRuntime: Found or created object for module `%s'\n", + module_name);); + + func_p = prof_HashFindByStringAndGetVoidPtrValue(module_p->procedures, + proc_name); + if (func_p == NULL) { + int i = 0; + prof_DebugRuntime( + pr_info(" . 
prof_IsDebugRuntime: Creating new object for proc `%s' with checksum %u\n", + proc_name, cfg_checksum); + pr_info(" . prof_IsDebugRuntime: Proc attr"); + pr_info("%s", (func_attr & PROF_FUNC_ATTR_EDGES) ? + " PROF_FUNC_ATTR_EDGES" : ""); + pr_info("%s", (func_attr & PROF_FUNC_ATTR_LOOPS) ? + " PROF_FUNC_ATTR_LOOPS" : ""); + pr_info("%s", (func_attr & PROF_FUNC_ATTR_VALUE_PROF) ? + " PROF_FUNC_ATTR_VALUE_PROF" : ""); + pr_info("%s", (func_attr & PROF_FUNC_ATTR_LOOPSOUTER) ? + " PROF_FUNC_ATTR_LOOPSOUTER" : ""); + pr_info("\n");); + + func_p = prof_ModuleFindOrCreateFunction(module_p, proc_name, + func_attr, edges, loops, outer_loops, + outer_outer_loops, cfg_checksum, + prof_opers_num); + + func_p->edge_counters = (prof_UInt64_t *) edge_counters; + func_p->is_excluded = is_excluded; + prof_FuncSetChecksum(func_p, cfg_checksum); + + } else { + if( !is_excluded ) { + panic(" . eprof_init_print: Multiple proc `%s' definition in module `%s'\n", + proc_name, module_name); + } else + { + return; + } + } + + prof_DebugRuntime(pr_info("\n . prof_IsDebugRuntime: Finish proc `%s' from module `%s' registration\n\n", + proc_name, module_name);); + +} /* __BUILTIN_ecomp_prof_RegProcSTDN_lib */ + +void +__BUILTIN_ecomp_prof_CreateProfileObj(prof_Bool_t is_vprof, + prof_Bool_t is_parallel, /* STUB */ + const char *path) { + if (prof_ProgramProfile != NULL) { + return; + } + + prof_DebugRuntime(pr_info(" . prof_IsDebugRuntime: Using value profile: %s\n", + (is_vprof) ? "YES" : "NO");); + + prof_ProgramProfile = prof_ProgCreate(is_vprof); +} /* __BUILTIN_ecomp_prof_CreateProfileObj */ + +void +__BUILTIN_prof_PrintModuleInited(const char *module_name) { + + prof_DebugRuntime(pr_info(" . 
Module inited %s!!!\n", module_name);); +} /* __BUILTIN_prof_PrintModuleInited */ + +void +__BUILTIN_ecomp_prof_AtomicAdd64(prof_UInt64_t *res) { + atomic64_add(ecomp_ProfileIncrement, res); +} /* __BUILTIN_ecomp_prof_AtomicAdd64 */ + +/********************************* prof_librt.c ******************************/ + + +static int profile_seq_show(struct seq_file *s, void *v) +{ + int num = *((loff_t *) v); + + if (num >= profile_counters_num) + return 0; + + prof_DumpFile(s); + + return 0; +} + +static void *profile_seq_start(struct seq_file *s, loff_t *pos) +{ + if (*pos >= profile_counters_num) + return 0; + + __BUILTIN_ecomp_prof_SaveFile(prof_ProgramProfile); + + return (void *) pos; +} + +static void *profile_seq_next(struct seq_file *s, void *v, + loff_t *pos) +{ + if ((*pos)++ >= profile_counters_num) + return 0; + return (void *) pos; +} + +static void profile_seq_stop(struct seq_file *s, void *v) +{ +} + +static const struct seq_operations profile_seq_ops = { + .start = profile_seq_start, + .next = profile_seq_next, + .stop = profile_seq_stop, + .show = profile_seq_show +}; + +static int profile_proc_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &profile_seq_ops); +} + +static ssize_t profile_write(struct file *file, const char __user *buf, + size_t count, loff_t *data) +{ + prof_ClearBuffer(); + + return count; +} + +static const struct file_operations profile_proc_fops = { + .owner = THIS_MODULE, + .open = profile_proc_open, + .write = profile_write, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + +static int __init kernel_profile_init(void) +{ + proc_create(PROFILE_FILENAME, S_IRUGO | S_IWUSR, + NULL, &profile_proc_fops); + return 0; +} + +module_init(kernel_profile_init); + diff --git a/arch/e2k/kernel/mkclearwindow.c b/arch/e2k/kernel/mkclearwindow.c new file mode 100644 index 000000000000..1a6eefc3eb83 --- /dev/null +++ b/arch/e2k/kernel/mkclearwindow.c @@ -0,0 +1,153 @@ +#include +#include + 
+#include "../../../include/generated/autoconf.h" +#include "ttable_wbs.h" + +#define B "\t\t\"" +#define E "\\n\" \\\n" + +enum { + TYPE_INTERRUPT, + TYPE_SYSCALL, + TYPE_SYSCALL_PROT, + TYPE_SETBN +}; + + +static int return_printed; +static void print_header(int rbs, int rsz, int type) +{ + printf(B "{" E); + printf(B "setbn rbs=%d, rsz=%d, rcur=0" E, rbs, rsz); + if ((type == TYPE_SYSCALL || type == TYPE_SYSCALL_PROT) && + !return_printed) { + return_printed = 1; + printf(B "return %%%%ctpr3" E); + } + if (type == TYPE_SYSCALL_PROT) { + printf(B "puttagd %%[_r2], %%[_tag2], %%%%dr2" E); + printf(B "puttagd %%[_r3], %%[_tag3], %%%%dr3" E); + printf(B "addd %%[_r0], 0, %%%%dr0" E); + printf(B "addd %%[_r1], 0, %%%%dr1" E); + } + if (type == TYPE_SYSCALL) + printf(B "addd %%[_r0], 0, %%%%dr0" E); + printf(B "}" E); +} + +/* + * @name - macro name + * @regs - number of *quadro* registers to clear + * @keep - number of *double* registers to keep + * @interrupt - should we use "done" or "return" + "ct" to return + */ +static void print_clear_macro(char *name, int regs, int type) +{ + int i, bn = 0, keep; + + return_printed = 0; + + switch (type) { + case TYPE_INTERRUPT: + keep = 0; + break; + case TYPE_SYSCALL: + keep = 1; + break; + case TYPE_SYSCALL_PROT: + keep = 4; + break; + default: + exit(1); + } + + printf("#define %s(", name); + for (i = 0; i < keep; i++) + printf("r%d%s", i, (i + 1 != keep) ? ", " : ""); + if (type == TYPE_SYSCALL_PROT) + printf(", tag2, tag3"); + printf( ") \\\n" + "do { \\\n" + "\tasm volatile ( \\\n"); + + for (i = 0; i < regs; i++) { + if (i == 0) { + bn = 0; + print_header(0, (regs < 64) ? 
(regs - 1) : 63, type); + } + if (i == 63) { + bn = 0; + print_header(63, regs - 63 - 1, TYPE_SETBN); + } + if ((bn % 3) == 0) + printf(B "{" E); + + if (2 * i >= keep) + printf(B "addd 0, 0, %%%%db[%d]" E, 2 * bn); + + if (2 * i + 1 >= keep) + printf(B "addd 0, 0, %%%%db[%d]" E, 2 * bn + 1); + + if ((bn % 3) == 2 || i + 1 == regs) + printf(B "}" E); + + ++bn; + } + + if (type == TYPE_INTERRUPT) { + /* #80747: must repeat interrupted barriers */ + printf(B "{nop 3; wait st_c=1} {done}" E); + } else { + /* System call return */ + printf(B "{" E); + printf(B "ct %%%%ctpr3" E); + printf(B "}" E); + } + + printf("\t\t::"); + for (i = 0; i < keep; i++) + printf(" [_r%d] \"ir\" (r%d)%s", + i, i, (i + 1 != keep) ? "," : ""); + if (type == TYPE_SYSCALL_PROT) + printf(", \\\n\t\t[_tag2] \"ir\" (tag2), [_tag3] \"ir\" (tag3)"); + printf(" \\\n\t\t: \"ctpr3\""); + printf("); \\\n"); + printf("} while (0)\n"); +} + +int main(void) +{ + print_clear_macro("CLEAR_USER_TRAP_HANDLER_WINDOW", + USER_TRAP_HANDLER_SIZE, TYPE_INTERRUPT); + print_clear_macro("CLEAR_TTABLE_ENTRY_10_WINDOW", + TTABLE_ENTRY_10_SIZE, TYPE_SYSCALL); + print_clear_macro("CLEAR_TTABLE_ENTRY_10_WINDOW_PROT", + TTABLE_ENTRY_10_SIZE, TYPE_SYSCALL_PROT); + print_clear_macro("CLEAR_TTABLE_ENTRY_8_WINDOW", + TTABLE_ENTRY_8_SIZE, TYPE_SYSCALL); + print_clear_macro("CLEAR_TTABLE_ENTRY_8_WINDOW_PROT", + TTABLE_ENTRY_8_SIZE, TYPE_SYSCALL_PROT); + print_clear_macro("CLEAR_RET_FROM_FORK_WINDOW", + RET_FROM_FORK_SIZE, TYPE_SYSCALL); + print_clear_macro("CLEAR_MAKECONTEXT_WINDOW", + MAKECONTEXT_SIZE, TYPE_SYSCALL); + print_clear_macro("CLEAR_HANDLE_SYS_CALL_WINDOW", + HANDLE_SYS_CALL_SIZE, TYPE_SYSCALL); + print_clear_macro("CLEAR_DO_SIGRETURN_INTERRUPT", + DO_SIGRETURN_SIZE, TYPE_INTERRUPT); + print_clear_macro("CLEAR_DO_SIGRETURN_SYSCALL", + DO_SIGRETURN_SIZE, TYPE_SYSCALL); + print_clear_macro("CLEAR_DO_SIGRETURN_SYSCALL_PROT", + DO_SIGRETURN_SIZE, TYPE_SYSCALL_PROT); +#ifdef CONFIG_KVM_HOST_MODE + 
print_clear_macro("CLEAR_RETURN_PV_VCPU_TRAP_WINDOW", + RETURN_PV_VCPU_TRAP_SIZE, TYPE_INTERRUPT); + print_clear_macro("CLEAR_HANDLE_PV_VCPU_SYS_CALL_WINDOW", + HANDLE_PV_VCPU_SYS_CALL_SIZE, TYPE_SYSCALL); + print_clear_macro("CLEAR_HANDLE_PV_VCPU_SYS_FORK_WINDOW", + HANDLE_PV_VCPU_SYS_FORK_SIZE, TYPE_SYSCALL); +#endif + + return 0; +} diff --git a/arch/e2k/kernel/module.c b/arch/e2k/kernel/module.c new file mode 100644 index 000000000000..158a5ef78da2 --- /dev/null +++ b/arch/e2k/kernel/module.c @@ -0,0 +1,130 @@ +/* + * Kernel module help for E2K. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, + unsigned long start, unsigned long end); + +void *module_alloc(unsigned long size) +{ + if (PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR) + return NULL; + + return __vmalloc_node_range(size, 8, MODULES_VADDR, MODULES_END, + GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, + __builtin_return_address(0)); +} + +int apply_relocate_add(Elf64_Shdr *sechdrs, + const char *strtab, + unsigned int symindex, + unsigned int relsec, + struct module *me) +{ + unsigned int i; + Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr; + Elf64_Sym *sym; + u64 *location; + u32 *loc32; + + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + Elf64_Addr v; + + /* This is where to make the change */ + location = (u64 *) ((u8 *)sechdrs[sechdrs[relsec].sh_info].sh_addr + + rel[i].r_offset); + loc32 = (u32 *) location; + + /* This is the symbol it is referring to. Note that all + undefined symbols have been resolved. 
*/ + sym = (Elf64_Sym *)sechdrs[symindex].sh_addr + + ELF64_R_SYM(rel[i].r_info); + v = sym->st_value + rel[i].r_addend; + + switch (ELF64_R_TYPE(rel[i].r_info) & 0xff) { + case R_E2K_32_ABS: + *loc32 = v; + break; + + case R_E2K_64_ABS: + *location = v; + break; + + case R_E2K_64_ABS_LIT: + loc32[0] = (u32)(v >> 32); + loc32[1] = (u32)(v & 0xffffffff); + break; + + case R_E2K_64_CALL: + /* As far as field r_addend holds offset within + * wide command to where we change, we need to + * deduct r_addend in order to obtain correct address. + * Therefore we need to add r_addend to the address, + * where we change to. + */ + v -= (Elf64_Addr) location; + v -= rel[i].r_addend; + loc32 = (Elf32_Addr *) ((char *)loc32 + rel[i].r_addend); + + *loc32 = (*loc32 & 0xf0000000) | ((v >> 3) & 0x0fffffff); + break; + + case R_E2K_DISP: + v -= (Elf64_Addr) location; + *loc32 = (*loc32 & 0xf0000000) | ((v >> 3) & 0x0fffffff); + break; + + case R_E2K_32_PC: + v -= (Elf64_Addr) location; + *loc32 = v; + if ((s64) v > INT_MAX || (s64) v < INT_MIN) + goto overflow; + break; + + default: + printk(KERN_ERR "module %s: Unknown relocation: %d\n", + me->name, + (int) (ELF64_R_TYPE(rel[i].r_info) & 0xff)); + return -ENOEXEC; + }; + } + + return 0; + +overflow: + pr_err("module %s: Relocation (type %u) overflow\n", + me->name, ELF64_R_TYPE(rel[i].r_info) & 0xff); + return -ERANGE; +} + +int module_finalize(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + struct module *me) +{ + const Elf_Shdr *s; + char *secstrings; + + secstrings = (void *) hdr + sechdrs[hdr->e_shstrndx].sh_offset; + for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { + if (!strcmp(".altinstructions", secstrings + s->sh_name)) { + /* patch .altinstructions */ + void *aseg = (void *) s->sh_addr; + + apply_alternatives(aseg, aseg + s->sh_size); + } + } + + return 0; +} diff --git a/arch/e2k/kernel/monitors.c b/arch/e2k/kernel/monitors.c new file mode 100644 index 000000000000..8418784e3c61 --- /dev/null +++ 
b/arch/e2k/kernel/monitors.c @@ -0,0 +1,2527 @@ +/* + * arch/e2k/kernel/monitors.c + * + * This file contains implementation of interface functions for working with + * monitors and implementation of mechanism for adjusting monitors. + * + * Copyright (C) 2009-2016 Pavel V. Panteleev (panteleev_p@mcst.ru) + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "../../../fs/proc/internal.h" /* for get_proc_task() */ + + +#define MONITORS_FILENAME "monitors" +static struct proc_dir_entry *monitors_dir_entry; + +/* + * Monitors + */ + +#define MONITORS_SETTINGS_FILENAME "monitors_settings" +#define MONITORS_EVENTS_FILENAME "monitors_events" +#define MONITORS_DEAD_PROC_EVENTS_FILENAME "monitors_dead_proc_events" +#define MONITORS_HELP_FILENAME "monitors_help" + +#define MONITORS_MODE_NAME_LEN 1 +#define MONITORS_SETTINGS_STR_MAX_SIZE 256 + +#define MONITORS_MODES_COUNT 3 +#define SYSTEM_MODE 1 +#define USER_MODE 2 +#define COMMON_MODE (SYSTEM_MODE | USER_MODE) + +struct monitors_mode_info { + char *name; + unsigned char mode; +}; + +static struct monitors_mode_info monitors_mode_info[] = { + {"S", SYSTEM_MODE}, + {"U", USER_MODE }, + {"C", COMMON_MODE} +}; + +struct monitors_info { + int mode; + unsigned short event; + unsigned char is_used; +}; + +static struct monitors_info monitors[MONITORS_COUNT]; + +int monitors_used __read_mostly = 0; + +#undef DIM0 +#undef DIM1 +#undef DDM0 +#undef DDM1 + +enum { + DIM0, + DIM1, + DDM0, + DDM1 +}; + +static char *monitors_id_names[] = { + "I0", + "I1", + "D0", + "D1" +}; + +struct monitors_events_range { + unsigned short start; + unsigned short end; +}; + +#define DDM0_EVENTS_RANGE_COUNT_V2 7 +#define DDM1_EVENTS_RANGE_COUNT_V2 7 +#define DIM_EVENTS_RANGE_COUNT_V2 9 + +static struct monitors_events_range ddm0_monitors_events_list_v2[] = { + {0x00, 0x03}, {0x10, 0x16}, {0x20, 0x21}, {0x30, 0x3a}, {0x40, 0x46}, + {0x48, 0x4f}, {0x70, 0x72} +}; + +static struct monitors_events_range 
ddm1_monitors_events_list_v2[] = { + {0x00, 0x02}, {0x10, 0x17}, {0x20, 0x21}, {0x30, 0x3a}, {0x40, 0x48}, + {0x4a, 0x4f}, {0x70, 0x72} +}; + +static struct monitors_events_range dim_monitors_events_list_v2[] = { + {0x00, 0x0a}, {0x10, 0x1f}, {0x20, 0x26}, {0x30, 0x3c}, {0x40, 0x4a}, + {0x50, 0x5a}, {0x60, 0x69}, {0x70, 0x74}, {0x7c, 0x7e} +}; + +#define DDM0_EVENTS_RANGE_COUNT_V3 7 +#define DDM1_EVENTS_RANGE_COUNT_V3 6 +#define DIM_EVENTS_RANGE_COUNT_V3 9 + +static struct monitors_events_range ddm0_monitors_events_list_v3[] = { + {0x00, 0x03}, {0x10, 0x19}, {0x20, 0x24}, {0x30, 0x3a}, {0x40, 0x46}, + {0x48, 0x48}, {0x4a, 0x4b} +}; + +static struct monitors_events_range ddm1_monitors_events_list_v3[] = { + {0x00, 0x02}, {0x10, 0x19}, {0x20, 0x24}, {0x30, 0x3a}, {0x40, 0x48}, + {0x4a, 0x4b} +}; + +static struct monitors_events_range dim_monitors_events_list_v3[] = { + {0x00, 0x03}, {0x07, 0x0a}, {0x0f, 0x26}, {0x30, 0x3d}, {0x40, 0x4a}, + {0x50, 0x5a}, {0x60, 0x69}, {0x70, 0x74}, {0x7c, 0x7e} +}; + +#define DDM0_EVENTS_RANGE_COUNT_V5 6 +#define DDM1_EVENTS_RANGE_COUNT_V5 7 +#define DIM_EVENTS_RANGE_COUNT_V5 9 + +static struct monitors_events_range ddm0_monitors_events_list_v5[] = { + {0x00, 0x04}, {0x10, 0x19}, {0x20, 0x24}, {0x30, 0x3a}, {0x40, 0x48}, + {0x4a, 0x4b} +}; + +static struct monitors_events_range ddm1_monitors_events_list_v5[] = { + {0x00, 0x02}, {0x04, 0x04}, {0x10, 0x19}, {0x20, 0x24}, {0x30, 0x3a}, + {0x40, 0x48}, {0x4a, 0x4b} +}; + +static struct monitors_events_range dim_monitors_events_list_v5[] = { + {0x00, 0x03}, {0x07, 0x0a}, {0x0f, 0x26}, {0x2d, 0x3d}, {0x40, 0x4a}, + {0x50, 0x5a}, {0x60, 0x69}, {0x70, 0x74}, {0x7c, 0x7e} +}; + + + + +#define DDM0_EVENTS_RANGE_COUNT_V6 5 +#define DDM1_EVENTS_RANGE_COUNT_V6 6 +#define DIM_EVENTS_RANGE_COUNT_V6 5 + +static struct monitors_events_range ddm0_monitors_events_list_v6[] = { + {0x00, 0x07}, {0x10, 0x1c}, {0x20, 0x24}, {0x30, 0x3a}, {0x40, 0x4f}, +}; + +static struct monitors_events_range 
ddm1_monitors_events_list_v6[] = { + {0x00, 0x07}, {0x10, 0x1c}, {0x20, 0x24}, {0x30, 0x3a}, {0x40, 0x4b}, + {0x4d, 0x4f} +}; + +static struct monitors_events_range dim_monitors_events_list_v6[] = { + {0x00, 0x03}, {0x07, 0x0a}, {0x0f, 0x27}, {0x2d, 0x74}, {0x7c, 0x7e} +}; + + + + + +static struct monitors_events_range *ddm0_monitors_events_list; +static struct monitors_events_range *ddm1_monitors_events_list; +static struct monitors_events_range *dim_monitors_events_list; + +static unsigned char ddm0_monitors_events_range_count; +static unsigned char ddm1_monitors_events_range_count; +static unsigned char dim_monitors_events_range_count; + +static atomic64_t common_events_count[NR_CPUS][MONITORS_COUNT]; + +typedef struct { + unsigned long monitors_count[NR_CPUS][MONITORS_COUNT]; + struct monitors_info monitors[MONITORS_COUNT]; + pid_t pid; + cpumask_t cpus_mask; +} dead_proc_events_t; + +#define DEAD_PROC_EVENTS_COUNT 20 + +static dead_proc_events_t dead_proc_events_buf[DEAD_PROC_EVENTS_COUNT]; +static dead_proc_events_t dead_proc_events_buf_tmp[DEAD_PROC_EVENTS_COUNT]; +static char dead_proc_events_buf_id = -1; +static char dead_proc_events_buf_id_tmp; +static unsigned char dead_proc_events_buf_full = 0; +static unsigned char dead_proc_events_buf_full_tmp; + +static DEFINE_RAW_SPINLOCK(monitors_lock); +static DEFINE_RAW_SPINLOCK(dead_proc_lock); +static DEFINE_RAW_SPINLOCK(dead_proc_lock_tmp); + +/* + * SIC monitors + */ + +#define SICMONITORS_SETTINGS_FILENAME "sicmonitors_settings" +#define SICMONITORS_EVENTS_FILENAME "sicmonitors_events" +#define SICMONITORS_HELP_FILENAME "sicmonitors_help" + +#define SICMONITORS_SETTINGS_STR_MAX_SIZE 256 + +#define HAS_MACHINE_SICMONITORS \ + (IS_MACHINE_E2S || IS_MACHINE_E8C || IS_MACHINE_E8C2) + +struct sicmonitors_info { + unsigned short event; + unsigned char is_used; +}; + +static struct sicmonitors_info sicmonitors[SICMONITORS_COUNT]; + +enum { + MCM0, + MCM1, +}; + +static char *sicmonitors_id_names[] = { + "M0", + 
"M1", +}; + +#define sicmonitors_events_range monitors_events_range + +#define MCM0_EVENTS_RANGE_COUNT_E4C 1 +#define MCM1_EVENTS_RANGE_COUNT_E4C 1 + +static struct sicmonitors_events_range mcm0_sicmonitors_events_list_e4c[] = { + {0x00, 0x08} +}; + +static struct sicmonitors_events_range mcm1_sicmonitors_events_list_e4c[] = { + {0x00, 0x06} +}; + +#define MCM0_EVENTS_RANGE_COUNT_E8C 2 +#define MCM1_EVENTS_RANGE_COUNT_E8C 2 + +static struct sicmonitors_events_range mcm0_sicmonitors_events_list_e8c[] = { + {0x00, 0x05}, {0x20, 0x3f} +}; + +static struct sicmonitors_events_range mcm1_sicmonitors_events_list_e8c[] = { + {0x00, 0x05}, {0x20, 0x3f} +}; + +#define MCM0_EVENTS_RANGE_COUNT_E8C2 2 +#define MCM1_EVENTS_RANGE_COUNT_E8C2 2 + +static struct sicmonitors_events_range mcm0_sicmonitors_events_list_e8c2[] = { + {0x00, 0x1b}, {0x20, 0x3f} +}; + +static struct sicmonitors_events_range mcm1_sicmonitors_events_list_e8c2[] = { + {0x00, 0x1b}, {0x20, 0x3f} +}; + +static struct sicmonitors_events_range *mcm0_sicmonitors_events_list; +static struct sicmonitors_events_range *mcm1_sicmonitors_events_list; + +static unsigned char mcm0_sicmonitors_events_range_count; +static unsigned char mcm1_sicmonitors_events_range_count; + +static DEFINE_RAW_SPINLOCK(sicmonitors_lock); + +/* + * IPCC monitors + */ + +#define IPCCMONITORS_SETTINGS_FILENAME "ipccmonitors_settings" +#define IPCCMONITORS_EVENTS_FILENAME "ipccmonitors_events" +#define IPCCMONITORS_HELP_FILENAME "ipccmonitors_help" + +#define IPCCMONITORS_SETTINGS_STR_MAX_SIZE 16 + +#define HAS_MACHINE_IPCCMONITORS \ + (IS_MACHINE_E2S || IS_MACHINE_E8C) + +struct ipccmonitors_info { + unsigned short event; + unsigned char is_used; +}; + +static struct ipccmonitors_info ipccmonitors; + +struct ipccmonitors_event_info { + char *name; + unsigned short event; +}; + +#define IPCC_EVENTS_COUNT 2 + +enum { + IPCC_LERR, + IPCC_RTRY, +}; + +static struct ipccmonitors_event_info IPCC_event_info[] = { + {"IPCC_LERR", 0x01}, + {"IPCC_RTRY", 
0x02}, +}; + +static DEFINE_RAW_SPINLOCK(ipccmonitors_lock); + +/* + * IOCC monitors + */ + +#define IOCCMONITORS_SETTINGS_FILENAME "ioccmonitors_settings" +#define IOCCMONITORS_EVENTS_FILENAME "ioccmonitors_events" +#define IOCCMONITORS_HELP_FILENAME "ioccmonitors_help" + +#define E2K_IO_STR_EVENT_SHIFT 29 +#define E2K_IO_STR_EVENT_MASK 0xE0000000 + +#define IOCCMONITORS_SETTINGS_STR_MAX_SIZE 16 + +#define HAS_MACHINE_IOCCMONITORS \ + (IS_MACHINE_ES2 || IS_MACHINE_E2S || IS_MACHINE_E1CP) + +struct ioccmonitors_info { + unsigned short event; + unsigned char is_used; +}; + +static struct ioccmonitors_info ioccmonitors; + +struct ioccmonitors_event_info { + char *name; + unsigned short event; +}; + +#define IOCC_EVENTS_COUNT 4 + +enum { + IOCC_BSY_RC, + IOCC_ERR_RC, + IOCC_TO_RC, + IOCC_CMN_RC, +}; + +static struct ioccmonitors_event_info IOCC_event_info[] = { + {"IOCC_BSY_RC", 0x01}, + {"IOCC_ERR_RC", 0x02}, + {"IOCC_TO_RC", 0x04}, + {"IOCC_CMN_RC", 0x07}, +}; + +static DEFINE_RAW_SPINLOCK(ioccmonitors_lock); + + +/* + * Monitors + */ + +static inline int dim_check_start_monitoring(int monitor) +{ + e2k_dimcr_t dimcr_reg; + unsigned char user, new_user; + unsigned char system, new_system; + unsigned short event, new_event; + unsigned char new_mode; + + if (monitors[monitor].is_used) { + dimcr_reg = READ_DIMCR_REG(); + + user = dimcr_reg.fields[monitor].user; + system = dimcr_reg.fields[monitor].system; + event = dimcr_reg.fields[monitor].event; + + new_mode = monitors_mode_info[monitors[monitor].mode].mode; + new_user = (new_mode & USER_MODE) ? 1 : 0; + new_system = (new_mode & SYSTEM_MODE) ? 
1 : 0; + new_event = monitors[monitor].event; + + if (user != new_user || system != new_system || + event != new_event) + return 1; + } + + return 0; +} + +static inline unsigned char ddm_check_start_monitoring(int monitor) +{ + e2k_ddmcr_t ddmcr_reg; + unsigned char user, new_user; + unsigned char system, new_system; + unsigned short event, new_event = 0; + unsigned char new_mode; + unsigned char num; + + if (monitors[monitor].is_used) { + ddmcr_reg = READ_DDMCR_REG(); + + num = monitor - 2; + + user = ddmcr_reg.fields[num].user; + system = ddmcr_reg.fields[num].system; + event = ddmcr_reg.fields[num].event; + + new_mode = monitors_mode_info[monitors[monitor].mode].mode; + new_user = (new_mode & USER_MODE) ? 1 : 0; + new_system = (new_mode & SYSTEM_MODE) ? 1 : 0; + new_event = monitors[monitor].event; + + if (user != new_user || system != new_system || + event != new_event) + return 1; + } + + return 0; +} + +static inline unsigned char check_start_monitoring(int monitor) +{ + if (monitor == DIM0 || monitor == DIM1) + return dim_check_start_monitoring(monitor); + else if (monitor == DDM0 || monitor == DDM1) + return ddm_check_start_monitoring(monitor); + + return 0; +} + +static inline unsigned char dim_check_process_start_monitoring( + int monitor, struct task_struct *task) +{ + e2k_dimcr_t dimcr_reg; + unsigned char user, proc_user; + unsigned char system, proc_system; + unsigned short event, proc_event; + unsigned char proc_mode; + + if (monitors[monitor].is_used) { + dimcr_reg = task->thread.sw_regs.dimcr; + + user = dimcr_reg.fields[monitor].user; + system = dimcr_reg.fields[monitor].system; + event = dimcr_reg.fields[monitor].event; + + proc_mode = monitors_mode_info[monitors[monitor].mode].mode; + proc_user = (proc_mode & USER_MODE) ? 1 : 0; + proc_system = (proc_mode & SYSTEM_MODE) ? 
1 : 0; + proc_event = monitors[monitor].event; + + if (user != proc_user || system != proc_system || + event != proc_event) + return 1; + } + + return 0; +} + +static inline unsigned char ddm_check_process_start_monitoring( + int monitor, struct task_struct *task) +{ + e2k_ddmcr_t ddmcr_reg; + unsigned char user, proc_user; + unsigned char system, proc_system; + unsigned short event, proc_event = 0; + unsigned char proc_mode; + unsigned char num; + + if (monitors[monitor].is_used) { + ddmcr_reg = task->thread.sw_regs.ddmcr; + + num = monitor - 2; + + user = ddmcr_reg.fields[num].user; + system = ddmcr_reg.fields[num].system; + event = ddmcr_reg.fields[num].event; + + proc_mode = monitors_mode_info[monitors[monitor].mode].mode; + proc_user = (proc_mode & USER_MODE) ? 1 : 0; + proc_system = (proc_mode & SYSTEM_MODE) ? 1 : 0; + proc_event = monitors[monitor].event; + + if (user != proc_user || system != proc_system || + event != proc_event) + return 1; + } + + return 0; +} + +static inline unsigned char check_process_start_monitoring( + int monitor, struct task_struct *task) +{ + if (monitor == DIM0 || monitor == DIM1) + return dim_check_process_start_monitoring(monitor, task); + else if (monitor == DDM0 || monitor == DDM1) + return ddm_check_process_start_monitoring(monitor, task); + + return 0; +} + +static inline void dim_start_monitoring(int monitor) +{ + e2k_dimcr_t dimcr_reg; + unsigned char user; + unsigned char system; + unsigned short event; + unsigned char mode; + + dimcr_reg = READ_DIMCR_REG(); + + mode = monitors_mode_info[monitors[monitor].mode].mode; + user = (mode & USER_MODE) ? 1 : 0; + system = (mode & SYSTEM_MODE) ? 1 : 0; + event = monitors[monitor].event; + + dimcr_reg.fields[monitor].user = user; + dimcr_reg.fields[monitor].system = system; + dimcr_reg.fields[monitor].event = event; + + WRITE_DIMCR_REG(dimcr_reg); + + /* + * We should reset dimar0 and dimar1 at the end or we can receive some + * events of previous type. 
+ */ + if (monitor == DIM0) + WRITE_DIMAR0_REG_VALUE(0); + else if (monitor == DIM1) + WRITE_DIMAR1_REG_VALUE(0); +} + +static inline void ddm_start_monitoring(int monitor) +{ + e2k_ddmcr_t ddmcr_reg; + unsigned char user; + unsigned char system; + unsigned short event; + unsigned char mode; + unsigned char num; + + ddmcr_reg = READ_DDMCR_REG(); + + mode = monitors_mode_info[monitors[monitor].mode].mode; + user = (mode & USER_MODE) ? 1 : 0; + system = (mode & SYSTEM_MODE) ? 1 : 0; + event = monitors[monitor].event; + + num = monitor - 2; + + ddmcr_reg.fields[num].user = user; + ddmcr_reg.fields[num].system = system; + ddmcr_reg.fields[num].event = event; + + WRITE_DDMCR_REG(ddmcr_reg); + + /* + * We should reset ddmar0 and ddmar1 at the end or we can receive some + * events of previous type. + */ + if (monitor == DDM0) + WRITE_DDMAR0_REG_VALUE(0); + else if (monitor == DDM1) + WRITE_DDMAR1_REG_VALUE(0); +} + +static inline void start_monitoring(int monitor) +{ + if (monitor == DIM0 || monitor == DIM1) + dim_start_monitoring(monitor); + else if (monitor == DDM0 || monitor == DDM1) + ddm_start_monitoring(monitor); +} + +static inline void start_process_monitoring(int monitor, + struct task_struct *task) +{ + struct thread_info *thread_info = task_thread_info(task); + unsigned char i; + + for (i = 0; i < NR_CPUS; i++) + atomic64_set(&thread_info->monitors_count[i][monitor], 0); + + /* + * When monitoring is activated for the process, monitoring might + * already been activated. So, we need right values of + * thread_info->monitors_delta.dim0, thread_info->monitors_delta.dim1, + * thread_info->monitors_delta.ddm0 and + * thread_info->monitors_delta.ddm1 to count right value of + * delta_event_count, when the processor will be switched on the + * process next time. 
+ */ + switch (monitor) { + case DIM0: + thread_info->monitors_delta.dim0 = 0; + break; + case DIM1: + thread_info->monitors_delta.dim1 = 0; + break; + case DDM0: + thread_info->monitors_delta.ddm0 = 0; + break; + case DDM1: + thread_info->monitors_delta.ddm1 = 0; + break; + } +} + +void process_monitors(struct task_struct *task) +{ + struct thread_info *thread_info = task_thread_info(task); + unsigned long delta_event_count; + unsigned char cpu_num; + unsigned long flags; + unsigned char i; + + if (!thread_info) + return; + + raw_spin_lock_irqsave(&monitors_lock, flags); + + for (i = 0; i < MONITORS_COUNT; i++) { + if (check_process_start_monitoring(i, task)) + start_process_monitoring(i, task); + + if (check_start_monitoring(i)) + start_monitoring(i); + + if (monitors[i].is_used) { + switch (i) { + case DIM0: + delta_event_count = + thread_info->monitors_delta.dim0; + task->thread.sw_regs.dimar0 = + READ_DIMAR0_REG_VALUE(); + break; + case DIM1: + delta_event_count = + thread_info->monitors_delta.dim1; + task->thread.sw_regs.dimar1 = + READ_DIMAR1_REG_VALUE(); + break; + case DDM0: + delta_event_count = + thread_info->monitors_delta.ddm0; + task->thread.sw_regs.ddmar0 = + READ_DDMAR0_REG_VALUE(); + break; + case DDM1: + delta_event_count = + thread_info->monitors_delta.ddm1; + task->thread.sw_regs.ddmar1 = + READ_DDMAR1_REG_VALUE(); + break; + default: + delta_event_count = 0; + } + + /* + * We do it here, because we are not already + * interested in common_events_count for monitor, when + * it is stopping, and because there is no events for + * monitor, when it is started. 
+ */ + cpu_num = thread_info->monitors_delta.cpu_num; + atomic64_add(delta_event_count, + &common_events_count[cpu_num][i]); + atomic64_add(delta_event_count, + &thread_info->monitors_count[cpu_num][i]); + } + } + + raw_spin_unlock_irqrestore(&monitors_lock, flags); +} + +void init_monitors(struct task_struct *task) +{ + AW(task->thread.sw_regs.dimcr) = 0; + AW(task->thread.sw_regs.ddmcr) = 0; +} + +void store_monitors_delta(struct task_struct *task) +{ + struct thread_info *thread_info = task_thread_info(task); + unsigned long initial_count; + unsigned long current_count; + unsigned long flags; + + if (!thread_info) + return; + + raw_spin_lock_irqsave(&monitors_lock, flags); + + thread_info->monitors_delta.cpu_num = task_cpu(task); + + if (monitors[DIM0].is_used) { + initial_count = task->thread.sw_regs.dimar0; + current_count = READ_DIMAR0_REG_VALUE(); + thread_info->monitors_delta.dim0 = + current_count - initial_count; + } else + thread_info->monitors_delta.dim0 = 0; + + if (monitors[DIM1].is_used) { + initial_count = task->thread.sw_regs.dimar1; + current_count = READ_DIMAR1_REG_VALUE(); + thread_info->monitors_delta.dim1 = + current_count - initial_count; + } else + thread_info->monitors_delta.dim1 = 0; + + if (monitors[DDM0].is_used) { + initial_count = task->thread.sw_regs.ddmar0; + current_count = READ_DDMAR0_REG_VALUE(); + thread_info->monitors_delta.ddm0 = + current_count - initial_count; + } else + thread_info->monitors_delta.ddm0 = 0; + + if (monitors[DDM1].is_used) { + initial_count = task->thread.sw_regs.ddmar1; + current_count = READ_DDMAR1_REG_VALUE(); + thread_info->monitors_delta.ddm1 = + current_count - initial_count; + } else + thread_info->monitors_delta.ddm1 = 0; + + raw_spin_unlock_irqrestore(&monitors_lock, flags); +} + +void add_dead_proc_events(struct task_struct *task) +{ + /* + * We can have a situation, when a monitoring event or mode have been + * changed or monitoring has been started or stopped after the last + * call of 
process_monitors function and before the call of this + * function. In this case we will have wrong data in + * dead_proc_events_buf. But as this situation is almost impossible, + * we do nothing to avoid it. + */ + + struct thread_info *thread_info = task_thread_info(task); + unsigned char id; + unsigned long flags; + + if (!thread_info) + return; + + raw_spin_lock_irqsave(&dead_proc_lock, flags); + + if (++dead_proc_events_buf_id == DEAD_PROC_EVENTS_COUNT) { + dead_proc_events_buf_full = 1; + dead_proc_events_buf_id = 0; + } + + id = dead_proc_events_buf_id; + + raw_spin_lock(&monitors_lock); + memcpy(dead_proc_events_buf[id].monitors, monitors, + sizeof(struct monitors_info) * MONITORS_COUNT); + raw_spin_unlock(&monitors_lock); + + memcpy(dead_proc_events_buf[id].monitors_count, + thread_info->monitors_count, + sizeof(atomic64_t) * NR_CPUS * MONITORS_COUNT); + + dead_proc_events_buf[id].pid = task->pid; + dead_proc_events_buf[id].cpus_mask = *cpu_online_mask; + + raw_spin_unlock_irqrestore(&dead_proc_lock, flags); +} + +unsigned char get_monitors_mask(char *title) +{ + unsigned char mask = 0; + unsigned char title_start = 0; + unsigned short len = 0; + unsigned short event_id; + char event_name[8]; + unsigned long flags; + unsigned char i; + + raw_spin_lock_irqsave(&monitors_lock, flags); + + for (i = 0; i < MONITORS_COUNT; i++) { + if (!monitors[i].is_used) + continue; + + event_id = monitors[i].event; + + memset(event_name, 0, 8); + sprintf(event_name, "0x%X", event_id); + + mask |= 1 << i; + + if (title_start) { + sprintf(title + len, "%s", event_name); + len += strlen(event_name); + title_start = 0; + } else { + sprintf(title + len, " %s", event_name); + len += strlen(event_name) + 1; + } + } + + raw_spin_unlock_irqrestore(&monitors_lock, flags); + + title[len] = 0; + + return mask; +} + +static int pid_monitors_events_show(struct seq_file *file, void *data) +{ + struct inode *inode; + struct task_struct *task; + struct thread_info *thread_info; + 
e2k_ddmcr_t mcr_reg; + unsigned char user; + unsigned char system; + unsigned char mode; + unsigned long count; + unsigned char num; + unsigned long flags; + unsigned char i, j; + + inode = file->private; + if (!inode) + return 0; + + task = get_proc_task(inode); + if (!task) + return 0; + + thread_info = task_thread_info(task); + if (!thread_info) + return 0; + + raw_spin_lock_irqsave(&monitors_lock, flags); + + for (i = 0; i < NR_CPUS; i++) { + if (!cpu_online(i)) + continue; + + for (j = 0; j < MONITORS_COUNT; j++) { + if (!monitors[j].is_used) + continue; + + mode = monitors_mode_info[monitors[j].mode].mode; + user = (mode & USER_MODE) ? 1 : 0; + system = (mode & SYSTEM_MODE) ? 1 : 0; + + switch (j) { + case DIM0: + AW(mcr_reg) = AW(task->thread.sw_regs.dimcr); + break; + case DIM1: + AW(mcr_reg) = AW(task->thread.sw_regs.dimcr); + break; + case DDM0: + mcr_reg = task->thread.sw_regs.ddmcr; + break; + case DDM1: + mcr_reg = task->thread.sw_regs.ddmcr; + break; + default: + continue; + } + + num = j % 2; + + count = atomic64_read( + &thread_info->monitors_count[i][j]); + + /* + * We should do it, because we can have a situation, + * when a monitoring event or mode have been changed + * or monitoring has been started, but the process, + * for which we want to see a count of monitoring + * events, has not yet started processing, so a count + * of monitoring events, taken from the process + * context, is invalid. 
+ */ + if (mcr_reg.fields[num].event != monitors[j].event + || mcr_reg.fields[num].user != user + || mcr_reg.fields[num].system != system) + count = 0; + + seq_printf(file, "CPU%d:%s:%s:0x%x=%lu\n", + i, + monitors_id_names[j], + monitors_mode_info[monitors[j].mode].name, + monitors[j].event, + count); + } + } + + raw_spin_unlock_irqrestore(&monitors_lock, flags); + + put_task_struct(task); + + return 0; +} + +static int pid_monitors_events_open(struct inode *inode, struct file *file) +{ + single_open(file, pid_monitors_events_show, inode); + return 0; +} + +const struct file_operations proc_pid_monitors_events_operations = +{ + .owner = THIS_MODULE, + .open = pid_monitors_events_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static inline unsigned short monitors_settings_string_get_next_word_len( + char *str, unsigned char *is_last) +{ + unsigned short len; + + *is_last = 0; + len = strcspn(str, " \n"); + + if (str[len] == '\n' || len == strlen(str)) + *is_last = 1; + + return len; +} + +static inline char lookup_monitors_id(char *str, unsigned short len) +{ + unsigned char i; + char *name; + + for (i = 0; i < MONITORS_COUNT; i++) { + name = monitors_id_names[i]; + + if (len == strlen(name) && strncmp(str, name, len) == 0) + return i; + } + + return -1; +} + +static inline char lookup_monitors_mode_id(char *str, unsigned short len) +{ + unsigned char i; + char *name; + + if (len > MONITORS_MODE_NAME_LEN) + return -1; + + for (i = 0; i < MONITORS_MODES_COUNT; i++) { + name = monitors_mode_info[i].name; + + if (len == strlen(name) && strncmp(str, name, len) == 0) + return i; + } + + return -1; +} + +static inline int lookup_monitors_event(char *str, + unsigned short len, int monitor) +{ + int event = -1; + struct monitors_events_range *monitors_events_list; + unsigned char monitors_events_range_count; + unsigned char i; + + switch (monitor) { + case DIM0: + case DIM1: + monitors_events_range_count = 
dim_monitors_events_range_count; + monitors_events_list = dim_monitors_events_list; + break; + case DDM0: + monitors_events_range_count = ddm0_monitors_events_range_count; + monitors_events_list = ddm0_monitors_events_list; + break; + case DDM1: + monitors_events_range_count = ddm1_monitors_events_range_count; + monitors_events_list = ddm1_monitors_events_list; + break; + default: + return -1; + } + + sscanf(str, "0x%X", &event); + + for (i = 0; i < monitors_events_range_count; i++) { + if (monitors_events_list[i].start <= event && + monitors_events_list[i].end >= event) + return event; + } + + return -1; +} + +static inline void parse_monitors_settings_string(char *str) +{ + unsigned short i = 0; + unsigned char j; + unsigned short len1 = 0, len2 = 0, len3 = 0; + unsigned char is_last = 0; + struct monitors_info new_monitors[MONITORS_COUNT]; + int new_monitors_used = 0; + char monitor_id; + char mode_id; + int event; + unsigned long flags; + + memset(new_monitors, 0, sizeof(struct monitors_info) * + MONITORS_COUNT); + + while (!is_last) { + if (i % 3 == 0) { + len1 = monitors_settings_string_get_next_word_len( + str, &is_last); + + /* + * We check, if input string is an empty string, or if + * it is an invalid string (without monitor name or + * number), or if it is a valid string. 
+ */ + if (is_last && (i || len1 > 1 || (len1 && + strncmp(str, "\n", 1)))) { + pr_err("Failed to adjust monitors (invalid " + "settings string).\n"); + return; + } + } else if (i % 3 == 1) { + len2 = monitors_settings_string_get_next_word_len( + str + len1 + 1, &is_last); + + if (is_last) { + pr_err("Failed to adjust monitors (invalid " + "settings string).\n"); + return; + } + } else { + len3 = monitors_settings_string_get_next_word_len( + str + len1 + len2 + 2, &is_last); + + monitor_id = lookup_monitors_id(str, len1); + if (monitor_id == -1) { + pr_err("Failed to adjust monitors (invalid " + "monitor name).\n"); + return; + } + + mode_id = lookup_monitors_mode_id( + str + len1 + 1, len2); + if (mode_id == -1) { + pr_err("Failed to adjust monitors (invalid " + "mode name).\n"); + return; + } + + event = lookup_monitors_event(str + len1 + len2 + 2, + len3, monitor_id); + if (event == -1) { + pr_err("Failed to adjust monitors (invalid " + "event number).\n"); + return; + } + + new_monitors[(unsigned char) + monitor_id].is_used = 1; + new_monitors[(unsigned char) + monitor_id].event = event; + new_monitors[(unsigned char) + monitor_id].mode = mode_id; + + str += len1 + len2 + len3 + 3; + } + + i++; + } + + raw_spin_lock_irqsave(&monitors_lock, flags); + + for (i = 0; i < MONITORS_COUNT; i++) { + if ((new_monitors[i].is_used && !monitors[i].is_used) || + (new_monitors[i].is_used && monitors[i].is_used && + (new_monitors[i].event != monitors[i].event || + new_monitors[i].mode != + monitors[i].mode))) { + for (j = 0; j < NR_CPUS; j++) + atomic64_set(&common_events_count[j][i], 0); + } + + if (new_monitors[i].is_used) + new_monitors_used |= 1 << i; + } + + memcpy(monitors, new_monitors, + sizeof(struct monitors_info) * MONITORS_COUNT); + + monitors_used = new_monitors_used; + + raw_spin_unlock_irqrestore(&monitors_lock, flags); +} + +static ssize_t monitors_settings_write(struct file *file, + const char __user *buffer, size_t count, loff_t *data) +{ + char 
monitors_settings_buffer[MONITORS_SETTINGS_STR_MAX_SIZE]; + int ret; + + memset(monitors_settings_buffer, 0, sizeof(char) * + MONITORS_SETTINGS_STR_MAX_SIZE); + + if (count > MONITORS_SETTINGS_STR_MAX_SIZE - 1) { + pr_err("Failed to adjust monitors (too long settings " + "string).\n"); + ret = -EINVAL; + } else if (copy_from_user(monitors_settings_buffer, buffer, count)) { + pr_err("Failed to adjust monitors (kernel error).\n"); + ret = -EFAULT; + } else { + parse_monitors_settings_string(monitors_settings_buffer); + ret = count; + } + + return ret; +} + +static int monitors_settings_proc_show(struct seq_file *m, void *data) +{ + unsigned short event; + unsigned char str_start = 1; + char *mode_name; + unsigned long flags; + unsigned char i; + + raw_spin_lock_irqsave(&monitors_lock, flags); + + for (i = 0; i < MONITORS_COUNT; i++) { + if (monitors[i].is_used) { + event = monitors[i].event; + mode_name = monitors_mode_info[monitors[i].mode].name; + + if (str_start) { + seq_printf(m, "%s %s 0x%x", + monitors_id_names[i], + mode_name, + event); + str_start = 0; + } else + seq_printf(m, " %s %s 0x%x", + monitors_id_names[i], + mode_name, + event); + } + } + + raw_spin_unlock_irqrestore(&monitors_lock, flags); + + if (!str_start) + seq_printf(m, "%s", "\n"); + + return 0; +} + +static int monitors_events_proc_show(struct seq_file *m, void *data) +{ + unsigned long flags; + unsigned char i, j; + + raw_spin_lock_irqsave(&monitors_lock, flags); + + for (i = 0; i < NR_CPUS; i++) { + if (!cpu_online(i)) + continue; + + for (j = 0; j < MONITORS_COUNT; j++) { + if (!monitors[j].is_used) + continue; + + seq_printf(m, "CPU%d:%s:%s:0x%x=%lld\n", + i, + monitors_id_names[j], + monitors_mode_info[monitors[j].mode].name, + monitors[j].event, + atomic64_read(&common_events_count[i][j])); + } + } + + raw_spin_unlock_irqrestore(&monitors_lock, flags); + + return 0; +} + +static int monitors_dead_proc_events_seq_show(struct seq_file *s, void *v) +{ + unsigned char dead_proc_id; + 
dead_proc_events_t dead_proc_event; + pid_t pid; + cpumask_t cpus_mask; + struct monitors_info monitor; + unsigned long events_count; + unsigned char i, j; + + dead_proc_id = *((loff_t *)v); + + if (dead_proc_events_buf_full_tmp) + dead_proc_id = + (dead_proc_events_buf_id_tmp + dead_proc_id + 1) % + DEAD_PROC_EVENTS_COUNT; + + dead_proc_event = dead_proc_events_buf_tmp[dead_proc_id]; + pid = dead_proc_event.pid; + cpus_mask = dead_proc_event.cpus_mask; + + seq_printf(s, "pid=%d:\n", pid); + + for (i = 0; i < NR_CPUS; i++) { + if (!cpumask_test_cpu(i, &cpus_mask)) + continue; + + for (j = 0; j < MONITORS_COUNT; j++) { + monitor = dead_proc_event.monitors[j]; + + if (!monitor.is_used) + continue; + + events_count = + dead_proc_event.monitors_count[i][j]; + + seq_printf(s, "CPU%d:%s:%s:0x%x=%lu\n", + i, + monitors_id_names[j], + monitors_mode_info[monitor.mode].name, + monitor.event, + events_count); + } + } + + return 0; +} + +static void *monitors_dead_proc_events_seq_start( + struct seq_file *s, loff_t *pos) +{ + unsigned char dead_proc_count; + unsigned long flags; + + raw_spin_lock(&dead_proc_lock_tmp); + + if (*pos == 0) { + raw_spin_lock_irqsave(&dead_proc_lock, flags); + + memcpy(dead_proc_events_buf_tmp, dead_proc_events_buf, + sizeof(dead_proc_events_t) * DEAD_PROC_EVENTS_COUNT); + dead_proc_events_buf_id_tmp = dead_proc_events_buf_id; + dead_proc_events_buf_full_tmp = dead_proc_events_buf_full; + + dead_proc_events_buf_id = -1; + dead_proc_events_buf_full = 0; + + raw_spin_unlock_irqrestore(&dead_proc_lock, flags); + } + + if (dead_proc_events_buf_full_tmp) + dead_proc_count = DEAD_PROC_EVENTS_COUNT; + else + dead_proc_count = dead_proc_events_buf_id_tmp + 1; + + if (*pos >= dead_proc_count) + return 0; + + return (void *)pos; +} + +static void *monitors_dead_proc_events_seq_next(struct seq_file *s, void *v, + loff_t *pos) +{ + unsigned char dead_proc_count; + + (*pos)++; + + if (dead_proc_events_buf_full_tmp) + dead_proc_count = DEAD_PROC_EVENTS_COUNT; + 
else + dead_proc_count = dead_proc_events_buf_id_tmp + 1; + + if (*pos >= dead_proc_count) + return 0; + + return (void *)pos; +} + +static void monitors_dead_proc_events_seq_stop(struct seq_file *s, void *v) +{ + /* + * We unlock dead_proc_lock_tmp here, because we could not lock it + * for a long time and perform user code with it is locked. But one + * could 'cat /proc/monitors/dead_proc_events' simultaneously with us. + * In this case we recieve wrong data for some dead processes. Now we + * do nothing with it. + */ + raw_spin_unlock(&dead_proc_lock_tmp); +} + +static const struct seq_operations monitors_dead_proc_events_seq_ops = { + .start = monitors_dead_proc_events_seq_start, + .next = monitors_dead_proc_events_seq_next, + .stop = monitors_dead_proc_events_seq_stop, + .show = monitors_dead_proc_events_seq_show +}; + +static int monitors_dead_proc_events_proc_open(struct inode *inode, + struct file *file) +{ + return seq_open(file, &monitors_dead_proc_events_seq_ops); +} + +static const struct file_operations monitors_dead_proc_events_proc_ops = { + .owner = THIS_MODULE, + .open = monitors_dead_proc_events_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + +static int monitors_help_seq_show(struct seq_file *s, void *v) +{ + unsigned char id = *((loff_t *)v); + unsigned char i; + + if (id == DIM0) { + seq_printf(s, "I0, I1 events:\n"); + for (i = 0; i < dim_monitors_events_range_count; i++) { + if (i) + seq_printf(s, ", "); + if (dim_monitors_events_list[i].start != + dim_monitors_events_list[i].end) + seq_printf(s, "0x%x, ..., 0x%x", + dim_monitors_events_list[i].start, + dim_monitors_events_list[i].end); + else + seq_printf(s, "0x%x", + dim_monitors_events_list[i].end); + } + } else if (id == DDM0) { + seq_printf(s, "\nD0 events:\n"); + for (i = 0; i < ddm0_monitors_events_range_count; i++) { + if (i) + seq_printf(s, ", "); + if (ddm0_monitors_events_list[i].start != + ddm0_monitors_events_list[i].end) + seq_printf(s, 
"0x%x, ..., 0x%x", + ddm0_monitors_events_list[i].start, + ddm0_monitors_events_list[i].end); + else + seq_printf(s, "0x%x", + ddm0_monitors_events_list[i].end); + } + } else if (id == DDM1) { + seq_printf(s, "\nD1 events:\n"); + for (i = 0; i < ddm1_monitors_events_range_count; i++) { + if (i) + seq_printf(s, ", "); + if (ddm1_monitors_events_list[i].start != + ddm1_monitors_events_list[i].end) + seq_printf(s, "0x%x, ..., 0x%x", + ddm1_monitors_events_list[i].start, + ddm1_monitors_events_list[i].end); + else + seq_printf(s, "0x%x", + ddm1_monitors_events_list[i].end); + } + } else if (id == MONITORS_COUNT) { + seq_printf(s, "\nSetting example:\n" + "echo \"D0 S 0x1 D1 C 0x2\" > " + "/proc/monitors/monitors_settings\n"); + } + + return 0; +} + +static void *monitors_help_seq_start(struct seq_file *s, loff_t *pos) +{ + if (*pos >= MONITORS_COUNT + 1) + return 0; + return (void *)pos; +} + +static void *monitors_help_seq_next(struct seq_file *s, void *v, loff_t *pos) +{ + (*pos)++; + if (*pos >= MONITORS_COUNT + 1) + return 0; + return (void *)pos; +} + +static void monitors_help_seq_stop(struct seq_file *s, void *v) +{ +} + +static const struct seq_operations monitors_help_seq_ops = { + .start = monitors_help_seq_start, + .next = monitors_help_seq_next, + .stop = monitors_help_seq_stop, + .show = monitors_help_seq_show +}; + +static int monitors_help_proc_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &monitors_help_seq_ops); +} + +static const struct file_operations monitors_help_proc_ops = { + .owner = THIS_MODULE, + .open = monitors_help_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + + +/* + * SIC monitors + */ + +static int sicmonitors_events_proc_show(struct seq_file *m, void *data) +{ + e2k_sic_mar_lo_t mar_lo; + e2k_sic_mar_hi_struct_t mar_hi; + int monitor_id; + int node; + unsigned long flags; + unsigned char i; + + raw_spin_lock_irqsave(&sicmonitors_lock, flags); + + for (i = 0; i < 
SICMONITORS_COUNT; i++) { + if (sicmonitors[i].is_used) { + node = i / SICMONITORS_COUNT_PER_NODE; + monitor_id = i % SICMONITORS_COUNT_PER_NODE; + + switch (monitor_id) { + case MCM0: + mar_lo = sic_read_node_nbsr_reg( + node, SIC_sic_mar0_lo); + mar_hi.E2K_SIC_MAR_HI_reg = + sic_read_node_nbsr_reg( + node, SIC_sic_mar0_hi); + break; + case MCM1: + mar_lo = sic_read_node_nbsr_reg( + node, SIC_sic_mar1_lo); + mar_hi.E2K_SIC_MAR_HI_reg = + sic_read_node_nbsr_reg( + node, SIC_sic_mar1_hi); + break; + default: + continue; + } + + seq_printf(m, "NODE%d:%s:0x%x=0x%x%x\n", node, + sicmonitors_id_names[monitor_id], + sicmonitors[i].event, + mar_hi.E2K_SIC_MAR_HI_val, mar_lo); + } + } + + raw_spin_unlock_irqrestore(&sicmonitors_lock, flags); + + return 0; +} + +static inline char lookup_sicmonitors_id(char *str, unsigned short len) +{ + unsigned char i; + char *name; + + for (i = 0; i < SICMONITORS_COUNT; i++) { + name = sicmonitors_id_names[i]; + + if (len == strlen(name) && strncmp(str, name, len) == 0) + return i; + } + + return -1; +} + +static inline int lookup_sicmonitors_event(char *str, unsigned short len, + int monitor) +{ + int event = -1; + struct sicmonitors_events_range *sicmonitors_events_list; + unsigned char sicmonitors_events_range_count; + unsigned char i; + + switch (monitor) { + case MCM0: + sicmonitors_events_range_count = + mcm0_sicmonitors_events_range_count; + sicmonitors_events_list = mcm0_sicmonitors_events_list; + break; + case MCM1: + sicmonitors_events_range_count = + mcm1_sicmonitors_events_range_count; + sicmonitors_events_list = mcm1_sicmonitors_events_list; + break; + default: + return -1; + } + + sscanf(str, "0x%X", &event); + + for (i = 0; i < sicmonitors_events_range_count; i++) { + if (sicmonitors_events_list[i].start <= event && + sicmonitors_events_list[i].end >= event) + return event; + } + + return -1; +} + +static inline bool is_mcr_l3_event(u64 event) +{ + if (IS_MACHINE_E8C && event >= 32 && event < 64) + return true; + + if 
(IS_MACHINE_E8C2 && event >= 32 && event < 64) + return true; + + return false; +} + +static void sicmonitors_adjust(int node, int monitor, + struct sicmonitors_info *new_monitors) +{ + e2k_sic_mcr_struct_t mcr_reg; + e2k_sic_mar_lo_t mar_lo; + e2k_sic_mar_hi_struct_t mar_hi; + int idx; + u64 event; + + idx = node * SICMONITORS_COUNT_PER_NODE + monitor; + event = new_monitors[idx].event; + + mcr_reg.E2K_SIC_MCR_reg = sic_read_node_nbsr_reg(node, SIC_sic_mcr); + mar_lo = 0; + mar_hi.E2K_SIC_MAR_HI_reg = 0; + if (is_mcr_l3_event(event)) + AS(mcr_reg).mcnmo = 1; + + if (monitor == MCM0) { + mcr_reg.E2K_SIC_MCR_v0 = new_monitors[idx].is_used; + mcr_reg.E2K_SIC_MCR_es0 = event; + + sic_write_node_nbsr_reg( + node, SIC_sic_mcr, mcr_reg.E2K_SIC_MCR_reg); + sic_write_node_nbsr_reg( + node, SIC_sic_mar0_lo, mar_lo); + sic_write_node_nbsr_reg( + node, SIC_sic_mar0_hi, mar_hi.E2K_SIC_MAR_HI_reg); + } else if (monitor == MCM1) { + mcr_reg.E2K_SIC_MCR_v1 = new_monitors[idx].is_used; + mcr_reg.E2K_SIC_MCR_es1 = event; + + sic_write_node_nbsr_reg( + node, SIC_sic_mcr, mcr_reg.E2K_SIC_MCR_reg); + sic_write_node_nbsr_reg( + node, SIC_sic_mar1_lo, mar_lo); + sic_write_node_nbsr_reg( + node, SIC_sic_mar1_hi, mar_hi.E2K_SIC_MAR_HI_reg); + } +} + +static inline void parse_sicmonitors_settings_string(char *str) +{ + unsigned short i = 0; + unsigned short len1 = 0, len2 = 0, len3 = 0; + unsigned char is_last = 0; + struct sicmonitors_info new_monitors[SICMONITORS_COUNT]; + int node; + char monitor_id; + int event; + unsigned long flags; + + memset(new_monitors, 0, sizeof(struct sicmonitors_info) * + SICMONITORS_COUNT); + + while (!is_last) { + if (i % 3 == 0) { + len1 = monitors_settings_string_get_next_word_len( + str, &is_last); + + /* + * We check, if input string is an empty string, or if + * it is an invalid string (without sicmonitor name or + * number), or if it is a valid string. 
+ */ + if (is_last && (i || len1 > 1 || (len1 && + strncmp(str, "\n", 1)))) { + pr_err("Failed to adjust sicmonitors (invalid " + "settings string).\n"); + return; + } + } else if (i % 3 == 1) { + len2 = monitors_settings_string_get_next_word_len( + str + len1 + 1, &is_last); + + if (is_last) { + pr_err("Failed to adjust sicmonitors (invalid " + "settings string).\n"); + return; + } + } else { + len3 = monitors_settings_string_get_next_word_len( + str + len1 + len2 + 2, &is_last); + + node = NUMA_NO_NODE; + sscanf(str, "%d", &node); + if (!node_online(node)) { + pr_err("Failed to adjust sicmonitors (invalid " + "node number).\n"); + return; + } + + monitor_id = lookup_sicmonitors_id( + str + len1 + 1, len2); + if (monitor_id == -1) { + pr_err("Failed to adjust sicmonitors (invalid " + "monitor name).\n"); + return; + } + + event = lookup_sicmonitors_event( + str + len1 + len2 + 2, + len3, monitor_id); + if (event == -1) { + pr_err("Failed to adjust sicmonitors (invalid " + "event number).\n"); + return; + } + + monitor_id += node * SICMONITORS_COUNT_PER_NODE; + + new_monitors[(unsigned char) + monitor_id].is_used = 1; + new_monitors[(unsigned char) + monitor_id].event = event; + + str += len1 + len2 + len3 + 3; + } + + i++; + } + + raw_spin_lock_irqsave(&sicmonitors_lock, flags); + + for (i = 0; i < SICMONITORS_COUNT; i++) { + if (new_monitors[i].is_used != sicmonitors[i].is_used || + new_monitors[i].event != + sicmonitors[i].event) { + sicmonitors_adjust(i / SICMONITORS_COUNT_PER_NODE, + i % SICMONITORS_COUNT_PER_NODE, new_monitors); + } + } + + memcpy(sicmonitors, new_monitors, + sizeof(struct sicmonitors_info) * SICMONITORS_COUNT); + + raw_spin_unlock_irqrestore(&sicmonitors_lock, flags); +} + +static ssize_t sicmonitors_settings_write(struct file *file, + const char __user *buffer, size_t count, loff_t *data) +{ + char monitors_settings_buffer[SICMONITORS_SETTINGS_STR_MAX_SIZE]; + + memset(monitors_settings_buffer, 0, sizeof(char) * + 
SICMONITORS_SETTINGS_STR_MAX_SIZE); + + if (count > SICMONITORS_SETTINGS_STR_MAX_SIZE - 1) { + pr_err("Failed to adjust sicmonitors (too long settings string).\n"); + return -EINVAL; + } + + if (copy_from_user(monitors_settings_buffer, buffer, count)) { + pr_err("Failed to adjust sicmonitors (kernel error).\n"); + return -EFAULT; + } + + parse_sicmonitors_settings_string(monitors_settings_buffer); + + return count; +} + +static int sicmonitors_settings_proc_show(struct seq_file *m, void *data) +{ + int monitor_id; + int node; + unsigned char str_start = 1; + unsigned long flags; + unsigned char i; + + raw_spin_lock_irqsave(&sicmonitors_lock, flags); + + for (i = 0; i < SICMONITORS_COUNT; i++) { + if (sicmonitors[i].is_used) { + node = i / SICMONITORS_COUNT_PER_NODE; + monitor_id = i % SICMONITORS_COUNT_PER_NODE; + + if (str_start) { + seq_printf(m, "NODE%d %s 0x%x", + node, + sicmonitors_id_names[monitor_id], + sicmonitors[i].event); + str_start = 0; + } else + seq_printf(m, " NODE%d %s 0x%x", + node, + sicmonitors_id_names[monitor_id], + sicmonitors[i].event); + } + } + + raw_spin_unlock_irqrestore(&sicmonitors_lock, flags); + + if (!str_start) + seq_printf(m, "%s", "\n"); + + return 0; +} + +static int sicmonitors_help_seq_show(struct seq_file *s, void *v) +{ + unsigned char id = *((loff_t *)v); + unsigned char i; + + if (id == MCM0) { + seq_printf(s, "M0 events:\n"); + for (i = 0; i < mcm0_sicmonitors_events_range_count; i++) { + if (i) + seq_printf(s, ", "); + if (mcm0_sicmonitors_events_list[i].start != + mcm0_sicmonitors_events_list[i].end) + seq_printf(s, "0x%x, ..., 0x%x", + mcm0_sicmonitors_events_list[i].start, + mcm0_sicmonitors_events_list[i].end); + else + seq_printf(s, "0x%x", + mcm0_sicmonitors_events_list[i].end); + } + } else if (id == MCM1) { + seq_printf(s, "\nM1 events:\n"); + for (i = 0; i < mcm1_sicmonitors_events_range_count; i++) { + if (i) + seq_printf(s, ", "); + if (mcm1_sicmonitors_events_list[i].start != + 
mcm1_sicmonitors_events_list[i].end) + seq_printf(s, "0x%x, ..., 0x%x", + mcm1_sicmonitors_events_list[i].start, + mcm1_sicmonitors_events_list[i].end); + else + seq_printf(s, "0x%x", + mcm1_sicmonitors_events_list[i].end); + } + } else if (id == SICMONITORS_COUNT_PER_NODE) { + seq_printf(s, "\nSetting example:\n" + "echo \"0 M0 0x2 0 M1 0x3\" > " + "/proc/monitors/sicmonitors_settings\n"); + } + + return 0; +} + +static void *sicmonitors_help_seq_start(struct seq_file *s, loff_t *pos) +{ + if (*pos >= SICMONITORS_COUNT_PER_NODE + 1) + return 0; + return (void *)pos; +} + +static void *sicmonitors_help_seq_next(struct seq_file *s, void *v, + loff_t *pos) +{ + (*pos)++; + if (*pos >= SICMONITORS_COUNT_PER_NODE + 1) + return 0; + return (void *)pos; +} + +static void sicmonitors_help_seq_stop(struct seq_file *s, void *v) +{ +} + +static const struct seq_operations sicmonitors_help_seq_ops = { + .start = sicmonitors_help_seq_start, + .next = sicmonitors_help_seq_next, + .stop = sicmonitors_help_seq_stop, + .show = sicmonitors_help_seq_show +}; + +static int sicmonitors_help_proc_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &sicmonitors_help_seq_ops); +} + +static const struct file_operations sicmonitors_help_proc_ops = { + .owner = THIS_MODULE, + .open = sicmonitors_help_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + + +/* + * IPCC monitors + */ + +static int ipccmonitors_events_proc_show(struct seq_file *m, void *data) +{ + e2k_ipcc_str_struct_t ipcc_str_reg; + struct ipccmonitors_event_info event; + unsigned short event_id; + int node; + unsigned long flags; + unsigned char i; + + raw_spin_lock_irqsave(&ipccmonitors_lock, flags); + + if (ipccmonitors.is_used) { + event_id = ipccmonitors.event; + event = IPCC_event_info[event_id]; + + for_each_online_node(node) { + for (i = 1; i < SIC_IPCC_LINKS_COUNT + 1; i++) { + ipcc_str_reg.E2K_IPCC_STR_reg = + sic_get_ipcc_str(node, i); + + seq_printf(m, 
"NODE%d:IPCC%d:%s(0x%x)=0x%x\n", + node, i, + event.name, + event.event, + ipcc_str_reg.E2K_IPCC_STR_ecnt); + } + } + } + + raw_spin_unlock_irqrestore(&ipccmonitors_lock, flags); + + return 0; +} + +static inline short lookup_ipccmonitors_event_id(char *str, unsigned short len) +{ + unsigned char i; + char *name; + unsigned short event; + int input_event; + unsigned char is_event_as_num = 0; + + is_event_as_num = sscanf(str, "0x%X", &input_event); + + for (i = 0; i < IPCC_EVENTS_COUNT; i++) { + name = IPCC_event_info[i].name; + event = IPCC_event_info[i].event; + + /* + * We can set event, using event name or using event number. + */ + if ((len == strlen(name) && strncmp(str, name, len) == 0) || + (is_event_as_num && event == input_event)) + return i; + } + + return -1; +} + +static void ipccmonitors_adjust(struct ipccmonitors_info new_monitors) +{ + e2k_ipcc_str_struct_t ipcc_str_reg; + unsigned short event_id = new_monitors.event; + int node; + int i; + + for_each_online_node(node) { + for (i = 1; i < SIC_IPCC_LINKS_COUNT + 1; i++) { + ipcc_str_reg.E2K_IPCC_STR_reg = + sic_get_ipcc_str(node, i); + + ipcc_str_reg.E2K_IPCC_STR_ecf = + (new_monitors.is_used ? 
+ IPCC_event_info[event_id].event : 0); + ipcc_str_reg.E2K_IPCC_STR_eco = 1; + + sic_set_ipcc_str( + node, i, ipcc_str_reg.E2K_IPCC_STR_reg); + } + } +} + +static inline void parse_ipccmonitors_settings_string(char *str) +{ + unsigned short len = 0; + unsigned char is_last = 0; + struct ipccmonitors_info new_monitors; + short event_id; + unsigned long flags; + + memset(&new_monitors, 0, sizeof(struct ipccmonitors_info)); + + len = monitors_settings_string_get_next_word_len(str, &is_last); + + if (!is_last) { + pr_err("Failed to adjust ipccmonitors (invalid settings " + "string).\n"); + return; + } + + if (len && strncmp(str, "\n", 1)) { + event_id = lookup_ipccmonitors_event_id(str, len); + if (event_id == -1) { + pr_err("Failed to adjust ipccmonitors (invalid event " + "name or number).\n"); + return; + } + + new_monitors.is_used = 1; + new_monitors.event = event_id; + } + + raw_spin_lock_irqsave(&ipccmonitors_lock, flags); + + if (new_monitors.is_used != ipccmonitors.is_used || + new_monitors.event != + ipccmonitors.event) { + ipccmonitors_adjust(new_monitors); + } + + memcpy(&ipccmonitors, &new_monitors, sizeof(struct ipccmonitors_info)); + + raw_spin_unlock_irqrestore(&ipccmonitors_lock, flags); +} + +static ssize_t ipccmonitors_settings_write(struct file *file, + const char __user *buffer, size_t count, loff_t *data) +{ + char monitors_settings_buffer[IPCCMONITORS_SETTINGS_STR_MAX_SIZE]; + int ret; + + memset(monitors_settings_buffer, 0, sizeof(char) * + IPCCMONITORS_SETTINGS_STR_MAX_SIZE); + + if (count > IPCCMONITORS_SETTINGS_STR_MAX_SIZE - 1) { + pr_err("Failed to adjust ipccmonitors (too long settings " + "string).\n"); + ret = -EINVAL; + } else if (copy_from_user(monitors_settings_buffer, buffer, count)) { + pr_err("Failed to adjust ipccmonitors (kernel error).\n"); + ret = -EFAULT; + } else { + parse_ipccmonitors_settings_string(monitors_settings_buffer); + ret = count; + } + + return ret; +} + +static int ipccmonitors_settings_proc_show(struct seq_file 
*m, void *data) +{ + struct ipccmonitors_event_info event; + unsigned short event_id; + unsigned char is_used; + unsigned long flags; + + raw_spin_lock_irqsave(&ipccmonitors_lock, flags); + is_used = ipccmonitors.is_used; + event_id = ipccmonitors.event; + raw_spin_unlock_irqrestore(&ipccmonitors_lock, flags); + + if (is_used) { + event = IPCC_event_info[event_id]; + seq_printf(m, "%s(0x%x)\n", event.name, event.event); + } + + return 0; +} + +static int ipccmonitors_help_seq_show(struct seq_file *s, void *v) +{ + unsigned char i; + + seq_printf(s, "Events:\n"); + + for (i = 0; i < IPCC_EVENTS_COUNT; i++) + seq_printf(s, "%s=0x%x\n", + IPCC_event_info[i].name, + IPCC_event_info[i].event); + + seq_printf(s, "\nSetting example:\n" + "echo \"0x1\" > /proc/monitors/ipccmonitors_settings\n" + "echo \"IPCC_LERR\" > /proc/monitors/ipccmonitors_settings\n"); + + return 0; +} + +static void *ipccmonitors_help_seq_start(struct seq_file *s, loff_t *pos) +{ + if (*pos >= IPCCMONITORS_COUNT) + return 0; + return (void *)pos; +} + +static void *ipccmonitors_help_seq_next(struct seq_file *s, void *v, + loff_t *pos) +{ + (*pos)++; + if (*pos >= IPCCMONITORS_COUNT) + return 0; + return (void *)pos; +} + +static void ipccmonitors_help_seq_stop(struct seq_file *s, void *v) +{ +} + +static const struct seq_operations ipccmonitors_help_seq_ops = { + .start = ipccmonitors_help_seq_start, + .next = ipccmonitors_help_seq_next, + .stop = ipccmonitors_help_seq_stop, + .show = ipccmonitors_help_seq_show +}; + +static int ipccmonitors_help_proc_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &ipccmonitors_help_seq_ops); +} + +static const struct file_operations ipccmonitors_help_proc_ops = { + .owner = THIS_MODULE, + .open = ipccmonitors_help_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + + +/* + * IOCC monitors + */ + +static int ioccmonitors_events_proc_show(struct seq_file *m, void *data) +{ + e2k_io_str_struct_t io_str_reg; + 
struct ioccmonitors_event_info event; + unsigned short event_id; + int node; + char *s; + unsigned long flags; + unsigned char i; + + raw_spin_lock_irqsave(&ioccmonitors_lock, flags); + + if (ioccmonitors.is_used) { + event_id = ioccmonitors.event; + event = IOCC_event_info[event_id]; + + for_each_online_node(node) { + for (i = 0; i < SIC_IO_LINKS_COUNT; i++) { + io_str_reg.E2K_IO_STR_reg = + sic_get_io_str(node, i); + + if (!i) + s = "IOCC"; + else if (IS_MACHINE_E2S) + s = "IOCC_HI"; + else + s = "IOCC1"; + + seq_printf(m, "NODE%d:%s:%s(0x%x)=0x%x\n", + node, s, + event.name, + event.event, + io_str_reg.E2K_IO_STR_rc); + } + } + } + + raw_spin_unlock_irqrestore(&ioccmonitors_lock, flags); + + return 0; +} + +static inline short lookup_ioccmonitors_event_id(char *str, unsigned short len) +{ + unsigned char i; + char *name; + unsigned short event; + int input_event; + unsigned char is_event_as_num = 0; + + is_event_as_num = sscanf(str, "0x%X", &input_event); + + for (i = 0; i < IOCC_EVENTS_COUNT; i++) { + name = IOCC_event_info[i].name; + event = IOCC_event_info[i].event; + + /* + * We can set event, using event name or using event number. 
+ */ + if ((len == strlen(name) && strncmp(str, name, len) == 0) || + (is_event_as_num && event == input_event)) + return i; + } + + return -1; +} + +static void ioccmonitors_adjust(struct ioccmonitors_info new_monitors) +{ + e2k_io_str_struct_t io_str_reg; + unsigned short event_id = new_monitors.event; + int node; + int i; + + for_each_online_node(node) { + for (i = 0; i < SIC_IO_LINKS_COUNT; i++) { + io_str_reg.E2K_IO_STR_reg = sic_get_io_str(node, i); + + io_str_reg.E2K_IO_STR_reg &= ~E2K_IO_STR_EVENT_MASK; + if (new_monitors.is_used) + io_str_reg.E2K_IO_STR_reg |= + IOCC_event_info[event_id].event << + E2K_IO_STR_EVENT_SHIFT; + io_str_reg.E2K_IO_STR_rcol = 1; + + sic_set_io_str(node, i, io_str_reg.E2K_IO_STR_reg); + } + } +} + +static inline void parse_ioccmonitors_settings_string(char *str) +{ + unsigned short len = 0; + unsigned char is_last = 0; + struct ioccmonitors_info new_monitors; + short event_id; + unsigned long flags; + + memset(&new_monitors, 0, sizeof(struct ioccmonitors_info)); + + len = monitors_settings_string_get_next_word_len(str, &is_last); + + if (!is_last) { + pr_err("Failed to adjust ioccmonitors (invalid settings " + "string).\n"); + return; + } + + if (len && strncmp(str, "\n", 1)) { + event_id = lookup_ioccmonitors_event_id(str, len); + if (event_id == -1) { + pr_err("Failed to adjust ioccmonitors (invalid event " + "name or number).\n"); + return; + } + + new_monitors.is_used = 1; + new_monitors.event = event_id; + } + + raw_spin_lock_irqsave(&ioccmonitors_lock, flags); + + if (new_monitors.is_used != ioccmonitors.is_used || + new_monitors.event != + ioccmonitors.event) { + ioccmonitors_adjust(new_monitors); + } + + memcpy(&ioccmonitors, &new_monitors, sizeof(struct ioccmonitors_info)); + + raw_spin_unlock_irqrestore(&ioccmonitors_lock, flags); +} + +static ssize_t ioccmonitors_settings_write(struct file *file, + const char __user *buffer, size_t count, loff_t *data) +{ + char 
monitors_settings_buffer[IOCCMONITORS_SETTINGS_STR_MAX_SIZE]; + int ret; + + memset(monitors_settings_buffer, 0, sizeof(char) * + IOCCMONITORS_SETTINGS_STR_MAX_SIZE); + + if (count > IOCCMONITORS_SETTINGS_STR_MAX_SIZE - 1) { + pr_err("Failed to adjust ioccmonitors (too long settings " + "string).\n"); + ret = -EINVAL; + } else if (copy_from_user(monitors_settings_buffer, buffer, count)) { + pr_err("Failed to adjust ioccmonitors (kernel error).\n"); + ret = -EFAULT; + } else { + parse_ioccmonitors_settings_string(monitors_settings_buffer); + ret = count; + } + + return ret; +} + +static int ioccmonitors_settings_proc_show(struct seq_file *m, void *data) +{ + struct ioccmonitors_event_info event; + unsigned short event_id; + unsigned char is_used; + unsigned long flags; + + raw_spin_lock_irqsave(&ioccmonitors_lock, flags); + is_used = ioccmonitors.is_used; + event_id = ioccmonitors.event; + raw_spin_unlock_irqrestore(&ioccmonitors_lock, flags); + + if (is_used) { + event = IOCC_event_info[event_id]; + seq_printf(m, "%s(0x%x)\n", event.name, event.event); + } + + return 0; +} + +static int ioccmonitors_help_seq_show(struct seq_file *s, void *v) +{ + unsigned char i; + + seq_printf(s, "Events:\n"); + + for (i = 0; i < IOCC_EVENTS_COUNT; i++) + seq_printf(s, "%s=0x%x\n", + IOCC_event_info[i].name, + IOCC_event_info[i].event); + + seq_printf(s, "\nSetting example:\n" + "echo \"0x1\" > /proc/monitors/ioccmonitors_settings\n" + "echo \"IOCC_BSY_RC\" > /proc/monitors/ioccmonitors_settings\n"); + + return 0; +} + +static void *ioccmonitors_help_seq_start(struct seq_file *s, loff_t *pos) +{ + if (*pos >= IOCCMONITORS_COUNT) + return 0; + return (void *)pos; +} + +static void *ioccmonitors_help_seq_next(struct seq_file *s, void *v, + loff_t *pos) +{ + (*pos)++; + if (*pos >= IOCCMONITORS_COUNT) + return 0; + return (void *)pos; +} + +static void ioccmonitors_help_seq_stop(struct seq_file *s, void *v) +{ +} + +static const struct seq_operations ioccmonitors_help_seq_ops = { + 
.start = ioccmonitors_help_seq_start, + .next = ioccmonitors_help_seq_next, + .stop = ioccmonitors_help_seq_stop, + .show = ioccmonitors_help_seq_show +}; + +static int ioccmonitors_help_proc_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &ioccmonitors_help_seq_ops); +} + +static const struct file_operations ioccmonitors_help_proc_ops = { + .owner = THIS_MODULE, + .open = ioccmonitors_help_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + +static int monitors_settings_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, monitors_settings_proc_show, NULL); +} + +static const struct file_operations monitors_settings_proc_fops = { + .open = monitors_settings_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = monitors_settings_write, +}; + +static int monitors_events_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, monitors_events_proc_show, NULL); +} + +static const struct file_operations monitors_events_proc_fops = { + .open = monitors_events_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int sicmonitors_settings_proc_open(struct inode *inode, + struct file *file) +{ + return single_open(file, sicmonitors_settings_proc_show, NULL); +} + +static const struct file_operations sicmonitors_settings_proc_fops = { + .open = sicmonitors_settings_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = sicmonitors_settings_write, +}; + +static int sicmonitors_events_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, sicmonitors_events_proc_show, NULL); +} + +static const struct file_operations sicmonitors_events_proc_fops = { + .open = sicmonitors_events_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int ipccmonitors_settings_proc_open(struct inode 
*inode, + struct file *file) +{ + return single_open(file, ipccmonitors_settings_proc_show, NULL); +} + +static const struct file_operations ipccmonitors_settings_proc_fops = { + .open = ipccmonitors_settings_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = ipccmonitors_settings_write, +}; + +static int ipccmonitors_events_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, ipccmonitors_events_proc_show, NULL); +} + +static const struct file_operations ipccmonitors_events_proc_fops = { + .open = ipccmonitors_events_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int ioccmonitors_settings_proc_open(struct inode *inode, + struct file *file) +{ + return single_open(file, ioccmonitors_settings_proc_show, NULL); +} + +static const struct file_operations ioccmonitors_settings_proc_fops = { + .open = ioccmonitors_settings_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = ioccmonitors_settings_write, +}; + +static int ioccmonitors_events_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, ioccmonitors_events_proc_show, NULL); +} + +static const struct file_operations ioccmonitors_events_proc_fops = { + .open = ioccmonitors_events_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + + +/* + * Init + */ + +#define setup_monitors(iset) \ +({ \ + ddm0_monitors_events_list = ddm0_monitors_events_list_v##iset; \ + ddm1_monitors_events_list = ddm1_monitors_events_list_v##iset; \ + dim_monitors_events_list = dim_monitors_events_list_v##iset; \ + ddm0_monitors_events_range_count = \ + sizeof(ddm0_monitors_events_list_v##iset) / \ + sizeof(struct monitors_events_range); \ + ddm1_monitors_events_range_count = \ + sizeof(ddm1_monitors_events_list_v##iset) / \ + sizeof(struct monitors_events_range); \ + dim_monitors_events_range_count = \ + 
sizeof(dim_monitors_events_list_v##iset) / \ + sizeof(struct monitors_events_range); \ +}) + +static void monitors_init(void) +{ + switch (machine.native_iset_ver) { + case E2K_ISET_V2: + setup_monitors(2); + break; + case E2K_ISET_V3: + case E2K_ISET_V4: + setup_monitors(3); + break; + case E2K_ISET_V5: + setup_monitors(5); + break; + case E2K_ISET_V6: + setup_monitors(6); + break; + default: + BUG(); + } + + proc_create(MONITORS_SETTINGS_FILENAME, S_IRUGO | S_IWUSR, + monitors_dir_entry, &monitors_settings_proc_fops); + proc_create(MONITORS_EVENTS_FILENAME, S_IRUGO, + monitors_dir_entry, &monitors_events_proc_fops); + proc_create(MONITORS_DEAD_PROC_EVENTS_FILENAME, S_IRUGO, + monitors_dir_entry, &monitors_dead_proc_events_proc_ops); + proc_create(MONITORS_HELP_FILENAME, S_IRUGO, + monitors_dir_entry, &monitors_help_proc_ops); +} + +static void sicmonitors_init(void) +{ + if (!HAS_MACHINE_SICMONITORS) + return; + + if (IS_MACHINE_E2S) { + mcm0_sicmonitors_events_list = mcm0_sicmonitors_events_list_e4c; + mcm1_sicmonitors_events_list = mcm1_sicmonitors_events_list_e4c; + mcm0_sicmonitors_events_range_count = + MCM0_EVENTS_RANGE_COUNT_E4C; + mcm1_sicmonitors_events_range_count = + MCM1_EVENTS_RANGE_COUNT_E4C; + } else if (IS_MACHINE_E8C) { + mcm0_sicmonitors_events_list = mcm0_sicmonitors_events_list_e8c; + mcm1_sicmonitors_events_list = mcm1_sicmonitors_events_list_e8c; + mcm0_sicmonitors_events_range_count = + MCM0_EVENTS_RANGE_COUNT_E8C; + mcm1_sicmonitors_events_range_count = + MCM1_EVENTS_RANGE_COUNT_E8C; + } else if (IS_MACHINE_E8C2) { + mcm0_sicmonitors_events_list = + mcm0_sicmonitors_events_list_e8c2; + mcm1_sicmonitors_events_list = + mcm1_sicmonitors_events_list_e8c2; + mcm0_sicmonitors_events_range_count = + MCM0_EVENTS_RANGE_COUNT_E8C2; + mcm1_sicmonitors_events_range_count = + MCM1_EVENTS_RANGE_COUNT_E8C2; + } else { + BUG(); + } + + proc_create(SICMONITORS_SETTINGS_FILENAME, S_IRUGO | S_IWUSR, + monitors_dir_entry, &sicmonitors_settings_proc_fops); + 
proc_create(SICMONITORS_EVENTS_FILENAME, S_IRUGO, + monitors_dir_entry, &sicmonitors_events_proc_fops); + proc_create(SICMONITORS_HELP_FILENAME, S_IRUGO, + monitors_dir_entry, &sicmonitors_help_proc_ops); +} + +static void ipccmonitors_init(void) +{ + if (!HAS_MACHINE_IPCCMONITORS) + return; + + proc_create(IPCCMONITORS_SETTINGS_FILENAME, S_IRUGO | S_IWUSR, + monitors_dir_entry, &ipccmonitors_settings_proc_fops); + proc_create(IPCCMONITORS_EVENTS_FILENAME, S_IRUGO, + monitors_dir_entry, &ipccmonitors_events_proc_fops); + proc_create(IPCCMONITORS_HELP_FILENAME, S_IRUGO, + monitors_dir_entry, &ipccmonitors_help_proc_ops); +} + +static void ioccmonitors_init(void) +{ + if (!HAS_MACHINE_IOCCMONITORS) + return; + + proc_create(IOCCMONITORS_SETTINGS_FILENAME, S_IRUGO, + monitors_dir_entry, &ioccmonitors_settings_proc_fops); + proc_create(IOCCMONITORS_EVENTS_FILENAME, S_IRUGO, + monitors_dir_entry, &ioccmonitors_events_proc_fops); + proc_create(IOCCMONITORS_HELP_FILENAME, S_IRUGO, + monitors_dir_entry, &ioccmonitors_help_proc_ops); +} + +static int __init monitors_module_init(void) +{ + monitors_dir_entry = proc_mkdir(MONITORS_FILENAME, NULL); + if (!monitors_dir_entry) + return 0; + + monitors_init(); + sicmonitors_init(); + ipccmonitors_init(); + ioccmonitors_init(); + + return 0; +} + +module_init(monitors_module_init); + diff --git a/arch/e2k/kernel/nmi.c b/arch/e2k/kernel/nmi.c new file mode 100644 index 000000000000..c137348f0fc6 --- /dev/null +++ b/arch/e2k/kernel/nmi.c @@ -0,0 +1,385 @@ +#include +#include +#include +#include +#include +#include + +/* + * NMI IPI support + * + * nmi_call_function_xxx() support is implemented here. + * + * These function work like smp_call_function() but they are + * using non-maskable interrupts internally so they can be + * used from interrupt context. They also accept an additional + * 'timeout' parameter for more robust execution. 
+ * + * + * ATTENTION nmi_call_function_xxx() are actually more limited + * than smp_call_function_xxx(). + * + * 1) You cannot use ANY drivers (since they are usually NOT async-safe). + * + * 2) You cannot use printk() (as a consequence of 1). + * + * 3) Function must be fast and non-blocking. + * + * So instead of using printk() it is better to save your message + * into a temporary buffer and later print that buffer from the function + * which called nmi_call_function_xxx(). + */ + +#ifdef CONFIG_SMP + +enum { + NMI_CSD_FLAG_LOCK = 0x01, + NMI_CSD_FLAG_SYNCHRONOUS = 0x02, +}; + +struct nmi_call_single_data { + union { + struct list_head list; + struct llist_node llist; + }; + smp_call_func_t func; + void *info; + u16 flags; +}; + +struct nmi_call_function_data { + struct nmi_call_single_data csd[NR_CPUS]; + struct cpumask cpumask; + struct cpumask cpumask_ipi; +}; + +static struct llist_head __cacheline_aligned_in_smp call_single_queue[NR_CPUS]; +static struct nmi_call_function_data __cacheline_aligned_in_smp + nmi_cfd_data[NR_CPUS]; +static struct nmi_call_single_data nmi_csd_data[NR_CPUS]; + +void nmi_call_function_init(void) +{ + int i; + + for_each_possible_cpu(i) + init_llist_head(&call_single_queue[i]); +} + +/* + * nmi_csd_lock/nmi_csd_unlock used to serialize access to per-cpu csd resources + * + * For non-synchronous ipi calls the csd can still be in use by the + * previous function call. For multi-cpu calls its even more interesting + * as we'll have to ensure no other cpu is observing our csd. 
+ */ +static void nmi_csd_lock_wait(struct nmi_call_single_data *csd, + int timeout_msec) +{ + if (timeout_msec) { + int waited_us = 0; + int one_wait_us = 10; + + while (READ_ONCE(csd->flags) & NMI_CSD_FLAG_LOCK) { + udelay(one_wait_us); + waited_us += one_wait_us; + if (waited_us >= USEC_PER_MSEC * timeout_msec) { + pr_alert("nmi_csd_lock_wait(): wait exit on timeout\n"); + break; + } + } + } else { + while (READ_ONCE(csd->flags) & NMI_CSD_FLAG_LOCK) + cpu_relax(); + } + + /* + * Make sure that whatever data was changed by the called + * function is available now + */ + smp_mb(); +} + +static void nmi_csd_lock(struct nmi_call_single_data *csd) +{ + nmi_csd_lock_wait(csd, 30000); + csd->flags |= NMI_CSD_FLAG_LOCK; + + /* + * prevent CPU from reordering the above assignment + * to ->flags with any subsequent assignments to other + * fields of the specified nmi_call_single_data structure: + */ + smp_mb(); +} + +static void nmi_csd_unlock(struct nmi_call_single_data *csd) +{ + if (unlikely(!(csd->flags & NMI_CSD_FLAG_LOCK))) + pr_alert("Error in nmi_call_function(): caller did not lock the queue entry\n"); + + /* + * ensure we're all done before releasing data: + */ + smp_store_release(&csd->flags, 0); +} + +/* + * nmi_call_function_single - Run a function on a specific CPU + * @func: The function to run. This must be fast and non-blocking. + * @info: An arbitrary pointer to pass to the function. + * @wait: If true, wait until function has completed on other CPUs. + * @timeout_msec: Maximum waiting time in milliseconds (0 means + * no timeout). + * + * Unlike smp_call_function_single(), this function can be called from + * interrupt context because it uses non-maskable interrupts internally. + * + * Returns 0 on success, else a negative status code. + * + * ATTENTION + * + * 1) You cannot use ANY drivers (since they are usually NOT async-safe). + * + * 2) You cannot use printk() (as a consequence of 1). + * + * 3) Function must be fast and non-blocking. 
+ */ +static int __nmi_call_function_single(int cpu, void (*func) (void *info), void *info, + int wait, int timeout_msec, bool offline) +{ + struct nmi_call_single_data *csd; + struct nmi_call_single_data csd_stack = { + .flags = NMI_CSD_FLAG_LOCK | NMI_CSD_FLAG_SYNCHRONOUS + }; + unsigned long flags, nmi_flags; + int this_cpu, err = 0; + + /* + * Can deadlock when called with NMI interrupts disabled. + */ + if (unlikely(psr_and_upsr_nm_irqs_disabled())) { + WARN_ONCE(1, "nmi_call_function() called with NMIs disabled"); + wait = 0; + } + + raw_local_irq_save(flags); + + this_cpu = raw_smp_processor_id(); + + csd = &csd_stack; + if (!wait) { + csd = &nmi_csd_data[this_cpu]; + nmi_csd_lock(csd); + } + + if (cpu == this_cpu) { + raw_all_irq_save(nmi_flags); + func(info); + raw_all_irq_restore(nmi_flags); + goto out; + } + + if (unlikely((unsigned) cpu >= nr_cpu_ids || + offline && cpu_online(cpu) || + !offline && !cpu_online(cpu))) { + err = -ENXIO; + goto out; + } + + csd->func = func; + csd->info = info; + + /* Send a message to the target CPU */ + if (llist_add(&csd->llist, &call_single_queue[cpu])) + apic->send_IPI_mask(cpumask_of(cpu), NMI_VECTOR); + + /* Optionally wait for the CPU to complete */ + if (wait) + nmi_csd_lock_wait(csd, timeout_msec); + +out: + raw_local_irq_restore(flags); + + return err; +} + +int nmi_call_function_single(int cpu, void (*func) (void *info), void *info, + int wait, int timeout_msec) +{ + return __nmi_call_function_single(cpu, func, info, wait, timeout_msec, false); +} + +int nmi_call_function_single_offline(int cpu, void (*func) (void *info), void *info, + int wait, int timeout_msec) +{ + return __nmi_call_function_single(cpu, func, info, wait, timeout_msec, true); +} + +/** + * nmi_call_function(): Run a function on all other CPUs. + * @func: The function to run. This must be fast and non-blocking. + * @info: An arbitrary pointer to pass to the function. 
+ * @wait: If true, wait (atomically) until function has completed + * on other CPUs. + * @timeout_msec: Maximum waiting time in milliseconds (0 means + * no timeout). + * + * Returns 0. + * + * The main difference between this and smp_call_function() is that + * here we use NMIs to send interrupts. So only non-maskable interrupts + * must be enabled when calling it. + * + * ATTENTION + * + * 1) You cannot use ANY drivers (since they are usually NOT async-safe). + * + * 2) You cannot use printk() (as a consequence of 1). + * + * 3) Function must be fast and non-blocking. + */ +static int nmi_call_function_many(const struct cpumask *mask, + void (*func)(void *), void *info, int wait, int timeout_msec) +{ + struct nmi_call_function_data *cfd; + int cpu, next_cpu, this_cpu = raw_smp_processor_id(); + + /* + * Can deadlock when called with NMI interrupts disabled. + */ + if (unlikely(psr_and_upsr_nm_irqs_disabled())) { + WARN_ONCE(1, "nmi_call_function() called with NMIs disabled"); + wait = 0; + } + + /* + * Should not be possible since we always disable interrupts + * in NMI handlers. + */ + WARN_ON_ONCE(in_nmi()); + + /* Try to fastpath. So, what's a CPU they want? Ignoring this one. */ + cpu = cpumask_first_and(mask, cpu_online_mask); + if (cpu == this_cpu) + cpu = cpumask_next_and(cpu, mask, cpu_online_mask); + + /* No online cpus? We're done. */ + if (cpu >= nr_cpu_ids) + return 0; + + /* Do we have another CPU which isn't us? */ + next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask); + if (next_cpu == this_cpu) + next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask); + + /* Fastpath: do that cpu by itself. 
*/ + if (next_cpu >= nr_cpu_ids) + return nmi_call_function_single(cpu, func, info, wait, + timeout_msec); + + cfd = &nmi_cfd_data[this_cpu]; + + cpumask_and(&cfd->cpumask, mask, cpu_online_mask); + cpumask_clear_cpu(this_cpu, &cfd->cpumask); + + /* Some callers race with other cpus changing the passed mask */ + if (unlikely(!cpumask_weight(&cfd->cpumask))) + return 0; + + cpumask_clear(&cfd->cpumask_ipi); + for_each_cpu(cpu, &cfd->cpumask) { + struct nmi_call_single_data *csd = &cfd->csd[cpu]; + + nmi_csd_lock(csd); + if (wait) + csd->flags |= NMI_CSD_FLAG_SYNCHRONOUS; + csd->func = func; + csd->info = info; + + if (llist_add(&csd->llist, &call_single_queue[cpu])) + cpumask_set_cpu(cpu, &cfd->cpumask_ipi); + } + + /* Send a message to all CPUs in the map */ + apic->send_IPI_mask_allbutself(&cfd->cpumask_ipi, NMI_VECTOR); + + /* Optionally wait for the CPUs to complete */ + if (wait) { + for_each_cpu(cpu, &cfd->cpumask) { + struct nmi_call_single_data *csd; + + csd = &cfd->csd[cpu]; + nmi_csd_lock_wait(csd, timeout_msec); + } + } + + return 0; +} + +int nmi_call_function(void (*func)(void *), void *info, int wait, + int timeout_msec) +{ + unsigned long flags; + int ret; + + raw_local_irq_save(flags); + ret = nmi_call_function_many(cpu_online_mask, func, info, wait, + timeout_msec); + raw_local_irq_restore(flags); + + return ret; +} + +int nmi_call_function_mask(const cpumask_t *mask, + void (*func)(void *), void *info, int wait, int timeout_msec) +{ + unsigned long flags; + int ret; + + raw_local_irq_save(flags); + ret = nmi_call_function_many(mask, func, info, wait, timeout_msec); + raw_local_irq_restore(flags); + + return ret; +} + +/* + * Invoked to handle an NMI IPI (currently such IPIs + * are used only to call functions). 
+ */ +noinline void nmi_call_function_interrupt(void) +{ + struct llist_head *head; + struct llist_node *entry; + struct nmi_call_single_data *csd, *csd_next; + int cpu = raw_smp_processor_id(); + + head = &call_single_queue[cpu]; + entry = llist_del_all(head); + entry = llist_reverse_order(entry); + + WARN_ONCE(!psr_and_upsr_nm_irqs_disabled(), + "nmi_call_function() called with NMIs disabled"); + + llist_for_each_entry_safe(csd, csd_next, entry, llist) { + smp_call_func_t func = csd->func; + void *info = csd->info; + + /* Do we wait until *after* callback? */ + if (csd->flags & NMI_CSD_FLAG_SYNCHRONOUS) { + func(info); + nmi_csd_unlock(csd); + } else { + nmi_csd_unlock(csd); + func(info); + } + } +} +#else /* ! CONFIG_SMP */ +noinline void nmi_call_function_interrupt(void) +{ + panic("%s(): in not SMP mode\n", __func__); +} +#endif /* CONFIG_SMP */ + diff --git a/arch/e2k/kernel/page_tables.S b/arch/e2k/kernel/page_tables.S new file mode 100644 index 000000000000..4b6e0496adcb --- /dev/null +++ b/arch/e2k/kernel/page_tables.S @@ -0,0 +1,57 @@ +/* $Id: page_tables.S,v 1.10 2008/04/21 18:57:22 atic Exp $ */ +// +// special page tables to support boot and kernel initialization +// + +#include +#include +#include +#include +#include +#include +#include +#include + + + __NODEDATA +#ifdef CONFIG_NUMA + /* We want to map zero page with small pages to write-protect it, + * so in !NUMA case it goes into different section */ + .align PAGE_SIZE + .global empty_zero_page +empty_zero_page: + .skip PAGE_SIZE +#endif + +#ifndef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT + .global swapper_pg_dir + .align PAGE_SIZE +swapper_pg_dir: + .skip PAGE_SIZE +#else /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + .global all_cpus_swapper_pg_dir +all_cpus_swapper_pg_dir: + .skip PAGE_SIZE * NR_CPUS /* each CPU has own copy of page dir */ +#endif /* ! 
COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + + .align (1 << MMU_ALIGN_TRAP_POINT_BASE) // 0x200 + .global kernel_trap_cellar +#ifndef CONFIG_NUMA + .data +#else /* CONFIG_NUMA */ + __NODEDATA +#endif /* ! CONFIG_NUMA */ + +kernel_trap_cellar: +#ifndef CONFIG_SMP + .skip MMU_TRAP_CELLAR_MAX_SIZE * 8 // 128 double-words +#else + .skip MMU_TRAP_CELLAR_MAX_SIZE * 8 * NR_CPUS // 128 double-words + // for each CPU +#endif /* CONFIG_SMP */ + + .global kernel_CUT + .align (1 << E2K_ALIGN_CUT) // 32 bytes +kernel_CUT: + .skip KERNEL_CUT_BYTE_SIZE + diff --git a/arch/e2k/kernel/perf_event/Makefile b/arch/e2k/kernel/perf_event/Makefile new file mode 100644 index 000000000000..db3d4e6fa4c5 --- /dev/null +++ b/arch/e2k/kernel/perf_event/Makefile @@ -0,0 +1,14 @@ +ifdef CONFIG_FUNCTION_TRACER +# Do not profile debug and lowlevel utilities +CFLAGS_REMOVE_perf_event.o = -pg +CFLAGS_REMOVE_uncore.o = -pg +CFLAGS_REMOVE_uncore_sic.o = -pg +CFLAGS_REMOVE_uncore_hmu.o = -pg +CFLAGS_REMOVE_uncore_iommu.o = -pg +CFLAGS_REMOVE_uncore_hc.o = -pg +CFLAGS_REMOVE_uncore_mc.o = -pg +CFLAGS_REMOVE_uncore_prepic.o = -pg +endif + +obj-y := perf_event.o uncore.o uncore_sic.o uncore_hmu.o uncore_iommu.o \ + uncore_hc.o uncore_mc.o uncore_prepic.o dimtp_trace.o diff --git a/arch/e2k/kernel/perf_event/dimtp_trace.c b/arch/e2k/kernel/perf_event/dimtp_trace.c new file mode 100644 index 000000000000..a91346896893 --- /dev/null +++ b/arch/e2k/kernel/perf_event/dimtp_trace.c @@ -0,0 +1,567 @@ +/* + * This is mainly copied from drivers/perf/arm_spe_pmu.c + * so any updates to that file should be merged here. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +struct dimtp_buf { + int nr_pages; + bool snapshot; + void *base; +}; + +struct dimtp_pmu { + struct pmu pmu; + struct perf_output_handle __percpu *handle; +}; + +typedef union { + struct { + u64 event : 8; + u64 mode : 4; + u64 __unused : 52; + }; + u64 word; +} dimtp_config_attr_t; + +/* Returns -1 in case of bad configuration */ +static inline int config_to_size(dimtp_config_attr_t cfg) +{ + switch (cfg.mode) { + case 2: + case 8: + return 8; + case 3: + case 9: + return 16; + case 10: + return 32; + case 11: + return 64; + case 12: + return 128; + case 13: + return 256; + default: + return -1; + } +} + +PMU_FORMAT_ATTR(event, "config:0-7"); +PMU_FORMAT_ATTR(mode, "config:8-11"); + +static struct attribute *dimtp_format_attr[] = { + &format_attr_event.attr, + &format_attr_mode.attr, + NULL, +}; + +/* Convert a free-running index from perf into an DIMTP buffer offset */ +#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT)) +static inline u64 perf_idx_round_down(u64 idx, struct dimtp_buf *buf) { + u64 buf_size = (buf)->nr_pages << PAGE_SHIFT; + + return (idx / buf_size) * buf_size; +} +static inline u64 perf_idx_round_up(u64 idx, const struct dimtp_buf *buf) { + u64 buf_size = (buf)->nr_pages << PAGE_SHIFT; + + return ((idx + buf_size - 1) / buf_size) * buf_size; +} + +#define to_dimtp_pmu(p) (container_of(p, struct dimtp_pmu, pmu)) + +static struct attribute_group dimtp_format_group = { + .name = "format", + .attrs = dimtp_format_attr, +}; + +static const struct attribute_group *dimtp_attr_groups[] = { + &dimtp_format_group, + NULL, +}; + + +static void dimtp_perf_aux_output_end(struct perf_output_handle *handle) +{ + e2k_dimtp_t dimtp; + struct dimtp_buf *buf = perf_get_aux(handle); + u64 offset, size; + + machine.save_dimtp(&dimtp); + offset = dimtp.ind; + size = offset - 
PERF_IDX2OFF(handle->head, buf); + + if (buf->snapshot) + handle->head += size; + + perf_aux_output_end(handle, size); +} + +/* Perf callbacks */ +static int dimtp_event_init(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + dimtp_config_attr_t config = { .word = event->attr.config }; + struct dimtp_pmu *dimtp_pmu = to_dimtp_pmu(event->pmu); + + if (attr->type != event->pmu->type) + return -ENOENT; + + if (attr->exclude_idle) + return -EOPNOTSUPP; + + /* + * Feedback-directed frequency throttling doesn't work when we + * have a buffer of samples. We'd need to manually count the + * samples in the buffer when it fills up and adjust the event + * count to reflect that. Instead, just force the user to specify + * a sample period. + */ + if (attr->freq) { + pr_info_ratelimited("%s: feedback-directed frequency throttling does not work with dimtp, please provide a sampling period\n", + event->pmu->name); + return -EINVAL; + } + + if (event->hw.sample_period < 5) { + pr_info_ratelimited("%s: dimtp counting period %llu is imprecise and not allowed, please enter a value that is >=5\n", + event->pmu->name, event->hw.sample_period); + return -EINVAL; + } + + if (config_to_size(config) <= 0) { + pr_info_ratelimited("%s: bad dimtp.mode %d\n", + event->pmu->name, config.mode); + return -EINVAL; + } + + /* + * Save configuration + */ + if (!event->attr.exclude_user) + event->hw.config |= ARCH_PERFMON_USR; + if (!event->attr.exclude_kernel) + event->hw.config |= ARCH_PERFMON_OS; + + return 0; +} + +static void dimtp_pad_buf(struct perf_output_handle *handle, unsigned long len) +{ + struct dimtp_buf *buf = perf_get_aux(handle); + u64 head = PERF_IDX2OFF(handle->head, buf); + + memset(buf->base + head, 0, len); + if (!buf->snapshot) + perf_aux_output_skip(handle, len); +} + +static u64 __dimtp_next_off(struct perf_output_handle *handle, + const struct dimtp_buf *buf) +{ + u64 head, limit, tail, wakeup; + + pr_debug("Initial handle: head 0x%lx, size 0x%lx, 
wakeup 0x%lx\n", + handle->head, handle->size, handle->wakeup); + + /* + * The head can be misaligned if we used perf_aux_output_skip + * to consume handle->size bytes and CIRC_SPACE was used in + * perf_aux_output_begin to compute the size, which always + * leaves one entry free. + * + * Deal with this by padding to the next alignment boundary and + * moving the head index. If we run out of buffer space, we'll + * reduce handle->size to zero and end up reporting truncation. + */ + head = PERF_IDX2OFF(handle->head, buf); + if (!IS_ALIGNED(head, E2K_DIMTP_ALIGN)) { + unsigned long delta = roundup(head, E2K_DIMTP_ALIGN) - head; + + delta = min(delta, handle->size); + dimtp_pad_buf(handle, delta); + head = PERF_IDX2OFF(handle->head, buf); + WARN_ON_ONCE((s64) handle->size < 0); + } + + pr_debug("Aligned handle: head 0x%lx, size 0x%lx, wakeup 0x%lx\n", + handle->head, handle->size, handle->wakeup); + + /* If we've run out of free space, then nothing more to do */ + if (!handle->size) + goto no_space; + + /* Compute the tail and wakeup indices now that we've aligned head */ + tail = PERF_IDX2OFF(handle->head + handle->size, buf); + wakeup = PERF_IDX2OFF(handle->wakeup, buf); + + /* + * Avoid clobbering unconsumed data. We know we have space, so + * if we see head == tail we know that the buffer is empty. If + * head > tail, then there's nothing to clobber prior to + * wrapping. + */ + if (head < tail) + limit = round_down(tail, E2K_DIMTP_ALIGN); + else + limit = buf->nr_pages * PAGE_SIZE; + + pr_debug("Computed head 0x%llx, tail 0x%llx, wakeup 0x%llx, limit 0x%llx\n", + head, tail, wakeup, limit); + + /* + * Wakeup may be arbitrarily far into the future. If it's not in + * the current generation, either we'll wrap before hitting it, + * or it's in the past and has been handled already. + * + * If there's a wakeup before we wrap, arrange to be woken up by + * the page boundary following it. Keep the tail boundary if + * that's lower. 
+ */ + if (handle->wakeup < (handle->head + handle->size) && head <= wakeup) { + limit = min(limit, round_up(wakeup, E2K_DIMTP_ALIGN)); + pr_debug("limit 0x%llx now rounded down to wakeup\n", limit); + } + + if (limit <= head) { + dimtp_pad_buf(handle, handle->size); + goto no_space; + } + + return limit; + +no_space: + pr_debug("Truncating handle: head 0x%lx, size 0x%lx, wakeup 0x%lx\n", + handle->head, handle->size, handle->wakeup); + perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED); + perf_aux_output_end(handle, 0); + return 0; +} + + +static u64 dimtp_next_off(struct perf_output_handle *handle, + const struct dimtp_buf *buf, dimtp_config_attr_t config) +{ + u64 limit = __dimtp_next_off(handle, buf); + u64 head = PERF_IDX2OFF(handle->head, buf); + + /* + * If the head has come too close to the end of the buffer, + * then pad to the end and recompute the limit. + */ + if (limit && (limit - head < config_to_size(config))) { + dimtp_pad_buf(handle, limit - head); + limit = __dimtp_next_off(handle, buf); + } + + return limit; +} + +static u64 dimtp_next_snapshot_off(struct perf_output_handle *handle, + const struct dimtp_buf *buf, dimtp_config_attr_t config) +{ + u64 limit, head; + + head = PERF_IDX2OFF(handle->head, buf); + + /* + * On e2k entry beginning is market with 63rd bit + * so there is no need to split buffer in two parts + * as ARM SPE driver does - contents of the whole + * buffer will still be parseable. + */ + limit = buf->nr_pages * PAGE_SIZE; + + /* + * If we're within max record size of the limit, we must + * pad, move the head index and recompute the limit. 
+ */ + if (limit - head < config_to_size(config)) { + dimtp_pad_buf(handle, limit - head); + handle->head = perf_idx_round_up(handle->head, buf); + } + + return limit; +} + +static void dimtp_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + dimtp_config_attr_t config = { .word = event->attr.config }; + struct dimtp_pmu *dimtp_pmu = to_dimtp_pmu(event->pmu); + struct perf_output_handle *handle = this_cpu_ptr(dimtp_pmu->handle); + struct dimtp_buf *buf; + e2k_dimcr_t dimcr; + e2k_dimtp_t dimtp; + u64 limit; + + hwc->state = 0; + + /* Start a new aux session */ + buf = perf_aux_output_begin(handle, event); + if (!buf) { + event->hw.state |= PERF_HES_STOPPED; + return; + } + + limit = (buf->snapshot) ? dimtp_next_snapshot_off(handle, buf, config) : + dimtp_next_off(handle, buf, config); + + if (flags & PERF_EF_RELOAD) { + u64 left = local64_read(&hwc->period_left); + WRITE_DIMAR0_REG_VALUE(-left); + + WRITE_DIMAR1_REG_VALUE(-hwc->sample_period); + } + + dimtp.base = (unsigned long) buf->base; + dimtp.ind = PERF_IDX2OFF(handle->head, buf); + dimtp.size = limit; + dimtp.rw = 3; + machine.restore_dimtp(&dimtp); + + AW(dimcr) = 0; + dimcr.mode = config.mode; + AS(dimcr)[0].event = config.event; + AS(dimcr)[0].user = !!(hwc->config & ARCH_PERFMON_USR); + AS(dimcr)[0].system = !!(hwc->config & ARCH_PERFMON_OS); + WRITE_DIMCR_REG(dimcr); +} + +static void dimtp_stop(struct perf_event *event, int flags) +{ + e2k_dimcr_t dimcr; + struct dimtp_pmu *dimtp_pmu = to_dimtp_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + struct perf_output_handle *handle = this_cpu_ptr(dimtp_pmu->handle); + + /* If we're already stopped, then nothing to do */ + if (hwc->state & PERF_HES_STOPPED) + return; + + /* Stop all trace generation */ + dimcr = READ_DIMCR_REG(); + dimcr.mode = 0; + AS(dimcr)[0].system = 0; + AS(dimcr)[0].user = 0; + AS(dimcr)[1].system = 0; + AS(dimcr)[1].user = 0; + WRITE_DIMCR_REG(dimcr); + + if (flags & PERF_EF_UPDATE) { 
+		u64 left;
+
+		/*
+		 * If there's a fault pending then ensure we contain it
+		 * to this buffer, since we might be on the context-switch
+		 * path.
+		 */
+		if (perf_get_aux(handle)) {
+			e2k_dibsr_t dibsr;
+
+			dimtp_perf_aux_output_end(handle);
+
+			dibsr = READ_DIBSR_REG();
+			if (dibsr.m0) {
+				dibsr.m0 = 0;
+				WRITE_DIBSR_REG(dibsr);
+			}
+		}
+
+		left = READ_DIMAR0_REG_VALUE();
+		left = (left) ? -left : 1;
+		local64_set(&hwc->period_left, left);
+		hwc->state |= PERF_HES_UPTODATE;
+	}
+
+	hwc->state |= PERF_HES_STOPPED;
+}
+
+static int dimtp_add(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	int ret = 0;
+
+	if (WARN_ON_ONCE(!hwc->sample_period))
+		return -EOPNOTSUPP;
+
+	if (__this_cpu_read(perf_monitors_used) & (DIM0 | DIM1))
+		return -ENOSPC;
+
+	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+	if (flags & PERF_EF_START) {
+		dimtp_start(event, PERF_EF_RELOAD);
+		if (hwc->state & PERF_HES_STOPPED)
+			ret = -EINVAL;
+	}
+
+	if (!ret) {
+		__this_cpu_write(cpu_events[0], event);
+		__this_cpu_or(perf_monitors_used, (DIM0 | DIM1));
+	}
+
+	return ret;
+}
+
+static void dimtp_del(struct perf_event *event, int flags)
+{
+	dimtp_stop(event, PERF_EF_UPDATE);
+
+	BUG_ON((__this_cpu_read(perf_monitors_used) & (DIM0 | DIM1)) !=
+			(DIM0 | DIM1));
+	__this_cpu_write(cpu_events[0], NULL);
+	__this_cpu_and(perf_monitors_used, ~(DIM0 | DIM1));
+}
+
+static void dimtp_read(struct perf_event *event)
+{
+}
+
+static void *dimtp_setup_aux(struct perf_event *event, void **pages,
+		int nr_pages, bool snapshot)
+{
+	int i, cpu = event->cpu;
+	struct page **pglist;
+	struct dimtp_buf *buf;
+
+	if (!nr_pages)
+		return NULL;
+
+	if (cpu == -1)
+		cpu = raw_smp_processor_id();
+
+	buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, cpu_to_node(cpu));
+	if (!buf)
+		return NULL;
+
+	pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
+	if (!pglist)
+		goto error_free_buf;
+
+	for (i = 0; i < nr_pages; ++i)
+		pglist[i] = virt_to_page(pages[i]);
+
+	buf->base =
vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL); + if (!buf->base) + goto error_free_pglist; + + buf->nr_pages = nr_pages; + buf->snapshot = snapshot; + + kfree(pglist); + return buf; + +error_free_pglist: + kfree(pglist); + error_free_buf: + kfree(buf); + return NULL; +} + +static void dimtp_free_aux(void *aux) +{ + struct dimtp_buf *buf = aux; + + vunmap(buf->base); + kfree(buf); +} + +static struct dimtp_pmu dimtp_pmu = { + .pmu = { + .capabilities = PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE, + + /* + * We hitch a ride on the software context here, so that + * we can support per-task profiling (which is not possible + * with the invalid context as it doesn't get sched callbacks). + * This requires that userspace either uses a dummy event for + * perf_event_open, since the aux buffer is not setup until + * a subsequent mmap, or creates the profiling event in a + * disabled state and explicitly PERF_EVENT_IOC_ENABLEs it + * once the buffer has been created. + */ + .task_ctx_nr = perf_sw_context, + + .event_init = dimtp_event_init, + .add = dimtp_add, + .del = dimtp_del, + + .start = dimtp_start, + .stop = dimtp_stop, + .read = dimtp_read, + + .setup_aux = dimtp_setup_aux, + .free_aux = dimtp_free_aux, + + .attr_groups = dimtp_attr_groups, + } +}; + +void dimtp_overflow(struct perf_event *event) +{ + struct perf_output_handle *handle; + + WARN_ON_ONCE(event->pmu->type != dimtp_pmu.pmu.type); + + handle = this_cpu_ptr(dimtp_pmu.handle); + if (!perf_get_aux(handle)) + return; + + dimtp_perf_aux_output_end(handle); + + /* + * Ensure perf callbacks have completed, which may disable the + * profiling buffer in response to a TRUNCATION flag. + */ + irq_work_run(); + + /* + * We handled the fault (the buffer was full), so resume + * profiling as long as we didn't detect truncation. 
+ */ + if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED) + return; + + /* Start a new aux session */ + dimtp_start(event, PERF_EF_RELOAD); +} + +static int __init dimtp_pmu_init(void) +{ + int ret; + + if (machine.native_iset_ver < E2K_ISET_V6) + return 0; + + dimtp_pmu.handle = alloc_percpu(typeof(*dimtp_pmu.handle)); + if (!dimtp_pmu.handle) + return -ENOMEM; + + ret = perf_pmu_register(&dimtp_pmu.pmu, "dimtp_trace", -1); + if (ret) + goto out_free_handle; + + return 0; + +out_free_handle: + free_percpu(dimtp_pmu.handle); + return ret; +} +arch_initcall(dimtp_pmu_init); diff --git a/arch/e2k/kernel/perf_event/perf_event.c b/arch/e2k/kernel/perf_event/perf_event.c new file mode 100644 index 000000000000..03e84627cdc0 --- /dev/null +++ b/arch/e2k/kernel/perf_event/perf_event.c @@ -0,0 +1,1377 @@ +#include +#include +#include + +static inline bool is_glue(u64 ip) +{ + return ip >= (u64) __entry_handlers_start && + ip < (u64) __entry_handlers_end + || ip >= (u64) _t_entry && ip < (u64) _t_entry_end; +} + + +struct save_stack_address_args { + struct perf_callchain_entry_ctx *entry; + u64 top; + u64 type; +}; + +static int save_stack_address(e2k_mem_crs_t *frame, + unsigned long real_frame_addr, + unsigned long corrected_frame_addr, int flags, void *arg) +{ + struct save_stack_address_args *args = arg; + struct perf_callchain_entry_ctx *entry = args->entry; + u64 top = args->top; + u64 type = args->type; + u64 ip; + + if (unlikely(entry->nr >= entry->max_stack)) + return 1; + + /* + * Skip entries that correspond to the perf itself. + */ + if (corrected_frame_addr > top) + return 0; + + /* + * When storing user callchain, skip all kernel entries. + * When storing kernel callchain, stop at the first user entry. + */ + if (AS(frame->cr1_lo).pm) { + if (type != PERF_CONTEXT_KERNEL) + return 0; + } else { + if (type != PERF_CONTEXT_USER) + return 1; + } + + ip = AS_STRUCT(frame->cr0_hi).ip << 3; + + /* + * Skip syscall and trap glue cause it obfuscates the trace. 
+ */ + if (!is_glue(ip)) + perf_callchain_store(entry, ip); + + return 0; +} + +/* + * Save stack-backtrace addresses into a perf_callchain_entry buffer. + */ +void perf_callchain_user(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + struct save_stack_address_args args; + + args.entry = entry; + args.top = AS(regs->stacks.pcsp_lo).base + AS(regs->stacks.pcsp_hi).ind; + args.type = PERF_CONTEXT_USER; + parse_chain_stack(PCS_USER, NULL, save_stack_address, &args); +} + +void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + struct save_stack_address_args args; + + args.entry = entry; + args.top = AS(regs->stacks.pcsp_lo).base + AS(regs->stacks.pcsp_hi).ind; + args.type = PERF_CONTEXT_KERNEL; + parse_chain_stack(0, NULL, save_stack_address, &args); +} + + +DEFINE_PER_CPU(struct perf_event * [4], cpu_events); + +static struct pmu e2k_pmu; + +static void e2k_pmu_read(struct perf_event *event); + +static int handle_event(struct perf_event *event, struct pt_regs *regs) +{ + struct hw_perf_event *hwc = &event->hw; + struct perf_sample_data data; + + /* + * For some reason this is not done automatically... 
+ */ + if (hwc->sample_period) + hwc->last_period = hwc->sample_period; + + /* + * Update event->count + */ + e2k_pmu_read(event); + + perf_sample_data_init(&data, 0, hwc->last_period); + + if (!(hwc->config & ARCH_PERFMON_OS)) { + regs = find_user_regs(regs); + if (WARN_ON_ONCE(!regs)) + return 0; + } + + if (!(event->attr.exclude_idle && is_idle_task(current))) + return perf_event_overflow(event, &data, regs); + + return 0; +} + +static s64 monitor_pause(struct perf_event *event, + struct hw_perf_event *hwc, int update); + +DEFINE_PER_CPU(u8, perf_monitors_used); + +void dimcr_continue(e2k_dimcr_t dimcr_old) +{ + struct perf_event *event0, *event1; + e2k_dimcr_t dimcr; + + event0 = __this_cpu_read(cpu_events[0]); + event1 = __this_cpu_read(cpu_events[1]); + + /* + * Restart counting + */ + BUG_ON(event0 && event0->hw.idx != 0 || event1 && event1->hw.idx != 1); + dimcr = READ_DIMCR_REG(); + AS(dimcr)[0].user = (!event0) + ? AS(dimcr_old)[0].user + : (!(event0->hw.state & PERF_HES_STOPPED) && + (event0->hw.config & ARCH_PERFMON_USR)); + AS(dimcr)[0].system = (!event0) + ? AS(dimcr_old)[0].system + : (!(event0->hw.state & PERF_HES_STOPPED) && + (event0->hw.config & ARCH_PERFMON_OS)); + AS(dimcr)[1].user = (!event1) + ? AS(dimcr_old)[1].user + : (!(event1->hw.state & PERF_HES_STOPPED) && + (event1->hw.config & ARCH_PERFMON_USR)); + AS(dimcr)[1].system = (!event1) + ? AS(dimcr_old)[1].system + : (!(event1->hw.state & PERF_HES_STOPPED) && + (event1->hw.config & ARCH_PERFMON_OS)); + WRITE_DIMCR_REG(dimcr); +} + +void ddmcr_continue(e2k_ddmcr_t ddmcr_old) +{ + struct perf_event *event0, *event1; + e2k_ddmcr_t ddmcr; + + event0 = __this_cpu_read(cpu_events[2]); + event1 = __this_cpu_read(cpu_events[3]); + + /* + * Restart counting + */ + BUG_ON(event0 && event0->hw.idx != 0 || event1 && event1->hw.idx != 1); + ddmcr = READ_DDMCR_REG(); + AS(ddmcr)[0].user = (!event0) + ? 
AS(ddmcr_old)[0].user
+		: (!(event0->hw.state & PERF_HES_STOPPED) &&
+		   (event0->hw.config & ARCH_PERFMON_USR));
+	AS(ddmcr)[0].system = (!event0)
+		? AS(ddmcr_old)[0].system
+		: (!(event0->hw.state & PERF_HES_STOPPED) &&
+		   (event0->hw.config & ARCH_PERFMON_OS));
+	AS(ddmcr)[1].user = (!event1)
+		? AS(ddmcr_old)[1].user
+		: (!(event1->hw.state & PERF_HES_STOPPED) &&
+		   (event1->hw.config & ARCH_PERFMON_USR));
+	AS(ddmcr)[1].system = (!event1)
+		? AS(ddmcr_old)[1].system
+		: (!(event1->hw.state & PERF_HES_STOPPED) &&
+		   (event1->hw.config & ARCH_PERFMON_OS));
+	WRITE_DDMCR_REG(ddmcr);
+}
+
+static s64 handle_event_overflow(const char *name,
+		struct perf_event *event, struct pt_regs *regs)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	s64 period;
+
+	int ret = handle_event(event, regs);
+	if (ret)
+		monitor_pause(event, hwc, 0);
+
+	period = hwc->sample_period;
+	local64_set(&hwc->prev_count, period);
+
+	pr_debug("%s event %p %shandled, new period %lld\n",
+		name, event, (ret) ? "could not be " : "", period);
+
+	return period;
+}
+
+void perf_data_overflow_handle(struct pt_regs *regs)
+{
+	e2k_ddbsr_t ddbsr;
+	struct perf_event *event0, *event1;
+	u8 monitors_used;
+
+	monitors_used = __this_cpu_read(perf_monitors_used);
+	event0 = __this_cpu_read(cpu_events[2]);
+	event1 = __this_cpu_read(cpu_events[3]);
+
+	ddbsr = READ_DDBSR_REG();
+
+	pr_debug("data overflow, ddbsr %llx, monitors_used 0x%hhx, events %p/%p\n",
+		AW(ddbsr), monitors_used, event0, event1);
+
+	if (ddbsr.m0 && event0 && (monitors_used & DDM0)) {
+		s64 period = handle_event_overflow("DDM0", event0, regs);
+		WRITE_DDMAR0_REG(-period);
+		ddbsr.m0 = 0;
+	}
+
+	if (ddbsr.m1 && event1 && (monitors_used & DDM1)) {
+		s64 period = handle_event_overflow("DDM1", event1, regs);
+		WRITE_DDMAR1_REG(-period);
+		ddbsr.m1 = 0;
+	}
+
+	/*
+	 * Clear status fields
+	 */
+	WRITE_DDBSR_REG(ddbsr);
+}
+
+void perf_instr_overflow_handle(struct pt_regs *regs)
+{
+	e2k_dibsr_t dibsr;
+	struct perf_event *event0, *event1;
+	u8 monitors_used;
+
+	monitors_used = __this_cpu_read(perf_monitors_used);
+	event0 = __this_cpu_read(cpu_events[0]);
+	event1 = __this_cpu_read(cpu_events[1]);
+
+	dibsr = READ_DIBSR_REG();
+
+	pr_debug("instr overflow, dibsr %x, monitors_used 0x%hhx, events %p/%p\n",
+		AW(dibsr), monitors_used, event0, event1);
+
+	if (dibsr.m0 && event0 && (monitors_used & DIM0)) {
+		/* This could be an event from DIMTP overflow */
+		if (event0->pmu->type != e2k_pmu.type) {
+			dimtp_overflow(event0);
+		} else {
+			s64 period = handle_event_overflow("DIM0", event0, regs);
+			WRITE_DIMAR0_REG_VALUE(-period);
+		}
+		dibsr.m0 = 0;
+	}
+
+	if (dibsr.m1 && event1 && (monitors_used & DIM1)) {
+		s64 period = handle_event_overflow("DIM1", event1, regs);
+		WRITE_DIMAR1_REG_VALUE(-period);
+		dibsr.m1 = 0;
+	}
+
+	/*
+	 * Clear status fields
+	 */
+	WRITE_DIBSR_REG(dibsr);
+}
+
+static void monitor_resume(struct hw_perf_event *hwc, int reload, s64 period)
+{
+	unsigned long flags;
+	e2k_dimcr_t dimcr;
+	e2k_ddmcr_t ddmcr;
+	e2k_dibcr_t dibcr;
+	u8 monitor, event_id;
+	int num;
+
+	raw_all_irq_save(flags);
+
+	monitor = (hwc->config & 0xff00) >> 8;
+	event_id = hwc->config & 0xff;
+	num = hwc->idx;
+
+	/* Clear PERF_HES_STOPPED */
+	hwc->state = 0;
+
+	dibcr = READ_DIBCR_REG();
+	WARN_ON(AS(dibcr).stop);
+
+	switch (monitor) {
+	case DIM0:
+	case DIM1:
+	case DIM0_DIM1:
+		dimcr = READ_DIMCR_REG();
+		AS(dimcr)[num].user = !!(hwc->config & ARCH_PERFMON_USR);
+		AS(dimcr)[num].system = !!(hwc->config & ARCH_PERFMON_OS);
+		AS(dimcr)[num].trap = 1;
+		AS(dimcr)[num].event = event_id;
+		if (reload) {
+			period = -period;
+
+			if (num == 1)
+				WRITE_DIMAR1_REG_VALUE(period);
+			else
+				WRITE_DIMAR0_REG_VALUE(period);
+		}
+		WRITE_DIMCR_REG(dimcr);
+		break;
+	case DDM0:
+	case DDM1:
+	case DDM0_DDM1:
+		ddmcr = READ_DDMCR_REG();
+		AS(ddmcr)[num].user = !!(hwc->config & ARCH_PERFMON_USR);
+		AS(ddmcr)[num].system = !!(hwc->config & ARCH_PERFMON_OS);
+		AS(ddmcr)[num].trap = 1;
+		AS(ddmcr)[num].event
= event_id; + if (reload) { + period = -period; + + if (num == 1) + WRITE_DDMAR1_REG_VALUE(period); + else + WRITE_DDMAR0_REG_VALUE(period); + } + WRITE_DDMCR_REG(ddmcr); + break; + default: + BUG_ON(1); + } + + pr_debug("event %hhx:%02hhx: resuming\n", monitor, event_id); + + raw_all_irq_restore(flags); +} + +static s64 monitor_pause(struct perf_event *event, + struct hw_perf_event *hwc, int update) +{ + unsigned long flags; + e2k_dimcr_t dimcr; + e2k_ddmcr_t ddmcr; + e2k_dibcr_t dibcr; + u8 monitor, event_id; + s64 left = 0; + int num, overflow; + + raw_all_irq_save(flags); + + monitor = (hwc->config & 0xff00) >> 8; + event_id = hwc->config & 0xff; + num = hwc->idx; + + hwc->state |= PERF_HES_STOPPED; + + dibcr = READ_DIBCR_REG(); + WARN_ON(AS(dibcr).stop); + + switch (monitor) { + case DIM0: + case DIM1: + case DIM0_DIM1: + dimcr = READ_DIMCR_REG(); + AS(dimcr)[num].user = 0; + AS(dimcr)[num].system = 0; + WRITE_DIMCR_REG(dimcr); + if (update) { + e2k_dibsr_t dibsr; + + dibsr = READ_DIBSR_REG(); + + overflow = (num == 1 && AS(dibsr).m1) || + (num == 0 && AS(dibsr).m0); + + if (overflow) { + left = 1; + pr_debug("event DIM%d: left 0 (1)\n", num); + /* See comment in monitor_disable() */ + if (num == 1) + AS(dibsr).m1 = 0; + else + AS(dibsr).m0 = 0; + } else { + left = (num == 1) ? READ_DIMAR1_REG_VALUE() : + READ_DIMAR0_REG_VALUE(); + left = -left; + + pr_debug("event DIM%d: left %lld, dimcr 0x%llx/0x%llx, dibsr 0x%x/0x%x\n", + num, left, AW(dimcr), + READ_DIMCR_REG_VALUE(), + AW(dibsr), READ_DIBSR_REG_VALUE()); + } + + /* We clear m0/m1 even if it is not set. The problem + * is that %dibsr is still updated asynchronously + * for several cycles after %dimcr write, so it + * can be set _after_ we had read %dibsr. 
*/ + WRITE_DIBSR_REG(dibsr); + } + break; + case DDM0: + case DDM1: + case DDM0_DDM1: + ddmcr = READ_DDMCR_REG(); + AS(ddmcr)[num].user = 0; + AS(ddmcr)[num].system = 0; + WRITE_DDMCR_REG(ddmcr); + if (update) { + e2k_ddbsr_t ddbsr; + + ddbsr = READ_DDBSR_REG(); + + overflow = (num == 1 && AS(ddbsr).m1) || + (num == 0 && AS(ddbsr).m0); + + if (overflow) { + left = 1; + pr_debug("event DDM%d: left 0 (1)\n", num); + if (num == 1) + AS(ddbsr).m1 = 0; + else + AS(ddbsr).m0 = 0; + } else { + left = (num == 1) ? READ_DDMAR1_REG_VALUE() : + READ_DDMAR0_REG_VALUE(); + left = -left; + + /* + * We could receive some other interrupt right + * when ddmar overflowed. Then exc_data_debug + * could be lost along with the setting of + * %ddbsr.m1 if interrupts in %psr had been + * closed just before exc_data_debug arrived. + */ + if (cpu_has(CPU_HWBUG_KERNEL_DATA_MONITOR) && + is_sampling_event(event) && left <= 0) { + pr_debug("event DDM%d: hardware bug, left %lld\n", + num, left); + left = 1; + } + pr_debug("event DDM%d: left %lld\n", num, left); + } + + /* We clear m0/m1 even if it is not set. The problem + * is that %ddbsr is still updated asynchronously + * for several cycles after %ddmcr write, so it + * can be set _after_ we had read %ddbsr. 
*/ + WRITE_DDBSR_REG(ddbsr); + } + break; + default: + BUG_ON(1); + } + + pr_debug("event %hhx:%02hhx: pausing\n", + monitor, event_id); + + raw_all_irq_restore(flags); + + return left; +} + +static int monitor_enable(u32 monitor, u32 event_id, s64 period, + struct perf_event *event, int run) +{ + struct hw_perf_event *hwc = &event->hw; + unsigned long flags; + e2k_dimcr_t dimcr; + e2k_ddmcr_t ddmcr; + e2k_dibcr_t dibcr; + e2k_dibsr_t dibsr; + e2k_ddbsr_t ddbsr; + int num, ret = 0; + + raw_all_irq_save(flags); + + period = -period; + + dibcr = READ_DIBCR_REG(); + WARN_ON(AS(dibcr).stop); + + switch (monitor) { + case DIM0: + case DIM1: + case DIM0_DIM1: + if (monitor == DIM0_DIM1) { + if (!(__this_cpu_read(perf_monitors_used) & DIM1)) { + num = 1; + } else if (!(__this_cpu_read(perf_monitors_used) + & DIM0)) { + num = 0; + } else { + ret = -ENOSPC; + break; + } + + hwc->idx = num; + } else { + num = (monitor == DIM1); + + if (num == 1 && + (__this_cpu_read(perf_monitors_used) & DIM1) || + num == 0 && + (__this_cpu_read(perf_monitors_used) & DIM0)) { + ret = -ENOSPC; + break; + } + } + + dimcr = READ_DIMCR_REG(); + AS(dimcr)[num].user = run && (hwc->config & ARCH_PERFMON_USR) && + !(hwc->state & PERF_HES_STOPPED); + AS(dimcr)[num].system = 0; + AS(dimcr)[num].trap = 1; + AS(dimcr)[num].event = event_id; + WRITE_DIMCR_REG(dimcr); + + dibsr = READ_DIBSR_REG(); + + if (num == 1) { + WRITE_DIMAR1_REG_VALUE(period); + AS(dibsr).m1 = 0; + + __this_cpu_write(cpu_events[1], event); + + __this_cpu_or(perf_monitors_used, DIM1); + } else { + WRITE_DIMAR0_REG_VALUE(period); + AS(dibsr).m0 = 0; + + __this_cpu_write(cpu_events[0], event); + + __this_cpu_or(perf_monitors_used, DIM0); + } + + WRITE_DIBSR_REG(dibsr); + + /* + * Start the monitor now that the preparations are done. 
+ */ + if (run && (hwc->config & ARCH_PERFMON_OS) && + !(hwc->state & PERF_HES_STOPPED)) { + AS(dimcr)[num].system = 1; + WRITE_DIMCR_REG(dimcr); + } + break; + case DDM0: + case DDM1: + case DDM0_DDM1: + if (monitor == DDM0_DDM1) { + if (!(__this_cpu_read(perf_monitors_used) & DDM1)) { + num = 1; + } else if (!(__this_cpu_read(perf_monitors_used) + & DDM0)) { + num = 0; + } else { + ret = -ENOSPC; + break; + } + + hwc->idx = num; + } else { + num = (monitor == DDM1); + + if (num == 1 && + (__this_cpu_read(perf_monitors_used) & DDM1) || + num == 0 && + (__this_cpu_read(perf_monitors_used) & DDM0)) { + ret = -ENOSPC; + break; + } + } + + ddmcr = READ_DDMCR_REG(); + AS(ddmcr)[num].user = run && (hwc->config & ARCH_PERFMON_USR) && + !(hwc->state & PERF_HES_STOPPED); + AS(ddmcr)[num].system = 0; + AS(ddmcr)[num].trap = 1; + AS(ddmcr)[num].event = event_id; + WRITE_DDMCR_REG(ddmcr); + + ddbsr = READ_DDBSR_REG(); + + if (num == 1) { + WRITE_DDMAR1_REG_VALUE(period); + AS(ddbsr).m1 = 0; + + __this_cpu_write(cpu_events[3], event); + + __this_cpu_or(perf_monitors_used, DDM1); + } else { + WRITE_DDMAR0_REG_VALUE(period); + AS(ddbsr).m0 = 0; + + __this_cpu_write(cpu_events[2], event); + + __this_cpu_or(perf_monitors_used, DDM0); + } + + WRITE_DDBSR_REG(ddbsr); + + /* + * Start the monitor now that the preparations are done. 
+ */ + if (run && (hwc->config & ARCH_PERFMON_OS) && + !(hwc->state & PERF_HES_STOPPED)) { + AS(ddmcr)[num].system = 1; + WRITE_DDMCR_REG(ddmcr); + } + break; + default: + BUG_ON(1); + } + + raw_all_irq_restore(flags); + + return ret; +} + +static DEFINE_PER_CPU(int, hw_perf_disable_count); + +static s64 monitor_disable(struct hw_perf_event *hwc) +{ + unsigned long flags; + e2k_dimcr_t dimcr; + e2k_ddmcr_t ddmcr; + e2k_dibsr_t dibsr; + e2k_ddbsr_t ddbsr; + s64 left; + int monitor, num; + + monitor = (hwc->config & 0xff00) >> 8; + num = hwc->idx; + + BUG_ON(!!__this_cpu_read(hw_perf_disable_count) ^ + !!raw_all_irqs_disabled()); + BUG_ON(!raw_irqs_disabled()); + + switch (monitor) { + case DIM0: + case DIM1: + case DIM0_DIM1: + dimcr = READ_DIMCR_REG(); + AS(dimcr)[num].user = 0; + AS(dimcr)[num].system = 0; + /* Note that writing of %dimcr has an important side effect: + * it cancels any other pending exc_instr_debug that arrived + * while we were still handling this one. */ + WRITE_DIMCR_REG(dimcr); + + raw_all_irq_save(flags); + + left = (num == 1) ? READ_DIMAR1_REG_VALUE() : + READ_DIMAR0_REG_VALUE(); + left = -left; + + dibsr = READ_DIBSR_REG(); + + if (num == 1) { + __this_cpu_write(cpu_events[1], NULL); + + BUG_ON(!(__this_cpu_read(perf_monitors_used) & DIM1)); + __this_cpu_and(perf_monitors_used, ~DIM1); + + if (AS(dibsr).m1) { + left = 1; + pr_debug("event DIM1: left 0 (1)\n"); + /* + * Now clear DIBSR, otherwise an interrupt might + * arrive _after_ the event was disabled, and + * event handler might re-enable counting (e.g. + * if event's frequency has been changed). + * + * We set left to 1 so that the interrupt will + * arrive again after the task has been + * scheduled in. + * + * NOTE: this will lose one event and cause + * one spurious interrupt. 
+ */ + AS(dibsr).m1 = 0; + } else { + pr_debug("event DIM1: left %lld\n", left); + } + } else { + __this_cpu_write(cpu_events[0], NULL); + + BUG_ON(!(__this_cpu_read(perf_monitors_used) & DIM0)); + __this_cpu_and(perf_monitors_used, ~DIM0); + + if (AS(dibsr).m0) { + left = 1; + pr_debug("event DIM0: left 0 (1)\n"); + AS(dibsr).m0 = 0; + } else { + pr_debug("event DIM0: left %lld\n", left); + } + } + + /* We clear m0/m1 even if it is not set. The problem + * is that %dibsr is still updated asynchronously + * for several cycles after %dimcr write, so it + * can be set _after_ we had read %dibsr. */ + WRITE_DIBSR_REG(dibsr); + break; + case DDM0: + case DDM1: + case DDM0_DDM1: + ddmcr = READ_DDMCR_REG(); + AS(ddmcr)[num].user = 0; + AS(ddmcr)[num].system = 0; + /* Note that writing of %ddmcr has an important side effect: + * it cancels any other pending exc_data_debug that arrived + * while we were still handling this one. */ + WRITE_DDMCR_REG(ddmcr); + + raw_all_irq_save(flags); + + ddbsr = READ_DDBSR_REG(); + + if (num == 1) { + __this_cpu_write(cpu_events[3], NULL); + + BUG_ON(!(__this_cpu_read(perf_monitors_used) & DDM1)); + __this_cpu_and(perf_monitors_used, ~DDM1); + + if (AS(ddbsr).m1) { + left = 1; + pr_debug("event DDM1: left 0 (1)\n"); + AS(ddbsr).m1 = 0; + } else { + left = READ_DDMAR1_REG_VALUE(); + left = -left; + pr_debug("event DDM1: left %lld\n", left); + } + } else { + __this_cpu_write(cpu_events[2], NULL); + + BUG_ON(!(__this_cpu_read(perf_monitors_used) & DDM0)); + __this_cpu_and(perf_monitors_used, ~DDM0); + + if (AS(ddbsr).m0) { + left = 1; + pr_debug("event DDM0: left 0 (1)\n"); + AS(ddbsr).m0 = 0; + } else { + left = READ_DDMAR0_REG_VALUE(); + left = -left; + pr_debug("event DDM0: left %lld\n", left); + } + } + + /* We clear m0/m1 even if it is not set. The problem + * is that %ddbsr is still updated asynchronously + * for several cycles after %ddmcr write, so it + * can be set _after_ we had read %ddbsr. 
*/ + WRITE_DDBSR_REG(ddbsr); + break; + default: + BUG_ON(1); + } + + raw_all_irq_restore(flags); + + return left; +} + +static s64 monitor_read(u32 monitor, int idx) +{ + s64 left; + e2k_dibsr_t dibsr; + e2k_ddbsr_t ddbsr; + + if (monitor == DIM0_DIM1) + monitor = (idx) ? DIM1 : DIM0; + else if (monitor == DDM0_DDM1) + monitor = (idx) ? DDM1 : DDM0; + + switch (monitor) { + case DIM0: + dibsr = READ_DIBSR_REG(); + if (AS(dibsr).m0) { + left = 0; + } else { + left = READ_DIMAR0_REG_VALUE(); + left = -left; + } + pr_debug("reading DIM0: left %lld (dibsr %d)\n", + left, AS(dibsr).m0); + break; + case DIM1: + dibsr = READ_DIBSR_REG(); + if (AS(dibsr).m1) { + left = 0; + } else { + left = READ_DIMAR1_REG_VALUE(); + left = -left; + } + pr_debug("reading DIM1: left %lld (dibsr %d)\n", + left, AS(dibsr).m1); + break; + case DDM0: + ddbsr = READ_DDBSR_REG(); + if (AS(ddbsr).m0) { + left = 0; + } else { + left = READ_DDMAR0_REG_VALUE(); + left = -left; + } + pr_debug("reading DDM0: left %lld (ddbsr %d)\n", + left, AS(ddbsr).m0); + break; + case DDM1: + ddbsr = READ_DDBSR_REG(); + if (AS(ddbsr).m1) { + left = 0; + } else { + left = READ_DDMAR1_REG_VALUE(); + left = -left; + } + pr_debug("reading DDM1: left %lld (ddbsr %d)\n", + left, AS(ddbsr).m1); + break; + default: + BUG_ON(1); + } + + return left; +} + + +/* + * On e2k add() and del() functions are more complex than on other + * architectures: besides starting/stopping the counting they also + * update perf_event structure. + * + * This allows us to select the appropriate counter for DIM0_DIM1 events + * dynamically. Since perf tries to schedule different event groups + * together, we cannot select counter at event initialization time. + * + * Unfortunately, because of this we must handle overflows from disable() + * if we catch them, and this can lead to spurious interrupts from monitors + * if an interrupt was handled here. 
+ */ +static int e2k_pmu_add(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + s64 period; + u8 monitor, event_id; + + monitor = (hwc->config & 0xff00) >> 8; + event_id = hwc->config & 0xff; + + pr_debug("event %lx: enabling %hhx:%02hhx\n" + "sample_period %lld, left %ld\n", + event, monitor, event_id, hwc->sample_period, + local64_read(&hwc->period_left)); + + if (hwc->sample_period) + hwc->last_period = hwc->sample_period; + + if (hwc->sample_period && local64_read(&hwc->period_left)) + period = local64_read(&hwc->period_left); + else + period = hwc->sample_period; + + local64_set(&hwc->prev_count, period); + + /* + * Zero period means counting from 0 + * (i.e. we will never stop in this life since + * counters are 64-bits long) + */ + return monitor_enable((u32) monitor, (u32) event_id, period, + event, flags & PERF_EF_START); +} + +static void e2k_pmu_update(struct perf_event *event, s64 left) +{ + struct hw_perf_event *hwc = &event->hw; + s64 prev; + + prev = local64_xchg(&hwc->prev_count, left); + + local64_add(prev - left, &event->count); + + pr_debug("event %lx: updating %llx:%02llx\n" + "sample_period %lld, count %ld (+%lld)\n" + "left previously %lld, left now %lld\n", + event, (hwc->config & 0xff00) >> 8, hwc->config & 0xff, + hwc->sample_period, local64_read(&event->count), + prev - left, prev, left); +} + +static void e2k_pmu_del(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + u64 left; + + left = monitor_disable(hwc); + local64_set(&hwc->period_left, left); + + pr_debug("event %lx: disabling %llx:%02llx\n" + "sample_period %lld, left %lld\n", + event, (hwc->config & 0xff00) >> 8, + hwc->config & 0xff, hwc->sample_period, left); + + e2k_pmu_update(event, left); +} + +static void e2k_pmu_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u8 monitor; + s64 left; + + monitor = (hwc->config & 0xff00) >> 8; + left = monitor_read((u32) monitor, hwc->idx); + + 
e2k_pmu_update(event, left); +} + +static void e2k_pmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + s64 left; + + left = monitor_pause(event, hwc, flags & PERF_EF_UPDATE); + + if (flags & PERF_EF_UPDATE) { + local64_set(&hwc->period_left, left); + + pr_debug("event %lx: pausing %llx:%02llx\n" + "sample_period %lld, left %lld\n", + event, (hwc->config & 0xff00) >> 8, + hwc->config & 0xff, hwc->sample_period, left); + + e2k_pmu_update(event, left); + } +} + + +static void e2k_pmu_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + s64 left = 0; + + pr_debug("event %lx: resuming %llx:%02llx\n" + "sample_period %lld\n", + event, (hwc->config & 0xff00) >> 8, hwc->config & 0xff, + hwc->sample_period); + + if (flags & PERF_EF_RELOAD) { + left = local64_read(&hwc->period_left); + + local64_set(&hwc->prev_count, (u64) left); + + pr_debug("event %lx: period_left %lld\n", event, left); + } + + monitor_resume(hwc, flags & PERF_EF_RELOAD, left); +} + + +static u8 hardware_events_map[PERF_COUNT_HW_MAX][2] = { + /* PERF_COUNT_HW_CPU_CYCLES */ + {0, 0}, + /* PERF_COUNT_HW_INSTRUCTIONS */ + {DIM0_DIM1, 0x14}, + /* PERF_COUNT_HW_CACHE_REFERENCES */ + {DDM0, 0x40}, + /* PERF_COUNT_HW_CACHE_MISSES */ + {0, 0}, + /* PERF_COUNT_HW_BRANCH_INSTRUCTIONS */ + {0, 0}, + /* PERF_COUNT_HW_BRANCH_MISSES */ + {0, 0}, + /* PERF_COUNT_HW_BUS_CYCLES */ + {0, 0}, + /* PERF_COUNT_HW_STALLED_CYCLES_FRONTEND */ + {DIM0_DIM1, 0x18}, + /* PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x19 + 0x2e + 0x2f */ + {0, 0}, + /* PERF_COUNT_HW_REF_CPU_CYCLES */ + {0, 0} +}; + +__init +static int init_perf_events_map(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V2) { + hardware_events_map[PERF_COUNT_HW_CPU_CYCLES][0] = DIM0_DIM1; + hardware_events_map[PERF_COUNT_HW_CPU_CYCLES][1] = 0x72; + } + + if (machine.native_iset_ver >= E2K_ISET_V6) { + hardware_events_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS][0] = DIM0_DIM1; + 
hardware_events_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS][1] = 0x27; + + hardware_events_map[PERF_COUNT_HW_CACHE_MISSES][0] = DDM0; + hardware_events_map[PERF_COUNT_HW_CACHE_MISSES][1] = 0x4e; + } + + return 0; +} +pure_initcall(init_perf_events_map); + +static u8 hw_cache_events_map_v2[PERF_COUNT_HW_CACHE_MAX][PERF_COUNT_HW_CACHE_OP_MAX][PERF_COUNT_HW_CACHE_RESULT_MAX][2] = { + [PERF_COUNT_HW_CACHE_L1D] = { + [PERF_COUNT_HW_CACHE_OP_WRITE] = { + [PERF_COUNT_HW_CACHE_RESULT_ACCESS] = {DDM1, 0x1} + } + }, + [PERF_COUNT_HW_CACHE_LL] = { + [PERF_COUNT_HW_CACHE_OP_WRITE] = { + [PERF_COUNT_HW_CACHE_RESULT_ACCESS] = {DDM1, 0x41} + } + }, +}; + +static u8 hw_cache_events_map_v6[PERF_COUNT_HW_CACHE_MAX][PERF_COUNT_HW_CACHE_OP_MAX][PERF_COUNT_HW_CACHE_RESULT_MAX][2] = { + [PERF_COUNT_HW_CACHE_L1D] = { + [PERF_COUNT_HW_CACHE_OP_READ] = { + [PERF_COUNT_HW_CACHE_RESULT_ACCESS] = {DDM0, 0x5}, + [PERF_COUNT_HW_CACHE_RESULT_MISS] = {DDM1, 0x5} + }, + [PERF_COUNT_HW_CACHE_OP_WRITE] = { + [PERF_COUNT_HW_CACHE_RESULT_ACCESS] = {DDM0, 0x1}, + [PERF_COUNT_HW_CACHE_RESULT_MISS] = {DDM1, 0x3} + }, + [PERF_COUNT_HW_CACHE_OP_PREFETCH] = { + [PERF_COUNT_HW_CACHE_RESULT_ACCESS] = {DDM0, 0x7}, + [PERF_COUNT_HW_CACHE_RESULT_MISS] = {DDM1, 0x6} + } + }, + [PERF_COUNT_HW_CACHE_LL] = { + [PERF_COUNT_HW_CACHE_OP_READ] = { + [PERF_COUNT_HW_CACHE_RESULT_ACCESS] = {DDM0, 0x4d}, + [PERF_COUNT_HW_CACHE_RESULT_MISS] = {DDM1, 0x4e} + }, + [PERF_COUNT_HW_CACHE_OP_WRITE] = { + [PERF_COUNT_HW_CACHE_RESULT_ACCESS] = {DDM1, 0x41}, + [PERF_COUNT_HW_CACHE_RESULT_MISS] = {DDM1, 0x4d} + }, + [PERF_COUNT_HW_CACHE_OP_PREFETCH] = { + /* bug 109342 comment 11: + * LL-prefetch = l1d-prefetch-miss */ + [PERF_COUNT_HW_CACHE_RESULT_ACCESS] = {DDM1, 0x6}, + [PERF_COUNT_HW_CACHE_RESULT_MISS] = {DDM1, 0x4f} + } + }, + [PERF_COUNT_HW_CACHE_DTLB] = { + [PERF_COUNT_HW_CACHE_OP_READ] = { + [PERF_COUNT_HW_CACHE_RESULT_ACCESS] = {DDM0, 0x1a}, + [PERF_COUNT_HW_CACHE_RESULT_MISS] = {DDM1, 0x1a} + }, + [PERF_COUNT_HW_CACHE_OP_WRITE] = 
{ + [PERF_COUNT_HW_CACHE_RESULT_ACCESS] = {DDM0, 0x1b}, + [PERF_COUNT_HW_CACHE_RESULT_MISS] = {DDM1, 0x1b} + }, + [PERF_COUNT_HW_CACHE_OP_PREFETCH] = { + [PERF_COUNT_HW_CACHE_RESULT_ACCESS] = {DDM0, 0x1c}, + [PERF_COUNT_HW_CACHE_RESULT_MISS] = {DDM1, 0x1c} + } + } +}; + +#define MAX_EVENTS 256 +static char hw_raw_event_to_iset[MAX_HW_MONITORS][MAX_EVENTS] = { + [_DDM0] = { + [0x0 ... 0x3] = E2K_ISET_SINCE_V2_MASK, + [0x10 ... 0x16] = E2K_ISET_SINCE_V2_MASK, + [0x20 ... 0x21] = E2K_ISET_SINCE_V2_MASK, + [0x30 ... 0x3a] = E2K_ISET_SINCE_V2_MASK, + [0x40 ... 0x46] = E2K_ISET_SINCE_V2_MASK, + [0x48] = E2K_ISET_SINCE_V2_MASK, + [0x4a ... 0x4b] = E2K_ISET_SINCE_V2_MASK, + [0x70 ... 0x72] = E2K_ISET_SINCE_V2_MASK, + + [0x17 ... 0x19] = E2K_ISET_SINCE_V3_MASK, + [0x22 ... 0x24] = E2K_ISET_SINCE_V3_MASK, + + [0x4] = E2K_ISET_SINCE_V5_MASK, + [0x47] = E2K_ISET_SINCE_V5_MASK, + + [0x5 ... 0x7] = E2K_ISET_SINCE_V6_MASK, + [0x1a ... 0x1c] = E2K_ISET_SINCE_V6_MASK, + + [0x49] = E2K_ISET_V2_MASK | E2K_ISET_SINCE_V6_MASK, + [0x4c ... 0x4f] = E2K_ISET_V2_MASK | E2K_ISET_SINCE_V6_MASK, + }, + [_DDM1] = { + [0x0 ... 0x2] = E2K_ISET_SINCE_V2_MASK, + [0x10 ... 0x16] = E2K_ISET_SINCE_V2_MASK, + [0x20 ... 0x21] = E2K_ISET_SINCE_V2_MASK, + [0x30 ... 0x3a] = E2K_ISET_SINCE_V2_MASK, + [0x40 ... 0x48] = E2K_ISET_SINCE_V2_MASK, + [0x4a ... 0x4b] = E2K_ISET_SINCE_V2_MASK, + [0x70 ... 0x72] = E2K_ISET_SINCE_V2_MASK, + + [0x17 ... 0x19] = E2K_ISET_SINCE_V3_MASK, + [0x22 ... 0x23] = E2K_ISET_SINCE_V3_MASK, + + [0x4] = E2K_ISET_SINCE_V5_MASK, + + [0x3 ... 0x7] = E2K_ISET_SINCE_V6_MASK, + [0x1a ... 0x1c] = E2K_ISET_SINCE_V6_MASK, + [0x49] = E2K_ISET_SINCE_V6_MASK, + + [0x4c] = E2K_ISET_V2_MASK, + + [0x4d ... 0x4f] = E2K_ISET_V2_MASK | E2K_ISET_SINCE_V6_MASK, + }, + [_DIM0] = { + [0x0 ... 0x3] = E2K_ISET_SINCE_V2_MASK, + [0x7 ... 0xa] = E2K_ISET_SINCE_V2_MASK, + [0x10 ... 0x1f] = E2K_ISET_SINCE_V2_MASK, + [0x20 ... 0x26] = E2K_ISET_SINCE_V2_MASK, + [0x30 ... 
0x3c] = E2K_ISET_SINCE_V2_MASK, + [0x40 ... 0x4a] = E2K_ISET_SINCE_V2_MASK, + [0x50 ... 0x5a] = E2K_ISET_SINCE_V2_MASK, + [0x60 ... 0x69] = E2K_ISET_SINCE_V2_MASK, + [0x70 ... 0x74] = E2K_ISET_SINCE_V2_MASK, + + [0xf] = E2K_ISET_SINCE_V3_MASK, + [0x3d] = E2K_ISET_SINCE_V3_MASK, + + [0x2d ... 0x2f] = E2K_ISET_SINCE_V5_MASK, + + [0x27] = E2K_ISET_SINCE_V6_MASK, + + [0x4 ... 0x6] = E2K_ISET_V2_MASK, + [0x25 ... 0x26] = E2K_ISET_SINCE_V2_MASK, + }, + [_DIM1] = { + /* Almost same as _DIM0 - only 0xf/0x25/0x26 events differ */ + [0x0 ... 0x3] = E2K_ISET_SINCE_V2_MASK, + [0x7 ... 0xa] = E2K_ISET_SINCE_V2_MASK, + [0x10 ... 0x1f] = E2K_ISET_SINCE_V2_MASK, + [0x20 ... 0x24] = E2K_ISET_SINCE_V2_MASK, + [0x30 ... 0x3c] = E2K_ISET_SINCE_V2_MASK, + [0x40 ... 0x4a] = E2K_ISET_SINCE_V2_MASK, + [0x50 ... 0x5a] = E2K_ISET_SINCE_V2_MASK, + [0x60 ... 0x69] = E2K_ISET_SINCE_V2_MASK, + [0x70 ... 0x74] = E2K_ISET_SINCE_V2_MASK, + + [0x3d] = E2K_ISET_SINCE_V3_MASK, + + [0x2d ... 0x2f] = E2K_ISET_SINCE_V5_MASK, + + [0x27] = E2K_ISET_SINCE_V6_MASK, + + [0x4 ... 0x6] = E2K_ISET_V2_MASK, + }, + [_DDM0_DDM1] = { + /* Intersection of DDM0/DDM1 */ + [0x4] = E2K_ISET_SINCE_V5_MASK, + }, + [_DIM0_DIM1] = { + /* Intersection of DIM0/DIM1 */ + [0x0 ... 0x3] = E2K_ISET_SINCE_V2_MASK, + [0x7 ... 0xa] = E2K_ISET_SINCE_V2_MASK, + [0x10 ... 0x1f] = E2K_ISET_SINCE_V2_MASK, + [0x20 ... 0x24] = E2K_ISET_SINCE_V2_MASK, + [0x30 ... 0x3c] = E2K_ISET_SINCE_V2_MASK, + [0x40 ... 0x4a] = E2K_ISET_SINCE_V2_MASK, + [0x50 ... 0x5a] = E2K_ISET_SINCE_V2_MASK, + [0x60 ... 0x69] = E2K_ISET_SINCE_V2_MASK, + [0x70 ... 0x74] = E2K_ISET_SINCE_V2_MASK, + + [0x3d] = E2K_ISET_SINCE_V3_MASK, + + [0x2d ... 0x2f] = E2K_ISET_SINCE_V5_MASK, + + [0x27] = E2K_ISET_SINCE_V6_MASK, + + [0x4 ... 
0x6] = E2K_ISET_V2_MASK | E2K_ISET_V2_MASK, + }, +}; + +static int event_attr_to_monitor_and_id(struct perf_event_attr *attr, + u8 *monitor, u8 *event_id) +{ + switch (attr->type) { + case PERF_TYPE_RAW: + *monitor = (attr->config & 0xff00) >> 8; + *event_id = attr->config & 0xff; + + if (*monitor >= MAX_HW_MONITORS || + *event_id >= MAX_EVENTS) + return -EINVAL; + + if (0 == (hw_raw_event_to_iset[*monitor][*event_id] & + (1 << machine.native_iset_ver))) + return -EINVAL; + + *monitor = 1 << *monitor; + break; + case PERF_TYPE_HARDWARE: { + u64 num = attr->config; + + if (unlikely(num >= PERF_COUNT_HW_MAX)) + return -EINVAL; + + *monitor = hardware_events_map[num][0]; + *event_id = hardware_events_map[num][1]; + break; + } + case PERF_TYPE_HW_CACHE: { + u64 type, op, result; + + type = attr->config & 0xff; + op = (attr->config >> 8) & 0xff; + result = (attr->config >> 16) & 0xff; + + if (unlikely(type >= PERF_COUNT_HW_CACHE_MAX + || op >= PERF_COUNT_HW_CACHE_OP_MAX + || result >= PERF_COUNT_HW_CACHE_RESULT_MAX)) + return -EINVAL; + + if (machine.native_iset_ver >= E2K_ISET_V6) { + *monitor = hw_cache_events_map_v6[type][op][result][0]; + *event_id = hw_cache_events_map_v6[type][op][result][1]; + } else { + *monitor = hw_cache_events_map_v2[type][op][result][0]; + *event_id = hw_cache_events_map_v2[type][op][result][1]; + } + break; + } + default: + return -ENOENT; + } + + if (unlikely(!*monitor)) { + pr_debug("hardware perf_event: config not supported\n"); + return -EINVAL; + } + + return 0; +} + +int e2k_pmu_event_init(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int err; + u8 monitor, event_id; + + err = event_attr_to_monitor_and_id(&event->attr, &monitor, &event_id); + if (err) + goto error; + + /* + * Good, this event will fit. Save configuration. + */ + hwc->config = (monitor << 8) | event_id; + hwc->idx = (monitor == DIM0 || monitor == DDM0) ? 
0 : 1; + + if (!event->attr.exclude_user) + hwc->config |= ARCH_PERFMON_USR; + if (!event->attr.exclude_kernel) + hwc->config |= ARCH_PERFMON_OS; + + if (is_sampling_event(event) && + cpu_has(CPU_HWBUG_KERNEL_DATA_MONITOR) && + (monitor == DDM0 || monitor == DDM1)) + hwc->config &= ~ARCH_PERFMON_OS; + + pr_debug("perf event %lld initialized with config %hhx:%hhx\n", + event->id, monitor, event_id); + + return 0; + +error: + pr_debug("perf event init failed with %d (type %d, config %llx)\n", + err, event->attr.type, event->attr.config); + + return err; +} + + +/* + * hw counters enabling/disabling. + * + * Masking NMIs delays hardware counters delivering. + */ + +static DEFINE_PER_CPU(unsigned long, saved_flags); + +static void e2k_pmu_disable(struct pmu *pmu) +{ + unsigned long flags; + int count; + + /* + * Note: this does not stop monitors counting, so it is + * possible to get interrupt from a monitor if it is not + * disabled inside this pmu_disable/pmu_enable section. + * For monitors that indeed are disabled the pending + * interrupt is cleared when writing to %dimcr/%ddmcr. 
+ */ + raw_all_irq_save(flags); + + count = __this_cpu_add_return(hw_perf_disable_count, 1) - 1; + if (!count) + __this_cpu_write(saved_flags, flags); +} + +static void e2k_pmu_enable(struct pmu *pmu) +{ + int count; + + count = __this_cpu_add_return(hw_perf_disable_count, -1); + + if (!count) { + unsigned long flags = __this_cpu_read(saved_flags); + + /* Enable NMIs to get all interrupts that might + * have arrived while we were disabling perf */ + raw_all_irq_restore(flags); + + BUG_ON(raw_nmi_irqs_disabled_flags(flags)); + } +} + +PMU_FORMAT_ATTR(event, "config:0-63"); + +static struct attribute *e2k_cpu_format_attrs[] = { + &format_attr_event.attr, + NULL +}; + +static const struct attribute_group e2k_cpu_format_attr_group = { + .name = "format", + .attrs = e2k_cpu_format_attrs +}; + +/* Needed for event aliases from tools/perf/pmu-events/ to work */ +static const struct attribute_group *e2k_pmu_attr_groups[] = { + &e2k_cpu_format_attr_group, + NULL +}; + +/* Performance monitoring unit for e2k */ +static struct pmu e2k_pmu = { + .pmu_enable = e2k_pmu_enable, + .pmu_disable = e2k_pmu_disable, + + .event_init = e2k_pmu_event_init, + .add = e2k_pmu_add, + .del = e2k_pmu_del, + + .start = e2k_pmu_start, + .stop = e2k_pmu_stop, + .read = e2k_pmu_read, + + .attr_groups = e2k_pmu_attr_groups, +}; + + +static int __init init_hw_perf_events(void) +{ + return perf_pmu_register(&e2k_pmu, "cpu", PERF_TYPE_RAW); +} +early_initcall(init_hw_perf_events); + diff --git a/arch/e2k/kernel/perf_event/uncore.c b/arch/e2k/kernel/perf_event/uncore.c new file mode 100644 index 000000000000..c769fc9c231f --- /dev/null +++ b/arch/e2k/kernel/perf_event/uncore.c @@ -0,0 +1,311 @@ +#include +#include +#include + +static cpumask_t uncore_cpu_mask; +/* + * To add new monitor to uncore perf: + * + * 1) Implement 3 register access functions which can: + * - set current events count + * - get current events count + * - set config field of monitor + * + * 2) Fill struct e2k_uncore_reg_ops with 
these functions + * (for example ipcc_reg_ops) + * + * 3) Fill struct e2k_uncore_event_desc with event descriptions + * (for example sic_MCM_events) + * Be careful: values from the descriptions are passed by the parser + * to perf_event_open() in the attr.config field + * + * 4) Fill struct attribute + * (for example e2k_sic_MCM_events_attrs) + * + * 5) Fill struct attribute_group + * (for example e2k_sic_MCM_attr_group) + * + * 6) Fill struct e2k_uncore with + * Main fields: + * - pmu.event_init = e2k_uncore_event_init + * - pmu.add = e2k_uncore_add + * - pmu.del = e2k_uncore_del + * - pmu.start = e2k_uncore_start + * - pmu.stop = e2k_uncore_stop + * - pmu.read = e2k_uncore_read + * - pmu.reg_ops = ~struct from 1) step~ + * - pmu.attr_groups = ~struct from 5) step~ + * - .name = ~name~ (is used by sysfs) + * + * Optional fields: + * You can use other fields of e2k_uncore + * (.node, .idx_at_node) as you want + * (for example: allow the reg access functions to write + * directly into the necessary registers). + * Add other fields if you need them. + * + * 7) Create array of valid events terminated with -1 and fill + * pmu.valid_events with it. It is used for error checking + * (for example iocc_valid_events). 
+ * + * 8) Pass e2k_uncore.pmu and e2k_uncore.name to perf_pmu_register() + * + */ + +ssize_t e2k_uncore_event_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct e2k_uncore_event_desc *event = + container_of(attr, struct e2k_uncore_event_desc, attr); + return sprintf(buf, "%s", event->config); +} + +static ssize_t uncore_get_attr_cpumask(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask); +} + +static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL); + +static struct attribute *uncore_pmu_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +const struct attribute_group e2k_cpumask_attr_group = { + .attrs = uncore_pmu_attrs, +}; + +static struct e2k_uncore *event_to_e2k_uncore(struct perf_event *event) +{ + return container_of(event->pmu, struct e2k_uncore, pmu); +} + + +void e2k_uncore_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + struct e2k_uncore *uncore = event_to_e2k_uncore(event); + + if (flags & PERF_EF_RELOAD) + uncore->reg_ops->set_cnt(uncore, hwc, + local64_read(&hwc->prev_count)); + + hwc->state = 0; + + uncore->reg_ops->set_cfg(uncore, hwc, true); + perf_event_update_userpage(event); +} + + +static int uncore_validate_event(struct perf_event *event) +{ + struct e2k_uncore *uncore = event_to_e2k_uncore(event); + u64 event_id; + int i, ret; + + event_id = uncore->get_event(&event->hw); + + ret = -EINVAL; + for (i = 0; uncore->valid_events[i].first != -1 || + uncore->valid_events[i].last != -1; i++) { + if (event_id >= uncore->valid_events[i].first && + event_id <= uncore->valid_events[i].last) { + ret = 0; + break; + } + } + if (ret) { + pr_info_ratelimited("uncore: event %llu does not exist\n", + event_id); + return ret; + } + + if (uncore->validate_event) { + ret = uncore->validate_event(uncore, &event->hw); + if (ret) + return ret; + } + + return 0; +} + +int e2k_uncore_event_init(struct 
perf_event *event) +{ + struct e2k_uncore *uncore = event_to_e2k_uncore(event); + struct hw_perf_event *hwc = &event->hw; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* Sampling events are not supported */ + if (event->attr.sample_period) + return -EINVAL; + + /* IPCC and IOCC counters don't have usr/os/guest/host bits */ + if (event->attr.exclude_user || event->attr.exclude_kernel || + event->attr.exclude_host || event->attr.exclude_guest) + return -EINVAL; + + hwc->config = event->attr.config; + hwc->idx = -1; + + return uncore_validate_event(event); +} + +int e2k_uncore_add(struct perf_event *event, int flags) +{ + int i, ret; + struct e2k_uncore *uncore = event_to_e2k_uncore(event); + struct hw_perf_event *hwc = &event->hw; + + /* are we already assigned? */ + if (hwc->idx != -1 && uncore->events[hwc->idx] == event) + goto out; + + for (i = 0; i < uncore->num_counters; i++) { + if (uncore->events[i] == event) { + hwc->idx = i; + goto out; + } + } + + /* if we didn't find it, take the first available counter */ + hwc->idx = -1; + if (uncore->add_event) { + ret = uncore->add_event(uncore, event); + if (ret) + return ret; + } else { + for (i = 0; i < uncore->num_counters; i++) { + if (cmpxchg(&uncore->events[i], NULL, event) == NULL) { + hwc->idx = i; + break; + } + } + } + +out: + if (hwc->idx == -1) + return -EBUSY; + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + if (flags & PERF_EF_START) + e2k_uncore_start(event, PERF_EF_RELOAD); + + return 0; +} + +void e2k_uncore_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct e2k_uncore *uncore = event_to_e2k_uncore(event); + u64 prev, new; + + new = uncore->reg_ops->get_cnt(uncore, hwc); + prev = local64_xchg(&hwc->prev_count, new); + + local64_add(new - prev, &event->count); + + pr_debug("event %px: updating, prev_count was %lld, now %lld, added delta %lld\n", + event, prev, new, new - prev); +} + +void e2k_uncore_stop(struct perf_event *event, int flags) 
+{ + struct hw_perf_event *hwc = &event->hw; + struct e2k_uncore *uncore = event_to_e2k_uncore(event); + + uncore->reg_ops->set_cfg(uncore, hwc, false); + hwc->state |= PERF_HES_STOPPED; + + if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { + e2k_uncore_read(event); + hwc->state |= PERF_HES_UPTODATE; + } +} + +void e2k_uncore_del(struct perf_event *event, int flags) +{ + int i; + struct e2k_uncore *uncore = event_to_e2k_uncore(event); + struct hw_perf_event *hwc = &event->hw; + + e2k_uncore_stop(event, PERF_EF_UPDATE); + + for (i = 0; i < uncore->num_counters; i++) { + if (cmpxchg(&uncore->events[i], event, NULL) == event) + break; + } + + hwc->idx = -1; +} + +static int __init e2k_uncore_init(void) +{ + int init_ret = 0, ret; + + cpumask_set_cpu(0, &uncore_cpu_mask); + + if (E2K_UNCORE_HAS_IPCC) { + ret = register_ipcc_pmus(); + if (ret) { + pr_info("WARNING Could not register IPCC pmu\n"); + init_ret = ret; + } + } + if (E2K_UNCORE_HAS_IOCC) { + ret = register_iocc_pmus(); + if (ret) { + pr_info("WARNING Could not register IOCC pmu\n"); + init_ret = ret; + } + } + if (E2K_UNCORE_HAS_SIC) { + ret = register_sic_pmus(); + if (ret) { + pr_info("WARNING Could not register SIC pmu\n"); + init_ret = ret; + } + } + if (E2K_UNCORE_HAS_HMU) { + ret = register_hmu_pmus(); + if (ret) { + pr_info("WARNING Could not register HMU pmu\n"); + init_ret = ret; + } + } + if (E2K_UNCORE_HAS_IOMMU) { + ret = register_iommu_pmus(); + if (ret) { + pr_info("WARNING Could not register IOMMU pmu\n"); + init_ret = ret; + } + } + if (E2K_UNCORE_HAS_HC) { + ret = register_hc_pmus(); + if (ret) { + pr_info("WARNING Could not register HC pmu\n"); + init_ret = ret; + } + } + if (E2K_UNCORE_HAS_PREPIC) { + ret = register_prepic_pmus(); + if (ret) { + pr_info("WARNING Could not register HC pmu\n"); + init_ret = ret; + } + } + if (E2K_UNCORE_HAS_MC) { + ret = register_mc_pmus(); + if (ret) { + pr_info("WARNING Could not register MC pmu\n"); + init_ret = ret; + } + } + + return 
init_ret; +} +device_initcall(e2k_uncore_init); diff --git a/arch/e2k/kernel/perf_event/uncore_hc.c b/arch/e2k/kernel/perf_event/uncore_hc.c new file mode 100644 index 000000000000..87e2666d31be --- /dev/null +++ b/arch/e2k/kernel/perf_event/uncore_hc.c @@ -0,0 +1,229 @@ +#include +#include +#include +#include +#include +#include +#include + +static struct e2k_uncore *e2k_uncore_hc[MAX_NUMNODES]; + +typedef union { + struct { + u64 event : 8; + u64 counter : 1; + u64 id : 16; + u64 __unused : 39; + }; + u64 word; +} hc_config_attr_t; + +PMU_FORMAT_ATTR(event, "config:0-7"); +/* 1 bit reserved for software setting of used counter */ +PMU_FORMAT_ATTR(id, "config:9-24"); + +static struct attribute *hc_mcr_format_attr[] = { + &format_attr_event.attr, + &format_attr_id.attr, + NULL, +}; + +static struct e2k_uncore_valid_events hc_mcr_valid_events[] = { + { 0x0, 0xe }, + { -1, -1} +}; + +static struct attribute_group hc_mcr_format_group = { + .name = "format", + .attrs = hc_mcr_format_attr, +}; + +static const struct attribute_group *hc_mcr_attr_group[] = { + &hc_mcr_format_group, + &e2k_cpumask_attr_group, + NULL, +}; + +static u64 get_hc_str_cnt(struct e2k_uncore *uncore, struct hw_perf_event *hwc) +{ + u32 mar_lo = 0, mar_hi = 0; + hc_config_attr_t config = { .word = hwc->config }; + int node = uncore->node; + u64 val; + + switch (config.counter) { + case 0: + do { + mar_hi = sic_read_node_nbsr_reg(node, HC_MAR0_HI); + mar_lo = sic_read_node_nbsr_reg(node, HC_MAR0_LO); + } while (mar_hi != sic_read_node_nbsr_reg(node, HC_MAR0_HI)); + break; + case 1: + do { + mar_hi = sic_read_node_nbsr_reg(node, HC_MAR1_HI); + mar_lo = sic_read_node_nbsr_reg(node, HC_MAR1_LO); + } while (mar_hi != sic_read_node_nbsr_reg(node, HC_MAR1_HI)); + break; + } + + val = ((u64) mar_hi << 32UL) | (u64) mar_lo; + + pr_debug("hw_event %px: get_cnt %lld\n", hwc, val); + + return val; +} + +static void modify_mid(int node, hc_config_attr_t config) +{ + e2k_hc_mid_t mid; + + AW(mid) = 
sic_read_node_nbsr_reg(node, HC_MID); + if (config.counter) + mid.id1 = config.id; + else + mid.id0 = config.id; + sic_write_node_nbsr_reg(node, HC_MID, AW(mid)); +} + +static void modify_mcr(int node, hc_config_attr_t config, bool enable) +{ + e2k_hc_mcr_t mcr; + + AW(mcr) = sic_read_node_nbsr_reg(node, HC_MCR); + if (config.counter) { + mcr.v1 = !!enable; + mcr.es1 = config.event; + } else { + mcr.v0 = !!enable; + mcr.es0 = config.event; + } + sic_write_node_nbsr_reg(node, HC_MCR, AW(mcr)); + + pr_debug("set_cfg 0x%x\n", AW(mcr)); +} + +static void set_hc_str_cfg(struct e2k_uncore *uncore, + struct hw_perf_event *hwc, bool enable) +{ + hc_config_attr_t config = { .word = hwc->config }; + int node = uncore->node; + + if (enable) { + modify_mid(node, config); + modify_mcr(node, config, enable); + } else { + modify_mcr(node, config, enable); + modify_mid(node, config); + } +} + +static void set_hc_str_cnt(struct e2k_uncore *uncore, + struct hw_perf_event *hwc, u64 val) +{ + u32 mar_lo, mar_hi; + hc_config_attr_t config = { .word = hwc->config }; + int node = uncore->node; + + mar_lo = val; + mar_hi = val >> 32; + + switch (config.counter) { + case 0: + sic_write_node_nbsr_reg(node, HC_MAR0_LO, mar_lo); + sic_write_node_nbsr_reg(node, HC_MAR0_HI, mar_hi); + break; + case 1: + sic_write_node_nbsr_reg(node, HC_MAR1_LO, mar_lo); + sic_write_node_nbsr_reg(node, HC_MAR1_HI, mar_hi); + break; + } + + pr_debug("hw_event %px: set_cnt %lld\n", hwc, val); +} + + +static struct e2k_uncore_reg_ops hc_reg_ops = { + .get_cnt = get_hc_str_cnt, + .set_cfg = set_hc_str_cfg, + .set_cnt = set_hc_str_cnt, +}; + +static u64 hc_get_event(struct hw_perf_event *hwc) +{ + hc_config_attr_t config = { .word = hwc->config }; + + return config.event; +} + +static int hc_add_event(struct e2k_uncore *uncore, struct perf_event *event) +{ + hc_config_attr_t config = { .word = event->hw.config }, config2; + int i, empty_slot = -1, used_counter = -1; + + /* validate against running counters */ + for 
(i = 0; i < uncore->num_counters; i++) { + struct perf_event *event2 = READ_ONCE(uncore->events[i]); + + if (!event2) { + empty_slot = i; + continue; + } + + AW(config2) = event2->hw.config; + used_counter = config2.counter; + } + + /* take the first available slot */ + if (empty_slot == -1) + return -ENOSPC; + + config.counter = !used_counter; + event->hw.config = AW(config); + + if (cmpxchg(&uncore->events[empty_slot], NULL, event) != NULL) + return -ENOSPC; + + event->hw.idx = empty_slot; + + return 0; +} + +int __init register_hc_pmus() +{ + int i, counters = 2; + + for_each_online_node(i) { + struct e2k_uncore *uncore = kzalloc(sizeof(struct e2k_uncore) + + counters * sizeof(void *), GFP_KERNEL); + if (!uncore) + return -ENOMEM; + + uncore->type = E2K_UNCORE_HC; + + uncore->pmu.event_init = e2k_uncore_event_init, + uncore->pmu.task_ctx_nr = perf_invalid_context, + uncore->pmu.add = e2k_uncore_add; + uncore->pmu.del = e2k_uncore_del; + uncore->pmu.start = e2k_uncore_start; + uncore->pmu.stop = e2k_uncore_stop; + uncore->pmu.read = e2k_uncore_read; + + uncore->get_event = hc_get_event; + uncore->add_event = hc_add_event; + + uncore->reg_ops = &hc_reg_ops; + uncore->num_counters = counters; + + uncore->node = i; + + uncore->valid_events = hc_mcr_valid_events; + uncore->pmu.attr_groups = hc_mcr_attr_group; + + snprintf(uncore->name, UNCORE_PMU_NAME_LEN, "uncore_hc_%d", i); + + e2k_uncore_hc[i] = uncore; + perf_pmu_register(&uncore->pmu, uncore->name, -1); + } + + return 0; +} diff --git a/arch/e2k/kernel/perf_event/uncore_hmu.c b/arch/e2k/kernel/perf_event/uncore_hmu.c new file mode 100644 index 000000000000..0ff556534f85 --- /dev/null +++ b/arch/e2k/kernel/perf_event/uncore_hmu.c @@ -0,0 +1,392 @@ +#include +#include +#include +#include +#include +#include +#include + +static struct e2k_uncore *e2k_uncore_hmu[MAX_NUMNODES]; + +typedef union { + struct { + u64 event : 8; + u64 counter : 1; + u64 flt0_off : 1; + u64 flt0_rqid : 7; + u64 flt0_cid : 1; + u64 flt0_bid 
: 1; + u64 flt0_xid : 1; + u64 flt1_off : 1; + u64 flt1_node : 2; + u64 flt1_rnode : 1; + u64 hmu_mask : 4; + u64 __unused : 36; + }; + u64 word; +} hmu_config_attr_t; + +PMU_FORMAT_ATTR(event, "config:0-7"); +PMU_FORMAT_ATTR(counter, "config:8"); +PMU_FORMAT_ATTR(flt0_off, "config:9"); +PMU_FORMAT_ATTR(flt0_rqid, "config:10-16"); +PMU_FORMAT_ATTR(flt0_cid, "config:17"); +PMU_FORMAT_ATTR(flt0_bid, "config:18"); +PMU_FORMAT_ATTR(flt0_xid, "config:19"); +PMU_FORMAT_ATTR(flt1_off, "config:20"); +PMU_FORMAT_ATTR(flt1_node, "config:21-22"); +PMU_FORMAT_ATTR(flt1_rnode, "config:23"); +PMU_FORMAT_ATTR(hmu_mask, "config:24-27"); + +static struct attribute *hmu_mcr_format_attr[] = { + &format_attr_event.attr, + &format_attr_counter.attr, + &format_attr_flt0_off.attr, + &format_attr_flt0_rqid.attr, + &format_attr_flt0_cid.attr, + &format_attr_flt0_bid.attr, + &format_attr_flt0_xid.attr, + &format_attr_flt1_off.attr, + &format_attr_flt1_node.attr, + &format_attr_flt1_rnode.attr, + &format_attr_hmu_mask.attr, + NULL, +}; + +enum { + MCM0 = 0, + MCM1, +}; + + +static struct attribute_group hmu_mcr_format_group = { + .name = "format", + .attrs = hmu_mcr_format_attr, +}; + +static const struct attribute_group *hmu_mcr_attr_group[] = { + &hmu_mcr_format_group, + &e2k_cpumask_attr_group, + NULL, +}; + +static u64 get_hmu_str_cnt(struct e2k_uncore *uncore, struct hw_perf_event *hwc) +{ + u32 hmu0_mar_lo = 0, hmu0_mar_hi = 0, hmu1_mar_lo = 0, hmu1_mar_hi = 0, + hmu2_mar_lo = 0, hmu2_mar_hi = 0, hmu3_mar_lo = 0, hmu3_mar_hi = 0; + hmu_config_attr_t config = { .word = hwc->config }; + u64 val, hmu_mask = config.hmu_mask; + int node = uncore->node; + int trace0 = (!hmu_mask || (hmu_mask & 1)), + trace1 = (!hmu_mask || (hmu_mask & 2)), + trace2 = (!hmu_mask || (hmu_mask & 4)), + trace3 = (!hmu_mask || (hmu_mask & 8)); + + switch (config.counter) { + case 0: + do { + if (!trace0) + break; + hmu0_mar_hi = sic_read_node_nbsr_reg(node, HMU0_MAR0_HI); + hmu0_mar_lo = 
sic_read_node_nbsr_reg(node, HMU0_MAR0_LO); + } while (hmu0_mar_hi != sic_read_node_nbsr_reg(node, HMU0_MAR0_HI)); + + if (READ_IDR_REG().mdl == IDR_E2C3_MDL) + break; + + do { + if (!trace1) + break; + hmu1_mar_hi = sic_read_node_nbsr_reg(node, HMU1_MAR0_HI); + hmu1_mar_lo = sic_read_node_nbsr_reg(node, HMU1_MAR0_LO); + } while (hmu1_mar_hi != sic_read_node_nbsr_reg(node, HMU1_MAR0_HI)); + do { + if (!trace2) + break; + hmu2_mar_hi = sic_read_node_nbsr_reg(node, HMU2_MAR0_HI); + hmu2_mar_lo = sic_read_node_nbsr_reg(node, HMU2_MAR0_LO); + } while (hmu2_mar_hi != sic_read_node_nbsr_reg(node, HMU2_MAR0_HI)); + do { + if (!trace3) + break; + hmu3_mar_hi = sic_read_node_nbsr_reg(node, HMU3_MAR0_HI); + hmu3_mar_lo = sic_read_node_nbsr_reg(node, HMU3_MAR0_LO); + } while (hmu3_mar_hi != sic_read_node_nbsr_reg(node, HMU3_MAR0_HI)); + break; + case 1: + do { + if (!trace0) + break; + hmu0_mar_hi = sic_read_node_nbsr_reg(node, HMU0_MAR1_HI); + hmu0_mar_lo = sic_read_node_nbsr_reg(node, HMU0_MAR1_LO); + } while (hmu0_mar_hi != sic_read_node_nbsr_reg(node, HMU0_MAR1_HI)); + + if (READ_IDR_REG().mdl == IDR_E2C3_MDL) + break; + + do { + if (!trace1) + break; + hmu1_mar_hi = sic_read_node_nbsr_reg(node, HMU1_MAR1_HI); + hmu1_mar_lo = sic_read_node_nbsr_reg(node, HMU1_MAR1_LO); + } while (hmu1_mar_hi != sic_read_node_nbsr_reg(node, HMU1_MAR1_HI)); + do { + if (!trace2) + break; + hmu2_mar_hi = sic_read_node_nbsr_reg(node, HMU2_MAR1_HI); + hmu2_mar_lo = sic_read_node_nbsr_reg(node, HMU2_MAR1_LO); + } while (hmu2_mar_hi != sic_read_node_nbsr_reg(node, HMU2_MAR1_HI)); + do { + if (!trace3) + break; + hmu3_mar_hi = sic_read_node_nbsr_reg(node, HMU3_MAR1_HI); + hmu3_mar_lo = sic_read_node_nbsr_reg(node, HMU3_MAR1_LO); + } while (hmu3_mar_hi != sic_read_node_nbsr_reg(node, HMU3_MAR1_HI)); + break; + } + + val = 0; + + if (trace0) + val += ((u64) hmu0_mar_hi << 32UL) | (u64) hmu0_mar_lo; + if (trace1) + val += ((u64) hmu1_mar_hi << 32UL) | (u64) hmu1_mar_lo; + if (trace2) + val += ((u64) 
hmu2_mar_hi << 32UL) | (u64) hmu2_mar_lo; + if (trace3) + val += ((u64) hmu3_mar_hi << 32UL) | (u64) hmu3_mar_lo; + + pr_debug("hw_event %px: get_cnt %lld\n", hwc, val); + + return val; +} + +static void set_hmu_str_cfg(struct e2k_uncore *uncore, + struct hw_perf_event *hwc, bool enable) +{ + int node = uncore->node; + hmu_config_attr_t config = { .word = hwc->config }; + u64 event = config.event; + e2k_hmu_mcr_t mcr; + + AW(mcr) = sic_read_node_nbsr_reg(node, HMU_MCR); + + mcr.flt0_off = config.flt0_off; + mcr.flt0_rqid = config.flt0_rqid; + mcr.flt0_cid = config.flt0_cid; + mcr.flt0_bid = config.flt0_bid; + mcr.flt0_xid = config.flt0_xid; + mcr.flt1_off = config.flt1_off; + mcr.flt1_node = config.flt1_node; + mcr.flt1_rnode = config.flt1_rnode; + + switch (config.counter) { + case 0: + mcr.v0 = !!enable; + mcr.es0 = event; + break; + case 1: + mcr.v1 = !!enable; + mcr.es1 = event; + break; + } + + sic_write_node_nbsr_reg(node, HMU_MCR, AW(mcr)); + + pr_debug("hw_event %px: set_cfg 0x%x\n", hwc, AW(mcr)); +} + +static void set_hmu_str_cnt(struct e2k_uncore *uncore, + struct hw_perf_event *hwc, u64 val) +{ + u32 hmu0_mar_lo = 0, hmu0_mar_hi = 0, hmu1_mar_lo = 0, hmu1_mar_hi = 0, + hmu2_mar_lo = 0, hmu2_mar_hi = 0, hmu3_mar_lo = 0, hmu3_mar_hi = 0; + hmu_config_attr_t config = { .word = hwc->config }; + u64 hmu_mask = config.hmu_mask; + int node = uncore->node; + int trace0 = (!hmu_mask || (hmu_mask & 1)), + trace1 = (!hmu_mask || (hmu_mask & 2)), + trace2 = (!hmu_mask || (hmu_mask & 4)); + + /* Use any counter enabled in config, it doesn't matter which one */ + if (trace0) { + hmu0_mar_lo = val; + hmu0_mar_hi = val >> 32; + } else if (trace1) { + hmu1_mar_lo = val; + hmu1_mar_hi = val >> 32; + } else if (trace2) { + hmu2_mar_lo = val; + hmu2_mar_hi = val >> 32; + } else { + hmu3_mar_lo = val; + hmu3_mar_hi = val >> 32; + } + + switch (config.counter) { + case 0: + sic_write_node_nbsr_reg(node, HMU0_MAR0_LO, hmu0_mar_lo); + sic_write_node_nbsr_reg(node, 
HMU0_MAR0_HI, hmu0_mar_hi); + + if (READ_IDR_REG().mdl == IDR_E2C3_MDL) + break; + + sic_write_node_nbsr_reg(node, HMU1_MAR0_LO, hmu1_mar_lo); + sic_write_node_nbsr_reg(node, HMU1_MAR0_HI, hmu1_mar_hi); + sic_write_node_nbsr_reg(node, HMU2_MAR0_LO, hmu2_mar_lo); + sic_write_node_nbsr_reg(node, HMU2_MAR0_HI, hmu2_mar_hi); + sic_write_node_nbsr_reg(node, HMU3_MAR0_LO, hmu3_mar_lo); + sic_write_node_nbsr_reg(node, HMU3_MAR0_HI, hmu3_mar_hi); + break; + case 1: + sic_write_node_nbsr_reg(node, HMU0_MAR1_LO, hmu0_mar_lo); + sic_write_node_nbsr_reg(node, HMU0_MAR1_HI, hmu0_mar_hi); + + if (READ_IDR_REG().mdl == IDR_E2C3_MDL) + break; + + sic_write_node_nbsr_reg(node, HMU1_MAR1_LO, hmu1_mar_lo); + sic_write_node_nbsr_reg(node, HMU1_MAR1_HI, hmu1_mar_hi); + sic_write_node_nbsr_reg(node, HMU2_MAR1_LO, hmu2_mar_lo); + sic_write_node_nbsr_reg(node, HMU2_MAR1_HI, hmu2_mar_hi); + sic_write_node_nbsr_reg(node, HMU3_MAR1_LO, hmu3_mar_lo); + sic_write_node_nbsr_reg(node, HMU3_MAR1_HI, hmu3_mar_hi); + break; + } + + pr_debug("hw_event %px: set_cnt %lld\n", hwc, val); +} + + +static struct e2k_uncore_reg_ops hmu_reg_ops = { + .get_cnt = get_hmu_str_cnt, + .set_cfg = set_hmu_str_cfg, + .set_cnt = set_hmu_str_cnt, +}; + +static u64 hmu_get_event(struct hw_perf_event *hwc) +{ + hmu_config_attr_t config = { .word = hwc->config }; + + return config.event; +} + +static struct e2k_uncore_valid_events hmu_mcr_valid_events[] = { + { 0x0, 0x23 }, + { -1, -1} +}; + +static int hmu_validate_event(struct e2k_uncore *uncore, + struct hw_perf_event *hwc) +{ + hmu_config_attr_t config = { .word = hwc->config }; + u64 event = config.event; + + if (config.counter == 0 && event == 0x23 || + config.counter == 1 && event == 0x20) + return -EINVAL; + + if (READ_IDR_REG().mdl != IDR_E2C3_MDL && event == 0xd) { + pr_info_ratelimited("uncore_hmu: event 0xd exists on e2c3 only\n"); + return -EINVAL; + } + + if (READ_IDR_REG().mdl != IDR_E2C3_MDL && config.hmu_mask && + (config.hmu_mask & ~1ull)) { + 
pr_info_ratelimited("uncore_hmu: there are no HMU{1-3} registers on e2c3\n"); + return -EINVAL; + } + + return 0; +} + +static int hmu_add_event(struct e2k_uncore *uncore, struct perf_event *event) +{ + hmu_config_attr_t config = { .word = event->hw.config }; + int i; + + /* validate against running counters */ + for (i = 0; i < uncore->num_counters; i++) { + struct perf_event *event2 = READ_ONCE(uncore->events[i]); + hmu_config_attr_t config2; + + if (!event2) + continue; + + AW(config2) = event2->hw.config; + + /* + * Check that there is no conflict with same counter in HMU + */ + if (config.counter == config2.counter) + return -ENOSPC; + + if (config.flt0_off || config2.flt0_off) { + /* Must use the same configuration */ + if (config.flt0_off != config2.flt0_off || + config.flt0_rqid != config2.flt0_rqid || + config.flt0_cid != config2.flt0_cid || + config.flt0_bid != config2.flt0_bid || + config.flt0_xid != config2.flt0_xid) + return -ENOSPC; + } + + if (config.flt1_off || config2.flt1_off) { + /* Must use the same configuration */ + if (config.flt1_node != config2.flt1_node || + config.flt1_rnode != config2.flt1_rnode) + return -ENOSPC; + } + } + + /* take the first available slot */ + for (i = 0; i < uncore->num_counters; i++) { + if (cmpxchg(&uncore->events[i], NULL, event) == NULL) { + event->hw.idx = i; + return 0; + } + } + + return -ENOSPC; +} + +int __init register_hmu_pmus() +{ + int i, counters = 2; + + for_each_online_node(i) { + struct e2k_uncore *uncore = kzalloc(sizeof(struct e2k_uncore) + + counters * sizeof(void *), GFP_KERNEL); + if (!uncore) + return -ENOMEM; + + uncore->type = E2K_UNCORE_HMU; + + uncore->pmu.event_init = e2k_uncore_event_init, + uncore->pmu.task_ctx_nr = perf_invalid_context, + uncore->pmu.add = e2k_uncore_add; + uncore->pmu.del = e2k_uncore_del; + uncore->pmu.start = e2k_uncore_start; + uncore->pmu.stop = e2k_uncore_stop; + uncore->pmu.read = e2k_uncore_read; + + uncore->get_event = hmu_get_event; + uncore->add_event = 
hmu_add_event; + uncore->validate_event = hmu_validate_event; + + uncore->reg_ops = &hmu_reg_ops; + uncore->num_counters = counters; + + uncore->node = i; + + uncore->valid_events = hmu_mcr_valid_events; + uncore->pmu.attr_groups = hmu_mcr_attr_group; + + snprintf(uncore->name, UNCORE_PMU_NAME_LEN, "uncore_hmu_%d", i); + + e2k_uncore_hmu[i] = uncore; + perf_pmu_register(&uncore->pmu, uncore->name, -1); + } + + return 0; +} diff --git a/arch/e2k/kernel/perf_event/uncore_iommu.c b/arch/e2k/kernel/perf_event/uncore_iommu.c new file mode 100644 index 000000000000..ee576ed84a65 --- /dev/null +++ b/arch/e2k/kernel/perf_event/uncore_iommu.c @@ -0,0 +1,464 @@ +#include +#include +#include +#include +#include +#include +#include + +static struct e2k_uncore *e2k_uncore_iommu[MAX_NUMNODES]; + +typedef union { + struct { + u64 event : 8; + u64 counter : 1; + u64 id : 16; + u64 iommu_mask : 7; + u64 __unused : 32; + }; + u64 word; +} iommu_config_attr_t; + +PMU_FORMAT_ATTR(event, "config:0-7"); +/* 1 bit reserved for software setting of used counter */ +PMU_FORMAT_ATTR(id, "config:9-24"); +PMU_FORMAT_ATTR(iommu_mask, "config:25-31"); + +static struct attribute *iommu_mcr_format_attr[] = { + &format_attr_event.attr, + &format_attr_id.attr, + &format_attr_iommu_mask.attr, + NULL, +}; + +static struct e2k_uncore_valid_events iommu_mcr_valid_events[] = { + { 0x0, 0x19 }, + { 0x20, 0x21 }, + { -1, -1} +}; + +static struct attribute_group iommu_mcr_format_group = { + .name = "format", + .attrs = iommu_mcr_format_attr, +}; + +static const struct attribute_group *iommu_mcr_attr_group[] = { + &iommu_mcr_format_group, + &e2k_cpumask_attr_group, + NULL, +}; + +static u64 get_iommu_str_cnt(struct e2k_uncore *uncore, struct hw_perf_event *hwc) +{ + u32 mar_lo = 0, mar_hi = 0, mar26_lo = 0, mar26_hi = 0, + mar27_lo = 0, mar27_hi = 0, mar28_lo = 0, mar28_hi = 0, + mar29_lo = 0, mar29_hi = 0, mar30_lo = 0, mar30_hi = 0, + mar31_lo = 0, mar31_hi = 0; + iommu_config_attr_t config = { .word = 
hwc->config }; + u64 val, iommu_mask = config.iommu_mask; + int node = uncore->node; + bool trace0, trace26, trace27, trace28, trace29, trace30, trace31; + + if (READ_IDR_REG().mdl == IDR_E2C3_MDL) { + trace0 = !iommu_mask || (iommu_mask & 0x1); + trace26 = !iommu_mask || (iommu_mask & 0x2); + trace27 = !iommu_mask || (iommu_mask & 0x4); + trace28 = !iommu_mask || (iommu_mask & 0x8); + trace29 = !iommu_mask || (iommu_mask & 0x10); + trace30 = !iommu_mask || (iommu_mask & 0x20); + trace31 = !iommu_mask || (iommu_mask & 0x40); + } else { + trace0 = true; + trace26 = trace27 = trace28 = trace29 = trace30 = trace31 = false; + } + + switch (config.counter) { + case 0: + do { + if (!trace0) + break; + mar_hi = sic_read_node_nbsr_reg(node, IOMMU_MAR0_HI); + mar_lo = sic_read_node_nbsr_reg(node, IOMMU_MAR0_LO); + } while (mar_hi != sic_read_node_nbsr_reg(node, IOMMU_MAR0_HI)); + do { + if (!trace26) + break; + mar26_hi = sic_read_node_nbsr_reg(node, ED26_IOMMU_MAR0_HI); + mar26_lo = sic_read_node_nbsr_reg(node, ED26_IOMMU_MAR0_LO); + } while (mar26_hi != sic_read_node_nbsr_reg(node, ED26_IOMMU_MAR0_HI)); + do { + if (!trace27) + break; + mar27_hi = sic_read_node_nbsr_reg(node, ED27_IOMMU_MAR0_HI); + mar27_lo = sic_read_node_nbsr_reg(node, ED27_IOMMU_MAR0_LO); + } while (mar27_hi != sic_read_node_nbsr_reg(node, ED27_IOMMU_MAR0_HI)); + do { + if (!trace28) + break; + mar28_hi = sic_read_node_nbsr_reg(node, ED28_IOMMU_MAR0_HI); + mar28_lo = sic_read_node_nbsr_reg(node, ED28_IOMMU_MAR0_LO); + } while (mar28_hi != sic_read_node_nbsr_reg(node, ED28_IOMMU_MAR0_HI)); + do { + if (!trace29) + break; + mar29_hi = sic_read_node_nbsr_reg(node, ED29_IOMMU_MAR0_HI); + mar29_lo = sic_read_node_nbsr_reg(node, ED29_IOMMU_MAR0_LO); + } while (mar29_hi != sic_read_node_nbsr_reg(node, ED29_IOMMU_MAR0_HI)); + do { + if (!trace30) + break; + mar30_hi = sic_read_node_nbsr_reg(node, ED30_IOMMU_MAR0_HI); + mar30_lo = sic_read_node_nbsr_reg(node, ED30_IOMMU_MAR0_LO); + } while (mar30_hi != 
sic_read_node_nbsr_reg(node, ED30_IOMMU_MAR0_HI)); + do { + if (!trace31) + break; + mar31_hi = sic_read_node_nbsr_reg(node, ED31_IOMMU_MAR0_HI); + mar31_lo = sic_read_node_nbsr_reg(node, ED31_IOMMU_MAR0_LO); + } while (mar31_hi != sic_read_node_nbsr_reg(node, ED31_IOMMU_MAR0_HI)); + break; + case 1: + do { + if (!trace0) + break; + mar_hi = sic_read_node_nbsr_reg(node, IOMMU_MAR1_HI); + mar_lo = sic_read_node_nbsr_reg(node, IOMMU_MAR1_LO); + } while (mar_hi != sic_read_node_nbsr_reg(node, IOMMU_MAR1_HI)); + do { + if (!trace26) + break; + mar26_hi = sic_read_node_nbsr_reg(node, ED26_IOMMU_MAR1_HI); + mar26_lo = sic_read_node_nbsr_reg(node, ED26_IOMMU_MAR1_LO); + } while (mar26_hi != sic_read_node_nbsr_reg(node, ED26_IOMMU_MAR1_HI)); + do { + if (!trace27) + break; + mar27_hi = sic_read_node_nbsr_reg(node, ED27_IOMMU_MAR1_HI); + mar27_lo = sic_read_node_nbsr_reg(node, ED27_IOMMU_MAR1_LO); + } while (mar27_hi != sic_read_node_nbsr_reg(node, ED27_IOMMU_MAR1_HI)); + do { + if (!trace28) + break; + mar28_hi = sic_read_node_nbsr_reg(node, ED28_IOMMU_MAR1_HI); + mar28_lo = sic_read_node_nbsr_reg(node, ED28_IOMMU_MAR1_LO); + } while (mar28_hi != sic_read_node_nbsr_reg(node, ED28_IOMMU_MAR1_HI)); + do { + if (!trace29) + break; + mar29_hi = sic_read_node_nbsr_reg(node, ED29_IOMMU_MAR1_HI); + mar29_lo = sic_read_node_nbsr_reg(node, ED29_IOMMU_MAR1_LO); + } while (mar29_hi != sic_read_node_nbsr_reg(node, ED29_IOMMU_MAR1_HI)); + do { + if (!trace30) + break; + mar30_hi = sic_read_node_nbsr_reg(node, ED30_IOMMU_MAR1_HI); + mar30_lo = sic_read_node_nbsr_reg(node, ED30_IOMMU_MAR1_LO); + } while (mar30_hi != sic_read_node_nbsr_reg(node, ED30_IOMMU_MAR1_HI)); + do { + if (!trace31) + break; + mar31_hi = sic_read_node_nbsr_reg(node, ED31_IOMMU_MAR1_HI); + mar31_lo = sic_read_node_nbsr_reg(node, ED31_IOMMU_MAR1_LO); + } while (mar31_hi != sic_read_node_nbsr_reg(node, ED31_IOMMU_MAR1_HI)); + break; + } + + val = 0; + + if (trace0) + val += ((u64) mar_hi << 32UL) | (u64) mar_lo; + if 
(trace26) + val += ((u64) mar26_hi << 32UL) | (u64) mar26_lo; + if (trace27) + val += ((u64) mar27_hi << 32UL) | (u64) mar27_lo; + if (trace28) + val += ((u64) mar28_hi << 32UL) | (u64) mar28_lo; + if (trace29) + val += ((u64) mar29_hi << 32UL) | (u64) mar29_lo; + if (trace30) + val += ((u64) mar30_hi << 32UL) | (u64) mar30_lo; + if (trace31) + val += ((u64) mar31_hi << 32UL) | (u64) mar31_lo; + + pr_debug("hw_event %px: get_cnt %lld\n", hwc, val); + + return val; +} + +static void modify_mid(int mid_reg, int node, iommu_config_attr_t config) +{ + e2k_iommu_mid_t mid; + + AW(mid) = sic_read_node_nbsr_reg(node, mid_reg); + if (config.counter) + mid.id1 = config.id; + else + mid.id0 = config.id; + sic_write_node_nbsr_reg(node, mid_reg, AW(mid)); +} + +static void modify_mcr(int mcr_reg, int node, + iommu_config_attr_t config, bool enable) +{ + e2k_iommu_mcr_t mcr; + + AW(mcr) = sic_read_node_nbsr_reg(node, mcr_reg); + if (config.counter) { + mcr.v1 = !!enable; + mcr.es1 = config.event; + } else { + mcr.v0 = !!enable; + mcr.es0 = config.event; + } + sic_write_node_nbsr_reg(node, mcr_reg, AW(mcr)); + + pr_debug("set_cfg 0x%x\n", AW(mcr)); +} + +static void modify_mid_mcr(int mid_reg, int mcr_reg, int node, + iommu_config_attr_t config, bool enable) +{ + if (enable) { + modify_mid(mid_reg, node, config); + modify_mcr(mcr_reg, node, config, enable); + } else { + modify_mcr(mcr_reg, node, config, enable); + modify_mid(mid_reg, node, config); + } +} + +static void set_iommu_str_cfg(struct e2k_uncore *uncore, + struct hw_perf_event *hwc, bool enable) +{ + iommu_config_attr_t config = { .word = hwc->config }; + u64 iommu_mask = config.iommu_mask; + int node = uncore->node; + bool trace0, trace26, trace27, trace28, trace29, trace30, trace31; + + if (READ_IDR_REG().mdl == IDR_E2C3_MDL) { + trace0 = !iommu_mask || (iommu_mask & 0x1); + trace26 = !iommu_mask || (iommu_mask & 0x2); + trace27 = !iommu_mask || (iommu_mask & 0x4); + trace28 = !iommu_mask || (iommu_mask & 0x8); + 
trace29 = !iommu_mask || (iommu_mask & 0x10); + trace30 = !iommu_mask || (iommu_mask & 0x20); + trace31 = !iommu_mask || (iommu_mask & 0x40); + } else { + trace0 = true; + trace26 = trace27 = trace28 = trace29 = trace30 = trace31 = false; + } + + if (trace0) + modify_mid_mcr(IOMMU_MID, IOMMU_MCR, node, config, enable); + if (trace26) + modify_mid_mcr(ED26_IOMMU_MID, ED26_IOMMU_MCR, node, config, enable); + if (trace27) + modify_mid_mcr(ED27_IOMMU_MID, ED27_IOMMU_MCR, node, config, enable); + if (trace28) + modify_mid_mcr(ED28_IOMMU_MID, ED28_IOMMU_MCR, node, config, enable); + if (trace29) + modify_mid_mcr(ED29_IOMMU_MID, ED29_IOMMU_MCR, node, config, enable); + if (trace30) + modify_mid_mcr(ED30_IOMMU_MID, ED30_IOMMU_MCR, node, config, enable); + if (trace31) + modify_mid_mcr(ED31_IOMMU_MID, ED31_IOMMU_MCR, node, config, enable); +} + +static void set_iommu_str_cnt(struct e2k_uncore *uncore, + struct hw_perf_event *hwc, u64 val) +{ + u32 mar_lo = 0, mar_hi = 0, mar26_lo = 0, mar26_hi = 0, + mar27_lo = 0, mar27_hi = 0, mar28_lo = 0, mar28_hi = 0, + mar29_lo = 0, mar29_hi = 0, mar30_lo = 0, mar30_hi = 0, + mar31_lo = 0, mar31_hi = 0; + iommu_config_attr_t config = { .word = hwc->config }; + u64 iommu_mask = config.iommu_mask; + int node = uncore->node; + bool trace0, trace26, trace27, trace28, trace29, trace30, trace31; + + if (READ_IDR_REG().mdl == IDR_E2C3_MDL) { + trace0 = !iommu_mask || (iommu_mask & 0x1); + trace26 = !iommu_mask || (iommu_mask & 0x2); + trace27 = !iommu_mask || (iommu_mask & 0x4); + trace28 = !iommu_mask || (iommu_mask & 0x8); + trace29 = !iommu_mask || (iommu_mask & 0x10); + trace30 = !iommu_mask || (iommu_mask & 0x20); + trace31 = !iommu_mask || (iommu_mask & 0x40); + } else { + trace0 = true; + trace26 = trace27 = trace28 = trace29 = trace30 = trace31 = false; + } + + /* Use any IOMMU enabled in config, it doesn't matter which one */ + if (trace0) { + mar_lo = val; + mar_hi = val >> 32; + } else if (trace26) { + mar26_lo = val; + mar26_hi = 
val >> 32; + } else if (trace27) { + mar27_lo = val; + mar27_hi = val >> 32; + } else if (trace28) { + mar28_lo = val; + mar28_hi = val >> 32; + } else if (trace29) { + mar29_lo = val; + mar29_hi = val >> 32; + } else if (trace30) { + mar30_lo = val; + mar30_hi = val >> 32; + } else { + mar31_lo = val; + mar31_hi = val >> 32; + } + + switch (config.counter) { + case 0: + sic_write_node_nbsr_reg(node, IOMMU_MAR0_LO, mar_lo); + sic_write_node_nbsr_reg(node, IOMMU_MAR0_HI, mar_hi); + + if (READ_IDR_REG().mdl != IDR_E2C3_MDL) + break; + + sic_write_node_nbsr_reg(node, ED26_IOMMU_MAR0_LO, mar26_lo); + sic_write_node_nbsr_reg(node, ED26_IOMMU_MAR0_HI, mar26_hi); + sic_write_node_nbsr_reg(node, ED27_IOMMU_MAR0_LO, mar27_lo); + sic_write_node_nbsr_reg(node, ED27_IOMMU_MAR0_HI, mar27_hi); + sic_write_node_nbsr_reg(node, ED28_IOMMU_MAR0_LO, mar28_lo); + sic_write_node_nbsr_reg(node, ED28_IOMMU_MAR0_HI, mar28_hi); + sic_write_node_nbsr_reg(node, ED29_IOMMU_MAR0_LO, mar29_lo); + sic_write_node_nbsr_reg(node, ED29_IOMMU_MAR0_HI, mar29_hi); + sic_write_node_nbsr_reg(node, ED30_IOMMU_MAR0_LO, mar30_lo); + sic_write_node_nbsr_reg(node, ED30_IOMMU_MAR0_HI, mar30_hi); + sic_write_node_nbsr_reg(node, ED31_IOMMU_MAR0_LO, mar31_lo); + sic_write_node_nbsr_reg(node, ED31_IOMMU_MAR0_HI, mar31_hi); + break; + case 1: + sic_write_node_nbsr_reg(node, IOMMU_MAR1_LO, mar_lo); + sic_write_node_nbsr_reg(node, IOMMU_MAR1_HI, mar_hi); + + if (READ_IDR_REG().mdl != IDR_E2C3_MDL) + break; + + sic_write_node_nbsr_reg(node, ED26_IOMMU_MAR1_LO, mar26_lo); + sic_write_node_nbsr_reg(node, ED26_IOMMU_MAR1_HI, mar26_hi); + sic_write_node_nbsr_reg(node, ED27_IOMMU_MAR1_LO, mar27_lo); + sic_write_node_nbsr_reg(node, ED27_IOMMU_MAR1_HI, mar27_hi); + sic_write_node_nbsr_reg(node, ED28_IOMMU_MAR1_LO, mar28_lo); + sic_write_node_nbsr_reg(node, ED28_IOMMU_MAR1_HI, mar28_hi); + sic_write_node_nbsr_reg(node, ED29_IOMMU_MAR1_LO, mar29_lo); + sic_write_node_nbsr_reg(node, ED29_IOMMU_MAR1_HI, mar29_hi); + 
sic_write_node_nbsr_reg(node, ED30_IOMMU_MAR1_LO, mar30_lo); + sic_write_node_nbsr_reg(node, ED30_IOMMU_MAR1_HI, mar30_hi); + sic_write_node_nbsr_reg(node, ED31_IOMMU_MAR1_LO, mar31_lo); + sic_write_node_nbsr_reg(node, ED31_IOMMU_MAR1_HI, mar31_hi); + break; + } + + pr_debug("hw_event %px: set_cnt %lld\n", hwc, val); +} + + +static struct e2k_uncore_reg_ops iommu_reg_ops = { + .get_cnt = get_iommu_str_cnt, + .set_cfg = set_iommu_str_cfg, + .set_cnt = set_iommu_str_cnt, +}; + +static u64 iommu_get_event(struct hw_perf_event *hwc) +{ + iommu_config_attr_t config = { .word = hwc->config }; + + return config.event; +} + + +static int iommu_validate_event(struct e2k_uncore *uncore, + struct hw_perf_event *hwc) +{ + iommu_config_attr_t config = { .word = hwc->config }; + + if (READ_IDR_REG().mdl != IDR_E2C3_MDL && config.iommu_mask && + (config.iommu_mask & ~1ull)) { + pr_info_ratelimited("uncore_iommu: IOMMU{26-31} registers exist only on e2c3\n"); + return -EINVAL; + } + + return 0; +} + +static int iommu_add_event(struct e2k_uncore *uncore, struct perf_event *event) +{ + iommu_config_attr_t config = { .word = event->hw.config }, config2; + int i, empty_slot = -1, used_counter = -1; + + /* validate against running counters */ + for (i = 0; i < uncore->num_counters; i++) { + struct perf_event *event2 = READ_ONCE(uncore->events[i]); + + if (!event2) { + empty_slot = i; + continue; + } + + AW(config2) = event2->hw.config; + used_counter = config2.counter; + } + + /* take the first available slot */ + if (empty_slot == -1) + return -ENOSPC; + + config.counter = !used_counter; + event->hw.config = AW(config); + + if (cmpxchg(&uncore->events[empty_slot], NULL, event) != NULL) + return -ENOSPC; + + event->hw.idx = empty_slot; + + return 0; +} + +int __init register_iommu_pmus() +{ + int i, counters = 2; + + for_each_online_node(i) { + struct e2k_uncore *uncore = kzalloc(sizeof(struct e2k_uncore) + + counters * sizeof(void *), GFP_KERNEL); + if (!uncore) + return -ENOMEM; + + 
uncore->type = E2K_UNCORE_IOMMU; + + uncore->pmu.event_init = e2k_uncore_event_init; + uncore->pmu.task_ctx_nr = perf_invalid_context; + uncore->pmu.add = e2k_uncore_add; + uncore->pmu.del = e2k_uncore_del; + uncore->pmu.start = e2k_uncore_start; + uncore->pmu.stop = e2k_uncore_stop; + uncore->pmu.read = e2k_uncore_read; + + uncore->get_event = iommu_get_event; + uncore->add_event = iommu_add_event; + uncore->validate_event = iommu_validate_event; + + uncore->reg_ops = &iommu_reg_ops; + uncore->num_counters = counters; + + uncore->node = i; + + uncore->valid_events = iommu_mcr_valid_events; + uncore->pmu.attr_groups = iommu_mcr_attr_group; + + snprintf(uncore->name, UNCORE_PMU_NAME_LEN, + "uncore_iommu_%d", i); + + e2k_uncore_iommu[i] = uncore; + perf_pmu_register(&uncore->pmu, uncore->name, -1); + } + + return 0; +} diff --git a/arch/e2k/kernel/perf_event/uncore_mc.c b/arch/e2k/kernel/perf_event/uncore_mc.c new file mode 100644 index 000000000000..e0ea9e78ad11 --- /dev/null +++ b/arch/e2k/kernel/perf_event/uncore_mc.c @@ -0,0 +1,309 @@ +#include +#include +#include +#include +#include +#include +#include + +static u8 mc_enabled[MAX_NUMNODES] __read_mostly; +static struct e2k_uncore *e2k_uncore_mc[MAX_NUMNODES]; + +typedef union { + struct { + u64 event : 8; + u64 counter : 1; + u64 mc_mask : 8; + u64 lb : 8; + u64 __unused : 39; + }; + u64 word; +} mc_config_attr_t; + +PMU_FORMAT_ATTR(event, "config:0-7"); +/* 1 bit reserved for software setting of used counter */ +PMU_FORMAT_ATTR(mc_mask, "config:9-16"); +PMU_FORMAT_ATTR(lb, "config:17-24"); + +static struct attribute *mc_format_attr[] = { + &format_attr_event.attr, + &format_attr_mc_mask.attr, + &format_attr_lb.attr, + NULL, +}; + +static struct e2k_uncore_valid_events mc_valid_events[] = { + { 0x0, 0x16 }, + { -1, -1} +}; + +static struct attribute_group mc_format_group = { + .name = "format", + .attrs = mc_format_attr, +}; + +static const struct attribute_group *mc_attr_group[] = { + &mc_format_group, + 
&e2k_cpumask_attr_group, + NULL, +}; + +static u64 get_mc_str_cnt(struct e2k_uncore *uncore, struct hw_perf_event *hwc) +{ + mc_config_attr_t config = { .word = hwc->config }; + u64 val, ch, counter = config.counter; + DECLARE_BITMAP(mc_mask, 8); + int node = uncore->node; + + mc_mask[0] = (config.mc_mask ?: 0xff) & mc_enabled[node]; + + val = 0; + for_each_set_bit(ch, mc_mask, 8) { + e2k_mc_mon_ctrext_t mc_mon_ctrext; + u32 mc_mon_ctr; + e2k_mc_ch_t mc_ch; + + AW(mc_ch) = 0; + mc_ch.n = ch; + + sic_write_node_nbsr_reg(node, MC_CH, AW(mc_ch)); + + do { + AW(mc_mon_ctrext) = sic_read_node_nbsr_reg(node, MC_MON_CTRext); + if (counter) + mc_mon_ctr = sic_read_node_nbsr_reg(node, MC_MON_CTR1); + else + mc_mon_ctr = sic_read_node_nbsr_reg(node, MC_MON_CTR0); + } while (AW(mc_mon_ctrext) != sic_read_node_nbsr_reg(node, MC_MON_CTRext)); + + val += ((u64) mc_mon_ctrext.cnt[counter] << 32UL) | (u64) mc_mon_ctr; + } + + pr_debug("hw_event %px: get_cnt %lld\n", hwc, val); + + return val; +} + +static void set_mc_str_cfg(struct e2k_uncore *uncore, + struct hw_perf_event *hwc, bool enable) +{ + mc_config_attr_t config = { .word = hwc->config }; + u64 ch, counter = config.counter; + DECLARE_BITMAP(mc_mask, 8); + e2k_mc_mon_ctl_t mon_ctl; + int node = uncore->node; + + mc_mask[0] = (config.mc_mask ?: 0xff) & mc_enabled[node]; + + for_each_set_bit(ch, mc_mask, 8) { + e2k_mc_ch_t mc_ch; + + AW(mc_ch) = 0; + mc_ch.n = ch; + + sic_write_node_nbsr_reg(node, MC_CH, AW(mc_ch)); + + if (enable) { + /* Two-step process: + * 1) Set mon_ctl.ld to load initial counters values + * 2) Clear mon_ctl.frz to start counting */ + AW(mon_ctl) = sic_read_node_nbsr_reg(node, MC_MON_CTL); + if (counter) + mon_ctl.ld1 = 1; + else + mon_ctl.ld0 = 1; + sic_write_node_nbsr_reg(node, MC_MON_CTL, AW(mon_ctl)); + if (counter) + mon_ctl.ld1 = 0; + else + mon_ctl.ld0 = 0; + } else { + AW(mon_ctl) = sic_read_node_nbsr_reg(node, MC_MON_CTL); + } + if (counter) { + mon_ctl.frz1 = !enable; + mon_ctl.es1 = 
config.event; + mon_ctl.lb1 = config.lb; + } else { + mon_ctl.frz0 = !enable; + mon_ctl.es0 = config.event; + mon_ctl.lb0 = config.lb; + } + sic_write_node_nbsr_reg(node, MC_MON_CTL, AW(mon_ctl)); + + pr_debug("hw_event %px: set_cfg 0x%x, channel %lld\n", + hwc, AW(mon_ctl), ch); + } +} + +static void set_mc_str_cnt(struct e2k_uncore *uncore, + struct hw_perf_event *hwc, u64 val) +{ + mc_config_attr_t config = { .word = hwc->config }; + u64 write_val, ch, counter = config.counter; + int node = uncore->node; + DECLARE_BITMAP(mc_mask, 8); + e2k_mc_mon_ctrext_t mc_mon_ctrext; + u32 mc_mon_ctr; + + mc_mask[0] = (config.mc_mask ?: 0xff) & mc_enabled[node]; + + write_val = val; + for_each_set_bit(ch, mc_mask, 8) { + e2k_mc_ch_t mc_ch; + + AW(mc_ch) = 0; + mc_ch.n = ch; + + sic_write_node_nbsr_reg(node, MC_CH, AW(mc_ch)); + + AW(mc_mon_ctrext) = sic_read_node_nbsr_reg(node, MC_MON_CTRext); + mc_mon_ctrext.cnt[counter] = write_val >> 32; + mc_mon_ctr = write_val; + + sic_write_node_nbsr_reg(node, MC_MON_CTRext, AW(mc_mon_ctrext)); + if (counter) + sic_write_node_nbsr_reg(node, MC_MON_CTR1, mc_mon_ctr); + else + sic_write_node_nbsr_reg(node, MC_MON_CTR0, mc_mon_ctr); + + /* Set only one configured counter to passed value, + * it doesn't matter which one. 
*/ + write_val = 0; + } + + pr_debug("hw_event %px: set_cnt %lld\n", hwc, val); +} + + +static struct e2k_uncore_reg_ops mc_reg_ops = { + .get_cnt = get_mc_str_cnt, + .set_cfg = set_mc_str_cfg, + .set_cnt = set_mc_str_cnt, +}; + +static u64 mc_get_event(struct hw_perf_event *hwc) +{ + mc_config_attr_t config = { .word = hwc->config }; + + return config.event; +} + +static int mc_validate_event(struct e2k_uncore *uncore, + struct hw_perf_event *hwc) +{ + mc_config_attr_t config = { .word = hwc->config }; + u64 ch, event = config.event; + DECLARE_BITMAP(mc_mask, 8); + int node = uncore->node; + + if (config.lb && (event < 0xe || event > 0x15)) { + pr_info_ratelimited("uncore_mc: logical bank filter is not available for event %llu\n", + event); + return -EINVAL; + } + + mc_mask[0] = (config.mc_mask ?: 0xff) & mc_enabled[node]; + + /* All is good, read MC_STATUS to clear overflow bits */ + for_each_set_bit(ch, mc_mask, 8) { + e2k_mc_status_t mc_status; + e2k_mc_ch_t mc_ch; + + AW(mc_ch) = 0; + mc_ch.n = ch; + + sic_write_node_nbsr_reg(node, MC_CH, AW(mc_ch)); + + AW(mc_status) = sic_read_node_nbsr_reg(node, MC_STATUS); + + if (mc_status.ddrint_err || mc_status.phy_interrupt || + mc_status.phyccm_par_err || mc_status.ecc_err || + mc_status.bridge_par_err || mc_status.dfi_err || + mc_status.dmem_par_err) { + pr_alert("WARNING: When reading MC_STATUS to clear \"mon{0,1}_of\" bits, some other bits have been cleared too: 0x%x\n", + AW(mc_status)); + WARN_ON(1); + } + } + + return 0; +} + +static int mc_add_event(struct e2k_uncore *uncore, struct perf_event *event) +{ + mc_config_attr_t config = { .word = event->hw.config }, config2; + int i, empty_slot = -1, used_counter = -1; + + /* validate against running counters */ + for (i = 0; i < uncore->num_counters; i++) { + struct perf_event *event2 = READ_ONCE(uncore->events[i]); + + if (!event2) { + empty_slot = i; + continue; + } + + AW(config2) = event2->hw.config; + used_counter = config2.counter; + } + + /* take the first 
available slot */ + if (empty_slot == -1) + return -ENOSPC; + + config.counter = !used_counter; + event->hw.config = AW(config); + + if (cmpxchg(&uncore->events[empty_slot], NULL, event) != NULL) + return -ENOSPC; + + event->hw.idx = empty_slot; + + return 0; +} + +int __init register_mc_pmus() +{ + int i, counters = 2; + + for_each_online_node(i) { + e2k_hmu_mic_t hmu_mic; + struct e2k_uncore *uncore = kzalloc(sizeof(struct e2k_uncore) + + counters * sizeof(void *), GFP_KERNEL); + if (!uncore) + return -ENOMEM; + + uncore->type = E2K_UNCORE_MC; + + uncore->pmu.event_init = e2k_uncore_event_init; + uncore->pmu.task_ctx_nr = perf_invalid_context; + uncore->pmu.add = e2k_uncore_add; + uncore->pmu.del = e2k_uncore_del; + uncore->pmu.start = e2k_uncore_start; + uncore->pmu.stop = e2k_uncore_stop; + uncore->pmu.read = e2k_uncore_read; + + uncore->get_event = mc_get_event; + uncore->add_event = mc_add_event; + uncore->validate_event = mc_validate_event; + + uncore->reg_ops = &mc_reg_ops; + uncore->num_counters = counters; + + uncore->node = i; + AW(hmu_mic) = sic_read_node_nbsr_reg(i, HMU_MIC); + mc_enabled[i] = hmu_mic.mcen; + + uncore->valid_events = mc_valid_events; + uncore->pmu.attr_groups = mc_attr_group; + + snprintf(uncore->name, UNCORE_PMU_NAME_LEN, + "uncore_mc_%d", i); + + e2k_uncore_mc[i] = uncore; + perf_pmu_register(&uncore->pmu, uncore->name, -1); + } + + return 0; +} diff --git a/arch/e2k/kernel/perf_event/uncore_prepic.c b/arch/e2k/kernel/perf_event/uncore_prepic.c new file mode 100644 index 000000000000..690e89df6240 --- /dev/null +++ b/arch/e2k/kernel/perf_event/uncore_prepic.c @@ -0,0 +1,231 @@ +#include +#include +#include +#include +#include +#include +#include + +static struct e2k_uncore *e2k_uncore_prepic[MAX_NUMNODES]; + +typedef union { + struct { + u64 event : 8; + u64 counter : 1; + u64 id : 16; + u64 __unused : 39; + }; + u64 word; +} prepic_config_attr_t; + +PMU_FORMAT_ATTR(event, "config:0-7"); +/* 1 bit reserved for software setting of 
used counter */ +PMU_FORMAT_ATTR(id, "config:9-24"); + +static struct attribute *prepic_mcr_format_attr[] = { + &format_attr_event.attr, + &format_attr_id.attr, + NULL, +}; + +static struct e2k_uncore_valid_events prepic_mcr_valid_events[] = { + { 0, 7 }, + { -1, -1} +}; + +static struct attribute_group prepic_mcr_format_group = { + .name = "format", + .attrs = prepic_mcr_format_attr, +}; + +static const struct attribute_group *prepic_mcr_attr_group[] = { + &prepic_mcr_format_group, + &e2k_cpumask_attr_group, + NULL, +}; + +static u64 get_prepic_str_cnt(struct e2k_uncore *uncore, + struct hw_perf_event *hwc) +{ + u32 mar_lo = 0, mar_hi = 0; + prepic_config_attr_t config = { .word = hwc->config }; + int node = uncore->node; + u64 val; + + switch (config.counter) { + case 0: + do { + mar_hi = sic_read_node_nbsr_reg(node, PREPIC_MAR0_HI); + mar_lo = sic_read_node_nbsr_reg(node, PREPIC_MAR0_LO); + } while (mar_hi != sic_read_node_nbsr_reg(node, PREPIC_MAR0_HI)); + break; + case 1: + do { + mar_hi = sic_read_node_nbsr_reg(node, PREPIC_MAR1_HI); + mar_lo = sic_read_node_nbsr_reg(node, PREPIC_MAR1_LO); + } while (mar_hi != sic_read_node_nbsr_reg(node, PREPIC_MAR1_HI)); + break; + } + + val = ((u64) mar_hi << 32UL) | (u64) mar_lo; + + pr_debug("hw_event %px: get_cnt %lld\n", hwc, val); + + return val; +} + +static void modify_mid(int node, prepic_config_attr_t config) +{ + e2k_prepic_mid_t mid; + + AW(mid) = sic_read_node_nbsr_reg(node, PREPIC_MID); + if (config.counter) + mid.id1 = config.id; + else + mid.id0 = config.id; + sic_write_node_nbsr_reg(node, PREPIC_MID, AW(mid)); +} + +static void modify_mcr(int node, prepic_config_attr_t config, bool enable) +{ + e2k_prepic_mcr_t mcr; + + AW(mcr) = sic_read_node_nbsr_reg(node, PREPIC_MCR); + if (config.counter) { + mcr.vc1 = !!enable; + mcr.es1 = config.event; + } else { + mcr.vc0 = !!enable; + mcr.es0 = config.event; + } + sic_write_node_nbsr_reg(node, PREPIC_MCR, AW(mcr)); + + pr_debug("set_cfg 0x%x\n", AW(mcr)); +} + 
+static void set_prepic_str_cfg(struct e2k_uncore *uncore, + struct hw_perf_event *hwc, bool enable) +{ + prepic_config_attr_t config = { .word = hwc->config }; + int node = uncore->node; + + if (enable) { + modify_mid(node, config); + modify_mcr(node, config, enable); + } else { + modify_mcr(node, config, enable); + modify_mid(node, config); + } +} + +static void set_prepic_str_cnt(struct e2k_uncore *uncore, + struct hw_perf_event *hwc, u64 val) +{ + u32 mar_lo, mar_hi; + prepic_config_attr_t config = { .word = hwc->config }; + int node = uncore->node; + + mar_lo = val; + mar_hi = val >> 32; + + switch (config.counter) { + case 0: + sic_write_node_nbsr_reg(node, PREPIC_MAR0_LO, mar_lo); + sic_write_node_nbsr_reg(node, PREPIC_MAR0_HI, mar_hi); + break; + case 1: + sic_write_node_nbsr_reg(node, PREPIC_MAR1_LO, mar_lo); + sic_write_node_nbsr_reg(node, PREPIC_MAR1_HI, mar_hi); + break; + } + + pr_debug("hw_event %px: set_cnt %lld\n", hwc, val); +} + + +static struct e2k_uncore_reg_ops prepic_reg_ops = { + .get_cnt = get_prepic_str_cnt, + .set_cfg = set_prepic_str_cfg, + .set_cnt = set_prepic_str_cnt, +}; + +static u64 prepic_get_event(struct hw_perf_event *hwc) +{ + prepic_config_attr_t config = { .word = hwc->config }; + + return config.event; +} + +static int prepic_add_event(struct e2k_uncore *uncore, struct perf_event *event) +{ + prepic_config_attr_t config = { .word = event->hw.config }, config2; + int i, empty_slot = -1, used_counter = -1; + + /* validate against running counters */ + for (i = 0; i < uncore->num_counters; i++) { + struct perf_event *event2 = READ_ONCE(uncore->events[i]); + + if (!event2) { + empty_slot = i; + continue; + } + + AW(config2) = event2->hw.config; + used_counter = config2.counter; + } + + /* take the first available slot */ + if (empty_slot == -1) + return -ENOSPC; + + config.counter = !used_counter; + event->hw.config = AW(config); + + if (cmpxchg(&uncore->events[empty_slot], NULL, event) != NULL) + return -ENOSPC; + + 
event->hw.idx = empty_slot; + + return 0; +} + +int __init register_prepic_pmus() +{ + int i, counters = 2; + + for_each_online_node(i) { + struct e2k_uncore *uncore = kzalloc(sizeof(struct e2k_uncore) + + counters * sizeof(void *), GFP_KERNEL); + if (!uncore) + return -ENOMEM; + + uncore->type = E2K_UNCORE_PREPIC; + + uncore->pmu.event_init = e2k_uncore_event_init; + uncore->pmu.task_ctx_nr = perf_invalid_context; + uncore->pmu.add = e2k_uncore_add; + uncore->pmu.del = e2k_uncore_del; + uncore->pmu.start = e2k_uncore_start; + uncore->pmu.stop = e2k_uncore_stop; + uncore->pmu.read = e2k_uncore_read; + + uncore->get_event = prepic_get_event; + uncore->add_event = prepic_add_event; + + uncore->reg_ops = &prepic_reg_ops; + uncore->num_counters = counters; + + uncore->node = i; + + uncore->valid_events = prepic_mcr_valid_events; + uncore->pmu.attr_groups = prepic_mcr_attr_group; + + snprintf(uncore->name, UNCORE_PMU_NAME_LEN, + "uncore_prepic_%d", i); + + e2k_uncore_prepic[i] = uncore; + perf_pmu_register(&uncore->pmu, uncore->name, -1); + } + + return 0; +} diff --git a/arch/e2k/kernel/perf_event/uncore_sic.c b/arch/e2k/kernel/perf_event/uncore_sic.c new file mode 100644 index 000000000000..6be2a744e652 --- /dev/null +++ b/arch/e2k/kernel/perf_event/uncore_sic.c @@ -0,0 +1,619 @@ +#include +#include +#include +#include +#include +#include +#include + + +static struct e2k_uncore *e2k_uncore_ipcc[MAX_NUMNODES][SIC_IPCC_LINKS_COUNT]; +static struct e2k_uncore *e2k_uncore_iocc[MAX_NUMNODES][SIC_IO_LINKS_COUNT]; +static struct e2k_uncore *e2k_uncore_sic[MAX_NUMNODES]; + +typedef union { + struct { + u64 event : 9; + u64 l3_cpu : 11; + u64 l3_select_cpu : 1; + u64 __unused : 43; + }; + u64 word; +} sic_config_attr_t; + +/* event for MCR: 0xNMM, where N selects a counter + * and MM selects an event in the counter */ +PMU_FORMAT_ATTR(event, "config:0-8"); +PMU_FORMAT_ATTR(l3_cpu, "config:9-19"); +PMU_FORMAT_ATTR(l3_select_cpu, "config:20"); + +static struct attribute 
*e2k_mcm_wo_l3_format_attr[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute *e2k_mcm_with_l3_format_attr[] = { + &format_attr_event.attr, + &format_attr_l3_cpu.attr, + &format_attr_l3_select_cpu.attr, + NULL, +}; + +static struct attribute *e2k_uncore_format_attr[] = { + &format_attr_event.attr, + NULL, +}; + + +static u64 get_ipcc_str_cnt(struct e2k_uncore *uncore, + struct hw_perf_event *hwc) +{ + e2k_ipcc_str_struct_t reg; + int node = uncore->node; + int idx = uncore->idx_at_node; + + reg.E2K_IPCC_STR_reg = sic_get_ipcc_str(node, idx); + + /* see comment in set_ipcc_str_cnt() */ + return reg.E2K_IPCC_STR_ecnt + hwc->last_tag; +} + +static void set_ipcc_str_cfg(struct e2k_uncore *uncore, + struct hw_perf_event *hwc, bool enable) +{ + e2k_ipcc_str_struct_t reg; + int node = uncore->node; + int idx = uncore->idx_at_node; + sic_config_attr_t config = { .word = hwc->config }; + u64 event = config.event; + + reg.E2K_IPCC_STR_reg = sic_get_ipcc_str(node, idx); + + if (enable) + reg.E2K_IPCC_STR_ecf = event; + else + reg.E2K_IPCC_STR_ecf = 0; + + sic_set_ipcc_str(node, idx, reg.E2K_IPCC_STR_reg); +} + +static void set_ipcc_str_cnt(struct e2k_uncore *uncore, + struct hw_perf_event *hwc, u64 val) +{ + e2k_ipcc_str_struct_t reg; + int node = uncore->node; + int idx = uncore->idx_at_node; + + /* ipcc counter cannot be set, only cleared, so `val' + * is saved in memory instead of register */ + hwc->last_tag = val; + + reg.E2K_IPCC_STR_reg = sic_get_ipcc_str(node, idx); + + reg.E2K_IPCC_STR_eco = 1; + + sic_set_ipcc_str(node, idx, reg.E2K_IPCC_STR_reg); +} + +static struct e2k_uncore_reg_ops ipcc_reg_ops = { + .get_cnt = get_ipcc_str_cnt, + .set_cfg = set_ipcc_str_cfg, + .set_cnt = set_ipcc_str_cnt, +}; + +static u64 get_iocc_str_cnt(struct e2k_uncore *uncore, + struct hw_perf_event *hwc) +{ + e2k_io_str_struct_t reg; + int node = uncore->node; + int idx = uncore->idx_at_node; + + reg.E2K_IO_STR_reg = sic_get_io_str(node, idx); + + /* see comment in 
set_iocc_str_cnt() */ + return reg.E2K_IO_STR_rc + hwc->last_tag; +} + +#define E2K_IO_STR_EVENT_MASK 0xE0000000 +#define E2K_IO_STR_EVENT_SHIFT 29 +static void set_iocc_str_cfg(struct e2k_uncore *uncore, + struct hw_perf_event *hwc, bool enable) +{ + e2k_io_str_struct_t reg; + int node = uncore->node; + int idx = uncore->idx_at_node; + sic_config_attr_t config = { .word = hwc->config }; + u64 event = config.event; + + reg.E2K_IO_STR_reg = sic_get_io_str(node, idx); + reg.E2K_IO_STR_reg &= ~E2K_IO_STR_EVENT_MASK; + if (enable) + reg.E2K_IO_STR_reg |= event << E2K_IO_STR_EVENT_SHIFT; + + sic_set_io_str(node, idx, reg.E2K_IO_STR_reg); +} + +static void set_iocc_str_cnt(struct e2k_uncore *uncore, + struct hw_perf_event *hwc, u64 val) +{ + e2k_io_str_struct_t reg; + int node = uncore->node; + int idx = uncore->idx_at_node; + + /* iocc counter cannot be set, only cleared, so `val' + * is saved in memory instead of register */ + hwc->last_tag = val; + + reg.E2K_IO_STR_reg = sic_get_io_str(node, idx); + reg.E2K_IO_STR_rcol = 1; + + sic_set_io_str(node, idx, reg.E2K_IO_STR_reg); +} + +static struct e2k_uncore_reg_ops iocc_reg_ops = { + .get_cnt = get_iocc_str_cnt, + .set_cfg = set_iocc_str_cfg, + .set_cnt = set_iocc_str_cnt, +}; + +enum { + MCM0 = 0, + MCM1, +}; + +static u64 get_sic_str_cnt(struct e2k_uncore *uncore, struct hw_perf_event *hwc) +{ + e2k_sic_mar_lo_t mar_lo = 0; + e2k_sic_mar_hi_t mar_hi = 0; + sic_config_attr_t config = { .word = hwc->config }; + u64 val, event = config.event; + int node = uncore->node; + + switch (event >> 8) { + case MCM0: + do { + mar_hi = sic_read_node_nbsr_reg(node, SIC_sic_mar0_hi); + mar_lo = sic_read_node_nbsr_reg(node, SIC_sic_mar0_lo); + } while (mar_hi != sic_read_node_nbsr_reg(node, SIC_sic_mar0_hi)); + break; + case MCM1: + do { + mar_hi = sic_read_node_nbsr_reg(node, SIC_sic_mar1_hi); + mar_lo = sic_read_node_nbsr_reg(node, SIC_sic_mar1_lo); + } while (mar_hi != sic_read_node_nbsr_reg(node, SIC_sic_mar1_hi)); + break; + } + 
+ val = ((u64) mar_hi << 32UL) | (u64) mar_lo; + + pr_debug("hw_event %px: get_cnt %lld\n", hwc, val); + + return val; +} + +static void set_sic_str_cfg(struct e2k_uncore *uncore, + struct hw_perf_event *hwc, bool enable) +{ + e2k_sic_mcr_struct_t mcr_reg; + int node = uncore->node; + sic_config_attr_t config = { .word = hwc->config }; + u64 event = config.event; + + mcr_reg.E2K_SIC_MCR_reg = sic_read_node_nbsr_reg(node, SIC_sic_mcr); + + if (E2K_UNCORE_HAS_SIC_L3) { + u64 cpu = config.l3_cpu; + + if (config.l3_select_cpu && cpu_present(cpu)) { + AS(mcr_reg).mcnmo = 0; + AS(mcr_reg).mcn = default_cpu_present_to_apicid(cpu); + } else { + AS(mcr_reg).mcnmo = 1; + } + } + + switch (event >> 8) { + case MCM0: + mcr_reg.E2K_SIC_MCR_v0 = !!enable; + mcr_reg.E2K_SIC_MCR_es0 = event & 0xff; + break; + case MCM1: + mcr_reg.E2K_SIC_MCR_v1 = !!enable; + mcr_reg.E2K_SIC_MCR_es1 = event & 0xff; + break; + } + + sic_write_node_nbsr_reg(node, SIC_sic_mcr, mcr_reg.E2K_SIC_MCR_reg); + + pr_debug("hw_event %px: set_cfg 0x%x\n", hwc, AW(mcr_reg)); +} + +static void set_sic_str_cnt(struct e2k_uncore *uncore, + struct hw_perf_event *hwc, u64 val) +{ + e2k_sic_mar_lo_t mar_lo; + e2k_sic_mar_hi_t mar_hi; + sic_config_attr_t config = { .word = hwc->config }; + u64 event = config.event; + int node = uncore->node; + + mar_lo = val; + mar_hi = val >> 32; + + switch (event >> 8) { + case MCM0: + sic_write_node_nbsr_reg(node, SIC_sic_mar0_lo, mar_lo); + sic_write_node_nbsr_reg(node, SIC_sic_mar0_hi, mar_hi); + break; + case MCM1: + sic_write_node_nbsr_reg(node, SIC_sic_mar1_lo, mar_lo); + sic_write_node_nbsr_reg(node, SIC_sic_mar1_hi, mar_hi); + break; + } + + pr_debug("hw_event %px: set_cnt %lld\n", hwc, val); +} + +static struct e2k_uncore_reg_ops sic_reg_ops = { + .get_cnt = get_sic_str_cnt, + .set_cfg = set_sic_str_cfg, + .set_cnt = set_sic_str_cnt, +}; + +static struct e2k_uncore_event_desc ipcc_events[] = { + E2K_UNCORE_EVENT_DESC(phl_errors, "event=0x1"), + 
E2K_UNCORE_EVENT_DESC(retry_ops, "event=0x2"), + { /*end: all zeroes */ }, +}; + +static struct e2k_uncore_valid_events ipcc_valid_events[] = { + { 1, 2 }, + { -1, -1} +}; + +static struct attribute *e2k_ipcc_events_attrs[] = { + &ipcc_events[0].attr.attr, + &ipcc_events[1].attr.attr, + NULL, +}; + +static const struct attribute_group e2k_ipcc_events_group = { + .name = "events", + .attrs = e2k_ipcc_events_attrs, +}; + +static const struct attribute_group e2k_ipcc_format_group = { + .name = "format", + .attrs = e2k_uncore_format_attr, +}; + +static const struct attribute_group *e2k_ipcc_attr_group[] = { + &e2k_ipcc_events_group, + &e2k_ipcc_format_group, + &e2k_cpumask_attr_group, + NULL, +}; + +static struct e2k_uncore_event_desc iocc_events[] = { + E2K_UNCORE_EVENT_DESC(busy, "event=0x1"), + E2K_UNCORE_EVENT_DESC(crc_err, "event=0x2"), + E2K_UNCORE_EVENT_DESC(time_out, "event=0x4"), + E2K_UNCORE_EVENT_DESC(cmn_rc, "event=0x7"), + { /*end: all zeroes */ }, +}; + +static struct e2k_uncore_valid_events iocc_valid_events[] = { + { 1, 2 }, + { 4, 4 }, + { 7, 7 }, + { -1, -1} +}; + + +static struct attribute *e2k_iocc_events_attrs[] = { + &iocc_events[0].attr.attr, + &iocc_events[1].attr.attr, + &iocc_events[2].attr.attr, + NULL, +}; + +static const struct attribute_group e2k_iocc_events_group = { + .name = "events", + .attrs = e2k_iocc_events_attrs, +}; + +static const struct attribute_group e2k_iocc_format_group = { + .name = "format", + .attrs = e2k_uncore_format_attr, +}; + +static const struct attribute_group *e2k_iocc_attr_group[] = { + &e2k_iocc_events_group, + &e2k_iocc_format_group, + &e2k_cpumask_attr_group, + NULL, +}; + +static struct e2k_uncore_event_desc sic_MCM_events[] = { + E2K_UNCORE_EVENT_DESC(mc_read, "event=0x0"), + E2K_UNCORE_EVENT_DESC(mc_write_local, "event=0x1"), + E2K_UNCORE_EVENT_DESC(mc_read_local_cores, "event=0x2"), + E2K_UNCORE_EVENT_DESC(mc_write, "event=0x100"), + E2K_UNCORE_EVENT_DESC(mc_read_local, "event=0x101"), + 
E2K_UNCORE_EVENT_DESC(mc_write_local_cores, "event=0x102"), + { /*end: all zeroes */ }, +}; + +static struct e2k_uncore_valid_events sic_MCM_e4c_valid_events[] = { + { 0x0, 0x8 }, + { 0x100, 0x106 }, + { -1, -1} +}; +static struct e2k_uncore_valid_events sic_MCM_e8c_valid_events[] = { + { 0x0, 0x5 }, + { 0x100, 0x105 }, + { 0x20, 0x3f }, + { 0x120, 0x13f }, + { -1, -1} +}; +static struct e2k_uncore_valid_events sic_MCM_e8c2_valid_events[] = { + { 0x0, 0x1b }, + { 0x100, 0x11b }, + { 0x20, 0x3f }, + { 0x120, 0x13f }, + { -1, -1} +}; + +static struct attribute *e2k_sic_MCM_events_attrs[] = { + &sic_MCM_events[0].attr.attr, + &sic_MCM_events[1].attr.attr, + &sic_MCM_events[2].attr.attr, + &sic_MCM_events[3].attr.attr, + &sic_MCM_events[4].attr.attr, + &sic_MCM_events[5].attr.attr, + NULL, +}; + +static const struct attribute_group e2k_sic_MCM_events_group = { + .name = "events", + .attrs = e2k_sic_MCM_events_attrs, +}; + +static struct attribute_group e2k_sic_MCM_format_group = { + .name = "format", +}; + +static const struct attribute_group *e2k_sic_MCM_attr_group[] = { + &e2k_sic_MCM_events_group, + &e2k_sic_MCM_format_group, + &e2k_cpumask_attr_group, + NULL, +}; + +static int is_l3_config(u64 config) +{ + if (!IS_MACHINE_E8C && !IS_MACHINE_E8C2) + return 0; + + return (config & 0xff) >= 0x20 && (config & 0xff) <= 0x3f; +} + +static u64 sic_get_event(struct hw_perf_event *hwc) +{ + sic_config_attr_t config = { .word = hwc->config }; + + return config.event; +} + +int __init register_ipcc_pmus() +{ + int node, cnt, counters = 1; + + for_each_online_node(node) + for (cnt = 0; cnt < SIC_IPCC_LINKS_COUNT; cnt++) { + struct e2k_uncore *uncore = kzalloc(sizeof(struct e2k_uncore) + + counters * sizeof(void *), GFP_KERNEL); + if (!uncore) + return -ENOMEM; + + uncore->type = E2K_UNCORE_IPCC; + + uncore->pmu.attr_groups = + (const struct attribute_group **) e2k_ipcc_attr_group; + uncore->pmu.task_ctx_nr = perf_invalid_context; + uncore->pmu.event_init = 
e2k_uncore_event_init; + uncore->pmu.add = e2k_uncore_add; + uncore->pmu.del = e2k_uncore_del; + uncore->pmu.start = e2k_uncore_start; + uncore->pmu.stop = e2k_uncore_stop; + uncore->pmu.read = e2k_uncore_read; + + uncore->get_event = sic_get_event; + + uncore->reg_ops = &ipcc_reg_ops; + uncore->num_counters = counters; + + uncore->node = node; + uncore->idx_at_node = cnt; + + uncore->valid_events = ipcc_valid_events; + + snprintf(uncore->name, UNCORE_PMU_NAME_LEN, "ipcc_%d_%d", node, cnt); + + e2k_uncore_ipcc[node][cnt] = uncore; + perf_pmu_register(&uncore->pmu, uncore->name, -1); + } + + return 0; +} + +int __init register_iocc_pmus() +{ + int node, cnt, counters = 1; + + for_each_online_node(node) + for (cnt = 0; cnt < SIC_IO_LINKS_COUNT; cnt++) { + struct e2k_uncore *uncore = kzalloc(sizeof(struct e2k_uncore) + + counters * sizeof(void *), GFP_KERNEL); + if (!uncore) + return -ENOMEM; + + uncore->type = E2K_UNCORE_IOCC; + + uncore->pmu.attr_groups = + (const struct attribute_group **) e2k_iocc_attr_group; + uncore->pmu.task_ctx_nr = perf_invalid_context, + uncore->pmu.event_init = e2k_uncore_event_init; + uncore->pmu.add = e2k_uncore_add; + uncore->pmu.del = e2k_uncore_del; + uncore->pmu.start = e2k_uncore_start; + uncore->pmu.stop = e2k_uncore_stop; + uncore->pmu.read = e2k_uncore_read; + + uncore->get_event = sic_get_event; + + uncore->reg_ops = &iocc_reg_ops; + uncore->num_counters = counters; + + uncore->node = node; + uncore->idx_at_node = cnt; + + uncore->valid_events = iocc_valid_events; + + snprintf(uncore->name, UNCORE_PMU_NAME_LEN, "iocc_%d_%d", node, cnt); + + e2k_uncore_iocc[node][cnt] = uncore; + perf_pmu_register(&uncore->pmu, uncore->name, -1); + } + + return 0; +} + + +static int sic_validate_event(struct e2k_uncore *uncore, + struct hw_perf_event *hwc) +{ + sic_config_attr_t config = { .word = hwc->config }; + + /* + * Check that proper cpu is selected + */ + if (E2K_UNCORE_HAS_SIC_L3 && config.l3_select_cpu) { + u64 cpu = config.l3_cpu, event 
= config.event; + + if (event < 32) { + pr_info_ratelimited("uncore_sic: L3 parameters specified for non-L3 event 0x%llx\n", + event); + return -EINVAL; + } + if (cpu >= nr_cpu_ids || !cpu_present(cpu)) { + pr_info_ratelimited("uncore_sic: cpu %lld does not exist\n", + cpu); + return -EINVAL; + } + if (cpu_to_node(cpu) != uncore->node) { + pr_info_ratelimited("uncore_sic: cpu %lld does not exist on node %d\n", + cpu, uncore->node); + return -EINVAL; + } + } + + return 0; +} + +static int sic_add_event(struct e2k_uncore *uncore, struct perf_event *event) +{ + sic_config_attr_t config = { .word = event->hw.config }; + u64 event_id = config.event; + int i; + + /* validate against running counters */ + for (i = 0; i < uncore->num_counters; i++) { + struct perf_event *event2 = READ_ONCE(uncore->events[i]); + sic_config_attr_t config2; + + if (!event2) + continue; + + AW(config2) = event2->hw.config; + + /* + * Check that there is no conflict with same counter in SIC + */ + if ((event_id >> 8) == (config2.event >> 8)) + return -ENOSPC; + + /* + * Check that there is no conflict with cpu selection in %MCR + */ + if (E2K_UNCORE_HAS_SIC_L3 && + is_l3_config(config.word) && is_l3_config(config2.word) && + (config.l3_select_cpu != config2.l3_select_cpu || + config.l3_cpu != config2.l3_cpu)) + return -ENOSPC; + } + + /* take the first available slot */ + for (i = 0; i < uncore->num_counters; i++) { + if (cmpxchg(&uncore->events[i], NULL, event) == NULL) { + event->hw.idx = i; + return 0; + } + } + + return -ENOSPC; +} + +int __init register_sic_pmus() +{ + int i, counters = 2; + + for_each_online_node(i) { + struct e2k_uncore *uncore = kzalloc(sizeof(struct e2k_uncore) + + counters * sizeof(void *), GFP_KERNEL); + if (!uncore) + return -ENOMEM; + + uncore->type = E2K_UNCORE_SIC; + + uncore->pmu.event_init = e2k_uncore_event_init, + uncore->pmu.task_ctx_nr = perf_invalid_context, + uncore->pmu.add = e2k_uncore_add; + uncore->pmu.del = e2k_uncore_del; + uncore->pmu.start = 
e2k_uncore_start; + uncore->pmu.stop = e2k_uncore_stop; + uncore->pmu.read = e2k_uncore_read; + + uncore->get_event = sic_get_event; + uncore->add_event = sic_add_event; + uncore->validate_event = sic_validate_event; + + uncore->reg_ops = &sic_reg_ops; + uncore->num_counters = counters; + + uncore->node = i; + + if (E2K_UNCORE_HAS_SIC_L3) + e2k_sic_MCM_format_group.attrs = e2k_mcm_with_l3_format_attr; + else + e2k_sic_MCM_format_group.attrs = e2k_mcm_wo_l3_format_attr; + + if (IS_MACHINE_E2S) + uncore->valid_events = sic_MCM_e4c_valid_events; + else if (IS_MACHINE_E8C) + uncore->valid_events = sic_MCM_e8c_valid_events; + else if (IS_MACHINE_E8C2) + uncore->valid_events = sic_MCM_e8c2_valid_events; + else + BUG(); + + uncore->pmu.attr_groups = e2k_sic_MCM_attr_group; + + snprintf(uncore->name, UNCORE_PMU_NAME_LEN, "sic_%d_MCM", i); + + e2k_uncore_sic[i] = uncore; + perf_pmu_register(&uncore->pmu, uncore->name, -1); + } + + return 0; +} diff --git a/arch/e2k/kernel/proc_context.c b/arch/e2k/kernel/proc_context.c new file mode 100644 index 000000000000..f1c969a79286 --- /dev/null +++ b/arch/e2k/kernel/proc_context.c @@ -0,0 +1,1856 @@ +/* + * Copyright 2016 (C) MCST + * + * Makecontext/freecontext implementation for Elbrus architecture + * + * 1. Every context has a user hardware stack associated with it. + * Those stacks are organized in a hash table with "struct hw_context" + * as entries. + * + * 2. Contexts are a property of a process so the hash table is located + * in 'mm_struct' structure (mm->context.hw_contexts). + * + * 3. There can be multiple contexts in userspace (think "struct ucontext") + * associated with the same hardware stack (think "struct hw_context"), + * and we have to be able to find corresponding hw_context from ucontext. + * To do that we need some key which will uniquely identify "struct hw_context". + * + * Forked child must be able to just copy kernel contexts without modifying + * userspace. 
Thus using kernel pointers as a key is out of question. + * So we will use whatever ends up in "pt_regs->stacks.sbr" as a key with + * one caveat: we skip altstack entries entirely and find the first sbr. + * + * 4. Stacks that are in use have hw_context->state set to prevent them + * from being freed under our feet. This state is checked to make sure + * that setcontext/swapcontext is only possible to an unused stack. + * + * 5. When we switch to a context that is on current hardware stack, we + * do a longjmp to a saved location. The same limitations as for setjmp/longjmp + * apply. + * + * When we switch to a context that is on another hardware stack, we + * first save current context and switch all registers, then check if + * stack unwinding is necessary (and do a longjmp if it is). + * + * 6. When context created by makecontext() exits it should return + * to the kernel trampoline which will switch to kernel data stack + * and then switch to the context mentioned in uc_link or call do_exit(). + * + * 7. The original context from main() is not in the hash table, but we + * have to put it there on the first switch. + * + * 8. There are 2 ways defined in POSIX to save a context: getcontext() + * and swapcontext(). So on e2k user application calls into glibc which + * in turn makes a system call into kernel, and %cr registers contain + * information about glibc's frame and not the application's one. + * + * To work around this: + * - sys_swapcontext will save %cr registers from the previous user's frame; + * - fast_sys_getcontext does not save %cr registers, instead it is done in + * glibc (because there is no performant way to do so in a fast syscall). + * + * 9. Synchronization is based on Documentation/RCU/rcuref.txt, pattern C, + * with a twist: we need to mark current context "busy" so that no other + * thread will use it simultaneously with current thread. 
+ +enum {STATE_BIAS = 1U, ALIVE_BIAS=1U<<16}; +enum { + HWC_STATE_READY = 0U, + HWC_STATE_BUSY = 1U +}; + +union hw_context_lifetime { + refcount_t refcount; + struct { + u16 state; + u16 alive; + }; +} lifetime; + +1. add() { + alloc_object + ... + // For main context also set HWC_STATE_BUSY + el->lifetime.alive = 1; + el->lifetime.state = (main) ? HWC_STATE_BUSY : HWC_STATE_READY; + spin_lock(&list_lock); + add_element + spin_unlock(&list_lock); +} + +2. search_and_reference() { + rcu_read_lock(); + el = search_for_element + for (;;) { + old_state = cmpxchg(&el->lifetime.state, + HWC_STATE_READY, new_state); + if (likely(old_state == HWC_STATE_READY)) + break; + if (old_state == HWC_STATE_BUSY) { + el = NULL; + break; + } + + while (READ_ONCE(el->lifetime.state) == HWC_STATE_COPYING) + cpu_relax(); + } + ... + rcu_read_unlock(); + this_hw_context = el +} + +3. This will be called only for BUSY contexts +release_referenced() { + el = this_hw_context + this_hw_context = NULL + if (refcount_sub_and_test(state << HWC_STATE_SHIFT, &el->lifetime.refcount)) + kfree(el); + ... +} + +4. delete() { + spin_lock(&list_lock); + ... + remove_element + spin_unlock(&list_lock); + ... + call_rcu(&el->rcu_head, &element_free); + ... +} + +5. element_free() { + if (refcount_sub_and_test(HWC_ALIVE_BIAS, &el->lifetime.refcount)) + kfree(el); +} + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_PROTECTED_MODE +#include +#include +#include +#endif /* CONFIG_PROTECTED_MODE */ + +#define DEBUG_CTX_MODE 0 /* setcontext/swapcontext */ +#if DEBUG_CTX_MODE +#define DebugCTX(...) DebugPrint(DEBUG_CTX_MODE, ##__VA_ARGS__) +#else +#define DebugCTX(...) 
+#endif + + +static inline u64 context_current_key(void) +{ + struct pt_regs __user *u_regs = signal_pt_regs_last(); + + if (u_regs) { + u64 top; + + if (__get_user(top, &u_regs->stacks.top)) + return -EFAULT; + + return top; + } + + return current_thread_info()->u_stack.top; +} + +static u32 ctx_key_hashfn(const void *data, u32 len, u32 seed) +{ + return jhash2(data, FIELD_SIZEOF(struct hw_context, key) / sizeof(u32), + seed); +} + +static u32 ctx_obj_hashfn(const void *data, u32 len, u32 seed) +{ + const struct hw_context *ctx = data; + + return jhash2((const u32 *) &ctx->key, + sizeof(ctx->key) / sizeof(u32), seed); +} + +static inline int ctx_obj_cmpfn(struct rhashtable_compare_arg *arg, + const void *obj) +{ + const void *key = arg->key; + const struct hw_context *ctx = obj; + + return memcmp(&ctx->key, key, sizeof(ctx->key)); +} + +static const struct rhashtable_params hash_params = { + .key_len = FIELD_SIZEOF(struct hw_context, key), + .key_offset = offsetof(struct hw_context, key), + .head_offset = offsetof(struct hw_context, hash_entry), + .hashfn = &ctx_key_hashfn, + .obj_hashfn = &ctx_obj_hashfn, + .obj_cmpfn = &ctx_obj_cmpfn, + .automatic_shrinking = true, +}; + + +notrace noinline __interrupt __section(".entry.text") +void makecontext_trampoline_continue() +{ + if (TASK_IS_PROTECTED(current)) + DISABLE_US_CLW(); + + /* + * Switch to kernel stacks. 
+ */ + NATIVE_NV_WRITE_USBR_USD_REG_VALUE( + (u64) current->stack + KERNEL_C_STACK_SIZE, + AW(current_thread_info()->k_usd_hi), + AW(current_thread_info()->k_usd_lo)); + + /* + * Switch to %upsr for interrupts control + */ + DO_SAVE_UPSR_REG(current_thread_info()->upsr); + SET_KERNEL_UPSR_WITH_DISABLED_NMI(); + + E2K_JUMP(makecontext_trampoline_switched); +} + +static struct kmem_cache *hw_context_cache; + +static __init int hw_context_cache_init(void) +{ + hw_context_cache = KMEM_CACHE(hw_context, + SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT); + + return 0; +} +late_initcall(hw_context_cache_init); + +/** + * alloc_hw_context - allocate kernel stacks for a context + * @main_context - is this main thread's context? + * @u_stk_size - user data stack size + * + * For the main thread stacks are already allocated and we only + * have to save their parameters. + */ +static struct hw_context *alloc_hw_context(bool main_context, size_t u_stk_size, + unsigned long key) +{ + struct hw_context *ctx; + hw_stack_t *hw_stacks; + + ctx = kmem_cache_zalloc(hw_context_cache, GFP_KERNEL); + if (!ctx) + return NULL; + + ctx->key = key; + /* For main context it is referenced from the beginning + * (see search_and_reference() above) */ + ctx->lifetime.alive = 1; + ctx->lifetime.state = (main_context) ? 
HWC_STATE_BUSY : HWC_STATE_READY; + + INIT_LIST_HEAD(&ctx->ti.old_u_pcs_list); + INIT_LIST_HEAD(&ctx->ti.getsp_adj); + + if (main_context) { + /* + * Stacks have been allocated already + */ + ctx->ti.u_hw_stack = current_thread_info()->u_hw_stack; + + DebugCTX("ctx %lx allocated for main\n", ctx); + return ctx; + } + + hw_stacks = &ctx->ti.u_hw_stack; + define_user_hw_stacks_sizes(hw_stacks); + + if (alloc_user_hw_stacks(hw_stacks, get_hw_ps_user_size(hw_stacks), + get_hw_pcs_user_size(hw_stacks))) + goto free_context; + + DebugCTX("allocated ctx %lx with key=0x%lx and user stacks p: %px, pc: %px\n", + ctx, key, GET_PS_BASE(hw_stacks), GET_PCS_BASE(hw_stacks)); + + return ctx; + +free_context: + kmem_cache_free(hw_context_cache, ctx); + + DebugCTX("failed\n"); + return NULL; +} + +/** + * free_hw_context - free kernel stacks + * @ctx - context to free + * @ctx_in_use - free "struct hw_context" but keep the context itself + * @keep_hw_stacks - set if we want to keep hardware stacks mapped + */ +static void free_hw_context(struct hw_context *ctx, bool ctx_in_use, + bool keep_hw_stacks) +{ + if (!ctx_in_use) { + free_user_old_pc_stack_areas(&ctx->ti.old_u_pcs_list); + free_getsp_adj(&ctx->ti.getsp_adj); + + /* + * If the whole process is exiting we do not free + * address space - it is neither needed nor possible + * because by the time of destroy_context() call + * current->mm pointer has been set to NULL already. + */ + if (!keep_hw_stacks) { + free_signal_stack(&ctx->ti.signal_stack); + free_user_hw_stacks(&ctx->ti.u_hw_stack); + } + } + + DebugCTX("ctx %lx freed (%s stack areas, %s stack memory)\n", + ctx, ctx_in_use ? "without" : "with", + keep_hw_stacks ? 
"without" : "with"); + + kmem_cache_free(hw_context_cache, ctx); +} + +static inline int take_reference(struct hw_context *ctx, u16 new_state) +{ + /* search_and_reference() (see above) */ + u16 old_state; + int ret; + + for (;;) { + old_state = cmpxchg(&ctx->lifetime.state, + HWC_STATE_READY, new_state); + if (likely(old_state == HWC_STATE_READY)) { + ret = 0; + break; + } + if (old_state == HWC_STATE_BUSY) { + ret = -EBUSY; + break; + } + + /* Wait for fork() to finish copying this context */ + while (READ_ONCE(ctx->lifetime.state) == HWC_STATE_COPYING) + cpu_relax(); + } + + return ret; +} + +static inline int release_reference(struct hw_context *ctx, u16 ref_state) +{ + int ret; + + /* release_referenced() (see above) */ + ret = refcount_sub_and_test(ref_state << HWC_STATE_SHIFT, + &ctx->lifetime.refcount); + if (unlikely(ret)) + free_hw_context(ctx, false, false); + + return ret; +} + + +static void context_free_work_fn(struct work_struct *work) +{ + struct hw_context *ctx = container_of(work, typeof(*ctx), work); + struct mm_struct *mm = ctx->mm; + + use_mm(mm); + free_hw_context(ctx, false, false); + unuse_mm(mm); + + mmput(mm); +} + +static void context_free_rcu(struct rcu_head *head) +{ + struct hw_context *ctx = container_of(head, typeof(*ctx), rcu_head); + + if (refcount_sub_and_test(HWC_ALIVE_BIAS, &ctx->lifetime.refcount)) { + INIT_WORK(&ctx->work, context_free_work_fn); + /* Use system_long_wq to avoid slowing system down too much */ + queue_work(system_long_wq, &ctx->work); + } else { + mmput_async(ctx->mm); + } +} + +static void context_free(struct hw_context *ctx) +{ + /* + * element_free() (see above). 
+ * + * For performance reasons (we do not want to wait for a grace period + * in a user thread) the context is freed as follows: + * 1) rcu kthread waits for the grace period to end and wakes a kworker + * (because it's not possible to unmap user hardware stacks from + * softirq context which is used by call_rcu()) + * 2) kworker thread will unmap user hardware stacks and free kernel + * memory. + * 3) In the unlikely case that the context is still in use by some + * other thread it will be freed by that thread instead. + */ + mmget(current->mm); + ctx->mm = current->mm; + call_rcu(&ctx->rcu_head, &context_free_rcu); +} + +void hw_context_deactivate_mm(struct task_struct *dead_task) +{ + struct thread_info *ti = task_thread_info(dead_task); + mm_context_t *mm_context = &dead_task->mm->context; + struct hw_context *ctx; + + if (!ti->this_hw_context) + return; + + /* + * After thread exits, remove corresponding context from the hash table. + */ + ctx = ti->this_hw_context; + ti->this_hw_context = NULL; + + if (WARN_ON_ONCE(ctx->lifetime.state != HWC_STATE_BUSY)) + return; + + if (!release_reference(ctx, HWC_STATE_BUSY)) { + /* delete() (see above) */ + + /* freecontext() hasn't been called yet, so remove + * the context ourselves. Be careful of concurrent + * freecontext() execution. 
*/ + if (rhashtable_remove_fast(&mm_context->hw_contexts, + &ctx->hash_entry, hash_params)) { + pr_info_ratelimited("%s [%d]: context in use was found to be freed by freecontext_e2k()\n", + current->comm, current->pid); + return; + } + + context_free(ctx); + } +} + +static int copy_context(struct task_struct *p, + struct hw_context *dst, const struct hw_context *src) +{ + int ret; + + memcpy(dst, src, sizeof(*dst)); + + INIT_LIST_HEAD(&dst->ti.old_u_pcs_list); + INIT_LIST_HEAD(&dst->ti.getsp_adj); + dst->lifetime.alive = 1; + if (src == current_thread_info()->this_hw_context) + dst->lifetime.state = HWC_STATE_BUSY; + else + dst->lifetime.state = HWC_STATE_READY; + + ret = __copy_old_u_pcs_list(&dst->ti.old_u_pcs_list, + &src->ti.old_u_pcs_list); + if (ret) + return ret; + + ret = __copy_getsp_adj(&dst->ti.getsp_adj, &src->ti.getsp_adj); + if (ret) + return ret; + + DebugCTX("context 0x%lx copied to 0x%lx, key 0x%llx\n", + src, dst, src->key); + return 0; +} + +static void hw_context_destroy_one(void *ptr, void *unused) +{ + DebugCTX("ctx %lx free on exit\n", ptr); + + /* + * No 'mm' at this point so don't try to free user stacks + */ + free_hw_context((struct hw_context *) ptr, false, true); +} + +/** + * hw_contexts_init - called on process creation to prepare contexts hash table + * @mm - mm that is being created + */ +int hw_contexts_init(struct task_struct *p, mm_context_t *mm_context, + bool is_fork) +{ + struct hw_context *ctx; + struct rhashtable_iter iter; + int ret; + + ret = rhashtable_init(&mm_context->hw_contexts, &hash_params); + if (ret || !is_fork) + return ret; + + /* + * Copy all contexts on fork + */ + rhashtable_walk_enter(¤t->mm->context.hw_contexts, &iter); + + do { + rhashtable_walk_start(&iter); + + while ((ctx = rhashtable_walk_next(&iter)) && !IS_ERR(ctx)) { + struct hw_context *new; + + ret = take_reference(ctx, HWC_STATE_COPYING); + if (ret) + continue; + + rhashtable_walk_stop(&iter); + + new = kmem_cache_alloc(hw_context_cache, 
GFP_KERNEL); + if (!new) { + ret = -ENOMEM; + goto error_drop_reference; + } + + ret = copy_context(p, new, ctx); + if (ret) { + free_hw_context(new, false, true); + goto error_drop_reference; + } + + ret = rhashtable_lookup_insert_key( + &mm_context->hw_contexts, &new->key, + &new->hash_entry, hash_params); + if (ret) { + free_hw_context(new, false, true); + if (ret != -EEXIST) + goto error_drop_reference; + } + + (void) release_reference(ctx, HWC_STATE_COPYING); + + rhashtable_walk_start(&iter); + } + + rhashtable_walk_stop(&iter); + } while (cond_resched(), ctx == ERR_PTR(-EAGAIN)); + + rhashtable_walk_exit(&iter); + + /* + * Copy current context if it exists + */ + if (current_thread_info()->this_hw_context) { + struct hw_context *new = kmem_cache_alloc(hw_context_cache, + GFP_KERNEL); + if (!new) { + ret = -ENOMEM; + goto error; + } + + ret = copy_context(p, new, + current_thread_info()->this_hw_context); + if (ret) { + free_hw_context(new, false, true); + goto error; + } + + ret = rhashtable_lookup_insert_key( + &mm_context->hw_contexts, &new->key, + &new->hash_entry, hash_params); + if (ret) { + free_hw_context(new, false, true); + if (ret != -EEXIST) + goto error; + } + + task_thread_info(p)->this_hw_context = new; + } + + return 0; + + +error_drop_reference: + (void) release_reference(ctx, HWC_STATE_COPYING); + + rhashtable_walk_exit(&iter); + +error: + rhashtable_free_and_destroy(&mm_context->hw_contexts, + hw_context_destroy_one, NULL); + + DebugCTX("context copying on fork failed with %d\n", ret); + return ret; +} + +/** + * hw_contexts_destroy - called on process exit to free all contexts + * @mm - mm that is being freed + */ +void hw_contexts_destroy(mm_context_t *mm_context) +{ + /* By this point there is no one left to write to hash table */ + rhashtable_free_and_destroy(&mm_context->hw_contexts, + hw_context_destroy_one, NULL); +} + +static int set_user_ap(void *ptr, unsigned long addr, size_t len) +{ + unsigned long ts_flag; + e2k_ptr_t qptr; + + 
qptr = MAKE_AP(addr, len); + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + TRY_USR_PFAULT { + E2K_SET_TAGS_AND_STORE_QUADRO(qptr, ptr); + } CATCH_USR_PFAULT { + clear_ts_flag(ts_flag); + return -EFAULT; + } END_USR_PFAULT + clear_ts_flag(ts_flag); + + return 0; +} + +__always_inline /* For performance since some arguments are constants */ +static long do_swapcontext(void __user *oucp, const void __user *ucp, + bool save_prev_ctx, int format); + +long hw_context_lookup_pcsp_and_switch(e2k_pcsp_lo_t jmp_pcsp_lo, + e2k_usd_lo_t jmp_usd_lo) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + struct rhashtable_iter iter; + void __user *ucp = NULL; + struct hw_context *ctx; + u64 stack_top; + unsigned long stack_ptr; + + /* + * Fastpath: try to guess key from jmp_usd_lo + */ + down_read(&mm->mmap_sem); + stack_ptr = AS(jmp_usd_lo).base; + vma = find_vma_intersection(current->mm, stack_ptr, stack_ptr + 1); + stack_top = (vma) ? vma->vm_end : 0; + up_read(&mm->mmap_sem); + + if (stack_top) { + rcu_read_lock(); + + ctx = rhashtable_lookup(&mm->context.hw_contexts, + &stack_top, hash_params); + if (ctx) { + unsigned long base, top, delta; + + base = (unsigned long) ctx->ti.u_hw_stack.pcs.base; + top = base + ctx->ti.u_hw_stack.pcs.size; + if (__find_in_old_u_pcs_list(AS(jmp_pcsp_lo).base, + &delta, base, top, + &ctx->ti.old_u_pcs_list)) { + ctx = NULL; + } else { + ucp = ctx->ucp; + } + } + + rcu_read_unlock(); + + if (ctx) + goto found; + } + + /* + * Slowpath: duly search through all contexts if guessing didn't work + */ + + rhashtable_walk_enter(&mm->context.hw_contexts, &iter); + + do { + rhashtable_walk_start(&iter); + + while ((ctx = rhashtable_walk_next(&iter)) && + !IS_ERR(ctx)) { + unsigned long base, top, delta; + + base = (unsigned long) ctx->ti.u_hw_stack.pcs.base; + top = base + ctx->ti.u_hw_stack.pcs.size; + if (!__find_in_old_u_pcs_list(AS(jmp_pcsp_lo).base, + &delta, base, top, + &ctx->ti.old_u_pcs_list)) { + ucp = ctx->ucp; + break; + } 
+ } + + rhashtable_walk_stop(&iter); + } while (cond_resched(), ctx == ERR_PTR(-EAGAIN)); + + rhashtable_walk_exit(&iter); + + if (!ctx) { + SIGDEBUG_PRINT("SIGKILL. longjmp(): corrupted setjmp_buf\n"); + force_sig(SIGKILL); + return -EINVAL; + } else if (WARN_ON_ONCE(IS_ERR(ctx))) + return -EINVAL; + +found: + if (!ucp) { + SIGDEBUG_PRINT("SIGKILL. longjmp(): trying to longjmp to an unsaved context (it was left through longjmp()/setcontext() instead of swapcontext())\n"); + force_sig(SIGKILL); + return -EINVAL; + } + + DebugCTX("found ctx 0x%lx with key 0x%llx for pcsp 0x%llx\n", + ctx, ctx->key, AW(jmp_pcsp_lo)); + + return do_swapcontext(NULL, ucp, false, ctx->ptr_format); +} + +struct longjmp_regs { + e2k_cr0_hi_t cr0_hi; + e2k_cr1_lo_t cr1_lo; + e2k_cr1_hi_t cr1_hi; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; +}; + + +/** + * makecontext_prepare_user_stacks - set up all stacks for a user function execution + * @ctx: hardware context + * @func: user function + * @args_size: size of all arguments + * @args: pointer to arguments + * @u_stk_base: user data stack base + * @u_stk_size: user data stack size + * @protected: protected mode execution + * + * The first frame in the context is set to point to a kernel function + * which will handle return from @func, and the second frame points to + * @func. + */ +static int makecontext_prepare_user_stacks(struct longjmp_regs *user_regs, + struct hw_context *ctx, void (*func)(void), + u64 args_size, void __user *args, + void *u_stk_base, size_t u_stk_size, bool protected) +{ + struct pt_regs *regs = current_pt_regs(); + e2k_stacks_t stacks; + e2k_mem_crs_t __user *cs_frames; + e2k_mem_crs_t crs_trampoline, crs_user; + void __user *ps_frame; + u64 args_registers_size, args_stack_size, func_frame_size; + unsigned long ts_flag, func_frame_ptr; + int ret, i; + + if (ALIGN(args_size, 16) + (protected ? 
16 : 0) > u_stk_size) + return -EINVAL; + + AW(stacks.pcsp_lo) = 0; + AS(stacks.pcsp_lo).base = (u64) GET_PCS_BASE(&ctx->ti.u_hw_stack); + AS(stacks.pcsp_lo).rw = 3; + AS(stacks.pcsp_hi).size = get_hw_pcs_user_size(&ctx->ti.u_hw_stack); + + AW(stacks.psp_lo) = 0; + AS(stacks.psp_lo).base = (u64) GET_PS_BASE(&ctx->ti.u_hw_stack); + AS(stacks.psp_lo).rw = 3; + AS(stacks.psp_hi).size = get_hw_ps_user_size(&ctx->ti.u_hw_stack); + + /* + * Leave space for trampoline's frame so that there is space + * for the user function to return to _and_ for one empty + * frame which is needed for return trick to work in + * user_hw_stacks_restore(). + */ + AS(stacks.pcsp_hi).ind = 3 * SZ_OF_CR; + + /* + * And this is space for user function and makecontext_trampoline() + */ + AS(stacks.psp_hi).ind = (protected ? 16 : 8) * EXT_4_NR_SZ; + + ps_frame = GET_PS_BASE(&ctx->ti.u_hw_stack) + (protected ? 8 : 4) * EXT_4_NR_SZ; + + /* + * Calculate user function frame's parameters. + */ + if (protected) { + args_registers_size = min(args_size, (u64) 128 - 16); + /* Data stack must be 16-bytes aligned. */ + func_frame_size = ALIGN(args_size, 16) + 16; + } else { + args_registers_size = min(args_size, 64ULL); + /* Data stack must be 16-bytes aligned. 
*/ + func_frame_size = ALIGN(args_size, 16); + } + args_stack_size = args_size - args_registers_size; + func_frame_ptr = (unsigned long) u_stk_base + u_stk_size + - func_frame_size; + if (!access_ok(func_frame_ptr, func_frame_size)) + return -EFAULT; + DebugCTX("arguments: base 0x%lx, size %lld (regs %lld + stack %lld)\n", + args, args_size, args_registers_size, args_stack_size); + + stacks.top = (u64) u_stk_base + u_stk_size; + AS(stacks.usd_hi).ind = 0; + AS(stacks.usd_hi).size = u_stk_size - func_frame_size; + if (protected) { + e2k_pusd_lo_t pusd_lo; + + /* Check that the stack does not cross 4Gb boundary */ + if (((u64) u_stk_base & ~0xffffffffULL) != + (stacks.top & ~0xffffffffULL)) { + DebugCTX("stack crosses 4Gb boundary\n"); + return -EINVAL; + } + + /* + * Set PSL to 2 (we must allow for two returns: + * first to user function and second to the trampoline) + */ + AW(pusd_lo) = 0; + AS(pusd_lo).base = func_frame_ptr; + AS(pusd_lo).rw = 3; + AS(pusd_lo).psl = 2; + + /* + * Set 'protected' bit + */ + AS(pusd_lo).p = 1; + + AW(stacks.usd_lo) = AW(pusd_lo); + + /* + * Put descriptor of user function frame in %qr0. + */ + if (set_user_ap(ps_frame, func_frame_ptr, args_size + 16)) + return -EFAULT; + ps_frame += EXT_4_NR_SZ; + } else { + AW(stacks.usd_lo) = 0; + AS(stacks.usd_lo).base = func_frame_ptr; + AS(stacks.usd_lo).rw = 3; + } + + /* + * Put arguments into registers and user data stack + */ + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + TRY_USR_PFAULT { + for (i = 0; i < args_registers_size / 16; i++) { + u64 reg1_offset; +#if DEBUG_CTX_MODE + u64 val_lo, val_hi; + u8 tag_lo, tag_hi; + load_qvalue_and_tagq((e2k_addr_t)(args + 16 * i), + &val_lo, &val_hi, &tag_lo, &tag_hi); + DebugCTX("register arguments: 0x%llx 0x%llx\n", + val_lo, val_hi); +#endif + + reg1_offset = (machine.native_iset_ver < E2K_ISET_V5) ? 
+ 8 : 16; + + if (protected) { + /* We have to check for SAP */ + u64 val_lo, val_hi; + u8 tag_lo, tag_hi; + e2k_sap_lo_t sap; + e2k_ap_lo_t ap; + + load_qvalue_and_tagq( + (e2k_addr_t)(args + 16 * i), + &val_lo, &val_hi, &tag_lo, &tag_hi); + if (((tag_hi << 4) | tag_lo) == ETAGAPQ && + ((val_lo & AP_ITAG_MASK) >> + AP_ITAG_SHIFT) == SAP_ITAG) { + /* SAP was passed, convert to AP + * for the new context since it has + * separate data stack. */ + AW(sap) = val_lo; + AW(ap) = 0; + AS(ap).itag = AP_ITAG; + AS(ap).rw = AS(sap).rw; + AS(ap).base = AS(sap).base + + ((u64)current->stack & + 0xFFFF00000000UL); + val_lo = AW(ap); + DebugCTX("\tfixed SAP: 0x%llx 0x%llx\n", + val_lo, val_hi); + } + /* FIXME: should be paravirtualized */ + __NATIVE_STORE_TAGGED_QWORD( + ps_frame + EXT_4_NR_SZ * i, + val_lo, val_hi, tag_lo, tag_hi, + reg1_offset); + } else { + /* FIXME: should be paravirtualized */ + NATIVE_MOVE_TAGGED_DWORD(args + 16 * i, + ps_frame + EXT_4_NR_SZ * i); + NATIVE_MOVE_TAGGED_DWORD(args + 16 * i + 8, + ps_frame + EXT_4_NR_SZ * i + + reg1_offset); + } + } + + if (2 * i < args_registers_size / 8) { +#if DEBUG_CTX_MODE + u64 val; + u8 tag; + + /* FIXME: should be paravirtualized */ + NATIVE_LOAD_VAL_AND_TAGD(args + 16 * i, val, tag); + DebugCTX("register arguments: 0x%llx\n", val); +#endif + /* FIXME: should be paravirtualized */ + NATIVE_MOVE_TAGGED_DWORD(args + 16 * i, + ps_frame + EXT_4_NR_SZ * i); + } + +#if DEBUG_CTX_MODE + for (i = 0; i + 1 < args_stack_size / 8; i += 2) { + u64 val_lo, val_hi; + u8 tag_lo, tag_hi; + load_qvalue_and_tagq((e2k_addr_t) + (args + args_registers_size + 8 * i), + &val_lo, &val_hi, &tag_lo, &tag_hi); + DebugCTX("stack arguments: 0x%llx 0x%llx\n", + val_lo, val_hi); + } +#endif + } CATCH_USR_PFAULT { + clear_ts_flag(ts_flag); + return -EFAULT; + } END_USR_PFAULT + clear_ts_flag(ts_flag); + + if (args_stack_size) { + DebugCTX("Copying stack arguments to 0x%lx\n", + (void *) func_frame_ptr + 64); + if (copy_in_user_with_tags( + (void 
*) func_frame_ptr + (protected ? 128 : 64), + args + args_registers_size, args_stack_size)) + return -EFAULT; + } + + /* + * Initialize thread_info + */ + ctx->ti.u_stack.bottom = (u64) u_stk_base; + ctx->ti.u_stack.size = u_stk_size; + ctx->ti.u_stack.top = (u64) u_stk_base + u_stk_size; +#ifdef CONFIG_PROTECTED_MODE + ctx->ti.g_list = 0; + ctx->ti.multithread_address = 0; + ctx->ti.lock = NULL; +#endif + ctx->ti.signal_stack.base = 0; + ctx->ti.signal_stack.size = 0; + ctx->ti.signal_stack.used = 0; + + /* + * Set chain stack for the trampoline and user function + */ + cs_frames = (e2k_mem_crs_t __user *) GET_PCS_BASE(&ctx->ti.u_hw_stack); + + /* makecontext_trampoline()->do_longjmp() expects parameter area + * size (cr1_lo.wbs/cr1_lo.wpsz) according to the C ABI: 4 or 8. */ + ret = chain_stack_frame_init(&crs_trampoline, protected ? + makecontext_trampoline_protected : makecontext_trampoline, + KERNEL_C_STACK_SIZE, E2K_KERNEL_PSR_DISABLED, + protected ? 8 : 4, protected ? 8 : 4, false); + ret = ret ?: chain_stack_frame_init(&crs_user, func, AS(stacks.usd_hi).size, + E2K_USER_INITIAL_PSR, protected ? 8 : 4, protected ? 8 : 4, true); + if (ret) + return ret; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __clear_user(&cs_frames[1], SZ_OF_CR); + ret = ret ?: __copy_to_user(&cs_frames[2], &crs_trampoline, SZ_OF_CR); + ret = ret ?: __copy_to_user(&cs_frames[3], &crs_user, SZ_OF_CR); + clear_ts_flag(ts_flag); + if (ret) + return -EFAULT; + + ctx->regs.crs = crs_user; + + /* do_swapcontext() loads values from ctx->prev_ctx, + * this way it's faster. 
*/ + ctx->prev_crs = crs_trampoline; + + /* + * Prepare new pt_regs + */ + ctx->regs.wd = regs->wd; + ctx->regs.kernel_entry = regs->kernel_entry; + + ctx->regs.stacks = stacks; + + /* + * Save parameters for jumping through sys_setcontext()->do_longjmp() + */ + user_regs->pcsp_lo = stacks.pcsp_lo; + user_regs->pcsp_hi = stacks.pcsp_hi; + user_regs->cr0_hi = ctx->prev_crs.cr0_hi; + user_regs->cr1_lo = ctx->prev_crs.cr1_lo; + user_regs->cr1_hi = ctx->prev_crs.cr1_hi; + + return 0; +} + +static inline struct hw_context *hw_context_lookup_and_get(u64 key, + mm_context_t *mm_context) +{ + struct hw_context *ctx; + + rcu_read_lock(); + + ctx = rhashtable_lookup(&mm_context->hw_contexts, &key, hash_params); + if (unlikely(!ctx)) { + ctx = ERR_PTR(-ESRCH); + } else { + int ret = take_reference(ctx, HWC_STATE_BUSY); + if (ret) + ctx = ERR_PTR(ret); + } + + rcu_read_unlock(); + + return ctx; +} + +static void makecontext_prepare_ucp_pointer(struct hw_context *ctx, + void __user *ucp, int format) +{ + struct ucontext_32 __user *ucp_32 = ucp; + struct ucontext __user *ucp_64 = ucp; + struct ucontext_prot __user *ucp_128 = ucp; + + ctx->ucp = ucp; + ctx->ptr_format = format; + if (format == CTX_32_BIT) + ctx->p_uc_link = &ucp_32->uc_link; + else if (format == CTX_64_BIT) + ctx->p_uc_link = &ucp_64->uc_link; + else + ctx->p_uc_link = &ucp_128->uc_link; +} + +/** + * makecontext_prepare_ucp_contents - initialize user structure + * @ucp - structure to initialize + * @format, @sigsetsize, @key, @user_regs - values to initialize with + */ +static int makecontext_prepare_ucp_contents(void __user *ucp, int format, + int sigsetsize, u64 key, const struct longjmp_regs *user_regs) +{ + struct ucontext_32 __user *ucp_32 = ucp; + struct ucontext __user *ucp_64 = ucp; + struct ucontext_prot __user *ucp_128 = ucp; + e2k_fpcr_t fpcr; + e2k_fpsr_t fpsr; + e2k_pfpfr_t pfpfr; + int ret; + + /* + * Initialize user structure + */ + GET_FPU_DEFAULTS(fpsr, fpcr, pfpfr); + if (format == CTX_32_BIT) 
{ + ret = __clear_user(&ucp_32->uc_sigmask, sigsetsize); + ret |= __put_user(key, &ucp_32->uc_mcontext.sbr); + ret |= __put_user(AW(user_regs->cr0_hi), &ucp_32->uc_mcontext.cr0_hi); + ret |= __put_user(AW(user_regs->cr1_lo), &ucp_32->uc_mcontext.cr1_lo); + ret |= __put_user(AW(user_regs->cr1_hi), &ucp_32->uc_mcontext.cr1_hi); + ret |= __put_user(AW(user_regs->pcsp_lo), &ucp_32->uc_mcontext.pcsp_lo); + /* Nasty hack: this is a new context so there is no point in + * calling do_swapcontext() -> do_longjmp(). So we manually + * subtract SZ_OF_CR here to avoid the call; it would've been + * done otherwise because the newly created context does not + * have glibc's swapcontext() function in it and the check + * before the call would return false positive. */ + ret |= __put_user(AW(user_regs->pcsp_hi) - SZ_OF_CR, &ucp_32->uc_mcontext.pcsp_hi); + ret |= __put_user(AW(fpcr), &ucp_32->uc_extra.fpcr); + ret |= __put_user(AW(fpsr), &ucp_32->uc_extra.fpsr); + ret |= __put_user(AW(pfpfr), &ucp_32->uc_extra.pfpfr); + } else if (format == CTX_64_BIT) { + ret = __clear_user(&ucp_64->uc_sigmask, sigsetsize); + ret |= __put_user(key, &ucp_64->uc_mcontext.sbr); + ret |= __put_user(AW(user_regs->cr0_hi), &ucp_64->uc_mcontext.cr0_hi); + ret |= __put_user(AW(user_regs->cr1_lo), &ucp_64->uc_mcontext.cr1_lo); + ret |= __put_user(AW(user_regs->cr1_hi), &ucp_64->uc_mcontext.cr1_hi); + ret |= __put_user(AW(user_regs->pcsp_lo), &ucp_64->uc_mcontext.pcsp_lo); + /* See comment about SZ_OF_CR for 32-bit mode */ + ret |= __put_user(AW(user_regs->pcsp_hi) - SZ_OF_CR, &ucp_64->uc_mcontext.pcsp_hi); + ret |= __put_user(AW(fpcr), &ucp_64->uc_extra.fpcr); + ret |= __put_user(AW(fpsr), &ucp_64->uc_extra.fpsr); + ret |= __put_user(AW(pfpfr), &ucp_64->uc_extra.pfpfr); + } else { /* CTX_128_BIT */ + ret = __clear_user(&ucp_128->uc_sigmask, sigsetsize); + ret |= __put_user(key, &ucp_128->uc_mcontext.sbr); + ret |= __put_user(AW(user_regs->cr0_hi), &ucp_128->uc_mcontext.cr0_hi); + ret |= 
__put_user(AW(user_regs->cr1_lo), &ucp_128->uc_mcontext.cr1_lo); + ret |= __put_user(AW(user_regs->cr1_hi), &ucp_128->uc_mcontext.cr1_hi); + ret |= __put_user(AW(user_regs->pcsp_lo), &ucp_128->uc_mcontext.pcsp_lo); + /* See comment about SZ_OF_CR for 32-bit mode */ + ret |= __put_user(AW(user_regs->pcsp_hi) - SZ_OF_CR, &ucp_128->uc_mcontext.pcsp_hi); + ret |= __put_user(AW(fpcr), &ucp_128->uc_extra.fpcr); + ret |= __put_user(AW(fpsr), &ucp_128->uc_extra.fpsr); + ret |= __put_user(AW(pfpfr), &ucp_128->uc_extra.pfpfr); + } + + return ret ? -EFAULT : 0; +} + +static int makecontext_prepare_ctx_and_ucp(struct hw_context *ctx, + void __user *ucp, int sigsetsize, int format, + void (*func)(void), u64 args_size, void __user *args, + void *u_stk_base, size_t u_stk_size, u64 key) +{ + struct longjmp_regs user_regs; + int ret; + + makecontext_prepare_ucp_pointer(ctx, ucp, format); + + ret = makecontext_prepare_user_stacks(&user_regs, ctx, func, args_size, args, + u_stk_base, u_stk_size, format == CTX_128_BIT); + if (ret) + return ret; + + ret = makecontext_prepare_ucp_contents(ucp, format, sigsetsize, key, &user_regs); + if (ret) + return ret; + + if (format == CTX_128_BIT) { + /* + * Fix global pointers before making the context available + */ + mark_all_global_sp(current_thread_info()->pt_regs, current->pid); + } + + return 0; +} + + +static long do_makecontext(void __user *ucp, void (*func)(void), + u64 args_size, void __user *args, int sigsetsize, int format) +{ + struct ucontext_32 __user *ucp_32 = ucp; + struct ucontext __user *ucp_64 = ucp; + struct ucontext_prot __user *ucp_128 = ucp; + void *u_stk_base; + size_t u_stk_size; + struct hw_context *ctx, *same_key_ctx; + mm_context_t *mm_context = ¤t->mm->context; + u64 key; + int ret; + + DebugCTX("ucp %lx started\n", ucp); + + ret = -EFAULT; + if (format == CTX_32_BIT && + access_ok(ucp, sizeof(struct ucontext_32))) { + u32 __u_stk_base; + + ret = __get_user(__u_stk_base, &ucp_32->uc_stack.ss_sp); + ret = (ret) ?: 
__get_user(u_stk_size, &ucp_32->uc_stack.ss_size); + if (ret) + return ret; + + u_stk_base = (void *) (unsigned long) __u_stk_base; + } else if (format == CTX_64_BIT && + access_ok(ucp, sizeof(struct ucontext))) { + ret = __get_user(u_stk_base, &ucp_64->uc_stack.ss_sp); + ret = (ret) ?: __get_user(u_stk_size, &ucp_64->uc_stack.ss_size); + if (ret) + return ret; + } else if (access_ok(ucp, sizeof(struct ucontext_prot))) { + /* CTX_128_BIT */ + e2k_ptr_t stack_ptr; + + ret = __copy_from_user(&stack_ptr, &ucp_128->uc_stack.ss_sp, 16); + ret = (ret) ?: __get_user(u_stk_size, &ucp_128->uc_stack.ss_size); + if (ret) + return ret; + + if (AS(stack_ptr).size < u_stk_size) + return -EINVAL; + + u_stk_base = (void *) E2K_PTR_PTR(stack_ptr, GET_SBR_HI()); + } + if (ret) + return ret; + + u_stk_size -= PTR_ALIGN(u_stk_base, E2K_ALIGN_STACK) - u_stk_base; + u_stk_base = PTR_ALIGN(u_stk_base, E2K_ALIGN_STACK); + u_stk_size = round_down(u_stk_size, E2K_ALIGN_STACK); + DebugCTX("user stack at %lx, size=%lx\n", u_stk_base, u_stk_size); + + if (!access_ok(u_stk_base, u_stk_size)) + return -EFAULT; + + if (sigsetsize != sizeof(sigset_t)) + return -EINVAL; + + key = (unsigned long) u_stk_base + u_stk_size; + ctx = hw_context_lookup_and_get(key, mm_context); + if (!IS_ERR(ctx)) { + /* Fast path: if context with the same key + * exists and is not used, we can just use it. */ + free_user_old_pc_stack_areas(&ctx->ti.old_u_pcs_list); + free_getsp_adj(&ctx->ti.getsp_adj); + free_signal_stack(&ctx->ti.signal_stack); + + memset(&ctx->regs, 0, sizeof(ctx->regs)); + memset(&ctx->prev_crs, 0, sizeof(ctx->prev_crs)); + + ret = makecontext_prepare_ctx_and_ucp(ctx, ucp, sigsetsize, format, + func, args_size, args, u_stk_base, u_stk_size, key); + if (ret) { + /* The context is broken since we have + * failed to reuse it, just drop it now. 
*/ + if (!rhashtable_remove_fast(&mm_context->hw_contexts, + &ctx->hash_entry, hash_params)) + context_free(same_key_ctx); + return ret; + } + + /* Successfully reused, now mark the context as ready */ + if (release_reference(ctx, HWC_STATE_BUSY)) { + pr_info_ratelimited("%s [%d]: context 0x%px was passed to makecontext_e2k() and freecontext_e2k() at the same time\n", + current->comm, current->pid, ucp); + return -EINVAL; + } + + DebugCTX("ctx %lx reused, key=%llx\n", ctx, key); + } else { + /* Slow path: duly allocate a new context */ + ctx = alloc_hw_context(false, u_stk_size, key); + if (!ctx) + return -ENOMEM; + + ret = makecontext_prepare_ctx_and_ucp(ctx, ucp, sigsetsize, format, + func, args_size, args, u_stk_base, u_stk_size, key); + if (ret) { + free_hw_context(ctx, false, false); + return ret; + } + + do { + rcu_read_lock(); + + /* add() (see above) */ + same_key_ctx = rhashtable_lookup_get_insert_key(&mm_context->hw_contexts, + &ctx->key, &ctx->hash_entry, hash_params); + if (IS_ERR(same_key_ctx)) { + rcu_read_unlock(); + free_hw_context(ctx, false, false); + return PTR_ERR(same_key_ctx); + } + + /* + * If there is a context with the same key then silently + * drop it. This is for programs like gccgo where it is + * hard to properly handle an error from makecontext(). 
+ */ + if (likely(!same_key_ctx)) { + rcu_read_unlock(); + } else { + /* delete() (see above) */ + ret = rhashtable_remove_fast(&mm_context->hw_contexts, + &same_key_ctx->hash_entry, hash_params); + rcu_read_unlock(); + + if (!ret) { + DebugCTX("removed duplicate ctx %lx with the same key\n", + same_key_ctx); + context_free(same_key_ctx); + } + } + } while (same_key_ctx); + + DebugCTX("added ctx %lx with key %llx\n", ctx, key); + } + + return 0; +} + + +long sys_makecontext(struct ucontext __user *ucp, void (*func)(void), + u64 args_size, void __user *args, int sigsetsize) +{ + return do_makecontext(ucp, func, args_size, args, + sigsetsize, CTX_64_BIT); +} + +#ifdef CONFIG_COMPAT +long compat_sys_makecontext(struct ucontext_32 __user *ucp, + void (*func)(void), u64 args_size, void __user *args, + int sigsetsize) +{ + return do_makecontext(ucp, func, args_size, args, + sigsetsize, CTX_32_BIT); +} +#endif + +#ifdef CONFIG_PROTECTED_MODE +long protected_sys_makecontext(struct ucontext_prot __user *ucp, + void (*func)(void), u64 args_size, + void __user *args, int sigsetsize) +{ + return do_makecontext(ucp, func, args_size, args, + sigsetsize, CTX_128_BIT); +} +#endif + +static long do_freecontext(u64 key) +{ + mm_context_t *mm_context = ¤t->mm->context; + struct hw_context *ctx; + long ret; + + /* delete() (see above) */ + rcu_read_lock(); + + ctx = rhashtable_lookup(&mm_context->hw_contexts, &key, hash_params); + if (!ctx) { + ret = -ENOENT; + } else if (ctx == current_thread_info()->this_hw_context) { + ret = -EBUSY; + } else { + ret = rhashtable_remove_fast(&mm_context->hw_contexts, + &ctx->hash_entry, hash_params); + } + + rcu_read_unlock(); + + DebugCTX("ctx %lx for key 0x%llx, ret %ld\n", ctx, key, ret); + if (ret) + return ret; + + context_free(ctx); + + return 0; +} + + +long sys_freecontext(struct ucontext __user *ucp) +{ + u64 free_key; + + if (get_user(free_key, &ucp->uc_mcontext.sbr)) + return -EFAULT; + + return do_freecontext(free_key); +} + +#ifdef 
CONFIG_COMPAT +long compat_sys_freecontext(struct ucontext_32 __user *ucp) +{ + u64 free_key; + + if (get_user(free_key, &ucp->uc_mcontext.sbr)) + return -EFAULT; + + return do_freecontext(free_key); +} +#endif + +#ifdef CONFIG_PROTECTED_MODE +long protected_sys_freecontext(struct ucontext_prot __user *ucp) +{ + u64 free_key; + + if (get_user(free_key, &ucp->uc_mcontext.sbr)) + return -EFAULT; + + return do_freecontext(free_key); +} +#endif + +/* + * Actually do the switch to another hardware stack described by ucp. + * + * Called from sys_setcontext() or sys_swapcontext(). + */ +__always_inline /* Just to copy less in user_hw_stacks_copy_full() */ +static void switch_hw_contexts(struct pt_regs *__restrict regs, + struct hw_context *__restrict prev_ctx, + struct hw_context *__restrict next_ctx, + e2k_fpcr_t fpcr, e2k_fpsr_t fpsr, e2k_pfpfr_t pfpfr) +{ + struct thread_info *ti = current_thread_info(); + e2k_mem_crs_t *__restrict k_crs = (e2k_mem_crs_t * __restrict) AS(ti->k_pcsp_lo).base; + e2k_pcshtp_t pcshtp = regs->stacks.pcshtp; + e2k_pshtp_t pshtp = regs->stacks.pshtp; + +#if DEBUG_CTX_MODE + DebugCTX("Before switching:\n"); + print_stack_frames(current, NULL, 0); +#endif + + /* + * 2) Fill the bottom of kernel stack with the next context's data + */ + + /* Now that we can no longer fail we can modify the next context. + * + * IMPORTANT: must not fail after this point, or will have to + * free the @next_ctx. */ + + raw_all_irq_disable(); + E2K_FLUSHC; + prev_ctx->prev_crs = k_crs[0]; + k_crs[0] = next_ctx->prev_crs; + k_crs[1] = next_ctx->regs.crs; + + /* + * 3) Switch thread_info + */ + prev_ctx->ti.u_stack = ti->u_stack; + prev_ctx->ti.u_hw_stack = ti->u_hw_stack; + prev_ctx->ti.signal_stack = ti->signal_stack; +#ifdef CONFIG_PROTECTED_MODE + prev_ctx->ti.g_list = ti->g_list; + prev_ctx->ti.multithread_address = ti->multithread_address; + prev_ctx->ti.lock = ti->lock; +#endif + + /* Not everything in pt_regs should be saved and restored + * (e.g. 
system call number), so copy only the necessary part. */ + prev_ctx->regs.stacks = regs->stacks; + prev_ctx->regs.crs = regs->crs; + prev_ctx->regs.wd = regs->wd; + /* 'kernel_entry' might be 0 if we get here through uc_link */ + prev_ctx->regs.kernel_entry = regs->kernel_entry; + + /* Function calls are allowed after this point (actually this + * should go after k_crs[] modification above but we put it + * here to not hinder compiler optimizations) */ + barrier(); + + /* FIXME: should be paravirtualized */ + NATIVE_CLEAR_DAM; + + list_splice_init(&ti->old_u_pcs_list, &prev_ctx->ti.old_u_pcs_list); + list_splice_init(&next_ctx->ti.old_u_pcs_list, &ti->old_u_pcs_list); + list_splice_init(&ti->getsp_adj, &prev_ctx->ti.getsp_adj); + list_splice_init(&next_ctx->ti.getsp_adj, &ti->getsp_adj); + + regs->stacks = next_ctx->regs.stacks; + regs->stacks.pcshtp = pcshtp; + regs->stacks.pshtp = pshtp; + regs->crs = next_ctx->regs.crs; + regs->wd = next_ctx->regs.wd; + /* 'kernel_entry' might be 0 if we get here through uc_link */ + regs->kernel_entry = next_ctx->regs.kernel_entry; + + WRITE_FPCR_REG(fpcr); + WRITE_FPSR_REG(fpsr); + WRITE_PFPFR_REG(pfpfr); + + ti->u_stack = next_ctx->ti.u_stack; + ti->u_hw_stack = next_ctx->ti.u_hw_stack; + ti->signal_stack = next_ctx->ti.signal_stack; +#ifdef CONFIG_PROTECTED_MODE + ti->g_list = next_ctx->ti.g_list; + ti->multithread_address = next_ctx->ti.multithread_address; + ti->lock = next_ctx->ti.lock; +#endif + +#if DEBUG_CTX_MODE + DebugCTX("After switching:\n"); + print_stack_frames(current, NULL, 0); +#endif + raw_all_irq_enable(); +} + +__always_inline +static void save_ctx_32_bit(struct ucontext_32 __user *__restrict oucp, + u64 prev_key, e2k_mem_crs_t *__restrict k_crs, + e2k_fpcr_t fpcr, e2k_fpsr_t fpsr, e2k_pfpfr_t pfpfr, + sigset_t current_blocked_sigset) +{ + const struct pt_regs *regs = current_thread_info()->pt_regs; + + *((u64 *) &oucp->uc_sigmask) = current_blocked_sigset.sig[0]; + oucp->uc_mcontext.sbr = prev_key; + 
oucp->uc_mcontext.cr0_hi = AW(k_crs->cr0_hi); + oucp->uc_mcontext.cr1_lo = AW(k_crs->cr1_lo); + oucp->uc_mcontext.cr1_hi = AW(k_crs->cr1_hi); + oucp->uc_mcontext.pcsp_lo = AW(regs->stacks.pcsp_lo); + /* We want stack to point to user frame that called us, + * not to the glibc glue */ + oucp->uc_mcontext.pcsp_hi = AW(regs->stacks.pcsp_hi) - SZ_OF_CR; + oucp->uc_extra.fpcr = AW(fpcr); + oucp->uc_extra.fpsr = AW(fpsr); + oucp->uc_extra.pfpfr = AW(pfpfr); +} + +__always_inline +static void save_ctx_64_bit(struct ucontext __user *__restrict oucp, + u64 prev_key, e2k_mem_crs_t *__restrict k_crs, + e2k_fpcr_t fpcr, e2k_fpsr_t fpsr, e2k_pfpfr_t pfpfr, + sigset_t current_blocked_sigset) +{ + const struct pt_regs *regs = current_thread_info()->pt_regs; + + *((u64 *) &oucp->uc_sigmask) = current_blocked_sigset.sig[0]; + oucp->uc_mcontext.sbr = prev_key; + oucp->uc_mcontext.cr0_hi = AW(k_crs->cr0_hi); + oucp->uc_mcontext.cr1_lo = AW(k_crs->cr1_lo); + oucp->uc_mcontext.cr1_hi = AW(k_crs->cr1_hi); + oucp->uc_mcontext.pcsp_lo = AW(regs->stacks.pcsp_lo); + /* We want stack to point to user frame that called us, + * not to the glibc glue */ + oucp->uc_mcontext.pcsp_hi = AW(regs->stacks.pcsp_hi) - SZ_OF_CR; + oucp->uc_extra.fpcr = AW(fpcr); + oucp->uc_extra.fpsr = AW(fpsr); + oucp->uc_extra.pfpfr = AW(pfpfr); +} + +__always_inline +static void save_ctx_128_bit(struct ucontext_prot __user *__restrict oucp, + u64 prev_key, e2k_mem_crs_t *__restrict k_crs, + e2k_fpcr_t fpcr, e2k_fpsr_t fpsr, e2k_pfpfr_t pfpfr, + sigset_t current_blocked_sigset) +{ + const struct pt_regs *regs = current_thread_info()->pt_regs; + + *((u64 *) &oucp->uc_sigmask) = current_blocked_sigset.sig[0]; + oucp->uc_mcontext.sbr = prev_key; + oucp->uc_mcontext.cr0_hi = AW(k_crs->cr0_hi); + oucp->uc_mcontext.cr1_lo = AW(k_crs->cr1_lo); + oucp->uc_mcontext.cr1_hi = AW(k_crs->cr1_hi); + oucp->uc_mcontext.pcsp_lo = AW(regs->stacks.pcsp_lo); + /* We want stack to point to user frame that called us, + * not to the glibc 
glue */ + oucp->uc_mcontext.pcsp_hi = AW(regs->stacks.pcsp_hi) - SZ_OF_CR; + oucp->uc_extra.fpcr = AW(fpcr); + oucp->uc_extra.fpsr = AW(fpsr); + oucp->uc_extra.pfpfr = AW(pfpfr); +} + +#if _NSIG != 64 +# error We read u64 value here... +#endif +__always_inline /* For performance since some arguments are constants */ +static long do_swapcontext(void __user *oucp, const void __user *ucp, + bool save_prev_ctx, int format) +{ + const struct ucontext_32 __user *ucp_32 = ucp; + const struct ucontext __user *ucp_64 = ucp; + const struct ucontext_prot __user *ucp_128 = ucp; + struct ucontext_32 __user *oucp_32 = oucp; + struct ucontext __user *oucp_64 = oucp; + struct ucontext_prot __user *oucp_128 = oucp; + u64 next_key, prev_key, sigset; + sigset_t k_sigset, current_blocked_sigset = current->blocked; + struct hw_context *prev_ctx; + mm_context_t *mm_context = ¤t->mm->context; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_cr0_hi_t cr0_hi; + e2k_cr1_lo_t cr1_lo; + e2k_cr1_hi_t cr1_hi; + e2k_fpcr_t fpcr; + e2k_fpsr_t fpsr; + e2k_pfpfr_t pfpfr; + e2k_fpcr_t prev_fpcr = READ_FPCR_REG(); + e2k_fpsr_t prev_fpsr = READ_FPSR_REG(); + e2k_pfpfr_t prev_pfpfr = READ_PFPFR_REG(); + struct pt_regs *regs = current_thread_info()->pt_regs; + e2k_mem_crs_t *__restrict k_crs = (e2k_mem_crs_t *__restrict) + AS(current_thread_info()->k_pcsp_lo).base; + int ret; + + DebugCTX("oucp=%lx ucp=%lx started\n", oucp, ucp); + BUILD_BUG_ON(sizeof(current->blocked.sig[0]) != 8); + + prev_ctx = current_thread_info()->this_hw_context; + + /* + * 1) Check user pointers + */ + if (format == CTX_32_BIT) { + if (!access_ok(ucp, sizeof(struct ucontext_32)) || + save_prev_ctx && !access_ok(oucp, + sizeof(struct ucontext_32))) + return -EFAULT; + } else if (format == CTX_64_BIT) { + if (!access_ok(ucp, sizeof(struct ucontext)) || + save_prev_ctx && !access_ok(oucp, + sizeof(struct ucontext))) + return -EFAULT; + } else { + /* CTX_128_BIT */ + if (!access_ok(ucp, sizeof(struct ucontext_prot)) || + 
save_prev_ctx && !access_ok(oucp, + sizeof(struct ucontext_prot))) + return -EFAULT; + } + + /* + * 2) If this is the first time this thread is changing contexts + * we'll have to allocate memory for the main context. + */ + if (likely(prev_ctx)) { + prev_key = prev_ctx->key; + } else { + prev_key = context_current_key(); + if (IS_ERR_VALUE(prev_key)) + return prev_key; + + DebugCTX("will save main context, key 0x%llx\n", prev_key); + prev_ctx = alloc_hw_context(true, + current_thread_info()->u_stack.size, prev_key); + if (!prev_ctx) + return -ENOMEM; + + prev_ctx->ucp = oucp; + prev_ctx->ptr_format = format; + if (format == CTX_32_BIT) + prev_ctx->p_uc_link = &oucp_32->uc_link; + else if (format == CTX_64_BIT) + prev_ctx->p_uc_link = &oucp_64->uc_link; + else /* CTX_128_BIT */ + prev_ctx->p_uc_link = &oucp_128->uc_link; + + if (save_prev_ctx) { + u64 *sbr_addr; + if (format == CTX_32_BIT) + sbr_addr = &oucp_32->uc_mcontext.sbr; + else if (format == CTX_64_BIT) + sbr_addr = &oucp_64->uc_mcontext.sbr; + else /* CTX_128_BIT */ + sbr_addr = &oucp_128->uc_mcontext.sbr; + if (put_user(prev_key, sbr_addr)) { + free_hw_context(prev_ctx, true, false); + return -EFAULT; + } + } + + /* add() (see above) */ + if ((ret = rhashtable_lookup_insert_key(&mm_context->hw_contexts, + &prev_key, &prev_ctx->hash_entry, hash_params))) { + DebugCTX("insert failed with %d\n", ret); + free_hw_context(prev_ctx, true, false); + return ret; + } + current_thread_info()->this_hw_context = prev_ctx; + } + + /* + * 3) Save previous context's stack into userspace. + * + * This also ensures there is enough user data in the next context + * to fill the bottom of kernel stack (where user data lies), and + * SPILLs chain stack so that it can be saved in the next step + * (see [k_crs]). 
+ */ + ret = do_user_hw_stacks_copy_full(®s->stacks, regs, NULL); + if (unlikely(ret)) + return ret; + + if (WARN_ON_ONCE(GET_PSHTP_MEM_INDEX(regs->stacks.pshtp) || + PCSHTP_SIGN_EXTEND(regs->stacks.pcshtp) != SZ_OF_CR)) + return -EINVAL; + + /* + * 4) Save previous ucontext and load the next one. + */ + TRY_USR_PFAULT { + if (format == CTX_32_BIT) { + next_key = ucp_32->uc_mcontext.sbr; + AW(fpcr) = ucp_32->uc_extra.fpcr; + AW(fpsr) = ucp_32->uc_extra.fpsr; + AW(pfpfr) = ucp_32->uc_extra.pfpfr; + sigset = *(u64 *) &ucp_32->uc_sigmask; + AW(pcsp_lo) = ucp_32->uc_mcontext.pcsp_lo; + AW(pcsp_hi) = ucp_32->uc_mcontext.pcsp_hi; + AW(cr0_hi) = ucp_32->uc_mcontext.cr0_hi; + AW(cr1_lo) = ucp_32->uc_mcontext.cr1_lo; + AW(cr1_hi) = ucp_32->uc_mcontext.cr1_hi; + } else if (format == CTX_64_BIT) { + next_key = ucp_64->uc_mcontext.sbr; + AW(fpcr) = ucp_64->uc_extra.fpcr; + AW(fpsr) = ucp_64->uc_extra.fpsr; + AW(pfpfr) = ucp_64->uc_extra.pfpfr; + sigset = *(u64 *) &ucp_64->uc_sigmask; + AW(pcsp_lo) = ucp_64->uc_mcontext.pcsp_lo; + AW(pcsp_hi) = ucp_64->uc_mcontext.pcsp_hi; + AW(cr0_hi) = ucp_64->uc_mcontext.cr0_hi; + AW(cr1_lo) = ucp_64->uc_mcontext.cr1_lo; + AW(cr1_hi) = ucp_64->uc_mcontext.cr1_hi; + } else { /* CTX_128_BIT */ + next_key = ucp_128->uc_mcontext.sbr; + AW(fpcr) = ucp_128->uc_extra.fpcr; + AW(fpsr) = ucp_128->uc_extra.fpsr; + AW(pfpfr) = ucp_128->uc_extra.pfpfr; + sigset = *(u64 *) &ucp_128->uc_sigmask; + AW(pcsp_lo) = ucp_128->uc_mcontext.pcsp_lo; + AW(pcsp_hi) = ucp_128->uc_mcontext.pcsp_hi; + AW(cr0_hi) = ucp_128->uc_mcontext.cr0_hi; + AW(cr1_lo) = ucp_128->uc_mcontext.cr1_lo; + AW(cr1_hi) = ucp_128->uc_mcontext.cr1_hi; + } + + if (save_prev_ctx) { + /* Frame at k_crs[0] was SPILLed at the previous step */ + if (format == CTX_32_BIT) { + save_ctx_32_bit(oucp_32, prev_key, k_crs, + prev_fpcr, prev_fpsr, prev_pfpfr, + current_blocked_sigset); + } else if (format == CTX_64_BIT) { + save_ctx_64_bit(oucp_64, prev_key, k_crs, + prev_fpcr, prev_fpsr, prev_pfpfr, + 
current_blocked_sigset); + } else { /* CTX_128_BIT */ + save_ctx_128_bit(oucp_128, prev_key, k_crs, + prev_fpcr, prev_fpsr, prev_pfpfr, + current_blocked_sigset); + } + } + } CATCH_USR_PFAULT { + return -EFAULT; + } END_USR_PFAULT + + DebugCTX("prev_key %llx, next_key %llx\n", prev_key, next_key); + + /* + * 5) Do the switch + */ + if (prev_key != next_key) { + struct hw_context *next_ctx; + + next_ctx = hw_context_lookup_and_get(next_key, mm_context); + if (unlikely(IS_ERR(next_ctx))) + return PTR_ERR(next_ctx); + + DebugCTX("switching from ctx %lx to ctx %lx\n", prev_ctx, next_ctx); + + switch_hw_contexts(regs, prev_ctx, next_ctx, fpcr, fpsr, pfpfr); + current_thread_info()->this_hw_context = next_ctx; + + (void) release_reference(prev_ctx, HWC_STATE_BUSY); + } + + /* + * 6) Do we need to jump backwards in the new context? + * + * Skip glibc glue by subtracting SZ_OF_CR (the same this is done + * when saving context in getcontext() and for oucp in swapcontext()) + */ + if (AS(regs->stacks.pcsp_lo).base + AS(regs->stacks.pcsp_hi).ind - SZ_OF_CR != + AS(pcsp_lo).base + AS(pcsp_hi).ind || + k_crs[0].cr0_hi.ip != cr0_hi.ip) { + /* Check if switch_hw_contexts() has restored FPU state already */ + bool fpu_restored = (prev_key != next_key); + + /* A hack to make do_longjmp() restore + * blocked signals mask */ + sigset |= sigmask(SIGKILL); + + DebugCTX("calling longjmp\n"); + /* There is no place in struct ucontext to save %wd.psize + * without breaking ABI, so we assume C calling convention + * value of 4 (8 for protected mode). */ + return do_longjmp(0, sigset, cr0_hi, cr1_lo, pcsp_lo, pcsp_hi, + AS(cr1_hi).br, format == CTX_128_BIT ? 
0x80 : 0x40, + fpcr, fpsr, pfpfr, !fpu_restored); + } + + k_sigset.sig[0] = sigset; + if (!sigequalsets(¤t_blocked_sigset, &k_sigset)) + set_current_blocked(&k_sigset); + + return 0; +} + +long swapcontext(const void __user *ucp, int format) +{ + return do_swapcontext(NULL, ucp, false, format); +} + +long sys_swapcontext(struct ucontext __user *oucp, + const struct ucontext __user *ucp, + int sigsetsize) +{ + if (unlikely(sigsetsize != sizeof(sigset_t))) + return -EINVAL; + + return do_swapcontext(oucp, ucp, true, CTX_64_BIT); +} + +long sys_setcontext(const struct ucontext __user *ucp, int sigsetsize) +{ + if (sigsetsize != sizeof(sigset_t)) + return -EINVAL; + + DebugCTX("ucp=%lx current key=0x%llx next key=0x%llx\n", + ucp, prev_key, next_key); + return do_swapcontext(NULL, ucp, false, CTX_64_BIT); +} + +#ifdef CONFIG_COMPAT +long compat_sys_swapcontext(struct ucontext_32 __user *oucp, + const struct ucontext_32 __user *ucp, int sigsetsize) +{ + if (unlikely(sigsetsize != sizeof(sigset_t))) + return -EINVAL; + + return do_swapcontext(oucp, ucp, true, CTX_32_BIT); +} + +long compat_sys_setcontext(const struct ucontext_32 __user *ucp, + int sigsetsize) +{ + if (sigsetsize != sizeof(sigset_t)) + return -EINVAL; + + DebugCTX("ucp=%lx current key=0x%llx next key=0x%llx\n", + ucp, prev_key, next_key); + return do_swapcontext(NULL, ucp, false, CTX_32_BIT); +} +#endif + +#ifdef CONFIG_PROTECTED_MODE +long protected_sys_swapcontext(struct ucontext_prot __user *oucp, + const struct ucontext_prot __user *ucp, int sigsetsize) +{ + if (unlikely(sigsetsize != sizeof(sigset_t))) + return -EINVAL; + + return do_swapcontext(oucp, ucp, true, CTX_128_BIT); +} + +long protected_sys_setcontext(const struct ucontext_prot __user *ucp, + int sigsetsize) +{ + if (sigsetsize != sizeof(sigset_t)) + return -EINVAL; + + DebugCTX("ucp=%lx current key=0x%llx next key=0x%llx\n", + ucp, prev_key, next_key); + return do_swapcontext(NULL, ucp, false, CTX_128_BIT); +} +#endif diff --git 
a/arch/e2k/kernel/proc_sclkr.c b/arch/e2k/kernel/proc_sclkr.c new file mode 100644 index 000000000000..479d791be369 --- /dev/null +++ b/arch/e2k/kernel/proc_sclkr.c @@ -0,0 +1,94 @@ +/* + * arch/l/kernel/proc_sclkr.c + * + * This file contains support for of sclkr clocksource. + * + * Copyright (C) 2015 Leonid Ananiev (leoan@mcst.ru) + */ + +#include +#include +#include + +#include +#include + +char sclkr_src[SCLKR_SRC_LEN] = "no"; /* no, ext, rtc, int */ +int sclkr_mode = -1; +EXPORT_SYMBOL_GPL(sclkr_mode); + +static int sclkr_set(int cmdline) +{ + int ret = 0; + static struct task_struct *sclkregistask; + int new_sclkr_mode = -1; + + if (!strcmp(sclkr_src, "no")) + new_sclkr_mode = SCLKR_NO; + if (!strcmp(sclkr_src, "ext")) + new_sclkr_mode = SCLKR_EXT; + if (!strcmp(sclkr_src, "rtc")) + new_sclkr_mode = SCLKR_RTC; + if (!strcmp(sclkr_src, "int")) + new_sclkr_mode = SCLKR_INT; + if (new_sclkr_mode < 0) { + pr_err(KERN_ERR "Possible sclkr modes are:\n" + "no, ext, rtc, int\n"); + return -EINVAL; + } + pr_warn("sclkr is set to %s (mod_no=%d) by %s\n", + sclkr_src, new_sclkr_mode, + cmdline ? 
"cmdline" : "echo...>/proc"); + if (cmdline) { + sclkr_mode = new_sclkr_mode; + } else { + sclkregistask = kthread_run(sclk_register, + (void *) (long) new_sclkr_mode, "sclkregister"); + if (IS_ERR(sclkregistask)) { + ret = PTR_ERR(sclkregistask); + pr_err(KERN_ERR "Failed to start sclk register thread," + " error: %d\n", ret); + return ret; + } + } + return ret; +} +int proc_sclkr(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret; + + ret = proc_dostring(ctl, write, buffer, lenp, ppos); + if (write) { + ret = sclkr_set(0); + } + return ret; +} +static int __init sclkr_deviat(char *str) +{ + sclk_set_deviat(simple_strtol(str, NULL, 0)); + return 0; +} +__setup("sclkd=", sclkr_deviat); +static int __init sclkr_setup(char *s) +{ + if (!s || (strcmp(s, "no") && strcmp(s, "rtc") && + strcmp(s, "ext") && strcmp(s, "int"))) { + pr_err(KERN_ERR "Possible sclkr cmdline modes are:\n" + "no, ext, rtc, int\n"); + return -EINVAL; + } + strncpy(sclkr_src, s, SCLKR_SRC_LEN); + sclkr_set(1); + return 0; +} +__setup("sclkr=", sclkr_setup); + +int redpill = 1; /* enable by defualt */ +static int __init redpill_init(char *str) +{ + redpill = simple_strtol(str, NULL, 0); + return 0; +} +__setup("redpill=", redpill_init); diff --git a/arch/e2k/kernel/process.c b/arch/e2k/kernel/process.c new file mode 100644 index 000000000000..15faef2690ac --- /dev/null +++ b/arch/e2k/kernel/process.c @@ -0,0 +1,2729 @@ +/* + * arch/e2k/kernel/process.c + * + * This file handles the arch-dependent parts of process handling + * + * Copyright 2001 Salavat S. 
Guiliazov (atic@mcst.ru) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_MONITORS +#include +#endif /* CONFIG_MONITORS */ + +#ifdef CONFIG_PROTECTED_MODE +#include +#include +#include +#endif /* CONFIG_PROTECTED_MODE */ + +#include <../../../kernel/time/tick-sched.h> + + +#undef DEBUG_PROCESS_MODE +#undef DebugP +#define DEBUG_PROCESS_MODE 0 /* processes */ +#define DebugP(...) DebugPrint(DEBUG_PROCESS_MODE, ##__VA_ARGS__) + +#undef DEBUG_QUEUED_TASK_MODE +#undef DebugQT +#define DEBUG_QUEUED_TASK_MODE 0 /* queue task and release */ +#define DebugQT(...) DebugPrint(DEBUG_QUEUED_TASK_MODE, ##__VA_ARGS__) + +#undef DEBUG_QUEUED_STACK_MODE +#undef DebugQS +#define DEBUG_QUEUED_STACK_MODE 0 /* queue stck and release */ +#define DebugQS(...) DebugPrint(DEBUG_QUEUED_STACK_MODE, ##__VA_ARGS__) + +#undef DEBUG_EXECVE_MODE +#undef DebugEX +#define DEBUG_EXECVE_MODE 0 /* execve and exit */ +#define DebugEX(...) DebugPrint(DEBUG_EXECVE_MODE, ##__VA_ARGS__) + +#undef DEBUG_GUEST_EXEC_MODE +#undef DebugGEX +#define DEBUG_GUEST_EXEC_MODE 0 /* guest execve debugging */ +#define DebugGEX(...) DebugPrint(DEBUG_GUEST_EXEC_MODE, ##__VA_ARGS__) + +#undef DEBUG_DATA_STACK_MODE +#undef DebugDS +#define DEBUG_DATA_STACK_MODE 0 /* user data stack */ +#define DebugDS(...) DebugPrint(DEBUG_DATA_STACK_MODE, ##__VA_ARGS__) + +#undef DEBUG_CU_MODE +#undef DebugCU +#define DEBUG_CU_MODE 0 /* compilation unit */ +#define DebugCU(...) 
DebugPrint(DEBUG_CU_MODE, ##__VA_ARGS__) + +#undef DEBUG_US_MODE +#undef DebugUS +#define DEBUG_US_MODE 0 /* user stacks */ +#define DebugUS(...) DebugPrint(DEBUG_US_MODE, ##__VA_ARGS__) + +#undef DEBUG_KS_MODE +#undef DebugKS +#define DEBUG_KS_MODE 0 /* kernel stacks */ +#define DebugKS(...) DebugPrint(DEBUG_KS_MODE, ##__VA_ARGS__) + +#undef DEBUG_US_FRAMES_MODE +#undef DebugUSF +#define DEBUG_US_FRAMES_MODE 0 /* user stack frames */ +#define DebugUSF(...) DebugPrint(DEBUG_US_FRAMES_MODE, ##__VA_ARGS__) + +#undef DEBUG_HS_MODE +#undef DebugHS +#define DEBUG_HS_MODE 0 /* Hard Stack Clone and Alloc */ +#define DebugHS(...) DebugPrint(DEBUG_HS_MODE, ##__VA_ARGS__) + +#undef DEBUG_SPRs_MODE +#define DEBUG_SPRs_MODE 0 /* stack pointers registers */ + +#undef DEBUG_COPY_USER_MODE +#undef DebugCPY +#define DEBUG_COPY_USER_MODE 0 /* KVM process copy debug */ +#define DebugCPY(fmt, args...) \ +({ \ + if (DEBUG_COPY_USER_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +extern bool debug_clone_guest; +#undef DEBUG_CLONE_USER_MODE +#undef DebugCLN +#define DEBUG_CLONE_USER_MODE 0 /* KVM thread clone debug */ +#define DebugCLN(fmt, args...) \ +({ \ + if (DEBUG_CLONE_USER_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_CpuR_MODE +#define DEBUG_CpuR_MODE 0 /* CPU registers */ + +#undef DEBUG_CORE_DUMP +#undef DebugCD +#define DEBUG_CORE_DUMP 0 /* coredump */ +#define DebugCD(...) 
DebugPrint(DEBUG_CORE_DUMP, ##__VA_ARGS__) + + +struct user_stack_free_work { + unsigned long stack_base; + e2k_size_t max_stack_size; + struct mm_struct *mm; + struct delayed_work work; +}; + + +int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) +{ + memcpy(dst, src, sizeof(*dst)); + clear_thread_info(dst); + + return 0; +} + +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT +char *debug_process_name = NULL; +int debug_process_name_len = 0; + +static int __init debug_process_name_setup(char *str) +{ + debug_process_name = str; + debug_process_name_len = strlen(debug_process_name); + return 1; +} + +__setup("procdebug=", debug_process_name_setup); +#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */ + +bool idle_nomwait = false; +static int __init mem_wait_idle_setup(char *str) +{ + if (strcmp(str, "nomwait") == 0) { + /* disable memory wait type idle */ + idle_nomwait = true; + pr_info("Disable memory wait type idle\n"); + } else { + pr_warning("Unknown command line idle= arg %s, can be 'nomwait'\n", + str); + } + return 1; +} + +__setup("idle=", mem_wait_idle_setup); + +const char *arch_vma_name(struct vm_area_struct *vma) +{ + if (vma->vm_flags & VM_HW_STACK_PS) + return "[procedure stack]"; + else if (vma->vm_flags & VM_HW_STACK_PCS) + return "[chain stack]"; + else if (vma->vm_flags & VM_SIGNAL_STACK) + return "[signal stack]"; + return NULL; +} + +static void clean_pc_stack_zero_frame(void *addr, bool user) +{ + unsigned long ts_flag; + e2k_mem_crs_t *pcs = addr; + + if (user) + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + __clear_user(pcs, sizeof(*pcs)); + if (user) + clear_ts_flag(ts_flag); +} + +unsigned long *__alloc_thread_stack_node(int node) +{ + void *address; + struct page *page; + + //TODO when arch-indep. part is fixed switch back to + //alloc_pages_exact_nid() instead to not waste memory. + page = alloc_pages_node(node, + GFP_KERNEL_ACCOUNT | __GFP_NORETRY | __GFP_NOWARN, + THREAD_SIZE_ORDER); + address = (page) ? 
page_address(page) : NULL; +#ifdef CONFIG_VMAP_STACK + if (!address) + address = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN, + VMALLOC_START, VMALLOC_END, GFP_KERNEL_ACCOUNT, + PAGE_KERNEL, 0, node, + __builtin_return_address(0)); +#endif + + if (cpu_has(CPU_HWBUG_FALSE_SS) && address) + clean_pc_stack_zero_frame(address + KERNEL_PC_STACK_OFFSET, false); + + return address; +} + + +unsigned long *alloc_thread_stack_node(struct task_struct *task, int node) +{ + unsigned long *stack = __alloc_thread_stack_node(node); + task->stack = stack; +#ifdef CONFIG_VMAP_STACK + task->stack_vm_area = find_vm_area(stack); +#endif + return stack; +} + +void __free_thread_stack(void *address) +{ +#ifdef CONFIG_VMAP_STACK + if (!is_vmalloc_addr(address)) + __free_pages(virt_to_page(address), THREAD_SIZE_ORDER); + else + vfree(address); +#else + free_pages_exact(address, THREAD_SIZE); +#endif +} + +void free_thread_stack(struct task_struct *task) +{ + struct thread_info *ti = task_thread_info(task); + + if (!task->stack) + return; + + AW(ti->k_psp_lo) = 0; + AW(ti->k_psp_hi) = 0; + AW(ti->k_pcsp_lo) = 0; + AW(ti->k_pcsp_hi) = 0; + AW(ti->k_usd_lo) = 0; + AW(ti->k_usd_hi) = 0; + + __free_thread_stack(task->stack); + task->stack = NULL; +#ifdef CONFIG_VMAP_STACK + task->stack_vm_area = NULL; +#endif +} + +int free_vm_stack_cache(unsigned int cpu) +{ + return 0; +} + +static void user_stack_free_work_fn(struct work_struct *work) +{ + struct user_stack_free_work *w; + unsigned long stack_base; + e2k_size_t max_stack_size; + struct mm_struct *mm; + int ret; + + w = container_of(to_delayed_work(work), typeof(*w), work); + stack_base = w->stack_base; + max_stack_size = w->max_stack_size; + mm = w->mm; + + use_mm(mm); + ret = vm_munmap_notkillable(stack_base, max_stack_size); + DebugHS("stack base 0x%lx max stack size 0x%lx, munmap returned %d\n", + stack_base, max_stack_size, ret); + unuse_mm(mm); + + if (ret == 0) { + kfree(w); + mmput(mm); + } else if (ret == -ENOMEM) { + 
queue_delayed_work(system_long_wq, to_delayed_work(work), + msecs_to_jiffies(MSEC_PER_SEC)); + } else { + BUG(); + } +} + +static void free_user_stack(void *stack_base, e2k_size_t max_stack_size) +{ + int ret; + + ret = vm_munmap_notkillable((unsigned long) stack_base, max_stack_size); + DebugHS("stack base 0x%llx max stack size 0x%lx, munmap returned %d\n", + (u64) stack_base, max_stack_size, ret); + if (ret == -ENOMEM) { + struct user_stack_free_work *work = kmalloc(sizeof(*work), GFP_KERNEL); + + BUG_ON(!work); + + work->stack_base = (unsigned long) stack_base; + work->max_stack_size = max_stack_size; + work->mm = current->mm; + + mmget(current->mm); + + INIT_DELAYED_WORK(&work->work, user_stack_free_work_fn); + queue_delayed_work(system_long_wq, &work->work, + msecs_to_jiffies(MSEC_PER_SEC)); + } else if (ret != 0) { + BUG(); + } +} + +static void *alloc_user_hard_stack(size_t stack_size, + unsigned long user_stacks_base, int type) +{ + e2k_addr_t stack_addr; + struct thread_info *ti = current_thread_info(); + hw_stack_t *u_hw_stacks = &ti->u_hw_stack; + e2k_addr_t u_ps_base; + e2k_addr_t u_pcs_base; + unsigned long ti_status; + + BUG_ON(!IS_ALIGNED(stack_size, PAGE_SIZE) || !current->mm); + + /* + * In the case of pseudo discontinuous user hardware stacks one + * shouldn't reuse already freed memory of user hardware stacks, + * otherwise there will be a problem with longjmp (we won't be + * able to find needed area unambiguously). + */ + if (GET_PS_BASE(u_hw_stacks)) { + u_ps_base = (e2k_addr_t)GET_PS_BASE(u_hw_stacks); + user_stacks_base = max(user_stacks_base, u_ps_base); + } + + if (GET_PCS_BASE(u_hw_stacks)) { + u_pcs_base = (e2k_addr_t)GET_PCS_BASE(u_hw_stacks); + user_stacks_base = max(user_stacks_base, u_pcs_base); + } + + ti_status = (type == HW_STACK_TYPE_PS) ? 
TS_MMAP_PS : TS_MMAP_PCS; + ti_status |= TS_MMAP_PRIVILEGED; + + current_thread_info()->status |= ti_status; + stack_addr = vm_mmap_notkillable(NULL, user_stacks_base, stack_size, + PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0); + current_thread_info()->status &= ~ti_status; + + if (IS_ERR_VALUE(stack_addr)) { + DebugHS("mmap() returned error %ld\n", (long) stack_addr); + WARN_ONCE(stack_addr != -ENOMEM, "vm_mmap failed with %ld\n", + stack_addr); + return NULL; + } + + if (stack_addr < user_stacks_base) { + DebugHS("bad stack base\n"); + goto out_unmap; + } + + DebugHS("stack addr 0x%lx, size 0x%lx\n", stack_addr, stack_size); + + return (void *) stack_addr; + +out_unmap: + vm_munmap_notkillable(stack_addr, stack_size); + return NULL; +} + +static void alloc_user_p_stack(struct hw_stack_area *ps, size_t stack_area_size) +{ + ps->base = alloc_user_hard_stack(stack_area_size, + USER_P_STACKS_BASE, HW_STACK_TYPE_PS); + if (!ps->base) + return; + set_hw_ps_area_user_size(ps, stack_area_size); +} + +static void alloc_user_pc_stack(struct hw_stack_area *pcs, size_t stack_area_size) +{ + pcs->base = alloc_user_hard_stack(stack_area_size, + USER_PC_STACKS_BASE, HW_STACK_TYPE_PCS); + if (!pcs->base) + return; + + if (cpu_has(CPU_HWBUG_FALSE_SS)) + clean_pc_stack_zero_frame(pcs->base, true); + + set_hw_pcs_area_user_size(pcs, stack_area_size); +} + +static void free_user_p_stack(hw_stack_area_t *ps) +{ + free_user_stack(ps->base, get_hw_ps_area_user_size(ps)); + ps->base = NULL; +} + +static void free_user_pc_stack(hw_stack_area_t *pcs) +{ + free_user_stack(pcs->base, get_hw_pcs_area_user_size(pcs)); + pcs->base = NULL; +} + +struct cached_stacks_entry { + hw_stack_t stack; + /* Entry in mm_context_t->cached_stacks */ + struct list_head list_entry; +}; + +int alloc_user_hw_stacks(hw_stack_t *hw_stacks, size_t p_size, size_t pc_size) +{ + size_t p_limit = current->signal->rlim[RLIMIT_P_STACK_EXT].rlim_cur; + size_t pc_limit = 
current->signal->rlim[RLIMIT_PC_STACK_EXT].rlim_cur; + mm_context_t *context = ¤t->mm->context; + + p_size = round_up(min(p_size, p_limit), PAGE_SIZE); + pc_size = round_up(min(pc_size, pc_limit), PAGE_SIZE); + + /* Fast path: check cache first */ + while (!list_empty(&context->cached_stacks)) { + struct cached_stacks_entry *cached; + size_t cached_p_size, cached_pc_size; + + spin_lock(&context->cached_stacks_lock); + if ((cached = list_first_entry_or_null(&context->cached_stacks, + struct cached_stacks_entry, list_entry))) { + list_del(&cached->list_entry); + context->cached_stacks_size -= cached->stack.pcs.size + + cached->stack.ps.size; + } + spin_unlock(&context->cached_stacks_lock); + if (unlikely(!cached)) + continue; + + cached_p_size = cached->stack.ps.size; + cached_pc_size = cached->stack.pcs.size; + if (unlikely(cached_pc_size > pc_limit || cached_p_size > p_limit || + cached_pc_size < pc_size || cached_p_size < p_size)) { + /* User has changed limits on hardware stacks + * and this cached stack is too big now; + * or requirements for stacks have changed + * after this one was put into the cache. */ + free_user_p_stack(&cached->stack.ps); + free_user_pc_stack(&cached->stack.pcs); + kfree(cached); + continue; + } + + /* Found suitable stack. We can avoid zeroing it: + * stacks are from the same mm anyway, and different + * threads of the same process don't have anything to + * hide from each other. */ + *hw_stacks = cached->stack; + kfree(cached); + return 0; + } + + /* Slow path: actually mmap() stacks */ + alloc_user_p_stack(&hw_stacks->ps, p_size); + if (!hw_stacks->ps.base) + return -ENOMEM; + + alloc_user_pc_stack(&hw_stacks->pcs, pc_size); + if (!hw_stacks->pcs.base) { + free_user_p_stack(&hw_stacks->ps); + return -ENOMEM; + } + + return 0; +} + +void free_user_hw_stacks(hw_stack_t *hw_stacks) +{ + mm_context_t *context = ¤t->mm->context; + + /* Fast path: try to put into cache first. + * Limit cached stacks size to reduce memory usage. 
+ * Note that hardware stacks freeing is delayed to + * a kworker in some cases, thus we can't access + * 'current' here. */ + if (hw_stacks->ps.base && hw_stacks->pcs.base && + context->cached_stacks_size + hw_stacks->ps.size + + hw_stacks->pcs.size < SZ_1M) { + struct cached_stacks_entry *cached = kmalloc(sizeof(*cached), GFP_KERNEL); + + if (cached) { + cached->stack = *hw_stacks; + + spin_lock(&context->cached_stacks_lock); + INIT_LIST_HEAD(&cached->list_entry); + list_add(&cached->list_entry, &context->cached_stacks); + context->cached_stacks_size += hw_stacks->ps.size + + hw_stacks->pcs.size; + spin_unlock(&context->cached_stacks_lock); + + hw_stacks->ps.base = NULL; + hw_stacks->pcs.base = NULL; + return; + } + } + + /* Slow path: actually munmap() stacks */ + if (hw_stacks->ps.base) + free_user_p_stack(&hw_stacks->ps); + if (hw_stacks->pcs.base) + free_user_pc_stack(&hw_stacks->pcs); +} + +void destroy_cached_stacks(mm_context_t *context) +{ + struct cached_stacks_entry *cached, *next; + + /* This is called upon mm freeing so + * there is no need to take the spinlock */ + list_for_each_entry_safe(cached, next, &context->cached_stacks, list_entry) { + list_del(&cached->list_entry); + kfree(cached); + } +} + +void free_user_old_pc_stack_areas(struct list_head *old_u_pcs_list) +{ + struct old_pcs_area *user_old_pc_stack; + struct old_pcs_area *n; + + list_for_each_entry_safe(user_old_pc_stack, n, old_u_pcs_list, + list_entry) { + list_del(&user_old_pc_stack->list_entry); + kfree(user_old_pc_stack); + } +} + +void arch_release_task_struct(struct task_struct *tsk) +{ + /* free virtual part of task structure */ + free_virt_task_struct(tsk); +} + +static u64 get_user_main_c_stack(unsigned long sp, unsigned long *stack_top) +{ + e2k_addr_t stack_start; + struct vm_area_struct *vma; + + DebugDS("started: sp 0x%lx\n", sp); + + down_read(¤t->mm->mmap_sem); + vma = find_vma(current->mm, sp); + DebugDS("find_vma() returned VMA 0x%px start 0x%lx end 0x%lx\n", + vma, 
vma->vm_start, vma->vm_end); + + BUG_ON(!(vma->vm_flags & VM_GROWSDOWN)); + + *stack_top = vma->vm_end; + stack_start = vma->vm_start; + +#ifdef CONFIG_MAKE_ALL_PAGES_VALID + if (make_vma_pages_valid(vma, vma->vm_start, vma->vm_end)) { + DebugDS("make valid failed\n"); + return 0; + } +#endif + + up_read(¤t->mm->mmap_sem); + + DebugDS("returns stack base 0x%lx\n", stack_start); + return stack_start; +} + +/* + * This function allocates user's memory for needs of Compilation Unit Table. + */ + +static int create_cu_table(struct mm_struct *mm, unsigned long *cut_size_p, + int *cui_p) +{ + unsigned long cut_start, cut_size = USER_CUT_AREA_SIZE; + int cui; + + DebugCU("started: cut base 0x%lx, size 0x%lx\n", + USER_CUT_AREA_BASE, cut_size); + + set_ts_flag(TS_MMAP_PRIVILEGED); + cut_start = vm_mmap_notkillable(NULL, USER_CUT_AREA_BASE, cut_size, + PROT_READ | PROT_WRITE, + MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, 0); + clear_ts_flag(TS_MMAP_PRIVILEGED); + DebugCU("vm_mmap() returned %ld\n", cut_start); + if (IS_ERR_VALUE(cut_start)) + return cut_start; + + if (TASK_IS_PROTECTED(current)) { + DebugEX("create_cut_entry for new protected loader\n"); + cui = create_cut_entry(mm->context.tcount, + mm->start_code, mm->end_code, + mm->start_data, mm->end_data); + } else { + DebugEX("create_cut_entry for unprotected mode\n"); + cui = create_cut_entry(0, + 0L, current->thread.flags & E2K_FLAG_32BIT ? + TASK32_SIZE : TASK_SIZE, + 0L, current->thread.flags & E2K_FLAG_32BIT ? + TASK32_SIZE : TASK_SIZE); + } + if (cui < 0) + return cui; + + BUG_ON(cui != (TASK_IS_PROTECTED(current) ? 
+ USER_CODES_PROT_INDEX : USER_CODES_UNPROT_INDEX(current))); + + *cui_p = cui; + *cut_size_p = cut_size; + + return 0; +} + +static int +create_user_hard_stacks(hw_stack_t *hw_stacks, struct e2k_stacks *stacks) +{ + e2k_size_t user_psp_size = get_hw_ps_user_size(hw_stacks); + e2k_size_t user_pcsp_size = get_hw_pcs_user_size(hw_stacks); + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + int ret; + + ret = alloc_user_hw_stacks(hw_stacks, user_psp_size, user_pcsp_size); + if (ret) + return ret; + + DebugCLN("allocated user Procedure stack at %px, size 0x%lx, Chain stack at %px, size 0x%lx\n", + hw_stacks->ps.base, user_psp_size, + hw_stacks->pcs.base, user_pcsp_size); + + AW(psp_lo) = 0; + AW(psp_hi) = 0; + AW(pcsp_lo) = 0; + AW(pcsp_hi) = 0; + + AS(psp_lo).base = (unsigned long) GET_PS_BASE(hw_stacks); + AS(psp_hi).size = user_psp_size; + AS(pcsp_lo).base = (unsigned long) GET_PCS_BASE(hw_stacks); + AS(pcsp_hi).size = user_pcsp_size; + + stacks->psp_lo = psp_lo; + stacks->psp_hi = psp_hi; + stacks->pcsp_lo = pcsp_lo; + stacks->pcsp_hi = pcsp_hi; + AW(stacks->pshtp) = 0; + stacks->pcshtp = 0; + + return 0; +} + +/* + * Functions create all user hardware stacks(PS & PCS) including + * kernel part of the hardware stacks of current task + */ +void native_define_user_hw_stacks_sizes(hw_stack_t *hw_stacks) +{ + set_hw_ps_user_size(hw_stacks, USER_P_STACK_INIT_SIZE); + set_hw_pcs_user_size(hw_stacks, USER_PC_STACK_INIT_SIZE); +} + +void show_regs(struct pt_regs *regs) +{ + print_stack_frames(current, regs, 1); + print_pt_regs(regs); +} + +static int check_wchan(e2k_mem_crs_t *frame, unsigned long real_frame_addr, + unsigned long corrected_frame_addr, int flags, void *arg) +{ + unsigned long *p_ip = arg; + unsigned long ip; + + ip = AS(frame->cr0_hi).ip << 3; + + if (!in_sched_functions(ip)) { + *p_ip = ip; + return 1; + } + + return 0; +} + +unsigned long get_wchan(struct task_struct *p) +{ + unsigned long ip = 0; + + if (!p 
|| p == current || p->state == TASK_RUNNING) + return 0; + + parse_chain_stack(0, p, check_wchan, &ip); + + return ip; +} + +int free_user_hardware_stacks(hw_stack_t *u_hw_stacks) +{ + thread_info_t *ti = current_thread_info(); + + free_user_old_pc_stack_areas(&ti->old_u_pcs_list); + DebugEX("freed user old PCS list head 0x%px\n", + &ti->old_u_pcs_list); + + if (atomic_read(¤t->mm->mm_users) <= 1) { + DebugEX("last thread: do not free stacks - mmput will release all mm\n"); + SET_PS_BASE(u_hw_stacks, NULL); + SET_PCS_BASE(u_hw_stacks, NULL); + return 0; + } + + BUG_ON((unsigned long) GET_PS_BASE(u_hw_stacks) >= TASK_SIZE || + (unsigned long) GET_PCS_BASE(u_hw_stacks) >= TASK_SIZE); + + /* + * Don't free hw_stack (they are nedeed for coredump) + * The hw_stacks will be freeded in coredump_finish + */ + if (current->mm->core_state) { + DebugCD("core dump detected\n"); + create_delayed_free_hw_stacks(); + SET_PS_BASE(u_hw_stacks, NULL); + SET_PCS_BASE(u_hw_stacks, NULL); + return 0; + } + + if (GET_PS_BASE(u_hw_stacks) || GET_PCS_BASE(u_hw_stacks)) { + DebugEX("will free user PS from base 0x%px, size 0x%lx, user PCS from base 0x%px, size 0x%lx\n", + u_hw_stacks->ps.base, get_hw_ps_user_size(u_hw_stacks), + u_hw_stacks->pcs.base, get_hw_pcs_user_size(u_hw_stacks)); + } + free_user_hw_stacks(u_hw_stacks); + + return 0; +} + +static inline void +native_goto_new_user_hard_stk(e2k_stacks_t *stacks) +{ + unsigned long flags; + + DebugEX("will switch stacks\n"); + + raw_all_irq_save(flags); + + /* + * Optimization to do not flush chain stack. + * + * Old stacks are not needed anymore, do not flush procedure + * registers and chain registers - only strip sizes + */ + STRIP_PSHTP_WINDOW(); + STRIP_PCSHTP_WINDOW(); + + /* + * There might be a FILL operation still going right now. + * Wait for it's completion before going further - otherwise + * the next FILL on the new PSP/PCSP registers will race + * with the previous one. 
+ * + * The first and the second FILL operations will use different + * addresses because we will change PSP/PCSP registers, and + * thus loads/stores from these two FILLs can race with each + * other leading to bad register file (containing values from + * both stacks). + */ + E2K_WAIT(_ma_c); + + /* + * Since we are switching to user stacks their sizes + * have been stripped already, so use RAW_* writes. + */ + NATIVE_NV_WRITE_PSP_REG(stacks->psp_hi, stacks->psp_lo); + NATIVE_NV_WRITE_PCSP_REG(stacks->pcsp_hi, stacks->pcsp_lo); + + raw_all_irq_restore(flags); +} + +#define printk printk_fixed_args +#define panic panic_fixed_args +__section(".entry.text") +notrace noinline __interrupt void +do_switch_to_user_func(start_fn start_func, e2k_size_t us_size, int cui) +{ + e2k_cr0_hi_t cr_ip; + e2k_cr1_lo_t cr1_lo; + e2k_cr1_hi_t cr_ussz; + e2k_psr_t psr; +#ifdef CONFIG_PROTECTED_MODE + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + e2k_pusd_lo_t pusd_lo; + e2k_pusd_hi_t pusd_hi; + e2k_usbr_t u_sbr; +#endif /* CONFIG_PROTECTED_MODE */ + + DebugCU("func = 0x%016lx\n", *((long *)start_func)); + DebugP("entered: func 0x%px\n", start_func); + DebugSPRs("start"); + + cr_ip = NATIVE_NV_READ_CR0_HI_REG(); + cr1_lo = NATIVE_NV_READ_CR1_LO_REG(); + cr_ussz = NATIVE_NV_READ_CR1_HI_REG(); + + /* + * Go to down to sys_exec() procedure chain stack using PSCP info + * And get 'ussz' field of 'sys_exec()' function to restore + * user stack state before 'switch_to_user_func()' call + */ + AS_STRUCT(cr_ussz).ussz = us_size >> 4; + + AS_WORD(psr) = 0; + AS_STRUCT(psr).sge = 1; + AS_STRUCT(psr).ie = 1; /* sti(); */ + AS_STRUCT(psr).nmie = 1; /* nmi enable */ + AS_STRUCT(psr).pm = 0; /* user mode */ + AS_STRUCT(cr1_lo).psr = AS_WORD(psr); + AS_STRUCT(cr_ip).ip = (u64)start_func >> 3; /* start user IP */ + + /* + * Force CUD/GD/TSD update by the values stored in CUTE + * Entry #1 - for both 32bit and protected mode + */ + AS(cr1_lo).cui = cui; + if (machine.native_iset_ver < E2K_ISET_V6) + 
AS(cr1_lo).ic = 0; + + NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG(cr1_lo); + NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG(cr_ussz); + NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG(cr_ip); + +#ifdef CONFIG_CLI_CHECK_TIME + sti_return(); +#endif + +#ifdef CONFIG_PROTECTED_MODE + if (current->thread.flags & E2K_FLAG_PROTECTED_MODE) { + usd_hi = NATIVE_NV_READ_USD_HI_REG(); + usd_lo = NATIVE_NV_READ_USD_LO_REG(); + + AW(u_sbr) = AS_STRUCT(usd_lo).base & + ~E2K_PROTECTED_STACK_BASE_MASK; + + AW(pusd_lo) = 0; + AW(pusd_hi) = 0; + AS_STRUCT(pusd_lo).base = AS_STRUCT(usd_lo).base & + E2K_PROTECTED_STACK_BASE_MASK; + AS_STRUCT(pusd_lo).base &= ~E2K_ALIGN_PUSTACK_MASK; + AS_STRUCT(pusd_lo).p = 1; + AS_STRUCT(pusd_hi).size = AS_STRUCT(usd_hi).size & + ~E2K_ALIGN_PUSTACK_MASK; + AS_STRUCT(pusd_lo).psl = 2; + AS_STRUCT(pusd_lo).rw = RW_ENABLE; + + NATIVE_NV_WRITE_USBR_USD_REG(u_sbr, pusd_hi, pusd_lo); + ENABLE_US_CLW(); + } else { + DISABLE_US_CLW(); + } +#endif /* CONFIG_PROTECTED_MODE */ + +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT + if (current_thread_info()->pt_regs) { + E2K_SAVE_CLOCK_REG(current_thread_info()->pt_regs-> + scall_times->scall_done); + E2K_SAVE_CLOCK_REG(current_thread_info()->pt_regs-> + scall_times->end); + } +#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */ + + + /* Set global registers to empty state to prevent other user */ + /* or kernel current pointers access */ + INIT_G_REGS(); + + + /* + * User function will be executed under PSR interrupts control + * and kernel should return interrupts mask control to PSR register + * The follow function should do it + */ + complete_switch_to_user_func(); + + /* Prevent kernel information leakage. 
4 quadro registers + * already contain user's parameters.*/ +#ifndef CONFIG_CPU_HW_CLEAR_RF +# if E2K_MAXSR != 112 +# error Must clear all registers here +# endif + E2K_CLEAR_RF_108(); +#endif +} +#undef printk +#undef panic + +void fill_cut_entry(e2k_cute_t *cute_p, + unsigned long code_base, unsigned code_sz, + unsigned long glob_base, unsigned glob_sz) +{ + CUTE_CUD_BASE(cute_p) = code_base; + CUTE_CUD_SIZE(cute_p) = + ALIGN_TO_MASK(code_sz, E2K_ALIGN_CODES_MASK); + CUTE_CUD_C(cute_p) = CUD_CFLAG_SET; + + CUTE_GD_BASE(cute_p) = glob_base; + CUTE_GD_SIZE(cute_p) = + ALIGN_TO_MASK(glob_sz, E2K_ALIGN_GLOBALS_MASK); +} + +int create_cut_entry(int tcount, + unsigned long code_base, unsigned code_sz, + unsigned long glob_base, unsigned glob_sz) +{ + struct mm_struct *mm = current->mm; + register e2k_cute_t *cute_p; /* register for workaround against */ + /* gcc bug */ + unsigned long ts_flag; + int free_cui; + int error = 0; +#ifdef CONFIG_PROTECTED_MODE + int retval; +#endif + + if (TASK_IS_PROTECTED(current)) { + mutex_lock(&mm->context.cut_mask_lock); + + /* Find the first free entry in cut and occupy it */ + free_cui = find_next_zero_bit( + (unsigned long *) &mm->context.cut_mask, + USER_CUT_AREA_SIZE/sizeof(e2k_cute_t), 1); + + /* If no free cut entry found */ + if (free_cui == USER_CUT_AREA_SIZE/sizeof(e2k_cute_t)) + error = -EFAULT; + else + bitmap_set((unsigned long *) &mm->context.cut_mask, + free_cui, 1); + + mutex_unlock(&mm->context.cut_mask_lock); + + if (error) + return error; + } else { + /* not protected aplications should have zero CUI */ + free_cui = USER_CODES_UNPROT_INDEX(current); + } + + /* Fill found cut entry by information about loaded module */ + cute_p = (e2k_cute_t *) USER_CUT_AREA_BASE + free_cui; + DebugCU("Create cut entry: cui = %d; tct = %d; code = 0x%lx: 0x%x; " + "data = 0x%lx : 0x%x\n", free_cui, tcount, code_base, code_sz, + glob_base, glob_sz); +#ifdef CONFIG_PROTECTED_MODE + if (current->thread.flags & E2K_FLAG_PROTECTED_MODE) { 
+ DebugCU("e2k_set_vmm_cui called for cui = %d; code 0x%lx : 0x%lx\n", + free_cui, code_base, code_base + code_sz); + retval = e2k_set_vmm_cui(mm, free_cui, code_base, + code_base + code_sz); + if (retval) + return retval; + } +#endif + + /* cute_p is user address, read it carefully */ + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + TRY_USR_PFAULT { + fill_cut_entry(cute_p, code_base, code_sz, glob_base, glob_sz); + CUTE_TSD_BASE(cute_p) = + atomic_add_return(tcount, &mm->context.tstart) - tcount; + CUTE_TSD_SIZE(cute_p) = tcount; + } CATCH_USR_PFAULT { + error = -EFAULT; + } END_USR_PFAULT + clear_ts_flag(ts_flag); + + /* If something was wrong with access to cut */ + if (error) + return error; + + return free_cui; +} + +int free_cut_entry(unsigned long glob_base, size_t glob_sz, + unsigned long *code_base, size_t *code_sz) +{ + struct mm_struct *mm = current->mm; + register e2k_cute_t *cute_p; /* register for workaround against */ + /* gcc bug */ + unsigned long ts_flag; + int cui; + int error = 0; + int removed_cui = -1; + + /* Free cut entry with appropriate glob_base */ + mutex_lock(&mm->context.cut_mask_lock); + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + TRY_USR_PFAULT { + for (cui = 1; cui < USER_CUT_AREA_SIZE/sizeof(e2k_cute_t); + cui++) { + cute_p = (e2k_cute_t *) USER_CUT_AREA_BASE + cui; + if (CUTE_GD_BASE(cute_p) == glob_base && + CUTE_GD_SIZE(cute_p) == glob_sz) { + if (code_base) + *code_base = CUTE_CUD_BASE(cute_p); + if (code_sz) + *code_sz = CUTE_CUD_SIZE(cute_p); + fill_cut_entry(cute_p, 0, 0, 0, 0); + bitmap_clear((unsigned long *) + &mm->context.cut_mask, cui, 1); + removed_cui = cui; + } + } + } CATCH_USR_PFAULT { + error = -EFAULT; + } END_USR_PFAULT + clear_ts_flag(ts_flag); + mutex_unlock(&mm->context.cut_mask_lock); + + /* + * If cut entry with appropriate glob_base and glob_sz was not found + */ + if (removed_cui > 0) { + DebugCU("Free cut entry: cui = %d; ", removed_cui); + if (code_base && code_sz) + DebugCU("code = 0x%lx: 0x%lx; ", 
*code_base, *code_sz); + DebugCU("data = 0x%lx : 0x%lx\n", glob_base, glob_sz); + } else { + error = -EFAULT; + } + + return error; +} + +/** + * do_sys_execve - switch to the thread's context (stacks, CUT, etc) + * @entry: user function to call + * @sp: user stack's top + * @kernel: called by a kernel thread + * + * This is always called as the last step when exec()'ing a user binary. + */ +long __attribute__((no_caller_stack)) +do_sys_execve(unsigned long entry, unsigned long sp, int kernel) +{ + thread_info_t *const ti = current_thread_info(); + struct mm_struct *mm = current->mm; + start_fn start; + e2k_stacks_t stacks; + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + unsigned long u_stk_bottom, u_stk_sz, stack_top; + hw_stack_t hw_stacks; + e2k_cutd_t cutd; + e2k_size_t cut_size; + int cui, ret; + u64 flags; + + BUG_ON(!list_empty(&ti->old_u_pcs_list)); + BUG_ON(CURRENT_PS_BASE() || CURRENT_PCS_BASE()); + + if (TASK_IS_PROTECTED(current)) + sp &= ~E2K_ALIGN_PUSTACK_MASK; + else + sp &= ~E2K_ALIGN_USTACK_MASK; + + start = (start_fn) entry; + + /* it is actual for guest kernel */ + clear_vm_thread_flags(); + +#ifdef CONFIG_KVM_HOST_MODE + if (ti->gthread_info) { + /* It is guest thread: clear from old process */ + kvm_pv_clear_guest_thread_info(ti->gthread_info); + } +#endif /* CONFIG_KVM_HOST_MODE */ + + u_stk_bottom = get_user_main_c_stack(sp, &stack_top); + if (!u_stk_bottom) { + ret = -ENOMEM; + DebugEX("is terminated: get_user_main_c_stack() failed and returned error %d\n", + ret); + goto fatal_error; + } + u_stk_sz = sp - u_stk_bottom; + + usd_hi = NATIVE_NV_READ_USD_HI_REG(); + usd_lo = NATIVE_NV_READ_USD_LO_REG(); + + AS_STRUCT(usd_lo).base = sp; + AS_STRUCT(usd_lo).p = 0; + AS_STRUCT(usd_hi).size = u_stk_sz; + + DebugEX("stack base 0x%lx size 0x%lx top 0x%lx\n", + u_stk_bottom, u_stk_sz, stack_top); + + define_user_hw_stacks_sizes(&hw_stacks); + ret = create_user_hard_stacks(&hw_stacks, &stacks); + if (ret) { + DebugEX("Could not create user hardware 
stacks\n"); + goto fatal_error; + } + + /* + * Here we go with CUT handling. + */ + + /* Allocate memory for CUT table (2Mb) */ + ret = create_cu_table(mm, &cut_size, &cui); + if (ret) { + DebugEX("Can't create CU table.\n"); + goto fatal_error; + } + + /* + * Set CU descriptor (register) to point to the CUT base. + */ + cutd.CUTD_base = USER_CUT_AREA_BASE; + ti->u_cutd = cutd; + WRITE_CUTD_REG(cutd); + + flags = 0; + if (TASK_IS_BINCO(current)) + flags |= BIN_COMP_CODE_TASK_FLAG; + if (current->thread.flags & E2K_FLAG_32BIT) + flags |= BIN_32_CODE_TASK_FLAG; + if (current->thread.flags & E2K_FLAG_PROTECTED_MODE) + flags |= PROTECTED_CODE_TASK_FLAG; + + /* + * We don't return to handle_sys_call() so call + * syscall_trace_leave() and co manually. + */ + if (unlikely(ti->flags & _TIF_WORK_SYSCALL_TRACE)) { + struct pt_regs *regs = current_pt_regs(); + + if (regs && user_mode(regs)) { + if (!TASK_IS_PROTECTED(current)) + SAVE_SYSCALL_RVAL(regs, 0); + else + SAVE_PSYSCALL_RVAL(regs, 0, 0, 0, 0, 0, 0); + syscall_trace_leave(regs); + } + } + + /* + * Set some special registers in accordance with + * E2K API specifications. + */ + INIT_SPECIAL_REGISTERS(); + + free_getsp_adj(&ti->getsp_adj); + + /* + * Switch to user hard stacks should be last action in the function + * to avoid starting of scheduler on this process. Scheduler should not + * cause switching from or to user hard and soft stacks, because MMU + * context switching precedes to stacks spilling and switching. 
+ * In this case stacks of current task will be spilled to user space + * of the next task + */ + + raw_all_irq_disable(); + + stacks.usd_lo = usd_lo; + stacks.usd_hi = usd_hi; + stacks.top = stack_top & ~E2K_ALIGN_STACKS_BASE_MASK; + + /* save local data & hardware stacks pointers */ + ti->u_stack.bottom = stacks.usd_lo.USD_lo_base - + stacks.usd_hi.USD_hi_size; + ti->u_stack.size = stacks.usd_hi.USD_hi_size; + ti->u_stack.top = stacks.top; + + /* + * The next function can be paravirtualized and do various actions: + * on host (or pure native mode) should only return 0 to continue + * switching and start of new user + * on guest should switch to created user stacks and start from + * user entry point (to do all into one hypercall) Function can + * return 1 on exit from user or negative error code + */ + ret = switch_to_new_user(&stacks, &hw_stacks, + cutd.CUTD_base, cut_size, entry, cui, flags, kernel); + if (ret == 0) { + /* native or host kernel case: */ + /* continue switching to new user process */ + } else if (ret < 0) { + /* paravirtualized guest function case: */ + /* error occurred while switching */ + DebugGEX("error %d occurred while switch to new user\n", ret); + goto fatal_error; + } else if (ret > 0) { + /* paravirtualized guest function case: */ + /* guest execve() completed and returned from user */ + panic("return from user execve(), return value %d\n", ret); + } + + INIT_G_REGS(); + +#if defined CONFIG_FUNCTION_GRAPH_TRACER + /* + * We won't ever return from this function and we are on a new stack + * so remove all tracing entries. 
+ */ + current->curr_ret_stack = -1; +#endif + + user_enter(); + +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT + thread_info->times_num = 0; +#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */ + + /* + * And now just switch to user's data stack and function + */ + + raw_all_irq_disable(); + + NATIVE_NV_WRITE_USBR_USD_REG_VALUE( + stack_top & ~E2K_ALIGN_STACKS_BASE_MASK, + AW(usd_hi), AW(usd_lo)); + + /* + * Switch to the hardware stacks in the new user space. + * Note that we now we work on user stacks, so must be + * very careful after this point... + */ + goto_new_user_hard_stk(&stacks); + ti->u_hw_stack = hw_stacks; + + /* from here process is new user process and empty pt_regs history */ + ti->pt_regs = NULL; + + NATIVE_WRITE_RPR_HI_REG_VALUE(0); + NATIVE_WRITE_RPR_LO_REG_VALUE(0); + + if (TASK_IS_PROTECTED(current)) { + unsigned long *p_base_lo, *p_base_hi; + unsigned long base_lo, base_hi; + + init_sem_malloc(&mm->context.umpools); + /* new loader interface */ + p_base_lo = (unsigned long *) mm->start_stack; + p_base_hi = p_base_lo + 1; + base_lo = *p_base_lo; + base_hi = *p_base_hi; + /* We may erase base descriptor from stack since + * no one will ever need it there. 
*/ + *p_base_lo = 0; + *p_base_hi = 0; + E2K_JUMP_WITH_ARGUMENTS(protected_switch_to_user_func, + 5, base_lo, base_hi, start, u_stk_sz, cui); + } else { + E2K_JUMP_WITH_ARGUMENTS(switch_to_user_func, + 4, 0, start, u_stk_sz, cui); + } + +fatal_error: + E2K_LMS_HALT_OK; + + DebugEX("fatal error %d: send KILL signal\n", ret); + + if (kernel) + /* Nowhere to return to, just exit */ + do_exit(SIGKILL); + + send_sig(SIGKILL, current, 0); + + return ret; +} + +long e2k_sys_execve(const char __user *filename, + const char __user *const __user *argv, + const char __user *const __user *envp) +{ + int ret; + + set_ts_flag(TS_USER_EXECVE); + ret = sys_execve(filename, argv, envp); + clear_ts_flag(TS_USER_EXECVE); + if (!ret) + ret = do_sys_execve(current_thread_info()->execve.entry, + current_thread_info()->execve.sp, false); + + return ret; +} + +#ifdef CONFIG_COMPAT +long compat_e2k_sys_execve(const char __user *filename, + const compat_uptr_t __user *argv, + const compat_uptr_t __user *envp) +{ + int ret; + + set_ts_flag(TS_USER_EXECVE); + ret = compat_sys_execve(filename, argv, envp); + clear_ts_flag(TS_USER_EXECVE); + if (!ret) + ret = do_sys_execve(current_thread_info()->execve.entry, + current_thread_info()->execve.sp, false); + + return ret; +} +#endif + +long e2k_sys_execveat(int fd, const char __user *filename, + const char __user *const __user *argv, + const char __user *const __user *envp, + int flags) +{ + int ret; + + set_ts_flag(TS_USER_EXECVE); + ret = sys_execveat(fd, filename, argv, envp, flags); + clear_ts_flag(TS_USER_EXECVE); + if (!ret) + ret = do_sys_execve(current_thread_info()->execve.entry, + current_thread_info()->execve.sp, false); + + return ret; +} + +#ifdef CONFIG_COMPAT +long compat_e2k_sys_execveat(int fd, const char __user *filename, + const compat_uptr_t __user *argv, + const compat_uptr_t __user *envp, + int flags) +{ + int ret; + + set_ts_flag(TS_USER_EXECVE); + ret = compat_sys_execveat(fd, filename, argv, envp, flags); + 
clear_ts_flag(TS_USER_EXECVE); + if (!ret) + ret = do_sys_execve(current_thread_info()->execve.entry, + current_thread_info()->execve.sp, false); + + return ret; +} +#endif + +static inline int +native_prepare_start_thread_frames(unsigned long entry, unsigned long sp) +{ + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_mem_crs_t *crs; + unsigned long *frame; + unsigned long flags; + bool flush_chain, flush_procedure; + + raw_all_irq_save(flags); + + psp_lo = NATIVE_NV_READ_PSP_LO_REG(); + pcsp_lo = NATIVE_NV_READ_PCSP_LO_REG(); + psp_hi = NATIVE_NV_READ_PSP_HI_REG(); + pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG(); + + flush_chain = (pcsp_hi.PCSP_hi_ind < 2 * SZ_OF_CR); + /* Assume a maximum of 4 do_sys_execve()'s parameters */ + flush_procedure = (psp_hi.PSP_hi_ind < 2 * EXT_4_NR_SZ); + + if (flush_chain) + E2K_FLUSHC; + if (flush_procedure) + E2K_FLUSHR; + + /* + * Change IP of the last frame to do_sys_execve(). + */ + crs = (e2k_mem_crs_t *) (pcsp_lo.PCSP_lo_base + SZ_OF_CR); + + crs->cr0_hi.CR0_hi_ip = (unsigned long) &do_sys_execve >> 3; + + /* + * Put do_sys_execve()'s parameters into the procedure stack. 
+ */ + frame = (unsigned long *) psp_lo.PSP_lo_base; + + frame[0] = entry; + if (machine.native_iset_ver < E2K_ISET_V5) { + frame[1] = sp; + /* Skip frame[2] and frame[3] - they hold extended data */ + } else { + frame[2] = sp; + /* Skip frame[1] and frame[3] - they hold extended data */ + } + frame[4] = true; + + raw_all_irq_restore(flags); + + return 0; +} +int native_do_prepare_start_thread_frames(unsigned long entry, unsigned long sp) +{ + return native_prepare_start_thread_frames(entry, sp); +} + +void start_thread(struct pt_regs *regs, unsigned long entry, unsigned long sp) +{ + int ret; + + DebugP("entry 0x%lx sp 0x%lx\n", entry, sp); + + current_thread_info()->execve.entry = entry; + current_thread_info()->execve.sp = sp; + + /* + * If called from user mode then do_sys_execve() will + * be called manually from ttable_entry(). + */ + if (test_ts_flag(TS_USER_EXECVE)) + return; + + ret = prepare_start_thread_frames(entry, sp); + if (ret != 0) { + pr_err("%s(): could not prepare stack frames to return " + "to do_sys_execve(), error %d", + __func__, ret); + do_exit(SIGKILL); + } + + return; /* to prepared kernel thread stack frames to call */ + /* do_sys_execve(entry, sp) */ +} +EXPORT_SYMBOL(start_thread); + + +/* + * Idle related variables and functions + */ +unsigned long boot_option_idle_override = 0; +EXPORT_SYMBOL(boot_option_idle_override); + +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT +static void save_binco_regs(struct sw_regs *sw_regs) +{ + + /* Save intel regs from processor. For binary compiler. */ + NATIVE_SAVE_INTEL_REGS(sw_regs); +} +#else /* !CONFIG_SECONDARY_SPACE_SUPPORT: */ +static inline void save_binco_regs(struct sw_regs *sw_regs) { } +#endif /* CONFIG_SECONDARY_SPACE_SUPPORT */ + +void init_sw_user_regs(struct sw_regs *sw_regs, + bool save_gregs, bool save_binco_regs_needed) +{ + /* + * New process will start with interrupts disabled. 
+ * They will be enabled in schedule_tail() (user's upsr + * is saved in pt_regs and does not need to be corrected). + */ + sw_regs->upsr = E2K_KERNEL_UPSR_DISABLED; + + sw_regs->fpcr = NATIVE_NV_READ_FPCR_REG(); + sw_regs->fpsr = NATIVE_NV_READ_FPSR_REG(); + sw_regs->pfpfr = NATIVE_NV_READ_PFPFR_REG(); + sw_regs->cutd = NATIVE_NV_READ_CUTD_REG(); + +#ifdef CONFIG_GREGS_CONTEXT + if (save_gregs) { + machine.save_gregs(&sw_regs->gregs); + } +#endif /* CONFIG_GREGS_CONTEXT */ + + if (save_binco_regs_needed) + save_binco_regs(sw_regs); +} + +static void set_default_registers(struct task_struct *new_task, bool save_gregs) +{ + struct sw_regs *new_sw_regs = &new_task->thread.sw_regs; + struct thread_info *new_ti = task_thread_info(new_task); + unsigned long addr = (unsigned long) new_task->stack; + + memset(new_sw_regs, 0, sizeof(*new_sw_regs)); + + memset(&new_task->thread.debug.regs, 0, + sizeof(new_task->thread.debug.regs)); + + clear_ptrace_hw_breakpoint(new_task); + + /* + * Calculate kernel stacks registers + */ + AW(new_ti->k_psp_lo) = 0; + AS(new_ti->k_psp_lo).base = addr + KERNEL_P_STACK_OFFSET; + AW(new_ti->k_psp_hi) = 0; + AS(new_ti->k_psp_hi).size = KERNEL_P_STACK_SIZE; + AW(new_ti->k_pcsp_lo) = 0; + AS(new_ti->k_pcsp_lo).base = addr + KERNEL_PC_STACK_OFFSET; + AW(new_ti->k_pcsp_hi) = 0; + AS(new_ti->k_pcsp_hi).size = KERNEL_PC_STACK_SIZE; + AW(new_ti->k_usd_lo) = 0; + AS(new_ti->k_usd_lo).base = addr + KERNEL_C_STACK_OFFSET + + KERNEL_C_STACK_SIZE; + AW(new_ti->k_usd_hi) = 0; + AS(new_ti->k_usd_hi).size = KERNEL_C_STACK_SIZE; + + /* + * Prepare registers for first schedule() to a new task + */ + new_sw_regs->top = (u64) new_task->stack + KERNEL_C_STACK_SIZE + + KERNEL_C_STACK_OFFSET; + new_sw_regs->usd_lo = new_ti->k_usd_lo; + new_sw_regs->usd_hi = new_ti->k_usd_hi; + new_sw_regs->psp_lo = new_ti->k_psp_lo; + new_sw_regs->psp_hi = new_ti->k_psp_hi; + new_sw_regs->pcsp_lo = new_ti->k_pcsp_lo; + new_sw_regs->pcsp_hi = new_ti->k_pcsp_hi; + + 
init_sw_user_regs(new_sw_regs, save_gregs, TASK_IS_BINCO(current)); +} + +asmlinkage pid_t sys_clone_thread(unsigned long clone_flags, long stack_base, + unsigned long long stack_size, int __user *parent_tidptr, + int __user *child_tidptr, unsigned long tls) +{ + struct pt_regs *regs = current_pt_regs(); + long rval, flags = clone_flags; + struct kernel_clone_args args; + + if (!access_ok(stack_base, stack_size)) + return -ENOMEM; + + if (!flags) + flags = SIGCHLD | CLONE_VM | CLONE_FS | CLONE_FILES; + + args.flags = (flags & ~CSIGNAL); + args.pidfd = parent_tidptr; + args.child_tid = child_tidptr; + args.parent_tid = parent_tidptr; + args.exit_signal = (flags & CSIGNAL); + args.stack = stack_base; + args.stack_size = stack_size; + args.tls = tls; + + rval = _do_fork(&args); + + return rval; +} + +/* + * Get return IP for n level below pt_regs return IP + */ +e2k_addr_t +get_nested_kernel_IP(pt_regs_t *regs, int n) +{ + e2k_addr_t IP = 0UL; + e2k_cr0_hi_t cr0_hi; + e2k_addr_t base; + s64 cr_ind; + u64 flags; + + raw_all_irq_save(flags); + E2K_FLUSHC; + base = regs->stacks.pcsp_lo.PCSP_lo_base; + cr_ind = regs->stacks.pcsp_hi.PCSP_hi_ind; + + while (--n) { + if (cr_ind <= 0) { + panic("get_nested_kernel_IP(): procedure chain " + "stack underflow\n"); + } + cr_ind = cr_ind - SZ_OF_CR; + get_kernel_cr0_hi(&cr0_hi, base, cr_ind); + IP = AS_STRUCT(cr0_hi).ip << 3; + } + + raw_all_irq_restore(flags); + return IP; +} + +static void reserved_frame(void) +{ + panic("kthread must not return, but it did"); +} + +/** + * copy_kernel_stacks - prepare for return to kernel function + * @stacks - allocated stacks' parameters (will be corrected) + * @crs - chain stack frame will be returned here + * @fn - function to return to + * @arg - function's argument + * + * Note that cr1_lo.psr value is taken from PSR register. This means + * that interrupts and sge are expected to be enabled by caller. 
+ */ +int native_copy_kernel_stacks(struct task_struct *new_task, + unsigned long fn, unsigned long arg) +{ + struct sw_regs *new_sw_regs = &new_task->thread.sw_regs; + e2k_mem_crs_t crs; + e2k_psr_t psr; + unsigned long *p_frame, reserved_frame_size; + e2k_mem_crs_t *c_frames; + int ret; + + /* + * How kernel thread creation works. + * + * 1) After schedule() to the new kthread we return to __ret_from_fork() + * with wbs=0 (i.e. returned value ends up in %r0). + * 2) __ret_from_fork() calls schedule_tail() to finish the things + * for scheduler. + * 3) When __ret_from_fork() returns @fn frame will be FILLed along + * with function's argument. + */ + + /* + * Reserve space in hardware stacks for: + * __ret_from_fork <= @fn(@arg) <= reserved_frame() + * + * reserved_frame() is used solely to print an error when @fn returns + */ + AS(new_sw_regs->pcsp_hi).ind = 3 * SZ_OF_CR; + + reserved_frame_size = 4 * EXT_4_NR_SZ; + AS(new_sw_regs->psp_hi).ind = EXT_4_NR_SZ + reserved_frame_size; + + /* + * Prepare @fn's argument + */ + p_frame = (unsigned long *) (AS(new_sw_regs->psp_lo).base + + reserved_frame_size); + *p_frame = arg; + + /* + * Prepare @fn's frame in chain stack. 
+ */ + psr = NATIVE_NV_READ_PSR_REG(); + /* function kernel_thread() can be started from trap to dump */ + /* all stacks state on VMs and VCPUs */ + BUG_ON((psr.PSR_sge == 0) && !current_is_in_trap()); + ret = chain_stack_frame_init(&crs, (void *) fn, + AS(new_sw_regs->usd_hi).size, psr, 1, 0, false); + if (ret) + return ret; + + c_frames = (e2k_mem_crs_t *) AS(new_sw_regs->pcsp_lo).base; + c_frames[2] = crs; + + /* + * Prepare frame to catch errors + */ + AS(crs.cr0_hi).ip = (u64) reserved_frame >> 3; + AS(crs.cr1_lo).wbs = reserved_frame_size / EXT_4_NR_SZ; + + c_frames[1] = crs; + + memset((void *) AS(new_sw_regs->psp_lo).base, 0, reserved_frame_size); + + return 0; +} + +static struct pt_regs *reserve_child_pt_regs(struct sw_regs *new_sw_regs, + struct task_struct *new_task) +{ + struct pt_regs *new_regs; + unsigned long stack_top, stack_bottom, regs_size; + + stack_top = (unsigned long) new_task->stack + KERNEL_C_STACK_OFFSET + + KERNEL_C_STACK_SIZE; + stack_bottom = new_sw_regs->usd_lo.USD_lo_base - + new_sw_regs->usd_hi.USD_hi_size; + regs_size = round_up(sizeof(*new_regs), E2K_ALIGN_STACK); + + BUG_ON(AS(new_sw_regs->usd_hi).size < regs_size); + + /* allocate pt_regs structute from top of the stack + AS(new_sw_regs->usd_lo).base -= regs_size; + AS(new_sw_regs->usd_hi).size -= regs_size; + + AS(new_sw_regs->crs.cr1_hi).ussz -= regs_size / 16; + + new_regs = (struct pt_regs *) (stack_top - regs_size); + */ + + /* allocate pt_regs structute from bottom of the stack + * Overlay of areas cannot now controlled by hardware because of + * guest does not report stack borders changes to host. 
+ * TODO that it need implement simple light hypercall + new_sw_regs->usd_hi.USD_hi_size -= regs_size; + */ + new_regs = (struct pt_regs *)stack_bottom; + + return new_regs; +} + +/* + * Clear unallocated memory pointers which can be allocated for parent task + */ +void clear_thread_info(struct task_struct *task) +{ + struct thread_struct *thread = &task->thread; + thread_info_t *thread_info = task_thread_info(task); + + DebugEX("started for task 0x%px CPU #%d\n", task, task_cpu(task)); + + AW(thread_info->k_usd_lo) = 0; + AW(thread_info->k_usd_hi) = 0; + AW(thread_info->k_psp_lo) = 0; + AW(thread_info->k_psp_hi) = 0; + AW(thread_info->k_pcsp_lo) = 0; + AW(thread_info->k_pcsp_hi) = 0; + +#ifdef CONFIG_TC_STORAGE + thread->sw_regs.tcd = 0; +#endif + + thread_info->this_hw_context = NULL; + + thread_info->pt_regs = NULL; + + SET_PS_BASE(&thread_info->u_hw_stack, NULL); + SET_PCS_BASE(&thread_info->u_hw_stack, NULL); + + thread_info->old_ps_base = NULL; + thread_info->old_ps_size = 0; + thread_info->old_pcs_base = NULL; + thread_info->old_pcs_size = 0; + + INIT_LIST_HEAD(&thread_info->old_u_pcs_list); + + INIT_LIST_HEAD(&thread_info->getsp_adj); + + thread_info->status = 0; + +#if defined(CONFIG_SECONDARY_SPACE_SUPPORT) + thread_info->rp_start = 0; + thread_info->rp_end = 0; + thread_info->last_ic_flush_cpu = -1; +#endif + +#if defined(CONFIG_E2K) && defined(CONFIG_PROTECTED_MODE) + thread_info->pm_robust_list = NULL; +#endif + + /* clear virtualization support fields into thread info */ + clear_virt_thread_struct(thread_info); +} + +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT +static void ktimes_account_copy_thread(struct thread_info *new_ti) +{ + int i; + scall_times_t *new_scall_times; + + for (i = 0; i < (sizeof(new_ti->times)) / sizeof(u16); i++) + ((u16 *)(new_ti->times))[i] = new_task->pid; + new_ti->times_num = 1; + new_ti->times_index = 1; + new_scall_times = &(new_ti->times[0].of.syscall); + new_ti->times[0].type = SYSTEM_CALL_TT; + new_ti->fork_scall_times = 
new_scall_times; + new_scall_times->syscall_num = regs->scall_times->syscall_num; + if (regs->scall_times) + new_scall_times->syscall_num = regs->scall_times->syscall_num; + else + new_scall_times->syscall_num = -1; + new_scall_times->signals_num = 0; +} +#else +static void ktimes_account_copy_thread(struct thread_info *new_ti) { } +#endif + + +/** + * *_clone_prepare_spilled_user_stacks - prepare user's part of kernel stacks + * for a new thread + * @child_stacks: child PSP/PCSP/USD/SBR registers + * @child_crs: child CR registers + * @regs: parent registers + */ +int native_clone_prepare_spilled_user_stacks(e2k_stacks_t *child_stacks, + const e2k_mem_crs_t *child_crs, const struct pt_regs *regs, + struct sw_regs *new_sw_regs, struct thread_info *new_ti, + unsigned long clone_flags) +{ + const struct thread_info *old_ti = current_thread_info(); + u64 ps_copy_size; + s64 u_pshtp_size, u_pcshtp_size, parent_pshtp_size; + unsigned long flags; + void __user *child_pframe; + int ret; + + u_pshtp_size = GET_PSHTP_MEM_INDEX(child_stacks->pshtp); + u_pcshtp_size = PCSHTP_SIGN_EXTEND(child_stacks->pcshtp); + + /* + * After clone child has new empty stacks + */ + if (WARN_ON_ONCE(u_pshtp_size || u_pcshtp_size)) + do_exit(SIGKILL); + + /* + * Leave one empty frame that will be loaded to %CR + * registers when the top user frame starts executing. + */ + AS(new_sw_regs->pcsp_hi).ind += SZ_OF_CR; + child_stacks->pcshtp += SZ_OF_CR; + AS(child_stacks->pcsp_hi).ind += SZ_OF_CR; + + /* + * Copy last chain stack frame from parent (needed only for vfork()). 
+ */ + if (clone_flags & CLONE_VFORK) { + e2k_mem_crs_t *cframe; + unsigned long parent_pcshtp_size; + + parent_pcshtp_size = PCSHTP_SIGN_EXTEND(regs->stacks.pcshtp); + if (WARN_ON_ONCE(parent_pcshtp_size < SZ_OF_CR)) + do_exit(SIGKILL); + cframe = (e2k_mem_crs_t *) (AS(old_ti->k_pcsp_lo).base + + parent_pcshtp_size - SZ_OF_CR); + + raw_all_irq_save(flags); + COPY_STACKS_TO_MEMORY(); + memcpy((void *) (AS(new_ti->k_pcsp_lo).base + + AS(new_sw_regs->pcsp_hi).ind), + cframe, SZ_OF_CR); + raw_all_irq_restore(flags); + DebugCLN("last chain stack frame from parent copyed to " + "kernel stack %px from %px, size 0x%lx\n", + (void *)(new_ti->k_pcsp_lo.PCSP_lo_base + + AS(new_sw_regs->pcsp_hi).ind), + cframe, SZ_OF_CR); + + AS(new_sw_regs->pcsp_hi).ind += SZ_OF_CR; + child_stacks->pcshtp += SZ_OF_CR; + AS(child_stacks->pcsp_hi).ind += SZ_OF_CR; + + ps_copy_size = (AS(child_crs->cr1_lo).wbs + + AS(cframe->cr1_lo).wbs) * EXT_4_NR_SZ; + } else { + ps_copy_size = AS(child_crs->cr1_lo).wbs * EXT_4_NR_SZ; + } + + + /* + * Copy procedure stack from parent. 
+ */ + child_pframe = (void __user *) (AS(child_stacks->psp_lo).base + + AS(child_stacks->psp_hi).ind); + parent_pshtp_size = GET_PSHTP_MEM_INDEX(regs->stacks.pshtp); + + if (ps_copy_size > parent_pshtp_size) { + void __user *parent_pframe; + unsigned long ts_flag; + u64 size; + + size = ps_copy_size - parent_pshtp_size; + parent_pframe = (void __user *) (AS(regs->stacks.psp_lo).base + + AS(regs->stacks.psp_hi).ind - + ps_copy_size); + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __copy_to_user_with_tags(child_pframe, parent_pframe, + size); + clear_ts_flag(ts_flag); + if (ret) { + pr_err("%s(): copying of parent procedure frames to " + "child failed\n", + __func__); + return -EFAULT; + } + DebugCLN("parent procedure stack frames from %px copyed to " + "childe stack %px, size 0x%llx\n", + parent_pframe, child_pframe, size); + + child_pframe += size; + AS(child_stacks->psp_hi).ind += size; + ps_copy_size -= size; + } + + raw_all_irq_save(flags); + COPY_STACKS_TO_MEMORY(); + tagged_memcpy_8((void *) AS(new_ti->k_psp_lo).base, + (void *) (AS(old_ti->k_psp_lo).base + parent_pshtp_size - + ps_copy_size), ps_copy_size); + raw_all_irq_restore(flags); + DebugCLN("parent kernel procedure stack frames from %px copyed to " + "childe stack %px, size 0x%llx\n", + (void *) (AS(old_ti->k_psp_lo).base + parent_pshtp_size - + ps_copy_size), + (void *) AS(new_ti->k_psp_lo).base, ps_copy_size); + + AS(new_sw_regs->psp_hi).ind += ps_copy_size; + AS(child_stacks->pshtp).ind += PSP_IND_TO_PSHTP(ps_copy_size); + AS(child_stacks->psp_hi).ind += ps_copy_size; + + if (AS(child_stacks->pshtp).ind) { + AS(child_stacks->pshtp).fx = 1; + AS(child_stacks->pshtp).fxind = 0x10; + } + + return 0; +} + +/** + * *_copy_spilled_user_stacks - copy user's part of kernel hardware stacks + * @child_stacks: copy of parent task registers + */ +void native_copy_spilled_user_stacks(struct e2k_stacks *child_stacks, + e2k_mem_crs_t *child_crs, struct sw_regs *new_sw_regs, + const struct thread_info 
*new_ti) +{ + const struct thread_info *old_ti = current_thread_info(); + s64 u_pshtp_size, u_pcshtp_size; + unsigned long flags; + + u_pshtp_size = GET_PSHTP_MEM_INDEX(child_stacks->pshtp); + u_pcshtp_size = PCSHTP_SIGN_EXTEND(child_stacks->pcshtp); + DebugCPY("procedure stack size to copy (PSHTP) 0x%llx\n", + u_pshtp_size); + DebugCPY("chain stack size to copy (PCSHTP) 0x%llx\n", + u_pcshtp_size); + + if (WARN_ON_ONCE(u_pshtp_size < 0 || u_pcshtp_size < 0)) + do_exit(SIGKILL); + + raw_all_irq_save(flags); + COPY_STACKS_TO_MEMORY(); + tagged_memcpy_8((void *) AS(new_ti->k_psp_lo).base, + (void *) AS(old_ti->k_psp_lo).base, u_pshtp_size); + memcpy((void *) AS(new_ti->k_pcsp_lo).base, + (void *) AS(old_ti->k_pcsp_lo).base, u_pcshtp_size); + raw_all_irq_restore(flags); + DebugCPY("user's part of procedure stack copyed to %px from %px " + "size 0x%llx\n", + (void *)new_ti->k_psp_lo.PSP_lo_base, + (void *)old_ti->k_psp_lo.PSP_lo_base, u_pshtp_size); + DebugCPY("user's part of chain stack copyed to %px from %px " + "size 0x%llx\n", + (void *)new_ti->k_pcsp_lo.PCSP_lo_base, + (void *)old_ti->k_pcsp_lo.PCSP_lo_base, u_pcshtp_size); + + AS(new_sw_regs->psp_hi).ind += u_pshtp_size; + AS(new_sw_regs->pcsp_hi).ind += u_pcshtp_size; + DebugCPY("procedure stack ind is now 0x%x\n", + new_sw_regs->psp_hi.PSP_hi_ind); + DebugCPY("chain stack ind is now 0x%x\n", + new_sw_regs->pcsp_hi.PCSP_hi_ind); + + if (AS(child_stacks->pshtp).ind) { + AS(child_stacks->pshtp).fx = 1; + AS(child_stacks->pshtp).fxind = 0x10; + } +} + +static int calculate_dstack_size(struct mm_struct *mm, + unsigned long *sp, unsigned long *stack_size) +{ + struct vm_area_struct *vma, *cur, *prev; + u64 delta; + + down_read(&mm->mmap_sem); + vma = find_vma_prev(mm, *sp + *stack_size - 1, &prev); + if (!vma || *sp + *stack_size <= vma->vm_start) + goto out_efault; + + cur = vma; + + if (*stack_size) { + /* + * Check passed area + */ + while (*sp < cur->vm_start) { + if (!prev || cur->vm_start != prev->vm_end || + 
((cur->vm_flags ^ prev->vm_flags) & VM_GROWSDOWN)) + goto out_efault; + + cur = prev; + prev = prev->vm_prev; + } + } else { + /* + * We assume here that the stack area is contained + * in a single vma. + */ + *stack_size = *sp - cur->vm_start; + *sp = cur->vm_start; + } + + if (*stack_size > MAX_USD_HI_SIZE) { + delta = *stack_size - MAX_USD_HI_SIZE; + + *sp += delta; + *stack_size -= delta; + } + + up_read(&mm->mmap_sem); + + /* Align the stack */ + + delta = round_up(*sp, E2K_ALIGN_STACK) - *sp; + *sp += delta; + *stack_size -= delta; + + delta = *stack_size - round_down(*stack_size, E2K_ALIGN_STACK); + *stack_size -= delta; + + if ((s64) *stack_size < 0) + return -EINVAL; + + return 0; + +out_efault: + up_read(&mm->mmap_sem); + + return -EFAULT; +} + +static void prepare_ret_from_fork(e2k_mem_crs_t *crs, u64 usd_size) +{ + memset(crs, 0, sizeof(*crs)); + + crs->cr0_lo.fields.pf = -1ULL; + crs->cr0_hi.fields.ip = (unsigned long) __ret_from_fork >> 3; + crs->cr1_lo.fields.psr = AW(E2K_KERNEL_PSR_ENABLED); + crs->cr1_lo.fields.cui = KERNEL_CODES_INDEX; + crs->cr1_lo.fields.ic = !cpu_has(CPU_FEAT_ISET_V6); + crs->cr1_lo.fields.wbs = 0; + crs->cr1_lo.fields.wpsz = 4; + crs->cr1_hi.fields.ussz = usd_size / 16; +} + +int copy_thread_tls(unsigned long clone_flags, unsigned long sp, + unsigned long stack_size, struct task_struct *new_task, + unsigned long tls) +{ + struct thread_info *new_ti = task_thread_info(new_task); + struct sw_regs *new_sw_regs = &new_task->thread.sw_regs; + struct pt_regs *childregs, *regs = current_thread_info()->pt_regs; + int ret; + + ktimes_account_copy_thread(new_ti); + + /* + * Initialize sw_regs with default values + */ + set_default_registers(new_task, true); + + /* + * Set __ret_from_fork frame to be called right after __switch_to() + */ + prepare_ret_from_fork(&new_sw_regs->crs, AS(new_sw_regs->usd_hi).size); + + /* + * For kernel threads @sp is a function and @stacks_size is its argument + */ + if (current->flags & PF_KTHREAD) + return 
copy_kernel_stacks(new_task, sp, stack_size); + + /* + * Set a new TLS for the child thread. + */ + if (clone_flags & CLONE_SETTLS) { + if (!TASK_IS_PROTECTED(current)) { + new_sw_regs->gregs.g[13].base = tls; + } else { + u64 tls_lo = 0; + u64 tls_hi = 0; + u32 tls_lo_tag = 0; + u32 tls_hi_tag = 0; + u64 args_ptr; + + switch (regs->kernel_entry) { + case 8: + tls_lo = regs->args[9]; + tls_hi = regs->args[10]; + tls_lo_tag = regs->tags >> (4*10) & 0xf; + tls_hi_tag = regs->tags >> (4*11) & 0xf; + break; + case 10: + args_ptr = __E2K_PTR_PTR(regs->args[4], + regs->args[5], GET_SBR_HI()); + /* Copy TLS argument with tags. */ + TRY_USR_PFAULT { + NATIVE_LOAD_TAGGED_QWORD_AND_TAGS( + ((u64 *) args_ptr) + 4, + tls_lo, tls_hi, + tls_lo_tag, tls_hi_tag); + } CATCH_USR_PFAULT { + pr_warn("Bad tls on entry10\n"); + } END_USR_PFAULT + break; + default: + pr_warn("Unknown entry in tls copy\n"); + } + __NATIVE_STORE_TAGGED_QWORD( + &new_sw_regs->gregs.g[12].base, + tls_lo, tls_hi, tls_lo_tag, tls_hi_tag, 16); + } + } + + childregs = reserve_child_pt_regs(new_sw_regs, new_task); + + *childregs = *regs; + clear_fork_child_pt_regs(childregs); + new_ti->pt_regs = childregs; + + /* + * Update data stack if needed + */ + if (sp) { + if (TASK_IS_PROTECTED(new_task) && stack_size) + set_ti_thread_flag(new_ti, TIF_USD_NOT_EXPANDED); + + ret = calculate_dstack_size(new_task->mm, &sp, &stack_size); + if (ret) + return ret; + + if (TASK_IS_PROTECTED(current)) { + e2k_pusd_lo_t pusd_lo; + + childregs->stacks.top = (sp + stack_size) & + ~0xffffffffUL; + AW(pusd_lo) = 0; + AS(pusd_lo).base = (sp + stack_size) & 0xffffffffUL; + AS(pusd_lo).psl = 1; + AS(pusd_lo).p = 1; + AS(pusd_lo).rw = RW_ENABLE; + AW(childregs->stacks.usd_lo) = AW(pusd_lo); + } else { + childregs->stacks.top = sp + stack_size; + AS(childregs->stacks.usd_lo).base = sp + stack_size; + } + AS(childregs->stacks.usd_hi).size = stack_size; + + new_ti->u_stack.bottom = sp; + new_ti->u_stack.top = sp + stack_size; + 
new_ti->u_stack.size = stack_size; + + AS(childregs->crs.cr1_hi).ussz = stack_size / 16; + } + + if (clone_flags & CLONE_VM) { + /* + * User thread creation + */ + ret = create_user_hard_stacks(&new_ti->u_hw_stack, + &childregs->stacks); + if (ret) + return ret; + + ret = clone_prepare_spilled_user_stacks(&childregs->stacks, + &childregs->crs, regs, new_sw_regs, new_ti, + clone_flags); + if (ret) + return ret; + + /* TODO FIXME on fork g_list should be copied, not zeroed */ + clear_g_list(task_thread_info(new_task)); + + /* + * New thread will use different signal stack + */ + new_ti->signal_stack.base = 0; + new_ti->signal_stack.size = 0; + new_ti->signal_stack.used = 0; + } else { + /* + * User process creation + */ + copy_spilled_user_stacks(&childregs->stacks, &childregs->crs, + new_sw_regs, new_ti); + + ret = copy_old_u_pcs_list(new_ti, current_thread_info()); + if (ret) + return ret; + + ret = copy_getsp_adj(new_ti, current_thread_info()); + if (ret) + return ret; + + /* + * Stacks of the fork'ed process are located at the same address + */ + new_ti->u_hw_stack = current_thread_info()->u_hw_stack; + +#ifdef CONFIG_PROTECTED_MODE + if (new_task->thread.flags & E2K_FLAG_PROTECTED_MODE && + !(clone_flags & CLONE_VM)) + init_pool_malloc(current, new_task); +#endif + + if (MONITORING_IS_ACTIVE) + init_monitors(new_task); + } + + /* + * __ret_from_fork() does not restore user CR registers (because + * handle_sys_call() does not do it for performance reasons), so + * make sure they are FILLed by hardware. 
+ */ + memcpy((void *) (AS(new_ti->k_pcsp_lo).base + + AS(new_sw_regs->pcsp_hi).ind), + &childregs->crs, SZ_OF_CR); + AS(new_sw_regs->pcsp_hi).ind += SZ_OF_CR; + + return 0; +} + +void native_deactivate_mm(struct task_struct *dead_task, struct mm_struct *mm) +{ + struct thread_info *ti = task_thread_info(dead_task); + struct pt_regs *regs = ti->pt_regs; + int ret; + + if (!mm) + return; + + DebugEX("entered for task 0x%px %d [%s], mm 0x%lx\n", + dead_task, dead_task->pid, dead_task->comm, mm); + BUG_ON(dead_task != current); + +#if defined(CONFIG_E2K) && defined(CONFIG_PROTECTED_MODE) + if (unlikely(ti->pm_robust_list)) { + pm_exit_robust_list(dead_task); + ti->pm_robust_list = NULL; + } +#endif + + /* + * There may be coredump in progress + */ + if (regs) + do_user_hw_stacks_copy_full(®s->stacks, regs, NULL); + +#ifdef CONFIG_MLT_STORAGE + /* + * Do not want any surprises from MLT later on. + */ + /* FIXME: MLT support is not yet implemented for guest kernel */ + if (!paravirt_enabled()) + machine.invalidate_MLT(); +#endif + + hw_context_deactivate_mm(dead_task); + + /* + * Free user hardware stacks, as kernel created them and only kernel + * knows about them. We must free both physical and virtual memory. 
+ */ + ret = free_user_hardware_stacks(&ti->u_hw_stack); + if (ret) { + pr_err("%s(): Could not free user hardware stacks, error %d\n", + __func__, ret); + dump_stack(); + } + + free_signal_stack(&ti->signal_stack); + + DebugEX("successfully finished\n"); +} + +void release_thread(struct task_struct *dead_task) +{ + DebugP("is empty function for task %s pid %d\n", + dead_task->comm, dead_task->pid); +} + +void exit_thread(struct task_struct *task) +{ + thread_info_t *ti = task_thread_info(task); + + DebugP("CPU#%d : started for %s pid %d, user data stack base 0x%lx\n", + smp_processor_id(), current->comm, current->pid, + ti->u_stack.bottom); + + free_getsp_adj(&ti->getsp_adj); + + if (task != current) { + if (task->mm) { + /* We don't have to free virtual memory if this was + * a fork (and we'd have to switch mm to do it). */ + if (task->mm != current->mm) { + SET_PS_BASE(&ti->u_hw_stack, NULL); + SET_PCS_BASE(&ti->u_hw_stack, NULL); + return; + } + BUG_ON(current->mm != current->active_mm); + + /* It is possible that copy_process() failed after + * allocating stacks in copy_thread(). In this case + * we must free the allocated stacks. 
*/ + free_user_hw_stacks(&ti->u_hw_stack); + } + + return; + } + +#ifdef CONFIG_HAVE_EL_POSIX_SYSCALL + if (current->pobjs) + pthread_exit(); +#endif + +#ifdef CONFIG_PROTECTED_MODE + free_global_sp(); +#endif /* CONFIG_PROTECTED_MODE */ + +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT + if (debug_process_name != NULL && + !strncmp(current->comm, debug_process_name, + debug_process_name_len)) { + sys_e2k_print_kernel_times(current, ti->times, + ti->times_num, ti->times_index); + } +#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */ + +#ifdef CONFIG_KVM_HOST_MODE + /* FIXME: it should be without ifdef/endif */ + kvm_resume_vm_thread(); +#endif /* CONFIG_KVM_HOST_MODE */ + +#ifdef CONFIG_MONITORS + if (MONITORING_IS_ACTIVE) { + process_monitors(current); + add_dead_proc_events(current); + } +#endif /* CONFIG_MONITORS */ + + DebugP("exit_thread exited.\n"); +} + +void machine_restart(char * __unused) +{ + DebugP("machine_restart entered.\n"); + + if (machine.restart != NULL) + machine.restart(__unused); + + DebugP("machine_restart exited.\n"); +} +EXPORT_SYMBOL(machine_restart); + +void machine_halt(void) +{ + DebugP("machine_halt entered.\n"); + + if (machine.halt != NULL) + machine.halt(); + + DebugP("machine_halt exited.\n"); +} + +void machine_power_off(void) +{ + DebugP("machine_power_off entered.\n"); + + if (machine.power_off != NULL) + machine.power_off(); + + DebugP("machine_power_off exited.\n"); +} + +/* + * We use this if we don't have any better + * idle routine.. 
+ */ +void native_default_idle(void) +{ + /* loop is done by the caller */ + local_irq_enable(); +} +EXPORT_SYMBOL(native_default_idle); + +void arch_cpu_idle_enter() +{ + /* It works under CONFIG_E2K_PROFILING flag only */ + cpu_idle_time(); +} + +void arch_cpu_idle_exit() +{ + /* It works under CONFIG_E2K_PROFILING flag only */ + calculate_cpu_idle_time(); +} + +void arch_cpu_idle() +{ + default_idle(); +} + +void flush_thread(void) +{ + DebugP("flush_thread entered.\n"); + +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + current_thread_info()->last_ic_flush_cpu = -1; +#endif + flush_ptrace_hw_breakpoint(current); + + DebugP("flush_thread exited.\n"); +} + +int dump_fpu( struct pt_regs *regs, void *fpu ) +{ + DebugP("dump_fpu entered.\n"); + DebugP("dump_fpu exited.\n"); + + return 0; +} + + +/* + * For coredump + * + * The threads can't free hw_stacks because coredump will + * used those stacks and + * the threads must free pc & p stacks after finish_coredump + * struct delayed_hw_stack is used to create delayed free hw_stacks + * + * core_state->delay_free_stacks -> list of struct delayed_hw_stack + * for all threads + */ +struct delayed_hw_stack { + struct list_head list_entry; + hw_stack_t hw_stacks; /* hardware stacks state */ +}; + +void clear_delayed_free_hw_stacks(struct mm_struct *mm) +{ + struct core_state *core_state = mm->core_state; + struct delayed_hw_stack *delayed_hw_stack, *n; + mm_context_t *context = &mm->context; + + DebugCD(" %s beginn pid=%d core_state=%px mm=%px context=%px\n", + __func__, current->pid, core_state, mm, context); + if (!core_state) { + return; + } + + down_write(&context->core_lock); + list_for_each_entry_safe(delayed_hw_stack, n, + &context->delay_free_stacks, list_entry) { + __list_del_entry(&delayed_hw_stack->list_entry); + free_user_hw_stacks(&delayed_hw_stack->hw_stacks); + kfree(delayed_hw_stack); + } + up_write(&context->core_lock); +} + +void create_delayed_free_hw_stacks(void) +{ + struct mm_struct *mm = current->mm; + 
struct core_state *core_state = mm->core_state; + mm_context_t *context = &mm->context; + struct delayed_hw_stack *delayed_hw_stack; + thread_info_t *ti = task_thread_info(current); + + DebugCD("begin core_state=%px\n", core_state); + + if (!core_state) { + return; + } + + delayed_hw_stack = kmalloc(sizeof(struct delayed_hw_stack), GFP_KERNEL); + BUG_ON(delayed_hw_stack == NULL); + + /* copy lists */ + INIT_LIST_HEAD(&delayed_hw_stack->list_entry); + delayed_hw_stack->hw_stacks = ti->u_hw_stack; + + down_write(&context->core_lock); + list_add_tail(&delayed_hw_stack->list_entry, + &context->delay_free_stacks); + up_write(&context->core_lock); +} + +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT +static __always_inline void flush_ic_on_switch(void) +{ + struct thread_info *ti = current_thread_info(); + int cpu = raw_smp_processor_id(); + + if (unlikely(ti->last_ic_flush_cpu >= 0 && + ti->last_ic_flush_cpu != cpu)) { + ti->last_ic_flush_cpu = cpu; + __flush_icache_all(); + } +} +#else +static __always_inline void flush_ic_on_switch(void) { } +#endif + +/* + * Tricks for __switch_to(), sys_clone(), sys_fork(). + * When we switch_to() to a new kernel thread or a forked process, + * it works as follows: + * + * 1. __schedule() calls switch_to() (which is defined to __switch_to() + * and is inlined) for the new task. + * + * 2. __switch_to() does E2K_FLUSHCPU and saves and sets the new stacks regs. + * + * 3. When __switch_to() returns there will be a FILL operation + * and __schedule() window for a next process will be loaded into + * register file. + * + * 4. When creating a new thread/process, __ret_from_fork() window + * will be FILLed instead. + * + * 5. Return value of system call in child will be that of __ret_from_fork(). 
+ */ +notrace +struct task_struct *__switch_to(struct task_struct *prev, + struct task_struct *next) +{ + thread_info_t *next_ti = task_thread_info(next); + + /* Save interrupt mask state and disable NMIs */ + UPSR_ALL_SAVE_AND_CLI(AW(prev->thread.sw_regs.upsr)); + + NATIVE_SAVE_TASK_REGS_TO_SWITCH(prev); + + native_set_current_thread_info(task_thread_info(next), next); + + NATIVE_RESTORE_TASK_REGS_TO_SWITCH(next, next_ti); + + flush_ic_on_switch(); + + /* Restore interrupt mask and enable NMIs */ + UPSR_RESTORE(AW(next->thread.sw_regs.upsr)); + + return prev; +} + +unsigned long arch_randomize_brk(struct mm_struct *mm) +{ + return randomize_page(mm->brk, 0x02000000); +} + +int find_cui_by_ip(unsigned long ip) +{ + struct mm_struct *mm = current->mm; + e2k_cute_t __user *cut = (e2k_cute_t __user *) USER_CUT_AREA_BASE; + int i, cui = -ESRCH; + + if (!TASK_IS_PROTECTED(current)) + return USER_CODES_UNPROT_INDEX(current); + + mutex_lock(&mm->context.cut_mask_lock); + + for_each_set_bit(i, mm->context.cut_mask, + USER_CUT_AREA_SIZE/sizeof(e2k_cute_t)) { + e2k_cute_dw0_t dw0; + e2k_cute_dw1_t dw1; + + if (__get_user(AW(dw0), &AW(cut[i].dw0)) || + __get_user(AW(dw1), &AW(cut[i].dw1))) { + cui = -EFAULT; + break; + } + + if (ip >= AS(dw0).cud_base && + ip < AS(dw0).cud_base + AS(dw1).cud_size) { + cui = i; + break; + } + } + + mutex_unlock(&mm->context.cut_mask_lock); + + return cui; +} + +SYSCALL_DEFINE5(arch_prctl, int, option, + unsigned long, arg2, unsigned long, arg3, + unsigned long, arg4, unsigned long, arg5) +{ + long error; + + error = security_task_prctl(option, arg2, arg3, arg4, arg5); + if (error != -ENOSYS) + return error; + + error = 0; + switch (option) { +#ifdef CONFIG_PROTECTED_MODE + /* returns PM debug mode status: */ + case PR_PM_DBG_MODE_GET: + if (arg2 || arg3 || arg4 || arg5) + return -EINVAL; + error = current->mm->context.pm_sc_debug_mode; + break; + /* resets PM debug mode status and returns '0': */ + case PR_PM_DBG_MODE_RESET: + if (arg2 || arg3 
|| arg4 || arg5) + return -EINVAL; + error = current->mm->context.pm_sc_debug_mode; + current->mm->context.pm_sc_debug_mode = PM_SC_DBG_MODE_DEFAULT; + break; + /* sets PM debug mode status and returns previous status: */ + case PR_PM_DBG_MODE_SET: + if (arg3 || arg4 || arg5) + return -EINVAL; + error = current->mm->context.pm_sc_debug_mode; + current->mm->context.pm_sc_debug_mode = arg2 + | PM_SC_DBG_MODE_INIT; + break; + case PR_PM_DBG_MODE_ADD: + if (!arg2 || arg3 || arg4 || arg5) + return -EINVAL; + current->mm->context.pm_sc_debug_mode |= arg2; + break; + case PR_PM_DBG_MODE_DEL: + if (!arg2 || arg3 || arg4 || arg5) + return -EINVAL; + current->mm->context.pm_sc_debug_mode &= ~arg2; + break; +#endif /* CONFIG_PROTECTED_MODE */ + default: + if (current->mm->context.pm_sc_debug_mode + & PM_SC_DBG_MODE_CHECK) + pr_err("Unknown option 0x%x in 'arch_prctl' syscall\n", + option); + error = -EINVAL; + break; + } + return error; +} + +/** + * chain_stack_frame_init - initialize chain stack frame for current + * task from provided parameters + * @crs - frame to initialize + * @fn_ptr - IP to return to + * @dstack_size - free size of data stack _after_ return + * @wbs - cr1_lo.wbs value + * @wpsz - cr1_lo.wpsz value + * @user - execute in user or kernel mode + * + * We could try to derive @user and cui from @fn_ptr by comparing it + * to TASK_SIZE but we must not: if user controls @fn_ptr value then + * this would be a security hole. 
+ */ +int chain_stack_frame_init(e2k_mem_crs_t *crs, void *fn_ptr, + size_t dstack_size, e2k_psr_t psr, + int wbs, int wpsz, bool user) +{ + unsigned long fn = (unsigned long) fn_ptr; + + if (user && psr.pm) + return -EPERM; + + memset(crs, 0, sizeof(*crs)); + + AS(crs->cr0_lo).pf = -1ULL; + AS(crs->cr0_hi).ip = fn >> 3; + AS(crs->cr1_lo).psr = AW(psr); + AS(crs->cr1_lo).wbs = wbs; + AS(crs->cr1_lo).wpsz = wpsz; + AS(crs->cr1_hi).ussz = dstack_size / 16; + + if (user) { + int cui = find_cui_by_ip(fn); + if (cui < 0) + return cui; + if (machine.native_iset_ver < E2K_ISET_V6) + AS(crs->cr1_lo).ic = 0; + AS(crs->cr1_lo).cui = cui; + } else { + if (machine.native_iset_ver < E2K_ISET_V6) + AS(crs->cr1_lo).ic = 1; + AS(crs->cr1_lo).cui = KERNEL_CODES_INDEX; + } + + return 0; +} + +#ifdef CONFIG_PREEMPT_RT +static int __init e2k_idle_init(void) +{ + cpu_idle_poll_ctrl(true); + return 0; +} +arch_initcall(e2k_idle_init); +#endif diff --git a/arch/e2k/kernel/protected_mq_notify.c b/arch/e2k/kernel/protected_mq_notify.c new file mode 100644 index 000000000000..53d954eb35c5 --- /dev/null +++ b/arch/e2k/kernel/protected_mq_notify.c @@ -0,0 +1,153 @@ +/* linux/arch/e2k/kernel/protected_mq_notify.c, v 1.0 02/11/2019. + * + * This is implementation of the system call mq_notify: + * int mq_notify(mqd_t mqdes, const struct sigevent *sevp) + * for E2K protected mode. + * + * Copyright (C) 2019 MCST + */ + + +#include +#include + +#include +#include +#include + + +#undef DEBUG_SYSCALLP +#define DEBUG_SYSCALLP 0 /*Protected System Calls trace */ +#if DEBUG_SYSCALLP +#define DbgSCP(...) DebugPrint(DEBUG_SYSCALLP, ##__VA_ARGS__) +#else +#define DbgSCP(...) +#define DbgSCP_print_array(...) 
+#endif + + +#ifdef CONFIG_PROTECTED_MODE + +#define USER_SIVAL_PTR_OFFSET_LO 0 +#define USER_SIVAL_PTR_OFFSET_HI 8 + +#define get_user_space(x) arch_compat_alloc_user_space(x) + + +#if DEBUG_SYSCALLP +static void DbgSCP_print_array(void *array, int wordnum) +{ +int *ptr = (int *) array; +int i; + if (!array || !wordnum) + return; + DbgSCP("print_array 0x%px of %d words:\n", array, wordnum); + for (i = 0; i < wordnum; i++) { + DbgSCP("\t0x%.8x\n", *ptr); + ptr++; + } +} +#endif /* DEBUG_SYSCALLP */ + +long protected_sys_mq_notify(const long arg1 /*mqdes*/, + const unsigned long __user arg2 /*sevp*/) +{ + unsigned int size; + long rval = -EINVAL; + /* + * struct sigevent: {int/(f)ptr} [int][int] {int,ptr,[fptr,ptr]} + * MASK_type_INT: 0b 11 10 00 00 00 int-int-int-fptr-ptr + * MASK_type_PTR: 0b 11 10 00 00 11 ptr-int-int-fptr-ptr + * MASK_type_FPTR: 0b 11 10 00 00 10 fptr-int-int-fptr-ptr + * MASK_align: 0b 11 11 11 00 11 16b- 4b-16b- 16b-16b + * NB> {...} - union; [...] - field/structure. 
+ */ +#define MQ_NOTIFY_MASK_typeI 0x0 /* integers in fields 1-3 */ +#define MQ_NOTIFY_MASK_typeP 0x3 /* pointer at the 1st union field */ +#define MQ_NOTIFY_MASK_typeF 0x2 /* ptr-to-function at the 1st union field */ +#define MQ_NOTIFY_MASK_type2 0x380 /* fptr/ptr in the 4th/5th fields */ +#define MQ_NOTIFY_MASK_align1 0x33 /* fields 1-3 */ +#define MQ_NOTIFY_MASK_align2 0x3c0 /* fields 4-5 */ +#define MQ_NOTIFY_STRING "Bad sigevent stack descriptor for mq_notify\n" +#define PROT_SIZEOF_SIGEVENT 80 /* structure size in the user space (in PM) */ +#define PROT_SIGEV_NOTIFY_OFFSET_DELTA 2 /* field offset shift in PM */ + void *ev = NULL; + void *kernel_ptr = NULL; + long user_ptr_lo = 0, user_ptr_hi = 0; + int sival_ptr_tags = 0; + int signum = 0; + + DbgSCP("arg1 = %ld, arg2 = %px\n", arg1, (void *)arg2); + if (arg2) { + long mask_type; + long align_type = MQ_NOTIFY_MASK_align1; + int tag; + int *sigev_notify_ptr; /* pointer to the sigev_notify field */ + + size = PROT_SIZEOF_SIGEVENT; + + TRY_USR_PFAULT { + NATIVE_LOAD_VAL_AND_TAGD(arg2 + + USER_SIVAL_PTR_OFFSET_LO, + user_ptr_lo, sival_ptr_tags); + NATIVE_LOAD_VAL_AND_TAGD(arg2 + + USER_SIVAL_PTR_OFFSET_HI, + user_ptr_hi, tag); + } CATCH_USR_PFAULT { + return -EFAULT; + } END_USR_PFAULT + sival_ptr_tags |= tag << 4; + + switch (sival_ptr_tags) { + case ETAGNUM: + mask_type = MQ_NOTIFY_MASK_typeI; + break; + case ETAGAPQ: + mask_type = MQ_NOTIFY_MASK_typeP; + break; + case ETAGPLD: + mask_type = MQ_NOTIFY_MASK_typeF; + break; + case ETAGPLQ: /* this is for future Elbrus arch V6 */ + pr_err("__NR_mq_notify: unsupported tag ETAGPLQ (0x%x)\n", + sival_ptr_tags); + DbgSCP("\tptr_lo=0x%lx ptr_hi=0x%lx\n", + user_ptr_lo, user_ptr_hi); + DbgSCP_print_array((long *)arg2, size); + return -EINVAL; + default: + mask_type = MQ_NOTIFY_MASK_typeI; + } + /* Checking the content of the 'sigev_notify' field: */ + sigev_notify_ptr = (int *)(&(((sigevent_t *)arg2)->sigev_notify) + + PROT_SIGEV_NOTIFY_OFFSET_DELTA); + if 
(*sigev_notify_ptr == SIGEV_THREAD) { + align_type |= MQ_NOTIFY_MASK_align2; + mask_type |= MQ_NOTIFY_MASK_type2; + } + + ev = get_user_space(size); + + rval = convert_array((long *)arg2, ev, size, + 5, 1, mask_type, align_type); + if (rval) { + DbgSCP(MQ_NOTIFY_STRING); + return rval; + } + kernel_ptr = ((sigevent_t *)ev)->sigev_value.sival_ptr; + signum = ((sigevent_t *)ev)->sigev_signo; + } + DbgSCP("sys_mq_notify(%ld, %px)\n", arg1, ev); + rval = sys_mq_notify((mqd_t)arg1, (const sigevent_t *) ev); + + if (rval || !arg2) + return rval; + + /* Saving sival_ptr in sival_ptr_list: */ + store_descriptor_attrs(kernel_ptr, user_ptr_lo, user_ptr_hi, + sival_ptr_tags, signum); + + return rval; +} + +#endif /* CONFIG_PROTECTED_MODE */ diff --git a/arch/e2k/kernel/protected_syscalls.c b/arch/e2k/kernel/protected_syscalls.c new file mode 100644 index 000000000000..83e4a8359d9b --- /dev/null +++ b/arch/e2k/kernel/protected_syscalls.c @@ -0,0 +1,4035 @@ +/* linux/arch/e2k/kernel/protected_syscalls.c, v 1.0 03/25/2019. + * + * This is implementation of system call handlers for E2K protected mode: + * int protected_sys_(const long a1, ... a6, + * const struct pt_regs *regs); + * + * Copyright (C) 2019 MCST + */ + + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#ifdef CONFIG_PROTECTED_MODE + +#if (DYNAMIC_DEBUG_SYSCALLP_ENABLED) + /* NB> PM debug module must have been initialized by the moment + * of invocation of any of the functions that follow; + * we can use simple defines over here. + * For full ones see . + */ +#undef DbgSCP +#if defined(CONFIG_THREAD_INFO_IN_TASK) && defined(CONFIG_SMP) +#define DbgSCP(fmt, ...) 
\ +do { \ + if (current->mm->context.pm_sc_debug_mode \ + & PM_SC_DBG_MODE_COMPLEX_WRAPPERS) \ + pr_info("%s [%.3d#%d]: %s: " fmt, current->comm, \ + current->cpu, current->pid, \ + __func__, ##__VA_ARGS__); \ +} while (0) +#else /* no 'cpu' field in 'struct task_struct' */ +#define DbgSCP(fmt, ...) \ +do { \ + if (current->mm->context.pm_sc_debug_mode \ + & PM_SC_DBG_MODE_COMPLEX_WRAPPERS) \ + pr_info("%s [#%d]: %s: " fmt, current->comm, \ + current->pid, \ + __func__, ##__VA_ARGS__); \ +} while (0) +#endif /* no 'cpu' field in 'struct task_struct' */ + +#undef DbgSCP_ERROR +#define DbgSCP_ERROR(fmt, ...) \ +do { \ + if ((current->mm->context.pm_sc_debug_mode & \ + (PM_SC_DBG_MODE_CHECK | PM_SC_DBG_MODE_NO_ERR_MESSAGES)) == \ + PM_SC_DBG_MODE_CHECK) \ + pr_info("%s [%d]: " fmt, current->comm, current->pid, \ + ##__VA_ARGS__); \ +} while (0) + +#undef DbgSCP_ERR +#define DbgSCP_ERR(fmt, ...) \ + DbgSCP_ERROR("%s: " fmt, __func__, ##__VA_ARGS__) + +#undef DbgSCP_ALERT +#define DbgSCP_ALERT(fmt, ...) \ +do { \ + if (current->mm->context.pm_sc_debug_mode & PM_SC_DBG_MODE_CHECK \ + && !(current->mm->context.pm_sc_debug_mode \ + & PM_SC_DBG_MODE_NO_ERR_MESSAGES)) \ + pr_info("%s [%d]: " fmt, current->comm, current->pid, \ + ##__VA_ARGS__); \ +} while (0) + +#undef DbgSCP_WARN +#define DbgSCP_WARN(fmt, ...) \ +do { \ + if (current->mm->context.pm_sc_debug_mode & PM_SC_DBG_MODE_CHECK \ + && !(current->mm->context.pm_sc_debug_mode \ + & PM_SC_DBG_MODE_NO_ERR_MESSAGES)) \ + pr_info("%s [%d]: " fmt, current->comm, current->pid, \ + ##__VA_ARGS__); \ +} while (0) + +#undef PM_SYSCALL_WARN_ONLY +#define PM_SYSCALL_WARN_ONLY \ + (current->mm->context.pm_sc_debug_mode & PM_SC_DBG_MODE_WARN_ONLY) + +#endif /* DYNAMIC_DEBUG_SYSCALLP_ENABLED */ + + +#define get_user_space(x) arch_compat_alloc_user_space(x) + +/* NB> 'n' below is syscall argument number; + * 'i' - is register number. 
+ */ +#define PROT_SC_ARG_TAGS(n) ((regs->tags >> 8*(n)) & 0xff) +#define NOT_PTR(n) (PROT_SC_ARG_TAGS(n) != ETAGAPQ) +#define NULL_PTR(n, i) ((((regs->tags >> (8*(n))) & 0xf) == E2K_NULLPTR_ETAG) \ + && (arg##i == 0)) + +#define DESCRIPTOR_SIZE 16 + + #define FATAL_ERR_READ "FATAL ERROR: failed to read from 0x%lx !!!\n" +#define FATAL_ERR_WRITE "FATAL ERROR: failed to write at 0x%lx !!!\n" + + +static inline unsigned long ptr128_to_64(e2k_ptr_t dscr) +{ + return dscr.fields.ap.base + dscr.fields.ap.curptr; +} + + +/* + * Counts the number of descriptors in array, which is terminated by NULL + * (For counting of elements in argv and envp arrays) + */ +notrace __section(".entry.text") +static int count_descriptors(long __user *prot_array, const int prot_array_size) +{ + int i; + long tmp[2]; + + if (prot_array == NULL) + return 0; + + /* Ensure that protected array is aligned and sized properly */ + if (!IS_ALIGNED((u64) prot_array, 16)) + return -EINVAL; + + /* Read each entry */ + for (i = 0; 8 * i + 16 <= prot_array_size; i += 2) { + long hi, lo; + int htag, ltag; + + if (copy_from_user_with_tags(tmp, &prot_array[i], 16)) + return -EFAULT; + + NATIVE_LOAD_VAL_AND_TAGD(tmp, lo, ltag); + NATIVE_LOAD_VAL_AND_TAGD(&tmp[1], hi, htag); + + /* If zero is met, it is the end of array*/ + if (lo == 0 && hi == 0 && ltag == 0 && htag == 0) + return i >> 1; + } + + return -EINVAL; +} + + +/* + * Scans environment for the given env var. + * Reports: value of the given env var; 0 - if doesn't exist. + */ +static inline +char *pm_getenv(const char *env_var_name, const size_t max_len) +{ + /* NB> Length of the environment record expected less that 'max_len'. 
*/ + unsigned long __user uenvp; + size_t len, lenvar; + unsigned long kenvp; + unsigned long lmax = 128; + long copied; + + if (!current->mm || !current->mm->env_start) + return 0; + if (current->mm->env_start >= current->mm->env_end) + return 0; + lenvar = strlen(env_var_name); + kenvp = (unsigned long)kmalloc(lmax, GFP_KERNEL); + for (uenvp = current->mm->env_start; + uenvp < current->mm->env_end; + uenvp += len) /* strnlen_user accounts terminating '\0' */ { + len = strnlen_user((void __user *)uenvp, + current->mm->env_end - uenvp); + if (!len) + break; + else if ((len < lenvar) || (len > max_len)) + continue; + if (lmax < len) { + lmax = (len + 127) & 0xffffff80; + kenvp = (unsigned long)krealloc((void *)kenvp, + lmax, GFP_KERNEL); + } + copied = strncpy_from_user((void *)kenvp, + (void __user *)uenvp, len); + if (!copied) + continue; + else if (copied < 0) { + pr_alert("%s:%d: Cannot strncpy_from_user(len = %zd)\n", + __func__, __LINE__, len); + break; + } + if (!strncmp(env_var_name, (void *)kenvp, min(lenvar, len))) { + if (current->mm->context.pm_sc_debug_mode + & PM_SC_DBG_MODE_DEBUG) + pr_info("ENVP: %s\n", (char *)kenvp); + if (*((char *)(kenvp + lenvar)) == '=') + return (char *)(kenvp + lenvar + 1); + pr_alert("Wrong env var found: %s\n", (char *)kenvp); + } + } + kfree((void *)kenvp); + return 0; +} + +/* + * Checks for PM debug mode env var setup and outputs corresponding debug mask. + * 'max_len' - maximum expected env var length. + * Returns: mask to apply to 'pm_sc_debug_mode' if env var is "set"; + * 0 - otherwise. 
+ */ +static +unsigned long check_debug_mask(const char *env_var_name, const size_t max_len, + const unsigned long mask) +{ + char *env_val; + + env_val = pm_getenv(env_var_name, max_len); + if (!env_val) + return 0; + if (!*env_val || env_val[1]) /* single char expected as env var value */ + goto wrong_val_out; + + if ((*env_val == '1') || (*env_val == 'y') || (*env_val == 'Y')) + return mask; + if ((*env_val == '0') || (*env_val == 'n') || (*env_val == 'N')) + return ~mask; + +wrong_val_out: + pr_alert("Wrong value of the env var %s = %s\n", + env_var_name, env_val); + pr_alert("Legal values: 0/1/y/n/Y/N\n"); + return 0; +} + +#define CHECK_DEBUG_MASK(mask_name) \ +do { \ + mask = check_debug_mask(#mask_name, 48, mask_name); \ + if (mask) { \ + if (mask & mask_name) /* positive mask */ \ + context->pm_sc_debug_mode |= mask; \ + else /* negative mask */ \ + context->pm_sc_debug_mode &= mask; \ + } \ +} while (0) + +/* Checks if the given env var is defined in the environment. + * Returns: 1 - if "reset/disabled" env var found; 0 - otherwise. 
+ */ +static inline +int pm_sc_debug_envp_check(mm_context_t *context) +{ + unsigned long mask; + + /* Checking for env vars: */ + mask = check_debug_mask("PM_SC_DBG_MODE_DISABLED", 48, + PM_SC_DBG_MODE_INIT); + if (mask & PM_SC_DBG_MODE_INIT) { /* positive mask */ + context->pm_sc_debug_mode = PM_SC_DBG_MODE_INIT; + return 1; + } + + mask = check_debug_mask("PM_SC_DBG_MODE_ALL", 48, + PM_SC_DBG_MODE_ALL); + if (mask & PM_SC_DBG_MODE_ALL) { /* positive mask */ + context->pm_sc_debug_mode |= PM_SC_DBG_MODE_ALL; + pr_info("ENVP: PM_SC_DBG_MODE_ALL=1\n"); + return 0; + } + + CHECK_DEBUG_MASK(PM_SC_DBG_MODE_DEBUG); + CHECK_DEBUG_MASK(PM_SC_DBG_MODE_COMPLEX_WRAPPERS); + CHECK_DEBUG_MASK(PM_SC_DBG_MODE_CHECK); + CHECK_DEBUG_MASK(PM_SC_DBG_MODE_CONV_STRUCT); + CHECK_DEBUG_MASK(PM_SC_DBG_MODE_SIGNALS); + CHECK_DEBUG_MASK(PM_SC_DBG_MODE_NO_ERR_MESSAGES); + /* Protected mode setup: */ + mask = check_debug_mask("PM_SC_DBG_MODE_WARN_ONLY", + 48, PM_SC_DBG_MODE_WARN_ONLY); + if (mask) { + if (mask & PROTECTED_MODE_SOFT) /* positive mask */ + context->pm_sc_debug_mode |= mask; + else /* negative mask */ + context->pm_sc_debug_mode &= mask; + } + CHECK_DEBUG_MASK(PROTECTED_MODE_SOFT); + /* libc mmu control stuff: */ + CHECK_DEBUG_MASK(PM_MM_CHECK_4_DANGLING_POINTERS); + CHECK_DEBUG_MASK(PM_MM_EMPTYING_FREED_POINTERS); + CHECK_DEBUG_MASK(PM_MM_ZEROING_FREED_POINTERS); + + context->pm_sc_debug_mode |= PM_SC_DBG_MODE_INIT; + + if (IF_PM_DBG_MODE(PM_SC_DBG_MODE_DEBUG)) + pr_info("\tpm_sc_debug_mode = 0x%lx\n", + context->pm_sc_debug_mode); + + return 0; +} + +int arch_init_pm_sc_debug_mode(const int debug_mask) +{ + mm_context_t *context = ¤t->mm->context; + + if (context->pm_sc_debug_mode & PM_SC_DBG_MODE_INIT) + return context->pm_sc_debug_mode & debug_mask; + + /* Checking for env vars: */ + if (pm_sc_debug_envp_check(context)) + return 0; + + return context->pm_sc_debug_mode & debug_mask; +} + +void pm_deliver_exception(int signo, int code, int errno) +/* Sometimes we need to 
deliver exception to end up execution of the current thread: */ +{ + struct kernel_siginfo info; + + clear_siginfo(&info); + info.si_signo = signo; /* f.e. SIGILL */ + info.si_code = code; /* f.e. ILL_ILLOPN - "illegal operand" */ + info.si_errno = errno; /* f.e. -EINVAL */ + + if (force_sig_info(&info)) + pr_alert("Failed to deliver exception at %s:%d\n", + __FILE__, __LINE__); +} + + +struct protected_user_msghdr { + e2k_ptr_t msg_name; /* ptr to socket address structure */ + int msg_namelen; /* size of socket address structure */ + unsigned long pad_align1; /* placeholder to align next prot ptr */ + e2k_ptr_t msg_iov; /* scatter/gather array */ + __kernel_size_t msg_iovlen; /* # elements in msg_iov */ + unsigned long pad_align2; /* placeholder to align next prot ptr */ + e2k_ptr_t msg_control; /* ancillary data */ + __kernel_size_t msg_controllen; /* ancillary data buffer length */ + unsigned int msg_flags; /* flags on received message */ +}; + +static struct user_msghdr __user *convert_msghdr( + struct protected_user_msghdr __user *prot_msghdr, + unsigned int size, + const char *syscall_name, + void __user *user_buff) +/* Converts user msghdr structure from protected to regular structure format. + * Outputs converted structure (allocated in user space if (user_buff == NULL)). + * 'prot_msghdr' - protected message header structure. + * 'size' - size of the input structure. + * 'user_buff' - buffer for converted structure in user space. 
+ */ +{ + long __user *args = (long *) user_buff; + struct user_msghdr __user *converted_msghdr = NULL; + struct iovec __user *converted_iovec; + int err_mh, err_iov; + +#define MASK_MSGHDR_TYPE 0x0773 /* type mask for struct msghdr */ +#define MASK_MSGHDR_ALIGN 0x17ff /* alignment mask for msghdr structure */ +#define MASK_MSGHDR_RW 0x2000 /* WRITE-only msg_flags field */ +#define SIZE_MSGHDR 96 /* size of struct msghdr in user space */ +#define MASK_IOVEC_TYPE 0x7 /* mask for converting of struct iovec */ +#define MASK_IOVEC_ALIGN 0xf /* alignment mask for struct iovec */ +#define SIZE_IOVEC 32 /* size of struct iovec in user space */ + /* + * Structures user_msghdr and iovec contain pointers + * inside, therefore they need to be additionally + * converted with saving results in these structures + */ + + /* Allocating space on user stack for converted structures: */ + if (!args) + args = get_user_space(sizeof(struct user_msghdr) + + sizeof(struct iovec)); + + /* Convert struct msghdr: */ + converted_msghdr = (struct user_msghdr *) args; + err_mh = convert_array_3((long *)prot_msghdr, (long *) converted_msghdr, + SIZE_MSGHDR, 7, 1, MASK_MSGHDR_TYPE, + MASK_MSGHDR_ALIGN, MASK_MSGHDR_RW, + CONV_ARR_WRONG_DSCR_FLD); + if (err_mh) + DbgSCP_ALERT("Bad user_msghdr in syscall \'%s\'\n", + syscall_name); + + if (converted_msghdr->msg_iov) { + /* Convert struct iovec from msghdr->msg_iov */ + converted_iovec = (struct iovec *) + ((char *)converted_msghdr + sizeof(struct user_msghdr)); + err_iov = convert_array_3((long *) converted_msghdr->msg_iov, + (long *) converted_iovec, + SIZE_IOVEC, 2, 1, MASK_IOVEC_TYPE, + MASK_IOVEC_ALIGN, 0, + CONV_ARR_WRONG_DSCR_FLD); + if (err_iov) { + DbgSCP_ALERT("Bad struct iovec in msghdr (syscall \'%s\')\n", + syscall_name); + } + } else { + DbgSCP_ALERT("Empty struct iovec in msghdr (syscall \'%s\')\n", + syscall_name); + converted_iovec = NULL; + } + + /* Assign converted iovec pointer to converted msghdr structure: */ + 
converted_msghdr->msg_iov = converted_iovec; + + return (struct user_msghdr *) args; +} + + +notrace __section(".entry.text") +long protected_sys_sigaltstack(const stack_prot_t __user *ss_128, + stack_prot_t __user *old_ss_128, + const unsigned long unused3, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs) +{ +#define SIGALTST_MASK_TYPE 0x103 +#define SIGALTST_MASK_ALIGN 0x113 +#define SIGALTST_ERR1 "Both 'ss' and 'old_ss' arguments to sigaltstack() are empty\n" +#define SIGALTST_ERR2 "Bad 'ss' descriptor for sigaltstack()\n" +#define SIGALTST_ERRSIZE "Size of 'ss' arg (%d) is less than required by 'ss_size' field (%zd)\n" +#define SIGALTST_ERRWRITE "Failed to update 'old_ss' descriptor; error code = %d\n" + stack_t *ss = NULL; + stack_t *old_ss = NULL; + unsigned long lo, hi; + unsigned int size = 0, size1 = 0, size2 = 0; + long rval = -EINVAL; /* syscall return value */ + + if (!ss_128 && !old_ss_128) + DbgSCP_WARN(SIGALTST_ERR1); + /* NB> Currently syscall returns OK if both args are empty */ + + if (ss_128) { + size1 = e2k_ptr_size(regs->args[1], regs->args[2], + sizeof(stack_prot_t)); + if (!size1) + return -EINVAL; + size = size1; + } + if (old_ss_128) { + size2 = e2k_ptr_size(regs->args[3], regs->args[4], + sizeof(stack_prot_t)); + if (!size2) + return -EINVAL; + size += sizeof(stack_t); + } + if (size) { + void *buf; + + buf = get_user_space(size); + if (ss_128) + ss = buf; + if (old_ss_128) + old_ss = buf + size1; + } + + if (ss_128) { + /* Struct 'ss_128' contains pointer in the first field */ + rval = get_pm_struct_simple((long *) ss_128, (long *) ss, size1, + 3, 1, SIGALTST_MASK_TYPE, SIGALTST_MASK_ALIGN); + if (rval) { + DbgSCP_ALERT(SIGALTST_ERR2); + return rval; + } + /* Checking stack size for correctness: */ + rval = get_user(hi, &ss_128->ss_sp.word.hi); + if (rval) { + DbgSCP_ALERT(FATAL_ERR_READ, + (long) &ss_128->ss_sp.word.hi); + return rval; + } + size2 = e2k_ptr_size(0L 
/*lo*/, hi, 0); + if (size2 < ss->ss_size) { + DbgSCP_ALERT(SIGALTST_ERRSIZE, size2, ss->ss_size); + return -EINVAL; + } + } + + rval = sys_sigaltstack(ss, old_ss); + + if (old_ss_128) { + /* Updating 'old_ss_128': */ + e2k_ptr_t dscr; + int ret; + + if (old_ss->ss_sp) { + /* Constructing descriptor for protected 'ss_sp': */ + lo = make_ap_lo((e2k_addr_t) old_ss->ss_sp, + old_ss->ss_size, 0, RW_ENABLE); + hi = make_ap_hi((e2k_addr_t) old_ss->ss_sp, + old_ss->ss_size, 0, RW_ENABLE); + NATIVE_STORE_VALUE_WITH_TAG(&AWP(&dscr).hi, hi, + E2K_AP_HI_ETAG); + NATIVE_STORE_VALUE_WITH_TAG(&AWP(&dscr).lo, lo, + E2K_AP_LO_ETAG); + + ret = copy_to_user_with_tags(&old_ss_128->ss_sp, &dscr, + sizeof(dscr)); + if (ret) + ret = -EFAULT; + } else { + /* Zeroing protected 'ss_sp' field: */ + ret = put_user(0L, &old_ss_128->ss_sp.word.lo); + ret |= put_user(0L, &old_ss_128->ss_sp.word.hi); + } + + ret |= put_user(old_ss->ss_flags, &old_ss_128->ss_flags); + ret |= put_user(old_ss->ss_size, &old_ss_128->ss_size); + + if (!rval) + rval = ret; + + if (ret) { + DbgSCP_ALERT(SIGALTST_ERRWRITE, ret); + } else { + DbgSCP("old_ss_128: sp=0x%lx:0x%lx flags=0x%x size=0x%zx\n", + old_ss_128->ss_sp.word.lo, + old_ss_128->ss_sp.word.hi, + old_ss_128->ss_flags, old_ss_128->ss_size); + } + } + + return rval; +} + + +notrace __section(".entry.text") +long protected_sys_clean_descriptors(void __user *addr, + unsigned long size, + const unsigned long flags, + const unsigned long unused_a4, + const unsigned long unused_a5, + const unsigned long unused_a6, + struct pt_regs *regs) +/* If (!flags) then 'addr' is a pointer to list of descriptors to clean. 
*/
+{
+	long rval = 0;		/* syscall return value */
+	unsigned int descr_size;
+	unsigned long size_to_clean = size;
+
+	DbgSCP("addr = 0x%p, size = %ld ", addr, size);
+
+	descr_size = e2k_ptr_size(regs->args[1], regs->args[2], 0);
+	if (!(flags & CLEAN_DESCRIPTORS_SINGLE))
+		size_to_clean *= sizeof(e2k_ptr_t);
+	if (descr_size < size_to_clean) {
+		DbgSCP_ERROR("clean_descriptors(ptr=0x%lx:0x%lx, size=%ld): 'size' exceeds length of 'ptr'",
+			regs->args[1], regs->args[2], size);
+		return -EFAULT;
+	}
+
+	/*
+	 * NB> '==' binds tighter than '&'; the original condition read
+	 *     'flags & ((A|B) == (A|B))', i.e. 'flags & 1', so any flags
+	 *     value with bit 0 set selected this branch regardless of the
+	 *     actual flag bits.  Parenthesized so the branch is taken only
+	 *     when both SINGLE and NO_GARB_COLL are set, which is what the
+	 *     surrounding flag validation ('else ... -EINVAL') expects.
+	 */
+	if ((flags & (CLEAN_DESCRIPTORS_SINGLE | CLEAN_DESCRIPTORS_NO_GARB_COLL)) ==
+			(CLEAN_DESCRIPTORS_SINGLE | CLEAN_DESCRIPTORS_NO_GARB_COLL)) {
+		rval = mem_set_empty_tagged_dw(addr, size, 0x0baddead0baddead);
+	} else if (!flags) {
+		rval = clean_descriptors(addr, size);
+	} else {
+		DbgSCP_ERR("wrong flags value 0x%lx", flags);
+		return -EINVAL;
+	}
+	if (rval == -EFAULT)
+		send_sig_info(SIGSEGV, SEND_SIG_PRIV, current);
+	return rval;
+}
+
+
+notrace __section(".entry.text")
+long protected_sys_clone(const unsigned long a1, /* flags */
+		const unsigned long a2, /* new_stackptr */
+		const unsigned long __user a3,/* parent_tidptr */
+		const unsigned long __user a4,/* child_tidptr */
+		const unsigned long __user a5,/* tls */
+		const unsigned long a6, /* unused */
+		struct pt_regs *regs)
+{
+	long rval; /* syscall return value */
+	unsigned int size;
+	struct kernel_clone_args args;
+
+	DbgSCP("(fl=0x%lx, newsp=0x%lx, p/ch_tidptr=0x%lx/0x%lx, tls=0x%lx)\n",
+		a1, a2, a3, a4, a5);
+	/*
+	 * User may choose to not pass additional arguments
+	 * (tls, tid) at all for historical and compatibility
+	 * reasons, so we do not fail if (a3), (a4), and (a5)
+	 * pointers are bad.
+	 *
+	 * The fifth argument (tls) requires special handling:
+	 */
+	if (a1 & CLONE_SETTLS) {
+		unsigned int tls_size;
+
+		/* TLS argument passed thru arg9,10: */
+		tls_size = (a5 == 0) ? 0 : e2k_ptr_size(regs->args[9],
+							regs->args[10],
+							sizeof(int));
+		/* Check that the pointer is good.
*/ + if (!tls_size) { + DbgSCP_ALERT(" Bad TLS pointer: size=%d, tags=%lx\n", + tls_size, PROT_SC_ARG_TAGS(5)); + return -EINVAL; + } + } + + /* + * Multithreading support - change all SAP to AP in globals + * to guarantee correct access to memory + */ + if (a1 & CLONE_VM) + mark_all_global_sp(regs, current->pid); + + size = e2k_ptr_curptr(regs->args[3], regs->args[4]); + + args.flags = (a1 & ~CSIGNAL); + args.pidfd = (int *)a3; + args.child_tid = (int *)a4; + args.parent_tid = (int *)a3; + args.exit_signal = (a1 & CSIGNAL); + args.stack = a2 - size; + args.stack_size = size; + args.tls = a5; + + /* passing size of array */ + rval = _do_fork(&args); + DbgSCP("rval = %ld, sys_num = %d size=%d\n", rval, regs->sys_num, size); + + return rval; +} + + +notrace __section(".entry.text") +long protected_sys_execve(const unsigned long __user a1,/* filename*/ + const unsigned long __user a2,/* argv[] */ + const unsigned long __user a3,/* envp[] */ + const unsigned long a4, /* not used */ + const unsigned long a5, /* not used */ + const unsigned long a6, /* not used*/ + const struct pt_regs *regs) +{ + char __user *filename = (char *) a1; + unsigned long *buf; + unsigned long *argv, *envp; + unsigned long __user *u_argv = (unsigned long *) a2; + unsigned long __user *u_envp = (unsigned long *) a3; + unsigned int size = 0, size2 = 0; + int argc = 0, envc = 0; + long rval; /* syscall return value */ + + /* Path to executable */ + if (!filename) + return -EINVAL; + + /* argv */ + if (u_argv) { + size = e2k_ptr_size(regs->args[3], regs->args[4], 0); + if (!size) + return -EINVAL; + } + + /* envp */ + if (u_envp) { + size2 = e2k_ptr_size(regs->args[5], regs->args[6], 0); + if (!size2) + return -EINVAL; + } + /* + * Note in the release 5.00 of the Linux man-pages: + * The use of a third argument to the main function + * is not specified in POSIX.1; according to POSIX.1, + * the environment should be accessed via the external + * variable environ(7). 
+ */ + + /* Count real number of entries in argv */ + argc = count_descriptors((long *) u_argv, size); + if (argc < 0) + return -EINVAL; + + /* Count real number of entries in envc */ + if (size2) { + envc = count_descriptors((long *) u_envp, size2); + if (envc < 0) + return -EINVAL; + } + + /* + * Allocate space on user stack for converting of + * descriptors in argv and envp to ints + */ + buf = get_user_space((argc + envc + 2) << 3); + argv = buf; + envp = &buf[argc + 1]; + + /* + * Convert descriptors in argv to ints. + * For statically-linked executables missing argv is allowed, + * therefore kernel doesn't return error in this case. + * For dynamically-linked executables missing argv is not + * allowed, because at least argv[0] is required by ldso for + * loading of executable. Protected ldso must check argv. + */ + if (argc) { + rval = convert_array((long *) u_argv, argv, + argc << 4, 1, argc, 0x3, 0x3); + if (rval) { + DbgSCP_ALERT("Bad argv in protected execve syscall\n"); + return rval; + } + } + /* The array argv must be terminated by zero */ + argv[argc] = 0; + + /* + * Convert descriptors in envp to ints + * envc can be zero without problems + */ + if (envc) { + rval = convert_array(u_envp, envp, + envc << 4, 1, envc, 0x3, 0x3); + if (rval) { + DbgSCP_ALERT("Bad envp in protected execve syscall\n"); + return rval; + } + } + /* The array envp must be terminated by zero */ + envp[envc] = 0; + + rval = e2k_sys_execve(filename, (char **) argv, + (char **) envp); + + DbgSCP(" rval = %ld filename=%s argv=%p envp=%p\n", + rval, filename, argv, envp); + return rval; +} + +notrace __section(".entry.text") +long protected_sys_execveat(const unsigned long dirfd, /*a1 */ + const unsigned long __user pathname,/* a2 */ + const unsigned long __user argv, /* a3 */ + const unsigned long __user envp, /* a4 */ + const unsigned long flags, /* a5 */ + const unsigned long unused6, + const struct pt_regs *regs) +{ + char __user *filename = (char *) pathname; + unsigned long 
*buf; + unsigned long *kargv, *kenvp; + unsigned long __user *u_argv = (unsigned long *) argv; + unsigned long __user *u_envp = (unsigned long *) envp; + unsigned int size = 0, size2 = 0; + int argc = 0, envc = 0; + long rval; /* syscall return value */ + + DbgSCP(" dirfd=%ld path=%s argv=0x%lx envp=0x%lx flags=0x%lx\n", + dirfd, (char *)pathname, argv, envp, flags); + + /* Path to executable */ + if (!filename) + return -EINVAL; + + /* argv */ + if (u_argv) { + size = e2k_ptr_size(regs->args[5], regs->args[6], 0); + if (!size) + return -EINVAL; + + /* Count real number of entries in argv */ + argc = count_descriptors((long *) u_argv, size); + if (argc < 0) + return -EINVAL; + } + + /* envp */ + if (u_envp) { + size2 = e2k_ptr_size(regs->args[7], regs->args[8], 0); + if (!size2) + return -EINVAL; + + /* Count real number of entries in envc */ + envc = count_descriptors((long *) u_envp, size2); + if (envc < 0) + return -EINVAL; + } + + DbgSCP(" argc=%d envc=%d\n", argc, envc); + + /* + * Allocate space on user stack for converting of + * descriptors in argv and envp to ints + */ + buf = get_user_space((argc + envc + 2) << 3); + kargv = buf; + kenvp = &buf[argc + 1]; + + /* + * Convert descriptors in argv to ints. + * For statically-linked executables missing argv is allowed, + * therefore kernel doesn't return error in this case. + * For dynamically-linked executables missing argv is not + * allowed, because at least argv[0] is required by ldso for + * loading of executable. Protected ldso must check argv. 
+ */ + if (argc) { + rval = convert_array(u_argv, kargv, + argc << 4, 1, argc, 0x3, 0x3); + if (rval) { + DbgSCP_ALERT("Bad argv in protected execveat\n"); + return rval; + } + } + /* The array argv must be terminated by zero */ + kargv[argc] = 0; + + /* + * Convert descriptors in envp to ints + * envc can be zero without problems + */ + if (envc) { + rval = convert_array(u_envp, kenvp, + envc << 4, 1, envc, 0x3, 0x3); + if (rval) { + DbgSCP_ALERT("Bad envp in protected execveat\n"); + return rval; + } + } + /* The array envp must be terminated by zero */ + kenvp[envc] = 0; + + rval = e2k_sys_execveat(dirfd, filename, (char **) kargv, + (char **) kenvp, flags); + + DbgSCP(" rval = %ld filename=%s argv=%p envp=%p\n", + rval, filename, kargv, kenvp); + return rval; +} + + +notrace __section(".entry.text") +long protected_sys_futex(const unsigned long __user a1, /* uaddr */ + const unsigned long a2, /* futex_op */ + const unsigned long a3, /* val */ + const unsigned long la4, /* timeout/val2 */ + const unsigned long __user la5, /* uaddr2 */ + const unsigned long a6, /* val3 */ + const struct pt_regs *regs) +{ +#define ERROR_MESSAGE_FUTEX " NULL pointer is not allowed (sys_num %ld).\n" + int cmd; + unsigned long a4 = la4; + unsigned long __user a5 = la5; + long sys_num; + long rval = 0; /* syscall return value */ + + cmd = a2 & FUTEX_CMD_MASK; + if (la4 && (cmd == FUTEX_WAIT || + cmd == FUTEX_WAIT_BITSET || + cmd == FUTEX_LOCK_PI || + cmd == FUTEX_WAIT_REQUEUE_PI)) { + /* + * These commands assume la4 must be a pointer. Let's check it: + */ + unsigned long arg7 = regs->args[7]; + + sys_num = regs->sys_num; + if (NOT_PTR(4) && !NULL_PTR(4, 7)) { + DbgSCP_ALERT("7 8" ERROR_MESSAGE_FUTEX, sys_num); + rval = -EINVAL; + } + } + if (la5 && (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE || + cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP || + cmd == FUTEX_WAIT_REQUEUE_PI)) { + /* + * These commands assume la5 must be a pointer. 
Let's check it: + */ + unsigned long arg9 = regs->args[9]; + + sys_num = regs->sys_num; + if (NOT_PTR(5) && !NULL_PTR(5, 9)) { + DbgSCP_ALERT("9 10" ERROR_MESSAGE_FUTEX, sys_num); + rval = -EINVAL; + } + } + rval = sys_futex((u32 *) a1, a2, a3, (struct __kernel_timespec *) a4, + (u32 *) a5, a6); + return rval; +} + + +notrace __section(".entry.text") +long protected_sys_getgroups(const long a1, /* size */ + const unsigned long __user a2, /* list[] */ + const unsigned long unused3, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs) +{ + long rval; /* syscall return value */ + unsigned int bufsize; + + DbgSCP(" (size=%ld, list[]=0x%lx) ", a1, a2); + + if (a1 < 0) { + DbgSCP_ALERT( + "Wrong 'size' (%ld) in getgroups()\n", a1); + return -EINVAL; + } + if (a2 && (PROT_SC_ARG_TAGS(2) != ETAGAPQ)) { + DbgSCP_ALERT( + "Not a pointer in arg #2 in getgroups()\n"); + return -EFAULT; + } + /* + * Here we check that list size is enough to receive 'size' gid's: + */ + bufsize = e2k_ptr_size(regs->args[3], regs->args[4], 0); + if (bufsize < (a1 * sizeof(gid_t))) { + DbgSCP_ALERT( + "Insufficient list size in getgroups(): %d < %zu\n", + bufsize, (size_t)(a1 * sizeof(gid_t))); + return -EINVAL; + } + + rval = sys_getgroups(a1, (gid_t *) a2); + DbgSCP("rval = %ld\n", rval); + return rval; +} + +notrace __section(".entry.text") +long protected_sys_setgroups(const long a1, /* size */ + const unsigned long __user a2, /* list[] */ + const unsigned long unused3, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs) +{ + long rval; /* syscall return value */ + unsigned int bufsize; + + DbgSCP(" (size=%ld, list[]=0x%lx) ", a1, a2); + + if (a1 < 0) { + DbgSCP_ALERT( + "Wrong 'size' (%ld) in setgroups()\n", a1); + return -EINVAL; + } + if (a2 && (PROT_SC_ARG_TAGS(2) != ETAGAPQ)) { + DbgSCP_ALERT( + "Not a pointer in arg #2 in setgroups()\n"); + return 
-EFAULT; + } + /* + * Here we check that list size is enough to receive 'size' gid's: + */ + bufsize = e2k_ptr_size(regs->args[3], regs->args[4], 0); + if (bufsize < (a1 * sizeof(gid_t))) { + DbgSCP_ALERT( + "Insufficient list size in setgroups(): %d < %zu\n", + bufsize, (size_t)(a1 * sizeof(gid_t))); + return -EINVAL; + } + + rval = sys_setgroups(a1, (gid_t *) a2); + DbgSCP("rval = %ld\n", rval); + return rval; +} + + +/* + * This function converts protected structure 'iov' into regular one + * alone with the check for validity of iov/iovcnt arguments to system calls, + * taking iovec structure at input (like readv/writev and so). + * iovcnt - number of buffers from the file assiciated to read/write. + * It returns converted structure pointer if OK; NULL pointer otherwise. + */ +static inline +long __user *convert_prot_iovec_struct(const unsigned long __user iov, + const unsigned long iovcnt, + const unsigned long descr_lo, + const unsigned long descr_hi, + const char *sysname) +{ + const int nr_segs = iovcnt; + long __user *new_arg = NULL; + unsigned int size; + long rval; /* syscall return value */ + + if (((unsigned int) nr_segs) > UIO_MAXIOV) { + DbgSCP_ALERT("Bad iov count number (%d) in iov structure\n", + nr_segs); + return NULL; + } + + /* + * One could use 0 in place `32 * nr_segs' here as the size + * will be checked below in `convert_array ()'. 
+ */ + size = e2k_ptr_size(descr_lo, descr_hi, 0); + if (size < (32 * nr_segs)) { + DbgSCP_ALERT("Bad iov structure size: %u < %d\n", + size, 32 * nr_segs); + return NULL; + } + + new_arg = get_user_space(nr_segs * 2 * 8); + rval = get_pm_struct_simple((long *) iov, new_arg, size, + 2, nr_segs, 0x13, 0x33); + if (rval) { + DbgSCP_ALERT("Bad iov structure in protected %s()\n", sysname); + return NULL; + } + return new_arg; +} + + +notrace __section(".entry.text") +long protected_sys_readv(const unsigned long a1, /* fd */ + const unsigned long __user a2, /* iov */ + const unsigned long a3, /* iovcnt */ + const unsigned long a4, /* unused */ + const unsigned long a5, /* unused */ + const unsigned long a6, /* unused */ + const struct pt_regs *regs) +{ + /* + * sys_readv(unsigned long fd, const struct iovec __user *vec, + * unsigned long nr_segs) + * struct iovec { + * void __user *iov_base; + * __kernel_size_t iov_len; + * }; + */ + const int nr_segs = (int) a3; + long __user *new_arg; + long rval; /* syscall return value */ + + if (!nr_segs) + return 0; + + new_arg = convert_prot_iovec_struct(a2, a3, regs->args[3], + regs->args[4], "readv"); + if (!new_arg) + return -EINVAL; + + rval = sys_readv(a1, (const struct iovec *) new_arg, nr_segs); + + DbgSCP(" rval = %ld new_arg=%px\n", rval, new_arg); + return rval; +} + +notrace __section(".entry.text") +long protected_sys_preadv(const unsigned long a1, /* fd */ + const unsigned long __user a2, /* iov */ + const unsigned long a3, /* iovcnt */ + const unsigned long a4, /* offset_l */ + const unsigned long a5, /* offset_h */ + const unsigned long a6, /* unused */ + const struct pt_regs *regs) +{ + const int nr_segs = (int) a3; + long __user *new_arg; + long rval; /* syscall return value */ + + new_arg = convert_prot_iovec_struct(a2, a3, regs->args[3], + regs->args[4], "preadv"); + if (!new_arg) + return -EINVAL; + + rval = sys_preadv(a1, (const struct iovec *) new_arg, nr_segs, a4, a5); + + DbgSCP(" rval = %ld 
new_arg=%px\n", rval, new_arg); + return rval; +} + +notrace __section(".entry.text") +long protected_sys_writev(const unsigned long a1, /* fd */ + const unsigned long __user a2, /* iov */ + const unsigned long a3, /* iovcnt */ + const unsigned long a4, /* unused */ + const unsigned long a5, /* unused */ + const unsigned long a6, /* unused */ + const struct pt_regs *regs) +{ + const int nr_segs = (int) a3; + long __user *new_arg; + long rval; /* syscall return value */ + + if (!nr_segs) + return 0; + + new_arg = convert_prot_iovec_struct(a2, a3, regs->args[3], + regs->args[4], "writev"); + if (!new_arg) + return -EINVAL; + + rval = sys_writev(a1, (const struct iovec *) new_arg, nr_segs); + + DbgSCP(" rval = %ld new_arg=%px\n", rval, new_arg); + return rval; +} + +notrace __section(".entry.text") +long protected_sys_pwritev(const unsigned long a1, /* fd */ + const unsigned long __user a2, /* iov */ + const unsigned long a3, /* iovcnt */ + const unsigned long a4, /* offset_l */ + const unsigned long a5, /* offset_h */ + const unsigned long a6, /* unused */ + const struct pt_regs *regs) +{ + const int nr_segs = (int) a3; + long __user *new_arg; + long rval; /* syscall return value */ + + new_arg = convert_prot_iovec_struct(a2, a3, regs->args[3], + regs->args[4], "pwritev"); + if (!new_arg) + return -EINVAL; + + rval = sys_pwritev(a1, (const struct iovec *) new_arg, nr_segs, a4, a5); + + DbgSCP(" rval = %ld new_arg=%px\n", rval, new_arg); + return rval; +} + +notrace __section(".entry.text") +long protected_sys_preadv2(const unsigned long a1, /* fd */ + const unsigned long __user a2, /* iov */ + const unsigned long a3, /* iovcnt */ + const unsigned long a4, /* offset_l */ + const unsigned long a5, /* offset_h*/ + const unsigned long a6, /* flags */ + const struct pt_regs *regs) +{ + const int nr_segs = (int) a3; + long __user *new_arg; + long rval; /* syscall return value */ + + new_arg = convert_prot_iovec_struct(a2, a3, regs->args[3], + regs->args[4], "preadv2"); + 
if (!new_arg) + return -EINVAL; + + rval = sys_preadv2(a1, (const struct iovec *) new_arg, nr_segs, + a4, a5, a6); + + DbgSCP(" rval = %ld new_arg=%px\n", rval, new_arg); + return rval; +} + +notrace __section(".entry.text") +long protected_sys_pwritev2(const unsigned long a1, /* fd */ + const unsigned long __user a2, /* iov */ + const unsigned long a3, /* iovcnt */ + const unsigned long a4, /* offset_l */ + const unsigned long a5, /* offset_h*/ + const unsigned long a6, /* flags */ + const struct pt_regs *regs) +{ + const int nr_segs = (int) a3; + long __user *new_arg; + long rval; /* syscall return value */ + + new_arg = convert_prot_iovec_struct(a2, a3, regs->args[3], + regs->args[4], "pwritev2"); + if (!new_arg) + return -EINVAL; + + rval = sys_pwritev2(a1, (const struct iovec *) new_arg, nr_segs, + a4, a5, a6); + + DbgSCP(" rval = %ld new_arg=%px\n", rval, new_arg); + return rval; +} + + +notrace __section(".entry.text") +long protected_sys_sysctl(const unsigned long __user arg1) +{ +#define SYSCTL_ARGS_STRUCT_SIZE 88; /* size of the protected structure used */ + long __user *ptr = (long *)arg1; + int rval = 0; /* syscall return value */ + struct __sysctl_args *new_arg; + size_t size = SYSCTL_ARGS_STRUCT_SIZE; + + if (!ptr) + return -EINVAL; + + new_arg = get_user_space(sizeof(struct __sysctl_args)); + if ((rval = convert_array((long *) ptr, (long *)new_arg, size, + 6, 1, 0x7f3, 0x7ff))) { + DbgSCP("convert_array returned %d\n", rval); + DbgSCP_ALERT("Bad struct args in protected sysctl syscall\n"); + return -EINVAL; + } + + rval = sys_sysctl(new_arg); + return rval; +} + + +notrace __section(".entry.text") +long protected_sys_olduselib(const unsigned long __user a1, /* library */ + const unsigned long __user a2) /* umdd */ +{ + char *str = (char *)a1; + umdd_old_t *umdd = (umdd_old_t *) a2; + kmdd_t kmdd; + int rval; /* syscall return value */ + + if (IS_CPU_ISET_V6()) + return -ENOSYS; + + if (!a1 || !a2) + return -EINVAL; + + if (current->thread.flags & 
E2K_FLAG_3P_ELF32) + rval = sys_load_cu_elf32_3P(str, &kmdd); + else + rval = sys_load_cu_elf64_3P(str, &kmdd); + + if (rval) { + DbgSCP_ERR("failed, could not load\n"); + return rval; + } + + rval |= PUT_USER_AP(&umdd->mdd_got, kmdd.got_addr, + kmdd.got_len, 0, RW_ENABLE); + if (kmdd.init_got_point) + rval |= PUT_USER_PL_V2(&umdd->mdd_init_got, + kmdd.init_got_point); + else + rval |= put_user(0L, &umdd->mdd_init_got.word); + + if (kmdd.entry_point) + rval |= PUT_USER_PL_V2(&umdd->mdd_start, + kmdd.entry_point); + else + rval |= put_user(0L, &umdd->mdd_start.word); + + if (kmdd.init_point) + rval |= PUT_USER_PL_V2(&umdd->mdd_init, + kmdd.init_point); + else + rval |= put_user(0L, &umdd->mdd_init.word); + + if (kmdd.fini_point) + rval |= PUT_USER_PL_V2(&umdd->mdd_fini, + kmdd.fini_point); + else + rval |= put_user(0L, &umdd->mdd_fini.word); + + return rval; +} + + +notrace __section(".entry.text") +long protected_sys_uselib(const unsigned long __user a1, /* library */ + const unsigned long __user a2) /* umdd */ +{ + char *str = (char *)a1; + umdd_t *umdd = (umdd_t *) a2; + kmdd_t kmdd; + int rval; /* syscall return value */ + + if (!a1 || !a2) + return -EINVAL; + + if (current->thread.flags & E2K_FLAG_3P_ELF32) + rval = sys_load_cu_elf32_3P(str, &kmdd); + else + rval = sys_load_cu_elf64_3P(str, &kmdd); + + if (rval) { + DbgSCP("could not load '%s' err #%d\n", str, rval); + return rval; + } + BUG_ON(kmdd.cui == 0); + + rval |= PUT_USER_AP(&umdd->mdd_got, kmdd.got_addr, + kmdd.got_len, 0, RW_ENABLE); + + if (kmdd.init_got_point) { + rval |= PUT_USER_PL(&umdd->mdd_init_got, + kmdd.init_got_point, + kmdd.cui); + } else { + rval |= put_user(0L, &umdd->mdd_init_got.PLLO_value); + rval |= put_user(0L, &umdd->mdd_init_got.PLHI_value); + } + + return rval; +} + +long protected_sys_mremap(const unsigned long __user old_address, + const unsigned long old_size, + const unsigned long new_size, + const unsigned long flags, + const unsigned long __user new_address, + const 
unsigned long a6, /* unused */ + struct pt_regs *regs) +{ + long rval = -EINVAL; + unsigned int ptr_size; + e2k_addr_t base; + e2k_ptr_t old_descriptor; + + if (old_address & ~PAGE_MASK) + goto nr_mremap_err; + + AW(old_descriptor).lo = regs->args[1]; + AW(old_descriptor).hi = regs->args[2]; + ptr_size = e2k_ptr_size(regs->args[1], regs->args[2], 0); + if (ptr_size < old_size) { + /* Reject, if user tries to remap more than allocated. */ + DbgSCP_ALERT("mremap cannot remap more than available\n"); + DbgSCP_ALERT("old_size (%lu) > descriptor size (%u)\n", + old_size, ptr_size); + rval = -EFAULT; + goto nr_mremap_err; + } + if (flags & MREMAP_FIXED) { + DbgSCP_ALERT("MREMAP_FIXED flag is not supported in PM\n"); + goto nr_mremap_err; + } + if (e2k_ptr_itag(regs->args[1]) != AP_ITAG) { + DbgSCP_ALERT("mremap cannot remap descriptor in stack\n"); + goto nr_mremap_err; + } + base = sys_mremap(old_address, old_size, new_size, flags, + /* + * MREMAP_FIXED is not supported in PM, + * therefore pass an invalid value for + * new_address. + */ + 0); + if (base & ~PAGE_MASK) { /* this is error code */ + rval = base; + goto nr_mremap_err; + } else { + regs->rval1 = make_ap_lo(base, new_size, 0, + e2k_ptr_rw(regs->args[1])); + regs->rval2 = make_ap_hi(base, new_size, 0, + e2k_ptr_rw(regs->args[1])); + regs->rv1_tag = E2K_AP_LO_ETAG; + regs->rv2_tag = E2K_AP_HI_ETAG; + regs->return_desk = 1; + rval = 0; + } + if (old_address != base || old_size > new_size) + clean_single_descriptor(old_descriptor); + + DbgSCP("rval = %ld regs->rval = 0x%lx : 0x%lx\n", + rval, regs->rval1, regs->rval2); + return rval; + +nr_mremap_err: + regs->rval1 = rval; + regs->rval2 = 0; + regs->rv1_tag = E2K_NUMERIC_ETAG; + regs->rv2_tag = E2K_NUMERIC_ETAG; + regs->return_desk = 1; + DbgSCP("rval = %ld\n", rval); + return rval; +} + +/* + * The structure of the second argument to socket call depends on + * the socket call number. 
+ * This function calculates mask/align type arguments to process + * the structure by 'convert+array'. + */ +notrace __section(".entry.text") +static void get_socketcall_mask(long call, long *mask_type, long *mask_align, + int *fields) +{ + switch (call) { + case SYS_SOCKET: + *mask_type = 0x15; + *mask_align = 0x15; + *fields = 3; + /* err = sys_socket(a[0], a[1], a[2]); */ + break; + case SYS_BIND: + /* err = sys_bind(a[0], */ + /* (struct sockaddr __user *) a[1], a[2]); */ + case SYS_CONNECT: + /* err = sys_connect(a[0], */ + /* (struct sockaddr __user *) a[1], a[2]); */ + *mask_type = 0x1d; + *mask_align = 0x1f; + *fields = 3; + break; + case SYS_LISTEN: + /* err = sys_listen(a[0], a[1]); */ + case SYS_SHUTDOWN: + /* err = sys_shutdown(a[0], a[1]); */ + *mask_type = 0x5; + *mask_align = 0x5; + *fields = 2; + break; + case SYS_ACCEPT: + /* err = sys_accept(a[0], */ + /* (struct sockaddr __user *) a[1], */ + /* (int __user*) a[2]); */ + case SYS_GETSOCKNAME: + /* err = sys_getsockname(a[0], */ + /* (struct sockaddr __user*) a[1], */ + /* (int __user *) a[2]); */ + case SYS_GETPEERNAME: + /* err = sys_getpeername(a[0], */ + /* (struct sockaddr __user *) a[1], */ + /* (int __user *)a[2]); */ + *mask_type = 0x3d; + *mask_align = 0x3f; + *fields = 3; + break; + case SYS_ACCEPT4: + *mask_type = 0x7d; + *mask_align = 0xff; + *fields = 4; + /* err = sys_accept4(a[0], */ + /* (struct sockaddr __user *) a[1], */ + /* (int __user*) a[2] */ + /* int a[3]); */ + break; + case SYS_SOCKETPAIR: + *mask_type = 0xd5; + *mask_align = 0xf5; + *fields = 4; + /*err = sys_socketpair(a[0], a[1], a[2], */ + /* (int __user *)a[3]); */ + break; + case SYS_SEND: + *mask_type = 0x5d; + *mask_align = 0x5f; + *fields = 4; + /* err = sys_send(a[0], (void __user *) a[1], a[2], */ + /* a[3]); */ + break; + case SYS_SENDTO: + *mask_type = 0x75d; + *mask_align = 0x7df; + *fields = 6; + /* err = sys_sendto(a[0], (void __user *) a[1], a[2], */ + /* a[3], (struct sockaddr __user *) a[4], a[5]); */ + 
break; + case SYS_RECV: + *mask_type = 0x5d; + *mask_align = 0x5f; + *fields = 4; + /* err = sys_recv(a[0], (void __user *) a[1], */ + /* a[2], a[3]); */ + break; + case SYS_RECVFROM: + *mask_type = 0xf5d; + *mask_align = 0xfdf; + *fields = 6; + /* err = sys_recvfrom(a[0], (void __user *) a[1], a[2], */ + /* a[3], (struct sockaddr __user *) a[4], */ + /* (int __user *) a[5]); */ + break; + case SYS_SETSOCKOPT: + *mask_type = 0x1d5; + *mask_align = 0x1f5; + *fields = 5; + /* err = sys_setsockopt(a[0], a[1], a[2], */ + /* (char __user *)a[3], a[4]); */ + break; + case SYS_GETSOCKOPT: + *mask_type = 0x3d5; + *mask_align = 0x3f5; + *fields = 5; + /* err = sys_getsockopt(a[0], a[1], a[2], */ + /* (char __user *) a[3], (int __user *)a[4]); */ + break; + case SYS_SENDMSG: + /* err = sys_sendmsg(a[0], */ + /* (struct msghdr __user *) a[1], a[2]); */ + case SYS_RECVMSG: + /* err = sys_recvmsg(a[0], */ + /* (struct msghdr __user *) a[1], a[2]); */ + *mask_type = 0x1d; + *mask_align = 0x1f; + *fields = 3; + break; + default: + DbgSCP("Empty masks used for socketcall #%ld\n", call); + *mask_type = 0x0; + *mask_align = 0x0; + *fields = 0; + break; + } +} + +notrace __section(".entry.text") +long protected_sys_socketcall(const unsigned long a1, /* call */ + const unsigned long __user a2, /* args */ + const unsigned long unused3, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs) +{ + long __user *args; + unsigned int size; + long mask_type, mask_align; + int fields; + long rval; /* syscall return value */ + struct protected_user_msghdr __user *prot_msghdr; + struct user_msghdr __user *converted_msghdr; + + get_socketcall_mask(a1, &mask_type, &mask_align, &fields); + + if (fields == 0) { + DbgSCP_ALERT("Bad socketcall number %ld\n", a1); + return -EINVAL; + } + + if (!a2) { + DbgSCP_ALERT("NULL pointer passed to socketcall (%d)\n", + (int)a1); + return -EFAULT; + } + size = e2k_ptr_size(regs->args[3], 
regs->args[4], 1 /*min_size*/); + /* NB> `convert_array' below will check if size is large + * enough for this request. + * + * Need an additional conversions of arguments + * for syscalls recvmsg/sendmsg + */ + if ((a1 == SYS_SENDMSG) || (a1 == SYS_RECVMSG)) { + /* + * Allocate space on user stack for additional + * structures for saving of converted parameters + */ + args = get_user_space((fields * 8) + + sizeof(struct user_msghdr) + + sizeof(struct iovec)); + /* Convert args array for socketcall from ptr */ + rval = convert_array_3((long *) a2, args, size, fields, 1, + mask_type, mask_align, 0, + CONV_ARR_WRONG_DSCR_FLD); + + if (rval) + goto err_out_bad_array; + + /* Convert struct msghdr from args[1] */ + prot_msghdr = (struct protected_user_msghdr *) args[1]; + converted_msghdr = (struct user_msghdr *) (args + (fields)); + if (prot_msghdr) { + converted_msghdr = convert_msghdr(prot_msghdr, + SIZE_MSGHDR, "socketcall", converted_msghdr); + /* Set args[1] to pointer to converted structure */ + args[1] = (long) converted_msghdr; + } else { + args[1] = 0; + DbgSCP_ALERT("Empty user_msghdr in args[1]\n"); + } + /* Other socketcalls */ + } else { + if (fields) { + /* Allocate space on user stack for args array */ + args = get_user_space(fields * 8); + /* Convert args array for socketcall from ptr */ + rval = convert_array((long *) a2, args, size, + fields, 1, mask_type, + mask_align); + if (rval) + goto err_out_bad_array; + } else { + DbgSCP_ERR("Using args as is; convert_array not called.\n"); + args = get_user_space(size); + if (copy_from_user(args, (void *) a2, size)) + return -EFAULT; + } + } + + /* Calling regular socketcall function with converted arguments: */ + rval = sys_socketcall((int) a1, (unsigned long *) args); + + if (!rval && (a1 == SYS_RECVMSG)) { + long ret; + /* Updating the msg_flags field @ user space: */ + DbgSCP("Socket call RECVMSG returned msg_flags: 0x%x\n", + converted_msghdr->msg_flags); + rval = put_user(converted_msghdr->msg_flags, + 
&prot_msghdr->msg_flags); + if (rval) + DbgSCP_ERR("Socket call RECVMSG: faled to return 'msg_flags'\n"); + /* Updating the 'controllen' field @ user space: */ + DbgSCP("Socket call RECVMSG returned 'controllen': %ld\n", + converted_msghdr->msg_controllen); + ret = put_user(converted_msghdr->msg_controllen, + &prot_msghdr->msg_controllen); + if (ret) { + DbgSCP_ERR("Socket call RECVMSG failed to return 'controllen'\n"); + rval = ret; + } + } + + DbgSCP(" (%d) returned %ld\n", (int) a1, rval); + return rval; + +err_out_bad_array: + DbgSCP_ALERT("Bad array for (%ld): size=%d\n", a1, size); + return rval; +} + +notrace __section(".entry.text") +long protected_sys_sendmsg(const unsigned long sockfd, + const unsigned long __user msg, + const unsigned long flags, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs) +{ + unsigned int size; + long rval; /* syscall return value */ + struct user_msghdr __user *converted_msghdr; + + size = e2k_ptr_size(regs->args[3], regs->args[4], 1 /*min_size*/); + converted_msghdr = convert_msghdr((struct protected_user_msghdr *) msg, + size, "sendmsg", NULL); + + /* Call socketcall handler function: */ + rval = sys_sendmsg(sockfd, converted_msghdr, flags); + + DbgSCP(" returned %ld\n", rval); + return rval; +} + +notrace __section(".entry.text") +long protected_sys_recvmsg(const unsigned long socket, + const unsigned long __user message, + const unsigned long flags, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs) +{ + unsigned int size; + long rval; /* syscall return value */ + struct user_msghdr __user *converted_msghdr; + struct protected_user_msghdr __user *prot_msghdr = + (struct protected_user_msghdr *)message; + + size = e2k_ptr_size(regs->args[3], regs->args[4], 1 /*min_size*/); + converted_msghdr = convert_msghdr(prot_msghdr, size, "recvmsg", NULL); + + /* Call socketcall handler 
function: */ + rval = sys_recvmsg(socket, converted_msghdr, flags); + + if (rval >= 0) { + long ret; + /* Updating the 'msg_flags' field @ user space: */ + DbgSCP("Syscall recvmsg() returned msg_flags: 0x%x\n", + converted_msghdr->msg_flags); + ret = put_user(converted_msghdr->msg_flags, + &prot_msghdr->msg_flags); + if (ret) { + DbgSCP_ERR("Protected recvmsg() failed to return 'msg_flags'\n"); + rval = ret; + } + /* Updating the 'controllen' field @ user space: */ + DbgSCP("Syscall recvmsg() returned 'controllen': %ld\n", + converted_msghdr->msg_controllen); + ret = put_user(converted_msghdr->msg_controllen, + &prot_msghdr->msg_controllen); + if (ret) { + DbgSCP_ERR("Protected recvmsg() failed to return 'controllen'\n"); + rval = ret; + } + } + + DbgSCP(" returned %ld\n", rval); + return rval; +} + + +#define MMSGHDR_STRUCT_SIZE_LONGS \ + (sizeof(struct mmsghdr) / sizeof(long)) +#define MMSGHDR_VECT_SIZE_LONGS(vlen) \ + ((sizeof(struct mmsghdr) * vlen) / sizeof(long)) + +static long convert_mmsghdr(long __user *prot_mmsghdr, + long __user *kernel_mmsghdr, + unsigned int size, + unsigned int vlen, + const char *syscall_name) +/* Converts user msghdr structure from protected to regular structure format. + * Outputs: 0 if converted OK; error code otherwise. + * 'prot_msghdr' - protected message header structure. + * 'kernel_mmsghdr' - converted structure (to be allocated in syscall + * to avoid re-using stack area if allocated over here). + * 'size' - size of the input structure. + * 'vlen' - vector length if vector of structures is converted. + * 'syscall_name' - reference to particular syscall in diagnostic output. 
+ */ +{ + long __user *args = kernel_mmsghdr; + long __user *v_mmsrhdr; + struct mmsghdr __user *converted_mmsghdr; + struct user_msghdr __user *converted_msghdr; + long __user *converted_iovec; + int err, iov_len, i; + +#define MASK_MMSGHDR_TYPE 0x0773 /* type mask for struct mmsghdr */ +#define MASK_MMSGHDR_ALIGN 0xd7ff /* alignment mask for mmsghdr structure */ +#define MASK_MMSGHDR_RW MASK_MSGHDR_RW + /* + * Structures user_msghdr and iovec contain pointers + * inside, therefore they need to be additionally + * converted with saving results in these structures + */ + + /* (1) Converting 'mmsghdr' structure array: */ + + converted_mmsghdr = (struct mmsghdr *) args; + err = convert_array_3(prot_mmsghdr, (long *) converted_mmsghdr, + size, 8, vlen, MASK_MMSGHDR_TYPE, + MASK_MMSGHDR_ALIGN, MASK_MMSGHDR_RW, + CONV_ARR_WRONG_DSCR_FLD); + if (err) { + DbgSCP_ALERT("Bad mmsghdr in syscall \'%s\'\n", syscall_name); + return -EINVAL; + } + + /* (2) Converting struct iovec fields in msghdr structures + * (msghdr->msg_iov): + */ + converted_iovec = args + MMSGHDR_VECT_SIZE_LONGS(vlen); + for (i = 0, v_mmsrhdr = args; i < vlen; i++) { + converted_mmsghdr = (struct mmsghdr *) v_mmsrhdr; + converted_msghdr = &converted_mmsghdr->msg_hdr; + iov_len = converted_msghdr->msg_iovlen; + if (converted_msghdr->msg_iov) { + err = convert_array_3( + (long *) converted_msghdr->msg_iov, + (long *) converted_iovec, + SIZE_IOVEC * iov_len, 2, iov_len, + MASK_IOVEC_TYPE, MASK_IOVEC_ALIGN, 0, + CONV_ARR_WRONG_DSCR_FLD); + if (err) { + DbgSCP_ALERT("Bad struct iovec in mmsghdr (syscall \'%s\')\n", + syscall_name); + } + } else { + DbgSCP_ALERT("Empty struct iovec in mmsghdr (syscall \'%s\')\n", + syscall_name); + converted_iovec = NULL; + } + + /* Replacing iovec pointer in converted msghdr structure: */ + converted_msghdr->msg_iov = (struct iovec *) converted_iovec; + + v_mmsrhdr += MMSGHDR_STRUCT_SIZE_LONGS; + converted_iovec += + iov_len * sizeof(struct iovec) / sizeof(long); + } + + return 
0; +} + +#if 1 +#define print_mmsghdr_struct(a1, a2, a3) +#else +static void print_mmsghdr_struct(const char *title, + long __user *mmsghdr_arr, + const int vlen) +{ + if (arch_init_pm_sc_debug_mode(PM_SC_DBG_MODE_CONV_STRUCT)) { + long __user *larr = mmsghdr_arr; + struct mmsghdr __user *mmsghdrp; + struct iovec __user *iovp; + long lval; + int i, j; + + /* Print structure content: */ + pr_info("%s[%d]:\n", title, vlen); + for (i = 0; i < vlen; i++) { + mmsghdrp = (struct mmsghdr *)larr; + pr_info("\t##### mmsghdr[%d] : 0x%lx #####\n", + i, (long)mmsghdrp); + for (j = 0; j < MMSGHDR_STRUCT_SIZE_LONGS; j++) { + pr_info("\t0x%.8x.%.8x\n", + (int)(*larr), (int)(*larr >> 32)); + lar++; + } + iovp = mmsghdrp->msg_hdr.msg_iov; + for (j = 0; j < mmsghdrp->msg_hdr.msg_iovlen; j++) { + lval = (long) iovp; + pr_info("\t->msg_iov[%d: 0x%lx]: base = 0x%lx len = %ld\n", + j, lval, + (long)iovp->iov_base, iovp->iov_len); + lval += sizeof(struct iovec); + iovp = (struct iovec *)lval; + } + } + } +} +#endif /* print_mmsghdr_struct */ + +static long update_prot_mmsghdr_struct(long __user *mmsghdr_arr, + long __user *prot_msgvec, + const int vlen) +/* This is post-syscall post-processing procedure. + * Propagate .msg_len values from processed 'mmsghdr_arr' back to 'prot_msgvec'. + * 'vlen' - number of elements in the array. + * Returns error code or 0 if OK. + */ +{ +#define MMSGHDR_STR_LEN_OFFSET 96 + /* .msg_len field offset in the protected structure */ +#define PROT_MMSGHDR_SIZE 112 + /* size of struct mmsghdr in prot. 
user space */ + long __user *from = mmsghdr_arr; + long __user *to = prot_msgvec; + struct mmsghdr __user *mmsghdr_from; + long rval = 0, val; + int i; + + to += MMSGHDR_STR_LEN_OFFSET / sizeof(long); + + for (i = 0; i < vlen; i++) { + mmsghdr_from = (struct mmsghdr *) from; + val = mmsghdr_from->msg_len; + DbgSCP("mmsghdr[%d].msg_len = %ld\n", i, val); + rval |= put_user(val, to); + + from += MMSGHDR_STRUCT_SIZE_LONGS; + to += PROT_MMSGHDR_SIZE / sizeof(long); + } + + DbgSCP(" returned %ld\n", rval); + return rval; +} + +#define PROTECTED_MMSGHDR_SIZE(vlen) \ + (PROT_MMSGHDR_SIZE * vlen) + +notrace __section(".entry.text") +long protected_sys_sendmmsg(const unsigned long sockfd, + const unsigned long __user msgvec, + const unsigned long vlen, /* vector lngth */ + const unsigned long flags, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs) +{ + unsigned int size; + long rval; /* syscall return value */ + long __user *kernel_mmsghdr; + + DbgSCP(" sockfd=%ld vlen=%ld\n", sockfd, vlen); + + size = e2k_ptr_size(regs->args[3], regs->args[4], 1 /*min_size*/); + if (size < PROTECTED_MMSGHDR_SIZE(vlen)) { + DbgSCP_ERR(" Bad 'msgvec' size: %d < %ld", + size, PROTECTED_MMSGHDR_SIZE(vlen)); + return -EINVAL; + } + + /* NB> For the sake of performance we don't calculate exact vector size. + * Instead, we allocate same space as in PM, which is bigger + * and is quite enough for kernel structire for sure. 
+ */ + kernel_mmsghdr = get_user_space(size); + if (!kernel_mmsghdr) { + DbgSCP_ERR("FATAL ERROR: failed to allocate %d on stack !!!", + size); + return -EINVAL; + } + + if (convert_mmsghdr((long *) msgvec, kernel_mmsghdr, + size, vlen, "sendmmsg")) + return -EINVAL; + + if (arch_init_pm_sc_debug_mode(PM_SC_DBG_MODE_CONV_STRUCT)) + print_mmsghdr_struct("protected sendmmsg: converted mmsghdr", + kernel_mmsghdr, vlen); + + rval = sys_sendmmsg(sockfd, (struct mmsghdr *) kernel_mmsghdr, vlen, + flags); + + if (rval <= 0) + DbgSCP("sys_sendmmsg() failed with error code %ld\n", rval); + + if (arch_init_pm_sc_debug_mode(PM_SC_DBG_MODE_CONV_STRUCT)) + print_mmsghdr_struct("protected sendmmsg: post-syscall mmsghdr", + kernel_mmsghdr, vlen); + + if (rval > 0) { + /* Propagating .msg_len values back to 'msgvec' */ + long ret; + + ret = update_prot_mmsghdr_struct(kernel_mmsghdr, + (long *)msgvec, (int)vlen); + if (ret) + rval = ret; + } + + DbgSCP(" returned %ld\n", rval); + return rval; +} + +notrace __section(".entry.text") +long protected_sys_recvmmsg(const unsigned long sockfd, + const unsigned long __user msgvec, + const unsigned long vlen, /* vector lngth */ + const unsigned long flags, + const unsigned long __user timeout, + const unsigned long unused6, + const struct pt_regs *regs) +{ + unsigned int size; + long rval; /* syscall return value */ + long __user *kernel_mmsghdr; + + DbgSCP(" sockfd=%ld vlen=%ld\n", sockfd, vlen); + + size = e2k_ptr_size(regs->args[3], regs->args[4], 1 /*min_size*/); + if (size < PROTECTED_MMSGHDR_SIZE(vlen)) { + DbgSCP_ERR(" Bad 'msgvec' arg size: %d < %ld", + size, PROTECTED_MMSGHDR_SIZE(vlen)); + return -EINVAL; + } + + /* NB> For the sake of performance we allocate same space as in PM. 
*/ + kernel_mmsghdr = get_user_space(size); + if (!kernel_mmsghdr) { + DbgSCP_ERR("FATAL ERROR: failed to allocate %d on stack !!!", + size); + return -EINVAL; + } + + if (convert_mmsghdr((long *) msgvec, kernel_mmsghdr, + size, vlen, "recvmmsg")) + return -EINVAL; + + if (arch_init_pm_sc_debug_mode(PM_SC_DBG_MODE_CONV_STRUCT)) + print_mmsghdr_struct("protected recvmmsg: converted mmsghdr", + kernel_mmsghdr, vlen); + + rval = sys_recvmmsg(sockfd, (struct mmsghdr *) kernel_mmsghdr, vlen, + flags, (struct __kernel_timespec *) timeout); + + if (rval <= 0) { + DbgSCP("sys_recvmmsg() failed with error code %ld\n", rval); + } else { /* (rval > 0) */ + long ret; + + ret = update_prot_mmsghdr_struct(kernel_mmsghdr, + (long *)msgvec, (int)vlen); + if (ret) + rval = ret; + } + + DbgSCP(" returned %ld\n", rval); + return rval; +} + + +/* + * Selecting proper convert_array masks (type and align) and argument number + * to convert protected array of arguments to the corresponding sys_ipc syscall. + * NB> Elements of the array are normally of types long and descriptor. + */ +notrace __section(".entry.text") +static inline void get_ipc_mask(long call, long *mask_type, long *mask_align, + int *fields) +{ + /* According to sys_ipc () these are SEMTIMEDOP and (MSGRCV | + * (1 << 16))' (see below on why MSGRCV is not useful in PM) calls that + * make use of FIFTH argument. Both of them interpret it as a long. Thus + * all other calls may be considered as 4-argument ones. Some of them + * may accept less than 4 arguments. + */ + switch (call) { + case (MSGRCV | (1 << 16)): + /* Instead it's much more handy to pass MSGP as PTR (aka FOURTH) + * and MSGTYP as FIFTH. `1 << 16' makes it clear to `sys_ipc ()' + * that this way of passing arguments is used. + */ + case SEMTIMEDOP: + *mask_type = 0x3d5; + *mask_align = 0x3f5; + *fields = 5; + break; + case SHMAT: + /* SHMAT is special because it interprets the THIRD argument as + * a pointer to which AP should be stored in PM. 
+ */ + *mask_type = 0xf5; + *mask_align = 0xfd; + *fields = 3; + break; + case SEMGET: + case SHMGET: + *mask_type = 0x15; + *mask_align = 0x15; + *fields = 3; + break; + case MSGGET: + *mask_type = 0x5; + *mask_align = 0x5; + *fields = 2; + break; + default: + *mask_type = 0xd5; + *mask_align = 0xf5; + *fields = 4; + DbgSCP("default ipc masks used in the ipc call %ld\n", call); + } + DbgSCP("call=%ld mask_type=0x%lx mask_align=0x%lx fields=%d\n", + call, *mask_type, *mask_align, *fields); +} + +static long process_shmat_syscall_result(const int shmid, const int shmflg, + ulong __user *raddr) +{ + /* This is 'shmat' syscall post-processing for protected execution mode. + * We need to convert obtained shm pointer to descriptor + * (must have been available in *raddr) and pass it to 'raddr': + */ + unsigned long segm_size; + ulong base; + e2k_ptr_t dscr; + unsigned long lo, hi; + int access; + long rval; /* return value */ + + /* taking shm parameters from shmid: */ + segm_size = get_shm_segm_size(shmid); + DbgSCP("(%d): segm_size = %ld\n", shmid, segm_size); + + if (IS_ERR_VALUE(segm_size)) + return (long) segm_size; + + access = (shmflg & SHM_RDONLY) ? R_ENABLE : RW_ENABLE; + + base = *raddr; + + lo = make_ap_lo(base, segm_size, 0, access); + hi = make_ap_hi(base, segm_size, 0, access); + NATIVE_STORE_VALUE_WITH_TAG(&AWP(&dscr).hi, hi, E2K_AP_HI_ETAG); + NATIVE_STORE_VALUE_WITH_TAG(&AWP(&dscr).lo, lo, E2K_AP_LO_ETAG); + + DbgSCP("(%d): lo = 0x%lx hi = 0x%lx\n", shmid, lo, hi); + + rval = copy_to_user_with_tags(raddr, &dscr, sizeof(dscr)); + if (rval) + rval = -EFAULT; + + DbgSCP("(%d) returned %ld\n", shmid, rval); + return rval; +} + +static int semctl_ptr128_to_64(unsigned long __user semun_ptr128, + unsigned long __user semun_ptr64) +/* Union semun may contain descriptor; if so replacing it with 64-bit pointer. 
*/ +{ + e2k_ptr_t descr; + unsigned long ptr64; + int tag, tag_hi; + int rval = 0; /* syscall return value */ + + if (!semun_ptr128 || !semun_ptr64) { + DbgSCP("Empty semun pointer\n"); + return -EINVAL; + } + + /* Check for descriptor in semun_ptr128: */ +TRY_USR_PFAULT { + + NATIVE_LOAD_VAL_AND_TAGD(semun_ptr128, descr.word.lo, tag); + if (tag) /* not 'int' */ + NATIVE_LOAD_VAL_AND_TAGD(semun_ptr128 + 8, descr.word.hi, tag_hi); + +} CATCH_USR_PFAULT { + DbgSCP_ALERT(FATAL_ERR_READ, semun_ptr128); + rval = -EFAULT; + goto out; +} END_USR_PFAULT + + if ((tag != E2K_AP_LO_ETAG) || (tag_hi != E2K_AP_HI_ETAG)) { + DbgSCP_WARN("Semun ptr 0x%lx doesn't contain descriptor\n", + semun_ptr128); + rval = -EFAULT; + goto out; + } + + /* replacing descriptor with 64-bit pointer: */ + ptr64 = ptr128_to_64(descr); + if (put_user(ptr64, (long *) semun_ptr64)) { + DbgSCP_ALERT(FATAL_ERR_WRITE, semun_ptr64); + rval = -EFAULT; + } +out: + DbgSCP("returned %d\n", rval); + return rval; +} + + +notrace __section(".entry.text") +long protected_sys_semctl(const long semid, /* a1 */ + const long semnum, /* a2 */ + const long cmd, /* a3 */ + const unsigned long __user ptr, /* a4 */ + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs) +{ + union semun *converted_semun; + unsigned long __user fourth = 0; /* fourth arg to 'semctl' syscall */ + long rval; /* syscall return value */ + + /* Fields of union semun depend on the 'cmd' parameter */ + switch (cmd & ~IPC_64) { + /* Pointer in union semun required */ + case IPC_STAT: + case IPC_SET: + case IPC_INFO: + case GETALL: + case SEM_INFO: + case SEM_STAT: + case SEM_STAT_ANY: + case SETALL: + if (!ptr) { + DbgSCP_ERR(" Bad semun parameter for semctl"); + return -EINVAL; + } + /* Union semun (4-th arg) contains pointer */ + converted_semun = get_user_space(sizeof(union semun)); + rval = semctl_ptr128_to_64(ptr, (unsigned long) converted_semun); + if (rval) + goto out; + fourth = (unsigned long) 
converted_semun; + break; + /* Int value in union semun required */ + case SETVAL: + fourth = ptr; + break; + /* No 'semun' argument */ + default: + break; + } + + DbgSCP(" semid:%ld semnum:%ld cmd:%ld semun:0x:%lx\n", + semid, semnum, cmd, fourth); + + rval = sys_old_semctl((int) semid, (int) semnum, (int) cmd, fourth); +out: + DbgSCP("(%d) returned %ld\n", (int) cmd, rval); + return rval; +} + +/* long sys_shmat(int shmid, char __user *shmaddr, int shmflg); */ +notrace __section(".entry.text") +long protected_sys_shmat(const long shmid, /* a1 */ + const unsigned long __user shmaddr, /* a2 */ + const long shmflg, /* a3 */ + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + struct pt_regs *regs) +{ + unsigned long segm_size; + ulong base; + unsigned long lo = 0, hi = 0; + int access; + int rv1_tag = E2K_NUMERIC_ETAG, rv2_tag = E2K_NUMERIC_ETAG; + long rval; /* syscall return value */ + + rval = sys_shmat((int) shmid, (char *) shmaddr, (int) shmflg); + + if (IS_ERR_VALUE(rval)) + goto err_out; + base = (ulong) rval; + + /* + * 'shmat' syscall post-processing for protected execution mode: + * We need to convert obtained shm pointer to descriptor + */ + + segm_size = get_shm_segm_size(shmid); + DbgSCP("(%ld): segm_size = %ld\n", shmid, segm_size); + + if (IS_ERR_VALUE(segm_size)) { + rval = (long) segm_size; + goto err_out; + } + + access = (shmflg & SHM_RDONLY) ? 
R_ENABLE : RW_ENABLE; + + lo = make_ap_lo(base, segm_size, 0, access); + hi = make_ap_hi(base, segm_size, 0, access); + + rv1_tag = E2K_AP_LO_ETAG; + rv2_tag = E2K_AP_HI_ETAG; + rval = 0; +err_out: + regs->return_desk = 1; + regs->rval1 = lo; + regs->rval2 = hi; + regs->rv1_tag = rv1_tag; + regs->rv2_tag = rv2_tag; + DbgSCP("rval = %ld (hex: %lx) - 0x%lx : 0x%lx t1/t2=0x%x/0x%x\n", + rval, rval, lo, hi, rv1_tag, rv2_tag); + + return rval; +} + +notrace __section(".entry.text") +long protected_sys_ipc(const unsigned long call, /* a1 */ + const long first, /* a2 */ + const unsigned long second, /* a3 */ + const unsigned long third, /* a4 */ + const unsigned long __user ptr, /* a5 */ + const long fifth, /* a6 */ + const struct pt_regs *regs) +{ + long mask_type, mask_align; + int fields; + void *fourth = (void *) ptr; /* fourth arg to 'ipc' syscall */ + long rval; /* syscall return value */ + + get_ipc_mask(call, &mask_type, &mask_align, &fields); + if ((fields == 0) || (unlikely(fields > 5))) { + DbgSCP_ALERT("Bad syscall_ipc number %ld\n", call); + return -EINVAL; + } + + if (check_args_array(®s->args[3], regs->tags >> 16, fields, + mask_type, 0, "Syscall ipc()")) { + DbgSCP_ERR("Bad args to syscall_ipc #%ld\n", call); + return -EINVAL; + } + + /* Syscalls that follow require converting arg-structures: */ + switch (call) { + case SEMCTL: { + /* + * Union semun (5-th parameter) contains pointers + * inside, therefore they need to be additionally + * converted depended on corresponding types + */ + union semun *converted_semun; + + /* Fields of union semun depend on cmd parameter */ + switch (third & ~IPC_64) { + /* Pointer in union semun required */ + case IPC_STAT: + case IPC_SET: + case IPC_INFO: + case GETALL: + case SEM_INFO: + case SEM_STAT: + case SEM_STAT_ANY: + case SETALL: + if (!ptr) + return -EINVAL; + converted_semun = get_user_space(sizeof(union semun)); + rval = semctl_ptr128_to_64(ptr, (unsigned long) converted_semun); + if (rval) + goto out; + /* + * 
Assign args[3] to pointer to + * converted union + */ + fourth = (void *) converted_semun; + break; + /* Int value in union semun required */ + case SETVAL: + /* Int value for SETVAL */ + fourth = (void *) ptr; + break; + /* No union semun as argument */ + default: + break; + } + break; + } + case MSGRCV: { +#define MASK_MSG_BUF_PTR_TYPE 0x7 /* type mask for struct msg_buf */ +#define MASK_MSG_BUF_PTR_ALIGN 0x7 /* alignment mask for struct msg_buf */ +#define SIZE_MSG_BUF_PTR 32 /* size of struct msg_buf with pointer */ + /* + * NB> Library uses different msg structure, + * not the one sys_msgrcv syscall uses. + * Struct new_msg_buf (ipc_kludge) contains pointer + * inside, therefore it needs to be additionally + * converted with saving results in these struct + */ + struct ipc_kludge *converted_new_msg_buf; + + converted_new_msg_buf = + get_user_space(sizeof(struct ipc_kludge)); + rval = convert_array((long *) ptr, + (long *) converted_new_msg_buf, + SIZE_MSG_BUF_PTR, 2, 1, + MASK_MSG_BUF_PTR_TYPE, + MASK_MSG_BUF_PTR_ALIGN); + if (rval) { + DbgSCP_ERR("Bad msg_buf parameter for msgrcv\n"); + return -EINVAL; + } + + /* + * Assign args[3] to pointer to converted new_msg_buf + */ + fourth = (void *) converted_new_msg_buf; + break; + } + default: /* other options don't require extra arg processing */ + break; + } + + /* + * Call syscall_ipc handler function with passing of + * arguments to it + */ + + DbgSCP(" call:%d 1st:0x%x 2nd:0x%lx 3rd:0x%lx\nptr:%p 5th:0x%lx\n", + (u32) call, (int) first, (unsigned long) second, + (unsigned long) third, fourth, fifth); + + rval = sys_ipc((u32) call, (int) first, (unsigned long) second, + (unsigned long) third, fourth, fifth); + + if (!IS_ERR_VALUE(rval) && (call == SHMAT)) { + /* we need to return descriptor to pointer in args[1] */ + rval = process_shmat_syscall_result( + (int) first /*shmid*/, + (int) second /*shmflg*/, + (ulong *) third /**raddr*/); + } +out: + DbgSCP("(%d) returned %ld\n", (int) call, rval); + return rval; 
+} + +__section(".entry.text") +static long prot_sys_mmap(const unsigned long start, + const unsigned long length, const unsigned long prot, + const unsigned long flags, const unsigned long fd, + const unsigned long offset, const int offset_in_bytes, + struct pt_regs *regs) +{ + long rval = -EINVAL; /* syscall return value */ + e2k_addr_t base; + unsigned int enable = 0; + long rval1 = 0, rval2 = 0; + int rv1_tag = E2K_NUMERIC_ETAG, rv2_tag = E2K_NUMERIC_ETAG; + + DbgSCP("start = %ld, len = %ld, prot = 0x%lx ", start, length, prot); + DbgSCP("flags = 0x%lx, fd = 0x%lx, off = %ld, in_bytes=%d", + flags, fd, offset, offset_in_bytes); + if (!length) + goto nr_mmap_out; + + if (length > 0x7fffffffL) { + /* NB> For details on this limitation see bug #99875 */ + DbgSCP_ERR("trying to map %ld (0x%lx) bytes\n", + length, length); + DbgSCP_WARN("cannot allocate over 2**31 bytes (2Gb) in protected mode\n"); + /* NB> We cannot simply return error code as + * this syscall returns structured result. 
+ */ + goto nr_mmap_out; + } + if (offset_in_bytes) + base = sys_mmap((unsigned long) start, (unsigned long) length, + (unsigned long) prot, (unsigned long) flags, + (unsigned long) fd, (unsigned long) offset); + else /* this is __NR_mmap2 */ + base = sys_mmap2((unsigned long) start, (unsigned long) length, + (unsigned long) prot, (unsigned long) flags, + (unsigned long) fd, (unsigned long) offset); + DbgSCP("base = 0x%lx\n", (unsigned long)base); + if (base & ~PAGE_MASK) { /* this is error code */ + rval = base; + goto nr_mmap_out; + } + base += (unsigned long) offset & PAGE_MASK; + + if (!prot) { + DbgSCP_WARN("delivered descriptor without access rights:\n"); + DbgSCP_WARN("\tbase = 0x%lx size = 0x%lx prot = 0\n", + (unsigned long)base, length); + } + if (prot & PROT_READ) + enable |= R_ENABLE; + if (prot & PROT_WRITE) + enable |= W_ENABLE; + + rval1 = make_ap_lo(base, length, 0, enable); + rval2 = make_ap_hi(base, length, 0, enable); + rv1_tag = E2K_AP_LO_ETAG; + rv2_tag = E2K_AP_HI_ETAG; + rval = 0; +nr_mmap_out: + regs->return_desk = 1; + regs->rval1 = rval1; + regs->rval2 = rval2; + regs->rv1_tag = rv1_tag; + regs->rv2_tag = rv2_tag; + DbgSCP("rval = %ld (hex: %lx) - 0x%lx : 0x%lx t1/t2=0x%x/0x%x\n", + rval, rval, rval1, rval2, rv1_tag, rv2_tag); + return rval; +} + +__section(".entry.text") +long protected_sys_mmap(const unsigned long a1, /* start */ + const unsigned long a2, /* length */ + const unsigned long a3, /* prot */ + const unsigned long a4, /* flags */ + const unsigned long a5, /* fd */ + const unsigned long a6, /* offset */ + struct pt_regs *regs) +{ + return prot_sys_mmap(a1, a2, a3, a4, a5, a6, 1, regs); +} + +__section(".entry.text") +long protected_sys_mmap2(const unsigned long a1, /* start */ + const unsigned long a2, /* length */ + const unsigned long a3, /* prot */ + const unsigned long a4, /* flags */ + const unsigned long a5, /* fd */ + const unsigned long a6, /* offset */ + struct pt_regs *regs) +{ + return prot_sys_mmap(a1, a2, a3, a4, 
a5, a6, 0, regs); +} + + +notrace __section(".entry.text") +long protected_sys_unuselib(const unsigned long a1, /* address of module */ + const unsigned long a2, + const unsigned long a3, + const unsigned long a4, + const unsigned long a5, + const unsigned long a6, + struct pt_regs *regs) +{ + unsigned long rval; + /* Base address of module data segment */ + unsigned long glob_base = a1; + /* Size of module data segment */ + size_t glob_size = e2k_ptr_size(regs->args[1], regs->args[2], + 1 /*min_size*/); + + /* Unload module module from memory */ + if (current->thread.flags & E2K_FLAG_3P_ELF32) + rval = sys_unload_cu_elf32_3P(glob_base, + glob_size); + else + rval = sys_unload_cu_elf64_3P(glob_base, + glob_size); + + if (rval) { + DbgSCP("failed, could not unload module with" + " data_base = 0x%lx , data_size = 0x%lx\n", + glob_base, glob_size); + } + + return rval; +} + +notrace __section(".entry.text") +long protected_sys_munmap(const unsigned long __user a1, /* addr */ + const unsigned long a2, /* length */ + const unsigned long unused3, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + struct pt_regs *regs) +{ + long rval = -EINVAL; /* syscall return value */ + unsigned int addr_size; + + DbgSCP("(addr=%lx, len=%lx) ", a1, a2); + + if (!a1 || !a2) + return -EINVAL; + + addr_size = e2k_ptr_size(regs->args[1], regs->args[2], 0); + if (addr_size < a2) { + DbgSCP_ALERT("Length bigger than descr size: %ld > %d\n", + a2, addr_size); + return -EINVAL; + } + + if (e2k_ptr_itag(regs->args[1]) != AP_ITAG) { + DbgSCP_ALERT("Desc in stack (SAP, not AP): 0x%lx\n", a1); + return -EINVAL; + } + + rval = sys_munmap(a1, a2); + DbgSCP("rval = %ld (hex: %lx)\n", rval, rval); + return rval; +} + + + +notrace __section(".entry.text") +long protected_sys_get_backtrace(const unsigned long __user buf, /* a1 */ + size_t count, size_t skip, /* a2,3 */ + unsigned long flags, /* a4 */ + const unsigned long unused5, + const unsigned long 
unused6, + const struct pt_regs *regs) +{ + unsigned int size; + + DbgSCP("(buf=0x%lx, count=%ld, skip=%ld, flags=0x%lx)\n", + buf, count, skip, flags); + size = e2k_ptr_size(regs->args[1], regs->args[2], 0); + if (size < (count * 8)) { + DbgSCP_ALERT("Count bigger than buf size: %ld > %d\n", + (count * 8), size); + return -EINVAL; + } + return sys_get_backtrace((unsigned long *) buf, count, skip, flags); +} + +notrace __section(".entry.text") +long protected_sys_set_backtrace(const unsigned long __user buf, /* a1 */ + size_t count, size_t skip, /* a2,3 */ + unsigned long flags, /* a4 */ + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs) +{ + unsigned int size; + + DbgSCP("(buf=0x%lx, count=%ld, skip=%ld, flags=0x%lx)\n", + buf, count, skip, flags); + size = e2k_ptr_size(regs->args[1], regs->args[2], 0); + if (size < (count * 8)) { + DbgSCP_ALERT("Count bigger than buf size: %ld > %d\n", + (count * 8), size); + return -EINVAL; + } + return sys_set_backtrace((unsigned long *) buf, count, skip, flags); +} + + +notrace __section(".entry.text") +long protected_sys_set_robust_list(const unsigned long __user listhead, /* a1 */ + const size_t len, /* a2 */ + const unsigned long unused3, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs) +{ + DbgSCP("(head=0x%lx, len=%zd)\n", listhead, len); + + if (!futex_cmpxchg_enabled) { + DbgSCP_ALERT("futex_cmpxchg is not enabled\n"); + return -ENOSYS; + } + + /* In glibc side `sizeof (struct robust_list_head) == 0x30'. 
*/ + if (unlikely(len != 0x30)) { + DbgSCP_ALERT("len (0x%zx) != sizeof(struct robust_list_head)\n", + len); + return -EINVAL; + } + + current_thread_info()->pm_robust_list = (long __user *) listhead; + + /* We need to save the original descriptor + * to return it in protected_sys_get_robust_list: + */ + store_descriptor_attrs((void *)listhead, regs->args[1], regs->args[2], + (regs->tags >> 8) & 0xFF, 0 /*signum*/); + + DbgSCP("tags = 0x%lx / ret = 0\n", (regs->tags >> 8)); + return 0; +} + +notrace __section(".entry.text") +long protected_sys_get_robust_list(const unsigned long pid, + unsigned long __user head_ptr, + unsigned long __user len_ptr) +{ + /* In glibc side `sizeof (struct robust_list_head) == 0x30'. */ +#define SIZEOF_PROT_HEAD_STRUCT 0x30 + long __user *head; + unsigned long ret; /* result of the function */ + struct task_struct *p; + struct sival_ptr_list *dscr_attrs; + e2k_ptr_t dscr; + size_t len; + + DbgSCP("(pid=%ld, head_ptr=0x%lx, len_ptr=0x%lx)\n", + pid, head_ptr, len_ptr); + + if (!futex_cmpxchg_enabled) { + DbgSCP("futex_cmpxchg is not enabled\n"); + return -ENOSYS; + } + + rcu_read_lock(); + + ret = -ESRCH; + if (!pid) { + p = current; + } else { + p = find_task_by_vpid(pid); + if (!p) + goto err_unlock; + } + + ret = -EPERM; + if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) + goto err_unlock; + + head = task_thread_info(p)->pm_robust_list; + rcu_read_unlock(); + + if (!head) { + DbgSCP("robust_list is not set yet\n"); + len = sizeof(dscr); + memset(&dscr, 0, len); + ret = 0; + goto empty_list_out; + } + + /* We need to return the original descriptor; + * restoring it from the pointer saved in task_struct: + */ + dscr_attrs = get_descriptor_attrs((void *)head, 0 /* signum */); + if (!dscr_attrs) { + DbgSCP_ALERT("Failed to restore descriptor attributes " + "on pointer 0x%lx\n", head); + return -EFAULT; + } + DbgSCP("dscr_attrs = 0x%p\n", dscr_attrs); + + len = e2k_ptr_size(dscr_attrs->user_ptr_lo, dscr_attrs->user_ptr_hi, 0); + 
DbgSCP("list head stored: lo=0x%llx hi=0x%llx tags=0x%x len=%zd\n", + dscr_attrs->user_ptr_lo, dscr_attrs->user_ptr_hi, + dscr_attrs->user_tags, len); + if (unlikely(len < SIZEOF_PROT_HEAD_STRUCT)) { + DbgSCP_ALERT("len (0x%zx) < sizeof(struct robust_list_head)\n", + len); + return -EFAULT; + } + + TRY_USR_PFAULT { + NATIVE_STORE_VALUE_WITH_TAG(&AWP(&dscr).hi, + dscr_attrs->user_ptr_hi, + dscr_attrs->user_tags >> 4); + NATIVE_STORE_VALUE_WITH_TAG(&AWP(&dscr).lo, + dscr_attrs->user_ptr_lo, + dscr_attrs->user_tags & 0xF); + } CATCH_USR_PFAULT { + return -EFAULT; + } END_USR_PFAULT + + DbgSCP("robust_list head: lo=0x%lx hi=0x%lx tags=0x%x len=%zd\n", + AWP(&dscr).lo, AWP(&dscr).hi, dscr_attrs->user_tags, len); + ret = 0; + + len = SIZEOF_PROT_HEAD_STRUCT; +empty_list_out: + if (copy_to_user((void *)len_ptr, &len, sizeof(len))) + return -EFAULT; + if (copy_to_user_with_tags((void *)head_ptr, &dscr, sizeof(dscr))) + return -EFAULT; + return ret; + +err_unlock: + rcu_read_unlock(); + + return ret; +} + +#define PROCESS_VM_RW_V_PROC \ + (regs->sys_num == __NR_process_vm_readv) ? 
\ + "process_vm_readv" : "process_vm_writev" +#define BAD_IOVEC_STR_MSG \ + "Bad %s iovec structure in protected %s\n" + +notrace __section(".entry.text") +static +long protected_sys_process_vm_readwritev(const unsigned long pid, + const struct iovec __user *lvec, + unsigned long liovcnt, + const struct iovec __user *rvec, + unsigned long riovcnt, + unsigned long flags, + const struct pt_regs *regs, + const int vm_write) +{ + pid_t id = pid; + size_t lsize, rsize; + struct iovec *lv = NULL, *rv = NULL; + long rval; + + DbgSCP("(%ld, lvec=0x%lx, lcnt=%ld, rvec=0x%lx, rcnt=%ld, flg=0x%lx)\n", + pid, lvec, liovcnt, rvec, riovcnt, flags); + + lsize = e2k_ptr_size(regs->args[3], regs->args[4], 0); + if (lsize < (sizeof(struct iovec) * liovcnt)) { + DbgSCP_ALERT("Insufficient lvec size: %zd < %ld\n", + lsize, sizeof(struct iovec) * liovcnt); + return -EFAULT; + } + rsize = e2k_ptr_size(regs->args[7], regs->args[8], 0); + if (rsize < (sizeof(struct iovec) * riovcnt)) { + DbgSCP_ALERT("Insufficient rvec size: %zd < %ld\n", + rsize, sizeof(struct iovec) * riovcnt); + return -EFAULT; + } + + if (liovcnt || riovcnt) { + char *new_arg; + + new_arg = get_user_space(lsize + rsize); + lv = (struct iovec *)new_arg; + rv = (struct iovec *)(new_arg + lsize); + + if (liovcnt) { + rval = convert_array((long *) lvec, (long *) lv, lsize, + 2, liovcnt/*nr_segs*/, 0x7, 0xf); + if (rval) { + DbgSCP_ALERT(BAD_IOVEC_STR_MSG, "local", + PROCESS_VM_RW_V_PROC); + return -EINVAL; + } + } + + if (riovcnt) { + rval = convert_array((long *) rvec, (long *) rv, rsize, + 2, riovcnt/*nr_segs*/, 0x7, 0xf); + if (rval) { + DbgSCP_ALERT(BAD_IOVEC_STR_MSG, "remote", + PROCESS_VM_RW_V_PROC); + return -EINVAL; + } + } + } + + if (vm_write) + rval = sys_process_vm_writev(id, lv, liovcnt, + rv, riovcnt, flags); + else + rval = sys_process_vm_readv(id, lv, liovcnt, + rv, riovcnt, flags); + + return rval; +} + +notrace __section(".entry.text") +long protected_sys_process_vm_readv(const unsigned long pid, /* a1 
*/ + const struct iovec __user *lvec, /* a2 */ + unsigned long liovcnt, /* a3 */ + const struct iovec __user *rvec, /* a4 */ + unsigned long riovcnt, /* a5 */ + unsigned long flags, /* a6 */ + const struct pt_regs *regs) +{ + return protected_sys_process_vm_readwritev(pid, + lvec, liovcnt, + rvec, riovcnt, + flags, regs, 0); +} + +notrace __section(".entry.text") +long protected_sys_process_vm_writev(const unsigned long pid, /* a1 */ + const struct iovec __user *lvec, /* a2 */ + unsigned long liovcnt, /* a3 */ + const struct iovec __user *rvec, /* a4 */ + unsigned long riovcnt, /* a5 */ + unsigned long flags, /* a6 */ + const struct pt_regs *regs) +{ + return protected_sys_process_vm_readwritev(pid, + lvec, liovcnt, + rvec, riovcnt, + flags, regs, 1); +} + + +notrace __section(".entry.text") +long protected_sys_vmsplice(int fd, /* a1 */ + const struct iovec __user *iov, /* a2 */ + unsigned long nr_segs, /* a3 */ + unsigned int flags, /* a4 */ + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs) +{ + long rval = -EINVAL; + size_t size; + struct iovec *kiov; + + DbgSCP("(fd=%d, iov=0x%lx, nr_segs=%ld, flg=0x%x)\n", + fd, iov, nr_segs, flags); + + if (!iov) + goto err_out; + + size = e2k_ptr_size(regs->args[3], regs->args[4], 0); + if (size < sizeof(struct iovec)) { + DbgSCP_ALERT("Insufficient iov size: %zd < %ld\n", + size, sizeof(struct iovec)); + return rval; + } + + kiov = get_user_space(size); + rval = convert_array((long *) iov, (long *) kiov, size, + 2, 1/*nr_segs*/, 0x7, 0x7); + if (rval) + goto err_out; + + rval = sys_vmsplice(fd, kiov, nr_segs, flags); + return rval; + +err_out: + DbgSCP_ALERT("Bad iovec structure in protected vmsplice syscall\n"); + return -EINVAL; +} + + +notrace __section(".entry.text") +long protected_sys_keyctl(const int operation, + const unsigned long arg2, + const unsigned long arg3, + const unsigned long arg4, + const unsigned long arg5, + const unsigned long unused6, + const struct pt_regs 
*regs) +{ + long rval = -EINVAL; + size_t size; + struct iovec __user *iov; + struct iovec *kiov; + struct keyctl_kdf_params __user *ukdf_params; + struct keyctl_kdf_params *kkdf_params; + + switch (operation) { + case KEYCTL_INSTANTIATE_IOV: + iov = (struct iovec __user *) arg3; + if (!iov) + break; + size = e2k_ptr_size(regs->args[5], regs->args[6], 0); + if (size < sizeof(struct iovec)) { + DbgSCP_ALERT("Insufficient iov size: %zd < %ld\n", + size, sizeof(struct iovec)); + return rval; + } + kiov = get_user_space(size); + rval = convert_array((long *) iov, (long *) kiov, size, + 2, 1/*nr_segs*/, 0x7, 0x7); + if (rval) + return rval; + return sys_keyctl(operation, arg2, (unsigned long)kiov, + arg4, arg5); + case KEYCTL_DH_COMPUTE: + ukdf_params = (struct keyctl_kdf_params __user *) arg5; + if (!ukdf_params) + break; + size = e2k_ptr_size(regs->args[9], regs->args[10], 0); + if (size < sizeof(struct keyctl_kdf_params)) { + DbgSCP_ALERT("Insufficient keyctl_kdf_params size: %zd < %ld\n", + size, sizeof(struct keyctl_kdf_params)); + return rval; + } + kkdf_params = get_user_space(size); + rval = convert_array((long *) ukdf_params, (long *) kkdf_params, + size, 3, 1/*nr_segs*/, 0x1f, 0x1f); + if (rval) + return rval; + return sys_keyctl(operation, arg2, arg3, arg4, + (unsigned long)kkdf_params); + } + + return sys_keyctl(operation, arg2, arg3, arg4, arg5); +} + + +notrace __section(".entry.text") +long protected_sys_prctl(const int option, + const unsigned long arg2, + const unsigned long arg3, + const unsigned long arg4, + const unsigned long arg5, + const unsigned long unused6, + const struct pt_regs *regs) +{ + long rval = -EINVAL; + size_t size; + int __user **intptr; + int **kintptr; + struct sock_fprog __user *sfprog; + struct sock_fprog *ksfprog; + + switch (option) { + case PR_GET_TID_ADDRESS: + intptr = (int __user **) arg2; + if (!intptr) + break; + size = e2k_ptr_size(regs->args[3], regs->args[4], 0); + if (size < 16) { + DbgSCP_ALERT("Insufficient (int 
**) arg2 size: %zd < 16\n", + size); + return rval; + } + kintptr = get_user_space(size); + rval = convert_array((long *) intptr, (long *) kintptr, size, + 1, 1/*nr_segs*/, 0x3, 0x3); + if (rval) + return rval; + return sys_prctl(option, (unsigned long) kintptr, arg3, + arg4, arg5); + case PR_SET_SECCOMP: + sfprog = (struct sock_fprog __user *) arg3; + if (!sfprog) + break; + size = e2k_ptr_size(regs->args[5], regs->args[6], 0); + if (size < sizeof(struct sock_fprog)) { + DbgSCP_ALERT("Insufficient (sock_fprog *) arg3 size: %zd < %ld\n", + size, sizeof(struct sock_fprog)); + return rval; + } + ksfprog = get_user_space(size); + rval = convert_array((long *) sfprog, (long *) ksfprog, size, + 2, 1/*nr_segs*/, 0xc, 0xf); + if (rval) + return rval; + return sys_prctl(option, arg2, (unsigned long) ksfprog, + arg4, arg5); + } + + return sys_prctl(option, arg2, arg3, arg4, arg5); +} + +notrace __section(".entry.text") +long protected_sys_ioctl(const int fd, /* a1 */ + const unsigned long request, /* a2 */ + const unsigned long __user argp, /* a3 */ + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs) +{ + unsigned int size; + long rval; + + DbgSCP("(fd=0x%x, request=0x%lx, argp=0x%lx)\n", + fd, request, (unsigned long) argp); + + switch (request) { + case SIOCGIFCONF: { +#define STRUCT_IFCONF_FIELDS 2 +#define STRUCT_IFCONF_ITEMS 1 +#define STRUCT_IFCONF_MASK_TYPE 0xc +#define STRUCT_IFCONF_MASK_ALIGN 0xf + +/* sizeof(struct ifconf) in user128 protected mode space */ +#define STRUCT_IFCONF_PROT_SIZE 32 +/* sizeof(struct ifreq) in user128 protected mode space */ +#define STRUCT_IFREQ_PROT_SIZE 48 + + /* Pointer to user128 struct ifconf */ + void __user *ifc128 = (void *) argp; + /* Pointer to user128 array of ifreq structures */ + void __user *ifr128; + + /* Pointer to temporary64 translated struct ifconf */ + struct ifconf __user *ifc64; + /* Pointer to temporary64 array of ifreq structures */ + struct 
ifreq __user *ifr64; + + /* + * Lengths in terms of bytes of user128 and temporary64 array + * of ifreq structures + */ + int ifc_len128, ifc_len64; + long stack_size; /* to allocate for calculations */ + int i, tag; + + /* Check descriptor's size of user128 struct ifconf. */ + size = e2k_ptr_size(regs->args[5], regs->args[6], 0); + if (size < STRUCT_IFCONF_PROT_SIZE) { + DbgSCP_ALERT("ifconf pointer is too little: %d < %d\n", + size, STRUCT_IFCONF_PROT_SIZE); + return -EINVAL; + } + + /* Reading value of the 'ifc_len' field: */ +TRY_USR_PFAULT { + NATIVE_LOAD_VAL_AND_TAGW((int *) ifc128, ifc_len128, tag); +} CATCH_USR_PFAULT { + DbgSCP_ALERT(FATAL_ERR_READ, (long) argp); + return -EINVAL; +} END_USR_PFAULT + + if (tag != ETAGNVS) { +#define ERR_FATAL_IFCLEN "unexpected value in field 'ifc_len' (tag 0x%x)\n" + DbgSCP_ALERT(ERR_FATAL_IFCLEN, tag); + return -EINVAL; + } + + /* + * Count length of temporary64 array of ifreq structures. + * It differs from user128 one, because struct ifreq contains + * pointers. + */ + ifc_len64 = ifc_len128 * sizeof(struct ifreq) / + STRUCT_IFREQ_PROT_SIZE; + + /* Allocating stack to convert 'ifc128' to 64 bit mode. + * NB> 'stack_size' must be multiple of 16; otherwise + * get_user_space() may deliver non-aligned space. + */ + stack_size = (sizeof(struct ifconf) + ifc_len64 + 15) & ~0xf; + ifc64 = get_user_space(stack_size); + /* Translate struct ifconf from user128 to kernel64 mode. */ + ifr64 = (struct ifreq *) ((uintptr_t) ifc64 + + sizeof(struct ifconf)); + + rval = convert_array(ifc128, (long *) ifc64, size, + STRUCT_IFCONF_FIELDS, STRUCT_IFCONF_ITEMS, + STRUCT_IFCONF_MASK_TYPE, + STRUCT_IFCONF_MASK_ALIGN); + if (rval) { + DbgSCP_ALERT("Bad struct ifconf for ioctl SIOCGIFCONF"); + return rval; + } + + /* Save pointer to user128's array of ifreq structures. */ + ifr128 = ifc64->ifc_req; + + /* + * Initialize temporary64 struct ifconf with translated values. 
+ */ + ifc64->ifc_len = ifc_len64; + ifc64->ifc_req = ifr64; + + /* Do the ioctl(). */ + rval = sys_ioctl(fd, request, (unsigned long) ifc64); + if (rval) + return rval; + + /* + * Kernel writes actual length of array of ifreq structures + * in ifc_len. Translate it to actual length of user128 array. + */ + ifc_len64 = ifc64->ifc_len; + ifc_len128 = ifc_len64 * STRUCT_IFREQ_PROT_SIZE / + sizeof(struct ifreq); + /* + * Sys_ioctl writes an array of stucts ifreq in ifc_req buffer. + * In our case it does not contais pointers, + * but still sizeof(struct ifreq64) > sizeof(struct ifreq128), + * so we need to copy it one by one. + */ + for (i = 0; i < ifc_len128; i += STRUCT_IFREQ_PROT_SIZE) { + if (copy_to_user(ifr128 + i, ifr64++, + sizeof(struct ifreq))) { + DbgSCP_ALERT("%s:%d copy_to_user() failed\n", + __FILE__, __LINE__); + return -EFAULT; + } + } + + /* + * Write actual length of array of ifreq structures to user128 + * struct ifconf. + */ + if (put_user(ifc_len128, (int *) ifc128)) { + DbgSCP_ALERT("%s:%d put_user() failed\n", + __FILE__, __LINE__); + return -EFAULT; + } + + break; + } + case SIOCETHTOOL: { + /* Pointer to user128's struct ifreq */ + void __user *ifr128 = (void *) argp; + /* Pointer to converted struct ifreq */ + struct ifreq *ifr64; + long stack_size; + void __user *useraddr; + u32 ethcmd; + /* All errors here are of ours, so we are responsible for + * not supporting all these ioctls. Let's respond as + * 'Not supported' for any errors here. + */ + rval = -EOPNOTSUPP; + + /* Check descriptor's size of user128's struct ifreq. */ + size = e2k_ptr_size(regs->args[5], regs->args[6], 0); + if (size < STRUCT_IFREQ_PROT_SIZE) { + DbgSCP_ALERT("ifreq size is too small: %d < %d\n", + size, STRUCT_IFREQ_PROT_SIZE); + return rval; + } + + /* Allocate a stack to convert 'ifr128' to 64 bit mode. + * People say that 'stack_size' MUST be multiple of 16, + * otherwise get_user_space may deliver non-aligned space. 
+ */ + stack_size = (sizeof(struct ifreq) + 15) & ~0xf; + ifr64 = get_user_space(stack_size); + + /* Convert a ifr128 to ifr64: struct {long; long; descr} */ + if (get_pm_struct_simple(( + long __user *) ifr128, (long __user *)ifr64, + STRUCT_IFREQ_PROT_SIZE, 3, 1, 0x311, 0x311)) { + return rval; + } + + useraddr = ifr64->ifr_data; + + if (copy_from_user(ðcmd, useraddr, sizeof(ethcmd))) + return rval; + + switch (ethcmd) { + case ETHTOOL_GRXCLSRLALL: + case ETHTOOL_GRXRINGS: + case ETHTOOL_GRXCLSRLCNT: + case ETHTOOL_GRXCLSRULE: + case ETHTOOL_SRXCLSRLINS: + case ETHTOOL_SRXCLSRLDEL: + /* Most ethtool structures are defined without padding. + * Unfortunately struct ethtool_rxnfc is an exception. + * A special processing is required. Not supported + * right now. + */ + return rval; + default: + rval = sys_ioctl(fd, request, (unsigned long) ifr64); + } + break; + } + default: + rval = sys_ioctl(fd, request, (unsigned long) argp); + } + + return rval; +} + + +notrace __section(".entry.text") +long protected_sys_bpf(const int cmd, /* a1 */ + const unsigned long __user attr, /* a2 */ + const unsigned int size, /* a3 */ + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs) +{ +#define BPF_ERR_BAD_ATTR "Bad arg 'attr' (0x%lx) in syscall bpf(%d)" +#define BPF_ERR_BAD_CMD "Bad arg 'cmd' in syscall bpf(%d)" +#define BPF_ERR_CMD_NOT_SUPPORTED "BPF command 'cmd=%d' not supported yet" + unsigned long __user *attr_64 = (unsigned long *) attr; + unsigned int size_128, size_64 = sizeof(union bpf_attr); + long rval = 0; + + DbgSCP("(cmd=0x%x, attr=0x%lx, size=%d) tags=0x%lx\n", + cmd, attr, size, regs->tags); + + if (attr) { + int tag = (regs->tags >> 16 /*2x8*/) & 0xff; + + if ((tag == ETAGAPQ) || (tag == ETAGPLD) || (tag == ETAGPLQ)) { + size_128 = e2k_ptr_size(regs->args[3], regs->args[4], 0); + if ((size_64 > size) || (size_128 < size)) { + DbgSCP_ALERT(BPF_ERR_BAD_ATTR, attr, cmd); + rval = -EINVAL; + goto 
out; + } + } + attr_64 = get_user_space(size); + /* NB> BPF requires unused attr fields must be zeroed! */ + memset(attr_64, 0, size); + switch (cmd) { + case BPF_MAP_CREATE: + case BPF_PROG_ATTACH: + case BPF_PROG_DETACH: + case BPF_PROG_GET_NEXT_ID: + case BPF_MAP_GET_NEXT_ID: + case BPF_PROG_GET_FD_BY_ID: + case BPF_MAP_GET_FD_BY_ID: + case BPF_RAW_TRACEPOINT_OPEN: + /* No pointer in 'attr' for these commands */ + attr_64 = (unsigned long *) attr; + break; + + case BPF_MAP_LOOKUP_ELEM: + case BPF_MAP_UPDATE_ELEM: + case BPF_MAP_DELETE_ELEM: + case BPF_MAP_LOOKUP_AND_DELETE_ELEM: + case BPF_MAP_GET_NEXT_KEY: +#define BPF_MAP_x_ELEM_FIELDS 4 +#define BPF_MAP_x_ELEM_MTYPE 0x1330 +#define BPF_MAP_x_ELEM_MALIGN 0x1333 + rval = get_pm_struct_simple((long *)attr, attr_64, size, + BPF_MAP_x_ELEM_FIELDS, 1/*items*/, + BPF_MAP_x_ELEM_MTYPE, + BPF_MAP_x_ELEM_MALIGN); + break; + + case BPF_PROG_LOAD: +#define BPF_PROG_LOAD_FIELDS 14 +#define BPF_PROG_LOAD_MTYPE 0x03131111131331 +#define BPF_PROG_LOAD_MALIGN 0x03333111133333 + rval = get_pm_struct_simple((long *)attr, attr_64, size, + BPF_PROG_LOAD_FIELDS, 1/*items*/, + BPF_PROG_LOAD_MTYPE, + BPF_PROG_LOAD_MALIGN); + break; + + case BPF_OBJ_PIN: + case BPF_OBJ_GET: +#define BPF_OBJ_x_FIELDS 2 +#define BPF_OBJ_x_MTYPE 0x13 +#define BPF_OBJ_x_MALIGN 0x13 + rval = get_pm_struct_simple((long *)attr, attr_64, size, + BPF_OBJ_x_FIELDS, 1/*items*/, + BPF_OBJ_x_MTYPE, + BPF_OBJ_x_MALIGN); + break; + + case BPF_PROG_TEST_RUN: +#define BPF_PTEST_RUN_FIELDS 8 +#define BPF_PTEST_RUN_MTYPE 0x33113311 +#define BPF_PTEST_RUN_MALIGN 0x33113311 + rval = get_pm_struct_simple((long *)attr, attr_64, size, + BPF_PTEST_RUN_FIELDS, 1/*items*/, + BPF_PTEST_RUN_MTYPE, + BPF_PTEST_RUN_MALIGN); + break; + + case BPF_OBJ_GET_INFO_BY_FD: +#define BPF_OBJ_GET_INFO_FIELDS 2 +#define BPF_OBJ_GET_INFO_MTYPE 0x31 +#define BPF_OBJ_GET_INFO_MALIGN 0x33 + rval = get_pm_struct_simple((long *)attr, attr_64, size, + BPF_OBJ_GET_INFO_FIELDS, 1/*items*/, + 
BPF_OBJ_GET_INFO_MTYPE, + BPF_OBJ_GET_INFO_MALIGN); + break; + + case BPF_PROG_QUERY: +#define BPF_PROG_QUERY_FIELDS 4 +#define BPF_PROG_QUERY_MTYPE 0x0311 +#define BPF_PROG_QUERY_MALIGN 0x1311 + rval = get_pm_struct_simple((long *)attr, attr_64, size, + BPF_PROG_QUERY_FIELDS, 1/*items*/, + BPF_PROG_QUERY_MTYPE, + BPF_PROG_QUERY_MALIGN); + break; + + case BPF_BTF_LOAD: +#define BPF_BTF_LOAD_FIELDS 4 +#define BPF_BTF_LOAD_MTYPE 0x0133 +#define BPF_BTF_LOAD_MALIGN 0x1133 + rval = get_pm_struct_simple((long *)attr, attr_64, size, + BPF_BTF_LOAD_FIELDS, 1/*items*/, + BPF_BTF_LOAD_MTYPE, + BPF_BTF_LOAD_MALIGN); + break; + + case BPF_TASK_FD_QUERY: +#define BPF_TASK_FD_QUERY_FIELDS 6 +#define BPF_TASK_FD_QUERY_MTYPE 0x111311 +#define BPF_TASK_FD_QUERY_MALIGN 0x1113311 + rval = get_pm_struct_simple((long *)attr, attr_64, size, + BPF_TASK_FD_QUERY_FIELDS, 1/*items*/, + BPF_TASK_FD_QUERY_MTYPE, + BPF_TASK_FD_QUERY_MALIGN); + break; + + case BPF_BTF_GET_FD_BY_ID: + case BPF_MAP_FREEZE: + case BPF_BTF_GET_NEXT_ID: + DbgSCP_ALERT(BPF_ERR_CMD_NOT_SUPPORTED, cmd); + rval = -ENOTSUPP; + goto out; + + default: + DbgSCP_ALERT(BPF_ERR_BAD_CMD, cmd); + rval = -EINVAL; + goto out; + } + if (rval) { + DbgSCP_ALERT(BPF_ERR_BAD_ATTR, attr, cmd); + goto out; + } + } + rval = sys_bpf(cmd, (union bpf_attr *) attr_64, size_64); + +out: + DbgSCP("\treturned %ld\n", rval); + return rval; +} + + +#if 0 +static void print_epoll_kevent(void *kevent, int count) +{ + int *kptr; + int i, j; + + if (!arch_init_pm_sc_debug_mode(PM_SC_DBG_MODE_COMPLEX_WRAPPERS)) + return; + + pr_info("%s:: kevent = 0x%lx:\n", __func__, kevent); + for (j = 0; j < count; j++) { + if (count > 1) + pr_info("\t[instance #%d:]\n", j); + for (i = 0, kptr = kevent; + i < sizeof(struct epoll_event) / sizeof(int); + i++) { + pr_info("\t\t0x%.8x\n", *kptr); + kptr++; + } + } +} +#endif + +#define EPOLL_EVENT_PROT_DATA_OFFSET 16 /* field offset in prot struct */ +#define SIZEOF_EPOLL_EVENT_KDWRD (sizeof(struct epoll_event) / 
sizeof(long)) +#define SIZEOF_EPOLL_EVENT_UDWRD 4 /* protected size in double words */ + +/* Converting user (protected) event structure to kernel structure: */ +static +struct epoll_event *convert_epoll_event(void __user *event, int count, + size_t max_size) +{ + void *kevent = NULL; + int rval; + + if (!event) + return kevent; + + kevent = get_user_space(sizeof(struct epoll_event) * count); + rval = convert_array_3((long *)event, (long *)kevent, max_size, + 2 /*fields*/, count /*items*/, + 0xc /*mask_type*/, 0xf /*mask_align*/, + 0 /*mask_rw*/, CONV_ARR_IGNORE_DSCR_FLD_ERR); + if (rval) { + DbgSCP_ALERT("Bad epoll_event structure"); + return NULL; + } + + return kevent; +} + +/* Updating user (protected) event structure on modified kernel structure: */ +static int update_epoll_event(void __user *event, void *kevent, int count) +{ + long lval; + int tag; + int j, ret; + long *klarr; + long __user *ularr; + long __user *pfield; + + if (!event || !kevent) + return -1; /* something's wrong */ + + DbgSCP("(event=0x%lx, kevent=0x%lx, count=%d)\n", event, kevent, count); + /* print_epoll_kevent(kevent, count); */ + + klarr = (long *)kevent; + ularr = (long *)event; + for (j = 0, ret = 0; + j < count; + j++, klarr += SIZEOF_EPOLL_EVENT_KDWRD, + ularr += SIZEOF_EPOLL_EVENT_UDWRD) { + lval = (long)(((struct epoll_event *)klarr)->events); + pfield = (long *)((long)ularr); + if (put_user(lval, pfield)) { + DbgSCP("put_user() failed at %s:%d\n", + __FILE__, __LINE__); + return -EFAULT; + } + ret++; + + /* Checking if struct field 'data' is descriptor: */ + NATIVE_LOAD_VAL_AND_TAGD((long)ularr + + EPOLL_EVENT_PROT_DATA_OFFSET, lval, tag); + if (tag != ETAGNVD) { /* this must be descriptor */ + DbgSCP("lval=0x%lx tag=0x%x update skipped\n", + lval, tag); + continue; /* skipping it for now */ + } + + lval = (long)(((struct epoll_event *)klarr)->data); + pfield = (long *)((long)ularr + EPOLL_EVENT_PROT_DATA_OFFSET); + if (put_user(lval, pfield)) { + DbgSCP("%s:%d put_user() 
failed\n", + __FILE__, __LINE__); + return -EFAULT; + } + ret++; + } + + return ret; /* # fields updated */ +} + +notrace __section(".entry.text") +long protected_sys_epoll_ctl(const unsigned long epfd, /* a1 */ + const unsigned long op, /* a2 */ + const unsigned long fd, /* a3 */ + void __user *event, /* a4 */ + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs) +{ + void *kevent; + long rval; +#define EVENT_STRUCT_PROT_SIZE 32 + DbgSCP("(epfd=0x%lx, op=0x%lx, fd=0x%lx, event=0x%lx)\n", + epfd, op, fd, event); + + kevent = convert_epoll_event(event, 1, EVENT_STRUCT_PROT_SIZE); + + rval = sys_epoll_ctl(epfd, op, fd, (struct epoll_event *) kevent); + + return rval; +} + +notrace __section(".entry.text") +long protected_sys_epoll_wait(const unsigned long epfd, /* a1 */ + void __user *event, /* a2 */ + const long maxevents, /* a3 */ + const long timeout, /* a4 */ + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs) +{ + void *kevent; + long rval; + size_t size; + + DbgSCP("(epfd=0x%lx, event=0x%lx, maxevents=%ld, timeout=%ld)\n", + epfd, event, maxevents, timeout); + + if (maxevents <= 0) + return -EINVAL; + + size = e2k_ptr_size(regs->args[3], regs->args[4], 0); + if (size < (EVENT_STRUCT_PROT_SIZE * maxevents)) { + if (size) + DbgSCP_ALERT("Wrong event structure size (%zd < %ld)\n", + size, EVENT_STRUCT_PROT_SIZE * maxevents); + return -EINVAL; + } + + kevent = convert_epoll_event(event, maxevents, size); + + rval = sys_epoll_wait(epfd, (struct epoll_event *) kevent, + maxevents, timeout); + if (rval <= 0) + return rval; + + /* 'kevent' structure may have been modified; updating user struct: */ + if (update_epoll_event(event, kevent, maxevents) < 0) + rval = -EFAULT; + + return rval; +} + +notrace __section(".entry.text") +long protected_sys_epoll_pwait(const unsigned long epfd, /* a1 */ + void __user *event, /* a2 */ + const long maxevents, /* a3 */ + const long timeout, /* a4 */ + const 
unsigned long sigmask, /* a5 */ + const unsigned long sigsetsize, /* a6 */ + const struct pt_regs *regs) +{ + void *kevent; + long rval; + size_t size; + + DbgSCP("(epfd=0x%lx, event=0x%lx, maxevents=%ld, timeout=%ld, sigmask, sigsetsize=%ld)\n", + epfd, event, maxevents, timeout, sigsetsize); + + if (maxevents <= 0) + return -EINVAL; + + size = e2k_ptr_size(regs->args[3], regs->args[4], 0); + if (size < (EVENT_STRUCT_PROT_SIZE * maxevents)) { + if (size) + DbgSCP_ALERT("Wrong event structure size (%zd < %ld)\n", + size, EVENT_STRUCT_PROT_SIZE * maxevents); + return -EINVAL; + } + + kevent = convert_epoll_event(event, maxevents, size); + + rval = sys_epoll_pwait(epfd, (struct epoll_event *) kevent, maxevents, + timeout, (sigset_t *) sigmask, sigsetsize); + if (rval <= 0) + return rval; + + /* 'kevent' structure may have been modified; updating user struct: */ + if (update_epoll_event(event, kevent, maxevents) < 0) + rval = -EFAULT; + + return rval; +} + +notrace __section(".entry.text") +long protected_sys_pselect6(const long nfds, /* a1 */ + const unsigned long readfds, /* a2 */ + const unsigned long writefds, /* a3 */ + const unsigned long exceptfds, /* a4 */ + const unsigned long timeout, /* a5 */ + const unsigned long sigmask, /* a6 */ + const struct pt_regs *regs) +{ + unsigned int size; + void *sigmask_ptr64 = NULL; + long rval; + + DbgSCP("(nfds=%ld, ...)\n", nfds); + + if (sigmask) { +#define STRUCT_SIGSET6_FIELDS 2 +#define STRUCT_SIGSET6_MASK_TYPE 0x7 +#define STRUCT_SIGSET6_MASK_ALIGN 0x7 +#define STRUCT_SIGSET6_PROT_SIZE 24 /* sizeof(modified sigmask) in PM */ + + /* Check descriptor's size of 6th argument. */ + size = e2k_ptr_size(regs->args[11], regs->args[12], 0); + if (size < STRUCT_SIGSET6_PROT_SIZE) { + DbgSCP_ALERT("'sigmask' pointer size is too little: %d < %d\n", + size, STRUCT_SIGSET6_PROT_SIZE); + return -EINVAL; + } + + /* Translate struct sigmask from user128 to kernel64 mode. 
*/ + sigmask_ptr64 = get_user_space(size); + rval = convert_array((long *)sigmask, (long *) sigmask_ptr64, + size, STRUCT_SIGSET6_FIELDS, 1 /*items*/, + STRUCT_SIGSET6_MASK_TYPE, + STRUCT_SIGSET6_MASK_ALIGN); + if (rval) { + DbgSCP_ALERT("Bad struct 'sigmask' for pselect6() syscall"); + return rval; + } + } + + rval = sys_pselect6((int) nfds, (void *) readfds, (void *) writefds, + (void *) exceptfds, (void *) timeout, + (void *) sigmask_ptr64); + DbgSCP("sys_pselect6(nfds=%ld, ...) returned %ld\n", nfds, rval); + + return rval; +} + +/* + * Converting protected structure siginfo_t into 64-bit format. + * Allocates converted structure on user stack and returns it in the 2nd arg. + * Returns error code or 0 if converted OK. + */ +static +int convert_protected_siginfo_t(const unsigned long __user prot_siginfo, + void __user **siginfo_64, + const unsigned long mask_type) +{ +/* Structure siginfo_t contains 9 fields: + * [ int-int - int - int-int - ptr - int - long - {int,ptr} ] + * Full conversion masks for siginfo_t structure (every field initialized): + */ +#define MASK_SIGINFO_T 0x410400000 /* field type mask */ +#define MASK_SIGINFO_T_ALIGN 0x311330100 /* next field alignment mask */ +#define SIGINFO_T_FIELDS 9 /* field number in the structure */ +#define SIGVAL_OFFSET_LO 32 /* 'sigval' field offset (in bytes) */ +#define SIGVAL_OFFSET_HI 40 /* ditto */ +#define SIGVAL_OFFSET 24 /* ditto in the 64-bit structure */ + void __user *converted_siginfo; + long descr_lo, descr_hi, ptr; + int rval, tag, tag_hi; + + DbgSCP(" siginfo=0x%lx\n", prot_siginfo); + + /* Allocating space on user stack: */ + converted_siginfo = get_user_space(sizeof(siginfo_t)); + + /* Convert 'prot_siginfo': */ + rval = get_pm_struct((long *) prot_siginfo, + converted_siginfo, + sizeof(siginfo_t), SIGINFO_T_FIELDS, 1, + mask_type, MASK_SIGINFO_T_ALIGN, 0, + CONV_ARR_WRONG_DSCR_FLD); + + if (rval) { + converted_siginfo = NULL; + goto out; + } + + /* Check for descriptor in the '_sigval' field: */ 
+TRY_USR_PFAULT { + NATIVE_LOAD_VAL_AND_TAGD((prot_siginfo + SIGVAL_OFFSET_LO), + descr_lo, tag); + if (!tag) /* 'int' in the union */ + goto out; + NATIVE_LOAD_VAL_AND_TAGD((prot_siginfo + SIGVAL_OFFSET_HI), + descr_hi, tag_hi); +} CATCH_USR_PFAULT { + DbgSCP_ALERT(FATAL_ERR_READ, (long) (prot_siginfo + 4)); + converted_siginfo = NULL; + rval = -EFAULT; + goto out; +} END_USR_PFAULT + + tag |= (tag_hi << 4); + if (tag != ETAGAPQ) { + DbgSCP_ALERT("Bad struct '_sigval' in siginfo_t: tag = 0x%x\n", + tag); + goto out; + } + /* Storing descriptor attributes in 'sival_ptr_list' for + * kernel to update 'usiginfo' in copy_siginfo_to_user_prot(): + */ + ptr = *(long *)(converted_siginfo + SIGVAL_OFFSET); + store_descriptor_attrs((void *)ptr, + descr_lo, descr_hi, tag, 0 /*sig#*/); + DbgSCP("stored sigval attrs: [0x%lx] ==> 0x%lx 0x%lx\n", ptr, + descr_lo, descr_hi); + +out: + *siginfo_64 = converted_siginfo; + + return rval; +} + +/* Post-processor aimed to return syscall termination status + * from temporal structure used to run syscall back + * to original protected structure. + * Returns error code from put_user() or 0 if OK. 
+ */ +static +int update_protected_siginfo_t(unsigned long __user siginfo64, + unsigned long __user siginfo128) +{ + unsigned long __user *infop64 = (unsigned long __user *) siginfo64; + unsigned long __user *infop128 = (unsigned long __user *) siginfo128; + unsigned long lval; + int rval = 0; + /* + Structure siginfo_t consists of 5 'int's + ptr + int + long + ptr: + + -= 128 bit format: =- -= 64 bit format: =- + 63 32 0 63 32 0 + +===============|===============+ +===============|===============+ + | si_errno | si_signo | 0 | si_errno | si_signo | + +===============|===============+ +===============|===============+ + | XXXXXXXXXXXXX | si_code | 1 | XXXXXXXXXXXXX | si_code | + +===============|===============+ +===============|===============+ + | _uid | _pid | 2 | _uid | _pid | + +===============|===============+ +===============|===============+ + | XXXXXXXXXXXXXXXXXXXXXXXXXXXXX | 3 | sigval_t: {int, sival_ptr} | + +===============|===============+ +===============|===============+ + | sigval_t: {int/sival_ptr(lo)} | 4 | XXXXXXXXXXXXX | si_status | + +---------------|---------------+ +===============|===============+ + | sival_ptr(hi) | 5 | [long] | + +===============|===============+ +===============|===============+ + | XXXXXXXXXXXXX | si_status | 6 | [ptr] | + +===============|===============+ +===============|===============+ + | [long] | 7 + +===============|===============+ + | [ptr (lo)] | 8 + +---------------|---------------+ + | [ptr (hi)] | 9 + +===============|===============+ + */ + + if (!infop64 || !infop128) { + DbgSCP("Empty input: siginfo64=0x%lx siginfo128=0x%lx\n", + siginfo64, siginfo128); + return rval; + } + + if (infop64 != infop128) { /* these are different descriptors */ + lval = *infop64; + rval = put_user(lval, infop128); + + lval = *(infop64 + 1); + rval = (rval) ?: put_user(lval, infop128 + 1); + + lval = *(infop64 + 2); + rval = (rval) ?: put_user(lval, infop128 + 2); + + if (rval) + goto out; + } + /* NB> We cannot use direct 
order below as it wouldn't work + * in the case when siginfo64 and siginfo128 are the same pointer. + */ + lval = *(infop64 + 5); + rval = (rval) ?: put_user(lval, infop128 + 7); + + lval = *(infop64 + 4); + rval = (rval) ?: put_user(lval, infop128 + 6); + + lval = *(infop64 + 3); + rval = (rval) ?: put_user(lval, infop128 + 4); + + rval = (rval) ?: put_user(0L, infop128 + 5); /* to avoid ETAG */ + +out: + if (rval) + DbgSCP_ALERT("FATAL ERROR: failed to write at 0x%lx !!!\n", + (long) infop128); + return rval; +} + +/* rt_sigqueueinfo/rt_tgsigqueueinfo conversion masks siginfo_t structure: */ +#define MASK_SIGINFO_T_RT_PID_UID 0xc9c400088 /* field type mask */ +#define MASK_SIGINFO_T_RT 0xc9c488088 /* field type mask */ + +static inline +unsigned long get_siginfo_mask_on_layout(int signo, int code) +/* This function implements check similar to the one in has_si_pid_and_uid() */ +{ + unsigned long mask; + + switch (siginfo_layout(signo, code)) { + case SIL_KILL: + case SIL_CHLD: + case SIL_RT: + mask = MASK_SIGINFO_T_RT_PID_UID; + break; + default: + mask = MASK_SIGINFO_T_RT; + break; + } + + return mask; +} + +static inline +unsigned long get_siginfo_mask_on_siginfo(const unsigned long __user usiginfo) +{ + int signo, code; + long mask; + + if (get_user(signo, (int *) usiginfo) + || get_user(code, (int *) (usiginfo + 8))) { + DbgSCP_ALERT(FATAL_ERR_READ, (long) usiginfo); + return 0L; + } + + mask = get_siginfo_mask_on_layout(signo, code); + DbgSCP("signo=%d, code=%d ==> mask = 0x%lx\n", signo, code, mask); + + return mask; +} + +notrace __section(".entry.text") +long protected_sys_rt_sigqueueinfo(const long tgid, /* a1 */ + const long sig, /* a2 */ + const unsigned long __user usiginfo, /* a3 */ + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs) +{ + void *converted_siginfo = NULL; + long rval; + + DbgSCP("tgid=%ld, sig=%ld, usiginfo=0x%lx\n", + tgid, sig, (long)usiginfo); + + if (usiginfo) { 
+ unsigned int size; + unsigned long mask; + + size = e2k_ptr_size(regs->args[5], regs->args[6], 0); + if (size < sizeof(siginfo_t)) { + DbgSCP_ALERT(" 'uinfo' pointer size is too little: %d < %zd\n", + size, sizeof(siginfo_t)); + return -EINVAL; + } + mask = get_siginfo_mask_on_siginfo(usiginfo); + if (!mask) + return -EINVAL; + rval = convert_protected_siginfo_t(usiginfo, &converted_siginfo, + mask); + if (rval) + return rval; + } + + rval = sys_rt_sigqueueinfo(tgid, sig, converted_siginfo); + + return rval; +} + +notrace __section(".entry.text") +long protected_sys_rt_tgsigqueueinfo(const long tgid, /* a1 */ + const long tid, /* a2 */ + const long sig, /* a3 */ + const unsigned long __user usiginfo, /*a4*/ + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs) +{ + void *converted_siginfo = NULL; + long rval; + + DbgSCP("tgid=%ld, tid=%ld, sig=%ld, usiginfo=0x%lx\n", + tgid, tid, sig, (long)usiginfo); + + if (usiginfo) { + unsigned int size; + unsigned long mask; + + size = e2k_ptr_size(regs->args[7], regs->args[8], 0); + if (size < sizeof(siginfo_t)) { + DbgSCP_ALERT("'usiginfo' pointer size is too little: %d < %zd\n", + size, sizeof(siginfo_t)); + return -EINVAL; + } + mask = get_siginfo_mask_on_siginfo(usiginfo); + if (!mask) + return -EINVAL; + rval = convert_protected_siginfo_t(usiginfo, &converted_siginfo, + mask); + if (rval) + return rval; + } + + rval = sys_rt_tgsigqueueinfo(tgid, tid, sig, converted_siginfo); + + return rval; +} + +notrace __section(".entry.text") +long protected_sys_pidfd_send_signal(const long pidfd, /* a1 */ + const long sig, /* a2 */ + const unsigned long __user info, /* a3 */ + unsigned long flags, /* a4 */ + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs) +{ + void *siginfo64 = NULL; + long rval; + + DbgSCP("pidfd=%ld, sig=%ld, info=0x%lx, flags=0x%lx\n", + pidfd, sig, (long)info, flags); + + if (!pidfd) { + DbgSCP_ALERT("Bad syscall args: pidfd=%ld, 
sig=%ld, info=0x%lx\n", + pidfd, sig, (long)info); + return -EINVAL; + } + + if (info) { + unsigned int size; + unsigned long mask; + + size = e2k_ptr_size(regs->args[5], regs->args[6], 0); + if (size < sizeof(siginfo_t)) { + DbgSCP_ALERT("'info' pointer size is too little: %d < %zd\n", + size, sizeof(siginfo_t)); + return -EINVAL; + } + mask = get_siginfo_mask_on_siginfo(info); + if (!mask) + return -EINVAL; + rval = convert_protected_siginfo_t(info, &siginfo64, + mask); + if (rval) + return rval; + } + + rval = sys_pidfd_send_signal((int) pidfd, (int) sig, + (siginfo_t __user *) siginfo64, + (unsigned int) flags); + if (!rval && info) + update_protected_siginfo_t((unsigned long)siginfo64, info); + + return rval; +} + +notrace __section(".entry.text") +long protected_sys_waitid(const long which, /* a1 */ + const long pid, /* a2 */ + const unsigned long __user *infop, /* a3 */ + const long options, /* a4 */ + const unsigned long __user *ru, /* a5 */ + const unsigned long unused6, + const struct pt_regs *regs) +{ + long __user siginfop = (long) infop; + long rval; + + DbgSCP("which=%ld, pid=%ld, infop=0x%lx, options=0x%x, ru=0x%lx\n", + which, pid, infop, (int) options, ru); + + if (infop) { + unsigned int size; + + size = e2k_ptr_size(regs->args[5], regs->args[6], 0); + if (size < sizeof(siginfo_t)) { + DbgSCP_ALERT(" 'infop' pointer size is too little: %d < %zd\n", + size, sizeof(siginfo_t)); + return -EINVAL; + } + /* NB> There is no need in converting input 'infop' to 64 format + * as the syscall uses only top half of the structure data. 
+ */ + } + + rval = sys_waitid((int) which, (pid_t) pid, + (struct siginfo __user *) siginfop, + (int) options, (struct rusage __user *) ru); + update_protected_siginfo_t(siginfop, siginfop); + + return rval; +} + +notrace __section(".entry.text") +long protected_sys_io_uring_register(const unsigned long fd, /* a1 */ + const unsigned long opcode, /* a2 */ + const unsigned long __user arg, /* a3 */ + const unsigned long nr_args, /* a4 */ + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs) +{ + void __user *arg64 = (void __user *) arg; + long rval; + + DbgSCP("fd=%ld, opcode=%ld, arg=0x%lx, nr_args=0x%lx\n", + fd, opcode, arg, nr_args); + + if (arg && (opcode == IORING_REGISTER_BUFFERS)) { + unsigned int size; + + size = e2k_ptr_size(regs->args[5], regs->args[6], 0); + if (size < (DESCRIPTOR_SIZE * nr_args)) { + DbgSCP_ALERT(" 'arg' pointer size is too little: %d < %zd\n", + size, (DESCRIPTOR_SIZE * nr_args)); + return -EINVAL; + } + arg64 = convert_prot_iovec_struct(arg, nr_args, regs->args[5], + regs->args[6], "io_uring_register"); + rval = get_pm_struct_simple((long __user *) arg, arg64, size, + 1, nr_args, 0x3, 0x3); + if (rval) + return rval; + } + + rval = sys_io_uring_register((unsigned int) fd, (unsigned int) opcode, + arg64, (unsigned int) nr_args); + + return rval; +} + +notrace __section(".entry.text") +long protected_sys_kexec_load(const unsigned long entry, /* a1 */ + const unsigned long nr_segments, /* a2 */ + const unsigned long __user segments, /* a3 */ + const unsigned long flags, /* a4 */ + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs) +{ + void __user *segments64; + long rval; + unsigned int size, size128; +#define KEXEC_SEGMENT_STRUCT_SIZE128 64 /* protected segment structure size */ +#define KEXEC_SEGMENT_T 0x3131 +#define KEXEC_SEGMENT_A 0x3331 + DbgSCP("entry=%ld, nr_segments=%ld, segments=0x%lx, flags=0x%lx\n", + entry, nr_segments, segments, flags); + + if 
(!segments || !nr_segments) { + DbgSCP_ALERT("Empty segments/nr_segments: 0x%lx / %ld\n", + segments, nr_segments); + return -EADDRNOTAVAIL; + } + + size = e2k_ptr_size(regs->args[5], regs->args[6], 0); + size128 = KEXEC_SEGMENT_STRUCT_SIZE128 * nr_segments; + if (size < size128) { + DbgSCP_ALERT(" 'segments' pointer size is too little: %d < %d\n", + size, size128); + return -EADDRNOTAVAIL; + } + + segments64 = get_user_space(size128); + rval = get_pm_struct_simple((long *) segments, segments64, size, + 4, nr_segments, + KEXEC_SEGMENT_T, KEXEC_SEGMENT_A); + if (rval) + return rval; + + rval = sys_kexec_load(entry, nr_segments, segments64, flags); + + return rval; +} + +#endif /* CONFIG_PROTECTED_MODE */ diff --git a/arch/e2k/kernel/protected_timer_create.c b/arch/e2k/kernel/protected_timer_create.c new file mode 100644 index 000000000000..7a686983ad46 --- /dev/null +++ b/arch/e2k/kernel/protected_timer_create.c @@ -0,0 +1,301 @@ +/* linux/arch/e2k/kernel/protected_timer_create.c, v 1.0 02/20/2019. + * + * This is implementation of the system calls timer_create and rt_sigtimedwait: + * int timer_create(clockid_t clockid, struct sigevent *sevp, + * timer_t *timerid); + * int rt_sigtimedwait(const sigset_t *set, siginfo_t *info, + * const struct timespec *timeout, + * size_t sigsetsize); + * for E2K protected mode. + * + * Copyright (C) 2019 MCST + */ + + +#include +#include +#include + +#include +#include +#include + + +#ifdef CONFIG_PROTECTED_MODE + + +#if (DYNAMIC_DEBUG_SYSCALLP_ENABLED) + /* NB> PM debug module must have been initialized by the moment + * of invocation of any of the functions that follow; + * we can use simple defines over here. + * For full ones see . + */ +#undef DbgSCP +#define DbgSCP(fmt, ...) \ +do { \ + if (current->mm->context.pm_sc_debug_mode \ + & PM_SC_DBG_MODE_COMPLEX_WRAPPERS) \ + pr_info("%s: " fmt, __func__, ##__VA_ARGS__); \ +} while (0) + +#undef DbgSCP_ERR +#define DbgSCP_ERR(fmt, ...) 
\ +do { \ + if (current->mm->context.pm_sc_debug_mode & PM_SC_DBG_MODE_CHECK) \ + pr_err("%s: " fmt, __func__, ##__VA_ARGS__); \ +} while (0) + +#undef DbgSCP_ALERT +#define DbgSCP_ALERT(fmt, ...) \ +do { \ + if (current->mm->context.pm_sc_debug_mode & PM_SC_DBG_MODE_CHECK) \ + pr_alert("%s: " fmt, __func__, ##__VA_ARGS__); \ +} while (0) + +#undef DbgSCP_WARN +#define DbgSCP_WARN(fmt, ...) \ +do { \ + if (current->mm->context.pm_sc_debug_mode & PM_SC_DBG_MODE_CHECK) \ + pr_warn("%s: " fmt, __func__, ##__VA_ARGS__); \ +} while (0) + +#undef PM_SYSCALL_WARN_ONLY +#define PM_SYSCALL_WARN_ONLY \ + (current->mm->context.pm_sc_debug_mode & PM_SC_DBG_MODE_WARN_ONLY) + +#endif /* DYNAMIC_DEBUG_SYSCALLP_ENABLED */ + + +#define get_user_space(x) arch_compat_alloc_user_space(x) + +#define USER_PTR_OFFSET_LO 0 +#define USER_PTR_OFFSET_HI 8 + +static inline +unsigned long e2k_descriptor_size(long user_ptr_hi, unsigned int min_size) +{ + e2k_ptr_hi_t hi; + unsigned int ptr_size; + + AW(hi) = user_ptr_hi; + ptr_size = AS(hi).size - AS(hi).curptr; + + if (ptr_size < min_size) { +#define E2K_PTR_ERR_MSG \ + "Pointer is too small in protected timer_create(): %d < %d\n" + DbgSCP_ALERT(E2K_PTR_ERR_MSG, ptr_size, min_size); + return 0; + } else { + return ptr_size; + } +} + +/* + * On success, timer_create() returns 0, and the ID of the new timer is + * placed in *timerid. On failure, -1 is returned, and errno is set to + * indicate the error. 
+ */ +long protected_sys_timer_create(const long arg1 /*clockid*/, + const unsigned long __user arg2 /*sevp*/, + const unsigned long __user arg3 /*timerid*/, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs) +{ +#define MASK_SIGEVENT_TYPE_I 0x0 +#define MASK_SIGEVENT_TYPE_P 0x3 +#define MASK_SIGEVENT_TYPE_F 0x2 +#define MASK_SIGEVENT_TYPE_xIIFP 0x380 +#define MASK_SIGEVENT_ALIGN_THR 0xf3 +#define MASK_SIGEVENT_ALIGN_TID 0x33 +#define MASK_SIGEVENT_RW_NONE 0x288 +#define MASK_SIGEVENT_RW_SIGNAL 0x280 +#define MASK_SIGEVENT_RW_THREAD 0x000 +#define MASK_SIGEVENT_RW_THREAD_ID 0x800 +#define SIZE_SIGEVENT 64 +#define PROT_OFFSET_SIGEV_NOTIFY 16 /* NB> 16 is correct number to download + * the field with NATIVE_LOAD_VAL_AND_TAGD. + */ + unsigned long user_sev = (unsigned long)arg2; + timer_t *timerid = (timer_t *)arg3; + sigevent_t *kernel_sev = NULL; + unsigned long size; + long mask_sigevent_type, mask_align, mask_rw_type; + int field_num; + unsigned char sival_ptr_tags, tag, tag3; + long user_ptr_lo, user_ptr_hi, user_notify; + int rval; + + DbgSCP("clockid=%ld, sevp=0x%lx, timerid=0x%lx\n", arg1, arg2, arg3); + if (!user_sev) + goto run_syscall; + + /* Detecting the type of the first field of the sigevent structure: */ + TRY_USR_PFAULT { + NATIVE_LOAD_VAL_AND_TAGD(user_sev + USER_PTR_OFFSET_LO, + user_ptr_lo, sival_ptr_tags); + NATIVE_LOAD_VAL_AND_TAGD(user_sev + USER_PTR_OFFSET_HI, + user_ptr_hi, tag); + NATIVE_LOAD_VAL_AND_TAGD(user_sev + PROT_OFFSET_SIGEV_NOTIFY, + user_notify, tag3); + } CATCH_USR_PFAULT { + return -EFAULT; + } END_USR_PFAULT + sival_ptr_tags |= tag << 4; + user_notify >>= 32; /* NB> the value stored in the upper half of long */ + + /* + * Acqiure type mask in accordance with the data type + * in the union sigval: int/ptr + */ + switch (sival_ptr_tags) { + case ETAGNUM: + mask_sigevent_type = MASK_SIGEVENT_TYPE_I; + break; + case ETAGAPQ: + mask_sigevent_type = 
MASK_SIGEVENT_TYPE_P; + break; + case ETAGPLD: + mask_sigevent_type = MASK_SIGEVENT_TYPE_F; + break; + case ETAGPLQ: /* this is for future Elbrus arch V6 */ + DbgSCP_ERR("unsupported tag ETAGPLQ (0x%x)\n", sival_ptr_tags); + DbgSCP("\tptr_lo=0x%lx ptr_hi=0x%lx\n", + user_ptr_lo, user_ptr_hi); + return -EINVAL; + default: + mask_sigevent_type = MASK_SIGEVENT_TYPE_I; + } + + /* Calculating mask_rw_type on sevp.sigev_notify value: */ + switch (user_notify) { + case SIGEV_NONE: + mask_align = MASK_SIGEVENT_ALIGN_THR; + mask_rw_type = MASK_SIGEVENT_RW_NONE; + field_num = 3; + break; + case SIGEV_SIGNAL: + mask_align = MASK_SIGEVENT_ALIGN_THR; + mask_rw_type = MASK_SIGEVENT_RW_SIGNAL; + field_num = 3; + break; + case SIGEV_THREAD: + mask_sigevent_type |= MASK_SIGEVENT_TYPE_xIIFP; + mask_align = MASK_SIGEVENT_ALIGN_THR; + mask_rw_type = MASK_SIGEVENT_RW_THREAD; + field_num = 5; + break; + case SIGEV_THREAD_ID: + mask_align = MASK_SIGEVENT_ALIGN_TID; + mask_rw_type = MASK_SIGEVENT_RW_THREAD_ID; + field_num = 5; /* +1 extra field to have 8-order struct size */ + break; + default: + DbgSCP_ERR("unsupported sigev_notify value (0x%lx)\n", + user_notify); + return -EINVAL; + } + + /* Converting structure sigevent sev: */ + kernel_sev = get_user_space(sizeof(*kernel_sev)); + size = e2k_descriptor_size(regs->args[4], SIZE_SIGEVENT /*min_size*/); + if (!size) + return -EINVAL; + rval = convert_array_3((long *) user_sev, (long *)kernel_sev, size, + field_num, 1, + mask_sigevent_type, mask_align, mask_rw_type, 0); + + if (rval != 0) { + DbgSCP_ERR("Bad structure sigevent\n"); + return -EINVAL; + } +run_syscall: + rval = sys_timer_create((clockid_t)arg1, kernel_sev, timerid); + if (rval) + return rval; + + /* Save it in sival_ptr_list: */ + if (kernel_sev) { + store_descriptor_attrs(kernel_sev->sigev_value.sival_ptr, + user_ptr_lo, user_ptr_hi, sival_ptr_tags, 0 /*signum*/); + + DbgSCP("\tkernel_ptr = 0x%lx\n", + (long)kernel_sev->sigev_value.sival_ptr); + DbgSCP("\tuser_ptr_lo = 
0x%lx\n", user_ptr_lo); + DbgSCP("\tuser_ptr_hi = 0x%lx\n", user_ptr_hi); + DbgSCP("\tuser_tags = 0x%x\n", sival_ptr_tags); + } + return 0; +} + +/* + * On success, rt_sigtimedwait() returns a signal number (positive value). + * On failure it returns -1, with errno set to indicate the error. + */ +long protected_sys_rt_sigtimedwait(const unsigned long __user arg1 /*set*/, + const unsigned long __user arg2 /*info*/, + const unsigned long __user arg3 /*timeout*/, + const unsigned long arg4 /*sigsetsize*/) +{ + struct sival_ptr_list *curr_el = NULL; + void __user *dscr_ptr; + siginfo_t __user *info; + int rval; + + DbgSCP("set= 0x%lx, info=0x%lx, timeout=0x%lx, sigsetsize=%ld\n", + arg1, arg2, arg3, arg4); + + rval = sys_rt_sigtimedwait((sigset_t *) arg1, (siginfo_t *) arg2, + (struct __kernel_timespec *) arg3, (size_t)arg4); + if (rval <= 0) { + DbgSCP("rt_sigtimedwait failed. rval = %d\n", rval); + return rval; + } + if (!arg2) + return rval; + info = (siginfo_t *)arg2; + + DbgSCP("si_code = 0x%x\n", info->si_code); + if ((info->si_code > 0) && + siginfo_layout(info->si_signo, info->si_code) != SIL_RT) { + return rval; + } + dscr_ptr = info->si_ptr; + if (dscr_ptr == NULL) { + /* + * The 'si_ptr pointer' in the 'siginfo' structure + * appeared empty. So there is nothing to convert + * to descriptor for proper handling in the user space. + */ + return rval; + } + /* + * We need to pass si_ptr descriptor to user. 
+ * We look for the descriptor in ti->sival_ptr_list: + */ + curr_el = get_descriptor_attrs(dscr_ptr, 0 /*signum*/); + +#define ERRMSG_rt_sigtimedwait_ESPIPE \ +"prot_sys_rt_sigtimedwait failed to find descriptor %px in ti->sival_ptr_list\n" + if (curr_el == NULL) { + DbgSCP_ALERT(ERRMSG_rt_sigtimedwait_ESPIPE, dscr_ptr); + DbgSCP_ALERT("\treturning (-ESPIPE)\n"); + return -ESPIPE; + } + + DbgSCP("curr_el:\nnext = %px\n", curr_el->link.next); + DbgSCP("kernel_ptr = %px\n", curr_el->kernel_ptr); + DbgSCP("user_ptr_lo = 0x%llx\n", curr_el->user_ptr_lo); + DbgSCP("user_ptr_hi = 0x%llx\n", curr_el->user_ptr_hi); + DbgSCP("user_tags = 0x%x\n", curr_el->user_tags); + NATIVE_STORE_TAGGED_QWORD( + (e2k_ptr_t *) (&(((siginfo_t *)arg2)->si_ptr)+0x1), + curr_el->user_ptr_lo, curr_el->user_ptr_hi, + curr_el->user_tags & 0xf, curr_el->user_tags >> 4); + + DbgSCP("info->si_code = 0x%x\n", info->si_code); + return rval; +} + +#endif /* CONFIG_PROTECTED_MODE */ diff --git a/arch/e2k/kernel/ptrace.c b/arch/e2k/kernel/ptrace.c new file mode 100644 index 000000000000..015afdde0de5 --- /dev/null +++ b/arch/e2k/kernel/ptrace.c @@ -0,0 +1,1959 @@ + +/* + * linux/arch/e2k/kernel/ptrace.c + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#define CREATE_TRACE_POINTS +#include + +/* #define DEBUG_PTRACE 0 */ +#define NEED_CUI_COMPUTING + +#undef DEBUG_PT_MODE +#undef DebugPT +#define DEBUG_PT_MODE 0 /* Compilation unit debugging */ +#define DebugPT(...) DebugPrint(DEBUG_PT_MODE, ##__VA_ARGS__) + +#undef DEBUG_CUI_MODE +#undef DebugCUI +#define DEBUG_CUI_MODE 0 /* Compilation unit debugging */ +#define DebugCUI(...) 
DebugPrint(DEBUG_CUI_MODE, ##__VA_ARGS__) + +#undef DEBUG_TRACE +#undef DebugTRACE +#define DEBUG_TRACE 0 +#define DebugTRACE(...) DebugPrint(DEBUG_TRACE, ##__VA_ARGS__) + + +/** + * regs_query_register_offset() - query register offset from its name + * @name: the name of a register + * + * regs_query_register_offset() returns the offset of a register in struct + * pt_regs from its name. If the name is invalid, this returns -EINVAL; + */ +int regs_query_register_offset(const char *name) +{ + int reg_num, offset; + + if (name[0] == '\0' || (name[0] != 'r' && name[0] != 'b' && + strncmp(name, "pred", 4) && + strncmp(name, "ret_ip", 6))) + return INT_MIN; + + if (!strncmp(name, "ret_ip", 6)) { + offset = REGS_TIR1_REGISTER_FLAG; + } else if (name[0] == 'r') { + /* '%r' register */ + if (kstrtoint(name + 1, 10, ®_num)) + return INT_MIN; + + if (reg_num < 0 || reg_num >= E2K_MAXSR_d) + return INT_MIN; + + offset = (reg_num & ~1) * 16; + if (reg_num & 1) { + if (machine.native_iset_ver < E2K_ISET_V5) + offset += 8; + else + offset += 16; + } + } else if (name[0] == 'b') { + /* '%b' register */ + if (kstrtoint(name + 1, 10, ®_num)) + return INT_MIN; + + if (reg_num < 0 || reg_num >= 128) + return INT_MIN; + + offset = reg_num | REGS_B_REGISTER_FLAG; + } else { + /* '%pred' register */ + if (kstrtoint(name + 4, 10, ®_num)) + return INT_MIN; + + if (reg_num < 0 || reg_num >= 32) + return INT_MIN; + + offset = reg_num | REGS_PRED_REGISTER_FLAG; + } + + return offset; +} + +static char *r_reg_name[E2K_MAXSR_d] = { + "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", + "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", + "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", + "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39", + "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47", + "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55", + "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63", + "r64", "r65", 
"r66", "r67", "r68", "r69", "r70", "r71", + "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79", + "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87", + "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95", + "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103", + "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111", + "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119", + "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127", + "r128", "r129", "r130", "r131", "r132", "r133", "r134", "r135", + "r136", "r137", "r138", "r139", "r140", "r141", "r142", "r143", + "r144", "r145", "r146", "r147", "r148", "r149", "r150", "r151", + "r152", "r153", "r154", "r155", "r156", "r157", "r158", "r159", + "r160", "r161", "r162", "r163", "r164", "r165", "r166", "r167", + "r168", "r169", "r170", "r171", "r172", "r173", "r174", "r175", + "r176", "r177", "r178", "r179", "r180", "r181", "r182", "r183", + "r184", "r185", "r186", "r187", "r188", "r189", "r190", "r191", + "r192", "r193", "r194", "r195", "r196", "r197", "r198", "r199", + "r200", "r201", "r202", "r203", "r204", "r205", "r206", "r207", + "r208", "r209", "r210", "r211", "r212", "r213", "r214", "r215", + "r216", "r217", "r218", "r219", "r220", "r221", "r222", "r223" +}; + +static char *b_reg_name[128] = { + "b0", "b1", "b2", "b3", "b4", "b5", "b6", "b7", + "b8", "b9", "b10", "b11", "b12", "b13", "b14", "b15", + "b16", "b17", "b18", "b19", "b20", "b21", "b22", "b23", + "b24", "b25", "b26", "b27", "b28", "b29", "b30", "b31", + "b32", "b33", "b34", "b35", "b36", "b37", "b38", "b39", + "b40", "b41", "b42", "b43", "b44", "b45", "b46", "b47", + "b48", "b49", "b50", "b51", "b52", "b53", "b54", "b55", + "b56", "b57", "b58", "b59", "b60", "b61", "b62", "b63", + "b64", "b65", "b66", "b67", "b68", "b69", "b70", "b71", + "b72", "b73", "b74", "b75", "b76", "b77", "b78", "b79", + "b80", "b81", "b82", "b83", "b84", "b85", "b86", "b87", + "b88", "b89", "b90", "b91", "b92", "b93", "b94", "b95", + 
"b96", "b97", "b98", "b99", "b100", "b101", "b102", "b103", + "b104", "b105", "b106", "b107", "b108", "b109", "b110", "b111", + "b112", "b113", "b114", "b115", "b116", "b117", "b118", "b119", + "b120", "b121", "b122", "b123", "b124", "b125", "b126", "b127" +}; + +static char *pred_reg_name[32] = { + "pred0", "pred1", "pred2", "pred3", "pred4", "pred5", "pred6", "pred7", + "pred8", "pred9", "pred10", "pred11", "pred12", "pred13", "pred14", "pred15", + "pred16", "pred17", "pred18", "pred19", "pred20", "pred21", "pred22", "pred23", + "pred24", "pred25", "pred26", "pred27", "pred28", "pred29", "pred30", "pred31" +}; + + +/** + * regs_query_register_name() - query register name from its offset + * @offset: the offset of a register in struct pt_regs. + * + * regs_query_register_name() returns the name of a register from its + * offset in struct pt_regs. If the @offset is invalid, this returns NULL; + */ +const char *regs_query_register_name(unsigned int offset) +{ + unsigned int reg_num; + + if (offset & REGS_TIR1_REGISTER_FLAG) + return "ret_ip"; + + if (offset & REGS_PRED_REGISTER_FLAG) + return pred_reg_name[offset & ~REGS_PRED_REGISTER_FLAG]; + + if (offset & REGS_B_REGISTER_FLAG) + return b_reg_name[offset & ~REGS_B_REGISTER_FLAG]; + + reg_num = 2 * (offset / 32); + if (offset % 32) + ++reg_num; + + if (reg_num >= E2K_MAXSR_d) + return NULL; + + return r_reg_name[reg_num]; +} + +/** + * regs_get_register() - get register value from its offset + * @regs: pt_regs from which register value is gotten. + * @offset: offset number of the register. + * + * regs_get_register returns the value of a register. The @offset is the + * offset of the register in struct pt_regs address which specified by @regs. + * If @offset is bigger than MAX_REG_OFFSET, this returns 0. 
+ */ +unsigned long regs_get_register(const struct pt_regs *regs, unsigned int offset) +{ + e2k_psp_lo_t psp_lo = regs->stacks.psp_lo; + e2k_psp_hi_t cur_psp_hi, psp_hi = regs->stacks.psp_hi; + e2k_cr0_lo_t cr0_lo = regs->crs.cr0_lo; + e2k_cr1_lo_t cr1_lo = regs->crs.cr1_lo; + e2k_cr1_hi_t cr1_hi = regs->crs.cr1_hi; + unsigned long base, spilled, size; + u64 value; + u8 tag; + + if (unlikely((signed int) offset < 0)) + return 0xdead; + + if (offset & REGS_TIR1_REGISTER_FLAG) { + struct trap_pt_regs *trap = regs->trap; + + if (!trap || trap->nr_TIRs <= 0) + return 0xdead; + + return trap->TIRs[1].TIR_lo.TIR_lo_ip; + } + + if (offset & REGS_PRED_REGISTER_FLAG) { + u64 pf, pval, ptag; + int pred, psz, pcur; + + pred = offset & ~REGS_PRED_REGISTER_FLAG; + + psz = AS(cr1_hi).psz; + pcur = AS(cr1_hi).pcur; + + if (pcur && pred <= psz) { + pred = pred + pcur; + if (pred > psz) + pred -= psz + 1; + } + + pf = AS(cr0_lo).pf; + + pval = (pf & (1ULL << 2 * pred)) >> 2 * pred; + ptag = (pf & (1ULL << (2 * pred + 1))) >> (2 * pred + 1); + + return (ptag << 1) | pval; + } + + cur_psp_hi = READ_PSP_HI_REG(); + + if (offset & REGS_B_REGISTER_FLAG) { + int qr, r, br, rbs, rsz, rcur; + + rbs = AS(cr1_hi).rbs; + rsz = AS(cr1_hi).rsz; + rcur = AS(cr1_hi).rcur; + + br = offset & ~REGS_B_REGISTER_FLAG; + + qr = br / 2 + rcur; + if (qr > rsz) + qr -= rsz + 1; + qr += rbs; + + r = 2 * qr; + if (br & 1) + ++r; + + offset = 16 * (r & ~1); + if (r & 1) { + if (machine.native_iset_ver < E2K_ISET_V5) + offset += 8; + else + offset += 16; + } + } + + size = AS(cr1_lo).wbs * EXT_4_NR_SZ; + base = AS(psp_lo).base + AS(psp_hi).ind - size; + + spilled = AS(psp_lo).base + AS(cur_psp_hi).ind; + + if (unlikely(offset + 8 > size)) + return 0xdead; + + if (base + offset >= spilled) + E2K_FLUSHR; + + load_value_and_tagd((void *) base + offset, &value, &tag); + + return value; +} + +/* User's "struct user_regs_struct" may be smaller than kernel one */ +static inline int get_user_regs_struct_size( + struct 
user_regs_struct __user *uregs, long *size) +{ + unsigned long val; + int ret; + + ret = get_user(val, &uregs->sizeof_struct); + if (!ret) { + *size = val; + if (val < offsetof(struct user_regs_struct, idr)) + ret = -EPERM; + } + + if (!ret && (cpu_has(CPU_FEAT_QPREG) && + *size < offsetofend(struct user_regs_struct, gext_tag_v5) || + cpu_has(CPU_FEAT_ISET_V6) && + *size < offsetofend(struct user_regs_struct, ctpr3_hi))) + pr_info_ratelimited("%s [%d] sys_ptrace: size of user_regs_struct is too small to keep all registers. Are you using an old version of profiler or gdb?\n", + current->comm, current->pid); + + return ret; +} + +/* psl field value in usd_lo variable, which is stored in the kernel, +* differs from the real user value by 1 +* according to the instruction set - any call increases this field by 1 +* and any return reduces by 1 +*/ +static void change_psl_field(unsigned long *pnt, int value) +{ + e2k_rwsap_lo_struct_t lo; + lo.word = *pnt; + /* only for protected mode */ + if (!lo.E2K_RUSD_lo_p) + return; + lo.fields.psl += value; + *pnt = lo.word; +} +/* The value of user gd & cud registers are in memory + they would be executed in done and return commands + Current gd & cud registers are pointed to kernel address + + cut_entry = mem[CUTD.base + cuir.[15:0]*32]; + CUD.base = cut_entry.cud.base; + CUD.size = cut_entry.cud.size; + CUD.c = cut_entry.cud.c; + GD.base = cut_entry.gd.base; + GD.size = cut_entry.gd.size; +*/ +static void execute_user_gd_cud_regs(struct task_struct *child, + struct user_regs_struct *user_regs) +{ + e2k_cutd_t cutd; + e2k_cute_t cute; + e2k_cute_t *p_cute = &cute; + unsigned long pnt_cut_entry, ts_flag; + size_t copied; + + /* index checkup */ + if (!(user_regs->cuir >> (CR1_lo_cuir_size))) + return; + cutd.word = user_regs->cutd; + pnt_cut_entry = cutd.CUTD_base + 32 * (user_regs->cuir & CUIR_mask); + if (pnt_cut_entry + sizeof(cute) > PAGE_OFFSET) + return; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + copied = 
access_process_vm(child, pnt_cut_entry, &cute, + sizeof(cute), 0); + clear_ts_flag(ts_flag); + if (copied != sizeof(cute)) { + pr_info(" %s pnt_cut_entry=0x%lx bad, sizeof(cute)=%ld\n", + __func__, pnt_cut_entry, sizeof(e2k_cute_t)); + return; + } + user_regs->gd_lo = CUTE_GD_BASE(p_cute); + user_regs->gd_hi = CUTE_GD_SIZE(p_cute); + user_regs->cud_lo = CUTE_CUD_BASE(p_cute); + user_regs->cud_hi = CUTE_CUD_SIZE(p_cute); +} + +void core_pt_regs_to_user_regs (struct pt_regs *pt_regs, + struct user_regs_struct *user_regs) +{ + struct trap_pt_regs *trap; + long size = sizeof(struct user_regs_struct); + int i; + struct thread_info *ti = current_thread_info(); +#ifdef CONFIG_GREGS_CONTEXT + struct global_regs gregs; +#endif +#ifdef CONFIG_USE_AAU + e2k_aau_t aau_regs; +#endif /* CONFIG_USE_AAU */ + + DebugTRACE("core_pt_regs_to_user_regs current->pid=%d(%s)\n", + current->pid, current->comm); + + memset(user_regs, 0, size); + +#ifdef CONFIG_GREGS_CONTEXT + machine.save_gregs(&gregs); + copy_k_gregs_to_gregs(&gregs, &ti->k_gregs); + GET_GREGS_FROM_THREAD(user_regs->g, user_regs->gtag, gregs.g); + for (i = 0; i < 32; i++) + user_regs->gext[i] = (u16) gregs.g[i].ext; + if (machine.native_iset_ver >= E2K_ISET_V5) + GET_GREGS_FROM_THREAD(user_regs->gext_v5, + user_regs->gext_tag_v5, &gregs.g[0].ext); + user_regs->bgr = AW(gregs.bgr); +#endif /* CONFIG_GREGS_CONTEXT */ + + user_regs->upsr = AW(ti->upsr); + + /* user_regs->oscud_lo = ; */ + /* user_regs->oscud_hi = ; */ + /* user_regs->osgd_lo = ; */ + /* user_regs->osgd_hi = ; */ + /* user_regs->osem = ; */ + /* user_regs->osr0 = ; */ + + user_regs->pfpfr = READ_PFPFR_REG_VALUE(); + user_regs->fpcr = READ_FPCR_REG_VALUE(); + user_regs->fpsr = READ_FPSR_REG_VALUE(); + + user_regs->cs_lo = READ_CS_LO_REG_VALUE(); + user_regs->cs_hi = READ_CS_HI_REG_VALUE(); + user_regs->ds_lo = READ_DS_LO_REG_VALUE(); + user_regs->ds_hi = READ_DS_HI_REG_VALUE(); + user_regs->es_lo = READ_ES_LO_REG_VALUE(); + user_regs->es_hi = 
READ_ES_HI_REG_VALUE(); + user_regs->fs_lo = READ_FS_LO_REG_VALUE(); + user_regs->fs_hi = READ_FS_HI_REG_VALUE(); + user_regs->gs_lo = READ_GS_LO_REG_VALUE(); + user_regs->gs_hi = READ_GS_HI_REG_VALUE(); + user_regs->ss_lo = READ_SS_LO_REG_VALUE(); + user_regs->ss_hi = READ_SS_HI_REG_VALUE(); + +#ifdef CONFIG_USE_AAU + memset(&aau_regs, 0, sizeof(aau_regs)); + + aau_regs.aasr = read_aasr_reg(); + aau_regs.aafstr = read_aafstr_reg_value(); + read_aaldm_reg(&aau_regs.aaldm); + read_aaldv_reg(&aau_regs.aaldv); + machine.get_aau_context(&aau_regs); + SAVE_AADS(&aau_regs); + + machine.save_aaldi(user_regs->aaldi); + SAVE_AALDA(user_regs->aalda); + + for (i = 0; i < 32; i++) { + user_regs->aad[2*i] = AW(aau_regs.aads[i]).lo; + user_regs->aad[2*i+1] = AW(aau_regs.aads[i]).hi; + } + + if (machine.native_iset_ver < E2K_ISET_V5) { + for (i = 0; i < 16; i++) + user_regs->aaind[i] = (u32) aau_regs.aainds[i]; + + for (i = 0; i < 8; i++) + user_regs->aaincr[i] = (u32) aau_regs.aaincrs[i]; + + for (i = 0; i < 16; i++) + user_regs->aasti[i] = (u32) aau_regs.aastis[i]; + } else { + for (i = 0; i < 16; i++) + user_regs->aaind[i] = aau_regs.aainds[i]; + + for (i = 0; i < 8; i++) + user_regs->aaincr[i] = aau_regs.aaincrs[i]; + + for (i = 0; i < 16; i++) + user_regs->aasti[i] = aau_regs.aastis[i]; + } + + user_regs->aaldv = AW(aau_regs.aaldv); + user_regs->aaldm = AW(aau_regs.aaldm); + + user_regs->aasr = AW(aau_regs.aasr); + user_regs->aafstr = (unsigned long long) aau_regs.aafstr; +#endif /* CONFIG_USE_AAU */ + + user_regs->clkr = 0; + + user_regs->dibcr = READ_DIBCR_REG_VALUE(); + user_regs->ddbcr = READ_DDBCR_REG_VALUE(); + user_regs->dibsr = READ_DIBSR_REG_VALUE(); + user_regs->dibar[0] = READ_DIBAR0_REG_VALUE(); + user_regs->dibar[1] = READ_DIBAR1_REG_VALUE(); + user_regs->dibar[2] = READ_DIBAR2_REG_VALUE(); + user_regs->dibar[3] = READ_DIBAR3_REG_VALUE(); + user_regs->ddbar[0] = READ_DDBAR0_REG_VALUE(); + user_regs->ddbar[1] = READ_DDBAR1_REG_VALUE(); + user_regs->ddbar[2] = 
READ_DDBAR2_REG_VALUE(); + user_regs->ddbar[3] = READ_DDBAR3_REG_VALUE(); + user_regs->dimcr = READ_DIMCR_REG_VALUE(); + user_regs->ddmcr = READ_DDMCR_REG_VALUE(); + user_regs->dimar[0] = READ_DIMAR0_REG_VALUE(); + user_regs->dimar[1] = READ_DIMAR1_REG_VALUE(); + user_regs->ddmar[0] = READ_DDMAR0_REG_VALUE(); + user_regs->ddmar[1] = READ_DDMAR1_REG_VALUE(); + user_regs->ddbsr = READ_DDBSR_REG_VALUE(); + if (machine.save_dimtp) { + e2k_dimtp_t dimtp; + machine.save_dimtp(&dimtp); + user_regs->dimtp_lo = dimtp.lo; + user_regs->dimtp_hi = dimtp.hi; + } + + user_regs->cutd = READ_CUTD_REG_VALUE(); + user_regs->cuir = (machine.native_iset_ver < E2K_ISET_V6) ? + AS(pt_regs->crs.cr1_lo).cuir : + AS(pt_regs->crs.cr1_lo).cui; + + /* user_regs->rpr = ; */ + user_regs->rpr_lo = READ_RPR_LO_REG_VALUE(); + user_regs->rpr_hi = READ_RPR_HI_REG_VALUE(); + + /* DAM */ + memcpy(user_regs->dam, ti->dam, sizeof(ti->dam)); + + user_regs->chain_stack_base = (u64) GET_PCS_BASE(&ti->u_hw_stack); + user_regs->proc_stack_base = (u64) GET_PS_BASE(&ti->u_hw_stack); + + user_regs->idr = READ_IDR_REG_VALUE(); + + if (machine.native_iset_ver >= E2K_ISET_V3) + user_regs->core_mode = machine.rrd(E2K_REG_CORE_MODE); + + user_regs->sizeof_struct = sizeof(struct user_regs_struct); + + if (!pt_regs) + return; + + trap = pt_regs->trap; + + user_regs->usbr = pt_regs->stacks.top; + user_regs->usd_lo = AW(pt_regs->stacks.usd_lo); + user_regs->usd_hi = AW(pt_regs->stacks.usd_hi); + change_psl_field((unsigned long *)&user_regs->usd_lo, -1); + + user_regs->psp_lo = AW(pt_regs->stacks.psp_lo); + user_regs->psp_hi = AW(pt_regs->stacks.psp_hi); + user_regs->pshtp = AW(pt_regs->stacks.pshtp); + + user_regs->cr0_lo = AW(pt_regs->crs.cr0_lo); + user_regs->cr0_hi = AW(pt_regs->crs.cr0_hi); + user_regs->cr1_lo = AW(pt_regs->crs.cr1_lo); + user_regs->cr1_hi = AW(pt_regs->crs.cr1_hi); + + /* + * new ip - the crash ip + * Gdb shows last command from chain + */ + user_regs->pcsp_lo = AW(pt_regs->stacks.pcsp_lo); + 
user_regs->pcsp_hi = AW(pt_regs->stacks.pcsp_hi); + user_regs->pcshtp = pt_regs->stacks.pcshtp; + + user_regs->wd = AW(pt_regs->wd); + + user_regs->br = AS(pt_regs->crs.cr1_hi).br; + + user_regs->ctpr1 = AW(pt_regs->ctpr1); + user_regs->ctpr2 = AW(pt_regs->ctpr2); + user_regs->ctpr3 = AW(pt_regs->ctpr3); + if (machine.native_iset_ver >= E2K_ISET_V6) { + user_regs->ctpr1_hi = AW(pt_regs->ctpr1_hi); + user_regs->ctpr2_hi = AW(pt_regs->ctpr2_hi); + user_regs->ctpr3_hi = AW(pt_regs->ctpr3_hi); + } + + /* user_regs->eir = ; */ + + user_regs->lsr = pt_regs->lsr; + user_regs->ilcr = pt_regs->ilcr; + if (machine.native_iset_ver >= E2K_ISET_V5) { + user_regs->lsr1 = pt_regs->lsr1; + user_regs->ilcr1 = pt_regs->ilcr1; + } + + if (trap) { + u64 data; + u8 tag; + + /* MLT */ +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + /* FIXME: it need implement for guest */ + if (!paravirt_enabled() && trap->mlt_state.num) + memcpy(user_regs->mlt, trap->mlt_state.mlt, + sizeof(e2k_mlt_entry_t) * trap->mlt_state.num); +#endif + + /* TC */ + for (i = 0; i < min(MAX_TC_SIZE, HW_TC_SIZE); i++) { + user_regs->trap_cell_addr[i] = trap->tcellar[i].address; + user_regs->trap_cell_info[i] = + AW(trap->tcellar[i].condition); + load_value_and_tagd(&trap->tcellar[i].data, + &data, &tag); + user_regs->trap_cell_val[i] = data; + user_regs->trap_cell_tag[i] = tag; + } + + /* TIR */ + for (i = 0; i <= trap->nr_TIRs; i++) { + user_regs->tir_hi[i] = trap->TIRs[i].TIR_hi.TIR_hi_reg; + user_regs->tir_lo[i] = trap->TIRs[i].TIR_lo.TIR_lo_reg; + } + + /* SBBP */ + if (trap->sbbp) + memcpy(user_regs->sbbp, trap->sbbp, + sizeof(user_regs->sbbp)); + } + execute_user_gd_cud_regs(current, user_regs); +} + +#ifdef CONFIG_HAVE_HW_BREAKPOINT +/* + * Handle hitting a HW-breakpoint. 
+ */ +static void ptrace_hbp_triggered(struct perf_event *bp, + struct perf_sample_data *data, struct pt_regs *regs) +{ + struct thread_struct *thread = ¤t->thread; + struct arch_hw_breakpoint *hw = counter_arch_bp(bp); + kernel_siginfo_t info; + int i, is_data_bp; + + is_data_bp = hw_breakpoint_type(bp) & HW_BREAKPOINT_RW; + + for (i = 0; i < HBP_NUM; ++i) { + if (is_data_bp && bp == thread->debug.hbp_data[i]) { + AW(thread->sw_regs.ddbsr) &= ~E2K_DDBSR_MASK(i); + AW(thread->sw_regs.ddbsr) |= + READ_DDBSR_REG_VALUE() & E2K_DDBSR_MASK(i); + break; + } + + if (!is_data_bp && bp == thread->debug.hbp_instr[i]) { + AW(thread->sw_regs.dibsr) &= ~E2K_DIBSR_MASK(i); + AW(thread->sw_regs.dibsr) |= + READ_DIBSR_REG_VALUE() & E2K_DIBSR_MASK(i); + break; + } + } + + info.si_signo = SIGTRAP; + info.si_errno = i; + info.si_code = TRAP_HWBKPT; + info.si_addr = (void __user *) (hw->address); + + force_sig_info(&info); +} + +static int register_ptrace_breakpoint(struct task_struct *child, + unsigned long bp_addr, int bp_len, int bp_type, + int idx, int enabled) +{ + struct perf_event_attr attr; + struct perf_event *event; + int ret; + + if (bp_type & HW_BREAKPOINT_RW) + event = child->thread.debug.hbp_data[idx]; + else + event = child->thread.debug.hbp_instr[idx]; + + if (!event) { + if (!enabled) + return 0; + + ptrace_breakpoint_init(&attr); + attr.bp_addr = bp_addr; + attr.bp_len = bp_len; + attr.bp_type = bp_type; + + event = register_user_hw_breakpoint(&attr, + ptrace_hbp_triggered, NULL, child); + if (IS_ERR(event)) + return PTR_ERR(event); + + if (bp_type & HW_BREAKPOINT_RW) + child->thread.debug.hbp_data[idx] = event; + else + child->thread.debug.hbp_instr[idx] = event; + + ret = 0; + } else { + attr = event->attr; + attr.bp_addr = bp_addr; + attr.bp_len = bp_len; + attr.bp_type = bp_type; + attr.disabled = !enabled; + + ret = modify_user_hw_breakpoint(event, &attr); + } + + return ret; +} +#endif + +static inline int get_hbp_len(int lng) +{ + return 1 << (lng - 1); +} + 
+static inline int get_hbp_type(int rw) +{ + int bp_type = 0; + + if (rw & 1) + bp_type |= HW_BREAKPOINT_W; + if (rw & 2) + bp_type |= HW_BREAKPOINT_R; + + return bp_type; +} + +static int ptrace_write_hbp_registers(struct task_struct *child, + struct user_regs_struct *user_regs) +{ + struct thread_struct *thread = &child->thread; + e2k_dibcr_t dibcr; + e2k_ddbcr_t ddbcr; + e2k_dibsr_t dibsr; + e2k_ddbsr_t ddbsr; + int ret; + + AW(dibcr) = user_regs->dibcr; + AW(ddbcr) = user_regs->ddbcr; + AW(dibsr) = user_regs->dibsr; + AW(ddbsr) = user_regs->ddbsr; + + ret = 0; + ret = ret ?: register_ptrace_breakpoint(child, + user_regs->dibar[0], HW_BREAKPOINT_LEN_8, + HW_BREAKPOINT_X, 0, dibcr.v0 && !dibsr.b0); + ret = ret ?: register_ptrace_breakpoint(child, + user_regs->dibar[1], HW_BREAKPOINT_LEN_8, + HW_BREAKPOINT_X, 1, dibcr.v1 && !dibsr.b1); + ret = ret ?: register_ptrace_breakpoint(child, + user_regs->dibar[2], HW_BREAKPOINT_LEN_8, + HW_BREAKPOINT_X, 2, dibcr.v2 && !dibsr.b2); + ret = ret ?: register_ptrace_breakpoint(child, + user_regs->dibar[3], HW_BREAKPOINT_LEN_8, + HW_BREAKPOINT_X, 3, dibcr.v3 && !dibsr.b3); + ret = ret ?: register_ptrace_breakpoint(child, + user_regs->ddbar[0], get_hbp_len(ddbcr.lng0), + get_hbp_type(ddbcr.rw0), 0, ddbcr.v0 && !ddbsr.b0); + ret = ret ?: register_ptrace_breakpoint(child, + user_regs->ddbar[1], get_hbp_len(ddbcr.lng1), + get_hbp_type(ddbcr.rw1), 1, ddbcr.v1 && !ddbsr.b1); + ret = ret ?: register_ptrace_breakpoint(child, + user_regs->ddbar[2], get_hbp_len(ddbcr.lng2), + get_hbp_type(ddbcr.rw2), 2, ddbcr.v2 && !ddbsr.b2); + ret = ret ?: register_ptrace_breakpoint(child, + user_regs->ddbar[3], get_hbp_len(ddbcr.lng3), + get_hbp_type(ddbcr.rw3), 3, ddbcr.v3 && !ddbsr.b3); + if (ret) + return ret; + + AW(thread->sw_regs.dibsr) = user_regs->dibsr; + AW(thread->sw_regs.ddbsr) = user_regs->ddbsr; + + AW(thread->debug.regs.dibcr) = user_regs->dibcr; + AW(thread->debug.regs.ddbcr) = user_regs->ddbcr; + thread->debug.regs.dibar0 = 
user_regs->dibar[0]; + thread->debug.regs.dibar1 = user_regs->dibar[1]; + thread->debug.regs.dibar2 = user_regs->dibar[2]; + thread->debug.regs.dibar3 = user_regs->dibar[3]; + thread->debug.regs.ddbar0 = user_regs->ddbar[0]; + thread->debug.regs.ddbar1 = user_regs->ddbar[1]; + thread->debug.regs.ddbar2 = user_regs->ddbar[2]; + thread->debug.regs.ddbar3 = user_regs->ddbar[3]; + + return 0; +} + +static int pt_regs_to_user_regs(struct task_struct *child, + struct user_regs_struct *user_regs, long size) +{ + struct thread_info *ti = task_thread_info(child); + struct thread_struct *thread = &child->thread; + struct pt_regs *pt_regs = ti->pt_regs; + struct trap_pt_regs *trap; + struct sw_regs *sw_regs = &child->thread.sw_regs; +#ifdef CONFIG_USE_AAU + e2k_aau_t *aau_regs; +#endif /* CONFIG_USE_AAU*/ + int i; + + memset(user_regs, 0, size); + DebugTRACE("pt_regs_to_user_regs current->pid=%d(%s) child->pid=%d\n", + current->pid, current->comm, child->pid); + + if (!pt_regs) + return -1; + +#ifdef CONFIG_USE_AAU + aau_regs = pt_regs->aau_context; +#endif /* CONFIG_USE_AAU*/ + + trap = pt_regs->trap; + +#ifdef CONFIG_GREGS_CONTEXT + copy_k_gregs_to_gregs(&sw_regs->gregs, &ti->k_gregs); + GET_GREGS_FROM_THREAD(user_regs->g, user_regs->gtag, sw_regs->gregs.g); + for (i = 0; i < 32; i++) + user_regs->gext[i] = (u16) sw_regs->gregs.g[i].ext; + if (machine.native_iset_ver >= E2K_ISET_V5) { + if (size >= offsetofend(struct user_regs_struct, gext_v5) && + size >= offsetofend(struct user_regs_struct, gext_tag_v5)) { + GET_GREGS_FROM_THREAD(user_regs->gext_v5, + user_regs->gext_tag_v5, + &sw_regs->gregs.g[0].ext); + } + } + user_regs->bgr = AW(sw_regs->gregs.bgr); +#endif /* CONFIG_GREGS_CONTEXT */ + + user_regs->upsr = AW(ti->upsr); + + /* user_regs->oscud_lo = ; */ + /* user_regs->oscud_hi = ; */ + /* user_regs->osgd_lo = ; */ + /* user_regs->osgd_hi = ; */ + /* user_regs->osem = ; */ + /* user_regs->osr0 = ; */ + + user_regs->pfpfr = AW(sw_regs->pfpfr); + user_regs->fpcr = 
AW(sw_regs->fpcr); + user_regs->fpsr = AW(sw_regs->fpsr); + + user_regs->usbr = pt_regs->stacks.top; + user_regs->usd_lo = AW(pt_regs->stacks.usd_lo); + user_regs->usd_hi = AW(pt_regs->stacks.usd_hi); + change_psl_field((unsigned long *)&user_regs->usd_lo, -1); + + user_regs->psp_lo = AW(pt_regs->stacks.psp_lo); + user_regs->psp_hi = AW(pt_regs->stacks.psp_hi); + user_regs->pshtp = AW(pt_regs->stacks.pshtp); + + user_regs->cr0_lo = AW(pt_regs->crs.cr0_lo); + user_regs->cr0_hi = AW(pt_regs->crs.cr0_hi); + user_regs->cr1_lo = AW(pt_regs->crs.cr1_lo); + user_regs->cr1_hi = AW(pt_regs->crs.cr1_hi); + + user_regs->pcsp_lo = AW(pt_regs->stacks.pcsp_lo); + user_regs->pcsp_hi = AW(pt_regs->stacks.pcsp_hi); + user_regs->pcshtp = pt_regs->stacks.pcshtp; + + user_regs->cs_lo = sw_regs->cs_lo; + user_regs->cs_hi = sw_regs->cs_hi; + user_regs->ds_lo = sw_regs->ds_lo; + user_regs->ds_hi = sw_regs->ds_hi; + user_regs->es_lo = sw_regs->es_lo; + user_regs->es_hi = sw_regs->es_hi; + user_regs->fs_lo = sw_regs->fs_lo; + user_regs->fs_hi = sw_regs->fs_hi; + user_regs->gs_lo = sw_regs->gs_lo; + user_regs->gs_hi = sw_regs->gs_hi; + user_regs->ss_lo = sw_regs->ss_lo; + user_regs->ss_hi = sw_regs->ss_hi; + +#ifdef CONFIG_USE_AAU + if (aau_regs) { + for (i = 0; i < 32; i++) { + user_regs->aad[2*i] = AW(aau_regs->aads[i]).lo; + user_regs->aad[2*i+1] = AW(aau_regs->aads[i]).hi; + } + + if (machine.native_iset_ver < E2K_ISET_V5) { + for (i = 0; i < 16; i++) + user_regs->aaind[i] = (u32) aau_regs->aainds[i]; + + for (i = 0; i < 8; i++) + user_regs->aaincr[i] = (u32) aau_regs->aaincrs[i]; + + for (i = 0; i < 64; i++) + user_regs->aaldi[i] = (u32) aau_regs->aaldi[i]; + + for (i = 0; i < 16; i++) + user_regs->aasti[i] = (u32) aau_regs->aastis[i]; + } else { + for (i = 0; i < 16; i++) + user_regs->aaind[i] = aau_regs->aainds[i]; + + for (i = 0; i < 8; i++) + user_regs->aaincr[i] = aau_regs->aaincrs[i]; + + for (i = 0; i < 64; i++) + user_regs->aaldi[i] = aau_regs->aaldi[i]; + + for (i = 0; i < 16; 
i++) + user_regs->aasti[i] = aau_regs->aastis[i]; + } + + user_regs->aaldv = AW(aau_regs->aaldv); + + for (i = 0; i < 64; i++) + user_regs->aalda[i] = AW(ti->aalda[i]); + + user_regs->aaldm = AW(aau_regs->aaldm); + + user_regs->aasr = AW(aau_regs->aasr); + user_regs->aafstr = (unsigned long long) aau_regs->aafstr; + } +#endif /* CONFIG_USE_AAU */ + + user_regs->clkr = 0; + + user_regs->dibcr = AW(thread->debug.regs.dibcr); + user_regs->ddbcr = AW(thread->debug.regs.ddbcr); + user_regs->dibar[0] = thread->debug.regs.dibar0; + user_regs->dibar[1] = thread->debug.regs.dibar1; + user_regs->dibar[2] = thread->debug.regs.dibar2; + user_regs->dibar[3] = thread->debug.regs.dibar3; + user_regs->ddbar[0] = thread->debug.regs.ddbar0; + user_regs->ddbar[1] = thread->debug.regs.ddbar1; + user_regs->ddbar[2] = thread->debug.regs.ddbar2; + user_regs->ddbar[3] = thread->debug.regs.ddbar3; + user_regs->dibsr = AW(sw_regs->dibsr); + user_regs->ddbsr = AW(sw_regs->ddbsr); + user_regs->dimcr = AW(sw_regs->dimcr); + user_regs->ddmcr = AW(sw_regs->ddmcr); + user_regs->dimar[0] = sw_regs->dimar0; + user_regs->dimar[1] = sw_regs->dimar1; + user_regs->ddmar[0] = sw_regs->ddmar0; + user_regs->ddmar[1] = sw_regs->ddmar1; + if (machine.native_iset_ver >= E2K_ISET_V6 && + size >= offsetofend(struct user_regs_struct, dimtp_hi)) { + user_regs->dimtp_lo = sw_regs->dimtp.lo; + user_regs->dimtp_hi = sw_regs->dimtp.hi; + } + + user_regs->wd = AW(pt_regs->wd); + + user_regs->br = AS(pt_regs->crs.cr1_hi).br; + + user_regs->ctpr1 = AW(pt_regs->ctpr1); + user_regs->ctpr2 = AW(pt_regs->ctpr2); + user_regs->ctpr3 = AW(pt_regs->ctpr3); + if (machine.native_iset_ver >= E2K_ISET_V6 && + size >= offsetofend(struct user_regs_struct, ctpr3_hi)) { + user_regs->ctpr1_hi = AW(pt_regs->ctpr1_hi); + user_regs->ctpr2_hi = AW(pt_regs->ctpr2_hi); + user_regs->ctpr3_hi = AW(pt_regs->ctpr3_hi); + } + + /* user_regs->eir = ; */ + + user_regs->cutd = AW(sw_regs->cutd); + user_regs->cuir = (machine.native_iset_ver < 
E2K_ISET_V6) ? + AS(pt_regs->crs.cr1_lo).cuir : + AS(pt_regs->crs.cr1_lo).cui; + + if (size >= offsetofend(struct user_regs_struct, idr)) + user_regs->idr = READ_IDR_REG_VALUE(); + + if (machine.native_iset_ver >= E2K_ISET_V3) { + if (size >= offsetofend(struct user_regs_struct, core_mode)) + user_regs->core_mode = machine.rrd(E2K_REG_CORE_MODE); + } + + user_regs->lsr = pt_regs->lsr; + user_regs->ilcr = pt_regs->ilcr; + if (machine.native_iset_ver >= E2K_ISET_V5) { + if (size >= offsetofend(struct user_regs_struct, lsr1)) + user_regs->lsr1 = pt_regs->lsr1; + if (size >= offsetofend(struct user_regs_struct, ilcr1)) + user_regs->ilcr1 = pt_regs->ilcr1; + } + + user_regs->rpr_lo = sw_regs->rpr_lo; + user_regs->rpr_hi = sw_regs->rpr_hi; + + if (trap) { + u64 data; + u8 tag; + + /* MLT */ +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + /* FIXME: it need implement for guest */ + if (!paravirt_enabled() && trap->mlt_state.num) + memcpy(user_regs->mlt, trap->mlt_state.mlt, + sizeof(e2k_mlt_entry_t) * trap->mlt_state.num); +#endif + + /* TC */ + for (i = 0; i < min(MAX_TC_SIZE, HW_TC_SIZE); i++) { + user_regs->trap_cell_addr[i] = trap->tcellar[i].address; + user_regs->trap_cell_info[i] = + trap->tcellar[i].condition.word; + load_value_and_tagd(&trap->tcellar[i].data, + &data, &tag); + user_regs->trap_cell_val[i] = data; + user_regs->trap_cell_tag[i] = tag; + } + + /* TIR */ + for (i = 0; i <= trap->nr_TIRs; i++) { + user_regs->tir_hi[i] = trap->TIRs[i].TIR_hi.TIR_hi_reg; + user_regs->tir_lo[i] = trap->TIRs[i].TIR_lo.TIR_lo_reg; + } + + /* SBBP */ + if (trap->sbbp) + memcpy(user_regs->sbbp, trap->sbbp, + sizeof(user_regs->sbbp)); + + user_regs->sys_num = -1UL; + } else { + user_regs->arg1 = pt_regs->args[1]; + user_regs->arg2 = pt_regs->args[2]; + user_regs->arg3 = pt_regs->args[3]; + user_regs->arg4 = pt_regs->args[4]; + user_regs->arg5 = pt_regs->args[5]; + user_regs->arg6 = pt_regs->args[6]; + user_regs->sys_rval = pt_regs->sys_rval; + user_regs->sys_num = (s64) (s32) 
pt_regs->sys_num; + } + + /* DAM */ + memcpy(user_regs->dam, ti->dam, sizeof(ti->dam)); + + if (size >= offsetofend(struct user_regs_struct, proc_stack_base)) { + user_regs->proc_stack_base = (u64) GET_PS_BASE(&ti->u_hw_stack); + user_regs->chain_stack_base = + (u64) GET_PCS_BASE(&ti->u_hw_stack); + } + + /* + * gdb uses (sizeof_struct != 0) check to test for + * errors, so don't clear this field. + */ + user_regs->sizeof_struct = size; + + execute_user_gd_cud_regs(child, user_regs); + return 0; +} + +static int check_permissions(struct user_regs_struct *user_regs, long size) +{ + e2k_ctpr_t ctpr1, ctpr2, ctpr3; + e2k_dibcr_t dibcr; + e2k_dimcr_t dimcr; + e2k_ddmcr_t ddmcr; + + if (capable(CAP_SYS_ADMIN)) + return 0; + + AW(dibcr) = user_regs->dibcr; + AW(dimcr) = user_regs->dimcr; + AW(ddmcr) = user_regs->ddmcr; + AW(ctpr1) = user_regs->ctpr1; + AW(ctpr2) = user_regs->ctpr2; + AW(ctpr3) = user_regs->ctpr3; + + /* Sanity check (breakpoints are checked + * in arch_check_bp_in_kernelspace()). 
*/ + if (AS(dimcr)[0].system && AS(dimcr)[0].trap || + AS(dimcr)[1].system && AS(dimcr)[1].trap || + AS(ddmcr)[0].system && AS(ddmcr)[0].trap || + AS(ddmcr)[1].system && AS(ddmcr)[1].trap) + return -EIO; + + if (dibcr.stop) + return -EIO; + + if (machine.native_iset_ver >= E2K_ISET_V6) { + /* + * Prohibit user changing of monitor registers + */ + if (dimcr.u_m_en) + return -EIO; + + if (size >= offsetofend(struct user_regs_struct, dimtp_hi)) { + e2k_dimtp_t dimtp = { + .lo = user_regs->dimtp_lo, + .hi = user_regs->dimtp_hi + }; + + /* + * Disallow setting up buffer in kernel + */ + if (!access_ok(dimtp.base, dimtp.size)) + return -EIO; + } + } + + /* + * Prohibit setting of privileged and protected tags + */ + if (AS(ctpr1).ta_tag >= 5 || AS(ctpr2).ta_tag >= 5 || + AS(ctpr3).ta_tag >= 5) + return -EIO; + + return 0; +} + +static int user_regs_to_pt_regs(struct user_regs_struct *user_regs, + struct task_struct *child, long size) +{ + struct thread_info *ti = task_thread_info(child); + struct pt_regs *pt_regs = ti->pt_regs; + struct trap_pt_regs *trap; + struct sw_regs *sw_regs = &child->thread.sw_regs; +#ifdef CONFIG_USE_AAU + e2k_aau_t *aau_regs; +#endif /* CONFIG_USE_AAU */ + e2k_cr0_hi_t cr0_hi; + e2k_cr1_lo_t cr1_lo; + e2k_cr1_hi_t cr1_hi; + int ret, i; + + DebugTRACE("user_regs_to_pt_regs current->pid=%d(%s) " + "child->pid=%d BINCO(child) is %s\n", + current->pid, current->comm, child->pid, + TASK_IS_BINCO(child) ? "true" : "false"); + + /* Sanity check */ + ret = check_permissions(user_regs, size); + if (ret) + return ret; + + ret = ptrace_write_hbp_registers(child, user_regs); + if (ret) + return ret; + +#ifdef CONFIG_GREGS_CONTEXT + /* FIXME: guest kernel sw_regs have not right values of global */ + /* registers. 
Right values save/restore/keep host into gthread_info */ + /* structure for this guest process */ + /* FIXME: it need implement for guest, but why do copying of */ + /* separate word, extention, tag so complex, using LDRD operations */ + SET_GREGS_TO_THREAD(sw_regs->gregs.g, user_regs->g, user_regs->gtag); + for (i = 0; i < 32; i++) + sw_regs->gregs.g[i].ext = (u64) user_regs->gext[i]; + if (machine.native_iset_ver >= E2K_ISET_V5) { + if (size >= offsetofend(struct user_regs_struct, gext_v5) && + size >= offsetofend(struct user_regs_struct, gext_tag_v5)) { + SET_GREGS_TO_THREAD(&sw_regs->gregs.g[0].ext, + user_regs->gext_v5, user_regs->gext_tag_v5); + } + } + get_k_gregs_from_gregs(&ti->k_gregs, &sw_regs->gregs); + + AW(sw_regs->gregs.bgr) = user_regs->bgr; +#endif /* CONFIG_GREGS_CONTEXT */ + + AW(ti->upsr) = user_regs->upsr; + + /* = user_regs->oscud_lo; */ + /* = user_regs->oscud_hi; */ + /* = user_regs->osgd_lo; */ + /* = user_regs->osgd_hi; */ + /* = user_regs->osem; */ + /* = user_regs->osr0; */ + + AW(sw_regs->pfpfr) = user_regs->pfpfr; + AW(sw_regs->fpcr) = user_regs->fpcr; + AW(sw_regs->fpsr) = user_regs->fpsr; + + sw_regs->cs_lo = user_regs->cs_lo; + sw_regs->cs_hi = user_regs->cs_hi; + sw_regs->ds_lo = user_regs->ds_lo; + sw_regs->ds_hi = user_regs->ds_hi; + sw_regs->es_lo = user_regs->es_lo; + sw_regs->es_hi = user_regs->es_hi; + sw_regs->fs_lo = user_regs->fs_lo; + sw_regs->fs_hi = user_regs->fs_hi; + sw_regs->gs_lo = user_regs->gs_lo; + sw_regs->gs_hi = user_regs->gs_hi; + sw_regs->ss_lo = user_regs->ss_lo; + sw_regs->ss_hi = user_regs->ss_hi; + + AW(sw_regs->dimcr) = user_regs->dimcr; + AW(sw_regs->ddmcr) = user_regs->ddmcr; + sw_regs->dimar0 = user_regs->dimar[0]; + sw_regs->dimar1 = user_regs->dimar[1]; + sw_regs->ddmar0 = user_regs->ddmar[0]; + sw_regs->ddmar1 = user_regs->ddmar[1]; + if (machine.native_iset_ver >= E2K_ISET_V6 && + size >= offsetofend(struct user_regs_struct, dimtp_hi)) { + sw_regs->dimtp.lo = user_regs->dimtp_lo; + 
sw_regs->dimtp.hi = user_regs->dimtp_hi; + } + + AW(sw_regs->cutd) = user_regs->cutd; + /* = user_regs->cuir; */ + + /* = user_regs->rpr; */ + sw_regs->rpr_lo = user_regs->rpr_lo; + sw_regs->rpr_hi = user_regs->rpr_hi; + + if (!pt_regs) + return 0; + + /* = user_regs->usbr; */ + AW(pt_regs->stacks.usd_lo) = user_regs->usd_lo; + AW(pt_regs->stacks.usd_hi) = user_regs->usd_hi; + change_psl_field((unsigned long *)&pt_regs->stacks.usd_lo, 1); + + AW(cr0_hi) = user_regs->cr0_hi; + AW(cr1_lo) = user_regs->cr1_lo; + AW(cr1_hi) = user_regs->cr1_hi; + + AW(pt_regs->crs.cr0_lo) = user_regs->cr0_lo; + AS(pt_regs->crs.cr0_hi).ip = AS(cr0_hi).ip; + AS(pt_regs->crs.cr1_lo).cui = AS(cr1_lo).cui; + if (machine.native_iset_ver < E2K_ISET_V6) + AS(pt_regs->crs.cr1_lo).ic = AS(cr1_lo).ic; + AS(pt_regs->crs.cr1_lo).ss = AS(cr1_lo).ss; + AS(pt_regs->crs.cr1_hi).ussz = AS(cr1_hi).ussz; + AS(pt_regs->crs.cr1_hi).wdbl = AS(cr1_hi).wdbl; + AS(pt_regs->crs.cr1_hi).br = AS(cr1_hi).br; + +#ifdef CONFIG_USE_AAU + /* + * Skip copying aaldi/aalda since they are recalculated anyway + */ + if (aau_regs = pt_regs->aau_context) { + for (i = 0; i < 32; i++) { + AW(aau_regs->aads[i]).lo = user_regs->aad[2*i]; + AW(aau_regs->aads[i]).hi = user_regs->aad[2*i+1]; + } + + for (i = 0; i < 16; i++) + aau_regs->aainds[i] = user_regs->aaind[i]; + + for (i = 0; i < 8; i++) + aau_regs->aaincrs[i] = user_regs->aaincr[i]; + + AW(aau_regs->aaldv) = user_regs->aaldv; + AW(aau_regs->aaldm) = user_regs->aaldm; + + AW(aau_regs->aasr) = user_regs->aasr; + aau_regs->aafstr = user_regs->aafstr; + + for (i = 0; i < 16; i++) + aau_regs->aastis[i] = user_regs->aasti[i]; + } +#endif /* CONFIG_USE_AAU */ + + AW(pt_regs->wd) = user_regs->wd; + + AS(pt_regs->crs.cr1_hi).br = user_regs->br; + + AW(pt_regs->ctpr1) = user_regs->ctpr1; + AW(pt_regs->ctpr2) = user_regs->ctpr2; + AW(pt_regs->ctpr3) = user_regs->ctpr3; + + /* = user_regs->eir; */ + + pt_regs->lsr = user_regs->lsr; + pt_regs->ilcr = user_regs->ilcr; + if 
(machine.native_iset_ver >= E2K_ISET_V5) { + if (size >= offsetofend(struct user_regs_struct, lsr1)) + pt_regs->lsr1 = user_regs->lsr1; + if (size >= offsetofend(struct user_regs_struct, ilcr1)) + pt_regs->ilcr1 = user_regs->ilcr1; + } + + trap = pt_regs->trap; + if (!trap) { + pt_regs->args[1] = user_regs->arg1; + pt_regs->args[2] = user_regs->arg2; + pt_regs->args[3] = user_regs->arg3; + pt_regs->args[4] = user_regs->arg4; + pt_regs->args[5] = user_regs->arg5; + pt_regs->args[6] = user_regs->arg6; + pt_regs->sys_rval = user_regs->sys_rval; + pt_regs->sys_num = user_regs->sys_num; + } + + /* copy MLT */ + /* Unsupported */ + + return 0; +} + +/* + * Called by kernel/ptrace.c when detaching.. + * + * Make sure the single step bit is not set. + */ +void ptrace_disable(struct task_struct *child) +{ + user_disable_single_step(child); +} + + +static bool is_hw_stack_from_task(struct task_struct *child, + unsigned long addr, size_t size) +{ + struct thread_info *ti = task_thread_info(child); + + if (range_includes((u64) GET_PCS_BASE(&ti->u_hw_stack), + get_hw_pcs_user_size(&ti->u_hw_stack), + addr, size) || + range_includes((u64) GET_PS_BASE(&ti->u_hw_stack), + get_hw_ps_user_size(&ti->u_hw_stack), + addr, size)) + return true; + + return false; +} + +static int arch_ptrace_peek(struct task_struct *child, + unsigned long addr, unsigned long data, bool tag, bool user) +{ + struct thread_info *ti = task_thread_info(child); + volatile unsigned long tmp; /* volatile because it contains tag */ + unsigned long value; + int copied; + bool privileged_access = range_intersects(addr, sizeof(tmp), + USER_HW_STACKS_BASE, PAGE_OFFSET - USER_HW_STACKS_BASE); + bool tag_unaligned = false; + + if (!user && data < PAGE_OFFSET) + return -EINVAL; + + if (tag && !IS_ALIGNED(addr, 8)) { + if (!IS_ALIGNED(addr, 4)) + return -EINVAL; + + addr = round_down(addr, 8); + tag_unaligned = true; + } + + if (privileged_access) { + unsigned long ts_flag; + + /* Only allow access to CUT and + * this 
particular thread's stacks */ + if (!is_hw_stack_from_task(child, addr, sizeof(tmp)) && + !range_includes(USER_CUT_AREA_BASE, USER_CUT_AREA_SIZE, + addr, sizeof(tmp))) + return -EPERM; + + /* Chain stack access works only with aligned dwords. + * Also this allows for the security check below. */ + if (!IS_ALIGNED(addr, 8)) + return -EINVAL; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + copied = ptrace_access_vm(child, addr, (unsigned long *) &tmp, + sizeof(tmp), FOLL_FORCE); + clear_ts_flag(ts_flag); + } else { + copied = ptrace_access_vm(child, addr, (unsigned long *) &tmp, + sizeof(tmp), FOLL_FORCE); + } + if (copied != sizeof(tmp)) + return -EIO; + + if (tag) { + u64 unused; + u8 tag; + + load_value_and_tagd((void *) &tmp, &unused, &tag); + value = (tag_unaligned) ? (tag >> 2) : (tag & 0x3); + } else { + value = tmp; + + /* Hide kernel information from user */ + if (privileged_access && value >= PAGE_OFFSET && + value <= E2K_VA_MASK) + value = UL(0xe2ffffffffff); + } + + if (user) { + /* Ugly, but it seems like backwards + * compatibility requires this... 
*/ + if (tag && is_compat_task()) + return put_user((compat_ulong_t) value, + (compat_ulong_t __user *) data); + else + return put_user(value, (unsigned long __user *) data); + } else { + if (tag) + *(u8 *) data = value; + else + *(unsigned long *) data = value; + return 0; + } +} + +#ifdef CONFIG_PROTECTED_MODE +static int arch_ptrace_peek_pl(struct task_struct *child, + unsigned long addr, unsigned long data) +{ + e2k_pl_lo_t pl; + long resdata = -1L; + int ret = -EIO; + + if (arch_ptrace_peek(child, addr, (unsigned long) &pl, false, false)) + return ret; + + if (pl.PL_lo_itag == E2K_PL_ITAG) { + resdata = pl.PL_lo_target; + ret = put_user(resdata, (unsigned long __user *)data); +#ifdef DEBUG_PTRACE + pr_info("do_ptrace: result 0x%016lx\n", resdata); +#endif /* DEBUG_PTRACE */ + } else { + /* TD not supported */ +#ifdef DEBUG_PTRACE + pr_info("do_ptrace: TD not supported\n"); +#endif /* DEBUG_PTRACE */ + } + return ret; +} +#endif /* CONFIG_PROTECTED_MODE */ + +struct poke_work_args { + unsigned long addr; + unsigned long data; + u8 tag; + struct callback_head callback; +}; + +static void poke_work_fn(struct callback_head *head) +{ + unsigned long pcs_base, pcs_used_top, ps_base, ps_used_top; + struct pt_regs *regs = current_pt_regs(); + struct poke_work_args *args = + container_of(head, struct poke_work_args, callback); + unsigned long addr = args->addr, data = args->data; + u8 tag = args->tag; + volatile unsigned long value; /* volatile because it contains tag */ + + kfree(args); + args = NULL; + + /* + * Calculate stack frame addresses + */ + pcs_base = (unsigned long) CURRENT_PCS_BASE(); + ps_base = (unsigned long) CURRENT_PS_BASE(); + + pcs_used_top = AS(regs->stacks.pcsp_lo).base + + AS(regs->stacks.pcsp_hi).ind; + ps_used_top = AS(regs->stacks.psp_lo).base + AS(regs->stacks.psp_hi).ind; + + store_tagged_dword((u64 *) &value, data, tag); + + if (addr >= pcs_base && addr + sizeof(value) <= pcs_used_top) { + write_current_chain_stack(addr, (void *) &value, 
sizeof(value)); + } else if (addr >= ps_base && addr + sizeof(value) <= ps_used_top) { + copy_current_proc_stack((void *) &value, (void *) addr, + sizeof(value), true, ps_used_top); + } else { + /* Writing of signal stack and CUT is prohibited */ + return; + } +} + +static int arch_ptrace_poke(struct task_struct *child, + unsigned long addr, unsigned long data, u8 tag) +{ + struct thread_info *ti = task_thread_info(child); + bool privileged_access = range_intersects(addr, sizeof(data), + USER_HW_STACKS_BASE, PAGE_OFFSET - USER_HW_STACKS_BASE); + + /* Only allow access to this child's stacks */ + if (privileged_access) { + struct poke_work_args *poke_work; + + if (!is_hw_stack_from_task(child, addr, sizeof(data))) + return -EPERM; + + /* Chain stack access works only with aligned dwords */ + if (!IS_ALIGNED(addr, 8)) + return -EINVAL; + + poke_work = kmalloc(sizeof(*poke_work), GFP_KERNEL); + if (!poke_work) + return -ENOMEM; + + poke_work->addr = addr; + poke_work->data = data; + poke_work->tag = tag; + init_task_work(&poke_work->callback, poke_work_fn); + return task_work_add(child, &poke_work->callback, true); + } else { + /* volatile because it contains tag */ + volatile unsigned long value; + int copied; + + store_tagged_dword((u64 *) &value, data, tag); + + copied = ptrace_access_vm(child, addr, (void *) &value, + sizeof(value), FOLL_FORCE | FOLL_WRITE); + return (copied == sizeof(value)) ? 
0 : -EIO; + } +} + +long common_ptrace(struct task_struct *child, long request, unsigned long addr, + unsigned long data, bool compat) +{ + struct user_regs_struct local_user_regs; + u8 tag; + long ret; +#ifdef CONFIG_PROTECTED_MODE + long resdata = -1L; + int itag; +#endif /* CONFIG_PROTECTED_MODE */ + + switch (request) { + case PTRACE_PEEKTEXT: + case PTRACE_PEEKDATA: + ret = arch_ptrace_peek(child, addr, data, false, true); + break; + + case PTRACE_POKETEXT: + case PTRACE_POKEDATA: + ret = arch_ptrace_poke(child, addr, data, 0); + break; + + /* read the word at location addr in the USER area. */ + case PTRACE_PEEKUSR: + /* not implemented yet. */ + ret = -EIO; + break; + + case PTRACE_POKEUSR: /* write the word at location addr in the */ + /* USER area */ + /* not implemented yet. */ + ret = -EIO; + break; + + case PTRACE_PEEKTAG: + ret = arch_ptrace_peek(child, addr, data, true, true); + break; + + case PTRACE_POKETAG: + /* not implemented yet. */ + ret = -EIO; +#ifdef DEBUG_PTRACE + printk("do_ptrace: PTRACE_POKETAG not implemented yet\n"); +#endif /* DEBUG_PTRACE */ + break; + +#ifdef CONFIG_PROTECTED_MODE + case PTRACE_PEEKPTR: + ret = -EIO; + + /* Address should be aligned at least 8 bytes */ + if ((addr & 0x7) != 0) + break; + + if (arch_ptrace_peek(child, addr, (unsigned long) &tag, + true, false)) + break; +#ifdef DEBUG_PTRACE + pr_info("do_ptrace: tag=0x%x\n", tag); +#endif /* DEBUG_PTRACE */ + if (tag == E2K_AP_LO_ETAG) { + /* C. 4.6.1. 
tag.lo = 1111 - AP, OD or PL + * Address should be aligned at 16 bytes */ + if ((addr & 15) != 0) + break; + + if (arch_ptrace_peek(child, addr + 8, + (unsigned long) &tag, true, false)) + break; +#ifdef DEBUG_PTRACE + pr_info("do_ptrace: tag=0x%x\n", tag); +#endif /* DEBUG_PTRACE */ + if (tag == E2K_AP_HI_ETAG) { + /* AP & SAP */ + e2k_rwap_lo_struct_t ap_lo; + e2k_rwap_hi_struct_t ap_hi; + e2k_rwsap_lo_struct_t sap_lo; + e2k_rwsap_hi_struct_t sap_hi; + + if (arch_ptrace_peek(child, addr, + (unsigned long) &ap_lo, false, false)) + break; + if (arch_ptrace_peek(child, addr + 8, + (unsigned long) &ap_hi, false, false)) + break; + + itag = ap_lo.E2K_RWAP_lo_itag; + + if (itag == E2K_AP_ITAG) { + /* AP */ + resdata = ap_lo.E2K_RWAP_lo_base + ap_hi.E2K_RWAP_hi_curptr; + } else if (itag == E2K_SAP_ITAG) { + /* SAP */ + sap_lo.word = ap_lo.word; + sap_hi.word = ap_hi.word; + resdata = sap_lo.E2K_RWSAP_lo_base + sap_hi.E2K_RWSAP_hi_curptr; + } else { + resdata = -1; +#ifdef DEBUG_PTRACE + printk("do_ptrace: unknown itag 0x%x\n", itag); +#endif /* DEBUG_PTRACE */ + } + + ret = put_user(resdata,(unsigned long __user *) data); +#ifdef DEBUG_PTRACE + printk("do_ptrace: result 0x%016lx\n", resdata); +#endif /* DEBUG_PTRACE */ + } else if (tag == E2K_PLHI_ETAG) { + ret = arch_ptrace_peek_pl(child, addr, data); + } else { + /* OD not supported. 
*/ +#ifdef DEBUG_PTRACE + pr_info("do_ptrace: OD not supported\n"); +#endif /* DEBUG_PTRACE */ + break; + } + } else if (tag == E2K_PL_ETAG) { + ret = arch_ptrace_peek_pl(child, addr, data); + } else { + /* Unknown tag */ +#ifdef DEBUG_PTRACE + pr_info("do_ptrace: unknown tag 0x%x\n", tag); +#endif /* DEBUG_PTRACE */ + break; + } + break; + + case PTRACE_POKEPTR: { + + /* We arrive as follows: + * data - the address WHICH we want to write + * addr - the address, a software to WHICH we want to write + * + * If gd_base < = data < gd_base + gd_size, we will create + * AP descriptor also we will write it as structure to the + * address ADDR, then we will add tags + * + * FIXME + * Descriptor as we will prescribe the area size to + * the addresses gd_base + gd_addr (because it isn't clear, + * what size to register), as we create 0. + * as rw - E2_RWAR_RW_ENABLE + * + * If usd_base < = data < usd_base + usd_size, we will create + * descriptor of SAP + * + * If cud_base < = data < cud_base + cud_size, we will create + * PL descriptor */ + + struct pt_regs *pt_regs = task_thread_info(child)->pt_regs; + struct sw_regs *sw_regs = &child->thread.sw_regs; + e2k_cutd_t cutd = sw_regs->cutd; + e2k_pusd_lo_t pusd_lo; + e2k_pusd_hi_t pusd_hi; + e2k_cute_t cute, *cute_p = &cute; + int cui = USER_CODES_PROT_INDEX; /* FIXME In a kernel it + * isn't realized yet */ + long cute_entry_addr, stack_bottom; + long pusd_base, pusd_size, gd_base, gd_size, cud_base, cud_size; + unsigned long ts_flag; + size_t copied; + + ret = -EIO; + + /* Address should be aligned at least 8 bytes */ + if ((addr & 7) != 0) + break; + + + /* Read register %pusd */ + AW(pusd_lo) = AW(pt_regs->stacks.usd_lo); + AW(pusd_hi) = AW(pt_regs->stacks.usd_hi); + pusd_base = pusd_lo.PUSD_lo_base; + pusd_size = pusd_hi.PUSD_hi_size; + + /* usd.size + * <------> + * USER_P_STACK_SIZE <- FIXME + * <-------------------> + * 0x0 |......................|...................| 0xfff... 
+ * ^ ^ + * usd.base stack_bottom */ + stack_bottom = pusd_base + 0x2000 /* FIXME */; + + /* In %cutd the table address is written, + * in %cui - an index in the table is written. + * we calculate the address of entry necessary to us */ + cute_entry_addr = cutd.E2K_RWP_base + cui * sizeof (e2k_cute_t); + +#ifdef DEBUG_PTRACE + printk("do_ptrace: cutd.base = 0x%lx, cui = 0x%x, cute_entry_addr = 0x%lx\n", + cutd.E2K_RWP_base, cui, cute_entry_addr); + printk("do_ptrace: pusd.base = 0x%lx, pusd.size = 0x%lx\n", pusd_base, pusd_size); +#endif /* DEBUG_PTRACE */ + + if (cute_entry_addr + sizeof(cute) > PAGE_OFFSET) + break; + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + copied = access_process_vm(child, cute_entry_addr, &cute, + sizeof(cute), 0); + clear_ts_flag(ts_flag); + if (copied != sizeof(cute)) + break; + + gd_base = CUTE_GD_BASE(cute_p); + gd_size = CUTE_GD_SIZE(cute_p); + cud_base = CUTE_CUD_BASE(cute_p); + cud_size = CUTE_CUD_SIZE(cute_p); + +#ifdef DEBUG_PTRACE + pr_info("do_ptrace: gd.base = 0x%lx, gd.size = 0x%lx\n" + "do_ptrace: cud.base = 0x%lx, cud.size = 0x%lx\n", + gd_base, gd_size, cud_base, cud_size); +#endif /* DEBUG_PTRACE */ + + if (gd_base <= data && data < (gd_base + gd_size)) { + /* AP descriptor needed */ + e2k_rwap_struct_t ap = { { { 0 } } }; + + /* Address should be aligned at 16 bytes */ + if ((addr & 15) != 0) + break; + + ap.E2K_RWAP_base = data; + ap.E2K_RWAP_rw = E2_RWAR_RW_ENABLE; + ap.E2K_RWAP_itag = E2K_AP_ITAG; + ap.E2K_RWAP_curptr = 0; + ap.E2K_RWAP_size = gd_base + gd_size - data; + + if (arch_ptrace_poke(child, addr, + AW(ap.lo), E2K_AP_LO_ETAG)) + break; + if (arch_ptrace_poke(child, addr + 8, + AW(ap.hi), E2K_AP_HI_ETAG)) + break; + + ret = 0; +#ifdef DEBUG_PTRACE + pr_info("do_ptrace: AP writed\n"); +#endif /* DEBUG_PTRACE */ + } else if (pusd_base <= data && data < stack_bottom) { + /* SAP descriptor needed */ + e2k_rwsap_struct_t sap = {{{ 0 }}}; + + /* Address should be aligned at 16 bytes */ + if ((addr & 15) != 0) + break; 
+ + sap.E2K_RWSAP_base = data; + sap.E2K_RWSAP_psl = pusd_lo.E2K_RPUSD_lo_psl - 1 + /* FIXME without -1 does not function, + * but -1 - is it correct? */; + sap.E2K_RWSAP_rw = E2_RWAR_RW_ENABLE; + sap.E2K_RWSAP_itag = E2K_SAP_ITAG; + sap.E2K_RWSAP_curptr = 0; + sap.E2K_RWSAP_size = stack_bottom - data; + + if (arch_ptrace_poke(child, addr, + AW(sap.lo), E2K_SAP_LO_ETAG)) + break; + if (arch_ptrace_poke(child, addr + 8, + AW(sap.hi), E2K_SAP_HI_ETAG)) + break; + + ret = 0; +#ifdef DEBUG_PTRACE + pr_info("do_ptrace: SAP written\n"); +#endif /* DEBUG_PTRACE */ + } else if (cud_base <= data && data < (cud_base + cud_size)) { + /* PL descriptor needed */ + e2k_pl_t pl; + + if (cpu_has(CPU_FEAT_ISET_V6)) { + pl = MAKE_PL_V6(data, cui); + if (arch_ptrace_poke(child, addr, + AW(pl.lo), E2K_PLLO_ETAG)) + break; + if (arch_ptrace_poke(child, addr + 8, + AW(pl.hi), E2K_PLHI_ETAG)) + break; + } else { + pl = MAKE_PL_V2(data); + if (arch_ptrace_poke(child, addr, + AW(pl.lo), E2K_PL_ETAG)) + break; + } + + ret = 0; +#ifdef DEBUG_PTRACE + pr_info("do_ptrace: PL writed\n"); +#endif /* DEBUG_PTRACE */ + } else { +#ifdef DEBUG_PTRACE + pr_info("do_ptrace: incorrect ptr\n"); +#endif /* DEBUG_PTRACE */ + } + break; + } +#endif /* CONFIG_PROTECTED_MODE */ + + case PTRACE_EXPAND_STACK: { + /* + * This was created to prevent SIGSEGV when trying + * to PTRACE_POKEDATA below the allocated data stack + * area, but it is no longer needed: get_user_pages() + * calls into find_extend_vma() which automatically + * expands user's data stack + */ + ret = 0; + break; + } + + case PTRACE_GETREGS: { + long size; + + ret = get_user_regs_struct_size( + (struct user_regs_struct __user *) data, &size); + if (ret) { + unsigned long long zero = 0; + if (copy_to_user((void __user *) data, &zero, + sizeof(zero))); + break; + } + ret = pt_regs_to_user_regs (child, &local_user_regs, size); + if (ret) { + /* + * Now pt_regs can be NULL (for example: under + * PTRACE_O_TRACEEXEC flag the user process + * 
doesn't work now). But result must be 0. + */ + ret = 0; + memset(&local_user_regs, 0, size); + /* + * gdb uses (sizeof_struct != 0) check to test for + * errors, so don't clear this field. + */ + local_user_regs.sizeof_struct = size; + } + + ret = copy_to_user((void __user *) data, + &local_user_regs, size); + break; + } + + case PTRACE_SETREGS: { /* Set all gp regs in the child. */ + long size; + + ret = get_user_regs_struct_size( + (struct user_regs_struct __user *) data, &size); + if (ret) + break; + + ret = copy_from_user(&local_user_regs, + (void __user *) data, size); + if (ret) + break; + + ret = user_regs_to_pt_regs(&local_user_regs, child, size); + break; + } + + case PTRACE_SINGLESTEP: { + struct thread_info *ti = task_thread_info(child); + + if (!ti->pt_regs) { + ret = -EPERM; + break; + } + /* Fall through. */ + } + + default: + ret = (compat) ? compat_ptrace_request(child, request, addr, data) : + ptrace_request(child, request, addr, data); + break; + } +#ifdef DEBUG_PTRACE + if (ret < 0) + printk("do_ptrace: FAIL: ret=%d\n", ret); +#endif /* DEBUG_PTRACE */ + return ret; +} + +long arch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data) +{ + return common_ptrace(child, request, addr, data, false); +} + +void user_enable_single_step(struct task_struct *child) +{ + struct thread_info *ti = task_thread_info(child); + + set_ti_status_flag(ti, TS_SINGLESTEP_USER); + if (!AS(ti->pt_regs->crs.cr1_lo).pm) + AS(ti->pt_regs->crs.cr1_lo).ss = 1; +} + +void user_disable_single_step(struct task_struct *child) +{ + struct thread_info *ti = task_thread_info(child); + + clear_ti_status_flag(ti, TS_SINGLESTEP_USER); + if (ti->pt_regs) + AS(ti->pt_regs->crs.cr1_lo).ss = 0; +} + + +int syscall_trace_entry(struct pt_regs *regs) +{ + int ret = 0; + + /* For compatibility with Intel. 
It can be used to distinguish + syscall entry from syscall exit */ + regs->sys_rval = -ENOSYS; + +#ifdef CONFIG_MCST +#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER + /* do the secure computing check first */ + ret = secure_computing(NULL); +#endif +#endif + if (test_thread_flag(TIF_NOHZ)) + user_exit(); + + if (test_thread_flag(TIF_SYSCALL_TRACE)) { + ret = tracehook_report_syscall_entry(regs); + } + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_enter(regs, regs->sys_num); + + audit_syscall_entry(regs->sys_num, regs->args[1], + regs->args[2], regs->args[3], regs->args[4]); + + return ret; +} + +void syscall_trace_leave(struct pt_regs *regs) +{ + audit_syscall_exit(regs); + + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_exit(regs, regs->sys_rval); + + if (test_thread_flag(TIF_SYSCALL_TRACE)) + tracehook_report_syscall_exit(regs, 0); + + if (test_thread_flag(TIF_NOHZ)) + user_enter(); + + rseq_syscall(regs); +} + diff --git a/arch/e2k/kernel/recovery.c b/arch/e2k/kernel/recovery.c new file mode 100644 index 000000000000..1b0e6a50a9b1 --- /dev/null +++ b/arch/e2k/kernel/recovery.c @@ -0,0 +1,165 @@ +/* + * arch/e2k/kernel/recovery.c + * + * Kernel suspend and recovery. + * + * Copyright (C) 2016 Pavel V. Panteleev (panteleev_p@mcst.ru) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#undef DebugR +#define DEBUG_RECOVERY_MODE 0 /* system recovery */ +#define DebugR(...) DebugPrint(DEBUG_RECOVERY_MODE ,##__VA_ARGS__) + + +static DEFINE_RAW_SPINLOCK(restart_lock); + +#ifdef CONFIG_SMP +static unsigned int max_cpus_to_recover = NR_CPUS; +#endif /* CONFIG_SMP */ +struct task_struct *task_to_recover; + +struct aligned_task task_to_restart[NR_CPUS]; + + +#ifdef CONFIG_SMP +/* Called by boot processor to recover the rest. */ +static void +smp_recovery(void) +{ + unsigned int i; + unsigned int j = 1; + + /* Get other processors into their bootup holding patterns. 
*/ + for (i = 0; i < NR_CPUS; i++) { + if (num_online_cpus() >= max_cpus_to_recover) + break; + if (cpu_possible(i) && !cpu_online(i)) { + cpu_recover(i); + j++; + } + } + + pr_warn("Recover up %u CPUs\n", j); + + smp_cpus_recovery_done(max_cpus_to_recover); +} +#endif + +/* + * Recovery the first processor. + * Same as function start_kernel() only to recover kernel state, + * timers, BUS controllers, IO controllers, drivers, etc... + */ +void +recover_kernel(void) +{ + DebugR("started\n"); + + /* + * Interrupts should be still disabled. Do necessary setups, + * interrupts will be enabled after switching to interrupted + * tasks on all CPUs + */ + + /* + * Mark the boot cpu "online" so that it can call console drivers in + * printk() and can access its per-cpu storage. + */ +#ifdef CONFIG_SMP + smp_prepare_boot_cpu_to_recover(); +#else + init_cpu_online(cpumask_of(smp_processor_id())); +#endif + + kernel_trap_mask_init(); + +#ifdef CONFIG_SMP + /* + * Recover SMP mode and other CPUs + */ + DebugR("will start smp_prepare_cpus_to_recover()\n"); + smp_prepare_cpus_to_recover(max_cpus_to_recover); +#endif /* CONFIG_SMP */ + +#ifdef CONFIG_SMP + DebugR("will start smp_recovery()\n"); + smp_recovery(); + DebugR("completed SMP recovery\n"); +#endif + + /* + * Return to caller function to switch + * to interrupted task on all CPUs + */ + DebugR("completed recovery and returns to " + "switch to interrupted tasks\n"); +} + +static void str_adjust_bootblock(void) +{ + set_bootblock_flags(bootblock_phys, + RECOVERY_BB_FLAG | NO_READ_IMAGE_BB_FLAG); +} + +static noinline void do_restart_system(void (*restart_func)(void *), void *arg) +{ + task_to_recover = current; + NATIVE_SAVE_TASK_REGS_TO_SWITCH(current); + + str_adjust_bootblock(); + + local_write_back_cache_all(); + + restart_func(arg); + + /* + * Never should be here + */ + BUG(); +} + +int restart_system(void (*restart_func)(void *), void *arg) +{ + void (*volatile restart)(void (*)(void *), void *) = do_restart_system; 
+ unsigned long flags; + + DebugR("System restart started on cpu %d\n", raw_smp_processor_id()); + + if (num_online_cpus() != 1) { + DebugR("Not only one cpu is online\n"); + return -EBUSY; + } + + if (!raw_spin_trylock_irqsave(&restart_lock, flags)) { + DebugR("Restart system already in progress\n"); + return -EBUSY; + } + + /* + * Use pointer instead of call or LCC will remove all code after + * do_restart_system() because of noret attribute of BUG() in + * do_restart_system() + */ + restart(restart_func, arg); + + /* + * kernel returns here after recovery. + */ + + DebugR("System restart finished on cpu %d\n", raw_smp_processor_id()); + + raw_spin_unlock_irqrestore(&restart_lock, flags); + + return 0; +} diff --git a/arch/e2k/kernel/rtc.c b/arch/e2k/kernel/rtc.c new file mode 100644 index 000000000000..654be64c3e7c --- /dev/null +++ b/arch/e2k/kernel/rtc.c @@ -0,0 +1,273 @@ +/* + * RTC related functions + */ +#include +#include +#include +#include +#include + +#include +#include +#include +#if defined(CONFIG_SCLKR_CLOCKSOURCE) +#include +#include +#endif + +DEFINE_SPINLOCK(rtc_lock); +EXPORT_SYMBOL(rtc_lock); + +/* + * In order to set the CMOS clock precisely, set_rtc_mmss has to be + * called 500 ms after the second nowtime has started, because when + * nowtime is written into the registers of the CMOS clock, it will + * jump to the next second precisely 500 ms later. Check the Motorola + * MC146818A or Dallas DS12887 data sheet for details. + * + * BUG: This routine does not handle hour overflow properly; it just + * sets the minutes. Usually you'll only notice that after reboot! 
+ */ + +static int x86_set_rtc_mmss(unsigned long nowtime) +{ + int retval = 0; + int real_seconds, real_minutes, cmos_minutes; + unsigned char save_control, save_freq_select; + + save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being */ + /* set */ + CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); + + save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset */ + /* prescaler */ + CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); + + cmos_minutes = CMOS_READ(RTC_MINUTES); + if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) + cmos_minutes = bcd2bin(cmos_minutes); + + /* + * since we're only adjusting minutes and seconds, + * don't interfere with hour overflow. This avoids + * messing with unknown time zones but requires your + * RTC not to be off by more than 15 minutes + */ + real_seconds = nowtime % 60; + real_minutes = nowtime / 60; + if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) + real_minutes += 30; /* correct for half hour time zone */ + real_minutes %= 60; + + if (abs(real_minutes - cmos_minutes) < 30) { + if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { + real_seconds = bin2bcd(real_seconds); + real_minutes = bin2bcd(real_minutes); + } + CMOS_WRITE(real_seconds, RTC_SECONDS); + CMOS_WRITE(real_minutes, RTC_MINUTES); + } else { + printk(KERN_WARNING + "set_rtc_mmss: can't update from %d to %d\n", + cmos_minutes, real_minutes); + retval = -1; + } + + /* The following flags have to be released exactly in this order, + * otherwise the DS12887 (popular MC146818A clone with integrated + * battery and quartz) will not reset the oscillator and will not + * update precisely 500 ms later. You won't find this mentioned in + * the Dallas Semiconductor data sheets, but who believes data + * sheets anyway ... 
-- Markus Kuhn + */ + CMOS_WRITE(save_control, RTC_CONTROL); + CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); + + return retval; +} + +static unsigned long x86_get_cmos_time(void) +{ + unsigned int year, mon, day, hour, min, sec; + int i; + + /* The Linux interpretation of the CMOS clock register contents: + * When the Update-In-Progress (UIP) flag goes from 1 to 0, the + * RTC registers show the second which has precisely just started. + * Let's hope other operating systems interpret the RTC the same way. + */ + /* read RTC exactly on falling edge of update flag */ + for (i = 0; i < 1000000; i++) /* may take up to 1 second... */ + if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) + break; + for (i = 0; i < 1000000; i++) /* must try at least 2.228 ms */ + if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)) + break; + do { /* Isn't this overkill ? UIP above should guarantee consistency */ + sec = CMOS_READ(RTC_SECONDS); + min = CMOS_READ(RTC_MINUTES); + hour = CMOS_READ(RTC_HOURS); + day = CMOS_READ(RTC_DAY_OF_MONTH); + mon = CMOS_READ(RTC_MONTH); + year = CMOS_READ(RTC_YEAR); + } while (sec != CMOS_READ(RTC_SECONDS)); + if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { + sec = bcd2bin(sec); + min = bcd2bin(min); + hour = bcd2bin(hour); + day = bcd2bin(day); + mon = bcd2bin(mon); + year = bcd2bin(year); + } + if ((year += 1900) < 1970) + year += 100; + + return mktime(year, mon, day, hour, min, sec); +} + +/* + * Everything since E2+ uses /dev/rtc0 interface. 
+ */ +static int iohub_rtc_set_mmss(unsigned long nowtime) +{ + struct rtc_device *rtc; + struct rtc_time tm; + int ret; + + rtc_time64_to_tm(nowtime, &tm); + rtc = rtc_class_open("rtc0"); + if (rtc == NULL) + return -1; + +#if defined(CONFIG_SCLKR_CLOCKSOURCE) + if (strcmp(curr_clocksource->name, "sclkr") == 0 && + sclkr_mode == SCLKR_RTC) { + timekeeping_notify(<_cs); + ret = rtc_set_time(rtc, &tm); + timekeeping_notify(&clocksource_sclkr); + } else { + ret = rtc_set_time(rtc, &tm); + } +#else + ret = rtc_set_time(rtc, &tm); +#endif + rtc_class_close(rtc); + + return ret; +} + +static unsigned long iohub_rtc_get_time(void) +{ + struct rtc_time tm; + struct rtc_device *rtc = rtc_class_open("rtc0"); + unsigned long time; + int ret; + + rtc = rtc_class_open("rtc0"); + if (!rtc) + return 0; + ret = rtc_read_time(rtc, &tm); + rtc_class_close(rtc); + if (ret) + return 0; + ret = rtc_tm_to_time(&tm, &time); + if (ret) + return 0; + return time; +} + +void __init native_clock_init(void) +{ + int nid; + + if (HAS_MACHINE_E2K_IOHUB) { + for_each_node_has_dup_kernel(nid) { + the_node_machine(nid)->set_wallclock = + &iohub_rtc_set_mmss; + the_node_machine(nid)->get_wallclock = + &iohub_rtc_get_time; + } + } else { + for_each_node_has_dup_kernel(nid) { + the_node_machine(nid)->set_wallclock = + &x86_set_rtc_mmss; + the_node_machine(nid)->get_wallclock = + &x86_get_cmos_time; + } + } +} + +int update_persistent_clock(struct timespec now) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&rtc_lock, flags); + ret = mach_set_wallclock(now.tv_sec); + spin_unlock_irqrestore(&rtc_lock, flags); + + return ret; +} + +void read_persistent_clock(struct timespec *ts) +{ + unsigned long retval, flags; + + spin_lock_irqsave(&rtc_lock, flags); + retval = mach_get_wallclock(); + spin_unlock_irqrestore(&rtc_lock, flags); + + ts->tv_sec = retval; + ts->tv_nsec = 0; +} + +static struct resource rtc_resources[] = { + [0] = { + .start = RTC_PORT(0), + .end = RTC_PORT(1), + .flags = 
IORESOURCE_IO, + }, + [1] = { + .start = RTC_IRQ, + .end = RTC_IRQ, + .flags = IORESOURCE_IRQ, + } +}; + +static struct platform_device rtc_device = { + .name = "rtc_cmos", + .id = -1, + .resource = rtc_resources, + .num_resources = ARRAY_SIZE(rtc_resources), +}; + +static __init int add_rtc_cmos(void) +{ + /* Everything since E2C+ uses SPI rtc clocks. */ + if (HAS_MACHINE_E2K_IOHUB) + return 0; + +#ifdef CONFIG_PNP + static const char *ids[] __initconst = + { "PNP0b00", "PNP0b01", "PNP0b02", }; + struct pnp_dev *dev; + struct pnp_id *id; + int i; + + pnp_for_each_dev(dev) { + for (id = dev->id; id; id = id->next) { + for (i = 0; i < ARRAY_SIZE(ids); i++) { + if (compare_pnp_id(id, ids[i]) != 0) + return 0; + } + } + } +#endif + + platform_device_register(&rtc_device); + dev_info(&rtc_device.dev, + "registered platform RTC device (no PNP device found)\n"); + + return 0; +} +device_initcall(add_rtc_cmos); diff --git a/arch/e2k/kernel/sclkr.c b/arch/e2k/kernel/sclkr.c new file mode 100644 index 000000000000..138a32bacfdb --- /dev/null +++ b/arch/e2k/kernel/sclkr.c @@ -0,0 +1,531 @@ +/* + * arch/e2k/kernel/sclkr.c + * + * This file contains implementation of sclkr clocksource. 
+ * + * Copyright (C) MCST 2015 Leonid Ananiev (leoan@mcst.ru) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/* #define SET_SCLKR_TIME1970 */ +#define SCLKR_CHECKUP 1 + +#define SCLKR_LO 0xffffffff + +/* For kernel 4.9: */ +#define READ_SSCLKR_REG() READ_SCLKR_REG() +#define READ_SSCLKM1_REG() READ_SCLKM1_REG() +#define READ_SSCLKM2_REG() READ_SCLKM2_REG() +#define READ_SSCLKM3_REG() READ_SCLKM3_REG() +#define WRITE_SSCLKR_REG(val) WRITE_SCLKR_REG(val) +#define WRITE_SSCLKM1_REG(val) WRITE_SCLKM1_REG(val) +#define WRITE_SSCLKM2_REG(val) WRITE_SCLKM2_REG(val) +#define WRITE_SSCLKM3_REG(val) WRITE_SCLKM3_REG(val) +#define READ_SCURRENT_REG() READ_CURRENT_REG() +#define sclkr_clocksource_register() \ + __clocksource_register(&clocksource_sclkr) + +long long sclkr_sched_offset = 0; +int sclkr_initialized = 0; +#ifdef DEBUG_SCLKR_FREQ +static int rtc_wr_sec = 0; +#endif + +static DEFINE_MUTEX(sclkr_set_lock); /* for /proc/sclkr_src */ +#ifdef DEBUG_SCLKR_FREQ +static DEFINE_PER_CPU(u64, prev_freq); +static DEFINE_PER_CPU(u64, freq_print) = 0; +#endif + +u64 basic_freq_hz = 1; /* 1 means there was not call to basic_freq_setup() + * and will be used hardware setting */ +int __init basic_freq_setup(char *str) +{ + if (!str) + return 0; + basic_freq_hz = simple_strtoul(str, &str, 0); +#ifdef DEBUG_SCLKR_FREQ + int cpu; + + for_each_possible_cpu(cpu) + per_cpu(prev_freq, cpu) = basic_freq_hz; +#endif + return 1; +} +__setup("sclkr_hz=", basic_freq_setup); + +static int do_watch4sclkr = 0; +static int __init set_watch4sclkr(char *str) +{ + do_watch4sclkr = 1; + return 1; +} +__setup("watch_sclkr", set_watch4sclkr); + +/* Use an aligned structure to make it occupy a whole cache line */ +struct prev_sclkr prev_sclkr = { ATOMIC64_INIT(0) }; + +/* exponential moving average of frequency */ +DEFINE_PER_CPU(int, ema_freq); +#define OSCIL_JIT_SHFT 10 +notrace +static u64 read_sclkr(struct 
clocksource *cs) +{ + u64 sclkr_sec, sclkr, freq, res; + e2k_sclkm1_t sclkm1; + unsigned long flags; +#ifdef DEBUG_SCLKR_FREQ + u64 this_prev_freq; + + this_prev_freq = __this_cpu_read(prev_freq); +#endif + raw_all_irq_save(flags); + sclkr = READ_SSCLKR_REG(); + sclkm1 = READ_SSCLKM1_REG(); + sclkr_sec = sclkr >> 32; + freq = sclkm1.div; + + if (unlikely(sclkr_mode != SCLKR_INT && !sclkm1.mode || + !sclkm1.sw || !freq)) { + pr_alert("WARNING: sclkr clocksource error.\n" + "CPU%02d sclkr=.%09lld, freq=%llu Hz, sclkm1=0x%llx, sclkr_mode=%d\n" + "There is no PulsePerSecond signal.\n" + "Set sclkr=no in cmdline\n", + raw_smp_processor_id(), (u64) (u32) sclkr, freq, + AW(sclkm1), sclkr_mode); + panic("read_sclkr: ERROR"); + } +#ifdef DEBUG_SCLKR_FREQ + if (unlikely(abs(this_prev_freq - freq) > + (this_prev_freq >> OSCIL_JIT_SHFT))) { + if (abs(freq - __this_cpu_read(freq_print)) > 2 && + /* write to RTC may change PPS phase */ + rtc_wr_sec != sclkr_sec && + (rtc_wr_sec + 1) != sclkr_sec) { + __this_cpu_write(freq_print, freq); + pr_err("CPU %d SCLKR ERROR freq(div)= %llu prev=%llu rtcwr=%d sec=%lld\n", + raw_smp_processor_id(), freq, this_prev_freq, + rtc_wr_sec, sclkr_sec); + } + + freq = basic_freq_hz; + } + __this_cpu_write(prev_freq, freq); +#endif + res = sclkr_to_ns(sclkr, freq); + raw_all_irq_restore(flags); + return res; +} + +notrace +u64 raw_read_sclkr(void) +{ + u64 sclkr, freq, res; + unsigned long flags; + e2k_sclkm1_t sclkm1; +#ifdef DEBUG_SCLKR_FREQ + u64 this_prev_freq; + + this_prev_freq = __this_cpu_read(prev_freq); +#endif + raw_all_irq_save(flags); + + sclkr = READ_SSCLKR_REG(); + sclkm1 = READ_SSCLKM1_REG(); + freq = sclkm1.div; + + if (unlikely(!freq)) { + raw_all_irq_restore(flags); + return 0; + } + +#ifdef DEBUG_SCLKR_FREQ + if (unlikely(abs(this_prev_freq - freq) > + (this_prev_freq >> OSCIL_JIT_SHFT))) + freq = basic_freq_hz; + __this_cpu_write(prev_freq, freq); +#endif + res = sclkr_to_ns(sclkr, freq); + raw_all_irq_restore(flags); + return 
res; +} +static void sclk_set_range(void *range) +{ + WRITE_SSCLKM2_REG((unsigned long long) range); +} + +static void resume_sclkr(struct clocksource *clocksource) +{ + if (strcmp(curr_clocksource->name, "lt") == 0 && + (sclkr_mode == SCLKR_RTC || sclkr_mode == SCLKR_EXT)) { + if (timekeeping_notify(&clocksource_sclkr)) { + pr_warn("resume_sclkr: can't set sclkr clocksourse\n"); + } + } +} + +#define SCLK_CSOUR_SHFT 20 +/* ns = (cyc * mult) >> shift + * for sclkr cyc==ns then 1 = (1 * mult) >> shift */ +struct clocksource clocksource_sclkr = { + .name = "sclkr", + .rating = 400, + .read = read_sclkr, + .resume = resume_sclkr, + .mask = CLOCKSOURCE_MASK(64 - SCLK_CSOUR_SHFT), + .shift = SCLK_CSOUR_SHFT, + .mult = 1 << SCLK_CSOUR_SHFT, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + + +static void sclkr_set_mode(void *arg) +{ + e2k_sclkm1_t sclkm1 = READ_SSCLKM1_REG(); + WRITE_SSCLKM1_REG((e2k_sclkm1_t) (u64) arg); + if (sclkm1.sclkm3) { + WRITE_SSCLKM3_REG(0); + } +} + +#ifdef SCLKR_CHECKUP +static int sclkr_sec_cpu[NR_CPUS]; +static void sclkr_read_sec(void *arg) +{ + sclkr_sec_cpu[raw_smp_processor_id()] = READ_SSCLKR_REG() >> 32; +} +#endif + +/* Set allowable deviation of frequency in % */ +void sclk_set_deviat(int dev) +{ + unsigned long long range; + unsigned int freq, d_freq; + + /* freq >> 7 -- allowable freq error is 0.01 */ + freq = READ_SSCLKM1_REG().div; + d_freq = freq * 100 / dev; + range = ((unsigned long long)(freq + d_freq) << 32) | + (freq - d_freq); + sclk_set_range((void *)range); + smp_call_function(sclk_set_range, (void *)range, 1); +} + +/* watch for cogerence of SCLKRs in each cpu */ +static long long diff_tod_sclkr = 0; +int watch4sclkr(void *arg) +{ + struct timespec64 ts; + u64 sclkr_time; + long long gtod_time; + unsigned long flags; + + while (1) { + local_irq_save(flags); + ktime_get_real_ts64(&ts); + sclkr_time = clocksource_sclkr.read(&clocksource_sclkr); + local_irq_restore(flags); + gtod_time = ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec; + 
if (diff_tod_sclkr == 0) + diff_tod_sclkr = gtod_time - sclkr_time; + if (abs(diff_tod_sclkr - (gtod_time - sclkr_time)) > 10000) + pr_warn("cpu%02u %lld gtod-sclkr= %lld\n", + raw_smp_processor_id(), ts.tv_sec, + gtod_time - sclkr_time); + schedule_timeout_interruptible(600 * HZ); + } + return 0; +} + +static void check_training_finished(void *arg) +{ + bool *finished = arg; + e2k_sclkm1_t sclkm1 = READ_SSCLKM1_REG(); + if (sclkm1.trn || !sclkm1.mode) + *finished = false; +} + +static bool is_training_finished_all_cpus(void) +{ + bool finished = true; + on_each_cpu(check_training_finished, (void *) &finished, 1); + return finished; +} + +static bool wait_for_cleared_trn(const long max_timeout) +{ + const int single_wait = 10; + int waited = 0; + while (!is_training_finished_all_cpus() && waited < max_timeout) { + schedule_timeout_uninterruptible(single_wait); + waited += single_wait; + } + return !is_training_finished_all_cpus(); +} + +noinline int sclk_register(void *new_sclkr_src_arg) +{ + long new_sclkr_mode = (long)new_sclkr_src_arg; + unsigned int sclkr_lo, sclkr_lo_swch; + unsigned int freq, safe_lo, safe_lo2; + unsigned long long range, sclkr_all; + e2k_sclkm1_t sclkm1; + struct task_struct *sclkr_w_thread; + struct timespec64 ts; + unsigned long flags; + int cpu; + const long max_timeout = 3 * HZ; + bool timedout; + + /* Make sure this kthread and suspend/resume do not run simultaneously */ + set_freezable(); + + if (basic_freq_hz == 1) { /* was not call to basic_freq_setup() */ + if (is_prototype()) { + basic_freq_hz = 1000000; + pr_notice("sclkr: PROTOTYPE DETECTED, SETTING FREQUENCY TO %llu HZ\n", + basic_freq_hz); + } else if (IS_MACHINE_E1CP) { + /* e1c+ has wrong frequency in sclkm1.div */ + basic_freq_hz = 100000000; + } else { + basic_freq_hz = READ_SSCLKM1_REG().div; + } + } + sclkm1 = (e2k_sclkm1_t) { .mdiv = 1, .div = basic_freq_hz }; + WRITE_SSCLKM1_REG(sclkm1); /* SCLKR_INT */ + pr_info("sclk_register old mod %d new %ld basic_fr_hz=%lld 
m1=%llx\n", + sclkr_mode, new_sclkr_mode, basic_freq_hz, + READ_SSCLKM1_REG().word); + for_each_possible_cpu(cpu) + per_cpu(ema_freq, cpu) = basic_freq_hz; +#ifdef DEBUG_SCLKR_FREQ + for_each_possible_cpu(cpu) + per_cpu(prev_freq, cpu) = basic_freq_hz; +#endif + mutex_lock(&sclkr_set_lock); + if (new_sclkr_mode == SCLKR_NO) { + strcpy(sclkr_src, "no"); + sclkr_mode = SCLKR_NO; + mutex_unlock(&sclkr_set_lock); + if (timekeeping_notify(<_cs)) + pr_warn("can't set lt clocksourse\n"); + return -1; + } + + /* All sclkr in cpu cores in a single processor are synchronous. */ + /* or = (bootblock_virt->info.bios.mb_type == MB_TYPE_ES4_PC401); */ + if (new_sclkr_mode == SCLKR_INT) { + sclkm1 = READ_SSCLKM1_REG(); + if (sclkm1.mode) { + /* Work around E16C/E8C Bug 120921 - sclkm1.div renew + * is missed while mode is chenged ext->int. + * Write in sclkm1.mode when far from second change */ + freq = sclkm1.div; + safe_lo = (freq >> 2) + (freq >> 3); + safe_lo2 = freq - safe_lo; + sclkr_lo = READ_SSCLKR_REG() & SCLKR_LO; + while (sclkr_lo < safe_lo || sclkr_lo > safe_lo2) { + cpu_relax(); + sclkr_lo = READ_SSCLKR_REG() & SCLKR_LO; + } + } + pr_info("sclkr clocksource registration at internal mode\n"); + sclkm1 = (e2k_sclkm1_t) { .sw = 1, .mdiv = 1, + .div = basic_freq_hz }; + /* .mode = 0 -- internel */ + on_each_cpu(sclkr_set_mode, (void *) AW(sclkm1), 1); + strcpy(sclkr_src, "int"); + sclkr_mode = SCLKR_INT; + sclkr_sched_offset = sched_clock() - raw_read_sclkr(); + /* sclkr_initialized should be set after sclkr_sched_offset */ + smp_wmb(); + sclkr_initialized = 1; + sclkr_clocksource_register(); + mutex_unlock(&sclkr_set_lock); + pr_info("sclk_register set to int mode, %%sclkm1.div=0x%x " + "(%d Mhz)\n", + READ_SSCLKM1_REG().div, + (READ_SSCLKM1_REG().div + 1) / 1000000); + if (do_watch4sclkr) { + if (num_online_nodes() >= 1) { + for_each_online_cpu(cpu) { + sclkr_w_thread = kthread_create(watch4sclkr, + NULL, "watch4sclkr/%d", cpu); + if (WARN_ON(!sclkr_w_thread)) { + 
pr_cont("kthread_create(watch4sclkr) " + "FAILED\n"); + } + kthread_bind(sclkr_w_thread, cpu); + wake_up_process(sclkr_w_thread); + } + } + } + return 0; + } /* new_sclkr_mode == SCLKR_INT */ + + /* FIXME add call register_cpu_notifier() for cpu hotplug case */ + + /* Next for new_sclkr_mode == SCLKR_EXT or SCLKR_RTC */ + + raw_all_irq_save(flags); + freq = basic_freq_hz; + safe_lo = (freq >> 2) + (freq >> 3) + (freq >> 4) + (freq >> 5); /* 46% reserve */ + safe_lo2 = freq - safe_lo; + pr_err("sclkr INFO safe_lo=%u %u fr=%u div=%u bas=%llu\n", + safe_lo, safe_lo2, freq, READ_SSCLKM1_REG().div, basic_freq_hz); + + /* We want to be far from beginning of internal second. + * So different processors will not appear on the different + * parties of seconds border while switching to extrnal. + */ + sclkr_lo = READ_SSCLKR_REG() & SCLKR_LO; + while (sclkr_lo < safe_lo || sclkr_lo > safe_lo2) { + cpu_relax(); + sclkr_lo = READ_SSCLKR_REG() & SCLKR_LO; + } + sclkr_lo_swch = sclkr_lo; + + /* Wait for first external signal of second biginig and look at + * sclkm1.div (prvious sclkr_lo) to see how far from external + * second bigining the swinching was. */ + + raw_all_irq_restore(flags); + /* .mode = 1 -- for RTC or externel sync */ + sclkm1 = (e2k_sclkm1_t) { .sw = 1, .trn = 1, .mode = 1 }; + + /* Hardware won't clear 'trn' bit if CPU clock is disabled + * so we pause cpuidle until sclkr initialization completes. 
*/ + cpuidle_pause_and_lock(); + on_each_cpu(sclkr_set_mode, (void *) AW(sclkm1), 1); + pr_info("Set sclkm1.mode=1 done in all CPUs sclkr=%lld.%09llu sec, last sclkm1.div=%d\n" + "Waiting for sclkm1.trn==0\n", + READ_SSCLKR_REG() >> 32, + ((u64) READ_SSCLKR_REG() & SCLKR_LO) * NSEC_PER_SEC / freq, + READ_SSCLKM1_REG().div); + /* SCLKR synchronized by RTC is for monotonic time coherent across CPUs + * It may leap due to hwclock command */ + if (new_sclkr_mode != SCLKR_RTC) { + /* freq >> 7 -- allowable freq error is 0.01 */ + range = ((unsigned long long)(freq + (freq >> 7)) << 32) | + (freq - (freq >> 7)); + sclk_set_range((void *)range); + smp_call_function(sclk_set_range, (void *)range, 1); + } + mutex_unlock(&sclkr_set_lock); + + timedout = wait_for_cleared_trn(max_timeout); + cpuidle_resume_and_unlock(); + if (timedout) + goto sclkr_no; + + sclkr_all = READ_SSCLKR_REG(); + sclkr_lo = sclkr_all & SCLKR_LO; + freq = READ_SSCLKM1_REG().div; + ktime_get_real_ts64(&ts); + pr_info("sclkr clocksource registration at cpu %d " + "sclkr=%lld.%09llu sec, getnstod =%lld.%09ld " + "fr=%u Hz, ext=%d swOK=%d range= %lld:%lld\n", + raw_smp_processor_id(), sclkr_all >> 32, + (unsigned long long)sclkr_lo * NSEC_PER_SEC / freq, + ts.tv_sec, ts.tv_nsec, + freq, READ_SSCLKM1_REG().mode, READ_SSCLKM1_REG().sw, + READ_SSCLKM2_REG() & 0xffffffff, + READ_SSCLKM2_REG() >> 32); +#ifdef SCLKR_CHECKUP + { + int cpu, cpu_cur = raw_smp_processor_id(); + while (sclkr_lo < safe_lo || sclkr_lo > safe_lo2) { + cpu_relax(); + sclkr_lo = READ_SSCLKR_REG() & SCLKR_LO; + } + on_each_cpu(sclkr_read_sec, NULL, 1); + for_each_online_cpu(cpu) + if (sclkr_sec_cpu[cpu] != sclkr_sec_cpu[cpu_cur]) { + pr_err("sclkr FAIL seconds on cpu%d =%d" + " is differ from cpu%d =%d\n", + cpu, sclkr_sec_cpu[cpu], + cpu_cur, sclkr_sec_cpu[cpu_cur]); + return -1; + } + } +#endif +#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK + set_sched_clock_stable(); +#endif + mutex_lock(&sclkr_set_lock); + sclkr_sched_offset = sched_clock() 
- raw_read_sclkr(); + /* sclkr_initialized should be set after sclkr_sched_offset */ + smp_wmb(); + sclkr_initialized = 1; + sclkr_clocksource_register(); + sclkr_mode = new_sclkr_mode; + if (new_sclkr_mode == SCLKR_RTC) + strcpy(sclkr_src, "rtc"); + if (new_sclkr_mode == SCLKR_EXT) + strcpy(sclkr_src, "ext"); + if (new_sclkr_mode == SCLKR_INT) + strcpy(sclkr_src, "int"); + mutex_unlock(&sclkr_set_lock); + return 0; +sclkr_no: + do { + pr_err("There is no pulse per second signal from RTC during %ld " + " secs, tell your hw vendor.\n" + "As a temporary workaround you can try setting " + "\"sclkr=int nohlt\" in kernel cmdline " + "on a single-socket system\n" + "and \"sclkr=no\" on a multi-socket system.\n" + "If RTC is not ticking then set the time in boot.\n" + "sclkm1=0x%llx (sw=%d, trn=%d mode=%d mdiv=%d " + "div or freq =%d )\n" + "sclkm2= 0x%llx safe_lo=%u=%llu%% basic_freq_hz=%lld " + "sclkr_lo_swch=%d\n", max_timeout, + READ_SSCLKM1_REG().word, READ_SSCLKM1_REG().sw, + READ_SSCLKM1_REG().trn, READ_SSCLKM1_REG().mode, + READ_SSCLKM1_REG().mdiv, READ_SSCLKM1_REG().div, + READ_SSCLKM2_REG(), + safe_lo, (long long)safe_lo * 100 / freq, + basic_freq_hz, sclkr_lo_swch); + schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT); + } while (1); + return -1; +} +EXPORT_SYMBOL(sclk_register); + +static int __init sclkr_init(void) +{ + int cpu = raw_smp_processor_id(); + struct task_struct *k; + + if (machine.native_iset_ver < E2K_ISET_V3 || + sclkr_mode == -1 || + sclkr_mode == SCLKR_RTC || + sclkr_mode == SCLKR_NO) + return 0; + + k = kthread_create_on_cpu(sclk_register, (void *) SCLKR_INT, + cpu, "sclkregister"); + if (IS_ERR(k)) { + pr_err("Failed to start sclk register thread, error: %ld\n", + PTR_ERR(k)); + return PTR_ERR(k); + } + wake_up_process(k); + + return 0; +} +arch_initcall(sclkr_init); diff --git a/arch/e2k/kernel/sec_space.c b/arch/e2k/kernel/sec_space.c new file mode 100644 index 000000000000..7ce5d0639652 --- /dev/null +++ b/arch/e2k/kernel/sec_space.c 
@@ -0,0 +1,166 @@ +/* + * arch/e2k/kernel/sec_space.c + * + * Secondary space support for E2K binary compiler + * + */ +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#undef DEBUG_SS_MODE +#undef DebugSS +#define DEBUG_SS_MODE 0 /* Secondary Space Debug */ +#define DebugSS(...) DebugPrint(DEBUG_SS_MODE ,##__VA_ARGS__) + +void set_upt_sec_ad_shift_dsbl(void *arg) +{ + unsigned long flags; + e2k_cu_hw0_t cu_hw0; + + raw_all_irq_save(flags); + cu_hw0 = READ_CU_HW0_REG(); + cu_hw0.upt_sec_ad_shift_dsbl = (arg) ? 1 : 0; + WRITE_CU_HW0_REG(cu_hw0); + raw_all_irq_restore(flags); +} + +s64 sys_el_binary(s64 work, s64 arg2, s64 arg3, s64 arg4) +{ + s64 res = -EINVAL; + thread_info_t *ti = current_thread_info(); + + if (!TASK_IS_BINCO(current)) { + pr_info("Task %d is not binary compiler\n", current->pid); + return res; + } + + switch (work) { + case GET_SECONDARY_SPACE_OFFSET: + DebugSS("GET_SECONDARY_SPACE_OFFSET: 0x%lx\n", SS_ADDR_START); + res = SS_ADDR_START; + break; + case SET_SECONDARY_REMAP_BOUND: + DebugSS("SET_SECONDARY_REMAP_BOUND: bottom = 0x%llx\n", arg2); + ti->ss_rmp_bottom = arg2 + SS_ADDR_START; + res = 0; + break; + case SET_SECONDARY_DESCRIPTOR: + /* arg2 - descriptor # ( 0-CS, 1-DS, 2-ES, 3-SS, 4-FS, 5-GS ) + * arg3 - desc.lo + * arg4 - desc.hi + */ + DebugSS("SET_SECONDARY_DESCRIPTOR: desc #%lld, desc.lo = " + "0x%llx, desc.hi = 0x%llx\n", + arg2, arg3, arg4); + res = 0; + switch (arg2) { + case CS_SELECTOR: + WRITE_CS_LO_REG_VALUE(I32_ADDR_TO_E2K(arg3)); + WRITE_CS_HI_REG_VALUE(arg4); + break; + case DS_SELECTOR: + WRITE_DS_LO_REG_VALUE(I32_ADDR_TO_E2K(arg3)); + WRITE_DS_HI_REG_VALUE(arg4); + break; + case ES_SELECTOR: + WRITE_ES_LO_REG_VALUE(I32_ADDR_TO_E2K(arg3)); + WRITE_ES_HI_REG_VALUE(arg4); + break; + case SS_SELECTOR: + WRITE_SS_LO_REG_VALUE(I32_ADDR_TO_E2K(arg3)); + WRITE_SS_HI_REG_VALUE(arg4); + break; + case FS_SELECTOR: + WRITE_FS_LO_REG_VALUE(I32_ADDR_TO_E2K(arg3)); + 
WRITE_FS_HI_REG_VALUE(arg4); + break; + case GS_SELECTOR: + WRITE_GS_LO_REG_VALUE(I32_ADDR_TO_E2K(arg3)); + WRITE_GS_HI_REG_VALUE(arg4); + break; + default: + DebugSS("Invalid descriptor #%lld\n", arg2); + res = -EINVAL; + } + break; + case GET_SNXE_USAGE: + DebugSS("GET_SNXE_USAGE\n"); + res = (machine.native_iset_ver >= E2K_ISET_V5) ? 1 : 0; + break; + case SIG_EXIT_GROUP: + arg2 = arg2 & 0xff7f; + DebugSS("SIG_EXIT_GROUP: code = 0x%llx\n", arg2); + do_group_exit(arg2); + BUG(); + break; + case SET_RP_BOUNDS_AND_IP: + DebugSS("SET_RP_BOUNDS_AND_IP: start=0x%llx, end=0x%llx, " + "IP=0x%llx\n", + arg2, arg3, arg4); + ti->rp_start = arg2; + ti->rp_end = arg3; + ti->rp_ret_ip = arg4; + res = 0; + break; + case SET_SECONDARY_64BIT_MODE: + if (arg2 == 1) { + current->thread.flags |= E2K_FLAG_64BIT_BINCO; + res = 0; + } + break; + case GET_PROTOCOL_VERSION: + DebugSS("GET_PROTOCOL_VERSION: %d\n", + BINCO_PROTOCOL_VERSION); + res = BINCO_PROTOCOL_VERSION; + break; + case SET_IC_NEED_FLUSH_ON_SWITCH: + DebugSS("SET_IC_NEED_FLUSH_ON_SWITCH: set = %lld\n", arg2); + if (arg2) + ti->last_ic_flush_cpu = smp_processor_id(); + else + ti->last_ic_flush_cpu = -1; + res = 0; + break; + case SET_UPT_SEC_AD_SHIFT_DSBL: + DebugSS("SET_UPT_AEC_AD_SHIFT_DSBL: set = %lld\n", arg2); + res = -EPERM; + if (machine.native_iset_ver >= E2K_ISET_V6) { + on_each_cpu(set_upt_sec_ad_shift_dsbl, (void *)arg2, 1); + res = 0; + } + break; + case GET_UPT_SEC_AD_SHIFT_DSBL: + DebugSS("SET_UPT_AEC_AD_SHIFT_DSBL\n"); + res = -EPERM; + if (machine.native_iset_ver >= E2K_ISET_V6) { + e2k_cu_hw0_t cu_hw0 = READ_CU_HW0_REG(); + res = cu_hw0.upt_sec_ad_shift_dsbl; + } + break; + default: + DebugSS("Invalid work: #%lld\n", work); + break; + } + + return res; +} + +static __init int check_ss_addr(void) +{ + WARN(SS_ADDR_END > USER_HW_STACKS_BASE, + "Secondary space crosses hardware stacks area!\n"); + + return 0; +} +late_initcall(check_ss_addr); + diff --git a/arch/e2k/kernel/setup.c b/arch/e2k/kernel/setup.c 
new file mode 100644 index 000000000000..c8b25a8a12f9 --- /dev/null +++ b/arch/e2k/kernel/setup.c @@ -0,0 +1,1205 @@ +/* + * + * Architecture-specific setup. + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_SOFTWARE_SWAP_TAGS +#include +#endif +#include +#include +#include + +#include +#include +#include +#ifdef CONFIG_OF +#include +#endif + +/* For PCI or other memory-mapped resources */ +unsigned long pci_mem_start = 0x80000000; + +#undef DEBUG_PROCESS_MODE +#undef DebugP +#define DEBUG_PROCESS_MODE 0 /* processes */ +#define DebugP(...) DebugPrint(DEBUG_PROCESS_MODE ,##__VA_ARGS__) + +#undef DEBUG_PER_CPU_MODE +#undef DebugPC +#define DEBUG_PER_CPU_MODE 0 /* per CPU data */ +#define DebugPC(...) DebugPrint(DEBUG_PER_CPU_MODE ,##__VA_ARGS__) + +#undef DEBUG_SPRs_MODE +#define DEBUG_SPRs_MODE 0 /* stack pointers registers */ + +/* cpu_data[boot_cpu_physical_apicid] is data for the bootstrap processor: */ +cpuinfo_e2k_t cpu_data[NR_CPUS]; +EXPORT_SYMBOL(cpu_data); + +/* + * This space gets a copy of optional info passed to us by the bootstrap + * Used to pass parameters into the kernel like root=/dev/sda1, etc. 
+ */ +static char command_line[COMMAND_LINE_SIZE]; + +struct resource standard_io_resources[] = { + { 0x00, 0x1f, "dma1", IORESOURCE_BUSY }, + { 0x20, 0x3f, "pic1", IORESOURCE_BUSY }, + { 0x40, 0x5f, "timer", IORESOURCE_BUSY }, + { 0x60, 0x6f, "keyboard", IORESOURCE_BUSY }, + { 0x80, 0x8f, "dma page reg", IORESOURCE_BUSY }, + { 0xa0, 0xbf, "pic2", IORESOURCE_BUSY }, + { 0xc0, 0xdf, "dma2", IORESOURCE_BUSY } +}; + +#define MACH_TYPE_NAME_UNKNOWN 0 +#define MACH_TYPE_NAME_ES2_DSP 1 +#define MACH_TYPE_NAME_ES2_RU 2 +#define MACH_TYPE_NAME_E2S 3 +#define MACH_TYPE_NAME_E8C 4 +#define MACH_TYPE_NAME_E1CP 5 +#define MACH_TYPE_NAME_E8C2 6 +#define MACH_TYPE_NAME_E12C 7 +#define MACH_TYPE_NAME_E16C 8 +#define MACH_TYPE_NAME_E2C3 9 + +/* + * Machine type names. + * Machine name can be retrieved from /proc/cpuinfo as model name. + * (Arrays are const pointers to const strings; the original + * "const char const *" merely duplicated the first qualifier.) + */ +static const char * const native_cpu_type_name[] = { + "unknown", + "e2c+", + "e2c", + "e2s", + "e8c", + "e1c+", + "e8c2", + "e12c", + "e16c", + "e2c3", +}; +static const char * const native_mach_type_name[] = { + "unknown", + "Elbrus-e2k-e2c+", + "Elbrus-e2k-e2c", + "Elbrus-e2k-e2s", + "Elbrus-e2k-e8c", + "Elbrus-e2k-e1c+", + "Elbrus-e2k-e8c2", + "Elbrus-e2k-e12c", + "Elbrus-e2k-e16c", + "Elbrus-e2k-e2c3", +}; +const char *e2k_get_cpu_type_name(int mach_type_id) +{ + return native_cpu_type_name[mach_type_id]; +} +const char *e2k_get_mach_type_name(int mach_type_id) +{ + return native_mach_type_name[mach_type_id]; +} +int e2k_get_machine_type_name(int mach_id) +{ + int mach_type; + + switch (mach_id) { +#if CONFIG_E2K_MINVER == 2 + case MACHINE_ID_ES2_DSP_LMS: + case MACHINE_ID_ES2_DSP: + mach_type = MACH_TYPE_NAME_ES2_DSP; + break; + case MACHINE_ID_ES2_RU_LMS: + case MACHINE_ID_ES2_RU: + mach_type = MACH_TYPE_NAME_ES2_RU; + break; +#endif +#if CONFIG_E2K_MINVER <= 3 + case MACHINE_ID_E2S_LMS: + case MACHINE_ID_E2S: + mach_type = MACH_TYPE_NAME_E2S; + break; +#endif +#if CONFIG_E2K_MINVER <= 4 + case MACHINE_ID_E8C_LMS: + case MACHINE_ID_E8C: + 
mach_type = MACH_TYPE_NAME_E8C; + break; + case MACHINE_ID_E1CP_LMS: + case MACHINE_ID_E1CP: + mach_type = MACH_TYPE_NAME_E1CP; + break; +#endif +#if CONFIG_E2K_MINVER <= 5 + case MACHINE_ID_E8C2_LMS: + case MACHINE_ID_E8C2: + mach_type = MACH_TYPE_NAME_E8C2; + break; +#endif +#if CONFIG_E2K_MINVER <= 6 + case MACHINE_ID_E12C_LMS: + case MACHINE_ID_E12C: + mach_type = MACH_TYPE_NAME_E12C; + break; + case MACHINE_ID_E16C_LMS: + case MACHINE_ID_E16C: + mach_type = MACH_TYPE_NAME_E16C; + break; + case MACHINE_ID_E2C3_LMS: + case MACHINE_ID_E2C3: + mach_type = MACH_TYPE_NAME_E2C3; + break; +#endif /* CONFIG_E2K_MINVER */ + default: + panic("setup_arch(): !!! UNKNOWN MACHINE TYPE !!!"); + mach_type = MACH_TYPE_NAME_UNKNOWN; + break; + } + return mach_type; +} + +/* + * Native mach_type_id variable is set in setup_arch() function. + */ +static int native_mach_type_id = MACH_TYPE_NAME_UNKNOWN; + +/* + * Function to get name of machine type. + * Must be used after setup_arch(). + */ +static const char *native_get_cpu_type_name(void) +{ + return e2k_get_cpu_type_name(native_mach_type_id); +} +const char *native_get_mach_type_name(void) +{ + return e2k_get_mach_type_name(native_mach_type_id); +} + +void native_set_mach_type_id(void) +{ + native_mach_type_id = e2k_get_machine_type_name(machine.native_id); + if (native_mach_type_id == MACH_TYPE_NAME_UNKNOWN) { + pr_err("%s(): unknown the machine type name\n", + __func__); + machine.setup_arch = NULL; + } +} + +void native_print_machine_type_info(void) +{ + const char *cpu_type = "?????????????"; + + cpu_type = native_get_cpu_type_name(); + pr_cont("NATIVE MACHINE TYPE: %s %s %s %s, ID %04x, REVISION: %03x, " + "ISET #%d", + cpu_type, + (NATIVE_HAS_MACHINE_E2K_DSP) ? "DSP" : "", + (NATIVE_HAS_MACHINE_E2K_IOHUB) ? "IOHUB" : "", + (NATIVE_IS_MACHINE_SIM) ? 
"LMS" : "", + native_machine_id, + machine.native_rev, machine.native_iset_ver); +} + +#define STANDARD_IO_RESOURCES (sizeof(standard_io_resources)/sizeof(struct resource)) + +machdep_t __nodedata machine = { 0 }; +EXPORT_SYMBOL(machine); + +#ifdef CONFIG_E2K_MACHINE +/* native_machine_id; is define in asm/e2k.h */ +#else /* ! CONFIG_E2K_MACHINE */ +__nodedata unsigned int native_machine_id = -1; +EXPORT_SYMBOL(native_machine_id); +#endif /* ! CONFIG_E2K_MACHINE */ + +unsigned long machine_serial_num = -1UL; +EXPORT_SYMBOL(machine_serial_num); + +int iohub_i2c_line_id = 0; +EXPORT_SYMBOL(iohub_i2c_line_id); + +static int __init iohub_i2c_line_id_setup(char *str) +{ + get_option(&str, &iohub_i2c_line_id); + if (iohub_i2c_line_id > 3) + iohub_i2c_line_id = 3; + else if (iohub_i2c_line_id <= 0) + iohub_i2c_line_id = 0; + return 1; +} +__setup("iohub_i2c_line_id=", iohub_i2c_line_id_setup); + +extern int __initdata max_iolinks; +extern int __initdata max_node_iolinks; + +static int __init +max_iolinks_num_setup(char *str) +{ + get_option(&str, &max_iolinks); + if (max_iolinks > MAX_NUMIOLINKS) + max_iolinks = MAX_NUMIOLINKS; + else if (max_iolinks <= 0) + max_iolinks = 1; + return 1; +} +__setup("iolinks=", max_iolinks_num_setup); + +static int __init +max_node_iolinks_num_setup(char *str) +{ + get_option(&str, &max_node_iolinks); + if (max_node_iolinks > NODE_NUMIOLINKS) + max_iolinks = NODE_NUMIOLINKS; + else if (max_node_iolinks <= 0) + max_node_iolinks = 1; + return 1; +} +__setup("nodeiolinks=", max_node_iolinks_num_setup); + +int eldsp_disable = 0; +EXPORT_SYMBOL(eldsp_disable); +static int __init +eldsp_disable_setup(char *str) +{ + eldsp_disable = 1; + if (IS_MACHINE_ES2) { + int nid; + e2k_pwr_mgr_t pwr_mgr; + + for_each_online_node(nid) { + pwr_mgr.E2K_PWR_MGR0_reg = + early_sic_read_node_nbsr_reg(nid, SIC_pwr_mgr); + pwr_mgr.E2K_PWR_MGR0_ic_clk = 0; + early_sic_write_node_nbsr_reg(nid, SIC_pwr_mgr, + pwr_mgr.E2K_PWR_MGR0_reg); + } + } + + return 1; +} 
+__setup("eldsp-off", eldsp_disable_setup); + + +#if defined (CONFIG_SMP) && defined (CONFIG_HAVE_SETUP_PER_CPU_AREA) +unsigned long __nodedata __per_cpu_offset[NR_CPUS]; +EXPORT_SYMBOL(__per_cpu_offset); + +# ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK +# ifdef CONFIG_NUMA +static int __init pcpu_cpu_distance(unsigned int from, unsigned int to) +{ + int distance = REMOTE_DISTANCE; + if (cpu_to_node(from) == cpu_to_node(to)) + distance = LOCAL_DISTANCE; + return distance; +} +# endif /* CONFIG_NUMA */ + +static void * __init pcpu_alloc_memblock(unsigned int cpu, unsigned long size, + unsigned long align) +{ + const unsigned long goal = __pa(MAX_DMA_ADDRESS); +# ifdef CONFIG_NUMA + int node = cpu_to_node(cpu); + void *ptr; + + if (!node_online(node) || !NODE_DATA(node)) { + ptr = memblock_alloc_from(size, align, goal); + DebugPC("cpu %d has no node %d or node-local memory\n", + cpu, node); + DebugPC("per cpu data for cpu%d %lu bytes at %016lx\n", + cpu, size, __pa(ptr)); + } else { + ptr = memblock_alloc_try_nid(size, align, goal, + MEMBLOCK_ALLOC_ACCESSIBLE, + node); + DebugPC("per cpu data for cpu%d %lu bytes on node%d at %016lx\n", + cpu, size, node, __pa(ptr)); + } + return ptr; +# else + return memblock_alloc_from(size, align, goal); +# endif +} + +static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) +{ + return pcpu_alloc_memblock(cpu, size, align); +} + +static void __init pcpu_fc_free(void *ptr, size_t size) +{ + memblock_free(__pa(ptr), size); +} +# endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK */ + +# ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK +static void __init pcpu_populate_pte(unsigned long addr) +{ + pgd_t *pgd = pgd_offset_k(addr); + pud_t *pud; + pmd_t *pmd; + + pud = pud_offset(pgd, addr); + if (pud_none(*pud)) { + pmd_t *new; + + new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); + pud_populate(&init_mm, pud, new); + } + + pmd = pmd_offset(pud, addr); + if (!pmd_present(*pmd)) { + pte_t *new; + + new = 
memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); + pmd_populate_kernel(&init_mm, pmd, new); + } +} +# endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */ + +void __init setup_per_cpu_areas(void) +{ + e2k_addr_t delta; + unsigned int cpu; +# ifdef CONFIG_NUMA + int node; +# endif /* CONFIG_NUMA */ + int rc = -EINVAL; + +# ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK + if (pcpu_chosen_fc != PCPU_FC_PAGE) { + rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, + PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, +# ifdef CONFIG_NUMA + pcpu_cpu_distance, +# else /* !CONFIG_NUMA */ + NULL, +# endif /* CONFIG_NUMA */ + pcpu_fc_alloc, pcpu_fc_free); + if (rc) + DebugPC("embed allocator failed " + "(%d), falling back to page size.\n", rc); + } +# endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK */ + +# ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK + if (rc < 0) { + rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, + pcpu_fc_alloc, pcpu_fc_free, pcpu_populate_pte); + if (rc) + DebugPC("page allocator failed (%d).\n", rc); + } +# endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */ + + if (rc < 0) + panic("Failed to initialized percpu areas (err=%d).\n", rc); + + delta = (e2k_addr_t)pcpu_base_addr - (e2k_addr_t)__per_cpu_start; + for_each_possible_cpu(cpu) + __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; + +# ifdef CONFIG_NUMA + for_each_node_has_dup_kernel(node) { + void *per_cpu_offset = __va(vpa_to_pa( + node_kernel_va_to_pa(node, + __per_cpu_offset))); + memcpy(per_cpu_offset, __per_cpu_offset, + sizeof(__per_cpu_offset)); + } +# endif /* CONFIG_NUMA */ + + /* alrighty, percpu areas up and running */ + for_each_possible_cpu(cpu) { +# ifdef CONFIG_L_LOCAL_APIC + per_cpu(x86_cpu_to_apicid, cpu) = + early_per_cpu_map(x86_cpu_to_apicid, cpu); + per_cpu(x86_bios_cpu_apicid, cpu) = + early_per_cpu_map(x86_bios_cpu_apicid, cpu); +# endif + } + + /* Set per_cpu area pointer */ + set_my_cpu_offset(__per_cpu_offset[smp_processor_id()]); + + /* indicate the early static arrays will soon be gone */ 
+# ifdef CONFIG_L_LOCAL_APIC + early_per_cpu_ptr(x86_cpu_to_apicid) = NULL; + early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL; +# endif +} +#endif /* CONFIG_SMP && CONFIG_HAVE_SETUP_PER_CPU_AREA */ + +void thread_init(void) +{ + thread_info_t *ti = current_thread_info(); + + DebugP("thread_init entered for task 0x%px\n", current); + current->thread.context = E2K_KERNEL_CONTEXT; + + kernel_trap_mask_init(); + + /* + * Dont worry about p->k_stk_base - sizeof(pt_regs_t). + * In the ttable_entry() we will do first: + * pt_regs + sizeof(pt_regs_t); + */ + ti->pt_regs = NULL; + DebugP("kernel stack: bottom %llx pt_regs %px\n", + (u64)current->stack, ti->pt_regs); + + ti->k_usd_hi = NATIVE_NV_READ_USD_HI_REG(); + ti->k_usd_lo = NATIVE_NV_READ_USD_LO_REG(); + ti->k_psp_lo = NATIVE_NV_READ_PSP_LO_REG(); + ti->k_psp_hi = NATIVE_NV_READ_PSP_HI_REG(); + ti->k_pcsp_lo = NATIVE_NV_READ_PCSP_LO_REG(); + ti->k_pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG(); + + DebugP("k_usd_lo.base %llx\nk_psp_lo.base %llx\nk_pcsp_lo.base %llx\n", + AS(ti->k_usd_lo).base, AS(ti->k_psp_lo).base, + AS(ti->k_pcsp_lo).base); + + /* it needs only for guest booting threads */ + virt_cpu_thread_init(current); + + DebugP("thread_init exited.\n"); +} + +int __init +parse_bootinfo(void) +{ + boot_info_t *bootblock = &bootblock_virt->info; + + if (bootblock->signature == X86BOOT_SIGNATURE || + bootblock->signature == ROMLOADER_SIGNATURE || + bootblock->signature == KVM_GUEST_SIGNATURE) { + if (!strncmp(bootblock->kernel_args_string, + KERNEL_ARGS_STRING_EX_SIGNATURE, + KERNEL_ARGS_STRING_EX_SIGN_SIZE)) + /* Extended command line (512 bytes) */ + strncpy(boot_command_line, + bootblock->bios.kernel_args_string_ex, + KSTRMAX_SIZE_EX); + else + /* Standart command line (128 bytes) */ + strncpy(boot_command_line, + bootblock->kernel_args_string, + KSTRMAX_SIZE); + + machine_serial_num = bootblock->mach_serialn; + +#ifdef CONFIG_BLK_DEV_INITRD + if (bootblock->ramdisk_size) { + initrd_start = 
vpa_to_pa(init_initrd_phys_base); + initrd_end = initrd_start + init_initrd_size; + } else { + initrd_start = initrd_end = 0; + } +#endif /* CONFIG_BLK_DEV_INITRD */ + + /* Workaround against misfortunate 80x30 vmode BOOT leftover */ + if (bootblock->vga_mode == 0xe2) { + screen_info.orig_y = 30; + screen_info.orig_video_lines = 30; + }; + if (bootblock->mach_flags & MSI_MACH_FLAG) { + pr_info("MSI supported\n"); + } else { + pr_info("MSI disabled\n"); + e2k_msi_disabled = 1; + } + } else { + return -1; + } + return 0; +} + + +notrace void cpu_set_feature(struct machdep *machine, int feature) +{ + set_bit(feature, machine->cpu_features); +} + +notrace void cpu_clear_feature(struct machdep *machine, int feature) +{ + clear_bit(feature, machine->cpu_features); +} + + +static int __init check_hwbug_atomic(void) +{ + int node, cpu, nodes_num, cpus_num, node_with_many_cpus; + cpumask_t node_cpus; + + if (!cpu_has(CPU_HWBUG_ATOMIC)) + return 0; + + /* + * Now that SMP has been initialized check again + * that this hardware bug can really happen. + * + * Conditions: + * 1. There must be more than 1 node present. + * 2. There must be a node with more than 1 cpu. 
+ */ + + node_with_many_cpus = false; + nodes_num = 0; + for_each_online_node(node) { + ++nodes_num; + + cpus_num = 0; + for_each_cpu_of_node(node, cpu, node_cpus) + ++cpus_num; + + if (cpus_num > 1) + node_with_many_cpus = true; + } + + if (nodes_num > 1 && node_with_many_cpus) + pr_alert("NOTE: workaround for hardware bug in atomics is enabled\n"); + else + cpu_clear_feature(&machine, CPU_HWBUG_ATOMIC); + + return 0; +} +arch_initcall(check_hwbug_atomic); + +static int __init check_hwbug_iommu(void) +{ + int node; + + if (!cpu_has(CPU_HWBUG_IOMMU)) + return 0; + + if (num_online_nodes() <= 1) + cpu_clear_feature(&machine, CPU_HWBUG_IOMMU); + + for_each_online_node(node) { + e2k_sic_sccfg_struct_t sccfg; + + sccfg.E2K_SIC_SCCFG_reg = + sic_read_node_nbsr_reg(node, SIC_sccfg); + if (!sccfg.E2K_SIC_SCCFG_diren) { + return 0; + } + + } + + cpu_clear_feature(&machine, CPU_HWBUG_IOMMU); + + return 0; +} +arch_initcall(check_hwbug_iommu); + +extern void (*late_time_init)(void); + +static void __init e2k_late_time_init(void) +{ +#ifdef CONFIG_SOFTWARE_SWAP_TAGS + swap_info_cache_init(); +#endif /* CONFIG_SOFTWARE_SWAP_TAGS */ + + /* + * Now that the external timer is enabled we can + * set up the local PIC timer on boot CPU. + * + * Since setup_boot_pic_clock() will enable interrupts + * it should not be called from time_init(). + */ + setup_boot_pic_clock(); +} + +void __init e2k_start_kernel_switched_stacks(void) +{ + /* + * Set pointer of current task structure to kernel initial task + */ + setup_bsp_idle_task(0); + +#ifdef CONFIG_SMP + current->cpu = 0; + E2K_SET_DGREG_NV(SMP_CPU_ID_GREG, 0); +#endif + + /* + * to save initial state of debugging registers to enable + * hardware breakpoints + */ + /* FIXME: debug registers is privileged */ + if (!paravirt_enabled()) + native_save_user_only_regs(¤t->thread.sw_regs); + + /* + * All kernel threads share the same mm context. 
+ */ + mmgrab(&init_mm); + current->active_mm = &init_mm; + BUG_ON(current->mm); + + E2K_JUMP(start_kernel); +} + +void __init e2k_start_kernel() +{ + bsp_switch_to_init_stack(); + + E2K_JUMP(e2k_start_kernel_switched_stacks); +} + +void __init setup_arch(char **cmdline_p) +{ + int i; + extern int panic_timeout; + char c = ' ', *to = command_line, *from = boot_command_line; + int len = 0; + int cpu; + + DebugSPRs("setup_arch()"); + + arch_setup_machine(); + + /* + * This should be as early as possible to fill cpu_present_mask and + * cpu_possible_mask. + */ +#ifdef CONFIG_L_LOCAL_APIC + /* + * Find (but now set) boot-time smp configuration. + * Like in i386 arch. used MP Floating Pointer Structure. + */ + find_smp_config(&bootblock_virt->info); + + /* + * Set entries of MP Configuration tables (but now one processor + * system) + */ + get_smp_config(); +#endif + + pr_notice("cpu to cpuid map: "); + for_each_possible_cpu(cpu) + pr_cont("%d->%d ", cpu, cpu_to_cpuid(cpu)); + pr_cont("\n"); + + numa_init(); + +#ifdef CONFIG_SMP + nmi_call_function_init(); +#endif + + parse_bootinfo(); + + for (;;) { + if (c != ' ') + goto next_char; + if (!memcmp(from, "iolinks=", 8)) { + from += 8; + max_iolinks = simple_strtol(from, &from, 0); + } + if (!memcmp(from, "nodeiolinks=", 12)) { + from += 12; + max_node_iolinks = simple_strtol(from, &from, 0); + } + next_char: + c = *(from++); + if (!c) + break; + if (COMMAND_LINE_SIZE <= ++len) + break; + *(to++) = c; + } + *to = '\0'; + *cmdline_p = command_line; + strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); + pr_notice("Full kernel command line: %s\n", saved_boot_cmdline); + + /* reboot on panic */ + panic_timeout = 30; /* 30 seconds of black screen of death */ + + parse_early_param(); + + l_setup_arch(); + + set_mach_type_id(); + + pr_notice("ARCH: E2K "); + + /* Although utsname is protected by uts_sem, locking it here is + * not needed - this early in boot process there is no one to race + * with. 
Moreover, semaphore operations must be called from places + * where sleeping is allowed, but here interrupts are disabled. */ + /* down_write(&uts_sem); */ + + print_machine_type_info(); + + /* See comment above */ + /* up_write(&uts_sem); */ + + if (machine_serial_num == -1UL || machine_serial_num == 0) { + pr_cont(" SERIAL # UNKNOWN\n"); + } else { + pr_cont(" SERIAL # 0x%016lx\n", machine_serial_num); + } + + printk("Kernel image check sum: %u\n", + bootblock_virt->info.kernel_csum); + + apply_alternative_instructions(); + + if (machine.setup_arch != NULL) { + machine.setup_arch(); + } + + paravirt_banner(); + + BOOT_TRACEPOINT("Calling paging_init()"); + paging_init(); + BOOT_TRACEPOINT("paging_init() finished"); + +#ifdef CONFIG_OF + device_tree_init(); +#endif + /* Must be called after paging_init() & device_tree_init() */ + l_setup_vga(); + + /* ACPI Tables are to be placed to phys addr in machine.setup_arch(). + * acpi_boot_table_init() will parse the ACPI tables (if they are) for + * possible boot-time SMP configuration. If machine does not support + * ACPI, acpi_boot_table_init will disable it. + */ + acpi_boot_table_init(); + + /* Parses MADT when ACPI is on. */ + early_acpi_boot_init(); + + thread_init(); + + /* request I/O space for devices used on all i[345]86 PCs */ + if (!HAS_MACHINE_E2K_IOHUB) { + for (i = 0; i < STANDARD_IO_RESOURCES; i++) + request_resource(&ioport_resource, + standard_io_resources+i); + } + +#ifdef CONFIG_BLK_DEV_INITRD + ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0); +#endif + + if (machine.native_iset_ver < E2K_ISET_V6) { + /* memory wait operation is not supported */ + idle_nomwait = true; + pr_info("Memory wait type idle is not supported, turn OFF\n"); + } else { + pr_info("Memory wait type idle is %s\n", + (idle_nomwait) ? "OFF" : "ON"); + } + + /* + * Read APIC and some other early information from ACPI tables. 
+ */ + acpi_boot_init(); + +#ifdef CONFIG_L_LOCAL_APIC + init_pic_mappings(); + + if (num_possible_cpus() != mp_num_processors) { + pr_alert( + "********************************************************\n" + "* *\n" + "* WARNING: Only %d from %d cpus were described by BOOT *\n" + "* in MP configuration table! OS is unreliable! *\n" + "* *\n" + "********************************************************\n", + mp_num_processors, num_possible_cpus()); + } + + /* need to wait for io_apic is mapped */ + probe_nr_irqs_gsi(); +#endif + + arch_clock_setup(); + +#ifdef CONFIG_NET + if (HAS_MACHINE_E2K_IOHUB) { + extern int e1000; + e1000 = 1; + } +#endif + + late_time_init = e2k_late_time_init; +} + +void __init init_IRQ(void) +{ + machine.init_IRQ(); +} + +/* + * Called by both boot and secondary processors + * to move global data into per-processor storage. + */ +void store_cpu_info(int cpu) +{ + cpuinfo_e2k_t *c = &cpu_data[cpu]; + + machine.setup_cpu_info(c); + + c->proc_freq = measure_cpu_freq(cpu); + + if (cpu_freq_hz == UNSET_CPU_FREQ) + cpu_freq_hz = c->proc_freq; + if (!cpu_clock_psec) + cpu_clock_psec = 1000000000000L / cpu_freq_hz; + +#ifdef CONFIG_SMP + c->cpu = cpu; + + c->mmu_last_context = CTX_FIRST_VERSION; + /* Flush TLB when reusing context after hotplug */ + __flush_tlb_all(); +#endif +} + +static int __init boot_store_cpu_info(void) +{ + /* Final full version of the data */ + store_cpu_info(0); + + pr_alert("Processor frequency %llu\n", cpu_data[0].proc_freq); + + return 0; +} +early_initcall(boot_store_cpu_info); + +/* + * Print CPU information. + */ +static int show_cpuinfo(struct seq_file *m, void *v) +{ + int rval = 0; + + if (machine.show_cpuinfo) + rval = machine.show_cpuinfo(m, v); + + return rval; +} + +static void *c_update(loff_t *pos) +{ + while (*pos < NR_CPUS && !cpumask_test_cpu(*pos, cpu_online_mask)) + ++*pos; + + return *pos < NR_CPUS ? 
&cpu_data[*pos] : NULL; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + cpus_read_lock(); + return c_update(pos); +} + +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + ++*pos; + return c_update(pos); +} + +static void c_stop(struct seq_file *m, void *v) +{ + cpus_read_unlock(); +} + +struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, +}; + + +/* + * Handler of errors. + * The error message is output on console and CPU goes to suspended state + * (executes infinite unmeaning cicle). + * In simulation mode CPU is halted with error sign. + */ + +void +init_bug(const char *fmt_v, ...) +{ + register va_list ap; + + va_start(ap, fmt_v); + dump_vprintk(fmt_v, ap); + va_end(ap); + dump_vprintk("\n\n\n", NULL); + + E2K_HALT_ERROR(100); + + for (;;) + cpu_relax(); +} + +/* + * Handler of warnings. + * The warning message is output on console and CPU continues execution of + * kernel process. + */ + +void +init_warning(const char *fmt_v, ...) 
+{ + register va_list ap; + + va_start(ap, fmt_v); + dump_vprintk(fmt_v, ap); + va_end(ap); + dump_vprintk("\n", NULL); +} + +#ifdef CONFIG_SYSFS +/* + * Allow IPD setting under /sys/devices/system/cpu/e2k/ipd + */ +static ssize_t ipd_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u64 mmu_cr; + int ipd; + + mmu_cr = get_MMU_CR(); + ipd = (mmu_cr & _MMU_CR_IPD_MASK) >> _MMU_CR_IPD_SHIFT; + + return sprintf(buf, "%d\n", ipd); +} + +static ssize_t ipd_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ipd; + u64 mmu_cr = get_MMU_CR(); + + if (kstrtoint(buf, 0, &ipd) < 0) + return -EINVAL; + + if (ipd != 0 && ipd != 1) + return -EINVAL; + + if (ipd) + mmu_cr |= _MMU_CR_IPD_MASK; + else + mmu_cr &= ~_MMU_CR_IPD_MASK; + + set_MMU_CR(mmu_cr); + + return count; +} + +static DEVICE_ATTR_RW(ipd); + +/* + * Allow CU_HW0 setting under /sys/devices/system/cpu/e2k/cu_hw0 + */ + +static ssize_t cu_hw0_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u64 cu_hw0 = NATIVE_READ_CU_HW0_REG_VALUE(); + + return sprintf(buf, "0x%llx\n", cu_hw0); +} + +static ssize_t cu_hw0_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long flags; + u64 cu_hw0; + + if (kstrtoull(buf, 0, &cu_hw0) < 0) + return -EINVAL; + + raw_all_irq_save(flags); + NATIVE_WRITE_CU_HW0_REG_VALUE(cu_hw0); + raw_all_irq_restore(flags); + + return count; +} + +static DEVICE_ATTR_RW(cu_hw0); + +/* + * Allow CU_HW1 setting under /sys/devices/system/cpu/e2k/cu_hw1 + */ + +static ssize_t cu_hw1_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u64 cu_hw1 = machine.get_cu_hw1(); + + return sprintf(buf, "0x%llx\n", cu_hw1); +} + +static ssize_t cu_hw1_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long flags; + u64 cu_hw1; + + if (kstrtoull(buf, 0, &cu_hw1) < 0) + return -EINVAL; + + 
raw_all_irq_save(flags); + machine.set_cu_hw1(cu_hw1); + raw_all_irq_restore(flags); + + return count; +} + +static DEVICE_ATTR_RW(cu_hw1); + +/* + * Allow L2_CTRL_EXT setting under /sys/devices/system/cpu/e2k/l2_ctrl_ext + */ +static ssize_t l2_ctrl_ext_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "0x%lx\n", + read_DCACHE_L2_reg(_E2K_DCACHE_L2_CTRL_EXT_REG, 0)); +} + +static ssize_t l2_ctrl_ext_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long flags; + u64 l2_ctrl_ext; + + if (kstrtoull(buf, 0, &l2_ctrl_ext) < 0) + return -EINVAL; + + raw_all_irq_save(flags); + E2K_WAIT_ALL; + write_DCACHE_L2_reg(l2_ctrl_ext, _E2K_DCACHE_L2_CTRL_EXT_REG, 0); + E2K_WAIT_ALL; + raw_all_irq_restore(flags); + + return count; +} + +static DEVICE_ATTR_RW(l2_ctrl_ext); + + + +static struct attribute *e2k_default_attrs_v2[] = { + &dev_attr_ipd.attr, + &dev_attr_cu_hw0.attr, + NULL +}; + +static struct attribute *e2k_default_attrs_v5[] = { + &dev_attr_cu_hw1.attr, + NULL +}; + +static struct attribute *e2k_default_attrs_v6[] = { + &dev_attr_l2_ctrl_ext.attr, + NULL +}; + +static struct attribute_group e2k_attr_group_v2 = { + .attrs = e2k_default_attrs_v2, + .name = "e2k" +}; + +static struct attribute_group e2k_attr_group_v5 = { + .attrs = e2k_default_attrs_v5, + .name = "e2k" +}; + +static struct attribute_group e2k_attr_group_v6 = { + .attrs = e2k_default_attrs_v6, + .name = "e2k" +}; + +static __init int e2k_add_sysfs(void) +{ + int ret; + + ret = sysfs_create_group(&cpu_subsys.dev_root->kobj, + &e2k_attr_group_v2); + if (ret) + return ret; + + if (machine.native_iset_ver >= E2K_ISET_V5) + sysfs_merge_group(&cpu_subsys.dev_root->kobj, + &e2k_attr_group_v5); + + if (machine.native_iset_ver >= E2K_ISET_V6) + sysfs_merge_group(&cpu_subsys.dev_root->kobj, + &e2k_attr_group_v6); + + return 0; +} +late_initcall(e2k_add_sysfs); +#endif diff --git a/arch/e2k/kernel/signal.c 
b/arch/e2k/kernel/signal.c new file mode 100644 index 000000000000..0b3f586849e6 --- /dev/null +++ b/arch/e2k/kernel/signal.c @@ -0,0 +1,2134 @@ +/* linux/arch/e2k/kernel/signal.c, v 1.10 08/21/2001. + * + * Copyright (C) 2001 MCST + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_PROTECTED_MODE +#include +#include +#include +#include +#endif /* CONFIG_PROTECTED_MODE */ +#include +#include + +#undef DEBUG_SIG_MODE +#undef DebugSig +#define DEBUG_SIG_MODE 0 /* Signal handling */ +#define DebugSig(...) DebugPrint(DEBUG_SIG_MODE ,##__VA_ARGS__) + +#undef DEBUG_HS_MODE +#undef DebugHS +#define DEBUG_HS_MODE 0 /* Signal handling */ +#define DebugHS(fmt, args...) \ +({ \ + if (DEBUG_HS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_SLJ_MODE +#undef DebugSLJ +#define DEBUG_SLJ_MODE 0 /* Signal long jump handling */ +#define DebugSLJ(...) DebugPrint(DEBUG_SLJ_MODE ,##__VA_ARGS__) + +#define DEBUG_FTRACE_MODE 0 +#if DEBUG_FTRACE_MODE +# define DebugFTRACE(...) pr_info(__VA_ARGS__) +#else +# define DebugFTRACE(...) +#endif + +#define DEBUG_SRT_MODE 0 /* Signal return handling */ +#define DebugSRT(...) DebugPrint(DEBUG_SRT_MODE, ##__VA_ARGS__) + +#define DEBUG_CTX_MODE 0 /* setcontext/swapcontext */ +#if DEBUG_CTX_MODE +#define DebugCTX(...) DebugPrint(DEBUG_CTX_MODE, ##__VA_ARGS__) +#else +#define DebugCTX(...) +#endif + +#undef DebugSCP +#define DebugSCP(fmt, ...) 
\ +do { \ + if (arch_init_pm_sc_debug_mode(PM_SC_DBG_MODE_SIGNALS)) \ + pr_info("%s: " fmt, __func__, ##__VA_ARGS__); \ +} while (0) + + +void +sig_to_exit(int errno) +{ + struct kernel_siginfo si; + struct k_sigaction *ka; + + DebugSig("start\n"); + + ka = ¤t->sighand->action[SIGSEGV-1]; + ka->sa.sa_handler = SIG_DFL; + + si.si_signo = SIGSEGV; + si.si_errno = 0; + si.si_code = SI_KERNEL; + force_sig_info(&si); + + DebugSig("finish\n"); + return; +} + +static inline void copy_jmp_regs(pt_regs_t *to, const pt_regs_t *from) +{ + CHECK_PT_REGS_CHAIN((pt_regs_t *)from, NATIVE_NV_READ_USD_LO_REG().USD_lo_base, + (u64) current->stack + KERNEL_C_STACK_SIZE); + + to->stacks.top = from->stacks.top; + to->wd = from->wd; + to->stacks.usd_lo = from->stacks.usd_lo; + to->stacks.usd_hi = from->stacks.usd_hi; + to->stacks.psp_lo = from->stacks.psp_lo; + to->stacks.psp_hi = from->stacks.psp_hi; + to->stacks.pcsp_lo = from->stacks.pcsp_lo; + to->stacks.pcsp_hi = from->stacks.pcsp_hi; + to->stacks.pshtp = from->stacks.pshtp; + to->stacks.pcshtp = from->stacks.pcshtp; + to->crs.cr0_lo = from->crs.cr0_lo; + to->crs.cr0_hi = from->crs.cr0_hi; + to->crs.cr1_lo = from->crs.cr1_lo; + to->crs.cr1_hi = from->crs.cr1_hi; + to->sys_rval = from->sys_rval; + to->flags = from->flags; +} + +static inline int setup_frame(struct sigcontext __user *sigc, + struct extra_ucontext __user *extra, const pt_regs_t *user_regs) +{ + struct trap_pt_regs *trap = user_regs->trap; + register struct k_sigaction *ka = ¤t_thread_info()->ksig.ka; + int rval; + int i; + char tag; + int sc_need_rstrt = 0; + + rval = __put_user(AS_WORD(user_regs->crs.cr0_lo), &sigc->cr0_lo); + rval = (rval) ?: __put_user(AS_WORD(user_regs->crs.cr0_hi), + &sigc->cr0_hi); + rval = (rval) ?: __put_user(AS_WORD(user_regs->crs.cr1_lo), + &sigc->cr1_lo); + rval = (rval) ?: __put_user(AS_WORD(user_regs->crs.cr1_hi), + &sigc->cr1_hi); + + rval = (rval) ?: __put_user(user_regs->stacks.top, &sigc->sbr); + rval = (rval) ?: 
__put_user(AS_WORD(user_regs->stacks.usd_lo), + &sigc->usd_lo); + rval = (rval) ?: __put_user(AS_WORD(user_regs->stacks.usd_hi), + &sigc->usd_hi); + rval = (rval) ?: __put_user(AS_WORD(user_regs->stacks.psp_lo), + &sigc->psp_lo); + rval = (rval) ?: __put_user(AS_WORD(user_regs->stacks.psp_hi), + &sigc->psp_hi); + rval = (rval) ?: __put_user(AS_WORD(user_regs->stacks.pcsp_lo), + &sigc->pcsp_lo); + rval = (rval) ?: __put_user(AS_WORD(user_regs->stacks.pcsp_hi), + &sigc->pcsp_hi); + + /* for binary compiler */ + if (unlikely(TASK_IS_BINCO(current))) { +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + int mlt_num = trap ? trap->mlt_state.num : 0; +#endif + + rval = (rval) ?: __put_user( + AS_WORD(current_thread_info()->upsr), + &sigc->upsr); + rval = (rval) ?: __put_user(user_regs->rpr_hi, &sigc->rpr_hi); + rval = (rval) ?: __put_user(user_regs->rpr_lo, &sigc->rpr_lo); + + /* copy MLT */ +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + if (!rval && mlt_num) { + if (copy_to_user((void __user *) sigc->mlt, + trap->mlt_state.mlt, + sizeof(e2k_mlt_entry_t) * mlt_num)) + rval |= -EFAULT; + } + if (!rval && (mlt_num < NATIVE_MAX_MLT_SIZE)) { + if (clear_user((void *)&sigc->mlt[mlt_num * 3], + sizeof(e2k_mlt_entry_t) * + (NATIVE_MAX_MLT_SIZE - mlt_num))) + rval |= -EFAULT; + } +#endif + } + + if (trap) { + u64 data; + + for (i = 0; i < min(MAX_TC_SIZE, HW_TC_SIZE); i++) { + rval = (rval) ?: __put_user(trap->tcellar[i].address, + &sigc->trap_cell_addr[i]); + rval = (rval) ?: __put_user(trap->tcellar[i].data, + &sigc->trap_cell_val[i]); + rval = (rval) ?: __put_user( + trap->tcellar[i].condition.word, + &sigc->trap_cell_info[i]); + load_value_and_tagd( + &trap->tcellar[i].data, &data, &tag); + rval = (rval) ?: __put_user(tag, + &sigc->trap_cell_tag[i]); + } + + /* TIR */ + rval = (rval) ?: __put_user(trap->nr_TIRs, &sigc->nr_TIRs); + for (i = 0; i <= trap->nr_TIRs; i++) { + rval = (rval) ?: __put_user( + trap->TIRs[i].TIR_hi.TIR_hi_reg, + &sigc->tir_hi[i]); + rval = (rval) ?: __put_user( + 
trap->TIRs[i].TIR_lo.TIR_lo_reg, + &sigc->tir_lo[i]); + } + + rval = (rval) ?: __put_user(trap->tc_count / 3, + &extra->tc_count); + rval = (rval) ?: __put_user(trap->curr_cnt, &extra->curr_cnt); + } else { + rval = (rval) ?: __put_user(0, &sigc->nr_TIRs); + rval = (rval) ?: __put_user(0ULL, &sigc->tir_hi[0]); + rval = (rval) ?: __put_user(0ULL, &sigc->tir_lo[0]); + rval = (rval) ?: __put_user(0, &extra->tc_count); + rval = (rval) ?: __put_user(-1, &extra->curr_cnt); + } + + rval = (rval) ?: __put_user(AW(user_regs->ctpr1), &extra->ctpr1); + rval = (rval) ?: __put_user(AW(user_regs->ctpr2), &extra->ctpr2); + rval = (rval) ?: __put_user(AW(user_regs->ctpr3), &extra->ctpr3); + + if (from_syscall(user_regs) && + ((user_regs->sys_rval == -ERESTARTNOINTR) || + (user_regs->sys_rval == -ERESTARTSYS) && + (ka->sa.sa_flags & SA_RESTART))) + sc_need_rstrt = 1; + rval = (rval) ?: __put_user(sc_need_rstrt, &extra->sc_need_rstrt); + + /* size of saved extra elements */ + rval = (rval) ?: __put_user(sizeof(struct extra_ucontext) - sizeof(int), + &extra->sizeof_extra_uc); + + /* DAM */ + SAVE_DAM(current_thread_info()->dam); + for (i = 0; i < DAM_ENTRIES_NUM; i++) + rval = (rval) ?: __put_user(current_thread_info()->dam[i], + &sigc->dam[i]); + + return rval; +} + +#ifdef CONFIG_PROTECTED_MODE +static inline int setup_prot_frame(struct sigcontext_prot *sigc, + const pt_regs_t *user_regs) +{ + int rval; + + rval = __put_user(AS_WORD(user_regs->crs.cr0_lo), &sigc->cr0_lo); + rval = (rval) ?: __put_user(AS_WORD(user_regs->crs.cr0_hi), + &sigc->cr0_hi); + rval = (rval) ?: __put_user(AS_WORD(user_regs->crs.cr1_lo), + &sigc->cr1_lo); + rval = (rval) ?: __put_user(AS_WORD(user_regs->crs.cr1_hi), + &sigc->cr1_hi); + + rval = (rval) ?: __put_user(user_regs->stacks.top, &sigc->sbr); + rval = (rval) ?: __put_user(AS_WORD(user_regs->stacks.usd_lo), + &sigc->usd_lo); + rval = (rval) ?: __put_user(AS_WORD(user_regs->stacks.usd_hi), + &sigc->usd_hi); + rval = (rval) ?: 
__put_user(AS_WORD(user_regs->stacks.psp_lo), + &sigc->psp_lo); + rval = (rval) ?: __put_user(AS_WORD(user_regs->stacks.psp_hi), + &sigc->psp_hi); + rval = (rval) ?: __put_user(AS_WORD(user_regs->stacks.pcsp_lo), + &sigc->pcsp_lo); + rval = (rval) ?: __put_user(AS_WORD(user_regs->stacks.pcsp_hi), + &sigc->pcsp_hi); + + return rval; +} + +/* + * This function fixes alignment of the given pointer 'ptr' towards upper bound: + * 'alignment' - alignment value (8 or 16) + * Returning value: pointer aligned. + */ +static inline char *align_ptr_up(const char *ptr, const int alignment) +{ + char *aligned_ptr; + + if (((unsigned long) ptr) % alignment) + aligned_ptr = (char *) (((unsigned long) ptr + + alignment - 1) & ~(alignment - 1)); + else + aligned_ptr = (char *)ptr; + + return aligned_ptr; +} + +#define PROTECTED_ALIGNMENT_VALUE 16 + +/* + * This function updates siginfo structure that kernel formed to pass it + * to signal handler procedure operating in the protected mode. + * It does the following: + * (a) if _sigval field of the siginfo structure stores pointer (sival_ptr), + * and the pointer is available in the sival_ptr_list_head list, it + * takes the descriptor linked to that pointer, and puts it into + * the siginfo structure following protected alignment value; + * (b) if _sigval field of the siginfo structure stores integer (sival_int) + * value, it shifts the value in memory to fix misalignment between + * kernel (8 byte base) and protected user memory space (16 byte). 
+ */ +static inline int copy_siginfo_to_user_prot(rt_sigframe_t __user *frame, + kernel_siginfo_t *info) +{ + int ret; + siginfo_t __user *siginfo_ptr = &frame->info; + char __user *sigval_prot_ptr; + void __user *ptr; /* kernel pointer in the siginfo structure */ + struct sival_ptr_list *curr_el; + + ret = copy_siginfo_to_user(&frame->info, info); + if (ret) + return ret; + + ptr = info->si_ptr; + if (!ptr) { + DbgSCP("Empty 'siginfo_t *info->si_ptr'\n"); + return ret; + } + + sigval_prot_ptr = align_ptr_up((char *)(&siginfo_ptr->si_ptr), + PROTECTED_ALIGNMENT_VALUE); + + DebugHS("ptr=0x%px; si_ptr=0x%llx; sigval_prot_ptr=0x%px\n", + ptr, &siginfo_ptr->si_ptr, sigval_prot_ptr); + + /* We look thru sival_ptr_list to find a record with the same ptr: + */ + curr_el = get_descriptor_attrs(ptr, 0 /*signum*/); + + DebugHS("curr_el=0x%px\n", curr_el); + if (!curr_el) { + /* + * This may be integer value in siginfo.sival_int field: + * Updating field alignment for PM: simply + * moving dwords #6,7 to position #8,9: + */ + if ((int)((long)ptr & PAGE_MASK)) { + /* this is definitely pointer; issuing alert */ + DbgSCP_ALERT("No record for 0x%px in sival_ptr_list\n", + ptr); + } else { /* there can be trash in high 32 bits; + * issuing debug message + */ + DbgSCP("Not a pointer in '(siginfo_t *)info->si_ptr': 0x%px\n", + ptr); + DbgSCP("from=0x%px [0x%llx] --> to=0x%px [0x%llx]\n", + &siginfo_ptr->si_ptr, ptr, sigval_prot_ptr, + *(u64 __user *)sigval_prot_ptr); + } + ret = __put_user((u64 __user)ptr, + (u64 __user *)sigval_prot_ptr); + } else { + /* + * Simply adding tags to user_ptr_lo/_hi and + * set these to sigvalptr + 4/5): + * --> we need to restore it for PM: + */ + DebugHS("curr_el: _lo=0x%llx _hi=0x%llx _tags=0x%x\n", + curr_el->user_ptr_lo, curr_el->user_ptr_hi, + curr_el->user_tags); + TRY_USR_PFAULT { + NATIVE_STORE_TAGGED_QWORD( + (e2k_ptr_t *)sigval_prot_ptr, + curr_el->user_ptr_lo, curr_el->user_ptr_hi, + curr_el->user_tags & 0xf, + curr_el->user_tags >> 4); + 
} CATCH_USR_PFAULT { + return -EFAULT; + } END_USR_PFAULT + } + return ret; +} +#else +# define setup_prot_frame(...) do { } while (0) +# define copy_siginfo_to_user_prot(...) do { } while (0) +#endif /* CONFIG_PROTECTED_MODE */ + +static inline int setup_rt_frame(rt_sigframe_t __user *frame, + kernel_siginfo_t *info, + const struct pt_regs *regs) +{ + sigset_t *set = sigmask_to_save(); + compat_sigset_t *cset = (compat_sigset_t *) set; + struct k_sigaction *ka = ¤t_thread_info()->ksig.ka; + int ret; + + if (!access_ok(frame, sizeof(*frame))) { + DebugHS("access failed to user stack frame %px\n", frame); + return -EFAULT; + } + DebugHS("info=%px signal=%d ->thread.flags=0x%lx IS_PROTECTED=%ld\n", + info, current_thread_info()->ksig.sig, current->thread.flags, + TASK_IS_PROTECTED(current)); + + if (TASK_IS_PROTECTED(current)) { + e2k_ptr_t ss_sp; + + ret = setup_prot_frame(&frame->uc_prot.uc_mcontext, regs); + ret = (ret) ?: __copy_to_user(&frame->uc_prot.uc_sigmask, + set, sizeof(*set)); + + AW(ss_sp).lo = MAKE_AP_LO(current->sas_ss_sp, + current->sas_ss_size, 0, 3); + AW(ss_sp).hi = MAKE_AP_HI(current->sas_ss_sp, + current->sas_ss_size, 0, 3); + ret = (ret) ?: __put_user(AW(ss_sp).lo, + &AW(frame->uc_prot.uc_stack.ss_sp).lo); + ret = (ret) ?: __put_user(AW(ss_sp).hi, + &AW(frame->uc_prot.uc_stack.ss_sp).hi); + ret = (ret) ?: __put_user(sas_ss_flags( + AS(regs->stacks.usd_lo).base), + &frame->uc_prot.uc_stack.ss_flags); + ret = (ret) ?: __put_user(current->sas_ss_size, + &frame->uc_prot.uc_stack.ss_size); + } else if (!(current->thread.flags & E2K_FLAG_32BIT)) { + ret = setup_frame(&frame->uc.uc_mcontext, + &frame->uc.uc_extra, regs); + ret = (ret) ?: __copy_to_user(&frame->uc.uc_sigmask, + set, sizeof(*set)); + ret = (ret) ?: __save_altstack(&frame->uc.uc_stack, + AS(regs->stacks.usd_lo).base); + } else { + ret = setup_frame(&frame->uc_32.uc_mcontext, + &frame->uc_32.uc_extra, regs); + ret = (ret) ?: __copy_to_user(&frame->uc_32.uc_sigmask, + cset, sizeof(*cset)); 
+ ret = (ret) ?: __compat_save_altstack(&frame->uc_32.uc_stack, + AS(regs->stacks.usd_lo).base); + } + + /* + * Must we set additional flags? + */ + if (!(ka->sa.sa_flags & SA_SIGINFO)) + return ret; + + if (TASK_IS_PROTECTED(current)) { + ret = (ret) ?: copy_siginfo_to_user_prot(frame, info); + ret = (ret) ?: __put_user(0, &frame->uc_prot.uc_flags); + ret = (ret) ?: __put_user(0, &AW(frame->uc_prot.uc_link).lo); + ret = (ret) ?: __put_user(0, &AW(frame->uc_prot.uc_link).hi); + } else if (!(current->thread.flags & E2K_FLAG_32BIT)) { + ret = (ret) ?: copy_siginfo_to_user(&frame->info, info); + ret = (ret) ?: __put_user(0, &frame->uc.uc_flags); + ret = (ret) ?: __put_user(0, &frame->uc.uc_link); + } else { + if (current->thread.flags & E2K_FLAG_64BIT_BINCO) + ret = (ret) ?: copy_siginfo_to_user(&frame->info, info); + else + ret = (ret) ?: copy_siginfo_to_user32( + &frame->compat_info, info); + + ret = (ret) ?: __put_user(0, &frame->uc_32.uc_flags); + ret = (ret) ?: __put_user(0, &frame->uc_32.uc_link); + } + + DebugHS("ret=%d info=0x%lx info->si_value: [int]=%d [ptr]=0x%llx\n", + ret, info, info->_sifields._rt._sigval.sival_int, + info->_sifields._rt._sigval.sival_ptr); + + return ret; +} + +static inline void copy_user_ctpr(e2k_ctpr_t *dst, e2k_ctpr_t val) +{ + /* Disallow privileged or reserved values */ + if (AS(val).opc == 2 || + (AS(val).ta_tag != CTPLL_CT_TAG && AS(val).ta_tag != CTPNL_CT_TAG)) + return; + + AW(*dst) = 0; + AS(*dst).ta_base = AS(val).ta_base & ~7ULL; + AS(*dst).opc = AS(val).opc; + AS(*dst).ta_tag = AS(val).ta_tag; + AS(*dst).ipd = AS(val).ipd; +} + +int restore_rt_frame(rt_sigframe_t __user *frame, struct k_sigaction *ka) +{ + unsigned long long __user *cr0_hi_ptr; + struct extra_ucontext __user *uc_extra_ptr; + void __user *set_ptr; + sigset_t set; + int ret = 0; + + if (!access_ok(frame, sizeof(*frame))) + return -EFAULT; + + if (TASK_IS_PROTECTED(current)) { + e2k_ptr_t ptr; + stack_t stack; + mm_segment_t seg; + int ret; + + ret = 
__get_user(stack.ss_flags, + &frame->uc_prot.uc_stack.ss_flags); + ret = (ret) ?: __get_user(stack.ss_size, + &frame->uc_prot.uc_stack.ss_size); + ret = (ret) ?: __get_user(AW(ptr).lo, + &AW(frame->uc_prot.uc_stack.ss_sp).lo); + ret = (ret) ?: __get_user(AW(ptr).hi, + &AW(frame->uc_prot.uc_stack.ss_sp).hi); + if (ret) + return -EFAULT; + + stack.ss_sp = (void *) (AS(ptr).ap.base + AS(ptr).ap.curptr); + + seg = get_fs(); + set_fs(KERNEL_DS); + ret = restore_altstack(&stack); + set_fs(seg); + + set_ptr = &frame->uc_prot.uc_sigmask; + cr0_hi_ptr = &frame->uc_prot.uc_mcontext.cr0_hi; + uc_extra_ptr = &frame->uc_prot.uc_extra; + } else if (!(current->thread.flags & E2K_FLAG_32BIT)) { + ret = restore_altstack(&frame->uc.uc_stack); + + set_ptr = &frame->uc.uc_sigmask; + cr0_hi_ptr = &frame->uc.uc_mcontext.cr0_hi; + uc_extra_ptr = &frame->uc.uc_extra; + } else { + ret = compat_restore_altstack(&frame->uc_32.uc_stack); + + set_ptr = (sigset_t *) &frame->uc_32.uc_sigmask; + cr0_hi_ptr = &frame->uc_32.uc_mcontext.cr0_hi; + uc_extra_ptr = &frame->uc_32.uc_extra; + } + + if (ret || __copy_from_user(&set, set_ptr, sizeof(set))) + return -EFAULT; + + if (ka->sa.sa_flags & SA_SIGINFO) { + e2k_ctpr_t ctpr1, ctpr2, ctpr3; + e2k_cr0_hi_t cr0_hi; + struct pt_regs *regs = current_pt_regs(); + + ret = (ret) ?: __get_user(AW(cr0_hi), cr0_hi_ptr); + ret = (ret) ?: __get_user(AW(ctpr1), &uc_extra_ptr->ctpr1); + ret = (ret) ?: __get_user(AW(ctpr2), &uc_extra_ptr->ctpr2); + ret = (ret) ?: __get_user(AW(ctpr3), &uc_extra_ptr->ctpr3); + if (ret) + return -EFAULT; + + if (AS(regs->crs.cr0_hi).ip != AS(cr0_hi).ip && + (AS(cr0_hi).ip << 3) < TASK_SIZE) { + /* + * There could be such situation: + * - user's signal handler changes IP + * - kernel ignores the trap cellar in this case and + * start to deliver the next signal + * - user's signal handler doesn't change IP + * - kernel starts to handle trap cellar again + * Kernel should never handle trap cellar after user's + * signal handler changed 
IP. So kernel should give up + * the trap cellar. + */ + if (regs->trap) { + regs->trap->tc_count = 0; +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + regs->trap->flags &= ~TRAP_RP_FLAG; +#endif + } + + AS(regs->crs.cr0_hi).ip = AS(cr0_hi).ip; + } + + if (TASK_IS_BINCO(current)) { + copy_user_ctpr(®s->ctpr1, ctpr1); + copy_user_ctpr(®s->ctpr2, ctpr2); + copy_user_ctpr(®s->ctpr3, ctpr3); + } + } + + set_current_blocked(&set); + + return 0; +} + +#define synchronize_user_stack() {} /* Nothing to do. RF is already flushed. */ +#define save_and_clear_fpu() {} /* NEEDSWORK */ + + +#ifdef CONFIG_VIRTUALIZATION +# define printk printk_fixed_args +# define panic panic_fixed_args +static __always_inline notrace __interrupt +void return_to_the_func(long fn, bool system_fn) +{ + register thread_info_t *ti = current_thread_info(); + register e2k_cr1_lo_t cr1_lo; + register e2k_cr0_hi_t cr0_hi; + register e2k_cuir_t cuir; + register e2k_psr_t psr; + + cr1_lo = NATIVE_NV_READ_CR1_LO_REG(); + cr0_hi = NATIVE_NV_READ_CR0_HI_REG(); + + AS_WORD(psr) = 0; + AS_STRUCT(psr).sge = 1; + AS_STRUCT(psr).ie = 1; /* sti(); */ + AS_STRUCT(psr).nmie = 1; /* nm sti(); */ + AS_STRUCT(psr).pm = (system_fn) ? 
1 : 0; + AS_STRUCT(cr1_lo).psr = AS_WORD(psr); + AS_STRUCT(cr0_hi).ip = fn >> 3; /* start user IP */ + + AS_WORD(cuir) = 0; // AS_STRUCT(cuir).checkup = 0 too + AS_STRUCT(cr1_lo).cuir = AS_WORD(cuir); + //TODO + panic("should write actual cui of guest function into %%cr1_lo as hardware won't load it automatically for us since iset v6\n"); + + if (!native_psr_irqs_disabled()) + panic_fixed_args("go2user: under sti\n"); + NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG(cr1_lo); + NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG(cr0_hi); + + sti_return(); + + if (TASK_IS_PROTECTED(current)) { + e2k_pusd_lo_t pusd_lo; + + pusd_lo = READ_PUSD_LO_REG(); + AS(pusd_lo).p = 1; + /* + * correct usd as if we are entered by call + */ + AS(pusd_lo).psl += 1; + WRITE_PUSD_LO_REG(pusd_lo); + ENABLE_US_CLW(); + } + + /* Restore user global registers. This is needed only for binco, */ + /* since for e2k applications g16-g31 registers are actually local. */ + if (TASK_IS_BINCO(current)) { + NATIVE_RESTORE_KERNEL_GREGS(¤t_thread_info()->k_gregs); + } else { + CLEAR_KERNEL_GREGS(); + } + + WRITE_CUTD_REG(READ_CUTD_REG()); + + /* the follow function should restore user UPSR state */ + complete_go2user(ti, fn); + + /* Prevent kernel information leakage */ +#if E2K_MAXSR != 112 +# error Must clear all registers here +#endif + E2K_CLEAR_RF_112(); +} +noinline notrace __interrupt __section(".entry.text") +void go2guest(long fn, bool priv_guest) +{ + return_to_the_func(fn, priv_guest); +} +# undef printk +# undef panic +#endif /* CONFIG_VIRTUALIZATION */ + +static int copy_context_to_signal_stack( + struct signal_stack_context __user *context, + struct local_gregs *l_gregs, struct pt_regs *regs, + struct ksignal *ksig) +{ + unsigned long ts_flag; + int ret; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + + ret = __copy_to_user_with_tags(&context->regs, regs, sizeof(*regs)); + + if (regs->trap) { + ret = ret ?: __copy_to_user_with_tags(&context->trap, + regs->trap, sizeof(*regs->trap)); + /* This pointer must not be 
accessed directly since signal + * stack could be reallocated (use signal_pt_regs_to_trap() + * instead), so put bogus value in it to help catch errors. */ + ret = ret ?: __put_user((void *) 1, &context->regs.trap); + + if (regs->trap->sbbp) { + ret = ret ?: __copy_to_user(&context->sbbp, regs->trap->sbbp, + sizeof(regs->trap->sbbp[0]) * SBBP_ENTRIES_NUM); + ret = ret ?: __put_user(context->sbbp, + &context->trap.sbbp); + } + } + + if (regs->aau_context) { + ret = ret ?: __copy_to_user(&context->aau_regs, + regs->aau_context, sizeof(*regs->aau_context)); + /* This pointer must not be accessed directly since signal + * stack could be reallocated, so put bogus value in it to + * help catch errors. */ + ret = ret ?: __put_user((void *) 1, &context->regs.aau_context); + } + + ret = ret ?: __copy_to_user(&context->sigact, &ksig->ka, + sizeof(ksig->ka)); + + if (l_gregs) { + ret = ret ?: __copy_to_user_with_tags(&context->l_gregs, + l_gregs, sizeof(*l_gregs)); + } + + clear_ts_flag(ts_flag); + + return ret ? -EFAULT : 0; +} + +/* + * Follow function is sutable for native, host and guest kernels + */ +notrace noinline __interrupt __section(".entry.text") +void sighandler_trampoline_continue(void) +{ + e2k_addr_t sbr; + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + + if (TASK_IS_PROTECTED(current)) + DISABLE_US_CLW(); + + /* + * Switch to kernel stacks. 
+ */ + GET_SIG_RESTORE_STACK(current_thread_info(), sbr, usd_lo, usd_hi); + NV_WRITE_USBR_USD_REG_VALUE(sbr, AW(usd_hi), AW(usd_lo)); + + /* + * Switch to %upsr for interrupts control + */ + DO_SAVE_UPSR_REG(current_thread_info()->upsr); + SET_KERNEL_UPSR_WITH_DISABLED_NMI(); + + /* + * Set pointer to VCPU state to enable interface host <-> guest + * (it is actual only for guest kernel) + */ + ONLY_SET_GUEST_GREGS(thread_info); + + E2K_JUMP(do_sigreturn); +} + +/** + * push_signal_stack - make sure there is enough space in the signal stack + * to store interrupted user context + * + * We use privileged area at the end of user space since we have + * to save privileged structures such as trap cellar or CTPRs. + */ +static struct signal_stack_context __user *push_signal_stack(void) +{ + struct thread_info *ti = current_thread_info(); + unsigned long context_size, address; + struct signal_stack_context __user *context; + + /* + * Is there enough space already? + */ + if (ti->signal_stack.size - ti->signal_stack.used >= sizeof(*context)) { + context = (struct signal_stack_context __user *) + (ti->signal_stack.base + ti->signal_stack.used); + ti->signal_stack.used += sizeof(*context); + + return context; + } + + context_size = sizeof(struct signal_stack_context); + context_size = round_up(context_size, PAGE_SIZE); + + /* + * Allocate if this is the first signal + */ + if (!ti->signal_stack.base) { + unsigned long ti_flags = TS_MMAP_PRIVILEGED | + TS_MMAP_SIGNAL_STACK; + + ti->status |= ti_flags; + address = vm_mmap_notkillable(NULL, USER_HW_STACKS_BASE, + context_size, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, 0); + ti->status &= ~ti_flags; + + if (IS_ERR_VALUE(address)) + return ERR_PTR(address); + + ti->signal_stack.base = address; + ti->signal_stack.size = context_size; + ti->signal_stack.used = sizeof(*context); + + return (struct signal_stack_context __user *) address; + } + + /* + * Expand already allocated area + */ + address = 
remap_e2k_stack(ti->signal_stack.base, ti->signal_stack.size, + ti->signal_stack.size + context_size, false); + if (IS_ERR_VALUE(address)) + return ERR_PTR(address); + + ti->signal_stack.base = address; + ti->signal_stack.size += context_size; + + context = (struct signal_stack_context __user *) + (ti->signal_stack.base + ti->signal_stack.used); + ti->signal_stack.used += sizeof(*context); + + return context; +} + +/** + * pop_signal_stack - counterpart to push_signal_stack() + */ +static struct signal_stack_context __user *do_get_signal_stack(bool push) +{ + struct thread_info *ti = current_thread_info(); + struct signal_stack_context __user *context; + unsigned long used = ti->signal_stack.used; + + if (WARN_ON_ONCE(used < sizeof(*context))) + do_exit(SIGKILL); + + used -= sizeof(*context); + context = (struct signal_stack_context __user *) + (ti->signal_stack.base + used); + if (push) { + ti->signal_stack.used = used; + } + + return context; +} +struct signal_stack_context __user *get_signal_stack(void) +{ + return do_get_signal_stack(false); +} +struct signal_stack_context __user *pop_signal_stack(void) +{ + return do_get_signal_stack(true); +} + +/** + * free_signal_stack - remove signal stack area on thread or context exit + */ +void free_signal_stack(struct signal_stack *signal_stack) +{ + int ret; + + if (!signal_stack->base) + return; + + ret = vm_munmap_notkillable(signal_stack->base, signal_stack->size); + if (ret) { + pr_err_ratelimited("%s [%d]: Could not free signal stack, error %d\n", + current->comm, current->pid, ret); + } + + signal_stack->base = 0; + signal_stack->size = 0; + signal_stack->used = 0; +} + +/** + * setup_signal_stack - save priviliged part of interrupted user context + * to a special privileged area in user space. 
+ */ +int setup_signal_stack(struct pt_regs *regs, bool is_signal) +{ + struct signal_stack_context __user *context; + struct local_gregs l_gregs, *gregs; + int ret; + + /* FIXME; macros TASK_IS_BINCO() should be updated to provide */ + /* guest user process case: is one running under binary compiler */ + if (!TASK_IS_BINCO(current)) { + save_local_glob_regs(&l_gregs, is_signal); + gregs = &l_gregs; + } else { + gregs = NULL; + } + + context = push_signal_stack(); + if (IS_ERR(context)) + return PTR_ERR(context); + + ret = copy_context_to_signal_stack(context, gregs, regs, + ¤t_thread_info()->ksig); + if (ret) + pop_signal_stack(); + + return ret; +} + +static int prepare_sighandler_trampoline(struct e2k_stacks *stacks) +{ + e2k_mem_crs_t *k_crs, crs; + unsigned long flags; + int ret; + + /* + * Prepare 'sighandler_trampoline' frame + */ + ret = chain_stack_frame_init(&crs, sighandler_trampoline, + current_thread_info()->u_stack.size, + E2K_KERNEL_PSR_DISABLED, 0, 0, false); + if (ret) + return ret; + + /* + * Copy the new frame into chain stack + * + * See user_hw_stacks_copy_full() for an explanation why this frame + * is located at (AS(ti->k_pcsp_lo).base). + */ + k_crs = (e2k_mem_crs_t *) AS(current_thread_info()->k_pcsp_lo).base; + + raw_all_irq_save(flags); + E2K_FLUSHC; + /* User frame from *k_crs has been copied to userspace + * already in user_hw_stacks_copy_full() */ + *k_crs = crs; + /* OK, now account for the new frame in *k_crs. 
*/ + AS(stacks->pcsp_hi).ind += SZ_OF_CR; + raw_all_irq_restore(flags); + + return 0; +} + +int prepare_sighandler_frame(struct e2k_stacks *stacks, + u64 pframe[32], e2k_mem_crs_t *crs) +{ + struct thread_info *ti = current_thread_info(); + struct ksignal *ksig = &ti->ksig; + rt_sigframe_t *rt_sigframe; + void *uc, *u_si; + u64 u_si_size, uc_size; + size_t pframe_size; + unsigned long reg1_offset; + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + e2k_sbr_t sbr; + int ret; + + /* + * Calculate ucontext/siginfo address + */ + rt_sigframe = (rt_sigframe_t *) ti->u_stack.top; + + if (!(ksig->ka.sa.sa_flags & SA_SIGINFO)) { + /* + * On Linux systems we pass 'struct sigcontext' in 2nd argument + */ + if (TASK_IS_PROTECTED(current)) { + u_si = &rt_sigframe->uc_prot.uc_mcontext; + u_si_size = sizeof(rt_sigframe->uc_prot.uc_mcontext); + } else if (!(current->thread.flags & E2K_FLAG_32BIT)) { + u_si = &rt_sigframe->uc.uc_mcontext; + u_si_size = sizeof(rt_sigframe->uc.uc_mcontext); + } else { + u_si = &rt_sigframe->uc_32.uc_mcontext; + u_si_size = sizeof(rt_sigframe->uc_32.uc_mcontext); + } + + uc = NULL; + uc_size = 0; + } else if (TASK_IS_PROTECTED(current)) { + u_si = &rt_sigframe->info; + u_si_size = sizeof(rt_sigframe->info); + uc = &rt_sigframe->uc_prot; + uc_size = sizeof(rt_sigframe->uc_prot); + } else if (!(current->thread.flags & E2K_FLAG_32BIT)) { + u_si = &rt_sigframe->info; + u_si_size = sizeof(rt_sigframe->info); + uc = &rt_sigframe->uc; + uc_size = sizeof(rt_sigframe->uc); + } else { + if (current->thread.flags & E2K_FLAG_64BIT_BINCO) { + u_si = &rt_sigframe->info; + u_si_size = sizeof(rt_sigframe->info); + } else { + u_si = &rt_sigframe->compat_info; + u_si_size = sizeof(rt_sigframe->compat_info); + } + uc = &rt_sigframe->uc_32; + uc_size = sizeof(rt_sigframe->uc_32); + } + + /* + * Update data stack + */ + usd_hi = stacks->usd_hi; + AS(usd_hi).size = ti->u_stack.size; + AW(sbr) = ti->u_stack.top; + + if (!TASK_IS_PROTECTED(current)) { + usd_lo = stacks->usd_lo; 
+ AS(usd_lo).base = ti->u_stack.top; + } else { + e2k_pusd_lo_t pusd_lo; + + AW(pusd_lo) = AW(stacks->usd_lo); + AS(pusd_lo).base = ti->u_stack.top & 0xffffffffULL; + AS(pusd_lo).p = 1; + AS(pusd_lo).psl += 1; /* signal handler */ + + AW(usd_lo) = AW(pusd_lo); + } + + stacks->usd_lo = usd_lo; + stacks->usd_hi = usd_hi; + stacks->top = round_up(AW(sbr), E2K_ALIGN_STACK_BASE_REG); + + /* + * Update procedure stack + */ + pframe_size = (TASK_IS_PROTECTED(current)) ? (32 * 8) : (16 * 8); + memset(pframe, 0, pframe_size); + + if (machine.native_iset_ver < E2K_ISET_V5) + reg1_offset = 1; + else + reg1_offset = 2; + if (!TASK_IS_PROTECTED(current)) { + pframe[0] = ksig->sig; + pframe[0 + reg1_offset] = (u64) u_si; + pframe[4] = (u64) uc; + } else { + __NATIVE_STORE_TAGGED_QWORD(&pframe[0], + MAKE_AP_LO((u64) rt_sigframe, 64, 0UL, RW_ENABLE), + MAKE_AP_HI((u64) rt_sigframe, 64, 0UL, RW_ENABLE), + E2K_AP_LO_ETAG, E2K_AP_HI_ETAG, 8 * reg1_offset); + pframe[4] = ksig->sig; + __NATIVE_STORE_TAGGED_QWORD(&pframe[8], + MAKE_AP_LO((u64) u_si, u_si_size, 0UL, RW_ENABLE), + MAKE_AP_HI((u64) u_si, u_si_size, 0UL, RW_ENABLE), + E2K_AP_LO_ETAG, E2K_AP_HI_ETAG, 8 * reg1_offset); + __NATIVE_STORE_TAGGED_QWORD(&pframe[12], + MAKE_AP_LO((u64) uc, uc_size, 0UL, RW_ENABLE), + MAKE_AP_HI((u64) uc, uc_size, 0UL, RW_ENABLE), + E2K_AP_LO_ETAG, E2K_AP_HI_ETAG, 8 * reg1_offset); + } + + /* + * Update chain stack + */ + ret = chain_stack_frame_init(crs, ksig->ka.sa.sa_handler, AS(usd_hi).size, + E2K_USER_INITIAL_PSR, pframe_size / EXT_4_NR_SZ, + (pframe_size / EXT_4_NR_SZ) / 2, true); + if (ret) + return ret; + + /* + * Flush CUT cache after modification of CUT (#117859) + */ + WRITE_CUTD_REG(READ_CUTD_REG()); + + return 0; +} + +static int copy_sighandler_frame(struct e2k_stacks *stacks, + u64 *pframe, e2k_mem_crs_t *crs) +{ + size_t pframe_size; + void __user *u_pframe; + unsigned long flags, ts_flag; + e2k_mem_crs_t *k_crs; + int ret; + + u_pframe = (void __user *) (AS(stacks->psp_lo).base + + 
AS(stacks->psp_hi).ind); + pframe_size = (TASK_IS_PROTECTED(current)) ? (32 * 8) : (16 * 8); + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __copy_to_user_with_tags(u_pframe, pframe, pframe_size); + clear_ts_flag(ts_flag); + if (ret) + return -EFAULT; + + AS(stacks->psp_hi).ind += pframe_size; + + /* + * handle_sys_call() does not restore %cr registers from pt_regs + * for performance reasons, so update chain stack in memory too. + * + * See user_hw_stacks_copy_full() for an explanation why this frame + * is located at (AS(ti->k_pcsp_lo).base + SZ_OF_CR). + */ + k_crs = (e2k_mem_crs_t *) AS(current_thread_info()->k_pcsp_lo).base; + + raw_all_irq_save(flags); + E2K_FLUSHC; + *(k_crs + 1) = *crs; + /* Same as prepare_sighandler_trampoline(): now account for the new + * frame in *k_crs. Its previous contents have been copied already + * by user_hw_stacks_copy_full(crs != NULL). */ + AS(stacks->pcsp_hi).ind += SZ_OF_CR; + raw_all_irq_restore(flags); + + /* See comment in user_hw_stacks_copy_full() */ + BUG_ON(PCSHTP_SIGN_EXTEND(stacks->pcshtp) != SZ_OF_CR); + + return 0; +} + +int signal_rt_frame_setup(pt_regs_t *regs) +{ + struct trap_pt_regs *trap = regs->trap; + register thread_info_t *ti = current_thread_info(); + register struct k_sigaction *ka = &ti->ksig.ka; + register kernel_siginfo_t *info = &ti->ksig.info; + register rt_sigframe_t __user *rt_sigframe; + u64 ss_sp, ss_stk_size, tmp_sp, tmp_sz; + + DebugHS("start addr %lx regs %px fn %lx\n", + (trap) ? trap->tcellar[trap->curr_cnt].address : 0UL, + regs, ka->sa.sa_handler); + + BUG_ON(!user_mode(regs)); + BUILD_BUG_ON(E2K_ALIGN_STACK != + max(E2K_ALIGN_USTACK_SIZE, E2K_ALIGN_PUSTACK_SIZE)); + + /* Perform fixup for the pre-signal frame. */ + rseq_signal_deliver(&ti->ksig, regs); + + ss_sp = user_stack_pointer(regs); + ss_stk_size = AS(regs->stacks.usd_hi).size; + + DebugHS("ss_sp 0x%llx size 0x%llx\n", ss_sp, ss_stk_size); + + /* + * This is the X/Open sanctioned signal stack switching + * to alt stack. 
+ */ + if (ka->sa.sa_flags & SA_ONSTACK) { + if (sas_ss_flags(ss_sp) == 0) { + u64 alt_ss_stk_base = round_up(current->sas_ss_sp, + E2K_ALIGN_STACK); + u64 alt_ss_stk_size = round_down(current->sas_ss_size + + current->sas_ss_sp - alt_ss_stk_base, + E2K_ALIGN_STACK); + + DebugHS("SA_ONSTACK ss 0x%lx sz 0x%lx, after aligning " + "ss 0x%llx sz 0x%llx, need 0x%lx " + "for signal frame\n", + current->sas_ss_sp, current->sas_ss_size, + alt_ss_stk_base, alt_ss_stk_size, + sizeof(rt_sigframe_t)); + + ss_stk_size = alt_ss_stk_size; + ss_sp = alt_ss_stk_base + alt_ss_stk_size; + } + + /* + * Do not try to expand altstack, fail with SIGSEGV instead. + */ + if (ss_stk_size < sizeof(rt_sigframe_t)) + return -EFAULT; + } else if (ss_stk_size < sizeof(rt_sigframe_t)) { + u64 incr; + + DebugHS("user stack size 0x%llx < 0x%lx needed to pass " + "signal info and context\n", + ss_stk_size, sizeof(rt_sigframe_t)); + + incr = sizeof(rt_sigframe_t) - ss_stk_size + PAGE_SIZE; + incr = round_up(incr, E2K_ALIGN_STACK_BASE_REG); + if (expand_user_data_stack(regs, incr)) { + pr_info_ratelimited("[%d] %s: user data stack overflow\n", + current->pid, current->comm); + return -EFAULT; + } + + ss_sp = user_stack_pointer(regs); + ss_stk_size = AS(regs->stacks.usd_hi).size; + + DebugHS("expanded stack: ss_sp 0x%llx size 0x%llx\n", + ss_sp, ss_stk_size); + } + + tmp_sp = ss_sp; + tmp_sz = ss_stk_size; + ss_sp -= sizeof(rt_sigframe_t); + ss_sp = round_down(ss_sp, E2K_ALIGN_STACK); + ss_stk_size -= (tmp_sp - ss_sp); + BUG_ON(ss_stk_size >= tmp_sz || ss_sp >= tmp_sp); + + rt_sigframe = (rt_sigframe_t *) ss_sp; + DebugHS("rt_sigframe %px\n", rt_sigframe); + + if (TASK_IS_BINCO(current)) + NATIVE_SAVE_RPR_REGS(regs); + + if (setup_rt_frame(rt_sigframe, info, regs)) + return -EFAULT; + + /* + * Update stack limits in thread_info - signal handler should use + * its own stack (be it altstack or just a part of main C stack). 
+ */ + STORE_USER_REGS_TO_THREAD_INFO(ti, ss_sp - ss_stk_size, + ss_sp, ss_stk_size); + DebugHS("sig #%d sig_info %px\n", + ti->ksig.sig, &rt_sigframe->info); + + return 0; +} + +int native_signal_setup(struct pt_regs *regs) +{ + thread_info_t *ti = current_thread_info(); + u64 pframe[32]; + int ret; + + ret = signal_rt_frame_setup(regs); + if (ret != 0) { + pr_err("%s(): setup signal rt frame failed, error %d\n", + __func__, ret); + return ret; + } + + /* + * After having called setup_signal_stack() we must unroll signal + * stack by calling pop_signal_stack() in case an error happens. + */ + ret = setup_signal_stack(regs, true); + if (ret) + return ret; + + /* + * Copy user's part of kernel hardware stacks into user + */ + ret = do_user_hw_stacks_copy_full(®s->stacks, regs, ®s->crs); + if (ret) + goto free_signal_stack; + + /* + * We want user to return to sighandler_trampoline so + * create fake kernel frame in user's chain stack + */ + ret = prepare_sighandler_trampoline(®s->stacks); + if (ret) + goto free_signal_stack; + + /* + * User's signal handler frame should be the last in stacks + */ + ret = prepare_sighandler_frame(®s->stacks, pframe, ®s->crs); + ret = ret ?: copy_sighandler_frame(®s->stacks, pframe, ®s->crs); + if (ret) + goto free_signal_stack; + + /* + * Update psize for ttable_entry8: syscall uses 0x70 + * but handler uses 0x40. 
+ */ + if (from_syscall(regs)) + regs->wd.psize = 0x40; + + /* + * For e2k applications g16-g31 registers are local, initialize them + */ + if (!TASK_IS_BINCO(current)) + memset(&ti->k_gregs, 0, sizeof(ti->k_gregs)); + + DebugHS("signal handler: sig=%d siginfo=0x%px\n" + "\tIS_PROTECTED = 0x%lx\tsa_flags = 0x%lx\t->thread.flags=0x%lx\n", + ti->ksig.sig, &ti->ksig.info, + TASK_IS_PROTECTED(current), ti->ksig.ka.sa.sa_flags, + current->thread.flags); + DebugHS("will start handler() 0x%lx for sig #%d\n", + ti->ksig.ka.sa.sa_handler, ti->ksig.sig); + + return 0; + +free_signal_stack: + pop_signal_stack(); + + return ret; +} + +void do_signal(struct pt_regs *regs) +{ + struct ksignal *ksig = ¤t_thread_info()->ksig; + bool restart_needed; + + DebugSig("signal pending, %s, sys_num %d\n", + from_trap(regs) ? "trap" : "syscall", regs->sys_num); + +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT + E2K_SAVE_CLOCK_REG(scall_times->do_signal_start); + scall_times->signals_num++; +#endif + + if (TASK_IS_BINCO(current)) + clear_delayed_signal_handling(current_thread_info()); + + if (get_signal(ksig)) { + int failed = signal_setup(regs); + + signal_setup_done(failed, ksig, + test_ts_flag(TS_SINGLESTEP_USER)); + if (!failed) { + regs->flags.sig_call_handler = 1; +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + if (regs->trap) + regs->trap->flags &= ~TRAP_RP_FLAG; +#endif /* CONFIG_SECONDARY_SPACE_SUPPORT */ + } + + return; + } + +#ifdef CONFIG_DEBUG_INIT + /* + * Only to debug kernel, if some test launch as init process + */ + if (current->pid <= 1) + panic("do_signal: signal on Init so will be recursive traps or signals\n"); +#endif /* CONFIG_DEBUG_INIT */ + + restart_needed = false; + + /* Did we come from a system call? 
*/ + if (from_syscall(regs)) { + /* Restart the system call - no handlers present */ + switch (regs->sys_rval) { + case -ERESTART_RESTARTBLOCK: + regs->sys_num = __NR_restart_syscall; + case -ERESTARTNOHAND: + case -ERESTARTSYS: + case -ERESTARTNOINTR: + restart_needed = true; + break; + } + } + CHECK_PT_REGS_CHAIN(regs, + NATIVE_NV_READ_USD_LO_REG().USD_lo_base, + (u64)current->stack + KERNEL_C_STACK_SIZE); + + /* + * If there's no signal to deliver, we just put the saved sigmask + * back. + */ + restore_saved_sigmask(); + + if (restart_needed) + regs->flags.sig_restart_syscall = 1; +} + + +static int get_data_stack_from_signal_regs(unsigned long corrected_frame_addr, + struct thread_info *ti, + u64 *dstack_sp, u64 *dstack_free, u64 *dstack_top) +{ + struct pt_regs __user *u_regs; + int skipped_regs, ret = 0; + unsigned long sig_pcs_window_base, sig_pcs_window_ind, ts_flag; + + skipped_regs = 0; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + signal_pt_regs_for_each(u_regs) { + e2k_stacks_t stacks; + unsigned long delta; + + if (__copy_from_user(&stacks, &u_regs->stacks, + sizeof(stacks))) { + SIGDEBUG_PRINT("SIGKILL. could not read signal stack\n"); + force_sig(SIGKILL); + ret = -EFAULT; + break; + } + + sig_pcs_window_base = AS(stacks.pcsp_lo).base; + sig_pcs_window_ind = AS(stacks.pcsp_hi).ind; + ret = find_in_old_u_pcs_list(sig_pcs_window_base, &delta); + if (ret) { + SIGDEBUG_PRINT("SIGKILL. 
do_longjmp(): could not find sig_u_pcs\n"); + force_sig(SIGKILL); + break; + } + sig_pcs_window_base += delta; + + if (sig_pcs_window_base + sig_pcs_window_ind >= + corrected_frame_addr) { + ++skipped_regs; + + calculate_e2k_dstack_parameters(&stacks, dstack_sp, + dstack_free, dstack_top); + } else { + break; + } + } + clear_ts_flag(ts_flag); + + /* + * Remove unwinded signal stack + */ + if (skipped_regs) { + ti->signal_stack.used -= skipped_regs * + sizeof(struct signal_stack_context); + if (WARN_ON_ONCE((s64) ti->signal_stack.used < 0)) + ti->signal_stack.used = 0; + } + + return ret; + +} + + +struct unwind_stack_args { + u64 jmp_frame_address; + u64 *ppsl_shift; + u64 *psp_delta; + u64 *pcsp_delta; + u64 corrected_size; + u64 *dstack_sp; + u64 *dstack_free; + u64 *dstack_top; + e2k_mem_crs_t *crs; +}; + +static int __unwind_stack(e2k_mem_crs_t *frame, unsigned long real_frame_addr, + unsigned long corrected_frame_addr, int flags, void *arg) +{ + struct unwind_stack_args *args = arg; + stack_frame_t cur_frame; + u64 usd_next_size; + int ret; + + /* + * Are we done yet? + */ + if (unlikely(corrected_frame_addr < args->jmp_frame_address)) { + SIGDEBUG_PRINT("SIGKILL. 
longjmp(): bad jump address from setjmp()\n"); + force_sig(SIGKILL); + return -EINVAL; + } + + if (corrected_frame_addr == args->jmp_frame_address) { + memcpy(args->crs, frame, sizeof(*args->crs)); + + return 1; + } + + /* + * Calculate data stack delta + */ + args->corrected_size += 0x100000000L * + getsp_adj_get_correction(corrected_frame_addr); + getsp_adj_set_correction(corrected_frame_addr, 0); + + usd_next_size = ((u32) AS(frame->cr1_hi).ussz << 4UL) + + args->corrected_size; + *args->dstack_sp += usd_next_size - *args->dstack_free; + *args->dstack_free = usd_next_size; + + cur_frame = get_stack_frame_type_IP(frame->cr0_hi, frame->cr1_lo, + false); + if (cur_frame == user_frame_type) { + *args->ppsl_shift += 1; + } else { + /* We are at signal trampoline right now so + * subtract SZ_OF_CR from frame address because + * pt_regs point to user frames below */ + ret = get_data_stack_from_signal_regs( + corrected_frame_addr - SZ_OF_CR, + current_thread_info(), args->dstack_sp, + args->dstack_free, args->dstack_top); + if (ret) + return ret; + } + + /* + * Calculate hardware stacks deltas + */ + *args->psp_delta += AS(frame->cr1_lo).wbs * EXT_4_NR_SZ; + *args->pcsp_delta += SZ_OF_CR; + + return 0; +} + +/** + * unwind_stack - go down to the target frame and find its current parameters + * (they could have changed since the call to setjmp()) + * + * @jmp_pcsp_lo - saved %pcsp.lo + * @jmp_pcsp_hi - saved %pcsp.hi + * @stacks - current user stacks + * @ppsl_shift - psl delta for %usd register will be returned here + * @psp_delta - procedure stack pointer delta will be returned here + * @pcsp_delta - chain stack pointer delta will be returned here + * @crs - target frame's %cr registers will be returned here + * @dstack_sp, @dstack_free, @dstack_top - data stack parameters for + * the target frame will be returned here + */ +static int unwind_stack(e2k_pcsp_lo_t jmp_pcsp_lo, e2k_pcsp_hi_t jmp_pcsp_hi, + const struct e2k_stacks *stacks, u64 *ppsl_shift, + u64 
*psp_delta, u64 *pcsp_delta, e2k_mem_crs_t *crs, + u64 *dstack_sp, u64 *dstack_free, u64 *dstack_top) +{ + unsigned long jmp_frame_address, delta; + struct unwind_stack_args args; + long ret; + + /* Calculate the starting parameters of data stack */ + calculate_e2k_dstack_parameters(stacks, dstack_sp, + dstack_free, dstack_top); + + jmp_frame_address = AS(jmp_pcsp_lo).base + AS(jmp_pcsp_hi).ind; + ret = find_in_old_u_pcs_list(AS(jmp_pcsp_lo).base, &delta); + if (ret) { + SIGDEBUG_PRINT("SIGKILL. do_longjmp(): couldn't find new_u_pcs\n"); + force_sig(SIGKILL); + return ret; + } + jmp_frame_address += delta; + + args.jmp_frame_address = jmp_frame_address; + args.ppsl_shift = ppsl_shift; + args.psp_delta = psp_delta; + args.pcsp_delta = pcsp_delta; + args.corrected_size = 0; + args.dstack_sp = dstack_sp; + args.dstack_free = dstack_free; + args.dstack_top = dstack_top; + args.crs = crs; + + *ppsl_shift = 0; + *psp_delta = 0; + *pcsp_delta = 0; + + ret = parse_chain_stack(PCS_USER | PCS_OPEN_IRQS, NULL, + __unwind_stack, &args); + if (ret == 0) { + SIGDEBUG_PRINT("SIGKILL. longjmp(): could not find jump frame\n"); + force_sig(SIGKILL); + ret = -ESRCH; + } + + return (IS_ERR_VALUE(ret)) ? ret : 0; +} + +static int check_longjmp_permissions(u64 old_ip, u64 new_ip) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *old_vma, *new_vma; + int ret = 0; + + down_read(&mm->mmap_sem); + + old_vma = find_vma(mm, old_ip); + if (!old_vma || old_ip < old_vma->vm_start) { + ret = -ESRCH; + goto out_unlock; + } + + new_vma = find_vma(mm, new_ip); + if (!new_vma || new_ip < new_vma->vm_start) { + ret = -ESRCH; + goto out_unlock; + } + + if ((old_vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC)) ^ + (new_vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC))) { + ret = -EPERM; + goto out_unlock; + } + +out_unlock: + up_read(&mm->mmap_sem); + + if (ret) { + SIGDEBUG_PRINT("SIGKILL. 
longjmp(): old (0x%llx) and new (0x%llx) IPs have different permissions\n", + old_ip, new_ip); + force_sig(SIGKILL); + } + return ret; +} + +static int longjmp_check_goal_frame(const struct e2k_stacks *stacks, + const e2k_mem_crs_t *crs) +{ + hw_stack_t *u_hw_stack = ¤t_thread_info()->u_hw_stack; + unsigned long new_fp; + int syscall_psize = TASK_IS_PROTECTED(current) ? 8 : 4; + int ret = -EINVAL; + + /* Check for possible WD.wsz overflow. When a function returns WD.psize + * is added to CR1_LO.wbs and the result is written to WD.wsz. Since no + * overflow checking is done in hardware we check it in software. */ + if (AS(crs->cr1_lo).wbs + syscall_psize > E2K_MAXSR) { + SIGDEBUG_PRINT("SIGKILL. longjmp(): corrupted jmp_buf: cr1_lo.wbs (%d) + syscall psize (%d) > MAXSR\n", + AS(crs->cr1_lo).wbs, syscall_psize); + goto out; + } + + new_fp = AS(stacks->pcsp_lo).base + AS(stacks->pcsp_hi).ind; + if (new_fp > (unsigned long) GET_PCS_BASE(u_hw_stack)) { + e2k_mem_crs_t __user *u_cframe = (e2k_mem_crs_t *) new_fp - 1; + e2k_cr1_lo_t cr1_lo; + unsigned long ts_flag; + int res; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + res = __get_user(AW(cr1_lo), &AW(u_cframe->cr1_lo)); + clear_ts_flag(ts_flag); + + if (res) { + SIGDEBUG_PRINT("SIGKILL. longjmp(): __get_user() fault\n"); + ret = -EFAULT; + goto out; + } + + if (AS(cr1_lo).wbs + AS(crs->cr1_lo).wpsz > E2K_MAXSR) { + SIGDEBUG_PRINT("SIGKILL. longjmp(): corrupted jmp_buf: caller's wbs + cr1_lo.psz > MAXSR\n"); + goto out; + } + } + + /* Avoid targeting kernel stack frames. This check is done just + * in case since such bad jmp_buf should not lead to anything bad. */ + if (get_stack_frame_type(crs->cr0_hi, crs->cr1_lo) != user_frame_type) { + SIGDEBUG_PRINT("SIGKILL. 
longjmp(): target frame is not user's\n"); + goto out; + } + + return 0; + +out: + force_sig(SIGKILL); + return ret; +} + +/** + * longjmp_restore_user_frame_state - restore the last frame to its saved state + * + * @crs: current state of frame that called setjmp()/getcontext(), + * this state must be restored to the way it was at the moment + * of the call + * @jmp_cr0_hi: saved %cr0.hi + * @jmp_cr1_lo: saved %cr1.lo + * @jmp_br: saved %br of frame that called setjmp()/getcontext() + * @jmp_psize: used %psize when calling setjmp()/getcontext() + * @psp_delta: user's %psp_hi.ind correction can be saved here + * @wd: current kernel entry's %wd + * + * The hard part here is to restore %wd.wsz and %wd.psize since + * all parameters of PS frame could have changed in the meantime. + */ +static int longjmp_restore_user_frame_state(e2k_mem_crs_t *crs, + e2k_cr0_hi_t jmp_cr0_hi, e2k_cr1_lo_t jmp_cr1_lo, + u32 jmp_br, u32 jmp_psize, u64 *psp_delta, e2k_wd_t wd) +{ + /* Take into account possibly different 'wbs' with which + * setjmp()/getcontext() and longjmp()/setcontext() have been called */ + *psp_delta += (AS(crs->cr1_lo).wbs - AS(jmp_cr1_lo).wbs) * EXT_4_NR_SZ; + /* Dealing with this is too hard, and it seems no real + * application changes 'psize' in this particular way. */ + if (jmp_psize != AS(wd).psize) { + SIGDEBUG_PRINT("SIGKILL. longjmp(): corrupted setjmp_buf: wd.psize != system call psize (4)\n"); + goto out; + } + + if (jmp_cr1_lo.pm) { + SIGDEBUG_PRINT("SIGKILL. 
longjmp(): corrupted setjmp_buf: cr1_lo = 0x%llx\n", + AW(jmp_cr1_lo)); + goto out; + } + + /* + * Restore target frame parameters + */ + crs->cr0_hi.ip = jmp_cr0_hi.ip; + crs->cr1_lo.wfx = jmp_cr1_lo.wfx; + crs->cr1_lo.wpsz = jmp_cr1_lo.wpsz; + crs->cr1_lo.wbs = jmp_cr1_lo.wbs; + crs->cr1_hi.br = jmp_br; + + return 0; + +out: + force_sig(SIGKILL); + return -EINVAL; +} + +static void longjmp_update_hw_stacks(e2k_stacks_t *stacks, + u64 psp_delta, u64 pcsp_delta) +{ + unsigned long new_fp; + + /* + * Calculate new %psp + */ + new_fp = AS(stacks->psp_lo).base + AS(stacks->psp_hi).ind - psp_delta; + update_psp_regs(new_fp, &stacks->psp_lo, &stacks->psp_hi); + + BUG_ON(GET_PSHTP_MEM_INDEX(stacks->pshtp)); + DebugSLJ("new PSP base 0x%llx size 0x%x ind 0x%x PSHTP 0x%llx\n", + stacks->psp_lo.PSP_lo_base, stacks->psp_hi.PSP_hi_size, + stacks->psp_hi.PSP_hi_ind, stacks->pshtp.PSHTP_reg); + + /* + * Calculate new %pcsp + */ + new_fp = AS(stacks->pcsp_lo).base + AS(stacks->pcsp_hi).ind - + pcsp_delta; + update_pcsp_regs(new_fp, &stacks->pcsp_lo, &stacks->pcsp_hi); + + /* See comment in user_hw_stacks_copy_full() */ + BUG_ON(PCSHTP_SIGN_EXTEND(stacks->pcshtp) != SZ_OF_CR); + DebugSLJ("new PCSP base 0x%llx size 0x%x ind 0x%x PCSHTP 0x%x\n", + stacks->pcsp_lo.PCSP_lo_base, stacks->pcsp_hi.PCSP_hi_size, + stacks->pcsp_hi.PCSP_hi_ind, stacks->pcshtp); +} + +static int longjmp_switch_to_new_context(pt_regs_t *regs, pt_regs_t *new_regs, + u64 dstack_sp, u64 dstack_free, u64 dstack_top) +{ + e2k_stacks_t *new_stacks = &new_regs->stacks; + e2k_mem_crs_t *k_crs; + e2k_mem_crs_t __user *u_cframe; + unsigned long flags; + int ret; + + if (WARN_ON_ONCE(AS(new_stacks->pcsp_hi).ind < SZ_OF_CR)) + do_exit(SIGKILL); + + /* + * Copy 2 last frames into chain stack - the first one for + * handle_sys_call() which does not restore pt_regs->crs and + * the second one because of the trick with FILL_HARDWARE_STACK(). 
+ * + * See user_hw_stacks_copy_full() for an explanation why these + * two frames are located at (AS(ti->k_pcsp_lo).base). + */ + k_crs = (e2k_mem_crs_t *) AS(current_thread_info()->k_pcsp_lo).base; + u_cframe = (void __user *) (AS(new_stacks->pcsp_lo).base + + AS(new_stacks->pcsp_hi).ind); + /* Do all of the updates under closed interrupts so that + * we still see consistent stack state from interrupt + * handler in case an interrupt arrives here. */ + raw_all_irq_save(flags); + ret = __copy_user_to_current_hw_stack(k_crs, u_cframe - 1, + sizeof(*k_crs), new_regs, true); + if (ret) { + SIGDEBUG_PRINT("SIGKILL. lcngjmp(): copy_user_to_current_hw_stack() fault\n"); + goto out; + } + + new_stacks->pcshtp = SZ_OF_CR; + NATIVE_FLUSHC; + *(k_crs + 1) = new_regs->crs; + + current_thread_info()->u_stack.bottom = dstack_sp - dstack_free; + current_thread_info()->u_stack.top = dstack_top; + + copy_jmp_regs(regs, new_regs); + +out: + raw_all_irq_restore(flags); + + if (ret) + force_sig(SIGKILL); + return ret; +} + +static void longjmp_update_dstack(struct e2k_stacks *stacks, u64 dstack_sp, + u64 dstack_free, u64 dstack_top, u64 ppsl_shift, + e2k_pcsp_hi_t jmp_pcsp_hi) +{ + stacks->top = dstack_top; + + AS(stacks->usd_hi).size = dstack_free; + + if (!TASK_IS_PROTECTED(current)) { + AS(stacks->usd_lo).base = dstack_sp; + } else { + e2k_pusd_lo_t pusd_lo; + + AW(pusd_lo) = AW(stacks->usd_lo); + AS(pusd_lo).base = dstack_sp & 0xffffffffUL; + AS(pusd_lo).psl -= ppsl_shift; + AW(stacks->usd_lo) = AW(pusd_lo); + + DebugSLJ("new psl %d, ppsl_shift %lld, jump point pusd_lo.psl %d\n", + AS(pusd_lo).psl, ppsl_shift, AS(pusd_lo).psl); + + /* + * Delete global pointers to local data stack. 
+ * setjmp - libc procedure and field .psl in usd_lo more 1 than + * in user procedure + */ + if ((AS(pusd_lo).psl - 1) * SZ_OF_CR > AS(jmp_pcsp_hi).ind) { + pr_info_ratelimited(" BAD in longjmp() jmp_pcsp_hi.ind : 0x%d jmp_psl=%d\n", + AS(jmp_pcsp_hi).ind, AS(pusd_lo).psl); + } + delete_records(AS(pusd_lo).psl); + } + + +} + +#if _NSIG != 64 +# error Fix sigmask restoring in longjmp/setcontext +#endif +long do_longjmp(u64 retval, u64 jmp_sigmask, e2k_cr0_hi_t jmp_cr0_hi, + e2k_cr1_lo_t jmp_cr1_lo, e2k_pcsp_lo_t jmp_pcsp_lo, + e2k_pcsp_hi_t jmp_pcsp_hi, u32 jmp_br, u32 jmp_psize, + e2k_fpcr_t fpcr, e2k_fpsr_t fpsr, e2k_pfpfr_t pfpfr, bool restore_fpu) +{ + thread_info_t *ti = current_thread_info(); + pt_regs_t new_regs, *regs = ti->pt_regs; + u64 psp_delta, pcsp_delta, dstack_sp, dstack_free, dstack_top, + ppsl_shift; + int ret; + + /* + * Copy user's part from kernel stacks back to user. + * This also removes any need to FILL before return to user. + */ + ret = do_user_hw_stacks_copy_full(®s->stacks, regs, NULL); + if (ret) + return ret; + + DebugSLJ("current PCSP : base 0x%llx, ind 0x%x, size 0x%x PCSHTP 0x%x\n" + " ip 0x%llx cr1_lo 0x%llx : wbs 0x%x wpsz 0x%x wfx %d\n", + regs->stacks.pcsp_lo.PCSP_lo_base, + regs->stacks.pcsp_hi.PCSP_hi_ind, + regs->stacks.pcsp_hi.PCSP_hi_size, + regs->stacks.pcshtp, + regs->crs.cr0_hi.CR0_hi_IP, + regs->crs.cr1_lo.CR1_lo_half, + regs->crs.cr1_lo.CR1_lo_wbs, + regs->crs.cr1_lo.CR1_lo_wpsz, + regs->crs.cr1_lo.CR1_lo_wfx); + DebugSLJ("current PSP : base 0x%llx, ind 0x%x, size 0x%x PSHTP 0x%llx\n", + regs->stacks.psp_lo.PSP_lo_base, + regs->stacks.psp_hi.PSP_hi_ind, + regs->stacks.psp_hi.PSP_hi_size, + regs->stacks.pshtp.PSHTP_reg); + + init_pt_regs_for_syscall(&new_regs); + copy_jmp_regs(&new_regs, regs); + + DebugSLJ("jump point PCSP : base 0x%llx, ind 0x%x, size 0x%x\n" + "jump point sigmask 0x%llx ip 0x%llx cr1_lo 0x%llx : wbs 0x%x wpsz 0x%x wfx %d\n", + jmp_pcsp_lo.PCSP_lo_base, jmp_pcsp_hi.PCSP_hi_ind, + 
jmp_pcsp_hi.PCSP_hi_size, jmp_sigmask, AW(jmp_cr0_hi), + AW(jmp_cr1_lo), AS_STRUCT(jmp_cr1_lo).wbs, + AS(jmp_cr1_lo).wpsz, AS(jmp_cr1_lo).wfx); + + ret = check_longjmp_permissions(AS(regs->crs.cr0_hi).ip << 3, + AS(jmp_cr0_hi).ip << 3); + if (ret) + return ret; + + /* unwind_stack - go down to the target frame and find its current + * parameters (they could have changed since the call to setjmp()) */ + ret = unwind_stack(jmp_pcsp_lo, jmp_pcsp_hi, &new_regs.stacks, + &ppsl_shift, &psp_delta, &pcsp_delta, &new_regs.crs, + &dstack_sp, &dstack_free, &dstack_top); + if (ret) + return ret; + + /* Restore the last frame %cr to its saved state */ + ret = longjmp_restore_user_frame_state(&new_regs.crs, jmp_cr0_hi, + jmp_cr1_lo, jmp_br, jmp_psize, &psp_delta, new_regs.wd); + if (ret) + return ret; + + /* Update all 3 stacks' pointers in pt_regs */ + longjmp_update_hw_stacks(&new_regs.stacks, psp_delta, pcsp_delta); + longjmp_update_dstack(&new_regs.stacks, dstack_sp, dstack_free, + dstack_top, ppsl_shift, jmp_pcsp_hi); + + /* Check that passed buffer is correct */ + ret = longjmp_check_goal_frame(&new_regs.stacks, &new_regs.crs); + if (ret) + return ret; + + ret = longjmp_switch_to_new_context(regs, &new_regs, + dstack_sp, dstack_free, dstack_top); + if (ret) + return ret; + + ret = complete_long_jump(&new_regs); + if (ret) + return ret; + + if (jmp_sigmask & sigmask(SIGKILL)) { + sigset_t k_sigset = { .sig[0] = jmp_sigmask }; + set_current_blocked(&k_sigset); + } + + if (restore_fpu) { + WRITE_FPCR_REG(fpcr); + WRITE_FPSR_REG(fpsr); + WRITE_PFPFR_REG(pfpfr); + } + + DebugSLJ("jump point new CR1: wbs 0x%x, wpsz 0x%x, wfx %d\n" + "jump point IP in mem CR0 0x%llx new IP 0x%llx\n" + "jump point BR in mem CR1 0x%x new BR 0x%x\n" + "jump point new USD = %llx:%llx\n", + AS(jmp_cr1_lo).wbs, AS(jmp_cr1_lo).wpsz, AS(jmp_cr1_lo).wfx, + AS(new_regs.crs.cr0_hi).ip << 3, AS(jmp_cr0_hi).ip << 3, + AS(new_regs.crs.cr1_hi).br, jmp_br, + AW(new_regs.stacks.usd_hi), 
AW(new_regs.stacks.usd_lo)); + + return retval; +} + +long sys_e2k_longjmp2(struct jmp_info __user *env, u64 retval) +{ + struct jmp_info jmp_info; + u32 jmp_psize; + struct pt_regs *regs = current_pt_regs(); + e2k_pcsp_lo_t pcsp_lo; + unsigned long delta; + + DebugSLJ("pid %d start env %px retval %lld\n", + current->pid, env, retval); + + if (copy_from_user(&jmp_info, env, sizeof(jmp_info))) + return -EFAULT; + + /* Switch to another context if needed */ + AW(pcsp_lo) = jmp_info.pcsplo; + if (find_in_old_u_pcs_list(AS(pcsp_lo).base, &delta)) { + e2k_usd_lo_t usd_lo = (e2k_usd_lo_t) jmp_info.usd_lo; + long ret = hw_context_lookup_pcsp_and_switch(pcsp_lo, usd_lo); + if (ret) + return ret; + } + + jmp_psize = AS((e2k_wd_t) ((u64) jmp_info.wd_hi32 << 32ULL)).psize; + + return do_longjmp(retval, jmp_info.sigmask, + (e2k_cr0_hi_t) jmp_info.ip, + (e2k_cr1_lo_t) jmp_info.cr1lo, + (e2k_pcsp_lo_t) jmp_info.pcsplo, (e2k_pcsp_hi_t) + (jmp_info.pcsphi + PCSHTP_SIGN_EXTEND(jmp_info.pcshtp)), + jmp_info.br, jmp_psize, (e2k_fpcr_t) { .word = 0}, + (e2k_fpsr_t) { .word = 0 }, (e2k_pfpfr_t) { .word = 0}, + false); +} + +#ifdef CONFIG_PROTECTED_MODE +long protected_sys_rt_sigaction(int sig, + const void __user *ptr, void __user *ptr2, + const size_t sigsetsize) + +{ + long rval; + struct k_sigaction new_ka, old_ka; + prot_sigaction_old_t __user *act; + prot_sigaction_old_t __user *oact; + int tag = -1; + + if (sigsetsize != sizeof(sigset_t)) { + if (sigsetsize > sizeof(sigset_t) + && !(sigsetsize >> 8)) /* reasonable positive value */ + DbgSCP_ALERT("SigSetSize seems extended beyond 64 bits.\n"); + else + DbgSCP_ALERT("Wrong 'sigsetsize' value (%zd); expected %zd.\n", + sigsetsize, sizeof(sigset_t)); + return -EINVAL; + } + + if (ptr) { + e2k_pl_lo_t pl_lo; + + act = (prot_sigaction_old_t __user *)ptr; + + if (GET_USER_VAL_TAGD(AW(pl_lo), tag, &act->sa_handler)) { + DebugSCP("Bad act->sa_handler = %px\n", + &act->sa_handler); + return -EFAULT; + } + new_ka.sa.sa_handler = 
(__sighandler_t)pl_lo.target; + + if (tag != E2K_PL_ETAG && new_ka.sa.sa_handler != SIG_DFL && + new_ka.sa.sa_handler != SIG_IGN) { + DebugSCP("Wrong act->sa_handler %d %px %px\n", + tag, &act->sa_handler, + new_ka.sa.sa_handler); + return -EINVAL; + } + + /* Note that I intentionally ignore sa_restorer below */ + /* because it seems to be useless on E2K. */ + if (copy_from_user(&new_ka.sa.sa_mask, &act->sa_mask, + sizeof(new_ka.sa.sa_mask)) || + get_user(new_ka.sa.sa_flags, &act->sa_flags)) + return -EFAULT; + } + + rval = do_sigaction(sig, (ptr) ? &new_ka : NULL, + (ptr2) ? &old_ka : NULL); + DebugSCP("sys_rt_sigaction rval = %ld\n", rval); + + if (!rval && ptr2) { + oact = (prot_sigaction_old_t __user *)ptr2; + + if (old_ka.sa.sa_handler != SIG_DFL && + old_ka.sa.sa_handler != SIG_IGN) { + if (IS_CPU_ISET_V6()) + return -ENOSYS; + rval = PUT_USER_PL_V2(&oact->sa_handler, + (u64)old_ka.sa.sa_handler); + } else { + rval = put_user((u64)old_ka.sa.sa_handler, + &oact->sa_handler.PL_lo_value); + } + + rval = (rval) ?: copy_to_user( + &oact->sa_mask, &old_ka.sa.sa_mask, + sizeof(old_ka.sa.sa_mask)); + rval = (rval) ?: put_user(old_ka.sa.sa_flags, &oact->sa_flags); + } + + return rval; +} + +long protected_sys_rt_sigaction_ex(int sig, + const void __user *ptr, void __user *ptr2, + const size_t sigsetsize) +{ + long rval; + struct k_sigaction new_ka, old_ka; + prot_sigaction_t __user *act; + prot_sigaction_t __user *oact; + int tag_lo; + int tag_hi; + + if (sigsetsize != sizeof(sigset_t)) { + DbgSCP_ALERT("SigSetSize seems extended beyond 64 bits.\n"); + return -EINVAL; + } + + if (ptr) { + e2k_pl_lo_t pl_lo; + e2k_pl_hi_t pl_hi; + + act = (prot_sigaction_t __user *)ptr; + + if (GET_USER_VAL_TAGD(AW(pl_lo), tag_lo, + &act->sa_handler.PLLO_item)) { + DebugSCP("Bad act->sa_handler = %px\n", + &act->sa_handler); + return -EFAULT; + } + new_ka.sa.sa_handler = (__sighandler_t)pl_lo.target; + + if (!IS_CPU_ISET_V6()) { + if (tag_lo != E2K_PL_ETAG && + new_ka.sa.sa_handler != 
SIG_DFL && + new_ka.sa.sa_handler != SIG_IGN) { + DebugSCP("Wrong act->sa_handler tag 0x%x %px %px\n", + tag_lo, &act->sa_handler.PLLO_item, + new_ka.sa.sa_handler); + return -EINVAL; + } + } else if (new_ka.sa.sa_handler != SIG_DFL && + new_ka.sa.sa_handler != SIG_IGN) { + /* it is CPU ISET version >= V6 */ + if (tag_lo != E2K_PLLO_ETAG) { + DebugSCP("Bad act->sa_handler lo tag 0x%x\n", + tag_lo); + return -EINVAL; + } + if (GET_USER_VAL_TAGD(pl_hi.PL_hi_value, tag_hi, + &act->sa_handler.PLHI_item)) { + DebugSCP("Bad act->sa_handler = %px\n", + &act->sa_handler.PLHI_item); + return -EFAULT; + } + if (tag_hi != E2K_PLHI_ETAG) { + DebugSCP("Bad act->sa_handler " + "hi tag 0x%x\n", + tag_hi); + return -EINVAL; + } + if (pl_hi.PL_hi_cui == 0) { + DebugSCP("Zero CUI of act->sa_handler " + "procedure label\n"); + return -EINVAL; + } + new_ka.sa_handler_cui = pl_hi.PL_hi_cui; + } + + /* Note that I intentionally ignore sa_restorer below + because it seems to be useless on E2K. */ + + if (copy_from_user(&new_ka.sa.sa_mask, &act->sa_mask, + sizeof(new_ka.sa.sa_mask)) || + get_user(new_ka.sa.sa_flags, &act->sa_flags)) + return -EFAULT; + } + + rval = do_sigaction(sig, (ptr) ? &new_ka : NULL, + (ptr2) ? 
&old_ka : NULL); + DebugSCP("rval = %ld\n", rval); + if (rval) + return rval; + + if (ptr2) { + oact = (prot_sigaction_t __user *)ptr2; + + rval = PUT_USER_PL(&oact->sa_handler, (u64)old_ka.sa.sa_handler, + old_ka.sa_handler_cui); + rval = (rval) ?: copy_to_user( + &oact->sa_mask, &old_ka.sa.sa_mask, + sizeof(old_ka.sa.sa_mask)); + rval = (rval) ?: put_user(old_ka.sa.sa_flags, &oact->sa_flags); + + if (rval) + DbgSCP_ALERT("failed to return 'oldact'.\n"); + } + + return rval; +} + +#endif /* CONFIG_PROTECTED_MODE */ diff --git a/arch/e2k/kernel/smp.c b/arch/e2k/kernel/smp.c new file mode 100644 index 000000000000..be95cd25629d --- /dev/null +++ b/arch/e2k/kernel/smp.c @@ -0,0 +1,488 @@ +/* + * SMP Support + * + * Lots of stuff stolen from arch/i386/kernel/smp.c + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#undef DEBUG_SMP_MODE +#undef DebugSMP +#define DEBUG_SMP_MODE 0 +#define DebugSMP(...) DebugPrint(DEBUG_SMP_MODE, ##__VA_ARGS__) + +#undef DEBUG_DATA_BREAKPOINT_MODE +#undef DebugDBP +#define DEBUG_DATA_BREAKPOINT_MODE 0 /* data breakpoint debugging */ +#define DebugDBP(...) DebugPrint(DEBUG_DATA_BREAKPOINT_MODE, ##__VA_ARGS__) + +int refresh_processor; + + +/* + * Flush a specified user mapping + */ + +static void +flush_tlb_mm_ipi(void* info) +{ + struct mm_struct *mm = (struct mm_struct *)info; + + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); + inc_irq_stat(irq_tlb_count); + + __flush_cpu_root_pt_mm(mm); + __flush_tlb_mm(mm); +} + +void +native_smp_flush_tlb_mm(struct mm_struct *const mm) +{ + preempt_disable(); + + /* Signal to all users of this mm that it has been flushed. + * Invalid context will be updated while activating or switching to. */ + memset(mm->context.cpumsk, 0, nr_cpu_ids * sizeof(mm->context.cpumsk[0])); + + /* See comment about memory barriers in do_switch_mm(). 
*/ + smp_mb(); + + __flush_tlb_mm(mm); + + /* Check that mm_cpumask() has some other CPU set */ + if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) { + /* Send flush ipi to all other cpus in mm_cpumask(). */ + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); + smp_call_function_many(mm_cpumask(mm), flush_tlb_mm_ipi, mm, 1); + } + + preempt_enable(); +} + +/* + * Flush a single page from TLB + */ + +void native_smp_flush_tlb_page(struct vm_area_struct *const vma, + const e2k_addr_t addr) +{ + struct mm_struct *const mm = vma->vm_mm; + int i, cpu; + + preempt_disable(); + + cpu = smp_processor_id(); + + /* See comment in smp_flush_tlb_range() */ + for (i = 0; i < nr_cpu_ids; i++) { + if (i == cpu) + continue; + mm->context.cpumsk[i] = 0; + } + + __flush_tlb_page(mm, addr); + + /* See comment about memory barriers in do_switch_mm(). */ + smp_mb(); + + /* Check that mm_cpumask() has some other CPU set */ + if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) { + /* Send flush ipi to all other cpus in mm_cpumask(). */ + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); + smp_call_function_many(mm_cpumask(mm), flush_tlb_mm_ipi, + vma->vm_mm, 1); + } + + preempt_enable(); +} +EXPORT_SYMBOL(native_smp_flush_tlb_page); + +/* + * Flush all processes TLBs + */ + +static void flush_tlb_all_ipi(void* info) +{ + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); + inc_irq_stat(irq_tlb_count); + + __flush_tlb_all(); + __flush_cpu_root_pt(); +} + +void native_smp_flush_tlb_all(void) +{ + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); + smp_call_function(flush_tlb_all_ipi, NULL, 1); + __flush_tlb_all(); +} +EXPORT_SYMBOL(native_smp_flush_tlb_all); + + +/* + * Flush a range of pages + */ + +void native_smp_flush_tlb_range(struct mm_struct *const mm, + const e2k_addr_t start, const e2k_addr_t end) +{ + int cpu, i; + + preempt_disable(); + + cpu = smp_processor_id(); + + /* Signal to all users of this mm that it has been flushed. 
+ * Invalid context will be updated while activating or switching to. + * + * Things to consider: + * + * 1) Clearing the whole context for CPUs to which we send the flush + * ipi looks unnecessary, but is needed to avoid race conditions. The + * problem is that there is a window between reading mm_cpumask() and + * deciding which context should be set to 0. In that window situation + * could have changed, so the only safe way is to set mm context on + * ALL cpus to 0. + * + * 2) Setting it to 0 essentially means that the cpus which receive the + * flush ipis cannot flush only a range of pages because they do not + * know the context, so they will flush the whole mm. + * + * 3) TODO FIXME This way of doing things is OK for 2 CPUs, for 4 CPUs, + * but it may become a problem for e2s with its 64 CPUs if there is a + * really-multi-threaded application running. If this is the case it + * would be better to implement scheme which will remember pending TLB + * flush requests. But such a scheme will greatly increase struct mm + * size (64 * 4 * 32 = 8 Kb for 64-processors system with a maximum + * of 4 simultaneously pending flushes each taking up 32 bytes). + * + * This problem (3) only gets worse when we are making all pages valid + * since EVERY mmap/sys_brk and some other calls will end up sending + * 63 flush ipis which will flush all the TLBs. + */ + for (i = 0; i < nr_cpu_ids; i++) { + if (i == cpu) + /* That being said, current CPU can still + * flush only the given range of pages. */ + continue; + mm->context.cpumsk[i] = 0; + } + + __flush_tlb_range(mm, start, end); + + /* See comment about memory barriers in do_switch_mm(). */ + smp_mb(); + + /* Check that mm_cpumask() has some other CPU set */ + if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) { + /* Send flush ipi to all other cpus in mm_cpumask(). 
*/ + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); + smp_call_function_many(mm_cpumask(mm), flush_tlb_mm_ipi, mm, 1); + } + + preempt_enable(); +} +EXPORT_SYMBOL(native_smp_flush_tlb_range); + +/* + * As native_smp_flush_tlb_range() but for pmd's + */ +void native_smp_flush_pmd_tlb_range(struct mm_struct *const mm, + const e2k_addr_t start, const e2k_addr_t end) +{ + int cpu, i; + + preempt_disable(); + + cpu = smp_processor_id(); + + /* See comment in smp_flush_tlb_range() */ + for (i = 0; i < nr_cpu_ids; i++) { + if (i == cpu) + /* That being said, current CPU can still + * flush only the given range of pages. */ + continue; + mm->context.cpumsk[i] = 0; + } + + __flush_pmd_tlb_range(mm, start, end); + + /* See comment about memory barriers in do_switch_mm(). */ + smp_mb(); + + /* Check that mm_cpumask() has some other CPU set */ + if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) { + /* Send flush ipi to all other cpus in mm_cpumask(). */ + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); + smp_call_function_many(mm_cpumask(mm), flush_tlb_mm_ipi, mm, 1); + } + + preempt_enable(); +} + +/* + * Flush a range of pages and page tables. + */ + +void native_smp_flush_tlb_range_and_pgtables(struct mm_struct *const mm, + const e2k_addr_t start, const e2k_addr_t end) +{ + int i, cpu; + + preempt_disable(); + + cpu = smp_processor_id(); + + /* See comment in smp_flush_tlb_range() */ + for (i = 0; i < nr_cpu_ids; i++) { + if (i == cpu) + continue; + mm->context.cpumsk[i] = 0; + } + + __flush_tlb_range_and_pgtables(mm, start, end); + + /* See comment about memory barriers in do_switch_mm(). */ + smp_mb(); + + /* Check that mm_cpumask() has some other CPU set */ + if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) { + /* Send flush ipi to all other cpus in mm_cpumask(). 
*/ + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); + smp_call_function_many(mm_cpumask(mm), flush_tlb_mm_ipi, mm, 1); + } + + preempt_enable(); +} + +static void smp_flush_icache_range_ipi(void *info) +{ + icache_range_t *icache_range = (icache_range_t *)info; + + __flush_icache_range(icache_range->start, icache_range->end); +} + +void native_smp_flush_icache_range(e2k_addr_t start, e2k_addr_t end) +{ + icache_range_t icache_range; + + icache_range.start = start; + icache_range.end = end; + + migrate_disable(); + smp_call_function(smp_flush_icache_range_ipi, &icache_range, 1); + __flush_icache_range(start, end); + migrate_enable(); +} +EXPORT_SYMBOL(native_smp_flush_icache_range); + +static void smp_flush_icache_range_array_ipi(void *info) +{ + icache_range_array_t *icache_range_arr = (icache_range_array_t *)info; + + __flush_icache_range_array(icache_range_arr); +} + +void native_smp_flush_icache_range_array(icache_range_array_t *icache_range_arr) +{ + migrate_disable(); + smp_call_function( + smp_flush_icache_range_array_ipi, icache_range_arr, 1); + __flush_icache_range_array(icache_range_arr); + migrate_enable(); +} + +static void smp_flush_icache_kernel_line_ipi(void *info) +{ + flush_ICACHE_kernel_line(*((e2k_addr_t *)info)); +} + +void native_smp_flush_icache_kernel_line(e2k_addr_t addr) +{ + smp_call_function(smp_flush_icache_kernel_line_ipi, &addr, 1); + flush_ICACHE_kernel_line(addr); +} + +static void smp_flush_icache_page_ipi(void* info) +{ + icache_page_t *icache_page = (icache_page_t *)info; + + __flush_icache_page(icache_page->vma, icache_page->page); +} + +void native_smp_flush_icache_page(struct vm_area_struct *vma, struct page *page) +{ + icache_page_t icache_page; + struct mm_struct *mm = vma->vm_mm; + int cpu, i; + + migrate_disable(); + + cpu = smp_processor_id(); + + /* See comment in smp_flush_tlb_range() */ + for (i = 0; i < nr_cpu_ids; i++) { + if (i == cpu) + continue; + mm->context.cpumsk[i] = 0; + } + + __flush_icache_page(vma, page); + + /* 
See comment about memory barriers in do_switch_mm(). */ + smp_mb(); + + icache_page.vma = vma; + icache_page.page = page; + + /* Check that mm_cpumask() has some other CPU set */ + if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) + smp_call_function(smp_flush_icache_page_ipi, &icache_page, 1); + + migrate_enable(); +} + +static void smp_flush_icache_all_ipi(void *info) +{ + __flush_icache_all(); +} + +void smp_flush_icache_all(void) +{ + smp_call_function(smp_flush_icache_all_ipi, NULL, 1); + __flush_icache_all(); +} +EXPORT_SYMBOL(smp_flush_icache_all); + +void smp_send_refresh(void) +{ + refresh_processor = refresh_processor | ~(0U); + +} + + +static void stop_this_cpu_ipi(void *dummy) +{ + raw_all_irq_disable(); + + set_cpu_online(smp_processor_id(), false); + + spin_begin(); + +#ifdef CONFIG_KVM_GUEST_KERNEL + //TODO Why is this needed? + refresh_processor = refresh_processor & ~(1U << smp_processor_id()); + for (;;) { + if (refresh_processor & (1U << smp_processor_id())) + break; + spin_cpu_relax(); + } + refresh_processor = refresh_processor & ~(1U << smp_processor_id()); +#else + while (1) + spin_cpu_relax(); +#endif +} + +void smp_send_stop(void) +{ + unsigned long timeout; + + /* + * NMI may stop other CPU holding printk ringbuffer lock, causing panicking CPU to + * hang on next printk (bug 128863). + * As a workaround, use MI to stop other CPUs. 
+ * + * nmi_call_function(stop_this_cpu_ipi, NULL, 0, 1000); + */ + smp_call_function(stop_this_cpu_ipi, NULL, 0); + + /* Interrupt delivery may take a while, wait for up to 30 seconds for other CPUs to stop */ + timeout = 30 * USEC_PER_SEC; + while (num_online_cpus() > 1 && timeout--) + udelay(1); +} + +#ifdef CONFIG_DATA_BREAKPOINT + +static void smp_set_data_breakpoint_ipi(void *info) +{ + hw_data_bp_t *data_bp = (hw_data_bp_t *)info; + + set_hardware_data_breakpoint(data_bp->address, data_bp->size, + data_bp->write, data_bp->read, data_bp->stop, data_bp->cp_num); + DebugDBP("set data breakpoint: CPU #%d address %px size %d bytes " + "write %d read %d stop %d BAR #%d\n", + smp_processor_id(), data_bp->address, data_bp->size, + data_bp->write != 0, data_bp->read != 0, data_bp->stop != 0, + data_bp->cp_num); +} + +void smp_set_data_breakpoint(void *address, u64 size, + bool write, bool read, bool stop, const int cp_num) +{ + hw_data_bp_t data_bp; + + data_bp.address = address; + data_bp.size = size; + data_bp.write = write; + data_bp.read = read; + data_bp.stop = stop; + data_bp.cp_num = cp_num; + + smp_call_function(smp_set_data_breakpoint_ipi, &data_bp, 1); + + smp_set_data_breakpoint_ipi(&data_bp); +} + +static int smp_reset_data_breakpoint_ipi(void *info) +{ + void *address = info; + int cp_num; + + cp_num = reset_hardware_data_breakpoint(address); + if (cp_num >= 0 && cp_num < 4) { + DebugDBP("reset data breakpoint: CPU #%d address %px BAR #%d\n", + smp_processor_id(), address, cp_num); + } else if (cp_num < 0) { + DebugDBP("reset data breakpoint failed on CPU #%d " + "address %px, error %d\n", + smp_processor_id(), address, cp_num); + } else { + DebugDBP("reset data breakpoint: could not find on CPU #%d " + "address %px\n", + smp_processor_id(), address); + } + return cp_num; +} + +int smp_reset_data_breakpoint(void *address) +{ + smp_call_function((void (*)(void *))smp_reset_data_breakpoint_ipi, + address, 1 /* wait */); + return 
smp_reset_data_breakpoint_ipi(address); +} +#endif /* CONFIG_DATA_BREAKPOINT */ + diff --git a/arch/e2k/kernel/smpboot.c b/arch/e2k/kernel/smpboot.c new file mode 100644 index 000000000000..14ce18bd3e51 --- /dev/null +++ b/arch/e2k/kernel/smpboot.c @@ -0,0 +1,568 @@ +/* + * E2K SMP booting functions + * + * Much of the core SMP work is based on previous work by Thomas Radke, to + * whom a great many thanks are extended. + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define DEBUG_SMP_BOOT_MODE 0 /* SMP Booting process */ +#define DebugSMPB(...) DebugPrint(DEBUG_SMP_BOOT_MODE ,##__VA_ARGS__) + +/* + * A small decription of what functions in this file do. + * + * + * SMP boot process: + * + * 1) Bootstrap processor (BSP) calls start_kernel() and initializes + * the most basic things while all other cpus spin in e2k_start_secondary() + * on callin_go cpumask waiting for the signal from BSP. + * + * 2) BSP calls cpu_up() for every other cpu in the system. cpu_up() + * calls architecture-dependent __cpu_up() which does the following: + * + * 2.1) Creates idle task structure on that cpu. + * + * 2.2) Sets the corresponding bit in the callin_go cpumask. + * + * 3.3) Waits until the cpu sets the corresponding bit in the + * cpu_online_mask cpumask. + * + * 3) After 2.2 secondary cpus set up idle task struct (created by + * BSP in 2.1), initialize LAPIC and some other things like clearing + * themselves from callin_go which is needed for hotplug. + * + * 4) BSP goes on with the initialization, other CPUs call cpu_idle(). + */ + +static int bsp_cpu; +cpumask_t callin_go; + +/* + * __nodedata variables should lay in single cache line. In other case access + * to neighboring variables could lead to hardware hang. It can be in case of + * lowmem access to neighboring variables and the following highmem access to + * __nodedata variables or vice versa. 
+ */ + +#ifdef CONFIG_NUMA +nodemask_t ____cacheline_aligned_in_smp __nodedata node_has_dup_kernel_map; + +atomic_t ____cacheline_aligned_in_smp __nodedata +node_has_dup_kernel_num = ATOMIC_INIT(0); + +int ____cacheline_aligned_in_smp __nodedata +all_nodes_dup_kernel_nid[MAX_NUMNODES]; + +#ifndef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT +pgd_t ____cacheline_aligned_in_smp __nodedata *all_nodes_pg_dir[MAX_NUMNODES]; +#else /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ +pg_dir_t ____cacheline_aligned_in_smp __nodedata +*all_nodes_pg_dir[MAX_NUMNODES]; +#endif /* ! ONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ +#endif /* CONFIG_NUMA */ + +static int old_num_online_cpus; + + +void native_wait_for_cpu_booting(void) +{ + /* all waitings on real CPU */ +} +void native_wait_for_cpu_wake_up(void) +{ + /* all waitings on real CPU */ +} +int native_activate_cpu(int cpu_id) +{ + /* all waitings on real CPUs, so nothing to activate */ + return 0; +} +int native_activate_all_cpus(void) +{ + /* all waitings on real CPUs, so nothing to activate */ + return 0; +} + +#ifdef CONFIG_NUMA +/* + * Which logical CPUs are on which nodes + */ +cpumask_t node_to_cpumask_map[MAX_NUMNODES]; +EXPORT_SYMBOL(node_to_cpumask_map); + +/* + * Allocate node_to_cpumask_map based on node_online_map + * Requires cpu_online_mask to be valid. 
+ */ +void __init_recv setup_node_to_cpumask_map(void) +{ + int node; + + for (node = 0; node < MAX_NUMNODES; node ++) { + cpumask_clear(&node_to_cpumask_map[node]); + } + for_each_online_node(node) { + node_to_cpumask_map[node] = node_to_cpumask(node); + } +} +#endif /* CONFIG_NUMA */ + +/* Used solely to pass pointer from cpu_up() running on BSP + * to e2k_start_secondary() running on AP */ +struct task_struct *idle_tasks[NR_CPUS]; + +/* + * Setup idle task structure for secondary CPU + */ +void native_setup_secondary_task(int cpu) +{ + struct task_struct *idle = idle_tasks[cpu]; + + set_current_thread_info(task_thread_info(idle), idle); + + /* + * Init thread structure + */ + thread_init(); + + /* + * All kernel threads share the same mm context. + */ + mmgrab(&init_mm); + current->active_mm = &init_mm; + BUG_ON(current->mm); +} + +void wait_for_startup(int cpuid, int hotplug) +{ + do { + wait_for_cpu_booting(); + if (hotplug && machine.clk_off) + machine.clk_off(); + } while (!cpumask_test_cpu(cpuid, &callin_go)); +} + +void __init_recv e2k_setup_secondary_apic(void) +{ +#ifdef CONFIG_L_LOCAL_APIC + /* + * Set up the local APIC of the AP CPU + */ + if (apic->smp_callin_clear_local_apic) + apic->smp_callin_clear_local_apic(); + setup_local_APIC(); + end_local_APIC_setup(); +#endif +} + +void e2k_start_secondary_switched_stacks(int cpuid, int cpu) +{ +#ifdef CONFIG_L_LOCAL_APIC + int phys_id; +#endif /* CONFIG_L_LOCAL_APIC */ + + /* + * This works even if the APIC is not enabled + */ +#ifdef CONFIG_L_LOCAL_APIC + phys_id = read_pic_id(); + if (phys_id != cpuid) { + INIT_BUG("boot bug, CPU #%d is not the same as ID #%d\n", + cpuid, phys_id); + } +#endif /* CONFIG_L_LOCAL_APIC */ + + set_smp_processor_id(cpu); + + /* By now percpu areas should have been initialized by BSP */ + set_my_cpu_offset(__per_cpu_offset[cpu]); + + /* + * By this point BSP has already cleared and write-protected + * ZERO_PAGE, so flush it from TLB + */ + flush_TLB_page((unsigned long) 
empty_zero_page, + E2K_KERNEL_CONTEXT); + + /* + * The BSP has finished the init stage and is spinning on + * cpu_online_mask until we finish. We are free to set up this + * CPU, first the init_task structure for this CPU. + */ +#ifdef CONFIG_L_LOCAL_APIC + if (!physid_isset(phys_id, phys_cpu_present_map)) { + INIT_WARNING("boot bug, CPU #%d ID #%d is not present in physical CPU bitmap 0x%lx\n", + cpuid, phys_id, + physids_coerce(&phys_cpu_present_map)); + physid_set(phys_id, phys_cpu_present_map); + } +#endif + + setup_secondary_task(cpu); + + set_secondary_space_MMU_state(); + + /* + * This is to make sure that idle task is running with + * preemption disabled which is a more robust way of doing it. + * For BSP this is done in init/main.c. + */ + preempt_disable(); + + DebugSMPB("e2k_start_secondary, before e2k_setup_secondary_pic().\n"); + e2k_setup_secondary_pic(); + + DebugSMPB("Stack at about %px\n", &cpuid); + + /* + * Paravirt guest should not enable PIC timer (guest handler is not yet started) + */ + if (!(paravirt_enabled() && !IS_HV_GM())) { + setup_secondary_pic_clock(); + store_cpu_info(cpu); + } + + __setup_vector_irq(cpu); + + notify_cpu_starting(cpu); + + /* Allow BSP to continue */ + DebugSMPB("CPU #%d set bit in cpu_online_mask\n", cpuid); + set_cpu_online(cpu, true); + + /* secondary CPU Local PIC VIRQs handler can be now started up */ + startup_local_pic_virq(cpu); + + /* wake up BSP CPU waiting for this CPU start up */ + wmb(); + activate_cpu(bsp_cpu); + + cpumask_clear_cpu(cpuid, &callin_go); + + local_irq_enable(); + + BOOT_TRACEPOINT("e2k_start_secondary finished, going to cpu_idle()"); + + /* + * Processor should go to idle task + */ + DebugSMPB("CPU #%d call cpu_idle()\n", cpuid); + wmb(); + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); + + BUG(); +} + +/* + * Activate a secondary processor. 
+ */ +void __init e2k_start_secondary(int cpuid) +{ + struct task_struct *idle; + unsigned long stack_base; + int cpu; + + wait_for_startup(cpuid, 0); + + /* + * Paired with smp_wmb() in e2k_smp_boot_cpu() + */ + smp_rmb(); + + /* + * Now switch to properly allocated stack (with proper size and node) + */ + cpu = cpuid_to_cpu(cpuid); + idle = idle_tasks[cpu]; + stack_base = (unsigned long) idle->stack; + BUG_ON(!stack_base); + ap_switch_to_init_stack(stack_base, cpuid, cpu); +} + +#ifdef CONFIG_HOTPLUG_CPU +/* Reset stacks and call start_secondary */ +void start_secondary_resume(int cpuid, int cpu) +{ + unsigned long stack_base = (unsigned long) idle_tasks[cpu]->stack; + + BUG_ON(!stack_base); + NATIVE_SWITCH_TO_KERNEL_STACK( + stack_base + KERNEL_P_STACK_OFFSET, KERNEL_P_STACK_SIZE, + stack_base + KERNEL_PC_STACK_OFFSET, KERNEL_PC_STACK_SIZE, + stack_base + KERNEL_C_STACK_OFFSET, KERNEL_C_STACK_SIZE); + + E2K_JUMP_WITH_ARGUMENTS(e2k_start_secondary_switched_stacks, 2, + cpuid, cpu); +} +#endif + +/* + * Various sanity checks. + */ +static int __init_recv smp_sanity_check(unsigned max_cpus) +{ + preempt_disable(); + + /* + * If we couldn't find an SMP configuration at boot time, + * get out of here now! + */ + if (!smp_found_config) { + preempt_enable(); + pr_err("SMP motherboard not detected.\n"); + return -1; + } + + /* + * Should not be necessary because the MP table should list the boot + * CPU too, but we do it for the sake of robustness anyway. + */ + if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) { + pr_err("weird, boot CPU (#%d) not listed by the BIOS.\n", + boot_cpu_physical_apicid); + physid_set(hard_smp_processor_id(), phys_cpu_present_map); + } + preempt_enable(); + + /* + * If SMP should be disabled, then really disable it! + */ + if (!max_cpus) { + pr_info("SMP mode deactivated.\n"); + return -1; + } + + return 0; +} + + +/* + * Cycle through the processors to complete boot on each CPU. 
+ * This function is called on bootstrap processor only. + * The number of BSP is boot_cpu_physical_apicid + */ + +void __init_recv e2k_smp_prepare_cpus(unsigned int max_cpus, int recovery) + +{ + int cpu; + int timeout; + + /* + * Initialize the logical to physical CPU number mapping + * and the per-CPU profiling counter/multiplier + */ + + /* + * Setup boot CPU information + */ + if (!recovery) { + pr_info("BSP %s ID: %d\n", cpu_has_epic() ? "EPIC" : "APIC", + boot_cpu_physical_apicid); + } else if (boot_cpu_physical_apicid != read_pic_id()) { + pr_err("Bootstrap CPU PIC ID is %d then it " + "should be %d as before recovery\n", + boot_cpu_physical_apicid, read_pic_id()); + } + + /* + * Wait 5 sec. total for all other CPUs will be ready to do + * own sequences of initialization + */ + for (timeout = 0; timeout < 50000; timeout++) { + if (num_present_cpus() >= phys_cpu_present_num) + /* all CPU are in the 'e2k_start_secondary()' + * function */ + break; + udelay(100); + } + if (num_present_cpus() < phys_cpu_present_num) { + pr_err("Only %d CPU(s) from %d has booted\n", + num_present_cpus(), phys_cpu_present_num); + for (cpu = 0; cpu < NR_CPUS; cpu ++) { + if (!cpu_present(cpu)) + pr_err("CPU #%d has not booted!\n", cpu); + } + } + + smp_sanity_check(max_cpus); + + preempt_disable(); + if (read_pic_id() != boot_cpu_physical_apicid) { + pr_err("local PIC id #%d of bootstrap CPU is not #%d\n", + read_pic_id(), + boot_cpu_physical_apicid); + BUG(); + } + preempt_enable(); + + DebugSMPB("CPU present number is %d, physical map: 0x%lx\n", + phys_cpu_present_num, physids_coerce(&phys_cpu_present_map)); +} + +/* + * Called by smp_init prepare the secondaries + */ +void __init smp_prepare_cpus(unsigned int max_cpus) +{ + e2k_smp_prepare_cpus(max_cpus, 0); +} + +#ifdef CONFIG_RECOVERY +void smp_prepare_cpus_to_recover(unsigned int max_cpus) +{ + e2k_smp_prepare_cpus(max_cpus, 1); +} +#endif /* CONFIG_RECOVERY */ + +static int e2k_smp_boot_cpu(unsigned int cpu, int recovery, 
int hotplug) +{ + int cpuid = cpu_to_cpuid(cpu); + + if (!cpu_present(cpu)) { + DebugSMPB("AP CPU #%d does not present\n", cpu); + return -ENOSYS; + } + + if (!recovery && !hotplug) { + /* bootstrap CPU can only setup Local PIC VIRQs handler */ + setup_local_pic_virq(cpu); + } + + WARN_ON(!recovery && raw_irqs_disabled()); + + /* + * Paired with smp_rmb() in e2k_start_secondary() + */ + smp_wmb(); + + cpumask_set_cpu(cpuid, &callin_go); + + /* Barrier between write to callin_go and sending + * a wakeup (be it machine.clk_on or a hypercall) */ + wmb(); + activate_cpu(cpu); + + if (hotplug && machine.clk_on) + machine.clk_on(cpu); + + DebugSMPB("wait for CPU %d to come online\n", cpu); + while (!cpu_online(cpu) || cpumask_test_cpu(cpuid, &callin_go)) + wait_for_cpu_wake_up(); + + DebugSMPB("finished for CPU #%d\n", cpu); + return 0; +} + +static void __init_recv +e2k_smp_cpus_done(unsigned int max_cpus, int recovery) +{ + if (num_online_cpus() < min(num_present_cpus(), max_cpus)) { + pr_err("Only %d CPU(s) from %d has completed initialization\n", + num_online_cpus(), min(num_present_cpus(), + max_cpus ? 
max_cpus : 1)); + } + + if (recovery && old_num_online_cpus != num_online_cpus()) + panic("Number of recovered CPU(s) %d is not the same as before recovery (%d)!\n", + num_online_cpus(), old_num_online_cpus); + + pr_info("Total of %d processors activated\n", num_online_cpus()); + +#ifdef CONFIG_NUMA + setup_node_to_cpumask_map(); +#endif /* CONFIG_NUMA */ + + setup_ioapic_dest(); + + setup_processor_pic(); + + if (!recovery) + old_num_online_cpus = num_online_cpus(); + + DebugSMPB("finished\n"); +} + +#ifdef CONFIG_PARAVIRT_SPINLOCKS +#include +#endif /* CONFIG_PARAVIRT_SPINLOCKS */ + +void __init smp_prepare_boot_cpu(void) +{ +#ifdef CONFIG_PARAVIRT_SPINLOCKS + /* + * Allocate "PV qspinlock" global hash table used by paravirt spinlocks + */ + if (cpu_has(CPU_FEAT_ISET_V6) && READ_CORE_MODE_REG().gmi) + __pv_init_lock_hash(); +#endif /* CONFIG_PARAVIRT_SPINLOCKS */ +} + +#ifdef CONFIG_RECOVERY +void +smp_prepare_boot_cpu_to_recover(void) +{ + bsp_cpu = smp_processor_id(); + + /* + * We want to re-use cpu_online mask to synchronize SMP booting process + * so re-initialize it here. + * + * Later we will compare old_num_online_cpus (number of CPUs booted + * when creating the recovery point) with num_online_cpus() (number of + * CPUs booted when recovering) and panic if they are not equal. 
+ */ + init_cpu_online(cpumask_of(bsp_cpu)); +} +#endif /* CONFIG_RECOVERY */ + +int __cpu_up(unsigned int cpu, struct task_struct *tidle) +{ + int hotplug = (system_state >= SYSTEM_RUNNING); + + idle_tasks[cpu] = tidle; + + return e2k_smp_boot_cpu(cpu, 0, hotplug); +} + +/* number of CPUs arrived to sync while boot-time init completion */ +cpu_sync_count_t ____cacheline_aligned_in_smp init_num_arrived = {.pad = 0}; + +#ifdef CONFIG_RECOVERY +int +cpu_recover(unsigned int cpu) +{ + return e2k_smp_boot_cpu(cpu, 1, 0); +} +#endif /* CONFIG_RECOVERY */ + +void __init smp_cpus_done(unsigned int max_cpus) +{ + if (smp_found_config) + e2k_smp_cpus_done(max_cpus, 0); +} + +#ifdef CONFIG_RECOVERY +void smp_cpus_recovery_done(unsigned int max_cpus) +{ + if (smp_found_config) + e2k_smp_cpus_done(max_cpus, 1); +} +#endif /* CONFIG_RECOVERY */ diff --git a/arch/e2k/kernel/stacktrace.c b/arch/e2k/kernel/stacktrace.c new file mode 100644 index 000000000000..7da1f80114e2 --- /dev/null +++ b/arch/e2k/kernel/stacktrace.c @@ -0,0 +1,124 @@ +/* + * Stack trace management functions + * + * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar + */ +#include +#include +#include +#include +#include +#include + + +struct save_stack_address_kernel_args { + struct stack_trace *trace; + struct pt_regs *regs; +}; + +static int save_stack_address_kernel(e2k_mem_crs_t *frame, + unsigned long real_frame_addr, + unsigned long corrected_frame_addr, int flags, void *arg) +{ + struct save_stack_address_kernel_args *args = arg; + struct stack_trace *trace = args->trace; + struct pt_regs *regs = args->regs; + u64 ip; + + if (regs && corrected_frame_addr > (AS(regs->stacks.pcsp_lo).base + + AS(regs->stacks.pcsp_hi).ind)) + return 0; + + if (AS(frame->cr1_lo).pm == 0) + return 1; + + if (trace->skip > 0) { + trace->skip--; + return 0; + } + + ip = AS_STRUCT(frame->cr0_hi).ip << 3; + + if (likely(trace->nr_entries < trace->max_entries)) + trace->entries[trace->nr_entries++] = ip; + else + return 1; + + 
return 0; +} + +static int save_stack_address_user(e2k_mem_crs_t *frame, + unsigned long real_frame_addr, + unsigned long corrected_frame_addr, int flags, void *arg) +{ + struct stack_trace *trace = arg; + u64 ip; + + if (AS(frame->cr1_lo).pm) + return 0; + + if (trace->skip > 0) { + trace->skip--; + return 0; + } + + ip = AS_STRUCT(frame->cr0_hi).ip << 3; + + if (likely(trace->nr_entries < trace->max_entries)) + trace->entries[trace->nr_entries++] = ip; + else + return 1; + + return 0; +} + + +/* + * Save stack-backtrace addresses into a stack_trace buffer. + */ +void save_stack_trace(struct stack_trace *trace) +{ + struct save_stack_address_kernel_args args; + + args.trace = trace; + args.regs = NULL; + parse_chain_stack(0, NULL, save_stack_address_kernel, &args); + + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; +} +EXPORT_SYMBOL_GPL(save_stack_trace); + +void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) +{ + struct save_stack_address_kernel_args args; + + args.trace = trace; + args.regs = regs; + parse_chain_stack(0, NULL, save_stack_address_kernel, &args); + + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; +} + +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ + struct save_stack_address_kernel_args args; + + args.trace = trace; + args.regs = NULL; + parse_chain_stack(0, tsk, save_stack_address_kernel, &args); + + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; +} +EXPORT_SYMBOL_GPL(save_stack_trace_tsk); + +void save_stack_trace_user(struct stack_trace *trace) +{ + parse_chain_stack(PCS_USER, NULL, save_stack_address_user, trace); + + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; +} + diff --git a/arch/e2k/kernel/sys_32.c b/arch/e2k/kernel/sys_32.c new file mode 100644 index 000000000000..b455101756dc --- 
/dev/null +++ b/arch/e2k/kernel/sys_32.c @@ -0,0 +1,93 @@ +/* linux/arch/e2k/kernel/sys_32.c 1.10 08/21/2001. + * + * Copyright (C) 2001 MCST + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + + +/* warning: next two assume little endian */ +asmlinkage long sys32_pread64(unsigned int fd, char __user *ubuf, + compat_size_t count, unsigned long poslo, unsigned long poshi) +{ + return sys_pread64(fd, ubuf, count, (poshi << 32) | poslo); +} + +asmlinkage long sys32_pwrite64(unsigned int fd, char __user *ubuf, + compat_size_t count, unsigned long poslo, unsigned long poshi) +{ + return sys_pwrite64(fd, ubuf, count, (poshi << 32) | poslo); +} + +asmlinkage long sys32_readahead(int fd, unsigned long offlo, + unsigned long offhi, compat_size_t count) +{ + return sys_readahead(fd, (offhi << 32) | offlo, count); +} + +asmlinkage long sys32_fadvise64(int fd, unsigned long offlo, + unsigned long offhi, compat_size_t len, int advice) +{ + return sys_fadvise64_64(fd, (offhi << 32) | offlo, len, advice); +} + +asmlinkage long sys32_fadvise64_64(int fd, + unsigned long offlo, unsigned long offhi, + unsigned long lenlo, unsigned long lenhi, int advice) +{ + return sys_fadvise64_64(fd, (offhi << 32) | offlo, + (lenhi << 32) | lenlo, advice); +} + +asmlinkage long sys32_sync_file_range(int fd, + unsigned long off_low, unsigned long off_high, + unsigned long nb_low, unsigned long nb_high, int flags) +{ + return sys_sync_file_range(fd, (off_high << 32) | off_low, + (nb_high << 32) | nb_low, flags); +} + +asmlinkage long sys32_fallocate(int fd, int mode, + unsigned long offlo, unsigned long offhi, + unsigned long lenlo, unsigned long lenhi) +{ + return sys_fallocate(fd, mode, (offhi << 32) | offlo, + (lenhi << 32) | lenlo); +} + +asmlinkage long sys32_truncate64(const char __user * path, + unsigned long low, unsigned long high) +{ + return sys_truncate(path, 
(high << 32) | low); +} + +asmlinkage long sys32_ftruncate64(unsigned int fd, + unsigned long low, unsigned long high) +{ + return sys_ftruncate(fd, (high << 32) | low); +} + +long compat_arch_ptrace(struct task_struct *child, compat_long_t request, + compat_ulong_t caddr, compat_ulong_t cdata) +{ + return common_ptrace(child, (long)request, (long)caddr, (long)cdata, true); +} + + diff --git a/arch/e2k/kernel/sys_e2k.c b/arch/e2k/kernel/sys_e2k.c new file mode 100644 index 000000000000..29f3d8625639 --- /dev/null +++ b/arch/e2k/kernel/sys_e2k.c @@ -0,0 +1,132 @@ + +/* linux/arch/e2k/kernel/sys_e2k.c, v 1.1 07/27/2001. + * + * Copyright (C) 2001 MCST + */ + +/* + * This file contains various random system calls that + * have a non-standard calling sequence on the Linux/E2K + * platform. + */ + +#include +#include +#include +#include +#include +#include /* doh, must come after sched.h... */ +#include +#include +#include +#include +#include + +#include +#include + + +#undef DEBUG_SYS_CALLS_MODE +#undef DebugSC +#define DEBUG_SYS_CALLS_MODE 0 /* system calls */ +#define DebugSC(...) DebugPrint(DEBUG_SYS_CALLS_MODE ,##__VA_ARGS__) + +/* + * Old cruft + */ +asmlinkage long sys_uname(struct old_utsname * name) +{ + int err; + + DebugSC("sys_uname entered.\n"); + + if (!name) + return -EFAULT; + down_read(&uts_sem); + err = copy_to_user(name, utsname(), sizeof (*name)); + up_read(&uts_sem); + + DebugSC("sys_uname exited.\n"); + + return err?-EFAULT:0; +} + + +/* + * Linux version of mmap() + * + * offset "off" is measuring in bytes. + */ +asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, unsigned long fd, + unsigned long off) +{ + if (off & ~PAGE_MASK) + return -EINVAL; + + return sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); +} + + +/* + * mmap2() is like mmap() except that the offset is expressed in units + * of PAGE_SIZE (instead of bytes). 
This allows to mmap2() (pieces + * of) files that are larger than the address space of the CPU. + */ +asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len, + int prot, int flags, int fd, long pgoff) +{ + return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); +} + +static inline unsigned int rlimit_translate_resource(unsigned int resource) +{ + switch (resource) { + case RLIMIT_P_STACK: return RLIMIT_P_STACK_EXT; + case RLIMIT_PC_STACK: return RLIMIT_PC_STACK_EXT; + } + return resource; +} + +asmlinkage long e2k_sys_prlimit64(pid_t pid, unsigned int resource, + const struct rlimit64 __user *new_rlim, + struct rlimit64 __user *old_rlim) +{ + return sys_prlimit64(pid, rlimit_translate_resource(resource), new_rlim, + old_rlim); +} + +asmlinkage long e2k_sys_getrlimit(unsigned int resource, + struct rlimit __user *rlim) +{ + return sys_getrlimit(rlimit_translate_resource(resource), rlim); +} + +#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT +asmlinkage long e2k_sys_old_getrlimit(unsigned int resource, + struct rlimit __user *rlim) +{ + return sys_old_getrlimit(rlimit_translate_resource(resource), rlim); +} +#endif + +asmlinkage long e2k_sys_setrlimit(unsigned int resource, + struct rlimit __user *rlim) +{ + return sys_setrlimit(rlimit_translate_resource(resource), rlim); +} + +#ifdef CONFIG_COMPAT +asmlinkage long compat_e2k_sys_getrlimit(unsigned int resource, + struct compat_rlimit __user *rlim) +{ + return compat_sys_getrlimit(rlimit_translate_resource(resource), rlim); +} + +asmlinkage long compat_e2k_sys_setrlimit(unsigned int resource, + struct compat_rlimit __user *rlim) +{ + return compat_sys_setrlimit(rlimit_translate_resource(resource), rlim); +} +#endif + diff --git a/arch/e2k/kernel/systable.c b/arch/e2k/kernel/systable.c new file mode 100644 index 000000000000..6d55745edbc3 --- /dev/null +++ b/arch/e2k/kernel/systable.c @@ -0,0 +1,3767 @@ +/* linux/arch/e2k/kernel/systable.c, v 1.1 05/28/2001. 
+ *
+ * Copyright (C) 2001 MCST
+ */
+
+#include 
+#include 
+
+#include 
+#include 
+
+#define SYSTEM_CALL_TBL_ENTRY(sysname) (system_call_func) sysname
+#define PROT_SYSCALL_TBL_ENTRY(sysname) ((protected_system_call_func) sysname)
+
+#ifdef CONFIG_COMPAT
+# define COMPAT_SYSTEM_CALL_TBL_ENTRY(sysname) \
+ (system_call_func) compat_##sysname
+#else
+# define COMPAT_SYSTEM_CALL_TBL_ENTRY(sysname) \
+ (system_call_func) sys_ni_syscall
+#endif
+
+
+asmlinkage long sys_deprecated(void)
+{
+ pr_info_ratelimited("System call #%d is obsolete\n",
+ current_pt_regs()->sys_num);
+
+ return -ENOSYS;
+}
+
+/*
+ * Real map of system calls.
+ */
+
+const system_call_func sys_call_table[NR_syscalls] =
+{
+ SYSTEM_CALL_TBL_ENTRY(sys_restart_syscall), /* 0 */
+ SYSTEM_CALL_TBL_ENTRY(sys_exit),
+ SYSTEM_CALL_TBL_ENTRY(sys_fork),
+ SYSTEM_CALL_TBL_ENTRY(sys_read),
+ SYSTEM_CALL_TBL_ENTRY(sys_write),
+ SYSTEM_CALL_TBL_ENTRY(sys_open), /* 5 */
+ SYSTEM_CALL_TBL_ENTRY(sys_close),
+ SYSTEM_CALL_TBL_ENTRY(sys_waitpid),
+ SYSTEM_CALL_TBL_ENTRY(sys_creat),
+ SYSTEM_CALL_TBL_ENTRY(sys_link),
+ SYSTEM_CALL_TBL_ENTRY(sys_unlink), /* 10 */
+ SYSTEM_CALL_TBL_ENTRY(e2k_sys_execve),
+ SYSTEM_CALL_TBL_ENTRY(sys_chdir),
+ SYSTEM_CALL_TBL_ENTRY(sys_time),
+ SYSTEM_CALL_TBL_ENTRY(sys_mknod),
+ SYSTEM_CALL_TBL_ENTRY(sys_chmod), /* 15 */
+ SYSTEM_CALL_TBL_ENTRY(sys_lchown),
+ SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old break syscall holder */
+
+ SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old sys_stat() */
+ SYSTEM_CALL_TBL_ENTRY(sys_lseek),
+ SYSTEM_CALL_TBL_ENTRY(sys_getpid), /* 20 */
+ SYSTEM_CALL_TBL_ENTRY(sys_mount),
+ SYSTEM_CALL_TBL_ENTRY(sys_oldumount),
+ SYSTEM_CALL_TBL_ENTRY(sys_setuid),
+ SYSTEM_CALL_TBL_ENTRY(sys_getuid),
+ SYSTEM_CALL_TBL_ENTRY(sys_stime), /* 25 */
+ SYSTEM_CALL_TBL_ENTRY(sys_ptrace),
+ SYSTEM_CALL_TBL_ENTRY(sys_alarm),
+ SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old sys_fstat() */
+ SYSTEM_CALL_TBL_ENTRY(sys_pause),
+ SYSTEM_CALL_TBL_ENTRY(sys_utime), /* 30 */
+
SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old stty syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old gtty syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_access), + SYSTEM_CALL_TBL_ENTRY(sys_nice), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 35, old ftime syscall */ + SYSTEM_CALL_TBL_ENTRY(sys_sync), + SYSTEM_CALL_TBL_ENTRY(sys_kill), + SYSTEM_CALL_TBL_ENTRY(sys_rename), + SYSTEM_CALL_TBL_ENTRY(sys_mkdir), + SYSTEM_CALL_TBL_ENTRY(sys_rmdir), /* 40 */ + SYSTEM_CALL_TBL_ENTRY(sys_dup), + SYSTEM_CALL_TBL_ENTRY(sys_pipe), + SYSTEM_CALL_TBL_ENTRY(sys_times), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old prof syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_brk), /* 45 */ + SYSTEM_CALL_TBL_ENTRY(sys_setgid), + SYSTEM_CALL_TBL_ENTRY(sys_getgid), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* signal() have to be */ + /* emulated by rt_sigaction() */ + /* on user level (GLIBC) */ + SYSTEM_CALL_TBL_ENTRY(sys_geteuid), + SYSTEM_CALL_TBL_ENTRY(sys_getegid), /* 50 */ + SYSTEM_CALL_TBL_ENTRY(sys_acct), + SYSTEM_CALL_TBL_ENTRY(sys_umount), /* recycled never used phys() */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old lock syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_ioctl), + + SYSTEM_CALL_TBL_ENTRY(sys_fcntl), /* 55 */ /* for 64 & 32 */ + + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old mpx syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_setpgid), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old ulimit syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_umask), /* 60 */ + SYSTEM_CALL_TBL_ENTRY(sys_chroot), + SYSTEM_CALL_TBL_ENTRY(sys_ustat), + SYSTEM_CALL_TBL_ENTRY(sys_dup2), + SYSTEM_CALL_TBL_ENTRY(sys_getppid), + SYSTEM_CALL_TBL_ENTRY(sys_getpgrp), /* 65 */ + SYSTEM_CALL_TBL_ENTRY(sys_setsid), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* no sys_sigaction(), use */ + SYSTEM_CALL_TBL_ENTRY(sys_sgetmask), /* sys_rt_sigaction() instead */ + SYSTEM_CALL_TBL_ENTRY(sys_ssetmask), + SYSTEM_CALL_TBL_ENTRY(sys_setreuid), /* 70 */ + 
SYSTEM_CALL_TBL_ENTRY(sys_setregid), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_sigpending), + SYSTEM_CALL_TBL_ENTRY(sys_sethostname), + SYSTEM_CALL_TBL_ENTRY(e2k_sys_setrlimit), /* 75 */ + SYSTEM_CALL_TBL_ENTRY(e2k_sys_old_getrlimit), + SYSTEM_CALL_TBL_ENTRY(sys_getrusage), + SYSTEM_CALL_TBL_ENTRY(sys_gettimeofday), + SYSTEM_CALL_TBL_ENTRY(sys_settimeofday), + SYSTEM_CALL_TBL_ENTRY(sys_getgroups), /* 80 */ + SYSTEM_CALL_TBL_ENTRY(sys_setgroups), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_symlink), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old sys_lstat() */ + SYSTEM_CALL_TBL_ENTRY(sys_readlink), /* 85 */ + SYSTEM_CALL_TBL_ENTRY(sys_uselib), + SYSTEM_CALL_TBL_ENTRY(sys_swapon), + SYSTEM_CALL_TBL_ENTRY(sys_reboot), + SYSTEM_CALL_TBL_ENTRY(sys_old_readdir), + SYSTEM_CALL_TBL_ENTRY(sys_mmap), /* 90 */ + SYSTEM_CALL_TBL_ENTRY(sys_munmap), + + SYSTEM_CALL_TBL_ENTRY(sys_truncate), + SYSTEM_CALL_TBL_ENTRY(sys_ftruncate), + + SYSTEM_CALL_TBL_ENTRY(sys_fchmod), + SYSTEM_CALL_TBL_ENTRY(sys_fchown), /* 95 */ + SYSTEM_CALL_TBL_ENTRY(sys_getpriority), + SYSTEM_CALL_TBL_ENTRY(sys_setpriority), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old profil syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_statfs), + SYSTEM_CALL_TBL_ENTRY(sys_fstatfs), /* 100 */ + SYSTEM_CALL_TBL_ENTRY(sys_ioperm), + SYSTEM_CALL_TBL_ENTRY(sys_socketcall), + SYSTEM_CALL_TBL_ENTRY(sys_syslog), + SYSTEM_CALL_TBL_ENTRY(sys_setitimer), + SYSTEM_CALL_TBL_ENTRY(sys_getitimer), /* 105 */ + + SYSTEM_CALL_TBL_ENTRY(sys_newstat), /* in libc used in ptr64 mode */ + SYSTEM_CALL_TBL_ENTRY(sys_newlstat), /* in libc used in ptr64 mode */ + SYSTEM_CALL_TBL_ENTRY(sys_newfstat), /* in libc used in ptr64 mode */ + + SYSTEM_CALL_TBL_ENTRY(sys_uname), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 110 */ + SYSTEM_CALL_TBL_ENTRY(sys_vhangup), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old "idle" system call */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + 
SYSTEM_CALL_TBL_ENTRY(sys_wait4), + SYSTEM_CALL_TBL_ENTRY(sys_swapoff), /* 115 */ + SYSTEM_CALL_TBL_ENTRY(sys_sysinfo), + SYSTEM_CALL_TBL_ENTRY(sys_ipc), + SYSTEM_CALL_TBL_ENTRY(sys_fsync), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_clone), /* 120 */ + SYSTEM_CALL_TBL_ENTRY(sys_setdomainname), + SYSTEM_CALL_TBL_ENTRY(sys_newuname), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_adjtimex), + SYSTEM_CALL_TBL_ENTRY(sys_mprotect), /* 125 */ + SYSTEM_CALL_TBL_ENTRY(sys_sigprocmask), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_init_module), + SYSTEM_CALL_TBL_ENTRY(sys_delete_module), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 130 */ + SYSTEM_CALL_TBL_ENTRY(sys_quotactl), + SYSTEM_CALL_TBL_ENTRY(sys_getpgid), + SYSTEM_CALL_TBL_ENTRY(sys_fchdir), + SYSTEM_CALL_TBL_ENTRY(sys_bdflush), + SYSTEM_CALL_TBL_ENTRY(sys_sysfs), /* 135 */ + SYSTEM_CALL_TBL_ENTRY(sys_personality), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* for afs_syscall */ + SYSTEM_CALL_TBL_ENTRY(sys_setfsuid), + SYSTEM_CALL_TBL_ENTRY(sys_setfsgid), + SYSTEM_CALL_TBL_ENTRY(sys_llseek), /* 140 */ + SYSTEM_CALL_TBL_ENTRY(sys_getdents), + SYSTEM_CALL_TBL_ENTRY(sys_select), + SYSTEM_CALL_TBL_ENTRY(sys_flock), + SYSTEM_CALL_TBL_ENTRY(sys_msync), + SYSTEM_CALL_TBL_ENTRY(sys_readv), /* 145 */ + SYSTEM_CALL_TBL_ENTRY(sys_writev), + SYSTEM_CALL_TBL_ENTRY(sys_getsid), + SYSTEM_CALL_TBL_ENTRY(sys_fdatasync), + SYSTEM_CALL_TBL_ENTRY(sys_sysctl), + SYSTEM_CALL_TBL_ENTRY(sys_mlock), /* 150 */ + SYSTEM_CALL_TBL_ENTRY(sys_munlock), + SYSTEM_CALL_TBL_ENTRY(sys_mlockall), + SYSTEM_CALL_TBL_ENTRY(sys_munlockall), + SYSTEM_CALL_TBL_ENTRY(sys_sched_setparam), + SYSTEM_CALL_TBL_ENTRY(sys_sched_getparam), /* 155 */ + SYSTEM_CALL_TBL_ENTRY(sys_sched_setscheduler), + SYSTEM_CALL_TBL_ENTRY(sys_sched_getscheduler), + SYSTEM_CALL_TBL_ENTRY(sys_sched_yield), + SYSTEM_CALL_TBL_ENTRY(sys_sched_get_priority_max), + SYSTEM_CALL_TBL_ENTRY(sys_sched_get_priority_min), /* 160 
*/ + SYSTEM_CALL_TBL_ENTRY(sys_sched_rr_get_interval), + SYSTEM_CALL_TBL_ENTRY(sys_nanosleep), + SYSTEM_CALL_TBL_ENTRY(sys_mremap), + SYSTEM_CALL_TBL_ENTRY(sys_setresuid), + SYSTEM_CALL_TBL_ENTRY(sys_getresuid), /* 165 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_poll), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* was sys_nfsservctl */ + SYSTEM_CALL_TBL_ENTRY(sys_setresgid), /* 170 */ + SYSTEM_CALL_TBL_ENTRY(sys_getresgid), + SYSTEM_CALL_TBL_ENTRY(sys_prctl), + SYSTEM_CALL_TBL_ENTRY(sys_deprecated), /* sys_rt_sigreturn() */ + SYSTEM_CALL_TBL_ENTRY(sys_rt_sigaction), + SYSTEM_CALL_TBL_ENTRY(sys_rt_sigprocmask), /* 175 */ + SYSTEM_CALL_TBL_ENTRY(sys_rt_sigpending), + SYSTEM_CALL_TBL_ENTRY(sys_rt_sigtimedwait), + SYSTEM_CALL_TBL_ENTRY(sys_rt_sigqueueinfo), + SYSTEM_CALL_TBL_ENTRY(sys_rt_sigsuspend), + SYSTEM_CALL_TBL_ENTRY(sys_pread64), /* 180 */ + SYSTEM_CALL_TBL_ENTRY(sys_pwrite64), + SYSTEM_CALL_TBL_ENTRY(sys_chown), + SYSTEM_CALL_TBL_ENTRY(sys_getcwd), + SYSTEM_CALL_TBL_ENTRY(sys_capget), + SYSTEM_CALL_TBL_ENTRY(sys_capset), /* 185 */ + SYSTEM_CALL_TBL_ENTRY(sys_sigaltstack), + SYSTEM_CALL_TBL_ENTRY(sys_sendfile64), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* streams1 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* streams2 */ + SYSTEM_CALL_TBL_ENTRY(sys_vfork), /* 190 */ + SYSTEM_CALL_TBL_ENTRY(e2k_sys_getrlimit), + SYSTEM_CALL_TBL_ENTRY(sys_mmap2), + + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + /* + * 193 & 194 entries are + * sys_truncate64 & + * sys_ftruncate64 in open.c + * if OS is for + * BITS_PER_LONG == 32 + * Our OS is for 64 + */ + + SYSTEM_CALL_TBL_ENTRY(sys_stat64), /* 195 , in libc used in ptr32 mode */ + SYSTEM_CALL_TBL_ENTRY(sys_lstat64), /* in libc used in ptr32 mode */ + SYSTEM_CALL_TBL_ENTRY(sys_fstat64), /* in libc used in ptr32 mode */ + + /* + * They are used for back compatibility + */ + SYSTEM_CALL_TBL_ENTRY(sys_lchown), + 
SYSTEM_CALL_TBL_ENTRY(sys_getuid), + SYSTEM_CALL_TBL_ENTRY(sys_getgid), /* 200 */ + SYSTEM_CALL_TBL_ENTRY(sys_geteuid), + SYSTEM_CALL_TBL_ENTRY(sys_getegid), + SYSTEM_CALL_TBL_ENTRY(sys_setreuid), + SYSTEM_CALL_TBL_ENTRY(sys_setregid), + + SYSTEM_CALL_TBL_ENTRY(sys_pidfd_send_signal), /* 205 */ + SYSTEM_CALL_TBL_ENTRY(sys_pidfd_open), + + /* + * They are used for back compatibility + */ + SYSTEM_CALL_TBL_ENTRY(sys_fchown), + SYSTEM_CALL_TBL_ENTRY(sys_setresuid), + SYSTEM_CALL_TBL_ENTRY(sys_getresuid), + SYSTEM_CALL_TBL_ENTRY(sys_setresgid), /* 210 */ + SYSTEM_CALL_TBL_ENTRY(sys_getresgid), + SYSTEM_CALL_TBL_ENTRY(sys_chown), + SYSTEM_CALL_TBL_ENTRY(sys_setuid), + SYSTEM_CALL_TBL_ENTRY(sys_setgid), + SYSTEM_CALL_TBL_ENTRY(sys_setfsuid), /* 215 */ + SYSTEM_CALL_TBL_ENTRY(sys_setfsgid), + + SYSTEM_CALL_TBL_ENTRY(sys_pivot_root), + SYSTEM_CALL_TBL_ENTRY(sys_mincore), + SYSTEM_CALL_TBL_ENTRY(sys_madvise), + SYSTEM_CALL_TBL_ENTRY(sys_getdents64), /* 220 */ + SYSTEM_CALL_TBL_ENTRY(sys_fcntl), + /* + * 221 is sys_fcntl64 in fcntl.c + * if BITS_PER_LONG == 32 + * for some other archs + */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 223 */ + SYSTEM_CALL_TBL_ENTRY(sys_newfstatat), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 225 */ + SYSTEM_CALL_TBL_ENTRY(sys_deprecated), /*sys_e2k_setjmp */ + SYSTEM_CALL_TBL_ENTRY(sys_deprecated), /*sys_e2k_longjmp*/ + SYSTEM_CALL_TBL_ENTRY(sys_e2k_syswork), + SYSTEM_CALL_TBL_ENTRY(sys_clone_thread), + SYSTEM_CALL_TBL_ENTRY(sys_e2k_longjmp2), /* 230 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_setxattr), + SYSTEM_CALL_TBL_ENTRY(sys_lsetxattr), + SYSTEM_CALL_TBL_ENTRY(sys_fsetxattr), + SYSTEM_CALL_TBL_ENTRY(sys_getxattr), /* 235 */ + SYSTEM_CALL_TBL_ENTRY(sys_lgetxattr), + SYSTEM_CALL_TBL_ENTRY(sys_fgetxattr), + SYSTEM_CALL_TBL_ENTRY(sys_listxattr), + SYSTEM_CALL_TBL_ENTRY(sys_llistxattr), + SYSTEM_CALL_TBL_ENTRY(sys_flistxattr), /* 240 */ + 
SYSTEM_CALL_TBL_ENTRY(sys_removexattr), + SYSTEM_CALL_TBL_ENTRY(sys_lremovexattr), + SYSTEM_CALL_TBL_ENTRY(sys_fremovexattr), + SYSTEM_CALL_TBL_ENTRY(sys_gettid), + SYSTEM_CALL_TBL_ENTRY(sys_readahead), /* 245 */ + SYSTEM_CALL_TBL_ENTRY(sys_tkill), + SYSTEM_CALL_TBL_ENTRY(sys_sendfile64), +#if defined CONFIG_FUTEX + SYSTEM_CALL_TBL_ENTRY(sys_futex), +#else + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), +#endif + SYSTEM_CALL_TBL_ENTRY(sys_sched_setaffinity), + SYSTEM_CALL_TBL_ENTRY(sys_sched_getaffinity), /* 250 */ + SYSTEM_CALL_TBL_ENTRY(sys_pipe2), + SYSTEM_CALL_TBL_ENTRY(sys_set_backtrace), + SYSTEM_CALL_TBL_ENTRY(sys_get_backtrace), + SYSTEM_CALL_TBL_ENTRY(sys_access_hw_stacks), + SYSTEM_CALL_TBL_ENTRY(sys_el_posix), /* 255 */ + SYSTEM_CALL_TBL_ENTRY(sys_io_uring_setup), + SYSTEM_CALL_TBL_ENTRY(sys_io_uring_enter), + SYSTEM_CALL_TBL_ENTRY(sys_io_uring_register), + SYSTEM_CALL_TBL_ENTRY(sys_set_tid_address), +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + SYSTEM_CALL_TBL_ENTRY(sys_el_binary), /* 260 */ +#else + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 260 */ +#endif /* CONFIG_SECONDARY_SPACE_SUPPORT */ + SYSTEM_CALL_TBL_ENTRY(sys_timer_create), + SYSTEM_CALL_TBL_ENTRY(sys_timer_settime), + SYSTEM_CALL_TBL_ENTRY(sys_timer_gettime), + SYSTEM_CALL_TBL_ENTRY(sys_timer_getoverrun), + SYSTEM_CALL_TBL_ENTRY(sys_timer_delete), /* 265 */ + SYSTEM_CALL_TBL_ENTRY(sys_clock_settime), + SYSTEM_CALL_TBL_ENTRY(sys_clock_gettime), + SYSTEM_CALL_TBL_ENTRY(sys_clock_getres), + SYSTEM_CALL_TBL_ENTRY(sys_clock_nanosleep), + SYSTEM_CALL_TBL_ENTRY(sys_msgget), /* 270 */ + SYSTEM_CALL_TBL_ENTRY(sys_msgctl), + SYSTEM_CALL_TBL_ENTRY(sys_msgrcv), + SYSTEM_CALL_TBL_ENTRY(sys_msgsnd), + SYSTEM_CALL_TBL_ENTRY(sys_semget), + SYSTEM_CALL_TBL_ENTRY(sys_old_semctl), /* 275 */ + SYSTEM_CALL_TBL_ENTRY(sys_semtimedop), + SYSTEM_CALL_TBL_ENTRY(sys_semop), + SYSTEM_CALL_TBL_ENTRY(sys_shmget), + SYSTEM_CALL_TBL_ENTRY(sys_shmctl), + SYSTEM_CALL_TBL_ENTRY(sys_shmat), /* 280 */ + SYSTEM_CALL_TBL_ENTRY(sys_shmdt), + 
SYSTEM_CALL_TBL_ENTRY(sys_open_tree), + SYSTEM_CALL_TBL_ENTRY(sys_move_mount), + SYSTEM_CALL_TBL_ENTRY(sys_rseq), + SYSTEM_CALL_TBL_ENTRY(sys_io_pgetevents), /* 285 */ + SYSTEM_CALL_TBL_ENTRY(sys_accept4), + SYSTEM_CALL_TBL_ENTRY(sys_sched_setattr), + SYSTEM_CALL_TBL_ENTRY(sys_sched_getattr), + SYSTEM_CALL_TBL_ENTRY(sys_ioprio_set), /* 289 __NR_ioprio_set */ + SYSTEM_CALL_TBL_ENTRY(sys_ioprio_get), /* 290 __NR_ioprio_get */ + SYSTEM_CALL_TBL_ENTRY(sys_inotify_init),/* 291 __NR_inotify_init */ + SYSTEM_CALL_TBL_ENTRY(sys_inotify_add_watch), + /* 292 __NR_inotify_add_watch */ + SYSTEM_CALL_TBL_ENTRY(sys_inotify_rm_watch), + /* 293 __NR_inotify_rm_watch */ + SYSTEM_CALL_TBL_ENTRY(sys_io_setup), /* 294 */ + SYSTEM_CALL_TBL_ENTRY(sys_io_destroy), + SYSTEM_CALL_TBL_ENTRY(sys_io_getevents), + SYSTEM_CALL_TBL_ENTRY(sys_io_submit), + SYSTEM_CALL_TBL_ENTRY(sys_io_cancel), + SYSTEM_CALL_TBL_ENTRY(sys_fadvise64), + SYSTEM_CALL_TBL_ENTRY(sys_exit_group), /* 300 */ + SYSTEM_CALL_TBL_ENTRY(sys_lookup_dcookie), + SYSTEM_CALL_TBL_ENTRY(sys_epoll_create), + SYSTEM_CALL_TBL_ENTRY(sys_epoll_ctl), + SYSTEM_CALL_TBL_ENTRY(sys_epoll_wait), + SYSTEM_CALL_TBL_ENTRY(sys_remap_file_pages), + SYSTEM_CALL_TBL_ENTRY(sys_statfs64), + SYSTEM_CALL_TBL_ENTRY(sys_fstatfs64), + SYSTEM_CALL_TBL_ENTRY(sys_tgkill), + SYSTEM_CALL_TBL_ENTRY(sys_utimes), + SYSTEM_CALL_TBL_ENTRY(sys_fadvise64_64), /* 310 */ + + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* __NR_vserver */ + /*The system call isn't implemented in the Linux 2.6.14 + * kernel */ + SYSTEM_CALL_TBL_ENTRY(sys_mbind), + SYSTEM_CALL_TBL_ENTRY(sys_get_mempolicy), + SYSTEM_CALL_TBL_ENTRY(sys_set_mempolicy), + SYSTEM_CALL_TBL_ENTRY(sys_mq_open), + SYSTEM_CALL_TBL_ENTRY(sys_mq_unlink), + SYSTEM_CALL_TBL_ENTRY(sys_mq_timedsend), + SYSTEM_CALL_TBL_ENTRY(sys_mq_timedreceive), + SYSTEM_CALL_TBL_ENTRY(sys_mq_notify), + SYSTEM_CALL_TBL_ENTRY(sys_mq_getsetattr), /* 320 */ + SYSTEM_CALL_TBL_ENTRY(sys_kexec_load), + SYSTEM_CALL_TBL_ENTRY(sys_waitid), + 
SYSTEM_CALL_TBL_ENTRY(sys_add_key), + SYSTEM_CALL_TBL_ENTRY(sys_request_key), + SYSTEM_CALL_TBL_ENTRY(sys_keyctl), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* sys_mcst_rt */ + SYSTEM_CALL_TBL_ENTRY(sys_getcpu), + SYSTEM_CALL_TBL_ENTRY(sys_move_pages), + SYSTEM_CALL_TBL_ENTRY(sys_splice), + SYSTEM_CALL_TBL_ENTRY(sys_vmsplice), /* 330 */ + SYSTEM_CALL_TBL_ENTRY(sys_tee), + SYSTEM_CALL_TBL_ENTRY(sys_migrate_pages), + SYSTEM_CALL_TBL_ENTRY(sys_utimensat), + SYSTEM_CALL_TBL_ENTRY(sys_rt_tgsigqueueinfo), + SYSTEM_CALL_TBL_ENTRY(sys_openat), + SYSTEM_CALL_TBL_ENTRY(sys_mkdirat), + SYSTEM_CALL_TBL_ENTRY(sys_mknodat), + SYSTEM_CALL_TBL_ENTRY(sys_fchownat), + SYSTEM_CALL_TBL_ENTRY(sys_unlinkat), + SYSTEM_CALL_TBL_ENTRY(sys_renameat), /* 340 */ + SYSTEM_CALL_TBL_ENTRY(sys_linkat), + SYSTEM_CALL_TBL_ENTRY(sys_symlinkat), + SYSTEM_CALL_TBL_ENTRY(sys_readlinkat), + SYSTEM_CALL_TBL_ENTRY(sys_fchmodat), + SYSTEM_CALL_TBL_ENTRY(sys_faccessat), + SYSTEM_CALL_TBL_ENTRY(sys_epoll_pwait), + SYSTEM_CALL_TBL_ENTRY(sys_signalfd4), + SYSTEM_CALL_TBL_ENTRY(sys_eventfd2), + SYSTEM_CALL_TBL_ENTRY(sys_recvmmsg), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 350 */ +#ifdef CONFIG_TIMERFD + SYSTEM_CALL_TBL_ENTRY(sys_timerfd_create), + SYSTEM_CALL_TBL_ENTRY(sys_timerfd_settime), + SYSTEM_CALL_TBL_ENTRY(sys_timerfd_gettime), +#else + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), +#endif + SYSTEM_CALL_TBL_ENTRY(sys_preadv), + SYSTEM_CALL_TBL_ENTRY(sys_pwritev), + SYSTEM_CALL_TBL_ENTRY(sys_fallocate), + SYSTEM_CALL_TBL_ENTRY(sys_sync_file_range), + SYSTEM_CALL_TBL_ENTRY(sys_dup3), + SYSTEM_CALL_TBL_ENTRY(sys_inotify_init1), + SYSTEM_CALL_TBL_ENTRY(sys_epoll_create1),/* 360 */ + SYSTEM_CALL_TBL_ENTRY(sys_fstatat64), + SYSTEM_CALL_TBL_ENTRY(sys_futimesat), + SYSTEM_CALL_TBL_ENTRY(sys_perf_event_open), + SYSTEM_CALL_TBL_ENTRY(sys_unshare), + SYSTEM_CALL_TBL_ENTRY(sys_get_robust_list), + SYSTEM_CALL_TBL_ENTRY(sys_set_robust_list), 
+ SYSTEM_CALL_TBL_ENTRY(sys_pselect6), + SYSTEM_CALL_TBL_ENTRY(sys_ppoll), + SYSTEM_CALL_TBL_ENTRY(sys_setcontext), + SYSTEM_CALL_TBL_ENTRY(sys_makecontext), /* 370 */ + SYSTEM_CALL_TBL_ENTRY(sys_swapcontext), + SYSTEM_CALL_TBL_ENTRY(sys_freecontext), + SYSTEM_CALL_TBL_ENTRY(sys_fanotify_init), + SYSTEM_CALL_TBL_ENTRY(sys_fanotify_mark), + SYSTEM_CALL_TBL_ENTRY(e2k_sys_prlimit64), + SYSTEM_CALL_TBL_ENTRY(sys_clock_adjtime), + SYSTEM_CALL_TBL_ENTRY(sys_syncfs), + SYSTEM_CALL_TBL_ENTRY(sys_sendmmsg), + SYSTEM_CALL_TBL_ENTRY(sys_setns), + SYSTEM_CALL_TBL_ENTRY(sys_process_vm_readv), /* 380 */ + SYSTEM_CALL_TBL_ENTRY(sys_process_vm_writev), + SYSTEM_CALL_TBL_ENTRY(sys_kcmp), + SYSTEM_CALL_TBL_ENTRY(sys_finit_module), + /* added in linux-4.4 */ + SYSTEM_CALL_TBL_ENTRY(sys_renameat2), + SYSTEM_CALL_TBL_ENTRY(sys_getrandom), + SYSTEM_CALL_TBL_ENTRY(sys_memfd_create), + SYSTEM_CALL_TBL_ENTRY(sys_bpf), + SYSTEM_CALL_TBL_ENTRY(e2k_sys_execveat), + SYSTEM_CALL_TBL_ENTRY(sys_userfaultfd), + SYSTEM_CALL_TBL_ENTRY(sys_membarrier), /* 390 */ + SYSTEM_CALL_TBL_ENTRY(sys_mlock2), + /* added in linux-4.9 */ + SYSTEM_CALL_TBL_ENTRY(sys_seccomp), + SYSTEM_CALL_TBL_ENTRY(sys_shutdown), + SYSTEM_CALL_TBL_ENTRY(sys_copy_file_range), + SYSTEM_CALL_TBL_ENTRY(sys_preadv2), + SYSTEM_CALL_TBL_ENTRY(sys_pwritev2), + + /* free (unused) items */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + + SYSTEM_CALL_TBL_ENTRY(sys_name_to_handle_at), /* 400 */ + SYSTEM_CALL_TBL_ENTRY(sys_open_by_handle_at), /* 401 */ + SYSTEM_CALL_TBL_ENTRY(sys_statx), /* 402 */ + /* added for compatibility with x86_64 */ + SYSTEM_CALL_TBL_ENTRY(sys_socket), /* 403 */ + SYSTEM_CALL_TBL_ENTRY(sys_connect), /* 404 */ + SYSTEM_CALL_TBL_ENTRY(sys_accept), /* 405 */ + SYSTEM_CALL_TBL_ENTRY(sys_sendto), /* 406 */ + SYSTEM_CALL_TBL_ENTRY(sys_recvfrom), /* 407 */ + SYSTEM_CALL_TBL_ENTRY(sys_sendmsg), /* 408 */ + SYSTEM_CALL_TBL_ENTRY(sys_recvmsg), /* 409 
*/ + SYSTEM_CALL_TBL_ENTRY(sys_bind), /* 410 */ + SYSTEM_CALL_TBL_ENTRY(sys_listen), /* 411 */ + SYSTEM_CALL_TBL_ENTRY(sys_getsockname), /* 412 */ + SYSTEM_CALL_TBL_ENTRY(sys_getpeername), /* 413 */ + SYSTEM_CALL_TBL_ENTRY(sys_socketpair), /* 414 */ + SYSTEM_CALL_TBL_ENTRY(sys_setsockopt), /* 415 */ + SYSTEM_CALL_TBL_ENTRY(sys_getsockopt), /* 416 */ + + /* free (unused) items */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 417 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 418 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 419 */ + + /* protected specific system calls entries */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 420 __NR_newuselib */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 421 __NR_rt_sigaction_ex */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 422 __NR_get_mem */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 423 __NR_free_mem */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 424 __NR_clean_descriptors */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 425 __NR_unuselib */ + + SYSTEM_CALL_TBL_ENTRY(sys_clone3), + SYSTEM_CALL_TBL_ENTRY(sys_fsopen), + SYSTEM_CALL_TBL_ENTRY(sys_fsconfig), + SYSTEM_CALL_TBL_ENTRY(sys_fsmount), + SYSTEM_CALL_TBL_ENTRY(sys_fspick), /* 430 */ +}; + +const system_call_func sys_call_table_32[NR_syscalls] = +{ + SYSTEM_CALL_TBL_ENTRY(sys_restart_syscall), /* 0 */ + SYSTEM_CALL_TBL_ENTRY(sys_exit), + SYSTEM_CALL_TBL_ENTRY(sys_fork), + SYSTEM_CALL_TBL_ENTRY(sys_read), + SYSTEM_CALL_TBL_ENTRY(sys_write), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_open), /* 5 */ + SYSTEM_CALL_TBL_ENTRY(sys_close), + SYSTEM_CALL_TBL_ENTRY(sys_waitpid), + SYSTEM_CALL_TBL_ENTRY(sys_creat), + SYSTEM_CALL_TBL_ENTRY(sys_link), + SYSTEM_CALL_TBL_ENTRY(sys_unlink), /* 10 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(e2k_sys_execve), + SYSTEM_CALL_TBL_ENTRY(sys_chdir), + SYSTEM_CALL_TBL_ENTRY(sys_time32), + SYSTEM_CALL_TBL_ENTRY(sys_mknod), + SYSTEM_CALL_TBL_ENTRY(sys_chmod), /* 15 */ + SYSTEM_CALL_TBL_ENTRY(sys_lchown), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old break syscall 
holder */ + + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old sys_stat() */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_lseek), + SYSTEM_CALL_TBL_ENTRY(sys_getpid), /* 20 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_mount), + SYSTEM_CALL_TBL_ENTRY(sys_oldumount), + SYSTEM_CALL_TBL_ENTRY(sys_setuid), + SYSTEM_CALL_TBL_ENTRY(sys_getuid), + SYSTEM_CALL_TBL_ENTRY(sys_stime32), /* 25 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_ptrace), + SYSTEM_CALL_TBL_ENTRY(sys_alarm), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old sys_fstat() */ + SYSTEM_CALL_TBL_ENTRY(sys_pause), + SYSTEM_CALL_TBL_ENTRY(sys_utime32), /* 30 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old stty syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old gtty syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_access), + SYSTEM_CALL_TBL_ENTRY(sys_nice), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 35, old ftime syscall */ + SYSTEM_CALL_TBL_ENTRY(sys_sync), + SYSTEM_CALL_TBL_ENTRY(sys_kill), + SYSTEM_CALL_TBL_ENTRY(sys_rename), + SYSTEM_CALL_TBL_ENTRY(sys_mkdir), + SYSTEM_CALL_TBL_ENTRY(sys_rmdir), /* 40 */ + SYSTEM_CALL_TBL_ENTRY(sys_dup), + SYSTEM_CALL_TBL_ENTRY(sys_pipe), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_times), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old prof syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_brk), /* 45 */ + SYSTEM_CALL_TBL_ENTRY(sys_setgid), + SYSTEM_CALL_TBL_ENTRY(sys_getgid), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* signal() have to be */ + /* emulated by rt_sigaction() */ + /* on user level (GLIBC) */ + SYSTEM_CALL_TBL_ENTRY(sys_geteuid), + SYSTEM_CALL_TBL_ENTRY(sys_getegid), /* 50 */ + SYSTEM_CALL_TBL_ENTRY(sys_acct), + SYSTEM_CALL_TBL_ENTRY(sys_umount), /* recycled never used phys() */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old lock syscall holder */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_ioctl), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_fcntl),/* 55 */ /* for 64 & 32 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old mpx syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_setpgid), + 
SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old ulimit syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_umask), /* 60 */ + SYSTEM_CALL_TBL_ENTRY(sys_chroot), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_ustat), + SYSTEM_CALL_TBL_ENTRY(sys_dup2), + SYSTEM_CALL_TBL_ENTRY(sys_getppid), + SYSTEM_CALL_TBL_ENTRY(sys_getpgrp), /* 65 */ + SYSTEM_CALL_TBL_ENTRY(sys_setsid), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* no sys_sigaction(), use */ + SYSTEM_CALL_TBL_ENTRY(sys_sgetmask), /* sys_rt_sigaction() instead */ + SYSTEM_CALL_TBL_ENTRY(sys_ssetmask), + SYSTEM_CALL_TBL_ENTRY(sys_setreuid), /* 70 */ + SYSTEM_CALL_TBL_ENTRY(sys_setregid), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_sigpending), + SYSTEM_CALL_TBL_ENTRY(sys_sethostname), + COMPAT_SYSTEM_CALL_TBL_ENTRY(e2k_sys_setrlimit), /* 75 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(e2k_sys_getrlimit), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_getrusage), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_gettimeofday), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_settimeofday), + SYSTEM_CALL_TBL_ENTRY(sys_getgroups), /* 80 */ + SYSTEM_CALL_TBL_ENTRY(sys_setgroups), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_symlink), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old sys_lstat() */ + SYSTEM_CALL_TBL_ENTRY(sys_readlink), /* 85 */ + SYSTEM_CALL_TBL_ENTRY(sys_uselib), + SYSTEM_CALL_TBL_ENTRY(sys_swapon), + SYSTEM_CALL_TBL_ENTRY(sys_reboot), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_old_readdir), + SYSTEM_CALL_TBL_ENTRY(sys_mmap), /* 90 */ + SYSTEM_CALL_TBL_ENTRY(sys_munmap), + + SYSTEM_CALL_TBL_ENTRY(sys_truncate), + SYSTEM_CALL_TBL_ENTRY(sys_ftruncate), + + SYSTEM_CALL_TBL_ENTRY(sys_fchmod), + SYSTEM_CALL_TBL_ENTRY(sys_fchown), /* 95 */ + SYSTEM_CALL_TBL_ENTRY(sys_getpriority), + SYSTEM_CALL_TBL_ENTRY(sys_setpriority), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old profil syscall holder */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_statfs), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_fstatfs), /* 100 */ + 
SYSTEM_CALL_TBL_ENTRY(sys_ioperm), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_socketcall), + SYSTEM_CALL_TBL_ENTRY(sys_syslog), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_setitimer), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_getitimer), /* 105 */ + + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_newstat), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_newlstat), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_newfstat), + + SYSTEM_CALL_TBL_ENTRY(sys_uname), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 110 */ + SYSTEM_CALL_TBL_ENTRY(sys_vhangup), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old "idle" system call */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_wait4), + SYSTEM_CALL_TBL_ENTRY(sys_swapoff), /* 115 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_sysinfo), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_ipc), + SYSTEM_CALL_TBL_ENTRY(sys_fsync), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_clone), /* 120 */ + SYSTEM_CALL_TBL_ENTRY(sys_setdomainname), + SYSTEM_CALL_TBL_ENTRY(sys_newuname), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_adjtimex_time32), + SYSTEM_CALL_TBL_ENTRY(sys_mprotect), /* 125 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_sigprocmask), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_init_module), + SYSTEM_CALL_TBL_ENTRY(sys_delete_module), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 130 */ + SYSTEM_CALL_TBL_ENTRY(sys_quotactl), + SYSTEM_CALL_TBL_ENTRY(sys_getpgid), + SYSTEM_CALL_TBL_ENTRY(sys_fchdir), + SYSTEM_CALL_TBL_ENTRY(sys_bdflush), + SYSTEM_CALL_TBL_ENTRY(sys_sysfs), /* 135 */ + SYSTEM_CALL_TBL_ENTRY(sys_personality), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* for afs_syscall */ + SYSTEM_CALL_TBL_ENTRY(sys_setfsuid), + SYSTEM_CALL_TBL_ENTRY(sys_setfsgid), + SYSTEM_CALL_TBL_ENTRY(sys_llseek), /* 140 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_getdents), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_select), + SYSTEM_CALL_TBL_ENTRY(sys_flock), + SYSTEM_CALL_TBL_ENTRY(sys_msync), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_readv), /* 145 */ + 
COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_writev), + SYSTEM_CALL_TBL_ENTRY(sys_getsid), + SYSTEM_CALL_TBL_ENTRY(sys_fdatasync), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_sysctl), + SYSTEM_CALL_TBL_ENTRY(sys_mlock), /* 150 */ + SYSTEM_CALL_TBL_ENTRY(sys_munlock), + SYSTEM_CALL_TBL_ENTRY(sys_mlockall), + SYSTEM_CALL_TBL_ENTRY(sys_munlockall), + SYSTEM_CALL_TBL_ENTRY(sys_sched_setparam), + SYSTEM_CALL_TBL_ENTRY(sys_sched_getparam), /* 155 */ + SYSTEM_CALL_TBL_ENTRY(sys_sched_setscheduler), + SYSTEM_CALL_TBL_ENTRY(sys_sched_getscheduler), + SYSTEM_CALL_TBL_ENTRY(sys_sched_yield), + SYSTEM_CALL_TBL_ENTRY(sys_sched_get_priority_max), + SYSTEM_CALL_TBL_ENTRY(sys_sched_get_priority_min), /* 160 */ + SYSTEM_CALL_TBL_ENTRY(sys_sched_rr_get_interval_time32), + SYSTEM_CALL_TBL_ENTRY(sys_nanosleep_time32), + SYSTEM_CALL_TBL_ENTRY(sys_mremap), + SYSTEM_CALL_TBL_ENTRY(sys_setresuid), + SYSTEM_CALL_TBL_ENTRY(sys_getresuid), /* 165 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_poll), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* was sys_nfsservctl */ + SYSTEM_CALL_TBL_ENTRY(sys_setresgid), /* 170 */ + SYSTEM_CALL_TBL_ENTRY(sys_getresgid), + SYSTEM_CALL_TBL_ENTRY(sys_prctl), + SYSTEM_CALL_TBL_ENTRY(sys_deprecated), /* sys_rt_sigreturn() */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_rt_sigaction), + SYSTEM_CALL_TBL_ENTRY(sys_rt_sigprocmask), /* 175 */ + SYSTEM_CALL_TBL_ENTRY(sys_rt_sigpending), + SYSTEM_CALL_TBL_ENTRY(sys_rt_sigtimedwait_time32), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_rt_sigqueueinfo), + SYSTEM_CALL_TBL_ENTRY(sys_rt_sigsuspend), + SYSTEM_CALL_TBL_ENTRY(sys32_pread64), /* 180 */ + SYSTEM_CALL_TBL_ENTRY(sys32_pwrite64), + SYSTEM_CALL_TBL_ENTRY(sys_chown), + SYSTEM_CALL_TBL_ENTRY(sys_getcwd), + SYSTEM_CALL_TBL_ENTRY(sys_capget), + SYSTEM_CALL_TBL_ENTRY(sys_capset), /* 185 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_sigaltstack), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_sendfile), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* streams1 */ + 
SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* streams2 */ + SYSTEM_CALL_TBL_ENTRY(sys_vfork), /* 190 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(e2k_sys_getrlimit), + SYSTEM_CALL_TBL_ENTRY(sys_mmap2), + + SYSTEM_CALL_TBL_ENTRY(sys32_truncate64), + SYSTEM_CALL_TBL_ENTRY(sys32_ftruncate64), + SYSTEM_CALL_TBL_ENTRY(sys_stat64), /* 195 , in libc used in ptr32 mode */ + SYSTEM_CALL_TBL_ENTRY(sys_lstat64), /* in libc used in ptr32 mode */ + SYSTEM_CALL_TBL_ENTRY(sys_fstat64), /* in libc used in ptr32 mode */ + + /* + * They are used for back compatibility + */ + SYSTEM_CALL_TBL_ENTRY(sys_lchown), + SYSTEM_CALL_TBL_ENTRY(sys_getuid), + SYSTEM_CALL_TBL_ENTRY(sys_getgid), /* 200 */ + SYSTEM_CALL_TBL_ENTRY(sys_geteuid), + SYSTEM_CALL_TBL_ENTRY(sys_getegid), + SYSTEM_CALL_TBL_ENTRY(sys_setreuid), + SYSTEM_CALL_TBL_ENTRY(sys_setregid), + + SYSTEM_CALL_TBL_ENTRY(sys_pidfd_send_signal), /* 205 */ + SYSTEM_CALL_TBL_ENTRY(sys_pidfd_open), + + /* + * They are used for back compatibility + */ + SYSTEM_CALL_TBL_ENTRY(sys_fchown), + SYSTEM_CALL_TBL_ENTRY(sys_setresuid), + SYSTEM_CALL_TBL_ENTRY(sys_getresuid), + SYSTEM_CALL_TBL_ENTRY(sys_setresgid), /* 210 */ + SYSTEM_CALL_TBL_ENTRY(sys_getresgid), + SYSTEM_CALL_TBL_ENTRY(sys_chown), + SYSTEM_CALL_TBL_ENTRY(sys_setuid), + SYSTEM_CALL_TBL_ENTRY(sys_setgid), + SYSTEM_CALL_TBL_ENTRY(sys_setfsuid), /* 215 */ + SYSTEM_CALL_TBL_ENTRY(sys_setfsgid), + + SYSTEM_CALL_TBL_ENTRY(sys_pivot_root), + SYSTEM_CALL_TBL_ENTRY(sys_mincore), + SYSTEM_CALL_TBL_ENTRY(sys_madvise), + SYSTEM_CALL_TBL_ENTRY(sys_getdents64), /* 220 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_fcntl64), + /* + * 221 is sys_fcntl64 in fcntl.c + * if BITS_PER_LONG == 32 + * for some other archs + */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 223 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 225 */ + SYSTEM_CALL_TBL_ENTRY(sys_deprecated), /*sys_e2k_setjmp */ + SYSTEM_CALL_TBL_ENTRY(sys_deprecated), /*sys_e2k_longjmp*/ + 
SYSTEM_CALL_TBL_ENTRY(sys_e2k_syswork), + SYSTEM_CALL_TBL_ENTRY(sys_clone_thread), + SYSTEM_CALL_TBL_ENTRY(sys_e2k_longjmp2), /* 230 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_setxattr), + SYSTEM_CALL_TBL_ENTRY(sys_lsetxattr), + SYSTEM_CALL_TBL_ENTRY(sys_fsetxattr), + SYSTEM_CALL_TBL_ENTRY(sys_getxattr), /* 235 */ + SYSTEM_CALL_TBL_ENTRY(sys_lgetxattr), + SYSTEM_CALL_TBL_ENTRY(sys_fgetxattr), + SYSTEM_CALL_TBL_ENTRY(sys_listxattr), + SYSTEM_CALL_TBL_ENTRY(sys_llistxattr), + SYSTEM_CALL_TBL_ENTRY(sys_flistxattr), /* 240 */ + SYSTEM_CALL_TBL_ENTRY(sys_removexattr), + SYSTEM_CALL_TBL_ENTRY(sys_lremovexattr), + SYSTEM_CALL_TBL_ENTRY(sys_fremovexattr), + SYSTEM_CALL_TBL_ENTRY(sys_gettid), + SYSTEM_CALL_TBL_ENTRY(sys32_readahead), /* 245 */ + SYSTEM_CALL_TBL_ENTRY(sys_tkill), + SYSTEM_CALL_TBL_ENTRY(sys_sendfile64), +#if defined CONFIG_FUTEX + SYSTEM_CALL_TBL_ENTRY(sys_futex_time32), +#else + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), +#endif + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_sched_setaffinity), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_sched_getaffinity), /* 250 */ + SYSTEM_CALL_TBL_ENTRY(sys_pipe2), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_set_backtrace), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_get_backtrace), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_access_hw_stacks), + SYSTEM_CALL_TBL_ENTRY(sys_el_posix), /* 255 */ + SYSTEM_CALL_TBL_ENTRY(sys_io_uring_setup), + SYSTEM_CALL_TBL_ENTRY(sys_io_uring_enter), + SYSTEM_CALL_TBL_ENTRY(sys_io_uring_register), + SYSTEM_CALL_TBL_ENTRY(sys_set_tid_address), +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + SYSTEM_CALL_TBL_ENTRY(sys_el_binary), /* 260 */ +#else + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 260 */ +#endif /* CONFIG_SECONDARY_SPACE_SUPPORT */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_timer_create), + SYSTEM_CALL_TBL_ENTRY(sys_timer_settime32), + SYSTEM_CALL_TBL_ENTRY(sys_timer_gettime32), + SYSTEM_CALL_TBL_ENTRY(sys_timer_getoverrun), + SYSTEM_CALL_TBL_ENTRY(sys_timer_delete), /* 265 */ + 
SYSTEM_CALL_TBL_ENTRY(sys_clock_settime32), + SYSTEM_CALL_TBL_ENTRY(sys_clock_gettime32), + SYSTEM_CALL_TBL_ENTRY(sys_clock_getres_time32), + SYSTEM_CALL_TBL_ENTRY(sys_clock_nanosleep_time32), + SYSTEM_CALL_TBL_ENTRY(sys_msgget), /* 270 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_msgctl), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_msgrcv), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_msgsnd), + SYSTEM_CALL_TBL_ENTRY(sys_semget), + SYSTEM_CALL_TBL_ENTRY(sys_old_semctl), /* 275 */ + SYSTEM_CALL_TBL_ENTRY(sys_semtimedop_time32), + SYSTEM_CALL_TBL_ENTRY(sys_semop), + SYSTEM_CALL_TBL_ENTRY(sys_shmget), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_shmctl), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_shmat), /* 280 */ + SYSTEM_CALL_TBL_ENTRY(sys_shmdt), + SYSTEM_CALL_TBL_ENTRY(sys_open_tree), + SYSTEM_CALL_TBL_ENTRY(sys_move_mount), + SYSTEM_CALL_TBL_ENTRY(sys_rseq), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_io_pgetevents), /* 285 */ + SYSTEM_CALL_TBL_ENTRY(sys_accept4), + SYSTEM_CALL_TBL_ENTRY(sys_sched_setattr), + SYSTEM_CALL_TBL_ENTRY(sys_sched_getattr), + SYSTEM_CALL_TBL_ENTRY(sys_ioprio_set), /* 289 __NR_ioprio_set */ + SYSTEM_CALL_TBL_ENTRY(sys_ioprio_get), /* 290 __NR_ioprio_get */ + SYSTEM_CALL_TBL_ENTRY(sys_inotify_init),/* 291 __NR_inotify_init */ + SYSTEM_CALL_TBL_ENTRY(sys_inotify_add_watch), + /* 292 __NR_inotify_add_watch */ + SYSTEM_CALL_TBL_ENTRY(sys_inotify_rm_watch), + /* 293 __NR_inotify_rm_watch */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_io_setup), /* 294 */ + SYSTEM_CALL_TBL_ENTRY(sys_io_destroy), + SYSTEM_CALL_TBL_ENTRY(sys_io_getevents_time32), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_io_submit), + SYSTEM_CALL_TBL_ENTRY(sys_io_cancel), + SYSTEM_CALL_TBL_ENTRY(sys32_fadvise64), + SYSTEM_CALL_TBL_ENTRY(sys_exit_group), /* 300 */ + SYSTEM_CALL_TBL_ENTRY(sys_lookup_dcookie), + SYSTEM_CALL_TBL_ENTRY(sys_epoll_create), + SYSTEM_CALL_TBL_ENTRY(sys_epoll_ctl), + SYSTEM_CALL_TBL_ENTRY(sys_epoll_wait), + SYSTEM_CALL_TBL_ENTRY(sys_remap_file_pages), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_statfs64), + 
COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_fstatfs64), + SYSTEM_CALL_TBL_ENTRY(sys_tgkill), + SYSTEM_CALL_TBL_ENTRY(sys_utimes_time32), + SYSTEM_CALL_TBL_ENTRY(sys32_fadvise64_64), /* 310 */ + + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* __NR_vserver */ + /*The system call isn't implemented in the Linux 2.6.14 + * kernel */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_mbind), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_get_mempolicy), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_set_mempolicy), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_mq_open), + SYSTEM_CALL_TBL_ENTRY(sys_mq_unlink), + SYSTEM_CALL_TBL_ENTRY(sys_mq_timedsend_time32), + SYSTEM_CALL_TBL_ENTRY(sys_mq_timedreceive_time32), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_mq_notify), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_mq_getsetattr), /* 320 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_kexec_load), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_waitid), + SYSTEM_CALL_TBL_ENTRY(sys_add_key), + SYSTEM_CALL_TBL_ENTRY(sys_request_key), +#ifdef CONFIG_KEYS_COMPAT + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_keyctl), +#else + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), +#endif + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* sys_mcst_rt */ + SYSTEM_CALL_TBL_ENTRY(sys_getcpu), + SYSTEM_CALL_TBL_ENTRY(sys_move_pages), + SYSTEM_CALL_TBL_ENTRY(sys_splice), + SYSTEM_CALL_TBL_ENTRY(sys_vmsplice), /* 330 */ + SYSTEM_CALL_TBL_ENTRY(sys_tee), + SYSTEM_CALL_TBL_ENTRY(sys_migrate_pages), + SYSTEM_CALL_TBL_ENTRY(sys_utimensat_time32), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_rt_tgsigqueueinfo), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_openat), + SYSTEM_CALL_TBL_ENTRY(sys_mkdirat), + SYSTEM_CALL_TBL_ENTRY(sys_mknodat), + SYSTEM_CALL_TBL_ENTRY(sys_fchownat), + SYSTEM_CALL_TBL_ENTRY(sys_unlinkat), + SYSTEM_CALL_TBL_ENTRY(sys_renameat), /* 340 */ + SYSTEM_CALL_TBL_ENTRY(sys_linkat), + SYSTEM_CALL_TBL_ENTRY(sys_symlinkat), + SYSTEM_CALL_TBL_ENTRY(sys_readlinkat), + SYSTEM_CALL_TBL_ENTRY(sys_fchmodat), + SYSTEM_CALL_TBL_ENTRY(sys_faccessat), + SYSTEM_CALL_TBL_ENTRY(sys_epoll_pwait), +#ifdef CONFIG_SIGNALFD + 
COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_signalfd4), +#else + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), +#endif + SYSTEM_CALL_TBL_ENTRY(sys_eventfd2), + SYSTEM_CALL_TBL_ENTRY(sys_recvmmsg_time32), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 350 */ +#ifdef CONFIG_TIMERFD + SYSTEM_CALL_TBL_ENTRY(sys_timerfd_create), + SYSTEM_CALL_TBL_ENTRY(sys_timerfd_settime32), + SYSTEM_CALL_TBL_ENTRY(sys_timerfd_gettime32), +#else + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), +#endif + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_preadv), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_pwritev), + SYSTEM_CALL_TBL_ENTRY(sys32_fallocate), + SYSTEM_CALL_TBL_ENTRY(sys32_sync_file_range), + SYSTEM_CALL_TBL_ENTRY(sys_dup3), + SYSTEM_CALL_TBL_ENTRY(sys_inotify_init1), + SYSTEM_CALL_TBL_ENTRY(sys_epoll_create1),/* 360 */ + SYSTEM_CALL_TBL_ENTRY(sys_fstatat64), + SYSTEM_CALL_TBL_ENTRY(sys_futimesat_time32), + SYSTEM_CALL_TBL_ENTRY(sys_perf_event_open), + SYSTEM_CALL_TBL_ENTRY(sys_unshare), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_get_robust_list), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_set_robust_list), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_pselect6_time32), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_ppoll_time32), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_setcontext), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_makecontext), /* 370 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_swapcontext), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_freecontext), + SYSTEM_CALL_TBL_ENTRY(sys_fanotify_init), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_fanotify_mark), + SYSTEM_CALL_TBL_ENTRY(e2k_sys_prlimit64), + SYSTEM_CALL_TBL_ENTRY(sys_clock_adjtime32), + SYSTEM_CALL_TBL_ENTRY(sys_syncfs), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_sendmmsg), + SYSTEM_CALL_TBL_ENTRY(sys_setns), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_process_vm_readv), /* 380 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_process_vm_writev), + SYSTEM_CALL_TBL_ENTRY(sys_kcmp), + SYSTEM_CALL_TBL_ENTRY(sys_finit_module), + /* added in linux-4.4 */ + 
SYSTEM_CALL_TBL_ENTRY(sys_renameat2), + SYSTEM_CALL_TBL_ENTRY(sys_getrandom), + SYSTEM_CALL_TBL_ENTRY(sys_memfd_create), + SYSTEM_CALL_TBL_ENTRY(sys_bpf), + COMPAT_SYSTEM_CALL_TBL_ENTRY(e2k_sys_execveat), + SYSTEM_CALL_TBL_ENTRY(sys_userfaultfd), + SYSTEM_CALL_TBL_ENTRY(sys_membarrier), /* 390 */ + SYSTEM_CALL_TBL_ENTRY(sys_mlock2), + /* added in linux-4.9 */ + SYSTEM_CALL_TBL_ENTRY(sys_seccomp), + SYSTEM_CALL_TBL_ENTRY(sys_shutdown), + SYSTEM_CALL_TBL_ENTRY(sys_copy_file_range), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_preadv2), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_pwritev2), + + /* free (unused) items */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + + SYSTEM_CALL_TBL_ENTRY(sys_name_to_handle_at), /* 400 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_open_by_handle_at), + SYSTEM_CALL_TBL_ENTRY(sys_statx), /* 402 */ + + /* added for compatibility with x86_64 */ + SYSTEM_CALL_TBL_ENTRY(sys_socket), /* 403 */ + SYSTEM_CALL_TBL_ENTRY(sys_connect), /* 404 */ + SYSTEM_CALL_TBL_ENTRY(sys_accept), /* 405 */ + SYSTEM_CALL_TBL_ENTRY(sys_sendto), /* 406 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_recvfrom), /* 407 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_sendmsg), /* 408 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_recvmsg), /* 409 */ + SYSTEM_CALL_TBL_ENTRY(sys_bind), /* 410 */ + SYSTEM_CALL_TBL_ENTRY(sys_listen), /* 411 */ + SYSTEM_CALL_TBL_ENTRY(sys_getsockname), /* 412 */ + SYSTEM_CALL_TBL_ENTRY(sys_getpeername), /* 413 */ + SYSTEM_CALL_TBL_ENTRY(sys_socketpair), /* 414 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_setsockopt), /* 415 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_getsockopt), /* 416 */ + + /* free (unused) items */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 417 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 418 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 419 */ + + /* protected specific system calls entries */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 420 __NR_newuselib */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 
421 __NR_rt_sigaction_ex */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 422 __NR_get_mem */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 423 __NR_free_mem */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 424 __NR_clean_descriptors */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 425 __NR_unuselib */ + + SYSTEM_CALL_TBL_ENTRY(sys_clone3), + SYSTEM_CALL_TBL_ENTRY(sys_fsopen), + SYSTEM_CALL_TBL_ENTRY(sys_fsconfig), + SYSTEM_CALL_TBL_ENTRY(sys_fsmount), + SYSTEM_CALL_TBL_ENTRY(sys_fspick), /* 430 */ +}; + +/* System call handlers for protected mode (entry 10). If some system + * call is not here it does not mean it is not implemented - + * it is probably called from ttable_entry10 after reading + * and preparing its parameters. */ +const system_call_func sys_protcall_table[NR_syscalls] = +{ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 0 */ + SYSTEM_CALL_TBL_ENTRY(sys_exit), + SYSTEM_CALL_TBL_ENTRY(sys_fork), // fork + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // read + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // write + SYSTEM_CALL_TBL_ENTRY(sys_open), /* 5 */ + SYSTEM_CALL_TBL_ENTRY(sys_close), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // waitpid + SYSTEM_CALL_TBL_ENTRY(sys_creat), + SYSTEM_CALL_TBL_ENTRY(sys_link), + SYSTEM_CALL_TBL_ENTRY(sys_unlink), /* 10 */ + SYSTEM_CALL_TBL_ENTRY(e2k_sys_execve), + SYSTEM_CALL_TBL_ENTRY(sys_chdir), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // time + SYSTEM_CALL_TBL_ENTRY(sys_mknod), + SYSTEM_CALL_TBL_ENTRY(sys_chmod), /* 15 */ + SYSTEM_CALL_TBL_ENTRY(sys_lchown), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old break syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old sys_stat() */ + SYSTEM_CALL_TBL_ENTRY(sys_lseek), + SYSTEM_CALL_TBL_ENTRY(sys_getpid), /* 20 */ + SYSTEM_CALL_TBL_ENTRY(sys_mount), + SYSTEM_CALL_TBL_ENTRY(sys_oldumount), + SYSTEM_CALL_TBL_ENTRY(sys_setuid), + SYSTEM_CALL_TBL_ENTRY(sys_getuid), + SYSTEM_CALL_TBL_ENTRY(sys_stime), /* 25 */ + SYSTEM_CALL_TBL_ENTRY(sys_ptrace), + SYSTEM_CALL_TBL_ENTRY(sys_alarm), + 
SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old sys_fstat() */ + SYSTEM_CALL_TBL_ENTRY(sys_pause), + SYSTEM_CALL_TBL_ENTRY(sys_utime), /* 30 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old stty syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old gtty syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_access), + SYSTEM_CALL_TBL_ENTRY(sys_nice), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 35, old ftime syscall */ + SYSTEM_CALL_TBL_ENTRY(sys_sync), + SYSTEM_CALL_TBL_ENTRY(sys_kill), + SYSTEM_CALL_TBL_ENTRY(sys_rename), + SYSTEM_CALL_TBL_ENTRY(sys_mkdir), + SYSTEM_CALL_TBL_ENTRY(sys_rmdir), /* 40 */ + SYSTEM_CALL_TBL_ENTRY(sys_dup), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // pipe + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // times + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old prof syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 45 */ // brk + SYSTEM_CALL_TBL_ENTRY(sys_setgid), + SYSTEM_CALL_TBL_ENTRY(sys_getgid), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* signal() have to be */ + /* emulated by rt_sigaction() */ + /* on user level (GLIBC) */ + SYSTEM_CALL_TBL_ENTRY(sys_geteuid), + SYSTEM_CALL_TBL_ENTRY(sys_getegid), /* 50 */ + SYSTEM_CALL_TBL_ENTRY(sys_acct), + SYSTEM_CALL_TBL_ENTRY(sys_umount), /* recycled never used phys() */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old lock syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_ioctl), + + SYSTEM_CALL_TBL_ENTRY(sys_fcntl), /* 55 */ /* for 64 & 32 */ + + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old mpx syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_setpgid), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old ulimit syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_umask), /* 60 */ + SYSTEM_CALL_TBL_ENTRY(sys_chroot), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // ustat + SYSTEM_CALL_TBL_ENTRY(sys_dup2), + SYSTEM_CALL_TBL_ENTRY(sys_getppid), + SYSTEM_CALL_TBL_ENTRY(sys_getpgrp), /* 65 */ + SYSTEM_CALL_TBL_ENTRY(sys_setsid), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 
no sys_sigaction(), use */ + SYSTEM_CALL_TBL_ENTRY(sys_sgetmask), /* sys_rt_sigaction() instead */ + SYSTEM_CALL_TBL_ENTRY(sys_ssetmask), + SYSTEM_CALL_TBL_ENTRY(sys_setreuid), /* 70 */ + SYSTEM_CALL_TBL_ENTRY(sys_setregid), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_sigpending, use rt_* + SYSTEM_CALL_TBL_ENTRY(sys_sethostname), + SYSTEM_CALL_TBL_ENTRY(e2k_sys_setrlimit), /* 75 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_old_getrlimit, use u* + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_getrusage + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_gettimeofday + SYSTEM_CALL_TBL_ENTRY(sys_settimeofday), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 80 */ // sys_getgroups + SYSTEM_CALL_TBL_ENTRY(sys_setgroups), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_symlink), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old sys_lstat() */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 85 */ // sys_readlink + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_uselib + SYSTEM_CALL_TBL_ENTRY(sys_swapon), + SYSTEM_CALL_TBL_ENTRY(sys_reboot), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // old_readdir + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 90 */ // sys_mmap + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_munmap + + SYSTEM_CALL_TBL_ENTRY(sys_truncate), + SYSTEM_CALL_TBL_ENTRY(sys_ftruncate), + + SYSTEM_CALL_TBL_ENTRY(sys_fchmod), + SYSTEM_CALL_TBL_ENTRY(sys_fchown), /* 95 */ + SYSTEM_CALL_TBL_ENTRY(sys_getpriority), + SYSTEM_CALL_TBL_ENTRY(sys_setpriority), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old profil syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_statfs + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 100 */ // sys_fstatfs + SYSTEM_CALL_TBL_ENTRY(sys_ioperm), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_socketcall + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_syslog + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_setitimer + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 105 */ // sys_getitimer 
+ + /* next 3 calls realized in libc in ptr64 mode */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_newstat + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_newlstat + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_newfstat + + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_uname - old ni + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 110 */ + SYSTEM_CALL_TBL_ENTRY(sys_vhangup), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old "idle" system call */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_wait4 + SYSTEM_CALL_TBL_ENTRY(sys_swapoff), /* 115 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_sysinfo + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // ipc + SYSTEM_CALL_TBL_ENTRY(sys_fsync), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_clone + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_setdomainname + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_newuname + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_adjtimex + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_mprotect + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_sigprocmask - ni, see rt* + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_init_module + SYSTEM_CALL_TBL_ENTRY(sys_delete_module), // sys_delete_module + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 130 */ + SYSTEM_CALL_TBL_ENTRY(sys_quotactl), + SYSTEM_CALL_TBL_ENTRY(sys_getpgid), + SYSTEM_CALL_TBL_ENTRY(sys_fchdir), + SYSTEM_CALL_TBL_ENTRY(sys_bdflush), // sys_bdflush + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_sysfs + SYSTEM_CALL_TBL_ENTRY(sys_personality), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* for afs_syscall */ + SYSTEM_CALL_TBL_ENTRY(sys_setfsuid), + SYSTEM_CALL_TBL_ENTRY(sys_setfsgid), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_llseek + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_getdents + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_select + SYSTEM_CALL_TBL_ENTRY(sys_flock), + 
SYSTEM_CALL_TBL_ENTRY(sys_msync), + SYSTEM_CALL_TBL_ENTRY(sys_readv), // sys_readv + SYSTEM_CALL_TBL_ENTRY(sys_writev), // sys_writev + SYSTEM_CALL_TBL_ENTRY(sys_getsid), + SYSTEM_CALL_TBL_ENTRY(sys_fdatasync), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_sysctl + SYSTEM_CALL_TBL_ENTRY(sys_mlock), /* 150 */ + SYSTEM_CALL_TBL_ENTRY(sys_munlock), + SYSTEM_CALL_TBL_ENTRY(sys_mlockall), + SYSTEM_CALL_TBL_ENTRY(sys_munlockall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_sched_setparam + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_sched_getparam + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_sched_getscheduler), + SYSTEM_CALL_TBL_ENTRY(sys_sched_yield), + SYSTEM_CALL_TBL_ENTRY(sys_sched_get_priority_max), + SYSTEM_CALL_TBL_ENTRY(sys_sched_get_priority_min), /* 160 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_sched_rr_get_interval + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_nanosleep + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_mremap + SYSTEM_CALL_TBL_ENTRY(sys_setresuid), + SYSTEM_CALL_TBL_ENTRY(sys_getresuid), /* 165 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 168 sys_poll */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 169 sys_nfsservctl */ + SYSTEM_CALL_TBL_ENTRY(sys_setresgid), /* 170 */ + SYSTEM_CALL_TBL_ENTRY(sys_getresgid), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_prctl + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 173 sys_rt_sigreturn */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_rt_sigaction + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_rt_sigprocmask + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_rt_sigpending + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_rt_sigtimedwait + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_rt_sigqueueinfo + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_rt_sigsuspend + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_pread64 + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_pwrite64 + 
SYSTEM_CALL_TBL_ENTRY(sys_chown), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_getcwd + SYSTEM_CALL_TBL_ENTRY(sys_capget), + SYSTEM_CALL_TBL_ENTRY(sys_capset), /* 185 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_sigaltstack + SYSTEM_CALL_TBL_ENTRY(sys_sendfile64), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* streams1 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* streams2 */ + SYSTEM_CALL_TBL_ENTRY(sys_vfork), /* 190 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_getrlimit + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_mmap2 + + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + /* + * 193 & 194 entries are + * sys_truncate64 & + * sys_ftruncate64 in open.c + * if OS is for + * BITS_PER_LONG == 32 + * Our OS is for 64 + */ + + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_stat64 + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_lstat64 + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_fstat64 + + /* + * They are used for back compatibility + */ + SYSTEM_CALL_TBL_ENTRY(sys_lchown), + SYSTEM_CALL_TBL_ENTRY(sys_getuid), + SYSTEM_CALL_TBL_ENTRY(sys_getgid), /* 200 */ + SYSTEM_CALL_TBL_ENTRY(sys_geteuid), + SYSTEM_CALL_TBL_ENTRY(sys_getegid), + SYSTEM_CALL_TBL_ENTRY(sys_setreuid), + SYSTEM_CALL_TBL_ENTRY(sys_setregid), + + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 205 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + + /* + * They are used for back compatibility + */ + SYSTEM_CALL_TBL_ENTRY(sys_fchown), + SYSTEM_CALL_TBL_ENTRY(sys_setresuid), + SYSTEM_CALL_TBL_ENTRY(sys_getresuid), + SYSTEM_CALL_TBL_ENTRY(sys_setresgid), /* 210 */ + SYSTEM_CALL_TBL_ENTRY(sys_getresgid), + SYSTEM_CALL_TBL_ENTRY(sys_chown), + SYSTEM_CALL_TBL_ENTRY(sys_setuid), + SYSTEM_CALL_TBL_ENTRY(sys_setgid), + SYSTEM_CALL_TBL_ENTRY(sys_setfsuid), /* 215 */ + SYSTEM_CALL_TBL_ENTRY(sys_setfsgid), + + SYSTEM_CALL_TBL_ENTRY(sys_pivot_root), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_mincore + SYSTEM_CALL_TBL_ENTRY(sys_madvise), + 
SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_getdents64 + SYSTEM_CALL_TBL_ENTRY(sys_fcntl), /* sys_fcntl */ + /* + * 221 is sys_fcntl64 in fcntl.c + * if BITS_PER_LONG == 32 + * for some other archs + */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 223 */ + + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 225 */ + SYSTEM_CALL_TBL_ENTRY(sys_deprecated), /*sys_e2k_setjmp */ + SYSTEM_CALL_TBL_ENTRY(sys_deprecated), /*sys_e2k_longjmp*/ + SYSTEM_CALL_TBL_ENTRY(sys_e2k_syswork), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* sys_clone_thread */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 230 */ + + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_setxattr + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), //sys_lsetxattr + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), //sys_fsetxattr + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_getxattr + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_lgetxattr + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_fgetxattr + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), //sys_listxattr + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), //sys_llistxattr + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_flistxattr + SYSTEM_CALL_TBL_ENTRY(sys_removexattr), + SYSTEM_CALL_TBL_ENTRY(sys_lremovexattr), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_fremovexattr + SYSTEM_CALL_TBL_ENTRY(sys_gettid), + SYSTEM_CALL_TBL_ENTRY(sys_readahead), /* 245 */ + SYSTEM_CALL_TBL_ENTRY(sys_tkill), + SYSTEM_CALL_TBL_ENTRY(sys_sendfile64), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_futex + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_sched_setaffinity + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_sched_getaffinity + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 251 pipe2 */ + SYSTEM_CALL_TBL_ENTRY(sys_set_backtrace), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_access_hw_stacks), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), // sys_el_posix + 
SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 256 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + SYSTEM_CALL_TBL_ENTRY(sys_el_binary), /* 260 */ +#else + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 260 */ +#endif /* CONFIG_SECONDARY_SPACE_SUPPORT */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 265 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_msgget), /* 270 */ + SYSTEM_CALL_TBL_ENTRY(sys_msgctl), + SYSTEM_CALL_TBL_ENTRY(sys_msgrcv), + SYSTEM_CALL_TBL_ENTRY(sys_msgsnd), + SYSTEM_CALL_TBL_ENTRY(sys_semget), + SYSTEM_CALL_TBL_ENTRY(sys_old_semctl), /* 275 */ + SYSTEM_CALL_TBL_ENTRY(sys_semtimedop), + SYSTEM_CALL_TBL_ENTRY(sys_semop), + SYSTEM_CALL_TBL_ENTRY(sys_shmget), + SYSTEM_CALL_TBL_ENTRY(sys_shmctl), + SYSTEM_CALL_TBL_ENTRY(sys_shmat), /* 280 */ + SYSTEM_CALL_TBL_ENTRY(sys_shmdt), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 285 */ + SYSTEM_CALL_TBL_ENTRY(sys_accept4), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ioprio_set), /* 289 __NR_ioprio_set */ + SYSTEM_CALL_TBL_ENTRY(sys_ioprio_get), /* 290 __NR_ioprio_get */ + SYSTEM_CALL_TBL_ENTRY(sys_inotify_init),/* 291 __NR_inotify_init */ + SYSTEM_CALL_TBL_ENTRY(sys_inotify_add_watch), + /* 292 __NR_inotify_add_watch */ + SYSTEM_CALL_TBL_ENTRY(sys_inotify_rm_watch), + /* 293 __NR_inotify_rm_watch */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 295 */ + 
SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_exit_group), /* 300 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 305 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_tgkill), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 310 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 315 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 320 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 325 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 330 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 340 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + 
SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 350 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_preadv), + SYSTEM_CALL_TBL_ENTRY(sys_pwritev), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_dup3), + SYSTEM_CALL_TBL_ENTRY(sys_inotify_init1), + SYSTEM_CALL_TBL_ENTRY(sys_epoll_create1),/* 360 */ + SYSTEM_CALL_TBL_ENTRY(sys_fstatat64), + SYSTEM_CALL_TBL_ENTRY(sys_futimesat), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_unshare), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 370 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(e2k_sys_prlimit64), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 380 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + 
SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 390 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_preadv2), + SYSTEM_CALL_TBL_ENTRY(sys_pwritev2), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 400 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + + /* free (unused) items */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 402 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 403 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 404 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 405 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 406 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 407 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 408 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 409 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 410 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 411 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 412 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 413 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 414 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 415 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 416 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 417 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 418 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 419 */ + + /* protected specific system calls entries */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 420 __NR_newuselib */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 421 __NR_rt_sigaction_ex */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 422 __NR_get_mem */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 423 __NR_free_mem */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 424 __NR_clean_descriptors */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 425 __NR_unuselib */ + + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + 
	SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall),
+	SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall),
+	SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall),	/* 430 */
+	/* 430 last System call */
+};
+
+/*
+ * System call handlers for protected mode entry #8:
+ * NOTE(review): the table is indexed by system call number, so entries
+ * must stay in numeric order -- the numbered comments mark every 5th slot.
+ * Unimplemented/hole numbers are filled with sys_ni_syscall.
+ */
+const protected_system_call_func sys_call_table_entry8[NR_syscalls] = {
+	PROT_SYSCALL_TBL_ENTRY(sys_restart_syscall),	/* 0 */
+	PROT_SYSCALL_TBL_ENTRY(sys_exit),
+	PROT_SYSCALL_TBL_ENTRY(sys_fork),
+	PROT_SYSCALL_TBL_ENTRY(sys_read),
+	PROT_SYSCALL_TBL_ENTRY(sys_write),
+	PROT_SYSCALL_TBL_ENTRY(sys_open),	/* 5 */
+	PROT_SYSCALL_TBL_ENTRY(sys_close),
+	PROT_SYSCALL_TBL_ENTRY(sys_waitpid),
+	PROT_SYSCALL_TBL_ENTRY(sys_creat),
+	PROT_SYSCALL_TBL_ENTRY(sys_link),
+	PROT_SYSCALL_TBL_ENTRY(sys_unlink),	/* 10 */
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_execve),
+	PROT_SYSCALL_TBL_ENTRY(sys_chdir),
+	PROT_SYSCALL_TBL_ENTRY(sys_time),
+	PROT_SYSCALL_TBL_ENTRY(sys_mknod),
+	PROT_SYSCALL_TBL_ENTRY(sys_chmod),	/* 15 */
+	PROT_SYSCALL_TBL_ENTRY(sys_lchown),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* old break syscall holder */
+
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* old sys_stat() */
+	PROT_SYSCALL_TBL_ENTRY(sys_lseek),
+	PROT_SYSCALL_TBL_ENTRY(sys_getpid),	/* 20 */
+	PROT_SYSCALL_TBL_ENTRY(sys_mount),
+	PROT_SYSCALL_TBL_ENTRY(sys_oldumount),
+	PROT_SYSCALL_TBL_ENTRY(sys_setuid),
+	PROT_SYSCALL_TBL_ENTRY(sys_getuid),
+	PROT_SYSCALL_TBL_ENTRY(sys_stime),	/* 25 */
+	PROT_SYSCALL_TBL_ENTRY(sys_ptrace),
+	PROT_SYSCALL_TBL_ENTRY(sys_alarm),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* old sys_fstat() */
+	PROT_SYSCALL_TBL_ENTRY(sys_pause),
+	PROT_SYSCALL_TBL_ENTRY(sys_utime),	/* 30 */
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* old stty syscall holder */
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* old gtty syscall holder */
+	PROT_SYSCALL_TBL_ENTRY(sys_access),
+	PROT_SYSCALL_TBL_ENTRY(sys_nice),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* 35, old ftime syscall */
+	PROT_SYSCALL_TBL_ENTRY(sys_sync),
+	PROT_SYSCALL_TBL_ENTRY(sys_kill),
+
+	PROT_SYSCALL_TBL_ENTRY(sys_rename),
+	PROT_SYSCALL_TBL_ENTRY(sys_mkdir),
+	PROT_SYSCALL_TBL_ENTRY(sys_rmdir),	/* 40 */
+	PROT_SYSCALL_TBL_ENTRY(sys_dup),
+	PROT_SYSCALL_TBL_ENTRY(sys_pipe),
+	PROT_SYSCALL_TBL_ENTRY(sys_times),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* old prof syscall holder */
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* brk() unavailable in PM */
+	PROT_SYSCALL_TBL_ENTRY(sys_setgid),	/* 46 */
+	PROT_SYSCALL_TBL_ENTRY(sys_getgid),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* signal() have to be */
+						/* emulated by rt_sigaction() */
+						/* on user level (GLIBC) */
+	PROT_SYSCALL_TBL_ENTRY(sys_geteuid),
+	PROT_SYSCALL_TBL_ENTRY(sys_getegid),	/* 50 */
+	PROT_SYSCALL_TBL_ENTRY(sys_acct),
+	PROT_SYSCALL_TBL_ENTRY(sys_umount),	/* recycled never used phys() */
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* old lock syscall holder */
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_ioctl),
+
+	PROT_SYSCALL_TBL_ENTRY(sys_fcntl),	/* 55 */ /* for 64 & 32 */
+
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* old mpx syscall holder */
+	PROT_SYSCALL_TBL_ENTRY(sys_setpgid),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* old ulimit syscall holder */
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* oldolduname */
+	PROT_SYSCALL_TBL_ENTRY(sys_umask),	/* 60 */
+	PROT_SYSCALL_TBL_ENTRY(sys_chroot),
+	PROT_SYSCALL_TBL_ENTRY(sys_ustat),
+	PROT_SYSCALL_TBL_ENTRY(sys_dup2),
+	PROT_SYSCALL_TBL_ENTRY(sys_getppid),
+	PROT_SYSCALL_TBL_ENTRY(sys_getpgrp),	/* 65 */
+	PROT_SYSCALL_TBL_ENTRY(sys_setsid),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* no sys_sigaction(), use */
+						/* sys_rt_sigaction() instead */
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* sys_sgetmask obsoleted */
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* sys_ssetmask obsoleted */
+	PROT_SYSCALL_TBL_ENTRY(sys_setreuid),	/* 70 */
+	PROT_SYSCALL_TBL_ENTRY(sys_setregid),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),
+	PROT_SYSCALL_TBL_ENTRY(sys_sigpending),
+	PROT_SYSCALL_TBL_ENTRY(sys_sethostname),
+
+	PROT_SYSCALL_TBL_ENTRY(e2k_sys_setrlimit),	/* 75 */
+	PROT_SYSCALL_TBL_ENTRY(e2k_sys_old_getrlimit),
+	PROT_SYSCALL_TBL_ENTRY(sys_getrusage),
+	PROT_SYSCALL_TBL_ENTRY(sys_gettimeofday),
+	PROT_SYSCALL_TBL_ENTRY(sys_settimeofday),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_getgroups),	/* 80 */
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_setgroups),
+	PROT_SYSCALL_TBL_ENTRY(sys_select),
+	PROT_SYSCALL_TBL_ENTRY(sys_symlink),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* old sys_lstat() */
+	PROT_SYSCALL_TBL_ENTRY(sys_readlink),	/* 85 */
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_olduselib), /* obsolete syscall */
+	PROT_SYSCALL_TBL_ENTRY(sys_swapon),
+	PROT_SYSCALL_TBL_ENTRY(sys_reboot),
+	PROT_SYSCALL_TBL_ENTRY(sys_old_readdir),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_mmap),	/* 90 */
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_munmap),
+
+	PROT_SYSCALL_TBL_ENTRY(sys_truncate),
+	PROT_SYSCALL_TBL_ENTRY(sys_ftruncate),
+
+	PROT_SYSCALL_TBL_ENTRY(sys_fchmod),
+	PROT_SYSCALL_TBL_ENTRY(sys_fchown),	/* 95 */
+	PROT_SYSCALL_TBL_ENTRY(sys_getpriority),
+	PROT_SYSCALL_TBL_ENTRY(sys_setpriority),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* old profil syscall holder */
+	PROT_SYSCALL_TBL_ENTRY(sys_statfs),
+	PROT_SYSCALL_TBL_ENTRY(sys_fstatfs),	/* 100 */
+	PROT_SYSCALL_TBL_ENTRY(sys_ioperm),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_socketcall),
+	PROT_SYSCALL_TBL_ENTRY(sys_syslog),
+	PROT_SYSCALL_TBL_ENTRY(sys_setitimer),
+	PROT_SYSCALL_TBL_ENTRY(sys_getitimer),	/* 105 */
+
+	PROT_SYSCALL_TBL_ENTRY(sys_newstat),	/* in libc used in ptr64 mode */
+	PROT_SYSCALL_TBL_ENTRY(sys_newlstat),	/* in libc used in ptr64 mode */
+	PROT_SYSCALL_TBL_ENTRY(sys_newfstat),	/* in libc used in ptr64 mode */
+
+	PROT_SYSCALL_TBL_ENTRY(sys_uname),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* 110 */
+	PROT_SYSCALL_TBL_ENTRY(sys_vhangup),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* old "idle" system call */
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),
+	PROT_SYSCALL_TBL_ENTRY(sys_wait4),
+
+	PROT_SYSCALL_TBL_ENTRY(sys_swapoff),	/* 115 */
+	PROT_SYSCALL_TBL_ENTRY(sys_sysinfo),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_ipc),
+	PROT_SYSCALL_TBL_ENTRY(sys_fsync),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_clone),	/* 120 */
+	PROT_SYSCALL_TBL_ENTRY(sys_setdomainname),
+	PROT_SYSCALL_TBL_ENTRY(sys_newuname),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),
+	PROT_SYSCALL_TBL_ENTRY(sys_adjtimex),
+	PROT_SYSCALL_TBL_ENTRY(sys_mprotect),	/* 125 */
+	PROT_SYSCALL_TBL_ENTRY(sys_sigprocmask),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),
+	PROT_SYSCALL_TBL_ENTRY(sys_init_module),
+	PROT_SYSCALL_TBL_ENTRY(sys_delete_module),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* 130 */
+	PROT_SYSCALL_TBL_ENTRY(sys_quotactl),
+	PROT_SYSCALL_TBL_ENTRY(sys_getpgid),
+	PROT_SYSCALL_TBL_ENTRY(sys_fchdir),
+	PROT_SYSCALL_TBL_ENTRY(sys_bdflush),
+	PROT_SYSCALL_TBL_ENTRY(sys_sysfs),	/* 135 - obsolete */
+	PROT_SYSCALL_TBL_ENTRY(sys_personality),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* for afs_syscall */
+	PROT_SYSCALL_TBL_ENTRY(sys_setfsuid),
+	PROT_SYSCALL_TBL_ENTRY(sys_setfsgid),
+	PROT_SYSCALL_TBL_ENTRY(sys_llseek),	/* 140 */
+	PROT_SYSCALL_TBL_ENTRY(sys_getdents),
+	PROT_SYSCALL_TBL_ENTRY(sys_select),
+	PROT_SYSCALL_TBL_ENTRY(sys_flock),
+	PROT_SYSCALL_TBL_ENTRY(sys_msync),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_readv),	/* 145 */
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_writev),
+	PROT_SYSCALL_TBL_ENTRY(sys_getsid),
+	PROT_SYSCALL_TBL_ENTRY(sys_fdatasync),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_sysctl),
+	PROT_SYSCALL_TBL_ENTRY(sys_mlock),	/* 150 */
+	PROT_SYSCALL_TBL_ENTRY(sys_munlock),
+	PROT_SYSCALL_TBL_ENTRY(sys_mlockall),
+	PROT_SYSCALL_TBL_ENTRY(sys_munlockall),
+	PROT_SYSCALL_TBL_ENTRY(sys_sched_setparam),
+	PROT_SYSCALL_TBL_ENTRY(sys_sched_getparam),	/* 155 */
+	PROT_SYSCALL_TBL_ENTRY(sys_sched_setscheduler),
+	PROT_SYSCALL_TBL_ENTRY(sys_sched_getscheduler),
+	PROT_SYSCALL_TBL_ENTRY(sys_sched_yield),
+
+	PROT_SYSCALL_TBL_ENTRY(sys_sched_get_priority_max),
+	PROT_SYSCALL_TBL_ENTRY(sys_sched_get_priority_min),	/* 160 */
+	PROT_SYSCALL_TBL_ENTRY(sys_sched_rr_get_interval),
+	PROT_SYSCALL_TBL_ENTRY(sys_nanosleep),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_mremap),
+	PROT_SYSCALL_TBL_ENTRY(sys_setresuid),
+	PROT_SYSCALL_TBL_ENTRY(sys_getresuid),	/* 165 */
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),
+	PROT_SYSCALL_TBL_ENTRY(sys_poll),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* 169 sys_nfsservctl */
+	PROT_SYSCALL_TBL_ENTRY(sys_setresgid),	/* 170 */
+	PROT_SYSCALL_TBL_ENTRY(sys_getresgid),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_prctl),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* 173 sys_rt_sigreturn */
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_rt_sigaction),
+	PROT_SYSCALL_TBL_ENTRY(sys_rt_sigprocmask),	/* 175 */
+	PROT_SYSCALL_TBL_ENTRY(sys_rt_sigpending),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_rt_sigtimedwait),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_rt_sigqueueinfo),
+	PROT_SYSCALL_TBL_ENTRY(sys_rt_sigsuspend),
+	PROT_SYSCALL_TBL_ENTRY(sys_pread64),	/* 180 */
+	PROT_SYSCALL_TBL_ENTRY(sys_pwrite64),
+	PROT_SYSCALL_TBL_ENTRY(sys_chown),
+	PROT_SYSCALL_TBL_ENTRY(sys_getcwd),
+	PROT_SYSCALL_TBL_ENTRY(sys_capget),
+	PROT_SYSCALL_TBL_ENTRY(sys_capset),	/* 185 */
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_sigaltstack),
+	PROT_SYSCALL_TBL_ENTRY(sys_sendfile64),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* 188 sys_getpmsg */
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* 189 sys_putpmsg */
+	PROT_SYSCALL_TBL_ENTRY(sys_vfork),	/* 190 */
+	PROT_SYSCALL_TBL_ENTRY(e2k_sys_getrlimit),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_mmap2),
+
+	/* Entries 193-194 are for BITS_PER_LONG == 32; and this is 64 bit OS */
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* 193 sys_truncate64 */
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* 194 sys_ftruncate64 */
+
+	PROT_SYSCALL_TBL_ENTRY(sys_stat64),	/* 195 */
+	PROT_SYSCALL_TBL_ENTRY(sys_lstat64),
+
+	PROT_SYSCALL_TBL_ENTRY(sys_fstat64),
+
+	/*
+	 * They are used for back compatibility
+	 */
+	PROT_SYSCALL_TBL_ENTRY(sys_lchown),
+	PROT_SYSCALL_TBL_ENTRY(sys_getuid),
+	PROT_SYSCALL_TBL_ENTRY(sys_getgid),	/* 200 */
+	PROT_SYSCALL_TBL_ENTRY(sys_geteuid),
+	PROT_SYSCALL_TBL_ENTRY(sys_getegid),
+	PROT_SYSCALL_TBL_ENTRY(sys_setreuid),
+	PROT_SYSCALL_TBL_ENTRY(sys_setregid),
+
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_pidfd_send_signal), /* 205 */
+	PROT_SYSCALL_TBL_ENTRY(sys_pidfd_open),
+
+	/*
+	 * They are used for back compatibility
+	 */
+	PROT_SYSCALL_TBL_ENTRY(sys_fchown),
+	PROT_SYSCALL_TBL_ENTRY(sys_setresuid),
+	PROT_SYSCALL_TBL_ENTRY(sys_getresuid),
+	PROT_SYSCALL_TBL_ENTRY(sys_setresgid),	/* 210 */
+	PROT_SYSCALL_TBL_ENTRY(sys_getresgid),
+	PROT_SYSCALL_TBL_ENTRY(sys_chown),
+	PROT_SYSCALL_TBL_ENTRY(sys_setuid),
+	PROT_SYSCALL_TBL_ENTRY(sys_setgid),
+	PROT_SYSCALL_TBL_ENTRY(sys_setfsuid),	/* 215 */
+	PROT_SYSCALL_TBL_ENTRY(sys_setfsgid),
+
+	PROT_SYSCALL_TBL_ENTRY(sys_pivot_root),
+	PROT_SYSCALL_TBL_ENTRY(sys_mincore),
+	PROT_SYSCALL_TBL_ENTRY(sys_madvise),
+	PROT_SYSCALL_TBL_ENTRY(sys_getdents64),	/* 220 */
+	PROT_SYSCALL_TBL_ENTRY(sys_fcntl),	/* 221 is sys_fcntl64 in fcntl.c
+						 * if BITS_PER_LONG == 32
+						 * for some other archs
+						 */
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* 223 */
+	PROT_SYSCALL_TBL_ENTRY(sys_newfstatat),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* 225 */
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/*sys_e2k_setjmp in traptable*/
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/*sys_e2k_longjmp in traptable*/
+	PROT_SYSCALL_TBL_ENTRY(sys_e2k_syswork),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* sys_clone_thread */
+	PROT_SYSCALL_TBL_ENTRY(sys_e2k_longjmp2),	/* 230 */
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),
+	PROT_SYSCALL_TBL_ENTRY(sys_setxattr),
+	PROT_SYSCALL_TBL_ENTRY(sys_lsetxattr),
+	PROT_SYSCALL_TBL_ENTRY(sys_fsetxattr),
+	PROT_SYSCALL_TBL_ENTRY(sys_getxattr),	/* 235 */
+
+	PROT_SYSCALL_TBL_ENTRY(sys_lgetxattr),
+	PROT_SYSCALL_TBL_ENTRY(sys_fgetxattr),
+	PROT_SYSCALL_TBL_ENTRY(sys_listxattr),
+	PROT_SYSCALL_TBL_ENTRY(sys_llistxattr),
+	PROT_SYSCALL_TBL_ENTRY(sys_flistxattr),	/* 240 */
+	PROT_SYSCALL_TBL_ENTRY(sys_removexattr),
+	PROT_SYSCALL_TBL_ENTRY(sys_lremovexattr),
+	PROT_SYSCALL_TBL_ENTRY(sys_fremovexattr),
+	PROT_SYSCALL_TBL_ENTRY(sys_gettid),
+	PROT_SYSCALL_TBL_ENTRY(sys_readahead),	/* 245 */
+	PROT_SYSCALL_TBL_ENTRY(sys_tkill),
+	PROT_SYSCALL_TBL_ENTRY(sys_sendfile64),
+#if defined CONFIG_FUTEX
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_futex),
+#else
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),
+#endif
+	PROT_SYSCALL_TBL_ENTRY(sys_sched_setaffinity),
+	PROT_SYSCALL_TBL_ENTRY(sys_sched_getaffinity),	/* 250 */
+	PROT_SYSCALL_TBL_ENTRY(sys_pipe2),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_set_backtrace),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_get_backtrace),
+	PROT_SYSCALL_TBL_ENTRY(sys_access_hw_stacks),
+	PROT_SYSCALL_TBL_ENTRY(sys_el_posix),	/* 255 */
+	PROT_SYSCALL_TBL_ENTRY(sys_io_uring_setup),
+	PROT_SYSCALL_TBL_ENTRY(sys_io_uring_enter),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_io_uring_register),
+	PROT_SYSCALL_TBL_ENTRY(sys_set_tid_address),
+#ifdef CONFIG_SECONDARY_SPACE_SUPPORT
+	PROT_SYSCALL_TBL_ENTRY(sys_el_binary),	/* 260 */
+#else
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* 260 */
+#endif /* CONFIG_SECONDARY_SPACE_SUPPORT */
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_timer_create),
+	PROT_SYSCALL_TBL_ENTRY(sys_timer_settime),
+	PROT_SYSCALL_TBL_ENTRY(sys_timer_gettime),
+	PROT_SYSCALL_TBL_ENTRY(sys_timer_getoverrun),
+	PROT_SYSCALL_TBL_ENTRY(sys_timer_delete),	/* 265 */
+	PROT_SYSCALL_TBL_ENTRY(sys_clock_settime),
+	PROT_SYSCALL_TBL_ENTRY(sys_clock_gettime),
+	PROT_SYSCALL_TBL_ENTRY(sys_clock_getres),
+	PROT_SYSCALL_TBL_ENTRY(sys_clock_nanosleep),
+	PROT_SYSCALL_TBL_ENTRY(sys_msgget),	/* 270 */
+	PROT_SYSCALL_TBL_ENTRY(sys_msgctl),
+	PROT_SYSCALL_TBL_ENTRY(sys_msgrcv),
+	PROT_SYSCALL_TBL_ENTRY(sys_msgsnd),
+
+	PROT_SYSCALL_TBL_ENTRY(sys_semget),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_semctl),	/* 275 */
+	PROT_SYSCALL_TBL_ENTRY(sys_semtimedop),
+	PROT_SYSCALL_TBL_ENTRY(sys_semop),
+	PROT_SYSCALL_TBL_ENTRY(sys_shmget),
+	PROT_SYSCALL_TBL_ENTRY(sys_shmctl),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_shmat),	/* 280 */
+	PROT_SYSCALL_TBL_ENTRY(sys_shmdt),
+	PROT_SYSCALL_TBL_ENTRY(sys_open_tree),
+	PROT_SYSCALL_TBL_ENTRY(sys_move_mount),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* 285 */
+	PROT_SYSCALL_TBL_ENTRY(sys_accept4),
+	PROT_SYSCALL_TBL_ENTRY(sys_sched_setattr),
+	PROT_SYSCALL_TBL_ENTRY(sys_sched_getattr),
+	PROT_SYSCALL_TBL_ENTRY(sys_ioprio_set),	/* 289 __NR_ioprio_set */
+	PROT_SYSCALL_TBL_ENTRY(sys_ioprio_get),	/* 290 __NR_ioprio_get */
+	PROT_SYSCALL_TBL_ENTRY(sys_inotify_init),/* 291 __NR_inotify_init */
+	PROT_SYSCALL_TBL_ENTRY(sys_inotify_add_watch),
+					/* 292 __NR_inotify_add_watch */
+	PROT_SYSCALL_TBL_ENTRY(sys_inotify_rm_watch),
+					/* 293 __NR_inotify_rm_watch */
+	PROT_SYSCALL_TBL_ENTRY(sys_io_setup),	/* 294 */
+	PROT_SYSCALL_TBL_ENTRY(sys_io_destroy),
+	PROT_SYSCALL_TBL_ENTRY(sys_io_getevents),
+	PROT_SYSCALL_TBL_ENTRY(sys_io_submit),
+	PROT_SYSCALL_TBL_ENTRY(sys_io_cancel),
+	PROT_SYSCALL_TBL_ENTRY(sys_fadvise64),
+	PROT_SYSCALL_TBL_ENTRY(sys_exit_group),	/* 300 */
+	PROT_SYSCALL_TBL_ENTRY(sys_lookup_dcookie),
+	PROT_SYSCALL_TBL_ENTRY(sys_epoll_create),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_epoll_ctl),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_epoll_wait),
+	PROT_SYSCALL_TBL_ENTRY(sys_remap_file_pages),
+	PROT_SYSCALL_TBL_ENTRY(sys_statfs64),
+	PROT_SYSCALL_TBL_ENTRY(sys_fstatfs64),
+	PROT_SYSCALL_TBL_ENTRY(sys_tgkill),
+	PROT_SYSCALL_TBL_ENTRY(sys_utimes),
+	PROT_SYSCALL_TBL_ENTRY(sys_fadvise64_64),	/* 310 */
+
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* __NR_vserver */
+						/* The system call isn't implemented
+						 * in the Linux 2.6.14 kernel
+						 */
+	PROT_SYSCALL_TBL_ENTRY(sys_mbind),
+
+	PROT_SYSCALL_TBL_ENTRY(sys_get_mempolicy),
+	PROT_SYSCALL_TBL_ENTRY(sys_set_mempolicy),
+	PROT_SYSCALL_TBL_ENTRY(sys_mq_open),
+	PROT_SYSCALL_TBL_ENTRY(sys_mq_unlink),
+	PROT_SYSCALL_TBL_ENTRY(sys_mq_timedsend),
+	PROT_SYSCALL_TBL_ENTRY(sys_mq_timedreceive),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_mq_notify),
+	PROT_SYSCALL_TBL_ENTRY(sys_mq_getsetattr),	/* 320 */
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_kexec_load),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_waitid),
+	PROT_SYSCALL_TBL_ENTRY(sys_add_key),
+	PROT_SYSCALL_TBL_ENTRY(sys_request_key),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_keyctl),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* sys_mcst_rt */
+	PROT_SYSCALL_TBL_ENTRY(sys_getcpu),
+	PROT_SYSCALL_TBL_ENTRY(sys_move_pages),
+	PROT_SYSCALL_TBL_ENTRY(sys_splice),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_vmsplice),	/* 330 */
+	PROT_SYSCALL_TBL_ENTRY(sys_tee),
+	PROT_SYSCALL_TBL_ENTRY(sys_migrate_pages),
+	PROT_SYSCALL_TBL_ENTRY(sys_utimensat),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_rt_tgsigqueueinfo),
+	PROT_SYSCALL_TBL_ENTRY(sys_openat),
+	PROT_SYSCALL_TBL_ENTRY(sys_mkdirat),
+	PROT_SYSCALL_TBL_ENTRY(sys_mknodat),
+	PROT_SYSCALL_TBL_ENTRY(sys_fchownat),
+	PROT_SYSCALL_TBL_ENTRY(sys_unlinkat),
+	PROT_SYSCALL_TBL_ENTRY(sys_renameat),	/* 340 */
+	PROT_SYSCALL_TBL_ENTRY(sys_linkat),
+	PROT_SYSCALL_TBL_ENTRY(sys_symlinkat),
+	PROT_SYSCALL_TBL_ENTRY(sys_readlinkat),
+	PROT_SYSCALL_TBL_ENTRY(sys_fchmodat),
+	PROT_SYSCALL_TBL_ENTRY(sys_faccessat),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_epoll_pwait),
+	PROT_SYSCALL_TBL_ENTRY(sys_signalfd4),
+	PROT_SYSCALL_TBL_ENTRY(sys_eventfd2),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_recvmmsg),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* 350 */
+#ifdef CONFIG_TIMERFD
+	PROT_SYSCALL_TBL_ENTRY(sys_timerfd_create),
+	PROT_SYSCALL_TBL_ENTRY(sys_timerfd_settime),
+	PROT_SYSCALL_TBL_ENTRY(sys_timerfd_gettime),
+#else
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),
+#endif
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_preadv),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_pwritev),
+	PROT_SYSCALL_TBL_ENTRY(sys_fallocate),
+	PROT_SYSCALL_TBL_ENTRY(sys_sync_file_range),
+	PROT_SYSCALL_TBL_ENTRY(sys_dup3),
+	PROT_SYSCALL_TBL_ENTRY(sys_inotify_init1),
+	PROT_SYSCALL_TBL_ENTRY(sys_epoll_create1),/* 360 */
+	PROT_SYSCALL_TBL_ENTRY(sys_fstatat64),
+	PROT_SYSCALL_TBL_ENTRY(sys_futimesat),
+	PROT_SYSCALL_TBL_ENTRY(sys_perf_event_open),
+	PROT_SYSCALL_TBL_ENTRY(sys_unshare),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_get_robust_list),	/* 365 */
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_set_robust_list),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_pselect6),
+	PROT_SYSCALL_TBL_ENTRY(sys_ppoll),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_setcontext),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_makecontext),	/* 370 */
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_swapcontext),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_freecontext),
+	PROT_SYSCALL_TBL_ENTRY(sys_fanotify_init),
+	PROT_SYSCALL_TBL_ENTRY(sys_fanotify_mark),
+	PROT_SYSCALL_TBL_ENTRY(e2k_sys_prlimit64),
+	PROT_SYSCALL_TBL_ENTRY(sys_clock_adjtime),
+	PROT_SYSCALL_TBL_ENTRY(sys_syncfs),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_sendmmsg),
+	PROT_SYSCALL_TBL_ENTRY(sys_setns),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_process_vm_readv),	/* 380 */
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_process_vm_writev),
+	PROT_SYSCALL_TBL_ENTRY(sys_kcmp),
+	PROT_SYSCALL_TBL_ENTRY(sys_finit_module),
+	/* added in linux-4.4 */
+	PROT_SYSCALL_TBL_ENTRY(sys_renameat2),
+	PROT_SYSCALL_TBL_ENTRY(sys_getrandom),
+	PROT_SYSCALL_TBL_ENTRY(sys_memfd_create),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_bpf),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_execveat),
+	PROT_SYSCALL_TBL_ENTRY(sys_userfaultfd),
+	PROT_SYSCALL_TBL_ENTRY(sys_membarrier),	/* 390 */
+	PROT_SYSCALL_TBL_ENTRY(sys_mlock2),
+	/* added in linux-4.9 */
+	PROT_SYSCALL_TBL_ENTRY(sys_seccomp),
+	PROT_SYSCALL_TBL_ENTRY(sys_shutdown),
+	PROT_SYSCALL_TBL_ENTRY(sys_copy_file_range),
+
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_preadv2),
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_pwritev2),
+
+	/* free (unused) items */
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),
+
+	PROT_SYSCALL_TBL_ENTRY(sys_name_to_handle_at),	/* 400 */
+	PROT_SYSCALL_TBL_ENTRY(sys_open_by_handle_at),	/* 401 */
+	PROT_SYSCALL_TBL_ENTRY(sys_statx),	/* 402 */
+	/* added for compatibility with x86_64 */
+	PROT_SYSCALL_TBL_ENTRY(sys_socket),	/* 403 */
+	PROT_SYSCALL_TBL_ENTRY(sys_connect),	/* 404 */
+	PROT_SYSCALL_TBL_ENTRY(sys_accept),	/* 405 */
+	PROT_SYSCALL_TBL_ENTRY(sys_sendto),	/* 406 */
+	PROT_SYSCALL_TBL_ENTRY(sys_recvfrom),	/* 407 */
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_sendmsg),	/* 408 */
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_recvmsg),	/* 409 */
+	PROT_SYSCALL_TBL_ENTRY(sys_bind),	/* 410 */
+	PROT_SYSCALL_TBL_ENTRY(sys_listen),	/* 411 */
+	PROT_SYSCALL_TBL_ENTRY(sys_getsockname),	/* 412 */
+	PROT_SYSCALL_TBL_ENTRY(sys_getpeername),	/* 413 */
+	PROT_SYSCALL_TBL_ENTRY(sys_socketpair),	/* 414 */
+	PROT_SYSCALL_TBL_ENTRY(sys_setsockopt),	/* 415 */
+	PROT_SYSCALL_TBL_ENTRY(sys_getsockopt),	/* 416 */
+
+	/* free (unused) items */
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* 417 */
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* 418 */
+
+	/* protected specific system calls entries */
+	PROT_SYSCALL_TBL_ENTRY(sys_arch_prctl),	/* 419 */
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_uselib),	/* 420 __NR_newuselib */
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_rt_sigaction_ex),	/* 421 */
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* 422 __NR_get_mem */
+	PROT_SYSCALL_TBL_ENTRY(sys_ni_syscall),	/* 423 __NR_free_mem */
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_clean_descriptors), /* 424 */
+	PROT_SYSCALL_TBL_ENTRY(protected_sys_unuselib),	/* 425 */
+
+	PROT_SYSCALL_TBL_ENTRY(sys_clone3),
+	PROT_SYSCALL_TBL_ENTRY(sys_fsopen),
+	PROT_SYSCALL_TBL_ENTRY(sys_fsconfig),
+	PROT_SYSCALL_TBL_ENTRY(sys_fsmount),
+
+	PROT_SYSCALL_TBL_ENTRY(sys_fspick),	/* 430 */
+
+	/* 430 last System call */
+};	/* sys_call_table_entry8 */
+
+/*
+ * Following is table of masks for pre-processing system call parameters
+ * in the function ttable_entry*_C (ttable.c).
+ * Format:
+ *	<mask>, / * syscall # * / size1,..,size6
+ * NB> Mask is hexadecimal expression of the bitmask binary value.
+ * NB> Bits in the bitmask (four bits per argument) get coded right to left
+ *	starting with the bit #4 so that:
+ *	- bit #0 is SIZE_ADJUSTMENT bit (see below);
+ *	- bits #1-3 unused for the moment;
+ *	- bits #4-7 define type of system call argument #1;
+ *	- bits #8-11 define type of system call argument #2;
+ *	- bits #12-15 define type of system call argument #3;
+ *	  and so forth thru arg #6;
+ *	- bits #28-32 - unused for the moment;
+ *	- arg type codes (see the legend below) are:
+ *	  0(L) / 1(P) / 2(?) / 3(S) / 4(I) / 5(F) / f(X);
+ * NB> Legend describes type of system call arguments; left-to-right;
+ *	starting with argument #1:
+ *	'L' - is for 'long' - this argument gets passed as-is
+ *	      to system call handler function;
+ *	'P' - is for 'pointer' - this argument would be pre-processed in
+ *	      ttable_entry8_C to convert 'long' pointer descriptor used in
+ *	      the protected mode into the 'short' one used by kernel;
+ *	'?' - may be either 'pointer' or 'long' depending on other arguments;
+ *	'S' - is for string descriptor;
+ *	'i' - is for 'int';
+ *	'F' - pointer to function (function label);
+ *	'X' - argument doesn't exist.
+ *	For example: LSLP legend is coded a system call like:
+ *	syscall( long, <string>, long, <pointer> ).
+ * NB> Size(i) specifies minimum required size for syscall argument (i).
+ * NB> Negative size means the actual value to be taken from the corresponding
+ *	syscall argument. For example, size2 value '-3' means the minimum size
+ *	for syscall argument #2 is provided thru argument #3 of the system call.
+ * If the actual value appears greater than the size of the corresponding
+ * descriptor argument, and SIZE_ADJUSTMENT bit is set to '1',
+ * then the actual size is set to the size of the descriptor.
+ 0, 16, 0, 0, 0, 0 }, + { 0x0, /* stty 31 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* gtty 32 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff430, /* access 33 Si */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* nice 34 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff10, /* ftime 35 PX ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xffffff0, /* sync 36 XX */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff440, /* kill 37 ii */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff330, /* rename 38 SS */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff430, /* mkdir 39 Si */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff30, /* rmdir 40 SX */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* dup 41 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff10, /* pipe 42 PX */ + 8, 0, 0, 0, 0, 0 }, + { 0xfffff10, /* times 43 PX */ + 32, 0, 0, 0, 0, 0 }, + { 0x0, /* prof 44 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff10, /* brk 45 ?X ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* setgid 46 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xffffff0, /* getgid 47 XX */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff100, /* signal 48 LP ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xffffff0, /* geteuid 49 XX */ + 0, 0, 0, 0, 0, 0 }, + { 0xffffff0, /* getegid 50 XX */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff30, /* acct 51 SX */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff430, /* umount2 52 Si */ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* lock 53 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff2040, /* ioctl 54 iL? */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff2440, /* fcntl 55 ii? 
*/ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* mpx 56 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff440, /* setpgid 57 ii */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff000, /* ulimit 58 LL ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff10, /* oldolduname 59 PX ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* umask 60 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff30, /* chroot 61 SX */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff140, /* ustat 62 iP */ + 0, 32, 0, 0, 0, 0 }, + { 0xffff440, /* dup2 63 ii */ + 0, 0, 0, 0, 0, 0 }, + { 0xffffff0, /* getppid 64 XX */ + 0, 0, 0, 0, 0, 0 }, + { 0xffffff0, /* getpgrp 65 XX */ + 0, 0, 0, 0, 0, 0 }, + { 0xffffff0, /* setsid 66 XX */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff1100, /* sigaction 67 LPP ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xffffff0, /* sgetmask 68 XX ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff00, /* ssetmask 69 LX ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff440, /* setreuid 70 ii */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff440, /* setregid 71 ii */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff10, /* sigsuspend 72 PX ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff10, /* sigpending 73 PX */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff430, /* sethostname 74 Si */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff140, /* setrlimit 75 iP */ + 0, 16, 0, 0, 0, 0 }, + { 0xffff140, /* getrlimit 76 iP */ + 0, 16, 0, 0, 0, 0 }, + { 0xffff140, /* getrusage 77 iP */ + 0, 144, 0, 0, 0, 0 }, + { 0xffff110, /* gettimeofday 78 PP */ + 16, 8, 0, 0, 0, 0 }, + { 0xffff110, /* settimeofday 79 PP */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff140, /* getgroups 80 iP */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff140, /* setgroups 81 iP */ + 0, 0, 0, 0, 0, 0 }, + { 0xf111140, /* select 82 iPPPP */ + 0, 128, 128, 128, 16, 0 }, + { 0xffff330, /* symlink 83 SS */ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* oldlstat 84 ni_syscall */ + 0, 88, 0, 0, 0, 0 }, + { 0xfff4131, /* readlink 85 SPi */ + 0, -3, 0, 0, 0, 0 }, + { 0xffff130, /* uselib 86 SP */ + 0, 96, 0, 0, 0, 0 }, + { 0xffff430, /* swapon 87 Si */ + 0, 0, 0, 0, 0, 0 }, + { 0xff14440, /* reboot 88 iiiP 
*/ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4140, /* readdir 89 iPi */ + 0, 0, 0, 0, 0, 0 }, + { 0x0000020, /* mmap 90 ?LLLLL */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff010, /* munmap 91 PL */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff030, /* truncate 92 SL */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff040, /* ftruncate 93 iL */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff440, /* fchmod 94 ii */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4440, /* fchown 95 iii */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff440, /* getpriority 96 ii */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4440, /* setpriority 97 iii */ + 0, 0, 0, 0, 0, 0 }, + { 0xff00010, /* profil 98 PLLL ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff130, /* statfs 99 SP */ + 0, 120, 0, 0, 0, 0 }, + { 0xffff140, /* fstatfs 100 iP */ + 0, 120, 0, 0, 0, 0 }, + { 0xfff4000, /* ioperm 101 LLi */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff140, /* socketcall 102 iP */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4141, /* syslog 103 iPi */ + 0, -3, 0, 0, 0, 0 }, + { 0xfff1140, /* setitimer 104 iPP */ + 0, 32, 32, 0, 0, 0 }, + { 0xffff140, /* getitimer 105 iP */ + 0, 32, 0, 0, 0, 0 }, + { 0xffff130, /* stat 106 SP */ + 0, 112, 0, 0, 0, 0 }, + { 0xffff130, /* lstat 107 SP */ + 0, 112, 0, 0, 0, 0 }, + { 0xffff140, /* fstat 108 iP */ + 0, 112, 0, 0, 0, 0 }, + { 0xfffff10, /* olduname 109 PX */ + 325, 0, 0, 0, 0, 0 }, + { 0xfffff00, /* iopl 110 LX ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xffffff0, /* vhangup 111 XX */ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* idle 112 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* vm86old 113 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xff14140, /* wait4 114 iPiP */ + 0, 4, 0, 144, 0, 0 }, + { 0xfffff30, /* swapoff 115 SX */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff10, /* sysinfo 116 PX */ + 112, 0, 0, 0, 0, 0 }, + { 0x0120440, /* ipc 117 iiL?PL */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* fsync 118 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* sigreturn 119 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xf211100, /* clone 120 LPPP? 
*/ + 0, 0, 0, 0, 4, 0 }, + { 0xffff411, /* setdomainname 121 Pi */ + -2, 0, 0, 0, 0, 0 }, + { 0xfffff10, /* uname 122 PX */ + 390, 0, 0, 0, 0, 0 }, + { 0xfff0100, /* modify_ldt 123 LPL ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff10, /* adjtimex 124 PX */ + 216, 0, 0, 0, 0, 0 }, + { 0xfff0011, /* mprotect 125 PLL */ + -2, 0, 0, 0, 0, 0 }, + { 0xfff1140, /* sigprocmask 126 iPP */ + 0, 8, 8, 0, 0, 0 }, + { 0xffff030, /* create_module 127 SL ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff3010, /* init_module 128 PLS */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff430, /* delete_module 129 Si */ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* get_kernel_syms 130 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xff14340, /* quotactl 131 iSiP */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* getpgid 132 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* fchdir 133 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff040, /* bdflush 134 iL [Obsolete] */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff1240, /* sysfs 135 i?P */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* personality 136 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* afs_syscall 137 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* setfsuid 138 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* setfsgid 139 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xf410040, /* _llseek 140 iLLPi */ + 0, 0, 0, 8, 0, 0 }, + { 0xfff4141, /* getdents 141 iPi */ + 0, -3, 0, 0, 0, 0 }, + { 0xf111140, /* _newselect 142 iPPPP */ + 0, 128, 128, 128, 16, 0 }, + { 0xffff440, /* flock 143 ii */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4010, /* msync 144 PLi */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff0100, /* readv 145 LPL */ + 0, 32, 0, 0, 0, 0 }, + { 0xfff0100, /* writev 146 LPL */ + 0, 32, 0, 0, 0, 0 }, + { 0xfffff40, /* getsid 147 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* fdatasync 148 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff10, /* _sysctl 149 PX */ + 128, 0, 0, 0, 0, 0 }, + { 0xffff020, /* mlock 150 ?L */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff020, /* munlock 151 ?L */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* mlockall 152 iX */ + 0, 0, 0, 0, 0, 0 }, + { 
0xffffff0, /* munlockall 153 XX */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff140, /* sched_setparam 154 iP */ + 0, 4, 0, 0, 0, 0 }, + { 0xffff140, /* sched_getparam 155 iP */ + 0, 4, 0, 0, 0, 0 }, + { 0xfff1440, /* sched_setscheduler 156 iiP */ + 0, 0, 4, 0, 0, 0 }, + { 0xfffff40, /* sched_getscheduler 157 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xffffff0, /* sched_yield 158 XX */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* sched_get_priority_max 159 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* sched_get_priority_min 160 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff140, /* sched_rr_get_interval 161 iP */ + 0, 16, 0, 0, 0, 0 }, + { 0xffff110, /* nanosleep 162 PP */ + 16, 16, 0, 0, 0, 0 }, + { 0xf200010, /* mremap 163 PLLL? */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4440, /* setresuid 164 iii */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff1110, /* getresuid 165 PPP */ + 4, 4, 4, 0, 0, 0 }, + { 0x0, /* vm86 166 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xf101010, /* query_module 167 PLPLP ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4410, /* poll 168 Pii */ + 8, 0, 0, 0, 0, 0 }, + { 0xfff1100, /* nfsservctl 169 LPP ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4440, /* setresgid 170 iii */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff1110, /* getresgid 171 PPP */ + 4, 4, 4, 0, 0, 0 }, + { 0xf022240, /* prctl 172 i???L */ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* rt_sigreturn 173 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xff01140, /* rt_sigaction 174 iPPL */ + 0, 32, 32, 0, 0, 0 }, + { 0xff01140, /* rt_sigprocmask 175 iPPL */ + 0, 8, 8, 0, 0, 0 }, + { 0xffff010, /* rt_sigpending 176 PL */ + -2, 0, 0, 0, 0, 0 }, + { 0xff01110, /* rt_sigtimedwait 177 PPPL */ + 8, 128, 16, 0, 0, 0 }, + { 0xfff1440, /* rt_sigqueueinfo 178 iiP */ + 0, 0, 128, 0, 0, 0 }, + { 0xffff010, /* rt_sigsuspend 179 PL */ + -2, 0, 0, 0, 0, 0 }, + { 0xff00141, /* pread 180 iPLL */ + 0, -3, 0, 0, 0, 0 }, + { 0xff00141, /* pwrite 181 iPLL */ + 0, -3, 0, 0, 0, 0 }, + { 0xfff4430, /* chown 182 Sii */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff011, /* getcwd 183 PL */ + -2, 0, 0, 0, 0, 0 }, + 
{ 0xffff110, /* capget 184 PP */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff110, /* capset 185 PP */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff110, /* sigaltstack 186 PP */ + 32, 32, 0, 0, 0, 0 }, + { 0xff01440, /* sendfile 187 iiPL */ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* getpmsg 188 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* putpmsg 189 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xffffff0, /* vfork 190 XX */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff140, /* ugetrlimit 191 iP */ + 0, 16, 0, 0, 0, 0 }, + { 0x0444020, /* mmap2 192 ?LiiiL */ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* truncate64 193 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* ftruncate64 194 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff130, /* stat64 195 SP */ + 0, 88, 0, 0, 0, 0 }, + { 0xffff130, /* lstat64 196 SP */ + 0, 88, 0, 0, 0, 0 }, + { 0xffff100, /* fstat64 197 LP */ + 0, 88, 0, 0, 0, 0 }, + { 0xfff4430, /* lchown32 198 Sii */ + 0, 0, 0, 0, 0, 0 }, + { 0xffffff0, /* getuid32 199 XX */ + 0, 0, 0, 0, 0, 0 }, + { 0xffffff0, /* getgid32 200 XX */ + 0, 0, 0, 0, 0, 0 }, + { 0xffffff0, /* geteuid32 201 XX */ + 0, 0, 0, 0, 0, 0 }, + { 0xffffff0, /* getegid32 202 XX */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff440, /* setreuid32 203 ii */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff440, /* setregid32 204 ii */ + 0, 0, 0, 0, 0, 0 }, + { 0xff41440, /* pidfd_send_signal 205 iiPi */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff440, /* pidfd_open 206 ii */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4440, /* fchown32 207 iii */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4440, /* setresuid32 208 iii */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff1110, /* getresuid32 209 PPP */ + 4, 4, 4, 0, 0, 0 }, + { 0xfff4440, /* setresgid32 210 iii */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff1110, /* getresgid32 211 PPP */ + 4, 4, 4, 0, 0, 0 }, + { 0xfff4430, /* chown32 212 Sii */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* setuid32 213 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* setgid32 214 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* setfsuid32 215 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* setfsgid32 216 iX */ + 0, 0, 0, 0, 0, 0 }, 
+ { 0xffff330, /* pivot_root 217 SS */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff1010, /* mincore 218 PLP */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4010, /* madvise 219 PLi */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4141, /* getdents64 220 iPi */ + 0, -3, 0, 0, 0, 0 }, + { 0xfff2440, /* fcntl64 221 ii? */ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* core 222 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4140, /* macctl 223 iPi */ + 0, 0, 0, 0, 0, 0 }, + { 0xff41340, /* newfstatat 224 iSPi */ + 0, 0, 88, 0, 0, 0 }, + { 0x0, /* emergency 225 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* e2k_sigsetjmp 226 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* e2k_longjmp 227 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xf000000, /* e2k_syswork 228 LLLLL */ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* clone2 229 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff010, /* e2k_longjmp2 230 PL */ + 64, 0, 0, 0, 0, 0 }, + { 0x0, /* soft_debug 231 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xf401330, /* setxattr 232 SSPLi */ + 0, 0, 0, 0, 0, 0 }, + { 0xf401330, /* lsetxattr 233 SSPLi */ + 0, 0, 0, 0, 0, 0 }, + { 0xf401340, /* fsetxattr 234 iSPLi */ + 0, 0, 0, 0, 0, 0 }, + { 0xff01330, /* getxattr 235 SSPL */ + 0, 0, 0, 0, 0, 0 }, + { 0xff01330, /* lgetxattr 236 SSPL */ + 0, 0, 0, 0, 0, 0 }, + { 0xff01340, /* fgetxattr 237 iSPL */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff0330, /* listxattr 238 SSL */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff0330, /* llistxattr 239 SSL */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff0340, /* flistxattr 240 iSL */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff330, /* removexattr 241 SS */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff330, /* lremovexattr 242 SS */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff340, /* fremovexattr 243 iS */ + 0, 0, 0, 0, 0, 0 }, + { 0xffffff0, /* gettid 244 XX */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff0040, /* readahead 245 iLL */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff440, /* tkill 246 ii */ + 0, 0, 0, 0, 0, 0 }, + { 0xff01440, /* sendfile64 247 iiPL */ + 0, 0, 0, 0, 0, 0 }, + { 0x4224410, /* futex 248 Pii??i */ + 4, 0, 0, 16, 4, 0 }, + { 0xfff1440, /* 
sched_setaffinity 249 iiP */ + 0, 0, -2, 0, 0, 0 }, + { 0xfff1440, /* sched_getaffinity 250 iiP */ + 0, 0, -2, 0, 0, 0 }, + { 0xffff410, /* pipe2 251 Pi */ + 8, 0, 0, 0, 0, 0 }, + { 0xff00010, /* set_backtrace 252 PLLL */ + 0, 0, 0, 0, 0, 0 }, + { 0xff00010, /* get_backtrace 253 PLLL */ + 0, 0, 0, 0, 0, 0 }, + { 0xf101100, /* access_hw_stacks 254 LPPLP */ + 0, 8, -4, 0, 8, 0 }, + { 0xff22240, /* el_posix 255 i??? */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff140, /* io_uring_setup 256 iP */ + 0, 0, 0, 0, 0, 0 }, + { 0x0144440, /* io_uring_enter 257 iiiiPL */ + 0, 0, 0, 0, 0, 0 }, + { 0xff41440, /* io_uring_register 258 iiPi */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff10, /* set_tid_address 259 PX */ + 4, 0, 0, 0, 0, 0 }, + { 0x0, /* el_binary 260 */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff1140, /* timer_create 261 iPP */ + 0, 80, 4, 0, 0, 0 }, + { 0xff11440, /* timer_settime 262 iiPP */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff140, /* timer_gettime 263 iP */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* timer_getoverrun 264 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* timer_delete 265 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff140, /* clock_settime 266 iP */ + 0, 16, 0, 0, 0, 0 }, + { 0xffff140, /* clock_gettime 267 iP */ + 0, 16, 0, 0, 0, 0 }, + { 0xffff140, /* clock_getres 268 iP */ + 0, 16, 0, 0, 0, 0 }, + { 0xff11440, /* clock_nanosleep 269 iiPP */ + 0, 0, 16, 16, 0, 0 }, + { 0xffff440, /* msgget 270 ii */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff1440, /* msgctl 271 iiP */ + 0, 0, 32, 0, 0, 0 }, + { 0xf400140, /* msgrcv 272 iPLLi */ + 0, 8, 0, 0, 0, 0 }, + { 0xff40140, /* msgsnd 273 iPLi */ + 0, 8, 0, 0, 0, 0 }, + { 0xfff4440, /* semget 274 iii */ + 0, 0, 0, 0, 0, 0 }, + { 0xff24440, /* semctl 275 iii? 
*/ + 0, 0, 0, 0, 0, 0 }, + { 0xff14140, /* semtimedop 276 iPiP */ + 0, 6, 0, 16, 0, 0 }, + { 0xfff0140, /* semop 277 iPL */ + 0, 6, 0, 0, 0, 0 }, + { 0xfff4040, /* shmget 278 iLi */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff1440, /* shmctl 279 iiP */ + 0, 0, 40, 0, 0, 0 }, + { 0xfff4240, /* shmat 280 i?i */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff10, /* shmdt 281 P */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4340, /* open_tree 282 iSi */ + 0, 0, 0, 0, 0, 0 }, + { 0xf434340, /* move_mount 283 iSiSi */ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* reserved 284 */ 0, 0, 0, 0, 0, 0 }, + { 0x0, /* reserved 285 */ 0, 0, 0, 0, 0, 0 }, + + { 0xff41140, /* accept4 286 iPPi */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4140, /* sched_setattr 287 iPi */ + 0, 0, 0, 0, 0, 0 }, + { 0xff44140, /* sched_getattr 288 iPii */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4440, /* ioprio_set 289 iii */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff440, /* ioprio_get 290 ii */ + 0, 0, 0, 0, 0, 0 }, + { 0xffffff0, /* inotify_init 291 XX */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4140, /* inotify_add_watch 292 iPi */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff440, /* inotify_rm_watch 293 ii */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff140, /* io_setup 294 iP */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff00, /* io_destroy 295 LX */ + 0, 0, 0, 0, 0, 0 }, + { 0xf110000, /* io_getevents 296 LLLPP */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff1000, /* io_submit 297 LLP */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff1100, /* io_cancel 298 LPP */ + 0, 0, 0, 0, 0, 0 }, + { 0xff40040, /* fadvise64 299 iLLi */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* exit_group 300 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff0100, /* lookup_dcookie 301 LPL */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* epoll_create 302 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xff14440, /* epoll_ctl 303 iiiP */ + 0, 0, 0, 0, 0, 0 }, + { 0xff44140, /* epoll_wait 304 iPii */ + 0, 0, 0, 0, 0, 0 }, + { 0xf000010, /* remap_file_pages 305 PLLLL */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff1030, /* statfs64 306 SLP */ + 0, 120, 0, 0, 0, 0 }, + { 0xfff1040, /* fstatfs64 307 iLP */ + 0, 120, 0, 0, 0, 0 }, + { 
0xfff4440, /* tgkill 308 iii */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff130, /* utimes 309 SP */ + 0, 0, 0, 0, 0, 0 }, + { 0xff40040, /* fadvise64_64 310 iLLi */ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* vserver 311 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0x4010010, /* mbind 312 PLLPLi */ + 0, 0, 0, 0, 0, 0 }, + { 0xf000110, /* get_mempolicy 313 PPLLL */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff0140, /* set_mempolicy 314 iPL */ + 0, 0, 0, 0, 0, 0 }, + { 0xff14430, /* mq_open 315 SiiP */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff30, /* mq_unlink (__NR_mq_open+1) SX */ + 0, 0, 0, 0, 0, 0 }, + { 0xf140140, /* mq_timedsend (__NR_mq_open+2) iPLiP */ + 0, 0, 0, 0, 0, 0 }, + { 0xf110140, /* mq_timedreceive (__NR_mq_open+3) iPLPP */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff140, /* mq_notify (__NR_mq_open+4) iP */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff1140, /* mq_getsetattr (__NR_mq_open+5) iPP */ + 0, 0, 0, 0, 0, 0 }, + { 0xff01000, /* kexec_load 321 LLPL */ + 0, 0, 0, 0, 0, 0 }, + { 0xf141440, /* waitid 322 iiPiP */ + 0, 0, 128, 0, 144, 0 }, + { 0xf401110, /* add_key 323 PPPLi */ + 0, 0, 0, 0, 0, 0 }, + { 0xff41110, /* request_key 324 PPPi */ + 0, 0, 0, 0, 0, 0 }, + { 0xf222240, /* keyctl 325 i???? 
*/ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* mcst_rt 326 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff1110, /* getcpu 327 PPP */ + 0, 0, 0, 0, 0, 0 }, + { 0x4111000, /* move_pages 328 LLPPPi */ + 0, 0, 0, 0, 0, 0 }, + { 0x4014140, /* splice 329 iPiPLi */ + 0, 0, 0, 0, 0, 0 }, + { 0xff40140, /* vmsplice 330 iPLi */ + 0, 24, 0, 0, 0, 0 }, + { 0xff40440, /* tee 331 iiLi */ + 0, 0, 0, 0, 0, 0 }, + { 0xff11040, /* migrate_pages 332 iLPP */ + 0, 0, 0, 0, 0, 0 }, + { 0xff41140, /* utimensat 333 iPPi */ + 0, 0, 0, 0, 0, 0 }, + { 0xff14440, /* rt_tgsigqueueinfo 334 iiiP */ + 0, 0, 0, 128, 0, 0 }, + { 0xff44340, /* openat 335 iSii */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4340, /* mkdirat 336 iSi */ + 0, 0, 0, 0, 0, 0 }, + { 0xff44340, /* mknodat 337 iSii */ + 0, 0, 0, 0, 0, 0 }, + { 0xf444340, /* fchownat 338 iSiii */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4340, /* unlinkat 339 iSi */ + 0, 0, 0, 0, 0, 0 }, + { 0xff34340, /* renameat 340 iSiS */ + 0, 0, 0, 0, 0, 0 }, + { 0xf434340, /* linkat 341 iSiSi */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff3430, /* symlinkat 342 SiS */ + 0, 0, 0, 0, 0, 0 }, + { 0xff41341, /* readlinkat 343 iSPi */ + 0, 0, -4, 0, 0, 0 }, + { 0xfff4340, /* fchmodat 344 iSi */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4340, /* faccessat 345 iSi */ + 0, 0, 0, 0, 0, 0 }, + { 0x0144140, /* epoll_pwait 346 iPiiPL */ + 0, 0, 0, 0, 0, 0 }, + { 0xff40140, /* signalfd4 347 iPLi */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff440, /* eventfd2 348 ii */ + 0, 0, 0, 0, 0, 0 }, + { 0xf144140, /* recvmmsg 349 iPiiP */ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* cnt_point 350 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff440, /* timerfd_create 351 ii */ + 0, 0, 0, 0, 0, 0 }, + { 0xff11440, /* timerfd_settime 352 iiPP */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff140, /* timerfd_gettime 353 iP */ + 0, 0, 0, 0, 0, 0 }, + { 0xf000100, /* preadv 354 LPLLL */ + 0, 32, 0, 0, 0, 0 }, + { 0xf000100, /* pwritev 355 LPLLL */ + 0, 32, 0, 0, 0, 0 }, + { 0xff00440, /* fallocate 356 iiLL */ + 0, 0, 0, 0, 0, 0 }, + { 0xff40040, /* sync_file_range 357 iLLi */ + 0, 
0, 0, 0, 0, 0 }, + { 0xfff4440, /* dup3 358 iii */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* inotify_init1 359 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* epoll_create1 360 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xff41340, /* fstatat64 361 iSPi */ + 0, 0, 88, 0, 0, 0 }, + { 0xfff1340, /* futimesat 362 iSP */ + 0, 0, 32, 0, 0, 0 }, + { 0xf044410, /* perf_event_open 363 PiiiL */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff00, /* unshare 364 LX */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff1140, /* get_robust_list 365 iPP */ + 0, 16, 8, 0, 0, 0 }, + { 0xffff010, /* set_robust_list 366 PL */ + 0x30, 0, 0, 0, 0, 0 }, + { 0x1111140, /* pselect6 367 iPPPPP */ + 0, 128, 128, 128, 16, 16 }, + { 0xf011410, /* ppoll 368 PiPPL */ + 8, 0, 16, 8, 0, 0 }, + { 0xffff410, /* setcontext 369 Pi */ + 332, 0, 0, 0, 0, 0 }, + { 0xf410510, /* makecontext 370 PFLPi */ + 332, 0, 0, 0, 0, 0 }, + { 0xfff4110, /* swapcontext 371 PPi */ + 332, 332, 0, 0, 0, 0 }, + { 0xfffff10, /* freecontext 372 PX */ + 332, 0, 0, 0, 0, 0 }, + { 0xffff440, /* fanotify_init 373 ii */ + 0, 0, 0, 0, 0, 0 }, + { 0xf340440, /* fanotify_mark 374 iiLiS */ + 0, 0, 0, 0, 0, 0 }, + { 0xff11440, /* prlimit64 375 iiPP */ + 0, 0, 16, 16, 0, 0 }, + { 0xffff140, /*clock_adjtime 376 iP */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff40, /* syncfs 377 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xff44140, /* sendmmsg 378 iPii */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff440, /* setns 379 ii */ + 0, 0, 0, 0, 0, 0 }, + { 0x0010140, /* process_vm_readv 380 iPLPLL */ + 0, 32, 0, 32, 0, 0 }, + { 0x0010140, /* process_vm_writev 381 iPLPLL */ + 0, 32, 0, 32, 0, 0 }, + { 0xf004440, /* kcmp 382 iiiLL */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4340, /* finit_module 383 iSi */ + 0, 0, 0, 0, 0, 0 }, + { 0xf434340, /* renameat2 384 iSiSi */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4010, /* getrandom 385 PLi */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff410, /* memfd_create 386 Pi */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4140, /* bpf 387 iPi */ + 0, 0, 0, 0, 0, 0 }, + { 0xf411340, /* execveat 388 iSPPi */ + 0, 0, 0, 0, 0, 0 }, + { 
0xfffff40, /* userfaultfd 389 iX */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff440, /* membarrier 390 ii */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4020, /* mlock2 391 ?Li */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff1440, /* seccomp 392 iiP */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff440, /* shutdown 393 ii */ + 0, 0, 0, 0, 0, 0 }, + { 0x4014140, /* copy_file_range 394 iPiPLi */ + 0, 0, 0, 0, 0, 0 }, + { 0x0000100, /* preadv2 395 LPLLLL */ + 0, 32, 0, 0, 0, 0 }, + { 0x0000100, /* pwritev2 396 LPLLLL */ + 0, 32, 0, 0, 0, 0 }, + { 0x0, /* reserved 397 */ 0, 0, 0, 0, 0, 0 }, + { 0x0, /* reserved 398 */ 0, 0, 0, 0, 0, 0 }, + { 0x0, /* reserved 399 */ 0, 0, 0, 0, 0, 0 }, + { 0xf411340, /* name_to_handle_at 400 iSPPi */ + 0, 0, 8, 0, 0, 0 }, + { 0xfff4140, /* open_by_handle_at 401 iPi */ + 0, 8, 0, 0, 0, 0 }, + { 0xf144340, /* statx 402 iSiiP */ + 0, 0, 0, 0, 256, 0 }, + { 0xfff4440, /* socket 403 iii */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4140, /* connect 404 iPi */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff1140, /* accept 405 iPP */ + 0, 0, 0, 0, 0, 0 }, + { 0x4140140, /* sendto 406 iPLiPi */ + 0, 0, 0, 0, 0, 0 }, + { 0x1140140, /* recvfrom 407 iPLiPP */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4140, /* sendmsg 408 iPi */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4140, /* recvmsg 409 iPi */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4140, /* bind 410 iPi */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff440, /* listen 411 ii */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff1140, /* getsockname 412 iPP */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff1140, /* getpeername 413 iPP */ + 0, 0, 0, 0, 0, 0 }, + { 0xff14440, /* socketpair 414 iiiP */ + 0, 0, 0, 0, 0, 0 }, + { 0xf414440, /* setsockopt 415 iiiPi */ + 0, 0, 0, 0, 0, 0 }, + { 0xf114440, /* getsockopt 416 iiiPP */ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* reserved 417 */ 0, 0, 0, 0, 0, 0 }, + { 0x0, /* reserved 418 */ 0, 0, 0, 0, 0, 0 }, + + { 0xf022240, /* arch_prctl 419 i???L */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff130, /* newuselib 420 SP */ + 0, 32, 0, 0, 0, 0 }, + { 0xff01140, /* rt_sigaction_ex 421 iPPL */ + 0, 56, 56, 0, 0, 0 }, + { 0x0, /* get_mem 
422 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0x0, /* free_mem 423 ni_syscall */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff0010, /* clean_descriptors 424 PLL */ + 0, 0, 0, 0, 0, 0 }, + { 0xfffff10, /* unuselib 425 PX */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff010, /* clone3 426 PL */ + 0, 0, 0, 0, 0, 0 }, + { 0xffff430, /* fsopen 427 Si */ + 0, 0, 0, 0, 0, 0 }, + { 0xf413440, /* fsconfig 428 iiSPi */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4440, /* fsmount 429 iii */ + 0, 0, 0, 0, 0, 0 }, + { 0xfff4340, /* fspick 430 iSi */ + 0, 0, 0, 0, 0, 0 } +}; + +/* + * System call name table: + */ +const char *sys_call_ID_to_name[NR_syscalls] = { + "restart_syscall", /* 0 */ + "exit", + "fork", + "read", + "write", + "open", /* 5 */ + "close", + "waitpid", + "creat", + "link", + "unlink", /* 10 */ + "execve", + "chdir", + "time", + "mknod", + "chmod", /* 15 */ + "lchown", + "ni_syscall", /* old break syscall holder */ + + "ni_syscall", /* old sys_stat() */ + "lseek", + "getpid", /* 20 */ + "mount", + "oldumount", + "setuid", + "getuid", + "stime", /* 25 */ + "ptrace", + "alarm", + "ni_syscall", /* old sys_fstat() */ + "pause", + "utime", /* 30 */ + "ni_syscall", /* old stty syscall holder */ + "ni_syscall", /* old gtty syscall holder */ + "access", + "nice", + "ni_syscall", /* 35, old ftime syscall */ + "sync", + "kill", + "rename", + "mkdir", + "rmdir", /* 40 */ + "dup", + "pipe", + "times", + "ni_syscall", /* old prof syscall holder */ + "brk", /* 45 */ + "setgid", + "getgid", + "ni_syscall", /* signal() have to be emulated by rt_sigaction() */ + "geteuid", + "getegid", /* 50 */ + "acct", + "umount", /* recycled never used phys() */ + "ni_syscall", /* old lock syscall holder */ + "ioctl", + + "fcntl", /* 55 */ + + "ni_syscall", /* old mpx syscall holder */ + "setpgid", + "ni_syscall", /* old ulimit syscall holder */ + "ni_syscall", /* oldolduname */ + "umask", /* 60 */ + "chroot", + "ustat", + "dup2", + "getppid", + "getpgrp", /* 65 */ + "setsid", + "ni_syscall", /* no sys_sigaction() */ + "sgetmask", + 
"ssetmask", + "setreuid", /* 70 */ + "setregid", + "ni_syscall", + "sigpending", + "sethostname", + "setrlimit", /* 75 */ + "old_getrlimit", + "getrusage", + "gettimeofday", + "settimeofday", + "getgroups", /* 80 */ + "setgroups", + "select", + "symlink", + "ni_syscall", /* old sys_lstat() */ + "readlink", /* 85 */ + "olduselib", /* obsolete syscall */ + "swapon", + "reboot", + "old_readdir", + "mmap", /* 90 */ + "munmap", + + "truncate", + "ftruncate", + + "fchmod", + "fchown", /* 95 */ + "getpriority", + "setpriority", + "ni_syscall", /* old profil syscall holder */ + "statfs", + "fstatfs", /* 100 */ + "ioperm", + "socketcall", + "syslog", + "setitimer", + "getitimer", /* 105 */ + + "newstat", /* in libc used in ptr64 mode */ + "newlstat", /* in libc used in ptr64 mode */ + "newfstat", /* in libc used in ptr64 mode */ + + "uname", + "ni_syscall", /* 110 */ + "vhangup", + "ni_syscall", /* old "idle" system call */ + "ni_syscall", + "wait4", + "swapoff", /* 115 */ + "sysinfo", + "ipc", + "fsync", + "ni_syscall", + "clone", /* 120 */ + "setdomainname", + "newuname", + "ni_syscall", + "adjtimex", + "mprotect", /* 125 */ + "sigprocmask", + "ni_syscall", + "init_module", + "delete_module", + "ni_syscall", /* 130 */ + "quotactl", + "getpgid", + "fchdir", + "bdflush", + "sysfs", /* 135 - obsolete */ + "personality", + "ni_syscall", /* for afs_syscall */ + "setfsuid", + "setfsgid", + "llseek", /* 140 */ + "getdents", + "select", + "flock", + "msync", + "readv", /* 145 */ + "writev", + "getsid", + "fdatasync", + "sysctl", + "mlock", /* 150 */ + "munlock", + "mlockall", + "munlockall", + "sched_setparam", + "sched_getparam", /* 155 */ + "sched_setscheduler", + "sched_getscheduler", + "sched_yield", + "sched_get_priority_max", + "sched_get_priority_min", /* 160 */ + "sched_rr_get_interval", + "nanosleep", + "mremap", + "setresuid", + "getresuid", /* 165 */ + "ni_syscall", + "ni_syscall", + "poll", + "ni_syscall", /* 169 sys_nfsservctl */ + "setresgid", /* 170 */ + 
"getresgid", + "prctl", + "ni_syscall", /* 173 sys_rt_sigreturn */ + "rt_sigaction", + "rt_sigprocmask", /* 175 */ + "rt_sigpending", + "rt_sigtimedwait", + "rt_sigqueueinfo", + "rt_sigsuspend", + "pread64", /* 180 */ + "pwrite64", + "chown", + "getcwd", + "capget", + "capset", /* 185 */ + "sigaltstack", + "sendfile64", + "ni_syscall", /* 188 sys_getpmsg */ + "ni_syscall", /* 189 sys_putpmsg */ + "vfork", /* 190 */ + "getrlimit", + "mmap2", + + /* Entries 193-194 are for BITS_PER_LONG == 32; and this is 64 bit OS */ + "ni_syscall", /* 193 sys_truncate64 */ + "ni_syscall", /* 194 sys_ftruncate64 */ + + "stat64", /* 195 */ + "lstat64", + "fstat64", + + "lchown", + "getuid", + "getgid", /* 200 */ + "geteuid", + "getegid", + "setreuid", + "setregid", + "pidfd_send_signal", /* 205 */ + "pidfd_open", + "fchown", + "setresuid", + "getresuid", + "setresgid", /* 210 */ + "getresgid", + "chown", + "setuid", + "setgid", + "setfsuid", /* 215 */ + "setfsgid", + "pivot_root", + "mincore", + "madvise", + "getdents64", /* 220 */ + "fcntl", /* 221 */ + "ni_syscall", + "ni_syscall", /* 223 */ + "newfstatat", + "ni_syscall", /* 225 */ + "ni_syscall", /*sys_e2k_setjmp in traptable*/ + "ni_syscall", /*sys_e2k_longjmp in traptable*/ + "e2k_syswork", + "ni_syscall", /* sys_clone_thread */ + "e2k_longjmp2", /* 230 */ + "ni_syscall", + "setxattr", + "lsetxattr", + "fsetxattr", + "getxattr", /* 235 */ + "lgetxattr", + "fgetxattr", + "listxattr", + "llistxattr", + "flistxattr", /* 240 */ + "removexattr", + "lremovexattr", + "fremovexattr", + "gettid", + "readahead", /* 245 */ + "tkill", + "sendfile64", +#if defined CONFIG_FUTEX + "futex", +#else + "ni_syscall", +#endif + "sched_setaffinity", + "sched_getaffinity", /* 250 */ + "pipe2", + "set_backtrace", + "get_backtrace", + "access_hw_stacks", + "el_posix", /* 255 */ + "io_uring_setup", + "io_uring_enter", + "io_uring_register", + "set_tid_address", +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + "el_binary", /* 260 */ +#else + "ni_syscall", /* 260 
*/ +#endif /* CONFIG_SECONDARY_SPACE_SUPPORT */ + "timer_create", + "timer_settime", + "timer_gettime", + "timer_getoverrun", + "timer_delete", /* 265 */ + "clock_settime", + "clock_gettime", + "clock_getres", + "clock_nanosleep", + "msgget", /* 270 */ + "msgctl", + "msgrcv", + "msgsnd", + "semget", + "semctl", /* 275 */ + "semtimedop", + "semop", + "shmget", + "shmctl", + "shmat", /* 280 */ + "shmdt", + "open_tree", + "move_mount", + "rseq", + "io_pgetevents", /* 285 */ + "accept4", + "sched_setattr", + "sched_getattr", + "ioprio_set", /* 289 */ + "ioprio_get", /* 290 */ + "inotify_init", /* 291 */ + "inotify_add_watch", + "inotify_rm_watch", + "io_setup", /* 294 */ + "io_destroy", + "io_getevents", + "io_submit", + "io_cancel", + "fadvise64", + "exit_group", /* 300 */ + "lookup_dcookie", + "epoll_create", + "epoll_ctl", + "epoll_wait", + "remap_file_pages", + "statfs64", + "fstatfs64", + "tgkill", + "utimes", + "fadvise64_64", /* 310 */ + + "ni_syscall", /* __NR_vserver - isn't implemented + * in the Linux 2.6.14 kernel + */ + "mbind", + "get_mempolicy", + "set_mempolicy", + "mq_open", + "mq_unlink", + "mq_timedsend", + "mq_timedreceive", + "mq_notify", + "mq_getsetattr", /* 320 */ + "kexec_load", + "waitid", + "add_key", + "request_key", + "keyctl", + "ni_syscall", /* sys_mcst_rt */ + "getcpu", + "move_pages", + "splice", + "vmsplice", /* 330 */ + "tee", + "migrate_pages", + "utimensat", + "rt_tgsigqueueinfo", + "openat", + "mkdirat", + "mknodat", + "fchownat", + "unlinkat", + "renameat", /* 340 */ + "linkat", + "symlinkat", + "readlinkat", + "fchmodat", + "faccessat", + "epoll_pwait", + "signalfd4", + "eventfd2", + "recvmmsg", + "ni_syscall", /* 350 */ +#ifdef CONFIG_TIMERFD + "timerfd_create", + "timerfd_settime", + "timerfd_gettime", +#else + "ni_syscall", + "ni_syscall", + "ni_syscall", +#endif + "preadv", + "pwritev", + "fallocate", + "sync_file_range", + "dup3", + "inotify_init1", + "epoll_create1", /* 360 */ + "fstatat64", + "futimesat", + 
"perf_event_open", + "unshare", + "get_robust_list", /* 365 */ + "set_robust_list", + "pselect6", + "ppoll", + "setcontext", + "makecontext", /* 370 */ + "swapcontext", + "freecontext", + "fanotify_init", + "fanotify_mark", + "prlimit64", + "clock_adjtime", + "syncfs", + "sendmmsg", + "setns", + "process_vm_readv", /* 380 */ + "process_vm_writev", + "kcmp", + "finit_module", + /* added in linux-4.4 */ + "renameat2", + "getrandom", /* 385 */ + "memfd_create", + "bpf", + "execveat", + "userfaultfd", + "membarrier", /* 390 */ + "mlock2", + /* added in linux-4.9 */ + "seccomp", + "shutdown", + "copy_file_range", + "preadv2", /* 395 */ + "pwritev2", + + /* free (unused) items */ + "ni_syscall", /* 397 */ + "ni_syscall", /* 398 */ + "ni_syscall", /* 399 */ + + "name_to_handle_at", /* 400 */ + "open_by_handle_at", /* 401 */ + "statx", /* 402 */ + /* added for compatibility with x86_64 */ + "socket", /* 403 */ + "connect", /* 404 */ + "accept", /* 405 */ + "sendto", /* 406 */ + "recvfrom", /* 407 */ + "sendmsg", /* 408 */ + "recvmsg", /* 409 */ + "bind", /* 410 */ + "listen", /* 411 */ + "getsockname", /* 412 */ + "getpeername", /* 413 */ + "socketpair", /* 414 */ + "setsockopt", /* 415 */ + "getsockopt", /* 416 */ + + /* free (unused) items */ + "ni_syscall", /* 417 */ + "ni_syscall", /* 418 */ + + /* protected specific system calls entries */ + "arch_prctl", /* 419 */ + "uselib", /* 420 __NR_newuselib */ + "rt_sigaction_ex", /* 421 */ + "ni_syscall", /* 422 __NR_get_mem */ + "ni_syscall", /* 423 __NR_free_mem */ + "clean_descriptors", /* 424 */ + "unuselib", /* 425 */ + + "clone3", + "fsopen", + "fsconfig", + "fsmount", + "fspick", /* 430 */ +}; + + +/* For the deprecated 4th syscall entry. + * Since this system call entry is deprecated we use + * sys_ni_syscall for all new entries from now on. 
*/ +const system_call_func sys_call_table_deprecated[NR_syscalls] = +{ + SYSTEM_CALL_TBL_ENTRY(sys_restart_syscall), /* 0 */ + SYSTEM_CALL_TBL_ENTRY(sys_exit), + SYSTEM_CALL_TBL_ENTRY(sys_fork), + SYSTEM_CALL_TBL_ENTRY(sys_read), + SYSTEM_CALL_TBL_ENTRY(sys_write), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_open), /* 5 */ + SYSTEM_CALL_TBL_ENTRY(sys_close), + SYSTEM_CALL_TBL_ENTRY(sys_waitpid), + SYSTEM_CALL_TBL_ENTRY(sys_creat), + SYSTEM_CALL_TBL_ENTRY(sys_link), + SYSTEM_CALL_TBL_ENTRY(sys_unlink), /* 10 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(e2k_sys_execve), + SYSTEM_CALL_TBL_ENTRY(sys_chdir), + SYSTEM_CALL_TBL_ENTRY(sys_time32), + SYSTEM_CALL_TBL_ENTRY(sys_mknod), + SYSTEM_CALL_TBL_ENTRY(sys_chmod), /* 15 */ + SYSTEM_CALL_TBL_ENTRY(sys_lchown), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old break syscall holder */ + + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old sys_stat() */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_lseek), + SYSTEM_CALL_TBL_ENTRY(sys_getpid), /* 20 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_mount), + SYSTEM_CALL_TBL_ENTRY(sys_oldumount), + SYSTEM_CALL_TBL_ENTRY(sys_setuid), + SYSTEM_CALL_TBL_ENTRY(sys_getuid), + SYSTEM_CALL_TBL_ENTRY(sys_stime32), /* 25 */ + SYSTEM_CALL_TBL_ENTRY(sys_ptrace), + SYSTEM_CALL_TBL_ENTRY(sys_alarm), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old sys_fstat() */ + SYSTEM_CALL_TBL_ENTRY(sys_pause), + SYSTEM_CALL_TBL_ENTRY(sys_utime),/* 30 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old stty syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old gtty syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_access), + SYSTEM_CALL_TBL_ENTRY(sys_nice), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 35, old ftime syscall */ + SYSTEM_CALL_TBL_ENTRY(sys_sync), + SYSTEM_CALL_TBL_ENTRY(sys_kill), + SYSTEM_CALL_TBL_ENTRY(sys_rename), + SYSTEM_CALL_TBL_ENTRY(sys_mkdir), + SYSTEM_CALL_TBL_ENTRY(sys_rmdir), /* 40 */ + SYSTEM_CALL_TBL_ENTRY(sys_dup), + SYSTEM_CALL_TBL_ENTRY(sys_pipe), + SYSTEM_CALL_TBL_ENTRY(sys_times), + 
SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old prof syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_brk), /* 45 */ + SYSTEM_CALL_TBL_ENTRY(sys_setgid), + SYSTEM_CALL_TBL_ENTRY(sys_getgid), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* signal() have to be */ + /* emulated by rt_sigaction() */ + /* on user level (GLIBC) */ + SYSTEM_CALL_TBL_ENTRY(sys_geteuid), + SYSTEM_CALL_TBL_ENTRY(sys_getegid), /* 50 */ + SYSTEM_CALL_TBL_ENTRY(sys_acct), + SYSTEM_CALL_TBL_ENTRY(sys_umount), /* recycled never used phys() */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old lock syscall holder */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_ioctl), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_fcntl),/* 55 */ /* for 64 & 32 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old mpx syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_setpgid), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old ulimit syscall holder */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_umask), /* 60 */ + SYSTEM_CALL_TBL_ENTRY(sys_chroot), + SYSTEM_CALL_TBL_ENTRY(sys_ustat), + SYSTEM_CALL_TBL_ENTRY(sys_dup2), + SYSTEM_CALL_TBL_ENTRY(sys_getppid), + SYSTEM_CALL_TBL_ENTRY(sys_getpgrp), /* 65 */ + SYSTEM_CALL_TBL_ENTRY(sys_setsid), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* no sys_sigaction(), use */ + SYSTEM_CALL_TBL_ENTRY(sys_sgetmask), /* sys_rt_sigaction() instead */ + SYSTEM_CALL_TBL_ENTRY(sys_ssetmask), + SYSTEM_CALL_TBL_ENTRY(sys_setreuid), /* 70 */ + SYSTEM_CALL_TBL_ENTRY(sys_setregid), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_sigpending), + SYSTEM_CALL_TBL_ENTRY(sys_sethostname), + SYSTEM_CALL_TBL_ENTRY(e2k_sys_setrlimit), /* 75 */ + SYSTEM_CALL_TBL_ENTRY(e2k_sys_getrlimit), + SYSTEM_CALL_TBL_ENTRY(sys_getrusage), + SYSTEM_CALL_TBL_ENTRY(sys_gettimeofday), + SYSTEM_CALL_TBL_ENTRY(sys_settimeofday), + SYSTEM_CALL_TBL_ENTRY(sys_getgroups), /* 80 */ + SYSTEM_CALL_TBL_ENTRY(sys_setgroups), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_symlink), + 
SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old sys_lstat() */ + SYSTEM_CALL_TBL_ENTRY(sys_readlink), /* 85 */ + SYSTEM_CALL_TBL_ENTRY(sys_uselib), + SYSTEM_CALL_TBL_ENTRY(sys_swapon), + SYSTEM_CALL_TBL_ENTRY(sys_reboot), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_old_readdir), + SYSTEM_CALL_TBL_ENTRY(sys_mmap), /* 90 */ + SYSTEM_CALL_TBL_ENTRY(sys_munmap), + + SYSTEM_CALL_TBL_ENTRY(sys_truncate), + SYSTEM_CALL_TBL_ENTRY(sys_ftruncate), + + SYSTEM_CALL_TBL_ENTRY(sys_fchmod), + SYSTEM_CALL_TBL_ENTRY(sys_fchown), /* 95 */ + SYSTEM_CALL_TBL_ENTRY(sys_getpriority), + SYSTEM_CALL_TBL_ENTRY(sys_setpriority), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old profil syscall holder */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_statfs), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_fstatfs), /* 100 */ + SYSTEM_CALL_TBL_ENTRY(sys_ioperm), + SYSTEM_CALL_TBL_ENTRY(sys_socketcall), + SYSTEM_CALL_TBL_ENTRY(sys_syslog), + SYSTEM_CALL_TBL_ENTRY(sys_setitimer), + SYSTEM_CALL_TBL_ENTRY(sys_getitimer), /* 105 */ + + SYSTEM_CALL_TBL_ENTRY(sys_newstat), + SYSTEM_CALL_TBL_ENTRY(sys_newlstat), + SYSTEM_CALL_TBL_ENTRY(sys_newfstat), + + SYSTEM_CALL_TBL_ENTRY(sys_uname), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 110 */ + SYSTEM_CALL_TBL_ENTRY(sys_vhangup), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* old "idle" system call */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_wait4), + SYSTEM_CALL_TBL_ENTRY(sys_swapoff), /* 115 */ + SYSTEM_CALL_TBL_ENTRY(sys_sysinfo), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_ipc), + SYSTEM_CALL_TBL_ENTRY(sys_fsync), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_clone), /* 120 */ + SYSTEM_CALL_TBL_ENTRY(sys_setdomainname), + SYSTEM_CALL_TBL_ENTRY(sys_newuname), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_adjtimex_time32), + SYSTEM_CALL_TBL_ENTRY(sys_mprotect), /* 125 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_sigprocmask), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_init_module), + 
SYSTEM_CALL_TBL_ENTRY(sys_delete_module), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 130 */ + SYSTEM_CALL_TBL_ENTRY(sys_quotactl), + SYSTEM_CALL_TBL_ENTRY(sys_getpgid), + SYSTEM_CALL_TBL_ENTRY(sys_fchdir), + SYSTEM_CALL_TBL_ENTRY(sys_bdflush), + SYSTEM_CALL_TBL_ENTRY(sys_sysfs), /* 135 */ + SYSTEM_CALL_TBL_ENTRY(sys_personality), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* for afs_syscall */ + SYSTEM_CALL_TBL_ENTRY(sys_setfsuid), + SYSTEM_CALL_TBL_ENTRY(sys_setfsgid), + SYSTEM_CALL_TBL_ENTRY(sys_llseek), /* 140 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_getdents), + SYSTEM_CALL_TBL_ENTRY(sys_select), + SYSTEM_CALL_TBL_ENTRY(sys_flock), + SYSTEM_CALL_TBL_ENTRY(sys_msync), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_readv), /* 145 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_writev), + SYSTEM_CALL_TBL_ENTRY(sys_getsid), + SYSTEM_CALL_TBL_ENTRY(sys_fdatasync), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_sysctl), + SYSTEM_CALL_TBL_ENTRY(sys_mlock), /* 150 */ + SYSTEM_CALL_TBL_ENTRY(sys_munlock), + SYSTEM_CALL_TBL_ENTRY(sys_mlockall), + SYSTEM_CALL_TBL_ENTRY(sys_munlockall), + SYSTEM_CALL_TBL_ENTRY(sys_sched_setparam), + SYSTEM_CALL_TBL_ENTRY(sys_sched_getparam), /* 155 */ + SYSTEM_CALL_TBL_ENTRY(sys_sched_setscheduler), + SYSTEM_CALL_TBL_ENTRY(sys_sched_getscheduler), + SYSTEM_CALL_TBL_ENTRY(sys_sched_yield), + SYSTEM_CALL_TBL_ENTRY(sys_sched_get_priority_max), + SYSTEM_CALL_TBL_ENTRY(sys_sched_get_priority_min), /* 160 */ + SYSTEM_CALL_TBL_ENTRY(sys_sched_rr_get_interval_time32), + SYSTEM_CALL_TBL_ENTRY(sys_nanosleep), + SYSTEM_CALL_TBL_ENTRY(sys_mremap), + SYSTEM_CALL_TBL_ENTRY(sys_setresuid), + SYSTEM_CALL_TBL_ENTRY(sys_getresuid), /* 165 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_poll), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* was sys_nfsservctl */ + SYSTEM_CALL_TBL_ENTRY(sys_setresgid), /* 170 */ + SYSTEM_CALL_TBL_ENTRY(sys_getresgid), + SYSTEM_CALL_TBL_ENTRY(sys_prctl), + SYSTEM_CALL_TBL_ENTRY(sys_deprecated), /* 
sys_rt_sigreturn() */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_rt_sigaction), + SYSTEM_CALL_TBL_ENTRY(sys_rt_sigprocmask), /* 175 */ + SYSTEM_CALL_TBL_ENTRY(sys_rt_sigpending), + SYSTEM_CALL_TBL_ENTRY(sys_rt_sigtimedwait_time32), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_rt_sigqueueinfo), + SYSTEM_CALL_TBL_ENTRY(sys_rt_sigsuspend), + SYSTEM_CALL_TBL_ENTRY(sys32_pread64), /* 180 */ + SYSTEM_CALL_TBL_ENTRY(sys32_pwrite64), + SYSTEM_CALL_TBL_ENTRY(sys_chown), + SYSTEM_CALL_TBL_ENTRY(sys_getcwd), + SYSTEM_CALL_TBL_ENTRY(sys_capget), + SYSTEM_CALL_TBL_ENTRY(sys_capset), /* 185 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_sigaltstack), + SYSTEM_CALL_TBL_ENTRY(sys_sendfile64), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* streams1 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* streams2 */ + SYSTEM_CALL_TBL_ENTRY(sys_vfork), /* 190 */ + SYSTEM_CALL_TBL_ENTRY(e2k_sys_getrlimit), + SYSTEM_CALL_TBL_ENTRY(sys_mmap2), + + SYSTEM_CALL_TBL_ENTRY(sys32_truncate64), + SYSTEM_CALL_TBL_ENTRY(sys32_ftruncate64), + SYSTEM_CALL_TBL_ENTRY(sys_stat64), /* 195 , in libc used in ptr32 mode */ + SYSTEM_CALL_TBL_ENTRY(sys_lstat64), /* in libc used in ptr32 mode */ + SYSTEM_CALL_TBL_ENTRY(sys_fstat64), /* in libc used in ptr32 mode */ + + /* + * They are used for back compatibility + */ + SYSTEM_CALL_TBL_ENTRY(sys_lchown), + SYSTEM_CALL_TBL_ENTRY(sys_getuid), + SYSTEM_CALL_TBL_ENTRY(sys_getgid), /* 200 */ + SYSTEM_CALL_TBL_ENTRY(sys_geteuid), + SYSTEM_CALL_TBL_ENTRY(sys_getegid), + SYSTEM_CALL_TBL_ENTRY(sys_setreuid), + SYSTEM_CALL_TBL_ENTRY(sys_setregid), + + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 205 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + + /* + * They are used for back compatibility + */ + SYSTEM_CALL_TBL_ENTRY(sys_fchown), + SYSTEM_CALL_TBL_ENTRY(sys_setresuid), + SYSTEM_CALL_TBL_ENTRY(sys_getresuid), + SYSTEM_CALL_TBL_ENTRY(sys_setresgid), /* 210 */ + SYSTEM_CALL_TBL_ENTRY(sys_getresgid), + SYSTEM_CALL_TBL_ENTRY(sys_chown), + SYSTEM_CALL_TBL_ENTRY(sys_setuid), + SYSTEM_CALL_TBL_ENTRY(sys_setgid), + 
SYSTEM_CALL_TBL_ENTRY(sys_setfsuid), /* 215 */ + SYSTEM_CALL_TBL_ENTRY(sys_setfsgid), + + SYSTEM_CALL_TBL_ENTRY(sys_pivot_root), + SYSTEM_CALL_TBL_ENTRY(sys_mincore), + SYSTEM_CALL_TBL_ENTRY(sys_madvise), + SYSTEM_CALL_TBL_ENTRY(sys_getdents64), /* 220 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_fcntl), + /* + * 221 is sys_fcntl64 in fcntl.c + * if BITS_PER_LONG == 32 + * for some other archs + */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 223 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 225 */ + SYSTEM_CALL_TBL_ENTRY(sys_deprecated), /*sys_e2k_setjmp */ + SYSTEM_CALL_TBL_ENTRY(sys_deprecated), /*sys_e2k_longjmp*/ + SYSTEM_CALL_TBL_ENTRY(sys_e2k_syswork), + SYSTEM_CALL_TBL_ENTRY(sys_clone_thread), + SYSTEM_CALL_TBL_ENTRY(sys_e2k_longjmp2), /* 230 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_setxattr), + SYSTEM_CALL_TBL_ENTRY(sys_lsetxattr), + SYSTEM_CALL_TBL_ENTRY(sys_fsetxattr), + SYSTEM_CALL_TBL_ENTRY(sys_getxattr), /* 235 */ + SYSTEM_CALL_TBL_ENTRY(sys_lgetxattr), + SYSTEM_CALL_TBL_ENTRY(sys_fgetxattr), + SYSTEM_CALL_TBL_ENTRY(sys_listxattr), + SYSTEM_CALL_TBL_ENTRY(sys_llistxattr), + SYSTEM_CALL_TBL_ENTRY(sys_flistxattr), /* 240 */ + SYSTEM_CALL_TBL_ENTRY(sys_removexattr), + SYSTEM_CALL_TBL_ENTRY(sys_lremovexattr), + SYSTEM_CALL_TBL_ENTRY(sys_fremovexattr), + SYSTEM_CALL_TBL_ENTRY(sys_gettid), + SYSTEM_CALL_TBL_ENTRY(sys32_readahead), /* 245 */ + SYSTEM_CALL_TBL_ENTRY(sys_tkill), + SYSTEM_CALL_TBL_ENTRY(sys_sendfile64), +#if defined CONFIG_FUTEX + SYSTEM_CALL_TBL_ENTRY(sys_futex_time32), +#else + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), +#endif + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_sched_setaffinity), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_sched_getaffinity), /* 250 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + 
SYSTEM_CALL_TBL_ENTRY(sys_el_posix), /* 255 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 256 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_set_tid_address), +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + SYSTEM_CALL_TBL_ENTRY(sys_el_binary), /* 260 */ +#else + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 260 */ +#endif /* CONFIG_SECONDARY_SPACE_SUPPORT */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_timer_create), + SYSTEM_CALL_TBL_ENTRY(sys_timer_settime32), + SYSTEM_CALL_TBL_ENTRY(sys_timer_gettime32), + SYSTEM_CALL_TBL_ENTRY(sys_timer_getoverrun), + SYSTEM_CALL_TBL_ENTRY(sys_timer_delete), /* 265 */ + SYSTEM_CALL_TBL_ENTRY(sys_clock_settime32), + SYSTEM_CALL_TBL_ENTRY(sys_clock_gettime32), + SYSTEM_CALL_TBL_ENTRY(sys_clock_getres_time32), + SYSTEM_CALL_TBL_ENTRY(sys_clock_nanosleep_time32), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 270 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 275 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 280 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* 285 */ + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), + SYSTEM_CALL_TBL_ENTRY(sys_ioprio_set), /* 289 __NR_ioprio_set */ + SYSTEM_CALL_TBL_ENTRY(sys_ioprio_get), /* 290 __NR_ioprio_get */ + SYSTEM_CALL_TBL_ENTRY(sys_inotify_init),/* 291 __NR_inotify_init */ + SYSTEM_CALL_TBL_ENTRY(sys_inotify_add_watch), + /* 292 __NR_inotify_add_watch */ + 
SYSTEM_CALL_TBL_ENTRY(sys_inotify_rm_watch), + /* 293 __NR_inotify_rm_watch */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_io_setup), /* 294 */ + SYSTEM_CALL_TBL_ENTRY(sys_io_destroy), + SYSTEM_CALL_TBL_ENTRY(sys_io_getevents_time32), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_io_submit), + SYSTEM_CALL_TBL_ENTRY(sys_io_cancel), + SYSTEM_CALL_TBL_ENTRY(sys32_fadvise64), + SYSTEM_CALL_TBL_ENTRY(sys_exit_group), /* 300 */ + SYSTEM_CALL_TBL_ENTRY(sys_lookup_dcookie), + SYSTEM_CALL_TBL_ENTRY(sys_epoll_create), + SYSTEM_CALL_TBL_ENTRY(sys_epoll_ctl), + SYSTEM_CALL_TBL_ENTRY(sys_epoll_wait), + SYSTEM_CALL_TBL_ENTRY(sys_remap_file_pages), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_statfs64), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_fstatfs64), + SYSTEM_CALL_TBL_ENTRY(sys_tgkill), + SYSTEM_CALL_TBL_ENTRY(sys_utimes_time32), + SYSTEM_CALL_TBL_ENTRY(sys32_fadvise64_64), /* 310 */ + + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), /* __NR_vserver */ + /*The system call isn't implemented in the Linux 2.6.14 + * kernel */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_mbind), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_get_mempolicy), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_set_mempolicy), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_mq_open), + SYSTEM_CALL_TBL_ENTRY(sys_mq_unlink), + SYSTEM_CALL_TBL_ENTRY(sys_mq_timedsend_time32), + SYSTEM_CALL_TBL_ENTRY(sys_mq_timedreceive_time32), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_mq_notify), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_mq_getsetattr), /* 320 */ + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_kexec_load), + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_waitid), + SYSTEM_CALL_TBL_ENTRY(sys_add_key), + SYSTEM_CALL_TBL_ENTRY(sys_request_key), +#ifdef CONFIG_KEYS_COMPAT + COMPAT_SYSTEM_CALL_TBL_ENTRY(sys_keyctl), +#else + SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall), +#endif + /* This system call entry is deprecated so use + * sys_ni_syscall for all entries from now on. */ + [__NR_keyctl + 1 ... 
NR_syscalls - 1] = SYSTEM_CALL_TBL_ENTRY(sys_ni_syscall) +}; + diff --git a/arch/e2k/kernel/time.c b/arch/e2k/kernel/time.c new file mode 100644 index 000000000000..03a20b73309d --- /dev/null +++ b/arch/e2k/kernel/time.c @@ -0,0 +1,101 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef DEBUG_TIMER_MODE +#undef DebugTM +#define DEBUG_TIMER_MODE 0 /* timer and time */ +#define DebugTM(...) DebugPrint(DEBUG_TIMER_MODE ,##__VA_ARGS__) + +extern ktime_t tick_period; +u64 cpu_clock_psec; /* number of pikoseconds in one CPU clock */ +EXPORT_SYMBOL(cpu_clock_psec); + +extern struct clocksource clocksource_jiffies; +void __init arch_clock_setup(void) +{ + arch_clock_init(); +} + +extern struct machdep machine; + +#if defined(CONFIG_SMP) +unsigned long profile_pc(struct pt_regs *regs) +{ + unsigned long pc = instruction_pointer(regs); + + if (in_lock_functions(pc)) { + return get_nested_kernel_IP(regs, 1); + } + + return pc; +} +EXPORT_SYMBOL(profile_pc); +#endif + +static irqreturn_t timer_interrupt(int irq, void *dev_id) +{ + global_clock_event->event_handler(global_clock_event); + + return IRQ_HANDLED; +} + +static struct irqaction irq0 = { + .handler = timer_interrupt, + .flags = IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER, + .name = "timer" +}; + +void __init native_time_init(void) +{ + int ret; + DebugTM("entered\n"); + + /* Initialize external timer */ + setup_lt_timer(); + + ret = setup_irq(0, &irq0); + if (ret) { + printk("Could not setup IRQ #%02x as timer interrupt, error " + "%d\n", 0, ret); + return; + } + + DebugTM("time_init exited.\n"); +} + +#ifdef CONFIG_PARAVIRT +/* It need only to account stolen time by guest */ +/* native kernel and host has not stolen time */ + +struct static_key paravirt_steal_enabled = STATIC_KEY_INIT_FALSE; + +unsigned 
long native_steal_clock(int cpu) +{ + /* none steal time */ + return 0; +} +#endif /* CONFIG_PARAVIRT */ diff --git a/arch/e2k/kernel/topology.c b/arch/e2k/kernel/topology.c new file mode 100644 index 000000000000..8760b7ceb768 --- /dev/null +++ b/arch/e2k/kernel/topology.c @@ -0,0 +1,189 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * This file contains NUMA specific variables and functions which can + * be split away from DISCONTIGMEM and are used on NUMA machines with + * contiguous memory. + * 2002/08/07 Erich Focht + * Populate cpu entries in sysfs for non-numa systems as well + * Intel Corporation - Ashok Raj + * Port to E2K + * MCST - 2009/11/18 Evgeny Kravtsunov + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#undef DEBUG_TOPOLOGY_MODE +#undef DebugT +#define DEBUG_TOPOLOGY_MODE 0 /* topology */ +#define DebugT if (DEBUG_TOPOLOGY_MODE) printk + + +#ifdef CONFIG_NUMA +static struct node *sysfs_nodes; +#endif + +static struct cpu *sysfs_cpus; + + +int __ref arch_register_cpu(int num) +{ +#ifdef CONFIG_HOTPLUG_CPU + sysfs_cpus[num].hotpluggable = 1; +#endif + + return register_cpu(&sysfs_cpus[num], num); +} + +#ifdef CONFIG_HOTPLUG_CPU +EXPORT_SYMBOL(arch_register_cpu); + +void arch_unregister_cpu(int num) +{ + return unregister_cpu(&sysfs_cpus[num]); +} +EXPORT_SYMBOL(arch_unregister_cpu); +#endif + +/* maps the cpu to the sched domain representing multi-core */ +const struct cpumask *cpu_coregroup_mask(int cpu) +{ + return cpumask_of_node(cpu_to_node(cpu)); +} + +static int __init topology_init(void) +{ + int i, err = 0; + +#ifdef CONFIG_NUMA + sysfs_nodes = kmalloc(sizeof(struct node) * MAX_NUMNODES, GFP_KERNEL); + if (!sysfs_nodes) { + err = -ENOMEM; + goto out; + } + memset(sysfs_nodes, 0, sizeof(struct node) * MAX_NUMNODES); + + for_each_online_node(i) 
+ if ((err = register_one_node(i))) + goto out; +#endif + + sysfs_cpus = kmalloc(sizeof(sysfs_cpus[0]) * NR_CPUS, GFP_KERNEL); + if (!sysfs_cpus) { + err = -ENOMEM; + goto out; + } + memset(sysfs_cpus, 0, sizeof(sysfs_cpus[0]) * NR_CPUS); + + for_each_possible_cpu(i) + if ((err = arch_register_cpu(i))) + goto out; + +out: + return err; +} +subsys_initcall(topology_init); + +int __init_recv cpuid_to_cpu(int cpuid) +{ + int cpu = 0; + + for (; cpu < NR_CPUS; cpu++) + if (cpu_to_cpuid(cpu) == cpuid) + return cpu; + + BUG(); +} + +#ifdef CONFIG_NUMA +s16 __apicid_to_node[NR_CPUS] = { + [0 ... NR_CPUS-1] = NUMA_NO_NODE +}; + +int __nodedata __cpu_to_node[NR_CPUS]; +EXPORT_SYMBOL(__cpu_to_node); + +cpumask_t __nodedata __node_to_cpu_mask[MAX_NUMNODES]; + +static int __init numa_cpu_node(int cpu) +{ + int apicid = early_per_cpu(x86_cpu_to_apicid, cpu); + + BUG_ON(apicid == BAD_APICID); + BUG_ON(__apicid_to_node[apicid] == NUMA_NO_NODE); + + return __apicid_to_node[apicid]; +} + +static void __init cpu_to_node_init(void) +{ + int cpu, node; + + for_each_possible_cpu(cpu) { + __cpu_to_node[cpu] = numa_cpu_node(cpu); + DebugT("__cpu_to_node[%d]=%d\n", cpu, __cpu_to_node[cpu]); + } + + for_each_node_has_dup_kernel(node) { + int *nid_cpu_to_node = __va(vpa_to_pa( + node_kernel_va_to_pa( + node, __cpu_to_node))); + + memcpy(nid_cpu_to_node, __cpu_to_node, sizeof(__cpu_to_node)); + } +} + +static void __init node_to_cpu_mask_init(void) +{ + int cpu, node; + + for_each_node(node) + cpumask_clear(&__node_to_cpu_mask[node]); + + for_each_possible_cpu(cpu) { + node = __cpu_to_node[cpu]; + cpumask_set_cpu(cpu, &__node_to_cpu_mask[node]); + DebugT("__node_to_cpu_mask[%d]=0x%lx\n", + node, __node_to_cpu_mask[node].bits[0]); + } + + for_each_node_has_dup_kernel(node) { + cpumask_t *nid_node_to_cpu_mask; + + nid_node_to_cpu_mask = + __va(vpa_to_pa(node_kernel_va_to_pa( + node, __node_to_cpu_mask))); + + memcpy(nid_node_to_cpu_mask, __node_to_cpu_mask, + sizeof(__node_to_cpu_mask)); + } 
+} + +static void __init update_numa_possible_map(void) +{ + int cpu; + + nodes_clear(node_possible_map); + for_each_possible_cpu(cpu) + node_set(__cpu_to_node[cpu], node_possible_map); +} + +void __init numa_init(void) +{ + cpu_to_node_init(); + node_to_cpu_mask_init(); + update_numa_possible_map(); +} +#endif diff --git a/arch/e2k/kernel/trace.c b/arch/e2k/kernel/trace.c new file mode 100644 index 000000000000..6b63fe93614b --- /dev/null +++ b/arch/e2k/kernel/trace.c @@ -0,0 +1,2 @@ +#define CREATE_TRACE_POINTS +#include diff --git a/arch/e2k/kernel/trace_clock.c b/arch/e2k/kernel/trace_clock.c new file mode 100644 index 000000000000..68dd6b9348bc --- /dev/null +++ b/arch/e2k/kernel/trace_clock.c @@ -0,0 +1,20 @@ +/* + * e2k trace clocks + */ + +#include + +#include +#include +#include + +/* + * trace_clock_e2k_clkr(): A clock that is just the cycle counter. + * + * Unlike the other clocks, this is not in nanoseconds. + */ +__section(".entry.text") +notrace u64 trace_clock_e2k_clkr(void) +{ + return get_cycles(); +} diff --git a/arch/e2k/kernel/trace_stack.c b/arch/e2k/kernel/trace_stack.c new file mode 100644 index 000000000000..d1abd83e2597 --- /dev/null +++ b/arch/e2k/kernel/trace_stack.c @@ -0,0 +1,641 @@ +/* + * Copyright (C) 2008 Steven Rostedt + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../../../kernel/trace/trace.h" + +#include +#include +#include + +#define STACK_TRACE_ENTRIES 500 + +struct extended_stack_trace { + unsigned int nr_entries, max_entries; + unsigned long *entries; + unsigned long *sizes; + int skip; /* input argument: How many entries to skip */ +}; + + +static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] = { + [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX +}; +static unsigned long stack_dumps_sizes[STACK_TRACE_ENTRIES+1] = { + [0 ... 
(STACK_TRACE_ENTRIES)] = ULONG_MAX +}; + +static struct extended_stack_trace max_stack_trace = { + .max_entries = STACK_TRACE_ENTRIES, + .entries = stack_dump_trace, + .sizes = stack_dumps_sizes +}; + + +static unsigned long p_stack_dump_trace[STACK_TRACE_ENTRIES+1] = { + [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX +}; +static unsigned long p_stack_dumps_sizes[STACK_TRACE_ENTRIES+1] = { + [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX +}; + +static struct extended_stack_trace max_p_stack_trace = { + .max_entries = STACK_TRACE_ENTRIES, + .entries = p_stack_dump_trace, + .sizes = p_stack_dumps_sizes +}; + + +static unsigned long pc_stack_dump_trace[STACK_TRACE_ENTRIES+1] = { + [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX +}; + +static struct stack_trace max_pc_stack_trace = { + .max_entries = STACK_TRACE_ENTRIES, + .entries = pc_stack_dump_trace, +}; + + +static unsigned long max_stack_size; +static unsigned long max_p_stack_size; +static unsigned long max_pc_stack_size; +static arch_spinlock_t max_stack_lock = + (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; + +static DEFINE_PER_CPU(int, trace_active); +static DEFINE_MUTEX(stack_sysctl_mutex); + +int stack_tracer_enabled = 0; +static int last_stack_tracer_enabled; + +int stack_tracer_kernel_only = 0; + + +struct save_stack_address_args { + struct extended_stack_trace *trace; + u64 *prev; + u64 *prev_kernel_frame; +}; + +static int save_stack_address(e2k_mem_crs_t *frame, + unsigned long real_frame_addr, + unsigned long corrected_frame_addr, int flags, void *arg) +{ + struct save_stack_address_args *args = arg; + struct extended_stack_trace *trace = args->trace; + u64 *prev = args->prev; + u64 *prev_kernel_frame = args->prev_kernel_frame; + u64 alloc_stack = KERNEL_C_STACK_SIZE; + u64 free_stack, prev_size; + u64 ip; + + if (trace->skip > 0) { + trace->skip--; + return 0; + } + + ip = AS(frame->cr0_hi).ip << 3; + + /* + * Skip user frames + */ + if (!AS(frame->cr1_lo).pm) { + trace->entries[trace->nr_entries] = ip; + 
trace->sizes[trace->nr_entries] = 0; + ++(trace->nr_entries); + return 0; + } + + if (*prev == ULONG_MAX) { + /* + * The top frame - save the used data stack size + * to do the necessary calculation one step later. + */ + free_stack = AS(frame->cr1_hi).ussz * 16; + *prev = alloc_stack - free_stack; + } else { + u64 used_stack; + + free_stack = AS(frame->cr1_hi).ussz * 16; + used_stack = alloc_stack - free_stack; + if (used_stack > *prev) { + /* + * Looks like the end of the stack + * (last frame has leftover information from + * the previously used kernel data stack). + */ + used_stack = 0; + } + prev_size = *prev - used_stack; + *prev = used_stack; + } + + if (likely(trace->nr_entries < trace->max_entries)) { + trace->entries[trace->nr_entries] = ip; + if (trace->nr_entries > 0) + trace->sizes[*prev_kernel_frame] = prev_size; + *prev_kernel_frame = trace->nr_entries; + ++(trace->nr_entries); + } else { + return 1; + } + + return 0; +} + +noinline +static void save_extended_stack_trace(struct extended_stack_trace *trace) +{ + struct save_stack_address_args args; + u64 prev_kernel_frame = trace->nr_entries; + u64 prev_used = ULONG_MAX; + + args.trace = trace; + args.prev = &prev_used; + args.prev_kernel_frame = &prev_kernel_frame; + parse_chain_stack(0, NULL, save_stack_address, &args); + + trace->sizes[prev_kernel_frame] = prev_used; + + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; +} + + +static int save_p_stack_address(e2k_mem_crs_t *frame, + unsigned long real_frame_addr, + unsigned long corrected_frame_addr, int flags, void *arg) +{ + struct extended_stack_trace *trace = arg; + u64 size, ip; + + if (trace->skip > 0) { + trace->skip--; + return 0; + } + + ip = AS(frame->cr0_hi).ip << 3; + + size = AS(frame->cr1_lo).wbs * EXT_4_NR_SZ; + + if (likely(trace->nr_entries < trace->max_entries)) { + trace->entries[trace->nr_entries] = ip; + trace->sizes[trace->nr_entries] = size; + ++(trace->nr_entries); + } else { + 
return 1; + } + + return 0; +} + +noinline +static void save_extended_p_stack_trace(struct extended_stack_trace *trace) +{ + parse_chain_stack(0, NULL, save_p_stack_address, trace); + + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; +} + +struct read_kernel_stacks_size_args { + unsigned long *cs_size; + unsigned long *ps_size; + int *skip; +}; + +static int read_kernel_stacks_size(e2k_mem_crs_t *frame, + unsigned long real_frame_addr, + unsigned long corrected_frame_addr, int flags, void *arg) +{ + struct read_kernel_stacks_size_args *args = arg; + unsigned long *cs_size = args->cs_size; + unsigned long *ps_size = args->ps_size; + int *skip = args->skip; + + if (*skip > 0) { + (*skip)--; + return 0; + } + + if (!AS(frame->cr1_lo).pm) + return 1; + + *cs_size += SZ_OF_CR; + *ps_size += AS(frame->cr1_lo).wbs * EXT_4_NR_SZ; + + return 0; +} + +noinline +static void get_kernel_stacks_size(unsigned long *cs_size, + unsigned long *ps_size) +{ + struct read_kernel_stacks_size_args args; + int skip = 3; + + *cs_size = 0; + *ps_size = 0; + + args.cs_size = cs_size; + args.ps_size = ps_size; + args.skip = &skip; + parse_chain_stack(0, NULL, read_kernel_stacks_size, &args); +} + +static inline void check_stack(void) +{ + unsigned long this_size, flags, ps_size, cs_size; + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_pshtp_t pshtp; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_pcshtp_t pcshtp; + e2k_cr1_lo_t cr1_lo; + + this_size = READ_SBR_REG_VALUE() - (unsigned long) &this_size; + + if (stack_tracer_kernel_only && + (current->mm || (current->flags & PF_EXITING))) { + get_kernel_stacks_size(&cs_size, &ps_size); + } else { + raw_all_irq_save(flags); + cr1_lo = READ_CR1_LO_REG(); + psp_lo = READ_PSP_LO_REG(); + psp_hi = READ_PSP_HI_REG(); + pcsp_lo = READ_PCSP_LO_REG(); + pcsp_hi = READ_PCSP_HI_REG(); + pshtp = READ_PSHTP_REG(); + pcshtp = READ_PCSHTP_REG(); + raw_all_irq_restore(flags); + + ps_size = 
AS(psp_hi).ind + GET_PSHTP_MEM_INDEX(pshtp) - + AS(cr1_lo).wbs * EXT_4_NR_SZ; + if (AS(psp_lo).base < TASK_SIZE) + ps_size += AS(psp_lo).base - (u64) CURRENT_PS_BASE(); + + cs_size = AS(pcsp_hi).ind + PCSHTP_SIGN_EXTEND(pcshtp) - + SZ_OF_CR; + if (AS(pcsp_lo).base < TASK_SIZE) + cs_size += AS(pcsp_lo).base - (u64) CURRENT_PCS_BASE(); + } + + if (this_size <= max_stack_size && ps_size <= max_p_stack_size + && cs_size <= max_pc_stack_size) + return; + + local_irq_save(flags); + arch_spin_lock(&max_stack_lock); + + /* a race could have already updated it */ + if (this_size > max_stack_size) { + max_stack_size = this_size; + + max_stack_trace.nr_entries = 0; + max_stack_trace.skip = 3; + + save_extended_stack_trace(&max_stack_trace); + } + + if (ps_size > max_p_stack_size) { + max_p_stack_size = ps_size; + + max_p_stack_trace.nr_entries = 0; + max_p_stack_trace.skip = 3; + + save_extended_p_stack_trace(&max_p_stack_trace); + } + + if (cs_size > max_pc_stack_size) { + max_pc_stack_size = cs_size; + + max_pc_stack_trace.nr_entries = 0; + max_pc_stack_trace.skip = 3; + + save_stack_trace(&max_pc_stack_trace); + } + + arch_spin_unlock(&max_stack_lock); + local_irq_restore(flags); +} + +static void +stack_trace_call(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct pt_regs *pt_regs) +{ + int cpu; + + if (unlikely(raw_nmi_irqs_disabled())) + return; + + preempt_disable_notrace(); + + cpu = raw_smp_processor_id(); + /* no atomic needed, we only modify this variable by this cpu */ + if (per_cpu(trace_active, cpu)++ != 0) + goto out; + + check_stack(); + + out: + per_cpu(trace_active, cpu)--; + /* prevent recursion in schedule */ + preempt_enable_notrace(); +} + +static struct ftrace_ops trace_ops __read_mostly = { + .func = stack_trace_call, + .flags = FTRACE_OPS_FL_RECURSION_SAFE, +}; + +static ssize_t +stack_max_size_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) +{ + unsigned long *ptr = filp->private_data; + char buf[64]; 
+ int r; + + r = snprintf(buf, sizeof(buf), "%ld\n", *ptr); + if (r > sizeof(buf)) + r = sizeof(buf); + return simple_read_from_buffer(ubuf, count, ppos, buf, r); +} + +static ssize_t +stack_max_size_write(struct file *filp, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + long *ptr = filp->private_data; + unsigned long val, flags; + int ret; + int cpu; + + ret = kstrtoul_from_user(ubuf, count, 10, &val); + if (ret) + return ret; + + local_irq_save(flags); + + /* + * In case we trace inside arch_spin_lock() or after (NMI), + * we will cause circular lock, so we also need to increase + * the percpu trace_active here. + */ + cpu = smp_processor_id(); + per_cpu(trace_active, cpu)++; + + arch_spin_lock(&max_stack_lock); + *ptr = val; + arch_spin_unlock(&max_stack_lock); + + per_cpu(trace_active, cpu)--; + local_irq_restore(flags); + + return count; +} + +static const struct file_operations stack_max_size_fops = { + .open = tracing_open_generic, + .read = stack_max_size_read, + .write = stack_max_size_write, + .llseek = default_llseek, +}; + +static void * +__next(struct seq_file *m, loff_t *pos) +{ + long n = *pos - 1; + + if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX) + return NULL; + + m->private = (void *)n; + return &m->private; +} + +static void * +t_next(struct seq_file *m, void *v, loff_t *pos) +{ + (*pos)++; + return __next(m, pos); +} + +static void *t_start(struct seq_file *m, loff_t *pos) +{ + int cpu; + + local_irq_disable(); + + cpu = smp_processor_id(); + per_cpu(trace_active, cpu)++; + + arch_spin_lock(&max_stack_lock); + + if (*pos == 0) + return SEQ_START_TOKEN; + + return __next(m, pos); +} + +static void t_stop(struct seq_file *m, void *p) +{ + int cpu; + + arch_spin_unlock(&max_stack_lock); + + cpu = smp_processor_id(); + per_cpu(trace_active, cpu)--; + + local_irq_enable(); +} + +static void print_disabled(struct seq_file *m) +{ + seq_puts(m, "#\n" + "# Stack tracer disabled\n" + "#\n" + "# To enable the stack 
tracer, either add 'stacktrace' to the\n" + "# kernel command line\n" + "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n" + "#\n"); +} + +static int t_show(struct seq_file *m, void *v) +{ + long i; + u64 total; + + if (v != SEQ_START_TOKEN) + return 0; + + if (!stack_tracer_enabled && !max_stack_size && !max_p_stack_size + && !max_pc_stack_size) { + print_disabled(m); + + return 0; + } + + seq_printf(m, "%d entries in data stack\n" + " Depth Size Location\n" + " ----- ---- --------\n", + max_stack_trace.nr_entries - 1); + + total = max_stack_size; + for (i = 0; i < max_stack_trace.nr_entries && + max_stack_trace.entries[i] != ULONG_MAX; i++) { + seq_printf(m, "%3ld) %8d %5d %pF\n", i, total, + max_stack_trace.sizes[i], + max_stack_trace.entries[i]); + total -= max_stack_trace.sizes[i]; + } + + seq_printf(m, "\n%d entries in procedure stack\n" + " Depth Size Location\n" + " ----- ---- --------\n", + max_p_stack_trace.nr_entries - 1); + + total = max_p_stack_size; + for (i = 0; i < max_p_stack_trace.nr_entries && + max_p_stack_trace.entries[i] != ULONG_MAX; i++) { + seq_printf(m, "%3ld) %8d %5d %pF\n", i, total, + max_p_stack_trace.sizes[i], + max_p_stack_trace.entries[i]); + total -= max_p_stack_trace.sizes[i]; + } + + seq_printf(m, "\n%d entries in chain stack\n" + " Location\n" + " --------\n", + max_pc_stack_trace.nr_entries - 1); + + for (i = 0; i < max_pc_stack_trace.nr_entries && + max_pc_stack_trace.entries[i] != ULONG_MAX; i++) + seq_printf(m, "%3ld) %pF\n", i, + max_pc_stack_trace.entries[i]); + + return 0; +} + +static const struct seq_operations stack_trace_seq_ops = { + .start = t_start, + .next = t_next, + .stop = t_stop, + .show = t_show, +}; + +static int stack_trace_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &stack_trace_seq_ops); +} + +static const struct file_operations stack_trace_fops = { + .open = stack_trace_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int 
+stack_trace_filter_open(struct inode *inode, struct file *file) +{ + return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER, + inode, file); +} + +static const struct file_operations stack_trace_filter_fops = { + .open = stack_trace_filter_open, + .read = seq_read, + .write = ftrace_filter_write, + .llseek = tracing_lseek, + .release = ftrace_regex_release, +}; + +int +stack_trace_sysctl(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret; + + mutex_lock(&stack_sysctl_mutex); + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + + if (ret || !write || + (last_stack_tracer_enabled == !!stack_tracer_enabled)) + goto out; + + last_stack_tracer_enabled = !!stack_tracer_enabled; + + if (stack_tracer_enabled) + register_ftrace_function(&trace_ops); + else + unregister_ftrace_function(&trace_ops); + + out: + mutex_unlock(&stack_sysctl_mutex); + return ret; +} + +static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata; + +static __init int enable_stacktrace(char *str) +{ + if (strncmp(str, "_filter=", 8) == 0) + strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE); + + stack_tracer_enabled = 1; + last_stack_tracer_enabled = 1; + + stack_tracer_kernel_only = (strstr(str, "kernel") != NULL); + + return 1; +} +__setup("stacktrace", enable_stacktrace); + +static __init int stack_trace_init(void) +{ + struct dentry *d_tracer; + + d_tracer = tracing_init_dentry(); + if (!d_tracer) + return 0; + + trace_create_file("stack_max_size", 0644, d_tracer, + &max_stack_size, &stack_max_size_fops); + + trace_create_file("stack_max_size_p", 0644, d_tracer, + &max_p_stack_size, &stack_max_size_fops); + + trace_create_file("stack_max_size_pc", 0644, d_tracer, + &max_pc_stack_size, &stack_max_size_fops); + + trace_create_file("stack_trace", 0444, d_tracer, + NULL, &stack_trace_fops); + + trace_create_file("stack_trace_filter", 0444, d_tracer, + NULL, &stack_trace_filter_fops); + + if (stack_trace_filter_buf[0]) + 
ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1); + + if (stack_tracer_enabled) + register_ftrace_function(&trace_ops); + + return 0; +} + +device_initcall(stack_trace_init); diff --git a/arch/e2k/kernel/trap_table.S b/arch/e2k/kernel/trap_table.S new file mode 100644 index 000000000000..a997cec916a5 --- /dev/null +++ b/arch/e2k/kernel/trap_table.S @@ -0,0 +1,1139 @@ +/* + * Trap table entries implemented on assembler + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +.global user_trap_handler; +.global kernel_trap_handler; +.global ttable_entry0; +.global __per_cpu_offset; +.global kernel_data_stack_overflow; +.global machine; + +#ifdef CONFIG_SMP +# define SMP_ONLY(...) __VA_ARGS__ +#else +# define SMP_ONLY(...) +#endif + +#ifdef CONFIG_CLW_ENABLE +# define CLW_ONLY(...) __VA_ARGS__ +#else +# define CLW_ONLY(...) +#endif + +#ifdef CONFIG_KVM_PARAVIRTUALIZATION +# define PV_VCPU(...) __VA_ARGS__ +#else +# define PV_VCPU(...) +#endif + +.section .ttable_entry0, "ax",@progbits +.align 8 +.type ttable_entry0,@function +ttable_entry0: + /* + * Important: the first memory access in kernel is store, not load. + * This is needed to flush SLT before trying to load anything. 
+ */ + ALTERNATIVE_1_ALTINSTR + /* iset v5 version - save qp registers extended part */ + { + /* #80747: must repeat interrupted barriers */ + wait fl_c=1 + stgdq,sm %qg16, 0, TSK_TI_TMP_G_VCPU_STATE + qpswitchd,1,sm GVCPUSTATE, GVCPUSTATE + qpswitchd,4,sm GCURTASK, GCURTASK + } + { + stgdq,sm %qg18, 0, TSK_TI_TMP_G_MY_CPU_OFFSET + qpswitchd,1,sm GCPUOFFSET, GCPUOFFSET + qpswitchd,4,sm GCPUID_PREEMPT, GCPUID_PREEMPT + } + ALTERNATIVE_2_OLDINSTR + /* Original instruction - save only 16 bits */ + { + /* #80747: must repeat interrupted barriers */ + wait fl_c=1 + stgdq,sm %qg16, 0, TSK_TI_TMP_G_VCPU_STATE + movfi,1 GVCPUSTATE, GVCPUSTATE + movfi,4 GCURTASK, GCURTASK + } + { + stgdq,sm %qg18, 0, TSK_TI_TMP_G_MY_CPU_OFFSET + nop 2 + movfi,1 GCPUOFFSET, GCPUOFFSET + movfi,4 GCPUID_PREEMPT, GCPUID_PREEMPT + } + ALTERNATIVE_3_FEATURE(CPU_FEAT_QPREG) + + { + rrd %sbr, GCURTASK + stgdq,sm %qg16, 0, TSK_TI_TMP_G_TASK + /* Do not restore %rpr (it's not clobbered by kernel entry) */ + cmpesb,3 0, 1, %pred1 + } + { + rrd %osr0, GVCPUSTATE + stgdq,sm %qg18, 0, TSK_TI_TMP_G_CPU_ID_PREEMPT + } + { + rrd %psp.hi, GCURTASK + /* pred0 = sbr < TASK_SIZE */ + cmpbedb,1 GCURTASK, TASK_SIZE - 1, %pred0 + } + SWITCH_HW_STACKS(TSK_TI_TMP_) +trap_handler_switched_stacks: + { + setwd wsz = 18, nfx = 1; + rrd %ctpr1, %dr6; + ldd GCURTASK, TSK_STACK, %dr1 ? %pred0; // %dr1: stack + ldd GCURTASK, TSK_K_USD_LO, %dr23 ? %pred0; // %dr23: usd_lo + ldd GCURTASK, TSK_K_USD_HI, %dr22 ? 
%pred0; // %dr22: usd_hi + } + + // trap can occur on guest kernel + // %pred20, %dr15, %dr16, %dr17: temporary predicate and registers + SWITCH_TO_KERNEL_IMAGE_PGD GCURTASK, %pred20, %dr15, %dr16, %dr17 + + ALTERNATIVE_1_ALTINSTR + /* CPU_FEAT_TRAP_V5 version - save %lsr1, %ilcr1 */ + { + rrd %lsr1, %dr14 + } + { + rrd %ilcr1, %dr15 + addd 0, 0, %dr17 + addd 0, 0, %dr18 + addd 0, 0, %dr19 + } + ALTERNATIVE_2_ALTINSTR2 + /* CPU_FEAT_TRAP_V6 version - save %lsr1, %ilcr1, %ctpr.hi */ + { + rrd %ctpr1.hi, %dr17 + } + { + rrd %ctpr2.hi, %dr18 + } + { + rrd %ctpr3.hi, %dr19 + } + { + rrd %lsr1, %dr14 + } + { + rrd %ilcr1, %dr15 + } + ALTERNATIVE_3_OLDINSTR2 + /* iset v2 version */ + { + addd 0, 0, %dr14 + addd 0, 0, %dr15 + addd 0, 0, %dr17 + addd 0, 0, %dr18 + addd 0, 0, %dr19 + } + ALTERNATIVE_4_FEATURE2(CPU_FEAT_TRAP_V5, CPU_FEAT_TRAP_V6) + + { + rrd %clkr, %dr0 + ipd 1 + disp %ctpr1, 2f + /* Read CLW unit registers state for protected mode */ + CLW_ONLY(mmurr,5 %us_cl_m0, %dr8 ? %pred0) + } + { + rrd %sbr, %dr26 + CLW_ONLY(mmurr,5 %us_cl_m1, %dr9 ? %pred0) + } + { + rrd %usd.hi, %dr7; + CLW_ONLY(mmurr,5 %us_cl_m2, %dr10 ? %pred0) + } + { + rrd %ctpr2, %dr5; + CLW_ONLY(addd,1 0, _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_US_CL_D_NO), %dr20 ? %pred0) + CLW_ONLY(addd,2 0, 1, %dr21 ? %pred0) + CLW_ONLY(mmurr,5 %us_cl_m3, %dr11 ? %pred0) + } + { + rrd %ctpr3, %dr3; + + std %dr0, GCURTASK, TSK_IRQ_ENTER_CLK /* ti->irq_enter_clk = %clkr */ + /* Check for data stack overflow. Handler does not use stack at all + * so it is enough to catch biggest possible getsp that failed. */ + cmpbedb,1 %dr7, 4096ULL << 32, %pred1 ? ~ %pred0 + CLW_ONLY(mmurr,5 %us_cl_up, %dr12 ? %pred0) + } + { + rrd %usd.lo, %dr2; + + /* Disable CLW unit for nonprotected mode, we must do it before + * data stack switching. There is no need for an explicit + * 'wait all_e' as it is done by hardware on trap enter. */ + CLW_ONLY(std,2 %dr21, [%dr20] MAS_MMU_REG ? %pred0) + CLW_ONLY(mmurr,5 %us_cl_b, %dr13 ? 
%pred0) + ct %ctpr1 ? ~ %pred0; // kernel_trap_handler() + } + +/* user */ + + /* set kernel state of UPSR to preserve FP disable exception */ + /* on movfi instructions */ + /* %r21 - user UPSR to save at thread_info structure */ + /* NMI disabled and can be enabled after TIRs parsing */ + /* SGI should be disabled and can be enabled later */ + { + ipd 3 + disp %ctpr1, user_trap_handler; + + rrs %upsr, %r21; + + /* For user traps only since only then tmp_k_gregs is needed: + * thread_info->k_gregs = thread_info->tmp_k_gregs */ + ldrd,2 GCURTASK, TAGGED_MEM_LOAD_REC_OPC | TSK_TI_TMP_G_VCPU_STATE, %dr28 + ldrd,5 GCURTASK, TAGGED_MEM_LOAD_REC_OPC | TSK_TI_TMP_G_VCPU_STATE_EXT, %dr29 + } + { + rws E2K_KERNEL_UPSR_DISABLED_ALL, %upsr; + + addd,1 %dr1, KERNEL_C_STACK_SIZE + KERNEL_C_STACK_OFFSET, %dr1; + stw,2 %r21, GCURTASK, TSK_UPSR; /* thread_info->upsr = upsr */ + ldd,5 GCURTASK, TSK_U_STACK_TOP, %dr27 + } + + /* Switch to kernel local data stack */ + ALTERNATIVE_1_ALTINSTR + /* CPU_HWBUG_USD_ALIGNMENT version */ + { + /* sbr = stack + KERNEL_C_STACK_SIZE + KERNEL_C_STACK_OFFSET */ + rwd %dr1, %sbr + ldrd,2 GCURTASK, TAGGED_MEM_LOAD_REC_OPC | TSK_TI_TMP_G_TASK, %dr30 + ldrd,5 GCURTASK, TAGGED_MEM_LOAD_REC_OPC | TSK_TI_TMP_G_TASK_EXT, %dr31 + nop 1; + } + ALTERNATIVE_2_OLDINSTR + /* Default version */ + { + /* sbr = stack + KERNEL_C_STACK_SIZE + KERNEL_C_STACK_OFFSET */ + rwd %dr1, %sbr + ldrd,2 GCURTASK, TAGGED_MEM_LOAD_REC_OPC | TSK_TI_TMP_G_TASK, %dr30 + ldrd,5 GCURTASK, TAGGED_MEM_LOAD_REC_OPC | TSK_TI_TMP_G_TASK_EXT, %dr31 + } + ALTERNATIVE_3_FEATURE(CPU_HWBUG_USD_ALIGNMENT) + { + rwd %dr23, %usd.lo + ldrd,2 GCURTASK, TAGGED_MEM_LOAD_REC_OPC | TSK_TI_TMP_G_MY_CPU_OFFSET, %dr32 + ldrd,5 GCURTASK, TAGGED_MEM_LOAD_REC_OPC | TSK_TI_TMP_G_MY_CPU_OFFSET_EXT, %dr33 + } + { + rwd %dr22, %usd.hi; + ldrd,2 GCURTASK, TAGGED_MEM_LOAD_REC_OPC | TSK_TI_TMP_G_CPU_ID_PREEMPT, %dr34 + ldrd,5 GCURTASK, TAGGED_MEM_LOAD_REC_OPC | TSK_TI_TMP_G_CPU_ID_PREEMPT_EXT, %dr35 + } + { + rrd 
%lsr, %dr16 + } + { + rrd %ilcr, %dr4; + strd,2 %dr28, GCURTASK, TAGGED_MEM_STORE_REC_OPC | TSK_TI_G_VCPU_STATE + strd,5 %dr29, GCURTASK, TAGGED_MEM_STORE_REC_OPC | TSK_TI_G_VCPU_STATE_EXT + } + { + strd,2 %dr30, GCURTASK, TAGGED_MEM_STORE_REC_OPC | TSK_TI_G_TASK + strd,5 %dr31, GCURTASK, TAGGED_MEM_STORE_REC_OPC | TSK_TI_G_TASK_EXT + } + { + strd,2 %dr32, GCURTASK, TAGGED_MEM_STORE_REC_OPC | TSK_TI_G_MY_CPU_OFFSET + strd,5 %dr33, GCURTASK, TAGGED_MEM_STORE_REC_OPC | TSK_TI_G_MY_CPU_OFFSET_EXT + } + { + strd,2 %dr34, GCURTASK, TAGGED_MEM_STORE_REC_OPC | TSK_TI_G_CPU_ID_PREEMPT + strd,5 %dr35, GCURTASK, TAGGED_MEM_STORE_REC_OPC | TSK_TI_G_CPU_ID_PREEMPT_EXT + } + { + nop 1; /* getsp -> use */ + getsp -(PTRACE_SZOF + AAU_SZOF + TRAP_PTREGS_SZOF + 3 * 7), %dr0 + } +#ifdef CONFIG_CLW_ENABLE + { + std %dr10, [%dr0 + PT_US_CL_M2] + } + { + std %dr11, [%dr0 + PT_US_CL_M3] + } + { + std %dr8, [%dr0 + PT_US_CL_M0] + std %dr9, [%dr0 + PT_US_CL_M1] + } + { + std %dr12, [%dr0 + PT_US_CL_UP] + std %dr13, [%dr0 + PT_US_CL_B] + } +#endif + { + std %dr6, [%dr0 + PT_CTRP1]; // regs->ctpr1 = ctpr1 + std %dr5, [%dr0 + PT_CTRP2]; // regs->ctpr2 = ctpr2 + } + { + std %dr3, [%dr0 + PT_CTRP3]; // regs->ctpr3 = ctpr3 + std %dr16, [%dr0 + PT_LSR]; // regs->lsr = lsr + } + { + std %dr4, [%dr0 + PT_ILCR]; // regs->ilcr = ilcr + std %dr27, [%dr0 + PT_STACK+ST_TOP]; /* regs->stacks.top = */ + } + { + /* regs->g_stacks.sbr/top = %sbr */ + PV_VCPU(std %dr26, [%dr0 + PT_G_STACK+G_ST_SBR]) + } + { + getsp -64, %empty; // reserve stack for function arguments + std %dr2, [%dr0 + PT_STACK+ST_USD_LO]; // regs->stacks.usd_lo = usd.lo + std %dr7, [%dr0 + PT_STACK+ST_USD_HI]; // regs->stacks.usd_hi = usd.hi + } + { + std %dr14, [%dr0 + PT_LSR1] + std %dr15, [%dr0 + PT_ILCR1] + SMP_ONLY(shld,3 GCPUID_PREEMPT, 3, GCPUOFFSET) + } + { + std %dr17, [%dr0 + PT_CTPR1_HI] + std %dr18, [%dr0 + PT_CTPR2_HI] + } + { + std,2 %dr19, [%dr0 + PT_CTPR3_HI] + SMP_ONLY(ldd,5 GCPUOFFSET, __per_cpu_offset, GCPUOFFSET) + ct 
%ctpr1; // user_trap_handler() + } + +2: /* kernel */ + // if (READ_SBR_REG() >= TASK_SIZE) + // kernel_trap_handler(); + { + ipd 2 + disp %ctpr1, kernel_trap_handler; + getsp -(PTRACE_SZOF + TRAP_PTREGS_SZOF + 2 * 7), %dr0 ? ~ %pred1 + } + { + rrd %lsr, %dr16 + ibranch kernel_data_stack_overflow ? %pred1 + } + { + rrd %ilcr, %dr4; + std,2 %dr6, [%dr0 + PT_CTRP1] ; // regs->ctpr1 = ctpr1 + } + { + std,2 %dr5, [%dr0 + PT_CTRP2] ; // regs->ctpr2 = ctpr2 + } + { + std %dr3, [%dr0 + PT_CTRP3] ; // regs->ctpr3 = ctpr3 + std %dr26,[%dr0 + PT_STACK+ST_TOP] + } + { + getsp -64, %empty; // reserve stack for function arguments + std %dr7, [%dr0 + PT_STACK+ST_USD_HI] + std %dr2, [%dr0 + PT_STACK+ST_USD_LO] + } + { + std %dr14, [%dr0 + PT_LSR1] + std %dr15, [%dr0 + PT_ILCR1] + } + { + std %dr17, [%dr0 + PT_CTPR1_HI] + std %dr18, [%dr0 + PT_CTPR2_HI] + } + { + std %dr19, [%dr0 + PT_CTPR3_HI] + } + { + std %dr4, [%dr0 + PT_ILCR] + std %dr16,[%dr0 + PT_LSR] + ct %ctpr1; // kernel_trap_handler() + } +.size $ttable_entry0, . 
- $ttable_entry0 + + +#ifdef CONFIG_CLW_ENABLE +#define PREPARE_CLW_ADDR(addr_reg) \ + addd,1 0, _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_US_CL_D_NO), addr_reg; +#define PREPARE_CLW_VAL(val_reg) \ + addd,1 0, 1, val_reg; +#define STORE_CLW_VAL(val_reg, addr_reg) \ + std,2 val_reg, 0, [addr_reg] MAS_MMU_REG; +#else /* !CONFIG_CLW_ENABLE */ +#define PREPARE_CLW_ADDR(addr_reg) +#define PREPARE_CLW_VAL(val_reg) +#define STORE_CLW_VAL(val_reg, addr_reg) +#endif /* CONFIG_CLW_ENABLE */ + +#define WSZ 11 +#define SYS_CALL(sys_call_table, FORMAT_32, KERNEL_ENTRY) \ + { \ + setwd wsz = WSZ, nfx = 1; \ + nop 2; \ + rrd %osr0, %dr7; \ + ldd,2 GCURTASK, TSK_STACK, %dr10; \ + ldd,3 GCURTASK, TSK_K_USD_HI, %dr8; \ + puttags,5 %r0, 0, %r0; \ + } \ + \ + /* goto guest kernel system call table entry, */ \ + /* if system call is from guest user */ \ + /* %dr7: register of current_thread_info() */ \ + /* %dr18, %dr19, %dr11: temporary registers */ \ + /* %pred1: temporary predicates */ \ + GOTO_PV_VCPU_KERNEL_TTABLE %dr7, %dr18, %dr19, %dr11, %pred1 \ + \ + { \ + ipd 2; \ + disp %ctpr1, handle_sys_call; \ + PV_VCPU(rrd %sbr, %dr21); \ + cmpbsb,3 %r0, NR_syscalls,%pred3; /* sys_num < NR_syscalls */ \ + sxt,4 6, %r0, %dr0; \ + ldd,5 GCURTASK, TSK_K_USD_LO, %dr9; \ + } \ + { \ + rrd %usd.lo, %dr19; \ + ldd,2 GCURTASK, TSK_U_STACK_TOP, %dr11; \ + shld,4 %dr0, 3, %dr14; /* sys_num * 8 */ \ + puttagd,5 %dr1, 0, %dr1; \ + } \ + { \ + rrd %usd.hi, %dr20; \ + addd,2 %dr10, KERNEL_C_STACK_SIZE + \ + KERNEL_C_STACK_OFFSET, %dr10; \ + ldd,5 [sys_call_table + %dr14], %dr14 ? %pred3; \ + } \ + ALTERNATIVE_1_ALTINSTR \ + /* CPU_HWBUG_USD_ALIGNMENT version */ \ + { \ + rwd %dr10, %sbr; \ + cmpesb,1 FORMAT_32, 1, %pred2; /* 32 bit system call */ \ + puttagd,2 %dr2, 0, %dr2; \ + addd,4 sys_ni_syscall, %dr14 ? 
~ %pred3; \ + nop 1; \ + } \ + ALTERNATIVE_2_OLDINSTR \ + /* Default version */ \ + { \ + rwd %dr10, %sbr; \ + cmpesb,1 FORMAT_32, 1, %pred2; /* 32 bit system call */ \ + puttagd,2 %dr2, 0, %dr2; \ + addd,4 sys_ni_syscall, %dr14 ? ~ %pred3; \ + } \ + ALTERNATIVE_3_FEATURE(CPU_HWBUG_USD_ALIGNMENT) \ + { \ + rwd %dr9, %usd.lo; \ + PREPARE_CLW_VAL(%dr18); \ + puttagd,2 %dr3, 0, %dr3; \ + puttagd,5 %dr4, 0, %dr4; \ + } \ + { \ + nop 3; /* rwd %usd -> getsp */ \ + rwd %dr8, %usd.hi; \ + PREPARE_CLW_ADDR(%dr17); \ + puttagd,5 %dr5, 0, %dr5; \ + } \ + /* \ + * Guest under hardware virtualization support - IS_HV_GM() \ + * should save global registers used by host to support \ + * (para)virtualization. Saving is unconditional because of \ + * only such guest can be here. \ + * %dr7 - pointer to thread info \ + * %dr10 - temporary registers \ + */ \ + SAVE_HOST_GREGS_TO_VIRT_UNEXT %dr7, %dr10 \ + { \ + rrs %upsr, %r15; \ + STORE_CLW_VAL(%dr18, %dr17); \ + } \ + { \ + rws E2K_KERNEL_UPSR_ENABLED, %upsr; \ + stw %r15, GCURTASK, TSK_UPSR; \ + puttagd,5 %dr6, 0, %dr6; \ + } \ + { \ + nop 1; /* getsp -> usage */ \ + getsp -(PTRACE_SZOF + 64), %dr7; \ + adds,1 0, KERNEL_ENTRY, %r8; \ + sxt,3 6, %r1, %dr1 ? %pred2; \ + sxt,4 6, %r2, %dr2 ? %pred2; \ + } \ + { \ + addd,1 %dr7, 64, %dr7; \ + stw,2 %r0, [ %dr7 + (PT_SYS_NUM + 64) ]; \ + sxt,3 6, %r3, %dr3 ? %pred2; \ + sxt,4 6, %r4, %dr4 ? %pred2; \ + } \ + { \ + stw,2 %r8, [ %dr7 + PT_KERNEL_ENTRY ]; \ + SMP_ONLY(shld,4 GCPUID_PREEMPT, 3, GCPUOFFSET); \ + /* regs->g_stacks.sbr/top = %sbr */ \ + PV_VCPU(std %dr21, [%dr7 + PT_G_STACK+G_ST_SBR]); \ + } \ + { \ + std,2 %dr19, [%dr7 + PT_STACK+ST_USD_LO]; \ + std,5 %dr20, [%dr7 + PT_STACK+ST_USD_HI]; \ + sxt,3 6, %r5, %dr5 ? %pred2; \ + sxt,4 6, %r6, %dr6 ? 
%pred2; \ + } \ + { \ + std,2 %dr11, [%dr7 + PT_STACK+ST_TOP]; \ + SMP_ONLY(ldd,5 GCPUOFFSET, __per_cpu_offset, GCPUOFFSET); \ + addd,4 %dr14, 0, %dr0; \ + ct %ctpr1; \ + } + +.global native_ttable_entry1; +.section .native_ttable_entry1, "ax",@progbits +.align 8 +.type native_ttable_entry1,@function +native_ttable_entry1: + SWITCH_HW_STACKS_FROM_USER() + SYS_CALL(sys_call_table_32, 1, 1) +.size $native_ttable_entry1, . - $native_ttable_entry1 + +.global native_ttable_entry3; +.global native_ttable_entry3_switched; + +.section .native_ttable_entry3, "ax",@progbits +.align 8 +.type native_ttable_entry3,@function +native_ttable_entry3: + SWITCH_HW_STACKS_FROM_USER() +native_ttable_entry3_switched: + SYS_CALL(sys_call_table, 0, 3) +.size $native_ttable_entry3, . - $native_ttable_entry3 + + +.global native_ttable_entry4; +.section .native_ttable_entry4, "ax",@progbits + .align 8 + .type native_ttable_entry4,@function +native_ttable_entry4: + SWITCH_HW_STACKS_FROM_USER() + { + /* wsz here must be not smaller than in ttable_entry3 + * and SYS_CALL() to workaround hw bug #68012 */ + setwd wsz = WSZ, nfx = 1 + ipd 0 + disp %ctpr1, compatibility_call + /* %dr7 = current->thread.flags */ + ldd,0 GCURTASK, TSK_THREAD_FLAGS, %dr7 + } + { + puttagd,2 %dr0, 0, %dr0 + ipd 1 + disp %ctpr2, native_ttable_entry3_switched + } + { + nop 2 + /* %pred1 = sys_num < 0 */ + cmplsb,1 %r0, 0, %pred1 + } + { + /* pred2 = !(current->thread.flags & E2K_FLAG_32BIT) */ + cmpandedb %dr7, E2K_FLAG_32BIT, %pred2 + } + { + /* sys_num = -sys_num */ + subs,1 0, %r0, %r0 ? %pred1 + /* if (sys_num < 0) goto compatibility_call */ + ct %ctpr1 ? %pred1 + } + { + /* Wait for %pred2 */ + addd 0x0, 0x0, %empty + } + { + /* if (!(current->thread.flags & E2K_FLAG_32BIT)) goto ttable_entry3 */ + ct %ctpr2 ? %pred2 + } +compatibility_call: + SYS_CALL(sys_call_table_deprecated, 2, 4) +.size $native_ttable_entry4, . 
- $native_ttable_entry4 + + +.global ttable_entry5; +.section .ttable_entry5, "ax",@progbits + .align 8 + .type ttable_entry5,@function +ttable_entry5: + /* We want to just jump right to the handler without + * doing anything, but at least we have to make sure + * that the passed parameters are valid. */ +{ + setwd wsz = 0x4 +} + + /* goto guest kernel system call table entry, */ + /* if system call is from guest user */ + /* %dr7: temporary register to read current_thread_info() */ + /* %dr8, %dr9, %dr10: temporary registers */ + /* %pred0, %pred1, %pred2: temporary predicates */ + GOTO_GUEST_KERNEL_FAST_TTABLE 5, %dr7, %dr8, %dr9, %dr10, \ + %pred0, %pred1, %pred2 + +{ + /* If dr0 holds value with a bad tag, we will be SIGILL'ed. + * If we are called with an empty register window (no %dr0 + * yet), we will be SIGSEGV'ed. */ + andd,0 %dr0, NR_fast_syscalls_mask, %dr0 +} +{ + shld,0 %dr0, 3, %dr0 + puttagd,2 %dr1, 0, %dr1 +} +{ + ldd,0 [fast_sys_calls_table_32 + %dr0], %dr3 + sxt,1 6, %dr1, %dr0 + puttagd,2 %dr2, 0, %dr2 +} +{ + ipd 2 + movtd,0 %dr3, %ctpr1 + + sxt,1 6, %dr2, %dr1 +} + ct %ctpr1 +.size $ttable_entry5, . -$ttable_entry5 + + +.global fast_sys_calls_table; +.global ttable_entry6; + +.section .ttable_entry6, "ax",@progbits + .align 8 + .type ttable_entry6,@function +ttable_entry6: + /* We want to just jump right to the handler without + * doing anything, but at least we have to make sure + * that the passed parameters are valid. */ +{ + setwd wsz = 0x4 +} + + /* goto guest kernel system call table entry, */ + /* if system call is from guest user */ + /* %dr7: temporary register to read current_thread_info() */ + /* %dr8, %dr9, %dr10: temporary registers */ + /* %pred0, %pred1, %pred2: temporary predicates */ + GOTO_GUEST_KERNEL_FAST_TTABLE 6, %dr7, %dr8, %dr9, %dr10, \ + %pred0, %pred1, %pred2 + +{ + /* If dr0 holds value with a bad tag, we will be SIGILL'ed. + * If we are called with an empty register window (no %dr0 + * yet), we will be SIGSEGV'ed. 
*/ + andd,0 %dr0, NR_fast_syscalls_mask, %dr0 +} +{ + shld,0 %dr0, 3, %dr0 +} +{ + ldd,0 [fast_sys_calls_table + %dr0], %dr3 + puttagd,2 %dr1, 0, %dr0 +} +{ + ipd 2 + movtd,0 %dr3, %ctpr1 + + puttagd,2 %dr2, 0, %dr1 +} + ct %ctpr1 +.size $ttable_entry6, . -$ttable_entry6 + + +.global fast_sys_calls_table_128; +.global ttable_entry7; + +.section .ttable_entry7, "ax",@progbits + .align 8 + .type ttable_entry7,@function +ttable_entry7: + /* goto guest kernel system call table entry, */ + /* if system call is from guest user */ + /* %dr7: temporary register to read current_thread_info() */ + /* %dr8, %dr9, %dr10: temporary registers */ + /* %pred0, %pred1, %pred2: temporary predicates */ + GOTO_GUEST_KERNEL_FAST_TTABLE 7, %dr7, %dr8, %dr9, %dr10, \ + %pred0, %pred1, %pred2 + + /* We want to just jump right to the handler without + * doing anything, but at least we have to make sure + * that the passed parameters are valid. + * + * Read tags of %dr1 - %dr5 and pack them by forths in %r0. + * Clear any speculative tags in arguments, which can be unused + * by some system calls. */ +{ + setwd wsz = 0x8 + + /* If dr0 holds value with a bad tag, we will be SIGILL'ed. + * If we are called with an empty register window (no %dr0 + * yet), we will be SIGSEGV'ed. */ + andd,0 %dr0, NR_fast_syscalls_mask, %dr0 + + gettagd,5 %dr2, %r10 +} +{ + shld,0 %dr0, 3, %dr0 + + shls,3 %r10, 8, %r10 + gettagd,2 %dr3, %r11 + gettagd,5 %dr4, %r12 +} +{ + ldd,0 [fast_sys_calls_table_128 + %dr0], %dr8 + + shls,1 %r11, 12, %r11 + shls,3 %r12, 16, %r12 + gettagd,2 %dr5, %r13 +} +{ + nop 1 /* movtd -> ct */ + + ipd 2 + movtd,0 %dr8, %ctpr1 + nop 1 + + shls,2 %r13, 20, %r13 +} +{ + ors,0 %r11, %r13, %r11 + puttagd,5 %dr2, 0, %dr1 +} +{ + ors,0 %r11, %r10, %r11 + puttagd,2 %dr3, 0, %dr2 + puttagd,5 %dr4, 0, %dr3 +} +{ + ors,0 %r11, %r12, %r11 + puttagd,2 %dr5, 0, %dr4 +} +{ + adds,0 %r11, 0, %r0 + ct %ctpr1 +} +.size $ttable_entry7, . 
-$ttable_entry7 + + +#ifdef CONFIG_PROTECTED_MODE +.global ttable_entry8_C; +.global ttable_entry8 + +.section .ttable_entry8, "ax",@progbits + .align 8 + .type ttable_entry8@function +ttable_entry8: + SWITCH_HW_STACKS_FROM_USER( + ldgdd TSK_STACK, %dr1; + ipd 3; + disp %ctpr1, ttable_entry8_C; + ) + { + setwd wsz = 16, nfx = 1 + ldgdd,3 TSK_K_USD_HI, %dr25 + addd,0 0, 1, %dr28 + addd,1 0, _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_US_CL_D_NO), %dr26 + /* Read tags of %dr2 - %dr5 and pack them by forths in %dr1. + * Clear any speculative tags in arguments, which can be unused + * by some system calls. */ + gettagd,2 %dr2, %r17 + gettagd,5 %dr3, %r18 + } + { + rrd %usd.lo, %dr29 + ldd,3 GCURTASK, TSK_K_USD_LO, %dr27 + gettagd,2 %dr4, %r19 + gettagd,5 %dr5, %r20 + shls,1 %r17, 8, %r17 + shls,4 %r18, 12, %r18 + } + + /* goto guest kernel system call table entry, */ + /* if system call is from guest user */ + /* %dr14, %dr15, %dr16: temporary registers */ + /* %pred0, %pred1, %pred2: temporary predicates */ + GOTO_GUEST_KERNEL_PROT_TTABLE 8, GCURTASK, %dr14, %dr15, %dr16, \ + %pred0, %pred1, %pred2 + + { + /* "wait all_e" is done before switching hardware stacks */ + rrd %usd.hi, %dr30 + shls,1 %r19, 16, %r19 + shls,4 %r20, 20, %r20 + gettagd,2 %dr6, %r21 + gettagd,5 %dr7, %r22 + ldd,3 GCURTASK, TSK_U_STACK_TOP, %dr15 + } + { + rrs %upsr, %r31 + + puttags,5 %r0, 0, %r0 + addd,1 %dr1, KERNEL_C_STACK_SIZE + KERNEL_C_STACK_OFFSET, %dr1 + /* Disable CLW unit for nonprotected mode */ + std,2 %dr28, 0, [%dr26] MAS_MMU_REG + } + /* Switch to kernel local data stack */ + ALTERNATIVE_1_ALTINSTR + /* CPU_HWBUG_USD_ALIGNMENT version */ + { + rwd %dr1, %sbr + gettagd,2 %dr8, %r23 + gettagd,5 %dr9, %r24 + ors,1 %r17, %r19, %r19 + ors,4 %r18, %r20, %r20 + nop 1 + } + ALTERNATIVE_2_OLDINSTR + /* Default version */ + { + rwd %dr1, %sbr + gettagd,2 %dr8, %r23 + gettagd,5 %dr9, %r24 + ors,1 %r17, %r19, %r19 + ors,4 %r18, %r20, %r20 + } + ALTERNATIVE_3_FEATURE(CPU_HWBUG_USD_ALIGNMENT) + { + rwd 
%dr27, %usd.lo + gettagd,2 %dr10, %r17 + gettagd,5 %dr11, %r18 + sxt,1 6, %r21, %dr21 + sxt,4 6, %r22, %dr22 + } + { + rwd %dr25, %usd.hi + sxt,3 6, %r0, %dr0 + puttagd,2 %dr2, 0, %dr2 + puttagd,5 %dr3, 0, %dr3 + sxt,1 6, %r23, %dr23 + sxt,4 6, %r24, %dr24 + } + { + shld,0 %dr21, 24, %dr21 + shld,3 %dr22, 28, %dr22 + shld,1 %dr23, 32, %dr23 + shld,4 %dr24, 36, %dr24 + puttagd,2 %dr4, 0, %dr4 + puttagd,5 %dr5, 0, %dr5 + } + { + rws E2K_KERNEL_UPSR_ENABLED, %upsr + gettagd,2 %dr12, %r21 + gettagd,5 %dr13, %r22 + ord,1 %dr21, %dr23, %dr23 + ord,4 %dr22, %dr24, %dr24 + } + { + ors,3 %r19, %r20, %r1 + sxt,1 6, %r17, %dr17 + sxt,4 6, %r18, %dr18 + sxt,2 6, %r21, %dr21 + sxt,5 6, %r22, %dr22 + } + { + puttagd,2 %dr6, 0, %dr6 + puttagd,5 %dr7, 0, %dr7 + shld,1 %dr17, 40, %dr17 + shld,4 %dr18, 44, %dr18 + shld,0 %dr21, 48, %dr21 + shld,3 %dr22, 52, %dr22 + } + { + ord,1 %dr17, %dr21, %dr21 + ord,4 %dr18, %dr22, %dr22 + stw,2 %r31, GCURTASK, TSK_UPSR + sxt,3 6, %r1, %dr1 + } + + /* Reserve memory for 'struct pt_regs' and parameters and put in + * there the last argument 'tags' (cannot put it in %dr8 since the + * size of the register window for C functions is only 8 dregs). */ + { + getsp -PTRACE_SZOF, %dr14; + SMP_ONLY(shld,3 GCPUID_PREEMPT, 3, GCPUOFFSET) + + puttagd,2 %dr8, 0, %dr8 + puttagd,5 %dr9, 0, %dr9 + } + { + /* %dr20: current_thread_info */ + rrd %osr0, %dr20 + + SMP_ONLY(ldd,3 GCPUOFFSET, __per_cpu_offset, GCPUOFFSET) + + puttagd,2 %dr10, 0, %dr10 + puttagd,5 %dr11, 0, %dr11 + } + /* + * Guest under hardware virtualization support - IS_HV_GM() + * should save global registers used by host to support + * (para)virtualization. Saving is unconditional because of + * only such guest can be here. + * %dr20 - pointer to thread info + * %dr17 - temporary registers + */ + SAVE_HOST_GREGS_TO_VIRT_UNEXT %dr20, %dr17 + + /* Go to main protected system call handler. 
+ * Do not store tags because we pass tags via %dr1 */ + { + std,2 %dr29, [%dr14 + PT_STACK+ST_USD_LO] + puttagd,5 %dr12, 0, %dr12 + } + { + getsp -64, %empty; + std,2 %dr30, [%dr14 + PT_STACK+ST_USD_HI] + puttagd,5 %dr13, 0, %dr13 + ord,4 %dr23, %dr24, %dr24 + } + { + std,2 %dr6, [%dr14 + PT_ARG_5] + std,5 %dr7, [%dr14 + PT_ARG_6] + ord,4 %dr1, %dr24, %dr1 + } + { + adds,1 8, 0, %r8 + std,2 %dr8, [%dr14 + PT_ARG_7] + std,5 %dr9, [%dr14 + PT_ARG_8] + ord,4 %dr21, %dr22, %dr22 + } + { + std,2 %dr10, [%dr14 + PT_ARG_9] + std,5 %dr11, [%dr14 + PT_ARG_10] + ord,4 %dr1, %dr22, %dr1 + } + { + std,2 %dr12, [%dr14 + PT_ARG_11] + std,5 %dr13, [%dr14 + PT_ARG_12] + } + { + addd,0 %dr14, 0, %dr6 + + stw,2 %r8, [ %dr14 + PT_KERNEL_ENTRY ] + std %dr15, [%dr14 + PT_STACK+ST_TOP] + + ct %ctpr1 + } +.size $ttable_entry8, . - $ttable_entry8 + +.global ttable_entry10_C; +.global ttable_entry10; + +.section .ttable_entry10, "ax",@progbits + .align 8 + .type ttable_entry10,@function +ttable_entry10: + SWITCH_HW_STACKS_FROM_USER() + { + setwd wsz = 11, nfx = 1 + addd 0, _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_US_CL_D_NO), %dr16 + addd 0, 1, %dr17 + } + + /* goto guest kernel system call table entry, */ + /* if system call is from guest user */ + /* %dr7: temporary register to read current_thread_info() */ + /* %dr8, %dr9, %dr10: temporary registers */ + /* %pred0, %pred1, %pred2: temporary predicates */ + GOTO_GUEST_KERNEL_PROT_TTABLE 10, GCURTASK, %dr8, %dr9, %dr10, \ + %pred0, %pred1, %pred2 + + /* Read tags of %dr0 - %dr7 and pack them by forths in %dr8. + * Clear any speculative tags in arguments, which can be unused + * by some system calls. 
*/ + { + ipd 2 + disp %ctpr1, ttable_entry10_C + rrd %usd.lo, %dr18 + gettagd,2 %dr1, %r9 + gettagd,5 %dr2, %r10 + } + { + shls,0 %r9, 4, %r9 + gettagd,2 %dr0, %r8 + shls,1 %r10, 8, %r10 + gettagd,5 %dr3, %r11 + } + { + ors,0 %r8, %r9, %r8 + gettagd,2 %dr4, %r12 + shls,3 %r11, 12, %r11 + gettagd,5 %dr5, %r13 + } + { + ors,0 %r8, %r10, %r8 + shls,1 %r12, 16, %r12 + gettagd,2 %dr6, %r14 + shls,3 %r13, 20, %r13 + gettagd,5 %dr7, %r15 + } + { + ors,0 %r8, %r11, %r8 + puttagd,2 %dr0, 0, %dr0 + shls,1 %r15, 28, %r15 + puttagd,5 %dr1, 0, %dr1 + shls,3 %r14, 24, %r14 + } + { + ors,0 %r8, %r12, %r8 + sxt,1 6, %r0, %dr0 + puttagd,2 %dr2, 0, %dr2 + puttagd,5 %dr3, 0, %dr3 + } + { + rrd %usd.hi, %dr19 + + ors,1 %r8, %r13, %r8 + puttagd,2 %dr4, 0, %dr4 + puttagd,5 %dr5, 0, %dr5 + } + { + ors,0 %r8, %r14, %r8 + puttagd,2 %dr6, 0, %dr6 + puttagd,5 %dr7, 0, %dr7 + } + { + ors,0 %r8, %r15, %r8 + /* Wait for FPU exceptions _and_ for CLW work completion */ + wait all_e = 1 + } + { + /* %dr13: current_thread_info */ + rrd %osr0, %dr13 + + /* Disable CLW unit for nonprotected mode */ + std,2 %dr17, 0, [%dr16] MAS_MMU_REG + } + // thread_info = current_thread_info(); + // usd_lo = thread_info->k_usd_lo; + // usd_hi = thread_info->k_usd_hi; + // WRITE_USD_REG(usd_hi, usd_lo); + // WRITE_SBR_REG_VALUE(stack + KERNEL_C_STACK_SIZE + + // KERNEL_C_STACK_OFFSET); + + // Switch to kernel local data stack + { + ldd,0 GCURTASK, TSK_K_USD_HI, %dr9 // %dr9: usd_hi + ldd,2 GCURTASK, TSK_K_USD_LO, %dr10 // %dr10: usd_lo + ldd,3 GCURTASK, TSK_STACK, %dr11 // %dr11: stack + ldd,5 GCURTASK, TSK_U_STACK_TOP, %dr16 // %dr16: u_stack.top + } + { + addd,1 %dr11, KERNEL_C_STACK_SIZE + KERNEL_C_STACK_OFFSET, %dr12 + } + ALTERNATIVE_1_ALTINSTR + /* CPU_HWBUG_USD_ALIGNMENT version */ + { + rwd %dr12, %sbr + nop 1 + } + ALTERNATIVE_2_OLDINSTR + /* Default version */ + { + rwd %dr12, %sbr + } + ALTERNATIVE_3_FEATURE(CPU_HWBUG_USD_ALIGNMENT) + { + rwd %dr10, %usd.lo + } + { + rwd %dr9, %usd.hi + } + { + rrs 
%upsr, %r20 + } + { + nop 3 + rws E2K_KERNEL_UPSR_ENABLED, %upsr + } + + /* Reserve memory for 'struct pt_regs' and parameters and put in + * there the last argument 'tags' (cannot put it in %dr8 since the + * size of the register window for C functions is only 8 dregs). */ + { + getsp -(6 * 8), %dr9 + SMP_ONLY(shld,1 GCPUID_PREEMPT, 3, GCPUOFFSET) + } + { + stw %r20, GCURTASK, TSK_UPSR + } + { + getsp -(PTRACE_SZOF + 64), %dr7; + std %dr7, [%dr9] + } + { + SMP_ONLY(ldd,2 GCPUOFFSET, __per_cpu_offset, GCPUOFFSET) + stw %r8, [%dr9 + 8] + } + + /* + * Guest under hardware virtualization support - IS_HV_GM() + * should save global registers used by host to support + * (para)virtualization. Saving is unconditional because of + * only such guest can be here. + * %dr13 - pointer to thread info + * %dr10 - temporary registers + */ + SAVE_HOST_GREGS_TO_VIRT_UNEXT %dr13, %dr10 + + { + addd %dr7, 64, %dr7 + std %dr18, [%dr7 + PT_STACK+ST_USD_LO + 64] + std %dr19, [%dr7 + PT_STACK+ST_USD_HI + 64] + } + { + std %dr16, [%dr7 + PT_STACK+ST_TOP] + /* Go to main protected system call handler */ + ct %ctpr1 + } +.size $ttable_entry10, . - $ttable_entry10 +#endif /* CONFIG_PROTECTED_MODE */ + +.global osgd_to_gd +.section ".ttable_entry11", "ax" +.type osgd_to_gd,@function +osgd_to_gd: + { + nop 3 + return %ctpr3 + } + ct %ctpr3 +.size $osgd_to_gd, . - $osgd_to_gd + +.global sighandler_trampoline + +.section ".entry.text", "ax" +.type sighandler_trampoline,@function +sighandler_trampoline: + HANDLER_TRAMPOLINE %ctpr2, 11, sighandler_trampoline_continue, 0 +.size $sighandler_trampoline, . - $sighandler_trampoline + +.global makecontext_trampoline + +.section ".entry.text", "ax" +.type makecontext_trampoline,@function +makecontext_trampoline: + HANDLER_TRAMPOLINE %ctpr2, 11, makecontext_trampoline_continue, 4 +.size $makecontext_trampoline, . 
- $makecontext_trampoline + +.global makecontext_trampoline_protected + +.section ".entry.text", "ax" +.type makecontext_trampoline_protected,@function +makecontext_trampoline_protected: + HANDLER_TRAMPOLINE %ctpr2, 11, makecontext_trampoline_continue, 8 +.size $makecontext_trampoline_protected, . - $makecontext_trampoline_protected diff --git a/arch/e2k/kernel/traps.c b/arch/e2k/kernel/traps.c new file mode 100644 index 000000000000..d80fa7cefb6a --- /dev/null +++ b/arch/e2k/kernel/traps.c @@ -0,0 +1,1729 @@ +/* + * Copyright (C) 2001 MCST + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_USE_AAU +#include +#endif + +#include + +#ifdef CONFIG_PROTECTED_MODE +#include +#endif + +#ifdef CONFIG_MLT_STORAGE +#include +#endif + +#ifdef CONFIG_KPROBES +#include +#endif + +#include + +#include +#include + +#define DEBUG_TRAP_CELLAR 0 /* DEBUG_TRAP_CELLAR */ +#define DbgTC(...) DebugPrint(DEBUG_TRAP_CELLAR, ##__VA_ARGS__) + +#undef DEBUG_PF_MODE +#undef DebugPF +#define DEBUG_PF_MODE 0 /* Page fault */ +#define DebugPF(...) DebugPrint(DEBUG_PF_MODE, ##__VA_ARGS__) + +#undef DEBUG_US_EXPAND +#undef DebugUS +#define DEBUG_US_EXPAND 0 /* User stacks */ +#define DebugUS(...) DebugPrint(DEBUG_US_EXPAND, ##__VA_ARGS__) + +#undef DEBUG_MEM_LOCK +#undef DebugML +#define DEBUG_MEM_LOCK 0 +#define DebugML(...) 
DebugPrint(DEBUG_MEM_LOCK, ##__VA_ARGS__) + +/* Forward declarations */ +static void do_illegal_opcode(struct pt_regs *regs); +static void do_priv_action(struct pt_regs *regs); +static void do_fp_disabled(struct pt_regs *regs); +static void do_fp_stack_u(struct pt_regs *regs); +static void do_d_interrupt(struct pt_regs *regs); +static void do_diag_ct_cond(struct pt_regs *regs); +static void do_diag_instr_addr(struct pt_regs *regs); +static void do_illegal_instr_addr(struct pt_regs *regs); +static void do_instr_debug(struct pt_regs *regs); +static void do_window_bounds(struct pt_regs *regs); +static void do_user_stack_bounds(struct pt_regs *regs); +static void do_proc_stack_bounds(struct pt_regs *regs); +static void do_chain_stack_bounds(struct pt_regs *regs); +static void do_fp_stack_o(struct pt_regs *regs); +static void do_diag_cond(struct pt_regs *regs); +static void do_diag_operand(struct pt_regs *regs); +static void do_illegal_operand(struct pt_regs *regs); +static void do_array_bounds(struct pt_regs *regs); +static void do_access_rights(struct pt_regs *regs); +static void do_addr_not_aligned(struct pt_regs *regs); +static void do_instr_page_miss(struct pt_regs *regs); +static void do_instr_page_prot(struct pt_regs *regs); +static void do_ainstr_page_miss(struct pt_regs *regs); +static void do_ainstr_page_prot(struct pt_regs *regs); +static void do_last_wish(struct pt_regs *regs); +static void do_base_not_aligned(struct pt_regs *regs); +static void do_software_trap(struct pt_regs *regs); +static void do_data_debug(struct pt_regs *regs); +static void do_data_page(struct pt_regs *regs); +void do_nm_interrupt(struct pt_regs *regs); +static void do_division(struct pt_regs *regs); +static void do_fp(struct pt_regs *regs); +static void do_mem_lock(struct pt_regs *regs); +static void do_mem_lock_as(struct pt_regs *regs); +static void do_data_error(struct pt_regs *regs); +static void do_mem_error(struct pt_regs *regs); +static void do_unknown_exc(struct pt_regs *regs); 
+static void do_recovery_point(struct pt_regs *regs); + +/* Exception table. */ +typedef void (*exceptions)(struct pt_regs *regs); +const exceptions exc_tbl[] = { +/*0*/ (exceptions)(do_illegal_opcode), +/*1*/ (exceptions)(do_priv_action), +/*2*/ (exceptions)(do_fp_disabled), +/*3*/ (exceptions)(do_fp_stack_u), +/*4*/ (exceptions)(do_d_interrupt), +/*5*/ (exceptions)(do_diag_ct_cond), +/*6*/ (exceptions)(do_diag_instr_addr), +/*7*/ (exceptions)(do_illegal_instr_addr), +/*8*/ (exceptions)(do_instr_debug), +/*9*/ (exceptions)(do_window_bounds), +/*10*/ (exceptions)(do_user_stack_bounds), +/*11*/ (exceptions)(do_proc_stack_bounds), +/*12*/ (exceptions)(do_chain_stack_bounds), +/*13*/ (exceptions)(do_fp_stack_o), +/*14*/ (exceptions)(do_diag_cond), +/*15*/ (exceptions)(do_diag_operand), +/*16*/ (exceptions)(do_illegal_operand), +/*17*/ (exceptions)(do_array_bounds), +/*18*/ (exceptions)(do_access_rights), +/*19*/ (exceptions)(do_addr_not_aligned), +/*20*/ (exceptions)(do_instr_page_miss), +/*21*/ (exceptions)(do_instr_page_prot), +/*22*/ (exceptions)(do_ainstr_page_miss), +/*23*/ (exceptions)(do_ainstr_page_prot), +/*24*/ (exceptions)(do_last_wish), +/*25*/ (exceptions)(do_base_not_aligned), +/*26*/ (exceptions)(do_software_trap), +/*27*/ (exceptions)(do_unknown_exc), +/*28*/ (exceptions)(do_data_debug), +/*29*/ (exceptions)(do_data_page), +/*30*/ (exceptions)(do_unknown_exc), +/*31*/ (exceptions)(do_recovery_point), +/*32*/ (exceptions)(native_do_interrupt), +/*33*/ (exceptions)(do_nm_interrupt), +/*34*/ (exceptions)(do_division), +/*35*/ (exceptions)(do_fp), +/*36*/ (exceptions)(do_mem_lock), +/*37*/ (exceptions)(do_mem_lock_as), +/*38*/ (exceptions)(do_data_error), +/*39*/ (exceptions)(do_mem_error), +/*40*/ (exceptions)(do_mem_error), +/*41*/ (exceptions)(do_mem_error), +/*42*/ (exceptions)(do_mem_error), +/*43*/ (exceptions)(do_mem_error) +}; + +const char *exc_tbl_name[] = { +/*0*/ "exc_illegal_opcode", +/*1*/ "exc_priv_action", +/*2*/ "exc_fp_disabled", +/*3*/ 
"exc_fp_stack_u", +/*4*/ "exc_d_interrupt", +/*5*/ "exc_diag_ct_cond", +/*6*/ "exc_diag_instr_addr", +/*7*/ "exc_illegal_instr_addr", +/*8*/ "exc_instr_debug", +/*9*/ "exc_window_bounds", +/*10*/ "exc_user_stack_bounds", +/*11*/ "exc_proc_stack_bounds", +/*12*/ "exc_chain_stack_bounds", +/*13*/ "exc_fp_stack_o", +/*14*/ "exc_diag_cond", +/*15*/ "exc_diag_operand", +/*16*/ "exc_illegal_operand", +/*17*/ "exc_array_bounds", +/*18*/ "exc_access_rights", +/*19*/ "exc_addr_not_aligned", +/*20*/ "exc_instr_page_miss", +/*21*/ "exc_instr_page_prot", +/*22*/ "exc_ainstr_page_miss", +/*23*/ "exc_ainstr_page_prot", +/*24*/ "exc_last_wish", +/*25*/ "exc_base_not_aligned", +/*26*/ "exc_software_trap", +/*27*/ "exc_unknown_exc", +/*28*/ "exc_data_debug", +/*29*/ "exc_data_page", +/*30*/ "exc_unknown_exc", +/*31*/ "exc_recovery_point", +/*32*/ "exc_interrupt", +/*33*/ "exc_nm_interrupt", +/*34*/ "exc_division", +/*35*/ "exc_fp", +/*36*/ "exc_memlock", +/*37*/ "exc_memlock_as", +/*38*/ "exc_data_error", +/*39*/ "exc_mem_error", +/*40*/ "exc_mem_error", +/*41*/ "exc_mem_error", +/*42*/ "exc_mem_error", +/*43*/ "exc_mem_error" +}; + +int debug_signal = false; +bool dump_signal_stack = false; + +static int __init sig_debug_setup(char *str) +{ + debug_signal = true; + return 1; +} +__setup("sigdebug", sig_debug_setup); + +static int __init sig_dump_stack_setup(char *str) +{ + dump_signal_stack = true; + debug_signal = true; + return 1; +} +__setup("sigdumpstack", sig_dump_stack_setup); + +int debug_trap = 0; +static int __init debug_trap_setup(char *str) +{ + debug_trap = 1; + return 1; +} +__setup("trap_regs", debug_trap_setup); + +int sig_on_mem_err = 0; +static int __init sig_on_mem_err_setup(char *str) +{ + sig_on_mem_err = 1; + return 1; +} +__setup("sig_on_mem_err", sig_on_mem_err_setup); + +void __init trap_init(void) +{ +} + +void start_dump_print(void) +{ +#if defined(CONFIG_EARLY_PRINTK) + switch_to_early_dump_console(); +#endif + oops_in_progress = 1; + flush_TLB_all(); + 
ftrace_dump(DUMP_ALL); + console_verbose(); +} + +DEFINE_RAW_SPINLOCK(print_lock); +#ifdef CONFIG_SMP +static atomic_t one_finished = ATOMIC_INIT(0); +static unsigned char cpu_is_main[NR_CPUS] = { 0 }; +static unsigned char cpu_in_dump[NR_CPUS] = { 0 }; +#define my_cpu_is_main cpu_is_main[raw_smp_processor_id()] +#define my_cpu_in_dump cpu_in_dump[raw_smp_processor_id()] +#else /* ! CONFIG_SMP */ +#define my_cpu_is_main 1 +static unsigned char cpu_in_dump = 0; +#define my_cpu_in_dump cpu_in_dump +#endif + +#ifdef CONFIG_DUMP_ALL_STACKS +static void do_coredump_in_future(void) +{ + unsigned long flags; + int count = 0; + bool locked = true; + + while (!raw_spin_trylock_irqsave(&print_lock, flags)) { + udelay(1000); + if (count++ >= 3000) { + locked = false; + break; + } + } + + dump_stack(); + + if (my_cpu_is_main) { + show_state(); + console_flush_on_panic(CONSOLE_REPLAY_ALL); + } + + if (locked) + raw_spin_unlock_irqrestore(&print_lock, flags); +} + +void coredump_in_future(void) +{ +# ifdef CONFIG_SMP + my_cpu_is_main = (atomic_inc_return(&one_finished) == 1); +# endif + +#if defined(CONFIG_SERIAL_PRINTK) && defined(CONFIG_SMP) + if (my_cpu_in_dump) + vprint_lock = __BOOT_SPIN_LOCK_UNLOCKED; /* unlocked */ + +#endif + + my_cpu_in_dump = 1; + + start_dump_print(); + do_coredump_in_future(); + +# ifdef CONFIG_SMP + atomic_dec(&one_finished); +# endif +} +#endif /* CONFIG_DUMP_ALL_STACKS */ + +static inline u64 +native_TIR0_clear_false_exceptions(u64 TIR_hi, int nr_TIRs) +{ + /* + * Hardware features: + * + * If register TIR0 contains deferred or asynchronous and + * precise traps, then some of precise traps can be false. + * Trap handler should handle only deferred and asynchronous + * traps and return to interrupted command. All precise + * exceptions will be thrown again, but this time there will + * be no false positives from asynchronous/deferred traps. + * + * When number of TIRs is greater than 0 precise traps bits + * are cleared automatically by hardware. 
+ */ + if (nr_TIRs == 0) { + if (TIR_hi & (async_exc_mask | defer_exc_mask)) { + /* + * Precise traps should be masked. + */ + if (TIR_hi & sync_exc_mask) + DbgTC("ignore precise traps in TIR0 0x%llx\n", + TIR_hi); + TIR_hi &= ~sync_exc_mask; + } else { + /* + * Precise traps should not be masked. + * + * But some precise traps can be a consequence + * of the others. + */ + if (TIR_hi & exc_illegal_opcode_mask) + TIR_hi &= ~sync_exc_mask | + exc_illegal_opcode_mask; + else if (TIR_hi & (exc_window_bounds_mask + | exc_fp_stack_u_mask + | exc_fp_stack_o_mask)) + TIR_hi &= ~(exc_diag_operand_mask + | exc_illegal_operand_mask + | exc_array_bounds_mask + | exc_access_rights_mask + | exc_addr_not_aligned_mask + | exc_base_not_aligned_mask); + } + } + + return TIR_hi; +} + +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT +# define INCREASE_TRAP_NUM(trap_times) \ + do { trap_times->trap_num++; } while (0) +#else +# define INCREASE_TRAP_NUM(trap_times) +#endif + +#define HANDLE_TIR_EXCEPTION(regs, exc_num, func, pass_func, tir_hi, tir_lo) \ +do { \ + unsigned long handled = 0; \ +\ + read_ticks(start_tick); \ +\ + (regs)->trap->nr_trap = exc_num; \ + if (pass_func) \ + handled = pass_func(regs, tir_hi, tir_lo, exc_num); \ + if (!handled) \ + (func)(regs); \ +\ + add_info_interrupt(exc_num, start_tick); \ + INCREASE_TRAP_NUM(trap_times); \ +} while (0) + +static __always_inline void +handle_nm_exceptions(struct pt_regs *regs, e2k_tir_t *TIRs, u64 nmi) +{ + /* + * Handle NMIs from TIR0 + */ + if (nmi & exc_instr_debug_mask) + HANDLE_TIR_EXCEPTION(regs, exc_instr_debug_num, do_instr_debug, + pass_the_trap_to_guest, + TIRs[0].TIR_hi.TIR_hi_reg, TIRs[0].TIR_lo.TIR_lo_reg); + + /* + * Handle NMIs from TIR1 + */ + if (nmi & exc_data_debug_mask) + HANDLE_TIR_EXCEPTION(regs, exc_data_debug_num, do_data_debug, + pass_the_trap_to_guest, + TIRs[0].TIR_hi.TIR_hi_reg, TIRs[0].TIR_lo.TIR_lo_reg); + + /* + * Handle NMIs from the last TIR + */ + if (nmi & exc_nm_interrupt_mask) + 
HANDLE_TIR_EXCEPTION(regs, exc_nm_interrupt_num, + do_nm_interrupt, + pass_nm_interrupt_to_guest, + TIRs[0].TIR_hi.TIR_hi_reg, TIRs[0].TIR_lo.TIR_lo_reg); + if (nmi & exc_mem_lock_as_mask) + HANDLE_TIR_EXCEPTION(regs, exc_mem_lock_as_num, do_mem_lock_as, + pass_the_trap_to_guest, + TIRs[0].TIR_hi.TIR_hi_reg, TIRs[0].TIR_lo.TIR_lo_reg); +} + +/** + * parse_TIR_registers - call handlers for all arrived exceptions + * @regs: saved context + * @exceptions: mask of all arrived exceptions + * + * Noinline because we update %cr1_lo.psr (so that interrupts are + * enabled in caller). + */ +noinline __irq_entry +notrace void parse_TIR_registers(struct pt_regs *regs, u64 exceptions) +{ + struct trap_pt_regs *trap = regs->trap; + register unsigned long TIR_hi, TIR_lo; + register unsigned long nr_TIRs = trap->nr_TIRs; + register unsigned int nr_intrpt; + e2k_tir_t *TIRs = trap->TIRs; +#ifdef CONFIG_E2K_PROFILING + register unsigned long start_tick; +#endif +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT + thread_info_t *thread_info = current_thread_info(); + register trap_times_t *trap_times; + register int count; +#endif + u64 nmi = exceptions & non_maskable_exc_mask; + bool user = user_mode(regs); + /* + * We enable interrupts if this is a user interrupt (required to + * handle AAU) or if this is a page fault on a user address that + * did not happen in an atomic context. + */ + bool enable_irqs = user || nr_TIRs > 0 && + (AW(TIRs[1].TIR_hi) & exc_data_page_mask) && + !in_atomic() && !pagefault_disabled(); +#ifdef CONFIG_DUMP_ALL_STACKS + bool core_dump = unlikely(nr_TIRs == 0 && + AS(TIRs[0].TIR_hi).exc == 0 && + AS(TIRs[0].TIR_hi).aa == 0); +#endif + + /* + * We handle interrupts in the following order: + * 1) Non-maskable interrupts are handled under closed NMIs + * 2) Open non-maskable interrupts + * 3) exc_interrupt + * 4) Open maskable interrupts if this is user mode intertupt + * 5) Handle everything else. 
+ */ + +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT + GET_DECR_KERNEL_TIMES_COUNT(thread_info, count); + trap_times = &(thread_info->times[count].of.trap); + trap_times->nr_TIRs = nr_TIRs; + trap_times->psp_hi = regs->stacks.psp_hi; + trap_times->pcsp_hi = regs->stacks.pcsp_hi; + trap_times->trap_num = 0; +#endif + +#ifdef CONFIG_CLI_CHECK_TIME + check_cli(); +#endif + + AW(TIRs[0].TIR_hi) = TIR0_clear_false_exceptions(AW(TIRs[0].TIR_hi), + nr_TIRs); + + /* + * 1) Handle NMIs + */ + + if (unlikely(nmi)) + handle_nm_exceptions(regs, TIRs, nmi); + + + /* + * 2) All NMIs have been handled, now we can open them. + * Note that we do not allow NMIs nesting to avoid stack overflow. + * + * + * Hardware trap operation disables interrupts mask in PSR + * and PSR becomes main register to control interrupts. + * Switch control from PSR register to UPSR, if UPSR + * interrupts control is used and all following trap handling + * will be executed under UPSR control. + * + * SGE was already disabled by hardware on trap enter. + * + * We disable NMI in UPSR here again in case a local_irq_save() + * called from an NMI handler enabled it. 
+ */ + INIT_KERNEL_UPSR_REG(false, nmi && !enable_irqs && + !(exceptions & exc_interrupt_mask)); + SWITCH_IRQ_TO_UPSR(true); + trace_hardirqs_off(); + + + /* + * 3) Handle external interrupts before enabling interrupts + */ + if (trace_tir_enabled()) { + int i; + + for (i = 0; i <= nr_TIRs; i++) + trace_tir(AW(TIRs[i].TIR_lo), AW(TIRs[i].TIR_hi)); + } + + if (IS_ENABLED(CONFIG_KVM_HOST_MODE) && kvm_test_intc_emul_flag(regs)) { + if (trace_intc_tir_enabled()) { + int i; + + for (i = 0; i <= nr_TIRs; i++) + trace_intc_tir(AW(TIRs[i].TIR_lo), AW(TIRs[i].TIR_hi)); + } + + if (trace_intc_trap_cellar_enabled()) { + int cnt; + + for (cnt = 0; (3 * cnt) < trap->tc_count; cnt++) + trace_intc_trap_cellar(&trap->tcellar[cnt], cnt); + } + + if (trace_intc_ctprs_enabled()) { + trace_intc_ctprs(AW(regs->ctpr1), AW(regs->ctpr1_hi), + AW(regs->ctpr2), AW(regs->ctpr2_hi), + AW(regs->ctpr3), AW(regs->ctpr3_hi)); + } + +#ifdef CONFIG_USE_AAU + if (trace_intc_aau_enabled()) { + e2k_aau_t *aau_context = regs->aau_context; + + if (AW(aau_context->guest_aasr)) + trace_intc_aau(aau_context, regs->lsr, regs->lsr1, + regs->ilcr, regs->ilcr1); + } +#endif + } + + if (exceptions & exc_interrupt_mask) + HANDLE_TIR_EXCEPTION(regs, exc_interrupt_num, + handle_interrupt, + pass_interrupt_to_guest, + TIRs[0].TIR_hi.TIR_hi_reg, TIRs[0].TIR_lo.TIR_lo_reg); + + pass_virqs_to_guest(regs, TIRs[0].TIR_hi.TIR_hi_reg, + TIRs[0].TIR_lo.TIR_lo_reg); + + + /* + * 4) Open interrupts if possible + * + * + * There are several reasons to not enable interrupts in kernel: + * + * - Linux does not support NMIs nesting, so do not enable + * interrupts when handling them. Otherwise we can have + * spurious APIC interrupts. + * + * - Besides NMIs there are other non-maskable exceptions: + * exc_instr_debug, exc_data_debug, exc_mem_lock_as. So + * opening non-maskable interrupts can gretly increase + * stack usage. + * + * - Opening interrupts in kernel mode increases the maximum + * stack usage. 
This is also true for non-maskable interrupts + * (we can have 4 nested interrupts from monitoring registers + * only). + * + * - We do not want to enable interrupts when get_user() was + * called from a critical section with disabled interrupts. + */ + if (enable_irqs) + local_irq_enable(); + +#ifdef CONFIG_USE_AAU + if (user) { + int aa_field; + + /* + * For SDBGPRINT from do_aau_fault_*() -> do_page_fault() + * and for handle_forbidden_aau_load(). + */ + TIR_lo = AW(TIRs[0].TIR_lo); + TIR_hi = AW(TIRs[0].TIR_hi); + trap->TIR_lo = TIR_lo; + trap->TIR_hi = TIR_hi; + + /* + * AAU fault must be handled with open interrupts + */ + aa_field = GET_AA_TIRS(TIR_hi); + if (aa_field) { + unsigned long handled; + + /* check is trap occured on guest and */ + /* should be passed to guest kernel */ + handled = pass_aau_trap_to_guest(regs, + TIR_hi, TIR_lo); + if (!handled) + machine.do_aau_fault(aa_field, regs); + } + } +#endif + + + /* + * 5) Handle all other exceptions + */ + +#pragma loop count (2) + do { + TIR_hi = AW(TIRs[nr_TIRs].TIR_hi); + TIR_lo = AW(TIRs[nr_TIRs].TIR_lo); + + trap->TIR_hi = TIR_hi; + trap->TIR_lo = TIR_lo; + trap->TIR_no = nr_TIRs; + +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT + trap_times->TIRs[nr_TIRs].TIR_hi.TIR_hi_reg = TIR_hi; + trap_times->TIRs[nr_TIRs].TIR_lo.TIR_lo_reg = TIR_lo; + if (nr_TIRs == 0) { + trap_times->pcs_bounds = + !!(TIR_hi & exc_chain_stack_bounds_mask); + trap_times->ps_bounds = + !!(TIR_hi & exc_proc_stack_bounds_mask); + } +#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */ + + /* + * Define number of interrupt (nr_intrpt) and run needed handler + * (*exc_tbl[nr_intrpt])(regs); + */ + TIR_hi &= exc_all_mask; +#pragma loop count (1) + for (nr_intrpt = __ffs64(TIR_hi); TIR_hi != 0; + TIR_hi &= ~(1UL << nr_intrpt), + nr_intrpt = __ffs64(TIR_hi)) { + BUG_ON(nr_intrpt >= sizeof(exc_tbl)/sizeof(exc_tbl[0])); + + if ((1UL << nr_intrpt) & (non_maskable_exc_mask | + exc_interrupt_mask)) + continue; + + HANDLE_TIR_EXCEPTION(regs, nr_intrpt, + 
*exc_tbl[nr_intrpt], + pass_the_trap_to_guest, + TIRs[0].TIR_hi.TIR_hi_reg, + TIRs[0].TIR_lo.TIR_lo_reg); + } + } while (nr_TIRs-- > 0); + +#ifdef CONFIG_DUMP_ALL_STACKS + if (unlikely(core_dump)) + pass_coredump_trap_to_guest(regs); +#endif /* CONFIG_DUMP_ALL_STACKS */ + + /* now it needs handle all traps passed to guest by guest kernel */ + handle_guest_traps(regs); + +#ifdef CONFIG_DUMP_ALL_STACKS + if (unlikely(core_dump)) + coredump_in_future(); +#endif /* CONFIG_DUMP_ALL_STACKS */ +} + +DEFINE_RAW_SPINLOCK(die_lock); + +static inline int __die(const char *str, struct pt_regs *regs, long err) +{ + int ret; + + pr_alert("die %s: %lx\n", str, err); + + show_regs(regs); + + ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV); + if (ret == NOTIFY_STOP) + return ret; + + return 0; +} + +static __cold void die(const char *str, struct pt_regs *regs, long err) +{ + int ret; + + oops_enter(); + raw_spin_lock_irq(&die_lock); + console_verbose(); + bust_spinlocks(1); + + ret = __die(str, regs, err); + + bust_spinlocks(0); + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); + raw_spin_unlock_irq(&die_lock); + oops_exit(); + + if (in_interrupt()) + panic("Fatal exception in interrupt"); + if (panic_on_oops) + panic("Fatal exception"); + if (ret != NOTIFY_STOP) + do_exit(SIGSEGV); +} + +static inline void die_if_kernel(const char *str, struct pt_regs *regs, + long err) +{ + /* + * Check SBR. This check can be wrong only in one case: when + * we get an exc_array_bounds upon entering system call, but + * it is OK. This way fast system calls code is also detected + * as user mode (as it should). 
+ */ + if (!user_mode(regs)) + die(str, regs, err); +} + +static inline void die_if_init(const char * str, struct pt_regs * regs, + long err) +{ + struct task_struct *tsk = current; + + if (tsk->pid == 1) + die(str, regs, err); +} + +/* + * The architecture-independent dump_stack generator + */ +void dump_stack(void) +{ + console_verbose(); + bust_spinlocks(1); + print_stack_frames(current, NULL, 1); + bust_spinlocks(0); +} +EXPORT_SYMBOL(dump_stack); + +static void do_illegal_opcode(struct pt_regs *regs) +{ +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT + thread_info_t *thread_info = current_thread_info(); + + sys_e2k_print_kernel_times(current, thread_info->times, + thread_info->times_num, thread_info->times_index); +#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */ + + if (!user_mode(regs)) { + u32 *ip; + + if (is_kprobe_break1_trap(regs)) { + notify_die(DIE_BREAKPOINT, "break", regs, 0, + exc_illegal_opcode_num, SIGTRAP); + return; + } + + ip = (u32 *) AS(regs->trap->TIRs[0].TIR_lo).base; + pr_alert("*0x%llx = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", + (u64) ip, ip[0], ip[1], ip[2], ip[3], + ip[4], ip[5], ip[6], ip[7]); + die("illegal_opcode trap in kernel mode", regs, 0); + } else { + die_if_init("illegal_opcode trap in init process", regs, SIGILL); + + if (is_gdb_breakpoint_trap(regs)) { + S_SIG(regs, SIGTRAP, exc_illegal_opcode_num, TRAP_BRKPT); + } else { + S_SIG(regs, SIGILL, exc_illegal_opcode_num, ILL_ILLOPC); + SDBGPRINT_WITH_STACK("SIGILL. illegal_opcode"); + } + } +} + +static void do_priv_action(struct pt_regs *regs) +{ + die_if_kernel("priv_action trap in kernel mode", regs, 0); + S_SIG(regs, SIGILL, exc_priv_action_num, ILL_PRVOPC); + SDBGPRINT_WITH_STACK("SIGILL. 
priv_action"); + +} + +static void do_fp_disabled(struct pt_regs *regs) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) + panic("fp_disabled trap was removed in iset v6\n"); + + die_if_kernel("fp_disabled trap in kernel mode", regs, 0); + S_SIG(regs, SIGILL, exc_fp_disabled_num, ILL_COPROC); + SDBGPRINT_WITH_STACK("SIGILL. fp_disabled"); +} + +static void do_fp_stack_u(struct pt_regs *regs) +{ + die_if_kernel("fp_stack_u trap in kernel mode", regs, 0); + S_SIG(regs, SIGFPE, exc_fp_stack_u_num, FPE_FLTINV); + SDBGPRINT("SIGFPE. fp_stack_u"); +} + +static void *syscall_entry_begin = _t_entry + 0x800; +static void *syscall_entry_end = _t_entry_end; +static void do_d_interrupt(struct pt_regs *regs) +{ + die_if_kernel("d_interrupt trap in kernel mode", regs, 0); + + S_SIG(regs, SIGBUS, exc_d_interrupt_num, BUS_OBJERR); + SDBGPRINT("SIGBUS. d_interrupt"); + + if (TASK_IS_BINCO(current)) { + /* + * UPSR.di == 1, VFDI called, and what must we do here? + * + * There are two cases: + * 1) if VFDI's long instructions does not have a system call + * then we just send a SIGSEGV. + * 2) if VFDI's long instructions contains a system call then + * we will set a special flag forbidding signal handling in + * interrupts so that it will be handled only after that + * system call. + * + * For details refer to bug #56664. + */ + void *ip = (void *) (AS(regs->crs.cr0_hi).ip << 3); + if (ip >= syscall_entry_begin && ip < syscall_entry_end) + set_delayed_signal_handling(current_thread_info()); + } +} + +static void do_diag_ct_cond(struct pt_regs *regs) +{ + die_if_kernel("diag_ct_cond trap in kernel mode", regs, 0); + + S_SIG(regs, SIGILL, exc_diag_ct_cond_num, ILL_ILLOPN); + SDBGPRINT_WITH_STACK("SIGILL. diag_ct_cond"); +} + +static void do_diag_instr_addr(struct pt_regs *regs) +{ + die_if_kernel("diag_instr_addr trap in kernel mode", regs, 0); + + S_SIG(regs, SIGILL, exc_diag_instr_addr_num, ILL_ILLADR); + SDBGPRINT_WITH_STACK("SIGILL. 
diag_instr_addr"); +} + +static void do_illegal_instr_addr(struct pt_regs *regs) +{ + die_if_kernel("illegal_instr_addr trap in kernel mode", regs, 0); + + if (cpu_has(CPU_HWBUG_SPURIOUS_EXC_ILL_INSTR_ADDR)) { + SDBGPRINT("Not sending SIGILL: illegal_instr_addr ignored"); + } else { + S_SIG(regs, SIGILL, exc_illegal_instr_addr_num, SEGV_MAPERR); + SDBGPRINT_WITH_STACK("SIGILL. illegal_instr_addr"); + } +} + +static notrace void do_instr_debug(struct pt_regs *regs) +{ + e2k_dibsr_t dibsr; + e2k_dimcr_t dimcr; + + nmi_enter(); + + dimcr = dimcr_pause(); + + /* Make sure gdb sees the new value */ + current->thread.sw_regs.dibsr = READ_DIBSR_REG(); + + /* Call registered handlers */ + if (!user_mode(regs)) + kprobe_instr_debug_handle(regs); + bp_instr_overflow_handle(regs); + perf_instr_overflow_handle(regs); + + /* Send SIGTRAP if this was from ptrace */ + dibsr = READ_DIBSR_REG(); + if (dibsr.m0 || dibsr.m1 || dibsr.ss || dibsr.b0 || + dibsr.b1 || dibsr.b2 || dibsr.b3) { + /* ptrace works in user space only */ + if ((current->flags & PF_KTHREAD) || !user_mode(regs)) + die("instr_debug trap in kernel mode", regs, 0); + S_SIG(regs, SIGTRAP, exc_instr_debug_num, TRAP_HWBKPT); + /* #24785 Customer asks us to avoid this annoying message + SDBGPRINT("SIGTRAP. Stop on breakpoint"); */ + + dibsr.m0 = 0; + dibsr.m1 = 0; + dibsr.b0 = 0; + dibsr.b1 = 0; + dibsr.b2 = 0; + dibsr.b3 = 0; + dibsr.ss = 0; + WRITE_DIBSR_REG(dibsr); + } + + dimcr_continue(dimcr); + + nmi_exit(); +} + +static void do_window_bounds(struct pt_regs *regs) +{ + die_if_kernel("window_bounds trap in kernel mode", regs, 0); + S_SIG(regs, SIGSEGV, exc_window_bounds_num, SEGV_BOUNDS); + SDBGPRINT_WITH_STACK("SIGSEGV. window_bounds"); +} + +static void do_user_stack_bounds(struct pt_regs *regs) +{ + die_if_kernel("user_stack_bounds trap in kernel mode", regs, 0); + S_SIG(regs, SIGSEGV, exc_user_stack_bounds_num, SEGV_BOUNDS); + SDBGPRINT_WITH_STACK("SIGSEGV. 
user_stack_bounds"); +} + +static void do_proc_stack_bounds(struct pt_regs *regs) +{ + die_if_kernel("proc_stack_bounds trap in kernel mode", regs, 0); + + if (handle_proc_stack_bounds(®s->stacks, regs->trap)) { + SDBGPRINT_WITH_STACK("SIGSEGV. Could not expand procedure stack"); + force_sig(SIGSEGV); + return; + } +} + +static void do_chain_stack_bounds(struct pt_regs *regs) +{ + die_if_kernel("chain_stack_bounds trap in kernel mode", regs, 0); + + if (handle_chain_stack_bounds(®s->stacks, regs->trap)) { + SDBGPRINT_WITH_STACK("SIGSEGV. Could not expand chain stack"); + force_sig(SIGSEGV); + return; + } +} + +static void do_fp_stack_o(struct pt_regs *regs) +{ + die_if_kernel("fp_stack_o trap in kernel mode", regs, 0); + S_SIG(regs, SIGFPE, exc_fp_stack_o_num, FPE_FLTINV); + SDBGPRINT("SIGFPE. fp_stack_o"); +} + +static void do_diag_cond(struct pt_regs *regs) +{ + die_if_kernel("diag_cond trap in kernel mode", regs, 0); + + S_SIG(regs, SIGILL, exc_diag_cond_num, ILL_ILLOPN); + SDBGPRINT_WITH_STACK("SIGILL. diag_cond"); +} + +static void do_diag_operand(struct pt_regs *regs) +{ + /* Some history... */ + regs->trap->TIR_lo &= 0x0000ffffffffffff; + + DbgTC("start\n"); + DbgTC("regs->cr0: IP 0x%llx\n", GET_IP_CR0_HI(regs->crs.cr0_hi)); + die_if_kernel("diag_operand trap in kernel mode", regs, 0); + die_if_init("diag_operand trap in init process", regs, 0); + + S_SIG(regs, SIGILL, exc_diag_operand_num, ILL_ILLOPN); + SDBGPRINT_WITH_STACK("SIGILL. diag_operand"); + + DbgTC("finish"); +} + +static void do_illegal_operand(struct pt_regs *regs) +{ + die_if_kernel("illegal_operand trap in kernel mode", regs, 0); + die_if_init("illegal_operand trap in init process", regs, 0); + + S_SIG(regs, SIGILL, exc_illegal_operand_num, ILL_ILLOPN); + SDBGPRINT_WITH_STACK("SIGILL. 
illegal_operand"); +} + +static void force_sigsegv_array_bounds(struct pt_regs *user_regs) +{ + void __user *addr; + + /* #100842 Pass address of the first byte below the stack */ + addr = (void __user *) (user_stack_pointer(user_regs) - + AS(user_regs->stacks.usd_hi).size - 1); + + force_sig_fault(SIGSEGV, SEGV_ACCERR, addr, exc_array_bounds_num); +} + + +static void do_array_bounds(struct pt_regs *regs) +{ + void __user *fault_addr; + int incr; + + die_if_kernel("array_bounds trap in kernel mode\n", regs, 0); + + switch (parse_getsp_operation(regs, &incr, &fault_addr)) { + case GETSP_OP_INCREMENT: + if (expand_user_data_stack(regs, (unsigned int) incr)) { + force_sigsegv_array_bounds(regs); + SDBGPRINT_WITH_STACK("SIGSEGV. expand on array_bounds"); + } + break; + case GETSP_OP_DECREMENT: + if (constrict_user_data_stack(regs, incr)) { + force_sig(SIGSEGV); + SDBGPRINT_WITH_STACK("SIGSEGV. constrict on array_bounds"); + } + break; + case GETSP_OP_SIGSEGV: + force_sig_fault(SIGSEGV, SEGV_BOUNDS, fault_addr, exc_array_bounds_num); + SDBGPRINT_WITH_STACK("SIGSEGV. array_bounds - could not read getsp instruction"); + break; + case GETSP_OP_FAIL: + S_SIG(regs, SIGSEGV, exc_array_bounds_num, SEGV_BOUNDS); + SDBGPRINT_WITH_STACK("SIGSEGV. array_bounds on not a getsp instruction"); + break; + default: + BUG(); + } +} + +static void do_access_rights(struct pt_regs *regs) +{ + die_if_kernel("access_rights trap in kernel mode", regs, 0); + + S_SIG(regs, SIGSEGV, exc_access_rights_num, SEGV_ACCERR); + SDBGPRINT_WITH_STACK("SIGSEGV. 
access_rights"); +} + +static void do_addr_not_aligned(struct pt_regs *regs) +{ + if (kernel_mode(regs)) { + e2k_upsr_t upsr = NATIVE_NV_READ_UPSR_REG(); + + pr_err("TRAP addr not aligned, UPSR.ac is %d\n", + upsr.UPSR_ac); + if (upsr.UPSR_ac) { + upsr.UPSR_ac = 0; + NATIVE_WRITE_UPSR_REG(upsr); + } else { + /* goto infinite loop to avoid recursion */ + do { + mb(); /* to do not delete loop */ + /* by compiler */ + E2K_LMS_HALT_OK; + } while (true); + } + } + + die_if_kernel("addr_not_aligned trap in kernel mode", regs, 0); + + S_SIG(regs, SIGBUS, exc_addr_not_aligned_num, BUS_ADRALN); + SDBGPRINT_WITH_STACK("SIGBUS. addr_not_aligned"); +} + +static inline void +native_do_instr_page_fault(struct pt_regs *regs, tc_fault_type_t ftype, + const int async_instr) +{ + struct trap_pt_regs *trap = regs->trap; + e2k_addr_t address; + tc_cond_t condition; + tc_mask_t mask; + int ret; + + if (async_instr) { + trap->nr_page_fault_exc = (AS(ftype).page_miss) ? + exc_ainstr_page_miss_num : + exc_ainstr_page_prot_num; + } else { + trap->nr_page_fault_exc = (AS(ftype).page_miss) ? 
+ exc_instr_page_miss_num : + exc_instr_page_prot_num; + } + + if (!async_instr) { + e2k_tir_lo_t tir_lo; + tir_lo.TIR_lo_reg = trap->TIR_lo; + address = tir_lo.TIR_lo_ip; + } else { + address = AS_STRUCT(regs->ctpr2).ta_base; + } + AW(condition) = 0; + AS(condition).store = 0; + AS(condition).spec = 0; + AS(condition).fmt = LDST_DWORD_FMT; + AS(condition).fmtc = 0; + AS(condition).fault_type = AW(ftype); + AW(mask) = 0; + ret = do_page_fault(regs, address, condition, mask, 1); + if (ret == PFR_SIGPENDING) + return; + + if (!async_instr && ((address & PAGE_MASK) != + ((address + E2K_INSTR_MAX_SIZE - 1) & PAGE_MASK))) { + instr_hs_t hs; + instr_syl_t *user_hsp; + int instr_size; + + user_hsp = &E2K_GET_INSTR_HS(address); + while (unlikely(__get_user(AS_WORD(hs), user_hsp))) + do_page_fault(regs, (e2k_addr_t) user_hsp, + condition, mask, 1); + instr_size = E2K_GET_INSTR_SIZE(hs); + if ((address & PAGE_MASK) != ((address + instr_size - 1) & + PAGE_MASK)) { + address = PAGE_ALIGN_UP(address + instr_size); + DebugPF("instruction on pages " + "bounds: will start handle_mm_fault()" + "for next page 0x%lx\n", address); + (void) do_page_fault(regs, address, condition, mask, 1); + } + } + + if (async_instr) { + /* For asynchronous programs ctpr2 points to the beginning + * of the program, and we have have to determine its length + * by ourselves. So we walk asynchronous program until: + * (1) we find 'branch' instruction; + * (2) we walk the maximum asynchronous program's length; + * (3) we stumble at the end of the page ctpr2 points to. + * + * If (3) is true then we must load the next page. */ + e2k_fapb_instr_t *fapb_addr; + int page_boundary_crossed; + + /* + * Some trickery here. + * + * Every instruction takes E2K_ASYNC_INSTR_SIZE (16 bytes). + * But instructions are only 8-bytes aligned, so they can + * cross pages boundary. 'ct' bit which we are looking for + * is located in the first half of an asynchronous instruction. 
+ * + * So we have to sub (E2K_ASYNC_INSTR_SIZE / 2) to make sure + * that even if the instruction with branch crosses page + * boundary, we will still check its first half (since it + * has already been faulted in). + */ + if (PAGE_ALIGN_UP(address) == PAGE_ALIGN_UP(address - 1 + + MAX_ASYNC_PROGRAM_INSTRUCTIONS + * E2K_ASYNC_INSTR_SIZE)) { + /* Even the biggest asynchronous program will + * fit in this page, no need to do anything */ + page_boundary_crossed = 0; + } else { + int ct_found = 0; + for (fapb_addr = (e2k_fapb_instr_t *) address; + (unsigned long) fapb_addr < + PAGE_ALIGN_UP(address - 1 + + MAX_ASYNC_PROGRAM_INSTRUCTIONS + * E2K_ASYNC_INSTR_SIZE); + fapb_addr += 2) { + e2k_fapb_instr_t fapb; + + while (unlikely(__get_user(AW(fapb), + fapb_addr))) + do_page_fault(regs, + (e2k_addr_t) fapb_addr, + condition, mask, 1); + if (AS(fapb).ct) { + ct_found = 1; + break; + } + } + + if (ct_found) { + /* Special case: even if we have found branch, + * the instruction with branch can itself cross + * pages boundary. 
*/ + if (unlikely(PAGE_ALIGN_UP(fapb_addr) != + PAGE_ALIGN_UP(((u64) fapb_addr) + + E2K_ASYNC_INSTR_SIZE - 1))) + page_boundary_crossed = 1; + else + page_boundary_crossed = 0; + } else { + page_boundary_crossed = 1; + } + } + + if (page_boundary_crossed) { + address = PAGE_ALIGN_UP(address + PAGE_SIZE); + DebugPF("asynchronous instruction on " + "pages bounds: will start handle_mm_fault() " + "for next page 0x%lx\n", address); + (void) do_page_fault(regs, address, condition, mask, 1); + } + } +} +void native_instr_page_fault(struct pt_regs *regs, tc_fault_type_t ftype, + const int async_instr) +{ + native_do_instr_page_fault(regs, ftype, async_instr); +} + +static void do_instr_page_miss(struct pt_regs *regs) +{ + tc_fault_type_t ftype; + + AW(ftype) = 0; + AS(ftype).page_miss = 1; + instr_page_fault(regs, ftype, 0); +} + +static void do_instr_page_prot(struct pt_regs *regs) +{ + tc_fault_type_t ftype; + + AW(ftype) = 0; + AS(ftype).illegal_page = 1; + instr_page_fault(regs, ftype, 0); +} + +static void do_ainstr_page_miss(struct pt_regs *regs) +{ + tc_fault_type_t ftype; + + AW(ftype) = 0; + AS(ftype).page_miss = 1; + instr_page_fault(regs, ftype, 1); +} + +static void do_ainstr_page_prot(struct pt_regs *regs) +{ + tc_fault_type_t ftype; + + AW(ftype) = 0; + AS(ftype).illegal_page = 1; + instr_page_fault(regs, ftype, 1); +} + +static void do_last_wish(struct pt_regs *regs) +{ + struct thread_info *ti = current_thread_info(); + + if (user_mode(regs)) { + getsp_adj_apply(regs); + } else if (handle_guest_last_wish(regs)) { + /* it is wish of host to support guest and it handled */ + return; + } else { + if (!kretprobe_last_wish_handle(regs)) + die("last_wish in kernel mode", regs, 0); + } + + if (ti->last_wish && user_mode(regs)) { + ti->last_wish = false; + return; + } + +#ifdef CONFIG_PROTECTED_MODE + /* + * "Last wish" exception can be induced either by debugger or + * "SP -> global" handling mechanism. 
+ * I will leave some space here for alternative code then execute + * the processor of the "global_sp" list. + */ + lw_global_sp(regs); +#endif /* CONFIG_PROTECTED_MODE */ +} + +static void do_base_not_aligned(struct pt_regs *regs) +{ + die_if_kernel("base_not_aligned in kernel mode", regs, 0); + + S_SIG(regs, SIGBUS, exc_base_not_aligned_num, BUS_ADRALN); + SDBGPRINT_WITH_STACK("SIGBUS. Address base is not aligned"); +} + +int is_valid_bugaddr(unsigned long addr) +{ + return true; +} + +static void do_software_trap(struct pt_regs *regs) +{ + if (user_mode(regs)) { + S_SIG(regs, SIGTRAP, exc_software_trap_num, TRAP_BRKPT); + SDBGPRINT("SIGTRAP. Software trap"); + } else { + struct trap_pt_regs *trap = regs->trap; + enum bug_trap_type btt; + + btt = report_bug(trap->TIRs[0].TIR_lo.TIR_lo_ip, regs); + if (btt == BUG_TRAP_TYPE_WARN) { + unsigned long ip = regs->crs.cr0_hi.CR0_hi_IP; + unsigned long new_ip; + instr_cs1_t *cs1; + + cs1 = find_cs1((void *) ip); + if (cs1 && cs1->opc == CS1_OPC_SETEI && cs1->sft) { + new_ip = ip + get_instr_size_by_vaddr(ip); + correct_trap_return_ip(regs, new_ip); + } + + return; + } + + if (btt == BUG_TRAP_TYPE_BUG) + panic("Oops - BUG"); + + die("software_trap in kernel mode", regs, 0); + } +} + +static notrace void do_data_debug(struct pt_regs *regs) +{ + e2k_ddbsr_t ddbsr; + e2k_ddmcr_t ddmcr; + + nmi_enter(); + + ddmcr = ddmcr_pause(); + + /* Make sure gdb sees the new value */ + current->thread.sw_regs.ddbsr = READ_DDBSR_REG(); + + /* Call registered handlers */ + bp_data_overflow_handle(regs); + perf_data_overflow_handle(regs); + + ddbsr = READ_DDBSR_REG(); + if (ddbsr.m0 || ddbsr.m1 || ddbsr.b0 || ddbsr.b1 || ddbsr.b2 || ddbsr.b3) { + if (DATA_BREAKPOINT_ON) { + /* data breakpoint occured */ + dump_stack(); + goto out; + } + + /* ptrace works in user space only */ + if ((current->flags & PF_KTHREAD) || !user_mode(regs)) { + struct pt_regs *pregs = regs->next; + const struct exception_table_entry *fixup; + bool 
from_execute_mmu_op; + + /* get_user/put_user case: */ + fixup = search_exception_tables( + regs->trap->TIRs[1].TIR_lo.TIR_lo_ip); + from_execute_mmu_op = (pregs && pregs->flags.exec_mmu_op); + + if (!current_thread_info()->usr_pfault_jump && + !fixup && !from_execute_mmu_op) + die("data_debug trap in kernel mode", regs, 0); + } + S_SIG(regs, SIGTRAP, exc_data_debug_num, TRAP_HWBKPT); + /* #24785 Customer asks us to avoid this annoying message + SDBGPRINT("SIGTRAP. Stop on watchpoint"); */ + + ddbsr.m0 = 0; + ddbsr.m1 = 0; + ddbsr.b0 = 0; + ddbsr.b1 = 0; + ddbsr.b2 = 0; + ddbsr.b3 = 0; + WRITE_DDBSR_REG(ddbsr); + } + +out: + ddmcr_continue(ddmcr); + + nmi_exit(); +} + +static void do_data_page(struct pt_regs *regs) +{ + struct trap_pt_regs *trap = regs->trap; + + DbgTC("call do_trap_cellar\n"); + if (!trap->tc_called) { + trap->nr_page_fault_exc = exc_data_page_num; + do_trap_cellar(regs, 1); + do_trap_cellar(regs, 0); + trap->tc_called = 1; + } + DbgTC("after do_trap_cellar\n"); + DbgTC("user_mode(regs) %d signal_pending(current) %d\n", + user_mode(regs), signal_pending(current)); +} + +static void do_recovery_point(struct pt_regs *regs) +{ + unsigned long ip = get_trap_ip(regs); + + /* only for e2s/e8c/e1c+ and next */ + if (machine.native_iset_ver < E2K_ISET_V3) + return do_unknown_exc(regs); + + if (!user_mode(regs)) { + /* We do not warn about ".entry.text" section because + * there are places in it where it is legal to receive + * exc_recovery_point: between kernel entry (syscall entry, + * signal and makecontext trampolines) and up to "crp" + * instruction (including it). False exc_recovery_point + * exceptions can be generated by hardware when loading + * instructions into L1$ (of course only when the + * "generations mode" is active). */ + if (ip < (unsigned long) __entry_handlers_start || + ip >= (unsigned long) __entry_handlers_end) { + /* Should not happen, error in binco. 
*/ + pr_info("%d [%s]: ERROR: exc_recovery_point received in kernel mode\n", + current->pid, current->comm); + } + return; + } + if (!(TASK_IS_BINCO(current) && cpu_has(CPU_FEAT_ISET_V6))) { + S_SIG(regs, SIGBUS, exc_recovery_point_num, BUS_OBJERR); + SDBGPRINT("SIGBUS. exc_recovery_point"); + } +} + +static notrace void __cpuidle return_from_cpuidle(void) { } + +void __cpuidle handle_wtrap(struct pt_regs *regs) +{ + e2k_cr0_hi_t cr0_hi = regs->crs.cr0_hi; + + if (is_from_C3_wait_trap(regs)) { + /* Instruction prefetch is disabled, re-enable it. */ + u64 mmu_cr = READ_MMU_CR() | _MMU_CR_IPD_MASK; + WRITE_MMU_CR(__mmu_reg(mmu_cr)); + + /* NMIs from local exceptions are disabled, re-enable them. */ + WRITE_DDBCR_REG(current->thread.C3.ddbcr); + WRITE_DIBCR_REG(current->thread.C3.dibcr); + WRITE_DDMCR_REG(current->thread.C3.ddmcr); + WRITE_DIMCR_REG(current->thread.C3.dimcr); + } + + AS(cr0_hi).ip = (unsigned long) return_from_cpuidle >> 3; + regs->crs.cr0_hi = cr0_hi; +} + +irqreturn_t native_do_interrupt(struct pt_regs *regs) +{ + int vector = machine.get_irq_vector(); + + if (WARN_ONCE(vector == -1, "empty interrupt vector was received\n")) + return IRQ_NONE; + + /* + * Another CPU has written some data before sending this IPI, + * wait for that data to arrive. + */ + NATIVE_HWBUG_AFTER_LD_ACQ(); + + if (unlikely(is_from_wait_trap(regs))) + handle_wtrap(regs); + +#ifdef CONFIG_MCST + if (unlikely(show_woken_time) > 1) { + per_cpu(prev_intr_clock, smp_processor_id()) = + __this_cpu_read(last_intr_clock); + per_cpu(last_intr_clock, smp_processor_id()) = + getns64timeofday(); + } +#endif + + /* + * We store the interrupt vector to detect cases when this irq is moved + * to another vector. So when the new vector starts arriving, special + * function irq_complete_move() will detect that the arrived vector + * is for the irq that is being migrated and will send the cleanup + * vector to all other cpus from the old configuration of the IRQ. 
+ * + * Stored vector number is compared with expected vector for this IRQ: + * if they are the same (i.e. the actual move was done) and + * move_in_progress == 1 (i.e. old configuration structures has not been + * freed yet), a cleanup IPI is send. + */ + regs->interrupt_vector = vector; + + if (*interrupt[vector]) { + (*interrupt[vector])(regs); + } else { + do_IRQ(regs, vector); + } + return IRQ_HANDLED; +} + +noinline notrace void do_nm_interrupt(struct pt_regs *regs) +{ + nmi_enter(); + do_nmi(regs); + nmi_exit(); +} + +static void do_division(struct pt_regs *regs) +{ + die_if_kernel("division trap in kernel mode", regs, 0); + + S_SIG(regs, SIGFPE, exc_div_num, FPE_INTDIV); + SDBGPRINT("SIGFPE. Division by zero or overflow"); +} + +/* + * IP for fp exection lay in TIRs + */ +static long get_fp_ip(struct trap_pt_regs *trap) +{ + e2k_tir_t *TIRs = trap->TIRs; + e2k_tir_hi_t tir_hi; + e2k_tir_lo_t tir_lo; + int nr_TIRs = trap->nr_TIRs; + int i; + + for (i = nr_TIRs; i >= 0; i --) { + tir_hi = TIRs[i].TIR_hi; + /* do_fp exection - 35 BIT */ + if (!(tir_hi.TIR_hi_exc & (1L<<35))) { + continue; + } + tir_lo = TIRs[i].TIR_lo; + return tir_lo.TIR_lo_ip; + } + printk(" get_fp_ip not find IP\n"); + print_all_TIRs(trap->TIRs, trap->nr_TIRs); + return 0; +} + +static void do_fp(struct pt_regs *regs) +{ + void __user *addr = (void __user *) get_fp_ip(regs->trap); + int code = 0; + unsigned int FPSR; + unsigned int PFPFR; + + die_if_kernel("fp trap in kernel mode", regs, 0); + + FPSR = NATIVE_NV_READ_FPSR_REG_VALUE(); + PFPFR = NATIVE_NV_READ_PFPFR_REG_VALUE(); + + if( FPSR & fp_es ) { + if (FPSR & fp_pe) + code = FPE_FLTRES; + else if (FPSR & fp_ue) + code = FPE_FLTUND; + else if (FPSR & fp_oe) + code = FPE_FLTOVF; + else if (FPSR & fp_ze) + code = FPE_FLTDIV; + else if (FPSR & fp_de) + code = FPE_FLTUND; + else if (FPSR & fp_ie) + code = FPE_FLTINV; + } else { + if (PFPFR & fp_pe) + code = FPE_FLTRES; + else if (PFPFR & fp_de) + code = FPE_FLTUND; + else if (PFPFR & 
fp_oe) + code = FPE_FLTOVF; + else if (PFPFR & fp_ie) + code = FPE_FLTINV; + else if (PFPFR & fp_ze) + code = FPE_FLTDIV; + else if (PFPFR & fp_ue) + code = FPE_FLTUND; + } + + force_sig_fault(SIGFPE, code, addr, 0); + SDBGPRINT("SIGFPE. Floating point error"); +} + +static void do_mem_lock(struct pt_regs *regs) +{ + if (TASK_IS_BINCO(current)) { + struct trap_pt_regs *trap = regs->trap; + + DebugML("started\n"); + DbgTC("call do_trap_cellar\n"); + if (!trap->tc_called) { + trap->nr_page_fault_exc = exc_mem_lock_num; + do_trap_cellar(regs, 1); + do_trap_cellar(regs, 0); + trap->tc_called = 1; + } + DbgTC("after do_trap_cellar\n"); + DbgTC("user_mode(regs) %d signal_pending(current) %d\n", + user_mode(regs), signal_pending(current)); + } else { + DebugML("do_mem_lock: send SIGBUS\n"); + S_SIG(regs, SIGBUS, exc_mem_lock_num, BUS_OBJERR); + SDBGPRINT_WITH_STACK("SIGBUS. Memory lock signaled"); + } +} + +static notrace void do_mem_lock_as(struct pt_regs *regs) +{ + nmi_enter(); +#ifndef CONFIG_IGNORE_MEM_LOCK_AS + if (TASK_IS_BINCO(current) && user_mode(regs)) { + DebugML("started\n"); + S_SIG(regs, SIGBUS, exc_mem_lock_as_num, BUS_OBJERR); + SDBGPRINT("SIGBUS. 
Memory lock AS signaled"); + } +#endif + nmi_exit(); +} + +static void do_mem_error(struct pt_regs *regs) +{ + struct trap_pt_regs *trap = regs->trap; + int trapno = 0; + e2k_tir_hi_t tir_hi; + e2k_tir_lo_t tir_lo; + char *s; + + tir_lo.TIR_lo_reg = trap->TIR_lo; + tir_hi.TIR_hi_reg = trap->TIR_hi; + + switch (tir_hi.TIR_hi_exc & exc_mem_error_mask) { + case exc_mem_error_ICACHE_mask: + trapno = exc_mem_error_ICACHE_num; + s = "ICACHE"; + break; + case exc_mem_error_L1_02_mask: + trapno = exc_mem_error_L1_02_num; + s = "L1 chanel 0, 2"; + break; + case exc_mem_error_L1_35_mask: + trapno = exc_mem_error_L1_35_num; + s = "L1 chanel 3, 5"; + break; + case exc_mem_error_L2_mask: + trapno = exc_mem_error_L2_num; + s = "L2"; + break; + case exc_mem_error_MAU_mask: + trapno = exc_mem_error_MAU_num; + s = "MAU"; + break; + case exc_mem_error_out_cpu_mask: + trapno = exc_mem_error_out_cpu_num; + s = "out cpu"; + break; + default: + s = "unknown"; + break; + } + + if (likely(!sig_on_mem_err)) { + panic("EXCEPTION: exc_mem_error TIR_hi.exc 0x%016llx (%s) TIR_lo.ip " + "0x%016llx on cpu %d\n", + tir_hi.TIR_hi_exc, s, tir_lo.TIR_lo_ip, + raw_smp_processor_id()); + } else { + S_SIG(regs, SIGUSR2, trapno, SI_KERNEL); + SDBGPRINT("SIGUSR2. exc_mem_error"); + } +} + +static void do_data_error(struct pt_regs *regs) +{ + /* + * 38 bit of TIRs was reused since iset v6, in iset v2 it's + * exc_mem_error, in iset v3, iset v4 and iset v5 it's unused. + */ + if (machine.native_iset_ver <= E2K_ISET_V2) + return do_mem_error(regs); + else if (machine.native_iset_ver < E2K_ISET_V6) + BUG(); + + S_SIG(regs, SIGBUS, exc_data_error_num, BUS_OBJERR); + SDBGPRINT_WITH_STACK("SIGBUS. 
data_error"); +} + +__noreturn static void do_unknown_exc(struct pt_regs *regs) +{ + panic("EXCEPTION: Unknown e2k exception!!!\n"); +} + +/* + * pseudo IRQ to emulate SysRq on guest kernel + */ +void native_sysrq_showstate_interrupt(struct pt_regs *regs) +{ + ack_pic_irq(); + /* dump stacks uses NMI to interrupt other CPUs and dump current */ + /* process state running on the CPU */ + raw_all_irq_enable(); + + /* vcpu state is unavailable on native guest */ + show_state_filter(0); + + dump_stack(); + + HYPERVISOR_vcpu_show_state_completion(); +} + diff --git a/arch/e2k/kernel/ttable-help.h b/arch/e2k/kernel/ttable-help.h new file mode 100644 index 000000000000..b194277eca48 --- /dev/null +++ b/arch/e2k/kernel/ttable-help.h @@ -0,0 +1,125 @@ +/* + * + * Copyright (C) 2020 MCST + * + * Defenition of traps handling routines. + */ + +#ifndef _E2K_KERNEL_TTABLE_HELP_H +#define _E2K_KERNEL_TTABLE_HELP_H + +#include +#include +#include +#include +#include +#include + +#include + + +#ifdef CONFIG_KVM_GUEST_KERNEL + +/* Non-privileged guest kernel has its kernel's + * registers cleaned by hypervisor when it restores + * guest user context upon return from guest kernel, + * so it should not clear anything. 
*/ +# define USER_TRAP_HANDLER_SIZE 0x1 +# define TTABLE_ENTRY_8_SIZE 0x1 +# define TTABLE_ENTRY_10_SIZE 0x1 +# define RET_FROM_FORK_SIZE 0x1 +# define MAKECONTEXT_SIZE 0x1 +# define HANDLE_SYS_CALL_SIZE 0x1 +# define DO_SIGRETURN_SIZE 0x1 + +# define CLEAR_USER_TRAP_HANDLER_WINDOW() NATIVE_RETURN() +# define CLEAR_TTABLE_ENTRY_10_WINDOW(r0) E2K_SYSCALL_RETURN(r0) +# define CLEAR_TTABLE_ENTRY_10_WINDOW_PROT(r0, r1, r2, r3, tag2, tag3) \ + E2K_PSYSCALL_RETURN(r0, r1, r2, r3, tag2, tag3) +# define CLEAR_TTABLE_ENTRY_8_WINDOW(r0) E2K_SYSCALL_RETURN(r0) +# define CLEAR_TTABLE_ENTRY_8_WINDOW_PROT(r0, r1, r2, r3, tag2, tag3) \ + E2K_PSYSCALL_RETURN(r0, r1, r2, r3, tag2, tag3) +# define CLEAR_RET_FROM_FORK_WINDOW(r0) E2K_SYSCALL_RETURN(r0) +# define CLEAR_MAKECONTEXT_WINDOW(r0) E2K_SYSCALL_RETURN(r0) +# define CLEAR_HANDLE_SYS_CALL_WINDOW(r0) E2K_SYSCALL_RETURN(r0) +# define CLEAR_DO_SIGRETURN_INTERRUPT() NATIVE_RETURN() +# define CLEAR_DO_SIGRETURN_SYSCALL(r0) E2K_SYSCALL_RETURN(r0) +# define CLEAR_DO_SIGRETURN_SYSCALL_PROT(r0, r1, r2, r3, tag2, tag3) \ + E2K_PSYSCALL_RETURN(r0, r1, r2, r3, tag2, tag3) + +#elif defined CONFIG_CPU_HW_CLEAR_RF + +# if defined GENERATING_HEADER +# define USER_TRAP_HANDLER_SIZE 0x1 +# define TTABLE_ENTRY_8_SIZE 0x1 +# define TTABLE_ENTRY_10_SIZE 0x1 +# define RET_FROM_FORK_SIZE 0x1 +# define MAKECONTEXT_SIZE 0x1 +# define HANDLE_SYS_CALL_SIZE 0x1 +# define DO_SIGRETURN_SIZE 0x1 +# else +# include "ttable_wbs.h" +# endif + +# define CLEAR_USER_TRAP_HANDLER_WINDOW() E2K_DONE() +# define CLEAR_TTABLE_ENTRY_10_WINDOW(r0) E2K_SYSCALL_RETURN(r0) +# define CLEAR_TTABLE_ENTRY_10_WINDOW_PROT(r0, r1, r2, r3, tag2, tag3) \ + E2K_PSYSCALL_RETURN(r0, r1, r2, r3, tag2, tag3) +# define CLEAR_TTABLE_ENTRY_8_WINDOW(r0) E2K_SYSCALL_RETURN(r0) +# define CLEAR_TTABLE_ENTRY_8_WINDOW_PROT(r0, r1, r2, r3, tag2, tag3) \ + E2K_PSYSCALL_RETURN(r0, r1, r2, r3, tag2, tag3) +# define CLEAR_RET_FROM_FORK_WINDOW(r0) E2K_SYSCALL_RETURN(r0) +# define 
CLEAR_MAKECONTEXT_WINDOW(r0) E2K_SYSCALL_RETURN(r0) +# define CLEAR_HANDLE_SYS_CALL_WINDOW(r0) E2K_SYSCALL_RETURN(r0) +# define CLEAR_DO_SIGRETURN_INTERRUPT() E2K_DONE() +# define CLEAR_DO_SIGRETURN_SYSCALL(r0) E2K_SYSCALL_RETURN(r0) +# define CLEAR_DO_SIGRETURN_SYSCALL_PROT(r0, r1, r2, r3, tag2, tag3) \ + E2K_PSYSCALL_RETURN(r0, r1, r2, r3, tag2, tag3) + +#else /* ! CONFIG_CPU_HW_CLEAR_RF */ + +# ifdef GENERATING_HEADER +# define CLEAR_USER_TRAP_HANDLER_WINDOW() E2K_EMPTY_CMD(: "ctpr3") +# define CLEAR_TTABLE_ENTRY_10_WINDOW(r0) \ + E2K_EMPTY_CMD([_r0] "ir" (r0) : "ctpr3") +# define CLEAR_TTABLE_ENTRY_10_WINDOW_PROT(r0, r1, r2, r3, tag2, tag3) \ + E2K_EMPTY_CMD([_r0] "ir" (r0), [_r1] "ir" (r1), \ + [_r2] "ir" (r2), [_r3] "ir" (r3), \ + [_tag2] "ir" (tag2), [_tag3] "ir" (tag3) \ + : "ctpr3") +# define CLEAR_TTABLE_ENTRY_8_WINDOW(r0) \ + E2K_EMPTY_CMD([_r0] "ir" (r0) : "ctpr3") +# define CLEAR_TTABLE_ENTRY_8_WINDOW_PROT(r0, r1, r2, r3, tag2, tag3) \ + E2K_EMPTY_CMD([_r0] "ir" (r0), [_r1] "ir" (r1), \ + [_r2] "ir" (r2), [_r3] "ir" (r3), \ + [_tag2] "ir" (tag2), [_tag3] "ir" (tag3) \ + : "ctpr3") +# define CLEAR_RET_FROM_FORK_WINDOW(r0) \ + E2K_EMPTY_CMD([_r0] "ir" (r0) : "ctpr3") +# define CLEAR_MAKECONTEXT_WINDOW(r0) \ + E2K_EMPTY_CMD([_r0] "ir" (r0) : "ctpr3") +# define CLEAR_HANDLE_SYS_CALL_WINDOW(r0) \ + E2K_EMPTY_CMD([_r0] "ir" (r0) : "ctpr3") +# define CLEAR_DO_SIGRETURN_INTERRUPT() E2K_EMPTY_CMD(: "ctpr3") +# define CLEAR_DO_SIGRETURN_SYSCALL(r0) \ + E2K_EMPTY_CMD([_r0] "ir" (r0) : "ctpr3") +# define CLEAR_DO_SIGRETURN_SYSCALL_PROT(r0, r1, r2, r3, tag2, tag3) \ + E2K_EMPTY_CMD([_r0] "ir" (r0), [_r1] "ir" (r1), \ + [_r2] "ir" (r2), [_r3] "ir" (r3), \ + [_tag2] "ir" (tag2), [_tag3] "ir" (tag3) \ + : "ctpr3") +# define USER_TRAP_HANDLER_SIZE 0x1 +# define TTABLE_ENTRY_8_SIZE 0x1 +# define TTABLE_ENTRY_10_SIZE 0x1 +# define RET_FROM_FORK_SIZE 0x1 +# define MAKECONTEXT_SIZE 0x1 +# define HANDLE_SYS_CALL_SIZE 0x1 +# define DO_SIGRETURN_SIZE 0x1 +# else +# include 
"ttable_asm.h" +# include "ttable_wbs.h" +# endif + +#endif /* CONFIG_CPU_HW_CLEAR_RF */ + +#endif /* _E2K_KERNEL_TTABLE_HELP_H */ diff --git a/arch/e2k/kernel/ttable-inline.h b/arch/e2k/kernel/ttable-inline.h new file mode 100644 index 000000000000..99c568e64051 --- /dev/null +++ b/arch/e2k/kernel/ttable-inline.h @@ -0,0 +1,859 @@ +/* + * + * Copyright (C) 2020 MCST + * + * Defenition of traps handling routines. + */ + +#ifndef _E2K_KERNEL_TTABLE_H +#define _E2K_KERNEL_TTABLE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ttable-help.h" + +#undef DEBUG_PV_UST_MODE +#undef DebugUST +#define DEBUG_PV_UST_MODE 0 /* trap injection debugging */ +#define DebugUST(fmt, args...) \ +({ \ + if (debug_guest_ust) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PV_SYSCALL_MODE +#define DEBUG_PV_SYSCALL_MODE 0 /* syscall injection debugging */ + +#if DEBUG_PV_UST_MODE || DEBUG_PV_SYSCALL_MODE +extern bool debug_guest_ust; +#else +#define debug_guest_ust false +#endif /* DEBUG_PV_UST_MODE || DEBUG_PV_SYSCALL_MODE */ + +#ifdef CONFIG_CPU_HAS_FILL_INSTRUCTION +static __always_inline void +user_hw_stacks_restore(struct pt_regs *regs, e2k_stacks_t *stacks, + u64 cur_window_q, u64 clear_num_q) +{ + e2k_psp_lo_t u_psp_lo; + e2k_psp_hi_t u_psp_hi; + e2k_pcsp_lo_t u_pcsp_lo; + e2k_pcsp_hi_t u_pcsp_hi; + s64 pcs_copy_size, ps_copy_size, u_pshtp_size, u_pcshtp_size; + + u_psp_lo = stacks->psp_lo; + u_psp_hi = stacks->psp_hi; + u_pcsp_lo = stacks->pcsp_lo; + u_pcsp_hi = stacks->pcsp_hi; + + u_pshtp_size = GET_PSHTP_MEM_INDEX(stacks->pshtp); + u_pcshtp_size = PCSHTP_SIGN_EXTEND(stacks->pcshtp); + ps_copy_size = get_ps_copy_size(cur_window_q, u_pshtp_size); + pcs_copy_size = get_pcs_copy_size(u_pcshtp_size); + + if (ps_copy_size > 0) + u_pshtp_size -= ps_copy_size; + if (pcs_copy_size > 0) + u_pcshtp_size -= pcs_copy_size; + + AS(u_psp_hi).ind -= u_pshtp_size; + AS(u_pcsp_hi).ind -= u_pcshtp_size; + + 
FILL_HARDWARE_STACKS(); + + WRITE_PSP_REG(u_psp_hi, u_psp_lo); + WRITE_PCSP_REG(u_pcsp_hi, u_pcsp_lo); +} +#else /* !CONFIG_CPU_HAS_FILL_INSTRUCTION */ +extern void fill_handler_0(void); +extern void fill_handler_1(void); +extern void fill_handler_2(void); +extern void fill_handler_3(void); +extern void fill_handler_4(void); +extern void fill_handler_5(void); +extern void fill_handler_6(void); +extern void fill_handler_7(void); +extern void fill_handler_8(void); +extern void fill_handler_9(void); +extern void fill_handler_10(void); +extern void fill_handler_11(void); +extern void fill_handler_12(void); +extern void fill_handler_13(void); +extern void fill_handler_14(void); +extern void fill_handler_15(void); +extern void fill_handler_16(void); +extern void fill_handler_17(void); +extern void fill_handler_18(void); +extern void fill_handler_19(void); +extern void fill_handler_20(void); +extern void fill_handler_21(void); +extern void fill_handler_22(void); +extern void fill_handler_23(void); +extern void fill_handler_24(void); +extern void fill_handler_25(void); +extern void fill_handler_26(void); +extern void fill_handler_27(void); +extern void fill_handler_28(void); +extern void fill_handler_29(void); +extern void fill_handler_30(void); +extern void fill_handler_31(void); +extern void fill_handler_32(void); +extern void fill_handler_33(void); +extern void fill_handler_34(void); +extern void fill_handler_35(void); +extern void fill_handler_36(void); +extern void fill_handler_37(void); +extern void fill_handler_38(void); +extern void fill_handler_39(void); +extern void fill_handler_40(void); +extern void fill_handler_41(void); +extern void fill_handler_42(void); +extern void fill_handler_43(void); +extern void fill_handler_44(void); +extern void fill_handler_45(void); +extern void fill_handler_46(void); +extern void fill_handler_47(void); +extern void fill_handler_48(void); +extern void fill_handler_49(void); +extern void fill_handler_50(void); +extern void 
fill_handler_51(void); +extern void fill_handler_52(void); +extern void fill_handler_53(void); +extern void fill_handler_54(void); +extern void fill_handler_55(void); +extern void fill_handler_56(void); +extern void fill_handler_57(void); +extern void fill_handler_58(void); +extern void fill_handler_59(void); +extern void fill_handler_60(void); +extern void fill_handler_61(void); +extern void fill_handler_62(void); +extern void fill_handler_63(void); +extern void fill_handler_64(void); +extern void fill_handler_65(void); +extern void fill_handler_66(void); +extern void fill_handler_67(void); +extern void fill_handler_68(void); +extern void fill_handler_69(void); +extern void fill_handler_70(void); +extern void fill_handler_71(void); +extern void fill_handler_72(void); +extern void fill_handler_73(void); +extern void fill_handler_74(void); +extern void fill_handler_75(void); +extern void fill_handler_76(void); +extern void fill_handler_77(void); +extern void fill_handler_78(void); +extern void fill_handler_79(void); +extern void fill_handler_80(void); +extern void fill_handler_81(void); +extern void fill_handler_82(void); +extern void fill_handler_83(void); +extern void fill_handler_84(void); +extern void fill_handler_85(void); +extern void fill_handler_86(void); +extern void fill_handler_87(void); +extern void fill_handler_88(void); +extern void fill_handler_89(void); +extern void fill_handler_90(void); +extern void fill_handler_91(void); +extern void fill_handler_92(void); +extern void fill_handler_93(void); +extern void fill_handler_94(void); +extern void fill_handler_95(void); +extern void fill_handler_96(void); +extern void fill_handler_97(void); +extern void fill_handler_98(void); +extern void fill_handler_99(void); +extern void fill_handler_100(void); +extern void fill_handler_101(void); +extern void fill_handler_102(void); +extern void fill_handler_103(void); +extern void fill_handler_104(void); +extern void fill_handler_105(void); +extern void 
fill_handler_106(void); +extern void fill_handler_107(void); +extern void fill_handler_108(void); +extern void fill_handler_109(void); +extern void fill_handler_110(void); +extern void fill_handler_111(void); + +typedef void (*fill_handler_t)(void); + +extern const fill_handler_t fill_handlers_table[E2K_MAXSR]; + +static __always_inline void +user_hw_stacks_restore(struct pt_regs *regs, e2k_stacks_t *stacks, + u64 cur_window_q, u64 clear_num_q) +{ + e2k_pshtp_t u_pshtp; + e2k_pcshtp_t u_pcshtp; + e2k_psp_lo_t u_psp_lo; + e2k_psp_hi_t u_psp_hi; + e2k_pcsp_lo_t u_pcsp_lo; + e2k_pcsp_hi_t u_pcsp_hi; + e2k_pcsp_hi_t k_pcsp_hi; + e2k_cr0_hi_t new_cr0_hi; + e2k_cr1_lo_t new_cr1_lo; + e2k_cr1_hi_t new_cr1_hi; + s64 pcs_copy_size, ps_copy_size, u_pshtp_size, u_pcshtp_size; + u64 wbs; + +#ifndef CONFIG_CPU_HW_CLEAR_RF + clear_rf_kernel_except_current(clear_num_q); +#endif + + u_pshtp = stacks->pshtp; + u_pcshtp = stacks->pcshtp; + u_psp_lo = stacks->psp_lo; + u_psp_hi = stacks->psp_hi; + u_pcsp_lo = stacks->pcsp_lo; + u_pcsp_hi = stacks->pcsp_hi; + + u_pshtp_size = GET_PSHTP_MEM_INDEX(u_pshtp); + u_pcshtp_size = PCSHTP_SIGN_EXTEND(u_pcshtp); + ps_copy_size = get_ps_copy_size(cur_window_q, u_pshtp_size); + pcs_copy_size = get_pcs_copy_size(u_pcshtp_size); + + if (ps_copy_size > 0) + u_pshtp_size -= ps_copy_size; + if (pcs_copy_size > 0) + u_pcshtp_size -= pcs_copy_size; + + AS(u_psp_hi).ind -= u_pshtp_size; + AS(u_pcsp_hi).ind -= u_pcshtp_size; + + current->thread.fill.u_psp_lo = u_psp_lo; + current->thread.fill.u_psp_hi = u_psp_hi; + current->thread.fill.u_pcsp_lo = u_pcsp_lo; + current->thread.fill.u_pcsp_hi = u_pcsp_hi; + current->thread.fill.cr0_hi = READ_CR0_HI_REG(); + current->thread.fill.cr1_lo = READ_CR1_LO_REG(); + current->thread.fill.cr1_hi = READ_CR1_HI_REG(); + + if (u_pcshtp_size == 0) + goto set_new_regs; + + wbs = (u64) u_pshtp_size >> 5UL; + AW(new_cr1_lo) = 0; + AS(new_cr1_lo).psr = AW(E2K_KERNEL_PSR_DISABLED); + AS(new_cr1_lo).cui = KERNEL_CODES_INDEX; + 
if (machine.native_iset_ver < E2K_ISET_V6) + AS(new_cr1_lo).ic = 1; + AS(new_cr1_lo).wfx = AS(u_pshtp).fx; + AS(new_cr1_lo).wbs = wbs; + AW(new_cr0_hi) = (u64) fill_handlers_table[wbs]; + AW(new_cr1_hi) = 0; + AS(new_cr1_hi).ussz = AS(READ_USD_HI_REG()).size >> 4; + WRITE_CR0_HI_REG(new_cr0_hi); + WRITE_CR1_LO_REG(new_cr1_lo); + WRITE_CR1_HI_REG(new_cr1_hi); + + prefetch_nospec(¤t->thread.fill.cr0_hi); + prefetch_nospec(¤t->thread.fill.return_to_user); + + /* + * To make hardware issue a FILL we have to make stack empty first + */ + k_pcsp_hi = READ_PCSP_HI_REG(); + if (AS(k_pcsp_hi).ind) + E2K_FLUSHC; + +# define DEBUG_FILL_HARDWARE_STACKS_V2 0 +# if DEBUG_FILL_HARDWARE_STACKS_V2 + if (__builtin_constant_p(cur_window_q)) + asm volatile ("{setwd wsz=4}" "{setwd wsz=%0}" + :: "i" (cur_window_q)); +# endif + FILL_HARDWARE_STACKS(); + +set_new_regs: + + WRITE_CR0_HI_REG(current->thread.fill.cr0_hi); + WRITE_CR1_LO_REG(current->thread.fill.cr1_lo); + WRITE_CR1_HI_REG(current->thread.fill.cr1_hi); + + WRITE_PSP_REG(current->thread.fill.u_psp_hi, + current->thread.fill.u_psp_lo); + WRITE_PCSP_REG(current->thread.fill.u_pcsp_hi, + current->thread.fill.u_pcsp_lo); +} +#endif /* CONFIG_CPU_HAS_FILL_INSTRUCTION */ + +static __always_inline void +native_jump_to_ttable_entry(struct pt_regs *regs, enum restore_caller from) +{ + if (from & (FROM_SYSCALL_N_PROT | FROM_PV_VCPU_SYSCALL)) { + switch (regs->kernel_entry) { + case 1: + __E2K_JUMP_WITH_ARGUMENTS_7(ttable_entry1, + regs->sys_num, + regs->args[1], regs->args[2], + regs->args[3], regs->args[4], + regs->args[5], regs->args[6], + !is_paravirt_kernel()); + case 3: + __E2K_JUMP_WITH_ARGUMENTS_7(ttable_entry3, + regs->sys_num, + regs->args[1], regs->args[2], + regs->args[3], regs->args[4], + regs->args[5], regs->args[6], + !is_paravirt_kernel()); + case 4: + __E2K_JUMP_WITH_ARGUMENTS_7(ttable_entry4, + -(s32) regs->sys_num, + regs->args[1], regs->args[2], + regs->args[3], regs->args[4], + regs->args[5], regs->args[6], + 
!is_paravirt_kernel()); + default: + BUG(); + } + } else if (from & FROM_SYSCALL_PROT_8) { + __E2K_RESTART_TTABLE_ENTRY8_C(ttable_entry8, regs->sys_num, + regs->args[1], regs->args[2], regs->args[3], + regs->args[4], regs->args[5], regs->args[6], + regs->args[7], regs->args[8], regs->args[9], + regs->args[10], regs->args[11], regs->args[12], + regs->tags); + } else if (from & FROM_SYSCALL_PROT_10) { + __E2K_RESTART_TTABLE_ENTRY10_C(ttable_entry10, + regs->sys_num, regs->args[1], regs->args[2], + regs->args[3], regs->args[4], regs->args[5], + regs->args[6], regs->args[7], regs->tags); + } else { + BUG(); + } +} + +#if !defined(CONFIG_VIRTUALIZATION) || !defined(CONFIG_KVM_GUEST_KERNEL) +static __always_inline void +jump_to_ttable_entry(struct pt_regs *regs, enum restore_caller from) +{ + native_jump_to_ttable_entry(regs, from); +} +#endif /* !CONFIG_VIRTUALIZATION || !CONFIG_KVM_GUEST_KERNEL) */ + +extern int copy_context_from_signal_stack(struct local_gregs *l_gregs, + struct pt_regs *regs, struct trap_pt_regs *trap, u64 *sbbp, + e2k_aau_t *aau_context, struct k_sigaction *ka); + +static inline int signal_pending_usermode_loop(bool syscall) +{ + return (syscall || !test_delayed_signal_handling(current, + current_thread_info())) && + signal_pending(current); +} + +static inline unsigned long exit_to_usermode_has_work(struct pt_regs *regs, + bool return_to_user, bool syscall) +{ + if (unlikely(!return_to_user)) + return 0; + + if (syscall) + return unlikely(current_thread_info()->flags & _TIF_WORK_MASK); + + return unlikely(!test_delayed_signal_handling(current, + current_thread_info()) && + (current_thread_info()->flags & _TIF_SIGPENDING) || + (current_thread_info()->flags & _TIF_WORK_MASK_NOSIG)); +} + +/* + * Loop before exiting to usermode until all events requiring + * attention are handled (these events include signals, + * rescheduling and handling of TIF_NOTIFY_RESUME). 
+ */ + +static __always_inline e2k_pshtp_t exit_to_usermode_loop(struct pt_regs *regs, + enum restore_caller from, bool *return_to_user, + u64 wsz, bool syscall) +{ + e2k_pshtp_t pshtp = regs->stacks.pshtp; + struct pt_regs __user *orig_u_pt_regs = NULL; + + /* + * Return control from UPSR register to PSR, if UPSR interrupts + * control is used. RETURN operation restores PSR state at system + * call point and recovers interrupts control + * + * This also disables interrupts and serves as a compiler barrier. + */ + WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_DISABLED)); + + if (unlikely(host_test_intc_emul_mode(regs))) { + /* host is at guest VCPU interception emulation mode */ + host_exit_to_usermode_loop(regs, syscall, + signal_pending_usermode_loop(syscall)); + return pshtp; + } + + /* + * Check under closed interrupts to avoid races + */ + while (unlikely(exit_to_usermode_has_work(regs, *return_to_user, syscall))) { + /* Make sure compiler does not reuse previous checks (this + * is better than adding "volatile" to reads in hot path). */ + barrier(); + + WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_ENABLED)); + + /* Check for rescheduling first */ + if (need_resched()) + schedule(); + + /* This will set SIG_*_FLAG_PT_REGS flags */ + if (signal_pending_usermode_loop(syscall)) { +#ifdef CONFIG_USE_AAU + struct e2k_aau_context *aau_regs = regs->aau_context; +#endif + do_signal(regs); +#ifdef CONFIG_USE_AAU + /* arch_ptrace_stop() reads current values of AALDI + * and AALDA registers and those values must not be + * restored - we want APB to restart from the last + * *used* address. So recalculate proper values here. 
*/ + if (!syscall && aau_regs && + unlikely(AAU_STOPPED(aau_regs->aasr))) { + machine.calculate_aau_aaldis_aaldas(regs, + current_thread_info(), aau_regs); + } +#endif + } + + if (syscall && regs->flags.sig_restart_syscall) { + /* + * Rules for system call restart: + * 1) First we call signal handlers for _all_ signals + * sent to us (if they have handler registered) + * 2) Then we restart system call _exactly_once_ even + * if multiple signals were restart-worthy. + */ + if (regs->flags.sig_call_handler) { + /* Call handler _before_ restarting system call + * and do not forget to restart it later. */ + typeof(orig_u_pt_regs->flags) flags; + unsigned long ts_flag; + int ret; + + /* This will point to the first pt_regs + * where restart logic should apply */ + if (!orig_u_pt_regs) + orig_u_pt_regs = signal_pt_regs_first(); + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __get_user(AW(flags), &AW(orig_u_pt_regs->flags)); + if (!ret) { + flags.sig_restart_syscall = 1; + ret = __put_user(AW(flags), + &AW(orig_u_pt_regs->flags)); + } + clear_ts_flag(ts_flag); + if (ret) + force_sigsegv(SIGSEGV); + + /* Restart will be done after signal handling */ + regs->flags.sig_restart_syscall = 0; + } else if (!signal_pending_usermode_loop(syscall)) { + /* There are no signal handlers and no more + * signals so we can restart this system call */ + BUG_ON(host_test_intc_emul_mode(regs)); + *return_to_user = false; + } + } + + if (test_thread_flag(TIF_NOTIFY_RESUME)) { + clear_thread_flag(TIF_NOTIFY_RESUME); + do_notify_resume(regs); + } + + /* + * Signal handler delivery does magic with stack, + * so check again whether manual copy is needed + */ + if (regs->flags.sig_call_handler) { + /* this case has not yet been accounted for */ + BUG_ON(!syscall && + guest_trap_from_user(current_thread_info()) || + syscall && + guest_syscall_from_user(current_thread_info())); + host_user_hw_stacks_prepare(®s->stacks, regs, + wsz, from, !(from & ~(FROM_SYSCALL_N_PROT | + FROM_SYSCALL_PROT_8 | 
+ FROM_SYSCALL_PROT_10))); + } + + pshtp = regs->stacks.pshtp; + WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_DISABLED)); + } + + return pshtp; +} + +static __noreturn __always_inline void +finish_user_trap_handler(struct pt_regs *regs, restore_caller_t from) +{ + thread_info_t *ti; + struct e2k_aau_context *aau_regs = regs->aau_context; + struct trap_pt_regs *trap = regs->trap; +#if defined(CONFIG_VIRTUALIZATION) && !defined(CONFIG_KVM_GUEST_KERNEL) + bool from_paravirt_guest; +#endif /* CONFIG_VIRTUALIZATION && ! CONFIG_KVM_GUEST_KERNEL */ + bool return_to_user = true; + e2k_pshtp_t pshtp; + u64 wsz, num_q; + +#ifdef CONFIG_USE_AAU + if (unlikely(AAU_STOPPED(aau_regs->aasr))) + machine.calculate_aau_aaldis_aaldas(regs, + current_thread_info(), aau_regs); +#endif + + /* + * This can page fault so call with open interrupts + */ + BUILD_BUG_ON(from & ~(FROM_SIGRETURN | FROM_USER_TRAP | + FROM_RETURN_PV_VCPU_TRAP)); + wsz = get_wsz(from); + host_user_hw_stacks_prepare(®s->stacks, regs, wsz, from, false); + + pshtp = exit_to_usermode_loop(regs, from, &return_to_user, wsz, false); + + exception_exit(trap->prev_state); + + num_q = get_ps_clear_size(wsz, pshtp); + + NATIVE_DO_RESTORE_UPSR_REG(current_thread_info()->upsr); + + /* calculate if done will be to paravirtualized guest */ + GET_PARAVIRT_GUEST_MODE(from_paravirt_guest, regs); + + /* Update run state info, if trap occured on guest kernel */ + SET_RUNSTATE_OUT_USER_TRAP(); + + read_ticks(start_tick); + +#if (!defined CONFIG_E2K_MACHINE && defined CONFIG_E2K_MINVER_V2) || defined CONFIG_E2K_ES2_DSP || \ + defined CONFIG_E2K_ES2_RU + /* Hardware bug 71610 workaround */ + if (cpu_has(CPU_HWBUG_ATOMIC) && + unlikely(AS(regs->ctpr1).ta_base == 0xfffffffffff8ULL)) { + unsigned long long g23; + int g23_tag; + + E2K_GET_DGREG_VAL_AND_TAG(23, g23, g23_tag); + if ((current->thread.flags & E2K_FLAG_32BIT) + && !TASK_IS_BINCO(current) + && !(current->thread.flags & + E2K_FLAG_PROTECTED_MODE)) { + g23 &= 0xffffffffULL; + g23_tag &= 
0x3; + } + if (g23_tag == ETAGNVD && access_ok(g23, 1)) { + __TRY_USR_PFAULT { + flush_DCACHE_line(g23); + } CATCH_USR_PFAULT { + } END_USR_PFAULT + } + } +#endif + + info_restore_mmu_reg(start_tick); + + CHECK_PT_REGS_CHAIN(regs, + NATIVE_NV_READ_USD_LO_REG().USD_lo_base, + current->stack + KERNEL_C_STACK_SIZE); + + read_ticks(clock); + +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + if (unlikely(trap->flags & TRAP_RP_FLAG)) { + u64 cr0_hi = AS_WORD(regs->crs.cr0_hi); + + WARN_ON(cr0_hi < current_thread_info()->rp_start || + cr0_hi >= current_thread_info()->rp_end); + AS_WORD(regs->crs.cr0_hi) = current_thread_info()->rp_ret_ip; + } +#endif /* CONFIG_SECONDARY_SPACE_SUPPORT */ + + /* complete intercept emulation mode */ + trap_guest_enter(current_thread_info(), regs, EXIT_FROM_INTC_SWITCH); + + RESTORE_USER_TRAP_STACK_REGS(regs); + + if (current->thread.flags & E2K_FLAG_PROTECTED_MODE) + ENABLE_US_CLW(); + + info_restore_stack_reg(clock); + +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT + trap_times->psp_hi_to_done = NATIVE_NV_READ_PSP_HI_REG(); + trap_times->pcsp_hi_to_done = NATIVE_NV_READ_PCSP_HI_REG(); + trap_times->pshtp_to_done = NATIVE_NV_READ_PSHTP_REG(); + trap_times->ctpr1_to_done = AS_WORD(regs->ctpr1); + trap_times->ctpr2_to_done = AS_WORD(regs->ctpr2); + trap_times->ctpr3_to_done = AS_WORD(regs->ctpr3); + E2K_SAVE_CLOCK_REG(trap_times->end); +#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */ + + if (unlikely(cpu_has(CPU_HWBUG_SS) && + test_ts_flag(TS_SINGLESTEP_USER))) { + /* + * Hardware can lose singlestep flag on interrupt if it + * arrives earlier, so we must always manually reset it. + */ + e2k_cr1_lo_t cr1_lo = READ_CR1_LO_REG(); + + if (!AS(cr1_lo).pm) { + AS(cr1_lo).ss = 1; + WRITE_CR1_LO_REG(cr1_lo); + } + } + + /* + * We have FILLed user hardware stacks so no + * function calls are allowed after this point. 
+ */ +#if !defined(CONFIG_CPU_HAS_FILL_INSTRUCTION) && \ + defined(CONFIG_VIRTUALIZATION) && \ + !defined(CONFIG_KVM_GUEST_KERNEL) + from_paravirt_guest = current->thread.fill.from_paravirt_guest; +#endif + user_hw_stacks_restore(regs, + trap_guest_get_restore_stacks(current_thread_info(), regs), + wsz, num_q); +#ifndef CONFIG_CPU_HAS_FILL_INSTRUCTION + BUILD_BUG_ON(!__builtin_constant_p(from)); +# if defined(CONFIG_VIRTUALIZATION) && !defined(CONFIG_KVM_GUEST_KERNEL) + from_paravirt_guest = current->thread.fill.from_paravirt_guest; +# endif + regs = current_thread_info()->pt_regs; + aau_regs = regs->aau_context; + trap = regs->trap; +#endif + + /* TODO Drop this leftover from Paravirt-1.0 (user_trap_handler + * is the last entry, nothing to queue in entry or dequeue here) */ + /* + * Dequeue current pt_regs structure and previous + * regs will be now actuale + */ + current_thread_info()->pt_regs = regs->next; + regs->next = NULL; + + /* restore some guest context, if trap was on guest */ + ti = current_thread_info(); + trap_guest_enter(ti, regs, EXIT_FROM_TRAP_SWITCH); + BUG_ON(ti != READ_CURRENT_REG()); + /* WARNING: from here should not use current, current_thread_info() */ + /* only variable 'ti' */ + +#ifdef CONFIG_USE_AAU + clear_apb(); + if (cpu_has(CPU_HWBUG_AAU_AALDV)) + __E2K_WAIT(_ma_c); + if (aau_working(aau_regs)) { + set_aau_context(aau_regs); + + /* + * It's important to restore AAD after + * all return operations. + */ + if (AS(aau_regs->aasr).iab) + RESTORE_AADS(aau_regs); + } + + /* + * There must not be any branches after restoring ctpr register + * because of HW bug, so this 'if' is done before restoring %ctpr2 + * (actually it belongs to set_aau_aaldis_aaldas()). + * + * RESTORE_COMMON_REGS() must be called before RESTORE_AAU_MASK_REGS() + * because of ctpr2 and AAU registers restoring dependencies. 
+ */ + if (likely(!AAU_STOPPED(aau_regs->aasr))) { +#endif + RESTORE_COMMON_REGS(regs); +#ifdef CONFIG_USE_AAU + RESTORE_AAU_MASK_REGS(aau_regs); +#endif + /* only if return from paravirtualized host to guest */ + COND_GOTO_DONE_TO_PARAVIRT_GUEST(from_paravirt_guest); + if (from & FROM_SIGRETURN) { + CLEAR_DO_SIGRETURN_INTERRUPT(); + } else if (from & (FROM_RETURN_PV_VCPU_TRAP)) { + CLEAR_RETURN_PV_VCPU_TRAP_WINDOW(); + } else { + CLEAR_USER_TRAP_HANDLER_WINDOW(); + } +#ifdef CONFIG_USE_AAU + } else { + BUILD_BUG_ON(!__builtin_constant_p(from)); + RESTORE_GUEST_AAU_AASR(aau_regs, + test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE)); + RESTORE_COMMON_REGS(regs); + native_set_aau_aaldis_aaldas(ti, aau_regs); + RESTORE_AAU_MASK_REGS(aau_regs); + /* only if return from paravirtualized host to guest */ + COND_GOTO_DONE_TO_PARAVIRT_GUEST(from_paravirt_guest); + if (from & FROM_SIGRETURN) { + CLEAR_DO_SIGRETURN_INTERRUPT(); + } else if (from & (FROM_RETURN_PV_VCPU_TRAP)) { + CLEAR_RETURN_PV_VCPU_TRAP_WINDOW(); + } else { + CLEAR_USER_TRAP_HANDLER_WINDOW(); + } + } +#endif + + unreachable(); +} + +static __always_inline __noreturn +void finish_syscall(struct pt_regs *regs, enum restore_caller from, + bool return_to_user) +{ + e2k_pshtp_t pshtp; + u64 wsz, num_q, rval; + int return_desk; + bool ts_host_at_vcpu_mode, intc_emul_flag; + + /* + * This can page fault so call with open interrupts + */ + wsz = get_wsz(from); + host_user_hw_stacks_prepare(®s->stacks, regs, wsz, from, + !(from & ~(FROM_SYSCALL_N_PROT | FROM_SYSCALL_PROT_8 | + FROM_SYSCALL_PROT_10))); + + pshtp = exit_to_usermode_loop(regs, from, &return_to_user, wsz, true); + + intc_emul_flag = kvm_test_intc_emul_flag(regs); + ts_host_at_vcpu_mode = ts_host_at_vcpu_mode() || intc_emul_flag; + +#ifndef CONFIG_CPU_HAS_FILL_INSTRUCTION + current->thread.fill.from = from; + current->thread.fill.return_to_user = return_to_user; +#endif + + /* + * All signals delivered, can access *regs now + */ + BUG_ON(from & 
FROM_USER_TRAP); + + CHECK_PT_REGS_CHAIN(regs, NATIVE_NV_READ_USD_LO_REG().USD_lo_base, + current->stack + KERNEL_C_STACK_SIZE); + + num_q = get_ps_clear_size(wsz, pshtp); + + /* complete intercept emulation mode */ + guest_exit_intc(regs, intc_emul_flag); + + /* + * We have FILLed user hardware stacks so no + * function calls are allowed after this point. + */ + user_hw_stacks_restore(regs, + syscall_guest_get_restore_stacks(ts_host_at_vcpu_mode, regs), + wsz, num_q); +#ifndef CONFIG_CPU_HAS_FILL_INSTRUCTION + regs = current_thread_info()->pt_regs; + if (!__builtin_constant_p(from)) + from = current->thread.fill.from; + return_to_user = current->thread.fill.return_to_user; +#endif + + rval = regs->sys_rval; + return_desk = regs->return_desk; + + /* + * It is possible to use closed GNU ASM since we have more than + * 4 instructions before the return to user. + */ + exit_handle_syscall(regs->stacks.top, regs->stacks.usd_hi, + regs->stacks.usd_lo, current_thread_info()->upsr); + + /* + * Dequeue current pt_regs structure + */ + current_thread_info()->pt_regs = NULL; + + /* restore some guest context, if trap was on guest */ + guest_syscall_exit_trap(regs, ts_host_at_vcpu_mode); + + if (!(from & (FROM_SYSCALL_N_PROT | FROM_PV_VCPU_SYSCALL | FROM_PV_VCPU_SYSFORK))) + ENABLE_US_CLW(); + + if (cpu_has(CPU_HWBUG_VIRT_PSIZE_INTERCEPTION) && + (from & FROM_SYSCALL_PROT_8)) { + e2k_wd_t wd = READ_WD_REG(); + wd.psize = 0x80; + WRITE_WD_REG(wd); + } + + /* %gN-%gN+3 must be restored last as they hold pointers to current */ + /* now N=16 (see asm/glob_regs.h) */ + RESTORE_KERNEL_GREGS_IN_SYSCALL(current_thread_info()); + + if (likely(return_to_user)) { + if ((from & (FROM_SYSCALL_PROT_8 | FROM_SYSCALL_PROT_10)) && + unlikely(return_desk)) { + u64 flag, rval1 = regs->rval1, rval2 = regs->rval2; + int rv1_tag = regs->rv1_tag, rv2_tag = regs->rv2_tag; + + if ((long) rval < 0) { + flag = 1; + rval1 = -rval; + } else { + flag = 0; + } + + if (from & FROM_SIGRETURN) + 
CLEAR_DO_SIGRETURN_SYSCALL_PROT(flag, 0, + rval1, rval2, rv1_tag, rv2_tag); + else if (from & FROM_SYSCALL_PROT_8) + CLEAR_TTABLE_ENTRY_8_WINDOW_PROT(flag, 0, + rval1, rval2, rv1_tag, rv2_tag); + else + CLEAR_TTABLE_ENTRY_10_WINDOW_PROT(flag, 0, + rval1, rval2, rv1_tag, rv2_tag); + } + + /* Check for 'wsz' modifiers first */ + if (from & FROM_SIGRETURN) + CLEAR_DO_SIGRETURN_SYSCALL(rval); + else if (from & FROM_RET_FROM_FORK) + CLEAR_RET_FROM_FORK_WINDOW(rval); + else if (from & FROM_MAKECONTEXT) + CLEAR_MAKECONTEXT_WINDOW(rval); + else if (from & FROM_SYSCALL_N_PROT) + CLEAR_HANDLE_SYS_CALL_WINDOW(rval); + else if (from & FROM_SYSCALL_PROT_8) + CLEAR_TTABLE_ENTRY_8_WINDOW(rval); + else if (from & FROM_SYSCALL_PROT_10) + CLEAR_TTABLE_ENTRY_10_WINDOW(rval); + else if (from & FROM_PV_VCPU_SYSCALL) + CLEAR_HANDLE_PV_VCPU_SYS_CALL_WINDOW(rval); + else if (from & FROM_PV_VCPU_SYSFORK) + CLEAR_HANDLE_PV_VCPU_SYS_FORK_WINDOW(rval); + else + BUG(); + } else { + jump_to_ttable_entry(regs, from); + } + + unreachable(); +} + +/* virtualization support */ +#include "../kvm/ttable-inline.h" + +#endif /* _E2K_KERNEL_TTABLE_H */ diff --git a/arch/e2k/kernel/ttable.c b/arch/e2k/kernel/ttable.c new file mode 100644 index 000000000000..73607148b837 --- /dev/null +++ b/arch/e2k/kernel/ttable.c @@ -0,0 +1,4805 @@ +/* linux/arch/e2k/kernel/ttable.c, v 1.1 05/28/2001. + * + * Copyright (C) 2001 MCST + */ + +/**************************** DEBUG DEFINES *****************************/ + +#undef DEBUG_SYSCALL +#define DEBUG_SYSCALL 0 /* System Calls trace */ +#if DEBUG_SYSCALL +#define DbgSC printk +#else +#define DbgSC(...) +#endif + +#undef DEBUG_1SYSCALL +#define DEBUG_1SYSCALL 0 /* Tracing particular System Call */ +#if DEBUG_1SYSCALL +#define Dbg1SC(sys_num, fmt, ...) \ +do { \ + if (sys_num == DEBUG_1SYSCALL) \ + pr_info("%s: " fmt, __func__, ##__VA_ARGS__); \ +} while (0) +#else +#define Dbg1SC(...) 
+#endif + +#define DEBUG_CTX_MODE 0 /* setcontext/swapcontext */ +#if DEBUG_CTX_MODE +#define DebugCTX(...) DebugPrint(DEBUG_CTX_MODE, ##__VA_ARGS__) +#else +#define DebugCTX(...) +#endif + +/**************************** END of DEBUG DEFINES ***********************/ + +#include +#include +#include +#include +#include +#include +#include +#include /* NR_syscalls */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(CONFIG_KERNEL_TIMES_ACCOUNT) || defined(CONFIG_E2K_PROFILING) || \ + defined(CONFIG_CLI_CHECK_TIME) +#include +#endif +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_USE_AAU +#include +#include +#endif + +#ifdef CONFIG_PROTECTED_MODE +#include +#include +#endif /* CONFIG_PROTECTED_MODE */ + +#include +#include + +#include + +#ifdef CONFIG_COMPAT +#include +#endif + +#include "ttable-inline.h" + +#undef DEBUG_PV_UST_MODE +#undef DebugUST +#define DEBUG_PV_UST_MODE 0 /* guest user stacks debug mode */ +#define DebugUST(fmt, args...) 
\ +({ \ + if (debug_guest_ust) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PV_SYSCALL_MODE +#define DEBUG_PV_SYSCALL_MODE 0 /* syscall injection debugging */ + +#if DEBUG_PV_UST_MODE || DEBUG_PV_SYSCALL_MODE +bool debug_guest_ust = false; +#else +#define debug_guest_ust false +#endif /* DEBUG_PV_UST_MODE || DEBUG_PV_SYSCALL_MODE */ + +#define is_kernel_thread(task) ((task)->mm == NULL || (task)->mm == &init_mm) + +#define SAVE_PSYSCALL_ARGS(regs, a1, a2, a3, a4, a5, a6, a7, tags) \ +({ \ + (regs)->args[1] = (a1); \ + (regs)->args[2] = (a2); \ + (regs)->args[3] = (a3); \ + (regs)->args[4] = (a4); \ + (regs)->args[5] = (a5); \ + (regs)->args[6] = (a6); \ + (regs)->args[7] = (a7); \ + (regs)->tags = (tags); \ + (regs)->kernel_entry = 10; \ +}) + +#define SAVE_SYSCALL_ARGS(regs, a1, a2, a3, a4, a5, a6) \ +({ \ + (regs)->args[1] = (a1); \ + (regs)->args[2] = (a2); \ + (regs)->args[3] = (a3); \ + (regs)->args[4] = (a4); \ + (regs)->args[5] = (a5); \ + (regs)->args[6] = (a6); \ +}) +#define RESTORE_SYSCALL_ARGS(regs, num, a1, a2, a3, a4, a5, a6) \ +({ \ + (num) = (regs)->sys_num; \ + (a1) = (regs)->args[1]; \ + (a2) = (regs)->args[2]; \ + (a3) = (regs)->args[3]; \ + (a4) = (regs)->args[4]; \ + (a5) = (regs)->args[5]; \ + (a6) = (regs)->args[6]; \ +}) +static inline void save_syscall_args_prot(struct pt_regs *regs, + u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6, + u64 a7, u64 a8, u64 a9, u64 a10, u64 a11, u64 a12, u64 tags) +{ + regs->args[1] = a1; + regs->args[2] = a2; + regs->args[3] = a3; + regs->args[4] = a4; + regs->args[5] = a5; + regs->args[6] = a6; + regs->args[7] = a7; + regs->args[8] = a8; + regs->args[9] = a9; + regs->args[10] = a10; + regs->args[11] = a11; + regs->args[12] = a12; + regs->tags = tags; +} +#define RESTORE_SYSCALL_RVAL(regs, rval) \ +({ \ + (rval) = (regs)->sys_rval; \ +}) +#define RESTORE_PSYSCALL_RVAL(regs, rval, rval1, rval2) \ +({ \ + (rval) = (regs)->sys_rval; \ + (rval1) = (regs)->rval1; \ + (rval2) = (regs)->rval2; 
\ +}) + + +/* + * Maximum number of hardware interrupts: + * 1) Interrupt on user - we must open interrupts to handle AAU; + * 2) Page fault exception in kernel on access to user space; + * 3) Maskable interrupt or we could have got a page fault exception + * in execute_mmu_operations(); + * 4) Another maskable interrupt in kernel after preempt_schedule_irq() + * opened interrupts; + * 5) Non-maskable interrupt in kernel. + * + * Plus we can have a signal. + */ +#define MAX_HW_INTR 6 + + +#ifdef CONFIG_DEBUG_PT_REGS +#define DO_NEW_CHECK_PT_REGS_ADDR(prev_regs, regs, usd_lo_reg) \ +{ \ + \ + register struct pt_regs *new_regs; \ + register e2k_addr_t delta_sp; \ + register e2k_usd_lo_t usd_lo_cur; \ + \ + usd_lo_cur = NATIVE_NV_READ_USD_LO_REG(); \ + delta_sp = usd_lo_cur.USD_lo_base - usd_lo_reg.USD_lo_base; \ + new_regs = (pt_regs_t *)(((e2k_addr_t) prev_regs) + delta_sp); \ + if (regs != new_regs) { \ + pr_alert("ttable_entry() calculated pt_regs structure 0x%px is not the same as from thread_info structure 0x%px\n", \ + new_regs, regs); \ + dump_stack(); \ + } \ +} + +/* + * pt_regs structure is placed as local data of the + * trap handler (or system call handler) function + * into the kernel local data stack + * Calculate placement of pt_regs structure, it should be + * same as from thread_info structure + */ +#define NEW_CHECK_PT_REGS_ADDR(prev_regs, regs, usd_lo_reg) \ + DO_NEW_CHECK_PT_REGS_ADDR(prev_regs, regs, usd_lo_reg) +#else +#define NEW_CHECK_PT_REGS_ADDR(prev_regs, regs, usd_lo_reg) +#endif + +#ifndef CONFIG_CPU_HW_CLEAR_RF +/* + * Hardware does not properly clean the register file + * before returning to user so do the cleaning manually. 
+ */ +extern void clear_rf_6(void); +extern void clear_rf_9(void); +extern void clear_rf_18(void); +extern void clear_rf_21(void); +extern void clear_rf_24(void); +extern void clear_rf_27(void); +extern void clear_rf_36(void); +extern void clear_rf_45(void); +extern void clear_rf_54(void); +extern void clear_rf_63(void); +extern void clear_rf_78(void); +extern void clear_rf_90(void); +extern void clear_rf_99(void); +extern void clear_rf_108(void); +/* Add 4 qregs because clear_rf() is called with parameter area of 4 qregs */ +const clear_rf_t clear_rf_fn[E2K_MAXSR] = { + [0 ... 2] = clear_rf_6, + [3 ... 5] = clear_rf_9, + [6 ... 14] = clear_rf_18, + [15 ... 17] = clear_rf_21, + [18 ... 20] = clear_rf_24, + [21 ... 23] = clear_rf_27, + [24 ... 32] = clear_rf_36, + [33 ... 41] = clear_rf_45, + [42 ... 50] = clear_rf_54, + [51 ... 59] = clear_rf_63, + [60 ... 74] = clear_rf_78, + [75 ... 86] = clear_rf_90, + [87 ... 95] = clear_rf_99, + [96 ... 108] = clear_rf_108 +}; +#endif /* CONFIG_CPU_ISET < 5 */ + + +#ifdef CONFIG_SERIAL_PRINTK +/* + * Use global variables to prevent using data stack + */ +static char hex_numbers_for_debug[16] = {'0', '1', '2', '3', '4', '5', '6', + '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }; +static char u64_char[NR_CPUS][17]; + +static __interrupt notrace void dump_u64_no_stack(u64 num) +{ + int i; + int cpu_id; + char *_u64_char; + + cpu_id = raw_smp_processor_id(); + + _u64_char = u64_char[cpu_id]; + _u64_char[16] = 0; + + for (i = 0; i < 16; i++) { + _u64_char[15 - i] = hex_numbers_for_debug[num % 16]; + num = num / 16; + } + dump_puts(_u64_char); +} + +static __interrupt notrace void dump_u32_no_stack(u32 num) +{ + int i; + int cpu_id; + char *_u32_char; + + cpu_id = raw_smp_processor_id(); + + _u32_char = u64_char[cpu_id]; + _u32_char[8] = 0; + + for (i = 0; i < 8; i++) { + _u32_char[7 - i] = hex_numbers_for_debug[num % 16]; + num = num / 16; + } + dump_puts(_u32_char); +} + +static arch_spinlock_t dump_lock = 
__ARCH_SPIN_LOCK_UNLOCKED; +static __interrupt notrace void dump_debug_info_no_stack(void) +{ + u64 usd_lo_base; + e2k_cr0_hi_t cr0_hi; + u64 ip; + u32 ussz; + e2k_mem_crs_t *frame; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + u64 cr_ind; + u64 cr_base; + int flags; + struct thread_info *ti = READ_CURRENT_REG(); + + dump_puts("BUG: kernel data stack overflow\n"); + + raw_all_irq_save(flags); + arch_spin_lock(&dump_lock); + + usd_lo_base = NATIVE_NV_READ_USD_LO_REG().USD_lo_base; + cr0_hi = NATIVE_NV_READ_CR0_HI_REG(); + ip = AS_STRUCT(cr0_hi).ip << 3; + + /* + * Print IP ASAP before flushc/flushr instructions + */ + dump_puts("last IP: 0x"); + dump_u64_no_stack(ip); + + dump_puts("\nUSD base = 0x"); + dump_u64_no_stack(usd_lo_base); + + dump_puts("\n bottom = 0x"); + dump_u64_no_stack((u64)thread_info_task(ti)->stack); + + COPY_STACKS_TO_MEMORY(); + + pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG(); + pcsp_lo = NATIVE_NV_READ_PCSP_LO_REG(); + cr_ind = AS_STRUCT(pcsp_hi).ind; + cr_base = AS_STRUCT(pcsp_lo).base; + + dump_puts("\nchain stack: USD size\n 0x"); + dump_u64_no_stack(ip); + ussz = NATIVE_NV_READ_CR1_HI_REG().CR1_hi_ussz << 4; + dump_puts(" "); + dump_u32_no_stack(ussz); + + frame = ((e2k_mem_crs_t *)(cr_base + cr_ind)) - 1; + while (frame != (e2k_mem_crs_t *)cr_base) { + dump_puts("\n 0x"); + ip = frame->cr0_hi.CR0_hi_ip << 3; + dump_u64_no_stack(ip); + ussz = frame->cr1_hi.CR1_hi_ussz << 4; + dump_puts(" "); + dump_u32_no_stack(ussz); + frame--; + } + dump_puts("\n"); + + arch_spin_unlock(&dump_lock); + raw_all_irq_restore(flags); +} +#else +static inline void dump_debug_info_no_stack(void) {} +#endif + +__noreturn __interrupt notrace +void kernel_data_stack_overflow(void) +{ + dump_debug_info_no_stack(); + + for (;;) + cpu_relax(); +} + +DEFINE_PER_CPU(void *, reserve_hw_stacks); +static int __init reserve_hw_stacks_init(void) +{ + int cpu; + + for_each_possible_cpu(cpu) { + per_cpu(reserve_hw_stacks, cpu) = + __alloc_thread_stack_node(cpu_to_node(cpu)); 
+ } + + return 0; +} +core_initcall(reserve_hw_stacks_init); + +static __always_inline void switch_to_reserve_stacks(void) +{ + unsigned long base; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + e2k_sbr_t sbr; + + base = (unsigned long) raw_cpu_read(reserve_hw_stacks); + + AW(pcsp_lo) = 0; + AS(pcsp_lo).base = base + KERNEL_PC_STACK_OFFSET; + AW(pcsp_hi) = 0; + AS(pcsp_hi).size = KERNEL_PC_STACK_SIZE; + AW(psp_lo) = 0; + AS(psp_lo).base = base + KERNEL_P_STACK_OFFSET; + AW(psp_hi) = 0; + AS(psp_hi).size = KERNEL_P_STACK_SIZE; + AW(usd_lo) = 0; + AS(usd_lo).base = base + KERNEL_C_STACK_OFFSET + KERNEL_C_STACK_SIZE - + K_DATA_GAP_SIZE; + AW(usd_hi) = 0; + AS(usd_hi).size = KERNEL_C_STACK_SIZE; + AW(sbr) = 0; + AS(sbr).base = base + KERNEL_C_STACK_OFFSET + KERNEL_C_STACK_SIZE; + + NATIVE_NV_WRITE_PCSP_REG(pcsp_hi, pcsp_lo); + NATIVE_NV_WRITE_PSP_REG(psp_hi, psp_lo); + NATIVE_NV_WRITE_USBR_USD_REG_VALUE(AW(sbr), AW(usd_hi), AW(usd_lo)); +} + +/* noinline is needed to make sure we use the reserved data stack */ +notrace noinline __cold __noreturn +static void kernel_hw_stack_fatal_error(struct pt_regs *regs, + u64 exceptions, u64 kstack_pf_addr) +{ + NATIVE_WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_ENABLED)); + raw_local_irq_enable(); + + bust_spinlocks(1); + + if (kstack_pf_addr) { + print_address_tlb(kstack_pf_addr); + print_address_page_tables(kstack_pf_addr, true); + + pr_emerg("BUG: page fault on kernel stack at 0x%llx\n", + kstack_pf_addr); + } + + if (exceptions & exc_chain_stack_bounds_mask) { + e2k_pcsp_hi_t pcsp_hi = regs->stacks.pcsp_hi; + AS(pcsp_hi).ind -= PCSHTP_SIGN_EXTEND(regs->stacks.pcshtp); + pr_emerg("BUG: chain stack overflow: pcsp.lo 0x%llx pcsp.hi 0x%llx pcshtp 0x%x\n", + AW(regs->stacks.pcsp_lo), AW(pcsp_hi), + regs->stacks.pcshtp); + } + + if (exceptions & exc_proc_stack_bounds_mask) { + e2k_psp_hi_t psp_hi = regs->stacks.psp_hi; + + AS(psp_hi).ind -= 
GET_PSHTP_MEM_INDEX(regs->stacks.pshtp); + pr_emerg("BUG: procedure stack overflow: base 0x%llx ind 0x%x " + "size 0x%x\n pshtp 0x%llx\n", + regs->stacks.psp_lo.PSP_lo_base, + psp_hi.PSP_hi_ind, psp_hi.PSP_hi_size, + GET_PSHTP_MEM_INDEX(regs->stacks.pshtp)); + } + + if (!kstack_pf_addr) + print_stack_frames(current, regs, 1); + + print_pt_regs(regs); + if (regs->next != NULL) + print_pt_regs(regs->next); + + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); + panic("kernel stack overflow and/or page fault\n"); +} + +int cf_max_fill_return __read_mostly = 16 * 0x10; + +#ifndef CONFIG_CPU_HAS_FILL_INSTRUCTION + +const fill_handler_t fill_handlers_table[E2K_MAXSR] = { + &fill_handler_0, &fill_handler_1, &fill_handler_2, + &fill_handler_3, &fill_handler_4, &fill_handler_5, + &fill_handler_6, &fill_handler_7, &fill_handler_8, + &fill_handler_9, &fill_handler_10, &fill_handler_11, + &fill_handler_12, &fill_handler_13, &fill_handler_14, + &fill_handler_15, &fill_handler_16, &fill_handler_17, + &fill_handler_18, &fill_handler_19, &fill_handler_20, + &fill_handler_21, &fill_handler_22, &fill_handler_23, + &fill_handler_24, &fill_handler_25, &fill_handler_26, + &fill_handler_27, &fill_handler_28, &fill_handler_29, + &fill_handler_30, &fill_handler_31, &fill_handler_32, + &fill_handler_33, &fill_handler_34, &fill_handler_35, + &fill_handler_36, &fill_handler_37, &fill_handler_38, + &fill_handler_39, &fill_handler_40, &fill_handler_41, + &fill_handler_42, &fill_handler_43, &fill_handler_44, + &fill_handler_45, &fill_handler_46, &fill_handler_47, + &fill_handler_48, &fill_handler_49, &fill_handler_50, + &fill_handler_51, &fill_handler_52, &fill_handler_53, + &fill_handler_54, &fill_handler_55, &fill_handler_56, + &fill_handler_57, &fill_handler_58, &fill_handler_59, + &fill_handler_60, &fill_handler_61, &fill_handler_62, + &fill_handler_63, &fill_handler_64, &fill_handler_65, + &fill_handler_66, &fill_handler_67, &fill_handler_68, + &fill_handler_69, &fill_handler_70, 
&fill_handler_71, + &fill_handler_72, &fill_handler_73, &fill_handler_74, + &fill_handler_75, &fill_handler_76, &fill_handler_77, + &fill_handler_78, &fill_handler_79, &fill_handler_80, + &fill_handler_81, &fill_handler_82, &fill_handler_83, + &fill_handler_84, &fill_handler_85, &fill_handler_86, + &fill_handler_87, &fill_handler_88, &fill_handler_89, + &fill_handler_90, &fill_handler_91, &fill_handler_92, + &fill_handler_93, &fill_handler_94, &fill_handler_95, + &fill_handler_96, &fill_handler_97, &fill_handler_98, + &fill_handler_99, &fill_handler_100, &fill_handler_101, + &fill_handler_102, &fill_handler_103, &fill_handler_104, + &fill_handler_105, &fill_handler_106, &fill_handler_107, + &fill_handler_108, &fill_handler_109, &fill_handler_110, + &fill_handler_111 +}; + +noinline notrace +static u64 cf_fill_call(int nr) +{ + if (nr > 0) { + u64 ret; + + ret = cf_fill_call(nr - 1); + if (ret == -1ULL) + ret = NATIVE_READ_PCSHTP_REG_SVALUE(); + + return ret; + } + + NATIVE_FLUSHC; + + return -1ULL; +} + +static int init_cf_fill_depth(void) +{ + unsigned long flags; + u64 cf_fill_depth; + + raw_all_irq_save(flags); + cf_fill_depth = cf_fill_call(E2K_MAXCR_q / 2); + raw_all_irq_restore(flags); + + cf_max_fill_return = cf_fill_depth + 32; + + pr_info("CF FILL depth: %d quadro registers\n", + cf_max_fill_return / 16); + + return 0; +} +pure_initcall(init_cf_fill_depth); +#endif /* !CONFIG_CPU_HAS_FILL_INSTRUCTION */ + +/* + * Do work marked by TIF_NOTIFY_RESUME + */ +void do_notify_resume(struct pt_regs *regs) +{ +#ifdef ARCH_RT_DELAYS_SIGNAL_SEND + if (unlikely(current->forced_info.si_signo)) { + force_sig_info(¤t->forced_info); + current->forced_info.si_signo = 0; + } +#endif + + tracehook_notify_resume(regs); + + rseq_handle_notify_resume(NULL, regs); +} + +/* + * Trap occurred on user or kernel function but on user's stacks + * So, it needs to switch to kernel stacks + */ +void notrace __irq_entry +user_trap_handler(struct pt_regs *regs, thread_info_t *thread_info) 
+{ + struct trap_pt_regs *trap; +#if defined(CONFIG_KERNEL_TIMES_ACCOUNT) || defined(CONFIG_E2K_PROFILING) + register e2k_clock_t clock = NATIVE_READ_CLKR_REG_VALUE(); + register e2k_clock_t clock1; + register e2k_clock_t start_tick; +#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */ +#ifdef CONFIG_USE_AAU + e2k_aau_t *aau_regs; + e2k_aasr_t aasr; +#endif /* CONFIG_USE_AAU */ +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT + register trap_times_t *trap_times; +#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */ + u64 exceptions; + int save_sbbp = current->ptrace || debug_trap; + + trap = pt_regs_to_trap_regs(regs); + trap->flags = 0; + regs->trap = trap; + regs->kernel_entry = 0; + +#ifdef CONFIG_CLI_CHECK_TIME + start_tick = NATIVE_READ_CLKR_REG_VALUE(); +#endif + +#ifdef CONFIG_USE_AAU + aau_regs = pt_regs_to_aau_regs(regs); + regs->aau_context = aau_regs; + + /* + * We are not using ctpr2 here (compiling with -fexclude-ctpr2) + * thus reading of AASR, AALDV, AALDM can be done at any + * point before the first call. + * + * Usage of ctpr2 here is not possible since AALDA and AALDI + * registers would be zeroed. + */ + aasr = native_read_aasr_reg(); + SWITCH_GUEST_AAU_AASR(&aasr, aau_regs, test_ts_flag(TS_HOST_AT_VCPU_MODE)); +#endif /* CONFIG_USE_AAU */ + + /* + * All actual pt_regs structures of the process are queued. + * The head of this queue is thread_info->pt_regs pointer, + * it points to the last (current) pt_regs structure. 
+ * The current pt_regs structure points to the previous etc + * Queue is empty before first trap or system call on the + * any process and : thread_info->pt_regs == NULL + */ + regs->next = current_thread_info()->pt_regs; + current_thread_info()->pt_regs = regs; +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT + trap_times = &(current_thread_info()-> + times[current_thread_info()-> + times_index].of.trap); + current_thread_info()-> + times[current_thread_info()-> + times_index].type = TRAP_TT; + INCR_KERNEL_TIMES_COUNT(current_thread_info()); + trap_times->start = clock; + trap_times->ctpr1 = NATIVE_NV_READ_CR1_LO_REG_VALUE(); + trap_times->ctpr2 = NATIVE_NV_READ_CR0_HI_REG_VALUE(); + trap_times->pshtp = NATIVE_NV_READ_PSHTP_REG(); + trap_times->psp_ind = NATIVE_NV_READ_PSP_HI_REG().PSP_hi_ind; + E2K_SAVE_CLOCK_REG(trap_times->pt_regs_set); +#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */ + + AW(regs->flags) = 0; + init_guest_traps_handling(regs, true /* user mode trap */); + +#ifdef CONFIG_USE_AAU + /* + * Put some distance between reading AASR (above) and using it here + * since reading of AAU registers is slow. + * + * This is placed before saving trap cellar since it is done using + * 'mmurr' instruction which requires AAU to be stopped. + * + * Do this before saving %sbbp as it uses 'alc' and thus zeroes %aaldm. + */ + NATIVE_SAVE_AAU_MASK_REGS(aau_regs, aasr); +#endif + + /* + * %sbbp LIFO stack is unfreezed by writing %TIR register, + * so it must be read before TIRs. 
+ */ + if (unlikely(save_sbbp || ts_host_at_vcpu_mode())) { + trap->sbbp = __builtin_alloca(sizeof(*trap->sbbp) * + SBBP_ENTRIES_NUM); + SAVE_SBBP(trap->sbbp); + } else { + trap->sbbp = NULL; + } + + /* + * Now we can store all needed trap context into the + * current pt_regs structure + */ + + read_ticks(clock1); + exceptions = SAVE_TIRS(trap->TIRs, trap->nr_TIRs, false); + info_save_tir_reg(clock1); + + if (exceptions & have_tc_exc_mask) { + NATIVE_SAVE_TRAP_CELLAR(regs, trap); + } else { + trap->tc_count = 0; + } + + read_ticks(clock1); + /* + * Here (in SAVE_STACK_REGS) hardware bug #29263 is being worked + * around with 'flushc' instruction, so NO function calls must + * happen and IRQs must not be enabled (even NMIs) until now. + */ + NATIVE_SAVE_STACK_REGS(regs, current_thread_info(), true, true); + info_save_stack_reg(clock1); + +#ifdef CONFIG_USE_AAU + /* It's important to save AAD before all call operations. */ + if (unlikely(AS(aasr).iab)) + NATIVE_SAVE_AADS(aau_regs); + + /* + * If AAU fault happened read aalda/aaldi/aafstr here, + * before some call zeroes them. + */ + if (unlikely(trap->TIRs[0].TIR_hi.TIR_hi_aa)) + aau_regs->aafstr = native_read_aafstr_reg_value(); + + /* + * Function calls are allowed from this point on, + * mark it with a compiler barrier. + */ + barrier(); + + /* Since iset v6 %aaldi must be saved too */ + if (machine.native_iset_ver >= E2K_ISET_V6 && + unlikely(AAU_STOPPED(aasr))) + NATIVE_SAVE_AALDIS(aau_regs->aaldi); +#endif + + /* No atomic/DAM operations are allowed before this point. + * Note that we cannot do this before saving AAU. */ + if (cpu_has(CPU_HWBUG_L1I_STOPS_WORKING)) + E2K_DISP_CTPRS(); + + /* un-freeze the TIR's LIFO. Tracing can issue a call + * here so we cannot do it earlier. */ + if (trace_tir_ip_trace_enabled()) { + int i; + for (i = 1; i <= TIR_TRACE_PARTS; i++) + trace_tir_ip_trace(i); + } + UNFREEZE_TIRs(); + + /* Restore some host context if trap is on guest. 
+ * This uses function calls so cannot be called earlier. */ + trap_guest_exit(current_thread_info(), regs, trap, 0); + + info_save_mmu_reg(clock1); + + if (unlikely(is_chain_stack_bounds(current_thread_info(), regs))) + (trap->TIRs[0].TIR_hi.TIR_hi_exc) |= + exc_chain_stack_bounds_mask; + if (unlikely(is_proc_stack_bounds(current_thread_info(), regs))) + (trap->TIRs[0].TIR_hi.TIR_hi_exc) |= + exc_proc_stack_bounds_mask; + +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + if (unlikely(TASK_IS_BINCO(current))) { + u64 rpr_lo = NATIVE_READ_RPR_LO_REG_VALUE(); + u64 cr0_hi = AW(regs->crs.cr0_hi); + + machine.get_and_invalidate_MLT_context(&trap->mlt_state); + + /* Check if this was a trap in generations mode. */ + if (rpr_lo && (cpu_has(CPU_FEAT_ISET_V3) || trap->mlt_state.num) && + cr0_hi >= current_thread_info()->rp_start && + cr0_hi < current_thread_info()->rp_end) + trap->flags |= TRAP_RP_FLAG; + } else { + trap->mlt_state.num = 0; + } +#endif + + /* Update run state info, if trap occured on guest kernel */ + SET_RUNSTATE_IN_USER_TRAP(); + + BUILD_BUG_ON(sizeof(enum ctx_state) != sizeof(trap->prev_state)); + trap->prev_state = exception_enter(); + + CHECK_PT_REGS_LOOP(current_thread_info()->pt_regs); + CHECK_PT_REGS_CHAIN(regs, + NATIVE_NV_READ_USD_LO_REG().USD_lo_base, + current->stack + KERNEL_C_STACK_SIZE); + +#ifdef CONFIG_USE_AAU + if (aau_working(aau_regs)) + machine.get_aau_context(aau_regs); +#endif +#ifdef CONFIG_CLI_CHECK_TIME + tt0_prolog_ticks(E2K_GET_DSREG(clkr) - start_tick); +#endif + + /* + * %pshtp/%pcshtp cannot be negative after entering kernel + */ + if (WARN_ON_ONCE((AW(regs->stacks.pshtp) & (1ULL << E2K_WD_SIZE)) || + (regs->stacks.pcshtp & (1ULL << E2K_PCSHTP_MSB)))) { + local_irq_enable(); + NATIVE_WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_ENABLED)); + do_exit(SIGKILL); + } + + /* + * This will enable interrupts + */ + parse_TIR_registers(regs, exceptions); + trace_hardirqs_on(); + + /* Guest trap handling can be scheduled and migrate to other VCPU */ 
+ /* see comments at arch/e2k/include/asm/process.h */ + /* So: */ + /* 1) host VCPU thread was changed and */ + /* 2) need update thread info and */ + /* 3) regs satructures pointers */ + UPDATE_VCPU_THREAD_CONTEXT(NULL, &thread_info, ®s, NULL, NULL); + + finish_user_trap_handler(regs, FROM_USER_TRAP); +} + +/* + * Trap occured on kernel function and on kernel's stacks + * So it does not need to switch to kernel stacks + */ +void notrace __irq_entry +kernel_trap_handler(struct pt_regs *regs, thread_info_t *thread_info) +{ + struct trap_pt_regs *trap; + e2k_usd_lo_t usd_lo = regs->stacks.usd_lo; + e2k_psp_hi_t psp_hi; + e2k_pcsp_hi_t pcsp_hi; +#if defined(CONFIG_KERNEL_TIMES_ACCOUNT) || defined(CONFIG_E2K_PROFILING) + register e2k_clock_t clock = NATIVE_READ_CLKR_REG_VALUE(); +#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */ + e2k_cr0_hi_t cr0_hi; +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT + register trap_times_t *trap_times; +#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */ + e2k_upsr_t upsr; + u64 exceptions, nmi, hw_overflow, kstack_pf_addr; + int save_sbbp = current->ptrace || debug_trap; +#if defined(CONFIG_VIRTUALIZATION) && !defined(CONFIG_KVM_GUEST_KERNEL) + int to_save_runstate; +#endif /* CONFIG_VIRTUALIZATION && ! CONFIG_KVM_GUEST_KERNEL */ + int hardirqs_enabled = trace_hardirqs_enabled(current); +#ifdef CONFIG_DEBUG_PT_REGS + e2k_usd_lo_t usd_lo_prev; +#endif +#ifdef CONFIG_CLI_CHECK_TIME + register long start_tick = NATIVE_READ_CLKR_REG_VALUE(); +#endif + + /* No atomic/DAM operations are allowed before this point. + * Note that we cannot do this before saving AAU. */ + if (cpu_has(CPU_HWBUG_L1I_STOPS_WORKING)) + E2K_DISP_CTPRS(); + + trap = pt_regs_to_trap_regs(regs); + trap->flags = 0; + regs->trap = trap; + +#ifdef CONFIG_DEBUG_PT_REGS + usd_lo_prev = NATIVE_NV_READ_USD_LO_REG(); +#endif + +#ifdef CONFIG_USE_AAU + regs->aau_context = NULL; +#endif + + /* + * All actual pt_regs structures of the process are queued. 
+ * The head of this queue is thread_info->pt_regs pointer, + * it points to the last (current) pt_regs structure. + * The current pt_regs structure points to the previous etc + * Queue is empty before first trap or system call on the + * any process and : thread_info->pt_regs == NULL + */ + regs->next = current_thread_info()->pt_regs; + current_thread_info()->pt_regs = regs; +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT + trap_times = &(current_thread_info()-> + times[current_thread_info()-> + times_index].of.trap); + current_thread_info()-> + times[current_thread_info()-> + times_index].type = TRAP_TT; + INCR_KERNEL_TIMES_COUNT(current_thread_info()); + trap_times->start = clock; + trap_times->ctpr1 = NATIVE_NV_READ_CR1_LO_REG_VALUE(); + trap_times->ctpr2 = NATIVE_NV_READ_CR0_HI_REG_VALUE(); + trap_times->pshtp = NATIVE_NV_READ_PSHTP_REG(); + trap_times->psp_ind = NATIVE_NV_READ_PSP_HI_REG().PSP_hi_ind; + E2K_SAVE_CLOCK_REG(trap_times->pt_regs_set); +#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */ + + AW(regs->flags) = 0; + init_guest_traps_handling(regs, false /* user mode trap */); + + /* + * %sbbp LIFO stack is unfreezed by writing %TIR register, + * so it must be read before TIRs. 
+ */ + if (unlikely(save_sbbp)) { + trap->sbbp = __builtin_alloca(sizeof(*trap->sbbp) * + SBBP_ENTRIES_NUM); + SAVE_SBBP(trap->sbbp); + } else { + trap->sbbp = NULL; + } + + /* + * Now we can store all needed trap context into the + * current pt_regs structure + */ + read_ticks(clock); + exceptions = SAVE_TIRS(trap->TIRs, trap->nr_TIRs, false); + nmi = exceptions & non_maskable_exc_mask; + hw_overflow = unlikely(exceptions & (exc_chain_stack_bounds_mask | + exc_proc_stack_bounds_mask)); + info_save_tir_reg(clock); + + /* Update run state info, if trap occured on guest kernel */ + SET_RUNSTATE_IN_KERNEL_TRAP(to_save_runstate); + + if (exceptions & have_tc_exc_mask) { + kstack_pf_addr = NATIVE_SAVE_TRAP_CELLAR(regs, trap); + } else { + trap->tc_count = 0; + kstack_pf_addr = 0; + } + read_ticks(clock); + NATIVE_SAVE_STACK_REGS(regs, current_thread_info(), false, + likely(!hw_overflow && !kstack_pf_addr)); + info_save_stack_reg(clock); + + /* un-freeze the TIR's LIFO. Tracing can issue a call + * here so we cannot do it earlier. */ + if (trace_tir_ip_trace_enabled()) { + int i; + for (i = 1; i <= TIR_TRACE_PARTS; i++) + trace_tir_ip_trace(i); + } + UNFREEZE_TIRs(); + + cr0_hi = regs->crs.cr0_hi; + psp_hi = regs->stacks.psp_hi; + pcsp_hi = regs->stacks.pcsp_hi; + + /* + * We will switch interrupts control from PSR to UPSR + * _after_ we have handled all non-masksable exceptions. + * This is needed to ensure that a local_irq_save() call + * in NMI handler won't enable non-maskable exceptions. + */ + DO_SAVE_UPSR_REG(upsr); + INIT_KERNEL_UPSR_REG(false, true); + + CHECK_PT_REGS_LOOP(current_thread_info()->pt_regs); + CHECK_PT_REGS_CHAIN(regs, + NATIVE_NV_READ_USD_LO_REG().USD_lo_base, + current->stack + KERNEL_C_STACK_SIZE); + + if (unlikely(hw_overflow || kstack_pf_addr)) { + /* Assume that no function calls has been done until this + * point, otherwise printed stack might be corrupted. 
*/ + switch_to_reserve_stacks(); + kernel_hw_stack_fatal_error(regs, exceptions, kstack_pf_addr); + } + + if (is_kernel_data_stack_bounds(true /* trap on kernel */, usd_lo)) + kernel_data_stack_overflow(); + +#ifdef CONFIG_CLI_CHECK_TIME + tt0_prolog_ticks(E2K_GET_DSREG(clkr) - start_tick); +#endif + + /* + * This will enable non-maskable interrupts if (!nmi) + */ + parse_TIR_registers(regs, exceptions); + + /* Guest trap handling can be scheduled and migrate to other VCPU */ + /* see comments at arch/e2k/include/asm/process.h */ + /* So: */ + /* 1) host VCPU thread was changed and */ + /* 2) need update thread info and */ + /* 3) regs satructures pointers */ + UPDATE_VCPU_THREAD_CONTEXT(NULL, &thread_info, ®s, NULL, NULL); + +#ifdef CONFIG_PREEMPTION + /* + * Check if we need preemption (the NEED_RESCHED flag could + * have been set by another CPU or by this interrupt handler). + * + * Don't do reschedule on NMIs - we do not want preempt_schedule_irq() + * to enable interrupts or local_irq_disable() to enable non-maskable + * interrupts. But there is one exception - if we received a maskable + * interrupt we must do a reschedule, otherwise we might lose it. + */ + if (unlikely(need_resched() && preempt_count() == 0) && + (!nmi || (exceptions & exc_interrupt_mask)) +#ifdef CONFIG_PREEMPT_LAZY + || (preempt_count() == 0 && + current_thread_info()->preempt_lazy_count == 0 + && test_thread_flag(TIF_NEED_RESCHED_LAZY)) +#endif + ) { + unsigned long flags; + raw_all_irq_save(flags); + /* Check again under closed interrupts to avoid races */ + if (likely(need_resched() && !host_is_at_HV_GM_mode())) + preempt_schedule_irq(); + else + raw_all_irq_restore(flags); + } +#endif + + /* + * Return control from UPSR register to PSR, if UPSR interrupts + * control is used. DONE operation restores PSR state at trap + * point and recovers interrupts control + * + * This also disables all interrupts including NMIs. 
+ */ + if (hardirqs_enabled) { + raw_all_irq_disable(); + trace_hardirqs_on(); + } + RETURN_TO_KERNEL_UPSR(upsr); + + /* + * Dequeue current pt_regs structure and previous + * regs will be now actuale + */ + + CHECK_PT_REGS_CHAIN(regs, + NATIVE_NV_READ_USD_LO_REG().USD_lo_base, + current->stack + KERNEL_C_STACK_SIZE); + current_thread_info()->pt_regs = regs->next; + regs->next = NULL; + CHECK_PT_REGS_LOOP(current_thread_info()->pt_regs); + +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT + trap_times->psp_hi_to_done = NATIVE_NV_READ_PSP_HI_REG(); + trap_times->pshtp_to_done = NATIVE_NV_READ_PSHTP_REG(); + trap_times->pcsp_hi_to_done = NATIVE_NV_READ_PCSP_HI_REG(); + trap_times->ctpr1_to_done = AS_WORD(regs->ctpr1); + trap_times->ctpr2_to_done = AS_WORD(regs->ctpr2); + trap_times->ctpr3_to_done = AS_WORD(regs->ctpr3); + E2K_SAVE_CLOCK_REG(trap_times->end); +#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */ + + /* Update run state info, if trap occured on guest kernel */ + SET_RUNSTATE_OUT_KERNEL_TRAP(to_save_runstate); + + if (unlikely(AW(cr0_hi) != AW(regs->crs.cr0_hi))) + NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG(regs->crs.cr0_hi); + if (unlikely(cpu_has(CPU_HWBUG_SS) && + test_ts_flag(TS_SINGLESTEP_KERNEL))) { + /* + * Hardware can lose singlestep flag on interrupt if it + * arrives earlier, so we must always manually reset it. 
+ */ + e2k_cr1_lo_t cr1_lo = READ_CR1_LO_REG(); + AS(cr1_lo).ss = 1; + WRITE_CR1_LO_REG(cr1_lo); + } + + NATIVE_RESTORE_COMMON_REGS(regs); + E2K_DONE(); +} + + +/***********************************************************************/ + +#ifdef CONFIG_PROTECTED_MODE +#include + +int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, + bool pi, bool pending_op); + +extern const system_call_func sys_protcall_table[]; /* defined in systable.c */ + +static int count_descriptors(long __user *prot_array, + const int prot_array_size); + +static long do_protected_syscall(unsigned long sys_num, const long arg1, + const long arg2, const long arg3, const long arg4, + const long arg5, const long arg6, const long arg7); + +notrace __section(".entry.text") +static inline void get_ipc_mask(long call, long *mask_type, long *mask_align, + int *fields) +{ + /* According to sys_ipc () these are SEMTIMEDOP and (MSGRCV | + * (1 << 16))' (see below on why MSGRCV is not useful in PM) calls that + * make use of FIFTH argument. Both of them interpret it as a long. Thus + * all other calls may be considered as 4-argument ones. Some of them + * are likely to accept even less than 4 arguments, but here I stupidly + * rely on the fact that all invocations of `INLINE_SYSCALL_CALL (ipc)' + * in glibc are passed to either 5 or 6 arguments including CALL. */ + + switch (call) { + case MSGRCV: + /* + * Converting of parameters, which comtain pointers, + * implemented in do_protected_syscall + */ + *mask_type = 0xd5; + *mask_align = 0xf5; + *fields = 4; + break; + case (MSGRCV | (1 << 16)): + /* Instead it's much more handy to pass MSGP as PTR (aka FOURTH) + * and MSGTYP as FIFTH. `1 << 16' makes it clear to `sys_ipc ()' + * that this way of passing arguments is used. */ + case SEMTIMEDOP: + *mask_type = 0x3d5; + *mask_align = 0x3f5; + *fields = 5; + break; + case SHMAT: + /* SHMAT is special because it interprets the THIRD argument as + * a pointer to which AP should be stored in PM. 
TODO: this will + * require additional efforts in our PM handler. */ + *mask_type = 0xf5; + *mask_align = 0xfd; + *fields = 4; + break; + case SEMGET: + *mask_type = 0x15; + *mask_align = 0x15; + *fields = 3; + break; + case SHMGET: + *mask_type = 0x55; + *mask_align = 0x55; + *fields = 4; + break; + case MSGGET: + *mask_type = 0x5; + *mask_align = 0x5; + *fields = 2; + break; + default: + *mask_type = 0xd5; + *mask_align = 0xf5; + *fields = 4; + } +} + +notrace __section(".entry.text") +static inline void get_futex_mask(long call, long *mask_type, long *mask_align, + int *fields) +{ + long cmd = call & FUTEX_CMD_MASK; + + switch (cmd) { + case FUTEX_UNLOCK_PI: + /* On glibc side this command is used both with 2 and 4 + * arguments. I guess that the last two arguments are just + * ignored in the latter case. Consider it to be a 2-argument + * one here to be on the safe side. */ + case FUTEX_WAKE: + /* This command is invoked with 3 arguments on glibc side. For + * 2 and 3-argument futex commands the array parameter is not + * used. */ + *mask_type = 0x0; + *mask_align = 0x0; + *fields = 0; + break; + case FUTEX_WAIT: +#if 0 + /* Does the 4-argument variant of FUTEX_WAKE employed in glibc + * make any sense? According to do_futex () on the Kernel side + * it does not because the 4-th parameter isn't used in any + * way. */ + case FUTEX_WAKE: +#endif /* 0 */ + case FUTEX_LOCK_PI: + case FUTEX_TRYLOCK_PI: + /* For 4 argument futex commands the only TIMEOUT field is + * passed in array as a pointer. */ + *mask_type = 0x3; + *mask_align = 0x2; + *fields = 1; + break; + + case FUTEX_WAIT_BITSET: + /* On glibc side this command is invoked both with 5 and 6 + * arguments with TIMEOUT being a pointer. I doubt if 5 + * arguments are enough for all invocations of this command + * in fact because the last VAL3 argument seems to be meaningful + * and is passed to futex_wake () when handling this request in + * do_futex (). Therefore, here it's treated as 6-argument + * one. 
*/ + *mask_type = 0x1f; + *mask_align = 0x1f; + *fields = 3; + break; + default: + /* Stupidly treat all other requests as 6-argument ones taking + * VAL2 instead of TIMEOUT for now. */ + *mask_type = 0x1d; + *mask_align = 0x1f; + *fields = 3; + } +} + + +/* + * Fetch a PM robust-list pointer. Bit 0 signals PI futexes: + */ +static inline int +fetch_pm_robust_entry(long __user **entry, long __user *head, unsigned int *pi) +{ + long addr; + if (convert_array(head, &addr, 16, 1, 1, 0x3, 0x3)) { + long tmp[2]; + if (copy_from_user_with_tags(tmp, head, 16) == 0) { + long lo, hi; + int ltag, htag; + NATIVE_LOAD_VAL_AND_TAGD(tmp, lo, ltag); + NATIVE_LOAD_VAL_AND_TAGD(&tmp[1], hi, htag); + + printk(KERN_DEBUG "Fetch pm_robust_entry failed with AP == <%x> 0x%lx : <%x> 0x%lx\n", + ltag, tmp[0], htag, tmp[1]); + } + + return -EFAULT; + } + + *entry = (long __user *) (addr & ~1); + *pi = (unsigned int) addr & 1; + + return 0; +} + +static void __user *pm_futex_uaddr(long __user *entry, long futex_offset) +{ + compat_uptr_t base = (long) entry; + void __user *uaddr = (void __user *) (base + futex_offset); + + return uaddr; +} + +/* + * Walk curr->robust_list (very carefully, it's a userspace list!) + * and mark any locks found there dead, and notify any waiters. + * + * We silently return on any sign of list-walking problem. + */ +void pm_exit_robust_list(struct task_struct *curr) +{ + long __user *head = task_thread_info(curr)->pm_robust_list; + long __user *entry, *next_entry, *pending; + unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; + unsigned int uninitialized_var(next_pi); + long futex_offset; + int rc; + + if (!futex_cmpxchg_enabled) + return; + + /* + * Fetch the list head (which was registered earlier, via + * sys_set_robust_list()): + */ + if (fetch_pm_robust_entry(&entry, head, &pi)) + return; + /* + * Fetch the relative futex offset. + * Note that structures being converted are aligned on 16 + * byte boundary and have size being a multiple of 16. 
This is rather + * harmless as the FUTEX_OFFSET field in PM `struct robust_list_head' + * is aligned on 16 byte boundary and there's an 8-byte gap between it + * and the next LIST_OP_PENDING field, however, it makes sense to get + * rid of this limiation when (sub)structures containing no APs are + * converted. + */ + if (get_user(futex_offset, (long *) &head[2])) { + DbgSCP_ALERT("failed to read from 0x%lx !!!\n", + (uintptr_t) &head[2]); + return; + } + + /* + * Fetch any possibly pending lock-add first, and handle it + * if it exists: + */ + if (fetch_pm_robust_entry(&pending, &head[4], &pip)) + return; + + next_entry = NULL; /* avoid warning with gcc */ + while (entry != head) { + /* + * Fetch the next entry in the list before calling + * handle_futex_death: + */ + rc = fetch_pm_robust_entry(&next_entry, entry, &next_pi); + /* + * A pending lock might already be on the list, so + * dont process it twice: + */ + if (entry != pending) { + void __user *uaddr; + uaddr = pm_futex_uaddr(entry, futex_offset); + + if (handle_futex_death(uaddr, curr, pi, false)) + return; + } + + if (rc) + return; + + entry = next_entry; + pi = next_pi; + /* + * Avoid excessively long or circular lists: + */ + if (!--limit) + break; + + cond_resched(); + } + if (pending) { + void __user *uaddr = pm_futex_uaddr(pending, futex_offset); + + handle_futex_death(uaddr, curr, pip, true); + } +} + +__section(".entry.text") +SYS_RET_TYPE notrace ttable_entry10_C(long sys_num, + long arg1, long arg2, long arg3, long arg4, + long arg5, long arg6, struct pt_regs *regs) +{ +#define ARG_TAG(i) ((tags >> (4*(i))) & 0xF) +#define NOT_PTR(i) (((tags >> (4*(i))) & 0xFFUL) != ETAGAPQ) +#define NULL_PTR(i) ((ARG_TAG(i) == E2K_NULLPTR_ETAG) && (arg##i == 0)) + +#define GET_PTR_OR_NUMBER(ptr, size, i, j, min_size, null_is_allowed) \ +do { \ + if (unlikely(NULL_PTR(i))) { \ + ptr = 0; \ + size = min_size * !!null_is_allowed; \ + if (!null_is_allowed) \ + DbgSCP(#i " " #j " NULL pointer is not allowed.\n"); \ 
+ } else if (likely(!NOT_PTR(i))) { \ + ptr = e2k_ptr_ptr(arg##i, arg##j, min_size); \ + size = e2k_ptr_size(arg##i, arg##j, min_size); \ + } else { \ + ptr = arg##i; \ + size = 0; \ + } \ +} while (0) + +#define GET_PTR(ptr, size, i, j, min_size, null_is_allowed) \ +do { \ + if (unlikely(NULL_PTR(i))) { \ + ptr = 0; \ + size = min_size * !!null_is_allowed; \ + if (!null_is_allowed) \ + DbgSCP(#i " " #j " NULL pointer is not allowed.\n"); \ + } else if (likely(!NOT_PTR(i))) { \ + ptr = e2k_ptr_ptr(arg##i, arg##j, min_size); \ + size = e2k_ptr_size(arg##i, arg##j, min_size); \ + } else { \ + ptr = 0; \ + size = 0; \ + DbgSCP_ALERT(#i " " #j " Not a pointer is not allowed.\n"); \ + } \ +} while (0) + +#define GET_STR(str, i, j) \ +do { \ + if (likely(!NOT_PTR(i) && !NULL_PTR(i))) { \ + str = e2k_ptr_str(arg##i, arg##j, GET_SBR_HI()); \ + if (!str) \ + DbgSCP(#i ":" #j " is not a null-terminated string"); \ + } else { \ + str = 0; \ + DbgSCP_ALERT(#i ":" #j " is NULL or not a valid pointer"); \ + break; \ + } \ +} while (0) + + register long rval = -EINVAL; +#ifdef CONFIG_DEBUG_PT_REGS + e2k_usd_lo_t usd_lo_prev; + struct pt_regs *prev_regs = regs; +#endif + /* Array for storing parameters when they are passed + * through another array (usually arg2:arg3 points to it). + * Users: + * 6 arguments: sys_ipc, sys_futex; + * 5 arguments: sys_newselect; + * 3 arguments: sys_execve. + * + * NOTE: some syscalls (namely sys_rt_sigtimedwait, sys_el_posix and + * sys_linkat) had to have the order of arguments changed to fit + * them all into dr1-dr7 registers because pointers in protected + * mode take up two registers dr[2 * n] and dr[2 * n + 1]. + * In sys_el_posix first and last arguments are even merged into + * one. 
*/ + long *args = (long *) ((((unsigned long) regs) + sizeof(struct pt_regs) + + 0xfUL) & (~0xfUL)); + const long arg7 = args[0]; + const u32 tags = (u32) args[1]; + + register long rval1 = 0; /* numerical return value or */ + register long rval2 = 0; /* both rval1 & rval2 */ + int return_desk = 0; + int rv1_tag = E2K_NUMERIC_ETAG; + int rv2_tag = E2K_NUMERIC_ETAG; +#ifdef CONFIG_E2K_PROFILING + register long start_tick = NATIVE_READ_CLKR_REG_VALUE(); + register long clock1; +#endif + char *str, *str2, *str3; + e2k_addr_t base; + unsigned long ptr, ptr2, ptr3; + unsigned int size; + long mask_type, mask_align; + int fields; + +#ifdef CONFIG_DEBUG_PT_REGS + /* + * pt_regs structure is placed as local data of the + * trap handler (or system call handler) function + * into the kernel local data stack + */ + usd_lo_prev = NATIVE_NV_READ_USD_LO_REG(); +#endif + + init_pt_regs_for_syscall(regs); + /* now we have 2 proc_sys_call entry*/ + regs->flags.protected_entry10 = 1; + regs->return_desk = 0; + SAVE_STACK_REGS(regs, current_thread_info(), true, false); + regs->sys_num = sys_num; + SAVE_PSYSCALL_ARGS(regs, arg1, arg2, arg3, arg4, + arg5, arg6, arg7, tags); +#ifdef CONFIG_E2K_PROFILING + read_ticks(clock1); + info_save_stack_reg(clock1); +#endif + current_thread_info()->pt_regs = regs; + WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_ENABLED)); + + DbgSCP("_NR_ %ld start. 
current %px pid %d tags=0x%x\n", + sys_num, current, current->pid, tags); + + local_irq_enable(); + + /* Trace syscall enter */ + if (unlikely(current_thread_info()->flags & _TIF_WORK_SYSCALL_TRACE)) { + /* Call tracer */ + syscall_trace_entry(regs); + + /* Update args, since tracer could have changed them */ + RESTORE_SYSCALL_ARGS(regs, sys_num, + arg1, arg2, arg3, arg4, arg5, arg6); + } + + switch (sys_num) { + case __NR_restart_syscall: + DbgSC("restart_syscall()\n"); + rval = sys_restart_syscall(); + break; + case __NR_read: + case __NR_write: + case __NR_getdents: + case __NR_getdents64: + DbgSCP("__NR_%ld protected: fd = %d, buf = 0x%lx : 0x%lx, " + "count = 0x%lx", sys_num, (int) arg1, arg2, arg3, arg4); + GET_PTR(ptr, size, 2, 3, arg4, 0); + if (!size && arg4) + break; + + if (sys_num == __NR_read) + rval = sys_read(arg1, (char *) ptr, (size_t) arg4); + else if (sys_num == __NR_write) + rval = sys_write(arg1, (char *) ptr, (size_t) arg4); + else if (sys_num == __NR_getdents) + rval = sys_getdents((unsigned int) arg1, + (struct linux_dirent *) ptr, + (unsigned int) arg4); + else + rval = sys_getdents64((unsigned int) arg1, + (struct linux_dirent64 *) ptr, + (unsigned int) arg4); + DbgSCP(" rval = %ld\n", rval); + break; + case __NR_waitpid: + DbgSCP("waitpid(): pid = %ld, int * = 0x%lx : 0x%lx, " + "flag = 0x%lx", arg1, arg2, arg3, arg4); + GET_PTR(ptr, size, 2, 3, sizeof(int), 1); + if (!size) + break; + + rval = sys_waitpid((int) arg1, (int *) ptr, (int) arg4); + DbgSCP(" rval = %ld\n",rval); + break; + case __NR_waitid: + DbgSCP("waitid(): idtype = %ld, id = %ld, options = 0x%lx, " + "infop = 0x%lx : 0x%lx, rusage = 0x%lx : 0x%lx", + arg1, arg2, arg3, arg4, arg5, arg6, arg7); + + GET_PTR(ptr, size, 4, 5, sizeof(siginfo_t), 1); + if (!size) + break; + + GET_PTR(ptr2, size, 6, 7, sizeof(struct rusage), 1); + if (!size) + break; + + rval = sys_waitid((int) arg1, (pid_t) arg2, + (struct siginfo *) ptr, + (int) arg3, (struct rusage *) ptr2); + DbgSCP(" rval = 
%ld\n", rval); + break; + case __NR_time: + DbgSCP("time(): t = 0x%lx : 0x%lx ", arg2, arg3); + GET_PTR(ptr, size, 2, 3, sizeof(time_t), 1); + if (!size) + break; + + rval = sys_time((time_t *) ptr); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_pipe: + case __NR_pipe2: + DbgSCP("pipe(0x%lx : 0x%lx\n) ", arg2, arg3); + GET_PTR(ptr, size, 2, 3, 2 * sizeof (u32), 0); + if (!size) + break; + + if (sys_num == __NR_pipe) + rval = sys_pipe((int *) ptr); + else + rval = sys_pipe2((int *) ptr, (int) arg4); + + DbgSCP(" rval = %ld\n", rval); + break; + case __NR_times: + DbgSCP("times(): buf = 0x%lx : 0x%lx, ", arg2, arg3); + + GET_PTR(ptr, size, 2, 3, sizeof(struct tms), 1); + if (!size) + break; + + rval = sys_times((struct tms *) ptr); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_utime: + DbgSCP("utime(): filename = 0x%lx : 0x%lx, times = 0x%lx : 0x%lx", + arg2, arg3, arg4, arg5); + GET_STR(str, 2, 3); + if (!str) + break; + + GET_PTR(ptr, size, 4, 5, sizeof(struct utimbuf), 1); + if (!size) + break; + + rval = sys_utime(str, (struct utimbuf *) ptr); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_ustat: + DbgSCP("ustat(): fd = %ld, statbuf = 0x%lx : 0x%lx, ", + arg1, arg2, arg3); + + GET_PTR(ptr, size, 2, 3, sizeof(struct ustat), 0); + if (!size) + break; + + rval = sys_ustat(arg1, (struct ustat *) ptr); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_setrlimit: + case __NR_getrlimit: + case __NR_ugetrlimit: + DbgSCP("%ld protected(): resource = %ld, rlimit = " + "0x%lx : 0x%lx, ", sys_num, arg1, arg2, arg3); + + GET_PTR(ptr, size, 2, 3, sizeof(struct rlimit), 0); + if (!size) + break; + + if (sys_num == __NR_setrlimit) + rval = sys_setrlimit(arg1, (struct rlimit *) ptr); + else + rval = sys_getrlimit(arg1, (struct rlimit *) ptr); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_prlimit64: + GET_PTR(ptr, size, 4, 5, sizeof(struct rlimit64), 1); + if (!size) + break; + + GET_PTR(ptr2, size, 6, 7, sizeof(struct rlimit64), 1); + if (!size) + break; 
+ + rval = sys_prlimit64((pid_t) arg1, (int) arg2, + (struct rlimit64 *)ptr, + (struct rlimit64 *) ptr2); + + break; + + case __NR_getrusage: + DbgSCP("getrusage(): who = %ld, rusage = 0x%lx : 0x%lx, ", + arg1, arg2, arg3); + + GET_PTR(ptr, size, 2, 3, sizeof(struct rusage), 0); + if (!size) + break; + + rval = sys_getrusage(arg1, (struct rusage *) ptr); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_gettimeofday: + DbgSCP("gettimeofday(): time = 0x%lx : 0x%lx, " + "zone = 0x%lX : 0x%lx, ", arg2, arg3, arg4, arg5); + + GET_PTR(ptr, size, 2, 3, sizeof(struct timeval), 1); + if (!size) + break; + + GET_PTR(ptr2, size, 4, 5, sizeof(struct timezone), 1); + if (!size) + break; + + rval = sys_gettimeofday((struct timeval *) ptr, + (struct timezone *) ptr2); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_getgroups: + DbgSCP("getgroups(): cnt = %ld, buf = 0x%lx : 0x%lx, ", + arg1, arg2, arg3); + + GET_PTR(ptr, size, 2, 3, arg1 * sizeof(gid_t), 1); + if (arg1 && !size) + break; + + rval = sys_getgroups(arg1, (gid_t *) ptr); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_readlink: + DbgSCP("readlink(): path = 0x%lx : 0x%lx, buf = 0x%lx : 0x%lx, sz = %ld", + arg2, arg3, arg4, arg5, arg6); + + GET_STR(str, 2, 3); + if (!str) + break; + + GET_PTR(ptr, size, 4, 5, arg6, 0); + if (!size) + break; + + rval = sys_readlink(str, (char *) ptr, (size_t) arg6); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_readdir: +#if 0 + DbgSCP("readdir(): fd = %ld, buf = 0x%lx : 0x%lx, sz = %ld", + arg1, arg2, arg3, arg4); + + GET_PTR(ptr, size, 2, 3, arg4, 0); + if (!size) + break; + + rval = old_readdir((unsigned int) arg1, (char *) ptr, + (unsigned int) arg4); +#else + DbgSCP("readdir(): fd = %ld, buf = 0x%lx : 0x%lx, sz = %ld", + arg1, arg2, arg3, arg4); + rval = -ENOSYS; +#endif + DbgSCP("rval = %ld\n",rval); + break; + case __NR_mmap: { + unsigned int enable = 0; + + DbgSCP("mmap(): start = %ld, len = %ld, prot = 0x%lx " + "flags = 0x%lx, fd = 0x%lx, off = %ld", + arg1, 
arg2, arg3, arg4, arg5, arg6); + return_desk = 1; + rval1 = rval2 = 0; + rv1_tag = rv2_tag = 0; + if ((unsigned long) arg2 > 0xFFFFFFFF) { + rval = -E2BIG; + break; + } + base = sys_mmap((unsigned long) arg1, (unsigned long) arg2, + (unsigned long) arg3, (unsigned long) arg4, + (unsigned long) arg5, (unsigned long) arg6); + if (base & ~PAGE_MASK) { + rval = base; + goto nr_mmap_out; + } + base += (unsigned long) arg6 & PAGE_MASK; + if (arg3 & PROT_READ) { + enable |= R_ENABLE; + } + if (arg3 & PROT_WRITE) { + enable |= W_ENABLE; + } + rval1 = make_ap_lo(base, arg2, 0, enable); + rval2 = make_ap_hi(base, arg2, 0, enable); + rv1_tag = E2K_AP_LO_ETAG; + rv2_tag = E2K_AP_HI_ETAG; + rval = 0; +nr_mmap_out: + DbgSCP(" rval = %ld (hex: %lx) - 0x%lx : 0x%lx\n", + rval, rval, rval1, rval2); + break; + } + case __NR_munmap: + DbgSCP("munmap(): mem = %lx : %lx, sz = %lx ", + arg2, arg3, arg4); + + GET_PTR(ptr, size, 2, 3, arg4, 0); + if (!size) + break; + + if (e2k_ptr_itag(arg2) != AP_ITAG) { + DbgSCP("Desc in stack\n"); + break; + } + + rval = sys_munmap(ptr, arg4); + DbgSC("rval = %ld (hex: %lx)\n", rval, rval); + break; + case __NR_statfs: + DbgSCP("stat(): path = 0x%lx : 0x%lx, buf = 0x%lx : 0x%lx, ", + arg2, arg3, arg4, arg5); + + GET_STR(str, 2, 3); + if (!str) + break; + + GET_PTR(ptr, size, 4, 5, sizeof(struct statfs), 0); + if (!size) + break; + + rval = sys_statfs(str, (struct statfs *) ptr); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_fstatfs: + DbgSCP("fstat(): fd = %ld, buf = 0x%lx : 0x%lx, ", + arg1, arg2, arg3); + + GET_PTR(ptr, size, 2, 3, sizeof(struct statfs), 0); + if (!size) + break; + + rval = sys_fstatfs(arg1, (struct statfs *) ptr); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_stat: + case __NR_lstat: + DbgSCP("stat(): filename = (0x%lx : 0x%lx, " + "statbuf = 0x%lx : 0x%lx, ", arg2, arg3, arg4, arg5); + + GET_STR(str, 2, 3); + if (!str) + break; + + GET_PTR(ptr, size, 4, 5, sizeof(struct stat), 0); + if (!size) + break; + + if (sys_num 
== __NR_stat) + rval = sys_newstat(str, (struct stat *) ptr); + else + rval = sys_newlstat(str, (struct stat *) ptr); + + DbgSCP("rval = %ld\n", rval); + break; + case __NR_syslog: + DbgSCP("syslogr(): tupe = %ld, buf = 0x%lx : 0x%lx, sz = %ld", + arg1, arg2, arg3, arg4); + + GET_PTR(ptr, size, 2, 3, arg4, 1); + if (!size) + break; + + rval = sys_syslog((int) arg1, (char *) ptr, (int) arg4); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_setitimer: + case __NR_getitimer: + DbgSCP("%ld protected: which = %ld, " + "val= 0x%lx : 0x%lx, oval= 0x%lx : 0x%lx, ", + sys_num, arg1, arg2, arg3, arg4, arg5); + + GET_PTR(ptr, size, 2, 3, sizeof(struct itimerval), 0); + if (!size) + break; + + if (sys_num == __NR_getitimer) { + rval = sys_getitimer(arg1, (struct itimerval *) ptr); + } else { + GET_PTR(ptr2, size, 4, 5, sizeof(struct itimerval), 1); + if (!size) + break; + + rval = sys_setitimer(arg1, (struct itimerval *) ptr, + (struct itimerval *) ptr2); + } + DbgSCP("rval = %ld\n",rval); + break; + case __NR_fstat: + DbgSCP("fstat(): fd = %ld, statbuf = 0x%lx : 0x%lx, ", + arg1, arg2, arg3); + + GET_PTR(ptr, size, 2, 3, sizeof(struct stat), 0); + if (!size) + break; + + rval = sys_newfstat(arg1, (struct stat *) ptr); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_wait4: + DbgSCP("wait4(): pid = %ld, status= 0x%lx : 0x%lx, " + "opt = 0x%lx, usage= 0x%lx : 0x%lx, ", + arg1, arg2, arg3, arg6, arg4, arg5); + + GET_PTR(ptr, size, 2, 3, sizeof(int), 1); + if (!size) + break; + + GET_PTR(ptr2, size, 4, 5, sizeof(struct rusage), 1); + if (!size) + break; + + rval = sys_wait4((pid_t) arg1, (int *) ptr, (int) arg6, + (struct rusage *) ptr2); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_sysinfo: + DbgSCP("sysinfo(): sysinfo = 0x%lx : 0x%lx, ", + arg2, arg3); + + GET_PTR(ptr, size, 2, 3, sizeof(struct sysinfo), 0); + if (!size) + break; + + rval = sys_sysinfo((struct sysinfo *) ptr); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_ipc: + /* sys_ipc - last parameter 
may be pointer or long - + * it depends on the first parameter. + * 6 parameters are passed through array. + * maska is the first element of array. */ + GET_PTR(ptr, size, 2, 3, 0, 0); + + /* Let the CALL parameter be passed separately, i.e. not in + * array. FIRST, SECOND, THIRD, PTR and FIFTH will be passed + * in array. */ + get_ipc_mask(arg1, &mask_type, &mask_align, &fields); + if (fields == 0) + break; + + if ((rval = convert_array((long *) ptr, args, size, fields, 1, + mask_type, mask_align))) { + DbgSCP(" Bad array for _ipc\n"); + break; + } + DbgSCP("ipc(): call:%d first:%d second:%lld third:%lld\n" + "ptr:%px fifth:%lld\n", (u32) arg1, + (int) args[0], (u64) args[1], (u64) args[2], + (void *) (u64) args[3], (u64) args[4]); + rval = sys_ipc((u32) arg1, (int) args[0], (u64) args[1], + (u64) args[2], (void *) (u64) args[3], + (u64) args[4]); + DbgSC("rval = %ld\n", rval); + break; + case __NR_clone: { + unsigned long args_ptr; + unsigned int args_size, tls = 0, tls_size; + struct kernel_clone_args cargs; + + DbgSCP("clone(0x%lx, 0x%01lx, 0x%016lx, 0x%lx, 0x%lx)\n", + arg1, arg2, arg3, arg4, arg5); + + /* Read TLS and TID parameters passed indirectly through + * an array at (arg4:arg5). + * + * User may choose to not pass additional arguments + * (tls, tid) at all for historical and compatibility + * reasons, so we do not fail if (arg4,arg5) pointer + * is bad. */ + GET_PTR(args_ptr, args_size, 4, 5, 3 * 16, 0); + if (args_size != 0 + && convert_array((long *) args_ptr, args, args_size, + 1, 3, 0x3, 0x3) == 0) { + /* Looks like a good pointer. Flags will later + * show whether these arguments are any good. + * + * The first argument is parent_tidptr and + * the second one is child_tidptr. The third + * argument (tls) requires special handling. */ + if (arg1 & CLONE_SETTLS) { + int tls_lo_tag, tls_hi_tag; + u64 tls_lo, tls_hi; + + /* Copy TLS argument with tags. 
*/ + TRY_USR_PFAULT { + NATIVE_LOAD_TAGGED_QWORD_AND_TAGS( + ((u64 *) args_ptr) + 4, + tls_lo, tls_hi, + tls_lo_tag, tls_hi_tag); + } CATCH_USR_PFAULT { + rval = -EFAULT; + break; + } END_USR_PFAULT + + /* Check that the pointer is good. */ + tls = e2k_ptr_ptr(tls_lo, tls_hi, 4); + tls_size = e2k_ptr_size(tls_lo, tls_hi, 4); + if (((tls_hi_tag << 4) | tls_lo_tag) != ETAGAPQ + || tls_size < sizeof(int)) { + DbgSCP(" Bad TLS pointer: size=%d, tag=%d\n", + tls_size, + (tls_hi_tag << 4) | tls_lo_tag); + break; + } + } + } else { + if (unlikely(arg1 & (CLONE_SETTLS | CLONE_CHILD_SETTID | + CLONE_PARENT_SETTID | + CLONE_CHILD_CLEARTID))) { + DbgSCP("Bad tid or tls argument\n"); + break; + } + } + + /* Get stack parameters */ + GET_PTR(ptr, size, 2, 3, 0, true); + size = e2k_ptr_curptr(arg2, arg3); + /* + * Multithreading support - change all SAP to AP in globals + * to guarantee correct access to memory + */ + if (arg1 & CLONE_VM) + mark_all_global_sp(regs, current->pid); + + DbgSCP("calling sys_clone(0x%lx, 0x%lx)size=0x%x\n", + arg1, ptr, size); + + cargs.flags = (arg1 & ~CSIGNAL); + cargs.pidfd = (int __user *) args[0]; + cargs.child_tid = (int __user *) args[1]; + cargs.parent_tid = (int __user *) args[0]; + cargs.exit_signal = (arg1 & CSIGNAL); + cargs.stack = ptr - size; + cargs.stack_size = size; + cargs.tls = tls; + + /* passing size of desk to _do_fork */ + rval = _do_fork(&cargs); + + DbgSCP("rval = %ld, sys_num = %ld\n", rval, sys_num); + break; + } + case __NR_uname: + DbgSCP("uname(): struct = 0x%lx : 0x%lx ", + arg2, arg3); + + GET_PTR(ptr, size, 2, 3, sizeof(struct new_utsname), 0); + if (!size) + break; + + rval = sys_newuname((struct new_utsname *) ptr); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_adjtimex: + DbgSCP("adjmutex(): struct = 0x%lx : 0x%lx ", + arg2, arg3); + + GET_PTR(ptr, size, 2, 3, sizeof(struct __kernel_timex), 0); + if (!size) + break; + + rval = sys_adjtimex((struct __kernel_timex *) ptr); + DbgSCP("rval = %ld\n",rval); + 
break; + case __NR_mprotect: + DbgSCP("mprotect(): void* = 0x%lx : 0x%lx," + "len = 0x%lx; prot = 0x%lx ", + arg2, arg3, arg4, arg5); + + GET_PTR(ptr, size, 2, 3, arg4, 0); + if (!size) + break; + + rval = sys_mprotect((unsigned long) ptr, (size_t) arg4, + (unsigned long) arg5); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_init_module: + GET_PTR(ptr, size, 2, 3, 0, 0); + if (!size) + break; + + GET_STR(str, 4, 5); + if (!str) + break; + DbgSCP("init_module(): umod:%px, len:0x%lx, uargs:%px\n", + (void*) ptr, arg1, str); + + rval = sys_init_module((void *) ptr, (u64) arg1, str); + DbgSC("rval = %ld\n", rval); + break; + case __NR_sysfs: + /* arg2 may be pnt or long (depend on arg1) */ + DbgSCP("system call %ld arg1=%ld (arg{2,3} = 0x%lx : 0x%lx)," + " (arg{4,5} = 0x%lx : 0x%lx)", + sys_num, arg1, arg2, arg3, arg4, arg5); + GET_PTR(ptr, size, 2, 3, 0, 1); + GET_PTR(ptr2, size, 4, 5, 0, 1); + rval = sys_sysfs(arg1, ptr, ptr2); + DbgSCP("sys_sysfs rval = %ld\n",rval); + break; + case __NR__llseek: + DbgSCP("llseek(): fd = 0x%lx, hi = 0x%lx,lo = 0x%lx; " + "res = 0x%lx : 0x%lx, wh = 0x%lx", + arg1, arg2, arg3, arg4, arg5, arg6); + + GET_PTR(ptr, size, 4, 5, sizeof(loff_t), 0); + if (!size) + break; + + rval = sys_llseek((unsigned int) arg1, (unsigned long) arg2, + (unsigned long) arg3, (loff_t *) ptr, + (unsigned int) arg6); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_sched_setparam: + case __NR_sched_getparam: + GET_PTR(ptr, size, 2, 3, sizeof(struct sched_param), 0); + if (!size) + break; + + if (sys_num == __NR_sched_setparam) { + DbgSCP("sched_setparam(): pid = 0x%lx, " + "args = 0x%lx : 0x%lx, ", arg1, arg2, arg3); + rval = sys_sched_setparam((pid_t) arg1, + (struct sched_param *) ptr); + } else { + DbgSCP("sched_getparam(): pid = 0x%lx, " + "args = 0x%lx : 0x%lx, ", arg1, arg2, arg3); + rval = sys_sched_getparam((pid_t) arg1, + (struct sched_param *) ptr); + } + DbgSCP("rval = %ld\n",rval); + break; + case __NR_sched_setscheduler: + 
DbgSCP("sched_setscheduler(): pid = %d, policy=%d, " + "args = 0x%lx : 0x%lx, ", + (pid_t) arg1, (int) arg2, arg4, arg5); + + GET_PTR(ptr, size, 4, 5, sizeof(struct sched_param), 0); + if (!size) + break; + + rval = sys_sched_setscheduler((pid_t) arg1, (int) arg2, + (struct sched_param __user *) ptr); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_sched_rr_get_interval: + DbgSCP("sched_getparam(): pid = 0x%lx, time = 0x%lx : 0x%lx\n", + arg1, arg2, arg3); + + GET_PTR(ptr, size, 2, 3, sizeof(struct __kernel_timespec), 0); + if (!size) + break; + + rval = sys_sched_rr_get_interval((pid_t) arg1, + (struct __kernel_timespec *) ptr); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_nanosleep: + DbgSCP("nanosleep(): req = 0x%lx : 0x%lx," + "rem = 0x%lx : 0x%lx ", + arg2, arg3, arg4, arg5); + + GET_PTR(ptr, size, 2, 3, sizeof(struct __kernel_timespec), 0); + if (!size) + break; + + GET_PTR(ptr2, size, 4, 5, sizeof(struct __kernel_timespec), 1); + if (!size) + break; + + rval = sys_nanosleep((struct __kernel_timespec *) ptr, + (struct __kernel_timespec *) ptr2); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_mremap: + DbgSCP("mremap(): void * = 0x%lx, : 0x%lx " + "o_sz = 0x%lx, n_sz = 0x%lx, flags = 0x%lx", + arg2, arg3, arg4, arg5, arg6); + + GET_PTR(ptr, size, 2, 3, arg4, 0); + if (!size) + break; + + base = sys_mremap((unsigned long) ptr, + (unsigned long) arg4, (unsigned long) arg5, + (unsigned long) arg6, + /* MREMAP_FIXED is not supported in PM, + * therefore pass an invalid value for + * new_address. 
*/ + 0); + if (base & ~PAGE_MASK) { + rval = base; + } else { + rval1 = make_ap_lo(base, arg2, 0, e2k_ptr_rw(arg2)); + rval2 = make_ap_hi(base, arg2, 0, e2k_ptr_rw(arg2)); + rv1_tag = E2K_AP_LO_ETAG; + rv2_tag = E2K_AP_HI_ETAG; + return_desk = 1; + rval = 0; + } + DbgSCP("rval = %ld\n",rval); + break; + case __NR_poll: + DbgSCP("poll(): fds = 0x%lx : 0x%lx, " + "nfds = 0x%lx, timeout = 0x%lx, ", + arg2, arg3, arg4, arg5); + + GET_PTR(ptr, size, 2, 3, arg4 * sizeof(struct pollfd), 0); + if (!size) + break; + + rval = sys_poll((struct pollfd *) ptr, arg4, arg5); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_ppoll: + DbgSCP("ppoll(): fds = 0x%lx : 0x%lx, " + "nfds = 0x%lx, tmo_p = 0x%lx : 0x%lx, " + "sigmask = 0x%lx : 0x%lx", + arg2, arg3, arg1, arg4, arg5, arg6, arg7); + + GET_PTR(ptr, size, 2, 3, arg1 * sizeof(struct pollfd), 0); + if (!size) + break; + + GET_PTR(ptr2, size, 4, 5, sizeof(struct __kernel_timespec), 1); + if (!size) + break; + + GET_PTR(ptr3, size, 6, 7, sizeof(sigset_t), 1); + if (!size) + break; + + rval = sys_ppoll((struct pollfd *) ptr, + arg1, + (struct __kernel_timespec *) ptr2, + (const sigset_t *) ptr3, + sizeof(sigset_t)); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_rt_sigaction: { + GET_PTR(ptr, size, 2, 3, sizeof(prot_sigaction_old_t), 1); + if (!size) + break; + + GET_PTR(ptr2, size, 4, 5, sizeof(prot_sigaction_old_t), 1); + if (!size) + break; + + rval = protected_sys_rt_sigaction((int)arg1, + (const void *)ptr, (void *)ptr2, (size_t) arg6); + DbgSCP("compat_protected_sys_rt_sigaction() " + "rval = %ld\n", rval); + break; + } + case __NR_rt_sigaction_ex: { + GET_PTR(ptr, size, 2, 3, sizeof(prot_sigaction_t), 1); + if (!size) + break; + + GET_PTR(ptr2, size, 4, 5, sizeof(prot_sigaction_t), 1); + if (!size) + break; + + rval = protected_sys_rt_sigaction_ex((int)arg1, + (const void *)ptr, (void *)ptr2, (size_t) arg6); + DbgSCP("protected_sys_rt_sigaction() rval = %ld\n", rval); + break; + } + case __NR_rt_sigprocmask: + case 
__NR_sigprocmask: + DbgSCP("sigprocmask(): how = 0x%lx, new = 0x%lx : 0x%lx," + "old = 0x%lx : 0x%lx ", + arg1, arg2, arg3, arg4, arg5); + + GET_PTR(ptr, size, 2, 3, sizeof(sigset_t), 1); + if (!size) + break; + + GET_PTR(ptr2, size, 4, 5, sizeof(sigset_t), 1); + if (!size) + break; + + rval = sys_rt_sigprocmask((int) arg1, (sigset_t*) ptr, + (sigset_t*) ptr2, sizeof(sigset_t)); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_rt_sigtimedwait: + DbgSCP("sys_rt_sigtimedwait(): uthese = 0x%lx : 0x%lx, " + "uinfo = 0x%lx : 0x%lx, uts = 0x%lx : 0x%lx, " + "sigsetsize %ld\n", arg2, arg3, arg4, arg5, + arg6, arg7, arg1); + + GET_PTR(ptr, size, 2, 3, arg1, 0); + if (!size) + break; + + GET_PTR(ptr2, size, 4, 5, sizeof(siginfo_t), 1); + if (!size) + break; + + GET_PTR(ptr3, size, 6, 7, sizeof(struct __kernel_timespec), 1); + if (!size) + break; + + rval = sys_rt_sigtimedwait((const sigset_t *) ptr, + (siginfo_t *) ptr2, + (const struct __kernel_timespec *) ptr3, + (size_t) arg1); + DbgSC("rval = %ld\n", rval); + break; + case __NR_rt_sigpending: + case __NR_setdomainname: + DbgSCP("__NR_%ld protected: buf = 0x%lx : 0x%lx, sz = %ld", + sys_num, arg2, arg3, arg4); + GET_PTR(ptr, size, 2, 3, arg4, 0); + if (!size) + break; + + if (sys_num == __NR_rt_sigpending) + rval = sys_rt_sigpending((sigset_t *) ptr, arg4); + else + rval = sys_setdomainname((char *) ptr, (size_t) arg4); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_rt_sigsuspend: + DbgSCP("rt_sigsuspend(): sigset = 0x%lx : 0x%lx, sz = %ld", + arg2, arg3, arg4); + + GET_PTR(ptr, size, 2, 3, arg4, 0); + if (!size) + break; + + rval = sys_rt_sigsuspend((sigset_t *) ptr, arg4); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_rt_sigqueueinfo: + GET_PTR(ptr, size, 4, 5, sizeof(siginfo_t), 1); + rval = sys_rt_sigqueueinfo((pid_t) arg1, (int) arg2, + (siginfo_t *) ptr); + break; + case __NR_pread: + DbgSCP("pread(): fd = 0x%lx, " + "buf = 0x%lx : 0x%lx, len= 0x%lx, off = 0x%lx", + arg1, arg2, arg3, arg4, arg5); + + 
GET_PTR(ptr, size, 2, 3, arg4, 0); + if (!size) + break; + + rval = sys_pread64(arg1, (void *) ptr, arg4, arg5); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_pwrite: + DbgSCP("pwrite(): fd = 0x%lx, " + "buf = 0x%lx : 0x%lx, len= 0x%lx, off = 0x%lx", + arg1, arg2, arg3, arg4, arg5); + + GET_PTR(ptr, size, 2, 3, arg4, 0); + if (!size) + break; + + rval = sys_pwrite64(arg1, (void *) ptr, arg4, arg5); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_getcwd: + DbgSCP("getcwd(): char* = 0x%lx : 0x%lx, len = 0x%lx", + arg2, arg3, arg4); + + GET_PTR(ptr, size, 2, 3, arg4, 0); + if (!size) + break; + + rval = sys_getcwd((char *) ptr, (unsigned long) arg4); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_e2k_longjmp2: + DbgSCP("longjmp2: buf = 0x%lx : 0x%lx, retval = %ld ", + arg2, arg3, arg4); + + GET_PTR(ptr, size, 2, 3, sizeof(struct jmp_info), 0); + if (!size) + break; + + rval = sys_e2k_longjmp2((struct jmp_info *) ptr, arg4); + DbgSCP("longjmp2 finish regs %px rval %ld\n", + regs, rval); + break; + case __NR_futex: + /* To ease access to FUTEX_OP let the first three UADDR, + * FUTEX_OP and VAL arguments be passed on registers. This way, + * the array passed on %qr6 may contain three fields at maximum: + * TIMEOUT (VAL2), UADDR2 and VAL3. */ + get_futex_mask(arg4, &mask_type, &mask_align, &fields); + /* Don't bother about converting array if it's not required by + * the command under consideration. */ + if (fields != 0) { + GET_PTR(ptr, size, 6, 7, 0, 0); + if (!size) + break; + + /* Strip all protected mode stuff from the passed + * parameters. */ + rval = convert_array((long *) ptr, args, size, fields, + 1, mask_type, mask_align); + if (rval) { + DbgSCP(" Bad array for sys_futex (0x%x): rval == %d\n", + (int) arg4, (int) rval); + break; + } + DbgSCP("sys_futex extended args: 0x%lx 0x%lx %d\n", + args[0], args[1], (int) args[2]); + } + + /* Extract UADDR out of AP. 
*/ + GET_PTR(ptr, size, 2, 3, sizeof(int), 0); + if (!size) + break; + + DbgSCP("sys_futex primary args: 0x%lx %d %d\n", + ptr, (int) arg4, (int) arg5); + + rval = sys_futex((u32 *) ptr, (int) arg4, (int) arg5, + (struct __kernel_timespec __user *) args[0], + (u32 __user *) args[1], (int) args[2]); + DbgSC("rval = %ld\n", rval); + break; + case __NR_set_robust_list: + { + unsigned long head; + if (!futex_cmpxchg_enabled) { + rval = -ENOSYS; + break; + } + + /* On glibc side `sizeof (struct robust_list_head) == 0x30'. */ + if (unlikely(arg4 != 0x30)) { + /* -EINVAL will be returned by default. */ + break; + } + + GET_PTR(head, size, 2, 3, 0x30, 0); + if (!size) + break; + + current_thread_info()->pm_robust_list = (long __user *) head; + rval = 0; + break; + } + case __NR_sched_setaffinity: + DbgSCP("sched_setaffinity(): pid %ld, len %ld, " + "ptr 0x%lx : 0x%lx", arg1, arg2, arg4, arg5); + GET_PTR(ptr, size, 4, 5, arg2, 0); + if (!size) + break; + + rval = sys_sched_setaffinity(arg1, arg2, + (unsigned long __user *) ptr); + DbgSCP(" rval = %ld\n", rval); + break; + case __NR_sched_getaffinity: + DbgSCP("sched_getaffinity(): pid %ld, len %ld, " + "ptr 0x%lx : 0x%lx", arg1, arg2, arg4, arg5); + GET_PTR(ptr, size, 4, 5, arg2, 0); + if (!size) + break; + + rval = sys_sched_getaffinity(arg1, arg2, + (unsigned long __user *) ptr); + DbgSCP(" rval = %ld\n", rval); + break; +#ifdef CONFIG_HAVE_EL_POSIX_SYSCALL + case __NR_el_posix: + DbgSCP("sys_el_posix args: 0x%lx : 0x%lx, 0x%lx : 0x%lx, " + "0x%lx : 0x%lx, 0x%lx\n", arg2, arg3, + arg4, arg5, arg6, arg7, arg1); + + GET_PTR_OR_NUMBER(ptr, size, 2, 3, 0, 1); + GET_PTR_OR_NUMBER(ptr2, size, 4, 5, 0, 1); + GET_PTR_OR_NUMBER(ptr3, size, 6, 7, 0, 1); + + rval = sys_el_posix((int) (unsigned long) arg1, + (void *) ptr, (void *) ptr2, (void *) ptr3, + (int) (unsigned long) (arg1 >> 32)); + DbgSC("rval = %ld\n", rval); + break; +#endif + case __NR_clock_settime: + case __NR_clock_gettime: + case __NR_clock_getres: + DbgSCP("syscall 
%ld: clock_id = 0x%lx, timespec = " + "0x%lx : 0x%lx, ", sys_num, arg1, arg2, arg3); + + GET_PTR(ptr, size, 2, 3, sizeof(struct __kernel_timespec), + /* clock_getres is the only among these syscalls which + * may be invoked with `struct __kernel_timespec *tp == + * NULL '. + */ + (sys_num == __NR_clock_getres ? 1 : 0)); + if (!size) + break; + + switch (sys_num) { + case __NR_clock_settime: + rval = sys_clock_settime((clockid_t) arg1, + (const struct __kernel_timespec + __user *) ptr); + break; + case __NR_clock_gettime: + rval = sys_clock_gettime((clockid_t) arg1, + (struct __kernel_timespec __user *) + ptr); + break; + case __NR_clock_getres: + rval = sys_clock_getres((clockid_t) arg1, + (struct __kernel_timespec __user *) + ptr); + break; + } + DbgSCP("rval = %ld\n",rval); + break; + case __NR_timer_create: + GET_PTR(ptr, size, 2, 3, sizeof(struct sigevent), 0); + if (!size) + break; + + GET_PTR(ptr2, size, 4, 5, sizeof(timer_t), 0); + if (!size) + break; + + rval = sys_timer_create((clockid_t) arg1, + (struct sigevent __user *) ptr, + (timer_t __user *) ptr2); + break; + case __NR_clock_nanosleep: + DbgSCP("sys_clock_nanosleep(): clock_id %ld, flags %ld, " + "req = 0x%lx : 0x%lx, rem = 0x%lx : 0x%lx\n", + arg1, arg2, arg4, arg5, arg6, arg7); + + GET_PTR(ptr2, size, 4, 5, sizeof(struct __kernel_timespec), 0); + if (!size) + break; + + GET_PTR(ptr3, size, 6, 7, sizeof(struct __kernel_timespec), 1); + if (!size) + break; + + rval = sys_clock_nanosleep((clockid_t) arg1, (int) arg2, + (const struct __kernel_timespec __user *) ptr2, + (struct __kernel_timespec __user *) ptr3); + DbgSC("rval = %ld\n", rval); + break; + case __NR_set_tid_address: + DbgSCP("set_tid_address(): tidptr = 0x%lx : 0x%lx, ", + arg2, arg3); + + GET_PTR(ptr, size, 2, 3, sizeof(int), 0); + if (!size) + break; + + rval = sys_set_tid_address((int *) ptr); + DbgSCP("rval = %ld\n",rval); + break; + case __NR_olduselib: + case __NR_newuselib: + case __NR__sysctl: + case __NR_socketcall: + case 
__NR_readv: + case __NR_writev: + case __NR_preadv: + case __NR_preadv2: + case __NR_pwritev: + case __NR_pwritev2: + case __NR_select: + case __NR__newselect: + case __NR_pselect6: + case __NR_execve: + /* E2K ABI uses only 8 registers for parameter passing + * so sys_name and tags are packed into one parameter + */ + rval = ((unsigned long)(tags) << 32) | (sys_num & 0xffffffff); + rval = do_protected_syscall((unsigned long)rval, arg1, arg2, + arg3, arg4, arg5, arg6, arg7); + break; + case __NR_P_get_mem: + case __NR_get_mem: + DbgSCP("get_mem(): size = %ld, ", arg1); + base = sys_malloc((size_t) arg1); + DbgSCP("base = 0x%lx ", base); + if (base == 0) { + rval = -ENOMEM; + } else { + rval1 = make_ap_lo(base, arg1, 0, RW_ENABLE); + rval2 = make_ap_hi(base, arg1, 0, RW_ENABLE); + rv1_tag = E2K_AP_LO_ETAG; + rv2_tag = E2K_AP_HI_ETAG; + return_desk = 1; + rval = 0; + } + DbgSCP("rval = %ld (0x%02x : 0x%lx - 0x%02x : 0x%lx)\n", + rval, rv1_tag, rval1, rv2_tag, rval2); + break; + case __NR_P_free_mem: + case __NR_free_mem: + DbgSCP("free_mem(): arg2 = %lx, arg3 = %lx, ", + arg2, arg3); + + GET_PTR(ptr, size, 2, 3, 0, 0); + if (!size) + break; + + if (e2k_ptr_itag(arg2) != AP_ITAG) { + DbgSCP(" Stack pointer; EINVAL\n"); + break; + } + + sys_free((e2k_addr_t) ptr, (size_t) size); + rval = 0; + break; + case __NR_P_dump_umem: + rval = 0; + dump_malloc_cart(); + break; + case __NR_open: + case __NR_creat: + case __NR_unlink: + case __NR_chdir: + case __NR_mknod: + case __NR_chmod: + case __NR_lchown: + case __NR_access: + case __NR_mkdir: + case __NR_rmdir: + case __NR_acct: + case __NR_umount: + case __NR_chroot: + case __NR_sethostname: + case __NR_swapon: + case __NR_truncate: + case __NR_swapoff: + case __NR_chown: + case __NR_delete_module: + DbgSCP("system call %ld (arg{2,3} = 0x%lx : 0x%lx)", + sys_num, arg2, arg3); + + GET_STR(str, 2, 3); + if (!str) + break; + + rval = (*sys_protcall_table[sys_num])( + (unsigned long) str, arg4, arg5, arg6, 0, 0); + DbgSCP(" rval 
= %ld\n", rval); + break; + case __NR_fremovexattr: + DbgSCP("fremovexattr: 0x%lx, (0x%lx : 0x%lx), 0x%lx, 0x%lx, " + "0x%lx", arg1, arg2, arg3, arg4, arg5, arg6); + + GET_STR(str, 2, 3); + if (!str) + break; + + rval = sys_fremovexattr(arg1, str); + DbgSCP(" rval = %ld\n", rval); + break; + case __NR_link: + case __NR_rename: + case __NR_symlink: + case __NR_pivot_root: + case __NR_removexattr: + case __NR_lremovexattr: + DbgSCP("system call %ld (arg{2,3} = 0x%lx : 0x%lx), (arg{4,5} = " + "0x%lx : 0x%lx)", sys_num, arg2, arg3, arg4, arg5); + + GET_STR(str, 2, 3); + if (!str) + break; + + GET_STR(str2, 4, 5); + if (!str2) + break; + + rval = (*sys_protcall_table[sys_num])((unsigned long) str, + (unsigned long) str2, arg6, 0, 0, 0); + DbgSCP(" rval = %ld\n", rval); + break; + case __NR_create_module: + DbgSCP("Unimplemented yet system call %ld\n", sys_num); + rval = -ENOSYS; + break; + case __NR_getcpu: + DbgSCP("getcpu(): cpup = 0x%lx : 0x%lx, " + "nodep = 0x%lx : 0x%lx, " + "cache = 0x%lx : 0x%lx, ", + arg2, arg3, arg4, arg5, arg6, arg7); + + GET_PTR(ptr, size, 2, 3, sizeof(unsigned int), 1); + if (!size) + break; + + GET_PTR(ptr2, size, 4, 5, sizeof(unsigned int), 1); + if (!size) + break; + + GET_PTR(ptr3, size, 6, 7, sizeof(struct getcpu_cache), 1); + if (!size) + break; + + rval = sys_getcpu((unsigned *)ptr, (unsigned *)ptr2, + (struct getcpu_cache *)ptr3); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_rt_tgsigqueueinfo: + DbgSCP("rt_tgsigqueueinfo(): tgid = %ld, pid = %ld, sig = %ld, " + "uinfo = 0x%lx : 0x%lx, ", + arg1, arg2, arg3, arg4, arg5); + + GET_PTR(ptr2, size, 4, 5, sizeof(siginfo_t), 0); + if (!size) + break; + + rval = sys_rt_tgsigqueueinfo(arg1, arg2, arg3, + (siginfo_t *)ptr2); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_openat: + DbgSCP("openat(): dfd = %ld, filename = 0x%lx : 0x%lx, " + "flags = %lx, mode = %lx, ", + arg1, arg2, arg3, arg4, arg5); + + GET_STR(str, 2, 3); + if (!str) + break; + + rval = sys_openat(arg1, str, 
arg4, arg5); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_mkdirat: + DbgSCP("mkdirat(): dfd = %ld, pathname = 0x%lx : 0x%lx, " + "mode = %lx, ", arg1, arg2, arg3, arg4); + + GET_STR(str, 2, 3); + if (!str) + break; + + rval = sys_mkdirat(arg1, str, arg4); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_mknodat: + DbgSCP("mknodat(): dfd = %ld, filename = 0x%lx : 0x%lx, " + "mode = %lx, dev = %ld, ", + arg1, arg2, arg3, arg4, arg5); + GET_STR(str, 2, 3); + if (!str) + break; + + rval = sys_mknodat(arg1, str, arg4, arg5); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_fchownat: + DbgSCP("fchownat(): dfd = %ld, filename = 0x%lx : 0x%lx, " + "user = %ld, group = %ld, flag = %lx, ", + arg1, arg2, arg3, arg4, arg5, arg6); + GET_STR(str, 2, 3); + if (!str) + break; + + rval = sys_fchownat(arg1, str, arg4, arg5, arg6); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_unlinkat: + DbgSCP("unlinkat(): dfd = %ld, pathname = 0x%lx : 0x%lx, " + "flag = %lx, ", arg1, arg2, arg3, arg4); + GET_STR(str, 2, 3); + if (!str) + break; + + rval = sys_unlinkat(arg1, str, arg4); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_renameat: + DbgSCP("renameat(): olddfd = %ld, oldname = 0x%lx : 0x%lx, " + "newdfd = %ld, newname = 0x%lx : 0x%lx, ", + arg1, arg2, arg3, arg4, arg6, arg7); + GET_STR(str, 2, 3); + if (!str) + break; + GET_STR(str3, 6, 7); + if (!str3) + break; + + rval = sys_renameat(arg1, str, arg4, str3); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_linkat: + DbgSCP("linkat(): olddfd = %ld, oldname = 0x%lx : 0x%lx, " + "newdfd = %ld, flags = %lx, newname = 0x%lx : 0x%lx, ", + arg1, arg2, arg3, arg4, arg5, arg6, arg7); + GET_STR(str, 2, 3); + if (!str) + break; + GET_STR(str3, 6, 7); + if (!str3) + break; + + rval = sys_linkat(arg1, str, arg4, str3, arg5); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_symlinkat: + DbgSCP("symlinkat(): oldname = 0x%lx : 0x%lx, " + "newdfd = %ld, newname = 0x%lx : 0x%lx, ", + arg2, arg3, arg4, arg6, arg7); + 
GET_STR(str, 2, 3); + if (!str) + break; + GET_STR(str3, 6, 7); + if (!str3) + break; + + rval = sys_symlinkat(str, arg4, str3); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_readlinkat: + DbgSCP("readlinkat(): dfd = %ld, pathname = 0x%lx : 0x%lx, " + "buf = 0x%lx : 0x%lx, bufsiz = %ld, ", + arg1, arg2, arg3, arg4, arg5, arg6); + GET_STR(str, 2, 3); + if (!str) + break; + GET_STR(str2, 4, 5); + if (!str2) + break; + + rval = sys_readlinkat(arg1, str, str2, arg6); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_fchmodat: + DbgSCP("fchmodat(): dfd = %ld, filename = 0x%lx : 0x%lx, " + "mode = %lx, ", arg1, arg2, arg3, arg4); + GET_STR(str, 2, 3); + if (!str) + break; + + rval = sys_fchmodat(arg1, str, arg4); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_faccessat: + DbgSCP("faccessat(): dfd = %ld, filename = 0x%lx : 0x%lx, " + "mode = %lx, ", arg1, arg2, arg3, arg4); + GET_STR(str, 2, 3); + if (!str) + break; + + rval = sys_faccessat(arg1, str, arg4); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_dup3: + DbgSCP("dup3(): oldfd= %ld, newfd = %ld : flags=0x%lx\n " + , arg1, arg2, arg3); + rval = sys_dup3(arg1, arg2, arg3); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_inotify_init1: + rval = sys_inotify_init1(arg1); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_epoll_create1: + DbgSCP("sys_epoll_create1(): flags=0x%lx\n ", arg1); + rval = sys_epoll_create1(arg1); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_newfstatat: + DbgSCP("sys_fstatat64(): dfd=0x%lx filename=0x%lx: 0x%lx" + " statbuf=0x%lx: 0x%lx,flags=0x%lx\n", + arg1, arg2, arg3, arg4, arg5, arg6); + GET_STR(str, 2, 3); + if (!str) + break; + GET_PTR(ptr, size, 4, 5, sizeof(struct stat), 0); + if (!ptr) + break; + rval = sys_newfstatat((int) arg1, str, (struct stat *) ptr, + (int) arg6); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_futimesat: + DbgSCP("sys_futimesat(): dfd=0x%lx filename=0x%lx: 0x%lx" + " statbuf=0x%lx: 0x%lx,flags=0x%lx\n", + arg1, arg2, 
arg3, arg4, arg5, arg6); + GET_STR(str, 2, 3); + if (!str) + break; + GET_PTR(ptr, size, 4, 5, 2 * sizeof(struct timeval), 1); + if (!ptr) + break; + rval = sys_futimesat((int) arg1, str, (struct timeval *) ptr); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_setcontext: + DbgSCP("sys_setcontext(): ucp=0x%lx:0x%lx, sigsetsize=%ld\n", + arg2, arg3, arg4); + + GET_PTR(ptr, size, 2, 3, sizeof(struct ucontext_prot), 0); + if (!size) + break; + + rval = protected_sys_setcontext((struct ucontext_prot *) ptr, + arg4); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_makecontext: + DbgSCP("sys_makecontext(): ucp=0x%lx:0x%lx, func %lx," + " args_size %lx, args %lx:%lx, sigsetsize=%ld\n", + arg2, arg3, arg4, arg5, arg6, arg7, arg1); + + GET_PTR(ptr, size, 2, 3, sizeof(struct ucontext_prot), 0); + if (!size) + break; + + GET_PTR(ptr2, size, 6, 7, 16, 1); + if (!size) + ptr2 = 0; + + rval = protected_sys_makecontext( + (struct ucontext_prot *) ptr, + (void (*)(void)) arg4, arg5, + (void *) ptr2, arg1); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_swapcontext: + DbgSCP("sys_swapcontext(): oucp=0x%lx:0x%lx, ucp %lx:%lx, sigsetsize=%ld\n", + arg2, arg3, arg4, arg5, arg6); + + GET_PTR(ptr, size, 2, 3, sizeof(struct ucontext_prot), 0); + if (!size) + break; + + GET_PTR(ptr2, size, 4, 5, sizeof(struct ucontext_prot), + 0); + if (!size) + break; + + rval = protected_sys_swapcontext( + (struct ucontext_prot *) ptr, + (struct ucontext_prot *) ptr2, + arg6); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_freecontext: + DbgSCP("sys_freecontext(): ucp=0x%lx:0x%lx\n", + arg2, arg3); + + GET_PTR(ptr, size, 2, 3, sizeof(struct ucontext_prot), 0); + if (!size) + break; + + rval = protected_sys_freecontext( + (struct ucontext_prot *) ptr); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_set_backtrace: + GET_PTR(ptr, size, 2, 3, arg4 * 8, 1); + if ((arg4 * 8) && !size) + break; + rval = sys_set_backtrace((unsigned long *) ptr, + arg4, arg5, arg6); + break; + case 
__NR_access_hw_stacks: + DbgSCP("access_hw_stacks(): mode = 0x%lx, " + "buf_size = 0x%lx, " + "frame_ptr = 0x%lx : 0x%lx, " + "buf = 0x%lx : 0x%lx, " + "real_size = 0x%lx : 0x%lx, ", + arg1 >> 32, arg1 & 0xffffffffUL, + arg2, arg3, arg4, arg5, arg6, arg7); + + GET_PTR(ptr, size, 2, 3, sizeof(unsigned long long), 1); + if (!size) + break; + + GET_PTR(ptr2, size, 4, 5, arg1 & 0xffffffffUL, 1); + /* Take into account that BUF_SIZE provided by the user may + be zero in which case it's quite OK to get for BUF `size + == 0' (note that `GET_PTR ()' will return `size == 0' also + for `buf == NULL' in such a case). */ + if ((arg1 & 0xffffffffUL) && !size) + break; + + GET_PTR(ptr3, size, 6, 7, sizeof(u64), 1); + if (!size) + break; + + rval = sys_access_hw_stacks(arg1 >> 32, + (unsigned long long *) ptr, + (char __user *) ptr2, + arg1 & 0xffffffffUL, + (void __user *) ptr3); + DbgSCP("rval = %ld\n", rval); + break; + case __NR_ioctl: + /* The exact size of `char *argp' AP required by this or that + * request is obviously unknown here. For now stupidly require + * AP to be at least one byte long if it's not NULL, but at the + * same time allow for NULL. The rationale is that some `ioctl + * ()' requests requiring no `argp' should accept NULL, however, + * there is no point in passing a zero sized non-NULL buffer to + * `ioctl ()'. 
*/ + GET_PTR(ptr, size, 4, 5, 1, 1); + rval = sys_ioctl((unsigned int) arg1, (unsigned int) arg2, + (unsigned long) ptr); + break; + case __NR_fcntl: + GET_PTR_OR_NUMBER(ptr, size, 4, 5, 0, 1); + rval = sys_fcntl((unsigned int) arg1, (unsigned int) arg2, + (unsigned long) ptr); + break; + case __NR_fallocate: + DbgSCP("fallocate(arg1=%d, arg2=%d, arg3=0x%lx, arg4=0x%lx)\n", + (int) arg1, (int) arg2, (off_t) arg3, (off_t) arg4); + rval = sys_fallocate((int) arg1, (int) arg2, (off_t) arg3, + (off_t) arg4); + break; + case __NR_getresuid: + GET_PTR(ptr, size, 2, 3, sizeof(uid_t), 0); + if (!size) { + rval = -EFAULT; + break; + } + + GET_PTR(ptr2, size, 4, 5, sizeof(uid_t), 0); + if (!size) { + rval = -EFAULT; + break; + } + + GET_PTR(ptr3, size, 6, 7, sizeof(uid_t), 0); + if (!size) { + rval = -EFAULT; + break; + } + + rval = sys_getresuid((uid_t *) ptr, (uid_t *) ptr2, + (uid_t *) ptr3); + break; + case __NR_getresgid: + GET_PTR(ptr, size, 2, 3, sizeof(gid_t), 0); + if (!size) { + rval = -EFAULT; + break; + } + + GET_PTR(ptr2, size, 4, 5, sizeof(gid_t), 0); + if (!size) { + rval = -EFAULT; + break; + } + + GET_PTR(ptr3, size, 6, 7, sizeof(gid_t), 0); + if (!size) { + rval = -EFAULT; + break; + } + + rval = sys_getresgid((gid_t *) ptr, (gid_t *) ptr2, + (gid_t *) ptr3); + break; + case __NR_mount: + GET_PTR(ptr, size, 2, 3, 0, 0); + rval = convert_array((long *) ptr, args, 80, 5, 1, + 0x37f, 0x3ff); + if (rval) { + DbgSCP(" Bad array for sys_mount\n"); + break; + } + rval = sys_mount((char *) args[0], (char *) args[1], + (char *) args[2], (unsigned long) args[3], + (char *) args[4]); + break; + default: + if ((u64) sys_num >= NR_syscalls) { + rval = -ENOSYS; + break; + } + DbgSCP("system call %ld (0x%lx, 0x%lx, 0x%lx, 0x%lx) ", + sys_num, arg1, arg2, arg3, arg4); + rval = (*sys_protcall_table[sys_num])(arg1, arg2, arg3, arg4, + arg5, arg6); + DbgSCP(" rval = %ld\n", rval); + break; + } + + SAVE_PSYSCALL_RVAL(regs, rval, rval1, rval2, + rv1_tag, rv2_tag, return_desk); 
+ + /* Trace syscall exit */ + if (unlikely(current_thread_info()->flags & _TIF_WORK_SYSCALL_TRACE)) { + /* Call tracer */ + syscall_trace_leave(regs); + + /* Update return value, since tracer could have changed it */ + RESTORE_PSYSCALL_RVAL(regs, rval, rval1, rval2); + } + + /* It works only under CONFIG_FTRACE flag */ + add_info_syscall(sys_num, start_tick); + + /* We may skip assigning 'args' here because + * it is used only in the switch above. + * args = (long *) ((((unsigned long) regs) + sizeof(struct pt_regs) + * + 0xfUL) & (~0xfUL)); + */ + + NEW_CHECK_PT_REGS_ADDR(prev_regs, regs, usd_lo_prev); + + finish_syscall(regs, FROM_SYSCALL_PROT_10, true); +} + + +static inline +unsigned long e2k_dscr_ptr_size(long low, long hiw, long min_size, + unsigned int *ptr_size, u64 sbr_hi, + u16 sys_num, u8 argnum, u8 *fatal) +{ + /* NB> 'min_size' may be negative; this is why it has 'long' type */ + e2k_ptr_t ptr; + + AW(ptr).lo = low; + AW(ptr).hi = hiw; + *ptr_size = AS(ptr).size - AS(ptr).curptr; + + if (*ptr_size < min_size) { +#define ERR_DESCRIPTOR_SIZE "System call #%d arg #%d: Pointer is too small: %d < %ld\n" + DbgSCP_ALERT(ERR_DESCRIPTOR_SIZE, + sys_num, argnum, *ptr_size, min_size); + *ptr_size = 0; + *fatal = 1; + return 0; + } + return E2K_PTR_PTR(ptr, sbr_hi); +} + +#define MASK_PROT_ARG_LONG 0 +#define MASK_PROT_ARG_DSCR 1 +#define MASK_PROT_ARG_LONG_OR_DSCR 2 +#define MASK_PROT_ARG_STRING 3 +#define MASK_PROT_ARG_INT 4 +#define MASK_PROT_ARG_FPTR 5 +#define MASK_PROT_ARG_NOARG 0xf +#define ADJUST_SIZE_MASK 1 + +static inline int null_prot_ptr(u32 tag, u64 arg) +{ + return tag == E2K_NULLPTR_ETAG && !arg; +} + +/* + * This function takes couple of arguments to protected system call, + * validates these, and outputs corresponding argument for kernel system call. 
+ * Arguments: + * sys_num - system call number; + * tag - actual argument tags packed (4 + 4 bits for lo/hi arg component); + * mask - system call mask (expected argument types); + * a_num - argument number in kernel system call; + * descr_lo/hi - protected argument couple; + * min_size - minimum allowed argument-descriptor size (if known); + * sbr_hi - stack base pointer (hi); + * fatal - signal to let caller know that this argument is wrong, and + * it would be unsafe to proceed with the system call. + */ +static inline +unsigned long get_protected_ARG(u16 sys_num, u64 tag, u32 mask, u8 a_num, + unsigned long descr_lo, unsigned long descr_hi, + long min_size, u64 sbr_hi, u8 *fatal) +{ +#define ERR_BAD_ARG_TAG \ + "System call #%u/%s: unexpected tag (0x%x) in arg #%d.\n" +#define ERR_MISSED_ARG_TAG \ + "\t\tArg #%d is missed or uninitialized.\n" +#define ERR_NONPTR_NOT_ALLOWED \ + "System call #%u/%s: not a pointer is not allowed in arg #%d.\n" +#define ERR_NOT_A_STRING \ + "System call #%u/%s: not a null-terminated string in arg #%d.\n" +#define ERR_UNEXPECTED_DSCR \ + "System call #%u/%s: unexpected descriptor in arg #%d.\n" + u8 msk = (mask >> (a_num * 4)) & 0xf; + unsigned long ptr; /* the result */ + unsigned int size; + u64 tag_lo = tag & 0xf; + + if ((tag == ETAGDWQ) || (msk == MASK_PROT_ARG_NOARG)) + return 0L; /* arg was not passed or irrelevant */ + else if ((msk == MASK_PROT_ARG_INT) || + /* The check below does the following: + * - in the current ABI syscall argument takes 4 words (16 bytes); + * - if syscall argument is of type 'int' (not 'long'), then + * only lowest word (word #0) gets filled by compiler while + * other 3 words contain trash (the contents of previous call); + * - if tag of the lowest word is numeric tag (i.e. '0'), then + * this argument is definitely of type 'int' and + * - we may remove trash in the word #1 to make it simpler. 
+ */ + (tag_lo && !(tag_lo & 0x3))) { /* numerical tag in lower word */ + /* this is 'int' argument */ + tag_lo &= 0x3; + tag = tag_lo; + descr_lo = (int) descr_lo; /* removing trash in higher word */ + } + +#if DEBUG_SYSCALLP_CHECK + if ((tag != ETAGDWQ) + && (tag_lo != ETAGNUM) + && (tag != ETAGAPQ) + && (tag != ETAGPLD) + && (tag != ETAGPLQ)) { + DbgSCP_ERR(ERR_BAD_ARG_TAG, + sys_num, sys_call_ID_to_name[sys_num], (u8)tag, a_num); + if (((tag == ETAGDWD) + && ((msk == MASK_PROT_ARG_LONG) || (msk == MASK_PROT_ARG_INT))) + || ((tag == ETAGDWS) && (msk == MASK_PROT_ARG_INT))) + DbgSCP_ERR(ERR_MISSED_ARG_TAG, a_num); + DbgSCP("%s: tag=0x%llx tag_lo=0x%llx msk=0x%x a_num=%d\n", + __func__, tag, tag_lo, (int)msk, a_num); + PM_EXCEPTION_IF_ORTH_MODE(SIGILL, ILL_ILLOPN, -EINVAL); + *fatal = 1; + } +#endif /* DEBUG_SYSCALLP_CHECK */ + + if ((tag == ETAGPLD) || (tag == ETAGPLQ)) { + e2k_pl_lo_t pl_lo; + + AW(pl_lo) = descr_lo; + return pl_lo.PL_lo_target; + } + + /* First, we check if the argument is non-pointer: */ + if (tag != ETAGAPQ) { + unsigned long ret = (tag == ETAGDWQ) ? 
0 : descr_lo; + + if (unlikely(!null_prot_ptr(tag_lo, descr_lo) && + (msk == MASK_PROT_ARG_DSCR || + msk == MASK_PROT_ARG_STRING))) { + if (!PM_SYSCALL_WARN_ONLY) + *fatal = 1; + DbgSCP_ALERT(ERR_NONPTR_NOT_ALLOWED, + sys_num, sys_call_ID_to_name[sys_num], a_num); + PM_EXCEPTION_IF_ORTH_MODE(SIGILL, ILL_ILLOPN, -EINVAL); + } + + return ret; + } + + /* Finally, this is descriptor; getting pointer from it: */ + ptr = e2k_dscr_ptr_size(descr_lo, descr_hi, min_size, &size, + sbr_hi, sys_num, a_num, fatal); + + /* Second, we check if the argument is string: */ + if (msk == MASK_PROT_ARG_STRING) { + if (e2k_ptr_str_check((char __user *) ptr, size)) { + if (!PM_SYSCALL_WARN_ONLY) + *fatal = 1; + DbgSCP_ALERT(ERR_NOT_A_STRING, + sys_num, sys_call_ID_to_name[sys_num], a_num); + } + } else { + /* Eventually, we check if this is proper pointer: */ + if (unlikely(sys_num && msk != MASK_PROT_ARG_DSCR && + msk != MASK_PROT_ARG_LONG_OR_DSCR)) { + if (!PM_SYSCALL_WARN_ONLY) + *fatal = 1; + DbgSCP_ALERT(ERR_UNEXPECTED_DSCR, sys_num, + sys_call_ID_to_name[sys_num], a_num); + PM_EXCEPTION_IF_ORTH_MODE(SIGILL, ILL_ILLOPN, -EINVAL); + } + } + + return ptr; +} + +#define RW_BUFSIZE_WARN \ + "Syscall #%u/%s: Count exceeds the descriptor (arg #%d) size: %d > %d\n" +#define RW_COUNT_TRUNCATED "Count truncated down to the descriptor size (%d)\n" + +static inline +int check_arg_descr_size(int sys_num, int arg_num, int neg_size, + struct pt_regs *regs, int adjust_bufsize, + long *arg3, long *arg5, long *arg7) +/* In case of negative size in syscall argument mask, + * calculate effective argument size and update args3-7 + */ +{ + int size, descr_size, index; + + if (neg_size >= 0) { + pr_alert("FATAL: bad 'neg_size' (%d) at %s:%d !!!\n", + neg_size, __FILE__, __LINE__); + return neg_size; /* nothing to do with this */ + } + + index = -neg_size*2 - 1; + size = regs->args[index]; + if (!adjust_bufsize) + return size; + + descr_size = e2k_ptr_size(regs->args[arg_num * 2 - 1], + regs->args[arg_num 
* 2], 0); + + if (likely(descr_size >= size)) + return size; + + /* Requested size appeared bigger than descriptor size. + * Adjusting the requested size value: + */ + DbgSCP_WARN(RW_BUFSIZE_WARN, + (u32)sys_num, sys_call_ID_to_name[sys_num], arg_num, + size, descr_size); + if (PM_SYSCALL_WARN_ONLY && adjust_bufsize) + DbgSCP_WARN(RW_COUNT_TRUNCATED, descr_size); + size = descr_size; + + if (!PM_SYSCALL_WARN_ONLY) { + e2k_ptr_lo_t descr_lo; + e2k_ptr_hi_t descr_hi; + void *addr; + + descr_lo.word = regs->args[arg_num * 2 - 1]; + descr_hi.word = regs->args[arg_num * 2]; + addr = (void *)(descr_lo.fields.ap.base + descr_hi.fields.size); + force_sig_bnderr(addr, (void *)descr_lo.fields.ap.base, addr); + } + + if (adjust_bufsize) + switch (index) { + case 3: + *arg3 = size; + break; + case 5: + *arg5 = size; + break; + case 7: + *arg7 = size; + break; + default: + pr_alert("FATAL: bad 'index' (%d) at %s:%d !!!\n", + index, __FILE__, __LINE__); + break; + } + + return size; +} + + +__section(".entry.text") +SYS_RET_TYPE notrace ttable_entry8_C(u64 sys_num, u64 tags, long arg1, + long arg2, long arg3, long arg4, struct pt_regs *regs) +{ +#ifdef CONFIG_DEBUG_PT_REGS + e2k_usd_lo_t usd_lo_prev; + struct pt_regs *prev_regs = regs; +#endif + long rval = -EINVAL; + long arg5 = regs->args[5], arg6 = regs->args[6], arg7 = regs->args[7], + arg8 = regs->args[8], arg9 = regs->args[9], arg10 = regs->args[10], + arg11 = regs->args[11], arg12 = regs->args[12]; + unsigned long a1, a2, a3, a4, a5, a6; + protected_system_call_func sys_call; + unsigned long ti_flags = current_thread_info()->flags; + u32 mask; + int size1, size2, size3, size4; + u16 size5, size6; + u64 sbr_hi = GET_SBR_HI(); + u8 wrong_arg = 0; /* signal that an argument detected wrong */ +#ifdef CONFIG_E2K_PROFILING + register long start_tick = NATIVE_READ_CLKR_REG_VALUE(); + register long clock1; +#endif + +#ifdef CONFIG_DEBUG_PT_REGS + /* + * pt_regs structure is placed as local data of the + * trap handler (or system 
call handler) function + * into the kernel local data stack + */ + usd_lo_prev = NATIVE_NV_READ_USD_LO_REG(); +#endif + init_pt_regs_for_syscall(regs); + SAVE_STACK_REGS(regs, current_thread_info(), true, false); + regs->sys_num = sys_num; + regs->return_desk = 0; + + /* Important: this must be before the first call + * but after saving %wd register. + */ + if (cpu_has(CPU_HWBUG_VIRT_PSIZE_INTERCEPTION)) { + e2k_wd_t wd = READ_WD_REG(); + wd.psize = 0x40; + WRITE_WD_REG(wd); + } + DbgSCP("\nsys_num = %lld: tags = 0x%llx, arg1 = 0x%lx, arg2 = 0x%lx, arg3 = 0x%lx, arg4 = 0x%lx\n" + "\targ5 = 0x%lx, arg6 = 0x%lx, arg7 = 0x%lx, arg8 = 0x%lx, arg9 = 0x%lx, arg10 = 0x%lx\n", + sys_num, tags, arg1, arg2, arg3, arg4, + arg5, arg6, arg7, arg8, arg9, arg10); + +#ifdef CONFIG_E2K_PROFILING + read_ticks(clock1); + info_save_stack_reg(clock1); +#endif + if (sys_num >= NR_syscalls) { + sys_call = (protected_system_call_func) sys_ni_syscall; + mask = size1 = size2 = 0; + } else { + sys_call = sys_call_table_entry8[sys_num]; +#if DEBUG_SYSCALLP_CHECK +#define SYSCALL_NOT_AVAILABLE_IN_PM \ +"!!! System call #%lld (%s) is not available in the protected mode !!!\n" + if (sys_call == (protected_system_call_func)sys_ni_syscall) + DbgSCP_ALERT(SYSCALL_NOT_AVAILABLE_IN_PM, sys_num, + sys_call_ID_to_name[sys_num]); +#endif + mask = sys_protcall_args[sys_num].mask; + size1 = sys_protcall_args[sys_num].size1; + size2 = sys_protcall_args[sys_num].size2; + } + + current_thread_info()->pt_regs = regs; + WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_ENABLED)); + + DbgSCP("_NR_ %lld/%s start: mask=0x%x current %px pid %d\n", sys_num, + (sys_num < NR_syscalls) ? 
sys_call_ID_to_name[sys_num] : "sys_ni_syscall", + mask, current, current->pid); + + /* All other arguments have been saved in assembler already */ + regs->args[1] = arg1; + regs->args[2] = arg2; + regs->args[3] = arg3; + regs->args[4] = arg4; + regs->tags = tags; + + if (likely(sys_num < NR_syscalls)) { + + if (size1 < 0) + size1 = check_arg_descr_size(sys_num, 1, size1, regs, + mask & ADJUST_SIZE_MASK, + &arg3, &arg5, &arg7); + size3 = sys_protcall_args[sys_num].size3; + if (size3 < 0) + size3 = check_arg_descr_size(sys_num, 3, size3, regs, + mask & ADJUST_SIZE_MASK, + &arg3, &arg5, &arg7); + size4 = sys_protcall_args[sys_num].size4; + /* So far we don't have negative size in the 4th row. + * To be added in the future if needed: + if (size4 < 0) + size4 = regs->args[-size4]; + */ + size5 = sys_protcall_args[sys_num].size5; + /* So far we don't have negative size in the 5th row. + * To be added in the future if needed: + if (size5 < 0) + size5 = regs->args[-size5]; + */ + if (size2 < 0) + size2 = check_arg_descr_size(sys_num, 2, size2, regs, + mask & ADJUST_SIZE_MASK, + &arg3, &arg5, &arg7); + size6 = sys_protcall_args[sys_num].size6; + /* So far we don't have negative size in the 6th row. 
+ * To be added in the future if needed: + if (size6 < 0) + size6 = regs->args[-size6]; + */ + + a1 = get_protected_ARG(sys_num, (tags >> 8) & 0xffUL, mask, 1, + arg1, arg2, size1, sbr_hi, &wrong_arg); + a2 = get_protected_ARG(sys_num, (tags >> 16) & 0xffUL, mask, 2, + arg3, arg4, size2, sbr_hi, &wrong_arg); + a3 = get_protected_ARG(sys_num, (tags >> 24) & 0xffUL, mask, 3, + arg5, arg6, size3, sbr_hi, &wrong_arg); + a4 = get_protected_ARG(sys_num, (tags >> 32) & 0xffUL, mask, 4, + arg7, arg8, size4, sbr_hi, &wrong_arg); + a5 = get_protected_ARG(sys_num, (tags >> 40) & 0xffUL, mask, 5, + arg9, arg10, size5, sbr_hi, &wrong_arg); + a6 = get_protected_ARG(sys_num, (tags >> 48) & 0xffUL, mask, 6, + arg11, arg12, size6, sbr_hi, &wrong_arg); + + } /* (sys_num < NR_syscalls) */ + + DbgSCP("system call %lld (0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx)\n", + sys_num, a1, a2, a3, a4, a5, a6); + if (likely(!(ti_flags & _TIF_WORK_SYSCALL_TRACE) && !wrong_arg)) { + /* Fast path */ + rval = sys_call(a1, a2, a3, a4, a5, a6, regs); + SAVE_SYSCALL_RVAL(regs, rval); + } else if (likely(!wrong_arg)) { + /* Trace syscall enter */ + SAVE_SYSCALL_ARGS(regs, a1, a2, a3, a4, a5, a6); + syscall_trace_entry(regs); + /* Update args, since tracer could have changed them */ + RESTORE_SYSCALL_ARGS(regs, sys_num, a1, a2, a3, a4, a5, a6); + + save_syscall_args_prot(regs, arg1, arg2, arg3, arg4, arg5, arg6, + arg7, arg8, arg9, arg10, arg11, arg12, tags); + rval = sys_call(a1, a2, a3, a4, a5, a6, regs); + SAVE_SYSCALL_RVAL(regs, rval); + + /* Trace syscall exit */ + SAVE_SYSCALL_ARGS(regs, a1, a2, a3, a4, a5, a6); + syscall_trace_leave(regs); + /* Update rval, since tracer could have changed it */ + RESTORE_SYSCALL_RVAL(regs, rval); + + /* For syscall restart */ + SAVE_SYSCALL_ARGS(regs, arg1, arg2, arg3, arg4, arg5, arg6); + } else /* (unlikely(wrong_arg)) */ { + rval = -EFAULT; + SAVE_SYSCALL_RVAL(regs, rval); + } + DbgSCP("syscall %lld : rval = 0x%lx / %ld\n", sys_num, rval, rval); + + /* It works 
only under CONFIG_FTRACE flag */ + add_info_syscall(sys_num, start_tick); + + /* We may skip assigning 'args' here because + * it is used only in the switch above. + * args = (long *) ((((unsigned long) regs) + sizeof(struct pt_regs) + * + 0xfUL) & (~0xfUL)); + */ + + NEW_CHECK_PT_REGS_ADDR(prev_regs, regs, usd_lo_prev); + + finish_syscall(regs, FROM_SYSCALL_PROT_8, true); +} + +/* + * this is a copy of sys_socketcall (net/socket.c) + * + * The type of structure depend on first parameter + */ +notrace __section(".entry.text") +static void get_socketcall_mask(long call, long *mask_type, long *mask_align, + int *fields) +{ + + switch(call) + { + case SYS_SOCKET: + *mask_type = 0x15; + *mask_align = 0x15; + *fields = 3; + /* err = sys_socket(a[0], a[1], a[2]); */ + break; + case SYS_BIND: + *mask_type = 0x1d; + *mask_align = 0x1f; + *fields = 3; + /* err = sys_bind(a[0], + (struct sockaddr __user *) a[1], a[2]); */ + break; + case SYS_CONNECT: + *mask_type = 0x1d; + *mask_align = 0x1f; + *fields = 3; + /* err = sys_connect(a[0], + (struct sockaddr __user *) a[1], a[2]); */ + break; + case SYS_LISTEN: + *mask_type = 0x5; + *mask_align = 0x5; + *fields = 2; + /* err = sys_listen(a[0], a[1]); */ + break; + case SYS_ACCEPT: + *mask_type = 0x3d; + *mask_align = 0x3f; + *fields = 3; + /* err = sys_accept(a[0], + (struct sockaddr __user *) a[1], (int __user*) a[2]);*/ + break; + case SYS_GETSOCKNAME: + *mask_type = 0x3d; + *mask_align = 0x3f; + *fields = 3; + /* err = sys_getsockname(a[0], + (struct sockaddr __user*) a[1], (int __user *) a[2]);*/ + break; + case SYS_GETPEERNAME: + *mask_type = 0x3d; + *mask_align = 0x3f; + *fields = 3; + /*err = sys_getpeername(a[0], + (struct sockaddr __user *) a[1], (int __user *)a[2]);*/ + break; + case SYS_SOCKETPAIR: + *mask_type = 0xd5; + *mask_align = 0xf5; + *fields = 4; + /*err = sys_socketpair(a[0], a[1], a[2], + (int __user *)a[3]);*/ + break; + case SYS_SEND: + *mask_type = 0x5d; + *mask_align = 0x5f; + *fields = 4; + /* err = 
sys_send(a[0], (void __user *) a[1], a[2], + a[3]); */ + break; + case SYS_SENDTO: + *mask_type = 0x75d; + *mask_align = 0x7df; + *fields = 6; + /* err = sys_sendto(a[0], (void __user *) a[1], a[2], + a[3], (struct sockaddr __user *) a[4], a[5]); */ + break; + case SYS_RECV: + *mask_type = 0x5d; + *mask_align = 0x5f; + *fields = 4; + /* err = sys_recv(a[0], (void __user *) a[1], + a[2], a[3]); */ + break; + case SYS_RECVFROM: + *mask_type = 0xf5d; + *mask_align = 0xfdf; + *fields = 6; + /* err = sys_recvfrom(a[0], (void __user *) a[1], a[2], + a[3], (struct sockaddr __user *) a[4], + (int __user *) a[5]); */ + break; + case SYS_SHUTDOWN: + *mask_type = 0x5; + *mask_align = 0x5; + *fields = 2; + /* err = sys_shutdown(a[0], a[1]); */ + break; + case SYS_SETSOCKOPT: + *mask_type = 0x1d5; + *mask_align = 0x1f5; + *fields = 5; + /* err = sys_setsockopt(a[0], a[1], a[2], + (char __user *)a[3], a[4]); */ + break; + case SYS_GETSOCKOPT: + *mask_type = 0x3d5; + *mask_align = 0x3f5; + *fields = 5; + /* err = sys_getsockopt(a[0], a[1], a[2], + (char __user *) a[3], (int __user *)a[4]); */ + break; + case SYS_SENDMSG: + *mask_type = 0x1d; + *mask_align = 0x1f; + *fields = 3; + /* err = sys_sendmsg(a[0], + (struct msghdr __user *) a[1], a[2]);*/ + break; + case SYS_RECVMSG: + *mask_type = 0x1d; + *mask_align = 0x1f; + *fields = 3; + /* err = sys_recvmsg(a[0], + (struct msghdr __user *) a[1], a[2]); */ + break; + default: + *mask_type = 0x0; + *mask_align = 0x0; + *fields = 0; + break; + } +} + +notrace __section(".entry.text") +static long check_select_fs(e2k_ptr_t *fds_p, fd_set *fds[3]) +{ + volatile int res = 0; + int i; + + /* Now we'll touch user addresses. 
Let's do it carefuly */ + TRY_USR_PFAULT { + for (i = 0; i < 3; i++, fds_p++) { + if (AWP(fds_p).lo == 0 + && AWP(fds_p).hi == 0 + && (NATIVE_LOAD_TAGD(&AWP(fds_p).hi) == 0) + && (NATIVE_LOAD_TAGD(&AWP(fds_p).lo) == 0)) { + fds[i] = (fd_set *) 0; + continue; + } + + if ((NATIVE_LOAD_TAGD(&AWP(fds_p).hi) != E2K_AP_HI_ETAG) || + (NATIVE_LOAD_TAGD(&AWP(fds_p).lo) != E2K_AP_LO_ETAG)) { + DbgSCP(" No desk fds[%d]; EINVAL\n", i); + res = -EINVAL; + break; + } + if (ASP(fds_p).size - ASP(fds_p).curptr < + sizeof (fd_set)) { + DbgSCP(" Too small fds[%d];\n", i); + res = -EINVAL; + break; + } + fds[i] = (fd_set *)E2K_PTR_PTR(fds_p[i], GET_SBR_HI()); + } + } CATCH_USR_PFAULT { + res = -EINVAL; + } END_USR_PFAULT + + return res; +} + +#define get_user_space(x) arch_compat_alloc_user_space(x) + +notrace __section(".entry.text") +static long do_protected_syscall(unsigned long sys_num, const long arg1, + const long arg2, const long arg3, const long arg4, + const long arg5, const long arg6, const long arg7) +{ + long rval = -EINVAL; + unsigned long ptr, ptr2; + unsigned int size; + char *str; + long mask_type, mask_align; + int fields; + unsigned long tags = sys_num >> 32; + + sys_num = sys_num & 0xffffffff; + DbgSCP("protected call %ld: 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx", + sys_num, arg1, arg2, arg3, arg4, arg5); + + switch (sys_num) { + case __NR_olduselib: { + kmdd_t kmdd; + umdd_old_t *umdd; + + if (IS_CPU_ISET_V6()) + return -ENOSYS; + + GET_STR(str, 2, 3); + if (!str) + break; + + GET_PTR(ptr, size, 4, 5, MDD_OLD_PROT_SIZE, 0); + if (!size) + break; + + if (current->thread.flags & E2K_FLAG_3P_ELF32) + rval = sys_load_cu_elf32_3P(str, &kmdd); + else + rval = sys_load_cu_elf64_3P(str, &kmdd); + + if (rval) { + DbgSCP("failed, could not load\n"); + break; + } + + umdd = (umdd_old_t *) ptr; + + rval |= PUT_USER_AP(&umdd->mdd_got, kmdd.got_addr, + kmdd.got_len, 0, RW_ENABLE); + if (kmdd.init_got_point) + rval |= PUT_USER_PL_V2(&umdd->mdd_init_got, + kmdd.init_got_point); + else 
+ rval |= put_user(0L, &umdd->mdd_init_got.word); + + if (kmdd.entry_point) + rval |= PUT_USER_PL_V2(&umdd->mdd_start, + kmdd.entry_point); + else + rval |= put_user(0L, &umdd->mdd_start.word); + + if (kmdd.init_point) + rval |= PUT_USER_PL_V2(&umdd->mdd_init, + kmdd.init_point); + else + rval |= put_user(0L, &umdd->mdd_init.word); + + if (kmdd.fini_point) + rval |= PUT_USER_PL_V2(&umdd->mdd_fini, + kmdd.fini_point); + else + rval |= put_user(0L, &umdd->mdd_fini.word); + break; + } + case __NR_newuselib: { + kmdd_t kmdd; + umdd_t *umdd; + + GET_STR(str, 2, 3); + if (!str) + break; + + GET_PTR(ptr, size, 4, 5, MDD_PROT_SIZE, 0); + if (!size) + break; + + if (current->thread.flags & E2K_FLAG_3P_ELF32) + rval = sys_load_cu_elf32_3P(str, &kmdd); + else + rval = sys_load_cu_elf64_3P(str, &kmdd); + + if (rval) { + DbgSCP("failed, could not load\n"); + break; + } + BUG_ON(kmdd.cui == 0); + + umdd = (umdd_t *) ptr; + + rval |= PUT_USER_AP(&umdd->mdd_got, kmdd.got_addr, + kmdd.got_len, 0, RW_ENABLE); + + if (kmdd.init_got_point) { + rval |= PUT_USER_PL(&umdd->mdd_init_got, + kmdd.init_got_point, + kmdd.cui); + } else { + rval |= put_user(0L, &umdd->mdd_init_got.PLLO_value); + rval |= put_user(0L, &umdd->mdd_init_got.PLHI_value); + } + + break; + } + case __NR__sysctl: { + struct __sysctl_args *new_arg; + + GET_PTR(ptr, size, 2, 3, 0, 0); + new_arg = get_user_space(sizeof(struct __sysctl_args)); + if ((rval = convert_array((long *) ptr, (long *)new_arg, size, + 6, 1, 0x3f3, 0x3ff))) { + DbgSCP(" Bad array for sys_sysctl\n"); + return -EINVAL; + } + + rval = sys_sysctl(new_arg); + break; + } + case __NR_socketcall: { + long *args; + get_socketcall_mask(arg1, &mask_type, &mask_align, &fields); + + if (fields == 0) { + DbgSCP("Bad socketcall number %ld\n", arg1); + return -EINVAL; + } + + /* `convert_array ()' below will determine if AP.size is large + * enough for this request. 
*/ + GET_PTR(ptr, size, 2, 3, 0, 0); + if (!ptr) { + DbgSCP("NULL pointer passed to socketcall (%d)", + (int) arg1); + return -EFAULT; + } + + /* + * Need an additional conversions of arguments + * for syscalls recvmsg/sendmsg + */ + if ((arg1 == SYS_SENDMSG) || (arg1 == SYS_RECVMSG)) { +#define MASK_MSGHDR_TYPE 0x773 /* type mask for struct msghdr */ +#define MASK_MSGHDR_ALIGN 0x17ff /* alignment mask for msghdr structure */ +#define SIZE_MSGHDR 96 /* size of struct msghdr in user space */ +#define MASK_IOVEC_TYPE 0x7 /* mask for converting of struct iovec */ +#define MASK_IOVEC_ALIGN 0xf /* alignment mask for struct iovec */ +#define SIZE_IOVEC 32 /* size of struct iovec in user space */ + + /* + * Structures user_msghdr and iovec contain pointers + * inside, therefore they need to be additionally + * converted with saving results in these structures + */ + struct user_msghdr *converted_msghdr; + struct iovec *converted_iovec; + + /* + * Allocate space on user stack for additional + * structures for saving of converted parameters + */ + args = get_user_space((fields * 8) + + sizeof(struct user_msghdr) + + sizeof(struct iovec)); + /* Convert args array for socketcall from ptr */ + rval = convert_array((long *) ptr, args, size, + fields, 1, mask_type, + mask_align); + + if (rval) { + DbgSCP(" Bad array for socketcall (%ld)", arg1); + DbgSCP(" size=%d\n", size); + return -EINVAL; + } + + /* Convert struct msghdr from args[1] */ + converted_msghdr = (struct user_msghdr *) (args + + (fields * 8)); + rval = convert_array((long *) args[1], + (long *) converted_msghdr, + SIZE_MSGHDR, 7, 1, MASK_MSGHDR_TYPE, + MASK_MSGHDR_ALIGN); + + if (rval) { + DbgSCP("Bad user_msghdr in args[1]\n"); + return -EINVAL; + } + + /* Convert struct iovec from msghdr->msg_iov */ + converted_iovec = (struct iovec *) (converted_msghdr + + sizeof(struct user_msghdr)); + rval = convert_array((long *) converted_msghdr->msg_iov, + (long *) converted_iovec, + SIZE_IOVEC, 2, 1, MASK_IOVEC_TYPE, + 
MASK_IOVEC_ALIGN); + + if (rval) { + DbgSCP("Bad struct iovec in msghdr\n"); + return -EINVAL; + } + + /* Assign args[1] to pointers to converted structures */ + args[1] = (long) converted_msghdr; + converted_msghdr->msg_iov = converted_iovec; + /* Other socketcalls */ + } else { + /* Allocate space on user stack for args array */ + args = get_user_space(fields * 8); + /* Convert args array for socketcall from ptr */ + rval = convert_array((long *) ptr, args, size, + fields, 1, mask_type, + mask_align); + + if (rval) { + DbgSCP(" Bad array for socketcall (%ld)", arg1); + DbgSCP(" size=%d\n", size); + return -EINVAL; + } + } + + /* + * Call socketcall handler function with passing of + * arguments to it + */ + rval = sys_socketcall((int) arg1, (unsigned long *) args); + DbgSCP("socketcall (%d) returned %ld\n", (int) arg1, rval); + break; + } + case __NR_ipc: { + long *args; + get_ipc_mask(arg1, &mask_type, &mask_align, &fields); + + if (fields == 0) { + DbgSCP("Bad syscall_ipc number %ld\n", arg1); + return -EINVAL; + } + + /* + * `convert_array ()' below will determine if AP.size is large + * enough for this request. 
+ */ + GET_PTR(ptr, size, 2, 3, 0, 0); + if (!ptr) { + DbgSCP("NULL pointer passed to syscall_ipc (%d)", + (int) arg1); + return -EFAULT; + } + + /* + * Syscalls semctl need an additional converting of arguments + * after getting the arg array + */ + switch (arg1) { + case SEMCTL: { +#define MASK_SEMUN_PTR_TYPE 0x3 /* mask for union semun with pointer */ +#define MASK_SEMUN_PTR_ALIGN 0x3 /* alignment mask for union semun with ptr */ +#define SIZE_SEMUN_PTR 16 /* size of union semun with ptr */ +#define MASK_SEMUN_INT_TYPE 0x0 /* mask for union semun with int */ +#define MASK_SEMUN_INT_ALIGN 0x3 /* alignment mask for union semun with int */ +#define SIZE_SEMUN_INT 16 /* size of union semun with int */ + + /* + * Union semun (5-th parameter) contains pointers + * inside, therefore they need to be additionally + * converted with saving results in these union + */ + union semun *converted_semun; + + /* + * Allocate space on user stack for additional + * structures for saving of converted parameters + */ + args = get_user_space((fields * 8) + + sizeof(union semun)); + /* Convert args array for syscall_ipc from ptr */ + rval = convert_array((long *) ptr, args, size, + fields, 1, mask_type, + mask_align); + if (rval) { + DbgSCP(" Bad args array for syscall_ipc "); + DbgSCP("(%ld), size=%d\n", arg1, size); + return -EINVAL; + } + + /* Convert union semun from args[3] */ + converted_semun = (union semun *) (args + (fields * 8)); + + /* Fields of union semun depend on cmd parameter */ + switch (args[2]) { + /* Pointer in union semun required */ + case IPC_STAT: + case IPC_SET: + case IPC_INFO: + case GETALL: + case SETALL: + if (!args[3]) + return -EINVAL; + rval = convert_array((long *) args[3], + (long *) converted_semun, + SIZE_SEMUN_PTR, 1, 1, + MASK_SEMUN_PTR_TYPE, + MASK_SEMUN_PTR_ALIGN); + if (rval) { + DbgSCP(" Bad semun parameter"); + DbgSCP(" for semctl\n"); + return -EINVAL; + } + /* + * Assign args[3] to pointer to + * converted union + */ + args[3] = (long) 
converted_semun; + break; + /* Int value in union semun required */ + case SETVAL: + rval = convert_array((long *) args[3], + (long *) converted_semun, + SIZE_SEMUN_INT, 1, 1, + MASK_SEMUN_INT_TYPE, + MASK_SEMUN_INT_ALIGN); + if (rval) { + DbgSCP(" Bad semun parameter"); + DbgSCP(" for semctl\n"); + return -EINVAL; + } + /* + * Assign args[3] to pointer to + * converted union + */ + args[3] = (long) converted_semun; + break; + /* No union semun as argument */ + default: + break; + } + break; + } + case MSGRCV: { +#define MASK_MSG_BUF_PTR_TYPE 0x7 /* type mask for struct msg_buf */ +#define MASK_MSG_BUF_PTR_ALIGN 0x7 /* alignment mask for struct msg_buf */ +#define SIZE_MSG_BUF_PTR 32 /* size of struct msg_buf with pointer */ + /* + * Struct new_msg_buf (ipc_kludge) contains pointer + * inside, therefore it needs to be additionally + * converted with saving results in these struct + */ + struct ipc_kludge *converted_new_msg_buf; + + /* + * Allocate space on user stack for additional + * structures for saving of converted parameters + */ + args = get_user_space((fields * 8) + + sizeof(struct ipc_kludge)); + /* Convert args array for syscall_ipc from ptr */ + rval = convert_array((long *) ptr, args, size, + fields, 1, mask_type, + mask_align); + if (rval) { + DbgSCP(" Bad args array for syscall_ipc "); + DbgSCP("(%ld), size=%d\n", arg1, size); + return -EINVAL; + } + + /* Convert struct new_msg_buf from args[3] */ + converted_new_msg_buf = (struct ipc_kludge *) + (args + (fields * 8)); + + rval = convert_array((long *) args[3], + (long *) converted_new_msg_buf, + SIZE_MSG_BUF_PTR, 2, 1, + MASK_MSG_BUF_PTR_TYPE, + MASK_MSG_BUF_PTR_ALIGN); + if (rval) { + DbgSCP(" Bad msg_buf parameter"); + DbgSCP(" for msgrcv\n"); + return -EINVAL; + } + + /* + * Assign args[3] to pointer to converted new_msg_buf + */ + args[3] = (long) converted_new_msg_buf; + break; + } + /* No additional converting of parameters for other syscalls */ + default: + /* Allocate space on user stack for 
args array */ + args = get_user_space(fields * 8); + /* Convert args array for syscall_ipc from ptr */ + rval = convert_array((long *) ptr, args, size, + fields, 1, mask_type, + mask_align); + if (rval) { + DbgSCP(" Bad args array for syscall_ipc "); + DbgSCP("(%ld) size=%d\n", arg1, size); + return -EINVAL; + } + break; + } + + /* + * Call syscall_ipc handler function with passing of + * arguments to it + */ + + DbgSCP("ipc(): call:%d first:%d second:%d third:%ld\n" + "ptr:%px fifth:0x%px\n", (u32) arg1, + (int) args[0], (int) args[1], args[2], + (void *) args[3], (void *) args[4]); + rval = sys_ipc((u32) arg1, (int) args[0], (u64) args[1], + (u64) args[2], (void *) args[3], + (u64) args[4]); + DbgSCP("syscall_ipc (%d) returned %ld\n", (int) arg1, rval); + break; + } + case __NR_readv: + case __NR_writev: + case __NR_preadv: + case __NR_pwritev: + case __NR_preadv2: + case __NR_pwritev2: { + /* + * sys_readv(unsigned long fd, const struct iovec __user *vec, + * unsigned long nr_segs) + * struct iovec { + * void __user *iov_base; + * __kernel_size_t iov_len; + * }; + */ + const int nr_segs = (int) arg4; + long *new_arg; + + if (((unsigned int) nr_segs) > UIO_MAXIOV) { + DbgSCP("Bad nr_segs(%d)\n", nr_segs); + return -EINVAL; + } + + /* One could use 0 in place `32 * nr_segs' here as the size + * will be checked below in `convert_array ()'. 
*/ + GET_PTR(ptr, size, 2, 3, 32 * nr_segs, 0); + if (!size) + return -EINVAL; + + new_arg = get_user_space(nr_segs * 2 * 8); + rval = convert_array((long *) ptr, new_arg, size, + 2, nr_segs, 0x7, 0xf); + if (rval) { + DbgSCP(" Bad array for sys_sysctl\n"); + return rval; + } + if (sys_num == __NR_readv || sys_num == __NR_writev) { + rval = (*sys_protcall_table[sys_num])(arg1, + (long) new_arg, nr_segs, 0, 0, 0); + } else if (sys_num == __NR_preadv || sys_num == __NR_pwritev) { + rval = (*sys_protcall_table[sys_num])(arg1, + (unsigned long) new_arg, nr_segs, arg5, + arg6, 0); + } else { + /* sys_num == __NR_preadv2 || sys_num==__NR_pwritev2*/ + rval = (*sys_protcall_table[sys_num])(arg1, + (unsigned long) new_arg, + nr_segs, arg5, arg6, arg7); + } + DbgSCP(" rval = %ld new_arg=%px\n", rval, new_arg); + break; + } + case __NR_select: + case __NR__newselect: { + fd_set *fds[3] = { NULL, NULL, NULL }; + + GET_PTR(ptr, size, 2, 3, 3 * sizeof(e2k_ptr_t), 0); + if (!size) + return -EINVAL; + + GET_PTR(ptr2, size, 4, 5, sizeof(struct timeval), 1); + if (!size) + return -EINVAL; + + rval = check_select_fs((e2k_ptr_t *) ptr, fds); + if (rval) + return -EINVAL; + + rval = sys_select(arg1, fds[0], fds[1], fds[2], + (struct timeval *) ptr2); + break; + } + case __NR_pselect6: { + fd_set *fds[3] = {NULL, NULL, NULL}; + unsigned long ptr3; + long *buf; + + GET_PTR(ptr, size, 2, 3, 3 * sizeof(e2k_ptr_t), 0); + if (!size) + return -EINVAL; + + GET_PTR(ptr2, size, 4, 5, sizeof(struct __kernel_timespec), 1); + if (!size) + return -EINVAL; + + GET_PTR(ptr3, size, 6, 7, 2 * 16, 0); + if (!size) + return -EINVAL; + + buf = get_user_space(2 * 8); + /* Extract a pointer to `sigset_t' and its length into `buf[]'. + 0x1 mask matches one pointer and one long field. 
*/ + rval = convert_array((long *) ptr3, buf, 2 * 16, 2, 1, + 0x7, 0x7); + if (rval) { + DbgSCP("Bad 4th argument for pselect6\n"); + return rval; + } + + rval = check_select_fs((e2k_ptr_t *) ptr, fds); + if (rval) + return -EINVAL; + + rval = sys_pselect6((int) arg1, fds[0], fds[1], fds[2], + (struct __kernel_timespec *) ptr2, buf); + break; + } + case __NR_execve: { + long filename; + long *buf; + long *argv; + long *envp; + unsigned int size2; + int argc, envc = 0; + + /* Path to executable */ + GET_PTR(filename, size, 2, 3, 0, 0); + if (!size) + return -EINVAL; + + /* argv */ + GET_PTR(ptr, size, 4, 5, 0, 0); + if (!size) + return -EINVAL; + + /* envp */ + GET_PTR(ptr2, size2, 6, 7, 0, 1); + /* + * Note in the release 5.00 of the Linux man-pages: + * The use of a third argument to the main function + * is not specified in POSIX.1; according to POSIX.1, + * the environment should be accessed via the external + * variable environ(7). + */ + + /* Count real number of entries in argv */ + argc = count_descriptors((long *) ptr, size); + if (argc < 0) + return -EINVAL; + + /* Count real number of entries in envc */ + if (size2) { + envc = count_descriptors((long *) ptr2, size2); + if (envc < 0) + return -EINVAL; + } + + /* + * Allocate space on user stack for converting of + * descriptors in argv and envp to ints + */ + buf = get_user_space((argc + envc + 2) << 3); + argv = buf; + envp = &buf[argc + 1]; + + /* + * Convert descriptors in argv to ints. + * For statically-linked executables missing argv is allowed, + * therefore kernel doesn't return error in this case. + * For dynamically-linked executables missing argv is not + * allowed, because at least argv[0] is required by ldso for + * loading of executable. Protected ldso must check argv. 
+ */ + if (argc) { + rval = convert_array((long *) ptr, argv, + argc << 4, 1, argc, 0x3, 0x3); + if (rval) { + DbgSCP(" Bad argv array for execve\n"); + return rval; + } + } + /* The array argv must be terminated by zero */ + argv[argc] = 0; + + /* + * Convert descriptors in envp to ints + * envc can be zero without problems + */ + if (envc) { + rval = convert_array((long *) ptr2, envp, + envc << 4, 1, envc, 0x3, 0x3); + if (rval) { + DbgSCP(" Bad envp array for execve\n"); + return rval; + } + } + /* The array envp must be terminated by zero */ + envp[envc] = 0; + + rval = e2k_sys_execve((char *) filename, (char **) argv, + (char **) envp); + + DbgSCP(" rval = %ld filename=%s argv=%px envp=%px\n", + rval, (char *) filename, argv, envp); + break; + } + default: + WARN_ON(1); + } + + DbgSCP("do_protected_syscall(%ld): rval = %ld\n", sys_num, rval); + return rval; +} + +/* + * Count the number of descriptors in array, which is terminated by NULL + * (For counting of elements in argv and envp arrays) + */ +notrace __section(".entry.text") +static int count_descriptors(long __user *prot_array, const int prot_array_size) +{ + int i; + long tmp[2]; + + if (prot_array == NULL) + return 0; + + /* Ensure that protected array is aligned and sized properly */ + if (!IS_ALIGNED((u64) prot_array, 16)) + return -EINVAL; + + /* Read each entry */ + for (i = 0; 8 * i + 16 <= prot_array_size; i += 2) { + long hi, lo; + int htag, ltag; + + if (copy_from_user_with_tags(tmp, &prot_array[i], 16)) + return -EFAULT; + + NATIVE_LOAD_VAL_AND_TAGD(tmp, lo, ltag); + NATIVE_LOAD_VAL_AND_TAGD(&tmp[1], hi, htag); + + /* If zero is met, it is the end of array*/ + if (lo == 0 && hi == 0 && ltag == 0 && htag == 0) + return i >> 1; + } + + return -EINVAL; +} + +#endif /* CONFIG_PROTECTED_MODE */ + +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT +static inline void syscall_enter_kernel_times_account(struct pt_regs *regs) +{ + e2k_clock_t clock = NATIVE_READ_CLKR_REG_VALUE(); + scall_times_t *scall_times; + int 
count; + + scall_times = &(current_thread_info()->times[current_thread_info()-> + times_index].of.syscall); + current_thread_info()->times[current_thread_info()-> + times_index].type = SYSTEM_CALL_TT; + INCR_KERNEL_TIMES_COUNT(current_thread_info()); + scall_times->start = clock; + E2K_SAVE_CLOCK_REG(scall_times->pt_regs_set); + scall_times->signals_num = 0; + + E2K_SAVE_CLOCK_REG(scall_times->save_stack_regs); + E2K_SAVE_CLOCK_REG(scall_times->save_sys_regs); + E2K_SAVE_CLOCK_REG(scall_times->save_stacks_state); + E2K_SAVE_CLOCK_REG(scall_times->save_thread_state); + scall_times->syscall_num = regs->sys_num; + E2K_SAVE_CLOCK_REG(scall_times->scall_switch); +} +static inline void syscall_exit_kernel_times_account(struct pt_regs *regs) +{ + scall_times_t *scall_times; + + scall_times = &(current_thread_info()->times[current_thread_info()-> + times_index].of.syscall); + E2K_SAVE_CLOCK_REG(scall_times->restore_thread_state); + E2K_SAVE_CLOCK_REG(scall_times->scall_done); + E2K_SAVE_CLOCK_REG(scall_times->check_pt_regs); +} +#else +static inline void syscall_enter_kernel_times_account(struct pt_regs *regs) { } +static inline void syscall_exit_kernel_times_account(struct pt_regs *regs) { } +#endif + +__section(".entry.text") +SYS_RET_TYPE notrace handle_sys_call(system_call_func sys_call, + long arg1, long arg2, long arg3, long arg4, + long arg5, long arg6, struct pt_regs *regs) +{ + unsigned long ti_flags = current_thread_info()->flags; + long rval; + bool ts_host_at_vcpu_mode = ts_host_at_vcpu_mode(); + + check_cli(); + info_save_stack_reg(NATIVE_READ_CLKR_REG_VALUE()); + syscall_enter_kernel_times_account(regs); + + SAVE_STACK_REGS(regs, current_thread_info(), true, false); + init_pt_regs_for_syscall(regs); + /* Make sure current_pt_regs() works properly by initializing + * pt_regs pointer before enabling any interrupts. 
*/ + current_thread_info()->pt_regs = regs; + WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_ENABLED)); + + SAVE_SYSCALL_ARGS(regs, arg1, arg2, arg3, arg4, arg5, arg6); + + if (guest_syscall_enter(regs, ts_host_at_vcpu_mode)) { + /* the system call is from guest and syscall is injecting */ + current_thread_info()->pt_regs = NULL; + guest_syscall_inject(current_thread_info(), regs); + return (SYS_RET_TYPE)0; + } + + Dbg1SC(regs->sys_num, "_NR_ %d current %px pid %d name %s\n" + "handle_sys_call: k_usd: base 0x%llx, size 0x%x, sbr 0x%llx\n" + "arg1 %lld arg2 0x%llx arg3 0x%llx arg4 0x%llx arg5 0x%llx arg6 0x%llx\n", + regs->sys_num, current, current->pid, current->comm, + current_thread_info()->k_usd_lo.USD_lo_base, + current_thread_info()->k_usd_hi.USD_hi_size, + current->stack, (u64) arg1, (u64) arg2, (u64) arg3, (u64) arg4, + (u64) arg5, (u64) arg6); + + if (likely(!(ti_flags & _TIF_WORK_SYSCALL_TRACE))) { + /* Fast path */ + rval = sys_call((unsigned long) arg1, (unsigned long) arg2, + (unsigned long) arg3, (unsigned long) arg4, + (unsigned long) arg5, (unsigned long) arg6); + SAVE_SYSCALL_RVAL(regs, rval); + } else { + /* Trace syscall enter */ + rval = syscall_trace_entry(regs); + /* Update args, since tracer could have changed them */ + RESTORE_SYSCALL_ARGS(regs, regs->sys_num, + arg1, arg2, arg3, arg4, arg5, arg6); + + if (rval != -1) + rval = sys_call((unsigned long) arg1, (unsigned long) arg2, + (unsigned long) arg3, (unsigned long) arg4, + (unsigned long) arg5, (unsigned long) arg6); + else + rval = -EPERM; + + SAVE_SYSCALL_RVAL(regs, rval); + + /* Trace syscall exit */ + syscall_trace_leave(regs); + /* Update rval, since tracer could have changed it */ + RESTORE_SYSCALL_RVAL(regs, rval); + } + + add_info_syscall(regs->sys_num, clock); + syscall_exit_kernel_times_account(regs); + + DbgSC("generic_sys_calls:_NR_ %d finish k_stk bottom %lx rval %ld " + "pid %d nam %s\n", + regs->sys_num, current->stack, rval, current->pid, current->comm); + + finish_syscall(regs, 
FROM_SYSCALL_N_PROT, true); +} + +__section(".entry.text") +int copy_context_from_signal_stack(struct local_gregs *l_gregs, + struct pt_regs *regs, struct trap_pt_regs *trap, u64 *sbbp, + e2k_aau_t *aau_context, struct k_sigaction *ka) +{ + struct signal_stack_context __user *context; + unsigned long ts_flag; + int ret; + + context = pop_signal_stack(); + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + + ret = __copy_from_user_with_tags(regs, &context->regs, sizeof(*regs)); + + if (regs->trap) { + ret = ret ?: __copy_from_user_with_tags(trap, &context->trap, + sizeof(*trap)); + regs->trap = trap; + + if (trap->sbbp) { + ret = ret ?: __copy_from_user(sbbp, &context->sbbp, + sizeof(sbbp[0]) * SBBP_ENTRIES_NUM); + trap->sbbp = sbbp; + } + } + + if (regs->aau_context) { + ret = ret ?: __copy_from_user(aau_context, &context->aau_regs, + sizeof(*aau_context)); + regs->aau_context = aau_context; + } + + if (ka) { + ret = ret ?: __copy_from_user(ka, &context->sigact, + sizeof(*ka)); + } + + if (!TASK_IS_BINCO(current)) { + ret = ret ?: __copy_from_user(l_gregs, &context->l_gregs, + sizeof(*l_gregs)); + } + + clear_ts_flag(ts_flag); + + return ret ? 
-EFAULT : 0; +} + + +__section(".entry.text") +notrace long __ret_from_fork(struct task_struct *prev) +{ + struct pt_regs *regs = current_thread_info()->pt_regs; + enum restore_caller from = FROM_RET_FROM_FORK; + int ret; + + prev = ret_from_fork_get_prev_task(prev); + + e2k_finish_switch(prev); + schedule_tail(prev); + + if (current->flags & PF_KTHREAD) + return 0; + + /* + * Restore proper psize for protected mode + * TODO Remove this together with TS_FORK and test with longjmp_tc + */ + if (TASK_IS_PROTECTED(current)) { + e2k_wd_t wd; + unsigned long flags; + + raw_all_irq_save(flags); + wd = READ_WD_REG(); + wd.psize = regs->wd.psize; + WRITE_WD_REG(wd); + raw_all_irq_restore(flags); + } + + if (TASK_IS_PROTECTED(current)) { + if (regs->flags.protected_entry10) + from |= FROM_SYSCALL_PROT_10; + else + from |= FROM_SYSCALL_PROT_8; + } else { + from |= FROM_SYSCALL_N_PROT; + } + + ret = ret_from_fork_prepare_hv_stacks(regs); + if (ret) { + do_exit(SIGKILL); + } + + finish_syscall(regs, from, true); +} + + +__section(".entry.text") +notrace void makecontext_trampoline_switched(void) +{ + long ret = 0; + struct hw_context *ctx; + void __user *uc_link = NULL; + struct pt_regs regs; + + init_pt_regs_for_syscall(®s); + regs.sys_num = -1; + regs.sys_rval = -ENOSYS; + SAVE_STACK_REGS(®s, current_thread_info(), true, false); + current_thread_info()->pt_regs = ®s; + raw_all_irq_enable(); + + /* + * Call switch_hw_contexts if needed + */ + ctx = current_thread_info()->this_hw_context; + if (!ctx) { + DebugCTX("Could not find current context\n"); + do_exit(SIGKILL); + } + + /* + * Read uc_link from user + */ + if (ctx->ptr_format == CTX_32_BIT) { + u32 ucontext_32; + + if (get_user(ucontext_32, (u32 *) ctx->p_uc_link)) { + ret = -EFAULT; + goto exit; + } + uc_link = (struct ucontext_32 *) (u64) ucontext_32; + } else if (ctx->ptr_format == CTX_64_BIT) { + u64 ucontext_64; + + if (get_user(ucontext_64, (u64 *) ctx->p_uc_link)) { + ret = -EFAULT; + goto exit; + } + uc_link = 
(struct ucontext *) ucontext_64; + } else { + /* CTX_128_BIT */ + e2k_ptr_t ptr; + u64 lo_val, hi_val; + u8 lo_tag, hi_tag; + u8 tag; + u32 size; + + TRY_USR_PFAULT { + NATIVE_LOAD_TAGGED_QWORD_AND_TAGS(ctx->p_uc_link, + lo_val, hi_val, lo_tag, hi_tag); + } CATCH_USR_PFAULT { + ret = -EFAULT; + goto exit; + } END_USR_PFAULT + AW(ptr).lo = lo_val; + AW(ptr).hi = hi_val; + size = AS(ptr).size - AS(ptr).curptr; + tag = (hi_tag << 4) | lo_tag; + + /* + * Check that the pointer is good. + * We must be able to access uc_mcontext.sbr field. + */ + if (!size) + /* NULL pointer, just return */ + goto exit; + if (tag != ETAGAPQ || size < + offsetof(struct ucontext_prot, + uc_mcontext.usd_lo)) { + ret = -EFAULT; + goto exit; + } + + uc_link = (struct ucontext_prot *) E2K_PTR_PTR(ptr, GET_SBR_HI()); + } + + DebugCTX("ctx %lx, uc_link=%lx\n", ctx, uc_link); + + if (uc_link) { + /* + * Call this before swapcontext() to make sure + * that u_pcshtp != 0 for user_hw_stacks_copy_full() + */ + /* this case has not yet been accounted for */ + BUG_ON(guest_syscall_from_user(current_thread_info())); + host_user_hw_stacks_prepare(®s.stacks, ®s, + MAKECONTEXT_SIZE, FROM_MAKECONTEXT, false); + + /* + * Note that this will drop reference from + * current_thread_info()->this_hw_context, + * but the reference from makecontext() still + * holds (until user calls freecontext()). 
+ */ + ret = swapcontext(uc_link, ctx->ptr_format); + if (!ret) { + enum restore_caller from = FROM_MAKECONTEXT; + + if (TASK_IS_PROTECTED(current)) + from |= FROM_SYSCALL_PROT_8; + else + from |= FROM_SYSCALL_N_PROT; + + regs.sys_rval = 0; + regs.return_desk = 0; + finish_syscall(®s, from, true); + } + + DebugCTX("swapcontext failed with %ld\n", ret); + } + +exit: + if (test_thread_flag(TIF_NOHZ)) + user_exit(); + + /* Convert to user codes */ + ret = -ret; + + DebugCTX("calling do_exit with %ld\n", ret); + do_exit((ret & 0xff) << 8); +} + + +__section(".entry.text") +notrace long do_sigreturn(void) +{ + struct thread_info *ti = current_thread_info(); + struct pt_regs regs; + struct trap_pt_regs saved_trap, *trap; + u64 sbbp[SBBP_ENTRIES_NUM]; + struct k_sigaction ka; + e2k_aau_t aau_context; + struct local_gregs l_gregs; + e2k_stacks_t cur_stacks; + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + rt_sigframe_t __user *frame; + + COPY_U_HW_STACKS_FROM_TI(&cur_stacks, ti); + raw_all_irq_enable(); + +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT + E2K_SAVE_CLOCK_REG(clock); + { + register int count; + + GET_DECR_KERNEL_TIMES_COUNT(ti, count); + scall_times = &(ti->times[count].of.syscall); + scall_times->do_signal_done = clock; + } +#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */ + + /* Always make any pending restarted system call return -EINTR. + * Otherwise we might restart the wrong system call. 
*/ + current->restart_block.fn = do_no_restart_syscall; + + if (copy_context_from_signal_stack(&l_gregs, ®s, &saved_trap, + sbbp, &aau_context, &ka)) { + user_exit(); + do_exit(SIGKILL); + } + + /* Preserve current p[c]shtp as they indicate */ + /* how much to FILL when returning */ + preserve_user_hw_stacks_to_copy(®s.stacks, &cur_stacks); + + /* Restore proper psize as it was when signal was delivered */ + if (regs.wd.WD_psize) { + restore_wd_register_psize(regs.wd); + } + + if (from_trap(®s)) + regs.trap->prev_state = exception_enter(); + else + user_exit(); + + regs.next = NULL; + /* Make sure 'pt_regs' are ready before enqueuing them */ + barrier(); + ti->pt_regs = ®s; + + frame = (rt_sigframe_t *) current_thread_info()->u_stack.top; + + usd_lo = regs.stacks.usd_lo; + usd_hi = regs.stacks.usd_hi; + STORE_USER_REGS_TO_THREAD_INFO(ti, AS(usd_lo).base - AS(usd_hi).size, + regs.stacks.top, + regs.stacks.top - AS(usd_lo).base + AS(usd_hi).size); + + if (restore_rt_frame(frame, &ka)) { + printk("%s%s[%d] bad frame:%px\n", + task_pid_nr(current) > 1 ? 
KERN_INFO : KERN_EMERG, + current->comm, current->pid, frame); + + force_sig(SIGSEGV); + } + + trap = regs.trap; + if (trap && (3 * trap->curr_cnt) < trap->tc_count && + trap->tc_count > 0) { + trap->from_sigreturn = 1; + do_trap_cellar(®s, 0); + } + + clear_restore_sigmask(); + + if (!TASK_IS_BINCO(current)) + restore_local_glob_regs(&l_gregs, true); + + if (!from_syscall(®s)) { + BUG_ON(!regs.trap || !regs.aau_context || regs.kernel_entry); + + finish_user_trap_handler(®s, FROM_USER_TRAP | FROM_SIGRETURN); + } else { + bool restart_needed = false; + enum restore_caller from = FROM_SIGRETURN; + + switch (regs.sys_rval) { + case -ERESTART_RESTARTBLOCK: + case -ERESTARTNOHAND: + regs.sys_rval = -EINTR; + break; + case -ERESTARTSYS: + if (!(ka.sa.sa_flags & SA_RESTART)) { + regs.sys_rval = -EINTR; + break; + } + /* fallthrough */ + case -ERESTARTNOINTR: + restart_needed = true; + break; + } + + switch (regs.kernel_entry) { + case 1: + case 3: + case 4: + from |= FROM_SYSCALL_N_PROT; + break; + case 8: + from |= FROM_SYSCALL_PROT_8; + break; + case 10: + from |= FROM_SYSCALL_PROT_10; + break; + default: + BUG(); + } + + finish_syscall(®s, from, !restart_needed); + } +} + +__section(".entry.text") +notrace long return_pv_vcpu_trap(void) +{ + return_pv_vcpu_inject(FROM_PV_VCPU_TRAP_INJECT); + return 0; +} + +__section(".entry.text") +notrace long return_pv_vcpu_syscall(void) +{ + return_pv_vcpu_inject(FROM_PV_VCPU_SYSCALL_INJECT); + return 0; +} + +__section(".entry.text") +notrace long return_pv_vcpu_syscall_fork(void) +{ + pv_vcpu_return_from_fork(); + return 0; +} diff --git a/arch/e2k/kernel/ttable_tmp.c b/arch/e2k/kernel/ttable_tmp.c new file mode 100644 index 000000000000..c69abba2cbaf --- /dev/null +++ b/arch/e2k/kernel/ttable_tmp.c @@ -0,0 +1 @@ +#include "ttable.c" diff --git a/arch/e2k/kernel/vmlinux.lds.S b/arch/e2k/kernel/vmlinux.lds.S new file mode 100644 index 000000000000..0362eea7a8fd --- /dev/null +++ b/arch/e2k/kernel/vmlinux.lds.S @@ -0,0 +1,323 @@ 
+/* + * This is the GNU ld script to construct E2K Linux kernel + */ + +/* No __ro_after_init data in the .rodata section - which will always be ro */ +#define RO_AFTER_INIT_DATA + +#include +#include +#include +#include +#include +#include +#include + +OUTPUT_FORMAT("elf64-e2k", "elf64-e2k", "elf64-e2k") + +#ifdef CONFIG_E2K_MACHINE + #if defined(CONFIG_E2K_ES2_DSP) || defined(CONFIG_E2K_ES2_RU) + OUTPUT_ARCH(elbrus-v2) + #elif defined(CONFIG_E2K_E2S) + OUTPUT_ARCH(elbrus-v3) + #elif defined(CONFIG_E2K_E8C) + OUTPUT_ARCH(elbrus-v4) + #elif defined(CONFIG_E2K_E1CP) + OUTPUT_ARCH(elbrus-v4) + #elif defined(CONFIG_E2K_E8C2) + OUTPUT_ARCH(elbrus-v5) + #elif defined(CONFIG_E2K_E2C3) + OUTPUT_ARCH(elbrus-v6) + #elif defined(CONFIG_E2K_E12C) + OUTPUT_ARCH(elbrus-v6) + #elif defined(CONFIG_E2K_E16C) + OUTPUT_ARCH(elbrus-v6) + #else + error "Invalid e2k machine type" + #endif /* all machines types */ +#else /* ! CONFIG_E2K_MACHINE */ +OUTPUT_ARCH(e2k) +#endif /* CONFIG_E2K_MACHINE */ + +ENTRY(_start) +jiffies = jiffies_64; +SECTIONS +{ + . = E2K_KERNEL_IMAGE_AREA_BASE; /* KERNEL BASE VA ~ 14 * 2^44 */ + _start = .; /* Start of kernel image */ + _text = .; /* Text and read-only data */ + _stext = .; /* Text and read-only data */ + .text : { + __ttable_start = .; + _t_entry = .; + *(.ttable_entry0) + . = _t_entry + 0x800; + *(.native_ttable_entry1) + . = _t_entry + 0x1800; + *(.native_ttable_entry3) + . = _t_entry + 0x2000; + *(.native_ttable_entry4) + . = _t_entry + 0x2800; +#ifdef CONFIG_COMPAT + *(.ttable_entry5) + *(.ttable_entry5_table) + *(.ttable_entry5_C) +#endif + . = _t_entry + 0x3000; + *(.ttable_entry6) + *(.ttable_entry6_table) + *(.ttable_entry6_C) +#ifdef CONFIG_PROTECTED_MODE + . = _t_entry + 0x3800; + *(.ttable_entry7) + *(.ttable_entry7_table) + *(.ttable_entry7_C) +#endif +#ifdef CONFIG_PROTECTED_MODE + . = _t_entry + 0x4000; + *(.ttable_entry8) + *(.ttable_entry8_C) + . = _t_entry + 0x5000; + *(.ttable_entry10) + *(.ttable_entry10_C) +#endif + . 
= _t_entry + 0x5800; + *(.ttable_entry11) + . = _t_entry + 0x6000; + *(.ttable_entry12) + +#ifdef CONFIG_KVM_HOST_MODE +/* It is paravirtualized host and guest kernel */ +/* or native host kernel with virtualization support */ +/* hypervisor and host features is not supported on guest mode */ + + . = _t_entry + 0x8000; /* hypercalls */ + *(.ttable_entry16) /* generic */ + . = _t_entry + 0x8800; + *(.ttable_entry17) + . = _t_entry + 0x9000; + +/* FIXME: following auxiliary entries should have fixed offsets into ttable */ +/* whole entry #10 to enable some updates of entries content within the fixed */ +/* entry max size (same as ttable entries #0, 1, 3, 4, ...) */ + . = _t_entry + 0xa000; + __ptext_host_start = .; + __to_paravirt_guest_entries__ = .; + *(.to_paravirt_guest) + __ptext_host_end = .; + . = ALIGN(PAGE_SIZE); + __to_guest_entries__ = .; + *(.to_guest) + +#endif /* CONFIG_KVM_HOST_MODE */ + + _t_entry_end = .; + __ttable_end = .; + +#ifdef CONFIG_KVM_GUEST_KERNEL + /* 0x10_000 */ + . = _t_entry + KVM_GUEST_STARTUP_ENTRY_NUM * + E2K_SYSCALL_TRAP_ENTRY_SIZE; + *(.kvm_guest_startup_entry) + + /* 0x12_000 */ + . = _t_entry + (KVM_PV_VCPU_TRAP_ENTRY_NUM + 0) * + E2K_SYSCALL_TRAP_ENTRY_SIZE; + __kvm_pv_vcpu_ttable_entry0 = .; + *(.kvm_pv_vcpu_ttable_entry0) + . = _t_entry + (KVM_PV_VCPU_TRAP_ENTRY_NUM + 1) * + E2K_SYSCALL_TRAP_ENTRY_SIZE; + *(.kvm_guest_ttable_entry1) + . = _t_entry + (KVM_PV_VCPU_TRAP_ENTRY_NUM + 3) * + E2K_SYSCALL_TRAP_ENTRY_SIZE; + *(.kvm_guest_ttable_entry3) + . = _t_entry + (KVM_PV_VCPU_TRAP_ENTRY_NUM + 4) * + E2K_SYSCALL_TRAP_ENTRY_SIZE; + *(.kvm_guest_ttable_entry4) + . = _t_entry + (KVM_PV_VCPU_TRAP_ENTRY_NUM + 5) * + E2K_SYSCALL_TRAP_ENTRY_SIZE; + *(.kvm_guest_ttable_entry5) + . = _t_entry + (KVM_PV_VCPU_TRAP_ENTRY_NUM + 6) * + E2K_SYSCALL_TRAP_ENTRY_SIZE; + *(.kvm_guest_ttable_entry6) + __kvm_pv_vcpu_ttable_end = .; +#endif /* CONFIG_KVM_GUEST_KERNEL */ + . 
= _t_entry + (KVM_PV_VCPU_TRAP_ENTRY_NUM + 7) * + E2K_SYSCALL_TRAP_ENTRY_SIZE; + +#ifdef CONFIG_KVM_HOST_MODE + /* Hypercalls entry points */ + . = ALIGN(PAGE_SIZE); + __hypercalls_begin = .; + *(.hcall_entry0) /* generic hypercalls */ + . = __hypercalls_begin + 0x1000; + *(.hcall_entry1) /* light hypercalls */ +#endif /* CONFIG_KVM_HOST_MODE */ + + __entry_handlers_start = .; + ENTRY_TEXT + IRQENTRY_TEXT + __entry_handlers_end = .; + HEAD_TEXT +#ifdef CONFIG_KPROBES + KPROBES_TEXT +#endif + __C1_wait_trap_start = .; + *(.C1_wait_trap.text) + __C1_wait_trap_end = .; + __C3_wait_trap_start = .; + *(.C3_wait_trap.text) + __C3_wait_trap_end = .; + TEXT_TEXT + SCHED_TEXT + LOCK_TEXT + CPUIDLE_TEXT + *(.fixup) +#ifdef CONFIG_CPU_HWBUG_IBRANCH + /* Add NOPs after ibranch in .fixup section */ + QUAD(0) + QUAD(0) +#endif + *(.gnu.warning) +#ifdef CONFIG_E2K_KEXEC + . = ALIGN(PAGE_SIZE); + *(.switch_to_phys) +#endif + } /*:text*/ + NOTES /*:kernel :note*/ + .dummy : { + *(.dummy) + } /*:kernel*/ + + RO_DATA(E2K_MAX_PAGE_SIZE) + + . = ALIGN(E2K_MAX_PAGE_SIZE); + + _etext = .; /* End of text section */ + + . = ALIGN(E2K_MAX_PAGE_SIZE); + + _sdata = .; + +#ifdef CONFIG_NUMA + __node_data_start = .; + .node.data : { *(.node.data) } + . = ALIGN(PAGE_SIZE); + __node_data_end = .; +#endif /* CONFIG_NUMA */ + + . = ALIGN(PAGE_SIZE); + __start_ro_after_init = .; + .data..ro_after_init : { + *(.data..ro_after_init) + } + . = ALIGN(PAGE_SIZE); + __end_ro_after_init = .; + + . = ALIGN(E2K_MAX_PAGE_SIZE); /* Start of data segment */ + + __common_data_begin = .; + + EXCEPTION_TABLE(16) + + /* We want to use OSGD for quick data access */ + RW_DATA_SECTION(L2_CACHE_BYTES, PAGE_SIZE, E2K_ALIGN_GLOBALS_SZ) + + .got : { + *(.got) + } + .sdata : { + *(.sdata) + } + _edata = .; /* End of data section */ + + BSS_SECTION(0, 0, 0) + + . = ALIGN(PAGE_SIZE); + __common_data_end = .; + + . 
= ALIGN(E2K_MAX_PAGE_SIZE); + + _edata_bss = .; + +#ifndef CONFIG_NUMA + /* We want to map zero page with small pages to write-protect it, + * so in !NUMA case it goes into different section */ + . = ALIGN(PAGE_SIZE); + empty_zero_page = .; + . = . + PAGE_SIZE; +#endif + + __init_begin = .; + + /* Will be freed after init */ + __init_text_begin = .; + INIT_TEXT_SECTION(PAGE_SIZE) + /* + * .exit.text is discarded at runtime, not link time, + * to deal with references from __bug_table + */ + .exit.text : { + EXIT_TEXT + } + __init_text_end = .; + + . = ALIGN(PAGE_SIZE); + + __init_data_begin = .; + .exit.data : { + EXIT_DATA + } + E2K_BOOT_SETUP(16) + INIT_DATA_SECTION(16) + PERCPU_SECTION(INTERNODE_CACHE_BYTES) + + . = ALIGN(8); + .apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) { + __apicdrivers = .; + *(.apicdrivers); + __apicdrivers_end = .; + } + + /* + * Alternative instructions for different CPU types or capabilities + */ + . = ALIGN(8); + .altinstructions : { + __alt_instructions = .; + *(.altinstructions) + __alt_instructions_end = .; + } + . = ALIGN(8); + .altinstr_replacement : { + *(.altinstr_replacement) + } + + /* + * cpu_has() initializers + */ + . 
= ALIGN(8); + .cpuhas_initcall : { + __cpuhas_initcalls = .; + *(.cpuhas_initcall) + __cpuhas_initcalls_end = .; + } + + __init_data_end = .; + + __init_end = .; + + _end = .; + + .mdebug 0 : { + *(.mdebug) + } + .note 0 : { + *(.note) + } + + STABS_DEBUG + DWARF_DEBUG + + DISCARDS +} diff --git a/arch/e2k/kvm/.gitignore b/arch/e2k/kvm/.gitignore new file mode 100644 index 000000000000..514b9fe66a32 --- /dev/null +++ b/arch/e2k/kvm/.gitignore @@ -0,0 +1,3 @@ +mkclearwindow +ttable_asm.h +ttable_wbs.h \ No newline at end of file diff --git a/arch/e2k/kvm/Kconfig b/arch/e2k/kvm/Kconfig new file mode 100644 index 000000000000..1929a96bd7be --- /dev/null +++ b/arch/e2k/kvm/Kconfig @@ -0,0 +1,311 @@ +# +# KVM configuration +# + +source "virt/kvm/Kconfig" + +menuconfig KVM + tristate "Kernel-based Virtual Machine (KVM) support" + depends on VIRTUALIZATION + default y + select KVM_GUEST + select PREEMPT_NOTIFIERS + select HAVE_KVM_IRQCHIP + select HAVE_KVM_IRQFD + select HAVE_KVM_EVENTFD + select HAVE_KVM_IRQ_ROUTING + select HAVE_KVM_MSI + select KVM_APIC_ARCHITECTURE + select SRCU + select MMU_NOTIFIER + select KVM_VFIO + select VFIO + select VFIO_PCI + select VFIO_PCI_VGA + select KVM_MMIO if KVM_HOST_MODE + # FIXME: Async page faults support is not yet implemented + # for Paravirt-2.0 + select KVM_ASYNC_PF if KVM_HOST_MODE + select DIRECT_VIRQ_INJECTION + select PARAVIRT_SPINLOCKS if SMP + + help + Support hosting: + - paravirtualized guest kernels + - fully virtualized guest machines based on hardware + virtualization extensions (only for CPUs ISET >= V6) + If unsure, say N. + +if KVM +config KVM_GUEST + bool # KVM guest machine support + help + This allows support of KVM guest machines + +config KVM_PARAVIRTUALIZATION + bool "Hypervisor software paravirtualization enable" + default y + help + Say Y here if hypervisor based on KVM should support + software paravirtualized mode. 
In this case guest kernel + is modified to make some privileged actions through + hypercalls and other software engines + +config KVM_HW_VIRTUALIZATION + bool "Hypervisor based on hardware virtualization and KVM support" + default y if CPU_ISET >= 6 + select MMU_SEP_VIRT_SPACE if CPU_ISET >= 6 + help + Say Y here if machine hardware has virtualization support + and to enable host kernel and hypervisor based on KVM + +config KVM_HW_PARAVIRTUALIZATION + bool "Hypervisor based on hardware virtualization and paravirtulized guest" + depends on KVM_HW_VIRTUALIZATION + select KVM_PARAVIRTUALIZATION + help + Say Y here if machine hardware has virtualization support + and to enable host kernel and hypervisor based on KVM. + In addition hypervisor supports guest paravirtualized kernels + based on hardware virtualization extensions + default KVM_HW_VIRTUALIZATION + +choice + prompt "KVM host and guest kernels support type" + depends on KVM_GUEST + default KVM_HOST_KERNEL + +config KVM_HOST_KERNEL + bool "KVM hypervisor and host kernel support" + select KVM_HOST_MODE + help + Say Y here to get host kernel which can be run + only as host kernel with hypervisor functionality + and support any guest machines based on KVM + +config KVM_GUEST_KERNEL + bool "Paravirtualized native guest kernel support" + select KVM_GUEST_MODE + help + Say Y here to get native guest kernel which can be run + only as guest kernel on any hypervisor based on KVM + +config PARAVIRT_GUEST + bool "Paravirtualized host & guest kernel support" + select KVM_HOST_MODE + select KVM_GUEST_MODE + help + Say Y here to get paravirtualized united host & guest kernel, + which can be run as host + hypervisor and as guest kernel. 
+endchoice + +config KVM_GUEST_HW_PV + bool "Paravirtualized guest kernel based on hardware virtualization" + depends on KVM_HW_VIRTUALIZATION + default y + help + Say Y here to get paravirtualized guest kernel which can be run + only as guest kernel and only on hypervisor based on hardware + virtualization extensions + +config KVM_SHADOW_PT_ENABLE + bool "Enable Shadow Page Tables support" + depends on KVM_HOST_MODE + default n + select KVM_HV_MMU + help + Say Y here to enable support of shadow page tables by hypervisor + This mode can be applied for hardware and software virtualization, + and based on common type of shadow PTs for both kind of + virtualization. + Old mode of software virtualization based on own implementation of + PTs and special hypercalls to update them from guest. If you say N + here then only old style of PTs will be enabled for software mode + +config GUEST_MM_SPT_LIST + bool "Enable Guest MM Shadow Page Tables list support" + depends on KVM_SHADOW_PT_ENABLE && KVM_PARAVIRTUALIZATION + default n + help + Say Y here to enable support of separate list of shadow page tables + for each guest MM structure. Any allocation shadow page tables for + guest MM adds the new structure SP to the list and any release of + the shadow page table deletes the structure SP from the list. + It allows to control the complete release of all shadow PTs when + the guest mm is released. + +config KVM_PARAVIRT_TLB_FLUSH + bool "Enable flush tlb through hypercall by paravirt guest" + depends on (KVM_HOST_MODE && KVM_SHADOW_PT_ENABLE) || (KVM_GUEST_MODE && KVM_SHADOW_PT) + default y + help + Say Y to permit guest to edit the lowest level (PTE) of guest + page table without getting page fault. 
Shadow page table is + synchronized with guest page table when guest calls hypercall + KVM_HCALL_FLUSH_TLB_RANGE + +config KVM_TDP_ENABLE + bool "Enable Two Dimensional Page Tables (gva->gpa->hpa) support" + depends on KVM_HOST_MODE && KVM_HW_VIRTUALIZATION + depends on KVM_SHADOW_PT_ENABLE + default n + select KVM_HV_MMU + select KVM_PHYS_PT_ENABLE + help + Say Y here to enable support of two dimensional paging by hypervisor. + This mode can be applied only for hardware virtualization and + only if the mode is enabled by hardware. Both TDP and + shadow PTs modes can be turned ON. + +config KVM_NONPAGING_ENABLE + bool "Nonpaging mode of KVM MMU is enable" + depends on KVM_HOST_MODE && KVM_HW_VIRTUALIZATION + depends on KVM_SHADOW_PT_ENABLE + default n + select KVM_HV_MMU + help + Say Y here to enable non paging mode by hypervisor. + This mode is applied only for hardware virtualization and + allows to boot guest with disabled translation of virtual addresses. + Guest physical addresses can be translated by hypervisor + shadow PT or direct translation GPA -> PA by TDP + +config KVM_PHYS_PT_ENABLE + bool "Guest physical addresses translation is enable" + depends on KVM_HOST_MODE && KVM_HW_VIRTUALIZATION + depends on KVM_SHADOW_PT_ENABLE + default n + select KVM_HV_MMU + help + Say Y here to enable translation of guest physical addresses + by hypervisor using hardware supported Guest Page tables (GP_*PTB) + This mode is applied only for hardware virtualization and + allows to boot guest on physical mode (TLB disabled) and direct + translation GPA -> PA + +config KVM_GUEST_HW_HCALL + bool "Guest kernel can use hardware supported hypercalls" + depends on KVM_HW_VIRTUALIZATION + select KVM_GUEST_HW_PV + default y + help + Say Y here to enable hardware supported hypercalls for guest kernel. + It can be done only for guest kernel and only on hypervisor + based on hardware virtualization extensions. 
+ In other cases guest can use special system calls instead of HCALLs + as at paravirtualization mode. + +config KVM_SHADOW_PT + bool "Guest kernel PTs based on host Shadow Page Tables support" + depends on KVM_GUEST_MODE + default n + help + Host should support shadow PTs and guest kernel knows about that + and can manage own PTs based on host shadow PTs features. + If you say N then only old style of software virtualization PTs + support can be enabled for guest + +config E2K_VIRT + bool # Elbrus virtual (1 core, no NUMA, machine & IO - virtual) + depends on (KVM_GUEST_KERNEL && (E2K_MACHINE || E2K_MINVER >= 6)) + default y + help + Native machine to run such kernel can be any. + Guest machine is Elbrus virtual systems. + +config KVM_HOST_MODE + bool # Enable run the kernel as host kernel + help + This allows support of KVM on kernel + +config KVM_GUEST_MODE + bool # Enable run the kernel as guest kernel + help + This allows building of KVM guest kernel + +config KVM_HV_MMU + bool # Guest MMU support based on shadow PT, TDP, nonpaging + help + This adds to hypervisor support of guest MMU + based on shadow PT, Two Dimensional Paging and + non paging booting + +config KVM_GUEST_SMP + bool # Enable SMP mode on guest kernel + depends on (KVM_GUEST_MODE && SMP) + default y + help + This allows SMP mode of building of KVM guest kernel + +endif # KVM + +if PARAVIRT_GUEST + +config KVM_CLOCK + bool "KVM paravirtualized clock" + select PARAVIRT + select PARAVIRT_CLOCK + help + Turning on this option will allow you to run a paravirtualized clock + when running over the KVM hypervisor. 
Instead of relying on a PIT + (or probably other) emulation by the underlying device model, the host + provides the guest with timing infrastructure such as time of day, and + system time + +config PARAVIRT + bool "Enable paravirtualization code" + help + This changes the kernel so it can modify itself when it is run + under a hypervisor, potentially improving performance significantly + over full virtualization. However, when run without a hypervisor + the kernel is theoretically slower and slightly larger. + +config PARAVIRT_CLOCK + bool + default n + +endif # PARAVIRT_GUEST + +config PARAVIRT_SPINLOCKS + bool "Paravirtualization layer for spinlocks" + depends on SMP + help + Paravirtualized spinlocks allow to use hypercall-based backend to + replace the spinlock implementation with virtualization-friendly + blocking of the virtual CPU rather than spinning. + + Unfortunately the downside is an up to 5% performance hit on + native kernels, with various workloads. + + If you are unsure how to answer this question, answer N. + +config QUEUED_LOCK_STAT + bool "Paravirt queued spinlock statistics" + depends on PARAVIRT_SPINLOCKS && DEBUG_FS + ---help--- + Enable the collection of statistical data on the slowpath + behavior of paravirtualized queued spinlocks and report + them on debugfs. + +config PARAVIRT_DEBUG + bool "paravirt-ops debugging" + depends on PARAVIRT && DEBUG_KERNEL + help + Enable to debug paravirt_ops internals. Specifically, BUG if + a paravirt_op is missing when it is called. + +config KVM_GUEST_DEBUG + bool "KVM guest kernel debugging" + depends on (PARAVIRT_GUEST || KVM_GUEST_KERNEL) + help + Enable to debug KVM guest kernel. 
This mode turn on + compilation with debugging info (-g option) to enable + run guest kernel under gdb + +config DIRECT_VIRQ_INJECTION + tristate "Enable direct injection of virtual IRQs to guest kernel" + depends on KVM + default y + help + Direct injection of VIRQs to guest kernel VCPU through standard + mechanism of traps passing from host to guest diff --git a/arch/e2k/kvm/Makefile b/arch/e2k/kvm/Makefile new file mode 100644 index 000000000000..a120f011011e --- /dev/null +++ b/arch/e2k/kvm/Makefile @@ -0,0 +1,73 @@ +subdir-ccflags-y := -Werror -Wswitch -Wenum-compare + +ccflags-y += -Iarch/e2k/kvm +EXTRA_CFLAGS += -Wframe-larger-than=4096 + +KVM := ../../../virt/kvm +KERNEL := ../kernel + +obj-$(CONFIG_KVM_GUEST) += guest/ + +kvm-$(CONFIG_KVM_HOST_MODE) += $(KVM)/kvm_main.o +kvm-$(CONFIG_KVM_HOST_MODE) += $(KVM)/irqchip.o +kvm-$(CONFIG_KVM_HOST_MODE) += $(KVM)/eventfd.o +kvm-$(CONFIG_KVM_HOST_MODE) += $(KVM)/vfio.o +kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o + +kvm-$(CONFIG_KVM_HOST_MODE) += pt-structs.o switch.o \ + lapic.o ioapic.o cepic.o ioepic.o \ + irq_comm.o timer.o virq.o io.o \ + user_area.o nid.o vmid.o gpid.o \ + hypercalls.o process.o ttable.o \ + trap_table.o cpu.o mm.o mmu_flush.o \ + boot_spinlock.o spinlock.o \ + runstate.o complete.o \ + sic-nbsr.o lt.o spmc.o gaccess.o debug.o +kvm-y += kvm-e2k.o +obj-y += cpu/ + +kvm-$(CONFIG_KVM_MMIO) += $(KVM)/coalesced_mmio.o + +ifeq ($(CONFIG_KVM_HOST_MODE),y) + +# To make sure that AAU is not zeroed before we get to it +CFLAGS_ttable.o := -fexclude-ctpr2 + +# To compile gregs and ctpr saving for iset v6 +AFLAGS_REMOVE_trap_table.o = $(CFLAGS_ALL_CPUS) +AFLAGS_trap_table.o += -march=elbrus-v6 + +# We should no instrument these files +CFLAGS_REMOVE_ttable.o := -fprofile-generate-kernel + +# -fexclude-ctpr2: make sure that AAU is not zeroed before we get to it +# -fno-dam - hardware bug 124206 workaround +CFLAGS_REMOVE_hv_cpu.o = $(CFLAGS_ALL_CPUS) +CFLAGS_hv_cpu.o := -march=elbrus-v6 -fexclude-ctpr2 
-fno-dam +CFLAGS_REMOVE_intercepts.o = $(CFLAGS_ALL_CPUS) +CFLAGS_intercepts.o := -march=elbrus-v6 + +kvm-$(CONFIG_KVM_HW_VIRTUALIZATION) += hv_cpu.o hv_mmu.o intercepts.o + +ifeq ($(CONFIG_KVM_PARAVIRTUALIZATION),y) +kvm-y += mmu-pv.o +ifeq ($(CONFIG_KVM_SHADOW_PT_ENABLE),y) +kvm-y += mmu-pv-spt.o +endif # CONFIG_KVM_SHADOW_PT_ENABLE +endif # CONFIG_KVM_PARAVIRTUALIZATION + +kvm-$(CONFIG_KVM_HV_MMU) += mmu-e2k.o page_track.o + +endif # CONFIG_KVM_HOST_MODE + +kvm-$(CONFIG_KVM_HOST_MODE) += csd_lock.o + +kvm-$(CONFIG_PARAVIRT) += paravirt.o + +ifdef CONFIG_FUNCTION_TRACER +# Do not profile paravirtualized functions callers +CFLAGS_REMOVE_paravirt.o = -pg +endif + +obj-$(CONFIG_KVM) += kvm.o + diff --git a/arch/e2k/kvm/boot_spinlock.c b/arch/e2k/kvm/boot_spinlock.c new file mode 100644 index 000000000000..2bee370ace91 --- /dev/null +++ b/arch/e2k/kvm/boot_spinlock.c @@ -0,0 +1,789 @@ +/* + * This file implements on host the arch-dependent parts of kvm guest + * spinlock()/spinunlock() slow part + * + * Copyright 2014 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "irq.h" +#include "process.h" +#include "complete.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_DEADLOCK_MODE +#undef DebugKVMDL +#define DEBUG_KVM_DEADLOCK_MODE 0 /* spinlock deadlock debugging */ +#define DebugKVMDL(fmt, args...) \ +({ \ + if (DEBUG_KVM_DEADLOCK_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_UNLOCKED_MODE +#undef DebugKVMUN +#define DEBUG_KVM_UNLOCKED_MODE 0 /* spinlock deadlock debugging */ +#define DebugKVMUN(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_UNLOCKED_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) +#undef DEBUG_UNLOCKED_MODE +#undef DebugKVMUL +#define DEBUG_UNLOCKED_MODE 0 /* spinlock deadlock debugging */ +#define DebugKVMUL(fmt, args...) \ +({ \ + if (DEBUG_UNLOCKED_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +static bool debug_loop = false; +#undef DEBUG_KVM_LOOP_MODE +#undef DebugLOOP +#define DEBUG_KVM_LOOP_MODE 0 /* list loop debugging */ +#define DebugLOOP(fmt, args...) \ +({ \ + if (DEBUG_KVM_LOOP_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_SHUTDOWN_MODE +#undef DebugKVMSH +#define DEBUG_KVM_SHUTDOWN_MODE 0 /* KVM shutdown debugging */ +#define DebugKVMSH(fmt, args...) \ +({ \ + if (DEBUG_KVM_SHUTDOWN_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +typedef struct spinlock_waiter { + struct hlist_node wait_list; + struct completion done; + struct kvm_vcpu *vcpu; + void *lock; +} spinlock_waiter_t; + +/* + * Lock a guest spinlock, slowpath: + */ + +/* + * Probably spinlock was already unlocked, so search + * the spinlock in list of unlocked spinlocks + * spinlock hash table and unlocked list should be locked by caller + * If bool 'find' is true then function only scans list to find specified + * lock in the list of unlocked spinlocks. + * If bool 'find' is false, then the function scans list to add new entry with + * current thread. 
+ */ +static inline boot_spinlock_unlocked_t * +check_boot_spin_unlocked_list(struct kvm_vcpu *vcpu, void *lock, bool find) +{ + struct kvm *kvm = vcpu->kvm; + boot_spinlock_unlocked_t *u; + struct kvm_vcpu *vu, *vc; + int loop = 0; + + list_for_each_entry(u, &kvm->arch.boot_spinunlocked_head, + unlocked_list) { + vu = u->vcpu; + DebugKVM("next spinunlocked list entry VCPU #%d lock %px\n", + vu->vcpu_id, u->lock); + if (u->lock == lock) { + DebugKVM("spinlock %px already was unlocked\n", + lock); + if (find) { + DebugKVM("spinlock found at the unlocked " + "list entry on VCPU %d\n", + vu->vcpu_id); + return u; + } + + /* FIXME: it seems guest threads lock/unlock spins, */ + /* so their thread agents should be considered here */ + /* 1. In some case on the VCPU (it is one thread of */ + /* host) the guest thread lock spin and guest */ + /* kernel switch to other thread, which can do */ + /* lock of the same spin. It is deadlock, but host */ + /* does not detect now this case. */ + /* 2. Same as case above, but other guest thread lock */ + /* other spin. It is good case, but host try check */ + /* and update list of unlocked spins on behalf the */ + /* same host thread? 
it is not good */ + /* Probably guest thread info (gti) should be here */ + /* instead of host thread info (ti) */ + loop = 0; + list_for_each_entry(vc, &u->checked_unlocked, + arch.vcpus_to_spin) { + DebugLOOP("next spinunlocked list entry %px " + "head %px VCPU #%d lock %px\n", + u, &u->checked_unlocked, vu->vcpu_id, + u->lock); + DebugLOOP("next VCPU #%d current VCPU #%d\n", + vc->vcpu_id, vcpu->vcpu_id); + if (vc == vcpu) { + DebugLOOP("spinlock %px already was " + "checked by the VCPU #%d\n", + lock, vu->vcpu_id); + return NULL; + } + loop++; + if (loop == 50) + debug_loop = true; + else if (loop == 100) + debug_loop = false; + else if (loop > 100) + panic("infinity spinlock checked " + "threads\n"); + } + GTI_BUG_ON(!list_empty(&vcpu->arch.vcpus_to_spin)); + list_add_tail(&vcpu->arch.vcpus_to_spin, + &u->checked_unlocked); + DebugKVM("vcpu #%d is added to the list of " + "spin %px unlock checked vcpus\n", + vcpu->vcpu_id, lock); + return u; + } + } + return NULL; +} +static inline void +clear_boot_spin_unlocked_list(struct kvm_vcpu *vcpu, + boot_spinlock_unlocked_t *node) +{ + struct kvm_vcpu *v, *tmp; + + DebugKVM("started for node %px lock %px unlocked by VCPU #%d\n", + node, node->lock, node->vcpu->vcpu_id); + list_for_each_entry_safe(v, tmp, &node->checked_unlocked, + arch.vcpus_to_spin) { + DebugKVM("next VCPU #%d\n", v->vcpu_id); + /* current thread takes lock, VCPU cannot change */ + list_del_init(&v->arch.vcpus_to_spin); + } +} +static inline void +free_boot_spin_unlocked_node(struct kvm_vcpu *vcpu, boot_spinlock_unlocked_t *u) +{ + struct kvm *kvm = vcpu->kvm; + + clear_boot_spin_unlocked_list(vcpu, u); + list_move_tail(&u->unlocked_list, &kvm->arch.boot_spinunlocked_free); + + if (!list_empty(&kvm->arch.boot_spinunlocked_wait)) { + struct kvm_vcpu *v; + + u = list_first_entry(&kvm->arch.boot_spinunlocked_wait, + boot_spinlock_unlocked_t, unlocked_list); + list_del(&u->unlocked_list); + v = u->vcpu; + kvm_vcpu_wake_up(v); + DebugKVM("spinlock on VCPU 
#%d waiting for unlocked " + "spinlocks free entry is woken up\n", + v->vcpu_id); + } +} + +/* + * Queue the lock to list of waiting for wake up + * Lock kvm->arch.boot_spinlock_hash_lock should be taken by caller, + * the function will unlock the spin before calling of scheduler + * and take the spin again before returen from thr function + */ +static inline int +kvm_queue_boot_spin_lock_to_wait(struct kvm_vcpu *vcpu, void *lock, + unsigned long flags) +{ + struct kvm *kvm = vcpu->kvm; + spinlock_waiter_t waiter; + spinlock_waiter_t *w; + struct hlist_node *next; + struct hlist_head *head; + struct hlist_node *node; + bool unlocked; + + DebugKVM("started on VCPU #%d for guest lock %px (hash index 0x%02x)\n", + vcpu->vcpu_id, lock, boot_spinlock_hashfn(lock)); + + head = &kvm->arch.boot_spinlock_hash[boot_spinlock_hashfn(lock)]; + waiter.vcpu = vcpu; + waiter.lock = lock; + INIT_HLIST_NODE(&waiter.wait_list); + init_completion(&waiter.done); + + if (hlist_empty(head)) { + hlist_add_head(&waiter.wait_list, head); + DebugKVM("spinlock waitqueue is empty, add as first\n"); + } else { + /* add current thread to the end of the waitqueue */ + /* but before check that this thread and lock is not already */ + /* at waitqueue */ + hlist_for_each_safe(node, next, head) { + struct kvm_vcpu *v; + + w = hlist_entry(node, spinlock_waiter_t, wait_list); + v = w->vcpu; + DebugKVM("next spinlock waitqueue entry VCPU #%d " + "lock %px\n", + v->vcpu_id, w->lock); + while (v == vcpu) { + if (w->lock == lock) { + DebugKVMDL("VCPU #%d : same lock %px " + "detected at waitqueue %px\n", + v->vcpu_id, w->lock, head); + } else { + DebugKVMDL("VCPU #%d : other lock %px " + "(new lock %px) " + "detected at waitqueue %px\n", + v->vcpu_id, w->lock, lock, + head); + } + GTI_BUG_ON(true); + break; + } + if (next == NULL) + break; + } + hlist_add_behind(&waiter.wait_list, node); + DebugKVM("add to the end\n"); + } + w = &waiter; + + /* + * Wait for the thread will be waked up, but reasons of 
waking up + * can be a few, for example some event occurred and should be passed + * to VCPU. In our case reason of waking up must be the spin unlocking. + * It need check spinlock waitqueue after waking up to make sure that + * the thread is unqueued and + * can try lock the spin again; + * take the spin lock + * If spinlock was detected in waitqueue again and pending VIRQs flag + * is set, then interrupt the waiting and return to guest to try + * handle pending VIRQs (in this case the function return -EINTR) + */ + + GTI_BUG_ON(vcpu == NULL); + vcpu->arch.on_spinlock = true; + wmb(); /* flag should be seen before read 'on_csd_lock' or */ + /* other VCPU waiting state flags */ + do { + struct kvm_vcpu *other_vcpu; + struct kvm_vcpu *v; + int ret; + int r; + + kvm_for_each_vcpu(r, other_vcpu, kvm) { + if (other_vcpu == vcpu) + continue; + if (other_vcpu->arch.on_csd_lock) { + if (kvm_guest_vcpu_irqs_disabled(vcpu, + kvm_get_guest_vcpu_UPSR_value(vcpu), + kvm_get_guest_vcpu_PSR_value(vcpu))) { + pr_debug("%s(): VCPU #%d is waiting " + "for IPI completion VCPU #%d " + "is waiting for spinlock %px\n", + __func__, other_vcpu->vcpu_id, + vcpu->vcpu_id, lock); + } else if (!kvm_test_pending_virqs(vcpu)) { + pr_debug("%s(): VCPU #%d there is IPI " + "but none VIRQs pending flag, " + "VIRQs count %d\n", + __func__, vcpu->vcpu_id, + kvm_get_pending_virqs_num( + vcpu)); + /* kvm_print_local_APIC(vcpu); */ + } + } + } + if (DO_DUMP_VCPU(vcpu) || + kvm_test_pending_virqs(vcpu) && + !kvm_guest_vcpu_irqs_disabled(vcpu, + kvm_get_guest_vcpu_UPSR_value(vcpu), + kvm_get_guest_vcpu_PSR_value(vcpu))) { + /* there is signal to do dump guest state or */ + /* there are VIRQs to handle, goto to try handle */ + if (likely(w == &waiter)) { + /* delete waiter from list */ + hlist_del(&w->wait_list); + } + vcpu->arch.on_spinlock = false; + DebugKVM("VCPU #%d there are pending VIRQs, " + "counter %d, mask enabled, try to handle\n", + vcpu->vcpu_id, + kvm_get_pending_virqs_num(vcpu)); + return 
-EINTR; + } + if (kvm_test_pending_virqs(vcpu) && + kvm_guest_vcpu_irqs_disabled(vcpu, + kvm_get_guest_vcpu_UPSR_value(vcpu), + kvm_get_guest_vcpu_PSR_value(vcpu))) { + DebugKVM("VCPU #%d there are pending VIRQs, counter " + "%d, mask disabled\n", + vcpu->vcpu_id, + kvm_get_pending_virqs_num(vcpu)); + } + raw_spin_unlock_irqrestore(&kvm->arch.boot_spinlock_hash_lock, + flags); + DebugKVM("go to wait for completion\n"); + ret = kvm_wait_for_completion_interruptible(&w->done); + DebugKVM("waiting for completion terminated with %d\n", ret); + + if (kvm->arch.boot_spinlock_hash_disable || + ret == -ERESTARTSYS) { + DebugKVMSH("guest spinlock disabled or fatal signal: " + "exit from process\n"); + kvm_spare_host_vcpu_release(vcpu); + do_exit(ret); + } + raw_spin_lock_irqsave(&kvm->arch.boot_spinlock_hash_lock, + flags); + /* search thread at spinlock waitqueue */ + unlocked = true; + hlist_for_each_entry(w, head, wait_list) { + if (w == &waiter) { + unlocked = false; + v = w->vcpu; + break; + } + } + if (!unlocked && ret == 0) { + pr_err("%s(): VCPU #%d lock %px = 0x%lx detected " + "at spinlock waitqueue, when waiting was " + "completed\n", + __func__, v->vcpu_id, w->lock, + (IS_HOST_KERNEL_ADDRESS((e2k_addr_t)w->lock)) ? + (long)w->lock : *(long *)w->lock); + vcpu->arch.on_spinlock = false; + return -EINVAL; + } else if (!unlocked) { + DebugKVM("VCPU #%d lock %px = 0x%lx detected " + "at spinlock waitqueue, waiting was " + "interrupted ret = %d, so continue waiting\n", + v->vcpu_id, w->lock, + (IS_HOST_KERNEL_ADDRESS((e2k_addr_t)w->lock)) ? 
+ (long)w->lock : *(long *)w->lock, + ret); + } else if (unlocked) { + v = waiter.vcpu; + DebugKVM("VCPU #%d is not detected at spinlock " + "waitqueue, so complete waiting\n", + v->vcpu_id); + break; + } + } while (!unlocked); + vcpu->arch.on_spinlock = false; + + DebugKVM("%s (%d) is woken up, return to guest\n", + current->comm, current->pid); + return 0; +} + +int kvm_boot_spin_lock_slow(struct kvm_vcpu *vcpu, void *lock, + bool check_unlock) +{ + struct kvm *kvm = vcpu->kvm; + boot_spinlock_unlocked_t *u; + struct kvm_vcpu *v; + unsigned long flags; + int ret; + + raw_spin_lock_irqsave(&kvm->arch.boot_spinlock_hash_lock, flags); + DebugKVMUL("%s (%d) VCPU #%d lock %px started\n", + current->comm, current->pid, vcpu->vcpu_id, lock); + + /* probably spinlock was already unlocked, so first search our */ + /* spinlock in list of unlocked spinlocks */ + if (check_unlock) { + if (check_boot_spin_unlocked_list(vcpu, lock, false) != NULL) { + raw_spin_unlock_irqrestore( + &kvm->arch.boot_spinlock_hash_lock, flags); + DebugKVM("spinlock %px already was unlocked, return to " + "try get locking\n", + lock); + return 0; + } + } else { + u = check_boot_spin_unlocked_list(vcpu, lock, true); + if (u != NULL) { + v = u->vcpu; + /* lock is found as already unlocked */ + /* wake up unlocking process to wake up all process */ + /* waiting for the lock */ + DebugKVMUL("%s (%d) lock %px will wake up unlocking " + "VCPU #%d\n", + current->comm, current->pid, lock, v->vcpu_id); + kvm_vcpu_wake_up(v); + } + } + + /* spinlock was not unlocked, so add our process to waitqueue */ + ret = kvm_queue_boot_spin_lock_to_wait(vcpu, lock, flags); + if (ret == -EINTR) { + DebugKVMDL("%s (%d) VCPU has pending VIRQs, return " + "to guest to try handle it\n", + current->comm, current->pid); + } + + raw_spin_unlock_irqrestore(&kvm->arch.boot_spinlock_hash_lock, flags); + + return ret; +} + +/* + * Guest locked spinlock, slowpath: + */ + +int kvm_boot_spin_locked_slow(struct kvm_vcpu *vcpu, void 
*lock) +{ + struct kvm *kvm = vcpu->kvm; + boot_spinlock_unlocked_t *u; + unsigned long flags; + int ret; + + DebugKVM("%s (%d) started for guest lock %px (hash index 0x%02x)\n", + current->comm, current->pid, lock, boot_spinlock_hashfn(lock)); + + raw_spin_lock_irqsave(&kvm->arch.boot_spinlock_hash_lock, flags); + + /* search spinlock at the list of unlocked spinlocks */ + do { + u = check_boot_spin_unlocked_list(vcpu, lock, true); + if (likely(u != NULL)) + break; + DebugKVMDL("%s (%d) could not find lock at the list of " + "unlocked spinloks\n", + current->comm, current->pid); + /* lock was not yet queued to waitqueue */ + /* so add our process to waitqueue and wait for wake up */ + /* to try find the lock in the list of unlocked spinlocks */ + ret = kvm_queue_boot_spin_lock_to_wait(vcpu, lock, flags); + if (ret && ret != -EINTR) { + pr_err("%s(): queue spinlock to waitqueue list failed " + "with error %d, abort the process %s (%d)\n", + __func__, ret, current->comm, current->pid); + raw_spin_unlock_irqrestore( + &kvm->arch.boot_spinlock_hash_lock, flags); + do_exit(ret); + } else if (ret == -EINTR) { + DebugKVMDL("%s (%d) VCPU has pending VIRQs, return " + "to guest to try handle it\n", + current->comm, current->pid); + raw_spin_unlock_irqrestore( + &kvm->arch.boot_spinlock_hash_lock, flags); + return ret; + } + } while (u == NULL); + + free_boot_spin_unlocked_node(vcpu, u); + + raw_spin_unlock_irqrestore(&kvm->arch.boot_spinlock_hash_lock, flags); + DebugKVM("unlocked spinlock %px move to free list\n", lock); + return 0; +} + +/* + * Add the spinlock to the list of unlocked spinlocks, + * because of unlocking can outrun locking process, which can in progress + * and will be waiting for unlocking at any time + * Spinlock should be taken + */ +static boot_spinlock_unlocked_t * +add_guest_boot_spin_as_unlocked(struct kvm_vcpu *vcpu, void *lock, + bool add_to_unlock, unsigned long flags) +{ + struct kvm *kvm = vcpu->kvm; + boot_spinlock_unlocked_t *u = NULL; + 
struct kvm_vcpu *v; + + DebugKVM("%s (%d) started for guest lock %px\n", + current->comm, current->pid, lock); + + do { + u = check_boot_spin_unlocked_list(vcpu, lock, true); + if (u != NULL) { + if (add_to_unlock) { + pr_err("%s() lock %px detected at unlocked " + "list WHY ???\n", + __func__, lock); + return ERR_PTR(-EINVAL); + } + v = u->vcpu; + DebugKVMUN("guest lock %px already detected at " + "unlocked list, queued by VCPU #%d\n", + lock, v->vcpu_id); + return NULL; + } + + if (list_empty(&kvm->arch.boot_spinunlocked_free)) { + boot_spinlock_unlocked_t unlock_waiter; + + pr_warning("kvm_boot_spin_unlock_slow() overflow " + "of list of unlocked spinlocks\n"); + unlock_waiter.vcpu = vcpu; + unlock_waiter.lock = lock; + INIT_LIST_HEAD(&unlock_waiter.unlocked_list); + list_add_tail(&unlock_waiter.unlocked_list, + &kvm->arch.boot_spinunlocked_wait); + set_current_state(TASK_INTERRUPTIBLE); + raw_spin_unlock_irqrestore( + &kvm->arch.boot_spinlock_hash_lock, flags); + + DebugKVM("go to schedule and wait for wake up\n"); + schedule(); + __set_current_state(TASK_RUNNING); + if (kvm->arch.boot_spinlock_hash_disable || + fatal_signal_pending(current)) { + DebugKVMSH("guest spinlock disabled or fatal " + "signal: exit from process\n"); + kvm_spare_host_vcpu_release(vcpu); + do_exit(0); + } + raw_spin_lock_irqsave( + &kvm->arch.boot_spinlock_hash_lock, flags); + } else { + break; + } + } while (u == NULL); + + u = list_first_entry(&kvm->arch.boot_spinunlocked_free, + boot_spinlock_unlocked_t, unlocked_list); + list_move_tail(&u->unlocked_list, &kvm->arch.boot_spinunlocked_head); + u->vcpu = vcpu; + u->lock = lock; + INIT_LIST_HEAD(&u->checked_unlocked); + DebugKVM("add spinlock %s (%d) VCPU #%d lock %px to the list of " + "unlocked\n", + current->comm, current->pid, u->vcpu->vcpu_id, u->lock); + return u; +} + +/* + * Unlock a guest spinlock, slowpath: + */ + +int kvm_boot_spin_unlock_slow(struct kvm_vcpu *vcpu, void *lock, + bool add_to_unlock) +{ + struct kvm *kvm = 
vcpu->kvm; + boot_spinlock_unlocked_t *u; + spinlock_waiter_t *w; + struct hlist_head *head; + struct hlist_node *tmp; + struct kvm_vcpu *v; + int unlocked = 0; + unsigned long flags; + + DebugKVMUN("%s (%d) started for guest lock %px (hash index 0x%02x)\n", + current->comm, current->pid, lock, boot_spinlock_hashfn(lock)); + + raw_spin_lock_irqsave(&kvm->arch.boot_spinlock_hash_lock, flags); + + /* at first add our spinlock to the list of unlocked spinlocks, */ + /* because of unlocking can outrun locking process which is */ + /* trying to enable lock */ + if (add_to_unlock) { + u = add_guest_boot_spin_as_unlocked(vcpu, lock, true, flags); + if (IS_ERR(u)) { + raw_spin_unlock_irqrestore( + &kvm->arch.boot_spinlock_hash_lock, flags); + return PTR_ERR(u); + } + } else { + u = NULL; + } + + head = &kvm->arch.boot_spinlock_hash[boot_spinlock_hashfn(lock)]; + +waking_up: + if (hlist_empty(head)) { + DebugKVMUN("spinlock waitqueue is empty\n"); + goto not_found; + } + /* find all task waiting for this spinlock and wake up its */ + hlist_for_each_entry_safe(w, tmp, head, wait_list) { + v = w->vcpu; + DebugKVMUN("next spinlock waitqueue entry VCPU #%d lock %px\n", + v->vcpu_id, w->lock); + if (w->lock != lock) + continue; + hlist_del(&w->wait_list); + if (unlikely(completion_done(&w->done))) { + pr_err("%s(): VCPU #%d waiting for unlock is already " + "completed\n", + __func__, v->vcpu_id); + } + complete(&w->done); + DebugKVMUN("spin unlocked and VCPU #%d is woken up\n", + v->vcpu_id); + unlocked++; + } + +not_found: + if (unlikely(!add_to_unlock && unlocked == 0)) { + boot_spinlock_unlocked_t *u_new; + + /* could not find any waiting for spin unlocking process, */ + /* so unlocking is first and locking process are in progress */ + /* It need wait for locking process in wait list queue */ + if (u == NULL) { + u_new = add_guest_boot_spin_as_unlocked(vcpu, lock, + false, flags); + if (unlikely(IS_ERR(u_new))) { + raw_spin_unlock_irqrestore( + 
&kvm->arch.boot_spinlock_hash_lock, + flags); + return PTR_ERR(u); + } else if (u_new == NULL) { + /* there is already unlocking process for */ + /* this lock, it need not second same process */ + goto done; + } + u = u_new; + v = u->vcpu; + DebugKVMUN("spin lock %px is queued as unlocked " + "by VCPU #%d\n", + lock, v->vcpu_id); + } + /* the process should wait for any locking process */ + /* which will detect the spin as unlocked and wake up */ + /* this process to restart waking up of all waiting for */ + /* the lock processes */ + set_current_state(TASK_INTERRUPTIBLE); + DebugKVMUN("%s (%d) lock %px go to schedule and wait for " + "wake up by locking process\n", + current->comm, current->pid, lock); + raw_spin_unlock_irqrestore( + &kvm->arch.boot_spinlock_hash_lock, flags); + + schedule(); + __set_current_state(TASK_RUNNING); + if (kvm->arch.boot_spinlock_hash_disable || + fatal_signal_pending(current)) { + goto signaled; + } + raw_spin_lock_irqsave(&kvm->arch.boot_spinlock_hash_lock, + flags); + DebugKVMUN("%s (%d) lock %px is waked up by locking process\n", + current->comm, current->pid, lock); + goto waking_up; + } +done: + if (!add_to_unlock && u != NULL) { + free_boot_spin_unlocked_node(vcpu, u); + DebugKVMUN("%s (%d) lock %px is deleted from unlocking queue\n", + current->comm, current->pid, lock); + } + raw_spin_unlock_irqrestore(&kvm->arch.boot_spinlock_hash_lock, flags); + + if (unlocked > 0) + cond_resched(); + + DebugKVMUN("%s (%d) completed for guest lock %px, unlocked %d\n", + current->comm, current->pid, lock, unlocked); + return 0; + +signaled: + DebugKVMSH("guest spinlock disabled or fatal signal: " + "exit from process\n"); + kvm_spare_host_vcpu_release(vcpu); + do_exit(0); + return 0; +} + +int kvm_boot_spinlock_init(struct kvm *kvm) +{ + boot_spinlock_unlocked_t *u; + int i; + + for (i = 0; i < BOOT_SPINLOCK_HASH_SIZE; i++) + INIT_HLIST_HEAD(&kvm->arch.boot_spinlock_hash[i]); + INIT_LIST_HEAD(&kvm->arch.boot_spinunlocked_head); + 
INIT_LIST_HEAD(&kvm->arch.boot_spinunlocked_free); + INIT_LIST_HEAD(&kvm->arch.boot_spinunlocked_wait); + for (i = 0; i < BOOT_SPINUNLOCKED_LIST_SIZE; i++) { + u = &kvm->arch.boot_spinunlocked_list[i]; + INIT_LIST_HEAD(&u->unlocked_list); + list_add_tail(&u->unlocked_list, + &kvm->arch.boot_spinunlocked_free); + } + kvm->arch.boot_spinlock_hash_lock = + __RAW_SPIN_LOCK_UNLOCKED(kvm->arch.boot_spinlock_hash_lock); + kvm->arch.boot_spinlock_hash_disable = false; + return 0; +} + +static void destroy_boot_spinlock_list(struct hlist_head *head) +{ + spinlock_waiter_t *w; + struct hlist_node *tmp; + struct kvm_vcpu *v; + + hlist_for_each_entry_safe(w, tmp, head, wait_list) { + v = w->vcpu; + DebugKVM("next spinlock waitqueue entry VCPU #%d lock %px\n", + v->vcpu_id, w->lock); + hlist_del(&w->wait_list); + kvm_vcpu_wake_up(v); + } +} +void kvm_boot_spinlock_destroy(struct kvm *kvm) +{ + boot_spinlock_unlocked_t *u; + boot_spinlock_unlocked_t *tmp; + struct hlist_head *head; + struct kvm_vcpu *v; + unsigned long flags; + int i; + + DebugKVM("started\n"); + + raw_spin_lock_irqsave(&kvm->arch.boot_spinlock_hash_lock, flags); + kvm->arch.boot_spinlock_hash_disable = true; + for (i = 0; i < BOOT_SPINLOCK_HASH_SIZE; i++) { + head = &kvm->arch.boot_spinlock_hash[i]; + if (hlist_empty(head)) { + DebugKVM("hash index 0x%02x: waitqueue is empty\n", i); + continue; + } + DebugKVM("hash index 0x%02x waitqueue is not empty\n", i); + destroy_boot_spinlock_list(head); + } + list_for_each_entry_safe(u, tmp, &kvm->arch.boot_spinunlocked_head, + unlocked_list) { + v = u->vcpu; + DebugKVM("next spin unlocked list entry VCPU #%d lock %px\n", + v->vcpu_id, u->lock); + list_del(&u->unlocked_list); + } + list_for_each_entry_safe(u, tmp, &kvm->arch.boot_spinunlocked_free, + unlocked_list) { + DebugKVM("next spin unlocked free entry %px\n", u); + list_del(&u->unlocked_list); + } + list_for_each_entry_safe(u, tmp, &kvm->arch.boot_spinunlocked_wait, + unlocked_list) { + v = u->vcpu; + 
DebugKVM("next spin unlocked waiting list entry VCPU #%d " + "lock %px\n", + v->vcpu_id, u->lock); + list_del(&u->unlocked_list); + kvm_vcpu_wake_up(v); + } + raw_spin_unlock_irqrestore(&kvm->arch.boot_spinlock_hash_lock, flags); +} diff --git a/arch/e2k/kvm/cepic.c b/arch/e2k/kvm/cepic.c new file mode 100644 index 000000000000..cd76a5177f70 --- /dev/null +++ b/arch/e2k/kvm/cepic.c @@ -0,0 +1,817 @@ +/* + * CEPIC virtualization + * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation. + * Based on arch/x86/kvm/cepic.c code + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef DEBUG + +#include "ioapic.h" +#include "ioepic.h" +#include "irq.h" +#include "cepic.h" + +#define mod_64(x, y) ((x) % (y)) + +#define PRId64 "d" +#define PRIx64 "llx" +#define PRIu64 "u" +#define PRIo64 "o" + +#ifdef DEBUG +#define epic_debug(fmt, arg...) pr_warn(fmt, ##arg) +#define epic_reg_debug(fmt, arg...) pr_warn(fmt, ##arg) +#else /* ! DEBUG */ +#define epic_debug(fmt, arg...) +#define epic_reg_debug(fmt, arg...) +#endif /* DEBUG */ + +#undef DEBUG_KVM_IRQ_MODE +#undef DebugKVMIRQ +#define DEBUG_KVM_IRQ_MODE 0 /* kernel EPIC IRQs debugging */ +#define DebugKVMIRQ(fmt, args...) \ +({ \ + if (DEBUG_KVM_IRQ_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_TIMER_MODE +#undef DebugKVMTM +#define DEBUG_KVM_TIMER_MODE 0 /* kernel epic timer debugging */ +#define DebugKVMTM(fmt, args...) \ +({ \ + if (DEBUG_KVM_TIMER_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_EPIC_TIMER_MODE +#undef DebugKVMAT +#define DEBUG_KVM_EPIC_TIMER_MODE 0 /* KVM CEPIC timer debugging */ +#define DebugKVMAT(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_EPIC_TIMER_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_VIRQs_MODE +#undef DebugVIRQs +#define DEBUG_KVM_VIRQs_MODE 0 /* VIRQs debugging */ +#define DebugVIRQs(fmt, args...) \ +({ \ + if (DEBUG_KVM_VIRQs_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_SHOW_GUEST_STACKS_MODE +#undef DebugGST +#define DEBUG_SHOW_GUEST_STACKS_MODE true /* show all guest stacks */ +#define DebugGST(fmt, args...) \ +({ \ + if (DEBUG_SHOW_GUEST_STACKS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#define CEPIC_MMIO_LENGTH (4 * PAGE_SIZE) +/* followed define is not in epicdef.h */ +#define EPIC_SHORT_MASK 0xc0000 +#define EPIC_DEST_NOSHORT 0x0 +#define EPIC_DEST_MASK 0x800 +#define MAX_EPIC_VECTOR 1024 + +static inline u32 epic_get_reg_w(struct kvm_cepic *epic, int reg_off) +{ + epic_reg_debug("%s(0x%x) = 0x%x from %px\n", + __func__, reg_off, *((u32 *) (epic->regs + reg_off)), + ((u32 *) (epic->regs + reg_off))); + return *((u32 *) (epic->regs + reg_off)); +} + +static inline void epic_set_reg_w(struct kvm_cepic *epic, int reg_off, u32 val) +{ + *((u32 *) (epic->regs + reg_off)) = val; + epic_reg_debug("%s(0x%x) = 0x%x to %px\n", + __func__, reg_off, *((u32 *) (epic->regs + reg_off)), + ((u32 *) (epic->regs + reg_off))); +} + +static inline u64 epic_get_reg_d(struct kvm_cepic *epic, int reg_off) +{ + epic_reg_debug("%s(0x%x) = 0x%x from %px\n", + __func__, reg_off, *((u64 *) (epic->regs + reg_off)), + ((u64 *) (epic->regs + reg_off))); + return *((u64 *) (epic->regs + reg_off)); +} + +static inline void epic_set_reg_d(struct kvm_cepic *epic, int reg_off, u32 val) +{ + *((u64 *) (epic->regs + reg_off)) = val; + epic_reg_debug("%s(0x%x) = 0x%x to %px\n", + __func__, reg_off, *((u64 *) (epic->regs + reg_off)), + ((u64 *) (epic->regs + reg_off))); +} + +static inline int epic_hw_enabled(struct kvm_cepic *epic) +{ + return (epic)->vcpu->arch.epic_base == EPIC_DEFAULT_PHYS_BASE; +} + +static inline 
int epic_sw_enabled(struct kvm_cepic *epic) +{ + union cepic_ctrl reg; + + reg.raw = epic_get_reg_w(epic, CEPIC_CTRL); + + return reg.bits.soft_en; +} + +static inline int epic_enabled(struct kvm_cepic *epic) +{ + return epic_sw_enabled(epic) && epic_hw_enabled(epic); +} + +int kvm_epic_id(struct kvm_cepic *epic) +{ + return epic_get_reg_w(epic, CEPIC_ID); +} + +static inline int epic_lvtt_enabled(struct kvm_cepic *epic) +{ + union cepic_timer_lvtt reg; + + reg.raw = epic_get_reg_w(epic, CEPIC_TIMER_LVTT); + + return !(reg.bits.mask); +} + +static inline int epic_lvtt_period(struct kvm_cepic *epic) +{ + union cepic_timer_lvtt reg; + + reg.raw = epic_get_reg_w(epic, CEPIC_TIMER_LVTT); + + return reg.bits.mode; +} + +static inline int epic_test_and_set_irr(int vec, struct kvm_cepic *epic) +{ + u64 *cepic_pmirr = epic->regs + CEPIC_PMIRR; + + epic->irr_pending = true; + + return test_and_set_bit(vec & 0x3f, (void *)&cepic_pmirr[vec >> 6]); +} + +static inline int epic_search_irr(struct kvm_cepic *epic) +{ + u64 *cepic_pmirr = epic->regs + CEPIC_PMIRR; + int reg_num; + + for (reg_num = CEPIC_PMIRR_NR_DREGS - 1; reg_num >= 0; reg_num--) + if (cepic_pmirr[reg_num]) + return fls64(cepic_pmirr[reg_num]) - 1 + (reg_num << 6); + + return -1; +} + +static inline int epic_find_highest_irr(struct kvm_cepic *epic) +{ + int result; + + if (epic->irr_pending) { + result = epic_search_irr(epic); + if (result == -1) { + pr_warn("CEPIC fixing incorrect irr_pending\n"); + epic->irr_pending = false; + } + return result; + } + + return -1; +} + +static inline void epic_clear_irr(int vec, struct kvm_cepic *epic) +{ + u64 *cepic_pmirr = epic->regs + CEPIC_PMIRR; + + clear_bit(vec & 0x3f, (void *)&cepic_pmirr[vec >> 6]); + + if (epic_search_irr(epic) == -1) + epic->irr_pending = false; +} + +static int __epic_accept_irq(struct kvm_cepic *epic, int delivery_mode, + int vector, int trig_mode); + +int kvm_epic_set_irq(struct kvm_vcpu *vcpu, struct kvm_cepic_irq *irq) +{ + struct kvm_cepic 
*epic = vcpu->arch.epic; + + DebugKVMIRQ("started for VCPU #%d vector 0x%x\n", + vcpu->vcpu_id, irq->vector); + return __epic_accept_irq(epic, irq->delivery_mode, irq->vector, + irq->trig_mode); +} + +/* + * Add a pending IRQ into cepic. + * Return 1 if successfully added and 0 if discarded. + */ +static int __epic_accept_irq(struct kvm_cepic *epic, int delivery_mode, + int vector, int trig_mode) +{ + int result = 0; + unsigned int reg_cir; + struct kvm_vcpu *vcpu = epic->vcpu; + + DebugKVMAT("started for VCPU #%d vector 0x%x, dlvm %d trigger %d\n", + epic->vcpu->vcpu_id, vector, delivery_mode, trig_mode); + switch (delivery_mode) { + case CEPIC_ICR_DLVM_FIXED_EXT: + case CEPIC_ICR_DLVM_FIXED_IPI: + DebugKVMAT("delivery mode is CEPIC_DLVM_FIXED\n"); + if (unlikely(!epic_enabled(epic))) + break; + + reg_cir = epic_get_reg_w(epic, CEPIC_CIR); + if (reg_cir == 0) { + epic_set_reg_w(epic, CEPIC_CIR, vector); + kvm_inject_cepic_virq(epic); + result = 1; + } else { + /* + * Save it on PMIRR. VIRQ will be injected later, + * in CEPIC_EOI + */ + result = !epic_test_and_set_irr(vector, epic); + if (!result) + DebugVIRQs("CEPIC #%d vector 0x%x already set in PMIRR. 
Lost interrupt\n", + vcpu->vcpu_id, vector); + } + + trace_kvm_epic_accept_irq(vcpu->vcpu_id, delivery_mode, + trig_mode, vector, !result); + break; + case CEPIC_ICR_DLVM_NMI: + DebugKVMAT("delivery mode is CEPIC_DLVM_NMI\n"); + result = 1; + kvm_inject_nmi(vcpu); + kvm_vcpu_kick(vcpu); + break; + default: + pr_err("TODO: unsupported delivery mode %x\n", + delivery_mode); + break; + } + return result; +} + +static void epic_send_ipi(struct kvm_cepic *epic) +{ + union cepic_icr reg_icr; + struct kvm_cepic_irq irq; + + reg_icr.raw = epic_get_reg_d(epic, CEPIC_ICR); + + irq.vector = reg_icr.bits.vect; + irq.delivery_mode = reg_icr.bits.dlvm; + irq.trig_mode = 0; /* Edge */ + irq.shorthand = reg_icr.bits.dst_sh; + irq.dest_id = reg_icr.bits.dst; + + trace_kvm_epic_ipi(irq.dest_id, irq.vector); + + epic_debug("cepic_icr 0x%lx, short_hand 0x%x, dest 0x%x, trig_mode 0x%x, delivery_mode 0x%x, vector 0x%x\n", + reg_icr.raw, irq.shorthand, irq.dest_id, + irq.trig_mode, irq.delivery_mode, + irq.vector); + + kvm_irq_delivery_to_epic(epic->vcpu->kvm, kvm_epic_id(epic), &irq); +} + +int kvm_epic_inta(struct kvm_vcpu *vcpu) +{ + struct kvm_cepic *epic = vcpu->arch.epic; + int vector; + unsigned int cpr; + union cepic_vect_inta reg_inta; + + if (!epic || !epic_enabled(epic)) + return -1; + + /* Read CEPIC_INTA */ + vector = epic_get_reg_w(epic, CEPIC_CIR); + cpr = epic_get_reg_w(epic, CEPIC_CPR); + + /* Update CEPIC_CPR. 
CEPIC_CIR and CEPIC_PMIRR are updated in EOI */ + epic_set_reg_w(epic, CEPIC_CPR, vector & 0x300); + + DebugKVMAT("vector is 0x%x\n", vector); + if (vector == -1) + return -1; + + epic_debug("kvm_get_epic_interrupt() vector is 0x%x\n", vector); + + reg_inta.raw = 0; + reg_inta.bits.vect = vector; + reg_inta.bits.cpr = cpr; + + return reg_inta.raw; +} + +static u32 __epic_read(struct kvm_cepic *epic, unsigned int offset) +{ + u32 val = 0xffffffff; + + switch (offset) { + case CEPIC_VECT_INTA: + val = kvm_epic_inta(epic->vcpu); + break; + default: + val = epic_get_reg_w(epic, offset); + break; + } + + return val; +} + +static inline struct kvm_cepic *to_cepic(struct kvm_io_device *dev) +{ + return container_of(dev, struct kvm_cepic, dev); +} + +static void epic_reg_read(struct kvm_cepic *epic, u32 offset, int len, + void *data) +{ + if ((len != 4) || (len != 8)) { + epic_debug("KVM_EPIC_READ: unsupported len %d offset %x\n", + len, offset); + *(unsigned int *)data = -1UL; + return; + } + + if (len == 4) { + unsigned int result; + + /* Do not model accesses to PREPIC */ + if (offset < PAGE_SIZE) + result = __epic_read(epic, offset); + else + result = 0; + + trace_kvm_epic_read_w(offset, result); + + memcpy(data, (unsigned int *)&result, len); + } else { + unsigned long result; + + /* Do not model accesses to PREPIC */ + if (offset < PAGE_SIZE) + result = epic_get_reg_d(epic, offset); + else + result = 0; + + trace_kvm_epic_read_d(offset, result); + + memcpy(data, (unsigned long *)&result, len); + } +} + +static int epic_mmio_in_range(struct kvm_cepic *epic, gpa_t addr) +{ + return epic_hw_enabled(epic) && + addr >= epic->base_address && + addr < epic->base_address + CEPIC_MMIO_LENGTH; +} + +static int epic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this, + gpa_t address, int len, void *data) +{ + struct kvm_cepic *epic = to_cepic(this); + unsigned long int offset = address - epic->base_address; + + epic_reg_debug("started to mmio read address 0x%lx, offset 
0x%lx,len %d to %px\n", + address, offset, len, data); + if (!epic_mmio_in_range(epic, address)) + return -EOPNOTSUPP; + + epic_reg_read(epic, offset, len, data); + epic_reg_debug("mmio read data 0x%lx\n", *(u64 *)data); + + return 0; +} + +static void start_epic_timer(struct kvm_cepic *epic) +{ + ktime_t now = epic->cepic_timer.timer.base->get_time(); + long period; + long cycles; + + hrtimer_cancel(&epic->cepic_timer.timer); + + cycles = get_cycles(); + period = (u64)epic_get_reg_w(epic, CEPIC_TIMER_INIT) * NSEC_PER_SEC / + epic->cepic_freq; + DebugKVMTM("CEPIC_TIMER_INIT 0x%x period 0x%lx cpu_freq_hz 0x%x cycles 0x%lx\n", + epic_get_reg_w(epic, CEPIC_TIMER_INIT), + period, cpu_freq_hz, cycles); + if (unlikely(!epic->cepic_timer.started)) + epic->cepic_timer.started = true; + atomic_set(&epic->cepic_timer.pending, 0); + + if (period == 0) { + epic->cepic_timer.period = 0; + return; + } + + if (epic_get_reg_w(epic, CEPIC_TIMER_DIV) != CEPIC_TIMER_DIV_1) { + pr_warn("ERROR: CEPIC timer div != 1 not supported\n"); + return; + } + + /* + * Do not allow the guest to program periodic timers with small + * interval, since the hrtimers are not throttled by the host + * scheduler. 
+ */ + if (epic_lvtt_period(epic)) { + if (period < NSEC_PER_MSEC/2) + period = NSEC_PER_MSEC/2; + } + +again: + if (!hrtimer_active(&epic->cepic_timer.timer)) { + epic->cepic_timer.period = period; + cycles = get_cycles(); + hrtimer_start(&epic->cepic_timer.timer, + ktime_add_ns(now, period), + HRTIMER_MODE_ABS); + epic->cepic_timer.running_time = + kvm_get_guest_vcpu_running_time(epic->vcpu); + DebugKVMTM("started cepic hrtimer now 0x%llx period 0x%lx running time 0x%llx, cycles 0x%lx\n", + ktime_to_ns(now), period, + epic->cepic_timer.running_time, cycles); + } else if (hrtimer_callback_running(&epic->cepic_timer.timer)) { + BUG_ON(epic->cepic_timer.period != 0); + cycles = get_cycles(); + hrtimer_add_expires_ns(&epic->cepic_timer.timer, period); + epic->cepic_timer.period = period; + epic->cepic_timer.running_time = + kvm_get_guest_vcpu_running_time(epic->vcpu); + DebugKVMTM("restarted cepic hrtimer now 0x%llx period 0x%lx running time 0x%llx, cycles 0x%lx\n", + ktime_to_ns(now), period, + epic->cepic_timer.running_time, cycles); + } else { + /* timer is active probably is completing, so waiting */ + DebugKVMTM("hrtimer is completing, small waiting\n"); + cpu_relax(); + goto again; + } +} + +/* Returns 0 if nothing is waiting on CEPIC_PMIRR, 1 otherwise */ +static void epic_check_pmirr(struct kvm_cepic *epic) +{ + int max_irr; + + max_irr = epic_find_highest_irr(epic); + + if (max_irr == -1) { + epic_set_reg_w(epic, CEPIC_CIR, 0); + } else { + epic_clear_irr(max_irr, epic); + epic_set_reg_w(epic, CEPIC_CIR, max_irr); + kvm_inject_cepic_virq(epic); + } +} + +static void epic_write_eoi(struct kvm_cepic *epic, u32 val) +{ + unsigned int vector = epic_get_reg_w(epic, CEPIC_CIR); + union cepic_eoi reg_eoi; + union cepic_cpr reg_cpr; + + reg_eoi.raw = val; + reg_cpr.raw = 0; + + reg_cpr.bits.cpr = reg_eoi.bits.rcpr; + + epic_set_reg_w(epic, CEPIC_CPR, reg_cpr.raw); + + kvm_ioepic_update_eoi(epic->vcpu, vector, 1); + + epic_check_pmirr(epic); + + 
trace_kvm_epic_eoi(vector); +} + +static void epic_reg_write_w(struct kvm_cepic *epic, u32 reg, u32 val) +{ + epic_set_reg_w(epic, reg, val); + + switch (reg) { + case CEPIC_EOI: + epic_write_eoi(epic, val); + break; + case CEPIC_TIMER_INIT: + start_epic_timer(epic); + break; + case CEPIC_ICR: + epic_send_ipi(epic); + break; + } +} + +static void epic_reg_write_d(struct kvm_cepic *epic, u32 reg, u32 val) +{ + epic_set_reg_d(epic, reg, val); + + if (reg == CEPIC_ICR) + epic_send_ipi(epic); +} + +static int epic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, + gpa_t address, int len, const void *data) +{ + struct kvm_cepic *epic = to_cepic(this); + unsigned int offset = address - epic->base_address; + + if (!epic_mmio_in_range(epic, address)) + return -EOPNOTSUPP; + + if (len != 4 && len != 8) { + epic_debug("epic write: bad size=%d %lx\n", len, (long)address); + return 0; + } + + epic_debug("%s: offset 0x%x with length 0x%x, and value is 0x%lx\n", + __func__, offset, len, val); + + /* Do not model accesses to PREPIC regs */ + if (offset < PAGE_SIZE) + if (len == 4) { + u32 val; + val = *(u32 *)data; + + trace_kvm_epic_write_w(offset, val); + epic_reg_write_w(epic, offset, val); + } else { + u64 val; + val = *(u64 *)data; + + trace_kvm_epic_write_d(offset, val); + epic_reg_write_d(epic, offset, val); + } + + return 0; +} + +void kvm_free_cepic(struct kvm_vcpu *vcpu) +{ + if (!vcpu->arch.epic) + return; + + hrtimer_cancel(&vcpu->arch.epic->cepic_timer.timer); + + if (vcpu->arch.epic->regs_page) + __free_page(vcpu->arch.epic->regs_page); + + kfree(vcpu->arch.epic); + vcpu->arch.epic = NULL; +} + +/* + *---------------------------------------------------------------------- + * CEPIC interface + *---------------------------------------------------------------------- + */ + +/* Initialize all registers, as if this were the reset state on host */ +void kvm_cepic_reset(struct kvm_vcpu *vcpu) +{ + struct kvm_cepic *epic; + int i; + union cepic_ctrl reg_ctrl; + 
+ epic_debug("%s\n", __func__); + + ASSERT(vcpu); + epic = vcpu->arch.epic; + ASSERT(epic != NULL); + + /* Stop the timer in case it's a reset to an active epic */ + hrtimer_cancel(&epic->cepic_timer.timer); + + /* Initialize all registers as on CEPIC reset */ + reg_ctrl.raw = 0; + reg_ctrl.bits.bsp_core = kvm_vcpu_is_bsp(vcpu); + epic_set_reg_w(epic, CEPIC_CTRL, reg_ctrl.raw); + + epic_set_reg_w(epic, CEPIC_ID, cepic_id_short_to_full(vcpu->vcpu_id)); + epic_set_reg_w(epic, CEPIC_CPR, 0); + epic_set_reg_w(epic, CEPIC_ESR, 0); + epic_set_reg_w(epic, CEPIC_ESR2, 0); + epic_set_reg_w(epic, CEPIC_EOI, 0); + epic_set_reg_w(epic, CEPIC_CIR, 0); + for (i = 0; i < CEPIC_PMIRR_NR_DREGS; i++) + epic_set_reg_d(epic, CEPIC_PMIRR + i * 8, 0); + epic_set_reg_w(epic, CEPIC_PNMIRR, 0); + epic_set_reg_d(epic, CEPIC_ICR, 0); + epic_set_reg_w(epic, CEPIC_TIMER_LVTT, 0); + epic_set_reg_w(epic, CEPIC_TIMER_INIT, 0); + epic_set_reg_w(epic, CEPIC_TIMER_CUR, 0); + epic_set_reg_w(epic, CEPIC_TIMER_DIV, 0); + epic_set_reg_w(epic, CEPIC_NM_TIMER_LVTT, 0); + epic_set_reg_w(epic, CEPIC_NM_TIMER_INIT, 0); + epic_set_reg_w(epic, CEPIC_NM_TIMER_CUR, 0); + epic_set_reg_w(epic, CEPIC_NM_TIMER_DIV, 0); + epic_set_reg_w(epic, CEPIC_SVR, 0); + epic_set_reg_w(epic, CEPIC_PNMIRR_MASK, 0); + epic_set_reg_w(epic, CEPIC_VECT_INTA, 0); + + epic->irr_pending = false; + atomic_set(&epic->cepic_timer.pending, 0); + epic->cepic_timer.started = false; + + epic_debug("%s: vcpu=%px, id=%d, base_address=0x%lx\n", + __func__, vcpu, kvm_epic_id(epic), epic->base_address); +} + +#if 0 +bool kvm_epic_present(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.epic && epic_hw_enabled(vcpu->arch.epic); +} + +int kvm_cepic_enabled(struct kvm_vcpu *vcpu) +{ + return kvm_epic_present(vcpu) && epic_sw_enabled(vcpu->arch.epic); +} +#endif + +/* + *---------------------------------------------------------------------- + * timer interface + *---------------------------------------------------------------------- + */ + +static bool 
cepic_is_periodic(struct kvm_timer *ktimer) +{ + struct kvm_cepic *epic = container_of(ktimer, struct kvm_cepic, + cepic_timer); + return epic_lvtt_period(epic); +} + +int epic_has_pending_timer(struct kvm_vcpu *vcpu) +{ + struct kvm_cepic *cepic = vcpu->arch.epic; + + if (cepic && epic_enabled(cepic) && epic_lvtt_enabled(cepic)) + return atomic_read(&cepic->cepic_timer.pending); + + return 0; +} + +static int kvm_epic_lvtt_deliver(struct kvm_cepic *epic) +{ + union cepic_timer_lvtt reg; + + reg.raw = epic_get_reg_w(epic, CEPIC_TIMER_LVTT); + + if (epic_hw_enabled(epic) && !(reg.bits.mask)) + return __epic_accept_irq(epic, CEPIC_ICR_DLVM_FIXED_EXT, + reg.bits.vect, 0); + + return 0; +} + +int kvm_sw_epic_sysrq_deliver(struct kvm_vcpu *vcpu) +{ + struct kvm_cepic *epic = vcpu->arch.epic; + + DebugGST("started for VCPU #%d\n", vcpu->vcpu_id); + if (epic && epic_hw_enabled(epic)) { + return __epic_accept_irq(epic, + CEPIC_ICR_DLVM_FIXED_EXT, + SYSRQ_SHOWSTATE_EPIC_VECTOR, + 0); /* trigger mode */ + } + return 0; +} + +int kvm_epic_nmi_deliver(struct kvm_vcpu *vcpu) +{ + struct kvm_cepic *epic = vcpu->arch.epic; + + DebugGST("started for VCPU #%d\n", vcpu->vcpu_id); + if (epic && epic_hw_enabled(epic)) { + return __epic_accept_irq(epic, + CEPIC_ICR_DLVM_FIXED_EXT, + KVM_NMI_EPIC_VECTOR, + 0); /* trigger mode */ + } + return 0; +} + +static const struct kvm_timer_ops cepic_timer_ops = { + .is_periodic = cepic_is_periodic, +}; + +static const struct kvm_io_device_ops epic_mmio_ops = { + .read = epic_mmio_read, + .write = epic_mmio_write, +}; + +int kvm_create_cepic(struct kvm_vcpu *vcpu) +{ + struct kvm_cepic *epic; + + /* No need for CEPIC model, if hardware support is available */ + if (vcpu->kvm->arch.is_hv) + return 0; + + ASSERT(vcpu != NULL); + epic_debug("epic_init %d\n", vcpu->vcpu_id); + + epic = kzalloc(sizeof(*epic), GFP_KERNEL); + if (!epic) + goto nomem; + + vcpu->arch.epic = epic; + + epic->regs_page = alloc_page(GFP_KERNEL); + if (epic->regs_page == NULL) 
{ + pr_err("malloc epic regs error for vcpu %x\n", vcpu->vcpu_id); + goto nomem_free_epic; + } + epic->regs = page_address(epic->regs_page); + memset(epic->regs, 0, PAGE_SIZE); + epic->vcpu = vcpu; + epic->cepic_freq = vcpu->kvm->arch.cepic_freq; + + hrtimer_init(&epic->cepic_timer.timer, CLOCK_MONOTONIC, + HRTIMER_MODE_ABS); + epic->cepic_timer.timer.function = kvm_epic_timer_fn; + epic->cepic_timer.t_ops = &cepic_timer_ops; + epic->cepic_timer.kvm = vcpu->kvm; + epic->cepic_timer.vcpu = vcpu; + + epic->base_address = EPIC_DEFAULT_PHYS_BASE; + vcpu->arch.epic_base = EPIC_DEFAULT_PHYS_BASE; + + kvm_cepic_reset(vcpu); + kvm_iodevice_init(&epic->dev, &epic_mmio_ops); + + return 0; +nomem_free_epic: + kfree(epic); + vcpu->arch.epic = NULL; +nomem: + return -ENOMEM; +} + +void kvm_inject_epic_timer_irqs(struct kvm_vcpu *vcpu) +{ + struct kvm_cepic *epic = vcpu->arch.epic; + + DebugKVMAT("started timer pending %d\n", + atomic_read(&epic->cepic_timer.pending)); + if (epic && atomic_read(&epic->cepic_timer.pending) > 0) { + if (kvm_epic_lvtt_deliver(epic)) { + atomic_dec(&epic->cepic_timer.pending); + DebugKVMAT("delivered timer pending %d\n", + atomic_read(&epic->cepic_timer.pending)); + } else { + DebugKVMAT("local EPIC timer interrupt was coalesced for VCPU #%d\n", + vcpu->vcpu_id); + if (!epic_lvtt_period(epic) && + (atomic_read(&epic->cepic_timer.pending) > 1 || + atomic_read(&epic->cepic_timer.pending) == 0)) { + /* it can be while switch periodic */ + /* mode to one shot or back */ + E2K_LMS_HALT_OK; + } + } + } +} + diff --git a/arch/e2k/kvm/cepic.h b/arch/e2k/kvm/cepic.h new file mode 100644 index 000000000000..8bf54dd101e3 --- /dev/null +++ b/arch/e2k/kvm/cepic.h @@ -0,0 +1,140 @@ +#ifndef __KVM_E2K_CEPIC_H +#define __KVM_E2K_CEPIC_H + +#include +#include "kvm_timer.h" + +#include +#include + +typedef struct kvm_cepic { + unsigned long base_address; + struct kvm_io_device dev; + struct kvm_timer cepic_timer; + struct kvm_vcpu *vcpu; + bool irr_pending; + struct 
page *regs_page; + void *regs; + int virq_no; + unsigned long cepic_freq; +} kvm_cepic_t; + +int kvm_create_cepic(struct kvm_vcpu *vcpu); +void kvm_free_cepic(struct kvm_vcpu *vcpu); + +int kvm_epic_has_interrupt(struct kvm_vcpu *vcpu); +int kvm_epic_accept_pic_intr(struct kvm_vcpu *vcpu); +int kvm_get_epic_interrupt(struct kvm_vcpu *vcpu); +void kvm_cepic_reset(struct kvm_vcpu *vcpu); +void kvm_cepic_set_base(struct kvm_vcpu *vcpu, u64 value); +u64 kvm_cepic_get_base(struct kvm_vcpu *vcpu); +void kvm_epic_set_version(struct kvm_vcpu *vcpu); + +int kvm_epic_match_physical_addr(struct kvm_cepic *epic, u16 dest); +int kvm_epic_match_logical_addr(struct kvm_cepic *epic, u8 mda); +int kvm_epic_set_irq(struct kvm_vcpu *vcpu, struct kvm_cepic_irq *irq); + +u64 kvm_get_epic_base(struct kvm_vcpu *vcpu); +void kvm_set_epic_base(struct kvm_vcpu *vcpu, u64 data); +void kvm_epic_post_state_restore(struct kvm_vcpu *vcpu); +int kvm_cepic_enabled(struct kvm_vcpu *vcpu); +bool kvm_epic_present(struct kvm_vcpu *vcpu); +int kvm_cepic_find_highest_irr(struct kvm_vcpu *vcpu); +int kvm_epic_id(struct kvm_cepic *epic); + +int epic_has_pending_timer(struct kvm_vcpu *vcpu); +void kvm_inject_epic_timer_irqs(struct kvm_vcpu *vcpu); +int kvm_sw_epic_sysrq_deliver(struct kvm_vcpu *vcpu); +int kvm_epic_nmi_deliver(struct kvm_vcpu *vcpu); + +extern void kvm_print_EPIC_field(struct kvm_cepic *epic, int base); +extern void kvm_print_local_EPIC(struct kvm_vcpu *vcpu); +extern u32 kvm_vcpu_to_full_cepic_id(const struct kvm_vcpu *vcpu); + +/* From ioapic.h */ +int kvm_epic_match_dest(int cepic_id, int src, int short_hand, int dest); +int kvm_epic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2); + +#undef ASSERT +#ifdef DEBUG +#define ASSERT(x) \ +do { \ + if (!(x)) { \ + pr_emerg("assertion failed %s: %d: %s\n", \ + __FILE__, __LINE__, #x); \ + BUG(); \ + } \ +} while (0) +#else +#define ASSERT(x) do { } while (0) +#endif + +/* + * Basic functions to access to local EPIC state structure 
+ * (see asm/kvm/guest.h) on host. + */ +static inline kvm_epic_state_t * +kvm_get_guest_cepic_state(struct kvm_vcpu *vcpu) +{ + return &vcpu->arch.kmap_vcpu_state->cepic; +} + +static inline atomic_t * +kvm_get_guest_cepic_virqs_num(struct kvm_vcpu *vcpu) +{ + kvm_epic_state_t *cepic = kvm_get_guest_cepic_state(vcpu); + + return &cepic->virqs_num; +} + +static inline int +kvm_read_guest_cepic_virqs_num(struct kvm_vcpu *vcpu) +{ + kvm_epic_state_t *cepic = kvm_get_guest_cepic_state(vcpu); + + return atomic_read(&cepic->virqs_num); +} +static inline void +kvm_set_guest_cepic_virqs_num(struct kvm_vcpu *vcpu, int count) +{ + kvm_epic_state_t *cepic = kvm_get_guest_cepic_state(vcpu); + + atomic_set(&cepic->virqs_num, count); +} +static inline void +kvm_init_guest_cepic_virqs_num(struct kvm_vcpu *vcpu) +{ + kvm_set_guest_cepic_virqs_num(vcpu, 0); +} +static inline void +kvm_inc_guest_cepic_virqs_num(struct kvm_vcpu *vcpu) +{ + kvm_epic_state_t *cepic = kvm_get_guest_cepic_state(vcpu); + + atomic_inc(&cepic->virqs_num); +} +static inline bool +kvm_inc_and_test_guest_cepic_virqs_num(struct kvm_vcpu *vcpu) +{ + kvm_epic_state_t *cepic = kvm_get_guest_cepic_state(vcpu); + + return atomic_inc_and_test(&cepic->virqs_num); +} +static inline void +kvm_dec_guest_cepic_virqs_num(struct kvm_vcpu *vcpu) +{ + kvm_epic_state_t *cepic = kvm_get_guest_cepic_state(vcpu); + + atomic_dec(&cepic->virqs_num); +} +static inline bool +kvm_dec_and_test_guest_cepic_virqs_num(struct kvm_vcpu *vcpu) +{ + kvm_epic_state_t *cepic = kvm_get_guest_cepic_state(vcpu); + + return atomic_dec_and_test(&cepic->virqs_num); +} + +#define MAX_PENDING_VIRQS 8 /* why 8 ???? 
*/ + +#endif /* __KVM_E2K_CEPIC_H */ diff --git a/arch/e2k/kvm/complete.c b/arch/e2k/kvm/complete.c new file mode 100644 index 000000000000..c12dbe3e91f2 --- /dev/null +++ b/arch/e2k/kvm/complete.c @@ -0,0 +1,126 @@ +/* + * This file implements on host the arch-dependent parts of kvm guest + * csd_lock/csd_unlock functions to serialize access to per-cpu csd resources + * + * Copyright 2016 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "irq.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +static inline long __sched +do_wait_for_common(struct completion *x, long timeout, int state) +{ + struct kvm_vcpu *vcpu = current_thread_info()->vcpu; + + if (!x->done) { + DECLARE_SWAITQUEUE(wait); + + __prepare_to_swait(&x->wait, &wait); + do { + if (signal_pending_state(state, current)) { + timeout = -ERESTARTSYS; + DebugKVM("VCPU #%d interrupted by signal, " + "VIRQs flag %d counter %d\n", + vcpu->vcpu_id, + kvm_test_pending_virqs(vcpu), + kvm_get_pending_virqs_num(vcpu)); + break; + } + __set_current_state(state); + raw_spin_unlock_irq(&x->wait.lock); + timeout = schedule_timeout(timeout); + raw_spin_lock_irq(&x->wait.lock); + if (timeout && !signal_pending_state(state, current)) { + timeout = -EINTR; + DebugKVM("VCPU #%d waked up, " + "VIRQs flag %d counter %d\n", + vcpu->vcpu_id, + kvm_test_pending_virqs(vcpu), + kvm_get_pending_virqs_num(vcpu)); + break; + } + } while (!x->done && timeout); + __finish_swait(&x->wait, &wait); + if (!x->done) + return timeout; + if (timeout == -EINTR) + return timeout; + } + x->done--; + return timeout ?: 1; +} + +static long __sched +wait_for_common(struct completion *x, long timeout, int state) +{ + might_sleep(); + + raw_spin_lock_irq(&x->wait.lock); + timeout = 
do_wait_for_common(x, timeout, state); + raw_spin_unlock_irq(&x->wait.lock); + return timeout; +} + +/** + * wait_for_completion: - waits for completion of a task + * @x: holds the state of this particular completion + * + * This waits to be signaled for completion of a specific task. It is NOT + * interruptible and there is no timeout. + * + * See also similar routines (i.e. wait_for_completion_timeout()) with timeout + * and interrupt capability. Also see complete(). + */ +void __sched kvm_wait_for_completion(struct completion *x) +{ + wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); +} + +/** + * wait_for_completion_timeout: - waits for completion of a task (w/timeout) + * @x: holds the state of this particular completion + * @timeout: timeout value in jiffies + * + * This waits for either a completion of a specific task to be signaled or for a + * specified timeout to expire. The timeout is in jiffies. It is not + * interruptible. + */ +unsigned long __sched +kvm_wait_for_completion_timeout(struct completion *x, unsigned long timeout) +{ + return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE); +} + +/** + * wait_for_completion_interruptible: - waits for completion of a task (w/intr) + * @x: holds the state of this particular completion + * + * This waits for completion of a specific task to be signaled. It is + * interruptible. 
+ */ +int __sched kvm_wait_for_completion_interruptible(struct completion *x) +{ + long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); + if (t == -ERESTARTSYS || t == -EINTR) + return t; + return 0; +} diff --git a/arch/e2k/kvm/complete.h b/arch/e2k/kvm/complete.h new file mode 100644 index 000000000000..a92453d4ad8c --- /dev/null +++ b/arch/e2k/kvm/complete.h @@ -0,0 +1,12 @@ +#ifndef __KVM_E2K_COMPLETE_H +#define __KVM_E2K_COMPLETE_H + +#include +#include + +extern void __sched kvm_wait_for_completion(struct completion *x); +extern unsigned long kvm_wait_for_completion_timeout(struct completion *x, + unsigned long timeout); +extern int kvm_wait_for_completion_interruptible(struct completion *x); + +#endif /* __KVM_E2K_COMPLETE_H */ diff --git a/arch/e2k/kvm/cpu.c b/arch/e2k/kvm/cpu.c new file mode 100644 index 000000000000..fd6b0cd9154f --- /dev/null +++ b/arch/e2k/kvm/cpu.c @@ -0,0 +1,2415 @@ + +/* + * CPU virtualization + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cpu.h" +#include "gregs.h" +#include "process.h" +#include "mmu.h" +#include "gaccess.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_GREGS_MODE +#undef DebugGREGS +#define DEBUG_KVM_GREGS_MODE 0 /* global registers debugging */ +#define DebugGREGS(fmt, args...) \ +({ \ + if (DEBUG_KVM_GREGS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_ACTIVATION_MODE +#undef DebugKVMACT +#define DEBUG_KVM_ACTIVATION_MODE 0 /* KVM guest kernel data */ + /* stack activations */ + /* debugging */ +#define DebugKVMACT(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_ACTIVATION_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_SHADOW_CONTEXT_MODE +#undef DebugSHC +#define DEBUG_SHADOW_CONTEXT_MODE 0 /* shadow context debugging */ +#define DebugSHC(fmt, args...) \ +({ \ + if (DEBUG_SHADOW_CONTEXT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_HWS_UPDATE_MODE +#undef DebugKVMHSU +#define DEBUG_KVM_HWS_UPDATE_MODE 0 /* hardware stacks frames */ + /* update debugging */ +#define DebugKVMHSU(fmt, args...) \ +({ \ + if (DEBUG_KVM_HWS_UPDATE_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_HWS_PATCH_MODE +#undef DebugKVMHSP +#define DEBUG_KVM_HWS_PATCH_MODE 0 /* hardware stacks frames */ + /* patching debug */ +#define DebugKVMHSP(fmt, args...) \ +({ \ + if (DEBUG_KVM_HWS_PATCH_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_GUEST_HS_MODE +#undef DebugGHS +#define DEBUG_GUEST_HS_MODE 0 /* Hard Stack expantions */ +#define DebugGHS(fmt, args...) \ +({ \ + if (DEBUG_GUEST_HS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PV_VCPU_TRAP_MODE +#undef DebugTRAP +#define DEBUG_PV_VCPU_TRAP_MODE 0 /* trap injection debugging */ +#define DebugTRAP(fmt, args...) \ +({ \ + if (DEBUG_PV_VCPU_TRAP_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PV_VCPU_SIG_MODE +#undef DebugSIG +#define DEBUG_PV_VCPU_SIG_MODE 0 /* signals injection debugging */ +#define DebugSIG(fmt, args...) \ +({ \ + if (DEBUG_PV_VCPU_SIG_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PV_UST_MODE +#undef DebugUST +#define DEBUG_PV_UST_MODE 0 /* trap injection debugging */ +#define DebugUST(fmt, args...) 
\ +({ \ + if (debug_guest_ust) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PV_SYSCALL_MODE +#define DEBUG_PV_SYSCALL_MODE 0 /* syscall injection debugging */ + +#if DEBUG_PV_UST_MODE || DEBUG_PV_SYSCALL_MODE +extern bool debug_guest_ust; +#else +#define debug_guest_ust false +#endif /* DEBUG_PV_UST_MODE || DEBUG_PV_SYSCALL_MODE */ + +bool debug_guest_user_stacks = false; + +void kvm_set_pv_vcpu_kernel_image(struct kvm_vcpu *vcpu) +{ + bool nonpaging = !is_paging(vcpu); + e2k_oscud_lo_t oscud_lo; + e2k_oscud_hi_t oscud_hi; + e2k_osgd_lo_t osgd_lo; + e2k_osgd_hi_t osgd_hi; + e2k_cute_t *cute_p; + e2k_addr_t base; + e2k_cutd_t cutd; + + oscud_lo.OSCUD_lo_half = 0; + oscud_lo.OSCUD_lo_base = (u64)vcpu->arch.guest_base; + oscud_hi.OSCUD_hi_half = 0; + oscud_hi.OSCUD_hi_size = vcpu->arch.guest_size; + kvm_set_guest_vcpu_OSCUD(vcpu, oscud_hi, oscud_lo); + DebugKVM("set OSCUD to guest kernel image: base 0x%llx, size 0x%x\n", + oscud_lo.OSCUD_lo_base, oscud_hi.OSCUD_hi_size); + kvm_set_guest_vcpu_CUD(vcpu, oscud_hi, oscud_lo); + DebugKVM("set CUD to init state: base 0x%llx, size 0x%x\n", + oscud_lo.CUD_lo_base, oscud_hi.CUD_hi_size); + + osgd_lo.OSGD_lo_half = 0; + osgd_lo.OSGD_lo_base = (u64)vcpu->arch.guest_base; + osgd_hi.OSGD_hi_half = 0; + osgd_hi.OSGD_hi_size = vcpu->arch.guest_size; + kvm_set_guest_vcpu_OSGD(vcpu, osgd_hi, osgd_lo); + DebugKVM("set OSGD to guest kernel image: base 0x%llx, size 0x%x\n", + osgd_lo.OSGD_lo_base, osgd_hi.OSGD_hi_size); + kvm_set_guest_vcpu_GD(vcpu, osgd_hi, osgd_lo); + DebugKVM("set GD to init state: base 0x%llx, size 0x%x\n", + osgd_lo.GD_lo_base, osgd_hi.GD_hi_size); + + cute_p = (e2k_cute_t *)kvm_vcpu_hva_to_gpa(vcpu, + (unsigned long)vcpu->arch.guest_cut); + base = (e2k_addr_t)cute_p; + cutd.CUTD_reg = 0; + cutd.CUTD_base = base; + kvm_set_guest_vcpu_OSCUTD(vcpu, cutd); + kvm_set_guest_vcpu_CUTD(vcpu, cutd); + DebugKVM("set OSCUTD & CUTD to init state: base 0x%llx\n", + cutd.CUTD_base); +} + +void 
kvm_init_cpu_state(struct kvm_vcpu *vcpu) +{ + { + u64 osr0; + + osr0 = 0; + kvm_set_guest_vcpu_OSR0(vcpu, osr0); + DebugKVM("set OSR0 to init state: base 0x%llx\n", + osr0); + } + { + e2k_core_mode_t core_mode; + + core_mode.CORE_MODE_reg = 0; + core_mode.CORE_MODE_pt_v6 = 0; + core_mode.CORE_MODE_sep_virt_space = 0; + kvm_set_guest_vcpu_CORE_MODE(vcpu, core_mode); + DebugKVM("set CORE_MODE to init state: 0x%x\n", + core_mode.CORE_MODE_reg); + } + + /* TIRs num from -1: 0 it means 1 TIR */ + kvm_set_guest_vcpu_TIRs_num(vcpu, -1); + + /* Set virtual CPUs registers status to initial value */ + kvm_reset_guest_vcpu_regs_status(vcpu); +} + +void kvm_init_cpu_state_idr(struct kvm_vcpu *vcpu) +{ + e2k_idr_t idr = kvm_vcpu_get_idr(vcpu); + + kvm_set_guest_vcpu_IDR(vcpu, idr); + DebugKVM("set IDR to init state: 0x%llx\n", + idr.IDR_reg); +} + +static void vcpu_write_os_cu_hw_ctxt_to_registers(struct kvm_vcpu *vcpu, + const struct kvm_hw_cpu_context *hw_ctxt) +{ + /* + * CPU shadow context + */ + kvm_set_guest_vcpu_OSCUD_lo(vcpu, hw_ctxt->sh_oscud_lo); + kvm_set_guest_vcpu_OSCUD_hi(vcpu, hw_ctxt->sh_oscud_hi); + kvm_set_guest_vcpu_OSGD_lo(vcpu, hw_ctxt->sh_osgd_lo); + kvm_set_guest_vcpu_OSGD_hi(vcpu, hw_ctxt->sh_osgd_hi); + kvm_set_guest_vcpu_OSCUTD(vcpu, hw_ctxt->sh_oscutd); + kvm_set_guest_vcpu_OSCUIR(vcpu, hw_ctxt->sh_oscuir); + DebugSHC("initialized VCPU #%d shadow context\n" + "SH_OSCUD: base 0x%llx size 0x%x\n" + "SH_OSGD: base 0x%llx size 0x%x\n" + "CUTD: base 0x%llx\n" + "SH_OSCUTD: base 0x%llx\n" + "SH_OSCUIR: index 0x%x\n", + vcpu->vcpu_id, + hw_ctxt->sh_oscud_lo.OSCUD_lo_base, + hw_ctxt->sh_oscud_hi.OSCUD_hi_size, + hw_ctxt->sh_osgd_lo.OSGD_lo_base, + hw_ctxt->sh_osgd_hi.OSGD_hi_size, + vcpu->arch.sw_ctxt.cutd.CUTD_base, + hw_ctxt->sh_oscutd.CUTD_base, + hw_ctxt->sh_oscuir.CUIR_index); +} + +void write_hw_ctxt_to_pv_vcpu_registers(struct kvm_vcpu *vcpu, + const struct kvm_hw_cpu_context *hw_ctxt, + const struct kvm_sw_cpu_context *sw_ctxt) +{ + struct kvm_mmu 
*mmu = &vcpu->arch.mmu; + + /* + * Stack registers + */ + kvm_set_guest_vcpu_PSP_lo(vcpu, hw_ctxt->sh_psp_lo); + kvm_set_guest_vcpu_PSP_hi(vcpu, hw_ctxt->sh_psp_hi); + kvm_set_guest_vcpu_PCSP_lo(vcpu, hw_ctxt->sh_pcsp_lo); + kvm_set_guest_vcpu_PCSP_hi(vcpu, hw_ctxt->sh_pcsp_hi); + + DebugSHC("initialized VCPU #%d shadow registers:\n" + "SH_PSP: base 0x%llx size 0x%x index 0x%x\n" + "SH_PCSP: base 0x%llx size 0x%x index 0x%x\n", + vcpu->vcpu_id, + hw_ctxt->sh_psp_lo.PSP_lo_base, hw_ctxt->sh_psp_hi.PSP_hi_size, + hw_ctxt->sh_psp_hi.PSP_hi_ind, hw_ctxt->sh_pcsp_lo.PCSP_lo_base, + hw_ctxt->sh_pcsp_hi.PCSP_hi_size, + hw_ctxt->sh_pcsp_hi.PCSP_hi_ind); + + kvm_set_guest_vcpu_WD(vcpu, hw_ctxt->sh_wd); + + /* + * MMU shadow context + */ + kvm_write_pv_vcpu_MMU_CR_reg(vcpu, hw_ctxt->sh_mmu_cr); + kvm_write_pv_vcpu_mmu_PID_reg(vcpu, hw_ctxt->sh_pid); + DebugSHC("initialized VCPU #%d MMU shadow context:\n" + "SH_MMU_CR: value 0x%llx\n" + "SH_PID: value 0x%llx\n" + "GP_PPTB: value 0x%llx\n" + "sh_U_PPTB: value 0x%lx\n" + "sh_U_VPTB: value 0x%lx\n" + "U_PPTB: value 0x%lx\n" + "U_VPTB: value 0x%lx\n" + "GID: value 0x%llx\n", + vcpu->vcpu_id, + hw_ctxt->sh_mmu_cr, hw_ctxt->sh_pid, + mmu->get_vcpu_gp_pptb(vcpu), + mmu->get_vcpu_context_u_pptb(vcpu), + mmu->get_vcpu_context_u_vptb(vcpu), + mmu->get_vcpu_u_pptb(vcpu), + mmu->get_vcpu_u_vptb(vcpu), + hw_ctxt->gid); + + /* + * CPU shadow context + */ + vcpu_write_os_cu_hw_ctxt_to_registers(vcpu, hw_ctxt); + + kvm_set_guest_vcpu_OSR0(vcpu, hw_ctxt->sh_osr0); + DebugSHC("SH_OSR0: value 0x%llx\n", hw_ctxt->sh_osr0); + kvm_set_guest_vcpu_CORE_MODE(vcpu, hw_ctxt->sh_core_mode); + DebugSHC("SH_CORE_MODE: value 0x%x, gmi %s, hci %s\n", + hw_ctxt->sh_core_mode.CORE_MODE_reg, + (hw_ctxt->sh_core_mode.CORE_MODE_gmi) ? "true" : "false", + (hw_ctxt->sh_core_mode.CORE_MODE_hci) ? 
"true" : "false"); +} + +void kvm_init_pv_vcpu_intc_handling(struct kvm_vcpu *vcpu, pt_regs_t *regs) +{ + struct kvm_intc_cpu_context *intc_ctxt = &vcpu->arch.intc_ctxt; + + /* MMU intercepts were handled, clear state for new intercepts */ + kvm_clear_intc_mu_state(vcpu); + + /* clear hypervisor intercept event counters */ + intc_ctxt->cu_num = -1; + intc_ctxt->mu_num = -1; + intc_ctxt->cur_mu = -1; + + /* clear guest interception TIRs & trap cellar */ + if (likely(!vcpu->arch.trap_wish)) { + kvm_clear_vcpu_intc_TIRs_num(vcpu); + regs->traps_to_guest = 0; + } else { + /* some trap(s)was (were) injected as wish to handle them */ + regs->traps_to_guest = 0; + } + + /* replace stacks->top value with real register SBR state */ + regs->stacks.top = regs->g_stacks.top; +} + +noinline __interrupt void +startup_pv_vcpu(struct kvm_vcpu *vcpu, guest_hw_stack_t *stack_regs, + unsigned flags) +{ + struct kvm_sw_cpu_context *sw_ctxt = &vcpu->arch.sw_ctxt; + e2k_cr0_lo_t cr0_lo; + e2k_cr0_hi_t cr0_hi; + e2k_cr1_lo_t cr1_lo; + e2k_cr1_hi_t cr1_hi; + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_sbr_t sbr; + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + e2k_cutd_t cutd; + bool is_pv_guest; + + is_pv_guest = test_thread_flag(TIF_PARAVIRT_GUEST); + + cr0_lo = stack_regs->crs.cr0_lo; + cr0_hi = stack_regs->crs.cr0_hi; + cr1_lo = stack_regs->crs.cr1_lo; + cr1_hi = stack_regs->crs.cr1_hi; + psp_lo = stack_regs->stacks.psp_lo; + psp_hi = stack_regs->stacks.psp_hi; + pcsp_lo = stack_regs->stacks.pcsp_lo; + pcsp_hi = stack_regs->stacks.pcsp_hi; + sbr.SBR_reg = stack_regs->stacks.top; + usd_lo = stack_regs->stacks.usd_lo; + usd_hi = stack_regs->stacks.usd_hi; + cutd = stack_regs->cutd; + + /* return interrupts control to PSR and disable all IRQs */ + /* disable all IRQs in UPSR to switch mmu context */ + NATIVE_RETURN_TO_KERNEL_UPSR(E2K_KERNEL_UPSR_DISABLED_ALL); + + kvm_do_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_running); + + 
if (unlikely(!(flags & FROM_HYPERCALL_SWITCH))) { + KVM_BUG_ON(true); /* now only from hypercall */ + /* save host VCPU data stack pointer registers */ + sw_ctxt->host_sbr = NATIVE_NV_READ_SBR_REG(); + sw_ctxt->host_usd_lo = NATIVE_NV_READ_USD_LO_REG(); + sw_ctxt->host_usd_hi = NATIVE_NV_READ_USD_HI_REG(); + } + + __guest_enter(current_thread_info(), &vcpu->arch, flags); + + /* switch host MMU to VCPU MMU context */ + kvm_switch_to_guest_mmu_pid(vcpu); + + if (flags & FROM_HYPERCALL_SWITCH) { + int users; + + /* free MMU hypercall stacks */ + users = kvm_pv_put_hcall_guest_stacks(vcpu, false); + KVM_BUG_ON(users != 0); + } + KVM_BUG_ON(host_hypercall_exit(vcpu)); + + /* set guest UPSR to initial state */ + NATIVE_WRITE_UPSR_REG(sw_ctxt->upsr); + + /* + * Optimization to do not flush chain stack. + * + * Old stacks are not needed anymore, do not flush procedure + * registers and chain registers - only strip sizes + */ + NATIVE_STRIP_PSHTP_WINDOW(); + NATIVE_STRIP_PCSHTP_WINDOW(); + + /* + * There might be a FILL operation still going right now. + * Wait for it's completion before going further - otherwise + * the next FILL on the new PSP/PCSP registers will race + * with the previous one. + * + * The first and the second FILL operations will use different + * addresses because we will change PSP/PCSP registers, and + * thus loads/stores from these two FILLs can race with each + * other leading to bad register file (containing values from + * both stacks).. 
+ */ + E2K_WAIT(_ma_c); + + NATIVE_NV_WRITE_USBR_USD_REG(sbr, usd_hi, usd_lo); + + NATIVE_NV_NOIRQ_WRITE_CUTD_REG(cutd); + + NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG(cr0_lo); + NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG(cr0_hi); + NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG(cr1_lo); + NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG(cr1_hi); + + NATIVE_NV_WRITE_PSP_REG(psp_hi, psp_lo); + NATIVE_NV_WRITE_PCSP_REG(pcsp_hi, pcsp_lo); + + KVM_COND_GOTO_RETURN_TO_PARAVIRT_GUEST(is_pv_guest, 0); +} + +/* See at arch/include/asm/switch.h the 'flags' argument values */ +noinline __interrupt unsigned long +launch_pv_vcpu(struct kvm_vcpu *vcpu, unsigned switch_flags) +{ + struct kvm_hw_cpu_context *hw_ctxt = &vcpu->arch.hw_ctxt; + struct kvm_sw_cpu_context *sw_ctxt = &vcpu->arch.sw_ctxt; + thread_info_t *ti = current_thread_info(); + e2k_cr0_lo_t cr0_lo; + e2k_cr0_hi_t cr0_hi; + e2k_cr1_lo_t cr1_lo; + e2k_cr1_hi_t cr1_hi; + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + bool is_pv_guest = true; + + is_pv_guest = false; + + cr0_lo = sw_ctxt->crs.cr0_lo; + cr0_hi = sw_ctxt->crs.cr0_hi; + cr1_lo = sw_ctxt->crs.cr1_lo; + cr1_hi = sw_ctxt->crs.cr1_hi; + psp_lo = hw_ctxt->sh_psp_lo; + psp_hi = hw_ctxt->sh_psp_hi; + pcsp_lo = hw_ctxt->sh_pcsp_lo; + pcsp_hi = hw_ctxt->sh_pcsp_hi; + + kvm_do_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_running); + + /* Switch IRQ control to PSR and disable MI/NMIs */ + /* disable all IRQs in UPSR to switch mmu context */ + NATIVE_RETURN_TO_KERNEL_UPSR(E2K_KERNEL_UPSR_DISABLED_ALL); + + /* save host VCPU data stack pointer registers */ + sw_ctxt->host_sbr = NATIVE_NV_READ_SBR_REG(); + sw_ctxt->host_usd_lo = NATIVE_NV_READ_USD_LO_REG(); + sw_ctxt->host_usd_hi = NATIVE_NV_READ_USD_HI_REG(); + + __guest_enter(ti, &vcpu->arch, switch_flags); + + /* switch host MMU to VCPU MMU context */ + kvm_switch_to_guest_mmu_pid(vcpu); + + /* from now the host process is at paravirtualized guest (VCPU) mode */ + set_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE); + 
+ if (switch_flags & FROM_HYPERCALL_SWITCH) { + int users; + + /* free MMU hypercall stacks */ + users = kvm_pv_put_hcall_guest_stacks(vcpu, false); + KVM_BUG_ON(users != 0); + } + KVM_BUG_ON(host_hypercall_exit(vcpu)); + + NATIVE_FLUSHCPU; /* spill all host hardware stacks */ + + sw_ctxt->crs.cr0_lo = NATIVE_NV_READ_CR0_LO_REG(); + sw_ctxt->crs.cr0_hi = NATIVE_NV_READ_CR0_HI_REG(); + sw_ctxt->crs.cr1_lo = NATIVE_NV_READ_CR1_LO_REG(); + sw_ctxt->crs.cr1_hi = NATIVE_NV_READ_CR1_HI_REG(); + + E2K_WAIT_MA; /* wait for spill completion */ + + hw_ctxt->sh_psp_lo = NATIVE_NV_READ_PSP_LO_REG(); + hw_ctxt->sh_psp_hi = NATIVE_NV_READ_PSP_HI_REG(); + hw_ctxt->sh_pcsp_lo = NATIVE_NV_READ_PCSP_LO_REG(); + hw_ctxt->sh_pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG(); + + /* + * There might be a FILL operation still going right now. + * Wait for it's completion before going further - otherwise + * the next FILL on the new PSP/PCSP registers will race + * with the previous one. + * + * The first and the second FILL operations will use different + * addresses because we will change PSP/PCSP registers, and + * thus loads/stores from these two FILLs can race with each + * other leading to bad register file (containing values from + * both stacks).. 
+ */ + E2K_WAIT(_ma_c); + + NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG(cr0_lo); + NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG(cr0_hi); + NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG(cr1_lo); + NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG(cr1_hi); + + NATIVE_NV_WRITE_PSP_REG(psp_hi, psp_lo); + NATIVE_NV_WRITE_PCSP_REG(pcsp_hi, pcsp_lo); + + KVM_COND_GOTO_RETURN_TO_PARAVIRT_GUEST(is_pv_guest, 0); + return 0; +} + +notrace noinline __interrupt void +pv_vcpu_switch_to_host_from_intc(thread_info_t *ti) +{ + struct kvm_vcpu *vcpu = ti->vcpu; + + KVM_BUG_ON(vcpu == NULL); + vcpu->arch.from_pv_intc = true; + (void) switch_to_host_pv_vcpu_mode(ti, vcpu, false /* from vcpu-guest */, + FULL_CONTEXT_SWITCH | DONT_AAU_CONTEXT_SWITCH | + DONT_SAVE_KGREGS_SWITCH | DONT_MMU_CONTEXT_SWITCH | + DONT_TRAP_MASK_SWITCH); +} + +notrace noinline __interrupt void +pv_vcpu_return_to_intc_mode(thread_info_t *ti, struct kvm_vcpu *vcpu) +{ + KVM_BUG_ON(vcpu == NULL); + vcpu->arch.from_pv_intc = false; + (void) return_to_intc_pv_vcpu_mode(ti, vcpu, + FULL_CONTEXT_SWITCH | DONT_AAU_CONTEXT_SWITCH | + DONT_SAVE_KGREGS_SWITCH | DONT_MMU_CONTEXT_SWITCH | + DONT_TRAP_MASK_SWITCH); +} + +void kvm_emulate_pv_vcpu_intc(thread_info_t *ti, pt_regs_t *regs, + trap_pt_regs_t *trap) +{ + do_emulate_pv_vcpu_intc(ti, regs, trap); +} + +void return_from_pv_vcpu_intc(struct thread_info *ti, pt_regs_t *regs) +{ + do_return_from_pv_vcpu_intc(ti, regs); +} + +static notrace __always_inline void inject_handler_trampoline(void) +{ + e2k_addr_t sbr; + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + + if (TASK_IS_PROTECTED(current)) + DISABLE_US_CLW(); + + /* + * Switch to kernel stacks. 
+ */ + GET_SIG_RESTORE_STACK(current_thread_info(), sbr, usd_lo, usd_hi); + NATIVE_NV_WRITE_USBR_USD_REG_VALUE(sbr, AW(usd_hi), AW(usd_lo)); + + /* + * Switch to %upsr for interrupts control + */ + DO_SAVE_UPSR_REG(current_thread_info()->upsr); + SET_KERNEL_UPSR_WITH_DISABLED_NMI(); +} + +notrace noinline __interrupt __section(".entry.text") +void trap_handler_trampoline_continue(void) +{ + /* return to hypervisor context */ + return_from_pv_vcpu_inject(current_thread_info()->vcpu); + + inject_handler_trampoline(); + E2K_JUMP(return_pv_vcpu_trap); +} + +notrace noinline __interrupt __section(".entry.text") +void syscall_handler_trampoline_continue(u64 sys_rval) +{ + struct kvm_vcpu *vcpu; + + vcpu = current_thread_info()->vcpu; + /* return to hypervisor context */ + return_from_pv_vcpu_inject(vcpu); + + inject_handler_trampoline(); + + syscall_handler_trampoline_start(vcpu, sys_rval); + + E2K_JUMP(return_pv_vcpu_syscall); +} + +notrace noinline __interrupt __section(".entry.text") +void syscall_fork_trampoline_continue(u64 sys_rval) +{ + struct kvm_vcpu *vcpu; + gthread_info_t *gti; + + vcpu = current_thread_info()->vcpu; + /* return to hypervisor context */ + return_from_pv_vcpu_inject(vcpu); + + gti = pv_vcpu_get_gti(vcpu); + KVM_BUG_ON(!is_sys_call_pt_regs(>i->fork_regs)); + gti->fork_regs.sys_rval = sys_rval; + + inject_handler_trampoline(); + + E2K_JUMP(return_pv_vcpu_syscall_fork); +} + +static void fill_pv_vcpu_handler_trampoline(struct kvm_vcpu *vcpu, + e2k_mem_crs_t *crs, inject_caller_t from) +{ + memset(crs, 0, sizeof(*crs)); + + crs->cr0_lo.CR0_lo_pf = -1ULL; + if (from == FROM_PV_VCPU_SYSCALL_INJECT) { + crs->cr0_hi.CR0_hi_IP = (u64)syscall_handler_trampoline; + } else if (from == FROM_PV_VCPU_TRAP_INJECT) { + crs->cr0_hi.CR0_hi_IP = (u64)trap_handler_trampoline; + } else { + KVM_BUG_ON(true); + } + crs->cr1_lo.CR1_lo_psr = E2K_KERNEL_PSR_DISABLED.PSR_reg; + crs->cr1_lo.CR1_lo_cui = KERNEL_CODES_INDEX; + if (machine.native_iset_ver < E2K_ISET_V6) + 
crs->cr1_lo.CR1_lo_ic = 1; + if (from == FROM_PV_VCPU_SYSCALL_INJECT) { + crs->cr1_lo.CR1_lo_wpsz = 1; + crs->cr1_lo.CR1_lo_wbs = 0; + } else if (from == FROM_PV_VCPU_TRAP_INJECT) { + crs->cr1_lo.CR1_lo_wpsz = 0; + crs->cr1_lo.CR1_lo_wbs = 0; + } else { + KVM_BUG_ON(true); + } + crs->cr1_hi.CR1_hi_ussz = pv_vcpu_get_gti(vcpu)->us_size >> 4; +} + +static void prepare_pv_vcpu_inject_handler_trampoline(struct kvm_vcpu *vcpu, + e2k_stacks_t *stacks, inject_caller_t from, + bool guest_user) +{ + e2k_mem_crs_t *k_crs, crs; + unsigned long flags; + + /* + * Prepare 'sighandler_trampoline' frame + */ + fill_pv_vcpu_handler_trampoline(vcpu, &crs, from); + + /* + * Copy the new frame into chain stack + * + * See user_hw_stacks_copy_full() for an explanation why this frame + * is located at (AS(ti->k_pcsp_lo).base). + */ + k_crs = (e2k_mem_crs_t *)current_thread_info()->k_pcsp_lo.PCSP_lo_base; + + raw_all_irq_save(flags); + E2K_FLUSHC; + /* User frame from *k_crs has been copied to userspace */ + /* already in user_hw_stacks_copy_full() */ + *k_crs = crs; + raw_all_irq_restore(flags); + + if (unlikely(stacks->pcshtp > 0)) { + /* top guest frame was spilled to memory */ + /* and is replaced at by the trampoline frame */ + stacks->pcsp_hi.PCSP_hi_ind += SZ_OF_CR; + } + DebugUST("set trampoline CRS at bottom of host stack from %px, " + "increase guest kernel chain index 0x%x\n", + k_crs, stacks->pcsp_hi.PCSP_hi_ind); +} + +static int prepare_pv_vcpu_inject_handler_frame(struct kvm_vcpu *vcpu, + pt_regs_t *regs, e2k_stacks_t *stacks, e2k_mem_crs_t *crs) +{ + thread_info_t *ti = current_thread_info(); + gthread_info_t *gti = pv_vcpu_get_gti(vcpu); + unsigned long flags; + e2k_mem_crs_t *k_crs; + long g_pcshtp; + int cui; + + /* + * Update chain stack + */ + memset(crs, 0, sizeof(*crs)); + + cui = 0; + + /* FIXME: Here it need set guest OSCUD as CUD and remember curren CUD */ + /* which can be guest user CUD + NATIVE_NV_NOIRQ_WRITE_CUTD_REG(vcpu->arch.hw_ctxt.oscutd); + */ + + 
crs->cr0_lo.CR0_lo_pf = -1ULL; + crs->cr0_hi.CR0_hi_IP = kvm_get_pv_vcpu_ttable_base(vcpu); + /* real guest VCPU PSR should be as for user - nonprivileged */ + crs->cr1_lo.CR1_lo_psr = E2K_USER_INITIAL_PSR.PSR_reg; + crs->cr1_lo.CR1_lo_cui = cui; + if (machine.native_iset_ver < E2K_ISET_V6) + crs->cr1_lo.CR1_lo_ic = 0; + crs->cr1_lo.CR1_lo_wbs = 0; + crs->cr1_hi.CR1_hi_ussz = stacks->usd_hi.USD_hi_size >> 4; + + /* + * handle_sys_call() does not restore %cr registers from pt_regs + * for performance reasons, so update chain stack in memory too. + * + * See user_hw_stacks_copy_full() for an explanation why this frame + * is located at (AS(ti->k_pcsp_lo).base + SZ_OF_CR). + */ + k_crs = (e2k_mem_crs_t *)ti->k_pcsp_lo.PCSP_lo_base; + + raw_all_irq_save(flags); + E2K_FLUSHC; + *(k_crs + 1) = *crs; + raw_all_irq_restore(flags); + DebugUST("set trap handler chain frame at bottom of host stack " + "from %px and CRS at %px to return to handler instead of " + "trap point\n", + k_crs + 1, crs); + + /* See comment in user_hw_stacks_copy_full() */ + /* but guest user chain stack can be empty */ + g_pcshtp = PCSHTP_SIGN_EXTEND(stacks->pcshtp); + KVM_BUG_ON(g_pcshtp != SZ_OF_CR && g_pcshtp != 0 && !regs->need_inject); + + return 0; +} + +static int prepare_pv_vcpu_syscall_handler_frame(struct kvm_vcpu *vcpu, + pt_regs_t *regs) +{ + e2k_stacks_t *g_stacks = ®s->g_stacks; + e2k_mem_crs_t *crs = ®s->crs; + e2k_mem_ps_t ps_frames[4]; + e2k_mem_crs_t *k_crs; + unsigned long flags, ts_flag; + void __user *u_pframe; + int arg, cui, wbs, ret; + + /* + * Update procedure stack + */ + memset(ps_frames, 0, sizeof(ps_frames)); + + for (arg = 0; arg <= 6; arg++) { + int frame = (arg * sizeof(*regs->args)) / (EXT_4_NR_SZ / 2); + bool lo = (arg & 0x1) == 0x0; + unsigned long long arg_value; + + if (arg == 0) { + arg_value = regs->sys_num; + } else { + arg_value = regs->args[arg]; + } + + if (machine.native_iset_ver < E2K_ISET_V5) { + if (lo) + ps_frames[frame].v2.word_lo = arg_value; + else + 
ps_frames[frame].v2.word_hi = arg_value; + /* Skip frame[2] and frame[3] - they hold */ + /* extended data not used by kernel */ + } else { + if (lo) + ps_frames[frame].v5.word_lo = arg_value; + else + ps_frames[frame].v5.word_hi = arg_value; + /* Skip frame[1] and frame[3] - they hold */ + /* extended data not used by kernel */ + } + DebugUST(" PS[%d].%s is 0x%016llx\n", + frame, (lo) ? "lo" : "hi", arg_value); + } + + u_pframe = (void __user *) (g_stacks->psp_lo.PSP_lo_base + + g_stacks->psp_hi.PSP_hi_ind); + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = copy_e2k_stack_to_user(u_pframe, &ps_frames, sizeof(ps_frames), + regs); + clear_ts_flag(ts_flag); + if (ret) + return -EFAULT; + + g_stacks->psp_hi.PSP_hi_ind += sizeof(ps_frames); + DebugUST("set system call handler proc frame at bottom of guest " + "kernel stack from %px\n", + u_pframe); + + /* + * Update chain stack + */ + memset(crs, 0, sizeof(*crs)); + + cui = 0; + wbs = (sizeof(*regs->args) * 2 * 8 + (EXT_4_NR_SZ - 1)) / EXT_4_NR_SZ; + + /* FIXME: Here it need set guest OSCUD as CUD and remember curren CUD */ + /* which can be guest user CUD + NATIVE_NV_NOIRQ_WRITE_CUTD_REG(vcpu->arch.hw_ctxt.oscutd); + */ + + crs->cr0_lo.CR0_lo_pf = -1ULL; + crs->cr0_hi.CR0_hi_IP = kvm_get_pv_vcpu_ttable_base(vcpu) + + regs->kernel_entry * + E2K_SYSCALL_TRAP_ENTRY_SIZE; + /* real guest VCPU PSR should be as for user - nonprivileged */ + crs->cr1_lo.CR1_lo_psr = E2K_USER_INITIAL_PSR.PSR_reg; + crs->cr1_lo.CR1_lo_cui = cui; + if (machine.native_iset_ver < E2K_ISET_V6) + crs->cr1_lo.CR1_lo_ic = 0; + crs->cr1_lo.CR1_lo_wpsz = 4; + crs->cr1_lo.CR1_lo_wbs = wbs; + crs->cr1_hi.CR1_hi_ussz = g_stacks->usd_hi.USD_hi_size >> 4; + + /* + * handle_sys_call() does not restore %cr registers from pt_regs + * for performance reasons, so update chain stack in memory too. + * + * See user_hw_stacks_copy_full() for an explanation why this frame + * is located at (AS(ti->k_pcsp_lo).base + SZ_OF_CR). 
+ */ + k_crs = (e2k_mem_crs_t *)current_thread_info()->k_pcsp_lo.PCSP_lo_base; + + raw_all_irq_save(flags); + E2K_FLUSHC; + *(k_crs + 1) = *crs; + raw_all_irq_restore(flags); + DebugUST("set trap handler chain frame at bottom of host stack " + "from %px and CRS at %px to return to handler instead of " + "trap point\n", + k_crs + 1, crs); + + /* See comment in user_hw_stacks_copy_full() */ + BUG_ON(PCSHTP_SIGN_EXTEND(g_stacks->pcshtp) != SZ_OF_CR); + + return 0; +} + +/** + * setup_pv_vcpu_trap_stack - save priviliged part of interrupted + * (emulated interception mode) user context to a special privileged area + * now in user space of host VCPU process (qemu) + */ +static int setup_pv_vcpu_trap_stack(struct kvm_vcpu *vcpu, struct pt_regs *regs, + inject_caller_t from) +{ + gthread_info_t *gti = pv_vcpu_get_gti(vcpu); + struct signal_stack_context __user *context; + pv_vcpu_ctxt_t __user *vcpu_ctxt; + kvm_host_context_t *host_ctxt; + int trap_no = 0; + e2k_psr_t guest_psr; + bool irq_under_upsr; + unsigned long ts_flag; + int ret; + + host_ctxt = &vcpu->arch.host_ctxt; + if (from == FROM_PV_VCPU_TRAP_INJECT) { + trap_no = atomic_inc_return(&host_ctxt->signal.traps_num); + KVM_BUG_ON(atomic_read(&host_ctxt->signal.traps_num) <= + atomic_read(&host_ctxt->signal.in_work)); + } else if (from == FROM_PV_VCPU_SYSCALL_INJECT) { + atomic_inc(&host_ctxt->signal.syscall_num); + KVM_BUG_ON(atomic_read(&host_ctxt->signal.syscall_num) <= + atomic_read(&host_ctxt->signal.in_syscall)); + } else { + KVM_BUG_ON(true); + } + + trace_pv_injection(from, ®s->stacks, ®s->crs, + atomic_read(&host_ctxt->signal.traps_num), + atomic_read(&host_ctxt->signal.syscall_num)); + + ret = setup_signal_stack(regs, false); + if (unlikely(ret)) { + pr_err("%s(): could not create alt stack to save context, " + "error %d\n", + __func__, ret); + return ret; + } + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + + context = get_signal_stack(); + vcpu_ctxt = &context->vcpu_ctxt; + ret = 0; + if (from == 
FROM_PV_VCPU_TRAP_INJECT) { + ret |= __put_user(FROM_PV_VCPU_TRAP_INJECT, + &vcpu_ctxt->inject_from); + ret |= __put_user(trap_no, &vcpu_ctxt->trap_no); + } else if (from == FROM_PV_VCPU_SYSCALL_INJECT) { + ret |= __put_user(FROM_PV_VCPU_SYSCALL_INJECT, + &vcpu_ctxt->inject_from); + } else { + KVM_BUG_ON(true); + } + ret |= __put_user(false, &vcpu_ctxt->in_sig_handler); + + /* emulate guest VCPU PSR state after trap */ + guest_psr = kvm_emulate_guest_vcpu_psr_trap(vcpu, &irq_under_upsr); + ret |= __put_user(guest_psr.PSR_reg, &(vcpu_ctxt->guest_psr.PSR_reg)); + ret |= __put_user(irq_under_upsr, &(vcpu_ctxt->irq_under_upsr)); + + clear_ts_flag(ts_flag); + + return ret; +} + +static int setup_pv_vcpu_trap(struct kvm_vcpu *vcpu, pt_regs_t *regs) +{ + gthread_info_t *gti = pv_vcpu_get_gti(vcpu); + bool guest_user; + int ret; + + BUG_ON(!user_mode(regs)); + BUILD_BUG_ON(E2K_ALIGN_STACK != + max(E2K_ALIGN_USTACK_SIZE, E2K_ALIGN_PUSTACK_SIZE)); + + guest_user = !pv_vcpu_trap_on_guest_kernel(regs); + regs->is_guest_user = guest_user; +#if DEBUG_PV_UST_MODE + debug_guest_ust = guest_user; +#endif + if (guest_user && !regs->g_stacks_valid) { + prepare_pv_vcpu_inject_stacks(vcpu, regs); + } + + DebugTRAP("recursive trap injection, already %d trap(s), in work %d " + "in %s mode\n", + atomic_read(&vcpu->arch.host_ctxt.signal.traps_num), + atomic_read(&vcpu->arch.host_ctxt.signal.in_work), + (guest_user) ? "user" : "kernel"); + + if (guest_user && gti->task_is_binco) + NATIVE_SAVE_RPR_REGS(regs); + + /* + * After having called setup_signal_stack() we must unroll signal + * stack by calling pop_signal_stack() in case an error happens. 
+ */ + ret = setup_pv_vcpu_trap_stack(vcpu, regs, FROM_PV_VCPU_TRAP_INJECT); + if (ret) + return ret; + + /* + * Copy guest's part of kernel hardware stacks into user + */ + if (guest_user) { + ret = pv_vcpu_user_hw_stacks_copy_full(vcpu, regs); + } else { + ret = do_user_hw_stacks_copy_full(®s->stacks, regs, ®s->crs); + if (!ret) + AS(regs->stacks.pcsp_hi).ind += SZ_OF_CR; + } + if (ret) + goto free_signal_stack; + + /* + * We want user to return to inject_handler_trampoline so + * create fake kernel frame in user's chain stack + */ + if (guest_user) { + prepare_pv_vcpu_inject_handler_trampoline(vcpu, + ®s->g_stacks, FROM_PV_VCPU_TRAP_INJECT, true); + } else { + prepare_pv_vcpu_inject_handler_trampoline(vcpu, + ®s->stacks, FROM_PV_VCPU_TRAP_INJECT, false); + } + + /* + * guest's trap handler frame should be the last in stacks + */ + if (guest_user) { + ret = prepare_pv_vcpu_inject_handler_frame(vcpu, regs, + ®s->g_stacks, ®s->crs); + } else { + ret = prepare_pv_vcpu_inject_handler_frame(vcpu, regs, + ®s->stacks, ®s->crs); + } + if (ret) + goto free_signal_stack; + + if (guest_user) { + /* + * Set copy of kernel & host global regs ti initial state: + * kernel gregs is zeroed + * host VCPU state greg is inited by pointer to the VCPU + * interface with guest + */ + INIT_HOST_GREGS_COPY(current_thread_info(), vcpu); + } else { + /* keep the current kernel & host global registers state */ + kvm_check_vcpu_state_greg(); + } + + return 0; + +free_signal_stack: + pop_signal_stack(); + + return ret; +} + +void insert_pv_vcpu_traps(thread_info_t *ti, pt_regs_t *regs) +{ + struct kvm_vcpu *vcpu; + int failed; + int TIRs_num; + vcpu = ti->vcpu; + KVM_BUG_ON(vcpu == NULL); + + KVM_BUG_ON(!kvm_test_intc_emul_flag(regs)); + KVM_BUG_ON(vcpu->arch.sw_ctxt.in_hypercall); + + TIRs_num = kvm_get_guest_vcpu_TIRs_num(vcpu); + if (atomic_read(&vcpu->arch.host_ctxt.signal.traps_num) > 1) { + pr_debug("%s() recursive trap injection, already %d trap(s), " + "in work %d\n", + __func__, + 
atomic_read(&vcpu->arch.host_ctxt.signal.traps_num), + atomic_read(&vcpu->arch.host_ctxt.signal.in_work)); + if (TIRs_num >= 0) { + pr_err("%s(): guest trap handler did not have time " + "to read %d TIRs of previous injection\n", + __func__, TIRs_num); + KVM_BUG_ON(true); + } + } else { + if (TIRs_num >= 0) { + pr_err("%s(): new trap before previous TIRs read\n", + __func__); + print_all_TIRs(regs->trap->TIRs, regs->trap->nr_TIRs); + print_pt_regs(regs); + print_all_TIRs(vcpu->arch.kmap_vcpu_state-> + cpu.regs.CPU_TIRs, + TIRs_num); + do_exit(SIGKILL); + } + } + + kvm_clear_vcpu_guest_stacks_pending(vcpu, regs); + + kvm_set_pv_vcpu_SBBP_TIRs(vcpu, regs); + kvm_set_pv_vcpu_trap_cellar(vcpu); + kvm_set_pv_vcpu_trap_context(vcpu, regs); + + failed = setup_pv_vcpu_trap(vcpu, regs); + + if (failed) { + do_exit(SIGKILL); + } + +} + +static int setup_pv_vcpu_syscall(struct kvm_vcpu *vcpu, pt_regs_t *regs) +{ + gthread_info_t *gti = pv_vcpu_get_gti(vcpu); + int ret; + + DebugTRAP("start on VCPU #%d, system call entry #%d/%d, regs at %px\n", + vcpu->vcpu_id, regs->kernel_entry, regs->sys_num, regs); + + BUG_ON(!user_mode(regs)); + regs->is_guest_user = true; + + BUILD_BUG_ON(E2K_ALIGN_STACK != + max(E2K_ALIGN_USTACK_SIZE, E2K_ALIGN_PUSTACK_SIZE)); + +#if DEBUG_PV_SYSCALL_MODE + debug_guest_ust = true; +#endif + + KVM_BUG_ON(!is_sys_call_pt_regs(regs)); + gti->fork_regs = *regs; + + if (!regs->g_stacks_valid) { + prepare_pv_vcpu_inject_stacks(vcpu, regs); + } + + /* + * After having called setup_signal_stack() we must unroll signal + * stack by calling pop_signal_stack() in case an error happens. 
+ */ + ret = setup_pv_vcpu_trap_stack(vcpu, regs, FROM_PV_VCPU_SYSCALL_INJECT); + if (ret) + return ret; + + /* + * Copy guest's part of kernel hardware stacks into user + */ + ret = pv_vcpu_user_hw_stacks_copy_full(vcpu, regs); + if (ret) + goto free_signal_stack; + + /* + * We want user to return to inject_handler_trampoline so + * create fake kernel frame in user's chain stack + */ + prepare_pv_vcpu_inject_handler_trampoline(vcpu, ®s->g_stacks, + FROM_PV_VCPU_SYSCALL_INJECT, true); + + /* + * guest's trap handler frame should be the last in stacks + */ + ret = prepare_pv_vcpu_syscall_handler_frame(vcpu, regs); + if (ret) + goto free_signal_stack; + + /* + * Set copy of kernel & host global regs ti initial state: + * kernel gregs is zeroed + * host VCPU state greg is inited by pointer to the VCPU + * interface with guest + */ + INIT_HOST_GREGS_COPY(current_thread_info(), vcpu); + + return 0; + +free_signal_stack: + pop_signal_stack(); + + return ret; +} + +static void insert_pv_vcpu_syscall(struct kvm_vcpu *vcpu, pt_regs_t *regs) +{ + int failed; + + save_pv_vcpu_sys_call_stack_regs(vcpu, regs); + + failed = setup_pv_vcpu_syscall(vcpu, regs); + + if (!failed) { +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + if (regs->trap && regs->trap->flags & TRAP_RP_FLAG) { + pr_err("%s(): binary compliler support is not yet " + "impleneted for trap in generations mode\n", + __func__); + KVM_BUG_ON(true); + } +#endif /* CONFIG_SECONDARY_SPACE_SUPPORT */ + } else { + do_exit(SIGKILL); + } + +} + +static int prepare_pv_vcpu_sigreturn_handler_trampoline(struct kvm_vcpu *vcpu, + pt_regs_t *regs, inject_caller_t from) +{ + e2k_mem_crs_t crs; + + /* + * Create 'sighandler_trampoline' chain stack frame + */ + fill_pv_vcpu_handler_trampoline(vcpu, &crs, from); + + /* + * Copy the new frame into the top of guest kernel chain stack + */ + return pv_vcpu_user_hw_stacks_copy_crs(vcpu, ®s->g_stacks, regs, + &crs); +} + +static int prepare_pv_vcpu_sigreturn_frame(struct kvm_vcpu *vcpu, + 
pt_regs_t *regs, unsigned long sigreturn_entry) +{ + e2k_stacks_t *g_stacks = ®s->g_stacks; + e2k_mem_crs_t *crs = ®s->crs; + int cui; + + memset(crs, 0, sizeof(*crs)); + + cui = 0; + + crs->cr0_lo.CR0_lo_pf = -1ULL; + crs->cr0_hi.CR0_hi_IP = sigreturn_entry; + /* real guest VCPU PSR should be as for user - nonprivileged */ + crs->cr1_lo.CR1_lo_psr = E2K_USER_INITIAL_PSR.PSR_reg; + crs->cr1_lo.CR1_lo_cui = cui; + if (machine.native_iset_ver < E2K_ISET_V6) + crs->cr1_lo.CR1_lo_ic = 0; + crs->cr1_lo.CR1_lo_wbs = 0; + crs->cr1_hi.CR1_hi_ussz = g_stacks->usd_hi.USD_hi_size >> 4; + + return 0; +} + +static int setup_pv_vcpu_sigreturn(struct kvm_vcpu *vcpu, + pv_vcpu_ctxt_t *vcpu_ctxt, pt_regs_t *regs) +{ + gthread_info_t *gti = pv_vcpu_get_gti(vcpu); + inject_caller_t from = vcpu_ctxt->inject_from; + unsigned long sigreturn_entry; + int ret; + + if (from == FROM_PV_VCPU_SYSCALL_INJECT) { + DebugSIG("start on VCPU #%d, signal on system call\n", + vcpu->vcpu_id); + } else if (from == FROM_PV_VCPU_TRAP_INJECT) { + DebugSIG("start on VCPU #%d, signal on trap\n", + vcpu->vcpu_id); + } else { + KVM_BUG_ON(true); + } + + KVM_BUG_ON(!user_mode(regs)); + regs->is_guest_user = true; + + regs->g_stacks_valid = false; + prepare_pv_vcpu_inject_stacks(vcpu, regs); + + /* + * Copy guest user CRS to the bottom of guest kernel stack + * to return to trap/system call entry point + */ + ret = pv_vcpu_user_hw_stacks_copy_crs(vcpu, ®s->g_stacks, regs, + ®s->crs); + if (ret) + goto error_out; + + /* + * We want user to return to inject_handler_trampoline so + * create fake kernel frame in user's chain stack + */ + ret = prepare_pv_vcpu_sigreturn_handler_trampoline(vcpu, regs, from); + if (ret) + goto error_out; + + /* + * guest's sigreturn frame should be at on chain stack registers (crs) + */ + sigreturn_entry = vcpu_ctxt->sigreturn_entry; + ret = prepare_pv_vcpu_sigreturn_frame(vcpu, regs, sigreturn_entry); + if (ret) + goto error_out; + + return 0; + +error_out: + return ret; +} + 
+noinline __interrupt void +switch_to_pv_vcpu_sigreturn(struct kvm_vcpu *vcpu, e2k_stacks_t *g_stacks, + e2k_mem_crs_t *g_crs) +{ + e2k_cr0_lo_t cr0_lo; + e2k_cr0_hi_t cr0_hi; + e2k_cr1_lo_t cr1_lo; + e2k_cr1_hi_t cr1_hi; + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_sbr_t sbr; + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + e2k_cutd_t cutd; + + cr0_lo = g_crs->cr0_lo; + cr0_hi = g_crs->cr0_hi; + cr1_lo = g_crs->cr1_lo; + cr1_hi = g_crs->cr1_hi; + psp_lo = g_stacks->psp_lo; + psp_hi = g_stacks->psp_hi; + pcsp_lo = g_stacks->pcsp_lo; + pcsp_hi = g_stacks->pcsp_hi; + sbr.SBR_reg = g_stacks->top; + usd_lo = g_stacks->usd_lo; + usd_hi = g_stacks->usd_hi; + cutd = vcpu->arch.hw_ctxt.sh_oscutd; + + /* return interrupts control to PSR and disable all IRQs */ + /* disable all IRQs in UPSR to switch mmu context */ + NATIVE_RETURN_TO_KERNEL_UPSR(E2K_KERNEL_UPSR_DISABLED_ALL); + + kvm_do_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_running); + + __guest_enter(current_thread_info(), &vcpu->arch, 0); + + /* switch host MMU to VCPU MMU context */ + kvm_switch_to_guest_mmu_pid(vcpu); + + /* from now the host process is at paravirtualized guest (VCPU) mode */ + set_ts_flag(TS_HOST_AT_VCPU_MODE); + + /* set guest UPSR to initial state */ + NATIVE_WRITE_UPSR_REG(E2K_USER_INITIAL_UPSR); + + /* Restore guest kernel & host (vcpu state) global registers */ + HOST_RESTORE_GUEST_KERNEL_GREGS(pv_vcpu_get_gti(vcpu)); + + /* + * Optimization to do not flush chain stack. + * + * Old stacks are not needed anymore, do not flush procedure + * registers and chain registers - only strip sizes + */ + NATIVE_STRIP_PSHTP_WINDOW(); + NATIVE_STRIP_PCSHTP_WINDOW(); + + /* + * There might be a FILL operation still going right now. + * Wait for it's completion before going further - otherwise + * the next FILL on the new PSP/PCSP registers will race + * with the previous one. 
+ * + * The first and the second FILL operations will use different + * addresses because we will change PSP/PCSP registers, and + * thus loads/stores from these two FILLs can race with each + * other leading to bad register file (containing values from + * both stacks).. + */ + E2K_WAIT(_ma_c); + + NATIVE_NV_WRITE_USBR_USD_REG(sbr, usd_hi, usd_lo); + + NATIVE_NV_NOIRQ_WRITE_CUTD_REG(cutd); + + NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG(cr0_lo); + NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG(cr0_hi); + NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG(cr1_lo); + NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG(cr1_hi); + + NATIVE_NV_WRITE_PSP_REG(psp_hi, psp_lo); + NATIVE_NV_WRITE_PCSP_REG(pcsp_hi, pcsp_lo); +} + +void insert_pv_vcpu_sigreturn(struct kvm_vcpu *vcpu, pv_vcpu_ctxt_t *vcpu_ctxt, + pt_regs_t *regs) +{ + struct signal_stack_context __user *context; + pv_vcpu_ctxt_t *u_vcpu_ctxt; + unsigned long ts_flag; + int failed; + + failed = setup_pv_vcpu_sigreturn(vcpu, vcpu_ctxt, regs); + + if (failed) + goto fault; + + /* clear flag of return from signal handler */ + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + context = get_signal_stack(); + u_vcpu_ctxt = &context->vcpu_ctxt; + failed = __put_user(false, &u_vcpu_ctxt->in_sig_handler); + clear_ts_flag(ts_flag); + + if (failed) + goto fault; + + switch_to_pv_vcpu_sigreturn(vcpu, ®s->g_stacks, ®s->crs); + +fault: + user_exit(); + do_exit(SIGKILL); +} + +/* + * The function should return bool 'is the system call from guest?' 
+ */ +bool pv_vcpu_syscall_intc(thread_info_t *ti, pt_regs_t *regs) +{ + struct kvm_vcpu *vcpu = ti->vcpu; + + preempt_disable(); + + /* disable all IRQs to switch mmu context */ + raw_all_irq_disable(); + + __guest_exit(ti, &vcpu->arch, 0); + + /* return to hypervisor MMU context to emulate intercept */ + kvm_switch_to_host_mmu_pid(current->mm); + + kvm_set_intc_emul_flag(regs); + + raw_all_irq_enable(); + + preempt_enable(); + + /* replace stacks->top value with real register SBR state */ + regs->stacks.top = regs->g_stacks.top; + kvm_do_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_in_intercept); + + insert_pv_vcpu_syscall(vcpu, regs); + + return true; /* it is system call from guest */ +} + +static inline unsigned long +kvm_get_host_guest_glob_regs(struct kvm_vcpu *vcpu, + unsigned long **g_gregs, unsigned long not_get_gregs_mask, + bool dirty_bgr, unsigned int *bgr) +{ + global_regs_t gregs; + unsigned long ret = 0; + + preempt_disable(); /* to save on one CPU */ + machine.save_gregs_on_mask(&gregs, dirty_bgr, not_get_gregs_mask); + preempt_enable(); + + if (copy_to_user_with_tags(g_gregs, gregs.g, sizeof(gregs.g))) { + pr_err("%s(): could not copy global registers to user\n", + __func__); + ret = -EFAULT; + } + if (bgr != NULL) { + if (put_user(gregs.bgr.BGR_reg, bgr)) { + pr_err("%s(): could not copy BGR register to user\n", + __func__); + ret = -EFAULT; + } + } + + copy_k_gregs_to_gregs(&gregs, ¤t_thread_info()->k_gregs); + + if (ret == 0) { + DebugGREGS("get %ld global registers of guest\n", + sizeof(gregs.g) / sizeof(*gregs.g)); + } + return ret; +} + +unsigned long +kvm_get_guest_glob_regs(struct kvm_vcpu *vcpu, + __user unsigned long *g_gregs[2], unsigned long not_get_gregs_mask, + bool dirty_bgr, __user unsigned int *bgr) +{ + hva_t hva; + kvm_arch_exception_t exception; + + hva = kvm_vcpu_gva_to_hva(vcpu, (gva_t)g_gregs, true, &exception); + if (kvm_is_error_hva(hva)) { + DebugKVM("failed to find GPA for dst %lx GVA, " + "inject page fault to 
guest\n", g_gregs); + kvm_vcpu_inject_page_fault(vcpu, (void *)g_gregs, &exception); + return -EAGAIN; + } + + g_gregs = (void *)hva; + if (bgr != NULL) { + hva = kvm_vcpu_gva_to_hva(vcpu, (gva_t)bgr, true, &exception); + if (kvm_is_error_hva(hva)) { + DebugKVM("failed to find GPA for dst %lx GVA, " + "inject page fault to guest\n", bgr); + kvm_vcpu_inject_page_fault(vcpu, (void *)bgr, + &exception); + return -EAGAIN; + } + + bgr = (void *)hva; + } + return kvm_get_host_guest_glob_regs(vcpu, g_gregs, not_get_gregs_mask, + dirty_bgr, bgr); +} + +static inline int +copy_gregs_to_guest_gregs(__user unsigned long *g_gregs[2], + struct e2k_greg *gregs, + int gregs_size) +{ + if (copy_to_user_with_tags(g_gregs, gregs, gregs_size)) { + DebugKVM("could not copy global registers used by kernel " + "to user\n"); + return -EFAULT; + } + return 0; +} + +#ifdef CONFIG_GREGS_CONTEXT +static inline int +copy_k_gregs_to_guest_gregs(__user unsigned long *g_gregs[2], + global_regs_t *k_gregs) +{ + kernel_gregs_t *kerne_gregs; + + return copy_gregs_to_guest_gregs( + &g_gregs[KERNEL_GREGS_PAIRS_START], + &k_gregs->g[KERNEL_GREGS_PAIRS_START], + sizeof(kerne_gregs->g)); +} +#else /* ! 
CONFIG_GREGS_CONTEXT */ +static inline int +copy_k_gregs_to_guest_gregs(__user unsigned long *g_gregs[2], + global_regs_t *k_gregs) +{ + return 0; +} +#endif /* CONFIG_GREGS_CONTEXT */ + +static inline int +copy_h_gregs_to_guest_gregs(__user unsigned long *g_gregs[2], + global_regs_t *h_gregs) +{ + host_gregs_t *host_gregs; + + return copy_gregs_to_guest_gregs( + &g_gregs[HOST_GREGS_PAIRS_START], + &h_gregs->g[HOST_GREGS_PAIRS_START], + sizeof(host_gregs->g)); +} + +static inline int +copy_kernel_gregs_to_guest_gregs(__user unsigned long *g_gregs[2], + kernel_gregs_t *k_gregs) +{ + return copy_gregs_to_guest_gregs( + &g_gregs[KERNEL_GREGS_PAIRS_START], + k_gregs->g, + sizeof(k_gregs->g)); +} + +static inline int +copy_host_gregs_to_guest_gregs(__user unsigned long *g_gregs[2], + host_gregs_t *h_gregs) +{ + return copy_gregs_to_guest_gregs( + &g_gregs[HOST_GREGS_PAIRS_START], + h_gregs->g, + sizeof(h_gregs->g)); +} + +static inline int +copy_gregs_to_guest_local_gregs(__user unsigned long *l_gregs[2], + global_regs_t *gregs) +{ + local_gregs_t *local_regs; + + if (copy_to_user_with_tags(l_gregs, &gregs->g[LOCAL_GREGS_START], + sizeof(local_regs->g))) { + DebugKVM("could not copy local global registers to user\n"); + return -EFAULT; + } + return 0; +} + +#ifdef CONFIG_GREGS_CONTEXT +static inline void +copy_guest_k_gregs_to_guest_gregs(global_regs_t *g_gregs, + kernel_gregs_t *k_gregs) +{ + kernel_gregs_t *kernel_gregs; + + tagged_memcpy_8(&g_gregs->g[KERNEL_GREGS_PAIRS_START], + &k_gregs->g[CURRENT_GREGS_PAIRS_INDEX_LO], + sizeof(kernel_gregs->g)); +} +#else /* ! 
CONFIG_GREGS_CONTEXT */ +static inline void +copy_guest_k_gregs_to_guest_gregs(global_regs_t *g_gregs, + kernel_gregs_t *k_gregs) +{ + return 0; +} +#endif /* CONFIG_GREGS_CONTEXT */ + +static inline void +copy_guest_h_gregs_to_guest_gregs(global_regs_t *g_gregs, + host_gregs_t *h_gregs) +{ + host_gregs_t *host_gregs; + + tagged_memcpy_8(&g_gregs->g[HOST_GREGS_PAIRS_START], + &h_gregs->g[HOST_VCPU_STATE_GREGS_PAIRS_INDEX_LO], + sizeof(host_gregs->g)); +} + +unsigned long +kvm_get_guest_local_glob_regs(struct kvm_vcpu *vcpu, + __user unsigned long *u_l_gregs[2], + bool is_signal) +{ + thread_info_t *ti = current_thread_info(); + global_regs_t gregs; + unsigned long **l_gregs = u_l_gregs; + hva_t hva; + kvm_arch_exception_t exception; + + hva = kvm_vcpu_gva_to_hva(vcpu, (gva_t)l_gregs, true, &exception); + if (kvm_is_error_hva(hva)) { + DebugKVM("failed to find GPA for dst %lx GVA, inject page " + "fault to guest\n", l_gregs); + kvm_vcpu_inject_page_fault(vcpu, (void *)l_gregs, &exception); + return -EAGAIN; + } + + l_gregs = (void *)hva; + preempt_disable(); /* to restore on one CPU */ + if (is_signal) + machine.save_gregs_on_mask(&gregs, + true, /* dirty BGR */ + GLOBAL_GREGS_USER_MASK | GUEST_GREGS_MASK); + preempt_enable(); + if (KERNEL_GREGS_MAX_MASK & LOCAL_GREGS_USER_MASK) { + copy_guest_k_gregs_to_guest_gregs(&gregs, &ti->k_gregs); + } + return copy_gregs_to_guest_local_gregs(l_gregs, &gregs); +} + +int kvm_get_all_guest_glob_regs(struct kvm_vcpu *vcpu, + __user unsigned long *g_gregs[2]) +{ + thread_info_t *ti = current_thread_info(); + unsigned long **gregs = g_gregs; + hva_t hva; + int ret; + kvm_arch_exception_t exception; + + hva = kvm_vcpu_gva_to_hva(vcpu, (gva_t)gregs, true, &exception); + if (kvm_is_error_hva(hva)) { + DebugKVM("failed to find GPA for dst %lx GVA, inject page " + "fault to guest\n", g_gregs); + kvm_vcpu_inject_page_fault(vcpu, (void *)g_gregs, &exception); + return -EAGAIN; + } + + gregs = (void *)hva; + ret = 
kvm_get_host_guest_glob_regs(vcpu, gregs, GUEST_GREGS_MASK, + true, /* dirty BGR */ + NULL); + if (ret) + return ret; + ret = copy_kernel_gregs_to_guest_gregs(gregs, &ti->k_gregs); + ret |= copy_host_gregs_to_guest_gregs(gregs, &ti->h_gregs); + return ret; +} + +unsigned long +kvm_set_guest_glob_regs(struct kvm_vcpu *vcpu, + __user unsigned long *g_gregs[2], unsigned long not_set_gregs_mask, + bool dirty_bgr, unsigned int *bgr) +{ + global_regs_t gregs; + hva_t hva; + unsigned long ret = 0; + kvm_arch_exception_t exception; + + hva = kvm_vcpu_gva_to_hva(vcpu, (gva_t)g_gregs, true, &exception); + if (kvm_is_error_hva(hva)) { + DebugKVM("failed to find GPA for dst %lx GVA, inject page " + "fault to guest\n", g_gregs); + kvm_vcpu_inject_page_fault(vcpu, (void *)g_gregs, &exception); + return -EAGAIN; + } + + g_gregs = (void *)hva; + if (copy_from_user(gregs.g, g_gregs, sizeof(gregs.g))) { + DebugKVM("could not copy global registers base from user\n"); + ret = -EFAULT; + } + + hva = kvm_vcpu_gva_to_hva(vcpu, (gva_t)bgr, true, &exception); + if (kvm_is_error_hva(hva)) { + DebugKVM("failed to find GPA for dst %lx GVA, inject page " + "fault to guest\n", bgr); + kvm_vcpu_inject_page_fault(vcpu, (void *)bgr, &exception); + return -EAGAIN; + } + + bgr = (void *)hva; + if (get_user(gregs.bgr.BGR_reg, bgr)) { + DebugKVM("could not copy BGR registers from user\n"); + ret = -EFAULT; + } + get_k_gregs_from_gregs(¤t_thread_info()->k_gregs, &gregs); + + preempt_disable(); /* to restore on one CPU */ + if (ret == 0) { + machine.restore_gregs_on_mask(&gregs, dirty_bgr, + not_set_gregs_mask); + DebugKVM("set %ld global registers of guest\n", + sizeof(gregs.g) / sizeof(*gregs.g)); + } + preempt_enable(); + return ret; +} + +#ifdef CONFIG_GREGS_CONTEXT +static inline void +copy_k_gregs_from_guest_gregs(kernel_gregs_t *k_gregs, global_regs_t *g_gregs) +{ + kernel_gregs_t *kernel_gregs; + + tagged_memcpy_8(&k_gregs->g[CURRENT_GREGS_PAIRS_INDEX_LO], + 
&g_gregs->g[KERNEL_GREGS_PAIRS_START], + sizeof(kernel_gregs->g)); +} +#else /* ! CONFIG_GREGS_CONTEXT */ +static inline void +copy_k_gregs_from_guest_gregs(kernel_gregs_t *k_gregs, global_regs_t *g_gregs) +{ + return 0; +} +#endif /* CONFIG_GREGS_CONTEXT */ + +static inline void +copy_h_gregs_from_guest_gregs(host_gregs_t *h_gregs, global_regs_t *g_gregs) +{ + host_gregs_t *host_gregs; + + tagged_memcpy_8(&h_gregs->g[HOST_VCPU_STATE_GREGS_PAIRS_INDEX_LO], + &g_gregs->g[HOST_GREGS_PAIRS_START], + sizeof(host_gregs->g)); +} + +static inline void +copy_local_gregs_from_guest_gregs(global_regs_t *l_gregs, + global_regs_t *g_gregs) +{ + local_gregs_t *local_gregs; + + tagged_memcpy_8(&l_gregs->g[LOCAL_GREGS_START], + &g_gregs->g[LOCAL_GREGS_START], + sizeof(local_gregs->g)); +} + +static inline int +copy_guest_local_gregs_to_gregs(global_regs_t *gregs, + __user unsigned long *l_gregs[2]) +{ + local_gregs_t *local_regs; + + if (copy_from_user_with_tags(&gregs->g[LOCAL_GREGS_START], l_gregs, + sizeof(local_regs->g))) { + DebugKVM("could not copy local global registers from user\n"); + return -EFAULT; + } + return 0; +} + +int kvm_copy_guest_all_glob_regs(struct kvm_vcpu *vcpu, + global_regs_t *h_gregs, __user unsigned long *g_gregs) +{ + hva_t hva; + kvm_arch_exception_t exception; + + hva = kvm_vcpu_gva_to_hva(vcpu, (gva_t)g_gregs, true, &exception); + if (kvm_is_error_hva(hva)) { + pr_err("%s(): failed to find GPA for dst %lx GVA, inject page " + "fault to guest\n", + __func__, g_gregs); + kvm_vcpu_inject_page_fault(vcpu, (void *)g_gregs, &exception); + return -EAGAIN; + } + + g_gregs = (void *)hva; + if (copy_from_user_with_tags(h_gregs->g, g_gregs, sizeof(h_gregs->g))) { + pr_err("%s(); could not copy global registers from user\n", + __func__); + return -EFAULT; + } + + return 0; +} + +unsigned long +kvm_set_guest_local_glob_regs(struct kvm_vcpu *vcpu, + __user unsigned long *u_l_gregs[2], + bool is_signal) +{ + thread_info_t *ti = current_thread_info(); + 
global_regs_t gregs; + unsigned long **l_gregs = u_l_gregs; + hva_t hva; + int ret; + kvm_arch_exception_t exception; + + hva = kvm_vcpu_gva_to_hva(vcpu, (gva_t)l_gregs, true, &exception); + if (kvm_is_error_hva(hva)) { + DebugKVM("failed to find GPA for dst %lx GVA, inject page " + "fault to guest\n", l_gregs); + kvm_vcpu_inject_page_fault(vcpu, (void *)l_gregs, &exception); + return -EAGAIN; + } + + l_gregs = (void *)hva; + ret = copy_guest_local_gregs_to_gregs(&gregs, l_gregs); + if (ret != 0) + return ret; + + if (KERNEL_GREGS_MAX_MASK & LOCAL_GREGS_USER_MASK) { + copy_k_gregs_from_guest_gregs(&ti->k_gregs, &gregs); + } + if (HOST_KERNEL_GREGS_MASK & LOCAL_GREGS_USER_MASK) { + copy_h_gregs_from_guest_gregs(&ti->h_gregs, &gregs); + } + preempt_disable(); /* to restore on one CPU */ + if (is_signal) + machine.restore_gregs_on_mask(&gregs, + true, /* dirty BGR */ + GLOBAL_GREGS_USER_MASK | GUEST_GREGS_MASK); + preempt_enable(); + return ret; +} + +#ifdef CONFIG_KVM_HOST_MODE +/* It is paravirtualized host and guest kernel */ +/* or native host kernel with virtualization support */ +/* FIXME: kvm host and hypervisor features is not supported on guest mode */ +/* and all files from arch/e2k/kvm should not be compiled for guest kernel */ +/* only arch/e2k/kvm/guest/ implements guest kernel support */ +/* So this ifdef should be deleted after excluding arch/e2k/kvm compilation */ + +#define printk printk_fixed_args +#define __trace_bprintk __trace_bprintk_fixed_args +#define panic panic_fixed_args + +/* + * Return from host kernel to paravirtualized guest kernel image + * It is used to return/done/call from host kernel to guest kernel + * shadowed image (paravirtualized images of host and guest kernel). 
+ * In this case host and guest images start from identical virtual addresses + * and it need switch from one image page table (only pgd level) to another + */ +unsigned long notrace __interrupt __to_paravirt_guest +return_to_paravirt_guest(unsigned long ret_value) +{ + thread_info_t *ti = NATIVE_READ_CURRENT_REG(); + + /* switch to guest shadow kernel image */ + if (ti->flags & _TIF_PARAVIRT_GUEST) { + *ti->kernel_image_pgd_p = ti->shadow_image_pgd; + /* guest and host kernel images are load to equal addresses */ + /* then switch from one to another must flush all caches */ + native_raw_flush_TLB_all(); + native_raw_write_back_CACHE_L12(); + } + return ret_value; +} + +/* + * Done from host kernel trap handler to paravirtualized guest kernel image + * WARNING: function should not have any CTPR and AAU based operations + */ +void notrace __interrupt __to_paravirt_guest +done_to_paravirt_guest(void) +{ + thread_info_t *ti = NATIVE_READ_CURRENT_REG(); + + /* switch to guest shadow kernel image */ + *ti->kernel_image_pgd_p = ti->shadow_image_pgd; + + /* guest and host kernel images are loaded to equal addresses */ + /* then switch from one to another must flush all caches */ + native_raw_flush_TLB_all(); + native_raw_write_back_CACHE_L12(); + + E2K_DONE(); +} + +/* + * Paravirtualized guest kernel function call from host kernel + */ +long notrace __interrupt __to_paravirt_guest +as_paravirt_guest_entry(unsigned long arg0, unsigned long arg1, + unsigned long arg2, unsigned long arg3, + char *entry_point, bool priv_guest) +{ + thread_info_t *ti; + long ret; + + ret = as_guest_entry_start(arg0, arg1, arg2, arg3, + entry_point, priv_guest); + + /* + * Guest kernel does not use global registers, so need not save + * values of global regs. Only set current pointers + * If guest kernel use global registers to support PV OPS + * (paravirtualized host and guest) then these global registers + * were saved earlier. 
+ * Guest use one global register to support VCPU state pointer, + * this global register was saved also earlier before start guest + * kernel process (host does not use this global register). + */ + ONLY_SET_KERNEL_GREGS(NATIVE_READ_CURRENT_REG()); + + ti = current_thread_info(); + if (ti->flags & _TIF_PARAVIRT_GUEST) { + /* return to host kernel image from guest shadow */ + *ti->kernel_image_pgd_p = ti->kernel_image_pgd; + /* guest and host kernel images are load to equal addresses */ + /* then switch from one to another must flush all caches */ + native_raw_flush_TLB_all(); + native_raw_write_back_CACHE_L12(); + /* recalculate per-CPU offset after switch to host */ + /* virtual space, previous setting was based on guest image */ + /* virtual adresses */ + barrier(); /* only for compiler to complete all waitings */ + /* for flushing old guest virtual space */ + ONLY_SET_SMP_CPUS_GREGS(ti); + } + return ret; +} +long notrace __interrupt __to_guest +call_guest_ttable_entry(int sys_num, + u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, + unsigned long ttable_func) +{ + thread_info_t *ti = current_thread_info(); + unsigned long kernel_image_pgd = pgd_val(ti->kernel_image_pgd); + unsigned long *kernel_image_pgd_p; + e2k_upsr_t guest_upsr; + long ret; + + /* restore guest kernel UPSR state */ + NATIVE_WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_DISABLED)); + + KVM_RESTORE_GUEST_KERNEL_UPSR(ti); + + ret = as_guest_ttable_entry(sys_num, arg1, arg2, arg3, arg4, arg5, arg6, + ttable_func); + + NATIVE_SWITCH_TO_KERNEL_UPSR(guest_upsr, + false, /* enable IRQs */ + false /* disable NMI */); + + /* the guest process can be scheduled and migrate to other VCPU */ + /* so host VCPU thread was changed and need update thread info */ + /* and VCPU satructures pointers */ + ti = NATIVE_READ_CURRENT_REG(); + + /* save current state of guest kernel UPSR */ + KVM_SAVE_GUEST_KERNEL_UPSR(ti, guest_upsr); + + if (ti->flags & _TIF_PARAVIRT_GUEST) { + /* return to host kernel image 
		   from guest shadow */
		/* system call (fork(), clone()) can create new mm context */
		/* or switch to other guest pgd, so update pointer */
		/* to kernel image */

		/* reread kernel image pgd, because ti can have changed */
		kernel_image_pgd = pgd_val(ti->kernel_image_pgd);

		kernel_image_pgd_p = (unsigned long *)ti->kernel_image_pgd_p;
		*kernel_image_pgd_p = kernel_image_pgd;

		/* if guest and host kernel images are loaded to equal */
		/* addresses, switching from one to the other must flush */
		/* all caches */
		native_raw_flush_TLB_all();
		native_raw_write_back_CACHE_L12();
	}
	return ret;
}

#undef printk
#undef __trace_bprintk
#undef panic

/*
 * The following functions (excluding return_to_guest_ttable_entry) are not
 * used and are here only so they can be disassembled to help in creation of
 * the assembler macro GOTO_GUEST_KERNEL_TTABLE(),
 * see file arch/e2k/include/asm/trap_table.h
 */


/* trap table entry #18 is used for auxiliary codes common for host and guest */

/* Rewrite CR0/CR1 so that 'done' transfers control to the guest ttable entry */
void return_to_guest_ttable_entry(unsigned long ttable_func)
{
	e2k_cr1_lo_t cr1_lo;
	e2k_cr0_hi_t cr0_hi;
	bool priv;
	e2k_psr_t psr;

	cr1_lo = NATIVE_NV_READ_CR1_LO_REG();
	cr0_hi = NATIVE_NV_READ_CR0_HI_REG();

	/* privileged if the entry address carries the PL_PM bit */
	priv = (ttable_func & PL_PM_MASK) ? true : false;

	AS_WORD(psr) = 0;
	AS_STRUCT(psr).sge = 1;
	AS_STRUCT(psr).ie = 1;		/* sti(); */
	AS_STRUCT(psr).nmie = 1;	/* nm sti(); */
	AS_STRUCT(psr).pm = priv;	/* system/user mode */
	AS_STRUCT(cr1_lo).psr = AS_WORD(psr);
	AS_STRUCT(cr0_hi).ip = ttable_func >> 3;	/* start user IP */

	NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG(cr1_lo);
	NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG(cr0_hi);

	KVM_GOTO_RETURN_TO_PARAVIRT_GUEST(0);
}
/* C reference body for the assembler guest ttable entry (disassembly aid) */
long as_guest_ttable_entry_C(int sys_num,
		u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6,
		unsigned long ttable_func)
{
	return_to_guest_ttable_entry(ttable_func);
	return 0;
}
#else	/* ! CONFIG_KVM_HOST_MODE */
/* It is native guest kernel.
 */
/* Virtualization in guest mode cannot be supported */
unsigned long return_to_paravirt_guest(unsigned long ret_value)
{
	BUG_ON(true);
	return 0;
}
long call_guest_ttable_entry(int sys_num,
		u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6,
		unsigned long ttable_func)
{
	BUG_ON(true);
	return 0;
}
void return_to_guest_ttable_entry(unsigned long ttable_func)
{
	BUG_ON(true);
}
#endif	/* CONFIG_KVM_HOST_MODE */

/* Yield the host CPU so other VCPUs waiting for a real CPU can run */
void kvm_guest_vcpu_relax(void)
{
	yield();	/* to activate other VCPU waiting for real CPU */
}

/*
 * Update one chain stack frame and one procedure stack frame of the guest
 * hardware stacks from frames copied in from guest user memory.
 * Returns 0 on success, -EINVAL on bad indices/sizes, -EAGAIN if a page
 * fault had to be injected into the guest.
 */
int kvm_update_hw_stacks_frames(struct kvm_vcpu *vcpu,
		__user e2k_mem_crs_t *u_pcs_frame, int pcs_frame_ind,
		__user kernel_mem_ps_t *u_ps_frame,
		int ps_frame_ind, int ps_frame_size)
{
	kernel_mem_ps_t ps_frame[KVM_MAX_PS_FRAME_NUM_TO_UPDATE];
	e2k_mem_crs_t pcs_frame;
	e2k_mem_crs_t *pcs;
	e2k_mem_ps_t *ps;
	unsigned long flags;
	bool priv_guest;
	e2k_stacks_t *guest_stacks;
	hva_t hva;
	int ps_ind, pcs_ind;
	int frame;
	int ret;
	kvm_arch_exception_t exception;

	if ((pcs_frame_ind & E2K_ALIGN_PSTACK_TOP_MASK) != 0) {
		DebugKVM("chain stack frame ind 0x%x is not aligned\n",
			pcs_frame_ind);
		return -EINVAL;
	}
	if (ps_frame_size > KVM_MAX_PS_FRAME_SIZE_TO_UPDATE ||
			((ps_frame_size & E2K_ALIGN_PSTACK_TOP_MASK) != 0)) {
		DebugKVM("procedure stack frame size 0x%x is too big or not "
			"aligned\n", ps_frame_size);
		return -EINVAL;
	}
	if ((ps_frame_ind & E2K_ALIGN_PSTACK_TOP_MASK) != 0) {
		DebugKVM("procedure stack frame ind 0x%x is not aligned\n",
			ps_frame_ind);
		return -EINVAL;
	}
	/* hypercalls are running on own hardware stacks */
	guest_stacks = &vcpu->arch.guest_stacks.stacks;
	ps_ind = guest_stacks->psp_hi.PSP_hi_ind;
	pcs_ind = guest_stacks->pcsp_hi.PCSP_hi_ind;
	if (pcs_frame_ind >= pcs_ind) {
		DebugKVM("chain stack frame ind 0x%x is out of current "
			"stack boundaries 0x%x\n",
			pcs_frame_ind, pcs_ind);
		return -EINVAL;
	}
	if (ps_frame_ind + ps_frame_size > ps_ind) {
		DebugKVM("procedure stack frame ind 0x%x and size 0x%x "
			"is out of current stack boundaries 0x%x\n",
			ps_frame_ind, ps_frame_size, ps_ind);
		return -EINVAL;
	}
	/* pull the replacement frames in from guest memory */
	ret = kvm_vcpu_copy_from_guest(vcpu, &pcs_frame, u_pcs_frame,
			sizeof(pcs_frame));
	if (unlikely(ret < 0)) {
		DebugKVM("copy chain stack frames from user failed, "
			"maybe retried\n");
		return ret;
	}
	ret = kvm_vcpu_copy_from_guest(vcpu, &ps_frame, u_ps_frame,
			ps_frame_size);
	if (unlikely(ret < 0)) {
		DebugKVM("copy procedure stack frames from user failed, "
			"maybe retried\n");
		return ret;
	}

	/* hardware virtualized guest runs by GLAUNCH at privileged mode */
	priv_guest = vcpu->arch.is_hv;

	raw_all_irq_save(flags);

	/* hypercalls are running on own hardware stacks */
	ps = (e2k_mem_ps_t *)guest_stacks->psp_lo.PSP_lo_base;
	pcs = (e2k_mem_crs_t *)guest_stacks->pcsp_lo.PCSP_lo_base;

	/* translate guest stack bases to host virtual addresses */
	hva = kvm_vcpu_gva_to_hva(vcpu, (gva_t)ps, true, &exception);
	if (kvm_is_error_hva(hva)) {
		DebugKVM("failed to find GPA for dst %lx GVA, inject page "
			"fault to guest\n", ps);
		kvm_vcpu_inject_page_fault(vcpu, (void *)ps, &exception);
		ret = -EAGAIN;
		goto out_error;
	}

	ps = (e2k_mem_ps_t *)hva;

	hva = kvm_vcpu_gva_to_hva(vcpu, (gva_t)pcs, true, &exception);
	if (kvm_is_error_hva(hva)) {
		DebugKVM("failed to find GPA for dst %lx GVA, inject page "
			"fault to guest\n", pcs);
		kvm_vcpu_inject_page_fault(vcpu, (void *)pcs, &exception);
		ret = -EAGAIN;
		goto out_error;
	}

	pcs = (e2k_mem_crs_t *)hva;
	ps = &ps[ps_frame_ind / sizeof(*ps)];
	DebugKVMHSU("procedure stack frame to update: index 0x%x base %px\n",
		ps_frame_ind, ps);
	pcs = &pcs[pcs_frame_ind / sizeof(*pcs)];
	DebugKVMHSU("chain stack frame to update: index 0x%x base %px\n",
		pcs_frame_ind, pcs);

	/* an unprivileged guest must not touch privileged (host) frames */
	if (pcs->cr1_lo.CR1_lo_pm && !priv_guest) {
		DebugKVM("try to update host kernel frame\n");
		ret = -EINVAL;
		goto out_error;
	}
	if (ps_frame_size > pcs->cr1_lo.CR1_lo_wbs * EXT_4_NR_SZ) {
		DebugKVM("try to update too big procedure frame\n");
		ret = -EINVAL;
		goto out_error;
	}

	/* FIXME: should use kvm_vcpu_copy_to_guest()/_from_guest() */
	/* functions to update guest hw stacks frames */

	/* now can update only IP field of chain stack registers */
	DebugKVMHSU("will update only CR0_hi IP from %pF to %pF\n",
		(void *)(pcs->cr0_hi.CR0_hi_IP),
		(void *)(pcs_frame.cr0_hi.CR0_hi_IP));
	pcs->cr0_hi = pcs_frame.cr0_hi;

	/* FIXME: tags are not copied */
	for (frame = 0; frame < ps_frame_size / EXT_4_NR_SZ; frame++) {
		if (machine.native_iset_ver < E2K_ISET_V5) {
			ps[frame].v2.word_lo = ps_frame[frame].word_lo;
			ps[frame].v2.word_hi = ps_frame[frame].word_hi;
			/* Skip frame[2] and frame[3] - they hold */
			/* extended data not used by kernel */
		} else {
			ps[frame].v5.word_lo = ps_frame[frame].word_lo;
			ps[frame].v5.word_hi = ps_frame[frame].word_hi;
			/* Skip frame[1] and frame[3] - they hold */
			/* extended data not used by kernel */
		}
	}

	raw_all_irq_restore(flags);

	return 0;

out_error:
	raw_all_irq_restore(flags);
	return ret;
}

/*
 * Patch (update) the saved guest kernel data stack pointers of the current
 * guest thread from a patch descriptor copied in from guest memory.
 * All patched values are validated against the guest thread's stack bounds.
 */
int kvm_patch_guest_data_stack(struct kvm_vcpu *vcpu,
		__user kvm_data_stack_info_t *u_ds_patch)
{
	thread_info_t *ti = current_thread_info();
	gthread_info_t *gti = ti->gthread_info;
	kvm_data_stack_info_t ds_patch;
	int ret = 0;

	ret = kvm_vcpu_copy_from_guest(vcpu, &ds_patch, u_ds_patch,
			sizeof(ds_patch));
	if (unlikely(ret < 0)) {
		pr_err("%s(): copy dat stack pointers patch from user "
			"failed, maybe retried\n", __func__);
		return ret;
	}
	if (gti == NULL) {
		pr_err("%s(): process %s (%d) is not guest thread\n",
			__func__, current->comm, current->pid);
		return -EINVAL;
	}
	DebugKVMHSP("host guest kernel data stack bottom 0x%lx "
		"top 0x%lx size 0x%lx\n",
		gti->data_stack.bottom, gti->data_stack.top,
		gti->data_stack.size);
	DebugKVMHSP("native user data stack bottom 0x%lx top 0x%lx "
		"size 0x%lx\n",
		ti->u_stack.bottom, ti->u_stack.top,
		ti->u_stack.size);
	DebugKVMHSP("current guest kernel data stack top 0x%lx "
		"base 0x%llx size 0x%x\n",
		gti->stack_regs.stacks.u_top,
		gti->stack_regs.stacks.u_usd_lo.USD_lo_base,
		gti->stack_regs.stacks.u_usd_hi.USD_hi_size);
	DebugKVMHSP("patched data stack: top 0x%lx base 0x%lx size 0x%lx\n",
		ds_patch.top, ds_patch.usd_base, ds_patch.usd_size);
	if (ds_patch.protected) {
		pr_err("%s(): patching of protected data stacks is not "
			"yet implemented\n",
			__func__);
		return -EINVAL;
	}
	/* validate every patched pointer against the guest stack bounds */
	if (ds_patch.top < gti->data_stack.bottom ||
			ds_patch.top > gti->data_stack.top) {
		DebugKVMHSP("top to patch is out of data stack bounderies\n");
		return -EINVAL;
	}
	if (ds_patch.usd_base < gti->data_stack.bottom ||
			ds_patch.usd_base > gti->data_stack.top) {
		DebugKVMHSP("base to patch is out of data stack bounderies\n");
		return -EINVAL;
	}
	if (ds_patch.usd_base >= ds_patch.top) {
		DebugKVMHSP("base to patch is above of top to patch\n");
		return -EINVAL;
	}
	if (ds_patch.usd_base - ds_patch.usd_size < gti->data_stack.bottom) {
		DebugKVMHSP("base - size to patch is below of stack bottom\n");
		return -EINVAL;
	}
	/* apply only the fields that actually changed */
	if (ds_patch.top != gti->stack_regs.stacks.u_top) {
		DebugKVMHSP("will patch top (SBR) of guest kernel data stack "
			"from 0x%lx to 0x%lx\n",
			gti->stack_regs.stacks.u_top, ds_patch.top);
		gti->stack_regs.stacks.u_top = ds_patch.top;
	}
	if (ds_patch.usd_base != gti->stack_regs.stacks.u_usd_lo.USD_lo_base) {
		DebugKVMHSP("will patch base of guest kernel data stack "
			"from 0x%llx to 0x%lx\n",
			gti->stack_regs.stacks.u_usd_lo.USD_lo_base,
			ds_patch.usd_base);
		gti->stack_regs.stacks.u_usd_lo.USD_lo_base = ds_patch.usd_base;
	}
	if (ds_patch.usd_size != gti->stack_regs.stacks.u_usd_hi.USD_hi_size) {
		DebugKVMHSP("will patch size of guest kernel data stack "
			"from 0x%x to 0x%lx\n",
			gti->stack_regs.stacks.u_usd_hi.USD_hi_size,
			ds_patch.usd_size);
		gti->stack_regs.stacks.u_usd_hi.USD_hi_size = ds_patch.usd_size;
	}

	return ret;
}
/*
 * Patch saved frames of the guest chain stack according to an array of
 * up to KVM_MAX_PCS_FRAME_NUM_TO_PATCH patch descriptors copied in from
 * guest memory. Each descriptor selects the fields to update via flags.
 */
int kvm_patch_guest_chain_stack(struct kvm_vcpu *vcpu,
		__user kvm_pcs_patch_info_t u_pcs_patch[], int pcs_frames)
{
	kvm_pcs_patch_info_t pcs_patch[KVM_MAX_PCS_FRAME_NUM_TO_PATCH];
	kvm_pcs_patch_info_t *patch;
	e2k_mem_crs_t *pcs, *frame;
	e2k_stacks_t *guest_stacks;
	e2k_addr_t pcs_base;
	hva_t hva;
	e2k_size_t pcs_ind, pcs_size, pcshtop;
	bool priv_guest;
	unsigned long flags;
	int fr_no;
	int ret = 0;
	kvm_arch_exception_t exception;

	if (pcs_frames > KVM_MAX_PCS_FRAME_NUM_TO_PATCH ||
			pcs_frames < 0) {
		pr_err("%s(): PCS frames number %d is too big, can only %d\n",
			__func__, pcs_frames, KVM_MAX_PCS_FRAME_NUM_TO_PATCH);
		return -EINVAL;
	}
	ret = kvm_vcpu_copy_from_guest(vcpu, pcs_patch, u_pcs_patch,
			sizeof(pcs_patch[0]) * pcs_frames);
	if (unlikely(ret < 0)) {
		pr_err("%s(): copy chain stack frames patch from user "
			"failed, maybe retried\n", __func__);
		return ret;
	}

	/* hardware virtualized guest runs by GLAUNCH at privileged mode */
	priv_guest = vcpu->arch.is_hv;

	raw_all_irq_save(flags);

	/* FIXME: disable stack boundaries traps to exclude resident window */
	/* change, because patching is now implemented only for frames in */
	/* the resident window */
	/* TODO: is this needed now ??? */
	native_reset_sge();

	guest_stacks = &vcpu->arch.guest_stacks.stacks;
	pcs_base = guest_stacks->pcsp_lo.PCSP_lo_base;
	pcs_ind = guest_stacks->pcsp_hi.PCSP_hi_ind;
	pcs_size = guest_stacks->pcsp_hi.PCSP_hi_size;
	pcshtop = 0;

	pcs = (e2k_mem_crs_t *)pcs_base;

	/* translate the guest chain stack base to a host virtual address */
	hva = kvm_vcpu_gva_to_hva(vcpu, (gva_t)pcs, true, &exception);
	if (kvm_is_error_hva(hva)) {
		DebugKVM("failed to find GPA for dst %lx GVA, inject page "
			"fault to guest\n", pcs);
		kvm_vcpu_inject_page_fault(vcpu, (void *)pcs, &exception);
		ret = -EAGAIN;
		goto out_error;
	}

	pcs = (e2k_mem_crs_t *)hva;
	for (fr_no = 0; fr_no < pcs_frames; fr_no++) {
		patch = &pcs_patch[fr_no];
		DebugKVMHSP("PCS patch #%d will patch frame at ind 0x%x\n",
			fr_no, patch->ind);
		if (patch->ind < 0 || patch->ind >= pcs_ind + pcshtop) {
			DebugKVMHSP("PCS frame ind 0x%x to patch is out of "
				"chain stack boundaries\n",
				patch->ind);
			ret = -EINVAL;
			goto out_error;
		}
		/* FIXME: patching of not resident part of chain stack */
		/* is not implemented */
		WARN_ONCE(1, "stacks are not resident anymore");
		if (patch->ind >= pcs_ind) {
			pr_err("%s(): patching of chain stack frame ind 0x%x "
				"up of current final frame ind 0x%lx\n",
				__func__, patch->ind, pcs_ind);
			ret = -EINVAL;
			goto out_error;
		}
		frame = &pcs[patch->ind / sizeof(*pcs)];
		/* unprivileged guest must not patch privileged host frames */
		if (frame->cr1_lo.CR1_lo_pm && !priv_guest) {
			pr_err("%s(): try to patch host kernel frame\n",
				__func__);
			ret = -EINVAL;
			goto out_error;
		}
		if (patch->update_flags == 0) {
			DebugKVMHSP("PCS frame ind 0x%x update flags empty\n",
				patch->ind);
			continue;
		}
		if (patch->update_flags & KVM_PCS_IP_UPDATE_FLAG) {
			DebugKVMHSP("wiil patch IP from 0x%llx to 0x%lx at "
				"PCS frame ind 0x%x\n",
				frame->cr0_hi.CR0_hi_IP, patch->IP, patch->ind);
			frame->cr0_hi.CR0_hi_IP = patch->IP;
		}
		if (patch->update_flags & KVM_PCS_USSZ_UPDATE_FLAG) {
			DebugKVMHSP("wiil patch USD size from 0x%x to 0x%x "
				"at PCS frame ind 0x%x\n",
				frame->cr1_hi.CR1_hi_ussz, patch->usd_size >> 4,
				patch->ind);
			/* ussz is stored in 16-byte units, hence >> 4 */
			frame->cr1_hi.CR1_hi_ussz = patch->usd_size >> 4;
		}
		if (patch->update_flags & KVM_PCS_WBS_UPDATE_FLAG) {
			DebugKVMHSP("wiil patch wbs from 0x%x to 0x%x at "
				"PCS frame ind 0x%x\n",
				frame->cr1_lo.CR1_lo_wbs, patch->wbs,
				patch->ind);
			frame->cr1_lo.CR1_lo_wbs = patch->wbs;
		}
		if (patch->update_flags & KVM_PCS_WPSZ_UPDATE_FLAG) {
			DebugKVMHSP("wiil patch wpsz from 0x%x to 0x%x at "
				"PCS frame ind 0x%x\n",
				frame->cr1_lo.CR1_lo_wpsz, patch->wpsz,
				patch->ind);
			frame->cr1_lo.CR1_lo_wpsz = patch->wpsz;
		}
	}
	ret = 0;

out_error:

	/* FIXME: re-enable stack boundaries traps to include resident */
	/* window change, because patching is now implemented only for */
	/* frames in the resident window */
	/* TODO: needed?? native_set_sge(); */

	raw_all_irq_restore(flags);

	return ret;
}

/*
 * Patch guest data stack pointers and/or chain stack frames in one call;
 * either patch pointer may be NULL to skip that part.
 */
int kvm_patch_guest_data_and_chain_stacks(struct kvm_vcpu *vcpu,
		__user kvm_data_stack_info_t *u_ds_patch,
		__user kvm_pcs_patch_info_t u_pcs_patch[], int pcs_frames)
{
	int ret = 0;

	if (u_ds_patch != NULL)
		ret = kvm_patch_guest_data_stack(vcpu, u_ds_patch);
	if (ret != 0)
		return ret;
	if (u_pcs_patch != NULL && pcs_frames != 0)
		ret = kvm_patch_guest_chain_stack(vcpu,
				u_pcs_patch, pcs_frames);
	return ret;
}
diff --git a/arch/e2k/kvm/cpu.h b/arch/e2k/kvm/cpu.h
new file mode 100644
index 000000000000..9e083df80cea
--- /dev/null
+++ b/arch/e2k/kvm/cpu.h
@@ -0,0 +1,1627 @@
#ifndef __KVM_E2K_CPU_H
#define __KVM_E2K_CPU_H

/* NOTE(review): the #include targets below appear to have been lost during
 * text extraction (empty #include lines) — restore them from the original
 * patch before building. */
#include
#include
#include
#include
#include
#include

#include "cpu_defs.h"
#include "intercepts.h"
#include "process.h"
#include "mmu_defs.h"
#include "irq.h"

#undef DEBUG_UPDATE_HW_STACK_MODE
#undef DebugUHS
#define DEBUG_UPDATE_HW_STACK_MODE 0	/* guest hardware stacks */
					/* update debugging */
#define DebugUHS(fmt, args...)
\
({									\
	if (DEBUG_UPDATE_HW_STACK_MODE)					\
		pr_info("%s(): " fmt, __func__, ##args);		\
})

#undef DEBUG_HOST_ACTIVATION_MODE
#undef DebugHACT
#define DEBUG_HOST_ACTIVATION_MODE 0	/* KVM host kernel data */
					/* stack activations */
					/* debugging */
#define DebugHACT(fmt, args...)						\
({									\
	if (DEBUG_HOST_ACTIVATION_MODE)					\
		pr_info("%s(): " fmt, __func__, ##args);		\
})

#undef DEBUG_GUEST_HS_MODE
#undef DebugGHS
#define DEBUG_GUEST_HS_MODE 0	/* Hard Stack expansions */
#define DebugGHS(fmt, args...)						\
({									\
	if (DEBUG_GUEST_HS_MODE)					\
		pr_info("%s(): " fmt, __func__, ##args);		\
})

#undef DEBUG_KVM_LONG_JUMP_MODE
#undef DebugLJMP
#define DEBUG_KVM_LONG_JUMP_MODE 0	/* long jump debug */
#define DebugLJMP(fmt, args...)						\
({									\
	if (DEBUG_KVM_LONG_JUMP_MODE)					\
		pr_info("%s(): " fmt, __func__, ##args);		\
})

extern void kvm_init_cpu_state(struct kvm_vcpu *vcpu);
extern void kvm_set_vcpu_kernel_image(struct kvm_vcpu *vcpu,
		char *kernel_base, unsigned long kernel_size);
extern void kvm_set_pv_vcpu_kernel_image(struct kvm_vcpu *vcpu);
extern void write_hw_ctxt_to_pv_vcpu_registers(struct kvm_vcpu *vcpu,
		const struct kvm_hw_cpu_context *hw_ctxt,
		const struct kvm_sw_cpu_context *sw_ctxt);
extern void init_hw_ctxt(struct kvm_vcpu *vcpu);
extern noinline __interrupt void startup_pv_vcpu(struct kvm_vcpu *vcpu,
		guest_hw_stack_t *stack_regs,
		unsigned flags);
extern noinline __interrupt unsigned long launch_pv_vcpu(struct kvm_vcpu *vcpu,
		unsigned switch_flags);
extern void kvm_init_cpu_state_idr(struct kvm_vcpu *vcpu);
extern e2k_idr_t kvm_vcpu_get_idr(struct kvm_vcpu *vcpu);

/* guest kernel trap table base address: ttable0 */
extern char __kvm_pv_vcpu_ttable_entry0[];

/* Return the guest trap table base address for a PV or HV VCPU */
static inline e2k_addr_t kvm_get_pv_vcpu_ttable_base(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.is_hv || vcpu->arch.is_pv)) {
		return (e2k_addr_t)vcpu->arch.trap_entry;
	} else {
		KVM_BUG_ON(true);
		return -1UL;
	}
}

static inline void
set_pv_vcpu_u_stack_context(struct kvm_vcpu *vcpu, guest_hw_stack_t *stack_regs)
{
	kvm_sw_cpu_context_t *sw_ctxt = &vcpu->arch.sw_ctxt;
	e2k_stacks_t *stacks = &stack_regs->stacks;

	/* propagate user data stack pointers into the SW CPU context */
	sw_ctxt->sbr.SBR_reg = stacks->top;
	sw_ctxt->usd_lo = stacks->usd_lo;
	sw_ctxt->usd_hi = stacks->usd_hi;

	sw_ctxt->cutd = stack_regs->cutd;
}

/*
 * Only the state of the hardware virtualization bits is interesting
 */
static inline e2k_core_mode_t read_guest_CORE_MODE_reg(struct kvm_vcpu *vcpu)
{
	e2k_core_mode_t core_mode;

	core_mode.CORE_MODE_reg = 0;

	if (vcpu->arch.is_hv) {
		/* register state is real actual */
		return read_SH_CORE_MODE_reg();
	} else if (vcpu->arch.is_pv) {
		/* register state is not actual: return zeroed value */
		return core_mode;
	} else {
		KVM_BUG_ON(true);
	}
	return core_mode;
}

static inline void
write_guest_CORE_MODE_reg(struct kvm_vcpu *vcpu, e2k_core_mode_t new_reg)
{
	if (vcpu->arch.is_hv) {
		/* register state is real actual */
		write_SH_CORE_MODE_reg(new_reg);
	} else if (vcpu->arch.is_pv) {
		/* register state is not actual, ignore */
		;
	} else {
		KVM_BUG_ON(true);
	}
}

/*
 * The function emulates change of guest PSR state on trap & system call.
 * In these cases the interrupt masks are disabled in PSR.
 * WARNING: the 'sge' flag is disabled only on trap, but for guest kernel the
 * flag needs to be disabled too, to mask hardware stacks bounds traps while
 * the guest kernel is saving user context and enabling the trap.
 * The function returns the source state of PSR to enable recovery after 'done'
 */
static inline e2k_psr_t
kvm_emulate_guest_vcpu_psr_trap(struct kvm_vcpu *vcpu, bool *irqs_under_upsr)
{
	e2k_psr_t psr;
	e2k_psr_t new_psr;

	psr = kvm_get_guest_vcpu_PSR(vcpu);
	*irqs_under_upsr = kvm_get_guest_vcpu_under_upsr(vcpu);
	/* disable IRQs/NMIs/sge and force privileged mode in the copy */
	new_psr.PSR_reg = psr.PSR_reg & ~(PSR_IE | PSR_NMIE | PSR_SGE);
	new_psr.PSR_reg = new_psr.PSR_reg | PSR_PM;
	kvm_set_guest_vcpu_PSR(vcpu, new_psr);
	return psr;
}

/*
 * The function emulates change of guest PSR state on done from trap or
 * return after system call.
 * In these cases the PSR state is recovered from CR1.lo by hardware.
 * For the guest VCPU registers state (copy in memory) only the host can
 * recover the source state, saved by the function above or from CR1.lo
 */
static inline void
kvm_emulate_guest_vcpu_psr_done(struct kvm_vcpu *vcpu, e2k_psr_t source_psr,
		bool source_under_upsr)
{
	kvm_set_guest_vcpu_PSR(vcpu, source_psr);
	kvm_set_guest_vcpu_under_upsr(vcpu, source_under_upsr);
}
static inline void
kvm_emulate_guest_vcpu_psr_return(struct kvm_vcpu *vcpu, pt_regs_t *regs)
{
	e2k_psr_t source_psr;

	source_psr.PSR_reg = regs->crs.cr1_lo.CR1_lo_psr;
	KVM_BUG_ON(!psr_all_irqs_enabled_flags(source_psr.PSR_reg) ||
			all_irqs_under_upsr_flags(source_psr.PSR_reg));
	kvm_set_guest_vcpu_PSR(vcpu, source_psr);
	kvm_set_guest_vcpu_under_upsr(vcpu, false);
}

extern int kvm_update_hw_stacks_frames(struct kvm_vcpu *vcpu,
		__user e2k_mem_crs_t *u_pcs_frame, int pcs_frame_ind,
		__user kernel_mem_ps_t *u_ps_frame,
		int ps_frame_ind, int ps_frame_size);
extern int kvm_patch_guest_data_and_chain_stacks(struct kvm_vcpu *vcpu,
		__user kvm_data_stack_info_t *u_ds_patch,
		__user kvm_pcs_patch_info_t pcs_patch[], int pcs_frames);

#ifdef CONFIG_KVM_HOST_MODE
/* It is paravirtualized host and guest kernel */
/* or native host kernel with virtualization support */
/* FIXME: kvm host and hypervisor features are not
   supported on guest mode */
/* and all files from arch/e2k/kvm should not be compiled for guest kernel */
/* only arch/e2k/kvm/guest/ implements guest kernel support */
/* So this ifdef should be deleted after excluding arch/e2k/kvm compilation */

extern unsigned long kvm_get_guest_local_glob_regs(struct kvm_vcpu *vcpu,
		__user unsigned long *u_l_gregs[2],
		bool is_signal);
extern unsigned long kvm_set_guest_local_glob_regs(struct kvm_vcpu *vcpu,
		__user unsigned long *u_l_gregs[2],
		bool is_signal);
extern int kvm_copy_guest_all_glob_regs(struct kvm_vcpu *vcpu,
		global_regs_t *h_gregs, __user unsigned long *g_gregs);
extern int kvm_get_all_guest_glob_regs(struct kvm_vcpu *vcpu,
		__user unsigned long *g_gregs[2]);
extern long as_guest_entry_start(unsigned long arg0, unsigned long arg1,
		unsigned long arg2, unsigned long arg3,
		char *entry_point, bool priv_guest);
extern long as_paravirt_guest_entry(unsigned long arg0, unsigned long arg1,
		unsigned long arg2, unsigned long arg3,
		char *entry_point, bool priv_guest);
extern long as_guest_ttable_entry(int sys_num,
		u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6,
		unsigned long ttable_func);

#else	/* ! CONFIG_KVM_HOST_MODE */
/* It is native guest kernel.
*/ +/* Virtualiztion in guest mode cannot be supported */ +static inline long +as_guest_entry_start(unsigned long arg0, unsigned long arg1, + unsigned long arg2, unsigned long arg3, + char *entry_point, bool priv_guest) +{ + return 0; +} +static inline long +as_paravirt_guest_entry(unsigned long arg0, unsigned long arg1, + unsigned long arg2, unsigned long arg3, + char *entry_point, bool priv_guest) +{ + return 0; +} +#endif /* CONFIG_KVM_HOST_MODE */ + +static inline int get_pv_vcpu_traps_num(struct kvm_vcpu *vcpu) +{ + kvm_host_context_t *host_ctxt = &vcpu->arch.host_ctxt; + + return atomic_read(&host_ctxt->signal.traps_num); +} + +static inline int get_pv_vcpu_pre_trap_gener(struct kvm_vcpu *vcpu) +{ + return get_pv_vcpu_traps_num(vcpu) - 1; +} + +static inline int get_pv_vcpu_post_trap_gener(struct kvm_vcpu *vcpu) +{ + return get_pv_vcpu_traps_num(vcpu); +} + +static inline vcpu_l_gregs_t *get_new_pv_vcpu_l_gregs(struct kvm_vcpu *vcpu) +{ + gthread_info_t *gti = pv_vcpu_get_gti(vcpu); + vcpu_l_gregs_t *l_gregs = >i->l_gregs; + int gener; + + gener = get_pv_vcpu_pre_trap_gener(vcpu); + KVM_BUG_ON(gener < 0); + + if (likely(l_gregs->valid)) { + /* there is valid gregs */ + if (likely(l_gregs->gener == gener)) { + /* gregs is actual for use */ + return l_gregs; + } + /* gregs is from other trap generation and not actual here */ + return NULL; + } + + /* make current generation as actual */ + KVM_BUG_ON(l_gregs->updated != 0); + l_gregs->gener = gener; + l_gregs->valid = true; + return l_gregs; +} + +static inline bool is_actual_pv_vcpu_l_gregs(struct kvm_vcpu *vcpu) +{ + gthread_info_t *gti = pv_vcpu_get_gti(vcpu); + vcpu_l_gregs_t *l_gregs = >i->l_gregs; + int gener; + + if (likely(l_gregs->valid)) { + /* there is valid gregs */ + gener = get_pv_vcpu_post_trap_gener(vcpu); + KVM_BUG_ON(gener < 0); + if (likely(l_gregs->gener == gener)) { + /* gregs is actual for use */ + return true; + } + } + return false; +} + +static inline vcpu_l_gregs_t 
*get_actual_pv_vcpu_l_gregs(struct kvm_vcpu *vcpu) +{ + gthread_info_t *gti = pv_vcpu_get_gti(vcpu); + vcpu_l_gregs_t *l_gregs = >i->l_gregs; + + if (!is_actual_pv_vcpu_l_gregs(vcpu)) { + /* there is not actual gregs */ + return NULL; + } + return l_gregs; +} + +static inline void init_pv_vcpu_l_gregs(gthread_info_t *gti) +{ + vcpu_l_gregs_t *l_gregs = >i->l_gregs; + + /* invalidate current generation */ + l_gregs->updated = 0; + l_gregs->gener = -1; + l_gregs->valid = false; +} + +static inline void put_pv_vcpu_l_gregs(struct kvm_vcpu *vcpu) +{ + gthread_info_t *gti = pv_vcpu_get_gti(vcpu); + vcpu_l_gregs_t *l_gregs = >i->l_gregs; + + KVM_BUG_ON(!is_actual_pv_vcpu_l_gregs(vcpu)); + + /* invalidate current generation */ + KVM_BUG_ON(l_gregs->updated != 0); + l_gregs->gener = -1; + l_gregs->valid = false; +} + +static __always_inline void +do_emulate_pv_vcpu_intc(thread_info_t *ti, pt_regs_t *regs, + trap_pt_regs_t *trap) +{ + struct kvm_vcpu *vcpu = ti->vcpu; + + __guest_exit(ti, &vcpu->arch, DONT_AAU_CONTEXT_SWITCH); + + /* return to hypervisor MMU context to emulate hw intercept */ + kvm_switch_to_host_mmu_pid(current->mm); + + kvm_set_intc_emul_flag(regs); + + kvm_init_pv_vcpu_intc_handling(vcpu, regs); + + kvm_do_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_in_intercept); +} + +static notrace __always_inline void +return_from_pv_vcpu_inject(struct kvm_vcpu *vcpu) +{ + KVM_BUG_ON(!test_and_clear_ts_flag(TS_HOST_AT_VCPU_MODE)); + + /* return to hypervisor context */ + __guest_exit(current_thread_info(), &vcpu->arch, 0); + /* return to hypervisor MMU context */ + kvm_switch_to_host_mmu_pid(current->mm); +} + +static __always_inline void +do_return_from_pv_vcpu_intc(struct thread_info *ti, pt_regs_t *regs) +{ + struct kvm_vcpu *vcpu = ti->vcpu; + + kvm_do_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_running); + + __guest_enter(ti, &vcpu->arch, DONT_AAU_CONTEXT_SWITCH); + + /* switch host MMU to guest VCPU MMU context */ + 
	kvm_switch_to_guest_mmu_pid(vcpu);

	/* from now the host process is at paravirtualized guest (VCPU) mode */
	set_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE);
}

/*
 * Finish trap injection for a PV VCPU: restore guest PSR state and
 * decrement the recursive trap accounting counters.
 */
static __always_inline void
trap_handler_trampoline_finish(struct kvm_vcpu *vcpu,
		pv_vcpu_ctxt_t *vcpu_ctxt, kvm_host_context_t *host_ctxt)
{
	KVM_BUG_ON(vcpu_ctxt->inject_from != FROM_PV_VCPU_TRAP_INJECT);

	KVM_BUG_ON(atomic_read(&host_ctxt->signal.traps_num) <= 0);
	KVM_BUG_ON(atomic_read(&host_ctxt->signal.traps_num) !=
			atomic_read(&host_ctxt->signal.in_work));
	KVM_BUG_ON(vcpu_ctxt->trap_no !=
			atomic_read(&host_ctxt->signal.traps_num));

	/* emulate restore of guest VCPU PSR state after done */
	kvm_emulate_guest_vcpu_psr_done(vcpu, vcpu_ctxt->guest_psr,
			vcpu_ctxt->irq_under_upsr);

	/* decrement number of handled recursive traps */
	atomic_dec(&host_ctxt->signal.traps_num);
	atomic_dec(&host_ctxt->signal.in_work);
}

/*
 * Start of the system call return trampoline: publish the syscall return
 * value into the user-space PV VCPU context unless a signal handler is
 * active, and update syscall accounting.
 */
static notrace __always_inline void
syscall_handler_trampoline_start(struct kvm_vcpu *vcpu, u64 sys_rval)
{
	struct signal_stack_context __user *context;
	pv_vcpu_ctxt_t __user *vcpu_ctxt;
	kvm_host_context_t *host_ctxt = &vcpu->arch.host_ctxt;
	bool in_sig_handler;
	unsigned long ts_flag;
	int ret;

	context = get_signal_stack();
	vcpu_ctxt = &context->vcpu_ctxt;

	KVM_BUG_ON(atomic_read(&host_ctxt->signal.syscall_num) <= 0);
	KVM_BUG_ON(atomic_read(&host_ctxt->signal.syscall_num) !=
			atomic_read(&host_ctxt->signal.in_syscall));
	/* FIXME: the following checker is correct only without */
	/* support of guest user signal handlers
	KVM_BUG_ON(atomic_read(&host_ctxt->signal.traps_num) > 0);
	*/

	ts_flag = set_ts_flag(TS_KERNEL_SYSCALL);
	ret = __get_user(in_sig_handler, &vcpu_ctxt->in_sig_handler);
	if (ret) {
		/* user-space context is unreadable: kill the process */
		clear_ts_flag(ts_flag);
		user_exit();
		do_exit(SIGKILL);
	}
	if (likely(!in_sig_handler)) {
		/* signal handler should not change system call return value */
		ret = __put_user(sys_rval, &vcpu_ctxt->sys_rval);
	}
	clear_ts_flag(ts_flag);
	if (ret) {
		/* user-space context is unwritable: kill the process */
		user_exit();
		do_exit(SIGKILL);
	}

	if (likely(!in_sig_handler)) {
		atomic_dec(&host_ctxt->signal.syscall_num);
		atomic_dec(&host_ctxt->signal.in_syscall);
	} else {
		/* signals are handled before return from system call & trap */
		;
	}
}

/*
 * Finish the syscall return trampoline: restore the guest VCPU PSR state
 * saved before injection.
 */
static __always_inline void
syscall_handler_trampoline_finish(struct kvm_vcpu *vcpu, pt_regs_t *regs,
		pv_vcpu_ctxt_t *vcpu_ctxt, kvm_host_context_t *host_ctxt)
{
	gthread_info_t *gti = pv_vcpu_get_gti(vcpu);

	KVM_BUG_ON(vcpu_ctxt->inject_from != FROM_PV_VCPU_SYSCALL_INJECT);
	KVM_BUG_ON(atomic_read(&host_ctxt->signal.syscall_num) !=
			atomic_read(&host_ctxt->signal.in_syscall));

	/* emulate restore of guest VCPU PSR state after return from syscall */
	kvm_emulate_guest_vcpu_psr_done(vcpu, vcpu_ctxt->guest_psr,
			vcpu_ctxt->irq_under_upsr);
}

extern long call_guest_ttable_entry(int sys_num,
		u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6,
		unsigned long ttable_func);

extern unsigned long kvm_get_guest_glob_regs(struct kvm_vcpu *vcpu,
		unsigned long *g_gregs[2], unsigned long not_get_gregs_mask,
		bool keep_bgr, unsigned int *bgr);
extern unsigned long kvm_set_guest_glob_regs(struct kvm_vcpu *vcpu,
		unsigned long *g_gregs[2], unsigned long not_set_gregs_mask,
		bool dirty_bgr, unsigned int *bgr);

/*
 * Snapshot the stack/CR/WD registers from pt_regs into the in-memory
 * guest VCPU register copies at system call entry.
 */
static inline void
save_pv_vcpu_sys_call_stack_regs(struct kvm_vcpu *vcpu, pt_regs_t *regs)
{
	e2k_pcsp_hi_t pcsp_hi;
	e2k_pcshtp_t pcshtp;
	e2k_psp_hi_t psp_hi;
	e2k_pshtp_t pshtp;

	kvm_set_guest_vcpu_USD_hi(vcpu, regs->stacks.usd_hi);
	kvm_set_guest_vcpu_USD_lo(vcpu, regs->stacks.usd_lo);
	kvm_set_guest_vcpu_SBR(vcpu, regs->stacks.top);

	kvm_set_guest_vcpu_CR0_hi(vcpu, regs->crs.cr0_hi);
	kvm_set_guest_vcpu_CR0_lo(vcpu, regs->crs.cr0_lo);
	kvm_set_guest_vcpu_CR1_hi(vcpu, regs->crs.cr1_hi);
	kvm_set_guest_vcpu_CR1_lo(vcpu, regs->crs.cr1_lo);
	kvm_set_guest_vcpu_WD(vcpu, regs->wd);

	/* regs.PSP.ind has been increased by PSHTP
	   value, so decrement here */
	psp_hi = regs->stacks.psp_hi;
	pshtp = regs->stacks.pshtp;
	KVM_BUG_ON(psp_hi.PSP_hi_ind < GET_PSHTP_MEM_INDEX(pshtp));
	psp_hi.PSP_hi_ind -= GET_PSHTP_MEM_INDEX(pshtp);
	kvm_set_guest_vcpu_PSP_hi(vcpu, psp_hi);
	kvm_set_guest_vcpu_PSP_lo(vcpu, regs->stacks.psp_lo);
	kvm_set_guest_vcpu_PSHTP(vcpu, pshtp);

	/* regs.PCSP.ind has been increased by PCSHTP value, so decrement */
	pcsp_hi = regs->stacks.pcsp_hi;
	pcshtp = regs->stacks.pcshtp;
	KVM_BUG_ON(pcsp_hi.PCSP_hi_ind < PCSHTP_SIGN_EXTEND(pcshtp));
	pcsp_hi.PCSP_hi_ind -= PCSHTP_SIGN_EXTEND(pcshtp);
	kvm_set_guest_vcpu_PCSP_hi(vcpu, pcsp_hi);
	kvm_set_guest_vcpu_PCSP_lo(vcpu, regs->stacks.pcsp_lo);
	kvm_set_guest_vcpu_PCSHTP(vcpu, pcshtp);

	/* set UPSR as before trap */
	kvm_set_guest_vcpu_UPSR(vcpu, current_thread_info()->upsr);
}

/*
 * Snapshot the live CPU stack registers (after flushing the hardware
 * stacks to memory) into the in-memory guest VCPU register copies.
 */
static inline void save_guest_sys_call_stack_regs(struct kvm_vcpu *vcpu,
		e2k_usd_lo_t usd_lo, e2k_usd_hi_t usd_hi,
		e2k_addr_t sbr)
{
	/* spill hardware stacks to memory before reading the registers */
	NATIVE_FLUSHCPU;
	NATIVE_FLUSHCPU;

	kvm_set_guest_vcpu_WD(vcpu, NATIVE_READ_WD_REG());
	kvm_set_guest_vcpu_USD_hi(vcpu, usd_hi);
	kvm_set_guest_vcpu_USD_lo(vcpu, usd_lo);
	kvm_set_guest_vcpu_SBR(vcpu, sbr);

	kvm_set_guest_vcpu_PSHTP(vcpu, NATIVE_NV_READ_PSHTP_REG());
	kvm_set_guest_vcpu_CR0_hi(vcpu, NATIVE_NV_READ_CR0_HI_REG());
	kvm_set_guest_vcpu_CR0_lo(vcpu, NATIVE_NV_READ_CR0_LO_REG());
	kvm_set_guest_vcpu_CR1_hi(vcpu, NATIVE_NV_READ_CR1_HI_REG());
	kvm_set_guest_vcpu_CR1_lo(vcpu, NATIVE_NV_READ_CR1_LO_REG());

	/* wait for the flush to complete before reading stack pointers */
	E2K_WAIT_ALL;
	kvm_set_guest_vcpu_PSP_hi(vcpu, NATIVE_NV_READ_PSP_HI_REG());
	kvm_set_guest_vcpu_PSP_lo(vcpu, NATIVE_NV_READ_PSP_LO_REG());
	kvm_set_guest_vcpu_PCSP_hi(vcpu, NATIVE_NV_READ_PCSP_HI_REG());
	kvm_set_guest_vcpu_PCSP_lo(vcpu, NATIVE_NV_READ_PCSP_LO_REG());
}

/* Publish the saved guest user UPSR into the VCPU register copy */
static inline void
save_guest_sys_call_user_regs(struct kvm_vcpu *vcpu, gthread_info_t *gti)
{
	GTI_BUG_ON(!gti->u_upsr_valid);
	kvm_set_guest_vcpu_UPSR(vcpu, gti->u_upsr);
}

static
inline void restore_guest_sys_call_stack_regs(thread_info_t *ti,
		struct kvm_vcpu *vcpu,
		e2k_usd_lo_t usd_lo, e2k_usd_hi_t usd_hi,
		e2k_addr_t sbr_base)
{
	unsigned long regs_status = kvm_get_guest_vcpu_regs_status(vcpu);
	bool hw_frame_updated = false;
	gthread_info_t *gti;
	gpt_regs_t *gregs;
	gpt_regs_t *prev_gregs;
	struct task_struct *task;
	e2k_pcsp_lo_t new_pcsp_lo;
	e2k_pcsp_hi_t new_pcsp_hi;
	e2k_pcsp_lo_t cur_pcsp_lo;
	e2k_pcsp_hi_t cur_pcsp_hi;
	e2k_pcshtp_t cur_pcshtp;
	e2k_size_t new_ind;
	e2k_size_t cur_ind;
	e2k_sbr_t sbr;

	sbr.SBR_reg = 0;
	sbr.SBR_base = sbr_base;

	/* fast path: no register copy was updated by the guest */
	if (!KVM_TEST_UPDATED_CPU_REGS_FLAGS(regs_status)) {
		NATIVE_NV_WRITE_USBR_USD_REG(sbr, usd_hi, usd_lo);
		return;
	}

	if (KVM_TEST_UPDATED_CPU_REGS_FLAG(regs_status, WD_UPDATED_CPU_REGS)) {
		e2k_wd_t wd = NATIVE_READ_WD_REG();
		wd.WD_psize = kvm_get_guest_vcpu_WD(vcpu).WD_psize;
		NATIVE_WRITE_WD_REG(wd);
	}
	/* FIXME: the following outer if-statement exists only so debug info */
	/* can still be printed before the USD write (see debug_exit below) */
	if (!(DEBUG_HOST_ACTIVATION_MODE || DEBUG_GREGS_MODE || DEBUG_GTI)) {
		if (KVM_TEST_UPDATED_CPU_REGS_FLAG(regs_status,
				USD_UPDATED_CPU_REGS)) {
			NATIVE_NV_WRITE_USBR_USD_REG(kvm_get_guest_vcpu_SBR(vcpu),
				kvm_get_guest_vcpu_USD_hi(vcpu),
				kvm_get_guest_vcpu_USD_lo(vcpu));
		} else {
			NATIVE_NV_WRITE_USBR_USD_REG(sbr, usd_hi, usd_lo);
		}
	}
	if (KVM_TEST_UPDATED_CPU_REGS_FLAG(regs_status,
			HS_REGS_UPDATED_CPU_REGS)) {
		cur_pcsp_lo = NATIVE_NV_READ_PCSP_LO_REG();
		cur_pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG();
		cur_pcshtp = NATIVE_READ_PCSHTP_REG_SVALUE();
		NATIVE_STRIP_PSHTP_WINDOW();
		NATIVE_STRIP_PCSHTP_WINDOW();
		NATIVE_NV_WRITE_PSP_REG(kvm_get_guest_vcpu_PSP_hi(vcpu),
			kvm_get_guest_vcpu_PSP_lo(vcpu));
		new_pcsp_lo = kvm_get_guest_vcpu_PCSP_lo(vcpu);
		new_pcsp_hi = kvm_get_guest_vcpu_PCSP_hi(vcpu);
		NATIVE_NV_WRITE_PCSP_REG(new_pcsp_hi, new_pcsp_lo);
		/* detect chain stack frame change (possible long jump) */
		if (cur_pcsp_lo.PCSP_lo_base != new_pcsp_lo.PCSP_lo_base ||
				(cur_pcsp_hi.PCSP_hi_ind + cur_pcshtp) !=
new_pcsp_hi.PCSP_hi_ind) { + hw_frame_updated = true; + } + } + if (KVM_TEST_UPDATED_CPU_REGS_FLAG(regs_status, CRS_UPDATED_CPU_REGS)) { + NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG( + kvm_get_guest_vcpu_CR0_hi(vcpu)); + NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG( + kvm_get_guest_vcpu_CR0_lo(vcpu)); + NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG( + kvm_get_guest_vcpu_CR1_hi(vcpu)); + NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG( + kvm_get_guest_vcpu_CR1_lo(vcpu)); + } + kvm_reset_guest_updated_vcpu_regs_flags(vcpu, regs_status); + if (!hw_frame_updated) + /* FIXME: only to debug info print, change to return */ + goto debug_exit; + /* FIXME: only to debug, should be deleted */ + E2K_SET_USER_STACK(DEBUG_HOST_ACTIVATION_MODE || DEBUG_GTI); + + /* + * Hardware stack frame has been updated, it can be long jump, + * so host needs restore own thread and stacks state, including + * guest kernel stacks state + */ + task = thread_info_task(ti); + gti = ti->gthread_info; + GTI_BUG_ON(gti == NULL); + new_ind = new_pcsp_hi.PCSP_hi_ind; + DebugHACT("new chain stack base 0x%llx index 0x%x size 0x%x\n", + new_pcsp_lo.PCSP_lo_base, new_pcsp_hi.PCSP_hi_ind, + new_pcsp_hi.PCSP_hi_size); + DebugHACT("data stack state: guest #%d usd size 0x%x, " + "host #%d usd size 0x%x\n", + gti->g_stk_frame_no, gti->stack_regs.stacks.usd_hi.USD_hi_size, + gti->k_stk_frame_no, ti->k_usd_hi.USD_hi_size); + GTI_BUG_ON(new_pcsp_lo.PCSP_lo_base < + (u64)GET_PCS_BASE(>i->hw_stacks) || + new_pcsp_lo.PCSP_lo_base + new_pcsp_hi.PCSP_hi_ind >= + (u64)GET_PCS_BASE(>i->hw_stacks) + + gti->hw_stacks.pcs.size); + gregs = get_gpt_regs(ti); + if (gregs == NULL) { + /* none host activations, so nothing update */ + /* it can be direct long jump from user without any trap and */ + /* signal handler: */ + /* user user -> syscall -> long jump + */ + /* ^ | */ + /* | | */ + /* +--------------------------------+ */ + GTI_BUG_ON(ti->pt_regs != NULL); + DebugHACT("none any guest pt_regs structure, so noting " + "update\n"); + /* FIXME: only to debug info print, 
change to return */ + goto debug_exit; + } + prev_gregs = NULL; + do { + DebugHACT("current activation type %d guest #%d usd size 0x%lx, host #%d usd size 0x%lx, chain stack index 0x%lx\n", + gregs->type, gregs->g_stk_frame_no, gregs->g_usd_size, + gregs->k_stk_frame_no, gregs->k_usd_size, + gregs->pcsp_ind); + DebugHACT("current thread state: pt_regs %px\n", + gregs->pt_regs); + cur_ind = gregs->pcsp_ind; + if (cur_ind < new_ind) { + /* current activation is the nearest to jump point */ + /* and is below of this point */ + DebugHACT("the activation is the nearest and below to " + "jump point, use prev to update\n"); + break; + } + prev_gregs = gregs; + delete_gpt_regs(ti); + gregs = get_gpt_regs(ti); + } while (gregs); + if (prev_gregs) { + struct pt_regs *regs; + + /* restore state of host thread at the find point */ + DO_RESTORE_KVM_KERNEL_STACKS_STATE(ti, gti, prev_gregs); + regs = ti->pt_regs; + if (regs == NULL) { + /* none host activations, so nothing update */ + /* it can be direct long jump from user into */ + /* system call from signal handler: */ + /* user user -> syscall -> */ + /* signal handler -> long jump + */ + /* ^ | */ + /* | | */ + /* +------------------------------------------+ */ + ; + } else { + ti->pt_regs = regs->next; + } + } else { + DebugHACT("none activation the nearest and above to " + "jump point, so do not update state\n"); + } + DebugHACT("new data stack state: guest #%d usd size 0x%x, " + "host #%d usd size 0x%x\n", + gti->g_stk_frame_no, gti->stack_regs.stacks.usd_hi.USD_hi_size, + gti->k_stk_frame_no, ti->k_usd_hi.USD_hi_size); +debug_exit: + if ((DEBUG_HOST_ACTIVATION_MODE || DEBUG_GREGS_MODE || DEBUG_GTI)) { + if (KVM_TEST_UPDATED_CPU_REGS_FLAG(regs_status, + USD_UPDATED_CPU_REGS)) { + NATIVE_NV_WRITE_USBR_USD_REG(kvm_get_guest_vcpu_SBR(vcpu), + kvm_get_guest_vcpu_USD_hi(vcpu), + kvm_get_guest_vcpu_USD_lo(vcpu)); + } else { + NATIVE_NV_WRITE_USBR_USD_REG(sbr, usd_hi, usd_lo); + } + } +} + +static __always_inline void 
+kvm_pv_clear_hcall_host_stacks(struct kvm_vcpu *vcpu) +{ + vcpu->arch.hypv_backup.users = 0; + vcpu->arch.guest_stacks.valid = false; +} + +/* interrupts/traps should be disabled by caller */ +static __always_inline int +kvm_pv_switch_to_hcall_host_stacks(struct kvm_vcpu *vcpu) +{ + bu_hw_stack_t *hypv_backup; + guest_hw_stack_t *guest_stacks; + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + int users; + + users = (++vcpu->arch.hypv_backup.users); + if (users > 1) { + /* already on hypercall (hypervisor) stacks */ + KVM_BUG_ON(!vcpu->arch.guest_stacks.valid); + /* FIXME: it need check PSP/PCSP point to that stacks */ + return users; + } + KVM_BUG_ON(vcpu->arch.guest_stacks.valid); + + NATIVE_FLUSHCPU; + + /* These will wait for the flush so we give + * the flush some time to finish. */ + + hypv_backup = &vcpu->arch.hypv_backup; + guest_stacks = &vcpu->arch.guest_stacks; + psp_lo = hypv_backup->psp_lo; + psp_hi = hypv_backup->psp_hi; + pcsp_lo = hypv_backup->pcsp_lo; + pcsp_hi = hypv_backup->pcsp_hi; + + E2K_WAIT_MA; /* wait for spill completion */ + + guest_stacks->stacks.psp_hi = NATIVE_NV_READ_PSP_HI_REG(); + guest_stacks->stacks.psp_lo = NATIVE_NV_READ_PSP_LO_REG(); + guest_stacks->stacks.pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG(); + guest_stacks->stacks.pcsp_lo = NATIVE_NV_READ_PCSP_LO_REG(); + + /* the follow info only to correctly dump guest stack */ + guest_stacks->crs.cr0_lo = NATIVE_NV_READ_CR0_LO_REG(); + guest_stacks->crs.cr0_hi = NATIVE_NV_READ_CR0_HI_REG(); + guest_stacks->crs.cr1_lo = NATIVE_NV_READ_CR1_LO_REG(); + guest_stacks->crs.cr1_hi = NATIVE_NV_READ_CR1_HI_REG(); + + /* guest pointers are actual */ + guest_stacks->valid = true; + + E2K_WAIT_ST; /* wait for all hardware stacks registers saving */ + + NATIVE_NV_WRITE_PSP_REG(psp_hi, psp_lo); + NATIVE_NV_WRITE_PCSP_REG(pcsp_hi, pcsp_lo); + + return users; +} + +/* interrupts/traps should be disabled by caller */ +static __always_inline int 
+kvm_pv_restore_hcall_guest_stacks(struct kvm_vcpu *vcpu) +{ + guest_hw_stack_t *guest_stacks; + u64 pshtp; + u32 pcshtp; + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + int users; + + KVM_BUG_ON(!vcpu->arch.guest_stacks.valid); + users = (--vcpu->arch.hypv_backup.users); + if (users != 0) { + /* there are some other users of hypervisor stacks */ + /* so it need remain on that stacks */ + /* FIXME: it need check PSP/PCSP point to that stacks */ + return users; + } + + /* host hardware stacks should be empty before return from HCALL */ + pshtp = GET_PSHTP_MEM_INDEX(NATIVE_NV_READ_PSHTP_REG()); + pcshtp = NATIVE_READ_PCSHTP_REG_SVALUE(); + BUG_ON(pshtp != 0); + BUG_ON(pcshtp != 0); + + guest_stacks = &vcpu->arch.guest_stacks; + psp_lo = guest_stacks->stacks.psp_lo; + psp_hi = guest_stacks->stacks.psp_hi; + pcsp_lo = guest_stacks->stacks.pcsp_lo; + pcsp_hi = guest_stacks->stacks.pcsp_hi; + + NATIVE_NV_WRITE_PSP_REG(psp_hi, psp_lo); + NATIVE_NV_WRITE_PCSP_REG(pcsp_hi, pcsp_lo); + + /* pointers are not more actual */ + guest_stacks->valid = false; + + E2K_WAIT_ALL_OP; /* wait for registers restore completion */ + + return users; +} + +/* interrupts/traps should be disabled by caller */ +static __always_inline int +kvm_pv_put_hcall_guest_stacks(struct kvm_vcpu *vcpu, bool should_be_empty) +{ + u64 pshtp; + u32 pcshtp; + int users; + + KVM_WARN_ON(!vcpu->arch.guest_stacks.valid); + users = (--vcpu->arch.hypv_backup.users); + if (users != 0) { + /* there are some other users of hypervisor stacks */ + /* so it need remain on that stacks */ + /* FIXME: it need check PSP/PCSP point to that stacks */ + return users; + } + + if (should_be_empty) { + /* host hardware stacks should be empty before free stacks */ + pshtp = GET_PSHTP_MEM_INDEX(NATIVE_NV_READ_PSHTP_REG()); + pcshtp = NATIVE_READ_PCSHTP_REG_SVALUE(); + BUG_ON(pshtp != 0); + BUG_ON(pcshtp != 0); + } + + /* pointers are not more actual */ + vcpu->arch.guest_stacks.valid 
= false; + + return users; +} + +static __always_inline bool +kvm_pv_is_vcpu_on_hcall_host_stacks(struct kvm_vcpu *vcpu) +{ + /* hypercall is running on host stacks */ + return vcpu->arch.guest_stacks.valid && + (vcpu->arch.hypv_backup.users != 0); +} + +/* interrupts/traps should be disabled by caller */ +static __always_inline int +kvm_pv_switch_to_host_stacks(struct kvm_vcpu *vcpu) +{ + /* hypercall stacks are now used for hypervisor handlers */ + return kvm_pv_switch_to_hcall_host_stacks(vcpu); +} + +/* interrupts/traps should be disabled by caller */ +static __always_inline int +kvm_pv_switch_to_guest_stacks(struct kvm_vcpu *vcpu) +{ + /* hypercall stacks are now used for hypervisor handlers */ + return kvm_pv_restore_hcall_guest_stacks(vcpu); +} + +static __always_inline bool +kvm_pv_is_vcpu_on_host_stacks(struct kvm_vcpu *vcpu) +{ + /* hypercall stacks are now used for hypervisor handlers */ + return kvm_pv_is_vcpu_on_hcall_host_stacks(vcpu); +} + +static __always_inline bool +pv_vcpu_syscall_in_user_mode(struct kvm_vcpu *vcpu) +{ + pt_regs_t *regs = current_thread_info()->pt_regs; + gthread_info_t *gti = pv_vcpu_get_gti(vcpu); + kvm_host_context_t *host_ctxt; + + host_ctxt = &vcpu->arch.host_ctxt; + return !(test_gti_thread_flag(gti, GTIF_KERNEL_THREAD) || + pv_vcpu_trap_on_guest_kernel(regs)); +} + +static __always_inline bool +pv_vcpu_trap_in_user_mode(struct kvm_vcpu *vcpu) +{ + pt_regs_t *regs = current_thread_info()->pt_regs; + gthread_info_t *gti = pv_vcpu_get_gti(vcpu); + kvm_host_context_t *host_ctxt; + + host_ctxt = &vcpu->arch.host_ctxt; + return !(test_gti_thread_flag(gti, GTIF_KERNEL_THREAD) || + pv_vcpu_trap_on_guest_kernel(regs)); +} + +static __always_inline bool +kvm_inject_vcpu_exit(struct kvm_vcpu *vcpu) +{ + WARN_ON(vcpu->arch.vm_exit_wish); + vcpu->arch.vm_exit_wish = true; + return true; +} + +static __always_inline bool +kvm_is_need_inject_vcpu_exit(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.vm_exit_wish; +} + +static 
__always_inline void +kvm_inject_guest_traps_wish(struct kvm_vcpu *vcpu, int trap_no) +{ + unsigned long trap_mask = 0; + + trap_mask |= (1UL << trap_no); + vcpu->arch.trap_mask_wish |= trap_mask; + vcpu->arch.trap_wish = true; +} + +static __always_inline bool +kvm_is_need_inject_guest_traps(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.trap_wish; +} + +static __always_inline bool +kvm_try_inject_event_wish(struct kvm_vcpu *vcpu, struct thread_info *ti, + unsigned long upsr, unsigned long psr) +{ + bool need_inject = false; + + /* probably need inject VM exit to handle exit reason by QEMU */ + need_inject |= kvm_is_need_inject_vcpu_exit(vcpu); + + if (atomic_read(&vcpu->arch.host_ctxt.signal.traps_num) > 1) { + /* there is (are) already traps to handle by guest */ + /* FIXME: interrupts probavly can be added to guest TIRs, */ + /* if guest did not yet read its */ + goto out; + } + /* probably need inject some traps */ + need_inject |= kvm_is_need_inject_guest_traps(vcpu); + /* probably need inject virtual interrupts */ + need_inject |= kvm_try_inject_direct_guest_virqs(vcpu, ti, upsr, psr); + +out: + return need_inject; +} + +/* See at arch/include/asm/switch.h the 'switch_flags' argument values */ +static __always_inline __interrupt unsigned long +switch_to_host_pv_vcpu_mode(thread_info_t *ti, struct kvm_vcpu *vcpu, + bool from_hypercall, unsigned switch_flags) +{ + struct kvm_hw_cpu_context *hw_ctxt = &vcpu->arch.hw_ctxt; + struct kvm_sw_cpu_context *sw_ctxt = &vcpu->arch.sw_ctxt; + e2k_cr0_lo_t cr0_lo; + e2k_cr0_hi_t cr0_hi; + e2k_cr1_lo_t cr1_lo; + e2k_cr1_hi_t cr1_hi; + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + e2k_sbr_t sbr; + + if (from_hypercall) { + KVM_BUG_ON(!test_and_clear_ti_status_flag(ti, + TS_HOST_AT_VCPU_MODE)); + __guest_exit(ti, &vcpu->arch, switch_flags); + /* return to hypervisor MMU context to emulate hw intercept */ + 
kvm_switch_to_host_mmu_pid(thread_info_task(ti)->mm); + } else { + /* switch from interception emulation mode to host vcpu mode */ + KVM_BUG_ON(test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE)); + __guest_exit(ti, &vcpu->arch, switch_flags); + } + + /* restore host VCPU data stack pointer registers */ + if (!from_hypercall) { + usd_lo.USD_lo_half = NATIVE_NV_READ_USD_LO_REG_VALUE(); + usd_hi.USD_hi_half = NATIVE_NV_READ_USD_HI_REG_VALUE(); + sbr.SBR_reg = NATIVE_NV_READ_SBR_REG_VALUE(); + } + NATIVE_NV_WRITE_USBR_USD_REG(sw_ctxt->host_sbr, sw_ctxt->host_usd_hi, + sw_ctxt->host_usd_lo); + if (!from_hypercall) { + sw_ctxt->host_sbr = sbr; + sw_ctxt->host_usd_lo = usd_lo; + sw_ctxt->host_usd_hi = usd_hi; + } + + kvm_do_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_in_intercept); + + cr0_lo = sw_ctxt->crs.cr0_lo; + cr0_hi = sw_ctxt->crs.cr0_hi; + cr1_lo = sw_ctxt->crs.cr1_lo; + cr1_hi = sw_ctxt->crs.cr1_hi; + psp_lo = hw_ctxt->sh_psp_lo; + psp_hi = hw_ctxt->sh_psp_hi; + pcsp_lo = hw_ctxt->sh_pcsp_lo; + pcsp_hi = hw_ctxt->sh_pcsp_hi; + + NATIVE_FLUSHCPU; /* spill all host hardware stacks */ + + sw_ctxt->crs.cr0_lo = NATIVE_NV_READ_CR0_LO_REG(); + sw_ctxt->crs.cr0_hi = NATIVE_NV_READ_CR0_HI_REG(); + sw_ctxt->crs.cr1_lo = NATIVE_NV_READ_CR1_LO_REG(); + sw_ctxt->crs.cr1_hi = NATIVE_NV_READ_CR1_HI_REG(); + + E2K_WAIT_MA; /* wait for spill completion */ + + hw_ctxt->sh_psp_lo = NATIVE_NV_READ_PSP_LO_REG(); + hw_ctxt->sh_psp_hi = NATIVE_NV_READ_PSP_HI_REG(); + hw_ctxt->sh_pcsp_lo = NATIVE_NV_READ_PCSP_LO_REG(); + hw_ctxt->sh_pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG(); + + /* + * There might be a FILL operation still going right now. + * Wait for it's completion before going further - otherwise + * the next FILL on the new PSP/PCSP registers will race + * with the previous one. 
+ * + * The first and the second FILL operations will use different + * addresses because we will change PSP/PCSP registers, and + * thus loads/stores from these two FILLs can race with each + * other leading to bad register file (containing values from + * both stacks).. + */ + E2K_WAIT(_ma_c); + + NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG(cr0_lo); + NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG(cr0_hi); + NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG(cr1_lo); + NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG(cr1_hi); + + NATIVE_NV_WRITE_PSP_REG(psp_hi, psp_lo); + NATIVE_NV_WRITE_PCSP_REG(pcsp_hi, pcsp_lo); + + return 0; +} + +/* See at arch/include/asm/switch.h the 'switch_flags' argument values */ +static __always_inline __interrupt unsigned long +return_to_intc_pv_vcpu_mode(thread_info_t *ti, struct kvm_vcpu *vcpu, + unsigned switch_flags) +{ + struct kvm_hw_cpu_context *hw_ctxt = &vcpu->arch.hw_ctxt; + struct kvm_sw_cpu_context *sw_ctxt = &vcpu->arch.sw_ctxt; + e2k_cr0_lo_t cr0_lo; + e2k_cr0_hi_t cr0_hi; + e2k_cr1_lo_t cr1_lo; + e2k_cr1_hi_t cr1_hi; + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + e2k_sbr_t sbr; + + kvm_do_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_in_trap); + + /* return to interception emulation mode from host vcpu mode */ + KVM_BUG_ON(test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE)); + __guest_enter(ti, &vcpu->arch, switch_flags); + + /* restore host VCPU data stack pointer registers */ + usd_lo.USD_lo_half = NATIVE_NV_READ_USD_LO_REG_VALUE(); + usd_hi.USD_hi_half = NATIVE_NV_READ_USD_HI_REG_VALUE(); + sbr.SBR_reg = NATIVE_NV_READ_SBR_REG_VALUE(); + NATIVE_NV_WRITE_USBR_USD_REG(sw_ctxt->host_sbr, sw_ctxt->host_usd_hi, + sw_ctxt->host_usd_lo); + sw_ctxt->host_sbr = sbr; + sw_ctxt->host_usd_lo = usd_lo; + sw_ctxt->host_usd_hi = usd_hi; + + cr0_lo = sw_ctxt->crs.cr0_lo; + cr0_hi = sw_ctxt->crs.cr0_hi; + cr1_lo = sw_ctxt->crs.cr1_lo; + cr1_hi = sw_ctxt->crs.cr1_hi; + psp_lo = hw_ctxt->sh_psp_lo; 
+ psp_hi = hw_ctxt->sh_psp_hi; + pcsp_lo = hw_ctxt->sh_pcsp_lo; + pcsp_hi = hw_ctxt->sh_pcsp_hi; + + NATIVE_FLUSHCPU; /* spill all host hardware stacks */ + + sw_ctxt->crs.cr0_lo = NATIVE_NV_READ_CR0_LO_REG(); + sw_ctxt->crs.cr0_hi = NATIVE_NV_READ_CR0_HI_REG(); + sw_ctxt->crs.cr1_lo = NATIVE_NV_READ_CR1_LO_REG(); + sw_ctxt->crs.cr1_hi = NATIVE_NV_READ_CR1_HI_REG(); + + E2K_WAIT_MA; /* wait for spill completion */ + + hw_ctxt->sh_psp_lo = NATIVE_NV_READ_PSP_LO_REG(); + hw_ctxt->sh_psp_hi = NATIVE_NV_READ_PSP_HI_REG(); + hw_ctxt->sh_pcsp_lo = NATIVE_NV_READ_PCSP_LO_REG(); + hw_ctxt->sh_pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG(); + + /* + * There might be a FILL operation still going right now. + * Wait for it's completion before going further - otherwise + * the next FILL on the new PSP/PCSP registers will race + * with the previous one. + * + * The first and the second FILL operations will use different + * addresses because we will change PSP/PCSP registers, and + * thus loads/stores from these two FILLs can race with each + * other leading to bad register file (containing values from + * both stacks).. + */ + E2K_WAIT(_ma_c); + + NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG(cr0_lo); + NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG(cr0_hi); + NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG(cr1_lo); + NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG(cr1_hi); + + NATIVE_NV_WRITE_PSP_REG(psp_hi, psp_lo); + NATIVE_NV_WRITE_PCSP_REG(pcsp_hi, pcsp_lo); + + return 0; +} + +static __always_inline __interrupt unsigned long +pv_vcpu_return_to_host(thread_info_t *ti, struct kvm_vcpu *vcpu) +{ + return switch_to_host_pv_vcpu_mode(ti, vcpu, true /* from hypercall */, + FULL_CONTEXT_SWITCH | USD_CONTEXT_SWITCH); +} + +/* + * WARNING: do not use global registers optimization: + * current + * current_thread_info() + * smp_processor_id() + * cpu_offset... 
to access to per-cpu items + */ +static __always_inline __interrupt unsigned long +kvm_hcall_return_from(struct thread_info *ti, + e2k_upsr_t user_upsr, unsigned long psr, + bool restore_data_stack, e2k_size_t g_usd_size, + unsigned long ret) +{ + bool from_paravirt_guest; + + from_paravirt_guest = test_ti_thread_flag(ti, TIF_PARAVIRT_GUEST); + + /* + * Now we should restore kernel saved stack state and + * return to guest kernel data stack, if it need + */ + if (ti->gthread_info != NULL && + !test_gti_thread_flag(ti->gthread_info, GTIF_KERNEL_THREAD)) { + RESTORE_KVM_GUEST_KERNEL_STACKS_STATE(ti); + delete_gpt_regs(ti); + DebugKVMACT("restored guest data stack activation #%d: " + "base 0x%llx, size 0x%x, top 0x%lx\n", + ti->gthread_info->g_stk_frame_no, + ti->gthread_info->stack_regs.stacks.usd_lo.USD_lo_base, + ti->gthread_info->stack_regs.stacks.usd_hi.USD_hi_size, + ti->gthread_info->stack_regs.stacks.top); + if (DEBUG_GPT_REGS_MODE) + print_all_gpt_regs(ti); + } + if (restore_data_stack) { + RETURN_TO_GUEST_KERNEL_DATA_STACK(ti, g_usd_size); + } + + /* if there are pending VIRQs, then provide with direct interrupt */ + /* to cause guest interrupting and handling VIRQs */ + kvm_try_inject_event_wish(ti->vcpu, ti, user_upsr.UPSR_reg, psr); + + /* + * Return control from UPSR register to PSR, if UPSR + * interrupts control is used. 
+ * RETURN operation restores PSR state at hypercall point and + * recovers interrupts control + * Restoring of user UPSR should be after global registers restoring + * to preserve FP disable exception on movfi instructions + * while global registers manipulations + */ + NATIVE_RETURN_TO_USER_UPSR(user_upsr); + + COND_GOTO_RETURN_TO_PARAVIRT_GUEST(from_paravirt_guest, ret); + return ret; +} + +#define DEBUG_CHECK_VCPU_STATE_GREG + +#ifdef DEBUG_CHECK_VCPU_STATE_GREG +static inline void kvm_check_vcpu_ids_as_light(struct kvm_vcpu *vcpu) +{ + kvm_vcpu_state_t *greg_vs; + + greg_vs = (kvm_vcpu_state_t *) + HOST_GET_SAVED_VCPU_STATE_GREG_AS_LIGHT(current_thread_info()); + KVM_BUG_ON(greg_vs->cpu.regs.CPU_VCPU_ID != vcpu->vcpu_id); +} +static inline void kvm_check_vcpu_state_greg(void) +{ + struct kvm_vcpu *vcpu = current_thread_info()->vcpu; + unsigned long vs; + kvm_vcpu_state_t *greg_vs, *vcpu_vs; + + KVM_BUG_ON(vcpu == NULL); + if (!vcpu->arch.is_hv) { + vs = HOST_GET_SAVED_VCPU_STATE_GREG(current_thread_info()); + greg_vs = (kvm_vcpu_state_t *)vs; + vcpu_vs = (kvm_vcpu_state_t *)GET_GUEST_VCPU_STATE_POINTER(vcpu); + KVM_BUG_ON(greg_vs != vcpu_vs); + } +} +static inline bool +kvm_is_guest_migrated_to_other_vcpu(thread_info_t *ti, struct kvm_vcpu *vcpu) +{ + unsigned long vs; + kvm_vcpu_state_t *greg_vs, *vcpu_vs; + + vs = HOST_GET_SAVED_VCPU_STATE_GREG(ti); + greg_vs = (kvm_vcpu_state_t *)vs; + vcpu_vs = (kvm_vcpu_state_t *)GET_GUEST_VCPU_STATE_POINTER(vcpu); + return greg_vs != vcpu_vs; +} +#else /* !DEBUG_CHECK_VCPU_STATE_GREG */ +static inline void kvm_check_vcpu_ids_as_light(struct kvm_vcpu *vcpu) +{ +} +static inline void kvm_check_vcpu_state_greg(void) +{ +} +static inline bool +kvm_is_guest_migrated_to_other_vcpu(thread_info_t *ti, struct kvm_vcpu *vcpu) +{ + return false; +} +#endif /* DEBUG_CHECK_VCPU_STATE_GREG */ + +/* + * Guest trap handling support + */ +#ifdef CONFIG_USE_AAU +static inline void +save_guest_trap_aau_regs(struct kvm_vcpu *vcpu, pt_regs_t 
*regs) +{ + trap_pt_regs_t *trap; + e2k_aau_t *aau; + e2k_aasr_t aasr; + bool aau_fault = false; + int i; + + trap = pt_regs_to_trap_regs(regs); + regs->trap = trap; + aau = pt_regs_to_aau_regs(regs); + regs->aau_context = aau; + aasr = aau->aasr; + kvm_set_guest_vcpu_aasr(vcpu, aasr); + kvm_set_guest_vcpu_aaldm(vcpu, aau->aaldm); + kvm_set_guest_vcpu_aaldv(vcpu, aau->aaldv); + if (AS(aasr).iab) + kvm_copy_to_guest_vcpu_aads(vcpu, aau->aads); + for (i = 0; i <= trap->nr_TIRs; i++) { + if (GET_AA_TIRS(trap->TIRs[i].TIR_hi.TIR_hi_reg)) { + aau_fault = true; + break; + } + } + + if (unlikely(aau_fault)) { + kvm_set_guest_vcpu_aafstr_value(vcpu, aau->aafstr); + } + + if (AS(aasr).iab) { + /* get descriptors & auxiliary registers */ + kvm_copy_to_guest_vcpu_aainds(vcpu, aau->aainds); + kvm_set_guest_vcpu_aaind_tags_value(vcpu, aau->aaind_tags); + kvm_copy_to_guest_vcpu_aaincrs(vcpu, aau->aaincrs); + kvm_set_guest_vcpu_aaincr_tags_value(vcpu, aau->aaincr_tags); + } + + if (AS(aasr).stb) { + /* get synchronous part of APB */ + kvm_copy_to_guest_vcpu_aastis(vcpu, aau->aastis); + kvm_set_guest_vcpu_aasti_tags_value(vcpu, aau->aasti_tags); + } +} +#else /* ! CONFIG_USE_AAU */ +static inline void +save_guest_trap_aau_regs(struct kvm_vcpu *vcpu, pt_regs_t *regs) +{ +} +#endif /* ! 
CONFIG_USE_AAU */ + +static inline void +save_guest_trap_cpu_regs(struct kvm_vcpu *vcpu, pt_regs_t *regs) +{ + /* stacks registers */ + kvm_set_guest_vcpu_WD(vcpu, regs->wd); + kvm_set_guest_vcpu_USD_hi(vcpu, regs->stacks.usd_hi); + kvm_set_guest_vcpu_USD_lo(vcpu, regs->stacks.usd_lo); + kvm_set_guest_vcpu_SBR(vcpu, regs->stacks.top); + DebugKVMVGT("regs USD: base 0x%llx size 0x%x top 0x%lx\n", + regs->stacks.usd_lo.USD_lo_base, + regs->stacks.usd_hi.USD_hi_size, + regs->stacks.top); + + kvm_set_guest_vcpu_CR0_hi(vcpu, regs->crs.cr0_hi); + kvm_set_guest_vcpu_CR0_lo(vcpu, regs->crs.cr0_lo); + kvm_set_guest_vcpu_CR1_hi(vcpu, regs->crs.cr1_hi); + kvm_set_guest_vcpu_CR1_lo(vcpu, regs->crs.cr1_lo); + + kvm_set_guest_vcpu_PSHTP(vcpu, regs->stacks.pshtp); + kvm_set_guest_vcpu_PSP_hi(vcpu, regs->stacks.psp_hi); + kvm_set_guest_vcpu_PSP_lo(vcpu, regs->stacks.psp_lo); + DebugKVMVGT("regs PSP: base 0x%llx size 0x%x ind 0x%x\n", + regs->stacks.psp_lo.PSP_lo_base, + regs->stacks.psp_hi.PSP_hi_size, + regs->stacks.psp_hi.PSP_hi_ind); + kvm_set_guest_vcpu_PCSHTP(vcpu, regs->stacks.pcshtp); + kvm_set_guest_vcpu_PCSP_hi(vcpu, regs->stacks.pcsp_hi); + kvm_set_guest_vcpu_PCSP_lo(vcpu, regs->stacks.pcsp_lo); + DebugKVMVGT("regs PCSP: base 0x%llx size 0x%x ind 0x%x\n", + regs->stacks.pcsp_lo.PCSP_lo_base, + regs->stacks.pcsp_hi.PCSP_hi_size, + regs->stacks.pcsp_hi.PCSP_hi_ind); + + /* Control transfer registers */ + kvm_set_guest_vcpu_CTPR1(vcpu, regs->ctpr1); + kvm_set_guest_vcpu_CTPR2(vcpu, regs->ctpr2); + kvm_set_guest_vcpu_CTPR3(vcpu, regs->ctpr3); + /* Cycles control registers */ + kvm_set_guest_vcpu_LSR(vcpu, regs->lsr); + kvm_set_guest_vcpu_ILCR(vcpu, regs->ilcr); + /* set UPSR as before trap */ + kvm_set_guest_vcpu_UPSR(vcpu, current_thread_info()->upsr); +} + +static inline void +save_guest_trap_regs(struct kvm_vcpu *vcpu, struct pt_regs *regs) +{ + save_guest_trap_aau_regs(vcpu, regs); + save_guest_trap_cpu_regs(vcpu, regs); +} + +static inline bool 
check_is_guest_TIRs_frozen(pt_regs_t *regs, bool to_update) +{ + struct kvm_vcpu *vcpu = current_thread_info()->vcpu; + bool TIRs_empty = kvm_check_is_guest_TIRs_empty(vcpu); + + if (TIRs_empty) + return false; + if (regs->traps_to_guest == 0) { + /* probably it is recursive traps on host and can be */ + /* handled only by host */ + if (count_trap_regs(regs) <= 1) { + /* it is not recursive trap */ + return true; + } + /* it is recursive trap and previous guest traps is not yet */ + /* saved, so it can be only host traps; check enable, */ + /* update diasble */ + return to_update; + } + /* TIRs are not empty and there ara unhandled guest traps, */ + /* so it can be new guest trap */ + return false; +} + +static inline void +kvm_set_pv_vcpu_trap_context(struct kvm_vcpu *vcpu, pt_regs_t *regs) +{ + KVM_BUG_ON(check_is_guest_TIRs_frozen(regs, false)); + + if (kvm_get_guest_vcpu_TIRs_num(vcpu) < 0) { + KVM_BUG_ON(kvm_check_is_vcpu_guest_stacks_empty(vcpu, regs)); + } + + if (DEBUG_KVM_VERBOSE_GUEST_TRAPS_MODE) + print_pt_regs(regs); + + KVM_BUG_ON(test_ts_flag(TS_HOST_AT_VCPU_MODE)); + + KVM_BUG_ON(kvm_get_guest_vcpu_runstate(vcpu) != RUNSTATE_in_trap && + kvm_get_guest_vcpu_runstate(vcpu) != + RUNSTATE_in_intercept); + + save_guest_trap_regs(vcpu, regs); +} + +static inline void +kvm_set_pv_vcpu_SBBP_TIRs(struct kvm_vcpu *vcpu, pt_regs_t *regs) +{ + int TIRs_num, TIR_no; + e2k_tir_lo_t TIR_lo; + e2k_tir_hi_t TIR_hi; + unsigned long mask = 0; + + TIRs_num = kvm_get_vcpu_intc_TIRs_num(vcpu); + kvm_reset_guest_vcpu_TIRs_num(vcpu); + + for (TIR_no = 0; TIR_no <= TIRs_num; TIR_no++) { + TIR_hi = kvm_get_vcpu_intc_TIR_hi(vcpu, TIR_no); + TIR_lo = kvm_get_vcpu_intc_TIR_lo(vcpu, TIR_no); + mask |= kvm_update_guest_vcpu_TIR(vcpu, TIR_no, TIR_hi, TIR_lo); + } + regs->traps_to_guest = mask; + kvm_clear_vcpu_intc_TIRs_num(vcpu); + + kvm_copy_guest_vcpu_SBBP(vcpu, regs->trap->sbbp); +} + +static inline void kvm_inject_pv_vcpu_tc_entry(struct kvm_vcpu *vcpu, + trap_cellar_t 
*tc_from) +{ + void *tc_kaddr = vcpu->arch.mmu.tc_kaddr; + kernel_trap_cellar_t *tc; + kernel_trap_cellar_ext_t *tc_ext; + tc_opcode_t opcode; + int cnt, fmt; + + cnt = vcpu->arch.mmu.tc_num; + tc = tc_kaddr; + tc_ext = tc_kaddr + TC_EXT_OFFSET; + tc += cnt; + tc_ext += cnt; + + tc->address = tc_from->address; + tc->condition = tc_from->condition; + AW(opcode) = AS(tc_from->condition).opcode; + fmt = AS(opcode).fmt; + if (fmt == LDST_QP_FMT) { + tc_ext->mask = tc_from->mask; + } + if (AS(tc_from->condition).store) { + NATIVE_MOVE_TAGGED_DWORD(&tc_from->data, &tc->data); + if (fmt == LDST_QP_FMT) { + NATIVE_MOVE_TAGGED_DWORD(&tc_from->data_ext, + &tc_ext->data); + } + } + cnt++; + vcpu->arch.mmu.tc_num = cnt; + + /* MMU TRAP_COUNT cannot be set, so write flag of end of records */ + tc++; + AW(tc->condition) = -1; +} + +static inline bool +check_injected_stores_to_addr(struct kvm_vcpu *vcpu, gva_t addr, int size) +{ + void *tc_kaddr = vcpu->arch.mmu.tc_kaddr; + kernel_trap_cellar_t *tc; + tc_cond_t tc_cond; + e2k_addr_t tc_addr, tc_end; + bool tc_store; + int tc_size; + gva_t start, end; + int cnt, num; + + start = addr & PAGE_MASK; + end = (addr + (size - 1)) & PAGE_MASK; + tc = tc_kaddr; + num = vcpu->arch.mmu.tc_num; + for (cnt = 0; cnt < num; cnt++) { + tc_cond = tc->condition; + tc_store = tc_cond_is_store(tc_cond, machine.native_iset_ver); + if (!tc_store) + continue; + + tc_size = tc_cond_to_size(tc_cond); + tc_addr = tc->address; + tc_end = (tc_addr + (tc_size - 1)) & PAGE_MASK; + tc_addr &= PAGE_MASK; + if (tc_addr == start || tc_addr == end || + tc_end == start || tc_end == end) + return true; + } + return false; +} + +static inline void kvm_set_pv_vcpu_trap_cellar(struct kvm_vcpu *vcpu) +{ + kvm_write_pv_vcpu_mmu_TRAP_COUNT_reg(vcpu, vcpu->arch.mmu.tc_num * 3); +} + +static inline void +kvm_init_pv_vcpu_trap_handling(struct kvm_vcpu *vcpu, pt_regs_t *regs) +{ + /* clear guest trap TIRs & trap cellar */ + kvm_reset_guest_vcpu_TIRs_num(vcpu); + if (regs) { + 
regs->traps_to_guest = 0; + } + kvm_clear_vcpu_trap_cellar(vcpu); +} + +/* + * The function updates PSP registers base/size/index/offset and + * procedure stack state as result of guest stack expantion/constriction + * + * Interrupts should be disabled by caller + */ + +#define printk printk_fixed_args +#define panic panic_fixed_args +static inline void +kvm_update_guest_proc_stack(hw_stack_area_t *new_ps, long delta_ind) +{ + thread_info_t *ti; + gthread_info_t *gti; + hw_stack_t *ti_hw_stacks; + hw_stack_t *gti_hw_stacks; + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + long flags; + + raw_all_irq_save(flags); + NATIVE_FLUSHR; + psp_hi = NATIVE_NV_READ_PSP_HI_REG(); + psp_lo = NATIVE_NV_READ_PSP_LO_REG(); + if (DEBUG_GUEST_HS_MODE) { + DebugGHS("current PS state: base 0x%llx ind 0x%x size 0x%x\n", + psp_lo.PSP_lo_base, + psp_hi.PSP_hi_ind, psp_hi.PSP_hi_size); + NATIVE_FLUSHR; + } + + __update_psp_regs((unsigned long)new_ps->base, new_ps->size, + (u64) new_ps->base + AS(psp_hi).ind - delta_ind, + &psp_lo, &psp_hi); + BUG_ON(psp_hi.PSP_hi_ind >= psp_hi.PSP_hi_size); + +//TODO update in memory and use it when switching back NATIVE_NV_WRITE_PSP_REG(psp_hi, psp_lo); + + ti = current_thread_info(); + gti = ti->gthread_info; + ti_hw_stacks = &ti->u_hw_stack; + gti_hw_stacks = >i->hw_stacks; + + SET_PS_BASE(gti_hw_stacks, new_ps->base); + kvm_set_guest_hw_ps_user_size(gti_hw_stacks, + get_hw_ps_area_user_size(new_ps)); + /* copy from guest thread info to host thread info, because of */ + /* updated PS is now PS of current host process (VCPU) */ + SET_PS_BASE(ti_hw_stacks, new_ps->base); + kvm_set_guest_hw_ps_user_size(ti_hw_stacks, + get_hw_ps_area_user_size(new_ps)); + + raw_all_irq_restore(flags); + + DebugGHS("current PSP updated: base 0x%lx size 0x%x index 0x%x\n", + psp_lo.PSP_lo_base, psp_hi.PSP_hi_size, psp_hi.PSP_hi_ind); +} + +static inline void +kvm_update_guest_chain_stack(hw_stack_area_t *new_pcs, long delta_ind) +{ + thread_info_t *ti; + gthread_info_t *gti; 
+ hw_stack_t *ti_hw_stacks; + hw_stack_t *gti_hw_stacks; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + long flags; + + raw_all_irq_save(flags); + NATIVE_FLUSHC; + pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG(); + pcsp_lo = NATIVE_NV_READ_PCSP_LO_REG(); + if (DEBUG_GUEST_HS_MODE) { + DebugGHS("current PCS state: base 0x%llx ind 0x%x size 0x%x\n", + pcsp_lo.PCSP_lo_base, + pcsp_hi.PCSP_hi_ind, pcsp_hi.PCSP_hi_size); + NATIVE_FLUSHC; + } + + __update_pcsp_regs((unsigned long)new_pcs->base, new_pcs->size, + (u64) new_pcs->base + AS(pcsp_hi).ind - delta_ind, + &pcsp_lo, &pcsp_hi); + BUG_ON(pcsp_hi.PCSP_hi_ind >= pcsp_hi.PCSP_hi_size); + + //TODO NATIVE_NV_WRITE_PCSP_REG(pcsp_hi, pcsp_lo); + + ti = current_thread_info(); + gti = ti->gthread_info; + ti_hw_stacks = &ti->u_hw_stack; + gti_hw_stacks = >i->hw_stacks; + + SET_PCS_BASE(gti_hw_stacks, new_pcs->base); + kvm_set_guest_hw_pcs_user_size(gti_hw_stacks, + get_hw_pcs_area_user_size(new_pcs)); + /* copy from guest thread info to host thread info, because of */ + /* updated PCS is now PCS of current host process (VCPU) */ + SET_PCS_BASE(ti_hw_stacks, new_pcs->base); + kvm_set_guest_hw_pcs_user_size(ti_hw_stacks, + get_hw_pcs_area_user_size(new_pcs)); + + raw_all_irq_restore(flags); + + DebugGHS("current PCSP updated: base 0x%lx size 0x%x index 0x%x\n", + pcsp_lo.PCSP_lo_base, pcsp_hi.PCSP_hi_size, pcsp_hi.PCSP_hi_ind); +} +#undef printk +#undef panic + +#endif /* __KVM_E2K_CPU_H */ diff --git a/arch/e2k/kvm/cpu/Makefile b/arch/e2k/kvm/cpu/Makefile new file mode 100644 index 000000000000..bd4a3df4c505 --- /dev/null +++ b/arch/e2k/kvm/cpu/Makefile @@ -0,0 +1,13 @@ +CFLAGS_REMOVE_iset_v2.o = $(CFLAGS_ALL_CPUS) +CFLAGS_REMOVE_iset_v5.o = $(CFLAGS_ALL_CPUS) +CFLAGS_REMOVE_iset_v6.o = $(CFLAGS_ALL_CPUS) + +CFLAGS_iset_v2.o := -march=elbrus-v2 +CFLAGS_iset_v5.o := -march=elbrus-v5 +CFLAGS_iset_v6.o := -march=elbrus-v6 + +obj-$(CONFIG_VIRTUALIZATION) := iset_v2.o iset_v5.o + +ifeq ($(CONFIG_VIRTUALIZATION),y) 
+obj-$(CONFIG_KVM_HW_VIRTUALIZATION) += iset_v6.o +endif # CONFIG_VIRTUALIZATION diff --git a/arch/e2k/kvm/cpu/iset_v2.c b/arch/e2k/kvm/cpu/iset_v2.c new file mode 100644 index 000000000000..e5d7c478b650 --- /dev/null +++ b/arch/e2k/kvm/cpu/iset_v2.c @@ -0,0 +1,59 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Host kernel is using some additional global registers to support + * virtualization and guest kernel + * So it need save/restore these registers + */ + +notrace __interrupt +void kvm_guest_save_local_gregs_v2(local_gregs_t *gregs, bool is_signal) +{ + gregs->bgr = NATIVE_READ_BGR_REG(); + init_BGR_reg(); /* enable whole GRF */ + if (is_signal) + DO_SAVE_GUEST_LOCAL_GREGS_EXCEPT_KERNEL_V2(gregs->g); + NATIVE_WRITE_BGR_REG(gregs->bgr); +} + +notrace __interrupt +void kvm_guest_save_gregs_v2(global_regs_t *gregs) +{ + gregs->bgr = NATIVE_READ_BGR_REG(); + init_BGR_reg(); /* enable whole GRF */ + DO_SAVE_GUEST_GREGS_EXCEPT_KERNEL_V2(gregs->g); + NATIVE_WRITE_BGR_REG(gregs->bgr); +} + +notrace __interrupt +void kvm_guest_save_gregs_dirty_bgr_v2(global_regs_t *gregs) +{ + gregs->bgr = NATIVE_READ_BGR_REG(); + init_BGR_reg(); /* enable whole GRF */ + DO_SAVE_GUEST_GREGS_EXCEPT_KERNEL_V2(gregs->g); +} + +notrace __interrupt +void kvm_guest_restore_gregs_v2(const global_regs_t *gregs) +{ + init_BGR_reg(); /* enable whole GRF */ + DO_RESTORE_GUEST_GREGS_EXCEPT_KERNEL_V2(gregs->g); + NATIVE_WRITE_BGR_REG(gregs->bgr); +} + +notrace __interrupt +void kvm_guest_restore_local_gregs_v2(const local_gregs_t *gregs, + bool is_signal) +{ + init_BGR_reg(); + if (is_signal) + DO_RESTORE_GUEST_LOCAL_GREGS_EXCEPT_KERNEL_V2(gregs); + NATIVE_WRITE_BGR_REG(gregs->bgr); +} diff --git a/arch/e2k/kvm/cpu/iset_v5.c b/arch/e2k/kvm/cpu/iset_v5.c new file mode 100644 index 000000000000..374ea8751b80 --- /dev/null +++ b/arch/e2k/kvm/cpu/iset_v5.c @@ -0,0 +1,59 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +/* 
+ * Host kernel is using some additional global registers to support + * virtualization and guest kernel + * So it need save/restore these registers + */ + +notrace __interrupt +void kvm_guest_save_local_gregs_v5(struct local_gregs *gregs, bool is_signal) +{ + gregs->bgr = NATIVE_READ_BGR_REG(); + init_BGR_reg(); /* enable whole GRF */ + if (is_signal) + DO_SAVE_GUEST_LOCAL_GREGS_EXCEPT_KERNEL_V5(gregs->g); + NATIVE_WRITE_BGR_REG(gregs->bgr); +} + +notrace __interrupt +void kvm_guest_save_gregs_v5(struct global_regs *gregs) +{ + gregs->bgr = NATIVE_READ_BGR_REG(); + init_BGR_reg(); /* enable whole GRF */ + DO_SAVE_GUEST_GREGS_EXCEPT_KERNEL_V5(gregs->g); + NATIVE_WRITE_BGR_REG(gregs->bgr); +} + +notrace __interrupt +void kvm_guest_save_gregs_dirty_bgr_v5(struct global_regs *gregs) +{ + gregs->bgr = NATIVE_READ_BGR_REG(); + init_BGR_reg(); /* enable whole GRF */ + DO_SAVE_GUEST_GREGS_EXCEPT_KERNEL_V5(gregs->g); +} + +notrace __interrupt +void kvm_guest_restore_gregs_v5(const global_regs_t *gregs) +{ + init_BGR_reg(); /* enable whole GRF */ + DO_RESTORE_GUEST_GREGS_EXCEPT_KERNEL_V5(gregs->g); + NATIVE_WRITE_BGR_REG(gregs->bgr); +} + +notrace __interrupt +void kvm_guest_restore_local_gregs_v5(const local_gregs_t *gregs, + bool is_signal) +{ + init_BGR_reg(); + if (is_signal) + DO_RESTORE_GUEST_LOCAL_GREGS_EXCEPT_KERNEL_V5(gregs); + NATIVE_WRITE_BGR_REG(gregs->bgr); +} diff --git a/arch/e2k/kvm/cpu/iset_v6.c b/arch/e2k/kvm/cpu/iset_v6.c new file mode 100644 index 000000000000..b0f69058bc87 --- /dev/null +++ b/arch/e2k/kvm/cpu/iset_v6.c @@ -0,0 +1,1292 @@ + +#include +#include +#include +#include +#include +#include +#include + +static inline unsigned long +read_shadow_cpu_dsreg(const char *name) +{ + pr_err("Shadow register %s is not implemented on the CPU ISET " + "or compilator is not LCC-1.23\n", + name); + return 0; +} +static inline void +write_shadow_cpu_dsreg(const char *name, unsigned long value) +{ + pr_err("Shadow register %s is not implemented on the CPU ISET 
" + "or compilator is not LCC-1.23\n", + name); +} + +#if CONFIG_CPU_ISET >= 6 + +unsigned long read_VIRT_CTRL_CU_reg_value(void) +{ + return READ_VIRT_CTRL_CU_REG_VALUE(); +} +void write_VIRT_CTRL_CU_reg_value(unsigned long value) +{ + WRITE_VIRT_CTRL_CU_REG_VALUE(value); +} + +unsigned int read_SH_CORE_MODE_reg_value(void) +{ + return READ_SH_CORE_MODE_REG_VALUE(); +} +void write_SH_CORE_MODE_reg_value(unsigned int value) +{ + WRITE_SH_CORE_MODE_REG_VALUE(value); +} +unsigned long read_SH_PSP_LO_reg_value(void) +{ + return READ_SH_PSP_LO_REG_VALUE(); +} +unsigned long read_SH_PSP_HI_reg_value(void) +{ + return READ_SH_PSP_HI_REG_VALUE(); +} +void write_SH_PSP_LO_reg_value(unsigned long value) +{ + WRITE_SH_PSP_LO_REG_VALUE(value); +} +void write_SH_PSP_HI_reg_value(unsigned long value) +{ + WRITE_SH_PSP_HI_REG_VALUE(value); +} +unsigned long read_BU_PSP_LO_reg_value(void) +{ + return READ_BU_PSP_LO_REG_VALUE(); +} +unsigned long read_BU_PSP_HI_reg_value(void) +{ + return READ_BU_PSP_HI_REG_VALUE(); +} +void write_BU_PSP_LO_reg_value(unsigned long value) +{ + WRITE_BU_PSP_LO_REG_VALUE(value); +} +void write_BU_PSP_HI_reg_value(unsigned long value) +{ + WRITE_BU_PSP_HI_REG_VALUE(value); +} +unsigned long read_SH_PSHTP_reg_value(void) +{ + return READ_SH_PSHTP_REG_VALUE(); +} +void write_SH_PSHTP_reg_value(unsigned long value) +{ + WRITE_SH_PSHTP_REG_VALUE(value); +} +unsigned long read_SH_PCSP_LO_reg_value(void) +{ + return READ_SH_PCSP_LO_REG_VALUE(); +} +unsigned long read_SH_PCSP_HI_reg_value(void) +{ + return READ_SH_PCSP_HI_REG_VALUE(); +} +void write_SH_PCSP_LO_reg_value(unsigned long value) +{ + WRITE_SH_PCSP_LO_REG_VALUE(value); +} +void write_SH_PCSP_HI_reg_value(unsigned long value) +{ + WRITE_SH_PCSP_HI_REG_VALUE(value); +} +unsigned long read_BU_PCSP_LO_reg_value(void) +{ + return READ_BU_PCSP_LO_REG_VALUE(); +} +unsigned long read_BU_PCSP_HI_reg_value(void) +{ + return READ_BU_PCSP_HI_REG_VALUE(); +} +void write_BU_PCSP_LO_reg_value(unsigned long 
value) +{ + WRITE_BU_PCSP_LO_REG_VALUE(value); +} +void write_BU_PCSP_HI_reg_value(unsigned long value) +{ + WRITE_BU_PCSP_HI_REG_VALUE(value); +} +int read_SH_PCSHTP_reg_value(void) +{ + return READ_SH_PCSHTP_REG_SVALUE(); +} +void write_SH_PCSHTP_reg_value(int value) +{ + WRITE_SH_PCSHTP_REG_SVALUE(value); +} +unsigned long read_SH_WD_reg_value(void) +{ + return READ_SH_WD_REG_VALUE(); +} +void write_SH_WD_reg_value(unsigned long value) +{ + WRITE_SH_WD_REG_VALUE(value); +} + +unsigned long read_SH_OSCUD_LO_reg_value(void) +{ + return READ_SH_OSCUD_LO_REG_VALUE(); +} +unsigned long read_SH_OSCUD_HI_reg_value(void) +{ + return READ_SH_OSCUD_HI_REG_VALUE(); +} +void write_SH_OSCUD_LO_reg_value(unsigned long value) +{ + WRITE_SH_OSCUD_LO_REG_VALUE(value); +} +void write_SH_OSCUD_HI_reg_value(unsigned long value) +{ + WRITE_SH_OSCUD_HI_REG_VALUE(value); +} + +unsigned long read_SH_OSGD_LO_reg_value(void) +{ + return READ_SH_OSGD_LO_REG_VALUE(); +} +unsigned long read_SH_OSGD_HI_reg_value(void) +{ + return READ_SH_OSGD_HI_REG_VALUE(); +} +void write_SH_OSGD_LO_reg_value(unsigned long value) +{ + WRITE_SH_OSGD_LO_REG_VALUE(value); +} +void write_SH_OSGD_HI_reg_value(unsigned long value) +{ + WRITE_SH_OSGD_HI_REG_VALUE(value); +} + +unsigned long read_SH_OSCUTD_reg_value(void) +{ + return READ_SH_OSCUTD_REG_VALUE(); +} +void write_SH_OSCUTD_reg_value(unsigned long value) +{ + WRITE_SH_OSCUTD_REG_VALUE(value); +} + +unsigned int read_SH_OSCUIR_reg_value(void) +{ + return READ_SH_OSCUIR_REG_VALUE(); +} +void write_SH_OSCUIR_reg_value(unsigned int value) +{ + WRITE_SH_OSCUIR_REG_VALUE(value); +} + +unsigned long read_SH_OSR0_reg_value(void) +{ + return READ_SH_OSR0_REG_VALUE(); +} +void write_SH_OSR0_reg_value(unsigned long value) +{ + WRITE_SH_OSR0_REG_VALUE(value); +} + +unsigned long read_VIRT_CTRL_MU_reg_value(void) +{ + return READ_VIRT_CTRL_MU_REG_VALUE(); +} +void write_VIRT_CTRL_MU_reg_value(unsigned long value) +{ + WRITE_VIRT_CTRL_MU_REG_VALUE(value); +} + 
+unsigned long read_GID_reg_value(void) +{ + return READ_GID_REG_VALUE(); +} +void write_GID_reg_value(unsigned long value) +{ + WRITE_GID_REG_VALUE(value); +} + +unsigned long read_GP_VPTB_reg_value(void) +{ + return READ_GP_VPTB_REG_VALUE(); +} +void write_GP_VPTB_reg_value(unsigned long value) +{ + WRITE_GP_VPTB_REG_VALUE(value); +} + +unsigned long read_GP_PPTB_reg_value(void) +{ + return READ_GP_PPTB_REG_VALUE(); +} +void write_GP_PPTB_reg_value(unsigned long value) +{ + WRITE_GP_PPTB_REG_VALUE(value); +} + +unsigned long read_SH_OS_PPTB_reg_value(void) +{ + return READ_SH_OS_PPTB_REG_VALUE(); +} +void write_SH_OS_PPTB_reg_value(unsigned long value) +{ + WRITE_SH_OS_PPTB_REG_VALUE(value); +} + +unsigned long read_SH_OS_VPTB_reg_value(void) +{ + return READ_SH_OS_VPTB_REG_VALUE(); +} +void write_SH_OS_VPTB_reg_value(unsigned long value) +{ + WRITE_SH_OS_VPTB_REG_VALUE(value); +} + +unsigned long read_SH_OS_VAB_reg_value(void) +{ + return READ_SH_OS_VAB_REG_VALUE(); +} +void write_SH_OS_VAB_reg_value(unsigned long value) +{ + WRITE_SH_OS_VAB_REG_VALUE(value); +} + +unsigned long read_G_W_IMASK_MMU_CR_reg_value(void) +{ + return READ_G_W_IMASK_MMU_CR_REG_VALUE(); +} +void write_G_W_IMASK_MMU_CR_reg_value(unsigned long value) +{ + WRITE_G_W_IMASK_MMU_CR_REG_VALUE(value); +} + +unsigned long read_SH_PID_reg_value(void) +{ + return READ_SH_PID_REG_VALUE(); +} +void write_SH_PID_reg_value(unsigned long value) +{ + WRITE_SH_PID_REG_VALUE(value); +} + +unsigned long read_SH_MMU_CR_reg_value(void) +{ + return READ_SH_MMU_CR_REG_VALUE(); +} +void write_SH_MMU_CR_reg_value(unsigned long value) +{ + WRITE_SH_MMU_CR_REG_VALUE(value); +} + +#elif CONFIG_CPU_ISET >= 1 + +unsigned long read_VIRT_CTRL_CU_reg_value(void) +{ + return read_shadow_cpu_dsreg("VIRT_CTRL_CU"); +} +void write_VIRT_CTRL_CU_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("VIRT_CTRL_CU", value); +} + +unsigned int read_SH_CORE_MODE_reg_value(void) +{ + return 
read_shadow_cpu_dsreg("SH_CORE_MODE"); +} +void write_SH_CORE_MODE_reg_value(unsigned int value) +{ + write_shadow_cpu_dsreg("SH_CORE_MODE", value); +} +unsigned long read_SH_PSP_LO_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_PSP_LO"); +} +unsigned long read_SH_PSP_HI_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_PSP_HI"); +} +void write_SH_PSP_LO_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_PSP_LO", value); +} +void write_SH_PSP_HI_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_PSP_HI", value); +} +unsigned long read_BU_PSP_LO_reg_value(void) +{ + return read_shadow_cpu_dsreg("BU_PSP_LO"); +} +unsigned long read_BU_PSP_HI_reg_value(void) +{ + return read_shadow_cpu_dsreg("BU_PSP_HI"); +} +void write_BU_PSP_LO_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("BU_PSP_LO", value); +} +void write_BU_PSP_HI_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("BU_PSP_HI", value); +} +unsigned long read_SH_PSHTP_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_PSHTP"); +} +void write_SH_PSHTP_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_PSHTP", value); +} +unsigned long read_SH_PCSP_LO_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_PCSP_LO"); +} +unsigned long read_SH_PCSP_HI_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_PCSP_HI"); +} +void write_SH_PCSP_LO_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_PCSP_LO", value); +} +void write_SH_PCSP_HI_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_PCSP_HI", value); +} +unsigned long read_BU_PCSP_LO_reg_value(void) +{ + return read_shadow_cpu_dsreg("BU_PCSP_LO"); +} +unsigned long read_BU_PCSP_HI_reg_value(void) +{ + return read_shadow_cpu_dsreg("BU_PCSP_HI"); +} +void write_BU_PCSP_LO_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("BU_PCSP_LO", value); +} +void write_BU_PCSP_HI_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("BU_PCSP_HI", value); +} +int 
read_SH_PCSHTP_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_PCSHTP"); +} +void write_SH_PCSHTP_reg_value(int value) +{ + write_shadow_cpu_dsreg("SH_PCSHTP", value); +} +unsigned long read_SH_WD_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_WD"); +} +void write_SH_WD_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_WD", value); +} +unsigned long read_SH_OSCUD_LO_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_OSCUD_LO"); +} +unsigned long read_SH_OSCUD_HI_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_OSCUD_HI"); +} +void write_SH_OSCUD_LO_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_OSCUD_LO", value); +} +void write_SH_OSCUD_HI_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_OSCUD_HI", value); +} +unsigned long read_SH_OSGD_LO_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_OSGD_LO"); +} +unsigned long read_SH_OSGD_HI_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_OSGD_HI"); +} +void write_SH_OSGD_LO_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_OSGD_LO", value); +} +void write_SH_OSGD_HI_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_OSGD_HI", value); +} +unsigned long read_SH_OSCUTD_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_OSCUTD"); +} +void write_SH_OSCUTD_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_OSCUTD", value); +} +unsigned long read_SH_OSR0_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_OSR0"); +} +void write_SH_OSR0_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_OSR0", value); +} +unsigned int read_SH_OSCUIR_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_OSCUIR"); +} +void write_SH_OSCUIR_reg_value(unsigned int value) +{ + write_shadow_cpu_dsreg("SH_OSCUIR", value); +} + +unsigned long read_VIRT_CTRL_MU_reg_value(void) +{ + return read_shadow_cpu_dsreg("VIRT_CTRL_MU"); +} +void write_VIRT_CTRL_MU_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("VIRT_CTRL_MU", 
value); +} + +unsigned long read_GID_reg_value(void) +{ + return read_shadow_cpu_dsreg("GID"); +} +void write_GID_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("GID", value); +} + +unsigned long read_GP_VPTB_reg_value(void) +{ + return read_shadow_cpu_dsreg("GP_VPTB"); +} +void write_GP_VPTB_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("GP_VPTB", value); +} + +unsigned long read_GP_PPTB_reg_value(void) +{ + return read_shadow_cpu_dsreg("GP_PPTB"); +} +void write_GP_PPTB_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("GP_PPTB", value); +} + +unsigned long read_SH_OS_PPTB_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_OS_PPTB"); +} +void write_SH_OS_PPTB_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_OS_PPTB", value); +} + +unsigned long read_SH_OS_VPTB_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_OS_VPTB"); +} +void write_SH_OS_VPTB_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_OS_VPTB", value); +} + +unsigned long read_SH_OS_VAB_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_OS_VAB"); +} +void write_SH_OS_VAB_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_OS_VAB", value); +} + +unsigned long read_G_W_IMASK_MMU_CR_reg_value(void) +{ + return read_shadow_cpu_dsreg("G_W_IMASK_MMU_CR"); +} +void write_G_W_IMASK_MMU_CR_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("G_W_IMASK_MMU_CR", value); +} + +unsigned long read_SH_PID_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_PID"); +} +void write_SH_PID_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_PID", value); +} + +unsigned long read_SH_MMU_CR_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_MMU_CR"); +} +void write_SH_MMU_CR_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_MMU_CR", value); +} + +#else /* CONFIG_CPU_ISET is 0 or undefined or negative */ +# if CONFIG_CPU_ISET != 0 +# warning "Undefined CPU ISET VERSION #" +# endif + +# if __LCC__ >= 123 + 
+unsigned long read_VIRT_CTRL_CU_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_VIRT_CTRL_CU_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("VIRT_CTRL_CU"); + } +} +void write_VIRT_CTRL_CU_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_VIRT_CTRL_CU_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("VIRT_CTRL_CU", value); + } +} + +unsigned int read_SH_CORE_MODE_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_SH_CORE_MODE_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("SH_CORE_MODE"); + } +} +void write_SH_CORE_MODE_reg_value(unsigned int value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_SH_CORE_MODE_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("SH_CORE_MODE", value); + } +} +unsigned long read_SH_PSP_LO_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_SH_PSP_LO_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("SH_PSP_LO"); + } +} +unsigned long read_SH_PSP_HI_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_SH_PSP_HI_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("SH_PSP_HI"); + } +} +void write_SH_PSP_LO_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_SH_PSP_LO_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("SH_PSP_LO", value); + } +} +void write_SH_PSP_HI_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_SH_PSP_HI_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("SH_PSP_HI", value); + } +} +unsigned long read_BU_PSP_LO_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_BU_PSP_LO_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("BU_PSP_LO"); + } +} +unsigned long read_BU_PSP_HI_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_BU_PSP_HI_REG_VALUE(); + } else { + return 
read_shadow_cpu_dsreg("BU_PSP_HI"); + } +} +void write_BU_PSP_LO_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_BU_PSP_LO_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("BU_PSP_LO", value); + } +} +void write_BU_PSP_HI_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_BU_PSP_HI_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("BU_PSP_HI", value); + } +} +unsigned long read_SH_PSHTP_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_SH_PSHTP_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("SH_PSHTP"); + } +} +void write_SH_PSHTP_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_SH_PSHTP_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("SH_PSHTP", value); + } +} +unsigned long read_SH_PCSP_LO_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_SH_PCSP_LO_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("SH_PCSP_LO"); + } +} +unsigned long read_SH_PCSP_HI_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_SH_PCSP_HI_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("SH_PCSP_HI"); + } +} +void write_SH_PCSP_LO_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_SH_PCSP_LO_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("SH_PCSP_LO", value); + } +} +void write_SH_PCSP_HI_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_SH_PCSP_HI_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("SH_PCSP_HI", value); + } +} +unsigned long read_BU_PCSP_LO_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_BU_PCSP_LO_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("BU_PCSP_LO"); + } +} +unsigned long read_BU_PCSP_HI_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_BU_PCSP_HI_REG_VALUE(); + } 
else { + return read_shadow_cpu_dsreg("BU_PCSP_HI"); + } +} +void write_BU_PCSP_LO_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_BU_PCSP_LO_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("BU_PCSP_LO", value); + } +} +void write_BU_PCSP_HI_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_BU_PCSP_HI_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("BU_PCSP_HI", value); + } +} +int read_SH_PCSHTP_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_SH_PCSHTP_REG_SVALUE(); + } else { + return read_shadow_cpu_dsreg("SH_PCSHTP"); + } +} +void write_SH_PCSHTP_reg_value(int value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_SH_PCSHTP_REG_SVALUE(value); + } else { + write_shadow_cpu_dsreg("SH_PCSHTP", value); + } +} +unsigned long read_SH_WD_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_SH_WD_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("SH_WD"); + } +} +void write_SH_WD_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_SH_WD_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("SH_WD", value); + } +} +unsigned long read_SH_OSCUD_LO_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_SH_OSCUD_LO_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("SH_OSCUD_LO"); + } +} +unsigned long read_SH_OSCUD_HI_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_SH_OSCUD_HI_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("SH_OSCUD_HI"); + } +} +void write_SH_OSCUD_LO_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_SH_OSCUD_LO_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("SH_OSCUD_LO", value); + } +} +void write_SH_OSCUD_HI_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_SH_OSCUD_HI_REG_VALUE(value); + } 
else { + write_shadow_cpu_dsreg("SH_OSCUD_HI", value); + } +} +unsigned long read_SH_OSGD_LO_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_SH_OSGD_LO_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("SH_OSGD_LO"); + } +} +unsigned long read_SH_OSGD_HI_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_SH_OSGD_HI_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("SH_OSGD_HI"); + } +} +void write_SH_OSGD_LO_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_SH_OSGD_LO_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("SH_OSGD_LO", value); + } +} +void write_SH_OSGD_HI_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_SH_OSGD_HI_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("SH_OSGD_HI", value); + } +} +unsigned long read_SH_OSCUTD_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_SH_OSCUTD_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("SH_OSCUTD"); + } +} +void write_SH_OSCUTD_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_SH_OSCUTD_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("SH_OSCUTD", value); + } +} +unsigned long read_SH_OSR0_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_SH_OSR0_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("SH_OSR0"); + } +} +void write_SH_OSR0_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_SH_OSR0_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("SH_OSR0", value); + } +} +unsigned int read_SH_OSCUIR_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_SH_OSCUIR_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("SH_OSCUIR"); + } +} +void write_SH_OSCUIR_reg_value(unsigned int value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + 
WRITE_SH_OSCUIR_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("SH_OSCUIR", value); + } +} + +unsigned long read_VIRT_CTRL_MU_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_VIRT_CTRL_MU_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("VIRT_CTRL_MU"); + } +} +void write_VIRT_CTRL_MU_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_VIRT_CTRL_MU_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("VIRT_CTRL_MU", value); + } +} + +unsigned long read_GID_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_GID_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("GID"); + } +} +void write_GID_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_GID_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("GID", value); + } +} + +unsigned long read_GP_VPTB_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_GP_VPTB_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("GP_VPTB"); + } +} +void write_GP_VPTB_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_GP_VPTB_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("GP_VPTB", value); + } +} + +unsigned long read_GP_PPTB_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_GP_PPTB_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("GP_PPTB"); + } +} +void write_GP_PPTB_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_GP_PPTB_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("GP_PPTB", value); + } +} + +unsigned long read_SH_OS_PPTB_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_SH_OS_PPTB_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("SH_OS_PPTB"); + } +} +void write_SH_OS_PPTB_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + 
WRITE_SH_OS_PPTB_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("SH_OS_PPTB", value); + } +} + +unsigned long read_SH_OS_VPTB_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_SH_OS_VPTB_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("SH_OS_VPTB"); + } +} +void write_SH_OS_VPTB_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_SH_OS_VPTB_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("SH_OS_VPTB", value); + } +} + +unsigned long read_SH_OS_VAB_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_SH_OS_VAB_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("SH_OS_VAB"); + } +} +void write_SH_OS_VAB_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_SH_OS_VAB_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("SH_OS_VAB", value); + } +} + +unsigned long read_G_W_IMASK_MMU_CR_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_G_W_IMASK_MMU_CR_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("G_W_IMASK_MMU_CR"); + } +} +void write_G_W_IMASK_MMU_CR_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_G_W_IMASK_MMU_CR_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("G_W_IMASK_MMU_CR", value); + } +} + +unsigned long read_SH_PID_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_SH_PID_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("SH_PID"); + } +} +void write_SH_PID_reg_value(unsigned long value) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_SH_PID_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("SH_PID", value); + } +} + +unsigned long read_SH_MMU_CR_reg_value(void) +{ + if (machine.native_iset_ver >= E2K_ISET_V6) { + return READ_SH_MMU_CR_REG_VALUE(); + } else { + return read_shadow_cpu_dsreg("SH_MMU_CR"); + } +} +void write_SH_MMU_CR_reg_value(unsigned long value) +{ + 
if (machine.native_iset_ver >= E2K_ISET_V6) { + WRITE_SH_MMU_CR_REG_VALUE(value); + } else { + write_shadow_cpu_dsreg("SH_MMU_CR", value); + } +} +# else /* __LCC__ < 123 */ + +unsigned long read_VIRT_CTRL_CU_reg_value(void) +{ + return read_shadow_cpu_dsreg("VIRT_CTRL_CU"); +} +void write_VIRT_CTRL_CU_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("VIRT_CTRL_CU", value); +} + +unsigned int read_SH_CORE_MODE_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_CORE_MODE"); +} +void write_SH_CORE_MODE_reg_value(unsigned int value) +{ + write_shadow_cpu_dsreg("SH_CORE_MODE", value); +} +unsigned long read_SH_PSP_LO_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_PSP_LO"); +} +unsigned long read_SH_PSP_HI_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_PSP_HI"); +} +void write_SH_PSP_LO_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_PSP_LO", value); +} +void write_SH_PSP_HI_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_PSP_HI", value); +} +unsigned long read_BU_PSP_LO_reg_value(void) +{ + return read_shadow_cpu_dsreg("BU_PSP_LO"); +} +unsigned long read_BU_PSP_HI_reg_value(void) +{ + return read_shadow_cpu_dsreg("BU_PSP_HI"); +} +void write_BU_PSP_LO_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("BU_PSP_LO", value); +} +void write_BU_PSP_HI_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("BU_PSP_HI", value); +} +unsigned long read_SH_PSHTP_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_PSHTP"); +} +void write_SH_PSHTP_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_PSHTP", value); +} +unsigned long read_SH_PCSP_LO_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_PCSP_LO"); +} +unsigned long read_SH_PCSP_HI_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_PCSP_HI"); +} +void write_SH_PCSP_LO_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_PCSP_LO", value); +} +void write_SH_PCSP_HI_reg_value(unsigned long value) +{ + 
write_shadow_cpu_dsreg("SH_PCSP_HI", value); +} +unsigned long read_BU_PCSP_LO_reg_value(void) +{ + return read_shadow_cpu_dsreg("BU_PCSP_LO"); +} +unsigned long read_BU_PCSP_HI_reg_value(void) +{ + return read_shadow_cpu_dsreg("BU_PCSP_HI"); +} +void write_BU_PCSP_LO_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("BU_PCSP_LO", value); +} +void write_BU_PCSP_HI_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("BU_PCSP_HI", value); +} +int read_SH_PCSHTP_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_PCSHTP"); +} +void write_SH_PCSHTP_reg_value(int value) +{ + write_shadow_cpu_dsreg("SH_PCSHTP", value); +} +unsigned long read_SH_WD_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_WD"); +} +void write_SH_WD_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_WD", value); +} +unsigned long read_SH_OSCUD_LO_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_OSCUD_LO"); +} +unsigned long read_SH_OSCUD_HI_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_OSCUD_HI"); +} +void write_SH_OSCUD_LO_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_OSCUD_LO", value); +} +void write_SH_OSCUD_HI_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_OSCUD_HI", value); +} +unsigned long read_SH_OSGD_LO_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_OSGD_LO"); +} +unsigned long read_SH_OSGD_HI_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_OSGD_HI"); +} +void write_SH_OSGD_LO_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_OSGD_LO", value); +} +void write_SH_OSGD_HI_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_OSGD_HI", value); +} +unsigned long read_SH_OSCUTD_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_OSCUTD"); +} +void write_SH_OSCUTD_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_OSCUTD", value); +} +unsigned long read_SH_OSR0_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_OSR0"); +} +void write_SH_OSR0_reg_value(unsigned long 
value) +{ + write_shadow_cpu_dsreg("SH_OSR0", value); +} +unsigned int read_SH_OSCUIR_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_OSCUIR"); +} +void write_SH_OSCUIR_reg_value(unsigned int value) +{ + write_shadow_cpu_dsreg("SH_OSCUIR", value); +} + +unsigned long read_VIRT_CTRL_MU_reg_value(void) +{ + return read_shadow_cpu_dsreg("VIRT_CTRL_MU"); +} +void write_VIRT_CTRL_MU_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("VIRT_CTRL_MU", value); +} + +unsigned long read_GID_reg_value(void) +{ + return read_shadow_cpu_dsreg("GID"); +} +void write_GID_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("GID", value); +} + +unsigned long read_GP_VPTB_reg_value(void) +{ + return read_shadow_cpu_dsreg("GP_VPTB"); +} +void write_GP_VPTB_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("GP_VPTB", value); +} + +unsigned long read_GP_PPTB_reg_value(void) +{ + return read_shadow_cpu_dsreg("GP_PPTB"); +} +void write_GP_PPTB_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("GP_PPTB", value); +} + +unsigned long read_SH_OS_PPTB_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_OS_PPTB"); +} +void write_SH_OS_PPTB_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_OS_PPTB", value); +} + +unsigned long read_SH_OS_VPTB_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_OS_VPTB"); +} +void write_SH_OS_VPTB_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_OS_VPTB", value); +} + +unsigned long read_SH_OS_VAB_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_OS_VAB"); +} +void write_SH_OS_VAB_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_OS_VAB", value); +} + +unsigned long read_G_W_IMASK_MMU_CR_reg_value(void) +{ + return read_shadow_cpu_dsreg("G_W_IMASK_MMU_CR"); +} +void write_G_W_IMASK_MMU_CR_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("G_W_IMASK_MMU_CR", value); +} + +unsigned long read_SH_PID_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_PID"); +} +void 
write_SH_PID_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_PID", value); +} + +unsigned long read_SH_MMU_CR_reg_value(void) +{ + return read_shadow_cpu_dsreg("SH_MMU_CR"); +} +void write_SH_MMU_CR_reg_value(unsigned long value) +{ + write_shadow_cpu_dsreg("SH_MMU_CR", value); +} +#endif /* __LCC >= 123 */ + +#endif /* CONFIG_CPU_ISET 0-6 */ diff --git a/arch/e2k/kvm/cpu_defs.h b/arch/e2k/kvm/cpu_defs.h new file mode 100644 index 000000000000..7c99f3357226 --- /dev/null +++ b/arch/e2k/kvm/cpu_defs.h @@ -0,0 +1,1303 @@ +#ifndef __KVM_E2K_CPU_DEFS_H +#define __KVM_E2K_CPU_DEFS_H + +#include +#include + +/* FIXME: the follow define only to debug, delete after completion and */ +/* turn on __interrupt atribute */ +#undef DEBUG_GTI +#define DEBUG_GTI 1 + +/* + * VCPU state structure contains CPU, MMU, Local APIC and other registers + * current values of VCPU. The structure is common for host and guest and + * can (and should) be accessed by both. + * Guest access do through global pointer which should be load on some global + * register (GUEST_VCPU_STATE_GREG) or on special CPU register GD. + * But GD can be used only if guest kernel run as protected task + */ + +/* + * Basic functions to access to virtual CPUs registers status on host. 
+ */ + +static inline u64 +kvm_get_guest_vcpu_regs_status(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.kmap_vcpu_state->cpu.regs_status; +} +static inline void +kvm_put_guest_vcpu_regs_status(struct kvm_vcpu *vcpu, unsigned long new_status) +{ + vcpu->arch.kmap_vcpu_state->cpu.regs_status = new_status; +} +static inline void +kvm_reset_guest_vcpu_regs_status(struct kvm_vcpu *vcpu) +{ + kvm_put_guest_vcpu_regs_status(vcpu, 0); +} +static inline void +kvm_put_guest_updated_vcpu_regs_flags(struct kvm_vcpu *vcpu, + unsigned long new_flags) +{ + unsigned long cur_flags = kvm_get_guest_vcpu_regs_status(vcpu); + cur_flags = KVM_SET_UPDATED_CPU_REGS_FLAGS(cur_flags, new_flags); + kvm_put_guest_vcpu_regs_status(vcpu, cur_flags); +} +static inline void +kvm_clear_guest_updated_vcpu_regs_flags(struct kvm_vcpu *vcpu, + unsigned long flags) +{ + unsigned long cur_flags = kvm_get_guest_vcpu_regs_status(vcpu); + cur_flags = KVM_CLEAR_UPDATED_CPU_REGS_FLAGS(cur_flags, flags); + kvm_put_guest_vcpu_regs_status(vcpu, cur_flags); +} +static inline void +kvm_reset_guest_updated_vcpu_regs_flags(struct kvm_vcpu *vcpu, + unsigned long regs_status) +{ + regs_status = KVM_INIT_UPDATED_CPU_REGS_FLAGS(regs_status); + kvm_put_guest_vcpu_regs_status(vcpu, regs_status); +} + +#define CPU_GET_SREG(vcpu, reg_name) \ +({ \ + kvm_cpu_regs_t *regs = &((vcpu)->arch.kmap_vcpu_state->cpu.regs); \ + u32 reg; \ + \ + reg = regs->CPU_##reg_name; \ + reg; \ +}) +#define CPU_GET_SSREG(vcpu, reg_name) \ +({ \ + kvm_cpu_regs_t *regs = &((vcpu)->arch.kmap_vcpu_state->cpu.regs); \ + int reg; \ + \ + reg = regs->CPU_##reg_name; \ + reg; \ +}) +#define CPU_GET_DSREG(vcpu, reg_name) \ +({ \ + kvm_cpu_regs_t *regs = &((vcpu)->arch.kmap_vcpu_state->cpu.regs); \ + u64 reg; \ + \ + reg = regs->CPU_##reg_name; \ + reg; \ +}) + +#define CPU_SET_SREG(vcpu, reg_name, reg_value) \ +({ \ + kvm_cpu_regs_t *regs = &((vcpu)->arch.kmap_vcpu_state->cpu.regs); \ + \ + regs->CPU_##reg_name = (reg_value); \ +}) +#define 
CPU_SETUP_SSREG(vcpu, reg_name, reg_value) \ +({ \ + kvm_cpu_regs_t *regs = &((vcpu)->arch.kmap_vcpu_state->cpu.regs); \ + \ + regs->CPU_##reg_name = (u32)(reg_value); \ +}) +#define CPU_SET_SSREG(vcpu, reg_name, reg_value) \ +({ \ + kvm_cpu_regs_t *regs = &((vcpu)->arch.kmap_vcpu_state->cpu.regs); \ + \ + regs->CPU_##reg_name = (reg_value); \ +}) +#define CPU_SET_DSREG(vcpu, reg_name, reg_value) \ +({ \ + kvm_cpu_regs_t *regs = &((vcpu)->arch.kmap_vcpu_state->cpu.regs); \ + \ + regs->CPU_##reg_name = (reg_value); \ +}) + +#define CPU_SET_TIR_lo(vcpu, reg_no, reg_value) \ +({ \ + e2k_tir_t *tir = &((vcpu)->arch.kmap_vcpu_state-> \ + cpu.regs.CPU_TIRs[reg_no]); \ + tir->TIR_lo.TIR_lo_reg = (reg_value); \ +}) + +#define CPU_SET_TIR_hi(vcpu, reg_no, reg_value) \ +({ \ + e2k_tir_t *tir = &((vcpu)->arch.kmap_vcpu_state-> \ + cpu.regs.CPU_TIRs[reg_no]); \ + tir->TIR_hi.TIR_hi_reg = (reg_value); \ +}) + +#define CPU_GET_TIR_lo(vcpu, reg_no) \ +({ \ + e2k_tir_t *tir = &((vcpu)->arch.kmap_vcpu_state-> \ + cpu.regs.CPU_TIRs[reg_no]); \ + tir->TIR_lo.TIR_lo_reg; \ +}) + +#define CPU_GET_TIR_hi(vcpu, reg_no) \ +({ \ + e2k_tir_t *tir = &((vcpu)->arch.kmap_vcpu_state-> \ + cpu.regs.CPU_TIRs[reg_no]); \ + tir->TIR_hi.TIR_hi_reg; \ +}) + +#define CPU_SET_SBBP(vcpu, reg_no, reg_value) \ +({ \ + u64 *sbbp_reg = &((vcpu)->arch.kmap_vcpu_state-> \ + cpu.regs.CPU_SBBP[reg_no]); \ + *sbbp_reg = (reg_value); \ +}) + +#define CPU_COPY_SBBP(vcpu, sbbp_from) \ +({ \ + u64 *sbbp_to = ((vcpu)->arch.kmap_vcpu_state->cpu.regs.CPU_SBBP); \ + if (likely(sbbp_from)) { \ + memcpy(sbbp_to, sbbp_from, sizeof(*sbbp_to) * SBBP_ENTRIES_NUM); \ + } else { \ + memset(sbbp_to, 0, sizeof(*sbbp_to) * SBBP_ENTRIES_NUM); \ + } \ +}) + +#define CPU_GET_SBBP(vcpu, reg_no) \ +({ \ + u64 *sbbp = &((vcpu)->arch.kmap_vcpu_state-> \ + cpu.regs.CPU_SBBP[reg_no]); \ + *sbbp; \ +}) + +static inline e2k_aau_t *get_vcpu_aau_context(struct kvm_vcpu *vcpu) +{ + return &(vcpu->arch.kmap_vcpu_state->cpu.aau); +} + +static 
inline u64 *get_vcpu_aaldi_context(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.kmap_vcpu_state->cpu.aaldi; +} + +static inline e2k_aalda_t *get_vcpu_aalda_context(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.kmap_vcpu_state->cpu.aalda; +} + +#define AAU_GET_SREG(vcpu, reg_name) \ +({ \ + e2k_aau_t *aau = get_vcpu_aau_context(vcpu); \ + u32 reg; \ + \ + reg = aau->reg_name; \ + reg; \ +}) + +#define AAU_GET_DREG(vcpu, reg_name) \ +({ \ + e2k_aau_t *aau = get_vcpu_aau_context(vcpu); \ + u64 reg; \ + \ + reg = aau->reg_name; \ + reg; \ +}) + +#define AAU_SET_SREG(vcpu, reg_name, reg_value) \ +({ \ + e2k_aau_t *aau = get_vcpu_aau_context(vcpu); \ + \ + aau->reg_name = (reg_value); \ +}) + +#define AAU_SET_DREG(vcpu, reg_name, reg_value) \ +({ \ + e2k_aau_t *aau = get_vcpu_aau_context(vcpu); \ + \ + aau->reg_name = (reg_value); \ +}) + +#define AAU_GET_SREGS_ITEM(vcpu, regs_name, reg_no) \ +({ \ + e2k_aau_t *aau = get_vcpu_aau_context(vcpu); \ + u32 reg; \ + \ + reg = (aau->regs_name)[reg_no]; \ + reg; \ +}) +#define AAU_GET_DREGS_ITEM(vcpu, regs_name, reg_no) \ +({ \ + e2k_aau_t *aau = get_vcpu_aau_context(vcpu); \ + u64 reg; \ + \ + reg = (aau->regs_name)[reg_no]; \ + reg; \ +}) +#define AAU_GET_STRUCT_REGS_ITEM(vcpu, regs_name, reg_no, reg_struct) \ +({ \ + e2k_aau_t *aau = get_vcpu_aau_context(vcpu); \ + \ + *(reg_struct) = (aau->regs_name)[reg_no]; \ +}) +#define AAU_SET_SREGS_ITEM(vcpu, regs_name, reg_no, reg_value) \ +({ \ + e2k_aau_t *aau = get_vcpu_aau_context(vcpu); \ + \ + (aau->regs_name)[reg_no] = (reg_value); \ +}) +#define AAU_SET_DREGS_ITEM(vcpu, regs_name, reg_no, reg_value) \ +({ \ + e2k_aau_t *aau = get_vcpu_aau_context(vcpu); \ + \ + (aau->regs_name)[reg_no] = (reg_value); \ +}) +#define AAU_SET_STRUCT_REGS_ITEM(vcpu, regs_name, reg_no, reg_struct) \ +({ \ + e2k_aau_t *aau = get_vcpu_aau_context(vcpu); \ + \ + (aau->regs_name)[reg_no] = *(reg_struct); \ +}) + +#define AAU_COPY_FROM_REGS(vcpu, regs_name, regs_to) \ +({ \ + e2k_aau_t *aau = 
get_vcpu_aau_context(vcpu); \ + \ + memcpy(regs_to, aau->regs_name, sizeof(aau->regs_name)); \ +}) + +#define AAU_COPY_TO_REGS(vcpu, regs_name, regs_from) \ +({ \ + e2k_aau_t *aau = get_vcpu_aau_context(vcpu); \ + \ + memcpy(aau->regs_name, regs_from, sizeof(aau->regs_name)); \ +}) + +static inline const u32 +kvm_get_guest_VCPU_ID(struct kvm_vcpu *vcpu) +{ + return CPU_GET_SSREG(vcpu, VCPU_ID); +} +static inline void +kvm_setup_guest_VCPU_ID(struct kvm_vcpu *vcpu, const u32 vcpu_id) +{ + CPU_SETUP_SSREG(vcpu, VCPU_ID, vcpu_id); +} + +static inline u64 +kvm_get_guest_vcpu_OSCUD_lo_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_DSREG(vcpu, OSCUD_lo.OSCUD_lo_half); +} + +static inline u64 +kvm_get_guest_vcpu_OSCUD_hi_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_DSREG(vcpu, OSCUD_hi.OSCUD_hi_half); +} + +static inline e2k_oscud_lo_t +kvm_get_guest_vcpu_OSCUD_lo(struct kvm_vcpu *vcpu) +{ + e2k_oscud_lo_t oscud_lo; + + oscud_lo.OSCUD_lo_half = kvm_get_guest_vcpu_OSCUD_lo_value(vcpu); + return oscud_lo; +} + +static inline e2k_oscud_hi_t +kvm_get_guest_vcpu_OSCUD_hi(struct kvm_vcpu *vcpu) +{ + e2k_oscud_hi_t oscud_hi; + + oscud_hi.OSCUD_hi_half = kvm_get_guest_vcpu_OSCUD_hi_value(vcpu); + return oscud_hi; +} + +static inline u64 +kvm_get_guest_vcpu_OSGD_lo_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_DSREG(vcpu, OSGD_lo.OSGD_lo_half); +} + +static inline u64 +kvm_get_guest_vcpu_OSGD_hi_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_DSREG(vcpu, OSGD_hi.OSGD_hi_half); +} + +static inline e2k_osgd_lo_t +kvm_get_guest_vcpu_OSGD_lo(struct kvm_vcpu *vcpu) +{ + e2k_osgd_lo_t osgd_lo; + + osgd_lo.OSGD_lo_half = kvm_get_guest_vcpu_OSGD_lo_value(vcpu); + return osgd_lo; +} + +static inline e2k_osgd_hi_t +kvm_get_guest_vcpu_OSGD_hi(struct kvm_vcpu *vcpu) +{ + e2k_osgd_hi_t osgd_hi; + + osgd_hi.OSGD_hi_half = kvm_get_guest_vcpu_OSGD_hi_value(vcpu); + return osgd_hi; +} + +static inline void +kvm_set_guest_vcpu_WD(struct kvm_vcpu *vcpu, e2k_wd_t WD) +{ + 
CPU_SET_DSREG(vcpu, WD.WD_reg, WD.WD_reg); +} +static inline u64 +kvm_get_guest_vcpu_WD_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_DSREG(vcpu, WD.WD_reg); +} +static inline e2k_wd_t +kvm_get_guest_vcpu_WD(struct kvm_vcpu *vcpu) +{ + e2k_wd_t WD; + + WD.WD_reg = kvm_get_guest_vcpu_WD_value(vcpu); + return WD; +} + +static inline void +kvm_set_guest_vcpu_USD_hi(struct kvm_vcpu *vcpu, e2k_usd_hi_t USD_hi) +{ + CPU_SET_DSREG(vcpu, USD_hi.USD_hi_half, USD_hi.USD_hi_half); +} + +static inline void +kvm_set_guest_vcpu_USD_lo(struct kvm_vcpu *vcpu, e2k_usd_lo_t USD_lo) +{ + CPU_SET_DSREG(vcpu, USD_lo.USD_lo_half, USD_lo.USD_lo_half); +} +static inline u64 +kvm_get_guest_vcpu_USD_hi_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_DSREG(vcpu, USD_hi.USD_hi_half); +} +static inline u64 +kvm_get_guest_vcpu_USD_lo_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_DSREG(vcpu, USD_lo.USD_lo_half); +} +static inline e2k_usd_hi_t +kvm_get_guest_vcpu_USD_hi(struct kvm_vcpu *vcpu) +{ + e2k_usd_hi_t USD_hi; + + USD_hi.USD_hi_half = kvm_get_guest_vcpu_USD_hi_value(vcpu); + return USD_hi; +} +static inline e2k_usd_lo_t +kvm_get_guest_vcpu_USD_lo(struct kvm_vcpu *vcpu) +{ + e2k_usd_lo_t USD_lo; + + USD_lo.USD_lo_half = kvm_get_guest_vcpu_USD_lo_value(vcpu); + return USD_lo; +} + +static inline void +kvm_set_guest_vcpu_PSHTP(struct kvm_vcpu *vcpu, e2k_pshtp_t PSHTP) +{ + CPU_SET_DSREG(vcpu, PSHTP.PSHTP_reg, PSHTP.PSHTP_reg); +} +static inline u64 +kvm_get_guest_vcpu_PSHTP_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_DSREG(vcpu, PSHTP.PSHTP_reg); +} +static inline e2k_pshtp_t +kvm_get_guest_vcpu_PSHTP(struct kvm_vcpu *vcpu) +{ + e2k_pshtp_t PSHTP; + + PSHTP.PSHTP_reg = kvm_get_guest_vcpu_PSHTP_value(vcpu); + return PSHTP; +} + +static inline void +kvm_set_guest_vcpu_PCSHTP(struct kvm_vcpu *vcpu, e2k_pcshtp_t PCSHTP) +{ + CPU_SET_SSREG(vcpu, PCSHTP, PCSHTP); +} +static inline e2k_pcshtp_t +kvm_get_guest_vcpu_PCSHTP_svalue(struct kvm_vcpu *vcpu) +{ + return CPU_GET_SSREG(vcpu, 
PCSHTP); +} + +static inline void +kvm_set_guest_vcpu_CR0_hi(struct kvm_vcpu *vcpu, e2k_cr0_hi_t CR0_hi) +{ + CPU_SET_DSREG(vcpu, CR0_hi.CR0_hi_half, CR0_hi.CR0_hi_half); +} + +static inline void +kvm_set_guest_vcpu_CR0_lo(struct kvm_vcpu *vcpu, e2k_cr0_lo_t CR0_lo) +{ + CPU_SET_DSREG(vcpu, CR0_lo.CR0_lo_half, CR0_lo.CR0_lo_half); +} + +static inline void +kvm_set_guest_vcpu_CR1_hi(struct kvm_vcpu *vcpu, e2k_cr1_hi_t CR1_hi) +{ + CPU_SET_DSREG(vcpu, CR1_hi.CR1_hi_half, CR1_hi.CR1_hi_half); +} + +static inline void +kvm_set_guest_vcpu_CR1_lo(struct kvm_vcpu *vcpu, e2k_cr1_lo_t CR1_lo) +{ + CPU_SET_DSREG(vcpu, CR1_lo.CR1_lo_half, CR1_lo.CR1_lo_half); +} +static inline u64 +kvm_get_guest_vcpu_CR0_hi_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_DSREG(vcpu, CR0_hi.CR0_hi_half); +} +static inline u64 +kvm_get_guest_vcpu_CR0_lo_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_DSREG(vcpu, CR0_lo.CR0_lo_half); +} +static inline u64 +kvm_get_guest_vcpu_CR1_hi_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_DSREG(vcpu, CR1_hi.CR1_hi_half); +} +static inline u64 +kvm_get_guest_vcpu_CR1_lo_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_DSREG(vcpu, CR1_lo.CR1_lo_half); +} +static inline e2k_cr0_hi_t +kvm_get_guest_vcpu_CR0_hi(struct kvm_vcpu *vcpu) +{ + e2k_cr0_hi_t CR0_hi; + + CR0_hi.CR0_hi_half = kvm_get_guest_vcpu_CR0_hi_value(vcpu); + return CR0_hi; +} +static inline e2k_cr0_lo_t +kvm_get_guest_vcpu_CR0_lo(struct kvm_vcpu *vcpu) +{ + e2k_cr0_lo_t CR0_lo; + + CR0_lo.CR0_lo_half = kvm_get_guest_vcpu_CR0_lo_value(vcpu); + return CR0_lo; +} +static inline e2k_cr1_hi_t +kvm_get_guest_vcpu_CR1_hi(struct kvm_vcpu *vcpu) +{ + e2k_cr1_hi_t CR1_hi; + + CR1_hi.CR1_hi_half = kvm_get_guest_vcpu_CR1_hi_value(vcpu); + return CR1_hi; +} +static inline e2k_cr1_lo_t +kvm_get_guest_vcpu_CR1_lo(struct kvm_vcpu *vcpu) +{ + e2k_cr1_lo_t CR1_lo; + + CR1_lo.CR1_lo_half = kvm_get_guest_vcpu_CR1_lo_value(vcpu); + return CR1_lo; +} + +static inline void +kvm_set_guest_vcpu_PSP_hi(struct 
kvm_vcpu *vcpu, e2k_psp_hi_t PSP_hi) +{ + CPU_SET_DSREG(vcpu, PSP_hi.PSP_hi_half, PSP_hi.PSP_hi_half); +} + +static inline void +kvm_set_guest_vcpu_PSP_lo(struct kvm_vcpu *vcpu, e2k_psp_lo_t PSP_lo) +{ + CPU_SET_DSREG(vcpu, PSP_lo.PSP_lo_half, PSP_lo.PSP_lo_half); +} +static inline u64 +kvm_get_guest_vcpu_PSP_hi_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_DSREG(vcpu, PSP_hi.PSP_hi_half); +} +static inline u64 +kvm_get_guest_vcpu_PSP_lo_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_DSREG(vcpu, PSP_lo.PSP_lo_half); +} +static inline e2k_psp_hi_t +kvm_get_guest_vcpu_PSP_hi(struct kvm_vcpu *vcpu) +{ + e2k_psp_hi_t PSP_hi; + + PSP_hi.PSP_hi_half = kvm_get_guest_vcpu_PSP_hi_value(vcpu); + return PSP_hi; +} +static inline e2k_psp_lo_t +kvm_get_guest_vcpu_PSP_lo(struct kvm_vcpu *vcpu) +{ + e2k_psp_lo_t PSP_lo; + + PSP_lo.PSP_lo_half = kvm_get_guest_vcpu_PSP_lo_value(vcpu); + return PSP_lo; +} + +static inline void +kvm_set_guest_vcpu_PCSP_hi(struct kvm_vcpu *vcpu, e2k_pcsp_hi_t PCSP_hi) +{ + CPU_SET_DSREG(vcpu, PCSP_hi.PCSP_hi_half, PCSP_hi.PCSP_hi_half); +} + +static inline void +kvm_set_guest_vcpu_PCSP_lo(struct kvm_vcpu *vcpu, e2k_pcsp_lo_t PCSP_lo) +{ + CPU_SET_DSREG(vcpu, PCSP_lo.PCSP_lo_half, PCSP_lo.PCSP_lo_half); +} +static inline u64 +kvm_get_guest_vcpu_PCSP_hi_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_DSREG(vcpu, PCSP_hi.PCSP_hi_half); +} +static inline u64 +kvm_get_guest_vcpu_PCSP_lo_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_DSREG(vcpu, PCSP_lo.PCSP_lo_half); +} +static inline e2k_pcsp_hi_t +kvm_get_guest_vcpu_PCSP_hi(struct kvm_vcpu *vcpu) +{ + e2k_pcsp_hi_t PCSP_hi; + + PCSP_hi.PCSP_hi_half = kvm_get_guest_vcpu_PCSP_hi_value(vcpu); + return PCSP_hi; +} +static inline e2k_pcsp_lo_t +kvm_get_guest_vcpu_PCSP_lo(struct kvm_vcpu *vcpu) +{ + e2k_pcsp_lo_t PCSP_lo; + + PCSP_lo.PCSP_lo_half = kvm_get_guest_vcpu_PCSP_lo_value(vcpu); + return PCSP_lo; +} + +static inline void +kvm_set_guest_vcpu_SBR(struct kvm_vcpu *vcpu, e2k_addr_t sbr) +{ + 
CPU_SET_DSREG(vcpu, SBR.SBR_reg, sbr); +} +static inline e2k_addr_t +kvm_get_guest_vcpu_SBR_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_DSREG(vcpu, SBR.SBR_reg); +} +static inline e2k_sbr_t +kvm_get_guest_vcpu_SBR(struct kvm_vcpu *vcpu) +{ + e2k_sbr_t sbr; + + sbr.SBR_reg = 0; + sbr.SBR_base = kvm_get_guest_vcpu_SBR_value(vcpu); + + return sbr; +} + +static inline void +kvm_set_guest_vcpu_CUD_lo(struct kvm_vcpu *vcpu, e2k_cud_lo_t CUD_lo) +{ + CPU_SET_DSREG(vcpu, CUD_lo.CUD_lo_half, CUD_lo.CUD_lo_half); +} + +static inline void +kvm_set_guest_vcpu_CUD_hi(struct kvm_vcpu *vcpu, e2k_cud_hi_t CUD_hi) +{ + CPU_SET_DSREG(vcpu, CUD_hi.CUD_hi_half, CUD_hi.CUD_hi_half); +} + +static inline void +kvm_set_guest_vcpu_CUD(struct kvm_vcpu *vcpu, e2k_cud_hi_t CUD_hi, + e2k_cud_lo_t CUD_lo) +{ + kvm_set_guest_vcpu_CUD_hi(vcpu, CUD_hi); + kvm_set_guest_vcpu_CUD_lo(vcpu, CUD_lo); +} + +static inline void +kvm_set_guest_vcpu_GD_lo(struct kvm_vcpu *vcpu, e2k_gd_lo_t GD_lo) +{ + CPU_SET_DSREG(vcpu, GD_lo.GD_lo_half, GD_lo.GD_lo_half); +} + +static inline void +kvm_set_guest_vcpu_GD_hi(struct kvm_vcpu *vcpu, e2k_gd_hi_t GD_hi) +{ + CPU_SET_DSREG(vcpu, GD_hi.GD_hi_half, GD_hi.GD_hi_half); +} + +static inline void +kvm_set_guest_vcpu_GD(struct kvm_vcpu *vcpu, e2k_gd_hi_t GD_hi, + e2k_gd_lo_t GD_lo) +{ + kvm_set_guest_vcpu_GD_hi(vcpu, GD_hi); + kvm_set_guest_vcpu_GD_lo(vcpu, GD_lo); +} + +static inline void +kvm_set_guest_vcpu_OSCUD_lo(struct kvm_vcpu *vcpu, e2k_oscud_lo_t OSCUD_lo) +{ + CPU_SET_DSREG(vcpu, OSCUD_lo.OSCUD_lo_half, OSCUD_lo.OSCUD_lo_half); +} + +static inline void +kvm_set_guest_vcpu_OSCUD_hi(struct kvm_vcpu *vcpu, e2k_oscud_hi_t OSCUD_hi) +{ + CPU_SET_DSREG(vcpu, OSCUD_hi.OSCUD_hi_half, OSCUD_hi.OSCUD_hi_half); +} + +static inline void +kvm_set_guest_vcpu_OSCUD(struct kvm_vcpu *vcpu, e2k_oscud_hi_t OSCUD_hi, + e2k_oscud_lo_t OSCUD_lo) +{ + kvm_set_guest_vcpu_OSCUD_hi(vcpu, OSCUD_hi); + kvm_set_guest_vcpu_OSCUD_lo(vcpu, OSCUD_lo); +} + +static inline void 
+kvm_set_guest_vcpu_OSGD_lo(struct kvm_vcpu *vcpu, e2k_osgd_lo_t OSGD_lo) +{ + CPU_SET_DSREG(vcpu, OSGD_lo.OSGD_lo_half, OSGD_lo.OSGD_lo_half); +} + +static inline void +kvm_set_guest_vcpu_OSGD_hi(struct kvm_vcpu *vcpu, e2k_osgd_hi_t OSGD_hi) +{ + CPU_SET_DSREG(vcpu, OSGD_hi.OSGD_hi_half, OSGD_hi.OSGD_hi_half); +} + +static inline void +kvm_set_guest_vcpu_OSGD(struct kvm_vcpu *vcpu, e2k_osgd_hi_t OSGD_hi, + e2k_osgd_lo_t OSGD_lo) +{ + kvm_set_guest_vcpu_OSGD_hi(vcpu, OSGD_hi); + kvm_set_guest_vcpu_OSGD_lo(vcpu, OSGD_lo); +} + +static inline void +kvm_set_guest_vcpu_CUTD(struct kvm_vcpu *vcpu, e2k_cutd_t CUTD) +{ + CPU_SET_DSREG(vcpu, CUTD.CUTD_reg, CUTD.CUTD_reg); +} +static inline unsigned long +kvm_get_guest_vcpu_CUTD_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_DSREG(vcpu, CUTD.CUTD_reg); +} +static inline e2k_cutd_t +kvm_get_guest_vcpu_CUTD(struct kvm_vcpu *vcpu) +{ + e2k_cutd_t cutd; + + cutd.CUTD_reg = kvm_get_guest_vcpu_CUTD_value(vcpu); + return cutd; +} + +static inline void +kvm_set_guest_vcpu_CUIR(struct kvm_vcpu *vcpu, e2k_cuir_t CUIR) +{ + CPU_SET_SSREG(vcpu, CUIR.CUIR_reg, CUIR.CUIR_reg); +} +static inline unsigned int +kvm_get_guest_vcpu_CUIR_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_SSREG(vcpu, CUIR.CUIR_reg); +} +static inline e2k_cuir_t +kvm_get_guest_vcpu_CUIR(struct kvm_vcpu *vcpu) +{ + e2k_cuir_t cuir; + + cuir.CUIR_reg = kvm_get_guest_vcpu_CUIR_value(vcpu); + return cuir; +} + +static inline void +kvm_set_guest_vcpu_OSCUTD(struct kvm_vcpu *vcpu, e2k_cutd_t CUTD) +{ + CPU_SET_DSREG(vcpu, OSCUTD.CUTD_reg, CUTD.CUTD_reg); +} +static inline unsigned long +kvm_get_guest_vcpu_OSCUTD_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_DSREG(vcpu, OSCUTD.CUTD_reg); +} +static inline e2k_cutd_t +kvm_get_guest_vcpu_OSCUTD(struct kvm_vcpu *vcpu) +{ + e2k_cutd_t cutd; + + cutd.CUTD_reg = kvm_get_guest_vcpu_OSCUTD_value(vcpu); + return cutd; +} + +static inline void +kvm_set_guest_vcpu_OSCUIR(struct kvm_vcpu *vcpu, e2k_cuir_t CUIR) +{ + 
CPU_SET_SSREG(vcpu, OSCUIR.CUIR_reg, CUIR.CUIR_reg); +} +static inline unsigned int +kvm_get_guest_vcpu_OSCUIR_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_SSREG(vcpu, OSCUIR.CUIR_reg); +} +static inline e2k_cuir_t +kvm_get_guest_vcpu_OSCUIR(struct kvm_vcpu *vcpu) +{ + e2k_cuir_t cuir; + + cuir.CUIR_reg = kvm_get_guest_vcpu_OSCUIR_value(vcpu); + return cuir; +} + +static inline void +kvm_set_guest_vcpu_OSR0(struct kvm_vcpu *vcpu, u64 osr0) +{ + CPU_SET_DSREG(vcpu, OSR0, osr0); +} +static inline unsigned long +kvm_get_guest_vcpu_OSR0_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_DSREG(vcpu, OSR0); +} + +static inline void +kvm_set_guest_vcpu_CORE_MODE(struct kvm_vcpu *vcpu, e2k_core_mode_t core_mode) +{ + CPU_SET_SSREG(vcpu, CORE_MODE.CORE_MODE_reg, core_mode.CORE_MODE_reg); +} +static inline unsigned int +kvm_get_guest_vcpu_CORE_MODE_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_SSREG(vcpu, CORE_MODE.CORE_MODE_reg); +} +static inline e2k_core_mode_t +kvm_get_guest_vcpu_CORE_MODE(struct kvm_vcpu *vcpu) +{ + e2k_core_mode_t core_mode; + + core_mode.CORE_MODE_reg = kvm_get_guest_vcpu_CORE_MODE_value(vcpu); + return core_mode; +} + +static inline void +kvm_set_guest_vcpu_PSR(struct kvm_vcpu *vcpu, e2k_psr_t psr) +{ + CPU_SET_SSREG(vcpu, E2K_PSR.PSR_reg, psr.PSR_reg); +} +static inline unsigned int +kvm_get_guest_vcpu_PSR_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_SSREG(vcpu, E2K_PSR.PSR_reg); +} +static inline e2k_psr_t +kvm_get_guest_vcpu_PSR(struct kvm_vcpu *vcpu) +{ + e2k_psr_t psr; + + psr.PSR_reg = kvm_get_guest_vcpu_PSR_value(vcpu); + return psr; +} + +static inline void +kvm_set_guest_vcpu_UPSR(struct kvm_vcpu *vcpu, e2k_upsr_t upsr) +{ + CPU_SET_SSREG(vcpu, UPSR.UPSR_reg, upsr.UPSR_reg); +} +static inline unsigned int +kvm_get_guest_vcpu_UPSR_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_SSREG(vcpu, UPSR.UPSR_reg); +} +static inline e2k_upsr_t +kvm_get_guest_vcpu_UPSR(struct kvm_vcpu *vcpu) +{ + e2k_upsr_t upsr; + + upsr.UPSR_reg = 
kvm_get_guest_vcpu_UPSR_value(vcpu); + return upsr; +} + +static inline void +kvm_set_guest_vcpu_under_upsr(struct kvm_vcpu *vcpu, bool under_upsr) +{ + VCPU_IRQS_UNDER_UPSR(vcpu) = under_upsr; +} +static inline bool +kvm_get_guest_vcpu_under_upsr(struct kvm_vcpu *vcpu) +{ + return VCPU_IRQS_UNDER_UPSR(vcpu); +} +static inline void +kvm_set_guest_vcpu_IDR(struct kvm_vcpu *vcpu, e2k_idr_t idr) +{ + CPU_SET_DSREG(vcpu, IDR.IDR_reg, idr.IDR_reg); +} + +static inline u64 +kvm_get_guest_vcpu_CTPR_value(struct kvm_vcpu *vcpu, int CTPR_no) +{ + switch (CTPR_no) { + case 1: return CPU_GET_DSREG(vcpu, CTPR1.CTPR_reg); + break; + case 2: return CPU_GET_DSREG(vcpu, CTPR2.CTPR_reg); + break; + case 3: return CPU_GET_DSREG(vcpu, CTPR3.CTPR_reg); + break; + default: + BUG_ON(true); + return -1UL; + } +} +static inline e2k_ctpr_t +kvm_get_guest_vcpu_CTPR(struct kvm_vcpu *vcpu, int CTPR_no) +{ + e2k_ctpr_t CTPR; + + CTPR.CTPR_reg = kvm_get_guest_vcpu_CTPR_value(vcpu, CTPR_no); + return CTPR; +} +static inline e2k_ctpr_t +kvm_get_guest_vcpu_CTPR1(struct kvm_vcpu *vcpu) +{ + return kvm_get_guest_vcpu_CTPR(vcpu, 1); +} +static inline e2k_ctpr_t +kvm_get_guest_vcpu_CTPR2(struct kvm_vcpu *vcpu) +{ + return kvm_get_guest_vcpu_CTPR(vcpu, 2); +} +static inline e2k_ctpr_t +kvm_get_guest_vcpu_CTPR3(struct kvm_vcpu *vcpu) +{ + return kvm_get_guest_vcpu_CTPR(vcpu, 3); +} + +static inline void +kvm_set_guest_vcpu_CTPR(struct kvm_vcpu *vcpu, e2k_ctpr_t CTPR, int CTPR_no) +{ + switch (CTPR_no) { + case 1: + CPU_SET_DSREG(vcpu, CTPR1.CTPR_reg, CTPR.CTPR_reg); + break; + case 2: + CPU_SET_DSREG(vcpu, CTPR2.CTPR_reg, CTPR.CTPR_reg); + break; + case 3: + CPU_SET_DSREG(vcpu, CTPR3.CTPR_reg, CTPR.CTPR_reg); + break; + default: + BUG_ON(true); + } +} +static inline void +kvm_set_guest_vcpu_CTPR1(struct kvm_vcpu *vcpu, e2k_ctpr_t CTPR) +{ + kvm_set_guest_vcpu_CTPR(vcpu, CTPR, 1); +} +static inline void +kvm_set_guest_vcpu_CTPR2(struct kvm_vcpu *vcpu, e2k_ctpr_t CTPR) +{ + kvm_set_guest_vcpu_CTPR(vcpu, 
CTPR, 2); +} +static inline void +kvm_set_guest_vcpu_CTPR3(struct kvm_vcpu *vcpu, e2k_ctpr_t CTPR) +{ + kvm_set_guest_vcpu_CTPR(vcpu, CTPR, 3); +} + +static inline void +kvm_set_guest_vcpu_LSR(struct kvm_vcpu *vcpu, u64 lsr) +{ + CPU_SET_DSREG(vcpu, LSR.LSR_reg, lsr); +} +static inline u64 +kvm_get_guest_vcpu_LSR_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_DSREG(vcpu, LSR.LSR_reg); +} + +static inline void +kvm_set_guest_vcpu_ILCR(struct kvm_vcpu *vcpu, u64 ilcr) +{ + CPU_SET_DSREG(vcpu, ILCR.ILCR_reg, ilcr); +} +static inline u64 +kvm_get_guest_vcpu_ILCR_value(struct kvm_vcpu *vcpu) +{ + return CPU_GET_DSREG(vcpu, ILCR.ILCR_reg); +} + +static inline void +kvm_set_guest_vcpu_SBBP(struct kvm_vcpu *vcpu, int sbbp_no, u64 sbbp) +{ + CPU_SET_SBBP(vcpu, sbbp_no, sbbp); +} + +static inline void +kvm_copy_guest_vcpu_SBBP(struct kvm_vcpu *vcpu, u64 *sbbp) +{ + CPU_COPY_SBBP(vcpu, sbbp); +} + +static inline u64 +kvm_get_guest_vcpu_SBBP(struct kvm_vcpu *vcpu, int sbbp_no) +{ + u64 sbbp; + + BUG_ON(sbbp_no > SBBP_ENTRIES_NUM); + sbbp = CPU_GET_SBBP(vcpu, sbbp_no); + return sbbp; +} + +static inline void +kvm_set_guest_vcpu_TIR_lo(struct kvm_vcpu *vcpu, + int TIR_no, e2k_tir_lo_t TIR_lo) +{ + CPU_SET_TIR_lo(vcpu, TIR_no, TIR_lo.TIR_lo_reg); +} + +static inline void +kvm_set_guest_vcpu_TIR_hi(struct kvm_vcpu *vcpu, + int TIR_no, e2k_tir_hi_t TIR_hi) +{ + CPU_SET_TIR_hi(vcpu, TIR_no, TIR_hi.TIR_hi_reg); +} + +static inline void +kvm_set_guest_vcpu_TIRs_num(struct kvm_vcpu *vcpu, int TIRs_num) +{ + CPU_SET_SREG(vcpu, TIRs_num, TIRs_num); +} + +static inline void +kvm_reset_guest_vcpu_TIRs_num(struct kvm_vcpu *vcpu) +{ + kvm_set_guest_vcpu_TIRs_num(vcpu, -1); +} + +static inline int +kvm_get_guest_vcpu_TIRs_num(struct kvm_vcpu *vcpu) +{ + return CPU_GET_SREG(vcpu, TIRs_num); +} + +static inline e2k_tir_lo_t +kvm_get_guest_vcpu_TIR_lo(struct kvm_vcpu *vcpu, int TIR_no) +{ + e2k_tir_lo_t TIR_lo; + + BUG_ON(TIR_no > kvm_get_guest_vcpu_TIRs_num(vcpu)); + TIR_lo.TIR_lo_reg = 
CPU_GET_TIR_lo(vcpu, TIR_no); + return TIR_lo; +} + +static inline e2k_tir_hi_t +kvm_get_guest_vcpu_TIR_hi(struct kvm_vcpu *vcpu, int TIR_no) +{ + e2k_tir_hi_t TIR_hi; + + BUG_ON(TIR_no > kvm_get_guest_vcpu_TIRs_num(vcpu)); + TIR_hi.TIR_hi_reg = CPU_GET_TIR_hi(vcpu, TIR_no); + return TIR_hi; +} + +static inline bool kvm_check_is_guest_TIRs_empty(struct kvm_vcpu *vcpu) +{ + if (kvm_get_guest_vcpu_TIRs_num(vcpu) < 0) + return true; + /* TIRs have traps */ + return false; +} + +static inline unsigned long +kvm_update_guest_vcpu_TIR(struct kvm_vcpu *vcpu, + int TIR_no, e2k_tir_hi_t TIR_hi, e2k_tir_lo_t TIR_lo) +{ + e2k_tir_lo_t g_TIR_lo; + e2k_tir_hi_t g_TIR_hi; + unsigned long trap_mask; + int TIRs_num; + int tir; + + TIRs_num = kvm_get_guest_vcpu_TIRs_num(vcpu); + if (TIRs_num < TIR_no) { + for (tir = TIRs_num + 1; tir < TIR_no; tir++) { + g_TIR_lo.TIR_lo_reg = GET_CLEAR_TIR_LO(tir); + g_TIR_hi.TIR_hi_reg = GET_CLEAR_TIR_HI(tir); + kvm_set_guest_vcpu_TIR_lo(vcpu, tir, g_TIR_lo); + kvm_set_guest_vcpu_TIR_hi(vcpu, tir, g_TIR_hi); + } + g_TIR_lo.TIR_lo_reg = GET_CLEAR_TIR_LO(TIR_no); + g_TIR_hi.TIR_hi_reg = GET_CLEAR_TIR_HI(TIR_no); + } else { + g_TIR_hi = kvm_get_guest_vcpu_TIR_hi(vcpu, TIR_no); + g_TIR_lo = kvm_get_guest_vcpu_TIR_lo(vcpu, TIR_no); + BUG_ON(g_TIR_hi.TIR_hi_j != TIR_no); + if (TIR_lo.TIR_lo_ip == 0 && g_TIR_lo.TIR_lo_ip != 0) + /* some traps can be caused by kernel and have not */ + /* precision IP (for example hardware stack bounds) */ + TIR_lo.TIR_lo_ip = g_TIR_lo.TIR_lo_ip; + else if (TIR_lo.TIR_lo_ip != 0 && g_TIR_lo.TIR_lo_ip == 0) + /* new trap IP will be common for other traps */ + ; + else + BUG_ON(g_TIR_lo.TIR_lo_ip != TIR_lo.TIR_lo_ip); + } + g_TIR_hi.TIR_hi_reg |= TIR_hi.TIR_hi_reg; + g_TIR_lo.TIR_lo_reg |= TIR_lo.TIR_lo_reg; + kvm_set_guest_vcpu_TIR_hi(vcpu, TIR_no, g_TIR_hi); + kvm_set_guest_vcpu_TIR_lo(vcpu, TIR_no, g_TIR_lo); + trap_mask = TIR_hi.TIR_hi_exc; + trap_mask |= SET_AA_TIRS(0UL, GET_AA_TIRS(TIR_hi.TIR_hi_reg)); + if (TIR_no > 
TIRs_num) + kvm_set_guest_vcpu_TIRs_num(vcpu, TIR_no); + return trap_mask; +} + +static inline bool kvm_guest_vcpu_irqs_disabled(struct kvm_vcpu *vcpu, + unsigned long upsr_reg, unsigned long psr_reg) +{ + return psr_and_upsr_irqs_disabled_flags(psr_reg, upsr_reg); +} + +static inline bool kvm_get_guest_vcpu_sge(struct kvm_vcpu *vcpu) +{ + unsigned long psr_reg; + + psr_reg = kvm_get_guest_vcpu_PSR_value(vcpu); + return (psr_reg & PSR_SGE) != 0; +} + +/* VCPU AAU context model access */ + +static inline void +kvm_set_guest_vcpu_aasr_value(struct kvm_vcpu *vcpu, u32 reg_value) +{ + AAU_SET_SREG(vcpu, aasr.word, reg_value); +} +static inline void +kvm_set_guest_vcpu_aasr(struct kvm_vcpu *vcpu, e2k_aasr_t aasr) +{ + kvm_set_guest_vcpu_aasr_value(vcpu, AW(aasr)); +} + +static inline u32 +kvm_get_guest_vcpu_aasr_value(struct kvm_vcpu *vcpu) +{ + return AAU_GET_SREG(vcpu, aasr.word); +} +static inline e2k_aasr_t +kvm_get_guest_vcpu_aasr(struct kvm_vcpu *vcpu) +{ + e2k_aasr_t aasr; + + AW(aasr) = kvm_get_guest_vcpu_aasr_value(vcpu); + return aasr; +} + +static inline void +kvm_set_guest_vcpu_aafstr_value(struct kvm_vcpu *vcpu, u32 reg_value) +{ + AAU_SET_SREG(vcpu, aafstr, reg_value); +} + +static inline u32 +kvm_get_guest_vcpu_aafstr_value(struct kvm_vcpu *vcpu) +{ + return AAU_GET_SREG(vcpu, aafstr); +} + +static inline void +kvm_set_guest_vcpu_aaldm_value(struct kvm_vcpu *vcpu, u64 reg_value) +{ + AAU_SET_DREG(vcpu, aaldm.word, reg_value); +} +static inline void +kvm_set_guest_vcpu_aaldm(struct kvm_vcpu *vcpu, e2k_aaldm_t aaldm) +{ + kvm_set_guest_vcpu_aaldm_value(vcpu, AW(aaldm)); +} + +static inline u64 +kvm_get_guest_vcpu_aaldm_value(struct kvm_vcpu *vcpu) +{ + return AAU_GET_DREG(vcpu, aaldm.word); +} +static inline e2k_aaldm_t +kvm_get_guest_vcpu_aaldm(struct kvm_vcpu *vcpu) +{ + e2k_aaldm_t aaldm; + + AW(aaldm) = kvm_get_guest_vcpu_aaldm_value(vcpu); + return aaldm; +} +static inline void +kvm_set_guest_vcpu_aaldv_value(struct kvm_vcpu *vcpu, u64 reg_value) +{ + 
AAU_SET_DREG(vcpu, aaldv.word, reg_value); +} +static inline void +kvm_set_guest_vcpu_aaldv(struct kvm_vcpu *vcpu, e2k_aaldv_t aaldv) +{ + kvm_set_guest_vcpu_aaldv_value(vcpu, AW(aaldv)); +} + +static inline u64 +kvm_get_guest_vcpu_aaldv_value(struct kvm_vcpu *vcpu) +{ + return AAU_GET_DREG(vcpu, aaldv.word); +} +static inline e2k_aaldv_t +kvm_get_guest_vcpu_aaldv(struct kvm_vcpu *vcpu) +{ + e2k_aaldv_t aaldv; + + AW(aaldv) = kvm_get_guest_vcpu_aaldv_value(vcpu); + return aaldv; +} + +static inline void +kvm_set_guest_vcpu_aasti_value(struct kvm_vcpu *vcpu, int AASTI_no, u64 value) +{ + AAU_SET_DREGS_ITEM(vcpu, aastis, AASTI_no, value); +} +static inline u64 +kvm_get_guest_vcpu_aasti_value(struct kvm_vcpu *vcpu, int AASTI_no) +{ + return AAU_GET_DREGS_ITEM(vcpu, aastis, AASTI_no); +} +static inline void +kvm_set_guest_vcpu_aasti_tags_value(struct kvm_vcpu *vcpu, u32 reg_value) +{ + AAU_SET_SREG(vcpu, aasti_tags, reg_value); +} +static inline u32 +kvm_get_guest_vcpu_aasti_tags_value(struct kvm_vcpu *vcpu) +{ + return AAU_GET_SREG(vcpu, aasti_tags); +} +static inline void +kvm_copy_to_guest_vcpu_aastis(struct kvm_vcpu *vcpu, u64 *aastis_from) +{ + AAU_COPY_TO_REGS(vcpu, aastis, aastis_from); +} +static inline void +kvm_copy_from_guest_vcpu_aastis(struct kvm_vcpu *vcpu, u64 *aastis_to) +{ + AAU_COPY_FROM_REGS(vcpu, aastis, aastis_to); +} + +static inline void +kvm_set_guest_vcpu_aaind_value(struct kvm_vcpu *vcpu, int AAIND_no, u64 value) +{ + AAU_SET_DREGS_ITEM(vcpu, aainds, AAIND_no, value); +} +static inline u64 +kvm_get_guest_vcpu_aaind_value(struct kvm_vcpu *vcpu, int AAIND_no) +{ + return AAU_GET_DREGS_ITEM(vcpu, aainds, AAIND_no); +} +static inline void +kvm_set_guest_vcpu_aaind_tags_value(struct kvm_vcpu *vcpu, u32 reg_value) +{ + AAU_SET_SREG(vcpu, aaind_tags, reg_value); +} +static inline u32 +kvm_get_guest_vcpu_aaind_tags_value(struct kvm_vcpu *vcpu) +{ + return AAU_GET_SREG(vcpu, aaind_tags); +} +static inline void +kvm_copy_to_guest_vcpu_aainds(struct 
kvm_vcpu *vcpu, u64 *aainds_from) +{ + AAU_COPY_TO_REGS(vcpu, aainds, aainds_from); +} +static inline void +kvm_copy_from_guest_vcpu_aainds(struct kvm_vcpu *vcpu, u64 *aainds_to) +{ + AAU_COPY_FROM_REGS(vcpu, aainds, aainds_to); +} + +static inline void +kvm_set_guest_vcpu_aaincr_value(struct kvm_vcpu *vcpu, int AAINCR_no, u64 value) +{ + AAU_SET_DREGS_ITEM(vcpu, aaincrs, AAINCR_no, value); +} +static inline u64 +kvm_get_guest_vcpu_aaincr_value(struct kvm_vcpu *vcpu, int AAINCR_no) +{ + return AAU_GET_DREGS_ITEM(vcpu, aaincrs, AAINCR_no); +} +static inline void +kvm_set_guest_vcpu_aaincr_tags_value(struct kvm_vcpu *vcpu, u32 reg_value) +{ + AAU_SET_SREG(vcpu, aaincr_tags, reg_value); +} +static inline u32 +kvm_get_guest_vcpu_aaincr_tags_value(struct kvm_vcpu *vcpu) +{ + return AAU_GET_SREG(vcpu, aaincr_tags); +} +static inline void +kvm_copy_to_guest_vcpu_aaincrs(struct kvm_vcpu *vcpu, u64 *aaincrs_from) +{ + AAU_COPY_TO_REGS(vcpu, aaincrs, aaincrs_from); +} +static inline void +kvm_copy_from_guest_vcpu_aaincrs(struct kvm_vcpu *vcpu, u64 *aaincrs_to) +{ + AAU_COPY_FROM_REGS(vcpu, aaincrs, aaincrs_to); +} + +static inline void +kvm_copy_to_guest_vcpu_aaldis(struct kvm_vcpu *vcpu, u64 *aaldis_from) +{ + u64 *aaldi = get_vcpu_aaldi_context(vcpu); + memcpy(aaldi, aaldis_from, AALDIS_REGS_NUM * sizeof(aaldi[0])); +} + +static inline void +kvm_copy_from_guest_vcpu_aaldis(struct kvm_vcpu *vcpu, u64 *aaldis_to) +{ + u64 *aaldi = get_vcpu_aaldi_context(vcpu); + memcpy(aaldis_to, aaldi, AALDIS_REGS_NUM * sizeof(aaldi[0])); +} + +static inline void +kvm_copy_to_guest_vcpu_aaldas(struct kvm_vcpu *vcpu, e2k_aalda_t *aaldas_from) +{ + e2k_aalda_t *aalda = get_vcpu_aalda_context(vcpu); + memcpy(aalda, aaldas_from, AALDAS_REGS_NUM * sizeof(aalda[0])); +} +static inline void +kvm_copy_from_guest_vcpu_aaldas(struct kvm_vcpu *vcpu, e2k_aalda_t *aaldas_to) +{ + e2k_aalda_t *aalda = get_vcpu_aalda_context(vcpu); + memcpy(aaldas_to, aalda, AALDAS_REGS_NUM * sizeof(aalda[0])); +} + 
+static inline void +kvm_set_guest_vcpu_aad(struct kvm_vcpu *vcpu, int AAD_no, e2k_aadj_t *aad) +{ + AAU_SET_STRUCT_REGS_ITEM(vcpu, aads, AAD_no, aad); +} +static inline void +kvm_get_guest_vcpu_aad(struct kvm_vcpu *vcpu, int AAD_no, e2k_aadj_t *aad) +{ + AAU_GET_STRUCT_REGS_ITEM(vcpu, aads, AAD_no, aad); +} +static inline void +kvm_copy_to_guest_vcpu_aads(struct kvm_vcpu *vcpu, e2k_aadj_t *aads_from) +{ + AAU_COPY_TO_REGS(vcpu, aads, aads_from); +} +static inline void +kvm_copy_from_guest_vcpu_aads(struct kvm_vcpu *vcpu, e2k_aadj_t *aads_to) +{ + AAU_COPY_FROM_REGS(vcpu, aads, aads_to); +} + +#endif /* __KVM_E2K_CPU_DEFS_H */ diff --git a/arch/e2k/kvm/csd_lock.c b/arch/e2k/kvm/csd_lock.c new file mode 100644 index 000000000000..db6937bd9013 --- /dev/null +++ b/arch/e2k/kvm/csd_lock.c @@ -0,0 +1,436 @@ +/* + * This file implements on host the arch-dependent parts of kvm guest + * csd_lock/csd_unlock functions to serialize access to per-cpu csd resources + * + * Copyright 2016 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "process.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_DEADLOCK_MODE +#undef DebugKVMDL +#define DEBUG_KVM_DEADLOCK_MODE 0 /* spinlock deadlock debugging */ +#define DebugKVMDL(fmt, args...) \ +({ \ + if (DEBUG_KVM_DEADLOCK_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_UNLOCK_MODE +#undef DebugKVMUL +#define DEBUG_KVM_UNLOCK_MODE 0 /* spinlock unlock debugging */ +#define DebugKVMUL(fmt, args...) \ +({ \ + if (DEBUG_KVM_UNLOCK_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_SHUTDOWN_MODE +#undef DebugKVMSH +#define DEBUG_KVM_SHUTDOWN_MODE 0 /* KVM shutdown debugging */ +#define DebugKVMSH(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_SHUTDOWN_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#ifdef CONFIG_SMP + +static inline csd_lock_waiter_t * +find_lock_in_csd_list(struct kvm *kvm, void *lock) +{ + csd_lock_waiter_t *w; + + list_for_each_entry(w, &kvm->arch.csd_lock_wait_head, wait_list) { + DebugKVM("next csd lock waiter list entry lock %px\n", + w->lock); + if (w->lock == lock) { + if (w->task) { + DebugKVM("csd lock %px found, task %s (%d) " + "VCPU #%d\n", + lock, w->task->comm, w->task->pid, + w->vcpu->vcpu_id); + } else { + DebugKVM("csd lock %px found, unlocked by " + "VCPU #%d\n", + lock, w->vcpu->vcpu_id); + } + return w; + } + } + return NULL; +} + +static void dump_waiter_list(struct kvm *kvm) +{ + csd_lock_waiter_t *w; + + list_for_each_entry(w, &kvm->arch.csd_lock_wait_head, wait_list) { + pr_alert("next csd lock waiter list entry %px: lock %px", + w, w->lock); + if (w->task) { + pr_cont("is waiting by task %s (%d) VCPU #%d\n", + w->task->comm, w->task->pid, + w->vcpu->vcpu_id); + } else { + pr_cont("is already unlocked by by VCPU #%d\n", + w->vcpu->vcpu_id); + } + pr_cont("\n"); + } +} + +/* Insert lock to waiting list as waiter entry */ +/* spinlock should be taken */ +static inline csd_lock_waiter_t * +queue_lock_to_waiter_list(struct kvm_vcpu *vcpu, void *lock) +{ + struct kvm *kvm = vcpu->kvm; + csd_lock_waiter_t *w; + + if (likely(!list_empty(&kvm->arch.csd_lock_free_head))) { + w = list_first_entry(&kvm->arch.csd_lock_free_head, + csd_lock_waiter_t, wait_list); + list_move_tail(&w->wait_list, + &kvm->arch.csd_lock_wait_head); + KVM_BUG_ON(vcpu->arch.host_task != current); + w->task = current; + w->lock = lock; + w->vcpu = vcpu; + } else { + pr_err("%s(): empty list of free csd lock waiter " + "structures\n", __func__); + dump_waiter_list(kvm); + BUG_ON(true); + return NULL; + } + DebugKVM("add csd lock %px to waiter list %px as waiter entry " + "on VCPU #%d\n", + w->lock, w, vcpu->vcpu_id); + return w; +} + +/* + * Register 
csd lock waiter structure. + * Now this function (and CSD ctl) is used to check if the lock already + * was queued as waiter for unlocking and only to debugging purposes. + * Should be deleted after debug completion. + */ +static inline int +guest_csd_lock(struct kvm_vcpu *vcpu, void *lock) +{ + struct kvm *kvm = vcpu->kvm; + csd_lock_waiter_t *w; + unsigned long flags; + + DebugKVM("%s (%d) started for guest csd lock %px on VCPU #%d\n", + current->comm, current->pid, lock, vcpu->vcpu_id); + + raw_spin_lock_irqsave(&kvm->arch.csd_spinlock, flags); + w = find_lock_in_csd_list(kvm, lock); + if (likely(w == NULL)) { + DebugKVM("csd lock %px on VCPU #%d is not queued to waiter " + "list\n", + lock, vcpu->vcpu_id); + /* Insert lock to waiting list as waiter entry */ + w = queue_lock_to_waiter_list(vcpu, lock); + raw_spin_unlock_irqrestore(&kvm->arch.csd_spinlock, flags); + if (w == NULL) + goto failed; + return 0; + } + raw_spin_unlock_irqrestore(&kvm->arch.csd_spinlock, flags); + if (likely(w->task)) { + pr_err("%s(): lock %px on VCPU #%d is now queued to waiter " + "list by task %s (%d) VCPU #%d\n", + __func__, lock, vcpu->vcpu_id, w->task->comm, + w->task->pid, w->vcpu->vcpu_id); + } else { + pr_err("%s(): lock %px on VCPU #%d is now queued to waiter " + "list as unlocked by VCPU #%d\n", + __func__, lock, vcpu->vcpu_id, w->vcpu->vcpu_id); + } + BUG_ON(true); +failed: + return -EBUSY; +} + +/* + * Unlock csd lock and wake up VCPU task waiting for unlocking + * Unlocking can outrun waiting VCPU, so if unlocking is first then queue + * new waiter structure as unlocked. + * If some VCPU thread already is queued as waiter for csd lock unlocking, + * then wake up waiting VCPU thread, dequeue and queue to free list + * the csd lock waiter structure. 
+ */ +static inline int +guest_csd_unlock(struct kvm_vcpu *vcpu, void *lock) +{ + struct kvm *kvm = vcpu->kvm; + csd_lock_waiter_t *w; + unsigned long flags; + + DebugKVM("%s (%d) started for guest csd lock %px on VCPU #%d\n", + current->comm, current->pid, lock, vcpu->vcpu_id); + + raw_spin_lock_irqsave(&kvm->arch.csd_spinlock, flags); + w = find_lock_in_csd_list(vcpu->kvm, lock); + if (likely(w != NULL)) { + /* there is waiter for lock unlocking */ + if (unlikely(w->task)) { + DebugKVM("guest csd lock %px on VCPU #%d is queued " + "as waiter task %s (%d) on VCPU #%d\n", + lock, vcpu->vcpu_id, + w->task->comm, w->task->pid, w->vcpu->vcpu_id); + wake_up_process(w->task); + } else { + pr_err("%s(): guest csd lock %px on VCPU #%d is queued " + "as unlocked by VCPU #%d\n", + __func__, lock, vcpu->vcpu_id, + w->vcpu->vcpu_id); + BUG_ON(true); + } + w->task = NULL; + w->vcpu = NULL; + w->lock = NULL; + list_move_tail(&w->wait_list, &kvm->arch.csd_lock_free_head); + raw_spin_unlock_irqrestore(&kvm->arch.csd_spinlock, flags); + return 0; + } + /* csd lock is not found at waiters list, so unlock is coming */ + /* earlier than lock waiting. Insert lock to waiting list as */ + /* unlocked entry */ + if (likely(!list_empty(&kvm->arch.csd_lock_free_head))) { + w = list_first_entry(&kvm->arch.csd_lock_free_head, + csd_lock_waiter_t, wait_list); + list_move_tail(&w->wait_list, &kvm->arch.csd_lock_wait_head); + w->task = NULL; + w->lock = lock; + w->vcpu = vcpu; + DebugKVMUL("guest csd lock %px on VCPU #%d could not find " + "at waiters for unlocking list, queue %px as unlocked\n", + lock, vcpu->vcpu_id, w); + } else { + pr_err("%s(): empty list of free csd lock waiter structures\n", + __func__); + dump_waiter_list(kvm); + BUG_ON(true); + } + raw_spin_unlock_irqrestore(&kvm->arch.csd_spinlock, flags); + return 0; +} + +/* + * Wait for csd lock unlocking + * Unlocking can outrun waiting VCPU, so if unlocking is first then queue + * new waiter structure as unlocked. 
+ * If some VCPU thread already is queued as waiter for csd lock unlocking, + * then wake up waiting VCPU thread, dequeue and queue to free list + * the csd lock waiter structure. + */ +static inline int +guest_csd_lock_wait(struct kvm_vcpu *vcpu, void *lock, bool try) +{ + struct kvm *kvm = vcpu->kvm; + csd_lock_waiter_t *w; + struct task_struct *guest_task; + unsigned long flags; + bool do_wait = false; + + DebugKVM("%s (%d) started for guest csd lock %px on VCPU #%d\n", + current->comm, current->pid, lock, vcpu->vcpu_id); + + KVM_BUG_ON(vcpu->arch.host_task != current); + guest_task = current; + GTI_BUG_ON(guest_task == NULL); + raw_spin_lock_irqsave(&kvm->arch.csd_spinlock, flags); + w = find_lock_in_csd_list(vcpu->kvm, lock); + if (likely(w == NULL)) { + int r; + struct kvm_vcpu *other_vcpu; + + /* csd lock is not found at waiters list as already */ + /* unlocked. */ + if (try) { + /* waiting does not need, nothing to do */ + raw_spin_unlock_irqrestore(&kvm->arch.csd_spinlock, + flags); + DebugKVM("none waiters and it is well case\n"); + return 0; + } + vcpu->arch.on_csd_lock = true; + wmb(); /* flag should be seen before read 'on_spinlock' or */ + /* other VCPU waiting state flags */ + if (kvm_test_pending_virqs(vcpu)) { + /* there are VIRQs to handle, goto to try handle */ + vcpu->arch.on_csd_lock = false; + raw_spin_unlock_irqrestore(&kvm->arch.csd_spinlock, + flags); + DebugKVM("there are pending VIRQs, try handle\n"); + return 0; + } + + kvm_for_each_vcpu(r, other_vcpu, kvm) { + if (other_vcpu == vcpu) + continue; + } + + /* Insert lock to waiting list as waiter entry */ + w = queue_lock_to_waiter_list(vcpu, lock); + if (w == NULL) + goto failed; + do_wait = true; + } else if (likely(w->task == NULL)) { + /* there is csd lock already unlocked entry */ + DebugKVM("guest csd lock %px on VCPU #%d is queued " + "as unlocked by VCPU #%d\n", + lock, vcpu->vcpu_id, w->vcpu->vcpu_id); + BUG_ON(!try && vcpu == w->vcpu); + do_wait = false; + goto unlocked; + } else 
if (w->task == guest_task) { + /* there is csd lock already waiter entry */ + DebugKVM("guest csd lock %px on VCPU #%d is queued " + "as waiter by VCPU #%d\n", + lock, vcpu->vcpu_id, w->vcpu->vcpu_id); + BUG_ON(!try && vcpu != w->vcpu); + vcpu->arch.on_csd_lock = true; + do_wait = true; + } else { + pr_err("%s(): guest csd lock %px on VCPU #%d is queued " + "as waiter task %s (%d) by other VCPU #%d\n", + __func__, lock, vcpu->vcpu_id, + w->task->comm, w->task->pid, w->vcpu->vcpu_id); + BUG_ON(true); + } + if (do_wait) { + set_current_state(TASK_INTERRUPTIBLE); + raw_spin_unlock_irqrestore(&kvm->arch.csd_spinlock, flags); + DebugKVM("go to schedule and wait for waking up\n"); + schedule(); + __set_current_state(TASK_RUNNING); + DebugKVM("guest csd lock %px on VCPU #%d is waked up\n", + lock, vcpu->vcpu_id); + if (fatal_signal_pending(current)) { + vcpu->arch.on_csd_lock = false; + DebugKVMSH("%s (%d) fatal signal received: spare " + "VCPU thread\n", + current->comm, current->pid); + kvm_spare_host_vcpu_release(vcpu); + return -ERESTARTSYS; + } + raw_spin_lock_irqsave(&kvm->arch.csd_spinlock, flags); + vcpu->arch.on_csd_lock = false; + goto unlocked; + } +unlocked: + /* lock already unlocked, dequeue and free lock structure */ + /* and return to guest */ + w->task = NULL; + w->vcpu = NULL; + w->lock = NULL; + list_move_tail(&w->wait_list, &kvm->arch.csd_lock_free_head); + raw_spin_unlock_irqrestore(&kvm->arch.csd_spinlock, flags); + return 0; + +failed: + /* other VCPU on spinlock waiting and */ + /* now cannot handle IPI or VCPU should */ + /* do dumping of guest state */ + vcpu->arch.on_csd_lock = false; + raw_spin_unlock_irqrestore(&kvm->arch.csd_spinlock, flags); + return -EBUSY; +} + +int kvm_guest_csd_lock_ctl(struct kvm_vcpu *vcpu, + csd_ctl_t csd_ctl_no, void *lock) +{ + switch (csd_ctl_no) { + case CSD_LOCK_CTL: + return guest_csd_lock(vcpu, lock); + case CSD_UNLOCK_CTL: + return guest_csd_unlock(vcpu, lock); + case CSD_LOCK_WAIT_CTL: + return 
guest_csd_lock_wait(vcpu, lock, false); + case CSD_LOCK_TRY_WAIT_CTL: + return guest_csd_lock_wait(vcpu, lock, true); + default: + pr_err("%s(): invalid CSD ctl number %d\n", + __func__, csd_ctl_no); + return -ENOSYS; + } +} + +int kvm_guest_csd_lock_init(struct kvm *kvm) +{ + csd_lock_waiter_t *w; + int i; + + kvm->arch.csd_spinlock = + __RAW_SPIN_LOCK_UNLOCKED(kvm->arch.csd_spinlock); + INIT_LIST_HEAD(&kvm->arch.csd_lock_wait_head); + INIT_LIST_HEAD(&kvm->arch.csd_lock_free_head); + for (i = 0; i < KVM_MAX_CSD_LOCK_FREE_NUM; i++) { + w = &kvm->arch.csd_lock_free_list[i]; + INIT_LIST_HEAD(&w->wait_list); + w->task = NULL; + w->vcpu = NULL; + w->lock = NULL; + list_add_tail(&w->wait_list, &kvm->arch.csd_lock_free_head); + } + return 0; +} + +static inline void destroy_csd_lock_waiter(csd_lock_waiter_t *w) +{ + DebugKVM("current csd lock waiter list entry %px\n", w); + if (likely(w->task != NULL)) { + DebugKVM("current csd lock waiter list entry VCPU #%d " + "task %s (%d) lock %px\n", + w->vcpu->vcpu_id, w->task->comm, w->task->pid, w->lock); + wake_up_process(w->task); + w->task = NULL; + w->lock = NULL; + w->vcpu = NULL; + } else { + DebugKVM("current csd lock waiter list entry unlocked " + "by VCPU #%d lock %px\n", + w->vcpu->vcpu_id, w->lock); + } +} +void kvm_guest_csd_lock_destroy(struct kvm *kvm) +{ + csd_lock_waiter_t *w; + csd_lock_waiter_t *tmp; + unsigned long flags; + + DebugKVM("started\n"); + + raw_spin_lock_irqsave(&kvm->arch.csd_spinlock, flags); + list_for_each_entry_safe(w, tmp, + &kvm->arch.csd_lock_wait_head, wait_list) { + destroy_csd_lock_waiter(w); + list_move_tail(&w->wait_list, &kvm->arch.csd_lock_free_head); + } + raw_spin_unlock_irqrestore(&kvm->arch.csd_spinlock, flags); +} + +#endif /* CONFIG_SMP */ diff --git a/arch/e2k/kvm/debug.c b/arch/e2k/kvm/debug.c new file mode 100644 index 000000000000..84295f0171de --- /dev/null +++ b/arch/e2k/kvm/debug.c @@ -0,0 +1,192 @@ + +/* + * CPU virtualization + * + * This work is licensed under the 
terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "cpu.h" +#include "gregs.h" +#include "process.h" +#include "gaccess.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +static void +copy_vcpu_stack_regs(struct kvm_vcpu *vcpu, stack_regs_t *const regs, + struct task_struct *task) +{ + guest_hw_stack_t *guest_stacks; + u64 cr_ind; + int i; + u64 psp_ind; + u64 sz; + void *dst; + void *src; + int ret; + + regs->valid = 0; + + if (vcpu == NULL) + return; + + guest_stacks = &vcpu->arch.guest_stacks; + if (!guest_stacks->valid) + /* nothing active guest process stacks */ + return; + + regs->crs = guest_stacks->crs; + regs->pcsp_hi = guest_stacks->stacks.pcsp_hi; + regs->pcsp_lo = guest_stacks->stacks.pcsp_lo; + regs->psp_hi = guest_stacks->stacks.psp_hi; + regs->psp_lo = guest_stacks->stacks.psp_lo; + regs->base_psp_stack = (void *)regs->psp_lo.PSP_lo_base; + regs->orig_base_psp_stack_u = (u64)regs->base_psp_stack; + regs->orig_base_psp_stack_k = (u64)regs->base_psp_stack; + regs->size_psp_stack = regs->psp_hi.PSP_hi_ind; + + regs->show_trap_regs = 0; + for (i = 0; i < MAX_USER_TRAPS; i++) { + regs->trap[i].valid = 0; + } + +#ifdef CONFIG_DATA_STACK_WINDOW + regs->base_k_data_stack = NULL; + for (i = 0; i < MAX_PT_REGS_SHOWN; i++) { + regs->pt_regs[i].valid = 0; + } +#endif + +#ifdef CONFIG_GREGS_CONTEXT + get_all_user_glob_regs(®s->gregs); + regs->gregs_valid = 1; +#endif + + /* + * Copy a part (or all) of the chain stack. + * If it fails then leave regs->valid set to 0. 
+ */ + regs->base_chain_stack = (u64 *)regs->base_chain_stack; + if (!regs->base_chain_stack) + goto out; + + cr_ind = regs->pcsp_hi.PCSP_hi_ind; + regs->size_chain_stack = min_t(u64, cr_ind, SIZE_CHAIN_STACK); + sz = regs->size_chain_stack; + + dst = regs->base_chain_stack; + src = (void *)regs->pcsp_lo.PCSP_lo_base + cr_ind - sz; + + /* Remember original stack address. */ + regs->orig_base_chain_stack_u = (u64)src; + regs->orig_base_chain_stack_k = (u64)src; + + /* FIXME: only guest system stacks can be correctly copied, */ + /* it need implement guest user stacks case */ + ret = kvm_vcpu_read_guest_system(vcpu, (gva_t)src, dst, sz); + if (ret != 0) { + pr_err("%s(): could not copy guest chain stacks from guest " + "virt address %px, size 0x%llx\n", + __func__, src, sz); + goto out; + } + + /* + * Copy a part (or all) of the procedure stack. + * Do _not_ set regs->valid to 0 if it fails + * (we can still print stack albeit without register windows) + */ + psp_ind = regs->psp_hi.PSP_hi_ind; + regs->base_psp_stack = (u64 *) regs->psp_stack_cache; + if (!regs->base_psp_stack) + goto finish_copying_psp_stack; + + regs->size_psp_stack = min_t(u64, psp_ind, SIZE_PSP_STACK); + + sz = regs->size_psp_stack; + + dst = regs->base_psp_stack; + + src = (void *)regs->psp_lo.PSP_lo_base + psp_ind - sz; + + /* FIXME: only guest system stacks can be correctly copied, */ + /* it need implement guest user stacks case */ + ret = kvm_vcpu_read_guest_system(vcpu, (gva_t)src, dst, sz); + if (ret != 0) { + pr_err("%s(): could not copy guest procedure stacks from guest " + "virt address %px, size 0x%llx\n", + __func__, src, sz); + regs->base_psp_stack = NULL; + goto finish_copying_psp_stack; + } + +finish_copying_psp_stack: + + regs->task = task; + regs->ignore_banner = true; + regs->valid = 1; + return; +out: + regs->valid = 0; + return; +} + +static void vcpu_stack_banner(struct kvm_vcpu *vcpu, gthread_info_t *gti) +{ + gmm_struct_t *gmm = NULL; + + if (gti != NULL) + gmm = gti->gmm; + 
+ pr_info("VCPU #%d GPID %d guest %s Thread\n", + vcpu->vcpu_id, vcpu->kvm->arch.vmid.nr, + (gmm == NULL) ? "Kernel" : "User"); + + if (gti != NULL) { + pr_alert("GUEST PROCESS: PID on host: %d , flags: 0x%lx\n", + gti->gpid->nid.nr, gti->flags); + } +} + +void kvm_dump_guest_stack(struct task_struct *task, + stack_regs_t *const regs, bool show_reg_window) +{ + thread_info_t *ti = task_thread_info(task); + struct kvm_vcpu *vcpu; + + if (!test_ti_thread_flag(ti, TIF_VIRTUALIZED_GUEST)) + /* guest is not running by this process */ + return; + vcpu = ti->vcpu; + if (vcpu == NULL) + /* guest process already completed */ + return; + copy_vcpu_stack_regs(vcpu, regs, task); + if (!regs->valid) { + if (vcpu->arch.guest_stacks.valid) { + pr_err("%s(): could not get VCPU stacks, so cannot " + "dump guest stacks\n", + __func__); + } + return; + } + + if (regs->ignore_banner) + vcpu_stack_banner(vcpu, ti->gthread_info); + print_chain_stack(regs, show_reg_window); +} diff --git a/arch/e2k/kvm/gaccess.c b/arch/e2k/kvm/gaccess.c new file mode 100644 index 000000000000..68d143a3f623 --- /dev/null +++ b/arch/e2k/kvm/gaccess.c @@ -0,0 +1,995 @@ + +/* + * Guest virtual and physical memory access to read from/write to + * Based on arch/x86/kvm/x86.c code + * + * Copyright (C) 2018, MCST. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#include +#include +#include +#include + +#include "gaccess.h" +#include "cpu.h" +#include "mmu.h" +#include "intercepts.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_COPY_MODE +#undef DebugCOPY +#define DEBUG_KVM_COPY_MODE 0 /* copy guest memory debugging */ +#define DebugCOPY(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_COPY_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_HOST_USER_COPY_MODE +#undef DebugHUCOPY +#define DEBUG_KVM_HOST_USER_COPY_MODE 0 /* copy host to/from user */ + /* memory debugging */ +#define DebugHUCOPY(fmt, args...) \ +({ \ + if (DEBUG_KVM_HOST_USER_COPY_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_HOST_GUEST_COPY_MODE +#undef DebugHGCOPY +#define DEBUG_KVM_HOST_GUEST_COPY_MODE 0 /* copy host to/from guest */ + /* memory debugging */ +#define DebugHGCOPY(fmt, args...) \ +({ \ + if (DEBUG_KVM_HOST_GUEST_COPY_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +static int kvm_vcpu_read_guest_virt_helper(gva_t addr, void *val, + unsigned int bytes, struct kvm_vcpu *vcpu, + u32 access, kvm_arch_exception_t *exception) +{ + void *data = val; + + while (bytes) { + gpa_t gpa = kvm_mmu_gva_to_gpa(vcpu, addr, access, exception); + unsigned offset = addr & ~PAGE_MASK; + unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); + int ret; + + if (gpa == UNMAPPED_GVA) + return -EFAULT; + ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(gpa), data, + offset, toread); + if (ret < 0) { + pr_err("%s(): could not read data from guest virt " + "addr 0x%lx (phys 0x%llx), size 0x%x\n", + __func__, addr, gpa, bytes); + return ret; + } + bytes -= toread; + data += toread; + addr += toread; + } + return 0; +} + +static int kvm_vcpu_read_guest_phys_helper(struct kvm_vcpu *vcpu, + gpa_t gpa, void *val, unsigned int bytes) +{ + void *data = val; + unsigned offset = gpa & ~PAGE_MASK; + int ret; + + ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(gpa), data, + offset, bytes); + if (ret < 0) { + pr_err("%s(): could not read data from guest phys addr 0x%llx, " + "size 0x%x\n", + __func__, gpa, bytes); + } + return ret; +} + +void kvm_vcpu_inject_page_fault(struct kvm_vcpu *vcpu, void *addr, + kvm_arch_exception_t *exception) +{ + trap_cellar_t tcellar; + tc_cond_t cond; + tc_fault_type_t ftype; + 
u32 error_code; + + AW(cond) = 0; + AS(cond).fmt = LDST_BYTE_FMT; + AW(ftype) = 0; + + KVM_BUG_ON(!exception->error_code_valid); + + error_code = exception->error_code; + if (error_code & PFERR_ONLY_VALID_MASK) { + AS(ftype).page_miss = 1; + } else if (error_code & PFERR_WRITE_MASK) { + AS(cond).store = 1; + AS(ftype).nwrite_page = 1; + } else if (exception->error_code & PFERR_IS_UNMAPPED_MASK) { + AS(ftype).illegal_page = 1; + } + AS(cond).fault_type = AW(ftype); + AS(cond).chan = 1; + + tcellar.address = (e2k_addr_t)addr; + tcellar.condition = cond; + tcellar.data = 0; + + kvm_inject_pv_vcpu_tc_entry(vcpu, &tcellar); + kvm_inject_data_page_exc_on_IP(vcpu, exception->ip); + kvm_inject_guest_traps_wish(vcpu, exc_data_page_num); +} + +/* can be used for instruction fetching */ +int kvm_vcpu_fetch_guest_virt(struct kvm_vcpu *vcpu, + gva_t addr, void *val, unsigned int bytes) +{ + kvm_arch_exception_t exception = { + error_code_valid : 0, + }; + unsigned offset; + int ret; + + /* Inline kvm_vcpu_read_guest_virt_helper for speed. 
*/ + gpa_t gpa = kvm_mmu_gva_to_gpa_fetch(vcpu, addr, &exception); + if (unlikely(gpa == UNMAPPED_GVA)) + return -EFAULT; + + offset = addr & ~PAGE_MASK; + if (WARN_ON(offset + bytes > PAGE_SIZE)) + bytes = (unsigned)PAGE_SIZE - offset; + ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(gpa), val, + offset, bytes); + if (unlikely(ret < 0)) { + pr_err("%s(): could not read data from guest virt addr 0x%lx " + "(phys 0x%llx), size 0x%x\n", + __func__, addr, gpa, bytes); + return ret; + } + if (unlikely(exception.error_code_valid)) { + pr_err("%s(): exception on read data from guest virt " + "addr 0x%lx (phys 0x%llx), size 0x%x\n", + __func__, addr, gpa, bytes); + return -EFAULT; + } + return 0; +} + +int kvm_vcpu_read_guest_virt_system(struct kvm_vcpu *vcpu, + gva_t addr, void *val, unsigned int bytes) +{ + kvm_arch_exception_t exception = { + error_code_valid : 0, + }; + int ret; + + ret = kvm_vcpu_read_guest_virt_helper(addr, val, bytes, vcpu, 0, + &exception); + if (ret < 0) + return ret; + + if (unlikely(exception.error_code_valid)) { + pr_err("%s(): exception on read data from guest virt " + "addr 0x%lx, size 0x%x\n", + __func__, addr, bytes); + return -EFAULT; + } + return 0; +} + +int kvm_vcpu_read_guest_system(struct kvm_vcpu *vcpu, + gva_t addr, void *val, unsigned int bytes) +{ + int ret; + + if (kvm_mmu_gva_is_gpa_range(vcpu, addr, bytes)) { + ret = kvm_vcpu_read_guest_phys_helper( + vcpu, (gpa_t)addr, val, bytes); + } else if (kvm_mmu_gva_is_gvpa_range(vcpu, addr, bytes)) { + gpa_t gpa; + + gpa = kvm_mmu_gvpa_to_gpa(addr); + ret = kvm_vcpu_read_guest_phys_helper(vcpu, gpa, val, bytes); + } else { + ret = kvm_vcpu_read_guest_virt_system(vcpu, addr, val, bytes); + } + + return ret; +} + +int kvm_vcpu_write_guest_virt_system(struct kvm_vcpu *vcpu, + gva_t addr, void *val, unsigned int bytes) +{ + void *data = val; + kvm_arch_exception_t exception = { + error_code_valid : 0, + }; + + while (bytes) { + gpa_t gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, 
&exception); + if (arch_is_error_gpa(gpa)) { + DebugKVM("failed to find GPA for dst %lx GVA, " + "inject page fault to guest\n", addr); + kvm_vcpu_inject_page_fault(vcpu, (void *)addr, + &exception); + return -EAGAIN; + } + unsigned offset = addr & ~PAGE_MASK; + unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); + int ret; + + ret = kvm_vcpu_write_guest_page(vcpu, gpa_to_gfn(gpa), data, + offset, towrite); + if (ret < 0) { + pr_err("%s(): could not write data to guest virt " + "addr 0x%lx (phys 0x%llx), size 0x%x\n", + __func__, addr, gpa, towrite); + return ret; + } + + bytes -= towrite; + data += towrite; + addr += towrite; + } + return 0; +} +EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_virt_system); + +static int kvm_vcpu_write_guest_phys_system(struct kvm_vcpu *vcpu, + gpa_t gpa, void *val, unsigned int bytes) +{ + void *data = val; + unsigned offset = gpa & ~PAGE_MASK; + int ret; + + ret = kvm_vcpu_write_guest_page(vcpu, gpa_to_gfn(gpa), data, + offset, bytes); + if (ret < 0) { + pr_err("%s(): could not write data to guest phys addr 0x%llx, " + "size 0x%x\n", + __func__, gpa, bytes); + } + return ret; +} + +int kvm_vcpu_write_guest_system(struct kvm_vcpu *vcpu, + gva_t addr, void *val, unsigned int bytes) +{ + long ret; + + if (kvm_mmu_gva_is_gpa_range(vcpu, addr, bytes)) { + ret = kvm_vcpu_write_guest_phys_system(vcpu, (gpa_t)addr, val, + bytes); + } else if (kvm_mmu_gva_is_gvpa_range(vcpu, addr, bytes)) { + gpa_t gpa; + + gpa = kvm_mmu_gvpa_to_gpa(addr); + ret = kvm_vcpu_write_guest_phys_system(vcpu, gpa, val, bytes); + } else { + ret = kvm_vcpu_write_guest_virt_system(vcpu, addr, val, bytes); + } + + return ret; +} + +long kvm_vcpu_set_guest_virt_system(struct kvm_vcpu *vcpu, + void *addr, u64 val, u64 tag, size_t size, u64 strd_opcode) +{ + size_t len = size; + long set = 0; + unsigned long memset_ret; + kvm_arch_exception_t exception; + + while (len) { + void *haddr; + long offset; + long towrite; + hva_t hva; + + hva = kvm_vcpu_gva_to_hva(vcpu, 
(gva_t)addr, true, &exception); + if (kvm_is_error_hva(hva)) { + DebugKVM("failed to find GPA for dst %lx GVA, " + "inject page fault to guest\n", addr); + kvm_vcpu_inject_page_fault(vcpu, (void *)addr, + &exception); + return -EAGAIN; + } + + haddr = (void *)hva; + offset = hva & ~PAGE_MASK; + towrite = min(len, (unsigned)PAGE_SIZE - offset); + + if (!access_ok(haddr, towrite)) + return -EFAULT; + SET_USR_PFAULT("$.recovery_memset_fault"); + memset_ret = recovery_memset_8(haddr, val, tag, + towrite, strd_opcode); + if (RESTORE_USR_PFAULT) + return -EFAULT; + if (memset_ret < towrite) { + pr_err("%s(): could not set data to guest virt " + "addr %px host addr %px, size 0x%lx, " + "error %ld\n", __func__, addr, haddr, + towrite, memset_ret); + return set; + } + + len -= towrite; + addr += towrite; + set += towrite; + } + return set; +} +EXPORT_SYMBOL_GPL(kvm_vcpu_set_guest_virt_system); + +static inline long copy_aligned_guest_virt_system(struct kvm_vcpu *vcpu, + void __user *dst, const void __user *src, size_t size, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch, int ALIGN) +{ + size_t len = size; + long copied = 0; + void *dst_arg = dst; + const void *src_arg = src; + void *haddr_dst = NULL, *haddr_src = NULL; + int to_dst = 0, to_src = 0, off, tail; + bool is_dst_len = true, is_src_len = true; + unsigned long memcpy_ret; + kvm_arch_exception_t exception; + + /* src can be not aligned */ + off = (u64)src & (ALIGN - 1); + + DebugCOPY("started to copy from %px to %px, size 0x%lx\n", + src, dst, size); + + /* dst & size should be 'ALIGN'-bytes aligned */ + KVM_BUG_ON(((u64)dst & (ALIGN - 1)) != 0); + KVM_BUG_ON((size & (ALIGN - 1)) != 0); + + while (len) { + unsigned offset_dst, offset_src; + int towrite; + hva_t hva_dst, hva_src; + + if (is_dst_len) { + KVM_BUG_ON(to_dst != 0); + hva_dst = kvm_vcpu_gva_to_hva(vcpu, (gva_t)dst, + true, &exception); + if (kvm_is_error_hva(hva_dst)) { + DebugCOPY("failed to find GPA for dst %lx GVA," + " inject 
page fault to guest\n", dst); + kvm_vcpu_inject_page_fault(vcpu, (void *)dst, + &exception); + return -EAGAIN; + } + + haddr_dst = (void *)hva_dst; + offset_dst = hva_dst & ~PAGE_MASK; + to_dst = min(len, (unsigned)PAGE_SIZE - offset_dst); + DebugCOPY("dst %px hva %px offset 0x%x size 0x%x\n", + dst, haddr_dst, offset_dst, to_dst); + KVM_BUG_ON((to_dst & (ALIGN - 1)) != 0); + } + if (is_src_len) { + KVM_BUG_ON(to_src > 0); + + hva_src = kvm_vcpu_gva_to_hva(vcpu, (gva_t)src, + false, &exception); + if (kvm_is_error_hva(hva_src)) { + DebugCOPY("failed to find GPA for dst %lx GVA," + " inject page fault to guest\n", src); + kvm_vcpu_inject_page_fault(vcpu, (void *)src, + &exception); + return -EAGAIN; + } + + haddr_src = (void *)hva_src; + if (unlikely(to_src < 0)) { + int ret; + + /* + * Current src address crosses the page + * boundaries and 'tail' bytes at the ending + * of the previous page were already copied, + * so copy remaining 'off' bytes at the + * beginning of the next page + */ + KVM_BUG_ON(to_dst < off); + ret = copy_in_user(haddr_dst, haddr_src, off); + if (ret) + return -EFAULT; + DebugCOPY("copy %d page off bytes from %px " + "to %px\n", + off, haddr_src, haddr_dst); + len -= off; + dst += off; + src += off; + haddr_dst += off; + haddr_src += off; + hva_src += off; + to_dst -= off; + copied += off; + to_src = 0; + DebugCOPY("len 0x%lx dst %px 0x%x " + "src %px 0x%x\n", + len, haddr_dst, to_dst, + haddr_src, to_src); + if (len == 0) + break; + } + offset_src = hva_src & ~PAGE_MASK; + if (len <= (unsigned)PAGE_SIZE - offset_src) { + to_src = len; + tail = 0; + } else { + to_src = (unsigned)PAGE_SIZE - offset_src; + tail = (off) ? 
ALIGN - off : 0; + to_src -= tail; + } + DebugCOPY("src %px hva %px offset 0x%x size 0x%x\n", + src, haddr_src, offset_src, to_src); + KVM_BUG_ON((to_src & (ALIGN - 1)) != 0); + } + + if (unlikely(to_src < ALIGN && tail != 0)) { + int ret; + + /* + * Current src address crosses the page boundaries + * and the remaining' tail' bytes at the ending of the + * page should be copied as separate bytes + */ + KVM_BUG_ON(to_src != 0); + KVM_BUG_ON(to_dst < tail); + ret = copy_in_user(haddr_dst, haddr_src, tail); + if (ret) + return -EFAULT; + DebugCOPY("copy %d page tail bytes from %px to %px\n", + tail, haddr_src, haddr_dst); + len -= tail; + dst += tail; + src += tail; + haddr_dst += tail; + haddr_src += tail; + to_dst -= tail; + to_src -= tail; + copied += tail; + tail = 0; + DebugCOPY("len 0x%lx dst %px 0x%x src %px 0x%x\n", + len, haddr_dst, to_dst, haddr_src, to_src); + is_src_len = true; + KVM_BUG_ON(to_dst <= 0); + is_dst_len = false; + continue; + } + + if (to_src + tail < to_dst) { + towrite = to_src; + is_src_len = true; + is_dst_len = false; + } else if (to_src + tail > to_dst) { + towrite = to_dst; + is_src_len = false; + is_dst_len = true; + } else { + towrite = to_src; + is_src_len = true; + is_dst_len = true; + } + + DebugCOPY("copy from %px to %px size 0x%x\n", + haddr_src, haddr_dst, towrite); + KVM_BUG_ON((towrite & (ALIGN - 1)) != 0 || len == 0); + if (towrite) { + /* fast copy 'ALIGN'-bytes aligned and */ + /* within one page dst and src areas */ + if (!access_ok(haddr_dst, towrite) || + !access_ok(haddr_src, towrite)) + return -EFAULT; + + if (trace_host_copy_hva_area_enabled()) + trace_host_copy_hva_area(haddr_dst, haddr_src, + towrite); + + SET_USR_PFAULT("$.recovery_memcpy_fault"); + memcpy_ret = recovery_memcpy_8(haddr_dst, haddr_src, + towrite, strd_opcode, ldrd_opcode, + prefetch); + if (RESTORE_USR_PFAULT) + return -EFAULT; + if (trace_host_hva_area_line_enabled()) { + trace_host_hva_area((u64 *)haddr_src, memcpy_ret); + 
trace_host_hva_area((u64 *)haddr_dst, memcpy_ret); + } + if (memcpy_ret < towrite) { + pr_err("%s(): could not copy data to guest " + "virt addr %px host addr %px, from " + "guest virt addr %px host addr %px " + "size 0x%x, error %ld\n", + __func__, dst, haddr_dst, + src, haddr_src, towrite, memcpy_ret); + return copied; + } + + len -= towrite; + dst += towrite; + src += towrite; + haddr_dst += towrite; + haddr_src += towrite; + to_dst -= towrite; + to_src -= towrite; + copied += towrite; + DebugCOPY("len 0x%lx dst %px 0x%x src %px 0x%x\n", + len, haddr_dst, to_dst, haddr_src, to_src); + if (len == 0) + break; + } + } + + KVM_BUG_ON(len != 0); + KVM_BUG_ON(src != src_arg + size); + KVM_BUG_ON(dst != dst_arg + size); + + return copied; +} + +long kvm_vcpu_copy_guest_virt_system(struct kvm_vcpu *vcpu, + void __user *dst, const void __user *src, size_t size, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return copy_aligned_guest_virt_system(vcpu, dst, src, size, + strd_opcode, ldrd_opcode, prefetch, 8); +} +EXPORT_SYMBOL_GPL(kvm_vcpu_copy_guest_virt_system); + +long kvm_vcpu_copy_guest_virt_system_16(struct kvm_vcpu *vcpu, + void __user *dst, const void __user *src, size_t size, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return copy_aligned_guest_virt_system(vcpu, dst, src, size, + strd_opcode, ldrd_opcode, prefetch, 16); +} +EXPORT_SYMBOL_GPL(kvm_vcpu_copy_guest_virt_system_16); + +static int kvm_vcpu_copy_host_guest(struct kvm_vcpu *vcpu, + void *host, void __user *guest, size_t size, bool to_host, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + size_t len = size, quad; + unsigned long hva; + void *dst_addr = NULL, *src_addr = NULL, *guest_addr = NULL; + unsigned guest_off, hva_len = 0; + int head, head_len, tail, tail_len, ret; + kvm_arch_exception_t exception; + + if (to_host) { + dst_addr = host; + DebugHGCOPY("started to copy from guest %px to host %px, " + "size 
0x%lx\n", guest, host, size); + } else { + src_addr = host; + DebugHGCOPY("started to copy from host %px to guest %px, " + "size 0x%lx\n", host, guest, size); + } + + /* dst can be not quad aligned, but it must be aligned + * when copying with tags */ + head = (16 - ((unsigned long) dst_addr & 0xf)) & 0xf; + head = min(head, len); + + /* copy not quad aligned head of transfered data */ + while (head) { + hva = kvm_vcpu_gva_to_hva(vcpu, (gva_t)guest, + !to_host, &exception); + if (kvm_is_error_hva(hva)) { + DebugHGCOPY("failed to find GPA for dst %lx GVA, " + "inject page fault to guest\n", guest); + kvm_vcpu_inject_page_fault(vcpu, (void *)guest, + &exception); + return -EAGAIN; + } + + guest_addr = (void *)hva; + if (to_host) + src_addr = guest_addr; + else + dst_addr = guest_addr; + guest_off = (u64)guest_addr & ~PAGE_MASK; + hva_len = (unsigned)PAGE_SIZE - guest_off; + head_len = min(head, hva_len); + + DebugHGCOPY("copy head from %px to %px, size 0x%x\n", + src_addr, dst_addr, head_len); + if (to_host) { + ret = copy_from_user(dst_addr, src_addr, head_len); + } else { + ret = copy_to_user(dst_addr, src_addr, head_len); + } + if (ret) { + pr_err("%s(): could not copy 0x%x bytes from %px to %px, not copied 0x%x bytes\n", + __func__, head_len, src_addr, dst_addr, ret); + return -EFAULT; + } + + dst_addr += head_len; + src_addr += head_len; + guest += head_len; + host += head_len; + hva_len -= head_len; + head -= head_len; + len -= head_len; + DebugHGCOPY("len 0x%lx dst %px src %px hva len 0x%x\n", + len, dst_addr, src_addr, hva_len); + }; + + if (unlikely(len == 0)) + goto out; + + /* now dst & size should be quad aligned */ + KVM_BUG_ON(((u64)dst_addr & (16 - 1)) != 0); + tail = len & (16 - 1); + quad = len - tail; + if (unlikely(quad == 0)) + goto tail_copy; + + while (quad) { + size_t quad_len; + int quad_tail, tail_len; + + if (hva_len == 0) { + hva = kvm_vcpu_gva_to_hva(vcpu, (gva_t)guest, + !to_host, &exception); + if (kvm_is_error_hva(hva)) { + 
DebugHGCOPY("failed to find GPA for dst %lx " + "GVA, inject page fault to guest\n", + guest); + kvm_vcpu_inject_page_fault(vcpu, (void *)guest, + &exception); + return -EAGAIN; + } + + guest_addr = (void *)hva; + if (to_host) + src_addr = guest_addr; + else + dst_addr = guest_addr; + guest_off = (u64)guest_addr & ~PAGE_MASK; + hva_len = (unsigned)PAGE_SIZE - guest_off; + } + + quad_len = min(quad, hva_len); + quad_tail = quad_len & (16 - 1); + quad_len -= quad_tail; + if (unlikely(quad_len == 0)) + goto quad_tail_copy; + + DebugHGCOPY("copy from %px to %px size 0x%lx\n", + src_addr, dst_addr, quad_len); + if (!access_ok(guest_addr, quad_len)) { + pr_err("%s(): guest HVA %px, size 0x%lx is bad\n", + __func__, guest_addr, quad_len); + return -EFAULT; + } + /* fast copy quad aligned and within one page of guest */ + SET_USR_PFAULT("$.recovery_memcpy_fault"); + ret = recovery_memcpy_8(dst_addr, src_addr, quad_len, + strd_opcode, ldrd_opcode, prefetch); + if (RESTORE_USR_PFAULT) + return -EFAULT; + if (ret < quad_len) { + pr_err("%s(): could not copy 0x%lx bytes from %px to %px, not copied 0x%x bytes\n", + __func__, quad_len, src_addr, dst_addr, ret); + return -EFAULT; + } + dst_addr += quad_len; + src_addr += quad_len; + guest += quad_len; + host += quad_len; + hva_len -= quad_len; + quad -= quad_len; + len -= quad_len; + DebugHGCOPY("len 0x%lx dst %px src %px hva len 0x%x\n", + len, dst_addr, src_addr, hva_len); + +quad_tail_copy: + if (likely(quad_tail == 0)) + continue; + + do { + if (hva_len == 0) { + hva = kvm_vcpu_gva_to_hva(vcpu, (gva_t)guest, + !to_host, &exception); + if (kvm_is_error_hva(hva)) { + DebugHGCOPY("failed to find GPA for " + "dst %lx GVA, inject page " + "fault to guest\n", guest); + kvm_vcpu_inject_page_fault(vcpu, + (void *)guest, + &exception); + return -EAGAIN; + } + + guest_addr = (void *)hva; + if (to_host) + src_addr = guest_addr; + else + dst_addr = guest_addr; + guest_off = (u64)guest_addr & ~PAGE_MASK; + hva_len = (unsigned)PAGE_SIZE - 
guest_off; + } + tail_len = min(quad_tail, hva_len); + DebugHGCOPY("copy quad tail from %px to %px, size 0x%x\n", + src_addr, dst_addr, tail_len); + if (to_host) { + ret = copy_from_user(dst_addr, src_addr, tail_len); + } else { + ret = copy_to_user(dst_addr, src_addr, tail_len); + } + if (ret) { + pr_err("%s(): could not copy 0x%x bytes from %px to %px, not copied 0x%x bytes\n", + __func__, tail_len, src_addr, dst_addr, ret); + return -EFAULT; + } + dst_addr += tail_len; + src_addr += tail_len; + guest += tail_len; + host += tail_len; + hva_len -= tail_len; + quad_tail -= tail_len; + len -= tail_len; + DebugHGCOPY("len 0x%lx dst %px src %px hva len 0x%x\n", + len, dst_addr, src_addr, hva_len); + } while (quad_tail > 0); + } + if (likely(len == 0)) + goto out; + +tail_copy: + if (likely(tail == 0)) + goto out; + + /* copy not quad aligned tail of transfered data */ + do { + if (hva_len == 0) { + hva = kvm_vcpu_gva_to_hva(vcpu, (gva_t)guest, + !to_host, &exception); + if (kvm_is_error_hva(hva)) { + DebugHGCOPY("failed to find GPA for dst %lx " + "GVA, inject page fault to guest\n", + guest); + kvm_vcpu_inject_page_fault(vcpu, (void *)guest, + &exception); + return -EAGAIN; + } + + guest_addr = (void *)hva; + if (to_host) + src_addr = guest_addr; + else + dst_addr = guest_addr; + guest_off = (u64)guest_addr & ~PAGE_MASK; + hva_len = (unsigned)PAGE_SIZE - guest_off; + } + tail_len = min(tail, hva_len); + DebugHGCOPY("copy tail from %px to %px, size 0x%x\n", + src_addr, dst_addr, tail_len); + if (to_host) { + ret = copy_from_user(dst_addr, src_addr, tail_len); + } else { + ret = copy_to_user(dst_addr, src_addr, tail_len); + } + if (ret) { + pr_err("%s(): could not copy 0x%x bytes from %px to %px, not copied 0x%x bytes\n", + __func__, tail_len, src_addr, dst_addr, ret); + return -EFAULT; + } + dst_addr += tail_len; + src_addr += tail_len; + guest += tail_len; + host += tail_len; + hva_len -= tail_len; + tail -= tail_len; + len -= tail_len; + DebugHGCOPY("len 0x%lx dst 
%px src %px hva len 0x%x\n", + len, dst_addr, src_addr, hva_len); + } while (tail > 0); + +out: + return size; +} + +int kvm_vcpu_copy_host_to_guest(struct kvm_vcpu *vcpu, + const void *host, void __user *guest, size_t size, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return kvm_vcpu_copy_host_guest(vcpu, (void *)host, guest, size, + false, strd_opcode, ldrd_opcode, prefetch); +} + +int kvm_vcpu_copy_host_from_guest(struct kvm_vcpu *vcpu, + void *host, const void __user *guest, size_t size, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return kvm_vcpu_copy_host_guest(vcpu, host, (void __user *)guest, size, + true, strd_opcode, ldrd_opcode, prefetch); +} + +unsigned long kvm_copy_in_user_with_tags(void __user *to, + const void __user *from, unsigned long n) +{ + int ret; + + ret = kvm_vcpu_copy_guest_virt_system(current_thread_info()->vcpu, + to, (void __user *)from, n, + TAGGED_MEM_STORE_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + TAGGED_MEM_LOAD_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + true /* prefetch */); + if (likely(ret == n)) { + return 0; + } else { + /* copying failed or not all was copyed */ + return ret; + } +} + +unsigned long kvm_copy_to_user_with_tags(void *__user to, + const void *from, unsigned long n) +{ + struct kvm_vcpu *vcpu = native_current_thread_info()->vcpu; + kvm_arch_exception_t exception; + + if (unlikely(((long) to & 0x7) || ((long) from & 0x7) || (n & 0x7))) { + DebugHUCOPY("%s(): to=%px from=%px n=%ld\n", + __func__, to, from, n); + return n; + } + + while (n) { + size_t left, copy_len, hva_off; + __user void *dst; + + hva_t to_hva = kvm_vcpu_gva_to_hva(vcpu, (__force gva_t) to, + true, &exception); + if (kvm_is_error_hva(to_hva)) { + DebugHUCOPY("failed to find GPA for dst %lx GVA, " + "inject page fault to guest\n", to); + kvm_vcpu_inject_page_fault(vcpu, (void *)to, + &exception); + return -EAGAIN; + } + + hva_off = to_hva & 
~PAGE_MASK; + copy_len = min((size_t)PAGE_SIZE - hva_off, n); + + DebugHUCOPY("copy from %px to %lx size 0x%lx\n", + from, to_hva, copy_len); + KVM_BUG_ON(copy_len <= 0); + /* We are working with guest kernel's stacks which are + * located below usual hardware stacks area (USER_ADDR_MAX), + * thus there is no need to bypass access_ok() check. */ + dst = (__user void *) to_hva; + left = copy_to_user_with_tags(dst, from, copy_len); + if (unlikely(left)) { + pr_err("%s(): error: copied 0x%lx/0x%lx bytes from %px to %px\n", + __func__, copy_len - left, copy_len, from, to); + return n - (copy_len - left); + } + + to += copy_len; + from += copy_len; + n -= copy_len; + } + + return 0; +} + +unsigned long kvm_copy_from_user_with_tags(void *to, + const void __user *from, unsigned long n) +{ + struct kvm_vcpu *vcpu = native_current_thread_info()->vcpu; + kvm_arch_exception_t exception; + + if (unlikely(((long) to & 0x7) || ((long) from & 0x7) || (n & 0x7))) { + DebugHUCOPY("%s(): to=%px from=%px n=%ld\n", + __func__, to, from, n); + return n; + } + + while (n) { + size_t left, copy_len, hva_off; + + hva_t from_hva = kvm_vcpu_gva_to_hva(vcpu, + (__force gva_t) from, false, &exception); + if (kvm_is_error_hva(from_hva)) { + DebugHUCOPY("failed to find GPA for dst %lx GVA, " + "inject page fault to guest\n", from); + kvm_vcpu_inject_page_fault(vcpu, (void *)from, + &exception); + return -EAGAIN; + } + + hva_off = from_hva & ~PAGE_MASK; + copy_len = min((size_t)PAGE_SIZE - hva_off, n); + + DebugHUCOPY("copy from %lx to %px size 0x%lx\n", + from_hva, to, copy_len); + KVM_BUG_ON(copy_len <= 0); + /* We are working with guest kernel's stacks which are + * located below usual hardware stacks area (USER_ADDR_MAX), + * thus there is no need to bypass access_ok() check. 
*/ + left = copy_from_user_with_tags(to, (__user void *) from_hva, copy_len); + if (unlikely(left)) { + pr_err("%s(): error: copied 0x%lx/0x%lx bytes from %px to %px\n", + __func__, copy_len - left, copy_len, from, to); + return n - (copy_len - left); + } + + to += copy_len; + from += copy_len; + n -= copy_len; + } + + return 0; +} + +int kvm_read_guest_phys_system(struct kvm *kvm, gpa_t gpa, void *val, + unsigned int bytes) +{ + unsigned offset = gpa & ~PAGE_MASK; + int ret; + + if (WARN_ON_ONCE(offset + bytes > PAGE_SIZE)) + return -EINVAL; + + ret = kvm_read_guest_page(kvm, gpa_to_gfn(gpa), val, offset, bytes); + if (ret < 0) { + pr_err("%s(): could not read data from guest phys addr 0x%llx, " + "size 0x%x\n", + __func__, gpa, bytes); + } + return ret; +} + +int kvm_write_guest_phys_system(struct kvm *kvm, gpa_t gpa, void *val, + unsigned int bytes) +{ + unsigned offset = gpa & ~PAGE_MASK; + int ret; + + if (WARN_ON_ONCE(offset + bytes > PAGE_SIZE)) + return -EINVAL; + + ret = kvm_write_guest_page(kvm, gpa_to_gfn(gpa), val, offset, bytes); + if (ret < 0) { + pr_err("%s(): could not write data to guest phys addr 0x%llx, " + "size 0x%x\n", + __func__, gpa, bytes); + } + return ret; +} diff --git a/arch/e2k/kvm/gaccess.h b/arch/e2k/kvm/gaccess.h new file mode 100644 index 000000000000..51af55e708c1 --- /dev/null +++ b/arch/e2k/kvm/gaccess.h @@ -0,0 +1,280 @@ +#ifndef __KVM_E2K_GACCESS_H +#define __KVM_E2K_GACCESS_H + +/* + * Guest virtual and physical memory access to read from/write to + * + * Copyright (C) 2018, MCST. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ */ + +#include +#include + +#include "mmu.h" + +/* + * The follow defines is expansion of arch-independent GVA->HVA translation + * error codes (see include/linux/kvm_host.h + */ +#define KVM_HVA_ONLY_VALID (PAGE_OFFSET + 2 * PAGE_SIZE) +#define KVM_HVA_IS_UNMAPPED (PAGE_OFFSET + 3 * PAGE_SIZE) +#define KVM_HVA_IS_WRITE_PROT (PAGE_OFFSET + 4 * PAGE_SIZE) + +static inline bool kvm_is_only_valid_hva(unsigned long addr) +{ + return addr == KVM_HVA_ONLY_VALID; +} + +static inline bool kvm_is_unmapped_hva(unsigned long addr) +{ + return addr == KVM_HVA_IS_UNMAPPED; +} + +static inline bool kvm_is_write_prot_hva(unsigned long addr) +{ + return addr == KVM_HVA_IS_WRITE_PROT; +} + +static inline bool +kvm_mmu_gva_is_gpa(struct kvm_vcpu *vcpu, gva_t gva) +{ + gpa_t gpa; + gfn_t gfn; + e2k_addr_t hva; + + if (!vcpu->arch.is_pv) + /* it is unknown in common case */ + return false; + if (is_paging(vcpu)) + /* can be only virtual addresses */ + return false; + if (gva >= GUEST_PAGE_OFFSET) + return false; + + gpa = (gpa_t)gva; + gfn = gpa_to_gfn(gpa); + hva = kvm_vcpu_gfn_to_hva(vcpu, gfn); + if (unlikely(kvm_is_error_hva(hva))) + return false; + + return true; +} +static inline bool +kvm_mmu_gva_is_gpa_range(struct kvm_vcpu *vcpu, gva_t gva, unsigned int bytes) +{ + gva_t end; + + if (!kvm_mmu_gva_is_gpa(vcpu, gva)) + return false; + end = gva + bytes - 1; + if ((end & PAGE_MASK) == (gva & PAGE_MASK)) + return true; + return kvm_mmu_gva_is_gpa(vcpu, end); +} + +static inline gpa_t +kvm_mmu_gvpa_to_gpa(gva_t gvpa) +{ + return (gpa_t)__guest_pa(gvpa); +} + +static inline bool +kvm_mmu_gva_is_gvpa(struct kvm_vcpu *vcpu, gva_t gva) +{ + gpa_t gpa; + gfn_t gfn; + e2k_addr_t hva; + + if (!vcpu->arch.is_pv) + /* it is unknown in common case */ + return false; + if (gva < GUEST_PAGE_OFFSET) + return false; + + gpa = kvm_mmu_gvpa_to_gpa(gva); + gfn = gpa_to_gfn(gpa); + hva = kvm_vcpu_gfn_to_hva(vcpu, gfn); + if (unlikely(kvm_is_error_hva(hva))) + return false; + if (unlikely(hva != 
(gva & PAGE_MASK))) + /* gfn should be mapped to equal virtual addresses */ + /* on host and on guest (from GUEST_PAGE_OFFSET) */ + return false; + + return true; +} +static inline bool +kvm_mmu_gva_is_gvpa_range(struct kvm_vcpu *vcpu, gva_t gva, unsigned int bytes) +{ + gva_t end; + + if (!kvm_mmu_gva_is_gvpa(vcpu, gva)) + return false; + end = gva + bytes - 1; + if ((end & PAGE_MASK) == (gva & PAGE_MASK)) + return true; + return kvm_mmu_gva_is_gvpa(vcpu, end); +} + +static inline gpa_t +kvm_mmu_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t gva, u32 access, + kvm_arch_exception_t *exception) +{ + gpa_t gpa; + bool again = false; + + if (likely(kvm_mmu_gva_is_gvpa(vcpu, gva))) + return kvm_mmu_gvpa_to_gpa(gva); + +again: + gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, exception); + if (arch_is_error_gpa(gpa)) { + /* it's OK to have bad guest virt address and + * pass it back to guest even if it's not valid */ + } else if (unlikely(gpa >= HOST_PAGE_OFFSET && !again && + vcpu->arch.is_hv)) { + /* Bug 119772: we may need to switch from nonpaging */ + /* to tdp here */ + is_paging(vcpu); + again = true; + goto again; + } + gpa |= (gva & ~PAGE_MASK); + return gpa; +} + +static inline gpa_t +kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, + kvm_arch_exception_t *exception) +{ + u32 access = 0; + + return kvm_mmu_gva_to_gpa(vcpu, gva, access, exception); +} + +static inline gpa_t +kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, + kvm_arch_exception_t *exception) +{ + u32 access = 0; + + access |= PFERR_FETCH_MASK; + return kvm_mmu_gva_to_gpa(vcpu, gva, access, exception); +} + +static inline gpa_t +kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, + kvm_arch_exception_t *exception) +{ + u32 access = 0; + + access |= PFERR_WRITE_MASK; + return kvm_mmu_gva_to_gpa(vcpu, gva, access, exception); +} + +/* uses this to access any guest's mapped memory without checking CPL */ +static inline gpa_t +kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, 
gva_t gva, + kvm_arch_exception_t *exception) +{ + return kvm_mmu_gva_to_gpa(vcpu, gva, 0, exception); +} + +static inline hva_t kvm_vcpu_gpa_to_hva(struct kvm_vcpu *vcpu, gpa_t gpa) +{ + gfn_t gfn; + unsigned long hva; + + gfn = gpa_to_gfn(gpa); + + hva = kvm_vcpu_gfn_to_hva(vcpu, gfn); + if (kvm_is_error_hva(hva)) + return hva; + + hva |= (gpa & ~PAGE_MASK); + return hva; +} + +static inline hva_t kvm_vcpu_gva_to_hva(struct kvm_vcpu *vcpu, gva_t gva, + bool is_write, kvm_arch_exception_t *exception) +{ + gpa_t gpa; + gfn_t gfn; + unsigned long hva; + kvm_arch_exception_t gpa_exception; + + gpa = is_write ? kvm_mmu_gva_to_gpa_write(vcpu, gva, &gpa_exception) : + kvm_mmu_gva_to_gpa_read(vcpu, gva, &gpa_exception); + if (unlikely(arch_is_error_gpa(gpa))) { + if (gpa_exception.error_code & PFERR_ONLY_VALID_MASK) + hva = KVM_HVA_ONLY_VALID; + else if (gpa_exception.error_code & PFERR_IS_UNMAPPED_MASK) + hva = KVM_HVA_IS_UNMAPPED; + else if (is_write && + (gpa_exception.error_code & PFERR_WRITE_MASK)) + hva = KVM_HVA_IS_WRITE_PROT; + else + hva = KVM_HVA_ERR_BAD; + + if (exception) + memcpy(exception, &gpa_exception, + sizeof(gpa_exception)); + + return hva; + } + + gfn = gpa_to_gfn(gpa); + + hva = kvm_vcpu_gfn_to_hva(vcpu, gfn); + if (kvm_is_error_hva(hva)) + return hva; + + hva |= (gva & ~PAGE_MASK); + return hva; +} + +extern void kvm_vcpu_inject_page_fault(struct kvm_vcpu *vcpu, void *addr, + kvm_arch_exception_t *exception); +extern int kvm_vcpu_fetch_guest_virt(struct kvm_vcpu *vcpu, + gva_t addr, void *val, unsigned int bytes); +extern int kvm_vcpu_read_guest_virt_system(struct kvm_vcpu *vcpu, + gva_t addr, void *val, unsigned int bytes); +extern int kvm_vcpu_read_guest_system(struct kvm_vcpu *vcpu, + gva_t addr, void *val, unsigned int bytes); +extern int kvm_vcpu_write_guest_virt_system(struct kvm_vcpu *vcpu, + gva_t addr, void *val, unsigned int bytes); +extern int kvm_vcpu_write_guest_system(struct kvm_vcpu *vcpu, + gva_t addr, void *val, unsigned int 
bytes); +extern int kvm_read_guest_phys_system(struct kvm *kvm, gpa_t addr, + void *val, unsigned int bytes); +extern int kvm_write_guest_phys_system(struct kvm *kvm, gpa_t addr, + void *val, unsigned int bytes); +extern long kvm_vcpu_set_guest_virt_system(struct kvm_vcpu *vcpu, + void *addr, u64 val, u64 tag, size_t size, u64 strd_opcode); +extern long kvm_vcpu_copy_guest_virt_system(struct kvm_vcpu *vcpu, + void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch); +extern long kvm_vcpu_copy_guest_virt_system_16(struct kvm_vcpu *vcpu, + void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch); + +static inline unsigned long +kvm_vcpu_copy_from_guest(struct kvm_vcpu *vcpu, + void *to, const void *from, unsigned long n) +{ + return kvm_vcpu_read_guest_system(vcpu, (gva_t)from, to, n); +} + +static inline unsigned long +kvm_vcpu_copy_to_guest(struct kvm_vcpu *vcpu, + void *to, const void *from, unsigned long n) +{ + return kvm_vcpu_write_guest_system(vcpu, (gva_t)to, (void *)from, n); +} + +#endif /* __KVM_E2K_GACCESS_H */ diff --git a/arch/e2k/kvm/gpid.c b/arch/e2k/kvm/gpid.c new file mode 100644 index 000000000000..0fd244c73833 --- /dev/null +++ b/arch/e2k/kvm/gpid.c @@ -0,0 +1,130 @@ +/* + * Generic guest pidhash and scalable, time-bounded GPID allocator + * + * Based on simplified kernel/pid.c + */ + +#include +#include +#include +#include +#include +#include + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +gpid_t *kvm_alloc_gpid(kvm_gpid_table_t *gpid_table) +{ + gpid_t *gpid; + int nr; + + DebugKVM("started\n"); + gpid = kmem_cache_alloc(gpid_table->nid_cachep, GFP_KERNEL); + if (!gpid) + goto out; + + nr = kvm_alloc_nid(gpid_table, &gpid->nid); + if (nr < 0) + goto out_free; + + gpid->gthread_info = NULL; + DebugKVM("allocated guest PID %d structure at %px\n", + gpid->nid.nr, gpid); + +out: + return gpid; + +out_free: + + kmem_cache_free(gpid_table->nid_cachep, gpid); + gpid = NULL; + goto out; +} + +static void kvm_drop_gpid(gpid_t *gpid, kvm_gpid_table_t *gpid_table) +{ + DebugKVM("started\n"); + kmem_cache_free(gpid_table->nid_cachep, gpid); +} + +void kvm_do_free_gpid(gpid_t *gpid, kvm_gpid_table_t *gpid_table) +{ + DebugKVM("started\n"); + + kvm_do_free_nid(&gpid->nid, gpid_table); + kvm_drop_gpid(gpid, gpid_table); +} + +void kvm_free_gpid(gpid_t *gpid, kvm_gpid_table_t *gpid_table) +{ + unsigned long flags; + + DebugKVM("started\n"); + + gpid_table_lock_irqsave(gpid_table, flags); + kvm_do_free_gpid(gpid, gpid_table); + gpid_table_unlock_irqrestore(gpid_table, flags); +} + +/* + * The gpid hash table is scaled according to the amount of memory in the + * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or + * more. 
+ */ + +int kvm_gpidmap_init(struct kvm *kvm, kvm_gpid_table_t *gpid_table, + kvm_nidmap_t *gpid_nidmap, int gpidmap_entries, + struct hlist_head *gpid_hash, int gpid_hash_bits) +{ + int ret; + + DebugKVM("started\n"); + gpid_table->nidmap = gpid_nidmap; + gpid_table->nidmap_entries = gpidmap_entries; + gpid_table->nid_hash = gpid_hash; + gpid_table->nid_hash_bits = gpid_hash_bits; + gpid_table->nid_hash_size = NID_HASH_SIZE(gpid_hash_bits); + ret = kvm_nidmap_init(gpid_table, GPID_MAX_LIMIT, RESERVED_GPIDS, + /* last gpid: no reserved, */ + /* init_task gpid #0 will be allocated first */ + -1); + if (ret != 0) { + pr_err("kvm_gpidmap_init() could not create NID map\n"); + return ret; + } + sprintf(gpid_table->nid_cache_name, "gpid_VM%d", kvm->arch.vmid.nr); + gpid_table->nid_cachep = + kmem_cache_create(gpid_table->nid_cache_name, + sizeof(gpid_t), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (gpid_table->nid_cachep == NULL) { + pr_err("kvm_gpidmap_init() could not allocate GPID cache\n"); + return -ENOMEM; + } + return 0; +} + +void kvm_gpidmap_destroy(kvm_gpid_table_t *gpid_table) +{ + gpid_t *gpid; + struct hlist_node *next; + unsigned long flags; + int i; + + DebugKVM("started\n"); + gpid_table_lock_irqsave(gpid_table, flags); + for_each_guest_thread_info(gpid, i, next, gpid_table) { + kvm_do_free_gpid(gpid, gpid_table); + } + gpid_table_unlock_irqrestore(gpid_table, flags); + kvm_nidmap_destroy(gpid_table); + kmem_cache_destroy(gpid_table->nid_cachep); + gpid_table->nid_cachep = NULL; +} diff --git a/arch/e2k/kvm/gpid.h b/arch/e2k/kvm/gpid.h new file mode 100644 index 000000000000..c97e6c338f0c --- /dev/null +++ b/arch/e2k/kvm/gpid.h @@ -0,0 +1,54 @@ +#ifndef _KVM_E2K_GPID_H +#define _KVM_E2K_GPID_H + +/* + * Guest processes identifier (gpid) allocator + * Based on simplified include/linux/pid.h + */ + +#include +#include +#include "process.h" + +#define GPID_MAX_LIMIT (PID_MAX_LIMIT / 2) + +#define GPIDMAP_ENTRIES ((GPID_MAX_LIMIT + 8*PAGE_SIZE - 1)/PAGE_SIZE/8) + 
+#define GPID_HASH_BITS 4 +#define CPID_HASH_SIZE (1 << GPID_HASH_BITS) + +typedef struct gpid { + int nr; + gthread_desc_t *gthread_desc; + struct hlist_node gpid_chain; +} gpid_t; + +typedef struct gpidmap { + atomic_t nr_free; + void *page; +} gpidmap_t; + +typedef struct kvm_gpid_table { + raw_spinlock_t gpidmap_lock; + gpidmap_t gpidmap[GPIDMAP_ENTRIES]; + int last_gpid; + struct kmem_cache *gpid_cachep; + struct hlist_head gpid_hash[CPID_HASH_SIZE]; + unsigned int gpidhash_shift; +} kvm_gpid_table_t; + +#define gpid_hashfn(nr) hash_long((unsigned long)nr, GPID_HASH_BITS) + +static inline gthread_desc_t *kvm_gpid_proc_desk(gpid_t *gpid) +{ + return gpid->gthread_desc; +} + +extern gpid_t *kvm_alloc_gpid(kvm_gpid_table_t *gpid_table); +extern void kvm_do_free_gpid(gpid_t *gpid, kvm_gpid_table_t *gpid_table); +extern void kvm_free_gpid(gpid_t *gpid, kvm_gpid_table_t *gpid_table); + +extern int kvm_gpidmap_init(kvm_gpid_table_t *gpid_table); +extern void kvm_gpidmap_destroy(kvm_gpid_table_t *gpid_table); + +#endif /* _KVM_E2K_GPID_H */ diff --git a/arch/e2k/kvm/gregs.h b/arch/e2k/kvm/gregs.h new file mode 100644 index 000000000000..e199cd15b0bb --- /dev/null +++ b/arch/e2k/kvm/gregs.h @@ -0,0 +1,6 @@ +#ifndef _E2K_KVM_GREGS_H +#define _E2K_KVM_GREGS_H + +#include + +#endif /* _E2K_KVM_GREGS_H */ diff --git a/arch/e2k/kvm/guest/Makefile b/arch/e2k/kvm/guest/Makefile new file mode 100644 index 000000000000..929a8b4eac87 --- /dev/null +++ b/arch/e2k/kvm/guest/Makefile @@ -0,0 +1,98 @@ +# 2) Generate kvm-asm-offsets.h +# + +# offsets-file := ../../../../include/generated/kvm-asm-offsets.h + +# always += $(offsets-file) +# targets += $(offsets-file) +# targets += arch/$(SRCARCH)/kvm/guest/kvm-asm-offsets.s + +# Custom string to workaround LCC limitations +define sed-y + "/@@@/x; s:^->::p; /\.uadword/{s:.*\.uadword[^0-f]*\([x0-f]*\).*:\1:;x;G;s:->\([^ ]*\) @@@ \(.*\)\n\([^ ]*\):#define \1 \3 /* \2 */:p;}" +endef + +quiet_cmd_kvm_asm_offsets = GEN $@ +define 
cmd_kvm_asm_offsets + (set -e; \ + echo "#ifndef __KVM_ASM_OFFSETS_H__"; \ + echo "#define __KVM_ASM_OFFSETS_H__"; \ + echo "/*"; \ + echo " * DO NOT MODIFY."; \ + echo " *"; \ + echo " * This file was generated by Kbuild"; \ + echo " *"; \ + echo " */"; \ + echo ""; \ + sed -ne $(sed-y) $<; \ + echo ""; \ + echo "#endif" ) > $@ +endef + +# patched CPU registers access now is not used, but can be need in future +# We use internal kbuild rules to avoid the "is up to date" message from make +#arch/$(SRCARCH)/kvm/guest/kvm-asm-offsets.s: \ +# arch/$(SRCARCH)/kvm/guest/kvm-asm-offsets.c +# $(Q)mkdir -p $(dir $@) +# $(call if_changed_dep,cc_s_c) + +# patched CPU registers access now is not used, but can be need in future +#include/generated/kvm-asm-offsets.h: \ +# arch/$(SRCARCH)/kvm/guest/kvm-asm-offsets.s +# $(Q)mkdir -p $(dir $@) +# $(call cmd,kvm_asm_offsets) + +#$(obj)/$(offsets-file): arch/$(SRCARCH)/kvm/guest/kvm-asm-offsets.s +# $(call cmd,kvm_asm_offsets) + +# patched CPU registers access now is not used, but can be need in future +#FORCE : include/generated/kvm-asm-offsets.h + +BOOT_CFLAGS_REMOVE := -pg +BOOT_CFLAGS_ADD := -DE2K_P2V +ifeq ($(call cc-option-yn,-fno-semi-spec-ld -fno-spec-ld),y) + BOOT_CFLAGS_ADD += -fno-semi-spec-ld -fno-spec-ld +else + BOOT_CFLAGS_ADD += -fno-ld-spec +endif + +CFLAGS_ttable.o += -Wframe-larger-than=4096 +CFLAGS_REMOVE_ttable.o = $(BOOT_CFLAGS_REMOVE) +CFLAGS_irq.o += -Wframe-larger-than=8192 +CFLAGS_REMOVE_boot.o = $(BOOT_CFLAGS_REMOVE) +CFLAGS_boot.o += $(BOOT_CFLAGS_ADD) +CFLAGS_REMOVE_boot_spinlock.o = $(BOOT_CFLAGS_REMOVE) +CFLAGS_boot_spinlock.o += $(BOOT_CFLAGS_ADD) +CFLAGS_REMOVE_boot_vram.o = $(BOOT_CFLAGS_REMOVE) +CFLAGS_boot_vram.o += $(BOOT_CFLAGS_ADD) +CFLAGS_REMOVE_boot_io.o = $(BOOT_CFLAGS_REMOVE) +CFLAGS_boot_io.o = $(BOOT_CFLAGS_ADD) +CFLAGS_REMOVE_boot_e2k_virt.o = $(BOOT_CFLAGS_REMOVE) +CFLAGS_boot_e2k_virt.o = $(BOOT_CFLAGS_ADD) +CFLAGS_REMOVE_boot_string.o = $(BOOT_CFLAGS_REMOVE) +CFLAGS_boot_string.o = 
$(BOOT_CFLAGS_ADD) +CFLAGS_REMOVE_boot-pv-hpt.o = $(BOOT_CFLAGS_REMOVE) +CFLAGS_boot-pv-hpt.o = $(BOOT_CFLAGS_ADD) + +obj-$(CONFIG_KVM_GUEST_MODE) += ttable.o traps.o fast_syscalls.o \ + process.o signal.o mmu.o \ + e2k_virt.o time.o io.o string.o \ + spinlock.o irq.o lapic.o \ + host_printk.o host_dump_stack.o \ + boot.o boot_spinlock.o boot_e2k_virt.o boot_io.o \ + boot_vram.o boot_string.o tlbflush.o + +ifeq ($(CONFIG_EPIC),y) +obj-$(CONFIG_KVM_GUEST_MODE) += cepic.o +endif # CONFIG_EPIC + +obj-$(CONFIG_KVM_ASYNC_PF) += async_pf.o + +CFLAGS_smp.o += -Wframe-larger-than=4096 +obj-$(CONFIG_KVM_GUEST_SMP) += smp.o + +obj-$(CONFIG_PARAVIRT_GUEST) += paravirt.o + +# patched CPU registers access now is not used, but can be need in future +#obj-$(CONFIG_PARAVIRT_GUEST) += patched_cpu_regs_access.o + diff --git a/arch/e2k/kvm/guest/async_pf.c b/arch/e2k/kvm/guest/async_pf.c new file mode 100644 index 000000000000..b2ffb58f2864 --- /dev/null +++ b/arch/e2k/kvm/guest/async_pf.c @@ -0,0 +1,225 @@ +#include +#include +#include +#include +#include + + +/* Task waiting for async page fault completion */ +struct pv_apf_wait_task { + struct hlist_node link; + struct task_struct *task; + u32 apf_id; +}; + +/* + * Cache to store tasks waiting for async page fault completion. + * The key is apf_id = vcpu->arch.apf.id << 12 | vcpu->id . + */ +static struct pv_apf_wait_bucket { + raw_spinlock_t lock; + struct hlist_head list; +} pv_apf_cache[KVM_APF_CACHE_SIZE]; + +/* + * Descriptor of async page fault event, filled by host + * + * @apf_reson - type of async page fault event + * 3 possible values: + * KVM_APF_NO - no async page fault occurred. + * KVM_APF_PAGE_IN_SWAP - physical page was swapped out by host, + * need to suspend current process until it will be loaded from swap. + * KVM_APF_PAGE_READY - physical page is loaded from swap and ready for access, + * need to wake up process waiting for loading of this page. 
+ * + * @apf_id- unique identifier for async page fault event + * Needed by irq handler epic_apf_wake + * apf_id = vcpu->arch.apf.id << 12 | vcpu->id + */ +struct pv_apf_event { + u32 apf_reason; + u32 apf_id; +}; + +static DEFINE_PER_CPU(struct pv_apf_event, pv_apf_event); + +u32 pv_apf_read_and_reset_reason(void) +{ + u32 apf_reason = this_cpu_read(pv_apf_event.apf_reason); + + this_cpu_write(pv_apf_event.apf_reason, KVM_APF_NO); + return apf_reason; +} + +static u32 pv_apf_read_and_reset_id(void) +{ + u32 apf_id = this_cpu_read(pv_apf_event.apf_id); + + this_cpu_write(pv_apf_event.apf_id, 0); + return apf_id; +} + +/* + * Wake up task, waiting for async page fault completion + */ +static void pv_apf_wake_one_task(struct pv_apf_wait_task *wait_task) +{ + hlist_del_init(&wait_task->link); + + if (wait_task->task) { + wake_up_process(wait_task->task); + wait_task->task = NULL; + } +} + +/* + * Lookup for task with required apf_id in pv_apf_cache hash bucket + */ +static struct pv_apf_wait_task *pv_apf_find_wait_task( + struct pv_apf_wait_bucket *wait_bucket, + u32 apf_id) +{ + struct hlist_node *wait_entry; + + hlist_for_each(wait_entry, &wait_bucket->list) { + struct pv_apf_wait_task *wait_task = + hlist_entry(wait_entry, typeof(*wait_task), link); + if (wait_task->apf_id == apf_id) + return wait_task; + } + + return NULL; +} + +/* + * Suspend current task to wait for completion of async page fault handling. + */ +void pv_apf_wait(void) +{ + struct pv_apf_wait_task new_wait_task, *exist_wait_task; + unsigned long flags; + u32 apf_id = pv_apf_read_and_reset_id(); + + /* Get hash bucket in pv_apf_cache */ + u32 key = hash_32(apf_id, KVM_APF_HASH_BITS); + struct pv_apf_wait_bucket *wait_bucket = &pv_apf_cache[key]; + + raw_spin_lock_irqsave(&wait_bucket->lock, flags); + + exist_wait_task = pv_apf_find_wait_task(wait_bucket, apf_id); + + if (exist_wait_task) { + /* + * pv_apf_wake was called ahead of pv_apf_wait. 
+ * Delete dummy entry from cache and do not suspend + * current task. + */ + hlist_del(&exist_wait_task->link); + kfree(exist_wait_task); + + raw_spin_unlock_irqrestore(&wait_bucket->lock, flags); + + return; + } + + /* Add current task in pv_apf_cache */ + new_wait_task.apf_id = apf_id; + new_wait_task.task = current; + hlist_add_head(&new_wait_task.link, &wait_bucket->list); + + raw_spin_unlock_irqrestore(&wait_bucket->lock, flags); + + + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + + /* Check if current task was woken up by pv_apf_wake */ + if (hlist_unhashed(&new_wait_task.link)) + break; + + /* + * Suspend current task until it will be woken + * up by pv_apf_wake + */ + schedule(); + } + + __set_current_state(TASK_RUNNING); +} + +/* + * Wake up task, which waits for async page fault completion + */ +void pv_apf_wake(void) +{ + u32 apf_id = pv_apf_read_and_reset_id(); + + /* Get hash bucket in pv_apf_cache */ + u32 key = hash_32(apf_id, KVM_APF_HASH_BITS); + struct pv_apf_wait_bucket *wait_bucket = &pv_apf_cache[key]; + + struct pv_apf_wait_task *wait_task; + unsigned long flags; + + raw_spin_lock_irqsave(&wait_bucket->lock, flags); + + wait_task = pv_apf_find_wait_task(wait_bucket, apf_id); + + if (!wait_task) { + /* + * pv_apf_wake was called ahead of pv_apf_wait. + * Add dummy entry in pv_apf_cache with this apf_id and + * do not wake up any tasks. 
+ */ + wait_task = kzalloc(sizeof(*wait_task), GFP_ATOMIC); + KVM_BUG_ON(!wait_task); + wait_task->apf_id = apf_id; + hlist_add_head(&wait_task->link, &wait_bucket->list); + + raw_spin_unlock_irqrestore(&wait_bucket->lock, flags); + + return; + } + + /* Waiting task is present in pv_apf_cache, wake up it */ + pv_apf_wake_one_task(wait_task); + + raw_spin_unlock_irqrestore(&wait_bucket->lock, flags); +} + +/* + * Translate gva to gpa + */ +static unsigned long gva_to_gpa(void *gva) +{ + unsigned long gfn = page_to_pfn(virt_to_page(gva)); + return PFN_PHYS(gfn) + ((unsigned long)gva & ~PAGE_MASK); +} + +/* + * Enable async page fault handling on current cpu + */ +static void pv_apf_enable_curr_cpu(void *info) +{ + struct pv_apf_event *event = this_cpu_ptr(&pv_apf_event); + unsigned long apf_reason_gpa = gva_to_gpa(&event->apf_reason); + unsigned long apf_id_gpa = gva_to_gpa(&event->apf_id); + + this_cpu_write(pv_apf_event.apf_reason, KVM_APF_NO); + this_cpu_write(pv_apf_event.apf_id, 0); + + WARN_ON(HYPERVISOR_pv_enable_async_pf(apf_reason_gpa, apf_id_gpa, + ASYNC_PF_WAKE_VECTOR, EPIC_CONTROLLER)); +} + +/* + * Enable async page fault handling on all cpus + */ +static int __init pv_apf_enable(void) +{ + if (IS_HV_GM()) + on_each_cpu(&pv_apf_enable_curr_cpu, NULL, 1); + + return 0; +} +arch_initcall(pv_apf_enable); diff --git a/arch/e2k/kvm/guest/boot.c b/arch/e2k/kvm/guest/boot.c new file mode 100644 index 000000000000..a7e1f389cb87 --- /dev/null +++ b/arch/e2k/kvm/guest/boot.c @@ -0,0 +1,851 @@ + +/* + * KVM boot-time initialization + * + * Copyright (C) 2014 MCST + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "boot.h" +#include "process.h" +#include "cpu.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 1 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE) \ + do_boot_printk("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_NUMA_MODE +#undef DebugNUMA +#define DEBUG_NUMA_MODE 1 /* kernel virtual machine debugging */ +#define DebugNUMA(fmt, args...) \ +({ \ + if (DEBUG_NUMA_MODE) \ + do_boot_printk("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_MUU_INIT_MODE +#undef DebugMMU +#define DEBUG_MUU_INIT_MODE 1 /* MMU init debugging */ +#define DebugMMU(fmt, args...) \ +({ \ + if (DEBUG_MUU_INIT_MODE) \ + do_boot_printk("%s(): " fmt, __func__, ##args); \ +}) + +extern char __kvm_guest_ttable_start[]; +extern char __kvm_guest_ttable_end[]; + +/* + * Table of pointers to VCPUs state. + * Own VCPU state pointer is loaded on some global registers to direct access + * Other VCPUs state pointers can be accessible through this table + */ +kvm_vcpu_state_t *vcpus_state[NR_CPUS]; + +/* + * Native/guest VM early indicator + */ +static inline bool boot_kvm_early_is_guest_hv_vm(void) +{ + return kvm_vcpu_host_is_hv(); +} + +void boot_kvm_setup_machine_id(bootblock_struct_t *bootblock) +{ + bool is_hv_gm; +#ifdef CONFIG_MMU_PT_V6 + bool host_mmu_pt_v6; +#endif /* CONFIG_MMU_PT_V6 */ + +#ifdef CONFIG_E2K_MACHINE +#if defined(CONFIG_E2K_VIRT) +#else +# error "E2K VIRTUAL MACHINE type does not defined" +#endif +#else /* ! 
CONFIG_E2K_MACHINE */ +#endif /* CONFIG_E2K_MACHINE */ + + is_hv_gm = boot_kvm_early_is_guest_hv_vm(); + + boot_native_setup_machine_id(bootblock); + + /* boot_machine now contains info from emulated IDR */ + + /* + * At this point we have three machine ids: + * - boot_machine.native_id is determined by QEMU parameter + * - kvm_vcpu_host_machine_id is host id + * - boot_native_machine_id may be statically set when compiling guest + */ + + boot_guest_machine_id = boot_machine.native_id; + boot_machine.guest.id = boot_guest_machine_id; + boot_machine.guest.rev = boot_machine.native_rev; + boot_machine.guest.iset_ver = boot_machine.native_iset_ver; + +#ifdef CONFIG_E2K_MACHINE + if (boot_guest_machine_id != boot_native_machine_id) + BOOT_BUG("Guest kernel arch does not match QEMU parameter arch"); + + if (boot_native_machine_id != kvm_vcpu_host_machine_id()) + BOOT_BUG("Guest kernel arch does not match host arch"); +#else + boot_native_machine_id = kvm_vcpu_host_machine_id(); +#endif + boot_machine.native_id = boot_native_machine_id; + boot_machine.native_rev = kvm_vcpu_host_cpu_rev(); + boot_machine.native_iset_ver = kvm_vcpu_host_cpu_iset(); + +#ifdef CONFIG_MMU_PT_V6 + host_mmu_pt_v6 = kvm_vcpu_host_mmu_support_pt_v6(); + if (host_mmu_pt_v6) { + /* host support new MMU PT structures, so guest can it too */ + boot_machine.mmu_pt_v6 = true; + } else { + boot_machine.mmu_pt_v6 = false; + } +#else /* ! 
CONFIG_MMU_PT_V6 */ + boot_machine.mmu_pt_v6 = false; +#endif /* CONFIG_MMU_PT_V6 */ + +#ifdef CONFIG_ONLY_HIGH_PHYS_MEM + /* on VCPU the low memory cannot be part of the high */ + BOOT_LOW_MEMORY_ENABLED() = true; +#endif /* CONFIG_ONLY_HIGH_PHYS_MEM */ +} + +int __init +boot_kvm_probe_memory(node_phys_mem_t *nodes_phys_mem, + boot_info_t *bootblock) +{ + int ret; + + if (bootblock->signature != KVM_GUEST_SIGNATURE && + bootblock->signature != X86BOOT_SIGNATURE) { + BOOT_BUG("Unknown type of Boot information structure"); + return -ENOMEM; + } + + ret = boot_biosx86_probe_memory(nodes_phys_mem, bootblock); + if (ret < 0) { + BOOT_BUG("Probe of physical memory failed, error %d|n", + ret); + return ret; + } + + ret = boot_kvm_probe_vram_memory(bootblock); + if (ret < 0) { + BOOT_BUG("Probe of virtual RAM failed, error %d|n", + ret); + return ret; + } + + return ret; +} + +void __init boot_kvm_check_bootblock(bool bsp, bootblock_struct_t *bootblock) +{ + boot_info_t *boot_info = &bootblock->info; + e2k_addr_t base_addr, addr; + e2k_size_t size; + bool is_base_phys; + + base_addr = BOOT_READ_OSCUD_LO_REG().OSCUD_lo_base; + is_base_phys = (base_addr < GUEST_PAGE_OFFSET) ? 
true : false; + + /* + * The guest kernel launcher (QEMU) can pass addresses into bootblock + * structure both physical (PA) and virtual physical + * (VPA == PA + GUEST_PAGE_OFFSET + * It need put all adresses to unified format with kernel base + */ + if (!is_base_phys) + return; + + /* all addresses should be PA */ + addr = boot_info->kernel_base; + if (addr >= GUEST_PAGE_OFFSET) { + addr = boot_vpa_to_pa(addr); + boot_info->kernel_base = addr; + } + + addr = boot_info->mp_table_base; + if (addr != 0) { + struct intel_mp_floating *mpf; + + if (addr >= GUEST_PAGE_OFFSET) { + addr = boot_vpa_to_pa(addr); + boot_info->mp_table_base = addr; + } + mpf = (struct intel_mp_floating *)addr; + addr = mpf->mpf_physptr; + if (addr != 0 && addr >= GUEST_PAGE_OFFSET) { + addr = boot_vpa_to_pa(addr); + mpf->mpf_checksum = 0; + mpf->mpf_physptr = addr; + /* recalculate structure sum */ + mpf->mpf_checksum = + boot_mpf_do_checksum((unsigned char *)mpf, + sizeof(*mpf)); + } + } + + size = boot_info->ramdisk_size; /* INITRD_SIZE */ + if (size != 0) { + addr = boot_info->ramdisk_base; /* INITRD_BASE */ + if (addr >= GUEST_PAGE_OFFSET) { + addr = boot_vpa_to_pa(addr); + boot_info->ramdisk_base = addr; + } + } +} + +e2k_size_t __init +boot_kvm_get_bootblock_size(boot_info_t *bblock) +{ + e2k_size_t area_size = 0; + + if (bblock->signature == KVM_GUEST_SIGNATURE || + bblock->signature == X86BOOT_SIGNATURE) { + area_size = sizeof(bootblock_struct_t); + } else { + BOOT_BUG("Unknown type of Boot information structure"); + } + return area_size; +} + +void __init_recv +boot_kvm_cpu_relax(void) +{ + HYPERVISOR_kvm_guest_vcpu_relax(); +} + +#ifdef CONFIG_SMP +int __init_recv +boot_kvm_smp_cpu_config(boot_info_t *bootblock) +{ + if (bootblock->signature == KVM_GUEST_SIGNATURE || + bootblock->signature == X86BOOT_SIGNATURE) { + return boot_biosx86_smp_cpu_config(bootblock); + } else { + BOOT_BUG("Unknown type of Boot information structure"); + } + return 0; +} + +void __init_recv 
+boot_kvm_smp_node_config(boot_info_t *bootblock) +{ + if (bootblock->signature == KVM_GUEST_SIGNATURE || + bootblock->signature == X86BOOT_SIGNATURE) { + boot_biosx86_smp_node_config(bootblock); + } else { + BOOT_BUG("Unknown type of Boot information structure"); + } +} +#endif /* CONFIG_SMP */ + +/* + * Reserve memory of VCPU state structure to communacate with host kernel + */ +static void __init +boot_kvm_reserve_vcpu_state(void) +{ + e2k_addr_t area_base; + e2k_size_t area_size; + int ret; + + KVM_GET_VCPU_STATE_BASE(area_base); + area_size = sizeof(kvm_vcpu_state_t); + ret = boot_reserve_physmem(area_base, area_size, kernel_data_mem_type, + BOOT_NOT_IGNORE_BUSY_BANK); + if (ret != 0) { + BOOT_BUG("Could not reserve VCPU state area: " + "base addr 0x%lx size 0x%lx page size 0x%x", + area_base, area_size, PAGE_SIZE); + } + + DebugKVM("The VCPU state reserved area: " + "base addr 0x%lx size 0x%lx page size 0x%x\n", + area_base, area_size, PAGE_SIZE); +} + +/* + * Reserve memory of Compilation Units Table for guest kernel + */ +static void __init +boot_kvm_reserve_kernel_cut(void) +{ + e2k_cutd_t cutd; + e2k_addr_t area_base; + e2k_size_t area_size; + int ret; + + cutd.CUTD_reg = BOOT_KVM_READ_OSCUTD_REG_VALUE(); + area_base = cutd.CUTD_base; + area_size = sizeof(e2k_cute_t) * MAX_GUEST_CODES_UNITS; + ret = boot_reserve_physmem(area_base, area_size, kernel_data_mem_type, + BOOT_NOT_IGNORE_BUSY_BANK); + if (ret != 0) { + BOOT_BUG("Could not reserve kernel CUT area: " + "base addr 0x%lx size 0x%lx page size 0x%x", + area_base, area_size, PAGE_SIZE); + } + + DebugKVM("The kernel CUT reserved area: " + "base addr 0x%lx size 0x%lx page size 0x%x\n", + area_base, area_size, PAGE_SIZE); +} + +/* + * Reserve legacy VGA IO memory + */ +static void __init +boot_kvm_reserve_legacy_VGA_MEM(bool bsp) +{ + e2k_addr_t area_base; + e2k_size_t area_size; + int ret; + + if (BOOT_IS_BSP(bsp)) { + area_base = VGA_VRAM_PHYS_BASE; + area_size = VGA_VRAM_SIZE; + ret = 
boot_reserve_physmem(area_base, area_size, hw_stripped_mem_type, + BOOT_NOT_IGNORE_BUSY_BANK | BOOT_IGNORE_BANK_NOT_FOUND); + if (ret != 0) { + BOOT_BUG("Could not reserve VGA MEM area: " + "base addr 0x%lx size 0x%lx page size 0x%x", + area_base, area_size, PAGE_SIZE); + } + + DebugKVM("Legacy VGA MEM reserved area: " + "base addr 0x%lx size 0x%lx page size 0x%x\n", + area_base, area_size, PAGE_SIZE); + } +} + +/* + * Reserve the memory used by KVM guest boot-time initialization. + * All the used memory areas enumerate below. If a some new area will be used, + * then it should be added to the list of already known ones. + */ + +void __init +boot_kvm_reserve_all_bootmem(bool bsp, boot_info_t *boot_info) +{ + /* + * Reserve kernel image 'text/data/bss' segments. + * 'OSCUD' & 'OSGD' register-pointers describe these areas. + * 'text' and 'data/bss' segments can intersect or one can include + * other. + */ + boot_reserve_kernel_image(bsp, boot_info); + + /* + * The special virtual physical memory VRAM is now used to emulate + * VCPU, VMMU, VSIC and other hardware registers, tables, structures + * Reservation can be not made, but only to check reservation areas + * intersections (including main memory busy areas) it will be done + */ + #define CHECK_VCPU_VRAM_INTERSECTIONS + +#ifdef CHECK_VCPU_VRAM_INTERSECTIONS + + /* + * Reserve memory of VCPU state structure to communacate with + * host kernel + * (allocated in VRAM) + */ + boot_kvm_reserve_vcpu_state(); + + + /* + * Reserve memory of Compilation Units Table for guest kernel + * (allocated in VRAM) + */ + boot_kvm_reserve_kernel_cut(); +#endif /* CHECK_VCPU_VRAM_INTERSECTIONS */ + + /* + * Reserve boot information records. + */ + boot_reserve_bootblock(bsp, boot_info); + + /* + * Reserve memory of boot-time stacks. 
+ */ + boot_reserve_stacks(boot_info); + + /* + * Reserve legacy VGA IO memory + */ + boot_kvm_reserve_legacy_VGA_MEM(bsp); +} + +/* + * The function defines sizes of all guest kernel hardware stacks(PS & PCS) + * host run on own stacks, the guest stacks should define only + * own hardware stacks sizes + */ +void __init boot_kvm_define_kernel_hw_stacks_sizes(hw_stack_t *hw_stacks) +{ + kvm_set_hw_ps_user_size(hw_stacks, KVM_GUEST_KERNEL_PS_SIZE); + kvm_set_hw_pcs_user_size(hw_stacks, KVM_GUEST_KERNEL_PCS_SIZE); +} + +static void __init boot_kvm_map_all_phys_memory(boot_info_t *boot_info) +{ + e2k_addr_t area_phys_base; + e2k_size_t area_size; + e2k_addr_t area_virt_base; + int ret; + int bank; + + /* + * Map the available physical memory into virtual space to direct + * access to physical memory using kernel pa <-> va translations + * All physical memory pages are mapped to virtual space starting + * from 'PAGE_OFFSET' + */ + +#ifdef CONFIG_SMP + if (!BOOT_TEST_AND_SET_NODE_LOCK(boot_node_map_lock, + boot_node_mem_mapped)) { +#endif /* CONFIG_SMP */ +#ifdef CONFIG_NUMA + if (!boot_node_has_dup_kernel()) { + DebugNUMA("boot_map_all_bootmem() node " + "has not own page table and will use " + "node #%d physical memory mapping\n", + boot_my_node_dup_kernel_nid); + goto no_mem_mapping; + } else { + DebugNUMA("boot_map_all_bootmem() will map all " + "physical memory\n"); + } +#endif /* CONFIG_NUMA */ + DebugKVM("The physical memory start address 0x%lx, " + "end 0x%lx\n", + boot_start_of_phys_memory, + boot_end_of_phys_memory); + area_phys_base = boot_pa_to_vpa(boot_start_of_phys_memory); + area_virt_base = + (e2k_addr_t)__boot_va(boot_start_of_phys_memory); + area_size = 0; + for (bank = 0; bank < L_MAX_NODE_PHYS_BANKS; bank++) { + if (!boot_info->nodes_mem[0].banks[bank].size) + break; + area_size += boot_info->nodes_mem[0].banks[bank].size; + } + ret = boot_map_physmem(PAGE_MAPPED_PHYS_MEM, + BOOT_E2K_MAPPED_PHYS_MEM_PAGE_SIZE); + if (ret <= 0) { + BOOT_BUG("Could not 
map physical memory area: " + "base addr 0x%lx size 0x%lx page size 0x%x to " + "virtual addr 0x%lx", + area_phys_base, area_size, + BOOT_E2K_MAPPED_PHYS_MEM_PAGE_SIZE, + area_virt_base); + } + DebugKVM("The physical memory area: " + "base addr 0x%lx size 0x%lx is mapped to %d virtual " + "page(s) base addr 0x%lx page size 0x%x\n", + area_phys_base, area_size, ret, area_virt_base, + BOOT_E2K_MAPPED_PHYS_MEM_PAGE_SIZE); +#ifdef CONFIG_NUMA +no_mem_mapping: +#endif /* CONFIG_NUMA */ +#ifdef CONFIG_SMP + BOOT_NODE_UNLOCK(boot_node_map_lock, boot_node_mem_mapped); + } +#endif /* CONFIG_SMP */ +} + +void __init boot_kvm_map_all_bootmem(bool bsp, boot_info_t *boot_info) +{ + + /* guest kernel image should be registered on host */ + /* for paravirtualization mode without shadow PT support */ + boot_host_kernel_image(bsp); + + /* + * Map the kernel image 'text/data/bss' segments. + */ + boot_map_kernel_image(populate_image_on_host); + + /* + * Map the kernel stacks + */ + boot_map_kernel_boot_stacks(); + + /* + * Map all available physical memory + */ + boot_kvm_map_all_phys_memory(boot_info); + + /* + * Map all needed physical areas from boot-info. 
+ */ + boot_map_all_bootinfo_areas(boot_info); + + /* + * Map all available VRAM areas + */ + boot_kvm_map_vram_memory(boot_info); +} + +/* + * KVM guest kernel started on virtual memory so does not need + * special switch to virtual space + */ +void __init_recv +boot_kvm_map_needful_to_equal_virt_area(e2k_addr_t stack_top_addr) +{ + return; +} + +void boot_kvm_set_kernel_MMU_state_before(void) +{ + vcpu_gmmu_info_t gmmu_info; + mmu_reg_t mmu_cr = _MMU_CR_KERNEL; + int ret; + + /* translation (TLB enable) will be turn ON later */ + gmmu_info.mmu_cr = mmu_cr & ~_MMU_CR_TLB_EN; + gmmu_info.pid = MMU_KERNEL_CONTEXT; + gmmu_info.trap_cellar = (unsigned long)boot_kernel_trap_cellar; + DebugMMU("will set MMU_CR 0x%llx PID 0x%llx TRAP_CELLAR at %p\n", + gmmu_info.mmu_cr, gmmu_info.pid, (void *)gmmu_info.trap_cellar); + + gmmu_info.sep_virt_space = MMU_IS_SEPARATE_PT(); + gmmu_info.pt_v6 = MMU_IS_PT_V6(); + if (gmmu_info.sep_virt_space) { + gmmu_info.os_pptb = MMU_SEPARATE_KERNEL_PPTB; + gmmu_info.os_vptb = MMU_SEPARATE_KERNEL_VPTB; + BUILD_BUG_ON(MMU_SEPARATE_KERNEL_VAB != GUEST_PAGE_OFFSET); + gmmu_info.os_vab = MMU_SEPARATE_KERNEL_VAB; + DebugMMU("will set separate OS_PPTB at %p OS_VPTB at %p " + "OS_VAB at %p\n", + (void *)gmmu_info.os_pptb, (void *)gmmu_info.os_vptb, + (void *)gmmu_info.os_vab); + + /* set user PT to kernel PT too as initial state */ + gmmu_info.u_pptb = MMU_SEPARATE_KERNEL_PPTB; + gmmu_info.u_vptb = MMU_SEPARATE_USER_VPTB; + DebugMMU("will set user PTs same as OS: U_PPTB at %p " + "U_VPTB at %p\n", + (void *)gmmu_info.u_pptb, (void *)gmmu_info.u_vptb); + } else { + gmmu_info.u_pptb = MMU_UNITED_KERNEL_PPTB; + gmmu_info.u_vptb = MMU_UNITED_KERNEL_VPTB; + DebugMMU("will set united U_PPTB at %p U_VPTB at %p\n", + (void *)gmmu_info.u_pptb, (void *)gmmu_info.u_vptb); + } + gmmu_info.opcode = INIT_STATE_GMMU_OPC; + + ret = HYPERVISOR_vcpu_guest_mmu_state(&gmmu_info); + if (ret != 0) { + BOOT_BUG("Could not set guest mmu state by hypercall, " + "error %d", 
ret); + } +} + +void boot_kvm_set_kernel_MMU_state_after(void) +{ +} + +/* + * Guest kernel is running on virtual space, so it does not need to turn on + * virtual memory support + */ +void __init_recv +boot_kvm_switch_to_virt(bool bsp, int cpuid, + void (*boot_init_sequel_func)(bool bsp, int cpuid, int cpus)) +{ + bootmem_areas_t *bootmem = boot_kernel_bootmem; + hw_stack_t hw_stacks; + kvm_task_info_t task_info; + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + e2k_usbr_t usbr; + e2k_cud_lo_t cud_lo; + e2k_cud_hi_t cud_hi; + e2k_gd_lo_t gd_lo; + e2k_gd_hi_t gd_hi; + e2k_cutd_t cutd; + e2k_size_t size; + bool is_hv; + int cpus_to_sync = boot_cpu_to_sync_num; + int ret; + + is_hv = BOOT_IS_HV_GM(); + + /* + * Set all needed MMU registers + */ + boot_set_kernel_MMU_state_before(); + boot_set_kernel_MMU_state_after(); + + /* + * Calculate hardware procedure and chain stacks pointers + */ + + /* + * Set hardware stacks registers + */ + AW(psp_lo) = 0; + AW(psp_hi) = 0; + AW(pcsp_lo) = 0; + AW(pcsp_hi) = 0; +#ifndef CONFIG_SMP + psp_lo.PSP_lo_base = bootmem->boot_ps.virt; + psp_hi.PSP_hi_size = bootmem->boot_ps.size; + pcsp_lo.PCSP_lo_base = bootmem->boot_pcs.virt; + pcsp_hi.PCSP_hi_size = bootmem->boot_pcs.size; +#else + psp_lo.PSP_lo_base = bootmem->boot_ps[cpuid].virt; + psp_hi.PSP_hi_size = bootmem->boot_ps[cpuid].size; + pcsp_lo.PCSP_lo_base = bootmem->boot_pcs[cpuid].virt; + pcsp_hi.PCSP_hi_size = bootmem->boot_pcs[cpuid].size; +#endif + psp_hi.PSP_hi_ind = 0; + pcsp_hi.PCSP_hi_ind = 0; + + /* + * Calculate guest kernel OSCUD trap table start + */ + cud_lo = NATIVE_READ_OSCUD_LO_REG(); + cud_hi = NATIVE_READ_OSCUD_HI_REG(); + cud_lo.OSCUD_lo_base = (e2k_addr_t)_start; + cud_hi.OSCUD_hi_size = (e2k_addr_t)_etext - (e2k_addr_t)_start; + DebugKVM("The kernel CUD virtual area: base addr 0x%lx size 0x%x\n", + cud_lo.OSCUD_lo_base, cud_hi.OSCUD_hi_size); + + /* + * Calculate 
guest kernel OSGD area + */ + gd_lo = NATIVE_READ_OSGD_LO_REG(); + gd_hi = NATIVE_READ_OSGD_HI_REG(); + size = (e2k_addr_t)_edata_bss - (e2k_addr_t)_sdata; + size = ALIGN_TO_MASK(size, E2K_ALIGN_OS_GLOBALS_MASK); + gd_lo.OSCUD_lo_base = (e2k_addr_t)_sdata; + gd_hi.OSCUD_hi_size = size; + + /* calculate virtual CUTD pointer */ + cutd.CUTD_reg = BOOT_KVM_READ_OSCUTD_REG_VALUE(); + cutd.CUTD_base = (e2k_addr_t)__boot_va(cutd.CUTD_base); + DebugKVM("The kernel CUT virtual area: base addr 0x%lx size 0x%x\n", + cutd.CUTD_base, sizeof(e2k_cute_t) * 1); + + /* Enable control of PS & PCS stack bounds */ + boot_kvm_set_sge(); + + /* + * Calculate User Stack registers init kernel stack addresses. + * Set stack pointer to the very begining of initial stack to collapse + * useless previous stack frames + */ + AW(usd_lo) = 0; + AW(usd_hi) = 0; + AW(usbr) = 0; +#ifndef CONFIG_SMP + usbr.USBR_base = bootmem->boot_stack.virt + bootmem->boot_stack.size; + usd_lo.USD_lo_base = bootmem->boot_stack.virt + + bootmem->boot_stack.virt_offset; + usd_hi.USD_hi_size = bootmem->boot_stack.virt_offset; +#else + usbr.USBR_base = bootmem->boot_stack[cpuid].virt + + bootmem->boot_stack[cpuid].size; + usd_lo.USD_lo_base = bootmem->boot_stack[cpuid].virt + + bootmem->boot_stack[cpuid].virt_offset; + usd_hi.USD_hi_size = bootmem->boot_stack[cpuid].virt_offset; +#endif + usd_lo.USD_lo_p = 0; + + /* + * Real switch to new init stacks can be done only by hypervisor + */ + + boot_define_kernel_hw_stacks_sizes(&hw_stacks); + +#ifndef CONFIG_SMP + task_info.sp_offset = bootmem->boot_stack.size; + task_info.us_base = bootmem->boot_stack.virt; + task_info.us_size = bootmem->boot_stack.size; + task_info.ps_base = bootmem->boot_ps.virt; + task_info.init_ps_size = bootmem->boot_ps.size; + task_info.pcs_base = bootmem->boot_pcs.virt; + task_info.init_pcs_size = bootmem->boot_pcs.size; +#else + task_info.sp_offset = bootmem->boot_stack[cpuid].size; + task_info.us_base = bootmem->boot_stack[cpuid].virt; + 
task_info.us_size = bootmem->boot_stack[cpuid].size; + task_info.ps_base = bootmem->boot_ps[cpuid].virt; + task_info.init_ps_size = bootmem->boot_ps[cpuid].size; + task_info.pcs_base = bootmem->boot_pcs[cpuid].virt; + task_info.init_pcs_size = bootmem->boot_pcs[cpuid].size; +#endif + task_info.flags = 0; + BUG_ON(task_info.sp_offset > task_info.us_size); + task_info.us_ps_size = kvm_get_hw_ps_user_size(&hw_stacks); + task_info.ps_size = task_info.us_ps_size; + task_info.ps_offset = 0; + task_info.ps_top = task_info.init_ps_size; + task_info.us_pcs_size = kvm_get_hw_pcs_user_size(&hw_stacks); + task_info.pcs_size = task_info.us_pcs_size; + task_info.pcs_offset = 0; + task_info.pcs_top = task_info.init_pcs_size; + task_info.flags |= (DO_PRESENT_HW_STACKS_TASK_FLAG | + PS_HAS_NOT_GUARD_PAGE_TASK_FLAG | + PCS_HAS_NOT_GUARD_PAGE_TASK_FLAG); + task_info.cud_base = cud_lo.OSCUD_lo_base; + task_info.cud_size = cud_hi.OSCUD_hi_size; + task_info.gd_base = gd_lo.OSGD_lo_base; + task_info.gd_size = gd_hi.OSGD_hi_size; + task_info.cut_base = cutd.CUTD_base; + /* only 1 entry for guest kernel: cui #0 */ + task_info.cut_size = sizeof(e2k_cute_t) * 1; + task_info.cui = 0; + + /* + * Set hardware stacks registers + */ + BOOT_KVM_FLUSHCPU; + BOOT_KVM_WRITE_PSP_REG(psp_hi, psp_lo); + BOOT_KVM_WRITE_PCSP_REG(pcsp_hi, pcsp_lo); + + /* + * Switch User Stack registers to init kernel stack addresses. 
+ * Set stack pointer to the very begining of initial stack to collapse + * useless previous stack frames + */ + BOOT_KVM_WRITE_USBR_USD_REG_VALUE(AW(usbr), AW(usd_hi), AW(usd_lo)); + + /* + * Set guest kernel OSCUD to trap table start (only virtual copies + * of registers at memory) + */ + BOOT_KVM_WRITE_GD_LO_REG(gd_lo); + BOOT_KVM_WRITE_GD_HI_REG(gd_hi); + BOOT_KVM_COPY_WRITE_OSGD_LO_REG_VALUE(gd_lo.GD_lo_half); + BOOT_KVM_COPY_WRITE_OSGD_HI_REG_VALUE(gd_hi.GD_hi_half); + BOOT_KVM_WRITE_CUTD_REG_VALUE(cutd.CUTD_reg); + BOOT_KVM_COPY_WRITE_OSCUTD_REG_VALUE(cutd.CUTD_reg); + BOOT_KVM_WRITE_CUD_LO_REG(cud_lo); + BOOT_KVM_WRITE_CUD_HI_REG(cud_hi); + BOOT_KVM_COPY_WRITE_OSCUD_HI_REG_VALUE(cud_hi.CUD_hi_half); + /* should be set last because of the OSCUD.base is used */ + /* to convert boot-time VA<->PA */ + __E2K_WAIT_ALL; + BOOT_KVM_COPY_WRITE_OSCUD_LO_REG_VALUE(cud_lo.CUD_lo_half); + if (is_hv) { + /* set virtual CUTD/OSCUTD pointer */ + BOOT_KVM_WRITE_OSCUTD_REG_VALUE(cutd.CUTD_reg); + /* set hardware registers copies too */ + /* to enable native trap table and handlers */ + NATIVE_WRITE_OSCUD_HI_REG_VALUE(cud_hi.CUD_hi_half); + /* should be set last because of the OSCUD.base is used */ + /* to convert boot-time VA<->PA */ + __E2K_WAIT_ALL; + NATIVE_WRITE_OSCUD_LO_REG_VALUE(cud_lo.CUD_lo_half); + } + + ret = HYPERVISOR_switch_to_virt_mode(&task_info, + (void (*)(void *, void *, void *))boot_init_sequel_func, + (void *) (long) bsp, + (void *) (long) cpuid, + (void *) (long) cpus_to_sync); + if (ret) { + boot_panic("could not switch to new init kernel stacks," + "error %d\n", ret); + } + + /* guest kernel should run under hardware stacks bounds enable */ + kvm_stack_bounds_trap_enable(); + +#ifdef CONFIG_KVM_GUEST_HW_PV + boot_init_sequel_func(bsp, cpuid, cpus_to_sync); +#endif +} + +/* + * Clear kernel BSS segment in native mode + */ + +void __init boot_kvm_clear_bss(void) +{ + e2k_size_t size; + unsigned long *bss_p; + + bss_p = (unsigned long *)(&__bss_start); + 
bss_p = boot_kvm_va_to_pa(bss_p); + size = (e2k_addr_t)__bss_stop - (e2k_addr_t)__bss_start; + DebugKVM("Kernel BSS segment will be cleared from " + "physical address 0x%lx size 0x%lx\n", + bss_p, size); + boot_fast_memset(bss_p, 0, size); +} + +/* + * Sequel of process of initialization. This function is run into virtual + * space and controls termination of boot-time init and start kernel init + */ + +void __init init_kvm_terminate_boot_init(bool bsp, int cpuid) +{ + kvm_vcpu_state_t *my_vcpu_state; + + /* Set pointer of the VCPU state at table */ + /* to enable access from/to other VCPUs */ + KVM_GET_VCPU_STATE_BASE(my_vcpu_state); + vcpus_state[cpuid] = my_vcpu_state; + DebugKVM("VCPU #%d state at %px populated for other vcpus\n", + cpuid, my_vcpu_state); + + /* + * Start kernel initialization on bootstrap processor. + * Other processors will do some internal initialization and wait + * for commands from bootstrap processor. + */ + init_start_kernel_init(bsp, cpuid); + +} + +void __init +boot_kvm_parse_param(bootblock_struct_t *bootblock) +{ + boot_native_parse_param(bootblock); +} diff --git a/arch/e2k/kvm/guest/boot.h b/arch/e2k/kvm/guest/boot.h new file mode 100644 index 000000000000..8ee56e0637c2 --- /dev/null +++ b/arch/e2k/kvm/guest/boot.h @@ -0,0 +1,47 @@ +/* + * KVM boot-time initialization + * + * Copyright (C) 2018 MCST + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ */ + +#ifndef __E2K_KVM_GUEST_BOOT_H +#define __E2K_KVM_GUEST_BOOT_H + +#include + +#include +#include + +typedef struct vram_area { + e2k_addr_t base_addr; /* base physical address of the start */ + /* page of the bank */ + e2k_size_t pages_num; /* total number of pages in the bank */ +} vram_area_t; + +extern vram_area_t vram_areas[KVM_MAX_VRAM_AREAS]; +extern int vram_areas_num; + +#define boot_vram_areas \ + boot_vp_to_pp((vram_area_t *)vram_areas) +#define boot_vram_areas_num \ + boot_get_vo_value(vram_areas_num) + +extern int __init boot_kvm_probe_vram_memory(boot_info_t *bootblock); +extern void __init boot_kvm_map_vram_memory(boot_info_t *boot_info); + +#ifdef CONFIG_KVM_SHADOW_PT +static inline void __init boot_host_kernel_image(bool bsp) +{ + /* nothing to do */ + return; +} +#define populate_image_on_host false +#else /* ! CONFIG_KVM_SHADOW_PT */ +extern void __init boot_host_kernel_image(bool bsp); +#define populate_image_on_host true +#endif /* CONFIG_KVM_SHADOW_PT */ + +#endif /* __E2K_KVM_GUEST_BOOT_H */ diff --git a/arch/e2k/kvm/guest/boot_e2k_virt.c b/arch/e2k/kvm/guest/boot_e2k_virt.c new file mode 100644 index 000000000000..b345df569566 --- /dev/null +++ b/arch/e2k/kvm/guest/boot_e2k_virt.c @@ -0,0 +1,59 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "cpu.h" +#include "time.h" + +#undef DEBUG_KVM_SHUTDOWN_MODE +#undef DebugKVMSH +#define DEBUG_KVM_SHUTDOWN_MODE 1 /* KVM shutdown debugging */ +#define DebugKVMSH(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_SHUTDOWN_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +void __init +boot_e2k_virt_setup_arch(void) +{ + boot_machine.x86_io_area_base = E2K_VIRT_CPU_X86_IO_AREA_PHYS_BASE; + boot_machine.guest.rev = E2K_VIRT_CPU_REVISION; + boot_machine.guest.iset_ver = E2K_VIRT_CPU_ISET; + boot_machine.max_nr_node_cpus = E2K_VIRT_MAX_NR_NODE_CPUS; + boot_machine.nr_node_cpus = E2K_VIRT_NR_NODE_CPUS; + boot_machine.node_iolinks = E2K_VIRT_NODE_IOLINKS; +} + +/* + * Panicing. + */ + +void boot_kvm_panic(const char *fmt_v, ...) +{ + register va_list ap; + + va_start(ap, fmt_v); + boot_vprintk(fmt_v, ap); + va_end(ap); + HYPERVISOR_kvm_shutdown("boot-time panic", KVM_SHUTDOWN_PANIC); +} diff --git a/arch/e2k/kvm/guest/boot_io.c b/arch/e2k/kvm/guest/boot_io.c new file mode 100644 index 000000000000..94af88b54a91 --- /dev/null +++ b/arch/e2k/kvm/guest/boot_io.c @@ -0,0 +1,122 @@ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#undef DEBUG_BOOT_KVM_IO_MODE +#undef DebugBKVMIO +#define DEBUG_BOOT_KVM_IO_MODE 0 /* boot-time kernel virt machine */ + /* IO debugging */ +#define DebugBKVMIO(fmt, args...) 
\ +({ \ + if (DEBUG_BOOT_KVM_IO_MODE) \ + do_boot_printk("%s(): " fmt, __func__, ##args); \ +}) + +static unsigned long +boot_kvm_guest_mmio(void __iomem *mmio_addr, u64 value, u8 size, + u8 is_write) +{ + u64 phys_addr = (u64)mmio_addr; + unsigned long data[1]; + int ret; + + if (is_write) { + data[0] = value; + DebugBKVMIO("data to write 0x%lx size %d to addr 0x%lx\n", + data[0], size, phys_addr); + } + ret = HYPERVISOR_guest_mmio_request(phys_addr, data, size, is_write); + if (!is_write) { + DebugBKVMIO("read data 0x%lx size %d from addr 0x%lx\n", + data[0], size, phys_addr); + } + return data[0]; +} + +void boot_kvm_writeb(u8 b, void __iomem *addr) +{ + DebugBKVMIO("started to write byte 0x%02x to MMIO addr %px\n", + b, addr); + if (BOOT_IS_HV_GM()) + return boot_native_writeb(b, addr); + + boot_kvm_guest_mmio(addr, b, 1, 1); +} + +void boot_kvm_writew(u16 w, void __iomem *addr) +{ + DebugBKVMIO("started to write halfword 0x%04x to MMIO addr %px\n", + w, addr); + if (BOOT_IS_HV_GM()) + return boot_native_writew(w, addr); + + boot_kvm_guest_mmio(addr, w, 2, 1); +} + +void boot_kvm_writel(u32 l, void __iomem *addr) +{ + DebugBKVMIO("started to write word 0x%08x to MMIO addr %px\n", + l, addr); + if (BOOT_IS_HV_GM()) + return boot_native_writel(l, addr); + + boot_kvm_guest_mmio(addr, l, 4, 1); +} + +void boot_kvm_writell(u64 q, void __iomem *addr) +{ + DebugBKVMIO("started to write long word 0x%016lx to MMIO addr %px\n", + q, addr); + if (BOOT_IS_HV_GM()) + return boot_native_writeq(q, addr); + + boot_kvm_guest_mmio(addr, q, 8, 1); +} + +u8 boot_kvm_readb(void __iomem *addr) +{ + DebugBKVMIO("started to read byte from MMIO addr %px\n", addr); + if (BOOT_IS_HV_GM()) + return boot_native_readb(addr); + + return boot_kvm_guest_mmio(addr, 0, 1, 0); +} + +u16 boot_kvm_readw(void __iomem *addr) +{ + DebugBKVMIO("started to read halfword from MMIO addr %px\n", addr); + if (BOOT_IS_HV_GM()) + return boot_native_readw(addr); + + return boot_kvm_guest_mmio(addr, 0, 2, 0); 
+} + +u32 boot_kvm_readl(void __iomem *addr) +{ + DebugBKVMIO("started to read word from MMIO addr %px\n", addr); + if (BOOT_IS_HV_GM()) + return boot_native_readl(addr); + + return boot_kvm_guest_mmio(addr, 0, 4, 0); +} + +u64 boot_kvm_readll(void __iomem *addr) +{ + DebugBKVMIO("started to read long word from MMIO addr %px\n", addr); + if (BOOT_IS_HV_GM()) + return boot_native_readq(addr); + + return boot_kvm_guest_mmio(addr, 0, 8, 0); +} diff --git a/arch/e2k/kvm/guest/boot_spinlock.c b/arch/e2k/kvm/guest/boot_spinlock.c new file mode 100644 index 000000000000..a7c30984794b --- /dev/null +++ b/arch/e2k/kvm/guest/boot_spinlock.c @@ -0,0 +1,89 @@ +/* + * This file implements the arch-dependent parts of kvm guest + * boot-time spinlock()/spinunlock() slow part + * + * Copyright 2020 MCST + */ + +#include +#include +#include +#include + +#include +#include + +#include "cpu.h" + +#undef DEBUG_BOOT_SPINLOCK_MODE +#undef DebugBSL +#define DEBUG_BOOT_SPINLOCK_MODE 0 /* boot-time spinlocks */ + /* debugging */ +#define DebugBSL(fmt, args...) 
\ +({ \ + if (DEBUG_BOOT_SPINLOCK_MODE) \ + do_boot_printk("%s(): " fmt, __func__, ##args); \ +}) + +/* + * Slowpath of a guest spinlock: goto hypervisor to wait for spin unlocking + */ + +static inline void do_arch_boot_spin_lock_slow(void *lock, bool check_unlock) +{ + int err; + + DebugBSL("started on vcpu #%d for lock %px\n", + boot_smp_processor_id(), lock); + err = HYPERVISOR_boot_spin_lock_slow(lock, check_unlock); + if (err) { + BOOT_BUG("HYPERVISOR_guest_boot_spin_lock_slow() failed " + "(error %d)\n", + err); + } +} + +void kvm_arch_boot_spin_lock_slow(void *lock) +{ + do_arch_boot_spin_lock_slow(lock, true); +} + +static inline void do_arch_boot_spin_locked_slow(void *lock) +{ + int err; + + DebugBSL("%s (%d) started for lock %px\n", + boot_smp_processor_id(), lock); + err = HYPERVISOR_boot_spin_locked_slow(lock); + if (err) { + BOOT_BUG("HYPERVISOR_guest_spin_locked_slow() failed " + "(error %d)\n", + err); + } +} +void kvm_arch_boot_spin_locked_slow(void *lock) +{ + do_arch_boot_spin_locked_slow(lock); +} + +/* + * Slowpath of a guest spinunlock: goto hypervisor to wake up proccesses + * which are waiting on this lock + */ +static inline void do_arch_boot_spin_unlock_slow(void *lock, bool add_to_unlock) +{ + int err; + + DebugBSL("%s (%d) started for lock %px add to unlock list %d\n", + boot_smp_processor_id(), lock, add_to_unlock); + err = HYPERVISOR_boot_spin_unlock_slow(lock, add_to_unlock); + if (err) { + BOOT_BUG("HYPERVISOR_guest_boot_spin_unlock_slow() failed " + "(error %d)\n", + err); + } +} +void kvm_arch_boot_spin_unlock_slow(void *lock) +{ + do_arch_boot_spin_unlock_slow(lock, true); +} diff --git a/arch/e2k/kvm/guest/boot_string.c b/arch/e2k/kvm/guest/boot_string.c new file mode 100644 index 000000000000..e9fc08ec890a --- /dev/null +++ b/arch/e2k/kvm/guest/boot_string.c @@ -0,0 +1,61 @@ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#ifdef DEBUG_GUEST_STRINGS +/* + * optimized copy memory 
along with tags + * using privileged LD/ST recovery operations + */ +unsigned long +boot_kvm_fast_tagged_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + long ret; + + if (likely(BOOT_IS_HV_GM())) + ret = boot_native_fast_tagged_memory_copy(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); + else + ret = kvm_do_fast_tagged_memory_copy(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); + if (ret) { + do_boot_printk("%s(): could not copy memory from %px to %px, " + "size 0x%lx, error %ld\n", + __func__, src, dst, len, ret); + } + return ret; +} +EXPORT_SYMBOL(boot_kvm_fast_tagged_memory_copy); + +unsigned long +boot_kvm_fast_tagged_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + long ret = 0; + + if (likely(BOOT_IS_HV_GM())) + boot_native_fast_tagged_memory_set(addr, val, tag, len, + strd_opcode); + else + ret = kvm_do_fast_tagged_memory_set(addr, val, tag, len, + strd_opcode); + if (ret) { + do_boot_printk("%s() could not set memory " + "from %px by 0x%llx_0x%llx, size 0x%lx, error %ld\n", + __func__, addr, val, tag, len, ret); + } + return ret; +} +EXPORT_SYMBOL(boot_kvm_fast_tagged_memory_set); + +#endif /* DEBUG_GUEST_STRINGS */ diff --git a/arch/e2k/kvm/guest/boot_vram.c b/arch/e2k/kvm/guest/boot_vram.c new file mode 100644 index 000000000000..cfebd266c99e --- /dev/null +++ b/arch/e2k/kvm/guest/boot_vram.c @@ -0,0 +1,195 @@ +/* + * + * Boot-time initialization of Virtual RAM support. + * VRAM is memory areas into physical memory address range, which + * can be allocated by KVM guest kernel launcher (for example QEMU). 
+ * These areas can not + * intersect with real physical memory addresses range; + * be used as physical memory + * VRAMs are special areas shared by QEMU, host and guest as extensions + * to support virtual machines + * + * Copyright (C) 2018 MCST + */ + +#include +#include +#include +#include +#include +#include + +#include "boot.h" + +#undef DEBUG_BOOT_MODE +#undef boot_printk +#define DEBUG_BOOT_MODE 1 /* Boot process */ +#define boot_printk(fmt, args...) \ +({ \ + if (DEBUG_BOOT_MODE) \ + do_boot_printk("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_NUMA_MODE +#undef DebugNUMA +#define DEBUG_NUMA_MODE 0 /* NUMA mode debugging */ +#define DebugNUMA(fmt, args...) \ +({ \ + if (DEBUG_NUMA_MODE) \ + do_boot_printk("%s(): " fmt, __func__, ##args); \ +}) + +/* + * The next structure contains list of descriptors of the memory areas + * used by boot-time initialization. + * All the used memory areas enumerate in this structure. If a some new + * area will be used, then it should be added to the list of already known ones. + */ + +vram_area_t __initdata vram_areas[KVM_MAX_VRAM_AREAS]; +int __initdata vram_areas_num = 0; + +#ifdef CONFIG_NUMA +static int __initdata node_vram_mapped[MAX_NUMNODES] = { 0 }; +#define boot_node_vram_mapped \ + boot_get_vo_value(node_vram_mapped[boot_numa_node_id()]) +#else /* ! 
CONFIG_NUMA */ +#define boot_node_vram_mapped 0 +#endif /* CONFIG_NUMA */ + +static void __init +boot_create_vram_area(vram_area_t *vram_area, + e2k_addr_t bank_start, e2k_size_t bank_size) +{ + vram_area->base_addr = bank_start; + vram_area->pages_num = bank_size >> PAGE_SHIFT; +} + +int __init +boot_kvm_probe_vram_memory(boot_info_t *bootblock) +{ + bank_info_t *vram_banks; + bank_info_t *bank_info; + vram_area_t *vrams; + int bank = 0; + + vram_banks = bootblock->bios.banks_ex; + vrams = boot_vram_areas; + + for (bank = 0; bank < L_MAX_PHYS_BANKS_EX; bank++) { + e2k_size_t bank_size; + e2k_addr_t bank_start; + vram_area_t *vram; + + bank_info = &vram_banks[bank]; + bank_start = bank_info->address; + bank_size = bank_info->size; + + if (bank_size == 0) + /* no more VRAM banks */ + break; + + if (bank >= KVM_MAX_VRAM_AREAS) { + do_boot_printk("Number of VRAM banks too many, only " + "%d banks is allowed, ignore other\n", + KVM_MAX_VRAM_AREAS); + break; + } + + if ((bank_size & (PAGE_SIZE - 1)) != 0) { + BOOT_BUG("VRAM bank #%d size 0x%lx is not page aligned", + bank, bank_size); + bank_size &= ~(PAGE_SIZE - 1); + } + if ((bank_start & (PAGE_SIZE - 1)) != 0) { + BOOT_BUG("VRAM bank #%d base address 0x%lx is not " + "page aligned", + bank, bank_start); + bank_size += (bank_start & (PAGE_SIZE - 1)); + bank_start &= ~(PAGE_SIZE - 1); + } + + vram = &vrams[bank]; + boot_create_vram_area(vram, bank_start, bank_size); + boot_printk("VRAM bank #%d : base 0x%lx, size 0x%lx " + "(0x%lx pages)\n", + bank, vram->base_addr, vram->pages_num << PAGE_SHIFT, + vram->pages_num); + } + if (bank > 0) { + boot_vram_areas_num = bank; + boot_printk("Created %d VRAM bank(s)\n", bank); + } else { + BOOT_BUG("Could not find or create VRAM banks"); + } + + return bank; +} + +static void __init boot_map_vram_area(vram_area_t *vram) +{ + e2k_addr_t area_phys_base; + e2k_size_t area_size; + e2k_addr_t area_virt_base; + int ret; + + area_phys_base = boot_pa_to_vpa(vram->base_addr); + area_size 
= vram->pages_num << PAGE_SHIFT; + area_virt_base = (e2k_addr_t)__boot_va(boot_vpa_to_pa(area_phys_base)); + ret = boot_map_phys_area(area_phys_base, area_size, + area_virt_base, + PAGE_KERNEL, E2K_SMALL_PAGE_SIZE, + false, /* do not ignore if data mapping virtual */ + /* area is busy */ + false); /* populate map on host? */ + if (ret <= 0) { + BOOT_BUG("Could not map VRAM area: " + "base addr 0x%lx size 0x%lx page size 0x%x to " + "virtual addr 0x%lx", + area_phys_base, area_size, E2K_SMALL_PAGE_SIZE, + area_virt_base); + } + boot_printk("The VRAM area: " + "base addr 0x%lx size 0x%lx is mapped to 0x%x virtual " + "page(s) base addr 0x%lx page size 0x%x\n", + area_phys_base, area_size, ret, area_virt_base, + E2K_SMALL_PAGE_SIZE); +} + +void __init boot_kvm_map_vram_memory(boot_info_t *boot_info) +{ + vram_area_t *vrams; + int bank; + + /* + * Map the available VRAM areas into virtual space to direct + * access to the memory using kernel pa <-> va translations + * VRAM are mapped to virtual space starting from 'PAGE_OFFSET', + * same as physical memory pages + */ + + vrams = boot_vram_areas; + + if (!BOOT_TEST_AND_SET_NODE_LOCK(boot_node_map_lock, + boot_node_vram_mapped)) { +#ifdef CONFIG_NUMA + if (!boot_node_has_dup_kernel()) { + DebugNUMA("boot_kvm_map_vram_memory() node " + "has not own page table and will use " + "node #%d VRAM areas mapping\n", + boot_my_node_dup_kernel_nid); + goto no_mem_mapping; + } else { + DebugNUMA("boot_kvm_map_vram_memory() will map all " + "VRAM areas\n"); + } +#endif /* CONFIG_NUMA */ + for (bank = 0; bank < boot_vram_areas_num; bank++) { + boot_map_vram_area(&vrams[bank]); + } +#ifdef CONFIG_NUMA +no_mem_mapping: +#endif /* CONFIG_NUMA */ + BOOT_NODE_UNLOCK(boot_node_map_lock, boot_node_vram_mapped); + } +} diff --git a/arch/e2k/kvm/guest/cepic.c b/arch/e2k/kvm/guest/cepic.c new file mode 100644 index 000000000000..699f369bf4cf --- /dev/null +++ b/arch/e2k/kvm/guest/cepic.c @@ -0,0 +1,204 @@ +/* + * KVM guest virtual IRQs 
implementation. + */ +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "cpu.h" +#include "irq.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 1 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_THREAD_IRQ_MODE +#undef DebugKVMTI +#define DEBUG_KVM_THREAD_IRQ_MODE 0 /* kernel virtual IRQ thread */ + /* debugging */ +#define DebugKVMTI(fmt, args...) \ +({ \ + if (DEBUG_KVM_THREAD_IRQ_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_CEPIC_IRQ_MODE +#undef DebugCEI +#define DEBUG_KVM_CEPIC_IRQ_MODE 0 /* CEPIC IRQ thread */ + /* debugging */ +#define DebugCEI(fmt, args...) \ +({ \ + if (DEBUG_KVM_CEPIC_IRQ_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_DIRECT_IRQ_MODE +#undef DebugDIRQ +#define DEBUG_DIRECT_IRQ_MODE 0 /* direct IRQ injection debugging */ +#define DebugDIRQ(fmt, args...) \ +({ \ + if (DEBUG_DIRECT_IRQ_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +static bool bsp_direct_virq_cepic; + +static int kvm_cepic_virq_panic(struct notifier_block *this, + unsigned long event, void *ptr); + +static DEFINE_PER_CPU(struct notifier_block, resume_block_cpu); + +/* + * Basic functions to access to CEPIC state fields on guest. 
+ */ +static inline int kvm_read_cepic_virqs_num(void) +{ + kvm_epic_state_t *cepic; + + cepic = kvm_vcpu_cepic_state(); + return atomic_read(&cepic->virqs_num); +} +static inline void kvm_dec_cepic_virqs_num(void) +{ + kvm_epic_state_t *cepic; + + cepic = kvm_vcpu_cepic_state(); + atomic_dec(&cepic->virqs_num); +} +static inline bool kvm_dec_and_test_cepic_virqs_num(void) +{ + kvm_epic_state_t *cepic; + + cepic = kvm_vcpu_cepic_state(); + return atomic_dec_and_test(&cepic->virqs_num); +} + +/* + * CEPIC of guest VCPU virtualized by host (see arch/e2k/kvm/cepic.c) + * Any virtual IRQ received by CEPIC on host, which must be handled + * by guest, causes virtual IRQ type of KVM_VIRQ_CEPIC and wake up + * special thread on special VIRQ VCPU. This thread wakes up the thread + * on real VCPU which starts this handler + */ +static irqreturn_t kvm_cepic_interrupt(int irq, void *dev_id) +{ + struct pt_regs *regs; + long cpu = (long)dev_id; + irqreturn_t ret; + unsigned long flags; + + DebugCEI("process %s (%d): started for CEPIC VIRQ #%d on CPU #%ld\n", + current->comm, current->pid, irq, cpu); + if (cpu != smp_processor_id()) { + /* here need access to foreign CEPIC, not own */ + /* Update CEPIC base address to enable such access */ + BUG_ON(true); + } + raw_local_irq_save(flags); + regs = get_irq_regs(); + + ret = native_do_interrupt(regs); + + if (regs->interrupt_vector == KVM_NMI_EPIC_VECTOR) { + /* NMI IPI on guest implemented as general interrupt */ + /* with vector KVM_NMI_EPIC_VECTOR */ + /* but nmi_call_function_interrupt() has been called */ + /* under NMI disabled, so now enable NMIs */ + irq_exit(); + KVM_INIT_KERNEL_UPSR_REG(false, /* enable IRQs */ + false /* disable NMIs */); + } + raw_local_irq_restore(flags); + + DebugKVMTI("CEPIC VIRQ #%d on CPU #%ld handled\n", + irq, cpu); + return ret; +} + +static int kvm_do_setup_cepic_virq(bool bsp, int cpu) +{ + const char *name; + struct notifier_block *resume_block; + unsigned long irqflags; + int ret; + + if 
(!paravirt_enabled()) + return 0; + pr_info("installing KVM guest CEPIC VIRQ on CPU %d\n", + cpu); + + name = kasprintf(GFP_KERNEL, "cepic/%d", cpu); + if (!name) + name = ""; + + irqflags = kvm_get_default_virq_flags(KVM_VIRQ_CEPIC); + + if (irqflags & BY_DIRECT_INJ_VIRQ_FLAG) { + BUG_ON(cpu != smp_processor_id()); + ret = kvm_request_virq(KVM_VIRQ_CEPIC, + &kvm_cepic_interrupt, cpu, + BY_DIRECT_INJ_VIRQ_FLAG, + name, (void *)cpu); + if (ret == 0) { + if (bsp) + bsp_direct_virq_cepic = true; + goto success; + } + DebugDIRQ("could not request direct CEPIC VIRQ %s injection\n", + name); + } else { + /* unknown mode to request VIRQ delivery */ + BUG_ON(true); + ret = -EINVAL; + } + if (ret) { + panic("could not register CEPIC VIRQ #%d for CPU #%d\n", + KVM_VIRQ_CEPIC, cpu); + } + +success: + resume_block = &per_cpu(resume_block_cpu, cpu); + resume_block->notifier_call = kvm_cepic_virq_panic; + resume_block->next = NULL; + atomic_notifier_chain_register(&panic_notifier_list, resume_block); + + if (bsp) { + /* CEPIC support on guest is now ready, so enable */ + /* EPIC timer and set up the CEPIC timer on boot CPU */ + disable_epic_timer = false; + setup_boot_epic_clock(); + } + + DebugKVM("KVM guest CEPIC VIRQ on CPU %d installed\n", cpu); + return ret; +} + +__init int kvm_setup_boot_cepic_virq(void) +{ + return kvm_do_setup_cepic_virq(true, raw_smp_processor_id()); +} + +static int +kvm_cepic_virq_panic(struct notifier_block *this, + unsigned long event, void *ptr) +{ + return NOTIFY_DONE; +} diff --git a/arch/e2k/kvm/guest/cpu.h b/arch/e2k/kvm/guest/cpu.h new file mode 100644 index 000000000000..3090c6f1dd56 --- /dev/null +++ b/arch/e2k/kvm/guest/cpu.h @@ -0,0 +1,186 @@ +/* + * Kernel-based Virtual Machine driver for Linux + * + * This header defines e2l CPU architecture specific interfaces + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ * + */ + +#ifndef __E2K_CPU_GUEST_H +#define __E2K_CPU_GUEST_H + +#include + +#include +#include +#include +#include + +#define VCPU_DEBUG_MODE_ON (kvm_get_vcpu_state()->debug_mode_on) + +/* + * Basic functions accessing virtual CPU running state info on guest. + */ +#define GUEST_RUNSTATE_INFO_BASE (offsetof(kvm_vcpu_state_t, runstate)) + +/* own VCPU runstate info: directly accessible through global registers */ +static inline kvm_runstate_info_t *kvm_vcpu_runstate_info(void) +{ + unsigned long vcpu_base; + + KVM_GET_VCPU_STATE_BASE(vcpu_base); + return (kvm_runstate_info_t *)(vcpu_base + GUEST_RUNSTATE_INFO_BASE); +} +/* other VCPU runstate info: accessible through global pointers table */ +static inline kvm_runstate_info_t *kvm_the_vcpu_runstate_info(long vcpu_id) +{ + kvm_vcpu_state_t *vcpu_state = kvm_get_the_vcpu_state(vcpu_id); + + return &vcpu_state->runstate; +} + +#define DEBUG_CHECK_VCPU_ID + +#ifdef DEBUG_CHECK_VCPU_ID +static inline void kvm_check_vcpu_id(void) +{ + if (unlikely(raw_smp_processor_id() != KVM_READ_VCPU_ID())) { + pr_err("%s(): different smp processor id #%d and " + "VCPU id #%d\n", + __func__, raw_smp_processor_id(), KVM_READ_VCPU_ID()); + BUG_ON(raw_smp_processor_id() != KVM_READ_VCPU_ID()); + } +} +#else /* !DEBUG_CHECK_VCPU_ID */ +static inline void kvm_check_vcpu_id(void) +{ +} +#endif /* DEBUG_CHECK_VCPU_ID */ + +/* Host info access */ +static inline int kvm_vcpu_host_machine_id(void) +{ + kvm_host_info_t *host_info; + + host_info = kvm_get_host_info(); + return host_info->mach_id; +} +static inline int kvm_vcpu_host_cpu_iset(void) +{ + kvm_host_info_t *host_info; + + host_info = kvm_get_host_info(); + return host_info->cpu_iset; +} +static inline int kvm_vcpu_host_cpu_rev(void) +{ + kvm_host_info_t *host_info; + + host_info = kvm_get_host_info(); + return host_info->cpu_rev; +} +static inline bool kvm_vcpu_host_mmu_support_pt_v6(void) +{ + kvm_host_info_t *host_info; + + host_info = kvm_get_host_info(); + return 
host_info->mmu_support_pt_v6; +} +static inline bool kvm_host_support_hw_hc(void) +{ + return kvm_vcpu_host_support_hw_hc(); +} +static inline bool kvm_vcpu_host_is_hv(void) +{ + kvm_host_info_t *host_info; + unsigned long features, hv_mask; + + host_info = kvm_get_host_info(); + features = host_info->features; + hv_mask = features & (KVM_FEAT_HV_CPU_MASK | KVM_FEAT_HV_MMU_MASK); + + return hv_mask == (KVM_FEAT_HV_CPU_MASK | KVM_FEAT_HV_MMU_MASK); +} +static inline kvm_time_t *kvm_vcpu_time_info(void) +{ + kvm_host_info_t *host_info; + + host_info = kvm_get_host_info(); + return &host_info->time; +} + +/* + * Basic functions to access to local APIC state on guest. + */ +#define GUEST_LAPIC_STATE_BASE (offsetof(kvm_vcpu_state_t, lapic)) + +static inline kvm_apic_state_t *kvm_vcpu_lapic_state(void) +{ + unsigned long vcpu_base; + + KVM_GET_VCPU_STATE_BASE(vcpu_base); + return (kvm_apic_state_t *)(vcpu_base + GUEST_LAPIC_STATE_BASE); +} +static inline atomic_t *kvm_get_lapic_virqs_num(void) +{ + kvm_apic_state_t *lapic; + + lapic = kvm_vcpu_lapic_state(); + return &lapic->virqs_num; +} + +/* + * Basic functions to access to CEPIC state on guest. + */ +#define GUEST_CEPIC_STATE_BASE (offsetof(kvm_vcpu_state_t, cepic)) + +static inline kvm_epic_state_t *kvm_vcpu_cepic_state(void) +{ + unsigned long vcpu_base; + + KVM_GET_VCPU_STATE_BASE(vcpu_base); + return (kvm_epic_state_t *)(vcpu_base + GUEST_CEPIC_STATE_BASE); +} +static inline atomic_t *kvm_get_cepic_virqs_num(void) +{ + kvm_epic_state_t *cepic; + + cepic = kvm_vcpu_cepic_state(); + return &cepic->virqs_num; +} + +/* + * Basic functions to access to VIRQs state on guest. 
+ */ +#define GUEST_VIRQs_STATE_BASE (offsetof(kvm_vcpu_state_t, virqs)) + +static inline kvm_virqs_state_t *kvm_vcpu_virqs_state(void) +{ + unsigned long vcpu_base; + + KVM_GET_VCPU_STATE_BASE(vcpu_base); + return (kvm_virqs_state_t *)(vcpu_base + GUEST_VIRQs_STATE_BASE); +} +static inline atomic_t *kvm_get_timer_virqs_num(void) +{ + kvm_virqs_state_t *virqs; + + virqs = kvm_vcpu_virqs_state(); + return &virqs->timer_virqs_num; +} +static inline atomic_t *kvm_get_hvc_virqs_num(void) +{ + kvm_virqs_state_t *virqs; + + virqs = kvm_vcpu_virqs_state(); + return &virqs->hvc_virqs_num; +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized guest kernel */ +extern pv_v2p_ops_t kvm_v2p_ops; +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __E2K_CPU_GUEST_H */ diff --git a/arch/e2k/kvm/guest/e2k_virt.c b/arch/e2k/kvm/guest/e2k_virt.c new file mode 100644 index 000000000000..fdb1eaa76118 --- /dev/null +++ b/arch/e2k/kvm/guest/e2k_virt.c @@ -0,0 +1,424 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include "time.h" +#include "pic.h" + +#undef DEBUG_KVM_SHUTDOWN_MODE +#undef DebugKVMSH +#define DEBUG_KVM_SHUTDOWN_MODE 1 /* KVM shutdown debugging */ +#define DebugKVMSH(fmt, args...) \ +({ \ + if (DEBUG_KVM_SHUTDOWN_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +unsigned int guest_machine_id = -1; +EXPORT_SYMBOL(guest_machine_id); + +static int e2k_virt_show_cpuinfo(struct seq_file *m, void *v); + +extern struct exception_table_entry __start___ex_table[]; +extern struct exception_table_entry __stop___ex_table[]; +extern u32 __initdata __visible main_extable_sort_needed; +static void __init kvm_sort_main_extable(void); + +#define MACH_TYPE_NAME_E2K_VIRT 0 +#define MACH_TYPE_NAME_E2K_HW_VIRT 1 + +/* + * Machine type names. 
+ * Machine name can be retrieved from /proc/cpuinfo as model name. + */ +static char *kvm_cpu_type_name[] = { + "kvm-pv", + "kvm-hv-pv", +}; +static char *kvm_mach_type_name[] = { + "kvm-para-virt", + "kvm-hw-para-virt", +}; + +/* + * mach_type_id variable is set in setup_arch() function. + */ +static int kvm_mach_type_id = -1; + +/* + * Function to get name of virtual machine type. + * Must be used after setup_arch(). + */ +char *kvm_get_cpu_type_name(void) +{ + return kvm_cpu_type_name[kvm_mach_type_id]; +} +char *kvm_get_mach_type_name(void) +{ + return kvm_mach_type_name[kvm_mach_type_id]; +} +void kvm_set_mach_type_id(void) +{ + if (IS_HV_GM()) { + kvm_mach_type_id = MACH_TYPE_NAME_E2K_HW_VIRT; + } else { + kvm_mach_type_id = MACH_TYPE_NAME_E2K_VIRT; + } + native_set_mach_type_id(); +} + +static void +e2k_virt_setup_cpu_info(cpuinfo_e2k_t *cpu_info) +{ + e2k_idr_t IDR; + + IDR = read_IDR_reg(); + strncpy(cpu_info->vendor, ELBRUS_CPU_VENDOR, 16); + cpu_info->family = E2K_VIRT_CPU_FAMILY; + cpu_info->model = IDR.IDR_mdl; + cpu_info->revision = IDR.IDR_rev; +} + +void e2k_virt_shutdown(void) +{ + kvm_time_shutdown(); +/* + if (current->mm && !test_ts_flag(TS_REMAP_HW_STACKS_TO_KERNEL)) { + set_ts_flag(TS_REMAP_HW_STACKS_TO_KERNEL); + deactivate_mm(current, current->mm); + clear_ts_flag(TS_REMAP_HW_STACKS_TO_KERNEL); + } + */ + pr_err("%s(): is not implemented or deleted\n", __func__); +} + +/* + * The SHUTDOWN hypercall takes a string to describe what's happening, and + * an argument which says whether this to restart (reboot) the Guest or not. + * + * Note that the Host always prefers that the Guest speak in physical addresses + * rather than virtual addresses, so we use __pa() here. 
+ */ +void e2k_virt_power_off(void) +{ + DebugKVMSH("started on %s (%d)\n", current->comm, current->pid); + e2k_virt_shutdown(); + HYPERVISOR_kvm_shutdown("KVM Power down", KVM_SHUTDOWN_POWEROFF); +} + +/* + * Rebooting also tells the Host we're finished, but the RESTART flag tells the + * Launcher to reboot us. + */ +static void e2k_virt_restart_machine(char *reason) +{ + if (reason == NULL) + reason = "Restarting system..."; + DebugKVMSH("started to %s on %s (%d)\n", + reason, current->comm, current->pid); + disable_nonboot_cpus(); + e2k_virt_shutdown(); + HYPERVISOR_kvm_shutdown(reason, KVM_SHUTDOWN_RESTART); +} +static void e2k_virt_reset_machine(char *reason) +{ + DebugKVMSH("started on %s (%d)\n", current->comm, current->pid); + e2k_virt_restart_machine("KVM reset"); +} + +/* + * Panicing. + */ + +#define KVM_PANIC_TIMER_STEP 100 +#define KVM_PANIC_BLINK_SPD 6 +#define KVM_PANIC_TIMEOUT 1 + +static long no_blink(int state) +{ + return 0; +} + +static int kvm_panic(struct notifier_block *nb, unsigned long event, void *msg) +{ + long i, i_next = 0; + int state = 0; + + DebugKVMSH("started: %s\n", (char *)msg); + host_printk("%s\n", msg); + host_dump_stack(); + + /* + * Delay some times before rebooting the guest to wait + * for flush to console all important messages of kernel + * Here can't be used the "normal" timers since kernel just panicked. + */ + suppress_printk = 1; + if (!panic_blink) + panic_blink = no_blink; + local_irq_enable(); + for (i = 0; i < KVM_PANIC_TIMEOUT * 1000; i += KVM_PANIC_TIMER_STEP) { + touch_softlockup_watchdog(); + if (i >= i_next) { + i += panic_blink(state ^= 1); + i_next = i + 3600 / KVM_PANIC_BLINK_SPD; + } + msleep_interruptible(KVM_PANIC_TIMER_STEP); + } + + e2k_virt_shutdown(); + HYPERVISOR_kvm_shutdown(msg, KVM_SHUTDOWN_PANIC); + /* The hypercall won't return, but to keep gcc happy, we're "done". 
*/ + return NOTIFY_DONE; +} + +struct notifier_block kvm_paniced = { + .notifier_call = kvm_panic +}; + +void __init +e2k_virt_setup_arch(void) +{ + machine.setup_cpu_info = e2k_virt_setup_cpu_info; + kvm_sort_main_extable(); +} + +int e2k_virt_get_vector_apic(void) +{ + int vector; + + vector = arch_apic_read(APIC_VECT); + if (vector < 0) + return vector; + + vector = APIC_VECT_VECTOR(vector); + if (vector == KVM_NMI_APIC_VECTOR) { + /* on guest NMI IPI implemented as general Local APIC */ + /* interrupt with vector KVM_NMI_APIC_VECTOR */ + /* but nmi_call_function_interrupt() should be called */ + /* under NMI disabled */ + KVM_INIT_KERNEL_UPSR_REG(false, /* enable IRQs */ + true /* disable NMIs */); + entering_ack_irq(); + } + return vector; +} + +#ifdef CONFIG_EPIC +int e2k_virt_get_vector_epic(void) +{ + union cepic_vect_inta reg_inta; + + reg_inta.raw = epic_read_w(CEPIC_VECT_INTA); + if (reg_inta.bits.vect < 0) + return reg_inta.raw; + if (reg_inta.bits.vect == KVM_NMI_EPIC_VECTOR) { + /* on guest NMI IPI implemented as general Local APIC */ + /* interrupt with vector KVM_NMI_APIC_VECTOR */ + /* but nmi_call_function_interrupt() should be called */ + /* under NMI disabled */ + KVM_INIT_KERNEL_UPSR_REG(false, /* enable IRQs */ + true /* disable NMIs */); + irq_enter(); + ack_epic_irq(); + } + return reg_inta.raw; +} +#endif + +#ifdef CONFIG_IOHUB_DOMAINS +/* + * This e2k virtual machine has not IO link and is connect to VIRTIO controller + * through virtual North breadge, so it has only one IO bus and PCI domain # 0 + */ +void __init +e2k_virt_create_io_config(void) +{ + char src_buffer[80]; + char *buffer = src_buffer; + + iolinks_num = 1; + iohub_set(0, iolink_iohub_map); + iohub_set(0, iolink_online_iohub_map); + iolink_iohub_num = 1; + iolink_online_iohub_num = 1; + buffer += iolinkmask_scnprintf(buffer, 80, iolink_online_iohub_map); + buffer[0] = '\0'; +} +#endif /* CONFIG_IOHUB_DOMAINS */ + +void setup_guest_interface(void) +{ + machdep_t *node_mach; + 
int nid; + + for_each_node_has_dup_kernel(nid) { + node_mach = the_node_machine(nid); + if (node_mach->native_iset_ver >= E2K_ISET_V5) { + node_mach->save_gregs = save_glob_regs_v5; + node_mach->save_gregs_dirty_bgr = + save_glob_regs_dirty_bgr_v5; + node_mach->save_local_gregs = save_local_glob_regs_v5; + node_mach->restore_gregs = restore_glob_regs_v5; + node_mach->restore_local_gregs = + restore_local_glob_regs_v5; + } else if (node_mach->native_iset_ver >= E2K_ISET_V2) { + node_mach->save_gregs = save_glob_regs_v2; + node_mach->save_gregs_dirty_bgr = + save_glob_regs_dirty_bgr_v2; + node_mach->save_local_gregs = save_local_glob_regs_v2; + node_mach->restore_gregs = restore_glob_regs_v2; + node_mach->restore_local_gregs = + restore_local_glob_regs_v2; + } else { + BUG_ON(true); + } + + if (IS_HV_GM()) { + if (node_mach->native_iset_ver < E2K_ISET_V6) { + panic("%s(): native host ISET version #%d is " + "too old to support hardware " + "virtualization\n", + __func__, node_mach->native_iset_ver); + } + } else { + } + } +} + +/* + * virtual machine is not NUMA type machine + */ +void __init +e2k_virt_setup_machine(void) +{ +#ifdef CONFIG_E2K_MACHINE +#if defined(CONFIG_E2K_VIRT) +#else /* ! CONFIG_E2K_VIRT */ +# error "E2K VIRTUAL MACHINE type does not defined" +#endif +#endif /* CONFIG_E2K_MACHINE */ + machine.setup_arch = e2k_virt_setup_arch; + machine.init_IRQ = e2k_init_IRQ; + machine.restart = e2k_virt_restart_machine; + machine.power_off = e2k_virt_power_off; + machine.show_cpuinfo = e2k_virt_show_cpuinfo; + machine.halt = e2k_virt_power_off; + machine.arch_reset = e2k_virt_reset_machine; + machine.arch_halt = e2k_virt_power_off; + machine.get_irq_vector = e2k_virt_get_vector; + + setup_guest_interface(); + +#ifdef CONFIG_IOHUB_DOMAINS + e2k_virt_create_io_config(); +#endif /* CONFIG_IOHUB_DOMAINS */ + + /* Hook in our special panic hypercall code. 
*/ + atomic_notifier_chain_register(&panic_notifier_list, &kvm_paniced); +} + +void kvm_print_machine_type_info(void) +{ + int mach_type; + const char *kvm_cpu_type = "?????????????"; + const char *cpu_type = "?????????????"; + + mach_type = e2k_get_machine_type_name(machine.guest.id); + kvm_cpu_type = kvm_get_cpu_type_name(); + cpu_type = e2k_get_cpu_type_name(mach_type); + pr_cont("GUEST MACHINE TYPE: %s-%s, ID %04x, REVISION: %03x, ISET #%d " + "VIRTIO\n", + kvm_cpu_type, cpu_type, + guest_machine_id, + machine.guest.rev, machine.guest.iset_ver); + native_print_machine_type_info(); +} + +static int e2k_virt_show_cpuinfo(struct seq_file *m, void *v) +{ + struct cpuinfo_e2k *c = v; + u8 cputype; + +#ifdef CONFIG_SMP +# define cpunum (c->cpu) +#else +# define cpunum 0 +#endif + +#ifdef CONFIG_SMP + if (!cpu_online(cpunum)) + return 0; +#endif + + /* + * Boot is brain-dead and takes cpu_type from RAM, so one should use + * cpu_type from boot in borderline case only ("fake" cpu). + */ + cputype = c->model; + + seq_printf(m, "VCPU\t\t: %d\n" + "native CPUs\t: %s\n" + "vendor_id\t: %s\n" + "cpu family\t: %d\n" + "model\t\t: %d\n" + "model name\t: %s\n" + "revision\t: %u\n" + "cpu MHz\t\t: %llu.%02llu\n", + cpunum, native_get_mach_type_name(), c->vendor, + c->family, c->model, GET_CPU_TYPE_NAME(cputype), + c->revision, c->proc_freq / 1000000, + c->proc_freq % 1000000); + seq_printf(m, "bogomips\t: %lu.%02lu\n\n", + loops_per_jiffy / (500000 / HZ), + (loops_per_jiffy / (5000 / HZ)) % 100); + + return 0; +} + +/* Sort the guest kernel's built-in exception table */ +/* Guest exception table can be protected on write into kernel image, */ +/* so it need sort by 'physical' addresses of image */ +static void __init kvm_sort_main_extable(void) +{ + struct exception_table_entry *start = __start___ex_table; + struct exception_table_entry *end = __stop___ex_table; + + if (main_extable_sort_needed && start < end) { + start = (struct exception_table_entry *) + 
kernel_address_to_pva((e2k_addr_t)start); + end = (struct exception_table_entry *) + kernel_address_to_pva((e2k_addr_t)end); + pr_notice("Sorting __ex_table from %px to %px ...\n", + start, end); + sort_extable(start, end); + main_extable_sort_needed = 0; + } +} diff --git a/arch/e2k/kvm/guest/fast_syscalls.c b/arch/e2k/kvm/guest/fast_syscalls.c new file mode 100644 index 000000000000..6441004fced1 --- /dev/null +++ b/arch/e2k/kvm/guest/fast_syscalls.c @@ -0,0 +1,89 @@ +#include + +#include + + +/* + * Guest trap table cannot be placed into host kernel table because of + * host table is located in privileged area. + * FIXME: to improve locality, fast syscalls tables should be located + * in the .text section nearly to the OS entry code. + */ + +int kvm_do_fast_clock_gettime(const clockid_t which_clock, + struct timespec *tp) +{ + return DO_FAST_CLOCK_GETTIME(which_clock, tp); +} + +int kvm_fast_sys_clock_gettime(const clockid_t which_clock, + struct timespec __user *tp) +{ + return FAST_SYS_CLOCK_GETTIME(which_clock, tp); +} + +int kvm_do_fast_gettimeofday(struct timeval *tv) +{ + return DO_FAST_GETTIMEOFDAY(tv); +} + +int kvm_fast_sys_siggetmask(u64 __user *oset, size_t sigsetsize) +{ + return FAST_SYS_SIGGETMASK(oset, sigsetsize); +} + +const fast_system_call_func kvm_fast_sys_calls_table[NR_fast_syscalls] = { + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_gettimeofday), + FAST_SYSTEM_CALL_TBL_ENTRY(kvm_fast_sys_clock_gettime), + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_getcpu), + FAST_SYSTEM_CALL_TBL_ENTRY(kvm_fast_sys_siggetmask), + + /* + * the follow fast system call is not yet implemented + * FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_getcontext), + */ + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_ni_syscall), + + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_ni_syscall), + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_ni_syscall), + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_ni_syscall), +}; + +#ifdef CONFIG_COMPAT +const fast_system_call_func kvm_fast_sys_calls_table_32[NR_fast_syscalls] = { + 
COMPAT_FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_gettimeofday), + COMPAT_FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_clock_gettime), + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_getcpu), + COMPAT_FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_siggetmask), + + /* + * the follow fast system call is not yet implemented + * COMPAT_FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_getcontext), + */ + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_ni_syscall), + + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_ni_syscall), + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_ni_syscall), + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_ni_syscall), +}; +#endif + +#ifdef CONFIG_PROTECTED_MODE +const fast_system_call_func kvm_fast_sys_calls_table_128[NR_fast_syscalls] = { + PROTECTED_FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_gettimeofday), + PROTECTED_FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_clock_gettime), + PROTECTED_FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_getcpu), + PROTECTED_FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_siggetmask), + + /* + * the follow fast system call is not yet implemented + * PROTECTED_FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_getcontext), + */ + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_ni_syscall), + + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_ni_syscall), + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_ni_syscall), + FAST_SYSTEM_CALL_TBL_ENTRY(fast_sys_ni_syscall), +}; +#endif + diff --git a/arch/e2k/kvm/guest/fast_syscalls.h b/arch/e2k/kvm/guest/fast_syscalls.h new file mode 100644 index 000000000000..df538773ddf1 --- /dev/null +++ b/arch/e2k/kvm/guest/fast_syscalls.h @@ -0,0 +1,17 @@ +#ifndef _E2K_KVM_GUEST_FAST_SYSCALLS_H +#define _E2K_KVM_GUEST_FAST_SYSCALLS_H + +#include +#include + +typedef int (*kvm_fast_system_call_func)(u64 arg1, u64 arg2); + +extern const kvm_fast_system_call_func + kvm_fast_sys_calls_table[NR_fast_syscalls]; +extern const kvm_fast_system_call_func + kvm_fast_sys_calls_table_32[NR_fast_syscalls]; +extern const kvm_fast_system_call_func + kvm_fast_sys_calls_table_128[NR_fast_syscalls]; + +#endif /* _E2K_KVM_GUEST_FAST_SYSCALLS_H */ + diff --git 
a/arch/e2k/kvm/guest/host_dump_stack.c b/arch/e2k/kvm/guest/host_dump_stack.c new file mode 100644 index 000000000000..cc6a676a8beb --- /dev/null +++ b/arch/e2k/kvm/guest/host_dump_stack.c @@ -0,0 +1,918 @@ +/* + * This file based on host functions to dump stack and some other + * kernel structures. But host_printk() is used to output all these things + * In some cases it allows to avoid the breaking problems into the output + * subsystem of the kernel + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void host_print_chain_stack(struct stack_regs *regs, int show_rf_window); + +/* + * print_reg_window - print local registers from psp stack + * @window_base - pointer to the window in psp stack + * @window_size - size of the window in psp stack (in quadro registers) + * @fx - do print extensions? + */ +static void print_reg_window(u64 window_base, int window_size, + int fx, e2k_cr1_hi_t cr1_hi) +{ + int qreg, dreg, dreg_ind; + u64 *rw = (u64 *)window_base; + u64 qreg_lo, qreg_hi, ext_lo, ext_hi; + u8 tag_lo, tag_hi, tag_ext_lo, tag_ext_hi; + char brX0_name[6], brX1_name[6]; + u64 rbs, rsz, rcur; + + rbs = AS(cr1_hi).rbs; + rsz = AS(cr1_hi).rsz; + rcur = AS(cr1_hi).rcur; + + for (qreg = window_size - 1; qreg >= 0; qreg--) { + dreg_ind = qreg * (EXT_4_NR_SZ / sizeof(*rw)); + + load_value_and_tagd(&rw[dreg_ind + 0], &qreg_lo, &tag_lo); + if (machine.native_iset_ver < E2K_ISET_V5) { + load_value_and_tagd(&rw[dreg_ind + 1], + &qreg_hi, &tag_hi); + if (fx) { + ext_lo = rw[dreg_ind + 2]; + ext_hi = rw[dreg_ind + 3]; + } + } else { + load_value_and_tagd(&rw[dreg_ind + 2], + &qreg_hi, &tag_hi); + if (fx) { + load_value_and_tagd(&rw[dreg_ind + 1], + &ext_lo, &tag_ext_lo); + load_value_and_tagd(&rw[dreg_ind + 3], + &ext_hi, &tag_ext_hi); + } + } + + dreg = qreg * 2; + + /* Calculate %br[] register number */ + if (qreg >= rbs && qreg <= (rbs + 
rsz) && rsz >= rcur) { + int qbr, brX0, brX1; + + qbr = (qreg - rbs) + ((rsz + 1) - rcur); + + while (qbr > rsz) + qbr -= rsz + 1; + + brX0 = 2 * qbr; + brX1 = 2 * qbr + 1; + + snprintf(brX0_name, 7, "%sb%d/", (brX0 < 10) ? " " : + ((brX0 < 100) ? " " : ""), brX0); + snprintf(brX1_name, 7, "%sb%d/", (brX0 < 10) ? " " : + ((brX0 < 100) ? " " : ""), brX1); + } else { + memset(brX0_name, ' ', 5); + memset(brX1_name, ' ', 5); + brX0_name[5] = 0; + brX1_name[5] = 0; + } + + if (fx) { + if (machine.native_iset_ver < E2K_ISET_V5) { + host_pr_alert(" %sr%-3d: %hhx 0x%016llx " + "%04hx %sr%-3d: %hhx 0x%016llx %04hx\n", + brX0_name, dreg, tag_lo, qreg_lo, + (u16) ext_lo, brX1_name, dreg + 1, + tag_hi, qreg_hi, (u16) ext_hi); + } else { + host_pr_alert(" %sr%-3d: %hhx 0x%016llx " + "ext: %hhx %016llx\n" + " %sr%-3d: %hhx 0x%016llx ext: " + "%hhx %016llx\n", + brX1_name, dreg + 1, tag_hi, qreg_hi, + tag_ext_hi, ext_hi, brX0_name, dreg, + tag_lo, qreg_lo, tag_ext_lo, ext_lo); + } + } else { + host_pr_alert(" %sr%-3d: %hhx 0x%016llx " + "%sr%-3d: %hhx 0x%016llx\n", + brX0_name, dreg, tag_lo, qreg_lo, + brX1_name, dreg + 1, tag_hi, qreg_hi); + } + } +} + +static inline void print_predicates(e2k_cr0_lo_t cr0_lo, e2k_cr1_hi_t cr1_hi) +{ + u64 pf = AS(cr0_lo).pf; + u64 i, values = 0, tags = 0; + + for (i = 0; i < 32; i++) { + values |= (pf & (1ULL << 2 * i)) >> i; + tags |= (pf & (1ULL << (2 * i + 1))) >> (i + 1); + } + host_pr_info(" predicates[31:0] %08x ptags[31:0] %08x " + "psz %d pcur %d\n", + (u32) values, (u32) tags, + cr1_hi.CR1_hi_psz, cr1_hi.CR1_hi_pcur); +} + +u64 host_print_all_TIRs(const e2k_tir_t *TIRs, u64 nr_TIRs) +{ + e2k_tir_hi_t tir_hi; + e2k_tir_lo_t tir_lo; + u64 all_interrupts = 0; + int i; + + host_printk("TIR all registers:\n"); + for (i = nr_TIRs; i >= 0; i--) { + tir_hi = TIRs[i].TIR_hi; + tir_lo = TIRs[i].TIR_lo; + + all_interrupts |= AW(tir_hi); + + host_pr_alert("TIR.hi[%d]: 0x%016llx : exc 0x%011llx al 0x%x " + "aa 0x%x #%d\n", + i, AW(tir_hi), 
tir_hi.exc, tir_hi.al, + tir_hi.aa, tir_hi.j); + + if (tir_hi.exc) { + u64 exc = tir_hi.exc; + int nr_intrpt; + + host_pr_alert(" "); + for (nr_intrpt = __ffs64(exc); exc != 0; + exc &= ~(1UL << nr_intrpt), + nr_intrpt = __ffs64(exc)) + host_pr_cont(" %s", exc_tbl_name[nr_intrpt]); + host_pr_cont("\n"); + } + + host_pr_alert("TIR.lo[%d]: 0x%016llx : IP 0x%012llx\n", + i, tir_lo.TIR_lo_reg, tir_lo.TIR_lo_ip); + } + + return all_interrupts & (exc_all_mask | aau_exc_mask); +} + +void host_print_tc_record(const trap_cellar_t *tcellar, int num) +{ + tc_fault_type_t ftype; + tc_dst_t dst; + tc_opcode_t opcode; + u64 data; + u8 data_tag; + + AW(dst) = AS(tcellar->condition).dst; + AW(opcode) = AS(tcellar->condition).opcode; + AW(ftype) = AS(tcellar->condition).fault_type; + + load_value_and_tagd(&tcellar->data, &data, &data_tag); + /* FIXME: data has tag, but E2K_LOAD_TAGGED_DWORD() is privileged */ + /* action? guest will be trapped */ + if (!paravirt_enabled()) { + load_value_and_tagd(&tcellar->data, &data, &data_tag); + } else { + data = tcellar->data; + data_tag = 0; + } + host_printk(" record #%d: address 0x%016llx data 0x%016llx tag 0x%x\n" + " condition 0x%016llx:\n" + " dst 0x%05x: address 0x%04x, vl %d, vr %d\n" + " opcode 0x%03x: fmt 0x%02x, npsp 0x%x\n" + " store 0x%x, s_f 0x%x, mas 0x%x\n" + " root 0x%x, scal 0x%x, sru 0x%x\n" + " chan 0x%x, se 0x%x, pm 0x%x\n" + " fault_type 0x%x:\n" + " intl_res_bits = %d MLT_trap = %d\n" + " ph_pr_page = %d page_bound = %d\n" + " io_page = %d isys_page = %d\n" + " prot_page = %d priv_page = %d\n" + " illegal_page = %d nwrite_page = %d\n" + " page_miss = %d ph_bound = %d\n" + " global_sp = %d\n" + " miss_lvl 0x%x, num_align 0x%x, empt 0x%x\n" + " clw 0x%x, rcv 0x%x dst_rcv 0x%x\n", + num, + (u64)tcellar->address, data, data_tag, + (u64)AW(tcellar->condition), + (u32)AW(dst), (u32)(AS(dst).address), (u32)(AS(dst).vl), + (u32)(AS(dst).vr), + (u32)AW(opcode), (u32)(AS(opcode).fmt), (u32)(AS(opcode).npsp), + 
(u32)AS(tcellar->condition).store, + (u32)AS(tcellar->condition).s_f, + (u32)AS(tcellar->condition).mas, + (u32)AS(tcellar->condition).root, + (u32)AS(tcellar->condition).scal, + (u32)AS(tcellar->condition).sru, + (u32)AS(tcellar->condition).chan, + (u32)AS(tcellar->condition).spec, + (u32)AS(tcellar->condition).pm, + (u32)AS(tcellar->condition).fault_type, + (u32)AS(ftype).intl_res_bits, (u32)(AS(ftype).exc_mem_lock), + (u32)AS(ftype).ph_pr_page, (u32)AS(ftype).page_bound, + (u32)AS(ftype).io_page, (u32)AS(ftype).isys_page, + (u32)AS(ftype).prot_page, (u32)AS(ftype).priv_page, + (u32)AS(ftype).illegal_page, (u32)AS(ftype).nwrite_page, + (u32)AS(ftype).page_miss, (u32)AS(ftype).ph_bound, + (u32)AS(ftype).global_sp, + (u32)AS(tcellar->condition).miss_lvl, + (u32)AS(tcellar->condition).num_align, + (u32)AS(tcellar->condition).empt, + (u32)AS(tcellar->condition).clw, + (u32)AS(tcellar->condition).rcv, + (u32)AS(tcellar->condition).dst_rcv); +} + +void host_print_all_TC(const trap_cellar_t *TC, int TC_count) +{ + int i; + + if (!TC_count) + return; + + host_printk("TRAP CELLAR all %d records:\n", TC_count / 3); + for (i = 0; i < TC_count / 3; i++) + print_tc_record(&TC[i], i); +} + +/* + * Print pt_regs + */ +void host_print_pt_regs(const pt_regs_t *regs) +{ + const e2k_mem_crs_t *crs = ®s->crs; + + if (!regs) + return; + + host_pr_info(" PT_REGS value:\n"); + + host_pr_info("usd: base 0x%llx, size 0x%x, p %d, sbr: 0x%lx\n", + regs->stacks.usd_lo.USD_lo_base, + regs->stacks.usd_hi.USD_hi_size, regs->stacks.usd_lo.USD_lo_p, + regs->stacks.top); + + host_pr_info("psp: base %llx, ind %x, size %x\n", + AS(regs->stacks.psp_lo).base, + AS(regs->stacks.psp_hi).ind, AS(regs->stacks.psp_hi).size); + host_pr_info("pcsp: base %llx, ind %x, size %x\n", + AS(regs->stacks.pcsp_lo).base, + AS(regs->stacks.pcsp_hi).ind, AS(regs->stacks.pcsp_hi).size); + + host_pr_info("cr0.lo: pf 0x%llx, cr0.hi: ip 0x%llx\n", + AS(crs->cr0_lo).pf, AS(crs->cr0_hi).ip << 3); + host_pr_info("cr1.lo: 
unmie %d, nmie %d, uie %d, lw %d, sge %d, " + "ie %d, pm %d\n" + " cuir 0x%x, wbs 0x%x, wpsz 0x%x, wfx %d, ss %d, " + "ein %d\n", + AS(crs->cr1_lo).unmie, AS(crs->cr1_lo).nmie, + AS(crs->cr1_lo).uie, + AS(crs->cr1_lo).lw, AS(crs->cr1_lo).sge, AS(crs->cr1_lo).ie, + AS(crs->cr1_lo).pm, AS(crs->cr1_lo).cuir, AS(crs->cr1_lo).wbs, + AS(crs->cr1_lo).wpsz, AS(crs->cr1_lo).wfx, AS(crs->cr1_lo).ss, + AS(crs->cr1_lo).ein); + host_pr_info("cr1.hi: ussz 0x%x, wdbl %d\n" + " rbs 0x%x, rsz 0x%x, rcur 0x%x, psz 0x%x, pcur 0x%x\n", + AS(crs->cr1_hi).ussz, AS(crs->cr1_hi).wdbl, AS(crs->cr1_hi).rbs, + AS(crs->cr1_hi).rsz, AS(crs->cr1_hi).rcur, AS(crs->cr1_hi).psz, + AS(crs->cr1_hi).pcur); + host_pr_info("WD: base 0x%x, size 0x%x, psize 0x%x, fx %d, dbl %d\n", + regs->wd.base, regs->wd.size, regs->wd.psize, regs->wd.fx, + regs->wd.dbl); + if (from_syscall(regs)) { + host_pr_info("regs->kernel_entry: %d, syscall #%d\n", + regs->kernel_entry, regs->sys_num); + } else { + const struct trap_pt_regs *trap = regs->trap; + u64 exceptions; + + host_pr_info("ctpr1: base 0x%llx, tag 0x%x, opc 0x%x, " + "ipd 0x%x\n", + AS(regs->ctpr1).ta_base, AS(regs->ctpr1).ta_tag, + AS(regs->ctpr1).opc, AS(regs->ctpr1).ipd); + host_pr_info("ctpr2: base 0x%llx, tag 0x%x, opcode 0x%x, " + "prefetch 0x%x\n", + AS(regs->ctpr2).ta_base, AS(regs->ctpr2).ta_tag, + AS(regs->ctpr2).opc, AS(regs->ctpr2).ipd); + host_pr_info("ctpr3: base 0x%llx, tag 0x%x, opcode 0x%x, " + "prefetch 0x%x\n", + AS(regs->ctpr3).ta_base, AS(regs->ctpr3).ta_tag, + AS(regs->ctpr3).opc, AS(regs->ctpr3).ipd); + host_pr_info("regs->trap: 0x%px, AAU context at 0x%px\n", + regs->trap, regs->aau_context); + + exceptions = print_all_TIRs(trap->TIRs, trap->nr_TIRs); + print_all_TC(trap->tcellar, trap->tc_count); + if (exceptions & exc_data_debug_mask) { + host_pr_info("ddbcr 0x%llx, ddmcr 0x%llx, " + "ddbsr 0x%llx\n", + READ_DDBCR_REG_VALUE(), READ_DDMCR_REG_VALUE(), + READ_DDBSR_REG_VALUE()); + host_pr_info("ddbar0 0x%llx, ddbar1 0x%llx, " + 
"ddbar2 0x%llx, ddbar3 0x%llx\n", + READ_DDBAR0_REG_VALUE(), + READ_DDBAR1_REG_VALUE(), + READ_DDBAR2_REG_VALUE(), + READ_DDBAR3_REG_VALUE()); + host_pr_info("ddmar0 0x%llx, ddmar1 0x%llx\n", + READ_DDMAR0_REG_VALUE(), + READ_DDMAR1_REG_VALUE()); + } + if (exceptions & exc_instr_debug_mask) { + host_pr_info("dibcr 0x%x, dimcr 0x%llx, dibsr 0x%x\n", + READ_DIBCR_REG_VALUE(), + READ_DIMCR_REG_VALUE(), + READ_DIBSR_REG_VALUE()); + host_pr_info("dibar0 0x%llx, dibar1 0x%llx, " + "dibar2 0x%llx, dibar3 0x%llx\n", + READ_DIBAR0_REG_VALUE(), + READ_DIBAR1_REG_VALUE(), + READ_DIBAR2_REG_VALUE(), + READ_DIBAR3_REG_VALUE()); + host_pr_info("dimar0 0x%llx, dimar1 0x%llx\n", + READ_DIMAR0_REG_VALUE(), + READ_DIMAR1_REG_VALUE()); + } + } +} + +static int get_addr_name(u64 addr, char *buf, size_t len, + unsigned long *start_addr_p, struct mm_struct *mm) +{ + struct vm_area_struct *vma; + int ret = 0, locked; + + if (addr >= TASK_SIZE || !mm) + return -ENOENT; + + /* + * This function is used when everything goes south + * so do not try too hard to lock mmap_sem + */ + locked = down_read_trylock(&mm->mmap_sem); + + vma = find_vma(mm, addr); + if (!vma || vma->vm_start > addr || !vma->vm_file) { + ret = -ENOENT; + goto out_unlock; + } + + /* seq_buf_path() locks init_fs.seq which is normally + * locked with enabled interrupts, so we cannot reliably + * call it if we are in interrupt */ + if (!in_irq()) { + struct seq_buf s; + + seq_buf_init(&s, buf, len); + seq_buf_path(&s, &vma->vm_file->f_path, "\n"); + + if (seq_buf_used(&s) < len) + buf[seq_buf_used(&s)] = 0; + else + buf[len - 1] = 0; + } else { + buf[0] = 0; + } + + /* Assume that load_base == vm_start */ + if (start_addr_p) + *start_addr_p = vma->vm_start; + +out_unlock: + if (locked) + up_read(&mm->mmap_sem); + + return ret; +} + + +static DEFINE_RAW_SPINLOCK(print_stack_lock); + +/** + * print_stack_frames - print task's stack to console + * @task: which task's stack to print? 
+ * @pt_regs: skip stack on top of this pt_regs structure + * @show_reg_window: print local registers? + */ +static noinline void +host_print_stack_frames(struct task_struct *task, struct pt_regs *pt_regs, + int show_reg_window) +{ + unsigned long flags; + int cpu; + bool used; + struct stack_regs *stack_regs; + + /* if this is guest, stop tracing in host to avoid buffer overwrite */ + host_ftrace_stop(); + + if (!task) + task = current; + + if (test_and_set_bit(PRINT_FUNCY_STACK_WORKS_BIT, + &task->thread.flags)) { + host_pr_alert(" %d: print_stack: works already on pid %d\n", + current->pid, task->pid); + if (task != current) + return; + } + + /* + * stack_regs_cache[] is protected by IRQ-disable + * (we assume that NMI handlers will not call dump_stack() and + * do not disable NMIs here as they are used by copy_stack_regs()) + */ + raw_local_irq_save(flags); + + if (task == current) { + host_pr_alert("%s", linux_banner); + } + + cpu = raw_smp_processor_id(); + stack_regs = &stack_regs_cache[cpu]; + + used = xchg(&stack_regs->used, 1); + if (used) { + host_pr_alert(" %d: print stack: works already on cpu %d\n", + current->pid, cpu); + } else { + stack_regs->show_trap_regs = debug_trap; + stack_regs->show_user_regs = debug_userstack; +#ifdef CONFIG_DATA_STACK_WINDOW + stack_regs->show_k_data_stack = debug_datastack; +#endif + copy_stack_regs(task, pt_regs, stack_regs); + + /* All checks of stacks validity are + * performed in print_chain_stack() */ + + host_print_chain_stack(stack_regs, show_reg_window); + } + + /* if task is host of guest VM or VCPU, then print guest stacks */ + print_guest_stack(task, stack_regs, show_reg_window); + + stack_regs->used = 0; + + raw_local_irq_restore(flags); + + clear_bit(PRINT_FUNCY_STACK_WORKS_BIT, &task->thread.flags); +} + +static inline void print_funcy_ip(u64 addr, u64 cr_base, u64 cr_ind, + struct task_struct *task, u64 orig_base) +{ + unsigned long start_addr; + char buf[64]; + int traced = 0; + + if (addr < TASK_SIZE) { + 
if (!get_addr_name(addr, buf, sizeof(buf), + &start_addr, task->mm)) { + host_pr_alert(" 0x%-12llx %s (@0x%lx)\n", addr, + buf, start_addr); + } else { + host_pr_alert(" 0x%-12llx \n", addr); + } + + return; + } + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + if (task->ret_stack) { + int index; + for (index = 0; index <= task->curr_ret_stack; index++) + if (task->ret_stack[index].fp == orig_base + cr_ind) { + addr = task->ret_stack[index].ret; + traced = 1; + break; + } + } +#endif + + host_pr_alert(" 0x%-12llx %pF%s", addr, (void *) addr, + (traced) ? " (traced)" : ""); +} + +#ifdef CONFIG_DATA_STACK_WINDOW +static void print_k_data_stack(struct stack_regs *regs, int *pt_regs_num, + unsigned long base, u64 size) +{ + unsigned long delta = regs->real_k_data_stack_addr - + regs->base_k_data_stack; + bool pt_regs_valid = regs->pt_regs[*pt_regs_num].valid; + unsigned long pt_regs_addr = regs->pt_regs[*pt_regs_num].addr; + unsigned long addr; + bool show_pt_regs; + + if (!size) + return; + + if (pt_regs_valid && pt_regs_addr >= (unsigned long) base + delta && + pt_regs_addr < (unsigned long) base + delta + size) { + show_pt_regs = 1; + (*pt_regs_num)++; + } else { + show_pt_regs = 0; + } + + host_printk(" DATA STACK from %lx to %llx\n", base + delta, + base + delta + size); + for (addr = base; addr < base + size; addr += 16) { + u8 tag_lo, tag_hi; + u64 value_lo, value_hi; + bool is_pt_regs_addr = show_pt_regs + && (addr + delta) >= pt_regs_addr + && (addr + delta) < (pt_regs_addr + + sizeof(struct pt_regs)); + + load_qvalue_and_tagq(addr, &value_lo, &value_hi, + &tag_lo, &tag_hi); + host_printk(" %lx (%s+0x%-3lx): %x %016llx %x %016llx\n", + addr + delta, + (is_pt_regs_addr) ? "pt_regs" : "", + (is_pt_regs_addr) ? 
(addr + delta - pt_regs_addr) : + (addr - base), + tag_lo, value_lo, tag_hi, value_hi); + } +} +#endif + +/* + * Must be called with disabled interrupts + */ +static void host_print_chain_stack(struct stack_regs *regs, int show_reg_window) +{ + unsigned long flags; + bool disable_nmis; + struct task_struct *task = regs->task; + u32 attempt, locked = 0; + u64 new_chain_base = (u64) regs->base_chain_stack; + u64 orig_chain_base, orig_psp_base; + s64 cr_ind = regs->size_chain_stack; + s64 kernel_size_chain_stack = regs->size_chain_stack - + regs->user_size_chain_stack; + e2k_mem_crs_t crs = regs->crs; + u64 new_psp_base = (u64) regs->base_psp_stack; + s64 psp_ind = regs->size_psp_stack; + s64 kernel_size_psp_stack = regs->size_psp_stack - + regs->user_size_psp_stack; + stack_frame_t cur_frame; + bool ignore_ip = false; + int trap_num = 0; +#ifdef CONFIG_DATA_STACK_WINDOW + e2k_cr1_lo_t prev_cr1_lo; + e2k_cr1_hi_t prev_k_cr1_hi; + bool show_k_data_stack = !!regs->base_k_data_stack; + int pt_regs_num = 0; + void *base_k_data_stack = regs->base_k_data_stack; + u64 size_k_data_stack = regs->size_k_data_stack; +#endif + int last_user_windows = 2; + int i; + int timeout = is_prototype() ? 150000 : 30000; + + if (!regs->valid) { + host_pr_alert(" BUG print_chain_stack pid=%d valid=0\n", + (task) ? 
task->pid : -1); + return; + } + if (!regs->base_chain_stack) { + host_pr_alert(" BUG could not get task %s (%d) stack " + "registers, stack will not be printed\n", + task->comm, task->pid); + return; + } + + if (unlikely(!raw_irqs_disabled())) + host_pr_alert("WARNING: print_chain_stack called with enabled " + "interrupts\n"); + + /* If task is current, disable NMIs so that interrupts handlers + * will not spill our stacks.*/ + disable_nmis = (task == current); + if (disable_nmis) + raw_all_irq_save(flags); + /* Try locking the spinlock (with 30 seconds timeout) */ + attempt = 0; + do { + if (raw_spin_trylock(&print_stack_lock)) { + locked = 1; + break; + } + + /* Wait for 0.001 second. */ + if (disable_nmis) + raw_all_irq_restore(flags); + udelay(1000); + if (disable_nmis) + raw_all_irq_save(flags); + } while (attempt++ < timeout); + if (disable_nmis) { + COPY_STACKS_TO_MEMORY(); + } + + debug_userstack |= (print_window_regs && debug_guest_regs(task)); + + if (!regs->ignore_banner) { + if (IS_KERNEL_THREAD(task, task->mm)) { + host_pr_info("Task %s(%d) is Kernel Thread\n", + task->comm, task->pid); + } else { + host_pr_info("Task %s(%d) is User Thread\n", + task->comm, task->pid); + } + + host_pr_alert("PROCESS: %s, PID: %d, %s: %d, state: %c %s " + "(0x%lx), flags: 0x%x\n", + task->comm == NULL ? "NULL" : task->comm, + task->pid, + get_cpu_type_name(), + task_cpu(task), task_state_to_char(task), +#ifdef CONFIG_SMP + task_curr(task) ? 
"oncpu" : "", +#else + "", +#endif + task->state, task->flags); + } + + if (!regs->base_psp_stack) { + host_pr_alert(" WARNING could not get task %s(%d) procedure " + "stack registers, register windows will not be " + "printed\n", + task->comm, task->pid); + show_reg_window = 0; + } else { + show_reg_window = show_reg_window && (task == current || + print_window_regs || task_curr(task) || + debug_guest_regs(task)); + } + + /* Print header */ + if (show_reg_window) { + host_pr_alert(" PSP: base 0x%016llx ind 0x%08x size 0x%08x\n", + AS_STRUCT(regs->psp_lo).base, + AS_STRUCT(regs->psp_hi).ind, + AS_STRUCT(regs->psp_hi).size); + host_pr_alert(" PCSP: base 0x%016llx ind 0x%08x size 0x%08x\n", + AS_STRUCT(regs->pcsp_lo).base, + AS_STRUCT(regs->pcsp_hi).ind, + AS_STRUCT(regs->pcsp_hi).size); + host_pr_alert(" ---------------------------------------------" + "------------------------\n" + " IP (hex) PROCEDURE/FILE(@ Library load " + "address)\n" + " ---------------------------------------------------" + "------------------\n"); + } + + for (;;) { + if (kernel_size_chain_stack > 0) { + orig_chain_base = regs->orig_base_chain_stack_k; + kernel_size_chain_stack -= SZ_OF_CR; + } else { + orig_chain_base = regs->orig_base_chain_stack_u; + } + print_funcy_ip(AS(crs.cr0_hi).ip << 3, new_chain_base, cr_ind, + task, orig_chain_base); + + if (show_reg_window) { + psp_ind -= AS(crs.cr1_lo).wbs * EXT_4_NR_SZ; + + if (regs->show_trap_regs && trap_num < MAX_USER_TRAPS && + regs->trap[trap_num].valid && + regs->trap[trap_num].frame == + orig_chain_base + cr_ind) { + if (machine.native_iset_ver >= E2K_ISET_V6) { + host_pr_alert(" ctpr1 %llx:%llx " + "ctpr2 %llx:%llx ctpr3 " + "%llx:%llx\n" + "lsr %llx ilcr %llx lsr1 %llx " + "ilcr1 %llx\n", + AW(regs->trap[trap_num].ctpr1_hi), + AW(regs->trap[trap_num].ctpr1), + AW(regs->trap[trap_num].ctpr2_hi), + AW(regs->trap[trap_num].ctpr2), + AW(regs->trap[trap_num].ctpr3_hi), + AW(regs->trap[trap_num].ctpr3), + regs->trap[trap_num].lsr, + 
regs->trap[trap_num].ilcr, + regs->trap[trap_num].lsr1, + regs->trap[trap_num].ilcr1); + } else if (machine.native_iset_ver == E2K_ISET_V5) { + host_pr_alert(" ctpr1 %llx ctpr2 " + "%llx ctpr3 %llx\n" + " lsr %llx ilcr %llx lsr1 " + "%llx ilcr1 %llx\n", + AW(regs->trap[trap_num].ctpr1), + AW(regs->trap[trap_num].ctpr2), + AW(regs->trap[trap_num].ctpr3), + regs->trap[trap_num].lsr, + regs->trap[trap_num].ilcr, + regs->trap[trap_num].lsr1, + regs->trap[trap_num].ilcr1); + } else { + host_pr_alert(" ctpr1 %llx ctpr2 " + "%llx ctpr3 %llx\n" + " lsr %llx ilcr %llx\n", + AW(regs->trap[trap_num].ctpr1), + AW(regs->trap[trap_num].ctpr2), + AW(regs->trap[trap_num].ctpr3), + regs->trap[trap_num].lsr, + regs->trap[trap_num].ilcr); + } + for (i = 0; i < SBBP_ENTRIES_NUM; i += 4) { + host_pr_alert(" sbbp%-2d 0x%-12llx " + "0x%-12llx 0x%-12llx 0x%-12llx\n", + i, regs->trap[trap_num].sbbp[i], + regs->trap[trap_num].sbbp[i + 1], + regs->trap[trap_num].sbbp[i + 2], + regs->trap[trap_num].sbbp[i + 3]); + } + ++trap_num; + } + cur_frame = get_task_stack_frame_type_IP(task, + crs.cr0_hi, crs.cr1_lo, ignore_ip); + if (cur_frame != user_frame_type || + regs->show_user_regs || last_user_windows) { + /* Show a couple of last user windows - usually + * there is something useful there */ + if ((cur_frame == user_frame_type) && + last_user_windows) + --last_user_windows; + + if (kernel_size_psp_stack > 0) { + orig_psp_base = + regs->orig_base_psp_stack_k; + kernel_size_psp_stack -= + AS(crs.cr1_lo).wbs * EXT_4_NR_SZ; + } else { + orig_psp_base = + regs->orig_base_psp_stack_u; + } + + host_pr_alert(" PCSP: 0x%llx, PSP: " + "0x%llx/0x%x\n", + orig_chain_base + cr_ind, + orig_psp_base + psp_ind, + AS(crs.cr1_lo).wbs * EXT_4_NR_SZ); + + print_predicates(crs.cr0_lo, crs.cr1_hi); + + if (psp_ind < 0 && cr_ind > 0) { + host_pr_alert("! 
Invalid Register " + "Window index (psp.ind) 0x%llx", + psp_ind); + } else if (psp_ind >= 0) { + print_reg_window(new_psp_base + psp_ind, + AS(crs.cr1_lo).wbs, + AS(crs.cr1_lo).wfx, crs.cr1_hi); + } + } + } +#ifdef CONFIG_DATA_STACK_WINDOW + if (show_k_data_stack && + call_from_kernel_mode(crs.cr0_hi, crs.cr1_lo)) { + u64 k_window_size; + s64 cur_chain_index; + + /* To find data stack window size we have to + * read cr1.hi from current *and* previous frames */ + cur_chain_index = cr_ind; + do { + cur_chain_index -= SZ_OF_CR; + if (cur_chain_index < 0) + /* This is a thread created with clone + * and we have reached the last kernel + * frame. */ + break; + + get_kernel_cr1_lo(&prev_cr1_lo, new_chain_base, + cur_chain_index); + } while (!AS(prev_cr1_lo).pm); + + if (cur_chain_index < 0) { + k_window_size = size_k_data_stack; + } else { + get_kernel_cr1_hi(&prev_k_cr1_hi, + new_chain_base, cur_chain_index); + + k_window_size = 16 * AS(prev_k_cr1_hi).ussz - + 16 * AS(crs.cr1_hi).ussz; + if (k_window_size > size_k_data_stack) { + /* The stack is suspiciously large */ + k_window_size = size_k_data_stack; + host_pr_alert(" This is the last " + "frame or it was not copied fully\n" + "The stack is suspiciously " + "large (0x%llx)\n", + k_window_size); + show_k_data_stack = 0; + } + } + print_k_data_stack(regs, &pt_regs_num, (unsigned long) + base_k_data_stack, k_window_size); + base_k_data_stack += k_window_size; + size_k_data_stack -= k_window_size; + if (!size_k_data_stack) + show_k_data_stack = 0; + } +#endif + + if (cr_ind < SZ_OF_CR) + break; + + cr_ind -= SZ_OF_CR; + + /* + * Last frame is bogus (from execve or clone), skip it. 
+ * + * For kernel threads there is one more reserved frame + * (for start_thread()) + */ + if ((cr_ind == 0 || + cr_ind == SZ_OF_CR && (task->flags & PF_KTHREAD)) && + (task == current || + regs->size_chain_stack < SIZE_CHAIN_STACK)) + break; + + crs = *(e2k_mem_crs_t *) (new_chain_base + cr_ind); + } + + if (cr_ind < 0) + host_pr_alert("INVALID cr_ind SHOULD BE 0\n"); + +#ifdef CONFIG_GREGS_CONTEXT + if (show_reg_window && regs->show_user_regs && regs->gregs_valid) { + int i; + + host_pr_alert(" Global registers: bgr.cur = %d, " + "bgr.val = 0x%x\n", + AS(regs->gregs.bgr).cur, AS(regs->gregs.bgr).val); + for (i = 0; i < 32; i += 2) { + u64 val_lo, val_hi; + u8 tag_lo, tag_hi; + + load_value_and_tagd(®s->gregs.g[i + 0].base, + &val_lo, &tag_lo); + load_value_and_tagd(®s->gregs.g[i + 1].base, + &val_hi, &tag_hi); + + if (machine.native_iset_ver < E2K_ISET_V5) { + host_pr_alert(" g%-3d: %hhx %016llx " + "%04hx " + "g%-3d: %hhx %016llx %04hx\n", + i, tag_lo, val_lo, + (u16) regs->gregs.g[i].ext, + i + 1, tag_hi, val_hi, + (u16) regs->gregs.g[i+1].ext); + } else { + u64 ext_lo_val, ext_hi_val; + u8 ext_lo_tag, ext_hi_tag; + + load_value_and_tagd(®s->gregs.g[i + 0].ext, + &ext_lo_val, &ext_lo_tag); + load_value_and_tagd(®s->gregs.g[i + 1].ext, + &ext_hi_val, &ext_hi_tag); + + host_pr_alert(" g%-3d: %hhx %016llx " + "ext: %hhx %016llx\n" + " g%-3d: %hhx %016llx " + "ext: %hhx %016llx\n", + i, tag_lo, val_lo, + ext_lo_tag, ext_lo_val, + i + 1, tag_hi, val_hi, + ext_hi_tag, ext_hi_val); + } + } + } +#endif + + if (locked) + raw_spin_unlock(&print_stack_lock); + if (disable_nmis) + raw_all_irq_restore(flags); +} + +void host_dump_stack(void) +{ + host_print_stack_frames(current, NULL, 1); +} diff --git a/arch/e2k/kvm/guest/host_printk.c b/arch/e2k/kvm/guest/host_printk.c new file mode 100644 index 000000000000..40cc2cb087c0 --- /dev/null +++ b/arch/e2k/kvm/guest/host_printk.c @@ -0,0 +1,27 @@ +/* + * KVM guest printk() on host implementation. 
+ */ + +#include +#include +#include + +#include +#include + +int kvm_host_printk(const char *fmt, ...) +{ + va_list args; + char buf[HOST_PRINTK_BUFFER_MAX]; + int size; + + va_start(args, fmt); + size = vsnprintf(buf, HOST_PRINTK_BUFFER_MAX, fmt, args); + va_end(args); + + if (size <= 0) + return size; + size = HYPERVISOR_host_printk(buf, size); + return size; +} +EXPORT_SYMBOL(kvm_host_printk); diff --git a/arch/e2k/kvm/guest/host_time.c b/arch/e2k/kvm/guest/host_time.c new file mode 100644 index 000000000000..f5c9850d0471 --- /dev/null +++ b/arch/e2k/kvm/guest/host_time.c @@ -0,0 +1,50 @@ +/* + * KVM guest time implementation. + * + * This is implemented in terms of a clocksource driver which uses + * the hypervisor clock as a nanosecond timebase, and a clockevent + * driver which uses the hypervisor's timer mechanism. + * + * Based on Xen implementation: arch/x86/xen/time.c + */ +#include +#include +#include +#include +#include + +#include +#include "kvm_time.h" + +/* + * Time. + * + * It would be far better for everyone if the Guest had its own clock, but + * until then the Host time on every guest running start. 
+ */ + +static void kvm_read_wallclock(struct timespec *ts) +{ + kvm_time_t *time_info = get_vcpu_time_info(); + long sec; + + do { + sec = time_info->tv_sec; + ts->tv_sec = sec; + ts->tv_nsec = time_info->tv_nsec; + } while (sec != time_info->tv_sec); +} + +unsigned long kvm_get_rtc_time(void) +{ + struct timespec ts; + + kvm_read_wallclock(&ts); + return ts.tv_sec; +} + +int kvm_set_rtc_time(unsigned long now) +{ + /* do nothing for domU */ + return -1; +} diff --git a/arch/e2k/kvm/guest/io.c b/arch/e2k/kvm/guest/io.c new file mode 100644 index 000000000000..8b95b82471af --- /dev/null +++ b/arch/e2k/kvm/guest/io.c @@ -0,0 +1,573 @@ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef DEBUG_KVM_IO_MODE +#undef DebugKVMIO +#define DEBUG_KVM_IO_MODE 0 /* kernel virt machine IO debugging */ +#define DebugKVMIO(fmt, args...) \ +({ \ + if (DEBUG_KVM_IO_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_IOPORT_MODE +#undef DebugKVMIOP +#define DEBUG_KVM_IOPORT_MODE 0 /* kernel virt machine IO debugging */ +#define DebugKVMIOP(fmt, args...) \ +({ \ + if (DEBUG_KVM_IOPORT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_MMIO_MODE +#undef DebugMMIO +#define DEBUG_KVM_MMIO_MODE 0 /* kernel virt machine MMIO debugging */ +#define DebugMMIO(fmt, args...) \ +({ \ + if (DEBUG_KVM_MMIO_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_CIO_MODE +#undef DebugCIO +#define DEBUG_CIO_MODE 0 /* configuration space */ + /* input/output functions */ +#define DebugCIO(fmt, args...) 
\ +({ \ + if (DEBUG_CIO_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +static unsigned long +do_guest_mmio(e2k_addr_t phys_addr, u64 value, u8 size, u8 is_write) +{ + unsigned long data[1]; + int ret; + + if (is_write) { + data[0] = value; + DebugKVMIO("data to write 0x%lx size %d to addr 0x%lx\n", + data[0], size, phys_addr); + } + if ((phys_addr & MAX_PA_MASK) != (phys_addr & E2K_VA_MASK)) { + pr_err("%s: MMIO addr 0x%lx, size %d out of physival memory\n", + __func__, phys_addr, size); + BUG_ON(true); + } + ret = HYPERVISOR_guest_mmio_request(phys_addr, data, size, is_write); + if (ret) { + pr_err("%s: could not pass MMIO request to host, error %d\n", + __func__, ret); + return -1L; + } + if (!is_write) { + DebugKVMIO("read data 0x%lx size %d from addr 0x%lx\n", + data[0], size, phys_addr); + } + return data[0]; +} + +/* + * KVM guest MMIO should be passed to QEMU through host hypercall. + * KVM MMIO <-> QEMU interface assumes physical address of MMIO request. + * The function argument can be + * physical address; + * IO remapped address; + * VGA VRAM address + * IO remapped address is translated to source physical IO address on PCI + * VGA VRAM address is converted to special physical address into guest + * IO memory address space (see asm/head.h) + */ +static inline unsigned long +kvm_guest_mmio(volatile void __iomem *mmio_addr, u64 value, u8 size, u8 is_write, + int domain) +{ + e2k_addr_t addr = (e2k_addr_t)mmio_addr; + e2k_addr_t phys_addr; + bool epic = cpu_has_epic(); + + DebugMMIO("started to %s KVM MMIO address %px value 0x%02llx size %d\n", + (is_write) ? 
"write to" : "read from", + mmio_addr, value, size); + if ((addr & MAX_PA_MASK) == addr) { + /* address is already physical */ + phys_addr = addr; + DebugMMIO("source address 0x%lx is already physical\n", + addr); + } else if (addr >= GUEST_VMALLOC_START && addr < GUEST_VMALLOC_END) { + /* address inside IO remapping area */ + struct vm_struct *vm; + + vm = find_io_vm_area((const void *)addr); + if (unlikely(vm == NULL)) { + pr_err("%s: could not find MMIO address %px into " + "IO remapping areas\n", + __func__, mmio_addr); + BUG_ON(true); + } + if (unlikely(!(vm->flags & VM_IOREMAP))) { + pr_err("%s: MMIO address %px is into not IO remapping " + "area\n", + __func__, mmio_addr); + BUG_ON(true); + } + phys_addr = vm->phys_addr; + DebugMMIO("virtual address 0x%lx is from MMIO remapping space, " + "converted to physical 0x%lx\n", + addr, phys_addr); + BUG_ON(phys_addr == 0 || + ((phys_addr & MAX_PA_MASK) != phys_addr)); + phys_addr |= (addr & ~PAGE_MASK); + } else if (unlikely(KVM_IS_VGA_VRAM_VIRT_ADDR(addr))) { + /* it is virtual address of VGA VRAM */ + phys_addr = KVM_VGA_VRAM_VIRT_TO_PHYS(addr); + DebugMMIO("virtual address 0x%lx is from VGA VRAM space, " + "converted to physical 0x%lx\n", + addr, phys_addr); + } else { + pr_err("%s: invalid KVM MMIO address %px\n", + __func__, mmio_addr); + BUG_ON(true); + } + if (likely(phys_addr >= PCIBIOS_MIN_MEM && + phys_addr <= PCIBIOS_MAX_MEM_32)) { + /* it is address inside PCI space */ + /* pass direct physical address */ + DebugMMIO("physical address 0x%lx is from PCI space\n", + phys_addr); + } else if (phys_addr >= get_domain_pci_conf_base(domain) && + phys_addr < get_domain_pci_conf_base(domain) + + get_domain_pci_conf_size(domain)) { + /* it is address inside PCI config space */ + /* pass direct physical address */ + DebugMMIO("physical address 0x%lx is from PCI config space\n", + phys_addr); + } else if (unlikely(KVM_IS_VGA_VRAM_PHYS_ADDR(phys_addr))) { + /* it is physical address inside VGA VRAM space */ + /* 
convert to KVM guest "physical" address */ + phys_addr += KVM_VGA_VRAM_PHYS_BASE; + DebugMMIO("physical address 0x%lx is from VGA VRAM space\n", + phys_addr); + } else if (!epic && phys_addr >= APIC_BASE && + phys_addr < APIC_BASE + APIC_REGS_SIZE) { + /* it is local APIC registers address space */ + DebugMMIO("physical address 0x%lx is local APIC register\n", + phys_addr); + } else if (!epic && phys_addr >= IO_APIC_DEFAULT_PHYS_BASE && + phys_addr < IO_APIC_DEFAULT_PHYS_BASE + + IO_APIC_SLOT_SIZE) { + /* it is IO-APIC registers address space */ + DebugMMIO("physical address 0x%lx is IO-APIC register\n", + phys_addr); + } else if (epic && phys_addr >= EPIC_DEFAULT_PHYS_BASE && + phys_addr < EPIC_DEFAULT_PHYS_BASE + EPIC_REGS_SIZE) { + /* it is CEPIC registers address space */ + DebugMMIO("physical address 0x%lx is CEPIC register\n", + phys_addr); + } else if (epic && phys_addr >= IO_EPIC_DEFAULT_PHYS_BASE && + phys_addr < IO_EPIC_DEFAULT_PHYS_BASE + IO_EPIC_REGS_SIZE) { + /* it is IO-EPIC registers address space */ + DebugMMIO("physical address 0x%lx is IO-EPIC register\n", + phys_addr); + } else if (phys_addr >= (e2k_addr_t)THE_NODE_NBSR_PHYS_BASE(0) && + phys_addr < (e2k_addr_t)THE_NODE_NBSR_PHYS_BASE(0) + + NODE_NBSR_SIZE * MAX_NUMNODES) { + /* it is NBSR (SIC) registers address space */ + DebugMMIO("physical address 0x%lx is SIC-NBSR register\n", + phys_addr); + } else { + pr_err("%s: invalid KVM MMIO physical address 0x%lx for " + "source virtual address %px\n", + __func__, phys_addr, mmio_addr); + BUG_ON(true); + } + DebugMMIO("pass request to QEMU to %s KVM MMIO physical address 0x%lx " + "value 0x%02llx size %d\n", + (is_write) ? 
"write to" : "read from", + phys_addr, value, size); + return do_guest_mmio(phys_addr, value, size, is_write); +} +unsigned long +kvm_handle_guest_mmio(void __iomem *mmio_addr, u64 value, u8 size, u8 is_write) +{ + return kvm_guest_mmio(mmio_addr, value, size, is_write, 0); +} + +u8 kvm_readb(const volatile void __iomem *addr) +{ + DebugKVMIO("started to read byte from MMIO addr %px\n", addr); + return kvm_guest_mmio((__force volatile void __iomem *) addr, 0, 1, 0, 0); +} +EXPORT_SYMBOL(kvm_readb); + +u16 kvm_readw(const volatile void __iomem *addr) +{ + DebugKVMIO("started to read halfword from MMIO addr %px\n", addr); + return kvm_guest_mmio((__force volatile void __iomem *) addr, 0, 2, 0, 0); +} +EXPORT_SYMBOL(kvm_readw); + +u32 kvm_readl(const volatile void __iomem *addr) +{ + DebugKVMIO("started to read word from MMIO addr %px\n", addr); + return kvm_guest_mmio((__force volatile void __iomem *) addr, 0, 4, 0, 0); +} +EXPORT_SYMBOL(kvm_readl); + +u64 kvm_readll(const volatile void __iomem *addr) +{ + DebugKVMIO("started to read long word from MMIO addr %px\n", addr); + return kvm_guest_mmio((__force volatile void __iomem *) addr, 0, 8, 0, 0); +} +EXPORT_SYMBOL(kvm_readll); + +void kvm_writeb(u8 b, volatile void __iomem *addr) +{ + DebugKVMIO("started to write byte 0x%02x to MMIO addr %px\n", + b, addr); + kvm_guest_mmio(addr, b, 1, 1, 0); +} +EXPORT_SYMBOL(kvm_writeb); + +void kvm_writew(u16 w, volatile void __iomem *addr) +{ + DebugKVMIO("started to write halfword 0x%04x to MMIO addr %px\n", + w, addr); + kvm_guest_mmio(addr, w, 2, 1, 0); +} +EXPORT_SYMBOL(kvm_writew); + +void kvm_writel(u32 l, volatile void __iomem *addr) +{ + DebugKVMIO("started to write word 0x%08x to MMIO addr %px\n", + l, addr); + kvm_guest_mmio(addr, l, 4, 1, 0); +} +EXPORT_SYMBOL(kvm_writel); + +void kvm_writell(u64 q, volatile void __iomem *addr) +{ + DebugKVMIO("started to write long word 0x%016llx to MMIO addr %px\n", + q, addr); + kvm_guest_mmio(addr, q, 8, 1, 0); +} 
+EXPORT_SYMBOL(kvm_writell); + +static unsigned long +kvm_guest_ioport(u32 port, u32 value, u8 size, u8 is_out) +{ + u32 data[1]; + int ret; + + if (is_out) { + data[0] = value; + DebugKVMIOP("data to write 0x%x size %d to port 0x%x\n", + value, size, port); + } + ret = HYPERVISOR_guest_ioport_request(port, data, size, is_out); + if (!is_out) { + DebugKVMIOP("read data 0x%x size %d from port 0x%x\n", + data[0], size, port); + } + return data[0]; +} + +static unsigned long +kvm_guest_ioport_string(u32 port, const void *data, u8 size, u32 count, + u8 is_out) +{ + unsigned long ret; + + ret = HYPERVISOR_guest_ioport_string_request(port, data, size, + count, is_out); + DebugKVMIO("%s data %px size %d, count 0x%x to port 0x%x\n", + (is_out) ? "written from" : "read to", data, size, count, port); + return ret; +} + +u8 kvm_inb(unsigned short port) +{ + DebugKVMIO("started to read byte from IO port 0x%x\n", port); + return kvm_guest_ioport(port, 0, 1, 0); +} +EXPORT_SYMBOL(kvm_inb); + +u16 kvm_inw(unsigned short port) +{ + DebugKVMIO("started to read halfword from IO port 0x%x\n", port); + return kvm_guest_ioport(port, 0, 2, 0); +} +EXPORT_SYMBOL(kvm_inw); + +u32 kvm_inl(unsigned short port) +{ + DebugKVMIO("started to read word from IO port 0x%x\n", port); + return kvm_guest_ioport(port, 0, 4, 0); +} +EXPORT_SYMBOL(kvm_inl); + +void kvm_outb(unsigned char byte, unsigned short port) +{ + DebugKVMIO("started to write byte 0x%02x to IO port 0x%x\n", + byte, port); + kvm_guest_ioport(port, byte, 1, 1); +} +EXPORT_SYMBOL(kvm_outb); + +void kvm_outw(unsigned short halfword, unsigned short port) +{ + DebugKVMIO("started to write halfword 0x%04x to IO port 0x%x\n", + halfword, port); + kvm_guest_ioport(port, halfword, 2, 1); +} +EXPORT_SYMBOL(kvm_outw); + +void kvm_outl(unsigned int word, unsigned short port) +{ + DebugKVMIO("started to write word 0x%08x to IO port 0x%x\n", + word, port); + kvm_guest_ioport(port, word, 4, 1); +} +EXPORT_SYMBOL(kvm_outl); + +void 
kvm_outsb(unsigned short port, const void *src, unsigned long count) +{ + DebugKVMIO("started to write 0x%lx bytes fron %px to IO port 0x%x\n", + count, src, port); + kvm_guest_ioport_string(port, src, 1, count, 1); +} +EXPORT_SYMBOL(kvm_outsb); + +void kvm_outsw(unsigned short port, const void *src, unsigned long count) +{ + DebugKVMIO("started to write 0x%lx halfwords fron %px to " + "IO port 0x%x\n", + count, src, port); + kvm_guest_ioport_string(port, src, 2, count, 1); +} +EXPORT_SYMBOL(kvm_outsw); + +void kvm_outsl(unsigned short port, const void *src, unsigned long count) +{ + DebugKVMIO("started to write 0x%lx words fron %px to IO port 0x%x\n", + count, src, port); + kvm_guest_ioport_string(port, src, 4, count, 1); +} +EXPORT_SYMBOL(kvm_outsl); + +void kvm_insb(unsigned short port, void *dst, unsigned long count) +{ + DebugKVMIO("started to read 0x%lx bytes to %px from IO port 0x%x\n", + count, dst, port); + kvm_guest_ioport_string(port, dst, 1, count, 0); +} +EXPORT_SYMBOL(kvm_insb); + +void kvm_insw(unsigned short port, void *dst, unsigned long count) +{ + DebugKVMIO("started to read 0x%lx halfwords to %px from IO port 0x%x\n", + count, dst, port); + kvm_guest_ioport_string(port, dst, 2, count, 0); +} +EXPORT_SYMBOL(kvm_insw); + +void kvm_insl(unsigned short port, void *dst, unsigned long count) +{ + DebugKVMIO("started to read 0x%lx words to %px from IO port 0x%x\n", + count, dst, port); + kvm_guest_ioport_string(port, dst, 4, count, 0); +} +EXPORT_SYMBOL(kvm_insl); + +void kvm_conf_inb(unsigned int domain, unsigned int bus, unsigned long port, + u8 *byte) +{ + void __iomem *conf_base; + void __iomem *conf_port; + + conf_base = (void __iomem *)get_domain_pci_conf_base(domain); + conf_port = conf_base + port; + *byte = kvm_guest_mmio(conf_port, 0, 1, 0, domain); + DebugCIO("kvm_conf_inb(): value %x read from port %lx, domain #%d " + "(node %d, IO link %d)\n", + (u32) *byte, conf_port, domain, + iohub_domain_to_node(domain), iohub_domain_to_link(domain)); 
+} + +void kvm_conf_inw(unsigned int domain, unsigned int bus, unsigned long port, + u16 *hword) +{ + void __iomem *conf_base; + void __iomem *conf_port; + + conf_base = (void __iomem *)get_domain_pci_conf_base(domain); + conf_port = conf_base + port; + *hword = kvm_guest_mmio(conf_port, 0, 2, 0, domain); + DebugCIO("kvm_conf_inw(): value %x read from port %lx, domain #%d " + "(node %d, IO link %d)\n", + (u32) *hword, conf_port, domain, + iohub_domain_to_node(domain), iohub_domain_to_link(domain)); +} + +void kvm_conf_inl(unsigned int domain, unsigned int bus, unsigned long port, + u32 *word) +{ + void __iomem *conf_base; + void __iomem *conf_port; + + conf_base = (void __iomem *)get_domain_pci_conf_base(domain); + conf_port = conf_base + port; + *word = kvm_guest_mmio(conf_port, 0, 4, 0, domain); + DebugCIO("kvm_conf_inl(): value %x read from port %lx, domain #%d " + "(node %d, IO link %d)\n", + (u32) *word, conf_port, domain, + iohub_domain_to_node(domain), iohub_domain_to_link(domain)); +} + +void kvm_conf_outb(unsigned int domain, unsigned int bus, unsigned long port, + u8 byte) +{ + void __iomem *conf_base; + void __iomem *conf_port; + + conf_base = (void __iomem *)get_domain_pci_conf_base(domain); + conf_port = conf_base + port; + DebugCIO("kvm_conf_outb(): value %x write to port %lx, domain #%d " + "(node %d, IO link %d)\n", + (u32) byte, conf_port, domain, + iohub_domain_to_node(domain), iohub_domain_to_link(domain)); + kvm_guest_mmio(conf_port, byte, 1, 1, domain); +} + +void kvm_conf_outw(unsigned int domain, unsigned int bus, unsigned long port, + u16 hword) +{ + void __iomem *conf_base; + void __iomem *conf_port; + + conf_base = (void __iomem *)get_domain_pci_conf_base(domain); + conf_port = conf_base + port; + DebugCIO("kvm_conf_outw(): value %x write to port %lx, domain #%d " + "(node %d, IO link %d)\n", + (u32) hword, conf_port, domain, + iohub_domain_to_node(domain), iohub_domain_to_link(domain)); + kvm_guest_mmio(conf_port, hword, 2, 1, domain); +} 
+ +void kvm_conf_outl(unsigned int domain, unsigned int bus, unsigned long port, + u32 word) +{ + void __iomem *conf_base; + void __iomem *conf_port; + + conf_base = (void __iomem *)get_domain_pci_conf_base(domain); + conf_port = conf_base + port; + DebugCIO("kvm_conf_outl(): value %x write to port %lx, domain #%d " + "(node %d, IO link %d)\n", + (u32) word, conf_port, domain, + iohub_domain_to_node(domain), iohub_domain_to_link(domain)); + kvm_guest_mmio(conf_port, word, 4, 1, domain); +} + +static inline unsigned long +kvm_vga_vram_access(void *addr, u64 value, u8 size, bool is_write) +{ + e2k_addr_t phys_addr = (e2k_addr_t)addr; + + if (is_write) { + DebugKVMIO("data to write 0x%llx size %d to addr %px\n", + value, size, addr); + } + if (likely(KVM_IS_VGA_VRAM_VIRT_ADDR(phys_addr) || + KVM_IS_VGA_VRAM_PHYS_ADDR(phys_addr))) { + return kvm_guest_mmio(addr, value, size, is_write, 0); + } else if (KVM_IS_PHYS_MEM_MAP_ADDR(phys_addr)) { + if (is_write) { + switch (size) { + case 1: /* byte */ + *((u8 *)addr) = (u8)value; + break; + case 2: /* half word */ + *((u16 *)addr) = (u16)value; + break; + default: + pr_err("%s() Invalid size %d of data " + "to write\n", __func__, size); + BUG_ON(true); + } + return value; + } else { + switch (size) { + case 1: /* byte */ + value = *((u8 *)addr); + break; + case 2: /* half word */ + value = *((u16 *)addr); + break; + default: + pr_err("%s(): Invalid size %d of data " + "to read\n", __func__, size); + BUG_ON(true); + } + DebugKVMIO("read data 0x%llx size %d from addr %px\n", + value, size, addr); + return value; + } + } else { + pr_err("%s(): Invalid address %px to read/write\n", + __func__, addr); + BUG_ON(true); + return -1L; + } +} + +void kvm_scr_writew(u16 w, volatile u16 *addr) +{ + DebugKVMIO("started to write halfword 0x%04x to VGA VRAM addr %px\n", + w, addr); + kvm_vga_vram_access((void *)addr, w, 2, true); +} +u16 kvm_scr_readw(volatile const u16 *addr) +{ + DebugKVMIO("started to read halfword from VGA VRAM addr 
%px\n", addr); + return kvm_vga_vram_access((void *)addr, 0, 2, false); +} +void kvm_vga_writeb(u8 b, volatile u8 *addr) +{ + DebugKVMIO("started to write byte 0x%02x to VGA VRAM addr %px\n", + b, addr); + kvm_vga_vram_access((void *)addr, b, 1, true); +} +u8 kvm_vga_readb(volatile const u8 *addr) +{ + DebugKVMIO("started to read byte from VGA VRAM addr %px\n", addr); + return kvm_vga_vram_access((void *)addr, 0, 1, false); +} + +unsigned long kvm_notify_io(unsigned int notifier_io) +{ + unsigned long ret; + + ret = HYPERVISOR_notify_io(notifier_io); + return ret; +} + +int __init kvm_arch_pci_init(void) +{ + return native_arch_pci_init(); +} diff --git a/arch/e2k/kvm/guest/io.h b/arch/e2k/kvm/guest/io.h new file mode 100644 index 000000000000..552856735144 --- /dev/null +++ b/arch/e2k/kvm/guest/io.h @@ -0,0 +1,14 @@ + +#ifndef __E2K_KVM_GUEST_IO_H_ +#define __E2K_KVM_GUEST_IO_H_ + +#include +#include + +#include + +extern unsigned long kvm_notify_io(unsigned int notifier_io); +extern unsigned long kvm_handle_guest_mmio(void __iomem *mmio_addr, + u64 value, u8 size, u8 is_write); + +#endif /* __E2K_KVM_GUEST_IO_H_ */ diff --git a/arch/e2k/kvm/guest/irq.c b/arch/e2k/kvm/guest/irq.c new file mode 100644 index 000000000000..6be32a280e20 --- /dev/null +++ b/arch/e2k/kvm/guest/irq.c @@ -0,0 +1,371 @@ +/* + * KVM guest virtual IRQs implementation. + */ +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include +#include +#include +#include + +#include "irq.h" +#include "traps.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_IRQ_MODE +#undef DebugKVMIRQ +#define DEBUG_KVM_IRQ_MODE 0 /* kernel virtual IRQ debugging */ +#define DebugKVMIRQ(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_DIRECT_IRQ_MODE +#undef DebugDIRQ +#define DEBUG_DIRECT_IRQ_MODE 1 /* direct IRQ injection debugging */ +#define DebugDIRQ(fmt, args...) \ +({ \ + if (DEBUG_DIRECT_IRQ_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +/* On VIRQ VCUPs common printk() cannot be used, because of thread */ +/* running on these VCPUs has not task structure */ +#undef DEBUG_DUMP_KVM_MODE +#undef DebugDKVM +#define DEBUG_DUMP_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugDKVM(fmt, args...) \ +({ \ + if (DEBUG_DUMP_KVM_MODE) \ + dump_printk("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_DUMP_KVM_IRQ_MODE +#undef DebugDKVMIRQ +#define DEBUG_DUMP_KVM_IRQ_MODE 0 /* kernel virtual IRQ debugging */ +#define DebugDKVMIRQ(fmt, args...) \ +({ \ + if (DEBUG_DUMP_KVM_IRQ_MODE) \ + dump_printk("%s(): " fmt, __func__, ##args); \ +}) + +/* + * FIXME: all VIRQ should be registered at a list or table and + * need implement function to free all VIRQs + * + * based on Xen model of interrupts (driver/xen/events.c) + * There are a few kinds of interrupts which should be mapped to an event + * channel: + * + * 1. Inter-domain notifications. This includes all the virtual + * device events, since they're driven by front-ends in another domain + * (typically dom0). Not supported at present. + * 2. VIRQs, typically used for timers. These are per-cpu events. + * 3. IPIs. Not supported at present. + * 4. Hardware interrupts. Not supported at present. + */ + +kvm_irq_info_t irq_info[KVM_NR_IRQS]; +int kvm_nr_irqs = 0; + +static DEFINE_SPINLOCK(irq_mapping_lock); + +/* IRQ <-> VIRQ mapping. */ +static DEFINE_PER_CPU(int [KVM_NR_VIRQS], virq_to_irq) = { + [0 ... KVM_NR_VIRQS - 1] = -1 +}; + +/* Constructor for packed IRQ information. 
*/ +static inline kvm_irq_info_t mk_unbound_info(void) +{ + return (kvm_irq_info_t) { .type = IRQT_UNBOUND }; +} + +static inline kvm_irq_info_t mk_virq_info(int virq, int cpu) +{ + return (kvm_irq_info_t) { .type = IRQT_VIRQ, + .cpu = cpu, + .active = false, + .u.virq.virq_nr = virq, + .u.virq.gpid_nr = 0, + .u.virq.task = NULL, + .u.virq.dev_id = NULL, + .u.virq.handler = NULL, + .u.virq.count = NULL, + }; +} + +static DEFINE_PER_CPU(struct pt_regs, vcpu_virq_regs); + +/* + * Accessors for packed IRQ information. + */ + +static inline void set_gpid_to_irq(unsigned irq, unsigned gpid_nr) +{ + kvm_virq_info_t *info = virq_info_from_irq(irq); + + info->gpid_nr = gpid_nr; +} + +static inline void set_virq_task_to_irq(unsigned irq, + struct task_struct *task) +{ + kvm_virq_info_t *info = virq_info_from_irq(irq); + + info->task = task; +} + +static inline void activate_irq(unsigned irq, bool activate) +{ + kvm_irq_info_t *info = info_for_irq(irq); + + info->active = activate; +} + +static int find_unbound_irq(void) +{ + int irq; + + for (irq = 0; irq < kvm_nr_irqs; irq++) + if (irq_info[irq].type == IRQT_UNBOUND) + return irq; + + if (kvm_nr_irqs >= KVM_NR_IRQS) + panic("No available IRQ to bind to: increase KVM_NR_IRQS!\n"); + BUG_ON(irq_info[irq].type != IRQT_UNBOUND); + kvm_nr_irqs++; + + return irq; +} + +static inline int do_bind_virq_to_irq(unsigned int virq, unsigned int cpu, + bool create) +{ + int irq; + + spin_lock(&irq_mapping_lock); + + irq = per_cpu(virq_to_irq, cpu)[virq]; + + if (irq < 0 && create) { + irq = find_unbound_irq(); + irq_info[irq] = mk_virq_info(virq, cpu); + per_cpu(virq_to_irq, cpu)[virq] = irq; + } + + spin_unlock(&irq_mapping_lock); + + return irq; +} + +static int bind_virq_to_irq(unsigned int virq, unsigned int cpu) +{ + return do_bind_virq_to_irq(virq, cpu, true); +} + +static void unbind_from_irq(unsigned int irq) +{ + int cpu = cpu_from_irq(irq); + + spin_lock(&irq_mapping_lock); + + switch (type_from_irq(irq)) { + case IRQT_VIRQ: + 
per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1; + break; + case IRQT_IPI: + panic("unbind_from_irq() does not yet implemented fo IPI\n"); + break; + default: + break; + } + + if (irq_info[irq].type != IRQT_UNBOUND) { + irq_info[irq] = mk_unbound_info(); + if (irq + 1 == kvm_nr_irqs) + kvm_nr_irqs--; + } + + spin_unlock(&irq_mapping_lock); +} + +#ifdef CONFIG_DIRECT_VIRQ_INJECTION +static int do_request_direct_virq(int irq, const char *name) +{ + kvm_virq_info_t *virq_info = virq_info_from_irq(irq); + int virq = virq_from_irq(irq); + int cpu = cpu_from_irq(irq); + atomic_t *virqs_num; + int ret; + + DebugDIRQ("process %s (%d): started to register VIRQ #%d CPU #%d " + "for %s\n", + current->comm, current->pid, virq, cpu, name); + + /* atomic VIRQs counter can be received only on this CPU */ + BUG_ON(cpu != smp_processor_id()); + + virqs_num = kvm_get_virqs_atomic_counter(virq); + if (IS_ERR(virqs_num)) { + pr_err("%s(): could not take VIRQs #%d atomic counter\n", + __func__, virq); + return PTR_ERR(virqs_num); + } + virq_info->count = virqs_num; + + ret = HYPERVISOR_get_guest_direct_virq(irq, virq); + if (ret && ret != -EEXIST) { + DebugDIRQ("could not register VIRQ #%d for %s\n", + virq, name); + } else if (ret == -EEXIST) { + virq_info->mode = BY_DIRECT_INJ_VIRQ_MODE; + DebugDIRQ("VIRQ #%d IRQ #%d was already registered for %s\n", + virq, irq, name); + ret = 0; + } else { + virq_info->mode = BY_DIRECT_INJ_VIRQ_MODE; + DebugDIRQ("VIRQ #%d IRQ #%d registered for %s\n", + virq, irq, name); + } + return ret; +} + +static int do_free_direct_virq(int irq) +{ + int ret; + + DebugDIRQ("process %s (%d): started to free VIRQ #%d CPU #%d\n", + current->comm, current->pid, + virq_from_irq(irq), cpu_from_irq(irq)); + + ret = HYPERVISOR_free_guest_direct_virq(irq); + return ret; +} +#else /* ! 
CONFIG_DIRECT_VIRQ_INJECTION */ +static int do_request_direct_virq(int irq, const char *name) +{ + pr_err("Cannot request direct VIRQ injection, turn ON config mode " + "to enable this feature\n"); + return -ENOSYS; +} +static int do_free_direct_virq(int irq) +{ + pr_warning("Cannot free direct VIRQ injection, turn ON config mode " + "to enable this feature\n"); + return -ENOSYS; +} +#endif /* CONFIG_DIRECT_VIRQ_INJECTION */ + +int kvm_request_virq(int virq, irq_handler_t handler, int cpu, + unsigned long irqflags, const char *name, void *dev) +{ + kvm_virq_info_t *virq_info; + int irq; + int ret = -ENOSYS; + + DebugKVM("process %s (%d): started to register VIRQ #%d CPU #%d " + "for %s\n", + current->comm, current->pid, virq, cpu, name); + + irq = bind_virq_to_irq(virq, cpu); + DebugKVM("VIRQ #%d CPU #%d binded to IRQ #%d\n", + virq, cpu, irq); + + virq_info = virq_info_from_irq(irq); + virq_info->virq_nr = virq; + virq_info->dev_id = dev; + virq_info->handler = handler; + + if (irqflags == 0) + irqflags = kvm_get_default_virq_flags(virq); + + if (irqflags & BY_DIRECT_INJ_VIRQ_FLAG) { + ret = do_request_direct_virq(irq, name); + if (ret == 0) { + goto out; + } + DebugDIRQ("could not request direct IRQ #%d %s injection\n", + irq, name); + } else { + BUG(); + } + +out: + if (ret) { + unbind_from_irq(irq); + DebugKVM("could not register VIRQ #%d for %s\n", + virq, name); + } else { + activate_irq(irq, true /* activate */); + DebugKVM("VIRQ #%d IRQ #%d registered and activated for %s\n", + virq, irq, name); + } + return ret; +} + +int kvm_free_virq(int virq, int cpu, void *dev) +{ + kvm_virq_info_t *virq_info; + int irq; + int ret = -ENOSYS; + + DebugKVM("process %s (%d): started to free VIRQ #%d CPU #%d\n", + current->comm, current->pid, virq, cpu); + + irq = per_cpu(virq_to_irq, cpu)[virq]; + if (irq < 0) { + DebugKVM("VIRQ #%d CPU #%d is not bound any IRQ so it free\n", + virq, cpu); + return 0; + } + virq_info = virq_info_from_irq(irq); + + if (virq_info->flags & 
BY_DIRECT_INJ_VIRQ_FLAG) { + ret = do_free_direct_virq(irq); + } else { + BUG(); + } + + unbind_from_irq(irq); + + if (ret) { + DebugKVM("failed for VIRQ #%d CPU #%d, error %d\n", + virq, cpu, ret); + } else { + DebugKVM("VIRQ #%d CPU #%d is now free\n", virq, cpu); + } + return ret; +} + +__init void kvm_virqs_init(int cpu) +{ + struct pt_regs *regs; + + regs = this_cpu_ptr(&vcpu_virq_regs); + memset(regs, 0, sizeof(*regs)); +} + +notrace unsigned long kvm_hypervisor_inject_interrupt(void) +{ + return HYPERVISOR_inject_interrupt(); +} +EXPORT_SYMBOL(kvm_hypervisor_inject_interrupt); diff --git a/arch/e2k/kvm/guest/irq.h b/arch/e2k/kvm/guest/irq.h new file mode 100644 index 000000000000..60b7b3a742af --- /dev/null +++ b/arch/e2k/kvm/guest/irq.h @@ -0,0 +1,104 @@ + +#ifndef __E2K_IRQ_GUEST_H_ +#define __E2K_IRQ_GUEST_H_ + +#include +#include +#include +#include + +#include + +#include "cpu.h" + +static inline atomic_t * +kvm_get_virqs_atomic_counter(int virq_id) +{ + switch (virq_id) { + case KVM_VIRQ_TIMER: + return kvm_get_timer_virqs_num(); + case KVM_VIRQ_HVC: + return kvm_get_hvc_virqs_num(); + case KVM_VIRQ_LAPIC: + return kvm_get_lapic_virqs_num(); + case KVM_VIRQ_CEPIC: + return kvm_get_cepic_virqs_num(); + default: + return ERR_PTR(-EINVAL); + } +} + +/* + * FIXME: all VIRQ should be registered at a list or table and + * need implement function to free all VIRQs + * + * based on Xen model of interrupts (driver/xen/events.c) + * There are a few kinds of interrupts which should be mapped to an event + * channel: + * + * 1. Inter-domain notifications. This includes all the virtual + * device events, since they're driven by front-ends in another domain + * (typically dom0). Not supported at present. + * 2. VIRQs, typically used for timers. These are per-cpu events. + * 3. IPIs. Not supported at present. + * 4. Hardware interrupts. Not supported at present. 
+ */ + +extern kvm_irq_info_t irq_info[KVM_NR_IRQS]; +extern int kvm_nr_irqs; + +/* + * Accessors for packed IRQ information. + */ +static inline kvm_irq_info_t *info_for_irq(unsigned irq) +{ + return &irq_info[irq]; +} +static inline kvm_virq_info_t *virq_info_from_irq(unsigned irq) +{ + kvm_irq_info_t *info = info_for_irq(irq); + + BUG_ON(info == NULL); + BUG_ON(info->type != IRQT_VIRQ); + + return &info->u.virq; +} +static inline unsigned virq_from_irq(unsigned irq) +{ + kvm_virq_info_t *info = virq_info_from_irq(irq); + + return info->virq_nr; +} + +static inline unsigned gpid_from_irq(unsigned irq) +{ + kvm_virq_info_t *info = virq_info_from_irq(irq); + + return info->gpid_nr; +} + +static inline struct task_struct *virq_task_from_irq(unsigned irq) +{ + kvm_virq_info_t *info = virq_info_from_irq(irq); + + return info->task; +} + +static inline kvm_irq_type_t type_from_irq(unsigned irq) +{ + return info_for_irq(irq)->type; +} + +static inline unsigned cpu_from_irq(unsigned irq) +{ + return info_for_irq(irq)->cpu; +} + +static inline bool is_irq_active(unsigned irq) +{ + return info_for_irq(irq)->active; +} + +extern __init void kvm_virqs_init(int cpu); + +#endif /* __E2K_IRQ_GUEST_H_ */ diff --git a/arch/e2k/kvm/guest/lapic.c b/arch/e2k/kvm/guest/lapic.c new file mode 100644 index 000000000000..6d2e031c5724 --- /dev/null +++ b/arch/e2k/kvm/guest/lapic.c @@ -0,0 +1,206 @@ +/* + * KVM guest virtual IRQs implementation. + */ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "cpu.h" +#include "irq.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 1 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_THREAD_IRQ_MODE +#undef DebugKVMTI +#define DEBUG_KVM_THREAD_IRQ_MODE 0 /* kernel virtual IRQ thread */ + /* debugging */ +#define DebugKVMTI(fmt, args...) \ +({ \ + if (DEBUG_KVM_THREAD_IRQ_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_LAPIC_IRQ_MODE +#undef DebugLAI +#define DEBUG_KVM_LAPIC_IRQ_MODE 0 /* local APIC IRQ thread */ + /* debugging */ +#define DebugLAI(fmt, args...) \ +({ \ + if (DEBUG_KVM_LAPIC_IRQ_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_DIRECT_IRQ_MODE +#undef DebugDIRQ +#define DEBUG_DIRECT_IRQ_MODE 0 /* direct IRQ injection debugging */ +#define DebugDIRQ(fmt, args...) \ +({ \ + if (DEBUG_DIRECT_IRQ_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +static bool bsp_direct_virq_lapic = false; + +static int kvm_lapic_virq_panic(struct notifier_block *this, + unsigned long event, void *ptr); + +static DEFINE_PER_CPU(struct notifier_block, resume_block_cpu); + +/* + * Basic functions to access to local APIC state fields on guest. + */ +static inline int kvm_read_lapic_virqs_num(void) +{ + kvm_apic_state_t *lapic; + + lapic = kvm_vcpu_lapic_state(); + return atomic_read(&lapic->virqs_num); +} +static inline void kvm_dec_lapic_virqs_num(void) +{ + kvm_apic_state_t *lapic; + + lapic = kvm_vcpu_lapic_state(); + atomic_dec(&lapic->virqs_num); +} +static inline bool kvm_dec_and_test_lapic_virqs_num(void) +{ + kvm_apic_state_t *lapic; + + lapic = kvm_vcpu_lapic_state(); + return atomic_dec_and_test(&lapic->virqs_num); +} + +/* + * Local APIC of guest VCPU virtualized by host (see arch/e2k/kvm/lapic.c) + * Any virtual IRQ received by local APIC on host, which must be handled + * by guest, causes virtual IRQ type of KVM_VIRQ_LAPIC and wake up + * special thread on special VIRQ VCPU. 
This thread wakes up the thread + * on real VCPU which starts this handler + */ +static irqreturn_t kvm_lapic_interrupt(int irq, void *dev_id) +{ + struct pt_regs *regs; + long cpu = (long)dev_id; + irqreturn_t ret; + unsigned long flags; + + DebugLAI("process %s (%d): started for local APIC VIRQ #%d " + "on CPU #%ld\n", + current->comm, current->pid, irq, cpu); + if (cpu != smp_processor_id()) { + /* here need access to foreign local APIC, not own */ + /* Update local APIC base address to enable such access */ + BUG_ON(true); + } + raw_local_irq_save(flags); + regs = get_irq_regs(); + + ret = native_do_interrupt(regs); + + if (regs->interrupt_vector == KVM_NMI_APIC_VECTOR) { + /* NMI IPI on guest implemented as general inteerupt */ + /* with vector KVM_NMI_APIC_VECTOR */ + /* but nmi_call_function_interrupt() has been called */ + /* under NMI disabled, so now enable NMIs */ + exiting_irq(); + KVM_INIT_KERNEL_UPSR_REG(false, /* enable IRQs */ + false /* disable NMIs */); + } + raw_local_irq_restore(flags); + + DebugKVMTI("local APIC VIRQ #%d on CPU #%ld handled\n", + irq, cpu); + return ret; +} + +static int kvm_do_setup_lapic_virq(bool bsp, int cpu) +{ + const char *name; + struct notifier_block *resume_block; + unsigned long irqflags; + int ret; + + if (!paravirt_enabled()) + return 0; + + DebugKVM("installing KVM guest local APIC VIRQ on CPU %d\n", + cpu); + + name = kasprintf(GFP_KERNEL, "lapic/%d", cpu); + if (!name) + name = ""; + + irqflags = kvm_get_default_virq_flags(KVM_VIRQ_LAPIC); + + if (irqflags & BY_DIRECT_INJ_VIRQ_FLAG) { + BUG_ON(cpu != smp_processor_id()); + ret = kvm_request_virq(KVM_VIRQ_LAPIC, + &kvm_lapic_interrupt, cpu, + BY_DIRECT_INJ_VIRQ_FLAG, + name, (void *) (long) cpu); + if (ret == 0) { + if (bsp) + bsp_direct_virq_lapic = true; + goto success; + } + DebugDIRQ("could not request direct local APIC VIRQ %s " + "injection\n", name); + } else { + /* unknown mode to request VIRQ delivery */ + BUG_ON(true); + ret = -EINVAL; + } + if (ret) { 
+ panic("could not register local APIC VIRQ #%d for CPU #%d\n", + KVM_VIRQ_LAPIC, cpu); + } + +success: + resume_block = &per_cpu(resume_block_cpu, cpu); + resume_block->notifier_call = kvm_lapic_virq_panic; + resume_block->next = NULL; + atomic_notifier_chain_register(&panic_notifier_list, resume_block); + + if (bsp) { + /* Local APIC support on guest is now ready, so enable */ + /* APIC timer and set up the local APIC timer on boot CPU */ + disable_apic_timer = false; + } + + DebugKVM("KVM guest local APIC VIRQ on CPU %d installed\n", cpu); + return ret; +} + +__init int kvm_setup_boot_lapic_virq(void) +{ + return kvm_do_setup_lapic_virq(true, raw_smp_processor_id()); +} + +static int +kvm_lapic_virq_panic(struct notifier_block *this, + unsigned long event, void *ptr) +{ + return NOTIFY_DONE; +} diff --git a/arch/e2k/kvm/guest/mmu.c b/arch/e2k/kvm/guest/mmu.c new file mode 100644 index 000000000000..ee8138697f3b --- /dev/null +++ b/arch/e2k/kvm/guest/mmu.c @@ -0,0 +1,728 @@ + +/* + * KVM guest kernel MMU virtualization + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#undef DEBUG_KVM_PTE_MODE +#undef DebugKVMPTE +#define DEBUG_KVM_PTE_MODE 0 /* kernel pte debugging */ +#define DebugKVMPTE(fmt, args...) \ +({ \ + if (DEBUG_KVM_PTE_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_RECOVERY_MODE +#undef DebugKVMREC +#define DEBUG_KVM_RECOVERY_MODE 0 /* kernel recovery debugging */ +#define DebugKVMREC(fmt, args...) \ +({ \ + if (DEBUG_KVM_RECOVERY_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_MMU_OP_MODE +#undef DebugMMUOP +#define DEBUG_KVM_MMU_OP_MODE 0 /* MUU operations debugging */ +#define DebugMMUOP(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_MMU_OP_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_ACTIVATE_MM_MODE +#undef DebugAMM +#define DEBUG_ACTIVATE_MM_MODE 0 /* activate mm debug */ +#define DebugAMM(fmt, args...) \ +({ \ + if (DEBUG_ACTIVATE_MM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_MMU_ACCESS_MODE +#undef DebugKVMMMU +#define DEBUG_KVM_MMU_ACCESS_MODE 0 /* MUU access debugging */ +#define DebugKVMMMU(fmt, args...) \ +({ \ + if (DEBUG_KVM_MMU_ACCESS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_MM_NOTIFIER_MODE +#undef DebugMN +#define DEBUG_MM_NOTIFIER_MODE 0 /* MM notifier operations debug */ +#define DebugMN(fmt, args...) \ +({ \ + if (DEBUG_MM_NOTIFIER_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +static bool is_simple_ldst_op(u64 ldst_rec_opc, tc_cond_t cond) +{ + ldst_rec_op_t *opc = (ldst_rec_op_t *) &ldst_rec_opc; + bool is_simple_lock_check_ld = tc_cond_is_check_ld(cond) || + tc_cond_is_check_unlock_ld(cond) || + tc_cond_is_lock_check_ld(cond) || + tc_cond_is_spec_lock_check_ld(cond); + + return (!opc->mas || is_simple_lock_check_ld) && + !opc->prot && !opc->root && !opc->mode_h && !opc->fmt_h && + (opc->fmt >= LDST_BYTE_FMT) && (opc->fmt <= LDST_DWORD_FMT) && + !TASK_IS_PROTECTED(current); +} + +static void simple_recovery_faulted_load_to_greg(e2k_addr_t address, + u32 greg_num_d, u64 ld_rec_opc, tc_cond_t cond) +{ + if (tc_cond_is_lock_check_ld(cond)) { + SIMPLE_RECOVERY_LOAD_TO_GREG(address, ld_rec_opc, greg_num_d, + ",sm", 0x0); + } else if (tc_cond_is_spec_lock_check_ld(cond)) { + SIMPLE_RECOVERY_LOAD_TO_GREG(address, ld_rec_opc, greg_num_d, + ",sm", 0x3); + } else { + SIMPLE_RECOVERY_LOAD_TO_GREG(address, ld_rec_opc, greg_num_d, + "", 0x0); + } +} + +static void simple_recovery_faulted_move(e2k_addr_t addr_from, + e2k_addr_t addr_to, u64 ldst_rec_opc, u32 first_time, + tc_cond_t cond) +{ + if (tc_cond_is_lock_check_ld(cond)) { + 
SIMPLE_RECOVERY_MOVE(addr_from, addr_to, ldst_rec_opc, + first_time, ",sm", 0x0); + } else if (tc_cond_is_spec_lock_check_ld(cond)) { + SIMPLE_RECOVERY_MOVE(addr_from, addr_to, ldst_rec_opc, + first_time, ",sm", 0x3); + } else { + SIMPLE_RECOVERY_MOVE(addr_from, addr_to, ldst_rec_opc, + first_time, "", 0x0); + } +} + +static void simple_recovery_faulted_store(e2k_addr_t address, u64 wr_data, + u64 st_rec_opc) +{ + SIMPLE_RECOVERY_STORE(address, wr_data, st_rec_opc); +} + +static probe_entry_t +check_native_mmu_probe(e2k_addr_t virt_addr, unsigned long probe_val) +{ + if (!DTLB_ENTRY_TEST_SUCCESSFUL(probe_val)) { + DebugMMUOP("virt addr 0x%lx, MMU probe returned 0x%lx : " + "probe disabled\n", + virt_addr, probe_val); + } else if (DTLB_ENTRY_TEST_VVA(probe_val)) { + DebugMMUOP("virt addr 0x%lx, MMU probe returned 0x%lx : " + "adrress valid\n", + virt_addr, probe_val); + } else { + DebugMMUOP("virt addr 0x%lx, MMU probe returned 0x%lx\n", + virt_addr, probe_val); + } + + /* native MMU probe returns pfn, but guest understand only gfn */ + /* so return always disable result of probe */ + return __probe_entry(ILLEGAL_PAGE_EP_RES); +} +/* Get Entry probe for virtual address */ +probe_entry_t +kvm_mmu_entry_probe(e2k_addr_t virt_addr) +{ + unsigned long probe_val; + + probe_val = HYPERVISOR_mmu_probe(virt_addr, KVM_MMU_PROBE_ENTRY); + + return check_native_mmu_probe(virt_addr, probe_val); +} +/* Get physical address for virtual address */ +probe_entry_t +kvm_mmu_address_probe(e2k_addr_t virt_addr) +{ + unsigned long probe_val; + + probe_val = HYPERVISOR_mmu_probe(virt_addr, KVM_MMU_PROBE_ADDRESS); + + return check_native_mmu_probe(virt_addr, probe_val); +} + +#ifdef CONFIG_KVM_SHADOW_PT + +pgprot_t kvm_pt_atomic_update(struct mm_struct *mm, + unsigned long addr, pgprot_t *ptp, + pt_atomic_op_t atomic_op, pgprotval_t prot_mask) +{ + pgprot_t oldptval; + gpa_t gpa; + int gmmid_nr; + int ret; + + DebugKVMPTE("started for address 0x%lx\n", addr); + /* FIXME: sinchronization 
on mm should be here */ + oldptval = *ptp; + + switch (atomic_op) { + case ATOMIC_GET_AND_XCHG: + case ATOMIC_GET_AND_CLEAR: { + pte_t pteval = __pte(pgprot_val(oldptval)); + + if (pte_none(pteval)) { + if (!pte_valid(pteval)) { + DebugKVMPTE("pte 0x%lx is none & not valid " + "for addr 0x%lx\n", + pte_val(pteval), addr); + return __pgprot(pte_val(pteval)); + } else if (atomic_op == ATOMIC_GET_AND_CLEAR) { + DebugKVMPTE("pte 0x%lx is already now and " + "should be as valid for addr 0x%lx\n", + pte_val(pteval), addr); + return __pgprot(pte_val(pteval)); + } else if (pte_valid(__pte(prot_mask))) { + DebugKVMPTE("pte 0x%lx is already now and new " + "value should be as valid for " + "addr 0x%lx\n", + pte_val(pteval), addr); + return __pgprot(pte_val(pteval)); + } + } + break; + } + case ATOMIC_TEST_AND_CLEAR_YOUNG: + case ATOMIC_SET_WRPROTECT: { + pte_t pteval = __pte(pgprot_val(oldptval)); + + if (pte_none(pteval)) { + panic("%s(): pte entry 0x%lx is none and cannot be " + "update its protections for addr 0x%lx\n", + __func__, pte_val(pteval), addr); + } + break; + } + default: + panic("%s(): invalid type %d of atomic PT update operations\n", + __func__, atomic_op); + } + + gpa = __pa(ptp); + gmmid_nr = mm->gmmid_nr; + + if (mm != &init_mm && mm != current->mm) { + DebugKVMPTE("mm %px id #%d is not current mm %px id #%d " + "for addr 0x%lx\n", + mm, gmmid_nr, + current->mm, + (current->mm) ? 
current->mm->gmmid_nr : 0, addr); + } else if (mm != &init_mm) { + DebugKVMPTE("current mm %px id #%d, addr 0x%lx\n", + mm, gmmid_nr, addr); + } + if (addr < PAGE_OFFSET) { + DebugKVMPTE("address 0x%lx is user address\n", addr); + if (mm == &init_mm) { + panic("%s(): current mm %px id #%d, addr 0x%lx\n", + __func__, mm, gmmid_nr, addr); + } + } else if (mm != &init_mm) { + panic("%s(): current mm %px id #%d is not kernel init_mm " + "for addr 0x%lx\n", + __func__, mm, gmmid_nr, addr); + } + + ret = HYPERVISOR_pt_atomic_update(gmmid_nr, gpa, &oldptval, atomic_op, + prot_mask); + if (ret) { + panic("%s(): could not update guest pte by host, error %d\n", + __func__, ret); + } + return oldptval; +} + +pgprot_t kvm_pt_atomic_clear_relaxed(pgprotval_t ptot_mask, pgprot_t *pgprot) +{ + pgprot_t oldptval; + pte_t pteval; + gpa_t gpa; + int ret; + + oldptval = *pgprot; + + pteval = __pte(pgprot_val(oldptval)); + if (pte_none(pteval)) { + panic("%s(): pte entry 0x%lx is none and cannot update " + "its protections\n", + __func__, pte_val(pteval)); + } + + gpa = __pa(pgprot); + ret = HYPERVISOR_pt_atomic_update(-1, gpa, &oldptval, + ATOMIC_TEST_AND_CLEAR_RELAXED, ptot_mask); + if (ret) { + panic("%s(): could not update guest pte by host, error %d\n", + __func__, ret); + } + return oldptval; +} + +#endif /* CONFIG_KVM_SHADOW_PT */ + +pte_t kvm_get_pte_for_address(struct vm_area_struct *vma, e2k_addr_t address) +{ + pte_t *pte; + + pte = get_user_address_pte(vma, address); + if (pte == NULL) { + return __pte(0); + } + return *pte; +} + +static void kvm_mmu_notifier_release(struct mmu_notifier *mn, + struct mm_struct *mm) +{ + int gmmid_nr = mm->gmmid_nr; + int ret; + + DebugMN("%s (%d) started to release mm GMMID #%d at %px (users %d) " + "notifier at %px (users %d)\n", + current->comm, current->pid, + gmmid_nr, mm, atomic_read(&mm->mm_users), + mn, mn->users); + if (mm == &init_mm || gmmid_nr < 0) { + panic("kvm__mmdrop() invalid mm: init_mm or GMMID %d < 0\n", + gmmid_nr); + } 
+ BUG_ON(current->mm == mm); + if (gmmid_nr > 0) { + ret = HYPERVISOR_kvm_guest_mm_drop(gmmid_nr); + if (ret != 0) { + pr_err("%s(): hypervisor could not drop host gmm #%d\n", + __func__, gmmid_nr); + } + } + mmu_notifier_put(mn); + mm->gmmid_nr = -1; +} + +static struct mmu_notifier *kvm_alloc_mm_notifier(struct mm_struct *mm) +{ + struct mmu_notifier *mn; + + mn = kzalloc(sizeof(*mn), GFP_KERNEL); + if (!mn) { + pr_err("%s(): %s (%d) could not allocate mm notifier, ENOMEM\n", + __func__, current->comm, current->pid); + return ERR_PTR(-ENOMEM); + } + DebugMN("%s (%d) allocated mm %px notifier at %px\n", + current->comm, current->pid, mm, mn); + + return mn; +} + +static void kvm_free_notifier(struct mmu_notifier *mn) +{ + DebugMN("%s (%d) freeing mm %px notifier at %px\n", + current->comm, current->pid, mn->mm, mn); + kfree(mn); +} + +static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { + .release = kvm_mmu_notifier_release, + .alloc_notifier = kvm_alloc_mm_notifier, + .free_notifier = kvm_free_notifier, +}; + +void kvm_get_mm_notifier_locked(struct mm_struct *mm) +{ + struct mmu_notifier *mn; + + /* create mm notifier to trace some events over mm */ + mn = mmu_notifier_get_locked(&kvm_mmu_notifier_ops, mm); + if (IS_ERR(mn)) { + panic("%s(): %s (%d) ; could not create mm notifier, " + "error %ld\n", + __func__, current->comm, current->pid, PTR_ERR(mn)); + } + DebugMN("%s (%d) created mm notifier at %px\n users %d\n", + current->comm, current->pid, mn, mn->users); +} + +/* + * Memory management mman support + */ +void kvm_activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) +{ + int gmmid_nr = 0; + int ammid_nr; + e2k_addr_t phys_ptb; + int ret; + + if (IS_HV_GM()) + return native_activate_mm(active_mm, mm); + + DebugAMM("started on %s (%d) for mm %px\n", + current->comm, current->pid, mm); + if (mm == &init_mm) { + panic("kvm_activate_mm() invalid mm: init_mm\n"); + } + if (active_mm == NULL) { + ammid_nr = -1; + DebugAMM("active mm is 
NULL\n"); + } else { + ammid_nr = current_thread_info()->gmmid_nr; + DebugAMM("active mm %px GMMID %d\n", active_mm, ammid_nr); + } + native_activate_mm(active_mm, mm); + + /* FIXME: it need implement separate kernel and user root PTs */ + phys_ptb = __pa(mm->pgd); + ret = HYPERVISOR_kvm_activate_guest_mm(ammid_nr, gmmid_nr, phys_ptb); + if (ret < 0) { + panic("%s(): hypervisor could not activate host agent #%d " + "of guest mm\n", + __func__, gmmid_nr); + } + current_thread_info()->gmmid_nr = ret; + + /* FIXME: it need delete this field from arch-independent struct */ + mm->gmmid_nr = ret; +} + +/* + * Recovery faulted store operations + */ +void kvm_recovery_faulted_tagged_store(e2k_addr_t address, u64 wr_data, + u32 data_tag, u64 st_rec_opc, u64 data_ext, u32 data_ext_tag, + u64 opc_ext, int chan, int qp_store, int atomic_store) +{ + long hret; + + DebugKVMREC("started for address 0x%lx data 0x%llx tag 0x%x, " + "channel #%d\n", address, wr_data, data_tag, chan); + + if (likely(is_simple_ldst_op(st_rec_opc, (tc_cond_t) {.word = 0})) && + !data_tag) { + simple_recovery_faulted_store(address, wr_data, st_rec_opc); + } else if (IS_HOST_KERNEL_ADDRESS(address)) { + hret = HYPERVISOR_recovery_faulted_tagged_guest_store(address, + wr_data, data_tag, st_rec_opc, data_ext, + data_ext_tag, opc_ext, chan, qp_store, + atomic_store); + } else { + hret = HYPERVISOR_recovery_faulted_tagged_store(address, + wr_data, data_tag, st_rec_opc, data_ext, + data_ext_tag, opc_ext, chan, qp_store, + atomic_store); + } + + if (!hret) { + DebugKVMREC("started for address 0x%lx data 0x%llx tag 0x%x, " + "channel #%d\n", address, wr_data, data_tag, chan); + } else { + DebugKVMREC("started for address 0x%lx data 0x%llx tag 0x%x, " + "channel #%d will be retried\n", address, wr_data, + data_tag, chan); + } +} +void kvm_recovery_faulted_load(e2k_addr_t address, u64 *ld_val, u8 *data_tag, + u64 ld_rec_opc, int chan, tc_cond_t cond) +{ + long hret; + + DebugKVMREC("started for address 0x%lx, 
channel #%d\n", address, chan); + + if (likely(is_simple_ldst_op(ld_rec_opc, cond))) { + simple_recovery_faulted_move(address, (e2k_addr_t) ld_val, + ld_rec_opc, 1, cond); + if (data_tag) + *data_tag = 0; + } else if (unlikely(IS_HOST_KERNEL_ADDRESS(address) || + IS_HOST_KERNEL_ADDRESS((e2k_addr_t)ld_val) || + IS_HOST_KERNEL_ADDRESS((e2k_addr_t)data_tag))) { + hret = HYPERVISOR_recovery_faulted_guest_load(address, ld_val, + data_tag, ld_rec_opc, chan); + } else { + hret = HYPERVISOR_recovery_faulted_load(address, ld_val, + data_tag, ld_rec_opc, chan); + } + + if (!hret) { + DebugKVMREC("loaded data 0x%llx tag 0x%x from address 0x%lx\n", + *ld_val, *data_tag, address); + } else { + DebugKVMREC("loading data 0x%llx tag 0x%x from address 0x%lx " + "should be retried\n", *ld_val, *data_tag, address); + } +} +void kvm_recovery_faulted_move(e2k_addr_t addr_from, e2k_addr_t addr_to, + e2k_addr_t addr_to_hi, int vr, u64 ld_rec_opc, + int chan, int qp_load, int atomic_load, u32 first_time, + tc_cond_t cond) +{ + long hret; + u64 val; + u8 tag; + + DebugKVMREC("started for address from 0x%lx to addr 0x%lx, " + "channel #%d\n", + addr_from, addr_to, chan); + if (likely(is_simple_ldst_op(ld_rec_opc, cond)) && vr) { + simple_recovery_faulted_move(addr_from, addr_to, ld_rec_opc, + first_time, cond); + } else if (unlikely(IS_HOST_KERNEL_ADDRESS(addr_from) || + IS_HOST_KERNEL_ADDRESS(addr_to))) { + hret = HYPERVISOR_recovery_faulted_guest_move(addr_from, + addr_to, addr_to_hi, vr, ld_rec_opc, chan, + qp_load, atomic_load, first_time); + } else { + hret = HYPERVISOR_recovery_faulted_move(addr_from, addr_to, + addr_to_hi, vr, ld_rec_opc, chan, + qp_load, atomic_load, first_time); + } + + if (DEBUG_KVM_RECOVERY_MODE) + load_value_and_tagd((void *) addr_to, &val, &tag); + + DebugKVMREC("moved data 0x%llx tag 0x%x from address 0x%lx %s\n", + val, tag, addr_from, !hret ? 
"completed" : "will be retried"); +} +void kvm_recovery_faulted_load_to_greg(e2k_addr_t address, u32 greg_num_d, + int vr, u64 ld_rec_opc, int chan, int qp_load, int atomic_load, + void *saved_greg_lo, void *saved_greg_hi, tc_cond_t cond) +{ + long hret; + u64 val; + u8 tag; + + DebugKVMREC("started for address 0x%lx global reg #%d, channel #%d\n", + address, greg_num_d, chan); + + if (likely(is_simple_ldst_op(ld_rec_opc, cond)) + && !saved_greg_lo && vr) { + simple_recovery_faulted_load_to_greg(address, greg_num_d, + ld_rec_opc, cond); + } else if (unlikely(IS_HOST_KERNEL_ADDRESS(address) || + IS_HOST_KERNEL_ADDRESS((e2k_addr_t)saved_greg_lo))) { + hret = HYPERVISOR_recovery_faulted_load_to_guest_greg(address, + greg_num_d, vr, ld_rec_opc, chan, + qp_load, atomic_load, saved_greg_lo, saved_greg_hi); + } else { + hret = HYPERVISOR_recovery_faulted_load_to_greg(address, + greg_num_d, vr, ld_rec_opc, chan, + qp_load, atomic_load, saved_greg_lo, saved_greg_hi); + } + + if (DEBUG_KVM_RECOVERY_MODE) + E2K_GET_DGREG_VAL_AND_TAG(greg_num_d, val, tag); + + DebugKVMREC("loaded data 0x%llx tag 0x%x from address 0x%lx %s\n", + val, tag, address, !hret ? "completed" : "will be retried"); +} +static inline void kvm_do_move_tagged_data(int word_size, e2k_addr_t addr_from, + e2k_addr_t addr_to) +{ + long hret; + + DebugKVMREC("started for address from 0x%lx to addr 0x%lx " + "data format : %s\n", + addr_from, addr_to, + (word_size == sizeof(u32)) ? "word" + : + ((word_size == sizeof(u64)) ? "double" + : + ((word_size == sizeof(u64) * 2) ? 
"quad" + : + "???"))); + if (IS_HOST_KERNEL_ADDRESS(addr_from) || + IS_HOST_KERNEL_ADDRESS(addr_to)) { + hret = HYPERVISOR_move_tagged_guest_data(word_size, + addr_from, addr_to); + } else { + hret = HYPERVISOR_move_tagged_data(word_size, + addr_from, addr_to); + } + + if (!hret) { + DebugKVMREC("move from 0x%lx to 0x%lx, size = %d\n", + addr_from, addr_to, word_size); + } else { + DebugKVMREC("move from 0x%lx to 0x%lx, size = %d will " + "be retried\n", addr_from, addr_to, word_size); + } +} +void kvm_move_tagged_word(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + kvm_do_move_tagged_data(sizeof(u32), addr_from, addr_to); +} +void kvm_move_tagged_dword(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + kvm_do_move_tagged_data(sizeof(u64), addr_from, addr_to); +} +void kvm_move_tagged_qword(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + kvm_do_move_tagged_data(sizeof(u64) * 2, addr_from, addr_to); +} +mmu_reg_t kvm_read_dtlb_reg(e2k_addr_t virt_addr) +{ + mmu_reg_t reg_value; + + DebugKVMMMU("started for address 0x%lx\n", virt_addr); + if (IS_HV_GM()) { + return NATIVE_READ_DTLB_REG(virt_addr); + } + reg_value = HYPERVISOR_read_dtlb_reg(virt_addr); + if ((long)reg_value < 0) { + pr_err("%s(): hypervisor could not read DTLB entry " + "for addres 0x%lx, error %ld\n", + __func__, virt_addr, (long)reg_value); + } + return reg_value; +} +void kvm_flush_dcache_line(e2k_addr_t virt_addr) +{ + long ret; + + DebugKVMMMU("started for address 0x%lx\n", virt_addr); + ret = HYPERVISOR_flush_dcache_line(virt_addr); + if (ret != 0) { + pr_err("kvm_flush_dcache_line() hypervisor could not flush " + "DCACHE line for addres 0x%lx, error %ld\n", + virt_addr, ret); + } +} +EXPORT_SYMBOL(kvm_flush_dcache_line); + +void kvm_clear_dcache_l1_set(e2k_addr_t virt_addr, unsigned long set) +{ + long ret; + + DebugKVMMMU("started for address 0x%lx, set 0x%lx\n", + virt_addr, set); + ret = HYPERVISOR_clear_dcache_l1_set(virt_addr, set); + if (ret != 0) { + pr_err("kvm_flush_dcache_range() hypervisor 
could not clear " + "DCACHE L1 set 0x%lx for addres 0x%lx, error %ld\n", + set, virt_addr, ret); + } +} + +void kvm_flush_dcache_range(void *addr, size_t len) +{ + long ret; + + DebugKVMMMU("started for address %px size 0x%lx\n", + addr, len); + ret = HYPERVISOR_flush_dcache_range(addr, len); + if (ret != 0) { + pr_err("kvm_flush_dcache_range() hypervisor could not flush " + "DCACHE range from %px, size 0x%lx error %ld\n", + addr, len, ret); + } +} +EXPORT_SYMBOL(kvm_flush_dcache_range); + +void kvm_clear_dcache_l1_range(void *virt_addr, size_t len) +{ + long ret; + + DebugKVMMMU("started for address %px size 0x%lx\n", + virt_addr, len); + ret = HYPERVISOR_clear_dcache_l1_range(virt_addr, len); + if (ret != 0) { + pr_err("kvm_flush_dcache_range() hypervisor could not clear " + "DCACHE L1 range from %px, size 0x%lx error %ld\n", + virt_addr, len, ret); + } +} + +/* + * Guest kernel functions can be run on any guest user processes and can have + * arbitrary MMU contexts to track which on host is not possible, therefore + * it is necessary to flush all instruction caches + */ +void kvm_flush_icache_all(void) +{ + long ret; + + DebugKVMMMU("started flush_icache_all()\n"); + ret = HYPERVISOR_flush_icache_all(); + if (ret != 0) { + pr_err("%s(): hypervisor could not flush all ICACHE, " + "error %ld\n", + __func__, ret); + } +} + +void kvm_flush_icache_range(e2k_addr_t start, e2k_addr_t end) +{ + kvm_flush_icache_all(); +} +EXPORT_SYMBOL(kvm_flush_icache_range); + +void kvm_flush_icache_range_array(struct icache_range_array *icache_range_arr) +{ + kvm_flush_icache_all(); +} + +void kvm_flush_icache_page(struct vm_area_struct *vma, struct page *page) +{ + kvm_flush_icache_all(); +} + +/* + * Guest ICACHEs can be localy flushed as user caches + */ +int kvm_flush_local_icache_range(e2k_addr_t start, e2k_addr_t end) +{ + e2k_addr_t addr; + + DebugKVMMMU("started for range from 0x%lx to 0x%lx\n", + start, end); + if (IS_HV_GM()) { + native_flush_icache_range(start, end); + 
return 0; + } + + start = round_down(start, E2K_ICACHE_SET_SIZE); + end = round_up(end, E2K_ICACHE_SET_SIZE); + + flush_ICACHE_line_begin(); + for (addr = start; addr < end; addr += E2K_ICACHE_SET_SIZE) { + DebugKVMMMU("will flush_ICACHE_line_user() 0x%lx\n", addr); + __flush_ICACHE_line_user(addr); + } + flush_ICACHE_line_end(); + return 0; +} +EXPORT_SYMBOL(kvm_flush_local_icache_range); + +/* + * Write/read DCACHE L2 registers + */ +void kvm_write_dcache_l2_reg(unsigned long reg_val, int reg_num, int bank_num) +{ + panic("kvm_write_dcache_l2_reg() not implemented\n"); +} +unsigned long kvm_read_dcache_l2_reg(int reg_num, int bank_num) +{ + panic("kvm_read_dcache_l2_reg() not implemented\n"); + return -1; +} diff --git a/arch/e2k/kvm/guest/paravirt.c b/arch/e2k/kvm/guest/paravirt.c new file mode 100644 index 000000000000..d39ba5c9322e --- /dev/null +++ b/arch/e2k/kvm/guest/paravirt.c @@ -0,0 +1,1551 @@ +/* + * Core of KVM guest paravirt_ops implementation. + * + * This file contains the kvm_paravirt_ops structure itself, and the + * implementations for: + * - privileged instructions + * - booting and setup + */ + +#include + +#include +#include +#include +#include +#include +#include /* user_trap_init() */ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "paravirt.h" +#include "time.h" +#include "process.h" +#include "pic.h" + +static const pv_info_t kvm_info __initdata = { + .name = "KVM", + .paravirt_enabled = 1, + .page_offset = GUEST_PAGE_OFFSET, + .vmalloc_start = GUEST_VMALLOC_START, + .vmalloc_end = GUEST_VMALLOC_END, + .vmemmap_start = GUEST_VMEMMAP_START, + .vmemmap_end = GUEST_VMEMMAP_END, +}; + +static void * +BOOT_KVM_KERNEL_VA_TO_PA(void 
*virt_pnt, unsigned long kernel_base) +{ + return boot_kvm_kernel_va_to_pa(virt_pnt, kernel_base); +} + +static void * +BOOT_KVM_FUNC_TO_PA(void *virt_pnt) +{ + return boot_kvm_func_to_pa(virt_pnt); +} + +static e2k_addr_t +BOOT_KVM_VPA_TO_PA(e2k_addr_t vpa) +{ + return boot_kvm_vpa_to_pa(vpa); +} +static e2k_addr_t +BOOT_KVM_PA_TO_VPA(e2k_addr_t pa) +{ + return boot_kvm_pa_to_vpa(pa); +} + +static e2k_addr_t +KVM_VPA_TO_PA(e2k_addr_t vpa) +{ + return kvm_vpa_to_pa(vpa); +} +static e2k_addr_t +KVM_PA_TO_VPA(e2k_addr_t pa) +{ + return kvm_pa_to_vpa(pa); +} + +pv_v2p_ops_t kvm_v2p_ops = { + .boot_kernel_va_to_pa = BOOT_KVM_KERNEL_VA_TO_PA, + .boot_func_to_pa = BOOT_KVM_FUNC_TO_PA, + .boot_vpa_to_pa = BOOT_KVM_VPA_TO_PA, + .boot_pa_to_vpa = BOOT_KVM_PA_TO_VPA, + .vpa_to_pa = KVM_VPA_TO_PA, + .pa_to_vpa = KVM_PA_TO_VPA, +}; + +static void __init kvm_banner(void) +{ + printk(KERN_INFO "Booting paravirtualized guest kernel on %s\n", + pv_info.name); +} + +pv_init_ops_t kvm_init_ops = { + .banner = kvm_banner, + .set_mach_type_id = kvm_set_mach_type_id, + .print_machine_type_info = kvm_print_machine_type_info, +}; + +static void kvm_debug_outb(u8 byte, u16 port) +{ + KVM_DEBUG_OUTB(byte, port); +} +static u8 kvm_debug_inb(u16 port) +{ + return KVM_DEBUG_INB(port); +} +static u32 kvm_debug_inl(u16 port) +{ + return KVM_DEBUG_INL(port); +} + +static const struct pv_boot_ops kvm_boot_ops __initdata = { + .boot_setup_machine_id = boot_kvm_setup_machine_id, + .boot_loader_probe_memory = boot_kvm_probe_memory, + .boot_get_bootblock_size = boot_kvm_get_bootblock_size, + .boot_cpu_relax = boot_kvm_cpu_relax, +#ifdef CONFIG_SMP + .boot_smp_cpu_config = boot_kvm_smp_cpu_config, + .boot_smp_node_config = boot_kvm_smp_node_config, +#endif /* CONFIG_SMP */ + .boot_reserve_all_bootmem = boot_kvm_reserve_all_bootmem, + .boot_map_all_bootmem = boot_kvm_map_all_bootmem, + .boot_map_needful_to_equal_virt_area = + boot_kvm_map_needful_to_equal_virt_area, + .boot_kernel_switch_to_virt = 
boot_kvm_switch_to_virt, + .boot_clear_bss = boot_kvm_clear_bss, + .boot_check_bootblock = boot_kvm_check_bootblock, + .init_terminate_boot_init = init_kvm_terminate_boot_init, + .boot_parse_param = boot_kvm_parse_param, + .boot_debug_cons_outb = kvm_debug_outb, + .boot_debug_cons_inb = kvm_debug_inb, + .boot_debug_cons_inl = kvm_debug_inl, + .debug_cons_outb = kvm_debug_outb, + .debug_cons_inb = kvm_debug_inb, + .debug_cons_inl = kvm_debug_inl, + .do_boot_panic = boot_kvm_panic, +}; + +static unsigned long kvm_read_OSCUD_lo_reg_value(void) +{ + return KVM_READ_OSCUD_LO_REG_VALUE(); +} + +static unsigned long kvm_read_OSCUD_hi_reg_value(void) +{ + return KVM_READ_OSCUD_HI_REG_VALUE(); +} + +static void kvm_write_OSCUD_lo_reg_value(unsigned long reg_value) +{ + KVM_WRITE_OSCUD_LO_REG_VALUE(reg_value); +} + +static void kvm_write_OSCUD_hi_reg_value(unsigned long reg_value) +{ + KVM_WRITE_OSCUD_HI_REG_VALUE(reg_value); +} + +static unsigned long kvm_read_OSGD_lo_reg_value(void) +{ + return KVM_READ_OSGD_LO_REG_VALUE(); +} + +static unsigned long kvm_read_OSGD_hi_reg_value(void) +{ + return KVM_READ_OSGD_HI_REG_VALUE(); +} + +static void kvm_write_OSGD_lo_reg_value(unsigned long reg_value) +{ + KVM_WRITE_OSGD_LO_REG_VALUE(reg_value); +} + +static void kvm_write_OSGD_hi_reg_value(unsigned long reg_value) +{ + KVM_WRITE_OSGD_HI_REG_VALUE(reg_value); +} + +static unsigned long kvm_read_CUD_lo_reg_value(void) +{ + return KVM_READ_CUD_LO_REG_VALUE(); +} + +static unsigned long kvm_read_CUD_hi_reg_value(void) +{ + return KVM_READ_CUD_HI_REG_VALUE(); +} + +static void kvm_write_CUD_lo_reg_value(unsigned long reg_value) +{ + KVM_WRITE_CUD_LO_REG_VALUE(reg_value); +} + +static void kvm_write_CUD_hi_reg_value(unsigned long reg_value) +{ + KVM_WRITE_CUD_HI_REG_VALUE(reg_value); +} + +static unsigned long kvm_read_GD_lo_reg_value(void) +{ + return KVM_READ_GD_LO_REG_VALUE(); +} + +static unsigned long kvm_read_GD_hi_reg_value(void) +{ + return KVM_READ_GD_HI_REG_VALUE(); +} + 
+static void kvm_write_GD_lo_reg_value(unsigned long reg_value) +{ + KVM_WRITE_GD_LO_REG_VALUE(reg_value); +} + +static void kvm_write_GD_hi_reg_value(unsigned long reg_value) +{ + KVM_WRITE_GD_HI_REG_VALUE(reg_value); +} + +static unsigned long kvm_read_PSP_lo_reg_value(void) +{ + return KVM_READ_PSP_LO_REG_VALUE(); +} +static unsigned long kvm_read_PSP_hi_reg_value(void) +{ + return KVM_READ_PSP_HI_REG_VALUE(); +} +static void kvm_write_PSP_lo_reg_value(unsigned long reg_value) +{ + KVM_WRITE_PSP_LO_REG_VALUE(reg_value); +} +static void kvm_write_PSP_hi_reg_value(unsigned long reg_value) +{ + KVM_WRITE_PSP_HI_REG_VALUE(reg_value); +} +static unsigned long kvm_read_PSHTP_reg_value(void) +{ + return KVM_READ_PSHTP_REG_VALUE(); +} +static void kvm_write_PSHTP_reg_value(unsigned long reg_value) +{ + KVM_WRITE_PSHTP_REG_VALUE(reg_value); +} + +static unsigned long kvm_read_PCSP_lo_reg_value(void) +{ + return KVM_READ_PCSP_LO_REG_VALUE(); +} +static unsigned long kvm_read_PCSP_hi_reg_value(void) +{ + return KVM_READ_PCSP_HI_REG_VALUE(); +} +static void kvm_write_PCSP_lo_reg_value(unsigned long reg_value) +{ + KVM_WRITE_PCSP_LO_REG_VALUE(reg_value); +} +static void kvm_write_PCSP_hi_reg_value(unsigned long reg_value) +{ + KVM_WRITE_PCSP_HI_REG_VALUE(reg_value); +} +static int kvm_read_PCSHTP_reg_svalue(void) +{ + return KVM_READ_PCSHTP_REG_SVALUE(); +} +static void kvm_write_PCSHTP_reg_svalue(int reg_value) +{ + KVM_WRITE_PCSHTP_REG_SVALUE(reg_value); +} + +static unsigned long kvm_read_CR0_lo_reg_value(void) +{ + return KVM_READ_CR0_LO_REG_VALUE(); +} +static unsigned long kvm_read_CR0_hi_reg_value(void) +{ + return KVM_READ_CR0_HI_REG_VALUE(); +} +static unsigned long kvm_read_CR1_lo_reg_value(void) +{ + return KVM_READ_CR1_LO_REG_VALUE(); +} +static unsigned long kvm_read_CR1_hi_reg_value(void) +{ + return KVM_READ_CR1_HI_REG_VALUE(); +} +static void kvm_write_CR0_lo_reg_value(unsigned long reg_value) +{ + KVM_WRITE_CR0_LO_REG_VALUE(reg_value); +} +static void 
kvm_write_CR0_hi_reg_value(unsigned long reg_value) +{ + KVM_WRITE_CR0_HI_REG_VALUE(reg_value); +} +static void kvm_write_CR1_lo_reg_value(unsigned long reg_value) +{ + KVM_WRITE_CR1_LO_REG_VALUE(reg_value); +} +static void kvm_write_CR1_hi_reg_value(unsigned long reg_value) +{ + KVM_WRITE_CR1_HI_REG_VALUE(reg_value); +} +static unsigned long kvm_read_USD_lo_reg_value(void) +{ + return KVM_READ_USD_LO_REG_VALUE(); +} +static unsigned long kvm_read_USD_hi_reg_value(void) +{ + return KVM_READ_USD_HI_REG_VALUE(); +} +static void kvm_write_USD_lo_reg_value(unsigned long reg_value) +{ + KVM_WRITE_USD_LO_REG_VALUE(reg_value); +} +static void kvm_write_USD_hi_reg_value(unsigned long reg_value) +{ + KVM_WRITE_USD_HI_REG_VALUE(reg_value); +} + +static unsigned long kvm_read_WD_reg_value(void) +{ + return KVM_READ_WD_REG_VALUE(); +} +static void kvm_write_WD_reg_value(unsigned long reg_value) +{ + KVM_WRITE_WD_REG_VALUE(reg_value); +} + +static unsigned int kvm_read_UPSR_reg_value(void) +{ + return KVM_READ_UPSR_REG_VALUE(); +} +static void kvm_write_UPSR_reg_value(unsigned int reg_value) +{ + KVM_WRITE_UPSR_REG_VALUE(reg_value); +} + +static unsigned int kvm_read_PSR_reg_value(void) +{ + return KVM_READ_PSR_REG_VALUE(); +} +static void kvm_write_PSR_reg_value(unsigned int reg_value) +{ + KVM_WRITE_PSR_REG_VALUE(reg_value); +} + +static unsigned long kvm_read_CTPR_reg_value(int reg_no) +{ + switch (reg_no) { + case 1: return KVM_READ_CTPR_REG_VALUE(1); + case 2: return KVM_READ_CTPR_REG_VALUE(2); + case 3: return KVM_READ_CTPR_REG_VALUE(3); + default: + panic("kvm_read_CTPR_reg_value() invalid CTPR # %d\n", + reg_no); + } + return -1; +} + +static void kvm_write_CTPR_reg_value(int reg_no, unsigned long reg_value) +{ + switch (reg_no) { + case 1: + KVM_WRITE_CTPR_REG_VALUE(1, reg_value); + break; + case 2: + KVM_WRITE_CTPR_REG_VALUE(2, reg_value); + break; + case 3: + KVM_WRITE_CTPR_REG_VALUE(3, reg_value); + break; + default: + panic("kvm_write_CTPR_reg_value() invalid CTPR 
# %d\n", + reg_no); + } +} + +static unsigned long kvm_read_SBR_reg_value(void) +{ + return KVM_READ_SBR_REG_VALUE(); +} + +static void kvm_write_SBR_reg_value(unsigned long reg_value) +{ + KVM_WRITE_SBR_REG_VALUE(reg_value); +} + +#ifdef NEED_PARAVIRT_LOOP_REGISTERS +static unsigned long kvm_read_LSR_reg_value(void) +{ + return KVM_READ_LSR_REG_VALUE(); +} + +static void kvm_write_LSR_reg_value(unsigned long reg_value) +{ + KVM_WRITE_LSR_REG_VALUE(reg_value); +} + +static unsigned long kvm_read_ILCR_reg_value(void) +{ + return KVM_READ_ILCR_REG_VALUE(); +} + +static void kvm_write_ILCR_reg_value(unsigned long reg_value) +{ + KVM_WRITE_ILCR_REG_VALUE(reg_value); +} +#endif /* NEED_PARAVIRT_LOOP_REGISTERS */ + +static unsigned long kvm_read_OSR0_reg_value(void) +{ + return KVM_READ_OSR0_REG_VALUE(); +} + +static void kvm_write_OSR0_reg_value(unsigned long reg_value) +{ + KVM_WRITE_OSR0_REG_VALUE(reg_value); +} + +static unsigned int kvm_read_OSEM_reg_value(void) +{ + return KVM_READ_OSEM_REG_VALUE(); +} + +static void kvm_write_OSEM_reg_value(unsigned int reg_value) +{ + KVM_WRITE_OSEM_REG_VALUE(reg_value); +} + +static unsigned int kvm_read_BGR_reg_value(void) +{ + return KVM_READ_BGR_REG_VALUE(); +} + +static void kvm_write_BGR_reg_value(unsigned int reg_value) +{ + KVM_WRITE_BGR_REG_VALUE(reg_value); +} + +static unsigned long kvm_read_CLKR_reg_value(void) +{ + return KVM_READ_CLKR_REG_VALUE(); +} +static unsigned long kvm_read_CU_HW0_reg_value(void) +{ + return KVM_READ_CU_HW0_REG_VALUE(); +} +static unsigned long kvm_read_CU_HW1_reg_value(void) +{ + return KVM_READ_CU_HW1_REG_VALUE(); +} +static void kvm_write_CU_HW0_reg_value(unsigned long reg_value) +{ + KVM_WRITE_CU_HW0_REG_VALUE(reg_value); +} +static void kvm_write_CU_HW1_reg_value(unsigned long reg_value) +{ + KVM_WRITE_CU_HW1_REG_VALUE(reg_value); +} + +static unsigned long kvm_read_RPR_lo_reg_value(void) +{ + return KVM_READ_RPR_LO_REG_VALUE(); +} + +static unsigned long kvm_read_RPR_hi_reg_value(void) 
+{ + return KVM_READ_RPR_HI_REG_VALUE(); +} + +static void kvm_write_RPR_lo_reg_value(unsigned long reg_value) +{ + KVM_WRITE_RPR_LO_REG_VALUE(reg_value); +} + +static void kvm_write_RPR_hi_reg_value(unsigned long reg_value) +{ + KVM_WRITE_RPR_HI_REG_VALUE(reg_value); +} + +static unsigned long kvm_read_SBBP_reg_value(void) +{ + return KVM_READ_SBBP_REG_VALUE(); +} + +static unsigned long kvm_read_IP_reg_value(void) +{ + return KVM_READ_IP_REG_VALUE(); +} + +static unsigned int kvm_read_DIBCR_reg_value(void) +{ + return KVM_READ_DIBCR_REG_VALUE(); +} + +static unsigned int kvm_read_DIBSR_reg_value(void) +{ + return KVM_READ_DIBSR_REG_VALUE(); +} + +static unsigned long kvm_read_DIMCR_reg_value(void) +{ + return KVM_READ_DIMCR_REG_VALUE(); +} + +static unsigned long kvm_read_DIBAR0_reg_value(void) +{ + return KVM_READ_DIBAR0_REG_VALUE(); +} + +static unsigned long kvm_read_DIBAR1_reg_value(void) +{ + return KVM_READ_DIBAR1_REG_VALUE(); +} + +static unsigned long kvm_read_DIBAR2_reg_value(void) +{ + return KVM_READ_DIBAR2_REG_VALUE(); +} + +static unsigned long kvm_read_DIBAR3_reg_value(void) +{ + return KVM_READ_DIBAR3_REG_VALUE(); +} + +static unsigned long kvm_read_DIMAR0_reg_value(void) +{ + return KVM_READ_DIMAR0_REG_VALUE(); +} + +static unsigned long kvm_read_DIMAR1_reg_value(void) +{ + return KVM_READ_DIMAR1_REG_VALUE(); +} + +static void kvm_write_DIBCR_reg_value(unsigned int reg_value) +{ + KVM_WRITE_DIBCR_REG_VALUE(reg_value); +} + +static void kvm_write_DIBSR_reg_value(unsigned int reg_value) +{ + KVM_WRITE_DIBSR_REG_VALUE(reg_value); +} + +static void kvm_write_DIMCR_reg_value(unsigned long reg_value) +{ + KVM_WRITE_DIMCR_REG_VALUE(reg_value); +} + +static void kvm_write_DIBAR0_reg_value(unsigned long reg_value) +{ + KVM_WRITE_DIBAR0_REG_VALUE(reg_value); +} + +static void kvm_write_DIBAR1_reg_value(unsigned long reg_value) +{ + KVM_WRITE_DIBAR1_REG_VALUE(reg_value); +} + +static void kvm_write_DIBAR2_reg_value(unsigned long reg_value) +{ + 
KVM_WRITE_DIBAR2_REG_VALUE(reg_value); +} + +static void kvm_write_DIBAR3_reg_value(unsigned long reg_value) +{ + KVM_WRITE_DIBAR3_REG_VALUE(reg_value); +} + +static void kvm_write_DIMAR0_reg_value(unsigned long reg_value) +{ + KVM_WRITE_DIMAR0_REG_VALUE(reg_value); +} + +static void kvm_write_DIMAR1_reg_value(unsigned long reg_value) +{ + KVM_WRITE_DIMAR1_REG_VALUE(reg_value); +} + +static unsigned long kvm_read_CUTD_reg_value(void) +{ + return KVM_READ_CUTD_REG_VALUE(); +} + +static void kvm_write_CUTD_reg_value(unsigned long reg_value) +{ + KVM_WRITE_CUTD_REG_VALUE(reg_value); +} + +static unsigned int kvm_read_CUIR_reg_value(void) +{ + return KVM_READ_CUIR_REG_VALUE(); +} + +static unsigned int kvm_read_PFPFR_reg_value(void) +{ + return KVM_READ_PFPFR_REG_VALUE(); +} + +static void kvm_write_PFPFR_reg_value(unsigned int reg_value) +{ + KVM_WRITE_PFPFR_REG_VALUE(reg_value); +} + +static unsigned int kvm_read_FPCR_reg_value(void) +{ + return KVM_READ_FPCR_REG_VALUE(); +} + +static void kvm_write_FPCR_reg_value(unsigned int reg_value) +{ + KVM_WRITE_FPCR_REG_VALUE(reg_value); +} + +static unsigned int kvm_read_FPSR_reg_value(void) +{ + return KVM_READ_FPSR_REG_VALUE(); +} + +static void kvm_write_FPSR_reg_value(unsigned int reg_value) +{ + KVM_WRITE_FPSR_REG_VALUE(reg_value); +} + +static unsigned long kvm_read_CS_lo_reg_value(void) +{ + return KVM_READ_CS_LO_REG_VALUE(); +} + +static unsigned long kvm_read_CS_hi_reg_value(void) +{ + return KVM_READ_CS_HI_REG_VALUE(); +} + +static unsigned long kvm_read_DS_lo_reg_value(void) +{ + return KVM_READ_DS_LO_REG_VALUE(); +} + +static unsigned long kvm_read_DS_hi_reg_value(void) +{ + return KVM_READ_DS_HI_REG_VALUE(); +} + +static unsigned long kvm_read_ES_lo_reg_value(void) +{ + return KVM_READ_ES_LO_REG_VALUE(); +} + +static unsigned long kvm_read_ES_hi_reg_value(void) +{ + return KVM_READ_ES_HI_REG_VALUE(); +} + +static unsigned long kvm_read_FS_lo_reg_value(void) +{ + return KVM_READ_FS_LO_REG_VALUE(); +} + +static 
unsigned long kvm_read_FS_hi_reg_value(void) +{ + return KVM_READ_FS_HI_REG_VALUE(); +} + +static unsigned long kvm_read_GS_lo_reg_value(void) +{ + return KVM_READ_GS_LO_REG_VALUE(); +} + +static unsigned long kvm_read_GS_hi_reg_value(void) +{ + return KVM_READ_GS_HI_REG_VALUE(); +} + +static unsigned long kvm_read_SS_lo_reg_value(void) +{ + return KVM_READ_SS_LO_REG_VALUE(); +} + +static unsigned long kvm_read_SS_hi_reg_value(void) +{ + return KVM_READ_SS_HI_REG_VALUE(); +} + +static void kvm_write_CS_lo_reg_value(unsigned long reg_value) +{ + KVM_WRITE_CS_LO_REG_VALUE(reg_value); +} + +static void kvm_write_CS_hi_reg_value(unsigned long reg_value) +{ + KVM_WRITE_CS_HI_REG_VALUE(reg_value); +} + +static void kvm_write_DS_lo_reg_value(unsigned long reg_value) +{ + KVM_WRITE_DS_LO_REG_VALUE(reg_value); +} + +static void kvm_write_DS_hi_reg_value(unsigned long reg_value) +{ + KVM_WRITE_DS_HI_REG_VALUE(reg_value); +} + +static void kvm_write_ES_lo_reg_value(unsigned long reg_value) +{ + KVM_WRITE_ES_LO_REG_VALUE(reg_value); +} + +static void kvm_write_ES_hi_reg_value(unsigned long reg_value) +{ + KVM_WRITE_ES_HI_REG_VALUE(reg_value); +} + +static void kvm_write_FS_lo_reg_value(unsigned long reg_value) +{ + KVM_WRITE_FS_LO_REG_VALUE(reg_value); +} + +static void kvm_write_FS_hi_reg_value(unsigned long reg_value) +{ + KVM_WRITE_FS_HI_REG_VALUE(reg_value); +} + +static void kvm_write_GS_lo_reg_value(unsigned long reg_value) +{ + KVM_WRITE_GS_LO_REG_VALUE(reg_value); +} + +static void kvm_write_GS_hi_reg_value(unsigned long reg_value) +{ + KVM_WRITE_GS_HI_REG_VALUE(reg_value); +} + +static void kvm_write_SS_lo_reg_value(unsigned long reg_value) +{ + KVM_WRITE_SS_LO_REG_VALUE(reg_value); +} + +static void kvm_write_SS_hi_reg_value(unsigned long reg_value) +{ + KVM_WRITE_SS_HI_REG_VALUE(reg_value); +} + +static unsigned long kvm_read_IDR_reg_value(void) +{ + return KVM_READ_IDR_REG_VALUE(); +} + +static unsigned int kvm_read_CORE_MODE_reg_value(void) +{ + return 
KVM_READ_CORE_MODE_REG_VALUE(); +} +static void kvm_write_CORE_MODE_reg_value(unsigned int modes) +{ + return KVM_WRITE_CORE_MODE_REG_VALUE(modes); +} + +static void kvm_put_updated_cpu_regs_flags(unsigned long flags) +{ + KVM_PUT_UPDATED_CPU_REGS_FLAGS(flags); +} + +static unsigned int pv_kvm_read_aasr_reg_value(void) +{ + return kvm_read_aasr_reg_value(); +} +static void pv_kvm_write_aasr_reg_value(unsigned int reg_value) +{ + kvm_write_aasr_reg_value(reg_value); +} +static unsigned int pv_kvm_read_aafstr_reg_value(void) +{ + return kvm_read_aafstr_reg_value(); +} +static void pv_kvm_write_aafstr_reg_value(unsigned int reg_value) +{ + kvm_write_aafstr_reg_value(reg_value); +} + +static void kvm_flush_stacks(void) +{ + KVM_FLUSHCPU; +} +static void kvm_flush_regs_stack(void) +{ + KVM_FLUSHR; +} +static void kvm_flush_chain_stack(void) +{ + KVM_FLUSHC; +} +static void +do_free_old_kernel_hardware_stacks(void) +{ + kvm_free_old_kernel_hardware_stacks(); +} +static void +kvm_switch_to_expanded_proc_stack(long delta_size, long delta_offset, + bool decr_k_ps) +{ + kvm_do_switch_to_expanded_proc_stack(delta_size, delta_offset, + decr_k_ps); +} +static void +kvm_switch_to_expanded_chain_stack(long delta_size, long delta_offset, + bool decr_k_pcs) +{ + kvm_do_switch_to_expanded_chain_stack(delta_size, delta_offset, + decr_k_pcs); +} +static void +do_stack_bounds_trap_enable(void) +{ + kvm_stack_bounds_trap_enable(); +} +static bool +do_is_proc_stack_bounds(struct thread_info *ti, struct pt_regs *regs) +{ + return kvm_is_proc_stack_bounds(ti, regs); +} +static bool +do_is_chain_stack_bounds(struct thread_info *ti, struct pt_regs *regs) +{ + return kvm_is_chain_stack_bounds(ti, regs); +} +static void +guest_instr_page_fault(struct pt_regs *regs, tc_fault_type_t ftype, + const int async_instr) +{ + kvm_instr_page_fault(regs, ftype, async_instr); +} +static unsigned long +do_mmio_page_fault(struct pt_regs *regs, struct trap_cellar *tcellar) +{ + return 
kvm_mmio_page_fault(regs, (trap_cellar_t *)tcellar); +} + +static void kvm_copy_stacks_to_memory(void) +{ + KVM_COPY_STACKS_TO_MEMORY(); +} + +static __interrupt void +kvm_restore_kernel_gregs_in_syscall(struct thread_info *ti) +{ + KVM_RESTORE_KERNEL_GREGS_IN_SYSCALL(ti); +} + +static unsigned long +guest_fast_tagged_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return kvm_fast_tagged_memory_copy(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); +} +static unsigned long +guest_fast_tagged_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + return kvm_fast_tagged_memory_set(addr, val, tag, len, strd_opcode); +} + +static unsigned long +guest_extract_tags_32(u16 *dst, const void *src) +{ + return kvm_extract_tags_32(dst, src); +} +#ifdef CONFIG_SMP + +static void +do_smp_flush_tlb_all(void) +{ + kvm_smp_flush_tlb_all(); +} +static void +do_smp_flush_tlb_mm(struct mm_struct *mm) +{ + kvm_smp_flush_tlb_mm(mm); +} +static void +do_smp_flush_tlb_page(struct vm_area_struct *vma, e2k_addr_t addr) +{ + kvm_smp_flush_tlb_page(vma, addr); +} +static void +do_smp_flush_tlb_range(struct mm_struct *mm, e2k_addr_t start, e2k_addr_t end) +{ + kvm_smp_flush_tlb_range(mm, start, end); +} +static void +do_smp_flush_pmd_tlb_range(struct mm_struct *mm, e2k_addr_t start, + e2k_addr_t end) +{ + kvm_smp_flush_pmd_tlb_range(mm, start, end); +} +static void +do_smp_flush_tlb_range_and_pgtables(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end) +{ + kvm_smp_flush_tlb_range_and_pgtables(mm, start, end); +} +static void +do_smp_flush_icache_range(e2k_addr_t start, e2k_addr_t end) +{ + kvm_smp_flush_icache_range(start, end); +} +static void +do_smp_flush_icache_range_array(void *icache_range_arr) +{ + kvm_smp_flush_icache_range_array(icache_range_arr); +} +static void +do_smp_flush_icache_page(struct vm_area_struct *vma, struct page *page) +{ + kvm_smp_flush_icache_page(vma, 
page); +} +static void +do_smp_flush_icache_all(void) +{ + kvm_smp_flush_icache_all(); +} +static void +do_smp_flush_icache_kernel_line(e2k_addr_t addr) +{ + kvm_smp_flush_icache_kernel_line(addr); +} +#endif /* CONFIG_SMP */ + +static const pv_cpu_ops_t kvm_cpu_ops __initdata = { + .read_OSCUD_lo_reg_value = kvm_read_OSCUD_lo_reg_value, + .read_OSCUD_hi_reg_value = kvm_read_OSCUD_hi_reg_value, + .write_OSCUD_lo_reg_value = kvm_write_OSCUD_lo_reg_value, + .write_OSCUD_hi_reg_value = kvm_write_OSCUD_hi_reg_value, + .read_OSGD_lo_reg_value = kvm_read_OSGD_lo_reg_value, + .read_OSGD_hi_reg_value = kvm_read_OSGD_hi_reg_value, + .write_OSGD_lo_reg_value = kvm_write_OSGD_lo_reg_value, + .write_OSGD_hi_reg_value = kvm_write_OSGD_hi_reg_value, + .read_CUD_lo_reg_value = kvm_read_CUD_lo_reg_value, + .read_CUD_hi_reg_value = kvm_read_CUD_hi_reg_value, + .write_CUD_lo_reg_value = kvm_write_CUD_lo_reg_value, + .write_CUD_hi_reg_value = kvm_write_CUD_hi_reg_value, + .read_GD_lo_reg_value = kvm_read_GD_lo_reg_value, + .read_GD_hi_reg_value = kvm_read_GD_hi_reg_value, + .write_GD_lo_reg_value = kvm_write_GD_lo_reg_value, + .write_GD_hi_reg_value = kvm_write_GD_hi_reg_value, + .read_PSP_lo_reg_value = kvm_read_PSP_lo_reg_value, + .read_PSP_hi_reg_value = kvm_read_PSP_hi_reg_value, + .write_PSP_lo_reg_value = kvm_write_PSP_lo_reg_value, + .write_PSP_hi_reg_value = kvm_write_PSP_hi_reg_value, + .read_PSHTP_reg_value = kvm_read_PSHTP_reg_value, + .write_PSHTP_reg_value = kvm_write_PSHTP_reg_value, + .read_PCSP_lo_reg_value = kvm_read_PCSP_lo_reg_value, + .read_PCSP_hi_reg_value = kvm_read_PCSP_hi_reg_value, + .write_PCSP_lo_reg_value = kvm_write_PCSP_lo_reg_value, + .write_PCSP_hi_reg_value = kvm_write_PCSP_hi_reg_value, + .read_PCSHTP_reg_value = kvm_read_PCSHTP_reg_svalue, + .write_PCSHTP_reg_value = kvm_write_PCSHTP_reg_svalue, + .read_CR0_lo_reg_value = kvm_read_CR0_lo_reg_value, + .read_CR0_hi_reg_value = kvm_read_CR0_hi_reg_value, + .read_CR1_lo_reg_value = 
kvm_read_CR1_lo_reg_value, + .read_CR1_hi_reg_value = kvm_read_CR1_hi_reg_value, + .write_CR0_lo_reg_value = kvm_write_CR0_lo_reg_value, + .write_CR0_hi_reg_value = kvm_write_CR0_hi_reg_value, + .write_CR1_lo_reg_value = kvm_write_CR1_lo_reg_value, + .write_CR1_hi_reg_value = kvm_write_CR1_hi_reg_value, + .read_CTPR_reg_value = kvm_read_CTPR_reg_value, + .write_CTPR_reg_value = kvm_write_CTPR_reg_value, + .read_USD_lo_reg_value = kvm_read_USD_lo_reg_value, + .read_USD_hi_reg_value = kvm_read_USD_hi_reg_value, + .write_USD_lo_reg_value = kvm_write_USD_lo_reg_value, + .write_USD_hi_reg_value = kvm_write_USD_hi_reg_value, + .read_SBR_reg_value = kvm_read_SBR_reg_value, + .write_SBR_reg_value = kvm_write_SBR_reg_value, + .read_WD_reg_value = kvm_read_WD_reg_value, + .write_WD_reg_value = kvm_write_WD_reg_value, +#ifdef NEED_PARAVIRT_LOOP_REGISTERS + .read_LSR_reg_value = kvm_read_LSR_reg_value, + .write_LSR_reg_value = kvm_write_LSR_reg_value, + .read_ILCR_reg_value = kvm_read_ILCR_reg_value, + .write_ILCR_reg_value = kvm_write_ILCR_reg_value, +#endif /* NEED_PARAVIRT_LOOP_REGISTERS */ + .read_OSR0_reg_value = kvm_read_OSR0_reg_value, + .write_OSR0_reg_value = kvm_write_OSR0_reg_value, + .read_OSEM_reg_value = kvm_read_OSEM_reg_value, + .write_OSEM_reg_value = kvm_write_OSEM_reg_value, + .read_BGR_reg_value = kvm_read_BGR_reg_value, + .write_BGR_reg_value = kvm_write_BGR_reg_value, + .read_CLKR_reg_value = kvm_read_CLKR_reg_value, + .read_CU_HW0_reg_value = kvm_read_CU_HW0_reg_value, + .read_CU_HW1_reg_value = kvm_read_CU_HW1_reg_value, + .write_CU_HW0_reg_value = kvm_write_CU_HW0_reg_value, + .write_CU_HW1_reg_value = kvm_write_CU_HW1_reg_value, + .read_RPR_lo_reg_value = kvm_read_RPR_lo_reg_value, + .read_RPR_hi_reg_value = kvm_read_RPR_hi_reg_value, + .write_RPR_lo_reg_value = kvm_write_RPR_lo_reg_value, + .write_RPR_hi_reg_value = kvm_write_RPR_hi_reg_value, + .read_SBBP_reg_value = kvm_read_SBBP_reg_value, + .read_IP_reg_value = kvm_read_IP_reg_value, + 
.read_DIBCR_reg_value = kvm_read_DIBCR_reg_value, + .read_DIBSR_reg_value = kvm_read_DIBSR_reg_value, + .read_DIMCR_reg_value = kvm_read_DIMCR_reg_value, + .read_DIBAR0_reg_value = kvm_read_DIBAR0_reg_value, + .read_DIBAR1_reg_value = kvm_read_DIBAR1_reg_value, + .read_DIBAR2_reg_value = kvm_read_DIBAR2_reg_value, + .read_DIBAR3_reg_value = kvm_read_DIBAR3_reg_value, + .read_DIMAR0_reg_value = kvm_read_DIMAR0_reg_value, + .read_DIMAR1_reg_value = kvm_read_DIMAR1_reg_value, + .write_DIBCR_reg_value = kvm_write_DIBCR_reg_value, + .write_DIBSR_reg_value = kvm_write_DIBSR_reg_value, + .write_DIMCR_reg_value = kvm_write_DIMCR_reg_value, + .write_DIBAR0_reg_value = kvm_write_DIBAR0_reg_value, + .write_DIBAR1_reg_value = kvm_write_DIBAR1_reg_value, + .write_DIBAR2_reg_value = kvm_write_DIBAR2_reg_value, + .write_DIBAR3_reg_value = kvm_write_DIBAR3_reg_value, + .write_DIMAR0_reg_value = kvm_write_DIMAR0_reg_value, + .write_DIMAR1_reg_value = kvm_write_DIMAR1_reg_value, + .read_CUTD_reg_value = kvm_read_CUTD_reg_value, + .read_CUIR_reg_value = kvm_read_CUIR_reg_value, + .write_CUTD_reg_value = kvm_write_CUTD_reg_value, + .read_UPSR_reg_value = kvm_read_UPSR_reg_value, + .write_UPSR_reg_value = kvm_write_UPSR_reg_value, + .write_UPSR_irq_barrier = kvm_write_UPSR_reg_value, + .read_PSR_reg_value = kvm_read_PSR_reg_value, + .write_PSR_reg_value = kvm_write_PSR_reg_value, + .write_PSR_irq_barrier = kvm_write_PSR_reg_value, + .read_PFPFR_reg_value = kvm_read_PFPFR_reg_value, + .read_FPCR_reg_value = kvm_read_FPCR_reg_value, + .read_FPSR_reg_value = kvm_read_FPSR_reg_value, + .write_PFPFR_reg_value = kvm_write_PFPFR_reg_value, + .write_FPCR_reg_value = kvm_write_FPCR_reg_value, + .write_FPSR_reg_value = kvm_write_FPSR_reg_value, + .read_CS_lo_reg_value = kvm_read_CS_lo_reg_value, + .read_CS_hi_reg_value = kvm_read_CS_hi_reg_value, + .read_DS_lo_reg_value = kvm_read_DS_lo_reg_value, + .read_DS_hi_reg_value = kvm_read_DS_hi_reg_value, + .read_ES_lo_reg_value = 
kvm_read_ES_lo_reg_value, + .read_ES_hi_reg_value = kvm_read_ES_hi_reg_value, + .read_FS_lo_reg_value = kvm_read_FS_lo_reg_value, + .read_FS_hi_reg_value = kvm_read_FS_hi_reg_value, + .read_GS_lo_reg_value = kvm_read_GS_lo_reg_value, + .read_GS_hi_reg_value = kvm_read_GS_hi_reg_value, + .read_SS_lo_reg_value = kvm_read_SS_lo_reg_value, + .read_SS_hi_reg_value = kvm_read_SS_hi_reg_value, + .write_CS_lo_reg_value = kvm_write_CS_lo_reg_value, + .write_CS_hi_reg_value = kvm_write_CS_hi_reg_value, + .write_DS_lo_reg_value = kvm_write_DS_lo_reg_value, + .write_DS_hi_reg_value = kvm_write_DS_hi_reg_value, + .write_ES_lo_reg_value = kvm_write_ES_lo_reg_value, + .write_ES_hi_reg_value = kvm_write_ES_hi_reg_value, + .write_FS_lo_reg_value = kvm_write_FS_lo_reg_value, + .write_FS_hi_reg_value = kvm_write_FS_hi_reg_value, + .write_GS_lo_reg_value = kvm_write_GS_lo_reg_value, + .write_GS_hi_reg_value = kvm_write_GS_hi_reg_value, + .write_SS_lo_reg_value = kvm_write_SS_lo_reg_value, + .write_SS_hi_reg_value = kvm_write_SS_hi_reg_value, + .read_IDR_reg_value = kvm_read_IDR_reg_value, + .boot_read_IDR_reg_value = kvm_read_IDR_reg_value, + .read_CORE_MODE_reg_value = kvm_read_CORE_MODE_reg_value, + .boot_read_CORE_MODE_reg_value = kvm_read_CORE_MODE_reg_value, + .write_CORE_MODE_reg_value = kvm_write_CORE_MODE_reg_value, + .boot_write_CORE_MODE_reg_value = kvm_write_CORE_MODE_reg_value, + .put_updated_cpu_regs_flags = kvm_put_updated_cpu_regs_flags, + .read_aasr_reg_value = pv_kvm_read_aasr_reg_value, + .write_aasr_reg_value = pv_kvm_write_aasr_reg_value, + .read_aafstr_reg_value = pv_kvm_read_aafstr_reg_value, + .write_aafstr_reg_value = pv_kvm_write_aafstr_reg_value, + .flush_stacks = kvm_flush_stacks, + .flush_regs_stack = kvm_flush_regs_stack, + .flush_chain_stack = kvm_flush_chain_stack, + .copy_stacks_to_memory = kvm_copy_stacks_to_memory, + .get_active_cr0_lo_value = kvm_get_active_cr0_lo_value, + .get_active_cr0_hi_value = kvm_get_active_cr0_hi_value, + 
.get_active_cr1_lo_value = kvm_get_active_cr1_lo_value, + .get_active_cr1_hi_value = kvm_get_active_cr1_hi_value, + .put_active_cr0_lo_value = kvm_put_active_cr0_lo_value, + .put_active_cr0_hi_value = kvm_put_active_cr0_hi_value, + .put_active_cr1_lo_value = kvm_put_active_cr1_lo_value, + .put_active_cr1_hi_value = kvm_put_active_cr1_hi_value, + .correct_trap_psp_pcsp = kvm_correct_trap_psp_pcsp, + .correct_scall_psp_pcsp = kvm_correct_scall_psp_pcsp, + .correct_trap_return_ip = kvm_correct_trap_return_ip, + .nested_kernel_return_address = kvm_nested_kernel_return_address, + .prepare_start_thread_frames = kvm_prepare_start_thread_frames, + .copy_kernel_stacks = kvm_copy_kernel_stacks, + .virt_cpu_thread_init = kvm_vcpu_boot_thread_init, + .copy_user_stacks = kvm_copy_user_stacks, + .define_kernel_hw_stacks_sizes = kvm_define_kernel_hw_stacks_sizes, + .define_user_hw_stacks_sizes = kvm_define_user_hw_stacks_sizes, + .switch_to_expanded_proc_stack = kvm_switch_to_expanded_proc_stack, + .switch_to_expanded_chain_stack = kvm_switch_to_expanded_chain_stack, + .stack_bounds_trap_enable = do_stack_bounds_trap_enable, + .is_proc_stack_bounds = do_is_proc_stack_bounds, + .is_chain_stack_bounds = do_is_chain_stack_bounds, + .release_hw_stacks = kvm_release_hw_stacks, + .release_kernel_stacks = kvm_release_kernel_stacks, + .register_kernel_hw_stack = kvm_register_kernel_hw_stack, + .register_kernel_data_stack = kvm_register_kernel_data_stack, + .unregister_kernel_hw_stack = kvm_unregister_kernel_hw_stack, + .unregister_kernel_data_stack = kvm_unregister_kernel_data_stack, + .kmem_area_host_chunk = kvm_kmem_area_host_chunk, + .kmem_area_unhost_chunk = kvm_kmem_area_unhost_chunk, + .switch_to_new_user = kvm_switch_to_new_user, + .do_map_user_hard_stack_to_kernel = NULL, + .do_switch_to_kernel_hardware_stacks = NULL, + .free_old_kernel_hardware_stacks = do_free_old_kernel_hardware_stacks, + .instr_page_fault = guest_instr_page_fault, + .mmio_page_fault = do_mmio_page_fault, + 
.do_hw_stack_bounds = kvm_do_hw_stack_bounds, + .handle_interrupt = guest_do_interrupt, + .init_guest_system_handlers_table = kvm_init_system_handlers_table, + .fix_process_pt_regs = kvm_fix_process_pt_regs, + .run_user_handler = kvm_run_user_handler, + .trap_table_entry1 = (long (*)(int, ...))kvm_guest_ttable_entry1, + .trap_table_entry3 = (long (*)(int, ...))kvm_guest_ttable_entry3, + .trap_table_entry4 = (long (*)(int, ...))kvm_guest_ttable_entry4, + .do_fast_clock_gettime = kvm_do_fast_clock_gettime, + .fast_sys_clock_gettime = kvm_fast_sys_clock_gettime, + .do_fast_gettimeofday = kvm_do_fast_gettimeofday, + .fast_sys_siggetmask = kvm_fast_sys_siggetmask, + .fast_tagged_memory_copy = guest_fast_tagged_memory_copy, + .fast_tagged_memory_set = guest_fast_tagged_memory_set, + .extract_tags_32 = guest_extract_tags_32, + .save_local_glob_regs = kvm_save_local_glob_regs, + .restore_local_glob_regs = kvm_restore_local_glob_regs, + .restore_kernel_gregs_in_syscall = kvm_restore_kernel_gregs_in_syscall, + .get_all_user_glob_regs = kvm_get_all_user_glob_regs, + .arch_setup_machine = e2k_virt_setup_machine, + .cpu_default_idle = kvm_default_idle, + .cpu_relax = kvm_cpu_relax, + .cpu_relax_no_resched = kvm_cpu_relax_no_resched, +#ifdef CONFIG_SMP + .wait_for_cpu_booting = kvm_wait_for_cpu_booting, + .wait_for_cpu_wake_up = kvm_wait_for_cpu_wake_up, + .activate_cpu = kvm_activate_cpu, + .activate_all_cpus = kvm_activate_all_cpus, + .csd_lock_wait = kvm_csd_lock_wait, + .csd_lock = kvm_csd_lock, + .arch_csd_lock_async = kvm_arch_csd_lock_async, + .csd_unlock = kvm_csd_unlock, + .setup_local_pic_virq = kvm_setup_pic_virq, + .startup_local_pic_virq = kvm_startup_pic_virq, + .smp_flush_tlb_all = do_smp_flush_tlb_all, + .smp_flush_tlb_mm = do_smp_flush_tlb_mm, + .smp_flush_tlb_page = do_smp_flush_tlb_page, + .smp_flush_tlb_range = do_smp_flush_tlb_range, + .smp_flush_pmd_tlb_range = do_smp_flush_pmd_tlb_range, + .smp_flush_tlb_range_and_pgtables = + 
do_smp_flush_tlb_range_and_pgtables, + .smp_flush_icache_range = do_smp_flush_icache_range, + .smp_flush_icache_range_array = + do_smp_flush_icache_range_array, + .smp_flush_icache_page = do_smp_flush_icache_page, + .smp_flush_icache_all = do_smp_flush_icache_all, + .smp_flush_icache_kernel_line = + do_smp_flush_icache_kernel_line, +#endif /* CONFIG_SMP */ + .host_printk = kvm_host_printk, + .arch_spin_lock_slow = kvm_arch_spin_lock_slow, + .arch_spin_locked_slow = kvm_arch_spin_locked_slow, + .arch_spin_unlock_slow = kvm_arch_spin_unlock_slow, + .ord_wait_read_lock_slow = kvm_wait_read_lock_slow, + .ord_wait_write_lock_slow = kvm_wait_write_lock_slow, + .ord_arch_read_locked_slow = kvm_arch_read_locked_slow, + .ord_arch_write_locked_slow = kvm_arch_write_locked_slow, + .ord_arch_read_unlock_slow = kvm_arch_read_unlock_slow, + .ord_arch_write_unlock_slow = kvm_arch_write_unlock_slow, +}; + +static void kvm_WRITE_MMU_REG(mmu_addr_t mmu_addr, mmu_reg_t mmu_reg) +{ + KVM_WRITE_MMU_REG(mmu_addr, mmu_reg); +} + +static mmu_reg_t kvm_READ_MMU_REG(mmu_addr_t mmu_addr) +{ + return (mmu_reg_t)KVM_READ_MMU_REG(mmu_addr); +} + +/* + * Write/read Data TLB register + */ + +static void kvm_WRITE_DTLB_REG(tlb_addr_t tlb_addr, mmu_reg_t mmu_reg) +{ + KVM_WRITE_DTLB_REG(tlb_addr, mmu_reg); +} + +static mmu_reg_t kvm_READ_DTLB_REG(tlb_addr_t tlb_addr) +{ + return KVM_READ_DTLB_REG(tlb_addr); +} + +/* + * Flush TLB page/entry + */ + +static void +kvm_FLUSH_TLB_ENTRY(flush_op_t flush_op, flush_addr_t flush_addr) +{ + KVM_FLUSH_TLB_ENTRY(flush_op, flush_addr); +} + +/* + * Flush DCACHE line + */ + +static void +kvm_FLUSH_DCACHE_LINE(e2k_addr_t virt_addr) +{ + kvm_flush_dcache_line(virt_addr); +} + +/* + * Clear DCACHE L1 set + */ +static void +kvm_CLEAR_DCACHE_L1_SET(e2k_addr_t virt_addr, unsigned long set) +{ + kvm_clear_dcache_l1_set(virt_addr, set); +} +static void +kvm_flush_DCACHE_range(void *addr, size_t len) +{ + kvm_flush_dcache_range(addr, len); +} +static void 
+kvm_clear_DCACHE_L1_range(void *virt_addr, size_t len) +{ + kvm_clear_dcache_l1_range(virt_addr, len); +} + +/* + * Flush ICACHE line + */ + +static void +kvm_FLUSH_ICACHE_LINE(flush_op_t flush_op, flush_addr_t flush_addr) +{ + KVM_FLUSH_ICACHE_LINE(flush_op, flush_addr); +} + +/* + * Flush and invalidate or write back CACHE(s) (invalidate all caches + * of the processor) + */ + +static void +kvm_FLUSH_CACHE_L12(flush_op_t flush_op) +{ + KVM_FLUSH_CACHE_L12(flush_op); +} + +/* + * Flush TLB (invalidate all TLBs of the processor) + */ + +static void +kvm_FLUSH_TLB_ALL(flush_op_t flush_op) +{ + KVM_FLUSH_TLB_ALL(flush_op); +} + +/* + * Flush ICACHE (invalidate instruction caches of the processor) + */ + +static void +kvm_FLUSH_ICACHE_ALL(flush_op_t flush_op) +{ + KVM_FLUSH_ICACHE_ALL(flush_op); +} + +/* + * Get Entry probe for virtual address + */ + +static probe_entry_t +kvm_ENTRY_PROBE_MMU_OP(e2k_addr_t virt_addr) +{ + return KVM_ENTRY_PROBE_MMU_OP(virt_addr); +} + +/* + * Get physical address for virtual address + */ + +static probe_entry_t +kvm_ADDRESS_PROBE_MMU_OP(e2k_addr_t virt_addr) +{ + return KVM_ADDRESS_PROBE_MMU_OP(virt_addr); +} + +/* + * Read CLW register + */ + +static clw_reg_t +kvm_READ_CLW_REG(clw_addr_t clw_addr) +{ + return KVM_READ_CLW_REG(clw_addr); +} + +/* + * Write CLW register + */ + +static void +kvm_WRITE_CLW_REG(clw_addr_t clw_addr, clw_reg_t val) +{ + KVM_WRITE_CLW_REG(clw_addr, val); +} + +/* save DAM state */ +static void +do_save_DAM(unsigned long long dam[DAM_ENTRIES_NUM]) +{ + kvm_save_DAM(dam); +} + +/* + * KVM MMU DEBUG registers access + */ +static inline mmu_reg_t +PV_DO_READ_MMU_DEBUG_REG_VALUE(int reg_no) +{ + return KVM_READ_MMU_DEBUG_REG_VALUE(reg_no); +} +static inline void +PV_DO_WRITE_MMU_DEBUG_REG_VALUE(int reg_no, mmu_reg_t value) +{ + KVM_WRITE_MMU_DEBUG_REG_VALUE(reg_no, value); +} + +static void +do_write_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval, + bool only_validate, bool 
to_move) +{ + kvm_write_pte_at(mm, addr, ptep, pteval, only_validate, to_move); +} + +static void kvm_raw_set_pte(pte_t *ptep, pte_t pteval) +{ + kvm_set_pte_kernel(ptep, pteval); +} + +static pte_t do_pv_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, bool to_move) +{ + return kvm_do_ptep_get_and_clear(mm, addr, ptep, false, to_move); +} + +static void +do_write_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmdval, + bool only_validate) +{ + kvm_write_pmd_at(mm, addr, pmdp, pmdval, only_validate); +} + +static void +do_write_pud_at(struct mm_struct *mm, unsigned long addr, + pud_t *pudp, pud_t pudval, + bool only_validate) +{ + kvm_write_pud_at(mm, addr, pudp, pudval, only_validate); +} + +static void +do_write_pgd_at(struct mm_struct *mm, unsigned long addr, + pgd_t *pgdp, pgd_t pgdval, + bool only_validate) +{ + kvm_write_pgd_at(mm, addr, pgdp, pgdval, only_validate); +} + +pv_mmu_ops_t kvm_mmu_ops = { + .recovery_faulted_tagged_store = kvm_recovery_faulted_tagged_store, + .recovery_faulted_load = kvm_recovery_faulted_load, + .recovery_faulted_move = kvm_recovery_faulted_move, + .recovery_faulted_load_to_greg = kvm_recovery_faulted_load_to_greg, + .move_tagged_word = kvm_move_tagged_word, + .move_tagged_dword = kvm_move_tagged_dword, + .move_tagged_qword = kvm_move_tagged_qword, + .write_mmu_reg = kvm_WRITE_MMU_REG, + .read_mmu_reg = kvm_READ_MMU_REG, + .write_dtlb_reg = kvm_WRITE_DTLB_REG, + .read_dtlb_reg = kvm_READ_DTLB_REG, + .flush_tlb_entry = kvm_FLUSH_TLB_ENTRY, + .flush_dcache_line = kvm_FLUSH_DCACHE_LINE, + .clear_dcache_l1_set = kvm_CLEAR_DCACHE_L1_SET, + .flush_dcache_range = kvm_flush_DCACHE_range, + .clear_dcache_l1_range = kvm_clear_DCACHE_L1_range, + .write_dcache_l2_reg = kvm_write_dcache_l2_reg, + .read_dcache_l2_reg = kvm_read_dcache_l2_reg, + .flush_icache_line = kvm_FLUSH_ICACHE_LINE, + .flush_cache_all = kvm_FLUSH_CACHE_L12, + .do_flush_tlb_all = kvm_FLUSH_TLB_ALL, + .flush_icache_all = 
kvm_FLUSH_ICACHE_ALL, + .entry_probe_mmu_op = kvm_ENTRY_PROBE_MMU_OP, + .address_probe_mmu_op = kvm_ADDRESS_PROBE_MMU_OP, + .read_clw_reg = kvm_READ_CLW_REG, + .write_clw_reg = kvm_WRITE_CLW_REG, + .save_DAM = do_save_DAM, + .write_mmu_debug_reg = PV_DO_WRITE_MMU_DEBUG_REG_VALUE, + .read_mmu_debug_reg = PV_DO_READ_MMU_DEBUG_REG_VALUE, + .boot_set_pte_at = boot_kvm_set_pte, + .write_pte_at = do_write_pte_at, + .set_pte = kvm_raw_set_pte, + .write_pmd_at = do_write_pmd_at, + .write_pud_at = do_write_pud_at, + .write_pgd_at = do_write_pgd_at, + .ptep_get_and_clear = do_pv_ptep_get_and_clear, + .ptep_wrprotect_atomic = kvm_ptep_wrprotect_atomic, + .get_pte_for_address = kvm_get_pte_for_address, + .remap_area_pages = kvm_remap_area_pages, + .host_guest_vmap_area = kvm_host_guest_vmap_area, + .unhost_guest_vmap_area = kvm_unhost_guest_vmap_area, + + /* memory management - mman.h */ + .free_mm = kvm_free_mm, + .mm_init = kvm_mm_init, + .activate_mm = kvm_activate_mm, + .make_host_pages_valid = kvm_make_host_pages_valid, + .set_memory_attr_on_host = + (int (*)(e2k_addr_t, e2k_addr_t, int)) + kvm_set_memory_attr_on_host, + .access_process_vm = native_access_process_vm, + + /* memory management - mm.h */ + .free_pgd_range = kvm_free_pgd_range, + + /* kernel virtual memory allocation */ + .alloc_vmap_area = kvm_alloc_vmap_area, + .__free_vmap_area = kvm__free_vmap_area, + .free_unmap_vmap_area = kvm_free_unmap_vmap_area, +#ifdef CONFIG_SMP + .pcpu_get_vm_areas = kvm_pcpu_get_vm_areas, +#endif /* CONFIG_SMP */ + + /* unmap __init areas */ + .unmap_initmem = kvm_unmap_initmem, +}; + +pv_time_ops_t kvm_time_ops = { + .time_init = kvm_time_init, + .clock_init = kvm_clock_init, + .read_current_timer = kvm_read_current_timer, + .get_cpu_running_cycles = kvm_get_cpu_running_cycles, + .do_sched_clock = kvm_sched_clock, + .steal_clock = kvm_steal_clock, +}; + +pv_io_ops_t kvm_io_ops = { + .boot_writeb = kvm_writeb, + .boot_writew = kvm_writew, + .boot_writel = kvm_writel, + 
.boot_writell = kvm_writell, + .boot_readb = kvm_readb, + .boot_readw = kvm_readw, + .boot_readl = kvm_readl, + .boot_readll = kvm_readll, + + .writeb = kvm_writeb, + .writew = kvm_writew, + .writel = kvm_writel, + .writell = kvm_writell, + .readb = kvm_readb, + .readw = kvm_readw, + .readl = kvm_readl, + .readll = kvm_readll, + + .inb = kvm_inb, + .outb = kvm_outb, + .outw = kvm_outw, + .inw = kvm_inw, + .outl = kvm_outl, + .inl = kvm_inl, + + .outsb = kvm_outsb, + .outsw = kvm_outsw, + .outsl = kvm_outsl, + .insb = kvm_insb, + .insw = kvm_insw, + .insl = kvm_insl, + + .conf_inb = kvm_conf_inb, + .conf_inw = kvm_conf_inw, + .conf_inl = kvm_conf_inl, + .conf_outb = kvm_conf_outb, + .conf_outw = kvm_conf_outw, + .conf_outl = kvm_conf_outl, + + .scr_writew = kvm_scr_writew, + .scr_readw = kvm_scr_readw, + .vga_writeb = kvm_vga_writeb, + .vga_readb = kvm_vga_readb, + + .pci_init = kvm_arch_pci_init, +}; +static void kvm_set_pv_ops(void) +{ + /* set PV_OPS pointers to virtual functions entries */ + cur_pv_v2p_ops = &pv_v2p_ops; + cur_pv_boot_ops = &pv_boot_ops; + cur_pv_cpu_ops = &pv_cpu_ops; + cur_pv_mmu_ops = &pv_mmu_ops; + cur_pv_io_ops = &pv_io_ops; +} + +/* First C function to be called on KVM guest boot */ +asmlinkage void __init kvm_init_paravirt_guest(void) +{ + /* Install kvm guest paravirt ops */ + pv_info = kvm_info; + pv_v2p_ops = kvm_v2p_ops; + pv_boot_ops = kvm_boot_ops; + pv_init_ops = kvm_init_ops; + pv_time_ops = kvm_time_ops; + pv_cpu_ops = kvm_cpu_ops; + pv_mmu_ops = kvm_mmu_ops; + pv_io_ops = kvm_io_ops; + kvm_set_pv_ops(); +} diff --git a/arch/e2k/kvm/guest/paravirt.h b/arch/e2k/kvm/guest/paravirt.h new file mode 100644 index 000000000000..6acc0764ee73 --- /dev/null +++ b/arch/e2k/kvm/guest/paravirt.h @@ -0,0 +1,84 @@ +/****************************************************************************** + * Copyright (c) 2012 Salavat Gilyazov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU 
General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + + +#ifndef __ASM_E2K_KVM_PARAVIRT_H +#define __ASM_E2K_KVM_PARAVIRT_H + +#ifdef CONFIG_PARAVIRT + +#ifndef __ASSEMBLY__ + +#ifdef PATCHED_REG_ACCESS +extern asmlinkage unsigned int kvm_patched_read_PSR_reg_value(void); +extern asmlinkage void kvm_patched_write_PSR_reg_value(unsigned int reg_value); +extern asmlinkage unsigned int kvm_patched_read_UPSR_reg_value(void); +extern asmlinkage void kvm_patched_write_UPSR_reg_value(unsigned int reg_value); +extern asmlinkage unsigned long kvm_patched_read_PSP_lo_reg_value(void); +extern asmlinkage unsigned long kvm_patched_read_PSP_hi_reg_value(void); +extern asmlinkage void kvm_patched_write_PSP_lo_reg_value( + unsigned long reg_value); +extern asmlinkage void kvm_patched_write_PSP_hi_reg_value( + unsigned long reg_value); +extern asmlinkage unsigned long kvm_patched_read_PSHTP_reg_value(void); +extern asmlinkage void kvm_patched_write_PSHTP_reg_value( + unsigned long reg_value); +extern asmlinkage unsigned long kvm_patched_read_PCSP_lo_reg_value(void); +extern asmlinkage unsigned long kvm_patched_read_PCSP_hi_reg_value(void); +extern asmlinkage void kvm_patched_write_PCSP_lo_reg_value( + unsigned long reg_value); +extern asmlinkage void kvm_patched_write_PCSP_hi_reg_value( + unsigned long reg_value); +extern asmlinkage int kvm_patched_read_PCSHTP_reg_svalue(void); +extern asmlinkage void 
kvm_patched_write_PCSHTP_reg_svalue(int reg_value); +extern asmlinkage unsigned long kvm_patched_read_CR0_lo_reg_value(void); +extern asmlinkage unsigned long kvm_patched_read_CR0_hi_reg_value(void); +extern asmlinkage unsigned long kvm_patched_read_CR1_lo_reg_value(void); +extern asmlinkage unsigned long kvm_patched_read_CR1_hi_reg_value(void); +extern asmlinkage void kvm_patched_write_CR0_lo_reg_value( + unsigned long reg_value); +extern asmlinkage void kvm_patched_write_CR0_hi_reg_value( + unsigned long reg_value); +extern asmlinkage void kvm_patched_write_CR1_lo_reg_value( + unsigned long reg_value); +extern asmlinkage void kvm_patched_write_CR1_hi_reg_value( + unsigned long reg_value); +extern asmlinkage unsigned long kvm_patched_read_USD_lo_reg_value(void); +extern asmlinkage unsigned long kvm_patched_read_USD_hi_reg_value(void); +extern asmlinkage void kvm_patched_write_USD_lo_reg_value( + unsigned long reg_value); +extern asmlinkage void kvm_patched_write_USD_hi_reg_value( + unsigned long reg_value); +extern asmlinkage unsigned long kvm_patched_read_WD_reg_value(void); +extern asmlinkage void kvm_patched_write_WD_reg_value( + unsigned long reg_value); +extern asmlinkage unsigned int kvm_patched_read_aasr_reg_value(void); +extern asmlinkage void kvm_patched_write_aasr_reg_value(unsigned int reg_value); +extern asmlinkage void kvm_patched_flush_stacks(void); +extern asmlinkage void kvm_patched_flush_regs_stack(void); +extern asmlinkage void kvm_patched_flush_chain_stack(void); +extern asmlinkage void kvm_patched_put_updated_cpu_regs_flags( + unsigned long flags); +#endif /* PATCHED_REG_ACCESS */ + +#endif /* ! 
__ASSEMBLY__ */ + +#endif /* CONFIG_PARAVIRT */ + +#endif /* __ASM_E2K_KVM_PARAVIRT_H */ diff --git a/arch/e2k/kvm/guest/pic.h b/arch/e2k/kvm/guest/pic.h new file mode 100644 index 000000000000..b17ddfab463f --- /dev/null +++ b/arch/e2k/kvm/guest/pic.h @@ -0,0 +1,154 @@ +#ifndef __KVM_GUEST_PIC_H +#define __KVM_GUEST_PIC_H + +#include +#include +#include +#include + +/* Choosing between software LAPIC/CEPIC models and execution with hardware support */ + +extern irqreturn_t kvm_do_interrupt(struct pt_regs *regs); +extern __init void kvm_time_init_clockevents(void); +extern __init void kvm_time_init_clocksource(void); +extern void kvm_timer_resume(void); +extern void kvm_time_shutdown(void); +extern __init int kvm_setup_sw_timer(void); + +static inline irqreturn_t guest_do_interrupt_pic(struct pt_regs *regs) +{ + irqreturn_t ret; + + ret = native_do_interrupt(regs); + + if (regs->interrupt_vector == KVM_NMI_APIC_VECTOR) { + /* NMI IPI on guest implemented as general inteerupt */ + /* with vector KVM_NMI_APIC_VECTOR */ + /* but nmi_call_function_interrupt() has been called */ + /* under NMI disabled, so now enable NMIs */ + exiting_irq(); + KVM_INIT_KERNEL_UPSR_REG(false, /* enable IRQs */ + false /* disable NMIs */); + } + return ret; +} + +#ifdef CONFIG_EPIC +extern int pic_get_vector(void); +extern int e2k_virt_get_vector_apic(void); +extern int e2k_virt_get_vector_epic(void); +static inline int e2k_virt_get_vector(void) +{ + if (IS_HV_GM()) + return pic_get_vector(); + + if (cpu_has(CPU_FEAT_EPIC)) { + return e2k_virt_get_vector_epic(); + } else { + return e2k_virt_get_vector_apic(); + } +} + +extern void __init_recv kvm_init_system_handlers_table_apic(void); +extern void __init_recv kvm_init_system_handlers_table_epic(void); +static inline void __init_recv kvm_init_system_handlers_table_pic(void) +{ + if (IS_HV_GM()) + return; + + if (cpu_has(CPU_FEAT_EPIC)) { + kvm_init_system_handlers_table_epic(); + } else { + kvm_init_system_handlers_table_apic(); + } +} + 
+extern __init int kvm_setup_boot_lapic_virq(void); +extern __init int kvm_setup_boot_cepic_virq(void); +static inline int __init kvm_setup_boot_local_pic_virq(void) +{ + if (IS_HV_GM()) + return 0; + + if (cpu_has(CPU_FEAT_EPIC)) { + return kvm_setup_boot_cepic_virq(); + } else { + return kvm_setup_boot_lapic_virq(); + } +} + +extern void kvm_setup_local_apic_virq(unsigned int cpuid); +extern void kvm_setup_epic_virq(unsigned int cpuid); +static inline void kvm_setup_local_pic_virq(unsigned int cpuid) +{ + if (IS_HV_GM()) + return; + + if (cpu_has(CPU_FEAT_EPIC)) { + kvm_setup_epic_virq(cpuid); + } else { + kvm_setup_local_apic_virq(cpuid); + } +} + +extern __init void kvm_startup_local_apic_virq(unsigned int cpuid); +extern __init void kvm_startup_epic_virq(unsigned int cpuid); +static inline void kvm_startup_local_pic_virq(unsigned int cpuid) +{ + if (IS_HV_GM()) + return; + + if (cpu_has(CPU_FEAT_EPIC)) { + kvm_startup_epic_virq(cpuid); + } else { + kvm_startup_local_apic_virq(cpuid); + } +} +#else /* !(CONFIG_EPIC) */ +extern int pic_get_vector(void); +extern int e2k_virt_get_vector_apic(void); +static inline int e2k_virt_get_vector(void) +{ + if (IS_HV_GM()) + return pic_get_vector(); + + return e2k_virt_get_vector_apic(); +} + +extern void __init_recv kvm_init_system_handlers_table_apic(void); +static inline void __init_recv kvm_init_system_handlers_table_pic(void) +{ + if (IS_HV_GM()) + return; + + kvm_init_system_handlers_table_apic(); +} + +extern __init int kvm_setup_boot_lapic_virq(void); +static inline int __init kvm_setup_boot_local_pic_virq(void) +{ + if (IS_HV_GM()) + return 0; + + return kvm_setup_boot_lapic_virq(); +} + +extern void kvm_setup_local_apic_virq(unsigned int cpuid); +static inline void kvm_setup_local_pic_virq(unsigned int cpuid) +{ + if (IS_HV_GM()) + return; + + kvm_setup_local_apic_virq(cpuid); +} + +extern __init void kvm_startup_local_apic_virq(unsigned int cpuid); +static inline void kvm_startup_local_pic_virq(unsigned int 
cpuid) +{ + if (IS_HV_GM()) + return; + + kvm_startup_local_apic_virq(cpuid); +} +#endif +#endif /* __KVM_GUEST_PIC_H */ diff --git a/arch/e2k/kvm/guest/process.c b/arch/e2k/kvm/guest/process.c new file mode 100644 index 000000000000..986fb4be8427 --- /dev/null +++ b/arch/e2k/kvm/guest/process.c @@ -0,0 +1,1292 @@ +/* + * Guest processes management + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "process.h" +#include "traps.h" +#include "time.h" + +#undef DEBUG_PROCESS_MODE +#undef DebugKVM +#define DEBUG_PROCESS_MODE 0 +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_PROCESS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_SWITCH_MODE +#undef DebugKVMSW +#define DEBUG_KVM_SWITCH_MODE 0 /* KVM switching debugging */ +#define DebugKVMSW(fmt, args...) \ +({ \ + if (DEBUG_KVM_SWITCH_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KERNEL_STACKS_MODE +#undef DebugKVMKS +#define DEBUG_KERNEL_STACKS_MODE 0 +#define DebugKVMKS(fmt, args...) \ +({ \ + if (DEBUG_KERNEL_STACKS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_SWITCH_KERNEL_STACKS_MODE +#undef DebugSWSTK +#define DEBUG_SWITCH_KERNEL_STACKS_MODE 0 /* switch to new kernel stacks */ +#define DebugSWSTK(fmt, args...) \ +({ \ + if (DEBUG_SWITCH_KERNEL_STACKS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_EXEC_MODE +#undef DebugKVMEX +#define DEBUG_KVM_EXEC_MODE 0 +#define DebugKVMEX(fmt, args...) \ +({ \ + if (DEBUG_KVM_EXEC_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_OLD_MODE +#undef DebugOLD +#define DEBUG_KVM_OLD_MODE 0 +#define DebugOLD(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_OLD_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_USER_STACKS_MODE +#undef DebugKVMUS +#define DEBUG_USER_STACKS_MODE 0 +#define DebugKVMUS(fmt, args...) \ +({ \ + if (DEBUG_USER_STACKS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +bool debug_clone_guest = false; +#undef DEBUG_KVM_CLONE_USER_MODE /* sys_clone() */ +#undef DebugKVMCLN +#define DEBUG_KVM_CLONE_USER_MODE 0 /* KVM thread clone debug */ +#define DebugKVMCLN(fmt, args...) \ +({ \ + if (DEBUG_KVM_CLONE_USER_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_COPY_USER_MODE +#undef DebugKVMCPY +#define DEBUG_KVM_COPY_USER_MODE 0 /* KVM process copy debugging */ +#define DebugKVMCPY(fmt, args...) \ +({ \ + if (DEBUG_KVM_COPY_USER_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_GMM_MODE +#undef DebugGMM +#define DEBUG_KVM_GMM_MODE 0 /* GMM creation debug */ +#define DebugGMM(fmt, args...) \ +({ \ + if (DEBUG_KVM_GMM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_UNHOST_STACKS_MODE +#undef DebugKVMUH +#define DEBUG_UNHOST_STACKS_MODE 0 +#define DebugKVMUH(fmt, args...) \ +({ \ + if (DEBUG_UNHOST_STACKS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_SIGNAL_MODE +#undef DebugSIG +#define DEBUG_SIGNAL_MODE 0 +#define DebugSIG(fmt, args...) \ +({ \ + if (DEBUG_SIGNAL_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_GUEST_HS_MODE +#undef DebugGHS +#define DEBUG_GUEST_HS_MODE 0 /* Hard Stack expantions */ +#define DebugGHS(fmt, args...) \ +({ \ + if (DEBUG_GUEST_HS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_SHUTDOWN_MODE +#undef DebugKVMSH +#define DEBUG_KVM_SHUTDOWN_MODE 1 /* KVM shutdown debugging */ +#define DebugKVMSH(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_SHUTDOWN_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_IDLE_MODE +#undef DebugKVMIDLE +#define DEBUG_KVM_IDLE_MODE 0 /* KVM idle debugging */ +#define DebugKVMIDLE(fmt, args...) \ +({ \ + if (DEBUG_KVM_IDLE_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +static long kvm_finish_switch_to_new_process(void); + +/* + * in Makefile -D__builtin_return_address=__e2k_kernel_return_address + */ +void *kvm_nested_kernel_return_address(int n) +{ + e2k_addr_t ret = 0UL; + e2k_cr0_hi_t cr0_hi; + u64 base; + u64 size; + s64 cr_ind; + + NATIVE_FLUSHC; + NATIVE_FLUSHC; + E2K_WAIT_ALL; + ATOMIC_GET_HW_PCS_SIZES_AND_BASE(cr_ind, size, base); + cr0_hi = NATIVE_NV_READ_CR0_HI_REG(); + ret = AS_STRUCT(cr0_hi).ip << 3; + DebugKVM("base 0x%llx ind 0x%llx\n", base, cr_ind); + n++; + while (n--) { + cr_ind = cr_ind - SZ_OF_CR; + if (cr_ind < 0) { + dump_stack(); + return NULL; + } + AS_WORD(cr0_hi) = *((u64 *)(base + cr_ind + CR0_HI_I)); + ret = AS_STRUCT(cr0_hi).ip << 3; + DebugKVM("IP 0x%lx\n", ret); + } + + return (void *)ret; +} + +/* + * Procedure chain stacks can be mapped to user (user processes) + * or kernel space (kernel threads). But mapping is always to privileged area + * and directly can be accessed only by host kernel. + * SPECIAL CASE: access to current procedure chain stack: + * 1. Current stack frame must be locked (resident), so access is + * safety and can use common load/store operations + * 2. Top of stack can be loaded to the special hardware register file and + * must be spilled to memory before any access. + * 3. If items of chain stack are not updated, then spilling is enough to + * their access + * 4. If items of chain stack are updated, then interrupts and + * any calling of function should be disabled in addition to spilling, + * because of return (done) will fill some part of stack from memory and can be + * two copy of chain stack items: in memory and in registers file. 
+ * We can update only in memory and following spill recover not updated + * value from registers file. + * So guest kernel can access to items of procedure chain stacks only through + * host kernel hypercall + */ +static inline unsigned long +kvm_get_active_cr_mem_value(e2k_addr_t base, e2k_addr_t cr_ind, + e2k_addr_t cr_item) +{ + unsigned long cr_value; + int error; + + error = HYPERVISOR_get_active_cr_mem_item(&cr_value, + base, cr_ind, cr_item); + if (error) { + panic("could not get active procedure chain stack item: " + "base 0x%lx index 0x%lx item offset 0x%lx, error %d\n", + base, cr_ind, cr_item, error); + } + return cr_value; +} +static inline void +kvm_put_active_cr_mem_value(unsigned long cr_value, e2k_addr_t base, + e2k_addr_t cr_ind, e2k_addr_t cr_item) +{ + int error; + + error = HYPERVISOR_put_active_cr_mem_item(cr_value, + base, cr_ind, cr_item); + if (error) { + panic("could not put active procedure chain stack item: " + "base 0x%lx index 0x%lx item offset 0x%lx, error %d\n", + base, cr_ind, cr_item, error); + } +} +unsigned long +kvm_get_active_cr0_lo_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return kvm_get_active_cr_mem_value(base, cr_ind, CR0_LO_I); +} +unsigned long +kvm_get_active_cr0_hi_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return kvm_get_active_cr_mem_value(base, cr_ind, CR0_HI_I); +} +unsigned long +kvm_get_active_cr1_lo_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return kvm_get_active_cr_mem_value(base, cr_ind, CR1_LO_I); +} +unsigned long +kvm_get_active_cr1_hi_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return kvm_get_active_cr_mem_value(base, cr_ind, CR1_HI_I); +} +void kvm_put_active_cr0_lo_value(unsigned long cr_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + kvm_put_active_cr_mem_value(cr_value, base, cr_ind, CR0_LO_I); +} +void kvm_put_active_cr0_hi_value(unsigned long cr_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + kvm_put_active_cr_mem_value(cr_value, base, cr_ind, CR0_HI_I); +} +void 
kvm_put_active_cr1_lo_value(unsigned long cr_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + kvm_put_active_cr_mem_value(cr_value, base, cr_ind, CR1_LO_I); +} +void kvm_put_active_cr1_hi_value(unsigned long cr_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + kvm_put_active_cr_mem_value(cr_value, base, cr_ind, CR1_HI_I); +} + +/* + * The function defines sizes of all guest kernel hardware stacks(PS & PCS) + * including host kernel part of the hardware stacks + * + * FIXME: host kernel stacks size additions should be determined + * by host (hypercall or some shared common interface structure) + */ +void kvm_define_kernel_hw_stacks_sizes(hw_stack_t *hw_stacks) +{ + kvm_set_hw_ps_user_size(hw_stacks, KVM_GUEST_KERNEL_PS_SIZE); + kvm_set_hw_pcs_user_size(hw_stacks, KVM_GUEST_KERNEL_PCS_SIZE); +} + +int kvm_prepare_start_thread_frames(unsigned long entry, unsigned long sp) +{ + e2k_pcsp_lo_t pcsp_lo; + e2k_psp_lo_t psp_lo; + e2k_mem_crs_t *pcs; + e2k_mem_ps_t *ps; + e2k_mem_crs_t pcs_frames[1]; /* 1 frames */ + kernel_mem_ps_t ps_frames[2]; /* Assume a maximum of 4 */ + /* do_sys_execve()'s parameters */ + int pcs_frame_ind; + int ps_frame_ind; + int ps_frame_size; + int ret; + + DebugKVMEX("entry 0x%lx sp 0x%lx\n", entry, sp); + + KVM_COPY_STACKS_TO_MEMORY(); + + psp_lo = NATIVE_NV_READ_PSP_LO_REG(); + ps = (e2k_mem_ps_t *)psp_lo.PSP_lo_base; + pcsp_lo = NATIVE_NV_READ_PCSP_LO_REG(); + pcs = (e2k_mem_crs_t *)pcsp_lo.PCSP_lo_base; + DebugKVMEX("PS base %px ind 0x%x, PCS base %px ind 0x%x\n", + ps, NATIVE_NV_READ_PSP_HI_REG().PSP_hi_ind, + pcs, NATIVE_NV_READ_PCSP_HI_REG().PSP_hi_ind); + + /* PS & PCS stack frames should be updated, but guest cannot */ + /* preserves current updated frames from fill/spill and some */ + /* updates can be lost. 
So prepare frames in temporary storage */ + + /* pcs[0] frame can be empty, because of it should not be returns */ + /* here and it is used only to fill into current CR registers */ + /* while run function on the next frame pcs[1] */ + DebugKVMEX("PCS[0]: IP %pF wbs 0x%x\n", + (void *)(pcs[0].cr0_hi.CR0_hi_ip << 3), + pcs[0].cr1_lo.CR1_lo_wbs * EXT_4_NR_SZ); + + /* Prepare pcs[1] frame, it is frame of do_sys_execve() */ + /* Update only IP (as start of function) */ + DebugKVMEX("PCS[1]: IP %pF wbs 0x%x\n", + (void *)(pcs[1].cr0_hi.CR0_hi_ip << 3), + pcs[1].cr1_lo.CR1_lo_wbs * EXT_4_NR_SZ); + *pcs_frames = pcs[1]; + pcs_frame_ind = (1 * SZ_OF_CR); /* 1-st frame index */ + pcs_frames[0].cr0_hi.CR0_hi_ip = (unsigned long) &do_sys_execve >> 3; + DebugKVMEX("updated PCS[1]: IP %pF wbs 0x%x\n", + (void *)(pcs_frames[0].cr0_hi.CR0_hi_ip << 3), + pcs_frames[0].cr1_lo.CR1_lo_wbs * EXT_4_NR_SZ); + + /* prepare procedure stack frame ps[0] for pcs[1] should contain */ + /* do_sys_execve()'s function arguments */ + ps_frame_ind = 0; /* update 0 frame from base */ + ps_frames[0].word_lo = entry; /* %dr0 */ + ps_frames[0].word_hi = sp; /* %dr1 */ + ps_frames[1].word_lo = true; /* %dr2 */ + ps_frame_size = (2 * EXT_4_NR_SZ); /* 4 double-word registers */ + +retry: + ret = HYPERVISOR_update_hw_stacks_frames(pcs_frames, pcs_frame_ind, + ps_frames, ps_frame_ind, ps_frame_size); + if (unlikely(ret == -EAGAIN)) { + DebugKVMEX("could not update hardware stacks, error %d " + "retry\n", ret); + goto retry; + } else if (unlikely(ret < 0)) { + DebugKVMEX("could not update hardware stacks, error %d\n", + ret); + } + + return ret; +} + +/** + * prepare_kernel_frame - prepare for return to kernel function + * @stacks - allocated stacks' parameters (will be corrected) + * @crs - chain stack frame will be returned here + * @fn - function to return to + * @arg - function's argument + * + * Note that cr1_lo.psr value is taken from PSR register. 
This means + * that interrupts and sge are expected to be enabled by caller. + * It is paravirtualized KVM version of function. + * In this case real switch is done by host (hypercall), so + * 1) some more frames it need to prepare and + * 2) chain frames should be into memory (new chain stack) + */ +static void kvm_prepare_kernel_frame(struct sw_regs *new_sw_regs, + e2k_mem_crs_t *crs, unsigned long fn, unsigned long arg) +{ + e2k_cr0_lo_t cr0_lo; + e2k_cr0_hi_t cr0_hi; + e2k_cr1_lo_t cr1_lo; + e2k_cr1_hi_t cr1_hi; + e2k_psr_t psr; + e2k_mem_crs_t *pcs; + unsigned long *ps; + int ps_size; + int pcs_size; + + psr.PSR_reg = NATIVE_NV_READ_PSR_REG_VALUE(); + BUG_ON(!psr.PSR_sge && !IS_HV_GM()); + + pcs = (e2k_mem_crs_t *)new_sw_regs->pcsp_lo.PCSP_lo_base; + ps = (unsigned long *)new_sw_regs->psp_lo.PSP_lo_base; + + /* Prepare pcs[0] frame, it can be empty, because of should */ + /* not be returns here and it is used only to fill into current */ + /* CR registers while run function @fn on the next frame pcs[1] */ + pcs_size = SZ_OF_CR; + + /* procedure stack frame ps[-1] for pcs[0] not exists */ + ps_size = 0; + + + /* + * Prepare crs[1] frame in chain stack. It should also be reserved + * because it is used by kvm_prepare_start_thread_frames for + * creation of new kernel thread via do_execve. + * kvm_prepare_start_thread_frames will rewrite this frame + * by do_sys_execve function. 
+ */ + cr0_lo.CR0_lo_half = 0; + cr0_lo.CR0_lo_pf = -1ULL; + + cr0_hi.CR0_hi_ip = 0; /* TODO: need handler of return here */ + + cr1_lo.CR1_lo_half = 0; + cr1_lo.CR1_lo_psr = psr.PSR_reg; + cr1_lo.CR1_lo_cui = KERNEL_CODES_INDEX; + if (machine.native_iset_ver < E2K_ISET_V6) + AS(cr1_lo).ic = 0; + cr1_lo.CR1_lo_wbs = 4; /* 2 quad regs (4 doubles) */ + + cr1_hi.CR1_hi_half = 0; + cr1_hi.CR1_hi_ussz = new_sw_regs->usd_hi.USD_hi_size / 16; + + pcs[1].cr0_lo = cr0_lo; + pcs[1].cr0_hi = cr0_hi; + pcs[1].cr1_lo = cr1_lo; + pcs[1].cr1_hi = cr1_hi; + pcs_size += SZ_OF_CR; /* 1 frame size */ + ps_size += (4 * EXT_4_NR_SZ); /* 4 double args */ + + + /* Prepare crs[2] frame: @fn's frame in chain stack */ + /* The frame should be into memory and will fill after return to @fn */ + cr0_hi.CR0_hi_ip = fn >> 3; + cr1_lo.CR1_lo_wbs = 1; /* extended quad register (2 double) */ + + pcs[2].cr0_lo = cr0_lo; + pcs[2].cr0_hi = cr0_hi; + pcs[2].cr1_lo = cr1_lo; + pcs[2].cr1_hi = cr1_hi; + pcs_size += SZ_OF_CR; /* 1 frame size */ + + /* + * Prepare procedure stack frame ps[EXT_4_NR_SZ] for pcs[2] + * should contain @fn's function argument @arg + */ + ps[4*EXT_4_NR_SZ/sizeof(unsigned long)] = arg; + /* ps[2] not used, only reserved for aligement */ + /* @fn'S function procedure stack frame size is 1 quad register */ + /* (at memory it is 2 double + 2 double extentions */ + ps_size += (1 * EXT_4_NR_SZ); + + /* Prepare crs[3] frame: kvm_finish_switch_to_new_process()'s frame */ + /* The frame is used as return function from hypercall after real */ + /* switch of context to new process */ + /* The registers of the frame should be into sw_regs structure */ + /* and can be not at stack memory */ + cr0_hi.CR0_hi_IP = (u64)&kvm_finish_switch_to_new_process; + /* other CRs can be same as at previous @fn's frame? 
including */ + /* wbs (1 quad register) and ussz (data stack frame not used) */ + + crs->cr0_lo = cr0_lo; + crs->cr0_hi = cr0_hi; + crs->cr1_lo = cr1_lo; + crs->cr1_hi = cr1_hi; + + /* prepare procedure stack frame ps[2] for pcs[3] */ + /* it will be filled from stack at memory, so should be reserved */ + /* kvm_finish_switch_to_new_process() set real frame sizes */ + /* Here reserved procedure stack frame for 1 quad register */ + /* (at memory it is 2 double + 2 double extentions */ + ps_size += (1 * EXT_4_NR_SZ); + + /* Update hardware stacks registers to point to prepared frames */ + new_sw_regs->pcsp_hi.PCSP_hi_ind = pcs_size; + new_sw_regs->psp_hi.PSP_hi_ind = ps_size; +} + +int kvm_copy_kernel_stacks(struct task_struct *new_task, + unsigned long fn, unsigned long arg) +{ + thread_info_t *new_ti = task_thread_info(new_task); + struct sw_regs *new_sw_regs = &new_task->thread.sw_regs; + kvm_task_info_t task_info; + e2k_size_t ps_size; + e2k_size_t pcs_size; + int ret; + + DebugKVMKS("started to create new kernel thread %s (%d)\n", + new_task->comm, new_task->pid); + + /* + * Put function IP and argument to chain and procedure stacks. 
+ */ + kvm_prepare_kernel_frame(new_sw_regs, &new_sw_regs->crs, fn, arg); + DebugKVMKS("new kernel data stack: top 0x%lx, base 0x%llx, size 0x%x\n", + new_sw_regs->top, + new_ti->k_usd_lo.USD_lo_base, + new_ti->k_usd_hi.USD_hi_size); + DebugKVMKS("procedure stack: base 0x%llx size 0x%x index 0x%x\n", + new_ti->k_psp_lo.PSP_lo_base, + new_ti->k_psp_hi.PSP_hi_size, + new_ti->k_psp_hi.PSP_hi_ind); + DebugKVMKS("chain stack: base 0x%llx size 0x%x index 0x%x\n", + new_ti->k_pcsp_lo.PCSP_lo_base, + new_ti->k_pcsp_hi.PCSP_hi_size, + new_ti->k_pcsp_hi.PCSP_hi_ind); + + task_info.sp_offset = new_sw_regs->usd_hi.USD_hi_size; + task_info.us_base = (u64)new_task->stack + KERNEL_C_STACK_OFFSET; + task_info.us_size = KERNEL_C_STACK_SIZE; + task_info.flags = 0; + DebugKVMKS("local data stack from 0x%lx size 0x%lx SP offset 0x%lx\n", + task_info.us_base, task_info.us_size, task_info.sp_offset); + + BUG_ON(task_info.sp_offset > task_info.us_size); + + ps_size = new_sw_regs->psp_hi.PSP_hi_size; + task_info.ps_base = new_sw_regs->psp_lo.PSP_lo_base; + task_info.ps_ind = new_sw_regs->psp_hi.PSP_hi_ind; + task_info.ps_size = ps_size; + DebugKVMKS("procedure stack from 0x%lx size 0x%lx, index 0x%lx\n", + task_info.ps_base, task_info.ps_size, task_info.ps_ind); + + pcs_size = new_sw_regs->pcsp_hi.PCSP_hi_size; + task_info.pcs_base = new_sw_regs->pcsp_lo.PCSP_lo_base; + task_info.pcs_ind = new_sw_regs->pcsp_hi.PCSP_hi_ind; + task_info.pcs_size = pcs_size; + task_info.flags |= (PS_HAS_NOT_GUARD_PAGE_TASK_FLAG | + PCS_HAS_NOT_GUARD_PAGE_TASK_FLAG); + DebugKVMKS("procedure chain stack from 0x%lx size 0x%lx, index 0x%lx\n", + task_info.pcs_base, task_info.pcs_size, task_info.pcs_ind); + + task_info.cr0_lo = new_sw_regs->crs.cr0_lo.CR0_lo_half; + task_info.cr0_hi = new_sw_regs->crs.cr0_hi.CR0_hi_half; + task_info.cr1_wd = new_sw_regs->crs.cr1_lo.CR1_lo_half; + task_info.cr1_ussz = new_sw_regs->crs.cr1_hi.CR1_hi_half; + DebugKVMKS("chain registers: IP %pF, wbs 0x%lx, ussz 0x%lx\n", + (void 
*)(task_info.cr0_hi), task_info.cr1_wd, + task_info.cr1_ussz); + + ret = HYPERVISOR_copy_guest_kernel_stacks(&task_info); + if (ret < 0) { + pr_err("%s(): could not create new kernel thread, error %d\n", + __func__, ret); + goto out_k_stacks; + } + DebugKVMKS("created new kernel thread, GPID #%d\n", ret); + new_ti->gpid_nr = ret; + new_ti->gmmid_nr = current_thread_info()->gmmid_nr; + + new_sw_regs->crs.cr0_lo.CR0_lo_half = task_info.cr0_lo; + new_sw_regs->crs.cr0_hi.CR0_hi_half = task_info.cr0_hi; + new_sw_regs->crs.cr1_lo.CR1_lo_half = task_info.cr1_wd; + new_sw_regs->crs.cr1_hi.CR1_hi_half = task_info.cr1_ussz; + + return 0; + +out_k_stacks: + return ret; +} + +void __init kvm_bsp_switch_to_init_stack(void) +{ + kvm_task_info_t task_info; + e2k_addr_t stack_base = (unsigned long) &init_stack; + e2k_addr_t us_base; + e2k_addr_t ps_base; + e2k_addr_t pcs_base; + int ret; + + us_base = stack_base + KERNEL_C_STACK_OFFSET; + ps_base = stack_base + KERNEL_P_STACK_OFFSET; + pcs_base = stack_base + KERNEL_PC_STACK_OFFSET; + + task_info.sp_offset = KERNEL_C_STACK_SIZE; + task_info.us_base = us_base; + task_info.us_size = KERNEL_C_STACK_SIZE; + task_info.flags = 0; + DebugSWSTK("local data stack from 0x%lx size 0x%lx SP offset 0x%lx\n", + task_info.us_base, task_info.us_size, task_info.sp_offset); + + BUG_ON(task_info.sp_offset > task_info.us_size); + + task_info.ps_base = ps_base; + task_info.ps_ind = 0; + task_info.ps_size = KERNEL_P_STACK_SIZE; + DebugSWSTK("procedure stack from 0x%lx size 0x%lx, index 0x%lx\n", + task_info.ps_base, task_info.ps_size, task_info.ps_ind); + + task_info.pcs_base = pcs_base; + task_info.pcs_ind = 0; + task_info.pcs_size = KERNEL_PC_STACK_SIZE; + DebugSWSTK("procedure chain stack from 0x%lx size 0x%lx, index 0x%lx\n", + task_info.pcs_base, task_info.pcs_size, task_info.pcs_ind); + + ret = HYPERVISOR_switch_guest_kernel_stacks(&task_info, + (char *) &e2k_start_kernel_switched_stacks, NULL, 0); + if (ret < 0) { + panic("%s(): could not switch 
to init kernel stacks, " + "error %d\n", + __func__, ret); + } +} + +void kvm_setup_bsp_idle_task(int cpu) +{ + struct task_struct *idle = &init_task; + struct thread_info *ti_idle; + int ret; + + native_setup_bsp_idle_task(cpu); + + /* setup the idle task on host (get GPID_ID #) */ + ret = HYPERVISOR_setup_idle_task(cpu); + if (ret < 0) { + panic("%s(): could not setup CPU #%d idle task on host, " + "error %d\n", + __func__, cpu, ret); + } + ti_idle = task_thread_info(idle); + BUG_ON(ti_idle != &init_task.thread_info); + + ti_idle->gpid_nr = ret; + ti_idle->gmmid_nr = 0; /* init mm should have GMMID == 0 */ +} + +/* + * The function defines sizes of all guest user hardware stacks(PS & PCS) + * including host and guest kernel part of the hardware stacks + * + * FIXME: host kernel stacks size additions should be determined + * by host (hypercall or some shared common interface structure) + */ +void kvm_define_user_hw_stacks_sizes(hw_stack_t *hw_stacks) +{ + kvm_set_hw_ps_user_size(hw_stacks, KVM_GUEST_USER_PS_INIT_SIZE); + kvm_set_hw_pcs_user_size(hw_stacks, KVM_GUEST_USER_PCS_INIT_SIZE); +} + +static long kvm_finish_switch_to_new_process(void) +{ + /* Restore interrupt mask and enable NMIs */ + UPSR_RESTORE(AW(current->thread.sw_regs.upsr)); + + E2K_JUMP_WITH_ARGUMENTS(__ret_from_fork, 1, + current->thread.sw_regs.prev_task); + + return (long)current->thread.sw_regs.prev_task; +} + +int kvm_switch_to_new_user(e2k_stacks_t *stacks, hw_stack_t *hw_stacks, + e2k_addr_t cut_base, e2k_size_t cut_size, + e2k_addr_t entry_point, int cui, + unsigned long flags, bool kernel) +{ + thread_info_t *thread_info = current_thread_info(); + kvm_task_info_t task_info; + int ret; + + DebugKVMEX("started\n"); + task_info.flags = flags; + task_info.u_us_base = stacks->usd_lo.USD_lo_base - + stacks->usd_hi.USD_hi_size; + task_info.u_us_size = stacks->top - task_info.u_us_base; + task_info.u_sp_offset = stacks->usd_hi.USD_hi_size; + DebugKVMEX("local data stack from 0x%lx size 0x%lx SP " 
+ "offset 0x%lx %s\n", + task_info.u_us_base, task_info.u_us_size, task_info.u_sp_offset, + (task_info.flags & PROTECTED_CODE_TASK_FLAG) ? + "protected" : "not protected"); + BUG_ON(task_info.u_sp_offset > task_info.u_us_size); + + task_info.u_ps_base = stacks->psp_lo.PSP_lo_base; + task_info.u_ps_size = stacks->psp_hi.PSP_hi_size; + task_info.u_ps_ind = stacks->psp_hi.PSP_hi_ind; + DebugKVMEX("procedure stack from 0x%lx size 0x%lx\n", + task_info.u_ps_base, task_info.u_ps_size); + task_info.u_pcs_base = stacks->pcsp_lo.PCSP_lo_base; + task_info.u_pcs_size = stacks->pcsp_hi.PCSP_hi_size; + task_info.u_pcs_ind = stacks->pcsp_hi.PCSP_hi_ind; + DebugKVMEX("procedure chain stack from 0x%lx size 0x%lx\n", + task_info.u_pcs_base, task_info.u_pcs_size); + + task_info.flags |= (PS_HAS_NOT_GUARD_PAGE_TASK_FLAG | + PCS_HAS_NOT_GUARD_PAGE_TASK_FLAG); + + BUG_ON(thread_info->u_cutd.CUTD_base != cut_base); + task_info.cut_base = cut_base; + task_info.cut_size = cut_size; + task_info.cui = cui; + task_info.kernel = kernel; + + DebugKVMEX("compilation unit table CUT from 0x%lx size 0x%lx CUI %d\n", + task_info.cut_base, task_info.cut_size, task_info.cui); + task_info.entry_point = entry_point; + DebugKVMEX("entry point to user 0x%lx\n", task_info.entry_point); + + thread_info->u_hw_stack = *hw_stacks; + + /* + * Set kernel local stack to empty state and forget old history + * of the process and start new life on new process + */ + thread_info->k_usd_lo.USD_lo_base = + (u64)current->stack + KVM_GUEST_KERNEL_C_STACK_SIZE; + thread_info->k_usd_hi.USD_hi_size = KVM_GUEST_KERNEL_C_STACK_SIZE; + DebugKVMEX("set kernel local data stack to empty state: base 0x%llx " + "size 0x%x\n", + thread_info->k_usd_lo.USD_lo_base, + thread_info->k_usd_hi.USD_hi_size); + + task_info.us_base = (u64)current->stack; + task_info.us_size = KVM_GUEST_KERNEL_C_STACK_SIZE; + DebugKVMEX("kernel local data stack from 0x%lx size 0x%lx\n", + task_info.us_base, task_info.us_size); + + task_info.ps_base = 
thread_info->k_psp_lo.PSP_lo_base; + task_info.ps_size = thread_info->k_psp_hi.PSP_hi_size; + DebugKVMEX("kernel procedure stack from 0x%lx size 0x%lx\n", + task_info.ps_base, task_info.ps_size); + + task_info.pcs_base = thread_info->k_pcsp_lo.PCSP_lo_base; + task_info.pcs_size = thread_info->k_pcsp_hi.PCSP_hi_size; + DebugKVMEX("kernel procedure chain stack from 0x%lx size 0x%lx\n", + task_info.pcs_base, task_info.pcs_size); + + /* Set flag to free the old hardware stacks after */ + /* real switch to the new ones and new user process */ + if (kernel) { + DebugOLD("thread info %px old: ps %px pcs %px\n", + thread_info, + thread_info->old_ps_base, thread_info->old_pcs_base); + BUG_ON(thread_info->old_ps_base != NULL || + thread_info->old_pcs_base != NULL); + } else { + } + + /* switch to IRQs control under PSR and init user UPSR */ + KVM_RETURN_TO_INIT_USER_UPSR(); + +retry: + ret = HYPERVISOR_switch_to_guest_new_user(&task_info); + if (unlikely(ret == -EAGAIN)) { + DebugKVM("could not switch to new user process, error %d, " + "retry\n", ret); + goto retry; + } else if (unlikely(ret < 0)) { + DebugKVM("could not switch to new user process, error %d\n", + ret); + goto out; + } + + /* successful switch to new user should not return here */ + panic("%s(): return from user execve()\n", __func__); + ret = 1; /* return from guest user process */ + +out: + return ret; +} + +int kvm_clone_prepare_spilled_user_stacks(e2k_stacks_t *child_stacks, + const e2k_mem_crs_t *child_crs, const struct pt_regs *regs, + struct sw_regs *new_sw_regs, struct thread_info *new_ti, + unsigned long clone_flags) +{ + struct task_struct *new_task = thread_info_task(new_ti); + kvm_task_info_t task_info; + e2k_addr_t sbr; + int ret, gpid_nr; + + if (DEBUG_KVM_CLONE_USER_MODE) + debug_clone_guest = true; + + /* copy user's part of kernel hardware stacks */ + ret = native_clone_prepare_spilled_user_stacks(child_stacks, child_crs, + regs, new_sw_regs, new_ti, clone_flags); + if (ret != 0) { + 
pr_err("%s(): native clone/prepare user stacks failed, " + "error %d\n", + __func__, ret); + return ret; + } + + /* + * Register new thread on host and complete new guest user thread + * creation + */ + /* guest kernel local data stack */ + task_info.sp_offset = new_sw_regs->usd_hi.USD_hi_size; + task_info.us_base = new_sw_regs->usd_lo.USD_lo_base - + new_sw_regs->usd_hi.USD_hi_size; + task_info.us_size = new_sw_regs->top - task_info.us_base; + /* guest user local data stack */ + sbr = round_up(child_stacks->top, E2K_ALIGN_STACK_BASE_REG); + child_stacks->top = sbr; + task_info.u_sp_offset = child_stacks->usd_hi.USD_hi_size; + task_info.u_us_base = child_stacks->usd_lo.USD_lo_base - + child_stacks->usd_hi.USD_hi_size; + task_info.u_us_size = child_stacks->top - task_info.u_us_base; + + task_info.flags = 0; + if (new_task->thread.flags & E2K_FLAG_PROTECTED_MODE) + task_info.flags |= PROTECTED_CODE_TASK_FLAG; + if (TASK_IS_BINCO(new_task)) + task_info.flags |= BIN_COMP_CODE_TASK_FLAG; + DebugKVMCLN("kernel data stack from 0x%lx size 0x%lx SP offset 0x%lx\n", + task_info.us_base, task_info.us_size, task_info.sp_offset); + DebugKVMCLN("user data stack from 0x%lx size 0x%lx SP offset 0x%lx " + "%s\n", + task_info.u_us_base, task_info.u_us_size, + task_info.u_sp_offset, + (task_info.flags & PROTECTED_CODE_TASK_FLAG) ? 
+ "protected" : "not protected"); + BUG_ON(task_info.sp_offset > task_info.us_size); + BUG_ON(task_info.u_sp_offset > task_info.u_us_size); + + /* guest kernel procedure stack */ + task_info.ps_base = new_sw_regs->psp_lo.PSP_lo_base; + task_info.ps_size = new_sw_regs->psp_hi.PSP_hi_size; + task_info.ps_ind = new_sw_regs->psp_hi.PSP_hi_ind; + DebugKVMCLN("kernel procedure stack from 0x%lx size 0x%lx ind 0x%lx\n", + task_info.ps_base, task_info.ps_size, task_info.ps_ind); + /* guest user procedure stack */ + task_info.u_ps_base = child_stacks->psp_lo.PSP_lo_base; + task_info.u_ps_size = child_stacks->psp_hi.PSP_hi_size; + task_info.u_ps_ind = child_stacks->psp_hi.PSP_hi_ind; + DebugKVMCLN("user procedure stack from 0x%lx size 0x%lx ind 0x%lx\n", + task_info.u_ps_base, task_info.u_ps_size, task_info.u_ps_ind); + + /* guest kernel procedure chain stack */ + task_info.pcs_base = new_sw_regs->pcsp_lo.PCSP_lo_base; + task_info.pcs_size = new_sw_regs->pcsp_hi.PCSP_hi_size; + task_info.pcs_ind = new_sw_regs->pcsp_hi.PCSP_hi_ind; + DebugKVMCLN("kernel procedure chain stack from 0x%lx size 0x%lx " + "ind 0x%lx\n", + task_info.pcs_base, task_info.pcs_size, task_info.pcs_ind); + /* guest user procedure chain stack */ + task_info.u_pcs_base = child_stacks->pcsp_lo.PCSP_lo_base; + task_info.u_pcs_size = child_stacks->pcsp_hi.PCSP_hi_size; + task_info.u_pcs_ind = child_stacks->pcsp_hi.PCSP_hi_ind; + DebugKVMCLN("user procedure chain stack from 0x%lx size 0x%lx " + "ind 0x%lx\n", + task_info.u_pcs_base, task_info.u_pcs_size, + task_info.u_pcs_ind); + + task_info.flags |= (PS_HAS_NOT_GUARD_PAGE_TASK_FLAG | + PCS_HAS_NOT_GUARD_PAGE_TASK_FLAG); + + task_info.cr0_lo = child_crs->cr0_lo.CR0_lo_half; + task_info.cr0_hi = child_crs->cr0_hi.CR0_hi_half; + task_info.cr1_wd = child_crs->cr1_lo.CR1_lo_half; + task_info.cr1_ussz = child_crs->cr1_hi.CR1_hi_half; + DebugKVMCLN("chain registers: IP %pF, wbs 0x%lx, ussz 0x%lx\n", + (void *)(task_info.cr0_hi), task_info.cr1_wd, + 
task_info.cr1_ussz); + + new_sw_regs->cutd = new_ti->u_cutd; + task_info.cut_base = new_sw_regs->cutd.CUTD_base; + + if (clone_flags & CLONE_SETTLS) { + task_info.flags |= CLONE_SETTLS_TASK_FLAG; + } + task_info.gregs = (e2k_addr_t)new_sw_regs->gregs.g; + + task_info.entry_point = (u64)&__ret_from_fork; + DebugKVMCLN("handler of return from fork() is %pfx, gregs at 0x%lx\n", + (void *)task_info.entry_point, task_info.gregs); + + /* + * Set pointers of kernel local & hardware stacks to empty state + */ + + BUG_ON(task_info.us_base != new_ti->k_usd_lo.USD_lo_base - + new_ti->k_usd_hi.USD_hi_size); + BUG_ON(task_info.us_base != (u64)new_task->stack + + KERNEL_C_STACK_OFFSET); + + BUG_ON(new_task->mm == NULL || new_task->mm->pgd == NULL); + BUG_ON(new_task->mm != current->mm); + + down_write(&new_task->mm->mmap_sem); + kvm_get_mm_notifier_locked(new_task->mm); + up_write(&new_task->mm->mmap_sem); + +retry: + gpid_nr = HYPERVISOR_clone_guest_user_stacks(&task_info); + if (unlikely(gpid_nr == -EAGAIN)) { + pr_err("host could not clone stacks of new user thread, " + "error %d, retry\n", gpid_nr); + goto retry; + } else if (unlikely(gpid_nr < 0)) { + pr_err("host could not clone stacks of new user thread, " + "error %d\n", gpid_nr); + ret = gpid_nr; + return ret; + } + new_ti->gpid_nr = gpid_nr; + new_ti->gmmid_nr = current_thread_info()->gmmid_nr; + + /* FIXME: it need delete this field from arch-independent struct */ + new_task->mm->gmmid_nr = new_ti->gmmid_nr; + + DebugKVMCLN("new thread created on %s (%d) GPID %d GMMID %d\n", + current->comm, current->pid, gpid_nr, new_ti->gmmid_nr); + + if (DEBUG_KVM_CLONE_USER_MODE) + debug_clone_guest = false; + + return 0; +} + +int kvm_copy_spilled_user_stacks(e2k_stacks_t *child_stacks, + e2k_mem_crs_t *child_crs, sw_regs_t *new_sw_regs, + thread_info_t *new_ti) +{ + struct task_struct *new_task = thread_info_task(new_ti); + kvm_task_info_t task_info; + vcpu_gmmu_info_t gmmu_info; + int ret, gpid_nr; + + /* copy user's part of 
kernel hardware stacks */ + native_copy_spilled_user_stacks(child_stacks, child_crs, + new_sw_regs, new_ti); + + /* + * Register new thread on host and complete new guest user thread + * creation + */ + /* guest kernel local data stack */ + task_info.sp_offset = new_sw_regs->usd_hi.USD_hi_size; + task_info.us_base = new_sw_regs->usd_lo.USD_lo_base - + new_sw_regs->usd_hi.USD_hi_size; + task_info.us_size = new_sw_regs->top - task_info.us_base; + /* guest user local data stack */ + task_info.u_sp_offset = child_stacks->usd_hi.USD_hi_size; + task_info.u_us_base = child_stacks->usd_lo.USD_lo_base - + child_stacks->usd_hi.USD_hi_size; + task_info.u_us_size = child_stacks->top - task_info.u_us_base; + + task_info.flags = 0; + if (new_task->thread.flags & E2K_FLAG_PROTECTED_MODE) + task_info.flags |= PROTECTED_CODE_TASK_FLAG; + if (TASK_IS_BINCO(new_task)) + task_info.flags |= BIN_COMP_CODE_TASK_FLAG; + DebugKVMCPY("kernel data stack from 0x%lx size 0x%lx SP offset 0x%lx\n", + task_info.us_base, task_info.us_size, task_info.sp_offset); + DebugKVMCPY("user data stack from 0x%lx size 0x%lx SP offset 0x%lx " + "%s\n", + task_info.u_us_base, task_info.u_us_size, + task_info.u_sp_offset, + (task_info.flags & PROTECTED_CODE_TASK_FLAG) ? 
+ "protected" : "not protected"); + BUG_ON(task_info.sp_offset > task_info.us_size); + + task_info.ps_base = new_sw_regs->psp_lo.PSP_lo_base; + task_info.ps_size = new_sw_regs->psp_hi.PSP_hi_size; + task_info.ps_ind = new_sw_regs->psp_hi.PSP_hi_ind; + DebugKVMCPY("kernel procedure stack from 0x%lx size 0x%lx ind 0x%lx\n", + task_info.ps_base, task_info.ps_size, task_info.ps_ind); + task_info.pcs_base = new_sw_regs->pcsp_lo.PCSP_lo_base; + task_info.pcs_size = new_sw_regs->pcsp_hi.PCSP_hi_size; + task_info.pcs_ind = new_sw_regs->pcsp_hi.PCSP_hi_ind; + DebugKVMCPY("kernel procedure chain stack from 0x%lx size 0x%lx " + "ind 0x%lx\n", + task_info.pcs_base, task_info.pcs_size, task_info.pcs_ind); + + task_info.flags |= (PS_HAS_NOT_GUARD_PAGE_TASK_FLAG | + PCS_HAS_NOT_GUARD_PAGE_TASK_FLAG); + + task_info.cr0_lo = child_crs->cr0_lo.CR0_lo_half; + task_info.cr0_hi = child_crs->cr0_hi.CR0_hi_half; + task_info.cr1_wd = child_crs->cr1_lo.CR1_lo_half; + task_info.cr1_ussz = child_crs->cr1_hi.CR1_hi_half; + DebugKVMCPY("chain registers: IP %pF, wbs 0x%lx, ussz 0x%lx\n", + (void *)(task_info.cr0_hi), task_info.cr1_wd, + task_info.cr1_ussz); + + new_sw_regs->cutd = new_ti->u_cutd; + task_info.cut_base = new_sw_regs->cutd.CUTD_base; + + task_info.gregs = (e2k_addr_t)new_sw_regs->gregs.g; + + task_info.entry_point = (u64)&__ret_from_fork; + DebugKVMCLN("handler of return from fork() is %pfx, gregs at 0x%lx\n", + (void *)task_info.entry_point, task_info.gregs); + + /* + * Set pointers of kernel local & hardware stacks to empty state + */ + + BUG_ON(task_info.us_base != new_ti->k_usd_lo.USD_lo_base - + new_ti->k_usd_hi.USD_hi_size); + BUG_ON(task_info.us_base != (u64)new_task->stack + + KERNEL_C_STACK_OFFSET); + + BUG_ON(new_task->mm == NULL || new_task->mm->pgd == NULL); + + down_write(&new_task->mm->mmap_sem); + kvm_get_mm_notifier_locked(new_task->mm); + up_write(&new_task->mm->mmap_sem); + + gmmu_info.opcode = CREATE_NEW_GMM_GMMU_OPC; + gmmu_info.u_pptb = 
__pa(new_task->mm->pgd); + +retry: + gpid_nr = HYPERVISOR_copy_guest_user_stacks(&task_info, &gmmu_info); + if (unlikely(gpid_nr == -EAGAIN)) { + DebugKVM("could not copy stacks of new user thread, " + "error %d, retry\n", gpid_nr); + goto retry; + } else if (unlikely(gpid_nr < 0)) { + DebugKVM("could not copy stacks of new user thread, " + "error %d\n", gpid_nr); + ret = gpid_nr; + goto out_error; + } + new_ti->gpid_nr = gpid_nr; + new_ti->gmmid_nr = gmmu_info.gmmid_nr; + + /* FIXME: it need delete this field from arch-independent struct */ + new_task->mm->gmmid_nr = gmmu_info.gmmid_nr; + + DebugGMM("created on %s (%d) GPID %d GMMID %d\n", + current->comm, current->pid, gpid_nr, new_ti->gmmid_nr); + DebugKVMCPY("succeeded, new thread GPID #%d GMMID #%d\n", + gpid_nr, new_ti->gmmid_nr); + + return 0; + +out_error: + pr_err("%s(): failed, error %d\n", __func__, ret); + return ret; +} + +void kvm_save_kernel_glob_regs(kernel_gregs_t *k_gregs) +{ + panic("%s(): is not yetmplemented\n", __func__); +} +void kvm_save_glob_regs(global_regs_t *gregs) +{ + unsigned long **g_regs = (unsigned long **)&gregs->g[0].xreg; + int ret; + +retry: + ret = HYPERVISOR_get_guest_glob_regs(g_regs, GUEST_GREGS_MASK, + true, /*dirty BGR */ + NULL); + if (unlikely(ret == -EAGAIN)) { + pr_err("%s(): could not get global registers state, " + "error %d, retry\n", __func__, ret); + goto retry; + } else if (unlikely(ret < 0)) { + pr_err("%s(): could not get global registers state, " + "error %d\n", __func__, ret); + } +} +void kvm_restore_glob_regs(const global_regs_t *gregs) +{ + unsigned long **g_regs = (unsigned long **)&gregs->g[0].xreg; + int ret; + +retry: + ret = HYPERVISOR_set_guest_glob_regs(g_regs, GUEST_GREGS_MASK, + true, /*dirty BGR */ + NULL); + if (unlikely(ret == -EAGAIN)) { + pr_err("%s(): could not set global registers state, " + "error %d, retry\n", __func__, ret); + goto retry; + } else if (unlikely(ret < 0)) { + pr_err("%s(): could not set global registers state, " + "error 
%d\n", __func__, ret); + } +} +void kvm_save_glob_regs_dirty_bgr(global_regs_t *gregs) +{ + unsigned long **g_regs = (unsigned long **)&gregs->g[0].xreg; + int ret; + +retry: + ret = HYPERVISOR_set_guest_glob_regs_dirty_bgr(g_regs, + GUEST_GREGS_MASK); + if (unlikely(ret == -EAGAIN)) { + pr_err("%s(): could not get global registers state, " + "error %d, retry\n", __func__, ret); + goto retry; + } else if (unlikely(ret < 0)) { + pr_err("%s(): could not get global registers state, " + "error %d\n", __func__, ret); + } +} +void kvm_save_local_glob_regs(local_gregs_t *l_gregs, bool is_signal) +{ + unsigned long **gregs = (unsigned long **)&l_gregs->g[0].xreg; + int ret; + +retry: + ret = HYPERVISOR_get_guest_local_glob_regs(gregs, is_signal); + if (unlikely(ret == -EAGAIN)) { + pr_err("%s(): could not get local global registers state, " + "error %d, retry\n", __func__, ret); + goto retry; + } else if (unlikely(ret < 0)) { + pr_err("%s(): could not get local global registers state, " + "error %d\n", __func__, ret); + } +} +void kvm_restore_local_glob_regs(const local_gregs_t *l_gregs, bool is_signal) +{ + unsigned long **gregs = (unsigned long **)&l_gregs->g[0].xreg; + int ret; + +retry: + ret = HYPERVISOR_set_guest_local_glob_regs(gregs, is_signal); + if (unlikely(ret == -EAGAIN)) { + pr_err("%s(): could not get local global registers state, " + "error %d, retry\n", __func__, ret); + goto retry; + } else if (unlikely(ret < 0)) { + pr_err("%s(): could not get local global registers state, " + "error %d\n", __func__, ret); + } +} + +void kvm_get_all_user_glob_regs(global_regs_t *gregs) +{ + unsigned long **g_regs = (unsigned long **)&gregs->g[0].xreg; + int ret; + +retry: + ret = HYPERVISOR_get_all_guest_glob_regs(g_regs); + if (unlikely(ret == -EAGAIN)) { + pr_err("%s(): could not get all global registers state, " + "error %d, retry\n", __func__, ret); + goto retry; + } else if (unlikely(ret < 0)) { + pr_err("%s(): could not get all global registers state, " + "error 
%d\n", __func__, ret); + } +} + +/* + * We use this on KVM guest if we don't have any better idle routine. + */ +void kvm_default_idle(void) +{ + if (psr_and_upsr_irqs_disabled()) + local_irq_enable(); + + /* clear POLLING flag because of VCPU go to sleeping, */ + /* so cannot polling flag NEED_RESCHED and should be waked up */ + /* to reschedule if it need */ + clear_thread_flag(TIF_POLLING_NRFLAG); + + /* + * goto host to wait for some event will be injected into guest + * to wake up it + * Waiting is timed out and can be iterrupted on any event for + * this VCPU or guest kernel to exit from idle state + */ + HYPERVISOR_kvm_guest_vcpu_common_idle(GUEST_CPU_IDLE_TIMEOUT, + true); /* can interrupt waiting on any event */ + + /* restore POLLING flag because of VCPU completed sleeping */ + /* and can polling flag NEED_RESCHED to reschedule if it need */ + set_thread_flag(TIF_POLLING_NRFLAG); + + DebugKVMIDLE("current guest jiffies 0x%lx\n", jiffies); +} +EXPORT_SYMBOL(kvm_default_idle); + +static inline void kvm_do_cpu_relax(void) +{ + cpumask_var_t cpus_allowed; + int cpu = smp_processor_id(); + + /* scheduler cannot be called into atomic */ + if (unlikely(in_atomic_preempt_off())) + return; + + /* update allowed CPU mask to didsable migration */ + if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { + pr_err("%s(): could not allocate CPUs mask structure " + "to keep allowed mask\n", + __func__); + BUG_ON(true); + } + cpumask_copy(cpus_allowed, &current->cpus_mask); + cpumask_copy(&current->cpus_mask, cpumask_of(cpu)); + + if (likely(need_resched())) { + /* probably some thread is ready to execute, so switch */ + /* to this thread before go to idle mode on host */ + schedule(); + } + HYPERVISOR_kvm_guest_vcpu_common_idle(GUEST_CPU_WAKE_UP_TIMEOUT, + true); /* can interrupt waiting on any event */ + /* to enable rescheduling */ + if (likely(need_resched())) { + /* timer interrupts should be handled */ + /* now timer handler is separate bottom half thread */ + schedule(); + } + + 
/* restore source mask of allowed CPUs */ + cpumask_copy(&current->cpus_mask, cpus_allowed); + free_cpumask_var(cpus_allowed); +} + +/* + * We use this on KVM guest if we don't have any better idle routine. + */ +void kvm_cpu_relax(void) +{ + HYPERVISOR_kvm_guest_vcpu_common_idle(GUEST_CPU_WAKE_UP_TIMEOUT, + true); /* can interrupt waiting on any event */ + /* to enable rescheduling */ +} +EXPORT_SYMBOL(kvm_cpu_relax); + +/* + * In some case it need CPU relaxation without rescheduling + * for example CPU frequency measurement + */ +void kvm_cpu_relax_no_resched(void) +{ + HYPERVISOR_kvm_guest_vcpu_common_idle(GUEST_CPU_WAKE_UP_TIMEOUT, + true); /* can interrupt waiting on any event */ + /* to return to guest */ +} + +#ifdef CONFIG_SMP +/* + * Guest kernel cannot wait for some events in the loop on real CPU, + * so make hypercall to free CPU and wait for the VCPU activation from + * other VCPU or guest kernel + * Waiting is not timed out and cannot be iterrupted on any event, + * activation can be done only by different hypercall from other VCPU, + */ +void kvm_wait_for_cpu_booting(void) +{ + HYPERVISOR_kvm_guest_vcpu_common_idle(0, /* without timeout */ + false); /* cannot interrupt waiting on any event, */ + /* because of VCPU is not yet activated */ +} +void kvm_wait_for_cpu_wake_up(void) +{ + kvm_do_cpu_relax(); +} +/* + * Activate the CPU, which is waiting on idle mode after hypercall above + */ +int kvm_activate_cpu(int cpu_id) +{ + int ret; + + ret = HYPERVISOR_kvm_activate_guest_vcpu(cpu_id); + if (ret) { + pr_err("%s(): failed to activate CPU #%d, error %d\n", + __func__, cpu_id, ret); + } + return ret; +} +/* + * Activate all CPUs, which are waiting on idle mode after hypercall above + */ +int kvm_activate_all_cpus(void) +{ + int ret; + + ret = HYPERVISOR_kvm_activate_guest_all_vcpus(); + if (ret) { + pr_err("%s(): failed to activate all CPUs, error %d\n", + __func__, ret); + } + return ret; +} +#endif /* CONFIG_SMP */ diff --git a/arch/e2k/kvm/guest/process.h 
b/arch/e2k/kvm/guest/process.h new file mode 100644 index 000000000000..bd7d6be2eaad --- /dev/null +++ b/arch/e2k/kvm/guest/process.h @@ -0,0 +1,38 @@ +/* + * process.h: Guest kernel KVM process related definitions + * Copyright (c) 2011, MCST. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __GUEST_PROCESS_H +#define __GUEST_PROCESS_H + +#include +#include + +#include +#include +#include + +#include + +#include "traps.h" + +/* timeout to wake up guest idle to check on need reschedule */ +#define GUEST_CPU_IDLE_TIMEOUT HZ /* every 1 sec. */ +#define GUEST_CPU_WAKE_UP_TIMEOUT 1 /* each jiffies to handle */ + /* timer interrupt */ + +extern int kvm_do_map_user_hard_stack_to_kernel(int nid, + e2k_addr_t kernel_stack_base, e2k_addr_t user_stack_base, + e2k_size_t kernel_size); + +#endif /* __GUEST_PROCESS_H */ diff --git a/arch/e2k/kvm/guest/signal.c b/arch/e2k/kvm/guest/signal.c new file mode 100644 index 000000000000..546e2786d618 --- /dev/null +++ b/arch/e2k/kvm/guest/signal.c @@ -0,0 +1,250 @@ +/* + * Guest paravitualized version of user signal handler + * + * Copyright (C) 2020 MCST + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_PROTECTED_MODE +#include +#include +#include +#include +#endif /* CONFIG_PROTECTED_MODE */ +#include +#include +#include + +#undef DEBUG_HS_MODE +#undef DebugHS +#define DEBUG_HS_MODE 0 /* Signal handling */ +#define DebugHS(fmt, args...) 
\ +({ \ + if (DEBUG_HS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +static int kvm_copy_sighandler_frame(struct pt_regs *regs, u64 *pframe) +{ + e2k_stacks_t *stacks = &regs->stacks; + size_t pframe_size; + void __user *u_pframe; + unsigned long ts_flag; + int ret; + + /* copy the signal handler procedure frame */ + /* to the top of user procedure stack */ + u_pframe = (void __user *)(stacks->psp_lo.PSP_lo_base + + stacks->psp_hi.PSP_hi_ind); + pframe_size = (TASK_IS_PROTECTED(current)) ? (32 * 8) : (16 * 8); + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __copy_to_user_with_tags(u_pframe, pframe, pframe_size); + clear_ts_flag(ts_flag); + if (ret != 0) { + pr_err("%s(): could not copy user signal handler procedure " + "stack frame\n", + __func__); + goto failed; + } + stacks->psp_hi.PSP_hi_ind += pframe_size; + DebugHS("copy signal handler frame to %px size 0x%lx, " + "PSP new ind 0x%x\n", + u_pframe, pframe_size, stacks->psp_hi.PSP_hi_ind); + + return 0; + +failed: + return ret; +} + +static int kvm_launch_sig_handler(struct pt_regs *regs) +{ + kvm_stacks_info_t regs_info; + long sys_rval = regs->sys_rval; + int ret; + + regs_info.top = regs->stacks.top; + regs_info.usd_lo = regs->stacks.usd_lo.USD_lo_half; + regs_info.usd_hi = regs->stacks.usd_hi.USD_hi_half; + + regs_info.psp_lo = regs->stacks.psp_lo.PSP_lo_half; + regs_info.psp_hi = regs->stacks.psp_hi.PSP_hi_half; + regs_info.pshtp = regs->stacks.pshtp.PSHTP_reg; + regs_info.pcsp_lo = regs->stacks.pcsp_lo.PCSP_lo_half; + regs_info.pcsp_hi = regs->stacks.pcsp_hi.PCSP_hi_half; + regs_info.pcshtp = regs->stacks.pcshtp; + + regs_info.cr0_lo = regs->crs.cr0_lo.CR0_lo_half; + regs_info.cr0_hi = regs->crs.cr0_hi.CR0_hi_half; + regs_info.cr1_lo = regs->crs.cr1_lo.CR1_lo_half; + regs_info.cr1_hi = regs->crs.cr1_hi.CR1_hi_half; + + /* return IRQs mask control from UPSR to PSR */ + KVM_RETURN_TO_USER_UPSR(current_thread_info()->upsr, false); + +retry: + ret = HYPERVISOR_launch_sig_handler(&regs_info, + 
(unsigned long)sighandler_trampoline_continue, sys_rval); + if (unlikely(ret == -EAGAIN)) { + pr_err("%s(): could not complete launch sig handler on host, " + "error %d, retry\n", + __func__, ret); + goto retry; + } else if (unlikely(ret < 0)) { + pr_err("%s(): could not complete launch sig handler on host, " + "error %d\n", + __func__, ret); + } + return ret; +} + +int kvm_signal_setup(struct pt_regs *regs) +{ + register thread_info_t *ti = current_thread_info(); + e2k_stacks_t *stacks = &regs->stacks; + u64 pframe[32]; + int ret; + + ret = signal_rt_frame_setup(regs); + if (ret != 0) { + pr_err("%s(): setup signal rt frame failed, error %d\n", + __func__, ret); + return ret; + } + + /* + * Copy user's part of kernel hardware stacks into user + */ + ret = kvm_user_hw_stacks_copy(regs); + if (ret) + return ret; + + /* + * Copy 2 additional chain stack frames from guest kernel back to user: + * top user frame that caused trap or system call; + * host trampoline to return to user stacks & context; + * + * plus Guest kernel signal handler trampoline frame; + * + * The signal handler chain frame should be topmost, so on CRs, + * although a copy in memory may be needed + */ + ret = kvm_copy_injected_pcs_frames_to_user(regs, 2); + if (ret != 0) { + pr_err("%s(): could not restore user hardware stacks frames\n", + __func__); + return ret; + } + + collapse_kernel_hw_stacks(stacks); + + /* + * After having called setup_signal_stack() we must unroll signal + * stack by calling pop_signal_stack() in case an error happens. + */ + ret = setup_signal_stack(regs, true); + if (ret) + return ret; + + /* + * User's signal handler frame should be the last in stacks + */ + ret = prepare_sighandler_frame(stacks, pframe, &regs->crs); + if (ret) + goto free_signal_stack; + ret = kvm_copy_sighandler_frame(regs, pframe); + if (ret) + goto free_signal_stack; + + /* + * Update psize for ttable_entry8: syscall uses 0x70 + * but handler uses 0x40. 
+ */ + if (from_syscall(regs)) + regs->wd.psize = 0x40; + + /* + * For e2k applications g16-g31 registers are local, initialize them + */ + if (!TASK_IS_BINCO(current)) { + memset(&ti->k_gregs, 0, sizeof(ti->k_gregs)); + } + + DebugHS("sig=%d siginfo=0x%px\n" + "\tIS_PROTECTED = 0x%lx\tsa_flags = 0x%lx\t" + "->thread.flags=0x%lx\n", + ti->ksig.sig, &ti->ksig.info, + TASK_IS_PROTECTED(current), ti->ksig.ka.sa.sa_flags, + current->thread.flags); + DebugHS("will start handler() 0x%lx for sig #%d\n", + ti->ksig.ka.sa.sa_handler, ti->ksig.sig); + + signal_setup_done(0, &ti->ksig, test_ts_flag(TS_SINGLESTEP_USER)); + regs->flags.sig_call_handler = 1; +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + if (regs->trap) + regs->trap->flags &= ~TRAP_RP_FLAG; +#endif /* CONFIG_SECONDARY_SPACE_SUPPORT */ + + ret = kvm_launch_sig_handler(regs); + + /* should not be return to here */ + +free_signal_stack: + pop_signal_stack(); + return ret; +} + +int kvm_complete_long_jump(struct pt_regs *regs) +{ + kvm_long_jump_info_t regs_info; + int ret; + + regs_info.top = regs->stacks.top; + regs_info.usd_lo = regs->stacks.usd_lo.USD_lo_half; + regs_info.usd_hi = regs->stacks.usd_hi.USD_hi_half; + + regs_info.psp_lo = regs->stacks.psp_lo.PSP_lo_half; + regs_info.psp_hi = regs->stacks.psp_hi.PSP_hi_half; + regs_info.pshtp = regs->stacks.pshtp.PSHTP_reg; + regs_info.pcsp_lo = regs->stacks.pcsp_lo.PCSP_lo_half; + regs_info.pcsp_hi = regs->stacks.pcsp_hi.PCSP_hi_half; + regs_info.pcshtp = regs->stacks.pcshtp; + + regs_info.cr0_lo = regs->crs.cr0_lo.CR0_lo_half; + regs_info.cr0_hi = regs->crs.cr0_hi.CR0_hi_half; + regs_info.cr1_lo = regs->crs.cr1_lo.CR1_lo_half; + regs_info.cr1_hi = regs->crs.cr1_hi.CR1_hi_half; + +retry: + ret = HYPERVISOR_complete_long_jump(&regs_info); + if (unlikely(ret == -EAGAIN)) { + pr_err("%s(): could not complete long jump on host, " + "error %d, retry\n", + __func__, ret); + goto retry; + } else if (unlikely(ret < 0)) { + pr_err("%s(): could not complete long jump on host, " + 
"error %d\n", + __func__, ret); + force_sig(SIGKILL); + } + return ret; +} diff --git a/arch/e2k/kvm/guest/smp.c b/arch/e2k/kvm/guest/smp.c new file mode 100644 index 000000000000..95adb8d79574 --- /dev/null +++ b/arch/e2k/kvm/guest/smp.c @@ -0,0 +1,241 @@ +/* + * This file implements the arch-dependent parts of kvm guest + * csd_lock/csd_unlock functions to serialize access to per-cpu csd resources + * + * Copyright 2016 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "cpu.h" +#include "pic.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_SWITCH_KERNEL_STACKS_MODE +#undef DebugSWSTK +#define DEBUG_SWITCH_KERNEL_STACKS_MODE 0 /* switch to new kernel stacks */ +#define DebugSWSTK(fmt, args...) \ +({ \ + if (DEBUG_SWITCH_KERNEL_STACKS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +void kvm_ap_switch_to_init_stack(e2k_addr_t stack_base, int cpuid, int cpu) +{ + kvm_task_info_t task_info; + unsigned long args[2]; + e2k_addr_t us_base; + e2k_addr_t ps_base; + e2k_addr_t pcs_base; + int ret; + + us_base = stack_base + KERNEL_C_STACK_OFFSET; + ps_base = stack_base + KERNEL_P_STACK_OFFSET; + pcs_base = stack_base + KERNEL_PC_STACK_OFFSET; + + task_info.sp_offset = KERNEL_C_STACK_SIZE; + task_info.us_base = us_base; + task_info.us_size = KERNEL_C_STACK_SIZE; + task_info.flags = 0; + DebugSWSTK("local data stack from 0x%lx size 0x%lx SP offset 0x%lx\n", + task_info.us_base, task_info.us_size, task_info.sp_offset); + + BUG_ON(task_info.sp_offset > task_info.us_size); + + task_info.ps_base = ps_base; + task_info.ps_ind = 0; + task_info.ps_size = KERNEL_P_STACK_SIZE; + DebugSWSTK("procedure stack from 0x%lx size 0x%lx, index 0x%lx\n", + task_info.ps_base, 
task_info.ps_size, task_info.ps_ind); + + task_info.pcs_base = pcs_base; + task_info.pcs_ind = 0; + task_info.pcs_size = KERNEL_PC_STACK_SIZE; + DebugSWSTK("procedure chain stack from 0x%lx size 0x%lx, index 0x%lx\n", + task_info.pcs_base, task_info.pcs_size, task_info.pcs_ind); + + args[0] = cpuid; + args[1] = cpu; + + ret = HYPERVISOR_switch_guest_kernel_stacks(&task_info, + (char *) &e2k_start_secondary_switched_stacks, + args, 2); + if (ret < 0) { + panic("%s(): could not switch to init kernel stacks, " + "error %d\n", + __func__, ret); + } +} + +void kvm_setup_secondary_task(int cpu) +{ + struct task_struct *idle = idle_tasks[cpu]; + struct thread_info *ti_idle; + int ret; + + native_setup_secondary_task(cpu); + + /* setup the idle task on host (get GPID_ID #) */ + ret = HYPERVISOR_setup_idle_task(cpu); + if (ret < 0) { + panic("%s(): could not setup CPU #%d idle task on host, " + "error %d\n", + __func__, cpu, ret); + } + ti_idle = task_thread_info(idle); + ti_idle->gpid_nr = ret; + ti_idle->gmmid_nr = 0; /* init mm should have GMMID == 0 */ +} + +/* + * The function implements asynchronous wait for csd lock unlocking. + * In this case csd_lock_wait() has not explicit call and waiting will be + * started only while next csd lock using. So previous lock can be unlocked + * and queued as unlocked csd lock on host. The host function should dequeue + * and free this csd lock. 
+ */ +static inline void kvm_csd_lock_try_wait(call_single_data_t *data) +{ + int ret; + + ret = HYPERVISOR_guest_csd_lock_try_wait(data); + if (ret == -EBUSY) { + /* other VCPUs cannot handle IPI, try show all stacks */ + if (kvm_get_vcpu_state()->do_dump_state) { + kvm_get_vcpu_state()->do_dump_state = false; + show_state(); + } else if (kvm_get_vcpu_state()->do_dump_stack) { + kvm_get_vcpu_state()->do_dump_stack = false; + dump_stack(); + } + panic("could not handle IPI by all VCPUs\n"); + } +} + +/* + * csd lock can be already unlocked, flag CSD_FLAG_LOCK cleared and + * the lock queued as unlocked on host. In this case it need dequeue the lock, + * so should be call try waiting + */ +void kvm_csd_lock_wait(call_single_data_t *data) +{ + int ret; + + if (!(data->flags & CSD_FLAG_LOCK)) + return kvm_csd_lock_try_wait(data); + while (data->flags & CSD_FLAG_LOCK) { + ret = HYPERVISOR_guest_csd_lock_wait(data); + if (ret == -EBUSY) { + /* other VCPUs cannot handle IPI, try show all stacks */ + show_state(); + panic("could not handle IPI by all VCPUs\n"); + } + } +} + +void kvm_csd_lock(call_single_data_t *data) +{ + kvm_csd_lock_try_wait(data); + + /* + * prevent CPU from reordering the above assignment + * to ->flags with any subsequent assignments to other + * fields of the specified call_single_data_t structure: + */ + smp_mb(); + data->flags |= CSD_FLAG_LOCK; + + /* register lock wait guest on host */ + HYPERVISOR_guest_csd_lock(data); +} + +void kvm_arch_csd_lock_async(call_single_data_t *data) +{ + data->flags = (CSD_FLAG_LOCK | CSD_FLAG_LOCK_ASYNC); + + /* + * prevent CPU from reordering the above assignment + * to ->flags with any subsequent assignments to other + * fields of the specified call_single_data_t structure: + */ + smp_mb(); + + /* asynchronous lock need not register on host */ + /* HYPERVISOR_guest_csd_lock(data); */ +} + +void kvm_csd_unlock(call_single_data_t *data) +{ + unsigned int flags = data->flags; + + WARN_ON(!(flags & CSD_FLAG_LOCK)); 
+ + /* wake up sychronous lock waiting guest on host */ + if (!(flags & CSD_FLAG_LOCK_ASYNC)) + HYPERVISOR_guest_csd_unlock(data); + + /* ensure we're all done before releasing data */ + smp_mb(); + + data->flags &= ~(CSD_FLAG_LOCK | CSD_FLAG_LOCK_ASYNC); +} + +void kvm_setup_pic_virq(unsigned int cpuid) +{ + kvm_setup_local_pic_virq(cpuid); +} +void kvm_startup_pic_virq(unsigned int cpuid) +{ + kvm_startup_local_pic_virq(cpuid); +} + +void kvm_setup_local_apic_virq(unsigned int cpuid) +{ +} +void kvm_startup_local_apic_virq(unsigned int cpuid) +{ + setup_secondary_APIC_clock(); + store_cpu_info(cpuid); + + /* complete creation of idle task fot this virtual CPU */ + init_idle(current, cpuid); +} + +#ifdef CONFIG_EPIC +void kvm_setup_epic_virq(unsigned int cpuid) +{ +} + +void kvm_startup_epic_virq(unsigned int cpuid) +{ + setup_secondary_epic_clock(); + store_cpu_info(cpuid); + + /* complete creation of idle task fot this virtual CPU */ + init_idle(current, cpuid); +} + +static __init int kvm_setup_boot_pic_virq(void) +{ + return kvm_setup_boot_local_pic_virq(); +} +early_initcall(kvm_setup_boot_pic_virq); +#endif diff --git a/arch/e2k/kvm/guest/spinlock.c b/arch/e2k/kvm/guest/spinlock.c new file mode 100644 index 000000000000..66623a0eed7b --- /dev/null +++ b/arch/e2k/kvm/guest/spinlock.c @@ -0,0 +1,141 @@ +/* + * This file implements the arch-dependent parts of kvm guest + * spinlock()/spinunlock() slow part + * + * Copyright 2014 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#include +#include +#include +#include + +#include +#include + +#include "cpu.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_RW_MODE +#undef DebugKVMRW +#define DEBUG_KVM_RW_MODE 1 /* RW spinlocks debugging */ +#define DebugKVMRW(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_RW_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +/* + * Slowpath of a guest spinlock: goto hypervisor to wait for spin unlocking + */ + +static inline void do_arch_spin_lock_slow(void *lock, bool check_unlock) +{ + int err; + + DebugKVM("%s (%d) started for lock %px\n", + current->comm, current->pid, lock); + err = HYPERVISOR_guest_spin_lock_slow(lock, check_unlock); + if (err == -EINTR) { + DebugKVM("hypercall was interrupted to handle " + "pending VIRQs\n"); + } + if (err && err != -EINTR) { + panic("HYPERVISOR_guest_spin_lock_slow() failed (error %d)\n", + err); + } +} + +void kvm_arch_spin_lock_slow(void *lock) +{ + do_arch_spin_lock_slow(lock, true); +} +EXPORT_SYMBOL(kvm_arch_spin_lock_slow); + +void kvm_wait_read_lock_slow(arch_rwlock_t *rw) +{ + do_arch_spin_lock_slow(rw, true); +} +EXPORT_SYMBOL(kvm_wait_read_lock_slow); + +void kvm_wait_write_lock_slow(arch_rwlock_t *rw) +{ + do_arch_spin_lock_slow(rw, true); +} +EXPORT_SYMBOL(kvm_wait_write_lock_slow); + +static inline void do_arch_spin_locked_slow(void *lock) +{ + int err; + + DebugKVM("%s (%d) started for lock %px\n", + current->comm, current->pid, lock); + do { + err = HYPERVISOR_guest_spin_locked_slow(lock); + if (err == -EINTR) { + DebugKVM("hypercall was interrupted to handle " + "pending VIRQs\n"); + } + } while (err == -EINTR); + if (err) { + panic("HYPERVISOR_guest_spin_locked_slow() failed (error %d)\n", + err); + } +} +void kvm_arch_spin_locked_slow(void *lock) +{ + do_arch_spin_locked_slow(lock); +} +void kvm_arch_read_locked_slow(arch_rwlock_t *rw) +{ + do_arch_spin_locked_slow(rw); +} +EXPORT_SYMBOL(kvm_arch_read_locked_slow); + +void kvm_arch_write_locked_slow(arch_rwlock_t *rw) +{ + do_arch_spin_locked_slow(rw); +} +EXPORT_SYMBOL(kvm_arch_write_locked_slow); + +/* + * Slowpath of a guest spinunlock: goto hypervisor to wake up proccesses + * which are waiting on this lock + */ +static inline void do_arch_spin_unlock_slow(void *lock, bool add_to_unlock) 
+{ + int err; + + DebugKVM("%s (%d) started for lock %px add to unlock list %d\n", + current->comm, current->pid, lock, add_to_unlock); + err = HYPERVISOR_guest_spin_unlock_slow(lock, add_to_unlock); + if (err) { + panic("kvm_arch_spin_unlock_slow() failed (error %d)\n", + err); + } +} +void kvm_arch_spin_unlock_slow(void *lock) +{ + do_arch_spin_unlock_slow(lock, true); +} +EXPORT_SYMBOL(kvm_arch_spin_unlock_slow); + +void kvm_arch_read_unlock_slow(arch_rwlock_t *rw) +{ + do_arch_spin_unlock_slow(rw, true); +} +EXPORT_SYMBOL(kvm_arch_read_unlock_slow); + +void kvm_arch_write_unlock_slow(arch_rwlock_t *rw) +{ + do_arch_spin_unlock_slow(rw, true); +} +EXPORT_SYMBOL(kvm_arch_write_unlock_slow); diff --git a/arch/e2k/kvm/guest/string.c b/arch/e2k/kvm/guest/string.c new file mode 100644 index 000000000000..4bf0311a4e0e --- /dev/null +++ b/arch/e2k/kvm/guest/string.c @@ -0,0 +1,152 @@ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + + +#ifdef BOOT +/* This file is included in kernel's builtin boot directly, + * undefine EXPORT_SYMBOL to avoid linking errors. */ +# undef EXPORT_SYMBOL +# define EXPORT_SYMBOL(sym) +# define DEBUG_DISABLE_BOOT 1 +#else +# define DEBUG_DISABLE_BOOT 0 +#endif + +#undef DEBUG_KVM_RETRY_MODE +#undef DebugRETRY +#define DEBUG_KVM_RETRY_MODE 0 /* memory copy retries debug */ +#define DebugRETRY(fmt, args...) \ +({ \ + if (DEBUG_KVM_RETRY_MODE && !DEBUG_DISABLE_BOOT) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_FAULT_MODE +#undef DebugFAULT +#define DEBUG_KVM_FAULT_MODE 0 /* memory copy page fault debug */ +#define DebugFAULT(fmt, args...) \ +({ \ + if (DEBUG_KVM_FAULT_MODE && !DEBUG_DISABLE_BOOT) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_EXTRACT_TAGS +#undef DebugEXTRACT +#define DEBUG_KVM_EXTRACT_TAGS 1 /* extract tags debug */ +#define DebugEXTRACT(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_EXTRACT_TAGS && !DEBUG_DISABLE_BOOT) \ + pr_err("%s(): " fmt, __func__, ##args); \ +}) + +#ifdef DEBUG_GUEST_STRINGS +/* + * optimized copy memory along with tags + * using privileged LD/ST recovery operations + * + * Returns number of successfully copied bytes. + */ +unsigned long +kvm_fast_tagged_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + long ret; + +retry: + if (likely(IS_HV_GM())) { + return native_fast_tagged_memory_copy(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); + } else { + ret = kvm_do_fast_tagged_memory_copy(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); + } + + if (unlikely(ret < 0)) { + BUG_ON(ret != -EAGAIN); + printk(KERN_ERR "%s(): could not copy memory from %px to %px, " + "size 0x%lx, error %ldi, retry\n", __func__, src, dst, + len, ret); + goto retry; + } + + BUG_ON(ret != len); + + return ret; +} +EXPORT_SYMBOL(kvm_fast_tagged_memory_copy); + +unsigned long +kvm_fast_tagged_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + long ret; + +retry: + if (likely(IS_HV_GM())) { + return native_fast_tagged_memory_set(addr, val, tag, len, + strd_opcode); + } else { + ret = kvm_do_fast_tagged_memory_set(addr, val, tag, len, + strd_opcode); + } + + if (unlikely(ret < 0)) { + BUG_ON(ret != -EAGAIN); + printk(KERN_ERR "%s(): could set memory %px, size 0x%lx, " + "error %ldi, retry\n", __func__, + addr, len, ret); + goto retry; + } + + BUG_ON(ret != len); + + return ret; +} +EXPORT_SYMBOL(kvm_fast_tagged_memory_set); + +/* + * Extract tags from 32 bytes of data + * FIXME: need improve function to extract tags from any size of data + */ +unsigned long +kvm_extract_tags_32(u16 *dst, const void *src) +{ + long ret; + + if (IS_HOST_KERNEL_ADDRESS((e2k_addr_t)src) || + IS_HOST_KERNEL_ADDRESS((e2k_addr_t)dst)) { + DebugEXTRACT("could not extract tags from host kernel memory " + "address %px to %px\n", + src, dst); 
+ } + if (!IS_GUEST_KERNEL_ADDRESS((e2k_addr_t)src) || + !IS_GUEST_KERNEL_ADDRESS((e2k_addr_t)dst)) { + DebugEXTRACT("could not extract tags from user memory " + "address %px to %px\n", + src, dst); + } + if (likely(IS_HV_GM())) + ret = native_extract_tags_32(dst, src); + else + ret = kvm_do_extract_tags_32(dst, src); + if (ret) { + DebugEXTRACT("could not extract tags from %px to %px, " + "error %ld\n", + src, dst, ret); + } + return ret; +} +EXPORT_SYMBOL(kvm_extract_tags_32); + +#endif /* DEBUG_GUEST_STRINGS */ diff --git a/arch/e2k/kvm/guest/time.c b/arch/e2k/kvm/guest/time.c new file mode 100644 index 000000000000..aa0093f83376 --- /dev/null +++ b/arch/e2k/kvm/guest/time.c @@ -0,0 +1,756 @@ +/* + * KVM guest time implementation. + * + * This is implemented in terms of a clocksource driver which uses + * the hypervisor clock as a nanosecond timebase, and a clockevent + * driver which uses the hypervisor's timer mechanism. + * + * Based on Xen implementation: arch/x86/xen/time.c + */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "time.h" +#include "irq.h" +#include "cpu.h" +#include "traps.h" +#include "pic.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 1 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +/* On VIRQ VCUPs common printk() cannot be used, because of thread */ +/* running on these VCPUs has not task structure */ +#undef DEBUG_DUMP_KVM_MODE +#undef DebugDKVM +#define DEBUG_DUMP_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugDKVM(fmt, args...) 
\ +({ \ + if (DEBUG_DUMP_KVM_MODE) \ + dump_printk("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_EARLY_TIME_MODE +#undef DebugKVMET +#define DEBUG_KVM_EARLY_TIME_MODE 0 /* KVM early time/timer */ + /* debugging */ +#define DebugKVMET(fmt, args...) \ +({ \ + if (DEBUG_KVM_EARLY_TIME_MODE) \ + dump_printk("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_TIME_MODE +#undef DebugKVMT +#define DEBUG_KVM_TIME_MODE 0 /* KVM time/timer debugging */ +#define DebugKVMT(fmt, args...) \ +({ \ + if (DEBUG_KVM_TIME_MODE) \ + dump_printk("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_IRQ_MODE +#undef DebugKVMIRQ +#define DEBUG_KVM_IRQ_MODE 0 /* kernel virtual IRQ debugging */ +#define DebugKVMIRQ(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_DIRECT_IRQ_MODE +#undef DebugDIRQ +#define DEBUG_DIRECT_IRQ_MODE 0 /* direct IRQ injection debugging */ +#define DebugDIRQ(fmt, args...) \ +({ \ + if (DEBUG_DIRECT_IRQ_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_TIME_INTR_MODE +#undef DebugKVMTI +#define DEBUG_KVM_TIME_INTR_MODE 0 /* KVM timer interrupt */ + /* debugging */ +#define DebugKVMTI(fmt, args...) \ +({ \ + if (DEBUG_KVM_TIME_INTR_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_TIMER_MODE +#undef DebugKVMTM +#define DEBUG_KVM_TIMER_MODE 0 /* KVM timer debugging */ +#define DebugKVMTM(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_TIMER_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#define KVM_SHIFT 22 + +/* Xen may fire a timer up to this many ns early */ +#define TIMER_SLOP 100000 +#define NS_PER_TICK (1000000000LL / HZ) +#define NSEC_AT_SEC 1000000000LL + +/* snapshots of runstate info */ +static DEFINE_PER_CPU(kvm_runstate_info_t, kvm_runstate_snapshot); + +/* unused ns of stolen and blocked time */ +static DEFINE_PER_CPU(u64, kvm_residual_stolen); +static DEFINE_PER_CPU(u64, kvm_residual_blocked); + +extern ktime_t tick_period; + +/* + * Runstate accounting + */ +static u64 get_runstate_snapshot(kvm_runstate_info_t *res) +{ + kvm_runstate_info_t *state; + u64 state_time; + u64 cur_time; + + BUG_ON(preemptible()); + + state = kvm_vcpu_runstate_info(); + + /* + * The runstate info is always updated by the hypervisor on + * the current CPU, so there's no need to use anything + * stronger than a compiler barrier when fetching it. + */ + do { + state_time = state->state_entry_time; + rmb(); /* wait for all read completed */ + *res = *state; + barrier(); + cur_time = HYPERVISOR_get_host_runstate_ktime(); + } while (state->state_entry_time != state_time); + return cur_time; +} + +#ifdef CONFIG_PARAVIRT +/* FIXME: this method has support on arch-independent code */ +/* so it should be main method to account steal time */ + +static unsigned long get_stolen_time(void) +{ + kvm_runstate_info_t state; + u64 now; + s64 running, blocked, runnable, offline, stolen, in_hcall; + + now = get_runstate_snapshot(&state); + + WARN_ON(state.state != RUNSTATE_running); + + running = state.time[RUNSTATE_running]; + BUG_ON(now < state.state_entry_time); + running += (now - state.state_entry_time); + + /* work out how much time the VCPU has not been runn*ing* */ + blocked = state.time[RUNSTATE_blocked]; + in_hcall = state.time[RUNSTATE_in_hcall]; + runnable = state.time[RUNSTATE_runnable] + + state.time[RUNSTATE_in_QEMU] + + state.time[RUNSTATE_in_trap]; + offline = 
state.time[RUNSTATE_offline]; + + stolen = runnable + offline; + + BUG_ON(now < stolen + running + blocked); + + return stolen; +} + +unsigned long kvm_steal_clock(int cpu) +{ + return get_stolen_time(); +} +#endif /* CONFIG_PARAVIRT */ + +static unsigned long get_running_time(bool early) +{ + kvm_runstate_info_t state; + u64 now; + u64 running; + u64 in_hcall; + u64 blocked; + + now = get_runstate_snapshot(&state); + + WARN_ON(!early && state.state != RUNSTATE_running); + + running = state.time[RUNSTATE_running]; + in_hcall = state.time[RUNSTATE_in_hcall]; + blocked = state.time[RUNSTATE_blocked]; + if (running == 0) { + E2K_LMS_HALT_OK; + } + DebugKVMTM("time running 0x%llx in hcall 0x%llx blocked 0x%llx " + "now 0x%llx\n", + running, in_hcall, blocked, now); + if (now < state.state_entry_time) + now = state.state_entry_time; + running += (now - state.state_entry_time); + DebugKVMTM("time runnable 0x%llx in QEMU 0x%llx in trap 0x%llx\n", + state.time[RUNSTATE_runnable], + state.time[RUNSTATE_in_QEMU], + state.time[RUNSTATE_in_trap]); + DebugKVMTM("entry time 0x%llx running 0x%llx total 0x%llx\n", + state.state_entry_time, running, running + in_hcall + blocked); + + return running + in_hcall + blocked; +} + +static void do_stolen_accounting(int cpu, bool early) +{ + kvm_runstate_info_t state; + kvm_runstate_info_t *snap; + s64 blocked, runnable, in_QEMU, in_trap, offline, stolen; + u64 ticks; + + get_runstate_snapshot(&state); + + WARN_ON(!early && state.state != RUNSTATE_running); + + snap = &per_cpu(kvm_runstate_snapshot, cpu); + + /* work out how much time the VCPU has not been runn*ing* */ + blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked]; + runnable = state.time[RUNSTATE_runnable] - + snap->time[RUNSTATE_runnable]; + in_QEMU = state.time[RUNSTATE_in_QEMU] - snap->time[RUNSTATE_in_QEMU]; + in_trap = state.time[RUNSTATE_in_trap] - snap->time[RUNSTATE_in_trap]; + offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline]; + + 
*snap = state; + + /* + * Add the appropriate number of ticks of stolen time, + * including any left-overs from last time. + */ + stolen = runnable + in_QEMU + in_trap + offline + + per_cpu(kvm_residual_stolen, cpu); + + if (stolen < 0) + stolen = 0; + + ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen); + per_cpu(kvm_residual_stolen, cpu) = stolen; + +// account_steal_ticks(ticks); + + /* + * Add the appropriate number of ticks of blocked time, + * including any left-overs from last time. + */ + blocked += per_cpu(kvm_residual_blocked, cpu); + + if (blocked < 0) + blocked = 0; + + ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked); + per_cpu(kvm_residual_blocked, cpu) = blocked; + + account_idle_ticks(ticks); +} + +/* + * KVM guest sched_clock implementation. Returns the number of unstolen + * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED + * states. + */ +unsigned long long kvm_sched_clock(void) +{ + cycles_t running; + u64 ns; + + /* + * Ideally schedule_clock should be called on a per-cpu basis + * anyway, so preempt should already be disabled, but that's + * not current practice at the moment. 
+ */ + preempt_disable(); + + if (current == NULL || test_thread_flag(TIF_PSEUDOTHREAD)) { + /* sched_clock() started on VIRQ VCPU, so only host */ + /* can get main VCPU running time */ + running = HYPERVISOR_get_guest_running_time(); + } else { + running = get_running_time(false); + } + + ns = cycles_2nsec(running); + + preempt_enable(); + + return ns; +} + +int kvm_read_current_timer(unsigned long *timer_val) +{ + *timer_val = get_running_time(false); + return 0; +} + +unsigned long kvm_get_cpu_running_cycles(void) +{ + return get_running_time(true); +} + +static void kvm_read_wallclock(struct timespec64 *ts) +{ + kvm_time_t *time_info = kvm_vcpu_time_info(); + long sec; + + do { + sec = time_info->wall_time.tv_sec; + ts->tv_sec = sec; + ts->tv_nsec = time_info->wall_time.tv_nsec; + rmb(); /* wait for all read completed */ + } while (sec != time_info->wall_time.tv_sec); +} + +u64 kvm_clocksource_read(void) +{ + kvm_time_t *time_info = kvm_vcpu_time_info(); + long nsec; + long sec; + + do { + sec = time_info->sys_time.tv_sec; + nsec = time_info->sys_time.tv_nsec; + rmb(); /* wait for all read completed */ + } while (sec != time_info->sys_time.tv_sec); + return sec * NSEC_AT_SEC + nsec; +} + +static u64 kvm_clocksource_get_cycles(struct clocksource *cs) +{ + return kvm_clocksource_read(); +} + +unsigned long kvm_get_wallclock(void) +{ + struct timespec64 ts; + + kvm_read_wallclock(&ts); + return ts.tv_sec; +} + +int kvm_set_wallclock(unsigned long now) +{ + /* do nothing for domU */ + return -1; +} + +void __init kvm_clock_init(void) +{ + machine.set_wallclock = &kvm_set_wallclock; + machine.get_wallclock = &kvm_get_wallclock; +} + +static struct clocksource kvm_clocksource __read_mostly = { + .name = "kvm_clock", + .rating = 400, + .read = kvm_clocksource_get_cycles, + .mask = ~0, + .mult = 1 << KVM_SHIFT, /* time directly in nanoseconds */ + .shift = KVM_SHIFT, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +/* + KVM guest clockevent implementation + + Xen has two 
clockevent implementations: + + The old timer_op one works with all released versions of Xen prior + to version 3.0.4. This version of the hypervisor provides a + single-shot timer with nanosecond resolution. However, sharing the + same event channel is a 100Hz tick which is delivered while the + vcpu is running. We don't care about or use this tick, but it will + cause the core time code to think the timer fired too soon, and + will end up resetting it each time. It could be filtered, but + doing so has complications when the ktime clocksource is not yet + the xen clocksource (ie, at boot time). + + The new vcpu_op-based timer interface allows the tick timer period + to be changed or turned off. The tick timer is not useful as a + periodic timer because events are only delivered to running vcpus. + The one-shot timer can report when a timeout is in the past, so + set_next_event is capable of returning -ETIME when appropriate. + This interface is used when available. +*/ + + +/* + Get a hypervisor absolute time. In theory we could maintain an + offset between the kernel's time and the hypervisor's time, and + apply that to a kernel's absolute timeout. Unfortunately the + hypervisor and kernel times can drift even if the kernel is using + the Xen clocksource, because ntp can warp the kernel's clocksource. +*/ +/* FIXME: the function was declared but never referenced at now */ +/* static */ s64 get_abs_timeout(unsigned long delta) +{ + return kvm_clocksource_read() + delta; +} + +static int kvm_timerop_shutdown(struct clock_event_device *evt) +{ + /* The 0 delta shuts the clock down. 
*/ + HYPERVISOR_set_clockevent(0); + + return 0; +} + +static int kvm_timerop_set_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + WARN_ON(!clockevent_state_oneshot(evt)); + + DebugKVMT("starts hyper call to set clockevent delta 0x%lx\n", + delta); + HYPERVISOR_set_clockevent(delta); + + /* We may have missed the deadline, but there's no real way of + knowing for sure. If the event was in the past, then we'll + get an immediate interrupt. */ + + return 0; +} + +static const struct clock_event_device kvm_timerop_clockevent = { + .name = "kvm_clockevent", + .features = CLOCK_EVT_FEAT_ONESHOT, + + .max_delta_ns = 0xffffffff, + .min_delta_ns = TIMER_SLOP, + + .mult = 1, + .shift = 0, + .rating = 90, + + .set_state_shutdown = kvm_timerop_shutdown, + .set_next_event = kvm_timerop_set_next_event, +}; + + +static const struct clock_event_device *kvm_clockevent = + &kvm_timerop_clockevent; +static DEFINE_PER_CPU(struct clock_event_device, kvm_clock_events); +static DEFINE_PER_CPU(bool, kvm_clock_inited) = false; + +static __initdata struct task_struct clock_event_early_task; +int stop_early_timer_interrupt; +static bool timer_interrupt_set = false; + +#ifdef CONFIG_DIRECT_VIRQ_INJECTION +static irqreturn_t kvm_early_timer_direct_intr(int irq, void *dev_id) +{ + struct clock_event_device *evt; + kvm_virq_info_t *virq_info; + int cpu = cpu_from_irq(irq); + + DebugKVMET("started for virtual IRQ #%d\n", irq); + + evt = (struct clock_event_device *)dev_id; + virq_info = virq_info_from_irq(irq); + + if (stop_early_timer_interrupt) { + DebugKVMET("erly timer IRQ #%d stopped\n", irq); + return IRQ_NONE; + } + if (evt->event_handler) { + DebugKVMET("will start event handler %px\n", + evt->event_handler); + + BUG_ON(!irqs_disabled()); + + evt->event_handler(evt); + do_stolen_accounting(cpu, true); + return IRQ_HANDLED; + } else { + pr_warning("%s(): early timer clock event device has not " + "handler to run on VCPU #%d\n", __func__, cpu); + } + return IRQ_NONE; 
+} +#else /* !CONFIG_DIRECT_VIRQ_INJECTION */ +static irqreturn_t kvm_early_timer_direct_intr(int irq, void *dev_id) +{ + pr_err("%s(): direct VIRQs injection disabled, turn ON config mode to " + "enable\n", __func__); + return IRQ_NONE; +} +#endif /* CONFIG_DIRECT_VIRQ_INJECTION */ + +static int kvm_early_setup_timer(int cpu) +{ + const char *name; + struct clock_event_device *evt; + unsigned long irqflags; + int ret; + + printk(KERN_INFO "installing KVM guest early timer for CPU %d\n", cpu); + + name = kasprintf(GFP_KERNEL, "timer/%d", cpu); + if (!name) + name = ""; + + DebugKVM("kvm_clock_events %px cpu %d\n", + &per_cpu(kvm_clock_events, cpu), cpu); + evt = &per_cpu(kvm_clock_events, cpu); + memcpy(evt, kvm_clockevent, sizeof(*evt)); + + evt->cpumask = cpumask_of(cpu); + evt->irq = KVM_VIRQ_TIMER; + DebugKVM("CPU #%d timer evt %px IRQ %d mult %d\n", + cpu, evt, evt->irq, evt->mult); + stop_early_timer_interrupt = 0; + global_clock_event = evt; + + irqflags = kvm_get_default_virq_flags(KVM_VIRQ_TIMER); + + if (irqflags & BY_DIRECT_INJ_VIRQ_FLAG) { + ret = kvm_request_virq(KVM_VIRQ_TIMER, + &kvm_early_timer_direct_intr, cpu, + BY_DIRECT_INJ_VIRQ_FLAG, + name, evt); + if (ret == 0) { + timer_interrupt_set = true; + return 0; + } + DebugDIRQ("could not request direct early timer VIRQ %s " + "injection\n", name); + } else { + /* unknown mode to request VIRQ delivery */ + BUG_ON(true); + ret = -EINVAL; + } + if (ret) { + panic("could not register early timer VIRQ #%d\n", + KVM_VIRQ_TIMER); + } + return ret; +} + +__init int kvm_setup_sw_timer(void) +{ + const char *name; + struct clock_event_device *evt; + ktime_t next; + int cpu = raw_smp_processor_id(); + unsigned long irqflags; + unsigned long flags; + int ret; + + if (!paravirt_enabled()) + return 0; + if (timer_interrupt_set) { + printk(KERN_INFO "KVM guest timer for CPU %d already set\n", + cpu); + return 0; + } + printk(KERN_INFO "installing KVM guest timer for CPU %d\n", cpu); + + name = 
kasprintf(GFP_KERNEL, "timer/%d", cpu); + if (!name) + name = ""; + + evt = &per_cpu(kvm_clock_events, cpu); + if (evt != global_clock_event) { + memcpy(evt, kvm_clockevent, sizeof(*evt)); + evt->cpumask = cpumask_of(cpu); + evt->irq = KVM_VIRQ_TIMER; + DebugKVM("CPU #%d timer evt %px mult %d\n", + cpu, evt, evt->mult); + } + /* stop early timer handler */ + clockevents_shutdown(evt); + + irqflags = kvm_get_default_virq_flags(KVM_VIRQ_TIMER); + + ret = -ENOSYS; + if (irqflags & BY_DIRECT_INJ_VIRQ_FLAG) { + ret = kvm_request_virq(KVM_VIRQ_TIMER, + &kvm_early_timer_direct_intr, cpu, + BY_DIRECT_INJ_VIRQ_FLAG, + name, evt); + if (ret == 0) { + goto ok; + } + DebugDIRQ("could not request direct timer VIRQ %s " + "injection\n", name); + } else { + BUG(); + } + if (ret) { + panic("could not register timer VIRQ #%d for CPU #%d\n", + KVM_VIRQ_TIMER, cpu); + } +ok: + local_irq_save(flags); + DebugKVM("timer next event 0x%llx current time 0x%llx period 0x%llx\n", + ktime_to_ns(evt->next_event), ktime_to_ns(ktime_get()), + ktime_to_ns(tick_period)); + next = ktime_add(ktime_get(), tick_period); + DebugKVM("set timer next event 0x%llx\n", + ktime_to_ns(next)); +/* FIXME: not implemented how program next event + if (clockevents_program_event(evt, next, ktime_get())) + panic("could not programm timer events for VCPU #%d\n", + cpu); +*/ + local_irq_restore(flags); + + DebugKVM("KVM guest timer for CPU %d installed\n", cpu); + return ret; +} + +static __init int kvm_setup_timer(void) +{ + if (IS_HV_GM()) + return 0; + + return kvm_setup_sw_timer(); +} +early_initcall(kvm_setup_timer); + +static void kvm_teardown_timer(int cpu) +{ + struct clock_event_device *evt; + evt = &per_cpu(kvm_clock_events, cpu); + kvm_free_virq(KVM_VIRQ_TIMER, cpu, evt); +} + +static void kvm_setup_cpu_clockevents(void) +{ + BUG_ON(preemptible()); + + clockevents_register_device(this_cpu_ptr(&kvm_clock_events)); + __this_cpu_write(kvm_clock_inited, true); +} + +/* FIXME: should be implemented some other 
way */ +void kvm_wait_timer_tick(void) +{ + unsigned long start_jiffies; + + start_jiffies = jiffies; + do { + barrier(); + } while (jiffies == start_jiffies); +} + +void kvm_timer_resume(void) +{ + int cpu = smp_processor_id(); + + DebugKVM("started on CPU #%d\n", cpu); + stop_early_timer_interrupt = 1; + if (!__this_cpu_read(kvm_clock_inited)) + return; + clockevents_shutdown(this_cpu_ptr(&kvm_clock_events)); + clocksource_unregister(&kvm_clocksource); + __this_cpu_write(kvm_clock_inited, false); + kvm_teardown_timer(cpu); +} + +static int +kvm_timer_panic(struct notifier_block *this, unsigned long event, void *ptr) +{ + kvm_timer_resume(); + + return NOTIFY_DONE; +} + +static struct notifier_block resume_block = { + .notifier_call = kvm_timer_panic, +}; + +__init void kvm_time_init_clockevents(void) +{ + int cpu = smp_processor_id(); + int ret; + + kvm_virqs_init(cpu); + /* Local APIC support on guest is not ready at present time, */ + /* so temporarly disable APIC timer */ + disable_apic_timer = true; +#ifdef CONFIG_EPIC + /* Same with CEPIC paravirt model. 
Do not disable timer for HW EPIC */ + disable_epic_timer = true; +#endif + + ret = kvm_early_setup_timer(cpu); + if (ret) { + pr_err("%s(): could not setup guest timer, error %d\n", + __func__, ret); + clocksource_unregister(&kvm_clocksource); + return; + } + kvm_setup_cpu_clockevents(); + atomic_notifier_chain_register(&panic_notifier_list, &resume_block); +} + +__init void kvm_time_init_clocksource(void) +{ + struct timespec64 tp; + + clocksource_register_hz(&kvm_clocksource, NSEC_PER_SEC); + + /* Set initial system time with full resolution */ + kvm_read_wallclock(&tp); + do_settimeofday64(&tp); +} + +__init void kvm_time_init(void) +{ + native_time_init(); + + if (IS_HV_GM()) + return; + + timer_interrupt_set = true; + if (timer_interrupt_set) + return; + + kvm_time_init_clockevents(); + kvm_time_init_clocksource(); +} + +void kvm_time_shutdown(void) +{ + if (IS_HV_GM()) + return; + + kvm_timer_resume(); +} diff --git a/arch/e2k/kvm/guest/time.h b/arch/e2k/kvm/guest/time.h new file mode 100644 index 000000000000..0cccd30d8ed8 --- /dev/null +++ b/arch/e2k/kvm/guest/time.h @@ -0,0 +1,19 @@ +#ifndef __ASM_KVM_TIME_H +#define __ASM_KVM_TIME_H + +#ifdef __KERNEL__ +#include +#include + +struct clock_event_device; + +extern u64 kvm_clocksource_read(void); +extern void kvm_timer_resume(void); +extern void clockevents_shutdown(struct clock_event_device *dev); + +extern int arch_dup_task_struct(struct task_struct *dst, + struct task_struct *src); + +#endif /* __KERNEL__ */ + +#endif /* __ASM_KVM_TIME_H */ diff --git a/arch/e2k/kvm/guest/tlbflush.c b/arch/e2k/kvm/guest/tlbflush.c new file mode 100644 index 000000000000..35b769b12654 --- /dev/null +++ b/arch/e2k/kvm/guest/tlbflush.c @@ -0,0 +1,152 @@ +/* + * TLB flushing support on paravirt guest kernel + * + * Memroy access of paravirt guest is also cached in tlb, therefore guest + * needs to flush tlb when editing page tables. 
+ * But guest kernel manages its own gest page tables, whereas hardware + * uses shadow page tables for real memery access. Therefore, wee need + * to synchronize guest and shadow page tables when flushing tlb. + * Syncronization is provided by host. + * + * Copyright 2020 Andrey A. Alekhin (alekhin_a@mcst.ru) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef DEBUG_TLB_MODE +#undef DebugTLB +#define DEBUG_TLB_MODE 0 /* TLB FLUSHES */ +#define DebugTLB(...) DebugPrint(DEBUG_TLB_MODE, ##__VA_ARGS__) + +static void pv_flush_tlb_range(e2k_addr_t start_gva, e2k_addr_t end_gva) +{ + unsigned long flags; + bool fl_c_needed = cpu_has(CPU_HWBUG_TLB_FLUSH_L1D); + + if (IS_ENABLED(CONFIG_KVM_PARAVIRT_TLB_FLUSH)) { + flush_TLB_page_begin(); + raw_all_irq_save(flags); + HYPERVISOR_flush_tlb_range(start_gva, end_gva); + if (fl_c_needed) + __E2K_WAIT(_fl_c); + raw_all_irq_restore(flags); + flush_TLB_page_end(); + } +} + +void kvm_pv_flush_tlb_all(void) +{ + DebugTLB("Flush all mm address space CPU #%d\n", + raw_smp_processor_id()); + + preempt_disable(); + + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); + pv_flush_tlb_range(0, E2K_VA_SIZE); + + preempt_enable(); +} + +void kvm_pv_flush_tlb_mm(struct mm_struct *mm) +{ + DebugTLB("Flush all mm address space context 0x%lx CPU #%d\n", + CTX_HARDWARE(mm->context.cpumsk[raw_smp_processor_id()]), + raw_smp_processor_id()); + + preempt_disable(); + + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); + pv_flush_tlb_range(0, E2K_VA_SIZE); + + preempt_enable(); +} + +void kvm_pv_flush_tlb_page(struct mm_struct *mm, e2k_addr_t addr) +{ + DebugTLB("Flush address 0x%lx context 0x%lx CPU #%d\n", + PAGE_ALIGN_UP(addr), + CTX_HARDWARE(mm->context.cpumsk[raw_smp_processor_id()]), + raw_smp_processor_id()); + + preempt_disable(); + + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); + pv_flush_tlb_range(addr, addr); + + preempt_enable(); +} + +void kvm_pv_flush_tlb_range(struct mm_struct *mm, e2k_addr_t 
start, + e2k_addr_t end) +{ + const long pages_num = (PAGE_ALIGN_DOWN(end) - PAGE_ALIGN_UP(start)) + / PAGE_SIZE; + KVM_BUG_ON(start > end); + DebugTLB("Flush address range start 0x%lx end 0x%lx context 0x%lx " + "CPU #%d\n", PAGE_ALIGN_UP(start), PAGE_ALIGN_DOWN(end), + CTX_HARDWARE(mm->context.cpumsk[raw_smp_processor_id()]), + raw_smp_processor_id()); + + preempt_disable(); + + count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, pages_num); + pv_flush_tlb_range(start, end); + + preempt_enable(); +} + +void kvm_pv_flush_tlb_kernel_range(e2k_addr_t start, e2k_addr_t end) +{ + const long pages_num = (PAGE_ALIGN_DOWN(end) - PAGE_ALIGN_UP(start)) + / PAGE_SIZE; + KVM_BUG_ON(start > end); + DebugTLB("Flush kernel address range start 0x%lx end 0x%lx CPU #%d\n", + PAGE_ALIGN_UP(start), PAGE_ALIGN_DOWN(end), + raw_smp_processor_id()); + + preempt_disable(); + + count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, pages_num); + pv_flush_tlb_range(start, end); + + preempt_enable(); +} + +void kvm_pv_flush_pmd_tlb_range(struct mm_struct *mm, e2k_addr_t start, + e2k_addr_t end) +{ + KVM_BUG_ON(start > end); + + /* + * Do not need real flush here for paravirt kernel. + * Page tables are synchronized automatically and full tlb flush + * occures when guest tries to edit pmd level of guest pts. 
+ */ +} + +void kvm_pv_flush_tlb_range_and_pgtables(struct mm_struct *mm, + e2k_addr_t start, + e2k_addr_t end) +{ + const long pages_num = (PAGE_ALIGN_DOWN(end) - PAGE_ALIGN_UP(start)) + / PAGE_SIZE; + KVM_BUG_ON(start > end); + DebugTLB("Flush kernel address range start 0x%lx end 0x%lx CPU #%d\n", + PAGE_ALIGN_UP(start), PAGE_ALIGN_DOWN(end), + raw_smp_processor_id()); + + preempt_disable(); + + count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, pages_num); + pv_flush_tlb_range(start, end); + + preempt_enable(); +} diff --git a/arch/e2k/kvm/guest/traps.c b/arch/e2k/kvm/guest/traps.c new file mode 100644 index 000000000000..03c6a085fba2 --- /dev/null +++ b/arch/e2k/kvm/guest/traps.c @@ -0,0 +1,430 @@ +/* + * Copyright (C) 2015 MCST + * Designed by Salavat S. Guiliazov (atic@mcst.ru) + * + * KVM guest traps handling + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifdef CONFIG_USE_AAU +#include +#endif + +#include + +#include "process.h" +#include "irq.h" +#include "io.h" +#include "pic.h" + +#undef DEBUG_GUEST_TRAPS +#undef DebugGT +#define DEBUG_GUEST_TRAPS 0 /* guest traps trace */ +#define DebugGT(fmt, args...) \ +({ \ + if (DEBUG_GUEST_TRAPS) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_GUEST_INTERRUPTS +#undef DebugINT +#define DEBUG_GUEST_INTERRUPTS 0 /* guest interrupts trace */ +#define DebugINT(fmt, args...) \ +({ \ + if (DEBUG_GUEST_INTERRUPTS) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_GUEST_DIRECT_INTR +#undef DebugDINT +#define DEBUG_GUEST_DIRECT_INTR 0 /* guest interrupts trace */ +#define DebugDINT(fmt, args...) \ +({ \ + if (DEBUG_GUEST_DIRECT_INTR) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_GUEST_HS_MODE +#undef DebugGHS +#define DEBUG_GUEST_HS_MODE 0 /* Hard Stack expantions */ +#define DebugGHS(fmt, args...) 
\ +({ \ + if (DEBUG_GUEST_HS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_MMIO_MODE +#undef DebugMMIO +#define DEBUG_KVM_MMIO_MODE 0 /* kernel KVM MMIO debugging */ +#define DebugMMIO(fmt, args...) \ +({ \ + if (DEBUG_KVM_MMIO_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + + +/* + * The function handles page fault trap on address inside guest kernel: + * IO remapping area; + * VGA VRAM area + * The function returnes 0, if it is not KVM MMIO request of guest kernel + * and not zero value, if the fault was handled (probably with error) + */ +unsigned long kvm_do_mmio_page_fault(struct pt_regs *regs, + trap_cellar_t *tcellar) +{ + e2k_addr_t addr; + tc_cond_t condition; + tc_opcode_t opcode; + int fmt; + u64 data; + int size; + bool is_write; + + addr = tcellar->address; + DebugMMIO("started for address 0x%lx\n", addr); + + if (!kernel_mode(regs)) { + /* trap on user and cannot be to IO */ + return 0; /* not handled */ + } + if (likely(addr >= GUEST_VMALLOC_START && addr < GUEST_VMALLOC_END)) { + struct vm_struct *vm; + + vm = find_io_vm_area((const void *)addr); + if (unlikely(vm == NULL)) { + DebugMMIO("could not find MMIO address 0x%lx into " + "IO remapping areas\n", + addr); + return 0; /* not handled */ + } + if (unlikely(!(vm->flags & VM_IOREMAP))) { + DebugMMIO("MMIO address 0x%lx is not from IO " + "remapping area\n", + addr); + return 0; /* not handled */ + } + DebugMMIO("address 0x%lx is from MMIO remapping space\n", + addr); + } else if (likely(KVM_IS_VGA_VRAM_VIRT_ADDR(addr))) { + DebugMMIO("address 0x%lx is from VGA VMRAM mapping space\n", + addr); + } else { + /* This address is not competence of the function */ + DebugMMIO("address 0x%lx is not from MMIO or VGA VRAM\n", + addr); + return 0; /* not handled */ + } + condition = tcellar->condition; + WARN_ON(AS(condition).spec); /* speculative access to IO memory */ + is_write = AS(condition).store; + AW(opcode) = AS(tcellar->condition).opcode; + fmt = 
AS(opcode).fmt; + WARN_ON((unsigned int)fmt > 5 || fmt == 0); + size = 1 << (fmt - 1); + if (size > sizeof(u64)) + size = sizeof(u64); + data = tcellar->data; + + DebugMMIO("will pass to QEMU KVM MMIO or VGA VRAM request: " + "%s address 0x%lx, data 0x%llx, size %d, mas 0x%x\n", + (is_write) ? "write to" : "read from", + addr, data, size, AS(condition).mas); + + data = kvm_handle_guest_mmio((void __iomem *)addr, + data, size, is_write); + if (is_write) { + DebugMMIO("writed to address 0x%lx data 0x%llx, size %d\n", + addr, data, size); + return true; /* MMIO request handled */ + } + DebugMMIO("read from address 0x%lx data 0x%llx, size %d\n", + addr, data, size); + /* FIXME: here should be recovery of interrupted MMU load operation */ + WARN_ON(true); + + return true; /* MMIO request handled */ +} + +/* + * The function handles kvm guest traps on hardware procedure stack overflow + * or underflow. If stack overflow occured then the procedure stack will be + * expanded. In the case of stack underflow it will be constricted + */ + +static int kvm_proc_stack_bounds(struct pt_regs *regs) +{ + WARN_ONCE(1, "implement me"); + return -ENOSYS; +} +static int kvm_chain_stack_bounds(struct pt_regs *regs) +{ + WARN_ONCE(1, "implement me"); + return -ENOSYS; +} + +int kvm_do_hw_stack_bounds(struct pt_regs *regs, + bool proc_bounds, bool chain_bounds) +{ + int ret = 0; + + if (proc_bounds) + ret |= kvm_proc_stack_bounds(regs); + if (chain_bounds) + ret |= kvm_chain_stack_bounds(regs); + return ret; +} + +int kvm_host_apply_psp_delta_to_signal_stack(unsigned long base, + unsigned long size, unsigned long start, + unsigned long end, unsigned long delta) +{ + int ret; + + ret = HYPERVISOR_apply_psp_bounds(base, size, start, end, delta); + if (ret != 0) { + pr_err("%s(): could not apply updated procedure stack " + "boundaries, error %d\n", + __func__, ret); + } + return ret; +} + +int kvm_host_apply_pcsp_delta_to_signal_stack(unsigned long base, + unsigned long size, unsigned long 
start, + unsigned long end, unsigned long delta) +{ + int ret; + + ret = HYPERVISOR_apply_pcsp_bounds(base, size, start, end, delta); + if (ret != 0) { + pr_err("%s(): could not apply updated chain stack " + "boundaries, error %d\n", + __func__, ret); + } + return ret; +} + +#ifdef CONFIG_VIRQ_VCPU_INJECTION +/* + * Real VIRQs handlers are guest kernel threads bind to the VCPU + * So here it need only set reschedule flag to switch to VIRQs handler + * thread before return from common traps handler. + * The function should return number of handled interrupts. + * FIXME: it need check on VIRQs waiting for handling to do not make + * unnecessary processes switch + */ +static inline int kvm_virq_vcpu_intr_handler(int irq, struct pt_regs *regs) +{ + pr_err("%s(): IRQ #%d %s should be already handled\n", + __func__, irq, kvm_get_virq_name(virq_from_irq(irq))); + return false; +} +static inline int kvm_virq_vcpu_intr_thread(int irq, struct pt_regs *regs) +{ + set_tsk_need_resched(current); + return true; +} +#else /* ! 
CONFIG_VIRQ_VCPU_INJECTION */ +static inline int kvm_virq_vcpu_intr_handler(int irq, struct pt_regs *regs) +{ + /* VIRQ VCPU and VIRQs handler can not be used */ + return 0; +} +static inline int kvm_virq_vcpu_intr_thread(int irq, struct pt_regs *regs) +{ + /* VIRQ VCPU and VIRQs handler thread can not be used */ + return 0; +} +#endif /* CONFIG_VIRQ_VCPU_INJECTION */ + +#ifdef CONFIG_DIRECT_VIRQ_INJECTION +static inline int kvm_direct_virq_intr_handler(int irq, struct pt_regs *regs) +{ + kvm_virq_info_t *virq_info; + irq_handler_t handler; + void *dev; + struct pt_regs *old_regs; + int virq; + int virqs_num; + int handled = 0; + int ret; + + virq_info = virq_info_from_irq(irq); + virq = virq_from_irq(irq); + virqs_num = atomic_read(virq_info->count); + DebugDINT("started for irq #%d %s, pending interrupts %d\n", + irq, kvm_get_virq_name(virq), virqs_num); + if (virqs_num <= 0) + return 0; + + handler = virq_info->handler; + BUG_ON(handler == NULL); + dev = virq_info->dev_id; + old_regs = set_irq_regs(regs); + + do { + ret = handler(virq, dev); + if (ret == IRQ_NONE) { + /* IRQ could not be handled: */ + /* other IRQ is being handled and EOI is not yet */ + /* sent, because of new interrupt recevied while */ + /* handle_IRQ_event() enable IRQs */ + /* In this case should be one more handler below */ + /* on stack and it handle all pending IRQs */ + goto busy; + } else if (ret != IRQ_HANDLED) { + pr_err("%s(): failed, returns error %d\n", + __func__, ret); + } else { + DebugDINT("irq #%d %s handled\n", + irq, kvm_get_virq_name(virq)); + handled += 1; + } + virqs_num = atomic_dec_return(virq_info->count); + } while (virqs_num > 0); + +busy: + set_irq_regs(old_regs); + + return handled; +} +#else /* ! 
CONFIG_DIRECT_VIRQ_INJECTION */ +static inline int kvm_direct_virq_intr_handler(int irq, struct pt_regs *regs) +{ + /* direct VIRQs injection mode turn off, so cannot use the handler */ + return 0; +} +#endif /* CONFIG_DIRECT_VIRQ_INJECTION */ + +irqreturn_t kvm_do_interrupt(struct pt_regs *regs) +{ + int irq; + int handled_num = 0; + int cpu = smp_processor_id(); + + for (irq = 0; irq < kvm_nr_irqs; irq++) { + kvm_irq_info_t *info; + kvm_virq_info_t *virq_info; + int handled; + int virqs_num; + + info = info_for_irq(irq); + if (unlikely(info->type == IRQT_UNBOUND)) { + continue; + } else if (!is_irq_active(irq)) { + continue; + } else if (cpu_from_irq(irq) != cpu) { + continue; + } else if (unlikely(info->type != IRQT_VIRQ)) { + pr_err("%s(): invalid type %d of virtual IRQ #%d " + "cannot be handled\n", + __func__, info->type, irq); + continue; + } + virq_info = virq_info_from_irq(irq); + virqs_num = atomic_read(virq_info->count); + if (unlikely(virqs_num <= 0)) { + DebugINT("none pending IRQs #%d %s\n", + irq, kvm_get_virq_name(virq_from_irq(irq))); + continue; + } + switch (virq_info->mode) { + case BY_DIRECT_INJ_VIRQ_MODE: + handled = kvm_direct_virq_intr_handler(irq, regs); + break; + default: + pr_err("%s(): invalid handling mode of IRQ #%d %s\n", + __func__, irq, + kvm_get_virq_name(virq_from_irq(irq))); + handled = 0; + } + if (likely(handled)) { + DebugINT("handled %d interrupts of IRQ #%d %s from " + "pending %d interrupts\n", + handled, irq, + kvm_get_virq_name(virq_from_irq(irq)), + virqs_num); + handled_num += handled; + continue; + } + pr_err("%s(): could not handle none of pending %d interrupts " + "for IRQ #%d %s\n", + __func__, virqs_num, irq, + kvm_get_virq_name(virq_from_irq(irq))); + } + DebugINT("total handled interrupts number is %d\n", handled_num); + HYPERVISOR_virqs_handled(); + if (handled_num) + return IRQ_HANDLED; + else + return IRQ_NONE; +} + +irqreturn_t guest_do_interrupt(struct pt_regs *regs) +{ + return 
guest_do_interrupt_pic(regs); +} + +/* + * pseudo IRQ to emulate SysRq on guest kernel + */ +void kvm_sysrq_showstate_interrupt(struct pt_regs *regs) +{ + ack_pic_irq(); + /* dump stacks uses NMI to interrupt other CPUs and dump current */ + /* process state running on the CPU */ + raw_all_irq_enable(); + + if (kvm_get_vcpu_state()->do_dump_state) + show_state_filter(0); + if (kvm_get_vcpu_state()->do_dump_stack) + dump_stack(); + HYPERVISOR_vcpu_show_state_completion(); +} + +void __init_recv kvm_init_system_handlers_table(void) +{ + kvm_init_system_handlers_table_pic(); +} + +void __init_recv kvm_init_system_handlers_table_apic(void) +{ + /* VIRQ vector to emulate SysRq on guest kernel */ + setup_PIC_vector_handler(SYSRQ_SHOWSTATE_APIC_VECTOR, + kvm_sysrq_showstate_interrupt, 1, + "kvm_sysrq_showstate_interrupt"); + setup_PIC_vector_handler(KVM_NMI_APIC_VECTOR, + (void (*)(struct pt_regs *))nmi_call_function_interrupt, + 1, + "nmi_call_function_interrupt"); +} + +void __init_recv kvm_init_system_handlers_table_epic(void) +{ + /* VIRQ vector to emulate SysRq on guest kernel */ + setup_PIC_vector_handler(SYSRQ_SHOWSTATE_EPIC_VECTOR, + kvm_sysrq_showstate_interrupt, 1, + "kvm_sysrq_showstate_interrupt"); + setup_PIC_vector_handler(KVM_NMI_EPIC_VECTOR, + (void (*)(struct pt_regs *))nmi_call_function_interrupt, + 1, + "nmi_call_function_interrupt"); +} diff --git a/arch/e2k/kvm/guest/traps.h b/arch/e2k/kvm/guest/traps.h new file mode 100644 index 000000000000..bb65b9cd8426 --- /dev/null +++ b/arch/e2k/kvm/guest/traps.h @@ -0,0 +1,42 @@ +/* + * + * Copyright (C) 2012 MCST + * + * Defenition of kvm guest kernel traps handling routines. 
+ */ + +#ifndef _E2K_KVM_GUEST_TRAPS_H +#define _E2K_KVM_GUEST_TRAPS_H + +#include +#include +#include +#include +#include + +#include "cpu.h" + +#define KVM_SAVE_GREGS_AND_SET(thread_info) \ +({ \ + thread_info_t *__ti = (thread_info); \ + \ + /* user global registers were saved by host kernel and will */ \ + /* be restored by host */ \ + ONLY_SET_KERNEL_GREGS(__ti); \ +}) + +#define kvm_from_user_IP(cr0_hi) \ + is_from_user_IP(cr0_hi, GUEST_TASK_SIZE) +#define kvm_from_kernel_IP(cr0_hi) \ + is_from_kernel_IP(cr0_hi, GUEST_TASK_SIZE) +#define kvm_user_mode(regs) kvm_from_user_IP((regs)->crs.cr0_hi) +#define kvm_call_from_user_mode() \ +({ \ + e2k_cr0_hi_t cr0_hi; \ + bool ret; \ + cr0_hi = NATIVE_READ_CR0_HI_REG(); \ + ret = kvm_from_user_IP(cr0_hi); \ + ret; \ +}) + +#endif /* _E2K_KVM_GUEST_TRAPS_H */ diff --git a/arch/e2k/kvm/guest/ttable.c b/arch/e2k/kvm/guest/ttable.c new file mode 100644 index 000000000000..4499c7f16747 --- /dev/null +++ b/arch/e2k/kvm/guest/ttable.c @@ -0,0 +1,933 @@ +/* + * + * Copyright (C) 2001 MCST + */ + +/* + * Simple E2K KVM guest trap table. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define CREATE_TRACE_POINTS +#include + +#include "cpu.h" +#include "traps.h" +#include "fast_syscalls.h" +#include "../../kernel/ttable-inline.h" + +/**************************** DEBUG DEFINES *****************************/ + + +#undef DEBUG_GUEST_TRAPS +#undef DebugGT +#define DEBUG_GUEST_TRAPS 0 /* guest traps trace */ +#define DebugGT(fmt, args...) 
\ +({ \ + if (DEBUG_GUEST_TRAPS) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +bool debug_ustacks = false; + +#ifdef VCPU_REGS_DEBUG +bool vcpu_regs_trace_on = false; +EXPORT_SYMBOL(vcpu_regs_trace_on); +#endif /* VCPU_REGS_DEBUG */ + +/*********************************************************************/ +static inline void switch_to_guest_kernel_stacks(void) +{ + if (KVM_READ_SBR_REG_VALUE() < GUEST_TASK_SIZE) { + thread_info_t *thread_info; + hw_stack_t *hw_stacks; + e2k_usd_hi_t usd_hi; + e2k_usd_lo_t usd_lo; + e2k_psp_hi_t psp_hi; + e2k_pcsp_hi_t pcsp_hi; + + /* switch to kernel data stack */ + thread_info = current_thread_info(); + usd_lo = thread_info->k_usd_lo; + usd_hi = thread_info->k_usd_hi; + KVM_WRITE_USD_REG(usd_hi, usd_lo); + KVM_WRITE_SBR_REG_VALUE( + ((u64)current->stack + KERNEL_C_STACK_SIZE)); + /* increment hardware stacks sizes on kernel resident part */ + hw_stacks = &thread_info->u_hw_stack; + psp_hi = KVM_READ_PSP_HI_REG(); + pcsp_hi = KVM_READ_PCSP_HI_REG(); +#if 0 /* now guest kernel does not increment hardware stacks on guest */ + /* kernel reserve part */ + psp_hi.PSP_hi_size += kvm_get_hw_ps_guest_limit(hw_stacks); + pcsp_hi.PCSP_hi_size += kvm_get_hw_pcs_guest_limit(hw_stacks); + KVM_FLUSHCPU; + KVM_WRITE_PSP_HI_REG(psp_hi); + KVM_WRITE_PCSP_HI_REG(pcsp_hi); + kvm_set_hw_ps_guest_reserved(hw_stacks, + kvm_get_hw_ps_guest_limit(hw_stacks)); + kvm_set_hw_pcs_guest_reserved(hw_stacks, + kvm_get_hw_pcs_guest_limit(hw_stacks)); +#endif /* 0 */ /* now guest kernel does not increment hardware stacks */ + /* on guest kernel reserve part */ + } +} +void kvm_correct_trap_psp_pcsp(struct pt_regs *regs, + thread_info_t *thread_info) +{ + pr_err("%s(): is not implemented\n", __func__); +} +void kvm_correct_scall_psp_pcsp(struct pt_regs *regs, + thread_info_t *thread_info) +{ + pr_err("%s(): is not implemented\n", __func__); +} + +void kvm_correct_trap_return_ip(struct pt_regs *regs, unsigned long return_ip) +{ + int ret; + + 
native_correct_trap_return_ip(regs, return_ip); + ret = HYPERVISOR_correct_trap_return_ip(return_ip); + if (ret) { + pr_err("%s(): hypervisor could not coorect IP to return, " + "error %d\n", + __func__, ret); + do_exit(SIGKILL); + } +} + +static void kvm_guest_save_sbbp(u64 *sbbp) +{ + int i; + + for (i = 0; i < SBBP_ENTRIES_NUM; i++) { + sbbp[i] = KVM_READ_SBBP_REG_VALUE(i); + } +} + +/* + * The function return boolean value: there is interrupt (MI or NMI) as one + * of trap to handle + */ +static unsigned long kvm_guest_save_tirs(trap_pt_regs_t *trap) +{ + int tir_no, TIRs_num; + unsigned long TIR_hi, TIR_lo; + unsigned long all_interrupts = 0; + + DebugGT("started\n"); + + trap->nr_TIRs = KVM_READ_TIRs_num(); + for (tir_no = trap->nr_TIRs; tir_no >= 0; tir_no--) { + TIR_hi = KVM_READ_TIR_HI_REG_VALUE(); + TIR_lo = KVM_READ_TIR_LO_REG_VALUE(); + trap->TIRs[tir_no].TIR_hi.TIR_hi_reg = TIR_hi; + trap->TIRs[tir_no].TIR_lo.TIR_lo_reg = TIR_lo; + all_interrupts |= TIR_hi; + } + KVM_WRITE_TIR_HI_REG_VALUE(0); + wmb(); /* to wait clearing of TIRs_num counter at nenory */ + TIRs_num = KVM_READ_TIRs_num(); + if (TIRs_num >= 0) { + pr_err("%s(): TIRs registers is not cleared, probably " + "the host had in time to introduce recursive traps\n", + __func__); + } + DebugGT("was saved %d TIRs\n", trap->nr_TIRs - tir_no); + return all_interrupts; +} + +static void kvm_guest_save_trap_cellar(pt_regs_t *regs) +{ + trap_pt_regs_t *trap = regs->trap; + int tc_count = KVM_READ_MMU_TRAP_COUNT(); + int tc_entries = tc_count / 3; + int tc_no; + + DebugGT("started, for TC count %d\n", tc_count); + BUG_ON(tc_count % 3 != 0); + BUG_ON(tc_entries > MAX_TC_SIZE); + KVM_SAVE_TRAP_CELLAR(regs, trap); + trap->tc_count = tc_count; + trap->curr_cnt = -1; + trap->ignore_user_tc = 0; + trap->tc_called = 0; + trap->from_sigreturn = 0; + CLEAR_CLW_REQUEST_COUNT(regs); + KVM_RESET_MMU_TRAP_COUNT(); + DebugGT("was saved %d TC entries\n", tc_no); +} + +static void kvm_guest_save_stack_regs(pt_regs_t 
*regs) +{ + DebugGT("started\n"); + + /* stacks registers */ + regs->wd.WD_reg = KVM_READ_WD_REG_VALUE(); + regs->stacks.usd_hi.USD_hi_half = KVM_READ_USD_HI_REG_VALUE(); + regs->stacks.usd_lo.USD_lo_half = KVM_READ_USD_LO_REG_VALUE(); + regs->stacks.top = KVM_READ_SBR_REG_VALUE(); + DebugGT("updated USD: base 0x%llx size 0x%x, top 0x%lx\n", + regs->stacks.usd_lo.USD_lo_base, + regs->stacks.usd_hi.USD_hi_size, + regs->stacks.top); + + regs->crs.cr0_hi.CR0_hi_half = KVM_READ_CR0_HI_REG_VALUE(); + regs->crs.cr0_lo.CR0_lo_half = KVM_READ_CR0_LO_REG_VALUE(); + regs->crs.cr1_hi.CR1_hi_half = KVM_READ_CR1_HI_REG_VALUE(); + regs->crs.cr1_lo.CR1_lo_half = KVM_READ_CR1_LO_REG_VALUE(); + + regs->stacks.psp_hi.PSP_hi_half = KVM_READ_PSP_HI_REG_VALUE(); + regs->stacks.psp_lo.PSP_lo_half = KVM_READ_PSP_LO_REG_VALUE(); + regs->stacks.pshtp.PSHTP_reg = KVM_READ_PSHTP_REG_VALUE(); + DebugGT("saved PSP: base 0x%llx size 0x%x, ind 0x%x, pshtp 0x%llx\n", + regs->stacks.psp_lo.PSP_lo_base, + regs->stacks.psp_hi.PSP_hi_size, + regs->stacks.psp_hi.PSP_hi_ind, + GET_PSHTP_MEM_INDEX(regs->stacks.pshtp)); + regs->stacks.pcsp_hi.PCSP_hi_half = KVM_READ_PCSP_HI_REG_VALUE(); + regs->stacks.pcsp_lo.PCSP_lo_half = KVM_READ_PCSP_LO_REG_VALUE(); + regs->stacks.pcshtp = KVM_READ_PCSHTP_REG_SVALUE(); + DebugGT("saved PCSP: base 0x%llx size 0x%x, ind 0x%x, pcshtp 0x%llx\n", + regs->stacks.pcsp_lo.PCSP_lo_base, + regs->stacks.pcsp_hi.PCSP_hi_size, + regs->stacks.pcsp_hi.PCSP_hi_ind, + PCSHTP_SIGN_EXTEND(regs->stacks.pcshtp)); + /* Control transfer registers */ + regs->ctpr1.CTPR_reg = KVM_READ_CTPR1_REG_VALUE(); + regs->ctpr2.CTPR_reg = KVM_READ_CTPR2_REG_VALUE(); + regs->ctpr3.CTPR_reg = KVM_READ_CTPR3_REG_VALUE(); + /* Cycles control registers */ + regs->lsr = KVM_READ_LSR_REG_VALUE(); + regs->ilcr = KVM_READ_ILCR_REG_VALUE(); +} +#ifdef CONFIG_USE_AAU +static void kvm_save_guest_trap_aau_regs(trap_pt_regs_t *trap, e2k_aau_t *aau) +{ + e2k_aasr_t aasr; + bool aau_fault = false; + int i; + + aasr = 
kvm_read_aasr_reg(); + KVM_SAVE_AAU_MASK_REGS(aau, aasr); + if (AS(aasr).iab) + KVM_SAVE_AADS(aau); + for (i = 0; i <= trap->nr_TIRs; i++) { + if (GET_AA_TIRS(trap->TIRs[i].TIR_hi.TIR_hi_reg)) { + aau_fault = true; + break; + } + } + + if (AS(aasr).iab) { + /* get descriptors & auxiliary registers */ + kvm_get_array_descriptors(aau); + aau->aafstr = kvm_read_aafstr_reg_value(); + } + + if (AS(aasr).stb) { + /* get synchronous part of APB */ + kvm_get_synchronous_part(aau); + } +} +#else /* ! CONFIG_USE_AAU */ +static inline void +kvm_save_guest_trap_aau_regs(struct kvm_vcpu *vcpu, pt_regs_t *regs) +{ +} +#endif /* ! CONFIG_USE_AAU */ + +/* + * Real restoring of stack and other CPU registers is made by host + * Here it need modify VCPU stack registers which were updated by guest + * into pt_regs structure. + * VCPU registers emulated as memory copy and write to register updated + * memory copy of register and set flag 'updated'. This flag is visible + * to host, so host can update real CPU register state probably through + * own pt_regs structure + * FIXME: probably as optimization it need add 'pt_regs updates flag' + * to mark registers or other values which updated into structure by guest + * and should be updated or take into account by host. 
+ */ +static void kvm_guest_restore_stack_regs(pt_regs_t *regs) +{ + u64 updated = 0; + + DebugGT("started\n"); + + KVM_WRITE_USD_HI_REG_VALUE(regs->stacks.usd_hi.USD_hi_half); + KVM_WRITE_USD_LO_REG_VALUE(regs->stacks.usd_lo.USD_lo_half); + KVM_WRITE_SBR_REG_VALUE(regs->stacks.top); + UPDATE_CPU_REGS_FLAGS(updated, USD_UPDATED_CPU_REGS); + DebugGT("updated USD: base 0x%llx size 0x%x, top 0x%lx\n", + regs->stacks.usd_lo.USD_lo_base, + regs->stacks.usd_hi.USD_hi_size, + regs->stacks.top); + + /* hardware stacks cannot be updated or were already updated */ + DebugGT("regs PSP: base 0x%llx size 0x%x, ind 0x%x\n", + regs->stacks.psp_lo.PSP_lo_base, + regs->stacks.psp_hi.PSP_hi_size, + regs->stacks.psp_hi.PSP_hi_ind); + DebugGT("regs PCSP: base 0x%llx size 0x%x, ind 0x%x\n", + regs->stacks.pcsp_lo.PCSP_lo_base, + regs->stacks.pcsp_hi.PCSP_hi_size, + regs->stacks.pcsp_hi.PCSP_hi_ind); + + /* chain registers can be updated by guest: + * - user data stack expansion (but only cr1_hi.ussz) + * - fixing page fault (cr0_hi.ip) */ + KVM_WRITE_CR0_HI_REG_VALUE(regs->crs.cr0_hi.CR0_hi_half); + KVM_WRITE_CR0_LO_REG_VALUE(regs->crs.cr0_lo.CR0_lo_half); + KVM_WRITE_CR1_HI_REG_VALUE(regs->crs.cr1_hi.CR1_hi_half); + KVM_WRITE_CR1_LO_REG_VALUE(regs->crs.cr1_lo.CR1_lo_half); + UPDATE_CPU_REGS_FLAGS(updated, CRS_UPDATED_CPU_REGS); + DebugGT("updated CR: CR1_lo.wbs 0x%x, cr1_hi.ussz 0x%x\n", + regs->crs.cr1_lo.CR1_lo_wbs, + regs->crs.cr1_hi.CR1_hi_ussz); + + /* Control transfer registers will be restored by host */ + + /* Cycles control registers will be restored by host */ + + /* put updates flags to be visible by host */ + PUT_UPDATED_CPU_REGS_FLAGS(updated); +} + +/* + * Restore virtual copy of stack & CRs registers state based on + * pt_regs structure in thr hope that this structure will always + * updated if the registers should be changed in the system call + * FIXME: it should be supported only for certain cases (syscalls) + */ +#ifdef RESTORE_SYSCALL_REGS +static void 
kvm_restore_syscall_stack_regs(pt_regs_t *regs) +{ + u64 updated = 0; + + DebugGT("started\n"); + + /* user data stacks registers */ + { + unsigned long sbr = KVM_READ_SBR_REG_VALUE(); + unsigned long usd_lo = KVM_READ_USD_LO_REG_VALUE(); + unsigned long usd_hi = KVM_READ_USD_HI_REG_VALUE(); + + if (usd_lo != regs->stacks.usd_lo.USD_lo_half || + usd_hi != regs->stacks.usd_hi.USD_hi_half || + sbr != regs->stacks.top) { + KVM_WRITE_USD_HI_REG_VALUE(regs->stacks.usd_hi.USD_hi_half); + KVM_WRITE_USD_LO_REG_VALUE(regs->stacks.usd_lo.USD_lo_half); + KVM_WRITE_SBR_REG_VALUE(regs->stacks.top); + UPDATE_CPU_REGS_FLAGS(updated, USD_UPDATED_CPU_REGS); + DebugGT("updated USD: base 0x%llx size 0x%x, top 0x%lx\n", + regs->stacks.usd_lo.USD_lo_base, + regs->stacks.usd_hi.USD_hi_size, + regs->stacks.top); + } + } + + { + unsigned long psp_lo = KVM_READ_PSP_LO_REG_VALUE(); + e2k_psp_hi_t psp_hi = KVM_READ_PSP_HI_REG(); + e2k_pshtp_t pshtp = regs->stacks.pshtp; + unsigned long pcsp_lo = KVM_READ_PCSP_LO_REG_VALUE(); + e2k_pcsp_hi_t pcsp_hi = KVM_READ_PCSP_HI_REG(); + e2k_pcshtp_t pcshtp = regs->stacks.pcshtp; + + /* hardware stacks cannot be updated or were already updated */ + if (psp_lo != regs->stacks.psp_lo.PSP_lo_half || + psp_hi.PSP_hi_ind != + regs->stacks.psp_hi.PSP_hi_ind - + GET_PSHTP_MEM_INDEX(pshtp) || + psp_hi.PSP_hi_size != + regs->stacks.psp_hi.PSP_hi_size) { + pr_err("%s(): proc stack regs updated:\n" + " PSP: base 0x%llx size 0x%x, ind 0x%x " + "PSHTP 0x%llx\n", + __func__, + regs->stacks.psp_lo.PSP_lo_base, + regs->stacks.psp_hi.PSP_hi_size, + regs->stacks.psp_hi.PSP_hi_ind, + GET_PSHTP_MEM_INDEX(pshtp)); + BUG_ON(true); + } + if (pcsp_lo != regs->stacks.pcsp_lo.PCSP_lo_half || + pcsp_hi.PCSP_hi_ind != + regs->stacks.pcsp_hi.PCSP_hi_ind - + PCSHTP_SIGN_EXTEND(pcshtp) || + pcsp_hi.PCSP_hi_size != + regs->stacks.pcsp_hi.PCSP_hi_size) { + pr_err("%s(): chain stack regs updated:\n" + " PCSP: base 0x%llx size 0x%x, ind 0x%x " + "PCSHTP 0x%llx\n", + __func__, + 
regs->stacks.pcsp_lo.PCSP_lo_base, + regs->stacks.pcsp_hi.PCSP_hi_size, + regs->stacks.pcsp_hi.PCSP_hi_ind, + PCSHTP_SIGN_EXTEND(pcshtp)); + BUG_ON(true); + } + } + + { + unsigned long cr0_lo = KVM_READ_CR0_LO_REG_VALUE(); + unsigned long cr0_hi = KVM_READ_CR0_HI_REG_VALUE(); + unsigned long cr1_lo = KVM_READ_CR1_LO_REG_VALUE(); + unsigned long cr1_hi = KVM_READ_CR1_HI_REG_VALUE(); + + /* chain registers can be updated by guest system calls: + * - long jump */ + if (cr0_lo != regs->crs.cr0_lo.CR0_lo_half || + cr0_hi != regs->crs.cr0_hi.CR0_hi_half || + cr1_lo != regs->crs.cr1_lo.CR1_lo_half || + cr1_hi != regs->crs.cr1_hi.CR1_hi_half) { + KVM_WRITE_CR0_HI_REG_VALUE(regs->crs.cr0_hi.CR0_hi_half); + KVM_WRITE_CR0_LO_REG_VALUE(regs->crs.cr0_lo.CR0_lo_half); + KVM_WRITE_CR1_HI_REG_VALUE(regs->crs.cr1_hi.CR1_hi_half); + KVM_WRITE_CR1_LO_REG_VALUE(regs->crs.cr1_lo.CR1_lo_half); + UPDATE_CPU_REGS_FLAGS(updated, CRS_UPDATED_CPU_REGS); + DebugGT("updated CR: CR1_lo.wbs 0x%x, cr1_hi.ussz 0x%x\n", + regs->crs.cr1_lo.CR1_lo_wbs, + regs->crs.cr1_hi.CR1_hi_ussz); + } + } + + /* Control transfer registers will be restored by host */ + + /* Cycles control registers will be restored by host */ + + /* put updates flags to be visible by host */ + PUT_UPDATED_CPU_REGS_FLAGS(updated); +} +#endif /* RESTORE_SYSCALL_REGS */ + +/* + * Trap occured on user or kernel function but on user's stacks + * So, it needs to switch to kernel stacks + * WARNING: host should emulate right state of guest PSR: + * switch interrupts mask control to PSR; + * disable all interrupts mask; + * disable 'sge' mask to prevent stacks bounds traps while guest is + * saving trap context and only guest enable the mask and traps + * after saving completion. 
+ */ +int kvm_trap_handler(void) +{ + pt_regs_t pt_regs; + trap_pt_regs_t trap; + u64 sbbp[SBBP_ENTRIES_NUM]; +#ifdef CONFIG_USE_AAU + e2k_aau_t aau_context; +#endif /* CONFIG_USE_AAU */ + pt_regs_t *regs = &pt_regs; + thread_info_t *thread_info = KVM_READ_CURRENT_REG(); + unsigned long exceptions; + unsigned long irq_flags; + e2k_psr_t user_psr; + bool irqs_under_upsr; + bool in_user_mode; + bool has_irqs = false; + int save_sbbp; + struct task_struct *task = thread_info_task(thread_info); + + DebugGT("started\n"); + +#ifdef CONFIG_USE_AAU + /* AAU context was saved and will be restored by host */ +#endif /* CONFIG_USE_AAU */ + + /* + * All actual pt_regs structures of the process are queued. + * The head of this queue is thread_info->pt_regs pointer, + * it points to the last (current) pt_regs structure. + * The current pt_regs structure points to the previous etc + * Queue is empty before first trap or system call on the + * any process and : thread_info->pt_regs == NULL + */ + trap.flags = 0; + regs->kernel_entry = 0; + regs->next = thread_info->pt_regs; + regs->trap = &trap; + regs->aau_context = &aau_context; + regs->stack_regs_saved = false; + thread_info->pt_regs = regs; + + KVM_SWITCH_TO_KERNEL_UPSR(user_psr, thread_info->upsr, irqs_under_upsr, + false, /* enable IRQs */ + false); /* disable nmi */ + raw_local_irq_save(irq_flags); + + /* + * Setup guest kernel global registers, pointer to the VCPU state + * has been restored by host and other gregs can be cleared, + * so restore anyway its state + */ + KVM_SAVE_GREGS_AND_SET(thread_info); + + /* + * See comments in ttable_entry4() for sc_restart + */ + AW(regs->flags) = 0; + init_guest_traps_handling(regs, true /* user mode trap */); + save_sbbp = current->ptrace || debug_trap; + +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + /* FIXME: secondary space support does not implemented for guest */ + /* trap.flags |= TRAP_RP_FLAG; */ +#endif + + + /* should be before setting user/kernel mode trap on */ + 
kvm_guest_save_stack_regs(regs); + + /* + * See common comments at arch/e2k/kernel/ttable.c user_trap_handler() + * Additional note: this trap handler called by host to handle injected + * traps on guest user or on guest kernel + */ + in_user_mode = kvm_trap_user_mode(regs); + DebugGT("trap on %s\n", (in_user_mode) ? "user" : "kernel"); + + /* + * %sbbp LIFO stack is unfreezed by writing %TIR register, + * so it must be read before TIRs. + */ + if (unlikely(save_sbbp)) { + kvm_guest_save_sbbp(sbbp); + trap.sbbp = sbbp; + } else { + trap.sbbp = NULL; + } + + /* + * Now we can store all needed trap context into the + * current pt_regs structure + */ + exceptions = kvm_guest_save_tirs(&trap); + if (exceptions & (exc_interrupt_mask | exc_nm_interrupt_mask)) + has_irqs = true; + + kvm_guest_save_trap_cellar(regs); + + kvm_save_guest_trap_aau_regs(&trap, &aau_context); + + /* user context was saved, so enable traps on hardware stacks bounds */ + kvm_set_sge(); + + /* unfreeze TIRs & trap cellar on host */ + HYPERVISOR_unfreeze_guest_traps(); + + /* any checkers with BUG() can be run only after unfreezing TIRs */ + kvm_check_vcpu_id(); + KVM_CHECK_IRQ_STATE(user_psr, thread_info->upsr, irqs_under_upsr, + has_irqs, in_user_mode); + if (DEBUG_GUEST_TRAPS) + print_pt_regs(regs); + + raw_local_irq_restore(irq_flags); + + if (unlikely(trap.nr_TIRs < 0)) { + /* guest has nothing traps and handler was called only */ + /* to copy spilled gueest user part of hardware stacks */ + /* from guest kernel stacks */ + ; + if (DEBUG_USER_STACKS_MODE) + debug_ustacks = true; + } else { + parse_TIR_registers(regs, exceptions); + } + + DebugGT("all TIRs parsed\n"); + + /* parse_TIR_registers() returns with interrupts disabled */ + local_irq_enable(); + + if (in_user_mode) { + finish_user_trap_handler(regs, FROM_USER_TRAP); + } else { + local_irq_disable(); + /* + * Dequeue current pt_regs structure and previous + * regs will be now actuale + */ + thread_info->pt_regs = regs->next; + 
regs->next = NULL; + } + + /* FIXME: Stack registers can be updated by not all traps handlers, */ + /* so it need here traps mask & conditional statement before the */ + /* follow restore of stack & system registers */ + kvm_guest_restore_stack_regs(regs); + + KVM_RETURN_TO_USER_UPSR(thread_info->upsr, irqs_under_upsr); + + DebugGT("returns with GUEST_TRAP_HANDLED\n"); + + return GUEST_TRAP_HANDLED; +} + +/* + * WARNING: host should emulate right state of guest PSR: + * switch interrupts mask control to PSR; + * disable all interrupts mask; + * INTERNAL AGREEMENT HOST <-> GUEST: hardware does not update but host should: + * disable 'sge' mask to prevent stacks bounds traps while guest is + * saving trap context and only guest enable the mask and traps + * after saving completion. + */ +static noinline long kvm_guest_sys_call32(long sys_num_and_entry, + u32 arg1, u32 arg2, + u32 arg3, u32 arg4, + u32 arg5, u32 arg6) +{ + pt_regs_t pt_regs; + pt_regs_t *regs = &pt_regs; + system_call_func sys_func; + int sys_num = sys_num_and_entry & 0xffffffff; + int entry = sys_num_and_entry >> 32; + long rval; + + sys_func = sys_call_table_32[sys_num]; + /* set IS 32 system call bit at function label */ + sys_func = (system_call_func) ((unsigned long)sys_func | (1UL << 56)); + + /* SBR should be saved here because of syscall handlers can use */ + /* this value at user_mode() macros to determine source of calling */ + regs->stacks.top = KVM_READ_SBR_REG_VALUE(); + /* USD is restored in restore_hard_sys_calls() */ + regs->stacks.usd_hi.USD_hi_half = KVM_READ_USD_HI_REG_VALUE(); + regs->stacks.usd_lo.USD_lo_half = KVM_READ_USD_LO_REG_VALUE(); + regs->stack_regs_saved = false; + regs->sys_num = sys_num; + regs->sys_func = (u64)sys_func; + regs->kernel_entry = entry; + + rval = handle_sys_call(sys_func, + arg1, arg2, arg3, arg4, arg5, arg6, regs); + + /* set virtual copy of stack & CRs registers state based on */ + /* pt_regs structure in thr hope that this structure will always */ + 
/* updated if the registers should be changed in the system call */ + /* FIXME: it should be supported only for certain cases (syscalls) + kvm_restore_syscall_stack_regs(regs); + */ + + return rval; +} + +static noinline long kvm_guest_sys_call64(long sys_num_and_entry, + u64 arg1, u64 arg2, + u64 arg3, u64 arg4, + u64 arg5, u64 arg6) +{ + pt_regs_t pt_regs; + pt_regs_t *regs = &pt_regs; + system_call_func sys_func; + int sys_num = sys_num_and_entry & 0xffffffff; + int entry = sys_num_and_entry >> 32; + long rval; + + sys_func = sys_call_table[sys_num]; + /* clear IS 32 system call bit at function label */ + sys_func = (system_call_func) ((unsigned long)sys_func & ~(1UL << 56)); + + /* SBR should be saved here because of syscall handlers can use */ + /* this value at user_mode() macros to determine source of calling */ + regs->stacks.top = KVM_READ_SBR_REG_VALUE(); + /* USD is restored in restore_hard_sys_calls() */ + regs->stacks.usd_hi.USD_hi_half = KVM_READ_USD_HI_REG_VALUE(); + regs->stacks.usd_lo.USD_lo_half = KVM_READ_USD_LO_REG_VALUE(); + regs->stack_regs_saved = false; + regs->sys_num = sys_num; + regs->sys_func = (u64)sys_func; + regs->kernel_entry = entry; + + rval = handle_sys_call(sys_func, + arg1, arg2, arg3, arg4, arg5, arg6, regs); + + /* set virtual copy of stack & CRs registers state based on */ + /* pt_regs structure in thr hope that this structure will always */ + /* updated if the registers should be changed in the system call */ + /* FIXME: it should be supported only for certain cases (syscalls) + kvm_restore_syscall_stack_regs(regs); + */ + + return rval; +} + +static noinline long kvm_guest_sys_call64_or_32(long sys_num_and_entry, + u64 arg1, u64 arg2, + u64 arg3, u64 arg4, + u64 arg5, u64 arg6) +{ + pt_regs_t pt_regs; + pt_regs_t *regs = &pt_regs; + const system_call_func *sys_calls_table; + system_call_func sys_func; + int sys_num = sys_num_and_entry & 0xffffffff; + int entry = sys_num_and_entry >> 32; + bool depr_scall = (sys_num < 0) ? 
true : false; + bool scall_32; + long rval; + + scall_32 = (current->thread.flags & E2K_FLAG_32BIT) != 0; + if (depr_scall) + sys_num = -sys_num; + + if (scall_32) + sys_calls_table = sys_call_table_32; + else if (depr_scall) + sys_calls_table = sys_call_table_deprecated; + else + sys_calls_table = sys_call_table; + + sys_func = sys_calls_table[sys_num]; + if (scall_32) + /* set IS 32 system call bit at function label */ + sys_func = (system_call_func) ((unsigned long)sys_func | + (1UL << 56)); + else + /* clear IS 32 system call bit at function label */ + sys_func = (system_call_func) ((unsigned long)sys_func & + ~(1UL << 56)); + + /* SBR should be saved here because of syscall handlers can use */ + /* this value at user_mode() macros to determine source of calling */ + regs->stacks.top = KVM_READ_SBR_REG_VALUE(); + /* USD is restored in restore_hard_sys_calls() */ + regs->stacks.usd_hi.USD_hi_half = KVM_READ_USD_HI_REG_VALUE(); + regs->stacks.usd_lo.USD_lo_half = KVM_READ_USD_LO_REG_VALUE(); + regs->stack_regs_saved = false; + regs->sys_num = sys_num; + regs->sys_func = (u64)sys_func; + regs->kernel_entry = entry; + + rval = handle_sys_call(sys_func, + arg1, arg2, arg3, arg4, arg5, arg6, regs); + + /* set virtual copy of stack & CRs registers state based on */ + /* pt_regs structure in thr hope that this structure will always */ + /* updated if the registers should be changed in the system call */ + /* FIXME: it should be supported only for certain cases (syscalls) + kvm_restore_syscall_stack_regs(regs); + */ + + return rval; +} + +static __interrupt __always_inline int +kvm_guest_fast_sys_call32(int sys_num, u64 arg1, u64 arg2) +{ + kvm_fast_system_call_func func; + int ret; + + func = kvm_fast_sys_calls_table_32[sys_num & NR_fast_syscalls_mask]; + ret = func(arg1, arg2); + return ret; +} + +static __interrupt __always_inline int +kvm_guest_fast_sys_call64(int sys_num, u64 arg1, u64 arg2) +{ + kvm_fast_system_call_func func; + int ret; + + func = 
kvm_fast_sys_calls_table[sys_num & NR_fast_syscalls_mask]; + ret = func(arg1, arg2); + return ret; +} + +/* FIXME: protected fast system calls are not implemented */ + +/*********************************************************************/ + +/* + * The following function should do about the same as assembler part of host + * system calls entries. + * To make it on assembler too is not good idea, because of guest operates with + * virtual CPU hardware (for example, registers are emulated as memory) + * So it is the same as unprivileged user function + */ +static inline thread_info_t *sys_call_prolog(int sys_num) +{ + thread_info_t *ti = KVM_READ_CURRENT_REG(); + + /* save hardware stacks registers at thread info */ + /* same as on host to use the same kernel interface */ + KVM_SAVE_HW_STACKS_AT_TI(ti); + + /* save user gregs and set kernel state of all global registers */ + /* used by kernel to optimize own actions */ + KVM_SAVE_GREGS_AND_SET(ti); + kvm_check_vcpu_id(); + + /* + * Host emulates hardware behavior and disables interrupts mask in PSR, + * before calling guest system calls entries. + * PSR becomes main register to control interrupts. + * Save user UPSR state and set kernel UPSR state to enable all + * interrupts. But switch control from PSR register to UPSR + * will be some later. 
+ */ + KVM_DO_SAVE_UPSR_REG(ti->upsr); + KVM_WRITE_UPSR_REG(E2K_KERNEL_UPSR_ENABLED); + + return ti; +} + +/* trap table entry #0 is allways traps/interrupts guest kernel entry */ + +#define __kvm_pv_vcpu_ttable_entry0__ \ + __attribute__((__section__(".kvm_pv_vcpu_ttable_entry0"))) + +void __interrupt __kvm_pv_vcpu_ttable_entry0__ +kvm_pv_vcpu_ttable_entry0(void) +{ + E2K_JUMP(kvm_trap_handler); +} + +/* trap table entry #1 is common 32 bits system calls entry */ + +#define __kvm_guest_ttable_entry1__ \ + __attribute__((__section__(".kvm_guest_ttable_entry1"))) + +long __kvm_guest_ttable_entry1__ +kvm_guest_ttable_entry1(int sys_num, + u32 arg1, u32 arg2, u32 arg3, u32 arg4, u32 arg5, u32 arg6) +{ + sys_call_prolog(sys_num); + + /* host saved global register and will restore its */ + return (kvm_guest_sys_call32(sys_num | (1UL << 32), + arg1, arg2, arg3, arg4, arg5, arg6)); +} + +/* trap table entry #3 is common 64 bits system calls entry */ + +#define __kvm_guest_ttable_entry3__ \ + __attribute__((__section__(".kvm_guest_ttable_entry3"))) + +long __kvm_guest_ttable_entry3__ +kvm_guest_ttable_entry3(int sys_num, + u64 arg1, u64 arg2, + u64 arg3, u64 arg4, + u64 arg5, u64 arg6) +{ + sys_call_prolog(sys_num); + + /* host saved global register and will restore its */ + return (kvm_guest_sys_call64(sys_num | (3UL << 32), + arg1, arg2, arg3, arg4, arg5, arg6)); +} + +/* trap table entry #4 is common 32 or 64 bits system calls entry */ + +#define __kvm_guest_ttable_entry4__ \ + __attribute__((__section__(".kvm_guest_ttable_entry4"))) + +long __kvm_guest_ttable_entry4__ +kvm_guest_ttable_entry4(int sys_num, + u64 arg1, u64 arg2, + u64 arg3, u64 arg4, + u64 arg5, u64 arg6) +{ + sys_call_prolog(sys_num); + + /* host saved global register and will restore its */ + return kvm_guest_sys_call64_or_32(sys_num | (4UL << 32), + arg1, arg2, arg3, arg4, arg5, arg6); +} + +/* trap table entry #5 is fast 32 bits system calls entry */ + +#define __kvm_guest_ttable_entry5__ \ + 
__attribute__((__section__(".kvm_guest_ttable_entry5"))) + +long __interrupt __kvm_guest_ttable_entry5__ +kvm_guest_ttable_entry5(int sys_num, + u64 arg1, u64 arg2, + u64 arg3, u64 arg4, + u64 arg5, u64 arg6) +{ + int ret; + + ret = kvm_guest_fast_sys_call32(sys_num, arg1, arg2); + return ret; +} + +/* trap table entry #6 is fast 64 bits system calls entry */ + +#define __kvm_guest_ttable_entry6__ \ + __attribute__((__section__(".kvm_guest_ttable_entry6"))) + +long __interrupt __kvm_guest_ttable_entry6__ +kvm_guest_ttable_entry6(int sys_num, + u64 arg1, u64 arg2, + u64 arg3, u64 arg4, + u64 arg5, u64 arg6) +{ + int ret; + + ret = kvm_guest_fast_sys_call64(sys_num, arg1, arg2); + return ret; +} + +/* Pseudo SCALL 32 is used as a guest kernel jumpstart. */ + +#ifdef CONFIG_PARAVIRT_GUEST +static atomic_t __initdata boot_paravirt_init_finished = ATOMIC_INIT(0); +#endif /* CONFIG_PARAVIRT_GUEST */ + +#define __ttable_entry32__ \ + __attribute__((__section__(".kvm_guest_startup_entry"))) + +void notrace __ttable_entry32__ +kvm_guest_startup_entry(int bsp, bootblock_struct_t *bootblock) +{ + unsigned long vcpu_base; + +#ifdef CONFIG_PARAVIRT_GUEST + cur_pv_v2p_ops = &kvm_v2p_ops; + if (bsp) { + kvm_init_paravirt_guest(); +#ifdef CONFIG_SMP + boot_set_event(&boot_paravirt_init_finished); + } else { + boot_wait_for_event(&boot_paravirt_init_finished); +#endif /* CONFIG_SMP */ + } +#endif /* CONFIG_PARAVIRT_GUEST */ + /* VCPU state base can be on global register, so save & restore */ + KVM_SAVE_VCPU_STATE_BASE(vcpu_base); + NATIVE_BOOT_INIT_G_REGS(); + KVM_RESTORE_VCPU_STATE_BASE(vcpu_base); + + boot_startup(bsp, bootblock); +} diff --git a/arch/e2k/kvm/hv_cpu.c b/arch/e2k/kvm/hv_cpu.c new file mode 100644 index 000000000000..331302adb07c --- /dev/null +++ b/arch/e2k/kvm/hv_cpu.c @@ -0,0 +1,1857 @@ + +/* + * CPU hardware virtualized support + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cpu_defs.h" +#include "cpu.h" +#include "mmu_defs.h" +#include "mmu.h" +#include "gregs.h" +#include "process.h" +#include "intercepts.h" +#include "io.h" +#include "pic.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_STARTUP_MODE +#undef DebugKVMSTUP +#define DEBUG_KVM_STARTUP_MODE 0 /* VCPU startup debugging */ +#define DebugKVMSTUP(fmt, args...) \ +({ \ + if (DEBUG_KVM_STARTUP_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_SHADOW_CONTEXT_MODE +#undef DebugSHC +#define DEBUG_SHADOW_CONTEXT_MODE 0 /* shadow context debugging */ +#define DebugSHC(fmt, args...) \ +({ \ + if (DEBUG_SHADOW_CONTEXT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_INTC_WAIT_TRAP_MODE +#undef DebugWTR +#define DEBUG_INTC_WAIT_TRAP_MODE 0 /* CU wait trap intercept */ +#define DebugWTR(fmt, args...) \ +({ \ + if (DEBUG_INTC_WAIT_TRAP_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_IT_MODE +#undef DebugKVMIT +#define DEBUG_KVM_IT_MODE 0 /* CEPIC idle timer */ +#define DebugKVMIT(fmt, args...) \ +({ \ + if (DEBUG_KVM_IT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) +#undef VM_BUG_ON +#define VM_BUG_ON(cond) BUG_ON(cond) + +int __nodedata slt_disable; + +static void vcpu_write_os_cu_hw_ctxt_to_registers(struct kvm_vcpu *vcpu, + const struct kvm_hw_cpu_context *hw_ctxt); + +/* + * FIXME: QEMU should pass physical addresses for entry IP and + * for any addresses info into arguments list to pass to guest. 
+ * The function convert virtual physical adresses to physical + * to enable VCPU startup at nonpaging mode + */ +void prepare_vcpu_startup_args(struct kvm_vcpu *vcpu) +{ + unsigned long entry_IP; + u64 *args; + int args_num, arg; + unsigned long long arg_value; + + + DebugKVMSTUP("started on VCPU #%d\n", vcpu->vcpu_id); + + if (is_paging(vcpu)) { + DebugKVMSTUP("there is paging mode, nothing convertions " + "need\n"); + return; + } + args_num = vcpu->arch.args_num; + entry_IP = (unsigned long)vcpu->arch.entry_point; + + if (entry_IP >= GUEST_PAGE_OFFSET) { + entry_IP = __guest_pa(entry_IP); + vcpu->arch.entry_point = (void *)entry_IP; + } + DebugKVMSTUP("VCPU startup entry point at %px\n", (void *)entry_IP); + + args = vcpu->arch.args; + + /* prepare VCPU startup function arguments */ +#pragma loop count (2) + for (arg = 0; arg < args_num; arg++) { + arg_value = args[arg]; + if (arg_value >= GUEST_PAGE_OFFSET) { + arg_value = __guest_pa(arg_value); + args[arg] = arg_value; + } + DebugKVMSTUP(" arg[%d] is 0x%016llx\n", + arg, arg_value); + } +} + +void prepare_stacks_to_startup_vcpu(struct kvm_vcpu *vcpu, + e2k_mem_ps_t *ps_frames, e2k_mem_crs_t *pcs_frames, + u64 *args, int args_num, char *entry_point, e2k_psr_t psr, + e2k_size_t usd_size, e2k_size_t *ps_ind, e2k_size_t *pcs_ind, + int cui, bool kernel) +{ + e2k_cr0_lo_t cr0_lo; + e2k_cr0_hi_t cr0_hi; + e2k_cr1_lo_t cr1_lo; + e2k_cr1_hi_t cr1_hi; + unsigned long entry_IP; + bool priv_guest = vcpu->arch.is_hv; + int arg; + int wbs; + + DebugKVMSTUP("started on VCPU #%d\n", vcpu->vcpu_id); + + entry_IP = (unsigned long)entry_point; + + wbs = (sizeof(*args) * 2 * args_num + (EXT_4_NR_SZ - 1)) / EXT_4_NR_SZ; + + /* pcs[0] frame can be empty, because of it should not be returns */ + /* to here and it is used only to fill into current CR registers */ + /* while function on the next frame pcs[1] is running */ + pcs_frames[0].cr0_lo.CR0_lo_pf = -1; + pcs_frames[0].cr0_hi.CR0_hi_ip = 0; + pcs_frames[0].cr1_lo.CR1_lo_half = 
0; + pcs_frames[0].cr1_hi.CR1_hi_half = 0; + + /* guest is user of host for pv, GLAUNCH set PSR for hv */ + if (priv_guest) { + /* guest should be run at privileged mode */ + psr.PSR_pm = 1; + } else { + /* guest is not privileged user task of host */ + psr.PSR_pm = 0; + } + + /* Prepare pcs[1] frame, it is frame of VCPU start function */ + /* Important only only IP (as start of function) */ + cr0_lo.CR0_lo_half = 0; + cr0_lo.CR0_lo_pf = -1; + cr0_hi.CR0_hi_half = 0; + cr0_hi.CR0_hi_IP = entry_IP; + cr1_lo.CR1_lo_half = 0; + cr1_lo.CR1_lo_psr = psr.PSR_reg; + cr1_lo.CR1_lo_wbs = wbs; + cr1_lo.CR1_lo_wpsz = cr1_lo.CR1_lo_wbs; + cr1_lo.CR1_lo_cui = cui; + if (!cpu_has(CPU_FEAT_ISET_V6)) + cr1_lo.CR1_lo_ic = kernel; + + cr1_hi.CR1_hi_half = 0; + cr1_hi.CR1_hi_ussz = usd_size / 16; + pcs_frames[1].cr0_lo = cr0_lo; + pcs_frames[1].cr0_hi = cr0_hi; + pcs_frames[1].cr1_lo = cr1_lo; + pcs_frames[1].cr1_hi = cr1_hi; + + DebugKVMSTUP("VCPU start PCS[1]: IP %pF wbs 0x%x\n", + (void *)(pcs_frames[1].cr0_hi.CR0_hi_ip << 3), + pcs_frames[1].cr1_lo.CR1_lo_wbs * EXT_4_NR_SZ); + DebugKVMSTUP(" PCS[%d] CR0 lo: 0x%016llx hi: 0x%016llx\n", + 1, pcs_frames[1].cr0_lo.CR0_lo_half, + pcs_frames[1].cr0_hi.CR0_hi_half); + DebugKVMSTUP(" PCS[%d] CR1 lo: 0x%016llx hi: 0x%016llx\n", + 1, pcs_frames[1].cr1_lo.CR1_lo_half, + pcs_frames[1].cr1_hi.CR1_hi_half); + DebugKVMSTUP(" PCS[%d] CR0 lo: 0x%016llx hi: 0x%016llx\n", + 0, pcs_frames[0].cr0_lo.CR0_lo_half, + pcs_frames[0].cr0_hi.CR0_hi_half); + DebugKVMSTUP(" PCS[%d] CR1 lo: 0x%016llx hi: 0x%016llx\n", + 0, pcs_frames[0].cr1_lo.CR1_lo_half, + pcs_frames[0].cr1_hi.CR1_hi_half); + + /* prepare procedure stack frame ps[0] for pcs[1] should contain */ + /* VCPU start function arguments */ +#pragma loop count (2) + for (arg = 0; arg < args_num; arg++) { + int frame = (arg * sizeof(*args)) / (EXT_4_NR_SZ / 2); + bool lo = (arg & 0x1) == 0x0; + unsigned long long arg_value; + + arg_value = args[arg]; + + if (machine.native_iset_ver < E2K_ISET_V5) { + 
if (lo) + ps_frames[frame].v2.word_lo = arg_value; + else + ps_frames[frame].v2.word_hi = arg_value; + /* Skip frame[2] and frame[3] - they hold */ + /* extended data not used by kernel */ + } else { + if (lo) + ps_frames[frame].v5.word_lo = arg_value; + else + ps_frames[frame].v5.word_hi = arg_value; + /* Skip frame[1] and frame[3] - they hold */ + /* extended data not used by kernel */ + } + DebugKVMSTUP(" PS[%d].%s is 0x%016llx\n", + frame, (lo) ? "lo" : "hi", arg_value); + } + + /* set stacks pointers indexes */ + *ps_ind = wbs * EXT_4_NR_SZ; + *pcs_ind = 2 * SZ_OF_CR; + DebugKVMSTUP("stacks PS.ind 0x%lx PCS.ind 0x%lx\n", + *ps_ind, *pcs_ind); +} + +void prepare_bu_stacks_to_startup_vcpu(struct kvm_vcpu *vcpu) +{ + bu_hw_stack_t *hypv_backup; + vcpu_boot_stack_t *boot_stacks; + e2k_mem_crs_t *pcs_frames; + e2k_mem_ps_t *ps_frames; + e2k_size_t ps_ind, pcs_ind; + bool priv_guest = vcpu->arch.is_hv; + e2k_psr_t psr; + + DebugKVMSTUP("started on VCPU #%d\n", vcpu->vcpu_id); + + prepare_vcpu_startup_args(vcpu); + hypv_backup = &vcpu->arch.hypv_backup; + boot_stacks = &vcpu->arch.boot_stacks; + + ps_frames = (e2k_mem_ps_t *)GET_BACKUP_PS_BASE(hypv_backup); + VM_BUG_ON(ps_frames == NULL); + pcs_frames = (e2k_mem_crs_t *)GET_BACKUP_PCS_BASE(hypv_backup); + VM_BUG_ON(pcs_frames == NULL); + + if (priv_guest) + psr = E2K_KERNEL_PSR_DISABLED; + else + psr = E2K_USER_INITIAL_PSR; + prepare_stacks_to_startup_vcpu(vcpu, ps_frames, pcs_frames, + vcpu->arch.args, vcpu->arch.args_num, + vcpu->arch.entry_point, psr, + GET_VCPU_BOOT_CS_SIZE(boot_stacks), + &ps_ind, &pcs_ind, 0, 1); + + /* correct stacks pointers indexes */ + hypv_backup->psp_hi.PSP_hi_ind = ps_ind; + hypv_backup->pcsp_hi.PCSP_hi_ind = pcs_ind; + DebugKVMSTUP("backup PS.ind 0x%x PCS.ind 0x%x\n", + hypv_backup->psp_hi.PSP_hi_ind, + hypv_backup->pcsp_hi.PCSP_hi_ind); +} + +void setup_vcpu_boot_stacks(struct kvm_vcpu *vcpu, gthread_info_t *gti) +{ + thread_info_t *ti = current_thread_info(); + vcpu_boot_stack_t 
*boot_stacks; + e2k_stacks_t *boot_regs; + data_stack_t *data_stack; + hw_stack_t *hw_stacks; + /* FIXME: all addresses of stacks should be physical, if guest */ + /* will be launched at nonpaging mode. */ + /* It should be done while stacks allocation, but may be not done */ + /* and then need do it here */ + bool nonpaging = !is_paging(vcpu); + e2k_addr_t stack_addr; + e2k_usd_lo_t usd_lo; + e2k_psp_lo_t psp_lo; + e2k_pcsp_lo_t pcsp_lo; + + boot_stacks = &vcpu->arch.boot_stacks; + boot_regs = &boot_stacks->regs.stacks; + data_stack = >i->data_stack; + hw_stacks = >i->hw_stacks; + stack_addr = GET_VCPU_BOOT_CS_BASE(boot_stacks); + if (nonpaging && stack_addr >= GUEST_PAGE_OFFSET) { + /* see FIXME above */ + stack_addr = __guest_pa(stack_addr); + SET_VCPU_BOOT_CS_BASE(boot_stacks, stack_addr); + } + data_stack->bottom = stack_addr; + stack_addr = GET_VCPU_BOOT_CS_TOP(boot_stacks); + if (nonpaging && stack_addr >= GUEST_PAGE_OFFSET) { + /* see FIXME above */ + stack_addr = __guest_pa(stack_addr); + SET_VCPU_BOOT_CS_TOP(boot_stacks, stack_addr); + boot_regs->top = stack_addr; + } + data_stack->top = stack_addr; + data_stack->size = GET_VCPU_BOOT_CS_SIZE(boot_stacks); + gti->stack = current->stack; + gti->stack_regs.stacks.top = + (u64)gti->stack + KERNEL_C_STACK_SIZE; + gti->stack_regs.stacks.usd_lo = ti->k_usd_lo; + gti->stack_regs.stacks.usd_hi = ti->k_usd_hi; + usd_lo = boot_regs->usd_lo; + if (nonpaging && usd_lo.USD_lo_base >= GUEST_PAGE_OFFSET) { + /* see FIXME above */ + usd_lo.USD_lo_base = __guest_pa(usd_lo.USD_lo_base); + boot_regs->usd_lo = usd_lo; + } + gti->stack_regs.stacks.u_usd_lo = usd_lo; + gti->stack_regs.stacks.u_usd_hi = boot_regs->usd_hi; + gti->stack_regs.stacks.u_top = GET_VCPU_BOOT_CS_TOP(boot_stacks); + DebugKVMSTUP("guest kernel start thread GPID #%d\n", + gti->gpid->nid.nr); + DebugKVMSTUP("guest data stack bottom 0x%lx, top 0x%lx, size 0x%lx\n", + data_stack->bottom, data_stack->top, data_stack->size); + DebugKVMSTUP("guest data stack 
USD: base 0x%llx size 0x%x\n", + gti->stack_regs.stacks.u_usd_lo.USD_lo_base, + gti->stack_regs.stacks.u_usd_hi.USD_hi_size); + DebugKVMSTUP("host data stack bottom 0x%lx\n", + gti->stack); + + *hw_stacks = ti->u_hw_stack; + hw_stacks->ps = boot_stacks->ps; + hw_stacks->pcs = boot_stacks->pcs; + + stack_addr = (e2k_addr_t)GET_VCPU_BOOT_PS_BASE(boot_stacks); + if (nonpaging && stack_addr >= GUEST_PAGE_OFFSET) { + /* see FIXME above */ + stack_addr = __guest_pa(stack_addr); + SET_VCPU_BOOT_PS_BASE(boot_stacks, (void *)stack_addr); + SET_PS_BASE(hw_stacks, (void *)stack_addr); + } + psp_lo = boot_regs->psp_lo; + if (nonpaging && psp_lo.PSP_lo_base >= GUEST_PAGE_OFFSET) { + /* see FIXME above */ + psp_lo.PSP_lo_base = __guest_pa(psp_lo.PSP_lo_base); + boot_regs->psp_lo = psp_lo; + } + gti->stack_regs.stacks.psp_lo = psp_lo; + gti->stack_regs.stacks.psp_hi = boot_regs->psp_hi; + + stack_addr = (e2k_addr_t)GET_VCPU_BOOT_PCS_BASE(boot_stacks); + if (nonpaging && stack_addr >= GUEST_PAGE_OFFSET) { + /* see FIXME above */ + stack_addr = __guest_pa(stack_addr); + SET_VCPU_BOOT_PCS_BASE(boot_stacks, (void *)stack_addr); + SET_PCS_BASE(hw_stacks, (void *)stack_addr); + } + pcsp_lo = boot_regs->pcsp_lo; + if (nonpaging && pcsp_lo.PCSP_lo_base >= GUEST_PAGE_OFFSET) { + /* see FIXME above */ + pcsp_lo.PCSP_lo_base = __guest_pa(pcsp_lo.PCSP_lo_base); + boot_regs->pcsp_lo = pcsp_lo; + } + gti->stack_regs.stacks.pcsp_lo = pcsp_lo; + gti->stack_regs.stacks.pcsp_hi = boot_regs->pcsp_hi; + DebugKVMSTUP("guest procedure stack base 0x%lx, size 0x%lx\n", + GET_PS_BASE(hw_stacks), + kvm_get_guest_hw_ps_user_size(hw_stacks)); + DebugKVMSTUP("guest procedure chain stack base 0x%lx, size 0x%lx\n", + GET_PCS_BASE(hw_stacks), + kvm_get_guest_hw_pcs_user_size(hw_stacks)); + DebugKVMSTUP("guest procedure stack PSP: base 0x%llx size 0x%x ind 0x%x\n", + gti->stack_regs.stacks.psp_lo.PSP_lo_base, + gti->stack_regs.stacks.psp_hi.PSP_hi_size, + gti->stack_regs.stacks.psp_hi.PSP_hi_ind); + 
DebugKVMSTUP("guest procedure chain stack PCSP: base 0x%llx size 0x%x ind 0x%x\n", + gti->stack_regs.stacks.pcsp_lo.PCSP_lo_base, + gti->stack_regs.stacks.pcsp_hi.PCSP_hi_size, + gti->stack_regs.stacks.pcsp_hi.PCSP_hi_ind); +} + +/* + * Boot loader should set OSCUD/OSGD to physical base and size of guest kernel + * image before startup guest. So hypervisor should do same too. + */ +void kvm_set_vcpu_kernel_image(struct kvm_vcpu *vcpu, + char *kernel_base, unsigned long kernel_size) +{ + + KVM_BUG_ON(!vcpu->arch.is_hv && + (e2k_addr_t)kernel_base >= GUEST_PAGE_OFFSET); + vcpu->arch.guest_phys_base = (e2k_addr_t)kernel_base; + vcpu->arch.guest_base = kernel_base; + vcpu->arch.guest_size = kernel_size; + if (vcpu->arch.vcpu_state != NULL) { + kvm_set_pv_vcpu_kernel_image(vcpu); + } + + DebugSHC("Guest kernel image: base 0x%lx, size 0x%lx\n", + vcpu->arch.guest_base, vcpu->arch.guest_size); + +} + +static void init_guest_image_hw_ctxt(struct kvm_vcpu *vcpu, + struct kvm_hw_cpu_context *hw_ctxt) +{ + e2k_oscud_lo_t oscud_lo; + e2k_oscud_hi_t oscud_hi; + e2k_osgd_lo_t osgd_lo; + e2k_osgd_hi_t osgd_hi; + e2k_cutd_t oscutd; + e2k_cuir_t oscuir; + e2k_addr_t guest_cut_pa; + + oscud_lo.OSCUD_lo_half = 0; + oscud_lo.OSCUD_lo_base = (unsigned long)vcpu->arch.guest_base; + oscud_hi.OSCUD_hi_half = 0; + oscud_hi.OSCUD_hi_size = vcpu->arch.guest_size; + hw_ctxt->sh_oscud_lo = oscud_lo; + hw_ctxt->sh_oscud_hi = oscud_hi; + + osgd_lo.OSGD_lo_half = 0; + osgd_lo.OSGD_lo_base = (unsigned long)vcpu->arch.guest_base; + osgd_hi.OSGD_hi_half = 0; + osgd_hi.OSGD_hi_size = vcpu->arch.guest_size; + hw_ctxt->sh_osgd_lo = osgd_lo; + hw_ctxt->sh_osgd_hi = osgd_hi; + + if (vcpu->arch.guest_cut != NULL) { + guest_cut_pa = kvm_vcpu_hva_to_gpa(vcpu, + (u64)vcpu->arch.guest_cut); + } else { + guest_cut_pa = 0; + } + oscutd.CUTD_reg = 0; + oscutd.CUTD_base = guest_cut_pa; + oscuir.CUIR_reg = 0; + hw_ctxt->sh_oscutd = oscutd; + vcpu->arch.sw_ctxt.cutd = oscutd; + hw_ctxt->sh_oscuir = oscuir; +} + 
+int vcpu_init_os_cu_hw_ctxt(struct kvm_vcpu *vcpu, kvm_task_info_t *user_info) +{ + struct kvm_hw_cpu_context *hw_ctxt = &vcpu->arch.hw_ctxt; + struct kvm_sw_cpu_context *sw_ctxt = &vcpu->arch.sw_ctxt; + e2k_oscud_lo_t oscud_lo; + e2k_oscud_hi_t oscud_hi; + e2k_osgd_lo_t osgd_lo; + e2k_osgd_hi_t osgd_hi; + e2k_cutd_t oscutd; + e2k_cuir_t oscuir; + e2k_addr_t guest_cut; + + oscud_lo.OSCUD_lo_half = 0; + oscud_lo.OSCUD_lo_base = user_info->cud_base; + oscud_hi.OSCUD_hi_half = 0; + oscud_hi.OSCUD_hi_size = user_info->cud_size; + hw_ctxt->sh_oscud_lo = oscud_lo; + hw_ctxt->sh_oscud_hi = oscud_hi; + /* switch guest CUT (kernel image) to virtual address */ + vcpu->arch.guest_base = (char *)user_info->cud_base; + vcpu->arch.trap_entry = (char *)user_info->cud_base + + vcpu->arch.trap_offset; + + osgd_lo.OSGD_lo_half = 0; + osgd_lo.OSGD_lo_base = user_info->gd_base; + osgd_hi.OSGD_hi_half = 0; + osgd_hi.OSGD_hi_size = user_info->gd_size; + hw_ctxt->sh_osgd_lo = osgd_lo; + hw_ctxt->sh_osgd_hi = osgd_hi; + + guest_cut = user_info->cut_base; + oscutd.CUTD_reg = 0; + oscutd.CUTD_base = guest_cut; + hw_ctxt->sh_oscutd = oscutd; + if (vcpu->arch.is_hv) { + sw_ctxt->cutd = oscutd; /* for kernel CUTD == OSCUTD */ + } + + oscuir.CUIR_reg = user_info->cui; + hw_ctxt->sh_oscuir = oscuir; + + /* set OC CU conteext on shadow registers */ + preempt_disable(); + vcpu_write_os_cu_hw_ctxt_to_registers(vcpu, hw_ctxt); + preempt_enable(); + + return 0; +} + +static void init_hv_vcpu_intc_ctxt(struct kvm_vcpu *vcpu) +{ + struct kvm_intc_cpu_context *intc_ctxt = &vcpu->arch.intc_ctxt; + e2k_tir_hi_t TIR_hi; + e2k_tir_lo_t TIR_lo; + + /* Initialize empty TIRs before first GLAUNCH to avoid showing host's + * IP to guest */ + TIR_lo.TIR_lo_reg = GET_CLEAR_TIR_LO(1); + TIR_hi.TIR_hi_reg = GET_CLEAR_TIR_HI(1); + kvm_clear_vcpu_intc_TIRs_num(vcpu); + kvm_update_vcpu_intc_TIR(vcpu, 1, TIR_hi, TIR_lo); + + intc_ctxt->cu_num = -1; + intc_ctxt->mu_num = -1; + kvm_reset_intc_info_mu_is_updated(vcpu); + 
kvm_reset_intc_info_cu_is_updated(vcpu); +} + +static void init_vcpu_intc_ctxt(struct kvm_vcpu *vcpu) +{ + if (vcpu->arch.is_hv) { + /* interceptions is supported by hardware */ + init_hv_vcpu_intc_ctxt(vcpu); + } else if (vcpu->arch.is_pv) { + /* interceptions is not supported by hardware */ + /* so nothing to do */ + ; + } else { + KVM_BUG_ON(true); + } +} + +void kvm_setup_mmu_intc_mode(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = vcpu->kvm; + virt_ctrl_mu_t mu; + mmu_reg_t g_w_imask_mmu_cr; + mmu_reg_t sh_mmu_cr, sh_pid; + + /* MMU interception control registers state */ + mu.VIRT_CTRL_MU_reg = 0; + g_w_imask_mmu_cr = 0; + + if (kvm_is_tdp_enable(kvm)) { + mu.sh_pt_en = 0; + } else if (kvm_is_shadow_pt_enable(kvm)) { + mu.sh_pt_en = 1; + } else { + KVM_BUG_ON(true); + } + + if (kvm_is_phys_pt_enable(kvm)) + mu.gp_pt_en = 1; + + /* Guest should not be able to write special MMU/AAU */ + mu.rw_dbg1 = 1; + + if (vcpu->arch.is_hv) { + if (kvm_is_tdp_enable(kvm)) { + /* intercept only MMU_CR updates to track */ + /* paging enable/disable */ + mu.rw_mmu_cr = 0; + g_w_imask_mmu_cr |= _MMU_CR_TLB_EN; + } else if (kvm_is_shadow_pt_enable(kvm)) { + /* intercept all read/write MMU CR */ + /* and Page Table Base */ + mu.rr_mmu_cr = 1; + mu.rr_pptb = 1; + mu.rr_vptb = 1; + mu.rw_mmu_cr = 1; + mu.rw_pptb = 1; + mu.rw_vptb = 1; + mu.fl_tlbpg = 1; + mu.fl_tlb2pg = 1; + g_w_imask_mmu_cr |= _MMU_CR_TLB_EN; + } else { + KVM_BUG_ON(true); + } + } + vcpu->arch.mmu.virt_ctrl_mu = mu; + vcpu->arch.mmu.g_w_imask_mmu_cr = g_w_imask_mmu_cr; + + + /* MMU shadow registers initial state */ + if (vcpu->arch.is_hv || vcpu->arch.is_pv) { + sh_mmu_cr = mmu_reg_val(MMU_CR_KERNEL_OFF); + sh_pid = 0; /* guest kernel should have PID == 0 */ + vcpu_write_SH_MMU_CR_reg(vcpu, sh_mmu_cr); + } else { + KVM_BUG_ON(true); + } + vcpu->arch.mmu.init_sh_mmu_cr = sh_mmu_cr; + vcpu->arch.mmu.init_sh_pid = sh_pid; +} + +static void kvm_init_lintel_gregs(struct kvm_vcpu *vcpu) +{ + /* + * It need only pass 
pointer to bootinfo structure as %dg1 register + * but hypervisor pass as 0 & 1-st parameter and set: + * %dg0 - BSP flag + * %dg1 - bootinfo pointer + */ + SET_HOST_GREG(0, vcpu->arch.args[0]); + SET_HOST_GREG(1, vcpu->arch.args[1]); +} + +static void init_backup_hw_ctxt(struct kvm_vcpu *vcpu) +{ + bu_hw_stack_t *hypv_backup; + struct kvm_hw_cpu_context *hw_ctxt = &vcpu->arch.hw_ctxt; + + if (!vcpu->arch.is_hv) { + /* there is not support of hardware virtualizsation */ + return; + } + + /* + * Stack registers + */ + hypv_backup = &vcpu->arch.hypv_backup; + hw_ctxt->bu_psp_lo = hypv_backup->psp_lo; + hw_ctxt->bu_psp_hi = hypv_backup->psp_hi; + hw_ctxt->bu_pcsp_lo = hypv_backup->pcsp_lo; + hw_ctxt->bu_pcsp_hi = hypv_backup->pcsp_hi; + + /* set backup stacks to empty state will be done by hardware after */ + /* GLAUNCH, so update software pointers at hypv_backup structure */ + /* for following GLAUNCHes and paravirtualization HCALL emulation */ + hypv_backup->psp_hi.PSP_hi_ind = 0; + hypv_backup->pcsp_hi.PCSP_hi_ind = 0; +} + +void init_hw_ctxt(struct kvm_vcpu *vcpu) +{ + vcpu_boot_stack_t *boot_stacks = &vcpu->arch.boot_stacks; + guest_hw_stack_t *boot_regs = &boot_stacks->regs; + kvm_guest_info_t *guest_info = &vcpu->kvm->arch.guest_info; + struct kvm_hw_cpu_context *hw_ctxt = &vcpu->arch.hw_ctxt; + epic_page_t *cepic = hw_ctxt->cepic; + virt_ctrl_cu_t cu; + union cepic_ctrl epic_reg_ctrl; + union cepic_esr2 epic_reg_esr2; + union cepic_timer_lvtt epic_reg_timer_lvtt; + union cepic_pnmirr_mask epic_reg_pnmirr_mask; + unsigned int i; + + /* + * Stack registers + */ + hw_ctxt->sh_psp_lo = boot_regs->stacks.psp_lo; + hw_ctxt->sh_psp_hi = boot_regs->stacks.psp_hi; + hw_ctxt->sh_pcsp_lo = boot_regs->stacks.pcsp_lo; + hw_ctxt->sh_pcsp_hi = boot_regs->stacks.pcsp_hi; + + /* setup initial state of backup stacks */ + init_backup_hw_ctxt(vcpu); + + /* set shadow WD state to initial value */ + hw_ctxt->sh_wd.WD_reg = 0; + hw_ctxt->sh_wd.WD_fx = 0; + + /* MMU shadow context 
registers state */ + hw_ctxt->sh_mmu_cr = vcpu->arch.mmu.init_sh_mmu_cr; + hw_ctxt->sh_pid = vcpu->arch.mmu.init_sh_pid; + + hw_ctxt->gid = vcpu->kvm->arch.vmid.nr; + + /* + * CPU shadow context + */ + /* FIXME: set guest kernel OSCUD to host OSCUD to allow handling */ + /* traps, hypercalls by host. Real guest OSCUD should be set to */ + /* physical base of guest kernel image + oscud_lo = kvm_get_guest_vcpu_OSCUD_lo(vcpu); + oscud_hi = kvm_get_guest_vcpu_OSCUD_hi(vcpu); + */ + if (vcpu->arch.is_hv || vcpu->arch.is_pv) { + /* guest image state should be saved */ + /* by kvm_set_hv_kernel_image() */ + init_guest_image_hw_ctxt(vcpu, hw_ctxt); + } else { + KVM_BUG_ON(true); + } + + /* FIXME: guest now use paravirtualized register (in memory) */ + /* so set shadow OSR0 to host current_thread_info() to enable */ + /* host trap handler + osr0 = kvm_get_guest_vcpu_OSR0_value(vcpu); + */ + if (vcpu->arch.is_hv) { + hw_ctxt->sh_osr0 = 0; + } else if (vcpu->arch.is_pv) { + hw_ctxt->sh_osr0 = (u64) current_thread_info(); + } else { + KVM_BUG_ON(true); + } + if (vcpu->arch.is_hv) { + hw_ctxt->sh_core_mode = read_SH_CORE_MODE_reg(); + } else if (vcpu->arch.is_pv) { + hw_ctxt->sh_core_mode = kvm_get_guest_vcpu_CORE_MODE(vcpu); + } else { + KVM_BUG_ON(true); + } + /* turn ON indicators of GM and enbale hypercalls */ + if (vcpu->arch.is_hv) { + hw_ctxt->sh_core_mode.CORE_MODE_gmi = 1; + hw_ctxt->sh_core_mode.CORE_MODE_hci = 1; + } + + /* + * VIRT_CTRL_* registers + */ + cu.VIRT_CTRL_CU_reg = 0; + if (guest_info->is_stranger) { + /* it need turn ON interceptions on IDR read */ + cu.VIRT_CTRL_CU_rr_idr = 1; + } + cu.VIRT_CTRL_CU_rw_sclkr = 1; + cu.VIRT_CTRL_CU_rw_sclkm3 = 1; + cu.VIRT_CTRL_CU_virt = 1; + + hw_ctxt->virt_ctrl_cu = cu; + hw_ctxt->virt_ctrl_mu = vcpu->arch.mmu.virt_ctrl_mu; + hw_ctxt->g_w_imask_mmu_cr = vcpu->arch.mmu.g_w_imask_mmu_cr; + + /* Set CEPIC reset state */ + if (vcpu->arch.is_hv) { + epic_reg_ctrl.raw = 0; + epic_reg_ctrl.bits.bsp_core = 
kvm_vcpu_is_bsp(vcpu); + cepic->ctrl = epic_reg_ctrl.raw; + cepic->id = kvm_vcpu_to_full_cepic_id(vcpu); + cepic->cpr = 0; + cepic->esr = 0; + epic_reg_esr2.raw = 0; + epic_reg_esr2.bits.mask = 1; + cepic->esr2 = epic_reg_esr2; + cepic->cir.raw = 0; + cepic->esr_new.counter = 0; + cepic->icr.raw = 0; + epic_reg_timer_lvtt.raw = 0; + epic_reg_timer_lvtt.bits.mask = 1; + cepic->timer_lvtt = epic_reg_timer_lvtt; + cepic->timer_init = 0; + cepic->timer_cur = 0; + cepic->timer_div = 0; + cepic->nm_timer_lvtt = 0; + cepic->nm_timer_init = 0; + cepic->nm_timer_cur = 0; + cepic->nm_timer_div = 0; + cepic->svr = 0; + epic_reg_pnmirr_mask.raw = 0; + epic_reg_pnmirr_mask.bits.nm_special = 1; + epic_reg_pnmirr_mask.bits.nm_timer = 1; + epic_reg_pnmirr_mask.bits.int_violat = 1; + cepic->pnmirr_mask = epic_reg_pnmirr_mask.raw; + for (i = 0; i < CEPIC_PMIRR_NR_DREGS; i++) + cepic->pmirr[i].counter = 0; + cepic->pnmirr.counter = 0; + for (i = 0; i < CEPIC_PMIRR_NR_BITS; i++) + cepic->pmirr_byte[i] = 0; + for (i = 0; i < 16; i++) + cepic->pnmirr_byte[i] = 0; + } + + /* FIXME Initializing CEPIC for APIC v6 model. 
Ideally, this should be + * done by the model itself */ + if (!kvm_vcpu_is_epic(vcpu) && kvm_vcpu_is_hw_apic(vcpu)) { + union cepic_timer_div reg_div; + union cepic_svr epic_reg_svr; + + epic_reg_ctrl.bits.soft_en = 1; + cepic->ctrl = epic_reg_ctrl.raw; + + epic_reg_esr2.bits.vect = 0xfe; + epic_reg_esr2.bits.mask = 0; + cepic->esr2 = epic_reg_esr2; + + reg_div.raw = 0; + reg_div.bits.divider = CEPIC_TIMER_DIV_1; + cepic->timer_div = reg_div.raw; + + epic_reg_svr.raw = 0; + epic_reg_svr.bits.vect = 0xff; + cepic->svr = epic_reg_svr.raw; + } +} + +void kvm_update_guest_stacks_registers(struct kvm_vcpu *vcpu, + guest_hw_stack_t *stack_regs) +{ + struct kvm_hw_cpu_context *hw_ctxt = &vcpu->arch.hw_ctxt; + struct kvm_sw_cpu_context *sw_ctxt = &vcpu->arch.sw_ctxt; + + /* + * Guest Stack state is now on back UP registers + */ + hw_ctxt->sh_psp_lo = stack_regs->stacks.psp_lo; + hw_ctxt->sh_psp_hi = stack_regs->stacks.psp_hi; + hw_ctxt->sh_pcsp_lo = stack_regs->stacks.pcsp_lo; + hw_ctxt->sh_pcsp_hi = stack_regs->stacks.pcsp_hi; + if (vcpu->arch.is_hv) { + WRITE_BU_PSP_LO_REG_VALUE(AW(hw_ctxt->sh_psp_lo)); + WRITE_BU_PSP_HI_REG_VALUE(AW(hw_ctxt->sh_psp_hi)); + WRITE_BU_PCSP_LO_REG_VALUE(AW(hw_ctxt->sh_pcsp_lo)); + WRITE_BU_PCSP_HI_REG_VALUE(AW(hw_ctxt->sh_pcsp_hi)); + } + sw_ctxt->crs.cr0_lo = stack_regs->crs.cr0_lo; + sw_ctxt->crs.cr0_hi = stack_regs->crs.cr0_hi; + sw_ctxt->crs.cr1_lo = stack_regs->crs.cr1_lo; + sw_ctxt->crs.cr1_hi = stack_regs->crs.cr1_hi; + + DebugSHC("update guest stacks registers:\n" + "BU_PSP: base 0x%llx size 0x%x index 0x%x\n" + "BU_PCSP: base 0x%llx size 0x%x index 0x%x\n", + stack_regs->stacks.psp_lo.PSP_lo_base, + stack_regs->stacks.psp_hi.PSP_hi_size, + stack_regs->stacks.psp_hi.PSP_hi_ind, + stack_regs->stacks.pcsp_lo.PCSP_lo_base, + stack_regs->stacks.pcsp_hi.PCSP_hi_size, + stack_regs->stacks.pcsp_hi.PCSP_hi_ind); +} + +static void kvm_dump_mmu_tdp_context(struct kvm_vcpu *vcpu, unsigned flags) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + + 
KVM_BUG_ON(!is_tdp_paging(vcpu)); + + DebugSHC("Set MMU guest TDP PT context:\n"); + + if (DEBUG_SHADOW_CONTEXT_MODE && (flags & GP_ROOT_PT_FLAG)) { + pr_info(" GP_PPTB: value 0x%llx\n", + mmu->get_vcpu_context_gp_pptb(vcpu)); + } + if (DEBUG_SHADOW_CONTEXT_MODE && + ((flags & U_ROOT_PT_FLAG) || + ((flags & OS_ROOT_PT_FLAG) && + !is_sep_virt_spaces(vcpu)))) { + pr_info(" U_PPTB: value 0x%lx\n" + " U_VPTB: value 0x%lx\n", + mmu->get_vcpu_context_u_pptb(vcpu), + mmu->get_vcpu_context_u_vptb(vcpu)); + } + if (DEBUG_SHADOW_CONTEXT_MODE && + ((flags & OS_ROOT_PT_FLAG) && + is_sep_virt_spaces(vcpu))) { + pr_info(" OS_PPTB: value 0x%lx\n" + " OS_VPTB: value 0x%lx\n" + " OS_VAB: value 0x%lx\n", + mmu->get_vcpu_context_os_pptb(vcpu), + mmu->get_vcpu_context_os_vptb(vcpu), + mmu->get_vcpu_context_os_vab(vcpu)); + } + if (DEBUG_SHADOW_CONTEXT_MODE) { + pr_info(" SH_PID: value 0x%llx\n", + read_guest_PID_reg(vcpu)); + } + if (DEBUG_SHADOW_CONTEXT_MODE && (flags & SEP_VIRT_ROOT_PT_FLAG)) { + e2k_core_mode_t core_mode = read_guest_CORE_MODE_reg(vcpu); + + pr_info(" SH_CORE_MODE: 0x%x sep_virt_space: %s\n", + core_mode.CORE_MODE_reg, + (core_mode.CORE_MODE_sep_virt_space) ? 
+ "true" : "false"); + } +} + +static void setup_mmu_tdp_context(struct kvm_vcpu *vcpu, unsigned flags) +{ + KVM_BUG_ON(!is_tdp_paging(vcpu)); + + /* setup MMU page tables hardware and software context */ + kvm_set_vcpu_the_pt_context(vcpu, flags); + + /* setup user PID on hardware shadow register */ + write_SH_PID_reg(vcpu->arch.mmu.pid); + + if ((flags & SEP_VIRT_ROOT_PT_FLAG) && vcpu->arch.is_pv) { + e2k_core_mode_t core_mode = read_SH_CORE_MODE_reg(); + + /* enable/disable guest separate Page Tables support */ + core_mode.CORE_MODE_sep_virt_space = is_sep_virt_spaces(vcpu); + write_SH_CORE_MODE_reg(core_mode); + vcpu->arch.hw_ctxt.sh_core_mode = core_mode; + } + + kvm_dump_mmu_tdp_context(vcpu, flags); +} + +void kvm_setup_mmu_tdp_u_pt_context(struct kvm_vcpu *vcpu) +{ + unsigned flags; + + if (vcpu->arch.mmu.u_context_on) { + flags = U_ROOT_PT_FLAG; + } else { + flags = U_ROOT_PT_FLAG | OS_ROOT_PT_FLAG | + SEP_VIRT_ROOT_PT_FLAG; + } + /* setup MMU page tables hardware and software context */ + setup_mmu_tdp_context(vcpu, flags); +} + +void kvm_setup_mmu_tdp_context(struct kvm_vcpu *vcpu) +{ + /* setup MMU page tables hardware and software context */ + setup_mmu_tdp_context(vcpu, + GP_ROOT_PT_FLAG | OS_ROOT_PT_FLAG | U_ROOT_PT_FLAG | + SEP_VIRT_ROOT_PT_FLAG); +} + +static inline void +kvm_dump_shadow_u_pptb(struct kvm_vcpu *vcpu, const char *title) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + + DebugSHC("%s" + " sh_U_PPTB: value 0x%lx\n" + " sh_U_VPTB: value 0x%lx\n" + " U_PPTB: value 0x%lx\n" + " U_VPTB: value 0x%lx\n" + " SH_PID: value 0x%llx\n", + title, + mmu->get_vcpu_context_u_pptb(vcpu), + mmu->get_vcpu_context_u_vptb(vcpu), + mmu->get_vcpu_u_pptb(vcpu), + mmu->get_vcpu_u_vptb(vcpu), + read_guest_PID_reg(vcpu)); +} + +void kvm_setup_mmu_spt_context(struct kvm_vcpu *vcpu) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + + /* setup OS and user PT hardware and software context */ + kvm_set_vcpu_pt_context(vcpu); + + if (vcpu->arch.is_hv) { + e2k_core_mode_t 
/* continuation of kvm_setup_mmu_spt_context() */
			core_mode = read_SH_CORE_MODE_reg();

		/* enable/disable guest separate Page Tables support */
		core_mode.CORE_MODE_sep_virt_space = is_sep_virt_spaces(vcpu);
		write_SH_CORE_MODE_reg(core_mode);
		vcpu->arch.hw_ctxt.sh_core_mode = core_mode;
	}

	kvm_dump_shadow_u_pptb(vcpu, "Set MMU guest shadow OS/U_PT context:\n");

	if (DEBUG_SHADOW_CONTEXT_MODE && is_sep_virt_spaces(vcpu)) {
		pr_info(" sh_OS_PPTB: value 0x%lx\n"
			" sh_OS_VPTB: value 0x%lx\n"
			" OS_PPTB: value 0x%lx\n"
			" OS_VPTB: value 0x%lx\n"
			" SH_OS_VAB: value 0x%lx\n",
			mmu->get_vcpu_context_os_pptb(vcpu),
			mmu->get_vcpu_context_os_vptb(vcpu),
			mmu->get_vcpu_os_pptb(vcpu),
			mmu->get_vcpu_os_vptb(vcpu),
			mmu->get_vcpu_context_os_vab(vcpu));
	}
	if (DEBUG_SHADOW_CONTEXT_MODE) {
		if (is_phys_paging(vcpu)) {
			pr_info(" GP_PPTB: value 0x%llx\n",
				mmu->get_vcpu_context_gp_pptb(vcpu));
		}
		/* NOTE(review): same " GP_PPTB:" label as above, but this one
		 * dumps the guest value, not the context value — confirm the
		 * duplicated label is intended */
		if (!vcpu->arch.is_hv) {
			pr_info(" GP_PPTB: value 0x%llx\n",
				mmu->get_vcpu_gp_pptb(vcpu));
		}
	}
	if (DEBUG_SHADOW_CONTEXT_MODE && is_paging(vcpu)) {
		pr_info(" SH_MMU_CR: value 0x%llx\n",
			read_guest_MMU_CR_reg(vcpu));
	}
	if (DEBUG_SHADOW_CONTEXT_MODE) {
		e2k_core_mode_t core_mode = read_guest_CORE_MODE_reg(vcpu);

		pr_info(" CORE_MODE: value 0x%x sep_virt_space: %s\n",
			core_mode.CORE_MODE_reg,
			(core_mode.CORE_MODE_sep_virt_space) ?
/* closes the pr_info() of kvm_setup_mmu_spt_context() above */
				"true" : "false");
	}
}

/* Enable guest paging: program the TDP or SPT context, then turn the TLB
 * on through the guest (shadow) MMU_CR register. */
void kvm_set_mmu_guest_pt(struct kvm_vcpu *vcpu)
{
	mmu_reg_t mmu_cr;

	if (is_tdp_paging(vcpu)) {
		kvm_setup_mmu_tdp_context(vcpu);
	} else if (is_shadow_paging(vcpu)) {
		kvm_setup_mmu_spt_context(vcpu);
	} else {
		KVM_BUG_ON(true);
	}

	/* enable TLB in paging mode */
	mmu_cr = MMU_CR_KERNEL;
	write_guest_MMU_CR_reg(vcpu, mmu_cr);
	DebugSHC("Enable guest MMU paging:\n"
		" SH_MMU_CR: value 0x%llx\n",
		mmu_cr);
}

/* Program a new shadow user page-table root for the guest. */
void kvm_setup_shadow_u_pptb(struct kvm_vcpu *vcpu)
{
	/* setup new user PT hardware/software context */
	kvm_set_vcpu_u_pt_context(vcpu);

	kvm_dump_shadow_u_pptb(vcpu, "Set MMU guest shadow U_PT context:\n");
}

/* Paravirtualized flavor of kvm_setup_shadow_u_pptb(): additionally makes
 * 'gmm' the current and the active guest mm of the vcpu. */
void mmu_pv_setup_shadow_u_pptb(struct kvm_vcpu *vcpu, gmm_struct_t *gmm)
{
	kvm_setup_shadow_u_pptb(vcpu);
	pv_vcpu_set_gmm(vcpu, gmm);
	pv_vcpu_set_active_gmm(vcpu, gmm);
}

/* Dump the shadow OS page-table registers, in the separate- or the
 * united-virtual-spaces layout. */
void kvm_dump_shadow_os_pt_regs(struct kvm_vcpu *vcpu)
{
	if (is_sep_virt_spaces(vcpu)) {
		DebugSHC("Set MMU guest shadow OS PT context:\n"
			" SH_OS_PPTB: value 0x%lx\n"
			" SH_OS_VPTB: value 0x%lx\n"
			" OS_PPTB: value 0x%lx\n"
			" OS_VPTB: value 0x%lx\n"
			" SH_OS_VAB: value 0x%lx\n"
			" OS_VAB: value 0x%lx\n",
			vcpu->arch.mmu.get_vcpu_context_os_pptb(vcpu),
			vcpu->arch.mmu.get_vcpu_context_os_vptb(vcpu),
			vcpu->arch.mmu.get_vcpu_os_pptb(vcpu),
			vcpu->arch.mmu.get_vcpu_os_vptb(vcpu),
			vcpu->arch.mmu.get_vcpu_context_os_vab(vcpu),
			vcpu->arch.mmu.get_vcpu_os_vab(vcpu));
	} else {
		DebugSHC("Set MMU guest shadow OS/U PT context:\n"
			" SH_OS/U_PPTB: value 0x%lx\n"
			" SH_OS/U_VPTB: value 0x%lx\n"
			" OS/U_PPTB: value 0x%lx\n"
			" OS/U_VPTB: value 0x%lx\n",
			vcpu->arch.mmu.get_vcpu_context_u_pptb(vcpu),
			vcpu->arch.mmu.get_vcpu_context_u_vptb(vcpu),
			vcpu->arch.mmu.get_vcpu_u_pptb(vcpu),
			vcpu->arch.mmu.get_vcpu_u_vptb(vcpu));
	}
}

/* Program a new shadow OS (kernel) page-table root for the guest. */
void kvm_setup_shadow_os_pptb(struct kvm_vcpu *vcpu)
{
	/* setup kernel new PT hardware/software context */
	kvm_set_vcpu_os_pt_context(vcpu);

	kvm_dump_shadow_os_pt_regs(vcpu);
}

/* Switch the guest to another user page table (user context already on). */
void kvm_switch_mmu_guest_u_pt(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *mmu = &vcpu->arch.mmu;

	/* setup user PT hardware and software context */
	kvm_set_vcpu_u_pt_context(vcpu);

	write_guest_PID_reg(vcpu, mmu->pid);

	kvm_dump_shadow_u_pptb(vcpu, "Set MMU guest shadow U_PT context:\n");
}

/* First-time activation of the guest user page table: program both OS and
 * user roots, the PID and the separate-spaces bit of guest CORE_MODE. */
void kvm_set_mmu_guest_u_pt(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *mmu = &vcpu->arch.mmu;
	e2k_core_mode_t core_mode;

	/* fast path once the user context has already been activated */
	if (likely(mmu->u_context_on))
		return kvm_switch_mmu_guest_u_pt(vcpu);

	/* setup OS and user PT hardware and software context */
	kvm_set_vcpu_pt_context(vcpu);

	/* setup user PID on hardware shadow register */
	write_guest_PID_reg(vcpu, mmu->pid);

	kvm_dump_shadow_u_pptb(vcpu, "Set MMU guest shadow OS/U_PT context:\n");
	if (DEBUG_SHADOW_CONTEXT_MODE && is_sep_virt_spaces(vcpu)) {
		pr_info(" SH_OS_PPTB: value 0x%lx\n"
			" SH_OS_VPTB: value 0x%lx\n"
			" OS_PPTB: value 0x%lx\n"
			" OS_VPTB: value 0x%lx\n"
			" SH_OS_VAB: value 0x%lx\n",
			mmu->get_vcpu_context_os_pptb(vcpu),
			mmu->get_vcpu_context_os_vptb(vcpu),
			mmu->get_vcpu_os_pptb(vcpu),
			mmu->get_vcpu_os_vptb(vcpu),
			mmu->get_vcpu_context_os_vab(vcpu));
	}
	if (DEBUG_SHADOW_CONTEXT_MODE && is_phys_paging(vcpu)) {
		pr_info(" GP_PPTB: value 0x%llx\n",
			mmu->get_vcpu_context_gp_pptb(vcpu));
	}

	/* enable separate Page Tables support */
	core_mode = read_guest_CORE_MODE_reg(vcpu);
	core_mode.CORE_MODE_sep_virt_space = is_sep_virt_spaces(vcpu);
	write_guest_CORE_MODE_reg(vcpu, core_mode);
	DebugSHC("Set separate PT support on guest MMU:\n"
		" SH_CORE_MODE: 0x%x gmi %s hci %s sep_virt_space %s\n",
		core_mode.CORE_MODE_reg,
		(core_mode.CORE_MODE_gmi) ? "true" : "false",
		(core_mode.CORE_MODE_hci) ? "true" : "false",
		(core_mode.CORE_MODE_sep_virt_space) ?
/* closes the DebugSHC() call of kvm_set_mmu_guest_u_pt() above */
			"true" : "false");
}

/* Write the OS/CU shadow descriptors from 'hw_ctxt' to the hardware shadow
 * registers (hardware-virtualized vcpu only), then dump them. */
static void vcpu_write_os_cu_hw_ctxt_to_registers(struct kvm_vcpu *vcpu,
				const struct kvm_hw_cpu_context *hw_ctxt)
{
	/*
	 * CPU shadow context
	 */
	if (vcpu->arch.is_hv) {
		write_SH_OSCUD_LO_reg(hw_ctxt->sh_oscud_lo);
		write_SH_OSCUD_HI_reg(hw_ctxt->sh_oscud_hi);
		write_SH_OSGD_LO_reg(hw_ctxt->sh_osgd_lo);
		write_SH_OSGD_HI_reg(hw_ctxt->sh_osgd_hi);
		write_SH_OSCUTD_reg(hw_ctxt->sh_oscutd);
		write_SH_OSCUIR_reg(hw_ctxt->sh_oscuir);
	}
	DebugSHC("initialized CPU shadow context\n"
		"SH_OSCUD: base 0x%llx size 0x%x\n"
		"SH_OSGD: base 0x%llx size 0x%x\n"
		"CUTD: base 0x%llx\n"
		"SH_OSCUTD: base 0x%llx\n"
		"SH_OSCUIR: index 0x%x\n"
		"Trap table entry at %px\n",
		hw_ctxt->sh_oscud_lo.OSCUD_lo_base,
		hw_ctxt->sh_oscud_hi.OSCUD_hi_size,
		hw_ctxt->sh_osgd_lo.OSGD_lo_base,
		hw_ctxt->sh_osgd_hi.OSGD_hi_size,
		vcpu->arch.sw_ctxt.cutd.CUTD_base,
		hw_ctxt->sh_oscutd.CUTD_base,
		hw_ctxt->sh_oscuir.CUIR_index,
		vcpu->arch.trap_entry);
}

/* Load the complete hardware context of a hardware-virtualized vcpu into
 * the shadow/backup registers before glaunch; continues on the next chunk. */
static void write_hw_ctxt_to_hv_vcpu_registers(struct kvm_vcpu *vcpu,
				const struct kvm_hw_cpu_context *hw_ctxt,
				const struct kvm_sw_cpu_context *sw_ctxt)
{
	struct kvm_mmu *mmu = &vcpu->arch.mmu;
	epic_page_t *cepic = hw_ctxt->cepic;
	unsigned int i;

	/*
	 * Stack registers
	 */
	WRITE_SH_PSP_LO_REG_VALUE(AW(hw_ctxt->sh_psp_lo));
	WRITE_SH_PSP_HI_REG_VALUE(AW(hw_ctxt->sh_psp_hi));
	WRITE_SH_PCSP_LO_REG_VALUE(AW(hw_ctxt->sh_pcsp_lo));
	WRITE_SH_PCSP_HI_REG_VALUE(AW(hw_ctxt->sh_pcsp_hi));
	WRITE_BU_PSP_LO_REG_VALUE(AW(hw_ctxt->bu_psp_lo));
	WRITE_BU_PSP_HI_REG_VALUE(AW(hw_ctxt->bu_psp_hi));
	WRITE_BU_PCSP_LO_REG_VALUE(AW(hw_ctxt->bu_pcsp_lo));
	WRITE_BU_PCSP_HI_REG_VALUE(AW(hw_ctxt->bu_pcsp_hi));
	/* Filling of backup stacks is made on main PSHTP/PCSHTP and
	 * BU_PSP/BU_PCSP pointers.
Switch from main PSHTP/PCSHTP to + * shadow SH_PSHTP/SH_PCSHTP is done after filling, so set + * shadow SH_PSHTP/SH_PCSHTP to sizes of backup stacks */ + WRITE_SH_PSHTP_REG_VALUE(hw_ctxt->bu_psp_hi.PSP_hi_ind); + WRITE_SH_PCSHTP_REG_SVALUE(hw_ctxt->bu_pcsp_hi.PCSP_hi_ind); + + DebugSHC("initialized hardware shadow registers:\n" + "SH_PSP: base 0x%llx size 0x%x index 0x%x\n" + "SH_PCSP: base 0x%llx size 0x%x index 0x%x\n" + "BU_PSP: base 0x%llx size 0x%x index 0x%x\n" + "BU_PCSP: base 0x%llx size 0x%x index 0x%x\n" + "SH_PSHTP: value 0x%x\n" + "SH_PSHTP: value 0x%x\n", + hw_ctxt->sh_psp_lo.PSP_lo_base, hw_ctxt->sh_psp_hi.PSP_hi_size, + hw_ctxt->sh_psp_hi.PSP_hi_ind, hw_ctxt->sh_pcsp_lo.PCSP_lo_base, + hw_ctxt->sh_pcsp_hi.PCSP_hi_size, + hw_ctxt->sh_pcsp_hi.PCSP_hi_ind, hw_ctxt->bu_psp_lo.PSP_lo_base, + hw_ctxt->bu_psp_hi.PSP_hi_size, hw_ctxt->bu_psp_hi.PSP_hi_ind, + hw_ctxt->bu_pcsp_lo.PCSP_lo_base, + hw_ctxt->bu_pcsp_hi.PCSP_hi_size, + hw_ctxt->bu_pcsp_hi.PCSP_hi_ind, hw_ctxt->bu_psp_hi.PSP_hi_ind, + hw_ctxt->bu_pcsp_hi.PCSP_hi_ind); + + WRITE_SH_WD_REG_VALUE(hw_ctxt->sh_wd.WD_reg); + + /* + * MMU shadow context + */ + write_SH_MMU_CR_reg(hw_ctxt->sh_mmu_cr); + write_SH_PID_reg(hw_ctxt->sh_pid); + write_GID_reg(hw_ctxt->gid); + DebugSHC("initialized MMU shadow context:\n" + "SH_MMU_CR: value 0x%llx\n" + "SH_PID: value 0x%llx\n" + "GP_PPTB: value 0x%llx\n" + "sh_U_PPTB: value 0x%lx\n" + "sh_U_VPTB: value 0x%lx\n" + "SH_OS_PPTB: value 0x%lx\n" + "SH_OS_VPTB: value 0x%lx\n" + "SH_OS_VAB: value 0x%lx\n" + "GID: value 0x%llx\n", + hw_ctxt->sh_mmu_cr, hw_ctxt->sh_pid, + mmu->get_vcpu_context_gp_pptb(vcpu), + mmu->get_vcpu_context_u_pptb(vcpu), + mmu->get_vcpu_context_u_vptb(vcpu), + mmu->get_vcpu_context_os_pptb(vcpu), + mmu->get_vcpu_context_os_vptb(vcpu), + mmu->get_vcpu_context_os_vab(vcpu), + hw_ctxt->gid); + + /* + * CPU shadow context + */ + vcpu_write_os_cu_hw_ctxt_to_registers(vcpu, hw_ctxt); + + write_SH_OSR0_reg_value(hw_ctxt->sh_osr0); + DebugSHC("SH_OSR0: 
value 0x%llx\n", hw_ctxt->sh_osr0); + write_SH_CORE_MODE_reg(hw_ctxt->sh_core_mode); + DebugSHC("SH_CORE_MODE: value 0x%x, gmi %s, hci %s\n", + hw_ctxt->sh_core_mode.CORE_MODE_reg, + (hw_ctxt->sh_core_mode.CORE_MODE_gmi) ? "true" : "false", + (hw_ctxt->sh_core_mode.CORE_MODE_hci) ? "true" : "false"); + + /* + * VIRT_CTRL_* registers + */ + write_VIRT_CTRL_CU_reg(hw_ctxt->virt_ctrl_cu); + write_VIRT_CTRL_MU_reg(hw_ctxt->virt_ctrl_mu); + write_G_W_IMASK_MMU_CR_reg(hw_ctxt->g_w_imask_mmu_cr); + DebugSHC("initialized VIRT_CTRL registers\n" + "VIRT_CTRL_CU: 0x%llx\n" + "VIRT_CTRL_MU: 0x%llx, sh_pt_en : %s, gp_pt_en : %s\n" + "G_W_IMASK_MMU_CR: 0x%llx, tlb_en : %s\n", + AW(hw_ctxt->virt_ctrl_cu), AW(hw_ctxt->virt_ctrl_mu), + (hw_ctxt->virt_ctrl_mu.sh_pt_en) ? "true" : "false", + (hw_ctxt->virt_ctrl_mu.gp_pt_en) ? "true" : "false", + hw_ctxt->g_w_imask_mmu_cr, + (hw_ctxt->g_w_imask_mmu_cr & _MMU_CR_TLB_EN) ? + "true" : "false"); + + epic_write_guest_w(CEPIC_CTRL, cepic->ctrl); + epic_write_guest_w(CEPIC_ID, cepic->id); + epic_write_guest_w(CEPIC_CPR, cepic->cpr); + epic_write_guest_w(CEPIC_ESR, cepic->esr); + epic_write_guest_w(CEPIC_ESR2, cepic->esr2.raw); + epic_write_guest_w(CEPIC_CIR, cepic->cir.raw); + epic_write_guest_w(CEPIC_ESR_NEW, cepic->esr_new.counter); + epic_write_guest_d(CEPIC_ICR, cepic->icr.raw); + epic_write_guest_w(CEPIC_TIMER_LVTT, cepic->timer_lvtt.raw); + epic_write_guest_w(CEPIC_TIMER_INIT, cepic->timer_init); + epic_write_guest_w(CEPIC_TIMER_CUR, cepic->timer_cur); + epic_write_guest_w(CEPIC_TIMER_DIV, cepic->timer_div); + epic_write_guest_w(CEPIC_NM_TIMER_LVTT, cepic->nm_timer_lvtt); + epic_write_guest_w(CEPIC_NM_TIMER_INIT, cepic->nm_timer_init); + epic_write_guest_w(CEPIC_NM_TIMER_CUR, cepic->nm_timer_cur); + epic_write_guest_w(CEPIC_NM_TIMER_DIV, cepic->nm_timer_div); + epic_write_guest_w(CEPIC_SVR, cepic->svr); + epic_write_guest_w(CEPIC_PNMIRR_MASK, cepic->pnmirr_mask); + for (i = 0; i < CEPIC_PMIRR_NR_DREGS; i++) + 
/* continuation of write_hw_ctxt_to_hv_vcpu_registers() */
		epic_write_guest_d(CEPIC_PMIRR + i * 8,
				cepic->pmirr[i].counter);
	epic_write_guest_w(CEPIC_PNMIRR, cepic->pnmirr.counter);
}

/* Dispatch the hardware-context load to the hv or pv flavor. */
static void write_hw_ctxt_to_vcpu_registers(struct kvm_vcpu *vcpu,
				const struct kvm_hw_cpu_context *hw_ctxt,
				const struct kvm_sw_cpu_context *sw_ctxt)
{
	if (vcpu->arch.is_hv) {
		write_hw_ctxt_to_hv_vcpu_registers(vcpu, hw_ctxt, sw_ctxt);
	} else if (vcpu->arch.is_pv) {
		write_hw_ctxt_to_pv_vcpu_registers(vcpu, hw_ctxt, sw_ctxt);
	} else {
		KVM_BUG_ON(true);
	}
}

/* Enter the guest with the GLAUNCH instruction and capture the ctpr/loop
 * registers it returns with.  Runs on guest context (including the data
 * stack), so no calls, prints or other complex code is allowed here. */
noinline __interrupt
static void launch_hv_vcpu(struct kvm_vcpu_arch *vcpu)
{
	struct thread_info *ti = current_thread_info();
	struct kvm_intc_cpu_context *intc_ctxt = &vcpu->intc_ctxt;
	struct kvm_sw_cpu_context *sw_ctxt = &vcpu->sw_ctxt;
	u64 ctpr1 = AW(intc_ctxt->ctpr1), ctpr1_hi = AW(intc_ctxt->ctpr1_hi),
		ctpr2 = AW(intc_ctxt->ctpr2), ctpr2_hi = AW(intc_ctxt->ctpr2_hi),
		ctpr3 = AW(intc_ctxt->ctpr3), ctpr3_hi = AW(intc_ctxt->ctpr3_hi),
		lsr = intc_ctxt->lsr, lsr1 = intc_ctxt->lsr1,
		ilcr = intc_ctxt->ilcr, ilcr1 = intc_ctxt->ilcr1;

	/*
	 * Here kernel is on guest context including data stack
	 * so nothing complex: calls, prints, etc
	 */

	__guest_enter(ti, vcpu, FULL_CONTEXT_SWITCH | USD_CONTEXT_SWITCH |
				DEBUG_REGS_SWITCH);

	NATIVE_WRITE_CTPR2_REG_VALUE(ctpr2);
	NATIVE_WRITE_CTPR2_HI_REG_VALUE(ctpr2_hi);
#ifdef CONFIG_USE_AAU
	/* These registers must be restored after ctpr2 */
	native_set_aau_aaldis_aaldas(ti, &sw_ctxt->aau_context);
	/* Restore the real guest AASR value */
	RESTORE_GUEST_AAU_AASR(&sw_ctxt->aau_context, 1);
	NATIVE_RESTORE_AAU_MASK_REGS(&sw_ctxt->aau_context);
#endif
	/* issue GLAUNCH instruction.
	 * This macro does not restore %ctpr2 register because of ordering
	 * with AAU restore. */
	E2K_GLAUNCH(ctpr1, ctpr1_hi, ctpr2, ctpr2_hi, ctpr3, ctpr3_hi, lsr, lsr1, ilcr, ilcr1);

	AW(intc_ctxt->ctpr1) = ctpr1;
	/* Make sure that the first kernel memory access is store.
	 * This is needed to flush SLT before trying to load anything. */
	barrier();
	AW(intc_ctxt->ctpr2) = ctpr2;
	AW(intc_ctxt->ctpr3) = ctpr3;
	AW(intc_ctxt->ctpr1_hi) = ctpr1_hi;
	AW(intc_ctxt->ctpr2_hi) = ctpr2_hi;
	AW(intc_ctxt->ctpr3_hi) = ctpr3_hi;
	intc_ctxt->lsr = lsr;
	intc_ctxt->lsr1 = lsr1;
	intc_ctxt->ilcr = ilcr;
	intc_ctxt->ilcr1 = ilcr1;

	__guest_exit(ti, vcpu, FULL_CONTEXT_SWITCH | USD_CONTEXT_SWITCH |
				DEBUG_REGS_SWITCH);
}

/* Decide whether the guest must enter its trap handler right after the
 * next glaunch (the g_th flag passed to restore_SBBP_TIRs()). */
static inline bool calculate_g_th(const intc_info_cu_hdr_t *cu_hdr,
		const struct kvm_intc_cpu_context *intc_ctxt)
{
	u64 exceptions = intc_ctxt->exceptions;

	/* Entering trap handler will freeze TIRs, so no need for g_th flag */
	if (cu_hdr->lo.tir_fz)
		return false;

	/* #132939 - hardware always tries to translate guest trap handler upon
	 * interception, so we do not set 'g_th' bit if only exc_instr_page_prot
	 * or exc_instr_page_miss happened (as those are precise traps, they
	 * will be regenerated by hardware anyway).
	 */
	exceptions &= ~(exc_instr_page_prot_mask | exc_instr_page_miss_mask);

	if (exceptions)
		return true;

	return intc_ctxt->cu_num >= 0 && cu_hdr->lo.exc_c;
}

/* One glaunch/intercept cycle: handle pending thread work and vcpu
 * requests, restore the intercept state into hardware, run the guest,
 * then save the new intercept state and parse it.  Returns 0 to continue
 * the loop, negative error to exit to user space. */
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
	gthread_info_t *gti = current_thread_info()->gthread_info;
	e2k_upsr_t guest_upsr;
	intc_info_cu_t *cu = &vcpu->arch.intc_ctxt.cu;
	intc_info_mu_t *mu = vcpu->arch.intc_ctxt.mu;
	struct kvm_intc_cpu_context *intc_ctxt = &vcpu->arch.intc_ctxt;
	u64 exceptions;
	int ret;
	bool g_th;

	raw_all_irq_disable();
	/* drain signals, rescheduling and notify-resume work with
	 * interrupts enabled before committing to the guest entry */
	while (unlikely(current_thread_info()->flags & _TIF_WORK_MASK)) {
		raw_all_irq_enable();

		if (signal_pending(current)) {
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.signal_exits;
			return -EINTR;
		}

		/* and here we do tasks re-scheduling on a h/w interrupt */
		if (need_resched())
			schedule();

		if (test_thread_flag(TIF_NOTIFY_RESUME)) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			/*
			 * We do not have pt_regs that correspond to
			 * the intercepted context so just pass NULL.
			 */
			do_notify_resume(NULL);
		}

		raw_all_irq_disable();
	}

	preempt_disable();

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		kvm_vcpu_flush_tlb(vcpu);
	}
	if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) {
		kvm_mmu_sync_roots(vcpu, OS_ROOT_PT_FLAG | U_ROOT_PT_FLAG);
	}

	/* Switch IRQ control to PSR and disable MI/NMIs */
	NATIVE_WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_DISABLED));

	/* Check if guest should enter trap handler after glaunch. */
	g_th = calculate_g_th(&cu->header, intc_ctxt);

	restore_SBBP_TIRs(intc_ctxt->sbbp, intc_ctxt->TIRs, intc_ctxt->nr_TIRs,
			cu->header.lo.tir_fz, g_th);
	kvm_clear_vcpu_intc_TIRs_num(vcpu);

	/* FIXME: simulator bug: simulator does not reexecute requests */
	/* from INTC_INFO_MU unlike the hardware, so do it by software */
	if (vcpu->arch.intc_ctxt.intc_mu_to_move != 0)
		kvm_restore_vcpu_trap_cellar(vcpu);

	/* if intc info structures were updated, then restore registers */
	if (kvm_get_intc_info_mu_is_updated(vcpu)) {
		modify_intc_info_mu_data(intc_ctxt->mu, intc_ctxt->mu_num);
		restore_intc_info_mu(intc_ctxt->mu, intc_ctxt->mu_num);
	}
	if (kvm_get_intc_info_cu_is_updated(vcpu))
		restore_intc_info_cu(&intc_ctxt->cu, intc_ctxt->cu_num);

	/* MMU intercepts were handled, clear state for new intercepts */
	kvm_clear_intc_mu_state(vcpu);

	/* clear hypervisor intercept event counters */
	intc_ctxt->cu_num = -1;
	intc_ctxt->mu_num = -1;
	intc_ctxt->cur_mu = -1;
	kvm_reset_intc_info_mu_is_updated(vcpu);
	kvm_reset_intc_info_cu_is_updated(vcpu);

	kvm_do_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_running);

	/* the function should set initial UPSR state */
	if (gti != NULL) {
		KVM_RESTORE_GUEST_KERNEL_UPSR(current_thread_info());
	}

	launch_hv_vcpu(&vcpu->arch);

	/* Guest can switch to other thread, so update guest thread info */
	gti = current_thread_info()->gthread_info;

	save_intc_info_cu(cu, &vcpu->arch.intc_ctxt.cu_num);
	save_intc_info_mu(mu, &vcpu->arch.intc_ctxt.mu_num);

	kvm_do_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_in_intercept);

	/*
	 * %sbbp LIFO stack is unfrozen by writing %TIR register,
	 * so it must be read before TIRs.
	 */
	SAVE_SBBP(intc_ctxt->sbbp);

	/*
	 * Save guest TIRs should be at any case, including empty state
	 */
	exceptions = 0;
	exceptions = SAVE_TIRS(intc_ctxt->TIRs, intc_ctxt->nr_TIRs,
				true);	/* from_intc */
	/* un-freeze the TIR's LIFO */
	UNFREEZE_TIRs();
	intc_ctxt->exceptions = exceptions;

	/* save current state of guest kernel UPSR */
	NATIVE_DO_SAVE_UPSR_REG(guest_upsr);
	if (gti != NULL) {
		DO_SAVE_GUEST_KERNEL_UPSR(gti, guest_upsr);
	}

	preempt_enable();

	/* This will enable interrupts */
	ret = parse_INTC_registers(&vcpu->arch);

	/* check requests after intercept handling and do */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		kvm_vcpu_flush_tlb(vcpu);
	}

	return ret;
}

/* Run the vcpu_enter_guest() loop; 'first_launch' limits it to a single
 * iteration.  While the guest is still in nonpaging mode the loop is run
 * on the guest MMU context (when SWITCH_TO_GUEST_MMU_CONTEXT is built in). */
static int do_startup_hv_vcpu(struct kvm_vcpu *vcpu, bool first_launch)
{
	bool nonpaging = !is_paging(vcpu);
#ifdef SWITCH_TO_GUEST_MMU_CONTEXT
	bool host_context;
#endif	/* SWITCH_TO_GUEST_MMU_CONTEXT */
	int ret;

#ifdef SWITCH_TO_GUEST_MMU_CONTEXT
	if (nonpaging) {
		raw_all_irq_disable();
		/* switch to guest MMU context, guest page faults should */
		/* be handled based on hypervisor U_PPTB page table */
		host_context = kvm_hv_mmu_switch_context(vcpu, false);
		KVM_WARN_ON(!host_context);
		raw_all_irq_enable();
	}
#endif	/* SWITCH_TO_GUEST_MMU_CONTEXT */

	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
			kvm_vcpu_flush_tlb(vcpu);
		}
	}

	/* loop while some intercept event need be handled at user space */
	do {
		if (DEBUG_INTC_TIRs_MODE &&
				!kvm_check_is_vcpu_intc_TIRs_empty(vcpu)) {
			pr_info("%s(): There are traps injected to handle "
				"by guest\n",
				__func__);
			print_all_TIRs(vcpu->arch.intc_ctxt.TIRs,
					vcpu->arch.intc_ctxt.nr_TIRs);
		}
		ret = vcpu_enter_guest(vcpu);
		if (unlikely(vcpu->arch.exit_shutdown_terminate))
			ret = -1;
	} while (ret == 0 && !first_launch);

#ifdef SWITCH_TO_GUEST_MMU_CONTEXT
	if (nonpaging) {
		raw_all_irq_disable();
		/* return to host MMU context */
		host_context = kvm_hv_mmu_switch_context(vcpu, true);
		KVM_WARN_ON(host_context);
		raw_all_irq_enable();
	}
#endif	/* SWITCH_TO_GUEST_MMU_CONTEXT */

	return ret;
}

/* Public entry point: run the guest until an exit condition occurs. */
int startup_hv_vcpu(struct kvm_vcpu *vcpu)
{
	return do_startup_hv_vcpu(vcpu, false	/* first launch ? */);
}

/* Program the CEPIC guest id register. */
static void kvm_epic_write_gstid(int gst_id)
{
	union cepic_gstid reg_gstid;

	reg_gstid.raw = 0;
	reg_gstid.bits.gstid = gst_id;
	epic_write_w(CEPIC_GSTID, reg_gstid.raw);
}

/* Program the CEPIC guest base register (physical, page-aligned). */
static void kvm_epic_write_gstbase(unsigned long epic_gstbase)
{
	epic_write_d(CEPIC_GSTBASE_LO, epic_gstbase >> PAGE_SHIFT);
}

/*
 * Currently DAT only has 64 rows, so hardware will transform full CEPIC ID
 * back to short to get index
 */
static void kvm_epic_write_dat(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned int vcpu_id = kvm_vcpu_to_full_cepic_id(vcpu);
	unsigned int cpu = cepic_id_short_to_full(vcpu->cpu);
	unsigned int gst_id = kvm->arch.vmid.nr;
	unsigned long flags;
	union cepic_dat reg;

	reg.raw = 0;
	reg.bits.gst_id = gst_id;
	reg.bits.gst_dst = vcpu_id;
	reg.bits.index = cpu;
	reg.bits.dat_cop = CEPIC_DAT_WRITE;

	raw_spin_lock_irqsave(&vcpu->arch.epic_dat_lock, flags);
	epic_write_d(CEPIC_DAT, reg.raw);

	/* Wait for status bit */
	do {
		cpu_relax();
		reg.raw = (unsigned long) epic_read_w(CEPIC_DAT);
	} while (reg.bits.stat);
	vcpu->arch.epic_dat_active = true;
	raw_spin_unlock_irqrestore(&vcpu->arch.epic_dat_lock, flags);
}

/* Invalidate this vcpu's CEPIC DAT row; busy-waits on the status bit under
 * the epic_dat_lock spinlock. */
void kvm_epic_invalidate_dat(struct kvm_vcpu_arch *vcpu)
{
	union cepic_dat reg;
	unsigned long flags;

	reg.raw = 0;
	reg.bits.index = cepic_id_short_to_full(arch_to_vcpu(vcpu)->cpu);
	reg.bits.dat_cop = CEPIC_DAT_INVALIDATE;

	raw_spin_lock_irqsave(&vcpu->epic_dat_lock, flags);
	epic_write_w(CEPIC_DAT, (unsigned int)reg.raw);

	/* Wait for status bit */
	do {
		cpu_relax();
		reg.raw = (unsigned long) epic_read_w(CEPIC_DAT);
	} while (reg.bits.stat);

	vcpu->epic_dat_active
		= false;
	raw_spin_unlock_irqrestore(&vcpu->epic_dat_lock, flags);
}

/* Restart the CEPIC timer; warns if it was not actually stopped. */
void kvm_epic_timer_start(void)
{
	union cepic_ctrl2 reg;

	reg.raw = epic_read_w(CEPIC_CTRL2);
	WARN_ON_ONCE(!reg.bits.timer_stop);
	reg.bits.timer_stop = 0;
	epic_write_w(CEPIC_CTRL2, reg.raw);
}

/* Stop the CEPIC timer; 'skip_check' suppresses the already-stopped
 * warning. */
void kvm_epic_timer_stop(bool skip_check)
{
	union cepic_ctrl2 reg;

	reg.raw = epic_read_w(CEPIC_CTRL2);
	WARN_ON_ONCE(!skip_check && reg.bits.timer_stop);
	reg.bits.timer_stop = 1;
	epic_write_w(CEPIC_CTRL2, reg.raw);
}

/* Unblock guest maskable and non-maskable interrupt delivery in CTRL2. */
void kvm_epic_enable_int(void)
{
	union cepic_ctrl2 reg;

	reg.raw = epic_read_w(CEPIC_CTRL2);
	reg.bits.mi_gst_blk = 0;
	reg.bits.nmi_gst_blk = 0;
	epic_write_w(CEPIC_CTRL2, reg.raw);
}

/*
 * PNMIRR "startup_entry" field cannot be restored using "OR"
 * write to PNMIRR as that will create a mix of restored and
 * previous values. So we restore it by sending startup IPI
 * to ourselves.
 *
 * No need to acquire epic_dat_lock, as we are in the process
 * of restoring the target vcpu (this is the last step).
 */
static void kvm_epic_restore_pnmirr_startup_entry(struct kvm_vcpu *vcpu)
{
	epic_page_t *cepic = vcpu->arch.hw_ctxt.cepic;
	union cepic_pnmirr reg;

	reg.raw = atomic_read(&cepic->pnmirr);
	if (reg.bits.startup)
		kvm_hw_epic_deliver_to_icr(vcpu, reg.bits.startup_entry,
				CEPIC_ICR_DLVM_STARTUP);
}

/* Load this vcpu's CEPIC state into hardware: guest id, guest base, the
 * DAT row and the pending startup entry. */
void kvm_hv_epic_load(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned int gst_id = kvm->arch.vmid.nr;
	unsigned long epic_gstbase =
		(unsigned long) __pa(page_address(kvm->arch.epic_pages));

	kvm_epic_write_gstid(gst_id);
	kvm_epic_write_gstbase(epic_gstbase);
	kvm_epic_write_dat(vcpu);
	kvm_epic_restore_pnmirr_startup_entry(vcpu);
}


/* hrtimer callback: mark the vcpu unhalted and wake it up. */
enum hrtimer_restart kvm_epic_idle_timer_fn(struct hrtimer *hrtimer)
{
	struct kvm_vcpu *vcpu = container_of(hrtimer, struct kvm_vcpu, arch.cepic_idle);

	DebugKVMIT("started on VCPU #%d\n", vcpu->vcpu_id);
	vcpu->arch.unhalted = true;
	kvm_vcpu_wake_up(vcpu);

	return HRTIMER_NORESTART;
}

/* One-time initialization of the per-vcpu idle hrtimer. */
void kvm_init_cepic_idle_timer(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu != NULL);
	DebugKVMIT("started on VCPU #%d\n", vcpu->vcpu_id);

	hrtimer_init(&vcpu->arch.cepic_idle, CLOCK_MONOTONIC,
			HRTIMER_MODE_ABS);
	vcpu->arch.cepic_idle.function = kvm_epic_idle_timer_fn;
}

/* Useful for debugging problems with wakeup */
static bool periodic_wakeup = false;
module_param(periodic_wakeup, bool, 0600);

/* Arm the idle hrtimer for the remaining CEPIC timer interval, or for a
 * periodic timeout when polling is needed (pass-through devices present or
 * the periodic_wakeup debug option set); continues on the next chunk. */
void kvm_epic_start_idle_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *hrtimer = &vcpu->arch.cepic_idle;
	struct kvm_arch *kvm = &vcpu->kvm->arch;
	u64 cepic_timer_cur = (u64) vcpu->arch.hw_ctxt.cepic->timer_cur;
	u64 vcpu_idle_timeout_ns = jiffies_to_nsecs(VCPU_IDLE_TIMEOUT);
	u64 delta_ns;

	/* nothing to wait for and no reason to wake up periodically */
	if (unlikely(cepic_timer_cur == 0 && !periodic_wakeup &&
			!kvm_has_passthrough_device(kvm)))
		return;

	delta_ns = (cepic_timer_cur)
			?
			((u64) cepic_timer_cur * NSEC_PER_SEC / kvm->cepic_freq)
			: vcpu_idle_timeout_ns;

	/* Make sure to wake up periodically to check for interrupts
	 * from external devices. Also do it if debugging option
	 * [periodic_wakeup] is enabled. */
	if (delta_ns > vcpu_idle_timeout_ns && (periodic_wakeup ||
			kvm_has_passthrough_device(kvm)))
		delta_ns = vcpu_idle_timeout_ns;

	ktime_t current_time = hrtimer->base->get_time();
	vcpu->arch.cepic_idle_start_time = current_time;
	hrtimer_start(&vcpu->arch.cepic_idle,
		ktime_add_ns(current_time, delta_ns), HRTIMER_MODE_ABS);
}

/* Recompute the CEPIC timer count after idle: subtract the wall-clock time
 * spent since the idle timer was started, clamped to [1, UINT_MAX]. */
static u32 calculate_cepic_timer_cur(struct kvm_vcpu *vcpu, u32 cepic_timer_cur)
{
	struct hrtimer *hrtimer = &vcpu->arch.cepic_idle;
	u64 cepic_freq = vcpu->kvm->arch.cepic_freq;
	u64 cepic_timer_ns = (u64) cepic_timer_cur * NSEC_PER_SEC / cepic_freq;
	u64 passed_time_ns = ktime_to_ns(ktime_sub(hrtimer->base->get_time(),
			vcpu->arch.cepic_idle_start_time));
	if (cepic_timer_ns > passed_time_ns)
		cepic_timer_ns -= passed_time_ns;
	else
		cepic_timer_ns = 0;

	/* clamp to at least 1 tick */
	u64 new_timer_cur = max(cepic_timer_ns * cepic_freq / NSEC_PER_SEC, 1ull);
	if (WARN_ON_ONCE(new_timer_cur > (u64) UINT_MAX))
		new_timer_cur = UINT_MAX;
	return new_timer_cur;
}

/* Cancel the idle hrtimer and fast-forward the CEPIC timer by the time
 * the vcpu actually spent idle. */
void kvm_epic_stop_idle_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *hrtimer = &vcpu->arch.cepic_idle;
	epic_page_t *cepic = vcpu->arch.hw_ctxt.cepic;
	/* NOTE(review): value is cast to u64 then truncated back into a u32
	 * local — confirm the intermediate cast is intentional */
	u32 cepic_timer_cur = (u64) cepic->timer_cur;

	/* Stop the software timer if it is still running */
	hrtimer_cancel(hrtimer);

	/* Adjust CEPIC timer if it is running, otherwise the guest might hang
	 * for a long time. For example, if guest waits in idle then most of
	 * the time it does not actually execute and thus the timer advances
	 * at a _much_ slower rate; it is hypervisor's duty to forward CEPIC
	 * timer in this case. */
	if (cepic_timer_cur) {
		cepic->timer_cur = calculate_cepic_timer_cur(vcpu, cepic_timer_cur);
		DebugKVMIT("Recalculating cepic timer %d from %x to %x\n",
			vcpu->vcpu_id, cepic_timer_cur, cepic->timer_cur);
	} else {
		DebugKVMIT("Not recalculating cepic timer %d\n", vcpu->vcpu_id);
	}
}

/* Prepare the backup (glaunch) stacks for hv vcpu startup. */
static int kvm_prepare_hv_vcpu_start_stacks(struct kvm_vcpu *vcpu)
{
	prepare_bu_stacks_to_startup_vcpu(vcpu);
	return 0;
}

/* Dispatch start-stack preparation to the hv or pv flavor. */
static int kvm_prepare_vcpu_start_stacks(struct kvm_vcpu *vcpu)
{
	int ret;

	if (vcpu->arch.is_hv) {
		ret = kvm_prepare_hv_vcpu_start_stacks(vcpu);
	} else if (vcpu->arch.is_pv) {
		ret = kvm_prepare_pv_vcpu_start_stacks(vcpu);
	} else {
		KVM_BUG_ON(true);
		ret = -EINVAL;
	}
	return ret;
}

/* Start the guest kernel on this vcpu: create the vcpu thread, prepare the
 * start stacks, the nonpaging MMU mode and the hardware/software context,
 * then initialize global registers; continues on the next chunk. */
int kvm_start_vcpu_thread(struct kvm_vcpu *vcpu)
{
	int ret;

	DebugKVM("started to start guest kernel on VCPU %d\n",
		vcpu->vcpu_id);


	if (vcpu->arch.is_hv) {
		ret = hv_vcpu_start_thread(vcpu);
	} else if (vcpu->arch.is_pv) {
		ret = pv_vcpu_start_thread(vcpu);
	} else {
		KVM_BUG_ON(true);
		ret = -EINVAL;
	}
	if (ret != 0)
		return ret;

	/* prepare start stacks */
	ret = kvm_prepare_vcpu_start_stacks(vcpu);
	if (ret != 0) {
		pr_err("%s(): could not prepare VCPU #%d start stacks, "
			"error %d\n",
			__func__, vcpu->vcpu_id, ret);
		return ret;
	}

	/* create empty root PT to translate GPA -> PA while guest will */
	/* create own PTs and then switch to them and enable virtual space */
	kvm_hv_setup_nonpaging_mode(vcpu);

	/* hardware context initialization and shadow registers setting */
	/* should be under disabled preemption to exclude scheduling */
	/* and save/restore intermediate state of shadow registers */
	preempt_disable();
	kvm_init_sw_ctxt(vcpu);
	init_hw_ctxt(vcpu);
	kvm_set_vcpu_pt_context(vcpu);
	init_vcpu_intc_ctxt(vcpu);
	write_hw_ctxt_to_vcpu_registers(vcpu,
		&vcpu->arch.hw_ctxt, &vcpu->arch.sw_ctxt);
	preempt_enable();

	/* prefetch MMIO space areas, which should be */
	/* directly
accessed by guest */
	kvm_prefetch_mmio_areas(vcpu);

	/* Set global registers to empty state as start state of guest */
	INIT_G_REGS();
	/* Zeroing global registers used by kernel */
	CLEAR_KERNEL_GREGS_COPY(current_thread_info());
	/* Setup guest type special globals registers */
	if (test_kvm_mode_flag(vcpu->kvm, KVMF_LINTEL)) {
		kvm_init_lintel_gregs(vcpu);
	} else {
		/* Set pointer to VCPU state to enable interface with guest */
		INIT_HOST_VCPU_STATE_GREG_COPY(current_thread_info(), vcpu);
	}

	return 0;
}
diff --git a/arch/e2k/kvm/hv_mmu.c b/arch/e2k/kvm/hv_mmu.c
new file mode 100644
index 000000000000..5d100bc001a4
--- /dev/null
+++ b/arch/e2k/kvm/hv_mmu.c
@@ -0,0 +1,725 @@

/*
 * MMU hardware virtualized support
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

/* NOTE(review): the six system header names below were lost in extraction
 * (angle-bracket targets stripped) — restore them from the original patch */
#include
#include
#include
#include
#include
#include
#include "mmu_defs.h"
#include "mmu.h"

#undef	DEBUG_KVM_MODE
#undef	DebugKVM
#define	DEBUG_KVM_MODE	0	/* kernel virtual machine debugging */
#define	DebugKVM(fmt, args...)					\
({								\
	if (DEBUG_KVM_MODE)					\
		pr_info("%s(): " fmt, __func__, ##args);	\
})

#undef	DEBUG_MMU_REG_MODE
#undef	DebugMMUREG
#define	DEBUG_MMU_REG_MODE	0	/* MMU register access events */
					/* debug mode */
#define	DebugMMUREG(fmt, args...)				\
({								\
	if (DEBUG_MMU_REG_MODE)					\
		pr_info("%s(): " fmt, __func__, ##args);	\
})

#undef	DEBUG_MMU_PID_MODE
#undef	DebugMMUPID
#define	DEBUG_MMU_PID_MODE	0	/* MMU PID register access events */
					/* debug mode */
#define	DebugMMUPID(fmt, args...)				\
({								\
	if (DEBUG_MMU_PID_MODE)					\
		pr_info("%s(): " fmt, __func__, ##args);	\
})

#undef	DEBUG_MMU_VPT_REG_MODE
#undef	DebugMMUVPT
#define	DEBUG_MMU_VPT_REG_MODE	0	/* MMU virtual PT bases */
#define	DebugMMUVPT(fmt, args...)				\
({								\
	if (DEBUG_MMU_VPT_REG_MODE)				\
		pr_info("%s(): " fmt, __func__, ##args);	\
})

/* Drop the mapping and the page reference of the guest trap cellar.
 * NOTE(review): tc_kaddr is the kmap() address plus the in-page offset —
 * confirm the e2k kunmap() accepts an address rather than a struct page. */
void kvm_vcpu_release_trap_cellar(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.mmu.tc_page == NULL)
		return;
	kvm_release_page_dirty(vcpu->arch.mmu.tc_page);
	vcpu->arch.mmu.tc_page = NULL;
	if (vcpu->arch.mmu.tc_kaddr == NULL)
		return;
	kunmap(vcpu->arch.mmu.tc_kaddr);
	vcpu->arch.mmu.tc_kaddr = NULL;
	vcpu->arch.mmu.tc_gpa = 0;
	vcpu->arch.sw_ctxt.tc_hpa = 0;
}

/* Guest wrote the TRAP POINT register: validate alignment, pin and map the
 * guest page and remember GPA/HPA/kaddr; returns the HPA via 'tc_hpap'.
 * Returns 0 on success or a negative error code. */
int vcpu_write_trap_point_mmu_reg(struct kvm_vcpu *vcpu, gpa_t tc_gpa,
		hpa_t *tc_hpap)
{
	struct kvm_sw_cpu_context *sw_ctxt = &vcpu->arch.sw_ctxt;
	gfn_t tc_gfn;
	kvm_pfn_t tc_pfn;
	hpa_t tc_hpa;
	struct page *tc_page;
	void *tc_kaddr;
	int ret;

	if (vcpu->arch.mmu.tc_page != NULL || vcpu->arch.mmu.tc_kaddr != NULL)
		/* release old trap cellar before setup new */
		kvm_vcpu_release_trap_cellar(vcpu);

	if ((tc_gpa & MMU_TRAP_POINT_MASK) != tc_gpa) {
		if ((tc_gpa & MMU_TRAP_POINT_MASK_V2) != tc_gpa) {
			pr_err("%s(): guest TRAP POINT 0x%llx is bad aligned, "
				"should be at least 0x%llx\n",
				__func__, tc_gpa, tc_gpa & MMU_TRAP_POINT_MASK);
			return -EINVAL;
		}
		pr_warn("%s(): guest TRAP POINT 0x%llx has legacy alignment\n",
			__func__, tc_gpa);
	}

	tc_gfn = gpa_to_gfn(tc_gpa);
	tc_pfn = kvm_vcpu_gfn_to_pfn(vcpu, tc_gfn);
	if (is_error_noslot_pfn(tc_pfn)) {
		pr_err("%s(): could not convert guest TRAP POINT "
			"gfn 0x%llx to host pfn\n",
			__func__, tc_gfn);
		return -EFAULT;
	}
	tc_hpa = tc_pfn << PAGE_SHIFT;
	tc_hpa += offset_in_page(tc_gpa);
	tc_page = pfn_to_page(tc_pfn);
	if (is_error_page(tc_page)) {
		pr_err("%s(): could not convert guest TRAP POINT "
			"address 0x%llx to host page\n",
			__func__, tc_gpa);
		return -EFAULT;
	}

	tc_kaddr = kmap(tc_page);
	if (tc_kaddr == NULL) {
		pr_err("%s(): could not map guest TRAP POINT page to host "
			"memory\n",
			__func__);
		ret = -ENOMEM;
		goto kmap_error;
	}
	tc_kaddr += offset_in_page(tc_gpa);

	vcpu->arch.mmu.tc_gpa = tc_gpa;
	sw_ctxt->tc_hpa = tc_hpa;
	vcpu->arch.mmu.tc_page = tc_page;
	vcpu->arch.mmu.tc_kaddr = tc_kaddr;

	DebugMMUREG("write guest TRAP POINT: host PA 0x%llx, GPA 0x%llx, "
		"mapped to host addr %px\n",
		tc_hpa, tc_gpa, tc_kaddr);

	*tc_hpap = tc_hpa;
	return 0;

kmap_error:
	kvm_release_page_dirty(tc_page);
	return ret;
}

/* Guest wrote MMU_CR: keep track of the TLB-enable bit and switch the
 * guest into (or out of) paging mode; continues on the next chunk. */
int vcpu_write_mmu_cr_reg(struct kvm_vcpu *vcpu, mmu_reg_t mmu_cr)
{
	struct kvm_hw_cpu_context *hw_ctxt = &vcpu->arch.hw_ctxt;
	mmu_reg_t old_mmu_cr;
	int r;

	old_mmu_cr = read_guest_MMU_CR_reg(vcpu);

	if ((old_mmu_cr & _MMU_CR_TLB_EN) == (mmu_cr & _MMU_CR_TLB_EN)) {
		/* paging mode is not changed, so can only update */
		write_guest_MMU_CR_reg(vcpu, mmu_cr);
		hw_ctxt->sh_mmu_cr = mmu_cr;
		DebugMMUREG("guest MMU_CR paging mode does not change: "
			"only update from 0x%llx to 0x%llx, tlb_en %d\n",
			old_mmu_cr, mmu_cr, !!(mmu_cr & _MMU_CR_TLB_EN));
		return 0;
	}
	if (!!(old_mmu_cr & _MMU_CR_TLB_EN) && !(mmu_cr & _MMU_CR_TLB_EN)) {
		/* paging mode is OFF */
		write_guest_MMU_CR_reg(vcpu, mmu_cr);
		hw_ctxt->sh_mmu_cr = mmu_cr;
		DebugMMUREG("guest MMU_CR paging mode is turn OFF: "
			"from 0x%llx to 0x%llx, tlb_en %d\n",
			old_mmu_cr, mmu_cr, !!(mmu_cr & _MMU_CR_TLB_EN));
		/* we need to free all page tables and invalidate roots */
		/* FIXME: turn OFF is not implemented */
		pr_err("%s(): guest turns OFF paging mode: MMU_CR "
			"from 0x%llx to 0x%llx, tlb_en %d\n",
			__func__, old_mmu_cr, mmu_cr,
			!!(mmu_cr & _MMU_CR_TLB_EN));
		KVM_BUG_ON(is_paging(vcpu) && !is_tdp_paging(vcpu));
		reset_paging_flag(vcpu);
		return 0;
	}

	/* guest turns ON paging mode */
	KVM_BUG_ON(is_paging_flag(vcpu));

	if (is_tdp_paging(vcpu)) {
		r = kvm_hv_setup_tdp_paging(vcpu);
	} else if (is_shadow_paging(vcpu)) {
		if (vcpu->arch.is_hv) {
			r = kvm_hv_setup_shadow_paging(vcpu, NULL);
		} else {
			r = kvm_hv_setup_shadow_paging(vcpu,
					pv_vcpu_get_gmm(vcpu));
		}
	} else {
		KVM_BUG_ON(true);
		r =
-EINVAL; + } + if (r != 0) { + pr_err("%s(): could not switch guest to paging mode, " + "error %d\n", + __func__, r); + return r; + } + + write_guest_MMU_CR_reg(vcpu, mmu_cr); + hw_ctxt->sh_mmu_cr = mmu_cr; + DebugMMUREG("Enable guest MMU paging:\n" + " SH_MMU_CR: value 0x%llx\n" + " SH_PID: value 0x%llx\n", + mmu_cr, hw_ctxt->sh_pid); + + return 0; +} + +int vcpu_write_mmu_pid_reg(struct kvm_vcpu *vcpu, mmu_reg_t pid) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + + /* probably it is flush mm */ + kvm_mmu_sync_roots(vcpu, U_ROOT_PT_FLAG); + + mmu->pid = pid; + write_guest_PID_reg(vcpu, pid); + DebugMMUPID("Set MMU guest PID: 0x%llx\n", pid); + + return 0; +} + +int vcpu_write_mmu_u_pptb_reg(struct kvm_vcpu *vcpu, pgprotval_t u_pptb, + bool *pt_updated, hpa_t *u_root) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + mmu_reg_t sw_u_pptb; + pgprotval_t old_u_pptb; + int r; + + sw_u_pptb = mmu->get_vcpu_context_u_pptb(vcpu); + old_u_pptb = mmu->get_vcpu_u_pptb(vcpu); + KVM_BUG_ON(!is_shadow_paging(vcpu) && sw_u_pptb != old_u_pptb && + vcpu->arch.is_pv); + + if (!is_paging(vcpu)) { + /* it is setup of initial guest page table */ + /* paging wiil be enabled while set MMU_CR.tlb_en */ + /* now save only base of guest */ + mmu->set_vcpu_u_pptb(vcpu, u_pptb); + DebugMMUREG("guest MMU U_PPTB: initial PT base at 0x%lx\n", + u_pptb); + r = 0; + goto handled; + } + if (sw_u_pptb == u_pptb) { + /* set the same page table, so nothing to do */ + DebugMMUREG("guest MMU U_PPTB: write the same PT root " + "at 0x%lx\n", + u_pptb); + r = 0; + goto handled; + } + + /* + * Switch to new page table root + */ + + DebugMMUREG("switch to new guest U_PPTB base at 0x%lx\n", + u_pptb); + + if (is_tdp_paging(vcpu)) { + r = kvm_switch_tdp_u_pptb(vcpu, u_pptb); + } else if (is_shadow_paging(vcpu)) { + r = kvm_switch_shadow_u_pptb(vcpu, u_pptb, u_root); + *pt_updated = true; + } else { + KVM_BUG_ON(true); + r = -EINVAL; + } + +handled: + return r; +} + +int vcpu_write_mmu_os_pptb_reg(struct kvm_vcpu 
*vcpu, pgprotval_t os_pptb, + bool *pt_updated, hpa_t *os_root) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + mmu_reg_t sh_os_pptb; + pgprotval_t old_os_pptb; + int r; + + sh_os_pptb = mmu->get_vcpu_context_os_pptb(vcpu); + old_os_pptb = mmu->get_vcpu_os_pptb(vcpu); + KVM_BUG_ON(!is_shadow_paging(vcpu) && sh_os_pptb != old_os_pptb); + + if (!is_paging(vcpu)) { + /* it is setup of initial guest page table */ + /* paging wiil be enabled while set MMU_CR.tlb_en */ + /* now save only base of guest */ + mmu->set_vcpu_os_pptb(vcpu, os_pptb); + DebugMMUREG("guest MMU OS_PPTB: initial PT base at 0x%lx\n", + os_pptb); + return 0; + } + if (old_os_pptb == os_pptb) { + /* set the same page table, so nothing to do */ + DebugMMUREG("guest MMU OS_PPTB: write the same PT root " + "at 0x%lx\n", + os_pptb); + return 0; + } + + /* + * Switch to new page table root + */ + DebugMMUREG("switch to new guest OS PT base at 0x%lx\n", + os_pptb); + + if (is_tdp_paging(vcpu)) { + r = kvm_switch_tdp_os_pptb(vcpu, os_pptb); + } else if (is_shadow_paging(vcpu)) { + r = kvm_switch_shadow_os_pptb(vcpu, os_pptb, os_root); + *pt_updated = true; + } else { + KVM_BUG_ON(true); + r = -EINVAL; + } + + return r; +} + +int vcpu_write_mmu_u_vptb_reg(struct kvm_vcpu *vcpu, gva_t u_vptb) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + mmu_reg_t sw_u_vptb; + gva_t old_u_vptb; + + sw_u_vptb = mmu->get_vcpu_context_u_vptb(vcpu); + old_u_vptb = mmu->get_vcpu_u_vptb(vcpu); + KVM_BUG_ON(!is_shadow_paging(vcpu) && sw_u_vptb != old_u_vptb && + vcpu->arch.is_pv); + + if (!is_paging(vcpu)) { + /* it is setup of initial guest page table */ + /* paging wiil be enabled while set MMU_CR.tlb_en */ + /* now save only virtual base of guest */ + mmu->set_vcpu_u_vptb(vcpu, u_vptb); + DebugMMUVPT("guest MMU U_VPTB: virtual PT base at 0x%lx\n", + u_vptb); + return 0; + } + if (sw_u_vptb == u_vptb) { + /* set the same page table, so nothing to do */ + DebugMMUVPT("guest MMU U_VPTB: write the same PT base 0x%lx\n", + u_vptb); + 
return 0; + } + + pr_err("%s(): virtual User PT base update from 0x%llx to 0x%lx " + "is not implemented\n", + __func__, sw_u_vptb, u_vptb); + return -EINVAL; +} + +int vcpu_write_mmu_os_vptb_reg(struct kvm_vcpu *vcpu, gva_t os_vptb) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + mmu_reg_t sw_os_vptb; + gva_t old_os_vptb; + + sw_os_vptb = mmu->get_vcpu_context_os_vptb(vcpu); + old_os_vptb = mmu->get_vcpu_os_vptb(vcpu); + KVM_BUG_ON(!is_shadow_paging(vcpu) && sw_os_vptb != old_os_vptb); + + if (!is_paging(vcpu)) { + /* it is setup of initial guest page table */ + /* paging wiil be enabled while set MMU_CR.tlb_en */ + /* now save only virtual base of guest */ + mmu->set_vcpu_os_vptb(vcpu, os_vptb); + DebugMMUVPT("guest MMU OS_VPTB: virtual PT base at 0x%lx\n", + os_vptb); + return 0; + } + if (sw_os_vptb == os_vptb) { + /* set the same page table, so nothing to do */ + DebugMMUVPT("guest MMU OS_VPTB: write the same PT base 0x%lx\n", + os_vptb); + return 0; + } + + pr_err("%s(): virtual OS PT base update from 0x%llx to 0x%lx " + "is not implemented\n", + __func__, sw_os_vptb, os_vptb); + return -EINVAL; +} + +int vcpu_write_mmu_os_vab_reg(struct kvm_vcpu *vcpu, gva_t os_vab) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + mmu_reg_t sw_os_vab; + gva_t old_os_vab; + + sw_os_vab = mmu->get_vcpu_context_os_vab(vcpu); + old_os_vab = mmu->get_vcpu_os_vab(vcpu); + KVM_BUG_ON(!is_shadow_paging(vcpu) && sw_os_vab != old_os_vab); + + if (!is_paging(vcpu)) { + /* it is setup of initial guest page table */ + /* paging wiil be enabled while set MMU_CR.tlb_en */ + /* now save only virtual base of guest */ + mmu->set_vcpu_os_vab(vcpu, os_vab); + return 0; + } + if (sw_os_vab == os_vab) { + /* set the same page table, so nothing to do */ + DebugMMUVPT("guest MMU OS_VAB: write the same virtual " + "addresses base 0x%lx\n", + os_vab); + return 0; + } + + pr_err("%s(): guest OS virtual addresses base update from 0x%llx " + "to 0x%lx is not implemented\n", + __func__, sw_os_vab, os_vab); + 
return -EINVAL; +} + +int vcpu_read_trap_point_mmu_reg(struct kvm_vcpu *vcpu, gpa_t *tc_gpa) +{ + if (vcpu->arch.mmu.tc_page != NULL) { + /* guest TRAP_POINT register was written */ + *tc_gpa = vcpu->arch.mmu.tc_gpa; + } else { + /* read without writing */ + *tc_gpa = 0; + } + + DebugMMUREG("read guest TRAP POINT: GPA 0x%llx, host PA 0x%llx, " + "mapped to host addr %px\n", + *tc_gpa, vcpu->arch.sw_ctxt.tc_hpa, vcpu->arch.mmu.tc_kaddr); + + return 0; +} + +int vcpu_read_mmu_cr_reg(struct kvm_vcpu *vcpu, mmu_reg_t *mmu_cr) +{ + *mmu_cr = read_guest_MMU_CR_reg(vcpu); + + DebugMMUREG("guest MMU_CR does not change: 0x%llx, tlb_en: %d\n", + *mmu_cr, !!(*mmu_cr & _MMU_CR_TLB_EN)); + + return 0; +} + +int vcpu_read_mmu_u_pptb_reg(struct kvm_vcpu *vcpu, pgprotval_t *u_pptb_p) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + mmu_reg_t sw_u_pptb; + pgprotval_t u_pptb; + hpa_t root_u_pptb; + bool sep_virt_space; + + sw_u_pptb = mmu->get_vcpu_context_u_pptb(vcpu); + u_pptb = mmu->get_vcpu_u_pptb(vcpu); + DebugMMUREG("guest MMU U_PPTB : register 0x%llx, base 0x%lx\n", + sw_u_pptb, u_pptb); + + sep_virt_space = is_sep_virt_spaces(vcpu); + + if (!is_paging(vcpu)) { + if (is_phys_paging(vcpu)) { + KVM_BUG_ON(u_pptb != sw_u_pptb); + } else { + root_u_pptb = kvm_get_space_type_spt_u_root(vcpu); + KVM_BUG_ON(!sep_virt_space && + IS_E2K_INVALID_PAGE(root_u_pptb)); + KVM_BUG_ON(sw_u_pptb != root_u_pptb); + } + } else if (likely(is_tdp_paging(vcpu))) { + KVM_BUG_ON(sw_u_pptb != u_pptb); + } else if (is_shadow_paging(vcpu)) { + root_u_pptb = kvm_get_space_type_spt_u_root(vcpu); + KVM_BUG_ON(!sep_virt_space && IS_E2K_INVALID_PAGE(root_u_pptb)); + if (!is_phys_paging(vcpu)) { + KVM_BUG_ON(sw_u_pptb != root_u_pptb); + } else { + KVM_BUG_ON(sw_u_pptb != u_pptb); + } + } else { + KVM_BUG_ON(true); + } + + *u_pptb_p = u_pptb; + + DebugMMUREG("guest MMU U_PPTB does not change: 0x%lx\n", *u_pptb_p); + + return 0; +} + +int vcpu_read_mmu_os_pptb_reg(struct kvm_vcpu *vcpu, pgprotval_t *os_pptb_p) +{ 
+ struct kvm_mmu *mmu = &vcpu->arch.mmu; + mmu_reg_t sh_os_pptb; + pgprotval_t os_pptb; + hpa_t root; + bool sep_virt_space; + + sh_os_pptb = mmu->get_vcpu_context_os_pptb(vcpu); + os_pptb = mmu->get_vcpu_os_pptb(vcpu); + DebugMMUREG("guest MMU OS_PPTB : register 0x%llx, base 0x%lx\n", + sh_os_pptb, os_pptb); + + sep_virt_space = is_sep_virt_spaces(vcpu); + + if (!is_paging(vcpu)) { + if (is_phys_paging(vcpu)) { + KVM_BUG_ON(sh_os_pptb != os_pptb); + } else { + if (sep_virt_space) { + root = kvm_get_space_type_spt_u_root(vcpu); + } else { + root = kvm_get_space_type_spt_os_root(vcpu); + } + KVM_BUG_ON(IS_E2K_INVALID_PAGE(root)); + KVM_BUG_ON(sh_os_pptb != root); + } + } else if (likely(is_tdp_paging(vcpu))) { + KVM_BUG_ON(sh_os_pptb != os_pptb); + } else if (is_shadow_paging(vcpu)) { + if (sep_virt_space) { + root = kvm_get_space_type_spt_u_root(vcpu); + } else { + root = kvm_get_space_type_spt_os_root(vcpu); + } + KVM_BUG_ON(IS_E2K_INVALID_PAGE(root)); + KVM_BUG_ON(sh_os_pptb != root); + } else { + KVM_BUG_ON(true); + } + + *os_pptb_p = os_pptb; + + DebugMMUREG("guest MMU OS_PPTB does not change: 0x%lx\n", *os_pptb_p); + + return 0; +} + +int vcpu_read_mmu_u_vptb_reg(struct kvm_vcpu *vcpu, gva_t *u_vptb_p) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + mmu_reg_t sw_u_vptb; + gva_t u_vptb; + bool sep_virt_space; + + sw_u_vptb = mmu->get_vcpu_context_u_vptb(vcpu); + u_vptb = mmu->get_vcpu_u_vptb(vcpu); + DebugMMUVPT("guest MMU U_VPTB : register 0x%llx, base 0x%lx\n", + sw_u_vptb, u_vptb); + + sep_virt_space = is_sep_virt_spaces(vcpu); + + if (!is_paging(vcpu)) { + if (is_phys_paging(vcpu)) { + KVM_BUG_ON(u_vptb != sw_u_vptb); + } + } else if (likely(is_tdp_paging(vcpu))) { + KVM_BUG_ON(sw_u_vptb != u_vptb); + } else if (is_shadow_paging(vcpu)) { + if (is_phys_paging(vcpu)) { + KVM_BUG_ON(sw_u_vptb != u_vptb); + } + } else { + KVM_BUG_ON(true); + } + + *u_vptb_p = u_vptb; + + DebugMMUVPT("guest MMU U_VPTB does not change: 0x%lx\n", *u_vptb_p); + + return 0; +} + +int 
vcpu_read_mmu_os_vptb_reg(struct kvm_vcpu *vcpu, gva_t *os_vptb_p) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + mmu_reg_t sw_os_vptb; + gva_t os_vptb; + bool sep_virt_space; + + sw_os_vptb = mmu->get_vcpu_context_os_vptb(vcpu); + os_vptb = mmu->get_vcpu_os_vptb(vcpu); + DebugMMUVPT("guest MMU OS_VPTB : register 0x%llx, base 0x%lx\n", + sw_os_vptb, os_vptb); + + sep_virt_space = is_sep_virt_spaces(vcpu); + + if (!is_paging(vcpu)) { + if (is_phys_paging(vcpu)) { + KVM_BUG_ON(os_vptb != sw_os_vptb); + } + } else if (likely(is_tdp_paging(vcpu))) { + KVM_BUG_ON(sw_os_vptb != os_vptb); + } else if (is_shadow_paging(vcpu)) { + if (is_phys_paging(vcpu)) { + KVM_BUG_ON(sw_os_vptb != os_vptb); + } + } else { + KVM_BUG_ON(true); + } + + *os_vptb_p = os_vptb; + + DebugMMUVPT("guest MMU OS_VPTB does not change: 0x%lx\n", *os_vptb_p); + + return 0; +} + +int vcpu_read_mmu_os_vab_reg(struct kvm_vcpu *vcpu, gva_t *os_vab_p) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + mmu_reg_t sw_os_vab; + gva_t os_vab; + bool sep_virt_space; + + sw_os_vab = mmu->get_vcpu_context_os_vab(vcpu); + os_vab = mmu->get_vcpu_os_vab(vcpu); + DebugMMUVPT("guest MMU OS_VAB : register 0x%llx, base 0x%lx\n", + sw_os_vab, os_vab); + + sep_virt_space = is_sep_virt_spaces(vcpu); + + if (!is_paging(vcpu)) { + if (is_phys_paging(vcpu)) { + KVM_BUG_ON(os_vab != sw_os_vab); + } + } else if (likely(is_tdp_paging(vcpu))) { + KVM_BUG_ON(sw_os_vab != os_vab); + } else if (is_shadow_paging(vcpu)) { + if (is_phys_paging(vcpu)) { + KVM_BUG_ON(sw_os_vab != os_vab); + } + } else { + KVM_BUG_ON(true); + } + + *os_vab_p = os_vab; + + DebugMMUVPT("guest MMU OS_VAB does not change: 0x%lx\n", *os_vab_p); + + return 0; +} + +bool kvm_mmu_is_hv_paging(struct kvm_vcpu *vcpu) +{ + mmu_reg_t mmu_cr; + bool sh_mmu_cr_paging; + int r; + + KVM_BUG_ON(!is_tdp_paging(vcpu)); + + r = vcpu_read_mmu_cr_reg(vcpu, &mmu_cr); + if (r != 0) { + pr_err("%s(): could not read SH_MMU_CR register, error %d\n", + __func__, r); + KVM_BUG_ON(true); + 
return false; + } + + sh_mmu_cr_paging = !!(mmu_cr & _MMU_CR_TLB_EN); + if (likely(sh_mmu_cr_paging)) { + if (unlikely(!is_paging_flag(vcpu))) { + /* guest MMU paging has been enabled */ + r = kvm_hv_setup_tdp_paging(vcpu); + if (r != 0) { + pr_err("%s(): could not switch guest to " + "paging mode, error %d\n", + __func__, r); + KVM_BUG_ON(true); + } + } + return true; + } else { + if (unlikely(is_paging_flag(vcpu))) { + /* guest MMU paging has been disabled */ + pr_err("%s(): guest turns OFF paging mode: " + "SH_MMU_CR 0x%llx\n", + __func__, mmu_cr); + reset_paging_flag(vcpu); + } + return false; + } +} + +int kvm_mmu_enable_shadow_paging(struct kvm_vcpu *vcpu) +{ + mmu_reg_t mmu_cr; + bool sh_mmu_cr_paging; + int r; + + KVM_BUG_ON(!is_shadow_paging(vcpu)); + + r = vcpu_read_mmu_cr_reg(vcpu, &mmu_cr); + if (r != 0) { + pr_err("%s(): could not read SH_MMU_CR register, error %d\n", + __func__, r); + return r; + } + + sh_mmu_cr_paging = !!(mmu_cr & _MMU_CR_TLB_EN); + + if (unlikely(sh_mmu_cr_paging)) { + pr_err("%s() : paging is already enabled\n", __func__); + return 0; + } + + if (unlikely(is_paging_flag(vcpu))) { + /* guest MMU paging has been disabled */ + pr_err("%s(): guest paging is turned OFF SH_MMU_CR 0x%llx\n", + __func__, mmu_cr); + KVM_BUG_ON(true); + return -EBUSY; + } + + mmu_cr |= _MMU_CR_TLB_EN; + r = vcpu_write_mmu_cr_reg(vcpu, mmu_cr); + if (r != 0) { + pr_err("%s() : could not enable paging on VCPU #%d, error %d\n", + __func__, vcpu->vcpu_id, r); + return r; + } + + return 0; +} diff --git a/arch/e2k/kvm/hv_mmu.h b/arch/e2k/kvm/hv_mmu.h new file mode 100644 index 000000000000..1c497ba5d882 --- /dev/null +++ b/arch/e2k/kvm/hv_mmu.h @@ -0,0 +1,81 @@ +#ifndef __KVM_E2K_HV_MMU_H +#define __KVM_E2K_HV_MMU_H + +#include +#include +#include "pv_mmu.h" + +extern void kvm_vcpu_release_trap_cellar(struct kvm_vcpu *vcpu); +extern int vcpu_write_trap_point_mmu_reg(struct kvm_vcpu *vcpu, + gpa_t tc_gpa, hpa_t *tc_hpap); +extern int vcpu_write_mmu_pid_reg(struct 
kvm_vcpu *vcpu, mmu_reg_t pid); +extern int vcpu_write_mmu_cr_reg(struct kvm_vcpu *vcpu, mmu_reg_t mmu_cr); +extern int vcpu_write_mmu_u_pptb_reg(struct kvm_vcpu *vcpu, pgprotval_t u_pptb, + bool *pt_updated, hpa_t *u_root); +extern int vcpu_write_mmu_u_vptb_reg(struct kvm_vcpu *vcpu, gva_t u_vptb); +extern int vcpu_write_mmu_os_pptb_reg(struct kvm_vcpu *vcpu, + pgprotval_t os_pptb, bool *pt_updated, hpa_t *os_root); +extern int vcpu_write_mmu_os_vptb_reg(struct kvm_vcpu *vcpu, gva_t os_vptb); +extern int vcpu_write_mmu_os_vab_reg(struct kvm_vcpu *vcpu, gva_t os_vab); +extern int vcpu_read_trap_point_mmu_reg(struct kvm_vcpu *vcpu, gpa_t *tc_gpa); +extern int vcpu_read_mmu_cr_reg(struct kvm_vcpu *vcpu, mmu_reg_t *mmu_cr); +extern int vcpu_read_mmu_u_pptb_reg(struct kvm_vcpu *vcpu, + pgprotval_t *u_pptb_p); +extern int vcpu_read_mmu_os_pptb_reg(struct kvm_vcpu *vcpu, + pgprotval_t *os_pptb_p); +extern int vcpu_read_mmu_u_vptb_reg(struct kvm_vcpu *vcpu, gva_t *u_vptb_p); +extern int vcpu_read_mmu_os_vptb_reg(struct kvm_vcpu *vcpu, gva_t *os_vptb_p); +extern int vcpu_read_mmu_os_vab_reg(struct kvm_vcpu *vcpu, gva_t *os_vab_p); + +extern bool kvm_mmu_is_hv_paging(struct kvm_vcpu *vcpu); +extern int kvm_mmu_enable_shadow_paging(struct kvm_vcpu *vcpu); + +static inline mmu_reg_t read_guest_MMU_CR_reg(struct kvm_vcpu *vcpu) +{ + if (likely(vcpu->arch.is_hv)) { + return read_SH_MMU_CR_reg(); + } else if (vcpu->arch.is_pv) { + return read_pv_MMU_CR_reg(vcpu); + } else { + KVM_BUG_ON(true); + } + return (mmu_reg_t) -1; +} + +static inline void +write_guest_MMU_CR_reg(struct kvm_vcpu *vcpu, mmu_reg_t value) +{ + if (likely(vcpu->arch.is_hv)) { + write_SH_MMU_CR_reg(value); + } else if (vcpu->arch.is_pv) { + write_pv_MMU_CR_reg(vcpu, value); + } else { + KVM_BUG_ON(true); + } +} + +static inline mmu_reg_t read_guest_PID_reg(struct kvm_vcpu *vcpu) +{ + if (likely(vcpu->arch.is_hv)) { + return read_SH_PID_reg(); + } else if (vcpu->arch.is_pv) { + return read_pv_PID_reg(vcpu); + } 
else { + KVM_BUG_ON(true); + } + return (mmu_reg_t) -1; +} + +static inline void +write_guest_PID_reg(struct kvm_vcpu *vcpu, mmu_reg_t value) +{ + if (likely(vcpu->arch.is_hv)) { + write_SH_PID_reg(value); + } else if (vcpu->arch.is_pv) { + write_pv_PID_reg(vcpu, value); + } else { + KVM_BUG_ON(true); + } +} + +#endif /* __KVM_E2K_HV_MMU_H */ diff --git a/arch/e2k/kvm/hypercalls.c b/arch/e2k/kvm/hypercalls.c new file mode 100644 index 000000000000..2d6cccaeda6a --- /dev/null +++ b/arch/e2k/kvm/hypercalls.c @@ -0,0 +1,1321 @@ +/* + * Just as userspace programs request kernel operations through a system + * call, the Guest requests Host operations through a "hypercall". You might + * notice this nomenclature doesn't really follow any logic, but the name has + * been around for long enough that we're stuck with it. As you'd expect, this + * code is basically a one big switch statement. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#ifdef CONFIG_KVM_ASYNC_PF +#include +#endif /* CONFIG_KVM_ASYNC_PF */ +#include + +#include "process.h" +#include "cpu.h" +#include "mmu.h" +#include "irq.h" +#include "io.h" +#include "mman.h" +#include "time.h" +#include "string.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_SWITCH_MODE +#undef DebugKVMSW +#define DEBUG_KVM_SWITCH_MODE false /* guest thread switch debugging */ +#define DebugKVMSW(fmt, args...) \ +({ \ + if (DEBUG_KVM_SWITCH_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_SWITCH_HEAD_MODE +#undef DebugKVMSWH +#define DEBUG_KVM_SWITCH_HEAD_MODE false /* guest thread switch head */ +#define DebugKVMSWH(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_SWITCH_HEAD_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_SWITCH_VCPU_MODE +#undef DebugSWVCPU +#define DEBUG_KVM_SWITCH_VCPU_MODE false /* guest thread switch to */ + /* other VCPU */ +#define DebugSWVCPU(fmt, args...) \ +({ \ + if (DEBUG_KVM_SWITCH_VCPU_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_MIGRATE_VCPU_MODE +#undef DebugMGVCPU +#define DEBUG_KVM_MIGRATE_VCPU_MODE false /* guest thread switch to */ + /* other VCPU */ +#define DebugMGVCPU(fmt, args...) \ +({ \ + if (DEBUG_KVM_MIGRATE_VCPU_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_ACTIVATION_MODE +#undef DebugKVMACT +#define DEBUG_KVM_ACTIVATION_MODE 0 /* KVM guest kernel data */ + /* stack activations */ + /* debugging */ +#define DebugKVMACT(fmt, args...) \ +({ \ + if (DEBUG_KVM_ACTIVATION_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_GPT_REGS_MODE +#define DEBUG_GPT_REGS_MODE 0 /* KVM host and guest kernel */ + /* stack activations print */ + +int last_light_hcall = -1; + +/* + * Should be inline function and should call from light hypercall or + * is better to create separate hypercall + * Do not call any function from here. 
+ */ +static inline void +kvm_switch_guest_thread_stacks(struct kvm_vcpu *vcpu, int gpid_nr, int gmmid_nr) +{ + gthread_info_t *cur_gti = pv_vcpu_get_gti(vcpu); + gthread_info_t *next_gti; + struct gmm_struct *init_gmm = pv_vcpu_get_init_gmm(vcpu); + struct gmm_struct *next_gmm; + struct sw_regs *cur_gsw; + struct sw_regs *next_gsw; + int cur_gmmid_nr = pv_vcpu_get_gmm(vcpu)->nid.nr; + e2k_upsr_t upsr; + bool migrated = false; + int old_vcpu_id = -1; + int gtask_is_binco; + u64 fpcr, fpsr, pfpfr; + + DebugKVMSWH("started to switch from current GPID #%d to #%d GMM #%d\n", + cur_gti->gpid->nid.nr, gpid_nr, gmmid_nr); + + next_gti = kvm_get_guest_thread_info(vcpu->kvm, gpid_nr); + if (next_gti == NULL) { + /* FIXME: we should kill guest kernel, but first it needs */ + /* to switch to host kernel stacks */ + panic("kvm_switch_guest_thread_stacks() could not find " + "guest thread GPID #%d\n", gpid_nr); + } + if (next_gti->vcpu == NULL) { + DebugKVMSWH("next thread GPID #%d starts on VCPU #%d " + "first time\n", + gpid_nr, vcpu->vcpu_id); + next_gti->vcpu = vcpu; + } else if (next_gti->vcpu != vcpu) { + DebugSWVCPU("next thread GPID #%d migrates from current GPID " + "#%d VCPU #%d to VCPU #%d\n", + gpid_nr, cur_gti->gpid->nid.nr, + next_gti->vcpu->vcpu_id, vcpu->vcpu_id); + migrated = true; + old_vcpu_id = next_gti->vcpu->vcpu_id; + next_gti->vcpu = vcpu; + } else { + DebugKVMSWH("next thread GPID #%d continues running " + "on VCPU #%d\n", + gpid_nr, vcpu->vcpu_id); + } + if (gmmid_nr != pv_vcpu_get_init_gmm(vcpu)->nid.nr) { + next_gmm = kvm_find_gmmid(&vcpu->kvm->arch.gmmid_table, + gmmid_nr); + if (next_gmm == NULL) { + /* FIXME: we should kill guest kernel, but first */ + /* it needs to switch to host kernel stacks */ + panic("could not find new host agent #%d of guest mm\n", + gmmid_nr); + } + } else { + /* new process is kernel thread */ + next_gmm = pv_vcpu_get_init_gmm(vcpu); + } + cur_gsw = &cur_gti->sw_regs; + + /* Save interrupt mask state and disable NMIs on 
host */ + NATIVE_UPSR_NMI_SAVE_AND_CLI(upsr.UPSR_reg); + WARN_ONCE(!upsr.UPSR_nmie, + "Non-maskable interrupts are disabled\n"); + BUG_ON(!upsr_irqs_disabled_flags(upsr.UPSR_reg)); + + /* hardware stack bounds trap must have been provoked by guest */ + /* to handle it before save stacks state and switch to other process */ + /* so here should not be any traps */ + NATIVE_FLUSHCPU; + + ATOMIC_SAVE_CURRENT_STACK_REGS(cur_gsw, &cur_gsw->crs); + + E2K_SET_USER_STACK(DEBUG_KVM_SWITCH_VCPU_MODE); + DebugKVMSW("current guest thread kernel data stack: base 0x%llx, " + "size 0x%x, guest top 0x%lx\n", + cur_gsw->usd_lo.USD_lo_base, + cur_gsw->usd_hi.USD_hi_size, + cur_gsw->top); + DebugKVMSW("current guest thread host kernel data stack : base 0x%llx, " + "size 0x%x, host top 0x%lx\n", + cur_gti->stack_regs.stacks.usd_lo.USD_lo_base, + cur_gti->stack_regs.stacks.usd_hi.USD_hi_size, + cur_gti->stack_regs.stacks.top); + DebugKVMSW("current guest thread PS: base 0x%llx, ind 0x%x, " + "size 0x%x\n", + cur_gsw->psp_lo.PSP_lo_base, + cur_gsw->psp_hi.PSP_hi_ind, + cur_gsw->psp_hi.PSP_hi_size); + DebugKVMSW("current guest thread PCS: base 0x%llx, ind 0x%x, " + "size 0x%x\n", + cur_gsw->pcsp_lo.PCSP_lo_base, + cur_gsw->pcsp_hi.PCSP_hi_ind, + cur_gsw->pcsp_hi.PCSP_hi_size); + DebugKVMSW("current CR0_lo 0x%016llx CR0_hi 0x%016llx " + "CR1_lo 0x%016llx CR1_hi 0x%016llx\n", + cur_gsw->crs.cr0_lo.CR0_lo_half, + cur_gsw->crs.cr0_hi.CR0_hi_half, + cur_gsw->crs.cr1_lo.CR1_lo_half, + cur_gsw->crs.cr1_hi.CR1_hi_half); + + gtask_is_binco = cur_gti->task_is_binco; + AW(cur_gsw->fpcr) = NATIVE_NV_READ_FPCR_REG_VALUE(); + AW(cur_gsw->fpsr) = NATIVE_NV_READ_FPSR_REG_VALUE(); + AW(cur_gsw->pfpfr) = NATIVE_NV_READ_PFPFR_REG_VALUE(); + NATIVE_DO_SAVE_TASK_USER_REGS_TO_SWITCH(cur_gsw, gtask_is_binco, + false /* task traced */); + + /* global registers should be saved by host */ + if (cur_gti->gmm != NULL) { + SAVE_PV_VCPU_GLOBAL_REGISTERS(cur_gti); + } + + /* switch mm to new process, it is actual if 
user process */ + NATIVE_FLUSHCPU; /* spill current stacks on current mm */ + switch_guest_mm(next_gti, next_gmm); + if (next_gti->gmm != NULL && next_gti->gmm == next_gmm) { + /* switch guest MMU context */ + kvm_switch_to_guest_mmu_pid(vcpu); + } + + /* Should not be print or other functions calling here */ + + next_gsw = &next_gti->sw_regs; + + if (!vcpu->arch.is_hv) { + NATIVE_NV_WRITE_USBR_USD_REG_VALUE(next_gsw->top, + AW(next_gsw->usd_hi), AW(next_gsw->usd_lo)); + + NATIVE_NV_WRITE_PSP_REG(next_gsw->psp_hi, next_gsw->psp_lo); + NATIVE_NV_WRITE_PCSP_REG(next_gsw->pcsp_hi, next_gsw->pcsp_lo); + + NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG(next_gsw->crs.cr0_lo); + NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG(next_gsw->crs.cr0_hi); + NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG(next_gsw->crs.cr1_lo); + NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG(next_gsw->crs.cr1_hi); + } + + E2K_SET_USER_STACK(DEBUG_KVM_SWITCH_VCPU_MODE); + DebugKVMSW("next guest thread kernel data stack: base 0x%llx, " + "size 0x%x, guest top 0x%lx\n", + next_gsw->usd_lo.USD_lo_base, + next_gsw->usd_hi.USD_hi_size, + next_gsw->top); + DebugKVMSW("next guest thread host kernel data stack : " + "base 0x%llx, size 0x%x, host top 0x%lx\n", + next_gti->stack_regs.stacks.usd_lo.USD_lo_base, + next_gti->stack_regs.stacks.usd_hi.USD_hi_size, + next_gti->stack_regs.stacks.top); + DebugKVMSW("next guest thread PS: base 0x%llx, ind 0x%x, " + "size 0x%x\n", + next_gsw->psp_lo.PSP_lo_base, + next_gsw->psp_hi.PSP_hi_ind, + next_gsw->psp_hi.PSP_hi_size); + DebugKVMSW("next guest thread PCS: base 0x%llx, ind 0x%x, " + "size 0x%x\n", + next_gsw->pcsp_lo.PCSP_lo_base, + next_gsw->pcsp_hi.PCSP_hi_ind, + next_gsw->pcsp_hi.PCSP_hi_size); + DebugKVMSW("next CR0_lo 0x%016llx CR0_hi 0x%016llx " + "CR1_lo 0x%016llx CR1_hi 0x%016llx\n", + next_gsw->crs.cr0_lo.CR0_lo_half, + next_gsw->crs.cr0_hi.CR0_hi_half, + next_gsw->crs.cr1_lo.CR1_lo_half, + next_gsw->crs.cr1_hi.CR1_hi_half); + + gtask_is_binco = next_gti->task_is_binco; + fpcr = AS_WORD(next_gsw->fpcr); + 
fpsr = AS_WORD(next_gsw->fpsr); + pfpfr = AS_WORD(next_gsw->pfpfr); + NATIVE_DO_RESTORE_TASK_USER_REGS_TO_SWITCH(next_gsw, gtask_is_binco, + false /* traced */); + NATIVE_NV_WRITE_FPCR_REG_VALUE(fpcr); + NATIVE_NV_WRITE_FPSR_REG_VALUE(fpsr); + NATIVE_NV_WRITE_PFPFR_REG_VALUE(pfpfr); + + /* global registers should be restored by host */ + if (next_gti->gmm != NULL && next_gti->gmm == next_gmm) { + /* FIXME: only to debug gregs save/restore, should be deleted */ + E2K_SET_USER_STACK(DEBUG_GREGS_MODE); + + RESTORE_PV_VCPU_GLOBAL_REGISTERS(next_gti); + } + + pv_vcpu_set_gti(vcpu, next_gti); + + pv_vcpu_switch_guest_host_context(vcpu, cur_gti, next_gti); + + /* Enable NMIs on host */ + NATIVE_UPSR_NMI_STI(upsr.UPSR_reg); + + /* FIXME: only to debug gregs save/restore, should be deleted */ + E2K_SET_USER_STACK(DEBUG_KVM_MIGRATE_VCPU_MODE); + /* save current state of guest kernel global registers */ + /* it need here save only some registers which can be changed */ + /* after migration, but now all global registers are saved + if (migrated) + */ + SAVE_GUEST_KERNEL_GREGS_COPY(current_thread_info(), next_gti); + DebugMGVCPU("thread GPID #%d migrates from VCPU #%d to " + "VCPU #%d signal stack entries %ld\n", + gpid_nr, old_vcpu_id, vcpu->vcpu_id, + current_thread_info()->signal_stack.used / + sizeof(struct signal_stack_context)); + + if (trace_guest_switch_to_enabled()) + trace_guest_switch_to(vcpu, cur_gti->gpid->nid.nr, cur_gmmid_nr, + gpid_nr, gmmid_nr, next_gsw); + + KVM_BUG_ON(vcpu->cpu < 0); + + if (!vcpu->arch.is_hv) + pv_vcpu_switch_kernel_pgd_range(vcpu, vcpu->cpu); + + return; +} + +/* + * Hypercall hanlder of tlb flush requests from paravirtualized kernel + */ +static inline unsigned long kvm_hv_flush_tlb_range(struct kvm_vcpu *vcpu, + e2k_addr_t start_gva, + e2k_addr_t end_gva) +{ + vcpu->arch.mmu.sync_gva_range(vcpu, PAGE_ALIGN_UP(start_gva), + PAGE_ALIGN_UP(end_gva), true); + + return 0; +} + +static inline void kvm_hv_sync_addr_range(struct kvm_vcpu *vcpu, + 
e2k_addr_t start_gva, + e2k_addr_t end_gva) +{ + vcpu->arch.mmu.sync_gva_range(vcpu, PAGE_ALIGN_UP(start_gva), + PAGE_ALIGN_UP(end_gva), false); +} + +static inline unsigned long update_psp_hi(unsigned long psp_hi_value) +{ + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi, new_psp_hi; + e2k_cr1_lo_t cr1_lo; + u64 size; + void *dst, *src; + + NATIVE_FLUSHR; + psp_lo = NATIVE_NV_READ_PSP_LO_REG(); + psp_hi = NATIVE_NV_READ_PSP_HI_REG(); + cr1_lo = NATIVE_NV_READ_CR1_LO_REG(); + size = cr1_lo.CR1_lo_wbs * EXT_4_NR_SZ; + new_psp_hi.PSP_hi_half = psp_hi_value; + dst = (void *)(psp_lo.PSP_lo_base + new_psp_hi.PSP_hi_ind); + src = (void *)(psp_lo.PSP_lo_base + psp_hi.PSP_hi_ind - size); + fast_tagged_memory_copy(dst, src, size, + TAGGED_MEM_STORE_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + TAGGED_MEM_LOAD_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, true); + new_psp_hi.PSP_hi_half += size; + NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG(new_psp_hi); + return 0; +} + +static inline unsigned long update_pcsp_hi(unsigned long pcsp_hi_value) +{ + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi, new_pcsp_hi; + u64 size; + void *dst, *src; + + NATIVE_FLUSHC; + pcsp_lo = NATIVE_NV_READ_PCSP_LO_REG(); + pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG(); + size = SZ_OF_CR; + new_pcsp_hi.PCSP_hi_half = pcsp_hi_value; + dst = (void *)(pcsp_lo.PCSP_lo_base + new_pcsp_hi.PCSP_hi_ind); + src = (void *)(pcsp_lo.PCSP_lo_base + pcsp_hi.PCSP_hi_ind - size); + fast_tagged_memory_copy(dst, src, size, + TAGGED_MEM_STORE_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + TAGGED_MEM_LOAD_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, true); + new_pcsp_hi.PCSP_hi_half += size; + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG(new_pcsp_hi); + return 0; +} + +static inline unsigned long update_wd_psise(unsigned long psize_value) +{ + e2k_wd_t wd; + e2k_cr1_lo_t cr1_lo; + + wd = NATIVE_READ_WD_REG(); + cr1_lo = NATIVE_NV_READ_CR1_LO_REG(); + wd.psize = psize_value; + cr1_lo.CR1_lo_wpsz = 
psize_value >> 4; + NATIVE_WRITE_WD_REG(wd); + NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG(cr1_lo); + + return 0; +} + +/* + * This is the light hypercalls execution. + * Lighte hypercalls do not: + * - switch to kernel stacks + * - use data stack + * - call any function + */ +unsigned long notrace /* __interrupt */ +kvm_light_hcalls(unsigned long hcall_num, + unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4, + unsigned long arg5, unsigned long arg6) +{ + struct kvm_vcpu *vcpu; + thread_info_t *thread_info; + gthread_info_t *gti; + bool from_light_hypercall; + bool from_generic_hypercall; + e2k_cr1_lo_t cr1_lo; + e2k_upsr_t user_upsr; + int from_paravirt_guest; + bool need_inject; + unsigned long ret = 0; + unsigned long from_sdisp = hcall_num >> 63; + + hcall_num &= ~(1UL << 63); + + /* Save guest values of global regs and set current pointers instead */ + thread_info = NATIVE_READ_CURRENT_REG(); + vcpu = thread_info->vcpu; + + __guest_exit_light(thread_info, &vcpu->arch); + + /* check VCPU ID of global register and running VCPU */ + kvm_check_vcpu_ids_as_light(vcpu); + + /* set kernel state of UPSR to preserve FP disable exception */ + /* on movfi instructions while global registers saving */ + NATIVE_SWITCH_TO_KERNEL_UPSR(user_upsr, + false, /* enable IRQs */ + false /* disable NMI */); + + if (!vcpu->arch.is_hv) { + /* Do not switch guest MMU context to host MMU one to enable */ + /* light access to guest address space from host. 
*/ + /* All hard cases of access should cause page faults traps */ + /* and switch to host MMU context to handle these faults */ + ; + } + + cr1_lo = NATIVE_NV_READ_CR1_LO_REG(); + + gti = thread_info->gthread_info; + if (gti != NULL) { + /* save guest kernel UPSR state at guest thread info + DO_SAVE_GUEST_KERNEL_UPSR(gti, user_upsr); + */ + } + + from_light_hypercall = test_thread_flag(TIF_LIGHT_HYPERCALL); + from_generic_hypercall = test_thread_flag(TIF_GENERIC_HYPERCALL); + if (!from_light_hypercall) + set_thread_flag(TIF_LIGHT_HYPERCALL); + if (from_generic_hypercall) + clear_thread_flag(TIF_GENERIC_HYPERCALL); + + last_light_hcall = hcall_num; + trace_light_hcall(hcall_num, arg1, arg2, arg3, arg4, arg5, arg6, vcpu->cpu); + + /* in common case cannot enable hardware stacks bounds traps, */ + /* disabled by assembler entry of the hypercall handler */ + /* (see arch/e2k/kvm/ttable.S). */ + /* Light hypercalls do not call other functions and run under */ + /* IRQs disabled, but hardware stack bounds trap can be enabled */ + /* for all hypercalls excluding switch to other guest process */ + /* (see comments at kvm_switch_guest_thread_stacks() function */ + if (hcall_num != KVM_HCALL_SWITCH_GUEST_THREAD_STACKS) { + //TODO needed?? 
This is only for virtualization w/o hardware support + //native_set_sge(); + } + + switch (hcall_num) { + case KVM_HCALL_COPY_STACKS_TO_MEMORY: + if ((void *)arg1 == NULL) { + NATIVE_FLUSHCPU; + } else { + ret = kvm_flush_hw_stacks_to_memory( + (kvm_hw_stacks_flush_t *)arg1); + } + break; + case KVM_HCALL_GET_TLB_SET_TAG: + ret = get_va_tlb_set_tag(arg1, (int)arg2, (bool)arg3); + break; + case KVM_HCALL_GET_TLB_SET_ENTRY: + ret = pte_val(get_va_tlb_set_entry(arg1, (int)arg2, (bool)arg3)); + break; + case KVM_HCALL_GET_HOST_MMU_PPTB: + ret = get_mmu_u_pptb_reg(); + break; + case KVM_HCALL_GET_HOST_MMU_PID: + ret = get_mmu_pid_reg(); + break; + case KVM_HCALL_UPDATE_PCSP_HI: + update_pcsp_hi(arg1); + break; + case KVM_HCALL_UPDATE_PSP_HI: + update_psp_hi(arg1); + break; + case KVM_HCALL_UPDATE_WD_PSIZE: + ret = update_wd_psise(arg1); + break; + case KVM_HCALL_SETUP_IDLE_TASK: + ret = pv_vcpu_get_gpid_id(vcpu); + break; + case KVM_HCALL_UNFREEZE_TRAPS: + kvm_init_pv_vcpu_trap_handling(vcpu, NULL); + break; + case KVM_HCALL_SWITCH_TO_INIT_MM: + kvm_switch_to_init_guest_mm(vcpu); + break; + case KVM_HCALL_SWITCH_GUEST_THREAD_STACKS: + kvm_switch_guest_thread_stacks(vcpu, (int) arg1, (int) arg2); + break; + case KVM_HCALL_GET_ACTIVE_CR_MEM_ITEM: + ret = kvm_get_guest_active_cr_mem_item( + (unsigned long __user *) arg1, arg2, arg3, arg4); + break; + case KVM_HCALL_PUT_ACTIVE_CR_MEM_ITEM: + ret = kvm_put_guest_active_cr_mem_item(arg1, arg2, arg3, arg4); + break; + case KVM_HCALL_MOVE_TAGGED_DATA: + ret = kvm_move_guest_tagged_data((int)arg1, arg2, arg3); + break; + case KVM_HCALL_EXTRACT_TAGS_32: + ret = kvm_extract_guest_tags_32((u16 *)arg1, + (const void *)arg2); + break; + case KVM_HCALL_INJECT_INTERRUPT: + /* interrupt will be injected while hypercall return */ + /* see below */ + break; + case KVM_HCALL_VIRQ_HANDLED: + /* injected interrupt to handle VIRQs was completed */ + ret = kvm_guest_handled_virqs(vcpu); + break; + case KVM_HCALL_TEST_PENDING_VIRQ: + ret = 
kvm_test_pending_virqs(vcpu); + break; + case KVM_HCALL_GET_HOST_RUNSTATE_KTIME: + ret = kvm_get_host_runstate_ktime(); + break; + case KVM_HCALL_GET_GUEST_RUNNING_TIME: + ret = kvm_get_guest_running_time(vcpu); + break; + case KVM_HCALL_READ_DTLB_REG: + ret = kvm_read_guest_dtlb_reg(arg1); + break; + case KVM_HCALL_GET_DAM: + ret = kvm_get_guest_DAM((unsigned long long __user *)arg1, + arg2); + break; + case KVM_HCALL_FLUSH_DCACHE_LINE: + ret = kvm_flush_guest_dcache_line(arg1); + break; + case KVM_HCALL_CLEAR_DCACHE_L1_SET: + ret = kvm_clear_guest_dcache_l1_set(arg1, arg2); + break; + case KVM_HCALL_FLUSH_DCACHE_RANGE: + ret = kvm_flush_guest_dcache_range((void *)arg1, arg2); + break; + case KVM_HCALL_CLEAR_DCACHE_L1_RANGE: + ret = kvm_clear_guest_dcache_l1_range((void *)arg1, arg2); + break; + case KVM_HCALL_FLUSH_ICACHE_ALL: + ret = kvm_flush_guest_icache_all(); + break; + case KVM_HCALL_MMU_PROBE: + ret = kvm_guest_mmu_probe(arg1, (kvm_mmu_probe_t)arg2); + break; + case KVM_HCALL_SWITCH_TO_EXPANDED_PROC_STACK: + case KVM_HCALL_SWITCH_TO_EXPANDED_CHAIN_STACK: + WARN_ONCE(1, "implement me"); + ret = -ENOSYS; + break; + default: + ret = -ENOSYS; + } + + KVM_HOST_CHECK_VCPU_THREAD_CONTEXT(thread_info); + + trace_light_hcall_exit(ret); + + /* light hypercall execution completed */ + if (!from_light_hypercall) + clear_thread_flag(TIF_LIGHT_HYPERCALL); + if (from_generic_hypercall) + set_thread_flag(TIF_GENERIC_HYPERCALL); + + /* reread guest thread structure pointer, which can be changed */ + /* while switching guest processes hypercall */ + gti = thread_info->gthread_info; + + /* if there are pending VIRQs, then provide with direct injection */ + /* to cause guest interrupting and handling VIRQs */ + need_inject = kvm_try_inject_event_wish(vcpu, thread_info, + kvm_get_guest_vcpu_UPSR_value(vcpu), + kvm_get_guest_vcpu_PSR_value(vcpu)); + + /* check VCPU ID of global register and running VCPU */ + kvm_check_vcpu_ids_as_light(vcpu); + + 
__guest_enter_light(thread_info, &vcpu->arch, !!from_sdisp); + + /* from here cannot by any traps including BUG/BUG_ON/KVM_BUG_ON */ + /* because of host context is switched to guest context */ + + if (gti != NULL) { + /* restore guest kernel UPSR state from guest thread info + DO_RESTORE_GUEST_KERNEL_UPSR(gti, user_upsr); + */ + } + + from_paravirt_guest = test_ti_thread_flag(thread_info, + TIF_PARAVIRT_GUEST); + + NATIVE_RETURN_PSR_IRQ_TO_USER_UPSR(user_upsr, need_inject); + + if (!from_sdisp) { + E2K_HRET(ret); + } else { + COND_GOTO_RETURN_TO_PARAVIRT_GUEST(from_paravirt_guest, ret); + return ret; + } + return ret; +} + +/* + * hardware hypercall should return to new guest stacks and function + */ +static __always_inline void +switch_to_new_hv_vcpu_stacks(struct kvm_vcpu *vcpu, + guest_hw_stack_t *stack_regs) +{ + e2k_cr0_lo_t cr0_lo; + e2k_cr0_hi_t cr0_hi; + e2k_cr1_lo_t cr1_lo; + e2k_cr1_hi_t cr1_hi; + e2k_psp_hi_t psp_hi; + e2k_pcsp_hi_t pcsp_hi; + + /* + * Set current chain registers to return to guest upper function + */ + cr0_lo = stack_regs->crs.cr0_lo; + cr0_hi = stack_regs->crs.cr0_hi; + cr1_lo = stack_regs->crs.cr1_lo; + cr1_hi = stack_regs->crs.cr1_hi; + + /* + * Optimization to do not flush chain stack. + * + * Old stacks are not needed anymore, do not flush procedure + * registers and chain registers - only strip sizes + */ + NATIVE_STRIP_PSHTP_WINDOW(); + NATIVE_STRIP_PCSHTP_WINDOW(); + + /* + * There might be a FILL operation still going right now. + * Wait for it's completion before going further - otherwise + * the next FILL on the new PSP/PCSP registers will race + * with the previous one. + * + * The first and the second FILL operations will use different + * addresses because we will change PSP/PCSP registers, and + * thus loads/stores from these two FILLs can race with each + * other leading to bad register file (containing values from + * both stacks).. 
+ */ + E2K_WAIT(_ma_c); + + /* strip old procedure and chain stacks frames */ + psp_hi = NATIVE_NV_READ_PSP_HI_REG(); + pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG(); + psp_hi.PSP_hi_ind = 0; + pcsp_hi.PCSP_hi_ind = 0; + + NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG(cr0_lo); + NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG(cr0_hi); + NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG(cr1_lo); + NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG(cr1_hi); + + NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG(psp_hi); + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG(pcsp_hi); +} + +/* + * hardware hypercall should return to new guest user stacks and function + */ +static __always_inline void +switch_to_new_user_pv_vcpu_stacks(struct kvm_vcpu *vcpu, + guest_hw_stack_t *stack_regs) +{ + e2k_cr0_lo_t cr0_lo; + e2k_cr0_hi_t cr0_hi; + e2k_cr1_lo_t cr1_lo; + e2k_cr1_hi_t cr1_hi; + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + + /* + * Set current chain registers to return to guest upper function + */ + cr0_lo = stack_regs->crs.cr0_lo; + cr0_hi = stack_regs->crs.cr0_hi; + cr1_lo = stack_regs->crs.cr1_lo; + cr1_hi = stack_regs->crs.cr1_hi; + + psp_lo = stack_regs->stacks.psp_lo; + psp_hi = stack_regs->stacks.psp_hi; + pcsp_lo = stack_regs->stacks.pcsp_lo; + pcsp_hi = stack_regs->stacks.pcsp_hi; + + /* + * Optimization to do not flush chain stack. + * + * Old stacks are not needed anymore, do not flush procedure + * registers and chain registers - only strip sizes + */ + NATIVE_STRIP_PSHTP_WINDOW(); + NATIVE_STRIP_PCSHTP_WINDOW(); + + /* + * There might be a FILL operation still going right now. + * Wait for it's completion before going further - otherwise + * the next FILL on the new PSP/PCSP registers will race + * with the previous one. + * + * The first and the second FILL operations will use different + * addresses because we will change PSP/PCSP registers, and + * thus loads/stores from these two FILLs can race with each + * other leading to bad register file (containing values from + * both stacks).. 
+ */ + E2K_WAIT(_ma_c); + + NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG(cr0_lo); + NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG(cr0_hi); + NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG(cr1_lo); + NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG(cr1_hi); + + NATIVE_NV_WRITE_PSP_REG(psp_hi, psp_lo); + NATIVE_NV_WRITE_PCSP_REG(pcsp_hi, pcsp_lo); +} + +static inline long outdated_hypercall(const char *hcall_name) +{ + pr_err("%s(): hypercall %s is outdated and cannot be supported\n", + __func__, hcall_name); + return -ENOSYS; +} + +/* + * This is the core hypercall routine: where the Guest gets what it wants. + * Or gets killed. Or, in the case of KVM_HCALL_SHUTDOWN, both. + */ +notrace unsigned long +kvm_generic_hcalls(unsigned long hcall_num, unsigned long arg1, + unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5, + unsigned long arg6, unsigned long gsbr) +{ + thread_info_t *ti; + gthread_info_t *gti = NULL; + struct kvm_vcpu *vcpu; + struct kvm *kvm; + bool from_generic_hypercall; + bool from_light_hypercall; + gpt_regs_t gpt_regs; + gpt_regs_t *gregs = NULL; + e2k_upsr_t upsr_to_save; + e2k_usd_lo_t k_usd_lo; + e2k_size_t g_usd_size; + e2k_cr1_hi_t cr1_hi; + e2k_cr1_lo_t cr1_lo; + guest_hw_stack_t stack_regs; + bool to_new_stacks = false; + bool to_host_vcpu = false; /* it need return to host qemu thread */ + bool to_new_user_stacks = false; /* it need switch to new user */ + /* process */ + bool from_paravirt_guest; + bool need_inject; + unsigned guest_enter_flags = FROM_HYPERCALL_SWITCH | + USD_CONTEXT_SWITCH; + int users; + unsigned long ret = 0; + unsigned long from_sdisp = hcall_num >> 63; + + hcall_num &= ~(1UL << 63); + + ti = NATIVE_READ_CURRENT_REG(); + vcpu = ti->vcpu; + + if (from_sdisp) { + /* emulate hardware supported HCALL operation */ + users = kvm_pv_switch_to_hcall_host_stacks(vcpu); + KVM_WARN_ON(users > 1); + } + + __guest_exit(ti, &vcpu->arch, FROM_HYPERCALL_SWITCH); + + /* check saved greg and running VCPU IDs: should be the same */ + kvm_check_vcpu_state_greg(); 
+ + /* + * Hardware system hypercall operation disables interrupts mask in PSR + * and PSR becomes main register to control interrupts. + * Switch control from PSR register to UPSR, if UPSR + * interrupts control is used and all following kernel handler + * will be executed under UPSR control + * Setting of UPSR should be before global registers saving + * to preserve FP disable exception on movfi instructions + * while global registers saving + */ + NATIVE_SWITCH_TO_KERNEL_UPSR(upsr_to_save, + false, /* enable IRQs */ + true /* disable NMI to switch */ + /* mm context */); + + kvm = vcpu->kvm; + + if (!vcpu->arch.is_hv) { + /* switch to host MMU context to enable access to guest */ + /* physical memory from host, where this memory mapped */ + /* as virtual space of user QEMU process */ + kvm_switch_to_host_mmu_pid(current->mm); + } + + gti = ti->gthread_info; + if (gti != NULL) { + /* save guest kernel UPSR state at guest thread info + DO_SAVE_GUEST_KERNEL_UPSR(gti, upsr_to_save); + */ + } + + /* Update run state of guest */ + BUG_ON(kvm_get_guest_vcpu_runstate(vcpu) != RUNSTATE_running); + kvm_do_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_in_hcall); + + /* generic hypercall execution starts */ + from_generic_hypercall = test_thread_flag(TIF_GENERIC_HYPERCALL); + from_light_hypercall = test_thread_flag(TIF_LIGHT_HYPERCALL); + if (!from_generic_hypercall) + set_thread_flag(TIF_GENERIC_HYPERCALL); + if (from_light_hypercall) + clear_thread_flag(TIF_LIGHT_HYPERCALL); + + trace_generic_hcall(hcall_num, arg1, arg2, arg3, arg4, arg5, arg6, gsbr, vcpu->cpu); + + /* in common case cannot enable hardware stacks bounds traps, */ + /* disabled by assembler entry of the hypercall handler */ + /* (see arch/e2k/kvm/ttable.S). 
It can be done only for specific */ + /* hypercalss and at concrete time */ + /* native_set_sge(); */ + + /* + * Save info about current host and guest kernel stack state activation + * to enable recursive hypercalls, interrupts, signal handling + */ + if (unlikely(gti && test_thread_flag(TIF_VIRTUALIZED_GUEST) && + !test_thread_flag(TIF_PSEUDOTHREAD))) { + if (!test_gti_thread_flag(gti, GTIF_KERNEL_THREAD)) { + /* only on user processes can be traps, signals and */ + /* other recursive activations */ + gregs = &gpt_regs; + SAVE_KVM_KERNEL_STACKS_STATE(ti, gti, gregs); + add_gpt_regs(ti, gregs, hypercall_regs_type); + if (DEBUG_GPT_REGS_MODE) + print_all_gpt_regs(ti); + } + } + if (vcpu->arch.is_hv && hcall_num == KVM_HCALL_SWITCH_TO_VIRT_MODE) + to_new_stacks = true; + + cr1_lo = NATIVE_NV_READ_CR1_LO_REG(); + vcpu->arch.hcall_irqs_disabled = kvm_guest_vcpu_irqs_disabled(vcpu, + vcpu->arch.is_hv ? AW(upsr_to_save) + : kvm_get_guest_vcpu_UPSR_value(vcpu), + cr1_lo.CR1_lo_psr); + + /* save guest stack state to return from hypercall */ + cr1_hi = NATIVE_NV_READ_CR1_HI_REG(); + g_usd_size = cr1_hi.CR1_hi_ussz << 4; + + if (gregs) { + /* Set current state of guest kernel stacks as start */ + /* point of new activations for guest. */ + /* Probably updatind needs only for some hypercalls */ + /* and this action can be optimized, but now thus */ + /* Host kernel state updating should be at points of */ + /* discontinuity in host kernel runnig. */ + /* For example before switch to guest or user functions, */ + /* which can cause recursive host kernel events (traps, ...) 
*/ + INC_KVM_GUEST_KERNEL_STACKS_STATE(ti, gti, g_usd_size); + DebugKVMACT("updated guest data stack : " + "base 0x%llx, size 0x%x, top 0x%lx\n", + gti->stack_regs.stacks.usd_lo.USD_lo_base, + gti->stack_regs.stacks.usd_hi.USD_hi_size, + gti->stack_regs.stacks.top); + if (DEBUG_GPT_REGS_MODE) + print_all_gpt_regs(ti); + } + + k_usd_lo = NATIVE_NV_READ_USD_LO_REG(); + raw_local_irq_enable(); + + switch (hcall_num) { + case KVM_HCALL_PV_WAIT: + kvm_pv_wait(kvm, vcpu); + break; + case KVM_HCALL_PV_KICK: + kvm_pv_kick(kvm, arg1); + break; + case KVM_HCALL_RELEASE_TASK_STRUCT: + ret = kvm_release_guest_task_struct(vcpu, arg1); + break; + case KVM_HCALL_SET_CLOCKEVENT: + kvm_guest_set_clockevent(vcpu, arg1); + break; + case KVM_HCALL_COMPLETE_LONG_JUMP: + ret = kvm_long_jump_return(vcpu, (kvm_long_jump_info_t *)arg1); + break; + case KVM_HCALL_LAUNCH_SIG_HANDLER: + ret = kvm_sig_handler_return(vcpu, (kvm_stacks_info_t *)arg1, + arg2, arg3, &stack_regs); + to_new_user_stacks = true; + break; + case KVM_HCALL_APPLY_PSP_BOUNDS: + ret = kvm_apply_updated_psp_bounds(vcpu, arg1, arg2, arg3, + arg4, arg5); + break; + case KVM_HCALL_APPLY_PCSP_BOUNDS: + ret = kvm_apply_updated_pcsp_bounds(vcpu, arg1, arg2, arg3, + arg4, arg5); + break; + case KVM_HCALL_CORRECT_TRAP_RETURN_IP: + ret = kvm_correct_guest_trap_return_ip(arg1); + break; + case KVM_HCALL_SWITCH_TO_VIRT_MODE: + ret = kvm_switch_to_virt_mode(vcpu, + (kvm_task_info_t *)arg1, &stack_regs, + (void (*)(void *data, void *arg_1, void *arg_2))arg2, + (void *)arg3, (void *)arg4, (void *)arg5); + if (vcpu->arch.is_hv) + to_new_stacks = true; + break; + case KVM_HCALL_SWITCH_GUEST_KERNEL_STACKS: + ret = kvm_switch_guest_kernel_stacks(vcpu, + (kvm_task_info_t *)arg1, (char *)arg2, + (unsigned long *)arg3, (int)arg4, + &stack_regs); + break; + case KVM_HCALL_GUEST_INTR_HANDLER: + outdated_hypercall("KVM_HCALL_GUEST_INTR_HANDLER"); + break; + case KVM_HCALL_GUEST_FREE_INTR_HANDLER: + 
outdated_hypercall("KVM_HCALL_GUEST_FREE_INTR_HANDLER"); + break; + case KVM_HCALL_GUEST_INTR_THREAD: + outdated_hypercall("KVM_HCALL_GUEST_INTR_THREAD"); + break; + case KVM_HCALL_WAIT_FOR_VIRQ: + outdated_hypercall("KVM_HCALL_WAIT_FOR_VIRQ"); + break; + case KVM_HCALL_GET_GUEST_DIRECT_VIRQ: + ret = kvm_get_guest_direct_virq(vcpu, (int)arg1, (int)arg2); + break; + case KVM_HCALL_FREE_GUEST_DIRECT_VIRQ: + ret = kvm_free_guest_direct_virq(kvm, (int)arg1); + break; + case KVM_HCALL_COPY_GUEST_KERNEL_STACKS: + ret = kvm_copy_guest_kernel_stacks(vcpu, + (kvm_task_info_t *)arg1, cr1_hi); + break; + case KVM_HCALL_UPDATE_HW_STACKS_FRAMES: + ret = kvm_update_hw_stacks_frames(vcpu, + (e2k_mem_crs_t *)arg1, (int)arg2, + (kernel_mem_ps_t *)arg3, (int)arg4, (int)arg5); + break; + case KVM_HCALL_COPY_HW_STACKS_FRAMES: + ret = kvm_copy_hw_stacks_frames(vcpu, + (void *)arg1, (void *)arg2, arg3, (bool)arg4); + break; + case KVM_HCALL_SWITCH_TO_GUEST_NEW_USER: + ret = kvm_switch_to_guest_new_user(vcpu, + (kvm_task_info_t *)arg1, &stack_regs); + to_new_user_stacks = true; + break; + case KVM_HCALL_CLONE_GUEST_USER_STACKS: + ret = kvm_clone_guest_user_stacks(vcpu, + (kvm_task_info_t *)arg1); + break; + case KVM_HCALL_COPY_GUEST_USER_STACKS: + ret = kvm_copy_guest_user_stacks(vcpu, + (kvm_task_info_t *)arg1, (vcpu_gmmu_info_t *)arg2); + break; + case KVM_HCALL_PATCH_GUEST_DATA_AND_CHAIN_STACKS: + ret = kvm_patch_guest_data_and_chain_stacks(vcpu, + (kvm_data_stack_info_t *)arg1, + (kvm_pcs_patch_info_t *)arg2, arg3); + break; + case KVM_HCALL_GET_GUEST_GLOB_REGS: + ret = kvm_get_guest_glob_regs(vcpu, (unsigned long **)arg1, + arg2, (bool)arg3, (unsigned int *)arg4); + break; + case KVM_HCALL_SET_GUEST_GLOB_REGS: + ret = kvm_set_guest_glob_regs(vcpu, (unsigned long **)arg1, + arg2, (bool)arg3, (unsigned int *)arg4); + break; + case KVM_HCALL_GET_GUEST_LOCAL_GLOB_REGS: + ret = kvm_get_guest_local_glob_regs(vcpu, + (unsigned long **)arg1, (bool)arg2); + break; + case 
KVM_HCALL_SET_GUEST_LOCAL_GLOB_REGS: + ret = kvm_set_guest_local_glob_regs(vcpu, + (unsigned long **)arg1, (bool)arg2); + break; + case KVM_HCALL_GET_ALL_GUEST_GLOB_REGS: + ret = kvm_get_all_guest_glob_regs(vcpu, (unsigned long **)arg1); + break; + case KVM_HCALL_RECOVERY_FAULTED_TAGGED_GUEST_STORE: + case KVM_HCALL_RECOVERY_FAULTED_TAGGED_STORE: + ret = kvm_recovery_faulted_tagged_guest_store(vcpu, arg1, arg2, + arg3, arg4, arg5, arg6); + break; + case KVM_HCALL_RECOVERY_FAULTED_GUEST_LOAD: + case KVM_HCALL_RECOVERY_FAULTED_LOAD: + ret = kvm_recovery_faulted_guest_load(vcpu, arg1, (u64 *)arg2, + (u8 *)arg3, arg4, (int)arg5); + break; + case KVM_HCALL_RECOVERY_FAULTED_GUEST_MOVE: + case KVM_HCALL_RECOVERY_FAULTED_MOVE: + ret = kvm_recovery_faulted_guest_move(vcpu, arg1, arg2, + arg3, arg4, arg5, (u32)arg6); + break; + case KVM_HCALL_RECOVERY_FAULTED_LOAD_TO_GUEST_GREG: + case KVM_HCALL_RECOVERY_FAULTED_LOAD_TO_GREG: + ret = kvm_recovery_faulted_load_to_guest_greg(vcpu, arg1, + (int) arg2, arg3, arg4, arg5, arg6); + break; + case KVM_HCALL_MOVE_TAGGED_GUEST_DATA: + ret = kvm_move_tagged_guest_data(vcpu, (int)arg1, arg2, arg3); + break; + case KVM_HCALL_FAST_TAGGED_GUEST_MEMORY_COPY: + ret = kvm_fast_tagged_guest_memory_copy(vcpu, (void *)arg1, + (void *)arg2, arg3, arg4, arg5, (int)arg6); + break; + case KVM_HCALL_FAST_TAGGED_GUEST_MEMORY_SET: + ret = kvm_fast_tagged_guest_memory_set(vcpu, (void *)arg1, + arg2, arg3, arg4, arg5); + break; + case KVM_HCALL_FAST_TAGGED_MEMORY_COPY: + ret = kvm_fast_guest_tagged_memory_copy(vcpu, (void *)arg1, + (void *)arg2, arg3, arg4, arg5, (int)arg6); + break; + case KVM_HCALL_FAST_TAGGED_MEMORY_SET: + ret = kvm_fast_guest_tagged_memory_set(vcpu, (void *)arg1, + arg2, arg3, arg4, arg5); + break; + case KVM_HCALL_PT_ATOMIC_UPDATE: + ret = kvm_pv_mmu_pt_atomic_update(vcpu, (int)arg1, + arg2, (void __user *)arg3, + (pt_atomic_op_t)arg4, arg5); + break; + case KVM_HCALL_GUEST_MM_DROP: + ret = kvm_guest_mm_drop(vcpu, (int)arg1); + 
break; + case KVM_HCALL_ACTIVATE_GUEST_MM: + ret = kvm_activate_guest_mm(vcpu, (int)arg1, (int)arg2, + (gpa_t)arg3); + break; + case KVM_HCALL_SWITCH_GUEST_MM: + ret = kvm_pv_switch_guest_mm(vcpu, (int)arg1, (int)arg2, + (gpa_t)arg3); + break; + case KVM_HCALL_VCPU_MMU_STATE: + ret = kvm_pv_vcpu_mmu_state(vcpu, + (vcpu_gmmu_info_t __user *)arg1); + break; + case KVM_HCALL_BOOT_SPIN_LOCK_SLOW: + ret = kvm_boot_spin_lock_slow(vcpu, (void *)arg1, (bool)arg2); + break; + case KVM_HCALL_BOOT_SPIN_LOCKED_SLOW: + ret = kvm_boot_spin_locked_slow(vcpu, (void *)arg1); + break; + case KVM_HCALL_BOOT_SPIN_UNLOCK_SLOW: + ret = kvm_boot_spin_unlock_slow(vcpu, (void *)arg1, (bool)arg2); + break; + case KVM_HCALL_GUEST_SPIN_LOCK_SLOW: + ret = kvm_guest_spin_lock_slow(kvm, (void *)arg1, (bool)arg2); + break; + case KVM_HCALL_GUEST_SPIN_LOCKED_SLOW: + ret = kvm_guest_spin_locked_slow(kvm, (void *)arg1); + break; + case KVM_HCALL_GUEST_SPIN_UNLOCK_SLOW: + ret = kvm_guest_spin_unlock_slow(kvm, (void *)arg1, (bool)arg2); + break; + case KVM_HCALL_GUEST_CSD_LOCK_CTL: + ret = kvm_guest_csd_lock_ctl(vcpu, + (csd_ctl_t)arg1, (void *)arg2); + break; + case KVM_HCALL_GUEST_IOPORT_REQ: + ret = kvm_guest_ioport_request(vcpu, (u16)arg1, + (u32 __user *)arg2, (u8)arg3, (u8)arg4); + break; + case KVM_HCALL_GUEST_IOPORT_STRING_REQ: + ret = kvm_guest_ioport_string_request(vcpu, (u16)arg1, + (void __user *)arg2, (u8)arg3, (u32) arg4, (u8)arg5); + break; + case KVM_HCALL_GUEST_MMIO_REQ: + ret = kvm_guest_mmio_request(vcpu, arg1, + (u64 __user *)arg2, (u8)arg3, (u8)arg4); + break; + case KVM_HCALL_CONSOLE_IO: + ret = kvm_guest_console_io(vcpu, (int)arg1, (int)arg2, + (char __user *)arg3); + break; + case KVM_HCALL_NOTIFY_IO: + ret = kvm_guest_notify_io(vcpu, (unsigned int)arg1); + break; + case KVM_HCALL_GUEST_VCPU_COMMON_IDLE: + kvm_guest_vcpu_common_idle(vcpu, arg1, (bool)arg2); + break; + case KVM_HCALL_GUEST_VCPU_RELAX: + kvm_guest_vcpu_relax(); + break; +#ifdef CONFIG_SMP + case 
KVM_HCALL_ACTIVATE_GUEST_VCPU: + ret = kvm_activate_host_vcpu(kvm, (int)arg1); + break; + case KVM_HCALL_ACTIVATE_GUEST_ALL_VCPUS: + ret = kvm_activate_guest_all_vcpus(kvm); + break; +#endif /* CONFIG_SMP */ + case KVM_HCALL_HOST_PRINTK: + ret = kvm_guest_printk_on_host(vcpu, (char __user *)arg1, + (int)arg2); + break; + case KVM_HCALL_GET_SPT_TRANSLATION: + ret = kvm_get_va_spt_translation(vcpu, arg1, + (mmu_spt_trans_t __user *)arg2); + break; + case KVM_HCALL_PRINT_GUEST_KERNEL_PTES: + ret = kvm_print_guest_kernel_ptes(arg1); + break; + case KVM_HCALL_PRINT_GUEST_USER_ADDRESS_PTES: + ret = kvm_print_guest_user_address_ptes(kvm, + (int)arg1, arg2); + break; + case KVM_HCALL_SHUTDOWN: + ret = kvm_guest_shutdown(vcpu, (void __user *)arg1, arg2); + break; + case KVM_HCALL_DUMP_GUEST_STACK: { + dump_stack(); + break; + } + case KVM_HCALL_FTRACE_STOP: + tracing_off(); + break; + case KVM_HCALL_FTRACE_DUMP: + ftrace_dump(DUMP_ALL); + break; + case KVM_HCALL_DUMP_COMPLETION: + kvm_complete_vcpu_show_state(vcpu); + break; +#ifdef CONFIG_KVM_ASYNC_PF + case KVM_HCALL_PV_ENABLE_ASYNC_PF: + ret = kvm_pv_host_enable_async_pf(vcpu, (u64) arg1, + (u64) arg2, (u32) arg3, (u32) arg4); + break; +#endif /* CONFIG_KVM_ASYNC_PF */ + case KVM_HCALL_FLUSH_TLB_RANGE: + ret = kvm_hv_flush_tlb_range(vcpu, arg1, arg2); + break; + case KVM_HCALL_SYNC_ADDR_RANGE: + kvm_hv_sync_addr_range(vcpu, arg1, arg2); + break; + default: + pr_err("Bad hypercall #%li\n", hcall_num); + ret = -ENOSYS; + } + + if (ret == RETURN_TO_HOST_APP_HCRET) + to_host_vcpu = true; + + raw_all_irq_disable(); /* all IRQs to switch mm context */ + while (need_resched()) { + raw_all_irq_enable(); + schedule(); + raw_all_irq_disable(); + } + + /* It can be trap on hypercall handler (due to guest user address */ + /* access while copy from/to user for example). 
So: */ + /* 1) the guest process can be scheduled and migrate to other VCPU */ + /* 2) host VCPU thread was changed and */ + /* 3) need update thread info and */ + /* 4) VCPU satructures pointers */ + KVM_HOST_UPDATE_VCPU_THREAD_CONTEXT(NULL, &ti, NULL, NULL, &vcpu); + GTI_BUG_ON(gti != ti->gthread_info); + + /* Update run state of guest */ + WARN_ON(kvm_get_guest_vcpu_runstate(vcpu) != RUNSTATE_in_hcall); + kvm_do_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_running); + /* update guest system time common with host */ + kvm_update_guest_system_time(vcpu->kvm); + + gti = ti->gthread_info; + if (gti != NULL) { + /* restore guest kernel UPSR state from guest thread info + DO_RESTORE_GUEST_KERNEL_UPSR(gti, upsr_to_save); + */ + } + + vcpu->arch.hcall_irqs_disabled = false; + + trace_generic_hcall_exit(ret); + + /* generic hypercall execution completed */ + if (!from_generic_hypercall) + clear_thread_flag(TIF_GENERIC_HYPERCALL); + if (from_light_hypercall) + set_thread_flag(TIF_LIGHT_HYPERCALL); + + from_paravirt_guest = test_ti_thread_flag(ti, TIF_PARAVIRT_GUEST); + + /* + * Now we should restore kernel saved stack state and + * return to guest kernel data stack, if it need + */ + if (ti->gthread_info != NULL && + !test_gti_thread_flag(ti->gthread_info, GTIF_KERNEL_THREAD)) { + RESTORE_KVM_GUEST_KERNEL_STACKS_STATE(ti); + delete_gpt_regs(ti); + DebugKVMACT("restored guest data stack : " + "base 0x%llx, size 0x%x, top 0x%lx\n", + ti->gthread_info->stack_regs.stacks.usd_lo.USD_lo_base, + ti->gthread_info->stack_regs.stacks.usd_hi.USD_hi_size, + ti->gthread_info->stack_regs.stacks.top); + if (DEBUG_GPT_REGS_MODE) + print_all_gpt_regs(ti); + } + + if (to_new_stacks) { + switch_to_new_hv_vcpu_stacks(vcpu, &stack_regs); + } else if (to_new_user_stacks) { + switch_to_new_user_pv_vcpu_stacks(vcpu, &stack_regs); + } + + /* if there are pending VIRQs, then provide with direct interrupt */ + /* to cause guest interrupting and handling VIRQs */ + if (!to_host_vcpu) { + 
need_inject = kvm_try_inject_event_wish(ti->vcpu, ti, + kvm_get_guest_vcpu_UPSR_value(vcpu), + kvm_get_guest_vcpu_PSR_value(vcpu)); + } else { + need_inject = false; + } + + if (!vcpu->arch.is_hv) { + /* return to guest VCPU MMU context */ + kvm_switch_to_guest_mmu_pid(vcpu); + } + + /* check saved greg and running VCPU IDs: should be the same */ + kvm_check_vcpu_state_greg(); + + __guest_enter(ti, &vcpu->arch, guest_enter_flags); + + /* from here cannot by any traps including BUG/BUG_ON/KVM_BUG_ON */ + /* because of host context is switched to guest context */ + + /* + * Return control from UPSR register to PSR, if UPSR + * interrupts control is used. + * RETURN operation restores PSR state at hypercall point and + * recovers interrupts control + * Restoring of user UPSR should be after global registers restoring + * to preserve FP disable exception on movfi instructions + * while global registers manipulations + */ + NATIVE_RETURN_PSR_IRQ_TO_USER_UPSR(upsr_to_save, need_inject); + + if (!from_sdisp) { + E2K_HRET(ret); + } + + if (!(to_new_user_stacks || to_new_stacks)) { + users = kvm_pv_restore_hcall_guest_stacks(ti->vcpu); + } else { + kvm_pv_clear_hcall_host_stacks(ti->vcpu); + } + + if (to_host_vcpu) { + return pv_vcpu_return_to_host(ti, vcpu); + } + + COND_GOTO_RETURN_TO_PARAVIRT_GUEST(from_paravirt_guest, ret); + return ret; +} diff --git a/arch/e2k/kvm/intercepts.c b/arch/e2k/kvm/intercepts.c new file mode 100644 index 000000000000..a7a553f53cc0 --- /dev/null +++ b/arch/e2k/kvm/intercepts.c @@ -0,0 +1,3037 @@ + +/* + * CPU hardware virtualized support + * Interceptions handling + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cpu_defs.h" +#include "mmu_defs.h" +#include "mmu.h" +#include "gregs.h" +#include "process.h" +#include "io.h" +#include "intercepts.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_EXC_INSTR_FAULT_MODE +#undef DebugIPF +#define DEBUG_EXC_INSTR_FAULT_MODE 0 /* instruction page fault */ + /* exception mode debug */ +#define DebugIPF(fmt, args...) \ +({ \ + if (DEBUG_EXC_INSTR_FAULT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_INTC_INSTR_FAULT_MODE +#undef DebugIPINTC +#define DEBUG_INTC_INSTR_FAULT_MODE 0 /* MMU intercept on instr */ + /* page fault mode debug */ +#define DebugIPINTC(fmt, args...) \ +({ \ + if (DEBUG_INTC_INSTR_FAULT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_INTC_PAGE_FAULT_MODE +#undef DebugPFINTC +#define DEBUG_INTC_PAGE_FAULT_MODE 0 /* MMU intercept on data */ + /* page fault mode debug */ +#define DebugPFINTC(fmt, args...) \ +({ \ + if (DEBUG_INTC_PAGE_FAULT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_INTC_REEXEC_MODE +#undef DebugREEXECMU +#define DEBUG_INTC_REEXEC_MODE 0 /* reexecute MMU intercepts debug */ +#define DebugREEXECMU(fmt, args...) \ +({ \ + if (DEBUG_INTC_REEXEC_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_INTC_REEXEC_VERBOSE_MODE +#undef DebugREEXECMUV +#define DEBUG_INTC_REEXEC_VERBOSE_MODE 0 /* reexecute MMU intercepts */ + /* verbose debug */ +#define DebugREEXECMUV(fmt, args...) 
\ +({ \ + if (DEBUG_INTC_REEXEC_VERBOSE_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_EXC_INTERRUPT_MODE +#undef DebugINTR +#define DEBUG_EXC_INTERRUPT_MODE 0 /* interrupt intercept */ + /* debug */ +#define DebugINTR(fmt, args...) \ +({ \ + if (DEBUG_EXC_INTERRUPT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_INTC_MODE +#undef DebugINTC +#define DEBUG_KVM_INTC_MODE 0 /* intercept debug mode */ +#define DebugINTC(fmt, args...) \ +({ \ + if (DEBUG_KVM_INTC_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_INTC_CU_ENTRY_MODE +#undef DebugCUEN +#define DEBUG_INTC_CU_ENTRY_MODE 0 /* CPU intercept entries */ + /* debug mode */ +#define DebugCUEN(fmt, args...) \ +({ \ + if (DEBUG_INTC_CU_ENTRY_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_INTC_CU_EXCEPTION_MODE +#undef DebugINTCEXC +#define DEBUG_INTC_CU_EXCEPTION_MODE 0 /* CPU exceptions intercept */ + /* debug mode */ +#define DebugINTCEXC(fmt, args...) \ +({ \ + if (DEBUG_INTC_CU_EXCEPTION_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_INTC_TIRs_MODE +#undef DebugTIRs +#define DEBUG_INTC_TIRs_MODE 0 /* intercept TIRs debugging */ +#define DebugTIRs(fmt, args...) \ +({ \ + if (DEBUG_INTC_TIRs_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_LWISH_TIRs_MODE +#undef DebugLWTIRs +#define DEBUG_LWISH_TIRs_MODE 0 /* intercept on last wish TIRs */ + /* debugging */ +#define DebugLWTIRs(fmt, args...) \ +({ \ + if (DEBUG_LWISH_TIRs_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_INTC_VM_EXIT_MODE +#undef DebugVMEX +#define DEBUG_INTC_VM_EXIT_MODE 0 /* VM exit intercept debug mode */ +#define DebugVMEX(fmt, args...) 
\ +({ \ + if (DEBUG_INTC_VM_EXIT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_INTC_CU_REG_MODE +#undef DebugCUREG +#define DEBUG_INTC_CU_REG_MODE 0 /* CPU reguster access intercept */ + /* events debug mode */ +#define DebugCUREG(fmt, args...) \ +({ \ + if (DEBUG_INTC_CU_REG_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_INTC_MMU_MODE +#undef DebugINTCMU +#define DEBUG_INTC_MMU_MODE 0 /* MMU intercept events debug mode */ +#define DebugINTCMU(fmt, args...) \ +({ \ + if (DEBUG_INTC_MMU_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_INTC_MMU_SS_REG_MODE +#undef DebugMMUSSREG +#define DEBUG_INTC_MMU_SS_REG_MODE 0 /* MMU secondary space */ + /* register access intercept */ + /* events debug mode */ +#define DebugMMUSSREG(fmt, args...) \ +({ \ + if (DEBUG_INTC_MMU_SS_REG_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_INTC_WAIT_TRAP_MODE +#undef DebugWTR +#define DEBUG_INTC_WAIT_TRAP_MODE 0 /* CU wait trap intercept */ +#define DebugWTR(fmt, args...) \ +({ \ + if (DEBUG_INTC_WAIT_TRAP_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PF_RETRY_MODE +#undef DebugTRY +#define DEBUG_PF_RETRY_MODE 0 /* retry page fault debug */ +#define DebugTRY(fmt, args...) \ +({ \ + if (DEBUG_PF_RETRY_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PF_FORCED_MODE +#undef DebugPFFORCED +#define DEBUG_PF_FORCED_MODE 0 /* forced page fault event debug */ +#define DebugPFFORCED(fmt, args...) \ +({ \ + if (DEBUG_PF_FORCED_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PF_EXC_RPR_MODE +#undef DebugEXCRPR +#define DEBUG_PF_EXC_RPR_MODE 0 /* page fault at recovery mode debug */ +#define DebugEXCRPR(fmt, args...) 
\ +({ \ + if (DEBUG_PF_EXC_RPR_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_INTC_MMU_PID_MODE +#undef DebugMMUPID +#define DEBUG_INTC_MMU_PID_MODE 0 /* MMU PID register access intercept */ + /* events debug mode */ +#define DebugMMUPID(fmt, args...) \ +({ \ + if (DEBUG_INTC_MMU_PID_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_VIRQs_MODE +#undef DebugVIRQs +#define DEBUG_KVM_VIRQs_MODE 0 /* VIRQs injection debugging */ +#define DebugVIRQs(fmt, args...) \ +({ \ + if (DEBUG_KVM_VIRQs_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +static intc_info_cu_entry_t *find_cu_info_entry(struct kvm_vcpu *vcpu, + intc_info_cu_t *cu, + info_cu_event_code_t code, + cu_reg_no_t reg_no); + +static noinline notrace int +do_unsupported_intc(struct kvm_vcpu *vcpu, struct pt_regs *regs) +{ + pr_err("%s(): unsupported intercept in INTC_INFO_CU\n", __func__); + return -ENOSYS; +} + +/* Interception table. */ +exc_intc_handler_t intc_exc_table[INTC_CU_COND_EXC_MAX] = { + [0 ... 
INTC_CU_COND_EXC_MAX - 1] = + (exc_intc_handler_t)(do_unsupported_intc) +}; + +const cond_exc_info_t cond_exc_info_table[INTC_CU_COND_EXC_MAX] = { + { + no : INTC_CU_EXC_INSTR_DEBUG_NO, + exc_mask : exc_instr_debug_mask, + name : "exc_instr_debug", + }, + { + no : INTC_CU_EXC_DATA_DEBUG_NO, + exc_mask : exc_data_debug_mask, + name : "exc_data_debug", + }, + { + no : INTC_CU_EXC_INSTR_PAGE_NO, + exc_mask : exc_instr_page_miss_mask | + exc_instr_page_prot_mask | + exc_ainstr_page_miss_mask | + exc_ainstr_page_prot_mask, + name : "exc instr/ainstr page miss/prot", + }, + { + no : INTC_CU_EXC_DATA_PAGE_NO, + exc_mask : exc_data_page_mask, + name : "exc_data_page", + }, + { + no : INTC_CU_EXC_MOVA_NO, + exc_mask : exc_mova_ch_0_mask | + exc_mova_ch_1_mask | + exc_mova_ch_2_mask | + exc_mova_ch_3_mask, + name : "exc_mova_ch_#0/1/2/3", + }, + { + no : INTC_CU_EXC_INTERRUPT_NO, + exc_mask : exc_interrupt_num, + name : "exc_interrupt", + }, + { + no : INTC_CU_EXC_NM_INTERRUPT_NO, + exc_mask : exc_nm_interrupt_num, + name : "exc_nm_interrupt", + }, + { + no : -1, + exc_mask : 0, + name : "reserved", + }, +}; + +static int do_forced_data_page_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs); +static int do_forced_gva_data_page_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs); +static int do_shadow_data_page_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs); +static int do_data_page_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs); +static int do_instr_page_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs); +static int do_ainstr_page_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs); +static int do_read_mmu_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs); +static int do_write_mmu_reg_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs); 
+static int do_tlb_line_flush_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs); + + +static noinline notrace int +do_unsupported_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs) +{ + int event = intc_info_mu->hdr.event_code; + + pr_err("%s(): unsupported MMU event intercept code %d %s\n", + __func__, event, kvm_get_mu_event_name(event)); + return -ENOSYS; +} + +static noinline notrace int +do_reserved_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs) +{ + int event = intc_info_mu->hdr.event_code; + + pr_err("%s(): reserved MMU event intercept code %d\n", + __func__, event); + return -ENOSYS; +} + +const mu_event_desc_t mu_events_desc_table[MU_INTC_EVENTS_MAX] = { + { + code : IME_FORCED, + handler : do_forced_data_page_intc_mu, + name : "empty: forced", + }, + { + code : IME_FORCED_GVA, + handler : do_forced_gva_data_page_intc_mu, + name : "empty: page fault GVA->GPA", + }, + { + code : IME_SHADOW_DATA, + handler : do_shadow_data_page_intc_mu, + name : "data page on shadow PT", + }, + { + code : IME_GPA_DATA, + handler : do_data_page_intc_mu, + name : "data page fault GPA->PA", + }, + { + code : IME_GPA_INSTR, + handler : do_instr_page_intc_mu, + name : "instr page fault", + }, + { + code : IME_GPA_AINSTR, + handler : do_ainstr_page_intc_mu, + name : "async instr page fault", + }, + { + code : IME_RESERVED_6, + handler : do_reserved_intc_mu, + name : "reserved #6", + }, + { + code : IME_RESERVED_7, + handler : do_reserved_intc_mu, + name : "reserved #7", + }, + { + code : IME_MAS_IOADDR, + handler : do_unsupported_intc_mu, + name : "GPA/IO address access", + }, + { + code : IME_READ_MU, + handler : do_read_mmu_intc_mu, + name : "read MMU register", + }, + { + code : IME_WRITE_MU, + handler : do_write_mmu_reg_intc_mu, + name : "write MMU register", + }, + { + code : IME_CACHE_FLUSH, + handler : do_unsupported_intc_mu, + name : "cache flush operation", + }, + { + code : 
IME_CACHE_LINE_FLUSH, + handler : do_unsupported_intc_mu, + name : "cache line flush operation", + }, + { + code : IME_ICACHE_FLUSH, + handler : do_unsupported_intc_mu, + name : "instr cache flush operation", + }, + { + code : IME_ICACHE_LINE_FLUSH_USER, + handler : do_unsupported_intc_mu, + name : "user instr cache flush operation", + }, + { + code : IME_ICACHE_LINE_FLUSH_SYSTEM, + handler : do_unsupported_intc_mu, + name : "system instr cache flush operation", + }, + { + code : IME_TLB_FLUSH, + handler : do_unsupported_intc_mu, + name : "TLB flush operation", + }, + { + code : IME_TLB_PAGE_FLUSH_LAST, + handler : do_tlb_line_flush_intc_mu, + name : "main TLB page flush operation", + }, + { + code : IME_TLB_PAGE_FLUSH_UPPER, + handler : do_tlb_line_flush_intc_mu, + name : "upper level TLB page flush operation", + }, + { + code : IME_TLB_ENTRY_PROBE, + handler : do_unsupported_intc_mu, + name : "TLB entry probe operation", + }, +}; + +static int do_instr_page_exc(struct kvm_vcpu *vcpu, struct pt_regs *regs, + bool nonpaging) +{ + int evn_no = 0; /* should not be used here */ + intc_mu_state_t *mu_state; + struct trap_pt_regs *trap = regs->trap; + e2k_tir_lo_t tir_lo; + e2k_tir_hi_t tir_hi; + gva_t address; + unsigned long exc; + u64 exc_mask; + u32 error_code; + bool async_instr = false; + const char *trap_name; + int ret; + pf_res_t pfres; + + KVM_BUG_ON(nonpaging != !is_paging(vcpu)); + + trap->TIR_lo = AW(trap->TIRs[0].TIR_lo); + trap->TIR_hi = AW(trap->TIRs[0].TIR_hi); + tir_lo.TIR_lo_reg = trap->TIR_lo; + tir_hi.TIR_hi_reg = trap->TIR_hi; + + error_code = PFERR_INSTR_FAULT_MASK; + exc = tir_hi.TIR_hi_exc; + + if (likely(exc & exc_instr_page_miss_mask)) { + trap->nr_page_fault_exc = exc_instr_page_miss_num; + exc_mask = exc_instr_page_miss_mask; + error_code |= PFERR_NOT_PRESENT_MASK; + trap_name = "instr_page_miss"; + } else if (exc & exc_instr_page_prot_mask) { + trap->nr_page_fault_exc = exc_instr_page_prot_num; + exc_mask = exc_instr_page_prot_mask; + 
error_code |= PFERR_NOT_PRESENT_MASK | PFERR_INSTR_PROT_MASK; + trap_name = "instr_page_prot"; + } else if (exc & exc_ainstr_page_miss_mask) { + trap->nr_page_fault_exc = exc_ainstr_page_miss_num; + exc_mask = exc_ainstr_page_miss_mask; + async_instr = true; + trap_name = "async_instr_page_miss"; + } else if (exc & exc_ainstr_page_prot_mask) { + trap->nr_page_fault_exc = exc_ainstr_page_prot_num; + exc_mask = exc_ainstr_page_prot_mask; + error_code |= PFERR_NOT_PRESENT_MASK | PFERR_INSTR_PROT_MASK; + async_instr = true; + trap_name = "async_instr_page_prot"; + } else { + exc_mask = 0; + KVM_BUG_ON(true); + } + + if (!async_instr) { + address = tir_lo.TIR_lo_ip; + } else { + address = AS(regs->ctpr2).ta_base; + } + + if (nonpaging) + address = nonpaging_gva_to_gpa(vcpu, address, ACC_EXEC_MASK, + NULL); + + DebugIPF("intercept on %s exception for IP 0x%lx\n", + trap_name, address); + + vcpu->arch.intc_ctxt.cur_mu = evn_no; + mu_state = get_intc_mu_state(vcpu); + mu_state->may_be_retried = false; + mu_state->ignore_notifier = false; + + ret = kvm_mmu_instr_page_fault(vcpu, address, async_instr, error_code); + pfres = mu_state->pfres; + if (ret < 0) { + /* page fault handler detected error, so pass fault */ + /* to guest handler */ + pr_err("%s(): instr page fault for IP 0x%lx could not be " + "handled, error %d\n", + __func__, address, ret); + kvm_need_pass_vcpu_exception(vcpu, exc_mask); + } else if (ret == 0) { + if (pfres == PFRES_NO_ERR) { + /* page fault successfuly handled and guest can */ + /* continue execution without fault injection */ + kvm_need_delete_vcpu_exception(vcpu, exc_mask); + } else if (pfres == PFRES_RETRY) { + /* page fault handling should be retried, but */ + /* it is not allowed (implemented) in this case */ + kvm_need_pass_vcpu_exception(vcpu, exc_mask); + } else { + /* page fault failed */ + kvm_need_pass_vcpu_exception(vcpu, exc_mask); + } + } else { + /* The page is not mapped by the guest. 
*/ + /* pass to let the guest handle it */ + kvm_need_pass_vcpu_exception(vcpu, exc_mask); + } + return 0; +} + +static int do_nonp_instr_page_intc_exc(struct kvm_vcpu *vcpu, + struct pt_regs *regs) +{ + return do_instr_page_exc(vcpu, regs, true /* nonpaging ? */); +} + +static int do_instr_page_intc_exc(struct kvm_vcpu *vcpu, struct pt_regs *regs) +{ + return do_instr_page_exc(vcpu, regs, false /* nonpaging ? */); +} + +static int instr_page_fault_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs, bool async_instr) +{ + intc_mu_state_t *mu_state = get_intc_mu_state(vcpu); + struct trap_pt_regs *trap = regs->trap; + gpa_t gpa; + gva_t address; + tc_cond_t cond; + tc_fault_type_t ftype; + bool nonpaging = !is_paging(vcpu); + const char *trap_name; + u32 error_code; + + gpa = intc_info_mu->gpa; + address = intc_info_mu->gva; + cond = intc_info_mu->condition; + AW(ftype) = AS(cond).fault_type; + + DebugIPINTC("intercept on %s instr page, IP gpa 0x%llx gva 0x%lx, " + "fault type 0x%x\n", + (async_instr) ? 
"async" : "sync", gpa, address, AW(ftype)); + + if (likely(!nonpaging)) { + /* paging mode */ + if (is_shadow_paging(vcpu)) { + /* GP_* PT can be used only to data access */ + } else if (is_phys_paging(vcpu)) { + address = nonpaging_gva_to_gpa(vcpu, gpa, + ACC_EXEC_MASK, NULL); + } else { + KVM_BUG_ON(true); + } + } else { + /* nonpaging mode, all addresses should be physical */ + if (is_phys_paging(vcpu)) { + address = nonpaging_gva_to_gpa(vcpu, gpa, + ACC_EXEC_MASK, NULL); + } else if (is_shadow_paging(vcpu)) { + /* GP_* PT is not used, GPA is not set by HW */ + address = nonpaging_gva_to_gpa(vcpu, address, + ACC_EXEC_MASK, NULL); + } else { + KVM_BUG_ON(true); + } + } + + error_code = PFERR_INSTR_FAULT_MASK; + if (AS(ftype).page_miss) { + trap->nr_page_fault_exc = exc_instr_page_miss_num; + error_code |= PFERR_NOT_PRESENT_MASK; + trap_name = "instr_page_miss"; + } else if (AS(ftype).prot_page) { + trap->nr_page_fault_exc = exc_instr_page_prot_num; + error_code |= PFERR_NOT_PRESENT_MASK | PFERR_INSTR_PROT_MASK; + trap_name = "instr_page_prot"; + } else if (AS(ftype).illegal_page) { + KVM_BUG_ON(is_shadow_paging(vcpu) && !nonpaging); + trap->nr_page_fault_exc = exc_instr_page_miss_num; + error_code |= PFERR_NOT_PRESENT_MASK; + trap_name = "illegal_instr_page"; + } else if (AW(ftype) == 0) { + trap->nr_page_fault_exc = exc_instr_page_miss_num; + error_code |= PFERR_NOT_PRESENT_MASK; + trap_name = "empty_fault_type_instr_page"; + } else { + pr_err("%s(): bad fault type 0x%x, probably it need pass " + "fault to guest\n", + __func__, AW(ftype)); + KVM_BUG_ON(true); + } + + DebugIPINTC("intercept on %s fault, IP 0x%lx\n", + trap_name, address); + + mu_state->may_be_retried = true; + mu_state->ignore_notifier = false; + + return kvm_mmu_instr_page_fault(vcpu, address, async_instr, error_code); +} + +static int do_instr_page_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs) +{ + return instr_page_fault_intc_mu(vcpu, intc_info_mu, regs, 
false); +} + +static int do_ainstr_page_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs) +{ + return instr_page_fault_intc_mu(vcpu, intc_info_mu, regs, true); +} + +static int do_forced_data_page_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs) +{ + int event = intc_info_mu->hdr.event_code; + tc_cond_t cond = intc_info_mu->condition; + int fmt = TC_COND_FMT_FULL(cond); + int cur_mu = vcpu->arch.intc_ctxt.cur_mu; + bool ss_under_rpr; + bool root = AS(cond).root; /* secondary space */ + bool ignore_store = false; /* the store should not be reexecuted */ + intc_info_mu_t *prev_mu; + + DebugPFFORCED("event code %d %s: GVA 0x%lx, GPA 0x%lx, " + "condition 0x%llx\n", + event, kvm_get_mu_event_name(event), + intc_info_mu->gva, intc_info_mu->gpa, AW(cond)); + + if (cur_mu == 0) { + pr_err("%s(): forced MU intercept is first in INTC_INFO_MU\n", + __func__); + KVM_BUG_ON(true); + } + + ss_under_rpr = root && kvm_has_vcpu_exc_recovery_point(vcpu); + + if (likely(!ss_under_rpr)) { + DebugPFFORCED("it is not accsess to secondary space in " + "generation (RPR) mode, so ignored\n"); + return 0; + } + + ignore_store = tc_cond_is_store(cond, machine.native_iset_ver); + if (!ignore_store) { + DebugPFFORCED("it is load from secondary space in " + "generation (RPR) mode, so will be reexecuted\n"); + return 0; + } + + DebugEXCRPR("event code %d %s: %s secondary space at recovery mode: " + "GVA 0x%lx, GPA 0x%lx, cond 0x%016llx\n", + event, kvm_get_mu_event_name(event), + (ignore_store) ? ((AS(cond).store) ? 
"store to" + : "load with store semantics to") + : "load from", + intc_info_mu->gva, intc_info_mu->gpa, AW(cond)); + + if ((fmt == LDST_QWORD_FMT || fmt == TC_FMT_QWORD_QP)) { + prev_mu = &vcpu->arch.intc_ctxt.mu[cur_mu - 1]; + if (prev_mu->hdr.event_code == 1) { + DebugEXCRPR("leaving qword access to secondary space " + "at recovery mode for guest to handle\n"); + + return 0; + } + } + + /* mark the INTC_INFO_MU event as deleted to avoid */ + /* hardware reexucution of the store operation */ + kvm_delete_intc_info_mu(vcpu, intc_info_mu); + DebugEXCRPR("access to secondary space at recovery mode " + "will not be reexecuted by hardware\n"); + + return 0; +} + +static int do_forced_gva_data_page_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs) +{ + int event = intc_info_mu->hdr.event_code; + tc_cond_t cond = intc_info_mu->condition; + bool ss_under_rpr; + bool root = AS(cond).root; /* secondary space */ + bool ignore_store = false; /* the store should not be reexecuted */ + + DebugPFFORCED("event code %d %s: GVA 0x%lx, GPA 0x%lx, " + "condition 0x%llx\n", + event, kvm_get_mu_event_name(event), + intc_info_mu->gva, intc_info_mu->gpa, AW(cond)); + + ss_under_rpr = root && kvm_has_vcpu_exc_recovery_point(vcpu); + + if (likely(!ss_under_rpr)) { + DebugPFFORCED("it is not accsess to secondary space in " + "generation (RPR) mode, so ignored\n"); + return 0; + } + + ignore_store = tc_cond_is_store(cond, machine.native_iset_ver); + if (!ignore_store) { + DebugPFFORCED("it is load from secondary space in " + "generation (RPR) mode, so will be reexecuted\n"); + return 0; + } + + DebugEXCRPR("event code %d %s: %s secondary space at recovery mode: " + "GVA 0x%lx, GPA 0x%lx, cond 0x%016llx\n", + event, kvm_get_mu_event_name(event), + (ignore_store) ? ((AS(cond).store) ? "store to" + : "load with store semantics to") + : "load from", + intc_info_mu->gva, intc_info_mu->gpa, AW(cond)); + + /* + * Pass the fault to guest. 
Hardware will transfer this entry to + * guest's cellar and add exception to TIRs. + * Lintel will not re-execute this store. + */ + return 0; +} + +static int do_data_page_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs) +{ + gpa_t gpa; + gva_t address; + tc_cond_t cond; + tc_fault_type_t ftype; + bool nonpaging = !is_paging(vcpu); + int ret; + + gpa = intc_info_mu->gpa; + address = intc_info_mu->gva; + cond = intc_info_mu->condition; + AW(ftype) = AS(cond).fault_type; + + DebugPFINTC("intercept on data page fault, gpa 0x%llx gva 0x%lx, " + "fault type 0x%x\n", + gpa, address, AW(ftype)); + + if (!is_phys_paging(vcpu)) { + pr_err("%s(): intercept on GPA->PA translation fault, but " + "GP_* tables disabled\n", + __func__); + KVM_BUG_ON(true); + } + + address = nonpaging_gva_to_gpa(vcpu, gpa, ACC_ALL, NULL); + + ret = kvm_hv_mmu_page_fault(vcpu, regs, intc_info_mu); + if (ret != PFRES_NO_ERR && ret != PFRES_TRY_MMIO) { + pr_info("%s(): could not handle intercept on data " + "page fault\n", + __func__); + KVM_BUG_ON(true); + } + return ret; +} + +static int do_shadow_data_page_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs) +{ + gpa_t gpa; + gva_t address; + tc_cond_t cond; + tc_fault_type_t ftype; + bool nonpaging = !is_paging(vcpu); + int ret; + + gpa = intc_info_mu->gpa; + address = intc_info_mu->gva; + cond = intc_info_mu->condition; + AW(ftype) = AS(cond).fault_type; + + DebugPFINTC("intercept on data page fault, gpa 0x%llx gva 0x%lx, " + "fault type 0x%x\n", + gpa, address, AW(ftype)); + + if (!is_shadow_paging(vcpu)) { + pr_err("%s(): intercept on shadow PT translation fault, but " + "shadow PT mode is disabled\n", + __func__); + KVM_BUG_ON(true); + } + if (nonpaging && is_phys_paging(vcpu)) { + pr_err("%s(): should be intercept on GPA->PA translation " + "fault, GP_* tables enabled\n", + __func__); + KVM_BUG_ON(true); + } + if (nonpaging) + address = nonpaging_gva_to_gpa(vcpu, gpa, ACC_ALL, NULL); 
+ + ret = kvm_hv_mmu_page_fault(vcpu, regs, intc_info_mu); + if (ret != PFRES_NO_ERR && ret != PFRES_TRY_MMIO) { + pr_info("%s(): could not handle intercept on data " + "page fault\n", + __func__); + KVM_BUG_ON(true); + } + return ret; +} + +int do_nonp_data_page_intc_exc(struct kvm_vcpu *vcpu, struct pt_regs *regs) +{ + struct trap_pt_regs *trap = regs->trap; + bool nonpaging = !is_paging(vcpu); + + KVM_BUG_ON(!nonpaging); + + if (trap->nr_TIRs < 1) { + pr_info("KVM ERROR: intercepted data page without TIR\n"); + do_exit(SIGKILL); + } + + if (!trap->tc_called) { + trap->nr_page_fault_exc = exc_data_page_num; + trap->is_intc = true; + do_trap_cellar(regs, 1); + do_trap_cellar(regs, 0); + trap->tc_called = 1; + } + return 0; +} + +static int do_tlb_line_flush_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs) +{ + mmu_addr_t mmu_addr; + flush_addr_t flush_addr; + gva_t gva; + + mmu_addr = intc_info_mu->gva; + KVM_BUG_ON(flush_op_get_type(mmu_addr) != _FLUSH_TLB_PAGE_SYS_OP); + + flush_addr = intc_info_mu->data; + + /* implemented only for current guest process */ + KVM_BUG_ON(flush_addr_get_pid(flush_addr) != read_SH_PID_reg()); + + gva = FLUSH_VADDR_TO_VA(flush_addr); + if (!!(flush_addr & _FLUSH_ADDR_PHYS)) { + gva = gfn_to_gpa(gva); + } + + kvm_mmu_flush_gva(vcpu, gva); + + return 0; +} + +int do_hret_last_wish_intc(struct kvm_vcpu *vcpu, struct pt_regs *regs) +{ + struct trap_pt_regs *trap = regs->trap; + unsigned long flags; + bool was_trap_wish = false; + + if (DEBUG_LWISH_TIRs_MODE && + trap->nr_TIRs >= 0 && !AW(trap->TIRs[0].TIR_hi)) { + pr_err("%s(): empty TIR[0] in HRET last wish intercept\n", + __func__); + print_all_TIRs(trap->TIRs, trap->nr_TIRs); + } + + if (vcpu->arch.vm_exit_wish) { + DebugVMEX("intercept to do VM exit, exit reason %d\n", + vcpu->arch.exit_reason); + vcpu->arch.vm_exit_wish = false; + return 1; + } + + if (vcpu->arch.trap_wish) { + DebugVMEX("intercept to inject VM traps\n"); + 
KVM_WARN_ON(kvm_is_empty_vcpu_intc_TIRs(vcpu)); + regs->traps_to_guest |= vcpu->arch.trap_mask_wish; + vcpu->arch.trap_wish = false; + vcpu->arch.trap_mask_wish = 0; + was_trap_wish = true; + } + + if (!vcpu->arch.virq_wish) { + if (!was_trap_wish) { + pr_err("%s(): unknown reason for HRET last wish " + "intercept\n", + __func__); + } + return 0; + } + + raw_spin_lock_irqsave(&vcpu->kvm->arch.virq_lock, flags); + + if (!kvm_has_virqs_to_guest(vcpu)) { + /* nothing pending VIRQs to pass to guest */ + raw_spin_unlock_irqrestore(&vcpu->kvm->arch.virq_lock, flags); + return 0; + } + + KVM_BUG_ON(!kvm_test_pending_virqs(vcpu)); + + raw_spin_unlock_irqrestore(&vcpu->kvm->arch.virq_lock, flags); + + /* trap is only to inject interrupt to guest */ + if (vcpu->arch.is_hv) { + if (!(vcpu->arch.intc_ctxt.exceptions & exc_interrupt_mask)) { + kvm_need_create_vcpu_exception(vcpu, + exc_interrupt_mask); + } + vcpu->arch.virq_wish = false; + } else if (vcpu->arch.is_pv) { + kvm_inject_interrupt(vcpu, regs); + vcpu->arch.virq_wish = false; + } else { + /* guest traps are handled by host at first */ + /* and host only pass guest traps to guest */ + kvm_need_create_vcpu_exception(vcpu, exc_last_wish_mask); + } + return 0; +} + +static int write_trap_point_mmu_reg(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu) +{ + gpa_t tc_gpa; + hpa_t tc_hpa; + int ret; + + tc_gpa = intc_info_mu->data; + ret = vcpu_write_trap_point_mmu_reg(vcpu, tc_gpa, &tc_hpa); + if (ret != 0) + return ret; + + /* set system physical address of guest trap cellar to recover */ + /* intercepted writing to MMU register 'TRAP_POINT' */ + kvm_set_intc_info_mu_modified_data(intc_info_mu, tc_hpa, 0); + kvm_set_intc_info_mu_is_updated(vcpu); + + return 0; +} + +static int write_mmu_cr_reg(struct kvm_vcpu *vcpu, intc_info_mu_t *intc_info_mu) +{ + return vcpu_write_mmu_cr_reg(vcpu, intc_info_mu->data); +} + +static int write_mmu_u_pptb_reg(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu) +{ + pgprotval_t 
u_pptb; + hpa_t u_root; + bool pt_updated = false; + int r; + + u_pptb = intc_info_mu->data; + r = vcpu_write_mmu_u_pptb_reg(vcpu, u_pptb, &pt_updated, &u_root); + if (r != 0) + return r; + + if (pt_updated) + kvm_set_intc_info_mu_modified_data(intc_info_mu, u_root, 0); + kvm_set_intc_info_mu_is_updated(vcpu); + + return 0; +} + +static int write_mmu_os_pptb_reg(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu) +{ + pgprotval_t os_pptb; + hpa_t os_root; + bool pt_updated = false; + int r; + + os_pptb = intc_info_mu->data; + r = vcpu_write_mmu_os_pptb_reg(vcpu, os_pptb, &pt_updated, &os_root); + if (r != 0) + return r; + + if (pt_updated) + kvm_set_intc_info_mu_modified_data(intc_info_mu, os_root, 0); + kvm_set_intc_info_mu_is_updated(vcpu); + + return 0; +} + +static int write_mmu_u_vptb_reg(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu) +{ + return vcpu_write_mmu_u_vptb_reg(vcpu, intc_info_mu->data); +} + +static int write_mmu_os_vptb_reg(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu) +{ + return vcpu_write_mmu_os_vptb_reg(vcpu, intc_info_mu->data); +} + +static int write_mmu_os_vab_reg(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu) +{ + return vcpu_write_mmu_os_vab_reg(vcpu, intc_info_mu->data); +} + +static int write_mmu_pid_reg(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu) +{ + return vcpu_write_mmu_pid_reg(vcpu, intc_info_mu->data); +} + +static int write_mmu_ss_ptb_reg(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, int mmu_reg_no) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + mmu_reg_t mmu_reg, old_mmu_reg; + const char *reg_name; + + BUG_ON(!is_tdp_paging(vcpu)); + + mmu_reg = intc_info_mu->data; + switch (mmu_reg_no) { + case _MMU_U2_PPTB_NO: + old_mmu_reg = mmu->u2_pptb; + reg_name = "U2_PPTB"; + break; + case _MMU_MPT_B_NO: + old_mmu_reg = mmu->mpt_b; + reg_name = "MPT_B"; + break; + case _MMU_PDPTE0_NO: + old_mmu_reg = mmu->pdptes[0]; + reg_name = "PDPTE0"; + break; + case _MMU_PDPTE1_NO: + old_mmu_reg = 
mmu->pdptes[1]; + reg_name = "PDPTE1"; + break; + case _MMU_PDPTE2_NO: + old_mmu_reg = mmu->pdptes[2]; + reg_name = "PDPTE2"; + break; + case _MMU_PDPTE3_NO: + old_mmu_reg = mmu->pdptes[3]; + reg_name = "PDPTE3"; + break; + default: + BUG_ON(true); + } + if (old_mmu_reg == mmu_reg) { + /* the same registers state, so nothing to do */ + DebugMMUSSREG("guest MMU %s: write the same value 0x%llx\n", + reg_name, mmu_reg); + return 0; + } + + /* Only save new MMU register value, probably will be need */ + switch (mmu_reg_no) { + case _MMU_U2_PPTB_NO: + mmu->u2_pptb = mmu_reg; + break; + case _MMU_MPT_B_NO: + mmu->mpt_b = mmu_reg; + break; + case _MMU_PDPTE0_NO: + mmu->pdptes[0] = mmu_reg; + break; + case _MMU_PDPTE1_NO: + mmu->pdptes[1] = mmu_reg; + break; + case _MMU_PDPTE2_NO: + mmu->pdptes[2] = mmu_reg; + break; + case _MMU_PDPTE3_NO: + mmu->pdptes[3] = mmu_reg; + break; + default: + BUG_ON(true); + } + DebugMMUSSREG("guest MMU %s: write the new value 0x%llx\n", + reg_name, mmu_reg); + + kvm_set_intc_info_mu_is_updated(vcpu); + + return 0; +} + +static int write_mmu_ss_pid_reg(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + mmu_reg_t pid; + + pid = intc_info_mu->data; + if (mmu->pid2 != pid) { + /* only remember secondary space PID */ + mmu->pid2 = pid; + DebugMMUSSREG("Set MMU guest secondary space new PID: 0x%llx\n", + pid); + } else { + DebugMMUSSREG("MMU guest secondary space is not changed " + "PID: 0x%llx\n", pid); + return 0; + } + + kvm_set_intc_info_mu_is_updated(vcpu); + + return 0; +} + +static int read_trap_point_mmu_reg(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu) +{ + gpa_t tc_gpa; + int r; + + r = vcpu_read_trap_point_mmu_reg(vcpu, &tc_gpa); + if (r != 0) + return r; + + intc_info_mu->data = tc_gpa; + kvm_set_intc_info_mu_is_updated(vcpu); + + return 0; +} + +static int read_mmu_cr_reg(struct kvm_vcpu *vcpu, intc_info_mu_t *intc_info_mu) +{ + mmu_reg_t mmu_cr; + int r; + + r = 
vcpu_read_mmu_cr_reg(vcpu, &mmu_cr); + if (r != 0) + return r; + + intc_info_mu->data = mmu_cr; + kvm_set_intc_info_mu_is_updated(vcpu); + + return 0; +} + +static int read_mmu_u_pptb_reg(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu) +{ + pgprotval_t u_pptb; + int r; + + r = vcpu_read_mmu_u_pptb_reg(vcpu, &u_pptb); + if (r != 0) + return r; + + intc_info_mu->data = u_pptb; + kvm_set_intc_info_mu_is_updated(vcpu); + + return 0; +} + +static int read_mmu_os_pptb_reg(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu) +{ + pgprotval_t os_pptb; + int r; + + r = vcpu_read_mmu_os_pptb_reg(vcpu, &os_pptb); + if (r != 0) + return r; + + intc_info_mu->data = os_pptb; + kvm_set_intc_info_mu_is_updated(vcpu); + + return 0; +} + +static int read_mmu_ss_ptb_reg(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, int mmu_reg_no) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + mmu_reg_t mmu_reg; + const char *reg_name; + + BUG_ON(!is_tdp_paging(vcpu)); + + switch (mmu_reg_no) { + case _MMU_U2_PPTB_NO: + mmu_reg = mmu->u2_pptb; + reg_name = "U2_PPTB"; + break; + case _MMU_MPT_B_NO: + mmu_reg = mmu->mpt_b; + reg_name = "MPT_B"; + break; + case _MMU_PDPTE0_NO: + mmu_reg = mmu->pdptes[0]; + reg_name = "PDPTE0"; + break; + case _MMU_PDPTE1_NO: + mmu_reg = mmu->pdptes[1]; + reg_name = "PDPTE1"; + break; + case _MMU_PDPTE2_NO: + mmu_reg = mmu->pdptes[2]; + reg_name = "PDPTE2"; + break; + case _MMU_PDPTE3_NO: + mmu_reg = mmu->pdptes[3]; + reg_name = "PDPTE3"; + break; + case _MMU_PID2_NO: + mmu_reg = mmu->pid2; + reg_name = "PID2"; + break; + default: + BUG_ON(true); + } + + intc_info_mu->data = mmu_reg; + kvm_set_intc_info_mu_is_updated(vcpu); + + DebugMMUSSREG("guest MMU %s: read the value 0x%llx\n", + reg_name, mmu_reg); + + return 0; +} + +static int read_mmu_u_vptb_reg(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu) +{ + gva_t u_vptb; + int r; + + r = vcpu_read_mmu_u_vptb_reg(vcpu, &u_vptb); + if (r != 0) + return r; + + intc_info_mu->data = u_vptb; + 
kvm_set_intc_info_mu_is_updated(vcpu); + + return 0; +} + +static int read_mmu_os_vptb_reg(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu) +{ + gva_t os_vptb; + int r; + + r = vcpu_read_mmu_os_vptb_reg(vcpu, &os_vptb); + if (r != 0) + return r; + + intc_info_mu->data = os_vptb; + kvm_set_intc_info_mu_is_updated(vcpu); + + return 0; +} + +static int read_mmu_os_vab_reg(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu) +{ + gva_t os_vab; + int r; + + r = vcpu_read_mmu_os_vab_reg(vcpu, &os_vab); + if (r != 0) + return r; + + intc_info_mu->data = os_vab; + kvm_set_intc_info_mu_is_updated(vcpu); + + return 0; +} + +static int do_write_mmu_reg_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs) +{ + mmu_addr_t mmu_reg_addr; + int mmu_reg_no; + int ret; + + KVM_BUG_ON(intc_info_mu->hdr.event_code != IME_WRITE_MU); + + mmu_reg_addr = intc_info_mu->gva; + mmu_reg_no = MMU_REG_NO_FROM_MMU_ADDR(mmu_reg_addr); + switch (mmu_reg_no) { + case _MMU_TRAP_POINT_NO: + ret = write_trap_point_mmu_reg(vcpu, intc_info_mu); + break; + case _MMU_CR_NO: + ret = write_mmu_cr_reg(vcpu, intc_info_mu); + break; + case _MMU_U_PPTB_NO: + ret = write_mmu_u_pptb_reg(vcpu, intc_info_mu); + break; + case _MMU_OS_PPTB_NO: + ret = write_mmu_os_pptb_reg(vcpu, intc_info_mu); + break; + case _MMU_U2_PPTB_NO: + case _MMU_MPT_B_NO: + case _MMU_PDPTE0_NO: + case _MMU_PDPTE1_NO: + case _MMU_PDPTE2_NO: + case _MMU_PDPTE3_NO: + ret = write_mmu_ss_ptb_reg(vcpu, intc_info_mu, mmu_reg_no); + break; + case _MMU_U_VPTB_NO: + ret = write_mmu_u_vptb_reg(vcpu, intc_info_mu); + break; + case _MMU_OS_VPTB_NO: + ret = write_mmu_os_vptb_reg(vcpu, intc_info_mu); + break; + case _MMU_OS_VAB_NO: + ret = write_mmu_os_vab_reg(vcpu, intc_info_mu); + break; + case _MMU_PID_NO: + ret = write_mmu_pid_reg(vcpu, intc_info_mu); + break; + case _MMU_PID2_NO: + ret = write_mmu_ss_pid_reg(vcpu, intc_info_mu); + break; + default: + pr_err("%s(): unimplemented MMU register #%d intercept\n", + 
__func__, mmu_reg_no); + ret = -ENOSYS; + break; + } + + return ret; +} + +static int read_mmu_reg_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs) +{ + mmu_addr_t mmu_reg_addr; + int mmu_reg_no; + int ret; + + mmu_reg_addr = intc_info_mu->gva; + mmu_reg_no = MMU_REG_NO_FROM_MMU_ADDR(mmu_reg_addr); + switch (mmu_reg_no) { + case _MMU_TRAP_POINT_NO: + ret = read_trap_point_mmu_reg(vcpu, intc_info_mu); + break; + case _MMU_CR_NO: + ret = read_mmu_cr_reg(vcpu, intc_info_mu); + break; + case _MMU_U_PPTB_NO: + ret = read_mmu_u_pptb_reg(vcpu, intc_info_mu); + break; + case _MMU_OS_PPTB_NO: + ret = read_mmu_os_pptb_reg(vcpu, intc_info_mu); + break; + case _MMU_U2_PPTB_NO: + case _MMU_MPT_B_NO: + case _MMU_PDPTE0_NO: + case _MMU_PDPTE1_NO: + case _MMU_PDPTE2_NO: + case _MMU_PDPTE3_NO: + case _MMU_PID2_NO: + ret = read_mmu_ss_ptb_reg(vcpu, intc_info_mu, mmu_reg_no); + break; + case _MMU_U_VPTB_NO: + ret = read_mmu_u_vptb_reg(vcpu, intc_info_mu); + break; + case _MMU_OS_VPTB_NO: + ret = read_mmu_os_vptb_reg(vcpu, intc_info_mu); + break; + case _MMU_OS_VAB_NO: + ret = read_mmu_os_vab_reg(vcpu, intc_info_mu); + break; + default: + pr_err("%s(): unimplemented MMU register #%d intercept\n", + __func__, mmu_reg_no); + ret = -ENOSYS; + break; + } + + return ret; +} + +static int read_dtlb_reg_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs) +{ + tlb_addr_t tlb_addr; + mmu_reg_t tlb_entry; + + tlb_addr = intc_info_mu->gva; + tlb_entry = NATIVE_READ_DTLB_REG(tlb_addr); + + /* FIXME: here should be conversion from native DTLB entry structure */ + /* to guest arch one, but such readings are used only for debug info */ + /* dumping. 
*/ + intc_info_mu->data = tlb_entry; + kvm_set_intc_info_mu_is_updated(vcpu); + + return 0; +} + +static void check_virt_ctrl_mu_rr_dbg1(void) +{ + virt_ctrl_mu_t reg = read_VIRT_CTRL_MU_reg(); + + if (!reg.rr_dbg1) + pr_err("%s(): intercepted MLT/DAM read with disabled rr_dbg1\n", + __func__); +} + +static int read_dam_reg_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs) +{ + e2k_addr_t dam_addr; + u64 dam_entry; + + dam_addr = intc_info_mu->gva; + dam_entry = NATIVE_READ_DAM_REG(dam_addr); + + intc_info_mu->data = dam_entry; + kvm_set_intc_info_mu_is_updated(vcpu); + + return 0; +} + +static int read_mlt_reg_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs) +{ + e2k_addr_t mlt_addr; + u64 mlt_entry; + + mlt_addr = intc_info_mu->gva; + mlt_entry = NATIVE_READ_MLT_REG(mlt_addr); + + intc_info_mu->data = mlt_entry; + kvm_set_intc_info_mu_is_updated(vcpu); + + return 0; +} + +static int do_read_mmu_intc_mu(struct kvm_vcpu *vcpu, + intc_info_mu_t *intc_info_mu, pt_regs_t *regs) +{ + tc_cond_t cond; + unsigned int mas; + int ret; + + KVM_BUG_ON(intc_info_mu->hdr.event_code != IME_READ_MU); + + cond = intc_info_mu->condition; + mas = AS(cond).mas; + if (mas == MAS_MMU_REG) { + return read_mmu_reg_intc_mu(vcpu, intc_info_mu, regs); + } else if (mas == MAS_DTLB_REG) { + return read_dtlb_reg_intc_mu(vcpu, intc_info_mu, regs); + } else if (mas == MAS_DAM_REG) { + check_virt_ctrl_mu_rr_dbg1(); + + if ((intc_info_mu->gva & REG_DAM_TYPE) == REG_DAM_TYPE) { + return read_dam_reg_intc_mu(vcpu, intc_info_mu, regs); + } else if ((intc_info_mu->gva & REG_MLT_TYPE) == REG_MLT_TYPE) { + return read_mlt_reg_intc_mu(vcpu, intc_info_mu, regs); + } else { + pr_err("%s(): not implemented special MMU or AAU " + "operation type, mas.mod %d, mas.opc %d, " + "addr 0x%lx\n", + __func__, + (mas & MAS_MOD_MASK) >> MAS_MOD_SHIFT, + (mas & MAS_OPC_MASK) >> MAS_OPC_SHIFT, + intc_info_mu->gva); + ret = -EINVAL; + } + } else { + 
pr_err("%s(): not implemented special MMU or AAU operation " + "type, mas.mod %d, mac.opc %d\n", + __func__, + (mas & MAS_MOD_MASK) >> MAS_MOD_SHIFT, + (mas & MAS_OPC_MASK) >> MAS_OPC_SHIFT); + ret = -EINVAL; + } + + return ret; +} + +static int handle_exc_interrupt_intc(struct kvm_vcpu *vcpu, + struct pt_regs *regs, unsigned long exc_mask) +{ + struct trap_pt_regs *trap = regs->trap; + unsigned long exc; + + DebugINTR("intercept on interrupt exception\n"); + + trap->TIR_lo = trap->TIRs[0].TIR_lo.TIR_lo_reg; + trap->TIR_hi = trap->TIRs[0].TIR_hi.TIR_hi_reg; + exc = trap->TIRs[0].TIR_hi.TIR_hi_exc; + if (unlikely((exc & exc_mask) != exc_mask)) { + pr_err("%s(): intercept on interrupt exception 0x%016lx, " + "but TIR[0].exc 0x%016lx has not interrupt\n", + __func__, exc_mask, exc); + } + local_irq_disable(); + if (exc_mask & exc_nm_interrupt_mask) + do_nm_interrupt(regs); + if (exc_mask & exc_interrupt_mask) + native_do_interrupt(regs); + local_irq_enable(); + + return 0; +} + +static int do_exc_interrupt_intc(struct kvm_vcpu *vcpu, struct pt_regs *regs) +{ + return handle_exc_interrupt_intc(vcpu, regs, exc_interrupt_mask); +} + +static int do_exc_nm_interrupt_intc(struct kvm_vcpu *vcpu, struct pt_regs *regs) +{ + return handle_exc_interrupt_intc(vcpu, regs, exc_nm_interrupt_mask); +} + +void mmu_init_nonpaging_intc(struct kvm_vcpu *vcpu) +{ + kvm_set_cond_exc_handler(INTC_CU_EXC_INSTR_PAGE_NO, + (exc_intc_handler_t)(do_nonp_instr_page_intc_exc)); +} + +void kvm_init_kernel_intc(struct kvm_vcpu *vcpu) +{ + kvm_set_cond_exc_handler(INTC_CU_EXC_INSTR_PAGE_NO, + (exc_intc_handler_t)(do_instr_page_intc_exc)); + kvm_set_cond_exc_handler(INTC_CU_EXC_INTERRUPT_NO, + (exc_intc_handler_t)(do_exc_interrupt_intc)); + kvm_set_cond_exc_handler(INTC_CU_EXC_NM_INTERRUPT_NO, + (exc_intc_handler_t)(do_exc_nm_interrupt_intc)); +} + +e2k_idr_t kvm_vcpu_get_idr(struct kvm_vcpu *vcpu) +{ + kvm_guest_info_t *guest_info = &vcpu->kvm->arch.guest_info; + e2k_idr_t idr = read_IDR_reg(); + + 
if (guest_info->is_stranger) { + /* update IDR in accordance with guest machine CPUs type */ + idr.IDR_mdl = guest_info->cpu_mdl; + idr.IDR_rev = guest_info->cpu_rev; + idr.IDR_ms_core = vcpu->vcpu_id; + idr.IDR_ms_pn = 0; /* FIXME: is not implemented NUMA node id */ + if (unlikely(guest_info->cpu_iset < E2K_ISET_V3)) { + /* set IDR.hw_virt to mark guest mode because of */ + /* CPUs of iset V2 have not CORE_MODE register */ + idr.IDR_ms_hw_virt = vcpu->kvm->arch.is_hv; + } + + DebugCUREG("guest IDR was changed: 0x%llx\n", idr.IDR_reg); + } + + return idr; +} + +static void print_intc_ctxt(struct kvm_vcpu *vcpu) +{ + kvm_intc_cpu_context_t *intc_ctxt = &vcpu->arch.intc_ctxt; + int cu_num = intc_ctxt->cu_num, mu_num = intc_ctxt->mu_num; + intc_info_mu_t *mu = intc_ctxt->mu; + int evn_no; + + pr_alert("Dumping intercept context on CPU %d VCPU %d. cu_num %d, mu_num %d\n", + vcpu->cpu, vcpu->vcpu_id, cu_num, mu_num); + pr_alert("CU header: lo 0x%llx; hi 0x%llx\n", + AW(intc_ctxt->cu.header.lo), AW(intc_ctxt->cu.header.hi)); + pr_alert("CU entry0: lo 0x%llx; hi 0x%llx\n", + AW(intc_ctxt->cu.entry[0].lo), intc_ctxt->cu.entry[0].hi); + pr_alert("CU entry1: lo 0x%llx; hi 0x%llx\n", + AW(intc_ctxt->cu.entry[1].lo), intc_ctxt->cu.entry[1].hi); + + for (evn_no = 0; evn_no < mu_num; evn_no++) { + intc_info_mu_t *mu_event = &mu[evn_no]; + int event = mu_event->hdr.event_code; + + pr_alert("MU entry %d: code %d %s\n", evn_no, event, kvm_get_mu_event_name(event)); + pr_alert("hdr 0x%llx gpa 0x%lx gva 0x%lx data 0x%lx\n", + mu_event->hdr.word, mu_event->gpa, mu_event->gva, mu_event->data); + pr_alert("condition 0x%llx data_ext 0x%lx mask 0x%llx\n", + mu_event->condition.word, mu_event->data_ext, mu_event->mask.word); + } + + print_all_TIRs(intc_ctxt->TIRs, intc_ctxt->nr_TIRs); +} + +static int do_read_cu_idr(struct kvm_vcpu *vcpu, intc_info_cu_t *cu) +{ + kvm_guest_info_t *guest_info = &vcpu->kvm->arch.guest_info; + intc_info_cu_entry_t *rr_event; + e2k_idr_t idr; + + rr_event = 
find_cu_info_entry(vcpu, cu, ICE_READ_CU, IDR_cu_reg_no); + if (rr_event == NULL) { + pr_err("%s(): could not find INTC_INFO_CU event with IDR\n", __func__); + print_intc_ctxt(vcpu); + KVM_BUG_ON(true); + return -EINVAL; + } + + idr = kvm_vcpu_get_idr(vcpu); + + if (guest_info->is_stranger) { + rr_event->hi = idr.IDR_reg; + kvm_set_intc_info_cu_is_updated(vcpu); + } + + return 0; +} + +static int read_reg_intc_cu(struct kvm_vcpu *vcpu, + intc_info_cu_t *intc_info_cu, pt_regs_t *regs) +{ + u64 rr_events = intc_info_cu->header.lo.evn_c; + int ret = 0, r; + + if (rr_events & intc_cu_evn_c_rr_idr_mask) { + r = do_read_cu_idr(vcpu, intc_info_cu); + if (r != 0) + ret |= r; + rr_events &= ~intc_cu_evn_c_rr_idr_mask; + } + + if (rr_events != 0) { + pr_err("%s(): some events were not handled: 0x%llx\n", + __func__, rr_events); + } + + return ret; +} + +static void do_write_cu_sclk_reg(struct kvm_vcpu *vcpu, intc_info_cu_t *cu, + cu_reg_no_t reg_no) +{ + intc_info_cu_entry_t *rw_event; + + rw_event = find_cu_info_entry(vcpu, cu, ICE_WRITE_CU, reg_no); + if (rw_event) + kvm_delete_intc_info_cu(vcpu, rw_event); +} + +static int do_write_cu_sclk_regs(struct kvm_vcpu *vcpu, intc_info_cu_t *cu) +{ + do_write_cu_sclk_reg(vcpu, cu, SCLKR_cu_reg_no); + do_write_cu_sclk_reg(vcpu, cu, SCLKM1_cu_reg_no); + do_write_cu_sclk_reg(vcpu, cu, SCLKM2_cu_reg_no); + + return 0; +} + +static int do_write_cu_sclkm3(struct kvm_vcpu *vcpu, intc_info_cu_t *cu) +{ + struct kvm_arch *ka = &vcpu->kvm->arch; + unsigned long flags; + + do_write_cu_sclk_reg(vcpu, cu, SCLKM3_cu_reg_no); + + if (cpu_has(CPU_HWBUG_VIRT_SCLKM3_INTC)) { + raw_spin_lock_irqsave(&ka->sh_sclkr_lock, flags); + WRITE_SH_SCLKM3_REG_VALUE(ka->sh_sclkm3); + raw_spin_unlock_irqrestore(&ka->sh_sclkr_lock, flags); + } + + return 0; +} + +/* Bug 127993: to ignore guest's CU write, delete it from INTC_INFO_CU */ +static int write_reg_intc_cu(struct kvm_vcpu *vcpu, + intc_info_cu_t *intc_info_cu, pt_regs_t *regs) +{ + u64 rw_events = 
intc_info_cu->header.lo.evn_c; + int ret = 0, r; + + /* Ignore guest's writes to sclkr, sclkm1, sclkm2 */ + if (rw_events & intc_cu_evn_c_rw_sclkr_mask) { + r = do_write_cu_sclk_regs(vcpu, intc_info_cu); + if (r != 0) + ret |= r; + rw_events &= ~intc_cu_evn_c_rw_sclkr_mask; + } + + /* Ignore guest's writes to sclkm3 */ + if (rw_events & intc_cu_evn_c_rw_sclkm3_mask) { + r = do_write_cu_sclkm3(vcpu, intc_info_cu); + if (r != 0) + ret |= r; + rw_events &= ~intc_cu_evn_c_rw_sclkm3_mask; + } + + if (rw_events != 0) { + pr_err("%s(): some events were not handled: 0x%llx\n", + __func__, rw_events); + } + + return ret; +} + +static int handle_cu_cond_events(struct kvm_vcpu *vcpu, + intc_info_cu_t *cu, pt_regs_t *regs) +{ + u64 cond_events = cu->header.lo.evn_c; + int r, ret = 0; + + if (cond_events & intc_cu_evn_c_rr_mask) { + r = read_reg_intc_cu(vcpu, cu, regs); + if (r != 0) + ret |= r; + cond_events &= ~intc_cu_hrd_lo_rr_mask; + } + if (cond_events & intc_cu_evn_c_rw_mask) { + r = write_reg_intc_cu(vcpu, cu, regs); + if (r != 0) + ret |= r; + cond_events &= ~intc_cu_hrd_lo_rw_mask; + } + if (cond_events & intc_cu_evn_c_hret_last_wish_mask) { + r = do_hret_last_wish_intc(vcpu, regs); + if (r < 0) { + pr_err("%s(): conditional event HRET last wish " + "intercept handler failed, error %d\n", + __func__, r); + KVM_BUG_ON(true); + } else if (r != 0) { + /* it need return to user space to handle */ + /* intercept (exit) reason */ + ret |= 1; + } + cond_events &= ~intc_cu_evn_c_hret_last_wish_mask; + } + if (cond_events & intc_cu_evn_c_virt_mask) { + /* FIXME: simulator bug: unexpected resources can cause */ + /* intercept "virtualization registers/commands access" */ + pr_err("%s(): unexpected intercept on virtualization " + "resources access, ignore\n", + __func__); + cond_events &= ~intc_cu_evn_c_virt_mask; + } + if (cond_events == 0) + return ret; + + panic("%s(): is not yet implemented, events: 0x%llx\n", + __func__, cond_events); + return ret; +} + +static int 
wait_trap_intc_cu(struct kvm_vcpu *vcpu, pt_regs_t *regs) +{ + /* Go to scheduler to wait for a wake up event. */ + DebugWTR("VCPU #%d interception on wait trap, block and wait for wake up\n", + vcpu->vcpu_id); + vcpu->arch.mp_state = KVM_MP_STATE_HALTED; + kvm_vcpu_block(vcpu); + kvm_check_request(KVM_REQ_UNHALT, vcpu); + vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; + vcpu->arch.unhalted = false; + DebugWTR("VCPU #%d has been woken up, so run guest again\n", vcpu->vcpu_id); + + return 0; +} + +static int handle_cu_uncond_events(struct kvm_vcpu *vcpu, + intc_info_cu_t *cu, pt_regs_t *regs) +{ + u64 uncond_evn = cu->header.lo.evn_u; + + if ((uncond_evn & intc_cu_evn_u_hv_int_mask) || + (uncond_evn & intc_cu_evn_u_hv_nm_int_mask)) { + /* should be already handled, so ignore here */ + uncond_evn &= ~(intc_cu_evn_u_hv_int_mask | + intc_cu_evn_u_hv_nm_int_mask); + } + if (uncond_evn & intc_cu_evn_u_wait_trap_mask) { + uncond_evn &= ~intc_cu_evn_u_wait_trap_mask; + wait_trap_intc_cu(vcpu, regs); + } + if (uncond_evn & intc_cu_evn_u_dbg_mask) { + /* May be sent by: + * - simulator, with -bI option + * - JTAG, when manually switching to hypervisor mode after + * stop_hard in guest + */ + uncond_evn &= ~intc_cu_evn_u_dbg_mask; + coredump_in_future(); + } + if (uncond_evn != 0) { + pr_err("%s(): is not yet implemented, events: 0x%llx\n", + __func__, uncond_evn); + } + + return 0; +} + +static intc_info_cu_entry_t *find_cu_info_entry(struct kvm_vcpu *vcpu, + intc_info_cu_t *cu, + info_cu_event_code_t code, + cu_reg_no_t reg_no) +{ + u64 entry_handled = vcpu->arch.intc_ctxt.cu_entry_handled; + unsigned cu_num = vcpu->arch.intc_ctxt.cu_num; + int no; + + /* all info intries should be already handled */ + for (no = 0; no < cu_num; no++) { + intc_info_cu_entry_t *cu_entry = &cu->entry[no]; + int event = cu_entry->lo.event_code; + u64 mask = (1ULL << no); + + if (event != code) { + continue; + } + if (code == ICE_READ_CU || code == ICE_WRITE_CU) { + if (cu_entry->lo.reg_num != 
reg_no) { + continue; + } + } + if (entry_handled & mask) { + /* entry was already handled */ + pr_err("%s(): event #%d code %d was already handled\n", + __func__, no, event); + } + return cu_entry; + } + DebugCUREG("%s(): could not found entry: event code %d\n", + __func__, code); + return NULL; +} + +static void check_cu_info_entries(struct kvm_vcpu *vcpu, + intc_info_cu_t *cu, int entries_num) +{ + u64 entry_handled = vcpu->arch.intc_ctxt.cu_entry_handled; + int no; + + /* all info intries should be already handled */ + for (no = 0; no < entries_num; + no += (sizeof(intc_info_cu_entry_t) / sizeof(u64))) { + intc_info_cu_entry_t *cu_entry = &cu->entry[no]; + int event = cu_entry->lo.event_code; + u64 mask = (1ULL << no); + + if (event == ICE_FORCED) { + /* Guest event - leave its handling to guest */ + if (entry_handled & mask) { + DebugCUEN("entry[%d]: has been converted " + "to empty\n", no); + } else { + DebugCUEN("entry[%d]: empty\n", no); + } + continue; + } + if (event == ICE_READ_CU) { + /* read from CPU system register */ + DebugCUEN("entry[%d]: read register #%d, channel #%d, " + "dst 0x%x mask 0x%x\n" + " data: 0x%llx\n", + no, cu_entry->lo.reg_num, cu_entry->lo.ch_code, + cu_entry->lo.dst, cu_entry->lo.vm_dst, + cu_entry->hi); + continue; + } else if (event == ICE_WRITE_CU) { + /* write to CPU system register */ + DebugCUEN("entry[%d]: write register #%d, channel #%d\n" + " data: 0x%llx\n", + no, cu_entry->lo.reg_num, cu_entry->lo.ch_code, + cu_entry->hi); + continue; + } else if (event == ICE_MASKED_HCALL) { + /* hypercall is not alowed */ + DebugCUEN("entry[%d]: masked HCALL\n", no); + continue; + } else { + pr_err("%s(): unknown event code %d at " + "INTC_INFO_CU[%d]\n", + __func__, event, INTC_INFO_CU_HDR_MAX + no); + KVM_BUG_ON(true); + } + if (entry_handled & mask) + /* entry was handled */ + continue; + pr_err("%s(): INTC_INFO_CU[%d] entry was not handled, " + "event code %d\n", + __func__, INTC_INFO_CU_HDR_MAX + no, event); + KVM_WARN_ON(true); 
+ } +} + +static int handle_cu_cond_exceptions(struct kvm_vcpu *vcpu, + intc_info_cu_t *cu, pt_regs_t *regs) +{ + kvm_intc_cpu_context_t *intc_ctxt = &vcpu->arch.intc_ctxt; + u64 tir_exc = intc_ctxt->exceptions; + u64 cond_exc = cu->header.lo.exc_c; + u64 cond_evn = cu->header.lo.evn_c; + u64 uncond_evn = cu->header.lo.evn_u; + u64 exc_to_intc; + exc_intc_handler_t handler; + int exc_no; + u64 cond_exc_mask, tir_exc_mask; + int r, ret = 0; + + exc_to_intc = vcpu->arch.hw_ctxt.virt_ctrl_cu.VIRT_CTRL_CU_exc_c; + + for (exc_no = 0; exc_no < INTC_CU_COND_EXC_MAX; exc_no++) { + cond_exc_mask = 1ULL << exc_no; + tir_exc_mask = kvm_cond_exc_no_to_exc_mask(exc_no); + if (likely((cond_exc & cond_exc_mask) == 0)) { + /* intercept of exception did not occur */ + if (tir_exc_mask == 0) + /* it is reserved bit of exc_c field */ + continue; + if (likely((tir_exc & tir_exc_mask) == 0)) + /* exception did not occur too */ + continue; + /* exception occured */ + if (cu->header.lo.tir_fz) + /* exceptions at frozen TIRs, so guest is not */ + /* yet read TIRs to start handling */ + continue; + if (likely((exc_to_intc & cond_exc_mask) == 0)) { + /* exception is not expected and */ + /* intercepted, so it is guest trap, */ + /* pass to guest */ + if (cond_exc == 0 && cond_evn == 0 && + uncond_evn == 0) { + pr_err("%s(): unexpected conditional " + "exception #%d (0x%llx) %s " + "occured, but did not " + "intercepted, expected " + "mask 0x%llx, so it is trap " + "of guest and will be passed " + "to guest\n", + __func__, exc_no, cond_exc_mask, + kvm_cond_exc_no_to_exc_name( + exc_no), + exc_to_intc); + } + kvm_pass_cond_exc_to_vcpu(vcpu, exc_no); + continue; + } + pr_err("%s(): expected conditional exception #%d " + "(0x%llx) %s occured, but did not intercepted, " + "expected mask 0x%llx, so will be passed " + "to guest\n", + __func__, exc_no, cond_exc_mask, + kvm_cond_exc_no_to_exc_name(exc_no), + exc_to_intc); + kvm_pass_cond_exc_to_vcpu(vcpu, exc_no); + continue; + } + + /* intercept on 
conditional exception occured */ + DebugINTCEXC("INTC CU exception #%d\n", exc_no); + KVM_BUG_ON(tir_exc_mask == 0); + if (unlikely((exc_to_intc & cond_exc_mask) == 0)) { + pr_err("%s(): unexpected intercept of conditional " + "exception #%d (0x%llx), " + "expected mask 0x%llx\n", + __func__, exc_no, cond_exc_mask, exc_to_intc); + if (tir_exc_mask & tir_exc) { + kvm_pass_cond_exc_to_vcpu(vcpu, exc_no); + pr_err("%s(): unexpected exception %s is " + "detected in the TIRs, so will be " + "passed to guest\n", + __func__, + kvm_cond_exc_no_to_exc_name(exc_no)); + } else { + pr_err("%s(): unexpected exception %s is not " + "detected in the TIRs, so will be " + "ignored\n", + __func__, + kvm_cond_exc_no_to_exc_name(exc_no)); + } + continue; + } + if (unlikely((tir_exc_mask & tir_exc) == 0)) { + /* but exception did not occur */ + pr_err("%s(): there is intercept of expected " + "conditional exception #%d (0x%llx) %s, " + "but exception did not occur, " + "mask of all TIRs exceptions 0x%llx, " + "so will be ignored\n", + __func__, exc_no, cond_exc_mask, + kvm_cond_exc_no_to_exc_name(exc_no), + tir_exc); + continue; + } + handler = kvm_get_cond_exc_handler(exc_no); + r = handler(vcpu, regs); + if (r < 0) { + pr_err("%s(): conditional exception #%d %s intercept " + "handler %pF failed, error %d\n", + __func__, exc_no, + kvm_cond_exc_no_to_exc_name(exc_no), + handler, r); + KVM_BUG_ON(true); + } else if (r != 0) { + /* it need return to user space to handle */ + /* intercept (exit) reason */ + ret |= 1; + } + } + return ret; +} + +/* + * The function returns new mask of total exceptions (including AAU) + * at all TIRs + */ +static u64 restore_vcpu_intc_TIRs(struct kvm_vcpu *vcpu, + u64 TIRs_exc, u64 to_pass, u64 to_delete, u64 to_create) +{ + int TIRs_num, TIR_no, last_valid_TIR_no = -1; + e2k_tir_lo_t TIR_lo; + e2k_tir_hi_t TIR_hi; + u64 TIRs_aa, aa_to_pass, aa_to_delete, aa_to_create; + u64 new_TIRs_exc = 0; + u64 new_TIRs_aa = 0; + bool aa_valid; + + TIRs_num = 
kvm_get_vcpu_intc_TIRs_num(vcpu); + KVM_BUG_ON(TIRs_exc != 0 && TIRs_num < 0); + TIRs_aa = ((e2k_tir_hi_t)TIRs_exc).TIR_hi_aa; + aa_to_pass = ((e2k_tir_hi_t)to_pass).TIR_hi_aa; + aa_to_delete = ((e2k_tir_hi_t)to_delete).TIR_hi_aa; + aa_to_create = ((e2k_tir_hi_t)to_create).TIR_hi_aa; + aa_valid = (TIRs_aa || aa_to_pass || aa_to_delete || aa_to_create); + + for (TIR_no = 0; TIR_no <= TIRs_num; TIR_no++) { + u64 exc, tir_exc, pass, delete, create, new_exc, new_aa; + + TIR_hi = kvm_get_vcpu_intc_TIR_hi(vcpu, TIR_no); + TIR_lo = kvm_get_vcpu_intc_TIR_lo(vcpu, TIR_no); + exc = TIR_hi.TIR_hi_exc; + tir_exc = exc & TIRs_exc; + pass = exc & to_pass; + delete = exc & to_delete; + create = exc & to_create; + new_exc = pass | create; + new_exc |= (tir_exc &~delete); + DebugTIRs("TIR[%d]: source exc 0x%llx. intersections with " + "TIRs 0x%llx pass 0x%llx delete 0x%llx create 0x%llx " + " -> new exc 0x%llx\n", + TIR_no, exc, tir_exc, pass, delete, create, new_exc); + if (aa_valid) { + u64 aa, tir_aa, aa_pass, aa_delete, aa_create; + + aa = TIR_hi.TIR_hi_aa; + tir_aa = aa & TIRs_aa; + aa_pass = aa & aa_to_pass; + aa_delete = aa & aa_to_delete; + aa_create = aa & aa_to_create; + new_aa = aa_pass | aa_create; + new_aa |= (tir_aa &~aa_delete); + DebugTIRs("TIR[%d]: source aa 0x%llx. 
intersections " + "with TIRs 0x%llx pass 0x%llx delete 0x%llx " + "create 0x%llx -> new aa 0x%llx\n", + TIR_no, aa, tir_aa, aa_pass, aa_delete, + aa_create, new_aa); + } else { + new_aa = 0; + } + TIR_hi.TIR_hi_exc = new_exc; + TIR_hi.TIR_hi_aa = new_aa; + new_TIRs_exc |= new_exc; + new_TIRs_aa |= new_aa; + if (new_exc || new_aa) + last_valid_TIR_no = TIR_no; + kvm_set_vcpu_intc_TIR_hi(vcpu, TIR_no, TIR_hi); + DebugTIRs("TIR[%d].hi: exc 0x%llx alu 0x%x aa 0x%x #%d\n" + "TIR[%d].lo: IP 0x%llx\n", + TIR_no, TIR_hi.TIR_hi_exc, TIR_hi.TIR_hi_al, + TIR_hi.TIR_hi_aa, TIR_hi.TIR_hi_j, + TIR_no, TIR_lo.TIR_lo_ip); + } + + if (last_valid_TIR_no < TIRs_num) + kvm_set_vcpu_intc_TIRs_num(vcpu, last_valid_TIR_no); + + if (kvm_check_is_vcpu_intc_TIRs_empty(vcpu)) { + DebugTIRs("intercept TIRs are empty to pass to guest\n"); + KVM_BUG_ON(new_TIRs_exc || new_TIRs_aa); + return 0; + } else { + DebugTIRs("intercept TIRs of %d num total exc mask 0x%llx, " + "aa 0x%llx will be passed to guest\n", + kvm_get_vcpu_intc_TIRs_num(vcpu), + new_TIRs_exc, new_TIRs_aa); + TIR_hi.TIR_hi_reg = 0; + TIR_hi.TIR_hi_exc = new_TIRs_exc; + TIR_hi.TIR_hi_aa = new_TIRs_aa; + return TIR_hi.TIR_hi_reg; + } +} + + +#ifdef CONFIG_KVM_ASYNC_PF + +/* + * Return event code for given event number + */ +intc_info_mu_event_code_t get_event_code(struct kvm_vcpu *vcpu, int ev_no) +{ + intc_info_mu_t *intc_info_mu = &vcpu->arch.intc_ctxt.mu[ev_no]; + + return intc_info_mu->hdr.event_code; +} + +/* + * intc_mu_record_asynchronous - return true if the record + * in intc_info_mu buffer is asynchronous + * @vcpu: current vcpu descriptor + * @ev_no: index of record in intc_info_mu buffer + */ +bool intc_mu_record_asynchronous(struct kvm_vcpu *vcpu, int ev_no) +{ + intc_info_mu_t *intc_info_mu = &vcpu->arch.intc_ctxt.mu[ev_no]; + tc_cond_t cond = intc_info_mu->condition; + + return is_record_asynchronous(cond); +} + +/* + * is_in_pm returns: + * true if guest was intercepted in kernel mode + * false if guest was 
intercepted in user mode + */ +static bool is_in_pm(struct pt_regs *regs) +{ + return regs->crs.cr1_lo.CR1_lo_pm; +} + +/* + * Add "dummy" page fault event in guest tcellar + */ +static void add_apf_to_guest_tcellar(struct kvm_vcpu *vcpu) +{ + /* Get pointer to free entries in guest tcellar */ + struct kvm_sw_cpu_context *sw_ctxt = &vcpu->arch.sw_ctxt; + int guest_tc_cnt = sw_ctxt->trap_count; + kernel_trap_cellar_t *guest_tc = ((kernel_trap_cellar_t *) + vcpu->arch.mmu.tc_kaddr) + guest_tc_cnt/3; + tc_cond_t condition; + tc_fault_type_t ftype; + + KVM_BUG_ON(guest_tc_cnt % 3); + + AW(condition) = 0; + AW(ftype) = 0; + AS(condition).store = 0; + AS(condition).spec = 0; + AS(condition).fmt = LDST_DWORD_FMT; + AS(condition).fmtc = 0; + AS(ftype).page_miss = 1; + AS(condition).fault_type = AW(ftype); + + guest_tc->condition = condition; + + sw_ctxt->trap_count = guest_tc_cnt + 3; +} + +/* + * Move events which can cause async page fault from intercept buffer + * to guest tcellar. Leave all other events in intercept buffer. 
+ */ +static void kvm_apf_save_and_clear_intc_mu(struct kvm_vcpu *vcpu) +{ + /* Get pointer to intercept buffer */ + kvm_intc_cpu_context_t *intc_ctxt = &vcpu->arch.intc_ctxt; + intc_info_mu_t *intc_mu = (intc_info_mu_t *) &vcpu->arch.intc_ctxt.mu; + + /* Get number of entries in guest tcellar */ + struct kvm_sw_cpu_context *sw_ctxt = &vcpu->arch.sw_ctxt; + int guest_tc_cnt = sw_ctxt->trap_count; + + KVM_BUG_ON(guest_tc_cnt % 3); + + /* Get pointer to free entries in guest tcellar */ + kernel_trap_cellar_t *guest_tc = ((kernel_trap_cellar_t *) + vcpu->arch.mmu.tc_kaddr) + guest_tc_cnt/3; + kernel_trap_cellar_ext_t *guest_tc_ext = + ((void *) guest_tc) + TC_EXT_OFFSET; + + int e_idx = 0, e_hv_idx = 0, fmt, ev_code; + intc_info_mu_t hv_intc_mu[INTC_INFO_MU_ITEM_MAX]; + intc_info_mu_t *mu_event; + tc_opcode_t opcode; + + + for (e_idx = 0; e_idx < intc_ctxt->mu_num; e_idx++) { + ev_code = get_event_code(vcpu, e_idx); + + if ((ev_code <= IME_GPA_DATA) && + !intc_mu_record_asynchronous(vcpu, e_idx)) { + /* Check guest tcellar capacity */ + KVM_BUG_ON(guest_tc_cnt/3 >= HW_TC_SIZE); + + /* Copy event from intercept buffer to guest tcellar */ + mu_event = &intc_mu[e_idx]; + guest_tc->address = mu_event->gva; + guest_tc->condition = mu_event->condition; + AW(opcode) = AS(mu_event->condition).opcode; + fmt = AS(opcode).fmt; + if (fmt == LDST_QP_FMT) + guest_tc_ext->mask = mu_event->mask; + + if (AS(mu_event->condition).store) { + NATIVE_MOVE_TAGGED_DWORD(&mu_event->data, + &guest_tc->data); + + if (fmt == LDST_QP_FMT) { + NATIVE_MOVE_TAGGED_DWORD( + &mu_event->data_ext, + &guest_tc_ext->data); + } + } + guest_tc++; + guest_tc_ext++; + guest_tc_cnt += 3; + } else { + memcpy(&hv_intc_mu[e_hv_idx], &intc_mu[e_idx], + sizeof(intc_info_mu_t)); + e_hv_idx++; + } + } + + /* Set new number of entries in guest tcellar */ + sw_ctxt->trap_count = guest_tc_cnt; + + /* Clear intercept buffer */ + memset(intc_mu, 0, sizeof(intc_info_mu_t) * intc_ctxt->mu_num); + + /* Write remained events 
back to intercept buffer */ + memcpy(intc_mu, &hv_intc_mu, sizeof(intc_info_mu_t) * e_hv_idx); + intc_ctxt->mu_num = e_hv_idx; +} + +#endif /* CONFIG_KVM_ASYNC_PF */ + +static u64 inject_new_vcpu_intc_exceptions(struct kvm_vcpu *vcpu, + u64 to_create, pt_regs_t *regs) +{ + u64 created = 0; + + if (to_create & exc_last_wish_mask) { + kvm_inject_last_wish(vcpu, regs); + created |= exc_last_wish_mask; + } + + if (to_create & exc_data_page_mask) { +#ifdef CONFIG_KVM_ASYNC_PF + if (vcpu->arch.apf.enabled && + vcpu->arch.apf.host_apf_reason == + KVM_APF_PAGE_IN_SWAP) { + add_apf_to_guest_tcellar(vcpu); + kvm_apf_save_and_clear_intc_mu(vcpu); + vcpu->arch.apf.host_apf_reason = KVM_APF_NO; + } +#endif /* CONFIG_KVM_ASYNC_PF */ + kvm_inject_data_page_exc(vcpu, regs); + created |= exc_data_page_mask; + } + + if (to_create & exc_instr_page_miss_mask) { + kvm_inject_instr_page_exc(vcpu, regs, exc_instr_page_miss_mask, + vcpu->arch.intc_ctxt.exc_IP_to_create); + created |= exc_instr_page_miss_mask; + } + + if (to_create & exc_instr_page_prot_mask) { + kvm_inject_instr_page_exc(vcpu, regs, exc_instr_page_prot_mask, + vcpu->arch.intc_ctxt.exc_IP_to_create); + created |= exc_instr_page_prot_mask; + } + + if (to_create & exc_ainstr_page_miss_mask) { + kvm_inject_ainstr_page_exc(vcpu, regs, + exc_ainstr_page_miss_mask, + AS(vcpu->arch.intc_ctxt.ctpr2).ta_base); + created |= exc_ainstr_page_miss_mask; + } + + if (to_create & exc_ainstr_page_prot_mask) { + kvm_inject_ainstr_page_exc(vcpu, regs, + exc_ainstr_page_prot_mask, + AS(vcpu->arch.intc_ctxt.ctpr2).ta_base); + created |= exc_ainstr_page_prot_mask; + } + + /* + * Interrupt should be injected last, to the highest non-empty TIR, + * but at least to TIR1 (as exc_data_page may register in TIR1 during + * GLAUNCH) + */ + if (to_create & exc_interrupt_mask) { + kvm_inject_interrupt(vcpu, regs); + created |= exc_interrupt_mask; + } + + if (unlikely(created != to_create)) { + pr_err("%s() could not inject all exceptions, only 0x%llx " + 
"from 0x%llx -> 0x%llx\n", + __func__, created, to_create, to_create & ~created); + KVM_BUG_ON(true); + } + return created; +} + +static void handle_pending_virqs(struct kvm_vcpu *vcpu, pt_regs_t *regs) +{ + if (likely(!kvm_test_pending_virqs(vcpu))) { + /* nothing pending VIRQs */ + return; + } + if (!kvm_test_inject_direct_guest_virqs(vcpu, NULL, + vcpu->arch.sw_ctxt.upsr.UPSR_reg, + regs->crs.cr1_lo.CR1_lo_psr)) { + /* there are some VIRQs, but cannot be injected right now */ + return; + } + if (!(vcpu->arch.intc_ctxt.exceptions & exc_interrupt_mask)) { + kvm_need_create_vcpu_exception(vcpu, exc_interrupt_mask); + DebugVIRQs("interrupt is injected on VCPU #%d\n", + vcpu->vcpu_id); + } +} + +static int handle_cu_exceptions(struct kvm_vcpu *vcpu, + intc_info_cu_t *cu, pt_regs_t *regs) +{ + kvm_intc_cpu_context_t *intc_ctxt = &vcpu->arch.intc_ctxt; + u64 tir_exc, to_delete, to_create, to_pass; + u64 new_tir_exc; + + handle_pending_virqs(vcpu, regs); + + tir_exc = intc_ctxt->exceptions; + to_delete = intc_ctxt->exc_to_delete; + to_create = intc_ctxt->exc_to_create; + to_pass = intc_ctxt->exc_to_pass; + if (tir_exc == 0 && to_pass == 0 && to_delete == 0 && to_create == 0) { + return 0; + } + if (unlikely((to_pass & tir_exc) != to_pass)) { + pr_err("%s(): not all exceptions to pass 0x%llx are present " + "at TIRs 0x%llx\n", + __func__, to_pass, tir_exc); + KVM_BUG_ON(true); + } + if (unlikely((to_delete & tir_exc) != to_delete)) { + pr_err("%s(): not all exceptions to delete 0x%llx are present " + "at TIRs 0x%llx\n", + __func__, to_delete, tir_exc); + KVM_BUG_ON(true); + } + if (unlikely((to_create & tir_exc) != 0)) { + pr_err("%s(): not all exceptions to create 0x%llx are not " + "already present at TIRs 0x%llx -> 0x%llx\n", + __func__, to_create, tir_exc, to_create & tir_exc); + KVM_BUG_ON(true); + } + if (unlikely((to_pass & to_delete) != 0)) { + pr_err("%s(): exceptions to delete 0x%llx and to pass 0x%llx " + "intersection 0x%llx\n", + __func__, to_delete, 
to_pass, to_pass & to_delete); + KVM_BUG_ON(true); + } + if (unlikely((to_pass & to_create) != 0)) { + pr_err("%s(): exceptions to create 0x%llx and to pass 0x%llx " + "intersection 0x%llx\n", + __func__, to_create, to_pass, to_pass & to_create); + KVM_BUG_ON(true); + } + if (unlikely((to_delete & to_create) != 0)) { + pr_err("%s(): exceptions to create 0x%llx and to delete 0x%llx " + "intersection 0x%llx\n", + __func__, to_create, to_create, to_create & to_create); + KVM_BUG_ON(true); + } + + new_tir_exc = restore_vcpu_intc_TIRs(vcpu, tir_exc, + to_pass, to_delete, to_create); + if (unlikely((new_tir_exc & to_pass) != to_pass)) { + pr_err("%s(): not all exception to pass 0x%llx " + "were passed 0x%llx, not passed 0x%llx\n", + __func__, to_pass, new_tir_exc, new_tir_exc & to_pass); + KVM_BUG_ON(true); + } + if (unlikely((new_tir_exc & to_delete) != 0)) { + pr_err("%s(): not all exception to delete 0x%llx " + "were deleted 0x%llx, not deleted 0x%llx\n", + __func__, to_delete, new_tir_exc, + new_tir_exc & to_delete); + KVM_BUG_ON(true); + } + to_create &= ~new_tir_exc; + if (to_create != 0) { + u64 created; + + created = inject_new_vcpu_intc_exceptions(vcpu, + to_create, regs); + new_tir_exc |= created; + intc_ctxt->exceptions |= created; + } + DebugTIRs("intercept TIRs of %d num total exc mask 0x%llx, " + "will be passed to guest\n", + kvm_get_vcpu_intc_TIRs_num(vcpu), new_tir_exc); + + return 0; +} + +static int handle_cu_intercepts(struct kvm_vcpu *vcpu, pt_regs_t *regs) +{ + kvm_intc_cpu_context_t *intc_ctxt = &vcpu->arch.intc_ctxt; + intc_info_cu_t *cu = &intc_ctxt->cu; + intc_info_cu_hdr_lo_t cu_hdr_lo = cu->header.lo; + u64 exceptions = intc_ctxt->exceptions; + int cu_num = intc_ctxt->cu_num; + int r, ret = 0; + + KVM_BUG_ON(cu_num < 0); + + /* handle intercepts on conditional events */ + if (cu_hdr_lo.evn_c != 0) { + r = handle_cu_cond_events(vcpu, cu, regs); + if (r < 0) { + ret = r; + goto out; + } else if (r == 1) { + /* it need return to user space to 
continue handling */ + ret |= 1; + } else if (r != 0) { + ret = r; + } + } + + /* handle intercepts on unconditional events */ + if (cu_hdr_lo.evn_u != 0) { + r = handle_cu_uncond_events(vcpu, cu, regs); + if (r != 0) { + ret = r; + goto out; + } + } + + /* handle intercepts on conditional exceptions */ + if (cu_hdr_lo.exc_c != 0 || exceptions != 0) { + r = handle_cu_cond_exceptions(vcpu, cu, regs); + if (r != 0) { + ret = r; + goto out; + } + } + + /* check additional info entries to precise events */ + if (cu_num > 0) { + check_cu_info_entries(vcpu, cu, cu_num); + } + +out: + /* handle guest CU exceptions to pass to guest */ + r = handle_cu_exceptions(vcpu, cu, regs); + if (r != 0) + ret = r; + + return ret; +} + +static int soft_reexecute_mu_one_intercept(struct kvm_vcpu *vcpu, + intc_info_mu_t *mu_event, pt_regs_t *regs) +{ + int event = mu_event->hdr.event_code; + gpa_t gpa; + gva_t address; + tc_cond_t cond; + tc_fault_type_t ftype; + bool nonpaging = !is_paging(vcpu); + int ret; + + gpa = mu_event->gpa; + address = mu_event->gva; + cond = mu_event->condition; + AW(ftype) = AS(cond).fault_type; + + if (unlikely(!nonpaging && !is_shadow_paging(vcpu))) { + pr_err("%s(): MU %s GVA 0x%lx GPA 0x%llx fault type 0x%x " + "cannot be software reexecuted\n", + __func__, kvm_get_mu_event_name(event), + address, gpa, AW(ftype)); + KVM_BUG_ON(true); + } + DebugREEXECMU("INTC MU event code %d %s GVA 0x%lx GPA 0x%llx " + "fault type 0x%x\n", + event, kvm_get_mu_event_name(event), address, gpa, AW(ftype)); + + if (nonpaging) { + /* GPA & GVA should be equal */ + gpa = address; + mu_event->gpa = gpa; + } + + ret = kvm_hv_mmu_page_fault(vcpu, regs, mu_event); + if (ret != PFRES_NO_ERR) { + pr_err("%s(): could not software reexecute %s GVA 0x%lx " + "GPA 0x%llx fault type 0x%x\n", + __func__, kvm_get_mu_event_name(event), + address, gpa, AW(ftype)); + KVM_BUG_ON(true); + } + return 0; +} + +static int soft_reexecute_mu_intercepts(struct kvm_vcpu *vcpu, pt_regs_t *regs) +{ + 
	/* (continuation of soft_reexecute_mu_intercepts(), whose header is
	 * above this chunk) Software re-execution of INTC_INFO_MU requests:
	 * walks all recorded MU intercept events and re-executes by software
	 * those the hardware will not redo itself. */
	kvm_intc_cpu_context_t *intc_ctxt = &vcpu->arch.intc_ctxt;
	intc_info_mu_t *mu = intc_ctxt->mu;
	int mu_num = intc_ctxt->mu_num;
	unsigned long intc_mu_to_move = intc_ctxt->intc_mu_to_move;
	int evn_no;
	int reexec_num = 0;
	int ret = 0;

	KVM_BUG_ON(mu_num < 0);

	if (intc_mu_to_move == 0)
		/* nothing to reexecute */
		return 0;

	for (evn_no = 0; evn_no < mu_num; evn_no++) {
		if (intc_mu_to_move & (1UL << evn_no))
			/* intercept should be moved to guest trap cellar */
			/* to handle by guest */
			continue;

		intc_info_mu_t *mu_event = &mu[evn_no];
		int event = mu_event->hdr.event_code;

		DebugREEXECMUV("INTC MU event #%d code %d %s\n",
			evn_no, event, kvm_get_mu_event_name(event));
		switch (event) {
		case IME_FORCED:
		case IME_FORCED_GVA:
			/* should be reexecuted */
			break;
		case IME_SHADOW_DATA:
		case IME_GPA_DATA:
		case IME_GPA_INSTR:
		case IME_GPA_AINSTR:
			/* should be already reexecuted */
			continue;
		default:
			DebugREEXECMU("event #%d %s should not be software "
				"reexecuted\n",
				evn_no, kvm_get_mu_event_name(event));
			continue;
		}
		ret = soft_reexecute_mu_one_intercept(vcpu, mu_event, regs);
		if (ret != 0)
			/* stop on the first re-execution failure */
			break;
		reexec_num++;
	}

	DebugREEXECMUV("total number of sofware reexecuted requests %d\n",
		reexec_num);

	return ret;
}

/*
 * Dispatch a single INTC_INFO_MU event to its registered handler
 * (looked up by event code in mu_events_desc_table).
 * Returns the handler's result; a missing handler silently ignores
 * the event. Any result other than PFRES_NO_ERR/PFRES_TRY_MMIO is
 * fatal (KVM_BUG_ON), except an IME_GPA_INSTR preload failure when
 * at least two MU events are pending, which is tolerated.
 */
static int handle_mu_one_intercept(struct kvm_vcpu *vcpu,
		intc_info_mu_t *mu_event, pt_regs_t *regs)
{
	int evn_no = vcpu->arch.intc_ctxt.cur_mu;
	int event = mu_event->hdr.event_code;
	mu_intc_handler_t handler;
	int ret;

	DebugINTCMU("INTC MU event code %d %s\n",
		event, kvm_get_mu_event_name(event));

	KVM_BUG_ON(evn_no < 0 || evn_no >= vcpu->arch.intc_ctxt.mu_num);

	handler = kvm_get_mu_event_handler(event);

	if (handler == NULL) {
		DebugINTCMU("event handler is empty, event is ignored\n");
		return 0;
	}

	ret = handler(vcpu, mu_event, regs);
	if (ret != PFRES_NO_ERR && ret != PFRES_TRY_MMIO) {
		pr_err("%s(): could not handle MMU intercept event %d "
			"%s\n",
			__func__, event, kvm_get_mu_event_name(event));
		if (event == IME_GPA_INSTR &&
				vcpu->arch.intc_ctxt.mu_num >= 2) {
			/* NOTE(review): presumably a failed speculative
			 * preload of the guest trap handler is harmless
			 * when other MU events will deliver the trap —
			 * confirm against hardware docs */
			pr_err("%s(): ignoring guest trap handler preload\n",
				__func__);
			ret = 0;
		} else {
			KVM_BUG_ON(true);
		}
	}

	return ret;
}

/*
 * One pass over all INTC_INFO_MU events, dispatching each to
 * handle_mu_one_intercept(). When @retry is set (second and later
 * passes), only events marked retriable whose mmu_notifier sequence
 * has changed since they were handled are re-dispatched.
 * Returns the last non-zero handler result, 0 if all succeeded.
 */
static int try_handle_mu_intercepts(struct kvm_vcpu *vcpu,
			pt_regs_t *regs, bool retry)
{
	kvm_intc_cpu_context_t *intc_ctxt = &vcpu->arch.intc_ctxt;
	intc_info_mu_t *mu = intc_ctxt->mu;
	int mu_num = intc_ctxt->mu_num;
	int evn_no;
	int r, ret = 0;

	for (evn_no = 0; evn_no < mu_num; evn_no++) {
		intc_info_mu_t *mu_event = &mu[evn_no];
		int event = mu_event->hdr.event_code;

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
		if (unlikely(retry)) {
			intc_mu_state_t *event_state;

			event_state = &intc_ctxt->mu_state[evn_no];
			/* probably some MMU events should be retried */
			if (!event_state->may_be_retried) {
				/* the MMU event is not retried */
				continue;
			}
			if (!mmu_notifier_retry(vcpu->kvm,
					event_state->notifier_seq)) {
				/* MMU event already uptime */
				continue;
			}
			DebugTRY("retry seq 0x%lx: event #%d code %d : "
				"gpa 0x%lx gva 0x%lx data 0x%lx\n",
				event_state->notifier_seq,
				evn_no, event, mu_event->gpa,
				mu_event->gva, mu_event->data);
		}
#endif	/* KVM_ARCH_WANT_MMU_NOTIFIER */
		intc_ctxt->cur_mu = evn_no;
		DebugINTCMU("INTC MU event #%d code %d %s\n",
			evn_no, event, kvm_get_mu_event_name(event));
		DebugINTCMU("hdr 0x%llx gpa 0x%lx gva 0x%lx data 0x%lx\n",
			mu_event->hdr.word, mu_event->gpa, mu_event->gva,
			mu_event->data);
		DebugINTCMU("condition 0x%llx data_ext 0x%lx mask 0x%llx\n",
			mu_event->condition.word, mu_event->data_ext,
			mu_event->mask.word);
		r = handle_mu_one_intercept(vcpu, mu_event, regs);
		if (r != 0)
			/* remember failure but keep handling the rest */
			ret = r;
	}

	return ret;
}

/*
 * Top-level MU (memory unit) intercept processing: handle all
 * INTC_INFO_MU events, then software-reexecute those that need it.
 * With MMU notifiers enabled the whole sequence is retried while a
 * concurrent host-side invalidation (detected via mmu_notifier_seq)
 * may have made the just-installed translations stale.
 */
static int handle_mu_intercepts(struct kvm_vcpu *vcpu, pt_regs_t *regs)
{
	kvm_intc_cpu_context_t *intc_ctxt = &vcpu->arch.intc_ctxt;
	int mu_num = intc_ctxt->mu_num;
	int ret = 0;
	int try = 0;

	KVM_BUG_ON(mu_num < 0);

	DebugINTCMU("INTC_INFO_MU total events number %d\n", mu_num);

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
	do {
		unsigned long mmu_seq;

		/* snapshot the notifier sequence before handling, so a
		 * concurrent invalidation can be detected afterwards */
		mmu_seq = vcpu->kvm->mmu_notifier_seq;
		smp_rmb();
#endif	/* KVM_ARCH_WANT_MMU_NOTIFIER */

		ret = try_handle_mu_intercepts(vcpu, regs, !!(try > 0));

		if (unlikely(ret != 0))
			goto out;

		ret = soft_reexecute_mu_intercepts(vcpu, regs);
		if (unlikely(ret != 0))
			goto out;

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER

		if (unlikely(!mmu_notifier_retry(vcpu->kvm, mmu_seq))) {
			/* nothing to retry page faults */
			break;
		}

		mu_num = intc_ctxt->mu_num;
		if (unlikely(mu_num <= 0)) {
			/* INTC_INFO_MU to reexecute and retry is empty */
			break;
		}

		/* host kernel updates some HVA (and probably gfn) mappings */
		/* so probably it need retry some MMU intercepts */
		try++;
		DebugTRY("retry #%d seq 0x%lx:0x%lx to rehandle MU %d "
			"intercept(s)\n",
			try, mmu_seq, vcpu->kvm->mmu_notifier_seq,
			mu_num);

		cond_resched();
	} while (mu_num > 0);
#endif	/* KVM_ARCH_WANT_MMU_NOTIFIER */

	return 0;

out:
	return ret;
}

/*
 * Returns 0 to let vcpu_run() continue the guest execution loop without
 * exiting to the userspace. Otherwise, the value will be returned to the
 * userspace.
 * Each intercept handler should return same as the function
 */
noinline /* So that caller's %psr restoring works as intended */
int parse_INTC_registers(struct kvm_vcpu_arch *vcpu)
{
	struct pt_regs regs;
	struct trap_pt_regs trap;
	u64 sbbp[SBBP_ENTRIES_NUM];
	struct kvm_hw_cpu_context *hw_ctxt = &vcpu->hw_ctxt;
	kvm_intc_cpu_context_t *intc_ctxt = &vcpu->intc_ctxt;
	intc_info_cu_t *cu = &intc_ctxt->cu;
	int cu_num = intc_ctxt->cu_num, mu_num = intc_ctxt->mu_num;
	e2k_mem_crs_t *frame;
	u64 cu_intc;
	u64 interrupts;
	u64 guest_ip;
	u64 cpu = arch_to_vcpu(vcpu)->cpu;
	int ret = 0, ret_mu, ret_cu;
	int r;

	/*
	 * We handle interceptions in the following order
	 * (this is similar to parse_TIR_regsiters() since
	 * these two functions do roughly the same thing):
	 * 1) Form pt_regs
	 * 2) Non-maskable interrupts are handled under closed NMIs
	 * 3) Open non-maskable interrupts
	 * 4) Handle maskable interrupts
	 * 5) Open maskable interrupts
	 * 6) Handle MU exceptions
	 * 7) Handle CU exceptions
	 * 8) Remove pt_regs
	 */

	/*
	 * 1) Form pt_regs - they are used by all of our interrupt
	 * handlers. Another way is to replace `hw_ctxt'/`sw_ctxt'
	 * pair in `struct kvm_vcpu_arch' with pt_regs and some new
	 * virtual_pt_regs structure for all the new registers, but
	 * then we will lose the division between hardware-switched
	 * context (hw_ctxt) and software-switched context (sw_ctxt).
	 */

	trap.curr_cnt = -1;
	trap.ignore_user_tc = 0;
	trap.tc_called = 0;
	trap.is_intc = false;
	trap.from_sigreturn = 0;
	trap.tc_count = 0;
	trap.flags = 0;
	CLEAR_CLW_REQUEST_COUNT(&regs);

	memcpy(sbbp, intc_ctxt->sbbp, sizeof(sbbp));
	trap.sbbp = sbbp;

	AW(regs.flags) = 0;
	regs.flags.kvm_hw_intercept = 1;

	regs.trap = &trap;
	regs.aau_context = &vcpu->sw_ctxt.aau_context;

	/* read back shadow and backup hardware stack registers into the
	 * hardware context; the backup (bu_*) ones describe guest stacks */
	hw_ctxt->sh_psp_lo.PSP_lo_half = READ_SH_PSP_LO_REG_VALUE();
	hw_ctxt->sh_psp_hi.PSP_hi_half = READ_SH_PSP_HI_REG_VALUE();
	hw_ctxt->sh_pcsp_lo.PCSP_lo_half = READ_SH_PCSP_LO_REG_VALUE();
	hw_ctxt->sh_pcsp_hi.PCSP_hi_half = READ_SH_PCSP_HI_REG_VALUE();

	hw_ctxt->bu_psp_lo.PSP_lo_half = READ_BU_PSP_LO_REG_VALUE();
	hw_ctxt->bu_psp_hi.PSP_hi_half = READ_BU_PSP_HI_REG_VALUE();
	hw_ctxt->bu_pcsp_lo.PCSP_lo_half = READ_BU_PCSP_LO_REG_VALUE();
	hw_ctxt->bu_pcsp_hi.PCSP_hi_half = READ_BU_PCSP_HI_REG_VALUE();
	regs.stacks.psp_lo = hw_ctxt->bu_psp_lo;
	regs.stacks.psp_hi = hw_ctxt->bu_psp_hi;
	regs.stacks.pcsp_lo = hw_ctxt->bu_pcsp_lo;
	regs.stacks.pcsp_hi = hw_ctxt->bu_pcsp_hi;

	trap.nr_TIRs = -1;
	memset(trap.TIRs, 0, sizeof(trap.TIRs[0]));
	if (intc_ctxt->nr_TIRs >= 0) {
		trap.nr_TIRs = intc_ctxt->nr_TIRs;
		memcpy(trap.TIRs, intc_ctxt->TIRs,
			(intc_ctxt->nr_TIRs + 1) * sizeof(trap.TIRs[0]));
	}

	/* This makes sure that user_mode(regs) returns true (thus we
	 * cannot put here real guest SBR since it could be >PAGE_OFFSET). */
	regs.stacks.top = 0;
	AW(regs.stacks.usd_lo) = 0;
	AW(regs.stacks.usd_hi) = 0;

	/* CR registers are used e.g. in perf to get user IP */
	frame = (e2k_mem_crs_t *) (AS(hw_ctxt->bu_pcsp_lo).base +
				AS(hw_ctxt->bu_pcsp_hi).ind);
	--frame;
	regs.crs = *frame;
	guest_ip = AS(regs.crs.cr0_hi).ip << 3;

	/* Intercepted data page, read guest's trap cellar */
	if (cu->header.lo.exc_data_page)
		NATIVE_SAVE_TRAP_CELLAR(&regs, &trap);

	/* chain our pt_regs onto the current thread's list so generic
	 * trap/interrupt code sees them; unchained at step 8 */
	KVM_BUG_ON(current_thread_info()->pt_regs == NULL);
	regs.next = current_thread_info()->pt_regs;
	current_thread_info()->pt_regs = &regs;

	/* Trap for bug 130291 */
	if (unlikely(cu->header.lo.rr_idr && !hw_ctxt->virt_ctrl_cu.VIRT_CTRL_CU_rr_idr)) {
		print_intc_ctxt(arch_to_vcpu(vcpu));
		KVM_BUG_ON(true);
	}

	/* Try more lightweight tracepoints first */
	if (mu_num == 1 && cu_num == -1)
		trace_single_mu_intc(intc_ctxt->mu, guest_ip, cpu);
	else if (mu_num == 2 && cu_num == -1)
		trace_double_mu_intc(intc_ctxt->mu, guest_ip, cpu);
	else if (mu_num == -1 && cu_num == 0)
		trace_single_cu_intc(intc_ctxt->cu.header, guest_ip, cpu);
	else
		trace_intc(intc_ctxt, guest_ip, cpu);

	if (trace_intc_tir_enabled()) {
		int i;

		for (i = 0; i <= trap.nr_TIRs; i++)
			trace_intc_tir(AW(trap.TIRs[i].TIR_lo),
					AW(trap.TIRs[i].TIR_hi));
	}

	trace_intc_ctprs(AW(intc_ctxt->ctpr1), AW(intc_ctxt->ctpr1_hi),
			AW(intc_ctxt->ctpr2), AW(intc_ctxt->ctpr2_hi),
			AW(intc_ctxt->ctpr3), AW(intc_ctxt->ctpr3_hi));

	if (AW(vcpu->sw_ctxt.aau_context.guest_aasr))
		trace_intc_aau(&vcpu->sw_ctxt.aau_context,
			intc_ctxt->lsr, intc_ctxt->lsr1,
			intc_ctxt->ilcr, intc_ctxt->ilcr1);

	trace_intc_clw(vcpu->sw_ctxt.us_cl_d, vcpu->sw_ctxt.us_cl_b, vcpu->sw_ctxt.us_cl_up,
			vcpu->sw_ctxt.us_cl_m0, vcpu->sw_ctxt.us_cl_m1,
			vcpu->sw_ctxt.us_cl_m2, vcpu->sw_ctxt.us_cl_m3);

	/* reset per-intercept exception bookkeeping before handlers run */
	intc_ctxt->exc_to_create = 0;
	intc_ctxt->exc_to_delete = 0;
	intc_ctxt->exc_to_pass = 0;
	intc_ctxt->exc_IP_to_create = 0;
	intc_ctxt->cu_entry_handled = 0;

	/* split interrupt bits out of the CU header: they are handled
	 * here (steps 2 and 4), not by the per-event CU handlers */
	interrupts = 0;
	if (cu_num != -1) {
		cu_intc = AW(cu->header.lo);
		interrupts = cu_intc & (intc_cu_hdr_lo_exc_interrupt_mask |
					intc_cu_hdr_lo_exc_nm_interrupt_mask);
		cu_intc &= ~(intc_cu_hdr_lo_exc_interrupt_mask |
				intc_cu_hdr_lo_exc_nm_interrupt_mask);
		AW(cu->header.lo) = cu_intc;
	}

#ifdef CONFIG_KVM_ASYNC_PF
	if (vcpu->apf.enabled)
		vcpu->apf.in_pm = is_in_pm(&regs);
#endif /* CONFIG_KVM_ASYNC_PF */

	/*
	 * 2) Handle NMIs
	 */
	if (unlikely(cu_num != -1 && cu->header.lo.hv_nm_int)) {
		do_nm_interrupt(&regs);
	} else if (interrupts & intc_cu_hdr_lo_exc_nm_interrupt_mask) {
		exc_intc_handler_t handler;

		/* Simulator bug: hypervisor interrupts on guest */
		/* do not cause immediate intercept */
		handler = kvm_get_cond_exc_handler(INTC_CU_EXC_NM_INTERRUPT_NO);
		r = handler(arch_to_vcpu(vcpu), &regs);
		if (r != 0) {
			pr_err("%s(): intercept handler %pF failed, "
				"error %d\n",
				__func__, handler, r);
			KVM_BUG_ON(true);
		}
		if (intc_ctxt->exceptions & exc_nm_interrupt_mask)
			kvm_need_delete_vcpu_exception(arch_to_vcpu(vcpu),
						exc_nm_interrupt_mask);
	}

	/*
	 * 3) All NMIs have been handled, now we can open them.
	 *
	 * SGE was already disabled by hardware on trap enter.
	 *
	 * We disable NMI in UPSR here again in case a local_irq_save()
	 * called from an NMI handler enabled it.
	 */
	INIT_KERNEL_UPSR_REG(false /* enable IRQs */,
			false /* disable NMI */);
	SWITCH_IRQ_TO_UPSR(true /* set CR1_LO.psr */);
	trace_hardirqs_off();

	/*
	 * 4) Handle external interrupts before enabling interrupts
	 */
	if (cu_num != -1 && cu->header.lo.hv_int) {
		native_do_interrupt(&regs);
	} else if (interrupts & intc_cu_hdr_lo_exc_interrupt_mask) {
		exc_intc_handler_t handler;

		/* Simulator bug: hypervisor interrupts on guest */
		/* do not cause immediate intercept */
		handler = kvm_get_cond_exc_handler(INTC_CU_EXC_INTERRUPT_NO);
		r = handler(arch_to_vcpu(vcpu), &regs);
		if (r != 0) {
			pr_err("%s(): intercept handler %pF failed, "
				"error %d\n",
				__func__, handler, r);
			KVM_BUG_ON(true);
		}
		if (intc_ctxt->exceptions & exc_interrupt_mask)
			kvm_need_delete_vcpu_exception(arch_to_vcpu(vcpu),
						exc_interrupt_mask);
	}

	/*
	 * 5) Open maskable interrupts
	 *
	 * Nasty hack here: we want to make sure that CEPIC_EPIC_INT interrupt
	 * is always delivered to the current context, otherwise it is very
	 * hard to handle synchronization. The problem is that a concurrent
	 * interrupts's handler might do a reschedule here:
	 * kernel_trap_handler() -> preempt_schedule_irq().
	 * So we disable preemption while all interrupts are being handled.
	 */
	preempt_disable();
	local_irq_enable();
	preempt_enable();

	/*
	 * 6) Handle MU exceptions. Currently we handle only those
	 * with GPA and we do _not_ reexecute them: reexecution is
	 * done by hardware for all entries in INTC_INFO_MU registers.
	 */
	if (mu_num > 0) {
		ret_mu = handle_mu_intercepts(arch_to_vcpu(vcpu), &regs);
		if (ret_mu)
			ret = ret_mu;
	}

#ifdef CONFIG_KVM_ASYNC_PF
	if (vcpu->apf.enabled)
		kvm_check_async_pf_completion(arch_to_vcpu(vcpu));
#endif /* CONFIG_KVM_ASYNC_PF */

	/*
	 * 7) Handle CU interceptions
	 */
	if (cu_num != -1)
		ret_cu = handle_cu_intercepts(arch_to_vcpu(vcpu), &regs);
	else
		ret_cu = handle_cu_exceptions(arch_to_vcpu(vcpu), cu, &regs);
	if (ret == 0)
		ret = ret_cu;

	/*
	 * 8) Remove pt_regs - they are not needed anymore
	 */
	current_thread_info()->pt_regs = regs.next;

	trace_intc_exit(ret);

	return ret;
}
diff --git a/arch/e2k/kvm/intercepts.h b/arch/e2k/kvm/intercepts.h
new file mode 100644
index 000000000000..d72045a44f6a
--- /dev/null
+++ b/arch/e2k/kvm/intercepts.h
@@ -0,0 +1,543 @@
#ifndef __KVM_E2K_INTERCEPTS_H
#define __KVM_E2K_INTERCEPTS_H

/* NOTE(review): the angle-bracket header names below were lost in
 * patch extraction — restore from the original patch before use */
#include
#include
#include
#include

#include "mmu_defs.h"

#undef	DEBUG_INTC_TIRs_MODE
#undef	DebugTIRs
#define	DEBUG_INTC_TIRs_MODE	0	/* intercept TIRs debugging */
#define	DebugTIRs(fmt, args...) \
({									\
	if (DEBUG_INTC_TIRs_MODE)					\
		pr_info("%s(): " fmt, __func__, ##args);		\
})

/* intercept handlers */
typedef int (*exc_intc_handler_t)(struct kvm_vcpu *vcpu, pt_regs_t *regs);
typedef int (*mu_intc_handler_t)(struct kvm_vcpu *vcpu,
			intc_info_mu_t *intc_info_mu, pt_regs_t *regs);

extern int parse_INTC_registers(struct kvm_vcpu_arch *vcpu);

/* descriptor of one conditional exception interception */
typedef struct cond_exc_info {
	int no;			/* relative number at VIRT_CTRL_CU.exc_c & */
				/* INTC_INFO_CU[0].exc_c fields */
	u64 exc_mask;		/* mask of absolute numbers of exceptions at */
				/* TIR[].hi.exc field and exc_..._num */
	const char *name;	/* exception (trap) name */
} cond_exc_info_t;

/* descriptor of one MU (memory unit) interception event */
typedef struct mu_event_desc {
	intc_info_mu_event_code_t code;	/* event code */
	mu_intc_handler_t handler;	/* intercept handler */
	const char *name;		/* event name */
} mu_event_desc_t;

/* Store the low half of guest TIR #TIR_no in the intercept context. */
static inline void
kvm_set_vcpu_intc_TIR_lo(struct kvm_vcpu *vcpu,
			int TIR_no, e2k_tir_lo_t TIR_lo)
{
	struct kvm_intc_cpu_context *intc_ctxt = &vcpu->arch.intc_ctxt;

	intc_ctxt->TIRs[TIR_no].TIR_lo = TIR_lo;
}

/* Store the high half of guest TIR #TIR_no in the intercept context. */
static inline void
kvm_set_vcpu_intc_TIR_hi(struct kvm_vcpu *vcpu,
			int TIR_no, e2k_tir_hi_t TIR_hi)
{
	struct kvm_intc_cpu_context *intc_ctxt = &vcpu->arch.intc_ctxt;

	intc_ctxt->TIRs[TIR_no].TIR_hi = TIR_hi;
}

/* Set the highest valid guest TIR index (-1 means "no TIRs"). */
static inline void
kvm_set_vcpu_intc_TIRs_num(struct kvm_vcpu *vcpu, int TIRs_num)
{
	vcpu->arch.intc_ctxt.nr_TIRs = TIRs_num;
}

static inline void
kvm_clear_vcpu_intc_TIRs_num(struct kvm_vcpu *vcpu)
{
	kvm_set_vcpu_intc_TIRs_num(vcpu, -1);
}

static inline bool
kvm_is_empty_vcpu_intc_TIRs(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.intc_ctxt.nr_TIRs < 0;
}

/* Read the low half of guest TIR #TIR_no (must be within nr_TIRs). */
static inline e2k_tir_lo_t
kvm_get_vcpu_intc_TIR_lo(struct kvm_vcpu *vcpu, int TIR_no)
{
	struct kvm_intc_cpu_context *intc_ctxt = &vcpu->arch.intc_ctxt;
	e2k_tir_lo_t TIR_lo;

	BUG_ON(TIR_no > kvm_get_vcpu_intc_TIRs_num(vcpu));
	TIR_lo = intc_ctxt->TIRs[TIR_no].TIR_lo;
	return TIR_lo;
}

/* Read the high half of guest TIR #TIR_no (must be within nr_TIRs). */
static inline e2k_tir_hi_t
kvm_get_vcpu_intc_TIR_hi(struct kvm_vcpu *vcpu, int TIR_no)
{
	struct kvm_intc_cpu_context *intc_ctxt = &vcpu->arch.intc_ctxt;
	e2k_tir_hi_t TIR_hi;

	BUG_ON(TIR_no > kvm_get_vcpu_intc_TIRs_num(vcpu));
	TIR_hi = intc_ctxt->TIRs[TIR_no].TIR_hi;
	return TIR_hi;
}

/*
 * Merge a new trap into guest TIR #TIR_no: intermediate TIRs are
 * zero-filled if TIR_no is beyond the current count, the new exception
 * bits are OR-ed into the existing TIR_hi, and the trap IP is taken
 * from whichever of the old/new TIR_lo has a precise (non-zero) IP.
 */
static inline void
kvm_update_vcpu_intc_TIR(struct kvm_vcpu *vcpu,
		int TIR_no, e2k_tir_hi_t TIR_hi, e2k_tir_lo_t TIR_lo)
{
	e2k_tir_lo_t g_TIR_lo;
	e2k_tir_hi_t g_TIR_hi;
	int TIRs_num;
	int tir;

	TIRs_num = kvm_get_vcpu_intc_TIRs_num(vcpu);
	if (TIRs_num < TIR_no) {
		for (tir = TIRs_num + 1; tir < TIR_no; tir++) {
			g_TIR_lo.TIR_lo_reg = GET_CLEAR_TIR_LO(tir);
			g_TIR_hi.TIR_hi_reg = GET_CLEAR_TIR_HI(tir);
			kvm_set_vcpu_intc_TIR_lo(vcpu, tir, g_TIR_lo);
			kvm_set_vcpu_intc_TIR_hi(vcpu, tir, g_TIR_hi);
		}
		g_TIR_lo.TIR_lo_reg = GET_CLEAR_TIR_LO(TIR_no);
		g_TIR_hi.TIR_hi_reg = GET_CLEAR_TIR_HI(TIR_no);
	} else {
		g_TIR_hi = kvm_get_vcpu_intc_TIR_hi(vcpu, TIR_no);
		g_TIR_lo = kvm_get_vcpu_intc_TIR_lo(vcpu, TIR_no);
		BUG_ON(g_TIR_hi.TIR_hi_j != TIR_no);
		if (TIR_lo.TIR_lo_ip == 0 && g_TIR_lo.TIR_lo_ip != 0) {
			/* some traps can be caused by kernel and have not */
			/* precision IP (for example hardware stack bounds) */
			TIR_lo.TIR_lo_ip = g_TIR_lo.TIR_lo_ip;
		} else if (TIR_lo.TIR_lo_ip != 0 && g_TIR_lo.TIR_lo_ip == 0) {
			/* new trap IP will be common for other traps */
			;
		} else {
			/* guest TIRs have always precision IP */
			;
		}
	}
	g_TIR_hi.TIR_hi_reg |= TIR_hi.TIR_hi_reg;
	g_TIR_lo.TIR_lo_reg = TIR_lo.TIR_lo_reg;
	kvm_set_vcpu_intc_TIR_hi(vcpu, TIR_no, g_TIR_hi);
	kvm_set_vcpu_intc_TIR_lo(vcpu, TIR_no, g_TIR_lo);
	if (TIR_no > TIRs_num)
		kvm_set_vcpu_intc_TIRs_num(vcpu, TIR_no);
}

/* Mark exceptions (already present in intercepted TIRs) to be passed
 * on to the guest; masked against the actually pending set. */
static inline void
kvm_need_pass_vcpu_exception(struct kvm_vcpu *vcpu, u64 exc_mask)
{
	u64 tir_exc = vcpu->arch.intc_ctxt.exceptions;

	exc_mask &= tir_exc;
	KVM_BUG_ON(exc_mask == 0);
	vcpu->arch.intc_ctxt.exc_to_pass |= exc_mask;
}

/* Mark new exceptions (not present in intercepted TIRs) to be
 * created for the guest. */
static inline void
kvm_need_create_vcpu_exception(struct kvm_vcpu *vcpu, u64 exc_mask)
{
	u64 tir_exc = vcpu->arch.intc_ctxt.exceptions;

	exc_mask &= ~tir_exc;
	KVM_BUG_ON(exc_mask == 0);
	vcpu->arch.intc_ctxt.exc_to_create |= exc_mask;
}

/* As above, additionally recording the IP the created trap should
 * carry; only one pending IP is supported at a time. */
static inline void
kvm_need_create_vcpu_exc_and_IP(struct kvm_vcpu *vcpu, u64 exc_mask, gva_t IP)
{
	KVM_BUG_ON(vcpu->arch.intc_ctxt.exc_IP_to_create != 0);
	kvm_need_create_vcpu_exception(vcpu, exc_mask);
	vcpu->arch.intc_ctxt.exc_IP_to_create = IP;
}

/* Mark pending exceptions to be removed before re-entering the guest. */
static inline void
kvm_need_delete_vcpu_exception(struct kvm_vcpu *vcpu, u64 exc_mask)
{
	u64 tir_exc = vcpu->arch.intc_ctxt.exceptions;

	exc_mask &= tir_exc;
	KVM_BUG_ON(exc_mask == 0);
	vcpu->arch.intc_ctxt.exc_to_delete |= exc_mask;
}

static inline bool
kvm_has_vcpu_exception(struct kvm_vcpu *vcpu, u64 exc_mask)
{
	u64 tir_exc = vcpu->arch.intc_ctxt.exceptions;

	return (exc_mask & tir_exc) != 0;
}

static inline bool
kvm_has_vcpu_exc_recovery_point(struct kvm_vcpu *vcpu)
{
	return kvm_has_vcpu_exception(vcpu, exc_recovery_point_mask);
}

/*
 * There are TIR_NUM(19) tir regs. Bits 64 - 56 is current tir nr
 * After each NATIVE_READ_TIR_LO_REG() we will read next tir.
 * For more info see instruction set doc.
 * Read tir hi/lo regs order is significant
 */
static inline void
restore_SBBP_TIRs(u64 sbbp[], e2k_tir_t TIRs[], int TIRs_num,
			bool tir_fz, bool g_th)
{
	virt_ctrl_cu_t virt_ctrl_cu;
	int i;

	virt_ctrl_cu = READ_VIRT_CTRL_CU_REG();

	/* Allow writing of TIRs and SBBP */
	virt_ctrl_cu.tir_rst = 1;
	WRITE_VIRT_CTRL_CU_REG(virt_ctrl_cu);

	/* write in reverse order: hardware steps to the previous
	 * register on each write (see comment above) */
	for (i = SBBP_ENTRIES_NUM - 1; i >= 0; i--)
		NATIVE_WRITE_SBBP_REG_VALUE(sbbp[i]);

#pragma loop count (2)
	for (i = TIRs_num; i >= 0; i--) {
		NATIVE_WRITE_TIR_HI_REG_VALUE(AW(TIRs[i].TIR_hi));
		NATIVE_WRITE_TIR_LO_REG_VALUE(AW(TIRs[i].TIR_lo));
	}

	/* Keep guest TIRs frozen after GLAUNCH */
	virt_ctrl_cu.VIRT_CTRL_CU_glnch_tir_fz = tir_fz;

	/* Enter guest trap handler after GLAUNCH */
	virt_ctrl_cu.VIRT_CTRL_CU_glnch_g_th = g_th;

	/* Forbid writing of TIRs and SBBP */
	virt_ctrl_cu.tir_rst = 0;
	WRITE_VIRT_CTRL_CU_REG(virt_ctrl_cu);
}

/* Empty the guest's software trap cellar (if the guest set one up). */
static inline void kvm_clear_vcpu_trap_cellar(struct kvm_vcpu *vcpu)
{
	void *tc_kaddr = vcpu->arch.mmu.tc_kaddr;
	kernel_trap_cellar_t *tc;

	tc = tc_kaddr;
	if (tc == NULL)
		return;	/* trap cellar is not inited by guest */

	/* MMU TRAP_COUNT cannot be set, so write flag of end of records */
	AW(tc->condition) = -1;
	vcpu->arch.mmu.tc_num = 0;
	kvm_write_pv_vcpu_mmu_TRAP_COUNT_reg(vcpu, 0 * 3);
}

/* FIXME: simulator bug: simulator does not reexecute requests */
/* from INTC_INFO_MU unlike the hardware, so do it by software */
static inline void kvm_restore_vcpu_trap_cellar(struct kvm_vcpu *vcpu)
{
	kvm_intc_cpu_context_t *intc_ctxt = &vcpu->arch.intc_ctxt;
	intc_info_mu_t *mu = intc_ctxt->mu;
	intc_info_mu_t *mu_event;
	unsigned long intc_mu_to_move = intc_ctxt->intc_mu_to_move;
	int mu_num = intc_ctxt->mu_num;
	int evn_no;
	void *tc_kaddr = vcpu->arch.mmu.tc_kaddr;
	kernel_trap_cellar_t *tc;
	kernel_trap_cellar_ext_t *tc_ext;
	tc_opcode_t opcode;
	int cnt, fmt;

	tc = tc_kaddr;
	tc_ext = tc_kaddr + TC_EXT_OFFSET;
	cnt = 0;

	/* copy each MU event flagged in intc_mu_to_move into the guest
	 * trap cellar (address/condition, plus tagged data for stores) */
	for (evn_no = 0;
			intc_mu_to_move != 0 && evn_no < mu_num;
				intc_mu_to_move >>= 1, evn_no++) {
		if (likely(!(intc_mu_to_move & 0x1)))
			continue;
		KVM_BUG_ON(cnt >= HW_TC_SIZE);
		mu_event = &mu[evn_no];
		tc->address = mu_event->gva;
		tc->condition = mu_event->condition;
		AW(opcode) = AS(mu_event->condition).opcode;
		fmt = AS(opcode).fmt;
		if (fmt == LDST_QP_FMT)
			tc_ext->mask = mu_event->mask;
		if (AS(mu_event->condition).store) {
			NATIVE_MOVE_TAGGED_DWORD(&mu_event->data, &tc->data);
			if (fmt == LDST_QP_FMT) {
				NATIVE_MOVE_TAGGED_DWORD(&mu_event->data_ext,
								&tc_ext->data);
			}
		}
		cnt++;
		tc++;
		tc_ext++;
	}
	KVM_BUG_ON(intc_mu_to_move != 0);

	/* MMU TRAP_COUNT cannot be set, so write flag of end of records */
	AW(tc->condition) = -1;

	intc_ctxt->intc_mu_to_move = 0;
}

extern const cond_exc_info_t cond_exc_info_table[INTC_CU_COND_EXC_MAX];
extern exc_intc_handler_t intc_exc_table[INTC_CU_COND_EXC_MAX];

static inline exc_intc_handler_t
kvm_get_cond_exc_handler(int exc_no)
{
	KVM_BUG_ON(exc_no < 0 || exc_no >= INTC_CU_COND_EXC_MAX);
	return intc_exc_table[exc_no];
}

static inline void
kvm_set_cond_exc_handler(int exc_no, exc_intc_handler_t handler)
{
	KVM_BUG_ON(exc_no < 0 || exc_no >= INTC_CU_COND_EXC_MAX);
	intc_exc_table[exc_no] = handler;
}

/* Map a conditional-exception number to its TIR exception mask,
 * sanity-checking the table entry's self-reference. */
static inline int kvm_cond_exc_no_to_exc_mask(int exc_no)
{
	KVM_BUG_ON(exc_no < 0 || exc_no >= INTC_CU_COND_EXC_MAX);
	KVM_BUG_ON(cond_exc_info_table[exc_no].no != exc_no &&
			cond_exc_info_table[exc_no].no >= 0);
	return cond_exc_info_table[exc_no].exc_mask;
}

static inline const char *kvm_cond_exc_no_to_exc_name(int exc_no)
{
	KVM_BUG_ON(exc_no < 0 || exc_no >= INTC_CU_COND_EXC_MAX);
	KVM_BUG_ON(cond_exc_info_table[exc_no].no != exc_no &&
			cond_exc_info_table[exc_no].no >= 0);
	return cond_exc_info_table[exc_no].name;
}

/* Queue a conditional exception to be passed through to the guest. */
static inline void
kvm_pass_cond_exc_to_vcpu(struct kvm_vcpu *vcpu, int exc_no)
{
	u64 exc_mask;

	exc_mask =
		kvm_cond_exc_no_to_exc_mask(exc_no);
	kvm_need_pass_vcpu_exception(vcpu, exc_mask);
}

/* Inject @trap_mask with address @IP into guest TIR #TIR_no. */
static inline void
kvm_inject_trap_TIR(struct kvm_vcpu *vcpu, int TIR_no,
			unsigned long trap_mask, e2k_addr_t IP)
{
	e2k_tir_hi_t TIR_hi;
	e2k_tir_lo_t TIR_lo;

	TIR_lo.TIR_lo_reg = GET_CLEAR_TIR_LO(TIR_no);
	TIR_lo.TIR_lo_ip = IP;
	TIR_hi.TIR_hi_reg = GET_CLEAR_TIR_HI(TIR_no);
	TIR_hi.TIR_hi_exc = trap_mask;
	kvm_update_vcpu_intc_TIR(vcpu, TIR_no, TIR_hi, TIR_lo);
	DebugTIRs("trap is injected to guest TIRs #%d hi 0x%016llx "
		"lo 0x%016llx\n",
		TIR_no, TIR_hi.TIR_hi_reg, TIR_lo.TIR_lo_reg);
}

/* Inject an external interrupt: into the top TIR if one already
 * exists (IP 0), otherwise into TIR #1 at the intercepted IP. */
static inline void
kvm_inject_interrupt(struct kvm_vcpu *vcpu, pt_regs_t *regs)
{
	int TIR_no = kvm_get_vcpu_intc_TIRs_num(vcpu);

	if (TIR_no >= 1)
		kvm_inject_trap_TIR(vcpu, TIR_no, exc_interrupt_mask, 0);
	else
		kvm_inject_trap_TIR(vcpu, 1, exc_interrupt_mask,
					regs->crs.cr0_hi.CR0_hi_IP);
}

static inline void
kvm_inject_last_wish(struct kvm_vcpu *vcpu, pt_regs_t *regs)
{
	kvm_inject_trap_TIR(vcpu, 0, exc_last_wish_mask,
				regs->crs.cr0_hi.CR0_hi_IP);
}

static inline void
kvm_inject_data_page_exc_on_IP(struct kvm_vcpu *vcpu, u64 ip)
{
	kvm_inject_trap_TIR(vcpu, 1, exc_data_page_mask, ip);
}

/* Inject exc_data_page, recovering the most precise IP available
 * from the intercepted TIRs (TIR1: synchronous fault, TIR0: hardware
 * stack SPILL/FILL or other hardware access). */
static inline void
kvm_inject_data_page_exc(struct kvm_vcpu *vcpu, pt_regs_t *regs)
{
	struct trap_pt_regs *trap = regs->trap;
	u64 ip;

	if (trap && trap->nr_TIRs >= 1 &&
			(trap->TIRs[1].TIR_hi.exc & exc_data_page_mask)) {
		/* Synchronous page fault */
		ip = trap->TIRs[1].TIR_lo.TIR_lo_ip;
	} else if (trap && trap->nr_TIRs >= 0 &&
			(trap->TIRs[0].TIR_hi.exc & exc_data_page_mask)) {
		/* Hardware stacks SPILL/FILL operation (or some
		 * other hardware activity like CUT access). */
		ip = trap->TIRs[0].TIR_lo.TIR_lo_ip;
	} else {
		WARN_ON_ONCE(1);
		/* Precise IP unknown, so take IP of intercepted command */
		ip = AS(regs->crs.cr0_hi).ip << 3;
	}
	kvm_inject_data_page_exc_on_IP(vcpu, ip);
}

static inline void
kvm_inject_instr_page_exc(struct kvm_vcpu *vcpu, pt_regs_t *regs,
			unsigned long trap_mask, e2k_addr_t IP)
{
	kvm_inject_trap_TIR(vcpu, 0, trap_mask, IP);
}

/* NOTE(review): the @IP argument is ignored here — the intercepted
 * command IP from CR0.hi is used instead; confirm this asymmetry with
 * kvm_inject_instr_page_exc() is intentional */
static inline void
kvm_inject_ainstr_page_exc(struct kvm_vcpu *vcpu, pt_regs_t *regs,
			unsigned long trap_mask, e2k_addr_t IP)
{
	kvm_inject_trap_TIR(vcpu, 0, trap_mask,
				regs->crs.cr0_hi.CR0_hi_IP);
}

/* Inject an AAU (array access unit) trap for channel @aa_no into
 * guest TIR #0, setting the TIR_hi.aa bit rather than an exc bit. */
static inline void
kvm_inject_aau_page_exc(struct kvm_vcpu *vcpu, pt_regs_t *regs,
			unsigned int aa_no)
{
	struct trap_pt_regs *trap = regs->trap;
	u64 ip;
	e2k_tir_hi_t TIR_hi;
	e2k_tir_lo_t TIR_lo;
	int TIR_no = 0;

	if (trap && trap->nr_TIRs >= 0 && trap->TIRs[0].TIR_hi.aa != 0) {
		/* Hardware stacks SPILL/FILL operation (or some
		 * other hardware activity like CUT access). */
		ip = trap->TIRs[0].TIR_lo.TIR_lo_ip;
	} else {
		/* Precise IP unknown, so take IP of intercepted command */
		pr_err("%s(): unknown precise IP, so take IP of intercepted "
			"command\n", __func__);
		ip = regs->crs.cr0_hi.CR0_hi_IP;
	}

	TIR_lo.TIR_lo_reg = GET_CLEAR_TIR_LO(TIR_no);
	TIR_lo.TIR_lo_ip = ip;
	TIR_hi.TIR_hi_reg = GET_CLEAR_TIR_HI(TIR_no);
	TIR_hi.aa = (1UL << aa_no);
	kvm_update_vcpu_intc_TIR(vcpu, TIR_no, TIR_hi, TIR_lo);
	DebugTIRs("AAU trap is injected to guest TIRs #%d hi 0x%016llx "
		"lo 0x%016llx\n",
		TIR_no, TIR_hi.TIR_hi_reg, TIR_lo.TIR_lo_reg);
}

/*
 * CU interceptions events service
 */
static inline void
kvm_reset_intc_info_cu_is_deleted(intc_info_cu_entry_t *info)
{
	info->no_restore = false;
}
static inline void
kvm_set_intc_info_cu_is_deleted(intc_info_cu_entry_t *info)
{
	info->no_restore = true;
}
static inline bool
kvm_is_intc_info_cu_deleted(intc_info_cu_entry_t *info)
{
	return info->no_restore;
}

/* Mark a CU entry as not-to-be-restored and flag the CU info dirty. */
static inline void
kvm_delete_intc_info_cu(struct kvm_vcpu *vcpu, intc_info_cu_entry_t *info)
{
	if (!likely(kvm_is_intc_info_cu_deleted(info))) {
		kvm_set_intc_info_cu_is_deleted(info);
		kvm_set_intc_info_cu_is_updated(vcpu);
	}
}

/*
 * MMU interceptions events service
 */
static inline void
kvm_reset_intc_info_mu_is_deleted(intc_info_mu_t *info)
{
	info->no_restore = false;
}
static inline void
kvm_set_intc_info_mu_is_deleted(intc_info_mu_t *info)
{
	info->no_restore = true;
}
static inline bool
kvm_is_intc_info_mu_deleted(intc_info_mu_t *info)
{
	return info->no_restore;
}

/* Mark an MU entry as not-to-be-restored and flag the MU info dirty. */
static inline void
kvm_delete_intc_info_mu(struct kvm_vcpu *vcpu, intc_info_mu_t *info)
{
	if (!likely(kvm_is_intc_info_mu_deleted(info))) {
		kvm_set_intc_info_mu_is_deleted(info);
		kvm_set_intc_info_mu_is_updated(vcpu);
	}
}

extern const mu_event_desc_t mu_events_desc_table[MU_INTC_EVENTS_MAX];

/* Look up the descriptor for an MU event code (bug on bad code or
 * a table entry whose code does not match its index). */
static inline const mu_event_desc_t *kvm_get_mu_event_desc(int evn_code)
{
	const mu_event_desc_t *mu_event;

	KVM_BUG_ON(evn_code < 0 || evn_code >= MU_INTC_EVENTS_MAX);
	mu_event = &mu_events_desc_table[evn_code];
	KVM_BUG_ON(mu_event->code != evn_code);
	return mu_event;
}

static inline mu_intc_handler_t kvm_get_mu_event_handler(int evn_code)
{
	const mu_event_desc_t *mu_event;

	mu_event = kvm_get_mu_event_desc(evn_code);
	return mu_event->handler;
}

static inline const char *kvm_get_mu_event_name(int evn_code)
{
	const mu_event_desc_t *mu_event;

	mu_event = kvm_get_mu_event_desc(evn_code);
	return mu_event->name;
}

#ifdef CONFIG_KVM_ASYNC_PF
extern intc_info_mu_event_code_t get_event_code(struct kvm_vcpu *vcpu,
						int ev_no);
extern bool intc_mu_record_asynchronous(struct kvm_vcpu *vcpu, int ev_no);
#endif /* CONFIG_KVM_ASYNC_PF */

#endif	/* __KVM_E2K_INTERCEPTS_H */
diff --git a/arch/e2k/kvm/io.c b/arch/e2k/kvm/io.c
new file mode 100644
index 000000000000..549af9b2493f
--- /dev/null
+++ b/arch/e2k/kvm/io.c
@@ -0,0 +1,904 @@

/* NOTE(review): the angle-bracket header names below were lost in
 * patch extraction — restore from the original patch before use */
#include
#include
#include
#include
#include
#include

#include
#include

#include

#include "cpu.h"
#include "mmu.h"
#include "gaccess.h"
#include "io.h"
#include "pic.h"
#include "intercepts.h"
#include "spmc_regs.h"

#include
#include
#include

#undef	DEBUG_KVM_IO_MODE
#undef	DebugKVMIO
#define	DEBUG_KVM_IO_MODE	0	/* kernel virt machine IO debugging */
#define	DebugKVMIO(fmt, args...)					\
({									\
	if (DEBUG_KVM_IO_MODE)						\
		pr_info("%s(): " fmt, __func__, ##args);		\
})

#undef	DEBUG_IO_FAULT_MODE
#undef	DebugIOPF
#define	DEBUG_IO_FAULT_MODE	0	/* IO port page fault debugging */
#define	DebugIOPF(fmt, args...)						\
({									\
	if (DEBUG_IO_FAULT_MODE)					\
		pr_info("%s(): " fmt, __func__, ##args);		\
})

#undef	DEBUG_MMIO_FAULT_MODE
#undef	DebugMMIOPF
#define	DEBUG_MMIO_FAULT_MODE	0	/* MMIO page fault debugging */
#define	DebugMMIOPF(fmt, args...) \
({									\
	if (DEBUG_MMIO_FAULT_MODE)					\
		pr_info("%s(): " fmt, __func__, ##args);		\
})

#undef	DEBUG_MMIO_SHUTDOWN_MODE
#undef	DebugMMIOSHUTDOWN
#define	DEBUG_MMIO_SHUTDOWN_MODE	0	/* MMIO shutdown debugging */
#define	DebugMMIOSHUTDOWN(fmt, args...)					\
({									\
	if (DEBUG_MMIO_SHUTDOWN_MODE || kvm_debug)			\
		pr_info("%s(): " fmt, __func__, ##args);		\
})

#define	DIRECT_IO_PORT_ACCESS	0	/* do direct access to IO port from */
					/* here */

/*
 * Move @size bytes between a flat MMIO buffer and the tagged
 * INTC_INFO_MU data word. Towards intc the value is written at
 * offset 0; from intc the lane is selected by the GPA's offset
 * within a u64 (hardware places the datum at its natural lane).
 */
static void copy_io_intc_info_data(void *mmio_data, void *intc_data,
					gpa_t gpa, int size, bool to_intc)
{
	switch (size) {
	case 1: {
		u8 *mmio = (u8 *)mmio_data;
		u8 *intc = (u8 *)intc_data;

		if (to_intc) {
			*intc = *mmio;
		} else {
			int u8_no = (gpa & (sizeof(u64) - 1)) >> 0;
			*mmio = intc[u8_no];
		}
		return;
	}
	case 2: {
		u16 *mmio = (u16 *)mmio_data;
		u16 *intc = (u16 *)intc_data;

		if (to_intc) {
			*intc = *mmio;
		} else {
			int u16_no = (gpa & (sizeof(u64) - 1)) >> 1;
			*mmio = intc[u16_no];
		}
		return;
	}
	case 4: {
		u32 *mmio = (u32 *)mmio_data;
		u32 *intc = (u32 *)intc_data;

		if (to_intc) {
			*intc = *mmio;
		} else {
			int u32_no = (gpa & (sizeof(u64) - 1)) >> 2;
			*mmio = intc[u32_no];
		}
		return;
	}
	case 8: {
		u64 *mmio = (u64 *)mmio_data;
		u64 *intc = (u64 *)intc_data;

		if (to_intc) {
			*intc = *mmio;
		} else {
			int u64_no = (gpa & (sizeof(u64) - 1)) >> 3;
			*mmio = intc[u64_no];
		}
		return;
	}
	default:
		KVM_BUG_ON(true);
	}
}

/* TODO Emulation of PCI config space should be done in QEMU */
#define PCI_SOFT_RESET_CONTROL	0x64
#define L_SOFTWARE_RESET	0x00000001

static spmc_pm1_cnt_t reg_spmc_pm1_cnt;

/* Detect a guest write of the software-reset bit in the i2c/spi
 * controller's PCI config space and request a VM restart. */
static void kvm_i2c_spi_conf_write(struct kvm_vcpu *vcpu, gpa_t conf, gpa_t addr, u32 value)
{
	WARN_ON(!conf);

	if (addr == (conf + PCI_SOFT_RESET_CONTROL) && value & L_SOFTWARE_RESET) {
		vcpu->arch.exit_shutdown_terminate = KVM_EXIT_E2K_RESTART;
		DebugMMIOSHUTDOWN("REBOOT i2c-spi probe\n");
		return;
	}
}

/* Detect a guest sleep request via the SPMC PM1_CNT register and
 * request a VM shutdown. NOTE(review): slp_typx == 5 presumably
 * corresponds to ACPI S5 soft-off — confirm against SPMC docs. */
static void kvm_spmc_conf_write(struct kvm_vcpu *vcpu, gpa_t conf, gpa_t addr, u32 value)
{
	WARN_ON(!conf);

	if (addr == (conf + SPMC_PM1_CNT_OFF)) {
		reg_spmc_pm1_cnt.reg = value;
		if (reg_spmc_pm1_cnt.sci_en == 1 && reg_spmc_pm1_cnt.slp_typx == 5 &&
				reg_spmc_pm1_cnt.slp_en == 1) {
			vcpu->arch.exit_shutdown_terminate = KVM_EXIT_SHUTDOWN;
			DebugMMIOSHUTDOWN("set HALT spmc probe\n");
			return;
		}
	}
}

/* Scan an MMIO write against every NUMA node's i2c-spi and SPMC
 * config windows for reboot/halt requests (16/32-bit writes only). */
static void kvm_check_reboot_halt(struct kvm_vcpu *vcpu, gpa_t addr, int len, const void *v)
{
	u32 value;
	int i;

	if (len == 4)
		value = *(u32 *)v;
	else if (len == 2)
		value = *(u16 *)v;
	else
		return;


	for (i = 0; i < vcpu->kvm->arch.num_numa_nodes; i++) {
		kvm_i2c_spi_conf_write(vcpu, kvm_i2c_spi_conf_base[i], addr, value);
		kvm_spmc_conf_write(vcpu, kvm_spmc_conf_base[i], addr, value);
	}
}

/*
 * Route an MMIO write: APIC/EPIC devices first, then the reboot/halt
 * probes, then the generic KVM MMIO bus. Returns 0 when handled.
 */
int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
			const void *v)
{
	if (vcpu->arch.apic &&
			!kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, len, v))
		return 0;

	if (vcpu->arch.epic &&
			!kvm_iodevice_write(vcpu, &vcpu->arch.epic->dev, addr, len, v))
		return 0;

	kvm_check_reboot_halt(vcpu, addr, len, v);

	return kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, len, v);
}

/* Route an MMIO read: APIC/EPIC first, then the KVM MMIO bus. */
int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
{
	if (vcpu->arch.apic &&
			!kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, addr, len, v))
		return 0;

	if (vcpu->arch.epic &&
			!kvm_iodevice_read(vcpu, &vcpu->arch.epic->dev, addr, len, v))
		return 0;

	return kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, len, v);
}

static void complete_intc_info_io_write(struct kvm_vcpu *vcpu,
					intc_info_mu_t *intc_info_mu)
{
	/* For stores - delete this entry from INTC_INFO_MU */
	kvm_delete_intc_info_mu(vcpu, intc_info_mu);
	trace_complete_intc_info_io_write(intc_info_mu->gpa,
					intc_info_mu->data);
}

static void complete_intc_info_io_read(struct kvm_vcpu *vcpu,
					intc_info_mu_t *intc_info_mu)
{
	/* For loads - change the event_code to MMU reg read. */
	/* Data will be read from the INTC_INFO_MU */
	intc_info_mu->hdr.event_code = IME_READ_MU;
	kvm_set_intc_info_mu_is_updated(vcpu);
	trace_complete_intc_info_io_read(intc_info_mu->gpa, intc_info_mu->data);
}

/* Attempt an in-kernel MMIO write for an intercepted store;
 * non-zero means it must go to userspace instead. */
static int vcpu_mmio_local_write(struct kvm_vcpu *vcpu, gpa_t gpa,
				int size, intc_info_mu_t *intc_info_mu)
{
	unsigned long data;
	int ret;

	copy_io_intc_info_data(&data, &intc_info_mu->data, gpa, size, false);

	ret = vcpu_mmio_write(vcpu, gpa, size, &data);
	if (ret != 0) {
		/* cannot be handled locally */
		return ret;
	}

	complete_intc_info_io_write(vcpu, intc_info_mu);
	return 0;
}

/* Attempt an in-kernel MMIO read for an intercepted load;
 * non-zero means it must go to userspace instead. */
static int vcpu_mmio_local_read(struct kvm_vcpu *vcpu, gpa_t gpa,
				int size, intc_info_mu_t *intc_info_mu)
{
	unsigned long data;
	int ret;

	ret = vcpu_mmio_read(vcpu, gpa, size, &data);
	if (ret != 0) {
		/* cannot be handled locally */
		return ret;
	}

	copy_io_intc_info_data(&data, &intc_info_mu->data, gpa, size, true);
	complete_intc_info_io_read(vcpu, intc_info_mu);
	return 0;
}

/* Fill in a single-fragment MMIO exit request for userspace (QEMU). */
static void vcpu_mmio_prepare_request(struct kvm_vcpu *vcpu,
		gpa_t gpa, void *mmio_data, int size, bool is_write)
{
	struct kvm_mmio_fragment *frag = NULL;

	BUG_ON(vcpu->mmio_nr_fragments != 0);

	frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
	vcpu->mmio_needed = 1;
	frag->gpa = gpa;
	frag->len = size;
	frag->data = mmio_data;
	vcpu->mmio_is_write = is_write;

	vcpu->arch.exit_reason = EXIT_REASON_MMIO_REQ;
}

/*
 * Handle an intercepted MMIO access: try in-kernel emulation first;
 * otherwise prepare a userspace MMIO exit and park the INTC_INFO_MU
 * entry in io_intc_info until completion. Returns 0 when handled
 * locally, PFRES_TRY_MMIO when passed to userspace.
 */
static int kvm_hv_mmio_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
			int size, bool is_write, intc_info_mu_t *intc_info_mu)
{
	int ret;

	DebugMMIOPF("started for GPA 0x%llx %s size %d byte(s)\n",
		gpa, (is_write) ? "write" : "read", size);
	/*
	 * Is this MMIO handled locally?
	 */
	if (is_write) {
		ret = vcpu_mmio_local_write(vcpu, gpa, size, intc_info_mu);
	} else {
		ret = vcpu_mmio_local_read(vcpu, gpa, size, intc_info_mu);
	}
	if (ret == 0) {
		/* Yes, MMIO is hadled locally */
		DebugMMIOPF("access to GPA 0x%llx %s size %d byte(s) was "
			"handled locally\n",
			gpa, (is_write) ? "write" : "read", size);
		return 0;
	}

	/* MMIO request should be passed to user space emulation */
	if (is_write) {
		copy_io_intc_info_data(vcpu->arch.mmio_data,
				&intc_info_mu->data, gpa, size, false);
		DebugMMIOPF("write data 0x%llx to 0x%llx size %d byte(s)\n",
			*vcpu->arch.mmio_data, gpa, size);
	}
	vcpu_mmio_prepare_request(vcpu, gpa, vcpu->arch.mmio_data, size,
					is_write);
	KVM_BUG_ON(vcpu->arch.io_intc_info);
	vcpu->arch.io_intc_info = intc_info_mu;

	DebugMMIOPF("access to GPA 0x%llx %s size %d byte(s) is passing "
		"to emulate at user space\n",
		gpa, (is_write) ? "write" : "read", size);
	return PFRES_TRY_MMIO;
}

/* Fill in a single, non-string IO-port exit request for userspace. */
static void vcpu_io_port_prepare_request(struct kvm_vcpu *vcpu,
			u16 port, int size, bool is_write)
{
	vcpu->arch.ioport.port = port;
	vcpu->arch.ioport.size = size;
	vcpu->arch.ioport.is_out = is_write;
	vcpu->arch.ioport.count = 1;
	vcpu->arch.ioport.string = 0;

	vcpu->arch.ioport.needed = 1;
	vcpu->arch.ioport.completed = 0;
	vcpu->arch.exit_reason = EXIT_REASON_IOPORT_REQ;
}

/*
 * Handle an intercepted access to the legacy x86 IO-port window:
 * always forwarded to userspace. The INTC_INFO_MU entry is parked in
 * io_intc_info until completion. Returns PFRES_TRY_MMIO.
 */
static int kvm_hv_io_port_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
			int size, bool is_write, intc_info_mu_t *intc_info_mu)
{
	u16 port = gpa - X86_IO_AREA_PHYS_BASE;

	DebugIOPF("started for GPA 0x%llx, port 0x%x %s size %d byte(s)\n",
		gpa, port, (is_write) ? "write" : "read", size);

	/* IO port request should be passed to user space emulation */
	if (is_write) {
		copy_io_intc_info_data(&vcpu->arch.ioport.data,
				&intc_info_mu->data, gpa, size, false);
		DebugIOPF("write data 0x%llx to port 0x%x size %d byte(s)\n",
			vcpu->arch.ioport.data, port, size);
	}
	vcpu_io_port_prepare_request(vcpu, port, size, is_write);
	KVM_BUG_ON(vcpu->arch.io_intc_info);
	vcpu->arch.io_intc_info = intc_info_mu;

	DebugIOPF("access to GPA 0x%llx port 0x%x %s size %d byte(s) is "
		"passing to emulate at user space\n",
		gpa, port, (is_write) ? "write" : "read", size);
	return PFRES_TRY_MMIO;
}

/*
 * Entry point for an intercepted guest access to IO space.
 * Speculative accesses are completed immediately (writes dropped,
 * reads returned as a diagnostic tagged value); otherwise the access
 * size is decoded from the trap-cellar opcode and the request is
 * routed to the IO-port or MMIO handler by GPA range.
 */
int kvm_hv_io_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
				intc_info_mu_t *intc_info_mu)
{
	tc_cond_t cond;
	tc_opcode_t opcode;
	bool is_write;
	bool spec;
	int size;

	cond = intc_info_mu->condition;
	is_write = !!(AS(cond).store);
	spec = !!(AS(cond).spec);

	if (spec) {
		if (is_write) {
			complete_intc_info_io_write(vcpu, intc_info_mu);
			DebugKVMIO("speculative write to IO area - ignoring\n");
		} else {
			NATIVE_STORE_VALUE_WITH_TAG(&intc_info_mu->data,
					ITAGDWD_IO_DEBUG, ETAGDWD);
			NATIVE_STORE_VALUE_WITH_TAG(&intc_info_mu->data_ext,
					ITAGDWD_IO_DEBUG, ETAGDWD);
			complete_intc_info_io_read(vcpu, intc_info_mu);
			DebugKVMIO("speculative read from IO area - return diag value\n");
		}
		return 0;
	}

	/* access size is 2^(fmt-1) bytes, clamped to one dword */
	AW(opcode) = AS(cond).opcode;
	KVM_BUG_ON(AS(opcode).fmt == 0);
	size = 1 << (AS(opcode).fmt - 1);
	if (size > sizeof(unsigned long))
		size = sizeof(unsigned long);

	if (gpa >= X86_IO_AREA_PHYS_BASE &&
			gpa < X86_IO_AREA_PHYS_BASE + X86_IO_AREA_PHYS_SIZE) {
		return kvm_hv_io_port_page_fault(vcpu, gpa, size, is_write,
						intc_info_mu);
	} else {
		return kvm_hv_mmio_page_fault(vcpu, gpa, size, is_write,
						intc_info_mu);
	}
}

/* (definition continues past the end of this chunk) */
static int kvm_complete_hv_io_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
				void *io_data, int size, bool is_write)
{
	intc_info_mu_t *intc_info_mu = vcpu->arch.io_intc_info;
+ + KVM_BUG_ON(intc_info_mu == NULL); + vcpu->arch.io_intc_info = NULL; + + if (!is_write) { + copy_io_intc_info_data(io_data, + &intc_info_mu->data, gpa, size, true); + DebugIOPF("read data 0x%lx from 0x%llx size %d byte(s)\n", + intc_info_mu->data, gpa, size); + complete_intc_info_io_read(vcpu, intc_info_mu); + } else { + complete_intc_info_io_write(vcpu, intc_info_mu); + } + return 0; +} + +static unsigned long +kvm_complete_guest_mmio_read(struct kvm_vcpu *vcpu, + u64 phys_addr, u64 *mmio_data, u64 __user *user_data, u8 size) +{ + int error; + + error = kvm_vcpu_copy_to_guest(vcpu, user_data, mmio_data, + sizeof(*user_data)); + if (error) { + DebugKVMIO("copy to guest (%px) to read from 0x%llx " + "size %d failed\n", + user_data, phys_addr, size); + return error; + } + return 0; +} + +unsigned long kvm_complete_guest_mmio_request(struct kvm_vcpu *vcpu) +{ + struct kvm_mmio_fragment *frag = NULL; + u64 *mmio_data; + u64 phys_addr; + u8 is_write; + u8 size; + int ret = 0; + + BUG_ON(vcpu->mmio_nr_fragments != 1); + frag = &vcpu->mmio_fragments[--vcpu->mmio_nr_fragments]; + mmio_data = frag->data; + phys_addr = frag->gpa; + size = frag->len; + is_write = vcpu->mmio_is_write; + + if (vcpu->mmio_read_completed) { + if (!is_write) { + *mmio_data &= (~0UL >> (64-(size*8))); + DebugKVMIO("read data 0x%llx size %d from " + "addr 0x%llx\n", + *mmio_data, size, phys_addr); + } + vcpu->mmio_read_completed = 0; + } else { + DebugKVMIO("mmio request is not completed: data 0x%llx, " + "size %d, addr 0x%llx\n", + *mmio_data, size, phys_addr); + *mmio_data = ~0UL; + } + if (vcpu->arch.io_intc_info != NULL) { + ret = kvm_complete_hv_io_page_fault(vcpu, phys_addr, + mmio_data, size, is_write); + } else { + if (!is_write) { + u64 __user *user_data; + + user_data = vcpu->arch.mmio_user_data; + ret = kvm_complete_guest_mmio_read(vcpu, + phys_addr, mmio_data, user_data, size); + } + } + + frag->data = NULL; + frag->len = 0; + + return ret; +} + +unsigned long 
kvm_guest_mmio_request(struct kvm_vcpu *vcpu, + u64 phys_addr, u64 __user *user_data, u8 size, u8 is_write) +{ + u64 *mmio_data = vcpu->arch.mmio_data; + + if (is_write) { + int error; + + error = kvm_vcpu_copy_from_guest(vcpu, mmio_data, user_data, + sizeof(*user_data)); + if (error) { + DebugKVMIO("copy from guest (%px) to write to 0x%llx " + "size %d failed\n", + user_data, phys_addr, size); + return error; + } + DebugKVMIO("started to write data 0x%llx size %d to " + "addr 0x%llx\n", + *mmio_data, size, phys_addr); + } else { + DebugKVMIO("started to read data size %d from addr 0x%llx\n", + size, phys_addr); + } + + /* + * Is this MMIO handled locally? + */ + if (is_write) { + if (!vcpu_mmio_write(vcpu, phys_addr, size, mmio_data)) { + trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, + size, phys_addr, mmio_data); + DebugKVMIO("MMIO handled locally: phys addr 0x%llx " + "size %d writen data 0x%llx\n", + phys_addr, size, *mmio_data); + return 0; + } + } else { + *mmio_data = 0; + if (!vcpu_mmio_read(vcpu, phys_addr, size, mmio_data)) { + trace_kvm_mmio(KVM_TRACE_MMIO_READ, + size, phys_addr, mmio_data); + DebugKVMIO("MMIO handled locally: phys addr 0x%llx " + "size %d read data 0x%llx\n", + phys_addr, size, *mmio_data); + return kvm_complete_guest_mmio_read(vcpu, + phys_addr, mmio_data, user_data, size); + } + } + + vcpu_mmio_prepare_request(vcpu, phys_addr, mmio_data, size, is_write); + vcpu->arch.mmio_user_data = user_data; + + if (!vcpu->arch.is_hv) { + return RETURN_TO_HOST_APP_HCRET; + } else { + /* inject intercept as hypercall return to switch to */ + /* vcpu run thread and handle VM exit on IO access */ + kvm_inject_vcpu_exit(vcpu); + return 0; + } + + return kvm_complete_guest_mmio_request(vcpu); +} + +static unsigned long +kvm_complete_guest_ioport_read(struct kvm_vcpu *vcpu, + u32 port, u32 *data, u32 __user *user_data, u8 size) +{ + int error; + + error = kvm_vcpu_copy_to_guest(vcpu, user_data, &data[0], + sizeof(*user_data)); + if (error) { + DebugKVMIO("copy 
to guest (%px) to read from port 0x%x " + "size %d failed\n", + user_data, port, size); + return error; + } + return 0; +} + +unsigned long kvm_complete_guest_ioport_request(struct kvm_vcpu *vcpu) +{ + u32 data[1]; + u32 port; + u32 __user *user_data; + u8 size; + u8 is_out; + int ret = 0; + + port = vcpu->arch.ioport.port; + size = vcpu->arch.ioport.size; + is_out = vcpu->arch.ioport.is_out; + user_data = vcpu->arch.ioport.user_data; + + if (vcpu->arch.ioport.completed) { + if (!is_out) { + data[0] = vcpu->arch.ioport.data; + data[0] &= (~0UL >> (64-(size*8))); + DebugKVMIO("read data 0x%x size %d from port 0x%x\n", + data[0], size, port); + } + vcpu->arch.ioport.completed = 0; + } else { + DebugKVMIO("IO port request is not completed: data 0x%llx, " + "size %d, port 0x%x\n", + vcpu->arch.ioport.data, size, port); + data[0] = ~0UL; + } + + if (vcpu->arch.io_intc_info != NULL) { + ret = kvm_complete_hv_io_page_fault(vcpu, + port, data, size, is_out); + } else { + if (!is_out) { + ret = kvm_complete_guest_ioport_read(vcpu, + port, data, user_data, size); + } + } + + return ret; +} + +unsigned long kvm_guest_ioport_request(struct kvm_vcpu *vcpu, + u16 port, u32 __user *user_data, u8 size, u8 is_out) +{ + u32 data[1]; + unsigned long ret; + + if (is_out) { + int error; + + error = kvm_vcpu_copy_from_guest(vcpu, &data[0], user_data, + sizeof(*user_data)); + if (error) { + DebugKVMIO("copy from guest (%px) to write to " + "port 0x%x size %d failed\n", + user_data, port, size); + ret = error; + goto out; + } + DebugKVMIO("write data 0x%x size %d to port 0x%x\n", + data[0], size, port); + } else { + DebugKVMIO("read data size %d from port 0x%x\n", + size, port); + } + + if (DIRECT_IO_PORT_ACCESS) { + if (is_out) { + switch (size) { + case 1: + native_outb(data[0], port); + break; + case 2: + native_outw(data[0], port); + break; + case 4: + native_outl(data[0], port); + break; + default: + DebugKVMIO("invalid size %d\n", size); + } + } else { + switch (size) { + case 1: + 
data[0] = native_inb(port); + break; + case 2: + data[0] = native_inw(port); + break; + case 4: + data[0] = native_inl(port); + break; + default: + DebugKVMIO("invalid size %d\n", size); + data[0] = ~0U; + } + ret = kvm_complete_guest_ioport_read(vcpu, + port, data, user_data, size); + goto out; + } + ret = 0; + goto out; + } + + vcpu_io_port_prepare_request(vcpu, port, size, is_out); + if (is_out) { + vcpu->arch.ioport.data = data[0]; + } + vcpu->arch.ioport.user_data = user_data; + + if (!vcpu->arch.is_hv) { + return RETURN_TO_HOST_APP_HCRET; + } else { + /* inject intercept as hypercall return to switch to */ + /* vcpu run thread and handle VM exit on IO access */ + kvm_inject_vcpu_exit(vcpu); + return 0; + } + + return kvm_complete_guest_ioport_request(vcpu); + +out: + return ret; +} + +unsigned long kvm_guest_ioport_string_request(struct kvm_vcpu *vcpu, + u16 port, void __user *data, u8 size, u32 count, u8 is_out) +{ + unsigned long ret; + + DebugKVMIO("%s %px size %d count 0x%x to port 0x%x\n", + (is_out) ? 
"write data from" : "read data to", + data, size, count, port); + + if (count * size > vcpu->arch.ioport_data_size) { + panic("kvm_guest_ioport_string_request() IO data area size " + "0x%llx < string size 0x%x\n", + vcpu->arch.ioport_data_size, count * size); + } + if (DIRECT_IO_PORT_ACCESS) { + if (is_out) { + kvm_vcpu_copy_from_guest(vcpu, + vcpu->arch.ioport_data, data, size * count); + switch (size) { + case 1: + native_outsb(port, vcpu->arch.ioport_data, count); + break; + case 2: + native_outsw(port, vcpu->arch.ioport_data, count); + break; + case 4: + native_outsl(port, vcpu->arch.ioport_data, count); + break; + default: + DebugKVMIO("invalid size %d\n", size); + } + } else { + switch (size) { + case 1: + native_insb(port, vcpu->arch.ioport_data, count); + break; + case 2: + native_insw(port, vcpu->arch.ioport_data, count); + break; + case 4: + native_insl(port, vcpu->arch.ioport_data, count); + break; + default: + DebugKVMIO("invalid size %d\n", size); + data = NULL; + } + if (data != NULL) { + kvm_vcpu_copy_to_guest(vcpu, + data, vcpu->arch.ioport_data, + size * count); + } + } + ret = (data) ? 
0 : -EINVAL; + goto out; + } + + vcpu->arch.ioport.port = port; + vcpu->arch.ioport.size = size; + vcpu->arch.ioport.data = (u64)data; + vcpu->arch.ioport.count = count; + vcpu->arch.ioport.cur_count = count; + vcpu->arch.ioport.string = 1; + vcpu->arch.ioport.is_out = is_out; + if (is_out) { + kvm_vcpu_copy_from_guest(vcpu, + vcpu->arch.ioport_data, data, size * count); + } + + vcpu->arch.ioport.needed = 1; + vcpu->arch.ioport.completed = 0; + vcpu->arch.exit_reason = EXIT_REASON_IOPORT_REQ; + + ret = RETURN_TO_HOST_APP_HCRET; + + if (vcpu->arch.ioport.completed) { + if (!is_out) { + kvm_vcpu_copy_to_guest(vcpu, + data, vcpu->arch.ioport_data, size * count); + DebugKVMIO("read data to %px size %d count 0x%x " + "from port 0x%x\n", + data, size, count, port); + } + vcpu->arch.ioport.completed = 0; + } else { + DebugKVMIO("IO port request is not completed: data at %px, " + "size %d, count 0x%x, port 0x%x\n", + data, size, count, port); + data = NULL; + if (ret == 0) + ret = -EINVAL; + } + +out: + return ret; +} + +long kvm_guest_console_io(struct kvm_vcpu *vcpu, + int io_cmd, int count, char __user *str) +{ + char buffer[512]; + struct tty_struct *tty; + long ret; + + DebugKVMIO("%s console: count 0x%x, string %px\n", + (io_cmd == CONSOLEIO_write) ? 
"write string to" : + "read string from", + count, str); + + if (count > sizeof(buffer) - 1) { + pr_err("%s(): string size 0x%x > max buffer size 0x%lx\n", + __func__, count, sizeof(buffer) - 1); + count = sizeof(buffer) - 1; + } + if (io_cmd == CONSOLEIO_write) { + ret = kvm_vcpu_copy_from_guest(vcpu, buffer, str, count); + if (ret) { + DebugKVMIO("could not copy string from user, err %ld\n", + ret); + count = ret; + goto out; + } + buffer[count] = '\0'; + tty = get_current_tty(); + if (!tty) { + DebugKVMIO("could not get current tty of guest\n"); + pr_err("%s", buffer); + goto out; + } + tty_write_message(tty, buffer); + tty_kref_put(tty); + } else { + /* read from console */ + DebugKVMIO("read string from console is not supported\n"); + count = -ENOENT; + goto out; + } +out: + return count; +} + +unsigned long +kvm_guest_notify_io(struct kvm_vcpu *vcpu, unsigned int notifier_io) +{ + vcpu->arch.notifier_io = notifier_io; + vcpu->arch.exit_reason = EXIT_NOTIFY_IO; + + return RETURN_TO_HOST_APP_HCRET; +} + +int kvm_guest_printk_on_host(struct kvm_vcpu *vcpu, char __user *msg, int size) +{ + char buffer[HOST_PRINTK_BUFFER_MAX + 1]; + int ret; + + if (size > sizeof(buffer) - 1) + size = sizeof(buffer) - 1; + ret = kvm_vcpu_copy_from_guest(vcpu, buffer, msg, size); + if (ret) { + DebugKVMIO("could not copy string from user, err %d\n", + ret); + size = ret; + goto out; + } + buffer[size] = '\0'; + size = pr_info("%s", buffer); +out: + return size; +} + +/* + * Prefetching is disabled: prefixed MMIO pages are populated on demand, in + * nonpaging/tdp_page_fault. FIXME: support shadow PT mode + * + * Alternatively, we could try to keep prefixed MMIO populated at all times, + * but that is hard to do, since guest (Lintel) may change IOEPIC base GPA on + * the fly. 
And prefetching pages from kvm_ioepic_set_base is impossible, + * since we don't know, which VCPU wrote the new base + */ +int kvm_prefetch_mmio_areas(struct kvm_vcpu *vcpu) +{ +#if 0 + struct kvm *kvm = vcpu->kvm; + struct irq_remap_table *irt = kvm->arch.irt; + int ret; + + if (!kvm_is_epic(kvm) || !kvm->arch.is_hv) + return 0; + + /* Populate the CEPIC page (for HW CEPIC only) */ + ret = kvm_prefetch_mmu_area(vcpu, EPIC_DEFAULT_PHYS_BASE, + EPIC_DEFAULT_PHYS_BASE + PAGE_SIZE, + PFERR_NOT_PRESENT_MASK | PFERR_WRITE_MASK); + if (ret != 0) { + pr_err("%s(): Failed to populate CEPIC page\n", __func__); + return ret; + } + pr_info("%s(): Mapping CEPIC page GPA 0x%x -> HPA 0x%x\n", + __func__, EPIC_DEFAULT_PHYS_BASE, EPIC_DEFAULT_PHYS_BASE); + + /* Populate the passthrough IOEPIC page */ + if (irt->enabled) { + ret = kvm_prefetch_mmu_area(vcpu, irt->gpa, + irt->gpa + PAGE_SIZE, + PFERR_NOT_PRESENT_MASK | PFERR_WRITE_MASK); + if (ret != 0) { + pr_err("%s(): Failed to map IOEPIC passthrough page GPA 0x%llx -> HPA 0x%llx\n", + __func__, irt->gpa, irt->hpa); + return ret; + } + pr_info("%s(): Mapping IOEPIC passthrough page GPA 0x%llx -> HPA 0x%llx\n", + __func__, irt->gpa, irt->hpa); + } +#endif + return 0; +} diff --git a/arch/e2k/kvm/io.h b/arch/e2k/kvm/io.h new file mode 100644 index 000000000000..18e2b595cfc1 --- /dev/null +++ b/arch/e2k/kvm/io.h @@ -0,0 +1,71 @@ + +#ifndef __E2K_KVM_HOST_IO_H_ +#define __E2K_KVM_HOST_IO_H_ + +#include +#include + +#include + +#include "cepic.h" + +extern unsigned long kvm_guest_mmio_request(struct kvm_vcpu *vcpu, + u64 phys_addr, u64 __user *user_data, u8 size, + u8 is_write); +extern unsigned long kvm_complete_guest_mmio_request(struct kvm_vcpu *vcpu); +extern unsigned long kvm_guest_ioport_request(struct kvm_vcpu *vcpu, + u16 port, u32 __user *user_data, u8 size, + u8 is_out); +extern unsigned long kvm_complete_guest_ioport_request(struct kvm_vcpu *vcpu); +extern unsigned long kvm_guest_ioport_string_request(struct kvm_vcpu *vcpu, 
+ u16 port, void __user *data, u8 size, u32 count, + u8 is_out); +extern long kvm_guest_console_io(struct kvm_vcpu *vcpu, + int io_cmd, int count, char __user *str); +extern unsigned long kvm_guest_notify_io(struct kvm_vcpu *vcpu, + unsigned int notifier_io); +extern int kvm_guest_printk_on_host(struct kvm_vcpu *vcpu, + char __user *msg, int size); +extern int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, + const void *v); +extern int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v); + +extern int kvm_prefetch_mmio_areas(struct kvm_vcpu *vcpu); +extern int kvm_hv_io_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, + intc_info_mu_t *intc_info_mu); +extern unsigned long kvm_i2c_spi_conf_base[4]; +extern unsigned long kvm_spmc_conf_base[4]; + +static inline kvm_pfn_t mmio_prefixed_gfn_to_pfn(struct kvm *kvm, gfn_t gfn) +{ + struct irq_remap_table *irt = kvm->arch.irt; + + if (!(kvm_is_epic(kvm) && kvm->arch.is_hv)) + return 0; + + /* CEPIC page - always mapped */ + if (gfn == gpa_to_gfn(EPIC_DEFAULT_PHYS_BASE)) + return EPIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT; + + /* IOEPIC page - for passthrough device */ + if (irt->enabled && gfn == gpa_to_gfn(irt->gpa)) + return irt->hpa >> PAGE_SHIFT; + + /* Legacy VGA area - speed up VGA passthrough */ + if (kvm->arch.legacy_vga_passthrough) { + gpa_t gpa = gfn_to_gpa(gfn); + + if (gpa >= VGA_VRAM_PHYS_BASE && + gpa < VGA_VRAM_PHYS_BASE + VGA_VRAM_SIZE) + return gfn; + } + + return 0; +} + +static inline bool is_mmio_prefixed_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) +{ + return !!mmio_prefixed_gfn_to_pfn(vcpu->kvm, gfn); +} + +#endif /* __E2K_KVM_HOST_IO_H_ */ diff --git a/arch/e2k/kvm/ioapic.c b/arch/e2k/kvm/ioapic.c new file mode 100644 index 000000000000..aa030975fc51 --- /dev/null +++ b/arch/e2k/kvm/ioapic.c @@ -0,0 +1,444 @@ +/* + * Copyright (C) 2001 MandrakeSoft S.A. + * + * MandrakeSoft S.A. 
+ * 43, rue d'Aboukir + * 75002 Paris - France + * http://www.linux-mandrake.com/ + * http://www.mandrakesoft.com/ + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Yunhong Jiang + * Yaozu (Eddie) Dong + * Based on Xen 3.1 code. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ioapic.h" +#include "lapic.h" +#include "irq.h" + +#if 0 +#define ioapic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) +#else +#define ioapic_debug(fmt, arg...) +#endif +static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq); + +static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, + unsigned long addr, + unsigned long length) +{ + unsigned long result = 0; + + switch (ioapic->ioregsel) { + case IOAPIC_REG_VERSION: + result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16) + | (IOAPIC_VERSION_ID & 0xff)); + break; + + case IOAPIC_REG_APIC_ID: + case IOAPIC_REG_ARB_ID: + result = ((ioapic->id & 0xff) << 24); + break; + + default: + { + u32 redir_index = (ioapic->ioregsel - 0x10) >> 1; + u64 redir_content; + + ASSERT(redir_index < IOAPIC_NUM_PINS); + + redir_content = ioapic->redirtbl[redir_index].bits; + result = (ioapic->ioregsel & 0x1) ? 
+ (redir_content >> 32) & 0xffffffff : + redir_content & 0xffffffff; + break; + } + } + + return result; +} + +static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx) +{ + union kvm_ioapic_redirect_entry *pent; + int injected = -1; + + pent = &ioapic->redirtbl[idx]; + + if (!pent->fields.mask) { + injected = ioapic_deliver(ioapic, idx); + if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG) + pent->fields.remote_irr = 1; + } + + return injected; +} + +void kvm_make_scan_ioapic_request(struct kvm *kvm) +{ + kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC); +} + +void kvm_vcpu_request_scan_ioapic(struct kvm *kvm) +{ + int i; + + for (i = 0; i < kvm->arch.num_numa_nodes; i++) { + struct kvm_ioapic *ioapic = kvm->arch.vioapic[i]; + + if (!ioapic) + return; + } + kvm_make_scan_ioapic_request(kvm); +} + +void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm) +{ + if (!ioapic_in_kernel(kvm)) + return; + kvm_make_scan_ioapic_request(kvm); +} + +static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) +{ + unsigned index; + bool mask_before, mask_after; + union kvm_ioapic_redirect_entry *e; + + switch (ioapic->ioregsel) { + case IOAPIC_REG_VERSION: + /* Writes are ignored. 
*/ + break; + + case IOAPIC_REG_APIC_ID: + ioapic->id = (val >> 24) & 0xff; + break; + + case IOAPIC_REG_ARB_ID: + break; + + default: + index = (ioapic->ioregsel - 0x10) >> 1; + + ioapic_debug("change redir index %x val %x\n", index, val); + if (index >= IOAPIC_NUM_PINS) + return; + e = &ioapic->redirtbl[index]; + mask_before = e->fields.mask; + if (ioapic->ioregsel & 1) { + e->bits &= 0xffffffff; + e->bits |= (u64) val << 32; + } else { + e->bits &= ~0xffffffffULL; + e->bits |= (u32) val; + e->fields.remote_irr = 0; + } + mask_after = e->fields.mask; + if (mask_before != mask_after) + kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after); + if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG + && ioapic->irr & (1 << index)) + ioapic_service(ioapic, index); + break; + } +} + +static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq) +{ + union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq]; + struct kvm_lapic_irq irqe; + + ioapic_debug("dest_id=%x dest_mode=%x delivery_mode=%x " + "vector=%x trig_mode=%x\n", + entry->fields.dest_id, entry->fields.dest_mode, + entry->fields.delivery_mode, entry->fields.vector, + entry->fields.trig_mode); + + irqe.dest_id = entry->fields.dest_id; + irqe.vector = entry->fields.vector; + irqe.dest_mode = entry->fields.dest_mode; + irqe.trig_mode = entry->fields.trig_mode; + irqe.delivery_mode = entry->fields.delivery_mode << 8; + irqe.level = 1; + irqe.shorthand = 0; + +#ifdef CONFIG_X86 + /* Always delivery PIT interrupt to vcpu 0 */ + if (irq == 0) { + irqe.dest_mode = 0; /* Physical mode. 
*/ + /* need to read apic_id from apic register since + * it can be rewritten */ + irqe.dest_id = ioapic->kvm->bsp_vcpu->vcpu_id; + } +#endif + return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe); +} + +int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level) +{ + u32 old_irr; + u32 mask = 1 << irq; + union kvm_ioapic_redirect_entry entry; + int ret = 1; + + mutex_lock(&ioapic->lock); + old_irr = ioapic->irr; + if (irq >= 0 && irq < IOAPIC_NUM_PINS) { + entry = ioapic->redirtbl[irq]; + level ^= entry.fields.polarity; + if (!level) + ioapic->irr &= ~mask; + else { + int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG); + ioapic->irr |= mask; + if ((edge && old_irr != ioapic->irr) || + (!edge && !entry.fields.remote_irr)) + ret = ioapic_service(ioapic, irq); + else + ret = 0; /* report coalesced interrupt */ + } + trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0); + } + mutex_unlock(&ioapic->lock); + + return ret; +} + +static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector) +{ + int i; + + for (i = 0; i < IOAPIC_NUM_PINS; i++) { + union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i]; + + if (ent->fields.vector != vector) + continue; + + /* + * We are dropping lock while calling ack notifiers because ack + * notifier callbacks for assigned devices call into IOAPIC + * recursively. Since remote_irr is cleared only after call + * to notifiers if the same vector will be delivered while lock + * is dropped it will be put into irr and will be delivered + * after ack notifier returns. 
+ */ + mutex_unlock(&ioapic->lock); + kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i); + mutex_lock(&ioapic->lock); + + if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG) + continue; + + ent->fields.remote_irr = 0; + if (!ent->fields.mask && (ioapic->irr & (1 << i))) + ioapic_service(ioapic, i); + } +} + +void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector) +{ + struct kvm_ioapic *ioapic = + vcpu->kvm->arch.vioapic[vcpu->arch.node_id]; + mutex_lock(&ioapic->lock); + __kvm_ioapic_update_eoi(ioapic, vector); + mutex_unlock(&ioapic->lock); +} + +static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev) +{ + return container_of(dev, struct kvm_ioapic, dev); +} + +static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr) +{ + return ((addr >= ioapic->base_address && + (addr < ioapic->base_address + IOAPIC_MEM_LENGTH))); +} + +static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this, + gpa_t addr, int len, void *val) +{ + struct kvm_ioapic *ioapic = to_ioapic(this); + u32 result; + if (!ioapic_in_range(ioapic, addr)) + return -EOPNOTSUPP; + + ioapic_debug("addr %lx\n", (unsigned long)addr); + ASSERT(!(addr & 0xf)); /* check alignment */ + + addr &= 0xff; + mutex_lock(&ioapic->lock); + switch (addr) { + case IOAPIC_REG_SELECT: + result = ioapic->ioregsel; + break; + + case IOAPIC_REG_WINDOW: + result = ioapic_read_indirect(ioapic, addr, len); + break; + + default: + result = 0; + break; + } + mutex_unlock(&ioapic->lock); + + switch (len) { + case 8: + *(u64 *) val = result; + break; + case 1: + case 2: + case 4: + memcpy(val, (char *)&result, len); + break; + default: + printk(KERN_WARNING "ioapic: wrong length %d\n", len); + } + return 0; +} + +static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, + gpa_t addr, int len, const void *val) +{ + struct kvm_ioapic *ioapic = to_ioapic(this); + u32 data; + if (!ioapic_in_range(ioapic, addr)) + return -EOPNOTSUPP; + + ASSERT(!(addr & 0xf)); 
/* check alignment */ + + if (len == 4 || len == 8) + data = *(u32 *) val; + else { + printk(KERN_WARNING "ioapic: Unsupported size %d\n", len); + return 0; + } + + ioapic_debug("ioapic_mmio_write addr=%px len=%d data=0x%x\n", + (void *)addr, len, data); + + addr &= 0xff; + mutex_lock(&ioapic->lock); + switch (addr) { + case IOAPIC_REG_SELECT: + ioapic->ioregsel = data; + break; + + case IOAPIC_REG_WINDOW: + ioapic_write_indirect(ioapic, data); + break; +#ifdef CONFIG_IA64 + case IOAPIC_REG_EOI: + __kvm_ioapic_update_eoi(ioapic, data); + break; +#endif + + default: + break; + } + mutex_unlock(&ioapic->lock); + return 0; +} + +static void kvm_ioapic_reset(struct kvm_ioapic *ioapic, int node_id) +{ + int i; + + for (i = 0; i < IOAPIC_NUM_PINS; i++) + ioapic->redirtbl[i].fields.mask = 1; + ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS + + 0x1000 * node_id; + ioapic->ioregsel = 0; + ioapic->irr = 0; + ioapic->id = 0; +} + +static const struct kvm_io_device_ops ioapic_mmio_ops = { + .read = ioapic_mmio_read, + .write = ioapic_mmio_write, +}; + +int kvm_ioapic_init(struct kvm *kvm) +{ + struct kvm_ioapic *ioapic; + int i; + int ret = -EPERM; + + for (i = 0; i < kvm->arch.num_numa_nodes; i++) { + ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL); + if (!ioapic) { + ret = -ENOMEM; + goto out; + } + mutex_init(&ioapic->lock); + kvm->arch.vioapic[i] = ioapic; + kvm_ioapic_reset(ioapic, i); + kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops); + ioapic->kvm = kvm; + ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, + ioapic->base_address, IOAPIC_MEM_LENGTH, &ioapic->dev); +out: if (ret < 0) { +again: kfree(kvm->arch.vioapic[i]); + kvm->arch.vioapic[i] = NULL; + if (i) { + i--; + kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, + &kvm->arch.vioapic[i]->dev); + goto again; + } + return ret; + } + } + return ret; +} + +int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) +{ + struct kvm_ioapic *ioapic = ioapic_irqchip(kvm, state->node_id); + if (!ioapic) + 
return -EINVAL; + + mutex_lock(&ioapic->lock); + memcpy(state, ioapic, sizeof(struct kvm_ioapic_state)); + mutex_unlock(&ioapic->lock); + return 0; +} + +int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) +{ + struct kvm_ioapic *ioapic = ioapic_irqchip(kvm, state->node_id); + if (!ioapic) + return -EINVAL; + + mutex_lock(&ioapic->lock); + memcpy(ioapic, state, sizeof(struct kvm_ioapic_state)); + mutex_unlock(&ioapic->lock); + return 0; +} diff --git a/arch/e2k/kvm/ioapic.h b/arch/e2k/kvm/ioapic.h new file mode 100644 index 000000000000..94ee961f2489 --- /dev/null +++ b/arch/e2k/kvm/ioapic.h @@ -0,0 +1,109 @@ +#ifndef __KVM_IO_APIC_H +#define __KVM_IO_APIC_H + +#include + +#include + +#ifdef CONFIG_E2K +#include "lapic.h" +#endif /* CONFIG_E2K */ + +struct kvm; +struct kvm_vcpu; + +#define IOAPIC_NUM_PINS KVM_IOAPIC_NUM_PINS +#define IOAPIC_VERSION_ID 0x11 /* IOAPIC version */ +#define IOAPIC_EDGE_TRIG 0 +#define IOAPIC_LEVEL_TRIG 1 + +#define IOAPIC_DEFAULT_BASE_ADDRESS 0xfec00000 +#define IOAPIC_MEM_LENGTH 0x100 + +/* Direct registers. */ +#define IOAPIC_REG_SELECT 0x00 +#define IOAPIC_REG_WINDOW 0x10 +#define IOAPIC_REG_EOI 0x40 /* IA64 IOSAPIC only */ + +/* Indirect registers. 
*/ +#define IOAPIC_REG_APIC_ID 0x00 /* x86 IOAPIC only */ +#define IOAPIC_REG_VERSION 0x01 +#define IOAPIC_REG_ARB_ID 0x02 /* x86 IOAPIC only */ + +/*ioapic delivery mode*/ +#define IOAPIC_FIXED 0x0 +#define IOAPIC_LOWEST_PRIORITY 0x1 +#define IOAPIC_PMI 0x2 +#define IOAPIC_NMI 0x4 +#define IOAPIC_INIT 0x5 +#define IOAPIC_EXTINT 0x7 + +union kvm_ioapic_redirect_entry { + u64 bits; + struct { + u8 vector; + u8 delivery_mode:3; + u8 dest_mode:1; + u8 delivery_status:1; + u8 polarity:1; + u8 remote_irr:1; + u8 trig_mode:1; + u8 mask:1; + u8 reserve:7; + u8 reserved[4]; + u8 dest_id; + } fields; +}; + +struct kvm_ioapic { + u64 base_address; + u32 ioregsel; + u32 id; + u32 irr; + u32 pad; + union kvm_ioapic_redirect_entry redirtbl[IOAPIC_NUM_PINS]; + u32 node_id; + unsigned long irq_states[IOAPIC_NUM_PINS]; + struct kvm_io_device dev; + struct kvm *kvm; + void (*ack_notifier)(void *opaque, int irq); + struct mutex lock; +}; + +#undef ASSERT +#ifdef DEBUG +#define ASSERT(x) \ +do { \ + if (!(x)) { \ + printk(KERN_EMERG "assertion failed %s: %d: %s\n", \ + __FILE__, __LINE__, #x); \ + BUG(); \ + } \ +} while (0) +#else +#define ASSERT(x) do { } while (0) +#endif + +static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm, int node_id) +{ + return kvm->arch.vioapic[node_id]; +} + +static inline int ioapic_in_kernel(struct kvm *kvm) +{ + int ret; + + ret = (ioapic_irqchip(kvm, 0) != NULL); + return ret; +} + +int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, + int short_hand, int dest, int dest_mode); +int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2); +void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector); +int kvm_ioapic_init(struct kvm *kvm); +int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level); +int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state); +int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state); + +#endif diff --git a/arch/e2k/kvm/ioepic.c 
b/arch/e2k/kvm/ioepic.c
new file mode 100644
index 000000000000..e17d7332c53d
--- /dev/null
+++ b/arch/e2k/kvm/ioepic.c
@@ -0,0 +1,569 @@
/*
 * In-kernel IOEPIC (Elbrus I/O interrupt controller) model.
 *
 * NOTE(review): the <...> targets of the bare #include lines below were
 * stripped when this patch was flattened -- restore them from the
 * original patch before building.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include

#include "ioepic.h"
#include "pic.h"
#include "irq.h"
#include "mmu.h"

/* Compile-time switch for verbose IOEPIC register tracing */
#if 0
#define ioepic_debug(fmt, arg...) pr_err(fmt, ##arg)
#else
#define ioepic_debug(fmt, arg...)
#endif

#undef DEBUG_IRQ_DELIVER_MODE
#undef DebugIRQ
#define DEBUG_IRQ_DELIVER_MODE 0 /* IRQ deliver debugging */
#define DebugIRQ(fmt, args...) \
({ \
	if (DEBUG_IRQ_DELIVER_MODE) \
		pr_info("%s(): " fmt, __func__, ##args); \
})

#undef DEBUG_COALESCED_IRQ_MODE
#undef DebugCIRQ
#define DEBUG_COALESCED_IRQ_MODE 0 /* Coalesced IRQ debugging */
#define DebugCIRQ(fmt, args...) \
({ \
	if (DEBUG_COALESCED_IRQ_MODE) \
		pr_info("%s(): " fmt, __func__, ##args); \
})

#undef DEBUG_IOEPIC_MODE
#undef DebugIOEPIC
#define DEBUG_IOEPIC_MODE 0 /* IOEPIC base debugging */
#define DebugIOEPIC(fmt, args...) \
({ \
	if (DEBUG_IOEPIC_MODE || kvm_debug) \
		pr_info("%s(): " fmt, __func__, ##args); \
})


static int ioepic_deliver_to_cepic(struct kvm_ioepic *ioepic, int irq);

/*
 * Try to deliver the interrupt pending on pin 'idx' to the CEPIC.
 * Returns the delivery result (<= 0 means not injected).  Must be
 * called with ioepic->lock held.
 */
static int ioepic_service(struct kvm_ioepic *ioepic, unsigned int idx)
{
	struct IO_EPIC_route_entry *entry;
	int injected = -1;

	entry = &ioepic->redirtbl[idx];

	if (!entry->int_ctrl.bits.mask) {
		injected = ioepic_deliver_to_cepic(ioepic, idx);
		/* Set delivery_status bit for level interrupts */
		if (injected && entry->int_ctrl.bits.trigger)
			entry->int_ctrl.bits.delivery_status = 1;
		DebugIRQ("IRQ #%d was %s\n", idx,
			(injected) ?
			"injected" : "not injected");
	}

	return injected;
}

/* Ask every VCPU to rescan IOEPIC state on next entry */
void kvm_make_scan_ioepic_request(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOEPIC);
}

/*
 * Request a rescan only once every per-node IOEPIC has been created;
 * bail out early if any node's model is still missing.
 */
void kvm_vcpu_request_scan_ioepic(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->arch.num_numa_nodes; i++) {
		struct kvm_ioepic *ioepic = kvm->arch.ioepic[i];

		if (!ioepic)
			return;
	}
	kvm_make_scan_ioepic_request(kvm);
}

/*
 * Build a CEPIC irq message from the pin's redirection-table entry and
 * hand it to the EPIC delivery code.
 */
int ioepic_deliver_to_cepic(struct kvm_ioepic *ioepic, int irq)
{
	struct IO_EPIC_route_entry *entry = &ioepic->redirtbl[irq];
	struct kvm_cepic_irq irqe;

	ioepic_debug("dest=%x vector=%x trig_mode=%x dlvm=%x\n",
			entry->addr_low.bits.dst, entry->msg_data.bits.vector,
			entry->int_ctrl.bits.trigger, entry->msg_data.bits.dlvm);

	irqe.dest_id = entry->addr_low.bits.dst;
	irqe.vector = entry->msg_data.bits.vector;
	irqe.trig_mode = entry->int_ctrl.bits.trigger;
	irqe.delivery_mode = entry->msg_data.bits.dlvm;
	irqe.shorthand = CEPIC_ICR_DST_FULL;

	return kvm_irq_delivery_to_epic(ioepic->kvm, ioepic->id, &irqe);
}

/*
 * Assert/de-assert IOEPIC pin 'irq'.  Edge interrupts fire on a 0->1
 * transition; level interrupts fire unless delivery_status is still
 * set (in-flight).  Returns 0 when the interrupt was coalesced.
 */
int kvm_ioepic_set_irq(struct kvm_ioepic *ioepic, int irq, int pin_status)
{
	u32 old_irr;
	u32 mask = 1 << irq;
	struct IO_EPIC_route_entry entry;
	int ret = 1;

	mutex_lock(&ioepic->lock);
	old_irr = ioepic->irr;
	if (irq >= 0 && irq < IOEPIC_NUM_PINS) {
		entry = ioepic->redirtbl[irq];
		if (!pin_status) {
			ioepic->irr &= ~mask;
		} else {
			int level = entry.int_ctrl.bits.trigger;

			ioepic->irr |= mask;
			if ((!level && old_irr != ioepic->irr) ||
				(level && !entry.int_ctrl.bits.delivery_status)) {
				ret = ioepic_service(ioepic, irq);
			} else {
				DebugCIRQ("IRQ #%d is coalesced\n", irq);
				ret = 0;	/* report coalesced interrupt */
			}
		}
		trace_kvm_ioepic_set_irq(entry.addr_low.bits.dst,
			entry.msg_data.bits.vector, entry.msg_data.bits.dlvm,
			entry.int_ctrl.bits.trigger, entry.int_ctrl.bits.mask,
			irq, pin_status, ret == 0);
	}
	mutex_unlock(&ioepic->lock);

	return ret;
}

/* TODO Only node 0 is
   supported */
static void ioepic_notify_acked_irq(struct kvm_ioepic *ioepic, unsigned int pin)
{
	/*
	 * We are dropping lock while calling ack notifiers because ack
	 * notifier callbacks for assigned devices call into IOEPIC
	 * recursively
	 */
	mutex_unlock(&ioepic->lock);
	kvm_notify_acked_irq(ioepic->kvm, KVM_IRQCHIP_IOEPIC_NODE0, pin);
	mutex_lock(&ioepic->lock);
}

/* Fire ack notifiers for every pin routed to 'vector'.  Caller holds
 * ioepic->lock (temporarily dropped inside ioepic_notify_acked_irq). */
static void __kvm_ioepic_update_eoi(struct kvm_ioepic *ioepic, int vector,
			int trigger_mode)
{
	int i;

	for (i = 0; i < IOEPIC_NUM_PINS; i++) {
		struct IO_EPIC_route_entry *ent = &ioepic->redirtbl[i];

		if (ent->msg_data.bits.vector != vector)
			continue;

		ioepic_notify_acked_irq(ioepic, i);
	}
}

/* In ioapic, this is called from LAPIC's EOI */
void kvm_ioepic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
{
	struct kvm_ioepic *ioepic = vcpu->kvm->arch.ioepic[vcpu->arch.node_id];

	mutex_lock(&ioepic->lock);
	__kvm_ioepic_update_eoi(ioepic, vector, trigger_mode);
	mutex_unlock(&ioepic->lock);
}

static inline struct kvm_ioepic *to_ioepic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_ioepic, dev);
}

/* True when 'addr' falls inside this IOEPIC's MMIO window */
static inline int ioepic_in_range(struct kvm_ioepic *ioepic, gpa_t addr)
{
	return ((addr >= ioepic->base_address &&
		(addr < ioepic->base_address + IOEPIC_MEM_LENGTH)));
}

/* Only latest version of IOEPIC is supported */
static inline unsigned int ioepic_read_version(void)
{
	union IO_EPIC_VERSION reg;

	reg.raw = 0;
	reg.bits.version = IOEPIC_VERSION_2;
	reg.bits.entries = IOEPIC_NUM_PINS;

	return reg.raw;
}

/*
 * MMIO read handler.  Layout: one 4K page per pin, register offset in
 * the low 12 bits.  Passthrough pins are forwarded to the real IOEPIC.
 */
static int ioepic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
		gpa_t addr, int len, void *val)
{
	struct kvm_ioepic *ioepic = to_ioepic(this);
	unsigned int offset = addr - ioepic->base_address;
	unsigned int reg_offset = offset & 0xfff;
	unsigned int pin = offset >> 12;
	unsigned int result;
	struct irq_remap_table *irt = vcpu->kvm->arch.irt;

	if (!ioepic_in_range(ioepic, addr))
		return -EOPNOTSUPP;

	ASSERT(len == 4);	/* 4 bytes access */

	/* In case of passthrough device, read directly from real IOEPIC */
	if (irt->enabled && pin == irt->guest_pin) {
		unsigned int host_pin_offset = irt->host_pin << 12;
		unsigned int node = irt->host_node;

		/* Reaching this path means the passthrough page was not
		 * mapped into the guest as expected */
		E2K_LMS_HALT_ERROR(1);
		pr_err("%s(): error: IOEPIC passthrough page not mapped\n",
			__func__);

		result = io_epic_read(node, host_pin_offset + reg_offset);
		*(u32 *) val = result;
		ioepic_debug("passthrough ioepic read offset %x val %x\n",
			offset, result);
		return 0;
	}

	mutex_lock(&ioepic->lock);
	switch (reg_offset) {
	case IOEPIC_ID:
		result = ioepic->id;
		break;
	case IOEPIC_VERSION:
		result = ioepic_read_version();
		break;
	case IOEPIC_TABLE_INT_CTRL(0):
		result = ioepic->redirtbl[pin].int_ctrl.raw;
		break;
	case IOEPIC_TABLE_MSG_DATA(0):
		result = ioepic->redirtbl[pin].msg_data.raw;
		break;
	case IOEPIC_TABLE_ADDR_HIGH(0):
		result = ioepic->redirtbl[pin].addr_high;
		break;
	case IOEPIC_TABLE_ADDR_LOW(0):
		result = ioepic->redirtbl[pin].addr_low.raw;
		break;
	default:
		/* Requester-ID registers read as zero; anything else is
		 * unknown and reads as all-ones */
		if (reg_offset >= IOEPIC_INT_RID(0) &&
				reg_offset < IOEPIC_INT_RID(IOEPIC_NUM_PINS)) {
			result = 0;
		} else {
			ioepic_debug("unknown ioepic reg 0x%x\n", offset);
			result = 0xffffffff;
		}
		break;
	}
	mutex_unlock(&ioepic->lock);

	*(u32 *) val = result;

	ioepic_debug("%s offset %x val %x\n", __func__, offset, result);

	return 0;
}

/* TODO software interrupts not fully supported */
static void ioepic_write_int_ctrl(struct kvm_ioepic *ioepic, unsigned int pin,
		unsigned int data)
{
	union IO_EPIC_INT_CTRL old_val, new_val;
	bool eoi, sint_eoi, unmasking, irr_pending;

	old_val.raw = ioepic->redirtbl[pin].int_ctrl.raw;
	new_val.raw = data;

	eoi = new_val.bits.delivery_status;
	sint_eoi = new_val.bits.software_int;

	/*
	 * Fire ack notifiers (used by irqfd resampler for INTx passthrough)
	 * Notifier will de-assert the pin; check interrupt status in the
	 * device and either unmask the interrupt, or re-assert the pin
	 * without unmasking.  Keeping delivery_status asserted to make
	 * sure we don't inject an interrupt twice
	 */
	if (eoi)
		ioepic_notify_acked_irq(ioepic, pin);

	/* Writing R/W1C fields does not change the RW bits (IOEPIC ver. 2) */
	if (eoi || sint_eoi) {
		new_val.raw = old_val.raw;

		if (eoi)
			new_val.bits.delivery_status = 0;

		if (sint_eoi)
			new_val.bits.software_int = 0;
	}
	ioepic->redirtbl[pin].int_ctrl.raw = new_val.raw;

	unmasking = old_val.bits.mask && !new_val.bits.mask;
	irr_pending = ioepic->irr & (1 << pin);

	/* Re-deliver a still-pending level interrupt after EOI/unmask */
	if (!new_val.bits.mask && irr_pending) {
		if (!eoi && !unmasking)
			pr_err("kvm_ioepic: firing pin %d, not eoi/unmasking\n", pin);

		ioepic_service(ioepic, pin);
	}
}

/*
 * MMIO write handler; mirrors ioepic_mmio_read's layout.  Passthrough
 * pins are forwarded to the real IOEPIC with the MSI address fields
 * forced to the host's values.
 */
static int ioepic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
		gpa_t addr, int len, const void *val)
{
	struct kvm_ioepic *ioepic = to_ioepic(this);
	unsigned int offset = addr - ioepic->base_address;
	unsigned int reg_offset = offset & 0xfff;
	unsigned int pin = offset >> 12;
	unsigned int data = *(u32 *) val;
	struct irq_remap_table *irt = vcpu->kvm->arch.irt;

	if (!ioepic_in_range(ioepic, addr))
		return -EOPNOTSUPP;

	ASSERT(len == 4);	/* 4 bytes access */

	/* In case of passthrough device, write directly to real IOEPIC */
	if (irt->enabled && pin == irt->guest_pin) {
		unsigned int host_pin_offset = irt->host_pin << 12;
		unsigned int node = irt->host_node;
		struct iohub_sysdata *sd = irt->vfio_dev->bus->sysdata;

		E2K_LMS_HALT_ERROR(1);
		pr_err("%s(): error: IOEPIC passthrough page not mapped\n",
			__func__);

		switch (reg_offset) {
		case IOEPIC_TABLE_ADDR_HIGH(0):
			if (data != sd->pci_msi_addr_hi) {
				pr_err("kvm_ioepic: guest's RT_MSI_HI (0x%x) does not match host's RT_MSI_HI (0x%x)\n",
					data, sd->pci_msi_addr_hi);
				data = sd->pci_msi_addr_hi;
			}
			break;
		case IOEPIC_TABLE_ADDR_LOW(0):
			if (data != sd->pci_msi_addr_lo) {
				pr_err("kvm_ioepic: guest's RT_MSI_LO (0x%x) does not match host's RT_MSI_LO (0x%x)\n",
					data, sd->pci_msi_addr_lo);
				data = sd->pci_msi_addr_lo;
			}
			break;
		}

		ioepic_debug("passthrough ioepic write offset %x val %x\n",
			offset, data);
		io_epic_write(node, host_pin_offset + reg_offset, data);
		return 0;
	}

	ioepic_debug("%s offset %x val %x\n", __func__, offset, data);

	mutex_lock(&ioepic->lock);
	switch (reg_offset) {
	case IOEPIC_ID:
		ioepic->id = data;
		break;
	case IOEPIC_VERSION:
		/* read-only */
		break;
	case IOEPIC_TABLE_INT_CTRL(0):
		ioepic_write_int_ctrl(ioepic, pin, data);
		break;
	case IOEPIC_TABLE_MSG_DATA(0):
		ioepic->redirtbl[pin].msg_data.raw = data;
		break;
	case IOEPIC_TABLE_ADDR_HIGH(0):
		ioepic->redirtbl[pin].addr_high = data;
		break;
	case IOEPIC_TABLE_ADDR_LOW(0):
		ioepic->redirtbl[pin].addr_low.raw = data;
		break;
	default:
		if (!(reg_offset >= IOEPIC_INT_RID(0) &&
				reg_offset < IOEPIC_INT_RID(IOEPIC_NUM_PINS)))
			ioepic_debug("unknown ioepic reg 0x%x\n", offset);
		break;
	}
	mutex_unlock(&ioepic->lock);

	return 0;
}

/* Mask all pins and restore the default MMIO base */
void kvm_ioepic_reset(struct kvm_ioepic *ioepic)
{
	int i;

	for (i = 0; i < IOEPIC_NUM_PINS; i++)
		ioepic->redirtbl[i].int_ctrl.bits.mask = 1;

	ioepic->base_address = IOEPIC_DEFAULT_BASE_ADDRESS;
	ioepic->id = 0;
}

static const struct kvm_io_device_ops ioepic_mmio_ops = {
	.read = ioepic_mmio_read,
	.write = ioepic_mmio_write,
};

/*
 * Allocate and register one IOEPIC model per NUMA node.  On failure
 * the 'again' loop unwinds every previously registered node before
 * returning the error.
 */
int kvm_ioepic_init(struct kvm *kvm)
{
	struct kvm_ioepic *ioepic;
	int i;
	int ret = -EPERM;

	for (i = 0; i < kvm->arch.num_numa_nodes; i++) {
		ioepic = kzalloc(sizeof(struct kvm_ioepic), GFP_KERNEL);
		if (!ioepic) {
			ret = -ENOMEM;
			goto out;
		}
		mutex_init(&ioepic->lock);
		kvm->arch.ioepic[i] = ioepic;
		kvm_ioepic_reset(ioepic);
		kvm_iodevice_init(&ioepic->dev, &ioepic_mmio_ops);
		ioepic->kvm = kvm;
		ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS,
			ioepic->base_address, IOEPIC_MEM_LENGTH, &ioepic->dev);
out:
		if (ret < 0) {
again:
			kfree(kvm->arch.ioepic[i]);
			kvm->arch.ioepic[i] = NULL;
			if (i) {
				i--;
				kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
					&kvm->arch.ioepic[i]->dev);
				goto again;
			}
			return ret;
		}
	}

	return ret;
}

/* Unregister and free every per-node IOEPIC model */
void kvm_ioepic_destroy(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->arch.num_numa_nodes; i++) {
		struct kvm_ioepic *ioepic = kvm->arch.ioepic[i];

		if (!ioepic)
			return;

		mutex_lock(&kvm->slots_lock);
		kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioepic->dev);
		mutex_unlock(&kvm->slots_lock);
		kvm->arch.ioepic[i] = NULL;
		kfree(ioepic);
	}
}

/*
 * Move the IOEPIC MMIO window to 'new_base' (guest BAR reprogramming).
 * Re-registers the device on the KVM MMIO bus and, for hardware
 * virtualization with passthrough enabled, records the new GPA of the
 * passthrough pin page.
 */
int kvm_ioepic_set_base(struct kvm *kvm, unsigned long new_base, int node_id)
{
	struct kvm_ioepic *ioepic;
	int ret;
	struct irq_remap_table *irt = kvm->arch.irt;

	ioepic = kvm->arch.ioepic[node_id];

	if (!ioepic) {
		pr_err("%s(): IOEPIC is not yet created, ignore setup\n",
			__func__);
		return -ENODEV;
	}
	if (ioepic->base_address == new_base) {
		DebugIOEPIC("%s(): IOEPIC base 0x%lx is the same, "
			"so ignore update\n",
			__func__, new_base);
		return 0;
	} else if (new_base == 0xffffffff) {
		/* all-ones write is the PCI BAR sizing probe */
		DebugIOEPIC("%s(): ignore probing write to IOEPIC BAR\n", __func__);
		return 0;
	}

	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioepic->dev);
	ioepic->base_address = new_base;
	kvm_iodevice_init(&ioepic->dev, &ioepic_mmio_ops);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, new_base,
					IOEPIC_MEM_LENGTH, &ioepic->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kvm->arch.ioepic[node_id] = NULL;
		kfree(ioepic);
		pr_err("%s(): could not register IOEPIC as MMIO bus device, "
			"error %d\n",
			__func__, ret);

		return ret;
	}

	if (kvm->arch.is_hv && irt->enabled && irt->guest_node == node_id) {
		gpa_t new_gpa = new_base + (irt->guest_pin << PAGE_SHIFT);

		if (irt->gpa) {
			/*
			 * Not first time mapping this page, need to unmap first
			 */

			gfn_t gfn = gpa_to_gfn(irt->gpa);

			/* This will
			   request TLB flushes */
			direct_unmap_prefixed_mmio_gfn(kvm, gfn);
			pr_info("%s(): Unmapping IOEPIC passthrough page GPA 0x%llx -> HPA 0x%llx\n",
				__func__, irt->gpa, irt->hpa);
		}

		/*
		 * Mapping is done on demand (nonpaging/tdp_page_fault)
		 * Prefetching is impossible, since we don't know, which
		 * VCPU changed base from VM IOCTL
		 */
		irt->gpa = new_gpa;
	}

	return 0;
}

/* KVM_GET/SET_IRQCHIP is not yet supported for IOEPIC */

#if 0
int kvm_get_ioepic(struct kvm *kvm, struct kvm_ioepic_state *state)
{
	struct kvm_ioepic *ioepic = ioepic_irqchip(kvm);

	if (!ioepic)
		return -EINVAL;

	mutex_lock(&ioepic->lock);
	memcpy(state, ioepic, sizeof(struct kvm_ioepic_state));
	mutex_unlock(&ioepic->lock);
	return 0;
}

int kvm_set_ioepic(struct kvm *kvm, struct kvm_ioepic_state *state)
{
	struct kvm_ioepic *ioepic = ioepic_irqchip(kvm);

	if (!ioepic)
		return -EINVAL;

	mutex_lock(&ioepic->lock);
	memcpy(ioepic, state, sizeof(struct kvm_ioepic_state));
	mutex_unlock(&ioepic->lock);
	return 0;
}
#endif
diff --git a/arch/e2k/kvm/ioepic.h b/arch/e2k/kvm/ioepic.h
new file mode 100644
index 000000000000..bbb75d1b31b4
--- /dev/null
+++ b/arch/e2k/kvm/ioepic.h
@@ -0,0 +1,65 @@
#ifndef __KVM_IO_EPIC_H
#define __KVM_IO_EPIC_H

/* NOTE(review): bare #include targets stripped by patch flattening */
#include
#include
#include
#include
#include "cepic.h"

#define IOEPIC_DEFAULT_BASE_ADDRESS 0xfec00000
#define IOEPIC_MEM_LENGTH 0x100000	/* 1M window: 4K page per pin */
#define IOEPIC_NUM_PINS KVM_IOEPIC_NUM_PINS

/* In-kernel IOEPIC model state; one instance per NUMA node */
struct kvm_ioepic {
	u64 base_address;	/* guest-physical MMIO base */
	u32 id;
	u32 irr;		/* pending request bit per pin */
	struct IO_EPIC_route_entry redirtbl[IOEPIC_NUM_PINS];
	/* per-pin line state, one bit per irq_source_id */
	unsigned long irq_states[IOEPIC_NUM_PINS];
	struct kvm_io_device dev;	/* registration on the KVM MMIO bus */
	struct kvm *kvm;
	void (*ack_notifier)(void *opaque, int irq);
	struct mutex lock;	/* protects all mutable state above */
};

#undef ASSERT
#ifdef DEBUG
#define ASSERT(x)							\
do {									\
	if (!(x)) {							\
		pr_emerg("assertion failed %s: %d: %s\n",		\
			__FILE__, __LINE__, #x);			\
		BUG();							\
	}								\
} while (0)
#else
#define ASSERT(x) do { } while (0)
#endif

/* IOEPIC model for the given NUMA node, NULL if not created yet */
static inline struct kvm_ioepic *ioepic_irqchip(struct kvm *kvm, int node_id)
{
	return kvm->arch.ioepic[node_id];
}

/* Nonzero when the in-kernel IOEPIC exists (node 0 probes for all) */
static inline int ioepic_in_kernel(struct kvm *kvm)
{
	int ret;

	ret = (ioepic_irqchip(kvm, 0) != NULL);
	return ret;
}

int kvm_epic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
void kvm_ioepic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode);
int kvm_ioepic_init(struct kvm *kvm);
void kvm_ioepic_destroy(struct kvm *kvm);
int kvm_ioepic_set_base(struct kvm *kvm, unsigned long new_base, int node_id);
int kvm_ioepic_set_irq(struct kvm_ioepic *ioepic, int irq, int level);
void kvm_ioepic_reset(struct kvm_ioepic *ioepic);
void kvm_make_scan_ioepic_request(struct kvm *kvm);
#if 0
int kvm_get_ioepic(struct kvm *kvm, struct kvm_ioepic_state *state);
int kvm_set_ioepic(struct kvm *kvm, struct kvm_ioepic_state *state);
#endif
#endif
diff --git a/arch/e2k/kvm/irq.h b/arch/e2k/kvm/irq.h
new file mode 100644
index 000000000000..23d24b753c53
--- /dev/null
+++ b/arch/e2k/kvm/irq.h
@@ -0,0 +1,268 @@
/*
 * irq.h: In-kernel interrupt controller related definitions
 * Copyright (c) 2011, MCST.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef __IRQ_H
#define __IRQ_H

/* NOTE(review): bare #include targets stripped by patch flattening */
#include

#include

#include "cpu_defs.h"
#include "pic.h"

#undef DEBUG_KVM_VIRQs_MODE
#undef DebugVIRQs
#define DEBUG_KVM_VIRQs_MODE 0 /* VIRQs debugging */
#define DebugVIRQs(fmt, args...)
\
({ \
	if (DEBUG_KVM_VIRQs_MODE) \
		pr_info("%s(): " fmt, __func__, ##args); \
})

/* guest kernel thread can have not time to handle previous timer interrupt */
/* so need restart hrtimer on some small addition */
#define GUEST_RESTART_TIME_NS 1000000	/* 1 ms addition */

/* The in-kernel irqchip is always present on e2k */
static inline int irqchip_in_kernel(struct kvm *kvm)
{
	return 1;
}

static inline int lapic_in_kernel(struct kvm_vcpu *vcpu)
{
	/* Same as irqchip_in_kernel(vcpu->kvm), but with less
	 * pointer chasing and no unnecessary memory barriers.
	 */
	return vcpu->arch.apic != NULL;
}

/*
 * Basic functions to access to VIRQs state structure on host
 * (see asm/kvm/guest.h)
 */
static inline kvm_virqs_state_t *
kvm_get_guest_virqs_state(struct kvm_vcpu *vcpu)
{
	return &vcpu->arch.kmap_vcpu_state->virqs;
}

static inline atomic_t *
kvm_get_guest_timer_virqs_num(struct kvm_vcpu *vcpu)
{
	kvm_virqs_state_t *virqs = kvm_get_guest_virqs_state(vcpu);

	return &virqs->timer_virqs_num;
}
static inline atomic_t *
kvm_get_guest_hvc_virqs_num(struct kvm_vcpu *vcpu)
{
	kvm_virqs_state_t *virqs = kvm_get_guest_virqs_state(vcpu);

	return &virqs->hvc_virqs_num;
}

/* Map a VIRQ id to its pending-count atomic; ERR_PTR on unknown id */
static inline atomic_t *
kvm_get_guest_virqs_atomic_counter(struct kvm_vcpu *vcpu, int virq_id)
{
	switch (virq_id) {
	case KVM_VIRQ_TIMER:
		return kvm_get_guest_timer_virqs_num(vcpu);
	case KVM_VIRQ_HVC:
		return kvm_get_guest_hvc_virqs_num(vcpu);
	case KVM_VIRQ_LAPIC:
		return kvm_get_guest_lapic_virqs_num(vcpu);
	case KVM_VIRQ_CEPIC:
		return kvm_get_guest_cepic_virqs_num(vcpu);
	default:
		return ERR_PTR(-EINVAL);
	}
}

extern void kvm_init_clockdev(struct kvm_vcpu *vcpu);
extern void kvm_cancel_clockdev(struct kvm_vcpu *vcpu);

extern void kvm_guest_set_clockevent(struct kvm_vcpu *vcpu,
			unsigned long delta);

extern int kvm_setup_default_irq_routing(struct kvm *kvm);

extern pid_t kvm_guest_intr_handler(struct kvm_vcpu *vcpu, int irq, int virq_id,
			irq_handler_t fn, void *arg);
extern int kvm_guest_intr_thread(int vcpu_id, int irq, int virq_id,
			int gpid_nr, irq_thread_t fn, void *arg);
extern int kvm_guest_free_intr_handler(struct kvm *kvm, int irq, void *arg);
extern int kvm_get_guest_direct_virq(struct kvm_vcpu *vcpu,
			int irq, int virq_id);
extern int kvm_free_guest_direct_virq(struct kvm *kvm, int irq);
extern int kvm_vcpu_interrupt(struct kvm_vcpu *vcpu, int irq);
extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, int virq_id);
extern int kvm_guest_wait_for_virq(struct kvm *kvm, int irq, bool in_progress);
extern void kvm_inject_lapic_virq(struct kvm_lapic *apic);
extern void kvm_inject_cepic_virq(struct kvm_cepic *epic);
extern void kvm_inject_nmi(struct kvm_vcpu *vcpu);
extern enum hrtimer_restart kvm_apic_timer_fn(struct hrtimer *data);
extern enum hrtimer_restart kvm_epic_timer_fn(struct hrtimer *data);
extern int kvm_find_pending_virqs(struct kvm_vcpu *vcpu,
			bool inject, bool wakeup);
extern int kvm_dec_vcpu_pending_virq(struct kvm_vcpu *vcpu, int virq_no);

static inline int kvm_wake_up_pending_virqs(struct kvm_vcpu *vcpu)
{
	return kvm_find_pending_virqs(vcpu, false, true);
}
static inline int kvm_get_pending_virqs_num(struct kvm_vcpu *vcpu)
{
	return kvm_find_pending_virqs(vcpu, false, false);
}
static inline bool kvm_is_handling_vcpu_virqs(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.on_virqs_handling;
}
static inline void kvm_set_handling_vcpu_virqs(struct kvm_vcpu *vcpu)
{
	vcpu->arch.on_virqs_handling = true;
}
static inline void kvm_clear_handling_vcpu_virqs(struct kvm_vcpu *vcpu)
{
	vcpu->arch.on_virqs_handling = false;
}
/* Hardware-stack-bounds traps are only emulated for non-HV guests */
static inline bool kvm_test_hw_stack_bounds_waiting(struct kvm_vcpu *vcpu,
			thread_info_t *ti)
{
	if (likely(vcpu->arch.is_hv)) {
		return false;
	}
	return test_guest_hw_stack_bounds_waiting(ti,
			exc_proc_stack_bounds_mask |
			exc_chain_stack_bounds_mask);
}

/* kvm->arch.virq_lock should be locked by caller */
static inline bool
kvm_has_virqs_to_guest(struct kvm_vcpu *vcpu)
{
	int virqs_num;
	bool has_pending_virqs;

	virqs_num = kvm_wake_up_pending_virqs(vcpu);
	has_pending_virqs = kvm_test_pending_virqs(vcpu);
	DebugVIRQs("on VCPU #%d, pending flag %d VIRQs number is %d\n",
		vcpu->vcpu_id, has_pending_virqs, virqs_num);
	if (!has_pending_virqs && virqs_num == 0) {
		/* none VIRQs and none pending VIRQs flag */
		/* so nothing to pass */
		return false;
	} else if (!has_pending_virqs && virqs_num != 0) {
		/* there are VIRQs and none pending VIRQs flag */
		/* Do not pass new interrupt because of old interrupt is */
		/* in progress */
		return false;
	} else if (has_pending_virqs && virqs_num == 0) {
		/* none VIRQS and there is pending VIRQs flag */
		/* it can be if host want pass new interrupt, but guest */
		/* is now handling old interrupt and see already new VIRQ */
		/* so do not pass new interrupt, guest should handle old, */
		/* host should clear pending VIRQs flag */
		BUG_ON(!kvm_test_and_clear_pending_virqs(vcpu));
		kvm_clear_virqs_injected(vcpu);
		return false;
	} else if (has_pending_virqs && virqs_num != 0) {
		/* there are VIRQs and there are pending VIRQs flag */
		/* So it need pass new interrupt */
		;
	} else {
		/* unknown and impossible case */
		WARN_ON(true);
	}
	return true;
}

#ifdef CONFIG_DIRECT_VIRQ_INJECTION
/*
 * Inject 'last wish' to PSR to cause trap after return on guest kernel
 * This trap needs to inject one more trap (interrupt on pending VIRQs)
 * to implement direct injection of interrupts on guest VCPU thread.
 * FIXME: 'Last wish' method is too costly, need implement direct call
 * of guest trap handling, similar to deferred traps.
 */
static __always_inline bool
kvm_test_inject_direct_guest_virqs(struct kvm_vcpu *vcpu,
		struct thread_info *ti, unsigned long upsr, unsigned long psr)
{
	if (!kvm_test_pending_virqs(vcpu) &&
			!kvm_test_hw_stack_bounds_waiting(vcpu, ti))
		return false;
	if (kvm_guest_vcpu_irqs_disabled(vcpu, upsr, psr)) {
		/* guest IRQs is now disabled, so it cannot pass interrupts */
		/* right now, delay while appropriate case */
		return false;
	}
	if (!kvm_vcpu_is_epic(vcpu) && !kvm_check_lapic_priority(vcpu)) {
		/* do not inject an interrupt with a lower priority */
		return false;
	}

	return true;
}
static __always_inline bool
kvm_try_inject_direct_guest_virqs(struct kvm_vcpu *vcpu, struct thread_info *ti,
		unsigned long upsr, unsigned long psr)
{
	if (!kvm_test_inject_direct_guest_virqs(vcpu, ti, upsr, psr))
		/* there is not VIRQs to inject */
		return false;

	if (kvm_test_virqs_injected(vcpu)) {
		/* already injected */
		return false;
	}

	BUG_ON(vcpu->arch.virq_wish);
	vcpu->arch.virq_wish = true;
	return true;
}
static __always_inline int
kvm_guest_handled_virqs(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.virq_injected)
		/* host did not inject any interrupt */
		return 0;
	vcpu->arch.virq_injected = false;
	return 0;
}
#else	/* ! CONFIG_DIRECT_VIRQ_INJECTION */
static __always_inline bool
kvm_test_inject_direct_guest_virqs(struct kvm_vcpu *vcpu,
		struct thread_info *ti, unsigned long upsr, unsigned long psr)
{
	return false;
}
static __always_inline bool
kvm_try_inject_direct_guest_virqs(struct kvm_vcpu *vcpu,
		struct thread_info *ti, unsigned long upsr, unsigned long psr)
{
	return false;
}
static __always_inline int
kvm_guest_handled_virqs(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif	/* CONFIG_DIRECT_VIRQ_INJECTION */

extern void kvm_free_all_VIRQs(struct kvm *kvm);

#endif	/* __IRQ_H */

diff --git a/arch/e2k/kvm/irq_comm.c b/arch/e2k/kvm/irq_comm.c
new file mode 100644
index 000000000000..2c7992ddf8de
--- /dev/null
+++ b/arch/e2k/kvm/irq_comm.c
@@ -0,0 +1,1021 @@
/*
 * irq_comm.c: Common API for in kernel interrupt controller
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 * Authors:
 *   Yaozu (Eddie) Dong
 *
 */

/* NOTE(review): bare #include targets stripped by patch flattening */
#include
#include
#include

#include
#include
#ifdef CONFIG_IA64
#include
#endif

#include "irq.h"
#include "pic.h"
#include "ioapic.h"
#include "ioepic.h"

#undef DEBUG_IRQ_DELIVER_MODE
#undef DebugIRQ
#define DEBUG_IRQ_DELIVER_MODE 0 /* IRQ deliver debugging */
#define DebugIRQ(fmt, args...)
\
({ \
	if (DEBUG_IRQ_DELIVER_MODE) \
		pr_info("%s(): " fmt, __func__, ##args); \
})

/*
 * Fold the per-source-id line state: the pin is asserted while any
 * source still holds it high (logical OR for level-triggered lines).
 */
static inline int kvm_irq_line_state(unsigned long *irq_state,
			int irq_source_id, int level)
{
	/* Logical OR for level trig interrupt */
	if (level)
		set_bit(irq_source_id, irq_state);
	else
		clear_bit(irq_source_id, irq_state);

	return !!(*irq_state);
}

/* i8259 PIC routing callback; the PIC model exists only on x86 */
static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
			struct kvm *kvm, int irq_source_id, int level,
			bool line_status)
{
#ifdef CONFIG_X86
	struct kvm_pic *pic = pic_irqchip(kvm);
	level = kvm_irq_line_state(&pic->irq_states[e->irqchip.pin],
				irq_source_id, level);
	return kvm_pic_set_irq(pic, e->irqchip.pin, level);
#else
	return -1;
#endif
}

/* IOAPIC routing callback; gsi/64 selects the per-node chip */
static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
			struct kvm *kvm, int irq_source_id, int level,
			bool line_status)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic[e->gsi / 64];
	level = kvm_irq_line_state(&ioapic->irq_states[e->irqchip.pin],
				irq_source_id, level);

	return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, level);
}

/* IOEPIC routing callback; gsi/64 selects the per-node chip */
static int kvm_set_ioepic_irq(struct kvm_kernel_irq_routing_entry *e,
			struct kvm *kvm, int irq_source_id, int level,
			bool line_status)
{
	struct kvm_ioepic *ioepic = kvm->arch.ioepic[e->gsi / 64];

	level = kvm_irq_line_state(&ioepic->irq_states[e->irqchip.pin],
				irq_source_id, level);

	return kvm_ioepic_set_irq(ioepic, e->irqchip.pin, level);
}

inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
{
#ifdef CONFIG_IA64
	return irq->delivery_mode ==
		(IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
#else
	return irq->delivery_mode == APIC_DM_LOWEST;
#endif
}


/* Translate an APIC delivery mode into the CEPIC equivalent */
static u32 convert_apic_to_epic_dlvm(u32 apic_dlvm)
{
	u32 epic_dlvm;

	switch (apic_dlvm) {
	case APIC_DM_LOWEST:
	case APIC_DM_FIXED:
	case APIC_DM_EXTINT:
		epic_dlvm = CEPIC_ICR_DLVM_FIXED_EXT;
		break;
	case APIC_DM_SMI:
		epic_dlvm = CEPIC_ICR_DLVM_SMI;
		break;
	case APIC_DM_NMI:
		epic_dlvm = CEPIC_ICR_DLVM_NMI;
		break;
	case APIC_DM_INIT:
		epic_dlvm = CEPIC_ICR_DLVM_INIT;
		break;
	case APIC_DM_STARTUP:
		epic_dlvm = CEPIC_ICR_DLVM_STARTUP;
		break;
	case APIC_DM_REMRD:
	default:
		pr_err("Unsupported delivery mode %x\n", apic_dlvm);
		epic_dlvm = CEPIC_ICR_DLVM_FIXED_EXT;
		break;
	}

	return epic_dlvm;
}

/* Translate an APIC destination shorthand into the CEPIC equivalent */
static u32 convert_apic_to_epic_shorthand(u32 apic_shorthand)
{
	u32 epic_shorthand;

	switch (apic_shorthand) {
	case APIC_DEST_NOSHORT:
		epic_shorthand = CEPIC_ICR_DST_FULL;
		break;
	case APIC_DEST_SELF:
		epic_shorthand = CEPIC_ICR_DST_SELF;
		break;
	case APIC_DEST_ALLINC:
		epic_shorthand = CEPIC_ICR_DST_ALLINC;
		break;
	case APIC_DEST_ALLBUT:
		epic_shorthand = CEPIC_ICR_DST_ALLBUT;
		break;
	default:
		pr_err("Bad dest shorthand value %x\n", apic_shorthand);
		epic_shorthand = CEPIC_ICR_DST_FULL;
		break;
	}

	return epic_shorthand;
}


/*
 * Deliver an APIC-format interrupt to the hardware EPIC by converting
 * it to CEPIC encoding first.  'src' may be NULL for chip-less sources
 * (then SELF/ALLBUT shorthands cannot be resolved and are reported).
 */
int kvm_irq_delivery_to_hw_apic(struct kvm *kvm, struct kvm_lapic *src,
			struct kvm_lapic_irq *irq_apic)
{
	struct kvm_cepic_irq irq_epic;
	int src_id = 0;

	irq_epic.dest_id = irq_apic->dest_id;
	irq_epic.vector = irq_apic->vector;
	irq_epic.trig_mode = irq_apic->trig_mode;
	irq_epic.delivery_mode =
		convert_apic_to_epic_dlvm(irq_apic->delivery_mode);
	irq_epic.shorthand =
		convert_apic_to_epic_shorthand(irq_apic->shorthand);
	if (src)
		src_id = kvm_vcpu_to_full_cepic_id(src->vcpu);
	else
		if (irq_epic.shorthand == CEPIC_ICR_DST_ALLBUT ||
				irq_epic.shorthand == CEPIC_ICR_DST_SELF)
			pr_err("%s(): Unknown source for vector 0x%x\n",
				__func__, irq_epic.vector);

	return kvm_irq_delivery_to_hw_epic(kvm, src_id, &irq_epic);
}

/*
 * Software-APIC delivery: walk all VCPUs, match destination, and
 * either deliver to each match or (lowest-priority mode) only to the
 * lowest-priority matching VCPU.
 */
int kvm_irq_delivery_to_sw_apic(struct kvm *kvm, struct kvm_lapic *src,
			struct kvm_lapic_irq *irq)
{
	int i, r = -1;
	struct kvm_vcpu *vcpu, *lowest = NULL;

	if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
			kvm_is_dm_lowest_prio(irq))
		printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_apic_present(vcpu))
			continue;

		if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
					irq->dest_id, irq->dest_mode))
			continue;

		if (!kvm_is_dm_lowest_prio(irq)) {
			if (r < 0)
				r = 0;
			r += kvm_apic_set_irq(vcpu, irq);
		} else {
			if (!lowest)
				lowest = vcpu;
			else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
				lowest = vcpu;
		}
	}

	if (lowest)
		r = kvm_apic_set_irq(lowest, irq);

	return r;
}

/* VCPU is not running now. Set bit in the PMIRR copy in hw context */
static int kvm_hw_epic_set_irq_vector(struct kvm_vcpu *vcpu, unsigned int vector)
{
	if (vector >= CEPIC_PMIRR_NR_BITS || vector == 0) {
		pr_err("Error: Invalid EPIC vector value %u\n", vector);
		return -1;
	}

	/* byte-per-vector layout in BGI mode, bitmap otherwise */
	if (unlikely(epic_bgi_mode)) {
		vcpu->arch.hw_ctxt.cepic->pmirr_byte[vector] = 1;
	} else {
		unsigned int epic_pmirr = vector >> 6;
		atomic64_or(BIT_ULL_MASK(vector & 0x3f),
			&vcpu->arch.hw_ctxt.cepic->pmirr[epic_pmirr]);
	}

	return 1;
}

/* VCPU is not running now.
   Set bit in the PNMIRR copy in hw context */
static int kvm_hw_epic_set_smi(struct kvm_vcpu *vcpu)
{
	union cepic_pnmirr reg;

	reg.raw = 0;
	reg.bits.smi = 1;

	atomic_or(reg.raw, &vcpu->arch.hw_ctxt.cepic->pnmirr);

	return 1;
}

static int kvm_hw_epic_set_nm_special(struct kvm_vcpu *vcpu)
{
	union cepic_pnmirr reg;

	reg.raw = 0;
	reg.bits.nm_special = 1;

	atomic_or(reg.raw, &vcpu->arch.hw_ctxt.cepic->pnmirr);

	return 1;
}

static int kvm_hw_epic_set_nmi(struct kvm_vcpu *vcpu)
{
	union cepic_pnmirr reg;

	reg.raw = 0;
	reg.bits.nmi = 1;

	atomic_or(reg.raw, &vcpu->arch.hw_ctxt.cepic->pnmirr);

	return 1;
}

static int kvm_hw_epic_set_init(struct kvm_vcpu *vcpu)
{
	union cepic_pnmirr reg;

	reg.raw = 0;
	reg.bits.init = 1;

	atomic_or(reg.raw, &vcpu->arch.hw_ctxt.cepic->pnmirr);

	return 1;
}

static int kvm_hw_epic_set_startup(struct kvm_vcpu *vcpu, unsigned int vector)
{
	union cepic_pnmirr reg;

	reg.raw = 0;
	reg.bits.startup = 1;
	reg.bits.startup_entry = vector & CEPIC_PNMIRR_STARTUP_ENTRY;

	atomic_or(reg.raw, &vcpu->arch.hw_ctxt.cepic->pnmirr);

	return 1;
}

/*
 * Lintel uses:
 * - SMI for devices, hidden from x86
 * - NM-special for broadcast IPI
 * - INIT and STARTUP for waking up a secondary CPU
 */
int kvm_hw_epic_deliver_to_pirr(struct kvm_vcpu *vcpu, unsigned int vector,
			u8 dlvm)
{
	switch (dlvm) {
	case CEPIC_ICR_DLVM_FIXED_EXT:
	case CEPIC_ICR_DLVM_FIXED_IPI:
		return kvm_hw_epic_set_irq_vector(vcpu, vector);
	case CEPIC_ICR_DLVM_SMI:
		return kvm_hw_epic_set_smi(vcpu);
	case CEPIC_ICR_DLVM_NM_SPECIAL:
		return kvm_hw_epic_set_nm_special(vcpu);
	case CEPIC_ICR_DLVM_NMI:
		return kvm_hw_epic_set_nmi(vcpu);
	case CEPIC_ICR_DLVM_INIT:
		return kvm_hw_epic_set_init(vcpu);
	case CEPIC_ICR_DLVM_STARTUP:
		return kvm_hw_epic_set_startup(vcpu, vector);
	default:
		pr_err("IOEPIC: unsupported dlvm 0x%x (vect 0x%x)\n", dlvm,
			vector);
		return -1;
	}
}

/* Compose the full CEPIC id (cepicn + prepicn) for a VCPU */
u32 kvm_vcpu_to_full_cepic_id(const struct kvm_vcpu *vcpu)
{
	union cepic_id epic_reg_id;

	epic_reg_id.raw = 0;

	if (!kvm_vcpu_is_epic(vcpu) && kvm_vcpu_is_hw_apic(vcpu)) {
		epic_reg_id.bits.cepicn = vcpu->arch.hard_cpu_id;
	} else {
		epic_reg_id.bits.cepicn = vcpu->vcpu_id /
						vcpu->kvm->arch.num_numa_nodes;
		epic_reg_id.bits.prepicn = vcpu->arch.node_id;
	}

	return epic_reg_id.raw;
}

/* VCPU is running now. Send an interrupt to guest through host's ICR */
int kvm_hw_epic_deliver_to_icr(struct kvm_vcpu *vcpu, unsigned int vector,
			u8 dlvm)
{
	union cepic_icr reg;

	/*
	 * Wait if other IPI is currently being delivered
	 */
	epic_wait_icr_idle();

	/*
	 * Set destination in CEPIC_ICR2
	 */
	reg.raw = 0;
	reg.bits.dst = kvm_vcpu_to_full_cepic_id(vcpu);
	reg.bits.gst_id = vcpu->kvm->arch.vmid.nr;
	reg.bits.dst_sh = CEPIC_ICR_DST_FULL;
	reg.bits.dlvm = dlvm;
	reg.bits.vect = vector;

	/*
	 * Send the guest interrupt
	 */
	epic_write_d(CEPIC_ICR, reg.raw);

	return 1;
}

/* Does 'cepic_id' match the (dest, shorthand) pair relative to 'src'? */
int kvm_epic_match_dest(int cepic_id, int src, int short_hand, int dest)
{
	int result = 0;

	DebugIRQ("cepic_id 0x%x, src 0x%x, dest 0x%x, short_hand 0x%x\n",
		cepic_id, src, dest, short_hand);

	switch (short_hand) {
	case CEPIC_ICR_DST_FULL:
		result = cepic_id == dest;
		break;
	case CEPIC_ICR_DST_SELF:
		result = cepic_id == src;
		break;
	case CEPIC_ICR_DST_ALLBUT:
		result = cepic_id != src;
		break;
	case CEPIC_ICR_DST_ALLINC:
		result = 1;
		break;
	default:
		pr_warn("Bad dest shorthand value %x\n", short_hand);
		break;
	}

	return result;
}

static void kvm_wake_up_irq(struct kvm_vcpu *vcpu)
{
	/* There is no need to kick the target vcpu into hypervisor mode:
	 * - if it is running in guest mode then hardware EPIC support will
	 *   deliver the interrupt directly to guest's EPIC and trigger
	 *   interrupt (kernel mode) in guest;
	 * - if it is running in QEMU mode/preempted or halted then
	 *   kvm_vcpu_wake_up() will correspondingly either do nothing
	 *   or unhalt it. */
	kvm_vcpu_wake_up(vcpu);
}

/* TODO fix this and all other delivery functions to return 0 on success
 * and proper errno on error */
static int kvm_irq_delivery_to_hw_epic_single(struct kvm_vcpu *vcpu,
		const struct kvm_cepic_irq *irq)
{
	unsigned long flags;
	bool dat_active;
	int ret;

	raw_spin_lock_irqsave(&vcpu->arch.epic_dat_lock, flags);
	dat_active = vcpu->arch.epic_dat_active;
	trace_irq_delivery(irq->vector, irq->delivery_mode,
			vcpu->vcpu_id, vcpu->arch.epic_dat_active);

	if (dat_active) {
		ret = kvm_hw_epic_deliver_to_icr(vcpu,
				irq->vector, irq->delivery_mode);
		/*
		 * Although kvm_irq_delivery_*() functions do set the
		 * required condition for the target VCPU wake up
		 * (either in P[N]MIRR in memory or in registers),
		 * there might be a race if we do not wait for ICR.stat:
		 *
		 *	VCPU0				VCPU1
		 * --------------------------------------------------------
		 *				DAT is active
		 * Sees that epic_dat_active()
		 * is true and calls
		 * kvm_hw_epic_deliver_to_icr()
		 *
		 * Sends an IPI through ICR,
		 * it hits in DAT and sends
		 * message to target PREPIC
		 *				invalidates DAT in all
		 *				PREPICs (while IPI is
		 *				still in flight)
		 *
		 *				Checks for pending
		 *				interrupts in
		 *				kvm_arch_vcpu_runnable()
		 *
		 *				Goes to sleep
		 * IPI finally arrives at
		 * target PREPIC and sets
		 * corresponding bit in
		 * memory PMIRR
		 *
		 * In the end VCPU0 is sleeping and does not know
		 * about the pending IPI.
		 */
		epic_wait_icr_idle();
	} else {
		ret = kvm_hw_epic_deliver_to_pirr(vcpu,
				irq->vector, irq->delivery_mode);
	}
	raw_spin_unlock_irqrestore(&vcpu->arch.epic_dat_lock, flags);

	if (ret == 1) {
		/* In [dat_active] case the target vcpu will see
		 * the interrupt in kvm_vcpu_check_block() (see
		 * comment before kvm_arch_vcpu_blocking()). */
		if (!dat_active)
			kvm_wake_up_irq(vcpu);
	}

	return ret;
}

/*
 * Deliver to every VCPU matching the destination; stop after the first
 * success for single-destination shorthands.  Returns 1 when at least
 * one VCPU accepted the interrupt, -1 otherwise.
 */
int kvm_irq_delivery_to_hw_epic(struct kvm *kvm, int src,
		const struct kvm_cepic_irq *irq)
{
	struct kvm_vcpu *vcpu;
	bool delivered = false;
	int i, cepic_id;
	int shorthand = irq->shorthand, dest_id = irq->dest_id;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		cepic_id = kvm_vcpu_to_full_cepic_id(vcpu);
		if (!kvm_epic_match_dest(cepic_id, src, shorthand, dest_id))
			continue;

		if (kvm_irq_delivery_to_hw_epic_single(vcpu, irq) == 1) {
			delivered = true;
			/* Stop if there is a single destination */
			if (shorthand == CEPIC_ICR_DST_FULL ||
					shorthand == CEPIC_ICR_DST_SELF) {
				break;
			}
		}
	}

	return delivered ? 1 : -1;
}

/* Inject the sysrq-showstate vector into the given VCPU */
int kvm_hw_epic_sysrq_deliver(struct kvm_vcpu *vcpu)
{
	struct kvm_cepic_irq irq;

	irq.vector = SYSRQ_SHOWSTATE_EPIC_VECTOR;
	irq.delivery_mode = CEPIC_ICR_DLVM_FIXED_EXT;
	irq.trig_mode = 0;	/* Edge */
	irq.shorthand = CEPIC_ICR_DST_FULL;
	irq.dest_id = kvm_vcpu_to_full_cepic_id(vcpu);

	return kvm_irq_delivery_to_hw_epic(vcpu->kvm, 0, &irq);
}

#ifdef CONFIG_KVM_ASYNC_PF

/* Notify the guest that an async page fault has been resolved */
int kvm_hw_epic_async_pf_wake_deliver(struct kvm_vcpu *vcpu)
{
	struct kvm_cepic_irq irq;

	irq.vector = vcpu->arch.apf.apf_ready_vector;
	irq.delivery_mode = CEPIC_ICR_DLVM_FIXED_EXT;
	irq.trig_mode = 0;
	irq.shorthand = CEPIC_ICR_DST_FULL;
	irq.dest_id = kvm_vcpu_to_full_cepic_id(vcpu);

	return kvm_irq_delivery_to_hw_epic(vcpu->kvm, vcpu->vcpu_id, &irq);
}

#endif	/* CONFIG_KVM_ASYNC_PF */

/* Software-EPIC delivery: deliver to the first VCPU matching dest */
int kvm_irq_delivery_to_sw_epic(struct kvm *kvm, int src,
			struct kvm_cepic_irq *irq)
{
	int i;
	int cepic_id;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		cepic_id = kvm_epic_id(vcpu->arch.epic);
		if (kvm_epic_match_dest(cepic_id, src, irq->shorthand,
					irq->dest_id)) {
			int ret = kvm_epic_set_irq(vcpu, irq);

			if (ret >= 0) {
				kvm_wake_up_irq(vcpu);
			}
			return ret;
		}
	}

	return -1;
}

void
kvm_int_violat_delivery_to_hw_epic(struct kvm *kvm) +{ + int i; + struct kvm_vcpu *vcpu; + + kvm_for_each_vcpu(i, vcpu, kvm) + set_bit(CEPIC_PNMIRR_INT_VIOLAT_BIT, + (void *)&vcpu->arch.hw_ctxt.cepic->pnmirr); +} + +void kvm_deliver_cepic_epic_interrupt(void) +{ + struct kvm_cepic_irq irq; + union cepic_epic_int2 reg; + struct kvm *kvm; + u32 src = cepic_id_short_to_full(read_epic_id()); + struct kvm_vcpu *vcpu = current_thread_info()->vcpu; + + reg.raw = epic_read_d(CEPIC_EPIC_INT2); + + if (WARN_ONCE(!vcpu, "vcpu is NULL inside CEPIC_EPIC_INT handler")) + return; + + kvm = vcpu->kvm; + if (WARN_ONCE(kvm->arch.vmid.nr != reg.bits.gst_id, + "Received CEPIC_EPIC_INT with bad gst_id %d\n", reg.bits.gst_id)) + return; + + irq.dest_id = reg.bits.gst_dst; + irq.vector = reg.bits.vect; + irq.trig_mode = 0; + irq.delivery_mode = reg.bits.dlvm; + irq.shorthand = reg.bits.dst_sh; + + kvm_irq_delivery_to_epic(kvm, src, &irq); +} + +int kvm_cpu_has_pending_apic_timer(struct kvm_vcpu *vcpu) +{ + if (lapic_in_kernel(vcpu)) + return apic_has_pending_timer(vcpu); + + return 0; +} + +/* This is called from pv_wait hcall (CEPIC DAT is active) */ +int kvm_cpu_has_pending_epic_timer(struct kvm_vcpu *vcpu) +{ + union cepic_cir reg_cir; + u64 pmirr; + + reg_cir.raw = epic_read_guest_w(CEPIC_CIR); + + if (!reg_cir.bits.stat) + return false; + + if (reg_cir.bits.vect == CEPIC_TIMER_VECTOR) + return true; + + pmirr = epic_read_guest_d(CEPIC_PMIRR + (CEPIC_TIMER_VECTOR >> 6) * 8); + + return !!(pmirr & (1ULL << (CEPIC_TIMER_VECTOR & 0x3f))); +} + +/* + * check if there are pending timer events + * to be processed. 
+ */ +int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) +{ + return !vcpu->arch.hcall_irqs_disabled && kvm_cpu_has_pending_pic_timer(vcpu); + +} +EXPORT_SYMBOL(kvm_cpu_has_pending_timer); + +int kvm_set_apic_msi(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_source_id, int level, bool line_status) +{ + struct kvm_lapic_irq irq; + + irq.dest_id = (e->msi.address_lo & + MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT; + irq.vector = (e->msi.data & + MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT; + irq.dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo; + irq.trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data; + irq.delivery_mode = e->msi.data & 0x700; + irq.level = 1; + irq.shorthand = 0; + + /* TODO Deal with RH bit of MSI message address */ + return kvm_irq_delivery_to_apic(kvm, NULL, &irq); +} + +int kvm_set_epic_msi(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_source_id, int level, bool line_status) +{ + struct kvm_cepic_irq irq; + union IO_EPIC_MSG_ADDR_LOW addr_low; + union IO_EPIC_MSG_DATA data; + + addr_low.raw = e->msi.address_lo; + data.raw = e->msi.data; + + irq.dest_id = addr_low.bits.dst; + irq.vector = data.bits.vector; + irq.delivery_mode = data.bits.dlvm; + irq.shorthand = 0; + + return kvm_irq_delivery_to_epic(kvm, 0, &irq); +} + +int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_source_id, int level, bool line_status) +{ + DebugIRQ("IRQ #%d level %d line status %d\n", + irq_source_id, level, line_status); + if (!level) + return -1; + + trace_kvm_msi_set_irq(e->msi.address_lo, e->msi.data); + + return kvm_set_pic_msi(e, kvm, irq_source_id, level, line_status); +} +EXPORT_SYMBOL(kvm_set_msi); + +int kvm_request_irq_source_id(struct kvm *kvm) +{ + unsigned long *bitmap = &kvm->arch.irq_sources_bitmap; + int irq_source_id; + + mutex_lock(&kvm->irq_lock); + irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG); + + if (irq_source_id >= BITS_PER_LONG) { 
+ printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n"); + irq_source_id = -EFAULT; + goto unlock; + } + + ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID); + set_bit(irq_source_id, bitmap); +unlock: + mutex_unlock(&kvm->irq_lock); + + return irq_source_id; +} + +#if 0 +void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id) +{ + int i; + + ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID); + + mutex_lock(&kvm->irq_lock); + if (irq_source_id < 0 || + irq_source_id >= BITS_PER_LONG) { + printk(KERN_ERR "kvm: IRQ source ID out of range!\n"); + goto unlock; + } + clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap); + if (!irqchip_in_kernel(kvm)) + goto unlock; + + for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++) { + clear_bit(irq_source_id, &kvm->arch.vioapic->irq_states[i]); + if (i >= 16) + continue; +#ifdef CONFIG_X86 + clear_bit(irq_source_id, &pic_irqchip(kvm)->irq_states[i]); +#endif + } +unlock: + mutex_unlock(&kvm->irq_lock); +} +#endif + +void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq, + struct kvm_irq_mask_notifier *kimn) +{ + mutex_lock(&kvm->irq_lock); + kimn->irq = irq; + hlist_add_head_rcu(&kimn->link, &kvm->arch.mask_notifier_list); + mutex_unlock(&kvm->irq_lock); +} + +void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, + struct kvm_irq_mask_notifier *kimn) +{ + mutex_lock(&kvm->irq_lock); + hlist_del_rcu(&kimn->link); + mutex_unlock(&kvm->irq_lock); + synchronize_rcu(); +} + +void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask) +{ + struct kvm_irq_mask_notifier *kimn; + + rcu_read_lock(); + hlist_for_each_entry_rcu(kimn, &kvm->arch.mask_notifier_list, link) + if (kimn->irq == irq) + kimn->func(kimn, mask); + rcu_read_unlock(); +} + +int kvm_set_routing_entry(struct kvm *kvm, + struct kvm_kernel_irq_routing_entry *e, + const struct kvm_irq_routing_entry *ue) +{ + int r = -EINVAL; + int delta; + unsigned max_pin; + + DebugIRQ("started for entry type #%d gsi #%d\n", ue->type, ue->gsi); + switch 
(ue->type) { + case KVM_IRQ_ROUTING_IRQCHIP: + DebugIRQ("routing IRQCHIP\n"); + delta = 0; + switch (ue->u.irqchip.irqchip) { + case KVM_IRQCHIP_PIC_MASTER: + e->set = kvm_set_pic_irq; + max_pin = 16; + DebugIRQ("IRQCHIP is PIC master\n"); + break; + case KVM_IRQCHIP_PIC_SLAVE: + e->set = kvm_set_pic_irq; + max_pin = 16; + delta = 8; + DebugIRQ("IRQCHIP is PIC slave\n"); + break; + case KVM_IRQCHIP_IOAPIC: + max_pin = KVM_IOAPIC_NUM_PINS; + e->set = kvm_set_ioapic_irq; + DebugIRQ("IRQCHIP is IOAPIC pin #%d\n", + ue->u.irqchip.pin); + break; + case KVM_IRQCHIP_IOEPIC_NODE0: + max_pin = KVM_IOEPIC_NUM_PINS; + e->set = kvm_set_ioepic_irq; + DebugIRQ("IRQCHIP is IOEPIC_NODE0 pin #%d\n", + ue->u.irqchip.pin); + break; + case KVM_IRQCHIP_IOEPIC_NODE1: + max_pin = KVM_IOEPIC_NUM_PINS; + e->set = kvm_set_ioepic_irq; + DebugIRQ("IRQCHIP is IOEPIC_NODE1 pin #%d\n", + ue->u.irqchip.pin); + break; + case KVM_IRQCHIP_IOEPIC_NODE2: + max_pin = KVM_IOEPIC_NUM_PINS; + e->set = kvm_set_ioepic_irq; + DebugIRQ("IRQCHIP is IOEPIC_NODE2 pin #%d\n", + ue->u.irqchip.pin); + break; + case KVM_IRQCHIP_IOEPIC_NODE3: + max_pin = KVM_IOEPIC_NUM_PINS; + e->set = kvm_set_ioepic_irq; + DebugIRQ("IRQCHIP is IOEPIC_NODE3 pin #%d\n", + ue->u.irqchip.pin); + break; + default: + DebugIRQ("IRQCHIP is unknown\n"); + goto out; + } + e->irqchip.irqchip = ue->u.irqchip.irqchip; + e->irqchip.pin = ue->u.irqchip.pin + delta; + if (e->irqchip.pin >= max_pin) + goto out; + break; + case KVM_IRQ_ROUTING_MSI: + DebugIRQ("routing MSI\n"); + e->set = kvm_set_msi; + e->msi.address_lo = ue->u.msi.address_lo; + e->msi.address_hi = ue->u.msi.address_hi; + e->msi.data = ue->u.msi.data; + break; + default: + DebugIRQ("routing unknown\n"); + goto out; + } + + r = 0; +out: + return r; +} + +#define IOAPIC_ROUTING_ENTRY(irq) \ + { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \ + .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) } +#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq) + +#ifdef CONFIG_X86 +# 
define PIC_ROUTING_ENTRY(irq) \ + { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \ + .u.irqchip.irqchip = SELECT_PIC(irq), .u.irqchip.pin = (irq) % 8 } +# define ROUTING_ENTRY2(irq) \ + IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq) +#else +# define ROUTING_ENTRY2(irq) \ + IOAPIC_ROUTING_ENTRY(irq) +#endif + +#define IOAPIC_ROUTING_ENTRY_NODE_1(irq) \ + { .gsi = irq + 64, .type = KVM_IRQ_ROUTING_IRQCHIP, \ + .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) } + +#define IOAPIC_ROUTING_ENTRY_NODE_2(irq) \ + { .gsi = irq + 128, .type = KVM_IRQ_ROUTING_IRQCHIP, \ + .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) } + +#define IOAPIC_ROUTING_ENTRY_NODE_3(irq) \ + { .gsi = irq + 192, .type = KVM_IRQ_ROUTING_IRQCHIP, \ + .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) } + +static const struct kvm_irq_routing_entry default_routing[] = { + ROUTING_ENTRY2(0), ROUTING_ENTRY2(1), + ROUTING_ENTRY2(2), ROUTING_ENTRY2(3), + ROUTING_ENTRY2(4), ROUTING_ENTRY2(5), + ROUTING_ENTRY2(6), ROUTING_ENTRY2(7), + ROUTING_ENTRY2(8), ROUTING_ENTRY2(9), + ROUTING_ENTRY2(10), ROUTING_ENTRY2(11), + ROUTING_ENTRY2(12), ROUTING_ENTRY2(13), + ROUTING_ENTRY2(14), ROUTING_ENTRY2(15), + ROUTING_ENTRY1(16), ROUTING_ENTRY1(17), + ROUTING_ENTRY1(18), ROUTING_ENTRY1(19), + ROUTING_ENTRY1(20), ROUTING_ENTRY1(21), + ROUTING_ENTRY1(22), ROUTING_ENTRY1(23), +#ifdef CONFIG_IA64 + ROUTING_ENTRY1(24), ROUTING_ENTRY1(25), + ROUTING_ENTRY1(26), ROUTING_ENTRY1(27), + ROUTING_ENTRY1(28), ROUTING_ENTRY1(29), + ROUTING_ENTRY1(30), ROUTING_ENTRY1(31), + ROUTING_ENTRY1(32), ROUTING_ENTRY1(33), + ROUTING_ENTRY1(34), ROUTING_ENTRY1(35), + ROUTING_ENTRY1(36), ROUTING_ENTRY1(37), + ROUTING_ENTRY1(38), ROUTING_ENTRY1(39), + ROUTING_ENTRY1(40), ROUTING_ENTRY1(41), + ROUTING_ENTRY1(42), ROUTING_ENTRY1(43), + ROUTING_ENTRY1(44), ROUTING_ENTRY1(45), + ROUTING_ENTRY1(46), ROUTING_ENTRY1(47), +#endif +#ifdef CONFIG_E2K + IOAPIC_ROUTING_ENTRY_NODE_1(0), 
IOAPIC_ROUTING_ENTRY_NODE_1(1), + IOAPIC_ROUTING_ENTRY_NODE_1(2), IOAPIC_ROUTING_ENTRY_NODE_1(3), + IOAPIC_ROUTING_ENTRY_NODE_1(4), IOAPIC_ROUTING_ENTRY_NODE_1(5), + IOAPIC_ROUTING_ENTRY_NODE_1(6), IOAPIC_ROUTING_ENTRY_NODE_1(7), + IOAPIC_ROUTING_ENTRY_NODE_1(8), IOAPIC_ROUTING_ENTRY_NODE_1(9), + IOAPIC_ROUTING_ENTRY_NODE_1(10), IOAPIC_ROUTING_ENTRY_NODE_1(11), + IOAPIC_ROUTING_ENTRY_NODE_1(12), IOAPIC_ROUTING_ENTRY_NODE_1(13), + IOAPIC_ROUTING_ENTRY_NODE_1(14), IOAPIC_ROUTING_ENTRY_NODE_1(15), + IOAPIC_ROUTING_ENTRY_NODE_1(16), IOAPIC_ROUTING_ENTRY_NODE_1(17), + IOAPIC_ROUTING_ENTRY_NODE_1(18), IOAPIC_ROUTING_ENTRY_NODE_1(19), + IOAPIC_ROUTING_ENTRY_NODE_1(20), IOAPIC_ROUTING_ENTRY_NODE_1(21), + IOAPIC_ROUTING_ENTRY_NODE_1(22), IOAPIC_ROUTING_ENTRY_NODE_1(23), + + IOAPIC_ROUTING_ENTRY_NODE_2(0), IOAPIC_ROUTING_ENTRY_NODE_2(1), + IOAPIC_ROUTING_ENTRY_NODE_2(2), IOAPIC_ROUTING_ENTRY_NODE_2(3), + IOAPIC_ROUTING_ENTRY_NODE_2(4), IOAPIC_ROUTING_ENTRY_NODE_2(5), + IOAPIC_ROUTING_ENTRY_NODE_2(6), IOAPIC_ROUTING_ENTRY_NODE_2(7), + IOAPIC_ROUTING_ENTRY_NODE_2(8), IOAPIC_ROUTING_ENTRY_NODE_2(9), + IOAPIC_ROUTING_ENTRY_NODE_2(10), IOAPIC_ROUTING_ENTRY_NODE_2(11), + IOAPIC_ROUTING_ENTRY_NODE_2(12), IOAPIC_ROUTING_ENTRY_NODE_2(13), + IOAPIC_ROUTING_ENTRY_NODE_2(14), IOAPIC_ROUTING_ENTRY_NODE_2(15), + IOAPIC_ROUTING_ENTRY_NODE_2(16), IOAPIC_ROUTING_ENTRY_NODE_2(17), + IOAPIC_ROUTING_ENTRY_NODE_2(18), IOAPIC_ROUTING_ENTRY_NODE_2(19), + IOAPIC_ROUTING_ENTRY_NODE_2(20), IOAPIC_ROUTING_ENTRY_NODE_2(21), + IOAPIC_ROUTING_ENTRY_NODE_2(22), IOAPIC_ROUTING_ENTRY_NODE_2(23), + + IOAPIC_ROUTING_ENTRY_NODE_3(0), IOAPIC_ROUTING_ENTRY_NODE_3(1), + IOAPIC_ROUTING_ENTRY_NODE_3(2), IOAPIC_ROUTING_ENTRY_NODE_3(3), + IOAPIC_ROUTING_ENTRY_NODE_3(4), IOAPIC_ROUTING_ENTRY_NODE_3(5), + IOAPIC_ROUTING_ENTRY_NODE_3(6), IOAPIC_ROUTING_ENTRY_NODE_3(7), + IOAPIC_ROUTING_ENTRY_NODE_3(8), IOAPIC_ROUTING_ENTRY_NODE_3(9), + IOAPIC_ROUTING_ENTRY_NODE_3(10), IOAPIC_ROUTING_ENTRY_NODE_3(11), + 
IOAPIC_ROUTING_ENTRY_NODE_3(12), IOAPIC_ROUTING_ENTRY_NODE_3(13), + IOAPIC_ROUTING_ENTRY_NODE_3(14), IOAPIC_ROUTING_ENTRY_NODE_3(15), + IOAPIC_ROUTING_ENTRY_NODE_3(16), IOAPIC_ROUTING_ENTRY_NODE_3(17), + IOAPIC_ROUTING_ENTRY_NODE_3(18), IOAPIC_ROUTING_ENTRY_NODE_3(19), + IOAPIC_ROUTING_ENTRY_NODE_3(20), IOAPIC_ROUTING_ENTRY_NODE_3(21), + IOAPIC_ROUTING_ENTRY_NODE_3(22), IOAPIC_ROUTING_ENTRY_NODE_3(23), +#endif +}; + +int kvm_setup_apic_irq_routing(struct kvm *kvm) +{ + return kvm_set_irq_routing(kvm, default_routing, + ARRAY_SIZE(default_routing), 0); +} + +int kvm_setup_epic_irq_routing(struct kvm *kvm) +{ + int i, node, ret; + struct kvm_irq_routing_entry *default_routing_ioepic; + int nr_entries = KVM_IOEPIC_NUM_PINS * 4; + + default_routing_ioepic = vmalloc(sizeof(struct kvm_irq_routing_entry) * nr_entries); + + for (i = 0; i < nr_entries; i++) { + node = i / 64; + + default_routing_ioepic[i].gsi = i; + default_routing_ioepic[i].type = KVM_IRQ_ROUTING_IRQCHIP; + default_routing_ioepic[i].flags = 0; + default_routing_ioepic[i].u.irqchip.irqchip = KVM_IRQCHIP_IOEPIC_NODE0 + node; + default_routing_ioepic[i].u.irqchip.pin = i % 64; + } + + + ret = kvm_set_irq_routing(kvm, default_routing_ioepic, nr_entries, 0); + + vfree(default_routing_ioepic); + + return ret; +} + +void kvm_irq_routing_update_epic(struct kvm *kvm) +{ + if (ioepic_in_kernel(kvm) || !irqchip_in_kernel(kvm)) + return; + kvm_make_scan_ioepic_request(kvm); +} + +void kvm_post_irq_routing_update_epic(struct kvm *kvm) +{ + if (ioepic_in_kernel(kvm) || !irqchip_in_kernel(kvm)) + return; + kvm_make_scan_ioepic_request(kvm); +} + +void kvm_irq_routing_update_apic(struct kvm *kvm) +{ + if (ioapic_in_kernel(kvm) || !irqchip_in_kernel(kvm)) + return; + kvm_make_scan_ioapic_request(kvm); +} + +void kvm_post_irq_routing_update_apic(struct kvm *kvm) +{ + if (ioapic_in_kernel(kvm) || !irqchip_in_kernel(kvm)) + return; + kvm_make_scan_ioapic_request(kvm); +} + +void kvm_arch_irq_routing_update(struct kvm *kvm) +{ 
+ kvm_irq_routing_update_pic(kvm); +} + +void kvm_arch_post_irq_routing_update(struct kvm *kvm) +{ + kvm_post_irq_routing_update_pic(kvm); +} diff --git a/arch/e2k/kvm/kvm-e2k.c b/arch/e2k/kvm/kvm-e2k.c new file mode 100644 index 000000000000..dcab6fdcf66b --- /dev/null +++ b/arch/e2k/kvm/kvm-e2k.c @@ -0,0 +1,4674 @@ +/* + * kvm_e2k.c: Basic KVM support On Elbrus series processors + * + * + * Copyright (C) 2011, MCST. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_KVM_HOST_MODE + +#define CREATE_TRACE_POINTS +#include +#include +#include +#undef CREATE_TRACE_POINTS + +#include "user_area.h" +#include "vmid.h" +#include "cpu.h" +#include "mmu.h" +#include "io.h" +#include "process.h" +#include "sic-nbsr.h" +#include "ioapic.h" +#include "pic.h" +#include "irq.h" +#include "time.h" +#include "lt.h" +#include "spmc.h" +#include "gaccess.h" +#include "gregs.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_UNIMPL_MODE +#undef DebugUNIMPL +#define DEBUG_KVM_UNIMPL_MODE 0 /* unimplemeneted features debugging */ +#define DebugUNIMPL(fmt, args...) \ +({ \ + if (DEBUG_KVM_UNIMPL_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_RUN_MODE +#undef DebugKVMRUN +#define DEBUG_KVM_RUN_MODE 0 /* run debugging */ +#define DebugKVMRUN(fmt, args...) \ +({ \ + if (DEBUG_KVM_RUN_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_EXIT_REQ_MODE +#undef DebugKVMER +#define DEBUG_KVM_EXIT_REQ_MODE 0 /* exit request debugging */ +#define DebugKVMER(fmt, args...) \ +({ \ + if (DEBUG_KVM_EXIT_REQ_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_EXIT_REQ_MAIN_MODE +#undef DebugKVMERM +#define DEBUG_KVM_EXIT_REQ_MAIN_MODE 0 /* exit request verbose */ +#define DebugKVMERM(fmt, args...) \ +({ \ + if (DEBUG_KVM_EXIT_REQ_MAIN_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_PAGE_FAULT_MODE +#undef DebugKVMPF +#define DEBUG_KVM_PAGE_FAULT_MODE 0 /* page fault on KVM */ +#define DebugKVMPF(fmt, args...) \ +({ \ + if (DEBUG_KVM_PAGE_FAULT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_PARAVIRT_FAULT_MODE +#undef DebugKVMPVF +#define DEBUG_KVM_PARAVIRT_FAULT_MODE 0 /* paravirt page fault on KVM */ +#define DebugKVMPVF(fmt, args...) \ +({ \ + if (DEBUG_KVM_PARAVIRT_FAULT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PARAVIRT_PREFAULT_MODE +#undef DebugPVF +#define DEBUG_PARAVIRT_PREFAULT_MODE 0 /* paravirt page prefault */ +#define DebugPVF(fmt, args...) \ +({ \ + if (DEBUG_PARAVIRT_PREFAULT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_IOCTL_MODE +#undef DebugKVMIOCTL +#define DEBUG_KVM_IOCTL_MODE 0 /* kernel IOCTL debug */ +#define DebugKVMIOCTL(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_IOCTL_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_IOC_MODE +#undef DebugKVMIOC +#define DEBUG_KVM_IOC_MODE 0 /* kernel IOCTL verbose debug */ +#define DebugKVMIOC(fmt, args...) \ +({ \ + if (DEBUG_KVM_IOC_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_IO_MODE +#undef DebugKVMIO +#define DEBUG_KVM_IO_MODE 0 /* kernel virt machine IO debug */ +#define DebugKVMIO(fmt, args...) \ +({ \ + if (DEBUG_KVM_IO_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_IRQ_MODE +#undef DebugKVMIRQ +#define DEBUG_KVM_IRQ_MODE 0 /* kernel virt machine IRQ debugging */ +#define DebugKVMIRQ(fmt, args...) \ +({ \ + if (DEBUG_KVM_IRQ_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_SHUTDOWN_MODE +#undef DebugKVMSH +#define DEBUG_KVM_SHUTDOWN_MODE 0 /* KVM shutdown debugging */ +#define DebugKVMSH(fmt, args...) \ +({ \ + if (DEBUG_KVM_SHUTDOWN_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_HV_MODE +#undef DebugKVMHV +#define DEBUG_KVM_HV_MODE 0 /* hardware virtualized VM debugging */ +#define DebugKVMHV(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_HV_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef VM_BUG_ON +#define VM_BUG_ON(cond) BUG_ON(cond) + +MODULE_AUTHOR("Elbrus MCST"); +MODULE_DESCRIPTION("E2K arch virtualization driver based on KVM"); +MODULE_LICENSE("GPL"); + +/* mask of available and supported by the hypervisor VM types */ +/* depends on hardware, CPU ISET, kernel & hypervisor configuration */ +unsigned int kvm_vm_types_available = 0; + +extern __read_mostly struct preempt_ops kvm_preempt_ops; + +static int kvm_arch_pv_vcpu_init(struct kvm_vcpu *vcpu); +static void kvm_arch_pv_vcpu_uninit(struct kvm_vcpu *vcpu); +static int kvm_arch_pv_vcpu_setup(struct kvm_vcpu *vcpu); +static int kvm_arch_hv_vcpu_init(struct kvm_vcpu *vcpu); +static void kvm_arch_hv_vcpu_uninit(struct kvm_vcpu *vcpu); +static int kvm_arch_hv_vcpu_setup(struct kvm_vcpu *vcpu); +static int kvm_arch_any_vcpu_init(struct kvm_vcpu *vcpu); +static void kvm_arch_any_vcpu_uninit(struct kvm_vcpu *vcpu); +static int kvm_arch_any_vcpu_setup(struct kvm_vcpu *vcpu); + +static user_area_t *kvm_find_memory_region(struct kvm *kvm, + int slot, e2k_addr_t address, e2k_size_t size, + kvm_guest_mem_type_t type); +static long kvm_arch_ioctl_alloc_guest_area(struct kvm *kvm, + kvm_guest_area_alloc_t __user *what); +static int gva_to_alias_slot(struct kvm *kvm, gva_t gva); +static int find_shadow_intersection(struct kvm *kvm, e2k_addr_t kernel_base, + gva_t shadow_base, e2k_size_t area_size); +void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu); +static void kvm_arch_vcpu_release(struct kvm_vcpu *vcpu); +static void free_vcpu_state(struct kvm_vcpu *vcpu); +static int kvm_create_host_info(struct kvm *kvm); +static void kvm_free_host_info(struct kvm *kvm); +static int init_guest_boot_cut(struct kvm_vcpu *vcpu); +static int init_guest_vcpu_state(struct kvm_vcpu *vcpu); +static void kvm_wake_up_all_other_vcpu_host(struct kvm_vcpu *my_vcpu); + +struct kvm_stats_debugfs_item debugfs_entries[] = { + /* TODO 
fill me */ + { NULL } +}; + +static bool kvm_is_guest_pv_vm(void) +{ + if (paravirt_enabled()) { + pr_err("KVM: paravirtualized guest cannot support nested VM\n"); + return true; + } + return false; +} + +#ifdef CONFIG_KVM_HW_VIRTUALIZATION +static bool kvm_cpu_has_hv_support(void) +{ + e2k_idr_t IDR; + + IDR = read_IDR_reg(); + if (!IDR.hw_virt || machine.native_iset_ver < E2K_ISET_V6) { + return false; + } + DebugKVM("CPUs have hardware virtualization extensions version %d\n", + IDR.IDR_ms_hw_virt_ver); + return true; +} + +static bool kvm_cpu_hv_disabled(void) +{ + if (READ_CU_HW0_REG().virt_dsbl) { + DebugKVM("CPUs hardware virtualization extensions " + "are disabled\n"); + return true; + } + return false; +} + +static bool kvm_is_guest_hv_vm(void) +{ + e2k_core_mode_t CORE_MODE; + + CORE_MODE.CORE_MODE_reg = READ_CORE_MODE_REG_VALUE(); + if (CORE_MODE.CORE_MODE_gmi) { + DebugKVM("KVM: it is hardware virtualized guest VM\n"); + return true; + } + return false; +} + +static bool kvm_is_hv_enable(void) +{ + if (!kvm_cpu_has_hv_support()) { + pr_err("KVM: no hardware virtualization extentions\n"); + return false; + } + if (kvm_cpu_hv_disabled()) { + pr_err("KVM: hardware virtualization extentions " + "are disabled\n"); + return false; + } + if (kvm_is_guest_hv_vm()) { + pr_err("KVM: hardware virtualized guest cannot " + "run nested VM\n"); + return false; + } + return true; +} + +/* Set up CEPIC_EPIC_INT (IPI delivery to inactive guest) */ +static void kvm_setup_cepic_epic_int(void) +{ + union cepic_epic_int epic_int; + union cepic_ctrl2 ctrl2; + + epic_int.raw = 0; + epic_int.bits.vect = CEPIC_EPIC_INT_VECTOR; + epic_write_w(CEPIC_EPIC_INT, epic_int.raw); + + /* Also enable automatic generation of CEPIC_EPIC_INT on + * _current_ vcpu when IPI misses in DAT (i.e. 
when the + * _target_ vcpu is not running) */ + ctrl2.raw = epic_read_w(CEPIC_CTRL2); + ctrl2.bits.int_hv = 1; + epic_write_w(CEPIC_CTRL2, ctrl2.raw); +} + +static cpumask_t kvm_e2k_hardware_enabled; + +static void prepic_set_virt_en(bool on) +{ + union prepic_ctrl2 reg_ctrl; + int node; + + reg_ctrl.raw = 0; + reg_ctrl.bits.virt_en = !!on; + if (epic_bgi_mode) + reg_ctrl.bits.bgi_mode = 1; + + for_each_online_node(node) + prepic_node_write_w(node, SIC_prepic_ctrl2, reg_ctrl.raw); + + DebugKVM("%s virtualization support in PREPIC. bgi_mode=%d\n", + (on) ? "Enabled" : "Disabled", epic_bgi_mode); +} + +static void kvm_hardware_virt_enable(void) +{ + e2k_core_mode_t CORE_MODE; + + /* set guest CORE_MODE register to allow of guest mode indicator */ + /* for guest kernels, so any VM software can see guest mode */ + CORE_MODE.CORE_MODE_reg = read_SH_CORE_MODE_reg_value(); + CORE_MODE.CORE_MODE_gmi = 1; + CORE_MODE.CORE_MODE_hci = 1; + write_SH_CORE_MODE_reg_value(CORE_MODE.CORE_MODE_reg); + + DebugKVM("KVM: CPU #%d: set guest CORE_MODE to indicate guest mode on any VMs\n", + raw_smp_processor_id()); + + if (cpu_has(CPU_FEAT_EPIC)) { + if (cpumask_empty(&kvm_e2k_hardware_enabled)) + prepic_set_virt_en(true); + + kvm_epic_timer_stop(true); + kvm_setup_cepic_epic_int(); + } + + cpumask_set_cpu(raw_smp_processor_id(), &kvm_e2k_hardware_enabled); +} + +static void kvm_hardware_virt_disable(void) +{ + cpumask_clear_cpu(raw_smp_processor_id(), &kvm_e2k_hardware_enabled); + + if (cpu_has(CPU_FEAT_EPIC) && cpumask_empty(&kvm_e2k_hardware_enabled)) + prepic_set_virt_en(false); +} +#else /* ! 
CONFIG_KVM_HW_VIRTUALIZATION */ +static bool kvm_is_hv_enable(void) +{ + pr_err("KVM: hardware virtualization mode is turned OFF at kernel config\n"); + return false; +} +static void kvm_hardware_virt_enable(void) +{ + pr_err("KVM: hardware virtualization mode is turned OFF at kernel config\n"); +} +static void kvm_hardware_virt_disable(void) +{ + pr_err("KVM: hardware virtualization mode is turned OFF at kernel config\n"); +} +#endif /* CONFIG_KVM_HW_VIRTUALIZATION */ + +#ifdef CONFIG_KVM_HW_PARAVIRTUALIZATION +static bool kvm_is_hw_pv_enable(void) +{ + if (!kvm_is_hv_enable()) + return false; + return true; +} + +#else /* ! CONFIG_KVM_HW_PARAVIRTUALIZATION */ +static bool kvm_is_hw_pv_enable(void) +{ + pr_err("KVM: hardware paravirtualization mode is turned OFF at " + "kernel config\n"); + return false; +} +#endif /* CONFIG_KVM_HW_PARAVIRTUALIZATION */ + +int kvm_arch_hardware_enable(void) +{ + DebugKVM("started\n"); + if (kvm_is_hv_vm_available() || kvm_is_hw_pv_vm_available()) + kvm_hardware_virt_enable(); + return 0; +} + +void kvm_arch_hardware_disable(void) +{ + DebugKVM("started\n"); + if (kvm_is_hv_vm_available() || kvm_is_hw_pv_vm_available()) + kvm_hardware_virt_disable(); +} + +int kvm_arch_check_processor_compat(void) +{ + DebugKVM("started\n"); + + if (kvm_is_hv_vm_available() && !kvm_is_hv_enable()) { + pr_err("KVM: CPU #%d has not hardware virtualization support\n", + raw_smp_processor_id()); + atomic_clear_mask(KVM_E2K_HV_VM_TYPE_MASK, + &kvm_vm_types_available); + } + + if (kvm_is_hw_pv_vm_available() && !kvm_is_hw_pv_enable()) { + pr_err("KVM: CPU #%d has not hardware paravirtualization " + "support\n", + raw_smp_processor_id()); + atomic_clear_mask(KVM_E2K_HW_PV_VM_TYPE_MASK, + &kvm_vm_types_available); + } + + if (kvm_vm_types_available == 0) + return -EINVAL; + else + return 0; +} + +#ifdef CONFIG_KVM_PARAVIRTUALIZATION + +static int create_vcpu_state(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = vcpu->kvm; + kvm_vcpu_state_t *vcpu_state = 
NULL; + kvm_vcpu_state_t *kmap_vcpu_state = NULL; + e2k_cute_t *cute_p = NULL; + user_area_t *guest_area; + e2k_size_t cut_size, size; + int npages; + long r; + + DebugKVM("started for VCPU %d\n", vcpu->vcpu_id); + npages = PAGE_ALIGN(sizeof(kvm_vcpu_state_t)) >> PAGE_SHIFT; + size = (npages << PAGE_SHIFT); + if (vcpu->arch.is_pv) { + cut_size = sizeof(*cute_p) * MAX_GUEST_CODES_UNITS; + size += PAGE_ALIGN(cut_size); + } else { + cut_size = 0; + vcpu->arch.guest_cut = NULL; + } + guest_area = kvm_find_memory_region(kvm, -1, 0, size, + guest_vram_mem_type); + if (guest_area == NULL) { + DebugKVM("guest memory regions is not created or empty\n"); + return -EINVAL; + } + vcpu_state = user_area_alloc_locked_pages(guest_area, 0, + sizeof(kvm_vcpu_state_t), 1 << E2K_ALIGN_GLOBALS, 0); + if (vcpu_state == NULL) { + DebugKVM("could not allocate VCPU state struct\n"); + r = -ENOMEM; + goto error; + } + /* + * VCPU state maps to kernel vmaloc range to have access into + * this state from any host kernel threads + * For example, it needs for hrtimer callback function, which + * can be called on any process + */ + kmap_vcpu_state = map_user_area_to_vmalloc_range(guest_area, + vcpu_state, PAGE_KERNEL); + if (kmap_vcpu_state == NULL) { + DebugKVM("could not map VCPU state struct to kernel VM\n"); + r = -ENOMEM; + goto error; + } + + memset(vcpu_state, 0, sizeof(kvm_vcpu_state_t)); + vcpu->arch.vcpu_state = vcpu_state; + vcpu->arch.kmap_vcpu_state = kmap_vcpu_state; + if (IS_INVALID_GPA(kvm_vcpu_hva_to_gpa(vcpu, (u64)vcpu_state))) { + pr_err("%s() : could not allocate GPA of VCPU state struct\n", + __func__); + r = -ENOMEM; + goto error; + } + kvm_setup_guest_VCPU_ID(vcpu, (const u32)vcpu->vcpu_id); + + if (cut_size == 0) { + DebugKVM("VCPU #%d state struct allocated at %px\n", + vcpu->vcpu_id, + (void *)kvm_vcpu_hva_to_gpa(vcpu, + (u64)vcpu->arch.vcpu_state)); + return 0; + } + + cute_p = user_area_alloc_locked(guest_area, 0, + sizeof(*cute_p) * MAX_GUEST_CODES_UNITS, + 1 << 
E2K_ALIGN_CUT, 0); + if (cute_p == NULL) { + DebugKVM("could not allocate VCPU guest CUT\n"); + r = -ENOMEM; + goto error; + } + memset(cute_p, 0, PAGE_SIZE); + vcpu->arch.guest_cut = cute_p; + if (IS_INVALID_GPA(kvm_vcpu_hva_to_gpa(vcpu, (u64)cute_p))) { + pr_err("%s() : could not allocate GPA of VCPU guest CUT\n", + __func__); + r = -ENOMEM; + goto error; + } + DebugKVM("VCPU #%d allocated state struct at %px, CUT at %px\n", + vcpu->vcpu_id, + (void *)kvm_vcpu_hva_to_gpa(vcpu, (u64)vcpu->arch.vcpu_state), + (void *)kvm_vcpu_hva_to_gpa(vcpu, (u64)vcpu->arch.guest_cut)); + + return 0; + +error: + if (kmap_vcpu_state != NULL) { + unmap_user_area_to_vmalloc_range(guest_area, kmap_vcpu_state); + vcpu->arch.kmap_vcpu_state = NULL; + } + if (vcpu_state != NULL) { + user_area_free_chunk(guest_area, vcpu_state); + vcpu->arch.vcpu_state = NULL; + } + if (cute_p != NULL) { + user_area_free_chunk(guest_area, cute_p); + vcpu->arch.guest_cut = NULL; + } + return r; +} +static int init_vcpu_state(struct kvm_vcpu *vcpu) +{ + int r; + + r = init_guest_boot_cut(vcpu); + if (r) { + DebugKVM("could not create guest CUT\n"); + return r; + } + r = init_guest_vcpu_state(vcpu); + if (r) { + DebugKVM("could not init VCPU state to start guest\n"); + return r; + } + return 0; +} + +static void free_vcpu_state(struct kvm_vcpu *vcpu) +{ + user_area_t *guest_area; + e2k_addr_t area_start; + + DebugKVMSH("%s (%d) started for VCPU #%d\n", + current->comm, current->pid, vcpu->vcpu_id); + if (vcpu->arch.vcpu_state != NULL) { + area_start = (e2k_addr_t)vcpu->arch.vcpu_state; + guest_area = kvm_find_memory_region(vcpu->kvm, + -1, area_start, 0, guest_vram_mem_type); + if (vcpu->arch.kmap_vcpu_state != NULL) { + unmap_user_area_to_vmalloc_range(guest_area, + vcpu->arch.kmap_vcpu_state); + vcpu->arch.kmap_vcpu_state = NULL; + } + user_area_free_chunk(guest_area, vcpu->arch.vcpu_state); + vcpu->arch.vcpu_state = NULL; + } else if (vcpu->arch.kmap_vcpu_state != NULL) { + 
unmap_user_area_to_vmalloc_range(NULL, + vcpu->arch.kmap_vcpu_state); + vcpu->arch.kmap_vcpu_state = NULL; + } + if (vcpu->arch.guest_cut != NULL) { + area_start = (e2k_addr_t)vcpu->arch.guest_cut; + guest_area = kvm_find_memory_region(vcpu->kvm, + -1, area_start, 0, guest_vram_mem_type); + user_area_free_chunk(guest_area, vcpu->arch.guest_cut); + vcpu->arch.guest_cut = NULL; + } + +} +#else /* ! CONFIG_KVM_PARAVIRTUALIZATION */ +static int create_vcpu_state(struct kvm_vcpu *vcpu) +{ + VM_BUG_ON(vcpu->arch.is_pv); + return 0; +} +static void free_vcpu_state(struct kvm_vcpu *vcpu) +{ + VM_BUG_ON(vcpu->arch.is_pv); +} +#endif /* CONFIG_KVM_PARAVIRTUALIZATION */ + +/* + * Functions to create all kernel backup hardware stacks(PS & PCS) + * to support intercepts and hypercalls + */ +static inline void +define_backup_hw_stacks_sizes(bu_hw_stack_t *hypv_backup) +{ + SET_BACKUP_PS_SIZE(hypv_backup, HYPV_BACKUP_PS_SIZE); + SET_BACKUP_PCS_SIZE(hypv_backup, HYPV_BACKUP_PCS_SIZE); +} +static inline void +backup_hw_stacks_init(bu_hw_stack_t *hypv_backup) +{ + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + + psp_lo.PSP_lo_base = (e2k_addr_t)GET_PS_BASE(hypv_backup); + psp_hi.PSP_hi_size = GET_BACKUP_PS_SIZE(hypv_backup); + psp_hi.PSP_hi_ind = 0; + hypv_backup->psp_lo = psp_lo; + hypv_backup->psp_hi = psp_hi; + + pcsp_lo.PCSP_lo_base = (e2k_addr_t)GET_PCS_BASE(hypv_backup); + pcsp_hi.PCSP_hi_size = GET_BACKUP_PCS_SIZE(hypv_backup); + pcsp_hi.PCSP_hi_ind = 0; + hypv_backup->pcsp_lo = pcsp_lo; + hypv_backup->pcsp_hi = pcsp_hi; + + hypv_backup->users = 0; +} +static int +create_vcpu_backup_stacks(struct kvm_vcpu *vcpu) +{ + bu_hw_stack_t *hypv_backup = &vcpu->arch.hypv_backup; + e2k_size_t ps_size; + e2k_size_t pcs_size; + void *psp_stk; + void *pcsp_stk; + + DebugKVMHV("started on task %s(%d) for VCPU #%d\n", + current->comm, current->pid, vcpu->vcpu_id); + + /* Allocate memory for hypervisor backup hardware stacks */ + + 
define_backup_hw_stacks_sizes(hypv_backup); + ps_size = GET_BACKUP_PS_SIZE(hypv_backup); + pcs_size = GET_BACKUP_PCS_SIZE(hypv_backup); + + psp_stk = kvzalloc(ps_size, GFP_KERNEL); + if (psp_stk == NULL) { + DebugKVMHV("could not allocate backup procedure stack\n"); + return -ENOMEM; + } + pcsp_stk = kvzalloc(pcs_size, GFP_KERNEL); + if (pcsp_stk == NULL) { + DebugKVMHV("could not allocate backup procedure chain stack\n"); + goto out_free_p_stack; + } + + /* Create initial state of backup hardware stacks */ + + SET_PS_BASE(hypv_backup, psp_stk); + DebugKVMHV("allocated backup procedure stack %px, size 0x%lx\n", + psp_stk, ps_size); + + SET_PCS_BASE(hypv_backup, pcsp_stk); + DebugKVMHV("allocated backup procedure chain stack %px, size 0x%lx\n", + pcsp_stk, pcs_size); + + return 0; + +out_free_p_stack: + kvfree(psp_stk); + SET_PS_BASE(hypv_backup, NULL); + + return -ENOMEM; +} +static void +free_kernel_backup_stacks(bu_hw_stack_t *hypv_backup) +{ + void *psp_stk = GET_PS_BASE(hypv_backup); + void *pcsp_stk = GET_PCS_BASE(hypv_backup); + + KVM_BUG_ON(hypv_backup->users != 0); + + if (psp_stk != NULL) { + kvfree(psp_stk); + SET_PS_BASE(hypv_backup, NULL); + } + if (pcsp_stk != NULL) { + kvfree(pcsp_stk); + SET_PCS_BASE(hypv_backup, NULL); + } +} +static int +vcpu_backup_stacks_init(struct kvm_vcpu *vcpu) +{ + backup_hw_stacks_init(&vcpu->arch.hypv_backup); + return 0; +} + +/* + * Functions to create guest VCPU boot-time data & hardware stacks(PS & PCS) + * Such stacks for host has been created by boot loader. + * Hypervisor does not use a boot loader and launch guest VCPUs directly, + * so should prepare all VCPUs stacks into guest physical memory. 
+ */ +static inline void +define_vcpu_boot_stacks_sizes(vcpu_boot_stack_t *boot_stacks) +{ + SET_VCPU_BOOT_CS_SIZE(boot_stacks, VIRT_KERNEL_C_STACK_SIZE); + SET_VCPU_BOOT_PS_SIZE(boot_stacks, VIRT_KERNEL_PS_SIZE); + SET_VCPU_BOOT_PCS_SIZE(boot_stacks, VIRT_KERNEL_PCS_SIZE); +} +static inline void +vcpu_all_boot_stacks_init(vcpu_boot_stack_t *boot_stacks) +{ + e2k_stacks_t *boot_regs = &boot_stacks->regs.stacks; + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + + SET_VCPU_BOOT_CS_TOP(boot_stacks, + (e2k_addr_t)GET_VCPU_BOOT_CS_BASE(boot_stacks) + + GET_VCPU_BOOT_CS_SIZE(boot_stacks)); + boot_regs->top = GET_VCPU_BOOT_CS_TOP(boot_stacks); + usd_lo.USD_lo_base = GET_VCPU_BOOT_CS_TOP(boot_stacks); + usd_hi.USD_hi_size = GET_VCPU_BOOT_CS_SIZE(boot_stacks); + boot_regs->usd_lo = usd_lo; + boot_regs->usd_hi = usd_hi; + + psp_lo.PSP_lo_base = (e2k_addr_t)GET_VCPU_BOOT_PS_BASE(boot_stacks); + psp_hi.PSP_hi_size = GET_VCPU_BOOT_PS_SIZE(boot_stacks); + psp_hi.PSP_hi_ind = 0; + boot_regs->psp_lo = psp_lo; + boot_regs->psp_hi = psp_hi; + + pcsp_lo.PCSP_lo_base = (e2k_addr_t)GET_VCPU_BOOT_PCS_BASE(boot_stacks); + pcsp_hi.PCSP_hi_size = GET_VCPU_BOOT_PCS_SIZE(boot_stacks); + pcsp_hi.PCSP_hi_ind = 0; + boot_regs->pcsp_lo = pcsp_lo; + boot_regs->pcsp_hi = pcsp_hi; +} + +/* create VCPU booting local data stack */ +static int +alloc_vcpu_boot_c_stack(struct kvm *kvm, vcpu_boot_stack_t *boot_stacks) +{ + e2k_size_t stack_size; + user_area_t *guest_area; + void *data_stack; + unsigned long stack_hva; + gpa_t stack_gpa; + long npages; + int r; + + stack_size = GET_VCPU_BOOT_CS_SIZE(boot_stacks); + + DebugKVMHV("started to allocate stack size of 0x%lx\n", + stack_size); + npages = PAGE_ALIGN(stack_size) >> PAGE_SHIFT; + guest_area = kvm_find_memory_region(kvm, -1, 0, npages << PAGE_SHIFT, + guest_ram_mem_type); + if (guest_area == NULL) { + DebugKVMHV("guest memory regions is not created or 
empty\n"); + return -ENOMEM; + } + data_stack = user_area_alloc(guest_area, stack_size, 0); + if (data_stack == NULL) { + DebugKVMHV("could not allocate VCPU booting data stack\n"); + r = -ENOMEM; + goto free_region; + } + boot_stacks->data_stack = data_stack; + DebugKVMHV("VCPU booting data stack at user space %px\n", data_stack); + + stack_hva = (unsigned long)data_stack; + stack_gpa = kvm_hva_to_gpa(kvm, stack_hva); + if (IS_INVALID_GPA(stack_gpa)) { + pr_err("%s(): could not convert user space address %px to GPA\n", + __func__, data_stack); + r = -EINVAL; + goto free_region; + } + SET_VCPU_BOOT_CS_BASE(boot_stacks, (void *)stack_gpa); + SET_VCPU_BOOT_CS_TOP(boot_stacks, stack_gpa + stack_size); + DebugKVMHV("VCPU booting data stack at guest space from %px to %px\n", + (void *)stack_gpa, (void *)(stack_gpa + stack_size)); + + return 0; + +free_region: + if (data_stack != NULL) { + user_area_free_chunk(guest_area, data_stack); + boot_stacks->data_stack = NULL; + } + return r; +} + +/* create VCPU booting local data stack */ +static void * +alloc_vcpu_boot_hw_stack(struct kvm *kvm, e2k_size_t stack_size) +{ + user_area_t *guest_area; + void *hw_stack; + long npages; + + DebugKVMHV("started to allocate stack size of 0x%lx\n", + stack_size); + npages = PAGE_ALIGN(stack_size) >> PAGE_SHIFT; + guest_area = kvm_find_memory_region(kvm, -1, 0, npages << PAGE_SHIFT, + guest_ram_mem_type); + if (guest_area == NULL) { + DebugKVMHV("guest memory regions is not created or empty\n"); + return ERR_PTR(-EINVAL); + } + hw_stack = user_area_alloc_present(guest_area, 0, stack_size, 0, 0); + if (hw_stack == NULL) + return ERR_PTR(-ENOMEM); + + return hw_stack; +} +static void +free_vcpu_boot_p_stack(struct kvm *kvm, vcpu_boot_stack_t *boot_stacks) +{ + user_area_t *guest_area; + e2k_addr_t area_start; + + if (boot_stacks->proc_stack == NULL) + return; + area_start = (e2k_addr_t)boot_stacks->proc_stack; + guest_area = kvm_find_memory_region(kvm, + -1, area_start, 0, 
guest_ram_mem_type); + if (guest_area != NULL) + user_area_free_chunk(guest_area, (void *)area_start); + boot_stacks->proc_stack = NULL; +} +static void +free_vcpu_boot_pc_stack(struct kvm *kvm, vcpu_boot_stack_t *boot_stacks) +{ + user_area_t *guest_area; + e2k_addr_t area_start; + + if (boot_stacks->chain_stack == NULL) + return; + area_start = (e2k_addr_t)boot_stacks->chain_stack; + guest_area = kvm_find_memory_region(kvm, + -1, area_start, 0, guest_ram_mem_type); + if (guest_area != NULL) + user_area_free_chunk(guest_area, (void *)area_start); + boot_stacks->chain_stack = NULL; +} + +/* create VCPU booting hardware procedure stack */ +static int +alloc_vcpu_boot_p_stack(struct kvm *kvm, vcpu_boot_stack_t *boot_stacks) +{ + void *p_stack; + e2k_size_t stack_size; + unsigned long stack_hva; + gpa_t stack_gpa; + int r = 0; + + stack_size = GET_VCPU_BOOT_PS_SIZE(boot_stacks); + p_stack = alloc_vcpu_boot_hw_stack(kvm, stack_size); + if (IS_ERR(p_stack)) { + DebugKVMHV("could not allocate VCPU booting procedure stack\n"); + return PTR_ERR(p_stack); + } + boot_stacks->proc_stack = p_stack; + DebugKVMHV("VCPU booting procedure stack at user space %px\n", p_stack); + + stack_hva = (unsigned long)p_stack; + stack_gpa = kvm_hva_to_gpa(kvm, stack_hva); + if (IS_INVALID_GPA(stack_gpa)) { + pr_err("%s(): could not convert user space address %px to GPA\n", + __func__, p_stack); + r = -EINVAL; + goto free_region; + } + SET_VCPU_BOOT_PS_BASE(boot_stacks, (void *)stack_gpa); + DebugKVMHV("VCPU booting procedure stack at guest space " + "from %px to %px\n", + (void *)stack_gpa, (void *)(stack_gpa + stack_size)); + + return 0; + +free_region: + free_vcpu_boot_p_stack(kvm, boot_stacks); + return r; +} + +/* create VCPU booting hardware procedure chain stack */ +static int +alloc_vcpu_boot_pc_stack(struct kvm *kvm, vcpu_boot_stack_t *boot_stacks) +{ + void *pc_stack; + e2k_size_t stack_size; + unsigned long stack_hva; + gpa_t stack_gpa; + int r = 0; + + stack_size = 
GET_VCPU_BOOT_PCS_SIZE(boot_stacks); + pc_stack = alloc_vcpu_boot_hw_stack(kvm, stack_size); + if (IS_ERR(pc_stack)) { + DebugKVMHV("could not allocate VCPU booting chain stack\n"); + return PTR_ERR(pc_stack); + } + boot_stacks->chain_stack = pc_stack; + DebugKVMHV("VCPU booting chain stack at user space %px\n", pc_stack); + + stack_hva = (unsigned long)pc_stack; + stack_gpa = kvm_hva_to_gpa(kvm, stack_hva); + if (IS_INVALID_GPA(stack_gpa)) { + pr_err("%s(): could not convert user space address %px to GPA\n", + __func__, pc_stack); + r = -EINVAL; + goto free_region; + } + SET_VCPU_BOOT_PCS_BASE(boot_stacks, (void *)stack_gpa); + DebugKVMHV("VCPU booting procedure chain stack at guest space " + "from %px to %px\n", + (void *)stack_gpa, (void *)(stack_gpa + stack_size)); + + return 0; + +free_region: + free_vcpu_boot_pc_stack(kvm, boot_stacks); + return r; +} + +static void +free_vcpu_boot_c_stack(struct kvm *kvm, vcpu_boot_stack_t *boot_stacks) +{ + user_area_t *guest_area; + e2k_addr_t area_start; + + if (boot_stacks->data_stack == NULL) + return; + area_start = (e2k_addr_t)boot_stacks->data_stack; + guest_area = kvm_find_memory_region(kvm, + -1, area_start, 0, guest_ram_mem_type); + if (guest_area != NULL) + user_area_free_chunk(guest_area, (void *)area_start); + boot_stacks->data_stack = NULL; +} +static void +free_vcpu_boot_stacks(struct kvm_vcpu *vcpu) +{ + vcpu_boot_stack_t *boot_stacks = &vcpu->arch.boot_stacks; + + free_vcpu_boot_c_stack(vcpu->kvm, boot_stacks); + free_vcpu_boot_p_stack(vcpu->kvm, boot_stacks); + free_vcpu_boot_pc_stack(vcpu->kvm, boot_stacks); +} + +static int +create_vcpu_boot_stacks(struct kvm_vcpu *vcpu) +{ + vcpu_boot_stack_t *boot_stacks = &vcpu->arch.boot_stacks; + int r; + + DebugKVMHV("started on task %s(%d) for VCPU #%d\n", + current->comm, current->pid, vcpu->vcpu_id); + + /* FIXME: stacks now allocated in the guest RAM, but addresses */ + /* is virtual/ because of RAM mapped to virtual space */ + /* It need implement allocation 
with return guest physical address */ + + define_vcpu_boot_stacks_sizes(boot_stacks); + + r = alloc_vcpu_boot_c_stack(vcpu->kvm, boot_stacks); + if (r != 0) + return r; + + r = alloc_vcpu_boot_p_stack(vcpu->kvm, boot_stacks); + if (r != 0) + goto out_free_c_stack; + + r = alloc_vcpu_boot_pc_stack(vcpu->kvm, boot_stacks); + if (r != 0) + goto out_free_p_stack; + + /* create VCPU booting stacks */ + vcpu_all_boot_stacks_init(boot_stacks); + + return 0; + +out_free_c_stack: + free_vcpu_boot_c_stack(vcpu->kvm, boot_stacks); +out_free_p_stack: + free_vcpu_boot_p_stack(vcpu->kvm, boot_stacks); + + return r; +} +static int +vcpu_boot_stacks_init(struct kvm_vcpu *vcpu) +{ + vcpu_all_boot_stacks_init(&vcpu->arch.boot_stacks); + return 0; +} + +static int create_vcpu_host_context(struct kvm_vcpu *vcpu) +{ + kvm_host_context_t *host_ctxt = &vcpu->arch.host_ctxt; + unsigned long *stack; + unsigned long addr; + + DebugKVMHV("started on task %s(%d) for VCPU #%d\n", + current->comm, current->pid, vcpu->vcpu_id); + + KVM_BUG_ON(vcpu->arch.is_hv || !vcpu->arch.is_pv); + + memset(host_ctxt, 0, sizeof(*host_ctxt)); + + /* + * Calculate kernel stacks registers + */ + stack = __alloc_thread_stack_node(numa_node_id()); + if (!stack) { + pr_err("%s(): could not allocate VCPU #%d host stacks\n", + __func__, vcpu->vcpu_id); + return -ENOMEM; + } + *stack = STACK_END_MAGIC; + host_ctxt->stack = stack; + addr = (unsigned long)stack; + host_ctxt->pt_regs = NULL; + host_ctxt->upsr = E2K_USER_INITIAL_UPSR; + host_ctxt->k_psp_lo.PSP_lo_half = 0; + host_ctxt->k_psp_lo.PSP_lo_base = addr + KERNEL_P_STACK_OFFSET; + host_ctxt->k_psp_hi.PSP_hi_half = 0; + host_ctxt->k_psp_hi.PSP_hi_size = KERNEL_P_STACK_SIZE; + host_ctxt->k_pcsp_lo.PCSP_lo_half = 0; + host_ctxt->k_pcsp_lo.PCSP_lo_base = addr + KERNEL_PC_STACK_OFFSET; + host_ctxt->k_pcsp_hi.PCSP_hi_half = 0; + host_ctxt->k_pcsp_hi.PCSP_hi_size = KERNEL_PC_STACK_SIZE; + host_ctxt->k_usd_lo.USD_lo_half = 0; + host_ctxt->k_usd_lo.USD_lo_base = addr + 
KERNEL_C_STACK_OFFSET + + KERNEL_C_STACK_SIZE; + host_ctxt->k_usd_hi.USD_hi_half = 0; + host_ctxt->k_usd_hi.USD_hi_size = KERNEL_C_STACK_SIZE; + host_ctxt->k_sbr.SBR_reg = host_ctxt->k_usd_lo.USD_lo_base; + + host_ctxt->osem = guest_trap_init(); + + host_ctxt->signal.stack.used = 0; + atomic_set(&host_ctxt->signal.traps_num, 0); + atomic_set(&host_ctxt->signal.in_work, 0); + atomic_set(&host_ctxt->signal.syscall_num, 0); + atomic_set(&host_ctxt->signal.in_syscall, 0); + + return 0; +} +static void destroy_vcpu_host_context(struct kvm_vcpu *vcpu) +{ + kvm_host_context_t *host_ctxt; + + if (likely(vcpu->arch.is_hv || !vcpu->arch.is_pv)) + return; + + host_ctxt = &vcpu->arch.host_ctxt; + if (host_ctxt->stack != NULL) { + __free_thread_stack(host_ctxt->stack); + host_ctxt->stack = NULL; + } +} + +static int kvm_arch_any_vcpu_init(struct kvm_vcpu *vcpu) +{ + int r; + + DebugKVM("started for CPU %d\n", vcpu->vcpu_id); + + vcpu->arch.exit_reason = -1; + + /* create shared with guest kernel structure to pass */ + /* some useful info about host and hypervisor */ + if (vcpu->kvm->arch.host_info == NULL) { + r = kvm_create_host_info(vcpu->kvm); + if (r != 0) + return r; + } + + /* create VCPU structures to emulate hardware state */ + r = create_vcpu_state(vcpu); + if (r != 0) + goto free_host_info; + + return 0; + +free_host_info: + kvm_free_host_info(vcpu->kvm); + return r; +} +static void kvm_arch_any_vcpu_uninit(struct kvm_vcpu *vcpu) +{ + DebugKVM("started for VCPU #%d\n", vcpu->vcpu_id); + + kvm_free_host_info(vcpu->kvm); + free_vcpu_state(vcpu); +} +static int kvm_arch_any_vcpu_setup(struct kvm_vcpu *vcpu) +{ + int r; + + DebugKVM("started for CPU %d\n", vcpu->vcpu_id); + + /* init VCPU structures to emulate hardware state */ + r = init_vcpu_state(vcpu); + if (r != 0) + return r; + + return 0; +} + +#ifdef CONFIG_KVM_HW_VIRTUALIZATION + +static int kvm_arch_hv_vcpu_init(struct kvm_vcpu *vcpu) +{ + int r; + + if (vcpu->kvm->arch.vm_type != KVM_E2K_HV_VM_TYPE && + 
vcpu->kvm->arch.vm_type != KVM_E2K_HW_PV_VM_TYPE) + return 0; + + DebugKVM("started for VCPU #%d\n", vcpu->vcpu_id); + + vcpu->arch.is_hv = true; + + if (vcpu->kvm->arch.vm_type == KVM_E2K_HW_PV_VM_TYPE) { + /* paravirtualization support need create and enable */ + r = kvm_arch_pv_vcpu_init(vcpu); + if (r != 0) + goto failed; + } + + return 0; + +failed: + return r; +} +static void kvm_arch_hv_vcpu_uninit(struct kvm_vcpu *vcpu) +{ + if (!vcpu->arch.is_hv) + return; + + DebugKVM("started for VCPU #%d\n", vcpu->vcpu_id); + if (vcpu->arch.is_pv) + /* paravirtualization support need free and disable */ + kvm_arch_pv_vcpu_uninit(vcpu); + + vcpu->arch.is_hv = false; +} + +static int kvm_arch_hv_vcpu_setup(struct kvm_vcpu *vcpu) +{ + int r; + + if (!vcpu->arch.is_hv) + return 0; + + DebugKVM("started for VCPU #%d\n", vcpu->vcpu_id); + + if (vcpu->arch.is_pv) { + /* paravirtualization support need create and enable */ + r = kvm_arch_pv_vcpu_setup(vcpu); + if (r != 0) + return r; + } + + return 0; +} +#else /* ! 
CONFIG_KVM_HW_VIRTUALIZATION */ +static int kvm_arch_hv_vcpu_init(struct kvm_vcpu *vcpu) +{ + VM_BUG_ON(vcpu->arch.is_hv); + return 0; +} +static void kvm_arch_hv_vcpu_uninit(struct kvm_vcpu *vcpu) +{ + VM_BUG_ON(vcpu->arch.is_hv); +} +static int kvm_arch_hv_vcpu_setup(struct kvm_vcpu *vcpu) +{ + VM_BUG_ON(vcpu->arch.is_hv); + return 0; +} +#endif /* CONFIG_KVM_HW_VIRTUALIZATION */ + +#ifdef CONFIG_KVM_PARAVIRTUALIZATION +static int kvm_arch_pv_vcpu_init(struct kvm_vcpu *vcpu) +{ + if (vcpu->kvm->arch.vm_type != KVM_E2K_SV_VM_TYPE && + vcpu->kvm->arch.vm_type != KVM_E2K_SW_PV_VM_TYPE && + vcpu->kvm->arch.vm_type != KVM_E2K_HW_PV_VM_TYPE) + return 0; + + DebugKVM("started for CPU %d\n", vcpu->vcpu_id); + + vcpu->arch.is_pv = true; + + return 0; +} +static void kvm_arch_pv_vcpu_uninit(struct kvm_vcpu *vcpu) +{ + if (!vcpu->arch.is_pv) + return; + + DebugKVM("started for VCPU #%d\n", vcpu->vcpu_id); + + vcpu->arch.is_pv = false; +} +static int kvm_arch_pv_vcpu_setup(struct kvm_vcpu *vcpu) +{ + if (!vcpu->arch.is_pv) + return 0; + + DebugKVM("started for CPU %d\n", vcpu->vcpu_id); + + return 0; +} +#else /* ! 
CONFIG_KVM_PARAVIRTUALIZATION */ +static int kvm_arch_pv_vcpu_init(struct kvm_vcpu *vcpu) +{ + VM_BUG_ON(vcpu->arch.is_pv); + return 0; +} +static void kvm_arch_pv_vcpu_uninit(struct kvm_vcpu *vcpu) +{ + VM_BUG_ON(vcpu->arch.is_pv); +} +static int kvm_arch_pv_vcpu_setup(struct kvm_vcpu *vcpu) +{ + VM_BUG_ON(vcpu->arch.is_pv); + return 0; +} +#endif /* CONFIG_KVM_PARAVIRTUALIZATION */ + +static void kvm_arch_vcpu_ctxt_init(struct kvm_vcpu *vcpu) +{ + struct kvm_sw_cpu_context *sw_ctxt = &vcpu->arch.sw_ctxt; + + memset(&vcpu->arch.sw_ctxt, 0, sizeof(vcpu->arch.sw_ctxt)); + memset(&vcpu->arch.hw_ctxt, 0, sizeof(vcpu->arch.hw_ctxt)); + + sw_ctxt->osem = guest_trap_init(); + + if (vcpu->arch.is_hv) { + guest_hw_stack_t *boot_regs = &vcpu->arch.boot_stacks.regs; + + /* set to initial state some fields */ + sw_ctxt->saved.valid = false; + + /* setup guest boot kernel local data stack */ + sw_ctxt->usd_lo = boot_regs->stacks.usd_lo; + sw_ctxt->usd_hi = boot_regs->stacks.usd_hi; + sw_ctxt->sbr.SBR_reg = boot_regs->stacks.top; + + GET_FPU_DEFAULTS(sw_ctxt->fpsr, sw_ctxt->fpcr, sw_ctxt->pfpfr); + + AS(sw_ctxt->dibcr).gm = 1; + AS(sw_ctxt->ddbcr).gm = 1; + } +} + +static void kvm_arch_vcpu_ctxt_uninit(struct kvm_vcpu *vcpu) +{ + memset(&vcpu->arch.sw_ctxt, 0, sizeof(vcpu->arch.sw_ctxt)); + memset(&vcpu->arch.hw_ctxt, 0, sizeof(vcpu->arch.hw_ctxt)); +} + +int kvm_vm_ioctl_check_extension(struct kvm *kvm, int ext) +{ + + int r; + + DebugKVM("started for ext %d\n", ext); + switch (ext) { + case KVM_CAP_IRQCHIP: + DebugKVM("ioctl is KVM_CAP_IRQCHIP\n"); + r = 1; + break; + case KVM_CAP_MP_STATE: + DebugKVM("ioctl is KVM_CAP_MP_STATE\n"); + r = 1; + break; + case KVM_CAP_MAX_VCPUS: + r = KVM_MAX_VCPUS; + break; + case KVM_CAP_NR_MEMSLOTS: + r = KVM_USER_MEM_SLOTS; + break; + case KVM_CAP_IRQ_INJECT_STATUS: + DebugKVM("ioctl is KVM_CAP_IRQ_INJECT_STATUS\n"); + r = 0; + break; + case KVM_CAP_COALESCED_MMIO: + DebugKVM("ioctl is KVM_CAP_COALESCED_MMIO\n"); + r = 0; + break; + case 
KVM_CAP_SYNC_MMU: + DebugKVM("ioctl is KVM_CAP_SYNC_MMU\n"); + r = 1; + break; + case KVM_CAP_E2K_SV_VM: + DebugKVM("ioctl is KVM_CAP_E2K_SV_VM\n"); + r = kvm_is_sv_vm_available(); + break; + case KVM_CAP_E2K_SW_PV_VM: + DebugKVM("ioctl is KVM_CAP_E2K_SW_PV_VM\n"); + r = kvm_is_sw_pv_vm_available(); + break; + case KVM_CAP_E2K_HV_VM: + DebugKVM("ioctl is KVM_CAP_E2K_HV_VM\n"); + r = kvm_is_hv_vm_available(); + break; + case KVM_CAP_E2K_TDP_MMU: + DebugKVM("ioctl is KVM_CAP_E2K_TDP_MMU\n"); + r = kvm_is_tdp_enable(kvm); + break; + case KVM_CAP_E2K_SHADOW_PT_MMU: + DebugKVM("ioctl is KVM_CAP_E2K_SHADOW_PT_MMU\n"); + if (kvm->arch.is_hv) { + r = true; + } else { + r = kvm_is_shadow_pt_enable(kvm); + } + break; + case KVM_CAP_ENABLE_CAP_VM: + r = 1; + break; + default: + DebugKVM("ioctl is unsupported\n"); + r = 0; + } + DebugKVM("completed with value %d\n", r); + return r; + +} + +static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) +{ + kvm_run->exit_reason = KVM_EXIT_UNKNOWN; + kvm_run->hw.hardware_exit_reason = 1; + return 0; +} + +static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) +{ + struct kvm_mmio_fragment *frag; + + DebugKVMIO("started for VCPU #%d run area at %px\n", + vcpu->vcpu_id, kvm_run); + + if (vcpu->mmio_nr_fragments == 0) { + DebugKVMIO("VCPU #%d nothing to do: none mmio fragments\n", + vcpu->vcpu_id); + return -EINVAL; + } else if (vcpu->mmio_nr_fragments > 1) { + DebugKVMIO("VCPU #%d too many mmio fragments (%d > 1)\n", + vcpu->vcpu_id, vcpu->mmio_nr_fragments); + return -EINVAL; + } + frag = &vcpu->mmio_fragments[0]; + kvm_run->mmio.phys_addr = frag->gpa; + kvm_run->mmio.len = frag->len; + kvm_run->mmio.is_write = vcpu->mmio_is_write; + + if (vcpu->mmio_is_write) + memcpy(kvm_run->mmio.data, frag->data, frag->len); + kvm_run->exit_reason = KVM_EXIT_MMIO; + + DebugKVMIO("returns to host user: phys addr 0x%llx size %d to %s\n", + kvm_run->mmio.phys_addr, kvm_run->mmio.len, + (kvm_run->mmio.is_write) ? 
"write" : "read"); + return 0; +} + +static inline unsigned long get_ioport_data_offset(struct kvm_run *kvm_run) +{ + unsigned long data_offset; + + data_offset = ALIGN_TO_SIZE(sizeof(*kvm_run), 1 * 1024); + if (data_offset >= PAGE_SIZE) { + panic("get_ioport_data_offset() KVM run area size 0x%lx, " + "IO data area offset 0x%lx > PAGE SIZE\n", + sizeof(*kvm_run), data_offset); + } else if (sizeof(*kvm_run) > data_offset) { + panic("get_ioport_data_offset() KVM run area size 0x%lx > " + "IO data area offset 0x%lx\n", + sizeof(*kvm_run), data_offset); + } + return data_offset; +} +static inline unsigned long get_ioport_data_size(struct kvm_run *kvm_run) +{ + unsigned long data_offset; + + data_offset = get_ioport_data_offset(kvm_run); + return PAGE_SIZE - data_offset; +} +static inline void *get_ioport_data_pointer(struct kvm_run *kvm_run) +{ + return (void *)(((void *)kvm_run) + get_ioport_data_offset(kvm_run)); +} + +static int handle_ioport(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) +{ + DebugKVMIO("started for VCPU #%d run area at %px\n", + vcpu->vcpu_id, kvm_run); + kvm_run->io.port = vcpu->arch.ioport.port; + kvm_run->io.data_offset = get_ioport_data_offset(kvm_run); + kvm_run->io.size = vcpu->arch.ioport.size; + kvm_run->io.count = vcpu->arch.ioport.count; + kvm_run->io.direction = + (vcpu->arch.ioport.is_out) ? KVM_EXIT_IO_OUT : KVM_EXIT_IO_IN; + + if (vcpu->arch.ioport.is_out) { + void *data = get_ioport_data_pointer(kvm_run); + if (vcpu->arch.ioport.string) { + memcpy(data, vcpu->arch.ioport_data, + vcpu->arch.ioport.size * + vcpu->arch.ioport.count); + } else { + memcpy(data, &vcpu->arch.ioport.data, + vcpu->arch.ioport.size); + } + } + kvm_run->exit_reason = KVM_EXIT_IO; + + DebugKVMIO("returns to host user: port 0x%x size %d to %s\n", + kvm_run->io.port, kvm_run->io.size, + (kvm_run->io.direction == KVM_EXIT_IO_OUT) ? 
"write" : "read"); + return 0; +} + +static int handle_notify_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) +{ + kvm_run->exit_reason = KVM_EXIT_E2K_NOTIFY_IO; + kvm_run->notifier.io = vcpu->arch.notifier_io; + return 0; +} + +static int handle_shutdown(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) +{ + DebugKVMSH("started, shutdown type %d\n", vcpu->run->exit_reason); + + kvm_run->exit_reason = vcpu->run->exit_reason; + + raw_spin_lock(&vcpu->arch.exit_reqs_lock); + vcpu->arch.halted = true; + raw_spin_unlock(&vcpu->arch.exit_reqs_lock); + + /* FIXME: VCPU request queue is not more used, but need delete all + * functionality related to spliting VCPU support into two threads + if (!vcpu->arch.is_hv) { + complete(&vcpu->arch.exit_req_done); + put_exit_req_vcpu(vcpu); + } + */ + if (vcpu->run->exit_reason != KVM_EXIT_E2K_RESTART) { + vcpu->kvm->arch.halted = true; + } else if (kvm_run->exit_reason == KVM_EXIT_E2K_RESTART) { + vcpu->kvm->arch.reboot = true; + } + smp_mb(); /* to sure the flag is set */ + /* wake up other host VCPUs to complete guest VCPUs threads */ + kvm_wake_up_all_other_vcpu_host(vcpu); + return 0; +} + +static int (*kvm_guest_exit_handlers[])(struct kvm_vcpu *vcpu, + struct kvm_run *kvm_run) = { + [EXIT_REASON_VM_PANIC] = handle_vm_error, + [EXIT_REASON_MMIO_REQ] = handle_mmio, + [EXIT_REASON_IOPORT_REQ] = handle_ioport, + [EXIT_NOTIFY_IO] = handle_notify_io, + [EXIT_SHUTDOWN] = handle_shutdown, +}; + +static const int kvm_guest_max_exit_handlers = + sizeof(kvm_guest_exit_handlers) / + sizeof(*kvm_guest_exit_handlers); + +static inline uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu) +{ + u32 exit_reason; + if (vcpu->arch.exit_shutdown_terminate) { + vcpu->arch.exit_reason = EXIT_SHUTDOWN; + if (vcpu->arch.exit_shutdown_terminate == KVM_EXIT_E2K_RESTART) + vcpu->run->exit_reason = KVM_EXIT_E2K_RESTART; + else + vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; + } + exit_reason = vcpu->arch.exit_reason; + vcpu->arch.exit_reason = -1; + return 
exit_reason; +} + +/* + * The guest has exited. See if we can fix it or if we need userspace + * assistance. + */ +static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) +{ + u32 exit_reason = kvm_get_exit_reason(vcpu); + vcpu->arch.last_exit = exit_reason; + + DebugKVMRUN("started on VCPU %d on exit reason %d\n", + vcpu->vcpu_id, exit_reason); + if (exit_reason < kvm_guest_max_exit_handlers + && kvm_guest_exit_handlers[exit_reason]) { + return kvm_guest_exit_handlers[exit_reason](vcpu, kvm_run); + } else if (exit_reason == -1) { + /* exit reason was not set, try run VCPU again */ + return 1; + } else { + kvm_run->exit_reason = KVM_EXIT_UNKNOWN; + kvm_run->hw.hardware_exit_reason = exit_reason; + DebugKVM("exit reason %d is unknown\n", + exit_reason); + } + return 0; +} + +static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) +{ + int r; + + DebugKVMRUN("started on VCPU %d CPU %d\n", + vcpu->vcpu_id, vcpu->cpu); + + /* + * down_read() may sleep and return with interrupts enabled + */ + mutex_lock(&vcpu->kvm->slots_lock); + +again: + if (unlikely(signal_pending(current))) { + r = -EINTR; + kvm_run->exit_reason = KVM_EXIT_INTR; + ++vcpu->stat.signal_exits; + goto out; + } + if (unlikely(vcpu->arch.halted)) { + r = -EINVAL; + kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; + goto out; + } + if (unlikely(vcpu->kvm->arch.halted)) + /* VM halted, terminate all VCPUs */ + goto out; + + local_irq_disable(); + + clear_bit(KVM_REQ_KICK, (void *) &vcpu->requests); + + mutex_unlock(&vcpu->kvm->slots_lock); + + /* + * Transition to the guest + */ + if (likely(vcpu->arch.is_hv)) { + r = startup_hv_vcpu(vcpu); + KVM_BUG_ON(r == 0); + } else if (!vcpu->arch.from_pv_intc) { + launch_pv_vcpu(vcpu, FULL_CONTEXT_SWITCH | USD_CONTEXT_SWITCH); + } else { + return_to_pv_vcpu_intc(vcpu); + } + + local_irq_enable(); + + mutex_lock(&vcpu->kvm->slots_lock); + + r = kvm_handle_exit(kvm_run, vcpu); + + if (r > 0) { + if (!need_resched()) + goto again; + } + +out: 
+ mutex_unlock(&vcpu->kvm->slots_lock); + if (unlikely(vcpu->kvm->arch.halted)) + goto vm_complete; + if (r > 0) { + cond_resched(); + mutex_lock(&vcpu->kvm->slots_lock); + goto again; + } + + kvm_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_in_QEMU); + + return r; + +vm_complete: + kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; + return 0; +} + +int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) +{ + int r; + + DebugKVMRUN("started\n"); + + vcpu_load(vcpu); + + kvm_sigset_activate(vcpu); + + if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { + if (kvm_run->immediate_exit) { + r = -EINTR; + goto out; + } + kvm_vcpu_block(vcpu); + kvm_clear_request(KVM_REQ_UNHALT, vcpu); + r = -EAGAIN; + if (signal_pending(current)) { + r = -EINTR; + vcpu->run->exit_reason = KVM_EXIT_INTR; + ++vcpu->stat.signal_exits; + } + goto out; + } + + if (vcpu->arch.ioport.needed) { + if (!vcpu->arch.ioport.is_out) { + void *data = get_ioport_data_pointer(kvm_run); + if (vcpu->arch.ioport.string) { + memcpy(vcpu->arch.ioport_data, data, + vcpu->arch.ioport.size * + vcpu->arch.ioport.count); + } else { + memcpy(&vcpu->arch.ioport.data, data, + vcpu->arch.ioport.size); + } + } + vcpu->arch.ioport.completed = 1; + vcpu->arch.ioport.needed = 0; + r = kvm_complete_guest_ioport_request(vcpu); + if (r) { + pr_err("%s(): IO PORT request completion failed, " + "error %d\n", + __func__, r); + } + } + + if (vcpu->mmio_needed) { + struct kvm_mmio_fragment *frag; + unsigned len; + + /* Complete previous fragment */ + if (vcpu->mmio_cur_fragment != 0) { + pr_err("%s(): invalid number of current fragments " + "(%d != 0)\n", + __func__, vcpu->mmio_cur_fragment); + } + frag = &vcpu->mmio_fragments[0]; + len = min(8u, frag->len); + if (!vcpu->mmio_is_write) + memcpy(frag->data, kvm_run->mmio.data, len); + vcpu->mmio_read_completed = 1; + vcpu->mmio_needed = 0; + r = kvm_complete_guest_mmio_request(vcpu); + if (r) { + pr_err("%s(): MMIO request completion failed, " + 
"error %d\n", + __func__, r); + } + } + if (kvm_run->immediate_exit) { + r = -EINTR; + } else { + r = __vcpu_run(vcpu, kvm_run); + } +out: + kvm_sigset_deactivate(vcpu); + + if (kvm_run->exit_reason == KVM_EXIT_E2K_RESTART) { + vcpu->kvm->arch.reboot = true; + } + vcpu_put(vcpu); + return r; +} + +/* + * Set a new alias region. Aliases map a portion of virtual memory into + * another portion. This is useful for guest kernel image loading to + * own virtual addresses + */ +static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm, + kvm_memory_alias_t __user *alias) +{ + int r, n; + kvm_memory_alias_t guest_alias; + struct kvm_mem_alias *p; + struct kvm_memslots *slots = kvm_memslots(kvm); + struct kvm_memory_slot *memslot; + int slot; + + if (kvm_is_shadow_pt_enable(kvm)) { + pr_err("%s(): support of guest MMU based on shadow PT " + "so aliasing can be deleted for guest VM\n", + __func__); + return -ENOTTY; + } + if (copy_from_user(&guest_alias, alias, sizeof(kvm_memory_alias_t))) + return -EFAULT; + DebugKVM("started for aliasing from 0x%llx size 0x%llx to 0x%llx\n", + guest_alias.guest_alias_addr, guest_alias.memory_size, + guest_alias.target_addr); + r = -EINVAL; + /* General sanity checks */ + if (guest_alias.memory_size & (PAGE_SIZE - 1)) + goto out; + if (guest_alias.guest_alias_addr & (PAGE_SIZE - 1)) + goto out; + if (guest_alias.slot >= KVM_ALIAS_SLOTS) + goto out; + if (guest_alias.guest_alias_addr + guest_alias.memory_size < + guest_alias.guest_alias_addr) + goto out; + if (guest_alias.target_addr + guest_alias.memory_size < + guest_alias.target_addr) + goto out; + + mutex_lock(&kvm->slots_lock); + slot = kvm_gva_to_memslot_unaliased(kvm, guest_alias.target_addr); + if (slot < 0) { + mutex_unlock(&kvm->slots_lock); + goto out; + } + memslot = id_to_memslot(slots, slot); + spin_lock(&kvm->mmu_lock); + + p = &kvm->arch.aliases[guest_alias.slot]; + p->alias_start = guest_alias.guest_alias_addr; + p->target_start = guest_alias.target_addr; + p->alias_base_gfn 
= memslot->base_gfn + ((guest_alias.target_addr - + memslot->userspace_addr) >> PAGE_SHIFT); + p->npages = guest_alias.memory_size >> PAGE_SHIFT; + p->target_slot = slot; + + for (n = KVM_ALIAS_SLOTS; n > 0; --n) + if (kvm->arch.aliases[n - 1].npages) + break; + kvm->arch.naliases = n; + + spin_unlock(&kvm->mmu_lock); + + mutex_unlock(&kvm->slots_lock); + DebugKVM("created aliasing from 0x%lx pages 0x%lx to 0x%lx base gfn " + "0x%llx at slot %d\n", + p->alias_start, p->npages, p->target_start, p->alias_base_gfn, + slot); + + return 0; + +out: + DebugKVM("creation failed: error %d\n", r); + return r; +} + +/* + * Set a new shadow (alias) an area of host kernel virtual into guest kernel + * virtual space + */ +static int kvm_set_shadow_area(struct kvm *kvm, + kvm_kernel_area_shadow_t *guest_shadow) +{ + int r; + struct kvm_kernel_shadow *p; + unsigned long kernel_start; + unsigned long kernel_end; + unsigned long shadow_addr; + unsigned long area_size; + int alias_slot; + + DebugKVM("started for shadowing kernel area from 0x%llx size 0x%llx " + "to guest 0x%llx, slot %d\n", + guest_shadow->kernel_addr, guest_shadow->area_size, + guest_shadow->guest_shadow_addr, guest_shadow->slot); + r = -EINVAL; + /* General sanity checks */ + kernel_start = guest_shadow->kernel_addr; + area_size = PAGE_ALIGN_DOWN(guest_shadow->area_size); + if (kernel_start & ~PAGE_MASK) + goto out; + if (guest_shadow->slot >= KVM_SHADOW_SLOTS) + goto out; + kernel_end = kernel_start + area_size; + if (kernel_end < kernel_start) + goto out; + if ((kernel_start & PGDIR_MASK) != ((kernel_end - 1) & PGDIR_MASK)) { + printk(KERN_ERR " kvm_vm_ioctl_set_shadow_area() multiline " + "shadow is not supported\n"); + goto out; + } + shadow_addr = guest_shadow->guest_shadow_addr; + if ((kernel_start & ~PGDIR_MASK) != (shadow_addr & ~PGDIR_MASK)) { + printk(KERN_ERR " kvm_vm_ioctl_set_shadow_area() only PGD " + "level shadow is supported\n"); + goto out; + } + mutex_lock(&kvm->slots_lock); + if 
(find_shadow_intersection(kvm, kernel_start, shadow_addr, + area_size)) { + mutex_unlock(&kvm->slots_lock); + goto out; + } + alias_slot = gva_to_alias_slot(kvm, shadow_addr); + if (alias_slot < 0) { + mutex_unlock(&kvm->slots_lock); + DebugKVM("Could not find alias slot for shadow 0x%lx\n", + shadow_addr); + goto out; + } + + spin_lock(&kvm->mmu_lock); + + p = &kvm->arch.shadows[guest_shadow->slot]; + p->kernel_start = kernel_start; + p->shadow_start = shadow_addr; + p->area_size = area_size; + p->alias_slot = alias_slot; + kvm->arch.nshadows++; + + spin_unlock(&kvm->mmu_lock); + + mutex_unlock(&kvm->slots_lock); + DebugKVM("created shadowing at slot %d to alias addr at slot %d\n", + guest_shadow->slot, alias_slot); + + return 0; + +out: + DebugKVM("setting failed: error %d\n", r); + return r; +} + +/* + * Create shadow alias into guest virtual space to load guest kernel image + * into own virtual space instead of host kernel image addresses and later + * it needs switch only one host kernel pgd entry into host page table + * to shadow guest pgd entry to enable guest kernel image running on the + * same addresses as host kernel + */ +static int kvm_vm_ioctl_set_kernel_image_shadow(struct kvm *kvm, + kvm_kernel_area_shadow_t __user *shadow) +{ + kvm_kernel_area_shadow_t guest_shadow; + e2k_addr_t kernel_base; + thread_info_t *ti; + int r; + + if (copy_from_user(&guest_shadow, shadow, + sizeof(kvm_kernel_area_shadow_t))) + return -EFAULT; + DebugKVM("started for shadowing kernel image from 0x%llx size 0x%llx " + "to guest 0x%llx\n", + guest_shadow.kernel_addr, guest_shadow.area_size, + guest_shadow.guest_shadow_addr); + kernel_base = guest_shadow.kernel_addr; + if (kernel_base >= PAGE_ALIGN_UP(KERNEL_TTABLE_BASE) && + kernel_base < PAGE_ALIGN_DOWN(KERNEL_TTABLE_END)) { + if (!kvm_is_sw_pv_vm_available()) { + pr_err("KVM: hypervisor is not paravirtualized and " + "cannot run paravirtualized guest\n"); + return -EINVAL; + } + } + r = kvm_set_shadow_area(kvm, 
&guest_shadow); + if (r) { + DebugKVM("shadow area setting failed: error %d\n", r); + return r; + } + if (kernel_base < PAGE_ALIGN_UP(KERNEL_TTABLE_BASE) || + kernel_base >= PAGE_ALIGN_DOWN(KERNEL_TTABLE_END)) { + return 0; + } + r = kvm_map_host_ttable_to_shadow(kvm, kernel_base, + guest_shadow.guest_shadow_addr); + if (r) { + DebugKVM("mapping of host trap table to shadow guest failed: " + "error %d\n", r); + return r; + } + ti = current_thread_info(); + ti->shadow_image_pgd = + *pgd_offset(current->mm, guest_shadow.guest_shadow_addr); + ti->paravirt_page_prefault = &kvm_e2k_paravirt_page_prefault; + set_kvm_mode_flag(kvm, KVMF_PARAVIRT_GUEST); + DebugKVM("guest kernel is paravirtualized image: host image pgd %px = " + "0x%lx, shadow pgd 0x%lx\n", + ti->kernel_image_pgd_p, pgd_val(ti->kernel_image_pgd), + pgd_val(ti->shadow_image_pgd)); + return PAGE_ALIGN_DOWN(KERNEL_TTABLE_END) - + PAGE_ALIGN_UP(KERNEL_TTABLE_BASE); +} + +vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) +{ + DebugKVM("VCPU #%d started for address 0x%lx\n", + vcpu->vcpu_id, vmf->address); + return VM_FAULT_SIGBUS; +} + +static int kvm_alloc_epic_pages(struct kvm *kvm) +{ + unsigned long epic_gstbase; + + if (kvm->arch.is_hv) { + DebugKVM("started to alloc pages for EPIC\n"); + + kvm->arch.epic_pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, + MAX_EPICS_ORDER); + + if (!kvm->arch.epic_pages) { + DebugKVM("failed to alloc memory for EPIC\n"); + return -ENOMEM; + } + + epic_gstbase = (unsigned long)page_address(kvm->arch.epic_pages); + + DebugKVM("EPIC gstbase for gstid %d is 0x%lx (PA 0x%lx)\n", kvm->arch.vmid.nr, + epic_gstbase, __pa(epic_gstbase)); + } + + return 0; +} + +static void kvm_free_epic_pages(struct kvm *kvm) +{ + struct page *epic_pages = kvm->arch.epic_pages; + + if (kvm->arch.is_hv) { + DebugKVM("started to free hw EPIC pages\n"); + + __free_pages(epic_pages, MAX_EPICS_ORDER); + } +} + +/* FIXME this only works for IOEPIC #0 and VCPU #0 */ +static int 
kvm_setup_passthrough(struct kvm *kvm) +{ + struct pci_dev *pdev = NULL; + struct irq_remap_table *irt; + + irt = kmalloc(sizeof(struct irq_remap_table), + GFP_KERNEL); + if (!irt) + return -ENOMEM; + + kvm->arch.irt = irt; + irt->enabled = false; + irt->vfio_dev = NULL; + + if (kvm->arch.is_hv) { + /* Setup passthrough for first device with vfio-pci driver */ + for_each_pci_dev(pdev) { + if (pdev->driver && !strcmp(pdev->driver->name, "vfio-pci")) { + int node = dev_to_node(&pdev->dev); + + irt->vfio_dev = pdev; + pdev->dev.archdata.kvm = kvm; + + if (node == NUMA_NO_NODE) + node = 0; + + pr_info("Found VFIO device bus %d devfn 0x%x\n", + pdev->bus->number, pdev->devfn); + + if (pdev->irq >= 16 && pdev->irq <= 19) { + pr_info("kvm_ioepic: using PCI INTx passthrough (pin %d)\n", + pdev->irq); + return 0; + } + + if (!l_eioh_device(pdev)) { + pr_warn("kvm_ioepic: IOHub2 interrupt passthrough not supported (IOAPIC pin %d)\n", + pdev->irq); + return 0; + } + + pr_info("kvm_ioepic: passing pin %d to guest\n", pdev->irq); + + irt->enabled = true; + irt->host_pin = pdev->irq; + irt->guest_pin = pdev->irq; + irt->host_node = node; + irt->guest_node = 0; + irt->hpa = io_epic_base_node(node) + (irt->host_pin << PAGE_SHIFT); + /* Set in kvm_ioepic_set_base() */ + irt->gpa = 0; + + return 0; + } + } + } + + return 0; +} + +int kvm_setup_legacy_vga_passthrough(struct kvm *kvm) +{ + int ret; + struct irq_remap_table *irt = kvm->arch.irt; + + if (unlikely(!irt->enabled)) { + pr_err("%s(): error: trying to pass VGA area without passing any device\n", + __func__); + return -EPERM; + } else { + ret = vga_get_interruptible(irt->vfio_dev, VGA_RSRC_LEGACY_MEM); + if (ret) { + pr_err("%s(): failed to acquire legacy VGA area from vgaarb\n", + __func__); + return ret; + } + } + + return 0; +} + +static void setup_guest_features(struct kvm *kvm) +{ + kvm_guest_info_t *guest_info = &kvm->arch.guest_info; + unsigned long features = 0; + + guest_info->features = features; +} + +static int 
kvm_setup_guest_info(struct kvm *kvm, void __user *user_info) +{ + kvm_guest_info_t *guest_info = &kvm->arch.guest_info; + int ret; + + if (copy_from_user(guest_info, user_info, sizeof(*guest_info))) { + pr_err("%s(): could not copy info from user\n", __func__); + return -EFAULT; + } + + guest_info->is_stranger = guest_info->cpu_iset < E2K_ISET_V6; + if (guest_info->is_stranger) { + if (kvm_is_epic(kvm)) { + pr_err("%s(): KVM was set to use 'EPIC', but guest " + "cpu iset V%d needs at 'APIC'\n", + __func__, guest_info->cpu_iset); + return -EINVAL; + } + guest_info->mmu_support_pt_v6 = false; + } else { + if (!kvm_is_epic(kvm)) { + pr_err("%s(): KVM was set to use 'APIC', but guest " + "cpu iset V%d needs at 'EPIC'\n", + __func__, guest_info->cpu_iset); + return -EINVAL; + } + } + + if (guest_info->is_pv) { + /* guest is paravirtualized and cannot be run as bare */ + ret = kvm_disable_tdp_mode(kvm); + if (ret) + return ret; + pr_info("%s(): guest is paravirtualized and cannot be run " + "in TDP mode, so mode is disabled\n", + __func__); + } + + if (guest_info->cpu_iset == E2K_ISET_V2) { + /* guest based on iset V2 cannot be run in TDP mode */ + ret = kvm_disable_tdp_mode(kvm); + if (ret) + return ret; + pr_info("%s(): cpu iset V2 cannot be run in TDP mode, " + "so mode is disabled\n", + __func__); + } + + setup_guest_features(kvm); + + return 0; +} + +static void kvm_free_passthrough(struct kvm *kvm) +{ + struct irq_remap_table *irt = kvm->arch.irt; + + if (kvm->arch.legacy_vga_passthrough) + vga_put(irt->vfio_dev, VGA_RSRC_LEGACY_MEM); + + kfree(irt); +} + +int kvm_arch_init_vm(struct kvm *kvm, unsigned long vm_type) +{ + int err; + + DebugKVM("started to create VM type %lx\n", vm_type); + + if (vm_type & KVM_E2K_EPIC_VM_FLAG) { + DebugKVM("creating EPIC VM\n"); + kvm->arch.is_epic = true; + } else { + DebugKVM("creating APIC VM\n"); + } + + vm_type &= KVM_E2K_VM_TYPE_MASK; + + if (kvm_is_sv_vm_available() || kvm_is_sw_pv_vm_available()) + kvm->arch.is_pv = true; + 
if (kvm_is_hv_vm_available() || kvm_is_hw_pv_vm_available()) + kvm->arch.is_hv = true; + + if (vm_type == 0) { + /* default VM type, choose max better type */ + if (kvm_is_hw_pv_vm_available()) + vm_type = KVM_E2K_HW_PV_VM_TYPE; + else if (kvm_is_hv_vm_available()) + vm_type = KVM_E2K_HV_VM_TYPE; + else if (kvm_is_sw_pv_vm_available()) + vm_type = KVM_E2K_SW_PV_VM_TYPE; + DebugKVM("will be created VM type %ld\n", vm_type); + } else { + switch (vm_type) { + case KVM_E2K_SV_VM_TYPE: + if (!kvm_is_sv_vm_available()) + return -EINVAL; + kvm->arch.is_hv = false; + break; + case KVM_E2K_SW_PV_VM_TYPE: + if (!kvm_is_sw_pv_vm_available()) + return -EINVAL; + kvm->arch.is_hv = false; + break; + case KVM_E2K_HV_VM_TYPE: + if (!kvm_is_hv_vm_available()) + return -EINVAL; + kvm->arch.is_pv = false; + break; + case KVM_E2K_HW_PV_VM_TYPE: + if (!kvm_is_hw_pv_vm_available()) + return -EINVAL; + break; + default: + return -EINVAL; + } + } + kvm->arch.vm_type = vm_type; + + kvm_arch_init_vm_mmap(kvm); + + /* BSP id can be defined by ioctl(), now set to default 0 */ + kvm->arch.bsp_vcpu_id = 0; + + err = kvm_alloc_vmid(kvm); + if (err) + goto error_vm; + DebugKVM("allocated VM ID (GID) #%d\n", kvm->arch.vmid.nr); + set_thread_flag(TIF_VM_CREATED); + native_current_thread_info()->virt_machine = kvm; + + if (kvm->arch.is_pv && !kvm->arch.is_hv) { + err = kvm_pv_guest_thread_info_init(kvm); + if (err) + goto error_gti; + + err = kvm_guest_pv_mm_init(kvm); + if (err) + goto error_gmm; + } + + kvm_page_track_init(kvm); + kvm_mmu_init_vm(kvm); + + raw_spin_lock_init(&kvm->arch.virq_lock); + kvm->arch.max_irq_no = -1; + + INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); + + kvm_arch_init_vm_mmu(kvm); + + err = kvm_alloc_epic_pages(kvm); + if (err) + goto error_gmm; + + err = kvm_setup_passthrough(kvm); + if (err) + goto error_gmm; + + kvm->arch.reboot = false; + kvm->arch.num_numa_nodes = 1; + kvm->arch.max_nr_node_cpu = 0; + + err = kvm_boot_spinlock_init(kvm); + if (err) + goto error_gmm; + 
err = kvm_guest_spinlock_init(kvm); + if (err) + goto error_boot_spinlock; + err = kvm_guest_csd_lock_init(kvm); + if (err) + goto error_spinlock; + + kvm->arch.num_sclkr_run = 0; + kvm->arch.sh_sclkm3 = 0; + raw_spin_lock_init(&kvm->arch.sh_sclkr_lock); + + return 0; + +error_boot_spinlock: + kvm_boot_spinlock_destroy(kvm); +error_spinlock: + kvm_guest_spinlock_destroy(kvm); +error_gmm: + if (kvm->arch.is_pv && !kvm->arch.is_hv) { + kvm_pv_guest_thread_info_destroy(kvm); + } +error_gti: + native_current_thread_info()->virt_machine = NULL; + clear_thread_flag(TIF_VM_CREATED); +error_vm: + return err; +} + +static void setup_kvm_features(struct kvm *kvm) +{ + kvm_host_info_t *host_info = kvm->arch.kmap_host_info; + unsigned long features = 0; + + if (kvm->arch.is_hv) { + features |= (KVM_FEAT_HV_CPU_MASK | + KVM_FEAT_HW_HCALL_MASK); + features |= KVM_FEAT_HV_MMU_MASK; + } + if (kvm->arch.is_pv) { + features |= KVM_FEAT_PV_CPU_MASK; + } + if (kvm->arch.is_pv && !kvm->arch.is_hv) { + /* hypervisor (can) support only paravirtualization */ + features |= (KVM_FEAT_PV_HCALL_MASK | + KVM_FEAT_PV_MMU_MASK); + } + if (kvm_is_epic(kvm)) { + if (kvm->arch.is_hv && cpu_has(CPU_FEAT_EPIC)) + features |= KVM_FEAT_HV_EPIC_MASK; + else + features |= KVM_FEAT_PV_EPIC_MASK; + } else { /* can be only APIC */ + if (kvm->arch.is_pv) + features |= KVM_FEAT_PV_APIC_MASK; + } + + host_info->features = features; +} + +static void kvm_setup_host_info(struct kvm *kvm) +{ + kvm->arch.kmap_host_info->mach_id = native_machine_id; + kvm->arch.kmap_host_info->cpu_rev = machine.native_rev; + kvm->arch.kmap_host_info->cpu_iset = machine.native_iset_ver; + kvm->arch.kmap_host_info->support_hw_hc = + machine.native_iset_ver >= E2K_ISET_V6; + if (machine.native_iset_ver >= E2K_ISET_V6 && machine.mmu_pt_v6) + kvm->arch.kmap_host_info->mmu_support_pt_v6 = true; + else + kvm->arch.kmap_host_info->mmu_support_pt_v6 = false; + setup_kvm_features(kvm); + kvm_update_guest_time(kvm); +} + +static int 
kvm_create_host_info(struct kvm *kvm) +{ + kvm_host_info_t *host_info = NULL; + kvm_host_info_t *kmap_host_info = NULL; + user_area_t *guest_area; + + int npages; + long r; + + DebugKVM("started\n"); + mutex_lock(&kvm->lock); + if (unlikely(kvm->arch.host_info != NULL)) { + mutex_unlock(&kvm->lock); + DebugKVM("host info structure is already created at %px\n", + kvm->arch.host_info); + return 0; + } + npages = PAGE_ALIGN(sizeof(kvm_host_info_t)) >> PAGE_SHIFT; + guest_area = kvm_find_memory_region(kvm, -1, 0, npages << PAGE_SHIFT, + guest_vram_mem_type); + if (guest_area == NULL) { + DebugKVM("guest memory regions is not created or empty\n"); + r = -ENOMEM; + goto out; + } + host_info = user_area_alloc_locked_pages(guest_area, 0, + sizeof(kvm_host_info_t), 1 << E2K_ALIGN_GLOBALS, 0); + if (host_info == NULL) { + DebugKVM("could not allocate TIME state struct\n"); + r = -ENOMEM; + goto error; + } + DebugKVM("host info structure created at %px\n", host_info); + + /* + * host info maps to kernel vmaloc range to have access into + * this state from any host kernel threads on kernel addresses + * Guest address can change from physical to virtual + */ + kmap_host_info = map_user_area_to_vmalloc_range(guest_area, + host_info, PAGE_KERNEL); + if (kmap_host_info == NULL) { + DebugKVM("could not map host info struct to kernel VM\n"); + r = -ENOMEM; + goto error; + } + + memset(kmap_host_info, 0, sizeof(kvm_host_info_t)); + kvm->arch.host_info = host_info; + kvm->arch.kmap_host_info = kmap_host_info; + kvm->arch.time_state_lock = + __RAW_SPIN_LOCK_UNLOCKED(&kvm->arch.time_state_lock); + kvm_setup_host_info(kvm); + + r = 0; + goto out; + +error: + if (kmap_host_info != NULL) { + unmap_user_area_to_vmalloc_range(guest_area, kmap_host_info); + kvm->arch.kmap_host_info = NULL; + } + if (host_info != NULL) { + user_area_free_chunk(guest_area, host_info); + kvm->arch.host_info = NULL; + } + +out: + mutex_unlock(&kvm->lock); + return r; +} + +static void kvm_free_host_info(struct 
kvm *kvm) +{ + DebugKVMSH("%s (%d) started\n", + current->comm, current->pid); + if (kvm->arch.host_info != NULL) { + user_area_t *guest_area; + e2k_addr_t area_start; + + area_start = (e2k_addr_t)kvm->arch.host_info; + guest_area = kvm_find_memory_region(kvm, + -1, area_start, 0, guest_vram_mem_type); + if (kvm->arch.kmap_host_info != NULL) { + unmap_user_area_to_vmalloc_range(guest_area, + kvm->arch.kmap_host_info); + kvm->arch.kmap_host_info = NULL; + } + user_area_free_chunk(guest_area, kvm->arch.host_info); + kvm->arch.host_info = NULL; + } else if (kvm->arch.kmap_host_info != NULL) { + unmap_user_area_to_vmalloc_range(NULL, + kvm->arch.kmap_host_info); + kvm->arch.kmap_host_info = NULL; + } +} + +static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, + struct kvm_irqchip *chip) +{ + int r; + + DebugKVM("started\n"); + r = 0; + switch (chip->chip_id) { + case KVM_IRQCHIP_IOAPIC: + /* IOEPIC is currently not supported in QEMU */ + if (!kvm_is_epic(kvm)) + r = kvm_get_ioapic(kvm, &chip->chip.ioapic); + break; + default: + r = -EINVAL; + break; + } + return r; +} + +static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) +{ + int r; + + DebugKVM("started\n"); + r = 0; + switch (chip->chip_id) { + case KVM_IRQCHIP_IOAPIC: + /* IOEPIC is currently not supported in QEMU */ + if (!kvm_is_epic(kvm)) + r = kvm_set_ioapic(kvm, &chip->chip.ioapic); + break; + default: + r = -ENODEV; + break; + } + return r; +} + +int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, + bool line_status) +{ + if (!irqchip_in_kernel(kvm)) + return -ENXIO; + + irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, + irq_event->irq, irq_event->level, + line_status); + return 0; +} + +int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + DebugUNIMPL("started for VCPU %d\n", vcpu->vcpu_id); + DebugUNIMPL("does not implemented\n"); + + return 0; +} + +int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct 
kvm_enable_cap *cap) +{ + int r; + + if (cap->flags) + return -EINVAL; + + switch (cap->cap) { + default: + r = -ENODEV; + break; + } + return r; +} + +unsigned long kvm_i2c_spi_conf_base[4] = { 0UL, 0UL, 0UL, 0UL,}; +unsigned long kvm_spmc_conf_base[4] = { 0UL, 0UL, 0UL, 0UL,}; + +long kvm_arch_vm_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg) +{ + struct kvm *kvm = filp->private_data; + void __user *argp = (void __user *)arg; + int r = -ENOTTY; + + DebugKVMIOC("started\n"); + switch (ioctl) { + case KVM_SET_MEMORY_REGION: { + struct kvm_memory_region kvm_mem; + struct kvm_userspace_memory_region kvm_userspace_mem; + + DebugKVMIOCTL("ioctl is KVM_SET_MEMORY_REGION\n"); + r = -EFAULT; + if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem)) + goto out; + DebugKVM("ioctl(KVM_SET_MEMORY_REGION) for slot ID %d\n", + kvm_mem.slot); + kvm_userspace_mem.slot = kvm_mem.slot; + kvm_userspace_mem.flags = kvm_mem.flags; + kvm_userspace_mem.guest_phys_addr = + kvm_mem.guest_phys_addr; + kvm_userspace_mem.memory_size = kvm_mem.memory_size; + r = kvm_set_memory_region(kvm, &kvm_userspace_mem); + break; + } + case KVM_ALLOC_GUEST_AREA: { + kvm_guest_area_alloc_t __user *guest_area; + + DebugKVMIOCTL("ioctl is KVM_ALLOC_GUEST_AREA\n"); + guest_area = (kvm_guest_area_alloc_t __user *)argp; + r = kvm_arch_ioctl_alloc_guest_area(kvm, guest_area); + break; + } + case KVM_RESERVE_GUEST_AREA: { + kvm_guest_area_reserve_t __user reserve_area; + user_area_t *guest_area; + + DebugKVMIOCTL("ioctl is KVM_RESERVE_GUEST_AREA\n"); + r = -EFAULT; + if (copy_from_user(&reserve_area, argp, sizeof reserve_area)) + goto out; + r = -EINVAL; + guest_area = kvm_find_memory_region(kvm, -1, + reserve_area.start, reserve_area.size, + reserve_area.type); + if (!guest_area) + goto out; + r = user_area_reserve_chunk(guest_area, reserve_area.start, + reserve_area.size); + break; + } + case KVM_SET_MEMORY_ALIAS: { + kvm_memory_alias_t __user *guest_alias; + + DebugKVMIOCTL("ioctl is 
KVM_SET_MEMORY_ALIAS\n"); + guest_alias = (kvm_memory_alias_t __user *)argp; + r = kvm_vm_ioctl_set_memory_alias(kvm, guest_alias); + break; + } + case KVM_SET_KERNEL_IMAGE_SHADOW: { + kvm_kernel_area_shadow_t __user *guest_shadow; + + DebugKVMIOCTL("ioctl is KVM_SET_KERNEL_IMAGE_SHADOW\n"); + guest_shadow = (kvm_kernel_area_shadow_t __user *)argp; + r = kvm_vm_ioctl_set_kernel_image_shadow(kvm, guest_shadow); + break; + } + case KVM_CREATE_IRQCHIP: + DebugKVMIOCTL("ioctl is KVM_CREATE_IRQCHIP\n"); + r = -EFAULT; + r = kvm_io_pic_init(kvm); + if (r) + goto out; + r = kvm_setup_default_irq_routing(kvm); + if (r) { + kvm_iopic_release(kvm); + goto out; + } + break; + case KVM_CREATE_SIC_NBSR: + DebugKVMIOCTL("ioctl is KVM_CREATE_SIC_NBSR\n"); + r = kvm_nbsr_init(kvm); + break; + case KVM_GET_IRQCHIP: { + /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ + struct kvm_irqchip chip; + + DebugKVMIOCTL("ioctl is KVM_GET_IRQCHIP\n"); + r = -EFAULT; + if (copy_from_user(&chip, argp, sizeof chip)) + goto out; + r = -ENXIO; + if (!irqchip_in_kernel(kvm)) + goto out; + r = kvm_vm_ioctl_get_irqchip(kvm, &chip); + if (r) + goto out; + r = -EFAULT; + if (copy_to_user(argp, &chip, sizeof chip)) + goto out; + r = 0; + break; + } + case KVM_SET_IRQCHIP: { + /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ + struct kvm_irqchip chip; + + DebugKVMIOCTL("ioctl is KVM_SET_IRQCHIP\n"); + r = -EFAULT; + if (copy_from_user(&chip, argp, sizeof chip)) + goto out; + r = -ENXIO; + if (!irqchip_in_kernel(kvm)) + goto out; + r = kvm_vm_ioctl_set_irqchip(kvm, &chip); + break; + } + case KVM_SET_PCI_REGION: { + struct kvm_pci_region pci_region; + + DebugKVMIOCTL("ioctl is KVM_SET_PCI_REGION\n"); + r = -EFAULT; + if (copy_from_user(&pci_region, argp, sizeof pci_region)) + goto out; + r = nbsr_setup_pci_region(kvm, &pci_region); + break; + } + case KVM_SET_IRQCHIP_BASE: { + struct kvm_base_addr_node basen; + + r = -EFAULT; + if (copy_from_user(&basen, argp, sizeof basen)) + goto out; + DebugKVMIOCTL("ioctl is 
KVM_SET_IRQCHIP_BASE to 0x%lx " + "node %d\n", basen.base, basen.node_id); + r = kvm_io_pic_set_base(kvm, basen.base, + basen.node_id); + break; + } + case KVM_SET_SYS_TIMER_BASE: { + struct kvm_base_addr_node basen; + + r = -EFAULT; + if (copy_from_user(&basen, argp, sizeof basen)) + goto out; + DebugKVMIOCTL("ioctl is KVM_SET_SYS_TIMER_BASE to 0x%lx " + "node %d\n", basen.base, basen.node_id); + r = kvm_lt_set_base(kvm, basen.node_id, basen.base); + break; + } + case KVM_SET_SPMC_CONF_BASE: { + struct kvm_base_addr_node basen; + + r = -EFAULT; + if (copy_from_user(&basen, argp, sizeof basen)) + goto out; + + DebugKVMIOCTL("ioctl is KVM_SET_SPMC_CONF_BASE to 0x%lx " + "node %d\n", basen.base, basen.node_id); + kvm_spmc_conf_base[basen.node_id] = basen.base; + r = kvm_spmc_set_base(kvm, basen.node_id, basen.base); + break; + } + case KVM_SET_SPMC_CONF_BASE_SPMC_IN_QEMU: { + struct kvm_base_addr_node basen; + + if (copy_from_user(&basen, argp, sizeof basen)) + goto out; + DebugKVMIOCTL("ioctl is KVM_SET_SPMC_CONF_BASE_SPMC_IN_QEMU " + "to 0x%lx node %d\n", basen.base, basen.node_id); + kvm_spmc_conf_base[basen.node_id] = basen.base; + r = 0; + break; + } + case KVM_SET_I2C_SPI_CONF_BASE: { + struct kvm_base_addr_node basen; + + if (copy_from_user(&basen, argp, sizeof basen)) + goto out; + DebugKVMIOCTL("ioctl is KVM_SET_I2C_SPI_CONF_BASE " + "to 0x%lx node %d\n", basen.base, basen.node_id); + kvm_i2c_spi_conf_base[basen.node_id] = basen.base; + r = 0; + break; + } + case KVM_SET_COUNT_NUMA_NODES: + DebugKVMIOCTL("ioctl is KVM_SET_COUNT_NUMA_NODES to 0x%lx\n", + arg); + kvm->arch.num_numa_nodes = arg; + r = 0; + break; + case KVM_SET_MAX_NR_NODE_CPU: + DebugKVMIOCTL("ioctl is KVM_SET_MAX_NR_NODE_CPU to 0x%lx\n", + arg); + kvm->arch.max_nr_node_cpu = arg; + r = 0; + break; + case KVM_SET_CEPIC_FREQUENCY: + DebugKVMIOCTL("ioctl is KVM_SET_CEPIC_FREQUENCY to %lu Hz\n", + arg); + kvm->arch.cepic_freq = arg; + r = 0; + break; + case KVM_SET_WD_PRESCALER_MULT: + 
DebugKVMIOCTL("ioctl is KVM_SET_WD_PRESCALER_MULT to %lu\n", + arg); + kvm->arch.wd_prescaler_mult = arg; + r = 0; + break; + case KVM_SET_LEGACY_VGA_PASSTHROUGH: + DebugKVMIOCTL("ioctl is KVM_SET_LEGACY_VGA_PASSTHROUGH to %lu\n", + arg); + r = 0; + if (arg) { + r = kvm_setup_legacy_vga_passthrough(kvm); + if (!r) + kvm->arch.legacy_vga_passthrough = true; + } + break; + case KVM_ENABLE_CAP: { + struct kvm_enable_cap cap; + + DebugKVMIOCTL("ioctl is KVM_ENABLE_CAP\n"); + r = -EFAULT; + if (copy_from_user(&cap, argp, sizeof(cap))) + goto out; + r = kvm_vm_ioctl_enable_cap(kvm, &cap); + break; + } + case KVM_SET_GUEST_INFO: + DebugKVMIOCTL("ioctl is KVM_SET_GUEST_INFO\n"); + r = kvm_setup_guest_info(kvm, argp); + break; + case KVM_GET_NBSR_STATE: { + struct kvm_guest_nbsr_state nbsr; + int node_id; + DebugKVM("ioctl is KVM_GET_NBSR_STATE\n"); + r = -ENXIO; + if (copy_from_user(&nbsr, argp, sizeof(nbsr))) + goto out; + node_id = nbsr.node_id; + DebugKVM("ioctl is KVM_GET_NBSR_STATE node %d\n", node_id); + + if (!kvm->arch.nbsr) + goto out; + + r = kvm_get_nbsr_state(kvm, &nbsr, node_id); + if (r) + goto out; + + r = -EFAULT; + if (copy_to_user(argp, &nbsr, + sizeof(struct kvm_guest_nbsr_state))) + goto out; + r = 0; + break; + } + default: + DebugKVM("ioctl is not supported\n"); + ; + } +out: + DebugKVMIOC("returns with value %d\n", r); + return r; +} + +int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs) +{ + DebugKVM("started\n"); + return -EINVAL; +} + +int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs) +{ + DebugKVM("started\n"); + return -EINVAL; +} +int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, + struct kvm_translation *tr) +{ + DebugKVM("started\n"); + return -EINVAL; +} + +struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, + unsigned int id) +{ + struct kvm_vcpu *vcpu = NULL; + int r; + + DebugKVM("started for CPU id %d\n", id); + vcpu = kmem_cache_zalloc(kvm_vcpu_cache, 
GFP_KERNEL); + if (!vcpu) + return ERR_PTR(-ENOMEM); + + r = kvm_vcpu_init(vcpu, kvm, id); + if (r) { + kmem_cache_free(kvm_vcpu_cache, vcpu); + DebugKVM("VCPU init failed: %d\n", r); + return ERR_PTR(r); + } + return vcpu; +} + +bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) +{ + return vcpu->vcpu_id == 0; +} + +bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) +{ + return irqchip_in_kernel(vcpu->kvm) == lapic_in_kernel(vcpu); +} + +void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) +{ + DebugKVM("Unimplemented\n"); +} + +int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) +{ + int r; + + DebugKVM("started for VCPU %d\n", vcpu->vcpu_id); + + if (kvm_vcpu_is_bsp(vcpu)) { + vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; + } else { + vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; + } + + vcpu->arch.node_id = vcpu->vcpu_id % vcpu->kvm->arch.num_numa_nodes; + + if (!vcpu->kvm->arch.max_nr_node_cpu) { + pr_err("%s() : not initiated kvm->arch.max_nr_node_cpu" + " from qemu\n", __func__); + return -EPERM; + } + vcpu->arch.hard_cpu_id = (vcpu->kvm->arch.max_nr_node_cpu * + vcpu->arch.node_id) + vcpu->vcpu_id / + vcpu->kvm->arch.num_numa_nodes; + DebugKVM("started for VCPU %d hard_cpu_id %d\n", + vcpu->vcpu_id, vcpu->arch.hard_cpu_id); + + init_completion(&vcpu->arch.released); + mutex_init(&vcpu->arch.lock); + vcpu->arch.ioport_data = get_ioport_data_pointer(vcpu->run); + vcpu->arch.ioport_data_size = get_ioport_data_size(vcpu->run); + + if (vcpu->kvm->arch.vm_type == KVM_E2K_SV_VM_TYPE || + vcpu->kvm->arch.vm_type == KVM_E2K_SW_PV_VM_TYPE) { + r = kvm_arch_pv_vcpu_init(vcpu); + if (r != 0) + return r; + } + + if (vcpu->kvm->arch.vm_type == KVM_E2K_HV_VM_TYPE || + vcpu->kvm->arch.vm_type == KVM_E2K_HW_PV_VM_TYPE) { + r = kvm_arch_hv_vcpu_init(vcpu); + if (r != 0) + goto pv_uninit; + } + + r = kvm_arch_any_vcpu_init(vcpu); + if (r != 0) + goto pv_uninit; + + /* create hypervisor backup hardware stacks */ + r = create_vcpu_backup_stacks(vcpu); + if (r != 0) + goto hv_uninit; + + /* 
create VCPU booting stacks */ + r = create_vcpu_boot_stacks(vcpu); + if (r != 0) + goto free_backup; + + if (!vcpu->arch.is_hv) { + /* create the host VCPU context for multi-threading */ + r = create_vcpu_host_context(vcpu); + if (r != 0) + goto free_boot; + } + + /* Now that stacks are allocated, we can set + * initialize stack registers values for guest */ + kvm_arch_vcpu_ctxt_init(vcpu); + + r = kvm_mmu_create(vcpu); + if (r < 0) + goto free_host; + +#ifdef CONFIG_KVM_ASYNC_PF + /* + * Async page faults are disabled by default. Paravirtualized guest can + * enable it by calling hypercall KVM_HCALL_PV_ENABLE_ASYNC_PF. + */ + vcpu->arch.apf.enabled = false; +#endif /* CONFIG_KVM_ASYNC_PF */ + + if (irqchip_in_kernel(vcpu->kvm)) { + r = kvm_create_local_pic(vcpu); + if (r != 0) + goto mmu_destroy; + } + + return 0; + +mmu_destroy: + kvm_mmu_destroy(vcpu); +free_host: + destroy_vcpu_host_context(vcpu); +free_boot: + free_vcpu_boot_stacks(vcpu); +free_backup: + free_kernel_backup_stacks(&vcpu->arch.hypv_backup); + kvm_arch_vcpu_ctxt_uninit(vcpu); +hv_uninit: + kvm_arch_hv_vcpu_uninit(vcpu); +pv_uninit: + kvm_arch_pv_vcpu_uninit(vcpu); + kvm_arch_any_vcpu_uninit(vcpu); + return r; +} + +int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) +{ + int r; + unsigned long epic_gstbase; + + DebugKVM("started\n"); + raw_spin_lock_init(&vcpu->arch.exit_reqs_lock); + INIT_LIST_HEAD(&vcpu->arch.exit_reqs_list); + vcpu->arch.halted = false; + INIT_WORK(&vcpu->arch.dump_work, NULL); + INIT_LIST_HEAD(&vcpu->arch.vcpus_to_spin); + + if (vcpu->arch.is_hv) { + /* Set the pointer to the CEPIC page */ + epic_gstbase = (unsigned long) + page_address(vcpu->kvm->arch.epic_pages); + vcpu->arch.hw_ctxt.cepic = (epic_page_t *) (epic_gstbase + + (kvm_vcpu_to_full_cepic_id(vcpu) << PAGE_SHIFT)); + + raw_spin_lock_init(&vcpu->arch.epic_dat_lock); + vcpu->arch.epic_dat_active = false; + kvm_init_cepic_idle_timer(vcpu); + } + vcpu->arch.exit_shutdown_terminate = 0; + + vcpu_load(vcpu); + + if 
(vcpu->kvm->arch.vm_type == KVM_E2K_SV_VM_TYPE || + vcpu->kvm->arch.vm_type == KVM_E2K_SW_PV_VM_TYPE) { + r = kvm_arch_pv_vcpu_setup(vcpu); + if (r != 0) + goto error; + } + + if (vcpu->kvm->arch.vm_type == KVM_E2K_HV_VM_TYPE || + vcpu->kvm->arch.vm_type == KVM_E2K_HW_PV_VM_TYPE) { + r = kvm_arch_hv_vcpu_setup(vcpu); + if (r != 0) + goto error; + } + + r = kvm_arch_any_vcpu_setup(vcpu); + if (r != 0) + goto error; + + /* init hypervisor backup hardware stacks */ + r = vcpu_backup_stacks_init(vcpu); + if (r != 0) + goto error; + + /* init VCPU booting stacks */ + r = vcpu_boot_stacks_init(vcpu); + if (r != 0) + goto error; + + kvm_mmu_setup(vcpu); + + r = init_pic_state(vcpu); + +error: + vcpu_put(vcpu); + return r; +} + +static int init_guest_vcpu_state(struct kvm_vcpu *vcpu) +{ + kvm_host_info_t *host_info; + + DebugKVM("started for VCPU %d\n", vcpu->vcpu_id); + + host_info = vcpu->kvm->arch.host_info; + BUG_ON(host_info == NULL); + host_info = (kvm_host_info_t *)kvm_vcpu_hva_to_gpa(vcpu, + (unsigned long)host_info); + if (IS_INVALID_GPA((gpa_t)host_info)) { + pr_err("%s() : could not allocate GPA of host info struct\n", + __func__); + goto error; + } + vcpu->arch.kmap_vcpu_state->host = host_info; + + if (vcpu->arch.is_pv) + kvm_init_cpu_state_idr(vcpu); + + if (vcpu->arch.is_hv) { + DebugKVM("VCPU #%d : setting host info structure at %px\n", + vcpu->vcpu_id, host_info); + return 0; + } + + kvm_init_cpu_state(vcpu); + + kvm_init_mmu_state(vcpu); + + DebugKVM("VCPU #%d : setting host info structure at %px\n", + vcpu->vcpu_id, host_info); + return 0; + +error: + kvm_free_host_info(vcpu->kvm); + return -ENOMEM; +} + +void guest_pv_vcpu_state_to_paging(struct kvm_vcpu *vcpu) +{ + kvm_host_info_t *host_info; + + host_info = vcpu->arch.kmap_vcpu_state->host; + BUG_ON(host_info == NULL || vcpu->kvm->arch.host_info == NULL); + vcpu->arch.kmap_vcpu_state->host = __guest_va(host_info); +} + +int init_cepic_state(struct kvm_vcpu *vcpu) +{ + DebugKVM("started for VCPU %d\n", 
vcpu->vcpu_id); + if (vcpu->arch.epic) + kvm_cepic_reset(vcpu); + if (vcpu->arch.is_pv) + kvm_init_guest_cepic_virqs_num(vcpu); + return 0; +} + +int init_lapic_state(struct kvm_vcpu *vcpu) +{ + DebugKVM("started for VCPU %d\n", vcpu->vcpu_id); + if (vcpu->arch.apic) + kvm_lapic_reset(vcpu); + kvm_init_guest_lapic_virqs_num(vcpu); + return 0; +} + +/* + * VCPUs halt and wake ups are synchronized on e2k as follows: + * + * VCPU0 VCPU1 + * -------------------------------------------------------------- + * intercept "wait int" Variant 1: + * kvm_vcpu_block() { send IPI to VCPU0 + * kvm_arch_vcpu_blocking() { DAT hit -> write target CIR + * clear DAT and save EPIC kvm_arch_vcpu_blocking will see CIR.stat + * } + * < ... > Variant 2: + * kvm_vcpu_check_block() { send IPI to VCPU0 + * check PMIRR/PNMIRR/CIR DAT miss -> generate interception + * both in shadow registers cepic_epic_interrupt() { + * and in memory kvm_irq_delivery_to_epic(): + * } either send through ICR if DAT + * < ... > is active (i.e. Variant 1) or + * kvm_arch_vcpu_unblocking() { write to PMIRR in memory where it + * restore EPIC and activate DAT will be seen by kvm_vcpu_check_block() + * } + * } + * + * Saving and restoring EPIC in kvm_arch_vcpu_[un]blocking() is necessary + * because otherwise there is a race: "Variant 2" above could happen between + * the check in kvm_vcpu_check_block() and the consequent schedule() call, in + * which case VCPU0 will go sleep but VCPU1 will be sure that VCPU0 was woken. 
+ */ +void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) +{ + if (kvm_vcpu_is_epic(vcpu)) { + kvm_epic_vcpu_blocking(&vcpu->arch); + kvm_epic_start_idle_timer(vcpu); + } +} + +void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) +{ + if (kvm_vcpu_is_epic(vcpu)) { + kvm_epic_stop_idle_timer(vcpu); + kvm_epic_vcpu_unblocking(&vcpu->arch); + } +} + +static int init_guest_boot_cut(struct kvm_vcpu *vcpu) +{ + kvm_vcpu_state_t *vcpu_state = vcpu->arch.vcpu_state; + e2k_cute_t *cute_p = vcpu->arch.guest_cut; + + if (cute_p == NULL) { + KVM_BUG_ON(!vcpu->arch.is_hv); + return 0; + } else { + KVM_BUG_ON(!vcpu->arch.is_pv); + } + DebugKVM("started for VCPU %d\n", vcpu->vcpu_id); + fill_cut_entry(cute_p, 0, 0, 0, 0); + DebugKVM("created guest CUT entry #0 zeroed at %px\n", cute_p); + cute_p += GUEST_CODES_INDEX; + fill_cut_entry(cute_p, 0, 0, + kvm_vcpu_hva_to_gpa(vcpu, (unsigned long)vcpu_state), + sizeof(*vcpu_state)); + DebugKVM("created guest CUT entry #%ld from 0x%lx size 0x%lx at %px\n", + GUEST_CODES_INDEX, + (void *)kvm_vcpu_hva_to_gpa(vcpu, (unsigned long)vcpu_state), + sizeof(*vcpu_state), cute_p); + return 0; +} +static int kvm_arch_ioctl_setup_vcpu(struct kvm_vcpu *vcpu) +{ + /* FIXME: the ioctl() can be deleted, but old version of */ + /* arch KVM API support this ioctl(), so let it be empty */ + return 0; +} + +static int kvm_arch_ioctl_vcpu_guest_startup(struct kvm_vcpu *vcpu, + kvm_vcpu_guest_startup_t __user *guest_startup) +{ + kvm_vcpu_guest_startup_t guest_args; + e2k_size_t trap_offset; + int arg; + + if (copy_from_user(&guest_args, guest_startup, sizeof(guest_args))) { + DebugKVM("copy to %px from user %px failed\n", + &guest_args, guest_startup); + return -EFAULT; + } + kvm_set_vcpu_kernel_image(vcpu, + guest_args.kernel_base, guest_args.kernel_size); + if (vcpu->arch.is_hv) { + /* should be always ttable #0 */ + trap_offset = 0; + } else if (vcpu->arch.is_pv) { + /* can be any ttable # from 32-63 */ + trap_offset = guest_args.trap_off; + } else { + 
KVM_BUG_ON(true); + trap_offset = 0; + } + vcpu->arch.trap_offset = trap_offset; + vcpu->arch.trap_entry = vcpu->arch.guest_base + trap_offset; + DebugKVM("guest trap table entry at %px\n", vcpu->arch.trap_entry); + + vcpu->arch.entry_point = guest_args.entry_point; + DebugKVM("guest image start point at %px\n", vcpu->arch.entry_point); + vcpu->arch.args_num = guest_args.args_num; + DebugKVM("guest image args num is %d\n", vcpu->arch.args_num); + for (arg = 0; arg < vcpu->arch.args_num; arg++) { + vcpu->arch.args[arg] = guest_args.args[arg]; + DebugKVM(" arg #%d : 0x%llx\n", arg, vcpu->arch.args[arg]); + } + if (guest_args.flags & NATIVE_KERNEL_IMAGE_GUEST_FLAG) { + set_kvm_mode_flag(vcpu->kvm, KVMF_NATIVE_KERNEL); + DebugKVM("guest is e2k linux native kernel\n"); + } else if (guest_args.flags & PARAVIRT_KERNEL_IMAGE_GUEST_FLAG) { + set_kvm_mode_flag(vcpu->kvm, KVMF_PARAVIRT_KERNEL); + DebugKVM("guest is e2k linux paravirtualized kernel\n"); + } else if (guest_args.flags & LINTEL_IMAGE_GUEST_FLAG) { + set_kvm_mode_flag(vcpu->kvm, KVMF_LINTEL); + DebugKVM("guest is e2k LIntel binary compilator\n"); + } + + kvm_init_clockdev(vcpu); + + vcpu_load(vcpu); + + if (vcpu->arch.is_hv || vcpu->arch.is_pv) { + kvm_start_vcpu_thread(vcpu); + } else { + KVM_BUG_ON(true); + } + + vcpu_put(vcpu); + + set_kvm_mode_flag(vcpu->kvm, KVMF_VCPU_STARTED); + + return 0; +} + +/* + * Mutex should be locked by caller (if needs) + */ +struct kvm_vcpu *kvm_get_vcpu_on_id(struct kvm *kvm, int vcpu_id) +{ + int r; + struct kvm_vcpu *vcpu; + + kvm_for_each_vcpu(r, vcpu, kvm) + if (vcpu->vcpu_id == vcpu_id) + return vcpu; + return ERR_PTR(-ENODEV); +} + +/* + * Mutex should be locked by caller (if needs) + */ +struct kvm_vcpu *kvm_get_vcpu_on_hard_cpu_id(struct kvm *kvm, int hard_cpu_id) +{ + int r; + struct kvm_vcpu *vcpu; + + kvm_for_each_vcpu(r, vcpu, kvm) + if (vcpu->arch.hard_cpu_id == hard_cpu_id) + return vcpu; + return ERR_PTR(-ENODEV); +} + +int kvm_arch_vcpu_ioctl_get_fpu(struct 
kvm_vcpu *vcpu, struct kvm_fpu *fpu) +{ + DebugKVM("started\n"); + return -EINVAL; +} + +int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +{ + DebugKVM("started\n"); + return -EINVAL; +} + +int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg) +{ + DebugKVM("started\n"); + return -EINVAL; +} + +void kvm_halt_host_vcpu_thread(struct kvm_vcpu *vcpu) +{ + DebugKVMSH("%s (%d) started to terminate VCPU #%d thread\n", + current->comm, current->pid, vcpu->vcpu_id); + + mutex_lock(&vcpu->arch.lock); + current_thread_info()->vcpu = NULL; + vcpu->arch.host_task = NULL; + mutex_unlock(&vcpu->arch.lock); + + kvm_arch_vcpu_release(vcpu); +} + +static void kvm_halt_all_host_vcpus(struct kvm *kvm) +{ + struct kvm_vcpu *vcpu; + int r; + + DebugKVMSH("%s (%d) started\n", + current->comm, current->pid); + + mutex_lock(&kvm->lock); + kvm_for_each_vcpu(r, vcpu, kvm) { + if (vcpu != NULL) { + if (vcpu->arch.host_task != NULL) { + kvm_halt_host_vcpu_thread(vcpu); + } else { + free_vcpu_state(vcpu); + } + } + } + mutex_unlock(&kvm->lock); +} + +static void kvm_wait_for_vcpu_release(struct kvm_vcpu *vcpu) +{ + DebugKVMSH("%s (%d) started to halt VCPU #%d\n", + current->comm, current->pid, vcpu->vcpu_id); + + if (vcpu->arch.host_task != NULL) { + kvm_halt_host_vcpu_thread(vcpu); + } else { + kvm_arch_vcpu_release(vcpu); + } + + if (!vcpu->arch.is_hv) { + wait_for_completion(&vcpu->arch.released); + } + DebugKVMSH("VCPU #%d released\n", vcpu->vcpu_id); +} + +void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) +{ + DebugKVMSH("%s (%d) started for VCPU %d\n", + current->comm, current->pid, vcpu->vcpu_id); + kvm_vcpu_uninit(vcpu); + kvm_free_local_pic(vcpu); + kmem_cache_free(kvm_vcpu_cache, vcpu); +} + +static void kvm_wake_up_all_other_vcpu_host(struct kvm_vcpu *my_vcpu) +{ + struct kvm *kvm = my_vcpu->kvm; + struct kvm_vcpu *vcpu; + struct task_struct *host_task; + int r; + + DebugKVMSH("%s (%d) started\n", + current->comm, 
current->pid); + + mutex_lock(&kvm->lock); + kvm_for_each_vcpu(r, vcpu, kvm) { + if (vcpu == NULL) + continue; + if (vcpu == my_vcpu) + continue; + mutex_lock(&vcpu->arch.lock); + host_task = vcpu->arch.host_task; + if (host_task != NULL) { + wake_up_process(host_task); + DebugKVMSH("waked up host thread %s (%d) VCPU #%d\n", + host_task->comm, host_task->pid, vcpu->vcpu_id); + } + mutex_unlock(&vcpu->arch.lock); + } + mutex_unlock(&kvm->lock); +} + +static void kvm_arch_release_all_vcpus(struct kvm *kvm) +{ + struct kvm_vcpu *vcpu; + int r; + + DebugKVMSH("%s (%d) started\n", + current->comm, current->pid); + + /* complete current thread as thread of virtual machine */ + kvm_resume_vm_thread(); + + mutex_lock(&kvm->lock); + kvm_for_each_vcpu(r, vcpu, kvm) + if (vcpu != NULL) { + kvm_wait_for_vcpu_release(vcpu); + } + mutex_unlock(&kvm->lock); +} + +static void kvm_arch_free_all_vcpus(struct kvm *kvm) +{ + struct kvm_vcpu *vcpu; + int r; + + DebugKVMSH("started\n"); + mutex_lock(&kvm->lock); + kvm_for_each_vcpu(r, vcpu, kvm) + if (vcpu != NULL) { + kvm_clear_vcpu(kvm, r); + kvm_arch_vcpu_free(vcpu); + } + mutex_unlock(&kvm->lock); +} + +void kvm_ioapic_release(struct kvm *kvm) +{ + int i; + + for (i = 0; i < kvm->arch.num_numa_nodes; i++) { + struct kvm_ioapic *ioapic = kvm->arch.vioapic[i]; + + if (!ioapic) + continue; + + mutex_lock(&kvm->slots_lock); + kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev); + mutex_unlock(&kvm->slots_lock); + kvm->arch.vioapic[i] = NULL; + kfree(ioapic); + } +} + +#ifdef DUMP_PAGE_STRUCT +void dump_page_struct(struct page *page) +{ + unsigned long flags; + + pr_info("\nStruct page 0x%px :\n", page); + if (page == NULL) + return; + flags = page->flags; + pr_info(" flags 0x%016lx NR %d, node #%d, zone #%d id 0x%x :\n", + flags, __NR_PAGEFLAGS, + page_to_nid(page), page_zonenum(page), page_zone_id(page)); + + pr_info(" count 0x%08x (starts from 0)\n", + atomic_read(&page->_count)); + pr_info(" map count 0x%08x (starts from -1) 
or if SLUB inuse 0x%04x " + "objects 0x%04x\n", + atomic_read(&page->_mapcount), + page->inuse, page->objects); + pr_info(" union {\n"); + pr_info(" private 0x%016lx mapping 0x%px\n", + page->private, page->mapping); + if (flags & (1UL << PG_private)) + pr_info(" private is buffer_heads\n"); + if (flags & (1UL << PG_swapcache)) + pr_info(" private is swp_entry_t\n"); + if (flags & (1UL << PG_buddy)) + pr_info(" private indicates order in the buddy " + "system\n"); + if (((unsigned long)page->mapping & PAGE_MAPPING_ANON)) + pr_info(" mapping points to anon_vma object\n"); + else + pr_info(" mapping points to inode address_space\n"); +#if USE_SPLIT_PTLOCKS +#ifndef CONFIG_PREEMPT_RT + pr_info(" spin lock *ptl 0x%px\n", &page->ptl); +#else + pr_info(" spin lock *ptl 0x%px\n", page->ptl); +#endif +#endif + pr_info(" SLUB: Pointer to slab 0x%px\n", page->slab); + pr_info(" Compound tail pages: Pointer to first page 0x%px\n", + page->first_page); + pr_info(" }\n"); + pr_info(" union {\n"); + pr_info(" index 0x%016lx offset within mapping\n", page->index); + pr_info(" SLUB: freelist req. slab lock 0x%px\n", page->freelist); + pr_info(" }\n"); + pr_info(" lru list head next 0x%px prev 0x%px\n", + page->lru.next, page->lru.prev); +#if defined(WANT_PAGE_VIRTUAL) + pr_info(" kernel virtual address 0x%px\n", page->virtual); +#else /* ! WANT_PAGE_VIRTUAL */ + pr_info(" kernel virtual address 0x%px\n", page_address(page)); +#endif /* WANT_PAGE_VIRTUAL */ +#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS + pr_info(" debug flags 0x%016lx\n", page->debug_flags); +#endif +#if defined(CONFIG_E2K) && defined(CONFIG_VIRTUALIZATION) + pr_info(" kvm %px gfn 0x%lx user mapps %d\n", + page->kvm, page->gfn, atomic_read(&page->user_maps)); +#endif /* CONFIG_E2K && CONFIG_VIRTUALIZATION */ +} +#else /* ! 
DUMP_PAGE_STRUCT */ +void dump_page_struct(struct page *page) +{ +} +#endif /* DUMP_PAGE_STRUCT */ + +void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) +{ + /* + * memslots->generation has been incremented. + * mmio generation may have reached its maximum value. + */ + kvm_mmu_invalidate_mmio_sptes(kvm, gen); +} + +void kvm_arch_sync_events(struct kvm *kvm) +{ + DebugKVM("started\n"); +} + +void kvm_arch_destroy_vm(struct kvm *kvm) +{ + DebugKVMSH("%s (%d) started\n", current->comm, current->pid); + + if (current_thread_info()->virt_machine == NULL) + current_thread_info()->virt_machine = kvm; +#ifdef KVM_CAP_DEVICE_ASSIGNMENT + kvm_free_all_assigned_devices(kvm); +#endif + + kvm_free_all_VIRQs(kvm); + kvm_free_all_spmc(kvm); + kvm_free_all_lt(kvm); + /* + * Halting VCPU frees runstate, used by kvm timers. + * So PIC, LT, SPMC should be freed first + * FIXME: PIC is currently freed later, in kvm_arch_free_all_vcpus() + */ + kvm_arch_release_all_vcpus(kvm); + kvm_halt_all_host_vcpus(kvm); + kvm_free_host_info(kvm); + kvm_nbsr_destroy(kvm); + kvm_iopic_release(kvm); + kvm_free_passthrough(kvm); + kvm_free_epic_pages(kvm); + if (kvm->arch.is_pv) { + kvm_guest_pv_mm_destroy(kvm); + } + kvm_boot_spinlock_destroy(kvm); + kvm_guest_spinlock_destroy(kvm); + kvm_guest_csd_lock_destroy(kvm); + if (kvm->arch.is_pv) { + kvm_pv_guest_thread_info_destroy(kvm); + } + kvm_arch_free_all_vcpus(kvm); + kvm_mmu_uninit_vm(kvm); + kvm_page_track_cleanup(kvm); + kvm_free_vmid(kvm); + current_thread_info()->virt_machine = NULL; +} + +void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu, bool schedule) +{ + unsigned long flags; + + DebugKVMRUN("started on VCPU %d\n", vcpu->vcpu_id); + trace_vcpu_put(vcpu->vcpu_id, vcpu->cpu); + set_bit(KVM_REQ_KICK, (void *) &vcpu->requests); + + local_irq_save(flags); + if (vcpu->arch.is_hv) + machine.save_kvm_context(&vcpu->arch); + + if (!schedule) { + machine.save_gregs_dirty_bgr(&vcpu->arch.sw_ctxt.vcpu_gregs); + copy_k_gregs_to_k_gregs( + 
&vcpu->arch.sw_ctxt.vcpu_k_gregs, + ¤t_thread_info()->k_gregs); + machine.restore_gregs(&vcpu->arch.sw_ctxt.host_gregs); + copy_k_gregs_to_k_gregs( + ¤t_thread_info()->k_gregs, + &vcpu->arch.sw_ctxt.host_k_gregs); + if (vcpu->arch.is_hv) { + ; + } else if (vcpu->arch.is_pv) { + /* switch VCPU guset context to host context */ + pv_vcpu_exit_to_host(vcpu); + } else { + KVM_BUG_ON(true); + } + } + local_irq_restore(flags); + + current_thread_info()->vcpu = NULL; +} + +DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu) = NULL; + +void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu, bool schedule) +{ + int last_cpu = vcpu->cpu; + unsigned long flags; + + DebugKVMRUN("started on VCPU %d CPU %d\n", vcpu->vcpu_id, cpu); + + current_thread_info()->vcpu = vcpu; + vcpu->cpu = cpu; + trace_vcpu_load(vcpu->vcpu_id, last_cpu, cpu); + clear_bit(KVM_REQ_KICK, (void *) &vcpu->requests); + + local_irq_save(flags); + if (cpu != last_cpu || per_cpu(last_vcpu, cpu) != vcpu) { + /* bug 113981 comment 18: flush TLB/IB when moving + * to a new CPU to fix problems with GID reuse. + * + * bug 106525 comment 3: flush TLB/IB when changing + * VCPU on a real CPU, as MMU PIDs are per-cpu. 
*/ + __flush_tlb_all(); + __flush_icache_all(); + } + per_cpu(last_vcpu, cpu) = vcpu; + + if (vcpu->arch.is_hv) + machine.restore_kvm_context(&vcpu->arch); + + if (!schedule) { + machine.save_gregs_dirty_bgr(&vcpu->arch.sw_ctxt.host_gregs); + copy_k_gregs_to_k_gregs( + &vcpu->arch.sw_ctxt.host_k_gregs, + ¤t_thread_info()->k_gregs); + machine.restore_gregs(&vcpu->arch.sw_ctxt.vcpu_gregs); + copy_k_gregs_to_k_gregs( + ¤t_thread_info()->k_gregs, + &vcpu->arch.sw_ctxt.vcpu_k_gregs); + if (vcpu->arch.is_hv) { + ; + } else if (vcpu->arch.is_pv) { + /* switch VCPU host context to guest context */ + pv_vcpu_enter_to_guest(vcpu); + } else { + KVM_BUG_ON(true); + } + } + local_irq_restore(flags); +} +void kvm_arch_vcpu_to_wait(struct kvm_vcpu *vcpu) +{ + clear_bit(KVM_REQ_KICK, (void *) &vcpu->requests); +} +void kvm_arch_vcpu_to_run(struct kvm_vcpu *vcpu) +{ + set_bit(KVM_REQ_KICK, (void *) &vcpu->requests); +} +static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, + struct kvm_lapic_state *s) +{ + DebugKVM("started for VCPU %d\n", vcpu->vcpu_id); + memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s); + return 0; +} + +static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, + struct kvm_lapic_state *s) +{ + DebugKVM("started for VCPU %d\n", vcpu->vcpu_id); + memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s); + return 0; +} + +int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + DebugUNIMPL("started for VCPU %d\n", vcpu->vcpu_id); + DebugUNIMPL("does not implemented\n"); + return 0; +} + +static void kvm_arch_vcpu_release(struct kvm_vcpu *vcpu) +{ + DebugKVMSH("started for VCPU %d\n", vcpu->vcpu_id); + + kvm_init_guest_lapic_virqs_num(vcpu); + kvm_cancel_clockdev(vcpu); + free_vcpu_state(vcpu); + if (!vcpu->arch.is_hv) { + complete(&vcpu->arch.released); + } +} +void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) +{ + DebugKVMSH("started for VCPU %d\n", vcpu->vcpu_id); + + kvm_arch_vcpu_destroy(vcpu); +} + +void kvm_arch_vcpu_uninit(struct 
kvm_vcpu *vcpu) +{ + DebugKVMSH("started for VCPU %d\n", vcpu->vcpu_id); + + vcpu->arch.halted = true; + kvm_arch_pv_vcpu_uninit(vcpu); + kvm_arch_vcpu_ctxt_uninit(vcpu); + kvm_arch_hv_vcpu_uninit(vcpu); + kvm_arch_any_vcpu_uninit(vcpu); + /* free hypervisor backup hardware stacks */ + free_kernel_backup_stacks(&vcpu->arch.hypv_backup); + /* free VCPU booting stacks */ + free_vcpu_boot_stacks(vcpu); + destroy_vcpu_host_context(vcpu); + kvm_free_local_pic(vcpu); + kvm_mmu_destroy(vcpu); +} + +long kvm_arch_vcpu_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg) +{ + struct kvm_vcpu *vcpu = filp->private_data; + void __user *argp = (void __user *)arg; + long r; + struct kvm_lapic_state *lapic = NULL; + + DebugKVM("started for VCPU %d ioctl 0x%x\n", vcpu->vcpu_id, ioctl); + switch (ioctl) { + case KVM_SETUP_VCPU: + DebugKVM("ioctl is KVM_SETUP_VCPU\n"); + r = kvm_arch_ioctl_setup_vcpu(vcpu); + break; + case KVM_VCPU_GUEST_STARTUP: { + kvm_vcpu_guest_startup_t __user *guest_startup; + + DebugKVM("ioctl is KVM_VCPU_GUEST_STARTUP\n"); + guest_startup = (kvm_vcpu_guest_startup_t __user *)argp; + r = kvm_arch_ioctl_vcpu_guest_startup(vcpu, guest_startup); + break; + } + case KVM_INTERRUPT: { + struct kvm_interrupt irq; + + DebugKVM("ioctl is KVM_INTERRUPT\n"); + r = -EFAULT; + if (copy_from_user(&irq, argp, sizeof irq)) + goto out; + r = kvm_vcpu_ioctl_interrupt(vcpu, irq.irq); + if (r) + goto out; + r = 0; + break; + } + case KVM_GET_LAPIC: { + DebugKVM("ioctl is KVM_GET_LAPIC\n"); + r = -EINVAL; + if (!vcpu->arch.apic) + goto out; + lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL); + + r = -ENOMEM; + if (!lapic) + goto out; + r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic); + if (r) + goto out; + r = -EFAULT; + if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state))) + goto out; + r = 0; + break; + } + case KVM_SET_LAPIC: { + r = -EINVAL; + DebugKVM("ioctl is KVM_SET_LAPIC\n"); + if (!vcpu->arch.apic) + goto out; + lapic = kmalloc(sizeof(struct 
kvm_lapic_state), GFP_KERNEL); + r = -ENOMEM; + if (!lapic) + goto out; + r = -EFAULT; + if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state))) + goto out; + r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic); + if (r) + goto out; + r = 0; + break; + } + case KVM_SET_VAPIC_ADDR: { + struct kvm_vapic_addr va; + DebugKVM("ioctl is KVM_SET_VAPIC_ADDR\n"); + + r = -EINVAL; + if (!irqchip_in_kernel(vcpu->kvm)) + goto out; + r = -EFAULT; + if (copy_from_user(&va, argp, sizeof va)) + goto out; + r = 0; + kvm_pic_set_vapic_addr(vcpu, va.vapic_addr); + break; + } + default: + r = -EINVAL; + } +out: + return r; +} + +static unsigned int +calculate_memory_region_flags(struct kvm_memory_slot *memslot, + unsigned int flags) +{ + gfn_t base_gfn = memslot->base_gfn; + e2k_addr_t user_addr = memslot->userspace_addr; + + if (flags & KVM_MEM_TYPE_MASK) { + /* flags of memory type is already set */ + goto out; + } + if (likely(kvm_is_ram_gfn(base_gfn))) { + if (user_addr >= GUEST_RAM_VIRT_BASE && + user_addr < GUEST_RAM_VIRT_BASE + + GUEST_MAX_RAM_SIZE) + /* it is guest kernel virtual address */ + /* to map physical RAM */ + flags = KVM_MEM_VCPU_RAM; + else + /* it is user application of host (QEMU) address */ + /* to map IO-MEM (probably to emulate frame buffer */ + flags = KVM_MEM_USER_RAM; + } else if (kvm_is_vcpu_vram_gfn(base_gfn)) { + if (user_addr >= GUEST_VCPU_VRAM_VIRT_BASE && + user_addr < GUEST_VCPU_VRAM_VIRT_BASE + + GUEST_MAX_VCPU_VRAM_SIZE) + /* it is guest kernel virtual address to map VRAM */ + flags = KVM_MEM_VCPU_VRAM; + else + /* it is unknown address to map VRAM */ + BUG_ON(true); + } else if (kvm_is_io_vram_gfn(base_gfn)) { + if (user_addr >= GUEST_IO_VRAM_VIRT_BASE && + user_addr < GUEST_IO_VRAM_VIRT_BASE + + GUEST_IO_VRAM_SIZE) + /* it is guest kernel virtual address to map IO-VRAM */ + flags = KVM_MEM_IO_VRAM; + else + /* it is user application of host (QEMU) address */ + /* to map IO-VRAM (probably to emulate frame buffer */ + flags = KVM_MEM_USER_RAM; + } 
else { + /* it is unknown guest physical page # */ + BUG_ON(true); + } + +out: + return flags & KVM_MEM_TYPE_MASK; +} + +int kvm_arch_prepare_memory_region(struct kvm *kvm, + struct kvm_memory_slot *memslot, + const struct kvm_userspace_memory_region *mem, + enum kvm_mr_change change) +{ + int slot = memslot->id; + unsigned long guest_size = mem->memory_size; + int npages = guest_size >> PAGE_SHIFT; + unsigned int flags = mem->flags; + gfn_t base_gfn = memslot->base_gfn; + unsigned long guest_start = memslot->userspace_addr; + unsigned long guest_end = guest_start + (npages << PAGE_SHIFT); + user_area_t *guest_area = NULL; + int node_id; + + DebugKVM("slot %d: base pfn 0x%llx guest virtual from 0x%lx to 0x%lx\n", + slot, base_gfn, guest_start, guest_end); + + if ((flags & KVM_MEM_TYPE_MASK) == 0) + flags |= calculate_memory_region_flags(memslot, flags); + if (memslot->userspace_addr == 0) { + printk(KERN_ERR "kvm_arch_set_memory_region() slot %d: base " + "gfn 0x%llx size 0x%x pages is not allocated by user " + "and cannot be used\n", + slot, base_gfn, npages); + return -ENOENT; + } + if (flags & KVM_MEM_IO_VRAM) { + memslot->arch.guest_areas.type = guest_io_vram_mem_type; + DebugKVM("memory region from 0x%lx to 0x%lx is IO-VRAM\n", + guest_start, guest_end); + } else if (flags & KVM_MEM_VCPU_VRAM) { + memslot->arch.guest_areas.type = guest_vram_mem_type; + DebugKVM("memory region from 0x%lx to 0x%lx is VRAM\n", + guest_start, guest_end); + } else if (flags & KVM_MEM_USER_RAM) { + memslot->arch.guest_areas.type = guest_user_ram_mem_type; + DebugKVM("memory region from 0x%lx to 0x%lx is USER-RAM\n", + guest_start, guest_end); + } else if (flags & KVM_MEM_VCPU_RAM) { + memslot->arch.guest_areas.type = guest_ram_mem_type; + DebugKVM("memory region from 0x%lx to 0x%lx is guest RAM\n", + guest_start, guest_end); + } else { + BUG_ON(true); + } + + if (change == KVM_MR_DELETE) { + DebugKVM("memory region should be deleted (some later)\n"); + return 0; + } else if (change 
== KVM_MR_FLAGS_ONLY) { + DebugKVM("should be changed only flags of region: " + "is not implemented\n"); + return -EINVAL; + } else if (change == KVM_MR_MOVE) { + DebugKVM("memory region should be moved\n"); + } else if (change == KVM_MR_CREATE) { + DebugKVM("memory region should be created\n"); + } else { + DebugKVM("unknown operation %d for memory region\n", + change); + return -EINVAL; + } + + guest_area = memslot->arch.guest_areas.area; + + /* KVM_MEM_USER_RAM is used by VFIO for mapping PCI BARs for guest */ + if (flags & KVM_MEM_IO_VRAM) { + DebugKVM("guest area support for this type of memory is not " + "used, so do not create\n"); + BUG_ON(guest_area != NULL); + goto out; + } + if (guest_area == NULL) { + guest_area = user_area_create(guest_start, guest_size, + USER_AREA_ORDERED); + if (guest_area == NULL) { + printk(KERN_ERR "kvm_arch_set_memory_region() slot %d: " + "base gfn 0x%llx guest virtual from 0x%lx " + "to 0x%lx could not create guest area " + "support\n", + slot, base_gfn, guest_start, guest_end); + return -ENOENT; + } + memslot->arch.guest_areas.area = guest_area; + DebugKVM("created guest area support at %px from 0x%lx " + "to 0x%lx\n", + guest_area, + guest_area->area_start, guest_area->area_end); + + if (flags & KVM_MEM_VCPU_RAM) { + for (node_id = 0; node_id < kvm->arch.num_numa_nodes; + node_id++) { + nbsr_setup_memory_region(kvm->arch.nbsr, + node_id, gfn_to_gpa(base_gfn), + guest_size); + DebugKVM("setup NBSR routers for node #%d " + "memory region from 0x%llx to 0x%llx\n", + node_id, gfn_to_gpa(base_gfn), + gfn_to_gpa(base_gfn) + guest_size); + } + } + } else { + DebugKVM("guest area support was already created " + "at %px from 0x%lx to 0x%lx\n", + guest_area, + guest_area->area_start, guest_area->area_end); + } +out: + return 0; +} + +int kvm_gva_to_memslot_unaliased(struct kvm *kvm, gva_t gva) +{ + struct kvm_memslots *slots = kvm_memslots(kvm); + struct kvm_memory_slot *memslot; + + DebugKVMPF("started for guest addr 0x%lx\n", gva); 
+ kvm_for_each_memslot(memslot, slots) { + DebugKVMPF("current slot #%d base addr 0x%lx size 0x%lx\n", + memslot->id, memslot->userspace_addr, + memslot->npages << PAGE_SHIFT); + if (gva >= memslot->userspace_addr && + gva < memslot->userspace_addr + + (memslot->npages << PAGE_SHIFT)) + return memslot->id; + } + DebugKVMPF("guest addres 0x%lx not found\n", gva); + return -1; +} + +static int gva_to_alias_slot(struct kvm *kvm, gva_t gva) +{ + int i; + + DebugKVMPF("started for guest addr 0x%lx\n", gva); + for (i = 0; i < kvm->arch.naliases; ++i) { + kvm_mem_alias_t *alias_slot = &kvm->arch.aliases[i]; + unsigned long alias_start = alias_slot->alias_start; + + DebugKVMPF("current slot #%d start addr 0x%lx end 0x%lx\n", + i, alias_start, + alias_start + (alias_slot->npages << PAGE_SHIFT)); + if (gva >= alias_start && + gva < alias_start + (alias_slot->npages << PAGE_SHIFT)) + return i; + } + DebugKVMPF("guest addres 0x%lx not found\n", gva); + return -1; +} + +static gva_t kvm_unalias_gva(struct kvm *kvm, gva_t gva) +{ + kvm_mem_alias_t *alias; + int slot; + + DebugKVMPF("started for guest addr 0x%lx\n", gva); + + slot = gva_to_alias_slot(kvm, gva); + if (slot < 0) { + DebugKVMPF("could not find alias slot for address 0x%lx\n", + gva); + return gva; + } + alias = &kvm->arch.aliases[slot]; + return alias->target_start + (gva - alias->alias_start); +} +/* + * convert guest virtual address to guest virtual physical address: + * GUEST_PAGE_OFFSET + gfn(gva) + */ +gva_t kvm_gva_to_gpa(struct kvm *kvm, gva_t gva) +{ + int slot; + gva_t gpa; + + DebugKVMPF("started for guest addr 0x%lx\n", gva); + + gpa = kvm_unalias_gva(kvm, gva); + slot = kvm_gva_to_memslot_unaliased(kvm, gpa); + if (slot < 0) { + DebugKVMPF("could not find memory slot for address 0x%lx\n", + gpa); + return (gva_t)-1; + } + DebugKVMPF("guest virtual address 0x%lx is virtual physical " + "address 0x%lx\n", gva, gpa); + return gpa; +} +gpa_t kvm_vcpu_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t gva, u32 access, + 
kvm_arch_exception_t *exception) +{ + gva_t gvpa = kvm_gva_to_gpa(vcpu->kvm, gva); + + if (gvpa == (gva_t)-1) + return UNMAPPED_GVA; + return kvm_mmu_gvpa_to_gpa(gvpa); +} + +static user_area_t *kvm_do_find_memory_region(struct kvm *kvm, + int slot, e2k_addr_t address, e2k_size_t size, + bool phys_mem, e2k_addr_t *virt_address, + kvm_guest_mem_type_t type) +{ + struct kvm_memory_slot *memslot; + user_area_t *guest_area; + kvm_guest_mem_type_t guest_type; + gpa_t base_gpa; + e2k_size_t area_size; + int id, as_id, as_id_from, as_id_to; + + DebugKVM("started for slot %d address 0x%lx size 0x%lx type %s\n", + slot, address, size, + (type & guest_vram_mem_type) ? "VRAM" : "RAM"); + if (slot >= 0) { + if (slot >= KVM_USER_MEM_SLOTS) { + DebugKVM("slot %d is outside of slots number %d\n", + slot, KVM_USER_MEM_SLOTS); + return NULL; + } + as_id = slot >> 16; + id = (u16)slot; + as_id_from = as_id; + as_id_to = as_id; + } else { + id = -1; + as_id_from = 0; + as_id_to = KVM_ADDRESS_SPACE_NUM - 1; + } + if (type == 0) + type = guest_ram_mem_type; + + for (as_id = as_id_from; as_id <= as_id_to; as_id++) { + kvm_for_each_memslot(memslot, __kvm_memslots(kvm, as_id)) { + if ((id >= 0) && (id != memslot->id)) { + DebugKVM("slot %d is not slot to find %d\n", + memslot->id, id); + continue; + } + if (memslot->arch.guest_areas.area == NULL) { + DebugKVM("slot %d is empty\n", memslot->id); + continue; + } + guest_type = memslot->arch.guest_areas.type; + if ((guest_type & type) == 0) { + DebugKVM("slot %d has other memory type " + "0x%x != 0x%x to find\n", + memslot->id, guest_type, type); + continue; + } + guest_area = memslot->arch.guest_areas.area; + if (phys_mem) { + base_gpa = gfn_to_gpa(memslot->base_gfn); + area_size = guest_area->area_end - + guest_area->area_start; + if (address < base_gpa || + address + size > + base_gpa + area_size) { + DebugKVM("start phys address 0x%lx " + "or end 0x%lx is outside of " + "slot #%d region from 0x%llx " + "to 0x%llx\n", + address, address + 
size, + memslot->id, + base_gpa, + base_gpa + area_size); + continue; + } + /* convert physical address to virtual */ + /* address of area */ + address = guest_area->area_start + + (address - base_gpa); + } + if (address != 0 && + (address < guest_area->area_start || + address >= guest_area->area_end)) { + DebugKVM("address 0x%lx outside of slot #%d " + "region from 0x%lx to 0x%lx\n", + address, memslot->id, + guest_area->area_start, + guest_area->area_end); + continue; + } + if (size > guest_area->area_end - + guest_area->area_start) { + DebugKVM("size 0x%lx of slot #%d < memory " + "region size 0x%lx to find\n", + size, memslot->id, + guest_area->area_end - + guest_area->area_start); + continue; + } + DebugKVM("found memory region from 0x%lx to 0x%lx " + "at slot #%d\n", + guest_area->area_start, guest_area->area_end, + memslot->id); + if (phys_mem && virt_address != NULL) + *virt_address = address; + return guest_area; + } + } + DebugKVM("could not find any suitable memory slot\n"); + return NULL; +} + +static user_area_t *kvm_find_memory_region(struct kvm *kvm, + int slot, e2k_addr_t address, e2k_size_t size, + kvm_guest_mem_type_t type) +{ + return kvm_do_find_memory_region(kvm, slot, address, size, + false, /* phys memory ? */ NULL, type); +} + +static user_area_t *kvm_find_phys_memory_region(struct kvm *kvm, + int slot, gpa_t gpa, e2k_size_t size, + e2k_addr_t *virt_address, kvm_guest_mem_type_t type) +{ + return kvm_do_find_memory_region(kvm, slot, gpa, size, + true, /* phys memory ? 
*/ virt_address, type); +} + +int kvm_find_shadow_slot(struct kvm *kvm, int slot, e2k_addr_t kernel_addr, + gva_t shadow_addr) +{ + e2k_addr_t kernel_index; + gva_t shadow_index; + int i; + + DebugKVMPVF("started for kernel addr 0x%lx, guest shadow addr 0x%lx, " + "slot %d\n", + kernel_addr, shadow_addr, slot); + kernel_index = kernel_addr & PGDIR_MASK; + shadow_index = shadow_addr & PGDIR_MASK; + for (i = slot; i < kvm->arch.nshadows; i++) { + kvm_kernel_shadow_t *shadow = &kvm->arch.shadows[i]; + e2k_addr_t kernel_base = shadow->kernel_start; + gva_t shadow_base = shadow->shadow_start; + + DebugKVMPVF("current slot #%d kernel base 0x%lx, shadow base " + "0x%lx, size 0x%lx\n", + i, kernel_base, shadow_base, shadow->area_size); + if (kernel_index == (kernel_base & PGDIR_MASK)) { + DebugKVMPVF("kernel index found at slot %d\n", i); + return i; + } + if (shadow_index == (shadow_base & PGDIR_MASK)) { + DebugKVMPVF("shadow index found at slot %d\n", i); + return i; + } + } + DebugKVMPVF("guest shadow not found\n"); + return -1; +} + +static int find_shadow_intersection(struct kvm *kvm, e2k_addr_t kernel_base, + gva_t shadow_base, e2k_size_t area_size) +{ + e2k_addr_t kernel_index; + e2k_addr_t shadow_index; + int slot; + + DebugKVM("started for kernel base 0x%lx, guest shadow base 0x%lx, " + "size 0x%lx\n", kernel_base, shadow_base, area_size); + kernel_index = kernel_base & PGDIR_MASK; + shadow_index = shadow_base & PGDIR_MASK; + slot = kvm_find_shadow_slot(kvm, 0, kernel_base, shadow_base); + while (slot >= 0) { + kvm_kernel_shadow_t *shadow = &kvm->arch.shadows[slot]; + e2k_addr_t kernel_start = shadow->kernel_start; + e2k_addr_t shadow_start = shadow->shadow_start; + e2k_size_t size = shadow->area_size; + + DebugKVM("shadow address find at slot %d: kernel start " + "0x%lx shadow 0x%lx, size 0x%lx\n", + slot, kernel_start, shadow_start, size); + if (shadow_base >= shadow_start && + shadow_base < shadow_start + size) { + DebugKVM("shadow address intersection\n"); + 
return 1; + } + if (kernel_base >= kernel_start && + kernel_base < kernel_start + size) { + DebugKVM("kernel address intersection\n"); + return 1; + } + if (kernel_index != (kernel_start & PGDIR_MASK) || + shadow_index != (shadow_start & PGDIR_MASK)) { + DebugKVM("different PGD lines\n"); + return 1; + } + slot++; + slot = kvm_find_shadow_slot(kvm, slot, kernel_base, + shadow_base); + } + return 0; +} + +static long kvm_arch_ioctl_alloc_guest_area(struct kvm *kvm, + kvm_guest_area_alloc_t __user *what) +{ + kvm_guest_area_alloc_t guest_chunk; + user_area_t *guest_area; + e2k_addr_t region_addr; + e2k_addr_t size; + kvm_guest_mem_type_t type; + bool phys_mem = false; + unsigned long flags; + void *chunk; + int ret = 0; + + if (copy_from_user(&guest_chunk, what, sizeof(guest_chunk))) { + DebugKVM("copy to %px from user %px failed\n", + &guest_chunk, what); + return -EFAULT; + } + DebugKVM("started for region %px, start 0x%lx, size 0x%lx type %s " + "align 0x%lx\n", + guest_chunk.region, guest_chunk.start, guest_chunk.size, + (guest_chunk.type & guest_vram_mem_type) ? "VRAM" : "RAM", + guest_chunk.align); + size = guest_chunk.size; + type = guest_chunk.type; + if (type == 0) + type = guest_ram_mem_type; + if (type & guest_ram_mem_type) { + if (test_kvm_mode_flag(kvm, KVMF_VCPU_STARTED)) { + /* VCPUs started and RAM is now allocated by */ + /* only guest kernel */ + type &= ~guest_ram_mem_type; + if (type == 0) + return -ENOMEM; + } + } + if (guest_chunk.region != NULL) { + region_addr = (e2k_addr_t)guest_chunk.region; + } else if (guest_chunk.start != 0) { + region_addr = guest_chunk.start; + phys_mem = true; + } else { + region_addr = 0; + } + /* FIXME: mutex cannot be locked here, because of following */ + /* user_alloc_xxx() functions take this mutex too. 
*/ + /* Now memory slots only are created and deleted and not updated, */ + /* so guest_area cannot be updated by someone else and slots_lock */ + /* mutex can be not locked */ + /* But some sychronization should be made to use of memory balloon */ + /* functionality. Probably it can be get_xxx()/put_xxx() -> */ + /* free_xxx() type mechanism to lock guest_area & memory slot */ + /* updates */ +/* mutex_lock(&kvm->slots_lock); */ + if (!phys_mem) { + guest_area = kvm_find_memory_region(kvm, -1, region_addr, + size, type); + } else { + guest_area = kvm_find_phys_memory_region(kvm, -1, region_addr, + size, &guest_chunk.start, type); + } + if (guest_area == NULL) { + DebugKVM("could not find memory region for address 0x%lx\n", + region_addr); + ret = -EINVAL; + goto out_unlock; + } + flags = guest_chunk.flags; + if (flags & KVM_ALLOC_AREA_PRESENT) { + chunk = user_area_alloc_present(guest_area, guest_chunk.start, + guest_chunk.size, guest_chunk.align, flags); + } else if (flags & KVM_ALLOC_AREA_ZEROED) { + chunk = user_area_alloc_zeroed(guest_area, guest_chunk.start, + guest_chunk.size, guest_chunk.align, flags); + } else if (flags & KVM_ALLOC_AREA_LOCKED) { + chunk = user_area_alloc_locked(guest_area, guest_chunk.start, + guest_chunk.size, guest_chunk.align, flags); + } else { + chunk = user_area_get(guest_area, guest_chunk.start, + guest_chunk.size, guest_chunk.align, flags); + } + if (chunk == NULL) { + DebugKVM("could not allocate guest area size of 0x%lx\n", + guest_chunk.size); + ret = -ENOMEM; + goto out_unlock; + } + DebugKVM("allocated guest area from %px, size of 0x%lx\n", + chunk, guest_chunk.size); + guest_chunk.area = chunk; + if (copy_to_user(what, &guest_chunk, sizeof(guest_chunk))) { + DebugKVM("copy from %px to user %px failed\n", + what, &guest_chunk); + user_area_free_chunk(guest_area, chunk); + ret = -EFAULT; + goto out_unlock; + } +out_unlock: +/* mutex_unlock(&kvm->slots_lock); see FIXME above */ + return ret; +} + +void 
kvm_arch_flush_shadow_all(struct kvm *kvm) +{ + DebugKVM("started\n"); + kvm_flush_remote_tlbs(kvm); + kvm_mmu_invalidate_zap_all_pages(kvm); +} + +void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) +{ + KVM_BUG_ON(vcpu->cpu < 0); + + if (!vcpu->arch.is_hv && vcpu->cpu != cpu) + pv_vcpu_switch_kernel_pgd_range(vcpu, cpu); +} + +long kvm_arch_ioctl_get_guest_address(unsigned long __user *addr) +{ + unsigned long address = -1; + long r; + + r = get_user(address, addr); + if (r) { + DebugKVM("get_user() failed for user address 0x%lx\n", addr); + return r; + } + DebugKVM("started for address 0x%lx\n", addr); + switch (address) { + case KVM_GUEST_PAGE_OFFSET: + DebugKVM("address is KVM_GUEST_PAGE_OFFSET\n"); + address = GUEST_PAGE_OFFSET; + break; + case KVM_GUEST_KERNEL_IMAGE_BASE: + DebugKVM("address is KVM_GUEST_KERNEL_IMAGE_BASE\n"); + address = GUEST_KERNEL_IMAGE_AREA_BASE; + break; + case KVM_GUEST_VCPU_VRAM_PHYS_BASE: + DebugKVM("address is KVM_GUEST_VCPU_VRAM_PHYS_BASE\n"); + address = GUEST_VCPU_VRAM_PHYS_BASE; + break; + case KVM_GUEST_VCPU_VRAM_VIRT_BASE: + DebugKVM("address is KVM_GUEST_VCPU_VRAM_VIRT_BASE\n"); + address = GUEST_VCPU_VRAM_VIRT_BASE; + break; + case KVM_GUEST_VCPU_VRAM_SIZE: + DebugKVM("address is KVM_GUEST_VCPU_VRAM_SIZE\n"); + address = GUEST_ONE_VCPU_VRAM_SIZE; + break; + case KVM_GUEST_IO_VRAM_PHYS_BASE: + DebugKVM("address is KVM_GUEST_IO_VRAM_PHYS_BASE\n"); + address = GUEST_IO_VRAM_PHYS_BASE; + break; + case KVM_GUEST_IO_VRAM_VIRT_BASE: + DebugKVM("address is KVM_GUEST_IO_VRAM_VIRT_BASE\n"); + address = GUEST_IO_VRAM_VIRT_BASE; + break; + case KVM_GUEST_IO_VRAM_SIZE: + DebugKVM("address is KVM_GUEST_IO_VRAM_SIZE\n"); + address = GUEST_IO_VRAM_SIZE; + break; + case KVM_GUEST_IO_PORTS_BASE: + DebugKVM("address is KVM_GUEST_IO_PORTS_BASE\n"); + address = GUEST_IO_PORTS_VIRT_BASE; + break; + case KVM_GUEST_NBSR_BASE_NODE_0: + DebugKVM("address is KVM_GUEST_NBSR_BASE_NODE_0\n"); + address = (unsigned long)THE_NODE_NBSR_PHYS_BASE(0); + 
break; + case KVM_GUEST_NBSR_BASE_NODE_1: + DebugKVM("address is KVM_GUEST_NBSR_BASE_NODE_1\n"); + address = (unsigned long)THE_NODE_NBSR_PHYS_BASE(1); + break; + case KVM_GUEST_NBSR_BASE_NODE_2: + DebugKVM("address is KVM_GUEST_NBSR_BASE_NODE_2\n"); + address = (unsigned long)THE_NODE_NBSR_PHYS_BASE(2); + break; + case KVM_GUEST_NBSR_BASE_NODE_3: + DebugKVM("address is KVM_GUEST_NBSR_BASE_NODE_3\n"); + address = (unsigned long)THE_NODE_NBSR_PHYS_BASE(3); + break; + case KVM_HOST_PAGE_OFFSET: + DebugKVM("address is KVM_HOST_PAGE_OFFSET\n"); + address = HOST_PAGE_OFFSET; + break; + case KVM_HOST_KERNEL_IMAGE_BASE: + DebugKVM("address is KVM_HOST_KERNEL_IMAGE_BASE\n"); + address = HOST_KERNEL_IMAGE_AREA_BASE; + break; + case KVM_KERNEL_AREAS_SIZE: + DebugKVM("address is KVM_KERNEL_AREAS_SIZE\n"); + address = E2K_KERNEL_AREAS_SIZE; + break; + case KVM_SHADOW_KERNEL_IMAGE_BASE: + DebugKVM("address is KVM_SHADOW_KERNEL_IMAGE_BASE\n"); + address = SHADOW_KERNEL_IMAGE_AREA_BASE; + break; + default: + DebugKVM("ioctl is unsupported\n"); + return -EINVAL; + } + DebugKVM("returns address 0x%lx\n", address); + r = put_user(address, addr); + if (r) { + DebugKVM("put_user() failed for user address 0x%lx\n", addr); + } + DebugKVM("returns with value %ld\n", r); + return r; +} + +long kvm_arch_dev_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + long r; + + DebugKVM("started for ioctl 0x%x\n", ioctl); + switch (ioctl) { + case KVM_GET_GUEST_ADDRESS: { + unsigned long __user *p = argp; + + DebugKVM("ioctl is KVM_GET_GUEST_ADDRESS\n"); + r = kvm_arch_ioctl_get_guest_address(p); + break; + } + default: + DebugKVM("ioctl is unsupported\n"); + r = -EINVAL; + } + DebugKVM("returns with value %ld\n", r); + return r; +} + +static bool cpu_has_kvm_support(void) +{ + if (kvm_is_guest_pv_vm()) + /* it is guest and it cannot support own virtual machines */ + return false; + +#ifdef CONFIG_KVM_PARAVIRTUALIZATION + /* software 
virtualization is enable */ + kvm_vm_types_available = KVM_E2K_SV_VM_TYPE_MASK; +#ifdef CONFIG_PARAVIRT_GUEST + /* hypervisor is paravirtualized host and guest kernel */ + kvm_vm_types_available |= KVM_E2K_SW_PV_VM_TYPE_MASK; +#endif /* CONFIG_PARAVIRT_GUEST */ +#endif /* CONFIG_KVM_PARAVIRTUALIZATION */ + +#ifdef CONFIG_KVM_HW_VIRTUALIZATION + if (kvm_is_hv_enable()) + kvm_vm_types_available |= KVM_E2K_HV_VM_TYPE_MASK; + + if (kvm_is_hw_pv_enable()) + kvm_vm_types_available |= KVM_E2K_HW_PV_VM_TYPE_MASK; +#endif /* CONFIG_KVM_HW_VIRTUALIZATION */ + + return kvm_vm_types_available != 0; +} + +static inline bool cpu_virt_disabled(void) +{ + /* paravirtualization is enable at any case */ + /* hardware virtualization prohibition will be checked while creation */ + /* fully virtualized guest machines */ + return false; +} + +struct work_struct kvm_dump_stacks; /* to schedule work to dump */ + /* guest VCPU stacks */ +int kvm_arch_init(void *opaque) +{ + int err; + + DebugKVM("started\n"); + + if (!cpu_has_kvm_support()) { + pr_err("KVM: no hardware and paravirtualization " + "support\n"); + return -EOPNOTSUPP; + } + + kvm_host_machine_setup(&machine); + user_area_caches_init(); + err = kvm_vmidmap_init(); + if (err) + goto out_free_caches; + + err = kvm_mmu_module_init(); + if (err) + goto out_free_vmidmap; + + INIT_WORK(&kvm_dump_stacks, &wait_for_print_all_guest_stacks); + + return 0; + +out_free_vmidmap: + kvm_vmidmap_destroy(); +out_free_caches: + user_area_caches_destroy(); + return err; +} + +void kvm_arch_exit(void) +{ + DebugKVM("started\n"); + kvm_mmu_module_exit(); + user_area_caches_destroy(); + kvm_vmidmap_destroy(); +} + +int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) +{ + int r; + int n; + struct kvm_memslots *slots = kvm_memslots(kvm); + struct kvm_memory_slot *memslot; + int is_dirty = 0; + + DebugKVM("started\n"); + mutex_lock(&kvm->slots_lock); + + r = kvm_get_dirty_log(kvm, log, &is_dirty); + if (r) + goto out; + + /* If 
nothing is dirty, don't bother messing with page tables. */ + if (is_dirty) { + kvm_flush_remote_tlbs(kvm); + memslot = id_to_memslot(slots, log->slot); + n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; + memset(memslot->dirty_bitmap, 0, n); + } + r = 0; +out: + mutex_unlock(&kvm->slots_lock); + return r; +} + +int kvm_arch_hardware_setup(void) +{ + DebugKVM("started\n"); + return 0; +} + +void kvm_arch_hardware_unsetup(void) +{ + DebugKVM("started\n"); +} + +int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) +{ + return 1; +} + +bool kvm_arch_has_vcpu_debugfs(void) +{ + return false; +} + +int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu) +{ + return 0; +} + +gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) +{ + DebugKVMPF("started for guest pfn 0x%llx\n", gfn); + return gfn; +} + +/* This is called either from another vcpu (CEPIC DAT is not active) + * or from current VCPU but inside of kvm_arch_vcpu_[un]blocking() pair + * (DAT is again inactive). */ +bool kvm_vcpu_has_epic_interrupts(const struct kvm_vcpu *vcpu) +{ + epic_page_t *cepic = vcpu->arch.hw_ctxt.cepic; + + /* Check mi_gst by reading CEPIC_CIR.stat and PMIRR */ + if (!vcpu->arch.hcall_irqs_disabled) { + if (cepic->cir.bits.stat) + return true; + + if (unlikely(epic_bgi_mode)) { + if (memchr_inv(cepic->pmirr_byte, 0, sizeof(cepic->pmirr_byte) + + __must_be_array(cepic->pmirr_byte))) + return true; + } else { + if (memchr_inv(cepic->pmirr, 0, sizeof(cepic->pmirr) + + __must_be_array(cepic->pmirr))) + return true; + } + } + + /* Check nmi_gst by reading CEPIC_PNMIRR */ + if (cepic->pnmirr.counter & CEPIC_PNMIRR_BIT_MASK) + return true; + + return false; +} + +/* This is called from kvm_vcpu_block() -> kvm_vcpu_running(), + * so EPIC has been saved in kvm_arch_vcpu_blocking() already. + * See kvm_arch_vcpu_blocking() for details. + * + * Also this can be called from kvm_arch_dy_runnable(), in + * which case we also check values in memory. 
*/ +int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) +{ + DebugKVMRUN("started for VCPU %d\n", vcpu->vcpu_id); + return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE || + vcpu->arch.unhalted || kvm_vcpu_has_pic_interrupts(vcpu); +} + +int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + DebugKVMRUN("started for VCPU %d\n", vcpu->vcpu_id); + mp_state->mp_state = vcpu->arch.mp_state; + return 0; +} + +bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) +{ + return false; +} + +static int vcpu_reset(struct kvm_vcpu *vcpu) +{ + int r; + + DebugKVM("started for VCPU %d\n", vcpu->vcpu_id); + vcpu->arch.launched = 0; + kvm_arch_vcpu_uninit(vcpu); + r = kvm_arch_vcpu_init(vcpu); + if (r) + goto fail; + + r = 0; +fail: + return r; +} + +int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + int r = 0; + + DebugKVM("started for VCPU %d\n", vcpu->vcpu_id); + vcpu->arch.mp_state = mp_state->mp_state; + if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED) + r = vcpu_reset(vcpu); + return r; +} + +struct kvm_e2k_info e2k_info = { + .module = THIS_MODULE, +}; + +static int __init kvm_e2k_init(void) +{ + DebugKVM("started\n"); + + if (paravirt_enabled()) + /* it is guest virtual machine and guest cannot have */ + /* own guests and be virtualized */ + return -ENOENT; + /*Register e2k VMM data to kvm side*/ + return kvm_init(&e2k_info, sizeof(struct kvm_vcpu), + __alignof__(struct kvm_vcpu), THIS_MODULE); +} + +static void __exit kvm_e2k_exit(void) +{ + DebugKVM("started\n"); + kvm_exit(); + return; +} +#else /* ! 
CONFIG_KVM_HOST_MODE */ +static int __init kvm_e2k_init(void) +{ + pr_info("support of KVM is OFF\n"); + return -ENOENT; +} + +static void __exit kvm_e2k_exit(void) +{ +} +#endif /* CONFIG_KVM_HOST_MODE */ + +bool kvm_debug = false; +static int __init kvm_set_debug(char *arg) +{ + kvm_debug = true; + return 0; +} +early_param("kvm_debug", kvm_set_debug); + +module_init(kvm_e2k_init) +module_exit(kvm_e2k_exit) diff --git a/arch/e2k/kvm/kvm_timer.h b/arch/e2k/kvm/kvm_timer.h new file mode 100644 index 000000000000..9e84af5200de --- /dev/null +++ b/arch/e2k/kvm/kvm_timer.h @@ -0,0 +1,57 @@ +#ifndef __KVM_E2K_TIMER_H +#define __KVM_E2K_TIMER_H + +#include + +typedef enum kvm_timer_type { + kvm_unknown_timer_type = 0, /* unknown timer type */ + kvm_sys_timer_type, /* lt system timer */ + kvm_wd_timer_type, /* lt watchdog timer */ + kvm_reset_timer_type, /* lt reset counter */ + kvm_power_timer_type, /* lt power counter */ + kvm_apic_timer_type, /* APIC local timer */ + kvm_epic_timer_type, /* CEPIC local timer */ + kvm_sci_timer_type, /* SPMC SCI timer */ +} kvm_timer_type_t; + +typedef enum kvm_timer_work { + kvm_unknown_timer_work = 0, /* unknown work */ + kvm_set_reset_irq_timer_work, /* generate and reset interrupt */ + kvm_set_irq_timer_work, /* generate interrupt */ + kvm_reset_irq_timer_work, /* reset interrupt */ + kvm_watchdog_reset_timer_work, /* reset system on watchdog */ +} kvm_timer_work_t; + +typedef struct kvm_timer { + const char *name; /* timer name */ + kvm_timer_type_t type; /* timer type (see above) */ + struct hrtimer timer; /* high resolution timer to emulate */ + /* timers counters */ + u64 start_count; /* counter value at the (re)start */ + /* moment of high resolution timer */ + s64 period; /* unit: ns */ + u64 period_start; /* counter value at the start of */ + /* current timer period */ + s64 running_time; /* value of VCPU running time at */ + /* moment of last timer setting */ + u64 host_start_ns; /* hrtimer start time on host */ + /* at 
nsecs */ + atomic_t pending; /* accumulated triggered timers */ + bool reinject; + bool started; /* timer is runing */ + bool hrtimer_started; /* hrtimer is started and is active */ + raw_spinlock_t lock; /* lock to update timer struct */ + kvm_timer_work_t work; /* work type on timer expires */ + struct kthread_worker *worker; /* kernel thread to handle timer */ + struct kthread_work expired; + const struct kvm_timer_ops *t_ops; + struct kvm *kvm; + struct kvm_vcpu *vcpu; +} kvm_timer_t; + +typedef struct kvm_timer_ops { + bool (*is_periodic)(struct kvm_timer *ktimer); + void (*timer_fn)(struct kvm_vcpu *vcpu, void *data); +} kvm_timer_ops_t; + +#endif /* __KVM_E2K_TIMER_H */ \ No newline at end of file diff --git a/arch/e2k/kvm/lapic.c b/arch/e2k/kvm/lapic.c new file mode 100644 index 000000000000..4b434fb588fd --- /dev/null +++ b/arch/e2k/kvm/lapic.c @@ -0,0 +1,1707 @@ + +/* + * Local APIC virtualization + * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation. + * Based on arch/x86/kvm/lapic.c code + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef DEBUG + +#include "pic.h" +#include "ioapic.h" +#include "irq.h" +#include "lapic.h" + +#define mod_64(x, y) ((x) % (y)) + +#define PRId64 "d" +#define PRIx64 "llx" +#define PRIu64 "u" +#define PRIo64 "o" + +#define APIC_BUS_CYCLE_NS 1 + +#ifdef DEBUG +#define apic_debug(fmt, arg...) pr_warning(fmt, ##arg) +#define apic_reg_debug(fmt, arg...) pr_warning(fmt, ##arg) +#else /* ! DEBUG */ +#define apic_debug(fmt, arg...) +#define apic_reg_debug(fmt, arg...) +#endif /* DEBUG */ + +#undef DEBUG_KVM_IRQ_MODE +#undef DebugKVMIRQ +#define DEBUG_KVM_IRQ_MODE 0 /* kernel APIC IRQs debugging */ +#define DebugKVMIRQ(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_IRQ_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_VECTOR_MODE +#undef DebugKVMVEC +#define DEBUG_KVM_VECTOR_MODE 0 /* kernel APIC IRQs debugging */ +#define DebugKVMVEC(fmt, args...) \ +({ \ + if (DEBUG_KVM_VECTOR_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_TIMER_MODE +#undef DebugKVMTM +#define DEBUG_KVM_TIMER_MODE 0 /* kernel apic timer debugging */ +#define DebugKVMTM(fmt, args...) \ +({ \ + if (DEBUG_KVM_TIMER_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_TIMER_MODE +#undef DebugTM +#define DEBUG_TIMER_MODE 0 /* kernel local apic timer debugging */ +#define DebugTM(fmt, args...) \ +({ \ + if (DEBUG_TIMER_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_APIC_TIMER_MODE +#undef DebugKVMAT +#define DEBUG_KVM_APIC_TIMER_MODE 0 /* KVM LAPIC timer debugging */ +#define DebugKVMAT(fmt, args...) \ +({ \ + if (DEBUG_KVM_APIC_TIMER_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +bool debug_VIRQs = false; +#undef DEBUG_KVM_VIRQs_MODE +#undef DebugVIRQs +#define DEBUG_KVM_VIRQs_MODE debug_VIRQs /* VIRQs debugging */ +#define DebugVIRQs(fmt, args...) \ +({ \ + if (DEBUG_KVM_VIRQs_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_SHOW_GUEST_STACKS_MODE +#undef DebugGST +#define DEBUG_SHOW_GUEST_STACKS_MODE true /* show all guest stacks */ +#define DebugGST(fmt, args...) 
\ +({ \ + if (DEBUG_SHOW_GUEST_STACKS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#define APIC_LVT_NUM 6 +/* 14 is the version for Xeon and Pentium 8.4.8*/ +#define LAPIC_MMIO_LENGTH (1 << 12) + +#define VEC_POS(v) ((v) & (32 - 1)) +#define REG_POS(v) (((v) >> 5) << 4) + +static inline u32 apic_get_reg(struct kvm_lapic *apic, int reg_off) +{ + apic_reg_debug("apic_get_reg(0x%x) = 0x%x from %px\n", + reg_off, *((u32 *) (apic->regs + reg_off)), + ((u32 *) (apic->regs + reg_off))); + return *((u32 *) (apic->regs + reg_off)); +} + +static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val) +{ + *((u32 *) (apic->regs + reg_off)) = val; + apic_reg_debug("apic_set_reg(0x%x) = 0x%x to %px\n", + reg_off, *((u32 *) (apic->regs + reg_off)), + ((u32 *) (apic->regs + reg_off))); +} + +static inline int apic_test_and_set_vector(int vec, void *bitmap) +{ + return test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); +} + +static inline int apic_test_and_clear_vector(int vec, void *bitmap) +{ + return test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); +} + +static inline void apic_set_vector(int vec, void *bitmap) +{ + set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); +} + +static inline void apic_clear_vector(int vec, void *bitmap) +{ + clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); +} + +static inline int apic_hw_enabled(struct kvm_lapic *apic) +{ + return (apic)->vcpu->arch.apic_base == APIC_BASE; +} + +static inline int apic_sw_enabled(struct kvm_lapic *apic) +{ + return apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED; +} + +static inline int apic_enabled(struct kvm_lapic *apic) +{ + return apic_sw_enabled(apic) && apic_hw_enabled(apic); +} + +#define LVT_MASK \ + (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK) + +#define LINT_MASK \ + (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \ + APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER) + +static inline int kvm_apic_id(struct kvm_lapic *apic) +{ + return 
GET_APIC_ID(apic_get_reg(apic, APIC_ID)); +} + +static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type) +{ + return !(apic_get_reg(apic, lvt_type) & APIC_LVT_MASKED); +} + +static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type) +{ + return apic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK; +} + +static inline int apic_lvtt_period(struct kvm_lapic *apic) +{ + return apic_get_reg(apic, APIC_LVTT) & APIC_LVT_TIMER_PERIODIC; +} + +static inline int apic_lvt_nmi_mode(u32 lvt_val) +{ + return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI; +} + +void kvm_apic_set_version(struct kvm_vcpu *vcpu) +{ + struct kvm_lapic *apic = vcpu->arch.apic; + u32 v = SET_APIC_VERSION(APIC_VERSION); + + if (!irqchip_in_kernel(vcpu->kvm)) + return; + + v |= SET_APIC_MAXLVT(APIC_MAXLVT); + + apic_set_reg(apic, APIC_LVR, v); +} + +static inline int apic_x2apic_mode(struct kvm_lapic *apic) +{ + return apic->vcpu->arch.apic_base & X2APIC_ENABLE; +} + +static unsigned int apic_lvt_mask[APIC_LVT_NUM] = { + LVT_MASK | APIC_LVT_TIMER_PERIODIC, /* LVTT */ + LVT_MASK | APIC_MODE_MASK, /* LVTTHMR */ + LVT_MASK | APIC_MODE_MASK, /* LVTPC */ + LINT_MASK, LINT_MASK, /* LVT0-1 */ + LVT_MASK /* LVTERR */ +}; + +static int find_highest_vector(void *bitmap) +{ + u32 *word = bitmap; + int word_offset = MAX_APIC_VECTOR >> 5; + + while ((word_offset != 0) && (word[(--word_offset) << 2] == 0)) + continue; + + if (likely(!word_offset && !word[0])) + return -1; + else + return fls(word[word_offset << 2]) - 1 + (word_offset << 5); +} + +static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic) +{ + apic->irr_pending = true; + return apic_test_and_set_vector(vec, apic->regs + APIC_IRR); +} + +static inline int apic_search_irr(struct kvm_lapic *apic) +{ + return find_highest_vector(apic->regs + APIC_IRR); +} + +static inline int apic_find_highest_irr(struct kvm_lapic *apic) +{ + int result; + + if (!apic->irr_pending) { + result = apic_search_irr(apic); + 
if (result == -1) + return -1; + apic->irr_pending = true; + } else { + result = apic_search_irr(apic); + } + ASSERT(result == -1 || result >= 16); + + return result; +} + +static inline void apic_clear_irr(int vec, struct kvm_lapic *apic) +{ + apic->irr_pending = false; + apic_clear_vector(vec, apic->regs + APIC_IRR); + if (apic_search_irr(apic) != -1) + apic->irr_pending = true; +} + +int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu) +{ + struct kvm_lapic *apic = vcpu->arch.apic; + int highest_irr; + + /* This may race with setting of irr in __apic_accept_irq() and + * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq + * will cause vmexit immediately and the value will be recalculated + * on the next vmentry. + */ + if (!apic) + return 0; + highest_irr = apic_find_highest_irr(apic); + + return highest_irr; +} + +static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, + int vector, int level, int trig_mode); + +int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq) +{ + struct kvm_lapic *apic = vcpu->arch.apic; + + DebugKVMIRQ("started for VCPU #%d vector 0x%x\n", + vcpu->vcpu_id, irq->vector); + return __apic_accept_irq(apic, irq->delivery_mode, irq->vector, + irq->level, irq->trig_mode); +} + +static inline int apic_find_highest_isr(struct kvm_lapic *apic) +{ + int result; + + result = find_highest_vector(apic->regs + APIC_ISR); + ASSERT(result == -1 || result >= 16); + + return result; +} + +static void apic_update_ppr(struct kvm_lapic *apic) +{ + u32 tpr, isrv, ppr; + int isr; + + tpr = apic_get_reg(apic, APIC_TASKPRI); + isr = apic_find_highest_isr(apic); + isrv = (isr != -1) ? 
isr : 0; + + if ((tpr & 0xf0) >= (isrv & 0xf0)) + ppr = tpr & 0xff; + else + ppr = isrv & 0xf0; + + apic_debug("vlapic %px, ppr 0x%x, isr 0x%x, isrv 0x%x", + apic, ppr, isr, isrv); + + apic_set_reg(apic, APIC_PROCPRI, ppr); +} + +static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr) +{ + apic_set_reg(apic, APIC_TASKPRI, tpr); + apic_update_ppr(apic); +} + +int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest) +{ + return dest == 0xff || kvm_apic_id(apic) == dest; +} + +int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda) +{ + int result = 0; + u32 logical_id; + + if (apic_x2apic_mode(apic)) { + logical_id = apic_get_reg(apic, APIC_LDR); + return logical_id & mda; + } + + logical_id = GET_APIC_LOGICAL_ID(apic_get_reg(apic, APIC_LDR)); + + switch (apic_get_reg(apic, APIC_DFR)) { + case APIC_DFR_FLAT: + if (logical_id & mda) + result = 1; + break; + case APIC_DFR_CLUSTER: + if (((logical_id >> 4) == (mda >> 0x4)) + && (logical_id & mda & 0xf)) + result = 1; + break; + default: + printk(KERN_WARNING "Bad DFR vcpu %d: %08x\n", + apic->vcpu->vcpu_id, apic_get_reg(apic, APIC_DFR)); + break; + } + + return result; +} + +int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, + int short_hand, int dest, int dest_mode) +{ + int result = 0; + struct kvm_lapic *target = vcpu->arch.apic; + + apic_debug("target %px, source %px, dest 0x%x, " + "dest_mode 0x%x, short_hand 0x%x\n", + target, source, dest, dest_mode, short_hand); + + ASSERT(target); + switch (short_hand) { + case APIC_DEST_NOSHORT: + if (dest_mode == 0) + /* Physical mode. */ + result = kvm_apic_match_physical_addr(target, dest); + else + /* Logical mode. 
*/ + result = kvm_apic_match_logical_addr(target, dest); + break; + case APIC_DEST_SELF: + result = (target == source); + break; + case APIC_DEST_ALLINC: + result = 1; + break; + case APIC_DEST_ALLBUT: + result = (target != source); + break; + default: + printk(KERN_WARNING "Bad dest shorthand value %x\n", + short_hand); + break; + } + + return result; +} + +/* + * Add a pending IRQ into lapic. + * Return 1 if successfully added and 0 if discarded. + */ +static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, + int vector, int level, int trig_mode) +{ + int result = 0; + struct kvm_vcpu *vcpu = apic->vcpu; + + DebugKVMAT("started for VCPU #%d vector 0x%x, delivery mode %d " + "level %d trigger mode %d\n", + apic->vcpu->vcpu_id, vector, delivery_mode, level, trig_mode); + switch (delivery_mode) { + case APIC_DM_LOWEST: + DebugKVMAT("delivery mode is APIC_DM_LOWEST\n"); + vcpu->arch.apic_arb_prio++; + case APIC_DM_FIXED: + DebugKVMAT("delivery mode is APIC_DM_FIXED\n"); + /* FIXME add logic for vcpu on reset */ + if (unlikely(!apic_enabled(apic))) + break; + + if (trig_mode) { + apic_debug("level trig mode for vector %d", vector); + apic_set_vector(vector, apic->regs + APIC_TMR); + } else { + apic_clear_vector(vector, apic->regs + APIC_TMR); + } + result = !apic_test_and_set_irr(vector, apic); + trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode, + trig_mode, vector, !result); + if (!result) { + if (trig_mode) { + apic_debug("level trig mode repeatedly for " + "vector %x", vector); + DebugVIRQs("LAPIC #%d level trig mode " + "repeatedly for vector %x\n", + vcpu->vcpu_id, vector); + } else { + apic_debug("edge mode coalesced interrupt for " + "vector %x", vector); + DebugVIRQs("LAPIC #%d edge mode VIRQ coalesced " + "for vector %x\n", + vcpu->vcpu_id, vector); + if (vector == 0x49 || + vector == 0x40 || + vector == 0x81 || + vector == 0xfd || + (vector == 0xef && + apic_lvtt_period(apic))) + break; + if (vector == 0xef && + !apic_lvtt_period(apic) 
&& + atomic_read( + &apic->lapic_timer.pending) <= + 2 && + atomic_read( + &apic->lapic_timer.pending) > + 0) + /* it can be while switch periodic */ + /* mode to one shot or back */ + break; + } + DebugVIRQs("LAPIC #%d current pending VIRQs num %d\n", + vcpu->vcpu_id, + kvm_read_guest_lapic_virqs_num(vcpu)); + break; + } + + DebugVIRQs("LAPIC #%d set vector %x, current pending " + "VIRQs num %d", + vcpu->vcpu_id, vector, + kvm_read_guest_lapic_virqs_num(vcpu)); + kvm_inject_lapic_virq(apic); + break; + + case APIC_DM_REMRD: + DebugKVMVEC("delivery mode is APIC_DM_REMRD\n"); + printk(KERN_DEBUG "Ignoring delivery mode 3\n"); + break; + + case APIC_DM_SMI: + DebugKVMVEC("delivery mode is APIC_DM_SMI\n"); + printk(KERN_DEBUG "Ignoring guest SMI\n"); + break; + + case APIC_DM_NMI: + DebugKVMVEC("delivery mode is APIC_DM_NMI\n"); + result = 1; + kvm_inject_nmi(vcpu); + kvm_vcpu_kick(vcpu); + break; + + case APIC_DM_INIT: + DebugKVMVEC("delivery mode is APIC_DM_INIT\n"); + if (level) { + result = 1; + if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) + printk(KERN_DEBUG + "INIT on a runnable vcpu %d\n", + vcpu->vcpu_id); + vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; + kvm_inject_lapic_virq(apic); + } else { + apic_debug("Ignoring de-assert INIT to vcpu %d\n", + vcpu->vcpu_id); + } + break; + + case APIC_DM_STARTUP: + DebugKVMVEC("delivery mode is APIC_DM_STARTUP\n"); + apic_debug("SIPI to vcpu %d vector 0x%02x\n", + vcpu->vcpu_id, vector); + if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { + result = 1; + vcpu->arch.sipi_vector = vector; + vcpu->arch.mp_state = KVM_MP_STATE_SIPI_RECEIVED; + kvm_inject_lapic_virq(apic); + } + break; + + case APIC_DM_EXTINT: + DebugKVMVEC("delivery mode is APIC_DM_EXTINT\n"); + /* + * Should only be called by kvm_apic_local_deliver() with LVT0, + * before NMI watchdog was enabled. Already handled by + * kvm_apic_accept_pic_intr(). 
+ */ + break; + + default: + printk(KERN_ERR "TODO: unsupported delivery mode %x\n", + delivery_mode); + break; + } + return result; +} + +int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2) +{ + return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio; +} + +u32 hw_apic_find_cepic_priority(struct kvm_lapic *apic) +{ + int i; + + for (i = MAX_CEPIC_PRIORITY; i >= 0; i--) { + if (apic->cepic_vector[i]) + return i; + } + + pr_err("%s(): could not find cepic priority\n", __func__); + + return 0; +} + +void hw_apic_set_eoi(struct kvm_lapic *apic) +{ + union cepic_eoi reg_eoi; + union cepic_cpr reg_cpr; + u32 cepic_priority = hw_apic_find_cepic_priority(apic); + u32 cepic_vector = apic->cepic_vector[cepic_priority]; + + reg_eoi.raw = 0; + reg_eoi.bits.rcpr = cepic_priority; + epic_write_guest_w(CEPIC_EOI, reg_eoi.raw); + + /* Restore CPR */ + reg_cpr.raw = 0; + reg_cpr.bits.cpr = cepic_priority; + epic_write_guest_w(CEPIC_CPR, reg_cpr.raw); + + kvm_ioapic_update_eoi(apic->vcpu, cepic_vector); + + apic->cepic_vector[cepic_priority] = 0; +} + +void sw_apic_set_eoi(struct kvm_lapic *apic) +{ + int vector = apic_find_highest_isr(apic); + /* + * Not every write EOI will has corresponding ISR, + * one example is when Kernel check timer on setup_IO_APIC + */ + if (vector == -1) + return; + + apic_clear_vector(vector, apic->regs + APIC_ISR); + apic_update_ppr(apic); + + apic_clear_vector(vector, apic->regs + APIC_TMR); + if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)) + kvm_ioapic_update_eoi(apic->vcpu, vector); +} + +static void apic_send_ipi(struct kvm_lapic *apic) +{ + u32 icr_low = apic_get_reg(apic, APIC_ICR); + u32 icr_high = apic_get_reg(apic, APIC_ICR2); + struct kvm_lapic_irq irq; + + irq.vector = icr_low & APIC_VECTOR_MASK; + irq.delivery_mode = icr_low & APIC_MODE_MASK; + irq.dest_mode = icr_low & APIC_DEST_MASK; + irq.level = icr_low & APIC_INT_ASSERT; + irq.trig_mode = icr_low & APIC_INT_LEVELTRIG; + irq.shorthand = icr_low 
& APIC_SHORT_MASK; + if (apic_x2apic_mode(apic)) + irq.dest_id = icr_high; + else + irq.dest_id = GET_APIC_DEST_FIELD(icr_high); + + trace_kvm_apic_ipi(icr_low, irq.dest_id); + + apic_debug("icr_high 0x%x, icr_low 0x%x, " + "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, " + "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x\n", + icr_high, icr_low, irq.shorthand, irq.dest_id, + irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode, + irq.vector); + + kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq); +} + +u32 hw_apic_get_tmcct(struct kvm_lapic *apic) +{ + return epic_read_guest_w(CEPIC_TIMER_CUR); +} + +u32 sw_apic_get_tmcct(struct kvm_lapic *apic) +{ + struct kvm_vcpu *vcpu; + s64 remaining; + s64 running_time; + s64 ns; + s64 cycles; + u64 tmcct; + unsigned long flags; + + ASSERT(apic != NULL); + + /* if initial count is 0, current count should also be 0 */ + if (apic_get_reg(apic, APIC_TMICT) == 0) + return 0; + + raw_local_irq_save(flags); + + vcpu = apic->vcpu; + BUG_ON(kvm_get_guest_vcpu_runstate(vcpu) != RUNSTATE_in_hcall && + kvm_get_guest_vcpu_runstate(vcpu) != RUNSTATE_in_intercept); + running_time = kvm_do_get_guest_vcpu_running_time(vcpu); + cycles = get_cycles(); + DebugKVMTM("running time at start 0x%llx, now 0x%llx, cycles 0x%llx " + "period 0x%llx\n", + apic->lapic_timer.running_time, running_time, cycles, + apic->lapic_timer.period); + running_time -= apic->lapic_timer.running_time; + DebugKVMTM("running 0x%llx\n", running_time); + raw_local_irq_restore(flags); + BUG_ON(running_time < 0); + remaining = apic->lapic_timer.period - cycles_2nsec(running_time); + if (remaining < 0) + remaining = 0; + DebugKVMTM("remaining time 0x%llx\n", remaining); + + if (apic->lapic_timer.period != 0) { + ns = mod_64(remaining, apic->lapic_timer.period); + } else { + ns = 0; + } + cycles = nsecs_2cycles(ns); + tmcct = div64_u64(cycles, + (APIC_BUS_CYCLE_NS * apic->divide_count)); + if (tmcct > 0xffffffffUL) + tmcct = 0xffffffffUL; + DebugKVMTM("ns 
0x%llx, cycles 0x%llx tmcct 0x%llx\n", + ns, cycles, tmcct); + + return tmcct; +} + +static inline void report_tpr_access(struct kvm_lapic *apic, bool write) +{ + pr_err("report_tpr_access() is not yet implemented\n"); + ASSERT(1); +} + +u32 hw_apic_read_nm(struct kvm_lapic *apic) +{ + return epic_read_guest_w(CEPIC_PNMIRR); +} + +u32 sw_apic_read_nm(struct kvm_lapic *apic) +{ + return apic_get_reg(apic, APIC_NM); +} + +static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset) +{ + u32 val = 0; + + if (offset >= LAPIC_MMIO_LENGTH) + return 0; + + switch (offset) { + case APIC_ID: + if (apic_x2apic_mode(apic)) + val = kvm_apic_id(apic); + else + val = kvm_apic_id(apic) << 24; + break; + case APIC_ARBPRI: + printk(KERN_WARNING "Access APIC ARBPRI register " + "which is for P6\n"); + break; + + case APIC_TMCCT: /* Timer CCR */ + val = apic_get_tmcct(apic); + break; + + case APIC_TASKPRI: + report_tpr_access(apic, false); + /* fall thru */ + break; + case APIC_VECT: /* Timer CCR */ + val = kvm_get_apic_interrupt(apic->vcpu); + break; + + case APIC_NM: + val = apic_read_nm(apic); + break; + + default: + apic_update_ppr(apic); + val = apic_get_reg(apic, offset); + break; + } + + return val; +} + +static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev) +{ + return container_of(dev, struct kvm_lapic, dev); +} + +static int apic_reg_read(struct kvm_lapic *apic, u32 offset, int len, + void *data) +{ + unsigned char alignment = offset & 0xf; + u32 result; + /* this bitmask has a bit cleared for each reserver register */ + static const u64 rmask = 0x43ff01ffffffe70eULL; + + if ((alignment + len) > 4) { + apic_debug("KVM_APIC_READ: alignment error %x %d\n", + offset, len); + return 1; + } + + if (offset <= 0x3f0 && !(rmask & (1ULL << (offset >> 4)))) { + apic_debug("KVM_APIC_READ: read reserved register %x\n", + offset); + return 1; + } + + result = __apic_read(apic, offset & ~0xf); + + trace_kvm_apic_read(offset, result); + + switch (len) { + case 1: + 
case 2: + case 4: + memcpy(data, (char *)&result + alignment, len); + break; + default: + printk(KERN_ERR "Local APIC read with len = %x, " + "should be 1,2, or 4 instead\n", len); + break; + } + return 0; +} + +static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr) +{ + return apic_hw_enabled(apic) && + addr >= apic->base_address && + addr < apic->base_address + LAPIC_MMIO_LENGTH; +} + +static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this, + gpa_t address, int len, void *data) +{ + struct kvm_lapic *apic = to_lapic(this); + u32 offset = address - apic->base_address; + + if (!apic_mmio_in_range(apic, address)) + return -EOPNOTSUPP; + + apic_reg_debug("started to mmio read address 0x%lx, offset 0x%lx, " + "len %d to %px\n", + address, offset, len, data); + + apic_reg_read(apic, offset, len, data); + apic_reg_debug("mmio read data 0x%lx\n", *(u64 *)data); + + return 0; +} + +static void update_divide_count(struct kvm_lapic *apic) +{ + u32 tmp1, tmp2, tdcr; + + tdcr = apic_get_reg(apic, APIC_TDCR); + tmp1 = tdcr & 0xf; + tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1; + apic->divide_count = 0x1 << (tmp2 & 0x7); + + apic_debug("timer divide count is 0x%x\n", + apic->divide_count); +} + +void start_hw_apic_timer(struct kvm_lapic *apic, u32 apic_tmict) +{ + epic_write_guest_w(CEPIC_TIMER_INIT, apic_tmict); + epic_write_guest_w(CEPIC_TIMER_CUR, apic_tmict); +} + +void start_sw_apic_timer(struct kvm_lapic *apic, u32 apic_tmict) +{ + ktime_t now; + long period; + long cycles; + + hrtimer_cancel(&apic->lapic_timer.timer); + apic_set_reg(apic, APIC_TMICT, apic_tmict); + + now = apic->lapic_timer.timer.base->get_time(); + cycles = get_cycles(); + period = cycles_2nsec((u64)apic_get_reg(apic, APIC_TMICT) * + APIC_BUS_CYCLE_NS * apic->divide_count); + DebugKVMTM("APIC TMICT 0x%x period 0x%lx cpu_freq_hz 0x%x " + "cycles 0x%lx\n", + apic_get_reg(apic, APIC_TMICT), + period, cpu_freq_hz, cycles); + if (unlikely(!apic->lapic_timer.started)) + 
apic->lapic_timer.started = true; + atomic_set(&apic->lapic_timer.pending, 0); + + if (period == 0) { + apic->lapic_timer.period = 0; + return; + } + /* + * Do not allow the guest to program periodic timers with small + * interval, since the hrtimers are not throttled by the host + * scheduler. + */ + if (apic_lvtt_period(apic)) { + if (period < NSEC_PER_MSEC/2) + period = NSEC_PER_MSEC/2; + } + +again: + if (!hrtimer_active(&apic->lapic_timer.timer)) { + apic->lapic_timer.period = period; + cycles = get_cycles(); + hrtimer_start(&apic->lapic_timer.timer, + ktime_add_ns(now, period), + HRTIMER_MODE_ABS); + apic->lapic_timer.running_time = + kvm_get_guest_vcpu_running_time(apic->vcpu); + DebugKVMTM("started lapic hrtimer now 0x%llx period 0x%lx " + "running time 0x%llx, cycles 0x%lx\n", + ktime_to_ns(now), period, + apic->lapic_timer.running_time, cycles); + } else if (hrtimer_callback_running(&apic->lapic_timer.timer)) { + BUG_ON(apic->lapic_timer.period != 0); + cycles = get_cycles(); + hrtimer_add_expires_ns(&apic->lapic_timer.timer, period); + apic->lapic_timer.period = period; + apic->lapic_timer.running_time = + kvm_get_guest_vcpu_running_time(apic->vcpu); + DebugKVMTM("restarted lapic hrtimer now 0x%llx period 0x%lx " + "running time 0x%llx, cycles 0x%lx\n", + ktime_to_ns(now), period, + apic->lapic_timer.running_time, cycles); + } else { + /* timer is active probably is completing, so waiting */ + DebugKVMTM("hrtimer is completing, small waiting\n"); + cpu_relax(); + goto again; + } + + DebugTM("%s: bus cycle is %" PRId64 "ns, now 0x%016" + PRIx64 ", " + "timer initial count 0x%x, period %lldns, " + "expire @ 0x%016" PRIx64 ".\n", __func__, + APIC_BUS_CYCLE_NS, ktime_to_ns(now), + apic_get_reg(apic, APIC_TMICT), + apic->lapic_timer.period, + ktime_to_ns(ktime_add_ns(now, + apic->lapic_timer.period))); +} + +static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val) +{ + int nmi_wd_enabled = apic_lvt_nmi_mode(apic_get_reg(apic, APIC_LVT0)); + 
+ if (apic_lvt_nmi_mode(lvt0_val)) { + if (!nmi_wd_enabled) { + apic_debug("Receive NMI setting on APIC_LVT0 " + "for cpu %d\n", apic->vcpu->vcpu_id); + apic->vcpu->kvm->arch.vapics_in_nmi_mode++; + } + } else if (nmi_wd_enabled) { + apic->vcpu->kvm->arch.vapics_in_nmi_mode--; + } +} + +void hw_apic_write_nm(struct kvm_lapic *apic, u32 val) +{ + union cepic_pnmirr reg_pnmirr, val_pnmirr; + + reg_pnmirr.raw = epic_read_guest_w(CEPIC_PNMIRR); + val_pnmirr.raw = val; + + if (reg_pnmirr.bits.nmi && val_pnmirr.bits.nmi) + reg_pnmirr.bits.nmi = 0; + + if (reg_pnmirr.raw & CEPIC_PNMIRR_BIT_MASK) + pr_err("%s(): unsupported CEPIC NMI type\n", __func__); + + epic_write_guest_w(CEPIC_PNMIRR, reg_pnmirr.raw); +} + +void hw_apic_write_lvtt(struct kvm_lapic *apic, u32 apic_lvtt) +{ + bool periodic = apic_lvtt & APIC_LVT_TIMER_PERIODIC; + bool masked = apic_lvtt & APIC_LVT_MASKED; + u32 vector = apic_lvtt & APIC_VECTOR_MASK; + union cepic_timer_lvtt reg_lvtt; + + reg_lvtt.raw = 0; + reg_lvtt.bits.mode = periodic; + reg_lvtt.bits.mask = masked; + reg_lvtt.bits.vect = vector; + epic_write_guest_w(CEPIC_TIMER_LVTT, reg_lvtt.raw); +} + +static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) +{ + int ret = 0; + + trace_kvm_apic_write(reg, val); + + switch (reg) { + case APIC_BSP: /* Local APIC BSP */ + case APIC_ID: /* Local APIC ID */ + if (!apic_x2apic_mode(apic)) + apic_set_reg(apic, APIC_ID, val); + else + ret = 1; + break; + + case APIC_TASKPRI: + report_tpr_access(apic, true); + apic_set_tpr(apic, val & 0xff); + break; + + case APIC_EOI: + apic_set_eoi(apic); + break; + + case APIC_LDR: + if (!apic_x2apic_mode(apic)) + apic_set_reg(apic, APIC_LDR, val & APIC_LDR_MASK); + else + ret = 1; + break; + + case APIC_DFR: + if (!apic_x2apic_mode(apic)) + apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF); + else + ret = 1; + break; + + case APIC_SPIV: { + u32 mask = 0x3ff; + if (apic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI) + mask |= APIC_SPIV_DIRECTED_EOI; + 
apic_set_reg(apic, APIC_SPIV, val & mask); + if (!(val & APIC_SPIV_APIC_ENABLED)) { + int i; + u32 lvt_val; + + for (i = 0; i < APIC_LVT_NUM; i++) { + lvt_val = apic_get_reg(apic, + APIC_LVTT + 0x10 * i); + apic_set_reg(apic, APIC_LVTT + 0x10 * i, + lvt_val | APIC_LVT_MASKED); + } + atomic_set(&apic->lapic_timer.pending, 0); + + } + break; + } + case APIC_ICR: + /* No delay here, so we always clear the pending bit */ + apic_set_reg(apic, APIC_ICR, val & ~(1 << 12)); + apic_send_ipi(apic); + break; + + case APIC_ICR2: + if (!apic_x2apic_mode(apic)) + val &= 0xff000000; + apic_set_reg(apic, APIC_ICR2, val); + break; + + case APIC_LVT0: + apic_manage_nmi_watchdog(apic, val); + case APIC_LVTTHMR: + case APIC_LVTPC: + case APIC_LVT1: + case APIC_LVTERR: + /* TODO: Check vector */ + if (!apic_sw_enabled(apic)) + val |= APIC_LVT_MASKED; + + val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4]; + apic_set_reg(apic, reg, val); + + break; + + case APIC_LVTT: + apic_write_lvtt(apic, val); + apic_set_reg(apic, reg, val); + + case APIC_TMICT: + start_apic_timer(apic, val); + break; + + case APIC_TDCR: + if (val & 4) + printk(KERN_ERR "KVM_WRITE:TDCR %x\n", val); + apic_set_reg(apic, APIC_TDCR, val); + update_divide_count(apic); + break; + + case APIC_ESR: + if (apic_x2apic_mode(apic) && val != 0) { + printk(KERN_ERR "KVM_WRITE:ESR not zero %x\n", val); + ret = 1; + } + break; + + case APIC_SELF_IPI: + if (apic_x2apic_mode(apic)) { + apic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff)); + } else + ret = 1; + break; + + case APIC_NM: + apic_write_nm(apic, val); + break; + + default: + ret = 1; + break; + } + if (ret) + apic_debug("Local APIC Write to read-only register %x\n", reg); + return ret; +} + +static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, + gpa_t address, int len, const void *data) +{ + struct kvm_lapic *apic = to_lapic(this); + unsigned int offset = address - apic->base_address; + u32 val; + + if (!apic_mmio_in_range(apic, address)) + return 
-EOPNOTSUPP; + + /* + * APIC register must be aligned on 128-bits boundary. + * 32/64/128 bits registers must be accessed thru 32 bits. + * Refer SDM 8.4.1 + */ + if (len != 4 || (offset & 0xf)) { + /* Don't shout loud, $infamous_os would cause only noise. */ + apic_debug("apic write: bad size=%d %lx\n", len, (long)address); + return 0; + } + + val = *(u32 *)data; + + /* too common printing */ + if (offset != APIC_EOI) + apic_debug("%s: offset 0x%x with length 0x%x, and value is " + "0x%x\n", __func__, offset, len, val); + + apic_reg_write(apic, offset & 0xff0, val); + + return 0; +} + +void kvm_free_lapic(struct kvm_vcpu *vcpu) +{ + if (!vcpu->arch.apic) + return; + + if (!kvm_vcpu_is_hw_apic(vcpu)) + hrtimer_cancel(&vcpu->arch.apic->lapic_timer.timer); + + if (vcpu->arch.apic->regs_page) + __free_page(vcpu->arch.apic->regs_page); + + kfree(vcpu->arch.apic); + vcpu->arch.apic = NULL; +} + +/* + *---------------------------------------------------------------------- + * LAPIC interface + *---------------------------------------------------------------------- + */ + +void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) +{ + struct kvm_lapic *apic = vcpu->arch.apic; + + if (!apic) { + vcpu->arch.apic_base = value; + return; + } + + vcpu->arch.apic_base = value; + if (apic_x2apic_mode(apic)) { + u32 id = kvm_apic_id(apic); + u32 ldr = ((id & ~0xf) << 16) | (1 << (id & 0xf)); + apic_set_reg(apic, APIC_LDR, ldr); + } + apic->base_address = apic->vcpu->arch.apic_base; + + /* with FSB delivery interrupt, we can restart APIC functionality */ + apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is " + "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address); + +} + +void kvm_lapic_reset(struct kvm_vcpu *vcpu) +{ + struct kvm_lapic *apic; + unsigned int reg; + int i; + + apic_debug("%s\n", __func__); + + ASSERT(vcpu); + apic = vcpu->arch.apic; + ASSERT(apic != NULL); + + /* Stop the timer in case it's a reset to an active apic */ + if 
(!kvm_vcpu_is_hw_apic(vcpu)) + hrtimer_cancel(&apic->lapic_timer.timer); + + apic_set_reg(apic, APIC_ID, vcpu->arch.hard_cpu_id << 24); + + kvm_apic_set_version(apic->vcpu); + + for (i = 0; i < APIC_LVT_NUM; i++) + apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); + apic_set_reg(apic, APIC_LVT0, + SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT)); + apic_set_reg(apic, APIC_LVT2, APIC_LVT_MASKED); + apic_set_reg(apic, APIC_DSP, APIC_LVT_MASKED); + apic_set_reg(apic, APIC_LVT4, APIC_LVT_MASKED); + + apic_set_reg(apic, APIC_DFR, 0xffffffffU); + apic_set_reg(apic, APIC_SPIV, 0xff); + apic_set_reg(apic, APIC_TASKPRI, 0); + apic_set_reg(apic, APIC_LDR, 0); + apic_set_reg(apic, APIC_ESR, 0); + apic_set_reg(apic, APIC_ICR, 0); + apic_set_reg(apic, APIC_ICR2, 0); + apic_set_reg(apic, APIC_TDCR, 0); + apic_set_reg(apic, APIC_TMICT, 0); + for (i = 0; i < 8; i++) { + apic_set_reg(apic, APIC_IRR + 0x10 * i, 0); + apic_set_reg(apic, APIC_ISR + 0x10 * i, 0); + apic_set_reg(apic, APIC_TMR + 0x10 * i, 0); + } + + reg = APIC_NM_PCI | APIC_NM_SPECIAL | APIC_NM_TIMER | + APIC_NM_NMI_DEBUG_MASK | APIC_NM_INTQLAPIC_MASK | + APIC_NM_INT_VIOLAT_MASK; + apic_set_reg(apic, APIC_M_ERM, reg); + apic_set_reg(apic, APIC_NM, reg); + + apic->irr_pending = false; + update_divide_count(apic); + if (!kvm_vcpu_is_hw_apic(vcpu)) { + atomic_set(&apic->lapic_timer.pending, 0); + apic->lapic_timer.started = false; + } + reg = APIC_BSP_ENABLE; + if (kvm_vcpu_is_bsp(vcpu)) + reg |= APIC_BSP_IS_BSP; + apic_set_reg(apic, APIC_BSP, reg); + apic_update_ppr(apic); + + vcpu->arch.apic_arb_prio = 0; + + apic_debug(KERN_INFO "%s: vcpu=%px, id=%d, base_msr=" + "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__, + vcpu, kvm_apic_id(apic), + vcpu->arch.apic_base, apic->base_address); +} + +bool kvm_apic_present(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.apic && apic_hw_enabled(vcpu->arch.apic); +} + +int kvm_lapic_enabled(struct kvm_vcpu *vcpu) +{ + return kvm_apic_present(vcpu) && 
apic_sw_enabled(vcpu->arch.apic); +} + +/* + *---------------------------------------------------------------------- + * timer interface + *---------------------------------------------------------------------- + */ + +static bool lapic_is_periodic(struct kvm_timer *ktimer) +{ + struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, + lapic_timer); + return apic_lvtt_period(apic); +} + +int apic_has_pending_timer(struct kvm_vcpu *vcpu) +{ + struct kvm_lapic *lapic = vcpu->arch.apic; + + if (lapic && apic_enabled(lapic) && apic_lvt_enabled(lapic, APIC_LVTT)) + return atomic_read(&lapic->lapic_timer.pending); + + return 0; +} + +static int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type) +{ + u32 reg = apic_get_reg(apic, lvt_type); + int vector, mode, trig_mode; + + if (apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) { + vector = reg & APIC_VECTOR_MASK; + mode = reg & APIC_MODE_MASK; + trig_mode = reg & APIC_LVT_LEVEL_TRIGGER; + return __apic_accept_irq(apic, mode, vector, 1, trig_mode); + } + return 0; +} + +void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu) +{ + struct kvm_lapic *apic = vcpu->arch.apic; + + if (apic) + kvm_apic_local_deliver(apic, APIC_LVT0); +} + +int kvm_apic_sysrq_deliver(struct kvm_vcpu *vcpu) +{ + struct kvm_lapic *apic = vcpu->arch.apic; + + DebugGST("started for VCPU #%d\n", vcpu->vcpu_id); + if (apic && apic_hw_enabled(apic)) { + /* set NMI interrupt type as reason to interrupt */ + apic_set_reg(apic, APIC_NM, APIC_NM_NMI); + + return __apic_accept_irq(apic, + APIC_DM_FIXED, + SYSRQ_SHOWSTATE_APIC_VECTOR, + 1, /* level */ + 1); /* trigger mode */ + } + return 0; +} + +int kvm_apic_nmi_deliver(struct kvm_vcpu *vcpu) +{ + struct kvm_lapic *apic = vcpu->arch.apic; + + if (apic && apic_hw_enabled(apic)) { + /* NMI is not used only to dump active stack on VCPU */ + /* DO_DUMP_VCPU_STACK(vcpu) = true; */ + return __apic_accept_irq(apic, + APIC_DM_FIXED, + KVM_NMI_APIC_VECTOR, + 1, /* level */ + 1); /* trigger mode */ + } 
+ return 0; +} + +static const struct kvm_timer_ops lapic_timer_ops = { + .is_periodic = lapic_is_periodic, +}; + +static const struct kvm_io_device_ops apic_mmio_ops = { + .read = apic_mmio_read, + .write = apic_mmio_write, +}; + +int kvm_create_lapic(struct kvm_vcpu *vcpu) +{ + struct kvm_lapic *apic; + int ret = 0; + + ASSERT(vcpu != NULL); + apic_debug("apic_init %d\n", vcpu->vcpu_id); + + apic = kzalloc(sizeof(*apic), GFP_KERNEL); + if (!apic) { + ret = -ENOMEM; + goto nomem; + } + + vcpu->arch.apic = apic; + + apic->regs_page = alloc_page(GFP_KERNEL); + if (apic->regs_page == NULL) { + printk(KERN_ERR "malloc apic regs error for vcpu %x\n", + vcpu->vcpu_id); + ret = -ENOMEM; + goto nomem_free_apic; + } + apic->regs = page_address(apic->regs_page); + memset(apic->regs, 0, PAGE_SIZE); + apic->vcpu = vcpu; + + /* If possible, use hardware CEPIC timer instead */ + if (!kvm_vcpu_is_hw_apic(vcpu)) { + hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, + HRTIMER_MODE_ABS); + apic->lapic_timer.timer.function = kvm_apic_timer_fn; + apic->lapic_timer.t_ops = &lapic_timer_ops; + apic->lapic_timer.kvm = vcpu->kvm; + apic->lapic_timer.vcpu = vcpu; + } + + apic->base_address = APIC_DEFAULT_PHYS_BASE; + vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE; + + kvm_lapic_reset(vcpu); + kvm_iodevice_init(&apic->dev, &apic_mmio_ops); + + kvm_get_guest_direct_virq(vcpu, vcpu->vcpu_id + KVM_VIRQ_LAPIC, + KVM_VIRQ_LAPIC); + + return 0; +nomem_free_apic: + kfree(apic); +nomem: + return ret; +} + +void kvm_print_APIC_field(struct kvm_lapic *apic, int base) +{ + int i; + + for (i = 0; i < 8; i++) + pr_cont("%08x", apic_get_reg(apic, base + i*0x10)); + + pr_cont("\n"); +} + +void kvm_print_local_APIC(struct kvm_vcpu *vcpu) +{ + struct kvm_lapic *apic; + unsigned int v, icr; + + apic = vcpu->arch.apic; + if (apic == NULL) { + pr_info("local APIC on VCPU #%d is absent\n", + vcpu->vcpu_id); + return; + } else { + pr_info("local APIC contents on VCPU #%d\n", + vcpu->vcpu_id); + } + v = 
kvm_apic_id(apic); + pr_info("... APIC ID: %08x\n", v); + v = apic_get_reg(apic, APIC_LVR); + pr_info("... APIC VERSION: %08x\n", v); + + v = apic_get_reg(apic, APIC_LDR); + pr_info("... APIC LDR: %08x\n", v); + if (!apic_x2apic_mode(apic)) { + v = apic_get_reg(apic, APIC_DFR); + pr_info("... APIC DFR: %08x\n", v); + } + v = apic_get_reg(apic, APIC_SPIV); + pr_info("... APIC SPIV: %08x\n", v); + + v = apic_get_reg(apic, APIC_PROCPRI); + pr_info("... APIC PROCPRI: %08x\n", v); + + pr_info("... IRR PENDING %d\n", apic->irr_pending); + pr_info("... APIC ISR field: "); + kvm_print_APIC_field(apic, APIC_ISR); + pr_info("... APIC TMR field: "); + kvm_print_APIC_field(apic, APIC_TMR); + pr_info("... APIC IRR field: "); + kvm_print_APIC_field(apic, APIC_IRR); + + icr = apic_get_reg(apic, APIC_ICR); + pr_info("... APIC ICR: %08x\n", icr); + icr = apic_get_reg(apic, APIC_ICR2); + pr_info("... APIC ICR2: %08x\n", icr); + + v = apic_get_reg(apic, APIC_LVTT); + pr_info("... APIC LVTT: %08x\n", v); + + v = apic_get_reg(apic, APIC_LVT0); + pr_info("... APIC LVT0: %08x\n", v); + v = apic_get_reg(apic, APIC_LVT1); + pr_info("... APIC LVT1: %08x\n", v); + + v = apic_get_reg(apic, APIC_LVTERR); + pr_info("... APIC LVTERR: %08x\n", v); + + v = apic_get_reg(apic, APIC_TMICT); + pr_info("... APIC TMICT: %08x\n", v); + v = apic_get_reg(apic, APIC_TMCCT); + pr_info("... APIC TMCCT: %08x\n", v); + v = apic_get_reg(apic, APIC_TDCR); + pr_info("... APIC TDCR: %08x\n", v); + + pr_info("local APIC on VCPU #%d timer state:\n", + vcpu->vcpu_id); + pr_info("... 
started %d pending %d period 0x%llx start at 0x%llx\n", + apic->lapic_timer.started, + atomic_read(&apic->lapic_timer.pending), + apic->lapic_timer.period, + apic->lapic_timer.running_time); +} + +int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu) +{ + struct kvm_lapic *apic = vcpu->arch.apic; + int highest_irr; + int old_highest_irr; + + if (!apic || !apic_enabled(apic)) + return -1; + + old_highest_irr = apic_find_highest_irr(apic); + apic_update_ppr(apic); + highest_irr = apic_find_highest_irr(apic); + if ((highest_irr == -1) || + ((highest_irr & 0xF0) <= apic_get_reg(apic, APIC_PROCPRI))) { + pr_err("highest_irr 0x%x before update 0x%x\n", + highest_irr, old_highest_irr); + kvm_print_local_APIC(vcpu); + return -1; + } + return highest_irr; +} + +int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu) +{ + u32 lvt0 = apic_get_reg(vcpu->arch.apic, APIC_LVT0); + int r = 0; + + if (kvm_vcpu_is_bsp(vcpu)) { + if (!apic_hw_enabled(vcpu->arch.apic)) + r = 1; + if ((lvt0 & APIC_LVT_MASKED) == 0 && + GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT) + r = 1; + } + return r; +} + +void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu) +{ + struct kvm_lapic *apic = vcpu->arch.apic; + + DebugKVMAT("started timer pending %d\n", + atomic_read(&apic->lapic_timer.pending)); + if (apic && atomic_read(&apic->lapic_timer.pending) > 0) { + if (kvm_apic_local_deliver(apic, APIC_LVTT)) { + atomic_dec(&apic->lapic_timer.pending); + DebugKVMAT("delivered timer pending %d\n", + atomic_read(&apic->lapic_timer.pending)); + } else { + DebugKVMAT("local APIC timer interrupt was coalesced " + "for VCPU #%d\n", vcpu->vcpu_id); + if (!apic_lvtt_period(apic) && + (atomic_read(&apic->lapic_timer.pending) > 1 || + atomic_read(&apic->lapic_timer.pending) == 0)) { + /* it can be while switch periodic */ + /* mode to one shot or back */ + E2K_LMS_HALT_OK; + } + } + } +} + +int kvm_get_hw_apic_interrupt(struct kvm_vcpu *vcpu) +{ + struct kvm_lapic *apic = vcpu->arch.apic; + union cepic_vect_inta 
reg_vect_inta; + union cepic_cpr reg_cpr; + union cepic_cir reg_cir; + + reg_cir.raw = epic_read_guest_w(CEPIC_CIR); + reg_cpr.raw = epic_read_guest_w(CEPIC_CPR); + reg_vect_inta.raw = epic_read_guest_w(CEPIC_VECT_INTA); + + if (reg_vect_inta.bits.vect != reg_cir.bits.vect) + pr_err("CEPIC inta and cir vectors don't match: 0x%x 0x%x\n", + reg_vect_inta.bits.vect, reg_cir.bits.vect); + if (reg_vect_inta.bits.cpr != reg_cpr.bits.cpr) + pr_err("CEPIC inta and cpr prio don't match: 0x%x 0x%x\n", + reg_vect_inta.bits.cpr, reg_cpr.bits.cpr); + + apic->cepic_vector[reg_vect_inta.bits.cpr] = reg_vect_inta.bits.vect; + + /* Update CPR and clear CIR */ + reg_cpr.bits.cpr = (reg_vect_inta.bits.vect >> 8) + 1; + reg_cir.raw = 0; + + epic_write_guest_w(CEPIC_CPR, reg_cpr.raw); + epic_write_guest_w(CEPIC_CIR, reg_cir.raw); + + return reg_vect_inta.bits.vect; +} + +int kvm_get_sw_apic_interrupt(struct kvm_vcpu *vcpu) +{ + int vector = kvm_apic_has_interrupt(vcpu); + struct kvm_lapic *apic = vcpu->arch.apic; + unsigned long flags; + + DebugKVMAT("vector is 0x%x\n", vector); + if (vector == -1) + return -1; + + apic_set_vector(vector, apic->regs + APIC_ISR); + apic_update_ppr(apic); + apic_clear_irr(vector, apic); + apic_debug("kvm_get_apic_interrupt() vector is 0x%x\n", vector); + + if (kvm_test_pending_virqs(vcpu)) { + raw_spin_lock_irqsave(&vcpu->kvm->arch.virq_lock, flags); + kvm_dec_vcpu_pending_virq(vcpu, apic->virq_no); + if (!apic->irr_pending) { + /* nothing more pending VIRQs, clear flag */ + kvm_clear_pending_virqs(vcpu); + /* only APIC interrupts can be now injected */ + KVM_BUG_ON(kvm_get_pending_virqs_num(vcpu) != 0); + } + /* clear flag to enable new injections to handle */ + /* remaining here pending VIRQs on IRR or new one */ + kvm_clear_virqs_injected(vcpu); + raw_spin_unlock_irqrestore(&vcpu->kvm->arch.virq_lock, flags); + } + + DebugVIRQs("LAPIC #%d VIRQ vector is %x\n", + vcpu->vcpu_id, vector); + return vector; +} + +#if 0 +void 
kvm_apic_post_state_restore(struct kvm_vcpu *vcpu) +{ + struct kvm_lapic *apic = vcpu->arch.apic; + + apic->base_address = vcpu->arch.apic_base; + kvm_apic_set_version(vcpu); + + apic_update_ppr(apic); + hrtimer_cancel(&apic->lapic_timer.timer); + update_divide_count(apic); + start_apic_timer(apic); + apic->irr_pending = true; +} +#endif + +void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu) +{ + struct kvm_lapic *apic = vcpu->arch.apic; + struct hrtimer *timer; + + if (!apic) + return; + + timer = &apic->lapic_timer.timer; + if (hrtimer_cancel(timer)) { + apic->lapic_timer.running_time = + kvm_get_guest_vcpu_running_time(apic->vcpu); + hrtimer_start_expires(timer, HRTIMER_MODE_ABS); + } +} + +#ifdef CONFIG_VIRT_LOCAL_APIC +void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) +{ + u32 data; + + if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention)) + apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); + + if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) + return; + + if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, + &data, sizeof(u32))) + return; + + apic_set_tpr(vcpu->arch.apic, data & 0xff); +} + +void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu) +{ + u32 data, tpr; + int max_irr, max_isr; + struct kvm_lapic *apic = vcpu->arch.apic; + + apic_sync_pv_eoi_to_guest(vcpu, apic); + + if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) + return; + + tpr = kvm_apic_get_reg(apic, APIC_TASKPRI) & 0xff; + max_irr = apic_find_highest_irr(apic); + if (max_irr < 0) + max_irr = 0; + max_isr = apic_find_highest_isr(apic); + if (max_isr < 0) + max_isr = 0; + data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24); + + kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, + sizeof(u32)); +} +#endif /* CONFIG_VIRT_LOCAL_APIC */ + +void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) +{ + if (!irqchip_in_kernel(vcpu->kvm)) + return; + + vcpu->arch.apic->vapic_addr = 
vapic_addr; +} + +bool kvm_vcpu_has_apic_interrupts(const struct kvm_vcpu *vcpu) +{ + return !vcpu->arch.hcall_irqs_disabled && kvm_test_pending_virqs(vcpu); +} + +bool kvm_check_lapic_priority(struct kvm_vcpu *vcpu) +{ + struct kvm_lapic *apic = vcpu->arch.apic; + u32 ppr = apic_get_reg(apic, APIC_PROCPRI); + int max_irr = apic_find_highest_irr(apic); + + if ((max_irr & 0xf0) <= ppr) + return false; + else + return true; +} diff --git a/arch/e2k/kvm/lapic.h b/arch/e2k/kvm/lapic.h new file mode 100644 index 000000000000..c02eb604f2a3 --- /dev/null +++ b/arch/e2k/kvm/lapic.h @@ -0,0 +1,140 @@ +#ifndef __KVM_E2K_LAPIC_H +#define __KVM_E2K_LAPIC_H + +#include +#include "kvm_timer.h" + +#include +#include + +#define MAX_CEPIC_PRIORITY 4 +struct kvm_lapic { + unsigned long base_address; + struct kvm_io_device dev; + struct kvm_timer lapic_timer; + u32 divide_count; + struct kvm_vcpu *vcpu; + bool irr_pending; + struct page *regs_page; + void *regs; + gpa_t vapic_addr; + struct page *vapic_page; + int virq_no; + /* APIC v6 (APIC model based on hardware CEPIC support) */ + u32 cepic_vector[MAX_CEPIC_PRIORITY + 1]; +}; + +int kvm_create_lapic(struct kvm_vcpu *vcpu); +void kvm_free_lapic(struct kvm_vcpu *vcpu); + +int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu); +int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu); +void kvm_lapic_reset(struct kvm_vcpu *vcpu); +void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value); +u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu); +void kvm_apic_set_version(struct kvm_vcpu *vcpu); + +int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest); +int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda); +int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq); + +u64 kvm_get_apic_base(struct kvm_vcpu *vcpu); +void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data); +void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu); +int kvm_lapic_enabled(struct kvm_vcpu *vcpu); +bool kvm_apic_present(struct kvm_vcpu 
*vcpu); +int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu); + +void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr); +void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu); +void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu); + +int apic_has_pending_timer(struct kvm_vcpu *vcpu); +void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu); +int kvm_apic_sysrq_deliver(struct kvm_vcpu *vcpu); +int kvm_apic_nmi_deliver(struct kvm_vcpu *vcpu); + +extern void kvm_print_APIC_field(struct kvm_lapic *apic, int base); +extern void kvm_print_local_APIC(struct kvm_vcpu *vcpu); + +/* + * Basic functions to access to local APIC state structure + * (see asm/kvm/guest.h) on host. + */ +static inline kvm_apic_state_t * +kvm_get_guest_lapic_state(struct kvm_vcpu *vcpu) +{ + if (unlikely(vcpu == NULL)) + return NULL; + if (unlikely(vcpu->arch.kmap_vcpu_state == NULL)) + return NULL; + return &vcpu->arch.kmap_vcpu_state->lapic; +} + +static inline atomic_t * +kvm_get_guest_lapic_virqs_num(struct kvm_vcpu *vcpu) +{ + kvm_apic_state_t *lapic = kvm_get_guest_lapic_state(vcpu); + + return &lapic->virqs_num; +} + +static inline int +kvm_read_guest_lapic_virqs_num(struct kvm_vcpu *vcpu) +{ + kvm_apic_state_t *lapic = kvm_get_guest_lapic_state(vcpu); + + return atomic_read(&lapic->virqs_num); +} +static inline void +kvm_set_guest_lapic_virqs_num(struct kvm_vcpu *vcpu, int count) +{ + kvm_apic_state_t *lapic = kvm_get_guest_lapic_state(vcpu); + + if (unlikely(lapic == NULL)) + return; + atomic_set(&lapic->virqs_num, count); +} +static inline void +kvm_init_guest_lapic_virqs_num(struct kvm_vcpu *vcpu) +{ + kvm_set_guest_lapic_virqs_num(vcpu, 0); +} +static inline void +kvm_inc_guest_lapic_virqs_num(struct kvm_vcpu *vcpu) +{ + kvm_apic_state_t *lapic = kvm_get_guest_lapic_state(vcpu); + + atomic_inc(&lapic->virqs_num); +} +static inline bool +kvm_inc_and_test_guest_lapic_virqs_num(struct kvm_vcpu *vcpu) +{ + kvm_apic_state_t *lapic = kvm_get_guest_lapic_state(vcpu); + + 
return atomic_inc_and_test(&lapic->virqs_num); +} +static inline void +kvm_dec_guest_lapic_virqs_num(struct kvm_vcpu *vcpu) +{ + kvm_apic_state_t *lapic = kvm_get_guest_lapic_state(vcpu); + + atomic_dec(&lapic->virqs_num); +} +static inline bool +kvm_dec_and_test_guest_lapic_virqs_num(struct kvm_vcpu *vcpu) +{ + kvm_apic_state_t *lapic = kvm_get_guest_lapic_state(vcpu); + + return atomic_dec_and_test(&lapic->virqs_num); +} + +#define MAX_PENDING_VIRQS 8 /* why 8 ???? */ + +/* followed define is not in apicdef.h */ +#define APIC_SHORT_MASK 0xc0000 +#define APIC_DEST_NOSHORT 0x0 +#define APIC_DEST_MASK 0x800 +#define MAX_APIC_VECTOR 256 + +#endif /* __KVM_E2K_LAPIC_H */ diff --git a/arch/e2k/kvm/lt.c b/arch/e2k/kvm/lt.c new file mode 100644 index 000000000000..1af6e0a9369e --- /dev/null +++ b/arch/e2k/kvm/lt.c @@ -0,0 +1,1351 @@ +/* + * IOHUB system timer/watchdog/reset/power emulation + * + * Copyright (c) 2019 MCST + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + * Authors: + * Salavat Gilyazov + * Based on e2k lms simulator. + */ + +#include +#include +#include +#include +#include +#include +#include /* get redpill value*/ + +#include "ioepic.h" +#include "irq.h" +#include "lt.h" + +#define mod_64(x, y) ((x) % (y)) + +#define PRId64 "d" +#define PRIx64 "llx" +#define PRIu64 "u" +#define PRIo64 "o" + +#undef DEBUG_COUNT_MODE +#undef DebugCOUNT +#define DEBUG_COUNT_MODE 0 /* counter updates debugging */ +#if DEBUG_COUNT_MODE +#define DebugCOUNT(fmt, args...) \ + pr_info("%s(): " fmt, __func__, ##args); +#else +#define DebugCOUNT(fmt, args...) +#endif + +#undef DEBUG_VERBOSE_COUNT_MODE +#undef DebugVCOUNT +#define DEBUG_VERBOSE_COUNT_MODE 0 /* counter updates verbose */ + /* debugging */ +#define DebugVCOUNT(fmt, args...) \ +({ \ + if (DEBUG_VERBOSE_COUNT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_SYS_TIMER_MODE +#undef DebugSYSTM +#define DEBUG_SYS_TIMER_MODE 0 /* system timer debugging */ +#define DebugSYSTM(fmt, args...) \ +({ \ + if (DEBUG_SYS_TIMER_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_IRQ_MODE +#undef DebugIRQ +#define DEBUG_IRQ_MODE 0 /* IRQs debugging */ +#define DebugIRQ(fmt, args...) \ +({ \ + if (DEBUG_IRQ_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_HR_TIMER_MODE +#undef DebugHRTM +#define DEBUG_HR_TIMER_MODE 0 /* high resolution timer debugging */ +#define DebugHRTM(fmt, args...) \ +({ \ + if (DEBUG_HR_TIMER_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_LT_REGS_MODE +#undef DebugLTREGS +#define DEBUG_LT_REGS_MODE 0 /* system timer debugging */ +#if DEBUG_LT_REGS_MODE +#define lt_reg_debug(fmt, arg...) 
pr_err("%s() : " fmt, __func__, ##arg) +#else +#define lt_reg_debug(fmt, arg...) +#endif /* DEBUG_LT_REGS_MODE */ + +static bool wd_debug = false; +#undef DEBUG_WD_REGS_MODE +#undef DebugWD +#define DEBUG_WD_REGS_MODE (false && wd_debug) /* watchdog timer */ + /* debugging */ +#define DebugWD(fmt, args...) \ +({ \ + if (DEBUG_WD_REGS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_MMIO_SHUTDOWN_MODE +#undef DebugMMIOSHUTDOWN +#define DEBUG_MMIO_SHUTDOWN_MODE 0 /* MMIO shutdown debugging */ +#define DebugMMIOSHUTDOWN(fmt, args...) \ +({ \ + if (DEBUG_MMIO_SHUTDOWN_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#define HRTIMER_EXPIRES_PERCENT 90 /* percents */ +/* If hrtimer expires on HRTIMER_EXPIRES_PERCENTs it does not reactivate */ +#define HRTIMER_EXPIRES_APPROX(time) \ + (((time) / 100) * HRTIMER_EXPIRES_PERCENT) + +/* + * Bug 129924: sometimes guest doesn't make it in time to ack watchdog interrupt, and reboots. + * Since lintel is currently the only user of watchdog, and it doesn't expect reboot, disable it. + * Send another normal interrupt instead. 
+ */ +#define ENABLE_WATCHDOG_RESET 0 + +static inline struct kvm_lt *to_lt(struct kvm_io_device *dev) +{ + return container_of(dev, struct kvm_lt, dev); +} + +static inline struct kvm_lt *sys_timer_to_lt(struct kvm_timer *timer) +{ + return container_of(timer, struct kvm_lt, sys_timer); +} + +static inline struct kvm_lt *wd_timer_to_lt(struct kvm_timer *timer) +{ + return container_of(timer, struct kvm_lt, wd_timer); +} + +static inline u64 cycles_to_count(struct kvm_lt *lt, u64 cycles) +{ + return mul_u64_u32_div(cycles, lt->frequency, lt->ticks_per_sec); +} + +static inline u64 count_to_cycles(struct kvm_lt *lt, u64 counter) +{ + return mul_u64_u32_div(counter, lt->ticks_per_sec, lt->frequency); +} + +static int lt_get_sys_timer_limits(struct kvm_lt *lt, u64 *limitp, u64 *startp) +{ + u32 start, limit; + + if (lt->regs.counter_limit.c_l != 0) { + if (lt->regs.counter_control.s_s) { + limit = lt->regs.counter_limit.c_l; + start = MIN_SYS_TIMER_COUNT; + } else { + limit = MAX_SYS_TIMER_COUNT; + if (lt->regs.counter_start.c_st_v != 0) { + start = lt->regs.counter_start.c_st_v; + } else { + start = MIN_SYS_TIMER_COUNT; + } + } + } else { + limit = MAX_SYS_TIMER_COUNT; + if (lt->regs.counter_start.c_st_v != 0) { + start = lt->regs.counter_start.c_st_v; + } else { + start = MIN_SYS_TIMER_COUNT; + } + } + ASSERT(limit > start); + *limitp = limit; + *startp = start; + return 0; +} + +static int lt_get_wd_timer_limits(struct kvm_lt *lt, u64 *limitp, u64 *startp) +{ + u64 limit; + u32 start; + + if (lt->regs.wd_limit.wd_l == 0) { + /* wd timer is OFF */ + start = 0; + limit = -1ULL; + } else { + start = 0; + limit = lt->regs.wd_limit.wd_l; + } + *limitp = limit; + *startp = start; + return 0; +} + +static int lt_get_reset_counter_limits(struct kvm_lt *lt, u64 *limitp, + u64 *startp) +{ + *limitp = -1ULL; + *startp = 0; + return 0; +} + +static int lt_get_power_counter_limits(struct kvm_lt *lt, u64 *limitp, + u64 *startp) +{ + *limitp = -1ULL; + *startp = 0; + return 0; +} 
+ +static int lt_get_timer_limits(struct kvm_lt *lt, struct kvm_timer *timer, + u64 *limitp, u64 *startp) +{ + switch (timer->type) { + case kvm_sys_timer_type: + return lt_get_sys_timer_limits(lt, limitp, startp); + case kvm_wd_timer_type: + return lt_get_wd_timer_limits(lt, limitp, startp); + case kvm_reset_timer_type: + return lt_get_reset_counter_limits(lt, limitp, startp); + case kvm_power_timer_type: + return lt_get_power_counter_limits(lt, limitp, startp); + default: + pr_err("%s() : %d is unsupported or invalid timer type\n", + __func__, timer->type); + return -EINVAL; + } +} + +static u64 kvm_get_up_to_date_timer(struct kvm_vcpu *vcpu, struct kvm_lt *lt, + struct kvm_timer *timer) +{ + u64 running_time; + s64 running_cycles; + s64 running_ns, host_ns; + s64 cycles, host_cycles; + ktime_t now; + u64 now_ns; + u64 counter, host_counter; + u64 start64, limit64; + u32 start, limit, start_count, new_count; + u64 prescaler; + int miss_times; + unsigned long flags; + struct kvm_arch *ka = &vcpu->kvm->arch; + + ASSERT(timer != NULL); + ASSERT(vcpu != NULL); + + raw_spin_lock_irqsave(&timer->lock, flags); + + if (unlikely(timer->period == 0)) { + raw_spin_unlock_irqrestore(&timer->lock, flags); + return 0; + } + start_count = timer->start_count; + running_time = kvm_do_get_guest_vcpu_running_time(vcpu); + cycles = get_cycles(); + now = timer->timer.base->get_time(); + now_ns = ktime_to_ns(now); + /* sh_sclkm3 - summary time when each vcpu of guest was out of cpu */ + if (!redpill) + now_ns -= ka->sh_sclkm3; + DebugVCOUNT("%s : running cycles at start 0x%llx, now 0x%llx, " + "current cycles 0x%llx, start counter 0x%x period ns 0x%llx\n", + timer->name, timer->running_time, running_time, + cycles, start_count, timer->period); + DebugVCOUNT("%s : host start time at nsec 0x%llx, now 0x%llx\n", + timer->name, timer->host_start_ns, now_ns); + + running_cycles = running_time - timer->running_time; + if (running_cycles < 0) { + /* BUG(); probably it starts on or migrate to 
other VCPU/CPU */ + running_cycles = 0; + } + running_ns = cycles_2nsec(running_cycles); + host_ns = now_ns - timer->host_start_ns; + if (host_ns < 0) { + /* BUG(); probably it starts on or migrate to other CPU */ + host_ns = 0; + } + host_cycles = nsecs_2cycles(host_ns); + DebugVCOUNT("%s : current running cycles 0x%llx ns 0x%llx\n", + timer->name, running_cycles, running_ns); + DebugVCOUNT("%s : host running cycles 0x%llx ns 0x%llx\n", + timer->name, host_cycles, host_ns); + + lt_get_timer_limits(lt, timer, &limit64, &start64); + limit = limit64; + start = start64; + ASSERT(limit > start); + + if (timer->type == kvm_wd_timer_type) + prescaler = lt->regs.wd_prescaler.wd_c + 1; + else + prescaler = 1; + + counter = cycles_to_count(lt, running_cycles) / prescaler + start_count; + host_counter = cycles_to_count(lt, host_cycles) / prescaler + + start_count; + + if (host_counter > limit) { + miss_times = (host_counter - limit) / (limit - start); + new_count = mod_64(host_counter - limit, limit - start); + new_count += start; + } else { + miss_times = 0; + new_count = host_counter; + } + + /* update timer counter value */ + if (timer->type == kvm_sys_timer_type) { + lt->regs.counter.c = new_count; + } else if (timer->type == kvm_wd_timer_type) { + lt->regs.wd_counter.wd_c = new_count; + } else { + pr_err("%s(): %d is unsupported or invalid timer type\n", + __func__, timer->type); + } + timer->start_count = new_count; + timer->host_start_ns = now_ns; + timer->running_time = running_time; + + raw_spin_unlock_irqrestore(&timer->lock, flags); + + DebugCOUNT("%s : guest running cycles 0x%llx " + "counter 0x%llx : %lld%%\n", + timer->name, running_cycles, counter, + (counter * 100) / host_counter); + DebugCOUNT("%s : host running cycles 0x%llx counter 0x%llx\n", + timer->name, host_cycles, host_counter); + if (miss_times > 0) { + DebugHRTM("%s : host counter 0x%llx limit 0x%x start 0x%x " + "miss times %d : new counter 0x%x\n", + timer->name, host_counter, limit, start, 
miss_times, + new_count); + } else { + DebugHRTM("%s : host counter 0x%llx limit 0x%x start 0x%x " + "new counter 0x%x\n", + timer->name, host_counter, limit, start, + new_count); + } + + return host_counter; +} +static u64 kvm_get_up_to_date_count(struct kvm_vcpu *vcpu, struct kvm_lt *lt, + struct kvm_timer *timer) +{ + s64 host_ns, host_cycles; + u64 host_counter; + ktime_t now; + u64 now_ns; + u64 start, limit, new_count; + struct kvm_arch *ka = &vcpu->kvm->arch; +#if DEBUG_COUNT_MODE + s64 running_time, running_cycles, running_ns, cycles, counter; + int miss_times = 0; + unsigned long flags; + + ASSERT(timer != NULL); + ASSERT(vcpu != NULL); + raw_local_irq_save(flags); + running_time = kvm_do_get_guest_vcpu_running_time(vcpu); + cycles = get_cycles(); + now = ktime_get(); + raw_local_irq_restore(flags); +#else + now = ktime_get(); +#endif + now_ns = ktime_to_ns(now); + /* sh_sclkm3 - summary time when each vcpu of guest was out of cpu */ + if (!redpill) + now_ns -= ka->sh_sclkm3; + DebugCOUNT("%s : running cycles at start 0x%llx, now 0x%llx, " + "current cycles 0x%llx\n", + timer->name, timer->running_time, running_time, cycles); + DebugCOUNT("%s : host start time at nsec 0x%llx, now 0x%llx\n", + timer->name, timer->host_start_ns, now_ns); + + timer->vcpu = vcpu; +#if DEBUG_COUNT_MODE + running_cycles = running_time - timer->running_time; + ASSERT(running_cycles >= 0); + running_ns = cycles_2nsec(running_cycles); +#endif + host_ns = now_ns - timer->host_start_ns; + if (redpill) + ASSERT(host_ns >= 0); + host_cycles = nsecs_2cycles(host_ns); + DebugCOUNT("%s : current running cycles 0x%llx ns 0x%llx\n", + timer->name, running_cycles, running_ns); + DebugCOUNT("%s : host running cycles 0x%llx ns 0x%llx\n", + timer->name, host_cycles, host_ns); + + lt_get_timer_limits(lt, timer, &limit, &start); +#if DEBUG_COUNT_MODE + ASSERT(limit > start); + + counter = cycles_to_count(lt, running_cycles); +#endif + host_counter = cycles_to_count(lt, host_cycles); + 
DebugCOUNT("%s : host cycles 0x%llx counter 0x%llx\n", + timer->name, host_cycles, host_counter); + DebugCOUNT("%s : guest cycles 0x%llx counter 0x%llx : %lld%%\n", + timer->name, running_cycles, counter, + (counter * 100) / host_counter); + + if (host_counter > limit) { +#if DEBUG_COUNT_MODE + miss_times = (host_counter - limit) / ((limit + 1) - start); +#endif + new_count = mod_64(host_counter - limit, (limit + 1) - start); + } else { + new_count = host_counter; + } + + /* update counter value */ + if (timer->type == kvm_reset_timer_type) { + lt->regs.reset_counter_lo.rs_c = new_count & 0xffffffff; + lt->regs.reset_counter_hi.rs_c = (new_count >> 32) & 0xffffffff; + } else if (timer->type == kvm_power_timer_type) { + lt->regs.power_counter_lo.pw_c = new_count & 0xffffffff; + lt->regs.power_counter_hi.pw_c = (new_count >> 32) & 0xffffffff; + } else { + pr_err("%s(): %d is unsupported or invalid timer type\n", + __func__, timer->type); + } + +#if DEBUG_COUNT_MODE + if (miss_times > 0) { + DebugHRTM("%s : host counter 0x%llx limit 0x%llx start 0x%llx " + "miss times %d : new counter 0x%llx\n", + timer->name, host_counter, limit, start, miss_times, + new_count); + } else { + DebugVCOUNT("%s : host counter 0x%llx limit 0x%llx " + "start 0x%llx new counter 0x%llx\n", + timer->name, host_counter, limit, start, new_count); + } +#endif + + return host_counter; +} + +static inline bool lt_in_range(struct kvm_lt *lt, gpa_t addr) +{ + return ((addr >= lt->base_address && + (addr < lt->base_address + LT_MMIO_LENGTH))); +} +static inline u32 lt_get_reg(struct kvm_lt *lt, int reg_off) +{ + lt_reg_debug("%02x : %08x from %px\n", + reg_off, *((u32 *) ((void *)(<->regs) + reg_off)), + ((u32 *) ((void *)(<->regs) + reg_off))); + return *((u32 *) ((void *)(<->regs) + reg_off)); +} + +static inline void lt_set_reg(struct kvm_lt *lt, int reg_off, u32 val) +{ + *((u32 *) ((void *)(<->regs) + reg_off)) = val; + lt_reg_debug("%02x : %08x to %px\n", + reg_off, *((u32 *) ((void *)(<->regs) 
+ reg_off)), + ((u32 *) ((void *)(<->regs) + reg_off))); +} + +static u32 update_counter_value(struct kvm_vcpu *vcpu, struct kvm_lt *lt) +{ + kvm_get_up_to_date_timer(vcpu, lt, <->sys_timer); + return lt->regs.counter.reg; +} + +static u32 update_wd_counter_value(struct kvm_vcpu *vcpu, struct kvm_lt *lt) +{ + u64 new_counter; + + new_counter = kvm_get_up_to_date_timer(vcpu, lt, <->wd_timer); + if (lt->regs.wd_limit.wd_l == 0) { + /* wd timer is not started */ + } else if (!lt->wd_timer.hrtimer_started) { + /* it need update event bit state */ + ASSERT(!(lt->regs.wd_control.w_out_e)); + + if (new_counter >= lt->regs.wd_limit.wd_l) { + lt->regs.wd_control.w_evn = 1; + } + } + return lt->regs.wd_counter.reg; +} + +static u64 update_reset_counter_value(struct kvm_vcpu *vcpu, struct kvm_lt *lt) +{ + u64 counter; + + counter = kvm_get_up_to_date_count(vcpu, lt, <->reset_count); + return counter; +} + +static u64 update_power_counter_value(struct kvm_vcpu *vcpu, struct kvm_lt *lt) +{ + u64 counter; + + counter = kvm_get_up_to_date_count(vcpu, lt, <->power_count); + return counter; +} + +static void start_lt_timer(struct kvm_vcpu *vcpu, struct kvm_lt *lt, + struct kvm_timer *lt_timer, + u32 start_count, u64 cycles_period, + bool start_hrtimer) +{ + ktime_t now; + u64 ns_period; + + ns_period = cycles_2nsec(cycles_period); + + if (ns_period == 0) { + lt_timer->period = 0; + return; + } + + /* + * Do not allow the guest to program periodic timers with small + * interval, since the hrtimers are not throttled by the host + * scheduler. 
+ */ + if (ns_period < NSEC_PER_MSEC / 2) { + ns_period = NSEC_PER_MSEC / 2; + } + + ASSERT(!hrtimer_active(<_timer->timer)); + + lt_timer->vcpu = vcpu; + lt_timer->start_count = start_count; + lt_timer->period = ns_period; + now = lt_timer->timer.base->get_time(); + lt_timer->host_start_ns = ktime_to_ns(now); + lt_timer->running_time = + kvm_get_guest_vcpu_running_time(vcpu); + if (start_hrtimer) { + hrtimer_start(<_timer->timer, + ktime_add_ns(now, ns_period), + HRTIMER_MODE_ABS); + lt_timer->hrtimer_started = true; + } else { + lt_timer->hrtimer_started = false; + } + DebugSYSTM("%s hrtimer is %s at host ns 0x%llx start count 0x%x, " + "period 0x%llx\n", + lt_timer->name, + (lt_timer->hrtimer_started) ? "started" : "not started", + lt_timer->host_start_ns, start_count, ns_period); + DebugSYSTM("%s running time cycles 0x%llx\n", + lt_timer->name, lt_timer->running_time); + + DebugSYSTM("%s freq is %" PRId64 "Mhz, now 0x%016" PRIx64 ", " + "timer period cycles 0x%" PRIx64 ", nsec %lldns, " + "expire @ 0x%016" PRIx64 ".\n", + lt_timer->name, lt->frequency, ktime_to_ns(now), + cycles_period, lt_timer->period, + ktime_to_ns(ktime_add_ns(now, lt_timer->period))); +} + +static void restart_sys_timer(struct kvm_vcpu *vcpu, struct kvm_lt *lt, + u32 start_count) +{ + u64 limit; + u64 increments, cycles_increments; + + hrtimer_cancel(<->sys_timer.timer); + kthread_flush_work(<->sys_timer.expired); + DebugSYSTM("COUNTER hrtimer canceled at now 0x%llx\n", + ktime_to_ns(ktime_get())); + + lt->regs.counter.c = start_count; + + if (!lt->regs.counter_control.s_s) { + /* timer is not started */ + return; + } + + limit = (start_count <= lt->regs.counter_limit.c_l) ? 
+ lt->regs.counter_limit.c_l : MAX_SYS_TIMER_COUNT; + increments = limit - start_count; + cycles_increments = count_to_cycles(lt, increments); + DebugSYSTM("COUNTER from 0x%x to limit 0x%llx, increments: 0x%llx " + "cycles 0x%llx\n", + lt->regs.counter.c, limit, increments, cycles_increments); + start_lt_timer(vcpu, lt, <->sys_timer, + start_count, cycles_increments, + true /* start hrtimer */); +} + +static void reset_sys_timer(struct kvm_vcpu *vcpu, struct kvm_lt *lt) +{ + lt->regs.counter.c = MIN_SYS_TIMER_COUNT; +} + +static void restart_wd_timer(struct kvm_vcpu *vcpu, struct kvm_lt *lt) +{ + u32 limit; + u64 increments, cycles_increments; + + hrtimer_cancel(<->wd_timer.timer); + kthread_flush_work(<->wd_timer.expired); + DebugSYSTM("COUNTER hrtimer canceled at now 0x%llx\n", + ktime_to_ns(ktime_get())); + if (lt->regs.wd_limit.wd_l == 0) { + /* wd timer is not started */ + DebugWD("WD_COUNTER is not started\n"); + return; + } + limit = lt->regs.wd_limit.wd_l; + ASSERT(lt->regs.wd_counter.wd_c == 0 && limit > 0); + increments = limit; + increments *= (lt->regs.wd_prescaler.wd_c + 1); + cycles_increments = count_to_cycles(lt, increments); + DebugWD("WD_COUNTER from 0x%x to limit 0x%x, increments: 0x%llx " + "* prescaler 0x%x cycles 0x%llx w_out_e %d\n", + lt->regs.wd_counter.wd_c, limit, increments, + lt->regs.wd_prescaler.wd_c + 1, cycles_increments, + lt->regs.wd_control.w_out_e); + start_lt_timer(vcpu, lt, <->wd_timer, 0, cycles_increments, + !!(lt->regs.wd_control.w_out_e)); +} + +static void generate_interrupt(struct kvm *kvm, lt_irq_map_t irq_id, + bool active) +{ + DebugIRQ("IRQ #%d level is %d\n", irq_id, active); + kvm_set_irq(kvm, irq_id, irq_id, active, false); +} + +static void generate_and_reset_interrupt(struct kvm *kvm, lt_irq_map_t irq_id) +{ + generate_interrupt(kvm, irq_id, true); + generate_interrupt(kvm, irq_id, false); +} + +static void generate_watchdog_reset(struct kvm_vcpu *vcpu, struct kvm_lt *lt) +{ + if (ENABLE_WATCHDOG_RESET) { + 
lt->regs.wd_control.w_evn = 0; + vcpu->arch.exit_shutdown_terminate = KVM_EXIT_E2K_RESTART; + DebugMMIOSHUTDOWN("%s(): rebooting guest\n", __func__); + } else { + generate_and_reset_interrupt(vcpu->kvm, lt->wd_timer_irq_id); + } +} + +static int lt_mmio_read_64(struct kvm_vcpu *vcpu, struct kvm_lt *lt, + unsigned int offset, void *data) +{ + u64 result; + const char *reg_name = "???"; + + mutex_lock(<->lock); + switch (offset) { + case RESET_COUNTER: + result = update_reset_counter_value(vcpu, lt); + reg_name = "Reset Counter"; + break; + case POWER_COUNTER: + result = update_power_counter_value(vcpu, lt); + reg_name = "Power Counter"; + break; + default: + pr_err("%s() : invalid system timer register offset 0x%x\n", + __func__, offset); + result = 0xffffffffffffffffULL; + break; + } + mutex_unlock(<->lock); + + *(u64 *)data = result; + + lt_reg_debug("%s data 0x%llx\n", reg_name, *(u64 *)data); + + return 0; +} + +static int lt_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this, + gpa_t address, int len, void *data) +{ + struct kvm_lt *lt = to_lt(this); + unsigned int offset = address - lt->base_address; + u32 result; + const char *reg_name = "???"; + + lt_reg_debug("address 0x%llx, offset %02x, len %d to %px\n", + address, offset, len, data); + + if (!lt_in_range(lt, address)) + return -EOPNOTSUPP; + + if (len == 8) { + /* 8 bytes access */ + return lt_mmio_read_64(vcpu, lt, offset, data); + } + + ASSERT(len == 4); /* 4 bytes access */ + + mutex_lock(<->lock); + switch (offset) { + case COUNTER_LIMIT: + result = lt->regs.counter_limit.reg; + if (lt->regs.counter_control.s_s || + !lt->regs.counter_control.inv_l) { + /* counter started or should not invert */ + lt->regs.counter_limit.l = 0; + lt->regs.counter_start.l = 0; + lt->regs.counter.l = 0; + } + reg_name = "Counter Limit"; + break; + case COUNTER_START_VALUE: + result = lt->regs.counter_start.reg; + reg_name = "Counter Start Value"; + break; + case COUNTER: + result = update_counter_value(vcpu, 
lt); + reg_name = "Counter"; + break; + case COUNTER_CONTROL: + result = lt->regs.counter_control.reg; + reg_name = "Counter Control"; + break; + case WD_COUNTER: + result = update_wd_counter_value(vcpu, lt); + reg_name = "WD Counter"; + wd_debug = true; + break; + case WD_PRESCALER: + result = lt->regs.wd_prescaler.reg; + reg_name = "WD Prescaler"; + wd_debug = true; + break; + case WD_LIMIT: + result = lt->regs.wd_limit.reg; + reg_name = "WD Limit"; + wd_debug = true; + break; + case WD_CONTROL: + update_wd_counter_value(vcpu, lt); + result = lt->regs.wd_control.reg; + reg_name = "WD Control"; + wd_debug = true; + break; + case RESET_COUNTER_L: { + u64 full_count; + full_count = update_reset_counter_value(vcpu, lt); + result = full_count & 0xffffffffUL; + lt->regs.latched_reset_counter = + (full_count >> 32) & 0xffffffffUL; + reg_name = "Reset Counter Lo"; + break; + } + case RESET_COUNTER_H: + result = lt->regs.latched_reset_counter; + reg_name = "Reset Counter Hi"; + break; + case POWER_COUNTER_L: { + u64 full_count; + full_count = update_power_counter_value(vcpu, lt); + result = full_count & 0xffffffffUL; + lt->regs.latched_power_counter = + (full_count >> 32) & 0xffffffffUL; + reg_name = "Power Counter Lo"; + break; + } + case POWER_COUNTER_H: + result = lt->regs.latched_power_counter; + reg_name = "Power Counter Hi"; + break; + default: + pr_err("%s() : invalid system timer register offset 0x%x\n", + __func__, offset); + result = 0xffffffff; + break; + } + mutex_unlock(<->lock); + + *(u32 *)data = result; + + lt_reg_debug("%s data 0x%x\n", reg_name, *(u32 *)data); + + return 0; +} + +static int lt_mmio_write_64(struct kvm_vcpu *vcpu, struct kvm_lt *lt, + unsigned int offset, const void *data) +{ + const char *reg_name = "???"; + + mutex_lock(<->lock); + switch (offset) { + case RESET_COUNTER: + /* nothing effect */ + reg_name = "Reset Counter"; + break; + case POWER_COUNTER: + /* nothing effect */ + reg_name = "Power Counter"; + break; + default: + 
pr_err("%s() : invalid system timer register offset 0x%x\n", + __func__, offset); + break; + } + mutex_unlock(<->lock); + + lt_reg_debug("%s data is not changed 0x%llx\n", reg_name, *(u64 *)data); + + return 0; +} + +static int lt_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, + gpa_t address, int len, const void *data) +{ + struct kvm_lt *lt = to_lt(this); + unsigned int offset = address - lt->base_address; + u32 val; + const char *reg_name = "???"; + + lt_reg_debug("address 0x%llx, offset %02x, len %d from %px\n", + address, offset, len, data); + + if (!lt_in_range(lt, address)) + return -EOPNOTSUPP; + + if (len == 8) { + /* 8 bytes access */ + return lt_mmio_write_64(vcpu, lt, offset, data); + } + + ASSERT(len == 4); /* 4 bytes access */ + + val = *(u32 *)data; + + mutex_lock(<->lock); + switch (offset) { + case COUNTER_LIMIT: { + counter_limit_t limit; + + limit.reg = val; + lt->regs.counter_limit.c_l = limit.c_l; + if (lt->regs.counter_control.s_s || + !lt->regs.counter_control.inv_l) { + /* counter started or should not invert */ + lt->regs.counter_limit.l = 0; + lt->regs.counter_start.l = 0; + lt->regs.counter.l = 0; + } + if (lt->regs.counter_control.s_s) { + restart_sys_timer(vcpu, lt, MIN_SYS_TIMER_COUNT); + } + reg_name = "Counter Limit"; + break; + } + case COUNTER_START_VALUE: { + counter_start_t start; + + start.reg = val; + lt->regs.counter_start.c_st_v = start.c_st_v; + if (!lt->regs.counter_control.s_s) { + restart_sys_timer(vcpu, lt, start.c_st_v); + } + reg_name = "Counter Start Value"; + break; + } + case COUNTER: + pr_err("%s(): register Counter cannot be written\n", __func__); + reg_name = "Counter"; + break; + case COUNTER_CONTROL: { + counter_control_t control; + u32 start; + bool already_started; + + already_started = !!lt->regs.counter_control.s_s; + control.reg = val; + lt->regs.counter_control.s_s = control.s_s; + lt->regs.counter_control.inv_l = control.inv_l; + lt->regs.counter_control.l_ini = control.l_ini; + if 
(!already_started && control.s_s) { + if (control.inv_l) { + lt->regs.counter_limit.l = control.l_ini; + lt->regs.counter_start.l = control.l_ini; + lt->regs.counter.l = control.l_ini; + } + } + if (already_started ^ control.s_s) { + start = lt->regs.counter_start.c_st_v; + } else { + start = MIN_SYS_TIMER_COUNT; + } + restart_sys_timer(vcpu, lt, start); + if (lt->regs.counter.l) { + generate_and_reset_interrupt(vcpu->kvm, + lt->sys_timer_irq_id); + } + reg_name = "Counter Control"; + break; + } + case WD_COUNTER: + lt->regs.wd_counter.wd_c = 0; /* counter reset */ + wd_debug = true; + restart_wd_timer(vcpu, lt); + reg_name = "WD Counter"; + break; + case WD_PRESCALER: + lt->regs.wd_counter.wd_c = 0; /* counter reset */ + lt->regs.wd_prescaler.reg = + (val + 1) * vcpu->kvm->arch.wd_prescaler_mult - 1; + wd_debug = true; + restart_wd_timer(vcpu, lt); + reg_name = "WD Prescaler"; + break; + case WD_LIMIT: + lt->regs.wd_counter.wd_c = 0; /* counter reset */ + lt->regs.wd_limit.reg = val; + wd_debug = true; + restart_wd_timer(vcpu, lt); + reg_name = "WD Limit"; + break; + case WD_CONTROL: { + wd_control_t control; + bool was_oe = lt->regs.wd_control.w_out_e; + + control.reg = val; + control.unused = 0; + update_wd_counter_value(vcpu, lt); + lt->regs.wd_control.w_out_e = control.w_out_e; + lt->regs.wd_control.w_m = control.w_m; + if (control.w_evn) { + /* reset bit by writing 1 to bit */ + lt->regs.wd_control.w_evn = 0; + } + + wd_debug = true; + + /* Check if timer already fired and event is waiting */ + if (!was_oe && control.w_out_e && lt->regs.wd_control.w_evn) { + DebugWD("w_evn is set when enabling watchdog timer\n"); + if (!control.w_m) { + generate_watchdog_reset(vcpu, lt); + } else { + generate_and_reset_interrupt(vcpu->kvm, lt->wd_timer_irq_id); + } + } + + /* Start/stop the timer after enabling/disabling w_out_e */ + if (was_oe != control.w_out_e) { + DebugWD("%s watchdog timer\n", control.w_out_e ? 
+ "Enabling" : "Disabling"); + lt->regs.wd_counter.wd_c = 0; + restart_wd_timer(vcpu, lt); + } + reg_name = "WD Control"; + break; + } + case RESET_COUNTER_L: + /* nothing effect */ + reg_name = "Reset Counter Lo"; + break; + case RESET_COUNTER_H: + /* nothing effect */ + reg_name = "Reset Counter Hi"; + break; + case POWER_COUNTER_L: + /* nothing effect */ + reg_name = "Power Counter Lo"; + break; + case POWER_COUNTER_H: + /* nothing effect */ + reg_name = "Power Counter Hi"; + break; + default: + pr_err("%s() : invalid system timer register offset 0x%x\n", + __func__, offset); + break; + } + mutex_unlock(<->lock); + + lt_reg_debug("%s data 0x%x\n", reg_name, *(u32 *)data); + if (wd_debug) { + DebugWD("%s data 0x%x\n", reg_name, *(u32 *)data); + if (offset == WD_CONTROL) { + DebugWD("%s new state 0x%x\n", + reg_name, lt->regs.wd_control.reg); + } + wd_debug = false; + } + + return 0; +} + +static void lt_sys_timer_do_work(struct kthread_work *work) +{ + struct kvm_timer *timer = container_of(work, struct kvm_timer, expired); + struct kvm *kvm = timer->kvm; + struct kvm_lt *lt = sys_timer_to_lt(timer); + + if (timer->work == kvm_set_reset_irq_timer_work) { + generate_and_reset_interrupt(kvm, lt->sys_timer_irq_id); + } else { + pr_err("%s(): %d is unknown or unsupported timer " + "expires work\n", + __func__, timer->work); + } +} + +static void lt_wd_timer_do_work(struct kthread_work *work) +{ + struct kvm_timer *timer = container_of(work, struct kvm_timer, expired); + struct kvm *kvm = timer->kvm; + struct kvm_lt *lt = wd_timer_to_lt(timer); + + if (timer->work == kvm_set_reset_irq_timer_work) { + generate_and_reset_interrupt(kvm, lt->wd_timer_irq_id); + } else if (timer->work == kvm_watchdog_reset_timer_work) { + generate_watchdog_reset(timer->vcpu, lt); + } else { + pr_err("%s(): %d is unknown or unsupported timer " + "expires work\n", + __func__, timer->work); + } +} + +static void do_lt_sys_timer(struct kvm_vcpu *vcpu, void *data) +{ + struct kvm_lt *lt = data; 
+ u64 counter; + + ASSERT(lt->regs.counter_control.s_s); + + counter = kvm_get_up_to_date_timer(vcpu, lt, <->sys_timer); + + if (lt->regs.counter_limit.c_l != 0) { + if (counter < lt->regs.counter_limit.c_l) { + DebugHRTM("counter 0x%llx did not reach 0x%x " + "limit value", + counter, lt->regs.counter_limit.c_l); + } + } else { + if (counter < MAX_SYS_TIMER_COUNT) { + DebugHRTM("counter 0x%llx did not reach 0x%x max value", + counter, MAX_SYS_TIMER_COUNT); + } + } + + if (lt->regs.counter_control.inv_l) { + lt->regs.counter_limit.l = !(lt->regs.counter_limit.l); + lt->regs.counter_start.l = !(lt->regs.counter_start.l); + lt->regs.counter.l = !(lt->regs.counter.l); + } else { + lt->regs.counter_limit.l = 1; + lt->regs.counter_start.l = 1; + lt->regs.counter.l = 1; + } + + reset_sys_timer(vcpu, lt); + + if (lt->regs.counter.l) { + lt->sys_timer.work = kvm_set_reset_irq_timer_work; + kthread_queue_work(lt->sys_timer.worker, + <->sys_timer.expired); + } +} + +static void do_lt_wd_timer(struct kvm_vcpu *vcpu, void *data) +{ + struct kvm_lt *lt = data; + bool old_wd_event = lt->regs.wd_control.w_evn; + u64 counter; + + counter = kvm_get_up_to_date_timer(vcpu, lt, <->wd_timer); + + if (lt->regs.wd_limit.wd_l != 0) { + if (counter < lt->regs.wd_limit.wd_l) { + DebugHRTM("wd counter 0x%llx did not reach 0x%x " + "limit value", + counter, lt->regs.wd_limit.wd_l); + } + } + + lt->regs.wd_counter.wd_c = 0; /* counter reset */ + + lt->regs.wd_control.w_evn = 1; + if (lt->regs.wd_control.w_out_e) { + if (!lt->regs.wd_control.w_m || old_wd_event) { + lt->wd_timer.work = kvm_watchdog_reset_timer_work; + } else { + lt->wd_timer.work = kvm_set_reset_irq_timer_work; + } + kthread_queue_work(lt->wd_timer.worker, + <->wd_timer.expired); + } +} + +enum hrtimer_restart lt_timer_fn(struct kvm_lt *lt, struct kvm_timer *ktimer) +{ + struct kvm_vcpu *vcpu; + s64 period = ktimer->period; + + vcpu = ktimer->vcpu; + if (!vcpu) + return HRTIMER_NORESTART; + + DebugSYSTM("%s started on VCPU 
#%d\n", ktimer->name, vcpu->vcpu_id); + + ktimer->t_ops->timer_fn(vcpu, lt); + + if (ktimer->t_ops->is_periodic(ktimer)) { + hrtimer_add_expires_ns(&ktimer->timer, period); + DebugSYSTM("%s periodic timer restarted " + "at host ns 0x%llx expires at 0x%llx\n", + ktimer->name, ktimer->host_start_ns, + hrtimer_get_expires_ns(&ktimer->timer)); + return HRTIMER_RESTART; + } + DebugSYSTM("%s handled\n", ktimer->name); + return HRTIMER_NORESTART; +} + +static enum hrtimer_restart lt_sys_timer_fn(struct hrtimer *data) +{ + struct kvm_lt *lt; + struct kvm_timer *sys_timer; + + sys_timer = container_of(data, struct kvm_timer, timer); + lt = sys_timer_to_lt(sys_timer); + return lt_timer_fn(lt, sys_timer); +} + +static enum hrtimer_restart lt_wd_timer_fn(struct hrtimer *data) +{ + struct kvm_lt *lt; + struct kvm_timer *wd_timer; + + wd_timer = container_of(data, struct kvm_timer, timer); + lt = wd_timer_to_lt(wd_timer); + return lt_timer_fn(lt, wd_timer); +} + +static bool lt_is_periodic(struct kvm_timer *ktimer) +{ + return true; /* sys timer and dw timer are periodic */ +} + +void kvm_lt_reset(struct kvm_lt *lt) +{ + + /* Stop the timer in case it's a reset to an active state */ + hrtimer_cancel(<->sys_timer.timer); + kthread_flush_work(<->sys_timer.expired); + hrtimer_cancel(<->wd_timer.timer); + kthread_flush_work(<->wd_timer.expired); + + lt->base_address = 0; + lt->sys_timer_irq_id = SYS_TIMER_IRQ_ID; + lt->wd_timer_irq_id = WD_TIMER_IRQ_ID; + + /* registers state on reset */ + lt_set_reg(lt, COUNTER_CONTROL, 0); + lt_set_reg(lt, WD_COUNTER, 0); + lt_set_reg(lt, WD_PRESCALER, 0x00001000); + lt_set_reg(lt, WD_LIMIT, 0x00002fb2); + lt_set_reg(lt, WD_CONTROL, 0); + lt_set_reg(lt, RESET_COUNTER_L, 0); + lt_set_reg(lt, RESET_COUNTER_H, 0); + lt_set_reg(lt, POWER_COUNTER_L, 0); + lt_set_reg(lt, POWER_COUNTER_H, 0); +} + +static const struct kvm_io_device_ops lt_mmio_ops = { + .read = lt_mmio_read, + .write = lt_mmio_write, +}; + +static const struct kvm_timer_ops 
lt_sys_timer_ops = { + .is_periodic = lt_is_periodic, + .timer_fn = do_lt_sys_timer, +}; + +static const struct kvm_timer_ops lt_wd_timer_ops = { + .is_periodic = lt_is_periodic, + .timer_fn = do_lt_wd_timer, +}; + +struct kvm_lt *kvm_create_lt(struct kvm *kvm, int node_id, u32 ticks_per_sec, + u32 sys_timer_freq) +{ + struct kvm_lt *lt; + pid_t pid_nr; + + ASSERT(kvm_get_lt(kvm, node_id) == NULL); + + lt = kzalloc(sizeof(struct kvm_lt), GFP_KERNEL); + if (!lt) + return NULL; + + mutex_init(<->lock); + + lt->kvm = kvm; + lt->ticks_per_sec = ticks_per_sec; + lt->frequency = sys_timer_freq; + + pid_nr = task_pid_nr(current); + + hrtimer_init(<->sys_timer.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + lt->sys_timer.timer.function = lt_sys_timer_fn; + lt->sys_timer.name = "sys timer"; + lt->sys_timer.type = kvm_sys_timer_type; + lt->sys_timer.t_ops = <_sys_timer_ops; + raw_spin_lock_init(<->sys_timer.lock); + lt->sys_timer.worker = kthread_create_worker(0, "kvm-sys-timer/%d/%d", + pid_nr, node_id); + if (IS_ERR(lt->sys_timer.worker)) + goto fail_sys_timer; + kthread_init_work(<->sys_timer.expired, lt_sys_timer_do_work); + lt->sys_timer.kvm = kvm; + + hrtimer_init(<->wd_timer.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + lt->wd_timer.timer.function = lt_wd_timer_fn; + lt->wd_timer.name = "wd timer"; + lt->wd_timer.type = kvm_wd_timer_type; + lt->wd_timer.t_ops = <_wd_timer_ops; + raw_spin_lock_init(<->wd_timer.lock); + lt->wd_timer.worker = kthread_create_worker(0, "kvm-wd-timer/%d/%d", + pid_nr, node_id); + if (IS_ERR(lt->wd_timer.worker)) + goto fail_wd_timer; + kthread_init_work(<->wd_timer.expired, lt_wd_timer_do_work); + lt->wd_timer.kvm = kvm; + + lt->reset_count.name = "reset counter"; + lt->reset_count.type = kvm_reset_timer_type; + lt->reset_count.running_time = 0; /* from reset */ + + lt->power_count.name = "power counter"; + lt->power_count.type = kvm_power_timer_type; + lt->power_count.running_time = 0; /* from power */ + + kvm_lt_reset(lt); + + kvm_set_lt(kvm, 
node_id, lt); + + return lt; + +fail_wd_timer: + kthread_destroy_worker(lt->sys_timer.worker); +fail_sys_timer: + kfree(lt); + return NULL; +} + +int kvm_lt_set_base(struct kvm *kvm, int node_id, unsigned long new_base) +{ + struct kvm_lt *lt = kvm_get_lt(kvm, node_id); + int ret; + u32 lt_freq = 10000000; + + if (is_prototype()) + lt_freq = 500000; + + if (lt == NULL) { + kvm_create_lt(kvm, node_id, + ((cpu_freq_hz + (USEC_PER_SEC - 1)) / USEC_PER_SEC) * + USEC_PER_SEC, + lt_freq /* now fixed, but is better to pass */ + /* qemu as machine parameter and */ + /* repass from qemu to KVM through ioctl() */); + lt = kvm_get_lt(kvm, node_id); + if (lt == NULL) { + pr_err("%s(): sys timer node #%d is not yet created, " + "ignore setup\n", + __func__, node_id); + return -ENODEV; + } + } + if (lt->base_address == new_base) { + pr_info("%s(): sys timer node #%d base 0x%lx is the same, " + "so ignore update\n", + __func__, node_id, new_base); + return 0; + } + + mutex_lock(&kvm->slots_lock); + if (lt->base_address != 0) { + /* base address was already set, so update */ + kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, <->dev); + } + lt->base_address = new_base; + kvm_iodevice_init(<->dev, <_mmio_ops); + ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, new_base, + LT_MMIO_LENGTH, <->dev); + mutex_unlock(&kvm->slots_lock); + if (ret < 0) { + kvm_set_lt(kvm, node_id, NULL); + kfree(lt); + pr_err("%s(): could not register sys timer node #%d as MMIO " + "bus device, error %d\n", + __func__, node_id, ret); + } + + return ret; +} + +void kvm_free_lt(struct kvm *kvm, int node_id) +{ + struct kvm_lt *lt = kvm_get_lt(kvm, node_id); + + if (lt) { + if (lt->base_address != 0) { + /* mutex_lock(&kvm->slots_lock); */ + kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, <->dev); + lt->base_address = 0; + /* mutex_unlock(&kvm->slots_lock); */ + } + hrtimer_cancel(<->sys_timer.timer); + kthread_flush_work(<->sys_timer.expired); + kthread_destroy_worker(lt->sys_timer.worker); + 
hrtimer_cancel(&lt->wd_timer.timer); + kthread_flush_work(&lt->wd_timer.expired); + kthread_destroy_worker(lt->wd_timer.worker); + kfree(lt); + kvm_set_lt(kvm, node_id, NULL); + } +} +void kvm_free_all_lt(struct kvm *kvm) +{ + int node_id; + + for (node_id = 0; node_id < KVM_MAX_EIOHUB_NUM; node_id++) { + kvm_free_lt(kvm, node_id); + } +} diff --git a/arch/e2k/kvm/lt.h b/arch/e2k/kvm/lt.h new file mode 100644 index 000000000000..6be33b87ef4c --- /dev/null +++ b/arch/e2k/kvm/lt.h @@ -0,0 +1,85 @@ +#ifndef __KVM_L_TIMER_H +#define __KVM_L_TIMER_H + +#include +#include +#include +#include "lt_regs.h" +#include "kvm_timer.h" + +#define DEBUG_LT +#undef ASSERT +#ifdef DEBUG_LT +#define ASSERT(x) \ +do { \ + if (!(x)) { \ + pr_emerg("assertion failed %s: %d: %s\n", \ + __FILE__, __LINE__, #x); \ + BUG(); \ + } \ +} while (0) +#else /* ! DEBUG_LT */ +#define ASSERT(x) do { } while (0) +#endif /* DEBUG_LT */ + +typedef struct kvm_lt_regs { + counter_limit_t counter_limit; /* timer counter limit value */ + counter_start_t counter_start; /* start value of counter */ + counter_t counter; /* timer counter */ + counter_control_t counter_control; /* timer control register */ + wd_counter_l_t wd_counter; /* watchdog counter */ + wd_counter_h_t wd_prescaler; /* watchdog prescaler */ + wd_limit_t wd_limit; /* watchdog limit */ + power_counter_l_t power_counter_lo; /* power counter low bits */ + power_counter_h_t power_counter_hi; /* power counter high bits */ + wd_control_t wd_control; /* watchdog control register */ + reset_counter_l_t reset_counter_lo; /* reset counter low bits */ + reset_counter_h_t reset_counter_hi; /* reset counter high bits */ + + u32 latched_reset_counter; /* latched high part of reset counter */ + u32 latched_power_counter; /* latched high part of power counter */ +} kvm_lt_regs_t; + +typedef struct kvm_lt { + u64 base_address; + kvm_lt_regs_t regs; + struct kvm_timer sys_timer; + struct kvm_timer wd_timer; + struct kvm_timer reset_count; + struct kvm_timer 
power_count; + int sys_timer_irq_id; + int wd_timer_irq_id; + u32 ticks_per_sec; /* cycles (ticks) per 1 sec */ + u32 frequency; /* frequency of counter increment (Hz) */ + /* standard frequency of system timer */ + /* is 10 Mhz */ + struct kvm_io_device dev; + struct kvm *kvm; + struct mutex lock; +} kvm_lt_t; + +static inline struct kvm_lt *kvm_get_lt(struct kvm *kvm, int node_id) +{ + ASSERT(node_id < KVM_MAX_EIOHUB_NUM); + return kvm->arch.lt[node_id]; +} + +static inline void kvm_set_lt(struct kvm *kvm, int node_id, struct kvm_lt *lt) +{ + ASSERT(node_id < KVM_MAX_EIOHUB_NUM); + kvm->arch.lt[node_id] = lt; +} + +static inline bool kvm_lt_in_kernel(struct kvm *kvm, int node_id) +{ + return kvm_get_lt(kvm, node_id) != NULL; +} +extern int kvm_lt_set_base(struct kvm *kvm, int node_id, + unsigned long new_base); + +extern struct kvm_lt *kvm_create_lt(struct kvm *kvm, int node_id, + u32 ticks_per_sec, u32 sys_timer_freq); +extern void kvm_free_lt(struct kvm *kvm, int node_id); +extern void kvm_free_all_lt(struct kvm *kvm); + +#endif /* __KVM_L_TIMER_H */ diff --git a/arch/e2k/kvm/lt_regs.h b/arch/e2k/kvm/lt_regs.h new file mode 100644 index 000000000000..da9bdc80bfd2 --- /dev/null +++ b/arch/e2k/kvm/lt_regs.h @@ -0,0 +1,134 @@ +#ifndef _KVM_L_TIMER_REGS_H +#define _KVM_L_TIMER_REGS_H + +#include + +/* + * Elbrus System timer Registers + */ + +#define COUNTER_LIMIT 0x00 +typedef union counter_limit { + u32 reg; + struct { /* as fields */ + u32 unused : 9; /* [8:0] */ + u32 c_l : 22; /* [30:9] */ + u32 l : 1; /* [31] */ + }; +} counter_limit_t; + +#define COUNTER_START_VALUE 0x04 +typedef union counter_start { + u32 reg; + struct { /* as fields */ + u32 unused : 9; /* [8:0] */ + u32 c_st_v : 22; /* [30:9] */ + u32 l : 1; /* [31] */ + }; +} counter_start_t; + +#define COUNTER 0x08 +typedef union counter { + u32 reg; + struct { /* as fields */ + u32 unused : 9; /* [8:0] */ + u32 c : 22; /* [30:9] */ + u32 l : 1; /* [31] */ + }; +} counter_t; +#define 
MAX_SYS_TIMER_COUNT 0x3fffff /* [30: 9] : 22 bits */ +#define MIN_SYS_TIMER_COUNT 0x000001 + +#define COUNTER_CONTROL 0x0c +typedef union counter_control { + u32 reg; + struct { /* as fields */ + u32 s_s : 1; /* [0] */ + u32 inv_l : 1; /* [1] */ + u32 l_ini : 1; /* [2] */ + u32 unused : 29; /* [31:3] */ + }; +} counter_control_t; + +#define WD_COUNTER 0x10 +typedef union wd_counter_l { + u32 reg; + struct { /* as fields */ + u32 wd_c : 32; /* [31:0] */ + }; +} wd_counter_l_t; + +#define WD_PRESCALER 0x14 +typedef union wd_counter_h { + u32 reg; + struct { /* as fields */ + u32 wd_c : 32; /* [31:0] */ + }; +} wd_counter_h_t; + +#define WD_LIMIT 0x18 +typedef union wd_limit { + u32 reg; + struct { /* as fields */ + u32 wd_l : 32; /* [31:0] */ + }; +} wd_limit_t; + +#undef WD_CONTROL +#define WD_CONTROL 0x1c +typedef union wd_control { + u32 reg; + struct { /* as fields */ + u32 w_m : 1; /* [0] */ + u32 w_out_e : 1; /* [1] */ + u32 w_evn : 1; /* [2] */ + u32 unused : 29; /* [31:3] */ + }; +} wd_control_t; + +#undef RESET_COUNTER_L +#define RESET_COUNTER_L 0x20 +typedef union reset_counter_l { + u32 reg; + struct { /* as fields */ + u32 rs_c : 32; /* [31:0] */ + }; +} reset_counter_l_t; + +#undef RESET_COUNTER_H +#define RESET_COUNTER_H 0x24 +typedef union reset_counter_h { + u32 reg; + struct { /* as fields */ + u32 rs_c : 32; /* [31:0] */ + }; +} reset_counter_h_t; +#define RESET_COUNTER RESET_COUNTER_L + +#undef POWER_COUNTER_L +#define POWER_COUNTER_L 0x28 +typedef union power_counter_l { + u32 reg; + struct { /* as fields */ + u32 pw_c : 32; /* [31:0] */ + }; +} power_counter_l_t; + +#undef POWER_COUNTER_H +#define POWER_COUNTER_H 0x2c +typedef union power_counter_h { + u32 reg; + struct { /* as fields */ + u32 pw_c : 32; /* [31:0] */ + }; +} power_counter_h_t; +#define POWER_COUNTER POWER_COUNTER_L + +#define LT_MMIO_LENGTH (POWER_COUNTER_H + 4) + +typedef enum lt_irq_map { + SYS_TIMER_IRQ_ID = 2, + WD_TIMER_IRQ_ID = 20, +} lt_irq_map_t; + +#endif /* 
_KVM_L_TIMER_REGS_H */ diff --git a/arch/e2k/kvm/mm.c b/arch/e2k/kvm/mm.c new file mode 100644 index 000000000000..a94218964106 --- /dev/null +++ b/arch/e2k/kvm/mm.c @@ -0,0 +1,526 @@ + +/* + * VCPU guest MM virtualization + * + * Based on x86 code, Copyright (c) 2004, Intel Corporation. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#include +#include +#include +#include +#include +#include +#include "mmu.h" +#include "process.h" +#include "mman.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_GMM_MODE +#undef DebugGMM +#define DEBUG_KVM_GMM_MODE 0 /* guest mm freeing */ + /* debugging */ +#define DebugGMM(fmt, args...) \ +({ \ + if (DEBUG_KVM_GMM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_GMM_FREE_MODE +#undef DebugKVMF +#define DEBUG_KVM_GMM_FREE_MODE 0 /* guest mm freeing */ + /* debugging */ +#define DebugKVMF(fmt, args...) \ +({ \ + if (DEBUG_KVM_GMM_FREE_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_FREE_GMM_SP_MODE +#undef DebugFGMM +#define DEBUG_KVM_FREE_GMM_SP_MODE 0 /* guest mm SPs freeing */ + /* debug */ +#define DebugFGMM(fmt, args...) \ +({ \ + if (DEBUG_KVM_FREE_GMM_SP_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_SHUTDOWN_MODE +#undef DebugKVMSH +#define DEBUG_KVM_SHUTDOWN_MODE 0 /* KVM shutdown debugging */ +#define DebugKVMSH(fmt, args...) \ +({ \ + if (DEBUG_KVM_SHUTDOWN_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +/* FIXME: the function is not yet called (see details below) +static void gmm_delete(struct kvm *kvm, gmm_struct_t *gmm); + */ + +/* + * Initialize a new mmu context for guest process. 
+ * FIXME: is not yet implemented + */ +static inline int +init_new_gmm_context(struct kvm *kvm, gmm_struct_t *gmm) +{ + DebugKVM("started for gmm #%d\n", gmm->nid.nr); + kvm_init_new_context(kvm, gmm); + return 0; +} + +static void destroy_gmm_u_context(struct kvm_vcpu *vcpu, gmm_struct_t *gmm) +{ + gmm_struct_t *init_gmm = pv_vcpu_get_init_gmm(vcpu); + + /* + * update gmm to emulate same as init gmm state before the gmm + * will be deleted + */ + + gmm->root_hpa = init_gmm->root_hpa; + gmm->root_gpa = init_gmm->root_gpa; + + /* FIXME: the followimg values is not used, probably may be deleted + gmm->u_pptb = init_gmm->u_pptb; + gmm->os_pptb = init_gmm->os_pptb; + */ + + KVM_BUG_ON(gmm == pv_vcpu_get_gmm(vcpu)); + if (gmm == pv_vcpu_get_active_gmm(vcpu)) { + pv_vcpu_switch_to_init_spt(vcpu, gmm); + } else if (atomic_read(&gmm->mm_count) == 1) { + /* gmm has nothing users, so release it */ + kvm_free_gmm(vcpu->kvm, gmm); + } +} + +/* + * Allocate and initialize host agent of guest mm + */ + +static inline void gmm_init(gmm_struct_t *gmm) +{ + atomic_set(&gmm->mm_count, 1); + spin_lock_init(&gmm->page_table_lock); + gmm->root_hpa = E2K_INVALID_PAGE; +#ifdef CONFIG_GUEST_MM_SPT_LIST + INIT_LIST_HEAD(&gmm->spt_list); + spin_lock_init(&gmm->spt_list_lock); + gmm->spt_list_size = 0; + gmm->total_released = 0; +#endif /* CONFIG_GUEST_MM_SPT_LIST */ +} + +static inline gmm_struct_t *do_alloc_gmm(gmmid_table_t *gmmid_table) +{ + gmm_struct_t *gmm; + int nr; + + gmm = kmem_cache_alloc(gmmid_table->nid_cachep, GFP_KERNEL); + if (!gmm) { + DebugKVM("could not allocate guest mm structure\n"); + return NULL; + } + memset(gmm, 0, sizeof(*gmm)); + + nr = kvm_alloc_nid(gmmid_table, &gmm->nid); + if (nr < 0) { + DebugKVM("could not allocate NID for mm structure\n"); + goto out_free; + } + + gmm_init(gmm); + DebugKVM("allocated guest mm structure #%d at %px\n", + gmm->nid.nr, gmm); + +out: + return gmm; + +out_free: + kmem_cache_free(gmmid_table->nid_cachep, gmm); + gmm = NULL; + 
goto out; +} + +static inline gmm_struct_t *allocate_gmm(struct kvm *kvm) +{ + return do_alloc_gmm(&kvm->arch.gmmid_table); +} + +static inline void do_drop_gmm(gmm_struct_t *gmm, gmmid_table_t *gmmid_table) +{ + DebugKVMF("started for guest mm #%d\n", gmm->nid.nr); + kmem_cache_free(gmmid_table->nid_cachep, gmm); +} + +void do_free_gmm(struct kvm *kvm, gmm_struct_t *gmm, gmmid_table_t *gmmid_table) +{ + DebugKVMF("started for guest mm #%d\n", gmm->nid.nr); + + if (!kvm_is_empty_gmm_spt_list(gmm)) { + pr_err("%s(): gmm #%d SP list is not empty, force release\n", + __func__, gmm->nid.nr); + kvm_delete_gmm_sp_list(kvm, gmm); + } + + kvm_do_free_nid(&gmm->nid, gmmid_table); + do_drop_gmm(gmm, gmmid_table); +} + +static gmm_struct_t *alloc_gmm(struct kvm *kvm) +{ + gmm_struct_t *new_gmm; + int ret; + + new_gmm = allocate_gmm(kvm); + if (new_gmm == NULL) { + DebugKVM("could not allocate agent of guest mm structure\n"); + return NULL; + } + + ret = init_new_gmm_context(kvm, new_gmm); + if (unlikely(ret != 0)) { + DebugKVM("could not init MMU context for guest mm\n"); + goto out_free_gmm; + } + return new_gmm; + +out_free_gmm: + free_gmm(kvm, new_gmm); + return NULL; +} + +gmm_struct_t *create_gmm(struct kvm *kvm) +{ + gmm_struct_t *new_gmm; + + DebugKVM("started\n"); + + new_gmm = alloc_gmm(kvm); + if (new_gmm == NULL) { + DebugKVM("could not allocate agent of guest mm structure\n"); + return NULL; + } + + kvm_init_gmm_root_pt(kvm, new_gmm); + + DebugKVM("created host agent #%d of guest mm structure\n", + new_gmm->nid.nr); + return new_gmm; +} + +/* + * Called when the last reference to the guest mm agent is dropped. + * Free the page directory and the guest mm structure. 
+ */ +static void do_gmm_drop(struct kvm_vcpu *vcpu, gmm_struct_t *gmm) +{ + DebugGMM("started on for guest MM #%d at %px users %d\n", + gmm->nid.nr, gmm, atomic_read(&gmm->mm_count)); + + if (atomic_read(&gmm->mm_count) > 2) { + pr_err("%s(): GMM #%d user's counter is %d not empty\n", + __func__, gmm->nid.nr, atomic_read(&gmm->mm_count)); + } + release_gmm_root_pt(vcpu, gmm); +} +void gmm_drop(struct kvm *kvm, gmm_struct_t *gmm) +{ + unsigned long flags; + + gmmid_table_lock_irqsave(&kvm->arch.gmmid_table, flags); + do_gmm_drop(current_thread_info()->vcpu, gmm); + gmmid_table_unlock_irqrestore(&kvm->arch.gmmid_table, flags); +} + +int kvm_guest_mm_drop(struct kvm_vcpu *vcpu, int gmmid_nr) +{ + gmm_struct_t *active_gmm = pv_vcpu_get_active_gmm(vcpu); + gthread_info_t *cur_gti = pv_vcpu_get_gti(vcpu); + gmm_struct_t *gmm; + + DebugGMM("started for host agent #%d of guest mm\n", gmmid_nr); + gmm = kvm_find_gmmid(&vcpu->kvm->arch.gmmid_table, gmmid_nr); + if (gmm == NULL) { + pr_err("%s(): could not find gmm host agent GMMID #%d\n", + __func__, gmmid_nr); + return -ENODEV; + } + BUG_ON(gmm == NULL); + DebugGMM("host gmm #%d at %px users %d\n", + gmmid_nr, gmm, atomic_read(&gmm->mm_count)); + + DebugFGMM("gmm #%d before release has 0x%lx SPs\n", + gmm->nid.nr, kvm_get_gmm_spt_list_size(gmm)); + + if (active_gmm == gmm) { + DebugGMM("gmm #%d can be now as active, so deactivate it\n", + gmm->nid.nr); + KVM_BUG_ON(atomic_read(&gmm->mm_count) < 2); + kvm_mmu_unload_gmm_root(vcpu); + } else if (cur_gti && cur_gti->gmm) { + KVM_BUG_ON(cur_gti->gmm == gmm && + !pv_vcpu_is_init_gmm(vcpu, active_gmm)); + } + + gmm_drop(vcpu->kvm, gmm); + + DebugFGMM("gmm #%d after release has 0x%lx SPs, total released 0x%lx\n", + gmm->nid.nr, kvm_get_gmm_spt_list_size(gmm), + kvm_get_gmm_spt_total_released(gmm)); + + KVM_BUG_ON(!kvm_is_empty_gmm_spt_list(gmm)); + + destroy_gmm_u_context(vcpu, gmm); + + return 0; +} + +static inline void force_drop_gmm(struct kvm *kvm, gmm_struct_t *gmm) +{ + if 
(atomic_read(&gmm->mm_count) != 0) { + if (gmm != pv_mmu_get_init_gmm(kvm)) { + pr_err("%s(): gmm GMMID #%d usage counter is %d, " + "should be 0\n", + __func__, gmm->nid.nr, + atomic_read(&gmm->mm_count)); + } + atomic_set(&gmm->mm_count, 0); + } + do_gmm_drop(current_thread_info()->vcpu, gmm); + free_gmm(kvm, gmm); +} + +static void force_exit_gmm(struct kvm *kvm, gthread_info_t *gthread_info) +{ + gmm_struct_t *gmm = gthread_info->gmm; + + DebugKVM("started for guest MM %px\n", gmm); + + do_gmm_drop(current_thread_info()->vcpu, gmm); + gthread_info->gmm = NULL; +} + +static int kvm_deactivate_gmm(struct kvm_vcpu *vcpu, + gthread_info_t *gti, gmm_struct_t *gmm) +{ + gmm_struct_t *cur_gmm; + int gmmid = gmm->nid.nr; + + DebugGMM("started for host gmm agent #%d users %d\n", + gmmid, atomic_read(&gmm->mm_count)); + KVM_BUG_ON(gmm != gti->gmm); + KVM_BUG_ON(atomic_read(&gmm->mm_count) < 2); + + cur_gmm = pv_vcpu_get_gmm(vcpu); + if (gmm == cur_gmm) { + /* deactivated GMM is not more current active gmm */ + kvm_mmu_unload_gmm_root(vcpu); + } + kvm_gmm_only_put(vcpu->kvm, gti); + DebugGMM("guest mm agent #%d of process agent #%d is deactivated\n", + gmmid, gti->gpid->nid.nr); + + return 0; +} + +int kvm_activate_guest_mm(struct kvm_vcpu *vcpu, + int active_gmmid_nr, int gmmid_nr, gpa_t u_phys_ptb) +{ + struct kvm *kvm = vcpu->kvm; + gthread_info_t *cur_gti = pv_vcpu_get_gti(vcpu); + gmm_struct_t *new_gmm, *old_gmm, *cur_gmm; + + DebugGMM("started for new host agent of new guest mm, pptb at %px\n", + (void *)u_phys_ptb); + new_gmm = create_gmm(kvm); + if (new_gmm == NULL) { + DebugGMM("could not create new host agent of guest mm\n"); + return -EINVAL; + } + + cur_gmm = pv_vcpu_get_gmm(vcpu); + if (!pv_vcpu_is_init_gmm(vcpu, cur_gmm)) { + /* current gmm should have been switched to init gmm */ + pr_err("%s(): active gmm #%d is not init guest\n", + __func__, cur_gmm->nid.nr); + } + + old_gmm = cur_gti->gmm; + if (likely(active_gmmid_nr > 0)) { + /* old process was user 
guest process */ + DebugGMM("guest old gmm is #%d\n", active_gmmid_nr); + if (old_gmm && old_gmm->nid.nr != active_gmmid_nr && + !vcpu->arch.is_hv) { + pr_err("%s(): old host gmm is #%d, but guest old " + "gmm #%d is not the same\n", + __func__, old_gmm->nid.nr, active_gmmid_nr); + } + KVM_BUG_ON(old_gmm == NULL); + } else { + /* old task was guest kernel thread */ + DebugGMM("guest old gmm is #%d (init gmm)\n", active_gmmid_nr); + if (old_gmm && !pv_vcpu_is_init_gmm(vcpu, old_gmm) && + !vcpu->arch.is_hv) { + pr_err("%s(): old guest gmm is init #%d, but host old " + "gmm #%d is not the init too\n", + __func__, active_gmmid_nr, old_gmm->nid.nr); + } + KVM_BUG_ON(old_gmm != NULL); + } + + /* deactivate old gmm of this thread */ + if (likely(old_gmm && !pv_vcpu_is_init_gmm(vcpu, old_gmm))) { + int ret; + + ret = kvm_deactivate_gmm(vcpu, cur_gti, old_gmm); + if (ret) { + pr_err("%s(): could not deactivate old guest mm, " + "error %d\n", + __func__, ret); + return ret; + } + } + + kvm_gmm_get(vcpu, cur_gti, new_gmm); + return kvm_pv_activate_guest_mm(vcpu, new_gmm, u_phys_ptb); +} + +static int kvm_gmmidmap_init(struct kvm *kvm, gmmid_table_t *gmmid_table, + kvm_nidmap_t *gmmid_nidmap, int gmmidmap_entries, + struct hlist_head *gmmid_hash, int gmmid_hash_bits) +{ + int ret; + + DebugKVM("started\n"); + + gmmid_table->nidmap = gmmid_nidmap; + gmmid_table->nidmap_entries = gmmidmap_entries; + gmmid_table->nid_hash = gmmid_hash; + gmmid_table->nid_hash_bits = gmmid_hash_bits; + gmmid_table->nid_hash_size = NID_HASH_SIZE(gmmid_hash_bits); + ret = kvm_nidmap_init(gmmid_table, GMMID_MAX_LIMIT, RESERVED_GMMIDS, + /* last gmm_id: no reserved, */ + /* init_gmm_id #0 will be allocated first */ + -1); + if (ret != 0) { + pr_err("kvm_gmmidmap_init() could not create NID map\n"); + return ret; + } + sprintf(gmmid_table->nid_cache_name, "gmm_struct_VM%d", + kvm->arch.vmid.nr); + gmmid_table->nid_cachep = + kmem_cache_create(gmmid_table->nid_cache_name, + sizeof(gmm_struct_t), 0, + 
SLAB_HWCACHE_ALIGN, NULL); + if (gmmid_table->nid_cachep == NULL) { + pr_err("kvm_gpidmap_init() could not allocate GMM cache\n"); + return -ENOMEM; + } + return 0; +} + +/* + * Delete dropped guest mm from all guest threads + */ +/* FIXME: the function is not yet called (see details above) +static void gmm_delete(struct kvm *kvm, gmm_struct_t *gmm) +{ + gpid_t *gpid; + struct hlist_node *next; + unsigned long flags; + int i; + + DebugKVM("started\n"); + gpid_table_lock_irqsave(&kvm->arch.gpid_table, flags); + for_each_guest_thread_info(gpid, i, next, &kvm->arch.gpid_table) { + if (gpid->gthread_info->gmm == gmm) { + gpid->gthread_info->gmm = NULL; + } + } + gpid_table_unlock_irqrestore(&kvm->arch.gpid_table, flags); +} + */ + +int kvm_guest_pv_mm_init(struct kvm *kvm) +{ + int ret; + + DebugKVM("started\n"); + + ret = kvm_gmmidmap_init(kvm, &kvm->arch.gmmid_table, + kvm->arch.gmmid_nidmap, GMMIDMAP_ENTRIES, + kvm->arch.gmmid_hash, GMMID_HASH_BITS); + if (ret) { + DebugKVM("could not create ID mapping for host agents of " + "guest MMs structures\n"); + return ret; + } + kvm->arch.init_gmm = alloc_gmm(kvm); + if (kvm->arch.init_gmm == NULL) { + DebugKVM("could not allocate agent of guest init mm " + "structure\n"); + return -ENOMEM; + } + DebugKVM("created guest init mm agent #%d at %px\n", + kvm->arch.init_gmm->nid.nr, kvm->arch.init_gmm); + + kvm_fill_init_root_pt(kvm); + + return ret; +} + +void kvm_guest_pv_mm_destroy(struct kvm *kvm) +{ + gpid_t *gpid; + gmm_struct_t *gmm; + struct hlist_node *next; + int i; + + DebugKVMSH("started\n"); + + /* release init gmm */ + gmm = pv_mmu_get_init_gmm(kvm); + gmm_drop(kvm, gmm); + + gpid_table_lock(&kvm->arch.gpid_table); + for_each_guest_thread_info(gpid, i, next, &kvm->arch.gpid_table) { + if (gpid->gthread_info->gmm != NULL) + kvm_gmm_put(kvm, gpid->gthread_info); + } + for_each_guest_thread_info(gpid, i, next, &kvm->arch.gpid_table) { + if (gpid->gthread_info->gmm != NULL) { + pr_err("%s(): could not free mm on GPID 
%d\n", + __func__, gpid->nid.nr); + force_exit_gmm(kvm, gpid->gthread_info); + } + } + gpid_table_unlock(&kvm->arch.gpid_table); + gmmid_table_lock(&kvm->arch.gmmid_table); + for_each_guest_mm(gmm, i, next, &kvm->arch.gmmid_table) { + if (gmm != pv_mmu_get_init_gmm(kvm)) { + pr_err("%s(): mm %px #%d is not used by any task, " + "but is not free\n", + __func__, gmm, gmm->nid.nr); + } + force_drop_gmm(kvm, gmm); + } + gmmid_table_unlock(&kvm->arch.gmmid_table); + kvm_nidmap_destroy(&kvm->arch.gmmid_table); + kmem_cache_destroy(kvm->arch.gmmid_table.nid_cachep); + kvm->arch.gmmid_table.nid_cachep = NULL; +} diff --git a/arch/e2k/kvm/mman.h b/arch/e2k/kvm/mman.h new file mode 100644 index 000000000000..b1ed40508780 --- /dev/null +++ b/arch/e2k/kvm/mman.h @@ -0,0 +1,380 @@ +#ifndef __KVM_E2K_MMAN_H +#define __KVM_E2K_MMAN_H + +#include +#include +#include +#include +#include + +#undef DEBUG_KVM_MM_MODE +#undef DebugKVMMM +#define DEBUG_KVM_MM_MODE 0 /* host kernel MM debugging */ +#define DebugKVMMM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_GMM_MODE +#undef DebugGMM +#define DEBUG_KVM_GMM_MODE 0 /* guest mm freeing */ + /* debugging */ +#define DebugGMM(fmt, args...) \ +({ \ + if (DEBUG_KVM_GMM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_SP_LIST_GMM_MODE +#undef DebugSPGMM +#define DEBUG_KVM_SP_LIST_GMM_MODE 0 /* guest mm : SP list add */ + /* delete debug */ +#define DEBUG_EXCLUDE_INIT_GMM false +#define DebugSPGMM(fmt, args...) \ +({ \ + if (DEBUG_KVM_SP_LIST_GMM_MODE) { \ + if (DEBUG_EXCLUDE_INIT_GMM && gmm->nid.nr == 0) { \ + ; \ + } else { \ + pr_info("%s(): " fmt, __func__, ##args); \ + } \ + } \ +}) + +#undef DEBUG_KVM_FREE_GMM_SP_MODE +#undef DebugFGMM +#define DEBUG_KVM_FREE_GMM_SP_MODE 0 /* guest mm SPs freeing */ + /* debug */ +#define DebugFGMM(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_FREE_GMM_SP_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +extern gmm_struct_t *create_gmm(struct kvm *kvm); +extern void kvm_init_gmm_root_pt(struct kvm *kvm, gmm_struct_t *new_gmm); +extern void kvm_switch_to_init_root_pt(struct kvm_vcpu *vcpu, + gmm_struct_t *gmm); +extern void kvm_fill_init_root_pt(struct kvm *kvm); +extern void do_free_gmm(struct kvm *kvm, gmm_struct_t *gmm, + gmmid_table_t *gmmid_table); +extern void gmm_drop(struct kvm *kvm, gmm_struct_t *gmm); + +static inline void free_gmm(struct kvm *kvm, gmm_struct_t *gmm) +{ + do_free_gmm(kvm, gmm, &kvm->arch.gmmid_table); +} + +static inline void kvm_free_gmm(struct kvm *kvm, gmm_struct_t *gmm) +{ + unsigned long flags; + + gmmid_table_lock_irqsave(&kvm->arch.gmmid_table, flags); + free_gmm(kvm, gmm); + gmmid_table_unlock_irqrestore(&kvm->arch.gmmid_table, flags); +} + +static inline void do_gmm_get(gmm_struct_t *gmm) +{ + atomic_inc(&gmm->mm_count); +} + +static inline void kvm_gmm_get(struct kvm_vcpu *vcpu, gthread_info_t *gti, + gmm_struct_t *gmm) +{ + do_gmm_get(gmm); + if (likely(!pv_vcpu_is_init_gmm(vcpu, gmm))) { + gti->gmm = gmm; + gti->gmm_in_release = false; + } + DebugGMM("GPID #%d guest mm #%d at %px has now %d users\n", + gti->gpid->nid.nr, gmm->nid.nr, gmm, + atomic_read(&gmm->mm_count)); +} +static inline int do_gmm_put(struct kvm *kvm, gmm_struct_t *gmm) +{ + int count; + + count = atomic_dec_return(&gmm->mm_count); + KVM_BUG_ON(count <= 0); + return count; +} +static inline int kvm_do_gmm_put(struct kvm *kvm, gthread_info_t *gti, + bool only_put) +{ + gmm_struct_t *gmm; + int count; + + if (likely(!test_gti_thread_flag(gti, GTIF_KERNEL_THREAD))) { + gmm = gti->gmm; + } else { + gmm = pv_mmu_get_init_gmm(kvm); + } + DebugGMM("started for guest thread GPID #%d, gmm #%d users %d\n", + gti->gpid->nid.nr, gmm->nid.nr, atomic_read(&gmm->mm_count)); + + count = do_gmm_put(kvm, gmm); + gti->gmm = NULL; + if (!only_put && count == 1) { + /* 
nothing users gmm has now, so can be released */ + kvm_free_gmm(kvm, gmm); + count--; + } + return count; +} +static inline int kvm_gmm_put(struct kvm *kvm, gthread_info_t *gti) +{ + return kvm_do_gmm_put(kvm, gti, false); +} +static inline int kvm_gmm_only_put(struct kvm *kvm, gthread_info_t *gti) +{ + return kvm_do_gmm_put(kvm, gti, true); +} + +static inline void kvm_check_pgd(pgd_t *pgd) +{ + int ptr; + pgd_t *cur_pgd; + + BUG_ON(pgd == NULL); + for (ptr = 0; ptr < USER_PTRS_PER_PGD; ptr++) { + cur_pgd = pgd + ptr; + if (!pgd_none(*cur_pgd)) { + pgd_ERROR(*cur_pgd); + pr_err("is not empty current pgd #0x%x %px = 0x%lx\n", + ptr, cur_pgd, pgd_val(*cur_pgd)); + *cur_pgd = __pgd(0); + } + } +} + +#ifdef CONFIG_GUEST_MM_SPT_LIST +static inline void kvm_init_sp_gmm_entry(struct kvm_mmu_page *sp) +{ + INIT_LIST_HEAD(&sp->gmm_entry); + sp->gmm = NULL; +} +static inline size_t kvm_get_gmm_spt_list_size(gmm_struct_t *gmm) +{ + return gmm->spt_list_size; +} +static inline size_t kvm_get_gmm_spt_total_released(gmm_struct_t *gmm) +{ + return gmm->total_released; +} +static inline bool kvm_is_empty_gmm_spt_list(gmm_struct_t *gmm) +{ + bool is_empty; + size_t list_size; + + spin_lock(&gmm->spt_list_lock); + is_empty = list_empty(&gmm->spt_list); + list_size = gmm->spt_list_size; + spin_unlock(&gmm->spt_list_lock); + + KVM_BUG_ON(is_empty && list_size != 0); + KVM_BUG_ON(!is_empty && list_size == 0); + + return is_empty; +} +static inline bool kvm_is_not_empty_gmm_spt_list(gmm_struct_t *gmm) +{ + return !kvm_is_empty_gmm_spt_list(gmm); +} +static inline void +kvm_add_sp_to_gmm_list(gmm_struct_t *gmm, struct kvm_mmu_page *sp) +{ + KVM_BUG_ON(!list_empty(&sp->gmm_entry)); + + spin_lock(&gmm->spt_list_lock); + list_add_tail(&sp->gmm_entry, &gmm->spt_list); + gmm->spt_list_size++; + sp->gmm = gmm; + spin_unlock(&gmm->spt_list_lock); + + KVM_BUG_ON(gmm->spt_list_size <= 0); + + DebugSPGMM("gmm #%d : SP #%ld for GFN 0x%llx, role 0x%x GVA 0x%lx\n", + gmm->nid.nr, gmm->spt_list_size 
- 1, sp->gfn, sp->role.word, + sp->gva); +} +static inline void +kvm_try_add_sp_to_gmm_list(gmm_struct_t *gmm, struct kvm_mmu_page *sp) +{ + if (unlikely(!list_empty(&sp->gmm_entry))) { + /* SP already at the some list, probably it is good */ + if (sp->gmm == gmm) { + /* the gmm is the one it need */ + return; + } + } + kvm_add_sp_to_gmm_list(gmm, sp); +} +static inline void +kvm_init_root_gmm_spt_list(gmm_struct_t *gmm, struct kvm_mmu_page *root_sp) +{ + kvm_add_sp_to_gmm_list(gmm, root_sp); +} +static inline void +kvm_set_root_gmm_spt_list(gmm_struct_t *gmm) +{ + struct kvm_mmu_page *sp; + + KVM_BUG_ON(!VALID_PAGE(gmm->root_hpa)); + + sp = page_header(gmm->root_hpa); + kvm_init_root_gmm_spt_list(gmm, sp); +} +static inline void +kvm_delete_sp_from_the_gmm_list(gmm_struct_t *gmm, struct kvm_mmu_page *sp) +{ + KVM_BUG_ON(list_empty(&sp->gmm_entry)); + KVM_BUG_ON(list_empty(&gmm->spt_list)); + KVM_BUG_ON(sp->gmm != gmm); + + spin_lock(&gmm->spt_list_lock); + list_del_init(&sp->gmm_entry); + gmm->spt_list_size--; + gmm->total_released++; + sp->gmm = NULL; + spin_unlock(&gmm->spt_list_lock); + + KVM_BUG_ON(gmm->spt_list_size < 0); + + DebugSPGMM("gmm #%d : SP #%ld for GFN 0x%llx, role 0x%x GVA 0x%lx\n", + gmm->nid.nr, gmm->spt_list_size, sp->gfn, sp->role.word, + sp->gva); +} +static inline void +kvm_delete_sp_from_gmm_list(struct kvm_mmu_page *sp) +{ + gmm_struct_t *gmm; + + gmm = sp->gmm; + if (sp->role.direct && gmm == NULL) + return; + + kvm_delete_sp_from_the_gmm_list(gmm, sp); +} +static inline gmm_struct_t * +kvm_get_page_fault_gmm(struct kvm_vcpu *vcpu, u32 error_code) +{ + gmm_struct_t *gmm; + + if (vcpu->arch.is_hv) + return NULL; + + if (error_code & PFERR_USER_MASK) { + gmm = pv_vcpu_get_gmm(vcpu); + } else { + gmm = pv_vcpu_get_init_gmm(vcpu); + } + + KVM_BUG_ON(gmm == NULL); + + return gmm; +} +static inline gmm_struct_t * +kvm_get_faulted_addr_gmm(struct kvm_vcpu *vcpu, gva_t faulted_gva) +{ + gmm_struct_t *gmm; + + if (vcpu->arch.is_hv) + return NULL; 
+ + if (faulted_gva < GUEST_TASK_SIZE) { + gmm = pv_vcpu_get_gmm(vcpu); + } else { + gmm = pv_vcpu_get_init_gmm(vcpu); + } + + KVM_BUG_ON(gmm == NULL); + + return gmm; +} +static inline void +kvm_delete_gmm_sp_list(struct kvm *kvm, gmm_struct_t *gmm) +{ + struct kvm_mmu_page *sp, *nsp; + + if (kvm_is_empty_gmm_spt_list(gmm)) + return; + + DebugFGMM("gmm #%d before SP list release has 0x%lx SPs\n", + gmm->nid.nr, gmm->spt_list_size); + + list_for_each_entry_safe(sp, nsp, &gmm->spt_list, gmm_entry) { + DebugFGMM("gmm #%d : SP #%ld for GFN 0x%llx, role 0x%x " + "GVA 0x%lx\n", + gmm->nid.nr, gmm->spt_list_size, sp->gfn, + sp->role.word, sp->gva); + kvm_mmu_free_page(kvm, sp); + } + + DebugFGMM("gmm #%d after release has 0x%lx SPs, total released 0x%lx\n", + gmm->nid.nr, gmm->spt_list_size, gmm->total_released); +} +#else /* !CONFIG_GUEST_MM_SPT_LIST */ +static inline size_t kvm_get_gmm_spt_list_size(gmm_struct_t *gmm) +{ + return 0; +} +static inline size_t kvm_get_gmm_spt_total_released(gmm_struct_t *gmm) +{ + return 0; +} +static inline void +kvm_init_sp_gmm_entry(struct kvm_mmu_page *sp) +{ +} +static inline bool kvm_is_empty_gmm_spt_list(gmm_struct_t *gmm) +{ + return true; +} +static inline bool kvm_is_not_empty_gmm_spt_list(gmm_struct_t *gmm) +{ + /* should be not empty at any case */ + return true; +} +static inline void +kvm_add_sp_to_gmm_list(gmm_struct_t *gmm, struct kvm_mmu_page *sp) +{ +} +static inline void +kvm_try_add_sp_to_gmm_list(gmm_struct_t *gmm, struct kvm_mmu_page *sp) +{ +} +static inline void +kvm_set_root_gmm_spt_list(gmm_struct_t *gmm) +{ +} +static inline void +kvm_init_root_gmm_spt_list(gmm_struct_t *gmm, struct kvm_mmu_page *root_sp) +{ +} +static inline void +kvm_delete_sp_from_the_gmm_list(gmm_struct_t *gmm, struct kvm_mmu_page *sp) +{ +} +static inline void +kvm_delete_sp_from_gmm_list(struct kvm_mmu_page *sp) +{ +} +static inline gmm_struct_t * +kvm_get_page_fault_gmm(struct kvm_vcpu *vcpu, u32 error_code) +{ + return NULL; +} +static 
inline gmm_struct_t * +kvm_get_faulted_addr_gmm(struct kvm_vcpu *vcpu, gva_t faulted_gva) +{ + return NULL; +} +static inline void +kvm_delete_gmm_sp_list(struct kvm *kvm, gmm_struct_t *gmm) +{ +} +#endif /* CONFIG_GUEST_MM_SPT_LIST */ + +#endif /* __KVM_E2K_MMAN_H */ diff --git a/arch/e2k/kvm/mmu-e2k.c b/arch/e2k/kvm/mmu-e2k.c new file mode 100644 index 000000000000..935fd9484364 --- /dev/null +++ b/arch/e2k/kvm/mmu-e2k.c @@ -0,0 +1,10302 @@ +/* + * Kernel-based Virtual Machine MMU driver for Linux + * + * This module enables machines with e2k hardware virtualization extensions + * to run virtual machines without emulation or binary translation. + * + * Based on x86 MMU virtualization ideas and sources: + * arch/x86/kvm/mmu.c + * arch/x86/kvm/mmu.h + * arch/x86/kvm/paging_tmpl.h + * + * Copyright 2018 MCST. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "cpu.h" +#include "mmu.h" +#include "mman.h" +#include "gaccess.h" +#include "intercepts.h" +#include "io.h" + +/* + * When setting this variable to true it enables Two-Dimensional-Paging + * where the hardware walks 2 page tables: + * 1. the guest-virtual to guest-physical + * 2. while doing 1. it walks guest-physical to host-physical + * If the hardware supports that we don't need to do shadow paging. 
+ */ +bool tdp_enabled = false; + +enum { + AUDIT_PRE_PAGE_FAULT, + AUDIT_POST_PAGE_FAULT, + AUDIT_PRE_PTE_WRITE, + AUDIT_POST_PTE_WRITE, + AUDIT_PRE_SYNC, + AUDIT_POST_SYNC +}; + +#define HW_REEXECUTE_IS_SUPPORTED true +#define HW_MOVE_TO_TC_IS_SUPPORTED true + +#ifdef DEBUG +#define ASSERT(x) \ +do { \ + if (!(x)) { \ + printk(KERN_EMERG "assertion failed %s: %d: %s\n", \ + __FILE__, __LINE__, #x); \ + BUG(); \ + } \ +} while (0) +#else +#define ASSERT(x) do { } while (0) +#endif + +#define MMU_DEBUG + +#ifdef MMU_DEBUG +static bool dbg = false; +module_param(dbg, bool, 0644); + +#define pgprintk(x...) do { if (sync_dbg) printk(x); } while (false) +#define rmap_printk(x...) do { if (sync_dbg) printk(x); } while (false) +#define MMU_WARN_ON(x) WARN_ON(x) +#define MMU_BUG_ON(x) BUG_ON(x) +#else /* ! MMU_DEBUG */ +#define pgprintk(x...) do { } while (false) +#define rmap_printk(x...) do { } while (false) +#define MMU_WARN_ON(x) WARN_ON(x) +#define MMU_BUG_ON(x) BUG_ON(x) +#endif /* MMU_DEBUG */ + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_SYNC_ROOTS_MODE +#undef DebugSYNC +#define DEBUG_SYNC_ROOTS_MODE 0 /* PT roots alloc and sync debugging */ +#define DebugSYNC(fmt, args...) \ +({ \ + if (DEBUG_SYNC_ROOTS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PT_RANGE_SYNC_MODE +#undef DebugPTSYNC +#define DEBUG_PT_RANGE_SYNC_MODE 0 /* PT range sync debug */ +#define DebugPTSYNC(fmt, args...) \ +({ \ + if (DEBUG_PT_RANGE_SYNC_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_TO_VIRT_MODE +#undef DebugTOVM +#define DEBUG_KVM_TO_VIRT_MODE 0 /* switch guest to virtual mode */ +#define DebugTOVM(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_TO_VIRT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_SET_PAGING_MODE +#undef DebugSETPM +#define DEBUG_SET_PAGING_MODE 0 /* setup guest paging mode */ +#define DebugSETPM(fmt, args...) \ +({ \ + if (DEBUG_SET_PAGING_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_REEXEC_PF_MODE +#undef DebugREEXEC +#define DEBUG_REEXEC_PF_MODE 0 /* reexecute load and wait debugging */ +#define DebugREEXEC(fmt, args...) \ +({ \ + if (DEBUG_REEXEC_PF_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +bool sync_dbg = false; + +#undef DEBUG_KVM_SYNC_VERBOSE_MODE +#undef DebugSYNCV +#define DEBUG_KVM_SYNC_VERBOSE_MODE 0 /* new PT synchronizatiom */ + /* verbose mode */ +#define DebugSYNCV(fmt, args...) \ +({ \ + if (DEBUG_KVM_SYNC_VERBOSE_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_UNSYNC_MODE +#undef DebugUNSYNC +#define DEBUG_KVM_UNSYNC_MODE 0 /* PT unsynchronizatiom mode */ +#define DebugUNSYNC(fmt, args...) \ +({ \ + if (DEBUG_KVM_UNSYNC_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_SHADOW_PT_MODE +#undef DebugSPT +#define DEBUG_KVM_SHADOW_PT_MODE 0 /* shadow PT manage */ +#define DebugSPT(fmt, args...) \ +({ \ + if (DEBUG_KVM_SHADOW_PT_MODE) { \ + pr_info("%s(): " fmt, __func__, ##args); \ + } \ +}) + +#undef DEBUG_KVM_GMM_FREE_MODE +#undef DebugFREE +#define DEBUG_KVM_GMM_FREE_MODE 0 /* guest mm PT freeing debug */ +#define DebugFREE(fmt, args...) \ +({ \ + if (DEBUG_KVM_GMM_FREE_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_PAGE_FAULT_MODE +#undef DebugSPF +#define DEBUG_KVM_PAGE_FAULT_MODE 0 /* page fault manage */ +#define DebugSPF(fmt, args...) \ +({ \ + if (DEBUG_KVM_PAGE_FAULT_MODE) { \ + pr_info("%s(): " fmt, __func__, ##args); \ + } \ +}) + +#undef DEBUG_KVM_SPT_WALK_MODE +#undef DebugWSPT +#define DEBUG_KVM_SPT_WALK_MODE 0 /* walk all SPT levels */ +#define DebugWSPT(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_SPT_WALK_MODE) { \ + pr_info("%s(): " fmt, __func__, ##args); \ + } \ +}) + +#undef DEBUG_KVM_NONPAGING_MODE +#undef DebugNONP +#define DEBUG_KVM_NONPAGING_MODE 0 /* nonpaging mode debug */ +#define DebugNONP(fmt, args...) \ +({ \ + if (DEBUG_KVM_NONPAGING_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_TDP_MODE +#undef DebugTDP +#define DEBUG_KVM_TDP_MODE 0 /* TDP mode debug */ +#define DebugTDP(fmt, args...) \ +({ \ + if (DEBUG_KVM_TDP_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_INTC_PAGE_FAULT_MODE +#undef DebugPFINTC +#define DEBUG_INTC_PAGE_FAULT_MODE 0 /* MMU intercept on data */ + /* page fault mode debug */ +#define DebugPFINTC(fmt, args...) \ +({ \ + if (DEBUG_INTC_PAGE_FAULT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_INSTR_FAULT_MODE +#undef DebugIPF +#define DEBUG_KVM_INSTR_FAULT_MODE 0 /* instruction page fault */ + /* mode debug */ +#define DebugIPF(fmt, args...) \ +({ \ + if (DEBUG_KVM_INSTR_FAULT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_PTE_MODE +#undef DebugPTE +#define DEBUG_KVM_PTE_MODE 0 /* guest PTE update/write debug */ +#define DebugPTE(fmt, args...) \ +({ \ + if (DEBUG_KVM_PTE_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_SHADOW_INJECT_MODE +#undef DebugSHINJ +#define DEBUG_SHADOW_INJECT_MODE 0 /* shadow page faults */ + /* injection debug */ +#define DebugSHINJ(fmt, args...) \ +({ \ + if (DEBUG_SHADOW_INJECT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_READ_PROT_INJECT_MODE +#undef DebugRPROT +#define DEBUG_READ_PROT_INJECT_MODE 0 /* shadow page faults on */ + /* load after store debug */ +#define DebugRPROT(fmt, args...) 
\ +({ \ + if (DEBUG_READ_PROT_INJECT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_COPY_SPT_MODE +#undef DebugCPSPT +#define DEBUG_COPY_SPT_MODE 0 /* copy guest kernel SPT range */ +#define DebugCPSPT(fmt, args...) \ +({ \ + if (DEBUG_COPY_SPT_MODE) \ + pr_err("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_FREE_SPT_MODE +#undef DebugFRSPT +#define DEBUG_FREE_SPT_MODE 0 /* free guest kernel SPT range */ +#define DebugFRSPT(fmt, args...) \ +({ \ + if (DEBUG_FREE_SPT_MODE) \ + pr_err("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_GUEST_MM_MODE +#undef DebugGMM +#define DEBUG_KVM_GUEST_MM_MODE 0 /* guest MM support */ +#define DebugGMM(fmt, args...) \ +({ \ + if (DEBUG_KVM_GUEST_MM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_FLOOD_MODE +#undef DebugFLOOD +#define DEBUG_KVM_FLOOD_MODE 0 /* host SP flood support */ +#define DebugFLOOD(fmt, args...) \ +({ \ + if (DEBUG_KVM_FLOOD_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_FREE_SP_MODE +#undef DebugZAP +#define DEBUG_KVM_FREE_SP_MODE 0 /* host SP free debug */ +#define DebugZAP(fmt, args...) \ +({ \ + if (DEBUG_KVM_FREE_SP_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_EXEC_MMU_OP +#undef DbgEXMMU +#define DEBUG_EXEC_MMU_OP 0 /* recovery operations debug */ +#define DbgEXMMU(fmt, args...) \ +({ \ + if (DEBUG_EXEC_MMU_OP) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PF_RETRY_MODE +#undef DebugTRY +#define DEBUG_PF_RETRY_MODE 0 /* retry page fault debug */ +#define DebugTRY(fmt, args...) \ +({ \ + if (DEBUG_PF_RETRY_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PF_EXC_RPR_MODE +#undef DebugEXCRPR +#define DEBUG_PF_EXC_RPR_MODE 0 /* page fault at recovery mode debug */ +#define DebugEXCRPR(fmt, args...) 
\ +({ \ + if (DEBUG_PF_EXC_RPR_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_SHUTDOWN_MODE +#undef DebugKVMSH +#define DEBUG_KVM_SHUTDOWN_MODE 0 /* KVM shutdown debugging */ +#define DebugKVMSH(fmt, args...) \ +({ \ + if (DEBUG_KVM_SHUTDOWN_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#define PTE_PREFETCH_NUM 8 + +#define PT64_LEVEL_BITS PT64_ENTRIES_BITS + +#define PT32_LEVEL_BITS PT32_ENTRIES_BITS + +#define PT64_PERM_MASK(kvm) \ + (PT_PRESENT_MASK | PT_WRITABLE_MASK | \ + get_spte_user_mask(kvm) | get_spte_priv_mask(kvm) | \ + get_spte_x_mask(kvm) | get_spte_nx_mask(kvm)) + +/* number of retries to handle page fault */ +#define PF_RETRIES_MAX_NUM 1 +/* common number of one try and retries to handle page fault */ +#define PF_TRIES_MAX_NUM (1 + PF_RETRIES_MAX_NUM) + +#include + +#define CREATE_TRACE_POINTS +#include "mmutrace-e2k.h" + +#define SPTE_HOST_WRITABLE_SW_MASK(__spt) ((__spt)->sw_bit1_mask) +#define SPTE_MMU_WRITABLE_SW_MASK(__spt) ((__spt)->sw_bit2_mask) + +#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) + +/* make pte_list_desc fit well in cache line */ +#define PTE_LIST_EXT 3 + +typedef struct pte_list_desc { + pgprot_t *sptes[PTE_LIST_EXT]; + struct pte_list_desc *more; +} pte_list_desc_t; + +static struct kmem_cache *pte_list_desc_cache; +static struct kmem_cache *mmu_page_header_cache; +static struct percpu_counter kvm_total_used_mmu_pages; + +static pgprot_t set_spte_pfn(struct kvm *kvm, pgprot_t spte, kvm_pfn_t pfn); +static int e2k_walk_shadow_pts(struct kvm_vcpu *vcpu, gva_t addr, + kvm_shadow_trans_t *st, hpa_t spt_root); + +static void mmu_spte_set(struct kvm *kvm, pgprot_t *sptep, pgprot_t spte); +static void mmu_free_roots(struct kvm_vcpu *vcpu, unsigned flags); +static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp); +static int kvm_sync_shadow_root(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + hpa_t root_hpa, unsigned flags); + +/* + * the low bit of the 
generation number is always presumed to be zero. + * This disables mmio caching during memslot updates. The concept is + * similar to a seqcount but instead of retrying the access we just punt + * and ignore the cache. + * + * spte bits 5-11 are used as bits 1-7 of the generation number, + * the bits 48-57 are used as bits 8-17 of the generation number. + */ +#define MMIO_SPTE_GEN_LOW_SHIFT 4 +#define MMIO_SPTE_GEN_HIGH_SHIFT 48 + +#define MMIO_GEN_SHIFT 18 +#define MMIO_GEN_LOW_SHIFT 8 +#define MMIO_GEN_LOW_MASK ((1 << MMIO_GEN_LOW_SHIFT) - 2) +#define MMIO_GEN_MASK ((1 << MMIO_GEN_SHIFT) - 1) + +static pgprotval_t get_spte_mmio_mask(struct kvm *kvm) +{ + const pt_struct_t *host_pt = kvm_get_host_pt_struct(kvm); + + return host_pt->sw_mmio_mask; +} + +static u64 generation_mmio_spte_mask(unsigned int gen) +{ + u64 mask; + + WARN_ON(gen & ~MMIO_GEN_MASK); + + mask = (gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT; + mask |= ((u64)gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT; + return mask; +} + +static unsigned int get_mmio_spte_generation(struct kvm *kvm, pgprotval_t spte) +{ + unsigned int gen; + + spte &= ~get_spte_mmio_mask(kvm); + + gen = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK; + gen |= (spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT; + return gen; +} + +static unsigned int kvm_current_mmio_generation(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_memslots(vcpu)->generation & MMIO_GEN_MASK; +} + +static bool is_mmio_spte(struct kvm *kvm, pgprot_t spte) +{ + pgprotval_t mmio_mask = get_spte_mmio_mask(kvm); + + return (pgprot_val(spte) & mmio_mask) == mmio_mask; +} + +static gfn_t get_mmio_spte_gfn(struct kvm *kvm, pgprot_t spte) +{ + u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | + get_spte_mmio_mask(kvm); + return (pgprot_val(spte) & ~mask) >> PAGE_SHIFT; +} + +static unsigned get_mmio_spte_access(struct kvm *kvm, pgprot_t spte) +{ + u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | + get_spte_mmio_mask(kvm); + 
return (pgprot_val(spte) & ~mask) & ~PAGE_MASK; +} + +static bool check_mmio_spte(struct kvm_vcpu *vcpu, pgprot_t spte) +{ + unsigned int kvm_gen, spte_gen; + + kvm_gen = kvm_current_mmio_generation(vcpu); + spte_gen = get_mmio_spte_generation(vcpu->kvm, pgprot_val(spte)); + + trace_check_mmio_spte(spte, kvm_gen, spte_gen); + return likely(kvm_gen == spte_gen); +} + +static pgprotval_t get_spte_bit_mask(struct kvm *kvm, + bool accessed, bool dirty, bool present, bool valid) +{ + const pt_struct_t *host_pt = kvm_get_host_pt_struct(kvm); + pgprotval_t mask = 0; + + if (accessed) + mask |= host_pt->accessed_mask; + if (dirty) + mask |= host_pt->dirty_mask; + if (present) + mask |= host_pt->present_mask; + if (valid) + mask |= host_pt->valid_mask; + return mask; +} +static pgprotval_t get_spte_accessed_mask(struct kvm *kvm) +{ + return get_spte_bit_mask(kvm, true, false, false, false); +} +static pgprotval_t get_spte_dirty_mask(struct kvm *kvm) +{ + return get_spte_bit_mask(kvm, false, true, false, false); +} +static pgprotval_t get_spte_present_mask(struct kvm *kvm) +{ + return get_spte_bit_mask(kvm, false, false, true, false); +} +static pgprotval_t get_spte_valid_mask(struct kvm *kvm) +{ + return get_spte_bit_mask(kvm, false, false, false, true); +} +static pgprotval_t get_spte_present_valid_mask(struct kvm *kvm) +{ + return get_spte_bit_mask(kvm, false, false, true, true); +} + +pgprotval_t get_gpte_valid_mask(struct kvm_vcpu *vcpu) +{ + const pt_struct_t *gpt = kvm_get_vcpu_pt_struct(vcpu); + + return gpt->valid_mask; +} + +pgprotval_t get_gpte_unmapped_mask(struct kvm_vcpu *vcpu) +{ + return (pgprotval_t) 0; +} + +pgprot_t set_spte_bit_mask(struct kvm *kvm, pgprot_t spte, + bool accessed, bool dirty, bool present, bool valid) +{ + const pt_struct_t *host_pt = kvm_get_host_pt_struct(kvm); + pgprotval_t mask = 0; + + if (accessed) + mask |= host_pt->accessed_mask; + if (dirty) + mask |= host_pt->dirty_mask; + if (present) + mask |= host_pt->present_mask; + if 
(valid) + mask |= host_pt->valid_mask; + spte = __pgprot(pgprot_val(spte) | mask); + return spte; +} + +static bool is_spte_accessed_mask(struct kvm *kvm, pgprot_t spte) +{ + pgprotval_t mask; + + mask = get_spte_accessed_mask(kvm); + return pgprot_val(spte) & mask; +} +static pgprot_t set_spte_accessed_mask(struct kvm *kvm, pgprot_t spte) +{ + pgprotval_t mask; + + mask = get_spte_accessed_mask(kvm); + return __pgprot(pgprot_val(spte) | mask); +} +static pgprot_t clear_spte_accessed_mask(struct kvm *kvm, pgprot_t spte) +{ + pgprotval_t mask; + + mask = get_spte_accessed_mask(kvm); + return __pgprot(pgprot_val(spte) & ~mask); +} + +pgprotval_t get_pte_mode_mask(const pt_struct_t *pt_struct) +{ + if (pt_struct->user_mask != 0) + return pt_struct->user_mask; + else if (pt_struct->priv_mask != 0) + return pt_struct->priv_mask; + else + /* pte has not user or priv mode */ + ; + return (pgprotval_t) 0; +} + +pgprotval_t get_spte_mode_mask(struct kvm *kvm) +{ + const pt_struct_t *spt = kvm_get_host_pt_struct(kvm); + + return get_pte_mode_mask(spt); +} + +pgprotval_t get_gpte_mode_mask(struct kvm_vcpu *vcpu) +{ + const pt_struct_t *gpt = kvm_get_vcpu_pt_struct(vcpu); + + return get_pte_mode_mask(gpt); +} + +pgprotval_t get_spte_user_mask(struct kvm *kvm) +{ + const pt_struct_t *spt = kvm_get_host_pt_struct(kvm); + + return spt->user_mask; +} + +pgprotval_t get_gpte_user_mask(struct kvm_vcpu *vcpu) +{ + const pt_struct_t *gpt = kvm_get_vcpu_pt_struct(vcpu); + + return gpt->user_mask; +} + +pgprotval_t get_spte_priv_mask(struct kvm *kvm) +{ + const pt_struct_t *spt = kvm_get_host_pt_struct(kvm); + + return spt->priv_mask; +} + +pgprotval_t get_gpte_priv_mask(struct kvm_vcpu *vcpu) +{ + const pt_struct_t *gpt = kvm_get_vcpu_pt_struct(vcpu); + + return gpt->priv_mask; +} + +bool is_spte_user_mask(struct kvm *kvm, pgprot_t spte) +{ + const pt_struct_t *spt = kvm_get_host_pt_struct(kvm); + + if (spt->user_mask != 0) + return pgprot_val(spte) & spt->user_mask; + else if 
(spt->priv_mask != 0) + return !(pgprot_val(spte) & spt->priv_mask); + else + /* pte has not user or priv mode */ + ; + return false; +} +pgprot_t set_spte_user_mask(struct kvm *kvm, pgprot_t spte) +{ + const pt_struct_t *spt = kvm_get_host_pt_struct(kvm); + + if (spt->user_mask != 0) + return __pgprot(pgprot_val(spte) | spt->user_mask); + else if (spt->priv_mask != 0) + return __pgprot(pgprot_val(spte) & ~spt->priv_mask); + else + /* pte has not user or priv mode */ + ; + return spte; +} +pgprot_t clear_spte_user_mask(struct kvm *kvm, pgprot_t spte) +{ + const pt_struct_t *spt = kvm_get_host_pt_struct(kvm); + + if (kvm->arch.is_pv && !kvm->arch.is_hv) + /* software paravirtualized guest */ + /* can be run only at user mode */ + return spte; + if (spt->user_mask != 0) + return __pgprot(pgprot_val(spte) & ~spt->user_mask); + else if (spt->priv_mask != 0) + return __pgprot(pgprot_val(spte) | spt->priv_mask); + else + /* pte has not user or priv mode */ + ; + return spte; +} + +bool is_spte_priv_mask(struct kvm *kvm, pgprot_t spte) +{ + const pt_struct_t *spt = kvm_get_host_pt_struct(kvm); + + if (spt->priv_mask != 0) + return pgprot_val(spte) & spt->priv_mask; + else if (spt->user_mask != 0) + return !(pgprot_val(spte) & spt->user_mask); + else + /* pte has not user or priv mode */ + return true; /* always privileged */ + return false; +} +pgprot_t set_spte_priv_mask(struct kvm *kvm, pgprot_t spte) +{ + const pt_struct_t *spt = kvm_get_host_pt_struct(kvm); + + if (kvm->arch.is_pv && !kvm->arch.is_hv) + /* software paravirtualized guest */ + /* can be run only at user mode */ + return spte; + if (spt->priv_mask != 0) + return __pgprot(pgprot_val(spte) | spt->priv_mask); + else if (spt->user_mask != 0) + return __pgprot(pgprot_val(spte) & ~spt->user_mask); + else + /* pte has not user or priv mode */ + ; + return spte; +} +pgprot_t clear_spte_priv_mask(struct kvm *kvm, pgprot_t spte) +{ + const pt_struct_t *spt = kvm_get_host_pt_struct(kvm); + + if (spt->priv_mask != 
0) + return __pgprot(pgprot_val(spte) & ~spt->priv_mask); + else if (spt->user_mask != 0) + return __pgprot(pgprot_val(spte) | spt->user_mask); + else + /* pte has not user or priv mode */ + ; + return spte; +} +static bool is_spte_dirty_mask(struct kvm *kvm, pgprot_t spte) +{ + pgprotval_t mask; + + mask = get_spte_dirty_mask(kvm); + return pgprot_val(spte) & mask; +} +static pgprot_t set_spte_dirty_mask(struct kvm *kvm, pgprot_t spte) +{ + pgprotval_t mask; + + mask = get_spte_dirty_mask(kvm); + return __pgprot(pgprot_val(spte) | mask); +} +static pgprot_t set_spte_cui(pgprot_t spte, u64 cui) +{ + return !cpu_has(CPU_FEAT_ISET_V6) ? __pgprot(pgprot_val(spte) | + _PAGE_INDEX_TO_CUNIT_V2(cui)) : spte; +} +static pgprot_t clear_spte_dirty_mask(struct kvm *kvm, pgprot_t spte) +{ + pgprotval_t mask; + + mask = get_spte_dirty_mask(kvm); + return __pgprot(pgprot_val(spte) & ~mask); +} +bool is_spte_present_mask(struct kvm *kvm, pgprot_t spte) +{ + pgprotval_t mask; + + mask = get_spte_present_mask(kvm); + return pgprot_val(spte) & mask; +} +pgprot_t set_spte_present_mask(struct kvm *kvm, pgprot_t spte) +{ + pgprotval_t mask; + + mask = get_spte_present_mask(kvm); + return __pgprot(pgprot_val(spte) | mask); +} +pgprot_t clear_spte_present_mask(struct kvm *kvm, pgprot_t spte) +{ + pgprotval_t mask; + + mask = get_spte_present_mask(kvm); + return __pgprot(pgprot_val(spte) & ~mask); +} +bool is_spte_valid_mask(struct kvm *kvm, pgprot_t spte) +{ + pgprotval_t mask; + + mask = get_spte_valid_mask(kvm); + return pgprot_val(spte) & mask; +} +pgprot_t set_spte_valid_mask(struct kvm *kvm, pgprot_t spte) +{ + pgprotval_t mask; + + mask = get_spte_valid_mask(kvm); + return __pgprot(pgprot_val(spte) | mask); +} +pgprot_t clear_spte_valid_mask(struct kvm *kvm, pgprot_t spte) +{ + pgprotval_t mask; + + mask = get_spte_valid_mask(kvm); + return __pgprot(pgprot_val(spte) & ~mask); +} +pgprot_t set_spte_present_valid_mask(struct kvm *kvm, pgprot_t spte) +{ + pgprotval_t mask; + + mask = 
get_spte_present_valid_mask(kvm); + return __pgprot(pgprot_val(spte) | mask); +} + +pgprotval_t get_spte_x_mask(struct kvm *kvm) +{ + const pt_struct_t *spt = kvm_get_host_pt_struct(kvm); + + return spt->exec_mask; +} +static pgprotval_t get_pte_nx_mask(const pt_struct_t *pt_struct) +{ + return pt_struct->non_exec_mask; +} +static pgprotval_t get_spte_nx_mask(struct kvm *kvm) +{ + const pt_struct_t *spt = kvm_get_host_pt_struct(kvm); + + return get_pte_nx_mask(spt); +} +static pgprotval_t get_gpte_nx_mask(struct kvm_vcpu *vcpu) +{ + const pt_struct_t *gpt = kvm_get_vcpu_pt_struct(vcpu); + + return get_pte_nx_mask(gpt); +} +bool is_spte_x_mask(struct kvm *kvm, pgprot_t spte) +{ + const pt_struct_t *spt = kvm_get_host_pt_struct(kvm); + + if (spt->exec_mask != 0) + return pgprot_val(spte) & spt->exec_mask; + else if (spt->non_exec_mask != 0) + return !(pgprot_val(spte) & spt->non_exec_mask); + else + /* pte has not executable field */ + return true; /* always executable */ + return false; +} +static pgprot_t set_spte_x_mask(struct kvm *kvm, pgprot_t spte) +{ + const pt_struct_t *spt = kvm_get_host_pt_struct(kvm); + + if (spt->exec_mask != 0) + return __pgprot(pgprot_val(spte) | spt->exec_mask); + else if (spt->non_exec_mask != 0) + return __pgprot(pgprot_val(spte) & ~spt->non_exec_mask); + else + /* pte has not executable field */ + ; + return spte; +} +pgprot_t clear_spte_x_mask(struct kvm *kvm, pgprot_t spte) +{ + const pt_struct_t *spt = kvm_get_host_pt_struct(kvm); + + if (spt->exec_mask != 0) + return __pgprot(pgprot_val(spte) & ~spt->exec_mask); + else if (spt->non_exec_mask != 0) + return __pgprot(pgprot_val(spte) | spt->non_exec_mask); + else + /* pte has not executable field */ + ; + return spte; +} +bool is_spte_nx_mask(struct kvm *kvm, pgprot_t spte) +{ + const pt_struct_t *spt = kvm_get_host_pt_struct(kvm); + + if (spt->exec_mask != 0) + return !(pgprot_val(spte) & spt->exec_mask); + else if (spt->non_exec_mask != 0) + return pgprot_val(spte) & 
spt->non_exec_mask; + else + /* pte has not executable field */ + return true; /* always can be not executable */ + return false; +} +static pgprot_t set_spte_nx_mask(struct kvm *kvm, pgprot_t spte) +{ + const pt_struct_t *spt = kvm_get_host_pt_struct(kvm); + + if (spt->exec_mask != 0) + return __pgprot(pgprot_val(spte) & ~spt->exec_mask); + else if (spt->non_exec_mask != 0) + return __pgprot(pgprot_val(spte) | spt->non_exec_mask); + else + /* pte has not executable field */ + ; + return spte; +} +pgprot_t clear_spte_nx_mask(struct kvm *kvm, pgprot_t spte) +{ + const pt_struct_t *spt = kvm_get_host_pt_struct(kvm); + + if (spt->exec_mask != 0) + return __pgprot(pgprot_val(spte) | spt->exec_mask); + else if (spt->non_exec_mask != 0) + return __pgprot(pgprot_val(spte) & ~spt->non_exec_mask); + else + /* pte has not executable field */ + ; + return spte; +} +bool is_spte_huge_page_mask(struct kvm *kvm, pgprot_t spte) +{ + return pgprot_val(spte) & PT_PAGE_SIZE_MASK; +} +static pgprot_t set_spte_huge_page_mask(struct kvm *kvm, pgprot_t spte) +{ + return __pgprot(pgprot_val(spte) | PT_PAGE_SIZE_MASK); +} +pgprot_t clear_spte_huge_page_mask(struct kvm *kvm, pgprot_t spte) +{ + return __pgprot(pgprot_val(spte) & ~PT_PAGE_SIZE_MASK); +} +static bool is_spte_writable_mask(struct kvm *kvm, pgprot_t spte) +{ + return pgprot_val(spte) & PT_WRITABLE_MASK; +} +static pgprot_t set_spte_writable_mask(struct kvm *kvm, pgprot_t spte) +{ + return __pgprot(pgprot_val(spte) | PT_WRITABLE_MASK); +} +static pgprot_t clear_spte_writable_mask(struct kvm *kvm, pgprot_t spte) +{ + return __pgprot(pgprot_val(spte) & ~PT_WRITABLE_MASK); +} + +static pgprotval_t get_spte_sw_mask(struct kvm *kvm, bool host, bool mmu) +{ + const pt_struct_t *spt = kvm_get_host_pt_struct(kvm); + pgprotval_t mask = 0; + + if (host) + mask |= SPTE_HOST_WRITABLE_SW_MASK(spt); + if (mmu) + mask |= SPTE_MMU_WRITABLE_SW_MASK(spt); + return mask; +} +static bool is_spte_sw_writable_mask(struct kvm *kvm, pgprot_t spte, + 
bool host, bool mmu) +{ + pgprotval_t sw_mask = 0; + + sw_mask = get_spte_sw_mask(kvm, host, mmu); + return pgprot_val(spte) & sw_mask; +} +static bool is_spte_all_sw_writable_mask(struct kvm *kvm, pgprot_t spte) +{ + pgprotval_t sw_mask = 0; + + sw_mask = get_spte_sw_mask(kvm, true, true); + return (pgprot_val(spte) & sw_mask) == sw_mask; +} +static pgprot_t set_spte_sw_writable_mask(struct kvm *kvm, pgprot_t spte, + bool host, bool mmu) +{ + pgprotval_t sw_mask = 0; + + sw_mask = get_spte_sw_mask(kvm, host, mmu); + return __pgprot(pgprot_val(spte) | sw_mask); +} +static pgprot_t clear_spte_sw_writable_mask(struct kvm *kvm, pgprot_t spte, + bool host, bool mmu) +{ + pgprotval_t sw_mask = 0; + + sw_mask = get_spte_sw_mask(kvm, host, mmu); + return __pgprot(pgprot_val(spte) & ~sw_mask); +} +bool is_spte_host_writable_mask(struct kvm *kvm, pgprot_t spte) +{ + return is_spte_sw_writable_mask(kvm, spte, true, false); +} +bool is_spte_mmu_writable_mask(struct kvm *kvm, pgprot_t spte) +{ + return is_spte_sw_writable_mask(kvm, spte, false, true); +} +static pgprot_t set_spte_host_writable_mask(struct kvm *kvm, pgprot_t spte) +{ + return set_spte_sw_writable_mask(kvm, spte, true, false); +} +static pgprot_t set_spte_mmu_writable_mask(struct kvm *kvm, pgprot_t spte) +{ + return set_spte_sw_writable_mask(kvm, spte, false, true); +} +static pgprot_t clear_spte_host_writable_mask(struct kvm *kvm, pgprot_t spte) +{ + return clear_spte_sw_writable_mask(kvm, spte, true, false); +} +static pgprot_t clear_spte_mmu_writable_mask(struct kvm *kvm, pgprot_t spte) +{ + return clear_spte_sw_writable_mask(kvm, spte, false, true); +} + +static pgprotval_t get_spte_pt_user_prot(struct kvm *kvm) +{ + const pt_struct_t *spt = kvm_get_host_pt_struct(kvm); + + return spt->ptd_user_prot; +} + +static pgprotval_t get_spte_pt_kernel_prot(struct kvm *kvm) +{ + const pt_struct_t *spt = kvm_get_host_pt_struct(kvm); + + return spt->ptd_kernel_prot; +} + +static pgprot_t 
set_spte_memory_type_mask(struct kvm_vcpu *vcpu, pgprot_t spte, + gfn_t gfn, bool is_mmio) +{ + const pt_struct_t *spt = kvm_get_host_pt_struct(vcpu->kvm); + unsigned int mem_type; + + /* + * FIXME: here comments for x86, probably it can be useful for e2k, + * so keep its + * For VT-d and EPT combination + * 1. MMIO: always map as UC + * 2. EPT with VT-d: + * a. VT-d without snooping control feature: can't guarantee the + * result, try to trust guest. + * b. VT-d with snooping control feature: snooping control feature of + * VT-d engine can guarantee the cache correctness. Just set it + * to WB to keep consistent with host. So the same as item 3. + * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep + * consistent with host MTRR + */ + + /* + * FIXME: now is implemented only two case of memory type + * a. MMIO: always map as "External Configuration" + * b. Physical memory: always map as "General Cacheable" + */ + if (unlikely(is_mmio_prefixed_gfn(vcpu, gfn))) + mem_type = EXT_NON_PREFETCH_MT; + else + if (is_mmio) + mem_type = EXT_CONFIG_MT; + else + mem_type = GEN_CACHE_MT; + + return spt->set_pte_val_memory_type(spte, mem_type); +} + +static void mark_mmio_spte(struct kvm_vcpu *vcpu, pgprot_t *sptep, u64 gfn, + unsigned access) +{ + unsigned int gen = kvm_current_mmio_generation(vcpu); + u64 mask = generation_mmio_spte_mask(gen); + pgprot_t spte; + + access &= ACC_WRITE_MASK | ACC_USER_MASK; + mask |= get_spte_mmio_mask(vcpu->kvm) | access | gfn << PAGE_SHIFT; + + pgprot_val(spte) = (get_spte_valid_mask(vcpu->kvm) | mask); + + spte = set_spte_memory_type_mask(vcpu, spte, gfn, true); + + trace_mark_mmio_spte(sptep, gfn, access, gen); + mmu_spte_set(vcpu->kvm, sptep, spte); +} + +static void mark_mmio_space_spte(struct kvm_vcpu *vcpu, pgprot_t *sptep, + u64 gfn, unsigned access) +{ + unsigned int gen = kvm_current_mmio_generation(vcpu); + u64 mask = generation_mmio_spte_mask(gen); + pgprot_t spte; + + mask |= get_spte_mmio_mask(vcpu->kvm) | gfn << 
PAGE_SHIFT; + + pgprot_val(spte) = (get_spte_valid_mask(vcpu->kvm) | mask); + + spte = set_spte_memory_type_mask(vcpu, spte, gfn, true); + + mmu_spte_set(vcpu->kvm, sptep, spte); +} + +static void mark_mmio_prefixed_spte(struct kvm_vcpu *vcpu, pgprot_t *sptep, gfn_t gfn, + kvm_pfn_t pfn, int level, unsigned access) +{ + struct kvm *kvm = vcpu->kvm; + u64 mask; + pgprot_t spte; + + mask = get_spte_mmio_mask(kvm); + + pgprot_val(spte) = get_spte_present_valid_mask(kvm); + pgprot_val(spte) |= mask; + DebugSPF("spte %px initial value 0x%lx\n", + sptep, pgprot_val(spte)); + + spte = set_spte_nx_mask(kvm, spte); + + spte = set_spte_priv_mask(kvm, spte); + + if (level > PT_PAGE_TABLE_LEVEL) + spte = set_spte_huge_page_mask(kvm, spte); + + spte = set_spte_memory_type_mask(vcpu, spte, gfn, true); + + spte = set_spte_pfn(kvm, spte, pfn); + + if (access & ACC_WRITE_MASK) { + spte = set_spte_writable_mask(kvm, spte); + } + + DebugSPF("spte %px final value 0x%lx\n", + sptep, pgprot_val(spte)); + + mmu_spte_set(vcpu->kvm, sptep, spte); +} + +static bool set_mmio_spte(struct kvm_vcpu *vcpu, pgprot_t *sptep, gfn_t gfn, + kvm_pfn_t pfn, int level, unsigned access) +{ + if (unlikely(is_mmio_prefixed_gfn(vcpu, gfn))) { + mark_mmio_prefixed_spte(vcpu, sptep, gfn, pfn, level, access); + return true; + } + if (unlikely(is_mmio_space_pfn(pfn))) { + mark_mmio_space_spte(vcpu, sptep, gfn, access); + return true; + } + if (unlikely(is_noslot_pfn(pfn))) { + mark_mmio_spte(vcpu, sptep, gfn, access); + return true; + } + + return false; +} + +static int is_nx(struct kvm_vcpu *vcpu) +{ + return true; /* not executable flag is supported */ +} + +static int is_smap(struct kvm_vcpu *vcpu) +{ + pr_err_once("FIXME: %s() secondary PT is not supported\n", __func__); + return false; +} + +static int is_smep(struct kvm_vcpu *vcpu) +{ + pr_err_once("FIXME: %s() secondary PT is not supported\n", __func__); + return false; +} + +static int is_smm(struct kvm_vcpu *vcpu) +{ + pr_err_once("FIXME: %s() 
secondary PT is not supported\n", __func__); + return false; +} + +static bool is_shadow_present_pte(struct kvm *kvm, pgprot_t pte) +{ + return (pgprot_val(pte) != 0) && + pgprot_val(pte) != get_spte_valid_mask(kvm) && + !is_mmio_spte(kvm, pte); +} + +static bool is_shadow_valid_pte(struct kvm *kvm, pgprot_t pte) +{ + return pgprot_val(pte) == get_spte_valid_mask(kvm) || + is_mmio_spte(kvm, pte) && + is_spte_valid_mask(kvm, pte) && + !is_spte_present_mask(kvm, pte); +} + +static bool is_shadow_present_or_valid_pte(struct kvm *kvm, pgprot_t pte) +{ + return is_shadow_present_pte(kvm, pte) || + is_shadow_valid_pte(kvm, pte); +} + +static bool is_large_pte(pgprot_t pte) +{ + return pgprot_val(pte) & PT_PAGE_SIZE_MASK; +} + +static bool is_last_spte(pgprot_t pte, int level) +{ + if (level == PT_PAGE_TABLE_LEVEL) + return true; + if (is_large_pte(pte)) + return true; + return false; +} + +static inline pgprotval_t +kvm_get_pte_pfn_mask(const pt_struct_t *pt) +{ + return pt->pfn_mask; +} +static inline pgprotval_t +kvm_get_spte_pfn_mask(struct kvm *kvm) +{ + const pt_struct_t *spt = kvm_get_host_pt_struct(kvm); + + return kvm_get_pte_pfn_mask(spt); +} + +static inline e2k_addr_t +kvm_pte_pfn_to_phys_addr(pgprot_t pte, const pt_struct_t *pt) +{ + return pgprot_val(pte) & kvm_get_pte_pfn_mask(pt); +} +static inline e2k_addr_t +kvm_spte_pfn_to_phys_addr(struct kvm *kvm, pgprot_t spte) +{ + const pt_struct_t *spt = kvm_get_host_pt_struct(kvm); + + return kvm_pte_pfn_to_phys_addr(spte, spt); +} +static inline gpa_t +kvm_gpte_gfn_to_phys_addr(struct kvm_vcpu *vcpu, pgprot_t gpte) +{ + const pt_struct_t *gpt = kvm_get_vcpu_pt_struct(vcpu); + + return kvm_pte_pfn_to_phys_addr(gpte, gpt); +} + +static kvm_pfn_t spte_to_pfn(struct kvm *kvm, pgprot_t spte) +{ + return kvm_spte_pfn_to_phys_addr(kvm, spte) >> PAGE_SHIFT; +} + +static pgprot_t set_spte_pfn(struct kvm *kvm, pgprot_t spte, kvm_pfn_t pfn) +{ + pgprotval_t pfn_mask = kvm_get_spte_pfn_mask(kvm); + + return 
__pgprot((pgprot_val(spte) & ~pfn_mask) | + ((pfn << PAGE_SHIFT) & pfn_mask)); +} +pgprot_t clear_spte_pfn(struct kvm *kvm, pgprot_t spte) +{ + pgprotval_t pfn_mask = kvm_get_spte_pfn_mask(kvm); + + return __pgprot(pgprot_val(spte) & ~pfn_mask); +} + +void kvm_vmlpt_kernel_spte_set(struct kvm *kvm, pgprot_t *spte, pgprot_t *root) +{ + pgprot_t k_spte = __pgprot(get_spte_pt_kernel_prot(kvm)); + + *spte = set_spte_pfn(kvm, k_spte, __pa(root) >> PAGE_SHIFT); +} + +void kvm_vmlpt_user_spte_set(struct kvm *kvm, pgprot_t *spte, pgprot_t *root) +{ + pgprot_t k_spte = __pgprot(get_spte_pt_user_prot(kvm)); + + *spte = set_spte_pfn(kvm, k_spte, __pa(root) >> PAGE_SHIFT); +} + +static void __set_spte(pgprot_t *sptep, pgprot_t spte) +{ + WRITE_ONCE(*sptep, spte); +} + +static void __update_clear_spte_fast(pgprot_t *sptep, pgprot_t spte) +{ + WRITE_ONCE(*sptep, spte); +} + +static pgprot_t __update_clear_spte_slow(pgprot_t *sptep, pgprot_t spte) +{ + return __pgprot(xchg((pgprotval_t *)sptep, pgprot_val(spte))); +} + +static pgprot_t __get_spte_lockless(pgprot_t *sptep) +{ + return __pgprot(READ_ONCE(*(pgprotval_t *)sptep)); +} + +static bool spte_is_locklessly_modifiable(struct kvm *kvm, pgprot_t spte) +{ + return is_spte_all_sw_writable_mask(kvm, spte); +} + +static bool spte_has_volatile_bits(struct kvm *kvm, pgprot_t spte) +{ + /* + * Always atomically update spte if it can be updated + * out of mmu-lock, it can ensure dirty bit is not lost, + * also, it can help us to get a stable is_writable_pte() + * to ensure tlb flush is not missed. 
	 */

	if (!is_shadow_valid_pte(kvm, spte))
		return false;

	if (spte_is_locklessly_modifiable(kvm, spte))
		return true;

	if (!get_spte_accessed_mask(kvm))
		return false;

	if (!is_shadow_present_pte(kvm, spte))
		return false;

	/* A/D bits already set and cannot change further: nothing volatile left. */
	if (is_spte_accessed_mask(kvm, spte) &&
		(!is_writable_pte(spte) || is_spte_dirty_mask(kvm, spte)))
		return false;

	return true;
}

/* True if @prot_mask was set in old_spte and is clear in new_spte. */
static bool spte_is_bit_cleared(pgprot_t old_spte,
		pgprot_t new_spte, pgprotval_t prot_mask)
{
	return (pgprot_val(old_spte) & prot_mask) &&
		!(pgprot_val(new_spte) & prot_mask);
}

/* True if the bits selected by @prot_mask differ between the two sptes. */
static bool spte_is_bit_changed(pgprot_t old_spte,
		pgprot_t new_spte, pgprotval_t prot_mask)
{
	return (pgprot_val(old_spte) & prot_mask) !=
		(pgprot_val(new_spte) & prot_mask);
}

/* Rules for using mmu_spte_set:
 * Set the sptep from nonpresent to present.
 * Note: the sptep being assigned *must* be either not present
 * or in a state where the hardware will not attempt to update
 * the spte.
 */
static void mmu_spte_set(struct kvm *kvm, pgprot_t *sptep, pgprot_t new_spte)
{
	WARN_ON(is_shadow_present_pte(kvm, *sptep));
	__set_spte(sptep, new_spte);
}

/* Rules for using mmu_spte_update:
 * Update the state bits, it means the mapped pfn is not changed.
 *
 * Whenever we overwrite a writable spte with a read-only one we
 * should flush remote TLBs. Otherwise rmap_write_protect
 * will find a read-only spte, even though the writable spte
 * might be cached on a CPU's TLB, the return value indicates this
 * case.
 */
static bool mmu_spte_update(struct kvm *kvm, pgprot_t *sptep, pgprot_t new_spte)
{
	pgprot_t old_spte = *sptep;
	bool ret = false;

	WARN_ON(!is_shadow_present_or_valid_pte(kvm, new_spte));

	/* nonpresent -> present transition: plain set, no flush needed */
	if (!is_shadow_present_pte(kvm, old_spte)) {
		mmu_spte_set(kvm, sptep, new_spte);
		return ret;
	}

	/* volatile A/D bits force the atomic xchg path so nothing is lost */
	if (!spte_has_volatile_bits(kvm, old_spte))
		__update_clear_spte_fast(sptep, new_spte);
	else
		old_spte = __update_clear_spte_slow(sptep, new_spte);

	/*
	 * For the spte updated out of mmu-lock is safe, since
	 * we always atomically update it, see the comments in
	 * spte_has_volatile_bits().
	 */
	if (spte_is_locklessly_modifiable(kvm, old_spte) &&
			!is_writable_pte(new_spte))
		ret = true;

	if (is_writable_pte(old_spte) != is_writable_pte(new_spte)) {
		/* changed writable bit of pte */
		ret = true;
	}

	if (!get_spte_accessed_mask(kvm)) {
		/*
		 * We don't set page dirty when dropping non-writable spte.
		 * So do it now if the new spte is becoming non-writable.
		 */
		if (ret)
			kvm_set_pfn_dirty(spte_to_pfn(kvm, old_spte));
		return ret;
	}

	/*
	 * Flush TLB when accessed/dirty bits are changed in the page tables,
	 * to guarantee consistency between TLB and page tables.
	 */
	if (spte_is_bit_changed(old_spte, new_spte,
			get_spte_bit_mask(kvm, true, true, false, false)))
		ret = true;

	/* propagate hardware A/D bits lost by the update to struct page */
	if (spte_is_bit_cleared(old_spte, new_spte,
				get_spte_accessed_mask(kvm)))
		kvm_set_pfn_accessed(spte_to_pfn(kvm, old_spte));
	if (spte_is_bit_cleared(old_spte, new_spte,
				get_spte_dirty_mask(kvm)))
		kvm_set_pfn_dirty(spte_to_pfn(kvm, old_spte));

	return ret;
}

/*
 * Rules for using mmu_spte_clear_track_bits:
 * It sets the sptep from present to nonpresent, and track the
 * state bits, it is used to clear the last level sptep.
 */
static int mmu_spte_clear_track_bits(struct kvm *kvm, pgprot_t *sptep)
{
	kvm_pfn_t pfn;
	pgprot_t old_spte = *sptep;
	struct kvm_mmu_page *sp;

	sp = page_header(__pa(sptep));

	DebugPTE("started for spte %px == 0x%lx\n",
		sptep, pgprot_val(old_spte));
	/*
	 * Unless the whole SP is being released, a still-valid pte is
	 * replaced by the bare "valid" marker rather than zero
	 * (e2k-specific; presumably to keep the gva range reserved —
	 * NOTE(review): confirm against get_spte_valid_mask() semantics).
	 */
	if (!spte_has_volatile_bits(kvm, old_spte)) {
		__update_clear_spte_fast(sptep,
			(is_shadow_present_or_valid_pte(kvm, old_spte) &&
					!sp->released) ?
				__pgprot(get_spte_valid_mask(kvm))
				:
				__pgprot(0ull));
	} else {
		old_spte = __update_clear_spte_slow(sptep,
				(sp->released) ?
					__pgprot(0ull)
					:
					__pgprot(get_spte_valid_mask(kvm)));
	}
	DebugPTE("cleared spte %px == 0x%lx\n",
		sptep, pgprot_val(*sptep));

	if (!is_shadow_present_pte(kvm, old_spte) ||
			is_mmio_spte(kvm, old_spte))
		return 0;

	pfn = spte_to_pfn(kvm, old_spte);
	DebugPTE("host pfn 0x%llx, reserved %d, count %d\n",
		pfn, kvm_is_reserved_pfn(pfn), page_count(pfn_to_page(pfn)));

	/*
	 * KVM does not hold the refcount of the page used by
	 * kvm mmu, before reclaiming the page, we should
	 * unmap it from mmu first.
	 */
	WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));

	/* mirror the tracked A/D state into the host struct page */
	if (!get_spte_accessed_mask(kvm) ||
			is_spte_accessed_mask(kvm, old_spte))
		kvm_set_pfn_accessed(pfn);
	if ((get_spte_dirty_mask(kvm)) ?
			is_spte_dirty_mask(kvm, old_spte) :
			is_spte_writable_mask(kvm, old_spte))
		kvm_set_pfn_dirty(pfn);
	return 1;
}

/*
 * Rules for using mmu_spte_clear_no_track:
 * Directly clear spte without caring the state bits of sptep,
 * it is used to set the upper level spte.
 */
static void mmu_spte_clear_no_track(pgprot_t *sptep)
{
	__update_clear_spte_fast(sptep, __pgprot(0ull));
}

/* Lockless (READ_ONCE) spte read helper. */
static pgprot_t mmu_spte_get_lockless(pgprot_t *sptep)
{
	return __get_spte_lockless(sptep);
}

static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
{
	/*
	 * Prevent page table teardown by making any free-er wait during
	 * kvm_flush_remote_tlbs() IPI to all active vcpus.
	 */
	local_irq_disable();

	/*
	 * Make sure a following spte read is not reordered ahead of the write
	 * to vcpu->mode.
	 */
	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
}

static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
	/*
	 * Make sure the write to vcpu->mode is not reordered in front of
	 * reads to sptes. If it does, kvm_commit_zap_page() can see us
	 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
	 */
	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
	local_irq_enable();
}

/*
 * Pre-fill @cache with at least @min objects from @base_cache so later
 * allocations under mmu_lock cannot fail/sleep.
 */
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				struct kmem_cache *base_cache, int min)
{
	void *obj;

	cache->kmem_cache = base_cache;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

/* True if @cache holds fewer than @min preallocated objects. */
static bool mmu_need_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
					int min)
{
	return (cache->nobjs < min);
}

/* Number of objects currently preallocated in @cache. */
static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache)
{
	return cache->nobjs;
}

/* Return all preallocated objects of @mc back to @cache. */
static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
				struct kmem_cache *cache)
{
	while (mc->nobjs)
		kmem_cache_free(cache, mc->objects[--mc->nobjs]);
}

/* Like mmu_topup_memory_cache(), but backed by whole pages, not a kmem_cache. */
static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
					int min)
{
	void *page;

	cache->kmem_cache = NULL;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = (void *)__get_free_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

/* Free all whole-page objects held by @mc. */
static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

/* Top up all three per-vcpu MMU caches (continues on the next fragment). */
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	r =
	mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
			pte_list_desc_cache, KVM_NR_MIN_MEM_OBJS);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache,
					KVM_NR_MIN_MEM_OBJS);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
				mmu_page_header_cache,
				KVM_NR_MIN_MEM_OBJS);
out:
	return r;
}

/* True if any of the three per-vcpu MMU caches is below its minimum fill. */
static bool mmu_need_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	bool r = false;

	r = mmu_need_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
					KVM_NR_MIN_MEM_OBJS);
	if (r)
		goto out;
	r = mmu_need_topup_memory_cache(&vcpu->arch.mmu_page_cache,
					KVM_NR_MIN_MEM_OBJS);
	if (r)
		goto out;
	r = mmu_need_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
					KVM_NR_MIN_MEM_OBJS);
out:
	return r;
}

/* Drain every per-vcpu MMU memory cache. */
static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
				pte_list_desc_cache);
	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
				mmu_page_header_cache);
}

/* Allocate one object matching the cache's backing store (slab or page). */
static inline void *mmu_memory_cache_alloc_obj(
			struct kvm_mmu_memory_cache *mc,
			gfp_t gfp_flags)
{
	if (mc->kmem_cache)
		return kmem_cache_zalloc(mc->kmem_cache, gfp_flags);
	else
		return (void *)__get_free_page(gfp_flags);
}

/*
 * Pop a preallocated object; fall back to GFP_ATOMIC allocation if the
 * cache is unexpectedly empty (we may be under mmu_lock here).
 */
static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	if (!mc->nobjs)
		p = mmu_memory_cache_alloc_obj(mc,
				GFP_ATOMIC | __GFP_ACCOUNT);
	else
		p = mc->objects[--mc->nobjs];

	KVM_BUG_ON(!p);

	return p;
}

/* Take one pte_list_desc from the vcpu's preallocated cache. */
static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
}

/* Return a pte_list_desc directly to its slab cache. */
static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
{
	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
}

/*
 * gfn backed by slot @index of @sp: stored explicitly for indirect SPs,
 * computed from sp->gfn for direct-mapped ones.
 */
static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
{
	if (!sp->role.direct)
		return sp->gfns[index];

	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
}

/* Record @gfn for slot @index; for direct SPs it must already match. */
static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
{
	if (sp->role.direct)
		KVM_BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
	else
		sp->gfns[index] = gfn;
}

/*
 * Return the pointer to the large page information for a given gfn,
 * handling slots that are not large page aligned.
 */
static struct kvm_lpage_info *
lpage_info_slot(struct kvm *kvm, gfn_t gfn, kvm_memory_slot_t *slot, int level)
{
	unsigned long idx;

	idx = kvm_gfn_to_index(kvm, gfn, slot->base_gfn, level);
	return &slot->arch.lpage_info[level - 2][idx];
}

/* Adjust the huge-page disallow counter for @gfn at every huge-capable level. */
static void update_gfn_disallow_lpage_count(struct kvm *kvm,
		kvm_memory_slot_t *slot, gfn_t gfn, int count)
{
	const pt_struct_t *spt = kvm_get_host_pt_struct(kvm);
	struct kvm_lpage_info *linfo;
	int i;

	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
		if (!is_huge_pt_struct_level(spt, i))
			continue;
		linfo = lpage_info_slot(kvm, gfn, slot, i);
		linfo->disallow_lpage += count;
		WARN_ON(linfo->disallow_lpage < 0);
	}
}

/* Forbid huge-page mappings covering @gfn (counted, so nestable). */
void kvm_mmu_gfn_disallow_lpage(struct kvm *kvm,
		kvm_memory_slot_t *slot, gfn_t gfn)
{
	update_gfn_disallow_lpage_count(kvm, slot, gfn, 1);
}

/* Undo one kvm_mmu_gfn_disallow_lpage(). */
void kvm_mmu_gfn_allow_lpage(struct kvm *kvm,
		kvm_memory_slot_t *slot, gfn_t gfn)
{
	update_gfn_disallow_lpage_count(kvm, slot, gfn, -1);
}

/*
 * Account a newly shadowed guest page: write-track its gfn and forbid
 * huge mappings over it (continues on the next fragment).
 */
static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;
	gfn_t gfn;

	kvm->arch.indirect_shadow_pages++;
	gfn = sp->gfn;
	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);

	/*
	 * Allow guest to write to the lowest levels of guest pt if
	 * CONFIG_PARAVIRT_TLB_FLUSH is enabled
	 */
	if ((sp->role.level > PT_PAGE_TABLE_LEVEL) ||
			!IS_ENABLED(CONFIG_KVM_PARAVIRT_TLB_FLUSH))
		kvm_slot_page_track_add_page(kvm, slot, gfn,
						KVM_PAGE_TRACK_WRITE);

	kvm_mmu_gfn_disallow_lpage(kvm, slot, gfn);
}

/* Reverse of account_shadowed(): untrack the gfn and re-allow huge pages. */
static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;
	gfn_t gfn;

	kvm->arch.indirect_shadow_pages--;
	gfn = sp->gfn;
	DebugFREE("SP %px level #%d gfn 0x%llx gva 0x%lx\n",
		sp, sp->role.level, sp->gfn, sp->gva);
	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);
	if (kvm_page_track_is_active(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE))
		kvm_slot_page_track_remove_page(kvm, slot, gfn,
						KVM_PAGE_TRACK_WRITE);
	kvm_mmu_gfn_allow_lpage(kvm, slot, gfn);
}

/* Huge pages are disallowed when the counter is set or the slot is absent. */
static bool __mmu_gfn_lpage_is_disallowed(struct kvm *kvm,
		gfn_t gfn, int level, kvm_memory_slot_t *slot)
{
	struct kvm_lpage_info *linfo;

	if (slot) {
		linfo = lpage_info_slot(kvm, gfn, slot, level);
		return !!linfo->disallow_lpage;
	}

	return true;
}

/* Slot-resolving wrapper around __mmu_gfn_lpage_is_disallowed(). */
static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn,
					int level)
{
	struct kvm_memory_slot *slot;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	return __mmu_gfn_lpage_is_disallowed(vcpu->kvm, gfn, level, slot);
}

/*
 * Largest PT level whose page size the host mapping of @gfn can back
 * (e.g. returns the directory level if the host maps it with a huge page).
 */
static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
{
	unsigned long page_size;
	struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, 0);
	int i, ret = 0;

	page_size = kvm_host_page_size(vcpu, gfn);

	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
		if (page_size >= KVM_MMU_HPAGE_SIZE(kvm, i))
			ret = i;
		else
			break;
	}

	return ret;
}

/* A slot is usable for a guest pte unless invalid or (optionally) dirty-logged. */
static inline bool memslot_valid_for_gpte(struct kvm_memory_slot *slot,
					bool no_dirty_log)
{
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return false;
	if (no_dirty_log && slot->dirty_bitmap)
		return false;

	return true;
}

/* Resolve @gfn to a memslot, or NULL when it fails the gpte validity check. */
static struct kvm_memory_slot *
gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
			bool no_dirty_log)
{
	struct kvm_memory_slot *slot;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	if (!memslot_valid_for_gpte(slot, no_dirty_log))
		slot = NULL;

	return slot;
}

/*
 * Choose the mapping level for @large_gfn: the highest huge-capable level
 * allowed by the host mapping, the slot and the per-gfn disallow counters.
 * May set *force_pt_level so callers retry with 4K pages.
 */
static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
			bool *force_pt_level)
{
	int host_level, level, max_level;
	const pt_level_t *pt_level;
	kvm_memory_slot_t *slot;

	if (unlikely(*force_pt_level))
		return PT_PAGE_TABLE_LEVEL;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, large_gfn);
	*force_pt_level = !memslot_valid_for_gpte(slot, true);
	if (unlikely(*force_pt_level))
		return PT_PAGE_TABLE_LEVEL;

	host_level = host_mapping_level(vcpu->kvm, large_gfn);

	if (host_level == PT_PAGE_TABLE_LEVEL)
		return host_level;

	max_level = min(MAX_HUGE_PAGES_LEVEL, host_level);

	pt_level = &kvm_get_host_pt_struct(vcpu->kvm)->levels[
						PT_DIRECTORY_LEVEL];
	for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level) {
		if (!is_huge_pt_level(pt_level))
			break;
		if (__mmu_gfn_lpage_is_disallowed(vcpu->kvm, large_gfn,
							level, slot))
			break;
		++pt_level;
	}

	/* the loop overshoots by one: last acceptable level is level - 1 */
	return level - 1;
}

/*
 * About rmap_head encoding:
 *
 * If the bit zero of rmap_head->val is clear, then it points to the only spte
 * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
 * pte_list_desc containing more mappings.
 */

/*
 * Returns the number of pointers in the rmap chain, not counting the new one.
 */
static int pte_list_add(struct kvm_vcpu *vcpu, pgprot_t *spte,
			struct kvm_rmap_head *rmap_head)
{
	struct pte_list_desc *desc;
	int i, count = 0;

	if (!rmap_head->val) {
		/* empty chain: store the sole spte pointer inline */
		rmap_printk("pte_list_add: %px %lx 0->1\n",
			spte, pgprot_val(*spte));
		rmap_head->val = (unsigned long)spte;
	} else if (!(rmap_head->val & 1)) {
		/* one inline spte: promote to a descriptor holding both */
		rmap_printk("pte_list_add: %px %lx 1->many\n",
			spte, pgprot_val(*spte));
		desc = mmu_alloc_pte_list_desc(vcpu);
		desc->sptes[0] = (pgprot_t *)rmap_head->val;
		desc->sptes[1] = spte;
		rmap_head->val = (unsigned long)desc | 1;
		++count;
	} else {
		/* descriptor chain: append to the first non-full descriptor */
		rmap_printk("pte_list_add: %px %lx many->many\n",
			spte, pgprot_val(*spte));
		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
		while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
			desc = desc->more;
			count += PTE_LIST_EXT;
		}
		if (desc->sptes[PTE_LIST_EXT-1]) {
			desc->more = mmu_alloc_pte_list_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->sptes[i]; ++i)
			++count;
		desc->sptes[i] = spte;
	}
	return count;
}

/*
 * Remove entry @i from @desc, compacting the descriptor by moving the
 * last used slot into the hole; frees @desc when it becomes empty.
 */
static void
pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
			struct pte_list_desc *desc, int i,
			struct pte_list_desc *prev_desc)
{
	int j;

	for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
		;
	desc->sptes[i] = desc->sptes[j];
	desc->sptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		/* last entry of the only descriptor: demote to inline form */
		rmap_head->val = (unsigned long)desc->sptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			rmap_head->val = (unsigned long)desc->more | 1;
	mmu_free_pte_list_desc(desc);
}

/* Remove @spte from the rmap chain; BUG() if it is not found there. */
static void pte_list_remove(pgprot_t *spte, struct kvm_rmap_head *rmap_head)
{
	struct pte_list_desc *desc;
	struct pte_list_desc *prev_desc;
	int i;

	if (!rmap_head->val) {
		pr_err("%s(): %px 0x%lx 0->BUG\n",
			__func__, spte, pgprot_val(*spte));
		BUG();
	} else if (!(rmap_head->val & 1)) {
		rmap_printk("pte_list_remove: %px 1->0\n", spte);
		DebugPTE("%px 1->0\n", spte);
		if ((pgprot_t *)rmap_head->val != spte) {
			pr_err("%s(): %px 0x%lx 1->BUG\n",
				__func__, spte, pgprot_val(*spte));
			BUG();
		}
		rmap_head->val = 0;
	} else {
		rmap_printk("pte_list_remove: %px many->many\n", spte);
		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
		prev_desc = NULL;
		DebugPTE("%px many->many, desc %px\n", spte, desc);
		while (desc) {
			for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
				if (desc->sptes[i] == spte) {
					pte_list_desc_remove_entry(rmap_head,
						desc, i, prev_desc);
					DebugPTE("remove desc from list #%d, "
						"prev %px\n",
						i, prev_desc);
					return;
				}
			}
			prev_desc = desc;
			desc = desc->more;
		}
		pr_err("pte_list_remove: %px many->many\n", spte);
		BUG();
	}
}

/*
 * rmap head for @gfn at the given pt level; NULL for levels that can
 * hold neither ptes nor huge pages.
 */
static struct kvm_rmap_head *
pt_level_gfn_to_rmap(gfn_t gfn, const pt_level_t *pt_level, kvm_memory_slot_t *slot)
{
	unsigned long idx;
	int level = get_pt_level_id(pt_level);

	if (!(pt_level->is_pte || pt_level->is_huge)) {
		return NULL;
	}
	idx = gfn_to_index(gfn, slot->base_gfn, pt_level);
	return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx];
}

/* rmap head for @gfn at numeric @level within @slot. */
static struct kvm_rmap_head *
__gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level, kvm_memory_slot_t *slot)
{
	unsigned long idx;

	idx = kvm_gfn_to_index(kvm, gfn, slot->base_gfn, level);
	return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx];
}

/* rmap head for @gfn, resolving the memslot from the SP's role. */
static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
					struct kvm_mmu_page *sp)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;

	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);
	return __gfn_to_rmap(kvm, gfn, sp->role.level, slot);
}

/* True while the pte_list_desc cache still has free objects to consume. */
static bool rmap_can_add(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_memory_cache *cache;

	cache = &vcpu->arch.mmu_pte_list_desc_cache;
	return mmu_memory_cache_free_objects(cache);
}

/* Record @spte as a mapping of @gfn in the slot's rmap (continues). */
static int rmap_add(struct kvm_vcpu *vcpu, pgprot_t *spte, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	struct kvm_rmap_head *rmap_head;

	sp =
	page_header(__pa(spte));
	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
	return pte_list_add(vcpu, spte, rmap_head);
}

/* Remove @spte from the rmap of the gfn it maps. */
static void rmap_remove(struct kvm *kvm, pgprot_t *spte)
{
	struct kvm_mmu_page *sp;
	gfn_t gfn;
	struct kvm_rmap_head *rmap_head;

	DebugPTE("started for spte %px == 0x%lx\n",
		spte, pgprot_val(*spte));
	sp = page_header(__pa(spte));
	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
	DebugPTE("SP gfn is 0x%llx\n", gfn);
	rmap_head = gfn_to_rmap(kvm, gfn, sp);
	DebugPTE("gfn rmap head at %px, val 0x%lx\n",
		rmap_head, rmap_head->val);
	pte_list_remove(spte, rmap_head);
}

/*
 * Used by the following functions to iterate through the sptes linked by a
 * rmap. All fields are private and not assumed to be used outside.
 */
struct rmap_iterator {
	/* private fields */
	struct pte_list_desc *desc;	/* holds the sptep if not NULL */
	int pos;			/* index of the sptep */
};

/*
 * Iteration must be started by this function. This should also be used after
 * removing/dropping sptes from the rmap link because in such cases the
 * information in the itererator may not be valid.
 *
 * Returns sptep if found, NULL otherwise.
 */
static pgprot_t *rmap_get_first(struct kvm *kvm,
				struct kvm_rmap_head *rmap_head,
				struct rmap_iterator *iter)
{
	pgprot_t *sptep;

	if (!rmap_head->val)
		return NULL;

	if (!(rmap_head->val & 1)) {
		/* inline single-spte encoding (bit 0 clear) */
		iter->desc = NULL;
		sptep = (pgprot_t *)rmap_head->val;
		goto out;
	}

	iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
	iter->pos = 0;
	sptep = iter->desc->sptes[iter->pos];
out:
	BUG_ON(!is_shadow_present_pte(kvm, *sptep));
	return sptep;
}

/*
 * Must be used with a valid iterator: e.g. after rmap_get_first().
 *
 * Returns sptep if found, NULL otherwise.
 */
static pgprot_t *rmap_get_next(struct kvm *kvm, struct rmap_iterator *iter)
{
	pgprot_t *sptep;

	if (iter->desc) {
		if (iter->pos < PTE_LIST_EXT - 1) {
			++iter->pos;
			sptep = iter->desc->sptes[iter->pos];
			if (sptep)
				goto out;
		}

		iter->desc = iter->desc->more;

		if (iter->desc) {
			iter->pos = 0;
			/* desc->sptes[0] cannot be NULL */
			sptep = iter->desc->sptes[iter->pos];
			goto out;
		}
	}

	return NULL;
out:
	BUG_ON(!is_shadow_present_pte(kvm, *sptep));
	return sptep;
}

/* Walk every spte chained on @_rmap_head_. */
#define for_each_rmap_spte(_kvm_, _rmap_head_, _iter_, _spte_)		\
	for (_spte_ = rmap_get_first(_kvm_, _rmap_head_, _iter_);	\
		_spte_; _spte_ = rmap_get_next(_kvm_, _iter_))

/* Clear an spte (tracking A/D bits) and unlink it from its rmap if it was present. */
static void drop_spte(struct kvm *kvm, pgprot_t *sptep)
{
	DebugPTE("started for spte %px == 0x%lx\n",
		sptep, pgprot_val(*sptep));
	if (mmu_spte_clear_track_bits(kvm, sptep))
		rmap_remove(kvm, sptep);
}


/* Drop @sptep if it maps a large page; returns true when something was dropped. */
static bool __drop_large_spte(struct kvm *kvm, pgprot_t *sptep)
{
	if (is_large_pte(*sptep)) {
		WARN_ON(page_header(__pa(sptep))->role.level ==
				PT_PAGE_TABLE_LEVEL);
		drop_spte(kvm, sptep);
		--kvm->stat.lpages;
		return true;
	}

	return false;
}

/* As __drop_large_spte(), plus the remote TLB flush the drop requires. */
static void drop_large_spte(struct kvm_vcpu *vcpu, pgprot_t *sptep)
{
	if (__drop_large_spte(vcpu->kvm, sptep))
		kvm_flush_remote_tlbs(vcpu->kvm);
}

/*
 * Write-protect on the specified @sptep, @pt_protect indicates whether
 * spte write-protection is caused by protecting shadow page table.
 *
 * Note: write protection is difference between dirty logging and spte
 * protection:
 * - for dirty logging, the spte can be set to writable at anytime if
 *   its dirty bitmap is properly set.
 * - for spte protection, the spte can be writable only after unsync-ing
 *   shadow page.
 *
 * Return true if tlb need be flushed.
 */
static bool spte_write_protect(struct kvm *kvm,
				pgprot_t *sptep, bool pt_protect)
{
	pgprot_t spte = *sptep;

	if (!is_writable_pte(spte) &&
		!(pt_protect && spte_is_locklessly_modifiable(kvm, spte)))
		return false;

	rmap_printk("rmap_write_protect: spte %px %lx\n",
		sptep, pgprot_val(*sptep));

	/* pt_protect additionally blocks the lockless fast-fault re-enable path */
	if (pt_protect)
		spte = clear_spte_mmu_writable_mask(kvm, spte);
	spte = clear_spte_writable_mask(kvm, spte);

	return mmu_spte_update(kvm, sptep, spte);
}

/* Write-protect every spte on @rmap_head; returns true if a TLB flush is due. */
static bool __rmap_write_protect(struct kvm *kvm,
				struct kvm_rmap_head *rmap_head,
				bool pt_protect)
{
	pgprot_t *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for_each_rmap_spte(kvm, rmap_head, &iter, sptep)
		flush |= spte_write_protect(kvm, sptep, pt_protect);

	return flush;
}

/* Clear the dirty bit of one spte; returns true if a TLB flush is needed. */
static bool spte_clear_dirty(struct kvm *kvm, pgprot_t *sptep)
{
	pgprot_t spte = *sptep;

	rmap_printk("rmap_clear_dirty: spte %px %lx\n",
		sptep, pgprot_val(*sptep));

	spte = clear_spte_dirty_mask(kvm, spte);

	return mmu_spte_update(kvm, sptep, spte);
}

/* Clear the dirty bit across an rmap chain. */
static bool __rmap_clear_dirty(struct kvm *kvm, kvm_rmap_head_t *rmap_head)
{
	pgprot_t *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for_each_rmap_spte(kvm, rmap_head, &iter, sptep)
		flush |= spte_clear_dirty(kvm, sptep);

	return flush;
}

/* Set the dirty bit of one spte; returns true if a TLB flush is needed. */
static bool spte_set_dirty(struct kvm *kvm, pgprot_t *sptep)
{
	pgprot_t spte = *sptep;

	rmap_printk("rmap_set_dirty: spte %px %lx\n",
		sptep, pgprot_val(*sptep));

	spte = set_spte_dirty_mask(kvm, spte);

	return mmu_spte_update(kvm, sptep, spte);
}

/* Set the dirty bit across an rmap chain. */
static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
	pgprot_t *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for_each_rmap_spte(kvm, rmap_head, &iter, sptep)
		flush |= spte_set_dirty(kvm, sptep);

	return flush;
}

/**
 * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
 * @kvm: kvm instance
 * @slot: slot to protect
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages we should protect
 *
 * Used when we do not need to care about huge page mappings: e.g. during dirty
 * logging we do not have any such mappings.
 */
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
					struct kvm_memory_slot *slot,
					gfn_t gfn_offset, unsigned long mask)
{
	struct kvm_rmap_head *rmap_head;

	while (mask) {
		rmap_head = __gfn_to_rmap(kvm,
				slot->base_gfn + gfn_offset + __ffs(mask),
				PT_PAGE_TABLE_LEVEL, slot);
		__rmap_write_protect(kvm, rmap_head, false);

		/* clear the first set bit */
		mask &= mask - 1;
	}
}

/**
 * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages
 * @kvm: kvm instance
 * @slot: slot to clear D-bit
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages we should clear D-bit
 *
 * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
 */
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				struct kvm_memory_slot *slot,
				gfn_t gfn_offset, unsigned long mask)
{
	struct kvm_rmap_head *rmap_head;

	while (mask) {
		rmap_head = __gfn_to_rmap(kvm,
				slot->base_gfn + gfn_offset + __ffs(mask),
				PT_PAGE_TABLE_LEVEL, slot);
		__rmap_clear_dirty(kvm, rmap_head);

		/* clear the first set bit */
		mask &= mask - 1;
	}
}
EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked);

/**
 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
 * PT level pages.
 *
 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
 * enable dirty logging for them.
 *
 * Used when we do not need to care about huge page mappings: e.g. during dirty
 * logging we do not have any such mappings.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
				struct kvm_memory_slot *slot,
				gfn_t gfn_offset, unsigned long mask)
{
	/* FIXME: x86 has own enable_log_dirty_pt_masked() if PML mode */
	/* is supported
	if (kvm_x86_ops->enable_log_dirty_pt_masked)
		kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
				mask);
	else
	 */
	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}

/*
 * Write-protect @gfn at every mappable PT level of @slot.
 * Returns true if any spte was actually changed (TLB flush needed).
 */
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				struct kvm_memory_slot *slot, u64 gfn)
{
	const pt_struct_t *spt = kvm_get_host_pt_struct(kvm);
	struct kvm_rmap_head *rmap_head;
	int i;
	bool write_protected = false;

	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
		if (!(is_huge_pt_struct_level(spt, i) ||
				is_page_pt_struct_level(spt, i)))
			continue;
		rmap_head = __gfn_to_rmap(kvm, gfn, i, slot);
		write_protected |= __rmap_write_protect(kvm, rmap_head, true);
	}

	return write_protected;
}

/* Slot-resolving wrapper around kvm_mmu_slot_gfn_write_protect(). */
static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
{
	struct kvm_memory_slot *slot;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
}

/* Drop every spte on @rmap_head; returns true if any was dropped. */
static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
	pgprot_t *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	/* restart from the head each time: drop_spte() edits the chain */
	while ((sptep = rmap_get_first(kvm, rmap_head, &iter))) {
		rmap_printk("%s: spte %px %lx.\n",
			__func__, sptep, pgprot_val(*sptep));

		drop_spte(kvm, sptep);
		flush = true;
	}

	return flush;
}

/* kvm_handle_hva_range() callback: unmap all sptes of one rmap head. */
static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			struct kvm_memory_slot *slot, gfn_t gfn, int level,
			unsigned long data)
{
	return kvm_zap_rmapp(kvm, rmap_head);
}

/*
 * kvm_handle_hva() callback for change_pte notifications: repoint every
 * spte of @rmap_head at the new (read-only) host pte carried in @data.
 */
static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			struct kvm_memory_slot *slot, gfn_t gfn, int level,
			unsigned long data)
{
	pgprot_t *sptep;
	struct rmap_iterator iter;
	int need_flush = 0;
	pgprot_t new_spte;
	pte_t *ptep = (pte_t *)data;
	kvm_pfn_t new_pfn;

	WARN_ON(pte_huge(*ptep));
	new_pfn = pte_pfn(*ptep);

restart:
	for_each_rmap_spte(kvm, rmap_head, &iter, sptep) {
		rmap_printk("kvm_set_pte_rmapp: spte %px %lx gfn %llx (%d)\n",
			sptep, pgprot_val(*sptep), gfn, level);

		need_flush = 1;

		if (pte_write(*ptep)) {
			/* dropping invalidates the iterator: restart the walk */
			drop_spte(kvm, sptep);
			goto restart;
		} else {
			new_spte = set_spte_pfn(kvm, *sptep, new_pfn);

			new_spte = clear_spte_writable_mask(kvm, new_spte);
			new_spte = clear_spte_host_writable_mask(kvm, new_spte);
			new_spte = clear_spte_accessed_mask(kvm, new_spte);

			mmu_spte_clear_track_bits(kvm, sptep);
			mmu_spte_set(kvm, sptep, new_spte);
		}
	}

	if (need_flush)
		kvm_flush_remote_tlbs(kvm);

	return 0;
}

/* State for walking all rmap heads of a slot over a gfn and level range. */
typedef struct slot_rmap_walk_iterator {
	/* input fields. */
	struct kvm_memory_slot *slot;
	gfn_t start_gfn;
	gfn_t end_gfn;
	int start_level;
	int end_level;

	/* output fields. */
	gfn_t gfn;
	struct kvm_rmap_head *rmap;
	int level;

	/* private field.
*/ + struct kvm_rmap_head *end_rmap; + const pt_struct_t *pt_struct; + const pt_level_t *pt_level; +} slot_rmap_walk_iterator_t; + +static void +rmap_walk_init_level(slot_rmap_walk_iterator_t *iterator, int level) +{ + iterator->level = level; + iterator->pt_level = &iterator->pt_struct->levels[level]; + iterator->gfn = iterator->start_gfn; + iterator->rmap = pt_level_gfn_to_rmap(iterator->gfn, + iterator->pt_level, iterator->slot); + iterator->end_rmap = pt_level_gfn_to_rmap(iterator->end_gfn, + iterator->pt_level, iterator->slot); +} + +static void +slot_rmap_walk_init(struct kvm *kvm, slot_rmap_walk_iterator_t *iterator, + kvm_memory_slot_t *slot, int start_level, + int end_level, gfn_t start_gfn, gfn_t end_gfn) +{ + iterator->slot = slot; + iterator->start_level = start_level; + iterator->pt_struct = kvm_get_host_pt_struct(kvm); + iterator->end_level = end_level; + iterator->start_gfn = start_gfn; + iterator->end_gfn = end_gfn; + + rmap_walk_init_level(iterator, iterator->start_level); +} + +static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator) +{ + return !!iterator->rmap; +} + +static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator) +{ + if (++iterator->rmap <= iterator->end_rmap) { + iterator->gfn += + (1UL << KVM_PT_LEVEL_HPAGE_SHIFT(iterator->pt_level)); + return; + } + + if (++iterator->level > iterator->end_level) { + iterator->rmap = NULL; + return; + } + + rmap_walk_init_level(iterator, iterator->level); +} + +#define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_, \ + _start_gfn, _end_gfn, _iter_, _kvm_) \ + for (slot_rmap_walk_init(_kvm_, _iter_, _slot_, \ + _start_level_, _end_level_, \ + _start_gfn, _end_gfn); \ + slot_rmap_walk_okay(_iter_); \ + slot_rmap_walk_next(_iter_)) + +static int kvm_handle_rmap_range(struct kvm *kvm, kvm_memory_slot_t *memslot, + unsigned long start, unsigned long end, + unsigned long data, + int (*handler)(struct kvm *kvm, + struct kvm_rmap_head *rmap_head, + struct 
kvm_memory_slot *slot, + gfn_t gfn, int level, unsigned long data)) +{ + unsigned long hva_start, hva_end; + gfn_t gfn_start, gfn_end; + slot_rmap_walk_iterator_t iterator; + int ret = 0; + + hva_start = max(start, memslot->userspace_addr); + hva_end = min(end, memslot->userspace_addr + + (memslot->npages << PAGE_SHIFT)); + if (hva_start >= hva_end) + return false; + + /* + * {gfn(page) | page intersects with + * [hva_start, hva_end)} = + * {gfn_start, gfn_start+1, ..., gfn_end-1} + */ + gfn_start = hva_to_gfn_memslot(hva_start, memslot); + gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); + + trace_kvm_handle_rmap_range(hva_start, hva_end, gfn_to_gpa(gfn_start), + gfn_to_gpa(gfn_end), (void *)handler); + + for_each_slot_rmap_range(memslot, + PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL, + gfn_start, gfn_end - 1, &iterator, kvm) { + ret |= handler(kvm, iterator.rmap, memslot, + iterator.gfn, iterator.level, data); + } + + return ret; +} + +static int kvm_handle_hva_range(struct kvm *kvm, + unsigned long start, + unsigned long end, + unsigned long data, + int (*handler)(struct kvm *kvm, + struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot, + gfn_t gfn, + int level, + unsigned long data)) +{ + struct kvm_memslots *slots; + kvm_memory_slot_t *memslot; + int ret = 0; + int i; + + for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { + slots = __kvm_memslots(kvm, i); + kvm_for_each_memslot(memslot, slots) { + ret |= kvm_handle_rmap_range(kvm, memslot, + start, end, data, handler); + } + } + + return ret; +} + +static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, + unsigned long data, + int (*handler)(struct kvm *kvm, + struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot, + gfn_t gfn, int level, + unsigned long data)) +{ + return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler); +} + +int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) +{ + return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp); +} + +int 
kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
			unsigned flags)
{
	return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
}

/* MMU-notifier change_pte entry: redirect sptes of @hva to the new host pte. */
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
	return 0;
}

/*
 * Ageing callback: test-and-clear the hardware accessed bit of each spte.
 * Only valid when the MMU provides an accessed mask (BUG_ON below).
 */
static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			struct kvm_memory_slot *slot, gfn_t gfn, int level,
			unsigned long data)
{
	pgprot_t *sptep;
	struct rmap_iterator uninitialized_var(iter);
	int young = 0;

	BUG_ON(!get_spte_accessed_mask(kvm));

	for_each_rmap_spte(kvm, rmap_head, &iter, sptep) {
		if (is_spte_accessed_mask(kvm, *sptep)) {
			young = 1;
			/* atomic clear of the accessed bit, spte stays present */
			clear_bit((ffs(get_spte_accessed_mask(kvm)) - 1),
					(unsigned long *)sptep);
		}
	}

	trace_kvm_age_page(gfn, level, slot, young);
	return young;
}

/* Non-destructive ageing callback: report, but do not clear, the accessed bit. */
static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			struct kvm_memory_slot *slot, gfn_t gfn,
			int level, unsigned long data)
{
	pgprot_t *sptep;
	struct rmap_iterator iter;
	int young = 0;

	/*
	 * If there's no access bit in the secondary pte set by the
	 * hardware it's up to gup-fast/gup to set the access bit in
	 * the primary pte or in the page structure.
	 */
	if (!get_spte_accessed_mask(kvm))
		goto out;

	for_each_rmap_spte(kvm, rmap_head, &iter, sptep) {
		if (is_spte_accessed_mask(kvm, *sptep)) {
			young = 1;
			break;
		}
	}
out:
	return young;
}

#define RMAP_RECYCLE_THRESHOLD 1000

/* Zap all sptes for @gfn and flush TLBs; used when an rmap chain grows too long. */
static void rmap_recycle(struct kvm_vcpu *vcpu, pgprot_t *spte, gfn_t gfn)
{
	struct kvm_rmap_head *rmap_head;
	struct kvm_mmu_page *sp;

	sp = page_header(__pa(spte));

	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);

	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
	kvm_flush_remote_tlbs(vcpu->kvm);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	/*
	 * In case of absence of EPT Access and Dirty Bits supports,
	 * emulate the accessed bit for EPT, by checking if this page has
	 * an EPT mapping, and clearing it if it does. On the next access,
	 * a new EPT mapping will be established.
	 * This has some overhead, but not as much as the cost of swapping
	 * out actively used pages or breaking up actively used hugepages.
	 */
	if (!get_spte_accessed_mask(kvm)) {
#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
		/*
		 * We are holding the kvm->mmu_lock, and we are blowing up
		 * shadow PTEs. MMU notifier consumers need to be kept at bay.
		 * This is correct as long as we don't decouple the mmu_lock
		 * protected regions (like invalidate_range_start|end does).
		 */
		kvm->mmu_notifier_seq++;
		return kvm_handle_hva_range(kvm, start, end, 0,
					kvm_unmap_rmapp);
#else /* !
KVM_ARCH_WANT_MMU_NOTIFIER */ + kvm_pr_unimpl("%s(): absence of TDP Access and Dirty Bits " + "supports is not implemented case\n", + __func__); +#endif /* KVM_ARCH_WANT_MMU_NOTIFIER */ + } + + return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp); +} + +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) +{ + return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp); +} + +#ifdef MMU_DEBUG +static int is_empty_shadow_page(struct kvm *kvm, pgprot_t *spt) +{ + pgprot_t *pos; + pgprot_t *end; + + for (pos = spt, end = pos + PAGE_SIZE / sizeof(pgprot_t); + pos != end; pos++) + if (is_shadow_present_pte(kvm, *pos)) { + pr_err("%s: %px %lx\n", + __func__, pos, pgprot_val(*pos)); + return 0; + } + return 1; +} +#endif + +/* + * This value is the sum of all of the kvm instances's + * kvm->arch.n_used_mmu_pages values. We need a global, + * aggregate version in order to make the slab shrinker + * faster + */ +static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr) +{ + kvm->arch.n_used_mmu_pages += nr; + percpu_counter_add(&kvm_total_used_mmu_pages, nr); +} + +void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp) +{ + DebugZAP("SP at %px gva 0x%lx gfn 0x%llx spt at %px\n", + sp, sp->gva, sp->gfn, sp->spt); + KVM_WARN_ON(!is_empty_shadow_page(kvm, sp->spt)); + hlist_del(&sp->hash_link); + list_del(&sp->link); + if (!kvm->arch.is_hv) { + kvm_delete_sp_from_gmm_list(sp); + } + free_page((unsigned long)sp->spt); + if (!sp->role.direct) + free_page((unsigned long)sp->gfns); + kmem_cache_free(mmu_page_header_cache, sp); +} + +static unsigned kvm_page_table_hashfn(gfn_t gfn) +{ + return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1); +} + +static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu, + struct kvm_mmu_page *sp, pgprot_t *parent_pte) +{ + if (!parent_pte) + return; + + pte_list_add(vcpu, parent_pte, &sp->parent_ptes); +} + +static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp, + pgprot_t *parent_pte) +{ + pte_list_remove(parent_pte, 
&sp->parent_ptes); +} + +static void drop_parent_pte(struct kvm_mmu_page *sp, + pgprot_t *parent_pte) +{ + struct kvm_mmu_page *parent_sp = page_header(__pa(parent_pte)); + + if (parent_sp == sp) { + /* it is one PGD entry for the VPTB self-map. */ + KVM_BUG_ON(sp->role.level != PT64_ROOT_LEVEL); + } else { + mmu_page_remove_parent_pte(sp, parent_pte); + } + mmu_spte_clear_no_track(parent_pte); +} + +static kvm_mmu_page_t *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct) +{ + kvm_mmu_page_t *sp; + + sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache); + sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); + if (!direct) + sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); + set_page_private(virt_to_page(sp->spt), (unsigned long)sp); + + /* + * The active_mmu_pages list is the FIFO list, do not move the + * page until it is zapped. kvm_zap_obsolete_pages depends on + * this feature. See the comments in kvm_zap_obsolete_pages(). + */ + list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); + kvm_mod_used_mmu_pages(vcpu->kvm, +1); + return sp; +} + +static void mark_unsync(struct kvm *kvm, pgprot_t *spte); +static void kvm_mmu_mark_parents_unsync(struct kvm *kvm, kvm_mmu_page_t *sp) +{ + pgprot_t *sptep; + struct rmap_iterator iter; + + for_each_rmap_spte(kvm, &sp->parent_ptes, &iter, sptep) { + mark_unsync(kvm, sptep); + } +} + +static void mark_unsync(struct kvm *kvm, pgprot_t *spte) +{ + kvm_mmu_page_t *sp; + unsigned int index; + + sp = page_header(__pa(spte)); + index = spte - sp->spt; + if (__test_and_set_bit(index, sp->unsync_child_bitmap)) + return; + if (sp->unsync_children++) + return; + kvm_mmu_mark_parents_unsync(kvm, sp); +} + +static int nonpaging_sync_page(struct kvm_vcpu *vcpu, + struct kvm_mmu_page *sp) +{ + return 0; +} + +static void nonpaging_sync_gva(struct kvm_vcpu *vcpu, gva_t gva) +{ +} + +static void nonpaging_sync_gva_range(struct kvm_vcpu *vcpu, + gva_t gva_start, + gva_t gva_end, + bool flush_tlb) +{ +} 
+ +static void nonpaging_update_pte(struct kvm_vcpu *vcpu, + struct kvm_mmu_page *sp, pgprot_t *spte, + const void *pte) +{ + WARN_ON(1); +} + +#define KVM_PAGE_ARRAY_NR 16 + +struct kvm_mmu_pages { + struct mmu_page_and_offset { + struct kvm_mmu_page *sp; + unsigned int idx; + } page[KVM_PAGE_ARRAY_NR]; + unsigned int nr; +}; + +static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, + int idx) +{ + int i; + + if (sp->unsync || sp->released) { + for (i = 0; i < pvec->nr; i++) + if (pvec->page[i].sp == sp) { + DebugFREE("found same sp %px level #%d " + "gva 0x%lx at pvec #%d\n", + sp, sp->role.level, sp->gva, i); + return 0; + } + } + + pvec->page[pvec->nr].sp = sp; + pvec->page[pvec->nr].idx = idx; + DebugFREE("pvec[0x%02x] : new sp %px level #%d gva 0x%lx idx 0x%03lx\n", + pvec->nr, sp, sp->role.level, sp->gva, + idx * sizeof(pgprot_t)); + pvec->nr++; + return (pvec->nr == KVM_PAGE_ARRAY_NR); +} + +static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx) +{ + --sp->unsync_children; + WARN_ON((int)sp->unsync_children < 0); + __clear_bit(idx, sp->unsync_child_bitmap); +} + +static int __mmu_unsync_walk(struct kvm *kvm, kvm_mmu_page_t *sp, + struct kvm_mmu_pages *pvec, int pte_level); +static int __mmu_release_walk(struct kvm *kvm, kvm_mmu_page_t *sp, + struct kvm_mmu_pages *pvec, int pte_level); + +static int __mmu_unsync_sp(struct kvm *kvm, kvm_mmu_page_t *sp, + struct kvm_mmu_pages *pvec, int i, bool unsync, int pt_level) +{ + int ret, nr_unsync_leaf = 0; + struct kvm_mmu_page *child; + pgprot_t ent = sp->spt[i]; + + if (!is_shadow_present_pte(kvm, ent)) { + if (unsync) + clear_unsync_child_bit(sp, i); + DebugPTE("pte not present, return 0\n"); + return 0; + } + + DebugFREE("found unsynced child entry index 0x%03lx : 0x%lx\n", + i * sizeof(pgprot_t), pgprot_val(ent)); + + BUG_ON(sp->role.level < pt_level); + + if (sp->role.level == pt_level) { + DebugFREE("sp %px level #%d, return 1\n", sp, sp->role.level); + return 1; + } 
+ if (is_large_pte(ent)) { + DebugFREE("sp %px level #%d is huge page gfn 0x%llx\n", + sp, sp->role.level, sp->gfn); + return 1; + } + + child = page_header(kvm_spte_pfn_to_phys_addr(kvm, ent)); + child->released = sp->released; + + if (child->unsync_children || child->released) { + if (mmu_pages_add(pvec, child, i)) { + ret = -ENOSPC; + goto out_failed; + } + + if (child->released) { + ret = __mmu_release_walk(kvm, child, pvec, pt_level); + } else { + ret = __mmu_unsync_walk(kvm, child, pvec, pt_level); + } + if (!ret) { + if (unsync) + clear_unsync_child_bit(sp, i); + } else if (ret > 0) { + nr_unsync_leaf += ret; + } else { + goto out_failed; + } + } else if (child->unsync) { + nr_unsync_leaf++; + if (mmu_pages_add(pvec, child, i)) { + ret = -ENOSPC; + goto out_failed; + } + } else { + if (unsync) + clear_unsync_child_bit(sp, i); + } + + DebugFREE("return nr_unsync_leaf %d\n", nr_unsync_leaf); + return nr_unsync_leaf; + +out_failed: + DebugFREE("failed error %d\n", ret); + return ret; +} + +static int __mmu_unsync_walk(struct kvm *kvm, kvm_mmu_page_t *sp, + struct kvm_mmu_pages *pvec, int pte_level) +{ + int i, ret, nr_unsync_leaf = 0; + + DebugFREE("started to walk unsynced SP %px level #%d\n", + sp, sp->role.level); + for_each_set_bit(i, sp->unsync_child_bitmap, 512) { + ret = __mmu_unsync_sp(kvm, sp, pvec, i, true, pte_level); + if (!ret) { + continue; + } else if (ret > 0) { + nr_unsync_leaf += ret; + } else { + goto out_failed; + } + } + + DebugFREE("return nr_unsync_leaf %d\n", nr_unsync_leaf); + return nr_unsync_leaf; + +out_failed: + DebugFREE("failed error %d\n", ret); + return ret; +} + +static int __mmu_release_walk(struct kvm *kvm, kvm_mmu_page_t *sp, + struct kvm_mmu_pages *pvec, int pte_level) +{ + int i, ret, nr_unsync_leaf = 0; + + DebugFREE("started to walk released SP %px level #%d\n", + sp, sp->role.level); + for (i = 0; i < 512; i++) { + ret = __mmu_unsync_sp(kvm, sp, pvec, i, false, pte_level); + if (!ret) { + continue; + } else if (ret > 0) 
{ + nr_unsync_leaf += ret; + } else { + goto out_failed; + } + } + if (nr_unsync_leaf == 0 && sp->role.level == pte_level) { + /* pgd/pud/pmd level PT is empty & can be released */ + DebugFREE("SP %px level #%d gfn 0x%llx gva 0x%lx is empty " + "to release\n", + sp, sp->role.level, sp->gfn, sp->gva); + nr_unsync_leaf++; + } + + DebugFREE("return nr_unsync_leaf %d\n", nr_unsync_leaf); + return nr_unsync_leaf; + +out_failed: + DebugFREE("failed error %d\n", ret); + return ret; +} + +#define INVALID_INDEX (-1) + +static int mmu_unsync_walk(struct kvm *kvm, kvm_mmu_page_t *sp, + struct kvm_mmu_pages *pvec, int pt_entries_level) +{ + int nr_unsync_leaf = 0; + + pvec->nr = 0; + DebugFREE("SP %px level #%d gfn 0x%llx gva 0x%lx\n", + sp, sp->role.level, sp->gfn, sp->gva); + if (!sp->unsync_children && !sp->released) { + DebugFREE("sp %px level #%d not released, return 0\n", + sp, sp->role.level); + return 0; + } + + mmu_pages_add(pvec, sp, INVALID_INDEX); + if (sp->released) { + nr_unsync_leaf = __mmu_release_walk(kvm, sp, pvec, + pt_entries_level); + } else { + nr_unsync_leaf = __mmu_unsync_walk(kvm, sp, pvec, + pt_entries_level); + } + + DebugFREE("return nr_unsync_leaf %d\n", nr_unsync_leaf); + return nr_unsync_leaf; +} + +static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) +{ + WARN_ON(!sp->unsync); + trace_kvm_mmu_sync_page(sp); + sp->unsync = 0; + --kvm->stat.mmu_unsync; +} + +static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, + struct list_head *invalid_list); +static void kvm_mmu_commit_zap_page(struct kvm *kvm, + struct list_head *invalid_list); + +/* + * NOTE: we should pay more attention on the zapped-obsolete page + * (is_obsolete_sp(sp) && sp->role.invalid) when you do hash list walk + * since it has been deleted from active_mmu_pages but still can be found + * at hast list. + * + * for_each_gfn_valid_sp() has skipped that kind of pages. 
+ */ +#define for_each_gfn_valid_sp(_kvm, _sp, _gfn) \ + hlist_for_each_entry(_sp, \ + &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \ + if ((_sp)->gfn != (_gfn) || is_obsolete_sp((_kvm), (_sp)) \ + || (_sp)->role.invalid) {} else + +#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn) \ + for_each_gfn_valid_sp(_kvm, _sp, _gfn) \ + if ((_sp)->role.direct) {} else + +/* @sp->gfn should be write-protected at the call site */ +static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, + struct list_head *invalid_list) +{ + if (sp->role.cr4_pae != !!is_pae(vcpu)) { + kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); + return false; + } + + if (vcpu->arch.mmu.sync_page(vcpu, sp) == 0) { + kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); + return false; + } + + return true; +} + +static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu, + struct list_head *invalid_list, + bool remote_flush, bool local_flush) +{ + if (!list_empty(invalid_list)) { + kvm_mmu_commit_zap_page(vcpu->kvm, invalid_list); + return; + } + + if (remote_flush) + kvm_flush_remote_tlbs(vcpu->kvm); + else if (local_flush) + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); +} + +#ifdef CONFIG_KVM_MMU_AUDIT +#include "mmu_audit.c" +#else +static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { } +static void mmu_audit_disable(void) { } +#endif + +static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp) +{ + return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen); +} + +static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, + struct list_head *invalid_list) +{ + kvm_unlink_unsync_page(vcpu->kvm, sp); + return __kvm_sync_page(vcpu, sp, invalid_list); +} + +/* @gfn should be write-protected at the call site */ +static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, + struct list_head *invalid_list) +{ + struct kvm_mmu_page *s; + bool ret = false; + + for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { + if 
(!s->unsync) + continue; + + WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); + ret |= kvm_sync_page(vcpu, s, invalid_list); + } + + return ret; +} + +struct mmu_page_path { + struct kvm_mmu_page *parent[PT64_ROOT_LEVEL]; + unsigned int idx[PT64_ROOT_LEVEL]; +}; + +#define for_each_sp(pvec, sp, parents, i) \ + for (i = mmu_pages_first(&pvec, &parents, \ + PT_PAGE_TABLE_LEVEL); \ + i < pvec.nr && ({ sp = pvec.page[i].sp; 1; }); \ + i = mmu_pages_next(&pvec, &parents, i, \ + PT_PAGE_TABLE_LEVEL)) + +#define for_each_sp_level(pvec, sp, parents, i, pt_level) \ + for (i = mmu_pages_first(&pvec, &parents, pt_level); \ + i < pvec.nr && ({ sp = pvec.page[i].sp; 1; }); \ + i = mmu_pages_next(&pvec, &parents, i, \ + pt_level)) + +static int mmu_pages_next(struct kvm_mmu_pages *pvec, + struct mmu_page_path *parents, + int i, int pt_level) +{ + int n; + + DebugFREE("started pvec num %d i %d\n", pvec->nr, i); + for (n = i+1; n < pvec->nr; n++) { + struct kvm_mmu_page *sp = pvec->page[n].sp; + unsigned idx = pvec->page[n].idx; + int level = sp->role.level; + + DebugFREE("pvec [0x%02x] : SP %px level #%d gfn 0x%llx " + "gva 0x%lx idx 0x%03lx\n", + n, sp, level, sp->gfn, sp->gva, + idx * sizeof(pgprot_t)); + BUG_ON(level < pt_level); + parents->idx[level-1] = idx; + DebugFREE("parents level #%d idx %px : 0x%03lx\n", + level - 1, &parents->idx[level-1], + idx * sizeof(pgprot_t)); + if (level == pt_level) + break; + + parents->parent[level-2] = sp; + DebugFREE("parents level #%d parent %px : sp %px\n", + level - 2, &parents->parent[level-2], sp); + } + + return n; +} + +static int mmu_pages_first(struct kvm_mmu_pages *pvec, + struct mmu_page_path *parents, int pt_level) +{ + struct kvm_mmu_page *sp; + int level; + + DebugFREE("started pvec num %d\n", pvec->nr); + if (pvec->nr == 0) + return 0; + + WARN_ON(pvec->page[0].idx != INVALID_INDEX); + + sp = pvec->page[0].sp; + level = sp->role.level; + WARN_ON(level <= PT_PAGE_TABLE_LEVEL); + DebugFREE("pvec [0x%02x] : SP %px level #%d gfn 
0x%llx gva 0x%lx\n", + 0, sp, level, sp->gfn, sp->gva); + + parents->parent[level-2] = sp; + DebugFREE("parents level #%d parent %px : sp %px\n", + level - 2, &parents->parent[level-2], sp); + + /* Also set up a sentinel. Further entries in pvec are all + * children of sp, so this element is never overwritten. + */ + parents->parent[level-1] = NULL; + DebugFREE("parents level #%d parent %px : sp %px\n", + level - 1, &parents->parent[level-1], NULL); + return mmu_pages_next(pvec, parents, 0, pt_level); +} + +static void mmu_pages_clear_parents(struct mmu_page_path *parents, int pt_level) +{ + struct kvm_mmu_page *sp; + unsigned int level = pt_level - PT_PAGE_TABLE_LEVEL; + + do { + unsigned int idx = parents->idx[level]; + sp = parents->parent[level]; + if (!sp) + return; + + WARN_ON(idx == INVALID_INDEX); + if (!sp->released) { + clear_unsync_child_bit(sp, idx); + } + level++; + } while (!sp->unsync_children); +} + +static void mmu_sync_children(struct kvm_vcpu *vcpu, + struct kvm_mmu_page *parent) +{ + int i, nr_unsync_leaf; + struct kvm_mmu_page *sp; + struct mmu_page_path parents; + struct kvm_mmu_pages pages; + LIST_HEAD(invalid_list); + bool flush = false; + + DebugUNSYNC("started on VCPU #%d for parent SP %px\n", + vcpu->vcpu_id, parent); + while (nr_unsync_leaf = mmu_unsync_walk(vcpu->kvm, parent, &pages, + PT_PAGE_TABLE_LEVEL), + nr_unsync_leaf) { + bool protected = false; + + DebugFREE("nr_unsync_leaf is not zero %d\n", nr_unsync_leaf); + for_each_sp(pages, sp, parents, i) + protected |= rmap_write_protect(vcpu, sp->gfn); + + if (protected) { + kvm_flush_remote_tlbs(vcpu->kvm); + flush = false; + } + + for_each_sp(pages, sp, parents, i) { + flush |= kvm_sync_page(vcpu, sp, &invalid_list); + mmu_pages_clear_parents(&parents, PT_PAGE_TABLE_LEVEL); + } + if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) { + kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); + cond_resched_lock(&vcpu->kvm->mmu_lock); + flush = false; + } + } + if 
(nr_unsync_leaf == 0) { + DebugFREE("nr_unsync_leaf is zero %d\n", nr_unsync_leaf); + } + + kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); +} + +static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp) +{ + atomic_set(&sp->write_flooding_count, 0); +} + +static void clear_sp_write_flooding_count(pgprot_t *spte) +{ + struct kvm_mmu_page *sp = page_header(__pa(spte)); + + __clear_sp_write_flooding_count(sp); +} + +static void clear_shadow_pt(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, + bool validate) +{ + pgprot_t *spt; + pgprot_t init_pt; + int i; + + spt = sp->spt; + if (validate) { + pgprot_val(init_pt) = get_spte_valid_mask(vcpu->kvm); + } else { + pgprot_val(init_pt) = 0UL; + } + + for (i = 0; i < PT64_ENT_PER_PAGE; i++) { + spt[i] = init_pt; + } +} + +static void check_pt_validation(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) +{ + pgprot_t *spt; + pgprotval_t valid_pt; + int i; + + if (sp->role.direct) + return; + + if (sp->unsync) + return; + + spt = sp->spt; + valid_pt = get_spte_valid_mask(vcpu->kvm); + + for (i = 0; i < PT64_ENT_PER_PAGE; i++) { + KVM_BUG_ON(pgprot_val(spt[i]) != valid_pt); + } +} + +static inline bool +kvm_compare_mmu_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, + gva_t gaddr, gfn_t gfn, bool is_direct) +{ + gva_t sp_gva, pt_gva; + unsigned index; + const pt_struct_t *gpt; + const pt_level_t *gpt_level; + int level; + + if (unlikely(!is_paging(vcpu))) + return true; + + level = sp->role.level; + KVM_BUG_ON(level < PT_PAGE_TABLE_LEVEL); + gpt = kvm_get_vcpu_pt_struct(vcpu); + gpt_level = &gpt->levels[level]; + index = (gaddr & ~PAGE_MASK) / sizeof(pgprotval_t); + sp_gva = sp->gva & get_pt_level_mask(gpt_level); + sp_gva = set_pt_level_addr_index(sp_gva, index, gpt_level); + pt_gva = gaddr & get_pt_level_mask(gpt_level); + pt_gva = set_pt_level_addr_index(pt_gva, index, gpt_level); + if (!is_direct && pt_gva >= GUEST_KERNEL_IMAGE_AREA_BASE && + pt_gva < GUEST_KERNEL_IMAGE_AREA_BASE + + 
vcpu->arch.guest_size) { + /* it is virtual address from guest kernel image, */ + /* convert it to equal "virtual" physical */ + pt_gva -= GUEST_KERNEL_IMAGE_AREA_BASE; + pt_gva += GUEST_PAGE_OFFSET + vcpu->arch.guest_phys_base; + } + if (sp_gva != pt_gva) { + DebugFLOOD("SP for GFN 0x%llx map other virt " + "addr 0x%lx then need 0x%lx\n", + gfn, sp_gva, pt_gva); + return false; + } + return true; +} + +static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, + gfn_t gfn, + gva_t gaddr, + unsigned level, + int direct, + unsigned access, + bool validate) +{ + union kvm_mmu_page_role role; + unsigned quadrant; + struct kvm_mmu_page *sp; + bool need_sync = false; + bool flush = false; + LIST_HEAD(invalid_list); + + role = vcpu->arch.mmu.base_role; + role.level = level; + role.direct = direct; + if (role.direct) + role.cr4_pae = 0; + role.access = access; + if (!vcpu->arch.mmu.direct_map && + vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { + quadrant = gaddr >> (PAGE_SHIFT + (PT64_LEVEL_BITS * level)); + quadrant &= (1 << ((PT32_LEVEL_BITS - PT64_LEVEL_BITS) * + level)) - 1; + role.quadrant = quadrant; + } + for_each_gfn_valid_sp(vcpu->kvm, sp, gfn) { + if (!need_sync && sp->unsync) + need_sync = true; + + if (unlikely(sp->role.word != role.word)) { + if (unlikely(is_paging(vcpu) && + !sp->role.direct && role.cr4_pae)) { + DebugFLOOD("SP for GFN 0x%llx map other role " + "PT level 0x%x then need 0x%x\n", + gfn, sp->role.word, role.word); + } + continue; + } + + if (unlikely(!kvm_compare_mmu_page(vcpu, sp, gaddr, gfn, + direct))) + continue; + + if (sp->unsync) { + /* The page is good, but __kvm_sync_page might still end + * up zapping it. If so, break in order to rebuild it. 
+ */ + if (!__kvm_sync_page(vcpu, sp, &invalid_list)) + break; + + WARN_ON(!list_empty(&invalid_list)); + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + } + if (sp->unsync_children) + kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); + + KVM_BUG_ON(sp->released); + + __clear_sp_write_flooding_count(sp); + if (validate) { + check_pt_validation(vcpu, sp); + } + trace_kvm_mmu_get_page(sp, false); + return sp; + } + + ++vcpu->kvm->stat.mmu_cache_miss; + sp = kvm_mmu_alloc_page(vcpu, direct); + sp->gfn = gfn; + sp->gva = gaddr; + sp->role = role; + hlist_add_head(&sp->hash_link, + &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]); + kvm_init_sp_gmm_entry(sp); + if (!direct) { + /* + * we should do write protection before syncing pages + * otherwise the content of the synced shadow page may + * be inconsistent with guest page table. + */ + account_shadowed(vcpu->kvm, sp); + if (level == PT_PAGE_TABLE_LEVEL && + rmap_write_protect(vcpu, gfn)) + kvm_flush_remote_tlbs(vcpu->kvm); + + if (level > PT_PAGE_TABLE_LEVEL && need_sync) + flush |= kvm_sync_pages(vcpu, gfn, &invalid_list); + + if (level == PT64_ROOT_LEVEL) + kvm_unsync_page(vcpu, sp); + } + sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen; + clear_shadow_pt(vcpu, sp, validate); + DebugSPF("allocated shadow page at %px, level %d, gfn 0x%llx, " + "gva 0x%lx\n", + sp, sp->role.level, gfn, gaddr); + trace_kvm_mmu_get_page(sp, true); + + kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); + return sp; +} + +void shadow_pt_walk_init(kvm_shadow_walk_iterator_t *iterator, + struct kvm_vcpu *vcpu, hpa_t spt_root, u64 addr) +{ + iterator->addr = addr; + iterator->shadow_addr = spt_root; + iterator->level = vcpu->arch.mmu.shadow_root_level; + iterator->pt_struct = kvm_get_host_pt_struct(vcpu->kvm); + iterator->pt_level = &iterator->pt_struct->levels[iterator->level]; + + if (iterator->level == PT64_ROOT_LEVEL && + vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL && + !vcpu->arch.mmu.direct_map) { + --iterator->level; + 
--iterator->pt_level; + } + + if (iterator->level == PT32E_ROOT_LEVEL) { + iterator->shadow_addr + = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; + iterator->shadow_addr &= kvm_get_spte_pfn_mask(vcpu->kvm); + --iterator->level; + --iterator->pt_level; + if (!iterator->shadow_addr) { + iterator->level = 0; + iterator->pt_level = &iterator->pt_struct->levels[0]; + } + } +} +void shadow_walk_init(kvm_shadow_walk_iterator_t *iterator, + struct kvm_vcpu *vcpu, u64 addr) +{ + hpa_t spt_root = kvm_get_space_addr_root(vcpu, addr); + + shadow_pt_walk_init(iterator, vcpu, spt_root, addr); +} + +bool shadow_walk_okay(kvm_shadow_walk_iterator_t *iterator) +{ + if (iterator->level < PT_PAGE_TABLE_LEVEL) + return false; + + iterator->index = get_pt_level_addr_index(iterator->addr, + iterator->pt_level); + iterator->sptep = ((pgprot_t *)__va(iterator->shadow_addr)) + + iterator->index; + return true; +} + +void __shadow_walk_next(kvm_shadow_walk_iterator_t *iterator, pgprot_t spte) +{ + if (is_last_spte(spte, iterator->level)) { + iterator->level = 0; + iterator->pt_level = &iterator->pt_struct->levels[0]; + return; + } + + iterator->shadow_addr = kvm_pte_pfn_to_phys_addr(spte, + iterator->pt_struct); + --iterator->level; + --iterator->pt_level; +} + +void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator) +{ + return __shadow_walk_next(iterator, *iterator->sptep); +} + +static void link_shadow_page(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + pgprot_t *sptep, struct kvm_mmu_page *sp) +{ + struct kvm *kvm = vcpu->kvm; + pgprot_t spte; + + pgprot_val(spte) = get_spte_pt_user_prot(kvm); + spte = set_spte_pfn(kvm, spte, __pa(sp->spt) >> PAGE_SHIFT); + + mmu_spte_set(vcpu->kvm, sptep, spte); + + mmu_page_add_parent_pte(vcpu, sp, sptep); + + if (unlikely(!vcpu->arch.is_hv)) { + kvm_try_add_sp_to_gmm_list(gmm, sp); + } + + if (sp->unsync_children || sp->unsync) + mark_unsync(kvm, sptep); +} + +static void validate_direct_spte(struct kvm_vcpu *vcpu, pgprot_t *sptep, + unsigned 
direct_access) +{ + if (is_shadow_present_pte(vcpu->kvm, *sptep) && !is_large_pte(*sptep)) { + struct kvm_mmu_page *child; + + /* + * For the direct sp, if the guest pte's dirty bit + * changed form clean to dirty, it will corrupt the + * sp's access: allow writable in the read-only sp, + * so we should update the spte at this point to get + * a new sp with the correct access. + */ + child = page_header(kvm_spte_pfn_to_phys_addr(vcpu->kvm, + *sptep)); + if (child->role.access == direct_access) + return; + + drop_parent_pte(child, sptep); + kvm_flush_remote_tlbs(vcpu->kvm); + } +} + +void copy_guest_kernel_root_range(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + struct kvm_mmu_page *sp, pgprot_t *src_root) +{ + int start, end, index; + pgprot_t *dst_root = sp->spt; + pgprot_t *sptep, spte; + struct kvm_mmu_page *child; + gmm_struct_t *init_gmm = pv_vcpu_get_init_gmm(vcpu); + + start = GUEST_KERNEL_PGD_PTRS_START; + end = GUEST_KERNEL_PGD_PTRS_END; + + for (index = start; index < end; index++) { + sptep = &dst_root[index]; + spte = src_root[index]; + if (!is_shadow_present_pte(vcpu->kvm, spte)) + continue; + child = page_header(kvm_spte_pfn_to_phys_addr(vcpu->kvm, spte)); + KVM_BUG_ON(child == NULL); + link_shadow_page(vcpu, init_gmm, sptep, child); + DebugCPSPT("copied %px = 0x%lx from %px = 0x%lx index 0x%lx\n", + sptep, pgprot_val(*sptep), &src_root[index], + pgprot_val(spte), index * sizeof(pgprot_t)); + } +} + +void mmu_zap_linked_children(struct kvm *kvm, struct kvm_mmu_page *parent) +{ + int start, end, index; + pgprot_t *root_spt = parent->spt; + pgprot_t *sptep, spte; + struct kvm_mmu_page *child; + struct kvm_rmap_head *parent_ptes; + + start = GUEST_KERNEL_PGD_PTRS_START; + end = GUEST_KERNEL_PGD_PTRS_END; + + if (root_spt == (pgprot_t *)kvm_mmu_get_init_gmm_root(kvm)) { + /* it is guest kernel root PT, so free unconditionally */ + return; + } + for (index = start; index < end; index++) { + sptep = &root_spt[index]; + spte = *sptep; + if 
(!is_shadow_present_pte(kvm, spte)) + continue; + + child = page_header(kvm_spte_pfn_to_phys_addr(kvm, spte)); + parent_ptes = &child->parent_ptes; + if (!parent_ptes->val) { + pr_err("%s(): index 0x%lx %px : nothing links\n", + __func__, index * sizeof(pgprot_t), sptep); + KVM_BUG_ON(true); + } else if (!(parent_ptes->val & 1)) { + DebugFRSPT("index 0x%lx %px : only one last link\n", + index * sizeof(pgprot_t), sptep); + } else { + DebugFRSPT("index 0x%lx %px : many links\n", + index * sizeof(pgprot_t), sptep); + drop_parent_pte(child, sptep); + } + } +} + +static struct kvm_mmu_page *mmu_page_zap_pte(struct kvm *kvm, + struct kvm_mmu_page *sp, pgprot_t *spte) +{ + pgprot_t pte; + struct kvm_mmu_page *child = NULL; + + pte = *spte; + DebugPTE("started spte %px == 0x%lx\n", spte, pgprot_val(pte)); + if (is_shadow_present_pte(kvm, pte) && !is_mmio_spte(kvm, pte)) { + DebugFREE("SP %px level #%d gfn 0x%llx gva 0x%lx idx 0x%llx\n", + sp, sp->role.level, sp->gfn, sp->gva, + (u64)spte & ~PAGE_MASK); + if (is_last_spte(pte, sp->role.level)) { + drop_spte(kvm, spte); + DebugPTE("spte at %px == 0x%lx dropped\n", + spte, pgprot_val(*spte)); + if (is_large_pte(pte)) + --kvm->stat.lpages; + return NULL; + } else { + child = page_header( + kvm_spte_pfn_to_phys_addr(kvm, pte)); + drop_parent_pte(child, spte); + child->released = sp->released; + DebugPTE("dropped spte of child SP at %px\n", child); + return child; + } + } + + if (is_mmio_spte(kvm, pte)) + mmu_spte_clear_no_track(spte); + + return child; +} + +static void kvm_mmu_page_unlink_children(struct kvm *kvm, + struct kvm_mmu_page *sp) +{ + unsigned i; + + DebugFREE("SP %px level #%d gfn 0x%llx gva 0x%lx\n", + sp, sp->role.level, sp->gfn, sp->gva); + for (i = 0; i < PT64_ENT_PER_PAGE; ++i) + mmu_page_zap_pte(kvm, sp, sp->spt + i); +} + +static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) +{ + pgprot_t *sptep; + struct rmap_iterator iter; + + DebugFREE("sp %px level #%d gfn 0x%llx gva 0x%lx\n", + 
sp, sp->role.level, sp->gfn, sp->gva); + while ((sptep = rmap_get_first(kvm, &sp->parent_ptes, &iter))) { + DebugFREE("spte %px : 0x%lx\n", sptep, pgprot_val(*sptep)); + drop_parent_pte(sp, sptep); + } +} + +static int kvm_mmu_unlink_one_child(struct kvm *kvm, struct kvm_mmu_page *sp, + int spte_idx, struct list_head *invalid_list) +{ + int ret; + pgprot_t spte; + struct kvm_mmu_page *child_sp; + + spte = sp->spt[spte_idx]; + + if (!is_shadow_present_pte(kvm, spte)) + return 0; + + if (is_large_pte(spte)) + return 1; + + child_sp = page_header( + kvm_spte_pfn_to_phys_addr(kvm, spte)); + + /* Propagate released flag to lower levels of spt */ + child_sp->released = sp->released; + + if (sp->released) { + /* If released flag is set, then zap child */ + ret = kvm_mmu_prepare_zap_page(kvm, child_sp, invalid_list); + } else if (sp->unsync_children && + test_bit(spte_idx, sp->unsync_child_bitmap)) { + /* + * If relesed flag is not set, then zap child only + * if it is marked as unsynced. + */ + ret = kvm_mmu_prepare_zap_page(kvm, child_sp, + invalid_list); + /* Clear unsync flag for zapped child */ + clear_unsync_child_bit(sp, spte_idx); + } + + return ret; +} + +static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, + struct list_head *invalid_list) +{ + int ret = 0, spte_idx; + bool zap_this_sp; + + DebugFREE("started for SP %px level #%d gfn 0x%llx gva 0x%lx\n", + sp, sp->role.level, sp->gfn, sp->gva); + trace_kvm_mmu_prepare_zap_page(sp); + + ++kvm->stat.mmu_shadow_zapped; + + /* + * If this sp is pgd, zero mapping of guest kernel and host kernel + * ranges to prevent adding them to zap list. 
+ */ + if (sp->root_flags.has_host_pgds || sp->root_flags.has_guest_pgds) { + /* clear host PGDs, which were added to support hypervisor */ + /* MMU PTs at guest <-> hypervisor mode */ + kvm_clear_shadow_root(kvm, sp); + sp->root_flags.has_host_pgds = 0; + sp->root_flags.has_guest_pgds = 0; + } + + if (sp->role.level == PT_PAGE_TABLE_LEVEL) { + /* + * If this sp is on the last level of shadow pt + * (sptes map physical pages), then check if we need to + * zap list and return back to upper level of pt. + */ + if (sp->released || sp->unsync) { + zap_this_sp = true; + ret = PT_ENTRIES_PER_PAGE; + } else { + zap_this_sp = false; + ret = 0; + } + } else { + /* Scan all children of this sp */ + for (spte_idx = 0; spte_idx < PT_ENTRIES_PER_PAGE; + spte_idx++) { + ret += kvm_mmu_unlink_one_child(kvm, sp, spte_idx, + invalid_list); + } + + /* + * If this sp has released flag or all child sp's + * are marked as unsync, than zap it. + */ + if (sp->unsync_children == PT_ENTRIES_PER_PAGE || + sp->released) + zap_this_sp = true; + else + zap_this_sp = false; + + } + + if (!zap_this_sp) + return ret; + + /* + * Zap all entries of this sp , unlink it from parent and + * from children's parnt_ptes lists. + */ + kvm_mmu_page_unlink_children(kvm, sp); + kvm_mmu_unlink_parents(kvm, sp); + + if (!sp->role.invalid && !sp->role.direct) + unaccount_shadowed(kvm, sp); + + if (sp->unsync) + kvm_unlink_unsync_page(kvm, sp); + + if (!sp->root_count) { + /* Count self */ + ret++; + list_move(&sp->link, invalid_list); + kvm_mod_used_mmu_pages(kvm, -1); + } else { + list_move(&sp->link, &kvm->arch.active_mmu_pages); + + /* + * The obsolete pages can not be used on any vcpus. + * See the comments in kvm_mmu_invalidate_zap_all_pages(). 
+ */ + if (!sp->role.invalid && !is_obsolete_sp(kvm, sp)) + kvm_reload_remote_mmus(kvm); + } + sp->role.invalid = 1; + return ret; +} + +static void kvm_mmu_commit_zap_page(struct kvm *kvm, + struct list_head *invalid_list) +{ + struct kvm_mmu_page *sp, *nsp; + + if (list_empty(invalid_list)) + return; + + /* + * We need to make sure everyone sees our modifications to + * the page tables and see changes to vcpu->mode here. The barrier + * in the kvm_flush_remote_tlbs() achieves this. This pairs + * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end. + * + * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit + * guest mode and/or lockless shadow page table walks. + */ + kvm_flush_remote_tlbs(kvm); + + list_for_each_entry_safe(sp, nsp, invalid_list, link) { + WARN_ON(!sp->role.invalid || sp->root_count); + kvm_mmu_free_page(kvm, sp); + } +} + +static bool prepare_zap_oldest_mmu_page(struct kvm *kvm, + struct list_head *invalid_list) +{ + struct kvm_mmu_page *sp; + int zapped; + + if (list_empty(&kvm->arch.active_mmu_pages)) + return false; + + sp = list_last_entry(&kvm->arch.active_mmu_pages, + struct kvm_mmu_page, link); + zapped = kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); + + return (zapped > 0) ? 
true : false; +} + +void kvm_get_spt_translation(struct kvm_vcpu *vcpu, e2k_addr_t address, + pgdval_t *pgd, pudval_t *pud, pmdval_t *pmd, pteval_t *pte, int *pt_level) +{ + kvm_shadow_trans_t st; + pgprot_t spte; + int level, level_off; + + KVM_BUG_ON(address >= NATIVE_TASK_SIZE); + + spin_lock(&vcpu->kvm->mmu_lock); + + level_off = e2k_walk_shadow_pts(vcpu, address, &st, E2K_INVALID_PAGE); + *pt_level = E2K_PGD_LEVEL_NUM + 1; + + for (level = E2K_PT_LEVELS_NUM; level > level_off; level--) { + + spte = st.pt_entries[level].spte; + if (level == E2K_PGD_LEVEL_NUM) { + *pgd = pgprot_val(spte); + if (likely(!pgd_huge(__pgd(*pgd)) && + !pgd_none(__pgd(*pgd)) && + !pgd_bad(__pgd(*pgd)))) { + continue; + } + *pt_level = E2K_PGD_LEVEL_NUM; + break; + } + + if (level == E2K_PUD_LEVEL_NUM) { + *pud = pgprot_val(spte); + if (likely(!pud_huge(__pud(*pud)) && + !pud_none(__pud(*pud)) && + !pud_bad(__pud(*pud)))) { + continue; + } + *pt_level = E2K_PUD_LEVEL_NUM; + break; + } + + if (level == E2K_PMD_LEVEL_NUM) { + *pmd = pgprot_val(spte); + if (likely(!pmd_huge(__pmd(*pmd)) && + !pmd_none(__pmd(*pmd)) && + !pmd_bad(__pmd(*pmd)))) { + continue; + } + *pt_level = E2K_PMD_LEVEL_NUM; + break; + } + + if (level == E2K_PTE_LEVEL_NUM) { + *pte = pgprot_val(spte); + *pt_level = E2K_PTE_LEVEL_NUM; + break; + } + } + spin_unlock(&vcpu->kvm->mmu_lock); +} + +int kvm_get_va_spt_translation(struct kvm_vcpu *vcpu, e2k_addr_t address, + mmu_spt_trans_t __user *user_trans_info) +{ + mmu_spt_trans_t trans_info; + int ret; + + kvm_get_spt_translation(vcpu, address, + &trans_info.pgd, &trans_info.pud, &trans_info.pmd, + &trans_info.pte, &trans_info.pt_levels); + + ret = kvm_vcpu_copy_to_guest(vcpu, user_trans_info, &trans_info, + sizeof(trans_info)); + if (unlikely(ret < 0)) { + pr_err("%s(): could not copy info to user, error %d\n", + __func__, ret); + return ret; + } + return 0; +} + +unsigned long kvm_get_gva_to_hva(struct kvm_vcpu *vcpu, gva_t gva) +{ + unsigned long hva; + + hva = 
kvm_vcpu_gva_to_hva(vcpu, gva, true, NULL); + if (kvm_is_error_hva(hva)) { + pr_err("%s(): failed to convert GVA 0x%lx to HVA\n", + __func__, gva); + hva = 0; + } + return hva; +} + +/* + * Changing the number of mmu pages allocated to the vm + * Note: if goal_nr_mmu_pages is too small, you will get dead lock + */ +void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages) +{ + LIST_HEAD(invalid_list); + + spin_lock(&kvm->mmu_lock); + + if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) { + /* Need to free some mmu pages to achieve the goal. */ + while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) + if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list)) + break; + + kvm_mmu_commit_zap_page(kvm, &invalid_list); + goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages; + } + + kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages; + + spin_unlock(&kvm->mmu_lock); +} + +int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) +{ + struct kvm_mmu_page *sp; + LIST_HEAD(invalid_list); + int r; + + pgprintk("%s: looking for gfn %llx\n", __func__, gfn); + r = 0; + spin_lock(&kvm->mmu_lock); + for_each_gfn_indirect_valid_sp(kvm, sp, gfn) { + pgprintk("%s: gfn %llx role %x\n", __func__, gfn, + sp->role.word); + r = 1; + kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); + } + kvm_mmu_commit_zap_page(kvm, &invalid_list); + spin_unlock(&kvm->mmu_lock); + + return r; +} +EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page); + +static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) +{ + trace_kvm_mmu_unsync_page(sp); + ++vcpu->kvm->stat.mmu_unsync; + sp->unsync = 1; + + kvm_mmu_mark_parents_unsync(vcpu->kvm, sp); +} + +static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, + bool can_unsync) +{ + struct kvm_mmu_page *sp; + struct kvm_memory_slot *slot; + + slot = __gfn_to_memslot(kvm_memslots_for_spte_role(vcpu->kvm, 0), + gfn); + + if (kvm_page_track_is_active(vcpu->kvm, slot, gfn, + KVM_PAGE_TRACK_WRITE)) + return true; + + 
for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { + if (!can_unsync) + return true; + + if (sp->unsync) + continue; + + WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL); + kvm_unsync_page(vcpu, sp); + } + + return false; +} + +static bool kvm_is_mmio_pfn(kvm_pfn_t pfn) +{ + if (pfn_valid(pfn)) + return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)); + + return true; +} + +static int set_spte(struct kvm_vcpu *vcpu, pgprot_t *sptep, + unsigned pte_access, int level, + gfn_t gfn, kvm_pfn_t pfn, bool speculative, + bool can_unsync, bool host_writable, + bool only_validate, u64 pte_cui) +{ + struct kvm *kvm = vcpu->kvm; + pgprot_t spte; + int ret = 0; + + DebugSPF("level #%d gfn 0x%llx pfn 0x%llx pte access 0x%x\n", + level, gfn, pfn, pte_access); + if (set_mmio_spte(vcpu, sptep, gfn, pfn, level, pte_access)) + return 0; + + /* + * For the EPT case, shadow_present_mask is 0 if hardware + * supports exec-only page table entries. In that case, + * ACC_USER_MASK and shadow_user_mask are used to represent + * read access. See FNAME(gpte_access) in paging_tmpl.h. 
+ */ + if (!only_validate) { + pgprot_val(spte) = get_spte_present_valid_mask(kvm); + } else { + pgprot_val(spte) = get_spte_valid_mask(kvm); + goto set_pte; + } + DebugSPF("spte %px base value 0x%lx, speculative %d\n", + sptep, pgprot_val(spte), speculative); + if (!speculative) + spte = set_spte_accessed_mask(kvm, spte); + + if (pte_access & ACC_EXEC_MASK) + spte = set_spte_x_mask(kvm, spte); + else + spte = set_spte_nx_mask(kvm, spte); + + if (pte_access & ACC_USER_MASK) + spte = set_spte_user_mask(kvm, spte); + else + spte = set_spte_priv_mask(kvm, spte); + + if (level > PT_PAGE_TABLE_LEVEL) + spte = set_spte_huge_page_mask(kvm, spte); + + spte = set_spte_memory_type_mask(vcpu, spte, + gfn, kvm_is_mmio_pfn(pfn)); + + if (host_writable) + spte = set_spte_host_writable_mask(kvm, spte); + else + pte_access &= ~ACC_WRITE_MASK; + DebugSPF("spte %px current value 0x%lx, host_writable %d\n", + sptep, pgprot_val(spte), host_writable); + + spte = set_spte_cui(spte, pte_cui); + + spte = set_spte_pfn(kvm, spte, pfn); + + if (pte_access & ACC_WRITE_MASK) { + + /* + * Other vcpu creates new sp in the window between + * mapping_level() and acquiring mmu-lock. We can + * allow guest to retry the access, the mapping can + * be fixed if guest refault. + */ + if (level > PT_PAGE_TABLE_LEVEL && + mmu_gfn_lpage_is_disallowed(vcpu, gfn, level)) + goto done; + + spte = set_spte_writable_mask(kvm, spte); + spte = set_spte_mmu_writable_mask(kvm, spte); + + /* + * Optimization: for pte sync, if spte was writable the hash + * lookup is unnecessary (and expensive). Write protection + * is responsibility of mmu_get_page / kvm_sync_page. + * Same reasoning can be applied to dirty page accounting. 
+ */ + if (!can_unsync && is_writable_pte(*sptep)) + goto set_pte; + + if (mmu_need_write_protect(vcpu, gfn, can_unsync)) { + pgprintk("%s: found shadow page for %llx, marking ro\n", + __func__, gfn); + ret = PFRES_WRITE_TRACK; + pte_access &= ~ACC_WRITE_MASK; + spte = clear_spte_writable_mask(kvm, spte); + spte = clear_spte_mmu_writable_mask(kvm, spte); + } + } + + if (pte_access & ACC_WRITE_MASK) { + kvm_vcpu_mark_page_dirty(vcpu, gfn); + spte = set_spte_dirty_mask(kvm, spte); + } + DebugSPF("spte %px final value 0x%lx, can_unsync %d\n", + sptep, pgprot_val(spte), can_unsync); + +set_pte: + if (mmu_spte_update(kvm, sptep, spte)) + kvm_flush_remote_tlbs(vcpu->kvm); +done: + DebugSPF("spte %px == 0x%lx\n", sptep, pgprot_val(spte)); + return ret; +} + +static pf_res_t mmu_set_spte(struct kvm_vcpu *vcpu, pgprot_t *sptep, + unsigned pte_access, int write_fault, + int level, gfn_t gfn, kvm_pfn_t pfn, + bool speculative, bool host_writable, + bool only_validate, u64 pte_cui) +{ + int was_rmapped = 0; + int rmap_count; + pf_res_t emulate = PFRES_NO_ERR; + + pgprintk("%s: spte %lx write_fault %d gfn %llx\n", + __func__, pgprot_val(*sptep), write_fault, gfn); + + if (only_validate) + pfn = KVM_PFN_NULL; + if (is_shadow_present_pte(vcpu->kvm, *sptep)) { + pgprintk("updating spte %px == 0x%lx, new pfn 0x%llx spec %d " + "wr %d only validate %d\n", + sptep, pgprot_val(*sptep), + pfn, speculative, host_writable, only_validate); + /* + * If we overwrite a PTE page pointer with a 2MB PMD, unlink + * the parent of the now unreachable PTE. 
+ */ + if (level > PT_PAGE_TABLE_LEVEL && + !is_large_pte(*sptep)) { + struct kvm_mmu_page *child; + pgprot_t pte = *sptep; + + pgprintk("hfn old is not large, new on level #%d\n", + level); + child = page_header(kvm_spte_pfn_to_phys_addr(vcpu->kvm, + pte)); + drop_parent_pte(child, sptep); + kvm_flush_remote_tlbs(vcpu->kvm); + } else if (pfn == KVM_PFN_NULL) { + KVM_BUG_ON(true); + } else if (pfn != spte_to_pfn(vcpu->kvm, *sptep)) { + pgprintk("hfn old %llx new %llx\n", + spte_to_pfn(vcpu->kvm, *sptep), pfn); + drop_spte(vcpu->kvm, sptep); + kvm_flush_remote_tlbs(vcpu->kvm); + } else { + was_rmapped = 1; + } + } + + if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative, + true, host_writable, only_validate, pte_cui)) { + if (write_fault) + emulate = PFRES_WRITE_TRACK; + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + } + + if (unlikely(is_mmio_spte(vcpu->kvm, *sptep) && + !is_mmio_prefixed_gfn(vcpu, gfn))) + emulate = PFRES_TRY_MMIO; + + pgprintk("%s: setting spte %lx\n", __func__, pgprot_val(*sptep)); + if (!only_validate && !is_mmio_space_pfn(pfn)) + pgprintk("instantiating %s PTE (%s) at %llx (%lx) addr %px\n", + (is_large_pte(*sptep)) ? "2MB" : "4kB", + (pgprot_val(*sptep) & PT_PRESENT_MASK) ? 
"RW" : "R", + gfn, pgprot_val(*sptep), sptep); + else + pgprintk("instantiating only valid PTE at %llx (%lx) addr %px\n", + gfn, pgprot_val(*sptep), sptep); + if (!was_rmapped && is_large_pte(*sptep) && !only_validate) + ++vcpu->kvm->stat.lpages; + + if (is_shadow_present_pte(vcpu->kvm, *sptep) && + !is_mmio_prefixed_gfn(vcpu, gfn)) { + if (!was_rmapped) { + rmap_count = rmap_add(vcpu, sptep, gfn); + if (rmap_count > RMAP_RECYCLE_THRESHOLD) + rmap_recycle(vcpu, sptep, gfn); + } + } + + if (!only_validate && pfn != KVM_PFN_NULL) + kvm_release_pfn_clean(pfn); + + return emulate; +} + +static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, + struct kvm_memory_slot **slot, bool no_dirty_log) +{ + *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); + if (!*slot) { + if (gfn_is_from_mmio_space(vcpu, gfn)) { + return KVM_PFN_MMIO_FAULT; + } + } + + return gfn_to_pfn_memslot_atomic(*slot, gfn); +} + +static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, + struct kvm_mmu_page *sp, + pgprot_t *start, pgprot_t *end) +{ + struct page *pages[PTE_PREFETCH_NUM]; + struct kvm_memory_slot *slot; + unsigned access = sp->role.access; + int i, ret; + gfn_t gfn; + + gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt); + slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK); + if (!slot) + return -1; + + ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start); + if (ret <= 0) + return -1; + + for (i = 0; i < ret; i++, gfn++, start++) + mmu_set_spte(vcpu, start, access, 0, sp->role.level, gfn, + page_to_pfn(pages[i]), true, true, false, 0); + + return 0; +} + +static void __direct_pte_prefetch(struct kvm_vcpu *vcpu, + struct kvm_mmu_page *sp, pgprot_t *sptep) +{ + pgprot_t *spte, *start = NULL; + int i; + + WARN_ON(!sp->role.direct); + + i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1); + spte = sp->spt + i; + + for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) { + if (is_shadow_present_pte(vcpu->kvm, *spte) || spte == sptep) { + if (!start) + continue; + 
if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0) + break; + start = NULL; + } else if (!start) { + start = spte; + } + } +} + +static void direct_pte_prefetch(struct kvm_vcpu *vcpu, pgprot_t *sptep) +{ + struct kvm_mmu_page *sp; + + /* + * Since it's no accessed bit on EPT, it's no way to + * distinguish between actually accessed translations + * and prefetched, so disable pte prefetch if EPT is + * enabled. + */ + if (!get_spte_accessed_mask(vcpu->kvm)) + return; + + sp = page_header(__pa(sptep)); + if (sp->role.level > PT_PAGE_TABLE_LEVEL) + return; + + __direct_pte_prefetch(vcpu, sp, sptep); +} + +static pf_res_t __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable, + int level, gfn_t gfn, kvm_pfn_t pfn, bool prefault) +{ + kvm_shadow_walk_iterator_t iterator; + kvm_mmu_page_t *sp; + pf_res_t emulate = PFRES_NO_ERR; + gfn_t pseudo_gfn; + gmm_struct_t *init_gmm = NULL; + + DebugNONP("started for level %d gfn 0x%llx pfn 0x%llx\n", + level, gfn, pfn); + + if (!VALID_PAGE(kvm_get_gp_phys_root(vcpu))) + return 0; + + if (unlikely(!vcpu->arch.is_hv)) { + init_gmm = pv_vcpu_get_init_gmm(vcpu); + } + + for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { + DebugNONP("iterator level %d spte %px == 0x%lx\n", + iterator.level, iterator.sptep, + pgprot_val(*iterator.sptep)); + if (iterator.level == level) { + emulate = mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, + write, level, gfn, pfn, prefault, + map_writable, false, 0); + DebugNONP("set spte %px == 0x%lx\n", + iterator.sptep, pgprot_val(*iterator.sptep)); + if (emulate == PFRES_TRY_MMIO) + break; + direct_pte_prefetch(vcpu, iterator.sptep); + ++vcpu->stat.pf_fixed; + break; + } + + drop_large_spte(vcpu, iterator.sptep); + if (!is_shadow_present_pte(vcpu->kvm, *iterator.sptep)) { + u64 base_addr = iterator.addr; + + base_addr &= get_pt_level_mask(iterator.pt_level); + pseudo_gfn = base_addr >> PAGE_SHIFT; + sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr, + iterator.level - 1, 1, 
ACC_ALL, + false /* validate */); + + link_shadow_page(vcpu, init_gmm, iterator.sptep, sp); + DebugNONP("allocated PTD to nonpaging level %d, pseudo " + "gfn 0x%llx SPTE %px == 0x%lx\n", + iterator.level - 1, pseudo_gfn, + iterator.sptep, pgprot_val(*iterator.sptep)); + } + } + return emulate; +} + +pgprot_t nonpaging_gpa_to_pte(struct kvm_vcpu *vcpu, gva_t addr) +{ + kvm_shadow_walk_iterator_t iterator; + pgprot_t spte = {0ull}; + gpa_t gpa; + + DebugNONP("started for GVA 0x%lx\n", addr); + + gpa = nonpaging_gva_to_gpa(vcpu, addr, ACC_ALL, NULL); + + if (!kvm_is_visible_gfn(vcpu->kvm, gpa_to_gfn(gpa))) { + pr_err("%s(): address 0x%llx is not guest valid physical " + "address\n", + __func__, gpa); + return __pgprot(0); + } + + if (!VALID_PAGE(kvm_get_gp_phys_root(vcpu))) { + pr_err("%s(): nonpaging root PT is not yet allocated\n", + __func__); + return __pgprot(0); + } + + walk_shadow_page_lockless_begin(vcpu); + for_each_shadow_entry_lockless(vcpu, gpa, iterator, spte) { + DebugNONP("iteratot level %d SPTE %px == 0x%lx\n", + iterator.level, iterator.sptep, pgprot_val(spte)); + if (!is_shadow_present_pte(vcpu->kvm, spte)) + break; + } + walk_shadow_page_lockless_end(vcpu); + + return spte; +} + +static void kvm_send_hwpoison_signal(unsigned long address, + struct task_struct *task) +{ + kernel_siginfo_t info; + + info.si_signo = SIGBUS; + info.si_errno = 0; + info.si_code = BUS_MCEERR_AR; + info.si_addr = (void __user *)address; + info.si_addr_lsb = PAGE_SHIFT; + + send_sig_info(SIGBUS, &info, task); +} + +static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn) +{ + /* + * Do not cache the mmio info caused by writing the readonly gfn + * into the spte otherwise read access on readonly gfn also can + * caused mmio page fault and treat it as mmio access. + * Return 1 to tell kvm to emulate it. 
+ */ + if (pfn == KVM_PFN_ERR_RO_FAULT) + return 1; + + if (pfn == KVM_PFN_ERR_HWPOISON) { + kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), + current); + return 0; + } + + if (pfn == KVM_PFN_MMIO_FAULT) + return 1; + + return -EFAULT; +} + +static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, + gfn_t *gfnp, kvm_pfn_t *pfnp, + int *levelp) +{ + kvm_pfn_t pfn = *pfnp; + gfn_t gfn = *gfnp; + int level = *levelp; + + /* + * Check if it's a transparent hugepage. If this would be an + * hugetlbfs page, level wouldn't be set to + * PT_PAGE_TABLE_LEVEL and there would be no adjustment done + * here. + */ + if (!is_error_noslot_pfn(pfn) && + !kvm_is_reserved_pfn(pfn) && + level == PT_PAGE_TABLE_LEVEL && + PageTransCompoundMap(pfn_to_page(pfn)) && + !mmu_gfn_lpage_is_disallowed(vcpu, gfn, + PT_DIRECTORY_LEVEL)) { + unsigned long mask; + /* + * mmu_notifier_retry was successful and we hold the + * mmu_lock here, so the pmd can't become splitting + * from under us, and in turn + * __split_huge_page_refcount() can't run from under + * us and we can safely transfer the refcount from + * PG_tail to PG_head as we switch the pfn to tail to + * head. + */ + *levelp = level = PT_DIRECTORY_LEVEL; + mask = KVM_MMU_PAGES_PER_HPAGE(vcpu->kvm, level) - 1; + VM_BUG_ON((gfn & mask) != (pfn & mask)); + if (pfn & mask) { + gfn &= ~mask; + *gfnp = gfn; + kvm_release_pfn_clean(pfn); + pfn &= ~mask; + kvm_get_pfn(pfn); + *pfnp = pfn; + } + } +} + +static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, + kvm_pfn_t pfn, unsigned access, + pf_res_t *ret_val) +{ + int ret; + + if (is_mmio_space_pfn(pfn)) { + /* gfn is from MMIO space, but is not registered on host */ + *ret_val = PFRES_TRY_MMIO; + return false; + } + + /* The pfn is invalid, report the error! 
*/ + if (unlikely(is_error_pfn(pfn))) { + ret = kvm_handle_bad_page(vcpu, gfn, pfn); + if (ret) { + *ret_val = PFRES_ERR; + } else { + *ret_val = PFRES_NO_ERR; + } + return true; + } + + if (unlikely(is_noslot_pfn(pfn))) { + vcpu_cache_mmio_info(vcpu, gva, gfn, access); + *ret_val = PFRES_TRY_MMIO; + } else { + *ret_val = PFRES_NO_ERR; + } + + return false; +} + +static bool page_fault_can_be_fast(u32 error_code) +{ + /* + * Do not fix the mmio spte with invalid generation number which + * need to be updated by slow page fault path. + */ + if (unlikely(error_code & PFERR_RSVD_MASK)) + return false; + + /* + * #PF can be fast only if the shadow page table is present and it + * is caused by write-protect, that means we just need change the + * W bit of the spte which can be done out of mmu-lock. + */ + return (error_code & PFERR_PRESENT_MASK) && (error_code & PFERR_WRITE_MASK); +} + +static bool +fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, + pgprot_t *sptep, pgprot_t spte) +{ + gfn_t gfn; + + WARN_ON(!sp->role.direct); + + /* + * The gfn of direct spte is stable since it is calculated + * by sp->gfn. + */ + gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); + + /* + * Theoretically we could also set dirty bit (and flush TLB) here in + * order to eliminate unnecessary PML logging. See comments in + * set_spte. But fast_page_fault is very unlikely to happen with PML + * enabled, so we do not do this. This might result in the same GPA + * to be logged in PML buffer again when the write really happens, and + * eventually to be called by mark_page_dirty twice. But it's also no + * harm. This also avoids the TLB flush needed after setting dirty bit + * so non-PML cases won't be impacted. + * + * Compare with set_spte where instead shadow_dirty_mask is set. 
+ */ + if (cmpxchg64((pgprotval_t *)sptep, pgprot_val(spte), + pgprot_val(set_spte_writable_mask(vcpu->kvm, spte))) == + pgprot_val(spte)) + kvm_vcpu_mark_page_dirty(vcpu, gfn); + + return true; +} + +/* + * Return value: + * - true: let the vcpu to access on the same address again. + * - false: let the real page fault path to fix it. + */ +static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level, + u32 error_code) +{ + struct kvm_shadow_walk_iterator iterator; + struct kvm_mmu_page *sp; + bool ret = false; + pgprot_t spte = {0ull}; + + DebugNONP("VCPU #%d started for GVA 0x%lx level %d\n", + vcpu->vcpu_id, gva, level); + + if (!VALID_PAGE(kvm_get_space_addr_root(vcpu, gva))) + return false; + + if (!page_fault_can_be_fast(error_code)) + return false; + + walk_shadow_page_lockless_begin(vcpu); + for_each_shadow_entry_lockless(vcpu, gva, iterator, spte) { + DebugNONP("iteratot level %d SPTE %px == 0x%lx\n", + iterator.level, iterator.sptep, pgprot_val(spte)); + if (!is_shadow_present_pte(vcpu->kvm, spte) || + iterator.level < level) + break; + } + + /* + * If the mapping has been changed, let the vcpu fault on the + * same address again. + */ + if (!is_shadow_present_pte(vcpu->kvm, spte)) { + ret = true; + DebugNONP("the mapping has been changed, SPTE 0x%lx\n", + pgprot_val(spte)); + goto exit; + } + + sp = page_header(__pa(iterator.sptep)); + if (!is_last_spte(spte, sp->role.level)) { + DebugNONP("SPTE 0x%lx is not last\n", + pgprot_val(spte)); + goto exit; + } + + /* + * Check if it is a spurious fault caused by TLB lazily flushed. + * + * Need not check the access of upper level table entries since + * they are always ACC_ALL. + */ + if (is_writable_pte(spte)) { + ret = true; + DebugNONP("SPTE 0x%lx is writable - spurious fault\n", + pgprot_val(spte)); + goto exit; + } + + /* + * Currently, to simplify the code, only the spte write-protected + * by dirty-log can be fast fixed. 
+ */ + if (!spte_is_locklessly_modifiable(vcpu->kvm, spte)) { + DebugNONP("SPTE 0x%lx is not locklessly modifiable\n", + pgprot_val(spte)); + goto exit; + } + + /* + * Do not fix write-permission on the large spte since we only dirty + * the first page into the dirty-bitmap in fast_pf_fix_direct_spte() + * that means other pages are missed if its slot is dirty-logged. + * + * Instead, we let the slow page fault path create a normal spte to + * fix the access. + * + * See the comments in kvm_arch_commit_memory_region(). + */ + if (sp->role.level > PT_PAGE_TABLE_LEVEL) { + DebugNONP("SP level %d > PT_PAGE_TABLE_LEVEL\n", + sp->role.level); + goto exit; + } + + /* + * Currently, fast page fault only works for direct mapping since + * the gfn is not stable for indirect shadow page. + * See Documentation/virtual/kvm/locking.txt to get more detail. + */ + ret = fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte); + DebugNONP("fast fix direct SPTE %px == 0x%lx, ret %d\n", + iterator.sptep, pgprot_val(spte), ret); +exit: + trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep, + spte, ret); + walk_shadow_page_lockless_end(vcpu); + + return ret; +} + +static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, + gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable); +static void make_mmu_pages_available(struct kvm_vcpu *vcpu); + +static pf_res_t nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code, + gfn_t gfn, bool prefault) +{ + pf_res_t r; + int level; + bool force_pt_level = false; + kvm_pfn_t pfn; + bool map_writable, write = error_code & PFERR_WRITE_MASK; +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER + intc_mu_state_t *mu_state = get_intc_mu_state(vcpu); +#endif /* KVM_ARCH_WANT_MMU_NOTIFIER */ + + DebugNONP("VCPU #%d started for GVA 0x%lx gfn 0x%llx\n", + vcpu->vcpu_id, v, gfn); + level = mapping_level(vcpu, gfn, &force_pt_level); + if (likely(!force_pt_level)) { + /* + * This path builds a PAE pagetable - so we can map + * 2mb pages at maximum. 
Therefore check if the level + * is larger than that. + */ + if (is_ss(vcpu) && level > PT_DIRECTORY_LEVEL) + level = PT_DIRECTORY_LEVEL; + + gfn &= ~(KVM_MMU_PAGES_PER_HPAGE(vcpu->kvm, level) - 1); + } + DebugNONP("mapping level %d force %d gfn 0x%llx\n", + level, force_pt_level, gfn); + + if (fast_page_fault(vcpu, v, level, error_code)) + return 0; + + DebugNONP("there is slow page fault case\n"); + +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER + mu_state->notifier_seq = vcpu->kvm->mmu_notifier_seq; + smp_rmb(); +#endif /* KVM_ARCH_WANT_MMU_NOTIFIER */ + + pfn = mmio_prefixed_gfn_to_pfn(vcpu->kvm, gfn); + + if (unlikely(pfn)) { + map_writable = true; + } else { + if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, + &map_writable)) + return PFRES_NO_ERR; + DebugNONP("try_async_pf() returned pfn 0x%llx\n", pfn); + } + + if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r)) { + return r; + } + +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER + if (r == PFRES_TRY_MMIO) { + mu_state->may_be_retried = false; + } +#endif /* KVM_ARCH_WANT_MMU_NOTIFIER */ + + spin_lock(&vcpu->kvm->mmu_lock); +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER + if (!mu_state->ignore_notifier && r != PFRES_TRY_MMIO && + mmu_notifier_retry(vcpu->kvm, mu_state->notifier_seq)) + goto out_unlock; +#endif /* KVM_ARCH_WANT_MMU_NOTIFIER */ + make_mmu_pages_available(vcpu); + if (likely(!force_pt_level)) + transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); + r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault); + spin_unlock(&vcpu->kvm->mmu_lock); + + DebugNONP("returns %d\n", r); + return r; + +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER +out_unlock: + spin_unlock(&vcpu->kvm->mmu_lock); + kvm_release_pfn_clean(pfn); + KVM_BUG_ON(!mu_state->may_be_retried); + return PFRES_RETRY; +#endif /* KVM_ARCH_WANT_MMU_NOTIFIER */ +} + +/*static*/ e2k_addr_t get_vcpu_secondary_pptb(struct kvm_vcpu *vcpu) +{ + pr_err("FIXME: %s() is not implemented\n", __func__); + return get_vcpu_u2_pptb(vcpu); +} + +/*static*/ void 
set_vcpu_secondary_pptb(struct kvm_vcpu *vcpu, e2k_addr_t base) +{ + pr_err("FIXME: %s() is not implemented\n", __func__); + set_vcpu_u2_pptb(vcpu, base); +} + +/*static*/ e2k_addr_t get_vcpu_secondary_mpt_b(struct kvm_vcpu *vcpu) +{ + pr_err("FIXME: %s() is not implemented\n", __func__); + return get_vcpu_mpt_b(vcpu); +} + +/*static*/ void set_vcpu_secondary_mpt_b(struct kvm_vcpu *vcpu, e2k_addr_t base) +{ + pr_err("FIXME: %s() is not implemented\n", __func__); + set_vcpu_mpt_b(vcpu, base); +} + +/* + * Need support: + * - hardware virtualization; + * - paravirtualization; + * - non paging (nonp); + * - two dimensional paging (tdp); + * - shadow paging for + * hardware virtualizatio (spt_hv); + * paravirtualization (spt_pv); + * + * MMU structure of VCPU contains software copies of hardware registers + * to support first of all paravirtualization and nonpaging mode. + * Nonpaging with hardware virtulization can be based on hardware registers, + * but use software model for compatibility with paravirtualization. Besides + * registers should not be used by hardware until paging on guest is off. + * + * TDP mode can be only with hardware virtualization extensions and should + * use hardware registers and software part of guest context + * + * Shadow paging use software model or hardware registers depending on the type + * of virtualization - para or full. + * Besides shadow paging should replace some registers to enable guest + * addresses translations, so need have both shadow and source guest values. 
+ */ + +static void set_vcpu_nonp_u_pptb(struct kvm_vcpu *vcpu, pgprotval_t base) +{ + vcpu->arch.mmu.u_pptb = base; +} +static void set_vcpu_nonp_sh_u_pptb(struct kvm_vcpu *vcpu, hpa_t root) +{ + KVM_BUG_ON(is_phys_paging(vcpu)); + vcpu->arch.mmu.sh_u_root_hpa = root; +} +static void set_vcpu_nonp_u_vptb(struct kvm_vcpu *vcpu, gva_t base) +{ + vcpu->arch.mmu.u_vptb = base; +} +static void set_vcpu_nonp_sh_u_vptb(struct kvm_vcpu *vcpu, gva_t base) +{ + KVM_BUG_ON(is_phys_paging(vcpu)); + vcpu->arch.mmu.sh_u_vptb = base; +} +static void set_vcpu_nonp_os_pptb(struct kvm_vcpu *vcpu, pgprotval_t base) +{ + vcpu->arch.mmu.os_pptb = base; +} +static void set_vcpu_nonp_sh_os_pptb(struct kvm_vcpu *vcpu, hpa_t root) +{ + KVM_BUG_ON(is_phys_paging(vcpu)); + vcpu->arch.mmu.sh_os_root_hpa = root; +} +static void set_vcpu_nonp_os_vptb(struct kvm_vcpu *vcpu, gva_t base) +{ + vcpu->arch.mmu.os_vptb = base; +} +static void set_vcpu_nonp_sh_os_vptb(struct kvm_vcpu *vcpu, gva_t base) +{ + KVM_BUG_ON(is_phys_paging(vcpu)); + vcpu->arch.mmu.sh_os_vptb = base; +} +static void set_vcpu_nonp_os_vab(struct kvm_vcpu *vcpu, gva_t os_virt_base) +{ + KVM_BUG_ON(!is_sep_virt_spaces(vcpu)); + vcpu->arch.mmu.sh_os_vab = os_virt_base; +} +static void set_vcpu_nonp_gp_pptb(struct kvm_vcpu *vcpu, hpa_t root) +{ + KVM_BUG_ON(vcpu->arch.is_hv && !kvm_is_phys_pt_enable(vcpu->kvm)); + vcpu->arch.mmu.gp_root_hpa = root; +} +static void set_vcpu_nonp_pt_context(struct kvm_vcpu *vcpu, unsigned flags) +{ + if (likely((flags & GP_ROOT_PT_FLAG) && is_phys_paging(vcpu))) { + KVM_BUG_ON(!VALID_PAGE(vcpu->arch.mmu.gp_root_hpa)); + write_GP_PPTB_reg(vcpu->arch.mmu.gp_root_hpa); + } else if (is_shadow_paging(vcpu)) { + if ((flags & U_ROOT_PT_FLAG) || + ((flags & OS_ROOT_PT_FLAG) && + !is_sep_virt_spaces(vcpu))) { + KVM_BUG_ON(!VALID_PAGE(vcpu->arch.mmu.sh_u_root_hpa)); + vcpu->arch.sw_ctxt.sh_u_pptb = + vcpu->arch.mmu.sh_u_root_hpa; + vcpu->arch.sw_ctxt.sh_u_vptb = + vcpu->arch.mmu.sh_u_vptb; + } + if ((flags & 
OS_ROOT_PT_FLAG) && is_sep_virt_spaces(vcpu)) { + KVM_BUG_ON(!VALID_PAGE(vcpu->arch.mmu.sh_os_root_hpa)); + write_SH_OS_PPTB_reg(vcpu->arch.mmu.sh_os_root_hpa); + write_SH_OS_VPTB_reg(vcpu->arch.mmu.sh_os_vptb); + write_SH_OS_VAB_reg(vcpu->arch.mmu.sh_os_vab); + } + } else { + KVM_BUG_ON(true); + } +} +static void init_vcpu_nonp_ptb(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.u_pptb = 0; + vcpu->arch.mmu.sh_u_root_hpa = E2K_INVALID_PAGE; + vcpu->arch.mmu.u_vptb = 0; + vcpu->arch.mmu.sh_u_vptb = 0; + vcpu->arch.mmu.os_pptb = 0; + vcpu->arch.mmu.sh_os_root_hpa = E2K_INVALID_PAGE; + vcpu->arch.mmu.os_vptb = 0; + vcpu->arch.mmu.sh_os_vptb = 0; + vcpu->arch.mmu.sh_os_vab = 0; + vcpu->arch.mmu.gp_root_hpa = E2K_INVALID_PAGE; +} + +static pgprotval_t get_vcpu_nonp_u_pptb(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.u_pptb; +} +static hpa_t get_vcpu_nonp_sh_u_pptb(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.sh_u_root_hpa; +} +static gva_t get_vcpu_nonp_u_vptb(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.u_vptb; +} +static gva_t get_vcpu_nonp_sh_u_vptb(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.sh_u_vptb; +} +static pgprotval_t get_vcpu_nonp_os_pptb(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.os_pptb; +} +static hpa_t get_vcpu_nonp_sh_os_pptb(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.sh_os_root_hpa; +} +static gva_t get_vcpu_nonp_os_vptb(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.os_vptb; +} +static gva_t get_vcpu_nonp_sh_os_vptb(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.sh_os_vptb; +} +static gva_t get_vcpu_nonp_os_vab(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.sh_os_vab; +} +static hpa_t get_vcpu_nonp_gp_pptb(struct kvm_vcpu *vcpu) +{ + KVM_BUG_ON(vcpu->arch.is_hv && !kvm_is_phys_pt_enable(vcpu->kvm)); + return vcpu->arch.mmu.gp_root_hpa; +} + +static pgprotval_t get_vcpu_context_nonp_u_pptb(struct kvm_vcpu *vcpu) +{ + return (pgprotval_t)vcpu->arch.sw_ctxt.sh_u_pptb; +} +static gva_t 
get_vcpu_context_nonp_u_vptb(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.sw_ctxt.sh_u_vptb; +} +static pgprotval_t get_vcpu_context_nonp_os_pptb(struct kvm_vcpu *vcpu) +{ + return (pgprotval_t)read_SH_OS_PPTB_reg(); +} +static gva_t get_vcpu_context_nonp_os_vptb(struct kvm_vcpu *vcpu) +{ + return read_SH_OS_VPTB_reg(); +} +static gva_t get_vcpu_context_nonp_os_vab(struct kvm_vcpu *vcpu) +{ + return read_SH_OS_VAB_reg(); +} +static hpa_t get_vcpu_context_nonp_gp_pptb(struct kvm_vcpu *vcpu) +{ + KVM_BUG_ON(vcpu->arch.is_hv && !kvm_is_phys_pt_enable(vcpu->kvm)); + return read_GP_PPTB_reg(); +} + +static void set_vcpu_tdp_u_pptb(struct kvm_vcpu *vcpu, pgprotval_t base) +{ + /* Guest can and must set, host should not change it */ + KVM_BUG_ON(!vcpu->arch.is_pv); + vcpu->arch.mmu.u_pptb = base; +} +static void set_vcpu_tdp_sh_u_pptb(struct kvm_vcpu *vcpu, hpa_t root) +{ + /* shadow PTs are not used */ + KVM_BUG_ON(true); +} +static void set_vcpu_tdp_u_vptb(struct kvm_vcpu *vcpu, gva_t base) +{ + /* Guest can and must set, host should not change it */ + KVM_BUG_ON(!vcpu->arch.is_pv); + vcpu->arch.mmu.u_vptb = base; +} +static void set_vcpu_tdp_sh_u_vptb(struct kvm_vcpu *vcpu, gva_t base) +{ + /* shadow PTs are not used, so same as guest native PTs */ + KVM_BUG_ON(true); +} +static void set_vcpu_tdp_os_pptb(struct kvm_vcpu *vcpu, pgprotval_t base) +{ + /* Guest can and must set, host should not change it */ + KVM_BUG_ON(!vcpu->arch.is_pv); + vcpu->arch.mmu.os_pptb = base; +} +static void set_vcpu_tdp_sh_os_pptb(struct kvm_vcpu *vcpu, hpa_t root) +{ + /* shadow PTs are not used, so same as guest native PTs */ + KVM_BUG_ON(true); +} +static void set_vcpu_tdp_os_vptb(struct kvm_vcpu *vcpu, gva_t base) +{ + /* Guest can and must set, host should not change it */ + KVM_BUG_ON(!vcpu->arch.is_pv); + vcpu->arch.mmu.os_vptb = base; +} +static void set_vcpu_tdp_sh_os_vptb(struct kvm_vcpu *vcpu, gva_t base) +{ + /* shadow PTs are not used, so same as guest native PTs */ + 
KVM_BUG_ON(true); +} +static void set_vcpu_tdp_os_vab(struct kvm_vcpu *vcpu, gva_t os_virt_base) +{ + /* Guest can and must set, host should not change it */ + KVM_BUG_ON(!vcpu->arch.is_pv); + vcpu->arch.mmu.sh_os_vab = os_virt_base; +} +static void set_vcpu_tdp_gp_pptb(struct kvm_vcpu *vcpu, hpa_t root) +{ + /* initial PT (from nonpaging mode) is continuing to be used */ + KVM_BUG_ON(VALID_PAGE(root)); + vcpu->arch.mmu.gp_root_hpa = root; +} +static void set_vcpu_tdp_pt_context(struct kvm_vcpu *vcpu, unsigned flags) +{ + KVM_BUG_ON(!is_phys_paging(vcpu)); + if ((flags & GP_ROOT_PT_FLAG) && likely(is_phys_paging(vcpu))) { + if (VALID_PAGE(vcpu->arch.mmu.gp_root_hpa)) { + /* GP_* tables should not changed from nonpaging mode */ + KVM_BUG_ON(read_GP_PPTB_reg() != + vcpu->arch.mmu.gp_root_hpa); + } else { + /* invalidate GP_* tables register state */ + write_GP_PPTB_reg(vcpu->arch.mmu.gp_root_hpa); + } + } + if (vcpu->arch.is_pv) { + /* paravirtualized guest can pass own PTs through hcalls */ + if ((flags & U_ROOT_PT_FLAG) || + ((flags & OS_ROOT_PT_FLAG) && + !is_sep_virt_spaces(vcpu))) { + vcpu->arch.sw_ctxt.sh_u_pptb = vcpu->arch.mmu.u_pptb; + vcpu->arch.sw_ctxt.sh_u_vptb = vcpu->arch.mmu.u_vptb; + } + if ((flags & OS_ROOT_PT_FLAG) && is_sep_virt_spaces(vcpu)) { + write_SH_OS_PPTB_reg(vcpu->arch.mmu.os_pptb); + write_SH_OS_VPTB_reg(vcpu->arch.mmu.os_vptb); + write_SH_OS_VAB_reg(vcpu->arch.mmu.sh_os_vab); + } + } +} +static void init_vcpu_tdp_ptb(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.u_pptb = 0; + vcpu->arch.mmu.sh_u_root_hpa = E2K_INVALID_PAGE; + vcpu->arch.mmu.u_vptb = 0; + vcpu->arch.mmu.sh_u_vptb = 0; + vcpu->arch.mmu.os_pptb = 0; + vcpu->arch.mmu.sh_os_root_hpa = E2K_INVALID_PAGE; + vcpu->arch.mmu.os_vptb = 0; + vcpu->arch.mmu.sh_os_vptb = 0; + vcpu->arch.mmu.sh_os_vab = 0; + /* GP_* tables should not changed from nonpaging mode + vcpu->arch.mmu.gp_root_hpa = E2K_INVALID_PAGE; + */ +} + +static pgprotval_t get_vcpu_tdp_u_pptb(struct kvm_vcpu *vcpu) +{ + 
return vcpu->arch.mmu.u_pptb; +} +static hpa_t get_vcpu_tdp_sh_u_pptb(struct kvm_vcpu *vcpu) +{ + /* shadow PT does not be used */ + KVM_BUG_ON(true); + return (hpa_t)-EINVAL; +} +static gva_t get_vcpu_tdp_u_vptb(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.u_vptb; +} +static gva_t get_vcpu_tdp_sh_u_vptb(struct kvm_vcpu *vcpu) +{ + /* shadow PT does not be used */ + KVM_BUG_ON(true); + return (gva_t)-EINVAL; +} +static pgprotval_t get_vcpu_tdp_os_pptb(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.os_pptb; +} +static hpa_t get_vcpu_tdp_sh_os_pptb(struct kvm_vcpu *vcpu) +{ + /* shadow PT does not be used */ + KVM_BUG_ON(true); + return (hpa_t)-EINVAL; +} +static gva_t get_vcpu_tdp_os_vptb(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.os_vptb; +} +static gva_t get_vcpu_tdp_sh_os_vptb(struct kvm_vcpu *vcpu) +{ + /* shadow PT does not be used */ + KVM_BUG_ON(true); + return (gva_t)-EINVAL; +} +static gva_t get_vcpu_tdp_os_vab(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.sh_os_vab; +} +static hpa_t get_vcpu_tdp_gp_pptb(struct kvm_vcpu *vcpu) +{ + /* current guest root should be on hardware hypervisor register */ + return vcpu->arch.mmu.gp_root_hpa; +} + +static pgprotval_t get_vcpu_context_tdp_u_pptb(struct kvm_vcpu *vcpu) +{ + return (pgprotval_t)vcpu->arch.sw_ctxt.sh_u_pptb; +} +static gva_t get_vcpu_context_tdp_u_vptb(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.sw_ctxt.sh_u_vptb; +} +static pgprotval_t get_vcpu_context_tdp_os_pptb(struct kvm_vcpu *vcpu) +{ + return (pgprotval_t)read_SH_OS_PPTB_reg(); +} +static gva_t get_vcpu_context_tdp_os_vptb(struct kvm_vcpu *vcpu) +{ + return read_SH_OS_VPTB_reg(); +} +static gva_t get_vcpu_context_tdp_os_vab(struct kvm_vcpu *vcpu) +{ + return read_SH_OS_VAB_reg(); +} +static hpa_t get_vcpu_context_tdp_gp_pptb(struct kvm_vcpu *vcpu) +{ + KVM_BUG_ON(!is_phys_paging(vcpu)); + if (!VALID_PAGE(get_vcpu_tdp_gp_pptb(vcpu))) { + return E2K_INVALID_PAGE; + } + return read_GP_PPTB_reg(); +} + +static void 
set_vcpu_spt_u_pptb(struct kvm_vcpu *vcpu, pgprotval_t base) +{ + vcpu->arch.mmu.u_pptb = base; +} +static void set_vcpu_spt_sh_u_pptb(struct kvm_vcpu *vcpu, hpa_t root) +{ + /* hypervisor replaces the guest value with its own */ + vcpu->arch.mmu.sh_u_root_hpa = root; +} +static void set_vcpu_spt_u_vptb(struct kvm_vcpu *vcpu, gva_t base) +{ + vcpu->arch.mmu.u_vptb = base; +} +static void set_vcpu_spt_sh_u_vptb(struct kvm_vcpu *vcpu, gva_t base) +{ + /* hypervisor replaces the guest value with its own */ + vcpu->arch.mmu.sh_u_vptb = base; +} +static void set_vcpu_spt_os_pptb(struct kvm_vcpu *vcpu, pgprotval_t base) +{ + vcpu->arch.mmu.os_pptb = base; +} +static void set_vcpu_spt_sh_os_pptb(struct kvm_vcpu *vcpu, hpa_t root) +{ + /* hypervisor replaces the guest value with its own */ + vcpu->arch.mmu.sh_os_root_hpa = root; +} +static void set_vcpu_spt_os_vptb(struct kvm_vcpu *vcpu, gva_t base) +{ + vcpu->arch.mmu.os_vptb = base; +} +static void set_vcpu_spt_sh_os_vptb(struct kvm_vcpu *vcpu, gva_t base) +{ + /* hypervisor replaces the guest value with its own */ + vcpu->arch.mmu.sh_os_vptb = base; +} +static void set_vcpu_spt_os_vab(struct kvm_vcpu *vcpu, gva_t base) +{ + vcpu->arch.mmu.sh_os_vab = base; +} +static void set_vcpu_spt_gp_pptb(struct kvm_vcpu *vcpu, hpa_t root) +{ + /* initial PT (from nonpaging mode) is continuing to be used */ + KVM_BUG_ON(VALID_PAGE(root)); + vcpu->arch.mmu.gp_root_hpa = root; +} +static void set_vcpu_spt_pt_context(struct kvm_vcpu *vcpu, unsigned flags) +{ + if ((flags & U_ROOT_PT_FLAG) || + ((flags & OS_ROOT_PT_FLAG) && + !is_sep_virt_spaces(vcpu))) { + KVM_BUG_ON(!VALID_PAGE(vcpu->arch.mmu.sh_u_root_hpa)); + vcpu->arch.sw_ctxt.sh_u_pptb = vcpu->arch.mmu.sh_u_root_hpa; + vcpu->arch.sw_ctxt.sh_u_vptb = vcpu->arch.mmu.sh_u_vptb; + } + if ((flags & OS_ROOT_PT_FLAG) && is_sep_virt_spaces(vcpu)) { + KVM_BUG_ON(!VALID_PAGE(vcpu->arch.mmu.sh_os_root_hpa)); + write_SH_OS_PPTB_reg(vcpu->arch.mmu.sh_os_root_hpa); + 
write_SH_OS_VPTB_reg(vcpu->arch.mmu.sh_os_vptb); + write_SH_OS_VAB_reg(vcpu->arch.mmu.sh_os_vab); + } + if ((flags & GP_ROOT_PT_FLAG) && likely(is_phys_paging(vcpu))) { + /* GP_* tables should not changed from nonpaging mode */ + KVM_BUG_ON(read_GP_PPTB_reg() != vcpu->arch.mmu.gp_root_hpa); + } +} +static void init_vcpu_spt_ptb(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.u_pptb = 0; + vcpu->arch.mmu.sh_u_root_hpa = E2K_INVALID_PAGE; + vcpu->arch.mmu.u_vptb = 0; + vcpu->arch.mmu.sh_u_vptb = 0; + vcpu->arch.mmu.os_pptb = 0; + vcpu->arch.mmu.sh_os_root_hpa = E2K_INVALID_PAGE; + vcpu->arch.mmu.os_vptb = 0; + vcpu->arch.mmu.sh_os_vptb = 0; + vcpu->arch.mmu.sh_os_vab = 0; + /* GP_* tables should not changed from nonpaging mode + vcpu->arch.mmu.gp_root_hpa = E2K_INVALID_PAGE; + */ +} + +static pgprotval_t get_vcpu_spt_u_pptb(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.u_pptb; +} +static hpa_t get_vcpu_spt_sh_u_pptb(struct kvm_vcpu *vcpu) +{ + /* hypervisor replaces the guest value with its own */ + return vcpu->arch.mmu.sh_u_root_hpa; +} +static gva_t get_vcpu_spt_u_vptb(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.u_vptb; +} +static gva_t get_vcpu_spt_sh_u_vptb(struct kvm_vcpu *vcpu) +{ + /* hypervisor replaces the guest value with its own */ + return vcpu->arch.mmu.sh_u_vptb; +} +static pgprotval_t get_vcpu_spt_os_pptb(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.os_pptb; +} +static hpa_t get_vcpu_spt_sh_os_pptb(struct kvm_vcpu *vcpu) +{ + /* hypervisor replaces the guest value with its own */ + return vcpu->arch.mmu.sh_os_root_hpa; +} +static gva_t get_vcpu_spt_os_vptb(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.os_vptb; +} +static gva_t get_vcpu_spt_sh_os_vptb(struct kvm_vcpu *vcpu) +{ + /* hypervisor replaces the guest value with its own */ + return vcpu->arch.mmu.sh_os_vptb; +} +static gva_t get_vcpu_spt_os_vab(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.sh_os_vab; +} +static hpa_t get_vcpu_spt_gp_pptb(struct kvm_vcpu *vcpu) +{ + if 
(is_phys_paging(vcpu)) { + return read_GP_PPTB_reg(); + } else { + return vcpu->arch.mmu.gp_root_hpa; + } +} + +static pgprotval_t get_vcpu_context_spt_u_pptb(struct kvm_vcpu *vcpu) +{ + /* hypervisor replaces the guest value with its own */ + return (pgprotval_t)vcpu->arch.sw_ctxt.sh_u_pptb; +} +static gva_t get_vcpu_context_spt_u_vptb(struct kvm_vcpu *vcpu) +{ + /* hypervisor replaces the guest value with its own */ + return vcpu->arch.sw_ctxt.sh_u_vptb; +} +static pgprotval_t get_vcpu_context_spt_os_pptb(struct kvm_vcpu *vcpu) +{ + /* hypervisor replaces the guest value with its own */ + return (pgprotval_t)read_SH_OS_PPTB_reg(); +} +static gva_t get_vcpu_context_spt_os_vptb(struct kvm_vcpu *vcpu) +{ + /* hypervisor replaces the guest value with its own */ + return read_SH_OS_VPTB_reg(); +} +static gva_t get_vcpu_context_spt_os_vab(struct kvm_vcpu *vcpu) +{ + return read_SH_OS_VAB_reg(); +} +static hpa_t get_vcpu_context_spt_gp_pptb(struct kvm_vcpu *vcpu) +{ + KVM_BUG_ON(!is_phys_paging(vcpu) && vcpu->arch.is_hv); + return read_GP_PPTB_reg(); +} + +void mmu_get_spt_roots(struct kvm_vcpu *vcpu, unsigned flags, + hpa_t *os_root_p, hpa_t *u_root_p, hpa_t *gp_root_p) +{ + hpa_t os_root, u_root, gp_root; + + if (flags & OS_ROOT_PT_FLAG) { + if (is_sep_virt_spaces(vcpu)) { + os_root = kvm_get_space_type_spt_os_root(vcpu); + } else { + os_root = kvm_get_space_type_spt_u_root(vcpu); + } + if (flags & U_ROOT_PT_FLAG) { + if (!is_sep_virt_spaces(vcpu)) { + /* common OS & USER root */ + u_root = os_root; + } else { + u_root = kvm_get_space_type_spt_u_root(vcpu); + } + } else { + u_root = E2K_INVALID_PAGE; + } + } else if (!(flags & U_ROOT_PT_FLAG)) { + os_root = E2K_INVALID_PAGE; + u_root = E2K_INVALID_PAGE; + } else { + os_root = E2K_INVALID_PAGE; + u_root = kvm_get_space_type_spt_u_root(vcpu); + } + if (flags & GP_ROOT_PT_FLAG) { + gp_root = kvm_get_gp_phys_root(vcpu); + } else { + gp_root = E2K_INVALID_PAGE; + } + if (os_root_p != NULL) + *os_root_p = os_root; + if 
(u_root_p != NULL) + *u_root_p = u_root; + if (gp_root_p != NULL) + *gp_root_p = gp_root; +} + +void mmu_check_invalid_roots(struct kvm_vcpu *vcpu, bool invalid, + unsigned flags) +{ + if (is_tdp_paging(vcpu) || !is_paging(vcpu) && is_phys_paging(vcpu)) { + hpa_t gp_root; + + gp_root = kvm_get_gp_phys_root(vcpu); + if (invalid) { + WARN_ON(VALID_PAGE(gp_root)); + } else { + WARN_ON(!VALID_PAGE(gp_root)); + } + } + if (is_shadow_paging(vcpu)) { + hpa_t os_root, u_root; + + mmu_get_spt_roots(vcpu, flags, &os_root, &u_root, NULL); + if (invalid) { + WARN_ON(VALID_PAGE(os_root)); + WARN_ON(VALID_PAGE(u_root)); + } else { + if (flags & U_ROOT_PT_FLAG) { + WARN_ON(!VALID_PAGE(u_root)); + } + if (flags & OS_ROOT_PT_FLAG) { + WARN_ON(!VALID_PAGE(os_root)); + } + } + } +} + +static void do_free_spt_root(struct kvm_vcpu *vcpu, hpa_t root_hpa, bool force) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_mmu_page *sp; + LIST_HEAD(invalid_list); + + DebugFREE("started to free root hpa 0x%llx\n", root_hpa); + if (!VALID_PAGE(root_hpa)) { + MMU_WARN_ON(true); + return; + } + + spin_lock(&kvm->mmu_lock); + sp = page_header(root_hpa); + if (!force) { + KVM_BUG_ON(sp->root_count <= 0); + } else { + /* FIXME: root counter should be zero to release sp. 
*/ + /* It need implement strict mechanism get()/put() */ + /* to account the current users of the structure */ + --sp->root_count; + KVM_BUG_ON(sp->root_count != 0); + sp->released = true; + } + DebugFREE("freed root 0x%llx, SP at %px, count %d (invalid %d), " + "gfn 0x%llx\n", + root_hpa, sp, sp->root_count, sp->role.invalid, sp->gfn); + if (!sp->root_count && sp->role.invalid || force) { + int zapped = kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); + DebugFREE("zapped %d pages\n", zapped); + kvm_mmu_commit_zap_page(kvm, &invalid_list); + } + spin_unlock(&kvm->mmu_lock); +} + +void mmu_free_spt_root(struct kvm_vcpu *vcpu, hpa_t root_hpa) +{ + do_free_spt_root(vcpu, root_hpa, false); +} + +void mmu_release_spt_root(struct kvm_vcpu *vcpu, hpa_t root_hpa) +{ + do_free_spt_root(vcpu, root_hpa, true); +} + +static void e2k_mmu_free_roots(struct kvm_vcpu *vcpu, unsigned flags) +{ + hpa_t gp_root, os_root, u_root; + + KVM_BUG_ON(!(vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL && + (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL || + vcpu->arch.mmu.direct_map))); + + if (vcpu->arch.mmu.direct_map) { + if (is_phys_paging(vcpu) || flags & GP_ROOT_PT_FLAG) { + if (!(flags & GP_ROOT_PT_FLAG)) + return; + gp_root = kvm_get_gp_phys_root(vcpu); + if (!VALID_PAGE(gp_root)) + return; + mmu_free_spt_root(vcpu, gp_root); + kvm_set_gp_phys_root(vcpu, E2K_INVALID_PAGE); + } + if (is_shadow_paging(vcpu)) { + kvm_set_space_type_spt_u_root(vcpu, E2K_INVALID_PAGE); + kvm_set_space_type_spt_os_root(vcpu, E2K_INVALID_PAGE); + } + /* invalidate context registers + kvm_set_vcpu_pt_context(vcpu); + */ + return; + } + + if (!(flags & (OS_ROOT_PT_FLAG | U_ROOT_PT_FLAG))) + return; + + mmu_get_spt_roots(vcpu, flags, &os_root, &u_root, NULL); + if (!VALID_PAGE(os_root) && !VALID_PAGE(u_root)) + return; + + if (VALID_PAGE(u_root) && + ((flags & U_ROOT_PT_FLAG) || + ((flags & OS_ROOT_PT_FLAG) && + !is_sep_virt_spaces(vcpu)))) { + mmu_free_spt_root(vcpu, u_root); + 
kvm_set_space_type_spt_u_root(vcpu, E2K_INVALID_PAGE); + } + if (VALID_PAGE(os_root) && (flags & OS_ROOT_PT_FLAG)) { + mmu_free_spt_root(vcpu, os_root); + kvm_set_space_type_spt_os_root(vcpu, E2K_INVALID_PAGE); + } +} + +static void mmu_free_roots(struct kvm_vcpu *vcpu, unsigned flags) +{ + int i; + struct kvm_mmu_page *sp; + LIST_HEAD(invalid_list); + + if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL && + (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL || + vcpu->arch.mmu.direct_map)) { + e2k_mmu_free_roots(vcpu, flags); + return; + } + + spin_lock(&vcpu->kvm->mmu_lock); + for (i = 0; i < 4; ++i) { + hpa_t root = vcpu->arch.mmu.pae_root[i]; + + if (root) { + root &= kvm_get_spte_pfn_mask(vcpu->kvm); + sp = page_header(root); + --sp->root_count; + if (!sp->root_count && sp->role.invalid) + kvm_mmu_prepare_zap_page(vcpu->kvm, sp, + &invalid_list); + } + vcpu->arch.mmu.pae_root[i] = E2K_INVALID_PAGE; + } + kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); + spin_unlock(&vcpu->kvm->mmu_lock); +/* kvm_set_space_type_root_hpa(vcpu, E2K_INVALID_PAGE, u_root); */ +} + +static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn) +{ + int ret = 0; + + if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) { + kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); + ret = 1; + } + + return ret; +} + +static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) +{ + struct kvm_mmu_page *sp; + unsigned i; + + DebugKVM("started on VCPU #%d\n", vcpu->vcpu_id); + if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { + hpa_t root; + + MMU_WARN_ON(VALID_PAGE(kvm_get_gp_phys_root(vcpu))); + + spin_lock(&vcpu->kvm->mmu_lock); + make_mmu_pages_available(vcpu); + sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL, 1, ACC_ALL, + false /* validate */); + ++sp->root_count; + spin_unlock(&vcpu->kvm->mmu_lock); + root = __pa(sp->spt); + kvm_set_gp_phys_root(vcpu, root); + } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) { + for (i = 0; i < 4; ++i) { + hpa_t root = 
vcpu->arch.mmu.pae_root[i]; + + MMU_WARN_ON(VALID_PAGE(root)); + spin_lock(&vcpu->kvm->mmu_lock); + make_mmu_pages_available(vcpu); + sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT), + i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL, + false /* validate */); + root = __pa(sp->spt); + ++sp->root_count; + spin_unlock(&vcpu->kvm->mmu_lock); + vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK; + } +/* kvm_set_space_type_root_hpa(vcpu, __pa(vcpu->arch.mmu.pae_root), + false user space ? ); */ + } else { + BUG(); + } + return 0; +} + +static hpa_t e2k_mmu_alloc_spt_root(struct kvm_vcpu *vcpu, gfn_t root_gfn) +{ + struct kvm_mmu_page *sp; + hpa_t root_hpa; + + DebugSPT("started on VCPU #%d, guest PT root at 0x%llx\n", + vcpu->vcpu_id, root_gfn << PAGE_SHIFT); + + if (mmu_check_root(vcpu, root_gfn)) { + pr_err("%s(): check of guest root PT failed\n", __func__); + return E2K_INVALID_PAGE; + } + + /* + * Do we shadow a long mode page table? If so we need to + * write-protect the guests page table root. + */ + KVM_BUG_ON(vcpu->arch.mmu.root_level != PT64_ROOT_LEVEL); + + spin_lock(&vcpu->kvm->mmu_lock); + make_mmu_pages_available(vcpu); + sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL, 0, + ACC_WRITE_MASK, /* PTD should be not executable */ + /* and privileged */ + false /* validate */); + root_hpa = __pa(sp->spt); + ++sp->root_count; + spin_unlock(&vcpu->kvm->mmu_lock); + DebugSPT("VCPU #%d created shadow PT root at 0x%llx, " + "sp struct at %px, gfn 0x%llx\n", + vcpu->vcpu_id, root_hpa, sp, sp->gfn); + return root_hpa; +} + +static int e2k_mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + unsigned flags) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + pgprotval_t u_pptb, os_pptb; + gpa_t root_gpa; + hpa_t root; + + /* + * Do we shadow a long mode page table? If so we need to + * write-protect the guests page table root. 
+ */ + KVM_BUG_ON(mmu->root_level != PT64_ROOT_LEVEL); + + u_pptb = mmu->get_vcpu_u_pptb(vcpu); + os_pptb = mmu->get_vcpu_os_pptb(vcpu); + + if (flags & OS_ROOT_PT_FLAG) { + if (is_sep_virt_spaces(vcpu)) { + root_gpa = kvm_get_space_type_guest_os_root(vcpu); + root = kvm_get_space_type_spt_os_root(vcpu); + } else { + root_gpa = kvm_get_space_type_guest_u_root(vcpu); + root = kvm_get_space_type_spt_u_root(vcpu); + } + MMU_WARN_ON(VALID_PAGE(root)); + + root = e2k_mmu_alloc_spt_root(vcpu, gpa_to_gfn(root_gpa)); + MMU_WARN_ON(!VALID_PAGE(root)); + if (is_sep_virt_spaces(vcpu)) { + kvm_set_space_type_spt_os_root(vcpu, root); + if (u_pptb == os_pptb) { + kvm_set_space_type_spt_u_root(vcpu, root); + } + } else { + kvm_set_space_type_spt_u_root(vcpu, root); + } + if (!(flags & DONT_SYNC_ROOT_PT_FLAG)) { + kvm_sync_shadow_root(vcpu, gmm, root, OS_ROOT_PT_FLAG); + } + DebugSPT("VCPU #%d, guest OS_PT root at 0x%llx shadow root " + "at 0x%llx\n", + vcpu->vcpu_id, root_gpa, root); + + if (flags & U_ROOT_PT_FLAG) { + if (!is_sep_virt_spaces(vcpu)) { + /* already allocated as OS & USER root */ + return 0; + } + } else { + return 0; + } + } else if (!(flags & U_ROOT_PT_FLAG)) { + return 0; + } + + /* allocate guest user PT root */ + root_gpa = kvm_get_space_type_guest_u_root(vcpu); + root = e2k_mmu_alloc_spt_root(vcpu, gpa_to_gfn(root_gpa)); + MMU_WARN_ON(!VALID_PAGE(root)); + kvm_set_space_type_spt_u_root(vcpu, root); + if (is_sep_virt_spaces(vcpu) && u_pptb == os_pptb) { + kvm_set_space_type_spt_os_root(vcpu, root); + } + if (!(flags & DONT_SYNC_ROOT_PT_FLAG)) { + kvm_sync_shadow_root(vcpu, gmm, root, U_ROOT_PT_FLAG); + } + DebugSPT("VCPU #%d, guest U_PT root at 0x%llx, shadow root " + "at 0x%llx\n", + vcpu->vcpu_id, root_gpa, root); + + return 0; +} + +int x86_mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu, unsigned flags) +{ + struct kvm_mmu_page *sp; + pgprotval_t pdpte, pm_mask; + gfn_t root_gfn; + int i; + + root_gfn = kvm_get_space_type_spt_u_root(vcpu); + root_gfn = 
gpa_to_gfn(root_gfn); + + if (mmu_check_root(vcpu, root_gfn)) { + pr_err("%s(): check of guest root PT failed\n", __func__); + return 1; + } + + /* + * Do we shadow a long mode page table? If so we need to + * write-protect the guests page table root. + */ + if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { + pr_err("%s(): is not yet implemented\n", __func__); + return -ENODEV; + } + + /* + * We shadow x86 32 bit page table. This may be a legacy 2-level + * or a PAE 3-level page table. In either case we need to be aware that + * the shadow page table may be a PAE or a long mode page table. + */ + pm_mask = PT_PRESENT_MASK; + if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) + pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | + PT_X86_USER_MASK; + + for (i = 0; i < 4; ++i) { + hpa_t root = vcpu->arch.mmu.pae_root[i]; + + MMU_WARN_ON(VALID_PAGE(root)); + if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) { + pdpte = get_vcpu_pdpte(vcpu, i); + if (!(pdpte & PT_PRESENT_MASK)) { + vcpu->arch.mmu.pae_root[i] = 0; + continue; + } + root_gfn = pdpte >> PAGE_SHIFT; + if (mmu_check_root(vcpu, root_gfn)) + return 1; + } + spin_lock(&vcpu->kvm->mmu_lock); + make_mmu_pages_available(vcpu); + sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL, + 0, ACC_ALL, false /* validate */); + root = __pa(sp->spt); + ++sp->root_count; + spin_unlock(&vcpu->kvm->mmu_lock); + + vcpu->arch.mmu.pae_root[i] = root | pm_mask; + } +/* kvm_set_space_type_root_hpa(vcpu, __pa(vcpu->arch.mmu.pae_root), + u_root); */ + + /* + * If we shadow a 32 bit page table with a long mode page + * table we enter this path. + */ + if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { + if (vcpu->arch.mmu.lm_root == NULL) { + /* + * The additional page necessary for this is only + * allocated on demand. 
+ */ + + u64 *lm_root; + + lm_root = (void *)get_zeroed_page(GFP_KERNEL); + if (lm_root == NULL) + return 1; + + lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask; + + vcpu->arch.mmu.lm_root = lm_root; + } + + kvm_set_space_type_spt_u_root(vcpu, + __pa(vcpu->arch.mmu.lm_root)); + } + + return 0; +} + +static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + unsigned flags) +{ + KVM_BUG_ON(vcpu->arch.mmu.direct_map); + return e2k_mmu_alloc_shadow_roots(vcpu, gmm, flags); +} + +static void mmu_sync_spt_root(struct kvm_vcpu *vcpu, hpa_t root) +{ + struct kvm_mmu_page *sp; + + kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); + sp = page_header(root); + DebugSYNC("SP at %px for root PT 0x%llx, gfn 0x%llx\n", + sp, root, sp->gfn); + mmu_sync_children(vcpu, sp); + kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); +} + +static void e2k_mmu_sync_roots(struct kvm_vcpu *vcpu, unsigned flags) +{ + hpa_t os_root, u_root; + + if (vcpu->arch.mmu.direct_map) + return; + if (flags & DONT_SYNC_ROOT_PT_FLAG) + return; + + KVM_BUG_ON(vcpu->arch.mmu.root_level != PT64_ROOT_LEVEL); + + mmu_get_spt_roots(vcpu, flags, &os_root, &u_root, NULL); + if (!VALID_PAGE(os_root) && !VALID_PAGE(u_root)) + return; + + vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); + + if (VALID_PAGE(u_root)) { + DebugSYNC("started on VCPU #%d for root U_PT at 0x%llx\n", + vcpu->vcpu_id, u_root); + mmu_sync_spt_root(vcpu, u_root); + } + if (VALID_PAGE(os_root) && os_root != u_root) { + DebugSYNC("started on VCPU #%d for root OS_PT at 0x%llx\n", + vcpu->vcpu_id, os_root); + mmu_sync_spt_root(vcpu, os_root); + } +} + +static void mmu_sync_roots(struct kvm_vcpu *vcpu, unsigned flags) +{ + int i; + struct kvm_mmu_page *sp; + + if (vcpu->arch.mmu.direct_map) + return; + + if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { + e2k_mmu_sync_roots(vcpu, flags); + return; + } + vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); + kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); + for (i = 0; i < 4; ++i) { + hpa_t root = vcpu->arch.mmu.pae_root[i]; + + 
if (root && VALID_PAGE(root)) { + root &= kvm_get_spte_pfn_mask(vcpu->kvm); + sp = page_header(root); + mmu_sync_children(vcpu, sp); + } + } + kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); +} + +void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu, unsigned flags) +{ + spin_lock(&vcpu->kvm->mmu_lock); + mmu_sync_roots(vcpu, flags); + spin_unlock(&vcpu->kvm->mmu_lock); +} +EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots); + +gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr, + u32 access, kvm_arch_exception_t *exception) +{ + if (exception) + exception->error_code = 0; + return vaddr; +} + +static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, pgprot_t spte, + int level) +{ + if (is_ss(NULL)) { + pr_err_once("FIXME: %s() is not implemented\n", __func__); + } + return false; +} + +static pf_res_t handle_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t address, + u32 error_code, bool prefault, + gfn_t *gfn, kvm_pfn_t *pfn) +{ + intc_mu_state_t *mu_state = get_intc_mu_state(vcpu); + pf_res_t pfres; + int try = 0; + + do { + pfres = vcpu->arch.mmu.page_fault(vcpu, address, + error_code, prefault, gfn, pfn); + if (likely(pfres != PFRES_RETRY)) + break; + if (!mu_state->may_be_retried) { + /* cannot be retried */ + break; + } + try++; + if (try <= PF_RETRIES_MAX_NUM) { + DebugTRY("retry #%d seq 0x%lx to handle page fault : " + "address 0x%lx, pfn 0x%llx / gfn 0x%llx\n", + try, mu_state->notifier_seq, address, + (pfn != NULL) ? *pfn : ~0ULL, + (gfn != NULL) ? *gfn : ~0ULL); + } + } while (try < PF_TRIES_MAX_NUM); + + mu_state->pfres = pfres; + + DebugPFINTC("mmu.page_fault() returned %d\n", pfres); + + + if (PF_RETRIES_MAX_NUM > 0 && pfres == PFRES_RETRY) { + DebugTRY("could not handle page fault : retries %d, " + "address 0x%lx, pfn 0x%llx / gfn 0x%llx\n", + try, address, + (pfn != NULL) ? *pfn : ~0ULL, + (gfn != NULL) ? 
*gfn : ~0ULL); + } + + return pfres; +} + +static void inject_page_fault(struct kvm_vcpu *vcpu, + kvm_arch_exception_t *fault) +{ + if (vcpu->arch.mmu.inject_page_fault) { + vcpu->arch.mmu.inject_page_fault(vcpu, fault); + } +} + +void direct_unmap_prefixed_mmio_gfn(struct kvm *kvm, gfn_t gfn) +{ + struct kvm_shadow_walk_iterator iterator; + struct kvm_mmu_page *sp; + int level; + pgprot_t *sptep; + struct kvm_vcpu *vcpu = kvm->vcpus[0]; + + /* + * Prefixed MMIO is not rmapped, not in MMIO cache. + * But it is cached in TLB, and mmu_page_zap_pte() doesn't request + * flush for MMIO, so call it directly + */ + spin_lock(&kvm->mmu_lock); + for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { + level = iterator.level; + sptep = iterator.sptep; + + sp = page_header(__pa(sptep)); + if (is_last_spte(*sptep, level)) { + mmu_page_zap_pte(kvm, sp, sptep); + kvm_flush_remote_tlbs(kvm); + break; + } + + if (!is_shadow_present_pte(kvm, *sptep)) + break; + + } + spin_unlock(&kvm->mmu_lock); +} + +static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct) +{ + if (direct) + return vcpu_match_mmio_gpa(vcpu, addr); + + return vcpu_match_mmio_gva(vcpu, addr); +} + +static inline bool is_cached_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, + gfn_t *gfn, bool direct) +{ + if (mmio_info_in_cache(vcpu, addr, direct)) { + *gfn = vcpu->arch.mmio_gfn; + return true; + } + return false; +} + +/* return true if reserved bit is detected on spte. 
*/ +static bool +walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, pgprot_t *sptep) +{ + struct kvm_shadow_walk_iterator iterator; + pgprot_t sptes[PT64_ROOT_LEVEL]; + pgprot_t spte = {0ull}; + int root, leaf; + bool reserved = false; + + if (!VALID_PAGE(kvm_get_space_addr_root(vcpu, addr))) + goto exit; + + walk_shadow_page_lockless_begin(vcpu); + + for (shadow_walk_init(&iterator, vcpu, addr), + leaf = iterator.level, root = leaf; + shadow_walk_okay(&iterator); + __shadow_walk_next(&iterator, spte)) { + spte = mmu_spte_get_lockless(iterator.sptep); + + sptes[leaf - 1] = spte; + leaf--; + + if (!is_shadow_present_pte(vcpu->kvm, spte)) + break; + + reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte, + iterator.level); + } + + walk_shadow_page_lockless_end(vcpu); + + if (reserved) { + pr_err("%s: detect reserved bits on spte, addr 0x%llx, " + "dump hierarchy:\n", + __func__, addr); + while (root > leaf) { + pr_err("------ spte 0x%lx level %d.\n", + pgprot_val(sptes[root - 1]), root); + root--; + } + } +exit: + *sptep = spte; + return reserved; +} + +int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, gfn_t *gfn, + bool direct) +{ + pgprot_t spte; + bool reserved; + + if (is_cached_mmio_page_fault(vcpu, addr, gfn, direct)) { + return RET_MMIO_PF_EMULATE; + } + + reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte); + if (WARN_ON(reserved)) + return RET_MMIO_PF_BUG; + + if (is_mmio_spte(vcpu->kvm, spte)) { + unsigned access = get_mmio_spte_access(vcpu->kvm, spte); + + *gfn = get_mmio_spte_gfn(vcpu->kvm, spte); + if (is_mmio_prefixed_gfn(vcpu, *gfn)) + /* prefixed MMIO areas were mapped directly */ + /* and should not cause page faults */ + return RET_MMIO_PF_INVALID; + if (!check_mmio_spte(vcpu, spte)) + return RET_MMIO_PF_INVALID; + + if (direct) + addr = 0; + + trace_handle_mmio_page_fault(addr, *gfn, access); + vcpu_cache_mmio_info(vcpu, addr, *gfn, access); + return RET_MMIO_PF_EMULATE; + } + + /* + * If the page table is 
zapped by other cpus, let CPU fault again on + * the address. + */ + return RET_MMIO_PF_RETRY; +} +EXPORT_SYMBOL_GPL(handle_mmio_page_fault); + +static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu, + u32 error_code, gfn_t gfn) +{ + struct kvm_memory_slot *slot; + + if (unlikely(error_code & PFERR_RSVD_MASK)) + return false; + + if (!(error_code & PFERR_PRESENT_MASK) || + !(error_code & PFERR_WRITE_MASK)) + return false; + + /* + * guest is writing the page which is write tracked which can + * not be fixed by page fault handler. + */ + slot = __gfn_to_memslot(kvm_memslots_for_spte_role(vcpu->kvm, 0), + gfn); + if (kvm_page_track_is_active(vcpu->kvm, slot, gfn, + KVM_PAGE_TRACK_WRITE)) + return true; + + return false; +} + +static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr) +{ + struct kvm_shadow_walk_iterator iterator; + pgprot_t spte; + + if (!VALID_PAGE(kvm_get_space_addr_root(vcpu, addr))) + return; + + walk_shadow_page_lockless_begin(vcpu); + for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) { + clear_sp_write_flooding_count(iterator.sptep); + if (!is_shadow_present_pte(vcpu->kvm, spte)) + break; + } + walk_shadow_page_lockless_end(vcpu); +} + +static pf_res_t nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, + u32 error_code, bool prefault, + gfn_t *gfnp, kvm_pfn_t *pfnp) +{ + gpa_t gpa = gva; + gfn_t gfn = gpa >> PAGE_SHIFT; + int r; + + DebugNONP("VCPU #%d GPA 0x%llx, error 0x%x\n", + vcpu->vcpu_id, gpa, error_code); + pgprintk("%s: gpa 0x%llx error %x\n", __func__, gpa, error_code); + + if (gfnp != NULL) + *gfnp = gfn; + + if (page_fault_handle_page_track(vcpu, error_code, gfn)) + return PFRES_WRITE_TRACK; + + r = mmu_topup_memory_caches(vcpu); + if (r) + return PFRES_ERR; + + MMU_WARN_ON(!VALID_PAGE(kvm_get_gp_phys_root(vcpu))); + + + return nonpaging_map(vcpu, gpa & PAGE_MASK, + error_code, gfn, prefault); +} + +int kvm_prefetch_mmu_area(struct kvm_vcpu *vcpu, gva_t start, gva_t end, + u32 error_code) +{ + 
	int evn_no = 0;	/* should not be used here */
	intc_mu_state_t *mu_state;
	gva_t addr;
	gfn_t gfn;
	pf_res_t pfres;

	DebugSPF("started on VCPU #%d : area from 0x%lx to 0x%lx\n",
		vcpu->vcpu_id, start, end);

	/* run with retries allowed and MMU-notifier races ignored */
	vcpu->arch.intc_ctxt.cur_mu = evn_no;
	mu_state = get_intc_mu_state(vcpu);
	mu_state->may_be_retried = true;
	mu_state->ignore_notifier = true;

	/* FIXME: only the trivial case of addresses and sizes in terms of */
	/* PAGE_SIZE is implemented.  It needs to support huge pages too */
	addr = start;
	do {
		/* prefault == true: no operation recovery is needed */
		pfres = handle_mmu_page_fault(vcpu, addr, error_code, true,
						&gfn, NULL);
		if (likely(pfres == PFRES_NO_ERR)) {
			/* jumps to the controlling expression below, */
			/* advancing addr by one page */
			continue;
		} else {
			pr_err("%s(): failed to handle addr 0x%lx, error %d\n",
				__func__, addr, pfres);
			return -EFAULT;
		}
	} while (addr += PAGE_SIZE, addr < end);
	/* NOTE: do/while handles at least one page even if end <= start */

	return 0;
}

/*
 * Mark the INTC MU event @evn_no as "to be moved to the guest trap
 * cellar by software".  Only needed when the hardware cannot do the
 * move itself (!HW_MOVE_TO_TC_IS_SUPPORTED); each event may be marked
 * at most once (guarded by KVM_BUG_ON).
 */
static void move_mu_intc_to_trap_cellar(struct kvm_vcpu *vcpu, int evn_no)
{
	kvm_intc_cpu_context_t *intc_ctxt = &vcpu->arch.intc_ctxt;
	int mu_num = intc_ctxt->mu_num;
	unsigned long evn_mask;

	KVM_BUG_ON(evn_no < 0 || evn_no >= mu_num);
	evn_mask = 1UL << evn_no;

	KVM_BUG_ON(intc_ctxt->intc_mu_to_move & evn_mask);
	if (!HW_MOVE_TO_TC_IS_SUPPORTED) {
		intc_ctxt->intc_mu_to_move |= evn_mask;
	}
}

/*
 * After the MU event @from_evn_no has been injected to the guest, walk
 * the remaining recorded MU intercept events and decide, per event
 * code, which of them must also be moved to the guest trap cellar.
 * Events before the injected one are only reported (debug); events
 * after it are moved or injected depending on their code.
 * Always returns 0.
 */
static int move_rest_mu_intc_to_trap_cellar(struct kvm_vcpu *vcpu,
						int from_evn_no)
{
	kvm_intc_cpu_context_t *intc_ctxt = &vcpu->arch.intc_ctxt;
	intc_info_mu_t *mus = intc_ctxt->mu;
	intc_info_mu_t *mu, *mu_event;
	int event;
	int mu_num = intc_ctxt->mu_num;
	int evn_no;

	KVM_BUG_ON(from_evn_no < 0 || from_evn_no >= mu_num);

	mu = &mus[from_evn_no];

	/* loop on MMU events before event injected to guest */
	for (evn_no = 0; evn_no < from_evn_no; evn_no++) {
		mu_event = &mus[evn_no];
		event = mu_event->hdr.event_code;
		switch (event) {
		case IME_FORCED:
		case IME_FORCED_GVA:
			DebugSHINJ("event #%d %s cannot precede the "
				"event #%d %s injected to guest\n",
				evn_no, kvm_get_mu_event_name(event),
				from_evn_no,
kvm_get_mu_event_name(mu->hdr.event_code)); + break; + case IME_GPA_DATA: + case IME_SHADOW_DATA: + case IME_GPA_INSTR: + case IME_GPA_AINSTR: + default: + DebugSHINJ("event #%d %s precedes the event #%d %s " + "injected to guest\n", + evn_no, kvm_get_mu_event_name(event), + from_evn_no, + kvm_get_mu_event_name(mu->hdr.event_code)); + break; + } + } + + /* loop on MMU events after event injected to guest */ + for (evn_no = from_evn_no + 1; evn_no < mu_num; evn_no++) { + mu_event = &mus[evn_no]; + event = mu_event->hdr.event_code; + + switch (event) { + case IME_FORCED: + case IME_FORCED_GVA: + DebugSHINJ("event #%d %s move to guest trap cellar: " + "it is after the event #%d %s injected " + "to guest\n", + evn_no, kvm_get_mu_event_name(event), + from_evn_no, + kvm_get_mu_event_name(mu->hdr.event_code)); + if (!HW_MOVE_TO_TC_IS_SUPPORTED) { + /* update 'condition.address' destination */ + /* register abs number from dst_ind field, */ + /* new destination register number */ + AS(mu_event->condition).address = + AS(mu_event->condition).dst_ind; + } + move_mu_intc_to_trap_cellar(vcpu, evn_no); + break; + case IME_GPA_INSTR: + case IME_GPA_AINSTR: + DebugSHINJ("event #%d %s should be injected too: " + "it is after the event #%d %s injected " + "to guest\n", + evn_no, kvm_get_mu_event_name(event), + from_evn_no, + kvm_get_mu_event_name(mu->hdr.event_code)); + break; + case IME_GPA_DATA: + case IME_SHADOW_DATA: + DebugSHINJ("event #%d %s will move to guest trap " + "cellar too: but it is after the already " + "injected event #%d %s WHY\n", + evn_no, kvm_get_mu_event_name(event), + from_evn_no, + kvm_get_mu_event_name(mu->hdr.event_code)); + move_mu_intc_to_trap_cellar(vcpu, evn_no); + break; + default: + DebugSHINJ("event #%d %s should be handled although " + "it is after the event #%d %s injected " + "to guest\n", + evn_no, kvm_get_mu_event_name(event), + from_evn_no, + kvm_get_mu_event_name(mu->hdr.event_code)); + break; + } + } + return 0; +} + +static void 
move_mu_intc_to_vcpu_exception(struct kvm_vcpu *vcpu, int evn_no)
{
	kvm_intc_cpu_context_t *intc_ctxt = &vcpu->arch.intc_ctxt;

	/* the per-event bitmask must be wide enough for all MU items */
	BUILD_BUG_ON(INTC_INFO_MU_ITEM_MAX >
			sizeof(intc_ctxt->intc_mu_to_move) * 8);
	/* no event may already be pending a move at injection time */
	KVM_BUG_ON(intc_ctxt->intc_mu_to_move != 0);

	move_mu_intc_to_trap_cellar(vcpu, evn_no);
}

/*
 * Inject a shadow data page fault (IME_SHADOW_DATA) into the guest:
 * rewrite the intercept event to IME_FORCED_GVA so hardware re-delivers
 * it to the guest, raise exc_data_page in the guest TIRs, and (when the
 * hardware cannot move requests to the trap cellar itself) move this
 * and the remaining MU events there by software.
 * Returns the move_rest_mu_intc_to_trap_cellar() result (0).
 */
static int inject_shadow_data_page_fault(struct kvm_vcpu *vcpu,
			int evn_no, intc_info_mu_t *mu_event)
{
	int event;
	e2k_addr_t address;
	int ret;

	event = mu_event->hdr.event_code;
	address = mu_event->gva;

	DebugSHINJ("intercept event #%d code %d %s, guest address 0x%lx "
		"fault type 0x%x\n",
		evn_no, event, kvm_get_mu_event_name(event),
		address, AS(mu_event->condition).fault_type);

	/* update event code to inject by hardware the event to guest */
	mu_event->hdr.event_code = IME_FORCED_GVA;
	kvm_set_intc_info_mu_is_updated(vcpu);
	if (!HW_MOVE_TO_TC_IS_SUPPORTED) {
		/* update 'condition.address' destination register abs number */
		/* from dst_ind field, new destination register number */
		AS(mu_event->condition).address =
			AS(mu_event->condition).dst_ind;
	}
	/* inject page fault exception into TIRs */
	kvm_need_create_vcpu_exception(vcpu, exc_data_page_mask);
	if (!HW_MOVE_TO_TC_IS_SUPPORTED) {
		/* FIXME: simulator bug: the simulator does not move requests */
		/* which should be re-executed from INTC_INFO_MU to the trap */
		/* cellar unlike the hardware, so do it by software */
		move_mu_intc_to_vcpu_exception(vcpu, evn_no);
	}
	/* mark all the rest MMU intercept events as moved to guest; */
	/* kept here only to debug injection and request moving -- */
	/* should be under !HW_MOVE_TO_TC_IS_SUPPORTED */
	ret = move_rest_mu_intc_to_trap_cellar(vcpu, evn_no);
	return ret;
}

/*
 * Inject an instruction page fault (IME_GPA_INSTR) into the guest by
 * raising the matching instr_page_miss/instr_page_prot exception with
 * the faulting IP.  An unrecognized fault type is reported and treated
 * as an instruction protection fault.  Always returns 0.
 */
static int inject_shadow_instr_page_fault(struct kvm_vcpu *vcpu,
			int evn_no, intc_info_mu_t *mu_event)
{
	int event;
	gva_t IP;
	tc_cond_t cond;
	tc_fault_type_t ftype;
	unsigned long exc_mask;
	const char *trap_name;

	event =
mu_event->hdr.event_code;
	IP = mu_event->gva;
	cond = mu_event->condition;
	AW(ftype) = AS(cond).fault_type;

	DebugSHINJ("intercept event #%d code %d %s, guest IP 0x%lx, "
		"fault type 0x%x\n",
		evn_no, event, kvm_get_mu_event_name(event), IP, AW(ftype));

	/* map the hardware fault type onto the guest exception mask */
	if (AS(ftype).page_miss) {
		exc_mask = exc_instr_page_miss_mask;
		trap_name = "instr_page_miss";
	} else if (AS(ftype).prot_page) {
		exc_mask = exc_instr_page_prot_mask;
		trap_name = "instr_page_prot";
	} else {
		/* unknown fault type: fall back to protection fault */
		pr_err("%s(): bad fault type 0x%x, pass instruction protection "
			"fault to guest\n",
			__func__, AW(ftype));
		exc_mask = exc_instr_page_prot_mask;
		trap_name = "invalid_instr_page";
	}

	DebugSHINJ("intercept on %s fault, IP 0x%lx\n", trap_name, IP);

	/* queue the exception together with the faulting IP for the guest */
	kvm_need_create_vcpu_exc_and_IP(vcpu, exc_mask, IP);

	return 0;
}

/*
 * Inject an async instruction page fault (IME_GPA_AINSTR) into the
 * guest.  Identical to inject_shadow_instr_page_fault() except that the
 * ainstr_* exception masks are raised.  Always returns 0.
 */
static int inject_shadow_ainstr_page_fault(struct kvm_vcpu *vcpu,
			int evn_no, intc_info_mu_t *mu_event)
{
	int event;
	gva_t IP;
	tc_cond_t cond;
	tc_fault_type_t ftype;
	unsigned long exc_mask;
	const char *trap_name;

	event = mu_event->hdr.event_code;
	IP = mu_event->gva;
	cond = mu_event->condition;
	AW(ftype) = AS(cond).fault_type;

	DebugSHINJ("intercept event #%d code %d %s, guest IP 0x%lx, "
		"fault type 0x%x\n",
		evn_no, event, kvm_get_mu_event_name(event), IP, AW(ftype));

	if (AS(ftype).page_miss) {
		exc_mask = exc_ainstr_page_miss_mask;
		trap_name = "ainstr_page_miss";
	} else if (AS(ftype).prot_page) {
		exc_mask = exc_ainstr_page_prot_mask;
		trap_name = "ainstr_page_prot";
	} else {
		/* unknown fault type: fall back to protection fault */
		pr_err("%s(): bad fault type 0x%x, pass instruction protection "
			"fault to guest\n",
			__func__, AW(ftype));
		exc_mask = exc_ainstr_page_prot_mask;
		trap_name = "invalid_ainstr_page";
	}

	DebugSHINJ("intercept on %s fault, IP 0x%lx\n", trap_name, IP);

	kvm_need_create_vcpu_exc_and_IP(vcpu, exc_mask, IP);

	return 0;
}

/*
 * Dispatch the current MU intercept event (intc_ctxt->cur_mu) to the
 * proper shadow page fault injector based on its event code.
 */
static void inject_shadow_page_fault(struct kvm_vcpu *vcpu,
			kvm_arch_exception_t *fault)
{
kvm_intc_cpu_context_t *intc_ctxt = &vcpu->arch.intc_ctxt; + intc_info_mu_t *mu_event; + int mu_num = intc_ctxt->mu_num; + int evn_no = intc_ctxt->cur_mu; + int event; + int ret = 0; + + KVM_BUG_ON(evn_no < 0 || evn_no >= mu_num); + + mu_event = &intc_ctxt->mu[evn_no]; + event = mu_event->hdr.event_code; + + DebugSHINJ("INTC MU event #%d code %d %s\n", + evn_no, event, kvm_get_mu_event_name(event)); + + switch (event) { + case IME_GPA_DATA: + pr_err("%s(): invalid event #%d code %d %s for access on " + "virtual address at shadow PT mode\n", + __func__, evn_no, event, kvm_get_mu_event_name(event)); + ret = -EINVAL; + break; + case IME_FORCED: + case IME_FORCED_GVA: + DebugSHINJ("INTC MU event #%d vode %d %s will be reexecuted " + "by hardware while intercept completion\n", + evn_no, event, kvm_get_mu_event_name(event)); + break; + case IME_SHADOW_DATA: + ret = inject_shadow_data_page_fault(vcpu, evn_no, mu_event); + break; + case IME_GPA_INSTR: + ret = inject_shadow_instr_page_fault(vcpu, evn_no, mu_event); + break; + case IME_GPA_AINSTR: + ret = inject_shadow_ainstr_page_fault(vcpu, evn_no, mu_event); + break; + default: + pr_err("%s(): event #%d %s should not cause injection\n", + __func__, evn_no, kvm_get_mu_event_name(event)); + ret = -EINVAL; + break; + } + + KVM_BUG_ON(ret != 0); + +} + +/** + * calculate_recovery_load_to_rf_frame - calculate the stack address + * of the register into registers file frame where the load was done. + * @dst_ind: trap cellar's "dst" field + * @radr: address of a "normal" register + * @load_to_rf: load to rf should be done + * + * This function calculates and sets @radr. + * + * Returns zero on success and value of type exec_mmu_ret on failure. 
+ */ +static enum exec_mmu_ret calculate_guest_recovery_load_to_rf_frame( + struct pt_regs *regs, tc_cond_t cond, + u64 **radr, bool *load_to_rf) +{ + unsigned dst_ind = AS(cond).dst_ind; + unsigned w_base_rnum_d, frame_rnum_d; + u8 *ps_base = NULL, *frame_base; + unsigned rnum_offset_d, rnum_ind_d; + unsigned w_size_q; + u64 *rind; + + BUG_ON(!(dst_ind < E2K_MAXSR_d)); + + /* + * The guest registers frame was spilled to backup stacks and + * it should be at the top frame of the stack + * Intercept hardware set to INTC_INFO_MU and its calculated + * as: + * d is physical number of register to load to + * b is base of register ftame of guest function with load WD.base_d + * s is size of register frame WD.size_d + * + * i = (d >= b) ? (d - b) : (MAXSR_d + d - b); + * if (vm_dst != 0) + * dst_ind = MAXSR_d - s + i; + * else + * dst_ind = undefined; + * so: + * i = dst_ind - MAXSR_d + s + * + * always w_base_rnum_d > dst_ind: + * + * RF 0<-------| THE GUEST FRAME WD |E2K_MAXSR_d + * ^dst_ind + * ^w_base_rnum_d == E2K_MAXSR_d + * + * + * --|---------| THE GUEST FRAME |E2K_MAXSR_d + * ^psp.base ^psp.ind + * + * First address of first empty byte of psp stack is + * ps_base = base + ind; + * Our address to load is: + * ps_base - s + i + */ + + ps_base = (u8 *)(regs->stacks.psp_lo.PSP_lo_base + + regs->stacks.psp_hi.PSP_hi_ind); + w_base_rnum_d = E2K_MAXSR_d; + w_size_q = regs->crs.cr1_lo.CR1_lo_wbs; + frame_base = ps_base - w_size_q * EXT_4_NR_SZ; + + /* + * Offset from beginning spilled quad-NR for our + * dst_ind is + * rnum_offset_d. + * We define rnum_offset_d for dst_ind from ps_base + * in terms of double. + * Note. dst_ind is double too. + */ + if (w_base_rnum_d > dst_ind) { + rnum_offset_d = w_base_rnum_d - dst_ind; + frame_rnum_d = w_base_rnum_d - w_size_q * 2; + rnum_ind_d = dst_ind - frame_rnum_d; + } else { + KVM_BUG_ON(true); + } + /* + * Window boundaries are aligned at least to quad-NR. + * When windows spill then quad-NR is spilled as minimum. 
+ * Also, extantion of regs is spilled too. + * So, each spilled quad-NR take 2*quad-NR size == 32 bytes + * So, bytes offset for our rnum_offset_d is + * (rnum_offset_d + 1) / 2) * 32 + * if it was uneven number we should add size of double: + * (rnum_offset_d % 2) * 8 + * starting from ISET V5 we should add size of quadro. + */ + *radr = (u64 *) (ps_base - ((rnum_offset_d + 1) / 2) * 32); + if (rnum_offset_d % 2) + *radr += ((machine.native_iset_ver < E2K_ISET_V5) ? 1 : 2); + DbgEXMMU(" is window register: " + "rnum_d = 0x%x offset 0x%x, " + "PS end 0x%px WD end = 0x%x, radr = 0x%px\n", + dst_ind, rnum_offset_d, ps_base, w_base_rnum_d, *radr); + + rind = (u64 *) (frame_base + ((rnum_ind_d + 0) / 2) * 32); + if (rnum_ind_d % 2) + rind += ((machine.native_iset_ver < E2K_ISET_V5) ? 1 : 2); + DbgEXMMU(" is window register: " + "rnum_d = 0x%x index 0x%x, " + "PS base 0x%px WD base = 0x%x, radr = 0x%px\n", + dst_ind, rnum_ind_d, frame_base, frame_rnum_d, rind); + + KVM_BUG_ON(*radr != rind); + + if (((unsigned long) *radr < (u64)frame_base) || + ((unsigned long) *radr >= (u64)ps_base)) { + /* + * The load operation out of guest top register window frame + * (for example this load is placed in one long instruction + * with return. 
The load operationb should be ignored + */ + DbgEXMMU(" address of register window points " + "out of guest top register procedure stack frame " + "0x%px > 0x%px >= 0x%px, load operation will be " + "ignored\n", + frame_base, *radr, ps_base); + return EXEC_MMU_SUCCESS; + } + + *load_to_rf = false; + return 0; +} + +static bool +check_guest_spill_fill_recovery(tc_cond_t cond, e2k_addr_t address, bool s_f, + struct pt_regs *regs) +{ + struct kvm_vcpu *vcpu = current_thread_info()->vcpu; + struct kvm_hw_cpu_context *hw_ctxt = &vcpu->arch.hw_ctxt; + bool store; + + store = AS(cond).store; + + if (unlikely(AS(cond).s_f || s_f)) { + e2k_addr_t stack_base; + e2k_size_t stack_ind; + + return true; + + /* + * Not completed SPILL operation should be completed here + * by data store + * Not completed FILL operation replaced by restore of saved + * filling data in trap handler + */ + + DbgEXMMU("completion of %s %s operation\n", + (AS(cond).sru) ? "PCS" : "PS", + (store) ? "SPILL" : "FILL"); + if (AS(cond).sru) { + stack_base = hw_ctxt->sh_pcsp_lo.PCSP_lo_base; + stack_ind = hw_ctxt->sh_pcsp_hi.PCSP_hi_ind; + } else { + stack_base = hw_ctxt->sh_psp_lo.PSP_lo_base; + stack_ind = hw_ctxt->sh_psp_hi.PSP_hi_ind; + } + if (address < stack_base || address >= stack_base + stack_ind) { + pr_err("%s(): invalid procedure stack addr 0x%lx < " + "stack base 0x%lx or >= current stack " + "offset 0x%lx\n", + __func__, address, stack_base, + stack_base + stack_ind); + BUG(); + } + if (!store && !AS(cond).sru) { + pr_err("%s(): not completed PS FILL operation detected " + "in TC (only PCS FILL operation can be " + "dropped to TC)\n", + __func__); + BUG(); + } + return true; + } + return false; +} + +int reexecute_load_and_wait_page_fault(struct kvm_vcpu *vcpu, + trap_cellar_t *tcellar, gfn_t gfn, pt_regs_t *regs) +{ + e2k_addr_t address; + tc_cond_t cond; + e2k_addr_t hva; + trap_cellar_t *next_tcellar; + struct kvm_mmu_page *sp; + LIST_HEAD(invalid_list); + int r; + + /* + * It is load and 
wait lock operations. + * Hardware reexecute the operation, but the subsequent + * store and unlock operation can not be intercepted and + * only inevitable flush TLB line should be intercepted + * to update atomicaly updated PT entry. + */ + DebugREEXEC("reexecute hardware recovery load and wait lock " + "operation\n"); + + address = tcellar->address; + cond = tcellar->condition; + + hva = kvm_vcpu_gfn_to_hva(vcpu, gfn); + if (kvm_is_error_hva(hva)) { + pr_err("%s(): could not convert gfn 0x%llx to hva\n", + __func__, gfn); + return -EFAULT; + } + hva |= (address & ~PAGE_MASK); + tcellar->address = hva; + tcellar->flags |= TC_IS_HVA_FLAG; + DebugREEXEC("converted guest address 0x%lx, gfn 0x%llx to hva 0x%lx " + "to recovery guest %s operation\n", + address, gfn, hva, (AS(cond).store) ? "store" : "load"); + + if (regs->trap->curr_cnt + 1 < get_vcpu_mu_events_num(vcpu)) { + next_tcellar = tcellar + 1; + } else { + next_tcellar = NULL; + } + r = execute_mmu_operations(tcellar, next_tcellar, regs, 0, NULL, + NULL, /*&check_guest_spill_fill_recovery,*/ + NULL /*&calculate_guest_recovery_load_to_rf_frame*/); + DebugREEXEC("reexecution of %s and wait: address 0x%lx, hva 0x%lx " + "completed, error %d\n", + (AS(cond).store) ? 
"store" : "load", + address, hva, r); + if (r != EXEC_MMU_SUCCESS) + return -EFAULT; + + spin_lock(&vcpu->kvm->mmu_lock); + for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { + if (sp->unsync) + continue; + DebugPTE("found SP at %px mapped gva from 0x%lx, " + "gfn 0x%llx\n", + sp, sp->gva, gfn); + if (sp->role.level != PT_PAGE_TABLE_LEVEL) { + /* it can be only freed pgd/pud/pmd PT page */ + /* which is now used as pte PT page */ + kvm_mmu_prepare_zap_page(vcpu->kvm, sp, + &invalid_list); + continue; + } + kvm_unsync_page(vcpu, sp); + } + kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); + spin_unlock(&vcpu->kvm->mmu_lock); + + return 0; +} + +long kvm_hv_mmu_page_fault(struct kvm_vcpu *vcpu, struct pt_regs *regs, + intc_info_mu_t *intc_info_mu) +{ + intc_mu_state_t *mu_state = get_intc_mu_state(vcpu); + intc_info_mu_event_code_t event = intc_info_mu->hdr.event_code; + tc_cond_t cond = intc_info_mu->condition; + e2k_addr_t address = intc_info_mu->gva; + gpa_t gpa = intc_info_mu->gpa; + tc_fault_type_t ftype; + tc_opcode_t opcode; + unsigned mas = AS(cond).mas; + bool root = AS(cond).root; /* secondary space */ + bool ignore_store = false; /* the store should not be reexecuted */ + u32 error_code = 0; + bool nonpaging = !is_paging(vcpu); + bool gpa_for_spt = false; + bool direct; + kvm_pfn_t pfn; + gfn_t gfn; + const pgprot_t *gpte; + int bytes; + int try; + pf_res_t pfres; + long r; + + if (is_shadow_paging(vcpu)) { + if (nonpaging) { + if (is_phys_paging(vcpu)) { + address = gpa; + } + } else if (event == IME_GPA_DATA) { + KVM_BUG_ON(mas != MAS_LOAD_PA && + mas != MAS_STORE_PA && + mas != MAS_IOADDR); + address = gpa; + gpa_for_spt = true; + DebugPFINTC("%s(): intercept on GPA->PA translation " + "fault at shadow PT mode, GPA 0x%lx," + "mas 0x%02x\n", + __func__, address, mas); + } + } else if (is_tdp_paging(vcpu)) { + address = gpa; + } else if (is_phys_paging(vcpu)) { + address = gpa; + } else { + KVM_BUG_ON(true); + } + + AW(ftype) = AS(cond).fault_type; + 
AW(opcode) = AS(cond).opcode; + KVM_BUG_ON(AS(opcode).fmt == 0 || AS(opcode).fmt == 6); + bytes = tc_cond_to_size(cond); + if (AS(cond).s_f) + bytes = 16; + DebugPFINTC("page fault on guest address 0x%lx fault type 0x%x\n", + address, AW(ftype)); + + if (!nonpaging && AS(ftype).illegal_page) { + DebugPFINTC("illegal page fault type, return back to host\n"); + r = -EINVAL; + goto out; + } + if (!nonpaging && AS(ftype).page_bound) { + DebugPFINTC("page baund fault type, return back to host\n"); + r = -EINVAL; + goto out; + } + if (AW(ftype) == 0) { + error_code |= PFERR_FORCED_MASK; + DebugPFINTC("empty page fault type\n"); + } else if (AS(ftype).page_miss) { + error_code |= PFERR_NOT_PRESENT_MASK; + DebugPFINTC("page miss fault type\n"); + } else if (nonpaging || gpa_for_spt) { + error_code |= PFERR_NOT_PRESENT_MASK; + DebugPFINTC("fault type at nonpaging mode\n"); + } + if (tc_cond_is_store(cond, machine.native_iset_ver)) { + error_code |= PFERR_WRITE_MASK; + DebugPFINTC("page fault on store\n"); + } else { + DebugPFINTC("page fault on load\n"); + } + + if (mas == MAS_WAIT_LOCK || + (mas == MAS_WAIT_LOCK_Q && bytes == 16)) { + DebugPFINTC("not writable page fault on load and lock " + "operation\n"); + /* this mas has store semantic */ + error_code |= PFERR_WAIT_LOCK_MASK; + } + if (mas == MAS_IOADDR) { + DebugPFINTC("IO space access operation\n"); + error_code |= PFERR_MMIO_MASK; + } + if (AS(ftype).nwrite_page) { + error_code &= ~PFERR_NOT_PRESENT_MASK; + error_code |= PFERR_PRESENT_MASK; + if (!(error_code & PFERR_WRITE_MASK)) { + panic("%s(): not store or unknown or unimplemented " + "case of load operation with store semantic: " + "GVA 0x%lx, condition 0x%llx\n", + __func__, address, AW(cond)); + } + DebugPFINTC("not writable page fault type\n"); + if (AS(cond).s_f) { + /* spill/fill operation to/from protected page */ + /* so need unprotect page */ + r = kvm_mmu_unprotect_page_virt(vcpu, address); + if (r) { + DebugPFINTC("unprotected GVA 0x%lx " + "to 
spill/fill\n", + address); + } + } + } + if (AS(ftype).priv_page) { + error_code &= ~PFERR_NOT_PRESENT_MASK; + error_code |= (PFERR_PRESENT_MASK | PFERR_USER_MASK); + DebugPFINTC("priviled page fault type\n"); + } + if (root && kvm_has_vcpu_exc_recovery_point(vcpu)) { + ignore_store = !!(error_code & PFERR_WRITE_MASK); + if (ignore_store) { + DebugEXCRPR("%s secondary space at recovery mode: " + "GVA 0x%lx, GPA 0x%llx, cond 0x%016llx\n", + (ignore_store) ? + ((AS(cond).store) ? "store to" + : "load with store " + "semantics to") + : "load from", + intc_info_mu->gva, gpa, AW(cond)); + } + } + + direct = vcpu->arch.mmu.direct_map; + + if (unlikely(error_code & PFERR_MMIO_MASK)) { + KVM_BUG_ON(ignore_store); + r = handle_mmio_page_fault(vcpu, address, &gfn, direct); + + if (r == RET_MMIO_PF_EMULATE) { + pfres = PFRES_TRY_MMIO; + goto mmio_emulate; + } + if (r == RET_MMIO_PF_RETRY) { + /* MMIO address is not yet known */ + } + if (r < 0) + goto out; + } else if (is_cached_mmio_page_fault(vcpu, address, &gfn, direct)) { + pfres = PFRES_TRY_MMIO; + goto mmio_emulate; + } + + mu_state->may_be_retried = true; + mu_state->ignore_notifier = false; + + if (likely(!gpa_for_spt)) { + pfres = handle_mmu_page_fault(vcpu, address, error_code, + false, &gfn, &pfn); + } else { + try = 0; + do { + set_spt_gpa_fault(vcpu); + mmu_set_host_pt_struct_func(vcpu->kvm, + &kvm_mmu_get_gp_pt_struct); + pfres = nonpaging_page_fault(vcpu, address, + error_code, false, &gfn, &pfn); + mmu_set_host_pt_struct_func(vcpu->kvm, + &kvm_mmu_get_host_pt_struct); + reset_spt_gpa_fault(vcpu); + if (likely(pfres != PFRES_RETRY)) + break; + try++; + if (try <= PF_RETRIES_MAX_NUM) { + DebugTRY("retry #%d to handle page fault " + "on %s : address 0x%lx, " + "pfn 0x%llx / gfn 0x%llx\n", + try, + (AS(cond).store) ? 
"store" : "load", + address, pfn, gfn); + } + } while (try < PF_TRIES_MAX_NUM); + if (PF_RETRIES_MAX_NUM > 0 && pfres == PFRES_RETRY) { + DebugTRY("could not handle page fault on %s : " + "retries %d, address 0x%lx, " + "pfn 0x%llx / gfn 0x%llx\n", + (AS(cond).store) ? "store" : "load", + try, address, pfn, gfn); + } + } + + mu_state->pfres = pfres; + DebugPFINTC("mmu.page_fault() returned %d\n", pfres); + + if (pfres == PFRES_RETRY) { + KVM_BUG_ON(!mu_state->may_be_retried); + r = 0; + goto out; + } + if (pfres == PFRES_NO_ERR) { + e2k_addr_t hva; + kvm_intc_cpu_context_t *intc_ctxt = &vcpu->arch.intc_ctxt; + trap_cellar_t *next_tcellar; + + /* page fault successfully handled and need recover */ + /* load/store operation */ + if (HW_REEXECUTE_IS_SUPPORTED) { + /* MMU hardware itself will reexecute the memory */ + /* access operations */ + r = 0; + goto out; + } + + KVM_BUG_ON(ignore_store); + + hva = kvm_vcpu_gfn_to_hva(vcpu, gfn); + if (kvm_is_error_hva(hva)) { + pr_err("%s(): could not convert gfn 0x%llx to hva\n", + __func__, gfn); + r = -EFAULT; + goto out; + } + hva |= (address & ~PAGE_MASK); + intc_info_mu->gva = hva; + DebugPFINTC("converted guest address 0x%lx to hva 0x%lx to " + "recovery guest %s operation\n", + address, hva, (AS(cond).store) ? 
"store" : "load"); + + if (intc_ctxt->cur_mu + 1 < intc_ctxt->mu_num) { + next_tcellar = (trap_cellar_t *) + &(intc_info_mu + 1)->gva; + } else { + next_tcellar = NULL; + } + r = execute_mmu_operations((trap_cellar_t *)&intc_info_mu->gva, + next_tcellar, regs, 0, NULL, + &check_guest_spill_fill_recovery, + &calculate_guest_recovery_load_to_rf_frame); + if (r != EXEC_MMU_SUCCESS) + return -EFAULT; + return 0; + } + if (pfres == PFRES_ERR) { + /* error detected while page fault handling */ + r = -EFAULT; + goto out; + } + + if (pfres == PFRES_INJECTED) { + /* page fault injected to guest */ + KVM_BUG_ON(ignore_store); + return 0; + } + +mmio_emulate: + + KVM_BUG_ON(ignore_store); + + if (pfres == PFRES_TRY_MMIO) { + /* page fault on MMIO access */ + KVM_BUG_ON(mu_state->may_be_retried); + gpa = gfn_to_gpa(gfn); + gpa |= (address & ~PAGE_MASK); + return kvm_hv_io_page_fault(vcpu, gpa, intc_info_mu); + } + + KVM_BUG_ON(pfres != PFRES_WRITE_TRACK); + + /* set flag to enable writing for hardware recovery operation */ + intc_info_mu->hdr.ignore_wr_rights = 1; + kvm_set_intc_info_mu_is_updated(vcpu); + if ((error_code & PFERR_WAIT_LOCK_MASK) && + (error_code & PFERR_WRITE_MASK)) { + struct kvm_mmu_page *sp; + LIST_HEAD(invalid_list); + + /* + * It is load and wait lock operations. + * Hardware reexecute the operation, but the subsequent + * store and unlock operation can not be intercepted and + * only inevitable flush TLB line should be intercepted + * to update atomicaly updated PT entry. 
+ */ + DebugPFINTC("reexecute hardware recovery load and wait lock " + "operation\n"); + spin_lock(&vcpu->kvm->mmu_lock); + for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { + if (sp->unsync) + continue; + DebugPTE("found SP at %px mapped gva from 0x%lx, " + "gfn 0x%llx\n", + sp, sp->gva, gfn); + if (sp->role.level != PT_PAGE_TABLE_LEVEL) { + /* it can be only freed pgd/pud/pmd PT page */ + /* which is now used as pte PT page */ + kvm_mmu_prepare_zap_page(vcpu->kvm, sp, + &invalid_list); + continue; + } + kvm_unsync_page(vcpu, sp); + } + kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); + spin_unlock(&vcpu->kvm->mmu_lock); + return 0; + } + + gpa = gfn_to_gpa(gfn); + gpa |= (address & ~PAGE_MASK); + gpte = (const pgprot_t *)&intc_info_mu->data; + + if (HW_REEXECUTE_IS_SUPPORTED) { + /* MMU hardware itself will reexecute the memory */ + /* access operations */ + /* FIXME: kvm_page_track_write() function assumes that + guest pte was already updated, but MMU hardware will + reexecute store to gpte operation only while return to guest + (GLAUNCH starts all reexecutions). + So temporarly hypervisor itself reexecute the store + and MMU hardware will reexecute too one more time. 
+ It is not good + kvm_page_track_write(vcpu, gpa, + (const void*)gpte, sizeof(*gpte)); + return PFR_SUCCESS; + */ + } + /* MMU does not support reexecition of write protected memory access */ + /* so it need convert guest address to host 'physical' and */ + /* recover based on not protected host address */ + r = write_to_guest_pt_phys(vcpu, gpa, gpte, bytes); + + if (r == 1) { + /* guest try write to protected PT, page fault handled */ + /* and recovered by hypervisor */ + return 0; + } + +out: + if (ignore_store) { + /* mark the INTC_INFO_MU event as deleted to avoid */ + /* hardware reexucution of the store operation */ + kvm_delete_intc_info_mu(vcpu, intc_info_mu); + DebugEXCRPR("store to secondary space at recovery mode " + "will not be reexecuted by hardware\n"); + } + return r; +} +EXPORT_SYMBOL_GPL(kvm_hv_mmu_page_fault); + +int kvm_mmu_instr_page_fault(struct kvm_vcpu *vcpu, gva_t address, + bool async_instr, u32 error_code) +{ + int instr_num = 1; + gfn_t gfn; + pf_res_t r; + + DebugIPF("started for IP 0x%lx\n", address); + + if (!async_instr && ((address & PAGE_MASK) != + ((address + E2K_INSTR_MAX_SIZE - 1) & PAGE_MASK))) { + instr_num++; + } + + do { + r = handle_mmu_page_fault(vcpu, address, error_code, false, + &gfn, NULL); + if (r != PFRES_NO_ERR) + break; + address = (address & PAGE_MASK) + PAGE_SIZE; + } while (--instr_num, instr_num > 0); + + if (likely(r == PFRES_NO_ERR || r == PFRES_INJECTED)) { + /* fault was handler or injected to guest */ + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + return 0; + } else if (r == PFRES_RETRY) { + /* paga fault handling should be retried */ + return 0; + } + + /* it need something emulate */ + return -EFAULT; + + /* TODO In the end this interception will go away, so probably + * there is no need to handle AAU instruction page miss here */ +} + +#ifdef CONFIG_KVM_ASYNC_PF + +/* Can start handling async page fault or not ? 
 */
static bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
{
	/* guest must have enabled async page faults */
	if (!vcpu->arch.apf.enabled)
		return false;

	/*
	 * Page fault can be handled asynchronously only if
	 * it has occurred in user mode
	 */
	if (vcpu->arch.apf.in_pm)
		return false;

	/*
	 * Data and instruction page faults can be handled asynchronously.
	 * Other events should be handled immediately.
	 */
	/* NOTE(review): declarations after statements (C99) -- unusual for
	 * kernel style; kept as-is */
	int ev_no = vcpu->arch.intc_ctxt.cur_mu;
	intc_info_mu_event_code_t ev_code = get_event_code(vcpu, ev_no);

	if (ev_code > IME_GPA_AINSTR)
		return false;

	/* asynchronous (e.g. AAU) records cannot be deferred */
	if (intc_mu_record_asynchronous(vcpu, ev_no))
		return false;

	return true;
}

/*
 * Start async page fault handling
 */
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
{
	struct kvm_arch_async_pf arch;

	/* Unique identifier of async page fault event: */
	/* per-vcpu counter in the high bits, vcpu id in the low 12 bits */
	arch.apf_id = (vcpu->arch.apf.cnt++ << 12 | vcpu->vcpu_id);

	return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn),
					&arch);
}

/*
 * Write async page fault type to guest per-vcpu pv_apf_event.apf_reason
 * Return value: 0 - on success , error code - on failure.
 */
static int kvm_set_apf_reason(struct kvm_vcpu *vcpu, u32 apf_reason)
{
	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.reason_gpa,
					&apf_reason, sizeof(u32));
}

/*
 * Write async page fault id to guest per-vcpu pv_apf_event.apf_id
 * Return value: 0 - on success , error code - on failure.
 */
static int kvm_set_apf_id(struct kvm_vcpu *vcpu, u32 apf_id)
{
	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.id_gpa,
					&apf_id, sizeof(u32));
}

/*
 * Read async page fault type from guest per-vcpu pv_apf_event.apf_reason
 * Return value: 0 - on success , error code - on failure.
 */
static int kvm_get_apf_reason(struct kvm_vcpu *vcpu, u32 *apf_reason)
{
	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apf.reason_gpa,
					apf_reason, sizeof(u32));
}

/*
 * Read async page fault id from guest per-vcpu pv_apf_event.apf_id
 * Return value: 0 - on success , error code - on failure.
 */
static int kvm_get_apf_id(struct kvm_vcpu *vcpu, u32 *apf_id)
{
	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apf.id_gpa,
					apf_id, sizeof(u32));
}


/*
 * Notify guest that physical page is swapped out by host:
 * publish reason/id in the guest's pv_apf_event and queue a data page
 * fault exception.  On any publish failure the VCPU thread is killed
 * (SIGKILL) since guest and host state would diverge.
 */
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				struct kvm_async_pf *work)
{
	if (!kvm_set_apf_reason(vcpu, KVM_APF_PAGE_IN_SWAP) &&
			!kvm_set_apf_id(vcpu, work->arch.apf_id)) {
		vcpu->arch.apf.host_apf_reason = KVM_APF_PAGE_IN_SWAP;
		kvm_need_create_vcpu_exception(vcpu, exc_data_page_mask);
	} else {
		pr_err("Host: async_pf, %s, error while setting "
			"apf_reason and apf_id\n", __func__);
		force_sig(SIGKILL);
	}
}

/*
 * Notify guest that physical page is loaded from disk and ready for
 * access.  The "page ready" event is delivered through the guest's
 * interrupt controller; only EPIC delivery is implemented -- APIC and
 * unknown controllers kill the VCPU thread.
 */
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				struct kvm_async_pf *work)
{
	if (!kvm_set_apf_reason(vcpu, KVM_APF_PAGE_READY) &&
			!kvm_set_apf_id(vcpu, work->arch.apf_id)) {
		vcpu->arch.apf.host_apf_reason = KVM_APF_PAGE_READY;
		switch (vcpu->arch.apf.irq_controller) {
		case EPIC_CONTROLLER:
			kvm_hw_epic_async_pf_wake_deliver(vcpu);
			break;
		case APIC_CONTROLLER:
			/* TODO: support injecting page ready through APIC */
			pr_err("Host: async_pf, %s, APIC is not supported\n",
				__func__);
			force_sig(SIGKILL);
			break;
		default:
			pr_err("Host: async_pf, %s, unsupported type of"
				" irq controller\n", __func__);
			force_sig(SIGKILL);
		}
	} else {
		pr_err("Host: async_pf, %s, error while setting apf_reason"
			" and apf_id\n", __func__);
		force_sig(SIGKILL);
	}
}

/*
 * Fix up hypervisor page table when physical page is loaded from disk
 * and ready for access.
 */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
{
	pf_res_t ret = PFRES_NO_ERR;
	gfn_t gfnp;
	kvm_pfn_t pfnp;

	/* retry the fault (prefault == true) until it stops returning */
	/* PFRES_RETRY, yielding the CPU between attempts */
	for (;;) {
		ret = vcpu->arch.mmu.page_fault(vcpu, work->cr2_or_gpa, 0,
						true, &gfnp, &pfnp);

		if (ret == PFRES_RETRY)
			cond_resched();
		else
			break;
	}
}

/*
 * A "page present" event may be injected only when async PF is enabled
 * and the guest has consumed the previous event (reason/id cleared).
 * A failure to read the guest state kills the VCPU thread.
 */
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	u32 guest_apf_reason, guest_apf_id;

	if (kvm_get_apf_reason(vcpu, &guest_apf_reason) ||
			kvm_get_apf_id(vcpu, &guest_apf_id)) {
		force_sig(SIGKILL);
		return false;
	}

	return vcpu->arch.apf.enabled &&
		guest_apf_reason == KVM_APF_NO && guest_apf_id == 0;
}

#endif /* CONFIG_KVM_ASYNC_PF */

/*
 * Translate @gfn to a host pfn, possibly starting an async page fault.
 * Returns true when an async fault was set up (caller should not wait);
 * returns false with *pfn filled in when the translation completed
 * synchronously (or async PF is compiled out / not allowed).
 */
static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
			gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable)
{
	struct kvm_memory_slot *slot;
	bool async;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

#ifdef CONFIG_KVM_ASYNC_PF

	/* prefaults are always resolved synchronously */
	if (prefault || !kvm_can_do_async_pf(vcpu)) {
		*pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL,
						write, writable);
		return false;
	}

	/*
	 * Try to get pfn from hv page table. Use flag FOLL_NOWAIT for
	 * get_user_pages(). If physical page was swapped out by host,
	 * then start i/o , indicate async=true and return without sleeping
	 * while pages are loading from disk.
+ */ + async = false; + *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable); + if (!async) + return false; /* *pfn has correct page now */ + + if (vcpu->arch.apf.host_apf_reason) + return true; + + /* Physical page was swapped out by host, handle async page fault */ + if (kvm_arch_setup_async_pf(vcpu, gva, gfn)) + return true; + +#endif /* CONFIG_KVM_ASYNC_PF */ + + /* If attempt to handle page fault asynchronyously failed */ + *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable); + + return false; +} + +/* + * The function try convert gfn to rmapped host pfn, but only if gfn is valid + * anf pfn is exist, without faulting and pfn allocation. + * The function returns: + * - TRY_PF_NO_ERR - gfn is valid and gfn is rmapped to pfn + * - TRY_PF_ONLY_VALID_ERR - gfn is valid, but pfn is not yet allocated + * - TRY_PF_MMIO_ERR - gfn is from MMIO space, but not registered on host + * - < 0 - gfn is invalid, some actions failed or other errors + */ + +static try_pf_err_t try_atomic_pf(struct kvm_vcpu *vcpu, gfn_t gfn, + kvm_pfn_t *pfn, bool no_dirty_log) +{ + struct kvm_memory_slot *slot; + + *pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn, &slot, no_dirty_log); + if (is_mmio_space_pfn(*pfn)) { + /* gfn is from MMIO space, but is not registered on host */ + return TRY_PF_MMIO_ERR; + } else if (is_noslot_pfn(*pfn)) { + /* gfn is out of phisical memory, probably it is from IO */ + return TRY_PF_MMIO_ERR; + } else if (*pfn == KVM_PFN_ERR_FAULT) { + e2k_addr_t hva; + + /* gfn is not valid or rmapped to pfn on host */ + hva = gfn_to_hva_memslot(slot, gfn); + if (kvm_is_error_hva(hva)) { + pr_err("%s(): gfn_to_hva_memslot() gfn 0x%llx failed\n", + __func__, gfn); + return TO_TRY_PF_ERR(-EFAULT); + } + + /* Bug 129228: we may want to use a separate thread for HVA->HPA and debug prints */ + return TRY_PF_ONLY_VALID_ERR; +#if 0 + pgprot_t *pgprot; + pgprot = kvm_hva_to_pte(hva); + if (pgprot == NULL) { + pr_err("%s(): kvm_hva_to_pte() for gfn 0x%llx 
failed\n", + __func__, gfn); + return TO_TRY_PF_ERR(-EFAULT); + } + if (pgprot_present(*pgprot)) { + /* gfn is present and already rmapped on host */ + if ((pgprot_special(*pgprot) || + is_huge_zero_pmd(*(pmd_t *)pgprot) || + is_huge_zero_pud(*(pud_t *)pgprot)) && + !pgprot_write(*pgprot)) { + /* hva is zero mapped to huge page */ + /* so gfn can be mapped as only valid */ + return TRY_PF_ONLY_VALID_ERR; + } + pr_err("%s(): gfn 0x%llx present hva 0x%lx " + "pte %px == 0x%lx\n", + __func__, gfn, hva, pgprot, + pgprot_val(*pgprot)); + return TRY_PF_ONLY_VALID_ERR; + } else if (pgprot_valid(*pgprot)) { + /* gfn is valid, but not yet rmapped on host */ + DebugTOVM("gfn 0x%llx valid hva 0x%lx " + "pte %px == 0x%lx\n", + gfn, hva, pgprot, pgprot_val(*pgprot)); + return TRY_PF_ONLY_VALID_ERR; + } + KVM_BUG_ON(true); +#endif + } else if (is_error_pfn(*pfn)) { + pr_err("%s(): gfn_to_pfn_memslot_atomic() for gfn 0x%llx " + "failed\n", + __func__, gfn); + return TO_TRY_PF_ERR(-EFAULT); + } + return TRY_PF_NO_ERR; +} + +bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, + int page_num) +{ + if (!is_ss(vcpu)) + return true; + pr_err("FIXME: %s() is not implemented\n", __func__); + return true; +} + +static bool +check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level) +{ + int page_num = KVM_MMU_PAGES_PER_HPAGE(vcpu->kvm, level); + + gfn &= ~(page_num - 1); + + return kvm_mtrr_check_gfn_range_consistency(vcpu, gfn, page_num); +} + +static pf_res_t tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, + bool prefault, gfn_t *gfnp, kvm_pfn_t *pfnp) +{ + kvm_pfn_t pfn; + pf_res_t r = PFRES_ERR; + int ret; + int level; + bool force_pt_level; + gfn_t gfn = gpa >> PAGE_SHIFT; + int write = error_code & PFERR_WRITE_MASK; + bool map_writable = true; +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER + intc_mu_state_t *mu_state = get_intc_mu_state(vcpu); +#endif /* KVM_ARCH_WANT_MMU_NOTIFIER */ + + 
MMU_WARN_ON(!VALID_PAGE(kvm_get_gp_phys_root(vcpu))); + + DebugTDP("started for GPA 0x%lx\n", gpa); + + *gfnp = gfn; + + if (page_fault_handle_page_track(vcpu, error_code, gfn)) + return PFRES_WRITE_TRACK; + + ret = mmu_topup_memory_caches(vcpu); + if (ret) + return PFRES_ERR; + + force_pt_level = !check_hugepage_cache_consistency(vcpu, gfn, + PT_DIRECTORY_LEVEL); + level = mapping_level(vcpu, gfn, &force_pt_level); + if (likely(!force_pt_level)) { + if (level > PT_DIRECTORY_LEVEL && + !check_hugepage_cache_consistency(vcpu, gfn, level)) + level = PT_DIRECTORY_LEVEL; + gfn &= ~(KVM_MMU_PAGES_PER_HPAGE(vcpu->kvm, level) - 1); + *gfnp = gfn; + } + + if (fast_page_fault(vcpu, gpa, level, error_code)) + return PFRES_NO_ERR; + +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER + mu_state->notifier_seq = vcpu->kvm->mmu_notifier_seq; + smp_rmb(); +#endif /* KVM_ARCH_WANT_MMU_NOTIFIER */ + + pfn = mmio_prefixed_gfn_to_pfn(vcpu->kvm, gfn); + + if (unlikely(pfn)) { + map_writable = true; + } else { + if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, + &map_writable)) + return PFRES_NO_ERR; + } + + if (handle_abnormal_pfn(vcpu, gpa, gfn, pfn, ACC_ALL, &r)) + return r; + +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER + if (r == PFRES_TRY_MMIO) { + mu_state->may_be_retried = false; + } +#endif /* KVM_ARCH_WANT_MMU_NOTIFIER */ + + spin_lock(&vcpu->kvm->mmu_lock); +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER + if (!mu_state->ignore_notifier && r != PFRES_TRY_MMIO && + mmu_notifier_retry(vcpu->kvm, mu_state->notifier_seq)) + goto out_unlock; +#endif /* KVM_ARCH_WANT_MMU_NOTIFIER */ + make_mmu_pages_available(vcpu); + if (likely(!force_pt_level)) { + transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); + *gfnp = gfn; + } + r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault); + spin_unlock(&vcpu->kvm->mmu_lock); + + return r; + +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER +out_unlock: + spin_unlock(&vcpu->kvm->mmu_lock); + kvm_release_pfn_clean(pfn); + if (pfnp != NULL) { + *pfnp = pfn; + } + 
KVM_BUG_ON(!mu_state->may_be_retried && !prefault); + return PFRES_RETRY; +#endif /* KVM_ARCH_WANT_MMU_NOTIFIER */ +} + +static void nonpaging_init_context(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) +{ + if (kvm_is_tdp_enable(vcpu->kvm)) { + if (vcpu->arch.mmu.virt_ctrl_mu.rw_mmu_cr) { + /* access to MMU_CR register is intercepted */ + /* so paging state can be accessed as soft flag */ + context->is_paging = NULL; + } else { + /* paging state can be accessed only though SH_MMU_CR */ + context->is_paging = kvm_mmu_is_hv_paging; + } + } else { + context->is_paging = NULL; + } + context->set_vcpu_u_pptb = set_vcpu_nonp_u_pptb; + context->set_vcpu_sh_u_pptb = set_vcpu_nonp_sh_u_pptb; + context->set_vcpu_u_vptb = set_vcpu_nonp_u_vptb; + context->set_vcpu_sh_u_vptb = set_vcpu_nonp_sh_u_vptb; + context->set_vcpu_os_pptb = set_vcpu_nonp_os_pptb; + context->set_vcpu_sh_os_pptb = set_vcpu_nonp_sh_os_pptb; + context->set_vcpu_os_vptb = set_vcpu_nonp_os_vptb; + context->set_vcpu_sh_os_vptb = set_vcpu_nonp_sh_os_vptb; + context->set_vcpu_os_vab = set_vcpu_nonp_os_vab; + context->set_vcpu_gp_pptb = set_vcpu_nonp_gp_pptb; + context->get_vcpu_u_pptb = get_vcpu_nonp_u_pptb; + context->get_vcpu_sh_u_pptb = get_vcpu_nonp_sh_u_pptb; + context->get_vcpu_u_vptb = get_vcpu_nonp_u_vptb; + context->get_vcpu_sh_u_vptb = get_vcpu_nonp_sh_u_vptb; + context->get_vcpu_os_pptb = get_vcpu_nonp_os_pptb; + context->get_vcpu_sh_os_pptb = get_vcpu_nonp_sh_os_pptb; + context->get_vcpu_os_vptb = get_vcpu_nonp_os_vptb; + context->get_vcpu_sh_os_vptb = get_vcpu_nonp_sh_os_vptb; + context->get_vcpu_os_vab = get_vcpu_nonp_os_vab; + context->get_vcpu_gp_pptb = get_vcpu_nonp_gp_pptb; + context->set_vcpu_pt_context = set_vcpu_nonp_pt_context; + context->init_vcpu_ptb = init_vcpu_nonp_ptb; + context->get_vcpu_context_u_pptb = get_vcpu_context_nonp_u_pptb; + context->get_vcpu_context_u_vptb = get_vcpu_context_nonp_u_vptb; + context->get_vcpu_context_os_pptb = get_vcpu_context_nonp_os_pptb; + 
context->get_vcpu_context_os_vptb = get_vcpu_context_nonp_os_vptb; + context->get_vcpu_context_os_vab = get_vcpu_context_nonp_os_vab; + context->get_vcpu_context_gp_pptb = get_vcpu_context_nonp_gp_pptb; + context->page_fault = nonpaging_page_fault; + context->gva_to_gpa = nonpaging_gva_to_gpa; + context->sync_page = nonpaging_sync_page; + context->update_pte = nonpaging_update_pte; + context->root_level = 0; + if (is_ss(vcpu)) + context->shadow_root_level = PT32E_ROOT_LEVEL; + else + context->shadow_root_level = PT64_ROOT_LEVEL; + context->sh_os_root_hpa = E2K_INVALID_PAGE; + context->sh_u_root_hpa = E2K_INVALID_PAGE; + context->gp_root_hpa = E2K_INVALID_PAGE; + context->sh_root_hpa = E2K_INVALID_PAGE; + context->direct_map = true; + context->nx = false; +} + +void kvm_mmu_new_pptb(struct kvm_vcpu *vcpu, unsigned flags) +{ + mmu_free_roots(vcpu, flags); +} + +static bool sync_mmio_spte(struct kvm_vcpu *vcpu, pgprot_t *sptep, gfn_t gfn, + unsigned access, int *nr_present) +{ + if (unlikely(is_mmio_spte(vcpu->kvm, *sptep))) { + if (gfn != get_mmio_spte_gfn(vcpu->kvm, *sptep)) { + mmu_spte_clear_no_track(sptep); + return true; + } + + (*nr_present)++; + mark_mmio_spte(vcpu, sptep, gfn, access); + return true; + } + + return false; +} + +static inline bool is_last_gpte(struct kvm_mmu *mmu, + unsigned level, unsigned gpte) +{ + /* + * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set + * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means + * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then. + */ + gpte |= level - PT_PAGE_TABLE_LEVEL - 1; + + /* + * The RHS has bit 7 set iff level < mmu->last_nonleaf_level. + * If it is clear, there are no large pages at this level, so clear + * PT_PAGE_SIZE_MASK in gpte if that is the case. 
+ */ + gpte &= level - mmu->last_nonleaf_level; + + return gpte & PT_PAGE_SIZE_MASK; +} + +#define PTTYPE_E2K 0xe2 +#define PTTYPE_EPT 18 /* arbitrary */ + +#define PTTYPE PTTYPE_E2K +#include "paging_tmpl.h" +#undef PTTYPE + +#ifdef CONFIG_X86_HW_VIRTUALIZATION +#define PTTYPE PTTYPE_EPT +#include "paging_tmpl.h" +#undef PTTYPE + +#define PTTYPE 64 +#include "paging_tmpl.h" +#undef PTTYPE + +#define PTTYPE 32 +#include "paging_tmpl.h" +#undef PTTYPE +#endif /* CONFIG_X86_HW_VIRTUALIZATION */ + +#ifdef CONFIG_X86_HW_VIRTUALIZATION +static void +__reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, + struct rsvd_bits_validate *rsvd_check, + int maxphyaddr, int level, bool nx, bool gbpages, + bool pse, bool amd) +{ + u64 exb_bit_rsvd = 0; + u64 gbpages_bit_rsvd = 0; + u64 nonleaf_bit8_rsvd = 0; + + rsvd_check->bad_mt_xwr = 0; + + if (!nx) + exb_bit_rsvd = rsvd_bits(63, 63); + if (!gbpages) + gbpages_bit_rsvd = rsvd_bits(7, 7); + + /* + * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for + * leaf entries) on AMD CPUs only. 
+ */ + if (amd) + nonleaf_bit8_rsvd = rsvd_bits(8, 8); + + switch (level) { + case PT32_ROOT_LEVEL: + /* no rsvd bits for 2 level 4K page table entries */ + rsvd_check->rsvd_bits_mask[0][1] = 0; + rsvd_check->rsvd_bits_mask[0][0] = 0; + rsvd_check->rsvd_bits_mask[1][0] = + rsvd_check->rsvd_bits_mask[0][0]; + + if (!pse) { + rsvd_check->rsvd_bits_mask[1][1] = 0; + break; + } + + if (is_cpuid_PSE36()) + /* 36bits PSE 4MB page */ + rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21); + else + /* 32 bits PSE 4MB page */ + rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21); + break; + case PT32E_ROOT_LEVEL: + rsvd_check->rsvd_bits_mask[0][2] = + rsvd_bits(maxphyaddr, 63) | + rsvd_bits(5, 8) | rsvd_bits(1, 2); /* PDPTE */ + rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | + rsvd_bits(maxphyaddr, 62); /* PDE */ + rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd | + rsvd_bits(maxphyaddr, 62); /* PTE */ + rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd | + rsvd_bits(maxphyaddr, 62) | + rsvd_bits(13, 20); /* large page */ + rsvd_check->rsvd_bits_mask[1][0] = + rsvd_check->rsvd_bits_mask[0][0]; + break; + case PT64_ROOT_LEVEL: + rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd | + nonleaf_bit8_rsvd | rsvd_bits(7, 7) | + rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd | + nonleaf_bit8_rsvd | gbpages_bit_rsvd | + rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | + rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd | + rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[1][3] = + rsvd_check->rsvd_bits_mask[0][3]; + rsvd_check->rsvd_bits_mask[1][2] = exb_bit_rsvd | + gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) | + rsvd_bits(13, 29); + rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd | + rsvd_bits(maxphyaddr, 51) | + rsvd_bits(13, 20); /* large page */ + rsvd_check->rsvd_bits_mask[1][0] = + rsvd_check->rsvd_bits_mask[0][0]; + break; + } +} + +static void reset_rsvds_bits_mask(struct kvm_vcpu 
*vcpu, + struct kvm_mmu *context) +{ + __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check, + cpuid_maxphyaddr(vcpu), context->root_level, + context->nx, guest_cpuid_has_gbpages(vcpu), + is_pse(vcpu), guest_cpuid_is_amd(vcpu)); +} + +static void +__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check, + int maxphyaddr, bool execonly) +{ + u64 bad_mt_xwr; + + rsvd_check->rsvd_bits_mask[0][3] = + rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7); + rsvd_check->rsvd_bits_mask[0][2] = + rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); + rsvd_check->rsvd_bits_mask[0][1] = + rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); + rsvd_check->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51); + + /* large page */ + rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3]; + rsvd_check->rsvd_bits_mask[1][2] = + rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29); + rsvd_check->rsvd_bits_mask[1][1] = + rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20); + rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0]; + + bad_mt_xwr = 0xFFull << (2 * 8); /* bits 3..5 must not be 2 */ + bad_mt_xwr |= 0xFFull << (3 * 8); /* bits 3..5 must not be 3 */ + bad_mt_xwr |= 0xFFull << (7 * 8); /* bits 3..5 must not be 7 */ + bad_mt_xwr |= REPEAT_BYTE(1ull << 2); /* bits 0..2 must not be 010 */ + bad_mt_xwr |= REPEAT_BYTE(1ull << 6); /* bits 0..2 must not be 110 */ + if (!execonly) { + /* bits 0..2 must not be 100 unless VMX capabilities allow it */ + bad_mt_xwr |= REPEAT_BYTE(1ull << 4); + } + rsvd_check->bad_mt_xwr = bad_mt_xwr; +} + +static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, + struct kvm_mmu *context, bool execonly) +{ + __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check, + cpuid_maxphyaddr(vcpu), execonly); +} + +/* + * the page table on host is the shadow page table for the page + * table in guest or amd nested guest, its mmu features completely + * follow the features in guest. 
+ */ +void +reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) +{ + bool uses_nx = context->nx || context->base_role.smep_andnot_wp; + + /* + * Passing "true" to the last argument is okay; it adds a check + * on bit 8 of the SPTEs which KVM doesn't use anyway. + */ + __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check, + boot_cpu_data.x86_phys_bits, + context->shadow_root_level, uses_nx, + guest_cpuid_has_gbpages(vcpu), is_pse(vcpu), + true); +} +EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask); + +static inline bool boot_cpu_is_amd(void) +{ + WARN_ON_ONCE(!tdp_enabled); + return shadow_x_mask == 0; +} + +/* + * the direct page table on host, use as much mmu features as + * possible, however, kvm currently does not do execution-protection. + */ +static void +reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) +{ + if (boot_cpu_is_amd()) + __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check, + boot_cpu_data.x86_phys_bits, + context->shadow_root_level, false, + boot_cpu_has(X86_FEATURE_GBPAGES), + true, true); + else + __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, + boot_cpu_data.x86_phys_bits, + false); + +} + +/* + * as the comments in reset_shadow_zero_bits_mask() except it + * is the shadow page table for intel nested guest. 
+ */ +static void +reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, + struct kvm_mmu *context, bool execonly) +{ + __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, + boot_cpu_data.x86_phys_bits, execonly); +} + +static void update_permission_bitmask(struct kvm_vcpu *vcpu, + struct kvm_mmu *mmu, bool ept) +{ + unsigned bit, byte, pfec; + u8 map; + bool fault, x, w, u, wf, uf, ff, smapf, cr4_smap, cr4_smep, smap = 0; + + cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); + cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); + for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) { + pfec = byte << 1; + map = 0; + wf = pfec & PFERR_WRITE_MASK; + uf = pfec & PFERR_USER_MASK; + ff = pfec & PFERR_FETCH_MASK; + /* + * PFERR_RSVD_MASK bit is set in PFEC if the access is not + * subject to SMAP restrictions, and cleared otherwise. The + * bit is only meaningful if the SMAP bit is set in CR4. + */ + smapf = !(pfec & PFERR_RSVD_MASK); + for (bit = 0; bit < 8; ++bit) { + x = bit & ACC_EXEC_MASK; + w = bit & ACC_WRITE_MASK; + u = bit & ACC_USER_MASK; + + if (!ept) { + /* Not really needed: !nx will cause pte.nx */ + /* to fault */ + x |= !mmu->nx; + /* Allow supervisor writes if !cr0.wp */ + w |= !is_write_protection(vcpu) && !uf; + /* Disallow supervisor fetches of user code */ + /* if cr4.smep */ + x &= !(cr4_smep && u && !uf); + + /* + * SMAP:kernel-mode data accesses from user-mode + * mappings should fault. A fault is considered + * as a SMAP violation if all of the following + * conditions are ture: + * - X86_CR4_SMAP is set in CR4 + * - An user page is accessed + * - Page fault in kernel mode + * - if CPL = 3 or X86_EFLAGS_AC is clear + * + * Here, we cover the first three conditions. + * The fourth is computed dynamically in + * permission_fault() and is in smapf. + * + * Also, SMAP does not affect instruction + * fetches, add the !ff check here to make it + * clearer. 
+ */ + smap = cr4_smap && u && !uf && !ff; + } + + fault = (ff && !x) || (uf && !u) || (wf && !w) || + (smapf && smap); + map |= fault << bit; + } + mmu->permissions[byte] = map; + } +} + +/* +* PKU is an additional mechanism by which the paging controls access to +* user-mode addresses based on the value in the PKRU register. Protection +* key violations are reported through a bit in the page fault error code. +* Unlike other bits of the error code, the PK bit is not known at the +* call site of e.g. gva_to_gpa; it must be computed directly in +* permission_fault based on two bits of PKRU, on some machine state (CR4, +* CR0, EFER, CPL), and on other bits of the error code and the page tables. +* +* In particular the following conditions come from the error code, the +* page tables and the machine state: +* - PK is always zero unless CR4.PKE=1 and EFER.LMA=1 +* - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch) +* - PK is always zero if U=0 in the page tables +* - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access. +* +* The PKRU bitmask caches the result of these four conditions. The error +* code (minus the P bit) and the page table's U bit form an index into the +* PKRU bitmask. Two bits of the PKRU bitmask are then extracted and ANDed +* with the two bits of the PKRU register corresponding to the protection key. +* For the first three conditions above the bits will be 00, thus masking +* away both AD and WD. For all reads or if the last condition holds, WD +* only will be masked away. +*/ +static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, + bool ept) +{ + unsigned bit; + bool wp; + + if (ept) { + mmu->pkru_mask = 0; + return; + } + + /* PKEY is enabled only if CR4.PKE and EFER.LMA are both set. 
*/ + if (!kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || !is_long_mode(vcpu)) { + mmu->pkru_mask = 0; + return; + } + + wp = is_write_protection(vcpu); + + for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) { + unsigned pfec, pkey_bits; + bool check_pkey, check_write, ff, uf, wf, pte_user; + + pfec = bit << 1; + ff = pfec & PFERR_FETCH_MASK; + uf = pfec & PFERR_USER_MASK; + wf = pfec & PFERR_WRITE_MASK; + + /* PFEC.RSVD is replaced by ACC_USER_MASK. */ + pte_user = pfec & PFERR_RSVD_MASK; + + /* + * Only need to check the access which is not an + * instruction fetch and is to a user page. + */ + check_pkey = (!ff && pte_user); + /* + * write access is controlled by PKRU if it is a + * user access or CR0.WP = 1. + */ + check_write = check_pkey && wf && (uf || wp); + + /* PKRU.AD stops both read and write access. */ + pkey_bits = !!check_pkey; + /* PKRU.WD stops write access. */ + pkey_bits |= (!!check_write) << 1; + + mmu->pkru_mask |= (pkey_bits & 3) << pfec; + } +} +#else /* ! CONFIG_X86_HW_VIRTUALIZATION */ + +static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) +{ + if (is_ss(vcpu)) + pr_err("FIXME: %s() is not implemented\n", __func__); +} +static void update_permission_bitmask(struct kvm_vcpu *vcpu, + struct kvm_mmu *context, bool ept) +{ + if (is_ss(vcpu)) + pr_err("FIXME: %s() is not implemented\n", __func__); +} +static void update_pkru_bitmask(struct kvm_vcpu *vcpu, + struct kvm_mmu *context, bool ept) +{ + if (is_ss(vcpu)) + pr_err("FIXME: %s() is not implemented\n", __func__); +} +static void reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) +{ + if (is_ss(vcpu)) + pr_err("FIXME: %s() is not implemented\n", __func__); +} +void +reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) +{ + if (is_ss(vcpu)) + pr_err("FIXME: %s() is not implemented\n", __func__); +} +#endif /* CONFIG_X86_HW_VIRTUALIZATION */ + +static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, + 
struct kvm_mmu *mmu) +{ + unsigned root_level = mmu->root_level; + + mmu->last_nonleaf_level = root_level; + if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu)) + mmu->last_nonleaf_level++; +} + +static void e2k_paging_init_context_common(struct kvm_vcpu *vcpu, + struct kvm_mmu *context, + int level) +{ + DebugKVM("started on VCPU #%d\n", vcpu->vcpu_id); + context->nx = is_nx(vcpu); + context->root_level = level; + + reset_rsvds_bits_mask(vcpu, context); + update_permission_bitmask(vcpu, context, false); + update_pkru_bitmask(vcpu, context, false); + update_last_nonleaf_level(vcpu, context); + + context->is_paging = NULL; + context->set_vcpu_u_pptb = set_vcpu_spt_u_pptb; + context->set_vcpu_sh_u_pptb = set_vcpu_spt_sh_u_pptb; + context->set_vcpu_u_vptb = set_vcpu_spt_u_vptb; + context->set_vcpu_sh_u_vptb = set_vcpu_spt_sh_u_vptb; + context->set_vcpu_os_pptb = set_vcpu_spt_os_pptb; + context->set_vcpu_sh_os_pptb = set_vcpu_spt_sh_os_pptb; + context->set_vcpu_os_vptb = set_vcpu_spt_os_vptb; + context->set_vcpu_sh_os_vptb = set_vcpu_spt_sh_os_vptb; + context->set_vcpu_os_vab = set_vcpu_spt_os_vab; + context->set_vcpu_gp_pptb = set_vcpu_spt_gp_pptb; + context->get_vcpu_u_pptb = get_vcpu_spt_u_pptb; + context->get_vcpu_sh_u_pptb = get_vcpu_spt_sh_u_pptb; + context->get_vcpu_u_vptb = get_vcpu_spt_u_vptb; + context->get_vcpu_sh_u_vptb = get_vcpu_spt_sh_u_vptb; + context->get_vcpu_os_pptb = get_vcpu_spt_os_pptb; + context->get_vcpu_sh_os_pptb = get_vcpu_spt_sh_os_pptb; + context->get_vcpu_os_vptb = get_vcpu_spt_os_vptb; + context->get_vcpu_sh_os_vptb = get_vcpu_spt_sh_os_vptb; + context->get_vcpu_os_vab = get_vcpu_spt_os_vab; + context->get_vcpu_gp_pptb = get_vcpu_spt_gp_pptb; + context->set_vcpu_pt_context = set_vcpu_spt_pt_context; + context->init_vcpu_ptb = init_vcpu_spt_ptb; + context->get_vcpu_context_u_pptb = get_vcpu_context_spt_u_pptb; + context->get_vcpu_context_u_vptb = get_vcpu_context_spt_u_vptb; + context->get_vcpu_context_os_pptb = 
get_vcpu_context_spt_os_pptb; + context->get_vcpu_context_os_vptb = get_vcpu_context_spt_os_vptb; + context->get_vcpu_context_os_vab = get_vcpu_context_spt_os_vab; + context->get_vcpu_context_gp_pptb = get_vcpu_context_spt_gp_pptb; + context->page_fault = e2k_page_fault; + context->gva_to_gpa = e2k_gva_to_gpa; + context->sync_page = e2k_sync_page; + context->sync_gva = e2k_sync_gva; + context->sync_gva_range = e2k_sync_gva_range; + context->update_pte = e2k_update_pte; + context->shadow_root_level = level; + context->sh_os_root_hpa = E2K_INVALID_PAGE; + context->sh_u_root_hpa = E2K_INVALID_PAGE; + context->direct_map = false; +} + +static void e2k_paging_init_context(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) +{ + e2k_paging_init_context_common(vcpu, context, PT_E2K_ROOT_LEVEL); +} + +#ifdef CONFIG_X86_HW_VIRTUALIZATION +static void paging64_init_context_common(struct kvm_vcpu *vcpu, + struct kvm_mmu *context, + int level) +{ + context->nx = is_nx(vcpu); + context->root_level = level; + + reset_rsvds_bits_mask(vcpu, context); + update_permission_bitmask(vcpu, context, false); + update_pkru_bitmask(vcpu, context, false); + update_last_nonleaf_level(vcpu, context); + + MMU_WARN_ON(!is_pae(vcpu)); + context->page_fault = paging64_page_fault; + context->gva_to_gpa = paging64_gva_to_gpa; + context->sync_page = paging64_sync_page; + context->sync_gva = paging64_sync_gva; + context->sync_gva_range = paging64_sync_gva_range; + context->update_pte = paging64_update_pte; + context->shadow_root_level = level; + context->os_root_hpa = E2K_INVALID_PAGE; + context->u_root_hpa = E2K_INVALID_PAGE; + context->direct_map = false; +} + +static void paging64_init_context(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) +{ + paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL); +} + +static void paging32_init_context(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) +{ + context->nx = false; + context->root_level = PT32_ROOT_LEVEL; + + reset_rsvds_bits_mask(vcpu, 
context); + update_permission_bitmask(vcpu, context, false); + update_pkru_bitmask(vcpu, context, false); + update_last_nonleaf_level(vcpu, context); + + context->page_fault = paging32_page_fault; + context->gva_to_gpa = paging32_gva_to_gpa; + context->sync_page = paging32_sync_page; + context->sync_gva = paging32_sync_gva; + context->sync_gva_range = paging32_sync_gva_range; + context->update_pte = paging32_update_pte; + context->shadow_root_level = PT32E_ROOT_LEVEL; + context->os_root_hpa = E2K_INVALID_PAGE; + context->u_root_hpa = E2K_INVALID_PAGE; + context->direct_map = false; +} + +static void paging32E_init_context(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) +{ + paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL); +} +#else /* ! CONFIG_X86_HW_VIRTUALIZATION */ + +static void paging64_init_context(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) +{ + if (is_ss(vcpu)) + panic("FIXME: %s() is not yet implemented\n", __func__); + else + panic("FIXME: %s() secondary space support is not yet " + "implemented\n", __func__); +} +static void paging32_init_context(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) +{ + if (is_ss(vcpu)) + panic("FIXME: %s() is not yet implemented\n", __func__); + else + panic("FIXME: %s() secondary space support is not yet " + "implemented\n", __func__); +} + +static void paging32E_init_context(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) +{ + if (is_ss(vcpu)) + panic("FIXME: %s() is not yet implemented\n", __func__); + else + panic("FIXME: %s() secondary space support is not yet " + "implemented\n", __func__); +} +#endif /* CONFIG_X86_HW_VIRTUALIZATION */ + +static void init_kvm_nonpaging_mmu(struct kvm_vcpu *vcpu) +{ + struct kvm_mmu *context = &vcpu->arch.mmu; + + DebugKVM("started on VCPU #%d is PV %s, is HV %s\n", + vcpu->vcpu_id, + (vcpu->arch.is_pv) ? "true" : "false", + (vcpu->arch.is_hv) ? 
"true" : "false"); + + KVM_BUG_ON(is_paging(vcpu)); + + nonpaging_init_context(vcpu, context); +} + +static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) +{ + struct kvm_mmu *context = &vcpu->arch.mmu; + + DebugTDP("started on VCPU #%d\n", vcpu->vcpu_id); + + context->base_role.word = 0; + context->base_role.smm = is_smm(vcpu); + context->page_fault = tdp_page_fault; + context->sync_page = nonpaging_sync_page; + context->sync_gva = nonpaging_sync_gva; + context->sync_gva_range = nonpaging_sync_gva_range; + context->update_pte = nonpaging_update_pte; + context->shadow_root_level = get_tdp_root_level(); + if (!is_paging(vcpu)) { + context->sh_os_root_hpa = E2K_INVALID_PAGE; + context->sh_u_root_hpa = E2K_INVALID_PAGE; + } + context->direct_map = true; + if (vcpu->arch.mmu.virt_ctrl_mu.rw_mmu_cr) { + /* access to MMU_CR register is intercepted */ + /* so paging state can be accessed as soft flag */ + context->is_paging = NULL; + } else { + /* paging state can be accessed only though SH_MMU_CR */ + context->is_paging = kvm_mmu_is_hv_paging; + } + context->set_vcpu_u_pptb = set_vcpu_tdp_u_pptb; + context->set_vcpu_sh_u_pptb = set_vcpu_tdp_sh_u_pptb; + context->set_vcpu_u_vptb = set_vcpu_tdp_u_vptb; + context->set_vcpu_sh_u_vptb = set_vcpu_tdp_sh_u_vptb; + context->set_vcpu_os_pptb = set_vcpu_tdp_os_pptb; + context->set_vcpu_sh_os_pptb = set_vcpu_tdp_sh_os_pptb; + context->set_vcpu_os_vptb = set_vcpu_tdp_os_vptb; + context->set_vcpu_sh_os_vptb = set_vcpu_tdp_sh_os_vptb; + context->set_vcpu_os_vab = set_vcpu_tdp_os_vab; + context->set_vcpu_gp_pptb = set_vcpu_tdp_gp_pptb; + if (vcpu->arch.mmu.virt_ctrl_mu.rw_pptb) { + /* access to PT context registers are intercepted */ + /* so PT context have copy at MMU soft structure */ + context->get_vcpu_u_pptb = get_vcpu_tdp_u_pptb; + context->get_vcpu_u_vptb = get_vcpu_tdp_u_vptb; + context->get_vcpu_os_pptb = get_vcpu_tdp_os_pptb; + context->get_vcpu_os_vptb = get_vcpu_tdp_os_vptb; + context->get_vcpu_os_vab = get_vcpu_tdp_os_vab; + 
context->get_vcpu_gp_pptb = get_vcpu_tdp_gp_pptb; + } else { + /* PT context registers only on shadow registers */ + context->get_vcpu_u_pptb = get_vcpu_context_tdp_u_pptb; + context->get_vcpu_u_vptb = get_vcpu_context_tdp_u_vptb; + context->get_vcpu_os_pptb = get_vcpu_context_tdp_os_pptb; + context->get_vcpu_os_vptb = get_vcpu_context_tdp_os_vptb; + context->get_vcpu_os_vab = get_vcpu_context_tdp_os_vab; + context->get_vcpu_gp_pptb = get_vcpu_context_tdp_gp_pptb; + } + context->get_vcpu_sh_u_pptb = get_vcpu_tdp_sh_u_pptb; + context->get_vcpu_sh_u_vptb = get_vcpu_tdp_sh_u_vptb; + context->get_vcpu_sh_os_pptb = get_vcpu_tdp_sh_os_pptb; + context->get_vcpu_sh_os_vptb = get_vcpu_tdp_sh_os_vptb; + context->set_vcpu_pt_context = set_vcpu_tdp_pt_context; + context->init_vcpu_ptb = init_vcpu_tdp_ptb; + context->get_vcpu_context_u_pptb = get_vcpu_context_tdp_u_pptb; + context->get_vcpu_context_u_vptb = get_vcpu_context_tdp_u_vptb; + context->get_vcpu_context_os_pptb = get_vcpu_context_tdp_os_pptb; + context->get_vcpu_context_os_vptb = get_vcpu_context_tdp_os_vptb; + context->get_vcpu_context_os_vab = get_vcpu_context_tdp_os_vab; + context->get_vcpu_context_gp_pptb = get_vcpu_context_tdp_gp_pptb; + context->get_vcpu_pdpte = get_vcpu_pdpte; + context->inject_page_fault = NULL; + + if (!is_paging(vcpu)) { + context->nx = false; + context->gva_to_gpa = nonpaging_gva_to_gpa; + context->root_level = 0; + } else if (!is_ss(vcpu)) { + context->nx = is_nx(vcpu); + context->root_level = PT_E2K_ROOT_LEVEL; + reset_rsvds_bits_mask(vcpu, context); + context->gva_to_gpa = e2k_gva_to_gpa; +#ifdef CONFIG_X86_HW_VIRTUALIZATION + } else if (is_long_mode(vcpu)) { + context->nx = is_nx(vcpu); + context->root_level = PT64_ROOT_LEVEL; + reset_rsvds_bits_mask(vcpu, context); + context->gva_to_gpa = paging64_gva_to_gpa; + } else if (is_pae(vcpu)) { + context->nx = is_nx(vcpu); + context->root_level = PT32E_ROOT_LEVEL; + reset_rsvds_bits_mask(vcpu, context); + context->gva_to_gpa = 
paging64_gva_to_gpa; + } else { + context->nx = false; + context->root_level = PT32_ROOT_LEVEL; + reset_rsvds_bits_mask(vcpu, context); + context->gva_to_gpa = paging32_gva_to_gpa; +#endif /* CONFIG_X86_HW_VIRTUALIZATION */ + } + + update_permission_bitmask(vcpu, context, false); + update_pkru_bitmask(vcpu, context, false); + update_last_nonleaf_level(vcpu, context); + reset_tdp_shadow_zero_bits_mask(vcpu, context); +} + +void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu) +{ + bool smep = is_smep(vcpu); + bool smap = is_smap(vcpu); + struct kvm_mmu *context = &vcpu->arch.mmu; + + DebugKVM("started on VCPU #%d is PV %s, is HV %s\n", + vcpu->vcpu_id, + (vcpu->arch.is_pv) ? "true" : "false", + (vcpu->arch.is_hv) ? "true" : "false"); + + mmu_check_invalid_roots(vcpu, true /* invalid */, + OS_ROOT_PT_FLAG | U_ROOT_PT_FLAG); + + if (!is_paging(vcpu)) + nonpaging_init_context(vcpu, context); + else if (!is_ss(vcpu)) + e2k_paging_init_context(vcpu, context); + else if (is_long_mode(vcpu)) + paging64_init_context(vcpu, context); + else if (is_pae(vcpu)) + paging32E_init_context(vcpu, context); + else + paging32_init_context(vcpu, context); + + context->base_role.nxe = is_nx(vcpu); + context->base_role.cr4_pae = !!is_pae(vcpu); + context->base_role.cr0_wp = is_write_protection(vcpu); + context->base_role.smep_andnot_wp + = smep && !is_write_protection(vcpu); + context->base_role.smap_andnot_wp + = smap && !is_write_protection(vcpu); + context->base_role.smm = is_smm(vcpu); + reset_shadow_zero_bits_mask(vcpu, context); +} +EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu); + +#ifdef CONFIG_X86_EPT_MMU +void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly) +{ + struct kvm_mmu *context = &vcpu->arch.mmu; + + MMU_WARN_ON(VALID_PAGE(context->root_hpa)); + + context->shadow_root_level = get_tdp_root_level(); + + context->nx = true; + context->page_fault = ept_page_fault; + context->gva_to_gpa = ept_gva_to_gpa; + context->sync_page = ept_sync_page; + context->sync_gva = 
ept_sync_gva; + context->sync_gva_range = ept_sync_gva_range; + context->update_pte = ept_update_pte; + context->root_level = context->shadow_root_level; + context->root_hpa = E2K_INVALID_PAGE; + context->direct_map = false; + + update_permission_bitmask(vcpu, context, true); + update_pkru_bitmask(vcpu, context, true); + reset_rsvds_bits_mask_ept(vcpu, context, execonly); + reset_ept_shadow_zero_bits_mask(vcpu, context, execonly); +} +#else /* ! CONFIG_X86_EPT_MMU */ +void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly) +{ + pr_err("%s() is not supported on e2k arch\n", __func__); +} +#endif /* CONFIG_X86_EPT_MMU */ +EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu); + +static void init_kvm_softmmu(struct kvm_vcpu *vcpu) +{ + struct kvm_mmu *context = &vcpu->arch.mmu; + + DebugKVM("started on VCPU #%d\n", vcpu->vcpu_id); + kvm_init_shadow_mmu(vcpu); + if (vcpu->arch.is_hv) { + context->inject_page_fault = inject_shadow_page_fault; + } else if (vcpu->arch.is_pv) { + /* the function to inject depends on fault type */ + /* and will be called directly from page fault handler */ + context->inject_page_fault = NULL; + } else { + KVM_BUG_ON(true); + } +} + +static void init_kvm_mmu(struct kvm_vcpu *vcpu) +{ + if (!is_paging(vcpu)) { + init_kvm_nonpaging_mmu(vcpu); + } else if (tdp_enabled) { + init_kvm_tdp_mmu(vcpu); + } else { + init_kvm_softmmu(vcpu); + } +} + +void kvm_mmu_reset_context(struct kvm_vcpu *vcpu, unsigned flags) +{ + kvm_mmu_unload(vcpu, flags); + init_kvm_mmu(vcpu); +} +EXPORT_SYMBOL_GPL(kvm_mmu_reset_context); + +int kvm_mmu_load(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, unsigned flags) +{ + int r; + + DebugSYNC("started on VCPU #%d\n", vcpu->vcpu_id); + r = mmu_topup_memory_caches(vcpu); + if (r) + goto out; + if (vcpu->arch.mmu.direct_map) { + r = mmu_alloc_direct_roots(vcpu); + } else { + r = mmu_alloc_shadow_roots(vcpu, gmm, flags); + } + kvm_mmu_sync_roots(vcpu, flags); + if (r) + goto out; + /* set_vcpu_pptb() should ensure TLB has been 
flushed */ + /* FIXME: guest U_PPTB register should point to physical base of */ + /* guest PT, so I do not understand why the follow setting + vcpu->arch.mmu.set_vcpu_pptb(vcpu, vcpu->arch.mmu.root_hpa); + */ +out: + return r; +} +EXPORT_SYMBOL_GPL(kvm_mmu_load); + +void kvm_mmu_unload(struct kvm_vcpu *vcpu, unsigned flags) +{ + mmu_free_roots(vcpu, flags); + mmu_check_invalid_roots(vcpu, true /* invalid ? */, flags); +} +EXPORT_SYMBOL_GPL(kvm_mmu_unload); + +static void kvm_invalidate_all_roots(struct kvm *kvm) +{ + struct kvm_vcpu *vcpu; + int r; + + kvm_for_each_vcpu(r, vcpu, kvm) { + kvm_set_gp_phys_root(vcpu, E2K_INVALID_PAGE); + if (is_shadow_paging(vcpu)) { + kvm_set_space_type_spt_u_root(vcpu, E2K_INVALID_PAGE); + kvm_set_space_type_spt_os_root(vcpu, E2K_INVALID_PAGE); + } + } +} + +static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, struct gmm_struct *gmm, + struct kvm_mmu_page *sp, pgprot_t *spte, + gpa_t gpa, const void *new) +{ + DebugPTE("started for spte at %px == 0x%lx, new %px == 0x%lx\n", + spte, pgprot_val(*spte), new, pgprot_val(*(pgprot_t *)new)); + if (sp->role.level != PT_PAGE_TABLE_LEVEL) { + int ret; + + ++vcpu->kvm->stat.mmu_pde_zapped; + DebugPTE("PT level %d is not pte level, it need set pde\n", + sp->role.level); + spin_unlock(&vcpu->kvm->mmu_lock); + ret = e2k_shadow_pt_protection_fault(vcpu, gmm, gpa, sp); + KVM_BUG_ON(ret < 0); + DebugPTE("set PDE spte at %px == 0x%lx\n", + spte, pgprot_val(*spte)); + spin_lock(&vcpu->kvm->mmu_lock); + return; + } + ++vcpu->kvm->stat.mmu_pte_updated; + DebugPTE("set PTE spte at %px == 0x%lx\n", + spte, pgprot_val(*spte)); + vcpu->arch.mmu.update_pte(vcpu, sp, spte, new); + DebugPTE("updated to new spte at %px == 0x%lx\n", + spte, pgprot_val(*spte)); +} + +static bool need_remote_flush(struct kvm *kvm, pgprot_t old, pgprot_t new) +{ + if (!is_shadow_present_pte(kvm, old)) + return false; + if (!is_shadow_present_pte(kvm, new)) + return true; + if ((pgprot_val(old) ^ pgprot_val(new)) & 
kvm_get_spte_pfn_mask(kvm)) + return true; + pgprot_val(old) ^= get_spte_nx_mask(kvm); + pgprot_val(new) ^= get_spte_nx_mask(kvm); + return (pgprot_val(old) & ~pgprot_val(new) & PT64_PERM_MASK(kvm)) != 0; +} + +static pgprotval_t mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, + const u8 *new, int *bytes) +{ + pgprotval_t gentry; + int r; + + /* + * Assume that the pte write on a page table of the same type + * as the current vcpu paging mode since we update the sptes only + * when they have the same mode. + */ + if (is_pae(vcpu) && *bytes == 4) { + /* Handle a 32-bit guest writing two halves of a 64-bit gpte */ + *gpa &= ~(gpa_t)7; + *bytes = 8; + r = kvm_vcpu_read_guest(vcpu, *gpa, &gentry, 8); + if (r) + gentry = 0; + new = (const u8 *)&gentry; + } + + switch (*bytes) { + case 4: + gentry = *(const u32 *)new; + break; + case 8: + gentry = *(const u64 *)new; + break; + default: + gentry = 0; + break; + } + + return gentry; +} + +static pgprot_t *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, + int *nspte) +{ + unsigned page_offset, quadrant; + pgprot_t *spte; + int level; + + page_offset = offset_in_page(gpa); + level = sp->role.level; + *nspte = 1; + if (!sp->role.cr4_pae) { + page_offset <<= 1; /* 32->64 */ + /* + * A 32-bit pde maps 4MB while the shadow pdes map + * only 2MB. So we need to double the offset again + * and zap two pdes instead of one. 
+ */ + if (level == PT32_ROOT_LEVEL) { + page_offset &= ~7; /* kill rounding error */ + page_offset <<= 1; + *nspte = 2; + } + quadrant = page_offset >> PAGE_SHIFT; + page_offset &= ~PAGE_MASK; + if (quadrant != sp->role.quadrant) + return NULL; + } + + spte = &sp->spt[page_offset / sizeof(*spte)]; + return spte; +} + +static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, struct gmm_struct *gmm, + gpa_t gpa, const u8 *new, int bytes) +{ + gfn_t gfn = gpa_to_gfn(gpa), new_gfn; + struct kvm_mmu_page *sp; + LIST_HEAD(invalid_list); + pgprot_t entry, *spte; + pgprotval_t gentry; + int npte; + bool remote_flush, local_flush; + union kvm_mmu_page_role mask = { }; + + DebugPTE("started for GPA 0x%llx, new pte %px == 0x%lx\n", + gpa, new, *((pgprotval_t *)new)); + + mask.cr0_wp = 1; + mask.cr4_pae = 1; + mask.nxe = 1; + mask.smep_andnot_wp = 1; + mask.smap_andnot_wp = 1; + mask.smm = 1; + + /* + * If we don't have indirect shadow pages, it means no page is + * write-protected, so we can exit simply. + */ + if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) + return; + + remote_flush = false; + local_flush = false; + + pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes); + + gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes); + new_gfn = gpa_to_gfn( + kvm_gpte_gfn_to_phys_addr(vcpu, __pgprot(gentry))); + DebugPTE("guest new pte %px == 0x%lx\n", + new, pte_val(*(pte_t *)new)); + + /* + * No need to care whether allocation memory is successful + * or not since pte prefetch is skiped if it does not have + * enough objects in the cache. 
+ */ + mmu_topup_memory_caches(vcpu); + + spin_lock(&vcpu->kvm->mmu_lock); + ++vcpu->kvm->stat.mmu_pte_write; + kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE); + + for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { + DebugPTE("found SP at %px mapped gva from 0x%lx, gfn 0x%llx\n", + sp, sp->gva, gfn); + + spte = get_written_sptes(sp, gpa, &npte); + if (!spte) + continue; + + DebugPTE("GPA 0x%llx mapped by spte %px == 0x%lx, ptes %d\n", + gpa, spte, pgprot_val(*spte), npte); + + local_flush = true; + while (npte--) { + struct kvm_mmu_page *child; + + entry = *spte; + child = mmu_page_zap_pte(vcpu->kvm, sp, spte); + if (gentry && + !((sp->role.word ^ vcpu->arch.mmu.base_role.word) + & mask.word) && rmap_can_add(vcpu)) { + mmu_pte_write_new_pte(vcpu, gmm, sp, spte, + gpa, &gentry); + } + if (child && (child->gfn != new_gfn)) { + child->released = true; + kvm_mmu_prepare_zap_page(vcpu->kvm, child, + &invalid_list); + } + if (need_remote_flush(vcpu->kvm, entry, *spte)) + remote_flush = true; + ++spte; + } + } + kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush); + kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE); + spin_unlock(&vcpu->kvm->mmu_lock); +} + +int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) +{ + gpa_t gpa; + int r; + + if (vcpu->arch.mmu.direct_map) + return 0; + + gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); + + r = kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); + + return r; +} +EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt); + +static void make_mmu_pages_available(struct kvm_vcpu *vcpu) +{ + LIST_HEAD(invalid_list); + + if (likely(kvm_mmu_available_pages(vcpu->kvm) >= + KVM_MIN_FREE_MMU_PAGES)) + return; + + while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) { + if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list)) + break; + + ++vcpu->kvm->stat.mmu_recycled; + } + kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); +} + +void kvm_mmu_flush_gva(struct kvm_vcpu *vcpu, gva_t gva) +{ + 
vcpu->arch.mmu.sync_gva(vcpu, gva); + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + ++vcpu->stat.flush_gva; +} +EXPORT_SYMBOL_GPL(kvm_mmu_flush_gva); + +void kvm_enable_tdp(void) +{ + tdp_enabled = true; +} +EXPORT_SYMBOL_GPL(kvm_enable_tdp); + +void kvm_disable_tdp(void) +{ + tdp_enabled = false; +} +EXPORT_SYMBOL_GPL(kvm_disable_tdp); + +static void free_mmu_pages(struct kvm_vcpu *vcpu) +{ + free_page((unsigned long)vcpu->arch.mmu.pae_root); + if (vcpu->arch.mmu.lm_root != NULL) + free_page((unsigned long)vcpu->arch.mmu.lm_root); +} + +static int alloc_mmu_pages(struct kvm_vcpu *vcpu) +{ + struct page *page; + int i; + + /* + * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64. + * Therefore we need to allocate shadow page tables in the first + * 4GB of memory, which happens to fit the DMA32 zone. + */ + page = alloc_page(GFP_KERNEL | __GFP_DMA32); + if (!page) + return -ENOMEM; + + vcpu->arch.mmu.pae_root = page_address(page); + for (i = 0; i < 4; ++i) + vcpu->arch.mmu.pae_root[i] = E2K_INVALID_PAGE; + + return 0; +} + +int kvm_mmu_create(struct kvm_vcpu *vcpu) +{ + vcpu->arch.walk_mmu = &vcpu->arch.mmu; + vcpu->arch.mmu.sh_os_root_hpa = E2K_INVALID_PAGE; + vcpu->arch.mmu.sh_u_root_hpa = E2K_INVALID_PAGE; + vcpu->arch.mmu.gp_root_hpa = E2K_INVALID_PAGE; + vcpu->arch.mmu.sh_root_hpa = E2K_INVALID_PAGE; + vcpu->arch.mmu.translate_gpa = translate_gpa; + + kvm_setup_paging_mode(vcpu); + + return alloc_mmu_pages(vcpu); +} + +void kvm_mmu_setup(struct kvm_vcpu *vcpu) +{ + mmu_check_invalid_roots(vcpu, true /* invalid */, + OS_ROOT_PT_FLAG | U_ROOT_PT_FLAG); + kvm_setup_mmu_intc_mode(vcpu); + init_kvm_mmu(vcpu); +} + +static const pt_struct_t *get_cpu_iset_mmu_pt_struct(int iset, bool mmu_pt_v6) +{ + const pt_struct_t *pts; + + if (iset <= E2K_ISET_V2) { + pts = &pgtable_struct_e2k_v2; + } else if (iset < E2K_ISET_V5) { + pts = &pgtable_struct_e2k_v3; + } else if (iset == E2K_ISET_V5) { + pts = &pgtable_struct_e2k_v5; + } else if (iset >= E2K_ISET_V6) { + 
if (mmu_pt_v6) + pts = &pgtable_struct_e2k_v6_pt_v6; + else + pts = &pgtable_struct_e2k_v6_pt_v2; + } else { + BUG_ON(true); + } + return pts; +} + +const pt_struct_t *kvm_get_mmu_host_pt_struct(struct kvm *kvm) +{ + return get_cpu_iset_mmu_pt_struct(machine.native_iset_ver, + machine.mmu_pt_v6); +} + +const pt_struct_t *kvm_get_cpu_mmu_pt_struct(struct kvm_vcpu *vcpu) +{ + kvm_guest_info_t *guest_info = &vcpu->kvm->arch.guest_info; + bool pt_v6; + + if (vcpu->arch.is_hv) { + e2k_core_mode_t core_mode; + + core_mode = read_SH_CORE_MODE_reg(); + pt_v6 = !!core_mode.CORE_MODE_pt_v6; + if (guest_info->mmu_support_pt_v6 != pt_v6) { + pr_warn("%s(): VCPU #%d SH_CORE_MODE.pt_v6 is %d, " + "but guest info claims the opposite\n", + __func__, vcpu->vcpu_id, pt_v6); + guest_info->mmu_support_pt_v6 = pt_v6; + } + } else { + pt_v6 = guest_info->mmu_support_pt_v6; + } + return get_cpu_iset_mmu_pt_struct(guest_info->cpu_iset, pt_v6); +} + +const pt_struct_t *kvm_get_mmu_guest_pt_struct(struct kvm_vcpu *vcpu) +{ + if (!vcpu->arch.is_hv) { + /* paravirtualization case: guest PT type emulates */ + /* same as native PT type */ + return kvm_get_mmu_host_pt_struct(vcpu->kvm); + } else { + /* depends on guest CPU type */ + return kvm_get_cpu_mmu_pt_struct(vcpu); + } + + BUG_ON(true); + return NULL; +} + +const pt_struct_t *kvm_mmu_get_host_pt_struct(struct kvm *kvm) +{ + return mmu_get_host_pt_struct(kvm); +} + +const pt_struct_t *kvm_mmu_get_vcpu_pt_struct(struct kvm_vcpu *vcpu) +{ + return mmu_get_vcpu_pt_struct(vcpu); +} + +const pt_struct_t *kvm_mmu_get_gp_pt_struct(struct kvm *kvm) +{ + return mmu_get_gp_pt_struct(kvm); +} + +static void kvm_init_mmu_pt_structs(struct kvm *kvm) +{ + if (kvm_is_phys_pt_enable(kvm)) { + mmu_set_gp_pt_struct(kvm, &pgtable_struct_e2k_v6_gp); + mmu_set_host_pt_struct(kvm, &pgtable_struct_e2k_v6_gp); + } else if (kvm_is_shadow_pt_enable(kvm)) { + mmu_set_gp_pt_struct(kvm, kvm_get_mmu_host_pt_struct(kvm)); + mmu_set_host_pt_struct(kvm, 
kvm_get_mmu_host_pt_struct(kvm)); + } else { + BUG_ON(true); + } + mmu_set_gp_pt_struct_func(kvm, &kvm_mmu_get_gp_pt_struct); + mmu_set_host_pt_struct_func(kvm, &kvm_mmu_get_host_pt_struct); +} + +static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot, + struct kvm_page_track_notifier_node *node) +{ + kvm_mmu_invalidate_zap_all_pages(kvm); +} + +void kvm_mmu_init_vm(struct kvm *kvm) +{ + struct kvm_page_track_notifier_node *node; + + kvm_init_mmu_pt_structs(kvm); + + node = &kvm->arch.mmu_sp_tracker; + node->track_write = kvm_mmu_pte_write; + node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot; + kvm_page_track_register_notifier(kvm, node); + +} + +void kvm_mmu_uninit_vm(struct kvm *kvm) +{ + struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker; + + kvm_page_track_unregister_notifier(kvm, node); +} + +/* The return value indicates if tlb flush on all vcpus is needed. */ +typedef bool (*slot_level_handler)(struct kvm *kvm, + struct kvm_rmap_head *rmap_head); + +/* The caller should hold mmu-lock before calling this function. 
*/ +static bool +slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, + slot_level_handler fn, int start_level, int end_level, + gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb) +{ + struct slot_rmap_walk_iterator iterator; + bool flush = false; + + for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn, + end_gfn, &iterator, kvm) { + if (iterator.rmap) + flush |= fn(kvm, iterator.rmap); + + if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { + if (flush && lock_flush_tlb) { + kvm_flush_remote_tlbs(kvm); + flush = false; + } + cond_resched_lock(&kvm->mmu_lock); + } + } + + if (flush && lock_flush_tlb) { + kvm_flush_remote_tlbs(kvm); + flush = false; + } + + return flush; +} + +static bool +slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, + slot_level_handler fn, int start_level, int end_level, + bool lock_flush_tlb) +{ + return slot_handle_level_range(kvm, memslot, fn, start_level, + end_level, memslot->base_gfn, + memslot->base_gfn + memslot->npages - 1, + lock_flush_tlb); +} + +static bool +slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot, + slot_level_handler fn, bool lock_flush_tlb) +{ + return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL, + PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); +} + +static bool +slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot, + slot_level_handler fn, bool lock_flush_tlb) +{ + return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1, + PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); +} + +static bool +slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot, + slot_level_handler fn, bool lock_flush_tlb) +{ + return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL, + PT_PAGE_TABLE_LEVEL, lock_flush_tlb); +} + +void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + int i; + + spin_lock(&kvm->mmu_lock); + for (i = 0; i < 
KVM_ADDRESS_SPACE_NUM; i++) { + slots = __kvm_memslots(kvm, i); + kvm_for_each_memslot(memslot, slots) { + gfn_t start, end; + + start = max(gfn_start, memslot->base_gfn); + end = min(gfn_end, memslot->base_gfn + memslot->npages); + if (start >= end) + continue; + + slot_handle_level_range(kvm, memslot, kvm_zap_rmapp, + PT_PAGE_TABLE_LEVEL, + PT_MAX_HUGEPAGE_LEVEL, + start, end - 1, true); + } + } + + spin_unlock(&kvm->mmu_lock); +} + +static bool slot_rmap_write_protect(struct kvm *kvm, + struct kvm_rmap_head *rmap_head) +{ + return __rmap_write_protect(kvm, rmap_head, false); +} + +void kvm_mmu_slot_remove_write_access(struct kvm *kvm, + struct kvm_memory_slot *memslot) +{ + bool flush; + + spin_lock(&kvm->mmu_lock); + flush = slot_handle_all_level(kvm, memslot, slot_rmap_write_protect, + false); + spin_unlock(&kvm->mmu_lock); + + /* + * kvm_mmu_slot_remove_write_access() and kvm_vm_ioctl_get_dirty_log() + * which do tlb flush out of mmu-lock should be serialized by + * kvm->slots_lock otherwise tlb flush would be missed. + */ + lockdep_assert_held(&kvm->slots_lock); + + /* + * We can flush all the TLBs out of the mmu lock without TLB + * corruption since we just change the spte from writable to + * readonly so that we only need to care the case of changing + * spte from present to present (changing the spte from present + * to nonpresent will flush all the TLBs immediately), in other + * words, the only case we care is mmu_spte_update() where we + * haved checked SPTE_HOST_WRITABLE | SPTE_MMU_WRITABLE + * instead of PT_WRITABLE_MASK, that means it does not depend + * on PT_WRITABLE_MASK anymore. 
+ */ + if (flush) + kvm_flush_remote_tlbs(kvm); +} + +static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm, + struct kvm_rmap_head *rmap_head) +{ + pgprot_t *sptep; + struct rmap_iterator iter; + int need_tlb_flush = 0; + kvm_pfn_t pfn; + struct kvm_mmu_page *sp; + +restart: + for_each_rmap_spte(kvm, rmap_head, &iter, sptep) { + sp = page_header(__pa(sptep)); + pfn = spte_to_pfn(kvm, *sptep); + + /* + * We cannot do huge page mapping for indirect shadow pages, + * which are found on the last rmap (level = 1) when not using + * tdp; such shadow pages are synced with the page table in + * the guest, and the guest page table is using 4K page size + * mapping if the indirect sp has level = 1. + */ + if (sp->role.direct && + !kvm_is_reserved_pfn(pfn) && + PageTransCompoundMap(pfn_to_page(pfn))) { + drop_spte(kvm, sptep); + need_tlb_flush = 1; + goto restart; + } + } + + return need_tlb_flush; +} + +void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, + const struct kvm_memory_slot *memslot) +{ + /* FIXME: const-ify all uses of struct kvm_memory_slot. */ + spin_lock(&kvm->mmu_lock); + slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot, + kvm_mmu_zap_collapsible_spte, true); + spin_unlock(&kvm->mmu_lock); +} + +void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, + struct kvm_memory_slot *memslot) +{ + bool flush; + + spin_lock(&kvm->mmu_lock); + flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false); + spin_unlock(&kvm->mmu_lock); + + lockdep_assert_held(&kvm->slots_lock); + + /* + * It's also safe to flush TLBs out of mmu lock here as currently this + * function is only used for dirty logging, in which case flushing TLB + * out of mmu lock also guarantees no dirty pages will be lost in + * dirty_bitmap. 
+ */ + if (flush) + kvm_flush_remote_tlbs(kvm); +} +EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty); + +void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm, + struct kvm_memory_slot *memslot) +{ + bool flush; + + spin_lock(&kvm->mmu_lock); + flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect, + false); + spin_unlock(&kvm->mmu_lock); + + /* see kvm_mmu_slot_remove_write_access */ + lockdep_assert_held(&kvm->slots_lock); + + if (flush) + kvm_flush_remote_tlbs(kvm); +} +EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access); + +void kvm_mmu_slot_set_dirty(struct kvm *kvm, + struct kvm_memory_slot *memslot) +{ + bool flush; + + spin_lock(&kvm->mmu_lock); + flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false); + spin_unlock(&kvm->mmu_lock); + + lockdep_assert_held(&kvm->slots_lock); + + /* see kvm_mmu_slot_leaf_clear_dirty */ + if (flush) + kvm_flush_remote_tlbs(kvm); +} +EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty); + +#define BATCH_ZAP_PAGES 10 +static void kvm_zap_obsolete_pages(struct kvm *kvm) +{ + struct kvm_mmu_page *sp, *node; + int batch = 0; + + DebugKVMSH("started\n"); +restart: + list_for_each_entry_safe_reverse(sp, node, + &kvm->arch.active_mmu_pages, link) { + int ret; + + /* + * No obsolete page exists before new created page since + * active_mmu_pages is the FIFO list. + */ + if (!is_obsolete_sp(kvm, sp)) + break; + + /* + * Since we are reversely walking the list and the invalid + * list will be moved to the head, skip the invalid page + * can help us to avoid the infinity list walking. + */ + if (sp->role.invalid) + continue; + + /* + * Need not flush tlb since we only zap the sp with invalid + * generation number. 
+ */ + if (batch >= BATCH_ZAP_PAGES && + cond_resched_lock(&kvm->mmu_lock)) { + batch = 0; + goto restart; + } + + /* all SPs should be released unconditionally */ + sp->released = true; + + ret = kvm_mmu_prepare_zap_page(kvm, sp, + &kvm->arch.zapped_obsolete_pages); + batch += ret; + + if (ret) + goto restart; + } + + /* + * Should flush tlb before free page tables since lockless-walking + * may use the pages. + */ + kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages); +} + +/* + * Fast invalidate all shadow pages and use lock-break technique + * to zap obsolete pages. + * + * It's required when memslot is being deleted or VM is being + * destroyed, in these cases, we should ensure that KVM MMU does + * not use any resource of the being-deleted slot or all slots + * after calling the function. + */ +void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm) +{ + DebugKVMSH("started\n"); + spin_lock(&kvm->mmu_lock); + trace_kvm_mmu_invalidate_zap_all_pages(kvm); + kvm->arch.mmu_valid_gen++; + + /* + * Notify all vcpus to reload its shadow page table + * and flush TLB. Then all vcpus will switch to new + * shadow page table with the new mmu_valid_gen. + * + * Note: we should do this under the protection of + * mmu-lock, otherwise, vcpu would purge shadow page + * but miss tlb flush. + */ + kvm_reload_remote_mmus(kvm); + + kvm_zap_obsolete_pages(kvm); + + /* invalidate all page tables root pointers */ + kvm_invalidate_all_roots(kvm); + + spin_unlock(&kvm->mmu_lock); +} + +static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm) +{ + return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages)); +} + +void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen) +{ + gen &= MMIO_GEN_MASK; + + /* + * Shift to eliminate the "update in-progress" flag, which isn't + * included in the spte's generation number. 
+ */ + gen >>= 1; + + /* + * Generation numbers are incremented in multiples of the number of + * address spaces in order to provide unique generations across all + * address spaces. Strip what is effectively the address space + * modifier prior to checking for a wrap of the MMIO generation so + * that a wrap in any address space is detected. + */ + gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1); + + /* + * The very rare case: if the MMIO generation number has wrapped, + * zap all shadow pages. + */ + if (unlikely(gen == 0)) { + kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n"); + kvm_mmu_invalidate_zap_all_pages(kvm); + } +} + +static unsigned long +mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) +{ + struct kvm *kvm; + int nr_to_scan = sc->nr_to_scan; + unsigned long freed = 0; + + mutex_lock(&kvm_lock); + + list_for_each_entry(kvm, &vm_list, vm_list) { + int idx; + LIST_HEAD(invalid_list); + + /* + * Never scan more than sc->nr_to_scan VM instances. + * Will not hit this condition practically since we do not try + * to shrink more than one VM and it is very unlikely to see + * !n_used_mmu_pages so many times. + */ + if (!nr_to_scan--) + break; + /* + * n_used_mmu_pages is accessed without holding kvm->mmu_lock + * here. We may skip a VM instance errorneosly, but we do not + * want to shrink a VM that only started to populate its MMU + * anyway. 
+ */ + if (!kvm->arch.n_used_mmu_pages && + !kvm_has_zapped_obsolete_pages(kvm)) + continue; + + idx = srcu_read_lock(&kvm->srcu); + spin_lock(&kvm->mmu_lock); + + if (kvm_has_zapped_obsolete_pages(kvm)) { + kvm_mmu_commit_zap_page(kvm, + &kvm->arch.zapped_obsolete_pages); + goto unlock; + } + + if (prepare_zap_oldest_mmu_page(kvm, &invalid_list)) + freed++; + kvm_mmu_commit_zap_page(kvm, &invalid_list); + +unlock: + spin_unlock(&kvm->mmu_lock); + srcu_read_unlock(&kvm->srcu, idx); + + /* + * unfair on small ones + * per-vm shrinkers cry out + * sadness comes quickly + */ + list_move_tail(&kvm->vm_list, &vm_list); + break; + } + + mutex_unlock(&kvm_lock); + return freed; +} + +static unsigned long +mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc) +{ + return percpu_counter_read_positive(&kvm_total_used_mmu_pages); +} + +static struct shrinker mmu_shrinker = { + .count_objects = mmu_shrink_count, + .scan_objects = mmu_shrink_scan, + .seeks = DEFAULT_SEEKS * 10, +}; + +static int init_nonpaging_root_pt(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = vcpu->kvm; + hpa_t root; + pgprot_t *new_root; + int pt_index; + + KVM_BUG_ON(VALID_PAGE(kvm->arch.nonp_root_hpa)); + + root = kvm_get_gp_phys_root(vcpu); + KVM_BUG_ON(IS_E2K_INVALID_PAGE(root)); + DebugNONP("VCPU #%d created root PT at 0x%llx\n", + vcpu->vcpu_id, root); + + kvm->arch.nonp_root_hpa = root; /* PT root is common for all VCPUs */ + + mmu_set_gp_pt_struct_func(kvm, &kvm_mmu_get_gp_pt_struct); + mmu_set_host_pt_struct_func(kvm, &kvm_mmu_get_gp_pt_struct); + if (kvm_is_phys_pt_enable(kvm)) { + mmu_set_gp_pt_struct(kvm, &pgtable_struct_e2k_v6_gp); + } else if (kvm_is_shadow_pt_enable(kvm)) { + mmu_set_gp_pt_struct(kvm, kvm_get_mmu_host_pt_struct(kvm)); + + /* One PGD entry is the VPTB self-map. 
*/ + pt_index = pgd_index(KERNEL_VPTB_BASE_ADDR); + new_root = (pgprot_t *)__va(root); + kvm_vmlpt_kernel_spte_set(kvm, &new_root[pt_index], new_root); + } else { + BUG_ON(true); + } + + /* init intercept handling for nonpagin mode */ + mmu_init_nonpaging_intc(vcpu); + + return 0; +} + +/* + * Hypervisor should use only separate virtual space mode + * to provide atomic hardware switch hypervisor <-> guest. + * Only guest OS can be run at nonpaging mode, so use guest OS space + * to support this mode, but it is not surely. + */ +static void kvm_hv_setup_nonp_phys_pt(struct kvm_vcpu *vcpu, hpa_t root) +{ + /* guest PTs are not yet created and not used, only GP_PT */ + set_phys_paging(vcpu); + KVM_BUG_ON(!VALID_PAGE(vcpu->arch.mmu.get_vcpu_gp_pptb(vcpu))); +} + +static void kvm_setup_nonp_shadow_pt(struct kvm_vcpu *vcpu, hpa_t root) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + + set_shadow_paging(vcpu); + if (!is_phys_paging(vcpu)) { + /* guest physical addresses are translated by hypervisor */ + /* OS_* / U_* PTs */ + if (is_sep_virt_spaces(vcpu)) { + kvm_set_space_type_spt_os_root(vcpu, root); + } else { + kvm_set_space_type_spt_u_root(vcpu, root); + } + if (is_sep_virt_spaces(vcpu)) { + KVM_BUG_ON(mmu->get_vcpu_sh_os_pptb(vcpu) != root); + mmu->set_vcpu_sh_os_vptb(vcpu, MMU_GUEST_OS_PT_VPTB); + mmu->set_vcpu_os_vab(vcpu, 0); + } else { + KVM_BUG_ON(mmu->get_vcpu_sh_u_pptb(vcpu) != root); + mmu->set_vcpu_sh_u_vptb(vcpu, MMU_UNITED_USER_VPTB); + } + /* GP_* PTs cannot be used */ + if (vcpu->arch.is_hv) { + /* shadow PTs cannot be used for host translations */ + ; + } else if (vcpu->arch.is_pv) { + /* shadow PTs will be used in both modes */ + /* as well as host and guest translations */ + kvm_prepare_shadow_root(vcpu, NULL, + root, E2K_INVALID_PAGE, + (is_sep_virt_spaces(vcpu)) ? 
+ MMU_SEPARATE_KERNEL_VPTB + : + MMU_UNITED_KERNEL_VPTB); + } else { + KVM_BUG_ON(true); + } + } +} + +static void kvm_hv_setup_nonp_tdp(struct kvm_vcpu *vcpu) +{ + KVM_BUG_ON(!is_phys_paging(vcpu)); + set_tdp_paging(vcpu); +} + +int kvm_hv_setup_nonpaging_mode(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = vcpu->kvm; + unsigned flags; + int ret; + + DebugNONP("started on VCPU #%d\n", vcpu->vcpu_id); + + KVM_BUG_ON(is_paging(vcpu)); + + /* It need create new nonpaging PT to translate guest physical */ + /* addresses to host physical pages GPA->PA */ + + /* set all guest page table pointers to initial state */ + vcpu->arch.mmu.init_vcpu_ptb(vcpu); + + /* create root PT level */ + if (kvm_is_phys_pt_enable(vcpu->kvm)) { + flags = GP_ROOT_PT_FLAG; + } else if (kvm_is_shadow_pt_enable(kvm)) { + if (vcpu->arch.is_hv) { + set_sep_virt_spaces(vcpu); + flags = OS_ROOT_PT_FLAG; + } else { + reset_sep_virt_spaces(vcpu); + flags = U_ROOT_PT_FLAG; + } + } else { + KVM_BUG_ON(true); + } + ret = kvm_mmu_load(vcpu, NULL, flags); + if (ret) { + pr_err("%s(): could not create VCPU #%d root PT, error %d\n", + __func__, vcpu->vcpu_id, ret); + return ret; + } + + mutex_lock(&kvm->slots_lock); + if (IS_E2K_INVALID_PAGE(kvm->arch.nonp_root_hpa)) { + ret = init_nonpaging_root_pt(vcpu); + if (ret) + goto failed; + } else if (VALID_PAGE(kvm->arch.nonp_root_hpa)) { + DebugNONP("VCPU #%d root PT has been already " + "created at 0x%llx\n", + vcpu->vcpu_id, kvm->arch.nonp_root_hpa); + } else if (ERROR_PAGE(kvm->arch.nonp_root_hpa)) { + ret = PAGE_TO_ERROR(kvm->arch.nonp_root_hpa); + DebugNONP("VCPU #%d root PT creation has been failed, " + "error %d\n", + vcpu->vcpu_id, ret); + goto failed; + } else { + KVM_BUG_ON(true); + } + mutex_unlock(&kvm->slots_lock); + + if (kvm_is_phys_pt_enable(vcpu->kvm)) + kvm_hv_setup_nonp_phys_pt(vcpu, kvm->arch.nonp_root_hpa); + + if (kvm_is_tdp_enable(vcpu->kvm)) { + kvm_hv_setup_nonp_tdp(vcpu); + } else if (kvm_is_shadow_pt_enable(vcpu->kvm)) { + 
kvm_setup_nonp_shadow_pt(vcpu, kvm->arch.nonp_root_hpa); + } + + KVM_BUG_ON(!(is_shadow_paging(vcpu) || is_phys_paging(vcpu))); + + return 0; + +failed: + mutex_unlock(&kvm->slots_lock); + return ret; +} + +static void complete_nonpaging_mode(struct kvm_vcpu *vcpu) +{ + set_paging_flag(vcpu); + kvm_mmu_reset_context(vcpu, OS_ROOT_PT_FLAG | U_ROOT_PT_FLAG); +} + +static int setup_shadow_root(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + unsigned flags) +{ + struct kvm *kvm = vcpu->kvm; + hpa_t os_root, u_root, gp_root; + int ret; + + /* setup page table structures type to properly manage PTs */ + mmu_set_host_pt_struct(kvm, kvm_get_mmu_host_pt_struct(kvm)); + mmu_set_vcpu_pt_struct(kvm, kvm_get_mmu_guest_pt_struct(vcpu)); + mmu_set_host_pt_struct_func(kvm, &kvm_mmu_get_host_pt_struct); + mmu_set_vcpu_pt_struct_func(kvm, &kvm_mmu_get_vcpu_pt_struct); + + ret = kvm_mmu_load(vcpu, gmm, flags); + if (ret) { + pr_err("%s(): could not create support of VCPU #%d MMU\n", + __func__, vcpu->vcpu_id); + return ret; + } + + mmu_get_spt_roots(vcpu, flags | GP_ROOT_PT_FLAG, + &os_root, &u_root, &gp_root); + + if (VALID_PAGE(u_root)) { + kvm_prepare_shadow_root(vcpu, gmm, u_root, gp_root, + vcpu->arch.mmu.get_vcpu_sh_u_vptb(vcpu)); + } + if (VALID_PAGE(os_root) && os_root != u_root) { + kvm_prepare_shadow_root(vcpu, gmm, os_root, gp_root, + vcpu->arch.mmu.get_vcpu_sh_os_vptb(vcpu)); + } + + return 0; +} + +static int kvm_sync_shadow_root(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + hpa_t root_hpa, unsigned flags) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + struct kvm_mmu_page *sp; + e2k_addr_t sync_start, sync_end; + pgprotval_t pptb, u_pptb, os_pptb; + gva_t vptb; + const char *type; + int ret; + + sp = page_header(root_hpa); + if (!sp->unsync) + return 0; + + u_pptb = mmu->get_vcpu_u_pptb(vcpu); + os_pptb = mmu->get_vcpu_os_pptb(vcpu); + + if (flags & U_ROOT_PT_FLAG) { + if (is_sep_virt_spaces(vcpu)) { + if (u_pptb != os_pptb) { + /* can be two PTs user and OS */ + sync_start = 
0; + sync_end = mmu->get_vcpu_os_vab(vcpu); + pptb = u_pptb; + vptb = mmu->get_vcpu_sh_u_vptb(vcpu); + type = "user U_PT"; + } else { + /* there is one PT (OS and user) */ + sync_start = 0; + sync_end = E2K_VA_MASK; + pptb = os_pptb; + vptb = mmu->get_vcpu_sh_os_vptb(vcpu); + type = "OS/user OS_PT"; + } + } else { + /* there is one PT (OS and user) */ + sync_start = 0; + sync_end = E2K_VA_MASK; + pptb = u_pptb; + vptb = mmu->get_vcpu_sh_u_vptb(vcpu); + type = "OS/user U_PT"; + } + DebugSPT("VCPU #%d created shadow root %s at 0x%llx " + "for guest ininitial root PT at 0x%lx\n", + vcpu->vcpu_id, type, root_hpa, pptb); + } else if (flags & OS_ROOT_PT_FLAG) { + if (is_sep_virt_spaces(vcpu)) { + if (u_pptb != os_pptb) { + /* can be two PTs user and OS */ + sync_start = mmu->get_vcpu_os_vab(vcpu); + sync_end = E2K_VA_MASK; + pptb = os_pptb; + vptb = mmu->get_vcpu_sh_os_vptb(vcpu); + type = "OS_PT"; + } else { + /* there is one PT (OS and user) */ + sync_start = 0; + sync_end = E2K_VA_MASK; + pptb = os_pptb; + vptb = mmu->get_vcpu_sh_os_vptb(vcpu); + type = "OS/user OS_PT"; + } + } else { + sync_start = 0; + sync_end = E2K_VA_MASK; + pptb = u_pptb; + vptb = mmu->get_vcpu_sh_u_vptb(vcpu); + type = "OS/user U_PT"; + } + DebugSPT("VCPU #%d created shadow root %s at 0x%llx " + "for guest ininitial root PT at 0x%lx\n", + vcpu->vcpu_id, type, root_hpa, pptb); + } else { + KVM_BUG_ON(true); + } + + kvm_unlink_unsync_page(vcpu->kvm, sp); + ret = e2k_sync_shadow_pt_range(vcpu, gmm, root_hpa, + sync_start, sync_end, E2K_INVALID_PAGE, vptb); + if (ret) { + pr_err("%s(): could not sync host shadow U_PT " + "and guest initial PT, error %d\n", + __func__, ret); + return ret; + } + DebugSPT("VCPU #%d shadow root %s at 0x%llx synced " + "from 0x%lx to 0x%lx\n", + vcpu->vcpu_id, type, root_hpa, sync_start, sync_end); + + return 0; +} + +static int kvm_sync_shadow_u_root(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + bool force) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + struct 
kvm_mmu_page *sp; + hpa_t root; + e2k_addr_t sync_start, sync_end; + pgprotval_t u_pptb; + gva_t vptb; + const char *type; + int ret; + + root = kvm_get_space_type_spt_u_root(vcpu); + u_pptb = mmu->get_vcpu_u_pptb(vcpu); + sp = page_header(root); + if (!sp->unsync && !force) { + DebugSPT("VCPU #%d user shadow root at 0x%llx for guest " + "root PT at 0x%lxis already synced\n", + vcpu->vcpu_id, root, u_pptb); + return 0; + } else if (!sp->unsync) { + sp->unsync = 1; + } + + if (is_sep_virt_spaces(vcpu)) { + /* can be two PTs user and OS */ + sync_start = 0; + sync_end = mmu->get_vcpu_os_vab(vcpu); + vptb = mmu->get_vcpu_sh_u_vptb(vcpu); + type = "separate user U_PT"; + } else { + /* there is one PT (OS and user) */ + sync_start = 0; + sync_end = HOST_TASK_SIZE; + vptb = mmu->get_vcpu_sh_u_vptb(vcpu); + type = "united OS/user U_PT"; + } + DebugSPT("will be synced VCPU #%d root %s at 0x%llx for guest " + "root PT at 0x%lx\n", + vcpu->vcpu_id, type, root, u_pptb); + + kvm_unlink_unsync_page(vcpu->kvm, sp); + ret = e2k_sync_shadow_pt_range(vcpu, gmm, root, sync_start, sync_end, + u_pptb, vptb); + if (ret) { + pr_err("%s(): could not sync host shadow U_PT " + "and guest root PT, error %d\n", + __func__, ret); + return ret; + } + DebugSPT("VCPU #%d shadow user root %s at 0x%llx synced " + "from 0x%lx to 0x%lx\n", + vcpu->vcpu_id, type, root, sync_start, sync_end); + + return 0; +} + +static int sync_pv_vcpu_shadow_u_root(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + hpa_t root_hpa, gpa_t u_pptb) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + struct kvm_mmu_page *sp; + e2k_addr_t sync_start, sync_end; + gva_t vptb; + int ret; + + sp = page_header(root_hpa); + if (!sp->unsync) + return 0; + + if (is_sep_virt_spaces(vcpu)) { + /* can be two PTs user and OS */ + sync_start = 0; + if (sp->guest_kernel_synced) { + sync_end = GUEST_TASK_SIZE; + } else { + sync_end = mmu->get_vcpu_os_vab(vcpu); + } + vptb = mmu->get_vcpu_sh_u_vptb(vcpu); + } else { + /* there is one PT (OS and user) 
*/ + sync_start = 0; + if (sp->guest_kernel_synced) { + sync_end = GUEST_TASK_SIZE; + } else { + sync_end = HOST_TASK_SIZE; + } + vptb = mmu->get_vcpu_sh_u_vptb(vcpu); + } + DebugGMM("VCPU #%d created shadow user root at 0x%llx " + "for guest ininitial root PT at 0x%llx\n", + vcpu->vcpu_id, root_hpa, u_pptb); + + kvm_unlink_unsync_page(vcpu->kvm, sp); + ret = e2k_sync_shadow_pt_range(vcpu, gmm, root_hpa, + sync_start, sync_end, u_pptb, vptb); + if (ret) { + pr_err("%s(): could not sync host shadow user PT " + "and guest initial PT, error %d\n", + __func__, ret); + return ret; + } + DebugGMM("VCPU #%d shadow user root at 0x%llx synced " + "from 0x%lx to 0x%lx\n", + vcpu->vcpu_id, root_hpa, sync_start, sync_end); + + return 0; +} + +int kvm_sync_init_shadow_pt(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + gpa_t u_phys_ptb, gva_t u_virt_ptb, + gpa_t os_phys_ptb, gva_t os_virt_ptb, gva_t os_virt_base) +{ + hpa_t os_root, u_root; + unsigned flags; + int ret; + + KVM_BUG_ON(gmm == NULL); + KVM_BUG_ON(VALID_PAGE(gmm->root_hpa)); + + /* always separate speces should be used */ + set_sep_virt_spaces(vcpu); + + vcpu->arch.mmu.set_vcpu_os_pptb(vcpu, os_phys_ptb); + vcpu->arch.mmu.set_vcpu_os_vptb(vcpu, os_virt_ptb); + vcpu->arch.mmu.set_vcpu_sh_os_vptb(vcpu, os_virt_ptb); + vcpu->arch.mmu.set_vcpu_os_vab(vcpu, os_virt_base); + vcpu->arch.mmu.set_vcpu_u_pptb(vcpu, u_phys_ptb); + vcpu->arch.mmu.set_vcpu_u_vptb(vcpu, u_virt_ptb); + vcpu->arch.mmu.set_vcpu_sh_u_vptb(vcpu, u_virt_ptb); + flags = OS_ROOT_PT_FLAG | U_ROOT_PT_FLAG; + + ret = setup_shadow_root(vcpu, gmm, flags); + if (ret) { + pr_err("%s(): could not create support of VCPU #%d MMU\n", + __func__, vcpu->vcpu_id); + goto failed; + } + + mmu_get_spt_roots(vcpu, flags, &os_root, &u_root, NULL); + + /* shadow PT root is common for all VCPUs */ + if (VALID_PAGE(os_root)) { + gmm->root_hpa = os_root; + } else if (VALID_PAGE(u_root)) { + gmm->root_hpa = u_root; + } else { + KVM_BUG_ON(true); + } + 
kvm_set_root_gmm_spt_list(gmm); + return 0; + +failed: + gmm->root_hpa = TO_ERROR_PAGE(ret); + return ret; +} + +int kvm_prepare_shadow_user_pt(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + gpa_t u_phys_ptb) +{ + hpa_t root; + struct kvm_mmu_page *sp; + int ret; + + KVM_BUG_ON(!is_shadow_paging(vcpu)); + KVM_BUG_ON(gmm == NULL); + KVM_BUG_ON(VALID_PAGE(gmm->root_hpa)); + KVM_BUG_ON(!vcpu->arch.mmu.u_context_on); + + ret = mmu_topup_memory_caches(vcpu); + if (ret) { + pr_err("%s(): could not create memory caches on VCPU #%d " + "error %d\n", + __func__, vcpu->vcpu_id, ret); + goto failed; + } + + root = e2k_mmu_alloc_spt_root(vcpu, gpa_to_gfn(u_phys_ptb)); + DebugGMM("VCPU #%d created shadow root PT at 0x%llx for guest " + "user root PT physical at 0x%llx for gmm #%d\n", + vcpu->vcpu_id, root, u_phys_ptb, gmm->nid.nr); + + mmu_pv_prepare_spt_u_root(vcpu, gmm, root); + + sp = page_header(root); + kvm_init_root_gmm_spt_list(gmm, sp); + + ret = sync_pv_vcpu_shadow_u_root(vcpu, gmm, root, u_phys_ptb); + if (ret) { + pr_err("%s(): failed to sync user root of GMM #%d, error %d\n", + __func__, gmm->nid.nr, ret); + goto failed; + } + DebugGMM("VCPU #%d, guest user root at 0x%llx, shadow root " + "at 0x%llx\n", + vcpu->vcpu_id, u_phys_ptb, root); + + gmm->pt_synced = true; + gmm->root_hpa = root; /* shadow PT root has been set */ + return 0; + +failed: + gmm->root_hpa = TO_ERROR_PAGE(ret); + return ret; +} + +int kvm_create_shadow_user_pt(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + gpa_t u_phys_ptb) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + hpa_t root; + struct kvm_mmu_page *sp; + e2k_addr_t sync_start, sync_end; + int ret; + + KVM_BUG_ON(!is_shadow_paging(vcpu)); + KVM_BUG_ON(gmm == NULL); + KVM_BUG_ON(VALID_PAGE(gmm->root_hpa)); + + if (likely(mmu->u_context_on)) { + /* unload previous MMU PT and context before load new */ + kvm_mmu_unload(vcpu, U_ROOT_PT_FLAG); + } else { + /* enable support of guest user space */ + root = kvm_get_space_type_spt_u_root(vcpu); + if 
(VALID_PAGE(root)) { + KVM_BUG_ON(is_sep_virt_spaces(vcpu) && + root != kvm_get_space_type_spt_os_root(vcpu)); + /* unload previous MMU PT and context before load new */ + kvm_mmu_unload(vcpu, U_ROOT_PT_FLAG); + } + if (is_sep_virt_spaces(vcpu)) { + mmu->set_vcpu_sh_u_vptb(vcpu, USER_VPTB_BASE_ADDR); + mmu->set_vcpu_os_vab(vcpu, MMU_GUEST_OS_VAB); + } else { + mmu->set_vcpu_sh_u_vptb(vcpu, MMU_UNITED_USER_VPTB); + } + } + mmu->set_vcpu_u_pptb(vcpu, u_phys_ptb); + ret = kvm_mmu_load(vcpu, gmm, U_ROOT_PT_FLAG | DONT_SYNC_ROOT_PT_FLAG); + if (ret) { + pr_err("%s(): could not load MMU support of VCPU #%d\n", + __func__, vcpu->vcpu_id); + ret = -ENOMEM; + goto failed; + } + mmu->pid = gmm->nid.nr; + + root = kvm_get_space_type_spt_u_root(vcpu); + DebugSPT("VCPU #%d created shadow root PT at 0x%llx for guest " + "user root PT physical at 0x%lx, virtual at 0x%lx\n", + vcpu->vcpu_id, root, mmu->get_vcpu_u_pptb(vcpu), + mmu->get_vcpu_sh_u_vptb(vcpu)); + + mmu_pv_prepare_spt_u_root(vcpu, gmm, root); + + sp = page_header(root); + kvm_init_root_gmm_spt_list(gmm, sp); + + sync_start = 0; + if (sp->guest_kernel_synced) { + sync_end = GUEST_TASK_SIZE; + } else { + sync_end = HOST_TASK_SIZE; + } + + ret = e2k_sync_shadow_pt_range(vcpu, gmm, root, sync_start, sync_end, + u_phys_ptb, mmu->get_vcpu_sh_u_vptb(vcpu)); + if (ret) { + pr_err("%s(): could not sync host shadow PT and guest " + "initial PT, error %d\n", + __func__, ret); + goto failed; + } + DebugSPT("VCPU #%d shadow root at 0x%llx synced " + "from 0x%lx to 0x%lx\n", + vcpu->vcpu_id, root, sync_start, sync_end); + + gmm->pt_synced = true; + gmm->root_hpa = root; /* shadow PT root has been set */ + return 0; + +failed: + gmm->root_hpa = TO_ERROR_PAGE(ret); + return ret; +} + +static int switch_shadow_pptb(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + gpa_t pptb, unsigned flags) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + hpa_t root; + int ret; + + KVM_BUG_ON(!is_shadow_paging(vcpu)); + + /* unload previous MMU PT and context 
before load new */ + kvm_mmu_unload(vcpu, flags); + + /* switch VCPU MMU to new PT */ + if ((flags & U_ROOT_PT_FLAG) || + ((flags & OS_ROOT_PT_FLAG) && + is_sep_virt_spaces(vcpu))) { + mmu->set_vcpu_u_pptb(vcpu, pptb); + } else if (flags & OS_ROOT_PT_FLAG) { + mmu->set_vcpu_os_pptb(vcpu, pptb); + } else { + KVM_BUG_ON(true); + } + ret = kvm_mmu_load(vcpu, gmm, flags); + if (ret) { + pr_err("%s(): could not load new shadow PT\n", __func__); + goto failed; + } + + root = kvm_get_space_type_spt_u_root(vcpu); + DebugSPT("VCPU #%d created shadow root PT at 0x%llx for guest " + "user root PT at 0x%llx, virtual at 0x%lx\n", + vcpu->vcpu_id, root, pptb, mmu->get_vcpu_sh_u_vptb(vcpu)); + + if (!vcpu->arch.is_hv) { + KVM_BUG_ON(true); + kvm_prepare_shadow_root(vcpu, NULL, root, E2K_INVALID_PAGE, + mmu->get_vcpu_sh_u_vptb(vcpu)); + } + + ret = kvm_sync_shadow_u_root(vcpu, gmm, false); + if (ret) { + pr_err("%s(): could not sync host shadow PT and guest " + "user root PT, error %d\n", + __func__, ret); + goto failed; + } + + return 0; + +failed: + return ret; +} + +hpa_t mmu_pv_switch_spt_u_pptb(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + gpa_t u_phys_ptb) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + hpa_t root; + int ret; + + ret = switch_shadow_pptb(vcpu, gmm, u_phys_ptb, U_ROOT_PT_FLAG); + if (ret) { + pr_err("%s(): could not load PT of next MM pid #%d\n", + __func__, gmm->nid.nr); + goto failed; + } + mmu->pid = gmm->nid.nr; + + root = kvm_get_space_type_spt_u_root(vcpu); + DebugSPT("VCPU #%d loaded root PT at 0x%llx for guest " + "user root PT physical at 0x%llx, PID %d\n", + vcpu->vcpu_id, root, u_phys_ptb, mmu->pid); + + /* switch MMU hardware/sofware context to new mm */ + kvm_switch_mmu_guest_u_pt(vcpu); + + return root; + +failed: + return TO_ERROR_PAGE(ret); +} + +int kvm_switch_shadow_u_pptb(struct kvm_vcpu *vcpu, gpa_t u_pptb, + hpa_t *u_root) +{ + hpa_t root; + int ret; + + DebugSPT("started on VCPU #%d for guest user root PT at 0x%llx\n", + vcpu->vcpu_id, 
u_pptb); + + KVM_BUG_ON(!vcpu->arch.is_hv); + + ret = switch_shadow_pptb(vcpu, NULL, u_pptb, U_ROOT_PT_FLAG); + if (ret) { + pr_err("%s(): could not load new U_PPTB root\n", + __func__); + return ret; + } + + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + + root = kvm_get_space_type_spt_u_root(vcpu); + + /* switch MMU hardware/sofware context to new PT root */ + kvm_setup_shadow_u_pptb(vcpu); + + *u_root = root; + + return 0; +} + +int kvm_switch_shadow_os_pptb(struct kvm_vcpu *vcpu, gpa_t os_pptb, + hpa_t *os_root) +{ + hpa_t root; + int ret; + + KVM_BUG_ON(!vcpu->arch.is_hv); + + ret = switch_shadow_pptb(vcpu, NULL, os_pptb, OS_ROOT_PT_FLAG); + if (ret) { + pr_err("%s(): could not load new OS PT root\n", + __func__); + return ret; + } + + if (is_sep_virt_spaces(vcpu)) { + root = kvm_get_space_type_spt_u_root(vcpu); + } else { + root = kvm_get_space_type_spt_os_root(vcpu); + } + + /* switch MMU hardware/sofware context to new PT root */ + kvm_setup_shadow_os_pptb(vcpu); + + *os_root = root; + + return 0; +} + +int mmu_pv_create_tdp_user_pt(struct kvm_vcpu *vcpu, gpa_t u_phys_ptb) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + + KVM_BUG_ON(!is_tdp_paging(vcpu)); + + mmu->set_vcpu_u_pptb(vcpu, u_phys_ptb); + mmu->set_vcpu_u_vptb(vcpu, USER_VPTB_BASE_ADDR); + mmu->set_vcpu_os_vab(vcpu, MMU_GUEST_OS_VAB); + + return 0; +} + +static int switch_tdp_pptb(struct kvm_vcpu *vcpu, gpa_t pptb, unsigned flags) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + + KVM_BUG_ON(!is_tdp_paging(vcpu)); + + /* switch VCPU MMU to new PT */ + if ((flags & U_ROOT_PT_FLAG) || + ((flags & OS_ROOT_PT_FLAG) && + is_sep_virt_spaces(vcpu))) { + mmu->set_vcpu_u_pptb(vcpu, pptb); + } else if (flags & OS_ROOT_PT_FLAG) { + mmu->set_vcpu_os_pptb(vcpu, pptb); + } else { + KVM_BUG_ON(true); + } + + return 0; +} + +int mmu_pv_switch_tdp_u_pptb(struct kvm_vcpu *vcpu, int pid, gpa_t u_phys_ptb) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + int ret; + + ret = switch_tdp_pptb(vcpu, u_phys_ptb, U_ROOT_PT_FLAG); + 
if (ret) { + pr_err("%s(): could not load PT of next MM pid #%d\n", + __func__, pid); + goto failed; + } + mmu->pid = pid; + + /* switch MMU hardware/sofware context to new mm */ + kvm_setup_mmu_tdp_u_pt_context(vcpu); + + return 0; + +failed: + return ret; +} + +static void setup_tdp_paging(struct kvm_vcpu *vcpu) +{ + KVM_BUG_ON(is_paging_flag(vcpu)); + set_paging_flag(vcpu); + + KVM_BUG_ON(!is_tdp_paging(vcpu)); + + tdp_enabled = true; + + init_kvm_mmu(vcpu); +} + +int kvm_switch_to_tdp_paging(struct kvm_vcpu *vcpu, + gpa_t u_phys_ptb, gva_t u_virt_ptb, + gpa_t os_phys_ptb, gva_t os_virt_ptb, gva_t os_virt_base) +{ + struct kvm *kvm = vcpu->kvm; + + DebugTDP("started on VCPU #%d to switch TDP to paging mode GP " + "root at 0x%llx\n", + vcpu->vcpu_id, kvm_get_gp_phys_root(vcpu)); + + setup_tdp_paging(vcpu); + + DebugTDP("VCPU #%d guest OS root PT base: physical 0x%llx, " + "virtual 0x%lx, space offset 0x%lx\n", + vcpu->vcpu_id, os_phys_ptb, os_virt_ptb, os_virt_base); + DebugTDP("VCPU #%d guest user root PT base: physical 0x%llx, " + "virtual 0x%lx\n", + vcpu->vcpu_id, u_phys_ptb, u_virt_ptb); + + /* always separate speces should be used for paravirtualized guest */ + set_sep_virt_spaces(vcpu); + + vcpu->arch.mmu.set_vcpu_u_pptb(vcpu, u_phys_ptb); + vcpu->arch.mmu.set_vcpu_u_vptb(vcpu, u_virt_ptb); + vcpu->arch.mmu.set_vcpu_os_pptb(vcpu, os_phys_ptb); + vcpu->arch.mmu.set_vcpu_os_vptb(vcpu, os_virt_ptb); + vcpu->arch.mmu.set_vcpu_os_vab(vcpu, os_virt_base); + + /* setup page table structures type to properly manage PTs */ + mmu_set_vcpu_pt_struct(kvm, kvm_get_mmu_guest_pt_struct(vcpu)); + mmu_set_vcpu_pt_struct_func(kvm, &kvm_mmu_get_vcpu_pt_struct); + + return 0; +} + +int kvm_hv_setup_tdp_paging(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = vcpu->kvm; + e2k_core_mode_t core_mode; + bool sep_virt_space; + + setup_tdp_paging(vcpu); + + /* enable guest paging mode and shadow MMU context */ + + core_mode = read_guest_CORE_MODE_reg(vcpu); + sep_virt_space = 
!!core_mode.CORE_MODE_sep_virt_space; + if (sep_virt_space) + set_sep_virt_spaces(vcpu); + else + reset_sep_virt_spaces(vcpu); + + /* setup page table structures type to properly manage PTs */ + mmu_set_vcpu_pt_struct(kvm, kvm_get_mmu_guest_pt_struct(vcpu)); + mmu_set_vcpu_pt_struct_func(kvm, &kvm_mmu_get_vcpu_pt_struct); + + /* setup TDP PTs hardware/software context */ + kvm_setup_mmu_tdp_context(vcpu); + + return 0; +} + +static int setup_shadow_paging(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + unsigned flags) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + hpa_t os_root, u_root; + int ret; + + KVM_BUG_ON(VALID_PAGE(mmu->sh_root_hpa)); + + /* setup shadow root of USER page table */ + ret = setup_shadow_root(vcpu, gmm, flags); + if (ret) { + pr_err("%s(): could not create shadow PT root " + "of VCPU #%d MMU\n", + __func__, vcpu->vcpu_id); + goto failed; + } + + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + + mmu_get_spt_roots(vcpu, flags, &os_root, &u_root, NULL); + + /* shadow PT root is common for all VCPUs */ + if (VALID_PAGE(os_root)) { + mmu->sh_root_hpa = os_root; + } else if (VALID_PAGE(u_root)) { + mmu->sh_root_hpa = u_root; + } else { + KVM_BUG_ON(true); + } + kvm_mmu_set_init_gmm_root(vcpu, mmu->sh_root_hpa); + if (!vcpu->arch.is_hv) { + kvm_set_root_gmm_spt_list(gmm); + } + return 0; + +failed: + mmu->sh_root_hpa = TO_ERROR_PAGE(ret); + return ret; +} + +int kvm_hv_setup_shadow_paging(struct kvm_vcpu *vcpu, gmm_struct_t *gmm) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_mmu *mmu = &vcpu->arch.mmu; + gva_t os_vptb, u_vptb; + unsigned flags; + bool mmu_is_load = false; + e2k_core_mode_t core_mode; + bool sep_virt_space; + int ret; + + core_mode = read_guest_CORE_MODE_reg(vcpu); + sep_virt_space = !!core_mode.CORE_MODE_sep_virt_space; + + complete_nonpaging_mode(vcpu); + + if (sep_virt_space) { + /* create two kernel and user shadow PTs */ + flags = OS_ROOT_PT_FLAG | U_ROOT_PT_FLAG; + set_sep_virt_spaces(vcpu); + u_vptb = MMU_SEPARATE_USER_VPTB; + os_vptb 
= MMU_SEPARATE_KERNEL_VPTB; + mmu->set_vcpu_sh_u_vptb(vcpu, u_vptb); + mmu->set_vcpu_sh_os_vptb(vcpu, os_vptb); + } else { + /* create only one user shadow PT */ + flags = U_ROOT_PT_FLAG; + reset_sep_virt_spaces(vcpu); + os_vptb = MMU_UNITED_KERNEL_VPTB; + mmu->set_vcpu_sh_u_vptb(vcpu, os_vptb); + } + + /* setup page table structures type to properly manage PTs */ + mmu_set_vcpu_pt_struct(kvm, kvm_get_mmu_guest_pt_struct(vcpu)); + mmu_set_vcpu_pt_struct_func(kvm, &kvm_mmu_get_vcpu_pt_struct); + + /* It need create new shadow PT */ + mutex_lock(&vcpu->kvm->slots_lock); + if (!kvm->arch.shadow_pt_set_up) { + DebugSETPM("VCPU #%d shadow root PT is not yet created, so create\n", + vcpu->vcpu_id); + ret = setup_shadow_paging(vcpu, gmm, flags); + if (ret) { + pr_err("%s(): coiuld not create initial shadow PT error %d\n", + __func__, ret); + goto unlock_failed; + } + kvm->arch.shadow_pt_set_up = true; + mmu_is_load = true; + } else { + DebugSETPM("VCPU #%d shadow PT has been already created\n", vcpu->vcpu_id); + } + mutex_unlock(&vcpu->kvm->slots_lock); + if (!mmu_is_load) { + kvm_mmu_load(vcpu, gmm, flags); + } + + kvm_setup_mmu_spt_context(vcpu); + + return 0; + +unlock_failed: + mutex_unlock(&vcpu->kvm->slots_lock); + return ret; +} + +static void mmu_destroy_caches(void) +{ + if (pte_list_desc_cache) + kmem_cache_destroy(pte_list_desc_cache); + if (mmu_page_header_cache) + kmem_cache_destroy(mmu_page_header_cache); +} + +int kvm_mmu_module_init(void) +{ + pte_list_desc_cache = kmem_cache_create("pte_list_desc", + sizeof(struct pte_list_desc), + 0, 0, NULL); + if (!pte_list_desc_cache) + goto nomem; + + mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", + sizeof(struct kvm_mmu_page), + 0, 0, NULL); + if (!mmu_page_header_cache) + goto nomem; + + if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL)) + goto nomem; + + register_shrinker(&mmu_shrinker); + + return 0; + +nomem: + mmu_destroy_caches(); + return -ENOMEM; +} + +/* + * Caculate mmu 
pages needed for kvm. + */ +unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm) +{ + unsigned int nr_mmu_pages; + unsigned int nr_pages = 0; + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + int i; + + for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { + slots = __kvm_memslots(kvm, i); + + kvm_for_each_memslot(memslot, slots) + nr_pages += memslot->npages; + } + + nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000; + nr_mmu_pages = max(nr_mmu_pages, + (unsigned int) KVM_MIN_ALLOC_MMU_PAGES); + + return nr_mmu_pages; +} + +void kvm_mmu_destroy(struct kvm_vcpu *vcpu) +{ + kvm_mmu_unload(vcpu, OS_ROOT_PT_FLAG | U_ROOT_PT_FLAG | + GP_ROOT_PT_FLAG); + free_mmu_pages(vcpu); + kvm_vcpu_release_trap_cellar(vcpu); + mmu_free_memory_caches(vcpu); +} + +void kvm_mmu_module_exit(void) +{ + mmu_destroy_caches(); + percpu_counter_destroy(&kvm_total_used_mmu_pages); + unregister_shrinker(&mmu_shrinker); + mmu_audit_disable(); +} diff --git a/arch/e2k/kvm/mmu-e2k.h b/arch/e2k/kvm/mmu-e2k.h new file mode 100644 index 000000000000..9c8442ff3f31 --- /dev/null +++ b/arch/e2k/kvm/mmu-e2k.h @@ -0,0 +1,51 @@ +#ifndef _E2K_KVM_MMU_E2K_H_ +#define _E2K_KVM_MMU_E2K_H_ + +#include +#include + +#if _PAGE_P_V6 == _PAGE_P_V2 +# define PT_E2K_PRESENT_MASK _PAGE_P_V6 +#else +# error "Page table PRESENT bit is different for ISET-V6 vs older ISETs" +#endif +#if _PAGE_W_V6 == _PAGE_W_V2 +# define PT_E2K_WRITABLE_MASK _PAGE_W_V6 +#else +# error "Page table WRITABLE bit is different for ISET-V6 vs older ISETs" +#endif +#if _PAGE_A_HW_V6 == _PAGE_A_HW_V2 +# define PT_E2K_ACCESSED_MASK _PAGE_A_HW_V6 +#else +# error "Page table ACCESSED bit is different for ISET-V6 vs older ISETs" +#endif +#if _PAGE_D_V6 == _PAGE_D_V2 +# define PT_E2K_DIRTY_MASK _PAGE_D_V6 +#else +# error "Page table DIRTY bit is different for ISET-V6 vs older ISETs" +#endif +#if _PAGE_HUGE_V6 == _PAGE_HUGE_V2 +# define PT_E2K_PAGE_SIZE_MASK _PAGE_HUGE_V6 +#else +# error "Page table PAGE SIZE bit is different for 
ISET-V6 vs older ISETs" +#endif +#if _PAGE_G_V6 == _PAGE_G_V2 +# define PT_E2K_GLOBAL_MASK _PAGE_G_V6 +#else +# error "Page table GLOBAL bit is different for ISET-V6 vs older ISETs" +#endif +#if _PAGE_NON_EX_V6 == _PAGE_NON_EX_V2 +# define PT_E2K_NX_MASK _PAGE_NON_EX_V6 +#else +# define PT_E2K_NX_MASK(pt_v6) ((pt_v6) ? _PAGE_NON_EX_V6 : _PAGE_NON_EX_V2) +#endif + +#define PT_E2K_ROOT_LEVEL E2K_PGD_LEVEL_NUM /* pte, pmd, pud, pgd */ +#define PT_E2K_DIRECTORY_LEVEL E2K_PMD_LEVEL_NUM /* pmd */ +#define PT_E2K_PAGE_TABLE_LEVEL E2K_PTE_LEVEL_NUM /* pte */ +#define PT_E2K_MAX_HUGEPAGE_LEVEL MAX_HUGE_PAGES_LEVEL /* pud */ + +#define PT_E2K_ENTRIES_BITS PT_ENTRIES_BITS /* 9 bits */ +#define PT_E2K_ENT_PER_PAGE PT_ENTRIES_PER_PAGE /* 512 entries */ + +#endif /* _E2K_KVM_MMU_E2K_H_ */ diff --git a/arch/e2k/kvm/mmu-pv-spt.c b/arch/e2k/kvm/mmu-pv-spt.c new file mode 100644 index 000000000000..78a89d0d5c3c --- /dev/null +++ b/arch/e2k/kvm/mmu-pv-spt.c @@ -0,0 +1,1512 @@ + +/* + * VCPU MMU paravirtualization + * + * Based on x86 code and ideas. + * Copyright (c) 2014-2018, MCST. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mmu_defs.h" +#include "mmu.h" +#include "mman.h" +#include "cpu.h" +#include "process.h" +#include "user_area.h" +#include "gaccess.h" + +#define MMU_WARN_ON(x) WARN_ON(x) +#define MMU_BUG_ON(x) BUG_ON(x) + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_PAGE_FAULT_MODE +#undef DebugKVMPF +#define DEBUG_KVM_PAGE_FAULT_MODE 0 /* page fault on KVM */ +#define DebugKVMPF(fmt, args...) \ +({ \ + if (DEBUG_KVM_PAGE_FAULT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_PARAVIRT_FAULT_MODE +#undef DebugKVMPVF +#define DEBUG_KVM_PARAVIRT_FAULT_MODE 0 /* paravirt page fault on KVM */ +#define DebugKVMPVF(fmt, args...) \ +({ \ + if (DEBUG_KVM_PARAVIRT_FAULT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_AAU_PAGE_FAULT_MODE +#undef DebugAAUPF +#define DEBUG_AAU_PAGE_FAULT_MODE 0 /* page fault from AAU MOVA */ +#define DebugAAUPF(fmt, args...) \ +({ \ + if (DEBUG_AAU_PAGE_FAULT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_VM_MODE +#undef DebugKVMVM +#define DEBUG_KVM_VM_MODE 0 /* page fault on KVM */ +#define DebugKVMVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_VM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_SHUTDOWN_MODE +#undef DebugKVMSH +#define DEBUG_KVM_SHUTDOWN_MODE 0 /* KVM shutdown debugging */ +#define DebugKVMSH(fmt, args...) \ +({ \ + if (DEBUG_KVM_SHUTDOWN_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_TO_VIRT_MODE +#undef DebugTOVM +#define DEBUG_KVM_TO_VIRT_MODE 0 /* switch guest to virtual mode */ +#define DebugTOVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_TO_VIRT_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_SHADOW_PF_MODE +#undef DebugSPF +#define DEBUG_KVM_SHADOW_PF_MODE 0 /* shadow PT fault mode */ +#define DebugSPF(fmt, args...) \ +({ \ + if (DEBUG_KVM_SHADOW_PF_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_NONPAGING_MODE +#undef DebugNONP +#define DEBUG_KVM_NONPAGING_MODE 0 /* nonpaging mode debug */ +#define DebugNONP(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_NONPAGING_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_SWITCH_MM_MODE +#undef DebugKVMSWH +#define DEBUG_KVM_SWITCH_MM_MODE 0 /* switch guest MM debug */ +#define DebugKVMSWH(fmt, args...) \ +({ \ + if (DEBUG_KVM_SWITCH_MM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_PTE_MODE +#undef DebugPTE +#define DEBUG_KVM_PTE_MODE 0 /* guest PTE update/write debug */ +#define DebugPTE(fmt, args...) \ +({ \ + if (DEBUG_KVM_PTE_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_GMM_MODE +#undef DebugGMM +#define DEBUG_KVM_GMM_MODE 0 /* guest mm freeing debug */ +#define DebugGMM(fmt, args...) \ +({ \ + if (DEBUG_KVM_GMM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_ACTIVATE_GMM_MODE +#undef DebugAGMM +#define DEBUG_ACTIVATE_GMM_MODE 0 /* guest mm activating debug */ +#define DebugAGMM(fmt, args...) \ +({ \ + if (DEBUG_ACTIVATE_GMM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_GMM_FREE_MODE +#undef DebugFREE +#define DEBUG_KVM_GMM_FREE_MODE 0 /* guest mm PT freeing debug */ +#define DebugFREE(fmt, args...) \ +({ \ + if (DEBUG_KVM_GMM_FREE_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_REEXEC_PF_MODE +#undef DebugREEXEC +#define DEBUG_REEXEC_PF_MODE 0 /* reexecute load and wait debugging */ +#define DebugREEXEC(fmt, args...) \ +({ \ + if (DEBUG_REEXEC_PF_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_WRITE_REEXEC_PF_MODE +#undef DebugWREEX +#define DEBUG_WRITE_REEXEC_PF_MODE 0 /* reexecute store debugging */ +#define DebugWREEX(fmt, args...) 
\ +({ \ + if (DEBUG_WRITE_REEXEC_PF_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +/* number of retries to handle page fault */ +#define PF_RETRIES_MAX_NUM 1 +/* common number of one try and retries to handle page fault */ +#define PF_TRIES_MAX_NUM (2 + PF_RETRIES_MAX_NUM) + +void kvm_init_gmm_root_pt(struct kvm *kvm, gmm_struct_t *new_gmm) +{ + /* guest kernel part will be inited on page fault while */ + /* access to guest kernel by guest user process */ + new_gmm->root_hpa = E2K_INVALID_PAGE; +} + +void kvm_fill_init_root_pt(struct kvm *kvm) +{ + pgd_t *root; + + root = kvm_mmu_get_init_gmm_root(kvm); + if (root == NULL) + /* is not yet created and valid */ + return; + + /* copy kernel part of root page table entries to enable host */ + /* traps and hypercalls on guest */ + copy_kernel_pgd_range(root, cpu_kernel_root_pt); +} + +void release_gmm_root_pt(struct kvm_vcpu *vcpu, gmm_struct_t *gmm) +{ + hpa_t gmm_root, root_hpa; + + if (vcpu == NULL) + vcpu = native_current_thread_info()->vcpu; + if (vcpu == NULL) + return; + + gmm_root = gmm->root_hpa; + KVM_BUG_ON(!VALID_PAGE(gmm_root)); + + root_hpa = kvm_get_space_type_guest_u_root(vcpu); + KVM_BUG_ON(gmm_root == root_hpa); + + DebugFREE("will release gmm #%d shodow PT from root 0x%llx\n", + gmm->nid.nr, gmm_root); + + mmu_release_spt_root(vcpu, gmm_root); +} + +void kvm_arch_init_vm_mmap(struct kvm *kvm) +{ + kvm->arch.shadow_pt_enable = true; +#ifdef CONFIG_KVM_PHYS_PT_ENABLE + kvm->arch.phys_pt_enable = kvm->arch.is_hv; +# ifdef CONFIG_KVM_TDP_ENABLE + kvm->arch.tdp_enable = kvm->arch.phys_pt_enable; +# else /* ! CONFIG_KVM_TDP_ENABLE */ + kvm->arch.tdp_enable = false; +# endif /* CONFIG_KVM_TDP_ENABLE */ +#else /* ! 
CONFIG_KVM_PHYS_PT_ENABLE */ + kvm->arch.phys_pt_enable = false; + kvm->arch.tdp_enable = false; +#endif /* CONFIG_KVM_PHYS_PT_ENABLE */ +} + +void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, + struct kvm_memory_slot *dont) +{ + const pt_struct_t *pt_struct = kvm_get_mmu_host_pt_struct(kvm); + user_area_t *guest_area; + int i; + unsigned long base_gfn; + + DebugKVMSH("started for memory slot %px\n", free); + base_gfn = free->base_gfn; + DebugKVMVM("memory slot: base gfn 0x%lx, pages 0x%lx\n", + base_gfn, free->npages); + + if (dont == NULL) { + DebugKVMVM("started to free slot ID %d RMAP %px\n", + free->id, free->arch.rmap); + } + guest_area = free->arch.guest_areas.area; + if (guest_area != NULL) + kvm_arch_free_memory_region(kvm, free); + KVM_BUG_ON(free->arch.guest_areas.area != NULL); + for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { + const pt_level_t *pt_level; + int level = i + 1; + + pt_level = &pt_struct->levels[level]; + if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) { + kvfree(free->arch.rmap[i]); + DebugKVMVM("free slot ID %d RMAP %px\n", + free->id, free->arch.rmap); + free->arch.rmap[i] = NULL; + } + if (!is_huge_pt_level(pt_level)) + /* the page table level has not huge pages */ + continue; + + if (!dont || free->arch.lpage_info[i - 1] != + dont->arch.lpage_info[i - 1]) { + kvfree(free->arch.lpage_info[i - 1]); + DebugKVMVM("free slot ID %d huge page INFO %px\n", + free->id, free->arch.lpage_info[i - 1]); + free->arch.lpage_info[i - 1] = NULL; + } + } + + kvm_page_track_free_memslot(free, dont); +} + +int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, + unsigned long npages) +{ + const pt_struct_t *pt_struct = kvm_get_mmu_host_pt_struct(kvm); + int i; + gfn_t bgfn = slot->base_gfn; + + DebugKVM("started for slot ID #%d base gfn 0x%llx pages 0x%lx " + "user addr 0x%lx\n", + slot->id, bgfn, npages, slot->userspace_addr); + for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { + const pt_level_t *pt_level; + 
kvm_lpage_info_t *linfo; + unsigned long ugfn; + int lpages; + int disallow_lpages = 0; + int level = i + 1; + + if (level > pt_struct->levels_num) + /* no more levels */ + break; + + pt_level = &pt_struct->levels[level]; + if (!is_page_pt_level(pt_level) && + !is_huge_pt_level(pt_level)) + /* nothing pages on the level */ + continue; + + lpages = gfn_to_index(bgfn + npages - 1, bgfn, pt_level) + 1; + + slot->arch.rmap[i] = + kvzalloc(lpages * sizeof(*slot->arch.rmap[i]), + GFP_KERNEL); + if (!slot->arch.rmap[i]) + goto out_free; + DebugKVM("created RMAP %px to map 0x%x pages on PT level #%d\n", + slot->arch.rmap[i], lpages, level); + + if (!is_huge_pt_level(pt_level)) + /* the page table level has not huge pages */ + continue; + + linfo = kvzalloc(lpages * sizeof(*linfo), GFP_KERNEL); + if (!linfo) + goto out_free; + + slot->arch.lpage_info[i - 1] = linfo; + + if (bgfn & (KVM_PT_LEVEL_PAGES_PER_HPAGE(pt_level) - 1)) { + linfo[0].disallow_lpage = 1; + disallow_lpages++; + } + if ((bgfn + npages) & + (KVM_PT_LEVEL_PAGES_PER_HPAGE(pt_level) - 1)) { + linfo[lpages - 1].disallow_lpage = 1; + disallow_lpages++; + } + DebugKVM("created huge pages INFO %px to map 0x%x pages " + "on PT level #%d\n", + slot->arch.lpage_info[i - 1], lpages, level); + ugfn = slot->userspace_addr >> PAGE_SHIFT; + /* + * If the gfn and userspace address are not aligned wrt each + * other, or if explicitly asked to, disable large page + * support for this slot + */ + if ((bgfn ^ ugfn) & + (KVM_PT_LEVEL_PAGES_PER_HPAGE(pt_level) - 1) || + !kvm_largepages_enabled()) { + unsigned long j; + + for (j = 0; j < lpages; ++j) + linfo[j].disallow_lpage = 1; + disallow_lpages++; + } + if (disallow_lpages != 0) { + DebugKVM("disallowed %d huge pages on PT level #%d\n", + disallow_lpages, level); + } + } + + if (kvm_page_track_create_memslot(slot, npages)) + goto out_free; + + return 0; + +out_free: + for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { + const pt_level_t *pt_level; + int level = i + 1; + + pt_level = 
&pt_struct->levels[level]; + kvfree(slot->arch.rmap[i]); + slot->arch.rmap[i] = NULL; + if (!is_huge_pt_level(pt_level)) + /* the page table level has not huge pages */ + continue; + + kvfree(slot->arch.lpage_info[i - 1]); + slot->arch.lpage_info[i - 1] = NULL; + } + return -ENOMEM; +} + +/* FIXME: it need implement x86 arch function for e2k arch */ +static void kvm_mmu_slot_apply_flags(struct kvm *kvm, + struct kvm_memory_slot *new) +{ + /* Still write protect RO slot */ + if (new->flags & KVM_MEM_READONLY) { + kvm_mmu_slot_remove_write_access(kvm, new); + return; + } + + /* + * Call kvm_x86_ops dirty logging hooks when they are valid. + * + * kvm_x86_ops->slot_disable_log_dirty is called when: + * + * - KVM_MR_CREATE with dirty logging is disabled + * - KVM_MR_FLAGS_ONLY with dirty logging is disabled in new flag + * + * The reason is, in case of PML, we need to set D-bit for any slots + * with dirty logging disabled in order to eliminate unnecessary GPA + * logging in PML buffer (and potential PML buffer full VMEXT). This + * guarantees leaving PML enabled during guest's lifetime won't have + * any additonal overhead from PML when guest is running with dirty + * logging disabled for memory slots. + * + * kvm_x86_ops->slot_enable_log_dirty is called when switching new slot + * to dirty logging mode. + * + * If kvm_x86_ops dirty logging hooks are invalid, use write protect. + * + * In case of write protect: + * + * Write protect all pages for dirty logging. + * + * All the sptes including the large sptes which point to this + * slot are set to readonly. We can not create any new large + * spte on this slot until the end of the logging. + * + * See the comments in fast_page_fault(). 
+ */ + if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) { + kvm_mmu_slot_remove_write_access(kvm, new); + } +} + +void kvm_arch_commit_memory_region(struct kvm *kvm, + const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + const struct kvm_memory_slot *new, + enum kvm_mr_change change) +{ + int nr_mmu_pages = 0; + + if (!kvm->arch.n_requested_mmu_pages) + nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); + + if (nr_mmu_pages) + kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); + + /* + * Dirty logging tracks sptes in 4k granularity, meaning that large + * sptes have to be split. If live migration is successful, the guest + * in the source machine will be destroyed and large sptes will be + * created in the destination. However, if the guest continues to run + * in the source machine (for example if live migration fails), small + * sptes will remain around and cause bad performance. + * + * Scan sptes if dirty logging has been stopped, dropping those + * which can be collapsed into a single large-page spte. Later + * page faults will create the large-page sptes. + */ + if ((change != KVM_MR_DELETE) && + (old->flags & KVM_MEM_LOG_DIRTY_PAGES) && + !(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) + kvm_mmu_zap_collapsible_sptes(kvm, new); + + /* + * Set up write protection and/or dirty logging for the new slot. + * + * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of old slot have + * been zapped so no dirty logging staff is needed for old slot. For + * KVM_MR_FLAGS_ONLY, the old slot is essentially the same one as the + * new and it's also covered when dealing with the new slot. + * + * FIXME: const-ify all uses of struct kvm_memory_slot. 
+ */ + if (change != KVM_MR_DELETE) + kvm_mmu_slot_apply_flags(kvm, (struct kvm_memory_slot *) new); +} + +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot) +{ + kvm_page_track_flush_slot(kvm, slot); +} + +e2k_addr_t kvm_guest_kernel_addr_to_hva(struct kvm_vcpu *vcpu, + e2k_addr_t address) +{ + gpa_t gpa; + e2k_addr_t hva; + + DebugKVMPVF("started for addr 0x%lx\n", address); + if (!is_shadow_paging(vcpu)) { + /* it should be already host address */ + KVM_BUG_ON(address >= NATIVE_TASK_SIZE); + return address; + } + gpa = e2k_gva_to_gpa(vcpu, address, ACC_WRITE_MASK, NULL); + if (gpa == UNMAPPED_GVA) { + pr_err("%s(): address 0x%lx already unmapped or invalid\n", + __func__, address); + return -1; + } + hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gpa)); + hva |= address & ~PAGE_MASK; + return hva; +} +int kvm_e2k_paravirt_page_prefault(pt_regs_t *regs, trap_cellar_t *tcellar) +{ + pr_err("%s(): should not be used for shadow PT support\n", __func__); + return -EINVAL; +} + +static int kvm_pv_mmu_load_u_gmm(struct kvm_vcpu *vcpu, + gmm_struct_t *gmm, gpa_t u_phys_ptb) +{ + int ret; + + /* It need create new shadow PT */ + KVM_BUG_ON(!IS_E2K_INVALID_PAGE(gmm->root_hpa)); + ret = kvm_create_shadow_user_pt(vcpu, gmm, u_phys_ptb); + if (ret) { + pr_err("%s(): could not create initial shadow PT or " + "sync all guest pages, error %d\n", + __func__, ret); + return ret; + } + + KVM_BUG_ON(!VALID_PAGE(kvm_get_space_type_spt_u_root(vcpu))); + + return 0; +} + +static int kvm_pv_mmu_prepare_u_gmm(struct kvm_vcpu *vcpu, + gmm_struct_t *gmm, gpa_t u_phys_ptb) +{ + int ret; + + /* It need create new shadow PT */ + KVM_BUG_ON(!IS_E2K_INVALID_PAGE(gmm->root_hpa)); + if (unlikely(!vcpu->arch.mmu.u_context_on)) { + vcpu->arch.mmu.u_context_on = true; + } + ret = kvm_prepare_shadow_user_pt(vcpu, gmm, u_phys_ptb); + if (ret) { + pr_err("%s(): could not create initial shadow PT or " + "sync all guest pages, error %d\n", + __func__, ret); + return ret; + } + + 
KVM_BUG_ON(!VALID_PAGE(gmm->root_hpa)); + + return 0; +} + +static int vcpu_init_pv_mmu_state(struct kvm_vcpu *vcpu, + vcpu_gmmu_info_t *gmmu_info) +{ + gpa_t tc_gpa; + hpa_t tc_hpa, root; + e2k_core_mode_t core_mode; + bool updated; + int ret; + + tc_gpa = gmmu_info->trap_cellar; + ret = vcpu_write_trap_point_mmu_reg(vcpu, tc_gpa, &tc_hpa); + if (ret != 0) + goto error; + + ret = vcpu_write_mmu_pid_reg(vcpu, gmmu_info->pid); + if (ret != 0) + goto error_tc; + + if (gmmu_info->sep_virt_space) { + set_sep_virt_spaces(vcpu); + ret = vcpu_write_mmu_os_pptb_reg(vcpu, gmmu_info->os_pptb, + &updated, &root); + if (ret != 0) + goto error_tc; + + ret = vcpu_write_mmu_os_vptb_reg(vcpu, gmmu_info->os_vptb); + if (ret != 0) + goto error_tc; + + ret = vcpu_write_mmu_os_vab_reg(vcpu, gmmu_info->os_vab); + if (ret != 0) + goto error_tc; + } else { + reset_sep_virt_spaces(vcpu); + } + + core_mode = read_guest_CORE_MODE_reg(vcpu); + core_mode.CORE_MODE_pt_v6 = gmmu_info->pt_v6; + core_mode.CORE_MODE_sep_virt_space = gmmu_info->sep_virt_space; + write_guest_CORE_MODE_reg(vcpu, core_mode); + + ret = vcpu_write_mmu_u_pptb_reg(vcpu, gmmu_info->u_pptb, + &updated, &root); + if (ret != 0) + goto error_tc; + + ret = vcpu_write_mmu_u_vptb_reg(vcpu, gmmu_info->u_vptb); + if (ret != 0) + goto error_tc; + + ret = vcpu_write_mmu_cr_reg(vcpu, gmmu_info->mmu_cr); + if (ret != 0) + goto error_tc; + + kvm_mmu_set_init_gmm_root(vcpu, E2K_INVALID_PAGE); + + return 0; + +error_tc: + kvm_vcpu_release_trap_cellar(vcpu); +error: + return ret; +} + +static int vcpu_set_OS_VAB_pv_mmu_state(struct kvm_vcpu *vcpu, + vcpu_gmmu_info_t *gmmu_info) +{ + int ret; + + if (!is_sep_virt_spaces(vcpu)) { + pr_err("%s(): VCPU was inited with united PTs MMU and OS_VAB " + "cannot be set, so ignored\n", + __func__); + return -EINVAL; + } + + ret = vcpu_write_mmu_os_vab_reg(vcpu, gmmu_info->os_vab); + if (ret != 0) + goto error; + + return 0; + +error: + return ret; +} + +int kvm_pv_vcpu_mmu_state(struct kvm_vcpu *vcpu, 
+ vcpu_gmmu_info_t __user *mmu_info) +{ + vcpu_gmmu_info_t gmmu_info; + + if (kvm_vcpu_copy_from_guest(vcpu, &gmmu_info, mmu_info, + sizeof(*mmu_info))) { + pr_err("%s() : copy VCPU #%d MMU info from user failed\n", + __func__, vcpu->vcpu_id); + return -EFAULT; + } + if (gmmu_info.opcode & INIT_STATE_GMMU_OPC) { + return vcpu_init_pv_mmu_state(vcpu, &gmmu_info); + } else if (gmmu_info.opcode & SET_OS_VAB_GMMU_OPC) { + return vcpu_set_OS_VAB_pv_mmu_state(vcpu, &gmmu_info); + } else { + pr_err("%s() : unknown operathion type on VCPU #%d MMU\n", + __func__, vcpu->vcpu_id); + return -EINVAL; + } + return 0; +} + +int kvm_pv_activate_guest_mm(struct kvm_vcpu *vcpu, + gmm_struct_t *new_gmm, gpa_t u_phys_ptb) +{ + struct kvm_mmu *mmu = &vcpu->arch.mmu; + gthread_info_t *gti; + int ret; + + KVM_BUG_ON(!is_shadow_paging(vcpu)); + KVM_BUG_ON(vcpu->arch.is_hv); + + gti = pv_vcpu_get_gti(vcpu); + DebugAGMM("proces #%d the new gmm #%d\n", + gti->gpid->nid.nr, new_gmm->nid.nr); + + new_gmm->u_pptb = u_phys_ptb; + DebugKVMSWH("VCPU #%d guest user mm #%d root PT base: 0x%llx\n", + vcpu->vcpu_id, new_gmm->nid.nr, u_phys_ptb); + + ret = kvm_pv_mmu_load_u_gmm(vcpu, new_gmm, u_phys_ptb); + if (ret != 0) + goto failed; + + mmu_pv_setup_shadow_u_pptb(vcpu, new_gmm); + + if (!mmu->u_context_on) + mmu->u_context_on = true; + + new_gmm->pt_synced = true; + + return new_gmm->nid.nr; + +failed: + return ret; +} + +int kvm_pv_prepare_guest_mm(struct kvm_vcpu *vcpu, + gmm_struct_t *new_gmm, gpa_t u_phys_ptb) +{ + int ret; + + new_gmm->u_pptb = u_phys_ptb; + new_gmm->u_vptb = pv_vcpu_get_init_gmm(vcpu)->u_vptb; + DebugKVMSWH("VCPU #%d guest user mm #%d root PT base: 0x%llx\n", + vcpu->vcpu_id, new_gmm->nid.nr, u_phys_ptb); + + if (is_shadow_paging(vcpu)) { + ret = kvm_pv_mmu_prepare_u_gmm(vcpu, new_gmm, u_phys_ptb); + if (ret != 0) + goto failed; + if (vcpu->arch.is_hv) { + KVM_BUG_ON(true); + } else if (vcpu->arch.is_pv) { + /* new gmm will setup as active while switch to */ + ; + } else { + 
KVM_BUG_ON(true); + } + } else { + KVM_BUG_ON(true); + } + + KVM_BUG_ON(!new_gmm->pt_synced); + + return 0; + +failed: + return ret; +} + +static void set_pv_vcpu_mu_events_num(struct kvm_vcpu *vcpu, int events_num) +{ + KVM_BUG_ON(events_num < get_vcpu_mu_events_num(vcpu)); + set_vcpu_mu_events_num(vcpu, events_num); +} + +static void set_pv_vcpu_cur_mu_event_no(struct kvm_vcpu *vcpu, int event_no) +{ + int events_num = get_vcpu_mu_events_num(vcpu); + + KVM_BUG_ON(events_num >= 0 && event_no > events_num); + set_vcpu_mu_cur_event_no(vcpu, event_no); + if (event_no >= get_vcpu_mu_events_num(vcpu)) { + set_pv_vcpu_mu_events_num(vcpu, event_no + 1); + } +} + +int write_to_guest_pt_phys(struct kvm_vcpu *vcpu, gpa_t gpa, + const pgprot_t *gpte, int bytes) +{ + int ret; + + DebugPTE("started for GPA 0x%llx gpte %px == 0x%lx\n", + gpa, gpte, pgprot_val(*gpte)); + ret = kvm_vcpu_write_guest(vcpu, gpa, gpte, bytes); + if (ret < 0) { + pr_err("%s(): could not write guest pte %px == 0x%lx on host " + "address of GPA 0x%llx\n", + __func__, gpte, pgprot_val(*gpte), gpa); + return ret; + } + kvm_page_track_write(vcpu, NULL, gpa, (const void *)gpte, bytes); + + return 1; /* fault handled and recovered */ +} + +int kvm_guest_addr_to_host(void **addr) +{ + struct kvm_vcpu *vcpu = current_thread_info()->vcpu; + unsigned long hva; + kvm_arch_exception_t exception; + + KVM_BUG_ON(vcpu == NULL || !vcpu->arch.is_pv); + + hva = kvm_vcpu_gva_to_hva(vcpu, (e2k_addr_t)*addr, + false, &exception); + if (kvm_is_error_hva(hva)) { + DebugKVM("failed to find GPA for dst %lx GVA, " + "inject page fault to guest\n", addr); + kvm_vcpu_inject_page_fault(vcpu, (void *)addr, + &exception); + return -EAGAIN; + } + + *addr = (void *)hva; + return 0; +} + +void *kvm_guest_ptr_to_host_ptr(void *guest_ptr, int size, bool need_inject) +{ + struct kvm_vcpu *vcpu = current_thread_info()->vcpu; + unsigned long hva; + kvm_arch_exception_t exception; + + if ((u64)guest_ptr & PAGE_MASK != + (u64)(guest_ptr + 
size - 1) & PAGE_MASK) { + /* in this case need translation of two pages addresses */ + /* and two separate access to two part of data */ + pr_err("%s(): guest pointer %lx size %d bytes crosses " + "page boundaries, not implemented !!!\n", + __func__, guest_ptr, size); + return ERR_PTR(-EINVAL); + } + + vcpu = current_thread_info()->vcpu; + KVM_BUG_ON(vcpu == NULL || vcpu->arch.is_hv); + + hva = kvm_vcpu_gva_to_hva(vcpu, (e2k_addr_t)guest_ptr, + false, &exception); + if (kvm_is_error_hva(hva)) { + DebugKVM("failed to find GPA for dst %lx GVA, " + "inject page fault to guest\n", guest_ptr); + if (need_inject) + kvm_vcpu_inject_page_fault(vcpu, (void *)guest_ptr, + &exception); + return ERR_PTR(-EAGAIN); + } + + return (void *)hva; +} + +static void inject_data_page_fault(struct kvm_vcpu *vcpu, pt_regs_t *regs, + trap_cellar_t *tcellar) +{ + kvm_inject_pv_vcpu_tc_entry(vcpu, tcellar); + kvm_inject_data_page_exc(vcpu, regs); +} + +static void inject_instr_page_fault(struct kvm_vcpu *vcpu, pt_regs_t *regs, + e2k_addr_t IP) +{ + kvm_inject_instr_page_exc(vcpu, regs, exc_instr_page_miss_mask, IP); +} + +static void inject_ainstr_page_fault(struct kvm_vcpu *vcpu, pt_regs_t *regs, + e2k_addr_t IP) +{ + kvm_inject_instr_page_exc(vcpu, regs, exc_ainstr_page_miss_mask, IP); +} + +static void inject_aau_page_fault(struct kvm_vcpu *vcpu, pt_regs_t *regs, + unsigned int aa_no) +{ + kvm_inject_aau_page_exc(vcpu, regs, aa_no); +} + +int kvm_pv_mmu_page_fault(struct kvm_vcpu *vcpu, struct pt_regs *regs, + trap_cellar_t *tcellar, bool user_mode) +{ + e2k_addr_t address; + tc_cond_t cond; + tc_fault_type_t ftype; + tc_opcode_t opcode; + unsigned mas; + bool store, page_boundary = false; + u32 error_code = 0; + bool nonpaging = !is_paging(vcpu); + kvm_pfn_t pfn; + gfn_t gfn; + gpa_t gpa; + e2k_addr_t hva; + int bytes; + intc_mu_state_t *mu_state; + int r, pfres, try, fmt; + + address = tcellar->address; + cond = tcellar->condition; + + AW(ftype) = AS(cond).fault_type; + AW(opcode) = 
AS(cond).opcode; + fmt = TC_COND_FMT_FULL(cond); + KVM_BUG_ON(AS(opcode).fmt == 0 || AS(opcode).fmt == 6); + bytes = tc_cond_to_size(cond); + PFRES_SET_ACCESS_SIZE(error_code, bytes); + mas = AS(cond).mas; + store = tc_cond_is_store(cond, machine.native_iset_ver); + DebugNONP("page fault on guest address 0x%lx fault type 0x%x\n", + address, AW(ftype)); + + KVM_BUG_ON(regs->trap == NULL); + set_pv_vcpu_cur_mu_event_no(vcpu, regs->trap->curr_cnt); + + /* + * address belongs to 2 pages (ld/st through page boundary) + * Count real address of ld/st + */ + if (AS(cond).num_align) { + if (fmt != LDST_QP_FMT && fmt != TC_FMT_QPWORD_Q) + address -= 8; + else + address -= 16; + } + + if (pf_on_page_boundary(address, cond)) { + unsigned long pf_address; + + if (is_spurious_qp_store(store, address, fmt, + tcellar->mask, &pf_address)) { + page_boundary = false; + address = pf_address; + } else { + page_boundary = true; + } + } else { + page_boundary = false; + } + + if (address >= NATIVE_TASK_SIZE) { + /* address from host page space range, so pass the fault */ + /* to guest, let the guest itself handle whaut to do */ + inject_data_page_fault(vcpu, regs, tcellar); + r = 2; + goto out; /* fault injected to guest */ + } + + if (AW(ftype) == 0) { + error_code |= PFERR_NOT_PRESENT_MASK; + DebugSPF("empty page fault type\n"); + } else if (AS(ftype).page_miss) { + error_code |= PFERR_NOT_PRESENT_MASK; + DebugSPF("page miss fault type\n"); + } else if (nonpaging) { + error_code |= PFERR_NOT_PRESENT_MASK; + DebugSPF("fault type at nonpaging mode\n"); + } + if (store) { + error_code |= PFERR_WRITE_MASK; + DebugSPF("page fault on store\n"); + } else { + DebugSPF("page fault on load\n"); + } + if (user_mode) { + error_code |= PFERR_USER_MASK; + DebugSPF("page fault at user mode\n"); + } else { + DebugSPF("page fault at kernel mode\n"); + } + + if (AS(ftype).nwrite_page) { + error_code &= ~PFERR_NOT_PRESENT_MASK; + error_code |= PFERR_PRESENT_MASK | PFERR_WRITE_MASK; + DebugSPF("not write 
page fault type\n"); + } + + if (mas == MAS_WAIT_LOCK || + (mas == MAS_WAIT_LOCK_Q && bytes == 16)) { + DebugREEXEC("not writable page fault on load and lock " + "operation\n"); + /* this mas has store semantic */ + error_code |= PFERR_WAIT_LOCK_MASK; + } + if (mas == MAS_IOADDR) { + DebugSPF("IO space access operation\n"); + error_code |= PFERR_MMIO_MASK; + } + if (AS(ftype).priv_page) { + error_code &= ~PFERR_NOT_PRESENT_MASK; + error_code |= PFERR_PRESENT_MASK; + DebugSPF("priviled page fault type\n"); + } + + mu_state = get_intc_mu_state(vcpu); + mu_state->may_be_retried = true; + mu_state->ignore_notifier = true; + + try = 0; + do { + pfres = vcpu->arch.mmu.page_fault(vcpu, address, error_code, + false, &gfn, &pfn); + if (page_boundary) { + int pfres_hi; + e2k_addr_t address_hi; + /* + * If address points tp page boundary, then + * handle next page + */ + address_hi = PAGE_ALIGN(address); + pfres_hi = vcpu->arch.mmu.page_fault(vcpu, address_hi, + error_code, false, &gfn, &pfn); + + if (pfres == PFRES_ERR || pfres_hi == PFRES_ERR) + pfres = PFRES_ERR; + else if (pfres == PFRES_RETRY || + pfres_hi == PFRES_RETRY) + pfres = PFRES_RETRY; + else if (pfres == PFRES_INJECTED || + pfres_hi == PFRES_INJECTED) + pfres = PFRES_INJECTED; + else if (pfres == PFRES_WRITE_TRACK || + pfres_hi == PFRES_WRITE_TRACK) + pfres = PFRES_WRITE_TRACK; + else + pfres = PFRES_NO_ERR; + } + + if (likely(pfres != PFRES_RETRY)) + break; + if (!mu_state->may_be_retried) { + /* cannot be retried */ + break; + } + try++; + } while (try < PF_TRIES_MAX_NUM); + + DebugNONP("mmu.page_fault() returned %d\n", pfres); + if (pfres == PFRES_NO_ERR) { + r = 0; + goto out; /* fault handled, but need recover */ + } else if (pfres == PFRES_INJECTED) { + inject_data_page_fault(vcpu, regs, tcellar); + r = 2; + goto out; /* fault injected to guest */ + } + if (pfres != PFRES_WRITE_TRACK) { + /* error detected while page fault handling */ + r = EFAULT; + goto out; + } + if ((error_code & PFERR_WAIT_LOCK_MASK) 
&& + (error_code & PFERR_WRITE_MASK)) { + return reexecute_load_and_wait_page_fault(vcpu, tcellar, gfn, + regs); + } + + /* fault handled but guest PT is protected at shadow PT of host */ + /* so it need convert guest address to host HPA and */ + /* recover based on not protected host address */ + gpa = gfn_to_gpa(gfn); + gpa |= (address & ~PAGE_MASK); + if (likely(bytes == sizeof(pgprot_t))) { + /* highly likely it is update of protected PT entry */ + r = write_to_guest_pt_phys(vcpu, gpa, + (pgprot_t *)&tcellar->data, bytes); + } else { + /* it cannot be pte or other PT levels entries and gfn */ + /* should be unprotected while zeroing PT entry pointed to */ + hva = kvm_vcpu_gfn_to_hva(vcpu, gfn); + if (kvm_is_error_hva(hva)) { + pr_err("%s(): could not convert gfn 0x%llx to hva\n", + __func__, gfn); + r = -EFAULT; + goto out; + } + hva |= (gpa & ~PAGE_MASK); + tcellar->address = hva; + tcellar->flags |= TC_IS_HVA_FLAG; + E2K_LMS_HALT_OK; + pr_err("%s(): guest %s : protected address 0x%lx size %d, " + "will be reexecuted on gpa 0x%llx hva 0x%lx\n", + __func__, + (store) ? 
"store" : "load", address, bytes, gpa, hva); + r = 0; /* fault handled, but need recover based on HVA */ + } + +out: + if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { + DebugSPF("it need flush TLB, so flushing\n"); + __flush_tlb_all(); + } else if (nonpaging && (AS(ftype).illegal_page || AW(ftype) == 0)) { + /* illegal PTDs/PTE can be at TLB, flush them */ + __flush_tlb_all(); + } + + return r; +} +EXPORT_SYMBOL_GPL(kvm_pv_mmu_page_fault); + +int kvm_pv_mmu_instr_page_fault(struct kvm_vcpu *vcpu, + struct pt_regs *regs, tc_fault_type_t ftype, + const int async_instr) +{ + e2k_addr_t address; + gfn_t gfn; + u32 error_code; + bool nonpaging = !is_paging(vcpu); + intc_mu_state_t *mu_state; + int instr_num = 1, try; + int pfres, r; + + if (!async_instr) { + e2k_tir_lo_t tir_lo; + tir_lo.TIR_lo_reg = regs->trap->TIR_lo; + address = tir_lo.TIR_lo_ip; + } else { + address = AS_STRUCT(regs->ctpr2).ta_base; + } + + DebugNONP("started for GVA 0x%lx\n", address); + + mu_state = get_intc_mu_state(vcpu); + mu_state->may_be_retried = true; + mu_state->ignore_notifier = true; + + if (address >= NATIVE_TASK_SIZE) { + /* IP from host virtual space range, so pass the fault */ + /* to guest, let the guest itself handle what to do */ + if (!async_instr) { + inject_instr_page_fault(vcpu, regs, address); + } else { + inject_ainstr_page_fault(vcpu, regs, address); + } + r = 2; + goto out; /* fault injected to guest */ + } + + if (nonpaging) { + address = nonpaging_gva_to_gpa(vcpu, address, ACC_ALL, NULL); + + if (!kvm_is_visible_gfn(vcpu->kvm, gpa_to_gfn(address))) { + pr_err("%s(): address 0x%lx is not guest valid " + "physical address\n", + __func__, address); + r = -EFAULT; + goto out; + } + } + + error_code = 0; + if (AS(ftype).page_miss) + error_code |= PFERR_NOT_PRESENT_MASK | PFERR_INSTR_FAULT_MASK; + if (AS(ftype).illegal_page) + error_code |= PFERR_NOT_PRESENT_MASK | PFERR_INSTR_PROT_MASK; + + if (!async_instr && ((address & PAGE_MASK) != + ((address + E2K_INSTR_MAX_SIZE - 1) 
& PAGE_MASK))) { + if (!nonpaging) { + instr_num++; + } else if (kvm_is_visible_gfn(vcpu->kvm, + gpa_to_gfn(address + E2K_INSTR_MAX_SIZE - 1))) { + instr_num++; + } + } + + do { + try = 0; + do { + pfres = vcpu->arch.mmu.page_fault(vcpu, address, + error_code, false, &gfn, NULL); + if (likely(pfres != PFRES_RETRY)) + break; + if (!mu_state->may_be_retried) { + /* cannot be retried */ + break; + } + try++; + } while (try < PF_TRIES_MAX_NUM); + + if (try >= PF_TRIES_MAX_NUM) + break; + if (pfres == PFRES_INJECTED) + break; + address = (address & PAGE_MASK) + PAGE_SIZE; + } while (--instr_num, instr_num > 0); + + + DebugNONP("mmu.page_fault() returned %d\n", pfres); + if (pfres == PFRES_NO_ERR) { + r = 0; + goto out; /* fault handled */ + } else if (pfres == PFRES_INJECTED) { + if (!async_instr) { + inject_instr_page_fault(vcpu, regs, address); + } else { + inject_ainstr_page_fault(vcpu, regs, address); + } + r = 2; + goto out; /* fault injected to guest */ + } + /* error detected while page fault handling */ + r = EFAULT; + +out: + if (r < 0) + /* error detected while page fault handling */ + return r; + + DebugNONP("mmu.page_fault() returned %d\n", r); + if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { + DebugNONP("it need flush TLB, so flushing\n"); + __flush_tlb_all(); + } else if (nonpaging && (AS(ftype).illegal_page || AW(ftype) == 0)) { + /* illegal PTDs/PTE can be at TLB, flush them */ + __flush_tlb_all(); + } + return r; +} + +int kvm_pv_mmu_aau_page_fault(struct kvm_vcpu *vcpu, struct pt_regs *regs, + e2k_addr_t address, tc_cond_t cond, unsigned int aa_no) +{ + u32 error_code = 0; + bool store; + bool nonpaging = !is_paging(vcpu); + tc_opcode_t opcode; + kvm_pfn_t pfn; + gfn_t gfn; + int bytes; + intc_mu_state_t *mu_state; + int r, pfres, try; + + AW(opcode) = AS(cond).opcode; + KVM_BUG_ON(AS(opcode).fmt == 0 || AS(opcode).fmt == 6); + bytes = tc_cond_to_size(cond); + PFRES_SET_ACCESS_SIZE(error_code, bytes); + DebugAAUPF("page fault on guest address 0x%lx 
aa#%d\n", + address, aa_no); + + KVM_BUG_ON(nonpaging); + KVM_BUG_ON(regs->trap == NULL); + + if (address >= NATIVE_TASK_SIZE) { + /* address from host page space range, so pass the fault */ + /* to guest, let the guest itself handle whaut to do */ + inject_aau_page_fault(vcpu, regs, aa_no); + r = 2; + goto out; /* fault injected to guest */ + } + + error_code |= (PFERR_NOT_PRESENT_MASK | PFERR_FAPB_MASK); + store = tc_cond_is_store(cond, machine.native_iset_ver); + if (store) { + error_code |= PFERR_WRITE_MASK; + } + error_code |= PFERR_USER_MASK; + DebugAAUPF("page miss fault type on %s\n", + (store) ? "store" : "load"); + + mu_state = get_intc_mu_state(vcpu); + mu_state->may_be_retried = true; + mu_state->ignore_notifier = true; + + try = 0; + do { + pfres = vcpu->arch.mmu.page_fault(vcpu, address, error_code, + false, &gfn, &pfn); + if (likely(pfres != PFRES_RETRY)) + break; + if (!mu_state->may_be_retried) { + /* cannot be retried */ + break; + } + try++; + } while (try < PF_TRIES_MAX_NUM); + + DebugAAUPF("mmu.page_fault() returned %d\n", pfres); + if (pfres == PFRES_NO_ERR) { + r = 0; + goto out; /* fault handled */ + } else if (pfres == PFRES_INJECTED) { + inject_aau_page_fault(vcpu, regs, aa_no); + r = 2; + goto out; /* fault injected to guest */ + } else { + /* error detected while page fault handling */ + r = EFAULT; + goto out; + } + +out: + if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { + DebugSPF("it need flush TLB, so flushing\n"); + __flush_tlb_all(); + } + + return r; +} +EXPORT_SYMBOL_GPL(kvm_pv_mmu_aau_page_fault); + +pgprot_t *kvm_hva_to_pte(e2k_addr_t address) +{ + struct vm_area_struct *vma; + pte_t *pte; + + down_read(¤t->mm->mmap_sem); + + vma = find_vma(current->mm, address); + if (vma == NULL) { + pr_err("%s(): Could not find VMA structure of host virtual " + "address 0x%lx to map guest physical memory\n", + __func__, address); + goto failed; + } + pte = get_user_address_pte(vma, address); + + up_read(¤t->mm->mmap_sem); + + return 
(pgprot_t *)pte; + +failed: + up_read(¤t->mm->mmap_sem); + return NULL; +} + +int kvm_pv_mmu_pt_atomic_update(struct kvm_vcpu *vcpu, int gmmid_nr, + gpa_t gpa, void __user *old_gpt, + pt_atomic_op_t atomic_op, unsigned long prot_mask) +{ + gfn_t gfn; + struct page *page = NULL; + struct gmm_struct *gmm; + pgprot_t old_pt; + pgprot_t new_pt; + char *kaddr; + int ret; + + DebugPTE("started for guest PT GPA 0x%llx\n", gpa); + if (vcpu->arch.is_hv) { + pr_warn_once("%s(): MMU is hardware virtualized, so this " + "call/hypercall can be deleted from guest\n", + __func__); + } + + gfn = gpa_to_gfn(gpa); + DebugPTE("GPA 0x%llx converted to GFN 0x%llx\n", + gpa, gfn); + + page = kvm_vcpu_gfn_to_page(vcpu, gfn); + if (is_error_page(page)) { + pr_err("%s(): could not GPA 0x%llx convert to host page\n", + __func__, gpa); + ret = -EFAULT; + goto failed; + } + + kaddr = kmap_atomic(page); + kaddr += offset_in_page(gpa); + DebugPTE("GPA 0x%llx converted to host addr 0x%lx\n", + gpa, kaddr); + + switch (atomic_op) { + case ATOMIC_GET_AND_XCHG: + pgprot_val(old_pt) = native_pt_get_and_xchg_atomic(prot_mask, + (pgprotval_t *)kaddr); + pgprot_val(new_pt) = prot_mask; + break; + case ATOMIC_GET_AND_CLEAR: + pgprot_val(old_pt) = + native_pt_get_and_clear_atomic((pgprotval_t *)kaddr); + pgprot_val(new_pt) = pgprot_val(old_pt) & _PAGE_INIT_VALID; + break; + case ATOMIC_SET_WRPROTECT: + pgprot_val(old_pt) = + native_pt_set_wrprotect_atomic((pgprotval_t *)kaddr); + pgprot_val(new_pt) = pgprot_val(*(pgprot_t *)kaddr); + break; + case ATOMIC_TEST_AND_CLEAR_YOUNG: + pgprot_val(old_pt) = + native_pt_clear_young_atomic((pgprotval_t *)kaddr); + pgprot_val(new_pt) = pgprot_val(*(pgprot_t *)kaddr); + break; + case ATOMIC_TEST_AND_CLEAR_RELAXED: + pgprot_val(old_pt) = native_pt_clear_relaxed_atomic(prot_mask, + (pgprotval_t *)kaddr); + pgprot_val(new_pt) = pgprot_val(*(pgprot_t *)kaddr); + break; + default: + pr_err("%s(): invalid type %d of atomic PT modification\n", + __func__, atomic_op); + ret 
= -ENOSYS; + goto failed_unmap; + } + + kunmap_atomic(kaddr); + kvm_release_page_dirty(page); + page = NULL; + DebugPTE("old pt %px == 0x%lx, new 0x%lx\n", + kaddr, pgprot_val(old_pt), pgprot_val(new_pt)); + + kvm_vcpu_mark_page_dirty(vcpu, gfn); + + if (likely(gmmid_nr >= 0 && + gmmid_nr != pv_vcpu_get_init_gmm(vcpu)->nid.nr)) { + gmm = kvm_find_gmmid(&vcpu->kvm->arch.gmmid_table, + gmmid_nr); + if (gmm == NULL) { + pr_err("%s(): could not find gmm #%d\n", + __func__, gmmid_nr); + ret = -EINVAL; + goto failed_unmap; + } + } else { + /* gmm is kernel thread init_gmm */ + gmm = pv_vcpu_get_init_gmm(vcpu); + } + + kvm_page_track_write(vcpu, gmm, gpa, (const void *)&new_pt, + sizeof(pgprot_t)); + + ret = kvm_vcpu_copy_to_guest(vcpu, old_gpt, &old_pt, + sizeof(pgprot_t)); + if (ret != 0) + pr_err("%s(): could not copy old pte to guest, error %d\n", + __func__, ret); + DebugPTE("return to guest old pt %px == 0x%lx\n", + old_gpt, pgprot_val(old_pt)); + +failed_unmap: + if (page != NULL) { + kunmap_atomic(kaddr); + kvm_release_page_dirty(page); + page = NULL; + } +failed: + return ret; +} + +int kvm_pv_switch_guest_mm(struct kvm_vcpu *vcpu, + int gpid_nr, int gmmid_nr, gpa_t u_phys_ptb) +{ + gthread_info_t *cur_gti = current_thread_info()->gthread_info; + gthread_info_t *next_gti; + struct gmm_struct *init_gmm = pv_vcpu_get_init_gmm(vcpu); + struct gmm_struct *next_gmm; + bool migrated = false; + hpa_t root; + int ret; + + DebugKVMSWH("started to switch from current GPID #%d to #%d, guest " + "root PT at 0x%llx\n", + cur_gti->gpid->nid.nr, gpid_nr, u_phys_ptb); + + next_gti = kvm_get_guest_thread_info(vcpu->kvm, gpid_nr); + if (next_gti == NULL) { + /* FIXME: we should kill guest kernel, but first it needs */ + /* to switch to host kernel stacks */ + panic("%s(): could not find guest thread GPID #%d\n", + __func__, gpid_nr); + } + if (next_gti->vcpu == NULL) { + DebugKVMSWH("next thread GPID #%d starts on VCPU #%d " + "first time\n", + gpid_nr, vcpu->vcpu_id); + 
next_gti->vcpu = vcpu; + } else if (next_gti->vcpu != vcpu) { + DebugKVMSWH("next thread GPID #%d migrates from current GPID " + "#%d VCPU #%d to VCPU #%d\n", + gpid_nr, cur_gti->gpid->nid.nr, + next_gti->vcpu->vcpu_id, vcpu->vcpu_id); + migrated = true; + next_gti->vcpu = vcpu; + } else { + DebugKVMSWH("next thread GPID #%d continues running " + "on VCPU #%d\n", + gpid_nr, vcpu->vcpu_id); + } + if (gmmid_nr != init_gmm->nid.nr) { + next_gmm = kvm_find_gmmid(&vcpu->kvm->arch.gmmid_table, + gmmid_nr); + if (next_gmm == NULL) { + /* FIXME: we should kill guest kernel, but first */ + /* it needs to switch to host kernel stacks */ + panic("%s(): could not find new host agent #%d of " + "guest mm\n", + __func__, gmmid_nr); + } + } else { + /* new process is kernel thread */ + pr_err("%s(): switch to guest kernel init mm #%d\n", + __func__, gmmid_nr); + next_gmm = NULL; + } + + KVM_BUG_ON(next_gmm == NULL); + + if (unlikely(!next_gmm->pt_synced)) { + /* first swotch to new guest mm, so it need activate */ + ret = kvm_pv_activate_guest_mm(vcpu, next_gmm, u_phys_ptb); + goto done; + } + + /* switch to the next already activated guest mm */ + if (is_shadow_paging(vcpu)) { + KVM_BUG_ON(!VALID_PAGE(next_gmm->root_hpa)); + root = mmu_pv_switch_spt_u_pptb(vcpu, next_gmm, u_phys_ptb); + + KVM_BUG_ON(!VALID_PAGE(root) || root != next_gmm->root_hpa); + if (ERROR_PAGE(root)) { + ret = PAGE_TO_ERROR(root); + } else { + ret = 0; + } + KVM_BUG_ON(true); + } + ret = 0; + +done: + if (ret >= 0) { + current_thread_info()->gthread_info = next_gti; + KVM_BUG_ON(ret > 0 && ret != gmmid_nr); + ret = 0; + } + + return ret; +} diff --git a/arch/e2k/kvm/mmu-pv.c b/arch/e2k/kvm/mmu-pv.c new file mode 100644 index 000000000000..4c2ad046a59b --- /dev/null +++ b/arch/e2k/kvm/mmu-pv.c @@ -0,0 +1,1281 @@ + +/* + * VCPU MMU virtualization + * + * Based on x86 code, Copyright (c) 2004, Intel Corporation. + * + * This work is licensed under the terms of the GNU GPL, version 2. 
See + * the COPYING file in the top-level directory. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "process.h" +#include "cpu.h" +#include "mmu_defs.h" +#include "mmu.h" +#include "gaccess.h" +#include "user_area.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_PARAVIRT_FAULT_MODE +#undef DebugKVMPVF +#define DEBUG_KVM_PARAVIRT_FAULT_MODE 0 /* paravirt page fault on KVM */ +#define DebugKVMPVF(fmt, args...) \ +({ \ + if (DEBUG_KVM_PARAVIRT_FAULT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_PAGE_FAULT_MODE +#undef DebugKVMPF +#define DEBUG_KVM_PAGE_FAULT_MODE 0 /* page fault on KVM */ +#define DebugKVMPF(fmt, args...) \ +({ \ + if (DEBUG_KVM_PAGE_FAULT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_VM_PAGE_FAULT_MODE +#undef DebugKVMVMPF +#define DEBUG_KVM_VM_PAGE_FAULT_MODE 0 /* page fault on KVM VM */ +#define DebugKVMVMPF(fmt, args...) \ +({ \ + if (DEBUG_KVM_VM_PAGE_FAULT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_FREE_GUEST_USER_MODE +#undef DebugKVMFGU +#define DEBUG_KVM_FREE_GUEST_USER_MODE 0 /* free guest user VM */ +#define DebugKVMFGU(fmt, args...) \ +({ \ + if (DEBUG_KVM_FREE_GUEST_USER_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_FREE_GUEST_PTE_MODE +#undef DebugKVMFGUPTE +#define DEBUG_KVM_FREE_GUEST_PTE_MODE 0 /* free guest user PTEs */ +#define DebugKVMFGUPTE(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_FREE_GUEST_PTE_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_LOCKED_GUEST_USER_MODE +#undef DebugKVMLGU +#define DEBUG_KVM_LOCKED_GUEST_USER_MODE 0 /* check locked */ + /* guest user area */ +#define DebugKVMLGU(fmt, args...) \ +({ \ + if (DEBUG_KVM_LOCKED_GUEST_USER_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_LOCKED_GUEST_PTE_MODE +#undef DebugKVMLGUPTE +#define DEBUG_KVM_LOCKED_GUEST_PTE_MODE 0 /* check locked guest user */ + /* area PTEs */ +#define DebugKVMLGUPTE(fmt, args...) \ +({ \ + if (DEBUG_KVM_LOCKED_GUEST_PTE_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PARAVIRT_PREFAULT_MODE +#undef DebugPVF +#define DEBUG_PARAVIRT_PREFAULT_MODE 0 /* paravirt page prefault */ +#define DebugPVF(fmt, args...) \ +({ \ + if (DEBUG_PARAVIRT_PREFAULT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_PTE_MODE +#undef DebugKVMPTE +#define DEBUG_KVM_PTE_MODE 0 /* set and clear pte on KVM */ +#define DebugKVMPTE(fmt, args...) \ +({ \ + if (DEBUG_KVM_PTE_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_GUEST_VALID_MODE +#undef DebugMGV +#define DEBUG_GUEST_VALID_MODE 0 /* make valid guest pages */ +#define DebugMGV(fmt, args...) \ +({ \ + if (DEBUG_GUEST_VALID_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_GUEST_FREE_PT_MODE +#undef DebugFPT +#define DEBUG_GUEST_FREE_PT_MODE 0 /* free guest user pages */ + /* table entries */ +#define DebugFPT(fmt, args...) \ +({ \ + if (DEBUG_GUEST_FREE_PT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_USER_PTE_MODE +#undef DebugUPTE +#define DEBUG_USER_PTE_MODE 0 /* set and clear user ptes on KVM */ +#define DebugUPTE(fmt, args...) 
\ +({ \ + if (DEBUG_USER_PTE_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_VALIDATE_MODE +#undef DebugVAL +#define DEBUG_VALIDATE_MODE 0 /* validate user addreses on host */ +#define DebugVAL(fmt, args...) \ +({ \ + if (DEBUG_VALIDATE_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_GUEST_USER_MODE +#undef DebugKVMGU +#define DEBUG_KVM_GUEST_USER_MODE 0 /* guest user address */ +#define DebugKVMGU(fmt, args...) \ +({ \ + if (DEBUG_KVM_GUEST_USER_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_HW_STACK_MAPPING_MODE +#undef DebugHWSM +#define DEBUG_HW_STACK_MAPPING_MODE 0 /* hardware stacks mapping */ +#define DebugHWSM(fmt, args...) \ +({ \ + if (DEBUG_HW_STACK_MAPPING_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_HW_STACK_REMAPPING_MODE +#undef DebugHWSG +#define DEBUG_HW_STACK_REMAPPING_MODE 0 /* hardware stacks mapping */ +#define DebugHWSG(fmt, args...) \ +({ \ + if (DEBUG_HW_STACK_REMAPPING_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_ANON_RMAP_MODE +#undef DebugANON +#define DEBUG_KVM_ANON_RMAP_MODE 0 /* anonimous VMA mapping */ +#define DebugANON(fmt, args...) \ +({ \ + if (DEBUG_KVM_ANON_RMAP_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_SHUTDOWN_MODE +#undef DebugKVMSH +#define DEBUG_KVM_SHUTDOWN_MODE 0 /* KVM shutdown debugging */ +#define DebugKVMSH(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_SHUTDOWN_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef VM_BUG_ON +#define VM_BUG_ON(cond) BUG_ON(cond) + +static inline void +mmu_get_tc_entry(struct kvm_vcpu *vcpu, int tc_no, trap_cellar_t *tc_entry) +{ + kvm_read_pv_vcpu_mmu_tc_entry(vcpu, tc_no, tc_entry); +} + +static inline void +mmu_set_tc_entry(struct kvm_vcpu *vcpu, int tc_no, + e2k_addr_t address, tc_cond_t condition, u64 *data) +{ + kvm_write_pv_vcpu_mmu_tc_entry(vcpu, tc_no, address, condition, data); +} + +void kvm_init_mmu_state(struct kvm_vcpu *vcpu) +{ + DebugKVM("started for VCPU %d\n", vcpu->vcpu_id); + + kvm_write_pv_vcpu_MMU_CR_reg(vcpu, MMU_CR_KERNEL_OFF); + DebugKVM("set MMU_CR to init state 0x%lx\n", + mmu_reg_val(MMU_CR_KERNEL_OFF)); + + kvm_write_pv_vcpu_mmu_US_CL_D_reg(vcpu, true); + DebugKVM("set MMU_US_CL_D to init disable state\n"); +} + +unsigned int kvm_get_guest_vcpu_mmu_trap_count(struct kvm_vcpu *vcpu) +{ + return kvm_read_pv_vcpu_mmu_TRAP_COUNT_reg(vcpu); +} + +void kvm_set_guest_vcpu_mmu_trap_count(struct kvm_vcpu *vcpu, + unsigned int count) +{ + kvm_write_pv_vcpu_mmu_TRAP_COUNT_reg(vcpu, count); +} + +void kvm_get_guest_vcpu_tc_entry(struct kvm_vcpu *vcpu, + int tc_no, trap_cellar_t *tc_entry) +{ + mmu_get_tc_entry(vcpu, tc_no, tc_entry); +} + +int kvm_add_guest_vcpu_tc_entry(struct kvm_vcpu *vcpu, + e2k_addr_t address, tc_cond_t condition, u64 *data) +{ + int tc_count; + int tc_no; + + tc_count = kvm_get_guest_vcpu_mmu_trap_count(vcpu); + tc_no = tc_count / 3; + mmu_set_tc_entry(vcpu, tc_no, address, condition, data); + kvm_set_guest_vcpu_mmu_trap_count(vcpu, (tc_no + 1) * 3); + return tc_no; +} + +/* + * Init (create if need) VCPU root page table + */ +int kvm_init_vcpu_root_pt(struct kvm_vcpu *vcpu) +{ + thread_info_t *ti = current_thread_info(); + pgd_t *pgd; + + DebugKVM("started VCPU #%d\n", vcpu->vcpu_id); + current_thread_info()->vcpu_pgd = NULL; + if (!test_kvm_mode_flag(vcpu->kvm, KVMF_PARAVIRT_GUEST)) { + DebugKVM("guest 
kernel is not paravirtualized image\n"); + return 0; + } +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT + if (!MMU_IS_SEPARATE_PT() && THERE_IS_DUP_KERNEL) { + /* Host kernel has duplicated images on some nodes */ + /* so use separate root PGD for each CPU */ + /* It need not more separate PGD for each VCPU */ + /* PGD to host/guest kernel image will be updated */ + /* into root PGD of real CPU on which VCPU threads run */ + DebugKVMSW("host kernel has duplicated images and use " + "separate root PT for each CPU\n"); + return 0; + } +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + pgd = pgd_alloc(current->mm); + if (unlikely(pgd == NULL)) { + DebugKVM("could not allocate root PGD of VCPU #%d\n", + vcpu->vcpu_id); + return -ENOMEM; + } + current_thread_info()->host_pgd = current->mm->pgd; + current_thread_info()->vcpu_pgd = pgd; + DebugKVM("allocated root pgd %px for VCPU #%d\n", + pgd, vcpu->vcpu_id); + ti->kernel_image_pgd_p = &pgd[KERNEL_IMAGE_PGD_INDEX]; + ti->kernel_image_pgd = *(ti->kernel_image_pgd_p); + DebugKVM("VCPU #%d kernel image pgd %px = 0x%lx\n", + vcpu->vcpu_id, ti->kernel_image_pgd_p, + (ti->kernel_image_pgd_p) ? 
pgd_val(*ti->kernel_image_pgd_p) + : + 0); + return 0; +} + +void kvm_free_vcpu_root_pt(void) +{ + pgd_t *pgd = current_thread_info()->vcpu_pgd; + + if (pgd == NULL) + return; + DebugKVM("started on %s (%d)\n", current->comm, current->pid); + /* FIXME: This comment only in case of use qemu execvp() to reboot + * the virtual machine + BUG_ON(current->mm != NULL); + */ + + set_root_pt(current_thread_info()->host_pgd); /* switch to host */ + /* VCPU page table */ + + clear_pgd_range(pgd, 0, USER_PTRS_PER_PGD); + pgd_free(NULL, pgd); + current_thread_info()->vcpu_pgd = NULL; + DebugKVM("freed VCPU root PT on %s (%d)\n", + current->comm, current->pid); +} + +/** + * page_add_shadow_rmap - add pte mapping to an shadowed page + * @page: the page to add the mapping to + * + * The caller needs to hold the pte lock, and the page must be reserved + */ +static inline void page_add_shadow_rmap(struct page *page) +{ + int first = atomic_inc_and_test(&page->_mapcount); + if (first) { + /* first increment was kernel space mapping */ + /* now increment for as guest shadow mapping */ + atomic_inc_and_test(&page->_mapcount); + } + + VM_BUG_ON(!page_count(page)); +} + +static inline long +map_host_ttable_large_pte_range(struct vm_area_struct *vma, + pte_t kernel_pte, pmd_t *shadow_pmd, + e2k_addr_t kernel_addr, e2k_addr_t kernel_end, + e2k_addr_t shadow_addr, e2k_addr_t shadow_end) +{ + pte_t *shadow_pte; + pte_t *orig_shadow_pte; + spinlock_t *shadow_ptl = NULL; + pte_t pte; + e2k_addr_t pfn; + e2k_addr_t pfn_off; + struct page *ptepage; + long ret = 0; + + DebugKVM("started: kernel start 0x%lx end 0x%lx shadow start 0x%lx " + "end 0x%lx kernel large PTE 0x%lx\n", + kernel_addr, kernel_end, shadow_addr, shadow_end, + pte_val(kernel_pte)); + pte = kernel_pte; + pfn = pte_pfn(pte); + pte = pte_set_small_size(pte); + pfn_off = (shadow_addr & ~PMD_MASK) >> PTE_SHIFT; + shadow_pte = pte_alloc_map_lock(current->mm, shadow_pmd, shadow_addr, + &shadow_ptl); + if (!shadow_pte) { + 
printk(KERN_ERR "map_host_ttable_pte_range() could not alloc " + "PTE page for shadow addr 0x%lx\n", shadow_addr); + return -ENOMEM; + } + orig_shadow_pte = shadow_pte; + do { + if (!pte_none(*shadow_pte)) { + printk(KERN_ERR "Old mapping existed for shadow " + "address 0x%lx pte 0x%px = 0x%lx\n", + shadow_addr, shadow_pte, pte_val(*shadow_pte)); + ret = -EINVAL; + break; + } + ptepage = pfn_to_page(pfn + pfn_off); + if ((!page_valid(ptepage)) || !page_count(ptepage)) { + printk(KERN_ERR "map_host_ttable_pte_range() kernel " + "large pte 0x%lx pfn base 0x%lx offset 0x%lx " + "page 0x%px is not valid or is free " + "for shadow address 0x%lx\n", + pte_val(pte), pfn, pfn_off, ptepage, + shadow_addr); + ret = -EINVAL; + break; + } + get_page(ptepage); + page_add_shadow_rmap(ptepage); + pte = mk_pfn_pte(pfn + pfn_off, pte); + set_pte_at(current->mm, shadow_addr, shadow_pte, pte); + DebugKVM("set shadow PTE 0x%px == 0x%lx to page 0x%px for shadow " + "address 0x%lx\n", + shadow_pte, pte_val(*shadow_pte), ptepage, shadow_addr); + } while (shadow_pte++, pfn_off++, + kernel_addr += PAGE_SIZE, shadow_addr += PAGE_SIZE, + kernel_addr != kernel_end); + pte_unmap_unlock(orig_shadow_pte, shadow_ptl); + DebugKVM("finished and returns 0x%lx\n", ret); + return ret; +} + +static inline long +map_host_ttable_pte_range(struct vm_area_struct *vma, + pmd_t *kernel_pmd, pmd_t *shadow_pmd, + e2k_addr_t kernel_addr, e2k_addr_t kernel_end, + e2k_addr_t shadow_addr, e2k_addr_t shadow_end) +{ + pte_t *kernel_pte; + pte_t *shadow_pte; + pte_t *orig_shadow_pte; + spinlock_t *shadow_ptl = NULL; + struct page *ptepage; + int ret = 0; + + DebugKVM("started: kernel start 0x%lx end 0x%lx shadow start 0x%lx " + "end 0x%lx kernel_pmd 0x%px == 0x%lx\n", + kernel_addr, kernel_end, shadow_addr, shadow_end, + kernel_pmd, pmd_val(*kernel_pmd)); + kernel_pte = pte_offset_kernel(kernel_pmd, kernel_addr); + shadow_pte = pte_alloc_map_lock(current->mm, shadow_pmd, shadow_addr, + &shadow_ptl); + if 
(!shadow_pte) { + printk(KERN_ERR "map_host_ttable_pte_range() could not alloc " + "PTE page for shadow addr 0x%lx\n", shadow_addr); + return -ENOMEM; + } + orig_shadow_pte = shadow_pte; + do { + if (pte_none(*kernel_pte)) { + printk(KERN_ERR "map_host_ttable_pmd_range() empty " + "PTE for kernel addr 0x%lx\n", + kernel_addr); + ret = -EINVAL; + break; + } + DebugKVM("will map kernel address 0x%lx pte 0x%px == 0x%lx " + "for shadow address 0x%lx\n", + kernel_addr, kernel_pte, pte_val(*kernel_pte), + shadow_addr); + if (!pte_present(*kernel_pte)) { + printk(KERN_ERR "map_host_ttable_pmd_range() kernel " + "addr 0x%lx PTE is not present %px == 0x%lx\n", + kernel_addr, kernel_pte, pte_val(*kernel_pte)); + ret = -EINVAL; + break; + } + if (!pte_none(*shadow_pte)) { + printk(KERN_ERR "Old mapping existed for shadow " + "address 0x%lx pte 0x%px = 0x%lx\n", + shadow_addr, shadow_pte, pte_val(*shadow_pte)); + ret = -EINVAL; + break; + } + ptepage = pte_page(*kernel_pte); + if ((!page_valid(ptepage)) || !page_count(ptepage)) { + printk(KERN_ERR "map_host_ttable_pte_range() kernel " + "pte 0x%lx page 0x%px is not valid or is " + "free for address 0x%lx\n", + pte_val(*kernel_pte), ptepage, kernel_addr); + ret = -EINVAL; + break; + } + get_page(ptepage); + page_add_shadow_rmap(ptepage); + set_pte_at(current->mm, shadow_addr, shadow_pte, *kernel_pte); + DebugKVM("set shadow PTE 0x%px == 0x%lx to page 0x%px for shadow " + "address 0x%lx\n", + shadow_pte, pte_val(*shadow_pte), ptepage, shadow_addr); + } while (kernel_pte++, shadow_pte++, + kernel_addr += PAGE_SIZE, shadow_addr += PAGE_SIZE, + kernel_addr != kernel_end); + pte_unmap_unlock(orig_shadow_pte, shadow_ptl); + DebugKVM("finished and returns %d\n", ret); + return ret; +} + +static inline long +map_host_ttable_pmd_range(struct vm_area_struct *vma, + pud_t *kernel_pud, pud_t *shadow_pud, + e2k_addr_t kernel_addr, e2k_addr_t kernel_end, + e2k_addr_t shadow_addr, e2k_addr_t shadow_end) +{ + e2k_addr_t kernel_next; + 
e2k_addr_t shadow_next; + pmd_t *kernel_pmd; + pmd_t *shadow_pmd; + pte_t pte; + long ret = 0; + + DebugKVM("started: kernel start 0x%lx end 0x%lx shadow start 0x%lx " + "end 0x%lx kernel_pud 0x%px == 0x%lx\n", + kernel_addr, kernel_end, shadow_addr, shadow_end, + kernel_pud, pud_val(*kernel_pud)); + kernel_pmd = pmd_offset(kernel_pud, kernel_addr); + shadow_pmd = pmd_alloc(current->mm, shadow_pud, shadow_addr); + if (shadow_pmd == NULL) { + printk(KERN_ERR "map_host_ttable_pmd_range() could not " + "allocate PMD for shadow addr 0x%lx\n", + shadow_addr); + return -ENOMEM; + } + do { + if (pmd_none(*kernel_pmd)) { + printk(KERN_ERR "map_host_ttable_pmd_range() empty " + "PMD for kernel addr 0x%lx\n", + kernel_addr); + return -EINVAL; + } + DebugKVM("will map kernel address 0x%lx pmd 0x%px == 0x%lx to " + "shadow address 0x%lx\n", + kernel_addr, kernel_pmd, pmd_val(*kernel_pmd), + shadow_addr); + kernel_next = pmd_addr_end(kernel_addr, kernel_end); + shadow_next = pmd_addr_end(shadow_addr, shadow_end); + pte = *((pte_t *)kernel_pmd); + if (!pte_large_page(pte)) { + ret = map_host_ttable_pte_range(vma, + kernel_pmd, shadow_pmd, + kernel_addr, kernel_next, + shadow_addr, shadow_next); + } else { + ret = map_host_ttable_large_pte_range(vma, + pte, shadow_pmd, + kernel_addr, kernel_next, + shadow_addr, shadow_next); + } + if (ret < 0) + break; + } while (kernel_pmd++, shadow_pmd++, + kernel_addr = kernel_next, shadow_addr = shadow_end, + kernel_addr != kernel_end); + DebugKVM("finished and returns 0x%lx\n", ret); + return ret; +} + +static inline long +map_host_ttable_pud_range(struct vm_area_struct *vma, + pgd_t *kernel_pgd, pgd_t *shadow_pgd, + e2k_addr_t kernel_addr, e2k_addr_t kernel_end, + e2k_addr_t shadow_addr, e2k_addr_t shadow_end) +{ + e2k_addr_t kernel_next; + e2k_addr_t shadow_next; + pud_t *kernel_pud; + pud_t *shadow_pud; + long ret = 0; + + DebugKVM("started: kernel start 0x%lx end 0x%lx shadow start 0x%lx " + "end 0x%lx kernel_pgd 0x%px == 0x%lx\n", + 
kernel_addr, kernel_end, shadow_addr, shadow_end, + kernel_pgd, pgd_val(*kernel_pgd)); + kernel_pud = pud_offset(kernel_pgd, kernel_addr); + shadow_pud = pud_alloc(current->mm, shadow_pgd, shadow_addr); + if (shadow_pud == NULL) { + printk(KERN_ERR "map_host_ttable_pud_range() could not " + "allocate PUD for shadow addr 0x%lx\n", + shadow_addr); + return -ENOMEM; + } + do { + if (pud_none(*kernel_pud)) { + printk(KERN_ERR "map_host_ttable_pud_range() empty " + "PUD for kernel addr 0x%lx\n", + kernel_addr); + return -EINVAL; + } + DebugKVM("will map kernel address 0x%lx pud 0x%px == 0x%lx to " + "shadow address 0x%lx\n", + kernel_addr, kernel_pud, pud_val(*kernel_pud), + shadow_addr); + kernel_next = pud_addr_end(kernel_addr, kernel_end); + shadow_next = pud_addr_end(shadow_addr, shadow_end); + ret = map_host_ttable_pmd_range(vma, kernel_pud, shadow_pud, + kernel_addr, kernel_next, shadow_addr, shadow_next); + if (ret < 0) + break; + } while (kernel_pud++, shadow_pud++, + kernel_addr = kernel_next, shadow_addr = shadow_next, + kernel_addr != kernel_end); + DebugKVM("finished and returns 0x%lx\n", ret); + return ret; +} + +/* + * Map host kernel trap table to shadow guest kernel image + */ +int kvm_map_host_ttable_to_shadow(struct kvm *kvm, e2k_addr_t kernel_base, + gva_t shadow_base) +{ + e2k_addr_t start_addr; + e2k_addr_t end_addr; + e2k_addr_t kernel_next; + e2k_addr_t shadow_addr; + e2k_addr_t shadow_end; + e2k_addr_t shadow_next; + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + pgd_t *kernel_pgd; + pgd_t *shadow_pgd; + int r; + int nid; + + DebugKVM("started: kernel base 0x%lx, guest shadow base 0x%lx\n", + kernel_base, shadow_base); + + start_addr = PAGE_ALIGN_UP(KERNEL_TTABLE_BASE); + end_addr = PAGE_ALIGN_DOWN(KERNEL_TTABLE_END); + if (mm == NULL) { + printk(KERN_ERR "kvm_map_host_ttable_to_shadow(): user has not " + "MM structure\n"); + return -EINVAL; + } + if (start_addr != kernel_base) { + printk(KERN_ERR 
"kvm_map_host_ttable_to_shadow(): trap table " + "0x%lx is not started from kernel base 0x%lx\n", + start_addr, kernel_base); + return -EINVAL; + } + shadow_addr = shadow_base; + shadow_end = shadow_addr + (end_addr - start_addr); + nid = numa_node_id(); + DebugKVM("will map trap table from 0x%lx to 0x%lx on node #%d\n", + start_addr, end_addr, nid); + down_write(&mm->mmap_sem); + kernel_pgd = node_pgd_offset_kernel(nid, start_addr); + shadow_pgd = pgd_offset(mm, shadow_addr); + vma = find_vma(mm, shadow_addr); + if (vma == NULL) { + printk(KERN_ERR "kvm_map_host_ttable_to_shadow(): could not " + "find VMA structure for address 0x%lx\n", + shadow_addr); + r = -EINVAL; + goto out; + } + if (vma->vm_start > shadow_addr || vma->vm_end < shadow_end) { + printk(KERN_ERR "Invalid VMA structure start addr 0x%lx or " + "end 0x%lx (should be <= 0x%lx & >= 0x%lx)\n", + vma->vm_start, vma->vm_end, + shadow_addr, shadow_end); + r = -EINVAL; + goto out; + } + DebugKVM("found VMA from 0x%lx to 0x%lx\n", + vma->vm_start, vma->vm_end); + VM_BUG_ON(vma->anon_vma); + do { + kernel_next = pgd_addr_end(start_addr, end_addr); + shadow_next = pgd_addr_end(shadow_addr, shadow_end); + if (pgd_none(*kernel_pgd)) { + printk(KERN_ERR "kvm_map_host_ttable_to_shadow() " + "empty kernel trap table pgd for addr 0x%lx\n", + start_addr); + r = -EINVAL; + break; + } + r = map_host_ttable_pud_range(vma, kernel_pgd, shadow_pgd, + start_addr, kernel_next, + shadow_addr, shadow_next); + if (r != 0) { + pr_err("kvm_map_host_ttable_to_shadow() failed and " + "returns error %d\n", r); + break; + } + } while (kernel_pgd++, shadow_pgd++, + start_addr = kernel_next, shadow_addr = shadow_next, + start_addr != end_addr); +out: + up_write(&mm->mmap_sem); + DebugKVM("finished and returns %d\n", r); + return r; +} + +static inline pte_t *get_user_ptep(gmm_struct_t *gmm, e2k_addr_t addr) +{ + pr_err("%s(): is not implemented\n", __func__); + return NULL; +} + +static e2k_addr_t 
do_print_guest_user_address_ptes(gmm_struct_t *gmm, + e2k_addr_t addr) +{ + pgd_t *pgd = NULL; + pud_t *pud = NULL; + pmd_t *pmd = NULL; + pte_t *pte = NULL; + pte_t *pmd_pte, *pud_pte; + e2k_addr_t pa = 0; + + pr_err("%s(): is not implemented\n", __func__); + return pa; + + if (pgd_none_or_clear_bad(pgd)) { + pr_info("host PGD 0x%px = 0x%016lx none or bad for guest user " + "address 0x%016lx\n", + pgd, pgd_val(*pgd), addr); + return pa; + } + pr_info("host PGD 0x%px = 0x%016lx valid for guest user " + "address 0x%016lx\n", + pgd, pgd_val(*pgd), addr); + if (pud == NULL) { + pr_info("host PUD is NULL for guest user " + "address 0x%016lx\n", + addr); + return pa; + } + pud_pte = (pte_t *) pud; + if (pte_large_page(*pud_pte)) { + pr_info("host PUD 0x%px = 0x%016lx is PTE of large page for " + "guest user address 0x%016lx\n", + pud_pte, pte_val(*pud_pte), addr); + pa = _PAGE_PFN_TO_PADDR(pte_val(*pud_pte)) + (addr & ~PUD_MASK); + return pa; + } + if (pud_none_or_clear_bad(pud)) { + pr_info("host PUD 0x%px = 0x%016lx none or bad for guest user " + "address 0x%016lx\n", + pud, pud_val(*pud), addr); + return pa; + } + pr_info("host PUD 0x%px = 0x%016lx valid for guest user " + "address 0x%016lx\n", + pud, pud_val(*pud), addr); + if (pmd == NULL) { + pr_info("host PMD is NULL for guest user " + "address 0x%016lx\n", + addr); + return pa; + } + pmd_pte = (pte_t *) pmd; + if (pte_large_page(*pmd_pte)) { + pr_info("host PMD 0x%px = 0x%016lx is PTE of large page for " + "guest user address 0x%016lx\n", + pmd_pte, pte_val(*pmd_pte), addr); + pa = _PAGE_PFN_TO_PADDR(pte_val(*pmd_pte)) + (addr & ~PMD_MASK); + return pa; + } + if (pmd_none_or_clear_bad(pmd)) { + pr_info("host PMD 0x%px = 0x%016lx none or bad for guest user " + "address 0x%016lx\n", + pmd, pmd_val(*pmd), addr); + return pa; + } + pr_info("host PMD 0x%px = 0x%016lx valid for guest user address 0x%016lx\n", + pmd, pmd_val(*pmd), addr); + if (pte == NULL) { + pr_info("host PTE is NULL for guest user " + "address 
0x%016lx\n", + addr); + return pa; + } else if (pte_none(*pte)) { + pr_info("host PTE 0x%px = 0x%016lx none for guest user " + "address 0x%016lx\n", + pte, pte_val(*pte), addr); + return pa; + } + if (!pte_present(*pte)) { + pr_info("host PTE 0x%px = 0x%016lx is pte of swaped " + "page for guest user address 0x%016lx\n", + pte, pte_val(*pte), addr); + return pa; + } + pr_info("host PTE 0x%px = 0x%016lx valid & present for guest user " + "address 0x%016lx\n", + pte, pte_val(*pte), addr); + pa = _PAGE_PFN_TO_PADDR(pte_val(*pte)) + (addr & 0xfff); + return pa; +} + +e2k_addr_t kvm_print_guest_user_address_ptes(struct kvm *kvm, + int gmmid_nr, e2k_addr_t addr) +{ + gmm_struct_t *gmm; + e2k_addr_t pa = 0; + + if (addr >= GUEST_PAGE_OFFSET) { + pr_err("address 0x%lx is not guest user address\n", addr); + return -EINVAL; + } + if (gmmid_nr < 0) { + pr_err("bad host agent id #%d of guest user mm\n", gmmid_nr); + return -EINVAL; + } + gmm = kvm_find_gmmid(&kvm->arch.gmmid_table, gmmid_nr); + if (gmm == NULL) { + pr_err("could not find host agent #%d of guest mm " + "for address 0x%lx\n", gmmid_nr, addr); + return -EINVAL; + } + pa = do_print_guest_user_address_ptes(gmm, addr); +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT + if (!MMU_IS_SEPARATE_PT() && THERE_IS_DUP_KERNEL) { + pgd_t *pgdp; + + pgdp = cpu_kernel_root_pt + pgd_index(addr); + pr_info("host CPU #%d kernel root page table:\n", + smp_processor_id()); + print_address_ptes(pgdp, addr, 0); + } +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + print_va_tlb(addr, 0); + print_va_tlb(pte_virt_offset(_PAGE_ALIGN_UP(addr, PTE_SIZE)), 0); + print_va_tlb(pmd_virt_offset(_PAGE_ALIGN_UP(addr, PMD_SIZE)), 0); + print_va_tlb(pud_virt_offset(_PAGE_ALIGN_UP(addr, PUD_SIZE)), 0); + return pa; +} + +e2k_addr_t +kvm_guest_user_address_to_pva(struct task_struct *task, e2k_addr_t address) +{ + thread_info_t *ti = task_thread_info(task); + struct kvm_vcpu *vcpu; + gmm_struct_t *gmm; + pte_t *ptep, pte; + e2k_addr_t phys; + bool 
locked = false; + + DebugKVMGU("Task %s (%d) started for address 0x%lx\n", + task->comm, task->pid, address); + + if (!IS_GUEST_USER_ADDRESS(address)) { + pr_err("%s(): Address 0x%lx is not guest address\n", + __func__, address); + return -1L; + } + vcpu = ti->vcpu; + if (vcpu == NULL) { + pr_err("%s():VCPU process %s (%d) has not VCPU structure " + "pointer\n", + __func__, task->comm, task->pid); + return -1L; + } + if (!is_paging(vcpu)) { + /* Nonpaging mode: it is guest physical address */ + pte_val(pte) = pgprot_val(nonpaging_gpa_to_pte(vcpu, address)); + gmm = pv_vcpu_get_init_gmm(vcpu); + goto host_mapped; + } + gmm = pv_vcpu_get_active_gmm(ti->vcpu); + if (gmm == NULL) { + pr_err("%s():VCPU #%d process %s (%d) has not active guest " + "user MM (gmm), so conversion is impossible\n", + __func__, vcpu->vcpu_id, task->comm, task->pid); + return -1L; + } + BUG_ON(ti->gthread_info == NULL); + WARN_ON(ti->gthread_info->gmm && gmm != ti->gthread_info->gmm); + + locked = spin_trylock(&gmm->page_table_lock); + + ptep = get_user_ptep(gmm, address); + if (ptep == NULL) { + pr_err("%s(): could not find guest user address 0x%lx mapping " + "on host\n", __func__, address); + goto host_nomap; + } + pte = *ptep; + +host_mapped: + if (pte_none(pte)) { + pr_err("%s(): guest user virtual address 0x%lx already " + "unmapped on host PT\n", __func__, address); + goto host_nomap; + } + if (!pte_present(pte)) { + pr_err("%s(): host page of guest user virtual address 0x%lx " + "mapping is not present\n", __func__, address); + goto host_nomap; + } + + if (locked) + spin_unlock(&gmm->page_table_lock); + + phys = (pte_pfn(pte) << PAGE_SHIFT) | (address & ~PAGE_MASK); + DebugKVMGU("guest user virtual address 0x%lx is physical " + "address 0x%lx on host\n", + address, phys); + + return (e2k_addr_t)__va(phys); + +host_nomap: + if (locked) + spin_unlock(&gmm->page_table_lock); + return -1L; + +} + +void kvm_arch_free_memory_region(struct kvm *kvm, + struct kvm_memory_slot *memslot) +{ + 
unsigned long base_gfn = memslot->base_gfn; + unsigned long guest_start = memslot->userspace_addr; + unsigned long npages = memslot->npages; + unsigned long guest_end = guest_start + (npages << PAGE_SHIFT); + user_area_t *guest_area; + + DebugKVM("slot base pfn 0x%lx guest virtual from 0x%lx to 0x%lx\n", + base_gfn, guest_start, guest_end); + if (npages == 0) + return; + if (memslot->userspace_addr == 0) { + DebugKVM("slot base gfn 0x%lx size 0x%lx pages is not " + "allocated by user so cannot be freed\n", + base_gfn, npages); + return; + } + guest_area = memslot->arch.guest_areas.area; + if (guest_area == NULL) { + DebugKVM("slot base gfn 0x%lx guest virtual from 0x%lx " + "to 0x%lx has not guest area support\n", + base_gfn, guest_start, guest_end); + return; + } + user_area_release(guest_area); + memslot->arch.guest_areas.area = NULL; + + return; +} + +/* + * Convert VCPU process virtual address to equal host physical address (__va()) + * VCPU process addres can be: + * host kernel address (in hypercals, traps, interrupts) + * guest kernel address which is host user address + * guest user address + */ +e2k_addr_t kvm_get_guest_phys_addr(struct task_struct *task, + e2k_addr_t virt_addr) +{ + thread_info_t *ti = task_thread_info(task); + + if (ti->vcpu == NULL) { + /* it is not VCPU process, so conversion as usual case */ + return NATIVE_GET_PHYS_ADDR(task, virt_addr); + } else if (virt_addr >= NATIVE_TASK_SIZE) { + /* it is host kernel address */ + return NATIVE_GET_PHYS_ADDR(task, virt_addr); + } else if (!IS_GUEST_USER_ADDRESS(virt_addr)) { + /* it is guest kernel address, so it is host user address */ + return NATIVE_GET_PHYS_ADDR(task, virt_addr); + } + /* so it is guest user virtual address */ + return kvm_guest_user_address_to_pva(task, virt_addr); +} + +/* + * Recovery faulted store operations + * common case: some addresses can be from host kernel address space, + * but point to guest structures, shadow image ... 
+ */ +long kvm_recovery_faulted_tagged_guest_store(struct kvm_vcpu *vcpu, + e2k_addr_t address, u64 wr_data, u64 st_rec_opc, + u64 data_ext, u64 opc_ext, u64 _arg) +{ + union recovery_faulted_arg arg = { .entire = _arg }; + unsigned long hva; + kvm_arch_exception_t exception; + + DebugKVMREC("started for address 0x%lx data 0x%llx tag 0x%x, channel #%d\n", + address, wr_data, arg.tag, arg.chan); + + hva = kvm_vcpu_gva_to_hva(vcpu, address, true, &exception); + if (kvm_is_error_hva(hva)) { + pr_err("%s(): cannot translate guest address 0x%lx, " + "retry with page fault\n", __func__, address); + kvm_vcpu_inject_page_fault(vcpu, (void *)address, + &exception); + return -EAGAIN; + } + address = hva; + + native_recovery_faulted_tagged_store(address, wr_data, arg.tag, + st_rec_opc, data_ext, arg.tag_ext, opc_ext, + arg.chan, arg.qp, arg.atomic); + return 0; +} +long kvm_recovery_faulted_guest_load(struct kvm_vcpu *vcpu, e2k_addr_t address, + u64 *ld_val, u8 *data_tag, u64 ld_rec_opc, int chan) +{ + unsigned long hva; + kvm_arch_exception_t exception; + + DebugKVMREC("started for address 0x%lx, channel #%d\n", + address, chan); + + hva = kvm_vcpu_gva_to_hva(vcpu, address, false, &exception); + if (kvm_is_error_hva(hva)) { + pr_err("%s(): cannot translate guest address 0x%lx, " + "retry with page fault\n", __func__, address); + kvm_vcpu_inject_page_fault(vcpu, (void *)address, + &exception); + return -EAGAIN; + } + address = hva; + + hva = kvm_vcpu_gva_to_hva(vcpu, (gva_t)ld_val, true, &exception); + if (kvm_is_error_hva(hva)) { + pr_err("%s(): cannot translate guest ld_val 0x%lx, " + "retry with page fault\n", __func__, ld_val); + kvm_vcpu_inject_page_fault(vcpu, (void *)ld_val, + &exception); + return -EAGAIN; + } + ld_val = (u64 *)hva; + + hva = kvm_vcpu_gva_to_hva(vcpu, (gva_t)data_tag, true, &exception); + if (kvm_is_error_hva(hva)) { + pr_err("%s(): cannot translate guest data_tag 0x%lx, " + "retry with page fault\n", __func__, data_tag); + 
kvm_vcpu_inject_page_fault(vcpu, (void *)data_tag, + &exception); + return -EAGAIN; + } + data_tag = (u8 *)hva; + + native_recovery_faulted_load(address, ld_val, data_tag, + ld_rec_opc, chan); + DebugKVMREC("loaded data 0x%llx tag 0x%x from address 0x%lx\n", + *ld_val, *data_tag, address); + return 0; +} +long kvm_recovery_faulted_guest_move(struct kvm_vcpu *vcpu, + e2k_addr_t addr_from, e2k_addr_t addr_to, e2k_addr_t addr_to_hi, + u64 ld_rec_opc, u64 _arg, u32 first_time) +{ + union recovery_faulted_arg arg = { .entire = _arg }; + unsigned long hva; + kvm_arch_exception_t exception; + + DebugKVMREC("started from address 0x%lx to addr 0x%lx, channel #%d\n", + addr_from, addr_to, arg.chan); + + hva = kvm_vcpu_gva_to_hva(vcpu, addr_from, false, &exception); + if (kvm_is_error_hva(hva)) { + pr_err("%s(): cannot translate guest addr_from 0x%lx, " + "retry with page fault\n", __func__, addr_from); + kvm_vcpu_inject_page_fault(vcpu, (void *)addr_from, + &exception); + return -EAGAIN; + } + addr_from = hva; + + hva = kvm_vcpu_gva_to_hva(vcpu, addr_to, true, &exception); + if (kvm_is_error_hva(hva)) { + pr_err("%s(): cannot translate guest addr_to 0x%lx, " + "retry with page fault\n", __func__, addr_to); + kvm_vcpu_inject_page_fault(vcpu, (void *)addr_to, + &exception); + return -EAGAIN; + } + addr_to = hva; + + if (addr_to_hi) { + hva = kvm_vcpu_gva_to_hva(vcpu, addr_to_hi, true, &exception); + if (kvm_is_error_hva(hva)) { + pr_err("%s(): cannot translate guest addr_to_hi 0x%lx, " + "retry with page fault\n", + __func__, addr_to_hi); + kvm_vcpu_inject_page_fault(vcpu, (void *)addr_to_hi, + &exception); + return -EAGAIN; + } + addr_to_hi = hva; + } + + native_recovery_faulted_move(addr_from, addr_to, addr_to_hi, + arg.vr, ld_rec_opc, arg.chan, arg.qp, arg.atomic, + first_time); + DebugKVMREC("loaded data 0x%llx from address 0x%lx\n", + *((u64 *)addr_to), addr_from); + return 0; +} +long kvm_recovery_faulted_load_to_guest_greg(struct kvm_vcpu *vcpu, + e2k_addr_t address, 
u32 greg_num_d, u64 ld_rec_opc, + u64 _arg, u64 saved_greg_lo, u64 saved_greg_hi) +{ + union recovery_faulted_arg arg = { .entire = _arg }; + unsigned long hva; + vcpu_l_gregs_t *l_gregs; + u64 *addr_lo, *addr_hi; + kvm_arch_exception_t exception; + + DebugKVMREC("started for address 0x%lx global reg #%d, channel #%d\n", + address, greg_num_d, arg.chan); + + hva = kvm_vcpu_gva_to_hva(vcpu, address, false, &exception); + if (kvm_is_error_hva(hva)) { + pr_err("%s(): cannot translate guest address 0x%lx, " + "retry with page fault\n", __func__, address); + kvm_vcpu_inject_page_fault(vcpu, (void *)address, &exception); + return -EAGAIN; + } + address = hva; + + if ((u64 *)saved_greg_lo != NULL) { + hva = kvm_vcpu_gva_to_hva(vcpu, saved_greg_lo, true, + &exception); + if (kvm_is_error_hva(hva)) { + pr_err("%s(): cannot translate guest addr_to 0x%llx, " + "retry with page fault\n", + __func__, saved_greg_lo); + kvm_vcpu_inject_page_fault(vcpu, + (void *)saved_greg_lo, &exception); + return -EAGAIN; + } + saved_greg_lo = hva; + } + + if ((u64 *)saved_greg_hi != NULL) { + hva = kvm_vcpu_gva_to_hva(vcpu, saved_greg_hi, true, + &exception); + if (kvm_is_error_hva(hva)) { + pr_err("%s(): cannot translate guest saved_greg_hi " + "0x%llx, retry with page fault\n", + __func__, saved_greg_hi); + kvm_vcpu_inject_page_fault(vcpu, + (void *)saved_greg_hi, &exception); + return -EAGAIN; + } + saved_greg_hi = hva; + } + + native_recovery_faulted_load_to_greg(address, greg_num_d, arg.vr, + ld_rec_opc, arg.chan, arg.qp, arg.atomic, + (u64 *)saved_greg_lo, (u64 *)saved_greg_hi); + + if (!(LOCAL_GREGS_USER_MASK & (1UL << greg_num_d))) { + /* it is not "local" global register */ + return 0; + } + + /* save updated registers value to recover upon return to user */ + KVM_BUG_ON(!(LOCAL_GREGS_USER_MASK & (1UL << greg_num_d))); + KVM_BUG_ON((KERNEL_GREGS_MAX_MASK & (1UL << greg_num_d)) && + (u64 *)saved_greg_lo == NULL); + KVM_BUG_ON((HOST_KERNEL_GREGS_PAIR_MASK & (1UL << greg_num_d)) && + 
(u64 *)saved_greg_lo == NULL); + + l_gregs = get_new_pv_vcpu_l_gregs(vcpu); + KVM_BUG_ON(l_gregs == NULL); + + addr_lo = l_gregs->gregs.g[greg_num_d - LOCAL_GREGS_START].xreg; + if (!arg.atomic) + addr_hi = &addr_lo[1]; + else + addr_hi = &addr_lo[2]; + if ((u64 *)saved_greg_lo != NULL) { + native_recovery_faulted_move(saved_greg_lo, + (u64)addr_lo, (u64)addr_hi, + arg.vr, ld_rec_opc, arg.chan, arg.qp, arg.atomic, 1); + } else { + native_recovery_faulted_move(address, + (u64)addr_lo, (u64)addr_hi, + arg.vr, ld_rec_opc, arg.chan, arg.qp, arg.atomic, 1); + } + l_gregs->updated |= (1UL << greg_num_d); + + return 0; +} + +void update_pv_vcpu_local_glob_regs(struct kvm_vcpu *vcpu, + local_gregs_t *gregs) +{ + vcpu_l_gregs_t *l_gregs; + u64 updated_mask, reg_mask; + int l, reg_no; + u64 *addr_from, *addr_to; + + if (!is_actual_pv_vcpu_l_gregs(vcpu)) { + /* current trap activation has not actual gregs */ + return; + } + + l_gregs = get_actual_pv_vcpu_l_gregs(vcpu); + KVM_BUG_ON(l_gregs == NULL); + + updated_mask = l_gregs->updated; + if (updated_mask == 0) { + /* nothing to update */ + goto out_updated; + } + + KVM_BUG_ON((updated_mask & ~LOCAL_GREGS_USER_MASK) != 0); + + for (l = 0; l < LOCAL_GREGS_NUM; l++) { + reg_no = LOCAL_GREGS_START + l; + reg_mask = (1UL << reg_no); + if (!(updated_mask & reg_mask)) + continue; + addr_to = gregs->g[l].xreg; + addr_from = l_gregs->gregs.g[l].xreg; + native_move_tagged_qword((u64)addr_from, (u64)addr_to); + updated_mask &= ~reg_mask; + if (updated_mask == 0) + break; + } + l_gregs->updated = updated_mask; + +out_updated: + put_pv_vcpu_l_gregs(vcpu); +} + +long kvm_move_tagged_guest_data(struct kvm_vcpu *vcpu, + int word_size, e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + unsigned long hva_from, hva_to; + kvm_arch_exception_t exception; + + DebugKVMREC("started for address from 0x%lx to 0x%lx\n", + addr_from, addr_to); + + hva_from = kvm_vcpu_gva_to_hva(vcpu, addr_from, false, &exception); + if (kvm_is_error_hva(hva_from)) { + 
pr_err("%s(): cannot translate guest addr_from 0x%lx, " + "retry with page fault\n", __func__, addr_from); + kvm_vcpu_inject_page_fault(vcpu, (void *)addr_from, + &exception); + return -EAGAIN; + } + DebugKVMREC("guest address from 0x%lx converted to hva 0x%lx\n", + addr_from, hva_from); + + hva_to = kvm_vcpu_gva_to_hva(vcpu, addr_to, true, &exception); + if (kvm_is_error_hva(hva_to)) { + pr_err("%s(): cannot translate guest addr_to 0x%lx, " + "retry with page fault\n", __func__, addr_to); + kvm_vcpu_inject_page_fault(vcpu, (void *)addr_to, + &exception); + return -EAGAIN; + } + DebugKVMREC("guest address to 0x%lx converted to hva 0x%lx\n", + addr_to, hva_to); + + switch (word_size) { + case sizeof(u32): + native_move_tagged_word(hva_from, hva_to); + break; + case sizeof(u64): + native_move_tagged_dword(hva_from, hva_to); + break; + case sizeof(u64) * 2: + native_move_tagged_qword(hva_from, hva_to); + break; + default: + return -EINVAL; + } + return 0; +} + +e2k_addr_t kvm_print_guest_kernel_ptes(e2k_addr_t address) +{ + e2k_addr_t pa = 0; + + if (address >= NATIVE_TASK_SIZE) { + pr_info("Address 0x%016lx is not guest kernel address " + "to print PTE's\n", + address); + return pa; + } + return print_user_address_ptes(current->mm, address); +} diff --git a/arch/e2k/kvm/mmu-x86.h b/arch/e2k/kvm/mmu-x86.h new file mode 100644 index 000000000000..0103f09e8537 --- /dev/null +++ b/arch/e2k/kvm/mmu-x86.h @@ -0,0 +1,133 @@ +#ifndef _E2K_KVM_MMU_X86_H_ +#define _E2K_KVM_MMU_X86_H_ + +#include +#include + +#define PT_X86_PRESENT_MASK _PAGE_P_X86 +#define PT_X86_WRITABLE_MASK _PAGE_W_X86 +#define PT_X86_USER_MASK _PAGE_USER_X86 +#define PT_X86_PWT_MASK _PAGE_PWT_X86 +#define PT_X86_PCD_MASK _PAGE_PCD_X86 +#define PT_X86_ACCESSED_MASK _PAGE_A_X86 +#define PT_X86_DIRTY_MASK _PAGE_D_X86 +#define PT_X86_PAGE_SIZE_MASK _PAGE_PSE_X86 +#define PT_X86_PAT_MASK _PAGE_PAT_X86 +#define PT_X86_GLOBAL_MASK _PAGE_G_X86 +#define PT_X86_32_NX_MASK _PAGE_NX_X86_32 +#define PT_X86_PAE_NX_MASK 
_PAGE_NX_X86_PAE +#define PT_X86_64_NX_MASK _PAGE_NX_X86_64 + +#define PT_X86_32_ROOT_LEVEL X86_32_PGD_LEVEL_NUM /* pte, pgd */ +#define PT_X86_PAE_ROOT_LEVEL X86_PAE_PGD_LEVEL_NUM /* pte, pmd, pgd */ +#define PT_X86_64_ROOT_LEVEL X86_64_PGD_LEVEL_NUM /* pte, pmd, pud, pgd */ +#define PT_X86_DIRECTORY_LEVEL X86_DIRECTORY_LEVEL_NUM /* pmd */ +#define PT_X86_PAGE_TABLE_LEVEL X86_PTE_LEVEL_NUM /* pte */ +#define PT_X86_MAX_HUGEPAGE_LEVEL MAX_HUGE_PAGES_LEVEL_X86_64 /* pud */ + +#define PT_X86_32_ENTRIES_BITS PT_ENT_BITS_X86_32 /* 10 bits */ +#define PT_X86_64_ENTRIES_BITS PT_ENT_BITS_X86_64 /* 9 bits */ +#define PT_X86_32_ENT_PER_PAGE PT_ENT_PER_PAGE_X86_32 /* 1024 entries */ +#define PT_X86_64_ENT_PER_PAGE PT_ENT_PER_PAGE_X86_64 /* 512 entries */ + +#ifdef CONFIG_X86_HW_VIRTUALIZATION +/* + * Currently, we have two sorts of write-protection, a) the first one + * write-protects guest page to sync the guest modification, b) another one is + * used to sync dirty bitmap when we do KVM_GET_DIRTY_LOG. The differences + * between these two sorts are: + * 1) the first case clears SPTE_MMU_WRITEABLE bit. + * 2) the first case requires flushing tlb immediately avoiding corrupting + * shadow page table between all vcpus so it should be in the protection of + * mmu-lock. And the another case does not need to flush tlb until returning + * the dirty bitmap to userspace since it only write-protects the page + * logged in the bitmap, that means the page in the dirty bitmap is not + * missed, so it can flush tlb out of mmu-lock. + * + * So, there is the problem: the first case can meet the corrupted tlb caused + * by another case which write-protects pages but without flush tlb + * immediately. In order to making the first case be aware this problem we let + * it flush tlb if we try to write-protect a spte whose SPTE_MMU_WRITEABLE bit + * is set, it works since another case never touches SPTE_MMU_WRITEABLE bit. 
+ * + * Anyway, whenever a spte is updated (only permission and status bits are + * changed) we need to check whether the spte with SPTE_MMU_WRITEABLE becomes + * readonly, if that happens, we need to flush tlb. Fortunately, + * mmu_spte_update() has already handled it perfectly. + * + * The rules to use SPTE_MMU_WRITEABLE and PT_WRITABLE_MASK: + * - if we want to see if it has writable tlb entry or if the spte can be + * writable on the mmu mapping, check SPTE_MMU_WRITEABLE, this is the most + * case, otherwise + * - if we fix page fault on the spte or do write-protection by dirty logging, + * check PT_WRITABLE_MASK. + * + * TODO: introduce APIs to split these two cases. + */ + +static inline bool is_write_protection(struct kvm_vcpu *vcpu) +{ + return kvm_read_cr0_bits(vcpu, X86_CR0_WP); +} + +/* + * Check if a given access (described through the I/D, W/R and U/S bits of a + * page fault error code pfec) causes a permission fault with the given PTE + * access rights (in ACC_* format). + * + * Return zero if the access does not fault; return the page fault error code + * if the access faults. + */ +static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, + unsigned pte_access, unsigned pte_pkey, + unsigned pfec) +{ + int cpl = kvm_x86_ops->get_cpl(vcpu); + unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); + + /* + * If CPL < 3, SMAP prevention are disabled if EFLAGS.AC = 1. + * + * If CPL = 3, SMAP applies to all supervisor-mode data accesses + * (these are implicit supervisor accesses) regardless of the value + * of EFLAGS.AC. + * + * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving + * the result in X86_EFLAGS_AC. We then insert it in place of + * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec, + * but it will be one in index if SMAP checks are being overridden. + * It is important to keep this branchless. 
+ */ + unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC); + int index = (pfec >> 1) + + (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1)); + bool fault = (mmu->permissions[index] >> pte_access) & 1; + u32 errcode = PFERR_PRESENT_MASK; + + WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK)); + if (unlikely(mmu->pkru_mask)) { + u32 pkru_bits, offset; + + /* + * PKRU defines 32 bits, there are 16 domains and 2 + * attribute bits per domain in pkru. pte_pkey is the + * index of the protection domain, so pte_pkey * 2 is + * is the index of the first bit for the domain. + */ + pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3; + + /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */ + offset = (pfec & ~1) + + ((pte_access & PT_USER_MASK) << + (PFERR_RSVD_BIT - PT_USER_SHIFT)); + + pkru_bits &= mmu->pkru_mask >> offset; + errcode |= -pkru_bits & PFERR_PK_MASK; + fault |= (pkru_bits != 0); + } + + return -(u32)fault & errcode; +} +#endif /* CONFIG_X86_HW_VIRTUALIZATION */ + +#endif /* _E2K_KVM_MMU_X86_H_ */ diff --git a/arch/e2k/kvm/mmu.h b/arch/e2k/kvm/mmu.h new file mode 100644 index 000000000000..ea0fc5eb250b --- /dev/null +++ b/arch/e2k/kvm/mmu.h @@ -0,0 +1,1664 @@ +#ifndef __KVM_E2K_MMU_H +#define __KVM_E2K_MMU_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mmu-e2k.h" +#include "mmu-x86.h" +#include "hv_mmu.h" +#include "cpu_defs.h" +#include "mmu_defs.h" + +#undef DEBUG_KVM_RECOVERY_MODE +#undef DebugKVMREC +#define DEBUG_KVM_RECOVERY_MODE 0 /* kernel recovery debugging */ +#define DebugKVMREC(fmt, args...) \ +({ \ + if (DEBUG_KVM_RECOVERY_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_SHADOW_MODE +#undef DebugKVMSH +#define DEBUG_KVM_SHADOW_MODE 0 /* shadow adresses debugging */ +#define DebugKVMSH(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_SHADOW_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PT_STRUCT_MODE +#undef DebugPTS +#define DEBUG_PT_STRUCT_MODE 0 /* page tables structure debugging */ +#define DebugPTS(fmt, args...) \ +({ \ + if (DEBUG_PT_STRUCT_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#if PT_E2K_PRESENT_MASK == PT_X86_PRESENT_MASK +# define PT_PRESENT_MASK PT_E2K_PRESENT_MASK +#else +# error "Page table PRESENT bit is different for e2k vs x86" +#endif +#if PT_E2K_WRITABLE_MASK == PT_X86_WRITABLE_MASK +# define PT_WRITABLE_MASK PT_E2K_WRITABLE_MASK +#else +# error "Page table WRITABLE bit is different for e2k vs x86" +#endif +#if PT_E2K_ACCESSED_MASK == PT_X86_ACCESSED_MASK +# define PT_ACCESSED_MASK PT_E2K_ACCESSED_MASK +#else +# error "Page table ACCESSED bit is different for e2k vs x86" +#endif +#if PT_E2K_DIRTY_MASK == PT_X86_DIRTY_MASK +# define PT_DIRTY_MASK PT_E2K_DIRTY_MASK +#else +# error "Page table DIRTY bit is different for e2k vs x86" +#endif +#if PT_E2K_PAGE_SIZE_MASK == PT_X86_PAGE_SIZE_MASK +# define PT_PAGE_SIZE_MASK PT_E2K_PAGE_SIZE_MASK +#else +# error "Page table PAGE SIZE bit is different for e2k vs x86" +#endif +#if PT_E2K_GLOBAL_MASK == PT_X86_GLOBAL_MASK +# define PT_GLOBAL_MASK PT_E2K_GLOBAL_MASK +#else +# error "Page table GLOBAL bit is different for e2k vs x86" +#endif + +#if PT_E2K_ROOT_LEVEL == PT_X86_64_ROOT_LEVEL +# define PT64_ROOT_LEVEL PT_E2K_ROOT_LEVEL +#else +# error "Page table root level is different for e2k vs x86" +#endif +#define PT32_ROOT_LEVEL PT_X86_32_ROOT_LEVEL +#define PT32E_ROOT_LEVEL PT_X86_PAE_ROOT_LEVEL + +#if PT_E2K_DIRECTORY_LEVEL == PT_X86_DIRECTORY_LEVEL +# define PT_DIRECTORY_LEVEL PT_E2K_DIRECTORY_LEVEL /* pmd */ +#else +# error "Page table directory level is different for e2k vs x86" +#endif +#if PT_E2K_PAGE_TABLE_LEVEL == PT_X86_PAGE_TABLE_LEVEL +# define PT_PAGE_TABLE_LEVEL PT_E2K_PAGE_TABLE_LEVEL /* pte */ +#else +# error "Page table entries level is 
different for e2k vs x86" +#endif +#if PT_E2K_MAX_HUGEPAGE_LEVEL >= PT_X86_MAX_HUGEPAGE_LEVEL +# define PT_MAX_HUGEPAGE_LEVEL PT_E2K_MAX_HUGEPAGE_LEVEL +#else /* PT_X86_MAX_HUGEPAGE_LEVEL > PT_E2K_MAX_HUGEPAGE_LEVEL */ +# define PT_MAX_HUGEPAGE_LEVEL PT_X86_MAX_HUGEPAGE_LEVEL +#endif + +#if PT_E2K_ENTRIES_BITS == PT_X86_64_ENTRIES_BITS +# define PT64_ENTRIES_BITS PT_E2K_ENTRIES_BITS +#else +# error "Page table level entry bits number is different for e2k vs x86" +#endif +#define PT32_ENTRIES_BITS PT_X86_32_ENTRIES_BITS + +#if PT_E2K_ENT_PER_PAGE == PT_X86_64_ENT_PER_PAGE +# define PT64_ENT_PER_PAGE PT_E2K_ENT_PER_PAGE +#else +# error "Page table level number of entries is different for e2k vs x86" +#endif +#define PT32_ENT_PER_PAGE PT_X86_32_ENT_PER_PAGE + +/* all available page tables abstructs */ +extern const pt_struct_t __nodedata pgtable_struct_e2k_v2; +extern const pt_struct_t __nodedata pgtable_struct_e2k_v3; +extern const pt_struct_t __nodedata pgtable_struct_e2k_v5; +extern const pt_struct_t __nodedata pgtable_struct_e2k_v6_pt_v6; +extern const pt_struct_t __nodedata pgtable_struct_e2k_v6_gp; + +#define pgtable_struct_e2k_v6_pt_v2 pgtable_struct_e2k_v5 + +extern const pt_struct_t *kvm_mmu_get_host_pt_struct(struct kvm *kvm); +extern const pt_struct_t *kvm_mmu_get_vcpu_pt_struct(struct kvm_vcpu *vcpu); +extern const pt_struct_t *kvm_mmu_get_gp_pt_struct(struct kvm *kvm); + +extern const pt_struct_t *kvm_get_mmu_host_pt_struct(struct kvm *kvm); +extern const pt_struct_t *kvm_get_mmu_guest_pt_struct(struct kvm_vcpu *vcpu); + +static inline const pt_struct_t * +mmu_get_host_pt_struct(struct kvm *kvm) +{ + return kvm->arch.host_pt_struct; +} + +static inline void +mmu_set_host_pt_struct(struct kvm *kvm, const pt_struct_t *pt_struct) +{ + kvm->arch.host_pt_struct = pt_struct; + if (pt_struct != NULL) { + DebugPTS("Setting hypervisor page table type: %s\n", + pt_struct->name); + } else { + DebugPTS("Reset hypervisor page table type, " + "should not be used\n"); + 
} +} + +static inline void +mmu_set_host_pt_struct_func(struct kvm *kvm, get_pt_struct_func_t func) +{ + kvm->arch.get_host_pt_struct = func; +} + +static inline const pt_struct_t * +mmu_get_vcpu_pt_struct(struct kvm_vcpu *vcpu) +{ + BUG_ON(vcpu->kvm->arch.guest_pt_struct == NULL); + return vcpu->kvm->arch.guest_pt_struct; +} + +static inline void +mmu_set_vcpu_pt_struct(struct kvm *kvm, const pt_struct_t *pt_struct) +{ + kvm->arch.guest_pt_struct = pt_struct; + if (pt_struct != NULL) { + DebugPTS("Setting guest page table type: %s\n", + pt_struct->name); + } else { + DebugPTS("Reset guest page table type, " + "should not be used\n"); + } +} + +static inline void +mmu_set_vcpu_pt_struct_func(struct kvm *kvm, get_vcpu_pt_struct_func_t func) +{ + kvm->arch.get_vcpu_pt_struct = func; +} + +static inline const pt_struct_t * +mmu_get_gp_pt_struct(struct kvm *kvm) +{ + BUG_ON(kvm->arch.gp_pt_struct == NULL); + return kvm->arch.gp_pt_struct; +} + +static inline void +mmu_set_gp_pt_struct(struct kvm *kvm, const pt_struct_t *pt_struct) +{ + kvm->arch.gp_pt_struct = pt_struct; + if (pt_struct != NULL) { + DebugPTS("Setting guest physical addresses page table " + "type: %s\n", + pt_struct->name); + } else { + DebugPTS("Reset guest physical addresses page table type, " + "should not be used\n"); + } +} + +static inline void +mmu_set_gp_pt_struct_func(struct kvm *kvm, get_pt_struct_func_t func) +{ + kvm->arch.get_gp_pt_struct = func; +} + +static inline const pt_struct_t * +kvm_get_host_pt_struct(struct kvm *kvm) +{ + BUG_ON(kvm->arch.get_host_pt_struct(kvm) == NULL); + return kvm->arch.get_host_pt_struct(kvm); +} + +static inline const pt_struct_t * +kvm_get_vcpu_pt_struct(struct kvm_vcpu *vcpu) +{ + BUG_ON(vcpu->kvm->arch.get_vcpu_pt_struct == NULL); + return vcpu->kvm->arch.get_vcpu_pt_struct(vcpu); +} + +static inline const pt_struct_t * +kvm_get_gp_pt_struct(struct kvm *kvm) +{ + BUG_ON(kvm->arch.get_gp_pt_struct == NULL); + return kvm->arch.get_gp_pt_struct(kvm); +} + 
+extern void dump_page_struct(struct page *page); + +/* KVM Hugepage definitions for host machine */ +#define KVM_MMU_HPAGE_SHIFT(kvm, level_id) \ + KVM_PT_LEVEL_HPAGE_SHIFT( \ + get_pt_struct_level_on_id(kvm_get_host_pt_struct(kvm), \ + level_id)) +#define KVM_MMU_HPAGE_SIZE(kvm, level_id) \ + KVM_PT_LEVEL_HPAGE_SIZE( \ + get_pt_struct_level_on_id(kvm_get_host_pt_struct(kvm), \ + level_id)) +#define KVM_MMU_HPAGE_MASK(kvm, level_id) \ + KVM_PT_LEVEL_HPAGE_MASK( \ + get_pt_struct_level_on_id(kvm_get_host_pt_struct(kvm), \ + level_id)) +#define KVM_MMU_PAGES_PER_HPAGE(kvm, level_id) \ + KVM_PT_LEVEL_PAGES_PER_HPAGE( \ + get_pt_struct_level_on_id(kvm_get_host_pt_struct(kvm), \ + level_id)) +#define KVM_MMU_HPAGE_GFN_SHIFT(kvm, level_id) \ + (KVM_MMU_HPAGE_SHIFT(kvm, level_id) - PAGE_SHIFT) + +static inline gfn_t +kvm_gfn_to_index(struct kvm *kvm, gfn_t gfn, gfn_t base_gfn, int level_id) +{ + return gfn_to_index(gfn, base_gfn, + get_pt_struct_level_on_id(kvm_get_host_pt_struct(kvm), + level_id)); +} + +/* uwx (u - user mode, w - writable, x executable) */ +#define ACC_EXEC_MASK 0x1 +#define ACC_WRITE_MASK 0x2 +#define ACC_USER_MASK 0x4 +#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK) +/* page tables directories always privileged & not executable */ +#define ACC_PT_DIR (ACC_WRITE_MASK) + +#define PFERR_PRESENT_BIT 0 +#define PFERR_WRITE_BIT 1 +#define PFERR_USER_BIT 2 +#define PFERR_RSVD_BIT 3 +#define PFERR_FETCH_BIT 4 +#define PFERR_NOT_PRESENT_BIT 5 +#define PFERR_PT_FAULT_BIT 6 +#define PFERR_INSTR_FAULT_BIT 7 +#define PFERR_INSTR_PROT_BIT 8 +#define PFERR_FORCED_BIT 9 +#define PFERR_WAIT_LOCK_BIT 10 +#define PFERR_GPTE_CHANGED_BIT 11 +#define PFERR_MMIO_BIT 12 +#define PFERR_ONLY_VALID_BIT 13 +#define PFERR_READ_PROT_BIT 14 +#define PFERR_IS_UNMAPPED_BIT 15 +#define PFERR_FAPB_BIT 16 + +#define PFERR_ACCESS_SIZE_BIT 24 + +#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT) +#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT) +#define PFERR_USER_MASK 
(1U << PFERR_USER_BIT) +#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT) +#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT) +#define PFERR_NOT_PRESENT_MASK (1U << PFERR_NOT_PRESENT_BIT) +#define PFERR_PT_FAULT_MASK (1U << PFERR_PT_FAULT_BIT) +#define PFERR_INSTR_FAULT_MASK (1U << PFERR_INSTR_FAULT_BIT) +#define PFERR_INSTR_PROT_MASK (1U << PFERR_INSTR_PROT_BIT) +#define PFERR_FORCED_MASK (1U << PFERR_FORCED_BIT) +#define PFERR_WAIT_LOCK_MASK (1U << PFERR_WAIT_LOCK_BIT) +#define PFERR_GPTE_CHANGED_MASK (1U << PFERR_GPTE_CHANGED_BIT) +#define PFERR_MMIO_MASK (1U << PFERR_MMIO_BIT) +#define PFERR_ONLY_VALID_MASK (1U << PFERR_ONLY_VALID_BIT) +#define PFERR_READ_PROT_MASK (1U << PFERR_READ_PROT_BIT) +#define PFERR_IS_UNMAPPED_MASK (1U << PFERR_IS_UNMAPPED_BIT) +#define PFERR_FAPB_MASK (1U << PFERR_FAPB_BIT) + +#define PFERR_ACCESS_SIZE_MASK (~0U << PFERR_ACCESS_SIZE_BIT) + +#define PFRES_GET_ACCESS_SIZE(pfres) \ + (((pfres) & PFERR_ACCESS_SIZE_MASK) >> PFERR_ACCESS_SIZE_BIT) +#define PFRES_SET_ACCESS_SIZE(pfres, size) \ + (((pfres) & ~PFERR_ACCESS_SIZE_MASK) | \ + ((size) << PFERR_ACCESS_SIZE_BIT)) + +/* try atomic/async page fault handling results */ +typedef enum try_pf_err { + TRY_PF_NO_ERR = 0, + TRY_PF_ONLY_VALID_ERR, + TRY_PF_MMIO_ERR, +} try_pf_err_t; + +#define TO_TRY_PF_ERR(errno) ((try_pf_err_t)(errno)) + +/* + * It is copy/paste from include/linux/kvm_host.h to add e2k-arch specific + * gfn -> pfn translation errors: + * For the normal pfn, the highest 12 bits should be zero, + * so we can mask bit 62 ~ bit 52 to indicate the error pfn, + * mask bit 63 to indicate the noslot pfn. 
+#define KVM_PFN_ERR_MASK (0x7ffULL << 52) +#define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52) +#define KVM_PFN_NOSLOT (0x1ULL << 63) + +#define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK) +#define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1) +#define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2) + * + * Do not forget modify here, if something changes in arch-indep header + */ +#define KVM_PFN_MMIO_FAULT (KVM_PFN_ERR_MASK + 9) + +/* MMIO space pfn indicates that the gfn is from IO space, */ +/* but not registered on host (by VM launcher) */ +static inline bool is_mmio_space_pfn(kvm_pfn_t pfn) +{ + return pfn == KVM_PFN_MMIO_FAULT; +} + +typedef struct kvm_arch_exception { + bool error_code_valid; /* PFERR_* flags is valid */ + u32 error_code; /* PFERR_* flags */ + u64 address; /* page fault gpa */ + u64 ip; /* IP to inject trap */ +} kvm_arch_exception_t; + +/* FIXME: following emulation is for x86 arch, so it need be updated */ +/* for e2k arch */ +enum emulation_result { + EMULATE_DONE, /* no further processing */ + EMULATE_USER_EXIT, /* kvm_run ready for userspace exit */ + EMULATE_FAIL, /* can't emulate this instruction */ +}; + +#define EMULTYPE_NO_DECODE (1 << 0) +#define EMULTYPE_TRAP_UD (1 << 1) +#define EMULTYPE_SKIP (1 << 2) +#define EMULTYPE_RETRY (1 << 3) +#define EMULTYPE_NO_REEXECUTE (1 << 4) + +static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, + kvm_arch_exception_t *exception) +{ + return gpa; +} + +/* FIXME: x86 can support 2 addresses spaces at role.smm */ +/* (in system management mode */ +#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, 0) + +typedef struct kvm_memory_slot kvm_memory_slot_t; + +static inline bool gfn_is_from_mmio_space(struct kvm_vcpu *vcpu, gfn_t gfn) +{ + gpa_t gpa = gfn_to_gpa(gfn); + + if (gpa >= VGA_VRAM_PHYS_BASE && + gpa < VGA_VRAM_PHYS_BASE + VGA_VRAM_SIZE) { + /* address is from VGA VRAM space */ + return true; + } else if (gpa >= sic_get_io_area_base() && + gpa < 
sic_get_io_area_base() + + sic_get_io_area_max_size()) { + /* address is from IO area space */ + return true; + } + return false; +} + +/* FIXME: follow define is from x86 arch */ +#define PF_VECTOR 14 + +static inline bool is_pae(struct kvm_vcpu *vcpu) +{ + if (is_ss(vcpu)) + return vcpu->arch.mmu.is_spae; + return true; +} + +static inline bool is_pse(struct kvm_vcpu *vcpu) +{ + if (is_ss(vcpu)) + return vcpu->arch.mmu.is_pse; + return false; +} + +static inline bool is_write_protection(struct kvm_vcpu *vcpu) +{ + if (is_ss(vcpu)) + pr_err("FIXME: %s() is not yet implemented\n", __func__); + return false; +} + +static inline void kvm_setup_paging_mode(struct kvm_vcpu *vcpu) +{ + if (vcpu->arch.is_pv && !vcpu->arch.is_hv) { + /* it is software full paravirtulization mode, */ +#ifdef CONFIG_KVM_NONPAGING_ENABLE + /* guest should be booted in nonpaging mode */ + reset_paging_flag(vcpu); +#else /* ! CONFIG_KVM_NONPAGING_ENABLE */ + /* nonpaging mode is not supported or should not be used */ + set_paging_flag(vcpu); +#endif /* CONFIG_KVM_NONPAGING_ENABLE */ + return; + } + +#ifdef CONFIG_KVM_NONPAGING_ENABLE + if (!vcpu->arch.is_hv) { + /* hardware has not virtualization support, */ + /* only software virtualization can be used (see above) */ + if (!vcpu->arch.is_pv) { + pr_err("%s(): hardware has not virtualization support, " + "only paravirtualized guests can be run on " + "this hypervisor\n", + __func__); + } + set_paging_flag(vcpu); + return; + } + reset_paging_flag(vcpu); + return; +#else /* ! 
CONFIG_KVM_NONPAGING_ENABLE */ + /* nonpaging mode is not supported or should not be used */ + if (!vcpu->arch.is_pv) { + pr_err("%s(): nonpaging mode is not supported or should " + "not be used on this hypervisor\n", + __func__); + } + set_paging_flag(vcpu); + return; +#endif /* CONFIG_KVM_NONPAGING_ENABLE */ +} + +static inline void +vcpu_write_SH_MMU_CR_reg(struct kvm_vcpu *vcpu, mmu_reg_t mmu_cr) +{ + if (vcpu->arch.is_hv) { + write_SH_MMU_CR_reg(mmu_cr); + } else if (vcpu->arch.is_pv) { + kvm_write_pv_vcpu_MMU_CR_reg(vcpu, mmu_cr); + } else { + KVM_BUG_ON(true); + } +} + +static inline bool is_long_mode(struct kvm_vcpu *vcpu) +{ + if (!is_ss(vcpu)) + return true; + pr_err("FIXME: %s() is not yet implemented\n", __func__); + return false; +} + +static inline bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level) +{ + pr_err_once("FIXME: %s() is not yet implemented\n", __func__); + return false; +} + +static inline bool is_cpuid_PSE36(void) +{ + pr_err("FIXME: %s() is not yet implemented\n", __func__); + return false; +} + +static inline int pse36_gfn_delta(pgprotval_t pte) +{ + pr_err("FIXME: %s() is not yet implemented\n", __func__); + return 0; +} + +static inline pgprotval_t get_vcpu_u2_pptb(struct kvm_vcpu *vcpu) +{ + if (is_ss(vcpu)) + return vcpu->arch.mmu.u2_pptb & PAGE_MASK; + return E2K_INVALID_PAGE; +} +static inline e2k_addr_t get_vcpu_mpt_b(struct kvm_vcpu *vcpu) +{ + if (is_ss(vcpu)) + return vcpu->arch.mmu.mpt_b; + return E2K_INVALID_PAGE; +} +static inline void set_vcpu_u2_pptb(struct kvm_vcpu *vcpu, pgprotval_t base) +{ + if (is_ss(vcpu)) + vcpu->arch.mmu.u2_pptb = base; +} +static inline void set_vcpu_mpt_b(struct kvm_vcpu *vcpu, e2k_addr_t base) +{ + if (is_ss(vcpu)) + vcpu->arch.mmu.mpt_b = base; +} + +static inline pgprotval_t get_vcpu_pdpte(struct kvm_vcpu *vcpu, int no) +{ + if (is_ss(vcpu)) + return vcpu->arch.mmu.pdptes[no]; + return 0; +} + +/* + * Return zero if the access does not fault; return the page fault error code + * 
if the access faults. + */ +static inline u32 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, + unsigned pte_access, unsigned pte_pkey, + unsigned pfec) +{ + u32 errcode = PFERR_PRESENT_MASK; + + if (is_ss(vcpu)) + pr_err("FIXME: %s() is not implemented\n", __func__); + + /* It need investigate how PFERR_NOT_PRESENT_MASK fault to lead to */ + /* present pte at guest PT and not present at shadow PT */ + KVM_BUG_ON(pfec & PFERR_PT_FAULT_MASK); + + if ((pfec & (PFERR_WRITE_MASK | PFERR_WAIT_LOCK_MASK)) && + !(pte_access & ACC_WRITE_MASK)) + /* try write to write protected page (by pte) */ + return errcode | PFERR_WRITE_MASK; + if ((pfec & PFERR_USER_MASK) && !(pfec & PFERR_FAPB_MASK) && + !(pte_access & ACC_WRITE_MASK)) + /* try access from user to privileged page */ + return errcode | PFERR_USER_MASK; + if ((pfec & (PFERR_INSTR_PROT_MASK | PFERR_INSTR_FAULT_MASK)) && + !(pte_access & ACC_EXEC_MASK)) + /* try execute not executable page */ + return errcode | PFERR_INSTR_PROT_MASK; + + return 0; +} + +/* FIXME: it need implementore priecision flush of various MMU TLBs */ +/* instead of flush all TLBs */ +static inline void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu) +{ +/* ++vcpu->stat.tlb_flush; */ + __flush_tlb_all(); + __flush_icache_all(); +} + +typedef struct kvm_spt_entry { + pgprot_t *sptep; + pgprot_t spte; +} kvm_spt_entry_t; +typedef struct kvm_shadow_trans { + e2k_addr_t addr; + kvm_spt_entry_t pt_entries[E2K_PT_LEVELS_NUM + 1]; + int last_level; +} kvm_shadow_trans_t; + +typedef struct kvm_shadow_walk_iterator { + e2k_addr_t addr; + hpa_t shadow_addr; + pgprot_t *sptep; + const pt_struct_t *pt_struct; + const pt_level_t *pt_level; + int level; + unsigned index; +} kvm_shadow_walk_iterator_t; + +void shadow_walk_init(kvm_shadow_walk_iterator_t *iterator, + struct kvm_vcpu *vcpu, u64 addr); +bool shadow_walk_okay(kvm_shadow_walk_iterator_t *iterator); +void __shadow_walk_next(kvm_shadow_walk_iterator_t *iterator, pgprot_t spte); +void 
shadow_walk_next(struct kvm_shadow_walk_iterator *iterator); + +#define for_each_shadow_entry(_vcpu, _addr, _walker) \ + for (shadow_walk_init(&(_walker), _vcpu, _addr); \ + shadow_walk_okay(&(_walker)); \ + shadow_walk_next(&(_walker))) + +#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \ + for (shadow_walk_init(&(_walker), _vcpu, _addr); \ + shadow_walk_okay(&(_walker)) && \ + ({ spte = mmu_spte_get_lockless( \ + _walker.sptep); \ + true; \ + }); \ + __shadow_walk_next(&(_walker), spte)) + +extern int kvm_get_va_spt_translation(struct kvm_vcpu *vcpu, e2k_addr_t address, + mmu_spt_trans_t __user *user_trans_info); + +/* + * Return values of handle_mmio_page_fault: + * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction + * directly. + * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page + * fault path update the mmio spte. + * RET_MMIO_PF_RETRY: let CPU fault again on the address. + * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed). + */ +typedef enum ret_mmio_pf { + RET_MMIO_PF_EMULATE = 1, + RET_MMIO_PF_INVALID = 2, + RET_MMIO_PF_RETRY = 0, + RET_MMIO_PF_BUG = -1, +} ret_mmio_pf_t; + +static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu, + gva_t gva, gfn_t gfn, unsigned access) +{ + vcpu->arch.mmio_gva = gva & PAGE_MASK; + vcpu->arch.access = access; + vcpu->arch.mmio_gfn = gfn; + vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation; +} + +static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation; +} + +/* + * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we + * clear all mmio cache info. 
+ */ +#define MMIO_GVA_ANY (~(gva_t)0) + +static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva) +{ + if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK)) + return; + + vcpu->arch.mmio_gva = 0; +} + +static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva) +{ + if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva && + vcpu->arch.mmio_gva == (gva & PAGE_MASK)) + return true; + + return false; +} + +static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) +{ + if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn && + vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT) + return true; + + return false; +} + +extern bool tdp_enabled; + +extern int kvm_mmu_load(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + unsigned flags); +extern void kvm_mmu_unload(struct kvm_vcpu *vcpu, unsigned flags); +extern void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp); + +extern void kvm_setup_mmu_intc_mode(struct kvm_vcpu *vcpu); + +extern void kvm_mmu_set_mmio_spte_mask(pgprotval_t mmio_mask); + +extern void direct_unmap_prefixed_mmio_gfn(struct kvm *kvm, gfn_t gfn); + +extern int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, gfn_t *gfn, + bool direct); +extern void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu); +extern void kvm_init_shadow_tdp_mmu(struct kvm_vcpu *vcpu, bool execonly); +extern pgprot_t *kvm_hva_to_pte(e2k_addr_t address); +extern int e2k_shadow_pt_protection_fault(struct kvm_vcpu *vcpu, + struct gmm_struct *gmm, gpa_t addr, kvm_mmu_page_t *sp); +extern int kvm_prefetch_mmu_area(struct kvm_vcpu *vcpu, + gva_t start, gva_t end, u32 error_code); +extern gpa_t e2k_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access, + kvm_arch_exception_t *exception); +extern void guest_pv_vcpu_state_to_paging(struct kvm_vcpu *vcpu); +extern gpa_t kvm_vcpu_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t gva, u32 access, + kvm_arch_exception_t *exception); +extern void kvm_mmu_flush_gva(struct kvm_vcpu *vcpu, 
gva_t gva); +extern void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu, unsigned flags); + +static inline int is_writable_pte(pgprot_t pte) +{ + return pgprot_val(pte) & PT_WRITABLE_MASK; +} + +void kvm_mmu_slot_remove_write_access(struct kvm *kvm, + struct kvm_memory_slot *memslot); +void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, + const struct kvm_memory_slot *memslot); +unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm); +void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages); + +void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end); +int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn); + +void kvm_mmu_gfn_disallow_lpage(struct kvm *kvm, + kvm_memory_slot_t *slot, gfn_t gfn); +void kvm_mmu_gfn_allow_lpage(struct kvm *kvm, + kvm_memory_slot_t *slot, gfn_t gfn); +bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, + kvm_memory_slot_t *slot, u64 gfn); +int kvm_sync_init_shadow_pt(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + gpa_t phys_ptb, gva_t virt_ptb, + gpa_t os_phys_ptb, gva_t os_virt_ptb, gva_t os_virt_base); + +extern void kvm_vmlpt_kernel_spte_set(struct kvm *kvm, + pgprot_t *spte, pgprot_t *root); +extern void kvm_vmlpt_user_spte_set(struct kvm *kvm, + pgprot_t *spte, pgprot_t *root); + +extern void kvm_update_guest_stacks_registers(struct kvm_vcpu *vcpu, + guest_hw_stack_t *stack_regs); +extern void kvm_set_mmu_guest_pt(struct kvm_vcpu *vcpu); +extern void kvm_set_mmu_guest_u_pt(struct kvm_vcpu *vcpu); +extern void kvm_switch_mmu_guest_u_pt(struct kvm_vcpu *vcpu); +extern void kvm_setup_mmu_spt_context(struct kvm_vcpu *vcpu); +extern void kvm_setup_mmu_tdp_context(struct kvm_vcpu *vcpu); +extern void kvm_setup_mmu_tdp_u_pt_context(struct kvm_vcpu *vcpu); + +extern int mmu_pv_create_tdp_user_pt(struct kvm_vcpu *vcpu, gpa_t u_phys_ptb); + +extern void mmu_pv_setup_shadow_u_pptb(struct kvm_vcpu *vcpu, + gmm_struct_t *gmm); +extern void kvm_setup_shadow_u_pptb(struct kvm_vcpu *vcpu); +extern void 
kvm_setup_shadow_os_pptb(struct kvm_vcpu *vcpu); +extern int kvm_switch_shadow_u_pptb(struct kvm_vcpu *vcpu, gpa_t u_pptb, + hpa_t *u_root); +extern int kvm_switch_shadow_os_pptb(struct kvm_vcpu *vcpu, gpa_t os_pptb, + hpa_t *os_root); +extern int kvm_prepare_shadow_user_pt(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + gpa_t u_phys_ptb); +extern void kvm_dump_shadow_os_pt_regs(struct kvm_vcpu *vcpu); +extern void copy_guest_kernel_root_range(struct kvm_vcpu *vcpu, + gmm_struct_t *gmm, struct kvm_mmu_page *sp, pgprot_t *src_root); +extern void mmu_zap_linked_children(struct kvm *kvm, + struct kvm_mmu_page *parent); + +static inline void +set_guest_kernel_pgd_range(pgd_t *dst_pgd, pgd_t pgd_to_set) +{ + set_pgd_range(dst_pgd, pgd_to_set, GUEST_KERNEL_PGD_PTRS_START, + GUEST_KERNEL_PGD_PTRS_END); +} + +static inline void +kvm_prepare_shadow_root(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + hpa_t root, hpa_t gp_root, gva_t vptb) +{ + pgprot_t *new_root; + int pt_index; + + KVM_BUG_ON(!VALID_PAGE(root)); + + /* copy kernel part of root page table entries to enable host */ + /* traps and hypercalls on guest */ + + if (likely(!MMU_IS_SEPARATE_PT() && !vcpu->arch.is_hv)) { + kvm_mmu_page_t *sp; + pgd_t *new_pgd, *init_pgd; + pgprot_t *src_root; + unsigned long flags; + + sp = page_header(root); + new_pgd = (pgd_t *)__va(root); + init_pgd = kvm_mmu_get_init_gmm_root(vcpu->kvm); + + KVM_BUG_ON(vcpu->cpu < 0); + copy_kernel_pgd_range(new_pgd, cpu_kernel_root_pt); + + src_root = (pgprot_t *)init_pgd; + if (init_pgd != NULL) + copy_guest_kernel_root_range(vcpu, gmm, sp, src_root); + + /* copy MMU context of the guest nonpaging PT on host */ + spin_lock(&vcpu->kvm->mmu_lock); + raw_all_irq_save(flags); + if (!IS_E2K_INVALID_PAGE(gp_root)) { + KVM_BUG_ON(!pv_vcpu_is_init_gmm(vcpu, + pv_vcpu_get_gmm(vcpu))); + } else { + KVM_BUG_ON(is_paging(vcpu) && + pv_vcpu_is_init_gmm(vcpu, + pv_vcpu_get_gmm(vcpu))); + get_new_mmu_pid(pv_vcpu_get_gmm_context(vcpu), + smp_processor_id()); + } + 
sp->root_flags.has_host_pgds = 1; + sp->host_synced = true; + if (init_pgd != NULL) + sp->guest_kernel_synced = true; + raw_all_irq_restore(flags); + spin_unlock(&vcpu->kvm->mmu_lock); + pt_index = pgd_index(MMU_UNITED_KERNEL_VPTB); + } else { + pt_index = pgd_index(vptb); + } + + /* One PGD entry is the VPTB self-map. */ + new_root = (pgprot_t *)__va(root); + kvm_vmlpt_kernel_spte_set(vcpu->kvm, &new_root[pt_index], new_root); +} + +static inline void +mmu_pv_prepare_spt_u_root(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, hpa_t root) +{ + pgd_t *new_pgd; + pgprot_t *new_root, *src_root; + kvm_mmu_page_t *sp; + unsigned long flags; + int pt_index; + + KVM_BUG_ON(!VALID_PAGE(root)); + + /* copy kernel part of root page table entries to enable host */ + /* traps and hypercalls on guest */ + new_pgd = (pgd_t *)__va(root); + KVM_BUG_ON(vcpu->cpu < 0); + copy_kernel_pgd_range(new_pgd, cpu_kernel_root_pt); + + src_root = (pgprot_t *)kvm_mmu_get_init_gmm_root(vcpu->kvm); + KVM_BUG_ON(src_root == NULL); + sp = page_header(root); + copy_guest_kernel_root_range(vcpu, gmm, sp, src_root); + + /* create new host MMU context for guest user process */ + KVM_BUG_ON(pv_vcpu_is_init_gmm(vcpu, gmm)); + spin_lock(&vcpu->kvm->mmu_lock); + raw_all_irq_save(flags); + get_new_mmu_pid(&gmm->context, smp_processor_id()); + sp->root_flags.has_host_pgds = 1; + sp->host_synced = true; + sp->guest_kernel_synced = true; + raw_all_irq_restore(flags); + spin_unlock(&vcpu->kvm->mmu_lock); + + /* One PGD entry is the VPTB self-map. 
*/ + pt_index = pgd_index(MMU_UNITED_KERNEL_VPTB); + new_root = (pgprot_t *)__va(root); + kvm_vmlpt_kernel_spte_set(vcpu->kvm, &new_root[pt_index], new_root); +} + +static inline void +kvm_clear_shadow_root(struct kvm *kvm, struct kvm_mmu_page *sp) +{ + pgd_t *pgd; + int root_pt_index; + pgd_t zero; + + if (unlikely(MMU_IS_SEPARATE_PT() || kvm->arch.is_hv)) + return; + + pgd = (pgd_t *)sp->spt; + pgd_val(zero) = 0; + + if (likely(sp->root_flags.has_host_pgds)) { + /* clear host kernel part of root page table entries */ + set_kernel_pgd_range(pgd, zero); + } + + if (unlikely(sp->root_flags.has_guest_pgds)) { + /* clear guest kernel part of root page table entries */ + set_guest_kernel_pgd_range(pgd, zero); + } else { + mmu_zap_linked_children(kvm, sp); + } + + /* One PGD entry is the VPTB self-map. */ + root_pt_index = pgd_index(MMU_UNITED_KERNEL_VPTB); + pgd[root_pt_index] = zero; +} + +static inline int +kvm_switch_tdp_u_pptb(struct kvm_vcpu *vcpu, gpa_t u_pptb) +{ + return 0; /* nothing now to do */ +} + +static inline int +kvm_switch_tdp_os_pptb(struct kvm_vcpu *vcpu, gpa_t os_pptb) +{ + return 0; /* nothing now to do */ +} + +static inline void +kvm_set_vcpu_the_pt_context(struct kvm_vcpu *vcpu, unsigned flags) +{ + vcpu->arch.mmu.set_vcpu_pt_context(vcpu, flags); +} + +static inline void kvm_set_vcpu_pt_context(struct kvm_vcpu *vcpu) +{ + kvm_set_vcpu_the_pt_context(vcpu, + OS_ROOT_PT_FLAG | U_ROOT_PT_FLAG | GP_ROOT_PT_FLAG); +} + +static inline void kvm_set_vcpu_u_pt_context(struct kvm_vcpu *vcpu) +{ + kvm_set_vcpu_the_pt_context(vcpu, U_ROOT_PT_FLAG); +} + +static inline void kvm_set_vcpu_os_pt_context(struct kvm_vcpu *vcpu) +{ + kvm_set_vcpu_the_pt_context(vcpu, OS_ROOT_PT_FLAG); +} + +static inline void kvm_set_vcpu_gp_pt_context(struct kvm_vcpu *vcpu) +{ + kvm_set_vcpu_the_pt_context(vcpu, GP_ROOT_PT_FLAG); +} + +static inline int get_vcpu_mu_events_num(struct kvm_vcpu *vcpu) +{ + if (likely(vcpu->arch.is_hv || vcpu->arch.is_pv)) { + return 
vcpu->arch.intc_ctxt.mu_num; + } else { + KVM_BUG_ON(true); + } + return 0; +} + +static inline bool pv_vcpu_is_init_root_hpa(struct kvm_vcpu *vcpu, hpa_t root) +{ + gmm_struct_t *init_gmm = pv_vcpu_get_init_gmm(vcpu); + + return root == init_gmm->root_hpa; +} + +static inline void set_vcpu_mu_events_num(struct kvm_vcpu *vcpu, int events_num) +{ + KVM_BUG_ON(events_num <= vcpu->arch.intc_ctxt.mu_num); + if (likely(vcpu->arch.is_hv || vcpu->arch.is_pv)) { + vcpu->arch.intc_ctxt.mu_num = events_num; + } else { + KVM_BUG_ON(true); + } +} + +static inline void set_vcpu_mu_cur_event_no(struct kvm_vcpu *vcpu, int event_no) +{ + int events_num = get_vcpu_mu_events_num(vcpu); + + KVM_BUG_ON(events_num >= 0 && event_no > events_num); + if (likely(vcpu->arch.is_hv || vcpu->arch.is_pv)) { + vcpu->arch.intc_ctxt.cur_mu = event_no; + } else { + KVM_BUG_ON(true); + } +} + +static inline intc_mu_state_t *get_intc_mu_state(struct kvm_vcpu *vcpu) +{ + if (likely(vcpu->arch.is_hv || vcpu->arch.is_pv)) { + int evn_no = vcpu->arch.intc_ctxt.cur_mu; + return &vcpu->arch.intc_ctxt.mu_state[evn_no]; + } else { + KVM_BUG_ON(true); + } + return NULL; +} + +#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) +static inline void kvm_clear_intc_mu_state(struct kvm_vcpu *vcpu) +{ + intc_mu_state_t *mu_state = vcpu->arch.intc_ctxt.mu_state; + int mu_num; + int no; + + mu_num = get_vcpu_mu_events_num(vcpu); + + for (no = 0; no < mu_num; no++) { + intc_mu_state_t *entry; + + entry = &mu_state[no]; + entry->notifier_seq = 0; + entry->pfres = PFRES_NO_ERR; + entry->may_be_retried = false; + entry->ignore_notifier = false; + } +} +#else +static inline void kvm_clear_intc_mu_state(struct kvm_vcpu *vcpu) +{ +} +#endif /* CONFIG_MMU_NOTIFIER) && KVM_ARCH_WANT_MMU_NOTIFIER */ + +#ifdef CONFIG_KVM_HV_MMU +static inline int kvm_arch_init_vm_mmu(struct kvm *kvm) +{ + kvm->arch.nonp_root_hpa = E2K_INVALID_PAGE; + + INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); + 
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); + INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); + + return 0; +} + +static inline void kvm_init_sw_ctxt(struct kvm_vcpu *vcpu) +{ + struct kvm_sw_cpu_context *sw_ctxt = &vcpu->arch.sw_ctxt; + vcpu_boot_stack_t *boot_stacks = &vcpu->arch.boot_stacks; + e2k_stacks_t *boot_regs = &boot_stacks->regs.stacks; + e2k_mem_crs_t *crs = &boot_stacks->regs.crs; + + /* set to initial state some fields */ + sw_ctxt->saved.valid = false; + + /* set pointers of guest boot-time local stacks to initial state */ + sw_ctxt->sbr.SBR_reg = boot_regs->top; + sw_ctxt->usd_lo = boot_regs->usd_lo; + sw_ctxt->usd_hi = boot_regs->usd_hi; + if (vcpu->arch.is_pv) { + sw_ctxt->crs.cr0_lo = crs->cr0_lo; + sw_ctxt->crs.cr0_hi = crs->cr0_hi; + sw_ctxt->crs.cr1_lo = crs->cr1_lo; + sw_ctxt->crs.cr1_hi = crs->cr1_hi; + } + + /* set pointer to CUTD area */ + if (vcpu->arch.is_pv) { + sw_ctxt->cutd = kvm_get_guest_vcpu_CUTD(vcpu); + } else { + sw_ctxt->cutd.CUTD_reg = 0; + } + + sw_ctxt->trap_count = 0; + + /* Initialize CLW context */ + sw_ctxt->us_cl_d = 1; + sw_ctxt->us_cl_b = 0; + sw_ctxt->us_cl_up = 0; + sw_ctxt->us_cl_m0 = 0; + sw_ctxt->us_cl_m1 = 0; + sw_ctxt->us_cl_m2 = 0; + sw_ctxt->us_cl_m3 = 0; +} +extern int kvm_hv_setup_nonpaging_mode(struct kvm_vcpu *vcpu); +extern int write_to_guest_pt_phys(struct kvm_vcpu *vcpu, gpa_t gpa, + const pgprot_t *gpte, int bytes); +extern int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva); +extern void mmu_init_nonpaging_intc(struct kvm_vcpu *vcpu); +#else /* ! 
CONFIG_KVM_HV_MMU */ +static inline int kvm_arch_init_vm_mmu(struct kvm *kvm) +{ + return 0; +} +static inline int kvm_hv_setup_nonpaging_mode(struct kvm_vcpu *vcpu) +{ + KVM_BUG_ON(!is_paging(vcpu)); + /* guest physical addresses are translated by hypervisor */ + /* OS_* / U_* PTs */ + vcpu->arch.mmu.os_pptb = (pgprotval_t)NATIVE_READ_MMU_OS_PPTB_REG(); + vcpu->arch.mmu.os_vptb = NATIVE_READ_MMU_OS_VPTB_REG(); + vcpu->arch.mmu.u_pptb = + (pgprotval_t)__pa(kvm_mmu_get_init_gmm_root(vcpu->kvm)); + vcpu->arch.mmu.u_vptb = NATIVE_READ_MMU_U_VPTB_REG(); + vcpu->arch.mmu.sh_os_vab = NATIVE_READ_MMU_OS_VAB_REG(); + vcpu->arch.mmu.tc_gpa = NATIVE_READ_MMU_TRAP_POINT(); + return 0; +} +static inline void kvm_init_sw_ctxt(struct kvm_vcpu *vcpu) +{ + struct kvm_sw_cpu_context *sw_ctxt = &vcpu->arch.sw_ctxt; + + /* In this case guest PT are the same as host PTs */ + sw_ctxt->sh_u_pptb = vcpu->arch.mmu.u_pptb; + sw_ctxt->sh_u_vptb = vcpu->arch.mmu.u_vptb; + sw_ctxt->tc_hpa = vcpu->arch.mmu.tc_gpa; +} +#endif /* CONFIG_KVM_HV_MMU */ + +#ifdef CONFIG_KVM_SHADOW_PT_ENABLE + +extern int kvm_mmu_module_init(void); +extern void kvm_mmu_module_exit(void); + +extern void kvm_mmu_destroy(struct kvm_vcpu *vcpu); +extern int kvm_mmu_create(struct kvm_vcpu *vcpu); +extern void kvm_mmu_setup(struct kvm_vcpu *vcpu); +extern void kvm_mmu_init_vm(struct kvm *kvm); +extern void kvm_mmu_uninit_vm(struct kvm *kvm); +extern int kvm_pv_switch_guest_mm(struct kvm_vcpu *vcpu, + int gpid_nr, int gmmid_nr, gpa_t u_phys_ptb); + +void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen); +void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm); + +extern int kvm_pv_vcpu_mmu_state(struct kvm_vcpu *vcpu, + vcpu_gmmu_info_t __user *mmu_info); +extern int kvm_create_shadow_user_pt(struct kvm_vcpu *vcpu, + gmm_struct_t *gmm, gpa_t u_phys_ptb); +extern hpa_t mmu_pv_switch_spt_u_pptb(struct kvm_vcpu *vcpu, + gmm_struct_t *gmm, gpa_t u_phys_ptb); +extern int mmu_pv_switch_tdp_u_pptb(struct kvm_vcpu *vcpu, + 
int pid, gpa_t u_phys_ptb); + +extern int kvm_hv_setup_tdp_paging(struct kvm_vcpu *vcpu); +extern int kvm_hv_setup_shadow_paging(struct kvm_vcpu *vcpu, gmm_struct_t *gmm); +extern void mmu_get_spt_roots(struct kvm_vcpu *vcpu, unsigned flags, + hpa_t *os_root_p, hpa_t *u_root_p, hpa_t *gp_root_p); +extern void mmu_check_invalid_roots(struct kvm_vcpu *vcpu, bool invalid, + unsigned flags); + +extern int kvm_switch_to_tdp_paging(struct kvm_vcpu *vcpu, + gpa_t u_phys_ptb, gva_t u_virt_ptb, + gpa_t os_phys_ptb, gva_t os_virt_ptb, gva_t os_virt_base); +extern gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr, + u32 access, kvm_arch_exception_t *exception); +extern pgprot_t nonpaging_gpa_to_pte(struct kvm_vcpu *vcpu, gva_t addr); + +static inline void +pv_vcpu_switch_to_init_spt(struct kvm_vcpu *vcpu, gmm_struct_t *gmm) +{ + gmm_struct_t *init_gmm = pv_vcpu_get_init_gmm(vcpu); + pgd_t *root; + + KVM_BUG_ON(gmm == init_gmm); + root = kvm_mmu_get_gmm_root(init_gmm); + kvm_set_space_type_spt_u_root(vcpu, (hpa_t)__pa(root)); + if (likely(!is_sep_virt_spaces(vcpu))) { + kvm_set_space_type_guest_u_root(vcpu, init_gmm->u_pptb); + } else { + kvm_set_space_type_guest_os_root(vcpu, init_gmm->os_pptb); + } + kvm_set_vcpu_os_pt_context(vcpu); +} + +static inline void kvm_mmu_unload_gmm_root(struct kvm_vcpu *vcpu) +{ + gmm_struct_t *cur_gmm; + hpa_t u_root; + + cur_gmm = pv_vcpu_get_gmm(vcpu); + if (pv_vcpu_is_init_gmm(vcpu, cur_gmm)) { + /* current gmm is init_gmm, cannot be unliaded */ + return; + } + mmu_get_spt_roots(vcpu, U_ROOT_PT_FLAG, NULL, &u_root, NULL); + KVM_BUG_ON(!VALID_PAGE(u_root)); + if (unlikely(pv_vcpu_is_init_root_hpa(vcpu, u_root))) { + /* current root PT is guest kernel init PT, */ + /* cannot be unloaded */ + ; + } else { + kvm_mmu_unload(vcpu, U_ROOT_PT_FLAG); + } + pv_vcpu_clear_gmm(vcpu); + pv_vcpu_set_active_gmm(vcpu, pv_vcpu_get_init_gmm(vcpu)); + if (likely(!pv_vcpu_is_init_root_hpa(vcpu, u_root))) { + pv_vcpu_switch_to_init_spt(vcpu, cur_gmm); 
+ } +} + +static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm) +{ + if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages) + return kvm->arch.n_max_mmu_pages - + kvm->arch.n_used_mmu_pages; + + return 0; +} + +static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + unsigned flags) +{ + mmu_check_invalid_roots(vcpu, true /* invalid ? */, flags); + + return kvm_mmu_load(vcpu, gmm, flags); +} + +static inline int kvm_mmu_populate_area(struct kvm *kvm, + e2k_addr_t area_start, e2k_addr_t area_end) +{ + struct kvm_vcpu *vcpu; + int ret; + + if (!kvm->arch.is_pv) + return 0; + + /* guest is software virtualized KVM, so it need */ + /* populate locked area on shadow PTs */ + vcpu = current_thread_info()->vcpu; + KVM_BUG_ON(vcpu == NULL); + if (unlikely(!vcpu->arch.mmu.shadow_pt_on)) + /* shadow PT is not yet enabled to use */ + return 0; + ret = kvm_prefetch_mmu_area(vcpu, area_start, area_end, + PFERR_PRESENT_MASK | PFERR_WRITE_MASK); + return ret; +} + +extern int kvm_pv_mmu_ptep_get_and_clear(struct kvm_vcpu *vcpu, gpa_t gpa, + void __user *old_gpte, int as_valid); +extern int kvm_pv_mmu_pt_atomic_update(struct kvm_vcpu *vcpu, int gmmid_nr, + gpa_t gpa, void __user *old_gpte, + pt_atomic_op_t atomic_op, + unsigned long prot_mask); +extern void mmu_free_spt_root(struct kvm_vcpu *vcpu, hpa_t root_hpa); +extern void mmu_release_spt_root(struct kvm_vcpu *vcpu, hpa_t root_hpa); +extern int reexecute_load_and_wait_page_fault(struct kvm_vcpu *vcpu, + trap_cellar_t *tcellar, gfn_t gfn, pt_regs_t *regs); +extern void release_gmm_root_pt(struct kvm_vcpu *vcpu, gmm_struct_t *gmm); + +#else /* ! 
CONFIG_KVM_SHADOW_PT_ENABLE */ + +static inline int kvm_mmu_module_init(void) +{ + return 0; +} +static inline void kvm_mmu_module_exit(void) +{ + return; +} + +static inline void kvm_mmu_destroy(struct kvm_vcpu *vcpu) +{ + return; +} +static inline int kvm_mmu_create(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.gva_to_gpa = kvm_vcpu_gva_to_gpa; + kvm_setup_paging_mode(vcpu); + return 0; +} +static inline void kvm_mmu_setup(struct kvm_vcpu *vcpu) +{ + return; +} +static inline void kvm_mmu_init_vm(struct kvm *kvm) +{ + return; +} +static inline void kvm_mmu_uninit_vm(struct kvm *kvm) +{ + return; +} +static inline int +kvm_pv_switch_guest_mm(struct kvm_vcpu *vcpu, + int gpid_nr, int gmmid_nr, gpa_t u_phys_ptb) +{ + KVM_BUG_ON(true); + return 0; +} + +static inline void +kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen) +{ + return; +} +static inline void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm) +{ + return; +} + +static inline pgprot_t +nonpaging_gpa_to_pte(struct kvm_vcpu *vcpu, gva_t addr) +{ + KVM_BUG_ON(!is_paging(vcpu)); + return __pgprot(0); +} +static inline gpa_t +nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr, + u32 access, kvm_arch_exception_t *exception) +{ + return vaddr; +} + +static inline int switch_to_shadow_pt(struct kvm_vcpu *vcpu, + e2k_addr_t phys_ptb, e2k_addr_t virt_ptb) +{ + return 0; +} +static inline int +kvm_switch_to_tdp_paging(struct kvm_vcpu *vcpu, + gpa_t u_phys_ptb, gva_t u_virt_ptb, + gpa_t os_phys_ptb, gva_t os_virt_ptb, gva_t os_virt_base) +{ + KVM_BUG_ON(true); + return 0; +} +static inline int kvm_mmu_pv_page_fault(struct kvm_vcpu *vcpu, + struct pt_regs *regs, trap_cellar_t *tcellar, + bool user_mode) +{ + /* guest is user of host, so host should handle page fault */ + return -1; +} + +static inline int kvm_mmu_populate_area(struct kvm *kvm, + e2k_addr_t start, e2k_addr_t end) +{ + return 0; +} + +static inline int +kvm_pv_mmu_ptep_get_and_clear(struct kvm_vcpu *vcpu, gpa_t gpa, + void __user *old_gpte, int 
as_valid) +{ + pr_warn_once("%s(): the hypervisor does not support guest MMU based " + "on Shadow PTs, so this call/hypercall cannot be done\n", + __func__); + return -ENOTTY; +} +static inline int +kvm_pv_mmu_pt_atomic_op(struct kvm_vcpu *vcpu, gpa_t gpa, void __user *old_gpte, + pt_atomic_op_t atomic_op, unsigned long prot_mask) +{ + pr_warn_once("%s(): the hypervisor does not support guest MMU based " + "on Shadow PTs, so this call/hypercall cannot be done\n", + __func__); + return -ENOTTY; +} +#endif /* CONFIG_KVM_SHADOW_PT_ENABLE */ + +#define MMU_GUEST_PHYS_PT_VPTB MMU_SEPARATE_KERNEL_VPTB +#define MMU_GUEST_OS_PT_VPTB MMU_SEPARATE_KERNEL_VPTB +#define MMU_GUEST_USER_PT_VPTB MMU_SEPARATE_USER_VPTB +#define MMU_GUEST_OS_VAB MMU_ADDR_TO_VAB(GUEST_PAGE_OFFSET) + +extern int kvm_pv_activate_guest_mm(struct kvm_vcpu *vcpu, + gmm_struct_t *new_gmm, gpa_t u_phys_ptb); +extern int kvm_pv_prepare_guest_mm(struct kvm_vcpu *vcpu, + gmm_struct_t *new_gmm, gpa_t u_phys_ptb); + +static inline bool kvm_is_shadow_pt_enable(struct kvm *kvm) +{ + return kvm->arch.shadow_pt_enable; +} +static inline void kvm_shadow_pt_disable(struct kvm *kvm) +{ + kvm->arch.shadow_pt_enable = false; +} + +static inline bool kvm_is_phys_pt_enable(struct kvm *kvm) +{ + return kvm->arch.phys_pt_enable; +} +static inline void kvm_phys_pt_disable(struct kvm *kvm) +{ + kvm->arch.phys_pt_enable = false; +} + +static inline bool kvm_is_tdp_enable(struct kvm *kvm) +{ + return kvm->arch.tdp_enable; +} +static inline void kvm_tdp_disable(struct kvm *kvm) +{ + kvm->arch.tdp_enable = false; +} + +static inline int kvm_disable_tdp_mode(struct kvm *kvm) +{ + if (!kvm_is_tdp_enable(kvm)) + return 0; + + if (!kvm_is_shadow_pt_enable(kvm)) { + pr_err("%s(): TDP mode should be disabled, but shadow PT mode " + "is too disabled\n", + __func__); + return -EINVAL; + } + kvm_tdp_disable(kvm); + return 0; +} + +/* + * Guest "physical" memory layout + */ +static inline bool kvm_is_ram_gfn(gfn_t gfn) +{ + e2k_addr_t 
phys_addr = gfn << PAGE_SHIFT; + + if (phys_addr >= GUEST_RAM_PHYS_BASE && + phys_addr < GUEST_RAM_PHYS_BASE + + GUEST_MAX_RAM_SIZE) + return true; + else + return false; +} +static inline bool kvm_is_vcpu_vram_gfn(gfn_t gfn) +{ + e2k_addr_t phys_addr = gfn << PAGE_SHIFT; + + if (phys_addr >= GUEST_VCPU_VRAM_PHYS_BASE && + phys_addr < GUEST_VCPU_VRAM_PHYS_BASE + + GUEST_MAX_VCPU_VRAM_SIZE) + return true; + else + return false; +} +static inline bool kvm_is_io_vram_gfn(gfn_t gfn) +{ + e2k_addr_t phys_addr = gfn << PAGE_SHIFT; + + if (phys_addr >= GUEST_IO_VRAM_PHYS_BASE && + phys_addr < GUEST_IO_VRAM_PHYS_BASE + + GUEST_IO_VRAM_SIZE) + return true; + else + return false; +} + +extern int kvm_init_vcpu_root_pt(struct kvm_vcpu *vcpu); +extern void kvm_free_vcpu_root_pt(void); + +extern void kvm_init_mmu_state(struct kvm_vcpu *vcpu); +extern unsigned int kvm_get_guest_vcpu_mmu_trap_count(struct kvm_vcpu *vcpu); +extern void kvm_set_guest_vcpu_mmu_trap_count(struct kvm_vcpu *vcpu, + unsigned int count); +extern void kvm_get_guest_vcpu_tc_entry(struct kvm_vcpu *vcpu, + int tc_no, trap_cellar_t *tc_entry); +extern int kvm_add_guest_vcpu_tc_entry(struct kvm_vcpu *vcpu, + e2k_addr_t address, tc_cond_t condition, u64 *data); + +extern int kvm_gva_to_memslot_unaliased(struct kvm *kvm, gva_t gva); +extern gva_t kvm_gva_to_gpa(struct kvm *kvm, gva_t gva); +extern void kvm_free_user_pages(gva_t start, gva_t end); +extern int kvm_map_host_ttable_to_shadow(struct kvm *kvm, + e2k_addr_t kernel_base, gva_t shadow_base); +extern int kvm_find_shadow_slot(struct kvm *kvm, int slot, + e2k_addr_t kernel_addr, gva_t shadow_addr); +extern e2k_addr_t kvm_guest_kernel_addr_to_hva(struct kvm_vcpu *vcpu, + e2k_addr_t address); +extern int kvm_e2k_paravirt_page_prefault(pt_regs_t *regs, + trap_cellar_t *tcellar); +extern int kvm_arch_vm_fault(struct vm_fault *vmf); + +extern e2k_addr_t kvm_print_guest_kernel_ptes(e2k_addr_t address); +extern e2k_addr_t print_user_address_ptes(struct mm_struct 
*mm, + e2k_addr_t address); +extern e2k_addr_t kvm_print_guest_user_address_ptes(struct kvm *kvm, + int gmmid_nr, unsigned long address); + +/* + * Convert guest kernel address matching with host address to shadow + * user address on host + */ +static inline void *kvm_get_guest_shadow_addr(void *src) +{ + thread_info_t *ti = native_current_thread_info(); + struct kvm_vcpu *vcpu = ti->vcpu; + e2k_addr_t address = (e2k_addr_t)src; + e2k_addr_t shadow_address; + + if (!test_ti_thread_flag(ti, TIF_PARAVIRT_GUEST) || !vcpu) { + /* thread is not paravirtualized guest kernel */ + return NULL; + } + if (address < NATIVE_TASK_SIZE) { + /* address is into guest kernel space area, */ + /* so do not search shadow */ + return NULL; + } + shadow_address = kvm_guest_kernel_addr_to_hva(vcpu, address); + if (shadow_address == 0) { + /* guest address has not host shadow address */ + return NULL; + } + return (void *)shadow_address; +} + +static inline bool +kvm_is_shadow_addr_host_ttable(struct kvm *kvm, e2k_addr_t shadow_addr) +{ + kvm_kernel_shadow_t *shadow; + e2k_addr_t ttable_start; + e2k_addr_t ttable_end; + e2k_addr_t kernel_base; + e2k_addr_t shadow_base; + e2k_addr_t shadow_end; + int slot; + + DebugKVMSH("started for shadow addr 0x%lx\n", shadow_addr); + ttable_start = PAGE_ALIGN_UP(KERNEL_TTABLE_BASE); + ttable_end = PAGE_ALIGN_DOWN(KERNEL_TTABLE_END); + + slot = kvm_find_shadow_slot(kvm, 0, 0, shadow_addr); + if (slot < 0) { + DebugKVMSH("could not find shadow address 0x%lx at the list " + "of guest shadow areas\n", shadow_addr); + return false; + } + shadow = &kvm->arch.shadows[slot]; + kernel_base = shadow->kernel_start; + shadow_base = shadow->shadow_start; + DebugKVMSH("shadow address 0x%lx is found at the slot %d: host kernel " + "base 0x%lx, shadow base 0x%lx\n", + shadow_addr, slot, kernel_base, shadow_base); + if (kernel_base != ttable_start) + return false; + shadow_end = shadow_base + (ttable_end - ttable_start); + DebugKVMSH("host ttable shadow base 0x%lx end 
0x%lx\n", + shadow_base, shadow_end); + if (shadow_addr >= shadow_base && shadow_addr < shadow_end) + return true; + return false; +} + +static inline long +kvm_move_guest_tagged_data(int word_size, + e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + DebugKVMREC("started for address from 0x%lx to 0x%lx\n", + addr_from, addr_to); + + if (IS_HOST_KERNEL_ADDRESS(addr_from)) { + DebugKVMREC("invalid address 0x%lx from (outside " + "guest space)\n", + addr_from); + return -EINVAL; + } + if (IS_HOST_KERNEL_ADDRESS(addr_to)) { + DebugKVMREC("invalid address 0x%lx to (outside guest space)\n", + addr_to); + return -EINVAL; + } + switch (word_size) { + case sizeof(u32): + native_move_tagged_word(addr_from, addr_to); + break; + case sizeof(u64): + native_move_tagged_dword(addr_from, addr_to); + break; + case sizeof(u64) * 2: + native_move_tagged_qword(addr_from, addr_to); + break; + default: + return -EINVAL; + } + return 0; +} + +/* + * Recovery faulted store operations + * common case: some addresses can be from host kernel address space, + * but point to guest structures, shadow image ... 
+ */
+extern long kvm_recovery_faulted_tagged_guest_store(struct kvm_vcpu *vcpu,
+		e2k_addr_t address, u64 wr_data, u64 st_rec_opc,
+		u64 data_ext, u64 opc_ext, u64 arg);
+extern long kvm_recovery_faulted_guest_load(struct kvm_vcpu *vcpu,
+		e2k_addr_t address, u64 *ld_val, u8 *data_tag,
+		u64 ld_rec_opc, int chan);
+extern long kvm_recovery_faulted_guest_move(struct kvm_vcpu *vcpu,
+		e2k_addr_t addr_from, e2k_addr_t addr_to,
+		e2k_addr_t addr_to_hi, u64 ld_rec_opc, u64 _arg,
+		u32 first_time);
+extern long kvm_recovery_faulted_load_to_guest_greg(struct kvm_vcpu *vcpu,
+		e2k_addr_t address, u32 greg_num_d, u64 ld_rec_opc,
+		u64 arg, u64 saved_greg_lo, u64 saved_greg_hi);
+extern long kvm_move_tagged_guest_data(struct kvm_vcpu *vcpu,
+		int word_size, e2k_addr_t addr_from, e2k_addr_t addr_to);
+
+extern void update_pv_vcpu_local_glob_regs(struct kvm_vcpu *vcpu,
+		local_gregs_t *l_gregs);
+
+static inline long
+kvm_read_guest_dtlb_reg(e2k_addr_t virt_addr)
+{
+	return NATIVE_READ_DTLB_REG(virt_addr);
+}
+
+/*
+ * Copy the saved Disambiguation Address Memory entries to the guest
+ * buffer @dam of @dam_entries slots.  Returns 0 on success or the
+ * OR-ed __put_user() error status.
+ */
+static inline long
+kvm_get_guest_DAM(unsigned long long __user *dam, int dam_entries)
+{
+	thread_info_t *ti = native_current_thread_info();
+	int entries;
+	int i;
+	int ret = 0;
+
+	/* clamp to the hardware DAM size so that ti->dam[] is never */
+	/* read out of bounds when the caller passes a larger count */
+	if (dam_entries < DAM_ENTRIES_NUM)
+		entries = dam_entries;
+	else
+		entries = DAM_ENTRIES_NUM;
+	NATIVE_SAVE_DAM(ti->dam);
+	for (i = 0; i < entries; i++)
+		ret |= __put_user(ti->dam[i], &dam[i]);
+	return ret;
+}
+
+static inline long
+kvm_flush_guest_dcache_line(e2k_addr_t virt_addr)
+{
+	NATIVE_FLUSH_DCACHE_LINE(virt_addr);
+	return 0;
+}
+static inline long
+kvm_clear_guest_dcache_l1_set(e2k_addr_t virt_addr, unsigned long set)
+{
+	NATIVE_CLEAR_DCACHE_L1_SET(virt_addr, set);
+	return 0;
+}
+
+static inline long
+kvm_flush_guest_dcache_range(void *virt_addr, size_t len)
+{
+	native_flush_DCACHE_range(virt_addr, len);
+	return 0;
+}
+static inline long
+kvm_clear_guest_dcache_l1_range(void *virt_addr, size_t
len) +{ + native_clear_DCACHE_L1_range(virt_addr, len); + return 0; +} +static inline long +kvm_flush_guest_icache_all(void) +{ + native_flush_icache_all(); + return 0; +} +static inline long +kvm_guest_mmu_probe(e2k_addr_t virt_addr, kvm_mmu_probe_t what) +{ + if (what == KVM_MMU_PROBE_ENTRY) { + return NATIVE_ENTRY_PROBE_MMU_OP(virt_addr); + } else if (what == KVM_MMU_PROBE_ADDRESS) { + return NATIVE_ADDRESS_PROBE_MMU_OP(virt_addr); + } + /* invalid MMU probe type */ + return ILLEGAL_PAGE_EP_RES; +} + +extern void kvm_arch_init_vm_mmap(struct kvm *kvm); +extern void kvm_arch_free_memory_region(struct kvm *kvm, + struct kvm_memory_slot *memslot); + +#endif /* __KVM_E2K_MMU_H */ diff --git a/arch/e2k/kvm/mmu_defs.h b/arch/e2k/kvm/mmu_defs.h new file mode 100644 index 000000000000..a23e26298f56 --- /dev/null +++ b/arch/e2k/kvm/mmu_defs.h @@ -0,0 +1,311 @@ +#ifndef __KVM_E2K_MMU_DEFS_H +#define __KVM_E2K_MMU_DEFS_H + +#include +#include +#include +#include + +/* + * VCPU state structure contains CPU, MMU, Local APIC and other registers + * current values of VCPU. The structure is common for host and guest and + * can (and should) be accessed by both. + * See for more details arch/e2k/kvm/cpu_defs.h + */ + +/* + * Basic functions to access to virtual MMUs registers on host. 
+ */ +static inline mmu_reg_t * +kvm_get_pv_vcpu_mmu_regs(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.kmap_vcpu_state->mmu.regs; +} +static inline trap_cellar_t * +kvm_get_pv_vcpu_mmu_trap_cellar(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.kmap_vcpu_state->mmu.tcellar; +} + +static inline mmu_reg_t +kvm_read_pv_mmu_reg(mmu_reg_t *mmu_regs, mmu_addr_t mmu_addr) +{ + int mmu_reg_no = MMU_REG_NO_FROM_MMU_ADDR(mmu_addr_val(mmu_addr)); + + return mmu_regs[mmu_reg_no]; +} + +static inline mmu_reg_t +kvm_read_pv_vcpu_mmu_reg(struct kvm_vcpu *vcpu, mmu_addr_t mmu_addr) +{ + mmu_reg_t *mmu_regs = kvm_get_pv_vcpu_mmu_regs(vcpu); + + return kvm_read_pv_mmu_reg(mmu_regs, mmu_addr); +} + +static inline void +kvm_write_pv_mmu_reg(mmu_reg_t *mmu_regs, + mmu_addr_t mmu_addr, mmu_reg_t mmu_reg) +{ + int mmu_reg_no = MMU_REG_NO_FROM_MMU_ADDR(mmu_addr_val(mmu_addr)); + + mmu_regs[mmu_reg_no] = mmu_reg_val(mmu_reg); +} + +static inline void +kvm_write_pv_vcpu_mmu_reg(struct kvm_vcpu *vcpu, + mmu_addr_t mmu_addr, mmu_reg_t mmu_reg) +{ + mmu_reg_t *mmu_regs = kvm_get_pv_vcpu_mmu_regs(vcpu); + + kvm_write_pv_mmu_reg(mmu_regs, mmu_addr, mmu_reg); +} + +static inline unsigned int +kvm_read_pv_mmu_TRAP_COUNT_reg(mmu_reg_t *mmu_regs) +{ + return kvm_read_pv_mmu_reg(mmu_regs, MMU_ADDR_TRAP_COUNT); +} +static inline unsigned int +kvm_read_pv_vcpu_mmu_TRAP_COUNT_reg(struct kvm_vcpu *vcpu) +{ + return kvm_read_pv_vcpu_mmu_reg(vcpu, MMU_ADDR_TRAP_COUNT); +} +static inline void +kvm_write_pv_mmu_TRAP_COUNT_reg(mmu_reg_t *mmu_regs, unsigned int count) +{ + kvm_write_pv_mmu_reg(mmu_regs, MMU_ADDR_TRAP_COUNT, __mmu_reg(count)); +} +static inline void +kvm_write_pv_vcpu_mmu_TRAP_COUNT_reg(struct kvm_vcpu *vcpu, unsigned int count) +{ + kvm_write_pv_vcpu_mmu_reg(vcpu, MMU_ADDR_TRAP_COUNT, __mmu_reg(count)); +} + +static inline mmu_reg_t +kvm_read_pv_MMU_CR_reg(mmu_reg_t *mmu_regs) +{ + return kvm_read_pv_mmu_reg(mmu_regs, MMU_ADDR_CR); +} +static inline mmu_reg_t +kvm_read_pv_vcpu_MMU_CR_reg(struct 
kvm_vcpu *vcpu) +{ + return kvm_read_pv_vcpu_mmu_reg(vcpu, MMU_ADDR_CR); +} +static inline void +kvm_write_pv_MMU_CR_reg(mmu_reg_t *mmu_regs, mmu_reg_t mmu_cr) +{ + kvm_write_pv_mmu_reg(mmu_regs, MMU_ADDR_CR, mmu_cr); +} +static inline void +kvm_write_pv_vcpu_MMU_CR_reg(struct kvm_vcpu *vcpu, mmu_reg_t mmu_cr) +{ + kvm_write_pv_vcpu_mmu_reg(vcpu, MMU_ADDR_CR, mmu_cr); +} + +static inline unsigned int +kvm_read_pv_mmu_PID_reg(mmu_reg_t *mmu_regs) +{ + return kvm_read_pv_mmu_reg(mmu_regs, MMU_ADDR_PID); +} +static inline unsigned int +kvm_read_pv_vcpu_mmu_PID_reg(struct kvm_vcpu *vcpu) +{ + return kvm_read_pv_vcpu_mmu_reg(vcpu, MMU_ADDR_PID); +} +static inline void +kvm_write_pv_mmu_PID_reg(mmu_reg_t *mmu_regs, unsigned int pid) +{ + kvm_write_pv_mmu_reg(mmu_regs, MMU_ADDR_PID, MMU_PID(pid)); +} +static inline void +kvm_write_pv_vcpu_mmu_PID_reg(struct kvm_vcpu *vcpu, unsigned int pid) +{ + kvm_write_pv_vcpu_mmu_reg(vcpu, MMU_ADDR_PID, MMU_PID(pid)); +} + +static inline e2k_addr_t +kvm_read_pv_mmu_OS_PPTB_reg(mmu_reg_t *mmu_regs) +{ + return kvm_read_pv_mmu_reg(mmu_regs, MMU_ADDR_OS_PPTB); +} +static inline e2k_addr_t +kvm_read_pv_vcpu_mmu_OS_PPTB_reg(struct kvm_vcpu *vcpu) +{ + return kvm_read_pv_vcpu_mmu_reg(vcpu, MMU_ADDR_OS_PPTB); +} +static inline void +kvm_write_pv_mmu_OS_PPTB_reg(mmu_reg_t *mmu_regs, e2k_addr_t phys_pgd) +{ + kvm_write_pv_mmu_reg(mmu_regs, MMU_ADDR_OS_PPTB, + MMU_ADDR_TO_PPTB(phys_pgd)); +} +static inline void +kvm_write_pv_vcpu_mmu_OS_PPTB_reg(struct kvm_vcpu *vcpu, e2k_addr_t phys_pgd) +{ + kvm_write_pv_vcpu_mmu_reg(vcpu, MMU_ADDR_OS_PPTB, + MMU_ADDR_TO_PPTB(phys_pgd)); +} + +static inline e2k_addr_t +kvm_read_pv_mmu_OS_VPTB_reg(mmu_reg_t *mmu_regs) +{ + return kvm_read_pv_mmu_reg(mmu_regs, MMU_ADDR_OS_VPTB); +} +static inline e2k_addr_t +kvm_read_pv_vcpu_mmu_OS_VPTB_reg(struct kvm_vcpu *vcpu) +{ + return kvm_read_pv_vcpu_mmu_reg(vcpu, MMU_ADDR_OS_VPTB); +} +static inline void +kvm_write_pv_mmu_OS_VPTB_reg(mmu_reg_t *mmu_regs, e2k_addr_t 
virt_addr) +{ + kvm_write_pv_mmu_reg(mmu_regs, MMU_ADDR_OS_VPTB, + MMU_ADDR_TO_VPTB(virt_addr)); +} +static inline void +kvm_write_pv_vcpu_mmu_OS_VPTB_reg(struct kvm_vcpu *vcpu, e2k_addr_t virt_addr) +{ + kvm_write_pv_vcpu_mmu_reg(vcpu, MMU_ADDR_OS_VPTB, + MMU_ADDR_TO_VPTB(virt_addr)); +} + +static inline e2k_addr_t +kvm_read_pv_mmu_U_PPTB_reg(mmu_reg_t *mmu_regs) +{ + return kvm_read_pv_mmu_reg(mmu_regs, MMU_ADDR_U_PPTB); +} +static inline e2k_addr_t +kvm_read_pv_vcpu_mmu_U_PPTB_reg(struct kvm_vcpu *vcpu) +{ + return kvm_read_pv_vcpu_mmu_reg(vcpu, MMU_ADDR_U_PPTB); +} +static inline void +kvm_write_pv_mmu_U_PPTB_reg(mmu_reg_t *mmu_regs, e2k_addr_t phys_pgd) +{ + kvm_write_pv_mmu_reg(mmu_regs, MMU_ADDR_U_PPTB, + MMU_ADDR_TO_PPTB(phys_pgd)); +} +static inline void +kvm_write_pv_vcpu_mmu_U_PPTB_reg(struct kvm_vcpu *vcpu, e2k_addr_t phys_pgd) +{ + kvm_write_pv_vcpu_mmu_reg(vcpu, MMU_ADDR_U_PPTB, + MMU_ADDR_TO_PPTB(phys_pgd)); +} + +static inline e2k_addr_t +kvm_read_pv_mmu_U_VPTB_reg(mmu_reg_t *mmu_regs) +{ + return kvm_read_pv_mmu_reg(mmu_regs, MMU_ADDR_U_VPTB); +} +static inline e2k_addr_t +kvm_read_pv_vcpu_mmu_U_VPTB_reg(struct kvm_vcpu *vcpu) +{ + return kvm_read_pv_vcpu_mmu_reg(vcpu, MMU_ADDR_U_VPTB); +} +static inline void +kvm_write_pv_mmu_U_VPTB_reg(mmu_reg_t *mmu_regs, e2k_addr_t virt_addr) +{ + kvm_write_pv_mmu_reg(mmu_regs, MMU_ADDR_U_VPTB, + MMU_ADDR_TO_VPTB(virt_addr)); +} +static inline void +kvm_write_pv_vcpu_mmu_U_VPTB_reg(struct kvm_vcpu *vcpu, e2k_addr_t virt_addr) +{ + kvm_write_pv_vcpu_mmu_reg(vcpu, MMU_ADDR_U_VPTB, + MMU_ADDR_TO_VPTB(virt_addr)); +} + +static inline e2k_addr_t +kvm_read_pv_mmu_OS_VAB_reg(mmu_reg_t *mmu_regs) +{ + return kvm_read_pv_mmu_reg(mmu_regs, MMU_ADDR_OS_VAB); +} +static inline e2k_addr_t +kvm_read_pv_vcpu_mmu_OS_VAB_reg(struct kvm_vcpu *vcpu) +{ + return kvm_read_pv_vcpu_mmu_reg(vcpu, MMU_ADDR_OS_VAB); +} +static inline void +kvm_write_pv_mmu_OS_VAB_reg(mmu_reg_t *mmu_regs, e2k_addr_t virt_addr) +{ + 
kvm_write_pv_mmu_reg(mmu_regs, MMU_ADDR_OS_VAB,
+			MMU_ADDR_TO_VAB(virt_addr));
+}
+static inline void
+kvm_write_pv_vcpu_mmu_OS_VAB_reg(struct kvm_vcpu *vcpu, e2k_addr_t virt_addr)
+{
+	/* fix: OS_VAB writers targeted MMU_ADDR_OS_VPTB (copy-paste from */
+	/* the OS_VPTB pair above), clobbering OS_VPTB and never updating */
+	/* OS_VAB; the matching readers use MMU_ADDR_OS_VAB */
+	kvm_write_pv_vcpu_mmu_reg(vcpu, MMU_ADDR_OS_VAB,
+			MMU_ADDR_TO_VAB(virt_addr));
+}
+
+static inline bool
+kvm_read_pv_mmu_US_CL_D_reg(mmu_reg_t *mmu_regs)
+{
+	return (bool)kvm_read_pv_mmu_reg(mmu_regs, MMU_ADDR_US_CL_D);
+}
+static inline bool
+kvm_read_pv_vcpu_mmu_US_CL_D_reg(struct kvm_vcpu *vcpu)
+{
+	return (bool)kvm_read_pv_vcpu_mmu_reg(vcpu, MMU_ADDR_US_CL_D);
+}
+static inline void
+kvm_write_pv_mmu_US_CL_D_reg(mmu_reg_t *mmu_regs, bool disable)
+{
+	kvm_write_pv_mmu_reg(mmu_regs, MMU_ADDR_US_CL_D, __mmu_reg(disable));
+}
+static inline void
+kvm_write_pv_vcpu_mmu_US_CL_D_reg(struct kvm_vcpu *vcpu, bool disable)
+{
+	kvm_write_pv_vcpu_mmu_reg(vcpu, MMU_ADDR_US_CL_D, __mmu_reg(disable));
+}
+
+static inline void
+kvm_read_pv_mmu_tc_entry(trap_cellar_t *tc, int tc_no, trap_cellar_t *tc_entry)
+{
+	trap_cellar_t *tcellar;
+
+	BUG_ON(tc_no * 3 > MAX_TC_SIZE);
+	tcellar = &tc[tc_no];
+	tc_entry->address = tcellar->address;
+	tc_entry->condition = tcellar->condition;
+	if (AS(tcellar->condition).store) {
+		native_move_tagged_dword((e2k_addr_t)&tcellar->data,
+				(e2k_addr_t)&tc_entry->data);
+	}
+}
+static inline void
+kvm_read_pv_vcpu_mmu_tc_entry(struct kvm_vcpu *vcpu,
+				int tc_no, trap_cellar_t *tc_entry)
+{
+	trap_cellar_t *tc = kvm_get_pv_vcpu_mmu_trap_cellar(vcpu);
+
+	kvm_read_pv_mmu_tc_entry(tc, tc_no, tc_entry);
+}
+
+static inline void
+kvm_write_pv_mmu_tc_entry(trap_cellar_t *tc, int tc_no,
+		e2k_addr_t address, tc_cond_t condition, u64 *data)
+{
+	trap_cellar_t *tcellar;
+
+	BUG_ON(tc_no * 3 > MAX_TC_SIZE);
+	tcellar = &tc[tc_no];
+	tcellar->address = address;
+	tcellar->condition = condition;
+	if (data != NULL) {
+		native_move_tagged_dword((e2k_addr_t)data,
+				(e2k_addr_t)&tcellar->data);
+	}
+}
+
+static inline void
+kvm_write_pv_vcpu_mmu_tc_entry(struct kvm_vcpu
*vcpu, int tc_no, + e2k_addr_t address, tc_cond_t condition, u64 *data) +{ + trap_cellar_t *tc = kvm_get_pv_vcpu_mmu_trap_cellar(vcpu); + + kvm_write_pv_mmu_tc_entry(tc, tc_no, address, condition, data); +} + +#endif /* __KVM_E2K_MMU_DEFS_H */ diff --git a/arch/e2k/kvm/mmu_flush.c b/arch/e2k/kvm/mmu_flush.c new file mode 100644 index 000000000000..6b8a976add00 --- /dev/null +++ b/arch/e2k/kvm/mmu_flush.c @@ -0,0 +1,265 @@ +/* + * Guest kernel MMU caches support on KVM host + * (Instruction and Data caches, TLB) + * + * Copyright 2016 Salavat S. Gilyazov (atic@mcst.ru) + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#undef DEBUG_KVM_TLB_MODE +#undef DebugPT +#define DEBUG_KVM_TLB_MODE 0 /* TLB flushing */ +#define DebugPT(fmt, args...) \ +({ \ + if (DEBUG_KVM_TLB_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +/* + * Flush just one page of a specified guest user. + */ +void kvm_flush_guest_tlb_page(gmm_struct_t *gmm, e2k_addr_t addr) +{ + unsigned long context; + + BUG_ON(!test_thread_flag(TIF_MULTITHREADING) || + !test_thread_flag(TIF_VIRTUALIZED_GUEST)); + + /* FIXME: guest user context is not completely implemented, */ + /* so all guest user MMs use the current host MM context */ + /* context = gmm->context.cpumsk[raw_smp_processor_id()]; */ + context = current->active_mm->context.cpumsk[raw_smp_processor_id()]; + + if (unlikely(context == 0)) { + /* See comment in __flush_tlb_range(). 
*/ + kvm_flush_guest_tlb_mm(gmm); + return; + } + + flush_TLB_page(addr, CTX_HARDWARE(context)); + + /* FIXME: flush of secondary space page is not implemented + if (TASK_IS_BINCO(current) && ADDR_IN_SS(addr) && !IS_UPT_E3S) { + // flush secondary space address // + flush_TLB_ss_page(addr - SS_ADDR_START, CTX_HARDWARE(context)); + } + */ +} + +/* + * Flush a specified guest user mapping on the processor + */ +void kvm_flush_guest_tlb_mm(gmm_struct_t *gmm) +{ + thread_info_t *ti = current_thread_info(); + gthread_info_t *gti; + + BUG_ON(!test_thread_flag(TIF_MULTITHREADING) || + !test_thread_flag(TIF_VIRTUALIZED_GUEST)); + + if (gmm == pv_vcpu_get_active_gmm(ti->vcpu)) { + gti = ti->gthread_info; + if (!test_gti_thread_flag(gti, GTIF_KERNEL_THREAD)) { + /* kernel thread can manipulate with user addresses */ + /* for example while swap or page cache writeback */ + WARN_ON(pv_vcpu_get_active_gmm(ti->vcpu) != gti->gmm); + } + /* Should update right now */ + /* FIXME: guest user context is not completely implemented, */ + /* so all guest user MMs use the current host MM context */ + /* reload_mmu_context(&gmm->context, gmm->sec_pgd); */ + reload_mmu_context(current->active_mm); + } else { + /* invalidate_mmu_context(&gmm->context, gmm_cpumask(gmm)); */ + invalidate_mmu_context(current->active_mm); + } +} + +/* + * Flush a specified range of pages + */ + +/* + * If the number of pages to be flushed is below this value, + * then only those pages will be flushed. + * + * Flushing one page takes ~150 cycles, flushing the whole mm + * takes ~400 cycles. Also note that __flush_tlb_range() may + * be called repeatedly for the same process so high values + * are bad. 
+ */ + +void kvm_flush_guest_tlb_range(gmm_struct_t *const gmm, + const e2k_addr_t start, const e2k_addr_t end) +{ + BUG_ON(start > end); + + /* FIXME: guest user context is not completely implemented, */ + /* so all guest user MMs use the current host MM context */ + /* if (flush_tlb_context_range(&gmm->context, start, end)) */ + flush_tlb_mm_range(current->active_mm, start, end); +} + +/* + * Flush the TLB entries mapping the virtually mapped linear page + * table corresponding to address range [start : end]. + */ +void kvm_flush_guest_tlb_pgtables(gmm_struct_t *gmm, + e2k_addr_t start, e2k_addr_t end) +{ + BUG_ON(start > end); + + /* flush virtual mapping of PTE entries (third level of page table) */ + kvm_flush_guest_tlb_range(gmm, + pte_virt_offset(_PAGE_ALIGN_UP(start, PTE_SIZE)), + pte_virt_offset(_PAGE_ALIGN_DOWN(end, PTE_SIZE))); + + /* flush virtual mapping of PMD entries (second level of page table) */ + kvm_flush_guest_tlb_range(gmm, + pmd_virt_offset(_PAGE_ALIGN_UP(start, PMD_SIZE)), + pmd_virt_offset(_PAGE_ALIGN_DOWN(end, PMD_SIZE))); + + /* flush virtual mapping of PUD entries (first level of page table) */ + kvm_flush_guest_tlb_range(gmm, + pud_virt_offset(_PAGE_ALIGN_UP(start, PUD_SIZE)), + pud_virt_offset(_PAGE_ALIGN_DOWN(end, PUD_SIZE))); +} + +/* + * Flush a specified range of pages and the TLB entries mapping the virtually + * mapped linear page table corresponding to address range [start : end]. + */ +void +kvm_flush_guest_tlb_range_and_pgtables(gmm_struct_t *gmm, + e2k_addr_t start, e2k_addr_t end) +{ + kvm_flush_guest_tlb_range(gmm, start, end); + kvm_flush_guest_tlb_pgtables(gmm, start, end); +} + +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT + +/* + * Functions to flush guest VM on host should return boolean value: + * true if address or MM is from guest VM space and flushing was done + * false if address or MM is not from guest VM space or flushing cannot + * be done + */ + +/* + * Update just one specified address of current active mm. 
+ * PGD is updated into CPU root page table from main user PGD table + */ +bool kvm_do_flush_guest_cpu_root_pt_page(struct vm_area_struct *vma, + e2k_addr_t addr) +{ + + BUG_ON(!test_thread_flag(TIF_MULTITHREADING)); + BUG_ON(MMU_IS_SEPARATE_PT()); + + BUG_ON(addr >= HOST_TASK_SIZE); + + if (addr >= GUEST_TASK_SIZE) + /* it is guest kernel address and guest kernel is user of */ + /* host, so host know what to do */ + return false; + /* + * It is user address of some thread to manage guest machine (QEMU). + * VCPU and VIRQ VCPU should not use these addresses and what is more + * such address can be used by some guest user process. + * So do nothing to update CPU root table + */ + return true; +} +/* + * Update user PGD entries from address range of current active mm. + * PGDs are updated into CPU root page table from main user PGD table + */ +bool kvm_do_flush_guest_cpu_root_pt_range(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end) +{ + BUG_ON(start > end); + + BUG_ON(!test_thread_flag(TIF_MULTITHREADING)); + BUG_ON(MMU_IS_SEPARATE_PT()); + + BUG_ON(start >= HOST_TASK_SIZE || end >= HOST_TASK_SIZE); + + if (start >= GUEST_TASK_SIZE) + /* it is guest kernel address and guest kernel is user of */ + /* host, so host know what to do */ + return false; + BUG_ON(end >= GUEST_TASK_SIZE); + /* + * It is range of user addresses of some thread to manage guest + * machine (QEMU). + * VCPU and VIRQ VCPU should not use these addresses and what is more + * such address can be used by some guest user process. + * So do nothing to update CPU root table + */ + return true; +} +/* + * Update all user PGD entries of current active mm. + * PGDs are updated into CPU root page table from main user PGD table + */ +bool kvm_do_flush_guest_cpu_root_pt_mm(struct mm_struct *mm) +{ + BUG_ON(!test_thread_flag(TIF_MULTITHREADING)); + BUG_ON(MMU_IS_SEPARATE_PT()); + + /* + * Updated all user PGD entries, so it can be user and kernel part + * of guest VM. 
User part of guest user MM can update only special + * host functions (kvm_flush_guest_xxx()). In this case can be updated + * only MM of host user threads to manage guest virtual machine (QEMU). + * But it can be updated guest kernel part of VM, so reload all PGD + * entries of guest kernel + */ + copy_guest_kernel_pgd_to_kernel_root_pt(mm->pgd); + return true; +} +/* + * Update all users PGD entries of all active MMs. + * PGDs are updated into CPU root page table from main user PGD table + */ +bool kvm_do_flush_guest_cpu_root_pt(void) +{ + gmm_struct_t *active_gmm; + + BUG_ON(!test_thread_flag(TIF_MULTITHREADING)); + BUG_ON(MMU_IS_SEPARATE_PT()); + if (current->flags & PF_EXITING) { + /* process is exiting, nothing flush */ + return true; + } + + /* + * Reload guest kernel part PGDs from main user page table + */ + BUG_ON(current->mm == NULL); + copy_guest_kernel_pgd_to_kernel_root_pt(current->mm->pgd); + + /* + * Reload guest user part PGDs from current active guest user MM + */ + active_gmm = pv_vcpu_get_active_gmm(current_thread_info()->vcpu); + if (active_gmm != NULL) { + /* there is now active guest user */ + copy_guest_user_pgd_to_kernel_root_pt( + kvm_mmu_get_gmm_root(active_gmm)); + } + return true; +} +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ diff --git a/arch/e2k/kvm/mmutrace-e2k.h b/arch/e2k/kvm/mmutrace-e2k.h new file mode 100644 index 000000000000..c292b05b2a46 --- /dev/null +++ b/arch/e2k/kvm/mmutrace-e2k.h @@ -0,0 +1,337 @@ +#if !defined(_TRACE_KVMMMU_E2K_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_KVMMMU_E2K_H + +#include +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kvmmmu + +#define KVM_MMU_PAGE_FIELDS \ + __field(unsigned long, mmu_valid_gen) \ + __field(__u64, gfn) \ + __field(__u32, role) \ + __field(__u32, root_count) \ + __field(bool, unsync) + +#define KVM_MMU_PAGE_ASSIGN(sp) \ + __entry->mmu_valid_gen = sp->mmu_valid_gen; \ + __entry->gfn = sp->gfn; \ + __entry->role = sp->role.word; \ + __entry->root_count 
= sp->root_count; \ + __entry->unsync = sp->unsync; + +#define KVM_MMU_PAGE_PRINTK() ({ \ + const char *saved_ptr = trace_seq_buffer_ptr(p); \ + static const char *access_str[] = { \ + "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \ + }; \ + union kvm_mmu_page_role role; \ + \ + role.word = __entry->role; \ + \ + trace_seq_printf(p, "sp gen %lx gfn %llx %u %s %s %s" \ + " %snxe root %u %s%c", __entry->mmu_valid_gen, \ + __entry->gfn, role.level, \ + role.direct ? " direct" : "", \ + access_str[role.access], \ + role.invalid ? " invalid" : "", \ + role.nxe ? "" : "!", \ + __entry->root_count, \ + __entry->unsync ? "unsync" : "sync", 0); \ + saved_ptr; \ + }) + +#ifdef CONFIG_X86_HW_VIRTUALIZATION +#define kvm_mmu_trace_pferr_flags \ + { PFERR_PRESENT_MASK, "P" }, \ + { PFERR_WRITE_MASK, "W" }, \ + { PFERR_USER_MASK, "U" }, \ + { PFERR_RSVD_MASK, "RSVD" }, \ + { PFERR_FETCH_MASK, "F" } +#else +#define kvm_mmu_trace_pferr_flags \ + { 0, "?" } + +#endif /* CONFIG_X86_HW_VIRTUALIZATION */ + +/* + * A pagetable walk has started + */ +TRACE_EVENT( + kvm_mmu_pagetable_walk, + TP_PROTO(u64 addr, u32 pferr), + TP_ARGS(addr, pferr), + + TP_STRUCT__entry( + __field(__u64, addr) + __field(__u32, pferr) + ), + + TP_fast_assign( + __entry->addr = addr; + __entry->pferr = pferr; + ), + + TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr, + __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags)) +); + + +/* We just walked a paging element */ +TRACE_EVENT( + kvm_mmu_paging_element, + TP_PROTO(pgprot_t pte, int level), + TP_ARGS(pte, level), + + TP_STRUCT__entry( + __field(pgprotval_t, pte) + __field(__u32, level) + ), + + TP_fast_assign( + __entry->pte = pgprot_val(pte); + __entry->level = level; + ), + + TP_printk("pte %lx level %u", __entry->pte, __entry->level) +); + +DECLARE_EVENT_CLASS(kvm_mmu_set_bit_class, + + TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size), + + TP_ARGS(table_gfn, index, size), + + TP_STRUCT__entry( + 
__field(__u64, gpa) + ), + + TP_fast_assign( + __entry->gpa = ((u64)table_gfn << PAGE_SHIFT) + + index * size; + ), + + TP_printk("gpa %llx", __entry->gpa) +); + +/* We set a pte accessed bit */ +DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit, + + TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size), + + TP_ARGS(table_gfn, index, size) +); + +/* We set a pte dirty bit */ +DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit, + + TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size), + + TP_ARGS(table_gfn, index, size) +); + +TRACE_EVENT( + kvm_mmu_walker_error, + TP_PROTO(u32 pferr), + TP_ARGS(pferr), + + TP_STRUCT__entry( + __field(__u32, pferr) + ), + + TP_fast_assign( + __entry->pferr = pferr; + ), + + TP_printk("pferr %x %s", __entry->pferr, + __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags)) +); + +TRACE_EVENT( + kvm_mmu_get_page, + TP_PROTO(struct kvm_mmu_page *sp, bool created), + TP_ARGS(sp, created), + + TP_STRUCT__entry( + KVM_MMU_PAGE_FIELDS + __field(bool, created) + ), + + TP_fast_assign( + KVM_MMU_PAGE_ASSIGN(sp) + __entry->created = created; + ), + + TP_printk("%s %s", KVM_MMU_PAGE_PRINTK(), + __entry->created ? 
"new" : "existing") +); + +DECLARE_EVENT_CLASS(kvm_mmu_page_class, + + TP_PROTO(struct kvm_mmu_page *sp), + TP_ARGS(sp), + + TP_STRUCT__entry( + KVM_MMU_PAGE_FIELDS + ), + + TP_fast_assign( + KVM_MMU_PAGE_ASSIGN(sp) + ), + + TP_printk("%s", KVM_MMU_PAGE_PRINTK()) +); + +DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_sync_page, + TP_PROTO(struct kvm_mmu_page *sp), + + TP_ARGS(sp) +); + +DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_unsync_page, + TP_PROTO(struct kvm_mmu_page *sp), + + TP_ARGS(sp) +); + +DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page, + TP_PROTO(struct kvm_mmu_page *sp), + + TP_ARGS(sp) +); + +TRACE_EVENT( + mark_mmio_spte, + TP_PROTO(pgprot_t *sptep, gfn_t gfn, unsigned access, unsigned int gen), + TP_ARGS(sptep, gfn, access, gen), + + TP_STRUCT__entry( + __field(void *, sptep) + __field(gfn_t, gfn) + __field(unsigned, access) + __field(unsigned int, gen) + ), + + TP_fast_assign( + __entry->sptep = sptep; + __entry->gfn = gfn; + __entry->access = access; + __entry->gen = gen; + ), + + TP_printk("sptep:%px gfn %llx access %x gen %x", __entry->sptep, + __entry->gfn, __entry->access, __entry->gen) +); + +TRACE_EVENT( + handle_mmio_page_fault, + TP_PROTO(u64 addr, gfn_t gfn, unsigned access), + TP_ARGS(addr, gfn, access), + + TP_STRUCT__entry( + __field(u64, addr) + __field(gfn_t, gfn) + __field(unsigned, access) + ), + + TP_fast_assign( + __entry->addr = addr; + __entry->gfn = gfn; + __entry->access = access; + ), + + TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn, + __entry->access) +); + +#define __spte_satisfied(__spte) \ + (__entry->retry && is_writable_pte(__pgprot(__entry->__spte))) + +TRACE_EVENT( + fast_page_fault, + TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code, + pgprot_t *sptep, pgprot_t old_spte, bool retry), + TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry), + + TP_STRUCT__entry( + __field(int, vcpu_id) + __field(gva_t, gva) + __field(u32, error_code) + __field(pgprot_t *, sptep) + 
__field(pgprotval_t, old_spte) + __field(pgprotval_t, new_spte) + __field(bool, retry) + ), + + TP_fast_assign( + __entry->vcpu_id = vcpu->vcpu_id; + __entry->gva = gva; + __entry->error_code = error_code; + __entry->sptep = sptep; + __entry->old_spte = pgprot_val(old_spte); + __entry->new_spte = pgprot_val(*sptep); + __entry->retry = retry; + ), + + TP_printk("vcpu %d gva %lx error_code %s sptep %px old %#lx" + " new %lx spurious %d fixed %d", __entry->vcpu_id, + __entry->gva, __print_flags(__entry->error_code, "|", + kvm_mmu_trace_pferr_flags), __entry->sptep, + __entry->old_spte, __entry->new_spte, + __spte_satisfied(old_spte), __spte_satisfied(new_spte) + ) +); + +TRACE_EVENT( + kvm_mmu_invalidate_zap_all_pages, + TP_PROTO(struct kvm *kvm), + TP_ARGS(kvm), + + TP_STRUCT__entry( + __field(unsigned long, mmu_valid_gen) + __field(unsigned int, mmu_used_pages) + ), + + TP_fast_assign( + __entry->mmu_valid_gen = kvm->arch.mmu_valid_gen; + __entry->mmu_used_pages = kvm->arch.n_used_mmu_pages; + ), + + TP_printk("kvm-mmu-valid-gen %lx used_pages %x", + __entry->mmu_valid_gen, __entry->mmu_used_pages + ) +); + + +TRACE_EVENT( + check_mmio_spte, + TP_PROTO(pgprot_t spte, unsigned int kvm_gen, unsigned int spte_gen), + TP_ARGS(spte, kvm_gen, spte_gen), + + TP_STRUCT__entry( + __field(unsigned int, kvm_gen) + __field(unsigned int, spte_gen) + __field(pgprotval_t, spte) + ), + + TP_fast_assign( + __entry->kvm_gen = kvm_gen; + __entry->spte_gen = spte_gen; + __entry->spte = pgprot_val(spte); + ), + + TP_printk("spte %lx kvm_gen %x spte-gen %x valid %d", __entry->spte, + __entry->kvm_gen, __entry->spte_gen, + __entry->kvm_gen == __entry->spte_gen + ) +); +#endif /* _TRACE_KVMMMU_E2K_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../arch/e2k/kvm +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE mmutrace-e2k + +/* This part must be outside protection */ +#include diff --git a/arch/e2k/kvm/nid.c b/arch/e2k/kvm/nid.c new file mode 100644 index 
000000000000..40cc1c4933cc --- /dev/null +++ b/arch/e2k/kvm/nid.c @@ -0,0 +1,218 @@ +/* + * Generic guest pidhash and scalable, time-bounded NID allocator + * + * Based on simplified kernel/pid.c + */ + +#include +#include +#include +#include +#include + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#define BITS_PER_PAGE (PAGE_SIZE * 8) +#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1) + +static inline int mk_nid(struct kvm_nid_table *nid_table, + kvm_nidmap_t *map, int off) +{ + return (map - nid_table->nidmap)*BITS_PER_PAGE + off; +} + +#define find_next_offset(map, off) \ + find_next_zero_bit((map)->page, BITS_PER_PAGE, off) + +static void free_nidmap(int nr, struct kvm_nid_table *nid_table) +{ + kvm_nidmap_t *map = nid_table->nidmap + nr / BITS_PER_PAGE; + int offset = nr & BITS_PER_PAGE_MASK; + + DebugKVM("started for NID %d\n", nr); + clear_bit(offset, map->page); + atomic_inc(&map->nr_free); +} + +static int alloc_nidmap(struct kvm_nid_table *nid_table) +{ + int i, offset, max_scan, nid; + int last = nid_table->last_nid; + int nid_max_limit = nid_table->nid_max_limit; + int reserved_nids = nid_table->reserved_nids; + kvm_nidmap_t *map; + + DebugKVM("started\n"); + nid = last + 1; + if (nid >= nid_max_limit) + nid = reserved_nids; + offset = nid & BITS_PER_PAGE_MASK; + map = &nid_table->nidmap[nid/BITS_PER_PAGE]; + max_scan = (nid_max_limit + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset; + for (i = 0; i <= max_scan; ++i) { + if (unlikely(!map->page)) { + void *page = kzalloc(PAGE_SIZE, GFP_KERNEL); + /* + * Free the page if someone raced with us + * installing it: + */ + raw_spin_lock_irq(&nid_table->nidmap_lock); + if (!map->page) { + map->page = page; + page = NULL; + } + raw_spin_unlock_irq(&nid_table->nidmap_lock); + kfree(page); + if (unlikely(!map->page)) + break; + } + if 
(likely(atomic_read(&map->nr_free))) { + do { + if (!test_and_set_bit(offset, map->page)) { + atomic_dec(&map->nr_free); + nid_table->last_nid = nid; + DebugKVM("returns NID %d\n", nid); + return nid; + } + offset = find_next_offset(map, offset); + nid = mk_nid(nid_table, map, offset); + /* + * find_next_offset() found a bit, the nid from it + * is in-bounds, and if we fell back to the last + * bitmap block and the final block was the same + * as the starting point, nid is before last_nid. + */ + } while (offset < BITS_PER_PAGE && + nid < nid_max_limit && + (i != max_scan || nid < last || + !((last+1) & BITS_PER_PAGE_MASK))); + } + if (map < &nid_table->nidmap[(nid_max_limit-1) / + BITS_PER_PAGE]) { + ++map; + offset = 0; + } else { + map = &nid_table->nidmap[0]; + offset = reserved_nids; + if (unlikely(last == offset)) + break; + } + nid = mk_nid(nid_table, map, offset); + } + return -1; +} + +void kvm_do_free_nid(kvm_nid_t *nid, struct kvm_nid_table *nid_table) +{ + DebugKVM("started\n"); + hlist_del(&nid->nid_chain); + + free_nidmap(nid->nr, nid_table); +} + +void kvm_free_nid(kvm_nid_t *nid, struct kvm_nid_table *nid_table) +{ + /* We can be called with write_lock_irq(&tasklist_lock) held */ + unsigned long flags; + + DebugKVM("started\n"); + raw_spin_lock_irqsave(&nid_table->nidmap_lock, flags); + kvm_do_free_nid(nid, nid_table); + raw_spin_unlock_irqrestore(&nid_table->nidmap_lock, flags); +} + +int kvm_alloc_nid(struct kvm_nid_table *nid_table, kvm_nid_t *nid) +{ + int nr; + + DebugKVM("started\n"); + + nr = alloc_nidmap(nid_table); + if (nr < 0) + return nr; + + nid->nr = nr; + DebugKVM("allocated NID %d structure at %px\n", nr, nid); + + raw_spin_lock_irq(&nid_table->nidmap_lock); + hlist_add_head(&nid->nid_chain, + &nid_table->nid_hash[nid_hashfn(nr, + nid_table->nid_hash_bits)]); + raw_spin_unlock_irq(&nid_table->nidmap_lock); + return 0; +} + +/* + * The nid hash table is scaled according to the amount of memory in the + * machine. 
From a minimum of 16 slots up to 4096 slots at one gigabyte or + * more. + */ +static void nidhash_init(struct kvm_nid_table *nid_table) +{ + int i; + + for (i = 0; i < nid_table->nid_hash_size; i++) + INIT_HLIST_HEAD(&nid_table->nid_hash[i]); +} + +int kvm_nidmap_init(struct kvm_nid_table *nid_table, + int nid_max_limit, int reserved_nids, int last_nid) +{ + int entry; + + DebugKVM("started\n"); + raw_spin_lock_init(&nid_table->nidmap_lock); + + for (entry = 0; entry < nid_table->nidmap_entries; entry++) { + atomic_set(&nid_table->nidmap[entry].nr_free, BITS_PER_PAGE); + nid_table->nidmap[entry].page = NULL; + } + nid_table->nidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (nid_table->nidmap[0].page == NULL) { + pr_err("kvm_nidmap_init() could not allocate first page " + "for NID map\n"); + return -ENOMEM; + } + + nid_table->nid_max_limit = nid_max_limit; + nid_table->reserved_nids = reserved_nids; + nid_table->last_nid = last_nid; + nid_table->nid_cachep = NULL; + nidhash_init(nid_table); + + return 0; +} + +static void nidmap_release(struct kvm_nid_table *nid_table) +{ + kvm_nidmap_t *map; + int entry; + + DebugKVM("started\n"); + for (entry = 0; entry < nid_table->nidmap_entries; entry++) { + map = &nid_table->nidmap[entry]; + if (atomic_read(&map->nr_free) != BITS_PER_PAGE) { + printk(KERN_WARNING "nidmap_release() mapping #%d is " + "not empty, only %d entries from %ld is free\n", + entry, atomic_read(&map->nr_free), + BITS_PER_PAGE); + } + if (map->page != NULL) { + kfree(map->page); + map->page = NULL; + } + } +} + +void kvm_nidmap_destroy(struct kvm_nid_table *nid_table) +{ + DebugKVM("started\n"); + nidmap_release(nid_table); +} diff --git a/arch/e2k/kvm/page_track.c b/arch/e2k/kvm/page_track.c new file mode 100644 index 000000000000..314865a87694 --- /dev/null +++ b/arch/e2k/kvm/page_track.c @@ -0,0 +1,257 @@ +/* + * Support KVM gust page tracking + * + * This feature allows to track page access in guest. Currently, only + * write access is tracked. 
+ * + * Based on arch/x86/kvm/page_track.c of Intel Corporation. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#include +#include +#include + +#include "mmu.h" + +void kvm_page_track_free_memslot(struct kvm_memory_slot *free, + struct kvm_memory_slot *dont) +{ + int i; + + for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) + if (!dont || free->arch.gfn_track[i] != + dont->arch.gfn_track[i]) { + kvfree(free->arch.gfn_track[i]); + free->arch.gfn_track[i] = NULL; + } +} + +int kvm_page_track_create_memslot(struct kvm_memory_slot *slot, + unsigned long npages) +{ + int i; + + for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) { + slot->arch.gfn_track[i] = kvzalloc(npages * + sizeof(*slot->arch.gfn_track[i]), + GFP_KERNEL); + if (!slot->arch.gfn_track[i]) + goto track_free; + } + + return 0; + +track_free: + kvm_page_track_free_memslot(slot, NULL); + return -ENOMEM; +} + +static inline bool page_track_mode_is_valid(enum kvm_page_track_mode mode) +{ + if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX) + return false; + + return true; +} + +static void update_gfn_track(struct kvm *kvm, struct kvm_memory_slot *slot, + gfn_t gfn, enum kvm_page_track_mode mode, short count) +{ + int index, val; + + index = kvm_gfn_to_index(kvm, gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL); + + val = slot->arch.gfn_track[mode][index]; + + if (WARN_ON(val + count < 0 || val + count > USHRT_MAX)) + return; + + slot->arch.gfn_track[mode][index] += count; +} + +/* + * add guest page to the tracking pool so that corresponding access on that + * page will be intercepted. + * + * It should be called under the protection both of mmu-lock and kvm->srcu + * or kvm->slots_lock. + * + * @kvm: the guest instance we are interested in. + * @slot: the @gfn belongs to. + * @gfn: the guest page. + * @mode: tracking mode, currently only write track is supported. 
+ */ +void kvm_slot_page_track_add_page(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn, + enum kvm_page_track_mode mode) +{ + + if (WARN_ON(!page_track_mode_is_valid(mode))) + return; + + update_gfn_track(kvm, slot, gfn, mode, 1); + + /* + * new track stops large page mapping for the + * tracked page. + */ + kvm_mmu_gfn_disallow_lpage(kvm, slot, gfn); + + if (mode == KVM_PAGE_TRACK_WRITE) + if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn)) + kvm_flush_remote_tlbs(kvm); +} + +/* + * remove the guest page from the tracking pool which stops the interception + * of corresponding access on that page. It is the opposed operation of + * kvm_slot_page_track_add_page(). + * + * It should be called under the protection both of mmu-lock and kvm->srcu + * or kvm->slots_lock. + * + * @kvm: the guest instance we are interested in. + * @slot: the @gfn belongs to. + * @gfn: the guest page. + * @mode: tracking mode, currently only write track is supported. + */ +void kvm_slot_page_track_remove_page(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn, + enum kvm_page_track_mode mode) +{ + if (WARN_ON(!page_track_mode_is_valid(mode))) + return; + + update_gfn_track(kvm, slot, gfn, mode, -1); + + /* + * allow large page mapping for the tracked page + * after the tracker is gone. + */ + kvm_mmu_gfn_allow_lpage(kvm, slot, gfn); +} + +/* + * check if the corresponding access on the specified guest page is tracked. 
+ */ +bool kvm_page_track_is_active(struct kvm *kvm, struct kvm_memory_slot *slot, + gfn_t gfn, enum kvm_page_track_mode mode) +{ + int index; + + if (WARN_ON(!page_track_mode_is_valid(mode))) + return false; + + if (!slot) + return false; + + index = kvm_gfn_to_index(kvm, gfn, slot->base_gfn, + PT_PAGE_TABLE_LEVEL); + return !!READ_ONCE(slot->arch.gfn_track[mode][index]); +} + +void kvm_page_track_cleanup(struct kvm *kvm) +{ + struct kvm_page_track_notifier_head *head; + + head = &kvm->arch.track_notifier_head; + cleanup_srcu_struct(&head->track_srcu); +} + +void kvm_page_track_init(struct kvm *kvm) +{ + struct kvm_page_track_notifier_head *head; + + head = &kvm->arch.track_notifier_head; + init_srcu_struct(&head->track_srcu); + INIT_HLIST_HEAD(&head->track_notifier_list); +} + +/* + * register the notifier so that event interception for the tracked guest + * pages can be received. + */ +void +kvm_page_track_register_notifier(struct kvm *kvm, + struct kvm_page_track_notifier_node *n) +{ + struct kvm_page_track_notifier_head *head; + + head = &kvm->arch.track_notifier_head; + + spin_lock(&kvm->mmu_lock); + hlist_add_head_rcu(&n->node, &head->track_notifier_list); + spin_unlock(&kvm->mmu_lock); +} + +/* + * stop receiving the event interception. It is the opposed operation of + * kvm_page_track_register_notifier(). + */ +void +kvm_page_track_unregister_notifier(struct kvm *kvm, + struct kvm_page_track_notifier_node *n) +{ + struct kvm_page_track_notifier_head *head; + + head = &kvm->arch.track_notifier_head; + + spin_lock(&kvm->mmu_lock); + hlist_del_rcu(&n->node); + spin_unlock(&kvm->mmu_lock); + synchronize_srcu(&head->track_srcu); +} + +/* + * Notify the node that write access is intercepted and write emulation is + * finished at this time. + * + * The node should figure out if the written page is the one that node is + * interested in by itself. 
+ */ +void kvm_page_track_write(struct kvm_vcpu *vcpu, struct gmm_struct *gmm, + gpa_t gpa, const u8 *new, int bytes) +{ + struct kvm_page_track_notifier_head *head; + struct kvm_page_track_notifier_node *n; + int idx; + + head = &vcpu->kvm->arch.track_notifier_head; + + if (hlist_empty(&head->track_notifier_list)) + return; + + idx = srcu_read_lock(&head->track_srcu); + hlist_for_each_entry_rcu(n, &head->track_notifier_list, node) + if (n->track_write) + n->track_write(vcpu, gmm, gpa, new, bytes); + srcu_read_unlock(&head->track_srcu, idx); +} + +/* + * Notify the node that memory slot is being removed or moved so that it can + * drop write-protection for the pages in the memory slot. + * + * The node should figure out it has any write-protected pages in this slot + * by itself. + */ +void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot) +{ + struct kvm_page_track_notifier_head *head; + struct kvm_page_track_notifier_node *n; + int idx; + + head = &kvm->arch.track_notifier_head; + + if (hlist_empty(&head->track_notifier_list)) + return; + + idx = srcu_read_lock(&head->track_srcu); + hlist_for_each_entry_rcu(n, &head->track_notifier_list, node) + if (n->track_flush_slot) + n->track_flush_slot(kvm, slot, n); + srcu_read_unlock(&head->track_srcu, idx); +} diff --git a/arch/e2k/kvm/paging_tmpl.h b/arch/e2k/kvm/paging_tmpl.h new file mode 100644 index 000000000000..2985d09ccfce --- /dev/null +++ b/arch/e2k/kvm/paging_tmpl.h @@ -0,0 +1,2286 @@ +/* + * Kernel-based Virtual Machine driver for Linux + * + * This module enables machines with e2k hardware virtualization extensions + * to run virtual machines without emulation or binary translation. + * + * Based on x86 MMU virtualization ideas and sources: + * arch/x86/kvm/mmu.c + * arch/x86/kvm/mmu.h + * arch/x86/kvm/paging_tmpl.h + * + * Copyright 2018 MCST. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ * + */ + +/* + * We need the mmu code to access both 32-bit and 64-bit guest ptes, + * so the code in this file is compiled twice, once per pte size. + */ + +/* + * This is used to catch non optimized PT_GUEST_(DIRTY|ACCESS)_SHIFT macro + * uses for EPT without A/D paging type. + */ +extern u64 __pure __using_nonexistent_pte_bit(void) + __compiletime_error("wrong use of PT_GUEST_(DIRTY|ACCESS)_SHIFT"); + +#if PTTYPE == PTTYPE_E2K + #define pt_element_t pgprotval_t + #define guest_walker guest_walker_e2k + #define FNAME(name) e2k_##name + #define PT_GUEST_ACCESSED_MASK PT_ACCESSED_MASK + #define PT_GUEST_DIRTY_MASK PT_DIRTY_MASK + #define PT_MAX_FULL_LEVELS E2K_PT_LEVELS_NUM + #define CMPXCHG cmpxchg64 +#elif PTTYPE == 64 + #define pt_element_t u64 + #define guest_walker guest_walker64 + #define FNAME(name) paging##64_##name + #define PT_GUEST_ACCESSED_MASK PT_ACCESSED_MASK + #define PT_GUEST_DIRTY_MASK PT_DIRTY_MASK + #ifdef CONFIG_X86_64 + #define PT_MAX_FULL_LEVELS 4 + #define CMPXCHG cmpxchg + #else + #define CMPXCHG cmpxchg64 + #define PT_MAX_FULL_LEVELS 2 + #endif +#elif PTTYPE == 32 + #define pt_element_t u32 + #define guest_walker guest_walker32 + #define FNAME(name) paging##32_##name + #define PT_MAX_FULL_LEVELS 2 + #define PT_GUEST_ACCESSED_MASK PT_ACCESSED_MASK + #define PT_GUEST_DIRTY_MASK PT_DIRTY_MASK + #define CMPXCHG cmpxchg +#elif PTTYPE == PTTYPE_EPT + #define pt_element_t u64 + #define guest_walker guest_walkerEPT + #define FNAME(name) ept_##name + #define PT_GUEST_ACCESSED_MASK 0 + #define PT_GUEST_DIRTY_MASK 0 + #define CMPXCHG cmpxchg64 + #define PT_MAX_FULL_LEVELS 4 +#else + #error Invalid PTTYPE value +#endif + +#define gpte_to_gfn_level_index FNAME(gpte_to_gfn_level_index) +#define gpte_to_gfn_level_address FNAME(gpte_to_gfn_level_address) +#define gpte_to_gfn_ind(_vcpu_, ind, pte, _pts_) \ + gpte_to_gfn_level_index(_vcpu_, ind, pte, \ + &(_pts_)->levels[PT_PAGE_TABLE_LEVEL]) +#define gpte_to_gfn_addr(_vcpu_, addr, pte, _pts_) \ + 
gpte_to_gfn_level_address(_vcpu_, addr, pte, \ + &(_pts_)->levels[PT_PAGE_TABLE_LEVEL]) + +/* + * The guest_walker structure emulates the behavior of the hardware page + * table walker. + */ +typedef struct guest_walker { + int level; + unsigned max_level; + const pt_struct_t *pt_struct; + const pt_level_t *pt_level; + gfn_t table_gfn[PT_MAX_FULL_LEVELS]; + pt_element_t ptes[PT_MAX_FULL_LEVELS]; + pt_element_t prefetch_ptes[PTE_PREFETCH_NUM]; + gpa_t pte_gpa[PT_MAX_FULL_LEVELS]; + pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS]; + bool pte_writable[PT_MAX_FULL_LEVELS]; + unsigned pt_access; + unsigned pte_access; + u64 pte_cui; + gfn_t gfn; + gva_t gva; + kvm_arch_exception_t fault; +} guest_walker_t; + +static gfn_t gpte_to_gfn_level_index(struct kvm_vcpu *vcpu, unsigned index, + pt_element_t gpte, const pt_level_t *pt_level) +{ + gpa_t gpa; + unsigned ptes; + + gpa = kvm_gpte_gfn_to_phys_addr(vcpu, __pgprot(gpte)); + gpa &= get_pt_level_mask(pt_level); + + /* + * common case: 1 page <-> 1 PT entry, + * but there is exclusion: + * e2c+ 4 Mb page <-> 2 PT entries + */ + ptes = get_pt_level_page_size(pt_level) >> get_pt_level_shift(pt_level); + gpa += get_pt_level_size(pt_level) * (index & (ptes - 1)); + return gpa_to_gfn(gpa); +} +static gfn_t gpte_to_gfn_level_address(struct kvm_vcpu *vcpu, gva_t address, + pt_element_t gpte, const pt_level_t *pt_level) +{ + unsigned index; + + index = get_pt_level_addr_index(address, pt_level); + return gpte_to_gfn_level_index(vcpu, index, gpte, pt_level); +} + +static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte) +{ + unsigned mask; + + /* dirty bit is not supported, so no need to track it */ + if (!PT_GUEST_DIRTY_MASK) + return; + + BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK); + + mask = (unsigned)~ACC_WRITE_MASK; + /* Allow write access to dirty gptes */ + if (gpte & PT_GUEST_DIRTY_MASK) + mask |= ACC_WRITE_MASK; + *access &= mask; +} + +static inline bool FNAME(is_present_gpte)(unsigned long 
pte) +{ +#if PTTYPE != PTTYPE_EPT + return pte & PT_PRESENT_MASK; +#else + return pte & 7; +#endif +} + +static inline bool FNAME(is_unmapped_gpte)(struct kvm_vcpu *vcpu, + unsigned long pte) +{ + return pte == get_gpte_unmapped_mask(vcpu); +} + +static inline bool FNAME(is_only_valid_gpte)(struct kvm_vcpu *vcpu, + unsigned long pte) +{ + return pte == get_gpte_valid_mask(vcpu); +} + +static inline bool FNAME(is_valid_gpte)(struct kvm_vcpu *vcpu, + unsigned long pte) +{ + return !!(pte & get_gpte_valid_mask(vcpu)); +} + +static inline bool FNAME(is_present_or_valid_gpte)(struct kvm_vcpu *vcpu, + unsigned long pte) +{ + return FNAME(is_present_gpte) || FNAME(is_only_valid_gpte); +} + +static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, + pt_element_t __user *ptep_user, unsigned index, + pt_element_t orig_pte, pt_element_t new_pte) +{ + int npages; + pt_element_t ret; + pt_element_t *table; + struct page *page; + + npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page); + /* Check if the user is doing something meaningless. */ + if (unlikely(npages != 1)) + return -EFAULT; + + table = kmap_atomic(page); + ret = CMPXCHG(&table[index], orig_pte, new_pte); + kunmap_atomic(table); + + kvm_release_page_dirty(page); + + return (ret != orig_pte); +} + +static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu, + struct kvm_mmu_page *sp, pgprot_t *spte, + u64 gpte) +{ + if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) + goto no_present; + + if (!FNAME(is_present_gpte)(gpte)) + goto no_present; + + /* if accessed bit is not supported prefetch non accessed gpte */ + if (PT_GUEST_ACCESSED_MASK && !(gpte & PT_GUEST_ACCESSED_MASK)) + goto no_present; + + return false; + +no_present: + drop_spte(vcpu->kvm, spte); + if (FNAME(is_only_valid_gpte(vcpu, gpte))) + return false; + return true; +} + +/* + * For PTTYPE_EPT, a page table can be executable but not readable + * on supported processors. 
Therefore, set_spte does not automatically + * set bit 0 if execute only is supported. Here, we repurpose ACC_USER_MASK + * to signify readability since it isn't used in the EPT case + */ +static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte) +{ + unsigned access; +#if PTTYPE == PTTYPE_EPT + access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) | + ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) | + ((gpte & VMX_EPT_READABLE_MASK) ? ACC_USER_MASK : 0); +#else + BUILD_BUG_ON(ACC_EXEC_MASK != PT_PRESENT_MASK); + BUILD_BUG_ON(ACC_EXEC_MASK != 1); + access = gpte & (PT_WRITABLE_MASK | PT_PRESENT_MASK); + access |= ((gpte & get_gpte_mode_mask(vcpu)) ? ACC_USER_MASK : 0); + + /* Combine NX with P (which is set here) to get ACC_EXEC_MASK. */ + if (gpte & get_gpte_nx_mask(vcpu)) + access ^= ACC_EXEC_MASK; + + /* e2k arch can have protection bit 'priv' instead of 'user', */ + /* so it need invert access permition */ + if (get_gpte_priv_mask(vcpu)) + access ^= ACC_USER_MASK; +#endif + + return access; +} + +static inline u64 FNAME(gpte_cui)(u64 gpte) +{ + return !cpu_has(CPU_FEAT_ISET_V6) ? 
+ _PAGE_INDEX_FROM_CUNIT_V2(gpte) : 0; +} + +static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, + struct kvm_mmu *mmu, + guest_walker_t *walker, + int write_fault) +{ + unsigned level, index; + pt_element_t pte, orig_pte; + pt_element_t __user *ptep_user; + gfn_t table_gfn; + int ret; + + /* dirty/accessed bits are not supported, so no need to update them */ + if (!PT_GUEST_DIRTY_MASK) + return 0; + + for (level = walker->max_level; level >= walker->level; --level) { + pte = walker->ptes[level - 1]; + orig_pte = pte; + table_gfn = walker->table_gfn[level - 1]; + ptep_user = walker->ptep_user[level - 1]; + index = offset_in_page(ptep_user) / sizeof(pt_element_t); + if (!(pte & PT_GUEST_ACCESSED_MASK)) { + trace_kvm_mmu_set_accessed_bit(table_gfn, index, + sizeof(pte)); + pte |= PT_GUEST_ACCESSED_MASK; + } + if (level == walker->level && write_fault && + !(pte & PT_GUEST_DIRTY_MASK)) { + trace_kvm_mmu_set_dirty_bit(table_gfn, index, + sizeof(pte)); + pte |= PT_GUEST_DIRTY_MASK; + } + if (pte == orig_pte) + continue; + + /* + * If the slot is read-only, simply do not process the accessed + * and dirty bits. This is the correct thing to do if the slot + * is ROM, and page tables in read-as-ROM/write-as-MMIO slots + * are only supported if the accessed and dirty bits are already + * set in the ROM (so that MMIO writes are never needed). + * + * Note that NPT does not allow this at all and faults, since + * it always wants nested page table entries for the guest + * page tables to be writable. And EPT works but will simply + * overwrite the read-only memory to set the accessed and dirty + * bits. 
+ */ + if (unlikely(!walker->pte_writable[level - 1])) + continue; + + ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, + orig_pte, pte); + if (ret) + return ret; + + kvm_vcpu_mark_page_dirty(vcpu, table_gfn); + walker->ptes[level - 1] = pte; + } + return 0; +} + +static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte) +{ + unsigned pkeys = 0; +#if PTTYPE == 64 + pte_t pte = {.pte = gpte}; + + pkeys = pte_flags_pkey(pte_flags(pte)); +#endif + return pkeys; +} + +/* + * Fetch a guest pte for a guest virtual address + */ +static int FNAME(walk_addr_generic)(guest_walker_t *walker, + struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, + gva_t addr, u32 access) +{ + int ret; + int level; + const pt_level_t *pt_level; + pt_element_t pte; + u64 pte_cui; + pt_element_t __user *uninitialized_var(ptep_user); + gfn_t table_gfn; + unsigned index, pt_access, pte_access, accessed_dirty, pte_pkey; + gpa_t pte_gpa; + int offset; + const int write_fault = access & PFERR_WRITE_MASK; + const int user_fault = access & PFERR_USER_MASK; + const int fetch_fault = access & PFERR_FETCH_MASK; + const int access_size = PFRES_GET_ACCESS_SIZE(access); + u16 errcode = 0; + gpa_t real_gpa; + gfn_t gfn; + + trace_kvm_mmu_pagetable_walk(addr, access); + DebugSPF("address 0x%lx, fault: write %d user %d fetch %d\n", + addr, write_fault, user_fault, fetch_fault); +retry_walk: + walker->level = mmu->root_level; + walker->pt_struct = kvm_get_vcpu_pt_struct(vcpu); + walker->pt_level = &walker->pt_struct->levels[mmu->root_level]; + walker->fault.error_code_valid = false; + walker->fault.error_code = 0; + pte = kvm_get_space_addr_guest_root(vcpu, addr); + DebugSPF("root pte 0x%lx\n", pte); + +#if PTTYPE == 64 + if (walker->level == PT32E_ROOT_LEVEL) { + pte = mmu->get_vcpu_pdpte(vcpu, (addr >> 30) & 3); + trace_kvm_mmu_paging_element(pte, walker->level); + if (!FNAME(is_present_gpte)(pte)) + goto error; + --walker->level; + --walker->pt_level; + } +#endif + walker->max_level = 
walker->level; + + ASSERT(is_ss(vcpu) && !(is_long_mode(vcpu) && !is_pae(vcpu))); + + accessed_dirty = PT_GUEST_ACCESSED_MASK; + + pt_access = ACC_ALL; + pte_access = ACC_ALL; + + while (true) { + gfn_t real_gfn; + unsigned long host_addr; + + level = walker->level; + pt_level = walker->pt_level; + +#if PTTYPE == PTTYPE_E2K + /* + * protections PT directories entries and page entries are + * independent for e2k arch, for example ptds always + * privileged and non-executable. ptes do not inherit ptds + * protections aoutomaticaly and can have own protection, + * for example executable and/or user pages + */ + pt_access = pte_access; + pte_access = ACC_ALL; +#else /* x86 */ + pt_access &= pte_access; +#endif /* PTTYPE_E2K */ + + index = get_pt_level_addr_index(addr, walker->pt_level); + table_gfn = gpte_to_gfn_ind(vcpu, 0, pte, walker->pt_struct); + offset = index * sizeof(pt_element_t); + pte_gpa = gfn_to_gpa(table_gfn) + offset; + + DebugSPF("guest PT level #%d addr 0x%lx index 0x%x " + "offset 0x%x gpa of pte 0x%llx\n", + level, addr, index, offset, pte_gpa); + + BUG_ON(level < 1); + walker->table_gfn[level - 1] = table_gfn; + walker->pte_gpa[level - 1] = pte_gpa; + + real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn), + PFERR_USER_MASK|PFERR_WRITE_MASK, + &walker->fault); + DebugSPF("table gfn 0x%llx, gpa: table 0x%llx real 0x%llx\n", + table_gfn, gfn_to_gpa(table_gfn), real_gpa); + + /* + * FIXME: This can happen if emulation (for of an INS/OUTS + * instruction) triggers a nested page fault. The exit + * qualification / exit info field will incorrectly have + * "guest page access" as the nested page fault's cause, + * instead of "guest page structure access". To fix this, + * the x86_exception struct should be augmented with enough + * information to fix the exit_qualification or exit_info_1 + * fields. 
+ */ + if (unlikely(arch_is_error_gpa(real_gpa))) + return 0; + + real_gfn = gpa_to_gfn(real_gpa); + + host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, real_gfn, + &walker->pte_writable[level - 1]); + DebugSPF("real gfn 0x%llx, host addr 0x%lx\n", + real_gfn, host_addr); + if (unlikely(kvm_is_error_hva(host_addr))) + goto error; + + ptep_user = (pt_element_t __user *)((void *)host_addr + offset); + if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) + goto error; + walker->ptep_user[level - 1] = ptep_user; + DebugSPF("level #%d guest pte %px = 0x%lx\n", + level, ptep_user, pte); + + trace_kvm_mmu_paging_element(__pgprot(pte), level); + + if (unlikely(!FNAME(is_present_gpte)(pte))) { + if (FNAME(is_unmapped_gpte(vcpu, pte))) { + errcode |= PFERR_IS_UNMAPPED_MASK; + } else if (FNAME(is_only_valid_gpte(vcpu, pte))) { + errcode |= PFERR_ONLY_VALID_MASK; + } else if (FNAME(is_valid_gpte(vcpu, pte))) { + errcode |= PFERR_ONLY_VALID_MASK; + } + goto error; + } + + if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) { + errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK; + goto error; + } + DebugSPF("guest pte is present and has not reserved bits\n"); + + accessed_dirty &= pte; + + pte_cui = FNAME(gpte_cui)(pte); +#if PTTYPE == PTTYPE_E2K + /* protections PT directories entries and page entries are */ + /* independent for e2k arch, see full comment above */ + pte_access &= FNAME(gpte_access)(vcpu, pte); +#else /* x86 */ + pte_access &= pt_access & FNAME(gpte_access)(vcpu, pte); +#endif /* PTTYPE_E2K */ + + walker->ptes[level - 1] = pte; + + if (is_last_gpte(mmu, level, pte)) + break; + + --walker->level; + --walker->pt_level; + } + + pte_pkey = FNAME(gpte_pkeys)(vcpu, pte); + DebugSPF("pte: access 0x%x, pkey 0x%x, pt access 0x%x, errcode 0x%x\n", + pte_access, pte_pkey, pt_access, errcode); + errcode = permission_fault(vcpu, mmu, pte_access, pte_pkey, access); + if (unlikely(errcode)) + goto error; + + if (!(access & (PFERR_WRITE_MASK | PFERR_WAIT_LOCK_MASK | + 
PFERR_INSTR_FAULT_MASK | PFERR_INSTR_PROT_MASK)) && + !(access & PFERR_FAPB_MASK) && + !(pte_access & ACC_WRITE_MASK)) { + /* + * Try read from write protected page (by gpte). + * Probably there is(are) before some write(s) to this page + * injected as page fault(s) for guest and it need + * to pre-handle this(these) faulted write(s). + * Such loads should be injected for guest too + */ + if (check_injected_stores_to_addr(vcpu, addr, access_size)) { + /* there is(are) such store(s) to same load address */ + errcode |= PFERR_READ_PROT_MASK; + DebugRPROT("found read addr 0x%lx croses previous " + "store to same page\n", + addr); + goto error; + } + } + + gfn = gpte_to_gfn_level_address(vcpu, addr, pte, walker->pt_level); + gfn += (addr & get_pt_level_offset(walker->pt_level)) >> PAGE_SHIFT; + + real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, + &walker->fault); + if (arch_is_error_gpa(real_gpa)) + return 0; + DebugSPF("level #%d gfn from guest pte 0x%llx, gpa 0x%llx " + "real gpa 0x%llx\n", + level, gfn, gfn_to_gpa(gfn), real_gpa); + + walker->gfn = real_gpa >> PAGE_SHIFT; + + if (!write_fault) { + FNAME(protect_clean_gpte)(&pte_access, pte); + DebugSPF("not write fault, so clean guest pte, " + "new pte access 0x%x\n", + pte_access); + } else { + /* + * On a write fault, fold the dirty bit into accessed_dirty. + * For modes without A/D bits support accessed_dirty will be + * always clear. 
+ */ + if (!(pte & PT_GUEST_DIRTY_MASK)) + accessed_dirty &= ~PT_GUEST_ACCESSED_MASK; + DebugSPF("on write fault, accessed dirty 0x%x\n", + accessed_dirty); + } + + if (unlikely(!accessed_dirty)) { + ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, + write_fault); + DebugSPF("not accessed dirty, update dirty bits returned %d\n", + ret); + if (unlikely(ret < 0)) + goto error; + else if (ret) + goto retry_walk; + } + + walker->pt_access = pt_access; + walker->pte_access = pte_access; + walker->pte_cui = pte_cui; + pgprintk("%s: pte %llx pte_access %x pt_access %x\n", + __func__, (u64)pte, pte_access, pt_access); + return 1; + +error: + errcode |= write_fault | user_fault; + if (fetch_fault && (mmu->nx || is_smep(vcpu))) + errcode |= PFERR_FETCH_MASK; + + walker->fault.error_code_valid = true; + walker->fault.error_code = errcode; + +#if PTTYPE == PTTYPE_EPT + /* + * Use PFERR_RSVD_MASK in error_code to to tell if EPT + * misconfiguration requires to be injected. The detection is + * done by is_rsvd_bits_set() above. + * + * We set up the value of exit_qualification to inject: + * [2:0] - Derive from [2:0] of real exit_qualification at EPT violation + * [5:3] - Calculated by the page walk of the guest EPT page tables + * [7:8] - Derived from [7:8] of real exit_qualification + * + * The other bits are set to 0. 
+ */ + if (!(errcode & PFERR_RSVD_MASK)) { + vcpu->arch.exit_qualification &= 0x187; + vcpu->arch.exit_qualification |= ((pt_access & pte) & 0x7) << 3; + } +#endif + walker->fault.address = addr; + + trace_kvm_mmu_walker_error(walker->fault.error_code); + DebugSPF("returns error code 0x%x, for addr 0x%lx\n", + errcode, addr); + return 0; +} + +static int FNAME(walk_addr)(guest_walker_t *walker, + struct kvm_vcpu *vcpu, gva_t addr, u32 access) +{ + return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr, + access); +} + +static bool +FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, + pgprot_t *spte, pt_element_t gpte, bool no_dirty_log) +{ + unsigned pte_access; + gfn_t gfn; + kvm_pfn_t pfn; + bool gfn_only_valid = false; + u64 pte_cui; + int ret; + + if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte)) + return false; + + pgprintk("%s: gpte %llx spte %px\n", __func__, (u64)gpte, spte); + + if (FNAME(is_only_valid_gpte(vcpu, gpte))) { + gfn_only_valid = true; + gfn = 0; + pfn = 0; + pte_access = 0; + goto write_spte; + } + + gfn = gpte_to_gfn_ind(vcpu, 0, gpte, kvm_get_vcpu_pt_struct(vcpu)); + pte_cui = FNAME(gpte_cui)(gpte); +#if PTTYPE == PTTYPE_E2K + pte_access = FNAME(gpte_access)(vcpu, gpte); + FNAME(protect_clean_gpte)(&pte_access, gpte); + ret = try_atomic_pf(vcpu, gfn, &pfn, + no_dirty_log && (pte_access & ACC_WRITE_MASK)); +#else /* native e2k */ + pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); + FNAME(protect_clean_gpte)(&pte_access, gpte); + pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn, + no_dirty_log && (pte_access & ACC_WRITE_MASK)); +#endif /* CONFIG_X86_HW_VIRTUALIZATION */ + if (likely(ret == TRY_PF_NO_ERR)) { + /* valid guest gfn rmapped to pfn */ + } else if (ret == TRY_PF_ONLY_VALID_ERR) { + /* gfn with only valid flag */ + gfn_only_valid = true; + } else if (ret == TRY_PF_MMIO_ERR) { + /* gfn is from MMIO space, but not registered on host */ + DebugSYNCV("gfn 0x%llx is from MMIO space, but not " + 
"registered on host\n", + gfn); + } else if (ret < 0) { + pr_err("%s(): gfn 0x%llx is inavlid, error %d\n", + __func__, gfn, ret); + return false; + } else { + KVM_BUG_ON(true); + } + +write_spte: + /* + * we call mmu_set_spte() with host_writable = true because + * pte_prefetch_gfn_to_pfn always gets a writable pfn. + */ + mmu_set_spte(vcpu, spte, pte_access, 0, PT_PAGE_TABLE_LEVEL, gfn, pfn, + true, true, gfn_only_valid, pte_cui); + + return true; +} + +static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, + pgprot_t *spte, const void *pte) +{ + pt_element_t gpte = *(const pt_element_t *)pte; + + FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false); +} + +static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu, + guest_walker_t *gw, int level) +{ + pt_element_t curr_pte; + gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1]; + u64 mask; + int r, index; + + if (level == PT_PAGE_TABLE_LEVEL) { + mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1; + base_gpa = pte_gpa & ~mask; + index = (pte_gpa - base_gpa) / sizeof(pt_element_t); + + r = kvm_vcpu_read_guest_atomic(vcpu, base_gpa, + gw->prefetch_ptes, sizeof(gw->prefetch_ptes)); + curr_pte = gw->prefetch_ptes[index]; + } else + r = kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, + &curr_pte, sizeof(curr_pte)); + DebugSPF("level #%d gpte gpa 0x%llx pte cur 0x%lx old 0x%lx\n", + level, pte_gpa, curr_pte, gw->ptes[level - 1]); + + return r || curr_pte != gw->ptes[level - 1]; +} + +static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, guest_walker_t *gw, + pgprot_t *sptep) +{ + struct kvm_mmu_page *sp; + pt_element_t *gptep = gw->prefetch_ptes; + pgprot_t *spte; + int i; + + sp = page_header(__pa(sptep)); + + if (sp->role.level > PT_PAGE_TABLE_LEVEL) + return; + + if (sp->role.direct) + return __direct_pte_prefetch(vcpu, sp, sptep); + + i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1); + spte = sp->spt + i; + + for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) { + if (spte == sptep) + continue; + + if 
(is_shadow_present_pte(vcpu->kvm, *spte)) + continue; + + if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true)) + break; + } +} + +/* + * Walk a shadow PT levels up to the all present levels in the paging hierarchy. + */ +static int e2k_walk_shadow_pts(struct kvm_vcpu *vcpu, gva_t addr, + kvm_shadow_trans_t *st, hpa_t spt_root) +{ + kvm_shadow_walk_iterator_t it; + int top_level; + top_level = vcpu->arch.mmu.root_level; + + KVM_BUG_ON(!VALID_PAGE(kvm_get_space_addr_spt_root(vcpu, addr))); + + DebugWSPT("started for guest addr 0x%lx\n", addr); + + st->last_level = E2K_PT_LEVELS_NUM + 1; + st->addr = addr; + + for ((!IS_E2K_INVALID_PAGE(spt_root)) ? + shadow_pt_walk_init(&it, vcpu, spt_root, addr) + : + shadow_walk_init(&it, vcpu, addr); + shadow_walk_okay(&it); + shadow_walk_next(&it)) { + st->pt_entries[it.level].sptep = it.sptep; + st->pt_entries[it.level].spte = *it.sptep; + DebugWSPT("shadow PT level #%d addr 0x%llx index 0x%x " + "sptep %px\n", + it.level, it.shadow_addr, it.index, it.sptep); + if (likely(is_shadow_present_pte(vcpu->kvm, *it.sptep))) { + st->last_level = it.level; + continue; + } else if (is_shadow_valid_pte(vcpu->kvm, *it.sptep)) { + st->last_level = it.level; + break; + } + break; + } + return it.level; +} + +/* + * Fetch a shadow PT levels up to the specified level in the paging hierarchy. + */ +static int FNAME(fetch_shadow_pts)(struct kvm_vcpu *vcpu, gva_t addr, + kvm_shadow_walk_iterator_t *it, + gmm_struct_t *gmm, hpa_t spt_root, + int down_to_level, guest_walker_t *gw) +{ + struct kvm_mmu_page *sp = NULL; + int top_level; + + top_level = vcpu->arch.mmu.root_level; + if (top_level == PT32E_ROOT_LEVEL) + top_level = PT32_ROOT_LEVEL; + /* + * Verify that the top-level gpte is still there. Since the page + * is a root page, it is either write protected (and cannot be + * changed from now on) or it is invalid (in which case, we don't + * really care if it changes underneath us after this point). 
+ */ + if (FNAME(gpte_changed)(vcpu, gw, top_level)) + goto out_gpte_changed; + + if (!VALID_PAGE(kvm_get_space_addr_spt_root(vcpu, addr))) + goto out_gpte_changed; + + DebugSPF("started for guest addr 0x%lx gfn 0x%llx down to level %d\n", + addr, gw->gfn, down_to_level); + + for ((!IS_E2K_INVALID_PAGE(spt_root)) ? + shadow_pt_walk_init(it, vcpu, spt_root, addr) + : + shadow_walk_init(it, vcpu, addr); + shadow_walk_okay(it) && it->level > down_to_level; + shadow_walk_next(it)) { + gfn_t table_gfn; + + DebugSPF("shadow PT level #%d addr 0x%llx index 0x%x " + "sptep %px\n", + it->level, it->shadow_addr, it->index, it->sptep); + clear_sp_write_flooding_count(it->sptep); + drop_large_spte(vcpu, it->sptep); + + sp = NULL; + if (!is_shadow_present_pte(vcpu->kvm, *it->sptep)) { + table_gfn = gw->table_gfn[it->level - 2]; + sp = kvm_mmu_get_page(vcpu, table_gfn, addr, + it->level - 1, false, gw->pt_access, + is_shadow_valid_pte(vcpu->kvm, *it->sptep)); + DebugSPF("allocated shadow page at %px, " + "guest table gfn 0x%llx\n", + sp, table_gfn); + } + + /* + * Verify that the gpte in the page we've just write + * protected is still there. + */ + if (FNAME(gpte_changed)(vcpu, gw, it->level - 1)) + goto out_gpte_changed; + + if (sp) { + link_shadow_page(vcpu, gmm, it->sptep, sp); + DebugSPF("level #%d: linked shadow pte %px == 0x%lx\n", + it->level, it->sptep, pgprot_val(*it->sptep)); + } + } + return 0; + +out_gpte_changed: + return 1; +} + +/* + * Fetch a shadow pte for a specific level in the paging hierarchy. + * If the guest tries to write a write-protected page, we need to + * emulate this operation, return 1 to indicate this case. 
+ */ +static pf_res_t FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, + guest_walker_t *gw, hpa_t spt_root, + int error_code, int hlevel, + kvm_pfn_t pfn, bool map_writable, bool prefault, + bool only_validate, bool not_prefetch) +{ + struct kvm_mmu_page *sp = NULL; + struct kvm_shadow_walk_iterator it; + unsigned direct_access; + bool write_fault = !!(error_code & PFERR_WRITE_MASK); + gmm_struct_t *gmm; + pf_res_t emulate; + + DebugTOVM("started for guest addr 0x%lx pfn 0x%llx level %d\n", + addr, pfn, hlevel); + + gmm = kvm_get_page_fault_gmm(vcpu, error_code); + + if (FNAME(fetch_shadow_pts)(vcpu, addr, &it, gmm, spt_root, + gw->level, gw)) + goto out_gpte_changed; + + direct_access = gw->pte_access; + + for (; + shadow_walk_okay(&it) && it.level > hlevel; + shadow_walk_next(&it)) { + gfn_t direct_gfn; + + DebugTOVM("shadow PT level #%d addr 0x%llx index 0x%x " + "sptep %px\n", + it.level, it.shadow_addr, it.index, it.sptep); + clear_sp_write_flooding_count(it.sptep); + validate_direct_spte(vcpu, it.sptep, direct_access); + + drop_large_spte(vcpu, it.sptep); + + DebugTOVM("shadow spte %px == 0x%lx\n", + it.sptep, pgprot_val(*it.sptep)); + if (is_shadow_present_pte(vcpu->kvm, *it.sptep)) + continue; + + direct_gfn = gw->gfn & + ~(KVM_PT_LEVEL_PAGES_PER_HPAGE(it.pt_level) - 1); + + sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1, + true, direct_access, + is_shadow_valid_pte(vcpu->kvm, *it.sptep)); + link_shadow_page(vcpu, gmm, it.sptep, sp); + DebugTOVM("allocated shadow page at %px for direct " + "gfn 0x%llx, direct access %s\n", + sp, direct_gfn, (direct_access) ? 
"true" : "false"); + DebugTOVM("level #%d: linked shadow pte %px == 0x%lx\n", + it.level, it.sptep, pgprot_val(*it.sptep)); + } + + clear_sp_write_flooding_count(it.sptep); + emulate = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, + it.level, gw->gfn, pfn, prefault, map_writable, + only_validate, gw->pte_cui); + if (!not_prefetch) + FNAME(pte_prefetch)(vcpu, gw, it.sptep); + DebugTOVM("set shadow spte %px == 0x%lx, emulate %d\n", + it.sptep, pgprot_val(*it.sptep), emulate); + + return emulate; + +out_gpte_changed: + if (!is_mmio_space_pfn(pfn)) + kvm_release_pfn_clean(pfn); + return PFRES_RETRY; +} + + /* + * To see whether the mapped gfn can write its page table in the current + * mapping. + * + * It is the helper function of FNAME(page_fault). When guest uses large page + * size to map the writable gfn which is used as current page table, we should + * force kvm to use small page size to map it because new shadow page will be + * created when kvm establishes shadow page table that stop kvm using large + * page size. Do it early can avoid unnecessary #PF and emulation. + * + * @write_fault_to_shadow_pgtable will return true if the fault gfn is + * currently used as its page table. + * + * Note: the PDPT page table is not checked for PAE-32 bit guest. It is ok + * since the PDPT is always shadowed, that means, we can not use large page + * size to map the gfn which is used as PDPT. 
+ */ +static bool +FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu, + guest_walker_t *walker, int user_fault, + bool *write_fault_to_shadow_pgtable) +{ + int level; + gfn_t mask = ~(KVM_PT_LEVEL_PAGES_PER_HPAGE(walker->pt_level) - 1); + bool self_changed = false; + + if (!(walker->pte_access & ACC_WRITE_MASK || + (!is_write_protection(vcpu) && !user_fault))) + return false; + + for (level = walker->level; level <= walker->max_level; level++) { + gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1]; + + self_changed |= !(gfn & mask); + *write_fault_to_shadow_pgtable |= !gfn; + } + + return self_changed; +} + +/* + * Page fault handler. There are several causes for a page fault: + * - there is no shadow pte for the guest pte + * - write access through a shadow pte marked read only so that we can set + * the dirty bit + * - write access to a shadow pte marked read only so we can update the page + * dirty bitmap, when userspace requests it + * - mmio access; in this case we will never install a present shadow pte + * - normal guest page fault due to the guest pte marked not present, not + * writable, or not executable + * + * Returns: 1 if we need to emulate the instruction, 0 otherwise, or + * a negative value on error. + */ +static pf_res_t FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, + u32 error_code, bool prefault, + gfn_t *gfnp, kvm_pfn_t *pfnp) +{ + int write_fault = error_code & PFERR_WRITE_MASK; + int user_fault = error_code & PFERR_USER_MASK; + guest_walker_t walker; + pf_res_t r; + int ret; + kvm_pfn_t pfn; + int level = PT_PAGE_TABLE_LEVEL; + bool force_pt_level = false; + bool map_writable, is_self_change_mapping; +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER + intc_mu_state_t *mu_state = get_intc_mu_state(vcpu); +#endif /* KVM_ARCH_WANT_MMU_NOTIFIER */ + + pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code); + + ret = mmu_topup_memory_caches(vcpu); + if (ret) + return PFRES_ERR; + + /* + * If PFEC.RSVD is set, this is a shadow page fault. 
+ * The bit needs to be cleared before walking guest page tables. + */ + error_code &= ~PFERR_RSVD_MASK; + + /* + * Look up the guest pte for the faulting address. + */ + ret = FNAME(walk_addr)(&walker, vcpu, addr, error_code); + + /* + * The page is not mapped by the guest. Let the guest handle it. + */ + if (!ret) { + pgprintk("%s: guest page fault\n", __func__); + if (!prefault) + inject_page_fault(vcpu, &walker.fault); + + return PFRES_INJECTED; + } + + if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) { + shadow_page_table_clear_flood(vcpu, addr); + DebugSPF("page fault can not be fixed by handler: guest is " + "writing the page which is write tracked\n"); + if (pfnp == NULL) + return PFRES_WRITE_TRACK; + is_self_change_mapping = true; + } else { + is_self_change_mapping = false; + } + + vcpu->arch.write_fault_to_shadow_pgtable = is_self_change_mapping; + + is_self_change_mapping |= FNAME(is_self_change_mapping)(vcpu, + &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable); + DebugSPF("is_self_change_mapping %s\n", + (is_self_change_mapping) ? 
"true" : "false"); + + if (walker.level >= PT_DIRECTORY_LEVEL && !is_self_change_mapping) { + level = mapping_level(vcpu, walker.gfn, &force_pt_level); + DebugSPF("mapping level %d force level %d\n", + level, force_pt_level); + if (likely(!force_pt_level)) { + const pt_level_t *pt_level; + + level = min(walker.level, level); + pt_level = &walker.pt_struct->levels[level]; + walker.gfn = walker.gfn & + ~(KVM_PT_LEVEL_PAGES_PER_HPAGE(pt_level) - 1); + DebugSPF("level is now %d gfn 0x%llx\n", + level, walker.gfn); + } + } else { + force_pt_level = true; + } + +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER + mu_state->notifier_seq = vcpu->kvm->mmu_notifier_seq; + smp_rmb(); +#endif /* KVM_ARCH_WANT_MMU_NOTIFIER */ + + if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault, + &map_writable)) + return PFRES_NO_ERR; + DebugSPF("try_async_pf returned pfn 0x%llx, writable %d\n", + pfn, map_writable); + + if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, + walker.pte_access, &r)) { + if (pfnp != NULL) + *pfnp = pfn; + DebugSPF("returns %d and abnormal pfn 0x%llx\n", + r, pfn); + return r; + } + +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER + if (r == PFRES_TRY_MMIO) { + mu_state->may_be_retried = false; + } +#endif /* KVM_ARCH_WANT_MMU_NOTIFIER */ + + /* + * Do not change pte_access if the pfn is a mmio page, otherwise + * we will cache the incorrect access into mmio spte. + */ + if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) && + !is_write_protection(vcpu) && !user_fault && + !is_noslot_pfn(pfn)) { + walker.pte_access |= ACC_WRITE_MASK; + walker.pte_access &= ~ACC_USER_MASK; + + /* + * If we converted a user page to a kernel page, + * so that the kernel can write to it when cr0.wp=0, + * then we should prevent the kernel from executing it + * if SMEP is enabled. 
+ */ + if (is_smep(vcpu)) + walker.pte_access &= ~ACC_EXEC_MASK; + DebugSPF("updated pte_access 0x%x\n", walker.pte_access); + } + + spin_lock(&vcpu->kvm->mmu_lock); +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER + if (!mu_state->ignore_notifier && r != PFRES_TRY_MMIO && + mmu_notifier_retry(vcpu->kvm, mu_state->notifier_seq)) + goto out_unlock; +#endif /* KVM_ARCH_WANT_MMU_NOTIFIER */ + + kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT); + make_mmu_pages_available(vcpu); + if (!force_pt_level) + transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level); + r = FNAME(fetch)(vcpu, addr, &walker, E2K_INVALID_PAGE, error_code, + level, pfn, map_writable, prefault, false, false); + ++vcpu->stat.pf_fixed; + kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT); + spin_unlock(&vcpu->kvm->mmu_lock); + + if (gfnp != NULL) + *gfnp = walker.gfn; + if (pfnp != NULL) + *pfnp = pfn; + DebugSPF("returns %d, pfn 0x%llx\n", r, pfn); + return r; + +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER +out_unlock: + spin_unlock(&vcpu->kvm->mmu_lock); + kvm_release_pfn_clean(pfn); + KVM_BUG_ON(!mu_state->may_be_retried); + return PFRES_RETRY; +#endif /* KVM_ARCH_WANT_MMU_NOTIFIER */ +} + +/* + * Return next gva pointing to next pte on pt_level + */ +static gva_t pt_level_next_gva(gva_t gva, gva_t end_gva, + const pt_level_t *pt_level) +{ + gva_t boundary = (gva + pt_level->page_size) & pt_level->page_mask; + + return (boundary - 1 < end_gva - 1) ? 
boundary : end_gva; +} + +static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp) +{ + int offset = 0; + + WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL); + + if (PTTYPE == 32) + offset = sp->role.quadrant << PT64_LEVEL_BITS; + + return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t); +} + +static void FNAME(sync_gva)(struct kvm_vcpu *vcpu, gva_t gva) +{ + struct kvm_shadow_walk_iterator iterator; + struct kvm_mmu_page *sp; + int level; + pgprot_t *sptep; + + vcpu_clear_mmio_info(vcpu, gva); + + /* + * No need to check return value here, rmap_can_add() can + * help us to skip pte prefetch later. + */ + mmu_topup_memory_caches(vcpu); + + if (!VALID_PAGE(kvm_get_space_addr_spt_root(vcpu, gva))) { + WARN_ON(1); + return; + } + +retry_sync_gva: + + spin_lock(&vcpu->kvm->mmu_lock); + for_each_shadow_entry(vcpu, gva, iterator) { + level = iterator.level; + sptep = iterator.sptep; + + sp = page_header(__pa(sptep)); + if (is_last_spte(*sptep, level)) { + pt_element_t gpte; + gpa_t pte_gpa; + + if (!sp->unsync) + break; + + pte_gpa = FNAME(get_level1_sp_gpa)(sp); + pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t); + + if (mmu_page_zap_pte(vcpu->kvm, sp, sptep)) + kvm_flush_remote_tlbs(vcpu->kvm); + + if (!rmap_can_add(vcpu)) + break; + + if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte, + sizeof(pt_element_t))) { + + spin_unlock(&vcpu->kvm->mmu_lock); + + if (kvm_vcpu_read_guest(vcpu, pte_gpa, &gpte, + sizeof(pt_element_t))) + return; + else + goto retry_sync_gva; + } + + FNAME(update_pte)(vcpu, sp, sptep, &gpte); + } + + if (!is_shadow_present_pte(vcpu->kvm, *sptep) || + !sp->unsync_children) + break; + } + spin_unlock(&vcpu->kvm->mmu_lock); +} + +static bool FNAME(sync_gva_pte_range)(struct kvm_vcpu *vcpu, + struct kvm_shadow_walk_iterator *spt_walker, + gva_t gva_start, gva_t gva_end, + gva_t *retry_gva) +{ + struct kvm_mmu_page *sp; + int level; + const pt_level_t *spt_pt_level; + unsigned int pte_index; + pgprot_t *sptep, *spt_table_hva; + gva_t gva, 
gva_next; + + KVM_BUG_ON(gva_start > gva_end); + + /* Get descriptors of curr level of shadow page table */ + level = spt_walker->level; + spt_pt_level = spt_walker->pt_level; + KVM_BUG_ON(level < PT_PAGE_TABLE_LEVEL); + + /* Get index in curr level of shadow page table */ + pte_index = get_pt_level_addr_index(gva_start, spt_pt_level); + + /* hva of shadow pt entry in curr level */ + spt_table_hva = (pgprot_t *) __va(spt_walker->shadow_addr); + sptep = spt_table_hva + pte_index; + + sp = page_header(__pa(sptep)); + + gva = gva_start; + do { + gva_next = pt_level_next_gva(gva, gva_end, spt_pt_level); + + if (is_last_spte(*sptep, level)) { + /* + * This is last level pte (1-st level or large page) + */ + pt_element_t gpte; + gpa_t pte_gpa; + + vcpu_clear_mmio_info(vcpu, gva); + + if (!sp->unsync) + goto next_pte; + + pte_gpa = FNAME(get_level1_sp_gpa)(sp); + pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t); + + mmu_page_zap_pte(vcpu->kvm, sp, sptep); + + /* Read pte from guest table */ + if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte, + sizeof(pt_element_t))) { + + spin_unlock(&vcpu->kvm->mmu_lock); + + if (kvm_vcpu_read_guest(vcpu, pte_gpa, &gpte, + sizeof(pt_element_t))) + *retry_gva = gva_next; + else + *retry_gva = gva; + + return false; + } + + FNAME(update_pte)(vcpu, sp, sptep, &gpte); + } else { + bool ret; + + if (!is_shadow_present_pte(vcpu->kvm, *sptep) || + !sp->unsync_children) + goto next_pte; + + /* hpa of lower level of shadow page table */ + spt_walker->shadow_addr = + kvm_pte_pfn_to_phys_addr(*sptep, + spt_walker->pt_struct); + + /* Move iterators to lower level of page tables */ + spt_walker->level--; + spt_walker->pt_level--; + + ret = FNAME(sync_gva_pte_range)(vcpu, spt_walker, + gva, gva_next, retry_gva); + + /* + * Move iterators back to upper level + * of page tables + */ + spt_walker->level++; + spt_walker->pt_level++; + + if (!ret) + return ret; + } + +next_pte: + /* Go to next pt entry on curr level */ + sptep++; + gva = gva_next; + 
} while (gva != gva_end);
+
+	return true;
+}
+
+/*
+ * Sync the shadow PT for the guest virtual range [start, end], skipping
+ * the PT self-mapping (vptb) window.  Retries from the faulting gva when
+ * the range walker had to drop mmu_lock.
+ */
+static void FNAME(sync_gva_range)(struct kvm_vcpu *vcpu,
+				gva_t start, gva_t end,
+				bool flush_tlb)
+{
+	struct kvm_shadow_walk_iterator spt_walker;
+	hpa_t spt_root;
+	e2k_addr_t vptb_start, vptb_size, vptb_mask, vptb_end;
+	int top_level;
+	const pt_struct_t *vcpu_pt = kvm_get_vcpu_pt_struct(vcpu);
+	gva_t retry_gva;
+	bool sync_range1, sync_range2;
+
+	/* Get hpa of shadow page table root */
+	spt_root = kvm_get_space_addr_spt_root(vcpu, start);
+	if (!VALID_PAGE(spt_root)) {
+		WARN_ON(1);
+		return;
+	}
+
+	/* Get top level number for spt */
+	top_level = vcpu->arch.mmu.root_level;
+	if (top_level == PT32E_ROOT_LEVEL)
+		top_level = PT32_ROOT_LEVEL;
+
+	/* Get gva range of page table self-mapping */
+	if (is_sep_virt_spaces(vcpu))
+		vptb_start = vcpu->arch.mmu.get_vcpu_sh_u_vptb(vcpu);
+	else
+		vptb_start = vcpu->arch.mmu.get_vcpu_sh_u_vptb(vcpu); /* FIXME: both branches are identical; if separate OS/user virtual spaces are meant to use a different (OS) vptb getter here, this is a copy-paste bug — confirm against the mmu ops table */
+
+	vptb_size = get_pt_level_size(&vcpu_pt->levels[top_level]);
+	vptb_mask = get_pt_level_mask(&vcpu_pt->levels[top_level]);
+	vptb_start &= vptb_mask;
+	vptb_end = vptb_start + vptb_size - 1;
+
+	/*
+	 * Use simplified function sync_gva to flush single address
+	 * which does not hit into vptb range
+	 */
+	if ((start == end) && (start < vptb_start || start >= vptb_end)) {
+		FNAME(sync_gva)(vcpu, start);
+		goto flush_cpu_tlb;
+	}
+
+retry_sync_gva_range:
+
+	sync_range1 = true;
+	sync_range2 = true;
+
+	spin_lock(&vcpu->kvm->mmu_lock);
+	/* explicit parentheses: && over || was relying on precedence */
+	if ((start < vptb_start && end < vptb_start) ||
+			(start >= vptb_end && end >= vptb_end)) {
+		/* flushed gva range doesn't overlap vptb range */
+		shadow_pt_walk_init(&spt_walker, vcpu, spt_root, start);
+		sync_range1 = FNAME(sync_gva_pte_range)(vcpu, &spt_walker,
+						start, end, &retry_gva);
+	} else if (start < vptb_start && end >= vptb_start &&
+			end < vptb_end) {
+		/* end part of flushed gva range overlaps vptb range */
+		shadow_pt_walk_init(&spt_walker, vcpu, spt_root, start);
+		sync_range1 = FNAME(sync_gva_pte_range)(vcpu, &spt_walker,
+
start, vptb_start, &retry_gva); + } else if (end > vptb_end && start >= vptb_start && + start < vptb_end) { + /* start part of flushed gva range overlaps vptb range */ + shadow_pt_walk_init(&spt_walker, vcpu, spt_root, vptb_end); + sync_range1 = FNAME(sync_gva_pte_range)(vcpu, &spt_walker, + vptb_end, end, &retry_gva); + } else if (start < vptb_start && end >= vptb_end) { + /* flushed gva range contains vptb range */ + shadow_pt_walk_init(&spt_walker, vcpu, spt_root, start); + sync_range1 = FNAME(sync_gva_pte_range)(vcpu, &spt_walker, + start, vptb_start, &retry_gva); + + if (sync_range1) { + shadow_pt_walk_init(&spt_walker, vcpu, spt_root, + vptb_end); + sync_range2 = FNAME(sync_gva_pte_range)(vcpu, + &spt_walker, vptb_end, + end, &retry_gva); + } + } + /* Do nothing if vptb range contains flushed gva range */ + + if (!sync_range1 || !sync_range2) { + start = retry_gva; + goto retry_sync_gva_range; + } + + spin_unlock(&vcpu->kvm->mmu_lock); + + /* + * TODO: TLB flush here may be partial similarly to __flush_tlb_* + * in host kernel. + */ +flush_cpu_tlb: + if (flush_tlb) { + kvm_vcpu_flush_tlb(vcpu); + kvm_flush_remote_tlbs(vcpu->kvm); + } +} + +gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access, + kvm_arch_exception_t *exception) +{ + guest_walker_t walker; + gpa_t gpa = UNMAPPED_GVA; + int r; + + r = FNAME(walk_addr)(&walker, vcpu, vaddr, access); + + if (r) { + gpa = gfn_to_gpa(walker.gfn); + gpa |= vaddr & ~PAGE_MASK; + } else if (exception) { + *exception = walker.fault; + } + return gpa; +} + +/* + * Using the cached information from sp->gfns is safe because: + * - The spte has a reference to the struct page, so the pfn for a given gfn + * can't change unless all sptes pointing to it are nuked first. + * + * Note: + * We should flush all tlbs if spte is dropped even though guest is + * responsible for it. 
Since if we don't, kvm_mmu_notifier_invalidate_page + * and kvm_mmu_notifier_invalidate_range_start detect the mapping page isn't + * used by guest then tlbs are not flushed, so guest is allowed to access the + * freed pages. + * And we increase kvm->tlbs_dirty to delay tlbs flush in this case. + */ +static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) +{ + int i, nr_present = 0; + bool host_writable; + gpa_t first_pte_gpa; + u64 pte_cui; + + /* direct kvm_mmu_page can not be unsync. */ + BUG_ON(sp->role.direct); + + first_pte_gpa = FNAME(get_level1_sp_gpa)(sp); + + DebugSPF("sp %px level #%d first_pte_gpa 0x%llx\n", + sp, sp->role.level, first_pte_gpa); + for (i = 0; i < PT64_ENT_PER_PAGE; i++) { + unsigned pte_access; + pt_element_t gpte; + gpa_t pte_gpa; + gfn_t gfn; + + if (!pgprot_val(sp->spt[i])) + continue; + + pte_gpa = first_pte_gpa + i * sizeof(pt_element_t); + + if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte, + sizeof(pt_element_t))) + return 0; + + if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) { + /* + * Update spte before increasing tlbs_dirty to make + * sure no tlb flush is lost after spte is zapped; see + * the comments in kvm_flush_remote_tlbs(). 
+ */ + smp_wmb(); + vcpu->kvm->tlbs_dirty++; + continue; + } + if (FNAME(is_only_valid_gpte(vcpu, gpte))) { + set_spte(vcpu, &sp->spt[i], 0, + PT_PAGE_TABLE_LEVEL, 0, 0, + false, false, false, + true /* only validate */, 0); + nr_present++; + continue; + } + + gfn = gpte_to_gfn_ind(vcpu, 0, gpte, + kvm_get_vcpu_pt_struct(vcpu)); + pte_cui = FNAME(gpte_cui)(gpte); +#if PTTYPE == PTTYPE_E2K + /* protections PT directories entries and page entries are */ + /* independent for e2k arch, see full comment above */ + pte_access = FNAME(gpte_access)(vcpu, gpte); +#else /* x86 PTs */ + pte_access = sp->role.access; + pte_access &= FNAME(gpte_access)(vcpu, gpte); +#endif /* PTTYPE_E2K */ + FNAME(protect_clean_gpte)(&pte_access, gpte); + DebugSPF("pte_gpa 0x%llx == 0x%lx, gfn 0x%llx\n", + pte_gpa, gpte, gfn); + + if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access, + &nr_present)) + continue; + + if (gfn != sp->gfns[i]) { + drop_spte(vcpu->kvm, &sp->spt[i]); + /* + * The same as above where we are doing + * prefetch_invalid_gpte(). 
+ */ + smp_wmb(); + vcpu->kvm->tlbs_dirty++; + continue; + } + + nr_present++; + + host_writable = is_spte_host_writable_mask(vcpu->kvm, + sp->spt[i]); + + set_spte(vcpu, &sp->spt[i], pte_access, + PT_PAGE_TABLE_LEVEL, gfn, + spte_to_pfn(vcpu->kvm, sp->spt[i]), true, false, + host_writable, false, pte_cui); + DebugSPF("shadow spte %px == 0x%lx, gfn 0x%llx, pfn 0x%llx\n", + &sp->spt[i], pgprot_val(sp->spt[i]), gfn, + spte_to_pfn(vcpu->kvm, sp->spt[i])); + } + + return nr_present; +} + +/* + * Initialize guest page table iterator + */ +static void guest_pt_walk_init(guest_walker_t *guest_walker, + struct kvm_vcpu *vcpu, + pt_element_t guest_root) +{ + guest_walker->pt_struct = kvm_get_vcpu_pt_struct(vcpu); + guest_walker->level = vcpu->arch.mmu.root_level; + guest_walker->pt_level = + &guest_walker->pt_struct->levels[guest_walker->level]; + guest_walker->gfn = gpte_to_gfn_ind(vcpu, 0, guest_root, + guest_walker->pt_struct); + guest_walker->table_gfn[guest_walker->level] = guest_walker->gfn; + guest_walker->pt_access = ACC_ALL; + guest_walker->pte_access = ACC_ALL; + guest_walker->max_level = guest_walker->level; +} + +/* + * Allocate new spte + */ +static pf_res_t allocate_shadow_level(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + gfn_t table_gfn, + gva_t gva, int level, + unsigned int pt_access, + pgprot_t *spt_pte_hva, + int is_direct) +{ + struct kvm_mmu_page *sp = NULL; + + clear_sp_write_flooding_count(spt_pte_hva); + /* If this spte is the large page, then unmap it */ + drop_large_spte(vcpu, spt_pte_hva); + + /* If spte is not peresent, allocate it */ + if (!is_shadow_present_pte(vcpu->kvm, *spt_pte_hva)) { + sp = kvm_mmu_get_page(vcpu, table_gfn, gva, + level, is_direct, pt_access, + is_shadow_valid_pte(vcpu->kvm, *spt_pte_hva)); + if (!sp) { + DebugSYNC("Allocation of shadow page for spte 0x%lx" + " failed\n", spt_pte_hva); + return PFRES_ERR; + } + + link_shadow_page(vcpu, gmm, spt_pte_hva, sp); + DebugSYNC("allocated new shadow page with hpa 0x%llx, guest" + 
" table gfn 0x%llx, on level #%d, linked to spte" + " with hpa 0x%lx, hva 0x%lx on level #%d\n", + pgprot_val(*spt_pte_hva) & _PAGE_PFN_V2, table_gfn, + level, __pa(spt_pte_hva), spt_pte_hva, level + 1); + } else { + DebugSYNC("present shadow page with hpa 0x%llx, guest table" + " gfn 0x%llx, on level #%d, linked to spte with" + " hpa 0x%lx, hva 0x%lx on level #%d\n", + pgprot_val(*spt_pte_hva) & _PAGE_PFN_V2, table_gfn, + level, __pa(spt_pte_hva), spt_pte_hva, level + 1); + } + + return PFRES_NO_ERR; +} + +/* + * Try to convert gfn to pfn. If pfn is allocated on host, return valid pfn. + * If pfn is not allocated on host, do not fault and wait pfn allocation, + * set *valid_only = true instead. + */ +static void gfn_atomic_pf(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva, + unsigned int pte_access, kvm_pfn_t *pfn, + bool *valid_only) +{ + try_pf_err_t res; + *valid_only = false; + + DebugSYNC("gva 0x%lx -> gfn 0x%llx\n", gva, gfn); + + /* Get pfn for given gfn */ + res = try_atomic_pf(vcpu, gfn, pfn, true); + if (res == TRY_PF_ONLY_VALID_ERR) { + /* + * gfn is valid (same as hva of gfn on host), + * but pfn for gva has not yet been allocated + */ + *valid_only = true; + DebugSYNC("gfn 0x%llx is valid but pfn is not yet allocated" + " on host\n", gfn); + } else if (res == TRY_PF_MMIO_ERR) { + /* + * gfn is from MMIO space, but not + * registered on host + */ + DebugSYNC("gfn 0x%llx is from MMIO space, but not registered " + "on host\n", gfn); + } else { + /* + * gfn is valid and pfn is already allocated on host + */ + pf_res_t ret_val; + + DebugSYNC("try_atomic_pf returned valid pfn 0x%llx\n", *pfn); + if (handle_abnormal_pfn(vcpu, gva, gfn, *pfn, pte_access, + &ret_val)) { + KVM_BUG_ON(true); + } + } +} + +/* + * Create mapping for guest huge page in shadow page table. + * Split huge page into smaller shadow pages if needed. 
+ */ +static pf_res_t map_huge_page_to_spte(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + guest_walker_t *guest_walker, int level, + int split_to_level, pgprot_t *root_pte_hva) +{ + pf_res_t ret; + pgprot_t *pte_hva; + gfn_t table_gfn; + kvm_memory_slot_t *mem_slot; + kvm_pfn_t pfn = 0; + bool gfn_only_valid, is_guest_pt_area, force_pt_level = false; + + const pt_level_t *pt_level = &kvm_get_host_pt_struct(vcpu->kvm)->levels[ + level]; + const pt_struct_t *pt_struct = kvm_get_host_pt_struct(vcpu->kvm); + + /* Get number of sptes, which one spt level contains */ + int sptes_num = PAGE_SIZE / sizeof(pt_element_t); + int ind, split_page_size; + + KVM_BUG_ON(split_to_level > level); + + /* + * If we are already on level #1, then map it + */ + if (level == PT_PAGE_TABLE_LEVEL) + goto map; + + /* + * Check if gfn belongs to the area of guest page table, i.e write to + * this gfn will change guest page table itself. + */ + vcpu->arch.write_fault_to_shadow_pgtable = false; + is_guest_pt_area = FNAME(is_self_change_mapping)(vcpu, + guest_walker, false, + &vcpu->arch.write_fault_to_shadow_pgtable); + + /* + * If gfn belongs to the area of guest page table, then + * map it by pages on level 1 in shadow page table + */ + split_to_level = PT_PAGE_TABLE_LEVEL; + if (is_guest_pt_area) { + force_pt_level = true; + DebugSYNCV("gva 0x%lx (gfn = 0x%llx) belongs to guest pt " + "area, split guest page on level #%d into pages " + "on level #%d\n", + guest_walker->gva, guest_walker->gfn, + guest_walker->level, split_to_level); + } else { + /* + * Get max mapping level of this gfn in host + * page table (hva -> pfn) + */ + mem_slot = kvm_vcpu_gfn_to_memslot(vcpu, guest_walker->gfn); + force_pt_level = !memslot_valid_for_gpte(mem_slot, true); + DebugSYNCV("can split guest page on level #%d into pages on" + " level #%d , force = %s\n", level, + split_to_level, force_pt_level ? 
"yes" : "no"); + } + + + if (likely(!force_pt_level) && (level > PT_PAGE_TABLE_LEVEL)) { + transparent_hugepage_adjust(vcpu, &guest_walker->gfn, + &pfn, &split_to_level); + } + + DebugSYNCV("with thp map guest page on level #%d by pages on" + " level #%d\n", level, split_to_level); + + /* + * If we have achieved split_to_level, then simply assign pfn to spte. + * Otherwise, allocate lower level of spt and map all sptes, + * which it contains. + */ +map: + split_page_size = pt_struct->levels[split_to_level].page_size; + if (level == split_to_level) { + gfn_atomic_pf(vcpu, guest_walker->gfn, guest_walker->gva, + guest_walker->pte_access, &pfn, + &gfn_only_valid); + mmu_set_spte(vcpu, root_pte_hva, guest_walker->pte_access, + false, level, guest_walker->gfn, pfn, false, + true, gfn_only_valid, guest_walker->pte_cui); + guest_walker->gfn += (split_page_size >> PAGE_SHIFT); + guest_walker->gva += split_page_size; + return PFRES_NO_ERR; + } else { + table_gfn = guest_walker->gfn & + ~(KVM_PT_LEVEL_PAGES_PER_HPAGE(pt_level) - 1); + ret = allocate_shadow_level(vcpu, gmm, table_gfn, + guest_walker->gva, level - 1, + guest_walker->pte_access, + root_pte_hva, true); + if (ret) + return ret; + } + + /* Get host address of allocated spte */ + pte_hva = __va(kvm_pte_pfn_to_phys_addr(*root_pte_hva, pt_struct)); + + DebugSYNC("map guest page with gfn 0x%llx on level #%d to spte with" + " hpa 0x%lx by pages of level #%d\n", + guest_walker->gfn, level, __pa(root_pte_hva), split_to_level); + + /* + * Walk through all sptes, contained in this spte and + * map them. + */ + ind = 0; + do { + ret = map_huge_page_to_spte(vcpu, gmm, guest_walker, level - 1, + split_to_level, pte_hva); + if (ret) + return ret; + + pte_hva++; + ind++; + } while (ind < sptes_num); + + return PFRES_NO_ERR; +} + +/* + * Create mapping for gva range [start_gva, end_gva] in shadow page table + * in accordance with guest page table mapping. 
+ */ +static pf_res_t FNAME(sync_shadow_pte_range)(struct kvm_vcpu *vcpu, + gmm_struct_t *gmm, struct kvm_mmu *mmu, + gva_t start_gva, gva_t end_gva, + guest_walker_t *guest_walker, + kvm_shadow_walk_iterator_t *spt_walker, + gva_t *retry_gva) +{ + pf_res_t ret; + kvm_pfn_t pfn; + gva_t next_gva, gva; + gpa_t guest_table_gpa, guest_pte_gpa; + pt_element_t *guest_table_hva, *guest_pte_hva; + hpa_t spt_table_hpa, spt_pte_hpa; + pgprot_t *spt_table_hva, *spt_pte_hva; + pt_element_t guest_pte; + unsigned pte_index, pte_access; + int level; + const pt_level_t *guest_pt_level, *spt_pt_level; + bool gfn_only_valid, is_huge_page, is_lowest_level; + + DebugSYNC("called for gva range [0x%lx - 0x%lx]\n", start_gva, + end_gva); + + KVM_BUG_ON(start_gva >= end_gva); + + /* Get descriptors of curr level of guest and shadow page tables */ + level = guest_walker->level; + guest_pt_level = guest_walker->pt_level; + spt_pt_level = spt_walker->pt_level; + KVM_BUG_ON(level < PT_PAGE_TABLE_LEVEL); + + + /* Get index in curr level of guest and shadow page tables */ + pte_index = get_pt_level_addr_index(start_gva, guest_pt_level); + + /* gpa of curr level of guest page table */ + guest_table_gpa = gfn_to_gpa(guest_walker->gfn); + /* hva of curr level of guest page table */ + guest_table_hva = (pt_element_t *) kvm_vcpu_gfn_to_hva_prot(vcpu, + gpa_to_gfn(guest_table_gpa), + &guest_walker->pte_writable[level - 1]); + /* gpa of guest pt entry in curr level */ + guest_pte_gpa = guest_table_gpa + pte_index * sizeof(pt_element_t); + /* hva of guest pt entry in curr level */ + guest_pte_hva = guest_table_hva + pte_index; + /* Save table gfn in guest iterator */ + guest_walker->table_gfn[level - 1] = gpa_to_gfn(guest_table_gpa); + + DebugSYNC("guest level gpa 0x%llx, hva 0x%lx, level #%d, idx %d\n", + guest_table_gpa, guest_table_hva, level, pte_index); + + if (unlikely(kvm_is_error_hva((unsigned long) guest_table_hva))) + return PFRES_ERR; + + /* hpa of curr level of shadow page table */ + 
spt_table_hpa = spt_walker->shadow_addr; + /* hva of curr level of shadow page table */ + spt_table_hva = (pgprot_t *) __va(spt_table_hpa); + /* hpa of shadow pt entry in curr level */ + spt_pte_hpa = spt_table_hpa + pte_index * sizeof(pt_element_t); + /* hva of shadow pt entry in curr level */ + spt_pte_hva = spt_table_hva + pte_index; + spt_walker->index = pte_index; + spt_walker->sptep = spt_pte_hva; + + DebugSYNC("shadow level hpa 0x%llx, hva 0x%lx, level #%d, idx %d\n", + spt_table_hpa, spt_table_hva, level, pte_index); + + + gva = start_gva; + do { +#if PTTYPE == PTTYPE_E2K + /* + * Protections PT directories entries and page entries are + * independent for e2k arch. + */ + pte_access = ACC_ALL; +#else /* x86 PTs */ + pte_access = guest_walker->pte_access; +#endif /* PTTYPE_E2K */ + + next_gva = pt_level_next_gva(gva, end_gva, guest_pt_level); + + /* + * Read current pt entry from guest page table (user memory). + * We need to disable page fault, because we are in spinlock + * crictical section. + * If user page is mmaped and correct, then no page fault + * occures, read is successful, zero is returned. + * If user page was swapped out by host, then __copy_from_user + * returns non zero. Need to release spinlock, enable page + * fault and retry. If retry is successful, remember current + * addr and return PFRES_RETRY to run sync_shadow_pte_range + * again for address range [*retry_addr, gva_end]. + * If retry with enabled pagefault failed, then run + * sync_shadow_pte_range again for next pt entry on this + * level. + */ + pagefault_disable(); + if (unlikely(__copy_from_user(&guest_pte, + guest_pte_hva, sizeof(guest_pte)))) { + DebugSYNC("gpte with gva 0x%lx gpa 0x%llx" + " hva 0x%lx failed to read, retry with" + " enabled pagefault... 
", gva, guest_pte_gpa, + guest_pte_hva); + + spin_unlock(&vcpu->kvm->mmu_lock); + pagefault_enable(); + if (unlikely(__copy_from_user(&guest_pte, + guest_pte_hva, sizeof(guest_pte)))) { + DebugSYNC("failed retry, will run again with" + " start addr 0x%lx\n", next_gva); + *retry_gva = next_gva; + } else { + DebugSYNC("succed retry, will run again with" + " start_addr 0x%lx\n", gva); + *retry_gva = gva; + } + return PFRES_RETRY; + } + pagefault_enable(); + + /* Fullfill guest page table iterator for curr level */ + guest_walker->pte_gpa[level - 1] = guest_pte_gpa; + guest_walker->ptep_user[level - 1] = guest_pte_hva; + guest_walker->ptes[level - 1] = guest_pte; + guest_walker->gfn = gpte_to_gfn_ind(vcpu, 0, guest_pte, + guest_walker->pt_struct); + guest_walker->gva = gva; + + /* + * If gpte is marked as only valid, then + * it will be further allocated (during pagefault) + * Mark spte as only valid too. + */ + if (FNAME(is_only_valid_gpte)(vcpu, guest_pte)) { + DebugSYNCV("gpte with gpa 0x%llx hva 0x%lx," + " gva 0x%lx, level #%d is only valid, mark" + " it as only valid in shadow page table and" + " go to next pte on this level\n", + guest_pte_gpa, guest_pte_hva, gva, level); + mmu_set_spte(vcpu, spt_pte_hva, pte_access, false, + level, guest_walker->gfn, 0, false, + true, true, 0); + goto next_pte; + } + + /* If guest pt entry is not present, then skip it */ + if (unlikely(!FNAME(is_present_gpte)(guest_pte))) { + DebugSYNCV("gpte with gpa 0x%llx hva 0x%lx," + " gva 0x%lx, level #%d is not present, go" + " to next gpte on this level\n", + guest_pte_gpa, guest_pte_hva, gva, level); + goto next_pte; + } + + if (unlikely(is_rsvd_bits_set(mmu, guest_pte, level))) { + DebugSYNCV("guest pt entry gpa 0x%llx hva 0x%lx," + " gva 0x%lx, level #%d is reserved, go to" + " next pte on this level\n", + guest_pte_gpa, guest_pte_hva, gva, level); + goto next_pte; + } + + /* Get access rights for guest pt entry */ + pte_access &= FNAME(gpte_access)(vcpu, guest_pte); + 
FNAME(protect_clean_gpte)(&pte_access, guest_pte); + guest_walker->pt_access = guest_walker->pte_access; + guest_walker->pte_access = pte_access; + guest_walker->pte_cui = FNAME(gpte_cui)(guest_pte); + + /* Check if current pt entry is huge page */ + is_huge_page = (guest_pte & PT_PAGE_SIZE_MASK) && + (level >= PT_DIRECTORY_LEVEL); + + /* Check if the lowest possible level achieved */ + is_lowest_level = (level == PT_PAGE_TABLE_LEVEL); + + DebugSYNC("correct gpte with gpa 0x%llx hva 0x%lx," + " gpte val 0x%lx, gva 0x%lx, level #%d %s\n", + guest_pte_gpa, guest_pte_hva, guest_pte, gva, level, + is_huge_page ? "huge page" : ""); + + + if (is_lowest_level) { + gfn_only_valid = false; + gfn_atomic_pf(vcpu, guest_walker->gfn, gva, + pte_access, &pfn, &gfn_only_valid); + /* Set pfn in spte */ + mmu_set_spte(vcpu, spt_pte_hva, pte_access, false, + level, guest_walker->gfn, pfn, false, + true, gfn_only_valid, guest_walker->pte_cui); + } else if (is_huge_page) { + /* Map huge page to spte */ + ret = map_huge_page_to_spte(vcpu, gmm, guest_walker, + level, level, spt_pte_hva); + + if (ret) + return ret; + + if (mmu_need_topup_memory_caches(vcpu)) { + DebugSYNCV("need fill mmu caches, run again" + " with gva 0x%lx\n", gva); + spin_unlock(&vcpu->kvm->mmu_lock); + *retry_gva = gva; + return PFRES_RETRY; + } + } else { + /* Allocate lower level in shadow page table */ + ret = allocate_shadow_level(vcpu, gmm, + guest_walker->gfn, + gva, level - 1, pte_access, + (pgprot_t *) spt_pte_hva, false); + if (ret) + return ret; + + if (mmu_need_topup_memory_caches(vcpu)) { + DebugSYNCV("need fill mmu caches, run again" + " with gva 0x%lx\n", gva); + spin_unlock(&vcpu->kvm->mmu_lock); + *retry_gva = gva; + return PFRES_RETRY; + } + + /* hpa of lower level of shadow page table */ + spt_walker->shadow_addr = + kvm_pte_pfn_to_phys_addr(*spt_pte_hva, + spt_walker->pt_struct); + + /* Move iterators to lower level of page tables */ + guest_walker->level--; + guest_walker->pt_level--; + 
spt_walker->level--; + spt_walker->pt_level--; + + /* Sync lower-level pt range */ + ret = FNAME(sync_shadow_pte_range)(vcpu, gmm, mmu, + gva, next_gva, guest_walker, + spt_walker, retry_gva); + + /* + * Move iterators back to upper level + * of page tables + */ + guest_walker->level++; + guest_walker->pt_level++; + spt_walker->level++; + spt_walker->pt_level++; + + if (ret) + return ret; + } + +next_pte: + /* Go to next pt entry on curr level */ + guest_pte_gpa += sizeof(pt_element_t); + guest_pte_hva++; + spt_pte_hpa += sizeof(pt_element_t); + spt_pte_hva++; + gva = next_gva; + } while (gva != end_gva); + + return PFRES_NO_ERR; +} + +static int do_sync_shadow_pt_range(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + hpa_t spt_root, pt_element_t guest_root, + gva_t start, gva_t end) +{ + pf_res_t pfres; + guest_walker_t guest_walker; + kvm_shadow_walk_iterator_t spt_walker; +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER + unsigned long mmu_seq; +#endif /* KVM_ARCH_WANT_MMU_NOTIFIER */ + gva_t gva_retry, gva_start, gva_end; + + DebugSYNC("started on VCPU #%d : shadow root at 0x%llx, range" + " from 0x%lx to 0x%lx\n", + vcpu->vcpu_id, spt_root, start, end); + + gva_retry = start; + gva_end = end; + +retry: + if (mmu_topup_memory_caches(vcpu)) + return -ENOMEM; +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER + mmu_seq = vcpu->kvm->mmu_notifier_seq; + /* FIXME: Do we really need barrier here? */ + smp_rmb(); +#endif /* KVM_ARCH_WANT_MMU_NOTIFIER */ + /* Acquire mmu_lock to modify shadow page table */ + spin_lock(&vcpu->kvm->mmu_lock); +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER + if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) { + /* + * ??? If modification of shadow page table by another + * thread is not completed, than release mmu_lock and + * retry. 
+ */ + DebugSYNC("mmu_notifier_retry...\n"); + spin_unlock(&vcpu->kvm->mmu_lock); + cond_resched(); + goto retry; + } +#endif /* KVM_ARCH_WANT_MMU_NOTIFIER */ + + /* Start with address, which caused retry */ + gva_start = gva_retry; + + DebugSYNC("sync gva range [0x%lx - 0x%lx]\n", gva_start, gva_end); + + /* Initialize iterator to pass through shadow page table */ + shadow_pt_walk_init(&spt_walker, vcpu, spt_root, gva_start); + + /* Initialize iterator to pass through guest page table */ + guest_pt_walk_init(&guest_walker, vcpu, guest_root); + + /* Sync page tables starting from pgd ranges */ + pfres = FNAME(sync_shadow_pte_range)(vcpu, gmm, &vcpu->arch.mmu, + gva_start, gva_end, &guest_walker, + &spt_walker, &gva_retry); + if (pfres == PFRES_RETRY) { + cond_resched(); + goto retry; + } else if (pfres != PFRES_NO_ERR) { + spin_unlock(&vcpu->kvm->mmu_lock); + pr_err("%s(): failed, error #%d\n", __func__, pfres); + return -EFAULT; + } + + spin_unlock(&vcpu->kvm->mmu_lock); + + DebugSYNC("succed for gva range [0x%lx - 0x%lx]\n", start, end); + + return 0; +} + +static int FNAME(sync_shadow_pt_range)(struct kvm_vcpu *vcpu, gmm_struct_t *gmm, + hpa_t spt_root, gva_t start, gva_t end, + gpa_t guest_pptb, gva_t vptb) +{ + const pt_struct_t *host_pt = kvm_get_host_pt_struct(vcpu->kvm); + const pt_struct_t *vcpu_pt = kvm_get_vcpu_pt_struct(vcpu); + pt_element_t guest_root; + e2k_addr_t guest_root_host_addr; + e2k_addr_t vptb_start, vptb_mask, vptb_size; + int top_level; + bool pte_writable; + gva_t gva_start, gva_end; + int ret; + + DebugSYNC("started on VCPU #%d : shadow root at 0x%llx, range " + "from 0x%lx to 0x%lx, vptb = 0x%lx\n", vcpu->vcpu_id, + spt_root, start, end, vptb); + + /* Get and check address of guest page table root */ + if (likely(!IS_E2K_INVALID_PAGE(guest_pptb))) + guest_root = guest_pptb; + else + guest_root = kvm_get_space_addr_guest_root(vcpu, start); + + guest_root_host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, + guest_root >> PAGE_SHIFT, + 
&pte_writable); + if (unlikely(kvm_is_error_hva(guest_root_host_addr))) { + pr_err("%s(): guest PT base address 0x%lx is invalid\n", + __func__, guest_root_host_addr); + return -EINVAL; + } + /* Check address of shadow page table root */ + if (!VALID_PAGE(kvm_get_space_addr_spt_root(vcpu, start))) + return -EINVAL; + + /* Check if top_level has correct value */ + top_level = vcpu->arch.mmu.root_level; + if (top_level == PT32E_ROOT_LEVEL) + top_level = PT32_ROOT_LEVEL; + KVM_BUG_ON(top_level < PT_DIRECTORY_LEVEL); + + vptb_start = vptb; + vptb_mask = get_pt_level_mask(&vcpu_pt->levels[top_level]); + vptb_size = get_pt_level_size(&vcpu_pt->levels[top_level]); + vptb_start &= vptb_mask; + + gva_start = start; + gva_end = end; + if (gva_start >= vptb_start && gva_start < vptb_start + vptb_size) + gva_start = vptb_start + vptb_size; + if (gva_end > vptb_start) + gva_end = vptb_start; + do { + /* exclude VPTB page from sync */ + + DebugPTSYNC("VCPU #%d : sync range from 0x%lx to 0x%lx\n", + vcpu->vcpu_id, gva_start, gva_end); + ret = do_sync_shadow_pt_range(vcpu, gmm, spt_root, guest_root, + gva_start, gva_end); + if (ret != 0) + return ret; + + if (gva_start >= vptb_start + vptb_size) + break; + if (gva_end >= end) + break; + if (vptb_start + vptb_size >= end) + break; + gva_start = vptb_start + vptb_size; + gva_end = end; + + } while (true); + + return ret; +} + +int FNAME(shadow_pt_protection_fault)(struct kvm_vcpu *vcpu, + struct gmm_struct *gmm, gpa_t addr, kvm_mmu_page_t *sp) +{ + gva_t start_gva, end_gva, vptb; + hpa_t root_hpa; + gpa_t guest_root; + int r; + unsigned index; + const pt_struct_t *gpt; + const pt_level_t *gpt_level; + int level; + + DebugPTE("SP of protected PT at %px level %d, gfn 0x%llx, " + "gva 0x%lx, addr 0x%llx\n", + sp, sp->role.level, sp->gfn, sp->gva, addr); + + level = sp->role.level; + KVM_BUG_ON(level <= PT_PAGE_TABLE_LEVEL); + gpt = kvm_get_vcpu_pt_struct(vcpu); + gpt_level = &gpt->levels[level]; + index = (addr & ~PAGE_MASK) / 
sizeof(pt_element_t); + start_gva = sp->gva & get_pt_level_mask(gpt_level); + start_gva = set_pt_level_addr_index(start_gva, index, gpt_level); + end_gva = start_gva + set_pt_level_addr_index(0, 1, gpt_level); + DebugPTE("protected PT level #%d gva from 0x%lx to 0x%lx\n", + level, start_gva, end_gva); + + if (end_gva >= GUEST_TASK_SIZE) { + /* guest kernel address - update init_gmm */ + gmm = pv_vcpu_get_init_gmm(vcpu); + KVM_BUG_ON(start_gva < GUEST_TASK_SIZE); + } else if (gmm == NULL) { + /* can be only current active gmm */ + gmm = pv_vcpu_get_gmm(vcpu); + } + root_hpa = gmm->root_hpa; + KVM_BUG_ON(!VALID_PAGE(root_hpa)); + guest_root = gmm->u_pptb; + vptb = pv_vcpu_get_init_gmm(vcpu)->u_vptb; + + r = FNAME(sync_shadow_pt_range)(vcpu, gmm, root_hpa, + start_gva, end_gva, guest_root, vptb); + KVM_BUG_ON(r != 0); + return r; +} + +#undef pt_element_t +#undef guest_walker +#undef FNAME +#undef PT_MAX_FULL_LEVELS +#undef gpte_to_gfn_ind +#undef gpte_to_gfn_addr +#undef gpte_to_gfn_level_index +#undef gpte_to_gfn_level_address +#undef CMPXCHG +#undef PT_GUEST_ACCESSED_MASK +#undef PT_GUEST_DIRTY_MASK diff --git a/arch/e2k/kvm/paravirt.c b/arch/e2k/kvm/paravirt.c new file mode 100644 index 000000000000..0f378d1f514d --- /dev/null +++ b/arch/e2k/kvm/paravirt.c @@ -0,0 +1,1880 @@ +/* Paravirtualization interfaces + Copyright (C) 2006 Rusty Russell IBM Corporation + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + 2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc +*/ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +pv_info_t pv_info = { + .name = "e2k bare hardware", + .paravirt_enabled = 0, + .page_offset = NATIVE_PAGE_OFFSET, + .vmalloc_start = NATIVE_VMALLOC_START, + .vmalloc_end = NATIVE_VMALLOC_END, + .vmemmap_start = NATIVE_VMEMMAP_START, + .vmemmap_end = NATIVE_VMEMMAP_END, +}; +EXPORT_SYMBOL_GPL(pv_info); + +#define BOOT_PARAVIRT_GET_BOOT_MACHINE_FUNC(func_name) \ +({ \ + machdep_t machdep = boot_machine; \ + typeof(machdep.func_name) func; \ + func = machdep.func_name; \ + boot_native_vp_to_pp(func); \ +}) +#define BOOT_PARAVIRT_CALL_MACHINE_FUNC(func_name) \ + (BOOT_PARAVIRT_GET_BOOT_MACHINE_FUNC(func_name)()) + +static void * +BOOT_NATIVE_KERNEL_VA_TO_PA(void *virt_pnt, unsigned long kernel_base) +{ + return boot_native_kernel_va_to_pa(virt_pnt, kernel_base); +} + +static void * +BOOT_NATIVE_FUNC_TO_PA(void *virt_pnt) +{ + return boot_native_func_to_pa(virt_pnt); +} + +static e2k_addr_t +BOOT_NATIVE_VPA_TO_PA(e2k_addr_t vpa) +{ + return boot_native_vpa_to_pa(vpa); +} +static e2k_addr_t +BOOT_NATIVE_PA_TO_VPA(e2k_addr_t pa) +{ + return boot_native_pa_to_vpa(pa); +} + +static e2k_addr_t +NATIVE_VPA_TO_PA(e2k_addr_t vpa) +{ + return native_vpa_to_pa(vpa); +} +static e2k_addr_t +NATIVE_PA_TO_VPA(e2k_addr_t pa) +{ + return native_pa_to_vpa(pa); +} + +#define PV_V2P_OPS { \ + .boot_kernel_va_to_pa = BOOT_NATIVE_KERNEL_VA_TO_PA, \ + .boot_func_to_pa = BOOT_NATIVE_FUNC_TO_PA, \ + .boot_vpa_to_pa = BOOT_NATIVE_VPA_TO_PA, \ + .boot_pa_to_vpa = 
BOOT_NATIVE_PA_TO_VPA, \ + .vpa_to_pa = NATIVE_VPA_TO_PA, \ + .pa_to_vpa = NATIVE_PA_TO_VPA, \ +} +pv_v2p_ops_t pv_v2p_ops = PV_V2P_OPS; +/* boot-time copy of pv_v2p_ops: functions have physical addresses */ +static pv_v2p_ops_t boot_pv_v2p_ops = PV_V2P_OPS; +pv_v2p_ops_t *cur_pv_v2p_ops = &boot_pv_v2p_ops; + +static void native_boot_debug_cons_outb(u8 byte, u16 port) +{ + boot_native_outb(byte, port); +} + +static u8 native_boot_debug_cons_inb(u16 port) +{ + return boot_native_inb(port); +} + +static u32 native_boot_debug_cons_inl(u16 port) +{ + return boot_native_inl(port); +} + +static void NATIVE_DEBUG_CONS_OUTB(u8 byte, u16 port) +{ + native_debug_cons_outb(byte, port); +} +static u8 NATIVE_DEBUG_CONS_INB(u16 port) +{ + return native_debug_cons_inb(port); +} +static u32 NATIVE_DEBUG_CONS_INL(u16 port) +{ + return native_debug_cons_inl(port); +} + +static void boot_do_native_cpu_relax(void) +{ + boot_native_cpu_relax(); +} + +#define PV_BOOT_COMMON_OPS \ + .boot_setup_machine_id = boot_native_setup_machine_id, \ + .boot_loader_probe_memory = boot_native_loader_probe_memory, \ + .boot_get_bootblock_size = boot_native_get_bootblock_size, \ + .boot_reserve_all_bootmem = boot_native_reserve_all_bootmem, \ + .boot_map_all_bootmem = boot_native_map_all_bootmem, \ + .boot_map_needful_to_equal_virt_area = \ + boot_native_map_needful_to_equal_virt_area, \ + .boot_kernel_switch_to_virt = boot_native_switch_to_virt, \ + .boot_clear_bss = boot_native_clear_bss, \ + .boot_check_bootblock = boot_native_check_bootblock, \ + .init_terminate_boot_init = init_native_terminate_boot_init, \ + .boot_parse_param = boot_native_parse_param, \ + .boot_debug_cons_outb = native_boot_debug_cons_outb, \ + .boot_debug_cons_inb = native_boot_debug_cons_inb, \ + .boot_debug_cons_inl = native_boot_debug_cons_inl, \ + .debug_cons_outb = NATIVE_DEBUG_CONS_OUTB, \ + .debug_cons_inb = NATIVE_DEBUG_CONS_INB, \ + .debug_cons_inl = NATIVE_DEBUG_CONS_INL, \ + .do_boot_panic = do_boot_printk, \ + 
.boot_cpu_relax = boot_do_native_cpu_relax, \ + +#ifdef CONFIG_SMP +#define PV_BOOT_SMP_OPS \ + .boot_smp_cpu_config = boot_native_smp_cpu_config, \ + .boot_smp_node_config = boot_native_smp_node_config, \ + +#else /* ! CONFIG_SMP */ +#define PV_BOOT_SMP_OPS +#endif /* CONFIG_SMP */ +#define PV_BOOT_OPS { \ + PV_BOOT_COMMON_OPS \ + PV_BOOT_SMP_OPS \ +} + +pv_boot_ops_t pv_boot_ops = PV_BOOT_OPS; +pv_boot_ops_t __initdata boot_pv_boot_ops = PV_BOOT_OPS; +pv_boot_ops_t *cur_pv_boot_ops = &boot_pv_boot_ops; + +void __init default_banner(void) +{ + printk(KERN_INFO "Booting paravirtualized kernel on %s\n", + pv_info.name); +} + +pv_init_ops_t pv_init_ops = { + .banner = default_banner, + .set_mach_type_id = native_set_mach_type_id, + .print_machine_type_info = native_print_machine_type_info, +}; + +static unsigned long native_read_OSCUD_lo_reg_value(void) +{ + return NATIVE_READ_OSCUD_LO_REG_VALUE(); +} + +static unsigned long native_read_OSCUD_hi_reg_value(void) +{ + return NATIVE_READ_OSCUD_HI_REG_VALUE(); +} + +static void native_write_OSCUD_lo_reg_value(unsigned long reg_value) +{ + NATIVE_WRITE_OSCUD_LO_REG_VALUE(reg_value); +} + +static void native_write_OSCUD_hi_reg_value(unsigned long reg_value) +{ + NATIVE_WRITE_OSCUD_HI_REG_VALUE(reg_value); +} + +static unsigned long native_read_OSGD_lo_reg_value(void) +{ + return NATIVE_READ_OSGD_LO_REG_VALUE(); +} + +static unsigned long native_read_OSGD_hi_reg_value(void) +{ + return NATIVE_READ_OSGD_HI_REG_VALUE(); +} + +static void native_write_OSGD_lo_reg_value(unsigned long reg_value) +{ + NATIVE_WRITE_OSGD_LO_REG_VALUE(reg_value); +} + +static void native_write_OSGD_hi_reg_value(unsigned long reg_value) +{ + NATIVE_WRITE_OSGD_HI_REG_VALUE(reg_value); +} + +static unsigned long native_read_CUD_lo_reg_value(void) +{ + return NATIVE_READ_CUD_LO_REG_VALUE(); +} + +static unsigned long native_read_CUD_hi_reg_value(void) +{ + return NATIVE_READ_CUD_HI_REG_VALUE(); +} + +static void native_write_CUD_lo_reg_value(unsigned 
long reg_value) +{ + NATIVE_WRITE_CUD_LO_REG_VALUE(reg_value); +} + +static void native_write_CUD_hi_reg_value(unsigned long reg_value) +{ + NATIVE_WRITE_CUD_HI_REG_VALUE(reg_value); +} + +static unsigned long native_read_GD_lo_reg_value(void) +{ + return NATIVE_READ_GD_LO_REG_VALUE(); +} + +static unsigned long native_read_GD_hi_reg_value(void) +{ + return NATIVE_READ_GD_HI_REG_VALUE(); +} + +static void native_write_GD_lo_reg_value(unsigned long reg_value) +{ + NATIVE_WRITE_GD_LO_REG_VALUE(reg_value); +} + +static void native_write_GD_hi_reg_value(unsigned long reg_value) +{ + NATIVE_WRITE_GD_HI_REG_VALUE(reg_value); +} + +static unsigned long native_read_CTPR_reg_value(int reg_no) +{ + switch (reg_no) { + case 1: return NATIVE_NV_READ_CTPR_REG_VALUE(1); + case 2: return NATIVE_NV_READ_CTPR_REG_VALUE(2); + case 3: return NATIVE_NV_READ_CTPR_REG_VALUE(3); + default: + panic("native_read_CTPR_reg_value() invalid CTPR # %d\n", + reg_no); + } + return -1; +} + +static void native_write_CTPR_reg_value(int reg_no, unsigned long reg_value) +{ + switch (reg_no) { + case 1: + NATIVE_WRITE_CTPR_REG_VALUE(1, reg_value); + break; + case 2: + NATIVE_WRITE_CTPR_REG_VALUE(2, reg_value); + break; + case 3: + NATIVE_WRITE_CTPR_REG_VALUE(3, reg_value); + break; + default: + panic("native_write_CTPR_reg_value() invalid CTPR # %d\n", + reg_no); + } +} + +static unsigned long native_read_SBR_reg_value(void) +{ + return NATIVE_NV_READ_SBR_REG_VALUE(); +} + +static void native_write_SBR_reg_value(unsigned long reg_value) +{ + NATIVE_NV_WRITE_SBR_REG_VALUE(reg_value); +} + +#ifdef NEED_PARAVIRT_LOOP_REGISTERS +static unsigned long native_read_LSR_reg_value(void) +{ + return NATIVE_READ_LSR_REG_VALUE(); +} + +static void native_write_LSR_reg_value(unsigned long reg_value) +{ + NATIVE_WRITE_LSR_REG_VALUE(reg_value); +} + +static unsigned long native_read_ILCR_reg_value(void) +{ + return NATIVE_READ_ILCR_REG_VALUE(); +} + +static void native_write_ILCR_reg_value(unsigned long reg_value) +{ 
+ NATIVE_WRITE_ILCR_REG_VALUE(reg_value); +} +#endif /* NEED_PARAVIRT_LOOP_REGISTERS */ + +static unsigned long native_read_OSR0_reg_value(void) +{ + return NATIVE_NV_READ_OSR0_REG_VALUE(); +} + +static void native_write_OSR0_reg_value(unsigned long reg_value) +{ + NATIVE_NV_WRITE_OSR0_REG_VALUE(reg_value); +} + +static unsigned int native_read_OSEM_reg_value(void) +{ + return NATIVE_READ_OSEM_REG_VALUE(); +} + +static void native_write_OSEM_reg_value(unsigned int reg_value) +{ + NATIVE_WRITE_OSEM_REG_VALUE(reg_value); +} + +static unsigned int native_read_BGR_reg_value(void) +{ + return NATIVE_READ_BGR_REG_VALUE(); +} + +static notrace void native_write_BGR_reg_value(unsigned int bgr_value) +{ + NATIVE_WRITE_BGR_REG_VALUE(bgr_value); +} + +static unsigned long native_read_CLKR_reg_value(void) +{ + return NATIVE_READ_CLKR_REG_VALUE(); +} + +static void native_write_CLKR_reg_value(void) +{ + NATIVE_WRITE_CLKR_REG_VALUE(); +} + +static unsigned long native_read_CU_HW0_reg_value(void) +{ + return NATIVE_READ_CU_HW0_REG_VALUE(); +} +static unsigned long native_read_CU_HW1_reg_value(void) +{ + return machine.get_cu_hw1(); +} +static void native_write_CU_HW0_reg_value(unsigned long reg_value) +{ + NATIVE_WRITE_CU_HW0_REG_VALUE(reg_value); + E2K_WAIT_ALL; +} +static void native_write_CU_HW1_reg_value(unsigned long reg_value) +{ + machine.set_cu_hw1(reg_value); +} + +static unsigned long native_read_RPR_lo_reg_value(void) +{ + return NATIVE_READ_RPR_LO_REG_VALUE(); +} + +static unsigned long native_read_RPR_hi_reg_value(void) +{ + return NATIVE_READ_RPR_HI_REG_VALUE(); +} + +static void native_write_RPR_lo_reg_value(unsigned long reg_value) +{ + NATIVE_WRITE_RPR_LO_REG_VALUE(reg_value); +} + +static void native_write_RPR_hi_reg_value(unsigned long reg_value) +{ + NATIVE_WRITE_RPR_HI_REG_VALUE(reg_value); +} + +static unsigned long native_read_SBBP_reg_value(void) +{ + return NATIVE_READ_SBBP_REG_VALUE(); +} + +static unsigned long native_read_IP_reg_value(void) +{ + return 
NATIVE_READ_IP_REG_VALUE(); +} + +static unsigned int native_read_DIBCR_reg_value(void) +{ + return NATIVE_READ_DIBCR_REG_VALUE(); +} + +static unsigned int native_read_DIBSR_reg_value(void) +{ + return NATIVE_READ_DIBSR_REG_VALUE(); +} + +static unsigned long native_read_DIMCR_reg_value(void) +{ + return NATIVE_READ_DIMCR_REG_VALUE(); +} + +static unsigned long native_read_DIBAR0_reg_value(void) +{ + return NATIVE_READ_DIBAR0_REG_VALUE(); +} + +static unsigned long native_read_DIBAR1_reg_value(void) +{ + return NATIVE_READ_DIBAR1_REG_VALUE(); +} + +static unsigned long native_read_DIBAR2_reg_value(void) +{ + return NATIVE_READ_DIBAR2_REG_VALUE(); +} + +static unsigned long native_read_DIBAR3_reg_value(void) +{ + return NATIVE_READ_DIBAR3_REG_VALUE(); +} + +static unsigned long native_read_DIMAR0_reg_value(void) +{ + return NATIVE_READ_DIMAR0_REG_VALUE(); +} + +static unsigned long native_read_DIMAR1_reg_value(void) +{ + return NATIVE_READ_DIMAR1_REG_VALUE(); +} + +static void native_write_DIBCR_reg_value(unsigned int reg_value) +{ + NATIVE_WRITE_DIBCR_REG_VALUE(reg_value); +} + +static void native_write_DIBSR_reg_value(unsigned int reg_value) +{ + NATIVE_WRITE_DIBSR_REG_VALUE(reg_value); +} + +static void native_write_DIMCR_reg_value(unsigned long reg_value) +{ + NATIVE_WRITE_DIMCR_REG_VALUE(reg_value); +} + +static void native_write_DIBAR0_reg_value(unsigned long reg_value) +{ + NATIVE_WRITE_DIBAR0_REG_VALUE(reg_value); +} + +static void native_write_DIBAR1_reg_value(unsigned long reg_value) +{ + NATIVE_WRITE_DIBAR1_REG_VALUE(reg_value); +} + +static void native_write_DIBAR2_reg_value(unsigned long reg_value) +{ + NATIVE_WRITE_DIBAR2_REG_VALUE(reg_value); +} + +static void native_write_DIBAR3_reg_value(unsigned long reg_value) +{ + NATIVE_WRITE_DIBAR3_REG_VALUE(reg_value); +} + +static void native_write_DIMAR0_reg_value(unsigned long reg_value) +{ + NATIVE_WRITE_DIMAR0_REG_VALUE(reg_value); +} + +static void native_write_DIMAR1_reg_value(unsigned long reg_value) 
+{ + NATIVE_WRITE_DIMAR1_REG_VALUE(reg_value); +} + +static unsigned long native_read_CUTD_reg_value(void) +{ + return NATIVE_NV_READ_CUTD_REG_VALUE(); +} + +static void native_write_CUTD_reg_value(unsigned long reg_value) +{ + NATIVE_NV_NOIRQ_WRITE_CUTD_REG_VALUE(reg_value); +} + +static unsigned int native_read_CUIR_reg_value(void) +{ + return NATIVE_READ_CUIR_REG_VALUE(); +} + +static unsigned int native_read_PFPFR_reg_value(void) +{ + return NATIVE_NV_READ_PFPFR_REG_VALUE(); +} + +static void native_write_PFPFR_reg_value(unsigned int reg_value) +{ + NATIVE_NV_WRITE_PFPFR_REG_VALUE(reg_value); +} + +static unsigned int native_read_FPCR_reg_value(void) +{ + return NATIVE_NV_READ_FPCR_REG_VALUE(); +} + +static void native_write_FPCR_reg_value(unsigned int reg_value) +{ + NATIVE_NV_WRITE_FPCR_REG_VALUE(reg_value); +} + +static unsigned int native_read_FPSR_reg_value(void) +{ + return NATIVE_NV_READ_FPSR_REG_VALUE(); +} + +static void native_write_FPSR_reg_value(unsigned int reg_value) +{ + NATIVE_NV_WRITE_FPSR_REG_VALUE(reg_value); +} + +static unsigned long native_read_CS_lo_reg_value(void) +{ + return NATIVE_READ_CS_LO_REG_VALUE(); +} + +static unsigned long native_read_CS_hi_reg_value(void) +{ + return NATIVE_READ_CS_HI_REG_VALUE(); +} + +static unsigned long native_read_DS_lo_reg_value(void) +{ + return NATIVE_READ_DS_LO_REG_VALUE(); +} + +static unsigned long native_read_DS_hi_reg_value(void) +{ + return NATIVE_READ_DS_HI_REG_VALUE(); +} + +static unsigned long native_read_ES_lo_reg_value(void) +{ + return NATIVE_READ_ES_LO_REG_VALUE(); +} + +static unsigned long native_read_ES_hi_reg_value(void) +{ + return NATIVE_READ_ES_HI_REG_VALUE(); +} + +static unsigned long native_read_FS_lo_reg_value(void) +{ + return NATIVE_READ_FS_LO_REG_VALUE(); +} + +static unsigned long native_read_FS_hi_reg_value(void) +{ + return NATIVE_READ_FS_HI_REG_VALUE(); +} + +static unsigned long native_read_GS_lo_reg_value(void) +{ + return NATIVE_READ_GS_LO_REG_VALUE(); +} + +static 
unsigned long native_read_GS_hi_reg_value(void) +{ + return NATIVE_READ_GS_HI_REG_VALUE(); +} + +static unsigned long native_read_SS_lo_reg_value(void) +{ + return NATIVE_READ_SS_LO_REG_VALUE(); +} + +static unsigned long native_read_SS_hi_reg_value(void) +{ + return NATIVE_READ_SS_HI_REG_VALUE(); +} + +static void native_write_CS_lo_reg_value(unsigned long reg_value) +{ + NATIVE_CL_WRITE_CS_LO_REG_VALUE(reg_value); +} + +static void native_write_CS_hi_reg_value(unsigned long reg_value) +{ + NATIVE_CL_WRITE_CS_HI_REG_VALUE(reg_value); +} + +static void native_write_DS_lo_reg_value(unsigned long reg_value) +{ + NATIVE_CL_WRITE_DS_LO_REG_VALUE(reg_value); +} + +static void native_write_DS_hi_reg_value(unsigned long reg_value) +{ + NATIVE_CL_WRITE_DS_HI_REG_VALUE(reg_value); +} + +static void native_write_ES_lo_reg_value(unsigned long reg_value) +{ + NATIVE_CL_WRITE_ES_LO_REG_VALUE(reg_value); +} + +static void native_write_ES_hi_reg_value(unsigned long reg_value) +{ + NATIVE_CL_WRITE_ES_HI_REG_VALUE(reg_value); +} + +static void native_write_FS_lo_reg_value(unsigned long reg_value) +{ + NATIVE_CL_WRITE_FS_LO_REG_VALUE(reg_value); +} + +static void native_write_FS_hi_reg_value(unsigned long reg_value) +{ + NATIVE_CL_WRITE_FS_HI_REG_VALUE(reg_value); +} + +static void native_write_GS_lo_reg_value(unsigned long reg_value) +{ + NATIVE_CL_WRITE_GS_LO_REG_VALUE(reg_value); +} + +static void native_write_GS_hi_reg_value(unsigned long reg_value) +{ + NATIVE_CL_WRITE_GS_HI_REG_VALUE(reg_value); +} + +static void native_write_SS_lo_reg_value(unsigned long reg_value) +{ + NATIVE_CL_WRITE_SS_LO_REG_VALUE(reg_value); +} + +static void native_write_SS_hi_reg_value(unsigned long reg_value) +{ + NATIVE_CL_WRITE_SS_HI_REG_VALUE(reg_value); +} + +static unsigned long native_read_IDR_reg_value(void) +{ + return NATIVE_READ_IDR_REG_VALUE(); +} + +static unsigned int do_native_read_CORE_MODE_reg_value(void) +{ + return native_read_CORE_MODE_reg_value(); +} +static void 
do_native_write_CORE_MODE_reg_value(unsigned int modes) +{ + native_write_CORE_MODE_reg_value(modes); +} +static unsigned int do_boot_native_read_CORE_MODE_reg_value(void) +{ + return boot_native_read_CORE_MODE_reg_value(); +} +static void do_boot_native_write_CORE_MODE_reg_value(unsigned int modes) +{ + boot_native_write_CORE_MODE_reg_value(modes); +} + +static inline unsigned int do_read_aafstr_reg_value(void) +{ + return native_read_aafstr_reg_value(); +} +static inline void do_write_aafstr_reg_value(unsigned int reg_value) +{ + native_write_aafstr_reg_value(reg_value); +} + +static void native_copy_stacks_to_memory(void) +{ + NATIVE_FLUSHCPU; +} +static void +native_correct_trap_psp_pcsp(struct pt_regs *regs, thread_info_t *thread_info) +{ + NATIVE_CORRECT_TRAP_PSP_PCSP(regs, thread_info); +} +static void +native_correct_scall_psp_pcsp(struct pt_regs *regs, thread_info_t *thread_info) +{ + NATIVE_CORRECT_SCALL_PSP_PCSP(regs, thread_info); +} +static void +do_correct_trap_return_ip(struct pt_regs *regs, unsigned long return_ip) +{ + native_correct_trap_return_ip(regs, return_ip); +} +static int do_switch_to_new_user(e2k_stacks_t *stacks, hw_stack_t *hw_stacks, + e2k_addr_t cut_base, e2k_size_t cut_size, + e2k_addr_t entry_point, int cui, + unsigned long flags, bool kernel) +{ + return 0; /* to continue switching on host */ +} +static void +do_free_old_kernel_hardware_stacks(void) +{ + native_free_old_kernel_hardware_stacks(); +} +static bool +do_is_proc_stack_bounds(struct thread_info *ti, struct pt_regs *regs) +{ + return native_is_proc_stack_bounds(ti, regs); +} +static bool +do_is_chain_stack_bounds(struct thread_info *ti, struct pt_regs *regs) +{ + return native_is_chain_stack_bounds(ti, regs); +} +static void +host_instr_page_fault(struct pt_regs *regs, tc_fault_type_t ftype, + const int async_instr) +{ + kvm_host_instr_page_fault(regs, ftype, async_instr); +} +static unsigned long +do_mmio_page_fault(struct pt_regs *regs, struct trap_cellar *tcellar) +{ + 
return native_mmio_page_fault(regs, (trap_cellar_t *)tcellar); +} +static void +do_init_guest_system_handlers_table(void) +{ + native_init_guest_system_handlers_table(); +} + +static unsigned long +do_fast_tagged_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return native_fast_tagged_memory_copy(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); +} +static void +do_fast_tagged_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + native_fast_tagged_memory_set(addr, val, tag, len, strd_opcode); +} + +static unsigned long +do_extract_tags_32(u16 *dst, const void *src) +{ + return native_extract_tags_32(dst, src); +} + +static void +do_save_local_glob_regs(local_gregs_t *l_gregs, bool is_signal) +{ + native_save_local_glob_regs(l_gregs, is_signal); +} +static void +do_restore_local_glob_regs(local_gregs_t *l_gregs, bool is_signal) +{ + native_restore_local_glob_regs(l_gregs, is_signal); +} + +static void +do_get_all_user_glob_regs(global_regs_t *gregs) +{ + native_get_all_user_glob_regs(gregs); +} + +static __interrupt void +native_restore_kernel_gregs_in_syscall(struct thread_info *ti) +{ + NATIVE_RESTORE_KERNEL_GREGS_IN_SYSCALL(ti); +} +static void do_cpu_relax(void) +{ + native_cpu_relax(); +} +static void do_cpu_relax_no_resched(void) +{ + native_cpu_relax_no_resched(); +} +static void do_lock_relax(void *lock) +{ + native_cpu_relax(); +} + +#ifdef CONFIG_SMP +static void +do_arch_csd_lock_async(call_single_data_t *data) +{ + native_arch_csd_lock_async(data); +} +#endif /* CONFIG_SMP */ + +#define do_arch_spin_lock_slow do_lock_relax +#define do_arch_spin_locked_slow do_lock_relax +#define do_arch_spin_unlock_slow do_lock_relax + +#define PV_CPU_COMMON_OPS \ + .read_OSCUD_lo_reg_value = native_read_OSCUD_lo_reg_value, \ + .read_OSCUD_hi_reg_value = native_read_OSCUD_hi_reg_value, \ + .write_OSCUD_lo_reg_value = native_write_OSCUD_lo_reg_value, \ + 
.write_OSCUD_hi_reg_value = native_write_OSCUD_hi_reg_value, \ + .read_OSGD_lo_reg_value = native_read_OSGD_lo_reg_value, \ + .read_OSGD_hi_reg_value = native_read_OSGD_hi_reg_value, \ + .write_OSGD_lo_reg_value = native_write_OSGD_lo_reg_value, \ + .write_OSGD_hi_reg_value = native_write_OSGD_hi_reg_value, \ + .read_CUD_lo_reg_value = native_read_CUD_lo_reg_value, \ + .read_CUD_hi_reg_value = native_read_CUD_hi_reg_value, \ + .write_CUD_lo_reg_value = native_write_CUD_lo_reg_value, \ + .write_CUD_hi_reg_value = native_write_CUD_hi_reg_value, \ + .read_GD_lo_reg_value = native_read_GD_lo_reg_value, \ + .read_GD_hi_reg_value = native_read_GD_hi_reg_value, \ + .write_GD_lo_reg_value = native_write_GD_lo_reg_value, \ + .write_GD_hi_reg_value = native_write_GD_hi_reg_value, \ + .read_PSP_lo_reg_value = INLINE_FUNC_CALL, \ + .read_PSP_hi_reg_value = INLINE_FUNC_CALL, \ + .write_PSP_lo_reg_value = INLINE_FUNC_CALL, \ + .write_PSP_hi_reg_value = INLINE_FUNC_CALL, \ + .read_PSHTP_reg_value = INLINE_FUNC_CALL, \ + .write_PSHTP_reg_value = INLINE_FUNC_CALL, \ + .read_PCSP_lo_reg_value = INLINE_FUNC_CALL, \ + .read_PCSP_hi_reg_value = INLINE_FUNC_CALL, \ + .write_PCSP_lo_reg_value = INLINE_FUNC_CALL, \ + .write_PCSP_hi_reg_value = INLINE_FUNC_CALL, \ + .read_PCSHTP_reg_value = INLINE_FUNC_CALL, \ + .write_PCSHTP_reg_value = INLINE_FUNC_CALL, \ + .read_CR0_lo_reg_value = INLINE_FUNC_CALL, \ + .read_CR0_hi_reg_value = INLINE_FUNC_CALL, \ + .read_CR1_lo_reg_value = INLINE_FUNC_CALL, \ + .read_CR1_hi_reg_value = INLINE_FUNC_CALL, \ + .write_CR0_lo_reg_value = INLINE_FUNC_CALL, \ + .write_CR0_hi_reg_value = INLINE_FUNC_CALL, \ + .write_CR1_lo_reg_value = INLINE_FUNC_CALL, \ + .write_CR1_hi_reg_value = INLINE_FUNC_CALL, \ + .read_CTPR_reg_value = native_read_CTPR_reg_value, \ + .write_CTPR_reg_value = native_write_CTPR_reg_value, \ + .read_USD_lo_reg_value = INLINE_FUNC_CALL, \ + .read_USD_hi_reg_value = INLINE_FUNC_CALL, \ + .write_USD_lo_reg_value = INLINE_FUNC_CALL, \ + 
.write_USD_hi_reg_value = INLINE_FUNC_CALL, \ + .read_SBR_reg_value = native_read_SBR_reg_value, \ + .write_SBR_reg_value = native_write_SBR_reg_value, \ + .read_WD_reg_value = INLINE_FUNC_CALL, \ + .write_WD_reg_value = INLINE_FUNC_CALL, \ + .read_OSR0_reg_value = native_read_OSR0_reg_value, \ + .write_OSR0_reg_value = native_write_OSR0_reg_value, \ + .read_OSEM_reg_value = native_read_OSEM_reg_value, \ + .write_OSEM_reg_value = native_write_OSEM_reg_value, \ + .read_BGR_reg_value = native_read_BGR_reg_value, \ + .write_BGR_reg_value = native_write_BGR_reg_value, \ + .read_CLKR_reg_value = native_read_CLKR_reg_value, \ + .write_CLKR_reg_value = native_write_CLKR_reg_value, \ + .read_SCLKR_reg_value = native_read_SCLKR_reg_value, \ + .write_SCLKR_reg_value = native_write_SCLKR_reg_value, \ + .read_SCLKM1_reg_value = native_read_SCLKM1_reg_value, \ + .write_SCLKM1_reg_value = native_write_SCLKM1_reg_value, \ + .read_SCLKM2_reg_value = native_read_SCLKM2_reg_value, \ + .write_SCLKM2_reg_value = native_write_SCLKM2_reg_value, \ + .read_CU_HW0_reg_value = native_read_CU_HW0_reg_value, \ + .read_CU_HW1_reg_value = native_read_CU_HW1_reg_value, \ + .write_CU_HW0_reg_value = native_write_CU_HW0_reg_value, \ + .write_CU_HW1_reg_value = native_write_CU_HW1_reg_value, \ + .read_RPR_lo_reg_value = native_read_RPR_lo_reg_value, \ + .read_RPR_hi_reg_value = native_read_RPR_hi_reg_value, \ + .write_RPR_lo_reg_value = native_write_RPR_lo_reg_value, \ + .write_RPR_hi_reg_value = native_write_RPR_hi_reg_value, \ + .read_SBBP_reg_value = native_read_SBBP_reg_value, \ + .read_IP_reg_value = native_read_IP_reg_value, \ + .read_DIBCR_reg_value = native_read_DIBCR_reg_value, \ + .read_DIBSR_reg_value = native_read_DIBSR_reg_value, \ + .read_DIMCR_reg_value = native_read_DIMCR_reg_value, \ + .read_DIBAR0_reg_value = native_read_DIBAR0_reg_value, \ + .read_DIBAR1_reg_value = native_read_DIBAR1_reg_value, \ + .read_DIBAR2_reg_value = native_read_DIBAR2_reg_value, \ + .read_DIBAR3_reg_value 
= native_read_DIBAR3_reg_value, \ + .read_DIMAR0_reg_value = native_read_DIMAR0_reg_value, \ + .read_DIMAR1_reg_value = native_read_DIMAR1_reg_value, \ + .write_DIBCR_reg_value = native_write_DIBCR_reg_value, \ + .write_DIBSR_reg_value = native_write_DIBSR_reg_value, \ + .write_DIMCR_reg_value = native_write_DIMCR_reg_value, \ + .write_DIBAR0_reg_value = native_write_DIBAR0_reg_value, \ + .write_DIBAR1_reg_value = native_write_DIBAR1_reg_value, \ + .write_DIBAR2_reg_value = native_write_DIBAR2_reg_value, \ + .write_DIBAR3_reg_value = native_write_DIBAR3_reg_value, \ + .write_DIMAR0_reg_value = native_write_DIMAR0_reg_value, \ + .write_DIMAR1_reg_value = native_write_DIMAR1_reg_value, \ + .read_CUTD_reg_value = native_read_CUTD_reg_value, \ + .read_CUIR_reg_value = native_read_CUIR_reg_value, \ + .write_CUTD_reg_value = native_write_CUTD_reg_value, \ + .read_UPSR_reg_value = INLINE_FUNC_CALL, \ + .write_UPSR_reg_value = INLINE_FUNC_CALL, \ + .read_PSR_reg_value = INLINE_FUNC_CALL, \ + .write_PSR_reg_value = INLINE_FUNC_CALL, \ + .write_UPSR_irq_barrier = INLINE_FUNC_CALL, \ + .write_PSR_irq_barrier = INLINE_FUNC_CALL, \ + .read_PFPFR_reg_value = native_read_PFPFR_reg_value, \ + .read_FPCR_reg_value = native_read_FPCR_reg_value, \ + .read_FPSR_reg_value = native_read_FPSR_reg_value, \ + .write_PFPFR_reg_value = native_write_PFPFR_reg_value, \ + .write_FPCR_reg_value = native_write_FPCR_reg_value, \ + .write_FPSR_reg_value = native_write_FPSR_reg_value, \ + .read_CS_lo_reg_value = native_read_CS_lo_reg_value, \ + .read_CS_hi_reg_value = native_read_CS_hi_reg_value, \ + .read_DS_lo_reg_value = native_read_DS_lo_reg_value, \ + .read_DS_hi_reg_value = native_read_DS_hi_reg_value, \ + .read_ES_lo_reg_value = native_read_ES_lo_reg_value, \ + .read_ES_hi_reg_value = native_read_ES_hi_reg_value, \ + .read_FS_lo_reg_value = native_read_FS_lo_reg_value, \ + .read_FS_hi_reg_value = native_read_FS_hi_reg_value, \ + .read_GS_lo_reg_value = native_read_GS_lo_reg_value, \ + 
.read_GS_hi_reg_value = native_read_GS_hi_reg_value, \ + .read_SS_lo_reg_value = native_read_SS_lo_reg_value, \ + .read_SS_hi_reg_value = native_read_SS_hi_reg_value, \ + .write_CS_lo_reg_value = native_write_CS_lo_reg_value, \ + .write_CS_hi_reg_value = native_write_CS_hi_reg_value, \ + .write_DS_lo_reg_value = native_write_DS_lo_reg_value, \ + .write_DS_hi_reg_value = native_write_DS_hi_reg_value, \ + .write_ES_lo_reg_value = native_write_ES_lo_reg_value, \ + .write_ES_hi_reg_value = native_write_ES_hi_reg_value, \ + .write_FS_lo_reg_value = native_write_FS_lo_reg_value, \ + .write_FS_hi_reg_value = native_write_FS_hi_reg_value, \ + .write_GS_lo_reg_value = native_write_GS_lo_reg_value, \ + .write_GS_hi_reg_value = native_write_GS_hi_reg_value, \ + .write_SS_lo_reg_value = native_write_SS_lo_reg_value, \ + .write_SS_hi_reg_value = native_write_SS_hi_reg_value, \ + .read_IDR_reg_value = native_read_IDR_reg_value, \ + .boot_read_IDR_reg_value = boot_native_read_IDR_reg_value, \ + .read_CORE_MODE_reg_value = do_native_read_CORE_MODE_reg_value, \ + .boot_read_CORE_MODE_reg_value = \ + do_boot_native_read_CORE_MODE_reg_value, \ + .write_CORE_MODE_reg_value = do_native_write_CORE_MODE_reg_value, \ + .boot_write_CORE_MODE_reg_value = \ + do_boot_native_write_CORE_MODE_reg_value, \ + .put_updated_cpu_regs_flags = NULL, \ + .read_aasr_reg_value = (void *)-1UL, \ + .write_aasr_reg_value = INLINE_FUNC_CALL, \ + .read_aafstr_reg_value = do_read_aafstr_reg_value, \ + .write_aafstr_reg_value = do_write_aafstr_reg_value, \ + .flush_stacks = INLINE_FUNC_CALL, \ + .flush_regs_stack = INLINE_FUNC_CALL, \ + .flush_chain_stack = INLINE_FUNC_CALL, \ + .copy_stacks_to_memory = native_copy_stacks_to_memory, \ + .get_active_cr0_lo_value = INLINE_FUNC_CALL, \ + .get_active_cr0_hi_value = INLINE_FUNC_CALL, \ + .get_active_cr1_lo_value = INLINE_FUNC_CALL, \ + .get_active_cr1_hi_value = INLINE_FUNC_CALL, \ + .put_active_cr0_lo_value = INLINE_FUNC_CALL, \ + .put_active_cr0_hi_value = 
INLINE_FUNC_CALL, \ + .put_active_cr1_lo_value = INLINE_FUNC_CALL, \ + .put_active_cr1_hi_value = INLINE_FUNC_CALL, \ + .correct_trap_psp_pcsp = native_correct_trap_psp_pcsp, \ + .correct_scall_psp_pcsp = native_correct_scall_psp_pcsp, \ + .correct_trap_return_ip = do_correct_trap_return_ip, \ + .nested_kernel_return_address = __e2k_read_kernel_return_address, \ + .virt_cpu_thread_init = NULL, \ + .prepare_start_thread_frames = \ + native_do_prepare_start_thread_frames, \ + .copy_kernel_stacks = native_copy_kernel_stacks, \ + .copy_user_stacks = native_copy_user_stacks, \ + .define_kernel_hw_stacks_sizes = \ + native_do_define_kernel_hw_stacks_sizes, \ + .define_user_hw_stacks_sizes = native_define_user_hw_stacks_sizes, \ + .switch_to_expanded_proc_stack = NULL, \ + .switch_to_expanded_chain_stack = NULL, \ + .stack_bounds_trap_enable = NULL, \ + .is_proc_stack_bounds = do_is_proc_stack_bounds, \ + .is_chain_stack_bounds = do_is_chain_stack_bounds, \ + .release_hw_stacks = native_release_hw_stacks, \ + .release_kernel_stacks = native_release_kernel_stacks, \ + .register_kernel_hw_stack = NULL, \ + .register_kernel_data_stack = NULL, \ + .unregister_kernel_hw_stack = NULL, \ + .unregister_kernel_data_stack = NULL, \ + .kmem_area_host_chunk = NULL, \ + .kmem_area_unhost_chunk = NULL, \ + .switch_to_new_user = do_switch_to_new_user, \ + .do_map_user_hard_stack_to_kernel = NULL, \ + .do_switch_to_kernel_hardware_stacks = NULL, \ + .free_old_kernel_hardware_stacks = \ + do_free_old_kernel_hardware_stacks, \ + .instr_page_fault = host_instr_page_fault, \ + .mmio_page_fault = do_mmio_page_fault, \ + .do_hw_stack_bounds = native_do_hw_stack_bounds, \ + .handle_interrupt = native_do_interrupt, \ + .init_guest_system_handlers_table = \ + do_init_guest_system_handlers_table, \ + .fix_process_pt_regs = NULL, \ + .run_user_handler = NULL, \ + .trap_table_entry1 = native_ttable_entry1, \ + .trap_table_entry3 = native_ttable_entry3, \ + .trap_table_entry4 = native_ttable_entry4, 
\ + .do_fast_clock_gettime = native_do_fast_clock_gettime, \ + .fast_sys_clock_gettime = native_fast_sys_clock_gettime, \ + .do_fast_gettimeofday = native_do_fast_gettimeofday, \ + .fast_sys_siggetmask = native_fast_sys_siggetmask, \ + .fast_tagged_memory_copy = do_fast_tagged_memory_copy, \ + .fast_tagged_memory_set = do_fast_tagged_memory_set, \ + .extract_tags_32 = do_extract_tags_32, \ + .save_local_glob_regs = do_save_local_glob_regs, \ + .restore_local_glob_regs = do_restore_local_glob_regs, \ + .restore_kernel_gregs_in_syscall = \ + native_restore_kernel_gregs_in_syscall, \ + .get_all_user_glob_regs = do_get_all_user_glob_regs, \ + .arch_setup_machine = native_setup_machine, \ + .cpu_default_idle = native_default_idle, \ + .cpu_relax = do_cpu_relax, \ + .cpu_relax_no_resched = do_cpu_relax_no_resched, \ + .host_printk = printk, \ + .arch_spin_lock_slow = do_arch_spin_lock_slow, \ + .arch_spin_relock_slow = do_arch_spin_relock_slow, \ + .arch_spin_locked_slow = do_arch_spin_locked_slow, \ + .arch_spin_unlock_slow = do_arch_spin_unlock_slow, \ + .ord_wait_read_lock_slow = NULL, \ + .ord_wait_write_lock_slow = NULL, \ + .ord_arch_read_locked_slow = NULL, \ + .ord_arch_write_locked_slow = NULL, \ + .ord_arch_read_unlock_slow = NULL, \ + .ord_arch_write_unlock_slow = NULL, \ + +#ifdef CONFIG_SMP +#define PV_CPU_SMP_OPS \ + .wait_for_cpu_booting = native_wait_for_cpu_booting, \ + .wait_for_cpu_wake_up = native_wait_for_cpu_wake_up, \ + .activate_cpu = native_activate_cpu, \ + .activate_all_cpus = native_activate_all_cpus, \ + .csd_lock_wait = native_csd_lock_wait, \ + .csd_lock = native_csd_lock, \ + .arch_csd_lock_async = do_arch_csd_lock_async, \ + .csd_unlock = native_csd_unlock, \ + .setup_local_pic_virq = NULL, \ + .startup_local_pic_virq = NULL, \ + .smp_flush_tlb_all = native_smp_flush_tlb_all, \ + .smp_flush_tlb_mm = native_smp_flush_tlb_mm, \ + .smp_flush_tlb_page = native_smp_flush_tlb_page, \ + .smp_flush_tlb_range = native_smp_flush_tlb_range, \ + 
.smp_flush_pmd_tlb_range = native_smp_flush_pmd_tlb_range, \ + .smp_flush_tlb_range_and_pgtables = \ + native_smp_flush_tlb_range_and_pgtables, \ + .smp_flush_icache_range = native_smp_flush_icache_range, \ + .smp_flush_icache_range_array = \ + (void (*)(void *))native_smp_flush_icache_range_array, \ + .smp_flush_icache_page = native_smp_flush_icache_page, \ + .smp_flush_icache_all = native_smp_flush_icache_all, \ + .smp_flush_icache_kernel_line = \ + native_smp_flush_icache_kernel_line, \ + +#else /* ! CONFIG_SMP */ +#define PV_CPU_SMP_OPS +#endif /* CONFIG_SMP */ + +#ifdef NEED_PARAVIRT_LOOP_REGISTERS +#define PV_CPU_LOOP_OPS \ + .read_LSR_reg_value = native_read_LSR_reg_value, \ + .write_LSR_reg_value = native_write_LSR_reg_value, \ + .read_ILCR_reg_value = native_read_ILCR_reg_value, \ + .write_ILCR_reg_value = native_write_ILCR_reg_value, \ + +#else /* ! NEED_PARAVIRT_LOOP_REGISTERS */ +#define PV_CPU_LOOP_OPS +#endif /* NEED_PARAVIRT_LOOP_REGISTERS */ +#define PV_CPU_OPS { \ + PV_CPU_COMMON_OPS \ + PV_CPU_LOOP_OPS \ + PV_CPU_SMP_OPS \ +} + +pv_cpu_ops_t pv_cpu_ops = PV_CPU_OPS; +EXPORT_SYMBOL(pv_cpu_ops); +pv_cpu_ops_t boot_pv_cpu_ops = PV_CPU_OPS; +pv_cpu_ops_t *cur_pv_cpu_ops = &boot_pv_cpu_ops; + +static unsigned int do_apic_read(unsigned int reg) +{ + return native_apic_read(reg); +} + +static void do_apic_write(unsigned int reg, unsigned int v) +{ + native_apic_write(reg, v); +} + +#define PV_APIC_OPS { \ + .apic_write = do_apic_write, \ + .apic_read = do_apic_read, \ + .boot_apic_write = do_apic_write, \ + .boot_apic_read = do_apic_read, \ +} + +pv_apic_ops_t pv_apic_ops = PV_APIC_OPS; +EXPORT_SYMBOL_GPL(pv_apic_ops); +pv_apic_ops_t boot_pv_apic_ops = PV_APIC_OPS; +pv_apic_ops_t *cur_pv_apic_ops = &boot_pv_apic_ops; + +static unsigned int do_epic_read_w(unsigned int reg) +{ + return native_epic_read_w(reg); +} + +static void do_epic_write_w(unsigned int reg, unsigned int v) +{ + native_epic_write_w(reg, v); +} + +static unsigned long 
do_epic_read_d(unsigned int reg) +{ + return native_epic_read_d(reg); +} + +static void do_epic_write_d(unsigned int reg, unsigned long v) +{ + native_epic_write_d(reg, v); +} + +#define PV_EPIC_OPS { \ + .epic_write_w = do_epic_write_w, \ + .epic_read_w = do_epic_read_w, \ + .epic_write_d = do_epic_write_d, \ + .epic_read_d = do_epic_read_d, \ + .boot_epic_write_w = do_epic_write_w, \ + .boot_epic_read_w = do_epic_read_w, \ +} + +pv_epic_ops_t pv_epic_ops = PV_EPIC_OPS; +EXPORT_SYMBOL_GPL(pv_epic_ops); +pv_epic_ops_t boot_pv_epic_ops = PV_EPIC_OPS; +pv_epic_ops_t *cur_pv_epic_ops = &boot_pv_epic_ops; + +static long +RECOVERY_FAULTED_TAGGED_STORE(e2k_addr_t address, u64 wr_data, + u32 data_tag, u64 st_rec_opc, int chan) +{ + return native_recovery_faulted_tagged_store(address, wr_data, data_tag, + st_rec_opc, chan); +} +static long +RECOVERY_FAULTED_LOAD(e2k_addr_t address, u64 *ld_val, u8 *data_tag, + u64 ld_rec_opc, int chan) +{ + return native_recovery_faulted_load(address, ld_val, data_tag, + ld_rec_opc, chan); +} +static long +RECOVERY_FAULTED_MOVE(e2k_addr_t addr_from, e2k_addr_t addr_to, + int format, int vr, u64 ld_rec_opc, int chan) +{ + return native_recovery_faulted_move(addr_from, addr_to, + format, vr, ld_rec_opc, chan); +} +static long +RECOVERY_FAULTED_LOAD_TO_GREG(e2k_addr_t address, + u32 greg_num_d, int format, int vr, + u64 ld_rec_opc, int chan, void *saved_greg) +{ + return native_recovery_faulted_load_to_greg(address, greg_num_d, + format, vr, ld_rec_opc, chan, saved_greg); +} +static void +MOVE_TAGGED_WORD(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + native_move_tagged_word(addr_from, addr_to); +} +static void +MOVE_TAGGED_DWORD(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + native_move_tagged_dword(addr_from, addr_to); +} +static void +MOVE_TAGGED_QWORD(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + native_move_tagged_qword(addr_from, addr_to); +} + +static void DO_WRITE_MMU_REG(mmu_addr_t mmu_addr, mmu_reg_t mmu_reg) +{ + 
NATIVE_WRITE_MMU_REG(mmu_addr, mmu_reg); +} + +static mmu_reg_t DO_READ_MMU_REG(mmu_addr_t mmu_addr) +{ + return (mmu_reg_t)NATIVE_READ_MMU_REG(mmu_addr); +} + +/* + * Write/read Data TLB register + */ + +static void DO_WRITE_DTLB_REG(tlb_addr_t tlb_addr, mmu_reg_t mmu_reg) +{ + NATIVE_WRITE_DTLB_REG(tlb_addr, mmu_reg); +} + +static mmu_reg_t DO_READ_DTLB_REG(tlb_addr_t tlb_addr) +{ + return NATIVE_READ_DTLB_REG(tlb_addr); +} + +/* + * Flush TLB page/entry + */ + +static void +DO_FLUSH_TLB_ENTRY(flush_op_t flush_op, flush_addr_t flush_addr) +{ + NATIVE_FLUSH_TLB_ENTRY(flush_op, flush_addr); +} + +/* + * Flush DCACHE line + */ + +static void +PV_DO_FLUSH_DCACHE_LINE(e2k_addr_t virt_addr) +{ + NATIVE_FLUSH_DCACHE_LINE(virt_addr); +} + +/* + * Clear DCACHE L1 set + */ +static void +DO_CLEAR_DCACHE_L1_SET(e2k_addr_t virt_addr, unsigned long set) +{ + NATIVE_CLEAR_DCACHE_L1_SET(virt_addr, set); +} +static void +do_flush_DCACHE_range(void *addr, size_t len) +{ + native_flush_DCACHE_range(addr, len); +} +static void +do_clear_DCACHE_L1_range(void *virt_addr, size_t len) +{ + native_clear_DCACHE_L1_range(virt_addr, len); +} + +/* + * Write/read DCACHE L2 registers + */ +static void +DO_WRITE_DCACHE_L2_REG(unsigned long reg_val, int reg_num, int bank_num) +{ + native_write_DCACHE_L2_reg(reg_val, reg_num, bank_num); +} +static unsigned long +DO_READ_DCACHE_L2_REG(int reg_num, int bank_num) +{ + return native_read_DCACHE_L2_reg(reg_num, bank_num); +} + +/* + * Flush ICACHE line + */ + +static void +DO_FLUSH_ICACHE_LINE(flush_op_t flush_op, flush_addr_t flush_addr) +{ + NATIVE_FLUSH_ICACHE_LINE(flush_op, flush_addr); +} + +/* + * Flush and invalidate or write back CACHE(s) (invalidate all caches + * of the processor) + */ + +static void +DO_FLUSH_CACHE_L12(flush_op_t flush_op) +{ + NATIVE_FLUSH_CACHE_L12(flush_op); +} + +/* + * Flush TLB (invalidate all TLBs of the processor) + */ + +static void +DO_FLUSH_TLB_ALL(flush_op_t flush_op) +{ + NATIVE_FLUSH_TLB_ALL(flush_op); +} + 
+/* + * Flush ICACHE (invalidate instruction caches of the processor) + */ + +static void +DO_FLUSH_ICACHE_ALL(flush_op_t flush_op) +{ + NATIVE_FLUSH_ICACHE_ALL(flush_op); +} + +/* + * Get Entry probe for virtual address + */ + +static probe_entry_t +DO_ENTRY_PROBE_MMU_OP(e2k_addr_t virt_addr) +{ + return NATIVE_ENTRY_PROBE_MMU_OP(virt_addr); +} + +/* + * Get physical address for virtual address + */ + +static probe_entry_t +DO_ADDRESS_PROBE_MMU_OP(e2k_addr_t virt_addr) +{ + return NATIVE_ADDRESS_PROBE_MMU_OP(virt_addr); +} + +/* + * Read CLW register + */ + +static clw_reg_t +DO_READ_CLW_REG(clw_addr_t clw_addr) +{ + return NATIVE_READ_CLW_REG(clw_addr); +} + +/* + * Write CLW register + */ + +static void +DO_WRITE_CLW_REG(clw_addr_t clw_addr, clw_reg_t val) +{ + NATIVE_WRITE_CLW_REG(clw_addr, val); +} + +/* save DAM state */ +static void +DO_SAVE_DAM(unsigned long long dam[DAM_ENTRIES_NUM]) +{ + NATIVE_SAVE_DAM(dam); +} + +/* + * MMU DEBUG registers access + */ +static mmu_reg_t DO_READ_MMU_DEBUG_REG(int reg_no) +{ + return (mmu_reg_t)NATIVE_GET_MMU_DEBUG_REG(reg_no); +} + +static void DO_WRITE_MMU_DEBUG_REG(int reg_no, mmu_reg_t mmu_reg) +{ + NATIVE_SET_MMU_DEBUG_REG(reg_no, mmu_reg); +} + +static void do_boot_set_pte_at(unsigned long addr, pte_t *ptep, pte_t pteval) +{ + native_set_pte(ptep, pteval, false); +} +static void +do_write_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval, + bool only_validate, bool to_move) +{ + native_write_pte_at(mm, addr, ptep, pteval, only_validate); +} +static void native_do_set_pte(pte_t *ptep, pte_t pteval) +{ + native_set_pte(ptep, pteval, false); +} + +static void +do_write_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmdval, + bool only_validate) +{ + native_write_pmd_at(mm, addr, pmdp, pmdval, only_validate); +} + +static void +do_write_pud_at(struct mm_struct *mm, unsigned long addr, + pud_t *pudp, pud_t pudval, + bool only_validate) +{ + native_write_pud_at(mm, addr, 
pudp, pudval, only_validate); +} + +static void +do_write_pgd_at(struct mm_struct *mm, unsigned long addr, + pgd_t *pgdp, pgd_t pgdval, + bool only_validate) +{ + native_write_pgd_at(mm, addr, pgdp, pgdval, only_validate); +} + +static pte_t do_pv_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, bool to_move) +{ + return native_ptep_get_and_clear(mm, addr, ptep); +} +static void do_ptep_wrprotect_atomic(struct mm_struct *mm, + e2k_addr_t addr, pte_t *ptep) +{ +#ifdef CONFIG_SMP + native_ptep_wrprotect_atomic(mm, addr, ptep); +#endif /* CONFIG_SMP */ +} +static pte_t do_get_pte_for_address(struct vm_area_struct *vma, + e2k_addr_t address) +{ + return native_do_get_pte_for_address(vma, address); +} +static void do_free_mm(struct mm_struct *mm) +{ + native_free_mm(mm); +} +static void do_activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) +{ + native_activate_mm(active_mm, mm); +} +static int do_make_host_pages_valid(struct vm_area_struct *vma, + e2k_addr_t start_addr, e2k_addr_t end_addr, + bool chprot, bool flush) +{ + return native_make_host_pages_valid(vma, start_addr, end_addr, + chprot, flush); +} +static int do_set_memory_attr_on_host(e2k_addr_t start, e2k_addr_t end, + int mode) +{ + return native_set_memory_attr_on_host(start, end, (enum sma_mode)mode); +} + +#define PV_GEN_MMU_OPS \ + .recovery_faulted_tagged_store = RECOVERY_FAULTED_TAGGED_STORE, \ + .recovery_faulted_load = RECOVERY_FAULTED_LOAD, \ + .recovery_faulted_move = RECOVERY_FAULTED_MOVE, \ + .recovery_faulted_load_to_greg = RECOVERY_FAULTED_LOAD_TO_GREG, \ + .move_tagged_word = MOVE_TAGGED_WORD, \ + .move_tagged_dword = MOVE_TAGGED_DWORD, \ + .move_tagged_qword = MOVE_TAGGED_QWORD, \ + .write_mmu_reg = DO_WRITE_MMU_REG, \ + .read_mmu_reg = DO_READ_MMU_REG, \ + .write_dtlb_reg = DO_WRITE_DTLB_REG, \ + .read_dtlb_reg = DO_READ_DTLB_REG, \ + .flush_tlb_entry = DO_FLUSH_TLB_ENTRY, \ + .flush_dcache_line = PV_DO_FLUSH_DCACHE_LINE, \ + .clear_dcache_l1_set = 
DO_CLEAR_DCACHE_L1_SET, \ + .flush_dcache_range = do_flush_DCACHE_range, \ + .clear_dcache_l1_range = do_clear_DCACHE_L1_range, \ + .write_dcache_l2_reg = DO_WRITE_DCACHE_L2_REG, \ + .read_dcache_l2_reg = DO_READ_DCACHE_L2_REG, \ + .flush_icache_line = DO_FLUSH_ICACHE_LINE, \ + .flush_cache_all = DO_FLUSH_CACHE_L12, \ + .do_flush_tlb_all = DO_FLUSH_TLB_ALL, \ + .flush_icache_all = DO_FLUSH_ICACHE_ALL, \ + .entry_probe_mmu_op = DO_ENTRY_PROBE_MMU_OP, \ + .address_probe_mmu_op = DO_ADDRESS_PROBE_MMU_OP, \ + .read_clw_reg = DO_READ_CLW_REG, \ + .write_clw_reg = DO_WRITE_CLW_REG, \ + .save_DAM = DO_SAVE_DAM, \ + .write_mmu_debug_reg = DO_WRITE_MMU_DEBUG_REG, \ + .read_mmu_debug_reg = DO_READ_MMU_DEBUG_REG, \ + .boot_set_pte_at = do_boot_set_pte_at, \ + .write_pte_at = do_write_pte_at, \ + .set_pte = native_do_set_pte, \ + .write_pmd_at = do_write_pmd_at, \ + .write_pud_at = do_write_pud_at, \ + .write_pgd_at = do_write_pgd_at, \ + .ptep_get_and_clear = do_pv_ptep_get_and_clear, \ + .ptep_wrprotect_atomic = do_ptep_wrprotect_atomic, \ + .get_pte_for_address = do_get_pte_for_address, \ + .remap_area_pages = native_remap_area_pages, \ + .host_guest_vmap_area = NULL, \ + .unhost_guest_vmap_area = NULL, \ + \ + /* memory management - mman.h */ \ + .free_mm = do_free_mm, \ + .mm_init = native_mm_init, \ + .activate_mm = do_activate_mm, \ + .make_host_pages_valid = do_make_host_pages_valid, \ + .set_memory_attr_on_host = do_set_memory_attr_on_host, \ + .access_process_vm = kvm_access_process_vm, \ + \ + /* memory management - mm.h */ \ + .free_pgd_range = native_free_pgd_range, \ + \ + /* kernel virtual memory allocation - vmalloc.h */ \ + .alloc_vmap_area = native_alloc_vmap_area, \ + .__free_vmap_area = native__free_vmap_area, \ + .free_unmap_vmap_area = native_free_unmap_vmap_area, \ + /* unmap __init areas */ \ + .unmap_initmem = NULL, \ + +#ifdef CONFIG_SMP +#define PV_SMP_MMU_OPS \ + .pcpu_get_vm_areas = native_pcpu_get_vm_areas, \ + +#else /* ! 
CONFIG_SMP */ +#define PV_SMP_MMU_OPS +#endif /* CONFIG_SMP */ + +#define PV_MMU_OPS { \ + PV_GEN_MMU_OPS \ + PV_SMP_MMU_OPS \ +} + +pv_mmu_ops_t pv_mmu_ops = PV_MMU_OPS; +EXPORT_SYMBOL(pv_mmu_ops); +pv_mmu_ops_t boot_pv_mmu_ops = PV_MMU_OPS; +pv_mmu_ops_t *cur_pv_mmu_ops = &boot_pv_mmu_ops; + +/* + * get/set current time + */ +static unsigned long do_get_cpu_running_cycles(void) +{ + return native_get_cpu_running_cycles(); +} + +static unsigned long long do_pv_sched_clock(void) +{ + /* FIXME: not implemented + return native_sched_clock(); + */ + return 0; +} + +pv_time_ops_t pv_time_ops = { + .time_init = native_time_init, + .clock_init = native_clock_init, + .read_current_timer = native_read_current_timer, + .get_cpu_running_cycles = do_get_cpu_running_cycles, + .do_sched_clock = do_pv_sched_clock, + .steal_clock = native_steal_clock, +}; +EXPORT_SYMBOL_GPL(pv_time_ops); + +pv_irq_ops_t pv_irq_ops = { +}; +EXPORT_SYMBOL(pv_irq_ops); + +static notrace void NATIVE_WRITEB(u8 b, void __iomem *addr) +{ + native_writeb(b, addr); +} + +static notrace void NATIVE_WRITEW(u16 w, void __iomem *addr) +{ + native_writew(w, addr); +} + +static notrace void NATIVE_WRITEL(u32 l, void __iomem *addr) +{ + native_writel(l, addr); +} + +static notrace void NATIVE_WRITELL(u64 q, void __iomem *addr) +{ + native_writeq(q, addr); +} + +static notrace u8 NATIVE_READB(void __iomem *addr) +{ + return native_readb(addr); +} + +static notrace u16 NATIVE_READW(void __iomem *addr) +{ + return native_readw(addr); +} + +static notrace u32 NATIVE_READL(void __iomem *addr) +{ + return native_readl(addr); +} + +static notrace u64 NATIVE_READLL(void __iomem *addr) +{ + return native_readq(addr); +} + +static notrace void BOOT_NATIVE_WRITEB(u8 b, void __iomem *addr) +{ + boot_native_writeb(b, addr); +} + +static notrace void BOOT_NATIVE_WRITEW(u16 w, void __iomem *addr) +{ + boot_native_writew(w, addr); +} + +static notrace void BOOT_NATIVE_WRITEL(u32 l, void __iomem *addr) +{ + boot_native_writel(l, 
addr); +} + +static notrace void BOOT_NATIVE_WRITELL(u64 q, void __iomem *addr) +{ + boot_native_writell(q, addr); +} + +static notrace u8 BOOT_NATIVE_READB(void __iomem *addr) +{ + return boot_native_readb(addr); +} + +static notrace u16 BOOT_NATIVE_READW(void __iomem *addr) +{ + return boot_native_readw(addr); +} + +static notrace u32 BOOT_NATIVE_READL(void __iomem *addr) +{ + return boot_native_readl(addr); +} + +static notrace u64 BOOT_NATIVE_READLL(void __iomem *addr) +{ + return boot_native_readll(addr); +} + +static void NATIVE_OUTSB(unsigned short port, const void *src, unsigned long count) +{ + native_outsb(port, src, count); +} + +static void NATIVE_OUTSW(unsigned short port, const void *src, unsigned long count) +{ + native_outsw(port, src, count); +} + +static void NATIVE_OUTSL(unsigned short port, const void *src, unsigned long count) +{ + native_outsl(port, src, count); +} + +static void NATIVE_INSB(unsigned short port, void *dst, unsigned long count) +{ + native_insb(port, dst, count); +} + +static void NATIVE_INSW(unsigned short port, void *dst, unsigned long count) +{ + native_insw(port, dst, count); +} + +static void NATIVE_INSL(unsigned short port, void *dst, unsigned long count) +{ + native_insl(port, dst, count); +} + +static void do_scr_writew(u16 val, volatile u16 *addr) +{ + native_scr_writew(val, addr); +} +static u16 do_scr_readw(volatile const u16 *addr) +{ + return native_scr_readw(addr); +} +static void do_vga_writeb(u8 val, volatile u8 *addr) +{ + native_vga_writeb(val, addr); +} +static u8 do_vga_readb(volatile const u8 *addr) +{ + return native_vga_readb(addr); +} + +#define PV_IO_OPS { \ + .boot_writeb = BOOT_NATIVE_WRITEB, \ + .boot_writew = BOOT_NATIVE_WRITEW, \ + .boot_writel = BOOT_NATIVE_WRITEL, \ + .boot_writell = BOOT_NATIVE_WRITELL, \ + .boot_readb = BOOT_NATIVE_READB, \ + .boot_readw = BOOT_NATIVE_READW, \ + .boot_readl = BOOT_NATIVE_READL, \ + .boot_readll = BOOT_NATIVE_READLL, \ + \ + .writeb = NATIVE_WRITEB, \ + .writew 
= NATIVE_WRITEW, \ + .writel = NATIVE_WRITEL, \ + .writell = NATIVE_WRITELL, \ + .readb = NATIVE_READB, \ + .readw = NATIVE_READW, \ + .readl = NATIVE_READL, \ + .readll = NATIVE_READLL, \ + \ + .inb = native_inb, \ + .outb = native_outb, \ + .outw = native_outw, \ + .inw = native_inw, \ + .outl = native_outl, \ + .inl = native_inl, \ + \ + .outsb = NATIVE_OUTSB, \ + .outsw = NATIVE_OUTSW, \ + .outsl = NATIVE_OUTSL, \ + .insb = NATIVE_INSB, \ + .insw = NATIVE_INSW, \ + .insl = NATIVE_INSL, \ + \ + .conf_inb = native_conf_inb, \ + .conf_inw = native_conf_inw, \ + .conf_inl = native_conf_inl, \ + .conf_outb = native_conf_outb, \ + .conf_outw = native_conf_outw, \ + .conf_outl = native_conf_outl, \ + \ + .scr_writew = do_scr_writew, \ + .scr_readw = do_scr_readw, \ + .vga_writeb = do_vga_writeb, \ + .vga_readb = do_vga_readb, \ + \ + .pci_init = native_arch_pci_init, \ +} +pv_io_ops_t pv_io_ops = PV_IO_OPS; +EXPORT_SYMBOL(pv_io_ops); +/* boot-time copy of pv_io_ops: functions have physical addresses */ +static pv_io_ops_t boot_pv_io_ops = PV_IO_OPS; +pv_io_ops_t *cur_pv_io_ops = &boot_pv_io_ops; + +static void pv_ops_to_boot_pv_ops(void *boot_pv_ops[], int entries_num) +{ + void **pv_ops = boot_native_vp_to_pp(boot_pv_ops); + void *op; + int entry; + + for (entry = 0; entry < entries_num; entry++) { + op = pv_ops[entry]; + if (op == NULL) + continue; + op = boot_native_vp_to_pp(op); + pv_ops[entry] = op; + } +} +static inline void pv_v2p_ops_to_boot_ops(void) +{ + pv_ops_to_boot_pv_ops((void **)&boot_pv_v2p_ops, + sizeof(boot_pv_v2p_ops) / sizeof(void *)); + /* switch PV_V2P_OPS pointer to physical functions entries */ + boot_native_get_vo_value(cur_pv_v2p_ops) = &boot_pv_v2p_ops; +} +static inline void pv_boot_ops_to_boot_ops(void) +{ + pv_ops_to_boot_pv_ops((void **)&boot_pv_boot_ops, + sizeof(boot_pv_boot_ops) / sizeof(void *)); + /* switch PV_V2P_OPS pointer to physical functions entries */ + boot_native_get_vo_value(cur_pv_boot_ops) = &boot_pv_boot_ops; +} 
+static inline void pv_cpu_ops_to_boot_ops(void) +{ + pv_ops_to_boot_pv_ops((void **)&boot_pv_cpu_ops, + sizeof(boot_pv_cpu_ops) / sizeof(void *)); + /* switch PV_V2P_OPS pointer to physical functions entries */ + boot_native_get_vo_value(cur_pv_cpu_ops) = &boot_pv_cpu_ops; +} +static inline void pv_apic_ops_to_boot_ops(void) +{ + pv_ops_to_boot_pv_ops((void **)&boot_pv_apic_ops, + sizeof(boot_pv_apic_ops) / sizeof(void *)); + /* switch PV_V2P_OPS pointer to physical functions entries */ + boot_native_get_vo_value(cur_pv_apic_ops) = &boot_pv_apic_ops; +} +static inline void pv_epic_ops_to_boot_ops(void) +{ + pv_ops_to_boot_pv_ops((void **)&boot_pv_epic_ops, + sizeof(boot_pv_epic_ops) / sizeof(void *)); + /* switch PV_V2P_OPS pointer to physical functions entries */ + boot_native_get_vo_value(cur_pv_epic_ops) = &boot_pv_epic_ops; +} +static inline void pv_mmu_ops_to_boot_ops(void) +{ + pv_ops_to_boot_pv_ops((void **)&boot_pv_mmu_ops, + sizeof(boot_pv_mmu_ops) / sizeof(void *)); + /* switch PV_V2P_OPS pointer to physical functions entries */ + boot_native_get_vo_value(cur_pv_mmu_ops) = &boot_pv_mmu_ops; +} +static inline void pv_io_ops_to_boot_ops(void) +{ + pv_ops_to_boot_pv_ops((void **)&boot_pv_io_ops, + sizeof(boot_pv_io_ops) / sizeof(void *)); + /* switch PV_IO_OPS pointer to physical functions entries */ + boot_native_get_vo_value(cur_pv_io_ops) = &boot_pv_io_ops; +} + +static inline void boot_pv_v2p_ops_to_ops(void) +{ + /* switch PV_V2P_OPS pointer to virtual functions entries */ + boot_native_get_vo_value(cur_pv_v2p_ops) = &pv_v2p_ops; +} +static inline void boot_pv_boot_ops_to_ops(void) +{ + /* switch PV_V2P_OPS pointer to virtual functions entries */ + boot_native_get_vo_value(cur_pv_boot_ops) = &pv_boot_ops; +} +static inline void boot_pv_cpu_ops_to_ops(void) +{ + /* switch PV_V2P_OPS pointer to virtual functions entries */ + boot_native_get_vo_value(cur_pv_cpu_ops) = &pv_cpu_ops; +} +static inline void boot_pv_apic_ops_to_ops(void) +{ + /* switch 
PV_V2P_OPS pointer to virtual functions entries */ + boot_native_get_vo_value(cur_pv_apic_ops) = &pv_apic_ops; +} +static inline void boot_pv_epic_ops_to_ops(void) +{ + /* switch PV_V2P_OPS pointer to virtual functions entries */ + boot_native_get_vo_value(cur_pv_epic_ops) = &pv_epic_ops; +} +static inline void boot_pv_mmu_ops_to_ops(void) +{ + /* switch PV_V2P_OPS pointer to virtual functions entries */ + boot_native_get_vo_value(cur_pv_mmu_ops) = &pv_mmu_ops; +} +static inline void boot_pv_io_ops_to_ops(void) +{ + /* switch PV_IO_OPS pointer to virtual functions entries */ + boot_native_get_vo_value(cur_pv_io_ops) = &pv_io_ops; +} +void native_pv_ops_to_boot_ops(void) +{ + pv_v2p_ops_to_boot_ops(); + pv_boot_ops_to_boot_ops(); + pv_cpu_ops_to_boot_ops(); + pv_apic_ops_to_boot_ops(); + pv_epic_ops_to_boot_ops(); + pv_mmu_ops_to_boot_ops(); + pv_io_ops_to_boot_ops(); +} +void native_boot_pv_ops_to_ops(void) +{ + boot_pv_v2p_ops_to_ops(); + boot_pv_boot_ops_to_ops(); + boot_pv_cpu_ops_to_ops(); + boot_pv_apic_ops_to_ops(); + boot_pv_epic_ops_to_ops(); + boot_pv_mmu_ops_to_ops(); + boot_pv_io_ops_to_ops(); +} diff --git a/arch/e2k/kvm/pgtable-gp.h b/arch/e2k/kvm/pgtable-gp.h new file mode 100644 index 000000000000..83f1e15b4b42 --- /dev/null +++ b/arch/e2k/kvm/pgtable-gp.h @@ -0,0 +1,213 @@ +/* + * E2K guest physical page table structure and common definitions. + * GP_* page tables are used to translate guest physical addresses and + * as second level of TDP (Two Dimensional Paging) translations + * + * Copyright 2018 MCST, Salavat S. Guiliazov (attic@mcst.ru) + */ + +#ifndef _E2K_KVM_PGTABLE_GP_H +#define _E2K_KVM_PGTABLE_GP_H + +/* + * This file contains the functions and defines necessary to modify and + * use the E2K ISET V6 guest physical page tables. + * NOTE: E2K FP tables have four levels of page tables. 
+ */ + +#include +#include + +#ifndef __ASSEMBLY__ + +/* + * PTE-GP format + */ + +#define E2K_MAX_PHYS_BITS_GP E2K_MAX_PHYS_BITS_V6 + +/* numbers of PTE's bits */ +#define _PAGE_P_BIT_GP 0 /* Present */ +#define _PAGE_W_BIT_GP 1 /* Writable */ +#define _PAGE_A_HW_BIT_GP 5 /* page Accessed */ +#define _PAGE_D_BIT_GP 6 /* page Dirty */ +#define _PAGE_HUGE_BIT_GP 7 /* huge Page Size */ +#define _PAGE_MTCR_SHIFT_GP 8 /* shift of Memory Type Combination */ + /* Rule field */ +#define _PAGE_MTCR_BITS_NUM_GP 2 /* and occupies 2 bits */ +#define _PAGE_SW1_BIT_GP 10 /* SoftWare bit #1 */ +#define _PAGE_SW2_BIT_GP 11 /* SoftWare bit #2 */ +#define _PAGE_PFN_SHIFT_GP 12 /* shift of Physical Page Number */ +#define _PAGE_MT_SHIFT_GP 60 /* shift of Memory Type field */ +#define _PAGE_MT_BITS_NUM_GP 3 /* occupies 3 bits */ + +#define _PAGE_P_GP (1ULL << _PAGE_P_BIT_GP) +#define _PAGE_W_GP (1ULL << _PAGE_W_BIT_GP) +#define _PAGE_A_HW_GP (1ULL << _PAGE_A_HW_BIT_GP) +#define _PAGE_D_GP (1ULL << _PAGE_D_BIT_GP) +#define _PAGE_HUGE_GP (1ULL << _PAGE_HUGE_BIT_GP) +#define _PAGE_MTCR_GP \ + (((1ULL << _PAGE_MTCR_BITS_NUM_GP) - 1) << _PAGE_MTCR_SHIFT_GP) +#define _PAGE_SW1_GP (1ULL << _PAGE_SW1_BIT_GP) +#define _PAGE_SW2_GP (1ULL << _PAGE_SW2_BIT_GP) +#define _PAGE_PFN_GP \ + ((((1ULL << E2K_MAX_PHYS_BITS_GP) - 1) >> \ + PAGE_SHIFT) << _PAGE_PFN_SHIFT_GP) +#define _PAGE_MT_GP \ + (((1ULL << _PAGE_MT_BITS_NUM_GP) - 1) << _PAGE_MT_SHIFT_GP) + +#define _PAGE_MMIO_SW_GP 0x0c00000000000000ULL /* pte is MMIO */ + /* software flag */ + +/* Memory type and Combination rules manipulation */ +#define _PAGE_MT_GET_VAL_GP(x) (((x) & _PAGE_MT_GP) >> _PAGE_MT_SHIFT_GP) +#define _PAGE_MT_SET_VAL_GP(x, mt) \ + (((x) & ~_PAGE_MT_GP) | \ + (((pteval_t)(mt) << _PAGE_MT_SHIFT_GP) & _PAGE_MT_GP)) + +#define _PAGE_MTCR_GET_VAL_GP(x) \ + (((x) & _PAGE_MTCR_GP) >> _PAGE_MTCR_SHIFT_GP) +#define _PAGE_MTCR_SET_VAL_GP(x, mtcr) \ + (((x) & ~_PAGE_MTCR_GP) | \ + (((pteval_t)(mtcr) << _PAGE_MTCR_SHIFT_GP) & \ + 
_PAGE_MTCR_GP)) + +/* convert physical address to page frame number for PTE */ +#define _PAGE_PADDR_TO_PFN_GP(phys_addr) \ + (((e2k_addr_t)phys_addr) & _PAGE_PFN_GP) + +/* convert the page frame number from PTE to physical address */ +#define _PAGE_PFN_TO_PADDR_GP(pte_val) \ + ((e2k_addr_t)(pte_val) & _PAGE_PFN_GP) + +/* PTE flags mask to can update/reduce and restricted to update */ +#define _PAGE_CHG_MASK_GP (_PAGE_PFN_GP | _PAGE_A_HW_GP | _PAGE_D_GP | \ + _PAGE_SW1_GP | _PAGE_SW2_GP | \ + _PAGE_MTCR_GP | _PAGE_MT_GP) +#define _HPAGE_CHG_MASK_GP (_PAGE_CHG_MASK_GP | _PAGE_HUGE_GP) +#define _PROT_REDUCE_MASK_GP (_PAGE_P_GP | _PAGE_W_GP | _PAGE_A_HW_GP | \ + _PAGE_D_GP | _PAGE_MTCR_GP | _PAGE_MT_GP) +#define _PROT_RESTRICT_MASK_GP 0ULL + +/* some useful PT entries protection basis values */ +#define _PAGE_KERNEL_RX_GP \ + (_PAGE_P_GP | _PAGE_A_HW_GP) +#define _PAGE_KERNEL_RO_GP _PAGE_KERNEL_RX_GP +#define _PAGE_KERNEL_RW_GP \ + (_PAGE_KERNEL_RX_GP | _PAGE_W_GP | _PAGE_D_GP) +#define _PAGE_KERNEL_RWX_GP _PAGE_KERNEL_RW_GP +#define _PAGE_KERNEL_HUGE_RX_GP \ + (_PAGE_KERNEL_RX_GP | _PAGE_HUGE_GP) +#define _PAGE_KERNEL_HUGE_RO_GP _PAGE_KERNEL_HUGE_RX_GP +#define _PAGE_KERNEL_HUGE_RW_GP \ + (_PAGE_KERNEL_HUGE_RX_GP | _PAGE_W_GP | _PAGE_D_GP) +#define _PAGE_KERNEL_HUGE_RWX_GP _PAGE_KERNEL_HUGE_RW_GP + +#define _PAGE_KERNEL_PT_GP _PAGE_KERNEL_RW_GP + +static inline pteval_t +get_pte_val_gp_changeable_mask(void) +{ + return _PAGE_CHG_MASK_GP; +} +static inline pteval_t +get_huge_pte_val_gp_changeable_mask(void) +{ + return _HPAGE_CHG_MASK_GP; +} +static inline pteval_t +get_pte_val_gp_reduceable_mask(void) +{ + return 0; +} +static inline pteval_t +get_pte_val_gp_restricted_mask(void) +{ + return _PROT_RESTRICT_MASK_GP; +} + +static inline pteval_t +covert_uni_pte_flags_to_pte_val_gp(const uni_pteval_t uni_flags) +{ + pteval_t pte_flags = 0; + + if (uni_flags & UNI_PAGE_PRESENT) + pte_flags |= (_PAGE_P_GP); + if (uni_flags & UNI_PAGE_WRITE) + pte_flags |= (_PAGE_W_GP); + 
if (uni_flags & UNI_PAGE_MEM_TYPE_RULE) + pte_flags |= (_PAGE_MTCR_GP); + if (uni_flags & UNI_PAGE_HW_ACCESS) + pte_flags |= (_PAGE_A_HW_GP); + if (uni_flags & UNI_PAGE_DIRTY) + pte_flags |= (_PAGE_D_GP); + if (uni_flags & UNI_PAGE_HUGE) + pte_flags |= (_PAGE_HUGE_GP); + if (uni_flags & UNI_PAGE_PFN) + pte_flags |= (_PAGE_PFN_GP); + if (uni_flags & UNI_PAGE_MEM_TYPE) + pte_flags |= (_PAGE_MT_GP); + + BUG_ON(uni_flags & UNI_PAGE_AVAIL); + BUG_ON(uni_flags & UNI_PAGE_SW_ACCESS); + BUG_ON(uni_flags & UNI_PAGE_SPECIAL); + BUG_ON(uni_flags & UNI_PAGE_GFN); + BUG_ON(uni_flags & UNI_PAGE_ACCESSED); + + return pte_flags; +} + +static inline pteval_t +fill_pte_val_gp_flags(const uni_pteval_t uni_flags) +{ + return covert_uni_pte_flags_to_pte_val_gp(uni_flags); +} +static inline pteval_t +get_pte_val_gp_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return pte_val & covert_uni_pte_flags_to_pte_val_gp(uni_flags); +} +static inline bool +test_pte_val_gp_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return get_pte_val_gp_flags(pte_val, uni_flags) != 0; +} +static inline pteval_t +set_pte_val_gp_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return pte_val | covert_uni_pte_flags_to_pte_val_gp(uni_flags); +} +static inline pteval_t +clear_pte_val_gp_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return pte_val & ~covert_uni_pte_flags_to_pte_val_gp(uni_flags); +} + +static inline unsigned int +get_pte_val_gp_memory_type_rule(pteval_t pte_val) +{ + return _PAGE_MTCR_GET_VAL_GP(pte_val); +} +static inline unsigned int +get_pte_val_gp_memory_type(pteval_t pte_val) +{ + return _PAGE_MT_GET_VAL_GP(pte_val); +} +static inline pteval_t +set_pte_val_gp_memory_type(pteval_t pte_val, unsigned int memory_type) +{ + return set_pte_val_v6_memory_type(pte_val, memory_type); +} +static inline pteval_t +set_pte_val_gp_memory_type_rule(pteval_t pte_val, unsigned int mtcr) +{ + BUG_ON(mtcr != MOST_STRONG_MTCR && + mtcr != FROM_HYPERVISOR_MTCR && + 
mtcr != FROM_GUEST_MTCR); + + return _PAGE_MTCR_SET_VAL_GP(pte_val, mtcr); +} + +#endif /* ! __ASSEMBLY__ */ + +#endif /* ! _E2K_KVM_PGTABLE_GP_H */ diff --git a/arch/e2k/kvm/pic.h b/arch/e2k/kvm/pic.h new file mode 100644 index 000000000000..302fabffc7ae --- /dev/null +++ b/arch/e2k/kvm/pic.h @@ -0,0 +1,291 @@ +#ifndef __KVM_PIC_H +#define __KVM_PIC_H + +#include "lapic.h" +#include "cepic.h" +#include "ioepic.h" + +/* + * Choose between paravirt LAPIC/IOAPIC and CEPIC/IOEPIC models, based on + * kvm->arch.is_epic. This variable is set after receiving an EPIC flag from + * QEMU + */ + +static inline bool kvm_is_epic(const struct kvm *kvm) +{ + return kvm->arch.is_epic; +} + +static inline bool kvm_vcpu_is_epic(const struct kvm_vcpu *vcpu) +{ + return kvm_is_epic(vcpu->kvm); +} + +static inline int kvm_create_local_pic(struct kvm_vcpu *vcpu) +{ + if (kvm_vcpu_is_epic(vcpu)) + return kvm_create_cepic(vcpu); + else + return kvm_create_lapic(vcpu); +} + +static inline int kvm_epic_sysrq_deliver(struct kvm_vcpu *vcpu); +static inline int kvm_pic_sysrq_deliver(struct kvm_vcpu *vcpu) +{ + if (kvm_vcpu_is_epic(vcpu)) + return kvm_epic_sysrq_deliver(vcpu); + else + return kvm_apic_sysrq_deliver(vcpu); +} + +static inline int kvm_pic_nmi_deliver(struct kvm_vcpu *vcpu) +{ + if (kvm_vcpu_is_epic(vcpu)) + return kvm_epic_nmi_deliver(vcpu); + else + return kvm_apic_nmi_deliver(vcpu); +} + +extern int init_cepic_state(struct kvm_vcpu *vcpu); +extern int init_lapic_state(struct kvm_vcpu *vcpu); +static inline int init_pic_state(struct kvm_vcpu *vcpu) +{ + if (kvm_vcpu_is_epic(vcpu)) + return init_cepic_state(vcpu); + else + return init_lapic_state(vcpu); +} + +extern int kvm_ioepic_init(struct kvm *kvm); +extern int kvm_ioapic_init(struct kvm *kvm); +static inline int kvm_io_pic_init(struct kvm *kvm) +{ + if (kvm_is_epic(kvm)) + return kvm_ioepic_init(kvm); + else + return kvm_ioapic_init(kvm); +} + +static inline int kvm_io_pic_set_base(struct kvm *kvm, u64 new_base, + int 
node_id) +{ + if (kvm_is_epic(kvm)) + return kvm_ioepic_set_base(kvm, new_base, node_id); + return -ENODEV; +} + +extern int kvm_setup_epic_irq_routing(struct kvm *kvm); +extern int kvm_setup_apic_irq_routing(struct kvm *kvm); +static inline int kvm_setup_default_irq_routing(struct kvm *kvm) +{ + if (kvm_is_epic(kvm)) + return kvm_setup_epic_irq_routing(kvm); + else + return kvm_setup_apic_irq_routing(kvm); +} + +extern void kvm_irq_routing_update_apic(struct kvm *kvm); +extern void kvm_irq_routing_update_epic(struct kvm *kvm); +static inline void kvm_irq_routing_update_pic(struct kvm *kvm) +{ + if (kvm_is_epic(kvm)) + kvm_irq_routing_update_epic(kvm); + else + kvm_irq_routing_update_apic(kvm); +} + +extern void kvm_post_irq_routing_update_epic(struct kvm *kvm); +extern void kvm_post_irq_routing_update_apic(struct kvm *kvm); +static inline void kvm_post_irq_routing_update_pic(struct kvm *kvm) +{ + if (kvm_is_epic(kvm)) + kvm_post_irq_routing_update_epic(kvm); + else + kvm_post_irq_routing_update_apic(kvm); +} + +static inline void kvm_pic_set_vapic_addr(struct kvm_vcpu *vcpu, + gpa_t vapic_addr) +{ + if (!kvm_vcpu_is_epic(vcpu)) + kvm_lapic_set_vapic_addr(vcpu, vapic_addr); +} + +/* Choose between software and hardware EPIC */ +extern int kvm_irq_delivery_to_hw_epic(struct kvm *kvm, int src, + const struct kvm_cepic_irq *irq); +extern int kvm_irq_delivery_to_sw_epic(struct kvm *kvm, int src, + struct kvm_cepic_irq *irq); +static inline int kvm_irq_delivery_to_epic(struct kvm *kvm, int src, + struct kvm_cepic_irq *irq) +{ + if (kvm->arch.is_hv) + return kvm_irq_delivery_to_hw_epic(kvm, src, irq); + else + return kvm_irq_delivery_to_sw_epic(kvm, src, irq); +} + +extern int kvm_hw_epic_sysrq_deliver(struct kvm_vcpu *vcpu); +static inline int kvm_epic_sysrq_deliver(struct kvm_vcpu *vcpu) +{ + if (vcpu->kvm->arch.is_hv) + return kvm_hw_epic_sysrq_deliver(vcpu); + else + return kvm_sw_epic_sysrq_deliver(vcpu); +} + +#ifdef CONFIG_KVM_ASYNC_PF +extern int 
kvm_hw_epic_async_pf_wake_deliver(struct kvm_vcpu *vcpu); +#endif /* CONFIG_KVM_ASYNC_PF */ + +extern int kvm_set_epic_msi(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_id, int level, bool line_status); +extern int kvm_set_apic_msi(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_id, int level, bool line_status); + +static inline int +kvm_set_pic_msi(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_id, int level, bool line_status) +{ + if (kvm_is_epic(kvm)) + return kvm_set_epic_msi(e, kvm, irq_id, level, line_status); + else + return kvm_set_apic_msi(e, kvm, irq_id, level, line_status); +} + +extern void kvm_int_violat_delivery_to_hw_epic(struct kvm *kvm); +extern int kvm_hw_epic_deliver_to_icr(struct kvm_vcpu *vcpu, + unsigned int vector, u8 dlvm); + +extern void kvm_ioapic_release(struct kvm *kvm); +extern void kvm_ioepic_destroy(struct kvm *kvm); +static inline void kvm_iopic_release(struct kvm *kvm) +{ + if (kvm_is_epic(kvm)) + kvm_ioepic_destroy(kvm); + else + kvm_ioapic_release(kvm); +} + +extern void kvm_free_lapic(struct kvm_vcpu *vcpu); +extern void kvm_free_cepic(struct kvm_vcpu *vcpu); +static inline void kvm_free_local_pic(struct kvm_vcpu *vcpu) +{ + if (kvm_vcpu_is_epic(vcpu)) + kvm_free_cepic(vcpu); + else + kvm_free_lapic(vcpu); +} + +extern bool kvm_dy_has_epic_interrupts(const struct kvm_vcpu *vcpu); +extern bool kvm_vcpu_has_epic_interrupts(const struct kvm_vcpu *vcpu); +extern bool kvm_vcpu_has_apic_interrupts(const struct kvm_vcpu *vcpu); +static inline bool kvm_vcpu_has_pic_interrupts(const struct kvm_vcpu *vcpu) +{ + if (kvm_vcpu_is_epic(vcpu)) + return kvm_vcpu_has_epic_interrupts(vcpu); + else + return kvm_vcpu_has_apic_interrupts(vcpu); +} + +extern int kvm_cpu_has_pending_epic_timer(struct kvm_vcpu *vcpu); +extern int kvm_cpu_has_pending_apic_timer(struct kvm_vcpu *vcpu); +static inline int kvm_cpu_has_pending_pic_timer(struct kvm_vcpu *vcpu) +{ + if (kvm_vcpu_is_epic(vcpu)) + 
return kvm_cpu_has_pending_epic_timer(vcpu); + else + return kvm_cpu_has_pending_apic_timer(vcpu); +} + +/* Choose between software and hardware LAPIC */ +static inline bool kvm_is_hw_apic(const struct kvm *kvm) +{ + return kvm->arch.is_hv; +} + +static inline bool kvm_vcpu_is_hw_apic(const struct kvm_vcpu *vcpu) +{ + return kvm_is_hw_apic(vcpu->kvm); +} + +extern int kvm_irq_delivery_to_hw_apic(struct kvm *kvm, + struct kvm_lapic *src, struct kvm_lapic_irq *irq); +extern int kvm_irq_delivery_to_sw_apic(struct kvm *kvm, + struct kvm_lapic *src, struct kvm_lapic_irq *irq); +static inline int kvm_irq_delivery_to_apic(struct kvm *kvm, + struct kvm_lapic *src, struct kvm_lapic_irq *irq) +{ + if (kvm_is_hw_apic(kvm)) + return kvm_irq_delivery_to_hw_apic(kvm, src, irq); + else + return kvm_irq_delivery_to_sw_apic(kvm, src, irq); +} + +extern int kvm_get_hw_apic_interrupt(struct kvm_vcpu *vcpu); +extern int kvm_get_sw_apic_interrupt(struct kvm_vcpu *vcpu); +static inline int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu) +{ + if (kvm_vcpu_is_hw_apic(vcpu)) + return kvm_get_hw_apic_interrupt(vcpu); + else + return kvm_get_sw_apic_interrupt(vcpu); +} + +extern void hw_apic_set_eoi(struct kvm_lapic *apic); +extern void sw_apic_set_eoi(struct kvm_lapic *apic); +static inline void apic_set_eoi(struct kvm_lapic *apic) +{ + if (kvm_vcpu_is_hw_apic(apic->vcpu)) + hw_apic_set_eoi(apic); + else + sw_apic_set_eoi(apic); +} + +extern void start_hw_apic_timer(struct kvm_lapic *apic, u32 apic_tmict); +extern void start_sw_apic_timer(struct kvm_lapic *apic, u32 apic_tmict); +static inline void start_apic_timer(struct kvm_lapic *apic, u32 apic_tmict) +{ + if (kvm_vcpu_is_hw_apic(apic->vcpu)) + start_hw_apic_timer(apic, apic_tmict); + else + start_sw_apic_timer(apic, apic_tmict); +} + +extern void hw_apic_write_nm(struct kvm_lapic *apic, u32 val); +static inline void apic_write_nm(struct kvm_lapic *apic, u32 val) +{ + if (kvm_vcpu_is_hw_apic(apic->vcpu)) + hw_apic_write_nm(apic, val); +} + 
+extern u32 hw_apic_read_nm(struct kvm_lapic *apic); +extern u32 sw_apic_read_nm(struct kvm_lapic *apic); +static inline u32 apic_read_nm(struct kvm_lapic *apic) +{ + if (kvm_vcpu_is_hw_apic(apic->vcpu)) + return hw_apic_read_nm(apic); + else + return sw_apic_read_nm(apic); +} + +extern u32 hw_apic_get_tmcct(struct kvm_lapic *apic); +extern u32 sw_apic_get_tmcct(struct kvm_lapic *apic); +static inline u32 apic_get_tmcct(struct kvm_lapic *apic) +{ + if (kvm_vcpu_is_hw_apic(apic->vcpu)) + return hw_apic_get_tmcct(apic); + else + return sw_apic_get_tmcct(apic); +} + +extern void hw_apic_write_lvtt(struct kvm_lapic *apic, u32 apic_lvtt); +static inline void apic_write_lvtt(struct kvm_lapic *apic, u32 apic_lvtt) +{ + if (kvm_vcpu_is_hw_apic(apic->vcpu)) + return hw_apic_write_lvtt(apic, apic_lvtt); +} + +extern bool kvm_check_lapic_priority(struct kvm_vcpu *vcpu); + +#endif /* __KVM_PIC_H */ diff --git a/arch/e2k/kvm/process.c b/arch/e2k/kvm/process.c new file mode 100644 index 000000000000..cefec3dda058 --- /dev/null +++ b/arch/e2k/kvm/process.c @@ -0,0 +1,2616 @@ +/* + * arch/e2k/kvm/process.c + * + * This file handles the arch-dependent parts of kvm process handling + * + * Copyright 2011 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "process.h" +#include "cpu.h" +#include "mman.h" +#include "mmu.h" +#include "io.h" +#include "gaccess.h" +#include "time.h" +#include "pic.h" + + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_THREAD_MODE +#undef DebugKVMT +#define DEBUG_KVM_THREAD_MODE 0 /* KVM thread debugging */ +#define DebugKVMT(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_THREAD_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_KERNEL_MODE +#undef DebugKVMKS +#define DEBUG_KVM_KERNEL_MODE 0 /* KVM process copy debugging */ +#define DebugKVMKS(fmt, args...) \ +({ \ + if (DEBUG_KVM_KERNEL_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_STARTUP_MODE +#undef DebugKVMSTUP +#define DEBUG_KVM_STARTUP_MODE 0 /* VCPU startup debugging */ +#define DebugKVMSTUP(fmt, args...) \ +({ \ + if (DEBUG_KVM_STARTUP_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_EXEC_MODE +#undef DebugKVMEX +#define DEBUG_KVM_EXEC_MODE 0 /* KVM execve() debugging */ +#define DebugKVMEX(fmt, args...) \ +({ \ + if (DEBUG_KVM_EXEC_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_CLONE_USER_MODE +#undef DebugKVMCLN +#define DEBUG_KVM_CLONE_USER_MODE 0 /* KVM thread clone debug */ +#define DebugKVMCLN(fmt, args...) \ +({ \ + if (DEBUG_KVM_CLONE_USER_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_COPY_USER_MODE +#undef DebugKVMCPY +#define DEBUG_KVM_COPY_USER_MODE 0 /* KVM thread clone debugging */ +#define DebugKVMCPY(fmt, args...) \ +({ \ + if (DEBUG_KVM_COPY_USER_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_SIGNAL_MODE +#undef DebugSIG +#define DEBUG_SIGNAL_MODE 0 /* signal handling debugging */ +#define DebugSIG(fmt, args...) \ +({ \ + if (DEBUG_SIGNAL_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_THREAD_INFO_MODE +#undef DebugKVMTI +#define DEBUG_KVM_THREAD_INFO_MODE 0 /* KVM thread info debug */ +#define DebugKVMTI(fmt, args...) \ +({ \ + if (DEBUG_KVM_THREAD_INFO_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_FREE_TASK_STRUCT_MODE +#undef DebugFRTASK +#define DEBUG_FREE_TASK_STRUCT_MODE 0 /* free thread info debug */ +#define DebugFRTASK(fmt, args...) 
\
+({ \
+	if (DEBUG_FREE_TASK_STRUCT_MODE) \
+		pr_info("%s(): " fmt, __func__, ##args); \
+})
+
+#undef	DEBUG_KVM_ACTIVATION_MODE
+#undef	DebugKVMACT
+#define	DEBUG_KVM_ACTIVATION_MODE	0	/* KVM guest kernel data */
+						/* stack activations */
+						/* debugging */
+#define	DebugKVMACT(fmt, args...) \
+({ \
+	if (DEBUG_KVM_ACTIVATION_MODE) \
+		pr_info("%s(): " fmt, __func__, ##args); \
+})
+
+#undef	DEBUG_HOST_ACTIVATION_MODE
+#undef	DebugHACT
+#define	DEBUG_HOST_ACTIVATION_MODE	0	/* KVM host kernel data */
+						/* stack activations */
+						/* debugging */
+#define	DebugHACT(fmt, args...) \
+({ \
+	if (DEBUG_HOST_ACTIVATION_MODE) \
+		pr_info("%s(): " fmt, __func__, ##args); \
+})
+
+#undef	DEBUG_KVM_SWITCH_VCPU_MODE
+#undef	DebugSWVCPU
+#define	DEBUG_KVM_SWITCH_VCPU_MODE	false	/* guest thread switch to */
+						/* other VCPU */
+#define	DebugSWVCPU(fmt, args...) \
+({ \
+	if (DEBUG_KVM_SWITCH_VCPU_MODE) \
+		pr_info("%s(): " fmt, __func__, ##args); \
+})
+
+#undef	DEBUG_GPT_REGS_MODE
+#define	DEBUG_GPT_REGS_MODE	0	/* KVM host and guest kernel */
+					/* stack activations print */
+/* NOTE(review): the definition below redefines DebugHACT (no #undef) with */
+/* the identical body it was already given above, and it tests */
+/* DEBUG_HOST_ACTIVATION_MODE rather than DEBUG_GPT_REGS_MODE; this looks */
+/* like a copy-paste error -- a GPT-regs-specific Debug macro was probably */
+/* intended here. Confirm against the other arch/e2k/kvm sources. */
+#define	DebugHACT(fmt, args...) \
+({ \
+	if (DEBUG_HOST_ACTIVATION_MODE) \
+		pr_info("%s(): " fmt, __func__, ##args); \
+})
+
+#undef	DEBUG_KVM_SWITCH_HS_MODE
+#undef	DebugKVMSW
+#define	DEBUG_KVM_SWITCH_HS_MODE	0	/* KVM switch guest hardware */
+						/* stacks */
+#define	DebugKVMSW(fmt, args...) \
+({ \
+	if (DEBUG_KVM_SWITCH_HS_MODE) \
+		pr_info("%s(): " fmt, __func__, ##args); \
+})
+
+#undef	DEBUG_KVM_SHUTDOWN_MODE
+#undef	DebugKVMSH
+#define	DEBUG_KVM_SHUTDOWN_MODE	0	/* KVM shutdown debugging */
+#define	DebugKVMSH(fmt, args...) \
+({ \
+	if (DEBUG_KVM_SHUTDOWN_MODE || kvm_debug) \
+		pr_info("%s(): " fmt, __func__, ##args); \
+})
+
+#undef	DEBUG_KVM_IRQ_MODE
+#undef	DebugKVMIRQ
+#define	DEBUG_KVM_IRQ_MODE	0	/* KVM IRQ debugging */
+#define	DebugKVMIRQ(fmt, args...)
\ +({ \ + if (DEBUG_KVM_IRQ_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_VIRQs_MODE +#undef DebugVIRQs +#define DEBUG_KVM_VIRQs_MODE debug_guest_virqs /* VIRQs debugging */ +#define DebugVIRQs(fmt, args...) \ +({ \ + if (DEBUG_KVM_VIRQs_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_IDLE_MODE +#undef DebugKVMIDLE +#define DEBUG_KVM_IDLE_MODE 0 /* KVM guest idle debugging */ +#define DebugKVMIDLE(fmt, args...) \ +({ \ + if (DEBUG_KVM_IDLE_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_SHOW_GUEST_STACKS_MODE +#undef DebugGST +#define DEBUG_SHOW_GUEST_STACKS_MODE true /* show all guest stacks */ +#define DebugGST(fmt, args...) \ +({ \ + if (DEBUG_SHOW_GUEST_STACKS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_TO_VIRT_MODE +#undef DebugTOVM +#define DEBUG_KVM_TO_VIRT_MODE 0 /* switch guest to virtual mode */ +#define DebugTOVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_TO_VIRT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_USER_STACK_MODE +#undef DebugGUS +#define DEBUG_KVM_USER_STACK_MODE 0 /* guest user stacks */ +#define DebugGUS(fmt, args...) \ +({ \ + if (DEBUG_KVM_USER_STACK_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) +static bool debug_copy_guest = false; +bool debug_clone_guest = false; + +#undef DEBUG_KVM_GUEST_MM_MODE +#undef DebugGMM +#define DEBUG_KVM_GUEST_MM_MODE 0 /* guest MM support */ +#define DebugGMM(fmt, args...) \ +({ \ + if (DEBUG_KVM_GUEST_MM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_SIG_HANDLER_MODE +#undef DebugSIGH +#define DEBUG_KVM_SIG_HANDLER_MODE 0 /* signal handler debug */ +#define DebugSIGH(fmt, args...) \ +({ \ + if (DEBUG_KVM_SIG_HANDLER_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_LONG_JUMP_MODE +#undef DebugLJMP +#define DEBUG_KVM_LONG_JUMP_MODE 0 /* long jump debug */ +#define DebugLJMP(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_LONG_JUMP_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +int clone_guest_kernel = 0; + +static gthread_info_t *alloc_guest_thread_info(struct kvm *kvm); +static void free_guest_thread_info(struct kvm *kvm, gthread_info_t *gti); +static void do_free_guest_thread_info(struct kvm *kvm, gthread_info_t *gti); +static int kvm_guest_failed(struct kvm_vcpu *vcpu); + +#define SET_VCPU_BREAKPOINT false + +#ifdef CONFIG_DATA_BREAKPOINT +atomic_t hw_data_breakpoint_num = ATOMIC_INIT(-1); +#endif /* CONFIG_DATA_BREAKPOINT */ + +#ifdef CONFIG_KVM_HOST_MODE +/* It is paravirtualized host and guest kernel */ +/* or native host kernel with virtualization support */ +/* FIXME: kvm host and hypervisor features is not supported on guest mode */ +/* and all files from arch/e2k/kvm should not be compiled for guest kernel */ +/* only arch/e2k/kvm/guest/ implements guest kernel support */ +/* So this ifdef should be deleted after excluding arch/e2k/kvm compilation */ +noinline notrace long +as_guest_entry_start(unsigned long arg0, unsigned long arg1, + unsigned long arg2, unsigned long arg3, + char *entry_point, bool priv_guest) +{ + /* + * Do not update this function. Any new operators can change + * behaviour and goal of the function - simulate guest + * at start point was interrupted by go2user() function + */ + go2guest((unsigned long)entry_point, priv_guest); + /* here we should not be never */ + return arg0; +} +#endif /* CONFIG_KVM_HOST_MODE */ + +/** + * The function return bool value - should this guest kernel thread be stopped + * now? + * + * When someone calls kvm_guest_vcpu_thread_stop() on your kthread, it will be + * woken and this will return true. 
+ */ +bool kvm_guest_vcpu_thread_should_stop(struct kvm_vcpu *vcpu) +{ + BUG_ON(vcpu == NULL); + BUG_ON(!vcpu->arch.is_hv && vcpu->arch.host_task != current); + return vcpu->arch.should_stop || + vcpu->kvm->arch.halted || + vcpu->kvm->arch.reboot; +} + +gthread_info_t *create_guest_start_thread_info(struct kvm_vcpu *vcpu) +{ + gthread_info_t *gthread_info; + + DebugKVMTI("started to launch guest kernel on VCPU %d\n", + vcpu->vcpu_id); + + gthread_info = alloc_guest_thread_info(vcpu->kvm); + if (gthread_info == NULL) { + DebugKVMTI("could not create guest thread info\n"); + return NULL; + } + set_gti_thread_flag(gthread_info, GTIF_VCPU_START_THREAD); + set_gti_thread_flag(gthread_info, GTIF_KERNEL_THREAD); + kvm_gmm_get(vcpu, gthread_info, pv_vcpu_get_init_gmm(vcpu)); + pv_vcpu_set_active_gmm(vcpu, pv_vcpu_get_init_gmm(vcpu)); + setup_vcpu_boot_stacks(vcpu, gthread_info); + return gthread_info; +} + +/* + * There are two VCPU threads: host thread and guest thread. + * Both threads are created and executed as user threads on host + * Host VCPU thread execute QEMU (virtual machine simulation) and + * start guest VCPU run, handle exit reasons from guest VCPU, + * resume VCPU execution, terminate VCPU running + * Guest VCPU thread is created by host VCPU thread and execute + * guest kernel on this thread (as one of guest VCPUs) + * Guest VCPU thread creates VIRQ VCPU threads to handle virtual + * interrupts + */ +void kvm_clear_host_thread_info(thread_info_t *ti) +{ + /* each VCPU can has own root pgd */ + ti->kernel_image_pgd_p = NULL; + pgd_val(ti->kernel_image_pgd) = 0; + + /* guest thread info does not yet created */ + ti->gthread_info = NULL; + + INIT_LIST_HEAD(&ti->tasks_to_spin); + ti->gti_to_spin = NULL; + + /* VCPU is not yet created */ + clear_ti_thread_flag(ti, TIF_VIRTUALIZED_HOST); + clear_ti_thread_flag(ti, TIF_VIRTUALIZED_GUEST); + clear_ti_thread_flag(ti, TIF_PARAVIRT_GUEST); +} + +static inline void resume_host_start_thread(struct kvm_vcpu *vcpu) +{ + 
DebugKVMSH("started on %s (%d) VCPU #%d\n", + current->comm, current->pid, vcpu->vcpu_id); + BUG_ON(vcpu == NULL); + if (vcpu->arch.host_task == NULL) { + DebugKVMSH("host VCPU #%d thread is already halted or not " + "started\n", vcpu->vcpu_id); + } else if (vcpu->arch.host_task != current) { + /* it is not thread of VCPU host process */ + BUG_ON(1); + return; + } + + /* from here the thread will be as common thread */ + clear_thread_flag(TIF_VIRTUALIZED_HOST); + clear_thread_flag(TIF_MULTITHREADING); + + kvm_halt_host_vcpu_thread(vcpu); +} + +int kvm_resume_vm_thread(void) +{ + if (!test_thread_flag(TIF_MULTITHREADING)) + return 0; + + if (test_thread_flag(TIF_VIRTUALIZED_HOST)) + resume_host_start_thread(current_thread_info()->vcpu); + else if (test_thread_flag(TIF_VIRTUALIZED_GUEST)) + return kvm_guest_failed(current_thread_info()->vcpu); + clear_thread_flag(TIF_MULTITHREADING); + return 0; +} + +void kvm_spare_host_vcpu_release(struct kvm_vcpu *vcpu) +{ + DebugKVMSH("%s (%d) started for VCPU #%d\n", + current->comm, current->pid, vcpu->vcpu_id); + resume_host_start_thread(vcpu); +} + +/* + * same as prepare_bu_stacks_to_startup_vcpu() but only for paravirtualization + * and boot stacks are from guest memory space + */ +static int prepare_pv_stacks_to_startup_vcpu(struct kvm_vcpu *vcpu, + guest_hw_stack_t *stack_regs, + u64 *args, int args_num, + char *entry_point, e2k_psr_t psr, + e2k_size_t usd_size, void *ps_base, void *pcs_base, + int cui, bool kernel) +{ + e2k_mem_crs_t pcs_frames[2]; + e2k_mem_ps_t ps_frames[8 * sizeof(*args) / (EXT_4_NR_SZ / 2)]; + e2k_mem_crs_t *g_pcs_frames; + e2k_mem_ps_t *g_ps_frames; + e2k_size_t ps_ind, pcs_ind; + int up_frame, ret; + + DebugKVMSTUP("started on VCPU #%d base PS %px, PCS %px\n", + vcpu->vcpu_id, ps_base, pcs_base); + + /*max number of arguments limited by above ps_frames[] size */ + KVM_BUG_ON(args_num > 8); + + prepare_stacks_to_startup_vcpu(vcpu, ps_frames, pcs_frames, + args, args_num, entry_point, psr, + 
usd_size, &ps_ind, &pcs_ind, cui, kernel);
+
+	g_ps_frames = (e2k_mem_ps_t *)ps_base;
+	g_pcs_frames = (e2k_mem_crs_t *)pcs_base;
+	ret = kvm_vcpu_copy_to_guest(vcpu, g_ps_frames, ps_frames, ps_ind);
+	if (unlikely(ret < 0)) {
+		pr_err("%s(): could not prepare initial content of "
+			"guest boot procedure stack, error %d\n",
+			__func__, ret);
+		return ret;
+	}
+	/* very UP frame of chain stack will be loaded on registers */
+	/* directly by host before return to guest */
+	up_frame = pcs_ind / SZ_OF_CR - 1;
+	stack_regs->crs.cr0_lo = pcs_frames[up_frame].cr0_lo;
+	stack_regs->crs.cr0_hi = pcs_frames[up_frame].cr0_hi;
+	stack_regs->crs.cr1_lo = pcs_frames[up_frame].cr1_lo;
+	stack_regs->crs.cr1_hi = pcs_frames[up_frame].cr1_hi;
+
+	ret = kvm_vcpu_copy_to_guest(vcpu, g_pcs_frames, pcs_frames, pcs_ind);
+	if (unlikely(ret < 0)) {
+		pr_err("%s(): could not prepare initial content of "
+			"guest boot chain stack, error %d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	/* correct stacks pointers indexes */
+	stack_regs->stacks.psp_hi.PSP_hi_ind = ps_ind;
+	stack_regs->stacks.pcsp_hi.PCSP_hi_ind = pcs_ind - SZ_OF_CR;
+	DebugKVMSTUP("VCPU #%d boot PS.ind 0x%x PCS.ind 0x%x\n",
+		vcpu->vcpu_id,
+		stack_regs->stacks.psp_hi.PSP_hi_ind,
+		stack_regs->stacks.pcsp_hi.PCSP_hi_ind);
+	return 0;
+}
+
+struct tty_struct *kvm_tty = NULL;
+
+/*
+ * Attach the current (host user) thread as the run thread of this VCPU:
+ * rename the task for debugging, remember it in vcpu->arch.host_task and
+ * reset the per-thread spin lists. Always returns 0.
+ */
+int kvm_init_vcpu_thread(struct kvm_vcpu *vcpu)
+{
+	char name[80];
+
+	/* snprintf: bound the write to the buffer (the name is truncated */
+	/* again to TASK_COMM_LEN by set_task_comm() anyway) */
+	snprintf(name, sizeof(name), "kvm/%d-vcpu/%d",
+		vcpu->kvm->arch.vmid.nr, vcpu->vcpu_id);
+	set_task_comm(current, name);
+	vcpu->arch.host_task = current;
+
+	INIT_LIST_HEAD(&current_thread_info()->tasks_to_spin);
+	current_thread_info()->gti_to_spin = NULL;
+
+	DebugKVM("VCPU %d will be run as thread %s (%d) pgd %px\n",
+		vcpu->vcpu_id, current->comm, current->pid, current->mm->pgd);
+
+	return 0;
+}
+
+/* hardware-virtualized VCPU start: only the common thread init is needed */
+int hv_vcpu_start_thread(struct kvm_vcpu *vcpu)
+{
+	return kvm_init_vcpu_thread(vcpu);
+}
+
+int pv_vcpu_start_thread(struct kvm_vcpu *vcpu)
+{
+	gthread_info_t *gthread_info;
+	int ret;
+
+	ret =
kvm_init_vcpu_thread(vcpu); + if (ret) { + pr_err("%s(): could not init start thread for VCPU #%d " + "error %d\n", + __func__, vcpu->vcpu_id, ret); + goto out_failed; + } + gthread_info = create_guest_start_thread_info(vcpu); + if (gthread_info == NULL) { + pr_err("%s() could not create guest start thread info " + "structure\n", + __func__); + ret = -ENOMEM; + goto out_failed; + } + pv_vcpu_set_gti(vcpu, gthread_info); + + return 0; + +out_failed: + return ret; +} + +int kvm_prepare_pv_vcpu_start_stacks(struct kvm_vcpu *vcpu) +{ + vcpu_boot_stack_t *boot_stacks = &vcpu->arch.boot_stacks; + e2k_stacks_t *regs = &boot_stacks->regs.stacks; + int ret; + + DebugKVM("started to prepare boot stacks on VCPU #%d\n", + vcpu->vcpu_id); + + prepare_vcpu_startup_args(vcpu); + ret = prepare_pv_stacks_to_startup_vcpu(vcpu, + &boot_stacks->regs, + vcpu->arch.args, vcpu->arch.args_num, + vcpu->arch.entry_point, E2K_USER_INITIAL_PSR, + GET_VCPU_BOOT_CS_SIZE(boot_stacks), + GET_VCPU_BOOT_PS_BASE(boot_stacks), + GET_VCPU_BOOT_PCS_BASE(boot_stacks), 0, true); + if (ret) { + pr_err("%s(): failed to prepare VCPU #%d boot stacks, " + "error %d\n", + __func__, vcpu->vcpu_id, ret); + return ret; + } + + /* Make sure guest sees actual values of its own registers */ + kvm_set_guest_vcpu_PSP_lo(vcpu, regs->psp_lo); + kvm_set_guest_vcpu_PSP_hi(vcpu, regs->psp_hi); + kvm_set_guest_vcpu_PCSP_lo(vcpu, regs->pcsp_lo); + kvm_set_guest_vcpu_PCSP_hi(vcpu, regs->pcsp_hi); + kvm_set_guest_vcpu_USD_lo(vcpu, regs->usd_lo); + kvm_set_guest_vcpu_USD_hi(vcpu, regs->usd_hi); + kvm_set_guest_vcpu_SBR(vcpu, regs->top); + + return 0; +} + +static gthread_info_t *alloc_guest_thread_info(struct kvm *kvm) +{ + gthread_info_t *gthread_info; + gpid_t *gpid = NULL; + + DebugKVMTI("started\n"); + gthread_info = kmem_cache_alloc(kvm->arch.gti_cachep, + GFP_KERNEL | __GFP_ZERO); + if (!gthread_info) { + DebugKVMTI("could not allocate guest thread info structure\n"); + goto out; + } + + gpid = 
kvm_alloc_gpid(&kvm->arch.gpid_table); + if (!gpid) { + DebugKVMTI("could not allocate guest PID\n"); + goto out_free; + } + gpid->gthread_info = gthread_info; + gthread_info->gpid = gpid; + gthread_info->vcpu = NULL; + gthread_info->gmm = NULL; + gthread_info->nonp_root_hpa = E2K_INVALID_PAGE; + init_pv_vcpu_l_gregs(gthread_info); +/* gthread_info->upsr = E2K_USER_INITIAL_UPSR; */ + DebugKVMTI("allocated guest thread info GPID %d\n", gpid->nid.nr); + +out: + return gthread_info; + +out_free: + + kmem_cache_free(kvm->arch.gti_cachep, gthread_info); + gthread_info = NULL; + goto out; +} + +/* + * Clear guest thread info structure from old user task, + * while sys_execve() of new user task + */ +void kvm_pv_clear_guest_thread_info(gthread_info_t *gthread_info) +{ + gthread_info->gpid = NULL; /* not exist */ + gthread_info->gmm = NULL; + gthread_info->gregs_active = 0; + gthread_info->gregs_valid = 0; + gthread_info->gregs_for_currents_valid = 0; + gthread_info->u_upsr_valid = false; + gthread_info->k_upsr_valid = false; + gthread_info->gpt_regs = NULL; +} + +static void __free_guest_thread_info(struct kvm *kvm, gthread_info_t *gti, + bool lock_done) +{ + DebugKVMTI("started for GPID %d\n", gti->gpid->nid.nr); + + if (likely(!lock_done)) { + kvm_free_gpid(gti->gpid, &kvm->arch.gpid_table); + } else { + kvm_do_free_gpid(gti->gpid, &kvm->arch.gpid_table); + } + + kmem_cache_free(kvm->arch.gti_cachep, gti); +} + +static void free_guest_thread_info(struct kvm *kvm, gthread_info_t *gti) +{ + __free_guest_thread_info(kvm, gti, false); +} + +static void do_free_guest_thread_info(struct kvm *kvm, gthread_info_t *gti) +{ + __free_guest_thread_info(kvm, gti, true); +} + +int kvm_pv_guest_thread_info_init(struct kvm *kvm) +{ + int ret; + + DebugKVMTI("started\n"); + + sprintf(kvm->arch.gti_cache_name, "gthread_info_VM%d", + kvm->arch.vmid.nr); + kvm->arch.gti_cachep = + kmem_cache_create(kvm->arch.gti_cache_name, + sizeof(gthread_info_t), 0, + SLAB_HWCACHE_ALIGN, NULL); + if 
(kvm->arch.gti_cachep == NULL) { + DebugKVMTI("could not allocate guest kernel info cache\n"); + return -ENOMEM; + } + + ret = kvm_gpidmap_init(kvm, &kvm->arch.gpid_table, + kvm->arch.gpid_nidmap, GPIDMAP_ENTRIES, + kvm->arch.gpid_hash, GPID_HASH_BITS); + if (ret != 0) { + kmem_cache_destroy(kvm->arch.gti_cachep); + kvm->arch.gti_cachep = NULL; + } + + return ret; +} + +void kvm_pv_guest_thread_info_destroy(struct kvm *kvm) +{ + gpid_t *gpid; + struct hlist_node *next; + unsigned long flags; + int i; + + DebugKVMTI("started\n"); + gpid_table_lock_irqsave(&kvm->arch.gpid_table, flags); + for_each_guest_thread_info(gpid, i, next, &kvm->arch.gpid_table) { + do_free_guest_thread_info(kvm, gpid->gthread_info); + } + gpid_table_unlock_irqrestore(&kvm->arch.gpid_table, flags); + kmem_cache_destroy(kvm->arch.gti_cachep); + kvm->arch.gti_cachep = NULL; + kvm_gpidmap_destroy(&kvm->arch.gpid_table); +} + +static int kvm_get_guest_kernel_stacks(struct kvm_vcpu *vcpu, + kvm_task_info_t *user_info, guest_hw_stack_t *stack_regs, + u64 *args, int args_num, char *entry_point) +{ + thread_info_t *ti = current_thread_info(); + gthread_info_t *gti = pv_vcpu_get_gti(vcpu); + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + e2k_addr_t sbr; + e2k_size_t us_size; + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_addr_t ps_base; + e2k_size_t ps_size; + e2k_addr_t pcs_base; + e2k_size_t pcs_size; + e2k_psr_t psr; + bool priv_guest = vcpu->arch.is_hv; + int ret; + + DebugKVM("started\n"); + + /* + * New guest kernel stacks (data and hardware) were allocated by guest + * Create all register state for new guest stacks + */ + us_size = user_info->us_size; + sbr = user_info->us_base + us_size; /* top of stack */ + usd_hi.USD_hi_half = 0; + usd_lo.USD_lo_half = 0; + usd_hi.USD_hi_size = user_info->sp_offset; + usd_lo.USD_lo_base = user_info->us_base + user_info->sp_offset; + + psp_hi.PSP_hi_half = 0; + psp_lo.PSP_lo_half = 0; + ps_size = 
user_info->ps_size; + ps_base = user_info->ps_base; + psp_hi.PSP_hi_size = ps_size; + psp_lo.PSP_lo_base = ps_base; + + pcsp_hi.PCSP_hi_half = 0; + pcsp_lo.PCSP_lo_half = 0; + pcs_size = user_info->pcs_size; + pcs_base = user_info->pcs_base; + pcsp_hi.PCSP_hi_size = pcs_size; + pcsp_lo.PCSP_lo_base = pcs_base; + + stack_regs->stacks.top = sbr; + stack_regs->stacks.usd_hi = usd_hi; + stack_regs->stacks.usd_lo = usd_lo; + if (gti != NULL) { + gti->g_usd_lo = usd_lo; + gti->g_usd_hi = usd_hi; + gti->g_sbr.SBR_reg = stack_regs->stacks.top; + gti->us_size = us_size; + } + DebugTOVM("new local guest kernel data stack base 0x%llx size 0x%x " + "top 0x%lx\n", + usd_lo.USD_lo_base, usd_hi.USD_hi_size, sbr); + + /* host kernel data stack does not changed */ + DebugTOVM("host kernel current data stack bottom 0x%lx " + "base 0x%llx size 0x%x\n", + current->stack, ti->k_usd_lo.USD_lo_base, + ti->k_usd_hi.USD_hi_size); + + BUG_ON(sbr < GUEST_TASK_SIZE); + BUG_ON(sbr >= HOST_TASK_SIZE); + + stack_regs->stacks.psp_hi = psp_hi; + stack_regs->stacks.psp_lo = psp_lo; + if (gti != NULL) { + gti->g_psp_lo = psp_lo; + gti->g_psp_hi = psp_hi; + } + + DebugTOVM("new guest kernel procedure stack from 0x%llx size 0x%x\n", + psp_lo.PSP_lo_base, psp_hi.PSP_hi_size); + + stack_regs->stacks.pcsp_hi = pcsp_hi; + stack_regs->stacks.pcsp_lo = pcsp_lo; + if (gti != NULL) { + gti->g_pcsp_lo = pcsp_lo; + gti->g_pcsp_hi = pcsp_hi; + } + + DebugTOVM("new guest kernel chain stack from 0x%llx size 0x%x\n", + pcsp_lo.PCSP_lo_base, pcsp_hi.PCSP_hi_size); + + if (entry_point == NULL) + return 0; + + if (priv_guest) { + psr = E2K_KERNEL_PSR_ENABLED; + psr.PSR_sge = 1; + } else { + psr = E2K_USER_INITIAL_PSR; + } + ret = prepare_pv_stacks_to_startup_vcpu(vcpu, + stack_regs, args, args_num, entry_point, + psr, user_info->sp_offset, + (void *)ps_base, (void *)pcs_base, 0, true); + if (ret) + goto failed; + + return 0; + +failed: + return ret; +} + +static void kvm_init_guest_user_stacks(struct kvm_vcpu *vcpu, + 
gthread_info_t *gti, gmm_struct_t *gmm, + kvm_task_info_t *user_info, guest_hw_stack_t *stack_regs, + bool is_user_stacks) +{ + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + e2k_addr_t sbr; + e2k_size_t us_size; + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_cutd_t cutd; + + /* + * New guest kernel stacks (data and hardware) were allocated by guest + * Create all register state for new guest stacks + */ + usd_hi.USD_hi_half = 0; + usd_lo.USD_lo_half = 0; + psp_hi.PSP_hi_half = 0; + psp_lo.PSP_lo_half = 0; + pcsp_hi.PCSP_hi_half = 0; + pcsp_lo.PCSP_lo_half = 0; + if (is_user_stacks) { + us_size = user_info->u_us_size; + sbr = user_info->u_us_base + us_size; /* top of stack */ + usd_hi.USD_hi_size = user_info->u_sp_offset; + usd_lo.USD_lo_base = user_info->u_us_base + + user_info->u_sp_offset; + + psp_hi.PSP_hi_size = user_info->u_ps_size; + psp_hi.PSP_hi_ind = user_info->u_ps_ind; + psp_lo.PSP_lo_base = user_info->u_ps_base; + + pcsp_hi.PCSP_hi_size = user_info->u_pcs_size; + pcsp_hi.PCSP_hi_ind = user_info->u_pcs_ind; + pcsp_lo.PCSP_lo_base = user_info->u_pcs_base; + } else { + us_size = user_info->us_size; + sbr = user_info->us_base + us_size; /* top of stack */ + usd_hi.USD_hi_size = user_info->sp_offset; + usd_lo.USD_lo_base = user_info->us_base + user_info->sp_offset; + + psp_hi.PSP_hi_size = user_info->ps_size; + psp_hi.PSP_hi_ind = user_info->ps_ind; + psp_lo.PSP_lo_base = user_info->ps_base; + + pcsp_hi.PCSP_hi_size = user_info->pcs_size; + pcsp_hi.PCSP_hi_ind = user_info->pcs_ind; + pcsp_lo.PCSP_lo_base = user_info->pcs_base; + } + + /* keep host local data stack unchanged */ + gti->us_size = us_size; + stack_regs->stacks.top = sbr; + stack_regs->stacks.usd_hi = usd_hi; + stack_regs->stacks.usd_lo = usd_lo; + DebugGUS("new local guest start stack base 0x%llx size 0x%x " + "top 0x%lx\n", + usd_lo.USD_lo_base, usd_hi.USD_hi_size, sbr); + + sbr = user_info->us_base + user_info->us_size; + sbr = 
round_up(sbr, E2K_ALIGN_STACK_BASE_REG); + usd_hi.USD_hi_half = 0; + usd_lo.USD_lo_half = 0; + usd_hi.USD_hi_size = user_info->us_size; + usd_lo.USD_lo_base = sbr; + gti->g_usd_lo = usd_lo; + gti->g_usd_hi = usd_hi; + gti->g_sbr.SBR_reg = 0; + gti->g_sbr.SBR_base = sbr; + + DebugGUS("new local guest kernel data stack base 0x%llx size 0x%x " + "top 0x%llx\n", + gti->g_usd_lo.USD_lo_base, gti->g_usd_hi.USD_hi_size, + gti->g_sbr.SBR_base); + /* host kernel data stack does not changed */ + DebugGUS("host kernel current data stack bottom %px " + "base 0x%llx size 0x%x\n", + current->stack + KERNEL_C_STACK_OFFSET, + current_thread_info()->k_usd_lo.USD_lo_base, + current_thread_info()->k_usd_hi.USD_hi_size); + + BUG_ON(stack_regs->stacks.top >= HOST_TASK_SIZE); + + stack_regs->stacks.psp_hi = psp_hi; + stack_regs->stacks.psp_lo = psp_lo; + + DebugGUS("new guest procedure stack from 0x%llx size 0x%x " + "ind 0x%x\n", + psp_lo.PSP_lo_base, psp_hi.PSP_hi_size, psp_hi.PSP_hi_ind); + + psp_hi.PSP_hi_half = 0; + psp_lo.PSP_lo_half = 0; + psp_hi.PSP_hi_size = user_info->ps_size; + psp_lo.PSP_lo_base = user_info->ps_base; + gti->g_psp_hi = psp_hi; + gti->g_psp_lo = psp_lo; + + DebugGUS("new guest kernel procedure stack from 0x%llx size 0x%x " + "ind 0x%x\n", + gti->g_psp_lo.PSP_lo_base, gti->g_psp_hi.PSP_hi_size, + gti->g_psp_hi.PSP_hi_ind); + + stack_regs->stacks.pcsp_hi = pcsp_hi; + stack_regs->stacks.pcsp_lo = pcsp_lo; + + DebugGUS("new guest chain stack from 0x%llx size 0x%x " + "ind 0x%x\n", + pcsp_lo.PCSP_lo_base, pcsp_hi.PCSP_hi_size, + pcsp_hi.PCSP_hi_ind); + + pcsp_hi.PCSP_hi_half = 0; + pcsp_lo.PCSP_lo_half = 0; + pcsp_hi.PCSP_hi_size = user_info->pcs_size; + pcsp_lo.PCSP_lo_base = user_info->pcs_base; + gti->g_pcsp_hi = pcsp_hi; + gti->g_pcsp_lo = pcsp_lo; + + DebugGUS("new guest kernel chain stack from 0x%llx size 0x%x " + "ind 0x%x\n", + gti->g_pcsp_lo.PCSP_lo_base, gti->g_pcsp_hi.PCSP_hi_size, + gti->g_pcsp_hi.PCSP_hi_ind); + + /* Set user local stack and 
Compilation Unit table context */ + cutd.CUTD_reg = 0; + cutd.CUTD_base = user_info->cut_base; + stack_regs->cutd = cutd; + + DebugGUS("new guest CUTD : %px\n", (void *)stack_regs->cutd.CUTD_base); +} + +static int kvm_setup_guest_user_stacks(struct kvm_vcpu *vcpu, + kvm_task_info_t *user_info, guest_hw_stack_t *stack_regs) +{ + gthread_info_t *gti; + gmm_struct_t *gmm; + char *entry_point; + e2k_psr_t psr; + int ret, cui; + bool kernel; + + gti = pv_vcpu_get_gti(vcpu); + BUG_ON(gti == NULL); + gmm = pv_vcpu_get_gmm(vcpu); + BUG_ON(gmm == NULL || pv_vcpu_is_init_gmm(vcpu, gmm)); + + kvm_init_guest_user_stacks(vcpu, gti, gmm, user_info, stack_regs, + true /* set guest user stacks */); + set_pv_vcpu_u_stack_context(vcpu, stack_regs); + + cui = user_info->cui; + kernel = user_info->kernel; + atomic_set(&gmm->context.cur_cui, cui); + DebugKVMEX("set new CUTD size 0x%lx, CUI to 0x%x\n", + user_info->cut_size, cui); + + entry_point = (char *)user_info->entry_point; + + psr = E2K_USER_INITIAL_PSR; + + ret = prepare_pv_stacks_to_startup_vcpu(vcpu, + stack_regs, NULL, 0, entry_point, + psr, user_info->u_sp_offset, + (void *)user_info->u_ps_base, + (void *)user_info->u_pcs_base, + cui, kernel); + if (ret) + goto out_failed; + + kvm_set_guest_vcpu_PSR(vcpu, psr); + kvm_set_guest_vcpu_UPSR(vcpu, E2K_USER_INITIAL_UPSR); + kvm_set_guest_vcpu_under_upsr(vcpu, false); + + DebugFRTASK("starting the new user task GPID #%d GMMID #%d\n", + gti->gpid->nid.nr, gmm->nid.nr); + + return 0; + +out_failed: + return ret; +} + +int kvm_switch_guest_kernel_stacks(struct kvm_vcpu *vcpu, + kvm_task_info_t __user *task_info, char *entry_point, + unsigned long __user *task_args, int args_num, + guest_hw_stack_t *stack_regs) +{ + kvm_task_info_t user_info; + u64 args[4]; + int ret; + + DebugTOVM("started\n"); + + KVM_BUG_ON(vcpu->arch.is_hv); + + if (kvm_vcpu_copy_from_guest(vcpu, &user_info, task_info, + sizeof(*task_info))) { + pr_err("%s(): copy guest task info from guest failed\n", + __func__); 
+ ret = -EFAULT; + goto failed; + } + + if (args_num > sizeof(args) / sizeof(*args)) { + pr_err("%s(): too many guest args %d, max %ld\n", + __func__, args_num, sizeof(args) / sizeof(*args)); + ret = -EINVAL; + goto failed; + } + if (kvm_vcpu_copy_from_guest(vcpu, &args, task_args, + sizeof(*task_args) * args_num)) { + pr_err("%s(): copy guest args from guest failed\n", + __func__); + ret = -EFAULT; + goto failed; + } + + ret = kvm_get_guest_kernel_stacks(vcpu, &user_info, stack_regs, + args, args_num, entry_point); + if (ret) { + pr_err("%s(): VCPU #%d could not switch guest kernel stacks, " + "error %d\n", + __func__, vcpu->vcpu_id, ret); + goto failed; + } + + stack_regs->cutd = vcpu->arch.hw_ctxt.sh_oscutd; + + raw_all_irq_disable(); + + startup_pv_vcpu(vcpu, stack_regs, FROM_HYPERCALL_SWITCH); + /* should not be here */ + KVM_BUG_ON(true); + + return 0; + +failed: + + vcpu->arch.exit_reason = EXIT_SHUTDOWN; + vcpu->run->exit_reason = KVM_EXIT_E2K_PANIC; + + DebugKVMSH("VCPU #%d thread exits\n", vcpu->vcpu_id); + + /* return to host VCPU to handle exit reason */ + return RETURN_TO_HOST_APP_HCRET; +} + +int kvm_switch_to_virt_mode(struct kvm_vcpu *vcpu, + kvm_task_info_t __user *task_info, guest_hw_stack_t *stack_regs, + void (*func)(void *data, void *arg1, void *arg2), + void *data, void *arg1, void *arg2) +{ + kvm_task_info_t user_info; + gthread_info_t *gti; + u64 args[4]; + int ret; + + DebugTOVM("started on VCPU #%d to enable guest MMU virtual mode\n", + vcpu->vcpu_id); + + if (kvm_vcpu_copy_from_guest(vcpu, &user_info, task_info, + sizeof(*task_info))) { + DebugKVM("copy new task info from user failed\n"); + ret = -EFAULT; + goto failed; + } + args[0] = (u64)data; + args[1] = (u64)arg1; + args[2] = (u64)arg2; + args[3] = 0; + + if (!is_paging(vcpu)) { + /* it need create shadow PT based on PTs created by guest */ + /* and enable paging mode */ + KVM_BUG_ON(!is_shadow_paging(vcpu)); + ret = kvm_mmu_enable_shadow_paging(vcpu); + if (ret) { + pr_err("%s(): 
VCPU #%d could not switch to " + "shadow PT, error %d\n", + __func__, vcpu->vcpu_id, ret); + goto failed; + } + } + + ret = kvm_get_guest_kernel_stacks(vcpu, &user_info, stack_regs, + args, sizeof(args) / sizeof(*args), (char *)func); + if (ret) { + pr_err("%s(): VCPU #%d could not get guest kernel " + "stacks, error %d\n", + __func__, vcpu->vcpu_id, ret); + goto failed; + } + + KVM_BUG_ON(!is_shadow_paging(vcpu)); + + if (vcpu->arch.is_hv || vcpu->arch.is_pv) { + kvm_hw_cpu_context_t *hw_ctxt = &vcpu->arch.hw_ctxt; + kvm_sw_cpu_context_t *sw_ctxt = &vcpu->arch.sw_ctxt; + + raw_all_irq_disable(); + if (vcpu->arch.is_hv) { + /* set guest stacks registers to new guest kernel */ + /* stacks (hw context) */ + kvm_update_guest_stacks_registers(vcpu, stack_regs); + + /* update local data stack pointers */ + /* (software context) */ + sw_ctxt->sbr.SBR_reg = stack_regs->stacks.top; + sw_ctxt->usd_lo = stack_regs->stacks.usd_lo; + sw_ctxt->usd_hi = stack_regs->stacks.usd_hi; + } + + /* set guest OS compilation units context */ + ret = vcpu_init_os_cu_hw_ctxt(vcpu, &user_info); + if (ret != 0) { + pr_err("%s(): init guest OS CU context\n", + __func__); + goto failed; + } + stack_regs->cutd = hw_ctxt->sh_oscutd; + + gti = pv_vcpu_get_gti(vcpu); + if (gti != NULL) { + gti->stack_regs = *stack_regs; + } + + /* restore global register pointer to VCPU state */ + /* from now it should be virtual address */ + INIT_HOST_VCPU_STATE_GREG_COPY(current_thread_info(), vcpu); + /* all addresses into VCPU state too */ + guest_pv_vcpu_state_to_paging(vcpu); + + /* setup hypervisor to handle guest kernel intercepts */ + kvm_init_kernel_intc(vcpu); + + if (!vcpu->arch.is_hv) { + startup_pv_vcpu(vcpu, stack_regs, + FROM_HYPERCALL_SWITCH); + /* should not be here */ + KVM_BUG_ON(true); + } + raw_all_irq_enable(); + return 0; + } else { + KVM_BUG_ON(true); + } + + ret = -EINVAL; + +failed: + + raw_all_irq_enable(); + vcpu->arch.exit_reason = EXIT_SHUTDOWN; + vcpu->run->exit_reason = 
KVM_EXIT_E2K_PANIC; + + DebugKVMSH("VCPU #%d thread exits\n", vcpu->vcpu_id); + + if (!vcpu->arch.is_hv) { + /* return to host VCPU to handle exit reason */ + return RETURN_TO_HOST_APP_HCRET; + } + /* inject intercept as hypercall return to switch to */ + /* vcpu run thread and handle VM exit on guest panic */ + kvm_inject_vcpu_exit(vcpu); + + return ret; +} + +extern int guest_thread_copy; /* FIXME: only to debug */ + +static inline int +kvm_put_guest_new_sw_regs(struct kvm_vcpu *vcpu, gthread_info_t *new_gti, + e2k_stacks_t *new_stacks, e2k_mem_crs_t *new_crs, + __user unsigned long *g_gregs) +{ + struct sw_regs *sw_regs = &new_gti->sw_regs; + + sw_regs->top = new_stacks->top; + sw_regs->usd_lo = new_stacks->usd_lo; + sw_regs->usd_hi = new_stacks->usd_hi; + sw_regs->psp_lo = new_stacks->psp_lo; + sw_regs->psp_hi = new_stacks->psp_hi; + sw_regs->pcsp_lo = new_stacks->pcsp_lo; + sw_regs->pcsp_hi = new_stacks->pcsp_hi; + sw_regs->crs.cr0_lo = new_crs->cr0_lo; + sw_regs->crs.cr0_hi = new_crs->cr0_hi; + sw_regs->crs.cr1_lo = new_crs->cr1_lo; + sw_regs->crs.cr1_hi = new_crs->cr1_hi; + + init_sw_user_regs(sw_regs, false, new_gti->task_is_binco); + if (g_gregs != NULL) { + int ret; + + ret = kvm_copy_guest_all_glob_regs(vcpu, &sw_regs->gregs, + g_gregs); + if (ret != 0) { + pr_err("%s(): could not copy guest global registers, " + "error %d\n", __func__, ret); + return ret; + } + /* set BGR register to enable floating point stack */ + sw_regs->gregs.bgr = E2K_INITIAL_BGR; + } + sw_regs->cutd = new_gti->stack_regs.cutd; + sw_regs->dimar0 = 0; + sw_regs->dimar1 = 0; + sw_regs->ddmar0 = 0; + sw_regs->ddmar1 = 0; + AW(sw_regs->dibsr) = 0; + AW(sw_regs->dimcr) = 0; + AW(sw_regs->ddbsr) = 0; + AW(sw_regs->ddmcr) = 0; + if (!test_gti_thread_flag(new_gti, GTIF_KERNEL_THREAD)) { + DebugKVMACT("set guest data stack entry state: " + "base 0x%llx, size 0x%x, top 0x%lx\n", + new_stacks->usd_lo.USD_lo_base, + new_stacks->usd_hi.USD_hi_size, + new_stacks->top); + } + /* set initial 
state of guest kernel UPSR */ + /* guest kernel is user of host, so initial user UPSR state */ + DO_SAVE_GUEST_KERNEL_UPSR(new_gti, E2K_USER_INITIAL_UPSR); + return 0; +} + +int kvm_copy_guest_kernel_stacks(struct kvm_vcpu *vcpu, + kvm_task_info_t __user *task_info, + e2k_cr1_hi_t cr1_hi) +{ + thread_info_t *cur_ti = current_thread_info(); + kvm_task_info_t user_info; + gthread_info_t *gti; + guest_hw_stack_t *stack_regs; + e2k_stacks_t *new_stacks; + e2k_mem_crs_t *new_crs; + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + e2k_addr_t sbr; + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_addr_t ps_base; + e2k_size_t ps_size; + e2k_addr_t pcs_base; + e2k_size_t pcs_size; + int ret; + + DebugKVMKS("started\n"); + + if (kvm_vcpu_copy_from_guest(vcpu, &user_info, task_info, + sizeof(*task_info))) { + pr_err("%s(): copy new task info from user failed\n", + __func__); + return -EFAULT; + } + gti = alloc_guest_thread_info(vcpu->kvm); + if (gti == NULL) { + pr_err("%s(): could not create guest thread info\n", + __func__); + return -ENOMEM; + } + set_gti_thread_flag(gti, GTIF_KERNEL_THREAD); + stack_regs = >i->stack_regs; + new_stacks = &stack_regs->stacks; + new_crs = &stack_regs->crs; + + /* + * New guest kernel stacks (data and hardware) were allocated by guest + * Create all register state for new guest stacks + */ + sbr = user_info.us_base + user_info.us_size; /* top of stack */ + usd_hi.USD_hi_half = 0; + usd_lo.USD_lo_half = 0; + usd_hi.USD_hi_size = user_info.sp_offset; + usd_lo.USD_lo_base = user_info.us_base + user_info.sp_offset; + new_stacks->top = sbr; + new_stacks->usd_hi = usd_hi; + new_stacks->usd_lo = usd_lo; + gti->g_usd_lo = usd_lo; + gti->g_usd_hi = usd_hi; + gti->g_sbr.SBR_reg = 0; + gti->g_sbr.SBR_base = sbr; + gti->us_size = user_info.us_size; + DebugKVMKS("new local guest kernel data stack base 0x%llx size 0x%x " + "top 0x%lx\n", + usd_lo.USD_lo_base, usd_hi.USD_hi_size, sbr); + + ps_size = 
user_info.ps_size; + ps_base = user_info.ps_base; + + pcs_size = user_info.pcs_size; + pcs_base = user_info.pcs_base; + + psp_hi.PSP_hi_half = 0; + psp_lo.PSP_lo_half = 0; + psp_hi.PSP_hi_size = ps_size; + psp_hi.PSP_hi_ind = user_info.ps_ind; + psp_lo.PSP_lo_base = ps_base; + new_stacks->psp_hi = psp_hi; + new_stacks->psp_lo = psp_lo; + gti->g_psp_lo = psp_lo; + gti->g_psp_hi = psp_hi; + + DebugKVMKS("new guest kernel procedure stack from 0x%llx size 0x%x, " + "ind 0x%x\n", + psp_lo.PSP_lo_base, psp_hi.PSP_hi_size, psp_hi.PSP_hi_ind); + + pcsp_hi.PCSP_hi_half = 0; + pcsp_lo.PCSP_lo_half = 0; + pcsp_hi.PCSP_hi_size = pcs_size; + pcsp_hi.PCSP_hi_ind = user_info.pcs_ind; + pcsp_lo.PCSP_lo_base = pcs_base; + new_stacks->pcsp_hi = pcsp_hi; + new_stacks->pcsp_lo = pcsp_lo; + gti->g_pcsp_lo = pcsp_lo; + gti->g_pcsp_hi = pcsp_hi; + + DebugKVMKS("new guest kernel procedure chain stack from 0x%llx " + "size 0x%x, ind 0x%x\n", + pcsp_lo.PCSP_lo_base, pcsp_hi.PCSP_hi_size, + pcsp_hi.PCSP_hi_ind); + + new_crs->cr0_lo.CR0_lo_half = user_info.cr0_lo; + new_crs->cr0_hi.CR0_hi_half = user_info.cr0_hi; + DebugKVMKS("new chain registers: IP 0x%llx, PF 0x%llx\n", + new_crs->cr0_hi.CR0_hi_IP, + new_crs->cr0_lo.CR0_lo_pf); + + new_crs->cr1_lo.CR1_lo_half = user_info.cr1_wd; + new_crs->cr1_hi.CR1_hi_half = user_info.cr1_ussz; + DebugKVMKS("new chain registers: wbs 0x%x, ussz 0x%x\n", + new_crs->cr1_lo.CR1_lo_wbs * EXT_4_NR_SZ, + new_crs->cr1_hi.CR1_hi_ussz << 4); + + DebugKVMKS("current user data stack: bottom " + "0x%lx, top 0x%lx, max size 0x%lx\n", + cur_ti->u_stack.bottom, cur_ti->u_stack.top, + cur_ti->u_stack.size); + + kvm_gmm_get(vcpu, gti, pv_vcpu_get_init_gmm(vcpu)); + + /* FIXME: here should be copy of host kernel frames and guest pt_regs */ + /* from source process to new cloned process for recursive case of */ + /* fork(), but now it is not implemented, only without recursion case */ + + user_info.cr0_lo = new_crs->cr0_lo.CR0_lo_half; + user_info.cr0_hi = 
new_crs->cr0_hi.CR0_hi_half; + user_info.cr1_wd = new_crs->cr1_lo.CR1_lo_half; + user_info.cr1_ussz = new_crs->cr1_hi.CR1_hi_half; + if (kvm_vcpu_copy_to_guest(vcpu, task_info, &user_info, + sizeof(*task_info))) { + pr_err("%s(): copy updated task info to user failed, " + "retry\n", __func__); + ret = -EFAULT; + goto out_free_gti; + } + stack_regs->cutd = vcpu->arch.hw_ctxt.sh_oscutd; + + /* save user special registers to initial state while switch */ + gti->task_is_binco = 0; + kvm_put_guest_new_sw_regs(vcpu, gti, new_stacks, new_crs, NULL); + + DebugKVMKS("completed successfully, GPID #%d\n", + gti->gpid->nid.nr); + return gti->gpid->nid.nr; + +out_free_gti: + free_guest_thread_info(vcpu->kvm, gti); + return ret; +} + +int kvm_release_guest_task_struct(struct kvm_vcpu *vcpu, int gpid_nr) +{ + gthread_info_t *gti; + gthread_info_t *cur_gti; + gmm_struct_t *gmm; + int gmmid_nr; + bool kthread; + + if (gpid_nr < 0) { + pr_alert("%s(): invalid GPID # %d: nothing to release\n", + __func__, gpid_nr); + return 0; + } + gti = kvm_get_guest_thread_info(vcpu->kvm, gpid_nr); + if (gti == NULL) { + pr_alert("%s(): could not find guest thread GPID #%d\n", + __func__, gpid_nr); + return -ENODEV; + } + + kthread = test_gti_thread_flag(gti, GTIF_KERNEL_THREAD); + + if (!kthread) + DebugFRTASK("started for guest thread GPID #%d\n", gpid_nr); + + /* Guest should pass gpid number only of the dead process */ + /* FIXME: it need check the passed gpid is not number of active */ + /* (queued or running) guest thread. 
But now it is checker */ + /* only on current active process */ + cur_gti = pv_vcpu_get_gti(vcpu); + if (gti == cur_gti || gpid_nr == cur_gti->gpid->nid.nr) { + pr_alert("%s(): guest kernel try release current active " + "guest process GPID #%d\n", + __func__, gpid_nr); + return -EEXIST; + } + + if (likely(!test_gti_thread_flag(gti, GTIF_KERNEL_THREAD))) { + gmm = gti->gmm; + } else { + gmm = pv_vcpu_get_init_gmm(vcpu); + } + KVM_BUG_ON(gmm == NULL); + gmmid_nr = gmm->nid.nr; + if (!pv_vcpu_is_init_gmm(vcpu, gmm) && + !test_gti_thread_flag(gti, GTIF_USER_THREAD) && + (gmm == pv_vcpu_get_gmm(vcpu) || + gmm == pv_vcpu_get_active_gmm(vcpu))) { + pr_err("%s(): guest tries to release current active " + "gmm GMMID #%d\n", + __func__, gmm->nid.nr); + return -EBUSY; + } + + if (kvm_gmm_put(vcpu->kvm, gti) == 0) { + DebugFRTASK("gmm GMMID #%d was released\n", gmmid_nr); + } else { + if (!kthread) { + DebugFRTASK("gmm GMMID #%d cannot be released\n", + gmmid_nr); + } + } + free_guest_thread_info(vcpu->kvm, gti); + + if (!kthread) + DebugFRTASK("task GPID #%d released successfully\n", gpid_nr); + + return 0; +} + +/* + * End of sys_execve() for guest: + * switching to new user stacks; + * start user from entry point; + */ +int kvm_switch_to_guest_new_user(struct kvm_vcpu *vcpu, + kvm_task_info_t __user *task_info, + guest_hw_stack_t *stack_regs) +{ + gthread_info_t *gthread_info; + kvm_task_info_t user_info; + bool syscall; + int ret; + + DebugKVMEX("started\n"); + + BUG_ON(vcpu == NULL); + BUG_ON(kvm_get_guest_vcpu_runstate(vcpu) != RUNSTATE_in_hcall); + + ret = kvm_vcpu_copy_from_guest(vcpu, &user_info, task_info, + sizeof(*task_info)); + if (unlikely(ret < 0)) { + pr_err("%s(): copy new task info from user failed\n", + __func__); + return ret; + } + + ret = kvm_setup_guest_user_stacks(vcpu, &user_info, stack_regs); + if (ret) { + pr_err("%s(): VCPU #%d could not get guest user " + "stacks, error %d\n", + __func__, vcpu->vcpu_id, ret); + goto failed; + } + + gthread_info = 
pv_vcpu_get_gti(vcpu); + BUG_ON(gthread_info == NULL); + gthread_info->stack_regs = *stack_regs; + if (user_info.flags & PROTECTED_CODE_TASK_FLAG) { + pr_err("%sd(): could not running of protected guest codes " + "(is not yet implemented)\n", + __func__); + ret = -ENOEXEC; + goto failed; + } + + /* Set some flags of new task */ + syscall = !test_and_clear_gti_thread_flag(gthread_info, + GTIF_KERNEL_THREAD); + if (user_info.flags & BIN_COMP_CODE_TASK_FLAG) + gthread_info->task_is_binco = 1; + else + gthread_info->task_is_binco = 0; + + if (syscall) { + syscall_handler_trampoline_start(vcpu, 0); + } + + return 0; + +failed: + return ret; +} + +static int prepare_pv_vcpu_last_user_crs(struct kvm_vcpu *vcpu, + e2k_stacks_t *stacks, e2k_mem_crs_t *crs) +{ + void __user *u_frame; + int ret; + + u_frame = (void __user *)stacks->pcsp_lo.PCSP_lo_base + + stacks->pcsp_hi.PCSP_hi_ind; + ret = pv_vcpu_user_crs_copy_to_kernel(vcpu, u_frame, crs); + if (unlikely(ret)) { + return ret; + } + DebugKVMCLN("copy last user frame from CRS at %px to guest " + "kernel chain %px (base 0x%llx + ind 0x%x)\n", + crs, u_frame, stacks->pcsp_lo.PCSP_lo_base, + stacks->pcsp_hi.PCSP_hi_ind); + + stacks->pcsp_hi.PCSP_hi_ind += SZ_OF_CR; + DebugKVMCLN("guest kernel chain stack index is now 0x%x\n", + stacks->pcsp_hi.PCSP_hi_ind); + + return 0; +} + +static int prepare_pv_vcpu_fork_trampoline(struct kvm_vcpu *vcpu, + gthread_info_t *gti, e2k_stacks_t *stacks) +{ + e2k_mem_crs_t crs; + e2k_mem_crs_t __user *u_frame; + int ret; + + /* + * Prepare 'syscall_copy pr_trampoline' frame + */ + memset(&crs, 0, sizeof(crs)); + + crs.cr0_lo.CR0_lo_pf = -1ULL; + crs.cr0_hi.CR0_hi_IP = (u64)syscall_fork_trampoline; + crs.cr1_lo.CR1_lo_psr = E2K_KERNEL_PSR_DISABLED.PSR_reg; + crs.cr1_lo.CR1_lo_cui = KERNEL_CODES_INDEX; + if (machine.native_iset_ver < E2K_ISET_V6) + crs.cr1_lo.CR1_lo_ic = 1; + crs.cr1_lo.CR1_lo_wpsz = 1; + crs.cr1_lo.CR1_lo_wbs = 0; + crs.cr1_hi.CR1_hi_ussz = gti->us_size >> 4; + + /* Copy the new 
frame into top of guest kernel chain stack */ + u_frame = (e2k_mem_crs_t __user *)(stacks->pcsp_lo.PCSP_lo_base + + stacks->pcsp_hi.PCSP_hi_ind); + ret = pv_vcpu_user_crs_copy_to_kernel(vcpu, u_frame, &crs); + if (unlikely(ret)) { + return ret; + } + DebugKVMCLN("set trampoline CRS at the top of guest kernel chain %px " + "(base 0x%llx + ind 0x%x)\n", + u_frame, stacks->pcsp_lo.PCSP_lo_base, + stacks->pcsp_hi.PCSP_hi_ind); + + stacks->pcsp_hi.PCSP_hi_ind += SZ_OF_CR; + DebugKVMCLN("guest kernel chain stack index is now 0x%x\n", + stacks->pcsp_hi.PCSP_hi_ind); + + return 0; +} + +static int prepare_pv_vcpu_ret_from_fork_frame(struct kvm_vcpu *vcpu, + gthread_info_t *gti, e2k_stacks_t *stacks, + e2k_mem_crs_t *crs, void *func_IP) +{ + e2k_mem_crs_t __user *u_frame; + int ret; + + /* + * Prepare 'switch_to() -> return_from_fork' frame + */ + memset(crs, 0, sizeof(*crs)); + + crs->cr0_lo.CR0_lo_pf = -1ULL; + crs->cr0_hi.CR0_hi_IP = (u64)func_IP; + crs->cr1_lo.CR1_lo_psr = E2K_KERNEL_PSR_DISABLED.PSR_reg; + crs->cr1_lo.CR1_lo_pm = 0; /* guest should be not privileged */ + crs->cr1_lo.CR1_lo_cui = KERNEL_CODES_INDEX; + if (machine.native_iset_ver < E2K_ISET_V6) + crs->cr1_lo.CR1_lo_ic = 1; + crs->cr1_lo.CR1_lo_wpsz = 1; + crs->cr1_lo.CR1_lo_wbs = 0; + crs->cr1_hi.CR1_hi_ussz = stacks->usd_hi.USD_hi_size >> 4; + + /* Copy the new frame into top of guest kernel chain stack */ + u_frame = (e2k_mem_crs_t __user *)(stacks->pcsp_lo.PCSP_lo_base + + stacks->pcsp_hi.PCSP_hi_ind); + ret = pv_vcpu_user_crs_copy_to_kernel(vcpu, u_frame, crs); + if (unlikely(ret)) { + return ret; + } + DebugKVMCLN("set return from fork CRS franme at the top of guest " + "kernel chain %px (base 0x%llx + ind 0x%x)\n", + u_frame, stacks->pcsp_lo.PCSP_lo_base, + stacks->pcsp_hi.PCSP_hi_ind); + + /* The frame will be setup from pt_regs structure directly to + * the registers CR0/CR1 to return to from 'return from fork' function, + * so should not be counted in memory + stacks->pcsp_hi.PCSP_hi_ind += 
SZ_OF_CR; + DebugKVMCPY("guest kernel chain stack index is now 0x%x\n", + stacks->pcsp_hi.PCSP_hi_ind); + */ + + return 0; +} + +/* + * End of copy_thread() for guest user process (clone_user_stack()): + */ +int kvm_copy_guest_user_stacks(struct kvm_vcpu *vcpu, + kvm_task_info_t __user *task_info, + vcpu_gmmu_info_t __user *gmmu_info) +{ + struct kvm *kvm = vcpu->kvm; + pt_regs_t *cur_regs, *regs; + gthread_info_t *cur_gti; + gthread_info_t *gti; + gmm_struct_t *gmm; + guest_hw_stack_t *stack_regs; + e2k_stacks_t *new_stacks; + e2k_mem_crs_t *new_crs; + kvm_task_info_t user_info; + e2k_addr_t sbr; + e2k_size_t us_size; + vcpu_gmmu_info_t gmm_info; + gpa_t u_pptb; + int gmmid_nr; + int ret; + + ret = kvm_vcpu_copy_from_guest(vcpu, &user_info, task_info, + sizeof(*task_info)); + if (unlikely(ret < 0)) { + pr_err("%s(): copy new task info from user failed\n", + __func__); + return ret; + } + + ret = kvm_vcpu_copy_from_guest(vcpu, &gmm_info, gmmu_info, + sizeof(*gmmu_info)); + if (unlikely(ret < 0)) { + pr_err("%s(): copy new GMMU info from user failed\n", + __func__); + return ret; + } + KVM_BUG_ON(gmm_info.opcode != CREATE_NEW_GMM_GMMU_OPC); + + gti = alloc_guest_thread_info(kvm); + if (gti == NULL) { + pr_err("%s(): could not create guest thread info\n", + __func__); + return -ENOMEM; + } + DebugGMM("allocated guest thread info agent #%d\n", + gti->gpid->nid.nr); + + gmm = create_gmm(kvm); + if (gmm == NULL) { + pr_err("%s(): could not create new host agent of guest mm\n", + __func__); + ret = -ENOMEM; + goto out_free_gti; + } + gmmid_nr = gmm->nid.nr; + kvm_gmm_get(vcpu, gti, gmm); + + cur_gti = pv_vcpu_get_gti(vcpu); + BUG_ON(cur_gti == NULL); + + cur_regs = &cur_gti->fork_regs; + regs = >i->fork_regs; + KVM_BUG_ON(!is_sys_call_pt_regs(cur_regs)); + *regs = *cur_regs; + + stack_regs = >i->stack_regs; + new_stacks = &stack_regs->stacks; + new_crs = &stack_regs->crs; + + if (DEBUG_KVM_COPY_USER_MODE) + debug_copy_guest = true; + /* + * New user stacks (data and 
hardware) were allocated by guest + * Create all register state for new guest stacks + */ + kvm_init_guest_user_stacks(vcpu, gti, gmm, &user_info, stack_regs, + false /* set guest kernel stacks */); + + us_size = user_info.u_us_size; + sbr = user_info.u_us_base + us_size; /* top of stack */ + if (unlikely(regs->stacks.top != sbr)) { + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + + /* guest user local data stack was changed, so setup user */ + /* local data stack registers to return to new stack */ + usd_hi.USD_hi_half = 0; + usd_lo.USD_lo_half = 0; + usd_hi.USD_hi_size = user_info.u_sp_offset; + usd_lo.USD_lo_base = user_info.u_us_base + + user_info.u_sp_offset; + sbr = round_up(sbr, E2K_ALIGN_STACK_BASE_REG); + regs->stacks.top = sbr; + regs->stacks.usd_hi = usd_hi; + regs->stacks.usd_lo = usd_lo; + DebugKVMCPY("new local guest user data stack base 0x%llx " + "size 0x%x top 0x%lx\n", + usd_lo.USD_lo_base, usd_hi.USD_hi_size, sbr); + } + + new_crs->cr0_lo.CR0_lo_half = user_info.cr0_lo; + new_crs->cr0_hi.CR0_hi_half = user_info.cr0_hi; + DebugKVMCPY("new chain registers: IP 0x%llx, PF 0x%llx\n", + new_crs->cr0_hi.CR0_hi_ip << 3, + new_crs->cr0_lo.CR0_lo_pf); + + new_crs->cr1_lo.CR1_lo_half = user_info.cr1_wd; + new_crs->cr1_hi.CR1_hi_half = user_info.cr1_ussz; + DebugKVMCPY("new chain registers: wbs 0x%x, ussz 0x%x\n", + new_crs->cr1_lo.CR1_lo_wbs * EXT_4_NR_SZ, + new_crs->cr1_hi.CR1_hi_ussz << 4); + + /* copy last guest user CRS frame to top of guest kernel stack */ + ret = prepare_pv_vcpu_last_user_crs(vcpu, new_stacks, new_crs); + if (unlikely(ret)) { + pr_err("%s(): user CRS frame copy failed\n", __func__); + goto out_free_gmm; + } + + /* inject CRS for return from system call to guest user */ + ret = prepare_pv_vcpu_fork_trampoline(vcpu, gti, new_stacks); + if (unlikely(ret)) { + pr_err("%s(): fork trampoline CRS frame copy failed\n", + __func__); + goto out_free_gmm; + } + + /* inject CRS frame to return from fork() on the child */ + /* these CRS should be 
as switch to frame (see switch_to()) */ + ret = prepare_pv_vcpu_ret_from_fork_frame(vcpu, gti, new_stacks, + new_crs, (void *)user_info.entry_point); + if (unlikely(ret)) { + pr_err("%s(): return from fork() CRS frame copy failed\n", + __func__); + goto out_free_gmm; + } + + /* save user special registers to initial state while switch */ + gti->task_is_binco = + ((user_info.flags & BIN_COMP_CODE_TASK_FLAG) != 0); + gti->task_is_protect = + ((user_info.flags & PROTECTED_CODE_TASK_FLAG) != 0); + + kvm_put_guest_new_sw_regs(vcpu, gti, new_stacks, new_crs, + (unsigned long *)user_info.gregs); + + /* prepare new shadow PT to switch to */ + u_pptb = gmm_info.u_pptb; + ret = kvm_pv_prepare_guest_mm(vcpu, gmm, u_pptb); + if (ret) { + pr_err("%s(): could not prepare shadow PT for new guest " + "process, error %d\n", + __func__, ret); + goto out_free_gmm; + } + + /* return ID of created gmm struct to guest */ + gmm_info.gmmid_nr = gmmid_nr; + ret = kvm_vcpu_copy_to_guest(vcpu, gmmu_info, &gmm_info, + sizeof(*gmmu_info)); + if (unlikely(ret < 0)) { + pr_err("%s(): copy updated gmm info to user failed, retry\n", + __func__); + goto out_free_gmm; + } + + DebugFRTASK("created task GPID #%d GMMID #%d\n", + gti->gpid->nid.nr, gmm->nid.nr); + + if (DEBUG_KVM_COPY_USER_MODE) + debug_copy_guest = false; + + return gti->gpid->nid.nr; + +out_free_gmm: + kvm_free_gmm(kvm, gmm); + gti->gmm = NULL; +out_free_gti: + free_guest_thread_info(kvm, gti); + + if (DEBUG_KVM_COPY_USER_MODE) + debug_copy_guest = false; + return ret; +} + +/* + * End of copy_thread() for guest user thread (clone_user_stacks()): + */ +int kvm_clone_guest_user_stacks(struct kvm_vcpu *vcpu, + kvm_task_info_t __user *task_info) +{ + struct kvm *kvm = vcpu->kvm; + pt_regs_t *cur_regs, *regs; + gthread_info_t *cur_gti, *gti; + gmm_struct_t *gmm; + guest_hw_stack_t *stack_regs; + e2k_stacks_t *new_stacks; + e2k_mem_crs_t *new_crs; + kvm_task_info_t user_info; + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + e2k_addr_t sbr; 
+ e2k_size_t us_size; + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + int ret; + + ret = kvm_vcpu_copy_from_guest(vcpu, &user_info, task_info, + sizeof(*task_info)); + if (unlikely(ret < 0)) { + pr_err("%s(): copy new task info from user failed\n", + __func__); + return ret; + } + if (DEBUG_KVM_CLONE_USER_MODE) + debug_clone_guest = true; + + gti = alloc_guest_thread_info(kvm); + if (gti == NULL) { + pr_err("%s(): could not create guest thread info\n", + __func__); + return -ENOMEM; + } + DebugKVMCLN("allocated guest thread info agent GPID #%d\n", + gti->gpid->nid.nr); + + cur_gti = pv_vcpu_get_gti(vcpu); + BUG_ON(cur_gti == NULL); + gmm = cur_gti->gmm; + BUG_ON(gmm == NULL); + kvm_gmm_get(vcpu, gti, gmm); + + /* + * Mark the parent & child processes as user threads + * on common virtual memory (gmm structure) + */ + set_gti_thread_flag(cur_gti, GTIF_USER_THREAD); + set_gti_thread_flag(gti, GTIF_USER_THREAD); + + cur_regs = &cur_gti->fork_regs; + regs = >i->fork_regs; + KVM_BUG_ON(!is_sys_call_pt_regs(cur_regs)); + *regs = *cur_regs; + + stack_regs = >i->stack_regs; + new_stacks = &stack_regs->stacks; + new_crs = &stack_regs->crs; + + /* + * New user stacks (data and hardware) were allocated by guest + * Create all register state for new guest stacks + */ + kvm_init_guest_user_stacks(vcpu, gti, gmm, &user_info, stack_regs, + false /* set guest kernel stacks */); + + /* setup guest user register state to return to new thread */ + us_size = user_info.u_us_size; + sbr = user_info.u_us_base + us_size; /* top of stack */ + usd_hi.USD_hi_half = 0; + usd_lo.USD_lo_half = 0; + usd_hi.USD_hi_size = user_info.u_sp_offset; + usd_lo.USD_lo_base = user_info.u_us_base + user_info.u_sp_offset; + sbr = round_up(sbr, E2K_ALIGN_STACK_BASE_REG); + regs->stacks.top = sbr; + regs->stacks.usd_hi = usd_hi; + regs->stacks.usd_lo = usd_lo; + DebugKVMCLN("local guest user data stack base 0x%llx size 0x%x " + "top 0x%lx\n", + 
usd_lo.USD_lo_base, usd_hi.USD_hi_size, sbr); + + psp_hi.PSP_hi_half = 0; + psp_lo.PSP_lo_half = 0; + psp_hi.PSP_hi_size = user_info.u_ps_size; + psp_hi.PSP_hi_ind = user_info.u_ps_ind; + psp_lo.PSP_lo_base = user_info.u_ps_base; + regs->stacks.psp_hi = psp_hi; + regs->stacks.psp_lo = psp_lo; + DebugKVMCLN("new guest user procedure stack from 0x%llx size 0x%x " + "ind 0x%x\n", + psp_lo.PSP_lo_base, psp_hi.PSP_hi_size, psp_hi.PSP_hi_ind); + + pcsp_hi.PCSP_hi_half = 0; + pcsp_lo.PCSP_lo_half = 0; + pcsp_hi.PCSP_hi_size = user_info.u_pcs_size; + pcsp_hi.PCSP_hi_ind = user_info.u_pcs_ind; + pcsp_lo.PCSP_lo_base = user_info.u_pcs_base; + regs->stacks.pcsp_hi = pcsp_hi; + regs->stacks.pcsp_lo = pcsp_lo; + DebugKVMCLN("new guest user chain stack from 0x%llx size 0x%x " + "ind 0x%x\n", + pcsp_lo.PCSP_lo_base, pcsp_hi.PCSP_hi_size, + pcsp_hi.PCSP_hi_ind); + + new_crs->cr0_lo.CR0_lo_half = user_info.cr0_lo; + new_crs->cr0_hi.CR0_hi_half = user_info.cr0_hi; + DebugKVMCLN("new chain registers: IP 0x%llx, PF 0x%llx\n", + new_crs->cr0_hi.CR0_hi_IP, + new_crs->cr0_lo.CR0_lo_pf); + + new_crs->cr1_lo.CR1_lo_half = user_info.cr1_wd; + new_crs->cr1_hi.CR1_hi_half = user_info.cr1_ussz; + DebugKVMCLN("new chain registers: wbs 0x%x, ussz 0x%x\n", + new_crs->cr1_lo.CR1_lo_wbs * EXT_4_NR_SZ, + new_crs->cr1_hi.CR1_hi_ussz << 4); + + /* copy last guest user CRS frame to top of guest kernel stack */ + ret = prepare_pv_vcpu_last_user_crs(vcpu, new_stacks, new_crs); + if (unlikely(ret)) { + pr_err("%s(): user CRS frame copy failed\n", __func__); + goto out_free_gmm; + } + + /* inject CRS for return from system call to guest user */ + ret = prepare_pv_vcpu_fork_trampoline(vcpu, gti, new_stacks); + if (unlikely(ret)) { + pr_err("%s(): fork trampoline CRS frame copy failed\n", + __func__); + goto out_free_gmm; + } + + /* inject CRS frame to return from fork() on the child */ + /* these CRS should be as switch to frame (see switch_to()) */ + ret = prepare_pv_vcpu_ret_from_fork_frame(vcpu, gti, 
new_stacks, + new_crs, (void *)user_info.entry_point); + if (unlikely(ret)) { + pr_err("%s(): return from fork() CRS frame copy failed\n", + __func__); + goto out_free_gmm; + } + + /* save user special registers to initial state while switch */ + gti->task_is_binco = + ((user_info.flags & BIN_COMP_CODE_TASK_FLAG) != 0); + gti->task_is_protect = + ((user_info.flags & PROTECTED_CODE_TASK_FLAG) != 0); + + kvm_put_guest_new_sw_regs(vcpu, gti, new_stacks, new_crs, + (unsigned long *)user_info.gregs); + + DebugFRTASK("created thread GPID #%d GMMID #%d\n", + gti->gpid->nid.nr, gmm->nid.nr); + + if (DEBUG_KVM_CLONE_USER_MODE) + debug_clone_guest = false; + + return gti->gpid->nid.nr; + +out_free_gmm: + kvm_gmm_put(kvm, gti); + + free_guest_thread_info(kvm, gti); + + if (DEBUG_KVM_CLONE_USER_MODE) + debug_clone_guest = false; + return ret; +} + +int kvm_sig_handler_return(struct kvm_vcpu *vcpu, kvm_stacks_info_t *regs_info, + unsigned long sigreturn_entry, long sys_rval, + guest_hw_stack_t *stack_regs) +{ + kvm_stacks_info_t user_info; + struct signal_stack_context __user *context; + pv_vcpu_ctxt_t *vcpu_ctxt; + struct kvm_sw_cpu_context *sw_ctxt = &vcpu->arch.sw_ctxt; + unsigned long ts_flag; + int ret; + + ret = kvm_vcpu_copy_from_guest(vcpu, &user_info, regs_info, + sizeof(*regs_info)); + if (ret < 0) { + pr_err("%s(): copy stack registers state info from user " + "failed\n", __func__); + return ret; + } + + stack_regs->stacks.top = user_info.top; + stack_regs->stacks.usd_lo.USD_lo_half = user_info.usd_lo; + stack_regs->stacks.usd_hi.USD_hi_half = user_info.usd_hi; + DebugSIGH("data stack new: top 0x%lx base 0x%llx size 0x%x\n", + stack_regs->stacks.top, stack_regs->stacks.usd_lo.USD_lo_base, + stack_regs->stacks.usd_hi.USD_hi_size); + /* update user local data stack pointer */ + sw_ctxt->sbr.SBR_reg = stack_regs->stacks.top; + sw_ctxt->usd_lo = stack_regs->stacks.usd_lo; + sw_ctxt->usd_hi = stack_regs->stacks.usd_hi; + + stack_regs->stacks.psp_lo.PSP_lo_half = 
user_info.psp_lo; + stack_regs->stacks.psp_hi.PSP_hi_half = user_info.psp_hi; + stack_regs->stacks.pshtp.PSHTP_reg = user_info.pshtp; + DebugSIGH("procedure stack new: base 0x%llx size 0x%x ind 0x%x " + "PSHTP 0x%llx\n", + stack_regs->stacks.psp_lo.PSP_lo_base, + stack_regs->stacks.psp_hi.PSP_hi_size, + stack_regs->stacks.psp_hi.PSP_hi_ind, + stack_regs->stacks.pshtp.PSHTP_reg); + + stack_regs->stacks.pcsp_lo.PCSP_lo_half = user_info.pcsp_lo; + stack_regs->stacks.pcsp_hi.PCSP_hi_half = user_info.pcsp_hi; + stack_regs->stacks.pcshtp = user_info.pcshtp; + DebugSIGH("chain stack new: base 0x%llx size 0x%x ind 0x%x " + "PCSHTP 0x%x\n", + stack_regs->stacks.pcsp_lo.PCSP_lo_base, + stack_regs->stacks.pcsp_hi.PCSP_hi_size, + stack_regs->stacks.pcsp_hi.PCSP_hi_ind, + stack_regs->stacks.pcshtp); + + stack_regs->crs.cr0_lo.CR0_lo_half = user_info.cr0_lo; + stack_regs->crs.cr0_hi.CR0_hi_half = user_info.cr0_hi; + stack_regs->crs.cr1_lo.CR1_lo_half = user_info.cr1_lo; + stack_regs->crs.cr1_hi.CR1_hi_half = user_info.cr1_hi; + DebugSIGH("chain CR0-CR1 : IP 0x%llx wbs 0x%x wpsz 0x%x wfx %d\n", + stack_regs->crs.cr0_hi.CR0_hi_IP, + stack_regs->crs.cr1_lo.CR1_lo_wbs, + stack_regs->crs.cr1_lo.CR1_lo_wpsz, + stack_regs->crs.cr1_lo.CR1_lo_wfx); + + KVM_BUG_ON(stack_regs->crs.cr1_lo.CR1_lo_pm || + !stack_regs->crs.cr1_lo.CR1_lo_ie || + !stack_regs->crs.cr1_lo.CR1_lo_nmie); + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + context = get_signal_stack(); + vcpu_ctxt = &context->vcpu_ctxt; + ret = __put_user(true, &vcpu_ctxt->in_sig_handler); + ret |= __put_user(sigreturn_entry, &vcpu_ctxt->sigreturn_entry); + ret |= __put_user(sys_rval, &vcpu_ctxt->sys_rval); + clear_ts_flag(ts_flag); + + return ret; +} + +int kvm_long_jump_return(struct kvm_vcpu *vcpu, + kvm_long_jump_info_t *regs_info) +{ + kvm_long_jump_info_t user_info; + struct signal_stack_context __user *context; + e2k_stacks_t stacks; + e2k_mem_crs_t crs; + unsigned long ts_flag; + int ret; + + ret = kvm_vcpu_copy_from_guest(vcpu, 
&user_info, regs_info, + sizeof(*regs_info)); + if (unlikely(ret < 0)) { + pr_err("%s(): copy stack registers state info from user " + "failed\n", __func__); + return ret; + } + + context = get_signal_stack(); + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + + ret = __copy_from_user(&stacks, &context->regs.stacks, + sizeof(stacks)); + ret |= __copy_from_user(&crs, &context->regs.crs, + sizeof(crs)); + + clear_ts_flag(ts_flag); + if (ret) { + goto failed; + } + + stacks.top = user_info.top; + stacks.usd_lo.USD_lo_half = user_info.usd_lo; + stacks.usd_hi.USD_hi_half = user_info.usd_hi; + DebugLJMP("data stack new: top 0x%lx base 0x%llx size 0x%x\n", + stacks.top, stacks.usd_lo.USD_lo_base, + stacks.usd_hi.USD_hi_size); + + stacks.psp_lo.PSP_lo_half = user_info.psp_lo; + stacks.psp_hi.PSP_hi_half = user_info.psp_hi; + stacks.pshtp.PSHTP_reg = user_info.pshtp; + DebugLJMP("procedure stack new: base 0x%llx size 0x%x ind 0x%x " + "PSHTP 0x%llx\n", + stacks.psp_lo.PSP_lo_base, + stacks.psp_hi.PSP_hi_size, + stacks.psp_hi.PSP_hi_ind, + stacks.pshtp.PSHTP_reg); + + stacks.pcsp_lo.PCSP_lo_half = user_info.pcsp_lo; + stacks.pcsp_hi.PCSP_hi_half = user_info.pcsp_hi; + stacks.pcshtp = user_info.pcshtp; + DebugLJMP("chain stack new: base 0x%llx size 0x%x ind 0x%x " + "PCSHTP 0x%x\n", + stacks.pcsp_lo.PCSP_lo_base, + stacks.pcsp_hi.PCSP_hi_size, + stacks.pcsp_hi.PCSP_hi_ind, + stacks.pcshtp); + + crs.cr0_lo.CR0_lo_half = user_info.cr0_lo; + crs.cr0_hi.CR0_hi_half = user_info.cr0_hi; + crs.cr1_lo.CR1_lo_half = user_info.cr1_lo; + crs.cr1_hi.CR1_hi_half = user_info.cr1_hi; + DebugLJMP("chain CR0-CR1 : IP 0x%llx wbs 0x%x wpsz 0x%x wfx %d\n", + crs.cr0_hi.CR0_hi_IP, + crs.cr1_lo.CR1_lo_wbs, + crs.cr1_lo.CR1_lo_wpsz, + crs.cr1_lo.CR1_lo_wfx); + + KVM_BUG_ON(crs.cr1_lo.CR1_lo_pm || + !crs.cr1_lo.CR1_lo_ie || + !crs.cr1_lo.CR1_lo_nmie); + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + + ret = __copy_to_user(&context->regs.stacks, &stacks, sizeof(stacks)); + ret |= 
__copy_to_user(&context->regs.crs, &crs, sizeof(crs)); + + clear_ts_flag(ts_flag); + if (ret) { + goto failed; + } + + return 0; + +failed: + user_exit(); + do_exit(SIGKILL); + return ret; +} + +void kvm_guest_vcpu_common_idle(struct kvm_vcpu *vcpu, + long timeout, bool interruptable) +{ + long out; + + DebugKVMIDLE("started on VCPU %d\n", vcpu->vcpu_id); + + BUG_ON(!vcpu->arch.is_hv && vcpu->arch.host_task == NULL); + + if (kvm_guest_vcpu_thread_should_stop(vcpu)) + goto need_stopped; + + BUG_ON(vcpu->arch.on_idle); + BUG_ON(kvm_get_guest_vcpu_runstate(vcpu) != RUNSTATE_in_hcall); + if (kvm_test_pending_virqs(vcpu)) { + /* guest has pending VIRQs, so complete idle mode right now */ + /* to inject interrupt while hypercall return */ + return; + } + kvm_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_blocked); + vcpu->arch.on_idle = interruptable; + + if (timeout == 0) { + /* schedule the CPU away without any timeout */ + timeout = MAX_SCHEDULE_TIMEOUT; + } + set_current_state(TASK_INTERRUPTIBLE); + if (interruptable) + kvm_arch_vcpu_to_wait(vcpu); + out = schedule_timeout(timeout); + vcpu->arch.on_idle = false; + if (interruptable) + kvm_arch_vcpu_to_run(vcpu); + BUG_ON(kvm_get_guest_vcpu_runstate(vcpu) != RUNSTATE_blocked); + kvm_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_in_hcall); + __set_current_state(TASK_RUNNING); + if (out > 0) { + DebugKVMIDLE("VCPU %d waked up on some event\n", + vcpu->vcpu_id); + } else { + DebugKVMIDLE("VCPU %d waked up on timeout\n", + vcpu->vcpu_id); + } +need_stopped: + if (kvm_guest_vcpu_thread_should_stop(vcpu)) { + DebugKVMSH("guest VCPU #%d should be stopped\n", + vcpu->vcpu_id); + kvm_spare_host_vcpu_release(vcpu); + do_exit(-EINTR); + } +} + +/* + * Activate VCPU which is wating for activation in idle mode + */ +static int do_activate_host_vcpu(struct kvm_vcpu *vcpu) +{ + DebugKVMIDLE("started on VCPU %d to activate VCPU #%d\n", + current_thread_info()->vcpu->vcpu_id, vcpu->vcpu_id); + + mutex_lock(&vcpu->arch.lock); + 
if (vcpu->arch.host_task == NULL) { + mutex_unlock(&vcpu->arch.lock); + pr_err("%s(): guest thread of VCPU #%d does not exist, " + "probably completed\n", + __func__, vcpu->vcpu_id); + return -ENODEV; + } + wake_up_process(vcpu->arch.host_task); + mutex_unlock(&vcpu->arch.lock); + return 0; +} +int kvm_activate_host_vcpu(struct kvm *kvm, int vcpu_id) +{ + struct kvm_vcpu *vcpu_to; /* the VCPU to activate */ + struct kvm_vcpu *vcpu_from; /* current VCPU */ + int ret; + + vcpu_from = current_thread_info()->vcpu; + BUG_ON(vcpu_from == NULL); + BUG_ON(vcpu_from->kvm != kvm); + + mutex_lock(&kvm->lock); + vcpu_to = kvm_get_vcpu_on_id(kvm, vcpu_id); + if (IS_ERR(vcpu_to)) { + mutex_unlock(&kvm->lock); + pr_err("%s(): could not find VCPU #%d to activate\n", + __func__, vcpu_id); + return PTR_ERR(vcpu_to); + } + ret = do_activate_host_vcpu(vcpu_to); + mutex_unlock(&kvm->lock); + return ret; +} + +/* Suspend vcpu thread until it will be woken up by pv_kick */ +void kvm_pv_wait(struct kvm *kvm, struct kvm_vcpu *vcpu) +{ + /* + * If vcpu has pending VIRQs, do not put its thread + * into sleep. Exit from kvm_pv_wait to inject + * interrupt while hypercall returns. + */ + if (kvm_test_pending_virqs(vcpu)) + return; + + /* Update arch-dependent state of vcpu */ + kvm_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_blocked); + /* For PV guest */ + vcpu->arch.on_idle = true; + + vcpu->arch.mp_state = KVM_MP_STATE_HALTED; + + /* Suspend vcpu thread until it will be woken up by pv_kick */ + kvm_vcpu_block(vcpu); + + /* + * Clear KVM_REQ_UNHALT bit in vcpu->requests. + * We need to do it here because kvm_vcpu_block sets this bit + * after vcpu thread is woken up. 
+ */ + kvm_check_request(KVM_REQ_UNHALT, vcpu); + + vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; + vcpu->arch.unhalted = false; + + /* Restore arch-dependent state of vcpu */ + vcpu->arch.on_idle = false; + kvm_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_in_hcall); +} + +/* Wake up sleeping vcpu thread */ +void kvm_pv_kick(struct kvm *kvm, int hard_cpu_id) +{ + struct kvm_vcpu *vcpu_to; + + /* Get vcpu by given cpu id */ + if (kvm_is_epic(kvm)) + /* FIXME ioepic does not currently support logical cp + * numbering, bug 122110 + */ + vcpu_to = kvm_get_vcpu_on_hard_cpu_id(kvm, hard_cpu_id); + else + vcpu_to = kvm_get_vcpu_on_id(kvm, hard_cpu_id); + + vcpu_to->arch.unhalted = true; + + /* Send wake up to target vcpu thread */ + kvm_vcpu_wake_up(vcpu_to); + + /* Yield our cpu to woken vcpu_to thread if possible */ + kvm_vcpu_yield_to(vcpu_to); +} +int kvm_activate_guest_all_vcpus(struct kvm *kvm) +{ + struct kvm_vcpu *vcpu_to; /* follow VCPU to activate */ + struct kvm_vcpu *vcpu_from; /* current VCPU */ + int r; + int ret; + int err = 0; + + vcpu_from = current_thread_info()->vcpu; + BUG_ON(vcpu_from == NULL); + BUG_ON(vcpu_from->kvm != kvm); + + mutex_lock(&kvm->lock); + kvm_for_each_vcpu(r, vcpu_to, kvm) { + if (vcpu_to != NULL && vcpu_to->vcpu_id != vcpu_from->vcpu_id) { + ret = do_activate_host_vcpu(vcpu_to); + if (ret && !err) + err = ret; + } + } + mutex_unlock(&kvm->lock); + return err; +} + +long kvm_guest_shutdown(struct kvm_vcpu *vcpu, void __user *msg, + unsigned long reason) +{ + char buffer[512]; + int count = sizeof(buffer); + char *todo; + char *buf; + e2k_addr_t hva_msg; + int exit_reason; + int ret; + kvm_arch_exception_t exception; + + DebugKVMSH("started for msg %px, reason %ld\n", + msg, reason); + if (msg != NULL) { + hva_msg = kvm_vcpu_gva_to_hva(vcpu, (gva_t)msg, + false, &exception); + if (kvm_is_error_hva(hva_msg)) { + DebugKVM("failed to find GPA for dst %lx GVA, " + "inject page fault to guest\n", msg); + 
kvm_vcpu_inject_page_fault(vcpu, (void *)msg, + &exception); + return -EAGAIN; + } + } else { + hva_msg = 0; + } + if (hva_msg == 0) { + DebugKVMSH("could not copy string from user\n"); + buf = NULL; + } else { + ret = copy_from_user(buffer, (char *)hva_msg, count); + if (ret) { + DebugKVMSH("could not copy string from user, err %d\n", + ret); + buf = NULL; + } else { + buffer[count - 1] = '\0'; + buf = buffer; + } + } + switch (reason) { + case KVM_SHUTDOWN_POWEROFF: + exit_reason = KVM_EXIT_SHUTDOWN; + todo = "power off"; + break; + case KVM_SHUTDOWN_RESTART: + exit_reason = KVM_EXIT_E2K_RESTART; + todo = "restart"; + break; + case KVM_SHUTDOWN_PANIC: + exit_reason = KVM_EXIT_E2K_PANIC; + todo = "panic"; + break; + default: + exit_reason = KVM_EXIT_UNKNOWN; + todo = "???"; + break; + } + DebugKVMSH("started to %s : %s\n", + todo, (buf) ? buf : "??? unknown reason ???"); + if (reason == KVM_SHUTDOWN_PANIC) { + /* FIXME: it need dump guest VCPU stack, */ + /* but it is not yet implemented here */ + } + + vcpu->arch.exit_reason = EXIT_SHUTDOWN; + vcpu->run->exit_reason = exit_reason; + + DebugKVMSH("VCPU #%d thread exits\n", vcpu->vcpu_id); + + if (!vcpu->arch.is_hv) { + /* return to host VCPU to handle exit reason */ + return RETURN_TO_HOST_APP_HCRET; + } else { + /* inject intercept as hypercall return to switch to */ + /* vcpu run thread and handle VM exit on guest shutdown */ + kvm_inject_vcpu_exit(vcpu); + } + return 0; +} + +static int kvm_guest_failed(struct kvm_vcpu *vcpu) +{ + DebugKVMSH("%s (%d) started on unknown failure\n", + current->comm, current->pid); + + vcpu->arch.exit_reason = EXIT_SHUTDOWN; + vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; + + kvm_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_offline); + + if (!vcpu->arch.is_hv) { + /* return to host VCPU to handle exit reason */ + return RETURN_TO_HOST_APP_HCRET; + } else { + /* inject intercept as hypercall return to switch to */ + /* vcpu run thread and handle VM exit on guest failure */ + 
kvm_inject_vcpu_exit(vcpu); + } + return 0; +} + +#ifdef CONFIG_KVM_ASYNC_PF + +/* + * Enable async page fault handling on current vcpu + */ +int kvm_pv_host_enable_async_pf(struct kvm_vcpu *vcpu, + u64 apf_reason_gpa, u64 apf_id_gpa, + u32 apf_ready_vector, u32 irq_controller) +{ + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.reason_gpa, + apf_reason_gpa, sizeof(u32))) + return 1; + + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.id_gpa, + apf_id_gpa, sizeof(u32))) + return 1; + + vcpu->arch.apf.cnt = 1; + vcpu->arch.apf.host_apf_reason = KVM_APF_NO; + vcpu->arch.apf.in_pm = false; + vcpu->arch.apf.apf_ready_vector = apf_ready_vector; + vcpu->arch.apf.irq_controller = irq_controller; + vcpu->arch.apf.enabled = true; + + return 0; +} + +#endif /* CONFIG_KVM_ASYNC_PF */ + +#define TIMEOUT_FOR_PRINT_VCPU_STACKS_MS 30000 +static int wait_for_discard(struct wait_bit_key *key, int mode) +{ + freezable_schedule_unsafe(); + if (signal_pending_state(mode, current)) + return -ERESTARTSYS; + return 0; +} +static inline void +do_wait_for_print_vcpu_stack(struct kvm_vcpu *vcpu) +{ + DebugGST("started for VCPU #%d\n", vcpu->vcpu_id); + if (kvm_start_vcpu_show_state(vcpu)) { + /* show of VCPU state is already in progress */ + DebugGST("show of VCPU state is already in progress on " + "VCPU #%d\n", + vcpu->vcpu_id); + return; + } + local_irq_enable(); + DebugGST("will send SYSRQ for VCPU #%d\n", vcpu->vcpu_id); + kvm_pic_sysrq_deliver(vcpu); + DebugGST("goto wait on bit of completion on for VCPU #%d\n", + vcpu->vcpu_id); + wait_on_bit_action((void *)&vcpu->requests, KVM_REG_SHOW_STATE, + wait_for_discard, TASK_KILLABLE); + DO_DUMP_VCPU_STACK(vcpu) = false; + DebugGST("waiting is completed for VCPU #%d\n", vcpu->vcpu_id); +} + +static int wait_for_print_vcpu_stack(void *data) +{ + struct kvm_vcpu *vcpu = data; + + DebugGST("started for VCPU #%d\n", vcpu->vcpu_id); + do_wait_for_print_vcpu_stack(vcpu); + return 0; +} +static inline void 
+do_wait_for_print_all_guest_stacks(struct kvm *kvm) +{ + struct kvm_vcpu *vcpu; + struct kvm_vcpu *other_vcpu; + int r; + + mutex_lock(&kvm->lock); + vcpu = kvm_get_vcpu(kvm, 0); + if (vcpu == NULL) { + mutex_unlock(&kvm->lock); + DebugGST("nothing VCPUs detected\n"); + return; + } + DO_DUMP_VCPU_STATE(vcpu) = true; + do_wait_for_print_vcpu_stack(vcpu); + DO_DUMP_VCPU_STATE(vcpu) = false; + kvm_for_each_vcpu(r, other_vcpu, kvm) { + /* show state of the guest process on the VCPU */ + if (other_vcpu == NULL) + continue; + if (other_vcpu == vcpu) + continue; + DO_DUMP_VCPU_STACK(other_vcpu) = true; + do_wait_for_print_vcpu_stack(other_vcpu); + } + if (!test_and_clear_kvm_mode_flag(kvm, KVMF_IN_SHOW_STATE)) { + mutex_unlock(&kvm->lock); + DebugGST("show of KVM state was not started\n"); + return; + } + mutex_unlock(&kvm->lock); +} +void wait_for_print_all_guest_stacks(struct work_struct *work) +{ + struct kvm *kvm; + + mutex_lock(&kvm_lock); + if (list_empty(&vm_list)) { + mutex_unlock(&kvm_lock); + DebugGST("nothing VM detected\n"); + return; + } + list_for_each_entry(kvm, &vm_list, vm_list) { + DebugGST("started for VM #%d\n", kvm->arch.vmid.nr); + if (test_and_set_kvm_mode_flag(kvm, KVMF_IN_SHOW_STATE)) { + DebugGST("show of VM #%d state is already " + "in progress\n", kvm->arch.vmid.nr); + continue; + } + do_wait_for_print_all_guest_stacks(kvm); + } + mutex_unlock(&kvm_lock); +} +static inline void +deferred_print_vcpu_stack(struct kvm_vcpu *vcpu) +{ + struct task_struct *task; + + DebugGST("started for VCPU #%d\n", vcpu->vcpu_id); + + /* create thread to show state of guest current process on the VCPU */ + /* Function wait_for_print_all_guest_stacks() wait for print */ + /* so cannot be called directly from idle thread for example */ + if (!is_idle_task(current)) { + task = kthread_create_on_node(wait_for_print_vcpu_stack, vcpu, + numa_node_id(), + "show-vcpu/%d", vcpu->vcpu_id); + if (IS_ERR(task)) { + pr_err("%s(): could not create thread to dump VCPU #%d " + 
"current stack\n", + __func__, vcpu->vcpu_id); + return; + } + wake_up_process(task); + } else { + int pid; + + pid = kernel_thread(wait_for_print_vcpu_stack, vcpu, + CLONE_FS | CLONE_FILES); + if (pid < 0) { + pr_err("%s(): Could not create thread to dump VCPU #%d " + "stack(s)\n", + __func__, vcpu->vcpu_id); + return; + } + rcu_read_lock(); + task = find_task_by_pid_ns(pid, &init_pid_ns); + rcu_read_unlock(); + snprintf(task->comm, sizeof(task->comm), + "show-vcpu/%d", vcpu->vcpu_id); + } + DebugGST("created thread %s (%d) to wait for completion " + "on VCPU #%d\n", + task->comm, task->pid, vcpu->vcpu_id); +} +void kvm_print_vcpu_stack(struct kvm_vcpu *vcpu) +{ + DebugGST("started for VCPU #%d\n", vcpu->vcpu_id); + deferred_print_vcpu_stack(vcpu); +} +/* This could be called from IRQ context, so defer work instead of creating + * kthread */ +static inline void +deferred_print_all_guest_stacks(void) +{ + DebugGST("started for all VMs\n"); + schedule_work(&kvm_dump_stacks); + DebugGST("done\n"); +} + +void kvm_print_all_vm_stacks(void) +{ + mutex_lock(&kvm_lock); + if (list_empty(&vm_list)) { + mutex_unlock(&kvm_lock); + DebugGST("nothing VM detected\n"); + return; + } + deferred_print_all_guest_stacks(); + mutex_unlock(&kvm_lock); +} diff --git a/arch/e2k/kvm/process.h b/arch/e2k/kvm/process.h new file mode 100644 index 000000000000..6ac427bb8fa0 --- /dev/null +++ b/arch/e2k/kvm/process.h @@ -0,0 +1,837 @@ +/* + * process.h: In-kernel KVM process related definitions + * Copyright (c) 2011, MCST. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __KVM_PROCESS_H +#define __KVM_PROCESS_H + +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "cpu_defs.h" +#include "irq.h" +#include "mmu.h" +#include "gaccess.h" + +#undef DEBUG_KVM_ACTIVATION_MODE +#undef DebugKVMACT +#define DEBUG_KVM_ACTIVATION_MODE 0 /* KVM guest kernel data */ + /* stack activations */ + /* debugging */ +#define DebugKVMACT(fmt, args...) \ +({ \ + if (DEBUG_KVM_ACTIVATION_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +extern bool debug_guest_user_stacks; +#undef DEBUG_KVM_GUEST_STACKS_MODE +#undef DebugGUST +#define DEBUG_KVM_GUEST_STACKS_MODE 0 /* guest user stacks */ + /* copy debug */ +#define DebugGUST(fmt, args...) \ +({ \ + if (debug_guest_user_stacks) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_GPT_REGS_MODE +#define DEBUG_GPT_REGS_MODE 0 /* KVM host and guest kernel */ + /* stack activations print */ + +#define GUEST_KERNEL_THREAD_STACK_SIZE (64 * 1024U) /* 64 KBytes */ + +static inline struct gthread_info * +kvm_get_guest_thread_info(struct kvm *kvm, int gpid_nr) +{ + gpid_t *gpid = kvm_find_gpid(&kvm->arch.gpid_table, gpid_nr); + if (gpid == NULL) + return NULL; + return gpid->gthread_info; +} + +/* + * Save and restore current state of host thread which can be changed + * in the case of long jump throw traps and signals. 
+ * Host VCPU thread can run any guest thread (multi-thread or multi-stack mode) + * so store/restore current host state wile switch from one guest thread + * to other + */ +#define SAVE_HOST_THREAD_STATE(__task, __gti) \ +({ \ + thread_info_t *ti = task_thread_info(__task); \ + gthread_info_t *gti = (__gti); \ + \ + gti->pt_regs = ti->pt_regs; \ +}) +#define RESTORE_HOST_THREAD_STATE(__task, __gti) \ +({ \ + thread_info_t *ti = task_thread_info(__task); \ + gthread_info_t *gti = (__gti); \ + \ + ti->pt_regs = gti->pt_regs; \ + gti->pt_regs = NULL; \ +}) +#define INIT_HOST_THREAD_STATE(new_gti) \ +({ \ + (new_gti)->pt_regs = NULL; \ +}) +#define COPY_HOST_THREAD_STATE(cur_gti, new_gti) \ +({ \ + INIT_HOST_THREAD_STATE(new_gti); \ +}) + +/* + * Save and restore current state of host thread which can be changed + * in the case of long jump throw traps and signals. + * Guest process can cause recursive host kernel activations due to traps, + * system calls, signal handler running. + * So it needs save/restore host thread state in each activation of host + */ +#define SAVE_KVM_THREAD_STATE(__ti, __gregs) \ +({ \ + struct pt_regs *regs = (__ti)->pt_regs; \ + \ + (__gregs)->pt_regs = regs; \ +}) +#define RESTORE_KVM_THREAD_STATE(__ti, __gregs) \ +({ \ + struct pt_regs *regs = (__gregs)->pt_regs; \ + \ + (__ti)->pt_regs = regs; \ +}) + +#define IS_GUEST_USER_THREAD(ti) \ + test_gti_thread_flag((ti)->gthread_info, GTIF_KERNEL_THREAD) + +#define CHECK_BUG(cond, num) \ +({ \ + if (cond) { \ + E2K_LMS_HALT_OK; \ + dump_stack(); \ + panic("CHECK_GUEST_KERNEL_DATA_STACK #%d " \ + "failed\n", num); \ + } \ +}) + +#ifdef CONFIG_KVM_GUEST_HW_HCALL +#define CHECK_GUEST_KERNEL_DATA_STACK(ti, g_sbr, g_usd_size) \ +({ \ + if (!test_ti_status_flag((ti), TS_HOST_AT_VCPU_MODE)) { \ + /* It is host stack */ \ + CHECK_BUG((g_sbr) != (ti)->u_stack.top, 1); \ + CHECK_BUG((g_usd_size) > (ti)->u_stack.size, 2); \ + CHECK_BUG((ti)->u_stack.bottom + (ti)->u_stack.size != \ + (ti)->u_stack.top, 
3); \ + } else { \ + /* It is VCPU stack */ \ + CHECK_BUG((ti)->vcpu->arch.is_hv, 8); \ + } \ + if ((ti)->gthread_info == NULL) { \ + CHECK_BUG(!test_ti_status_flag((ti), \ + TS_HOST_AT_VCPU_MODE), 4); \ + } else { \ + gthread_info_t *gti = (ti)->gthread_info; \ + e2k_stacks_t *stacks = >i->stack_regs.stacks; \ + \ + CHECK_BUG((g_sbr) != stacks->top, 6); \ + if ((g_sbr) == (ti)->u_stack.top) { \ + CHECK_BUG(stacks->usd_lo.USD_lo_base - \ + stacks->usd_hi.USD_hi_size != \ + (ti)->u_stack.bottom, 7); \ + } else { \ + CHECK_BUG(stacks->usd_lo.USD_lo_base - \ + stacks->usd_hi.USD_hi_size != \ + gti->data_stack.bottom, 5); \ + } \ + } \ +}) +#else /* ! CONFIG_KVM_GUEST_HW_HCALL */ +#define CHECK_GUEST_KERNEL_DATA_STACK(ti, g_sbr, g_usd_size) +#endif /* CONFIG_KVM_GUEST_HW_HCALL */ + +#define SAVE_KVM_HOST_KERNEL_STACKS_STATE(__ti, __gti, __gregs) \ +({ \ + (__gregs)->k_usd_size = (__ti)->k_usd_hi.USD_hi_size; \ + (__gregs)->k_stk_frame_no = (__gti)->k_stk_frame_no; \ +}) +#define UPDATE_KVM_HOST_KERNEL_STACKS_STATE(__ti, __gti, __usd_lo, __usd_hi) \ +({ \ + (__ti)->k_usd_lo = (__usd_lo); \ + (__ti)->k_usd_hi = (__usd_hi); \ + (__gti)->k_stk_frame_no++; \ +}) +#define DO_RESTORE_KVM_HOST_KERNEL_STACKS_STATE(__ti, __gti, __gregs) \ +({ \ + e2k_size_t usd_size = (__gregs)->k_usd_size; \ + \ + (__ti)->k_usd_hi.USD_hi_size = usd_size; \ + (__ti)->k_usd_lo.USD_lo_base = \ + (u64)thread_info_task(__ti)->stack + usd_size; \ + (__gti)->k_stk_frame_no = (__gregs)->k_stk_frame_no; \ +}) +#define RESTORE_KVM_HOST_KERNEL_STACKS_STATE(__ti) \ +({ \ + gthread_info_t *gti = (__ti)->gthread_info; \ + gpt_regs_t *gregs; \ + \ + gregs = get_gpt_regs(__ti); \ + GTI_BUG_ON(gregs == NULL); \ + DO_RESTORE_KVM_HOST_KERNEL_STACKS_STATE(__ti, gti, gregs); \ +}) + +#define SAVE_KVM_GUEST_KERNEL_STACKS_STATE(__gti, __gregs) \ +({ \ + (__gregs)->g_usd_size = \ + (__gti)->stack_regs.stacks.usd_hi.USD_hi_size; \ + (__gregs)->g_stk_frame_no = (__gti)->g_stk_frame_no; \ +}) + +#define 
UPDATE_KVM_GUEST_KERNEL_STACKS_STATE(__ti, __gti, __usd_lo, __usd_hi) \ +({ \ + e2k_size_t usd_new_size = (__usd_hi).USD_hi_size; \ + \ + /* data stack grows down */ \ + if (usd_new_size > (__gti)->stack_regs.u_usd_hi.USD_hi_size) { \ + gpt_regs_t *gregs; \ + \ + /* data stack shoulg grow down, bun in some case new */ \ + /* activation can be above last saved state */ \ + /* for example first trap or hypercall after fork() */ \ + pr_debug("%s(): new guest USD size 0x%lx > " \ + "0x%x current size, base 0x%llx, " \ + "activation #%d\n", \ + __func__, usd_new_size, \ + (__gti)->stack_regs.u_usd_hi.USD_hi_size, \ + (__gti)->stack_regs.u_usd_lo.USD_lo_base, \ + (__gti)->g_stk_frame_no); \ + gregs = get_gpt_regs(__ti); \ + if (gregs != NULL) { \ + gregs->g_usd_size = usd_new_size; \ + GTI_BUG_ON(get_next_gpt_regs((__ti), gregs)); \ + } \ + } \ + (__gti)->stack_regs.u_usd_lo = (__usd_lo); \ + (__gti)->stack_regs.u_usd_hi = (__usd_hi); \ + (__gti)->g_stk_frame_no++; \ +}) +#define INC_KVM_GUEST_KERNEL_STACKS_STATE(__ti, __gti, usd_new_size) \ +({ \ + /* data stack grows down */ \ + if ((usd_new_size) > \ + (__gti)->stack_regs.stacks.usd_hi.USD_hi_size) { \ + gpt_regs_t *gregs; \ + \ + /* data stack should grow down, but in some case new */ \ + /* activation can be above last saved state */ \ + /* for example first trap or hypercall after fork() */ \ + pr_debug("%s(): new guest USD size 0x%lx > " \ + "0x%x current size, base 0x%llx, " \ + "activation #%d\n", \ + __func__, (usd_new_size), \ + (__gti)->stack_regs.stacks.usd_hi.USD_hi_size, \ + (__gti)->stack_regs.stacks.usd_lo.USD_lo_base, \ + (__gti)->g_stk_frame_no); \ + gregs = get_gpt_regs(__ti); \ + if (gregs != NULL) { \ + gregs->g_usd_size = (usd_new_size); \ + GTI_BUG_ON(get_next_gpt_regs((__ti), gregs)); \ + } \ + } \ + (__gti)->stack_regs.stacks.usd_hi.USD_hi_size = (usd_new_size); \ + (__gti)->stack_regs.stacks.usd_lo.USD_lo_base = \ + (__gti)->data_stack.bottom + (usd_new_size); \ + (__gti)->g_stk_frame_no++; \ 
+})
+#define DO_RESTORE_KVM_GUEST_KERNEL_STACKS_STATE(__ti, __gti, __gregs) \
+({ \
+	CHECK_GUEST_KERNEL_DATA_STACK(__ti, \
+		(__gti)->stack_regs.stacks.top, (__gregs)->g_usd_size); \
+	(__gti)->stack_regs.stacks.usd_hi.USD_hi_size = \
+		(__gregs)->g_usd_size; \
+	(__gti)->stack_regs.stacks.usd_lo.USD_lo_base = \
+		(__gti)->data_stack.bottom + (__gregs)->g_usd_size; \
+	(__gti)->g_stk_frame_no = (__gregs)->g_stk_frame_no; \
+})
+#define RESTORE_KVM_GUEST_KERNEL_STACKS_STATE(__ti) \
+({ \
+	gthread_info_t *gti = (__ti)->gthread_info; \
+	gpt_regs_t *gregs; \
+	\
+	gregs = get_gpt_regs(__ti); \
+	GTI_BUG_ON(gregs == NULL); \
+	DO_RESTORE_KVM_GUEST_KERNEL_STACKS_STATE(__ti, gti, gregs); \
+})
+
+#define SAVE_KVM_KERNEL_STACKS_STATE(__ti, __gti, __gregs) \
+({ \
+	GTI_BUG_ON((__gti) == NULL); \
+	GTI_BUG_ON((__gregs) == NULL); \
+	SAVE_KVM_THREAD_STATE(__ti, __gregs); \
+	SAVE_KVM_HOST_KERNEL_STACKS_STATE(__ti, __gti, __gregs); \
+	SAVE_KVM_GUEST_KERNEL_STACKS_STATE(__gti, __gregs); \
+})
+#define DO_RESTORE_KVM_KERNEL_STACKS_STATE(__ti, __gti, __gregs) \
+({ \
+	GTI_BUG_ON((__gti) == NULL); \
+	GTI_BUG_ON((__gregs) == NULL); \
+	RESTORE_KVM_THREAD_STATE(__ti, __gregs); \
+	DO_RESTORE_KVM_HOST_KERNEL_STACKS_STATE(__ti, __gti, __gregs); \
+	DO_RESTORE_KVM_GUEST_KERNEL_STACKS_STATE(__ti, __gti, __gregs); \
+})
+
+#define RETURN_TO_GUEST_KERNEL_DATA_STACK(__ti, __g_usd_size) \
+({ \
+	e2k_sbr_t sbr = { { 0 } }; \
+	e2k_usd_lo_t usd_lo = { { 0 } }; \
+	e2k_usd_hi_t usd_hi = { { 0 } }; \
+	sbr.SBR_base = (__ti)->u_stack.top; \
+	usd_hi.USD_hi_size = (__g_usd_size); \
+	usd_lo.USD_lo_base = (__ti)->u_stack.bottom + (__g_usd_size); \
+	NATIVE_NV_WRITE_USBR_USD_REG(sbr, usd_hi, usd_lo); \
+})
+
+#define KVM_SAVE_GUEST_KERNEL_GREGS_FROM_TI(__ti, \
+		unused__, task__, cpu_id__, cpu_off__) \
+({ \
+	kernel_gregs_t *k_gregs = &(__ti)->k_gregs; \
+	\
+	ONLY_COPY_FROM_KERNEL_GREGS(k_gregs, \
+		unused__, task__, cpu_id__, cpu_off__); \
+})
+#define
KVM_RESTORE_GUEST_KERNEL_GREGS_AT_TI(__ti, \ + unused__, task__, cpu_id__, cpu_off__) \ +({ \ + kernel_gregs_t *k_gregs = &(__ti)->k_gregs; \ + \ + ONLY_COPY_TO_KERNEL_GREGS(k_gregs, \ + unused__, task__, cpu_id__, cpu_off__); \ +}) + +static inline void print_gpt_regs(gpt_regs_t *gregs) +{ + if (gregs == NULL) { + pr_info("Empty (NULL) guest pt_regs structures\n"); + return; + } + pr_info("guest pt_regs structure at %px: type %d\n", + gregs, gregs->type); + pr_info(" data stack state: guest #%d usd size 0x%lx, " + "host #%d usd size 0x%lx, PCSP ind 0x%lx\n", + gregs->g_stk_frame_no, gregs->g_usd_size, + gregs->k_stk_frame_no, gregs->k_usd_size, + gregs->pcsp_ind); + pr_info(" current thread state: pt_regs %px\n", + gregs->pt_regs); +} + +static inline void print_all_gpt_regs(thread_info_t *ti) +{ + gpt_regs_t *gregs; + + gregs = get_gpt_regs(ti); + if (gregs == NULL) { + pr_info("none any guest pt_regs structures\n"); + return; + } + do { + print_gpt_regs(gregs); + gregs = get_next_gpt_regs(ti, gregs); + } while (gregs); +} + +static inline int +kvm_flush_hw_stacks_to_memory(kvm_hw_stacks_flush_t __user *hw_stacks) +{ + unsigned long psp_lo; + unsigned long psp_hi; + unsigned long pcsp_lo; + unsigned long pcsp_hi; + int error = 0; + + NATIVE_FLUSHCPU; + + psp_lo = NATIVE_NV_READ_PSP_LO_REG_VALUE(); + psp_hi = NATIVE_NV_READ_PSP_HI_REG_VALUE(); + pcsp_lo = NATIVE_NV_READ_PCSP_LO_REG_VALUE(); + pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG_VALUE(); + + error |= put_user(psp_lo, &hw_stacks->psp_lo); + error |= put_user(psp_hi, &hw_stacks->psp_hi); + error |= put_user(pcsp_lo, &hw_stacks->pcsp_lo); + error |= put_user(pcsp_hi, &hw_stacks->pcsp_hi); + + return error; +} + +/* + * Procedure chain stacks can be mapped to user (user processes) + * or kernel space (kernel threads). But mapping is always to privileged area + * and directly can be accessed only by host kernel. + * SPECIAL CASE: access to current procedure chain stack: + * 1. 
Current stack frame must be locked (resident), so access is
+ * safety and can use common load/store operations
+ * 2. Top of stack can be loaded to the special hardware register file and
+ * must be spilled to memory before any access.
+ * 3. If items of chain stack are not updated, then spilling is enough to
+ * their access
+ * 4. If items of chain stack are updated, then interrupts and
+ * any calling of function should be disabled in addition to spilling,
+ * because of return (done) will fill some part of stack from memory and can be
+ * two copy of chain stack items: in memory and in registers file.
+ * We can update only in memory and following spill recover not updated
+ * value from registers file.
+ * Guest kernel can access to items of procedure chain stacks only through
+ * following host kernel light hypercalls
+ * WARNING:
+ * 1. interrupts NOW disabled for any light hypercall
+ * 2. should not be any calls of function using data stack
+ */
+static inline long
+kvm_check_guest_active_cr_mem_item(e2k_addr_t base, e2k_addr_t cr_ind,
+			e2k_addr_t cr_item)
+{
+	e2k_psp_lo_t pcsp_lo;
+	e2k_psp_hi_t pcsp_hi;
+	e2k_pcshtp_t pcshtp;
+	unsigned long pcs_bound;
+
+	if ((base & E2K_ALIGN_PCSTACK_MASK) != 0)
+		return -EINVAL;
+	if ((cr_ind & ((1UL << E2K_ALIGN_CHAIN_WINDOW) - 1)) != 0)
+		return -EINVAL;
+	if ((cr_item & (sizeof(e2k_cr0_lo_t) - 1)) != 0)
+		return -EINVAL;
+	pcsp_lo = NATIVE_NV_READ_PCSP_LO_REG();
+	pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG();
+	pcshtp = NATIVE_READ_PCSHTP_REG_SVALUE();
+	pcs_bound = pcsp_hi.PCSP_hi_ind + pcshtp;
+	if (base < pcsp_lo.PCSP_lo_base ||
+			base >= pcsp_lo.PCSP_lo_base + pcsp_hi.PCSP_hi_size)
+		return -EINVAL;
+	if (cr_ind >= pcs_bound)
+		return -EINVAL;
+	if (base + cr_ind >= pcsp_lo.PCSP_lo_base + pcs_bound)
+		return -EINVAL;
+
+	if (cr_ind >= pcsp_hi.PCSP_hi_ind) {
+		/* CR to access is now into hardware chain registers file */
+		/* spill it into memory */
+		NATIVE_FLUSHC;
+	}
+	return 0;
+}
+static inline long
+kvm_get_guest_active_cr_mem_item(unsigned long __user *cr_value, + e2k_addr_t base, e2k_addr_t cr_ind, e2k_addr_t cr_item) +{ + unsigned long cr; + long error; + + error = kvm_check_guest_active_cr_mem_item(base, cr_ind, cr_item); + if (error) + return error; + cr = native_get_active_cr_mem_value(base, cr_ind, cr_item); + error = put_user(cr, cr_value); + return error; +} +static inline long +kvm_put_guest_active_cr_mem_item(unsigned long cr_value, + e2k_addr_t base, e2k_addr_t cr_ind, e2k_addr_t cr_item) +{ + long error; + + error = kvm_check_guest_active_cr_mem_item(base, cr_ind, cr_item); + if (error) + return error; + native_put_active_cr_mem_value(cr_value, base, cr_ind, cr_item); + return 0; +} + +/* + * These functions for host kernel, see comment about virtualization at + * arch/e2k/include/asm/ptrace.h + * In this case host is main kernel and here knows that it is host + * Extra kernel is guest + * + * Get/set kernel stack limits of area reserved at the top of hardware stacks + * Kernel areas include two part: + * guest kernel stack reserved area at top of stack + * host kernel stack reserved area at top of stack + */ + +static __always_inline e2k_size_t +kvm_get_guest_hw_ps_user_size(hw_stack_t *hw_stacks) +{ + return get_hw_ps_user_size(hw_stacks); +} +static __always_inline e2k_size_t +kvm_get_guest_hw_pcs_user_size(hw_stack_t *hw_stacks) +{ + return get_hw_pcs_user_size(hw_stacks); +} +static __always_inline void +kvm_set_guest_hw_ps_user_size(hw_stack_t *hw_stacks, e2k_size_t u_ps_size) +{ + set_hw_ps_user_size(hw_stacks, u_ps_size); +} +static __always_inline void +kvm_set_guest_hw_pcs_user_size(hw_stack_t *hw_stacks, e2k_size_t u_pcs_size) +{ + set_hw_pcs_user_size(hw_stacks, u_pcs_size); +} + +extern int kvm_copy_hw_stacks_frames(struct kvm_vcpu *vcpu, + void __user *dst, void __user *src, long size, bool is_chain); + +extern void kvm_arch_vcpu_to_wait(struct kvm_vcpu *vcpu); +extern void kvm_arch_vcpu_to_run(struct kvm_vcpu *vcpu); + +extern int 
kvm_start_vcpu_thread(struct kvm_vcpu *vcpu); +extern int kvm_start_pv_guest(struct kvm_vcpu *vcpu); +extern void prepare_stacks_to_startup_vcpu(struct kvm_vcpu *vcpu, + e2k_mem_ps_t *ps_frames, e2k_mem_crs_t *pcs_frames, + u64 *args, int args_num, char *entry_point, e2k_psr_t psr, + e2k_size_t usd_size, e2k_size_t *ps_ind, e2k_size_t *pcs_ind, + int cui, bool kernel); +extern int kvm_prepare_pv_vcpu_start_stacks(struct kvm_vcpu *vcpu); +extern int vcpu_init_os_cu_hw_ctxt(struct kvm_vcpu *vcpu, + kvm_task_info_t *user_info); + +extern int kvm_init_vcpu_thread(struct kvm_vcpu *vcpu); +extern int hv_vcpu_start_thread(struct kvm_vcpu *vcpu); +extern int pv_vcpu_start_thread(struct kvm_vcpu *vcpu); + +extern int kvm_switch_guest_kernel_stacks(struct kvm_vcpu *vcpu, + kvm_task_info_t __user *task_info, char *entry_point, + unsigned long *args, int args_num, + guest_hw_stack_t *stack_regs); +extern int kvm_switch_to_virt_mode(struct kvm_vcpu *vcpu, + kvm_task_info_t __user *task_info, + guest_hw_stack_t *stack_regs, + void (*func)(void *data, void *arg1, void *arg2), + void *data, void *arg1, void *arg2); +extern void kvm_halt_host_vcpu_thread(struct kvm_vcpu *vcpu); +extern void kvm_spare_host_vcpu_release(struct kvm_vcpu *vcpu); +extern bool kvm_guest_vcpu_thread_should_stop(struct kvm_vcpu *vcpu); +extern void kvm_guest_vcpu_thread_stop(struct kvm_vcpu *vcpu); +extern void kvm_guest_vcpu_thread_restart(struct kvm_vcpu *vcpu); +extern int kvm_copy_guest_kernel_stacks(struct kvm_vcpu *vcpu, + kvm_task_info_t __user *task_info, e2k_cr1_hi_t cr1_hi); +extern int kvm_release_guest_task_struct(struct kvm_vcpu *vcpu, int gpid_nr); +extern int kvm_switch_to_guest_new_user(struct kvm_vcpu *vcpu, + kvm_task_info_t __user *task_info, + guest_hw_stack_t *stack_regs); +extern int kvm_clone_guest_user_stacks(struct kvm_vcpu *vcpu, + kvm_task_info_t __user *task_info); +extern int kvm_copy_guest_user_stacks(struct kvm_vcpu *vcpu, + kvm_task_info_t __user *task_info, + 
vcpu_gmmu_info_t __user *gmmu_info); +extern int kvm_sig_handler_return(struct kvm_vcpu *vcpu, + kvm_stacks_info_t *regs_info, unsigned long sigreturn_entry, + long sys_rval, guest_hw_stack_t *stack_regs); +extern int kvm_long_jump_return(struct kvm_vcpu *vcpu, + kvm_long_jump_info_t *regs_info); +extern void kvm_guest_vcpu_common_idle(struct kvm_vcpu *vcpu, + long timeout, bool interruptable); +extern void kvm_guest_vcpu_relax(void); + +extern void kvm_init_kernel_intc(struct kvm_vcpu *vcpu); + +#ifdef CONFIG_SMP +extern int kvm_activate_host_vcpu(struct kvm *kvm, int vcpu_id); +extern int kvm_activate_guest_all_vcpus(struct kvm *kvm); +#endif /* CONFIG_SMP */ + +extern void kvm_pv_wait(struct kvm *kvm, struct kvm_vcpu *vcpu); +extern void kvm_pv_kick(struct kvm *kvm, int hard_cpu_id); + +extern void prepare_vcpu_startup_args(struct kvm_vcpu *vcpu); +extern void setup_vcpu_boot_stacks(struct kvm_vcpu *vcpu, + gthread_info_t *gti); + +#ifdef CONFIG_KVM_HW_VIRTUALIZATION +extern int kvm_start_hv_guest(struct kvm_vcpu *vcpu); +extern void prepare_bu_stacks_to_startup_vcpu(struct kvm_vcpu *); +extern int startup_hv_vcpu(struct kvm_vcpu *); +#else /* ! 
CONFIG_KVM_HW_VIRTUALIZATION */ +static inline int kvm_start_hv_guest(struct kvm_vcpu *vcpu) +{ + pr_err("Hardware virtualization support turn OFF at kernel config\n"); + VM_BUG_ON(true); + return -EINVAL; +} +static inline void +prepare_bu_stacks_to_startup_vcpu(struct kvm_vcpu *vcpu, gthread_info_t *gti) +{ + /* are not used */ +} +static inline int +startup_hv_vcpu(struct kvm_vcpu *vcpu) +{ + return -ENOTSUPP; +} +#endif /* CONFIG_KVM_HW_VIRTUALIZATION */ + +extern long kvm_guest_shutdown(struct kvm_vcpu *vcpu, + void __user *msg, unsigned long reason); + +#ifdef CONFIG_KVM_ASYNC_PF +extern int kvm_pv_host_enable_async_pf(struct kvm_vcpu *vcpu, + u64 apf_reason_gpa, u64 apf_id_gpa, + u32 apf_ready_vector, u32 irq_controller); +#endif /* CONFIG_KVM_ASYNC_PF */ + +extern int kvm_apply_updated_psp_bounds(struct kvm_vcpu *vcpu, + unsigned long base, unsigned long size, + unsigned long start, unsigned long end, unsigned long delta); +extern int kvm_apply_updated_pcsp_bounds(struct kvm_vcpu *vcpu, + unsigned long base, unsigned long size, + unsigned long start, unsigned long end, unsigned long delta); + +/** + * user_hw_stacks_copy - copy guest user hardware stacks that have been + * SPILLed to kernel back to guest kernel stack + * @vcpu - saved user stack registers + * @ps_size - copy size of current window in procedure stack, + * @pcs_size - copy size of current window in chain stack, + */ +static __always_inline int +pv_vcpu_hw_stacks_copy(struct kvm_vcpu *vcpu, pt_regs_t *regs, + long ps_size, long pcs_size, + long ps_off, long pcs_off) +{ + e2k_stacks_t *g_stacks = ®s->g_stacks; + e2k_stacks_t *u_stacks = ®s->stacks; + e2k_psp_lo_t g_psp_lo = g_stacks->psp_lo, + k_psp_lo = current_thread_info()->k_psp_lo; + e2k_psp_hi_t g_psp_hi = g_stacks->psp_hi; + e2k_pcsp_lo_t g_pcsp_lo = g_stacks->pcsp_lo, + k_pcsp_lo = current_thread_info()->k_pcsp_lo; + e2k_pcsp_hi_t g_pcsp_hi = g_stacks->pcsp_hi; + void *dst, *src; + int ret; + + DebugGUST("guest user procedure stack 
state: base 0x%llx " + "size 0x%x ind 0x%x PSHTP size 0x%llx\n", + u_stacks->psp_lo.PSP_lo_base, + u_stacks->psp_hi.PSP_hi_size, u_stacks->psp_hi.PSP_hi_ind, + GET_PSHTP_MEM_INDEX(u_stacks->pshtp)); + DebugGUST("guest user chain stack state: base 0x%llx " + "size 0x%x ind 0x%x PСSHTP size 0x%llx\n", + u_stacks->pcsp_lo.PCSP_lo_base, + u_stacks->pcsp_hi.PCSP_hi_size, u_stacks->pcsp_hi.PCSP_hi_ind, + PCSHTP_SIGN_EXTEND(u_stacks->pcshtp)); + + /* + * Copy guest user's part from kernel stacks into guest kernel stacks + * Update guest user's stack registers + */ + + if (likely(pcs_size <= 0 && ps_size <= 0)) + return 0; + + if (unlikely(pcs_size > 0)) { + e2k_pcsp_hi_t k_pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG(); + + if (unlikely(g_pcsp_hi.PCSP_hi_ind > g_pcsp_hi.PCSP_hi_size)) { + pr_err("%s(): guest kernel stack was overflown : " + "PCSP ind 0x%x > size 0x%x\n", + __func__, g_pcsp_hi.PCSP_hi_ind, + g_pcsp_hi.PCSP_hi_size); + KVM_BUG_ON(true); + } + + dst = (void *)(g_pcsp_lo.PCSP_lo_base + pcs_off); + src = (void *)(k_pcsp_lo.PCSP_lo_base + pcs_off); + DebugGUST("copy guest user chain stack frames from " + "host %px to guest kernel %px, size 0x%lx\n", + src, dst, pcs_size); + ret = user_hw_stack_frames_copy(dst, src, pcs_size, regs, + k_pcsp_hi.PCSP_hi_ind, true); + if (ret) + return ret; + g_pcsp_hi.PCSP_hi_ind += pcs_size; + g_stacks->pcsp_hi = g_pcsp_hi; + DebugGUST("guest kernel chain stack new ind 0x%x\n", + g_stacks->pcsp_hi.PCSP_hi_ind); + } + + if (unlikely(ps_size > 0)) { + e2k_psp_hi_t k_psp_hi = NATIVE_NV_READ_PSP_HI_REG(); + + if (unlikely(g_psp_hi.PSP_hi_ind > g_psp_hi.PSP_hi_size)) { + pr_err("%s(): guest kernel stack was overflown : " + "PSP ind 0x%x > size 0x%x\n", + __func__, g_psp_hi.PSP_hi_ind, + g_psp_hi.PSP_hi_size); + KVM_BUG_ON(true); + } + + dst = (void *)(g_psp_lo.PSP_lo_base + ps_off); + src = (void *)(k_psp_lo.PSP_lo_base + ps_off); + DebugGUST("copy guest user procedure stack frames from " + "host %px to guest kernel %px, size 0x%lx\n", + src, 
dst, ps_size); + ret = user_hw_stack_frames_copy(dst, src, ps_size, regs, + k_psp_hi.PSP_hi_ind, false); + if (ret) + return ret; + g_psp_hi.PSP_hi_ind += ps_size; + g_stacks->psp_hi = g_psp_hi; + DebugGUST("guest kernel procedure stack new ind 0x%x\n", + g_stacks->psp_hi.PSP_hi_ind); + } + + return 0; +} + +static inline int +pv_vcpu_user_hw_stacks_copy_crs(struct kvm_vcpu *vcpu, e2k_stacks_t *g_stacks, + pt_regs_t *regs, e2k_mem_crs_t *crs) +{ + e2k_mem_crs_t __user *u_frame; + gthread_info_t *gti = pv_vcpu_get_gti(vcpu); + int ret; + + u_frame = (void __user *) g_stacks->pcsp_lo.PCSP_lo_base + + g_stacks->pcsp_hi.PCSP_hi_ind; + DebugGUST("copy last user frame from CRS at %px to guest " + "kernel chain %px (base 0x%llx + ind 0x%x)\n", + crs, u_frame, g_stacks->pcsp_lo.PCSP_lo_base, + g_stacks->pcsp_hi.PCSP_hi_ind); + ret = user_crs_frames_copy(u_frame, regs, crs); + if (unlikely(ret)) + return ret; + + g_stacks->pcsp_hi.PCSP_hi_ind += SZ_OF_CR; + DebugGUST("guest kernel chain stack index is now 0x%x\n", + g_stacks->pcsp_hi.PCSP_hi_ind); + return 0; +} + +static inline int pv_vcpu_user_hw_stacks_copy_full(struct kvm_vcpu *vcpu, + pt_regs_t *regs) +{ + e2k_stacks_t *g_stacks = ®s->g_stacks; + long ps_copy, pcs_copy, ps_ind, pcs_ind; + int ret; + + DebugUST("guest kernel procedure stack current state: base 0x%llx " + "size 0x%x ind 0x%x\n", + g_stacks->psp_lo.PSP_lo_base, g_stacks->psp_hi.PSP_hi_size, + g_stacks->psp_hi.PSP_hi_ind); + DebugUST("guest kernel chain stack current state: base 0x%llx " + "size 0x%x ind 0x%x\n", + g_stacks->pcsp_lo.PCSP_lo_base, g_stacks->pcsp_hi.PCSP_hi_size, + g_stacks->pcsp_hi.PCSP_hi_ind); + + ps_copy = GET_PSHTP_MEM_INDEX(g_stacks->pshtp); + pcs_copy = PCSHTP_SIGN_EXTEND(g_stacks->pcshtp); + DebugGUST("guest user size to copy PSHTP 0x%lx PCSHTP 0x%lx\n", + ps_copy, pcs_copy); + ps_ind = g_stacks->psp_hi.PSP_hi_ind; + if (ps_ind > 0) { + /* first part of procedure stack was alredy copied */ + ps_copy -= ps_ind; + KVM_BUG_ON(ps_copy < 
0); + } + pcs_ind = g_stacks->pcsp_hi.PCSP_hi_ind; + if (pcs_ind > 0) { + /* first part of chain stack was alredy copied */ + pcs_copy -= pcs_ind; + KVM_BUG_ON(pcs_copy < 0); + } + + /* + * Copy part of guest user stacks that were SPILLed into kernel stacks + */ + ret = pv_vcpu_hw_stacks_copy(vcpu, regs, ps_copy, pcs_copy, + ps_ind, pcs_ind); + if (unlikely(ret)) + return ret; + + /* + * Nothing to FILL so remove the resulting hole from kernel stacks. + * + * IMPORTANT: there is always at least one user frame at the top of + * kernel stack - the one that issued a system call (in case of an + * exception we uphold this rule manually, see user_hw_stacks_prepare()) + * We keep this ABI and _always_ leave space for one user frame, + * this way we can later FILL using return trick (otherwise there + * would be no space in chain stack for the trick). + */ + collapse_kernel_hw_stacks(g_stacks); + + /* + * Copy saved %cr registers + * + * Caller must take care of filling of resulting hole + * (last user frame from pcshtp == SZ_OF_CR). 
+ */ + ret = pv_vcpu_user_hw_stacks_copy_crs(vcpu, g_stacks, regs, ®s->crs); + if (unlikely(ret)) + return ret; + + if (DEBUG_KVM_GUEST_STACKS_MODE && debug_guest_user_stacks) + debug_guest_user_stacks = false; + + return 0; +} + +static inline int +pv_vcpu_user_crs_copy_to_kernel(struct kvm_vcpu *vcpu, + void __user *u_frame, e2k_mem_crs_t *crs) +{ + hva_t hva; + unsigned long ts_flag; + int ret; + kvm_arch_exception_t exception; + + hva = kvm_vcpu_gva_to_hva(vcpu, (gva_t)u_frame, true, &exception); + if (kvm_is_error_hva(hva)) { + pr_err("%s(): failed to find GPA for dst %lx GVA, " + "inject page fault to guest\n", + __func__, u_frame); + kvm_vcpu_inject_page_fault(vcpu, (void *)u_frame, + &exception); + return -EAGAIN; + } + + u_frame = (void *)hva; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __copy_to_user(u_frame, crs, sizeof(*crs)); + clear_ts_flag(ts_flag); + if (unlikely(ret)) { + pr_err("%s(): copy CRS frame to guest kernel stack failed, " + "error %d\n", + __func__, ret); + return -EFAULT; + } + + return 0; +} + +#endif /* __KVM_PROCESS_H */ diff --git a/arch/e2k/kvm/pt-structs.c b/arch/e2k/kvm/pt-structs.c new file mode 100644 index 000000000000..c926de5c0141 --- /dev/null +++ b/arch/e2k/kvm/pt-structs.c @@ -0,0 +1,548 @@ +/* + * MMU menegement (Instruction and Data caches, TLB, registers) + * + * Derived heavily from Linus's Alpha/AXP ASN code... + * + * Copyright 2001 Salavat S. 
Guiliazov (atic@mcst.ru) + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "pgtable-gp.h" + +static unsigned int +mmu_get_pte_val_memory_type_v2(pgprot_t pte) +{ + return get_pte_val_v2_memory_type(pgprot_val(pte)); +} +static pgprot_t +mmu_set_pte_val_memory_type_v2(pgprot_t pte, unsigned int mtype) +{ + return __pgprot(set_pte_val_v2_memory_type(pgprot_val(pte), mtype)); +} +static unsigned int +mmu_get_pte_val_memory_type_v6(pgprot_t pte) +{ + return get_pte_val_v6_memory_type(pgprot_val(pte)); +} +static pgprot_t +mmu_set_pte_val_memory_type_v6(pgprot_t pte, unsigned int mtype) +{ + return __pgprot(set_pte_val_v6_memory_type(pgprot_val(pte), mtype)); +} +static unsigned int +mmu_get_pte_val_memory_type_gp(pgprot_t pte) +{ + return get_pte_val_gp_memory_type(pgprot_val(pte)); +} +static pgprot_t +mmu_set_pte_val_memory_type_gp(pgprot_t pte, unsigned int mtype) +{ + return __pgprot(set_pte_val_gp_memory_type(pgprot_val(pte), mtype)); +} +static unsigned int +mmu_get_pte_val_memory_type_rule_gp(pgprot_t pte) +{ + return get_pte_val_gp_memory_type_rule(pgprot_val(pte)); +} +static pgprot_t +mmu_set_pte_val_memory_type_rule_gp(pgprot_t pte, unsigned int mtcr) +{ + return __pgprot(set_pte_val_gp_memory_type_rule(pgprot_val(pte), mtcr)); +} + +/* + * Hardware MMUs page tables have some differences from one ISET to other + * moreover each MMU supports a few different page tables: + * native (primary) + * secondary page tables for sevral modes (VA32, VA48, PA32, PA48 ...) 
+ * The follow structures presents all available page table structures + * + * Warning .boot_*() entries should be updated dinamicaly to point to + * physical addresses of functions for arch/e2k/p2v/ + */ +const pt_struct_t __nodedata pgtable_struct_e2k_v2 = { + .type = E2K_PT_TYPE, + .name = "primary e2k v2", + .pt_v6 = false, + .pfn_mask = _PAGE_PFN_V2, + .accessed_mask = _PAGE_A_HW_V2, + .dirty_mask = _PAGE_D_V2, + .present_mask = _PAGE_P_V2, + .valid_mask = _PAGE_VALID_V2, + .user_mask = 0ULL, + .priv_mask = _PAGE_PV_V2, + .non_exec_mask = _PAGE_NON_EX_V2, + .exec_mask = 0ULL, + .sw_bit1_mask = _PAGE_AVAIL_V2, + .sw_bit2_mask = _PAGE_A_SW_V2, + .sw_mmio_mask = _PAGE_MMIO_SW_V2, + .ptd_kernel_prot = _PAGE_KERNEL_PT_V2, + .ptd_user_prot = _PAGE_USER_PT_V2, + .levels_num = E2K_PT_LEVELS_NUM, + .get_pte_val_memory_type = &mmu_get_pte_val_memory_type_v2, + .set_pte_val_memory_type = &mmu_set_pte_val_memory_type_v2, + .get_pte_val_memory_type_rule = NULL, + .set_pte_val_memory_type_rule = NULL, + .levels = { + [E2K_PAGES_LEVEL_NUM] = { + .id = E2K_PAGES_LEVEL_NUM, + .page_size = PAGE_SIZE, + }, + [E2K_PTE_LEVEL_NUM] = { + .id = E2K_PTE_LEVEL_NUM, + .pt_size = PTE_SIZE, + .page_size = PAGE_SIZE, + .pt_shift = PTE_SHIFT, + .page_shift = PTE_SHIFT, + .pt_mask = PTE_MASK & E2K_VA_PAGE_MASK, + .pt_offset = ~PTE_MASK & E2K_VA_PAGE_MASK, + .pt_index_mask = PTE_MASK ^ PMD_MASK, + .page_mask = PTE_MASK, + .page_offset = ~PTE_MASK, + .ptrs_per_pt = PTRS_PER_PTE, + .is_pte = true, + .is_huge = false, + .dtlb_type = COMMON_DTLB_TYPE, + }, + [E2K_PMD_LEVEL_NUM] = { + .id = E2K_PMD_LEVEL_NUM, + .pt_size = PMD_SIZE, + .pt_shift = PMD_SHIFT, + .pt_mask = PMD_MASK & E2K_VA_PAGE_MASK, + .pt_offset = ~PMD_MASK & E2K_VA_PAGE_MASK, + .pt_index_mask = PMD_MASK ^ PUD_MASK, + .page_mask = PMD_MASK << 1, + .ptrs_per_pt = PTRS_PER_PMD, + .page_size = E2K_4M_PAGE_SIZE, + .page_shift = PMD_SHIFT + 1, + .page_offset = (E2K_4M_PAGE_SIZE - 1), + .is_pte = false, + .is_huge = true, + .huge_ptes = 
2, + .dtlb_type = COMMON_DTLB_TYPE, + }, + [E2K_PUD_LEVEL_NUM] = { + .id = E2K_PUD_LEVEL_NUM, + .pt_size = PUD_SIZE, + .page_size = PAGE_PUD_SIZE, + .pt_shift = PUD_SHIFT, + .page_shift = PUD_SHIFT, + .pt_mask = PUD_MASK & E2K_VA_PAGE_MASK, + .pt_offset = ~PUD_MASK & E2K_VA_PAGE_MASK, + .pt_index_mask = PUD_MASK ^ PGDIR_MASK, + .page_mask = PUD_MASK, + .page_offset = ~PUD_MASK, + .ptrs_per_pt = PTRS_PER_PUD, + .is_pte = false, + .is_huge = false, + }, + [E2K_PGD_LEVEL_NUM] = { + .id = E2K_PGD_LEVEL_NUM, + .pt_size = PGDIR_SIZE, + .page_size = PAGE_PGD_SIZE, + .pt_shift = PGDIR_SHIFT, + .page_shift = PGDIR_SHIFT, + .pt_mask = PGDIR_MASK & E2K_VA_PAGE_MASK, + .pt_offset = ~PGDIR_MASK & E2K_VA_PAGE_MASK, + .pt_index_mask = PGDIR_MASK & E2K_VA_PAGE_MASK, + .page_mask = PGDIR_MASK, + .page_offset = ~PGDIR_MASK, + .ptrs_per_pt = PTRS_PER_PGD, + .is_pte = false, + .is_huge = false, + }, + }, +}; +const pt_struct_t __nodedata pgtable_struct_e2k_v3 = { + .type = E2K_PT_TYPE, + .name = "primary e2k v3", + .pt_v6 = false, + .pfn_mask = _PAGE_PFN_V2, + .accessed_mask = _PAGE_A_HW_V2, + .dirty_mask = _PAGE_D_V2, + .present_mask = _PAGE_P_V2, + .valid_mask = _PAGE_VALID_V2, + .user_mask = 0ULL, + .priv_mask = _PAGE_PV_V2, + .non_exec_mask = _PAGE_NON_EX_V2, + .exec_mask = 0ULL, + .sw_bit1_mask = _PAGE_AVAIL_V2, + .sw_bit2_mask = _PAGE_A_SW_V2, + .sw_mmio_mask = _PAGE_MMIO_SW_V2, + .ptd_kernel_prot = _PAGE_KERNEL_PT_V2, + .ptd_user_prot = _PAGE_USER_PT_V2, + .levels_num = E2K_PT_LEVELS_NUM, + .get_pte_val_memory_type = &mmu_get_pte_val_memory_type_v2, + .set_pte_val_memory_type = &mmu_set_pte_val_memory_type_v2, + .get_pte_val_memory_type_rule = NULL, + .set_pte_val_memory_type_rule = NULL, + .levels = { + [E2K_PAGES_LEVEL_NUM] = { + .id = E2K_PAGES_LEVEL_NUM, + .page_size = PAGE_SIZE, + }, + [E2K_PTE_LEVEL_NUM] = { + .id = E2K_PTE_LEVEL_NUM, + .pt_size = PTE_SIZE, + .page_size = PAGE_SIZE, + .pt_shift = PTE_SHIFT, + .page_shift = PTE_SHIFT, + .pt_mask = PTE_MASK & 
E2K_VA_PAGE_MASK, + .pt_offset = ~PTE_MASK & E2K_VA_PAGE_MASK, + .pt_index_mask = PTE_MASK ^ PMD_MASK, + .page_mask = PTE_MASK, + .page_offset = ~PTE_MASK, + .ptrs_per_pt = PTRS_PER_PTE, + .is_pte = true, + .is_huge = false, + .dtlb_type = COMMON_DTLB_TYPE, + }, + [E2K_PMD_LEVEL_NUM] = { + .id = E2K_PMD_LEVEL_NUM, + .pt_size = PMD_SIZE, + .pt_shift = PMD_SHIFT, + .pt_mask = PMD_MASK & E2K_VA_PAGE_MASK, + .pt_offset = ~PMD_MASK & E2K_VA_PAGE_MASK, + .pt_index_mask = PMD_MASK ^ PUD_MASK, + .page_mask = PMD_MASK, + .ptrs_per_pt = PTRS_PER_PMD, + .page_size = E2K_2M_PAGE_SIZE, + .page_shift = PMD_SHIFT, + .page_offset = ~PMD_MASK, + .huge_ptes = 1, + .is_pte = false, + .is_huge = true, + .dtlb_type = COMMON_DTLB_TYPE, + }, + [E2K_PUD_LEVEL_NUM] = { + .id = E2K_PUD_LEVEL_NUM, + .pt_size = PUD_SIZE, + .page_size = PAGE_PUD_SIZE, + .pt_shift = PUD_SHIFT, + .page_shift = PUD_SHIFT, + .pt_mask = PUD_MASK & E2K_VA_PAGE_MASK, + .pt_offset = ~PUD_MASK & E2K_VA_PAGE_MASK, + .pt_index_mask = PUD_MASK ^ PGDIR_MASK, + .page_mask = PUD_MASK, + .page_offset = ~PUD_MASK, + .ptrs_per_pt = PTRS_PER_PUD, + .is_pte = false, + .is_huge = false, + .is_huge = false, + }, + [E2K_PGD_LEVEL_NUM] = { + .id = E2K_PGD_LEVEL_NUM, + .pt_size = PGDIR_SIZE, + .page_size = PAGE_PGD_SIZE, + .pt_shift = PGDIR_SHIFT, + .page_shift = PGDIR_SHIFT, + .pt_mask = PGDIR_MASK & E2K_VA_PAGE_MASK, + .pt_offset = ~PGDIR_MASK & E2K_VA_PAGE_MASK, + .pt_index_mask = PGDIR_MASK & E2K_VA_PAGE_MASK, + .page_mask = PGDIR_MASK, + .page_offset = ~PGDIR_MASK, + .ptrs_per_pt = PTRS_PER_PGD, + .is_pte = false, + .is_huge = false, + }, + }, +}; +const pt_struct_t __nodedata pgtable_struct_e2k_v5 = { + .type = E2K_PT_TYPE, + .name = "primary e2k v5", + .pt_v6 = false, + .pfn_mask = _PAGE_PFN_V2, + .accessed_mask = _PAGE_A_HW_V2, + .dirty_mask = _PAGE_D_V2, + .present_mask = _PAGE_P_V2, + .valid_mask = _PAGE_VALID_V2, + .user_mask = 0ULL, + .priv_mask = _PAGE_PV_V2, + .non_exec_mask = _PAGE_NON_EX_V2, + .exec_mask = 0ULL, + 
.sw_bit1_mask = _PAGE_AVAIL_V2, + .sw_bit2_mask = _PAGE_A_SW_V2, + .sw_mmio_mask = _PAGE_MMIO_SW_V2, + .ptd_kernel_prot = _PAGE_KERNEL_PT_V2, + .ptd_user_prot = _PAGE_USER_PT_V2, + .levels_num = E2K_PT_LEVELS_NUM, + .get_pte_val_memory_type = &mmu_get_pte_val_memory_type_v2, + .set_pte_val_memory_type = &mmu_set_pte_val_memory_type_v2, + .get_pte_val_memory_type_rule = NULL, + .set_pte_val_memory_type_rule = NULL, + .levels = { + [E2K_PAGES_LEVEL_NUM] = { + .id = E2K_PAGES_LEVEL_NUM, + .page_size = PAGE_SIZE, + }, + [E2K_PTE_LEVEL_NUM] = { + .id = E2K_PTE_LEVEL_NUM, + .pt_size = PTE_SIZE, + .page_size = PAGE_SIZE, + .pt_shift = PTE_SHIFT, + .page_shift = PTE_SHIFT, + .pt_mask = PTE_MASK & E2K_VA_PAGE_MASK, + .pt_offset = ~PTE_MASK & E2K_VA_PAGE_MASK, + .pt_index_mask = PTE_MASK ^ PMD_MASK, + .page_mask = PTE_MASK, + .page_offset = ~PTE_MASK, + .ptrs_per_pt = PTRS_PER_PTE, + .is_pte = true, + .is_huge = false, + .dtlb_type = COMMON_DTLB_TYPE, + }, + [E2K_PMD_LEVEL_NUM] = { + .id = E2K_PMD_LEVEL_NUM, + .pt_size = PMD_SIZE, + .pt_shift = PMD_SHIFT, + .pt_mask = PMD_MASK & E2K_VA_PAGE_MASK, + .pt_offset = ~PMD_MASK & E2K_VA_PAGE_MASK, + .pt_index_mask = PMD_MASK ^ PUD_MASK, + .page_mask = PMD_MASK, + .ptrs_per_pt = PTRS_PER_PMD, + .page_size = E2K_2M_PAGE_SIZE, + .page_shift = PMD_SHIFT, + .page_offset = ~PMD_MASK, + .huge_ptes = 1, + .is_pte = false, + .is_huge = true, + .dtlb_type = COMMON_DTLB_TYPE, + }, + [E2K_PUD_LEVEL_NUM] = { + .id = E2K_PUD_LEVEL_NUM, + .pt_size = PUD_SIZE, + .page_size = PAGE_PUD_SIZE, + .pt_shift = PUD_SHIFT, + .page_shift = PUD_SHIFT, + .pt_mask = PUD_MASK & E2K_VA_PAGE_MASK, + .pt_offset = ~PUD_MASK & E2K_VA_PAGE_MASK, + .pt_index_mask = PUD_MASK ^ PGDIR_MASK, + .page_mask = PUD_MASK, + .page_offset = ~PUD_MASK, + .ptrs_per_pt = PTRS_PER_PUD, + .is_pte = false, + .is_huge = true, + .huge_ptes = 1, + .dtlb_type = FULL_ASSOCIATIVE_DTLB_TYPE, + }, + [E2K_PGD_LEVEL_NUM] = { + .id = E2K_PGD_LEVEL_NUM, + .pt_size = PGDIR_SIZE, + .page_size = 
PAGE_PGD_SIZE, + .pt_shift = PGDIR_SHIFT, + .page_shift = PGDIR_SHIFT, + .pt_mask = PGDIR_MASK & E2K_VA_PAGE_MASK, + .pt_offset = ~PGDIR_MASK & E2K_VA_PAGE_MASK, + .pt_index_mask = PGDIR_MASK & E2K_VA_PAGE_MASK, + .page_mask = PGDIR_MASK, + .page_offset = ~PGDIR_MASK, + .ptrs_per_pt = PTRS_PER_PGD, + .is_pte = false, + .is_huge = false, + }, + }, +}; + +const pt_struct_t __nodedata pgtable_struct_e2k_v6_pt_v6 = { + .type = E2K_PT_TYPE, + .name = "primary e2k v6", + .pt_v6 = true, + .pfn_mask = _PAGE_PFN_V6, + .accessed_mask = _PAGE_A_HW_V6, + .dirty_mask = _PAGE_D_V6, + .present_mask = _PAGE_P_V6, + .valid_mask = _PAGE_VALID_V6, + .user_mask = 0ULL, + .priv_mask = _PAGE_PV_V6, + .non_exec_mask = _PAGE_NON_EX_V6, + .exec_mask = 0ULL, + .sw_bit1_mask = _PAGE_SW1_V6, + .sw_bit2_mask = _PAGE_SW2_V6, + .sw_mmio_mask = _PAGE_MMIO_SW_V6, + .ptd_kernel_prot = _PAGE_KERNEL_PT_V6, + .ptd_user_prot = _PAGE_USER_PT_V6, + .levels_num = E2K_PT_LEVELS_NUM, + .get_pte_val_memory_type = &mmu_get_pte_val_memory_type_v6, + .set_pte_val_memory_type = &mmu_set_pte_val_memory_type_v6, + .get_pte_val_memory_type_rule = NULL, + .set_pte_val_memory_type_rule = NULL, + .levels = { + [E2K_PAGES_LEVEL_NUM] = { + .id = E2K_PAGES_LEVEL_NUM, + .page_size = PAGE_SIZE, + }, + [E2K_PTE_LEVEL_NUM] = { + .id = E2K_PTE_LEVEL_NUM, + .pt_size = PTE_SIZE, + .page_size = PAGE_SIZE, + .pt_shift = PTE_SHIFT, + .page_shift = PTE_SHIFT, + .pt_mask = PTE_MASK & E2K_VA_PAGE_MASK, + .pt_offset = ~PTE_MASK & E2K_VA_PAGE_MASK, + .pt_index_mask = PTE_MASK ^ PMD_MASK, + .page_mask = PTE_MASK, + .page_offset = ~PTE_MASK, + .ptrs_per_pt = PTRS_PER_PTE, + .is_pte = true, + .is_huge = false, + .dtlb_type = COMMON_DTLB_TYPE, + }, + [E2K_PMD_LEVEL_NUM] = { + .id = E2K_PMD_LEVEL_NUM, + .pt_size = PMD_SIZE, + .pt_shift = PMD_SHIFT, + .pt_mask = PMD_MASK & E2K_VA_PAGE_MASK, + .pt_offset = ~PMD_MASK & E2K_VA_PAGE_MASK, + .pt_index_mask = PMD_MASK ^ PUD_MASK, + .page_mask = PMD_MASK, + .ptrs_per_pt = PTRS_PER_PMD, + .page_size 
= E2K_2M_PAGE_SIZE, + .page_shift = PMD_SHIFT, + .page_offset = ~PMD_MASK, + .huge_ptes = 1, + .is_pte = false, + .is_huge = true, + .dtlb_type = COMMON_DTLB_TYPE, + }, + [E2K_PUD_LEVEL_NUM] = { + .id = E2K_PUD_LEVEL_NUM, + .pt_size = PUD_SIZE, + .page_size = PAGE_PUD_SIZE, + .pt_shift = PUD_SHIFT, + .page_shift = PUD_SHIFT, + .pt_mask = PUD_MASK & E2K_VA_PAGE_MASK, + .pt_offset = ~PUD_MASK & E2K_VA_PAGE_MASK, + .pt_index_mask = PUD_MASK ^ PGDIR_MASK, + .page_mask = PUD_MASK, + .page_offset = ~PUD_MASK, + .ptrs_per_pt = PTRS_PER_PUD, + .is_pte = false, + .is_huge = true, + .huge_ptes = 1, + .dtlb_type = FULL_ASSOCIATIVE_DTLB_TYPE, + }, + [E2K_PGD_LEVEL_NUM] = { + .id = E2K_PGD_LEVEL_NUM, + .pt_size = PGDIR_SIZE, + .page_size = PAGE_PGD_SIZE, + .pt_shift = PGDIR_SHIFT, + .page_shift = PGDIR_SHIFT, + .pt_mask = PGDIR_MASK & E2K_VA_PAGE_MASK, + .pt_offset = ~PGDIR_MASK & E2K_VA_PAGE_MASK, + .pt_index_mask = PGDIR_MASK & E2K_VA_PAGE_MASK, + .page_mask = PGDIR_MASK, + .page_offset = ~PGDIR_MASK, + .ptrs_per_pt = PTRS_PER_PGD, + .is_pte = false, + .is_huge = false, + }, + }, +}; + +const pt_struct_t __nodedata pgtable_struct_e2k_v6_gp = { + .type = E2K_PT_TYPE, + .name = "guest physical e2k v6", + .pt_v6 = true, + .pfn_mask = _PAGE_PFN_GP, + .accessed_mask = _PAGE_A_HW_GP, + .dirty_mask = _PAGE_D_GP, + .present_mask = _PAGE_P_GP, + .valid_mask = 0ULL, + .user_mask = 0ULL, + .priv_mask = 0ULL, + .non_exec_mask = 0ULL, + .exec_mask = 0ULL, + .sw_bit1_mask = _PAGE_SW1_GP, + .sw_bit2_mask = _PAGE_SW2_GP, + .sw_mmio_mask = _PAGE_MMIO_SW_GP, + .ptd_kernel_prot = _PAGE_KERNEL_PT_GP, + .ptd_user_prot = _PAGE_KERNEL_PT_GP, + .levels_num = E2K_PT_LEVELS_NUM, + .get_pte_val_memory_type = &mmu_get_pte_val_memory_type_gp, + .set_pte_val_memory_type = &mmu_set_pte_val_memory_type_gp, + .get_pte_val_memory_type_rule = &mmu_get_pte_val_memory_type_rule_gp, + .set_pte_val_memory_type_rule = &mmu_set_pte_val_memory_type_rule_gp, + .levels = { + [E2K_PAGES_LEVEL_NUM] = { + .id = 
E2K_PAGES_LEVEL_NUM, + .page_size = PAGE_SIZE, + }, + [E2K_PTE_LEVEL_NUM] = { + .id = E2K_PTE_LEVEL_NUM, + .pt_size = PTE_SIZE, + .page_size = PAGE_SIZE, + .pt_shift = PTE_SHIFT, + .page_shift = PTE_SHIFT, + .pt_mask = PTE_MASK & E2K_VA_PAGE_MASK, + .pt_offset = ~PTE_MASK & E2K_VA_PAGE_MASK, + .pt_index_mask = PTE_MASK ^ PMD_MASK, + .page_mask = PTE_MASK, + .page_offset = ~PTE_MASK, + .ptrs_per_pt = PTRS_PER_PTE, + .is_pte = true, + .is_huge = false, + .dtlb_type = COMMON_DTLB_TYPE, + }, + [E2K_PMD_LEVEL_NUM] = { + .id = E2K_PMD_LEVEL_NUM, + .pt_size = PMD_SIZE, + .pt_shift = PMD_SHIFT, + .pt_mask = PMD_MASK & E2K_VA_PAGE_MASK, + .pt_offset = ~PMD_MASK & E2K_VA_PAGE_MASK, + .pt_index_mask = PMD_MASK ^ PUD_MASK, + .page_mask = PMD_MASK, + .ptrs_per_pt = PTRS_PER_PMD, + .page_size = E2K_2M_PAGE_SIZE, + .page_shift = PMD_SHIFT, + .page_offset = ~PMD_MASK, + .huge_ptes = 1, + .is_pte = false, + .is_huge = true, + .dtlb_type = COMMON_DTLB_TYPE, + }, + [E2K_PUD_LEVEL_NUM] = { + .id = E2K_PUD_LEVEL_NUM, + .pt_size = PUD_SIZE, + .page_size = PAGE_PUD_SIZE, + .pt_shift = PUD_SHIFT, + .page_shift = PUD_SHIFT, + .pt_mask = PUD_MASK & E2K_VA_PAGE_MASK, + .pt_offset = ~PUD_MASK & E2K_VA_PAGE_MASK, + .pt_index_mask = PUD_MASK ^ PGDIR_MASK, + .page_mask = PUD_MASK, + .page_offset = ~PUD_MASK, + .ptrs_per_pt = PTRS_PER_PUD, + .is_pte = false, + .is_huge = true, + .huge_ptes = 1, + .dtlb_type = FULL_ASSOCIATIVE_DTLB_TYPE, + }, + [E2K_PGD_LEVEL_NUM] = { + .id = E2K_PGD_LEVEL_NUM, + .pt_size = PGDIR_SIZE, + .page_size = PAGE_PGD_SIZE, + .pt_shift = PGDIR_SHIFT, + .page_shift = PGDIR_SHIFT, + .pt_mask = PGDIR_MASK & E2K_VA_PAGE_MASK, + .pt_offset = ~PGDIR_MASK & E2K_VA_PAGE_MASK, + .pt_index_mask = PGDIR_MASK & E2K_VA_PAGE_MASK, + .page_mask = PGDIR_MASK, + .page_offset = ~PGDIR_MASK, + .ptrs_per_pt = PTRS_PER_PGD, + .is_pte = false, + .is_huge = false, + }, + }, +}; + diff --git a/arch/e2k/kvm/pv_mmu.h b/arch/e2k/kvm/pv_mmu.h new file mode 100644 index 000000000000..92508f94ee19 --- 
/dev/null
+++ b/arch/e2k/kvm/pv_mmu.h
@@ -0,0 +1,42 @@
+#ifndef __KVM_E2K_PV_MMU_H
+#define __KVM_E2K_PV_MMU_H
+
+#include
+#include
+
+/*
+ * Paravirtualized guest has no hardware shadow register.
+ *
+ * Any write to shadow MMU register should be duplicated by setting
+ * appropriate field at the structures 'hw_ctxt' or 'sw_ctxt'.
+ * So write to nonexistent register can be omitted.
+ *
+ * Any read from nonexistent register can be changed by read
+ * from appropriate field at the structures
+ */
+
+static inline mmu_reg_t read_pv_MMU_CR_reg(struct kvm_vcpu *vcpu)
+{
+	struct kvm_hw_cpu_context *hw_ctxt = &vcpu->arch.hw_ctxt;
+
+	return hw_ctxt->sh_mmu_cr;
+}
+
+static inline void
+write_pv_MMU_CR_reg(struct kvm_vcpu *vcpu, mmu_reg_t value)
+{
+}
+
+static inline mmu_reg_t read_pv_PID_reg(struct kvm_vcpu *vcpu)
+{
+	struct kvm_mmu *mmu = &vcpu->arch.mmu;
+
+	return mmu->pid;
+}
+
+static inline void
+write_pv_PID_reg(struct kvm_vcpu *vcpu, mmu_reg_t value)
+{
+}
+
+#endif	/* __KVM_E2K_PV_MMU_H */
diff --git a/arch/e2k/kvm/runstate.c b/arch/e2k/kvm/runstate.c
new file mode 100644
index 000000000000..8944ccc0cb70
--- /dev/null
+++ b/arch/e2k/kvm/runstate.c
@@ -0,0 +1,81 @@
+/*
+ * This file manages VCPU run state in/out trap/interrupts
+ *
+ * Copyright 2014 Salavat S.
Guiliazov (atic@mcst.ru)
+ */
+
+#include
+#include
+#include
+#include
+
+
+/* guest VCPU run state should be updated in traps and interrupts */
+
+void kvm_set_guest_runstate_in_user_trap(void)
+{
+	thread_info_t *ti = current_thread_info();
+	struct kvm_vcpu *vcpu;
+
+	if (!test_ti_thread_flag(ti, TIF_VIRTUALIZED_GUEST))
+		return;
+	vcpu = ti->vcpu;
+	BUG_ON(vcpu == NULL);
+	BUG_ON(!psr_and_upsr_irqs_disabled());
+	WARN_ON(kvm_get_guest_vcpu_runstate(vcpu) != RUNSTATE_running);
+	kvm_do_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_in_trap);
+}
+void kvm_set_guest_runstate_out_user_trap(void)
+{
+	thread_info_t *ti = current_thread_info();
+	struct kvm_vcpu *vcpu;
+
+	if (!test_ti_thread_flag(ti, TIF_VIRTUALIZED_GUEST))
+		return;
+	vcpu = ti->vcpu;
+	if (vcpu == NULL)
+		return;	/* It is VIRQ VCPU: run state is unused */
+	BUG_ON(!psr_and_upsr_irqs_disabled());
+	WARN_ON(kvm_get_guest_vcpu_runstate(vcpu) != RUNSTATE_in_trap);
+	kvm_do_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_running);
+}
+int kvm_set_guest_runstate_in_kernel_trap(void)
+{
+	thread_info_t *ti = current_thread_info();
+	struct kvm_vcpu *vcpu;
+	int cur_runstate;
+
+	if (!test_ti_thread_flag(ti, TIF_VIRTUALIZED_GUEST))
+		return -1;
+	vcpu = ti->vcpu;
+	if (vcpu == NULL)
+		return -1;	/* It is VIRQ VCPU: run state is unused */
+	BUG_ON(!psr_and_upsr_irqs_disabled());
+	cur_runstate = kvm_get_guest_vcpu_runstate(vcpu);
+	if (cur_runstate == RUNSTATE_offline)
+		/* VCPU is not yet started */
+		return -1;
+	WARN_ON(cur_runstate == RUNSTATE_running &&
+		!test_ti_thread_flag(ti, TIF_GENERIC_HYPERCALL));
+	kvm_do_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_in_trap);
+	return cur_runstate;
+}
+void kvm_set_guest_runstate_out_kernel_trap(int saved_runstate)
+{
+	thread_info_t *ti = current_thread_info();
+	struct kvm_vcpu *vcpu;
+	int cur_runstate;
+
+	if (!test_ti_thread_flag(ti, TIF_VIRTUALIZED_GUEST))
+		return;
+	vcpu = ti->vcpu;
+	if (vcpu == NULL)
+		return;	/* It is VIRQ VCPU: run state
is unused */ + BUG_ON(!psr_and_upsr_irqs_disabled()); + cur_runstate = kvm_get_guest_vcpu_runstate(vcpu); + if (cur_runstate == RUNSTATE_offline) + /* VCPU is not yet started */ + return; + WARN_ON(cur_runstate != RUNSTATE_in_trap); + kvm_do_update_guest_vcpu_current_runstate(vcpu, saved_runstate); +} diff --git a/arch/e2k/kvm/sic-nbsr.c b/arch/e2k/kvm/sic-nbsr.c new file mode 100644 index 000000000000..014c08da70d8 --- /dev/null +++ b/arch/e2k/kvm/sic-nbsr.c @@ -0,0 +1,2802 @@ +/* + * + * North Bridge registers emulation for guest VM + * + * Copyright 2019 MCST, Salavat S. Gilyazov (atic@mcst.ru) + */ + +#include +#include +#include +#include + +#include +#include + +#include "sic-nbsr.h" +#include "mmu.h" +#include "gaccess.h" +#include "pic.h" + +#if 0 +#define nbsr_debug(fmt, arg...) pr_warn(fmt, ##arg) +#else +#define nbsr_debug(fmt, arg...) +#endif + +#if 0 +#define nbsr_warn(fmt, arg...) pr_warn(fmt, ##arg) +#else +#define nbsr_warn(fmt, arg...) +#endif + +#define ALIGN_DOWN_TO_MASK(addr, mask) ((addr) & ~(mask)) +#define ALIGN_UP_TO_MASK(addr, mask) (((addr) + (mask)) & ~(mask)) +#define ALIGN_DOWN_TO_SIZE(addr, size) \ + (((size) == 0) ? (addr) : ALIGN_DOWN_TO_MASK(addr, ((size)-1))) +#define ALIGN_UP_TO_SIZE(addr, size) \ + (((size) == 0) ? 
(addr) : ALIGN_UP_TO_MASK(addr, ((size)-1))) + +#define NBSR_LOW_MEMORY_BOUND (1UL << 32) +#define NBSR_HI_MEMORY_BOUND (1UL << 48) /* physical memory size */ + +#define NBSR_ADDR64(hi, lo) ((((u64)hi) << 32) + ((u64)lo)) + +#define BC_MP_T_CORR_ADDR(hreg, reg) \ + ((((u64)hreg.E2K_MP_T_CORR_H_addr) << 32) + \ + (((u64)reg.E2K_MP_T_CORR_addr) << PAGE_SHIFT)) + +static inline struct kvm_nbsr *to_nbsr(struct kvm_io_device *dev) +{ + return container_of(dev, struct kvm_nbsr, dev); +} + +/* Max number of nodes is now 4, so link can be from 1 to 3 */ +static inline int nbsr_get_node_to_node_link(int node_on, int node_to) +{ + int link = 0; + + if (node_on == 0) { + if (node_to == 1) { + link = 1; + } else if (node_to == 2) { + link = 2; + } else if (node_to == 3) { + link = 3; + } else { + ASSERT(false); + } + } else if (node_on == 1) { + if (node_to == 2) { + link = 1; + } else if (node_to == 3) { + link = 2; + } else if (node_to == 0) { + link = 3; + } else { + ASSERT(false); + } + } else if (node_on == 2) { + if (node_to == 3) { + link = 1; + } else if (node_to == 0) { + link = 2; + } else if (node_to == 1) { + link = 3; + } else { + ASSERT(false); + } + } else if (node_on == 3) { + if (node_to == 0) { + link = 1; + } else if (node_to == 1) { + link = 2; + } else if (node_to == 2) { + link = 3; + } else { + ASSERT(false); + } + } else { + ASSERT(false); + } + ASSERT(link >= 1 && link <= 3); + return link; +} + +static inline bool nbsr_is_node_online(struct kvm_nbsr *nbsr, int node_id) +{ + return !!(nbsr->nodes_online & (1 << node_id)); +} + +static inline void nbsr_set_node_online(struct kvm_nbsr *nbsr, int node_id) +{ + nbsr->nodes_online |= (1 << node_id); +} + +static inline int nbsr_in_range(struct kvm_nbsr *nbsr, gpa_t addr) +{ + return (addr >= nbsr->base) && (addr < nbsr->base + nbsr->size); +} + +static inline int nbsr_addr_to_node(struct kvm_nbsr *nbsr, gpa_t addr) +{ + int node_id; + + if (!nbsr_in_range(nbsr, addr)) { + pr_err("%s(): address 0x%llx is out 
of North Bridge " + "registers space from 0x%llx to 0x%llx\n", + __func__, addr, nbsr->base, nbsr->base + nbsr->size); + BUG_ON(true); + } + node_id = (addr - nbsr->base) / nbsr->node_size; + return node_id; +} + +static inline unsigned nbsr_addr_to_reg_offset(struct kvm_nbsr *nbsr, + gpa_t addr) +{ + unsigned reg_offset; + + if (!nbsr_in_range(nbsr, addr)) { + pr_err("%s(): address 0x%llx is out of North Bridge " + "registers space from 0x%llx to 0x%llx\n", + __func__, addr, nbsr->base, nbsr->base + nbsr->size); + BUG_ON(true); + } + reg_offset = addr & (nbsr->node_size - 1); + return reg_offset; +} + +static inline unsigned int offset_to_no(unsigned int reg_offset) +{ + return reg_offset / 4; +} + +static inline bool nbsr_bc_reg_in_range(unsigned int reg_offset) +{ + return reg_offset >= BC_MM_REG_BASE && reg_offset < BC_MM_REG_END; +} + +static inline unsigned int nbsr_bc_reg_offset_to_no(unsigned int reg_offset) +{ + if (!nbsr_bc_reg_in_range(reg_offset)) { + pr_err("%s(): offset 0x%x is out of North Bridge " + "BC registers space from 0x%04x to 0x%04x\n", + __func__, reg_offset, BC_MM_REG_BASE, BC_MM_REG_END); + BUG_ON(true); + } + return (reg_offset - BC_MM_REG_BASE) / 4; +} + +static inline unsigned int nbsr_get_rt_mlo_offset(int node_id) +{ + if (node_id == 0) + return SIC_rt_mlo0; + else if (node_id == 1) + return SIC_rt_mlo1; + else if (node_id == 2) + return SIC_rt_mlo2; + else if (node_id == 3) + return SIC_rt_mlo3; + else + ASSERT(false); + return -1; +} + +static inline unsigned int nbsr_get_rt_mhi_offset(int node_id) +{ + if (node_id == 0) + return SIC_rt_mhi0; + else if (node_id == 1) + return SIC_rt_mhi1; + else if (node_id == 2) + return SIC_rt_mhi2; + else if (node_id == 3) + return SIC_rt_mhi3; + else + ASSERT(false); + return -1; +} + +static inline unsigned int nbsr_get_rt_pcim_offset(int node_id) +{ + if (node_id == 0) + return SIC_rt_pcim0; + else if (node_id == 1) + return SIC_rt_pcim1; + else if (node_id == 2) + return SIC_rt_pcim2; + else 
if (node_id == 3) + return SIC_rt_pcim3; + else + ASSERT(false); + return -1; +} + +static inline unsigned int nbsr_get_rt_pciio_offset(int node_id) +{ + if (node_id == 0) + return SIC_rt_pciio0; + else if (node_id == 1) + return SIC_rt_pciio1; + else if (node_id == 2) + return SIC_rt_pciio2; + else if (node_id == 3) + return SIC_rt_pciio3; + else + ASSERT(false); + return -1; +} + +static inline unsigned int nbsr_get_rt_pcimp_b_offset(int node_id) +{ + if (node_id == 0) + return SIC_rt_pcimp_b0; + else if (node_id == 1) + return SIC_rt_pcimp_b1; + else if (node_id == 2) + return SIC_rt_pcimp_b2; + else if (node_id == 3) + return SIC_rt_pcimp_b3; + else + ASSERT(false); + return -1; +} + +static inline unsigned int nbsr_get_rt_pcimp_e_offset(int node_id) +{ + if (node_id == 0) + return SIC_rt_pcimp_e0; + else if (node_id == 1) + return SIC_rt_pcimp_e1; + else if (node_id == 2) + return SIC_rt_pcimp_e2; + else if (node_id == 3) + return SIC_rt_pcimp_e3; + else + ASSERT(false); + return -1; +} + +static inline void +nbsr_debug_dump_rt_mlo(int node_id, unsigned int reg_offset, bool write, + unsigned int reg_value, char *reg_name) +{ + e2k_rt_mlo_struct_t rt_mlo; + + rt_mlo.E2K_RT_MLO_reg = reg_value; + nbsr_debug("%s(): node #%d %s %s 0x%04x [%08x:%08x]\n", + __func__, node_id, (write) ? "write" : "read", + reg_name, reg_offset, + (rt_mlo.E2K_RT_MLO_bgn << E2K_SIC_ALIGN_RT_MLO), + (rt_mlo.E2K_RT_MLO_end << E2K_SIC_ALIGN_RT_MLO) | + (E2K_SIC_SIZE_RT_MLO - 1)); +} + +static inline void +nbsr_debug_dump_rt_mhi(int node_id, unsigned int reg_offset, bool write, + unsigned int reg_value, char *reg_name) +{ + e2k_rt_mhi_struct_t rt_mhi; + + rt_mhi.E2K_RT_MHI_reg = reg_value; + nbsr_debug("%s(): node #%d %s %s 0x%04x [%016llx:%016llx]\n", + __func__, node_id, (write) ? 
"write" : "read", + reg_name, reg_offset, + ((u64)rt_mhi.E2K_RT_MHI_bgn << E2K_SIC_ALIGN_RT_MHI), + ((u64)rt_mhi.E2K_RT_MHI_end << E2K_SIC_ALIGN_RT_MHI) | + (E2K_SIC_SIZE_RT_MHI - 1)); +} + +static inline void +nbsr_debug_dump_rt_lcfg(int node_id, unsigned int reg_offset, bool write, + unsigned int reg_value, char *reg_name) +{ + e2k_rt_lcfg_struct_t rt_lcfg; + int pn; + + E2K_RT_LCFG_reg(rt_lcfg) = reg_value; + pn = E8C_RT_LCFG_pln(rt_lcfg); + nbsr_debug("%s(): node #%d %s %s 0x%04x link to node #%d %s boot %s " + "IO link %s intercluster %s\n", + __func__, node_id, (write) ? "write" : "read", + reg_name, reg_offset, pn, + (E2K_RT_LCFG_vp(rt_lcfg)) ? "ON" : "OFF", + (E2K_RT_LCFG_vb(rt_lcfg)) ? "ON" : "OFF", + (E2K_RT_LCFG_vio(rt_lcfg)) ? "ON" : "OFF", + (E2K_RT_LCFG_vics(rt_lcfg)) ? "ON" : "OFF"); +} + +static inline void +nbsr_debug_dump_rt_pcim(int node_id, unsigned int reg_offset, bool write, + unsigned int reg_value, char *reg_name) +{ + e2k_rt_pcim_struct_t rt_pcim; + + rt_pcim.E2K_RT_PCIM_reg = reg_value; + nbsr_debug("%s(): node #%d %s %s 0x%04x [%08x:%08x]\n", + __func__, node_id, (write) ? "write" : "read", + reg_name, reg_offset, + (rt_pcim.E2K_RT_PCIM_bgn << E2K_SIC_ALIGN_RT_PCIM), + (rt_pcim.E2K_RT_PCIM_end << E2K_SIC_ALIGN_RT_PCIM) | + (E2K_SIC_SIZE_RT_PCIM - 1)); +} + +static inline void +nbsr_debug_dump_rt_pciio(int node_id, unsigned int reg_offset, bool write, + unsigned int reg_value, char *reg_name) +{ + e2k_rt_pciio_struct_t rt_pciio; + + rt_pciio.E2K_RT_PCIIO_reg = reg_value; + nbsr_debug("%s(): node #%d %s %s 0x%04x [%08x:%08x]\n", + __func__, node_id, (write) ? 
"write" : "read", + reg_name, reg_offset, + (rt_pciio.E2K_RT_PCIIO_bgn << E2K_SIC_ALIGN_RT_PCIIO), + (rt_pciio.E2K_RT_PCIIO_end << E2K_SIC_ALIGN_RT_PCIIO) | + (E2K_SIC_SIZE_RT_PCIIO - 1)); +} + +static inline void +nbsr_debug_dump_rt_pcimp(int node_id, unsigned int reg_offset, bool write, + unsigned int reg_value, char *reg_name, bool end) +{ + e2k_rt_pcimp_struct_t rt_pcimp; + + rt_pcimp.E2K_RT_PCIMP_reg = reg_value; + nbsr_debug("%s(): node #%d %s %s 0x%04x %s : %08x\n", + __func__, node_id, (write) ? "write" : "read", + reg_name, reg_offset, + (end) ? "end " : "base", + (!end) ? (rt_pcimp.E2K_RT_PCIMP_bgn << E2K_SIC_ALIGN_RT_PCIMP) + : + (rt_pcimp.E2K_RT_PCIMP_end << E2K_SIC_ALIGN_RT_PCIMP) | + (E2K_SIC_SIZE_RT_PCIMP - 1)); +} + +static inline void +nbsr_debug_dump_rt_pcicfgb(int node_id, unsigned int reg_offset, bool write, + unsigned int reg_value, char *reg_name) +{ + e2k_rt_pcicfgb_struct_t rt_pcicfgb; + + rt_pcicfgb.E2K_RT_PCICFGB_reg = reg_value; + nbsr_debug("%s(): node #%d %s %s 0x%04x : %08x\n", + __func__, node_id, (write) ? "write" : "read", + reg_name, reg_offset, + rt_pcicfgb.E2K_RT_PCICFGB_bgn << E2K_SIC_ALIGN_RT_PCICFGB); +} + +static inline void +nbsr_debug_dump_rt_ioapic(int node_id, unsigned int reg_offset, bool write, + unsigned int reg_value, char *reg_name) +{ + e2k_rt_ioapic_struct_t rt_ioapic; + u32 start, end; + + rt_ioapic.E2K_RT_IOAPIC_reg = reg_value; + start = (rt_ioapic.E2K_RT_IOAPIC_bgn << E2K_SIC_ALIGN_RT_IOAPIC) | + (IO_EPIC_DEFAULT_PHYS_BASE & + E2K_SIC_IOAPIC_FIX_ADDR_MASK); + end = start + (E2K_SIC_IOAPIC_SIZE- 1); + nbsr_debug("%s(): node #%d %s %s 0x%04x [%08x:%08x]\n", + __func__, node_id, (write) ? 
"write" : "read", + reg_name, reg_offset, start, end); +} + +static inline void +nbsr_debug_dump_rt_msi(int node_id, unsigned int reg_offset, bool write, + unsigned int reg_value, char *reg_name) +{ + e2k_rt_msi_struct_t rt_msi; + + rt_msi.E2K_RT_MSI_reg = reg_value; + nbsr_debug("%s(): node #%d %s %s 0x%04x [%08x:%08x]\n", + __func__, node_id, (write) ? "write" : "read", + reg_name, reg_offset, + (rt_msi.E2K_RT_MSI_bgn << E2K_SIC_ALIGN_RT_MSI), + (rt_msi.E2K_RT_MSI_end << E2K_SIC_ALIGN_RT_MSI) | + (E2K_SIC_SIZE_RT_MSI - 1)); +} + +static inline void +nbsr_debug_dump_rt_msi_h(int node_id, unsigned int reg_offset, bool write, + unsigned int reg_value, char *reg_name) +{ + e2k_rt_msi_h_struct_t rt_msi_h; + + rt_msi_h.E2K_RT_MSI_H_reg = reg_value; + nbsr_debug("%s(): node #%d %s %s 0x%04x [%08x:%08x]\n", + __func__, node_id, (write) ? "write" : "read", + reg_name, reg_offset, + rt_msi_h.E2K_RT_MSI_H_bgn, rt_msi_h.E2K_RT_MSI_H_end); +} + +static inline void +nbsr_debug_dump_iommu(int node_id, unsigned int reg_offset, unsigned long val, + bool write, char *reg_name, bool dword) +{ + nbsr_debug("%s(): node #%d %svalue 0x%lx %s %s 0x%04x\n", + __func__, node_id, (dword) ? "64-bit " : "", val, + (write) ? "write to" : "read from", reg_name, reg_offset); +} + +static inline void +nbsr_debug_dump_pmc(int node_id, unsigned int reg_offset, bool write, + unsigned int reg_value, char *reg_name) +{ + nbsr_debug("%s(): node #%d %s %s 0x%04x\n", + __func__, node_id, (write) ? "write" : "read", reg_name, reg_offset); +} + +static inline void +nbsr_debug_dump_l3(int node_id, unsigned int reg_offset, bool write, + unsigned int reg_value, char *reg_name) +{ + nbsr_debug("%s(): node #%d %s %s 0x%04x\n", + __func__, node_id, (write) ? 
"write" : "read", reg_name, reg_offset); +} + +static inline void +nbsr_debug_dump_prepic(int node_id, unsigned int reg_offset, + unsigned int val, bool write, + char *reg_name) +{ + nbsr_debug("%s(): node #%d 32-bit value 0x%x %s %s 0x%04x\n", + __func__, node_id, val, (write) ? "write to" : "read from", + reg_name, reg_offset); +} + +static int node_nbsr_read_rt_mem(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 *reg_val) +{ + kvm_nbsr_regs_t *node_nbsr; + char *reg_name; + bool is_rt_mlo = false; + bool is_rt_mhi = false; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + switch (reg_offset) { + case SIC_rt_mlo0: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_mlo0)]; + reg_name = "rt_mlo0"; + is_rt_mlo = true; + break; + case SIC_rt_mlo1: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_mlo1)]; + reg_name = "rt_mlo1"; + is_rt_mlo = true; + break; + case SIC_rt_mlo2: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_mlo2)]; + reg_name = "rt_mlo2"; + is_rt_mlo = true; + break; + case SIC_rt_mlo3: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_mlo3)]; + reg_name = "rt_mlo3"; + is_rt_mlo = true; + break; + case SIC_rt_mhi0: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_mhi0)]; + reg_name = "rt_mhi0"; + is_rt_mhi = true; + break; + case SIC_rt_mhi1: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_mhi1)]; + reg_name = "rt_mhi1"; + is_rt_mhi = true; + break; + case SIC_rt_mhi2: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_mhi2)]; + reg_name = "rt_mhi2"; + is_rt_mhi = true; + break; + case SIC_rt_mhi3: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_mhi3)]; + reg_name = "rt_mhi3"; + is_rt_mhi = true; + break; + default: + *reg_val = -1; + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is not yet " + "supported, so return 0x%x\n", + __func__, node_id, reg_offset, *reg_val); + reg_name = "???"; + break; + } + mutex_unlock(&nbsr->lock); + + if (is_rt_mhi) { + nbsr_debug_dump_rt_mhi(node_id, reg_offset, false, *reg_val, 
+ reg_name); + } else if (is_rt_mlo) { + nbsr_debug_dump_rt_mlo(node_id, reg_offset, false, *reg_val, + reg_name); + } else { + nbsr_debug("%s(): node #%d %s offset 0x%04x value 0x%x\n", + __func__, node_id, reg_name, reg_offset, *reg_val); + } + + return 0; +} + +static void node_nbsr_write_rt_mem(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 reg_value) +{ + kvm_nbsr_regs_t *node_nbsr; + char *reg_name; + bool is_rt_mlo = false; + bool is_rt_mhi = false; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + switch (reg_offset) { + case SIC_rt_mlo0: + node_nbsr->regs[offset_to_no(SIC_rt_mlo0)] = reg_value; + reg_name = "rt_mlo0"; + is_rt_mlo = true; + break; + case SIC_rt_mlo1: + node_nbsr->regs[offset_to_no(SIC_rt_mlo1)] = reg_value; + reg_name = "rt_mlo1"; + is_rt_mlo = true; + break; + case SIC_rt_mlo2: + node_nbsr->regs[offset_to_no(SIC_rt_mlo2)] = reg_value; + reg_name = "rt_mlo2"; + is_rt_mlo = true; + break; + case SIC_rt_mlo3: + node_nbsr->regs[offset_to_no(SIC_rt_mlo3)] = reg_value; + reg_name = "rt_mlo3"; + is_rt_mlo = true; + break; + case SIC_rt_mhi0: + node_nbsr->regs[offset_to_no(SIC_rt_mhi0)] = reg_value; + reg_name = "rt_mhi0"; + is_rt_mhi = true; + break; + case SIC_rt_mhi1: + node_nbsr->regs[offset_to_no(SIC_rt_mhi1)] = reg_value; + reg_name = "rt_mhi1"; + is_rt_mhi = true; + break; + case SIC_rt_mhi2: + node_nbsr->regs[offset_to_no(SIC_rt_mhi2)] = reg_value; + reg_name = "rt_mhi2"; + is_rt_mhi = true; + break; + case SIC_rt_mhi3: + node_nbsr->regs[offset_to_no(SIC_rt_mhi3)] = reg_value; + reg_name = "rt_mhi3"; + is_rt_mhi = true; + break; + default: + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is not yet " + "supported, so ignore write\n", + __func__, node_id, reg_offset); + reg_name = "???"; + break; + } + mutex_unlock(&nbsr->lock); + + if (is_rt_mhi) { + nbsr_debug_dump_rt_mhi(node_id, reg_offset, true, reg_value, + reg_name); + } else if (is_rt_mlo) { + nbsr_debug_dump_rt_mlo(node_id, reg_offset, 
true, reg_value, + reg_name); + } else { + nbsr_debug("%s(): node #%d %s offset 0x%04x value 0x%x\n", + __func__, node_id, reg_name, reg_offset, reg_value); + } +} + +static int node_nbsr_read_rt_lcfg(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 *reg_val) +{ + kvm_nbsr_regs_t *node_nbsr; + char *reg_name; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + switch (reg_offset) { + case SIC_rt_lcfg0: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_lcfg0)]; + reg_name = "rt_lcfg0"; + break; + case SIC_rt_lcfg1: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_lcfg1)]; + reg_name = "rt_lcfg1"; + break; + case SIC_rt_lcfg2: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_lcfg2)]; + reg_name = "rt_lcfg2"; + break; + case SIC_rt_lcfg3: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_lcfg3)]; + reg_name = "rt_lcfg3"; + break; + default: + *reg_val = -1; + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is not yet " + "supported, so return 0x%x\n", + __func__, node_id, reg_offset, *reg_val); + reg_name = "???"; + break; + } + mutex_unlock(&nbsr->lock); + + nbsr_debug_dump_rt_lcfg(node_id, reg_offset, false, *reg_val, + reg_name); + + return 0; +} + +static void node_nbsr_write_rt_lcfg(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 reg_value) +{ + kvm_nbsr_regs_t *node_nbsr; + char *reg_name; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + switch (reg_offset) { + case SIC_rt_lcfg0: + node_nbsr->regs[offset_to_no(SIC_rt_lcfg0)] = reg_value; + reg_name = "rt_lcfg0"; + break; + case SIC_rt_lcfg1: + node_nbsr->regs[offset_to_no(SIC_rt_lcfg1)] = reg_value; + reg_name = "rt_lcfg1"; + break; + case SIC_rt_lcfg2: + node_nbsr->regs[offset_to_no(SIC_rt_lcfg2)] = reg_value; + reg_name = "rt_lcfg2"; + break; + case SIC_rt_lcfg3: + node_nbsr->regs[offset_to_no(SIC_rt_lcfg3)] = reg_value; + reg_name = "rt_lcfg3"; + break; + default: + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is not yet " + 
"supported, so ignore write\n", + __func__, node_id, reg_offset); + reg_name = "???"; + break; + } + mutex_unlock(&nbsr->lock); + + nbsr_debug_dump_rt_lcfg(node_id, reg_offset, true, reg_value, + reg_name); +} + +static int node_nbsr_read_rt_pcim(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 *reg_val) +{ + kvm_nbsr_regs_t *node_nbsr; + char *reg_name; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + switch (reg_offset) { + case SIC_rt_pcim0: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_pcim0)]; + reg_name = "rt_pcim0"; + break; + case SIC_rt_pcim1: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_pcim1)]; + reg_name = "rt_pcim1"; + break; + case SIC_rt_pcim2: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_pcim2)]; + reg_name = "rt_pcim2"; + break; + case SIC_rt_pcim3: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_pcim3)]; + reg_name = "rt_pcim3"; + break; + default: + *reg_val = -1; + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is not yet " + "supported, so return 0x%x\n", + __func__, node_id, reg_offset, *reg_val); + reg_name = "???"; + break; + } + mutex_unlock(&nbsr->lock); + + nbsr_debug_dump_rt_pcim(node_id, reg_offset, false, *reg_val, + reg_name); + + return 0; +} + +static void node_nbsr_write_rt_pcim(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 reg_value) +{ + kvm_nbsr_regs_t *node_nbsr; + char *reg_name; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + switch (reg_offset) { + case SIC_rt_pcim0: + node_nbsr->regs[offset_to_no(SIC_rt_pcim0)] = reg_value; + reg_name = "rt_pcim0"; + break; + case SIC_rt_pcim1: + node_nbsr->regs[offset_to_no(SIC_rt_pcim1)] = reg_value; + reg_name = "rt_pcim1"; + break; + case SIC_rt_pcim2: + node_nbsr->regs[offset_to_no(SIC_rt_pcim2)] = reg_value; + reg_name = "rt_pcim2"; + break; + case SIC_rt_pcim3: + node_nbsr->regs[offset_to_no(SIC_rt_pcim3)] = reg_value; + reg_name = "rt_pcim3"; + break; + default: + pr_err("%s(): node #%d 
NBSR reg with offset 0x%04x is not yet " + "supported, so ignore write\n", + __func__, node_id, reg_offset); + reg_name = "???"; + break; + } + mutex_unlock(&nbsr->lock); + + nbsr_debug_dump_rt_pcim(node_id, reg_offset, true, reg_value, + reg_name); +} + +static int node_nbsr_read_rt_pciio(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 *reg_val) +{ + kvm_nbsr_regs_t *node_nbsr; + char *reg_name; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + switch (reg_offset) { + case SIC_rt_pciio0: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_pciio0)]; + reg_name = "rt_pciio0"; + break; + case SIC_rt_pciio1: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_pciio1)]; + reg_name = "rt_pciio1"; + break; + case SIC_rt_pciio2: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_pciio2)]; + reg_name = "rt_pciio2"; + break; + case SIC_rt_pciio3: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_pciio3)]; + reg_name = "rt_pciio3"; + break; + default: + *reg_val = -1; + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is not yet " + "supported, so return 0x%x\n", + __func__, node_id, reg_offset, *reg_val); + reg_name = "???"; + break; + } + mutex_unlock(&nbsr->lock); + + nbsr_debug_dump_rt_pciio(node_id, reg_offset, false, *reg_val, + reg_name); + + return 0; +} + +static void node_nbsr_write_rt_pciio(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 reg_value) +{ + kvm_nbsr_regs_t *node_nbsr; + char *reg_name; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + switch (reg_offset) { + case SIC_rt_pciio0: + node_nbsr->regs[offset_to_no(SIC_rt_pciio0)] = reg_value; + reg_name = "rt_pciio0"; + break; + case SIC_rt_pciio1: + node_nbsr->regs[offset_to_no(SIC_rt_pciio1)] = reg_value; + reg_name = "rt_pciio1"; + break; + case SIC_rt_pciio2: + node_nbsr->regs[offset_to_no(SIC_rt_pciio2)] = reg_value; + reg_name = "rt_pciio2"; + break; + case SIC_rt_pciio3: + node_nbsr->regs[offset_to_no(SIC_rt_pciio3)] = reg_value; 
+ reg_name = "rt_pciio3"; + break; + default: + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is not yet " + "supported, so ignore write\n", + __func__, node_id, reg_offset); + reg_name = "???"; + break; + } + mutex_unlock(&nbsr->lock); + + nbsr_debug_dump_rt_pciio(node_id, reg_offset, true, reg_value, + reg_name); +} + +static int node_nbsr_read_rt_pcimp_b(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 *reg_val) +{ + kvm_nbsr_regs_t *node_nbsr; + char *reg_name; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + switch (reg_offset) { + case SIC_rt_pcimp_b0: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_pcimp_b0)]; + reg_name = "rt_pcimp_b0"; + break; + case SIC_rt_pcimp_b1: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_pcimp_b1)]; + reg_name = "rt_pcimp_b1"; + break; + case SIC_rt_pcimp_b2: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_pcimp_b2)]; + reg_name = "rt_pcimp_b2"; + break; + case SIC_rt_pcimp_b3: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_pcimp_b3)]; + reg_name = "rt_pcimp_b3"; + break; + default: + *reg_val = -1; + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is not yet " + "supported, so return 0x%x\n", + __func__, node_id, reg_offset, *reg_val); + reg_name = "???"; + break; + } + mutex_unlock(&nbsr->lock); + + nbsr_debug_dump_rt_pcimp(node_id, reg_offset, false, *reg_val, + reg_name, false); + + return 0; +} + +static int node_nbsr_read_rt_pcimp_e(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 *reg_val) +{ + kvm_nbsr_regs_t *node_nbsr; + char *reg_name; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + switch (reg_offset) { + case SIC_rt_pcimp_e0: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_pcimp_e0)]; + reg_name = "rt_pcimp_e0"; + break; + case SIC_rt_pcimp_e1: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_pcimp_e1)]; + reg_name = "rt_pcimp_e1"; + break; + case SIC_rt_pcimp_e2: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_pcimp_e2)]; 
+ reg_name = "rt_pcimp_e2"; + break; + case SIC_rt_pcimp_e3: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_pcimp_e3)]; + reg_name = "rt_pcimp_e3"; + break; + default: + *reg_val = -1; + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is not yet " + "supported, so return 0x%x\n", + __func__, node_id, reg_offset, *reg_val); + reg_name = "???"; + break; + } + mutex_unlock(&nbsr->lock); + + nbsr_debug_dump_rt_pcimp(node_id, reg_offset, false, *reg_val, + reg_name, true); + + return 0; +} + +static int node_nbsr_read_rt_pcicfgb(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 *reg_val) +{ + kvm_nbsr_regs_t *node_nbsr; + char *reg_name; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_pcicfgb)]; + reg_name = "rt_pcicfgb"; + mutex_unlock(&nbsr->lock); + + nbsr_debug_dump_rt_pcicfgb(node_id, reg_offset, false, *reg_val, reg_name); + + return 0; +} + +static void node_nbsr_write_rt_pcimp_b(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 reg_value) +{ + kvm_nbsr_regs_t *node_nbsr; + char *reg_name; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + switch (reg_offset) { + case SIC_rt_pcimp_b0: + node_nbsr->regs[offset_to_no(SIC_rt_pcimp_b0)] = reg_value; + reg_name = "rt_pcimp_b0"; + break; + case SIC_rt_pcimp_b1: + node_nbsr->regs[offset_to_no(SIC_rt_pcimp_b1)] = reg_value; + reg_name = "rt_pcimp_b1"; + break; + case SIC_rt_pcimp_b2: + node_nbsr->regs[offset_to_no(SIC_rt_pcimp_b2)] = reg_value; + reg_name = "rt_pcimp_b2"; + break; + case SIC_rt_pcimp_b3: + node_nbsr->regs[offset_to_no(SIC_rt_pcimp_b3)] = reg_value; + reg_name = "rt_pcimp_b3"; + break; + default: + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is not yet " + "supported, so ignore write\n", + __func__, node_id, reg_offset); + reg_name = "???"; + break; + } + mutex_unlock(&nbsr->lock); + + nbsr_debug_dump_rt_pcimp(node_id, reg_offset, true, reg_value, + reg_name, false); +} + 
+static void node_nbsr_write_rt_pcimp_e(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 reg_value) +{ + kvm_nbsr_regs_t *node_nbsr; + char *reg_name; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + switch (reg_offset) { + case SIC_rt_pcimp_e0: + node_nbsr->regs[offset_to_no(SIC_rt_pcimp_e0)] = reg_value; + reg_name = "rt_pcimp_e0"; + break; + case SIC_rt_pcimp_e1: + node_nbsr->regs[offset_to_no(SIC_rt_pcimp_e1)] = reg_value; + reg_name = "rt_pcimp_e1"; + break; + case SIC_rt_pcimp_e2: + node_nbsr->regs[offset_to_no(SIC_rt_pcimp_e2)] = reg_value; + reg_name = "rt_pcimp_e2"; + break; + case SIC_rt_pcimp_e3: + node_nbsr->regs[offset_to_no(SIC_rt_pcimp_e3)] = reg_value; + reg_name = "rt_pcimp_e3"; + break; + default: + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is not yet " + "supported, so ignore write\n", + __func__, node_id, reg_offset); + reg_name = "???"; + break; + } + mutex_unlock(&nbsr->lock); + + nbsr_debug_dump_rt_pcimp(node_id, reg_offset, true, reg_value, + reg_name, true); +} + +static void node_nbsr_write_rt_pcicfgb(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 reg_value) +{ + kvm_nbsr_regs_t *node_nbsr; + char *reg_name; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + node_nbsr->regs[offset_to_no(SIC_rt_pcicfgb)] = reg_value; + reg_name = "rt_pcicfgb"; + mutex_unlock(&nbsr->lock); + + nbsr_debug_dump_rt_pcicfgb(node_id, reg_offset, true, reg_value, reg_name); +} + +static int node_nbsr_read_rt_ioapic(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 *reg_val) +{ + kvm_nbsr_regs_t *node_nbsr; + char *reg_name; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + switch (reg_offset) { + case SIC_rt_ioapic0: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_ioapic0)]; + reg_name = "rt_ioapic0"; + break; + case SIC_rt_ioapic1: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_ioapic1)]; + reg_name = "rt_ioapic1"; + break; + case 
SIC_rt_ioapic2: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_ioapic2)]; + reg_name = "rt_ioapic2"; + break; + case SIC_rt_ioapic3: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_ioapic3)]; + reg_name = "rt_ioapic3"; + break; + default: + *reg_val = -1; + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is not yet " + "supported, so return 0x%x\n", + __func__, node_id, reg_offset, *reg_val); + reg_name = "???"; + break; + } + mutex_unlock(&nbsr->lock); + + nbsr_debug_dump_rt_ioapic(node_id, reg_offset, false, *reg_val, + reg_name); + + return 0; +} + +static void node_nbsr_write_rt_ioapic(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 reg_value) +{ + kvm_nbsr_regs_t *node_nbsr; + char *reg_name; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + switch (reg_offset) { + case SIC_rt_ioapic0: + node_nbsr->regs[offset_to_no(SIC_rt_ioapic0)] = reg_value; + reg_name = "rt_ioapic0"; + break; + case SIC_rt_ioapic1: + node_nbsr->regs[offset_to_no(SIC_rt_ioapic1)] = reg_value; + reg_name = "rt_ioapic1"; + break; + case SIC_rt_ioapic2: + node_nbsr->regs[offset_to_no(SIC_rt_ioapic2)] = reg_value; + reg_name = "rt_ioapic2"; + break; + case SIC_rt_ioapic3: + node_nbsr->regs[offset_to_no(SIC_rt_ioapic3)] = reg_value; + reg_name = "rt_ioapic3"; + break; + default: + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is not yet " + "supported, so ignore write\n", + __func__, node_id, reg_offset); + reg_name = "???"; + break; + } + mutex_unlock(&nbsr->lock); + + nbsr_debug_dump_rt_ioapic(node_id, reg_offset, true, reg_value, + reg_name); +} + +static int node_nbsr_read_rt_msi(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 *reg_val) +{ + kvm_nbsr_regs_t *node_nbsr; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + switch (reg_offset) { + case SIC_rt_msi: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_msi)]; + break; + case SIC_rt_msi_h: + *reg_val = node_nbsr->regs[offset_to_no(SIC_rt_msi_h)]; + 
break; + default: + *reg_val = -1; + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is not yet " + "supported, so return 0x%x\n", + __func__, node_id, reg_offset, *reg_val); + } + mutex_unlock(&nbsr->lock); + + switch (reg_offset) { + case SIC_rt_msi: + nbsr_debug_dump_rt_msi(node_id, reg_offset, false, *reg_val, + "rt_msi"); + break; + case SIC_rt_msi_h: + nbsr_debug_dump_rt_msi_h(node_id, reg_offset, false, *reg_val, + "rt_msi_h"); + break; + } + + return 0; +} + +static int node_nbsr_read_pmc(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 *reg_val) +{ + kvm_nbsr_regs_t *node_nbsr; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + *reg_val = node_nbsr->regs[offset_to_no(reg_offset)]; + mutex_unlock(&nbsr->lock); + + nbsr_debug_dump_pmc(node_id, reg_offset, false, *reg_val, "pmc_sleep"); + + return 0; +} + +static int node_nbsr_read_l3(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 *reg_val) +{ + kvm_nbsr_regs_t *node_nbsr; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + switch (reg_offset) { + case SIC_l3_ctrl: + *reg_val = node_nbsr->regs[offset_to_no(SIC_l3_ctrl)]; + break; + default: + *reg_val = -1; + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is not yet " + "supported, so return 0x%x\n", + __func__, node_id, reg_offset, *reg_val); + } + mutex_unlock(&nbsr->lock); + + switch (reg_offset) { + case SIC_l3_ctrl: + nbsr_debug_dump_l3(node_id, reg_offset, false, *reg_val, "l3_ctrl"); + break; + } + + return 0; +} + +static int node_nbsr_readll_iommu(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u64 *reg_val) +{ + kvm_nbsr_regs_t *node_nbsr; + char *reg_name = "???"; + int ret = 0; + u64 reg_lo, reg_hi; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + switch (reg_offset) { + case SIC_iommu_ba_lo: + reg_lo = node_nbsr->regs[offset_to_no(SIC_iommu_ba_lo)]; + reg_hi = node_nbsr->regs[offset_to_no(SIC_iommu_ba_hi)]; + *reg_val = reg_lo | 
(reg_hi << 32); + reg_name = "iommu_ba"; + break; + case SIC_iommu_dtba_lo: + reg_lo = node_nbsr->regs[offset_to_no(SIC_iommu_dtba_lo)]; + reg_hi = node_nbsr->regs[offset_to_no(SIC_iommu_dtba_hi)]; + *reg_val = reg_lo | (reg_hi << 32); + reg_name = "iommu_dtba"; + break; + /* SIC_iommu_err is emulated only in qemu */ + case SIC_iommu_err: + ret = -EOPNOTSUPP; + reg_name = "iommu_err"; + break; + /* SIC_iommu_err_info_hi is emulated only in qemu */ + case SIC_iommu_err_info_lo: + ret = -EOPNOTSUPP; + reg_name = "iommu_err_info"; + break; + default: + *reg_val = -1; + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is not" + "yet supported, so return 0x%llx\n", + __func__, node_id, reg_offset, *reg_val); + } + mutex_unlock(&nbsr->lock); + + nbsr_debug_dump_prepic(node_id, reg_offset, false, *reg_val, + reg_name); + + return ret; +} + +static int node_nbsr_read_prepic(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 *reg_val) +{ + kvm_nbsr_regs_t *node_nbsr; + char *reg_name; + int ret = 0; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + switch (reg_offset) { + case SIC_prepic_ctrl2: + *reg_val = node_nbsr->regs[offset_to_no(SIC_prepic_ctrl2)]; + reg_name = "prepic_ctrl2"; + break; + /* Registers SIC_prepic_err_stat is emulated only in qemu */ + case SIC_prepic_err_stat: + ret = -EOPNOTSUPP; + reg_name = "prepic_err_stat"; + break; + /* Registers SIC_prepic_err_int is emulated only in qemu */ + case SIC_prepic_err_int: + ret = -EOPNOTSUPP; + reg_name = "prepic_err_int"; + break; + case SIC_prepic_linp0: + *reg_val = node_nbsr->regs[offset_to_no(SIC_prepic_linp0)]; + reg_name = "prepic_linp0"; + break; + case SIC_prepic_linp1: + *reg_val = node_nbsr->regs[offset_to_no(SIC_prepic_linp1)]; + reg_name = "prepic_linp1"; + break; + case SIC_prepic_linp2: + *reg_val = node_nbsr->regs[offset_to_no(SIC_prepic_linp2)]; + reg_name = "prepic_linp2"; + break; + case SIC_prepic_linp3: + *reg_val = 
node_nbsr->regs[offset_to_no(SIC_prepic_linp3)]; + reg_name = "prepic_linp3"; + break; + case SIC_prepic_linp4: + *reg_val = node_nbsr->regs[offset_to_no(SIC_prepic_linp4)]; + reg_name = "prepic_linp4"; + break; + case SIC_prepic_linp5: + *reg_val = node_nbsr->regs[offset_to_no(SIC_prepic_linp5)]; + reg_name = "prepic_linp5"; + break; + default: + *reg_val = -1; + reg_name = "???"; + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is not yet " + "supported, so return 0x%x\n", + __func__, node_id, reg_offset, *reg_val); + } + mutex_unlock(&nbsr->lock); + + nbsr_debug_dump_prepic(node_id, reg_offset, false, *reg_val, + reg_name); + + return ret; +} + +static void node_nbsr_write_rt_msi(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 reg_value) +{ + kvm_nbsr_regs_t *node_nbsr; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + switch (reg_offset) { + case SIC_rt_msi: + node_nbsr->regs[offset_to_no(SIC_rt_msi)] = reg_value; + break; + case SIC_rt_msi_h: + node_nbsr->regs[offset_to_no(SIC_rt_msi_h)] = reg_value; + break; + default: + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is not yet " + "supported, so ignore write\n", + __func__, node_id, reg_offset); + break; + } + mutex_unlock(&nbsr->lock); + + switch (reg_offset) { + case SIC_rt_msi: + nbsr_debug_dump_rt_msi(node_id, reg_offset, true, reg_value, + "rt_msi"); + break; + case SIC_rt_msi_h: + nbsr_debug_dump_rt_msi_h(node_id, reg_offset, true, reg_value, + "rt_msi_h"); + break; + } +} + +static void kvm_write_iommu_ctrl(struct kvm_nbsr *nbsr, u32 reg_value) +{ + e2k_iommu_guest_write_ctrl(reg_value); +} + +/* FIXME This only works with single passthrough device */ +static void kvm_write_iommu_ba_lo(struct kvm_nbsr *nbsr, u64 reg_value) +{ + struct irq_remap_table *irt = nbsr->kvm->arch.irt; + + if (irt->vfio_dev) + e2k_iommu_setup_guest_2d_dte(nbsr->kvm, reg_value); +} + +static void kvm_write_iommu_flush(struct kvm_nbsr *nbsr, u64 reg_value) +{ + struct 
irq_remap_table *irt = nbsr->kvm->arch.irt; + + if (irt->vfio_dev) + e2k_iommu_flush_guest(nbsr->kvm, reg_value); +} + +static int node_nbsr_write_iommu(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 reg_value) +{ + kvm_nbsr_regs_t *node_nbsr; + char *reg_name; + int ret = 0; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + switch (reg_offset) { + case SIC_iommu_ctrl: + node_nbsr->regs[offset_to_no(SIC_iommu_ctrl)] = reg_value; + reg_name = "iommu_ctrl"; + kvm_write_iommu_ctrl(nbsr, reg_value); + ret = -EOPNOTSUPP; + break; + default: + pr_err("%s(): node #%d IOMMU reg with offset 0x%04x does not " + "support 32-bit writes, so ignore it\n", + __func__, node_id, reg_offset); + reg_name = "???"; + break; + } + mutex_unlock(&nbsr->lock); + + nbsr_debug_dump_iommu(node_id, reg_offset, reg_value, true, reg_name, + false); + + return ret; +} + +static u32 kvm_write_pmc_sleep(u32 reg_value) +{ + freq_core_sleep_t fr_state; + + AW(fr_state) = reg_value; + fr_state.status = 0; /* Stay in C0 state */ + + return AW(fr_state); +} + +static int node_nbsr_write_pmc(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 reg_value) +{ + kvm_nbsr_regs_t *node_nbsr; + int ret = 0; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + node_nbsr->regs[offset_to_no(reg_offset)] = kvm_write_pmc_sleep(reg_value); + mutex_unlock(&nbsr->lock); + + nbsr_debug_dump_pmc(node_id, reg_offset, reg_value, true, "pmc_sleep"); + + return ret; +} + +static u32 kvm_write_l3_ctrl(struct kvm_nbsr *nbsr, u32 reg_value) +{ + l3_ctrl_t l3_ctrl; + + AW(l3_ctrl) = reg_value; + l3_ctrl.E2K_L3_CTRL_fl = 0; /* No need for L3 flush */ + + return AW(l3_ctrl); +} + +static int node_nbsr_write_l3(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 reg_value) +{ + kvm_nbsr_regs_t *node_nbsr; + int ret = 0; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + switch (reg_offset) { + case SIC_l3_ctrl: + 
node_nbsr->regs[offset_to_no(SIC_l3_ctrl)] = kvm_write_l3_ctrl(nbsr, reg_value); + break; + default: + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is not yet " + "supported, so ignore write\n", + __func__, node_id, reg_offset); + break; + } + mutex_unlock(&nbsr->lock); + + switch (reg_offset) { + case SIC_l3_ctrl: + nbsr_debug_dump_l3(node_id, reg_offset, reg_value, true, "l3_ctrl"); + break; + } + + return ret; +} + +static int node_nbsr_write_prepic(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, + u32 reg_value) +{ + kvm_nbsr_regs_t *node_nbsr; + char *reg_name; + int ret = 0; + + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + /* + * TODO: Additional actions when writing to + * ctrl2, err_start, err_int ? + */ + switch (reg_offset) { + case SIC_prepic_ctrl2: + ret = -EOPNOTSUPP; + node_nbsr->regs[offset_to_no(SIC_prepic_ctrl2)] = reg_value; + reg_name = "prepic_ctrl2"; + break; + /* SIC_prepic_err_stat is emulated only in qemu */ + case SIC_prepic_err_stat: + ret = -EOPNOTSUPP; + reg_name = "prepic_err_stat"; + break; + /* SIC_prepic_err_int is emulated only in qemu */ + case SIC_prepic_err_int: + ret = -EOPNOTSUPP; + reg_name = "prepic_err_int"; + break; + case SIC_prepic_linp0: + ret = -EOPNOTSUPP; + node_nbsr->regs[offset_to_no(SIC_prepic_linp0)] = reg_value; + reg_name = "prepic_linp0"; + break; + case SIC_prepic_linp1: + ret = -EOPNOTSUPP; + node_nbsr->regs[offset_to_no(SIC_prepic_linp1)] = reg_value; + reg_name = "prepic_linp1"; + break; + case SIC_prepic_linp2: + ret = -EOPNOTSUPP; + node_nbsr->regs[offset_to_no(SIC_prepic_linp2)] = reg_value; + reg_name = "prepic_linp2"; + break; + case SIC_prepic_linp3: + ret = -EOPNOTSUPP; + node_nbsr->regs[offset_to_no(SIC_prepic_linp3)] = reg_value; + reg_name = "prepic_linp3"; + break; + case SIC_prepic_linp4: + ret = -EOPNOTSUPP; + node_nbsr->regs[offset_to_no(SIC_prepic_linp4)] = reg_value; + reg_name = "prepic_linp4"; + break; + case SIC_prepic_linp5: + ret = -EOPNOTSUPP; 
+ node_nbsr->regs[offset_to_no(SIC_prepic_linp5)] = reg_value; + reg_name = "prepic_linp5"; + break; + default: + pr_err("%s(): node #%d prepic reg with offset 0x%04x " + "doesn't support 32-bit writes, so ignore it\n", + __func__, node_id, reg_offset); + reg_name = "???"; + break; + } + mutex_unlock(&nbsr->lock); + + nbsr_debug_dump_prepic(node_id, reg_offset, reg_value, true, + reg_name); + + return ret; +} + +static int node_nbsr_writell_iommu(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u64 reg_value) +{ + kvm_nbsr_regs_t *node_nbsr; + char *reg_name; + int ret = 0; + u32 reg_hi, reg_lo; + + reg_lo = reg_value & 0xffffffff; + reg_hi = reg_value >> 32; + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + /* Only *_lo halves 64-bit accesses are supported */ + switch (reg_offset) { + case SIC_iommu_ba_lo: + node_nbsr->regs[offset_to_no(SIC_iommu_ba_lo)] = reg_lo; + node_nbsr->regs[offset_to_no(SIC_iommu_ba_hi)] = reg_hi; + reg_name = "iommu_ba_lo"; + kvm_write_iommu_ba_lo(nbsr, reg_value); + ret = -EOPNOTSUPP; + break; + case SIC_iommu_dtba_lo: + node_nbsr->regs[offset_to_no(SIC_iommu_dtba_lo)] = reg_lo; + node_nbsr->regs[offset_to_no(SIC_iommu_dtba_hi)] = reg_hi; + reg_name = "iommu_dtba_lo"; + ret = -EOPNOTSUPP; + break; + case SIC_iommu_flush: + node_nbsr->regs[offset_to_no(SIC_iommu_flush)] = reg_lo; + node_nbsr->regs[offset_to_no(SIC_iommu_flushP)] = reg_hi; + reg_name = "iommu_flush"; + kvm_write_iommu_flush(nbsr, reg_value); + break; + /* SIC_iommu_err is emulated only in qemu */ + case SIC_iommu_err: + reg_name = "iommu_err"; + ret = -EOPNOTSUPP; + break; + /* SIC_iommu_err_info is emulated only in qemu */ + case SIC_iommu_err_info_lo: + reg_name = "iommu_err_info_lo"; + ret = -EOPNOTSUPP; + break; + default: + pr_err("%s(): node #%d IOMMU reg with offset 0x%04x does not " + "support 64-bit writes, so ignore it\n", + __func__, node_id, reg_offset); + reg_name = "???"; + break; + } + mutex_unlock(&nbsr->lock); + + 
nbsr_debug_dump_iommu(node_id, reg_offset, reg_value, true, reg_name, + true); + + return ret; +} + +static int node_nbsr_sic_read(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 *reg_val) +{ + ASSERT(reg_offset < MAX_SUPPORTED_NODE_NBSR_OFFSET); + + switch (reg_offset) { + case SIC_rt_mlo0: + case SIC_rt_mlo1: + case SIC_rt_mlo2: + case SIC_rt_mlo3: + case SIC_rt_mhi0: + case SIC_rt_mhi1: + case SIC_rt_mhi2: + case SIC_rt_mhi3: + return node_nbsr_read_rt_mem(nbsr, node_id, + reg_offset, reg_val); + case SIC_rt_lcfg0: + case SIC_rt_lcfg1: + case SIC_rt_lcfg2: + case SIC_rt_lcfg3: + return node_nbsr_read_rt_lcfg(nbsr, node_id, + reg_offset, reg_val); + case SIC_rt_pcim0: + case SIC_rt_pcim1: + case SIC_rt_pcim2: + case SIC_rt_pcim3: + return node_nbsr_read_rt_pcim(nbsr, node_id, + reg_offset, reg_val); + case SIC_rt_pciio0: + case SIC_rt_pciio1: + case SIC_rt_pciio2: + case SIC_rt_pciio3: + return node_nbsr_read_rt_pciio(nbsr, node_id, + reg_offset, reg_val); + case SIC_rt_pcimp_b0: + case SIC_rt_pcimp_b1: + case SIC_rt_pcimp_b2: + case SIC_rt_pcimp_b3: + return node_nbsr_read_rt_pcimp_b(nbsr, node_id, + reg_offset, reg_val); + case SIC_rt_pcimp_e0: + case SIC_rt_pcimp_e1: + case SIC_rt_pcimp_e2: + case SIC_rt_pcimp_e3: + return node_nbsr_read_rt_pcimp_e(nbsr, node_id, + reg_offset, reg_val); + case SIC_rt_pcicfgb: + return node_nbsr_read_rt_pcicfgb(nbsr, node_id, + reg_offset, reg_val); + case SIC_rt_ioapic0: + case SIC_rt_ioapic1: + case SIC_rt_ioapic2: + case SIC_rt_ioapic3: + return node_nbsr_read_rt_ioapic(nbsr, node_id, + reg_offset, reg_val); + case SIC_rt_msi: + case SIC_rt_msi_h: + return node_nbsr_read_rt_msi(nbsr, node_id, + reg_offset, reg_val); + case PMC_FREQ_CORE_N_SLEEP(0): + case PMC_FREQ_CORE_N_SLEEP(1): + case PMC_FREQ_CORE_N_SLEEP(2): + case PMC_FREQ_CORE_N_SLEEP(3): + case PMC_FREQ_CORE_N_SLEEP(4): + case PMC_FREQ_CORE_N_SLEEP(5): + case PMC_FREQ_CORE_N_SLEEP(6): + case PMC_FREQ_CORE_N_SLEEP(7): + case 
PMC_FREQ_CORE_N_SLEEP(8): + case PMC_FREQ_CORE_N_SLEEP(9): + case PMC_FREQ_CORE_N_SLEEP(10): + case PMC_FREQ_CORE_N_SLEEP(11): + case PMC_FREQ_CORE_N_SLEEP(12): + case PMC_FREQ_CORE_N_SLEEP(13): + case PMC_FREQ_CORE_N_SLEEP(14): + case PMC_FREQ_CORE_N_SLEEP(15): + return node_nbsr_read_pmc(nbsr, node_id, reg_offset, reg_val); + case SIC_l3_ctrl: + return node_nbsr_read_l3(nbsr, node_id, reg_offset, reg_val); + case SIC_prepic_ctrl2: + case SIC_prepic_err_stat: + case SIC_prepic_err_int: + case SIC_prepic_linp0: + case SIC_prepic_linp1: + case SIC_prepic_linp2: + case SIC_prepic_linp3: + case SIC_prepic_linp4: + case SIC_prepic_linp5: + return node_nbsr_read_prepic(nbsr, node_id, + reg_offset, reg_val); + default: + *reg_val = -1; + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is not yet " + "supported, so return 0x%x\n", + __func__, node_id, reg_offset, *reg_val); + break; + } + + return 0; +} + +static int node_nbsr_sic_readll(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u64 *reg_val) +{ + ASSERT(reg_offset < MAX_SUPPORTED_NODE_NBSR_OFFSET); + + switch (reg_offset) { + case SIC_iommu_ba_lo: + case SIC_iommu_dtba_lo: + case SIC_iommu_err: + case SIC_iommu_err_info_lo: + return node_nbsr_readll_iommu(nbsr, node_id, + reg_offset, reg_val); + default: + *reg_val = -1; + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is not yet " + "supported, so return 0x%llx\n", + __func__, node_id, reg_offset, *reg_val); + break; + } + + return 0; +} + + +static inline void +nbsr_debug_dump_bc_reg(int node_id, unsigned int reg_offset, bool write, + unsigned int reg_value) +{ + nbsr_debug("%s(): node #%d %s BC memory protection register %04x " + "value %08x\n", + __func__, node_id, (write) ? 
"write" : "read ", + reg_offset, reg_value); +} + +static int mpdma_fixup_page_prot(u64 hva, u32 value) +{ + struct vm_area_struct *vma, *prev; + struct mm_struct *mm = current->mm; + unsigned long vm_flags; + int err = 0; + + down_write(&mm->mmap_sem); + + vma = find_vma_prev(mm, hva, &prev); + if (!vma || vma->vm_start > hva) { + up_write(&mm->mmap_sem); + return -EINVAL; + } + if (hva > vma->vm_start) + prev = vma; + + if (value) { + nbsr_warn("%s(): page hva 0x%llx isn't protected\n", + __func__, hva); + vm_flags = (vma->vm_flags & ~VM_MPDMA) | VM_WRITE; + } else { + nbsr_warn("%s(): page hva 0x%llx is already protected\n", + __func__, hva); + vm_flags = (vma->vm_flags & ~VM_WRITE) | VM_MPDMA; + } + + err = mprotect_fixup(vma, &prev, hva, hva + PAGE_SIZE, vm_flags); + + up_write(&mm->mmap_sem); + + return err; +} + +static void node_nbsr_write_bc_mp_t_corr(struct kvm_vcpu *vcpu, + struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_no, u32 reg_value) +{ + kvm_nbsr_regs_t *node_nbsr = &nbsr->nodes[node_id]; + bc_mp_t_corr_struct_t reg; + bc_mp_t_corr_h_struct_t hreg; + u64 gpa, hva; + u32 value; + + AW(reg) = reg_value; + + if (!reg.E2K_MP_T_CORR_corr) + return; + + AW(hreg) = node_nbsr->bc_regs[reg_no + 1]; + + value = reg.E2K_MP_T_CORR_value; + gpa = BC_MP_T_CORR_ADDR(hreg, reg); + hva = kvm_vcpu_gfn_to_hva(vcpu, gpa_to_gfn(gpa)); + + nbsr_debug("%s(): node #%d perform correction for gpa 0x%llx hva 0x%llx " + "to value %d\n", + __func__, node_id, gpa, hva, value); + + BUG_ON(mpdma_fixup_page_prot(hva, value)); + + reg.E2K_MP_T_CORR_corr = 0; + node_nbsr->bc_regs[reg_no] = AW(reg); +} + +static void node_nbsr_write_bc_mp_stat(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_no, u32 reg_value) +{ + kvm_nbsr_regs_t *node_nbsr = &nbsr->nodes[node_id]; + bc_mp_stat_struct_t reg; + + AW(reg) = reg_value; + + if (reg.E2K_MP_STAT_b_ne) + reg.E2K_MP_STAT_b_ne = 0; + + if (reg.E2K_MP_STAT_b_of) + reg.E2K_MP_STAT_b_of = 0; + + node_nbsr->bc_regs[reg_no] = 
AW(reg); + + nbsr_debug("%s(): node #%d BC_MP_STAT register changed to value 0x%x\n", + __func__, node_id, AW(reg)); +} + +static void node_nbsr_bc_write(struct kvm_vcpu *vcpu, struct kvm_nbsr *nbsr, + int node_id, unsigned int reg_offset, u32 reg_value) +{ + kvm_nbsr_regs_t *node_nbsr; + unsigned int reg_no; + + nbsr_debug_dump_bc_reg(node_id, reg_offset, true, reg_value); + + BUG_ON(!nbsr_bc_reg_in_range(reg_offset)); + BUG_ON(!vcpu); + + reg_no = nbsr_bc_reg_offset_to_no(reg_offset); + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + + switch (reg_offset) { + case BC_MP_T_CORR: + node_nbsr_write_bc_mp_t_corr( + vcpu, nbsr, node_id, reg_no, reg_value); + break; + case BC_MP_STAT: + node_nbsr_write_bc_mp_stat(nbsr, node_id, reg_no, reg_value); + break; + default: + node_nbsr->bc_regs[reg_no] = reg_value; + break; + } + + mutex_unlock(&nbsr->lock); +} + +static int node_nbsr_bc_read(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 *reg_val) +{ + kvm_nbsr_regs_t *node_nbsr; + unsigned int reg_no; + + BUG_ON(!nbsr_bc_reg_in_range(reg_offset)); + + reg_no = nbsr_bc_reg_offset_to_no(reg_offset); + node_nbsr = &nbsr->nodes[node_id]; + + mutex_lock(&nbsr->lock); + *reg_val = node_nbsr->bc_regs[reg_no]; + mutex_unlock(&nbsr->lock); + + nbsr_debug_dump_bc_reg(node_id, reg_offset, false, *reg_val); + + return 0; +} + +static int node_nbsr_read(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 *reg_val) +{ + if (!nbsr_is_node_online(nbsr, node_id)) { + *reg_val = -1; + pr_err("%s(): node #%d is not online, so return 0x%x " + "for reg offset 0x%04x\n", + __func__, node_id, *reg_val, reg_offset); + return 0; + } + + if (nbsr_bc_reg_in_range(reg_offset)) { + return node_nbsr_bc_read(nbsr, node_id, reg_offset, reg_val); + } else if (reg_offset < MAX_SUPPORTED_NODE_NBSR_OFFSET) { + return node_nbsr_sic_read(nbsr, node_id, reg_offset, reg_val); + } else { + *reg_val = -1; + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is 
not yet " + "supported, so return 0x%x\n", + __func__, node_id, reg_offset, *reg_val); + } + + return 0; +} + +static int node_nbsr_readll(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u64 *reg_val) +{ + if (!nbsr_is_node_online(nbsr, node_id)) { + *reg_val = -1; + pr_err("%s(): node #%d is not online, so return 0x%llx " + "for reg offset 0x%04x\n", + __func__, node_id, *reg_val, reg_offset); + return 0; + } + + if (reg_offset < MAX_SUPPORTED_NODE_NBSR_OFFSET) { + return node_nbsr_sic_readll(nbsr, node_id, + reg_offset, reg_val); + } else { + *reg_val = -1; + pr_err("%s(): node #%d NBSR reg with offset 0x%04x does not " + "support 64-bit reads, so return 0x%llx\n", + __func__, node_id, reg_offset, *reg_val); + } + + return 0; +} + +static int node_nbsr_sic_write(struct kvm_nbsr *nbsr, int node_id, + unsigned int reg_offset, u32 reg_value) +{ + int ret = 0; + + ASSERT(reg_offset < MAX_SUPPORTED_NODE_NBSR_OFFSET); + + switch (reg_offset) { + case SIC_rt_mlo0: + case SIC_rt_mlo1: + case SIC_rt_mlo2: + case SIC_rt_mlo3: + case SIC_rt_mhi0: + case SIC_rt_mhi1: + case SIC_rt_mhi2: + case SIC_rt_mhi3: + node_nbsr_write_rt_mem(nbsr, node_id, reg_offset, reg_value); + break; + case SIC_rt_lcfg0: + case SIC_rt_lcfg1: + case SIC_rt_lcfg2: + case SIC_rt_lcfg3: + node_nbsr_write_rt_lcfg(nbsr, node_id, reg_offset, reg_value); + break; + case SIC_rt_pcim0: + case SIC_rt_pcim1: + case SIC_rt_pcim2: + case SIC_rt_pcim3: + node_nbsr_write_rt_pcim(nbsr, node_id, reg_offset, reg_value); + ret = -EOPNOTSUPP; + break; + case SIC_rt_pciio0: + case SIC_rt_pciio1: + case SIC_rt_pciio2: + case SIC_rt_pciio3: + node_nbsr_write_rt_pciio(nbsr, node_id, reg_offset, reg_value); + ret = -EOPNOTSUPP; + break; + case SIC_rt_pcimp_b0: + case SIC_rt_pcimp_b1: + case SIC_rt_pcimp_b2: + case SIC_rt_pcimp_b3: + node_nbsr_write_rt_pcimp_b(nbsr, node_id, reg_offset, + reg_value); + ret = -EOPNOTSUPP; + break; + case SIC_rt_pcimp_e0: + case SIC_rt_pcimp_e1: + case SIC_rt_pcimp_e2: + case 
SIC_rt_pcimp_e3: + node_nbsr_write_rt_pcimp_e(nbsr, node_id, reg_offset, + reg_value); + ret = -EOPNOTSUPP; + break; + case SIC_rt_pcicfgb: + node_nbsr_write_rt_pcicfgb(nbsr, node_id, reg_offset, + reg_value); + ret = -EOPNOTSUPP; + break; + case SIC_rt_ioapic0: + case SIC_rt_ioapic1: + case SIC_rt_ioapic2: + case SIC_rt_ioapic3: + node_nbsr_write_rt_ioapic(nbsr, node_id, reg_offset, reg_value); + break; + case SIC_rt_msi: + case SIC_rt_msi_h: + node_nbsr_write_rt_msi(nbsr, node_id, reg_offset, reg_value); + ret = -EOPNOTSUPP; + break; + case SIC_iommu_ctrl: + case SIC_iommu_ba_lo: + case SIC_iommu_ba_hi: + case SIC_iommu_dtba_lo: + case SIC_iommu_dtba_hi: + case SIC_iommu_flush: + case SIC_iommu_flushP: + case SIC_iommu_err: + case SIC_iommu_err1: + case SIC_iommu_err_info_lo: + case SIC_iommu_err_info_hi: + ret = node_nbsr_write_iommu(nbsr, node_id, reg_offset, + reg_value); + break; + case PMC_FREQ_CORE_N_SLEEP(0): + case PMC_FREQ_CORE_N_SLEEP(1): + case PMC_FREQ_CORE_N_SLEEP(2): + case PMC_FREQ_CORE_N_SLEEP(3): + case PMC_FREQ_CORE_N_SLEEP(4): + case PMC_FREQ_CORE_N_SLEEP(5): + case PMC_FREQ_CORE_N_SLEEP(6): + case PMC_FREQ_CORE_N_SLEEP(7): + case PMC_FREQ_CORE_N_SLEEP(8): + case PMC_FREQ_CORE_N_SLEEP(9): + case PMC_FREQ_CORE_N_SLEEP(10): + case PMC_FREQ_CORE_N_SLEEP(11): + case PMC_FREQ_CORE_N_SLEEP(12): + case PMC_FREQ_CORE_N_SLEEP(13): + case PMC_FREQ_CORE_N_SLEEP(14): + case PMC_FREQ_CORE_N_SLEEP(15): + ret = node_nbsr_write_pmc(nbsr, node_id, reg_offset, reg_value); + break; + case SIC_l3_ctrl: + ret = node_nbsr_write_l3(nbsr, node_id, reg_offset, reg_value); + break; + case SIC_prepic_ctrl2: + case SIC_prepic_err_stat: + case SIC_prepic_err_int: + case SIC_prepic_linp0: + case SIC_prepic_linp1: + case SIC_prepic_linp2: + case SIC_prepic_linp3: + case SIC_prepic_linp4: + case SIC_prepic_linp5: + ret = node_nbsr_write_prepic(nbsr, node_id, reg_offset, + reg_value); + break; + default: + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is not yet " + 
"supported, so ignore write\n", + __func__, node_id, reg_offset); + break; + } + + return ret; +} + +static int node_nbsr_write(struct kvm_vcpu *vcpu, struct kvm_nbsr *nbsr, + int node_id, unsigned int reg_offset, u32 reg_value) +{ + int ret = 0; + + if (!nbsr_is_node_online(nbsr, node_id)) { + pr_err("%s(): node #%d is not online, so ignore write to " + "reg with offset 0x%04x\n", + __func__, node_id, reg_offset); + return ret; + } + + if (nbsr_bc_reg_in_range(reg_offset)) { + node_nbsr_bc_write(vcpu, nbsr, node_id, reg_offset, reg_value); + } else if (reg_offset < MAX_SUPPORTED_NODE_NBSR_OFFSET) { + ret = node_nbsr_sic_write(nbsr, node_id, reg_offset, reg_value); + } else { + pr_err("%s(): node #%d NBSR reg with offset 0x%04x is not yet " + "supported, so ignore write\n", + __func__, node_id, reg_offset); + } + + return ret; +} + +static int node_nbsr_writell(struct kvm_vcpu *vcpu, struct kvm_nbsr *nbsr, + int node_id, unsigned int reg_offset, u64 reg_value) +{ + int ret = 0; + + if (!nbsr_is_node_online(nbsr, node_id)) { + pr_err("%s(): node #%d is not online, so ignore write to " + "reg with offset 0x%04x\n", + __func__, node_id, reg_offset); + return ret; + } + + /* Fail silently for writes to IOMMU for embedded devices */ + if (reg_offset >= SIC_iommu_ctrl && + reg_offset < SIC_iommu_err_info_hi) { + ret = node_nbsr_writell_iommu( + nbsr, node_id, reg_offset, reg_value); + } else if (!(reg_offset >= SIC_edbc_iommu_ctrl && + reg_offset < SIC_edbc_iommu_err_info_hi)) { + pr_err("%s(): node #%d NBSR reg with offset 0x%04x does not " + "support 64-bit writes, so ignore it\n", + __func__, node_id, reg_offset); + } + + return ret; +} + +static void nbsr_setup_lo_mem_region(struct kvm_nbsr *nbsr, int node_id, + gpa_t base, gpa_t size) +{ + unsigned int reg_off; + unsigned int reg_value; + e2k_rt_mlo_struct_t rt_mlo; + gpa_t start, end; + int node, link; + + ASSERT(base < NBSR_LOW_MEMORY_BOUND && + base + size <= NBSR_LOW_MEMORY_BOUND); + + start = 
ALIGN_DOWN_TO_SIZE(base, E2K_SIC_SIZE_RT_MLO); + end = ALIGN_UP_TO_SIZE(base + size, E2K_SIC_SIZE_RT_MLO) - 1; + rt_mlo.E2K_RT_MLO_reg = 0; + rt_mlo.E2K_RT_MLO_bgn = start >> E2K_SIC_ALIGN_RT_MLO; + rt_mlo.E2K_RT_MLO_end = end >> E2K_SIC_ALIGN_RT_MLO; + reg_value = rt_mlo.E2K_RT_MLO_reg; + + node_nbsr_write(NULL, nbsr, node_id, SIC_rt_mlo0, reg_value); + + /* it need setup all routers on all nodes */ + for (node = 0; node < MAX_NUMNODES; node++) { + if (node == node_id) + continue; + if (!nbsr_is_node_online(nbsr, node)) + continue; + link = nbsr_get_node_to_node_link(node, node_id); + reg_off = nbsr_get_rt_mlo_offset(link); + node_nbsr_write(NULL, nbsr, node, reg_off, reg_value); + } +} + +static void nbsr_setup_hi_mem_region(struct kvm_nbsr *nbsr, int node_id, + gpa_t base, gpa_t size) +{ + unsigned int reg_off; + u32 reg_value; + e2k_rt_mhi_struct_t rt_mhi; + gpa_t start, end; + int node, link; + + ASSERT(base >= NBSR_LOW_MEMORY_BOUND && + base + size > NBSR_LOW_MEMORY_BOUND); + + start = ALIGN_DOWN_TO_SIZE(base, E2K_SIC_SIZE_RT_MHI); + end = ALIGN_UP_TO_SIZE(base + size, E2K_SIC_SIZE_RT_MHI) - 1; + rt_mhi.E2K_RT_MHI_reg = 0; + rt_mhi.E2K_RT_MHI_bgn = start >> E2K_SIC_ALIGN_RT_MHI; + rt_mhi.E2K_RT_MHI_end = end >> E2K_SIC_ALIGN_RT_MHI; + reg_value = rt_mhi.E2K_RT_MHI_reg; + + node_nbsr_write(NULL, nbsr, node_id, SIC_rt_mhi0, reg_value); + + /* it need setup all routers on all nodes */ + for (node = 0; node < MAX_NUMNODES; node++) { + if (node == node_id) + continue; + if (!nbsr_is_node_online(nbsr, node)) + continue; + link = nbsr_get_node_to_node_link(node, node_id); + reg_off = nbsr_get_rt_mhi_offset(link); + node_nbsr_write(NULL, nbsr, node, reg_off, reg_value); + } +} + +int nbsr_setup_memory_region(struct kvm_nbsr *nbsr, int node_id, + gpa_t base, gpa_t size) +{ + if (base < NBSR_LOW_MEMORY_BOUND) { + ASSERT(base + size <= NBSR_LOW_MEMORY_BOUND); + nbsr_setup_lo_mem_region(nbsr, node_id, base, size); + } else { + ASSERT(base + size > NBSR_LOW_MEMORY_BOUND); 
+ nbsr_setup_hi_mem_region(nbsr, node_id, base, size); + } + return 0; +} + +int nbsr_setup_mmio_region(struct kvm_nbsr *nbsr, int node_id, + gpa_t base, gpa_t size) +{ + unsigned int reg_off; + unsigned int reg_value; + e2k_rt_pcim_struct_t rt_pcim; + gpa_t start, end; + int node, link; + + ASSERT(base < NBSR_LOW_MEMORY_BOUND && + base + size <= NBSR_LOW_MEMORY_BOUND); + + start = ALIGN_DOWN_TO_SIZE(base, E2K_SIC_SIZE_RT_PCIM); + end = ALIGN_UP_TO_SIZE(base + size, E2K_SIC_SIZE_RT_PCIM) - 1; + rt_pcim.E2K_RT_PCIM_reg = 0; + rt_pcim.E2K_RT_PCIM_bgn = start >> E2K_SIC_ALIGN_RT_PCIM; + rt_pcim.E2K_RT_PCIM_end = end >> E2K_SIC_ALIGN_RT_PCIM; + reg_value = rt_pcim.E2K_RT_PCIM_reg; + + node_nbsr_write(NULL, nbsr, node_id, SIC_rt_pcim0, reg_value); + + /* it need setup all routers on all nodes */ + for (node = 0; node < MAX_NUMNODES; node++) { + if (node == node_id) + continue; + if (!nbsr_is_node_online(nbsr, node)) + continue; + link = nbsr_get_node_to_node_link(node, node_id); + reg_off = nbsr_get_rt_pcim_offset(link); + node_nbsr_write(NULL, nbsr, node, reg_off, reg_value); + } + return 0; +} + +int nbsr_setup_io_region(struct kvm_nbsr *nbsr, int node_id, + gpa_t base, gpa_t size) +{ + unsigned int reg_off; + unsigned int reg_value; + e2k_rt_pciio_struct_t rt_pciio; + gpa_t start, end; + int node, link; + + ASSERT(base < NBSR_LOW_MEMORY_BOUND && + base + size <= NBSR_LOW_MEMORY_BOUND); + + start = ALIGN_DOWN_TO_SIZE(base, E2K_SIC_SIZE_RT_PCIIO); + end = ALIGN_UP_TO_SIZE(base + size, E2K_SIC_SIZE_RT_PCIIO) - 1; + rt_pciio.E2K_RT_PCIIO_reg = 0; + rt_pciio.E2K_RT_PCIIO_bgn = start >> E2K_SIC_ALIGN_RT_PCIIO; + rt_pciio.E2K_RT_PCIIO_end = end >> E2K_SIC_ALIGN_RT_PCIIO; + reg_value = rt_pciio.E2K_RT_PCIIO_reg; + + node_nbsr_write(NULL, nbsr, node_id, SIC_rt_pciio0, reg_value); + + /* it need setup all routers on all nodes */ + for (node = 0; node < MAX_NUMNODES; node++) { + if (node == node_id) + continue; + if (!nbsr_is_node_online(nbsr, node)) + continue; + link = 
nbsr_get_node_to_node_link(node, node_id); + reg_off = nbsr_get_rt_pciio_offset(link); + node_nbsr_write(NULL, nbsr, node, reg_off, reg_value); + } + return 0; +} + +int nbsr_setup_pref_mmio_region(struct kvm_nbsr *nbsr, int node_id, + gpa_t base, gpa_t size) +{ + unsigned int reg_off; + unsigned int reg_value_b, reg_value_e; + e2k_rt_pcimp_struct_t rt_pcimp_b; + e2k_rt_pcimp_struct_t rt_pcimp_e; + gpa_t start, end; + int node, link; + + ASSERT(base < NBSR_HI_MEMORY_BOUND && + base + size <= NBSR_HI_MEMORY_BOUND); + + start = ALIGN_DOWN_TO_SIZE(base, E2K_SIC_SIZE_RT_PCIMP); + end = ALIGN_UP_TO_SIZE(base + size, E2K_SIC_SIZE_RT_PCIMP) - 1; + rt_pcimp_b.E2K_RT_PCIMP_reg = 0; + rt_pcimp_b.E2K_RT_PCIMP_bgn = start >> E2K_SIC_ALIGN_RT_PCIMP; + rt_pcimp_e.E2K_RT_PCIMP_reg = 0; + rt_pcimp_e.E2K_RT_PCIMP_end = end >> E2K_SIC_ALIGN_RT_PCIMP; + + reg_value_b = rt_pcimp_b.E2K_RT_PCIMP_reg; + node_nbsr_write(NULL, nbsr, node_id, SIC_rt_pcimp_b0, reg_value_b); + + reg_value_e = rt_pcimp_e.E2K_RT_PCIMP_reg; + node_nbsr_write(NULL, nbsr, node_id, SIC_rt_pcimp_e0, reg_value_e); + + /* it need setup all routers on all nodes */ + for (node = 0; node < MAX_NUMNODES; node++) { + if (node == node_id) + continue; + if (!nbsr_is_node_online(nbsr, node)) + continue; + link = nbsr_get_node_to_node_link(node, node_id); + reg_off = nbsr_get_rt_pcimp_b_offset(link); + node_nbsr_write(NULL, nbsr, node, reg_off, reg_value_b); + reg_off = nbsr_get_rt_pcimp_e_offset(link); + node_nbsr_write(NULL, nbsr, node, reg_off, reg_value_e); + } + return 0; +} + +int nbsr_setup_pci_region(struct kvm *kvm, kvm_pci_region_t *pci_region) +{ + struct kvm_nbsr *nbsr = kvm->arch.nbsr; + unsigned long base, size; + int node_id; + + if (nbsr == NULL) + return -ENXIO; + + node_id = pci_region->node_id; + + base = pci_region->base; + size = pci_region->size; + + switch (pci_region->type) { + case kvm_pci_io_type: + if (unlikely(base < KVM_PCI_IO_RANGE_START || + base + size > KVM_PCI_IO_RANGE_END)) { + return 
-EINVAL; + } + return nbsr_setup_io_region(nbsr, node_id, base, size); + case kvm_pci_mem_type: + if (unlikely(base < KVM_PCI_MEM_RANGE_START || + base + size > KVM_PCI_MEM_RANGE_END)) { + return -EINVAL; + } + return nbsr_setup_mmio_region(nbsr, node_id, base, size); + case kvm_pci_pref_mem_type: + if (unlikely(base < KVM_PCI_PREF_MEM_RANGE_START || + base + size > KVM_PCI_PREF_MEM_RANGE_END)) { + return -EINVAL; + } + return nbsr_setup_pref_mmio_region(nbsr, node_id, base, size); + default: + pr_err("%s(): invalid PCI memory region type %d\n", + __func__, pci_region->type); + break; + } + + return -EINVAL; +} + +static int nbsr_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this, + gpa_t addr, int len, void *val) +{ + struct kvm_nbsr *nbsr = to_nbsr(this); + unsigned int reg_offset; + int node_id; + + if (!nbsr_in_range(nbsr, addr)) + return -EOPNOTSUPP; + + ASSERT(len == 4 || len == 8); /* 8 bytes access is only for IOMMU */ + + node_id = nbsr_addr_to_node(nbsr, addr); + reg_offset = nbsr_addr_to_reg_offset(nbsr, addr); + + if (len == 4) + return node_nbsr_read(nbsr, node_id, + reg_offset, (u32 *)val); + else + return node_nbsr_readll(nbsr, node_id, + reg_offset, (u64 *)val); +} + +static int nbsr_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, + gpa_t addr, int len, const void *val) +{ + struct kvm_nbsr *nbsr = to_nbsr(this); + unsigned int reg_offset; + int node_id; + int ret = 0; + + if (!nbsr_in_range(nbsr, addr)) + return -EOPNOTSUPP; + + ASSERT(len == 4 || len == 8); /* 8 bytes access is only for IOMMU */ + + node_id = nbsr_addr_to_node(nbsr, addr); + reg_offset = nbsr_addr_to_reg_offset(nbsr, addr); + if (len == 4) { + u32 reg_value = *(u32 *)val; + + ret = node_nbsr_write(vcpu, nbsr, node_id, reg_offset, + reg_value); + } else { + u64 reg_value = *(u64 *)val; + + ret = node_nbsr_writell( + vcpu, nbsr, node_id, reg_offset, reg_value); + } + + return ret; +} + +static void kvm_nbsr_reset(struct kvm_nbsr *nbsr) +{ + e2k_rt_mhi_struct_t 
rt_mhi; + e2k_rt_mlo_struct_t rt_mlo; + e2k_rt_lcfg_struct_t rt_lcfg0, rt_lcfg; + e2k_rt_pciio_struct_t rt_pciio; + e2k_rt_pcim_struct_t rt_pcim; + e2k_rt_pcimp_struct_t rt_pcimp_b, rt_pcimp_e; + e2k_rt_pcicfgb_struct_t rt_pcicfgb; + kvm_nbsr_regs_t *node_nbsr; + u32 mlo_value, mhi_value; + u32 pcim_value, pciio_value; + u32 pcimp_b_value, pcimp_e_value; + u32 pcicfgb_value; + int node, i; + + memset(nbsr->nodes, 0x00, sizeof(nbsr->nodes)); + + rt_mhi.E2K_RT_MHI_reg = 0; + rt_mhi.E2K_RT_MHI_bgn = 0xff; + rt_mhi.E2K_RT_MHI_end = 0x00; + mhi_value = rt_mhi.E2K_RT_MHI_reg; + rt_mlo.E2K_RT_MLO_reg = 0; + rt_mlo.E2K_RT_MLO_bgn = 0xff; + rt_mlo.E2K_RT_MLO_end = 0x00; + mlo_value = rt_mlo.E2K_RT_MLO_reg; + rt_pcim.E2K_RT_PCIM_reg = 0; + rt_pcim.E2K_RT_PCIM_bgn = 0xff; + rt_pcim.E2K_RT_PCIM_end = 0x00; + pcim_value = rt_pcim.E2K_RT_PCIM_reg; + rt_pciio.E2K_RT_PCIIO_reg = 0; + rt_pciio.E2K_RT_PCIIO_bgn = 0xff; + rt_pciio.E2K_RT_PCIIO_end = 0x00; + pciio_value = rt_pciio.E2K_RT_PCIIO_reg; + rt_pcimp_b.E2K_RT_PCIMP_reg = 0; + rt_pcimp_b.E2K_RT_PCIMP_bgn = 0xfffff; + rt_pcimp_e.E2K_RT_PCIMP_reg = 0; + rt_pcimp_e.E2K_RT_PCIMP_end = 0x00000; + pcimp_b_value = rt_pcimp_b.E2K_RT_PCIMP_reg; + pcimp_e_value = rt_pcimp_e.E2K_RT_PCIMP_reg; + rt_pcicfgb.E2K_RT_PCICFGB_bgn = 0x8; /* 0x0002 0000 0000 */ + pcicfgb_value = rt_pcicfgb.E2K_RT_PCICFGB_reg; + for (node = 0; node < MAX_NUMNODES; node++) { + u32 rt_msi_lo, rt_msi_hi; + + /* + * Set guest's RT_MSI the same as on host. 
This is needed for + * device passthrough: guest will be writing this address + * directly to hardware IOEPIC without intercepts + */ + get_io_epic_msi(0, &rt_msi_lo, &rt_msi_hi); + + node_nbsr = &nbsr->nodes[node]; + node_nbsr->regs[offset_to_no(SIC_rt_mhi0)] = mhi_value; + node_nbsr->regs[offset_to_no(SIC_rt_mhi1)] = mhi_value; + node_nbsr->regs[offset_to_no(SIC_rt_mhi2)] = mhi_value; + node_nbsr->regs[offset_to_no(SIC_rt_mhi3)] = mhi_value; + node_nbsr->regs[offset_to_no(SIC_rt_mlo0)] = mlo_value; + node_nbsr->regs[offset_to_no(SIC_rt_mlo1)] = mlo_value; + node_nbsr->regs[offset_to_no(SIC_rt_mlo2)] = mlo_value; + node_nbsr->regs[offset_to_no(SIC_rt_mlo3)] = mlo_value; + node_nbsr->regs[offset_to_no(SIC_rt_pcim0)] = pcim_value; + node_nbsr->regs[offset_to_no(SIC_rt_pcim1)] = pcim_value; + node_nbsr->regs[offset_to_no(SIC_rt_pcim2)] = pcim_value; + node_nbsr->regs[offset_to_no(SIC_rt_pcim3)] = pcim_value; + node_nbsr->regs[offset_to_no(SIC_rt_pciio0)] = pciio_value; + node_nbsr->regs[offset_to_no(SIC_rt_pciio1)] = pciio_value; + node_nbsr->regs[offset_to_no(SIC_rt_pciio2)] = pciio_value; + node_nbsr->regs[offset_to_no(SIC_rt_pciio3)] = pciio_value; + node_nbsr->regs[offset_to_no(SIC_rt_pcimp_b0)] = pcimp_b_value; + node_nbsr->regs[offset_to_no(SIC_rt_pcimp_b1)] = pcimp_b_value; + node_nbsr->regs[offset_to_no(SIC_rt_pcimp_b2)] = pcimp_b_value; + node_nbsr->regs[offset_to_no(SIC_rt_pcimp_b3)] = pcimp_b_value; + node_nbsr->regs[offset_to_no(SIC_rt_pcimp_e0)] = pcimp_e_value; + node_nbsr->regs[offset_to_no(SIC_rt_pcimp_e1)] = pcimp_e_value; + node_nbsr->regs[offset_to_no(SIC_rt_pcimp_e2)] = pcimp_e_value; + node_nbsr->regs[offset_to_no(SIC_rt_pcimp_e3)] = pcimp_e_value; + node_nbsr->regs[offset_to_no(SIC_rt_pcicfgb)] = pcicfgb_value; + node_nbsr->regs[offset_to_no(SIC_rt_msi)] = rt_msi_lo; + node_nbsr->regs[offset_to_no(SIC_rt_msi_h)] = rt_msi_hi; + node_nbsr->regs[offset_to_no(SIC_l3_ctrl)] = 0x3f00f8; + for (i = 0; i < 16; i++) + 
node_nbsr->regs[offset_to_no(PMC_FREQ_CORE_N_SLEEP(i))] = 0; + } + + /* BSP node, now it should be #0 */ + E2K_RT_LCFG_reg(rt_lcfg0) = 0; + E8C_RT_LCFG_pln(rt_lcfg0) = 0; + E2K_RT_LCFG_vp(rt_lcfg0) = 1; + E2K_RT_LCFG_vb(rt_lcfg0) = 1; + E2K_RT_LCFG_vio(rt_lcfg0) = 1; + /* links to other nodes */ + E2K_RT_LCFG_reg(rt_lcfg) = 0; + E8C_RT_LCFG_pln(rt_lcfg) = 0xff; + E2K_RT_LCFG_vp(rt_lcfg) = 0; + E2K_RT_LCFG_vb(rt_lcfg) = 0; + E2K_RT_LCFG_vio(rt_lcfg) = 0; + node_nbsr = &nbsr->nodes[0]; + node_nbsr->regs[offset_to_no(SIC_rt_lcfg0)] = E2K_RT_LCFG_reg(rt_lcfg0); + node_nbsr->regs[offset_to_no(SIC_rt_lcfg1)] = E2K_RT_LCFG_reg(rt_lcfg); + node_nbsr->regs[offset_to_no(SIC_rt_lcfg2)] = E2K_RT_LCFG_reg(rt_lcfg); + node_nbsr->regs[offset_to_no(SIC_rt_lcfg3)] = E2K_RT_LCFG_reg(rt_lcfg); + /* APP nodes links to other nodes */ + E2K_RT_LCFG_reg(rt_lcfg) = 0; + E8C_RT_LCFG_pln(rt_lcfg) = 0xff; + E2K_RT_LCFG_vp(rt_lcfg) = 1; + E2K_RT_LCFG_vb(rt_lcfg) = 0; + E2K_RT_LCFG_vio(rt_lcfg) = 0; + for (node = 1; node < MAX_NUMNODES; node++) { + if (!nbsr_is_node_online(nbsr, node)) + continue; + node_nbsr = &nbsr->nodes[node]; + node_nbsr->regs[offset_to_no(SIC_rt_lcfg0)] = + E2K_RT_LCFG_reg(rt_lcfg); + node_nbsr->regs[offset_to_no(SIC_rt_lcfg1)] = + E2K_RT_LCFG_reg(rt_lcfg); + node_nbsr->regs[offset_to_no(SIC_rt_lcfg2)] = + E2K_RT_LCFG_reg(rt_lcfg); + node_nbsr->regs[offset_to_no(SIC_rt_lcfg3)] = + E2K_RT_LCFG_reg(rt_lcfg); + } +} + +static const struct kvm_io_device_ops nbsr_mmio_ops = { + .read = nbsr_mmio_read, + .write = nbsr_mmio_write, +}; + +int kvm_nbsr_init(struct kvm *kvm) +{ + struct kvm_nbsr *nbsr; + int ret, i; + + nbsr = kzalloc(sizeof(struct kvm_nbsr), GFP_KERNEL); + if (!nbsr) { + pr_err("%s(): could not allocated NBSR structure\n", __func__); + return -ENOMEM; + } + mutex_init(&nbsr->lock); + kvm->arch.nbsr = nbsr; + + /* NBSR address and size are equal on all machines */ + /* so can be set same as on host */ + nbsr->base = (gpa_t)THE_NODE_NBSR_PHYS_BASE(0); + nbsr->size 
= NODE_NBSR_SIZE * MAX_NUMNODES; + nbsr->node_size = NODE_NBSR_SIZE; + + for (i = 0; i < kvm->arch.num_numa_nodes; i++) + nbsr_set_node_online(nbsr, i); + + kvm_nbsr_reset(nbsr); + kvm_iodevice_init(&nbsr->dev, &nbsr_mmio_ops); + nbsr->kvm = kvm; + ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, nbsr->base, + nbsr->size, &nbsr->dev); + if (ret < 0) { + pr_err("%s(); could not created NBSR emulation device, " + "error %d\n", + __func__, ret); + kfree(nbsr); + } + + return ret; +} + +void kvm_nbsr_destroy(struct kvm *kvm) +{ + struct kvm_nbsr *nbsr = kvm->arch.nbsr; + + if (!nbsr) + return; + + mutex_lock(&kvm->slots_lock); + kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &nbsr->dev); + mutex_unlock(&kvm->slots_lock); + kvm->arch.nbsr = NULL; + kfree(nbsr); +} + +static int handle_mpdma_request(struct kvm *kvm, u32 *regs, u64 gpa) +{ + bc_mp_stat_struct_t stat; + bc_mp_ctrl_struct_t ctrl; + u32 b_put_off, stat_off; + u32 b_put, b_get, b_hb, b_base, b_base_h, t_hb, + t_base, t_base_h, t_h_base, t_h_base_h, t_h_lb, + t_h_lb_h, t_h_hb, t_h_hb_h; + u64 b_base_64, t_base_64, t_h_base_64, t_h_lb_64, + t_h_hb_64; + u64 t_base_gpa, t_h_base_gpa, b_base_gpa; + u64 gpa_page, t_hb_page, t_h_lb_page, t_h_hb_page; + u64 t_h_base_id; + u8 t_base_val, t_h_base_val; + int ret; + + AW(ctrl) = regs[nbsr_bc_reg_offset_to_no(BC_MP_CTRL)]; + + nbsr_debug("%s(): ctrl 0x%x\n", __func__, AW(ctrl)); + + + if (!ctrl.E2K_MP_CTRL_mp_en) + return 0; + + t_hb = regs[nbsr_bc_reg_offset_to_no(BC_MP_T_HB)]; + t_base = regs[nbsr_bc_reg_offset_to_no(BC_MP_T_BASE)]; + t_base_h = regs[nbsr_bc_reg_offset_to_no(BC_MP_T_BASE_H)]; + + t_base_64 = NBSR_ADDR64(t_base_h, t_base); + + gpa_page = gpa >> PAGE_SHIFT; + t_hb_page = t_hb >> PAGE_SHIFT; + + t_base_gpa = t_base_64 + gpa_page; + + if (ret = kvm_read_guest_phys_system(kvm, t_base_gpa, &t_base_val, 1)) + return ret; + + nbsr_debug("%s(): gpa 0x%llx t_hb_page 0x%llx t_base_gpa 0x%llx " + "t_base_val 0x%x\n", + __func__, gpa, t_hb_page, t_base_gpa, 
t_base_val); + + if (gpa < (1UL << 32) && gpa_page <= t_hb_page && t_base_val == 0) { + nbsr_debug("%s(): set 1 to MPT 0x%llx\n", + __func__, t_base_gpa); + + t_base_val = 1; + + if (ret = kvm_write_guest_phys_system( + kvm, t_base_gpa, &t_base_val, 1)) + return ret; + } else { + t_h_lb = regs[nbsr_bc_reg_offset_to_no(BC_MP_T_H_LB)]; + t_h_lb_h = regs[nbsr_bc_reg_offset_to_no(BC_MP_T_H_LB_H)]; + t_h_hb = regs[nbsr_bc_reg_offset_to_no(BC_MP_T_H_HB)]; + t_h_hb_h = regs[nbsr_bc_reg_offset_to_no(BC_MP_T_H_HB_H)]; + t_h_base = regs[nbsr_bc_reg_offset_to_no(BC_MP_T_H_BASE)]; + t_h_base_h = regs[nbsr_bc_reg_offset_to_no(BC_MP_T_H_BASE_H)]; + + t_h_lb_64 = NBSR_ADDR64(t_h_lb_h, t_h_lb); + t_h_hb_64 = NBSR_ADDR64(t_h_hb_h, t_h_hb); + t_h_base_64 = NBSR_ADDR64(t_h_base_h, t_h_base); + + t_h_lb_page = t_h_lb_64 >> PAGE_SHIFT; + t_h_hb_page = t_h_hb_64 >> PAGE_SHIFT; + + t_h_base_id = (gpa - t_h_lb_64) >> PAGE_SHIFT; + + t_h_base_gpa = t_h_base_64 + t_h_base_id; + + if (ret = kvm_read_guest_phys_system( + kvm, t_h_base_gpa, &t_h_base_val, 1)) + return ret; + + nbsr_debug("%s(): gpa_page 0x%llx t_h_lb_page 0x%llx " + "t_h_hb_page 0x%llx t_h_base_gpa 0x%llx " + "t_h_base_val 0x%x\n", + __func__, gpa_page, t_h_lb_page, t_h_hb_page, + t_h_base_gpa, t_h_base_val); + + if (gpa_page >= t_h_lb_page && gpa_page <= t_h_hb_page && + t_h_base_val == 0) { + nbsr_debug("%s(): set 1 to MPT HI 0x%llx\n", + __func__, t_h_base_gpa); + + t_h_base_val = 1; + + if (ret = kvm_write_guest_phys_system( + kvm, t_h_base_gpa, &t_h_base_val, 1)) + return ret; + } else { + return 0; + } + } + + stat_off = nbsr_bc_reg_offset_to_no(BC_MP_STAT); + AW(stat) = regs[stat_off]; + + nbsr_debug("%s(): stat 0x%x\n", __func__, AW(stat)); + + if (ctrl.E2K_MP_CTRL_b_en && !stat.E2K_MP_STAT_b_of) { + b_put_off = nbsr_bc_reg_offset_to_no(BC_MP_B_PUT); + + b_put = regs[nbsr_bc_reg_offset_to_no(BC_MP_B_PUT)]; + b_get = regs[nbsr_bc_reg_offset_to_no(BC_MP_B_GET)]; + b_hb = regs[nbsr_bc_reg_offset_to_no(BC_MP_B_HB)]; + 
b_base = regs[nbsr_bc_reg_offset_to_no(BC_MP_B_BASE)]; + b_base_h = regs[nbsr_bc_reg_offset_to_no(BC_MP_B_BASE_H)]; + + b_base_64 = NBSR_ADDR64(b_base_h, b_base); + + b_base_gpa = b_base_64 + b_put; + + nbsr_debug("%s(): set page 0x%llx to BUF 0x%llx\n", + __func__, gpa_page, b_base_gpa); + + if (ret = kvm_write_guest_phys_system( + kvm, b_base_gpa, &gpa_page, 8)) + return ret; + + nbsr_debug("%s(): b_put 0x%x b_get 0x%x b_hb 0x%x\n", + __func__, b_put, b_get, b_hb); + + if (b_put == b_hb) + b_put = 0; + else + b_put += 8; + + nbsr_debug("%s(): set BC_MP_B_PUT 0x%llx to 0x%x\n", + __func__, &regs[b_put_off], b_put); + regs[b_put_off] = b_put; + + if (b_put == b_get) { + stat.E2K_MP_STAT_b_of = 1; + + nbsr_debug("%s(): set BC_MP_STAT 0x%llx to 0x%x\n", + __func__, &regs[stat_off], AW(stat)); + + regs[stat_off] = AW(stat); + + kvm_int_violat_delivery_to_hw_epic(kvm); + } + } + + if (!stat.E2K_MP_STAT_b_ne) { + stat.E2K_MP_STAT_b_ne = 1; + + nbsr_debug("%s(): set BC_MP_STAT 0x%llx to 0x%x\n", + __func__, &regs[stat_off], AW(stat)); + + regs[stat_off] = AW(stat); + + kvm_int_violat_delivery_to_hw_epic(kvm); + } + + return 0; +} + +void native_handle_mpdma_fault(e2k_addr_t hva) +{ + struct kvm *kvm = current_thread_info()->virt_machine; + struct kvm_nbsr *nbsr; + u32 *regs; + gpa_t gpa; + + nbsr_debug("%s(): started for hva 0x%lx\n", __func__, hva); + + BUG_ON(mpdma_fixup_page_prot(PAGE_ALIGN_UP(hva), 1)); + + BUG_ON(!kvm); + + nbsr = kvm->arch.nbsr; + BUG_ON(!nbsr); + + /* FIXME: now only one node #0 is allowed */ + regs = nbsr->nodes[0].bc_regs; + + gpa = kvm_hva_to_gpa(kvm, hva); + BUG_ON(gpa == INVALID_GPA); + + mutex_lock(&nbsr->lock); + BUG_ON(handle_mpdma_request(kvm, regs, gpa)); + mutex_unlock(&nbsr->lock); +} + +int kvm_get_nbsr_state(struct kvm *kvm, struct kvm_guest_nbsr_state *nbsr, + int node_id) +{ + struct kvm_nbsr *nbsr_kvm = kvm->arch.nbsr; + u32 *regs = nbsr_kvm->nodes[node_id].regs; + u64 reg_lo, reg_hi; + + nbsr->rt_pcim0 = regs[offset_to_no(SIC_rt_pcim0)]; 
+ nbsr->rt_pcim1 = regs[offset_to_no(SIC_rt_pcim1)]; + nbsr->rt_pcim2 = regs[offset_to_no(SIC_rt_pcim2)]; + nbsr->rt_pcim3 = regs[offset_to_no(SIC_rt_pcim3)]; + + nbsr->rt_pciio0 = regs[offset_to_no(SIC_rt_pciio0)]; + nbsr->rt_pciio1 = regs[offset_to_no(SIC_rt_pciio1)]; + nbsr->rt_pciio2 = regs[offset_to_no(SIC_rt_pciio2)]; + nbsr->rt_pciio3 = regs[offset_to_no(SIC_rt_pciio3)]; + + nbsr->rt_pcimp_b0 = regs[offset_to_no(SIC_rt_pcimp_b0)]; + nbsr->rt_pcimp_b1 = regs[offset_to_no(SIC_rt_pcimp_b1)]; + nbsr->rt_pcimp_b2 = regs[offset_to_no(SIC_rt_pcimp_b2)]; + nbsr->rt_pcimp_b3 = regs[offset_to_no(SIC_rt_pcimp_b3)]; + + nbsr->rt_pcimp_e0 = regs[offset_to_no(SIC_rt_pcimp_e0)]; + nbsr->rt_pcimp_e1 = regs[offset_to_no(SIC_rt_pcimp_e1)]; + nbsr->rt_pcimp_e2 = regs[offset_to_no(SIC_rt_pcimp_e2)]; + nbsr->rt_pcimp_e3 = regs[offset_to_no(SIC_rt_pcimp_e3)]; + + nbsr->rt_pcicfgb = regs[offset_to_no(SIC_rt_pcicfgb)]; + + reg_lo = regs[offset_to_no(SIC_rt_msi)]; + reg_hi = regs[offset_to_no(SIC_rt_msi_h)]; + nbsr->rt_msi = reg_hi << 32 | reg_lo; + + nbsr->iommu_ctrl = regs[offset_to_no(SIC_iommu_ctrl)]; + + reg_lo = regs[offset_to_no(SIC_iommu_ba_lo)]; + reg_hi = regs[offset_to_no(SIC_iommu_ba_hi)]; + nbsr->iommu_ptbar = reg_hi << 32 | reg_lo; + + reg_lo = regs[offset_to_no(SIC_iommu_dtba_lo)]; + reg_hi = regs[offset_to_no(SIC_iommu_dtba_hi)]; + nbsr->iommu_dtbar = reg_hi << 32 | reg_lo; + + nbsr->prepic_ctrl2 = regs[offset_to_no(SIC_prepic_ctrl2)]; + nbsr->prepic_linp0 = regs[offset_to_no(SIC_prepic_linp0)]; + nbsr->prepic_linp1 = regs[offset_to_no(SIC_prepic_linp1)]; + nbsr->prepic_linp2 = regs[offset_to_no(SIC_prepic_linp2)]; + nbsr->prepic_linp3 = regs[offset_to_no(SIC_prepic_linp3)]; + nbsr->prepic_linp4 = regs[offset_to_no(SIC_prepic_linp4)]; + nbsr->prepic_linp5 = regs[offset_to_no(SIC_prepic_linp5)]; + + return 0; +} diff --git a/arch/e2k/kvm/sic-nbsr.h b/arch/e2k/kvm/sic-nbsr.h new file mode 100644 index 000000000000..deb187967986 --- /dev/null +++ 
b/arch/e2k/kvm/sic-nbsr.h @@ -0,0 +1,72 @@ +/* + * + * North Bridge registers emulation for guest VM + * + * Copyright 2019 MCST, Salavat S. Gilyazov (atic@mcst.ru) + */ + +#ifndef __KVM_SIC_NBSR_H +#define __KVM_SIC_NBSR_H + +#include +#include +#include + +/* only the following number of NBSR registers is now supported */ +#define MAX_SUPPORTED_NODE_NBSR_OFFSET (SIC_prepic_linp5 + 4) +#define MAX_SUPPORTED_NODE_NBSR_NUM (MAX_SUPPORTED_NODE_NBSR_OFFSET / 4) + +typedef struct kvm_nbsr_regs { + u32 regs[MAX_SUPPORTED_NODE_NBSR_NUM]; + u32 bc_regs[BC_MM_REG_NUM]; +} kvm_nbsr_regs_t; + +typedef struct kvm_nbsr { + gpa_t base; /* NBSR registers base address */ + int size; /* size of all registers of all nodes */ + int node_size; /* size of all registers on one node */ + struct kvm_io_device dev; + struct kvm *kvm; + unsigned nodes_online; + struct mutex lock; + kvm_nbsr_regs_t nodes[MAX_NUMNODES]; +} kvm_nbsr_t; + +#define DEBUG +#undef ASSERT +#ifdef DEBUG +#define ASSERT(x) \ +do { \ + if (!(x)) { \ + pr_emerg("assertion failed %s: %d: %s\n", \ + __FILE__, __LINE__, #x); \ + BUG(); \ + } \ +} while (0) +#else +#define ASSERT(x) do { } while (0) +#endif + +/* + * max values of PCI memory regions limits + */ +#define KVM_PCI_IO_RANGE_START 0x00000000 +#define KVM_PCI_IO_RANGE_END 0x00010000 +#define KVM_PCI_MEM_RANGE_START 0x00000000 +#define KVM_PCI_MEM_RANGE_END 0xf8000000 +#define KVM_PCI_PREF_MEM_RANGE_START 0x00000000000 +#define KVM_PCI_PREF_MEM_RANGE_END 0x10000000000 + +extern int kvm_nbsr_init(struct kvm *kvm); +extern void kvm_nbsr_destroy(struct kvm *kvm); +extern int nbsr_setup_memory_region(struct kvm_nbsr *nbsr, int node_id, + gpa_t base, gpa_t size); +extern int nbsr_setup_mmio_region(struct kvm_nbsr *nbsr, int node_id, + gpa_t base, gpa_t size); +extern int nbsr_setup_pref_mmio_region(struct kvm_nbsr *nbsr, int node_id, + gpa_t base, gpa_t size); +extern int nbsr_setup_pci_region(struct kvm *kvm, kvm_pci_region_t *pci_region); +extern int 
kvm_get_nbsr_state(struct kvm *kvm, + struct kvm_guest_nbsr_state *nbsr, int node_id); + +#endif /* __KVM_SIC_NBSR_H */ diff --git a/arch/e2k/kvm/spinlock.c b/arch/e2k/kvm/spinlock.c new file mode 100644 index 000000000000..ffb502f4b092 --- /dev/null +++ b/arch/e2k/kvm/spinlock.c @@ -0,0 +1,830 @@ +/* + * This file implements on host the arch-dependent parts of kvm guest + * spinlock()/spinunlock() slow part + * + * Copyright 2014 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "irq.h" +#include "process.h" +#include "complete.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_DEADLOCK_MODE +#undef DebugKVMDL +#define DEBUG_KVM_DEADLOCK_MODE 0 /* spinlock deadlock debugging */ +#define DebugKVMDL(fmt, args...) \ +({ \ + if (DEBUG_KVM_DEADLOCK_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_UNLOCKED_MODE +#undef DebugKVMUN +#define DEBUG_KVM_UNLOCKED_MODE 0 /* spinlock deadlock debugging */ +#define DebugKVMUN(fmt, args...) \ +({ \ + if (DEBUG_KVM_UNLOCKED_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) +#undef DEBUG_UNLOCKED_MODE +#undef DebugKVMUL +#define DEBUG_UNLOCKED_MODE 0 /* spinlock deadlock debugging */ +#define DebugKVMUL(fmt, args...) \ +({ \ + if (DEBUG_UNLOCKED_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +static bool debug_loop = false; +#undef DEBUG_KVM_LOOP_MODE +#undef DebugLOOP +#define DEBUG_KVM_LOOP_MODE 0 /* list loop debugging */ +#define DebugLOOP(fmt, args...) \ +({ \ + if (DEBUG_KVM_LOOP_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_SHUTDOWN_MODE +#undef DebugKVMSH +#define DEBUG_KVM_SHUTDOWN_MODE 0 /* KVM shutdown debugging */ +#define DebugKVMSH(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_SHUTDOWN_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +typedef struct spinlock_waiter { + struct hlist_node wait_list; + struct completion done; + struct thread_info *ti; + struct gthread_info *gti; + void *lock; +} spinlock_waiter_t; + +/* + * Lock a guest spinlock, slowpath: + */ + +/* + * Probably spinlock was already unlocked, so search + * the spinlock in list of unlocked spinlocks + * spinlock hash table and unlocked list should be locked by caller + * If bool 'find' is true then function only scans list to find specified + * lock in the list of unlocked spinlocks. + * If bool 'find' is false, then the function scans list to add new entry with + * current thread. + */ +static inline spinlock_unlocked_t * +check_spin_unlocked_list(struct kvm *kvm, void *lock, bool find) +{ + spinlock_unlocked_t *u; + thread_info_t *ti; + struct task_struct *t; + int loop = 0; + + list_for_each_entry(u, &kvm->arch.spinunlocked_head, unlocked_list) { + t = thread_info_task(u->ti); + DebugKVM("next spinunlocked list entry %s (%d) gti %px " + "lock %px\n", + t->comm, t->pid, u->gti, u->lock); + if (u->lock == lock) { + DebugKVM("spinlock %px already was unlocked\n", + lock); + if (find) { + DebugKVM("spinlock found at the unlocked " + "list entry: by %s (%d) gti %px\n", + t->comm, t->pid, u->gti); + return u; + } + + /* FIXME: it seems guest threads lock/unlock spins, */ + /* so their thread agents should be considered here */ + /* 1. In some case on the VCPU (it is one thread of */ + /* host) the guest thread lock spin and guest */ + /* kernel switch to other thread, which can do */ + /* lock of the same spin. It is deadlock, but host */ + /* does not detect now this case. */ + /* 2. Same as case above, but other guest thread lock */ + /* other spin. It is good case, but host try check */ + /* and update list of unlocked spins on behalf the */ + /* same host thread? 
it is not good */ + /* Probably guest thread info (gti) should be here */ + /* instead of host thread info (ti) */ + loop = 0; + list_for_each_entry(ti, &u->checked_unlocked, + tasks_to_spin) { + t = thread_info_task(ti); + if (t->comm == NULL) { + pr_err("%s(): bad at list current " + "ti %px task %px\n", + __func__, ti, t); + dump_stack(); + } + DebugLOOP("next spinunlocked list entry %px " + "head %px task %s lock %px\n", + u, &u->checked_unlocked, t->comm, + u->lock); + DebugLOOP("next ti %px %s current %px %s\n", + ti, t->comm, current_thread_info(), + current->comm); + if (ti == current_thread_info()) { + if (ti->gti_to_spin != + ti->gthread_info) { + DebugKVMDL("spinlock %px " + "already was checked " + "by the %s but other " + "gti: was %px now %px\n", + lock, t->comm, + ti->gti_to_spin, + ti->gthread_info); + GTI_BUG_ON(true); + } + DebugLOOP("spinlock %px already was " + "checked by the task %s (%d)\n", + lock, t->comm, t->pid); + return NULL; + } + loop++; + if (loop == 50) + debug_loop = true; + else if (loop == 100) + debug_loop = false; + else if (loop > 100) + panic("infinity spinlock checked " + "threads\n"); + } + if (current->comm == NULL) { + pr_err("%s(): bad to add to list current " + "ti %px task %px\n", + __func__, current_thread_info(), + current); + dump_stack(); + } + GTI_BUG_ON(!list_empty( + ¤t_thread_info()->tasks_to_spin)); + current_thread_info()->gti_to_spin = + current_thread_info()->gthread_info; + list_add_tail(¤t_thread_info()->tasks_to_spin, + &u->checked_unlocked); + DebugKVM("task %s (%d) is added to the list of " + "spin %px unlock checked tasks\n", + current->comm, current->pid, lock); + return u; + } + } + return NULL; +} +static inline void +clear_spin_unlocked_list(struct kvm *kvm, spinlock_unlocked_t *node) +{ + thread_info_t *ti, *tmp; + + DebugKVM("started for node %px lock %px unlocked by %s gti %px\n", + node, node->lock, thread_info_task(node->ti)->comm, node->gti); + list_for_each_entry_safe(ti, tmp, 
&node->checked_unlocked, + tasks_to_spin) { + DebugKVM("next thread %px %s guest thread %px\n", + ti, thread_info_task(ti)->comm, ti->gti_to_spin); + /* current thread takes lock, gti cannot change */ + if (ti == current_thread_info()) + GTI_BUG_ON(ti->gthread_info != ti->gti_to_spin); + list_del_init(&ti->tasks_to_spin); + } +} +static inline void +free_spin_unlocked_node(struct kvm *kvm, spinlock_unlocked_t *u) +{ + clear_spin_unlocked_list(kvm, u); + list_move_tail(&u->unlocked_list, &kvm->arch.spinunlocked_free); + + if (!list_empty(&kvm->arch.spinunlocked_wait)) { + struct task_struct *t; + + u = list_first_entry(&kvm->arch.spinunlocked_wait, + spinlock_unlocked_t, unlocked_list); + list_del(&u->unlocked_list); + t = thread_info_task(u->ti); + wake_up_process(t); + DebugKVM("spinlock %s (%d) waiting for unlocked " + "spinlocks free entry is woken up\n", + t->comm, t->pid); + } +} + +/* + * Queue the lock to list of waiting for wake up + * Lock kvm->arch.spinlock_hash_lock should be taken by caller, + * the function will unlock the spin before calling of scheduler + * and take the spin again before returen from thr function + */ +static inline int +kvm_queue_spin_lock_to_wait(struct kvm *kvm, void *lock, unsigned long flags) +{ + struct thread_info *ti = current_thread_info(); + struct gthread_info *gti = ti->gthread_info; + struct kvm_vcpu *vcpu = ti->vcpu; + spinlock_waiter_t waiter; + spinlock_waiter_t *w; + struct hlist_node *next; + struct hlist_head *head; + struct hlist_node *node; + bool unlocked; + + DebugKVM("%s (%d) started for guest lock %px (hash index 0x%02x)\n", + current->comm, current->pid, lock, spinlock_hashfn(lock)); + + head = &kvm->arch.spinlock_hash[spinlock_hashfn(lock)]; + waiter.ti = ti; + if (unlikely(gti == NULL)) { + GTI_BUG_ON(!test_ti_thread_flag(ti, TIF_PSEUDOTHREAD)); + } + waiter.gti = gti; + waiter.lock = lock; + INIT_HLIST_NODE(&waiter.wait_list); + init_completion(&waiter.done); + + if (hlist_empty(head)) { + 
hlist_add_head(&waiter.wait_list, head); + DebugKVM("spinlock waitqueue is empty, add as first\n"); + } else { + /* add current thread to the end of the waitqueue */ + /* but before check that this thread and lock is not already */ + /* at waitqueue */ + hlist_for_each_safe(node, next, head) { + struct task_struct *t; + + w = hlist_entry(node, spinlock_waiter_t, wait_list); + t = thread_info_task(w->ti); + DebugKVM("next spinlock waitqueue entry %s (%d) " + "gti %px lock %px\n", + t->comm, t->pid, w->gti, w->lock); + while (w->gti == gti || w->ti == ti) { + if (gti == NULL) { + /* it is VIRQ VCPU, should be other */ + if (likely(w->ti != ti)) + /* other VIRQ VCPU */ + break; + } + if (w->lock == lock) { + DebugKVMDL("task %s (%d) gti %px same " + "lock %px detected at " + "waitqueue %px\n", + t->comm, t->pid, w->gti, + w->lock, head); + } else { + DebugKVMDL("task %s (%d) gti %px other " + "lock %px (new lock %px) " + "detected at waitqueue %px\n", + t->comm, t->pid, w->gti, + w->lock, lock, head); + } + GTI_BUG_ON(true); + break; + } + if (next == NULL) + break; + } + hlist_add_behind(&waiter.wait_list, node); + DebugKVM("add to the end\n"); + } + w = &waiter; + + /* + * Wait for the thread will be waked up, but reasons of waking up + * can be a few, for example some event occurred and should be passed + * to VCPU. In our case reason of waking up must be the spin unlocking. 
+ * It need check spinlock waitqueue after waking up to make sure that + * the thread is unqueued and + * can try lock the spin again; + * take the spin lock + * If spinlock was detected in waitqueue again and pending VIRQs flag + * is set, then interrupt the waiting and return to guest to try + * handle pending VIRQs (in this case the function return -EINTR) + */ + + GTI_BUG_ON(vcpu == NULL); + vcpu->arch.on_spinlock = true; + wmb(); /* flag should be seen before read 'on_csd_lock' or */ + /* other VCPU waiting state flags */ + do { + struct kvm_vcpu *other_vcpu; + struct task_struct *t; + int ret; + int r; + + kvm_for_each_vcpu(r, other_vcpu, kvm) { + if (other_vcpu == vcpu) + continue; + if (other_vcpu->arch.on_csd_lock) { + if (kvm_guest_vcpu_irqs_disabled(vcpu, + kvm_get_guest_vcpu_UPSR_value(vcpu), + kvm_get_guest_vcpu_PSR_value(vcpu))) { + pr_debug("%s(): VCPU #%d is waiting " + "for IPI completion VCPU #%d " + "is waiting for spinlock %px\n", + __func__, other_vcpu->vcpu_id, + vcpu->vcpu_id, lock); + } else if (!kvm_test_pending_virqs(vcpu)) { + pr_debug("%s(): VCPU #%d there is IPI " + "but none VIRQs pending flag, " + "VIRQs count %d\n", + __func__, vcpu->vcpu_id, + kvm_get_pending_virqs_num( + vcpu)); + /* kvm_print_local_APIC(vcpu); */ + } + } + } + if (DO_DUMP_VCPU(vcpu) || + kvm_test_pending_virqs(vcpu) && + !kvm_guest_vcpu_irqs_disabled(vcpu, + kvm_get_guest_vcpu_UPSR_value(vcpu), + kvm_get_guest_vcpu_PSR_value(vcpu))) { + /* there is signal to do dump guest state or */ + /* there are VIRQs to handle, goto to try handle */ + if (likely(w == &waiter)) { + /* delete waiter from list */ + hlist_del(&w->wait_list); + } + vcpu->arch.on_spinlock = false; + DebugKVM("VCPU #%d there are pending VIRQs, " + "counter %d, mask enabled, try to handle\n", + vcpu->vcpu_id, + kvm_get_pending_virqs_num(vcpu)); + return -EINTR; + } + if (kvm_test_pending_virqs(vcpu) && + kvm_guest_vcpu_irqs_disabled(vcpu, + kvm_get_guest_vcpu_UPSR_value(vcpu), + 
kvm_get_guest_vcpu_PSR_value(vcpu))) { + DebugKVM("VCPU #%d there are pending VIRQs, counter " + "%d, mask disabled\n", + vcpu->vcpu_id, + kvm_get_pending_virqs_num(vcpu)); + } + raw_spin_unlock_irqrestore(&kvm->arch.spinlock_hash_lock, + flags); + DebugKVM("go to wait for completion\n"); + ret = kvm_wait_for_completion_interruptible(&w->done); + DebugKVM("waiting for completion terminated with %d\n", ret); + + if (kvm->arch.spinlock_hash_disable || ret == -ERESTARTSYS) { + DebugKVMSH("guest spinlock disabled or fatal signal: " + "exit from process\n"); + kvm_spare_host_vcpu_release(vcpu); + do_exit(ret); + } + raw_spin_lock_irqsave(&kvm->arch.spinlock_hash_lock, flags); + /* search thread at spinlock waitqueue */ + unlocked = true; + hlist_for_each_entry(w, head, wait_list) { + if (w == &waiter) { + unlocked = false; + break; + } + } + t = thread_info_task(w->ti); + if (!unlocked && ret == 0) { + pr_err("%s(): thread %s (%d) lock %px = 0x%lx detected " + "at spinlock waitqueue, when waiting was " + "completed\n", + __func__, t->comm, t->pid, w->lock, + (IS_HOST_KERNEL_ADDRESS((e2k_addr_t)w->lock)) ? + (long)w->lock : *(long *)w->lock); + vcpu->arch.on_spinlock = false; + return -EINVAL; + } else if (!unlocked && + !test_ti_thread_flag(w->ti, TIF_PSEUDOTHREAD)) { + DebugKVM("thread %s (%d) lock %px = 0x%lx detected " + "at spinlock waitqueue, waiting was " + "interrupted ret = %d, so continue waiting\n", + t->comm, t->pid, w->lock, + (IS_HOST_KERNEL_ADDRESS((e2k_addr_t)w->lock)) ? 
+ (long)w->lock : *(long *)w->lock, + ret); + } else if (unlocked) { + t = thread_info_task(waiter.ti); + DebugKVM("thread %s (%d) is not detected at spinlock " + "waitqueue, so complete waiting\n", + t->comm, t->pid); + break; + } + } while (!unlocked); + vcpu->arch.on_spinlock = false; + + DebugKVM("%s (%d) is woken up, return to guest\n", + current->comm, current->pid); + return 0; +} + +int kvm_guest_spin_lock_slow(struct kvm *kvm, void *lock, bool check_unlock) +{ + spinlock_unlocked_t *u; + struct task_struct *t; + unsigned long flags; + int ret; + + raw_spin_lock_irqsave(&kvm->arch.spinlock_hash_lock, flags); + DebugKVMUL("%s (%d) lock %px started\n", + current->comm, current->pid, lock); + + /* probably spinlock was already unlocked, so first search our */ + /* spinlock in list of unlocked spinlocks */ + if (check_unlock) { + if (check_spin_unlocked_list(kvm, lock, false) != NULL) { + raw_spin_unlock_irqrestore( + &kvm->arch.spinlock_hash_lock, flags); + DebugKVM("spinlock %px already was unlocked, return to " + "try get locking\n", + lock); + return 0; + } + } else { + u = check_spin_unlocked_list(kvm, lock, true); + if (u != NULL) { + t = thread_info_task(u->ti); + /* lock is found as already unlocked */ + /* wake up unlocking process to wake up all process */ + /* waiting for the lock */ + DebugKVMUL("%s (%d) lock %px will wake up unlocking " + "process %s (%d)\n", + current->comm, current->pid, lock, + t->comm, t->pid); + wake_up_process(t); + } + } + + /* spinlock was not unlocked, so add our process to waitqueue */ + ret = kvm_queue_spin_lock_to_wait(kvm, lock, flags); + if (ret == -EINTR) { + DebugKVMDL("%s (%d) VCPU has pending VIRQs, return " + "to guest to try handle it\n", + current->comm, current->pid); + } + + raw_spin_unlock_irqrestore(&kvm->arch.spinlock_hash_lock, flags); + + return ret; +} + +/* + * Guest locked spinlock, slowpath: + */ + +int kvm_guest_spin_locked_slow(struct kvm *kvm, void *lock) +{ + spinlock_unlocked_t *u; + unsigned 
long flags; + int ret; + + DebugKVM("%s (%d) started for guest lock %px (hash index 0x%02x)\n", + current->comm, current->pid, lock, spinlock_hashfn(lock)); + + raw_spin_lock_irqsave(&kvm->arch.spinlock_hash_lock, flags); + + /* search spinlock at the list of unlocked spinlocks */ + do { + u = check_spin_unlocked_list(kvm, lock, true); + if (likely(u != NULL)) + break; + DebugKVMDL("%s (%d) could not find lock at the list of " + "unlocked spinloks\n", + current->comm, current->pid); + /* lock was not yet queued to waitqueue */ + /* so add our process to waitqueue and wait for wake up */ + /* to try find the lock in the list of unlocked spinlocks */ + ret = kvm_queue_spin_lock_to_wait(kvm, lock, flags); + if (ret && ret != -EINTR) { + pr_err("%s(): queue spinlock to waitqueue list failed " + "with error %d, abort the process %s (%d)\n", + __func__, ret, current->comm, current->pid); + raw_spin_unlock_irqrestore( + &kvm->arch.spinlock_hash_lock, flags); + do_exit(ret); + } else if (ret == -EINTR) { + DebugKVMDL("%s (%d) VCPU has pending VIRQs, return " + "to guest to try handle it\n", + current->comm, current->pid); + raw_spin_unlock_irqrestore( + &kvm->arch.spinlock_hash_lock, flags); + return ret; + } + } while (u == NULL); + + free_spin_unlocked_node(kvm, u); + + raw_spin_unlock_irqrestore(&kvm->arch.spinlock_hash_lock, flags); + DebugKVM("unlocked spinlock %px move to free list\n", lock); + return 0; +} + +/* + * Add the spinlock to the list of unlocked spinlocks, + * because of unlocking can outrun locking process, which can in progress + * and will be waiting for unlocking at any time + * Spinlock should be taken + */ +static spinlock_unlocked_t * +add_guest_spin_as_unlocked(struct kvm *kvm, void *lock, bool add_to_unlock, + unsigned long flags) +{ + spinlock_unlocked_t *u = NULL; + struct task_struct *t; + + DebugKVM("%s (%d) started for guest lock %px\n", + current->comm, current->pid, lock); + + do { + u = check_spin_unlocked_list(kvm, lock, true); + if (u 
!= NULL) { + if (add_to_unlock) { + pr_err("%s() lock %px detected at unlocked " + "list WHY ???\n", + __func__, lock); + return ERR_PTR(-EINVAL); + } + t = thread_info_task(u->ti); + DebugKVMUN("guest lock %px already detected at " + "unlocked list, queued by %s (%d)\n", + lock, t->comm, t->pid); + return NULL; + } + + if (list_empty(&kvm->arch.spinunlocked_free)) { + spinlock_unlocked_t unlock_waiter; + + pr_warning("kvm_guest_spin_unlock_slow() overflow " + "of list of unlocked spinlocks\n"); + unlock_waiter.ti = current_thread_info(); + unlock_waiter.gti = current_thread_info()->gthread_info; + unlock_waiter.lock = lock; + INIT_LIST_HEAD(&unlock_waiter.unlocked_list); + list_add_tail(&unlock_waiter.unlocked_list, + &kvm->arch.spinunlocked_wait); + set_current_state(TASK_INTERRUPTIBLE); + raw_spin_unlock_irqrestore( + &kvm->arch.spinlock_hash_lock, flags); + + DebugKVM("go to schedule and wait for wake up\n"); + schedule(); + __set_current_state(TASK_RUNNING); + if (kvm->arch.spinlock_hash_disable || + fatal_signal_pending(current)) { + struct kvm_vcpu *vcpu = + current_thread_info()->vcpu; + + DebugKVMSH("guest spinlock disabled or fatal " + "signal: exit from process\n"); + kvm_spare_host_vcpu_release(vcpu); + do_exit(0); + } + raw_spin_lock_irqsave(&kvm->arch.spinlock_hash_lock, + flags); + } else { + break; + } + } while (u == NULL); + + u = list_first_entry(&kvm->arch.spinunlocked_free, + spinlock_unlocked_t, unlocked_list); + list_move_tail(&u->unlocked_list, &kvm->arch.spinunlocked_head); + u->ti = current_thread_info(); + u->gti = current_thread_info()->gthread_info; + u->lock = lock; + INIT_LIST_HEAD(&u->checked_unlocked); + DebugKVM("add spinlock %s (%d) gti %px lock %px to the list of " + "unlocked\n", + current->comm, current->pid, u->gti, u->lock); + return u; +} + +/* + * Unlock a guest spinlock, slowpath: + */ + +int kvm_guest_spin_unlock_slow(struct kvm *kvm, void *lock, bool add_to_unlock) +{ + spinlock_unlocked_t *u; + spinlock_waiter_t *w; + 
struct hlist_head *head; + struct hlist_node *tmp; + struct kvm_vcpu *vcpu; + struct task_struct *t; + int unlocked = 0; + unsigned long flags; + + DebugKVMUN("%s (%d) started for guest lock %px (hash index 0x%02x)\n", + current->comm, current->pid, lock, spinlock_hashfn(lock)); + + raw_spin_lock_irqsave(&kvm->arch.spinlock_hash_lock, flags); + + /* at first add our spinlock to the list of unlocked spinlocks, */ + /* because of unlocking can outrun locking process which is */ + /* trying to enable lock */ + if (add_to_unlock) { + u = add_guest_spin_as_unlocked(kvm, lock, true, flags); + if (IS_ERR(u)) { + raw_spin_unlock_irqrestore( + &kvm->arch.spinlock_hash_lock, flags); + return PTR_ERR(u); + } + } else { + u = NULL; + } + + head = &kvm->arch.spinlock_hash[spinlock_hashfn(lock)]; + +waking_up: + if (hlist_empty(head)) { + DebugKVMUN("spinlock waitqueue is empty\n"); + goto not_found; + } + /* find all task waiting for this spinlock and wake up its */ + hlist_for_each_entry_safe(w, tmp, head, wait_list) { + t = thread_info_task(w->ti); + DebugKVMUN("next spinlock waitqueue entry %s (%d) " + "gti %px lock %px\n", + t->comm, t->pid, w->gti, w->lock); + if (w->lock != lock) + continue; + hlist_del(&w->wait_list); + if (unlikely(completion_done(&w->done))) { + pr_err("%s(): process %s (%d) waiting for unlock " + "is already completed\n", + __func__, t->comm, t->pid); + } + complete(&w->done); + DebugKVMUN("spin unlocked and process %s (%d) is woken up\n", + t->comm, t->pid); + unlocked++; + } + +not_found: + if (unlikely(!add_to_unlock && unlocked == 0)) { + spinlock_unlocked_t *u_new; + + /* could not find any waiting for spin unlocking process, */ + /* so unlocking is first and locking process are in progress */ + /* It need wait for locking process in wait list queue */ + if (u == NULL) { + u_new = add_guest_spin_as_unlocked(kvm, lock, false, + flags); + if (unlikely(IS_ERR(u_new))) { + raw_spin_unlock_irqrestore( + &kvm->arch.spinlock_hash_lock, flags); + return 
PTR_ERR(u); + } else if (u_new == NULL) { + /* there is already unlocking process for */ + /* this lock, it need not second same process */ + goto done; + } + u = u_new; + t = thread_info_task(u->ti); + DebugKVMUN("spin lock %px is queued as unlocked " + "by %s (%d)\n", + lock, t->comm, t->pid); + } + /* the process should wait for any locking process */ + /* which will detect the spin as unlocked and wake up */ + /* this process to restart waking up of all waiting for */ + /* the lock processes */ + set_current_state(TASK_INTERRUPTIBLE); + DebugKVMUN("%s (%d) lock %px go to schedule and wait for " + "wake up by locking process\n", + current->comm, current->pid, lock); + raw_spin_unlock_irqrestore( + &kvm->arch.spinlock_hash_lock, flags); + + schedule(); + __set_current_state(TASK_RUNNING); + if (kvm->arch.spinlock_hash_disable || + fatal_signal_pending(current)) { + goto signaled; + } + raw_spin_lock_irqsave(&kvm->arch.spinlock_hash_lock, flags); + DebugKVMUN("%s (%d) lock %px is waked up by locking process\n", + current->comm, current->pid, lock); + goto waking_up; + } +done: + if (!add_to_unlock && u != NULL) { + free_spin_unlocked_node(kvm, u); + DebugKVMUN("%s (%d) lock %px is deleted from unlocking queue\n", + current->comm, current->pid, lock); + } + raw_spin_unlock_irqrestore(&kvm->arch.spinlock_hash_lock, flags); + + DebugKVMUN("%s (%d) completed for guest lock %px, unlocked %d\n", + current->comm, current->pid, lock, unlocked); + return 0; + +signaled: + vcpu = current_thread_info()->vcpu; + DebugKVMSH("guest spinlock disabled or fatal signal: " + "exit from process\n"); + kvm_spare_host_vcpu_release(vcpu); + do_exit(0); + return 0; +} + +int kvm_guest_spinlock_init(struct kvm *kvm) +{ + spinlock_unlocked_t *u; + int i; + + for (i = 0; i < SPINLOCK_HASH_SIZE; i++) + INIT_HLIST_HEAD(&kvm->arch.spinlock_hash[i]); + INIT_LIST_HEAD(&kvm->arch.spinunlocked_head); + INIT_LIST_HEAD(&kvm->arch.spinunlocked_free); + INIT_LIST_HEAD(&kvm->arch.spinunlocked_wait); + 
for (i = 0; i < SPINUNLOCKED_LIST_SIZE; i++) { + u = &kvm->arch.spinunlocked_list[i]; + INIT_LIST_HEAD(&u->unlocked_list); + list_add_tail(&u->unlocked_list, &kvm->arch.spinunlocked_free); + } + kvm->arch.spinlock_hash_lock = + __RAW_SPIN_LOCK_UNLOCKED(kvm->arch.spinlock_hash_lock); + kvm->arch.spinlock_hash_disable = false; + return 0; +} + +static void destroy_spinlock_list(struct hlist_head *head) +{ + spinlock_waiter_t *w; + struct hlist_node *tmp; + struct task_struct *t; + + hlist_for_each_entry_safe(w, tmp, head, wait_list) { + t = thread_info_task(w->ti); + DebugKVM("next spinlock waitqueue entry %s (%d) " + "gti %px lock %px\n", + t->comm, t->pid, w->gti, w->lock); + hlist_del(&w->wait_list); + wake_up_process(t); + } +} +void kvm_guest_spinlock_destroy(struct kvm *kvm) +{ + spinlock_unlocked_t *u; + spinlock_unlocked_t *tmp; + struct hlist_head *head; + struct task_struct *t; + unsigned long flags; + int i; + + DebugKVM("started\n"); + + raw_spin_lock_irqsave(&kvm->arch.spinlock_hash_lock, flags); + kvm->arch.spinlock_hash_disable = true; + for (i = 0; i < SPINLOCK_HASH_SIZE; i++) { + head = &kvm->arch.spinlock_hash[i]; + if (hlist_empty(head)) { + DebugKVM("hash index 0x%02x: waitqueue is empty\n", i); + continue; + } + DebugKVM("hash index 0x%02x waitqueue is not empty\n", i); + destroy_spinlock_list(head); + } + list_for_each_entry_safe(u, tmp, &kvm->arch.spinunlocked_head, + unlocked_list) { + t = thread_info_task(u->ti); + DebugKVM("next spin unlocked list entry %s (%d) " + "guest thread %px lock %px\n", + t->comm, t->pid, u->gti, u->lock); + list_del(&u->unlocked_list); + } + list_for_each_entry_safe(u, tmp, &kvm->arch.spinunlocked_free, + unlocked_list) { + DebugKVM("next spin unlocked free entry %px\n", u); + list_del(&u->unlocked_list); + } + list_for_each_entry_safe(u, tmp, &kvm->arch.spinunlocked_wait, + unlocked_list) { + t = thread_info_task(u->ti); + DebugKVM("next spin unlocked waiting list entry %s (%d) " + "guest thread %px lock %px\n", + 
t->comm, t->pid, u->gti, u->lock); + list_del(&u->unlocked_list); + wake_up_process(t); + } + raw_spin_unlock_irqrestore(&kvm->arch.spinlock_hash_lock, flags); +} diff --git a/arch/e2k/kvm/spmc.c b/arch/e2k/kvm/spmc.c new file mode 100644 index 000000000000..f9624b673d5b --- /dev/null +++ b/arch/e2k/kvm/spmc.c @@ -0,0 +1,1107 @@ +/* + * IOHUB-2/EIOHub System Power Management Controller emulation + * + * Copyright (c) 2019 MCST + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + * Authors: + * Salavat Gilyazov + * Based on e2k lms simulator implementation. + */ + +#include +#include +#include +#include +#include + +#include "ioepic.h" +#include "irq.h" +#include "spmc.h" + +#define mod_64(x, y) ((x) % (y)) + +#undef DEBUG_TIMER_MODE +#undef DebugTM +#define DEBUG_TIMER_MODE 0 /* system timer debugging */ +#define DebugTM(fmt, args...) 
\ +({ \ + if (DEBUG_TIMER_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_VERBOSE_TIMER_MODE +#undef DebugVTM +#define DEBUG_VERBOSE_TIMER_MODE 0 /* system timer verbode */ + /* debugging */ +#define DebugVTM(fmt, args...) \ +({ \ + if (DEBUG_VERBOSE_TIMER_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_SCI_MODE +#undef DebugSCI +#define DEBUG_SCI_MODE 0 /* SPMC IRQs debugging */ +#define DebugSCI(fmt, args...) \ +({ \ + if (DEBUG_SCI_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_IRQ_MODE +#undef DebugIRQ +#define DEBUG_IRQ_MODE 0 /* IRQs debugging */ +#define DebugIRQ(fmt, args...) \ +({ \ + if (DEBUG_IRQ_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_HR_TIMER_MODE +#undef DebugHRTM +#define DEBUG_HR_TIMER_MODE 0 /* high resolution timer debugging */ +#define DebugHRTM(fmt, args...) \ +({ \ + if (DEBUG_HR_TIMER_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_SPMC_STATUS_MODE +#undef DebugSTS +#define DEBUG_SPMC_STATUS_MODE 0 /* SPMC status updates debugging */ +#define DebugSTS(fmt, args...) \ +({ \ + if (DEBUG_SPMC_STATUS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef VERBOSE_DEBUG_SPMC_REGS_MODE +#undef DebugREGS +#define VERBOSE_DEBUG_SPMC_REGS_MODE 0 /* SPMC registers verbode */ + /* debugging */ +#if VERBOSE_DEBUG_SPMC_REGS_MODE +#define spmc_reg_debug(fmt, arg...) pr_err("%s() : " fmt, __func__, ##arg) +#else +#define spmc_reg_debug(fmt, arg...) 
+#endif /* VERBOSE_DEBUG_SPMC_REGS_MODE */ + +/* SPMC register read/write debug: 0 - OFF, 1 - ON */ +#undef DEBUG_SPMC_REGS_MODE +#define DEBUG_SPMC_REGS_MODE 0 + +#define HRTIMER_EXPIRES_PERCENT 90 /* percents */ +/* If hrtimer expires on HRTIMER_EXPIRES_PERCENTs it does not reactivate */ +#define HRTIMER_EXPIRES_APPROX(time) \ + (((time) / 100) * HRTIMER_EXPIRES_PERCENT) + +static inline struct kvm_spmc *to_spmc(struct kvm_io_device *dev) +{ + return container_of(dev, struct kvm_spmc, dev); +} + +static inline struct kvm_spmc *timer_to_spmc(struct kvm_timer *timer) +{ + return container_of(timer, struct kvm_spmc, sci_timer); +} + +static inline u64 cycles_to_count(struct kvm_spmc *spmc, u64 cycles) +{ + return mul_u64_u32_div(cycles, spmc->frequency, spmc->ticks_per_sec); +} + +static inline u64 count_to_cycles(struct kvm_spmc *spmc, u64 counter) +{ + return mul_u64_u32_div(counter, spmc->ticks_per_sec, spmc->frequency); +} + +#if DEBUG_SPMC_REGS_MODE +static inline void dump_spmc_pm_timer(u32 reg) +{ + spmc_pm_tmr_t timer; + + timer.reg = reg; + pr_cont("PM Timer: counter 0x%08x", timer.counter); +} +static inline void dump_spmc_pm1_status(u32 reg) +{ + spmc_pm1_sts_t status; + + status.reg = reg; + pr_cont("PM1 Status: 0x%08x : ON: %s %s %s %s %s %s " + "State: AC_power %d Bat_low %d", + reg, + (status.tmr_sts) ? "Timer" : "", + (status.ac_power_sts) ? "AC_power" : "", + (status.batlow_sts) ? "Bat_low" : "", + (status.atn_sts) ? "Atn_suspend" : "", + (status.pwrbtn_sts) ? "Power_batton" : "", + (status.wak_sts) ? "Wake_event" : "", + status.ac_power_state, + status.batlow_state); +} +static inline void dump_spmc_pm1_enable(u32 reg) +{ + spmc_pm1_en_t enable; + + enable.reg = reg; + pr_cont("PM1 Enable: 0x%08x : ON: %s %s %s %s " + "Time: %d bits", + reg, + (enable.tmr_en) ? "Timer" : "", + (enable.ac_pwr_en) ? "AC_power" : "", + (enable.batlow_en) ? "Bat_low" : "", + (enable.pwrbtn_en) ? "Power_batton" : "", + (enable.tmr_32) ? 
32 : 24); +} +static inline void dump_spmc_pm1_control(u32 reg) +{ + spmc_pm1_cnt_t control; + + control.reg = reg; + pr_cont("PM1 Control: 0x%08x : ACPI: %s Sleep: %s", + reg, + (control.sci_en) ? "enable" : "disable", + (control.slp_en) ? "enable" : "disable"); + if (control.slp_en) { + pr_cont(" type: S%d", control.slp_typx); + } +} +static inline void dump_spmc_atnsus_counter(u32 reg) +{ + spmc_atnsus_cnt_t suspend; + + suspend.reg = reg; + pr_cont("Attention Suspend: counter: 0x%08x", suspend.counter); +} +static inline void dump_spmc_pu_rst_counter(u32 reg) +{ + spmc_pu_rst_cnt_t reset; + + reset.reg = reg; + pr_cont("Power Up Reset: counter: 0x%08x", reset.counter); +} +static inline void dump_spmc_register(int reg_off, u32 value, const char *title) +{ + pr_cont("%s", title); + + switch (reg_off) { + case SPMC_PM_TMR_OFF: + dump_spmc_pm_timer(value); + break; + case SPMC_PM1_STS_OFF: + dump_spmc_pm1_status(value); + break; + case SPMC_PM1_EN_OFF: + dump_spmc_pm1_enable(value); + break; + case SPMC_PM1_CNT_OFF: + dump_spmc_pm1_control(value); + break; + case SPMC_ATNSUS_CNT_OFF: + dump_spmc_atnsus_counter(value); + break; + case SPMC_PURST_CNT_OFF: + dump_spmc_pu_rst_counter(value); + break; + default: + pr_cont("Invalid SPMC register offset 0x%x", reg_off); + break; + } + pr_info("\n"); +} +#else /* DEBUG_SPMC_REGS_MODE == 0 */ +static inline void dump_spmc_pm_timer(u32 reg) +{ +} +static inline void dump_spmc_pm1_status(u32 reg) +{ +} +static inline void dump_spmc_pm1_enable(u32 reg) +{ +} +static inline void dump_spmc_pm1_control(u32 reg) +{ +} +static inline void dump_spmc_atnsus_counter(u32 reg) +{ +} +static inline void dump_spmc_pu_rst_counter(u32 reg) +{ +} +static inline void dump_spmc_register(int reg_off, u32 value, const char *title) +{ +} +#endif /* DEBUG_SPMC_REGS_MODE */ + +static u64 kvm_get_up_to_date_sci_timer(struct kvm_vcpu *vcpu, + struct kvm_spmc *spmc) +{ + struct kvm_timer *timer = &spmc->sci_timer; + u64 running_time; + s64 
running_cycles; + s64 running_ns, host_ns; + s64 cycles, host_cycles; + ktime_t now; + u64 now_ns; + u64 counter, host_counter; + u32 limit, start_count, new_count; + unsigned long flags; + + DebugSCI("started: g_mode #%d timer period 0x%llx\n", + spmc->g_state, timer->period); + + if (spmc->g_state != SPMC_G0_STATE) { + /* timer is not incremented, so return current value */ + return spmc->regs.pm_timer.counter; + } + + ASSERT(timer != NULL); + + if (vcpu == NULL) { + /* call from hrtimer handler, it need use last VCPU */ + vcpu = timer->vcpu; + } + + raw_spin_lock_irqsave(&timer->lock, flags); + + if (unlikely(timer->period == 0)) { + raw_spin_unlock_irqrestore(&timer->lock, flags); + return spmc->regs.pm_timer.counter; + } + start_count = timer->start_count; + running_time = + (vcpu != NULL) ? kvm_do_get_guest_vcpu_running_time(vcpu) : 0; + cycles = get_cycles(); + now = timer->timer.base->get_time(); + now_ns = ktime_to_ns(now); + DebugSCI("%s : running cycles at start 0x%llx, now 0x%llx, " + "current cycles 0x%llx, start counter 0x%x period ns 0x%llx\n", + timer->name, timer->running_time, running_time, + cycles, start_count, timer->period); + DebugSCI("%s : host start time at nsec 0x%llx, now 0x%llx\n", + timer->name, timer->host_start_ns, now_ns); + + running_cycles = running_time - timer->running_time; + if (running_cycles < 0) { + /* probably it starts on or migrates to other VCPU/CPU */ + running_cycles = 0; + } + running_ns = cycles_2nsec(running_cycles); + host_ns = now_ns - timer->host_start_ns; + if (host_ns < 0) { + /* probably it starts on or migrates to other CPU */ + host_ns = 0; + } + host_cycles = nsecs_2cycles(host_ns); + DebugSCI("%s : current running cycles 0x%llx ns 0x%llx\n", + timer->name, running_cycles, running_ns); + DebugSCI("%s : host running cycles 0x%llx ns 0x%llx\n", + timer->name, host_cycles, host_ns); + + limit = kvm_get_sci_timer_limit(spmc); + + counter = cycles_to_count(spmc, running_cycles) + start_count; + host_counter = 
cycles_to_count(spmc, host_cycles) + start_count; + new_count = host_counter & kvm_get_sci_timer_max_mask(spmc); + + /* update timer counter value */ + if (timer->type == kvm_sci_timer_type) { + spmc->regs.pm_timer.counter = new_count; + } else { + pr_err("%s(): %d is unsupported or invalid timer type\n", + __func__, timer->type); + } + timer->start_count = new_count; + timer->host_start_ns = now_ns; + timer->running_time = running_time; + timer->vcpu = vcpu; + + raw_spin_unlock_irqrestore(&timer->lock, flags); + + DebugSCI("%s : guest running cycles 0x%llx " + "counter 0x%llx : %lld%%\n", + timer->name, running_cycles, counter, + (counter * 100) / host_counter); + DebugSCI("%s : host running cycles 0x%llx counter 0x%llx\n", + timer->name, host_cycles, host_counter); + DebugSCI("%s : host counter 0x%llx limit 0x%x : new counter 0x%x\n", + timer->name, host_counter, limit, new_count); + + return host_counter; +} + +static inline bool spmc_in_range(struct kvm_spmc *spmc, gpa_t addr) +{ + return addr >= spmc->base_address + SPMC_REGS_CFG_OFFSET && + addr < spmc->base_address + SPMC_REGS_CFG_OFFSET + + SPMC_REGS_CFG_LENGTH; +} +static inline u32 spmc_get_reg(struct kvm_spmc *spmc, int reg_off) +{ + int reg_no = reg_off - SPMC_REGS_CFG_OFFSET; + + ASSERT(reg_no >= 0 && reg_no < SPMC_REGS_CFG_LENGTH); + + spmc_reg_debug("%02x : %08x from %px\n", + reg_off, *((u32 *) ((void *)(&spmc->regs) + reg_no)), + ((u32 *) ((void *)(&spmc->regs) + reg_no))); + return *((u32 *) ((void *)(&spmc->regs) + reg_no)); +} + +static inline void spmc_set_reg(struct kvm_spmc *spmc, int reg_off, u32 val) +{ + int reg_no = reg_off - SPMC_REGS_CFG_OFFSET; + + ASSERT(reg_no >= 0 && reg_no < SPMC_REGS_CFG_LENGTH); + + *((u32 *) ((void *)(&spmc->regs) + reg_no)) = val; + spmc_reg_debug("%02x : %08x to %px\n", + reg_off, *((u32 *) ((void *)(&spmc->regs) + reg_no)), + ((u32 *) ((void *)(&spmc->regs) + reg_no))); +} + +static inline bool get_sci_timer_status(struct kvm_spmc *spmc) +{ + return 
!!spmc->regs.pm1_status.tmr_sts;
}

/* Latch the PM1 "timer carry" status bit (sticky until guest writes 1 to clear). */
static inline void set_sci_timer_status(struct kvm_spmc *spmc)
{
	spmc->regs.pm1_status.tmr_sts = 1;
	DebugSTS("sci timer status, pm1_status : 0x%08x\n",
		spmc->regs.pm1_status.reg);
}

/* Read the AC-power event status bit from PM1 status. */
static inline bool get_ac_power_status(struct kvm_spmc *spmc)
{
	return !!spmc->regs.pm1_status.ac_power_sts;
}

/* Latch the AC-power event status bit. */
static inline void set_ac_power_status(struct kvm_spmc *spmc)
{
	spmc->regs.pm1_status.ac_power_sts = 1;
	DebugSTS("ac power status, pm1_status : 0x%08x\n",
		spmc->regs.pm1_status.reg);
}

/* Read the battery-low event status bit. ("batton" is a historical
 * misspelling of "button"/"battery" kept for interface stability.) */
static inline bool get_batton_low_status(struct kvm_spmc *spmc)
{
	return !!spmc->regs.pm1_status.batlow_sts;
}

/* Latch the battery-low event status bit. */
static inline void set_batton_low_status(struct kvm_spmc *spmc)
{
	spmc->regs.pm1_status.batlow_sts = 1;
	DebugSTS("batton low status, pm1_status : 0x%08x\n",
		spmc->regs.pm1_status.reg);
}

/* Read the power-button event status bit. */
static inline bool get_power_batton_status(struct kvm_spmc *spmc)
{
	return !!spmc->regs.pm1_status.pwrbtn_sts;
}

/* Latch the power-button event status bit. */
static inline void set_power_batton_status(struct kvm_spmc *spmc)
{
	spmc->regs.pm1_status.pwrbtn_sts = 1;
	DebugSTS("power batton status, pm1_status : 0x%08x\n",
		spmc->regs.pm1_status.reg);
}

/* Read the wake-event status bit. */
static inline bool get_wake_up_event_status(struct kvm_spmc *spmc)
{
	return !!spmc->regs.pm1_status.wak_sts;
}

/* Latch the wake-event status bit. */
static inline void set_wake_up_event_status(struct kvm_spmc *spmc)
{
	spmc->regs.pm1_status.wak_sts = 1;
	DebugSTS("wake up event status, pm1_status : 0x%08x\n",
		spmc->regs.pm1_status.reg);
}

/* Assert SLP_EN in PM1 control (guest requested a sleep transition). */
static inline void set_sleep_state_enable(struct kvm_spmc *spmc)
{
	spmc->regs.pm1_control.slp_en = 1;
}

/* Clear SLP_EN after the sleep request has been consumed. */
static inline void reset_sleep_state_enable(struct kvm_spmc *spmc)
{
	spmc->regs.pm1_control.slp_en = 0;
}

/*
 * Drive the SCI interrupt line level: 'active' raises it, !active lowers it.
 * Routed through the generic KVM irqchip; irq_id doubles as source id.
 */
static void generate_interrupt(struct kvm *kvm, spmc_irq_map_t irq_id,
				bool active)
{
	DebugIRQ("IRQ #%d level is %d\n", irq_id, active);
	kvm_set_irq(kvm, irq_id, irq_id, active, false);
}

/*
 * Compute the desired SCI line level from enable + status bit pairs.
 * Returns true when any enabled event source has its status bit latched.
 */
static bool spmc_calculate_sci(struct kvm_spmc *spmc)
{
	return 
kvm_spmc_acpi_enable(spmc) && + (kvm_sci_timer_enable(spmc) && + get_sci_timer_status(spmc)) || + (kvm_spmc_ac_power_enable(spmc) && + get_ac_power_status(spmc)) || + (kvm_spmc_batton_low_enable(spmc) && + get_batton_low_status(spmc)) || + ((spmc->g_state == SPMC_G0_STATE) && + kvm_spmc_power_batton_enable(spmc) && + get_power_batton_status(spmc)); +} + +static void spmc_check_sci(struct kvm_spmc *spmc) +{ + bool new_sci = spmc_calculate_sci(spmc); + + if (new_sci != spmc->sci_state) { + generate_interrupt(spmc->kvm, spmc->sci_timer_irq_id, new_sci); + spmc->sci_state = new_sci; + } +} + +static void update_sleep_state(struct kvm_spmc *spmc) +{ + if (kvm_spmc_sleep_state_enable(spmc)) { + switch (kvm_spmc_sleep_state(spmc)) { + case SPMC_S3_SLEEP_STATE: + case SPMC_S4_SLEEP_STATE: + case SPMC_S5_SLEEP_STATE: + spmc->g_state = SPMC_G1_STATE; + spmc->s_state = kvm_spmc_sleep_state(spmc); + + pr_err("%s(): sleep state %d support is not yet " + "supported\n", + __func__, spmc->s_state); + + break; + default: + spmc->g_state = SPMC_G1_STATE; + spmc->s_state = SPMC_S0_SLEEP_STATE; + break; + } + reset_sleep_state_enable(spmc); + } +} + +static u32 update_sci_timer_value(struct kvm_vcpu *vcpu, struct kvm_spmc *spmc) +{ + kvm_get_up_to_date_sci_timer(vcpu, spmc); + return spmc->regs.pm_timer.reg; +} + +static void start_sci_timer(struct kvm_vcpu *vcpu, struct kvm_spmc *spmc, + u32 start_count, u64 cycles_period) +{ + struct kvm_timer *sci_timer = &spmc->sci_timer; + ktime_t now; + u64 ns_period; + s64 offset, ns_expired; + + ns_period = cycles_2nsec(cycles_period); + if (ns_period == 0) { + sci_timer->period = 0; + return; + } + /* + * Do not allow the guest to program periodic timers with small + * interval, since the hrtimers are not throttled by the host + * scheduler. 
+ */ + if (ns_period < NSEC_PER_MSEC / 2) { + ns_period = NSEC_PER_MSEC / 2; + } + + + ASSERT(!hrtimer_active(&sci_timer->timer)); + + sci_timer->vcpu = vcpu; + sci_timer->start_count = start_count; + sci_timer->period = ns_period; + now = sci_timer->timer.base->get_time(); + sci_timer->host_start_ns = ktime_to_ns(now); + sci_timer->running_time = + (vcpu) ? kvm_get_guest_vcpu_running_time(vcpu) : 0; + ns_expired = ns_period; + if (start_count > 0) { + /* counter statrs from current freezed value */ + offset = count_to_cycles(spmc, start_count); + ns_expired -= cycles_2nsec(offset % cycles_period); + ASSERT(ns_expired >= 0); + } + hrtimer_start(&sci_timer->timer, + ktime_add_ns(now, ns_expired), + HRTIMER_MODE_ABS); + DebugTM("%s started hrtimer at host ns 0x%llx start count 0x%x, " + "period 0x%llx\n", + sci_timer->name, sci_timer->host_start_ns, + start_count, ns_period); + DebugTM("%s running time cycles 0x%llx\n", + sci_timer->name, sci_timer->running_time); + + DebugTM("%s freq is %d Hz, now 0x%llx, timer period cycles 0x%llx, " + "nsec %lld, expire @ 0x%llx\n", + sci_timer->name, spmc->frequency, ktime_to_ns(now), + cycles_period, sci_timer->period, + hrtimer_get_expires_ns(&sci_timer->timer)); +} + +static void restart_sci_timer(struct kvm_vcpu *vcpu, struct kvm_spmc *spmc) +{ + u32 start, limit; + u64 increments, cycles_increments; + + hrtimer_cancel(&spmc->sci_timer.timer); + kthread_flush_work(&spmc->sci_timer.expired); + DebugTM("PM timer counter hrtimer canceled at now 0x%llx\n", + ktime_to_ns(ktime_get())); + if (spmc->g_state != SPMC_G0_STATE) { + /* timer is not active and freez at current state */ + return; + } + start = spmc->regs.pm_timer.counter; /* current start value */ + limit = kvm_get_sci_timer_limit(spmc); + + increments = limit - 0 /* counter start value */; + cycles_increments = count_to_cycles(spmc, increments); + DebugTM("PM timer counter from 0x%x to limit 0x%x, increments: 0x%llx " + "cycles 0x%llx\n", + start, limit, increments, 
cycles_increments); + start_sci_timer(vcpu, spmc, start, cycles_increments); +} + +static int spmc_conf_io_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this, + gpa_t address, int len, void *data) +{ + struct kvm_spmc *spmc = to_spmc(this); + unsigned int offset = address - spmc->base_address; + u32 result, mask, reg; + const char *reg_name = "???"; + + spmc_reg_debug("address 0x%llx, offset %02x, len %d to %px\n", + address, offset, len, data); + + if (!spmc_in_range(spmc, address)) + return -EOPNOTSUPP; + + if (len == 1) { + mask = 0x000000ffUL; + } else if (len == 2) { + mask = 0x0000ffffUL; + } else if (len == 4) { + mask = 0xffffffffUL; + } else { + ASSERT(len == 4 || len == 2 || len == 1); + } + + mutex_lock(&spmc->lock); + switch (offset) { + case SPMC_PM_TMR_OFF: + reg = update_sci_timer_value(vcpu, spmc); + result = reg & mask; + reg_name = "PM Timer"; + break; + case SPMC_PM1_STS_OFF: + reg = spmc->regs.pm1_status.reg; + result = reg & mask; + reg_name = "PM1 Status"; + break; + case SPMC_PM1_EN_OFF: { + spmc_pm1_en_t pm_enable; + + pm_enable.reg = 0; + pm_enable.tmr_en = spmc->regs.pm1_enable.tmr_en; + pm_enable.tmr_32 = spmc->regs.pm1_enable.tmr_32; + pm_enable.ac_pwr_en = spmc->regs.pm1_enable.ac_pwr_en; + pm_enable.batlow_en = spmc->regs.pm1_enable.batlow_en; + pm_enable.pwrbtn_en = spmc->regs.pm1_enable.pwrbtn_en; + reg = pm_enable.reg; + result = reg & mask; + reg_name = "PM1 Enable"; + break; + } + case SPMC_PM1_CNT_OFF: { + spmc_pm1_cnt_t pm_control; + + pm_control.reg = 0; + pm_control.sci_en = spmc->regs.pm1_control.sci_en; + pm_control.slp_typx = spmc->regs.pm1_control.slp_typx; + reg = pm_control.reg; + result = reg & mask; + reg_name = "PM1 Control"; + break; + } + case SPMC_ATNSUS_CNT_OFF: + reg = spmc->regs.atnsus_counter.counter; + result = reg & mask; + reg_name = "ATteNtion Suspend counter"; + break; + case SPMC_PURST_CNT_OFF: + reg = spmc->regs.pu_rst_counter.counter; + result = reg & mask; + reg_name = "Power Up Reset counter"; + 
break; + default: + reg = 0xffffffff; + result = reg & mask; + pr_err("%s() : invalid SPMC register offset 0x%x\n", + __func__, offset); + break; + } + mutex_unlock(&spmc->lock); + + *(u32 *)data = result; + + spmc_reg_debug("%s data 0x%08x\n", reg_name, *(u32 *)data); + + dump_spmc_register(offset, reg, "get SPMC register: "); + + return 0; +} + +static int spmc_conf_io_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, + gpa_t address, int len, const void *data) +{ + struct kvm_spmc *spmc = to_spmc(this); + unsigned int offset = address - spmc->base_address; + u32 val, mask, reg; + bool acpi_was_enable; + const char *reg_name = "???"; + + spmc_reg_debug("address 0x%llx, offset %02x, len %d 0x%08x from %px\n", + address, offset, len, *(u32 *)data, data); + + if (!spmc_in_range(spmc, address)) + return -EOPNOTSUPP; + + val = *(u32 *)data; + reg = 0xffffffff; + if (len == 1) { + val &= 0xff; + mask = 0xffffff00UL; + } else if (len == 2) { + val &= 0xffff; + mask = 0xffff0000UL; + } else if (len == 4) { + mask = 0xffffffffUL; + } else { + ASSERT(len == 4 || len == 2 || len == 1); + } + + mutex_lock(&spmc->lock); + acpi_was_enable = kvm_spmc_acpi_enable(spmc); + if (likely(acpi_was_enable)) { + switch (offset) { + case SPMC_PM_TMR_OFF: + spmc->regs.pm_timer.counter &= mask; + spmc->regs.pm_timer.counter |= val; + reg = spmc->regs.pm_timer.reg; + reg_name = "PM Timer"; + break; + case SPMC_PM1_STS_OFF: { + spmc_pm1_sts_t pm_status; + + pm_status.reg = val; + if (pm_status.tmr_sts) { + /* clear timer status bit */ + spmc->regs.pm1_status.tmr_sts = 0; + } + if (pm_status.ac_power_sts) { + /* clear ac power status bit */ + spmc->regs.pm1_status.ac_power_sts = 0; + } + if (pm_status.batlow_sts) { + /* clear batton low status bit */ + spmc->regs.pm1_status.batlow_sts = 0; + } + if (pm_status.pwrbtn_sts) { + /* clear power batton status bit */ + spmc->regs.pm1_status.pwrbtn_sts = 0; + } + if (pm_status.wak_sts) { + /* clear wake up event status bit */ + 
spmc->regs.pm1_status.wak_sts = 0; + } + reg = spmc->regs.pm1_status.reg; + reg_name = "PM1 Status"; + break; + } + case SPMC_PM1_EN_OFF: { + spmc_pm1_en_t pm_enable; + bool was_tmr32 = kvm_sci_timer_32(spmc); + + pm_enable.reg = val; + spmc->regs.pm1_enable.tmr_en = pm_enable.tmr_en; + spmc->regs.pm1_enable.tmr_32 = pm_enable.tmr_32; + spmc->regs.pm1_enable.ac_pwr_en = pm_enable.ac_pwr_en; + spmc->regs.pm1_enable.batlow_en = pm_enable.batlow_en; + spmc->regs.pm1_enable.pwrbtn_en = pm_enable.pwrbtn_en; + reg = spmc->regs.pm1_enable.reg; + reg_name = "PM1 Enable"; + if (was_tmr32 != kvm_sci_timer_32(spmc)) { + restart_sci_timer(vcpu, spmc); + } + + break; + } + case SPMC_PM1_CNT_OFF: { + spmc_pm1_cnt_t pm_control; + + pm_control.reg = val; + spmc->regs.pm1_control.sci_en = pm_control.sci_en; + spmc->regs.pm1_control.slp_typx = pm_control.slp_typx; + spmc->regs.pm1_control.slp_en = pm_control.slp_en; + reg = spmc->regs.pm1_control.reg; + reg_name = "PM1 Control"; + update_sleep_state(spmc); + break; + } + case SPMC_ATNSUS_CNT_OFF: + if (spmc->g_state == SPMC_G0_STATE) { + spmc->regs.atnsus_counter.counter &= mask; + spmc->regs.atnsus_counter.counter |= val; + } + reg = spmc->regs.atnsus_counter.reg; + reg_name = "ATteNtion Suspend counter"; + break; + case SPMC_PURST_CNT_OFF: + if (spmc->g_state == SPMC_G0_STATE) { + if (val < SPMC_PU_RST_CNT_MIN) { + val = SPMC_PU_RST_CNT_MIN; + } + spmc->regs.pu_rst_counter.counter &= mask; + spmc->regs.pu_rst_counter.counter |= val; + } + reg = spmc->regs.pu_rst_counter.reg; + reg_name = "Power Up Reset counter"; + break; + default: + pr_err("%s() : invalid SPMC register offset 0x%x\n", + __func__, offset); + break; + } + spmc_check_sci(spmc); + } else { + /* SPMC/ACPI disabled and can only enable the mode */ + switch (offset) { + case SPMC_PM1_CNT_OFF: { + spmc_pm1_cnt_t pm_control; + + pm_control.reg = val; + spmc->regs.pm1_control.sci_en = pm_control.sci_en; + kvm_get_up_to_date_sci_timer(vcpu, spmc); + reg = 
spmc->regs.pm1_control.reg; + reg_name = "PM1 Control"; + break; + } + case SPMC_PM_TMR_OFF: + case SPMC_PM1_STS_OFF: + case SPMC_PM1_EN_OFF: + /* cannot be updated */ + pr_warning("%s(): SPMC/ACPI disabled, so writing to " + "SPMC register 0x%x is ignored\n", + __func__, offset); + break; + case SPMC_ATNSUS_CNT_OFF: + spmc->regs.atnsus_counter.counter &= mask; + spmc->regs.atnsus_counter.counter |= val; + reg = spmc->regs.atnsus_counter.reg; + reg_name = "ATteNtion Suspend counter"; + break; + case SPMC_PURST_CNT_OFF: + if (val < SPMC_PU_RST_CNT_MIN) { + val = SPMC_PU_RST_CNT_MIN; + } + spmc->regs.pu_rst_counter.counter &= mask; + spmc->regs.pu_rst_counter.counter |= val; + reg = spmc->regs.pu_rst_counter.reg; + reg_name = "Power Up Reset counter"; + break; + default: + pr_err("%s() : invalid SPMC register offset 0x%x\n", + __func__, offset); + break; + } + } + mutex_unlock(&spmc->lock); + + spmc_reg_debug("%s data 0x%08x\n", reg_name, reg); + + dump_spmc_register(offset, reg, "set SPMC register: "); + + return 0; +} + +static void spmc_sci_timer_do_work(struct kthread_work *work) +{ + struct kvm_timer *timer = container_of(work, struct kvm_timer, expired); + struct kvm *kvm = timer->kvm; + struct kvm_spmc *spmc = timer_to_spmc(timer); + + if (timer->work == kvm_set_irq_timer_work) { + ASSERT(spmc->sci_state == false); + generate_interrupt(kvm, spmc->sci_timer_irq_id, true); + spmc->sci_state = true; + } else if (timer->work == kvm_reset_irq_timer_work) { + ASSERT(spmc->sci_state == true); + generate_interrupt(kvm, spmc->sci_timer_irq_id, false); + spmc->sci_state = false; + } else { + pr_err("%s(): %d is unknown or unsupported timer " + "expires work\n", + __func__, timer->work); + } +} + +static void do_sci_timer(struct kvm_vcpu *vcpu, void *data) +{ + struct kvm_spmc *spmc = data; + struct kvm_timer *sci_timer = &spmc->sci_timer; + u64 counter; + u32 limit, period_start; + + counter = kvm_get_up_to_date_sci_timer(vcpu, spmc); + limit = 
kvm_get_sci_timer_limit(spmc); + period_start = sci_timer->period_start; + + if ((counter - period_start) > limit + limit / 4) { + DebugHRTM("timer counter 0x%llx, period start at 0x%x " + "exceeded limit 0x%x on +0x%llx)", + counter, period_start, limit, + (counter - period_start) - limit); + } else if ((counter - period_start) < (limit - limit / 8)) { + DebugHRTM("timer counter 0x%llx, period start at 0x%x " + "did not reach limit 0x%x on -0x%llx", + counter, period_start, limit, + limit - (counter - period_start)); + } + sci_timer->period_start = counter & kvm_get_sci_timer_max_mask(spmc); + + /* set SCI timer status bit */ + set_sci_timer_status(spmc); + + if (kvm_sci_timer_enable(spmc)) { + /* it need generate SCI interrupt */ + sci_timer->work = kvm_set_irq_timer_work; + kthread_queue_work(sci_timer->worker, &sci_timer->expired); + } +} + +static enum hrtimer_restart sci_timer_fn(struct kvm_spmc *spmc, + struct kvm_timer *ktimer) +{ + struct kvm_vcpu *vcpu; + s64 period = ktimer->period; + + vcpu = ktimer->vcpu; + + if (vcpu != NULL) { + DebugVTM("%s started on VCPU #%d\n", + ktimer->name, vcpu->vcpu_id); + } else { + DebugVTM("%s started on background stack\n", ktimer->name); + } + + ktimer->t_ops->timer_fn(vcpu, spmc); + + if (ktimer->t_ops->is_periodic(ktimer)) { + hrtimer_add_expires_ns(&ktimer->timer, period); + DebugVTM("%s periodic timer restarted " + "at host ns 0x%llx expires at 0x%llx\n", + ktimer->name, ktimer->host_start_ns, + hrtimer_get_expires_ns(&ktimer->timer)); + return HRTIMER_RESTART; + } + DebugVTM("%s handled\n", ktimer->name); + return HRTIMER_NORESTART; +} + +static enum hrtimer_restart spmc_sci_timer_fn(struct hrtimer *data) +{ + struct kvm_spmc *spmc; + struct kvm_timer *sci_timer; + + sci_timer = container_of(data, struct kvm_timer, timer); + spmc = timer_to_spmc(sci_timer); + return sci_timer_fn(spmc, sci_timer); +} + +static bool sci_is_periodic(struct kvm_timer *ktimer) +{ + return true; /* SCI timer are periodic */ +} + +static 
void kvm_spmc_reset(struct kvm_spmc *spmc) +{ + + /* Stop the timer in case it's a reset to an active state */ + hrtimer_cancel(&spmc->sci_timer.timer); + kthread_flush_work(&spmc->sci_timer.expired); + + spmc->base_address = 0; + spmc->sci_timer_irq_id = SPMC_SCI_IRQ_ID; + + /* registers state on reset */ + spmc_set_reg(spmc, SPMC_PM_TMR_OFF, 0); + spmc_set_reg(spmc, SPMC_PM1_STS_OFF, 0); + spmc_set_reg(spmc, SPMC_PM1_EN_OFF, 0); + spmc_set_reg(spmc, SPMC_PM1_CNT_OFF, 0); + spmc_set_reg(spmc, SPMC_ATNSUS_CNT_OFF, 0x00370000); + spmc_set_reg(spmc, SPMC_PURST_CNT_OFF, 0x00370000); + + spmc->sci_state == false; + spmc->s_state = SPMC_S0_SLEEP_STATE; + spmc->g_state = SPMC_G0_STATE; + + restart_sci_timer(NULL, spmc); +} + +static const struct kvm_io_device_ops spmc_conf_io_ops = { + .read = spmc_conf_io_read, + .write = spmc_conf_io_write, +}; + +static const struct kvm_timer_ops spmc_sci_timer_ops = { + .is_periodic = sci_is_periodic, + .timer_fn = do_sci_timer, +}; + +struct kvm_spmc *kvm_create_spmc(struct kvm *kvm, int node_id, + u32 ticks_per_sec, /* CPU frequency at herz */ + u32 spmc_timer_freq) /* PM timer frequency at herz */ +{ + struct kvm_spmc *spmc; + pid_t pid_nr; + + ASSERT(kvm_get_spmc(kvm, node_id) == NULL); + + spmc = kzalloc(sizeof(struct kvm_spmc), GFP_KERNEL); + if (!spmc) + return NULL; + + mutex_init(&spmc->lock); + + spmc->kvm = kvm; + spmc->ticks_per_sec = ticks_per_sec; + spmc->frequency = spmc_timer_freq; + + pid_nr = task_pid_nr(current); + + hrtimer_init(&spmc->sci_timer.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + spmc->sci_timer.timer.function = spmc_sci_timer_fn; + spmc->sci_timer.name = "pm timer"; + spmc->sci_timer.type = kvm_sci_timer_type; + spmc->sci_timer.t_ops = &spmc_sci_timer_ops; + raw_spin_lock_init(&spmc->sci_timer.lock); + spmc->sci_timer.worker = kthread_create_worker(0, "kvm-pmtmr/%d/%d", + pid_nr, node_id); + if (IS_ERR(spmc->sci_timer.worker)) + goto fail_sci_timer; + kthread_init_work(&spmc->sci_timer.expired, 
spmc_sci_timer_do_work); + spmc->sci_timer.kvm = kvm; + + kvm_spmc_reset(spmc); + + kvm_set_spmc(kvm, node_id, spmc); + + return spmc; + +fail_sci_timer: + kfree(spmc); + return NULL; +} + +int kvm_spmc_set_base(struct kvm *kvm, int node_id, unsigned long conf_base) +{ + struct kvm_spmc *spmc = kvm_get_spmc(kvm, node_id); + int ret; + + if (spmc == NULL) { + kvm_create_spmc(kvm, node_id, + ((cpu_freq_hz + (USEC_PER_SEC - 1)) / USEC_PER_SEC) * + USEC_PER_SEC, + EIOH_SPMC_PM_TIMER_FREQ /* only SPMC of EIOHub */ + /* is now supported */); + spmc = kvm_get_spmc(kvm, node_id); + if (spmc == NULL) { + pr_err("%s(): SPMC node #%d could not be created, " + "ignore setup\n", + __func__, node_id); + return -ENODEV; + } + } + if (spmc->base_address == conf_base) { + pr_info("%s(): SPMC node #%d base 0x%lx is the same, " + "so ignore update\n", + __func__, node_id, conf_base); + return 0; + } + + mutex_lock(&kvm->slots_lock); + if (spmc->base_address != 0) { + /* base address was already set, so update */ + kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &spmc->dev); + } + spmc->base_address = conf_base; + kvm_iodevice_init(&spmc->dev, &spmc_conf_io_ops); + ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, + conf_base + SPMC_REGS_CFG_OFFSET, SPMC_REGS_CFG_LENGTH, + &spmc->dev); + mutex_unlock(&kvm->slots_lock); + if (ret < 0) { + kvm_set_spmc(kvm, node_id, NULL); + kfree(spmc); + pr_err("%s(): could not register SPMC node #%d as PIO " + "bus device, error %d\n", + __func__, node_id, ret); + } + + return ret; +} + +void kvm_free_spmc(struct kvm *kvm, int node_id) +{ + struct kvm_spmc *spmc = kvm_get_spmc(kvm, node_id); + + if (spmc) { + if (spmc->base_address != 0) { + mutex_lock(&kvm->slots_lock); + kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, + &spmc->dev); + spmc->base_address = 0; + mutex_unlock(&kvm->slots_lock); + } + hrtimer_cancel(&spmc->sci_timer.timer); + kthread_flush_work(&spmc->sci_timer.expired); + kthread_destroy_worker(spmc->sci_timer.worker); + kfree(spmc); + 
kvm_set_spmc(kvm, node_id, NULL); + } +} +void kvm_free_all_spmc(struct kvm *kvm) +{ + int node_id; + + for (node_id = 0; node_id < KVM_MAX_EIOHUB_NUM; node_id++) { + kvm_free_spmc(kvm, node_id); + } +} diff --git a/arch/e2k/kvm/spmc.h b/arch/e2k/kvm/spmc.h new file mode 100644 index 000000000000..bca6e8eaeb13 --- /dev/null +++ b/arch/e2k/kvm/spmc.h @@ -0,0 +1,150 @@ +#ifndef __KVM_SPMC_H +#define __KVM_SPMC_H + +#include +#include +#include +#include "spmc_regs.h" +#include "kvm_timer.h" + +#define DEBUG_SPMC +#undef ASSERT +#ifdef DEBUG_SPMC +#define ASSERT(x) \ +do { \ + if (!(x)) { \ + pr_emerg("assertion failed %s: %d: %s\n", \ + __FILE__, __LINE__, #x); \ + BUG(); \ + } \ +} while (0) +#else /* ! DEBUG_SPMC */ +#define ASSERT(x) do { } while (0) +#endif /* DEBUG_SPMC */ + +typedef struct kvm_spmc_regs { + spmc_pm_tmr_t pm_timer; /* PM timer counter */ + spmc_pm1_sts_t pm1_status; /* PM status */ + spmc_pm1_en_t pm1_enable; /* PM enables */ + spmc_pm1_cnt_t pm1_control; /* PM control */ + spmc_atnsus_cnt_t atnsus_counter; /* attention suspend counter */ + spmc_pu_rst_cnt_t pu_rst_counter; /* power up reset counter */ +} kvm_spmc_regs_t; + +typedef struct kvm_spmc { + u64 base_address; /* SPMC configuration space base */ + /* address */ + kvm_spmc_regs_t regs; + struct kvm_timer sci_timer; + spmc_sleep_state_t s_state; /* current sleep state */ + spmc_g_state_t g_state; /* current G state */ + bool sci_state; /* SCI interrupt state: active or not */ + int sci_timer_irq_id; + u32 ticks_per_sec; /* cycles (ticks) per 1 sec */ + u32 frequency; /* frequency of PM counter increment at herz */ + /* standard frequency of EIOHub SPMC timer */ + /* is 3,579,545 Herz */ + struct kvm_io_device dev; + struct kvm *kvm; + struct mutex lock; +} kvm_spmc_t; + +static inline bool kvm_spmc_acpi_enable(struct kvm_spmc *spmc) +{ + return !!spmc->regs.pm1_control.sci_en; +} + +static inline bool kvm_spmc_sleep_state_enable(struct kvm_spmc *spmc) +{ + return 
!!spmc->regs.pm1_control.slp_en; +} + +static inline spmc_sleep_state_t kvm_spmc_sleep_state(struct kvm_spmc *spmc) +{ + return spmc->regs.pm1_control.slp_typx; +} + +static inline bool kvm_sci_timer_enable(struct kvm_spmc *spmc) +{ + return !!spmc->regs.pm1_enable.tmr_en; +} + +static inline bool kvm_spmc_ac_power_enable(struct kvm_spmc *spmc) +{ + return !!spmc->regs.pm1_enable.ac_pwr_en; +} + +static inline bool kvm_spmc_batton_low_enable(struct kvm_spmc *spmc) +{ + return !!spmc->regs.pm1_enable.batlow_en; +} + +static inline bool kvm_spmc_power_batton_enable(struct kvm_spmc *spmc) +{ + return !!spmc->regs.pm1_enable.pwrbtn_en; +} + +static inline bool kvm_sci_timer_32(struct kvm_spmc *spmc) +{ + return !!spmc->regs.pm1_enable.tmr_32; +} + +static inline bool kvm_sci_timer_24(struct kvm_spmc *spmc) +{ + return !kvm_sci_timer_32(spmc); +} + +static inline u32 kvm_get_sci_timer_limit(struct kvm_spmc *spmc) +{ + if (kvm_sci_timer_32(spmc)) { + /* counter is 32 bits */ + return 1UL << 31; + } else { + /* counter is 24 bits */ + return 1UL << 23; + } +} + +static inline u32 kvm_get_sci_timer_limit_mask(struct kvm_spmc *spmc) +{ + if (kvm_sci_timer_32(spmc)) { + /* counter is 32 bits */ + return ~0UL; + } else { + /* counter is 24 bits */ + return (1UL << 24) - 1; + } +} + +static inline u32 kvm_get_sci_timer_max_mask(struct kvm_spmc *spmc) +{ + /* counter is 32 bits */ + return ~0UL; +} + +static inline struct kvm_spmc *kvm_get_spmc(struct kvm *kvm, int node_id) +{ + ASSERT(node_id < KVM_MAX_EIOHUB_NUM); + return kvm->arch.spmc[node_id]; +} + +static inline void kvm_set_spmc(struct kvm *kvm, int node_id, + struct kvm_spmc *spmc) +{ + ASSERT(node_id < KVM_MAX_EIOHUB_NUM); + kvm->arch.spmc[node_id] = spmc; +} + +static inline bool kvm_spmc_in_kernel(struct kvm *kvm, int node_id) +{ + return kvm_get_spmc(kvm, node_id) != NULL; +} +extern int kvm_spmc_set_base(struct kvm *kvm, int node_id, + unsigned long conf_base); + +extern struct kvm_spmc *kvm_create_spmc(struct kvm 
*kvm, int node_id, + u32 ticks_per_sec, u32 spmc_timer_freq); +extern void kvm_free_spmc(struct kvm *kvm, int node_id); +extern void kvm_free_all_spmc(struct kvm *kvm); + +#endif /* __KVM_SPMC_H */ diff --git a/arch/e2k/kvm/spmc_regs.h b/arch/e2k/kvm/spmc_regs.h new file mode 100644 index 000000000000..42f2e5742d75 --- /dev/null +++ b/arch/e2k/kvm/spmc_regs.h @@ -0,0 +1,117 @@ +#ifndef _KVM_SPMC_REGS_H +#define _KVM_SPMC_REGS_H + +#include + +/* + * System Power Management Controller registers offsets into configuration space + */ +#define SPMC_PM_TMR_OFF 0x40 +#define SPMC_PM1_STS_OFF 0x44 +#define SPMC_PM1_EN_OFF 0x48 +#define SPMC_PM1_CNT_OFF 0x4c +#define SPMC_ATNSUS_CNT_OFF 0x50 +#define SPMC_PURST_CNT_OFF 0x54 + +#define SPMC_REGS_CFG_OFFSET SPMC_PM_TMR_OFF +#define SPMC_REGS_CFG_LENGTH (SPMC_PURST_CNT_OFF + 4 - SPMC_PM_TMR_OFF) + +/* + * System Power Management Controller registers structures + */ + +typedef union spmc_pm_tmr { + u32 reg; + struct { /* as fields */ + u32 counter : 32; /* [31: 0] */ + }; + struct { /* as fields */ + u32 counter_0_22 : 23; /* [22: 0] */ + u32 counter_23 : 1; /* [23] */ + u32 counter_24_31 : 8; /* [31:24] */ + }; + struct { /* as fields */ + u32 counter_0_30 : 31; /* [30: 0] */ + u32 counter_31 : 1; /* [31] */ + }; +} spmc_pm_tmr_t; + +typedef union spmc_pm1_sts { + u32 reg; + struct { /* as fields */ + u32 tmr_sts : 1; /* [ 0] */ + u32 ac_power_state : 1; /* [ 1] */ + u32 ac_power_sts : 1; /* [ 2] */ + u32 batlow_state : 1; /* [ 3] */ + u32 batlow_sts : 1; /* [ 4] */ + u32 atn_sts : 1; /* [ 5] */ + u32 reserved1 : 2; /* [ 7: 6} */ + u32 pwrbtn_sts : 1; /* [ 8] */ + u32 reserved2 : 6; /* [14: 9} */ + u32 wak_sts : 1; /* [15] */ + u32 reserved3 : 16; /* {31:16] */ + }; +} spmc_pm1_sts_t; + +typedef union spmc_pm1_en { + u32 reg; + struct { /* as fields */ + u32 tmr_en : 1; /* [ 0] */ + u32 tmr_32 : 1; /* [ 1] */ + u32 ac_pwr_en : 1; /* [ 2] */ + u32 reserved1 : 1; /* [ 3} */ + u32 batlow_en : 1; /* [ 4] */ + u32 reserved2 : 3; 
/* [ 7: 5} */ + u32 pwrbtn_en : 1; /* [ 8] */ + u32 reserved3 : 23; /* {31: 9] */ + }; +} spmc_pm1_en_t; + +typedef union spmc_pm1_cnt { + u32 reg; + struct { /* as fields */ + u32 sci_en : 1; /* [ 0] */ + u32 reserved1 : 9; /* [ 9: 1} */ + u32 slp_typx : 3; /* [12:10] */ + u32 slp_en : 1; /* [13] */ + u32 reserved2 : 18; /* {31:14] */ + }; +} spmc_pm1_cnt_t; + +typedef union spmc_atnsus_cnt { /* attention suspend counter */ + u32 reg; + struct { /* as fields */ + u32 counter : 32; /* [31: 0] */ + }; +} spmc_atnsus_cnt_t; + +typedef union spmc_pu_rst_cnt { /* power up reset counter */ + u32 reg; + struct { /* as fields */ + u32 counter : 32; /* [31: 0] */ + }; +} spmc_pu_rst_cnt_t; + +#define SPMC_PU_RST_CNT_MIN 0x00004000UL /* minimal value: ~4.5 msk */ + +typedef enum spmc_sleep_state { + SPMC_S0_SLEEP_STATE = 0, /* G0 */ + SPMC_S3_SLEEP_STATE = 3, /* G1 */ + SPMC_S4_SLEEP_STATE = 4, /* G1 */ + SPMC_S5_SLEEP_STATE = 5, /* G2 */ + /* states 1, 2, 6, 7 - are not supported and == S0 (G0) */ +} spmc_sleep_state_t; + +typedef enum spmc_g_state { + SPMC_G0_STATE = 0, /* G0 */ + SPMC_G1_STATE = 1, /* G1 */ + SPMC_G2_state = 2, /* G2 */ +} spmc_g_state_t; + +typedef enum spmc_irq_map { + SPMC_SCI_IRQ_ID = 1, +} spmc_irq_map_t; + +#define EIOH_SPMC_PM_TIMER_FREQ 3579545 /* Herz */ + +#endif /* _KVM_SPMC_REGS_H */ diff --git a/arch/e2k/kvm/string.h b/arch/e2k/kvm/string.h new file mode 100644 index 000000000000..4c001569bced --- /dev/null +++ b/arch/e2k/kvm/string.h @@ -0,0 +1,153 @@ +#ifndef _KVM_STRING_H_ +#define _KVM_STRING_H_ + +#include +#include +#include +#include + +#include "mmu.h" +#include "gaccess.h" + +/* + * optimized copy memory along with tags + * using privileged LD/ST recovery operations + * light case: all addresses should be from guest kernel address space, + * nothing shadow addresses + */ +static inline long +kvm_fast_guest_tagged_memory_copy(struct kvm_vcpu *vcpu, + void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long 
ldrd_opcode, + int prefetch) +{ + ldst_rec_op_t ldst_rec_op; + int ret; + + LD_ST_REC_OPC_reg(ldst_rec_op) = ldrd_opcode; + if (LD_ST_REC_OPC_mas(ldst_rec_op) == MAS_LOAD_PA || + LD_ST_REC_OPC_mas(ldst_rec_op) == MAS_STORE_PA) { + if (!IS_GUEST_PHYS_ADDRESS((e2k_addr_t)src)) { + pr_err("%s(): bad guest phys src %px ldrd 0x%lx" + "phys start 0x%lx end 0x%lx\n", + __func__, src, ldrd_opcode, GUEST_PAGE_OFFSET, + GUEST_PAGE_OFFSET + MAX_PM_SIZE); + ret = -EFAULT; + goto failed; + } + LD_ST_REC_OPC_mas(ldst_rec_op) = MAS_LOAD_OPERATION; + ldrd_opcode = LD_ST_REC_OPC_reg(ldst_rec_op); + } + LD_ST_REC_OPC_reg(ldst_rec_op) = strd_opcode; + if (LD_ST_REC_OPC_mas(ldst_rec_op) == MAS_LOAD_PA || + LD_ST_REC_OPC_mas(ldst_rec_op) == MAS_STORE_PA) { + if (!IS_GUEST_PHYS_ADDRESS((e2k_addr_t)dst)) { + pr_err("%s(): bad guest phys dst %px ldrd 0x%lx " + "phys start 0x%lx end 0x%lx\n", + __func__, dst, strd_opcode, GUEST_PAGE_OFFSET, + GUEST_PAGE_OFFSET + MAX_PM_SIZE); + ret = -EFAULT; + goto failed; + } + LD_ST_REC_OPC_mas(ldst_rec_op) = MAS_STORE_OPERATION; + strd_opcode = LD_ST_REC_OPC_reg(ldst_rec_op); + } + return kvm_vcpu_copy_guest_virt_system(vcpu, dst, src, len, + strd_opcode, ldrd_opcode, prefetch); + +failed: + return ret; +} + +static inline long +kvm_fast_guest_tagged_memory_set(struct kvm_vcpu *vcpu, + void *addr, u64 val, u64 tag, size_t len, u64 strd_opcode) +{ + ldst_rec_op_t ldst_rec_op; + int ret; + + LD_ST_REC_OPC_reg(ldst_rec_op) = strd_opcode; + if (LD_ST_REC_OPC_mas(ldst_rec_op) == MAS_LOAD_PA || + LD_ST_REC_OPC_mas(ldst_rec_op) == MAS_STORE_PA) { + if (!IS_GUEST_PHYS_ADDRESS((e2k_addr_t)addr)) { + ret = -EFAULT; + goto failed; + } + LD_ST_REC_OPC_mas(ldst_rec_op) = MAS_STORE_OPERATION; + strd_opcode = LD_ST_REC_OPC_reg(ldst_rec_op); + } + return kvm_vcpu_set_guest_virt_system(vcpu, addr, val, tag, len, + strd_opcode); + +failed: + return ret; +} + +/* + * optimized copy memory along with tags + * using privileged LD/ST recovery operations + * common case: 
some addresses can be from host kernel address space, + * but point to guest structures, shadow image ... + */ +static inline long +kvm_fast_tagged_guest_memory_copy(struct kvm_vcpu *vcpu, + void *dst, const void *src, + size_t len, unsigned long strd_opcode, + unsigned long ldrd_opcode, int prefetch) +{ + return kvm_fast_guest_tagged_memory_copy(vcpu, dst, src, len, + strd_opcode, ldrd_opcode, prefetch); +} + +static inline long +kvm_fast_tagged_guest_memory_set(struct kvm_vcpu *vcpu, + void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + return kvm_fast_guest_tagged_memory_set(vcpu, addr, val, tag, len, + strd_opcode); +} + +static inline long +kvm_copy_from_to_user_with_tags(struct kvm_vcpu *vcpu, + void __user *dst, void __user *src, size_t len) +{ + unsigned long st_opcode = TAGGED_MEM_STORE_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT; + unsigned long ld_opcode = TAGGED_MEM_LOAD_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT; + + return kvm_vcpu_copy_guest_virt_system(vcpu, dst, src, len, + st_opcode, ld_opcode, 0); +} + +/* + * Extract tags from 32 bytes of data + */ +static inline long +kvm_extract_guest_tags_32(u16 *dst, const void *src) +{ + if (IS_HOST_KERNEL_ADDRESS((e2k_addr_t)src) || + IS_HOST_KERNEL_ADDRESS((e2k_addr_t)dst)) { + return -EINVAL; + } + if (!IS_GUEST_KERNEL_ADDRESS((e2k_addr_t)src) || + !IS_GUEST_KERNEL_ADDRESS((e2k_addr_t)dst)) { + return -EINVAL; + } + if (!access_ok(dst, sizeof(u16))) { + pr_err("%s(): bad dst %px + len 0x%lx addr limit 0x%lx\n", + __func__, dst, sizeof(u16), + current_thread_info()->addr_limit.seg); + return -EFAULT; + } + if (!access_ok(src, 32)) { + pr_err("%s(): bad src %px + len 0x%x addr limit 0x%lx\n", + __func__, src, 32, + current_thread_info()->addr_limit.seg); + return -EFAULT; + } + return native_extract_tags_32(dst, src); +} + +#endif /* _KVM_STRING_H_ */ diff --git a/arch/e2k/kvm/switch.c b/arch/e2k/kvm/switch.c new file mode 100644 index 000000000000..9749e9d26a47 
--- /dev/null +++ b/arch/e2k/kvm/switch.c @@ -0,0 +1,18 @@ +#include +#include +#include + +notrace void host_syscall_guest_exit_trap(struct thread_info *ti, + struct pt_regs *regs) +{ + if (likely(!test_ti_status_flag(ti, TS_HOST_AT_VCPU_MODE))) + return; + + /* CUTD register restore is important on host for guest syscall */ + HOST_RESTORE_USER_CUT_REGS(ti, regs, true); + + /* host return to paravirtualized guest (VCPU) mode */ + host_syscall_pv_vcpu_exit_trap(ti, regs); + + host_switch_trap_enable_mask(ti, regs, true); +} diff --git a/arch/e2k/kvm/time.h b/arch/e2k/kvm/time.h new file mode 100644 index 000000000000..c50d952d0dc4 --- /dev/null +++ b/arch/e2k/kvm/time.h @@ -0,0 +1,114 @@ +#ifndef __KVM_E2K_TIME_H +#define __KVM_E2K_TIME_H + +#include +#include +#include +#include + +/* + * VCPU state structure contains time structure of VCPU. + * The structure is common for host and guest and can (and should) + * be accessed by both. + * Guest access do through global pointer which should be load on some global + * register (GUEST_VCPU_STATE_GREG) or on special CPU register GD. + * But GD can be used only if guest kernel run as protected task + */ + +/* + * Basic functions to access to time structure (see asm/kvm/guest.h) on host. 
+ */ +static inline long +kvm_get_guest_time(struct kvm *kvm, + kvm_timespec_t *vcpu_ts, struct timespec *ts) +{ + long secs, nsecs; + + /* read time in consistent state */ + raw_spin_lock(&kvm->arch.time_state_lock); + secs = vcpu_ts->tv_sec; + nsecs = vcpu_ts->tv_nsec; + raw_spin_unlock(&kvm->arch.time_state_lock); + + ts->tv_sec = secs; + ts->tv_nsec = nsecs; + return secs * NSEC_PER_SEC + nsecs; +} + +static inline void +kvm_set_guest_time(struct kvm *kvm, + kvm_timespec_t *kvm_ts, struct timespec *ts) +{ + long secs, nsecs; + + nsecs = ts->tv_nsec; + secs = ts->tv_sec; + + raw_spin_lock(&kvm->arch.time_state_lock); + kvm_ts->tv_nsec = nsecs; + kvm_ts->tv_sec = secs; + raw_spin_unlock(&kvm->arch.time_state_lock); +} + +static inline long +kvm_get_guest_system_time(struct kvm *kvm, struct timespec *ts) +{ + kvm_timespec_t *sys_time; + + sys_time = &(kvm->arch.kmap_host_info->time.sys_time); + return kvm_get_guest_time(kvm, sys_time, ts); +} + +static inline void +kvm_set_guest_system_time(struct kvm *kvm, struct timespec *ts) +{ + kvm_timespec_t *sys_time; + + sys_time = &(kvm->arch.kmap_host_info->time.sys_time); + kvm_set_guest_time(kvm, sys_time, ts); +} + +static inline long +kvm_get_guest_wall_time(struct kvm *kvm, struct timespec *ts) +{ + kvm_timespec_t *wall_time; + + wall_time = &(kvm->arch.kmap_host_info->time.wall_time); + return kvm_get_guest_time(kvm, wall_time, ts); +} + +static inline void +kvm_set_guest_wall_time(struct kvm *kvm, struct timespec *ts) +{ + kvm_timespec_t *wall_time; + + wall_time = &(kvm->arch.kmap_host_info->time.wall_time); + kvm_set_guest_time(kvm, wall_time, ts); +} + +static inline void +kvm_update_guest_wall_time(struct kvm *kvm) +{ + struct timespec ts; + + ts.tv_sec = mach_get_wallclock(); + ts.tv_nsec = 0; + kvm_set_guest_wall_time(kvm, &ts); +} + +static inline void +kvm_update_guest_system_time(struct kvm *kvm) +{ + struct timespec ts; + + ktime_get_ts(&ts); + kvm_set_guest_system_time(kvm, &ts); +} + +static inline 
void +kvm_update_guest_time(struct kvm *kvm) +{ + kvm_update_guest_wall_time(kvm); + kvm_update_guest_system_time(kvm); +} +#endif /* __KVM_E2K_TIME_H */ diff --git a/arch/e2k/kvm/timer.c b/arch/e2k/kvm/timer.c new file mode 100644 index 000000000000..e4c9d88644b5 --- /dev/null +++ b/arch/e2k/kvm/timer.c @@ -0,0 +1,375 @@ +/*P:800 + * Interrupts (traps) are complicated enough to earn their own file. + * There are three classes of interrupts: + * + * 1) Real hardware interrupts which occur while we're running the Guest, + * 2) Interrupts for virtual devices attached to the Guest, and + * 3) Traps and faults from the Guest. + * + * Real hardware interrupts must be delivered to the Host, not the Guest. + * Virtual interrupts must be delivered to the Guest, but we make them look + * just like real hardware would deliver them. Traps from the Guest can be set + * up to go directly back into the Guest, but sometimes the Host wants to see + * them first, so we also have a way of "reflecting" them into the Guest as if + * they had been delivered to it directly. +:*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pic.h" +#include "irq.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_TIME_MODE +#undef DebugKVMT +#define DEBUG_KVM_TIME_MODE 0 /* KVM time/timer debugging */ +#define DebugKVMT(fmt, args...) \ +({ \ + if (DEBUG_KVM_TIME_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_TIMER_MODE +#undef DebugKVMTM +#define DEBUG_KVM_TIMER_MODE 0 /* KVM timer debugging */ +#define DebugKVMTM(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_TIMER_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_APIC_TIMER_MODE +#undef DebugKVMAT +#define DEBUG_KVM_APIC_TIMER_MODE 0 /* KVM LAPIC timer debugging */ +#define DebugKVMAT(fmt, args...) \ +({ \ + if (DEBUG_KVM_APIC_TIMER_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#define HRTIMER_EXPIRES_PERCENT 90 /* percents */ +/* If hrtimer expires on HRTIMER_EXPIRES_PERCENTs it does not reactivate */ +#define HRTIMER_EXPIRES_APPROX(time) \ + (((time) / 100) * HRTIMER_EXPIRES_PERCENT) + +/*H:200 + * The Guest Timer. + * + * The Guest uses the LHCALL_SET_CLOCKEVENT hypercall to tell us how long to + * the next timer interrupt (in nanoseconds). We use the high-resolution timer + * infrastructure to set a callback at that time. + * + * 0 means "turn off the clock". + * FIXME: clock device should be on each VCPU + */ +static ktime_t expires; +void kvm_guest_set_clockevent(struct kvm_vcpu *vcpu, unsigned long delta) +{ + ktime_t real_time; + + DebugKVMTM("started for delta %ld\n", delta); + if (unlikely(delta == 0)) { + /* Clock event device is shutting down. */ + hrtimer_cancel(&vcpu->arch.hrt); + DebugKVM("Clock event device is shutting down\n"); + return; + } + + /* + * We use wallclock time here, so the Guest might not be running for + * all the time between now and the timer interrupt it asked for. This + * is almost always the right thing to do. 
+ */ + real_time = ktime_get(); + if (ktime_to_ns(real_time) - ktime_to_ns(expires) > + (delta + (delta >> 4))) { + DebugKVMTM("current time 0x%llx too bigger of expires time " + "0x%llx jiffies 0x%lx\n", + ktime_to_ns(real_time), ktime_to_ns(expires), + jiffies); + } else { + DebugKVMTM("current time 0x%llx, expires time " + "0x%llx jiffies 0x%lx\n", + ktime_to_ns(real_time), ktime_to_ns(expires), + jiffies); + } + +again: + if (!hrtimer_active(&vcpu->arch.hrt)) { + vcpu->arch.hrt_period = delta; + expires = ktime_add_ns(real_time, delta); + hrtimer_start(&vcpu->arch.hrt, expires, HRTIMER_MODE_ABS); + vcpu->arch.hrt_running_start = + kvm_get_guest_vcpu_running_time(vcpu); + DebugKVMTM("starts hrtimer for expires time 0x%llx " + "current 0x%llx\n", + ktime_to_ns(expires), ktime_to_ns(real_time)); + } else if (hrtimer_callback_running(&vcpu->arch.hrt)) { + BUG_ON(vcpu->arch.hrt_period != 0); + hrtimer_add_expires_ns(&vcpu->arch.hrt, delta); + vcpu->arch.hrt_period = delta; + vcpu->arch.hrt_running_start = + kvm_get_guest_vcpu_running_time(vcpu); + DebugKVMTM("hrtimer is in interrupt handler now, " + "so only restart\n"); + } else { + /* timer is active probably is completing, so waiting */ + DebugKVMTM("hrtimer is completing, small waiting\n"); + cpu_relax(); + goto again; + } +} + +/* This is the function called when the Guest's timer expires. */ +static enum hrtimer_restart clockdev_fn(struct hrtimer *timer) +{ + struct kvm_vcpu *vcpu = container_of(timer, struct kvm_vcpu, arch.hrt); + int irq = vcpu->arch.hrt_virq_no; + long period = vcpu->arch.hrt_period; + s64 running_start; + s64 running_time; + s64 running; + + /* Remember the first interrupt is the timer interrupt. 
*/ + DebugKVMTM("process %s (%d): started to set local timer IRQ #%d " + "on VCPU #%d\n", + current->comm, current->pid, irq, vcpu->vcpu_id); + running_start = vcpu->arch.hrt_running_start; + running_time = kvm_get_guest_vcpu_running_time(vcpu); + running = cycles_2nsec(running_time - running_start); + BUG_ON(running < 0); + if (running < HRTIMER_EXPIRES_APPROX(period)) { + hrtimer_add_expires_ns(&vcpu->arch.hrt, + (period - running)); + return HRTIMER_RESTART; + } + vcpu->arch.hrt_period = 0; /* signal timer interrupt happened */ + /* to clock event program function */ + kvm_vcpu_interrupt(vcpu, irq); + + if (vcpu->arch.hrt_period != 0) { + /* the timer was reprogrammed, so restart timer */ + return HRTIMER_RESTART; + } + return HRTIMER_NORESTART; +} + +/* This sets up the timer for the VCPU */ +void kvm_init_clockdev(struct kvm_vcpu *vcpu) +{ + DebugKVM("started to set up the timer for the VCPU #%d\n", + vcpu->vcpu_id); + + if (vcpu->arch.is_hv && kvm_vcpu_is_bsp(vcpu)) { + int ret; + + ret = kvm_get_guest_direct_virq(vcpu, + KVM_VIRQ_TIMER + (KVM_NR_VIRQS_PER_CPU * vcpu->vcpu_id), + KVM_VIRQ_TIMER); + if (ret != 0) { + pr_err("%s(): could not register early timer " + "VIRQ #%d on VCPU #%d\n", + __func__, + KVM_VIRQ_TIMER + + (KVM_NR_VIRQS_PER_CPU * vcpu->vcpu_id), + vcpu->vcpu_id); + KVM_BUG_ON(true); + } + } + hrtimer_init(&vcpu->arch.hrt, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + vcpu->arch.hrt.function = clockdev_fn; + DebugKVM("created timer for the VCPU #%d at %px base 0x%lx\n", + vcpu->vcpu_id, &vcpu->arch.hrt, vcpu->arch.hrt.base); +} + +void kvm_cancel_clockdev(struct kvm_vcpu *vcpu) +{ + DebugKVM("started to cancel the timer for the VCPU #%d\n", + vcpu->vcpu_id); + if (unlikely(vcpu->arch.hrt.base == NULL)) { + return; /* is not yet inited */ + } + /* Clock event device is shutting down. 
*/ + hrtimer_cancel(&vcpu->arch.hrt); + if (vcpu->arch.apic != NULL) + hrtimer_cancel(&vcpu->arch.apic->lapic_timer.timer); +} + +static void +do_kvm_apic_timer_fn(struct kvm_vcpu *vcpu) +{ + DebugKVMAT("will inject apic timer IRQ on VCPU #%d\n", vcpu->vcpu_id); + kvm_inject_apic_timer_irqs(vcpu); +} + +enum hrtimer_restart kvm_apic_timer_fn(struct hrtimer *data) +{ + struct kvm_vcpu *vcpu; + struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer); + struct kvm_lapic *apic; + s64 period = ktimer->period; + s64 running_start; + s64 running_now; + s64 running; + bool handled = false; + + vcpu = ktimer->vcpu; + if (!vcpu) + return HRTIMER_NORESTART; + + DebugKVMAT("started on VCPU #%d\n", vcpu->vcpu_id); + + /* stolen time should be accounted and if it is considerable */ + /* the timer should be restarted on stolen time */ + apic = vcpu->arch.apic; + if (apic == NULL) + return HRTIMER_NORESTART; + running_start = apic->lapic_timer.running_time; + running_now = kvm_get_guest_vcpu_running_time(vcpu); + running = cycles_2nsec(running_now - running_start); +/* BUG_ON(running < 0); probably it starts on other CPU */ + DebugKVMAT("running start 0x%llx now 0x%llx ns 0x%llx period 0x%llx\n", + running_start, running_now, running, period); + if (running < 0) + running = 0; + if (running < HRTIMER_EXPIRES_APPROX(period) && + /* + * Do not allow the guest to program periodic timers with small + * interval, since the hrtimers are not throttled by the host + * scheduler. + */ + (period - running) >= NSEC_PER_MSEC / 2) { + hrtimer_add_expires_ns(&ktimer->timer, (period - running)); + DebugKVMAT("apic timer add expires 0x%llx and restarted\n", + period - running); + return HRTIMER_RESTART; + } + + /* + * There is a race window between reading and incrementing, but we do + * not care about potentially loosing timer events in the !reinject + * case anyway. 
+ */ + if (ktimer->reinject || !atomic_read(&ktimer->pending)) { + atomic_inc(&ktimer->pending); + } + if (apic_has_pending_timer(vcpu)) { + ktimer->period = 0; /* signal timer interrupt is handling */ + /* to lapic timer start function */ + do_kvm_apic_timer_fn(vcpu); + handled = true; + } + + if (ktimer->t_ops->is_periodic(ktimer)) { + apic->lapic_timer.running_time = + kvm_get_guest_vcpu_running_time(apic->vcpu); + ktimer->period = period; + hrtimer_add_expires_ns(&ktimer->timer, period); + DebugKVMAT("apic periodic timer add expires 0x%llx and " + "restarted\n", period); + return HRTIMER_RESTART; + } + if (handled && ktimer->period != 0) { + /* the timer was reprogrammed, so restart timer */ + return HRTIMER_RESTART; + } + DebugKVMAT("apic timer handles\n"); + return HRTIMER_NORESTART; +} + +static void +do_kvm_epic_timer_fn(struct kvm_vcpu *vcpu) +{ + DebugKVMAT("will inject epic timer IRQ on VCPU #%d\n", vcpu->vcpu_id); + kvm_inject_epic_timer_irqs(vcpu); +} + +enum hrtimer_restart kvm_epic_timer_fn(struct hrtimer *data) +{ + struct kvm_vcpu *vcpu; + struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer); + struct kvm_cepic *epic; + s64 period = ktimer->period; + s64 running_start; + s64 running_now; + s64 running; + bool handled = false; + + vcpu = ktimer->vcpu; + if (!vcpu) + return HRTIMER_NORESTART; + + DebugKVMAT("started on VCPU #%d\n", vcpu->vcpu_id); + + /* stolen time should be accounted and if it is considerable */ + /* the timer should be restarted on stolen time */ + epic = vcpu->arch.epic; + if (epic == NULL) + return HRTIMER_NORESTART; + running_start = epic->cepic_timer.running_time; + running_now = kvm_get_guest_vcpu_running_time(vcpu); + running = cycles_2nsec(running_now - running_start); +/* BUG_ON(running < 0); probably it starts on other CPU */ + DebugKVMAT("running start 0x%llx now 0x%llx ns 0x%llx period 0x%llx\n", + running_start, running_now, running, period); + if (running < 0) + running = 0; + if (running < 
HRTIMER_EXPIRES_APPROX(period) && + /* + * Do not allow the guest to program periodic timers with small + * interval, since the hrtimers are not throttled by the host + * scheduler. + */ + (period - running) >= NSEC_PER_MSEC / 2) { + hrtimer_add_expires_ns(&ktimer->timer, (period - running)); + DebugKVMAT("epic timer add expires 0x%llx and restarted\n", + period - running); + return HRTIMER_RESTART; + } + + /* + * There is a race window between reading and incrementing, but we do + * not care about potentially losing timer events in the !reinject + * case anyway. + */ + if (ktimer->reinject || !atomic_read(&ktimer->pending)) + atomic_inc(&ktimer->pending); + + if (epic_has_pending_timer(vcpu)) { + ktimer->period = 0; /* signal timer interrupt is handling */ + /* to cepic timer start function */ + do_kvm_epic_timer_fn(vcpu); + handled = true; + } + + if (ktimer->t_ops->is_periodic(ktimer)) { + epic->cepic_timer.running_time = + kvm_get_guest_vcpu_running_time(epic->vcpu); + ktimer->period = period; + hrtimer_add_expires_ns(&ktimer->timer, period); + DebugKVMAT("epic periodic timer add expires 0x%llx restarted\n", + period); + return HRTIMER_RESTART; + } + if (handled && ktimer->period != 0) { + /* the timer was reprogrammed, so restart timer */ + return HRTIMER_RESTART; + } + DebugKVMAT("epic timer handles\n"); + return HRTIMER_NORESTART; +} diff --git a/arch/e2k/kvm/trace_pgtable-gp.h b/arch/e2k/kvm/trace_pgtable-gp.h new file mode 100644 index 000000000000..7d8b92f5af46 --- /dev/null +++ b/arch/e2k/kvm/trace_pgtable-gp.h @@ -0,0 +1,45 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM e2k + +#if !defined(_TRACE_E2K_PGTABLE_GP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_E2K_PGTABLE_GP_H + +#include "pgtable-gp.h" + +#define E2K_TRACE_PRINT_PT_GP_FLAGS(entry, print) \ + (print) ? 
(__print_flags(entry & (_PAGE_P_GP | _PAGE_HUGE_GP | \
+			_PAGE_SW1_GP | _PAGE_SW2_GP), "|", \
+		{ _PAGE_P_GP , "present" }, \
+		{ _PAGE_HUGE_GP, "large" }, \
+		{ _PAGE_SW1_GP, "OS-1" }, \
+		{ _PAGE_SW2_GP, "OS-2" } \
+	)) : "(none)", \
+	(print) ? (__print_flags(entry & (_PAGE_W_GP | _PAGE_D_GP | \
+					_PAGE_A_HW_GP), "|", \
+		{ _PAGE_W_GP, "writable" }, \
+		{ _PAGE_D_GP, "dirty" }, \
+		{ _PAGE_A_HW_GP, "accessed" } \
+	)) : "(none)", \
+	(print && entry != -1ULL && (entry & _PAGE_P_GP)) ? \
+	(__print_symbolic(_PAGE_MTCR_GET_VAL_GP(entry), \
+		{ MOST_STRONG_MTCR, "More Strong MT" }, \
+		{ FROM_HYPERVISOR_MTCR, "Hypervisor MT" }, \
+		{ FROM_GUEST_MTCR, "Guest MT" }, \
+		{ 1, "Reserved-1" })) \
+	: "", \
+	(print && entry != -1ULL && (entry & _PAGE_P_GP)) ? \
+	(__print_symbolic(_PAGE_MT_GET_VAL(entry), \
+		{ GEN_CACHE_MT, "General Cacheable" }, \
+		{ GEN_NON_CACHE_MT, "General nonCacheable" }, \
+		{ GEN_NON_CACHE_ORDERED_MT, \
+			"General nonCacheable Ordered (same as GnC in hardware)" }, \
+		{ EXT_PREFETCH_MT, "External Prefetchable" }, \
+		{ EXT_NON_PREFETCH_MT, "External nonPrefetchable" }, \
+		{ EXT_CONFIG_MT, "External Configuration" }, \
+		{ EXT_CACHE_MT, "External Cached (same as GC in hardware)" }, \
+		{ 2, "Reserved-2" }, \
+		{ 3, "Reserved-3" }, \
+		{ 5, "Reserved-5" })) \
+	: ""
+
+#endif /* _TRACE_E2K_PGTABLE_GP_H */
diff --git a/arch/e2k/kvm/trap_table.S b/arch/e2k/kvm/trap_table.S
new file mode 100644
index 000000000000..ca58dc014a58
--- /dev/null
+++ b/arch/e2k/kvm/trap_table.S
@@ -0,0 +1,495 @@
+//
+// Trap table entries implemented in assembly
+//
+
+#undef NATIVE_TASK_SIZE
+#undef HOST_TASK_SIZE
+#undef GUEST_TASK_SIZE
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+.global kvm_light_hcalls;
+.global kvm_generic_hcalls;
+.global dump_stack;
+.global trap_handler_trampoline_continue;
+.global syscall_handler_trampoline_continue;
+.global syscall_fork_trampoline_continue;
+
+#ifdef CONFIG_CLW_ENABLE
+# define
CLW_ONLY(...) __VA_ARGS__ +#else +# define CLW_ONLY(...) +#endif + +#define HCALL_WSZ 11 + +/* ttable_entry16/17 - wrappers for software paravirtualiation mode. + * Hypervisor must use hret when entered from hcall, so we remember + * whether guest used `hcall' or `sdisp' here. */ +.global ttable_entry16 +.section .ttable_entry16, "ax",@progbits + .align 8 + .type ttable_entry16,@function // hypercalls +ttable_entry16: + { + /* Disable load/store generations */ + crp + } + { + setwd wsz = HCALL_WSZ, nfx = 0 + rrd %osr0, %dr17 // %dr17: current_thread_info + shld 1, 63, %dr18 + } + { + /* Important: the first memory access in kernel is store. + * This is needed to flush SLT before trying to load anything. */ + stw,sm %r0, [slt_disable + 0] + } + + // if (READ_SBR_REG() < NATIVE_TASK_SIZE) { + // thread_info = current_thread_info(); + // sbr = current->stack +KERNEL_C_STACK_SIZE + KERNEL_C_STACK_OFFSET + // usd_lo = thread_info->k_usd_lo; + // usd_hi = thread_info->k_usd_hi; + // WRITE_USBR_USD_REG(sbr, usd_hi, usd_lo); + /* FIXME: generic hypercalls do not increment hardware stacks now + // psp_hi = READ_PSP_HI_REG(); + // pcsp_hi = READ_PCSP_HI_REG(); + // psp_hi.PSP_hi_size += thread_info->k_ps_sz; + // pcsp_hi.PCSP_hi_size += thread_info->k_pcs_sz; + // WRITE_PSP_HI_REG(psp_hi); + // WRITE_PCSP_HI_REG(pcsp_hi); + // thread_info->upsr = NATIVE_READ_UPSR_REG_VALUE(); + */ + // } + + /* flush hardware stacks to cause the possible page fault on guest */ + { + flushc + } + { + flushr + } + { + wait fl_c = 1 + } + { + rrd %sbr, %dr7 + addd 0, NATIVE_TASK_SIZE - 1, %dr19 + } + // %dr17: current_thread_info + // %pred0, %dr10, %dr11, %dr12: temporary predicate and registers + SWITCH_TO_KERNEL_IMAGE_PGD %dr17, %pred0, %dr10, %dr11, %dr12 + { + ldd [%dr17 + TSK_TI_STACK_DELTA], %dr10 // %dr10: stack + ldd [%dr17 + TI_K_USD_HI], %dr8 // %dr8: usd_hi + ipd 0 + disp %ctpr1, bad_hcall + } + { + nop 2 + rrd %usd.hi, %dr20 + cmpbedb %dr7, %dr19, %pred0 // sbr < NATIVE_TASK_SIZE + 
ldd [%dr17 + TI_K_USD_LO], %dr9 // %dr9: usd_lo + disp %ctpr3, kvm_generic_hcalls + } + { + addd %dr10, KERNEL_C_STACK_SIZE + KERNEL_C_STACK_OFFSET, %dr10 + ldd [%dr17 + TI_VCPU], %dr19 ? %pred0 + ct %ctpr1 ? ~ %pred0 + } + { + rrd %usd.lo, %dr21 + puttagd,5 %dr0, 0, %dr0 + } + { + rrs %psr, %dr13 // psr = READ_PSR_REG_VALUE(); + puttagd,2 %dr1, 0, %dr1 + puttagd,5 %dr2, 0, %dr2 + } + { + puttagd,2 %dr3, 0, %dr3 + puttagd,5 %dr4, 0, %dr4 + } + { + puttagd,2 %dr5, 0, %dr5 + puttagd,5 %dr6, 0, %dr6 + } + // Switch to kernel local data stack: WRITE_USBR_USD_REG(sbr, usd_hi, usd_lo) + ALTERNATIVE_1_ALTINSTR + /* CPU_HWBUG_USD_ALIGNMENT version */ + { + rwd %dr10, %sbr + sxt 6, %r0, %dr0 + nop 1 + } + ALTERNATIVE_2_OLDINSTR + /* Default version */ + /* CPU_HWBUG_USD_ALIGNMENT version */ + { + rwd %dr10, %sbr + sxt 6, %r0, %dr0 + } + ALTERNATIVE_3_FEATURE(CPU_HWBUG_USD_ALIGNMENT) + { + rwd %dr9, %usd.lo + andns %dr13, PSR_SGE_AS, %dr13 // psr &= ~PSR_SGE; + } + { + nop 4 /* usd->getsp */ + rwd %dr8, %usd.hi + ord %dr0, %dr18, %dr0 + std %dr7, [ %dr19 + VCPU_ARCH_CTXT_SBR ] + } + + /* + * Do not increment hardware stacks sizes on kernel resident part + * it should be done by appropriate trap handlers, + * but disable 'sge' flag to prevent from hardware stacks bounds traps + * while switch to host kernel context + * FIXME: 'sge' disabling should by done by hardware (as for traps) + */ + { + rws %dr13, %psr // WRITE_PSR_REG_VALUE(psr); + std %dr20, [ %dr19 + VCPU_ARCH_CTXT_USD_HI ] + std %dr21, [ %dr19 + VCPU_ARCH_CTXT_USD_LO ] + ct %ctpr3 + } + +#if 0 /* FIXME: generic hypercalls do not increment hardware stacks now */ +{ + rrd %psp.hi, %dr13; // %dr13: psp_hi + ldw [%dr17 + TI_K_PS_LIM], %dr15; // %dr15: k_ps_limit + ldw [%dr17 + TI_K_PCS_LIM], %dr16; // %dr16: k_pcs_limit +} +{ + /* ti->u_hw_stack.k_ps_reserved = k_ps_limit */ + /* ti->u_hw_stack.k_pcs_reserved = k_pcs_limit */ + stw %dr15, [%dr1 + TI_K_PS_RES]; + stw %dr16, [%dr1 + TI_K_PCS_RES]; + + rrd %pcsp.hi, 
%dr14; // %dr14: pcsp_hi + + shld %dr15, 32, %dr15; + shld %dr16, 32, %dr16; +} +{ + addd %dr13, %dr15, %dr13 // psp_hi.PSP_hi_size += k_ps_limit + addd %dr14, %dr16, %dr14 // pcsp_hi.PCSP_hi_size += k_pcs_limit +} +{ + rwd %dr13, %psp.hi // WRITE_PSP_HI_REG(psp_hi) + rwd %dr14, %pcsp.hi // WRITE_PCSP_HI_REG(pcsp_hi) +} +#endif /* 0 */ /* FIXME: generic hypercalls do not increment hardware */ + /* stacks now */ + +bad_hcall: + /* error */ + rrd %osr0, %dr7 // %dr7: current_thread_info + subd %dr7, TSK_TI, %dr8 // %dr8: current +#ifdef CONFIG_SMP + ldw [%dr7 + TSK_TI_CPU_DELTA], %r10 // %dr10: smp_processor_id() + shld %dr10, 3, %dr9 + ldd [__per_cpu_offset + %dr9], %dr9 // %dr9: per CPU offset +#endif + addd 0, 0, %dr7 + ONLY_SET_KERNEL_GREGS %dr7, %dr8, %dr9, %dr10 + ibranch dump_stack +#ifdef CONFIG_CPU_HWBUG_IBRANCH + {nop} {nop} +#endif + .size ttable_entry16, . -$ttable_entry16 + +.global ttable_entry17 +.section .ttable_entry17, "ax",@progbits + .align 8 + .type ttable_entry17,@function // light hypercalls +ttable_entry17: + { + /* Disable load/store generations */ + crp + } + { + setwd wsz = HCALL_WSZ, nfx = 0 + rrd %osr0, %dr9 // %dr9: current_thread_info + shld 1, 63, %dr18 + } + { + /* Important: the first memory access in kernel is store. + * This is needed to flush SLT before trying to load anything. 
*/ + stw,sm %r0, [slt_disable + 0] + } + + /* flush hardware stacks to cause the possible page fault on guest */ + { + flushc + } + { + flushr + } + { + wait fl_c = 1 + } + + // trap can occur on guest kernel + // %dr9: current_thread_info + // %pred0, %dr10, %dr11, %dr12: temporary predicate and registers + SWITCH_TO_KERNEL_IMAGE_PGD %dr9, %pred0, %dr10, %dr11, %dr12 + + /* + * Light hypercalls do not switch to kernel local data stack + * and do not increment hardware stacks sizes on kernel resident part + * but disable 'sge' flag to prevent from hardware stacks bounds traps + * while switch to host kernel context + * FIXME: 'sge' disabling should by done by hardware (as for traps) + */ + { + rrs %psr, %dr13 // psr = READ_PSR_REG_VALUE(); + puttagd,2 %dr0, 0, %dr0 + puttagd,5 %dr1, 0, %dr1 + disp %ctpr1, kvm_light_hcalls + } + { + andns %dr13, PSR_SGE_AS, %dr13 // psr &= ~PSR_SGE; + puttagd,2 %dr2, 0, %dr2 + puttagd,5 %dr3, 0, %dr3 + } + { + rws %dr13, %psr // WRITE_PSR_REG_VALUE(psr); + puttagd,2 %dr4, 0, %dr4 + puttagd,5 %dr5, 0, %dr5 + } + { + ord %dr0, %dr18, %dr0 + puttagd,2 %dr6, 0, %dr6 + ct %ctpr1 + } + .size ttable_entry17, . -$ttable_entry17 + +.global slt_disable; + +/* + * lcc does not support setting __interrupt attribute on + * kvm_light_hypercalls() - it is too complex - so just switch + * the data stack the same way kvm_generic_hypercalls() does. + * Also switch clw context for protected mode (kvm_switch_clw_regs) + */ +#define HW_HCALL(KERNEL_ENTRY) \ + { \ + setwd wsz = HCALL_WSZ, nfx = 0; \ + rrd %rpr.lo, %dr20; \ + } \ + { \ + rrd %rpr.hi, %dr21; \ + /* Disable load/store generations */ \ + crp; \ + } \ + { \ + disp %ctpr3, KERNEL_ENTRY; \ + rrd %osr0, %dr17; /* %dr17: current_thread_info */ \ + /* Important: the first memory access in kernel is store. \ + * This is needed to flush SLT before trying to load anything. 
*/ \ + stw,sm %r0, [slt_disable + 0]; \ + CLW_ONLY(addd,1 0, _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_US_CL_D_NO), %dr18); \ + CLW_ONLY(addd,2 0, 1, %dr16) \ + } \ + { \ + rwd %dr20, %rpr.lo; \ + CLW_ONLY(ldd,2 [%dr18], %dr12 MAS_MMU_REG) \ + } \ + { \ + nop 1; /* ldd */ \ + rwd %dr21, %rpr.hi; \ + ldd,2 [%dr17 + TI_VCPU], %dr19; \ + } \ + { \ + CLW_ONLY(nop 2;) /* mmurw us_cl_d -> mmurr us_cl_* */ \ + rrd %sbr, %dr7; \ + puttagd,5 %dr0, 0, %dr0; \ + CLW_ONLY(std,2 %dr16, [%dr18] MAS_MMU_REG) \ + } \ + { \ + rrd %usd.hi, %dr20; \ + puttagd,2 %dr1, 0, %dr1; \ + CLW_ONLY(mmurr,5 %us_cl_b, %dr13) \ + } \ + { \ + puttagd,2 %dr2, 0, %dr2; \ + CLW_ONLY(mmurr,5 %us_cl_up, %dr14) \ + } \ + { \ + ldd [%dr19 + VCPU_ARCH_CTXT_SBR], %dr10; \ + ldd [%dr19 + VCPU_ARCH_CTXT_USD_HI], %dr8; \ + ldd [%dr19 + VCPU_ARCH_CTXT_USD_LO], %dr9; \ + ldb [%dr19 + VCPU_ARCH_CTXT_SAVED_VALID], %r11; \ + } \ + { \ + rrd %usd.lo, %dr21; \ + puttagd,2 %dr3, 0, %dr3; \ + CLW_ONLY(mmurr,5 %us_cl_m0, %dr15) \ + } \ + { \ + puttagd,2 %dr4, 0, %dr4; \ + CLW_ONLY(mmurr,5 %us_cl_m1, %dr16) \ + } \ + { \ + puttagd,2 %dr5, 0, %dr5; \ + CLW_ONLY(mmurr,5 %us_cl_m2, %dr17) \ + } \ + { \ + puttagd,2 %dr6, 0, %dr6; \ + CLW_ONLY(mmurr,5 %us_cl_m3, %dr18;) \ + } \ + { \ + CLW_ONLY(std %dr12, [ %dr19 + VCPU_ARCH_CTXT_US_CL_D ];) \ + CLW_ONLY(std %dr13, [ %dr19 + VCPU_ARCH_CTXT_US_CL_B ];) \ + } \ + { \ + CLW_ONLY(std %dr14, [ %dr19 + VCPU_ARCH_CTXT_US_CL_UP ];) \ + CLW_ONLY(std %dr15, [ %dr19 + VCPU_ARCH_CTXT_US_CL_M0 ];) \ + } \ + ALTERNATIVE_1_ALTINSTR \ + /* CPU_HWBUG_USD_ALIGNMENT version */ \ + { \ + CLW_ONLY(std %dr16, [ %dr19 + VCPU_ARCH_CTXT_US_CL_M1 ];) \ + rwd %dr10, %sbr; \ + nop 1; \ + } \ + ALTERNATIVE_2_OLDINSTR \ + /* Default version */ \ + { \ + CLW_ONLY(std %dr16, [ %dr19 + VCPU_ARCH_CTXT_US_CL_M1 ];) \ + rwd %dr10, %sbr; \ + } \ + ALTERNATIVE_3_FEATURE(CPU_HWBUG_USD_ALIGNMENT) \ + { \ + rwd %dr9, %usd.lo; \ + CLW_ONLY(std %dr17, [ %dr19 + VCPU_ARCH_CTXT_US_CL_M2 ];) \ + CLW_ONLY(std %dr18, [ %dr19 + 
VCPU_ARCH_CTXT_US_CL_M3 ];) \ + } \ + { \ + rwd %dr8, %usd.hi; \ + nop 2; /* usd->getsp */ \ + sxt 6, %r0, %dr0; \ + std %dr7, [ %dr19 + VCPU_ARCH_CTXT_SBR ]; \ + cmpesb %r11, 0, %pred0; \ + } \ + { \ + std %dr20, [ %dr19 + VCPU_ARCH_CTXT_USD_HI ]; \ + std %dr21, [ %dr19 + VCPU_ARCH_CTXT_USD_LO ]; \ + adds,1 0, 1, %r11; \ + ibranch 1f ? ~%pred0; \ + } \ + { \ + stb,2 %r11, [ %dr19 + VCPU_ARCH_CTXT_SAVED_VALID ]; \ + std %dr10, [ %dr19 + VCPU_ARCH_CTXT_SAVED_SBR ]; \ + } \ + { \ + std %dr8, [ %dr19 + VCPU_ARCH_CTXT_SAVED_USD_HI ]; \ + std %dr9, [ %dr19 + VCPU_ARCH_CTXT_SAVED_USD_LO ]; \ + ct %ctpr3; \ + } \ +1: \ + { \ + setsft; \ + } + +.section .hcall_entry0, "ax",@progbits + .align 8 + .type hcall_entry0,@function // hypercalls +hcall_entry0: + HW_HCALL(kvm_generic_hcalls) + .size hcall_entry0, . -$hcall_entry0 + +.section .hcall_entry1, "ax",@progbits + .align 8 + .type hcall_entry1,@function // light hypercalls +hcall_entry1: + HW_HCALL(kvm_light_hcalls) + .size hcall_entry1, . -$hcall_entry1 + +.global trap_handler_trampoline +.section ".irqentry.text", "ax" +.type trap_handler_trampoline,@function +trap_handler_trampoline: + HANDLER_TRAMPOLINE %ctpr2, 11, trap_handler_trampoline_continue, 0 +trap_handler_switched_stacks: +{ + setsft; +} +.size $trap_handler_trampoline, . - $trap_handler_trampoline + +.global syscall_handler_trampoline +.section ".entry.text", "ax" +.type syscall_handler_trampoline,@function +syscall_handler_trampoline: + HANDLER_TRAMPOLINE %ctpr2, 11, syscall_handler_trampoline_continue, 1 +syscall_handler_switched_stacks: +{ + setsft; +} +.size $syscall_handler_trampoline, . - $syscall_handler_trampoline + +.global syscall_fork_trampoline +.section ".entry.text", "ax" +.type syscall_fork_trampoline,@function +syscall_fork_trampoline: + HANDLER_TRAMPOLINE %ctpr2, 11, syscall_fork_trampoline_continue, 1 +syscall_fork_switched_stacks: +{ + setsft; +} +.size $syscall_fork_trampoline, . 
- $syscall_fork_trampoline + + +/* + The next function simulates guest trap table entry and calls function + which returns to guest entry at point ttable_func +static long +as_guest_ttable_entry(int sys_num, + u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, + unsigned long ttable_func) +{ + return_to_guest_ttable_entry(ttable_func); + return (0); +} + */ +.global return_to_guest_ttable_entry +.global as_guest_ttable_entry + +.section .as_guest_ttable_entry, "ax",@progbits + .align 8 + .type .as_guest_ttable_entry,@function +as_guest_ttable_entry: + setwd wsz = 0x6, nfx = 0 + setbn rsz = 0x1, rbs = 0x4, rcur = 0x0 + setbp psz = 0x0 + disp %ctpr1, return_to_guest_ttable_entry +{ /* tags should be zeroed */ + puttagd %dr0, 0, %dr0 + puttagd %dr1, 0, %dr1 +} +{ + puttagd %dr2, 0, %dr2 + puttagd %dr3, 0, %dr3 +} +{ + puttagd %dr4, 0, %dr4 + puttagd %dr5, 0, %dr5 +} +{ + puttagd %dr6, 0, %dr6 + addd 0, %dr7, %db[0] +} + call %ctpr1, wbs = 4 + return %ctpr3 + ct %ctpr3 +$.Lfeas_gtt: + .size as_guest_ttable_entry, $.Lfeas_gtt-$as_guest_ttable_entry diff --git a/arch/e2k/kvm/ttable-inline.h b/arch/e2k/kvm/ttable-inline.h new file mode 100644 index 000000000000..226200a5b33d --- /dev/null +++ b/arch/e2k/kvm/ttable-inline.h @@ -0,0 +1,909 @@ +/* + * + * Copyright (C) 2020 MCST + * + * Defenition of traps handling routines. + */ + +#ifndef _E2K_KVM_TTABLE_H +#define _E2K_KVM_TTABLE_H + +#include +#include +#include +#include +#include + +#ifdef CONFIG_KVM_HOST_MODE +/* it is native kernel with virtualization support (hypervisor) */ + +#include "cpu.h" + +#undef DEBUG_PV_FORK_MODE +#undef DebugFORK +#define DEBUG_PV_FORK_MODE 0 /* syscall fork return debugging */ +#define DebugFORK(fmt, args...) \ +({ \ + if (DEBUG_PV_FORK_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PV_UST_MODE +#undef DebugUST +#define DEBUG_PV_UST_MODE 0 /* trap injection debugging */ +#define DebugUST(fmt, args...) 
\ +({ \ + if (debug_guest_ust) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PV_SYSCALL_MODE +#define DEBUG_PV_SYSCALL_MODE 0 /* syscall injection debugging */ + +#if DEBUG_PV_UST_MODE || DEBUG_PV_SYSCALL_MODE +extern bool debug_guest_ust; +#else +#define debug_guest_ust false +#endif /* DEBUG_PV_UST_MODE || DEBUG_PV_SYSCALL_MODE */ + +#define CHECK_GUEST_SYSCALL_UPDATES + +#ifdef CHECK_GUEST_VCPU_UPDATES + +#define DebugKVMGT(fmt, args...) \ + pr_err("%s(): " fmt, __func__, ##args) + +static inline void +check_guest_stack_regs_updates(struct kvm_vcpu *vcpu, struct pt_regs *regs) +{ + { + e2k_addr_t sbr = kvm_get_guest_vcpu_SBR_value(vcpu); + e2k_usd_lo_t usd_lo = kvm_get_guest_vcpu_USD_lo(vcpu); + e2k_usd_hi_t usd_hi = kvm_get_guest_vcpu_USD_hi(vcpu); + + if (usd_lo.USD_lo_half != regs->stacks.usd_lo.USD_lo_half || + usd_hi.USD_hi_half != regs->stacks.usd_hi.USD_hi_half || + sbr != regs->stacks.top) { + DebugKVMGT("FAULT: source USD: base 0x%llx size 0x%x " + "top 0x%lx\n", + regs->stacks.usd_lo.USD_lo_base, + regs->stacks.usd_hi.USD_hi_size, + regs->stacks.top); + DebugKVMGT("NOT updated USD: base 0x%llx size 0x%x " + "top 0x%lx\n", + usd_lo.USD_lo_base, + usd_hi.USD_hi_size, + sbr); + } + } + { + e2k_psp_lo_t psp_lo = kvm_get_guest_vcpu_PSP_lo(vcpu); + e2k_psp_hi_t psp_hi = kvm_get_guest_vcpu_PSP_hi(vcpu); + e2k_pcsp_lo_t pcsp_lo = kvm_get_guest_vcpu_PCSP_lo(vcpu); + e2k_pcsp_hi_t pcsp_hi = kvm_get_guest_vcpu_PCSP_hi(vcpu); + + if (psp_lo.PSP_lo_half != regs->stacks.psp_lo.PSP_lo_half || + psp_hi.PSP_hi_size != regs->stacks.psp_hi.PSP_hi_size) { + /* PSP_hi_ind/PCSP_hi_ind can be modified and should */ + /* be restored as saved at regs state */ + DebugKVMGT("FAULT: source PSP: base 0x%llx size 0x%x " + "ind 0x%x\n", + regs->stacks.psp_lo.PSP_lo_base, + regs->stacks.psp_hi.PSP_hi_size, + regs->stacks.psp_hi.PSP_hi_ind); + DebugKVMGT("NOT updated PSP: base 0x%llx size 0x%x " + "ind 0x%x\n", + psp_lo.PSP_lo_base, + psp_hi.PSP_hi_size, + 
psp_hi.PSP_hi_ind); + } + if (pcsp_lo.PCSP_lo_half != regs->stacks.pcsp_lo.PCSP_lo_half || + pcsp_hi.PCSP_hi_size != + regs->stacks.pcsp_hi.PCSP_hi_size) { + DebugKVMGT("FAULT: source PCSP: base 0x%llx size 0x%x " + "ind 0x%x\n", + regs->stacks.pcsp_lo.PCSP_lo_base, + regs->stacks.pcsp_hi.PCSP_hi_size, + regs->stacks.pcsp_hi.PCSP_hi_ind); + DebugKVMGT("NOT updated PCSP: base 0x%llx size 0x%x " + "ind 0x%x\n", + pcsp_lo.PCSP_lo_base, + pcsp_hi.PCSP_hi_size, + pcsp_hi.PCSP_hi_ind); + } + } + { + unsigned long cr0_lo = kvm_get_guest_vcpu_CR0_lo_value(vcpu); + unsigned long cr0_hi = kvm_get_guest_vcpu_CR0_hi_value(vcpu); + e2k_cr1_lo_t cr1_lo = kvm_get_guest_vcpu_CR1_lo(vcpu); + e2k_cr1_hi_t cr1_hi = kvm_get_guest_vcpu_CR1_hi(vcpu); + + if (cr0_lo != regs->crs.cr0_lo.CR0_lo_half || + cr0_hi != regs->crs.cr0_hi.CR0_hi_half || + cr1_lo.CR1_lo_half != regs->crs.cr1_lo.CR1_lo_half || + cr1_hi.CR1_hi_half != regs->crs.cr1_hi.CR1_hi_half) { + DebugKVMGT("FAULT: source CR0.lo 0x%016llx CR0.hi " + "0x%016llx CR1.lo.wbs 0x%x CR1.hi.ussz 0x%x\n", + regs->crs.cr0_lo.CR0_lo_half, + regs->crs.cr0_hi.CR0_hi_half, + regs->crs.cr1_lo.CR1_lo_wbs, + regs->crs.cr1_hi.CR1_hi_ussz); + DebugKVMGT("NOT updated CR0.lo 0x%016lx CR0.hi " + "0x%016lx CR1.lo.wbs 0x%x CR1.hi.ussz 0x%x\n", + cr0_lo, + cr0_hi, + cr1_lo.CR1_lo_wbs, + cr1_hi.CR1_hi_ussz); + } + } +} +#else /* ! 
CHECK_GUEST_VCPU_UPDATES */ +static inline void +check_guest_stack_regs_updates(struct kvm_vcpu *vcpu, struct pt_regs *regs) +{ +} +#endif /* CHECK_GUEST_VCPU_UPDATES */ + +static inline void +restore_guest_trap_stack_regs(struct kvm_vcpu *vcpu, struct pt_regs *regs) +{ + unsigned long regs_status = kvm_get_guest_vcpu_regs_status(vcpu); + + if (!KVM_TEST_UPDATED_CPU_REGS_FLAGS(regs_status)) { + DebugKVMVGT("competed: nothing updated"); + goto check_updates; + } + + if (KVM_TEST_UPDATED_CPU_REGS_FLAG(regs_status, WD_UPDATED_CPU_REGS)) { + e2k_wd_t wd = kvm_get_guest_vcpu_WD(vcpu); + +#ifdef CHECK_GUEST_VCPU_UPDATES + if (wd.WD_psize != regs->wd.WD_psize) { + DebugKVMGT("source WD: size 0x%x\n", + regs->wd.WD_psize); +#endif /* CHECK_GUEST_VCPU_UPDATES */ + + regs->wd.WD_psize = wd.WD_psize; + +#ifdef CHECK_GUEST_VCPU_UPDATES + DebugKVMGT("updated WD: size 0x%x\n", + regs->wd.WD_psize); + } +#endif /* CHECK_GUEST_VCPU_UPDATES */ + } + if (KVM_TEST_UPDATED_CPU_REGS_FLAG(regs_status, USD_UPDATED_CPU_REGS)) { + unsigned long sbr = kvm_get_guest_vcpu_SBR_value(vcpu); + unsigned long usd_lo = kvm_get_guest_vcpu_USD_lo_value(vcpu); + unsigned long usd_hi = kvm_get_guest_vcpu_USD_hi_value(vcpu); + +#ifdef CHECK_GUEST_VCPU_UPDATES + if (usd_lo != regs->stacks.usd_lo.USD_lo_half || + usd_hi != regs->stacks.usd_hi.USD_hi_half || + sbr != regs->stacks.top) { + DebugKVMGT("source USD: base 0x%llx size 0x%x " + "top 0x%lx\n", + regs->stacks.usd_lo.USD_lo_base, + regs->stacks.usd_hi.USD_hi_size, + regs->stacks.top); +#endif /* CHECK_GUEST_VCPU_UPDATES */ + + regs->stacks.usd_lo.USD_lo_half = usd_lo; + regs->stacks.usd_hi.USD_hi_half = usd_hi; + regs->stacks.top = sbr; + +#ifdef CHECK_GUEST_VCPU_UPDATES + DebugKVMGT("updated USD: base 0x%llx size 0x%x " + "top 0x%lx\n", + regs->stacks.usd_lo.USD_lo_base, + regs->stacks.usd_hi.USD_hi_size, + regs->stacks.top); + } +#endif /* CHECK_GUEST_VCPU_UPDATES */ + } + if (KVM_TEST_UPDATED_CPU_REGS_FLAG(regs_status, + 
HS_REGS_UPDATED_CPU_REGS)) { + unsigned long psp_lo = kvm_get_guest_vcpu_PSP_lo_value(vcpu); + unsigned long psp_hi = kvm_get_guest_vcpu_PSP_hi_value(vcpu); + unsigned long pcsp_lo = kvm_get_guest_vcpu_PCSP_lo_value(vcpu); + unsigned long pcsp_hi = kvm_get_guest_vcpu_PCSP_hi_value(vcpu); + +#ifdef CHECK_GUEST_VCPU_UPDATES + if (psp_lo != regs->stacks.psp_lo.PSP_lo_half || + psp_hi != regs->stacks.psp_hi.PSP_hi_half) { + DebugKVMGT("source PSP: base 0x%llx size 0x%x " + "ind 0x%x\n", + regs->stacks.psp_lo.PSP_lo_base, + regs->stacks.psp_hi.PSP_hi_size, + regs->stacks.psp_hi.PSP_hi_ind); +#endif /* CHECK_GUEST_VCPU_UPDATES */ + + regs->stacks.psp_lo.PSP_lo_half = psp_lo; + regs->stacks.psp_hi.PSP_hi_half = psp_hi; + +#ifdef CHECK_GUEST_VCPU_UPDATES + DebugKVMGT("updated PSP: base 0x%llx size 0x%x " + "ind 0x%x\n", + regs->stacks.psp_lo.PSP_lo_base, + regs->stacks.psp_hi.PSP_hi_size, + regs->stacks.psp_hi.PSP_hi_ind); + } +#endif /* CHECK_GUEST_VCPU_UPDATES */ + +#ifdef CHECK_GUEST_VCPU_UPDATES + if (pcsp_lo != regs->stacks.pcsp_lo.PCSP_lo_half || + pcsp_hi != regs->stacks.pcsp_hi.PCSP_hi_half) { + DebugKVMGT("source PCSP: base 0x%llx size 0x%x " + "ind 0x%x\n", + regs->stacks.pcsp_lo.PCSP_lo_base, + regs->stacks.pcsp_hi.PCSP_hi_size, + regs->stacks.pcsp_hi.PCSP_hi_ind); +#endif /* CHECK_GUEST_VCPU_UPDATES */ + + regs->stacks.pcsp_lo.PCSP_lo_half = pcsp_lo; + regs->stacks.pcsp_hi.PCSP_hi_half = pcsp_hi; + +#ifdef CHECK_GUEST_VCPU_UPDATES + DebugKVMGT("updated PCSP: base 0x%llx size 0x%x " + "ind 0x%x\n", + regs->stacks.pcsp_lo.PCSP_lo_base, + regs->stacks.pcsp_hi.PCSP_hi_size, + regs->stacks.pcsp_hi.PCSP_hi_ind); + } +#endif /* CHECK_GUEST_VCPU_UPDATES */ + } + if (KVM_TEST_UPDATED_CPU_REGS_FLAG(regs_status, CRS_UPDATED_CPU_REGS)) { + unsigned long cr0_lo = kvm_get_guest_vcpu_CR0_lo_value(vcpu); + unsigned long cr0_hi = kvm_get_guest_vcpu_CR0_hi_value(vcpu); + unsigned long cr1_lo = kvm_get_guest_vcpu_CR1_lo_value(vcpu); + unsigned long cr1_hi = 
kvm_get_guest_vcpu_CR1_hi_value(vcpu); + +#ifdef CHECK_GUEST_VCPU_UPDATES + if (cr0_lo != regs->crs.cr0_lo.CR0_lo_half || + cr0_hi != regs->crs.cr0_hi.CR0_hi_half || + cr1_lo != regs->crs.cr1_lo.CR1_lo_half || + cr1_hi != regs->crs.cr1_hi.CR1_hi_half) { + DebugKVMGT("source CR0.lo 0x%016llx CR0.hi 0x%016llx " + "CR1.lo.wbs 0x%x CR1.hi.ussz 0x%x\n", + regs->crs.cr0_lo.CR0_lo_half, + regs->crs.cr0_hi.CR0_hi_half, + regs->crs.cr1_lo.CR1_lo_wbs, + regs->crs.cr1_hi.CR1_hi_ussz); +#endif /* CHECK_GUEST_VCPU_UPDATES */ + + regs->crs.cr0_lo.CR0_lo_half = cr0_lo; + regs->crs.cr0_hi.CR0_hi_half = cr0_hi; + regs->crs.cr1_lo.CR1_lo_half = cr1_lo; + regs->crs.cr1_hi.CR1_hi_half = cr1_hi; + +#ifdef CHECK_GUEST_VCPU_UPDATES + DebugKVMGT("updated CR0.lo 0x%016llx CR0.hi 0x%016llx " + "CR1.lo.wbs 0x%x CR1.hi.ussz 0x%x\n", + regs->crs.cr0_lo.CR0_lo_half, + regs->crs.cr0_hi.CR0_hi_half, + regs->crs.cr1_lo.CR1_lo_wbs, + regs->crs.cr1_hi.CR1_hi_ussz); + } +#endif /* CHECK_GUEST_VCPU_UPDATES */ + } + kvm_reset_guest_updated_vcpu_regs_flags(vcpu, regs_status); + +check_updates: + check_guest_stack_regs_updates(vcpu, regs); +} + +static inline void +restore_guest_syscall_stack_regs(struct kvm_vcpu *vcpu, struct pt_regs *regs) +{ + unsigned long regs_status = kvm_get_guest_vcpu_regs_status(vcpu); + + if (unlikely(regs->sys_num == __NR_e2k_longjmp2)) { + /* + * The guest long jump has been updated stack & CRs registers + * and has called hypercall to update all registers state + * on host at signal stack context. 
+ * Update harware CRs registers values, stcak register will + * be updated later before return to user + */ + NATIVE_RESTORE_USER_CRs(regs); + kvm_reset_guest_vcpu_regs_status(vcpu); + return; + } + + regs_status = kvm_get_guest_vcpu_regs_status(vcpu); + + if (!KVM_TEST_UPDATED_CPU_REGS_FLAGS(regs_status)) { + DebugKVMVGT("competed: there is nothing updated"); + goto check_updates; + } + + if (KVM_TEST_UPDATED_CPU_REGS_FLAG(regs_status, WD_UPDATED_CPU_REGS)) { + e2k_wd_t wd = kvm_get_guest_vcpu_WD(vcpu); + + if (wd.WD_psize != regs->wd.WD_psize) { + DebugKVMGT("source WD: size 0x%x\n", + regs->wd.WD_psize); + +#ifndef CHECK_GUEST_SYSCALL_UPDATES + regs->wd.WD_psize = wd.WD_psize; + DebugKVMGT("updated WD: size 0x%x\n", + regs->wd.WD_psize); +#else /* CHECK_GUEST_SYSCALL_UPDATES */ + E2K_LMS_HALT_OK; + pr_err("%s(): guest updated WD, but it is not yet " + "supported case\n", + __func__); + KVM_BUG_ON(true); +#endif /* !CHECK_GUEST_SYSCALL_UPDATES */ + + } + } + if (KVM_TEST_UPDATED_CPU_REGS_FLAG(regs_status, USD_UPDATED_CPU_REGS)) { + unsigned long sbr = kvm_get_guest_vcpu_SBR_value(vcpu); + unsigned long usd_lo = kvm_get_guest_vcpu_USD_lo_value(vcpu); + unsigned long usd_hi = kvm_get_guest_vcpu_USD_hi_value(vcpu); + + if (usd_lo != regs->stacks.usd_lo.USD_lo_half || + usd_hi != regs->stacks.usd_hi.USD_hi_half || + sbr != regs->stacks.top) { + DebugKVMGT("source USD: base 0x%llx size 0x%x " + "top 0x%lx\n", + regs->stacks.usd_lo.USD_lo_base, + regs->stacks.usd_hi.USD_hi_size, + regs->stacks.top); + +#ifndef CHECK_GUEST_SYSCALL_UPDATES + regs->stacks.usd_lo.USD_lo_half = usd_lo; + regs->stacks.usd_hi.USD_hi_half = usd_hi; + regs->stacks.top = sbr; + DebugKVMGT("updated USD: base 0x%llx size 0x%x " + "top 0x%lx\n", + regs->stacks.usd_lo.USD_lo_base, + regs->stacks.usd_hi.USD_hi_size, + regs->stacks.top); +#else /* CHECK_GUEST_SYSCALL_UPDATES */ + E2K_LMS_HALT_OK; + pr_err("%s(): guest updated stack USD/SBR, but it is " + "not yet supported case\n", + __func__); + 
KVM_BUG_ON(true); +#endif /* !CHECK_GUEST_SYSCALL_UPDATES */ + } + } + + /* hardware stacks registers should be updated by hypercall as */ + /* for long jump case, so ignore guest register updated state here */ +#ifdef CHECK_GUEST_SYSCALL_UPDATES + if (KVM_TEST_UPDATED_CPU_REGS_FLAG(regs_status, + HS_REGS_UPDATED_CPU_REGS)) { + unsigned long psp_lo = kvm_get_guest_vcpu_PSP_lo_value(vcpu); + e2k_psp_hi_t psp_hi = kvm_get_guest_vcpu_PSP_hi(vcpu); + unsigned long pcsp_lo = kvm_get_guest_vcpu_PCSP_lo_value(vcpu); + e2k_pcsp_hi_t pcsp_hi = kvm_get_guest_vcpu_PCSP_hi(vcpu); + e2k_pshtp_t pshtp = regs->stacks.pshtp; + e2k_pcshtp_t pcshtp = regs->stacks.pcshtp; + + if (psp_lo != regs->stacks.psp_lo.PSP_lo_half || + psp_hi.PSP_hi_ind != + regs->stacks.psp_hi.PSP_hi_ind - + GET_PSHTP_MEM_INDEX(pshtp) || + psp_hi.PSP_hi_size != + regs->stacks.psp_hi.PSP_hi_size) { + DebugKVMGT("source PSP: base 0x%llx size 0x%x " + "ind 0x%x\n", + regs->stacks.psp_lo.PSP_lo_base, + regs->stacks.psp_hi.PSP_hi_size, + regs->stacks.psp_hi.PSP_hi_ind); + E2K_LMS_HALT_OK; + pr_err("%s(): guest updated proc stack PSP, but it is " + "not yet supported case\n", + __func__); + KVM_BUG_ON(true); + } + + if (pcsp_lo != regs->stacks.pcsp_lo.PCSP_lo_half || + pcsp_hi.PCSP_hi_ind != + regs->stacks.pcsp_hi.PCSP_hi_ind - + PCSHTP_SIGN_EXTEND(pcshtp) || + pcsp_hi.PCSP_hi_size != + regs->stacks.pcsp_hi.PCSP_hi_size) { + DebugKVMGT("source PCSP: base 0x%llx size 0x%x " + "ind 0x%x\n", + regs->stacks.pcsp_lo.PCSP_lo_base, + regs->stacks.pcsp_hi.PCSP_hi_size, + regs->stacks.pcsp_hi.PCSP_hi_ind); + E2K_LMS_HALT_OK; + pr_err("%s(): guest updated chain stack PCSP, but " + "it is not yet supported case\n", + __func__); + KVM_BUG_ON(true); + } + } +#endif /* CHECK_GUEST_SYSCALL_UPDATES */ + + if (KVM_TEST_UPDATED_CPU_REGS_FLAG(regs_status, CRS_UPDATED_CPU_REGS)) { + unsigned long cr0_lo = kvm_get_guest_vcpu_CR0_lo_value(vcpu); + unsigned long cr0_hi = kvm_get_guest_vcpu_CR0_hi_value(vcpu); + unsigned long cr1_lo = 
kvm_get_guest_vcpu_CR1_lo_value(vcpu); + unsigned long cr1_hi = kvm_get_guest_vcpu_CR1_hi_value(vcpu); + + if (cr0_lo != regs->crs.cr0_lo.CR0_lo_half || + cr0_hi != regs->crs.cr0_hi.CR0_hi_half || + cr1_lo != regs->crs.cr1_lo.CR1_lo_half || + cr1_hi != regs->crs.cr1_hi.CR1_hi_half) { + DebugKVMGT("source CR0.lo 0x%016llx CR0.hi 0x%016llx " + "CR1.lo.wbs 0x%x CR1.hi.ussz 0x%x\n", + regs->crs.cr0_lo.CR0_lo_half, + regs->crs.cr0_hi.CR0_hi_half, + regs->crs.cr1_lo.CR1_lo_wbs, + regs->crs.cr1_hi.CR1_hi_ussz); + +#ifndef CHECK_GUEST_SYSCALL_UPDATES + regs->crs.cr0_lo.CR0_lo_half = cr0_lo; + regs->crs.cr0_hi.CR0_hi_half = cr0_hi; + regs->crs.cr1_lo.CR1_lo_half = cr1_lo; + regs->crs.cr1_hi.CR1_hi_half = cr1_hi; + + DebugKVMGT("updated CR0.lo 0x%016llx CR0.hi 0x%016llx " + "CR1.lo.wbs 0x%x CR1.hi.ussz 0x%x\n", + regs->crs.cr0_lo.CR0_lo_half, + regs->crs.cr0_hi.CR0_hi_half, + regs->crs.cr1_lo.CR1_lo_wbs, + regs->crs.cr1_hi.CR1_hi_ussz); +#else /* CHECK_GUEST_SYSCALL_UPDATES */ + E2K_LMS_HALT_OK; + pr_err("%s(): guest updated CRs (CR0-CR1), but it is " + "not yet supported case\n", + __func__); + KVM_BUG_ON(true); +#endif /* !CHECK_GUEST_SYSCALL_UPDATES */ + } + } + kvm_reset_guest_updated_vcpu_regs_flags(vcpu, regs_status); + +check_updates: + check_guest_stack_regs_updates(vcpu, regs); +} + +static inline void +restore_guest_trap_regs(struct kvm_vcpu *vcpu, struct pt_regs *regs) +{ + restore_guest_trap_stack_regs(vcpu, regs); +} + +static inline void +restore_guest_syscall_regs(struct kvm_vcpu *vcpu, struct pt_regs *regs) +{ + return restore_guest_syscall_stack_regs(vcpu, regs); +} + +static __always_inline +void return_to_injected_syscall(thread_info_t *ti, pt_regs_t *regs) +{ + e2k_pshtp_t pshtp; + u64 wsz, num_q; + + /* + * This can page fault so call with open interrupts + */ + wsz = get_wsz(FROM_PV_VCPU_SYSCALL); + pv_vcpu_user_hw_stacks_prepare(ti->vcpu, regs, wsz, + FROM_PV_VCPU_SYSCALL, true); + + pshtp = regs->g_stacks.pshtp; + +#ifndef 
CONFIG_CPU_HAS_FILL_INSTRUCTION + current->thread.fill.from = FROM_PV_VCPU_SYSCALL; + current->thread.fill.return_to_user = true; +#endif + + CHECK_PT_REGS_CHAIN(regs, NATIVE_NV_READ_USD_LO_REG().USD_lo_base, + current->stack + KERNEL_C_STACK_SIZE); + + num_q = get_ps_clear_size(wsz, pshtp); + + /* restore guest UPSR and disable all interrupts */ + NATIVE_RETURN_TO_KERNEL_UPSR(ti->upsr); + + RESTORE_USER_SYSCALL_STACK_REGS(regs); + + /* it is guest kernel process return to */ + host_syscall_pv_vcpu_exit_trap(ti, regs); + + /* + * We have FILLed user hardware stacks so no + * function calls are allowed after this point. + */ + user_hw_stacks_restore(regs, ®s->g_stacks, wsz, num_q); + + NATIVE_RESTORE_KERNEL_GREGS_IN_SYSCALL(ti); +} + +static inline void +guest_syscall_inject(thread_info_t *ti, pt_regs_t *regs) +{ + KVM_BUG_ON(!kvm_test_and_clear_intc_emul_flag(regs)); + do_return_from_pv_vcpu_intc(ti, regs); + return_to_injected_syscall(ti, regs); +} + +static __always_inline notrace void +return_pv_vcpu_inject(inject_caller_t from) +{ + struct thread_info *ti = current_thread_info(); + struct kvm_vcpu *vcpu = current_thread_info()->vcpu; + kvm_host_context_t *host_ctxt = &vcpu->arch.host_ctxt; + struct signal_stack_context __user *context; + typeof(context->vcpu_ctxt) vcpu_ctxt; + struct pt_regs regs; + struct trap_pt_regs saved_trap, *trap; + gthread_info_t *gti; + bool guest_user, user_stacks; + u64 sbbp[SBBP_ENTRIES_NUM]; + e2k_aau_t aau_context; + struct local_gregs l_gregs; + e2k_stacks_t cur_g_stacks; + e2k_pshtp_t u_pshtp; + e2k_pcshtp_t u_pcshtp; + e2k_wd_t wd; + unsigned long ts_flag; + int ret; + + gti = pv_vcpu_get_gti(vcpu); + COPY_U_HW_STACKS_FROM_TI(&cur_g_stacks, ti); + raw_all_irq_enable(); + +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT + E2K_SAVE_CLOCK_REG(clock); + { + register int count; + + GET_DECR_KERNEL_TIMES_COUNT(ti, count); + scall_times = &(ti->times[count].of.syscall); + scall_times->do_signal_done = clock; + } +#endif /* 
CONFIG_KERNEL_TIMES_ACCOUNT */ + + kvm_do_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_in_trap); + + context = get_signal_stack(); + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __copy_from_user(&vcpu_ctxt, &context->vcpu_ctxt, + sizeof(vcpu_ctxt)); + clear_ts_flag(ts_flag); + if (ret) { + user_exit(); + do_exit(SIGKILL); + } + + if (likely(!vcpu_ctxt.in_sig_handler)) { + KVM_BUG_ON(kvm_is_guest_migrated_to_other_vcpu(ti, vcpu)); + } else { + /* return from trampoline was from guest user signal handler, */ + /* so the guest global registers contain user values and */ + /* migration checker can not be running here */ + + ret = __copy_from_user_with_tags(®s, &context->regs, + sizeof(regs)); + if (ret) { + user_exit(); + do_exit(SIGKILL); + } + insert_pv_vcpu_sigreturn(vcpu, &vcpu_ctxt, ®s); + /* should not be here */ + KVM_BUG_ON(true); + } + + if (copy_context_from_signal_stack(&l_gregs, ®s, &saved_trap, + sbbp, &aau_context, NULL)) { + user_exit(); + do_exit(SIGKILL); + } + + KVM_BUG_ON(vcpu_ctxt.inject_from != from); + if (from == FROM_PV_VCPU_SYSCALL_INJECT) { + syscall_handler_trampoline_finish(vcpu, ®s, + &vcpu_ctxt, host_ctxt); + } else if (from == FROM_PV_VCPU_TRAP_INJECT) { + trap_handler_trampoline_finish(vcpu, &vcpu_ctxt, host_ctxt); + } else { + KVM_BUG_ON(true); + } + + /* Always make any pending restarted system call return -EINTR. + * Otherwise we might restart the wrong system call. 
*/ + current->restart_block.fn = do_no_restart_syscall; + /* Preserve current p[c]shtp as they indicate + * how much to FILL when returning */ + u_pshtp = regs.stacks.pshtp; + u_pcshtp = regs.stacks.pcshtp; + regs.stacks.pshtp = cur_g_stacks.pshtp; + regs.stacks.pcshtp = cur_g_stacks.pcshtp; + + if (from == FROM_PV_VCPU_SYSCALL_INJECT) { + KVM_BUG_ON(!pv_vcpu_syscall_in_user_mode(vcpu)); + regs.sys_rval = vcpu_ctxt.sys_rval; + guest_user = true; + user_stacks = true; + restore_guest_syscall_regs(vcpu, ®s); + } else if (from == FROM_PV_VCPU_TRAP_INJECT) { + guest_user = !pv_vcpu_trap_on_guest_kernel(®s); + if (guest_user) { + if (regs.stacks.top >= GUEST_TASK_SIZE) { + /* guest user, but trap on guest kernel */ + user_stacks = false; + } else { + user_stacks = true; + } + } else { + user_stacks = false; + KVM_BUG_ON(regs.stacks.top < GUEST_TASK_SIZE); + } + if (user_stacks) { + /* guest trap handler can update some stack & system */ + /* registers state, so update the registers on host */ + restore_guest_trap_regs(vcpu, ®s); + } else { + /* clear updating flags for trap on guest kernel */ + kvm_reset_guest_vcpu_regs_status(vcpu); + } + } else { + KVM_BUG_ON(true); + } +#if DEBUG_PV_UST_MODE || DEBUG_PV_SYSCALL_MODE + debug_guest_ust = user_stacks; +#endif + DebugUST("guest kernel chain stack final state: base 0x%llx " + "ind 0x%x size 0x%x PCSHTP 0x%x\n", + cur_g_stacks.pcsp_lo.PCSP_lo_base, + cur_g_stacks.pcsp_hi.PCSP_hi_ind, + cur_g_stacks.pcsp_hi.PCSP_hi_size, + cur_g_stacks.pcshtp); + DebugUST("guest kernel proc stack final state: base 0x%llx " + "ind 0x%x size 0x%x PSHTP 0x%llx\n", + cur_g_stacks.psp_lo.PSP_lo_base, + cur_g_stacks.psp_hi.PSP_hi_ind, + cur_g_stacks.psp_hi.PSP_hi_size, + GET_PSHTP_MEM_INDEX(cur_g_stacks.pshtp)); + DebugUST("guest user chain stack state: base 0x%llx " + "ind 0x%x size 0x%x PCSHTP 0x%x\n", + regs.stacks.pcsp_lo.PCSP_lo_base, + regs.stacks.pcsp_hi.PCSP_hi_ind, + regs.stacks.pcsp_hi.PCSP_hi_size, + regs.stacks.pcshtp); + 
DebugUST("guest user proc stack state: base 0x%llx " + "ind 0x%x size 0x%x PSHTP 0x%llx\n", + regs.stacks.psp_lo.PSP_lo_base, + regs.stacks.psp_hi.PSP_hi_ind, + regs.stacks.psp_hi.PSP_hi_size, + GET_PSHTP_MEM_INDEX(regs.stacks.pshtp)); + DebugUST("guest user already filled PSHTP 0x%llx PCSHTP 0x%x\n", + GET_PSHTP_MEM_INDEX(u_pshtp), u_pcshtp); + + /* + * Restore proper psize as it was when signal was delivered. + * Alternative would be to create non-empty frame for + * procedure stack in prepare_sighandler_trampoline() + * if signal is delivered after a system call. + */ + if (AS(regs.wd).psize) { + raw_all_irq_disable(); + wd = NATIVE_READ_WD_REG(); + wd.psize = AS(regs.wd).psize; + NATIVE_WRITE_WD_REG(wd); + raw_all_irq_enable(); + } + + if (from_trap(®s)) + regs.trap->prev_state = exception_enter(); + else + user_exit(); + + regs.next = NULL; + /* Make sure 'pt_regs' are ready before enqueuing them */ + barrier(); + ti->pt_regs = ®s; + + trap = regs.trap; + if (trap && (3 * trap->curr_cnt) < trap->tc_count && + trap->tc_count > 0) { + trap->from_sigreturn = 1; + do_trap_cellar(®s, 0); + } + + clear_restore_sigmask(); + + if (is_actual_pv_vcpu_l_gregs(vcpu)) { + /* update "local" global registers which were changed */ + /* by page fault handlers */ + update_pv_vcpu_local_glob_regs(vcpu, &l_gregs); + } + if (!gti->task_is_binco) { + restore_local_glob_regs(&l_gregs, false); + if (!regs.is_guest_user && + kvm_is_guest_migrated_to_other_vcpu(ti, vcpu)) { + u64 old_task, new_task; + + /* + * Return will be on guest handler and + * its process has been migrated to other VCPU, + * so it need update vcpu state pointer on gregs + */ + ONLY_COPY_FROM_KERNEL_CURRENT_GREGS(&ti->k_gregs, + old_task); + RESTORE_GUEST_KERNEL_GREGS_COPY(ti, gti, vcpu); + ONLY_COPY_FROM_KERNEL_CURRENT_GREGS(&ti->k_gregs, + new_task); + KVM_BUG_ON(old_task != new_task); + } + } + + if (from == FROM_PV_VCPU_SYSCALL_INJECT) { + KVM_BUG_ON(regs.trap || regs.aau_context || + !regs.kernel_entry); + } 
else if (from == FROM_PV_VCPU_TRAP_INJECT) { + KVM_BUG_ON(!regs.trap || !regs.aau_context || + regs.kernel_entry); + } else { + KVM_BUG_ON(true); + } + + if (!user_stacks) { + KVM_BUG_ON(from == FROM_PV_VCPU_SYSCALL_INJECT); + finish_user_trap_handler(®s, FROM_RETURN_PV_VCPU_TRAP); + } else { + COPY_U_HW_STACKS_TO_STACKS(®s.g_stacks, &cur_g_stacks); + if (from == FROM_PV_VCPU_SYSCALL_INJECT) { + bool restart_needed = false; + + switch (regs.sys_rval) { + case -ERESTART_RESTARTBLOCK: + case -ERESTARTNOHAND: + regs.sys_rval = -EINTR; + break; + case -ERESTARTSYS: + if (!(context->sigact.sa.sa_flags & SA_RESTART)) { + regs.sys_rval = -EINTR; + break; + } + /* fallthrough */ + case -ERESTARTNOINTR: + restart_needed = true; + break; + } + + finish_syscall(®s, FROM_PV_VCPU_SYSCALL, !restart_needed); + } else if (from == FROM_PV_VCPU_TRAP_INJECT) { + finish_user_trap_handler(®s, + FROM_RETURN_PV_VCPU_TRAP); + } else { + KVM_BUG_ON(true); + } + } +} + +static __always_inline notrace void pv_vcpu_return_from_fork(void) +{ + struct thread_info *ti = current_thread_info(); + struct kvm_vcpu *vcpu; + struct pt_regs *regs; + e2k_stacks_t cur_g_stacks; + gthread_info_t *gti; + e2k_pshtp_t u_pshtp; + e2k_pcshtp_t u_pcshtp; + e2k_wd_t wd; + + vcpu = current_thread_info()->vcpu; + gti = pv_vcpu_get_gti(vcpu); + COPY_U_HW_STACKS_FROM_TI(&cur_g_stacks, ti); + raw_all_irq_enable(); + +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT + E2K_SAVE_CLOCK_REG(clock); + { + register int count; + + GET_DECR_KERNEL_TIMES_COUNT(ti, count); + scall_times = &(ti->times[count].of.syscall); + scall_times->do_signal_done = clock; + } +#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */ + + KVM_BUG_ON(kvm_is_guest_migrated_to_other_vcpu(ti, vcpu)); + + /* Always make any pending restarted system call return -EINTR. + * Otherwise we might restart the wrong system call. 
*/ + current->restart_block.fn = do_no_restart_syscall; + + regs = >i->fork_regs; + KVM_BUG_ON(!is_sys_call_pt_regs(regs)); + + /* Preserve current p[c]shtp as they indicate + * how much to FILL when returning */ + u_pshtp = regs->stacks.pshtp; + u_pcshtp = regs->stacks.pcshtp; + regs->stacks.pshtp = cur_g_stacks.pshtp; + regs->stacks.pcshtp = cur_g_stacks.pcshtp; + + DebugFORK("guest kernel chain stack final state: base 0x%llx " + "ind 0x%x size 0x%x PCSHTP 0x%x\n", + cur_g_stacks.pcsp_lo.PCSP_lo_base, + cur_g_stacks.pcsp_hi.PCSP_hi_ind, + cur_g_stacks.pcsp_hi.PCSP_hi_size, + cur_g_stacks.pcshtp); + DebugFORK("guest kernel proc stack final state: base 0x%llx " + "ind 0x%x size 0x%x PSHTP 0x%llx\n", + cur_g_stacks.psp_lo.PSP_lo_base, + cur_g_stacks.psp_hi.PSP_hi_ind, + cur_g_stacks.psp_hi.PSP_hi_size, + GET_PSHTP_MEM_INDEX(cur_g_stacks.pshtp)); + DebugFORK("guest user chain stack state: base 0x%llx " + "ind 0x%x size 0x%x PCSHTP 0x%x\n", + regs->stacks.pcsp_lo.PCSP_lo_base, + regs->stacks.pcsp_hi.PCSP_hi_ind, + regs->stacks.pcsp_hi.PCSP_hi_size, + regs->stacks.pcshtp); + DebugFORK("guest user proc stack state: base 0x%llx " + "ind 0x%x size 0x%x PSHTP 0x%llx\n", + regs->stacks.psp_lo.PSP_lo_base, + regs->stacks.psp_hi.PSP_hi_ind, + regs->stacks.psp_hi.PSP_hi_size, + GET_PSHTP_MEM_INDEX(regs->stacks.pshtp)); + DebugFORK("guest user already filled PSHTP 0x%llx PCSHTP 0x%x\n", + GET_PSHTP_MEM_INDEX(u_pshtp), u_pcshtp); + + /* + * Restore proper psize as it was when signal was delivered. + * Alternative would be to create non-empty frame for + * procedure stack in prepare_sighandler_trampoline() + * if signal is delivered after a system call. 
+ */ + if (AS(regs->wd).psize) { + raw_all_irq_disable(); + wd = NATIVE_READ_WD_REG(); + wd.psize = AS(regs->wd).psize; + NATIVE_WRITE_WD_REG(wd); + raw_all_irq_enable(); + } + + user_exit(); + + regs->next = NULL; + /* Make sure 'pt_regs' are ready before enqueuing them */ + barrier(); + ti->pt_regs = regs; + + clear_restore_sigmask(); + + KVM_BUG_ON(is_actual_pv_vcpu_l_gregs(vcpu)); + + COPY_U_HW_STACKS_TO_STACKS(®s->g_stacks, &cur_g_stacks); + + /* emulate restore of guest VCPU PSR state after return from syscall */ + kvm_emulate_guest_vcpu_psr_return(vcpu, regs); + + finish_syscall(regs, FROM_PV_VCPU_SYSFORK, true); +} +#else /* !CONFIG_KVM_HOST_MODE */ +/* It is native guest kernel whithout virtualization support */ +/* Virtualiztion in guest mode cannot be supported */ + +static inline void +guest_syscall_inject(thread_info_t *ti, pt_regs_t *regs) +{ + pr_err("%s() this kernel is not supported virtualization\n", __func__); +} + +static __always_inline notrace void +return_pv_vcpu_inject(inject_caller_t from) +{ + pr_err("%s() this kernel is not supported virtualization\n", __func__); +} +static __always_inline notrace void +pv_vcpu_return_from_fork(void) +{ + pr_err("%s() this kernel is not supported virtualization\n", __func__); +} +#endif /* CONFIG_KVM_HOST_MODE */ + +#endif /* _E2K_KVM_TTABLE_H */ diff --git a/arch/e2k/kvm/ttable.c b/arch/e2k/kvm/ttable.c new file mode 100644 index 000000000000..91346e935fce --- /dev/null +++ b/arch/e2k/kvm/ttable.c @@ -0,0 +1,1101 @@ +/* + * Guest user traps and system calls support on host + * + * Copyright 2015 Salavat S. 
Guilyazov (atic@mcst.ru) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "process.h" +#include "cpu.h" +#include "gaccess.h" +#include "mman.h" +#include "string.h" +#include "irq.h" +#include "time.h" +#include "lapic.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PV_UST_MODE +#undef DebugUST +#define DEBUG_PV_UST_MODE 0 /* trap injection debugging */ +#define DebugUST(fmt, args...) \ +({ \ + if (debug_guest_ust) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PV_SYSCALL_MODE +#define DEBUG_PV_SYSCALL_MODE 0 /* syscall injection debugging */ + +#if DEBUG_PV_UST_MODE || DEBUG_PV_SYSCALL_MODE +extern bool debug_guest_ust; +#else +#define debug_guest_ust false +#endif /* DEBUG_PV_UST_MODE || DEBUG_PV_SYSCALL_MODE */ + +#undef DEBUG_KVM_GUEST_TRAPS_MODE +#undef DebugKVMGT +#define DEBUG_KVM_GUEST_TRAPS_MODE 0 /* KVM guest trap debugging */ +#define DebugKVMGT(fmt, args...) \ +({ \ + if (DEBUG_KVM_GUEST_TRAPS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_AAU_TRAPS_MODE +#undef DebugAAUGT +#define DEBUG_KVM_AAU_TRAPS_MODE 0 /* KVM guest trap debugging */ +#define DebugAAUGT(fmt, args...) \ +({ \ + if (DEBUG_KVM_AAU_TRAPS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_VERBOSE_GUEST_TRAPS_MODE +#undef DebugKVMVGT +#define DEBUG_KVM_VERBOSE_GUEST_TRAPS_MODE 0 /* KVM verbose guest */ + /* trap debugging */ +#define DebugKVMVGT(fmt, args...) \ +({ \ + if (DEBUG_KVM_VERBOSE_GUEST_TRAPS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_SGE_MODE +#undef DebugKVMSGE +#define DEBUG_KVM_SGE_MODE 0 /* KVM guest 'sge' flag debugging */ +#define DebugKVMSGE(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_SGE_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_HW_STACK_BOUNDS_MODE +#undef DebugHWSB +#define DEBUG_KVM_HW_STACK_BOUNDS_MODE 0 /* guest hardware stacks */ + /* bounds trap debugging */ +#define DebugHWSB(fmt, args...) \ +({ \ + if (DEBUG_KVM_HW_STACK_BOUNDS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PARAVIRT_FAULT +#undef DebugPVF +#define DEBUG_PARAVIRT_FAULT 0 /* KVM paravirt fault */ +#define DebugPVF(fmt, args...) \ +({ \ + if (DEBUG_PARAVIRT_FAULT) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#define DEBUG_ACT 0 +#define DEBUG_ACTIVATION_MODE 0 + +#undef DEBUG_KVM_ACTIVATION_MODE +#undef DebugKVMACT +#define DEBUG_KVM_ACTIVATION_MODE 0 /* KVM guest kernel data */ + /* stack activations */ + /* debugging */ +#define DebugKVMACT(fmt, args...) \ +({ \ + if (DEBUG_KVM_ACTIVATION_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_SWITCH_VCPU_MODE +#undef DebugSWVCPU +#define DEBUG_KVM_SWITCH_VCPU_MODE false /* guest thread switch to */ + /* other VCPU */ +#define DebugSWVCPU(fmt, args...) \ +({ \ + if (DEBUG_KVM_SWITCH_VCPU_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_VIRQs_MODE +#undef DebugVIRQs +#define DEBUG_KVM_VIRQs_MODE 0 /* VIRQs debugging */ +#define DebugVIRQs(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_VIRQs_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +/* FIXME: the follow define only to debug, delete after completion and */ +/* turn on __interrupt atribute */ +#undef DEBUG_GTI +#define DEBUG_GTI 1 + +#undef DEBUG_GPT_REGS_MODE +#define DEBUG_GPT_REGS_MODE DEBUG_ACT /* KVM host and guest kernel */ + /* stack activations print */ + +#define CHECK_GUEST_VCPU_UPDATES +bool kvm_is_guest_TIRs_frozen(pt_regs_t *regs) +{ + if (check_is_guest_TIRs_frozen(regs, false)) { + /* guest TIRs should be unfrozen, but new traps can be */ + /* recieved by host and only for host */ + /* (for example interrupts) */ + pr_err("%s(): guest TIRs is now frozen\n", __func__); + dump_stack(); + pr_err("%s(): Trap in trap and may be recursion, " + "so kill the VCPU and VM\n", + __func__); + do_exit(-EDEADLK); + } + return false; +} + +/* + * Following functions run on host, check if traps occurred on guest user + * or kernel, so probably should be passed to guest kernel to handle. + * In some cases traps should be passed to guest, but need be preliminary + * handled by host (for example hardware stack bounds). 
+ * Functions return flag or mask of traps which passed to guest and + * should not be handled by host + */ +unsigned long kvm_host_aau_page_fault(struct kvm_vcpu *vcpu, pt_regs_t *regs, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + unsigned int aa_mask; + + aa_mask = GET_AA_TIRS(TIR_hi); + KVM_BUG_ON(aa_mask == 0); + + machine.do_aau_fault(aa_mask, regs); + + return SET_AA_TIRS(0UL, GET_AA_TIRS(TIR_hi)); +} +unsigned long kvm_pass_the_trap_to_guest(struct kvm_vcpu *vcpu, pt_regs_t *regs, + unsigned long TIR_hi, unsigned long TIR_lo, int trap_no) +{ + e2k_tir_lo_t tir_lo; + e2k_tir_hi_t tir_hi; + int tir_no; + unsigned long trap_mask; + + DebugKVMVGT("trap #%d TIRs hi 0x%016lx lo 0x%016lx\n", + trap_no, TIR_hi, TIR_lo); + + BUG_ON(trap_no > exc_max_num); + trap_mask = (1UL << trap_no); + BUG_ON(trap_mask == 0); + + if (trap_no == exc_illegal_opcode_num) { + /* Trap on guest kernel, so it probably can be because of */ + /* break point on debugger */ + if (is_gdb_breakpoint_trap(regs)) { + /* It is debugger trap, so pass to host */ + return 0; + } + } + + tir_lo.TIR_lo_reg = TIR_lo; + tir_hi.TIR_hi_reg = TIR_hi; + tir_hi.TIR_hi_aa = 0; /* clear AAU traps mask */ + tir_hi.TIR_hi_exc = trap_mask; + tir_no = tir_hi.TIR_hi_j; + if (tir_lo.TIR_lo_ip == 0) { + tir_lo.TIR_lo_ip = regs->crs.cr0_hi.CR0_hi_IP; + } + kvm_update_vcpu_intc_TIR(vcpu, tir_no, tir_hi, tir_lo); + regs->traps_to_guest |= trap_mask; + DebugKVMVGT("trap is set to guest TIRs #%d\n", tir_no); + return trap_mask; +} +static inline unsigned long +pass_virqs_to_guest_TIRs(struct pt_regs *regs, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + struct kvm_vcpu *vcpu; + e2k_tir_hi_t tir_hi; + e2k_tir_hi_t g_TIR_hi; + e2k_tir_lo_t g_TIR_lo; + int TIR_no; + + BUG_ON(check_is_guest_TIRs_frozen(regs, true)); + vcpu = current_thread_info()->vcpu; + + if (TIR_hi == 0) { + TIR_no = 0; + } else { + tir_hi.TIR_hi_reg = TIR_hi; + TIR_no = tir_hi.TIR_hi_j; + } + g_TIR_lo.TIR_lo_reg = TIR_lo; + g_TIR_hi.TIR_hi_reg = 
GET_CLEAR_TIR_HI(TIR_no); + g_TIR_hi.TIR_hi_exc = exc_interrupt_mask; + kvm_update_guest_vcpu_TIR(vcpu, TIR_no, g_TIR_hi, g_TIR_lo); + regs->traps_to_guest |= exc_interrupt_mask; + DebugKVMVGT("interrupt is set to guest TIRs #%d hi 0x%016llx " + "lo 0x%016llx\n", + TIR_no, g_TIR_hi.TIR_hi_reg, g_TIR_lo.TIR_lo_reg); + return exc_interrupt_mask; +} + +static bool lapic_state_printed = false; + +unsigned long kvm_pass_virqs_to_guest(struct pt_regs *regs, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + struct kvm_vcpu *vcpu; + unsigned long ret; + + vcpu = current_thread_info()->vcpu; + BUG_ON(vcpu == NULL); + + if (DEBUG_KVM_VIRQs_MODE && !lapic_state_printed) { + lapic_state_printed = true; + kvm_print_local_APIC(vcpu); + } else if (!DEBUG_KVM_VIRQs_MODE && lapic_state_printed) { + lapic_state_printed = false; + } + BUG_ON(!irqs_disabled()); + + if (guest_trap_user_mode(regs) && !kvm_get_guest_vcpu_sge(vcpu)) { + pr_debug("%s(): sge disabled on guest user\n", __func__); + } + + raw_spin_lock(&vcpu->kvm->arch.virq_lock); + + if (unlikely(trap_from_host_kernel_mode(regs))) { + /* trap on host mode, for example at the beginning of */ + /* hypercall on spill hardware stacks */ + goto out_unlock; + } + if (!kvm_has_virqs_to_guest(vcpu)) { + /* nothing pending VIRQs to pass to guest */ + goto out_unlock; + } + if (atomic_read(&vcpu->arch.host_ctxt.signal.traps_num) > 1) { + /* VCPU is now at trap handling, probably VIRQs will */ + /* handled too, if not, pending VIRQs will be passed later */ + goto some_later; + } + if (kvm_guest_vcpu_irqs_disabled(vcpu, + kvm_get_guest_vcpu_UPSR_value(vcpu), + kvm_get_guest_vcpu_PSR_value(vcpu))) { + /* guest IRQs is now disabled, so it cannot pass interrupts */ + /* right now, so pending VIRQs flag is not cleared to pass */ + /* them to other appropriate case */ + DebugVIRQs("IRQs is disabled on guest kernel thread, " + "could not pass\n"); + goto some_later; + } + + BUG_ON(!kvm_test_pending_virqs(vcpu)); + + if 
(kvm_test_virqs_injected(vcpu)) { + KVM_BUG_ON(vcpu->arch.virq_wish); + goto already_injected; + } + + raw_spin_unlock(&vcpu->kvm->arch.virq_lock); + + DebugVIRQs("pass interrupt to guest\n"); + kvm_inject_interrupt(vcpu, regs); + + /* set flag to disable re-injection of the same pending VIRQs */ + /* through the last with or direct injection of interrupt */ + kvm_set_virqs_injected(vcpu); + if (vcpu->arch.virq_wish) { + /* it is request from host to inject last wish */ + /* on return from hypercall to cause preliminary */ + /* trap on guest and then inject interrupt for guest. */ + /* Convert last wish to interrupt and clear last wish flag */ + vcpu->arch.virq_wish = false; + } + + ret = exc_interrupt_mask; + + return ret; + +some_later: + if (vcpu->arch.virq_wish) { + /* interrupt cannot be passed right now, so clear wish flag */ + vcpu->arch.virq_wish = false; + } +already_injected: +out_unlock: + raw_spin_unlock(&vcpu->kvm->arch.virq_lock); + return 0; +} +unsigned long kvm_pass_coredump_trap_to_guest(struct kvm_vcpu *vcpu, + struct pt_regs *regs) +{ + e2k_tir_hi_t tir_hi; + e2k_tir_lo_t tir_lo; + int tir_no; + + if (regs->traps_to_guest != 0 || !kvm_check_is_guest_TIRs_empty(vcpu)) { + pr_err("%s(): TIRs is not empty, TIR[0] : 0x%016llx\n", + __func__, + kvm_get_guest_vcpu_TIR_hi(vcpu, 0).TIR_hi_reg); + WARN_ON(true); + kvm_reset_guest_vcpu_TIRs_num(vcpu); + } + /* empty TIRs is signal to do coredump */ + tir_lo.TIR_lo_reg = GET_CLEAR_TIR_LO(0); + tir_hi.TIR_hi_reg = GET_CLEAR_TIR_HI(0); + tir_no = 0; + kvm_update_vcpu_intc_TIR(vcpu, tir_no, tir_hi, tir_lo); + regs->traps_to_guest |= core_dump_mask; + DebugKVMVGT("trap is set to guest TIRs #0 hi 0x%016llx lo 0x%016llx\n", + tir_hi.TIR_hi_reg, tir_lo.TIR_lo_reg); + return core_dump_mask; +} + +/* + * CLW requests should be handled by host, but address to clear can be + * from guest user data stack range, so preliminary this page fault should + * be passed to guest kernel to handle page miss. 
+ * CLW requests are executed before other faulted requests, so this page miss + * fault should be passed and handled by guest. + * Function returns non zero value if CLW request is from guest, guest kernel + * successfully it completed and host can terminate CLW and continue handle + * other trap cellar requests + */ +unsigned long kvm_pass_clw_fault_to_guest(struct pt_regs *regs, + trap_cellar_t *tcellar) +{ + trap_pt_regs_t *trap = regs->trap; + e2k_addr_t address; + tc_cond_t cond; + tc_cond_t g_cond; + struct kvm_vcpu *vcpu; + e2k_tir_hi_t g_TIR_hi; + e2k_tir_lo_t g_TIR_lo; + int TIR_no; + int tc_no; + bool handled; + + address = tcellar->address; + cond = tcellar->condition; + BUG_ON(!guest_user_addr_mode_page_fault(regs, + false /* instr page */, + address)); + DebugKVMVGT("trap occurred on guest user: address 0x%lx " + "condition 0x%016llx\n", + address, AW(cond)); + + BUG_ON(check_is_guest_TIRs_frozen(regs, true)); + TIR_no = trap->TIR_no; + g_TIR_lo.TIR_lo_reg = trap->TIR_lo; + g_TIR_hi.TIR_hi_reg = trap->TIR_hi; + WARN_ON(TIR_no != g_TIR_hi.TIR_hi_j); + g_TIR_hi.TIR_hi_aa = 0; + g_TIR_hi.TIR_hi_exc = exc_data_page_mask; + WARN_ON((1UL << trap->nr_trap) != exc_data_page_mask); + + /* set guest VCPU TIR registers state to simulate data page trap */ + vcpu = current_thread_info()->vcpu; + BUG_ON(vcpu == NULL); + kvm_update_guest_vcpu_TIR(vcpu, TIR_no, g_TIR_hi, g_TIR_lo); + regs->traps_to_guest |= exc_data_page_mask; + DebugKVMVGT("trap is set to guest TIRs #%d hi 0x%016llx lo 0x%016llx\n", + TIR_no, g_TIR_hi.TIR_hi_reg, g_TIR_lo.TIR_lo_reg); + + /* add new trap cellar entry for guest VCPU */ + /* to simulate CLW page fault */ + AW(g_cond) = 0; + AS(g_cond).fault_type = AS(cond).fault_type; + WARN_ON(AS(cond).fault_type == 0); + AS(g_cond).chan = AS(cond).chan; + AS(g_cond).opcode = AS(cond).opcode; + WARN_ON(!AS(cond).store); + AS(g_cond).store = 1; + AS(g_cond).empt = 1; /* special case: 'store empty' to ignore */ + /* recovery of store operation after */ 
+ /* page fault handling */ + AS(g_cond).scal = 1; + AS(g_cond).dst_rcv = AS(cond).dst_rcv; + AS(g_cond).rcv = AS(cond).rcv; + tc_no = kvm_add_guest_vcpu_tc_entry(vcpu, address, g_cond, NULL); + DebugKVMVGT("new entry #%d added to guest trap cellar: address 0x%lx " + "condition 0x%016llx\n", + tc_no, address, AW(g_cond)); + + /* now it needs handle the page fault passed to guest kernel */ + handled = kvm_handle_guest_traps(regs); + if (!handled) + return 0; /* host should handle the trap */ + return exc_data_page_mask; +} + +/* + * Page faults on guest user addresses should be handled by guest kernel, so + * it need pass these faulted requests to guest. + * Function returns non zero value if the request is from guest and it + * successfully passed to guest (set VCPU TIRs and trap cellar) + */ +unsigned long kvm_pass_page_fault_to_guest(struct pt_regs *regs, + trap_cellar_t *tcellar) +{ + trap_pt_regs_t *trap = regs->trap; + struct kvm_vcpu *vcpu; + int ret; + unsigned long pfres; + + vcpu = current_thread_info()->vcpu; + BUG_ON(vcpu == NULL); + + KVM_BUG_ON(!kvm_test_intc_emul_flag(regs)); + + pfres = 0; + if (!is_paging(vcpu)) + pfres |= KVM_SHADOW_NONP_PF_MASK; + + ret = kvm_pv_mmu_page_fault(vcpu, regs, tcellar, false); + if (ret == 0) { + /* page fault successfully handled and need recover */ + /* load/store operation */ + pfres |= KVM_GUEST_KERNEL_ADDR_PF_MASK; + return pfres; + } + if (ret == 1) { + /* guest try write to protected PT, page fault handled */ + /* and recovered by hypervisor */ + pfres |= KVM_SHADOW_PT_PROT_PF_MASK; + return pfres; + } + if (ret == 2) { + /* page fault is injected to guest, and wiil be */ + /* handled by guest */ + return KVM_TRAP_IS_PASSED(trap->nr_trap); + } + + /* could not handle, so host should to do it */ + return KVM_NOT_GUEST_TRAP_RESULT; +} + +void kvm_complete_page_fault_to_guest(unsigned long what_complete) +{ + struct kvm_vcpu *vcpu; + + if (what_complete == 0) + return; + + vcpu = current_thread_info()->vcpu; + 
BUG_ON(vcpu == NULL); + + KVM_BUG_ON(what_complete != 0); +} + +/* + * Guest hardware stacks bounds can occure, but 'sge' mask can be disabled, + * so host handler incremented stack size on reserve limit of guest and + * update hardware stack pointers on host. + * But stack bounds trap should be handled by guest, increment user + * hardware stacks size and update own stack pointers state. + * Not zero value of hardware stack reserved part is signal to pass trap + * on stacks bouns to handle by guest kernel. + */ +bool kvm_is_guest_proc_stack_bounds(struct pt_regs *regs) +{ + if (likely(!test_guest_proc_bounds_waiting(current_thread_info()))) + return false; + WARN_ONCE(1, "implement me"); + return true; +} +bool kvm_is_guest_chain_stack_bounds(struct pt_regs *regs) +{ + if (likely(!test_guest_chain_bounds_waiting(current_thread_info()))) + return false; + WARN_ONCE(1, "implement me"); + return true; +} + +static inline unsigned long +pass_hw_stack_bounds_to_guest_TIRs(struct pt_regs *regs, + unsigned long trap_mask) +{ + struct kvm_vcpu *vcpu; + e2k_tir_hi_t g_TIR_hi; + e2k_tir_lo_t g_TIR_lo; + + vcpu = current_thread_info()->vcpu; + + /* trap on host kernel and IRQs at this moment were disabled */ + /* so trap cannot be passed immediatly to guest, because of */ + /* any call of guest kernel can be trapped and host receives */ + /* recursive trap and may be dedlock. */ + /* For example hardware stack bounds trap on native change */ + /* stacks from scheduler (see 2) below) */ + BUG_ON(native_kernel_mode(regs)); + + if (!kvm_get_guest_vcpu_sge(vcpu) || + kvm_guest_vcpu_irqs_disabled(vcpu, + kvm_get_guest_vcpu_UPSR_value(vcpu), + kvm_get_guest_vcpu_PSR_value(vcpu))) { + /* + * 1) 'sge' trap masked on guest, so cannot pass the trap; + * 2) interrupts disabled, so cannot too pass the trap. + * In this case, if the trap will be passed, then guest + * trap handler (parse_TIR_registers()) can enable + * interrupts and may be dedlock. 
For example while scheduler + * switch to other process interrupts disabled and spinlock + * rq->lock taken, so if trap is passed and is handling, then + * IRQs enable and new interrupt on timer can call some + * function (for example scheduler_tick()) which need take + * the same spinlock rq->lock + */ + DebugKVMSGE("%s (%d/%d) hardware stack bounds trap is masked " + "on guest, cannot pass the trap to guest\n", + current->comm, current->pid, + current_thread_info()->gthread_info->gpid->nid.nr); + /* trap on guest and should be handled by guest, */ + /* but now trap handling is masked, */ + /* still trap will repeat again some later */ + if (test_and_set_guest_hw_stack_bounds_waiting( + current_thread_info(), trap_mask)) { + DebugKVMSGE("trap on hardware stack bounds is already " + "waiting for pass trap to guest\n"); + } + return masked_hw_stack_bounds_mask | trap_mask; + } + BUG_ON(check_is_guest_TIRs_frozen(regs, true)); + /* set guest VCPU TIR registers state to simulate stack bounds trap */ + g_TIR_lo.TIR_lo_reg = GET_CLEAR_TIR_LO(0); + g_TIR_hi.TIR_hi_reg = GET_CLEAR_TIR_HI(0); + g_TIR_hi.TIR_hi_exc = trap_mask; + kvm_update_guest_vcpu_TIR(vcpu, 0, g_TIR_hi, g_TIR_lo); + regs->traps_to_guest |= trap_mask; + if (test_and_clear_guest_hw_stack_bounds_waiting( + current_thread_info(), trap_mask)) { + DebugKVMSGE("trap on hardware stack bounds was waiting and " + "trap now is passed to guest\n"); + } + DebugHWSB("hardware stack bounds trap is set to guest TIRs #0\n"); + return trap_mask; +} + +/* + * Guest process hardware stacks overflow or underflow occurred. + * This trap should handle guest kernel, but before transfer to guest, + * host should expand hardware stack on guest kernel reserved part to enable + * safe handling by guest. Otherwise can be recursive hardware stacks + * bounds traps. 
+ * PSR.sge flag should be enabled to detect recursive bounds while guest + * handler running in user mode + * If the guest kernel trap handler cannot be started, then this function + * send signal to complete guest and return non-zero value to disable continue + * of the trap handling by host. + * WARNING: Interrupts should be disabled by caller + */ +static inline unsigned long +kvm_handle_guest_proc_stack_bounds(struct pt_regs *regs) +{ + hw_stack_t *hw_stacks; + struct kvm_vcpu *vcpu; + bool underflow = false; + e2k_size_t ps_size; + e2k_size_t ps_ind; + e2k_psp_hi_t gpsp_hi; + e2k_size_t gps_size; + int ret; + + hw_stacks = ¤t_thread_info()->u_hw_stack; + vcpu = current_thread_info()->vcpu; + BUG_ON(vcpu == NULL); + ps_size = regs->stacks.psp_hi.PSP_hi_size; + ps_ind = regs->stacks.psp_hi.PSP_hi_ind; + DebugHWSB("procedure stack bounds: index 0x%lx size 0x%lx\n", + ps_ind, ps_size); + if (ps_ind < (ps_size >> 1)) { + underflow = true; + DebugHWSB("procedure stack underflow, stack need not " + "be expanded on guest kernel part\n"); + goto guest_handler; + } + + WARN_ONCE(1, "implememt me"); + + gpsp_hi = kvm_get_guest_vcpu_PSP_hi(vcpu); + gps_size = gpsp_hi.PSP_hi_size; + kvm_set_guest_vcpu_PSP_hi(vcpu, gpsp_hi); + DebugHWSB("procedure stack will be incremented on guest kernel " + "reserved part: size 0x%lx, new size 0x%x\n", + gps_size, gpsp_hi.PSP_hi_size); + + ret = -ENOSYS; //TODO update_guest_kernel_hw_ps_state(vcpu); + if (ret) { + pr_err("%s(): could not expand guest procedure stack " + "on kernel reserved part, error %d\n", + __func__, ret); + goto out_failed; + } + /* correct PSP rigister state in pt_regs structure */ + regs->stacks.psp_hi.PSP_hi_size = gpsp_hi.PSP_hi_size; + +guest_handler: + return pass_hw_stack_bounds_to_guest_TIRs(regs, + exc_proc_stack_bounds_mask); +out_failed: + force_sig(SIGSEGV); + return exc_proc_stack_bounds_mask; +} + +static inline unsigned long +kvm_handle_guest_chain_stack_bounds(struct pt_regs *regs) +{ + hw_stack_t 
*hw_stacks; + struct kvm_vcpu *vcpu; + bool underflow = false; + e2k_size_t pcs_size; + e2k_size_t pcs_ind; + e2k_pcsp_hi_t gpcsp_hi; + e2k_size_t gpcs_size; + int ret; + + hw_stacks = ¤t_thread_info()->u_hw_stack; + vcpu = current_thread_info()->vcpu; + pcs_size = regs->stacks.pcsp_hi.PCSP_hi_size; + pcs_ind = regs->stacks.pcsp_hi.PCSP_hi_ind; + DebugHWSB("chain stack bounds: index 0x%lx size 0x%lx\n", + pcs_ind, pcs_size); + if (pcs_ind < (pcs_size >> 1)) { + underflow = true; + DebugHWSB("chain stack underflow, stack need not " + "be expanded on guest kernel part\n"); + goto guest_handler; + } + + WARN_ONCE(1, "implememt me"); + + gpcsp_hi = kvm_get_guest_vcpu_PCSP_hi(vcpu); + gpcs_size = gpcsp_hi.PCSP_hi_size; + kvm_set_guest_vcpu_PCSP_hi(vcpu, gpcsp_hi); + DebugHWSB("chain stack will be incremented on guest kernel " + "reserved part: size 0x%lx, new size 0x%x\n", + gpcs_size, gpcsp_hi.PCSP_hi_size); + + ret = -ENOSYS; //TODO update_guest_kernel_hw_pcs_state(vcpu); + if (ret) { + pr_err("%s(): could not expand guest chain stack " + "on kernel reserved part, error %d\n", + __func__, ret); + goto out_failed; + } + /* correct PCSP rigister state in pt_regs structure */ + regs->stacks.pcsp_hi.PCSP_hi_size = gpcsp_hi.PCSP_hi_size; + +guest_handler: + return pass_hw_stack_bounds_to_guest_TIRs(regs, + exc_chain_stack_bounds_mask); + +out_failed: + force_sig(SIGSEGV); + return exc_chain_stack_bounds_mask; +} + +unsigned long kvm_pass_stack_bounds_trap_to_guest(struct pt_regs *regs, + bool proc_bounds, bool chain_bounds) +{ + unsigned long passed = 0; + unsigned long flags; + + /* hw stack bounds traps can have not trap IP and proper TIRs */ + if (LIGHT_HYPERCALL_MODE(regs)) { + DebugHWSB("hw stacks bounds occurred in light hypercall: " + "%s %s\n", + (proc_bounds) ? "proc" : "", + (chain_bounds) ? "chain" : ""); + } else if (guest_kernel_mode(regs)) { + DebugHWSB("hw stacks bounds occurred on guest kernel: " + "%s %s\n", + (proc_bounds) ? 
"proc" : "", + (chain_bounds) ? "chain" : ""); + } else if (guest_user_mode(regs)) { + DebugHWSB("hw stacks bounds occurred on guest user: %s %s\n", + (proc_bounds) ? "proc" : "", + (chain_bounds) ? "chain" : ""); + } else { + pr_err("hw stacks bounds occurred on host running guest " + "process: %s %s\n", + (proc_bounds) ? "proc" : "", + (chain_bounds) ? "chain" : ""); + BUG_ON(true); + } + + local_irq_save(flags); + if (proc_bounds) + passed |= kvm_handle_guest_proc_stack_bounds(regs); + if (chain_bounds) + passed |= kvm_handle_guest_chain_stack_bounds(regs); + local_irq_restore(flags); + + return passed; +} + +int kvm_apply_updated_psp_bounds(struct kvm_vcpu *vcpu, + unsigned long base, unsigned long size, + unsigned long start, unsigned long end, unsigned long delta) +{ + int ret; + + ret = apply_psp_delta_to_signal_stack(base, size, start, end, delta); + if (ret != 0) { + pr_err("%s(): could not apply updated procedure stack " + "boundaries, error %d\n", + __func__, ret); + } + return ret; +} + +int kvm_apply_updated_pcsp_bounds(struct kvm_vcpu *vcpu, + unsigned long base, unsigned long size, + unsigned long start, unsigned long end, unsigned long delta) +{ + int ret; + + ret = apply_pcsp_delta_to_signal_stack(base, size, start, end, delta); + if (ret != 0) { + pr_err("%s(): could not apply updated chain stack " + "boundaries, error %d\n", + __func__, ret); + } + return ret; +} + +#ifdef CHECK_GUEST_VCPU_UPDATES +static inline void +check_guest_stack_regs_updates(struct kvm_vcpu *vcpu, struct pt_regs *regs) +{ + { + e2k_addr_t sbr = kvm_get_guest_vcpu_SBR_value(vcpu); + e2k_usd_lo_t usd_lo = kvm_get_guest_vcpu_USD_lo(vcpu); + e2k_usd_hi_t usd_hi = kvm_get_guest_vcpu_USD_hi(vcpu); + + if (usd_lo.USD_lo_half != regs->stacks.usd_lo.USD_lo_half || + usd_hi.USD_hi_half != regs->stacks.usd_hi.USD_hi_half || + sbr != regs->stacks.top) { + DebugKVMGT("FAULT: source USD: base 0x%llx size 0x%x " + "top 0x%lx\n", + regs->stacks.usd_lo.USD_lo_base, + 
regs->stacks.usd_hi.USD_hi_size, + regs->stacks.top); + DebugKVMGT("NOT updated USD: base 0x%llx size 0x%x " + "top 0x%lx\n", + usd_lo.USD_lo_base, + usd_hi.USD_hi_size, + sbr); + } + } + { + e2k_psp_lo_t psp_lo = kvm_get_guest_vcpu_PSP_lo(vcpu); + e2k_psp_hi_t psp_hi = kvm_get_guest_vcpu_PSP_hi(vcpu); + e2k_pcsp_lo_t pcsp_lo = kvm_get_guest_vcpu_PCSP_lo(vcpu); + e2k_pcsp_hi_t pcsp_hi = kvm_get_guest_vcpu_PCSP_hi(vcpu); + + if (psp_lo.PSP_lo_half != regs->stacks.psp_lo.PSP_lo_half || + psp_hi.PSP_hi_size != regs->stacks.psp_hi.PSP_hi_size) { + /* PSP_hi_ind/PCSP_hi_ind can be modified and should */ + /* be restored as saved at regs state */ + DebugKVMGT("FAULT: source PSP: base 0x%llx size 0x%x " + "ind 0x%x\n", + regs->stacks.psp_lo.PSP_lo_base, + regs->stacks.psp_hi.PSP_hi_size, + regs->stacks.psp_hi.PSP_hi_ind); + DebugKVMGT("NOT updated PSP: base 0x%llx size 0x%x " + "ind 0x%x\n", + psp_lo.PSP_lo_base, + psp_hi.PSP_hi_size, + psp_hi.PSP_hi_ind); + } + if (pcsp_lo.PCSP_lo_half != regs->stacks.pcsp_lo.PCSP_lo_half || + pcsp_hi.PCSP_hi_size != + regs->stacks.pcsp_hi.PCSP_hi_size) { + DebugKVMGT("FAULT: source PCSP: base 0x%llx size 0x%x " + "ind 0x%x\n", + regs->stacks.pcsp_lo.PCSP_lo_base, + regs->stacks.pcsp_hi.PCSP_hi_size, + regs->stacks.pcsp_hi.PCSP_hi_ind); + DebugKVMGT("NOT updated PCSP: base 0x%llx size 0x%x " + "ind 0x%x\n", + pcsp_lo.PCSP_lo_base, + pcsp_hi.PCSP_hi_size, + pcsp_hi.PCSP_hi_ind); + } + } + { + unsigned long cr0_lo = kvm_get_guest_vcpu_CR0_lo_value(vcpu); + unsigned long cr0_hi = kvm_get_guest_vcpu_CR0_hi_value(vcpu); + e2k_cr1_lo_t cr1_lo = kvm_get_guest_vcpu_CR1_lo(vcpu); + e2k_cr1_hi_t cr1_hi = kvm_get_guest_vcpu_CR1_hi(vcpu); + + if (cr0_lo != regs->crs.cr0_lo.CR0_lo_half || + cr0_hi != regs->crs.cr0_hi.CR0_hi_half || + cr1_lo.CR1_lo_half != regs->crs.cr1_lo.CR1_lo_half || + cr1_hi.CR1_hi_half != regs->crs.cr1_hi.CR1_hi_half) { + DebugKVMGT("FAULT: source CR0.lo 0x%016llx CR0.hi " + "0x%016llx CR1.lo.wbs 0x%x CR1.hi.ussz 0x%x\n", + 
regs->crs.cr0_lo.CR0_lo_half, + regs->crs.cr0_hi.CR0_hi_half, + regs->crs.cr1_lo.CR1_lo_wbs, + regs->crs.cr1_hi.CR1_hi_ussz); + DebugKVMGT("NOT updated CR0.lo 0x%016lx CR0.hi " + "0x%016lx CR1.lo.wbs 0x%x CR1.hi.ussz 0x%x\n", + cr0_lo, + cr0_hi, + cr1_lo.CR1_lo_wbs, + cr1_hi.CR1_hi_ussz); + } + } +} +#else /* ! CHECK_GUEST_VCPU_UPDATES */ +static inline void +check_guest_stack_regs_updates(struct kvm_vcpu *vcpu, struct pt_regs *regs) +{ +} +#endif /* CHECK_GUEST_VCPU_UPDATES */ + +static inline void +restore_guest_trap_stack_regs(struct kvm_vcpu *vcpu, struct pt_regs *regs) +{ + unsigned long regs_status = kvm_get_guest_vcpu_regs_status(vcpu); + + if (!KVM_TEST_UPDATED_CPU_REGS_FLAGS(regs_status)) { + DebugKVMVGT("competed: nothing updated"); + goto check_updates; + } + + if (KVM_TEST_UPDATED_CPU_REGS_FLAG(regs_status, WD_UPDATED_CPU_REGS)) { + e2k_wd_t wd = kvm_get_guest_vcpu_WD(vcpu); + +#ifdef CHECK_GUEST_VCPU_UPDATES + if (wd.WD_psize != regs->wd.WD_psize) { + DebugKVMGT("source WD: size 0x%x\n", + regs->wd.WD_psize); +#endif /* CHECK_GUEST_VCPU_UPDATES */ + + regs->wd.WD_psize = wd.WD_psize; + +#ifdef CHECK_GUEST_VCPU_UPDATES + DebugKVMGT("updated WD: size 0x%x\n", + regs->wd.WD_psize); + } +#endif /* CHECK_GUEST_VCPU_UPDATES */ + } + if (KVM_TEST_UPDATED_CPU_REGS_FLAG(regs_status, USD_UPDATED_CPU_REGS)) { + unsigned long sbr = kvm_get_guest_vcpu_SBR_value(vcpu); + unsigned long usd_lo = kvm_get_guest_vcpu_USD_lo_value(vcpu); + unsigned long usd_hi = kvm_get_guest_vcpu_USD_hi_value(vcpu); + +#ifdef CHECK_GUEST_VCPU_UPDATES + if (usd_lo != regs->stacks.usd_lo.USD_lo_half || + usd_hi != regs->stacks.usd_hi.USD_hi_half || + sbr != regs->stacks.top) { + DebugKVMGT("source USD: base 0x%llx size 0x%x " + "top 0x%lx\n", + regs->stacks.usd_lo.USD_lo_base, + regs->stacks.usd_hi.USD_hi_size, + regs->stacks.top); +#endif /* CHECK_GUEST_VCPU_UPDATES */ + + regs->stacks.usd_lo.USD_lo_half = usd_lo; + regs->stacks.usd_hi.USD_hi_half = usd_hi; + regs->stacks.top = sbr; + 
+#ifdef CHECK_GUEST_VCPU_UPDATES + DebugKVMGT("updated USD: base 0x%llx size 0x%x " + "top 0x%lx\n", + regs->stacks.usd_lo.USD_lo_base, + regs->stacks.usd_hi.USD_hi_size, + regs->stacks.top); + } +#endif /* CHECK_GUEST_VCPU_UPDATES */ + } + if (KVM_TEST_UPDATED_CPU_REGS_FLAG(regs_status, + HS_REGS_UPDATED_CPU_REGS)) { + unsigned long psp_lo = kvm_get_guest_vcpu_PSP_lo_value(vcpu); + unsigned long psp_hi = kvm_get_guest_vcpu_PSP_hi_value(vcpu); + unsigned long pcsp_lo = kvm_get_guest_vcpu_PCSP_lo_value(vcpu); + unsigned long pcsp_hi = kvm_get_guest_vcpu_PCSP_hi_value(vcpu); + +#ifdef CHECK_GUEST_VCPU_UPDATES + if (psp_lo != regs->stacks.psp_lo.PSP_lo_half || + psp_hi != regs->stacks.psp_hi.PSP_hi_half) { + DebugKVMGT("source PSP: base 0x%llx size 0x%x " + "ind 0x%x\n", + regs->stacks.psp_lo.PSP_lo_base, + regs->stacks.psp_hi.PSP_hi_size, + regs->stacks.psp_hi.PSP_hi_ind); +#endif /* CHECK_GUEST_VCPU_UPDATES */ + + regs->stacks.psp_lo.PSP_lo_half = psp_lo; + regs->stacks.psp_hi.PSP_hi_half = psp_hi; + +#ifdef CHECK_GUEST_VCPU_UPDATES + DebugKVMGT("updated PSP: base 0x%llx size 0x%x " + "ind 0x%x\n", + regs->stacks.psp_lo.PSP_lo_base, + regs->stacks.psp_hi.PSP_hi_size, + regs->stacks.psp_hi.PSP_hi_ind); + } +#endif /* CHECK_GUEST_VCPU_UPDATES */ + +#ifdef CHECK_GUEST_VCPU_UPDATES + if (pcsp_lo != regs->stacks.pcsp_lo.PCSP_lo_half || + pcsp_hi != regs->stacks.pcsp_hi.PCSP_hi_half) { + DebugKVMGT("source PCSP: base 0x%llx size 0x%x " + "ind 0x%x\n", + regs->stacks.pcsp_lo.PCSP_lo_base, + regs->stacks.pcsp_hi.PCSP_hi_size, + regs->stacks.pcsp_hi.PCSP_hi_ind); +#endif /* CHECK_GUEST_VCPU_UPDATES */ + + regs->stacks.pcsp_lo.PCSP_lo_half = pcsp_lo; + regs->stacks.pcsp_hi.PCSP_hi_half = pcsp_hi; + +#ifdef CHECK_GUEST_VCPU_UPDATES + DebugKVMGT("updated PCSP: base 0x%llx size 0x%x " + "ind 0x%x\n", + regs->stacks.pcsp_lo.PCSP_lo_base, + regs->stacks.pcsp_hi.PCSP_hi_size, + regs->stacks.pcsp_hi.PCSP_hi_ind); + } +#endif /* CHECK_GUEST_VCPU_UPDATES */ + } + if 
(KVM_TEST_UPDATED_CPU_REGS_FLAG(regs_status, CRS_UPDATED_CPU_REGS)) { + unsigned long cr0_lo = kvm_get_guest_vcpu_CR0_lo_value(vcpu); + unsigned long cr0_hi = kvm_get_guest_vcpu_CR0_hi_value(vcpu); + unsigned long cr1_lo = kvm_get_guest_vcpu_CR1_lo_value(vcpu); + unsigned long cr1_hi = kvm_get_guest_vcpu_CR1_hi_value(vcpu); + +#ifdef CHECK_GUEST_VCPU_UPDATES + if (cr0_lo != regs->crs.cr0_lo.CR0_lo_half || + cr0_hi != regs->crs.cr0_hi.CR0_hi_half || + cr1_lo != regs->crs.cr1_lo.CR1_lo_half || + cr1_hi != regs->crs.cr1_hi.CR1_hi_half) { + DebugKVMGT("source CR0.lo 0x%016llx CR0.hi 0x%016llx " + "CR1.lo.wbs 0x%x CR1.hi.ussz 0x%x\n", + regs->crs.cr0_lo.CR0_lo_half, + regs->crs.cr0_hi.CR0_hi_half, + regs->crs.cr1_lo.CR1_lo_wbs, + regs->crs.cr1_hi.CR1_hi_ussz); +#endif /* CHECK_GUEST_VCPU_UPDATES */ + + regs->crs.cr0_lo.CR0_lo_half = cr0_lo; + regs->crs.cr0_hi.CR0_hi_half = cr0_hi; + regs->crs.cr1_lo.CR1_lo_half = cr1_lo; + regs->crs.cr1_hi.CR1_hi_half = cr1_hi; + +#ifdef CHECK_GUEST_VCPU_UPDATES + DebugKVMGT("updated CR0.lo 0x%016llx CR0.hi 0x%016llx " + "CR1.lo.wbs 0x%x CR1.hi.ussz 0x%x\n", + regs->crs.cr0_lo.CR0_lo_half, + regs->crs.cr0_hi.CR0_hi_half, + regs->crs.cr1_lo.CR1_lo_wbs, + regs->crs.cr1_hi.CR1_hi_ussz); + } +#endif /* CHECK_GUEST_VCPU_UPDATES */ + } + kvm_reset_guest_updated_vcpu_regs_flags(vcpu, regs_status); + +check_updates: + check_guest_stack_regs_updates(vcpu, regs); +} + +void restore_guest_trap_regs(struct kvm_vcpu *vcpu, struct pt_regs *regs) +{ + restore_guest_trap_stack_regs(vcpu, regs); +} + +int kvm_correct_guest_trap_return_ip(unsigned long return_ip) +{ + struct signal_stack_context __user *context; + struct pt_regs __user *u_regs; + e2k_cr0_hi_t cr0_hi; + unsigned long ts_flag; + int ret; + + context = get_signal_stack(); + u_regs = &context->regs; + cr0_hi.CR0_hi_half = 0; + cr0_hi.CR0_hi_IP = return_ip; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + + ret = __put_user(cr0_hi.CR0_hi_half, &u_regs->crs.cr0_hi.CR0_hi_half); + + 
clear_ts_flag(ts_flag); + + if (ret != 0) { + pr_err("%s(): put to user corrected IP failed, error %d\n", + __func__, ret); + } + return ret; +} + +/* FIXME: kvm trap entry should be passed by guest kernel through common */ +/* locked area kvm_state_t or as arg of guest kernel entry_point to start it +static char *kvm_guest_ttable_base = NULL; + * should be deleted + */ + +trap_hndl_t kvm_do_handle_guest_traps(struct pt_regs *regs) +{ + pr_err("%s() should not be called and need delete\n", __func__); + return (trap_hndl_t)-ENOSYS; +} + +/* + * Any system calls from guest user start this function. + * User data stack was not switched to kernel (host or guest) stack, so + * the host function (including all called functions) should not use data stack. + * Function switch user data stack just to guest kernel stack and possible + * debugging mode will use guest stack (it is not right in theory, but it need + * only to debug) + */ +/* FIXME: only to debug (including gregs save/restore), __interrupt */ +/* should be uncommented */ +long /*__interrupt*/ +goto_guest_kernel_ttable_C(long sys_num_and_entry, + u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6) +{ + pr_err("%s() should not be called and need delete\n", __func__); + return -ENOSYS; +} + +int kvm_copy_hw_stacks_frames(struct kvm_vcpu *vcpu, + void __user *dst, void __user *src, long size, bool is_chain) +{ + int ret; + + KVM_BUG_ON(((unsigned long)dst & PAGE_MASK) != + ((unsigned long)(dst + (size - 1)) & PAGE_MASK)); + KVM_BUG_ON(((unsigned long)src & PAGE_MASK) != + ((unsigned long)(src + (size - 1)) & PAGE_MASK)); + + ret = kvm_copy_from_to_user_with_tags(vcpu, dst, src, size); + if (ret != size) { + pr_err("%s(): copy from %px to %px failed, error %d\n", + __func__, src, dst, ret); + return (ret < 0) ? 
ret : -EFAULT; + } + + return 0; +} diff --git a/arch/e2k/kvm/user_area.c b/arch/e2k/kvm/user_area.c new file mode 100644 index 000000000000..413c8c46f1bb --- /dev/null +++ b/arch/e2k/kvm/user_area.c @@ -0,0 +1,1424 @@ +/* + * + * Contigous virtual area of user memory menegement. + * The product is compilation of ideas of linux/mm/vmalloc, + * linux/mm/mmap and arch/e2k/mm/area_alloc.c + * Copyright 2011 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "user_area.h" +#include "mmu.h" + +#undef DEBUG_USER_AREA_MODE +#undef DebugUA +#define DEBUG_USER_AREA_MODE 0 /* processes */ +#define DebugUA(fmt, args...) \ +({ \ + if (DEBUG_USER_AREA_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KERNEL_AREA_MODE +#undef DebugKA +#define DEBUG_KERNEL_AREA_MODE 0 /* kernel virtual machine debugging */ +#define DebugKA(fmt, args...) \ +({ \ + if (DEBUG_KERNEL_AREA_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +/* + * SLAB cache for user area chunk structures + */ +static struct kmem_cache *user_area_cachep = NULL; +static struct kmem_cache *user_area_chunk_cachep = NULL; + +static void user_area_free_queued_chunks(user_area_t *user_area); + +int +user_area_caches_init(void) +{ + DebugUA("user_area_caches_init() started\n"); + user_area_cachep = + kmem_cache_create("user_area", sizeof(user_area_t), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (user_area_cachep == NULL) { + printk(KERN_ERR "Cannot create user memore area structures " + "SLAB cache"); + return -ENOMEM; + } + user_area_chunk_cachep = + kmem_cache_create("user_area_chunk", sizeof(user_chunk_t), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (user_area_chunk_cachep == NULL) { + printk(KERN_ERR "Cannot create user memore chunk structures " + "SLAB cache"); + return -ENOMEM; + } + DebugUA("user_area_caches_init() finished\n"); + return 0; +} + +void +user_area_caches_destroy(void) +{ + 
DebugUA("user_area_caches_destroy() started\n"); + if (user_area_chunk_cachep) { + kmem_cache_destroy(user_area_chunk_cachep); + user_area_chunk_cachep = NULL; + } + if (user_area_cachep) { + kmem_cache_destroy(user_area_cachep); + user_area_cachep = NULL; + } + DebugUA("user_area_caches_destroy() finished\n"); +} + +#ifdef CONFIG_DEBUG_USER_AREA + +/* + * Print chunks + */ +static void +user_area_print_chunk(user_chunk_t *chunk) +{ + pr_info(" Chunk 0x%px next 0x%px prev 0x%px start 0x%lx end 0x%lx " + "size 0x%08lx flags 0x%04lx\n", + chunk, chunk->next, chunk->prev, + chunk->start, chunk->end, chunk->size, chunk->flags); +} +static long +user_area_print_all_chunks_in_list(user_chunk_t *chunk_list) +{ + user_chunk_t **p; + user_chunk_t *next = NULL; + long chunks_num = 0; + + for (p = &chunk_list; (next = *p); p = &next->next) { + user_area_print_chunk(next); + chunks_num++; + } + return chunks_num; +} +static long +user_area_print_all_free_chunks(user_area_t *user_area) +{ + long chunks_num; + pr_info("List of all free chunks in area from 0x%lx to 0x%lx\n", + user_area->area_start, user_area->area_end); + chunks_num = user_area_print_all_chunks_in_list(user_area->free_list); + pr_info("Total number of free chunks is %ld\n", chunks_num); + return chunks_num; +} +static long +user_area_print_all_busy_chunks(user_area_t *user_area) +{ + long chunks_num; + pr_info("List of all busy chunks in area from 0x%lx to 0x%lx\n", + user_area->area_start, user_area->area_end); + chunks_num = user_area_print_all_chunks_in_list(user_area->busy_list); + pr_info("Total number of busy chunks is %ld\n", chunks_num); + return chunks_num; +} +static void +user_area_print_all_chunks(user_area_t *user_area) +{ + long chunks_num; + long total_num = 0; + + chunks_num = user_area_print_all_free_chunks(user_area); + total_num += chunks_num; + + chunks_num = user_area_print_all_busy_chunks(user_area); + total_num += chunks_num; + + pr_info("List of chunks queued to free in area from 0x%lx to 
0x%lx\n", + user_area->area_start, user_area->area_end); + chunks_num = user_area_print_all_chunks_in_list( + user_area->to_free_list); + pr_info("Total number of queued chunks is %ld\n", chunks_num); + total_num += chunks_num; + pr_info("Total number of all chunks is %ld\n", total_num); +} +#endif /* CONFIG_DEBUG_USER_AREA */ + +/* + * Find the address of the user virtual memory area into the list of chunks. + * Look up the first chunk which satisfies address < shunk_end, NULL if none. + * The list should be locked by caller + */ +static inline user_chunk_t * +user_area_find_chunk(user_chunk_t **chunk_list, user_chunk_t ***prev, + e2k_addr_t start, e2k_size_t size, e2k_size_t align, void *vmap_base) +{ + user_chunk_t **p; + user_chunk_t *next = NULL; + e2k_addr_t addr; + + DebugUA("user_area_find_chunk() started: chunk list %px -> %px address " + "0x%lx size 0x%lx\n", + chunk_list, *chunk_list, start, size); + for (p = chunk_list; (next = *p); p = &next->next) { + DebugUA("user_area_find_chunk() current chunk 0x%px start 0x%lx " + "end 0x%lx\n", + next, next->start, next->end); + addr = next->start; + addr = ALIGN_TO_SIZE(addr, align); + if (vmap_base != NULL) { + if (next->vmap_base == vmap_base) + break; + } else if (start == 0) { + if (next->end - addr >= size) + break; + } else if (start < next->end) { + break; + } + } + DebugUA("user_area_find_chunk() returns chunk %px prev %px -> %px\n", + next, p, *p); + *prev = p; + return next; +} + +/* + * Find a free chunk of the user virtual memory area. 
+ * The list should be locked by caller + */ +static inline user_chunk_t * +user_area_find_free_chunk(user_area_t *user_area, e2k_addr_t start, + e2k_size_t size, e2k_size_t align) +{ + user_chunk_t **prev = NULL; + user_chunk_t *next; + e2k_addr_t end = start + size; + + DebugUA("user_area_find_free_chunk() started: address 0x%lx end " + "0x%lx\n", start, end); + next = user_area_find_chunk(&user_area->free_list, &prev, + start, size, align, NULL); + if (next == NULL) { + DebugUA("user_area_find_free_chunk() area is not found\n"); + return NULL; + } + DebugUA("user_area_find_free_chunk() area found: start 0x%lx " + "end 0x%lx\n", next->start, next->end); + if (start == 0) + return next; + if (start >= next->start && end <= next->end) { + return next; + } + return NULL; +} + +/* + * Find a free chunk of the user virtual memory area. + * The list should be locked by caller + */ +static inline user_chunk_t * +user_area_find_busy_chunk(user_area_t *user_area, e2k_addr_t start, + e2k_size_t size) +{ + user_chunk_t **prev = NULL; + user_chunk_t *next; + e2k_addr_t end = start + size; + + DebugUA("user_area_find_busy_chunk() started: address 0x%lx end " + "0x%lx\n", start, end); + next = user_area_find_chunk(&user_area->busy_list, &prev, + start, size, 0, NULL); + if (next == NULL) { + DebugUA("user_area_find_busy_chunk() area is not found\n"); + return NULL; + } + DebugUA("user_area_find_busy_chunk() area found: start 0x%lx " + "end 0x%lx\n", next->start, next->end); + return next; +} +static inline user_chunk_t * +user_area_find_vmap_chunk(user_area_t *user_area, void *vmap_base) +{ + user_chunk_t **prev = NULL; + user_chunk_t *next; + + DebugUA("user_area_find_busy_chunk() started: vmap base %px\n", + vmap_base); + next = user_area_find_chunk(&user_area->busy_list, &prev, + 0, 0, 0, vmap_base); + if (next == NULL) { + DebugUA("user_area_find_busy_chunk() area is not found\n"); + return NULL; + } + DebugUA("user_area_find_busy_chunk() area found: start 0x%lx " + "end 
0x%lx\n", + next->start, next->end); + return next; +} + +static inline int +user_area_is_busy(user_area_t *user_area, e2k_addr_t address, + e2k_addr_t size) +{ + user_chunk_t *next; + unsigned long irq_flags; + e2k_addr_t end = address + size; + + DebugUA("user_area_is_busy() started: address 0x%lx end 0x%lx\n", + address, end); + spin_lock_irqsave(&user_area->area_list_lock, irq_flags); + next = user_area_find_busy_chunk(user_area, address, size); + if (next == NULL) { + spin_unlock_irqrestore(&user_area->area_list_lock, irq_flags); + DebugUA("user_area_is_busy() area is not found\n"); + return 0; + } + DebugUA("user_area_is_busy() area found: start 0x%lx end 0x%lx\n", + next->start, next->end); + if (address >= next->start || end > next->start) { + spin_unlock_irqrestore(&user_area->area_list_lock, irq_flags); + return 1; + } + spin_unlock_irqrestore(&user_area->area_list_lock, irq_flags); + return 0; +} + +/* + * Insert the chunk of the user virtual memory area to the list of the chunks. + * The list should be locked by caller + */ +static inline void +user_area_insert_chunk(user_chunk_t **chunk_list, user_chunk_t *chunk) +{ + user_chunk_t *next_chunk; + user_chunk_t **prev_chunk; + + DebugUA("user_area_insert_chunk() started: chunk list %px -> %px chunk " + "%px\n", chunk_list, *chunk_list, chunk); + next_chunk = user_area_find_chunk(chunk_list, &prev_chunk, + chunk->start, chunk->size, 0, + NULL); + DebugUA("user_area_insert_chunk() prev chunk %px next %px\n", + *prev_chunk, next_chunk); + *prev_chunk = chunk; + if (prev_chunk == chunk_list) + chunk->prev = NULL; + else + chunk->prev = (user_chunk_t *)((e2k_addr_t)prev_chunk - + offsetof(user_chunk_t, next)); + chunk->next = next_chunk; + if (next_chunk != NULL) { + next_chunk->prev = chunk; + } + DebugUA("user_area_insert_chunk() returns chunk->next %px prev " + "%px\n", chunk->next, chunk->prev); +} + +/* + * Delete the chunk of the user virtual memory area from the list + * of the chunks. 
The list should be locked by caller + */ +static inline void +user_area_delete_chunk(user_chunk_t *chunk_list, e2k_addr_t address) +{ + user_chunk_t **p; + user_chunk_t *next = NULL; + + DebugUA("user_area_delete_chunk() started: address 0x%lxn", + address); + for (p = &chunk_list; (next = *p); p = &next->next) { + DebugUA("user_area_delete_chunk() current chunk 0x%px " + "address 0x%lx\n", next, next->start); + if (next->start == address) { + break; + } + } + if (next == NULL) { + DebugUA("user_area_delete_chunk() could not find a chunk " + "for address 0x%lx\n", address); + return; + } + DebugUA("user_area_delete_chunk() found a chunk 0x%px " + "for address 0x%lx\n", next, address); + + *p = next->next; + if (next->next != NULL) + next->next->prev = next->prev; + DebugUA("user_area_delete_chunk() deleted chunk 0x%px, from prev " + "0x%px, next 0x%px\n", + next, next->prev, next->next); + next->next = NULL; + next->prev = NULL; +} + +/* + * Insert the chunk of the user virtual memory area to the list of busy + * chunks. 
 * The list should be locked by caller
 */
/* Add a chunk to the busy list: address-ordered insert when the area is
 * USER_AREA_ORDERED, otherwise O(1) push at the list head.
 * Takes the area list lock itself. */
static inline void
user_area_insert_busy_chunk(user_area_t *user_area,
				user_chunk_t *busy_chunk)
{
	unsigned long flags;

	DebugUA("user_area_insert_busy_chunk() started: chunk %px\n",
		busy_chunk);
	spin_lock_irqsave(&user_area->area_list_lock, flags);
	if (user_area->flags & USER_AREA_ORDERED) {
		user_area_insert_chunk(&user_area->busy_list, busy_chunk);
		spin_unlock_irqrestore(&user_area->area_list_lock, flags);
		DebugUA("user_area_insert_busy_chunk() returns ordered chunk "
			"chunk->next %px chunk->prev %px\n",
			busy_chunk->next, busy_chunk->prev);
		return;
	}
	/* unordered area: push at head */
	busy_chunk->next = user_area->busy_list;
	busy_chunk->prev = NULL;
	if (user_area->busy_list != NULL) {
		user_area->busy_list->prev = busy_chunk;
	}
	user_area->busy_list = busy_chunk;
	spin_unlock_irqrestore(&user_area->area_list_lock, flags);
	DebugUA("user_area_insert_busy_chunk() returns unordered chunk "
		"chunk->next %px chunk->prev %px\n",
		busy_chunk->next, busy_chunk->prev);
}

/*
 * Insert the chunk of the user virtual memory area to the list of free
 * chunks.
 * The list should not be locked
 */
/* Add a chunk back to the free list (ordered or head-push, as above) and
 * account its size in the area's free-byte counter.
 * Takes the area list lock itself. */
static inline void
user_area_insert_free_chunk(user_area_t *user_area,
				user_chunk_t *free_chunk)
{
	unsigned long flags;

	DebugUA("user_area_insert_free_chunk() started: chunk %px\n",
		free_chunk);
	spin_lock_irqsave(&user_area->area_list_lock, flags);
	if (user_area->flags & USER_AREA_ORDERED) {
		user_area_insert_chunk(&user_area->free_list, free_chunk);
		DebugUA("user_area_insert_free_chunk() returns ordered chunk "
			"chunk->next %px chunk->prev %px\n",
			free_chunk->next, free_chunk->prev);
		user_area->freebytes += free_chunk->size;
		spin_unlock_irqrestore(&user_area->area_list_lock, flags);
		return;
	}
	free_chunk->next = user_area->free_list;
	free_chunk->prev = NULL;
	if (user_area->free_list != NULL) {
		user_area->free_list->prev = free_chunk;
	}
	user_area->free_list = free_chunk;
	user_area->freebytes += free_chunk->size;
	spin_unlock_irqrestore(&user_area->area_list_lock, flags);
	DebugUA("user_area_insert_free_chunk() returns unordered chunk "
		"chunk->next %px chunk->prev %px\n",
		free_chunk->next, free_chunk->prev);
}

/*
 * Insert the chunk of the user virtual memory area to the list of
 * ready to free chunks.
 */
/* Queue a chunk on the deferred to-free list (always head-push).
 * Takes the area list lock itself. */
static inline void
user_area_insert_to_free_chunk(user_area_t *user_area,
				user_chunk_t *to_free_chunk)
{
	unsigned long flags;

	DebugUA("user_area_insert_to_free_chunk() started: chunk %px\n",
		to_free_chunk);
	spin_lock_irqsave(&user_area->area_list_lock, flags);
	to_free_chunk->next = user_area->to_free_list;
	to_free_chunk->prev = NULL;
	if (user_area->to_free_list != NULL) {
		user_area->to_free_list->prev = to_free_chunk;
	}
	user_area->to_free_list = to_free_chunk;
	spin_unlock_irqrestore(&user_area->area_list_lock, flags);
	DebugUA("user_area_insert_to_free_chunk() inserted unordered "
		"chunk: chunk->next %px chunk->prev %px\n",
		to_free_chunk->next, to_free_chunk->prev);
}

/*
 * Init new structure of chunk of user virtual memory area
 */
/* Reset every field of a chunk descriptor for the range
 * [chunk_start, chunk_start + chunk_size) with the given flags */
static inline void
user_area_init_chunk(user_chunk_t *new_chunk, e2k_addr_t chunk_start,
			e2k_size_t chunk_size, unsigned long flags)
{
	DebugUA("user_area_init_chunk() started: start 0x%lx size 0x%lx\n",
		chunk_start, chunk_size);
	new_chunk->flags = flags;
	new_chunk->start = chunk_start;
	new_chunk->end = chunk_start + chunk_size;
	new_chunk->size = chunk_size;
	new_chunk->next = NULL;
	new_chunk->prev = NULL;
	new_chunk->pages = NULL;
	new_chunk->nr_pages = 0;
	new_chunk->vmap_base = NULL;

	DebugUA("user_area_init_chunk() finished: start 0x%lx size 0x%lx\n",
		chunk_start, chunk_size);
}

/*
 * Create new structure of chunk of user virtual memory area
 */
/* Allocate a chunk descriptor from the dedicated kmem cache and
 * initialize it; returns NULL on allocation failure */
static inline user_chunk_t *
user_area_create_chunk(e2k_addr_t chunk_start, e2k_size_t chunk_size,
			unsigned long flags)
{
	user_chunk_t *new_chunk;

	DebugUA("user_area_create_chunk() started: start 0x%lx size 0x%lx\n",
		chunk_start, chunk_size);
	new_chunk = kmem_cache_alloc(user_area_chunk_cachep, GFP_KERNEL);
	if (new_chunk == NULL) {
		printk(KERN_ERR "user_area_create_chunk() could not "
			"allocate cached kernel memory for chunck struct\n");
		return NULL;
	}
	user_area_init_chunk(new_chunk,
			chunk_start, chunk_size, flags);
	DebugUA("user_area_create_chunk() finished: start 0x%lx size 0x%lx\n",
		chunk_start, chunk_size);
	return new_chunk;
}
/* Convenience wrapper: allocate a zeroed chunk descriptor with no range */
static inline user_chunk_t *
user_area_create_empty_chunk(void)
{
	return user_area_create_chunk(0, 0, 0);
}
/* Return a chunk descriptor to the kmem cache. The chunk must no longer
 * own pages, a kernel vmapping, or PRESENT/LOCKED/VMAPPED state. */
static inline void
user_area_release_chunk(user_chunk_t *chunk)
{
	BUG_ON(chunk->pages != NULL || chunk->nr_pages != 0);
	if (chunk->vmap_base != NULL) {
		pr_err("%s: virtual mapping is not yet freed, chunk from 0x%lx "
			"to 0x%lx cannot release\n",
			__func__, chunk->start, chunk->end);
	}
	BUG_ON(chunk->vmap_base != NULL);
	BUG_ON(chunk->flags & (USER_AREA_PRESENT | USER_AREA_LOCKED |
				USER_AREA_VMAPPED));
	kmem_cache_free(user_area_chunk_cachep, chunk);
}

/*
 * Delete the address of the user virtual memory area from the list of free
 * chunks.
 * Look up all the chunks which include specified address range.
 * The list should be locked by caller
 */
/* Carve chunk's range [start, end) out of the free list: free chunks are
 * trimmed, split (at most once, using the single preallocated new_chunk),
 * or unlinked entirely when fully covered. Returns 0 or -ENOMEM. */
static inline int
user_area_occupy_chunk(user_area_t *user_area, user_chunk_t *chunk)
{
	user_chunk_t **p;
	user_chunk_t *next;
	user_chunk_t *new_chunk = NULL;
	user_chunk_t *chunk_to_free = NULL;
	user_chunk_t *queue_to_free = NULL;
	e2k_addr_t start = chunk->start;
	e2k_addr_t end = chunk->end;
	unsigned long irq_flags;

	DebugUA("user_area_occupy_chunk() started: chunk 0x%px start 0x%lx "
		"end 0x%lx\n",
		chunk, start, end);
	/* preallocate outside the spinlock: splitting a free chunk in the
	 * middle needs one extra descriptor */
	new_chunk = user_area_create_empty_chunk();
	if (new_chunk == NULL) {
		DebugUA("user_area_occupy_chunk() could not allocate structure "
			"for splited chunk\n");
		return -ENOMEM;
	}
	spin_lock_irqsave(&user_area->area_list_lock, irq_flags);
	for (p = &user_area->free_list; (next = *p); p = &next->next) {
		DebugUA("user_area_occupy_chunk() current chunk 0x%px "
			"start 0x%lx end 0x%lx\n",
			next, next->start, next->end);
		/* defer actual release of unlinked chunks until after the
		 * lock is dropped; park the previous one on the queue */
		if (chunk_to_free != NULL) {
			chunk_to_free->next = queue_to_free;
			queue_to_free = chunk_to_free;
			chunk_to_free = NULL;
		}

		/* skip free chunks with no overlap against [start, end) */
		if ((start < next->start || start >= next->end) &&
			(end <= next->start || end > next->end) &&
			(next->start < start || next->start >= end) &&
			(next->end <= start || next->end > end)) {
			continue;
		}
		if (start > next->start) {
			if (end < next->end) {
				/* occupied range strictly inside this free
				 * chunk: split into [next->start, start) and
				 * [end, next->end) */
				if (new_chunk == NULL) {
					panic("user_area_occupy_chunk() "
						"twice splitting of chunk "
						"from 0x%lx to 0x%lx to free "
						"area from 0x%lx to 0x%lx\n",
						next->start, next->end,
						start, end);
					break;
				}
				user_area_init_chunk(new_chunk,
					next->start, start - next->start,
					next->flags);
				/* NOTE(review): new_chunk->prev/next are set
				 * here, but neither *p (the predecessor's
				 * forward link / list head) nor
				 * new_chunk->prev->next is pointed at
				 * new_chunk, so the left half looks
				 * unreachable by forward traversal — verify
				 * against callers */
				new_chunk->next = next;
				new_chunk->prev = next->prev;
				next->start = end;
				next->size = next->end - end;
				next->prev = new_chunk;
				DebugUA("user_area_occupy_chunk() new chunk "
					"0x%px start 0x%lx end 0x%lx next "
					"chunk start 0x%lx end 0x%lx\n",
					new_chunk, new_chunk->start,
					new_chunk->end, next->start,
					next->end);
				new_chunk = NULL;
				continue;
			}
			/* overlap at the tail of the free chunk: trim end */
			next->end = start;
			next->size = start - next->start;
			DebugUA("user_area_occupy_chunk() next chunk "
				"0x%px start 0x%lx end 0x%lx\n",
				next, next->start, next->end);
			continue;
		}
		if (end < next->end) {
			/* overlap at the head of the free chunk: trim start */
			next->start = end;
			next->size = next->end - end;
			DebugUA("user_area_occupy_chunk() next chunk "
				"0x%px start 0x%lx end 0x%lx\n",
				next, next->start, next->end);
			continue;
		}
		/* free chunk fully covered: unlink it */
		*p = next->next;
		if (next->next != NULL) {
			next->next->prev = next->prev;
		}
		chunk_to_free = next;
		DebugUA("user_area_occupy_chunk() will free next chunk "
			"0x%px start 0x%lx end 0x%lx\n",
			next, next->start, next->end);
	}
	spin_unlock_irqrestore(&user_area->area_list_lock, irq_flags);
	/* release outside the lock: the unused split descriptor, the last
	 * unlinked chunk, and everything parked on the queue */
	if (new_chunk) {
		user_area_release_chunk(new_chunk);
	}
	if (chunk_to_free) {
		user_area_release_chunk(chunk_to_free);
	}
	while (queue_to_free) {
		chunk_to_free = queue_to_free;
		queue_to_free = chunk_to_free->next;
		user_area_release_chunk(chunk_to_free);
	}

	DebugUA("user_area_occupy_chunk() finished\n");
	return 0;
}

/*
 * Create new user virtual memory area
descriptor. + * The function returns 0 on success and < 0 (-errno) if fails. + */ +user_area_t * +user_area_create(e2k_addr_t area_start, e2k_size_t area_size, + unsigned long flags) +{ + user_area_t *new_area; + user_chunk_t *free_chunk; + + DebugUA("user_area_create() started to init area start 0x%lx size " + "0x%lx flags 0x%lx\n", area_start, area_size, flags); + new_area = kmem_cache_alloc(user_area_cachep, GFP_KERNEL); + if (new_area == NULL) { + printk(KERN_ERR "Could not allocate memory for user area " + "structure\n"); + return NULL; + } + new_area->flags = flags; + new_area->area_start = area_start; + new_area->area_end = area_start + area_size; + spin_lock_init(&new_area->area_list_lock); + new_area->busy_list = NULL; + free_chunk = user_area_create_chunk(area_start, area_size, flags); + DebugUA("user_area_create() created free chunk 0x%px to full area\n", + free_chunk); + if (free_chunk == NULL) + return NULL; + new_area->free_list = free_chunk; + new_area->to_free_list = NULL; + new_area->freebytes = area_size; + DebugUA("user_area_create() returns area start 0x%lx size 0x%lx " + "flags 0x%lx\n", area_start, area_size, flags); + return new_area; +} + +/* + * Reserve the chunk of the user virtual memory area + */ +int +user_area_reserve_chunk(user_area_t *user_area, e2k_addr_t area_start, + e2k_size_t area_size) +{ + user_chunk_t *area_chunk; + int error; + + DebugUA("user_area_reserve_chunk() started: start 0x%lx size 0x%lx\n", + area_start, area_size); + area_chunk = user_area_create_chunk(area_start, area_size, + user_area->flags); + if (area_chunk == NULL) { + DebugUA("user_area_reserve_chunk() Could not allocate " + "structure to describe reserved chunk of user " + "memory area\n"); + return -ENOMEM; + } + + error = user_area_occupy_chunk(user_area, area_chunk); + if (error < 0) { + DebugUA("user_area_reserve_chunk() occupy " + "reserved area failed\n"); + return error; + } + area_chunk->flags = USER_AREA_RESERVED; + 
	user_area_insert_busy_chunk(user_area, area_chunk);

	DebugUA("user_area_reserve_chunk() finished: start 0x%lx size 0x%lx\n",
		area_start, area_size);
	return 0;
}

/*
 * Merge a adjacent chunks into the list of free chunks.
 * Coalesces chunks whose ranges touch; unlinked duplicates are returned
 * as a singly linked queue for the caller to release outside the lock.
 * The list should be locked by caller
 */
static user_chunk_t *
user_area_merge_chunks(user_area_t *user_area)
{
	user_chunk_t **p, *tmp, **p1, *tmp1, *to_free;
	user_chunk_t *queue_to_free = NULL;
	long n = 0;

	DebugUA("user_area_merge_chunks() started: kmem area 0x%px\n",
		user_area);
	for (p = &user_area->free_list; (tmp = *p); p = &tmp->next) {
		DebugUA("user_area_merge_chunks() chunk to merge 0x%px "
			"start 0x%lx end 0x%lx\n",
			tmp, tmp->start, tmp->end);
		for (p1 = &tmp->next; (tmp1 = *p1); p1 = &tmp1->next) {
			DebugUA("user_area_merge_chunks() current condidate "
				"chunk 0x%px start 0x%lx end 0x%lx\n",
				tmp1, tmp1->start, tmp1->end);
			if (tmp->end == tmp1->start) {
				/* tmp1 follows tmp immediately: extend end */
				to_free = tmp1;
				DebugUA("user_area_merge_chunks() will merge "
					"with the chunk 0x%px\n",
					to_free);
				tmp->end = to_free->end;
			} else if (tmp->start == tmp1->end) {
				/* tmp1 precedes tmp immediately: extend start */
				to_free = tmp1;
				DebugUA("user_area_merge_chunks() will merge "
					"with the chunk 0x%px\n",
					to_free);
				tmp->start = to_free->start;
			} else {
				continue;
			}
			tmp->size += to_free->size;
			/* unlink the absorbed chunk from the free list */
			if (to_free->prev == NULL)
				user_area->free_list = to_free->next;
			else
				to_free->prev->next = to_free->next;
			if (to_free->next != NULL)
				to_free->next->prev = to_free->prev;
			/* NOTE(review): to_free->next is repointed at the
			 * free queue while the inner loop then advances via
			 * p1 = &tmp1->next, i.e. the scan continues into the
			 * already-unlinked queue rather than the free list —
			 * confirm this is intended */
			to_free->next = queue_to_free;
			queue_to_free = to_free;
			n++;
		}
	}
	DebugUA("user_area_merge_chunks() finished: merged %ld chunks\n", n);
	return queue_to_free;
}

/* bookkeeping bits for user_area_get_chunk()'s common cleanup path */
#define	TMP_TO_FREE	0x1
#define	BUSY_TO_FREE	0x2
#define	FREE_TO_FREE	0x4
#define	MIN_USER_AREA_ALIGN	(sizeof(void *))

/*
 * Carve a busy chunk of 'size' bytes out of the area's free list.
 * start == 0 requests a first-fit allocation honouring 'align';
 * start != 0 requests that exact address range. Returns the new busy
 * chunk descriptor (not yet inserted into the busy list) or NULL.
 */
user_chunk_t *
user_area_get_chunk(user_area_t *user_area, e2k_addr_t start, e2k_addr_t size,
			e2k_addr_t align, unsigned long flags)
{
	e2k_addr_t addr;
	e2k_addr_t align_add = 0;
	e2k_addr_t free_end;
	e2k_addr_t end;
	e2k_size_t sz;
	user_chunk_t *tmp = NULL, *busy_chunk = NULL, *free_chunk = NULL;
	user_chunk_t *queue_to_free = NULL;
	unsigned int to_free = 0;
	int try;
	unsigned long irq_flags;

	DebugUA("user_area_get_chunk() started: kmem area 0x%px start 0x%lx "
		"size 0x%lx align 0x%lx\n",
		user_area, start, size, align);
	/* reclaim deferred chunks first so their space can be reused */
	if (user_area->to_free_list != NULL)
		user_area_free_queued_chunks(user_area);
	if (start != ALIGN_TO_SIZE(start, align)) {
		printk(KERN_ERR "user_area_get_chunk() start address "
			"0x%lx is not aligned to 0x%lx\n",
			start, align);
		return NULL;
	}
	if (start != ALIGN_TO_SIZE(start, MIN_USER_AREA_ALIGN)) {
		printk(KERN_ERR "user_area_get_chunk() start address "
			"0x%lx is not aligned to 0x%lx\n",
			start, MIN_USER_AREA_ALIGN);
		return NULL;
	}
	size = ALIGN_TO_SIZE(size, MIN_USER_AREA_ALIGN);
	if (start != 0) {
		if (user_area_is_busy(user_area, start, size)) {
			pr_err("user_area_get_chunk() area from 0x%lx "
				"to 0x%lx is partially or fully busy\n",
				start, start + size);
			return NULL;
		}
	}

	/* preallocate descriptors outside the spinlock: one for the result,
	 * one spare for a possible split of the donor free chunk */
	busy_chunk = user_area_create_empty_chunk();
	if (start != 0 || align != 0) {
		free_chunk = user_area_create_empty_chunk();
	}

	spin_lock_irqsave(&user_area->area_list_lock, irq_flags);

	/* up to 3 attempts: plain search, retry, then merge free chunks
	 * and search once more */
	for (try = 0; try < 3; try++) {
		tmp = user_area_find_free_chunk(user_area, start,
							size, align);
		if (tmp != NULL)
			break;
		if (try == 0)
			continue;
		if (try == 1) {
			queue_to_free = user_area_merge_chunks(user_area);
			if (queue_to_free)
				continue;
		}
		spin_unlock_irqrestore(&user_area->area_list_lock, irq_flags);

		printk("user_area_get_chunk() could not find chunk\n");
#ifdef CONFIG_DEBUG_USER_AREA
		user_area_print_all_chunks(user_area);
#endif	/* CONFIG_DEBUG_USER_AREA */
		to_free = (FREE_TO_FREE | BUSY_TO_FREE);
		goto free;
	}
	addr = tmp->start;
	sz = tmp->size;
	free_end = tmp->end;
	if (align != 0) {
		/* bytes wasted at the head of the donor chunk to satisfy
		 * the requested alignment */
		align_add = ALIGN_TO_SIZE(addr, align) - addr;
	}
	DebugUA("user_area_get_chunk() find chunk 0x%px addr 0x%lx start 0x%lx "
		"size 0x%lx\n", tmp, addr, start, sz);
	if (start != 0) {
		/* fixed-address case: possibly keep a left remainder in tmp
		 * and a right remainder in free_chunk */
		end = start + size;
		if (start > addr && busy_chunk == NULL ||
			end < tmp->end && free_chunk == NULL) {
			spin_unlock_irqrestore(&user_area->area_list_lock,
						irq_flags);
			to_free |= (FREE_TO_FREE | BUSY_TO_FREE);
			goto free;
		}
		if (start != addr) {
			tmp->end = start;
			tmp->size = start - addr;
			DebugUA("user_area_get_chunk() left chunk 0x%px end "
				"is now 0x%lx size 0x%lx\n",
				tmp, start, tmp->size);
		}
		if (end != free_end) {
			*free_chunk = *tmp;
			free_chunk->start = end;
			free_chunk->size = free_end - end;
			if (start != addr) {
				free_chunk->prev = tmp;
				tmp->next = free_chunk;
			}
			DebugUA("user_area_get_chunk() new right chunk 0x%px "
				"start 0x%lx end 0x%lx\n",
				free_chunk, end, free_chunk->end);
		} else {
			to_free |= FREE_TO_FREE;
		}
		if (start == addr && end == free_end) {
			/* exact fit: unlink the donor chunk entirely */
			if (tmp->next != NULL)
				tmp->next->prev = tmp->prev;
			if (tmp->prev != NULL)
				tmp->prev->next = tmp->next;
			else
				user_area->free_list = tmp->next;
			to_free |= TMP_TO_FREE;
		}
	} else {
		/* first-fit case: allocation begins at the aligned start of
		 * the donor chunk */
		end = addr + align_add + size;
		if (align_add != 0) {
			tmp->end = addr + align_add;
			tmp->size = align_add;
			DebugUA("user_area_get_chunk() left chunk 0x%px end "
				"is now 0x%lx size 0x%lx\n",
				tmp, tmp->end, tmp->size);
		}
		if (sz - align_add > size) {
			if (align_add != 0) {
				*free_chunk = *tmp;
				free_chunk->start = end;
				free_chunk->size = sz - align_add - size;
				free_chunk->end = end + free_chunk->size;
				free_chunk->prev = tmp;
				tmp->next = free_chunk;
				DebugUA("user_area_get_chunk() new right chunk "
					"0x%px start 0x%lx end 0x%lx\n",
					free_chunk, free_chunk->start,
					free_chunk->end);
			} else {
				tmp->start += size;
				tmp->size -= size;
				to_free |= FREE_TO_FREE;
				DebugUA("user_area_get_chunk() right chunk "
					"0x%px start is now 0x%lx size 0x%lx\n",
					tmp, tmp->start, tmp->size);
			}
		} else if (align_add == 0) {
			/* donor chunk consumed entirely: unlink it */
			if (tmp->next != NULL)
				tmp->next->prev = tmp->prev;
			if (tmp->prev != NULL)
				tmp->prev->next = tmp->next;
			else
				user_area->free_list = tmp->next;
			to_free |= (TMP_TO_FREE | FREE_TO_FREE);
		}
		start = addr + align_add;
	}

	user_area->freebytes -= size;
	spin_unlock_irqrestore(&user_area->area_list_lock, irq_flags);

	user_area_init_chunk(busy_chunk, start, size, flags);

	DebugUA("user_area_get_chunk() finished: user area chunk 0x%px : "
		"start 0x%lx end 0x%lx flags 0x%lx\n",
		busy_chunk, busy_chunk->start, busy_chunk->end,
		busy_chunk->flags);

free:
	/* common cleanup: release whichever preallocated / unlinked
	 * descriptors the flags say are no longer needed */
	if ((to_free & TMP_TO_FREE) && tmp != NULL)
		user_area_release_chunk(tmp);
	if ((to_free & BUSY_TO_FREE) && busy_chunk != NULL) {
		user_area_release_chunk(busy_chunk);
		busy_chunk = NULL;
	}
	if ((to_free & FREE_TO_FREE) && free_chunk != NULL)
		user_area_release_chunk(free_chunk);
	while (free_chunk = queue_to_free) {
		queue_to_free = free_chunk->next;
		user_area_release_chunk(free_chunk);
	}

	return busy_chunk;
}

/* Return a no-longer-busy chunk to the free list */
static inline void
user_area_put_chunk(user_area_t *user_area, user_chunk_t *chunk)
{
	DebugUA("user_area_put_chunk() started: for chunk 0x%px : "
		"start 0x%lx end 0x%lx\n",
		chunk, chunk->start, chunk->end);
	user_area_insert_free_chunk(user_area, chunk);
}

/* Drop the PRESENT state of a chunk (pages were populated, not pinned) */
static inline void user_area_free_present_chunk(user_chunk_t *area_chunk)
{
	DebugUA("user_area_free_present_chunk() started for area: start 0x%lx "
		"end 0x%lx\n",
		area_chunk->start, area_chunk->end);

	BUG_ON(!(area_chunk->flags & USER_AREA_PRESENT));
	area_chunk->flags &= ~USER_AREA_PRESENT;
}

/* Release the pinned-page array of a VMAPPED chunk and clear its state */
static inline void user_area_free_chunk_vmapped(user_chunk_t *area_chunk)
{
	struct page **pages;
	int page;

	DebugUA("user_area_free_chunk_vmapped() started for area: start 0x%lx "
		"end 0x%lx\n",
		area_chunk->start, area_chunk->end);

	pages = area_chunk->pages;
	BUG_ON(pages == NULL || area_chunk->nr_pages <= 0);
	for (page = 0; page < area_chunk->nr_pages; page++) {
		KVM_BUG_ON(pages[page] == NULL);
	}
+ release_pages(pages, area_chunk->nr_pages); + if (pages != area_chunk->few_pages) + kfree(pages); + area_chunk->pages = NULL; + area_chunk->nr_pages = 0; + area_chunk->flags &= ~USER_AREA_VMAPPED; + return; +} + +static void user_area_free_chunk_locked(user_chunk_t *chunk_to_free) +{ + e2k_addr_t start = chunk_to_free->start; + e2k_addr_t end = chunk_to_free->end; + int ret; + + DebugUA("user_area_free_chunk_locked() chunk %px : " + "from 0x%lx to 0x%lx\n", + chunk_to_free, start, end); + BUG_ON(!(chunk_to_free->flags & USER_AREA_LOCKED)); + if (current->mm) { + ret = sys_munlock(start, chunk_to_free->size); + if (ret < 0) { + DebugUA("user_area_free_chunk_locked() could not " + "unlock area\n"); + } + } + chunk_to_free->flags &= ~USER_AREA_LOCKED; +} + +static inline void user_area_free_chunk_alloc(user_chunk_t *area_chunk) +{ + DebugUA("user_area_free_chunk_alloc() started for area: start 0x%lx " + "end 0x%lx\n", + area_chunk->start, area_chunk->end); + + if (area_chunk->flags & USER_AREA_LOCKED) + user_area_free_chunk_locked(area_chunk); + if (area_chunk->flags & USER_AREA_VMAPPED) + user_area_free_chunk_vmapped(area_chunk); + else if (area_chunk->flags & USER_AREA_PRESENT) + user_area_free_present_chunk(area_chunk); +} + +static inline int user_area_do_present_chunk(user_chunk_t *area_chunk) +{ + int ret; + + DebugUA("user_area_do_present_chunk() started for area: start 0x%lx " + "end 0x%lx\n", + area_chunk->start, area_chunk->end); + + BUG_ON(area_chunk->start & ~PAGE_MASK || + area_chunk->end & ~PAGE_MASK); + ret = __mm_populate(area_chunk->start, + area_chunk->end - area_chunk->start, false); + if (ret) { + DebugUA("user_area_do_present_chunk() could not " + "allocate all do present user pages\n"); + return ret; + } + area_chunk->flags |= USER_AREA_PRESENT; + DebugUA("user_area_do_present_chunk() allocated and do present " + "%ld user pages\n", + (area_chunk->end - area_chunk->start) / PAGE_SIZE); + + return 0; +} + +static inline int 
user_area_do_alloc_chunk_pages(user_chunk_t *area_chunk) +{ + struct page **pages = NULL; + int npages; + int ret; + + DebugUA("user_area_do_alloc_chunk_pages() started for area: " + "start 0x%lx, end 0x%lx\n", + area_chunk->start, area_chunk->end); + + BUG_ON(area_chunk->start & ~PAGE_MASK || + area_chunk->end & ~PAGE_MASK); + npages = PAGE_ALIGN(area_chunk->size) / PAGE_SIZE; + DebugUA("user_area_do_alloc_chunk_pages() number of pages is %d\n", + npages); + if (npages <= MAX_NUM_A_FEW_PAGES) { + pages = area_chunk->few_pages; + } else { + pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL); + if (pages == NULL) { + DebugUA("user_area_do_alloc_chunk_pages() could not " + "allocate pages array\n"); + return -ENOMEM; + } + } + ret = get_user_pages_fast(area_chunk->start, npages, 1, pages); + if (unlikely(ret != npages)) { + DebugUA("user_area_do_alloc_chunk_pages() could not " + "allocate user pages\n"); + ret = -ENOMEM; + goto out; + } + area_chunk->pages = pages; + area_chunk->nr_pages = npages; + area_chunk->flags |= USER_AREA_VMAPPED; + + DebugUA("user_area_do_alloc_chunk_pages() allocate %d user pages\n", + ret); + return 0; +out: + user_area_free_chunk_vmapped(area_chunk); + return ret; +} + +void *user_area_alloc_chunk(user_area_t *user_area, e2k_addr_t start, + e2k_addr_t size, e2k_addr_t align, + unsigned long flags) +{ + user_chunk_t *area_chunk; + unsigned long add_flags; + int ret; + + if (start != ALIGN_TO_SIZE(start, PAGE_SIZE)) { + DebugUA("user_area_alloc_chunk() start address 0x%lx is not " + "PAGE size aligned\n", start); + return NULL; + } + if (size != ALIGN_TO_SIZE(size, PAGE_SIZE)) { + DebugUA("user_area_alloc_chunk() size 0x%lx is not " + "PAGE size aligned, so align\n", size); + size = ALIGN_TO_SIZE(size, PAGE_SIZE); + } + if (align < PAGE_SIZE) { + DebugUA("user_area_alloc_chunk() align 0x%lx is not " + "PAGE size aligned, so align\n", align); + align = ALIGN_TO_SIZE(align, PAGE_SIZE); + } + area_chunk = 
user_area_get_chunk(user_area, start, size, align, 0); + if (area_chunk == NULL) { + DebugUA("user_area_alloc_chunk() could not get chunk from " + "0x%lx size 0x%lx align 0x%lx\n", + start, size, align); + return NULL; + } + add_flags = 0; + if (flags & KVM_ALLOC_AREA_MAP_FLAGS) { + unsigned long prot = 0; + + if (flags & KVM_ALLOC_AREA_PROT_READ) + prot |= PROT_READ; + if (flags & KVM_ALLOC_AREA_PROT_WRITE) + prot |= PROT_WRITE; + if (flags & KVM_ALLOC_AREA_PROT_EXEC) + prot |= PROT_EXEC; + if (prot != 0) { + ret = sys_mprotect(area_chunk->start, area_chunk->size, + prot); + if (ret) { + pr_err("%s() could not change protections " + "of allocated user chunk from 0x%lx " + "to 0x%lx, error %d\n", + __func__, start, start + size, ret); + goto out_put_chunk; + } + add_flags |= (flags & KVM_ALLOC_AREA_MAP_FLAGS); + } + } + if (flags & UA_VMAP_TO_KERNEL) { + ret = user_area_do_alloc_chunk_pages(area_chunk); + add_flags |= USER_AREA_VMAPPED; + } else if (flags & UA_ALLOC_PRESENT) { + ret = user_area_do_present_chunk(area_chunk); + add_flags |= USER_AREA_PRESENT; + } else { + add_flags |= USER_AREA_ALLOCATED; + ret = 0; + } + if (ret < 0) { + DebugUA("user_area_alloc_chunk() could not make area " + "as present\n"); + goto out_put_chunk; + } + area_chunk->flags |= add_flags; + if (flags & UA_ALLOC_LOCKED) { + ret = sys_mlock(area_chunk->start, area_chunk->size); + if (ret < 0) { + DebugUA("user_area_alloc_present() could not " + "lock area\n"); + goto out_free_alloc; + } + area_chunk->flags |= USER_AREA_LOCKED; + } + + user_area_insert_busy_chunk(user_area, area_chunk); + return (void *)area_chunk->start; + +out_free_alloc: + user_area_free_chunk_alloc(area_chunk); +out_put_chunk: + user_area_put_chunk(user_area, area_chunk); + return NULL; +} + +void *map_user_area_to_vmalloc_range(user_area_t *user_area, void *user_base, + pgprot_t prot) +{ + e2k_addr_t start = (e2k_addr_t)user_base; + user_chunk_t *tmp; + void *vmap_base; + unsigned long irq_flags; + + DebugKA("started 
for user area from %px\n", user_base); + spin_lock_irqsave(&user_area->area_list_lock, irq_flags); + tmp = user_area_find_busy_chunk(user_area, start, 0); + spin_unlock_irqrestore(&user_area->area_list_lock, irq_flags); + if (tmp == NULL) { + panic("map_user_area_to_vmalloc_range() could not find " + "busy area: start 0x%lx\n", start); + } else if (tmp->start != start) { + panic("map_user_area_to_vmalloc_range() found only busy area: " + "start 0x%lx end 0x%lx instead of 0x%lx\n", + tmp->start, tmp->end, start); + } + DebugKA("found user area from 0x%lx to 0x%lx\n", + tmp->start, tmp->end); + BUG_ON(tmp->pages == NULL || tmp->nr_pages <= 0); + vmap_base = vmap(tmp->pages, tmp->nr_pages, VM_ALLOC, prot); + if (vmap_base == NULL) { + DebugKA("could not map user area %px to kernel VM area\n", + user_base); + return NULL; + } + tmp->vmap_base = vmap_base; + DebugKA("user area start 0x%lx end 0x%lx mapped to kernel VM area " + "allocated from %px\n", + tmp->start, tmp->end, vmap_base); + + return vmap_base; +} + +void unmap_user_area_to_vmalloc_range(user_area_t *user_area, void *vmap_base) +{ + user_chunk_t *tmp = NULL; + unsigned long irq_flags; + + DebugKA("started for user area from %px\n", vmap_base); + if (user_area == NULL) + goto only_vmap; + spin_lock_irqsave(&user_area->area_list_lock, irq_flags); + tmp = user_area_find_vmap_chunk(user_area, vmap_base); + spin_unlock_irqrestore(&user_area->area_list_lock, irq_flags); + if (tmp == NULL) { + panic("unmap_user_area_to_vmalloc_range() could not find " + "busy area: start from %px\n", vmap_base); + } else if (tmp->vmap_base != vmap_base) { + panic("unmap_user_area_to_vmalloc_range() found only busy " + "area: from 0x%lx to 0x%lx instead of %px\n", + tmp->start, tmp->end, vmap_base); + } + DebugKA("found user area to unmap from 0x%lx to 0x%lx\n", + tmp->start, tmp->end); +only_vmap: + vunmap(vmap_base); + if (tmp != NULL) + tmp->vmap_base = NULL; +} + +static inline void +user_area_free_chunk_pages(user_area_t 
*user_area, user_chunk_t *chunk) +{ + user_area_free_chunk_alloc(chunk); + user_area_put_chunk(user_area, chunk); +} + +static void +user_area_do_free_chunk(user_area_t *user_area, void *chunk_base, + unsigned long flags) +{ + user_chunk_t *tmp; + e2k_addr_t start = (e2k_addr_t)chunk_base; + e2k_addr_t end; + unsigned long irq_flags; + + DebugUA("user_area_do_free_chunk() started: for chunk: start %px\n", + chunk_base); + spin_lock_irqsave(&user_area->area_list_lock, irq_flags); + tmp = user_area_find_busy_chunk(user_area, start, 0); + if (tmp == NULL) { + panic("user_area_do_free_chunk() could not find busy area: " + "start 0x%lx\n", start); + } else if (tmp->start != start) { + panic("user_area_do_free_chunk() found only busy area: " + "start 0x%lx end 0x%lx instead of 0x%lx\n", + tmp->start, tmp->end, start); + } + end = tmp->end; + DebugUA("user_area_do_free_chunk() delete busy chunk %px: next %px " + "prev %px\n", + tmp, tmp->next, tmp->prev); + if (tmp->next != NULL) { + tmp->next->prev = tmp->prev; + DebugUA("user_area_do_free_chunk() next chunk %px: next %px " + "prev %px\n", + tmp->next, tmp->next->next, tmp->next->prev); + } + if (tmp->prev != NULL) { + tmp->prev->next = tmp->next; + DebugUA("user_area_do_free_chunk() prev chunk %px: next %px " + "prev %px\n", + tmp->prev, tmp->prev->next, tmp->prev->prev); + } else { + user_area->busy_list = tmp->next; + DebugUA("user_area_do_free_chunk() set head of busy chunks %px: " + "to %px\n", + &user_area->busy_list, user_area->busy_list); + } + spin_unlock_irqrestore(&user_area->area_list_lock, irq_flags); + if (flags & USER_AREA_QUEUE) { + user_area_insert_to_free_chunk(user_area, tmp); + return; + } + user_area_free_chunk_pages(user_area, tmp); +} + +static void +user_area_free_queued_chunks(user_area_t *user_area) +{ + user_chunk_t *queue_to_free; + user_chunk_t *tmp; + int total = 0; + unsigned long irq_flags; + + DebugUA("user_area_free_queued_chunks() started: kmem area 0x%px : " + "start 0x%lx end 
0x%lx\n", + user_area, user_area->area_start, user_area->area_end); + if (user_area->to_free_list == NULL) { + DebugUA("user_area_free_queued_chunks() returns: list " + "to free is empty\n"); + return; + } + + spin_lock_irqsave(&user_area->area_list_lock, irq_flags); + queue_to_free = user_area->to_free_list; + user_area->to_free_list = NULL; + spin_unlock_irqrestore(&user_area->area_list_lock, irq_flags); + + while ((tmp = queue_to_free) != NULL) { + DebugUA("user_area_free_queued_chunks() current chunk " + "0x%px, start 0x%lx, end 0x%lx\n", + tmp, tmp->start, tmp->end); + queue_to_free = tmp->next; + user_area_free_chunk_pages(user_area, tmp); + total++; + } + + DebugUA("user_area_free_queued_chunks() returns with freeed " + "chunks num %d\n", total); +} + +void user_area_free_chunk(user_area_t *user_area, void *chunk) +{ + user_area_free_queued_chunks(user_area); + user_area_do_free_chunk(user_area, chunk, USER_AREA_FREE); +} + +void user_area_queue_chunk_to_free(user_area_t *user_area, void *chunk) +{ + user_area_do_free_chunk(user_area, chunk, USER_AREA_QUEUE); +} + +static void user_area_free_all_busy_chunks(user_area_t *user_area) +{ + user_chunk_t *queue_to_free; + user_chunk_t *tmp; + int total = 0; + unsigned long irq_flags; + + DebugUA("user_area_free_all_busy_chunks() started: user area 0x%px : " + "start 0x%lx end 0x%lx\n", + user_area, user_area->area_start, user_area->area_end); + spin_lock_irqsave(&user_area->area_list_lock, irq_flags); + queue_to_free = user_area->busy_list; + user_area->busy_list = NULL; + spin_unlock_irqrestore(&user_area->area_list_lock, irq_flags); + + while ((tmp = queue_to_free) != NULL) { + DebugUA("user_area_free_all_busy_chunks() current chunk " + "0x%px, start 0x%lx, end 0x%lx\n", + tmp, tmp->start, tmp->end); + queue_to_free = tmp->next; + user_area_free_chunk_pages(user_area, tmp); + total++; + } + DebugUA("user_area_free_all_busy_chunks() released %d busy chunks\n", + total); +} + +static void 
user_area_release_all_free_chunks(user_area_t *user_area) +{ + user_chunk_t *queue_to_free; + user_chunk_t *tmp; + int total = 0; + unsigned long irq_flags; + + DebugUA("user_area_release_all_free_chunks() started: user area 0x%px : " + "start 0x%lx end 0x%lx\n", + user_area, user_area->area_start, user_area->area_end); + + user_area_free_queued_chunks(user_area); + + spin_lock_irqsave(&user_area->area_list_lock, irq_flags); + queue_to_free = user_area->free_list; + user_area->free_list = NULL; + spin_unlock_irqrestore(&user_area->area_list_lock, irq_flags); + + while ((tmp = queue_to_free) != NULL) { + DebugUA("user_area_release_all_free_chunks() current chunk " + "0x%px, start 0x%lx, end 0x%lx\n", + tmp, tmp->start, tmp->end); + queue_to_free = tmp->next; + user_area_release_chunk(tmp); + total++; + } + DebugUA("user_area_release_all_free_chunks() released %d chunks\n", + total); +} + +/* + * Release user virtual memory area + */ +void user_area_release(user_area_t *user_area) +{ + DebugUA("user_area_release() started: user area 0x%px : " + "start 0x%lx end 0x%lx\n", + user_area, user_area->area_start, user_area->area_end); + user_area_free_all_busy_chunks(user_area); + user_area_release_all_free_chunks(user_area); + if (user_area->busy_list || user_area->free_list || + user_area->to_free_list) { + printk(KERN_ERR "user_area_release() not empty some of lists: " + "busy %px or free %px or queue to free %px\n", + user_area->busy_list, user_area->free_list, + user_area->to_free_list); + } +} diff --git a/arch/e2k/kvm/user_area.h b/arch/e2k/kvm/user_area.h new file mode 100644 index 000000000000..5ab1131a64fb --- /dev/null +++ b/arch/e2k/kvm/user_area.h @@ -0,0 +1,147 @@ +#ifndef _E2K_USER_AREA_ALLOC_H +#define _E2K_USER_AREA_ALLOC_H + +#include +#include + + +/* user area and chunks flags */ +/* WARNING should not intersect with protection flags */ +/* KVM_ALLOC_AREA_PROT_READ/WRITE/EXEC */ +/* KVM_ALLOC_AREA_HUGE */ +#define USER_AREA_CHUNK_ALLOC 0x0001UL +#define 
USER_AREA_ORDERED 0x0002UL + +#define USER_AREA_RESERVED 0x0010UL +#define USER_AREA_ALLOCATED 0x0020UL +#define USER_AREA_PRESENT 0x0040UL +#define USER_AREA_LOCKED 0x0080UL +#define USER_AREA_VMAPPED 0x0100UL + +#define USER_AREA_FREE 0x1000UL +#define USER_AREA_QUEUE 0x2000UL + +/* user area allocation/free flags */ +#define UA_ALLOC_PRESENT USER_AREA_PRESENT +#define UA_ALLOC_LOCKED USER_AREA_LOCKED +#define UA_VMAP_TO_KERNEL USER_AREA_VMAPPED + +#define MAX_NUM_A_FEW_PAGES 2 /* optimization for tipical case: */ + /* only a few page need allocate */ + +typedef struct user_chunk { + unsigned long flags; + e2k_addr_t start; + e2k_addr_t end; + e2k_size_t size; + struct user_chunk *next; + struct user_chunk *prev; + struct page **pages; /* pages to map to kernel */ + int nr_pages; /* number of physical pages */ + void *vmap_base; /* kernel virtual area base */ + /* where user area map to */ + struct page *few_pages[MAX_NUM_A_FEW_PAGES]; +} user_chunk_t; + +typedef struct user_area { + unsigned long flags; + e2k_addr_t area_start; + e2k_addr_t area_end; + spinlock_t area_list_lock; + user_chunk_t *free_list; + user_chunk_t *busy_list; + user_chunk_t *to_free_list; + e2k_size_t freebytes; +} user_area_t; + +extern int user_area_caches_init(void); +extern void user_area_caches_destroy(void); + +extern user_area_t *user_area_create(e2k_addr_t area_start, + e2k_size_t area_size, unsigned long flags); +extern void user_area_release(user_area_t *user_area); + +extern int user_area_reserve_chunk(user_area_t *user_area, + e2k_addr_t area_start, e2k_size_t area_size); +extern void *user_area_alloc_chunk(user_area_t *user_area, e2k_addr_t start, + e2k_addr_t size, e2k_addr_t align, + unsigned long flags); + +/* + * Allocate common chunk into the user virtual memory area + */ +extern inline void * +user_area_alloc(user_area_t *user_area, e2k_size_t size, unsigned long flags) +{ + return user_area_alloc_chunk(user_area, 0, size, 0, + flags & KVM_ALLOC_AREA_MAP_FLAGS); +} + 
+extern inline void * +user_area_get(user_area_t *user_area, e2k_addr_t start, e2k_size_t size, + e2k_size_t align, unsigned long flags) +{ + return user_area_alloc_chunk(user_area, start, size, align, + flags & KVM_ALLOC_AREA_MAP_FLAGS); +} + +extern inline void * +user_area_alloc_pages(user_area_t *user_area, e2k_addr_t start, + e2k_addr_t size, e2k_addr_t align, unsigned long flags) +{ + return user_area_alloc_chunk(user_area, start, size, align, + flags & KVM_ALLOC_AREA_MAP_FLAGS | UA_VMAP_TO_KERNEL); +} + +extern inline void * +user_area_alloc_present(user_area_t *user_area, e2k_addr_t start, + e2k_addr_t size, e2k_addr_t align, unsigned long flags) +{ + return user_area_alloc_chunk(user_area, start, size, align, + flags & KVM_ALLOC_AREA_MAP_FLAGS | UA_ALLOC_PRESENT); +} + +extern inline void * +user_area_alloc_zeroed(user_area_t *user_area, e2k_addr_t start, + e2k_addr_t size, e2k_addr_t align, unsigned long flags) +{ + return user_area_alloc_present(user_area, start, size, align, flags); +} + +extern inline void * +user_area_alloc_locked(user_area_t *user_area, e2k_addr_t start, + e2k_addr_t size, e2k_addr_t align, unsigned long flags) +{ + return user_area_alloc_chunk(user_area, start, size, align, + flags & KVM_ALLOC_AREA_MAP_FLAGS | UA_ALLOC_LOCKED); +} + +extern inline void * +user_area_alloc_locked_pages(user_area_t *user_area, e2k_addr_t start, + e2k_addr_t size, e2k_addr_t align, unsigned long flags) +{ + return user_area_alloc_chunk(user_area, start, size, align, + flags & KVM_ALLOC_AREA_MAP_FLAGS | + UA_VMAP_TO_KERNEL | UA_ALLOC_LOCKED); +} + +extern inline void * +user_area_alloc_locked_present(user_area_t *user_area, e2k_addr_t start, + e2k_addr_t size, e2k_addr_t align, unsigned long flags) +{ + return user_area_alloc_chunk(user_area, start, size, align, + flags & KVM_ALLOC_AREA_MAP_FLAGS | + UA_ALLOC_PRESENT | UA_ALLOC_LOCKED); +} + +extern void *map_user_area_to_vmalloc_range(user_area_t *user_area, + void *user_base, pgprot_t prot); +extern 
void unmap_user_area_to_vmalloc_range(user_area_t *user_area, + void *vmalloc_area); + +extern void user_area_free_chunk(user_area_t *user_area, void *chunk); +extern void user_area_queue_chunk_to_free(user_area_t *user_area, void *chunk); + +#endif /* _E2K_USER_AREA_ALLOC_H */ + + diff --git a/arch/e2k/kvm/virq.c b/arch/e2k/kvm/virq.c new file mode 100644 index 000000000000..102cf823898d --- /dev/null +++ b/arch/e2k/kvm/virq.c @@ -0,0 +1,554 @@ +/* + * arch/e2k/kvm/virq.c + * + * Virtual IRQ manager + * + * Copyright 2014 Salavat S. Gilyazov (atic@mcst.ru) + */ + +#include +#include +#include +#include +#include +#include +#include "process.h" +#include "irq.h" +#include "time.h" +#include "lapic.h" +#include "intercepts.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_THREAD_MODE +#undef DebugKVMT +#define DEBUG_KVM_THREAD_MODE 0 /* KVM thread debugging */ +#define DebugKVMT(fmt, args...) \ +({ \ + if (DEBUG_KVM_THREAD_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_IRQ_MODE +#undef DebugKVMIRQ +#define DEBUG_KVM_IRQ_MODE 0 /* KVM IRQ manage debugging */ +#define DebugKVMIRQ(fmt, args...) \ +({ \ + if (DEBUG_KVM_IRQ_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_DIRECT_VIRQ_MODE +#undef DebugDVIRQ +#define DEBUG_KVM_DIRECT_VIRQ_MODE 0 /* KVM direct IRQ manage */ + /* debugging */ +#define DebugDVIRQ(fmt, args...) \ +({ \ + if (DEBUG_KVM_DIRECT_VIRQ_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_INTR_MODE +#undef DebugKVMINTR +#define DEBUG_KVM_INTR_MODE 0 /* KVM interrupt recieve debugging */ +#define DebugKVMINTR(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_INTR_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_INJECT_INTR_MODE +#undef DebugKVMII +#define DEBUG_KVM_INJECT_INTR_MODE 0 /* KVM interrupt injection */ + /* debugging */ +#define DebugKVMII(fmt, args...) \ +({ \ + if (DEBUG_KVM_INJECT_INTR_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_INJECT_NMI_MODE +#undef DebugKVMNMI +#define DEBUG_KVM_INJECT_NMI_MODE 0 /* KVM not masked interrupt */ + /* injection debugging */ +#define DebugKVMNMI(fmt, args...) \ +({ \ + if (DEBUG_KVM_INJECT_NMI_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_SHUTDOWN_MODE +#undef DebugKVMSH +#define DEBUG_KVM_SHUTDOWN_MODE 0 /* KVM shutdown debugging */ +#define DebugKVMSH(fmt, args...) \ +({ \ + if (DEBUG_KVM_SHUTDOWN_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +extern bool debug_VIRQs; +#undef DEBUG_KVM_VIRQs_MODE +#undef DebugVIRQs +#define DEBUG_KVM_VIRQs_MODE 0 /* VIRQs debugging */ +#define DebugVIRQs(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_VIRQs_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +static void kvm_register_vcpu_interrupt(struct kvm_vcpu *vcpu, + int irq, int virq_id); +static int kvm_wake_up_virq(kvm_guest_virq_t *guest_virq, + bool inject, bool do_wake_up); + +int debug_guest_virqs = 0; + +#ifdef CONFIG_DIRECT_VIRQ_INJECTION +int kvm_get_guest_direct_virq(struct kvm_vcpu *vcpu, int irq, int virq_id) +{ + struct kvm *kvm = vcpu->kvm; + kvm_guest_virq_t *guest_virq; + + DebugDVIRQ("started for IRQ #%d VIRQ ID #%d\n", + irq, virq_id); + if (virq_id >= KVM_NR_VIRQS) { + DebugKVMIRQ("invalid VIRQ ID #%d for IRQ #%d\n", + virq_id, irq); + return -EINVAL; + } + if (irq >= KVM_MAX_NR_VIRQS) { + DebugKVMIRQ("invalid IRQ num #%d VIRQ ID #%d\n", + irq, virq_id); + return -EINVAL; + } + raw_spin_lock(&kvm->arch.virq_lock); + guest_virq = &kvm->arch.guest_virq[irq]; + if (guest_virq->vcpu != NULL) { + raw_spin_unlock(&kvm->arch.virq_lock); + DebugKVM("IRQ #%d VIRQ ID #%d was already registered " + "on VCPU #%d\n", + irq, virq_id, guest_virq->vcpu->vcpu_id); + return -EEXIST; + } + guest_virq->virq_id = virq_id; + guest_virq->host_task = current; + guest_virq->vcpu = vcpu; + guest_virq->flags = DIRECT_INJ_VIRQ_FLAG; + guest_virq->count = + kvm_get_guest_virqs_atomic_counter(vcpu, virq_id); + atomic_set(guest_virq->count, 0); + kvm_register_vcpu_interrupt(vcpu, irq, virq_id); + set_thread_flag(TIF_VIRQS_ACTIVE); + if (irq > kvm->arch.max_irq_no) + kvm->arch.max_irq_no = irq; + raw_spin_unlock(&kvm->arch.virq_lock); + + DebugDVIRQ("IRQ #%d VIRQ ID %s (#%d) is registered in VCPU #%d\n", + irq, kvm_get_virq_name(virq_id), virq_id, vcpu->vcpu_id); + return 0; +} + +int kvm_free_guest_direct_virq(struct kvm *kvm, int irq) +{ + kvm_guest_virq_t *guest_virq; + + DebugDVIRQ("started for IRQ #%d\n", irq); + if (irq >= KVM_MAX_NR_VIRQS) { + DebugDVIRQ("invalid IRQ num #%d\n", irq); + return -EINVAL; + } + guest_virq = &kvm->arch.guest_virq[irq]; + if (!(guest_virq->flags & 
DIRECT_INJ_VIRQ_FLAG)) { + return 0; + } + raw_spin_lock_irq(&kvm->arch.virq_lock); + if (guest_virq->host_task == NULL) { + raw_spin_unlock_irq(&kvm->arch.virq_lock); + DebugDVIRQ("IRQ #%d VIRQ ID %s (#%d) is not active\n", + irq, kvm_get_virq_name(guest_virq->virq_id), + guest_virq->virq_id); + return 0; + } + guest_virq->flags = 0; + guest_virq->host_task = NULL; + guest_virq->vcpu = NULL; + clear_thread_flag(TIF_VIRQS_ACTIVE); + raw_spin_unlock_irq(&kvm->arch.virq_lock); + + DebugDVIRQ("IRQ #%d VIRQ ID %s (#%d) was stopped\n", + irq, kvm_get_virq_name(guest_virq->virq_id), + guest_virq->virq_id); + + return 0; +} +#else /* !CONFIG_DIRECT_VIRQ_INJECTION */ +int kvm_get_guest_direct_virq(struct kvm_vcpu *vcpu, int irq, int virq_id) +{ + pr_err("Direct VIRQ cannot be registered, turn on config flag to " + "enable this mode\n"); + return -ENOSYS; +} +int kvm_free_guest_direct_virq(struct kvm *kvm, int irq) +{ + pr_warning("Direct VIRQ cannot be freed, turn on config flag " + "to enable this mode\n"); + return -ENOSYS; +} +#endif /* CONFIG_DIRECT_VIRQ_INJECTION */ + +static int find_irq_on_virq_id(struct kvm *kvm, int vcpu_id, int virq_id) +{ + kvm_guest_virq_t *guest_virq; + unsigned long flags; + int irq; + + raw_spin_lock_irqsave(&kvm->arch.virq_lock, flags); + for (irq = 0; irq < KVM_MAX_NR_VIRQS; irq++) { + guest_virq = &kvm->arch.guest_virq[irq]; + if (guest_virq->vcpu == NULL) + continue; + if (guest_virq->virq_id != virq_id) + continue; + if (guest_virq->vcpu->vcpu_id == vcpu_id) { + raw_spin_unlock_irqrestore(&kvm->arch.virq_lock, flags); + return irq; + } + } + raw_spin_unlock_irqrestore(&kvm->arch.virq_lock, flags); + return -1; +} + +static void +kvm_register_vcpu_interrupt(struct kvm_vcpu *vcpu, int irq, int virq_id) +{ + DebugKVMIRQ("started for VCPU #%d IRQ #%d VIRQ ID #%d\n", + vcpu->vcpu_id, irq, virq_id); + switch (virq_id) { + case KVM_VIRQ_TIMER: + vcpu->arch.hrt_virq_no = irq; + DebugKVMIRQ("set IRQ #%d for timer VCPU #%d\n", + irq, 
vcpu->vcpu_id); + break; + case KVM_VIRQ_LAPIC: + WARN_ON(vcpu->arch.apic == NULL); + vcpu->arch.apic->virq_no = irq; + DebugKVMIRQ("set IRQ #%d for local APIC of VCPU #%d\n", + irq, vcpu->vcpu_id); + break; + case KVM_VIRQ_CEPIC: + WARN_ON(vcpu->arch.epic == NULL); + vcpu->arch.epic->virq_no = irq; + DebugKVMIRQ("set IRQ #%d for CEPIC of VCPU #%d\n", + irq, vcpu->vcpu_id); + break; + case KVM_VIRQ_HVC: + DebugKVMIRQ("hvc console VIRQ, nothing to do\n"); + break; + default: + printk(KERN_WARNING "Bad VIRQ ID #%d\n", virq_id); + break; + } +} + +/* + * User Applications to support virtual machine emulation (for example QEMU) + * can send virtual interrupts using ioctl() KVM_INTERRUPT + * Argument 'virq_id' here is number of VIRQ from interface host <-> guest + * see include/asm/kvm/irq.h + */ +int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, int virq_id) +{ + int irq; + + DebugKVMINTR("started for VIRQ ID #%d on VCPU #%d\n", + virq_id, vcpu->vcpu_id); + irq = find_irq_on_virq_id(vcpu->kvm, vcpu->vcpu_id, virq_id); + if (irq < 0) { + DebugKVMINTR("could not find IRQ which VIRQ ID #%d on " + "VCPU #%d bind to\n", + virq_id, vcpu->vcpu_id); + return -ENODEV; + } else { + DebugKVMINTR("started for VIRQ ID #%d on VCPU #%d " + "bind to IRQ #%d\n", + virq_id, vcpu->vcpu_id, irq); + } + return kvm_vcpu_interrupt(vcpu, irq); +} + +/* + * VCPU virtual IRQ handler + * Argument 'irq' here is internel number of IRQ which guest VIRQ bind to + * (index at table of all guest VIRQs, (VIRQ-ID, VCPU-ID) <-> IRQ) + */ +int kvm_vcpu_interrupt(struct kvm_vcpu *vcpu, int irq) +{ + struct kvm *kvm = vcpu->kvm; + kvm_guest_virq_t *guest_virq; + int virq_id; + int virqs_num; + bool do_wake_up; + bool has_pending_virqs; + unsigned long flags; + + DebugKVMINTR("started for virtual interrupt #%d\n", irq); + if (irq < 0 || irq >= KVM_MAX_NR_VIRQS) + return -EINVAL; + + raw_spin_lock_irqsave(&kvm->arch.virq_lock, flags); + guest_virq = &kvm->arch.guest_virq[irq]; + + if (guest_virq->vcpu == 
NULL) { + /* virtual IRQ does not exist or register */ + pr_warning("kvm_vcpu_interrupt() virtual IRQ #%d " + "does not exist or register\n", irq); + raw_spin_unlock_irqrestore(&kvm->arch.virq_lock, flags); + return -ENODEV; + } + if (guest_virq->stop_handler) { + /* virtual IRQ already stopped */ + raw_spin_unlock_irqrestore(&kvm->arch.virq_lock, flags); + return -EINTR; + } + virq_id = guest_virq->virq_id; + DebugVIRQs("started for VIRQ #%d (VCPU #%d, %s)\n", + irq, vcpu->vcpu_id, kvm_get_virq_name(virq_id)); + + BUG_ON(vcpu != guest_virq->vcpu); + + has_pending_virqs = kvm_test_pending_virqs(vcpu); + virqs_num = atomic_inc_return(guest_virq->count); + DebugVIRQs("injected on VCPU #%d, pending flag %d VIRQs number is %d\n", + vcpu->vcpu_id, has_pending_virqs, virqs_num); + GTI_BUG_ON(virqs_num > MAX_PENDING_VIRQS); + + /* + * Common NOTE: guest VIRQs model support arbitrary number of + * different types and injection modes of VIRQ. + * But in practice only one type of VIRQs can happen after local APIC + * initialization (KVM_VIRQ_LAPIC). All guest IRQs will be passed + * through LAPIC, so virqs_num is precise counter of current pending + * VIRQs. + * At the beginning of guest kernel booting is used second type + * of guest VIRQs (KVM_VIRQ_TIMER) and the timer VIRQ is single too at + * this time, so virqs_num is too precise. + */ + if (!has_pending_virqs && virqs_num <= 1) { + /* first VIRQs and none other pending VIRQs */ + /* so it can be first VIRQs or last VIRQs is handling */ + /* by guest right now. 
Pass new interrupt to guest */ + do_wake_up = true; + } else if (!has_pending_virqs && virqs_num > 1) { + /* one more VIRQs and other pending VIRQs is already handling */ + /* Do not pass new interrupt because of old interrupt is */ + /* in progress */ + do_wake_up = false; + } else if (has_pending_virqs && virqs_num <= 1) { + /* first VIRQS and there are other pending VIRQs */ + /* it can be if host want pass new interrupt, but guest */ + /* is now handling old interrupt and see already new VIRQ */ + /* so do not pass new interrupt, host should deliver old */ + do_wake_up = false; + } else if (has_pending_virqs && virqs_num > 1) { + /* one more VIRQs and there are other pending VIRQs */ + /* it can be if host should pass interrupt, but */ + /* 1) host has not still time do it, so do not pass new */ + /* interrupt, host should deliver old */ + /* 2) guest VCPU is on idle and host should wake up guest */ + if (vcpu->arch.on_idle || vcpu->arch.on_spinlock || + vcpu->arch.on_csd_lock) + do_wake_up = true; + else + do_wake_up = false; + } else { + /* unknown and impossible case */ + WARN_ON(true); + do_wake_up = true; + } + kvm_wake_up_virq(guest_virq, true, /* inject */ do_wake_up); + raw_spin_unlock_irqrestore(&kvm->arch.virq_lock, flags); + return 0; +} + +static inline int +kvm_wake_up_direct_virq(kvm_guest_virq_t *guest_virq, + bool inject, bool do_wake_up) +{ + struct task_struct *task; + int virq_id = guest_virq->virq_id; + int virqs_num; + int ret; + + if (!(guest_virq->flags & DIRECT_INJ_VIRQ_FLAG)) + KVM_BUG_ON(true); + if (!inject || !do_wake_up) + return 0; + task = guest_virq->host_task; + if (unlikely(task == NULL)) + return -EINVAL; + + virqs_num = atomic_read(guest_virq->count); + DebugVIRQs("VIRQ %s pending counter is %d\n", + kvm_get_virq_name(virq_id), virqs_num); + if (virqs_num <= 0) { + /* none pending VIRQs */ + return 0; + } + kvm_set_pending_virqs(guest_virq->vcpu); + if (task == current) { + DebugDVIRQ("current %s (%d) is VCPU thread to inject 
" + "VIRQ %s\n", + task->comm, task->pid, kvm_get_virq_name(virq_id)); + DebugVIRQs("current %s (%d) is VCPU thread to inject " + "VIRQ %s\n", + task->comm, task->pid, kvm_get_virq_name(virq_id)); + return virqs_num; + } + + /* received some VIRQs, so activate VCPU thread if it is on idle */ + if (!(guest_virq->vcpu->arch.on_idle || + guest_virq->vcpu->arch.on_spinlock || + guest_virq->vcpu->arch.on_csd_lock)) + return virqs_num; + ret = wake_up_process(task); + if (ret) { + DebugDVIRQ("wakeed up guest VIRQ %s VCPU thread %s (%d)\n", + kvm_get_virq_name(virq_id), task->comm, task->pid); + DebugVIRQs("wakeed up guest VIRQ %s VCPU thread %s (%d)\n", + kvm_get_virq_name(virq_id), task->comm, task->pid); + } else { + DebugDVIRQ("guest VIRQ %s VCPU thread already is running, " + "pending VIRQs counter is %d\n", + kvm_get_virq_name(virq_id), virqs_num); + DebugVIRQs("guest VIRQ %s VCPU thread %s (%d) already " + "is running, pending counter is %d\n", + kvm_get_virq_name(virq_id), task->comm, task->pid, + virqs_num); + } + kvm_vcpu_kick(guest_virq->vcpu); + return virqs_num; +} + +static int kvm_wake_up_virq(kvm_guest_virq_t *guest_virq, + bool do_inject, bool do_wake_up) +{ + int virq_id = guest_virq->virq_id; + int virqs_num; + + virqs_num = atomic_read(guest_virq->count); + DebugVIRQs("VIRQ %s pending counter is %d\n", + kvm_get_virq_name(virq_id), virqs_num); + if (virqs_num <= 0) { + /* none pending VIRQs */ + return 0; + } + if (guest_virq->flags & DIRECT_INJ_VIRQ_FLAG) { + return kvm_wake_up_direct_virq(guest_virq, + do_inject, do_wake_up); + } else { + BUG_ON(true); + } + return 0; +} + +/* + * spinlock (kvm->arch.virq_lock) should be take by caller + */ +int kvm_find_pending_virqs(struct kvm_vcpu *vcpu, bool inject, bool wakeup) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_vcpu *virq_vcpu; + kvm_guest_virq_t *guest_virq; + int irq; + int ret; + int virqs_num = 0; + + for (irq = 0; irq <= kvm->arch.max_irq_no; irq++) { + guest_virq = &kvm->arch.guest_virq[irq]; 
+ virq_vcpu = guest_virq->vcpu; + if (virq_vcpu == NULL || IS_ERR(virq_vcpu)) + continue; + if (virq_vcpu != vcpu) + continue; + ret = atomic_read(guest_virq->count); + virqs_num += ret; + if (ret == 0 || !(inject || wakeup)) + continue; + ret = kvm_wake_up_virq(guest_virq, inject, wakeup); + if (ret < 0) { + pr_err("%s(): waking up of VCPU #%d VIRQ #%d failed, " + "error %d\n", + __func__, vcpu->vcpu_id, guest_virq->virq_id, + ret); + } + } + return virqs_num; +} + +/* + * spinlock (kvm->arch.virq_lock) should be take by caller + */ +int kvm_dec_vcpu_pending_virq(struct kvm_vcpu *vcpu, int virq_no) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_vcpu *virq_vcpu; + kvm_guest_virq_t *guest_virq; + int virqs_num = 0; + + guest_virq = &kvm->arch.guest_virq[virq_no]; + virq_vcpu = guest_virq->vcpu; + if (virq_vcpu == NULL || IS_ERR(virq_vcpu)) + return 0; + GTI_BUG_ON(virq_vcpu != vcpu); + virqs_num = atomic_dec_return(guest_virq->count); + + if (virqs_num > 0) { + DebugVIRQs("there are %d VIRQs on VCPU #%d\n", + virqs_num, vcpu->vcpu_id); + } + return virqs_num; +} + +void kvm_inject_lapic_virq(struct kvm_lapic *apic) +{ + struct kvm_vcpu *vcpu = apic->vcpu; + + DebugKVMII("started on VCPU #%d\n", vcpu->vcpu_id); + kvm_vcpu_interrupt(vcpu, apic->virq_no); +} + +void kvm_inject_cepic_virq(struct kvm_cepic *epic) +{ + struct kvm_vcpu *vcpu = epic->vcpu; + + DebugKVMII("started on VCPU #%d\n", vcpu->vcpu_id); + DebugKVMII("epic->virq_no = %d\n", epic->virq_no); + kvm_vcpu_interrupt(vcpu, epic->virq_no); +} + +void kvm_inject_nmi(struct kvm_vcpu *vcpu) +{ + DebugKVMNMI("started on VCPU #%d\n", vcpu->vcpu_id); + kvm_pic_nmi_deliver(vcpu); +} + +void kvm_free_all_VIRQs(struct kvm *kvm) +{ + kvm_guest_virq_t *guest_virq; + int irq; + + DebugKVMIRQ("started\n"); + for (irq = 0; irq <= kvm->arch.max_irq_no; irq++) { + guest_virq = &kvm->arch.guest_virq[irq]; + if (guest_virq->flags & DIRECT_INJ_VIRQ_FLAG) { + kvm_free_guest_direct_virq(kvm, irq); + } + } +} diff --git 
a/arch/e2k/kvm/vmid.c b/arch/e2k/kvm/vmid.c new file mode 100644 index 000000000000..ba593fa2516c --- /dev/null +++ b/arch/e2k/kvm/vmid.c @@ -0,0 +1,72 @@ +/* + * Generic KVM ID allocator + * + * Based on simplified kernel/pid.c + */ + +#include +#include +#include +#include +#include "vmid.h" + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE || kvm_debug) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +static kvm_nidmap_t vmid_nidmap[VMIDMAP_ENTRIES]; +static struct hlist_head vmid_hash[VMID_HASH_SIZE]; +static kvm_vmid_table_t vmid_table; + +int kvm_alloc_vmid(struct kvm *kvm) +{ + int nr; + + DebugKVM("started\n"); + + nr = kvm_alloc_nid(&vmid_table, &kvm->arch.vmid); + if (nr < 0) + DebugKVM("could not allocate VM ID, error %d\n", nr); + else + DebugKVM("allocated VM ID #%d\n", kvm->arch.vmid.nr); + + return nr; +} + +void kvm_free_vmid(struct kvm *kvm) +{ + DebugKVM("started\n"); + + kvm_free_nid(&kvm->arch.vmid, &vmid_table); +} + +int kvm_vmidmap_init(void) +{ + int ret; + + DebugKVM("started\n"); + vmid_table.nidmap = vmid_nidmap; + vmid_table.nidmap_entries = VMIDMAP_ENTRIES; + vmid_table.nid_hash = vmid_hash; + vmid_table.nid_hash_bits = VMID_HASH_BITS; + vmid_table.nid_hash_size = VMID_HASH_SIZE; + ret = kvm_nidmap_init(&vmid_table, VMID_MAX_LIMIT, RESERVED_VMIDS, + /* GID #0 reserved for hupervisor */ + RESERVED_VMIDS - 1); + if (ret != 0) { + pr_err("kvm_vmidmap_init() could not create NID map\n"); + return ret; + } + return 0; +} + +void kvm_vmidmap_destroy(void) +{ + DebugKVM("started\n"); + kvm_nidmap_destroy(&vmid_table); +} diff --git a/arch/e2k/kvm/vmid.h b/arch/e2k/kvm/vmid.h new file mode 100644 index 000000000000..da568bceefdb --- /dev/null +++ b/arch/e2k/kvm/vmid.h @@ -0,0 +1,38 @@ +#ifndef _KVM_E2K_VMID_H +#define _KVM_E2K_VMID_H + +/* + * Guest virtual machine identifier (vmid) allocator + * Based on simplified 
include/linux/pid.h + */ + +#include +#include +#include +#include +#include + +#define VMID_MAX_LIMIT MMU_GID_SIZE +#define RESERVED_VMIDS 1 /* GID #0 reserved for hypervisor */ + /* by hardware */ + +#define VMIDMAP_ENTRIES ((VMID_MAX_LIMIT + 8*PAGE_SIZE - 1)/PAGE_SIZE/8) + +#define VMID_HASH_BITS 5 +#define VMID_HASH_SIZE (1 << VMID_HASH_BITS) + +typedef struct nid vmid_t; + +typedef struct nidmap vmidmap_t; + +typedef struct kvm_nid_table kvm_vmid_table_t; + +#define vmid_hashfn(nr) hash_long((unsigned long)nr, VMID_HASH_BITS) + +extern int kvm_alloc_vmid(struct kvm *kvm); +extern void kvm_free_vmid(struct kvm *kvm); + +extern int kvm_vmidmap_init(void); +extern void kvm_vmidmap_destroy(void); + +#endif /* _KVM_E2K_VMID_H */ diff --git a/arch/e2k/lib/Makefile b/arch/e2k/lib/Makefile new file mode 100644 index 000000000000..abe12c062244 --- /dev/null +++ b/arch/e2k/lib/Makefile @@ -0,0 +1,22 @@ +# +# Makefile for E2K-specific library files +# + +subdir-ccflags-y := -Werror -Wswitch -Wenum-compare + +# compile with -O3 since only performance-sensitive functions are here +empty:= +space:= $(empty) $(empty) +ORIG_CFLAGS := $(KBUILD_CFLAGS) +ORIG_CFLAGS := $(subst $(space)-O2$(space),$(space),$(ORIG_CFLAGS)) +ORIG_CFLAGS := $(subst $(space)-Os$(space),$(space),$(ORIG_CFLAGS)) +KBUILD_CFLAGS = $(ORIG_CFLAGS) -O3 + +lib-y = delay.o usercopy.o builtin.o checksum.o + +# Put these two together (they reference each other so should be close +# in memory, in the same page if possible) +lib-y += string.o recovery_string.o + +# Remove -pg from low-level functions as they are used by arch/e2k/p2v/ +CFLAGS_REMOVE_string.o = -pg diff --git a/arch/e2k/lib/builtin.c b/arch/e2k/lib/builtin.c new file mode 100644 index 000000000000..192324df1f31 --- /dev/null +++ b/arch/e2k/lib/builtin.c @@ -0,0 +1,67 @@ +/* + * $Id: builtin.c,v 1.9 2008/11/05 11:40:46 atic Exp $ Replacement of gcc __builtin_ ... 
macros + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define DEBUG_BLT 0 +#define DebugBLT(...) DebugPrint(DEBUG_BLT ,##__VA_ARGS__) + +extern int printk(const char *fmt, ...); + +/* + * in Makefile -D__builtin_return_address=__e2k_kernel_return_address + */ +noinline notrace void * __e2k_read_kernel_return_address(int n) +{ + e2k_addr_t ret = 0UL; + e2k_cr0_hi_t cr0_hi; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + u64 base; + s64 cr_ind; + u64 flags; + + raw_all_irq_save(flags); + NATIVE_FLUSHC; + + cr0_hi = NATIVE_NV_READ_CR0_HI_REG(); + ret = AS_STRUCT(cr0_hi).ip << 3; + + pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG(); + pcsp_lo = NATIVE_NV_READ_PCSP_LO_REG(); + + cr_ind = AS_STRUCT(pcsp_hi).ind; + base = AS_STRUCT(pcsp_lo).base; + + DebugBLT("base 0x%llx\n", base); + DebugBLT("ind 0x%llx\n", cr_ind); + while (n >= 0) { + e2k_mem_crs_t *frame; + + cr_ind -= SZ_OF_CR; + + if (cr_ind < 0) { + ret = 0UL; + break; + } + + frame = (e2k_mem_crs_t *) (base + cr_ind); + ret = AS_STRUCT(frame->cr0_hi).ip << 3; + DebugBLT("ip 0x%lx\n", ret); + + --n; + } + + raw_all_irq_restore(flags); + + return (void *) ret; +} diff --git a/arch/e2k/lib/checksum.c b/arch/e2k/lib/checksum.c new file mode 100644 index 000000000000..798d03f08a86 --- /dev/null +++ b/arch/e2k/lib/checksum.c @@ -0,0 +1,176 @@ +/* + * Network checksum routines + * + * + * The code coming from arch/alpha/lib/checksum.c and arch/ia-64/lib/checksum.c + * + * This file contains network checksum routines that are better done + * in an architecture-specific manner due to speed.. 
+ */ + +#include +#include + +#include + +#include + +static inline unsigned short from32to16(u32 x) +{ + x += __builtin_e2k_scls(x, 16); + return x >> 16; +} + +unsigned int __pure e2k_do_csum(const unsigned char *buff, int len) +{ + int odd; + u32 result = 0; + + if (len <= 0) + goto out; + odd = 1 & (unsigned long) buff; + if (odd) { + result += (*buff << 8); + len--; + buff++; + } + if (len >= 2) { + if (2 & (unsigned long) buff) { + result += *(unsigned short *) buff; + len -= 2; + buff += 2; + } + if (len >= 4) { + const u32 *buff32 = (u32 *) buff; + int i; + u64 sum = 0; + + buff += (unsigned) len & ~3; + if (len > 200) { + /* Packets */ +#pragma vector aligned +#pragma loop count (1000) +#pragma unroll (4) + for (i = 0; i < len / 4; i++) + sum += (u64) buff32[i]; + } else { + /* Packet headers */ +#pragma vector aligned +#pragma loop count (10) + for (i = 0; i < len / 4; i++) + sum += (u64) buff32[i]; + } + + sum += (u64) result; + sum += __builtin_e2k_scld(sum, 32); + result = (u32) (sum >> 32); + + result = (result & 0xffff) + (result >> 16); + } + if (len & 2) { + result += *(unsigned short *) buff; + buff += 2; + } + } + if (len & 1) + result += *buff; + result = from32to16(result); + if (odd) + result = ((result >> 8) & 0xff) | ((result & 0xff) << 8); +out: + return result; +} +EXPORT_SYMBOL(e2k_do_csum); + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +__wsum __csum_partial(const void *buff, int len, __wsum wsum) +{ + u32 sum = (__force u32) wsum; + u32 result = e2k_do_csum(buff, len); + + return (__force __wsum) add32_with_carry(result, sum); +} +EXPORT_SYMBOL(__csum_partial); + +/* + * this routine is used for miscellaneous IP-like 
checksums, mainly + * in icmp.c + */ +__sum16 ip_compute_csum(const void *buff, int len) +{ + return (__force __sum16)~e2k_do_csum(buff, len); +} +EXPORT_SYMBOL(ip_compute_csum); + +/* + * copy from fs while checksumming, otherwise like csum_partial + */ +__wsum +csum_partial_copy_from_user(const void __user *src, void *dst, int len, + __wsum sum, int *csum_err) +{ + int missing; + + missing = __copy_from_user(dst, src, len); + if (missing) { + memset(dst + len - missing, 0, missing); + *csum_err = -EFAULT; + } else { + *csum_err = 0; + } + + return csum_partial(dst, len, sum); +} +EXPORT_SYMBOL(csum_partial_copy_from_user); + +/* + * copy from ds while checksumming, otherwise like csum_partial + */ +__wsum +csum_partial_copy(const void *src, void *dst, int len, __wsum sum) +{ + memcpy(dst, src, len); + return csum_partial(dst, len, sum); +} +EXPORT_SYMBOL(csum_partial_copy); + +#ifdef _HAVE_ARCH_IPV6_CSUM +__sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, __u8 proto, __wsum csum) +{ + + __u32 ulen; + __u32 uproto; + __u64 sum = (__force u32) csum; + + sum += (__force u32) saddr->s6_addr32[0]; + sum += (__force u32) saddr->s6_addr32[1]; + sum += (__force u32) saddr->s6_addr32[2]; + sum += (__force u32) saddr->s6_addr32[3]; + sum += (__force u32) daddr->s6_addr32[0]; + sum += (__force u32) daddr->s6_addr32[1]; + sum += (__force u32) daddr->s6_addr32[2]; + sum += (__force u32) daddr->s6_addr32[3]; + + ulen = (__force u32) htonl((__u32) len); + sum += ulen; + + uproto = (__force u32) htonl(proto); + sum += uproto; + + return csum_fold((__force __wsum) from64to32(sum)); +} +EXPORT_SYMBOL(csum_ipv6_magic); +#endif diff --git a/arch/e2k/lib/delay.c b/arch/e2k/lib/delay.c new file mode 100644 index 000000000000..1ff8edc244fc --- /dev/null +++ b/arch/e2k/lib/delay.c @@ -0,0 +1,29 @@ +#include +#include +#include +#include +#include +#include + +void notrace __delay(unsigned long cycles) +{ + cycles_t start = get_cycles(); + 
+ while (get_cycles() - start < cycles) + cpu_relax(); +} +EXPORT_SYMBOL(__delay); + +void notrace udelay(unsigned long usecs) +{ + __delay(usecs * loops_per_jiffy * HZ / USEC_PER_SEC); +} +EXPORT_SYMBOL(udelay); + +int native_read_current_timer(unsigned long *timer_val) +{ + *timer_val = get_cycles(); + + return 0; +} + diff --git a/arch/e2k/lib/recovery_string.S b/arch/e2k/lib/recovery_string.S new file mode 100644 index 000000000000..68862e134715 --- /dev/null +++ b/arch/e2k/lib/recovery_string.S @@ -0,0 +1,494 @@ +#include + +.text +.global $__recovery_memcpy_8 +.type __recovery_memcpy_8,@function +$__recovery_memcpy_8: +.ignore ld_st_style +/* + * dr0 - dst + * dr1 - src + * dr2 - len + * dr3 - strd opcode + * dr4 - ldrd opcode + * r5 - enable prefetching + * + * Does not return a value. + */ + { + setwd wsz = 0x14, nfx = 0x1 + ipd 1 + disp %ctpr2, 5f /* very_small_size */ + + setbn rsz = 0xb, rbs = 0x8, rcur = 0x0 + setbp psz = 0x1 + + /* dr14 holds the number of copied bytes + * in case pagefault happens */ + addd,4 0x0, 0x0, %dr14 + + addd,3 %dr4, 0x0, %dr10 + addd,5 %dr4, 0x8, %dr11 + } + { + ipd 1 + disp %ctpr1, 6f /* small_size */ + + addd,5 %dr4, 0x10, %dr12 + + /* %pred26 == 'true' if 'size' is zero (i.e. 'size' < 8) */ + cmpbdb,0 %dr2, 0x8, %pred26 + /* %pred27 == 'false' if 'size' >= 16 bytes */ + cmpbdb,1 %dr2, 0x10, %pred27 + + /* %pred28 == 'false' if 'size' >= 24 bytes */ + cmpbdb,3 %dr2, 0x18, %pred28 + /* %pred25 == 'true' if 'size' <= 32 bytes */ + cmpledb,4 %dr2, 0x20, %pred25 + } + { + return %ctpr3 + + addd,5 %dr4, 0x18, %dr13 + + cmpbdb,0 %dr2, 0x40, %pred12 + /* %pred29 == 'false' if 'size' >= 32 bytes */ + cmpbdb,1 %dr2, 0x20, %pred29 + + /* If %pred6 is 'false' then the remaining 16-bytes + * tail has to be copied after the main copying loop + * which copies data in 32-bytes blocks. 
*/ + cmpandedb,3 %dr2, 0x10, %pred6 + /* %pred7 == 'size' < 192 (minimum allowed size + * for the optimized copying algorythm - 6 cachelines + * for unrolling) */ + cmpbdb,4 %dr2, 0xc0, %pred7 + } + { + /* %pred8 == 'true' if 'size' is a multiple of 16 */ + cmpandedb,1 %dr2, 0x8, %pred8 + + addd,0 %dr3, 0x0, %dr6 + addd,2 %dr3, 0x10, %dr8 + + addd,4 %dr3, 0x8, %dr7 + + ldrd,3 [ %dr1 + %dr10 ], %db[10] ? ~ %pred26 + ldrd,5 [ %dr1 + %dr11 ], %db[11] ? ~ %pred27 + } + { + addd,4 %dr3, 0x18, %dr9 + + ldrd,3 [ %dr1 + %dr12 ], %db[22] ? ~ %pred28 + ldrd,5 [ %dr1 + %dr13 ], %db[23] ? ~ %pred29 + + ct %ctpr2 ? %pred25 + } + { + ipd 0 + disp %ctpr2, 8f /* copy_tail_small */ + + addd,1 %dr1, 0x100, %db[0] + + /* Check whether prefetching is disabled */ + cmpesb,4 %r5, 0, %pred15 + + addd,0 %dr1, 0x20, %dr5 + addd,2 %dr1, 0x40, %dr4 + + addd,5 %dr1, %dr2, %dr3 + + /* If the block is small, use simple loop without unrolling */ + ct %ctpr1 ? %pred7 + } + { + ipd 0 + disp %ctpr3, 2f /* skip_prefetch_loop */ + + cmpbedb,4 %dr2, 0x2c0, %pred2 + + ldrd,0 [ %dr5 + %dr10 ], %db[8] + ldrd,2 [ %dr5 + %dr11 ], %db[9] + ldrd,3 [ %dr5 + %dr12 ], %db[20] + ldrd,5 [ %dr5 + %dr13 ], %db[21] + addd %dr5, 0x40, %dr5 + } + { + ipd 0 + disp %ctpr1, 1f /* prefetch */ + + cmpbedb,4 %dr2, 0xe0, %pred3 + + ldrd,0 [ %dr4 + %dr10 ], %db[6] + ldrd,2 [ %dr4 + %dr11 ], %db[7] + ldrd,3 [ %dr4 + %dr12 ], %db[18] + ldrd,5 [ %dr4 + %dr13 ], %db[19] + addd %dr4, 0x40, %dr4 + } + { + ipd 1 + disp %ctpr2, 3f /* copy */ + + subd,4 %dr14, 0x20, %dr14 + + ldrd,0 [ %dr5 + %dr10 ], %db[4] + ldrd,2 [ %dr5 + %dr11 ], %db[5] + ldrd,3 [ %dr5 + %dr12 ], %db[16] + ldrd,5 [ %dr5 + %dr13 ], %db[17] + addd %dr5, 0x40, %dr5 + } + { + cmpbdb,4 %dr2, 0x4c0, %pred0 + + addd,1 %dr1, 0x100, %db[1] + + ldrd,0 [ %dr4 + %dr10 ], %db[2] + ldrd,2 [ %dr4 + %dr11 ], %db[3] + ldrd,3 [ %dr4 + %dr12 ], %db[14] + ldrd,5 [ %dr4 + %dr13 ], %db[15] + + ct %ctpr3 ? 
%pred15 + } + + /* Load the src block into the L2 cache - prefetching to L1 + * is neither practical (only 1 line is fetched per cycle) + * nor needed (this loop is unrolled enough to do not worry + * about latency). */ + { + subd,4 %dr3, 0x5c0, %dr4 + ldb,sm [ %dr1 + 0xc0 ] MAS_BYPASS_L1_CACHE, %empty ? ~ %pred3 + ldb,sm [ %dr1 + 0xe0 ] MAS_BYPASS_L1_CACHE, %empty ? ~ %pred3 + + ct %ctpr3 ? %pred2 + } +1: /* prefetch */ + { + /* pred1 = dr4 <= db[0] = + * = dr1 + dr2 - 0x5c0 <= dr1 + prefetched = + * = dr2 - prefetched <= 0x5c0 = + * = size - prefetched <= 0x5c0 */ + cmpbedb,4 %dr4, %db[0], %pred1 + ldb,0,sm [ %db[0] + 0 ] MAS_BYPASS_L1_CACHE, %empty + ldb,2,sm [ %db[0] + 0x40 ] MAS_BYPASS_L1_CACHE, %empty + ldb,3,sm [ %db[0] + 0x80 ] MAS_BYPASS_L1_CACHE, %empty + ldb,5,sm [ %db[0] + 0xc0 ] MAS_BYPASS_L1_CACHE, %empty + addd %db[0], 0x200, %db[0] + } + { + ldb,0,sm [ %db[1] + 0x100 ] MAS_BYPASS_L1_CACHE, %empty + ldb,2,sm [ %db[1] + 0x140 ] MAS_BYPASS_L1_CACHE, %empty + ldb,3,sm [ %db[1] + 0x180 ] MAS_BYPASS_L1_CACHE, %empty + ldb,5,sm [ %db[1] + 0x1c0 ] MAS_BYPASS_L1_CACHE, %empty + addd %db[1], 0x200, %db[1] + abp abpf = 1, abpt = 1 + ct %ctpr1 ? ~ %pred0 + } + +2: /* skip_prefetch_loop */ + /* Copy the page */ + { + ipd 1 + disp %ctpr1, 4f /* copy_tail */ + + ldb,0,sm [ %db[0] + 0 ] (MAS_LOAD_SPEC | MAS_BYPASS_L1_CACHE), %empty ? ~ %pred15 + ldb,2,sm [ %db[0] + 0x40 ] (MAS_LOAD_SPEC | MAS_BYPASS_L1_CACHE), %empty ? ~ %pred15 + ldb,3,sm [ %db[0] + 0x80 ] (MAS_LOAD_SPEC | MAS_BYPASS_L1_CACHE), %empty ? ~ %pred15 + ldb,5,sm [ %db[0] + 0xc0 ] (MAS_LOAD_SPEC | MAS_BYPASS_L1_CACHE), %empty ? 
~ %pred15 + + cmpbdb,1 %dr2, 0xe0, %pred0 + + /* dr3 = dr1 + dr2 - 0x60 */ + subd,4 %dr3, 0x60, %dr3 + } +3: /* copy */ + { + cmpldb,4 %dr3, %dr5, %pred1 + + ldrd,0 [ %dr5 + %dr10 ], %db[0] + ldrd,2 [ %dr5 + %dr11 ], %db[1] + ldrd,3 [ %dr5 + %dr12 ], %db[12] + ldrd,5 [ %dr5 + %dr13 ], %db[13] + addd %dr5, 0x20, %dr5 + } + { + /* If trap happens on previous instruction %dr14 + * will be negative, so we check for that in trap + * handler. */ + addd,3 %dr14, 0x20, %dr14 + + strd,2 [ %dr0 + %dr6 ], %db[10] + strd,5 [ %dr0 + %dr7 ], %db[11] + addd,1 %dr6, 0x20, %dr6 + addd,4 %dr7, 0x20, %dr7 + } + { + strd,2 [ %dr0 + %dr8 ], %db[22] + strd,5 [ %dr0 + %dr9 ], %db[23] + addd,1 %dr8, 0x20, %dr8 + addd,4 %dr9, 0x20, %dr9 + abn abnf = 1, abnt = 1 + abp abpf = 1, abpt = 1 + ct %ctpr2 ? ~ %pred0 + } + /* Copy the remaining tail */ + { + subd,1 %dr2, 0x60, %dr3 + ldrd,0 [ %dr5 + %dr10 ], %db[0] ? ~ %pred6 + ldrd,2 [ %dr5 + %dr11 ], %db[1] ? ~ %pred6 + addd,3 %dr10, 0x10, %dr10 ? ~ %pred6 + cmpedb 0x0, 0x0, %pred0 + return %ctpr3 + } + { + ldrd,3 [ %dr5 + %dr10 ], %dr13 ? ~ %pred8 + } +4: /* copy_tail */ + { + addd,3 %dr14, 0x20, %dr14 + cmpbesb %r6, %r3, %pred1 + strd,2 [ %dr0 + %dr6 ], %db[10] + strd,5 [ %dr0 + %dr7 ], %db[11] + addd,1 %dr6, 0x20, %dr6 + addd,4 %dr7, 0x20, %dr7 + } + { + strd,2 [ %dr0 + %dr8 ], %db[22] + strd,5 [ %dr0 + %dr9 ], %db[23] + addd,1 %dr8, 0x20, %dr8 + addd,4 %dr9, 0x20, %dr9 + abn abnf = 1, abnt = 1 + abp abpf = 1, abpt = 1 + ct %ctpr1 ? %pred0 + } + { + addd,3 %dr14, 0x20, %dr14 + strd,2 [ %dr0 + %dr6 ], %db[10] ? ~ %pred6 + strd,5 [ %dr0 + %dr7 ], %db[11] ? ~ %pred6 + addd,1 %dr6, 0x10, %dr6 ? ~ %pred6 + } + { + addd,3 %dr14, 0x10, %dr14 ? ~ %pred6 + strd [ %dr0 + %dr6 ], %dr13 ? ~ %pred8 + } + { + addd,3 %dr2, 0x0, %dr0 + ct %ctpr3 + } + + +5: /* very_small_size */ + { + strd [ %dr0 + %dr6 ], %db[10] ? ~ %pred26 + strd [ %dr0 + %dr7 ], %db[11] ? ~ %pred27 + } + { + addd,0 %dr14, 0x10, %dr14 ? ~ %pred27 + strd [ %dr0 + %dr8 ], %db[22] ? 
~ %pred28 + strd [ %dr0 + %dr9 ], %db[23] ? ~ %pred29 + } + { + /* Return should not be in the same instruction + * with memory access, otherwise we will return + * on page fault and page fault handler will + * return from our caller. */ + addd,3 %dr2, 0x0, %dr0 + ct %ctpr3 + } + + +6: /* small_size */ + { + ipd 0 + disp %ctpr1, 7f /* copy_small */ + + cmpbdb %dr2, 0x60, %pred0 + subd,4 %dr3, 0x60, %dr3 + + subd,3 %dr14, 0x20, %dr14 ? ~ %pred12 + + ct %ctpr2 ? %pred12 + } +7: /* copy_small */ + { + cmpldb,4 %dr3, %dr5, %pred1 + + ldrd,0 [ %dr5 + %dr10 ], %db[8] + ldrd,3 [ %dr5 + %dr11 ], %db[9] + ldrd,2 [ %dr5 + %dr12 ], %db[20] + ldrd,5 [ %dr5 + %dr13 ], %db[21] + addd %dr5, 0x20, %dr5 + } + { + /* If trap happens on previous instruction %dr14 + * will be negative, so we check for that in trap + * handler. */ + addd,3 %dr14, 0x20, %dr14 + + strd,2 [ %dr0 + %dr6 ], %db[10] + strd,5 [ %dr0 + %dr7 ], %db[11] + addd,1 %dr6, 0x20, %dr6 + addd,4 %dr7, 0x20, %dr7 + } + { + strd,2 [ %dr0 + %dr8 ], %db[22] + strd,5 [ %dr0 + %dr9 ], %db[23] + addd,1 %dr8, 0x20, %dr8 + addd,4 %dr9, 0x20, %dr9 + + abn abnf = 1, abnt = 1 + abp abpf = 1, abpt = 1 + ct %ctpr1 ? ~ %pred0 + } +8: /* copy_tail_small */ + { + addd,4 %dr14, 0x20, %dr14 ? ~ %pred12 + + ldrd,0 [ %dr5 + %dr10 ], %db[8] ? ~ %pred6 + ldrd,3 [ %dr5 + %dr11 ], %db[9] ? ~ %pred6 + addd,1 %dr10, 0x10, %dr10 ? ~ %pred6 + } + { + ldrd,2 [ %dr5 + %dr10 ], %dr13 ? ~ %pred8 + } + { + strd,2 [ %dr0 + %dr6 ], %db[10] + strd,5 [ %dr0 + %dr7 ], %db[11] + addd,1 %dr6, 0x20, %dr6 + addd,4 %dr7, 0x20, %dr7 + } + { + addd,3 %dr14, 0x10, %dr14 + strd,2 [ %dr0 + %dr8 ], %db[22] + strd,5 [ %dr0 + %dr9 ], %db[23] + } + { + addd,3 %dr14, 0x10, %dr14 + strd,2 [ %dr0 + %dr6 ], %db[8] ? ~ %pred6 + strd,5 [ %dr0 + %dr7 ], %db[9] ? ~ %pred6 + addd,1 %dr6, 0x10, %dr6 ? ~ %pred6 + } + { + addd,3 %dr14, 0x10, %dr14 ? ~ %pred6 + strd,2 [ %dr0 + %dr6 ], %dr13 ? ~ %pred8 + } + { + addd,3 %dr2, 0x0, %dr0 + ct %ctpr3 + } +.size $__recovery_memcpy_8, . 
- $__recovery_memcpy_8 + +.global $recovery_memcpy_fault +.global $.recovery_memcpy_fault +$recovery_memcpy_fault: +$.recovery_memcpy_fault: +.ignore ld_st_style + { + nop + cmpldb %dr14, 0, %pred0 + return %ctpr3 + } + { + addd 0, 0, %dr0 ? %pred0 + addd 0, %dr14, %dr0 ? ~ %pred0 + ct %ctpr3 + } +.size $recovery_memcpy_fault, . - $recovery_memcpy_fault + +.global $__recovery_memset_8 +.type __recovery_memset_8,@function +$__recovery_memset_8: +.ignore ld_st_style +/* + * dr0 - dst + * dr1 - value + * dr2 - tag + * dr3 - size + * dr4 - strd opcode + */ + { + setwd wsz = 0x5 + setbp psz = 0x1 + + ipd 0 + disp %ctpr2, 2f /* store_tail */ + } + { + ipd 0 + disp %ctpr1, 1f /* store */ + + cmpbesb,0 %r3, 0x18, %pred4 + cmpandesb,1 %r3, 0x10, %pred2 + + puttagd,2 %dr1, %dr2, %dr1 + + /* dr9 holds the number of cleared bytes in case + * pagefault happens. */ + subd,3 0x0, 0x20, %dr9 + } + { + return %ctpr3 + + cmpbsb,0 %r3, 0x40, %pred0 + cmpandesb,1 %r3, 0x8, %pred3 + } + { + subs,1 %r3, 0x60, %r3 + + addd,5 %dr4, 0x8, %dr5 + } + { + addd,1 %dr4, 0x10, %dr6 + addd,4 %dr4, 0x18, %dr7 + + ct %ctpr2 ? %pred4 + } + +1: /* store */ + { + cmplsb %r3, %r4, %pred1 + strd,2 [ %dr0 + %dr4 ], %dr1 + strd,5 [ %dr0 + %dr5 ], %dr1 + addd,1 %dr4, 0x20, %dr4 + addd,4 %dr5, 0x20, %dr5 + addd,3 %dr9, 0x20, %dr9 + } + { + strd,2 [ %dr0 + %dr6 ], %dr1 + strd,5 [ %dr0 + %dr7 ], %dr1 + addd,1 %dr6, 0x20, %dr6 + addd,4 %dr7, 0x20, %dr7 + abp abpf = 1, abpt = 1 + ct %ctpr1 ? ~ %pred0 + } + +2: /* store_tail */ + { + strd,2 [ %dr0 + %dr4 ], %dr1 ? ~ %pred2 + strd,5 [ %dr0 + %dr5 ], %dr1 ? ~ %pred2 + addd,3 %dr9, 0x20, %dr9 + addd,1 %dr4, 0x10, %dr4 ? ~ %pred2 + } + { + strd,2 [ %dr0 + %dr4 ], %dr1 ? ~ %pred3 + addd,3 %dr9, 0x10, %dr9 ? ~ %pred2 + } + { + addd,3 %dr9, 0x8, %dr0 ? ~ %pred3 + addd,4 %dr9, 0, %dr0 ? %pred3 + ct %ctpr3 + } +.size $__recovery_memset_8, . 
- $__recovery_memset_8 + +.global $recovery_memset_fault +.global $.recovery_memset_fault +$recovery_memset_fault: +$.recovery_memset_fault: +.ignore ld_st_style + { + addd 0, %dr9, %dr0 + return %ctpr3 + } + { + ct %ctpr3 + } +.size $recovery_memset_fault, . - $recovery_memset_fault diff --git a/arch/e2k/lib/string.c b/arch/e2k/lib/string.c new file mode 100644 index 000000000000..4fb9590302b3 --- /dev/null +++ b/arch/e2k/lib/string.c @@ -0,0 +1,1047 @@ +/* + * string routines + * + * This file contains memcpy/memset routines that are better done + * in an architecture-specific manner due to speed.. + */ + + +#include +#include +#include +#include + + +#ifdef BOOT +/* This file is included in kernel's builtin boot directly, + * undefine EXPORT_SYMBOL to avoid linking errors. */ +# undef EXPORT_SYMBOL +# define EXPORT_SYMBOL(sym) +#endif + + +#ifdef __HAVE_ARCH_MEMSET + +EXPORT_SYMBOL(__recovery_memset_8); +EXPORT_SYMBOL(__recovery_memset_16); + +void __memset(void *s, long c, size_t count) +{ + unsigned long head, head1, head3, head7, tail8, tail12, tail14; + void *tail; + + if (unlikely(count < 16)) { + u64 n8, n12, n14; + + n8 = count & 8; + n12 = count & 12; + n14 = count & 14; + + if (count & 8) + *(u64 *) s = c; + if (count & 4) + *(u32 *) (s + n8) = c; + if (count & 2) + *(u16 *) (s + n12) = c; + if (count & 1) + *(u8 *) (s + n14) = c; + + return; + } + + /* Set the head */ + head = 16 - ((unsigned long) s & 0xfUL); + + head1 = (unsigned long) s & 1; /* s & 1 == head & 1 */ + head3 = head & 3; + head7 = head & 7; + + if (head1) + *(u8 *) s = c; + if (head & 2) + *(u16 *) (s + head1) = c; + if (head & 4) + *(u32 *) (s + head3) = c; + if (head & 8) + *(u64 *) (s + head7) = c; + + s = PTR_ALIGN(s, 16); + count -= head & 0xf; + + /* Set the tail */ + tail = s + (count & ~0xfUL); + + tail8 = count & 8; + tail12 = count & 12; + tail14 = count & 14; + + if (count & 8) + *(u64 *) tail = c; + if (count & 4) + *(u32 *) (tail + tail8) = c; + if (count & 2) + *(u16 *) 
(tail + tail12) = c; + if (count & 1) + *(u8 *) (tail + tail14) = c; + + if (count & ~0xfUL) { + fast_tagged_memory_set(s, c, 0, count & ~0xfUL, + LDST_QWORD_FMT << LDST_REC_OPC_FMT_SHIFT + | MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT); + } +} +EXPORT_SYMBOL(__memset); + +/* Same as __memset but makes sure that stores from the same long + * instruction land in the same page (to ensure ordering). */ +void __memset_io(void *s, long c, size_t count) +{ + unsigned long align = (unsigned long) s & 0xf, head, head1, head2, + head3, head4, head7, head8, tail1, tail2, tail4, tail6; + + if (unlikely(count < 16)) { + char *dst = (char *) s; + + /* Ugly, but otherwise lcc won't do it this way */ + if (count > 0) + WRITE_ONCE(dst[0], c); + if (count > 1) + WRITE_ONCE(dst[1], c); + if (count > 2) + WRITE_ONCE(dst[2], c); + if (count > 3) + WRITE_ONCE(dst[3], c); + if (count > 4) + WRITE_ONCE(dst[4], c); + if (count > 5) + WRITE_ONCE(dst[5], c); + if (count > 6) + WRITE_ONCE(dst[6], c); + if (count > 7) + WRITE_ONCE(dst[7], c); + if (count > 8) + WRITE_ONCE(dst[8], c); + if (count > 9) + WRITE_ONCE(dst[9], c); + if (count > 10) + WRITE_ONCE(dst[10], c); + if (count > 11) + WRITE_ONCE(dst[11], c); + if (count > 12) + WRITE_ONCE(dst[12], c); + if (count > 13) + WRITE_ONCE(dst[13], c); + if (count > 14) + WRITE_ONCE(dst[14], c); + + return; + } + + /* Set the head */ + head = 16 - align; + + head1 = (unsigned long) s & 1; /* s & 1 == head & 1 */ + head2 = head & 2; + head3 = head & 3; + head4 = head & 4; + head7 = head & 7; + head8 = head & 8; + + if (head1) + WRITE_ONCE(*(u8 *) s, c); + if (head2) + WRITE_ONCE(*(u16 *) (s + head1), c); + if (head4) + WRITE_ONCE(*(u32 *) (s + head3), c); + if (head8) + WRITE_ONCE(*(u64 *) (s + head7), c); + + s = PTR_ALIGN(s, 16); + count -= head & 0xf; + + fast_tagged_memory_set(s, c, 0, count & ~0x7UL, + LDST_QWORD_FMT << LDST_REC_OPC_FMT_SHIFT + | MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT); + + /* Set the tail */ + s += count & ~0x7UL; + 
+ tail1 = count & 1; + tail2 = count & 2; + tail4 = count & 4; + tail6 = count & 6; + + if (tail4) + WRITE_ONCE(*(u32 *) s, c); + if (tail2) + WRITE_ONCE(*(u16 *) (s + tail4), c); + if (tail1) + WRITE_ONCE(*(u8 *) (s + tail6), c); +} +EXPORT_SYMBOL(__memset_io); + + +#undef memset +void *memset(void *dst, int c, size_t n) +{ + long cc = __builtin_e2k_pshufb(c, c, 0); + + __memset(dst, cc, n); + + return dst; +} +EXPORT_SYMBOL(memset); +#endif /* __HAVE_ARCH_MEMSET */ + + +#ifdef __HAVE_ARCH_MEMCPY +static __always_inline void aligned_memcpy(char *__restrict dst, + const char *__restrict src, size_t n) +{ + int i; + + if (IS_ALIGNED((unsigned long) src, 4) && + IS_ALIGNED((unsigned long) dst, 4)) { + u32 *dst32 = (u32 *) dst; + u32 *src32 = (u32 *) src; + + for (i = 0; i < n / 4; i++) + dst32[i] = src32[i]; + + dst += n & ~0x3UL; + src += n & ~0x3UL; + for (i = 0; i < (n & 0x3); i++) + dst[i] = src[i]; + } else if (IS_ALIGNED((unsigned long) src, 2) && + IS_ALIGNED((unsigned long) dst, 2)) { + u16 *dst16 = (u16 *) dst; + u16 *src16 = (u16 *) src; + + for (i = 0; i < n / 2; i++) + dst16[i] = src16[i]; + + if (n & 1) + dst[n - 1] = src[n - 1]; + } else { + for (i = 0; i < n; i++) + dst[i] = src[i]; + } +} + + +#if !defined(CONFIG_BOOT_E2K) +# define HAS_HWBUG_UNALIGNED_LOADS unlikely(cpu_has(CPU_HWBUG_UNALIGNED_LOADS)) +#else +# define HAS_HWBUG_UNALIGNED_LOADS IS_ENABLED(CONFIG_CPU_ES2) +#endif + +static __always_inline void smallest_memcpy(char *__restrict dst, + const char *__restrict src, size_t n) +{ + u64 n8, n12, n14; + + n8 = n & 8; + n12 = n & 12; + n14 = n & 14; + + if (n & 8) + *(u64 *) dst = *(u64 *) src; + if (n & 4) + *(u32 *) (dst + n8) = *(u32 *) (src + n8); + if (n & 2) + *(u16 *) (dst + n12) = *(u16 *) (src + n12); + if (n & 1) + *(u8 *) (dst + n14) = *(u8 *) (src + n14); +} + +void *__memcpy(void *dst, const void *src, size_t n) +{ + int hwbug = HAS_HWBUG_UNALIGNED_LOADS; + void *const orig_dst = dst; + unsigned long head, tail, head1, head3, head7, 
+ tail8, tail12, tail14; + u64 head_val8, tail_val8; + u32 head_val4, tail_val4; + u16 head_val2, tail_val2; + u8 head_val1, tail_val1; + size_t length, orig_n = n; + + if (hwbug) { + /* + * bug 103351 comment 46 - try to avoid unaligned accesses + */ + if (!IS_ALIGNED((unsigned long) src, 8) || + !IS_ALIGNED((unsigned long) dst, 8)) { + /* can't use the optimized loop below */ + aligned_memcpy(dst, src, n); + + return orig_dst; + } + + prefetch_nospec_range(src, n); + + __E2K_WAIT(_ld_c); + } + + if (unlikely(n < 16)) { + smallest_memcpy(dst, src, n); + + return orig_dst; + } + + /* Copy the head */ + + head = 16 - ((unsigned long) dst & 0xfUL); + + n -= head & 0xf; + length = (orig_n >= 2 * 8192) ? 8192 : (n & ~0xfUL); + + head1 = (unsigned long) dst & 1; /* dst & 1 == head & 1 */ + head3 = head & 3; + head7 = head & 7; + + tail = n; + tail8 = tail & 8; + tail12 = tail & 12; + tail14 = tail & 14; + + if (head & 1) + head_val1 = *(u8 *) src; + if (head & 2) + head_val2 = *(u16 *) (src + head1); + if (head & 4) + head_val4 = *(u32 *) (src + head3); + if (head & 8) + head_val8 = *(u64 *) (src + head7); + + src += head & 0xf; + dst = PTR_ALIGN(dst, 16); + + /* Do the copy. Bypass L1 cache - usually after memcpy memory + * is not accessed immediately since user knows its contents */ + do { + n -= length; + + /* Copy with tags. This is useful for access_process_vm. */ + if (likely(length)) { + fast_tagged_memory_copy(dst, src, length, + TAGGED_MEM_STORE_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + TAGGED_MEM_LOAD_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + !hwbug); + } + + src += length; + dst += length; + + length = (n >= 2 * 8192) ? 
8192 : (n & ~0xfUL); + } while (unlikely(n >= 16)); + + /* Copy the tail */ + + if (tail & 8) + tail_val8 = *(u64 *) src; + if (tail & 4) + tail_val4 = *(u32 *) (src + tail8); + if (tail & 2) + tail_val2 = *(u16 *) (src + tail12); + if (tail & 1) + tail_val1 = *(u8 *) (src + tail14); + + if (head & 1) + *(u8 *) orig_dst = head_val1; + if (head & 2) + *(u16 *) (orig_dst + head1) = head_val2; + if (head & 4) + *(u32 *) (orig_dst + head3) = head_val4; + if (head & 8) + *(u64 *) (orig_dst + head7) = head_val8; + + if (tail & 8) + *(u64 *) dst = tail_val8; + if (tail & 4) + *(u32 *) (dst + tail8) = tail_val4; + if (tail & 2) + *(u16 *) (dst + tail12) = tail_val2; + if (tail & 1) + *(u8 *) (dst + tail14) = tail_val1; + + return orig_dst; +} +EXPORT_SYMBOL(__memcpy); + +#undef memcpy +void *memcpy(void *dst, const void *src, size_t n) +{ + return __memcpy(dst, src, n); +} +EXPORT_SYMBOL(memcpy); + +/* Kernel's decompressor and built-in boot do not use the code below, + * so keep things simple with this #ifndef */ +# ifndef BOOT +static void __memcpy_from_io_slow(void *__restrict dst, + const volatile void *__restrict src, size_t n) +{ + unsigned long head, tail, head1, head2, head3, head4, head7, head8, + head15, head16, tail1, tail2, tail4, tail6; + u64 head_val8, head_val16_lo, head_val16_hi; + u32 head_val4, tail_val4; + u16 head_val2, tail_val2; + u8 head_val1, tail_val1; + + if (unlikely(n < 32)) { + int i; + + for (i = 0; i < n; i++) { + WRITE_ONCE(((u8 *) dst)[i], + READ_ONCE(((u8 *) src)[i])); + __E2K_WAIT(_st_c); + } + + return; + } + + /* Copy the head */ + + head = 32 - ((unsigned long) src & 0x1fUL); + + head1 = (unsigned long) src & 1; /* src & 1 == head & 1 */ + head2 = head & 2; + head3 = head & 3; + head4 = head & 4; + head7 = head & 7; + head8 = head & 8; + head15 = head & 15; + head16 = head & 16; + + if (head1) { + head_val1 = READ_ONCE(*(u8 *) src); + __E2K_WAIT(_ld_c); + } + if (head2) { + head_val2 = READ_ONCE(*(u16 *) (src + head1)); + 
__E2K_WAIT(_ld_c); + } + if (head4) { + head_val4 = READ_ONCE(*(u32 *) (src + head3)); + __E2K_WAIT(_ld_c); + } + if (head8) { + head_val8 = READ_ONCE(*(u64 *) (src + head7)); + __E2K_WAIT(_ld_c); + } + if (head16) { + head_val16_lo = READ_ONCE(*(u64 *) (src + head15)); + __E2K_WAIT(_ld_c); + head_val16_hi = READ_ONCE(*(u64 *) (src + head15 + 8)); + __E2K_WAIT(_ld_c); + } + + if (head1) { + WRITE_ONCE(*(u8 *) dst, head_val1); + __E2K_WAIT(_st_c); + } + if (head2) { + WRITE_ONCE(*(u16 *) (dst + head1), head_val2); + __E2K_WAIT(_st_c); + } + if (head4) { + WRITE_ONCE(*(u32 *) (dst + head3), head_val4); + __E2K_WAIT(_st_c); + } + if (head8) { + WRITE_ONCE(*(u64 *) (dst + head7), head_val8); + __E2K_WAIT(_st_c); + } + if (head16) { + WRITE_ONCE(*(u64 *) (dst + head15), head_val16_lo); + __E2K_WAIT(_st_c); + WRITE_ONCE(*(u64 *) (dst + head15 + 8), head_val16_hi); + __E2K_WAIT(_st_c); + } + + dst += head & 0x1f; + src = PTR_ALIGN(src, 32); + n -= head & 0x1f; + + while (n >= 8) { + WRITE_ONCE(*(u64 *) dst, READ_ONCE(*(u64 *) src)); + __E2K_WAIT(_st_c); + n -= 8; + src += 8; + dst += 8; + } + + /* Copy the tail */ + tail = n; + + tail1 = tail & 1; + tail2 = tail & 2; + tail4 = tail & 4; + tail6 = tail & 6; + + if (tail4) { + tail_val4 = READ_ONCE(*(u32 *) src); + __E2K_WAIT(_ld_c); + } + if (tail2) { + tail_val2 = READ_ONCE(*(u16 *) (src + tail4)); + __E2K_WAIT(_ld_c); + } + if (tail1) { + tail_val1 = READ_ONCE(*(u8 *) (src + tail6)); + __E2K_WAIT(_ld_c); + } + + if (tail4) { + WRITE_ONCE(*(u32 *) dst, tail_val4); + __E2K_WAIT(_st_c); + } + if (tail2) { + WRITE_ONCE(*(u16 *) (dst + tail4), tail_val2); + __E2K_WAIT(_st_c); + } + if (tail1) { + WRITE_ONCE(*(u8 *) (dst + tail6), tail_val1); + __E2K_WAIT(_st_c); + } +} + +/* + * __memcpy_fromio() - the same as __memcpy() but with ordered loads + * and disabled prefetch. Also makes sure that loads from the same + * long instruction land in the same page (to ensure ordering). 
+ */ +void __memcpy_fromio(void *__restrict dst, const volatile void __iomem *__restrict src, size_t n) +{ + const void *const orig_dst = dst; + unsigned long head, tail, head1, head2, head3, head4, head7, head8, + head15, head16, tail1, tail2, tail4, tail6; + u64 head_val8, head_val16_lo, head_val16_hi; + u32 head_val4, tail_val4; + u16 head_val2, tail_val2; + u8 head_val1, tail_val1; + + if (unlikely(cpu_has(CPU_HWBUG_PIO_READS))) { + __memcpy_from_io_slow(dst, src, n); + return; + } + + if (unlikely(n < 32)) { + int i; + + for (i = 0; i < n; i++) + ((u8 *) dst)[i] = READ_ONCE(((u8 *) src)[i]); + + return; + } + + /* Copy the head */ + + head = 32 - ((unsigned long) src & 0x1fUL); + + head1 = (unsigned long) src & 1; /* src & 1 == head & 1 */ + head2 = head & 2; + head3 = head & 3; + head4 = head & 4; + head7 = head & 7; + head8 = head & 8; + head15 = head & 15; + head16 = head & 16; + + if (head1) + head_val1 = READ_ONCE(*(u8 *) src); + if (head2) + head_val2 = READ_ONCE(*(u16 *) (src + head1)); + if (head4) + head_val4 = READ_ONCE(*(u32 *) (src + head3)); + if (head8) + head_val8 = READ_ONCE(*(u64 *) (src + head7)); + if (head16) { + head_val16_lo = READ_ONCE(*(u64 *) (src + head15)); + head_val16_hi = READ_ONCE(*(u64 *) (src + head15 + 8)); + } + + dst += head & 0x1f; + src = PTR_ALIGN(src, 32); + n -= head & 0x1f; + + /* Do the copy. Bypass L1 cache - usually after memcpy memory + * is not accessed immediately since user knows its contents */ + do { + size_t length = (n >= 2 * 8192) ? 
8192 : (n & ~0x7UL); + + n -= length; + + fast_tagged_memory_copy(dst, (__force const void *__restrict) src, length, + TAGGED_MEM_STORE_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + TAGGED_MEM_LOAD_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + 0); + + src += length; + dst += length; + } while (unlikely(n >= 8)); + + /* Copy the tail */ + tail = n; + + tail1 = tail & 1; + tail2 = tail & 2; + tail4 = tail & 4; + tail6 = tail & 6; + + if (tail4) + tail_val4 = READ_ONCE(*(u32 *) src); + if (tail2) + tail_val2 = READ_ONCE(*(u16 *) (src + tail4)); + if (tail1) + tail_val1 = READ_ONCE(*(u8 *) (src + tail6)); + + if (head1) + *(u8 *) orig_dst = head_val1; + if (head2) + *(u16 *) (orig_dst + head1) = head_val2; + if (head4) + *(u32 *) (orig_dst + head3) = head_val4; + if (head8) + *(u64 *) (orig_dst + head7) = head_val8; + if (head16) { + *(u64 *) (orig_dst + head15) = head_val16_lo; + *(u64 *) (orig_dst + head15 + 8) = head_val16_hi; + } + + if (tail4) + *(u32 *) dst = tail_val4; + if (tail2) + *(u16 *) (dst + tail4) = tail_val2; + if (tail1) + *(u8 *) (dst + tail6) = tail_val1; +} +EXPORT_SYMBOL(__memcpy_fromio); + +static __always_inline void smallest_memcpy_toio(volatile char *__restrict dst, + const char *__restrict src, size_t n) +{ + char c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14; + + /* Ugly, but otherwise lcc won't do it this way */ + + if (n > 0) + c0 = src[0]; + if (n > 1) + c1 = src[1]; + if (n > 2) + c2 = src[2]; + if (n > 3) + c3 = src[3]; + if (n > 4) + c4 = src[4]; + if (n > 5) + c5 = src[5]; + if (n > 6) + c6 = src[6]; + if (n > 7) + c7 = src[7]; + if (n > 8) + c8 = src[8]; + if (n > 9) + c9 = src[9]; + if (n > 10) + c10 = src[10]; + if (n > 11) + c11 = src[11]; + if (n > 12) + c12 = src[12]; + if (n > 13) + c13 = src[13]; + if (n > 14) + c14 = src[14]; + + if (n > 0) + WRITE_ONCE(dst[0], c0); + if (n > 1) + WRITE_ONCE(dst[1], c1); + if (n > 2) + WRITE_ONCE(dst[2], c2); + if (n > 3) + WRITE_ONCE(dst[3], 
c3); + if (n > 4) + WRITE_ONCE(dst[4], c4); + if (n > 5) + WRITE_ONCE(dst[5], c5); + if (n > 6) + WRITE_ONCE(dst[6], c6); + if (n > 7) + WRITE_ONCE(dst[7], c7); + if (n > 8) + WRITE_ONCE(dst[8], c8); + if (n > 9) + WRITE_ONCE(dst[9], c9); + if (n > 10) + WRITE_ONCE(dst[10], c10); + if (n > 11) + WRITE_ONCE(dst[11], c11); + if (n > 12) + WRITE_ONCE(dst[12], c12); + if (n > 13) + WRITE_ONCE(dst[13], c13); + if (n > 14) + WRITE_ONCE(dst[14], c14); +} + +/* + * __memcpy_toio() - the same as __memcpy() but with ordered stores + * and makes sure that stores from the same long instruction land in + * the same page (to ensure ordering). + */ +void __memcpy_toio(volatile void __iomem *__restrict dst, const void *__restrict src, size_t n) +{ + int hwbug = HAS_HWBUG_UNALIGNED_LOADS; + unsigned long head, tail, head1, head2, head3, head4, head7, head8, + tail1, tail2, tail4, tail6; + u64 tmp8; + u32 tmp4; + u16 tmp2; + u8 tmp1; + + if (unlikely(n < 16)) { + smallest_memcpy_toio(dst, src, n); + + return; + } + + if (hwbug) { + prefetch_nospec_range(src, n); + __E2K_WAIT(_ld_c); + } + + /* Copy the head */ + + head = 16 - ((unsigned long) dst & 0xfUL); + + head1 = (unsigned long) dst & 1; /* dst & 1 == head & 1 */ + head2 = head & 2; + head3 = head & 3; + head4 = head & 4; + head7 = head & 7; + head8 = head & 8; + + if (head1) + tmp1 = *(u8 *) src; + if (head2) + tmp2 = *(u16 *) (src + head1); + if (head4) + tmp4 = *(u32 *) (src + head3); + if (head8) + tmp8 = *(u64 *) (src + head7); + + if (head1) + WRITE_ONCE(*(u8 *) dst, tmp1); + if (head2) + WRITE_ONCE(*(u16 *) (dst + head1), tmp2); + if (head4) + WRITE_ONCE(*(u32 *) (dst + head3), tmp4); + if (head8) + WRITE_ONCE(*(u64 *) (dst + head7), tmp8); + + src += head & 0xf; + dst = PTR_ALIGN(dst, 16); + n -= head & 0xf; + + do { + size_t length = (n >= 2 * 8192) ? 
8192 : (n & ~0x7UL); + + n -= length; + + fast_tagged_memory_copy((__force void *__restrict) dst, src, length, + TAGGED_MEM_STORE_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + TAGGED_MEM_LOAD_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + !hwbug); + + src += length; + dst += length; + } while (unlikely(n >= 8)); + + /* Copy the tail */ + tail = n; + + tail1 = tail & 1; + tail2 = tail & 2; + tail4 = tail & 4; + tail6 = tail & 6; + + if (tail4) + tmp4 = *(u32 *) src; + if (tail2) + tmp2 = *(u16 *) (src + tail4); + if (tail1) + tmp1 = *(u8 *) (src + tail6); + + if (tail4) + WRITE_ONCE(*(u32 *) dst, tmp4); + if (tail2) + WRITE_ONCE(*(u16 *) (dst + tail4), tmp2); + if (tail1) + WRITE_ONCE(*(u8 *) (dst + tail6), tmp1); +} +EXPORT_SYMBOL(__memcpy_toio); + + +/** + * tagged_memcpy_8() - copy memory along with tags + * + * All parameters must be 8-bytes aligned. + */ +void __tagged_memcpy_8(void *dst, const void *src, size_t n) +{ + int hwbug = HAS_HWBUG_UNALIGNED_LOADS; + + WARN_ONCE(((unsigned long) dst & 0x7) || ((unsigned long) src & 0x7) || + ((unsigned long) n & 0x7), + "BUG: bad parameters in tagged_memcpy_8: %lx %lx %lx\n", + dst, src, n); + + if (hwbug) { + prefetch_nospec_range(src, n); + __E2K_WAIT(_ld_c); + } + + /* Both src and dst are 8-bytes aligned. 
*/ + for (;;) { + /* Copy with 8192 bytes blocks */ + if (n >= 2 * 8192) { + fast_tagged_memory_copy(dst, src, 8192, + TAGGED_MEM_STORE_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + TAGGED_MEM_LOAD_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + !hwbug); + n -= 8192; + src += 8192; + dst += 8192; + } else { + fast_tagged_memory_copy(dst, src, n & ~0x7, + TAGGED_MEM_STORE_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + TAGGED_MEM_LOAD_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + !hwbug); + break; + } + }; +} +EXPORT_SYMBOL(__tagged_memcpy_8); +# endif /* BOOT */ +#endif /* __HAVE_ARCH_MEMCPY */ + +#ifdef __HAVE_ARCH_MEMMOVE +void *memmove(void *dst, const void *src, size_t count) +{ + char *tmp; + const char *s; + + prefetch(src); + + if (dst + count <= src || dst >= src + count) + return __memcpy(dst, src, count); + + if (dst <= src) { + tmp = dst; + s = src; + while (count--) + *tmp++ = *s++; + } else { + tmp = dst; + tmp += count; + s = src; + s += count; + while (count--) + *--tmp = *--s; + } + return dst; +} +EXPORT_SYMBOL(memmove); +#endif + +#ifdef __HAVE_ARCH_MEMCMP +int __memcmp(const void *p1, const void *p2, size_t n) +{ + u64 v1, v2; + unsigned long head, head1, head2, head3, head4, + tail, tail1, tail2, tail4, tail6; + + if (unlikely(n < 8)) { + const u8 *cp1 = p1, *cp2 = p2; + int i, diff; + + for (i = 0; i < n; i++) { + diff = cp1[i] - cp2[i]; + if (diff) + return diff; + } + + return 0; + } + + /* Compare the head */ + head = 8 - ((unsigned long) p1 & 0x7UL); + + head1 = (unsigned long) p1 & 1; /* dst & 1 == head & 1 */ + head2 = head & 2; + head3 = head & 3; + head4 = head & 4; + + if (head1) { + v1 = *(u8 *) p1; + v2 = *(u8 *) p2; + if (v1 != v2) + return v1 - v2; + } + if (head2) { + v1 = *(u16 *) (p1 + head1); + v2 = *(u16 *) (p2 + head1); + if (v1 != v2) + return (u32) __swab16(v1) - (u32) __swab16(v2); + } + if (head4) { + v1 = *(u32 *) (p1 + head3); + v2 = *(u32 *) (p2 + head3); + if (v1 
!= v2) + return ((u32) __builtin_bswap32(v1) > + (u32) __builtin_bswap32(v2)) ? 1 : -1; + } + + p2 += head & 0x7; + p1 = PTR_ALIGN(p1, 8); + n -= head & 0x7; + + /* At least p1 is aligned at 8-bytes boundary. + * Do the check with 8-bytes loads. */ + + for (; n >= 8; p1 += 8, p2 += 8, n -= 8) { + v1 = *(u64 *) p1; + v2 = *(u64 *) p2; + if (v1 != v2) + break; + } + + if (v1 != v2) + return (__builtin_bswap64(v1) > __builtin_bswap64(v2)) ? 1 : -1; + + tail = n; + + tail1 = tail & 1; + tail2 = tail & 2; + tail4 = tail & 4; + tail6 = tail & 6; + + if (tail4) { + v1 = *(u32 *) p1; + v2 = *(u32 *) p2; + if (v1 != v2) + return ((u32) __builtin_bswap32(v1) > + (u32) __builtin_bswap32(v2)) ? 1 : -1; + } + if (tail2) { + v1 = *(u16 *) (p1 + tail4); + v2 = *(u16 *) (p2 + tail4); + if (v1 != v2) + return (u32) __swab16(v1) - (u32) __swab16(v2); + } + if (tail1) { + v1 = *(u8 *) (p1 + tail6); + v2 = *(u8 *) (p2 + tail6); + if (v1 != v2) + return v1 - v2; + } + + return 0; +} +EXPORT_SYMBOL(__memcmp); + +#undef memcmp +int memcmp(const void *p1, const void *p2, size_t n) +{ + return __memcmp(p1, p2, n); +} +EXPORT_SYMBOL(memcmp); +#endif + +#ifdef __HAVE_ARCH_STRNLEN +size_t strnlen(const char *src, size_t count) +{ + const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; + long align, res = 0; + unsigned long c, max = count; + + if (unlikely(!count)) + return 0; + + align = (sizeof(long) - 1) & (unsigned long) src; + src -= align; + /* Check for overflows */ + max = ((long) max >= 0) ? 
(max + align) : -1ul; + + c = *(unsigned long *) src; + c |= aligned_byte_mask(align); + + for (;;) { + unsigned long data; + if (has_zero(c, &data, &constants)) { + data = prep_zero_mask(c, data, &constants); + data = create_zero_mask(data); + res += find_zero(data) - align; + if (res > count) + res = count; + return res; + } + res += sizeof(unsigned long); + if (unlikely(max <= sizeof(unsigned long))) + break; + max -= sizeof(unsigned long); + c = *(unsigned long *) (src + res); + } + res -= align; + + return count; +} +EXPORT_SYMBOL(strnlen); +#endif + + +#ifdef __HAVE_ARCH_STRLEN +/** + * strlen - Find the length of a string + * @s: The string to be sized + */ +size_t strlen(const char *src) +{ + const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; + long align, res = 0; + unsigned long c; + + align = (sizeof(long) - 1) & (unsigned long) src; + src -= align; + + c = *(unsigned long *) src; + c |= aligned_byte_mask(align); + + for (;;) { + unsigned long data; + if (has_zero(c, &data, &constants)) { + data = prep_zero_mask(c, data, &constants); + data = create_zero_mask(data); + return res + find_zero(data) - align; + } + res += sizeof(unsigned long); + c = *(unsigned long *) (src + res); + } +} +EXPORT_SYMBOL(strlen); +#endif diff --git a/arch/e2k/lib/usercopy.c b/arch/e2k/lib/usercopy.c new file mode 100644 index 000000000000..8f318c87e5d3 --- /dev/null +++ b/arch/e2k/lib/usercopy.c @@ -0,0 +1,644 @@ +/* linux/arch/e2k/lib/usercopy.c, v 1.7 15/08/2001. + * + * Copyright (C) 2001 MCST + */ + +#include +#include + +#include +#include +#include + + +/** + * fill_user: - Fill a block of memory in user space with given bytes. + * @to: Destination address, in user space. + * @n: Number of bytes to zero. + * @b: Byte to fill memory with. + * + * Zero a block of memory in user space. + * + * Returns number of bytes that could not be cleared. + * On success, this will be zero. 
+ */ +unsigned long +__fill_user(void __user *_to, const unsigned long _n, const u8 _b) +{ + u64 b = __builtin_e2k_pshufb(_b, _b, 0); + volatile long n = _n; + void *volatile to = _to; + unsigned long align = (unsigned long) _to & 0xf, head, tail, + head1, head2, head3, head4, head7, head8, + tail1, tail2, tail4, tail8, tail12, tail14; + void *to_aligned = PTR_ALIGN((void *)to, 16); + long i, count = _n; + + DebugUA("__clear_user %px : 0x%lx\n", to, n); + + head = 16 - align; + + /* save addr to return from trap if 'to' is bad */ + TRY_USR_PFAULT { + if (unlikely(n < 16)) { + int i; + + for (i = 0; i < n; i++) + ((u8 *) to)[i] = b; + + goto out; + } + + /* set the head */ + count -= head & 0xf; + + head1 = (unsigned long) to & 1; /* to & 1 == head & 1 */ + head2 = head & 2; + head3 = head & 3; + head4 = head & 4; + head7 = head & 7; + head8 = head & 8; + + if (head1) + *(u8 *) to = b; + if (head2) + *(u16 *) (to + head1) = b; + if (head4) + *(u32 *) (to + head3) = b; + if (head8) + *(u64 *) (to + head7) = b; + + for (i = 0; i < (count >> 3) - 1; i += 2) { + ((u64 *) to_aligned)[i] = b; + ((u64 *) to_aligned)[i + 1] = b; + } + + /* set the tail */ + to_aligned += count & ~0xfUL; + tail = count; + + tail1 = tail & 1; + tail2 = tail & 2; + tail4 = tail & 4; + tail8 = tail & 8; + tail12 = tail & 12; + tail14 = tail & 14; + + if (tail8) + *(u64 *) to_aligned = b; + if (tail4) + *(u32 *) (to_aligned + tail8) = b; + if (tail2) + *(u16 *) (to_aligned + tail12) = b; + if (tail1) + *(u8 *) (to_aligned + tail14) = b; + +out: + ; + } CATCH_USR_PFAULT { + /* + * There was a trap that could not be handled. + * Clearing all the area again with 1-byte stores + * is certainly slow, but this is an extremely + * unlikely case we do not care about. 
+ */ + volatile int i; + + TRY_USR_PFAULT { + for (i = 0; i < n; i++) { + WRITE_ONCE(((u8 *) to)[i], b); + E2K_CMD_SEPARATOR; + } + } CATCH_USR_PFAULT { + return n - i; + } END_USR_PFAULT + + BUG_ON(n != i); + } END_USR_PFAULT + + return 0; +} +EXPORT_SYMBOL(__fill_user); + +/** + * strncpy_from_user: - Copy a NUL terminated string from userspace. + * @dst: Destination address, in kernel space. This buffer must be at + * least @count bytes long. + * @src: Source address, in user space. + * @count: Maximum number of bytes to copy, including the trailing NUL. + * + * Copies a NUL-terminated string from userspace to kernel space. + * + * On success, returns the length of the string (not including the trailing + * NUL). + * + * If access to userspace fails, returns -EFAULT (some data may have been + * copied). + * + * If @count is smaller than the length of the string, copies @count bytes + * and returns @count. + */ + +long __strncpy_from_user(char *__restrict dst, + const char *__restrict src, long count) +{ + long i; + + TRY_USR_PFAULT { + DebugUA("to = %#llX, from = %#llX, count = %ld\n", + (u64) dst, (u64) src, count); + for (i = 0; likely(i < count); i++) + if (unlikely((dst[i] = src[i]) == 0)) + break; + } CATCH_USR_PFAULT { + /* It was trap */ + return -EFAULT; + } END_USR_PFAULT + + return i; +} +EXPORT_SYMBOL(__strncpy_from_user); + +/* + * Do a strnlen, return length of string *with* final '\0'. + * 'count' is the user-supplied count, while 'max' is the + * address space maximum. + * + * Return 0 for exceptions (which includes hitting the address + * space maximum), or 'count+1' if hitting the user-supplied + * maximum count. 
+ */ +static __always_inline long do_strnlen_user(const char __user *src, + unsigned long count, unsigned long max) +{ + const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; + long align, res = 0; + unsigned long c; + + /* + * Truncate 'max' to the user-specified limit, so that + * we only have one limit we need to check in the loop + */ + if (max > count) + max = count; + + /* + * Do everything aligned. But that means that we + * need to also expand the maximum.. + */ + align = (sizeof(long) - 1) & (unsigned long)src; + src -= align; + /* Cannot overflow - max is already limited by PAGE_OFFSET */ + max += align; + + c = *(unsigned long __user *) src; + c |= aligned_byte_mask(align); + + for (;;) { + unsigned long data; + if (has_zero(c, &data, &constants)) { + data = prep_zero_mask(c, data, &constants); + data = create_zero_mask(data); + res += find_zero(data) + 1 - align; + if (res > count) + res = count + 1; + return res; + } + res += sizeof(unsigned long); + if (unlikely(max <= sizeof(unsigned long))) + break; + max -= sizeof(unsigned long); + c = *(unsigned long __user *) (src + res); + } + res -= align; + + /* + * Uhhuh. We hit 'max'. But was that the user-specified maximum + * too? If so, return the marker for "too long". + */ + if (likely(res >= count)) + return count+1; + + /* + * Nope: we hit the address space limit, and we still had more + * characters the caller would have wanted. That's 0. + */ + return 0; +} + +/** + * strnlen_user: - Get the size of a user string INCLUDING final NUL. + * @str: The string to measure. + * @count: Maximum count (including NUL character) + * + * Context: User context only. This function may sleep. + * + * Get the size of a NUL-terminated string in user space. + * + * Returns the size of the string INCLUDING the terminating NUL. + * If the string is too long, returns 'count+1'. + * On exception (or invalid count), returns 0. 
+ */ +long strnlen_user(const char __user *str, long count) +{ + unsigned long max_addr = user_addr_max(), src_addr; + long res; + + /* We cannot accept count = -1 since than (count + 1) wuold overflow. */ + if (unlikely(count < 0)) + return 0; + + TRY_USR_PFAULT { + src_addr = (unsigned long) str; + if (likely(src_addr < max_addr)) { + unsigned long max = max_addr - src_addr; + res = do_strnlen_user(str, count, max); + } else { + res = 0; + } + } CATCH_USR_PFAULT { + return 0; + } END_USR_PFAULT + + return res; +} +EXPORT_SYMBOL(strnlen_user); + +noinline unsigned long raw_copy_from_user(void *_to, + const void __user *_from, unsigned long _size) +{ + int hwbug = cpu_has(CPU_HWBUG_UNALIGNED_LOADS); + const void __user *volatile from = _from; + void __user *volatile to = _to; + volatile unsigned long size = _size; + volatile unsigned long n = _size; + + TRY_USR_PFAULT { + unsigned long head, tail, head1, head3, head7, + tail8, tail12, tail14; + void *dst = to, *orig_dst = to; + const void *src = from; + u64 tmp8; + u32 tmp4; + u16 tmp2; + u8 tmp1; + + if (hwbug) { + prefetch_nospec_range((void *) src, size); + E2K_WAIT(_ld_c); + } + + if (unlikely(n < 16)) + goto copy_tail; + + /* Copy the head */ + + head = 16 - ((unsigned long) dst & 0xfUL); + + head1 = (unsigned long) dst & 1; /* dst & 1 == head & 1 */ + head3 = head & 3; + head7 = head & 7; + + if (head & 1) + tmp1 = *(u8 *) src; + if (head & 2) + tmp2 = *(u16 *) (src + head1); + if (head & 4) + tmp4 = *(u32 *) (src + head3); + if (head & 8) + tmp8 = *(u64 *) (src + head7); + + /* Make sure "n" is changed *after* the actual + * user accesses have been issued */ + E2K_CMD_SEPARATOR; + + src += head & 0xf; + dst = PTR_ALIGN(dst, 16); + n -= head & 0xf; + + do { + size_t length = (n >= 2 * 8192) ? 
8192 : (n & ~0xfUL); + size_t copied = 0; + + if (likely(length)) { + SET_USR_PFAULT("$.recovery_memcpy_fault"); + copied = fast_tagged_memory_copy((void *)dst, + (void *)src, length, + LDST_QWORD_FMT << LDST_REC_OPC_FMT_SHIFT | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + LDST_QWORD_FMT << LDST_REC_OPC_FMT_SHIFT | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + !hwbug); + RESTORE_USR_PFAULT; + } + + n -= copied; + + src += copied; + dst += copied; + + /* The first pagefault could have happened on prefetch, + * so we might end up doing a 1-byte-at-a-time copy of + * the whole area. But this is an extremely unlikely + * case, so we do not care. */ + if (unlikely(copied != length)) { + /* 'dst' cannot fault, so we can delay stores + * without worrying about possible page faults. */ + if (head & 1) + *(u8 *) orig_dst = tmp1; + if (head & 2) + *(u16 *) (orig_dst + head1) = tmp2; + if (head & 4) + *(u32 *) (orig_dst + head3) = tmp4; + if (head & 8) + *(u64 *) (orig_dst + head7) = tmp8; + + goto fallback; + } + } while (unlikely(n >= 16)); + + /* 'dst' cannot fault, so we can delay stores + * without worrying about possible page faults. 
*/ + if (head & 1) + *(u8 *) orig_dst = tmp1; + if (head & 2) + *(u16 *) (orig_dst + head1) = tmp2; + if (head & 4) + *(u32 *) (orig_dst + head3) = tmp4; + if (head & 8) + *(u64 *) (orig_dst + head7) = tmp8; + +copy_tail: + /* Copy the tail */ + tail = n; + + BUG_ON((u64) tail >= 16); + + tail8 = tail & 8; + tail12 = tail & 12; + tail14 = tail & 14; + + if (tail & 8) + tmp8 = *(u64 *) src; + if (tail & 4) + tmp4 = *(u32 *) (src + tail8); + if (tail & 2) + tmp2 = *(u16 *) (src + tail12); + if (tail & 1) + tmp1 = *(u8 *) (src + tail14); + + if (tail & 8) + *(u64 *) dst = tmp8; + if (tail & 4) + *(u32 *) (dst + tail8) = tmp4; + if (tail & 2) + *(u16 *) (dst + tail12) = tmp2; + if (tail & 1) + *(u8 *) (dst + tail14) = tmp1; + + /* Make sure "n" is changed *after* the actual + * user accesses have been issued */ + E2K_CMD_SEPARATOR; + n = 0; +fallback:; + } CATCH_USR_PFAULT { + } END_USR_PFAULT + + if (n) { + const char *from_c = from; + char *to_c = to; + int i; + + TRY_USR_PFAULT { + for (i = size - n; i < size; i++) { + WRITE_ONCE(to_c[i], READ_ONCE(from_c[i])); + E2K_CMD_SEPARATOR; + --n; + } + } CATCH_USR_PFAULT { + return n; + } END_USR_PFAULT + + BUG_ON(n); + } + + return 0; +} +EXPORT_SYMBOL(raw_copy_from_user); + +noinline unsigned long raw_copy_in_user(void __user *_to, + const void __user *_from, unsigned long _size) +{ + int hwbug = cpu_has(CPU_HWBUG_UNALIGNED_LOADS); + const void __user *volatile from = _from; + void __user *volatile to = _to; + volatile unsigned long size = _size; + volatile unsigned long n = _size; + + TRY_USR_PFAULT { + unsigned long head, tail, head1, head3, head7, + tail8, tail12, tail14; + void *dst = to; + const void *src = from; + u64 tmp8; + u32 tmp4; + u16 tmp2; + u8 tmp1; + + if (hwbug) { + prefetch_nospec_range((void *) src, size); + E2K_WAIT(_ld_c); + } + + if (unlikely(n < 16)) + goto copy_tail; + + /* Copy the head */ + + head = 16 - ((unsigned long) dst & 0xfUL); + + head1 = (unsigned long) dst & 1; /* dst & 1 == head & 1 */ 
+ head3 = head & 3; + head7 = head & 7; + + if (head & 1) + tmp1 = *(u8 *) src; + if (head & 2) + tmp2 = *(u16 *) (src + head1); + if (head & 4) + tmp4 = *(u32 *) (src + head3); + if (head & 8) + tmp8 = *(u64 *) (src + head7); + + if (head & 1) + *(u8 *) dst = tmp1; + if (head & 2) + *(u16 *) (dst + head1) = tmp2; + if (head & 4) + *(u32 *) (dst + head3) = tmp4; + if (head & 8) + *(u64 *) (dst + head7) = tmp8; + + /* Make sure "n" is changed *after* the actual + * user accesses have been issued */ + E2K_CMD_SEPARATOR; + + src += head & 0xf; + dst = PTR_ALIGN(dst, 16); + n -= head & 0xf; + + do { + size_t length = (n >= 2 * 8192) ? 8192 : (n & ~0xfUL); + size_t copied = 0; + + if (likely(length)) { + SET_USR_PFAULT("$.recovery_memcpy_fault"); + copied = fast_tagged_memory_copy((void *)dst, + (void *)src, length, + LDST_QWORD_FMT << LDST_REC_OPC_FMT_SHIFT | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + LDST_QWORD_FMT << LDST_REC_OPC_FMT_SHIFT | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + !hwbug); + RESTORE_USR_PFAULT; + } + + n -= copied; + + src += copied; + dst += copied; + + if (unlikely(copied != length)) + goto fallback; + } while (unlikely(n >= 16)); + +copy_tail: + /* Copy the tail */ + tail = n; + + BUG_ON((u64) tail >= 16); + + tail8 = tail & 8; + tail12 = tail & 12; + tail14 = tail & 14; + + if (tail & 8) + tmp8 = *(u64 *) src; + if (tail & 4) + tmp4 = *(u32 *) (src + tail8); + if (tail & 2) + tmp2 = *(u16 *) (src + tail12); + if (tail & 1) + tmp1 = *(u8 *) (src + tail14); + + if (tail & 8) + *(u64 *) dst = tmp8; + if (tail & 4) + *(u32 *) (dst + tail8) = tmp4; + if (tail & 2) + *(u16 *) (dst + tail12) = tmp2; + if (tail & 1) + *(u8 *) (dst + tail14) = tmp1; + + /* Make sure "n" is changed *after* the actual + * user accesses have been issued */ + E2K_CMD_SEPARATOR; + n = 0; +fallback:; + } CATCH_USR_PFAULT { + } END_USR_PFAULT + + if (n) { + const char *from_c = from; + char *to_c = to; + int i; + + TRY_USR_PFAULT { + for (i = size - n; i < 
size; i++) { + WRITE_ONCE(to_c[i], READ_ONCE(from_c[i])); + E2K_CMD_SEPARATOR; + --n; + } + } CATCH_USR_PFAULT { + return n; + } END_USR_PFAULT + + BUG_ON(n); + } + + return 0; +} +EXPORT_SYMBOL(raw_copy_in_user); + + +/* + * All arguments must be aligned + */ +unsigned long __copy_user_with_tags(void *to, const void *from, + unsigned long _n) +{ + int hwbug = cpu_has(CPU_HWBUG_UNALIGNED_LOADS); + void *volatile dst = to; + const void *volatile src = from; + volatile unsigned long n = _n; + + if (unlikely(((long) to & 0x7) || ((long) from & 0x7) || (_n & 0x7))) { + DebugUA(" copy_user_with_tags to=%px from=%px n=%ld\n", + to, from, n); + return _n; + } + + TRY_USR_PFAULT { + if (hwbug) { + prefetch_nospec_range((void *) from, n); + E2K_WAIT(_ld_c); + } + + do { + size_t length = (n >= 2 * 8192) ? 8192 : (n & ~0x7UL); + size_t copied; + + SET_USR_PFAULT("$.recovery_memcpy_fault"); + copied = fast_tagged_memory_copy(dst, src, length, + TAGGED_MEM_STORE_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + TAGGED_MEM_LOAD_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + !hwbug); + RESTORE_USR_PFAULT; + + n -= copied; + + src += copied; + dst += copied; + + if (unlikely(copied != length)) + break; + } while (unlikely(n >= 8)); + } CATCH_USR_PFAULT { + } END_USR_PFAULT + + return n; +} + +/* + * All arguments must be aligned + */ +unsigned long __fill_user_with_tags(void *to, unsigned long n, + unsigned long tag, unsigned long dw) +{ + unsigned long cleared; + + if (unlikely(((long) to & 0x7) || (n & 0x7))) { + DebugUA(" clear_user_with_tags to=%px n=%ld\n", to, n); + return n; + } + + SET_USR_PFAULT("$.recovery_memset_fault"); + cleared = fast_tagged_memory_set(to, dw, tag, n, + TAGGED_MEM_STORE_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT); + RESTORE_USR_PFAULT; + + return n - cleared; +} diff --git a/arch/e2k/mm/Makefile b/arch/e2k/mm/Makefile new file mode 100644 index 000000000000..99720d84122d --- /dev/null +++ 
b/arch/e2k/mm/Makefile @@ -0,0 +1,13 @@ +# +# Makefile for the linux E2K-specific parts of the memory manager. +# + +subdir-ccflags-y := -Werror -Wswitch -Wenum-compare + +obj-y := init.o fault.o mmap.o mmu.o memory.o ioremap.o pageattr.o \ + pgtable.o + +obj-$(CONFIG_SOFTWARE_SWAP_TAGS) += tag_mem.o +obj-$(CONFIG_SWAP) += page_io.o +obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o +obj-$(CONFIG_NUMA) += node_vmap.o diff --git a/arch/e2k/mm/fault.c b/arch/e2k/mm/fault.c new file mode 100644 index 000000000000..b097dacaf1d7 --- /dev/null +++ b/arch/e2k/mm/fault.c @@ -0,0 +1,5092 @@ +/* + * + * Copyright (C) 2001 MCST + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_SOFTWARE_SWAP_TAGS +#include +#endif +#ifdef CONFIG_PROTECTED_MODE +#include +#endif /* CONFIG_PROTECTED_MODE */ + +#ifdef CONFIG_MCST_RT +#include +#endif + +#include + + +/**************************** DEBUG DEFINES *****************************/ + +#define fault_dbg 0 +#define DEBUG_TRAP_CELLAR fault_dbg /* DEBUG_TRAP_CELLAR */ +#define DbgTC(...) DebugPrint(DEBUG_TRAP_CELLAR, ##__VA_ARGS__) +#define DEBUG_STATE_TC DEBUG_TRAP_CELLAR /* DEBUG_TRAP_CELLAR */ +#define PrintTC(a, b) \ + if(DEBUG_STATE_TC || DEBUG_CLW_FAULT) print_tc_state(a, b); + +#undef DEBUG_HS_MODE +#undef DebugHS +#define DEBUG_HS_MODE 0 /* Expand Hard Stack */ +#define DebugHS(...) DebugPrint(DEBUG_HS_MODE, ##__VA_ARGS__) + +#undef DEBUG_CS_MODE +#undef DebugCS +#define DEBUG_CS_MODE 0 /* Constrict Hard Stack */ +#define DebugCS(...) DebugPrint(DEBUG_CS_MODE, ##__VA_ARGS__) + +#undef DEBUG_US_EXPAND +#undef DebugUS +#define DEBUG_US_EXPAND 0 /* User stacks */ +#define DebugUS(...) 
DebugPrint(DEBUG_US_EXPAND, ##__VA_ARGS__) + +#undef DEBUG_VMA_MODE +#undef DebugVMA +#define DEBUG_VMA_MODE 0 /* Hard Stack Clone and Alloc */ +#define DebugVMA(...) DebugPrint(DEBUG_VMA_MODE, ##__VA_ARGS__) + +#undef DEBUG_USER_PTE_MODE +#undef DebugUPTE +#define DEBUG_USER_PTE_MODE 0 +#define DebugUPTE(...) DebugPrint(DEBUG_USER_PTE_MODE, ##__VA_ARGS__) + +#define DEBUG_PF_MODE fault_dbg /* Page fault */ +#define DebugPF(...) DebugPrint(DEBUG_PF_MODE, ##__VA_ARGS__) + +#define DEBUG_NAO_MODE 0 /* Not aligned operation */ +#define DebugNAO(...) DebugPrint(DEBUG_NAO_MODE, ##__VA_ARGS__) + +#define DEBUG_EXEC_MMU_OP 0 +#define DbgEXMMU(...) DebugPrint(DEBUG_EXEC_MMU_OP, ##__VA_ARGS__) + +#undef DEBUG_PGD_MODE +#undef DebugPGD +#define DEBUG_PGD_MODE 0 /* CPU PGD populate */ +#define DebugPGD(...) DebugPrint(DEBUG_PGD_MODE, ##__VA_ARGS__) + +#undef DEBUG_UF_MODE +#undef DebugUF +#define DEBUG_UF_MODE 0 /* VMA flags update */ +#define DebugUF(...) DebugPrint(DEBUG_UF_MODE, ##__VA_ARGS__) + +#undef DEBUG_CLW_FAULT +#undef DebugCLW +#define DEBUG_CLW_FAULT 0 +#define DebugCLW(...) DebugPrint(DEBUG_CLW_FAULT, ##__VA_ARGS__) + +#undef DEBUG_SRP_FAULT +#undef DebugSRP +#define DEBUG_SRP_FAULT 0 +#define DebugSRP(...) DebugPrint(DEBUG_SRP_FAULT, ##__VA_ARGS__) + +#undef DEBUG_SPRs_MODE +#define DEBUG_SPRs_MODE 0 /* stack pointers registers */ + +#undef DEBUG_RPR +#undef DebugRPR +#define DEBUG_RPR 0 /* Recovery point register */ +#define DebugRPR(...) DebugPrint(DEBUG_RPR, ##__VA_ARGS__) + +#undef DEBUG_RG_UPDATE +#undef DebugRG +#define DEBUG_RG_UPDATE 0 +#define DebugRG(...) DebugPrint(DEBUG_RG_UPDATE, ##__VA_ARGS__) + +#undef DEBUG_MULTI_THREAD_PM +#undef DebugMT_PM +#define DEBUG_MULTI_THREAD_PM 0 +#define DebugMT_PM(...) DebugPrint(DEBUG_MULTI_THREAD_PM, ##__VA_ARGS__) + +#undef DEBUG_KVM_PAGE_FAULT_MODE +#undef DebugKVMPF +#define DEBUG_KVM_PAGE_FAULT_MODE 0 /* KVM page fault debugging */ +#define DebugKVMPF(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_PAGE_FAULT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +/* + * Print pt_regs + */ +#define DEBUG_PtR_MODE 0 /* Print pt_regs */ +#define DebugPtR(pt_regs) \ + do { if (DEBUG_PtR_MODE) print_pt_regs(pt_regs); } while (0) + +/**************************** END of DEBUG DEFINES ***********************/ + +/************************* PAGE FAULT DEBUG for users ********************/ + +int debug_semi_spec = 0; + +static int __init semi_spec_setup(char *str) +{ + debug_semi_spec = 1; + return 1; +} + +__setup("debug_semi_spec", semi_spec_setup); + +int debug_pagefault = 0; + +static int __init pagefault_setup(char *str) +{ + debug_pagefault = 1; + return 1; +} + +__setup("debug_pagefault", pagefault_setup); + +/* abridged version */ +static int __init pagef_setup(char *str) +{ + debug_pagefault = 1; + return 1; +} +__setup("debug_pagef", pagef_setup); + +typedef union pf_mode { + struct { + u32 write : 1; + u32 spec : 1; + u32 user : 1; + u32 root : 1; + u32 empty : 1; + }; + u32 word; +} pf_mode_t; + +static int pf_force_sig_info(int si_signo, int si_code, unsigned long address, + struct pt_regs *regs); + +int show_unhandled_signals = 0; + +#undef GET_IP +#define GET_IP GET_IP_CR0_HI((regs)->crs.cr0_hi) +#define PFDBGPRINT(fmt, ...) \ +do { \ + if (debug_pagefault || DEBUG_PF_MODE) { \ + pr_notice("PAGE FAULT. 
" fmt ": IP=%px %s(pid=%d)\n" \ + ,##__VA_ARGS__, (void *) GET_IP, \ + current->comm, current->pid); \ + } \ +} while (0) + +/********************* END of PAGE FAULT DEBUG for users *****************/ + +int do_update_vm_area_flags(e2k_addr_t start, e2k_size_t len, + vm_flags_t flags_to_set, vm_flags_t flags_to_clear) +{ + unsigned long nstart, end, tmp; + struct vm_area_struct *vma, *next; + int error = 0; + + BUG_ON(flags_to_set & flags_to_clear); + + len = PAGE_ALIGN(len); + end = start + len; + if (end < start) + return -EINVAL; + if (end == start) + return 0; + vma = find_vma(current->mm, start); + if (vma == NULL) { + printk(KERN_ERR "Could not find VMA structure of user " + "virtual memory area: addr 0x%lx\n", + start); + BUG(); + } + if (vma->vm_start > start) { + printk(KERN_ERR "Invalid VMA structure start address of user " + "virtual memory area: addr 0x%lx (should be 0x%lx)\n", + vma->vm_start, start); + print_mmap(current); + BUG(); + } + if (vma->vm_start < start) { + DebugVMA("splitting vma at (0x%lx, 0x%lx) at 0x%lx\n", + vma->vm_start, vma->vm_end, start); + if (split_vma(current->mm, vma, start, 1)) + return -ENOMEM; + } + if (vma->vm_end > end) { + DebugVMA("splitting vma at " + "(0x%lx, 0x%lx) at 0x%lx\n", + vma->vm_start, vma->vm_end, end); + if (split_vma(current->mm, vma, end, 0)) + return -ENOMEM; + } + + + for (nstart = start ; ; ) { + unsigned long newflags; + + /* Here we know that vma->vm_start <= nstart < vma->vm_end. */ + + newflags = vma->vm_flags; + newflags |= flags_to_set; + newflags &= ~flags_to_clear; + + if (vma->vm_end >= end) { + if (vma->vm_end > end) { + DebugVMA("splitting vma at (0x%lx, 0x%lx) at 0x%lx\n", + vma->vm_start, vma->vm_end, end); + if (split_vma(current->mm, vma, end, 0)) + return -ENOMEM; + } + /* + * vm_flags and vm_page_prot are protected by + * the mmap_sem held in write mode. 
+ */ + vma->vm_flags = newflags; + break; + } + + tmp = vma->vm_end; + next = vma->vm_next; + /* + * vm_flags and vm_page_prot are protected by + * the mmap_sem held in write mode. + */ + vma->vm_flags = newflags; + nstart = tmp; + vma = next; + if (vma == NULL) { + pr_err("Could not find VMA structure of user virtual memory area: addr 0x%lx\n", + nstart); + BUG(); + } + if (vma->vm_start != nstart) { + pr_err("Invalid VMA structure start address of user virtual memory area: addr 0x%lx (should be 0x%lx)\n", + vma->vm_start, nstart); + BUG(); + } + } + return error; +} + +e2k_addr_t +user_address_to_pva(struct task_struct *tsk, e2k_addr_t address) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + e2k_addr_t offset; + e2k_addr_t ret; + struct vm_area_struct *vma; + bool already_locked = false; + + ret = check_is_user_address(tsk, address); + if (ret != 0) + return ret; + + if (unlikely(IS_GUEST_USER_ADDRESS_TO_PVA(tsk, address))) { + return guest_user_address_to_pva(tsk, address); + } + + if (!down_read_trylock(&tsk->mm->mmap_sem)) + already_locked = true; + + vma = find_vma(tsk->mm, address); + if (vma == NULL) { + pr_err("Could not find VMA structure of user " + "virtual memory area: addr 0x%lx\n", + address); + goto out; + } + + pgd = pgd_offset(vma->vm_mm, address); + if (pgd_none(*pgd) || pgd_bad(*pgd)) { + pr_err("PGD 0x%px = 0x%lx none or bad for address 0x%lx\n", + pgd, pgd_val(*pgd), address); + goto out; + } + + pud = pud_offset(pgd, address); + if (pud_none(*pud) || pud_bad(*pud)) { + pr_err("PUD 0x%px = 0x%lx none or bad for address 0x%lx\n", + pud, pud_val(*pud), address); + goto out; + } + + pmd = pmd_offset(pud, address); + if (pmd_huge(*pmd)) { + pte = (pte_t *) pmd; + offset = address & (get_pmd_level_page_size() - 1); + } else { + if (pmd_none(*pmd) || pmd_bad(*pmd)) { + pr_err("PMD 0x%px = 0x%016lx none or bad for address 0x%016lx\n", + pmd, pmd_val(*pmd), address); + goto out; + } + pte = pte_offset_map(pmd, address); + offset = address 
& (get_pte_level_page_size() - 1); + } + + if (pte_none(*pte)) { + pr_err("PTE 0x%px = 0x%016lx none for address 0x%016lx\n", + pte, pte_val(*pte), address); + goto out; + } + + if (!already_locked) + up_read(&tsk->mm->mmap_sem); + return (e2k_addr_t)__va((pte_pfn(*pte) << PAGE_SHIFT) | offset); + +out: + if (!already_locked) + up_read(&tsk->mm->mmap_sem); + return -1; +} + +pte_t *get_user_address_pte(struct vm_area_struct *vma, e2k_addr_t address) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + if (address < vma->vm_start || address >= vma->vm_end) { + DebugUPTE("User address 0x%lx is not from VMA start 0x%lx " + "end 0x%lx\n", + address, vma->vm_start, vma->vm_end); + return NULL; + } + pgd = pgd_offset(vma->vm_mm, address); + if (pgd_none(*pgd) && pgd_valid(*pgd)) { + DebugUPTE("PGD 0x%px = 0x%lx only valid for address 0x%lx\n", + pgd, pgd_val(*pgd), address); + return (pte_t *)pgd; + } + if (pgd_none(*pgd) || pgd_bad(*pgd)) { + DebugUPTE("PGD 0x%px = 0x%lx none or bad for address 0x%lx\n", + pgd, pgd_val(*pgd), address); + return NULL; + } + pud = pud_offset(pgd, address); + if (pud_huge(*pud)) + return (pte_t *) pud; + if (pud_none(*pud) && pud_valid(*pud)) { + DebugUPTE("PUD 0x%px = 0x%lx only valid for address 0x%lx\n", + pud, pud_val(*pud), address); + return (pte_t *)pud; + } + if (pud_none(*pud) || pud_bad(*pud)) { + DebugUPTE("PUD 0x%px = 0x%lx none or bad for address 0x%lx\n", + pud, pud_val(*pud), address); + return NULL; + } + pmd = pmd_offset(pud, address); + if (pmd_huge(*pmd)) + return (pte_t *) pmd; + if (pmd_none(*pmd) && pmd_valid(*pmd)) { + DebugUPTE("PMD 0x%px = 0x%016lx only valid for address " + "0x%016lx\n", + pmd, pmd_val(*pmd), address); + return (pte_t *)pmd; + } + if (pmd_none(*pmd) || pmd_bad(*pmd)) { + DebugUPTE("PMD 0x%px = 0x%016lx none or bad for address " + "0x%016lx\n", + pmd, pmd_val(*pmd), address); + return NULL; + } + /* pte */ + pte = pte_offset_map(pmd, address); + if (pte_none(*pte)) { + DebugUPTE("PTE 
0x%px = 0x%016lx none for address 0x%016lx\n", + pte, pte_val(*pte), address); + } + return pte; +} + +/* + * Convrert kernel virtual address to physical + * (convertion based on page table lookup) + */ +e2k_addr_t +kernel_address_to_pva(e2k_addr_t address) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + e2k_size_t page_size; + + if (address < TASK_SIZE) { + pr_alert("Address 0x%016lx is not kernel address " + "to get PFN's\n", + address); + return -1; + } + if (unlikely(IS_GUEST_ADDRESS_TO_HOST(address))) { + if (address >= KERNEL_BASE && address <= KERNEL_END) { + return kernel_va_to_pa(address); + } else { + pr_alert("Address 0x%016lx is host kernel address\n", + address); + return -1; + } + } + + pgd = pgd_offset_k(address); + if (pgd_none_or_clear_bad(pgd)) { + pr_alert("PGD 0x%px = 0x%016lx none or bad for address 0x%016lx\n", + pgd, pgd_val(*pgd), address); + return -1; + } + if (kernel_pgd_huge(*pgd)) { + pte = (pte_t *)pgd; + page_size = get_pgd_level_page_size(); + goto huge_pte; + } + + /* pud */ + pud = pud_offset(pgd, address); + if (kernel_pud_huge(*pud)) { + pte = (pte_t *)pud; + page_size = get_pud_level_page_size(); + goto huge_pte; + } + if (pud_none_or_clear_bad(pud)) { + pr_alert("PUD 0x%px = 0x%016lx none or bad for address 0x%016lx\n", + pud, pud_val(*pud), address); + return -1; + } + + /* pmd */ + pmd = pmd_offset(pud, address); + if (kernel_pmd_huge(*pmd)) { + pte = (pte_t *)pmd; + page_size = get_pmd_level_page_size(); + goto huge_pte; + } + if (pmd_none_or_clear_bad(pmd)) { + pr_alert("PMD 0x%px = 0x%016lx none or bad for address 0x%016lx\n", + pmd, pmd_val(*pmd), address); + return -1; + } + + /* pte */ + pte = pte_offset_kernel(pmd, address); + page_size = get_pte_level_page_size(); +huge_pte: + if (pte_none(*pte)) { + pr_alert("PTE 0x%px:0x%016lx none for address 0x%016lx\n", + pte, pte_val(*pte), address); + return -1; + } + if (!pte_present(*pte)) { + pr_alert("PTE 0x%px = 0x%016lx is pte of swaped page " + "for address 
0x%016lx\n", + pte, pte_val(*pte), address); + return -1; + } + return (e2k_addr_t)__va((pte_pfn(*pte) << PAGE_SHIFT) | + (address & (page_size - 1))); +} + +unsigned long node_kernel_address_to_phys(int node, e2k_addr_t addr) +{ + e2k_addr_t phys_addr; + e2k_size_t page_size; + pgd_t *pgd = node_pgd_offset_kernel(node, addr); + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + if (unlikely(pgd_none_or_clear_bad(pgd))) { + pr_alert("node_kernel_address_to_phys(): pgd_none\n"); + return -EINVAL; + } + if (kernel_pgd_huge(*pgd)) { + pte = (pte_t *)pgd; + page_size = get_pgd_level_page_size(); + goto huge_pte; + } + pud = pud_offset(pgd, addr); + if (kernel_pud_huge(*pud)) { + pte = (pte_t *)pud; + page_size = get_pud_level_page_size(); + goto huge_pte; + } + if (unlikely(pud_none_or_clear_bad(pud))) { + pr_alert("node_kernel_address_to_phys(): pud_none\n"); + return -EINVAL; + } + pmd = pmd_offset(pud, addr); + if (kernel_pmd_huge(*pmd)) { + pte = (pte_t *)pmd; + page_size = get_pmd_level_page_size(); + goto huge_pte; + } + if (unlikely(pmd_none_or_clear_bad(pmd))) { + pr_alert("node_kernel_address_to_phys(): pmd_none\n"); + return -EINVAL; + } + + pte = pte_offset_kernel(pmd, addr); + page_size = get_pte_level_page_size(); + +huge_pte: + if (unlikely(pte_none(*pte) || !pte_present(*pte))) { + pr_alert("node_kernel_address_to_phys(): pte_none\n"); + return -EINVAL; + } + + phys_addr = _PAGE_PFN_TO_PADDR(pte_val(*pte)) + + (addr & (page_size - 1)); + + return phys_addr; +} + +static const char *get_memory_type_string(pte_t pte) +{ + char *memory_types_v6[8] = { "General Cacheable", + "General nonCacheable", "Reserved-2", "Reserved-3", + "External Prefetchable", "Reserved-5", + "External nonPrefetchable", "External Configuration" }; + + if (MMU_IS_PT_V6()) + return memory_types_v6[_PAGE_MT_GET_VAL(pte_val(pte))]; + + if (!(pte_val(pte) & ~_PAGE_VALID_V2)) + return ""; + + if ((pte_val(pte) & _PAGE_CD_MASK_V2) != _PAGE_CD_MASK_V2) + return "cacheable"; + + if ((pte_val(pte) & 
_PAGE_PWT_V2)) + return "uncacheable"; + else + return "write_combine"; +} + +e2k_addr_t print_address_ptes(pgd_t *pgdp, e2k_addr_t address, int kernel) +{ + pgd_t pgd = *pgdp; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + e2k_addr_t pa = 0; + e2k_size_t page_size; + + if (kernel && kernel_pgd_huge(pgd)) { + pte = (pte_t *)pgdp; + page_size = get_pgd_level_page_size(); + pr_alert("%s PGD 0x%px = 0x%016lx is PTE of huge page\n", + (kernel) ? "kernel" : "user", pgdp, pgd_val(pgd)); + goto huge_pte; + } + if (pgd_none(pgd) && (kernel || !pgd_valid(pgd)) || pgd_bad(pgd)) { + pr_alert("%s PGD 0x%px = 0x%016lx none or bad " + "for address 0x%016lx\n", + (kernel) ? "kernel" : "user", pgdp, pgd_val(pgd), + address); + return pa; + } + pr_alert("%s PGD 0x%px = 0x%016lx valid for address 0x%016lx\n", + (kernel) ? "kernel" : "user", pgdp, pgd_val(pgd), + address); + if (pgd_none(pgd)) + return pa; + + /* pud */ + pud = pud_offset(pgdp, address); + if (kernel && kernel_pud_huge(*pud)) { + pte = (pte_t *)pud; + page_size = get_pud_level_page_size(); + pr_alert("PUD 0x%px = 0x%016lx is PTE of huge page\n", + pud, pud_val(*pud)); + goto huge_pte; + } + if (pud_none(*pud) && (kernel || !pud_valid(*pud)) || pud_bad(*pud)) { + pr_alert("PUD 0x%px = 0x%016lx none or bad " + "for address 0x%016lx\n", + pud, pud_val(*pud), address); + return pa; + } + pr_alert("PUD 0x%px = 0x%016lx valid for address 0x%016lx\n", + pud, pud_val(*pud), address); + if (pud_none(*pud)) + return pa; + + /* pmd */ + pmd = pmd_offset(pud, address); + if (kernel && kernel_pmd_huge(*pmd) || !kernel && pmd_huge(*pmd)) { + pte = (pte_t *)pmd; + page_size = get_pmd_level_page_size(); + pr_alert("PMD 0x%px = 0x%016lx is PTE of huge page\n", + pmd, pmd_val(*pmd)); + goto huge_pte; + } + if (pmd_none(*pmd) && (kernel || !pmd_valid(*pmd)) || pmd_bad(*pmd)) { + pr_alert("PMD 0x%px = 0x%016lx none or bad for " + "address 0x%016lx\n", + pmd, pmd_val(*pmd), address); + return pa; + } + pr_alert("PMD 0x%px = 0x%016lx valid 
for address 0x%016lx\n", + pmd, pmd_val(*pmd), address); + if (pmd_none(*pmd)) + return pa; + + /* pte */ + pte = (kernel) ? + pte_offset_kernel(pmd, address) : pte_offset_map(pmd, address); + page_size = get_pte_level_page_size(); + +huge_pte: + if (pte_none(*pte)) { + pr_alert("PTE 0x%px = 0x%016lx none for address 0x%016lx\n", + pte, pte_val(*pte), address); + } else if (!pte_present(*pte)) { + pr_alert("PTE 0x%px = 0x%016lx is pte of not present page for address 0x%016lx\n", + pte, pte_val(*pte), address); + } else { + pr_alert("PTE 0x%px = 0x%016lx %s & present for address 0x%016lx %s\n", + pte, pte_val(*pte), + pte_valid(*pte) ? "valid" : "not valid", + address, get_memory_type_string(*pte)); + + pa = _PAGE_PFN_TO_PADDR(pte_val(*pte)) + + (address & (page_size - 1)); + } + + return pa; +} + + +void print_vma_and_ptes(struct vm_area_struct *vma, e2k_addr_t address) +{ + pgd_t *pgdp; + + printk("VMA 0x%px : start 0x%016lx, end 0x%016lx, flags 0x%lx, " + "prot 0x%016lx\n", + vma, vma->vm_start, vma->vm_end, vma->vm_flags, + pgprot_val(vma->vm_page_prot)); + + pgdp = pgd_offset(vma->vm_mm, address); + print_address_ptes(pgdp, address, 0); +} + +static e2k_addr_t +__print_user_address_ptes(struct mm_struct *mm, e2k_addr_t address) +{ + pgd_t *pgdp; + e2k_addr_t pa = 0; + + if (mm) { + pgdp = pgd_offset(mm, address); + print_address_ptes(pgdp, address, 0); + + /* Only for guest: print ptes of guest user address on host. 
*/ + /* Guest page table is pseudo PT and only host PT is used */ + /* to translate any guest addresses */ + print_host_user_address_ptes(mm, address); + } + return pa; +} + +e2k_addr_t print_user_address_ptes(struct mm_struct *mm, e2k_addr_t address) +{ + if (address >= TASK_SIZE) { + pr_info("Address 0x%016lx is not user address to print PTE's\n", + address); + return 0; + } + return __print_user_address_ptes(mm, address); +} + +e2k_addr_t print_kernel_address_ptes(e2k_addr_t address) +{ + pgd_t *pgdp; + e2k_addr_t pa = 0; + + if (address < TASK_SIZE) { + printk("Address 0x%016lx is not kernel address to print PTE's\n", + address); + return pa; + } + pgdp = pgd_offset_k(address); + pa = print_address_ptes(pgdp, address, 1); + return pa; +} + +void print_address_page_tables(unsigned long address, int last_level_only) +{ + struct mm_struct *mm = current->mm; + + if (address < TASK_SIZE) + print_user_address_ptes(mm, address); + else + print_kernel_address_ptes(address); + + if (last_level_only) + return; + + if (address < TASK_SIZE) { + print_user_address_ptes(mm, + pte_virt_offset(round_down(address, PTE_SIZE))); + print_user_address_ptes(mm, + pmd_virt_offset(round_down(address, PMD_SIZE))); + print_user_address_ptes(mm, + pud_virt_offset(round_down(address, PUD_SIZE))); + } else { + print_kernel_address_ptes( + pte_virt_offset(round_down(address, PTE_SIZE))); + print_kernel_address_ptes( + pmd_virt_offset(round_down(address, PMD_SIZE))); + print_kernel_address_ptes( + pud_virt_offset(round_down(address, PUD_SIZE))); + } +} + +int is_kernel_address_valid(e2k_addr_t address) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + e2k_size_t page_size; + + if (address < TASK_SIZE) { + printk("Address 0x%016lx is not kernel address to get PFN's\n", + address); + return 0; + } + pgd = pgd_offset_k(address); + if (pgd_none_or_clear_bad(pgd)) { + printk("\n============================================================\n"); + printk("PGD 0x%px = 0x%016lx none or bad 
for address 0x%016lx\n", + pgd, pgd_val(*pgd), address); + return 0; + } + if (kernel_pgd_huge(*pgd)) { + pte = (pte_t *)pgd; + page_size = get_pgd_level_page_size(); + goto huge_pte; + } + + /* pud */ + pud = pud_offset(pgd, address); + if (pud_none_or_clear_bad(pud)) { + printk("\n============================================================\n"); + printk("PUD 0x%px = 0x%016lx none or bad for address 0x%016lx\n", + pud, pud_val(*pud), address); + return 0; + } + if (kernel_pud_huge(*pud)) { + pte = (pte_t *)pud; + page_size = get_pud_level_page_size(); + goto huge_pte; + } + + /* pmd */ + pmd = pmd_offset(pud, address); + if (pmd_none_or_clear_bad(pmd)) { + printk("\n============================================================\n"); + printk("PMD 0x%px = 0x%016lx none or bad for address 0x%016lx\n", + pmd, pmd_val(*pmd), address); + return 0; + } + if (kernel_pmd_huge(*pmd)) { + pte = (pte_t *)pmd; + page_size = get_pmd_level_page_size(); + goto huge_pte; + } + + /* pte */ + pte = pte_offset_kernel(pmd, address); + page_size = get_pte_level_page_size(); + +huge_pte: + if (pte_none(*pte)) { + printk("\n============================================================\n"); + printk("PTE: 0x%px = 0x%016lx for address 0x%016lx\n", + pte, pte_val(*pte), address); + return 0; + } + if (!pte_present(*pte)) { + printk("\n============================================================\n"); + printk("PTE 0x%px = 0x%016lx is pte of swaped page for address 0x%016lx\n", + pte, pte_val(*pte), address); + return 0; + } + return 1; +} + +#if 0 +void print_vma_node_ptes(struct vm_area_struct *vma, e2k_addr_t address) +{ + print_vma_and_ptes(vma, address); + + if (MMU_IS_SEPARATE_PT()) + return; + +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT + if (THERE_IS_DUP_KERNEL) { + pgd_t *pgdp; + + pgdp = cpu_kernel_root_pt + pgd_index(address); + pr_info("CPU #%d kernel root page table:\n", + smp_processor_id()); + print_address_ptes(pgdp, address, 0); + } +#endif /* 
CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ +} +#endif + +#ifdef CONFIG_NUMA +static void +print_kernel_address_all_nodes_ptes(e2k_addr_t address) +{ + pgd_t *pgdp; + int cpu; + int nid = numa_node_id(); + + pgdp = node_pgd_offset_kernel(nid, address); + pr_info("NODE #%d kernel root page table:\n", nid); + print_address_ptes(pgdp, address, 1); + + for_each_online_cpu(cpu) { + pgdp = the_cpu_pg_dir(cpu) + pgd_index(address); + pr_info("CPU #%d kernel root page table:\n", cpu); + print_address_ptes(pgdp, address, 1); + } +} +#else /* !CONFIG_NUMA */ +static void +print_kernel_address_all_nodes_ptes(e2k_addr_t address) +{ +} +#endif /* CONFIG_NUMA */ + +struct page *e2k_virt_to_page(const void *kaddrp) +{ + e2k_addr_t kaddr = (e2k_addr_t)kaddrp; + + if (kaddr >= PAGE_OFFSET && kaddr < PAGE_OFFSET + MAX_PM_SIZE) { + return phys_to_page(__pa(kaddrp)); + } + if (kaddr >= KERNEL_BASE && kaddr <= KERNEL_END) { + return phys_to_page(vpa_to_pa(kernel_va_to_pa(kaddrp))); + } + if (is_vmalloc_addr(kaddrp)) { + return vmalloc_to_page(kaddrp); + } + if (kaddr < TASK_SIZE) { + panic("%s(): address 0x%px is not kernel address\n", + __func__, kaddrp); + } + panic("%s(): address 0x%px is invalid kernel address\n", + __func__, kaddrp); +} + +EXPORT_SYMBOL(e2k_virt_to_page); + +#ifndef CONFIG_CLW_ENABLE +#define terminate_CLW_operation(regs) +#else +struct clw_clear_user_args { + void __user *uaddr; + unsigned long size; + struct mm_struct *mm; +}; + +static long clw_clear_user_worker(void *pargs) +{ + struct clw_clear_user_args *args = pargs; + unsigned long ret; + + use_mm(args->mm); + ret = clear_user_with_tags(args->uaddr, args->size, ETAGEWD); + unuse_mm(args->mm); + return ret; +} + +static unsigned long clw_clear_user(const struct pt_regs *regs, + void __user *uaddr, unsigned long size) +{ + struct clw_clear_user_args args = { + .uaddr = uaddr, + .size = size, + .mm = current->mm + }; + unsigned long ret; + + if (!cpu_has(CPU_HWBUG_CLW_STALE_L1_ENTRY)) + return 
clear_user_with_tags(uaddr, size, ETAGEWD); + + migrate_disable(); + if (likely(smp_processor_id() == regs->clw_cpu)) { + /* Fast path - we are already on needed cpu */ + ret = clear_user_with_tags(uaddr, size, ETAGEWD); + } else { + /* Slow path - let kworker do the work on proper cpu */ + ret = work_on_cpu(regs->clw_cpu, clw_clear_user_worker, &args); + } + migrate_enable(); + + return ret; +} + +static int terminate_CLW_operation(const struct pt_regs *regs) +{ + e2k_addr_t us_cl_up = regs->us_cl_up; + e2k_addr_t us_cl_b = regs->us_cl_b; + const clw_reg_t *us_cl_m = regs->us_cl_m; + unsigned long us_addr; + u64 bit_no, mask_word, mask_bit; + int bmask; + + DebugCLW("started for us_cl_up 0x%lx us_cl_b 0x%lx\n", + us_cl_up, us_cl_b); + for (bmask = 0; bmask < CLW_MASK_WORD_NUM; bmask++) + DebugCLW(" mask[%d] = 0x%016lx\n", bmask, us_cl_m[bmask]); + + if (us_cl_up <= us_cl_b) { + DebugCLW("nothing to clean\n"); + return 0; + } + + for (us_addr = us_cl_up; us_addr > us_cl_b && + (us_cl_up - us_addr) < CLW_BYTES_PER_MASK; + us_addr -= CLW_BYTES_PER_BIT) { + DebugCLW("current US address 0x%lx\n" + "check bit-mask #%lld word %lld bit in word %lld\n", + us_addr, bit_no, mask_word, mask_bit); + + bit_no = (us_addr / CLW_BYTES_PER_BIT) & 0xffUL; + mask_word = bit_no / (sizeof (*us_cl_m) * 8); + mask_bit = bit_no % (sizeof (*us_cl_m) * 8); + + if (!(us_cl_m[mask_word] & (1UL << mask_bit))) { + DebugCLW("clean stack area from 0x%lx to 0x%lx\n", + us_addr, us_addr + CLW_BYTES_PER_BIT); + if (clw_clear_user(regs, (void __user *) us_addr, + CLW_BYTES_PER_BIT)) + return -EFAULT; + } + } + if (us_addr <= us_cl_b) { + DebugCLW("nothing to clean outside of area covered by bit-mask\n"); + return 0; + } + + DebugCLW("clean stack area from 0x%lx to 0x%lx, 0x%lx bytes\n", + us_cl_b + CLW_BYTES_PER_BIT, + us_addr + CLW_BYTES_PER_BIT, us_addr - us_cl_b); + + if (clw_clear_user(regs, (void __user *) (us_cl_b + CLW_BYTES_PER_BIT), + us_addr - us_cl_b)) + return -EFAULT; + + return 0; +} 
+#endif /* CONFIG_CLW_ENABLE */ + + +/* + * To increment or decrease user data stack size we need to update + * data stack size in the USD register and in the chain registers + * (CR1_hi.ussz field) into all user pt_regs structures of the process + */ +static int fix_all_user_stack_pt_regs(pt_regs_t *regs, e2k_size_t delta_sp, + bool incr) +{ + unsigned long ts_flag; + int ret = 0, regs_num = 0; + struct pt_regs __user *u_regs; + e2k_usd_hi_t usd_hi; + e2k_cr1_hi_t cr1_hi; + + DebugUS("started with pt_regs 0x%px, delta sp 0x%lx, incr %d\n", + regs, delta_sp, incr); + BUG_ON(!regs); + + usd_hi = regs->stacks.usd_hi; + if (incr) + usd_hi.USD_hi_size += delta_sp; + else + usd_hi.USD_hi_size -= delta_sp; + regs->stacks.usd_hi = usd_hi; + + cr1_hi = regs->crs.cr1_hi; + if (incr) + AS(cr1_hi).ussz += (delta_sp >> 4); + else + AS(cr1_hi).ussz -= (delta_sp >> 4); + regs->crs.cr1_hi = cr1_hi; + + ++regs_num; + + /* + * All other user pt_regs (except current, i.e. thread_info->pt_regs) + * are located in current thread's signal stack in userspace. 
+ */ + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + signal_pt_regs_for_each(u_regs) { + ret = __get_user(AW(usd_hi), &AW(u_regs->stacks.usd_hi)); + if (ret) + break; + + if (incr) + usd_hi.USD_hi_size += delta_sp; + else + usd_hi.USD_hi_size -= delta_sp; + + ret = __put_user(AW(usd_hi), &AW(u_regs->stacks.usd_hi)); + if (ret) + break; + + ret = __get_user(AW(cr1_hi), &AW(u_regs->crs.cr1_hi)); + if (ret) + break; + + if (incr) + AS(cr1_hi).ussz += (delta_sp >> 4); + else + AS(cr1_hi).ussz -= (delta_sp >> 4); + + ret = __put_user(AW(cr1_hi), &AW(u_regs->crs.cr1_hi)); + if (ret) + break; + + ++regs_num; + } + clear_ts_flag(ts_flag); + + DebugUS("%d pt_regs structures (USD & CR1_hi.ussz) were corrected to decrement user stack sizes\n", + regs_num); + + return ret; +} + + +struct update_chain_params { + unsigned long delta_sp; + stack_frame_t frames_type; + unsigned long prev_size; + unsigned long corrected_size; + unsigned long prev_frame_addr; + bool incr; +}; + +static int update_chain_stack_ussz(e2k_mem_crs_t *frame, + unsigned long real_frame_addr, + unsigned long corrected_frame_addr, int flags, void *arg) +{ + struct update_chain_params *params = arg; + const unsigned long delta_sp = params->delta_sp; + const stack_frame_t frames_type = params->frames_type; + const bool incr = params->incr; + stack_frame_t cur_frame; + e2k_cr1_hi_t cr1_hi; + int ret; + + cur_frame = get_stack_frame_type_IP(frame->cr0_hi, frame->cr1_lo, false); + if (frames_type != cur_frame) { + /* + * It is a kernel function in the user PC stack. The data stack + * of this function is out of this stack and places in separate + * user or kernel space for each process. + * Do not correct this chain register. 
+ */ + goto next; + } + + cr1_hi = frame->cr1_hi; + + if (frames_type == user_frame_type) { + unsigned long next_size, hw_delta, real_delta; + int correction; + + params->corrected_size += 0x100000000L * + getsp_adj_get_correction(corrected_frame_addr); + + next_size = ((u32) AS(cr1_hi).ussz << 4UL) + + params->corrected_size; + if (incr) + next_size += delta_sp; + else + next_size -= delta_sp; + + hw_delta = (next_size & 0xffffffffUL) - + (params->prev_size & 0xffffffffUL); + real_delta = next_size - params->prev_size; + params->prev_size = next_size; + + WARN_ONCE((real_delta - hw_delta) & 0xffffffffUL, + "Bad data stack parameters"); + correction = (real_delta - hw_delta) >> 32UL; + + ret = getsp_adj_set_correction(correction, + corrected_frame_addr); + if (ret) + return ret; + + if (correction) { + e2k_cr1_lo_t prev_cr1_lo; + + if (WARN_ONCE(params->prev_frame_addr == -1UL, + "trying to apply stack correction to the last frame\n")) + return -ESRCH; + + if (flags & PCF_FLUSH_NEEDED) + NATIVE_FLUSHC; + ret = get_cr1_lo(&prev_cr1_lo, + params->prev_frame_addr, 0); + if (ret) + return ret; + AS(prev_cr1_lo).lw = 1; + if (flags & PCF_FLUSH_NEEDED) + NATIVE_FLUSHC; + ret = put_cr1_lo(prev_cr1_lo, + params->prev_frame_addr, 0); + if (ret) + return ret; + } + } + + + if (incr) + cr1_hi.CR1_hi_ussz += (delta_sp >> 4); + else + cr1_hi.CR1_hi_ussz -= (delta_sp >> 4); + + if (flags & PCF_FLUSH_NEEDED) + NATIVE_FLUSHC; + ret = put_cr1_hi(cr1_hi, real_frame_addr, 0); + if (ret) + return ret; + +next: + params->prev_frame_addr = real_frame_addr; + + return 0; +} + +static int fix_all_chain_stack_sz(e2k_size_t delta_sp, + stack_frame_t frames_type, bool incr) +{ + struct update_chain_params params; + long ret; + + DebugUS("started with PCSP stack base 0x%px, delta sp 0x%lx, incr %d\n", + CURRENT_PCS_BASE(), delta_sp, incr); + + if (frames_type != user_frame_type) + return -EINVAL; + + params.delta_sp = delta_sp; + params.frames_type = frames_type; + params.prev_size = 0; + 
params.corrected_size = 0; + params.prev_frame_addr = -1UL; + params.incr = incr; + + ret = parse_chain_stack(PCS_USER, NULL, update_chain_stack_ussz, ¶ms); + + return (IS_ERR_VALUE(ret)) ? ret : 0; +} + + +/** + * constrict_user_data_stack - handles user data stack underflow + * @regs: pointer to pt_regs + * @incr: value of decrement in bytes + */ +int constrict_user_data_stack(struct pt_regs *regs, unsigned long incr) +{ + thread_info_t *ti = current_thread_info(); + u64 sp, stack_size; + int ret; + + DebugUS("started\n"); + + calculate_e2k_dstack_parameters(®s->stacks, &sp, &stack_size, NULL); + + DebugUS("base 0x%llx, size 0x%llx, top 0x%lx, bottom 0x%lx, max current size 0x%lx\n", + sp, stack_size, ti->u_stack.top, ti->u_stack.bottom, + ti->u_stack.size); + + /* + * We coudn't detect all underflows, but let's try to do something... + */ + if (ti->u_stack.top < sp + incr) { + pr_info_ratelimited("constrict_user_data_stack(): user data stack underflow\n"); + return -ENOMEM; + } + + ret = fix_all_user_stack_pt_regs(regs, stack_size, false); + if (ret) + return ret; + + if (ret = fix_all_chain_stack_sz(stack_size, user_frame_type, false)) { + pr_info_ratelimited("constrict_user_data_stack(): could not correct user stack sizes in chain stack: ret %d\n", + ret); + return ret; + } + + return 0; +} + +/** + * expand_user_data_stack - handles user data stack overflow + * @regs: pointer to pt_regs + * @incr: value of increment in bytes + * + * On e2k stack handling differs from everyone else for two reasons: + * 1) All data stack memory must be allocated with 'getsp' prior to accessing; + * 2) Data stack overflows are controlled with special registers which hold + * stack boundaries. + * + * This means that guard page mechanism used for other architectures + * isn't needed on e2k: all overflows accounting is done by hardware. 
+ * So we do not need the gap below the stack vma: if an attacker tries + * to allocate a lot of stack at once in the hope of jumping over the + * guard page, he will just run into out-of-stack exception. + * + * Returns 0 on success. + */ +int expand_user_data_stack(struct pt_regs *regs, unsigned long incr) +{ + thread_info_t *ti = current_thread_info(); + struct mm_struct *mm = current->mm; + u64 sp, new_bottom, stack_size, new_size; + struct vm_area_struct *vma, *v, *prev; + int ret; + + if (usd_cannot_be_expanded(regs)) { + pr_warning("process %s (%d) local data stack cannot be expanded (size fixed), stack top 0x%lx, bottom 0x%lx, current base 0x%llx, size 0x%x\n", + current->comm, current->pid, + ti->u_stack.top, ti->u_stack.bottom, + regs->stacks.usd_lo.USD_lo_base, + regs->stacks.usd_hi.USD_hi_size); + return -EINVAL; + } + + calculate_e2k_dstack_parameters(®s->stacks, &sp, &stack_size, NULL); + + DebugUS("base 0x%llx, size 0x%llx, top 0x%lx, bottom 0x%lx, max current size 0x%lx\n", + sp, stack_size, ti->u_stack.top, ti->u_stack.bottom, + ti->u_stack.size); + + /* + * It can be if signal handler uses alternative stack + * and an overflow of this stack occured. + * + * This check must not return false positive if all of + * stack space is used (i.e. top == bottom). 
+ */ + if ((sp > ti->u_stack.top || sp < ti->u_stack.bottom) && + ti->u_stack.top != ti->u_stack.bottom) { + if (on_sig_stack(sp)) { + pr_info_ratelimited("expand_user_data_stack(): alt stack overflow\n"); + } else { + pr_info_ratelimited("expand_user_data_stack(): SP of user data stack 0x%llx points out of main user stack allocated from bottom 0x%lx to top 0x%lx\n", + sp, ti->u_stack.bottom, ti->u_stack.top); + } + return -ENOMEM; + } + + incr = min(incr, (rlimit(RLIMIT_STACK) & PAGE_MASK) - + (ti->u_stack.top - ti->u_stack.bottom)); + DebugUS("rlim 0x%lx, incr 0x%lx\n", rlimit(RLIMIT_STACK), incr); + if (!incr) + return -ENOMEM; + + new_bottom = sp - stack_size - incr; + new_size = sp - new_bottom; + + /* + * While not all cases of stack underflow could be detected, there could + * be cases, where new_size > MAX_USD_HI_SIZE. Kernel shouldn't be + * broken in this case. + */ + if (new_size > MAX_USD_HI_SIZE) { + pr_info_ratelimited("expand_user_data_stack(): new_size > MAX_USD_HI_SIZE\n"); + return -ENOMEM; + } + + down_write(&mm->mmap_sem); + + vma = find_extend_vma(mm, new_bottom); + if (!vma) { + pr_info_ratelimited("expand_user_data_stack(): user data stack overflow: stack bottom 0x%lx, top 0x%lx, sp 0x%llx, rest free space size 0x%llx\n", + ti->u_stack.bottom, ti->u_stack.top, sp, stack_size); + goto error_unlock; + } + + /* Check that we didn't jump over a hole */ + for (v = vma->vm_next, prev = vma; v && v->vm_end < ti->u_stack.top; + prev = v, v = v->vm_next) { + if (unlikely(prev->vm_end != v->vm_start || + ((v->vm_flags ^ prev->vm_flags) & VM_GROWSDOWN))) { + pr_info_ratelimited("expand_user_data_stack(): jumped over a hole 0x%lx-0x%lx or inconsistent VM_GROWSDOWN flag\n", + prev->vm_end, v->vm_start); + goto error_unlock; + } + } + + DebugUS("find_extend_vma() returned VMA 0x%px, start 0x%lx, end 0x%lx\n", + vma, vma->vm_start, vma->vm_end); + + up_write(&mm->mmap_sem); + + /* + * Increment user data stack size in the USD register + * and in the chain 
registers (CR1_hi.ussz field) + * in all user pt_regs structures of the process. + */ + ret = fix_all_user_stack_pt_regs(regs, new_size - stack_size, true); + if (ret) + return ret; + + /* + * Correct cr1_hi.ussz fields for all functions in the PCSP + */ + ret = fix_all_chain_stack_sz(new_size - stack_size, user_frame_type, true); + if (ret) { + pr_info_ratelimited("expand_user_data_stack(): could not correct user stack sizes in chain stack: ret %d\n", + ret); + return ret; + } + + /* + * Update user data stack current state info + */ + ti->u_stack.bottom = new_bottom; + ti->u_stack.size += new_size - stack_size; + + DebugUS("extended stack: base 0x%llx, size 0x%llx, top 0x%lx, bottom 0x%lx, max current size 0x%lx\n", + sp, new_size, ti->u_stack.top, + ti->u_stack.bottom, ti->u_stack.size); + + return 0; + +error_unlock: + up_write(&mm->mmap_sem); + + return -ENOMEM; +} +EXPORT_SYMBOL(expand_user_data_stack); + +#ifdef CONFIG_COMPAT +void __user *arch_compat_alloc_user_space(unsigned long len) +{ + struct pt_regs *regs = current_pt_regs(); + u64 sp, free_space; + + calculate_e2k_dstack_parameters(®s->stacks, &sp, &free_space, NULL); + + if (len > free_space) { + if (expand_user_data_stack(regs, len - free_space)) + return NULL; + } + + return (void __user *) (sp - len); +} +#endif + +/** + * remap_e2k_stack - remap stack at the end of user address space + * + * It can be either e2k hardware stack (i.e. PSP stack or PCSP stack), + * or it can be signal stack which is saved in privileged area at the + * end of user space since it has some privileged structures saved + * such as trap cellar or CTPRs. 
+ */ +unsigned long remap_e2k_stack(unsigned long addr, + unsigned long old_size, unsigned long new_size, bool after) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma, *next_vma; + unsigned long ret, ts_flag, new_addr, + end = addr + old_size, new_end = addr + new_size; + struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX; + LIST_HEAD(uf_unmap_early); + LIST_HEAD(uf_unmap); + bool locked = false; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + + down_write(&mm->mmap_sem); + + /* + * Try to expand without remapping + */ + vma = find_vma(mm, end - 1); + BUG_ON(!vma || vma->vm_start > end - 1); + BUG_ON(vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)); + + if (vma->vm_end == end && + (!vma->vm_next || vma->vm_next->vm_start >= new_end)) { + if (!vma_adjust(vma, vma->vm_start, new_end, + vma->vm_pgoff, NULL)) { + int pages = (new_size - old_size) >> PAGE_SHIFT; + /* + * Set valid bit on the newly allocated area + */ + vma = find_vma(mm, addr + old_size); + BUG_ON(!vma || vma->vm_start > addr + old_size); + make_vma_pages_valid(vma, addr + old_size, + addr + new_size); + + vm_stat_account(mm, vma->vm_flags, pages); + if (vma->vm_flags & VM_LOCKED) { + mm->locked_vm += pages; + locked = true; + } + + ret = addr; + goto out_unlock; + } + } + + /* + * Remap all vmas + */ + new_addr = get_unmapped_area(NULL, (after) ? 
addr : USER_HW_STACKS_BASE, + new_size, 0, MAP_PRIVATE | MAP_ANONYMOUS); + if (IS_ERR_VALUE(new_addr)) { + ret = new_addr; + goto out_unlock; + } + + for (vma = find_vma(mm, addr); vma && vma->vm_start < end; + vma = next_vma) { + unsigned long remap_from, remap_to, + remap_from_size, remap_to_size; + + remap_from = vma->vm_start; + if (vma->vm_start < addr) + remap_from = addr; + + remap_from_size = vma->vm_end - remap_from; + if (vma->vm_end >= end) + remap_from_size = end - remap_from; + + remap_to = remap_from + new_addr - addr; + + remap_to_size = remap_from_size; + if (vma->vm_end >= end) { + remap_to_size = end - remap_from + + (new_size - old_size); + } + + next_vma = vma->vm_next; + + DebugHS("mremap_to(): from 0x%lx/0x%lx to 0x%lx/0x%lx\n", + remap_from, remap_from_size, remap_to, remap_to_size); + ret = mremap_to(remap_from, remap_from_size, + remap_to, remap_to_size, &locked, + &uf, &uf_unmap_early, &uf_unmap); + if (IS_ERR_VALUE(ret)) { + do_munmap(mm, new_addr, new_size, &uf_unmap); + goto out_unlock; + } + } + + ret = new_addr; + +out_unlock: + up_write(&mm->mmap_sem); + + if (!IS_ERR_VALUE(ret) && locked) + mm_populate(ret + old_size, new_size - old_size); + + clear_ts_flag(ts_flag); + + return ret; +} + +static unsigned long handle_hardware_stack_overflow( + struct hw_stack_area *area, bool after, size_t limit) +{ + unsigned long old_size, new_size, old_addr, new_addr; + + /* + * Increase size exponentially - needed to make sure we won't + * run into the end of virtual memory (because chain stack + * can only be remapped to a *higher* address for longjmp to + * work, and VM area for hardware stacks is limited in size). 
+ */ + old_addr = (unsigned long) area->base; + old_size = area->size; + new_size = max(old_size + PAGE_SIZE, old_size * 11 / 8); + new_size = round_up(new_size, PAGE_SIZE); + + /* Check for rlimit */ + if (new_size > limit) { + if (old_size >= limit) + return -ENOMEM; + new_size = limit; + } + + new_addr = remap_e2k_stack((u64) area->base, old_size, new_size, after); + if (IS_ERR_VALUE(new_addr)) { + return new_addr; + } else { + area->base = (void *) new_addr; + area->size += new_size - old_size; + } + + return new_addr - old_addr; +} + +static int add_user_old_pc_stack_area(struct hw_stack_area *area) +{ + thread_info_t *ti = current_thread_info(); + struct old_pcs_area *old_pc; + + old_pc = kmalloc(sizeof(struct old_pcs_area), GFP_KERNEL); + if (!old_pc) + return -ENOMEM; + + old_pc->base = area->base; + old_pc->size = area->size; + + list_add_tail(&old_pc->list_entry, &ti->old_u_pcs_list); + + return 0; +} + +void __update_pcsp_regs(unsigned long base, unsigned long size, + unsigned long new_fp, + e2k_pcsp_lo_t *pcsp_lo, e2k_pcsp_hi_t *pcsp_hi) +{ + unsigned long new_base, new_top; + + /* + * Calculate new %pcsp + */ + new_base = max(new_fp - 0x80000000UL, base); + new_base = round_up(new_base, ALIGN_PCSTACK_SIZE); + new_top = min(new_fp + 0x80000000UL - 1, base + size); + new_top = round_down(new_top, ALIGN_PCSTACK_SIZE); + + /* + * Important: since saved %pcsp_hi.ind value includes %pcshtp + * after this function we must be sure that %pcsp_hi.ind > %pcshtp. + * This is achieved automatically by making window as big as possible. 
+ */ + AS(*pcsp_lo).base = new_base; + AS(*pcsp_hi).size = new_top - new_base; + AS(*pcsp_hi).ind = new_fp - new_base; +} + +void update_pcsp_regs(unsigned long new_fp, + e2k_pcsp_lo_t *pcsp_lo, e2k_pcsp_hi_t *pcsp_hi) +{ + struct hw_stack_area *pcs = ¤t_thread_info()->u_hw_stack.pcs; + + __update_pcsp_regs((unsigned long)pcs->base, pcs->size, + new_fp, pcsp_lo, pcsp_hi); +} + +void __update_psp_regs(unsigned long base, unsigned long size, + unsigned long new_fp, + e2k_psp_lo_t *psp_lo, e2k_psp_hi_t *psp_hi) +{ + unsigned long new_base, new_top; + + new_base = max(new_fp - 0x80000000UL, base); + new_base = round_up(new_base, ALIGN_PSTACK_SIZE); + new_top = min(new_fp + 0x80000000UL - 1, base + size); + new_top = round_down(new_top, ALIGN_PSTACK_SIZE); + + /* + * Important: since saved %psp_hi.ind value includes %pshtp.ind + * after this function we must be sure that %psp_hi.ind > %pshtp.ind. + * This is achieved automatically by making window as big as possible. + */ + AS(*psp_lo).base = new_base; + AS(*psp_hi).size = new_top - new_base; + AS(*psp_hi).ind = new_fp - new_base; +} + +void update_psp_regs(unsigned long new_fp, + e2k_psp_lo_t *psp_lo, e2k_psp_hi_t *psp_hi) +{ + struct hw_stack_area *ps = ¤t_thread_info()->u_hw_stack.ps; + + __update_psp_regs((unsigned long)ps->base, ps->size, + new_fp, psp_lo, psp_hi); +} + +static void apply_delta_to_cellar(struct trap_pt_regs *trap, + unsigned long start, unsigned long end, unsigned long delta) +{ + int tc_count, cnt; + + if (!trap) + return; + + tc_count = trap->tc_count; + for (cnt = 0; 3 * cnt < tc_count; cnt++) { + unsigned long address = trap->tcellar[cnt].address; + + /* Hardware stack accesses are aligned */ + if (address >= start && address < end) + trap->tcellar[cnt].address += delta; + } +} + +static int apply_delta_to_signal_cellar(struct pt_regs __user *u_regs, + unsigned long start, unsigned long end, unsigned long delta) +{ + struct trap_pt_regs __user *u_trap; + int tc_count, cnt; + + u_trap = 
signal_pt_regs_to_trap(u_regs); + if (IS_ERR_OR_NULL(u_trap)) + return PTR_ERR_OR_ZERO(u_trap); + + if (__get_user(tc_count, &u_trap->tc_count)) + return -EFAULT; + + for (cnt = 0; 3 * cnt < tc_count; cnt++) { + unsigned long address; + + if (__get_user(address, &u_trap->tcellar[cnt].address)) + return -EFAULT; + + /* Hardware stack accesses are aligned */ + if (address >= start && address < end) { + if (__put_user(address + delta, + &u_trap->tcellar[cnt].address)) + return -EFAULT; + } + } + + return 0; +} + +int apply_psp_delta_to_signal_stack(unsigned long base, unsigned long size, + unsigned long start, unsigned long end, unsigned long delta) +{ + struct pt_regs __user *u_regs; + unsigned long ts_flag; + int ret = 0; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + signal_pt_regs_for_each(u_regs) { + unsigned long new_fp; + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + + if (delta != 0) { + ret = apply_delta_to_signal_cellar(u_regs, start, end, + delta); + if (ret) + break; + } + + ret = __get_user(AW(psp_lo), &AW(u_regs->stacks.psp_lo)); + ret = (ret) ?: __get_user(AW(psp_hi), + &AW(u_regs->stacks.psp_hi)); + if (ret) + break; + + DebugHS("adding delta 0x%lx to signal PSP 0x%llx:0x%llx\n", + delta, AW(psp_lo), AW(psp_hi)); + new_fp = AS(psp_lo).base + AS(psp_hi).ind + delta; + __update_psp_regs(base, size, new_fp, &psp_lo, &psp_hi); + + ret = __put_user(AW(psp_hi), &AW(u_regs->stacks.psp_hi)); + ret = (ret) ?: __put_user(AW(psp_lo), + &AW(u_regs->stacks.psp_lo)); + if (ret) + break; + } + clear_ts_flag(ts_flag); + + return ret; +} + +int apply_pcsp_delta_to_signal_stack(unsigned long base, unsigned long size, + unsigned long start, unsigned long end, unsigned long delta) +{ + struct pt_regs __user *u_regs; + unsigned long ts_flag; + int ret = 0; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + signal_pt_regs_for_each(u_regs) { + unsigned long new_fp; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + + if (delta != 0) { + ret = 
apply_delta_to_signal_cellar(u_regs, start, end, + delta); + if (ret) + break; + } + + ret = __get_user(AW(pcsp_lo), &AW(u_regs->stacks.pcsp_lo)); + ret = (ret) ?: __get_user(AW(pcsp_hi), + &AW(u_regs->stacks.pcsp_hi)); + if (ret) + break; + + DebugHS("adding delta 0x%lx to signal PCSP 0x%llx:0x%llx\n", + delta, AW(pcsp_lo), AW(pcsp_hi)); + new_fp = AS(pcsp_lo).base + AS(pcsp_hi).ind + delta; + __update_pcsp_regs(base, size, new_fp, &pcsp_lo, &pcsp_hi); + + ret = __put_user(AW(pcsp_hi), &AW(u_regs->stacks.pcsp_hi)); + ret = (ret) ?: __put_user(AW(pcsp_lo), + &AW(u_regs->stacks.pcsp_lo)); + if (ret) + break; + } + clear_ts_flag(ts_flag); + + return ret; +} + +/* + * The function handles traps on hardware procedure stack overflow or + * underflow. If stack overflow occured then the procedure stack will be + * expanded. In the case of stack underflow it will be constricted + */ +int handle_proc_stack_bounds(struct e2k_stacks *stacks, + struct trap_pt_regs *trap) +{ + hw_stack_t *u_hw_stack = ¤t_thread_info()->u_hw_stack; + e2k_psp_lo_t psp_lo = stacks->psp_lo; + e2k_psp_hi_t psp_hi = stacks->psp_hi; + unsigned long delta, fp, real_base, real_top; + int ret; + + fp = AS(psp_lo).base + AS(psp_hi).ind; + real_base = (unsigned long) u_hw_stack->ps.base; + real_top = real_base + u_hw_stack->ps.size; + + if (AS(psp_hi).ind <= AS(psp_hi).size / 2) { + /* Underflow - check if we've hit the stack bottom */ + if (AS(psp_lo).base <= real_base) + return -ENOMEM; + } else if (AS(psp_lo).base + AS(psp_hi).size >= real_top) { + struct hw_stack_area *ps; + + /* Overflow & we've hit the stack top */ + delta = handle_hardware_stack_overflow(&u_hw_stack->ps, false, + current->signal->rlim[RLIMIT_P_STACK_EXT].rlim_cur); + if (IS_ERR_VALUE(delta)) + return delta; + + ps = ¤t_thread_info()->u_hw_stack.ps; + if (delta) { + apply_delta_to_cellar(trap, real_base, real_top, delta); + + ret = apply_psp_delta_to_signal_stack( + (unsigned long)ps->base, ps->size, + real_base, real_top, delta); + 
if (ret) + return ret; + } + + /* + * The follow call is actual only for paravirtualized + * guest to correct signal stack on host + */ + ret = host_apply_psp_delta_to_signal_stack( + (unsigned long)ps->base, ps->size, + real_base, real_top, delta); + if (ret) + return ret; + + fp += delta; + } + + update_psp_regs(fp, &stacks->psp_lo, &stacks->psp_hi); + + return 0; +} + +/* + * The function handles traps on hardware procedure chain stack overflow or + * underflow. If stack overflow occured then the procedure chaine stack will + * be expanded. In the case of stack underflow it will be constricted + */ +int handle_chain_stack_bounds(struct e2k_stacks *stacks, + struct trap_pt_regs *trap) +{ + hw_stack_t *u_hw_stack = ¤t_thread_info()->u_hw_stack; + e2k_pcsp_lo_t pcsp_lo = stacks->pcsp_lo; + e2k_pcsp_hi_t pcsp_hi = stacks->pcsp_hi; + unsigned long delta, fp, real_base, real_top; + int ret; + + fp = AS(pcsp_lo).base + AS(pcsp_hi).ind; + real_base = (unsigned long) u_hw_stack->pcs.base; + real_top = real_base + u_hw_stack->pcs.size; + + if (AS(pcsp_hi).ind <= AS(pcsp_hi).size / 2) { + /* Underflow - check if we've hit the stack bottom */ + if (AS(pcsp_lo).base <= real_base) + return -ENOMEM; + } else if (AS(pcsp_lo).base + AS(pcsp_hi).size >= real_top) { + struct hw_stack_area *pcs; + + /* Overflow & we've hit the stack top */ + hw_stack_area_t old_pcs_area = u_hw_stack->pcs; + + delta = handle_hardware_stack_overflow(&u_hw_stack->pcs, true, + current->signal->rlim[RLIMIT_PC_STACK_EXT].rlim_cur); + if (IS_ERR_VALUE(delta)) + return delta; + + pcs = ¤t_thread_info()->u_hw_stack.pcs; + if (delta) { + add_user_old_pc_stack_area(&old_pcs_area); + + apply_delta_to_cellar(trap, real_base, real_top, delta); + + ret = apply_pcsp_delta_to_signal_stack( + (unsigned long)pcs->base, pcs->size, + real_base, real_top, delta); + if (ret) + return ret; + } + + /* + * The follow call is actual only for paravirtualized + * guest to correct signal stack on host + */ + ret = 
host_apply_pcsp_delta_to_signal_stack( + (unsigned long)pcs->base, pcs->size, + real_base, real_top, delta); + if (ret) + return ret; + + fp += delta; + } + + update_pcsp_regs(fp, &stacks->pcsp_lo, &stacks->pcsp_hi); + + return 0; +} + +static void print_pagefault_info(struct trap_pt_regs *trap, e2k_addr_t address, + bool debug_mode, bool stack) +{ + struct mm_struct *mm = current->mm; + + if (mm == NULL) + mm = &init_mm; + + /* if this is guest, stop tracing in host to avoid buffer overwrite */ + host_ftrace_stop(); + + print_all_TIRs(trap->TIRs, trap->nr_TIRs); + print_all_TC(trap->tcellar, trap->tc_count); + print_mmap(current); + DebugPF("MMU_ADDR_CONT = 0x%llx\n", read_MMU_reg(MMU_ADDR_CONT)); + + if (debug_mode) + print_address_tlb(address); + print_address_page_tables(address, !debug_mode); + + if (trap->nr_page_fault_exc == exc_instr_page_miss_num || + trap->nr_page_fault_exc == exc_instr_page_prot_num) { + unsigned long instruction_end_page = + round_down(address + E2K_INSTR_MAX_SIZE - 1, PAGE_SIZE); + + if (instruction_end_page != round_down(address, PAGE_SIZE)) { + if (debug_mode) + print_address_tlb(instruction_end_page); + print_address_page_tables(instruction_end_page, + !debug_mode); + } + } + + if (stack) + dump_stack(); +} + +static inline void debug_print_trap_cellar(const trap_cellar_t *tcellar, + unsigned int tc_count) +{ + unsigned int cnt; + tc_fault_type_t ftype; + int chan; + + DbgTC("Counted %d records\n", tc_count); + + if (!(DEBUG_TRAP_CELLAR || DEBUG_STATE_TC)) + return; + + for (cnt = 0; (3 * cnt) < tc_count; cnt++) { + AW(ftype) = AS(tcellar[cnt].condition).fault_type; + chan = AS(tcellar[cnt].condition).chan; + pr_info("do_trap_cellar: cnt %d add 0x%lx ftype %x chan 0x%x\n", + cnt, tcellar[cnt].address, AW(ftype), chan); + PrintTC(&tcellar[cnt], cnt); + } +} + +static inline unsigned long get_fault_ip(struct pt_regs *regs) +{ + e2k_tir_lo_t tir_lo; + + tir_lo.TIR_lo_reg = regs->trap->TIR_lo; + return tir_lo.TIR_lo_ip; +} + +static 
inline int +copy_nested_tc_records(struct pt_regs *regs, + trap_cellar_t *tcellar, unsigned int tc_count) +{ + struct pt_regs *pregs = regs->next; + struct trap_pt_regs *ptrap = pregs->trap; + tc_cond_t *pcond, *cond; + int i, skip; + + DbgTC("nested exception detected\n"); + + if (unlikely(!ptrap)) + panic("do_trap_cellar() previous pt_regs are not from trap\n"); + + if (unlikely(!user_mode(pregs) && + !current_thread_info()->usr_pfault_jump && + !search_exception_tables(get_fault_ip(pregs)))) + panic("do_trap_cellar() previous pt_regs are not user's\n"); + + /* + * We suppose that there could be only one record in + * trap cellar because of nested exception in + * execute_mmu_operations() plus there could be few + * spill/fill records. Other records aren't allowed. + * + * Also allow two records for quadro format. + */ + skip = 1; +#pragma loop count (1) + for (i = 1; (3 * i) < tc_count; i++) { + tc_cond_t cond = tcellar[i].condition; + int fmt = TC_COND_FMT_FULL(cond); + + if (AS(cond).s_f) + continue; + + if (i == 1 && (fmt == LDST_QWORD_FMT || + fmt == TC_FMT_QWORD_QP)) { + ++skip; + continue; + } + + print_all_TC(tcellar, tc_count); + panic("do_trap_cellar() invalid trap cellar content\n"); + } + + /* Modify fault_type */ + cond = &tcellar[0].condition; + pcond = &ptrap->tcellar[ptrap->curr_cnt].condition; + AS(*pcond).fault_type = AS(*cond).fault_type; + + ptrap->tcellar[ptrap->curr_cnt].flags |= TC_NESTED_EXC_FLAG; + + return skip; +} + +/* + * abn abp instructions changed fields for RPR + * we must restore old values for this fields + */ +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + +/* see iset 5.4. (PR) */ +#define get_predicate_val(x, N) (((x) >> ((N) * 2)) & 0x1) + +/* see iset C.17.1.2. 
*/ +static int +calculate_ct_operation(u64 lsr, instr_ss_t instr, u64 pf) +{ + int value; + e2k_ct_t ct_op; + e2k_lsr_t Lsr; + + AW(Lsr) = lsr; + AW(ct_op) = instr.ctcond; + switch (CT_CT(ct_op)) { + case 0: + value = 0; + break; + case 1: + value = 1; + break; + case 2: + value = get_predicate_val(pf, CT_PSRC(ct_op)); + break; + case 3: + value = !get_predicate_val(pf, CT_PSRC(ct_op)); + break; + case 4: + value = ls_loop_end(Lsr); + break; + case 5: + value = !ls_loop_end(Lsr); + break; + case 6: + value = ((Lsr.fields.semc || !ls_prlg(Lsr)) && + get_predicate_val(pf, CT_PSRC(ct_op))) || + ls_loop_end(Lsr); + break; + case 7: + value = !(((Lsr.fields.semc || !ls_prlg(Lsr)) && + get_predicate_val(pf, CT_PSRC(ct_op))) || + ls_loop_end(Lsr)); + break; + case 8: /* must be changed !!! */ + value = ((!(Lsr.fields.semc || !ls_prlg(Lsr)) && + get_predicate_val(pf, CT_PSRC(ct_op))) || + ls_loop_end(Lsr)); + break; + case 14: + value = (((Lsr.fields.semc || !ls_prlg(Lsr)) && + !get_predicate_val(pf, CT_PSRC(ct_op))) || + ls_loop_end(Lsr)); + break; + case 15: + value = !(((Lsr.fields.semc || !ls_prlg(Lsr)) && + !get_predicate_val(pf, CT_PSRC(ct_op))) || + ls_loop_end(Lsr)); + break; + default: + value = 0; + pr_info("calculate_ct_operation bad ct_op = %d CT_PSRC =%d\n", + CT_CT(ct_op), CT_PSRC(ct_op)); + break; + } + return 0; +} + +static void +calculate_new_rpr(struct pt_regs *regs, e2k_addr_t ip, int stp) +{ + instr_hs_t hs; + instr_ss_t ss; + e2k_rpr_lo_t rpr_lo; + e2k_rpr_hi_t rpr_hi; + + /* + * calculate new value of RPR + */ + AW(rpr_lo) = 0; + RPR_STP(rpr_lo) = stp; + RPR_IP(rpr_lo) = ip; + WRITE_RPR_LO_REG(rpr_lo); + + if (get_user(AW(hs), &E2K_GET_INSTR_HS(ip)) == -EFAULT) { + DebugRPR("HS does exist\n"); + return; + } + + /* Check presence of Stub Syllabe */ + if (AW(hs)) { + DebugRPR("HS does exist\n"); + } else { + DebugRPR("SS doesn't exist\n"); + return; + } + + /* Stub Syllabe encodes different short fragment of command */ + if (get_user(AW(ss), 
&E2K_GET_INSTR_SS(ip)) == -EFAULT) { + return; + } + if (ss.abn || ss.abp) { + if (calculate_ct_operation(regs->lsr, ss, + AW(regs->crs.cr0_lo))) { + rpr_hi = READ_RPR_HI_REG(); + RPR_BR_CUR(rpr_hi)++; + RPR_BR_PCUR(rpr_hi)++; + WRITE_RPR_HI_REG(rpr_hi); + } + } +} +#endif + +static int adjust_psp_regs(struct pt_regs *regs, s64 delta) +{ + e2k_psp_lo_t u_psp_lo = regs->stacks.psp_lo; + e2k_psp_hi_t u_psp_hi = regs->stacks.psp_hi; + + AS(u_psp_hi).ind -= GET_PSHTP_MEM_INDEX(regs->stacks.pshtp); + + return copy_user_to_current_hw_stack( + (void *) AS(current_thread_info()->k_psp_lo).base, + (void *) AS(u_psp_lo).base + AS(u_psp_hi).ind, + delta, regs, false); +} + +static int adjust_pcsp_regs(struct pt_regs *regs, s64 delta) +{ + e2k_pcsp_lo_t u_pcsp_lo = regs->stacks.pcsp_lo; + e2k_pcsp_hi_t u_pcsp_hi = regs->stacks.pcsp_hi; + + AS(u_pcsp_hi).ind -= PCSHTP_SIGN_EXTEND(regs->stacks.pcshtp); + + return copy_user_to_current_hw_stack( + (void *) AS(current_thread_info()->k_pcsp_lo).base, + (void *) AS(u_pcsp_lo).base + AS(u_pcsp_hi).ind, + delta, regs, true); +} + +s64 calculate_fill_delta_psp(struct pt_regs *regs, struct trap_pt_regs *trap, + trap_cellar_t *tcellar) +{ + e2k_psp_lo_t psp_lo = regs->stacks.psp_lo; + e2k_psp_hi_t psp_hi = regs->stacks.psp_hi; + unsigned long max_addr = 0; + int i = 0; + s64 delta; + + AS(psp_hi).ind -= GET_PSHTP_MEM_INDEX(regs->stacks.pshtp); + + for (; i < trap->tc_count / 3; i++) { + tc_cond_t condition = tcellar[i].condition; + unsigned long address = tcellar[i].address; + + if (!AS(condition).s_f && !IS_SPILL(tcellar[i]) || + AS(condition).store || AS(condition).sru) + continue; + + max_addr = max(address, max_addr); + } + + max_addr -= max_addr % 32; + delta = max_addr - (AS(psp_lo).base + AS(psp_hi).ind) + 32; + + return delta; +} + +static int handle_spill_fill(struct pt_regs *regs, trap_cellar_t *tcellar, + unsigned int cnt, s64 *last_store, s64 *last_load) +{ + struct trap_pt_regs *trap = regs->trap; + unsigned long address = 
tcellar[cnt].address; + tc_cond_t condition = tcellar[cnt].condition; + tc_mask_t mask = tcellar[cnt].mask; + unsigned long ts_flag; + bool call_pf = true; + int ret; + + /* Optimization: handle each SPILL and each FILL exactly once */ + if (kvm_test_intc_emul_flag(regs)) { + call_pf = false; + } else if (tcellar[cnt].flags & TC_NESTED_EXC_FLAG) { + call_pf = true; + } else if (AS(condition).store) { + if (*last_store != -1 && round_down(address, PAGE_SIZE) == + round_down(tcellar[*last_store].address, PAGE_SIZE)) + call_pf = false; + } else { + if (*last_load != -1 && round_down(address, PAGE_SIZE) == + round_down(tcellar[*last_load].address, PAGE_SIZE)) + call_pf = false; + } + + if (call_pf) { + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = do_page_fault(regs, address, condition, mask, 0); + clear_ts_flag(ts_flag); + if (ret != PFR_SUCCESS) + goto fail_sigsegv; + + if (AS(condition).store) + *last_store = cnt; + else + *last_load = cnt; + } else { + if (kvm_test_intc_emul_flag(regs)) { + if (AS(condition).store) + *last_store = cnt; + else + *last_load = cnt; + } + ret = PFR_SUCCESS; + } + + /* + * For SPILL execute_mmu_operations() will repeat interrupted stores + */ + if (AS(condition).store) + return ret; + + /* + * For FILL we must adjust %pshtp/%pcshtp so that + * hardware repeats the loads. + * + * Also make sure that %pshtp/%pcshtp are adjusted only + * once across all the requests in the trap cellar. + */ + if (AS(condition).sru && !(trap->flags & TRAP_PCSP_FILL_ADJUSTED)) { + if (adjust_pcsp_regs(regs, 32)) + goto fail_sigsegv; + + trap->flags |= TRAP_PCSP_FILL_ADJUSTED; + } else if (!AS(condition).sru && + !(trap->flags & TRAP_PSP_FILL_ADJUSTED)) { + s64 delta = calculate_fill_delta_psp(regs, trap, tcellar); + + if (adjust_psp_regs(regs, delta)) + goto fail_sigsegv; + + trap->flags |= TRAP_PSP_FILL_ADJUSTED; + } + + /* + * We have adjusted pt_regs so that hardware will + * repeat interrupted FILL, no need to repeat in software. 
+ */ + return PFR_IGNORE; + +fail_sigsegv: + /* + * After failed SPILL/FILL we cannot return to user + * so use force_sigsegv() to exit gracefully. + */ + force_sigsegv(SIGSEGV); + + return PFR_SIGPENDING; +} + +void do_trap_cellar(struct pt_regs *regs, int only_system_tc) +{ + struct trap_pt_regs *trap = regs->trap; + trap_cellar_t *tcellar = trap->tcellar; + unsigned int tc_count, cnt; + tc_fault_type_t ftype; + int chan, rval = 0; + /* flag of global_sp operations */ + int gsp_flag; + /* number of global_sp records */ + int global_sp_num; + int ignore_request = 0; + int skip = 0; + unsigned long to_complete = 0; + int srp_flag = 0, store_flag; + e2k_addr_t srp_ip; + /* store recovery point in RPR */ + int rpr_srp_flag; + long multithread_addr = 0; + /* number of multithread_sp */ + int multithread_sp_num = 0; + s64 last_store = -1, last_load = -1; + + /* In TRAP_CELLAR we have records that was dropped by MMU when trap + * occured. Each record consist from 3 dword, fist is address (possible + * address that cause fault), second is data dword that contain + * information needed to store (stored data), third is a condition word + * Maximum records in TRAP_CELLAR is MAX_TC_SIZE (10). + * We should do that user signal handler will be run for every + * trap if it is needed. So we should continue do_trap_cellar() + * after we ret from user's sighandler (see handle_signal in signal.c). 
+ */ + + DbgTC("tick %lld CPU #%ld trap cellar regs addr 0x%px\n", + READ_CLKR_REG(), (long)raw_smp_processor_id(), tcellar); + DbgTC("regs->CR0.hi ip 0x%lx user_mode %d\n", + (long)AS_STRUCT(regs->crs.cr0_hi).ip << 3, + trap_from_user(regs)); + + tc_count = trap->tc_count; + + if (trap->curr_cnt == -1) { + e2k_tir_lo_t tir_lo; + struct pt_regs *prev_regs = regs->next; + + if (trace_trap_cellar_enabled()) { + int cnt; + + for (cnt = 0; (3 * cnt) < tc_count; cnt++) + trace_trap_cellar(&tcellar[cnt], cnt); + } + + debug_print_trap_cellar(tcellar, tc_count); + + /* + * Check if we are in the nested exception that appeared while + * executing execute_mmu_operations() + */ + if (unlikely(prev_regs && prev_regs->flags.exec_mmu_op)) { + /* + * We suppose that spill/fill records are placed at the + * end of trap cellar so skip at the beginning. + */ + skip = copy_nested_tc_records(regs, tcellar, tc_count); + + /* + * Nested exc_data_page or exc_mem_lock appeared, so + * one needs to tell execute_mmu_operations() about it. + * execute_mmu_operations() will return EXEC_MMU_REPEAT + * in this case. do_trap_cellar() will analyze this + * returned value and repeat execution of current + * record with modified data. + */ + prev_regs->flags.exec_mmu_op_nested = 1; + } + + trap->curr_cnt = skip; + + if (unlikely(GET_CLW_REQUEST_COUNT(regs))) { + int clw_first = GET_CLW_FIRST_REQUEST(regs); + + DebugCLW("Detected CLW %d request(s)\n", + GET_CLW_REQUEST_COUNT(regs)); + if (DEBUG_CLW_FAULT) { + for (cnt = 0; (3 * cnt) < tc_count; cnt++) { + AW(ftype) = AS(tcellar[cnt].condition). + fault_type; + chan = AS(tcellar[cnt].condition).chan; + pr_info("do_trap_cellar: cnt %d " + "add 0x%lx ftype %x " + "chan 0x%x\n", + cnt, tcellar[cnt].address, + AW(ftype), chan); + PrintTC(&tcellar[cnt], cnt); + } + } + AW(ftype) = AS(tcellar[clw_first].condition). 
+ fault_type; + if (AW(ftype) != 0) { + unsigned long handled; + + DebugCLW("starts do_page_fault() for first " + "CLW request #%d\n", + clw_first); + handled = pass_clw_fault_to_guest(regs, + &tcellar[clw_first]); + if (!handled) { + rval = do_page_fault(regs, + tcellar[clw_first].address, + tcellar[clw_first].condition, + tcellar[clw_first].mask, + false /* instr page */); + if (rval == PFR_SIGPENDING) { + DebugCLW("BAD CLW AREA\n"); + return; + } + } + } + terminate_CLW_operation(regs); + } + + if (TASK_IS_BINCO(current)) + srp_flag = READ_RPR_HI_REG_VALUE() >> 63 & 1; + if (srp_flag) + trap->flags |= TRAP_SRP_FLAG; + else + trap->flags &= ~TRAP_SRP_FLAG; + + /* + * One should save srp_ip, because trap->TIR_lo could be + * differed from current, when do_trap_cellar() is called from + * do_sigreturn(). + */ + tir_lo.TIR_lo_reg = trap->TIR_lo; + srp_ip = tir_lo.TIR_lo_ip; + trap->srp_ip = srp_ip; + } else { + /* + * We continue to do_trap_cellar() after user's sig handler + * to work for next trap in trap_cellar. + * If user's sighandler, for example, do nothing + * then we should do that call user's sighandler + * once more for the same trap. + * So trap->curr_cnt is here the same for which + * user's sighandler worked. 
+ */ + if ((3 * trap->curr_cnt) >= tc_count) + return; + DbgTC("curr_cnt == %d tc_count / 3 %d\n", + trap->curr_cnt, tc_count / 3); + srp_flag = trap->flags & TRAP_SRP_FLAG; + srp_ip = trap->srp_ip; + } + + global_sp_num = 0; + gsp_flag = 0; /* clear the flag before the loop */ + +#pragma loop count (1) + for (cnt = trap->curr_cnt; (3 * cnt) < tc_count; + cnt++, trap->curr_cnt++) { + unsigned long pass_result; + unsigned long handled; + trap_cellar_t *next_tcellar; + + if (tcellar[cnt].flags & TC_DONE_FLAG) + continue; + + next_tcellar = NULL; + if ((3 * (cnt + 1)) < tc_count) + next_tcellar = &tcellar[cnt + 1]; + + if (unlikely(trap->ignore_user_tc) || only_system_tc) { + /* + * Can get here if: + * 1) Kernel wants to handle only system records of + * trap cellar. + * 2) Controlled access from kernel to user failed. + */ + if (!tc_record_asynchronous(&tcellar[cnt])) + continue; + } +retry_guest_kernel: + pass_result = pass_page_fault_to_guest(regs, &tcellar[cnt]); + to_complete |= KVM_GET_NEED_COMPLETE_PF(pass_result); + if (likely(KVM_IS_NOT_GUEST_TRAP(pass_result))) { + /* trap is not due to guest and should be handled */ + /* in the regular mode */ + ; + } else if (KVM_IS_TRAP_PASSED(pass_result)) { + DebugKVMPF("request #%d is passed to " + "guest: address 0x%lx condition 0x%016llx\n", + cnt, tcellar[cnt].address, + AW(tcellar[cnt].condition)); + goto continue_passed; + } else if (KVM_IS_GUEST_KERNEL_ADDR_PF(pass_result)) { + DebugKVMPF("request #%d guest kernel " + "address 0x%lx handled by host\n", + cnt, tcellar[cnt].address); + rval = PFR_KVM_KERNEL_ADDRESS; + goto handled; + } else if (KVM_IS_SHADOW_PT_PROT_PF(pass_result)) { + DebugKVMPF("request #%d is guest access to protected " + "shadow PT: address 0x%lx\n", + cnt, tcellar[cnt].address); + goto continue_passed; + } else if (KVM_IS_ERROR_RESULT_PF(pass_result)) { + union pf_mode mode; + + DebugKVMPF("request #%d failed: address 0x%lx " + "error %ld\n", + cnt, tcellar[cnt].address, + 
(long)pass_result); + mode.word = 0; + rval = pf_force_sig_info(SIGBUS, BUS_ADRERR, + tcellar[cnt].address, regs); + goto handled; + } else { + BUG_ON(true); + } + /* Probably it is KVM MMIO request (only on guest). */ + /* Handle same fault here to do not call slow path of */ + /* page fault handler (do_page_fault() ... */ + handled = mmio_page_fault(regs, &tcellar[cnt]); + if (handled) { + DbgTC("do_trap_cellar: request #%d was KVM MMIO guest " + "request, handled for address 0x%lx\n", + cnt, tcellar[cnt].address); + goto continue_passed; + } + +repeat: + store_flag = 0; + rpr_srp_flag = 0; + + AW(ftype) = AS(tcellar[cnt].condition).fault_type; + + DbgTC("ftype == %x address %lx\n", + AW(ftype), tcellar[cnt].address); + + ignore_request = 0; + + if (AS(tcellar[cnt].condition).clw) { + DbgTC("found CLW request in : trap cellar ,cnt %d\n", + cnt); + ignore_request = 1; + rval = PFR_IGNORE; + } else if (AS(tcellar[cnt].condition).s_f || + IS_SPILL(tcellar[cnt])) { + rval = handle_spill_fill(regs, tcellar, cnt, + &last_store, &last_load); + } else if (AS(tcellar[cnt].condition).sru && + !AS(tcellar[cnt].condition).s_f && + !AS(tcellar[cnt].condition).store) { + /* This is hardware load from CU table, mark it + * as having permission to access privileged area */ + unsigned long ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + rval = do_page_fault(regs, tcellar[cnt].address, + tcellar[cnt].condition, + tcellar[cnt].mask, 0); + clear_ts_flag(ts_flag); + } else if (AS(ftype).exc_mem_lock) { + DbgTC("do_trap_cellar: exc_mem_lock\n"); + if (!trap->from_sigreturn) { + S_SIG(regs, SIGBUS, exc_mem_lock_num, + BUS_OBJERR); + SDBGPRINT("SIGBUS. Memory lock signaled"); + break; + } + /* + * We can be here only after binary compiler's + * SIGBUS handler when handler wants kernel to + * complete memory operations from cellar. + * Never ignore this request and carry out + * execute_mmu_operations. 
+ */ + rval = PFR_SUCCESS; + } else if (TASK_IS_BINCO(current) && + srp_flag && AS(tcellar[cnt].condition).store) { + DebugSRP("found memory store " + "request with SRP flag in trap cellar " + "cnt %d\n", + cnt); + if (AS(tcellar[cnt].condition).chan == 3) + rpr_srp_flag = 1; + rval = do_page_fault(regs, tcellar[cnt].address, + tcellar[cnt].condition, + tcellar[cnt].mask, 0); + store_flag = 1; + } else { + unsigned long old_address = tcellar[cnt].address; + bool same = false, + async = tc_record_asynchronous(&tcellar[cnt]); + + if (!async && + !(tcellar[cnt].flags & TC_NESTED_EXC_FLAG)) { + if (AS(tcellar[cnt].condition).store) { + if (last_store != -1 && + round_down(tcellar[cnt].address, + PAGE_SIZE) == + round_down(tcellar[last_store].address, + PAGE_SIZE)) { + same = true; + } + } else { + if (last_load != -1 && + round_down(tcellar[cnt].address, + PAGE_SIZE) == + round_down(tcellar[last_load].address, + PAGE_SIZE)) { + same = true; + } + } + } + + if (same) { + rval = PFR_SUCCESS; + } else { + rval = do_page_fault(regs, tcellar[cnt].address, + tcellar[cnt].condition, + tcellar[cnt].mask, 0); + + if (rval == PFR_SUCCESS && !async) { + if (AS(tcellar[cnt].condition).store) + last_store = cnt; + else + last_load = cnt; + } + } + if (rval == PFR_AP_THREAD_READ) { + multithread_sp_num++; + if (multithread_addr == 0) + multithread_addr = old_address; + DebugMT_PM("do_trap_cellar multithread_sp_num=%d cnt=%d\n", + multithread_sp_num, cnt); + + rval = PFR_SUCCESS; + } + } + +handled: + switch (rval) { + case PFR_SIGPENDING: + /* + * Either BAD AREA, so SIGSEGV or SIGBUS and maybe + * a sighandler, or SIGBUS due to page_bound in lock + * trap on load/store, or after invalidating unaligned + * MLT entry on lock trap on store PF handling. + */ + DbgTC("BAD AREA\n"); + goto out; + case PFR_CONTROLLED_ACCESS: + /* Controlled access from kernel to user space, + * just invalidate diagnostic tag in reg if load. 
*/ + if (!AS(tcellar[cnt].condition).store) + execute_mmu_operations(&tcellar[cnt], + next_tcellar, regs, 1, 0, NULL, NULL); + + /* No need to execute the following user loads/stores */ + trap->ignore_user_tc = true; + break; + case PFR_SUCCESS: + /* check if the position is valid */ + if (AS(ftype).global_sp || global_sp_num == 1) { + global_sp_num++; + DbgTC("Store local to global #%d\n", + global_sp_num); + if (AS(ftype).global_sp && global_sp_num > 1) { + pr_info("TC request #%d with global_sp ftype\n", + global_sp_num); + } + + /* + * This will be executed twice. + * First time for the low dword of the SAP + * and the second time for high dword. + * Actual processsing does happen on the first + * pass only. + */ + /* + * ASSERT: should be only one request + * with global SP (2 records in the TC) + */ + BUG_ON(global_sp_num > 2); + + if (global_sp_num == 1) { + if (3 * (cnt + 1) >= tc_count) + panic("do_trap_cellar: only one record for global SP in the TC (should be two)"); +#ifdef CONFIG_PROTECTED_MODE + /* do nothing on the second pass */ + gsp_flag = do_global_sp( + regs, &tcellar[cnt]); +#endif /* CONFIG_PROTECTED_MODE */ + } + } + if (AS(tcellar[cnt].condition).sru && + !AS(tcellar[cnt].condition).s_f && + !IS_SPILL(tcellar[cnt])) { + DbgTC("page fault on CU upload" + " condition: 0x%llx\n", + AW(tcellar[cnt].condition)); + } else if (!gsp_flag && !ignore_request) { + e2k_addr_t addr; + rval = execute_mmu_operations(&tcellar[cnt], + next_tcellar, regs, 0, + &addr, NULL, NULL); + +#ifdef CONFIG_PROTECTED_MODE + /* + * We deal with quadro operations and must + * correct result after second load + */ + if (multithread_sp_num > 0 && + multithread_sp_num % 2 == 0) { + /* + * If we read this SAP in other thread + * than it needs change SAP to AP + */ + extern void change_sap( + int, pt_regs_t *, + e2k_addr_t, long); + change_sap(cnt-1, regs, addr, + multithread_addr); + multithread_sp_num = 0; + multithread_addr = 0; + } +#endif /* CONFIG_PROTECTED_MODE */ + 
DbgTC("execute_mmu_operations() finished for cnt %d rval %d addr=%lx\n", + cnt, rval, addr); + if (rval == EXEC_MMU_STOP) { + goto out; + } else if (rval == EXEC_MMU_REPEAT) { + goto repeat; + } + } + break; + case PFR_KERNEL_ADDRESS: + if (ignore_request) + break; + + DbgTC("kernel address has been detected in Trap Cellar for cnt %d\n", + cnt); + rval = execute_mmu_operations(&tcellar[cnt], + next_tcellar, regs, 0, 0, NULL, NULL); + DbgTC("execute_mmu_operations() finished for kernel addr 0x%lx cnt %d rval %d\n", + tcellar[cnt].address, cnt, rval); + if (rval == EXEC_MMU_STOP) { + goto out; + } else if (rval == EXEC_MMU_REPEAT) { + goto repeat; + } + break; + case PFR_KVM_KERNEL_ADDRESS: { + e2k_addr_t addr; + if (AS(tcellar[cnt].condition).s_f || + IS_SPILL(tcellar[cnt])) { + /* it is hardware stacks fill operation */ + /* and fill will be repeated by hardware */ + rval = handle_spill_fill(regs, tcellar, cnt, + &last_store, &last_load); + goto handled; + } else { + rval = execute_mmu_operations(&tcellar[cnt], + next_tcellar, regs, 0, + &addr, NULL, NULL); + } + + DebugKVMPF("execute_mmu_operations() finished for cnt %d rval %d addr=%lx\n", + cnt, rval, addr); + if (rval == EXEC_MMU_STOP) { + goto out; + } else if (rval == EXEC_MMU_REPEAT) { + DebugKVMPF("%s(): execute_mmu_operations() could not recover KVM guest kernel faulted operation, retry\n", + __func__); + goto retry_guest_kernel; + } + break; + } + case PFR_IGNORE: + DbgTC("ignore request in trap cellar and do not start execute_mmu_operations for cnt %d\n", + cnt); + break; + default: + panic("Unknown do_page_fault return value %d\n", rval); + } + + /* Do not update RPR when nested exception occured. 
*/ +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + if (srp_flag && store_flag && !(trap->flags & TRAP_RP_FLAG)) + calculate_new_rpr(regs, srp_ip, rpr_srp_flag); +#endif + + trap->from_sigreturn = 0; + +continue_passed: + tcellar[cnt].flags |= TC_DONE_FLAG; + } + +out: + if (only_system_tc) + trap->curr_cnt = skip; + if (to_complete != 0) + complete_page_fault_to_guest(to_complete); +} + +static inline int is_spec_load_fault(union pf_mode mode) +{ + return mode.spec && !mode.write; +} + +/* + * Is the operation a semi-speculative load? If yes, the address + * could be any value. Ignore this record. The needed diagnostic + * value has been written to the register by hardware. + */ +static int handle_spec_load_fault(unsigned long address, struct pt_regs *regs, + union pf_mode mode) +{ + if (!is_spec_load_fault(mode)) + return 0; + +#ifdef CONFIG_MAKE_ALL_PAGES_VALID + if (address < TASK_SIZE) { + /* + * Flush bad pte from TLB which have been written there + * by hardware (we must clear "valid" bit from TLB so that + * speculative accesses won't trigger a page fault anymnore). + */ + DebugPF("will flush bad address TLB\n"); + __flush_tlb_page_and_pgtables(current->mm, address); + } +#endif + + if (debug_semi_spec) + pr_notice("PAGE FAULT. ignore invalid LOAD address 0x%lx in speculative mode: IP=%px %s(pid=%d)\n", + address, (void *) GET_IP, current->comm, current->pid); + + return 1; +} + +/* + * Are we prepared to handle this kernel fault? 
+ */ +static int fixup_exception(struct pt_regs *regs) +{ + const struct exception_table_entry *fixup; + unsigned long ip, new_ip; + + ip = get_fault_ip(regs); + + /* get_user/put_user case: */ + fixup = search_exception_tables(ip); + if (fixup) { + new_ip = fixup->fixup; + + correct_trap_return_ip(regs, new_ip); + return PFR_CONTROLLED_ACCESS; + } + + /* All user accesses besides get_user/put_user and wtrap: */ + if (current_thread_info()->usr_pfault_jump) { + new_ip = current_thread_info()->usr_pfault_jump; + correct_trap_return_ip(regs, new_ip); + + /* Controlled access from kernel to user space, + * just invalidate diagnostic tag in reg if load. */ + current_thread_info()->usr_pfault_jump = 0; + + return PFR_CONTROLLED_ACCESS; + } + + return 0; +} + +static int no_context(unsigned long address, struct pt_regs *regs, + union pf_mode mode) +{ + if (fixup_exception(regs)) + return PFR_CONTROLLED_ACCESS; + + /* + * Kernel should not use semi-speculative mode + * so we check only user accesses. + */ + if (mode.user && handle_spec_load_fault(address, regs, mode)) + return PFR_IGNORE; + + print_pagefault_info(regs->trap, address, DEBUG_PF_MODE, false); + + /* + * Oops. The kernel tried to access some bad page. + */ + if (current->pid <= 1) + panic("do_page_fault: no_context on pid %d so will be recursive traps. 
IP = 0x%lx\n", + current->pid, get_fault_ip(regs)); + + panic("do_page_fault: no_context for address %lx from IP = %lx\n", + address, get_fault_ip(regs)); +} + +static int vmalloc_fault(unsigned long address, struct pt_regs *regs, + tc_fault_type_t ftype, union pf_mode mode) +{ + pr_alert("Unexpected fault for kernel address 0x%lx from VMALLOC area ( >= 0x%lx < 0x%lx)\n", + address, VMALLOC_START, VMALLOC_END); + print_address_ptes(pgd_offset_k(address), address, 1); + print_kernel_address_all_nodes_ptes(address); + BUG(); +} + +/* + * Print out info about fatal segfaults, if the show_unhandled_signals + * sysctl is set: + */ +static inline void +show_signal_msg(struct pt_regs *regs, unsigned long address, + struct task_struct *tsk) +{ + void *cr_ip, *tir_ip; + + if (!unhandled_signal(tsk, SIGSEGV)) + return; + + if (!printk_ratelimit()) + return; + + tir_ip = (void *)get_fault_ip(regs); + cr_ip = (void *)GET_IP_CR0_HI(regs->crs.cr0_hi); + + if (tir_ip == cr_ip) + printk("%s%s[%d]: segfault at %lx ip %px", + task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, + tsk->comm, task_pid_nr(tsk), address, tir_ip); + else + printk("%s%s[%d]: segfault at %lx ip %px interrupt ip %px", + task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, + tsk->comm, task_pid_nr(tsk), address, tir_ip, cr_ip); + + print_vma_addr(KERN_CONT " in ", (unsigned long)tir_ip); + + printk(KERN_CONT "\n"); +} + +static int pf_force_sig_info(int si_signo, int si_code, unsigned long address, + struct pt_regs *regs) +{ + struct trap_pt_regs *trap = regs->trap; + + if (si_signo == SIGBUS) + SDBGPRINT("SIGBUS. Page fault"); + else if (si_signo == SIGSEGV) + SDBGPRINT("SIGSEGV. 
Page fault"); + + PFDBGPRINT("Signal %d for address 0x%lx", si_signo, address); + + if (debug_pagefault) + print_pagefault_info(trap, address, DEBUG_PF_MODE, true); + + force_sig_fault(si_signo, si_code, (void __user *)address, + trap->nr_page_fault_exc); + + return PFR_SIGPENDING; +} + +static int clear_valid_on_spec_load_one(struct vm_area_struct *vma, + unsigned long addr, struct pt_regs *regs, bool *unlocked) +{ + struct mm_struct *mm = current->mm; + unsigned long area_start, area_end; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + spinlock_t *ptl; + + /* + * Calculate invalid area size + */ + if (!vma) { + /* This is a speculative load from unmapped area */ + struct vm_area_struct *vma_prev; + vma = find_vma_prev(mm, addr, &vma_prev); + area_start = (vma_prev) ? vma_prev->vm_end : 0; + area_end = TASK_SIZE; + } else if (addr < vma->vm_start) { + /* This is a speculative load from unmapped area */ + area_start = (vma->vm_prev) ? vma->vm_prev->vm_end : 0; + area_end = vma->vm_start; + vma = NULL; + } else { + /* Check that this is a speculative load from PROT_NONE mapping */ + if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) + return 0; + + area_start = vma->vm_start; + area_end = vma->vm_end; + } + + /* + * OK, so remove the valid bit from PTE if it is there. + * Otherwise this load is _not_ the cause of page fault + * and can be safely ignored (we know thanks to the check + * above that this load will just return DW). 
+ */ + + pgd = pgd_offset(mm, addr); + /* Check if we can mark whole pgd invalid */ + if (pgd_none(*pgd) && round_down(addr, PGDIR_SIZE) >= area_start && + round_up(addr, PGDIR_SIZE) <= area_end) { + spin_lock(&mm->page_table_lock); + if (pgd_none(*pgd) && pgd_valid(*pgd)) { + pgd_t entry = pgd_mknotvalid(*pgd); + set_pgd_at(mm, addr, pgd, entry); + } + spin_unlock(&mm->page_table_lock); + goto out_success; + } + + pud = pud_alloc(mm, pgd, addr); + if (!pud) + goto oom; + /* Avoid unnecessary splitting if we raced againt huge PUD fault + * (just for better performance) */ + if (pud_trans_huge(*pud)) + return 0; + /* Check if we can mark whole pud invalid */ + if (pud_none(*pud) && round_down(addr, PUD_SIZE) >= area_start && + round_up(addr, PUD_SIZE) <= area_end) { + spin_lock(&mm->page_table_lock); + if (pud_none(*pud) && pud_valid(*pud)) { + pud_t entry = pud_mknotvalid(*pud); + set_pud_at(mm, addr, pud, entry); + } + spin_unlock(&mm->page_table_lock); + goto out_success; + } + + pmd = pmd_alloc(mm, pud, addr); + if (!pmd) + goto oom; + /* Avoid unnecessary splitting if we raced againt huge PMD fault + * (just for better performance) */ + if (pmd_trans_huge(*pmd)) + return 0; + /* Check if we can mark whole pmd invalid */ + if (pmd_none(*pmd) && round_down(addr, PMD_SIZE) >= area_start && + round_up(addr, PMD_SIZE) <= area_end) { + spinlock_t *ptl; + if (vma && is_vm_hugetlb_page(vma)) { + pte_t *huge_pte = (pte_t *) pmd; + + if (E2K_LARGE_PAGE_SIZE == E2K_4M_PAGE_SIZE) { + if (huge_pte && pmd_index(addr) % 2) + huge_pte--; + } + + ptl = huge_pte_lockptr(hstate_vma(vma), mm, huge_pte); + } else { + ptl = pmd_lockptr(mm, pmd); + } + + spin_lock(ptl); + if (pmd_none(*pmd) && pmd_valid(*pmd)) { + pmd_t entry = pmd_mknotvalid(*pmd); + set_pmd_at(mm, addr, pmd, entry); + } + if (E2K_LARGE_PAGE_SIZE == E2K_4M_PAGE_SIZE && + vma && is_vm_hugetlb_page(vma)) { + pmd = (pmd_index(addr) % 2) ? 
pmd - 1 : pmd + 1; + if (pmd_none(*pmd) && pmd_valid(*pmd)) { + pmd_t entry = pmd_mknotvalid(*pmd); + set_pmd_at(mm, addr, pmd, entry); + } + } + spin_unlock(ptl); + goto out_success; + } + + split_huge_pmd(vma, pmd, addr); + + /* + * Use pte_alloc() instead of pte_alloc_map(). We can't run + * pte_offset_map() on pmds where a huge pmd might be created + * from a different thread. + * + * pte_alloc_map() is safe to use under down_write(mmap_sem) or when + * parallel threads are excluded by other means. + * + * Here we only have down_read(mmap_sem). + */ + if (pte_alloc(mm, pmd)) + goto oom; + + /* See the comment in handle_pte_fault() */ + if (unlikely(pmd_trans_unstable(pmd))) + return 0; + + /* + * A regular pmd is established and it can't morph into a huge pmd + * from under us anymore at this point because we hold the mmap_sem + * read mode and khugepaged takes it in write mode. So now it's + * safe to run pte_offset_map(). + */ + pte = pte_offset_map(pmd, addr); + + if (!pte_none(*pte) || !pte_valid(*pte)) + return 0; + + ptl = pte_lockptr(mm, pmd); + spin_lock(ptl); + /* Check if we can mark pte invalid */ + if (pte_none(*pte) && pte_valid(*pte)) { + pte_t entry = pte_mknotvalid(*pte); + set_pte_at(mm, addr, pte, entry); + /* No need to flush - valid entries are not cached in DTLB */ + } + pte_unmap_unlock(pte, ptl); + +out_success: + if (debug_semi_spec) + pr_notice("PAGE FAULT. unmap invalid SPEC LD address 0x%lx: IP=%px %s(pid=%d)\n", + addr, (void *) GET_IP, current->comm, current->pid); + + return PFR_IGNORE; + +oom: + up_read(¤t->mm->mmap_sem); + *unlocked = true; + + /* OOM killer could have killed us */ + pagefault_out_of_memory(); + + return fatal_signal_pending(current) ? PFR_SIGPENDING : PFR_IGNORE; +} + +/* + * Setting valid bit always precisely matching vmas sometimes requires + * a _lot_ of e2k-specific edits in arch.-indep. code. 
It is simpler + * to set the valid bit by default and remove it in case it's not set + * in the corresponding vma (i.e. when is_pte_valid()=true but vma for + * the address in question is unmapped or mapped with PROT_NONE). + * + * In the case of a race we will try clearing the valid bit again the + * next time we get a page fault on half-spec. load. + * + * Returns: + * PFR_SIGPENDING: if this process was killed by Out-of-Memory handler; + * PFR_IGNORE: if the valid bit was cleared (or some race prevented us + * from clearing it); + * 0: otherwise. + */ +static int clear_valid_on_spec_load(unsigned long address, + struct vm_area_struct *vma, struct pt_regs *regs, + union pf_mode mode, int addr_num, bool *unlocked) +{ + int ret; + + if (!is_spec_load_fault(mode)) + return 0; + + ret = clear_valid_on_spec_load_one(vma, address, regs, unlocked); + if (ret || *unlocked) + return ret; + + if (addr_num > 1) { + unsigned long addr_hi = PAGE_ALIGN(address); + if (vma && vma->vm_end <= addr_hi) + vma = vma->vm_next; + ret = clear_valid_on_spec_load_one(vma, addr_hi, regs, unlocked); + if (ret) + return ret; + } + + return 0; +} + +__cold +static int bad_area(unsigned long address, struct pt_regs *regs, union pf_mode mode, + int addr_num, int si_code) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + bool unlocked = false; + int ret; + + /* + * __do_munmap() could change mmap_sem writelock to mmap_sem readlock, so one + * need to take mmap_sem writelock to process with page table in + * clear_valid_on_spec_load_one(). 
+ */ + up_read(&mm->mmap_sem); + down_write(&mm->mmap_sem); + + vma = find_vma(mm, address); + + ret = clear_valid_on_spec_load(address, vma, regs, mode, addr_num, &unlocked); + + if (!unlocked) + up_write(&mm->mmap_sem); + + if (ret) + return ret; + + if (!mode.user && address >= TASK_SIZE) + return no_context(address, regs, mode); + + if (handle_spec_load_fault(address, regs, mode)) + return PFR_IGNORE; + + if (!mode.user) + return no_context(address, regs, mode); + + trace_unhandled_page_fault(address); + + if (likely(show_unhandled_signals)) + show_signal_msg(regs, address, current); + + return pf_force_sig_info(SIGSEGV, si_code, address, regs); +} + + +static int access_error(struct vm_area_struct *vma, unsigned long address, + struct pt_regs *regs, union pf_mode mode, + int instr_page) +{ + if (mode.write) { + /* Check write permissions */ + if (unlikely(!(vma->vm_flags & (VM_WRITE | VM_MPDMA)))) { + if (!is_spec_load_fault(mode)) + PFDBGPRINT("Page is not writable"); + return 1; + } + } else if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | + VM_WRITE)))) { + /* Check read permissions */ + if (!is_spec_load_fault(mode)) + PFDBGPRINT("Page is PROT_NONE"); + return 1; + } + + /* Check exec permissions */ + if (instr_page && unlikely(!(vma->vm_flags & VM_EXEC))) { + PFDBGPRINT("Page is not executable"); + return 1; + } + + /* Check privilege level */ + if (unlikely((vma->vm_flags & VM_PRIVILEGED) && + !test_ts_flag(TS_KERNEL_SYSCALL))) { + if (!is_spec_load_fault(mode)) + PFDBGPRINT("Page is privileged"); + return 1; + } + + return 0; +} + +/* + * bug #102076 + * + * There are areas that can be written but cannot be read; for example, + * areas past the end of file. Accessing them with `mova' will cause + * a page fault which we do not want in this case (because `mova' is + * speculative). 
+ * + * For half-speculative loads we can just return to user and there will + * be DT in register (hardware puts it there), the user application will + * continue execution from the next wide instruction. But for AAU we have + * to remove the valid bit from page table, otherwise it will just repeat + * the load, resulting in an endless loop. + * + * Note that after removing the valid bit this entry can be written into + * DTLB, so we have to flush it in do_page_fault(). + */ +static int handle_forbidden_aau_load(struct vm_area_struct *vma, + unsigned long address, struct pt_regs *regs, union pf_mode mode) +{ + struct mm_struct *mm = current->mm; + e2k_tir_hi_t tir_hi; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + spinlock_t *ptl; + + if (mode.write || !vma->vm_ops) + return 0; + + AW(tir_hi) = regs->trap->TIR_hi; + + /* Is this not an AAU fault? */ + if (AS(tir_hi).j != 0 || !AS(tir_hi).aa) + return 0; + + /* + * OK, so we want to ignore this. + * If an error occurs, just return PFR_IGNORE and retry + * when the page fault is generated again by AAU. + */ + + pgd = pgd_offset(mm, address); + pud = pud_alloc(mm, pgd, address); + if (!pud) + goto oom; + + pmd = pmd_alloc(mm, pud, address); + if (!pmd) + goto oom; + + split_huge_pmd(vma, pmd, address); + + /* + * Use pte_alloc() instead of pte_alloc_map(). We can't run + * pte_offset_map() on pmds where a huge pmd might be created + * from a different thread. + * + * pte_alloc_map() is safe to use under down_write(mmap_sem) or when + * parallel threads are excluded by other means. + * + * Here we only have down_read(mmap_sem). + */ + if (pte_alloc(mm, pmd)) + goto oom; + + /* See the comment in handle_pte_fault() */ + if (unlikely(pmd_trans_unstable(pmd))) + goto ignore; + + /* + * A regular pmd is established and it can't morph into a huge pmd + * from under us anymore at this point because we hold the mmap_sem + * read mode and khugepaged takes it in write mode. 
So now it's + * safe to run pte_offset_map(). + */ + pte = pte_offset_map(pmd, address); + + if (!pte_none(*pte) || !pte_valid(*pte)) + return 0; + + ptl = pte_lockptr(mm, pmd); + spin_lock(ptl); + + if (pte_none(*pte) && pte_valid(*pte)) { + pte_t entry = pte_mknotvalid(*pte); + set_pte_at(mm, address, pte, entry); + /* No need to flush - valid entries are not cached in DTLB */ + } + + pte_unmap_unlock(pte, ptl); + + if (debug_semi_spec) + pr_notice("PAGE FAULT. unmap invalid MOVA address 0x%lx: IP=%px %s(pid=%d)\n", + address, (void *) GET_IP, current->comm, current->pid); + +ignore: + up_read(¤t->mm->mmap_sem); + + return PFR_IGNORE; + +oom: + up_read(¤t->mm->mmap_sem); + + /* OOM killer could have killed us */ + pagefault_out_of_memory(); + + return fatal_signal_pending(current) ? PFR_SIGPENDING : PFR_SUCCESS; +} + + +__cold +static int mm_fault_error(struct vm_area_struct *vma, unsigned long address, + struct pt_regs *regs, union pf_mode mode, unsigned int fault) +{ + int ret; + + /* + * Pagefault was interrupted by SIGKILL. We have no reason to + * continue pagefault. + */ + if (fatal_signal_pending(current)) { + up_read(¤t->mm->mmap_sem); + + if (!mode.user) + return no_context(address, regs, mode); + + return PFR_SIGPENDING; + } + + if (fault & VM_FAULT_OOM) { + up_read(¤t->mm->mmap_sem); + + if (!mode.user) + return no_context(address, regs, mode); + + pagefault_out_of_memory(); + + /* OOM killer could have killed us */ + return fatal_signal_pending(current) ? 
PFR_SIGPENDING : + PFR_SUCCESS; + } + + if (fault & (VM_FAULT_SIGBUS|VM_FAULT_SIGSEGV)) { + ret = handle_forbidden_aau_load(vma, address, regs, mode); + if (ret) + return ret; + } + + up_read(¤t->mm->mmap_sem); + + if (fault & (VM_FAULT_SIGBUS|VM_FAULT_SIGSEGV)) { + int signal, si_code; + + if (!mode.user) + return no_context(address, regs, mode); + + /* We cannot guarantee that another thread did not + * truncate the file we were reading from, thus we + * cannot rely on valid bit being cleared and must + * manually check for half-speculative mode. */ + if (handle_spec_load_fault(address, regs, mode)) + return PFR_IGNORE; + + if (fault & VM_FAULT_SIGBUS) { + signal = SIGBUS; + si_code = BUS_ADRERR; + } else { + signal = SIGSEGV; + si_code = SEGV_MAPERR; + } + + return pf_force_sig_info(signal, si_code, address, regs); + } + + BUG(); +} + +int pf_on_page_boundary(unsigned long address, tc_cond_t cond) +{ + unsigned long end_address; + const int size = tc_cond_to_size(cond); + + /* Special operations cannot cross page boundary + * as they do not access RAM. */ + if (tc_cond_is_special_mmu_aau(cond)) + return false; + + /* + * Always manually check for page boundary crossing. + * ftype.page_bound field is not reliable enough: + * + * 1) "ftype" field is present only in the first tcellar entry. + * 2) "page_bound" is shadowed by "page_miss", "nwrite_page", etc. + * 3) It was removed in iset V3. 
+ */ + + DebugNAO("not aligned operation with address 0x%lx fmt %d size %d bytes\n", + address, TC_COND_FMT_FULL(cond), size); + + end_address = address + size - 1; + + return unlikely(end_address >> PAGE_SHIFT != address >> PAGE_SHIFT); +} + +static int handle_kernel_address(unsigned long address, struct pt_regs *regs, + union pf_mode mode, tc_fault_type_t ftype) +{ + if (mode.user) { + if (handle_spec_load_fault(address, regs, mode)) + return PFR_IGNORE; + + PFDBGPRINT("On kernel address 0x%lx in user mode", address); + return pf_force_sig_info(SIGBUS, BUS_ADRERR, address, regs); + } + + if (address >= VMALLOC_START && address < VMALLOC_END) + return vmalloc_fault(address, regs, ftype, mode); + + /* + * Handle 'page bound' on kernel address: if access + * address intersects page boundary then hardware + * causes 'page fault' trap (this was removed in iset V3). + */ + if (AS(ftype).page_bound) { + DebugPF("kernel page bound: addr 0x%lx\n", + address); + return PFR_SUCCESS; + } + + /* + * Check that it was the kernel address that caused the page fault + */ + if (regs->trap->tc_count <= 3 || AW(ftype)) + return no_context(address, regs, mode); + + DebugPF("kernel address 0x%lx due to user address page fault\n", + address); + + return PFR_KERNEL_ADDRESS; +} + +/* bug 118398: is this an unaligned qp store with masked out + * bytes landing in not existent page? */ +bool is_spurious_qp_store(bool store, unsigned long address, + int fmt, tc_mask_t mask, unsigned long *pf_address) +{ + if (!cpu_has(CPU_FEAT_ISET_V6) || !store || !tc_fmt_has_valid_mask(fmt)) + return false; + + /* User could do an stmqp with 0 mask. This operation makes + * no sense so we will just loop repeating it until killed. 
*/ + if (unlikely(!mask.mask)) + return false; + + if (address >> PAGE_SHIFT != + (address + ffs(mask.mask) - 1) >> PAGE_SHIFT) { + if (pf_address) + *pf_address = address + ffs(mask.mask) - 1; + return true; + } + + if ((address + 15) >> PAGE_SHIFT != + (address + fls(mask.mask) - 1) >> PAGE_SHIFT) { + if (pf_address) + *pf_address = address; + return true; + } + + return false; +} + +#ifdef CONFIG_NESTED_PAGE_FAULT_INJECTION +static int npfi_enabled; + +static ssize_t npfi_write(struct file *f, + const char __user *buf, size_t count, loff_t *ppos) +{ + u8 val; + + int ret = kstrtou8_from_user(buf, count, 2, &val); + if (ret) + return ret; + + npfi_enabled = !!val; + return count; +} + +static ssize_t npfi_read(struct file *f, + char __user *ubuf, size_t count, loff_t *ppos) +{ + char buf[3]; + + snprintf(buf, sizeof(buf), "%d\n", npfi_enabled); + + return simple_read_from_buffer(ubuf, count, ppos, buf, sizeof(buf)); +} + +static const struct file_operations npfi_debug_fops = { + .open = simple_open, + .read = npfi_read, + .write = npfi_write, +}; + +static int __init npfi_debugfs_init(void) +{ + if (!debugfs_create_file("nested_page_fault_injection", 0644, NULL, + NULL, &npfi_debug_fops)) + return -ENOMEM; + + return 0; +} +late_initcall(npfi_debugfs_init); + +static DEFINE_PER_CPU(unsigned int, injected_faults); +static int nested_page_fault_injected(void) +{ + if (npfi_enabled && (get_cycles() & 0x3ull)) { + unsigned long faults; + + faults = this_cpu_read(injected_faults); + if (faults < 10) + ++faults; + else + faults = 0; + this_cpu_write(injected_faults, faults); + + return faults != 0; + } + + return false; +} +#else +static int nested_page_fault_injected(void) +{ + return 0; +} +#endif + +int do_page_fault(struct pt_regs *const regs, e2k_addr_t address, + const tc_cond_t condition, const tc_mask_t mask, + const int instr_page) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + tc_fault_type_t ftype; + tc_opcode_t opcode; + union 
pf_mode mode; + const int fmt = TC_COND_FMT_FULL(condition); + const bool qp = (fmt == LDST_QP_FMT || fmt == TC_FMT_QPWORD_Q); + int ret, addr_num; + int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; + vm_fault_t major = 0; + +#ifdef CONFIG_KVM_ASYNC_PF + /* + * If physical page was swapped out by host, than + * suspend current process until page will be loaded + * from swap. + */ + if (pv_apf_read_and_reset_reason() == KVM_APF_PAGE_IN_SWAP) { + pv_apf_wait(); + return PFR_IGNORE; + } +#endif /* CONFIG_KVM_ASYNC_PF */ + + if (nested_page_fault_injected()) + return PFR_SUCCESS; + + AW(ftype) = AS(condition).fault_type; + AW(opcode) = AS(condition).opcode; + + mode.word = 0; + mode.write = tc_cond_is_store(condition, machine.native_iset_ver); + mode.spec = AS(condition).spec; + mode.user = user_mode(regs); + mode.root = AS(condition).root; + mode.empty = !mode.write && !AS(condition).vr && !AS(condition).vl; + + if (AS(condition).num_align) { + if (!qp) + address -= 8; + else + address -= 16; + } + + /* + * ftype could be a combination of several fault types. One should + * reset all fault types, except illegal_page, if illegal_page + * happened. See bug #67315 for detailes. 
+ */ + if (AS(ftype).illegal_page) { + AW(ftype) = 0; + AS(ftype).illegal_page = 1; + } + + NATIVE_CLEAR_DAM; + + DebugPF("started for address 0x%lx, instruction page:" + "%d fault type:0x%x condition 0x%llx root:%d missl:%d cpu%d" + " user_mode_fault=%d\n", + address, instr_page, AW(ftype), + AW(condition), mode.root, AS(condition).miss_lvl, + task_cpu(current), mode.user); + + if (mode.write) + flags |= FAULT_FLAG_WRITE; + if (mode.user) + flags |= FAULT_FLAG_USER; + +#ifdef CONFIG_MCST_RT + if ((rts_act_mask & RTS_PGFLT_RTWRN && rt_task(current)) || + rts_act_mask & RTS_PGFLT_WRN) + pr_info("page fault while RTS mode %lx in %d/%s addr=%08lx\n", + rts_act_mask, current->pid, current->comm, address); +#endif + + if (address >= TASK_SIZE) + return handle_kernel_address(address, regs, mode, ftype); + + if (!mm || faulthandler_disabled()) + return no_context(address, regs, mode); + + if (pf_on_page_boundary(address, condition)) { + unsigned long pf_address; + + if (is_spurious_qp_store(mode.write, address, fmt, + mask, &pf_address)) { + addr_num = 1; + address = pf_address; + } else { + addr_num = 2; + } + } else { + addr_num = 1; + } + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); + + /* + * Kernel-mode access to the user address space should only occur + * on well-defined instructions. But, an erroneous kernel fault + * occurring outside one of those areas which also holds mmap_sem + * might deadlock attempting to validate the fault against + * the address space. + * + * Only do the expensive exception table search when we might be at + * risk of a deadlock. This happens if we + * 1. Failed to acquire mmap_sem, and + * 2. The access did not originate in userspace. 
+ */ + if (unlikely(!down_read_trylock(&mm->mmap_sem))) { + if (!user_mode(regs) && + !current_thread_info()->usr_pfault_jump && + !search_exception_tables(get_fault_ip(regs))) { + /* It is kernel code where we do not expect faults */ + return no_context(address, regs, mode); + } +retry: + down_read(&mm->mmap_sem); + } else { + /* + * The above down_read_trylock() might have succeeded in + * which case we'll have missed the might_sleep() from + * down_read(): + */ + might_sleep(); + } + vma = find_vma(mm, address); + + DebugPF("find_vma() returned 0x%px\n", vma); + + if (!vma) { +#ifdef CONFIG_SOFTWARE_SWAP_TAGS + if (is_tags_area_addr(address)) { + DebugPF("fault address 0x%lx is from " + "tags virtual space\n", address); + vma = create_tags_vma(mm, tag_to_virt(address)); + if (!vma) + return pf_out_of_memory(address, regs, mode); + } else +#endif + { + if (!mode.spec) + PFDBGPRINT("PAGE FAULT. Trap with not " + "speculative load and invalid address"); + return bad_area(address, regs, mode, addr_num, SEGV_MAPERR); + } + } + + if (address < vma->vm_start) { +#ifdef CONFIG_SOFTWARE_SWAP_TAGS + if (is_tags_area_addr(address)) { + DebugPF("fault address 0x%lx is from tags virtual " + "space\n", + address); + vma = create_tags_vma(mm, tag_to_virt(address)); + if (vma == NULL) + return pf_out_of_memory(address, regs, mode); + goto good_area; + } +#endif + + return bad_area(address, regs, mode, addr_num, SEGV_MAPERR); + } + + /* + * pgd now should be populated while fault handling. + * + * This can happen on NUMA when user PGD entries are copied + * to per-cpu PGD table. So PGD user entries are updated + * only in process's 'mm' and on the CPU on which the + * thread which is manipulating page tables executes. + * + * But if there is another thread active when page table is + * updated then it still uses the old copy of PGD. So we have + * to update PGD and proceed with the normal handling (in the + * case not only PGD is missing but the page in RAM too). 
+ */ + if (pgd_populate_cpu_root_pt(mm, pgd_offset(mm, address))) { + /* + * PGD only is populated at CPU root page table + * from main user page table mm->pgd + */ + DebugPGD("pgd 0x%px = 0x%lx populated on CPU #%d\n", + pgd_offset(mm, address), + pgd_val(*pgd_offset(mm, address)), + smp_processor_id()); + } + +#ifdef CONFIG_MAKE_ALL_PAGES_VALID + /* + * Following check only to debug the mode when all pages + * should be valid 'CONFIG_MAKE_ALL_PAGES_VALID' + */ + if (AW(ftype)) { + int page_none = (vma->vm_flags & + (VM_READ | VM_WRITE | VM_EXEC)) == 0; + + if (instr_page && AS(ftype).illegal_page) { + print_va_tlb(address, 0); + PFDBGPRINT("Instruction page protection for valid address"); + return bad_area(address, regs, mode, addr_num, SEGV_MAPERR); + } + + /* bug #102076: now this situation is possible */ + if (debug_semi_spec && AS(ftype).illegal_page && !page_none) + pr_notice("illegal_page for valid page, address 0x%lx\n", + address); + + if (!(AS(ftype).page_miss || AS(ftype).priv_page || + AS(ftype).global_sp || AS(ftype).nwrite_page || + AS(ftype).page_bound || + AS(ftype).illegal_page)) { + PFDBGPRINT("trap with bad fault type for valid address ft:0x%x, wr:%d", + AW(ftype), AS(ftype).nwrite_page); + if (debug_pagefault) { + print_pagefault_info(regs->trap, address, + DEBUG_PF_MODE, true); + } + return bad_area(address, regs, mode, addr_num, SEGV_ACCERR); + } + } +#endif /* CONFIG_MAKE_ALL_PAGES_VALID */ + + /* + * Ok, we have a good vm_area for this memory access, so + * we can handle it.. 
+ */ +#ifdef CONFIG_SOFTWARE_SWAP_TAGS +good_area: +#endif + DebugPF("have good vm_area\n"); + + /* We use bitwise OR for performance */ + if (unlikely(AS(ftype).exc_mem_lock | AS(ftype).ph_pr_page | + AS(ftype).io_page | AS(ftype).prot_page | + AS(ftype).intl_res_bits | AS(ftype).isys_page | + AS(ftype).ph_bound)) { + PFDBGPRINT("Bad fault type 0x%x", AW(ftype)); + goto force_sigbus; + } + + if (AS(ftype).nwrite_page) { + DebugPF("write protection occured.\n"); + +#ifdef CONFIG_VIRTUALIZATION + if (unlikely(vma->vm_flags & VM_MPDMA)) { + WARN_ON_ONCE(vma->vm_flags & VM_WRITE); + up_read(&mm->mmap_sem); + handle_mpdma_fault(address); + return PFR_SUCCESS; + } +#endif + } + + if (instr_page) + DebugPF("instruction page fault occured.\n"); + +#ifdef CONFIG_PROTECTED_MODE + /* + * Interpret the stack address in multithreaded protected mode + */ + if ((flags & FAULT_FLAG_ALLOW_RETRY) /* call just once */ && + (current->thread.flags & E2K_FLAG_PROTECTED_MODE) && + instr_page == 0 && WAS_MULTITHREADING) { + DebugPF("WAS_MULTITHREADING=%d address=%lx ip=%llx\n", + WAS_MULTITHREADING, address, GET_IP); + + ret = interpreted_ap_code(regs, &vma, &address); + if (!ret) + return bad_area(address, regs, mode, addr_num, SEGV_ACCERR); + + if (ret == 1 || ret == 2) { + up_read(&mm->mmap_sem); + + return (ret == 2) ? PFR_AP_THREAD_READ : PFR_SUCCESS; + } + } +#endif + + do { + int fault; + + if (access_error(vma, address, regs, mode, instr_page)) + return bad_area(address, regs, mode, addr_num, SEGV_ACCERR); + + fault = handle_mm_fault(vma, address, flags); + major |= fault & VM_FAULT_MAJOR; + DebugPF("handle_mm_fault() returned %x\n", fault); + + if (unlikely(fault & VM_FAULT_RETRY)) { + /* mmap_sem semaphore has been released by + * handle_mm_fault() already. Retry at most once. 
*/ + flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; + if (!fatal_signal_pending(current)) + goto retry; + + if (!mode.user) + return no_context(address, regs, mode); + + return PFR_SIGPENDING; + } + + if (unlikely(fault & VM_FAULT_ERROR)) + return mm_fault_error(vma, address, regs, mode, fault); + + if (major) { + current->maj_flt++; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, + 1, regs, address); + } else { + /* VM_FAULT_MINOR */ + current->min_flt++; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, + 1, regs, address); + } + + if (fault == VM_FAULT_NOPAGE) + sync_addr_range(address, address); + + --addr_num; + if (unlikely(addr_num > 0)) { + address = PAGE_ALIGN(address); + + DebugNAO("not aligned operation will start handle_mm_fault() for next page 0x%lx\n", + address); + + if (vma->vm_end <= address) { + vma = vma->vm_next; + if (!vma || vma->vm_start > address) { + DebugNAO("end address is not valid (has not VMA)\n"); + PFDBGPRINT("End address is not valid (has not VMA)"); + return bad_area(address, regs, mode, + addr_num, SEGV_MAPERR); + } + } + } + } while (unlikely(addr_num > 0)); + + /* + * bug #102076 + * + * For our special case we have to flush DTLB + * after putting the valid bit back into the pte. + */ + if (vma->vm_ops && AS(ftype).illegal_page) + __flush_tlb_range(vma->vm_mm, address, + address + E2K_MAX_FORMAT); + + up_read(&mm->mmap_sem); + DebugPF("handle_mm_fault() finished\n"); + + return PFR_SUCCESS; + +force_sigbus: + up_read(&mm->mmap_sem); + + return pf_force_sig_info(SIGBUS, BUS_ADRERR, address, regs); +} + +/** + * get_recovery_mas - check for special cases when we have to use + * different mas from what was specified in trap cellar + * @condition: trap condition from trap cellar + * + * 1) We should recover LOAD operation with MAS == FILL_OPERATION + * to load the value with tags. In protected mode any value has tag. 
+ * + * 2) Avoid exc_illegal_opcode on e2c+ + * + * 3) Do not lock SLT + */ +static unsigned int get_recovery_mas(tc_cond_t condition, int fmt) +{ + unsigned int mas = AS(condition).mas; + unsigned int mod = (mas & MAS_MOD_MASK) >> MAS_MOD_SHIFT; + int spec_mode = AS(condition).spec; + int chan = AS(condition).chan; + tc_opcode_t opcode; + int root = AS(condition).root; /* secondary space */ + int store = AS(condition).store; + + AW(opcode) = AS(condition).opcode; + + /* + * #127500 Do not execute "secondary lock trap on store" and + * "secondary lock trap on load/store" operations, instead + * downgrade them to simple loads: + * "secondary lock trap on store" -> "secondary normal" + * "secondary lock trap on load/store" -> "secondary normal" + */ + if (root && !store && !spec_mode && (chan == 0 || + chan == 2 && fmt == LDST_QWORD_FMT)) { + if (is_mas_secondary_lock_trap_on_store(mas) || + is_mas_secondary_lock_trap_on_load_store(mas)) + return _MAS_MODE_LOAD_OPERATION; + } + + /* + * If LOAD with 'lock wait' MAS type then we should not use MAS + * to recover LOAD as regular operation. The real LOAD with real + * MAS will be repeated later after return from trap as result + * of pair STORE operation with 'wait unlock' MAS + */ + if (!spec_mode) { + if (chan == 0 && (machine.native_iset_ver < E2K_ISET_V3 || + root == 0) && + mod == _MAS_MODE_LOAD_OP_WAIT) + return _MAS_MODE_LOAD_OPERATION; + + if (machine.native_iset_ver >= E2K_ISET_V3 && root && + chan <= 1 && !store && mas == MAS_SEC_SLT) + return _MAS_MODE_LOAD_OPERATION; + + if (machine.native_iset_ver >= E2K_ISET_V5 && !root && + chan <= 1 && !store && + mas == _MAS_MODE_LOAD_OP_WAIT_1) + return _MAS_MODE_LOAD_OPERATION; + } + + + if (AS(opcode).npsp || + !(current->thread.flags & E2K_FLAG_PROTECTED_MODE)) { + /* + * On e2c+ unprotected qword "check" and "check&unlock" + * instructions generated exc_illegal_opcode, use workaround. 
+ */ + if (machine.native_iset_ver <= E2K_ISET_V2 && + AS(opcode).fmt == LDST_QWORD_FMT && + (mod == _MAS_MODE_LOAD_OP_UNLOCK || + mod == _MAS_MODE_LOAD_OP_CHECK)) + return _MAS_MODE_LOAD_OPERATION; + + return mas; + } + + /* + * If LOAD is protected then we should execute LDRD + * to get the value with tags. It is possible only using + * the special MAS in nonprotected mode + */ + if (mod == 0 || AS(opcode).fmt == 5 && !root) { + return MAS_FILL_OPERATION; + } + if (((chan == 0 || chan == 2) && + ((mod == _MAS_MODE_LOAD_OP_CHECK && !spec_mode) || + (mod == _MAS_MODE_LOAD_OP_UNLOCK && !spec_mode) || + (mod == _MAS_MODE_LOAD_OP_LOCK_CHECK && spec_mode) || + (mod == _MAS_MODE_FILL_OP && !spec_mode) || + (mod == _MAS_MODE_LOAD_OP_SPEC_LOCK_CHECK && spec_mode) || + (mod == _MAS_MODE_LOAD_OP_SPEC && spec_mode))) || + + ((chan == 1 || chan == 3) && + ((mod == MAS_MODE_LOAD_OP_CHECK && !spec_mode) || + (mod == MAS_MODE_LOAD_OP_UNLOCK && !spec_mode) || + (mod == MAS_MODE_LOAD_OP_LOCK_CHECK && spec_mode) || + (mod == MAS_MODE_FILL_OP && !spec_mode) || + (mod == MAS_MODE_LOAD_OP_SPEC_LOCK_CHECK && spec_mode) || + (mod == MAS_MODE_LOAD_OP_SPEC && spec_mode)))) { + return MAS_FILL_OPERATION; + } else { + printk("get_recovery_mas(): we do not know how to recover " + "protected access with MAS 0x%x\n", mas); + BUG(); + } + + return mas; +} + +static inline void calculate_wr_data(int fmt, int offset, + u64 *data, u8 *data_tag) +{ + u64 wr_data; + + /* Avoid undefined behavior when shifting more than argument size */ + if (offset == 0) { + wr_data = *data; + } else { + wr_data = (*data >> (offset * 8)) | + (*data << ((8 - offset) * 8)); + } + + *data = wr_data; + + switch (fmt & 0x7) { + case LDST_BYTE_FMT: + case LDST_HALF_FMT: + *data_tag = 0; + break; + case LDST_WORD_FMT: + if (offset == 0) + *data_tag &= 0x3; + else if (offset == 4) + *data_tag = ((*data_tag) >> 2); + break; + } +} + +static inline void calculate_qp_wr_data(int offset, + u64 *data, u8 *data_tag, u64 
*data_ext, u8 *data_tag_ext) +{ + /* Avoid undefined behavior when shifting more than argument size */ + if (offset == 0) + return; + + u64 wr_data = (*data >> (offset * 8)) | + (*data_ext << ((8 - offset) * 8)); + u64 wr_data_ext = (*data_ext >> (offset * 8)) | + (*data << ((8 - offset) * 8)); + + *data = wr_data; + *data_ext = wr_data_ext; +} + +static void recovery_store_with_bytes(unsigned long address, + unsigned long address_hi, unsigned long address_hi_offset, + u64 data, u64 data_ext, ldst_rec_op_t st_rec_opc, int chan, + int length, int mask, int mask_ext) +{ + int byte; + + st_rec_opc.fmt = LDST_BYTE_FMT; + st_rec_opc.fmt_h = 0; + + for (byte = 0; byte < length; + byte++, address++, data >>= 8, mask >>= 1) { + if (address_hi_offset && byte == address_hi_offset) + address = address_hi; + + if (byte == 8) { + data = data_ext; + mask = mask_ext; + } + + if (mask & 1) { + recovery_faulted_tagged_store(address, data, 0, + AW(st_rec_opc), 0, 0, 0, chan, + 0 /* qp_store */, + 0 /* atomic_store */); + + } + } +} + +static enum exec_mmu_ret do_recovery_store(struct pt_regs *regs, + const trap_cellar_t *tcellar, const trap_cellar_t *next_tcellar, + e2k_addr_t address, e2k_addr_t address_hi_hva, + int fmt, int chan, unsigned long hva_page_offset) +{ + bool big_endian, qp_store, q_store, atomic_qp_store, atomic_q_store, + atomic_store, aligned_16 = IS_ALIGNED(address, 16); + int next_fmt, strd_fmt, offset = address & 0x7; + ldst_rec_op_t st_rec_opc, ld_rec_opc, st_opc_ext; + u64 data, data_ext, + mas = AS(tcellar->condition).mas, + root = AS(tcellar->condition).root; + u8 data_tag, data_ext_tag; +#ifdef CONFIG_ACCESS_CONTROL + e2k_upsr_t upsr_to_save; +#endif /* CONFIG_ACCESS_CONTROL */ + + if (DEBUG_EXEC_MMU_OP) { + u64 val; + u8 tag; + + load_value_and_tagd(&tcellar->data, &val, &tag); + DbgEXMMU("do_recovery_store: STRD store from trap cellar " + "the data 0x%016llx tag 0x%x address 0x%lx offset %d\n", + val, tag, address, offset); + } + + /* + * #74018 Do not 
execute store operation if rp_ret != 0 + */ +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + if (unlikely(regs->trap->flags & TRAP_RP_FLAG)) { + DbgEXMMU("do_recovery_store: rp_ret != 0\n"); + return EXEC_MMU_SUCCESS; + } +#endif + + big_endian = (mas & MAS_ENDIAN_MASK) && + ((mas & MAS_MOD_MASK) != MAS_MODE_STORE_MMU_AAU_SPEC) && + !root; + qp_store = (fmt == LDST_QP_FMT || fmt == TC_FMT_QPWORD_Q); + q_store = (fmt == LDST_QWORD_FMT || fmt == TC_FMT_QWORD_QP); + atomic_qp_store = (cpu_has(CPU_FEAT_ISET_V6) && aligned_16 && qp_store); + + /* SPILL RF without FX is done with LDST_QWORD_FMT but + * with next_tcellar->address == tcellar->address + 16. + * Do not try to repeat this atomically. */ + next_fmt = next_tcellar ? TC_COND_FMT_FULL(next_tcellar->condition) : -1; + atomic_q_store = (cpu_has(CPU_FEAT_ISET_V6) && aligned_16 && q_store && + chan == 1 && next_fmt == fmt && + (next_tcellar->address % 16) == 8); + + atomic_store = (atomic_q_store || atomic_qp_store); + + if (cpu_has(CPU_FEAT_ISET_V6) && q_store && (address % 16 == 8) && + chan == 3) { + /* + * Second part of an atomic quadro store which takes + * up 2 records in cellar, actual memory access has + * been done already when handling the first record. + * + * We only support atomic quadro stores since iset v6 + * because we have to distinguish between ldrd/strd + * with fmtr="qword" and real quadro stores. 
+ */ + return EXEC_MMU_SUCCESS; + } + + ACCESS_CONTROL_DISABLE_AND_SAVE(upsr_to_save); + + /* + * Load data to store from trap cellar + */ + + AW(ld_rec_opc) = 0; + ld_rec_opc.prot = 1; + ld_rec_opc.mas = MAS_BYPASS_ALL_CACHES | MAS_FILL_OPERATION; + ld_rec_opc.fmt = LDST_QWORD_FMT; + ld_rec_opc.index = 0; + + recovery_faulted_load((e2k_addr_t)&tcellar->data, + &data, &data_tag, AW(ld_rec_opc), 0, + (tc_cond_t) {.word = 0}); + + if (atomic_q_store) { + recovery_faulted_load((e2k_addr_t) &next_tcellar->data, + &data_ext, &data_ext_tag, AW(ld_rec_opc), 0, + (tc_cond_t) {.word = 0}); + + /* This is aligned so offset == 0 */ + calculate_wr_data(fmt, offset, &data, &data_tag); + calculate_wr_data(fmt, offset, &data_ext, &data_ext_tag); + } else if (qp_store) { + recovery_faulted_load((e2k_addr_t)&tcellar->data_ext, + &data_ext, &data_ext_tag, AW(ld_rec_opc), 0, + (tc_cond_t) {.word = 0}); + + calculate_qp_wr_data(offset, &data, &data_tag, + &data_ext, &data_ext_tag); + } else { + calculate_wr_data(fmt, offset, &data, &data_tag); + } + + if (DEBUG_EXEC_MMU_OP) + pr_info("do_recovery_store: store(fmt 0x%x) chan = %d address = 0x%lx, data = 0x%llx tag = 0x%x tc_data = 0x%016llx\n", + fmt, chan, address, data, data_tag, data); + + /* + * Actually re-execute the store operation + */ + + AW(st_rec_opc) = 0; + /* Store as little endian. Do not clear the endianness bit + * unconditionally as it might mean something completely + * different depending on other bits in the trap cellar.*/ + st_rec_opc.mas = (big_endian) ? 
(mas & ~MAS_ENDIAN_MASK) : mas; + st_rec_opc.prot = !(AS(tcellar->condition).npsp); + if (fmt == TC_FMT_QPWORD_Q || fmt == TC_FMT_DWORD_Q) + strd_fmt = LDST_QWORD_FMT; + else if (fmt == TC_FMT_QWORD_QP || fmt == TC_FMT_DWORD_QP) + strd_fmt = LDST_QP_FMT; + else + strd_fmt = fmt & 0x7; + st_rec_opc.fmt = strd_fmt; + st_rec_opc.root = AS(tcellar->condition).root; + st_rec_opc.mask = tcellar->mask.mask_lo; + st_rec_opc.fmt_h = !!atomic_store; + + st_opc_ext = st_rec_opc; + st_opc_ext.mask = tcellar->mask.mask_hi; + st_opc_ext.index = 8; + + /* For big endian case should swap the two operations. */ + if (atomic_q_store && big_endian) { + swap(data, data_ext); + swap(data_tag, data_ext_tag); + swap(st_rec_opc, st_opc_ext); + } + + if (unlikely(hva_page_offset)) { + recovery_store_with_bytes(address, address_hi_hva, hva_page_offset, + data, data_ext, st_rec_opc, chan, + tc_cond_to_size(tcellar->condition), + tc_fmt_has_valid_mask(fmt) ? tcellar->mask.mask_lo : 0xff, + tc_fmt_has_valid_mask(fmt) ? tcellar->mask.mask_hi : 0xff); + } else if (IS_MACHINE_ES2 && AS(tcellar->condition).page_bound) { + /* + * 1) Page bound exception exists on e2c+ only. + * 2) bug 118398: handle unaligned qp store with masked out + * bytes landing in not existent page. + */ + + /* v2 only: page_bound exception */ + int length = min(8, 1 << (fmt - 1)); + + recovery_store_with_bytes(address, 0, 0, data, 0, + st_rec_opc, chan, length, 0xff, 0); + } else if (is_spurious_qp_store(true, address, + fmt, tcellar->mask, NULL)) { + /* Since v6: qp store with spurious fault, repeating the whole + * operation will generate another spurious fault so repeat + * each byte store separately. 
*/ + recovery_store_with_bytes(address, 0, 0, data, data_ext, + st_rec_opc, chan, 16, st_rec_opc.mask, st_opc_ext.mask); + } else { + recovery_faulted_tagged_store(address, data, data_tag, + AW(st_rec_opc), data_ext, data_ext_tag, + AW(st_opc_ext), chan, qp_store, atomic_store); + } + + ACCESS_CONTROL_RESTORE(upsr_to_save); + + /* Make sure we finished recovery operations before reading flags */ + E2K_CMD_SEPARATOR; + + /* Nested exception appeared while do_recovery_store() */ + if (regs->flags.exec_mmu_op_nested) { + regs->flags.exec_mmu_op_nested = 0; + + if (fatal_signal_pending(current)) + return EXEC_MMU_STOP; + else + return EXEC_MMU_REPEAT; + } + + return EXEC_MMU_SUCCESS; +} + +/** + * calculate_recovery_load_parameters - calculate the stack address + * of the register where the load was done. + * @dst: trap cellar's "dst" field + * @greg_num_d: global register number + * @greg_recovery: was it a load to a global register? + * @rotatable_greg: was it a rotatable global register? + * @src_bgr: saved BGR if it was a rotatable global register + * @radr: address of a "normal" register + * + * This function calculates and sets @greg_num_d, @greg_recovery, + * @rotatable_greg, @src_bgr, @radr. + * + * Returns zero on success and value of type exec_mmu_ret on failure. 
+ */ +static int calculate_recovery_load_parameters(struct pt_regs *regs, + tc_cond_t cond, unsigned *greg_num_d, bool *greg_recovery, + bool *rotatable_greg, e2k_bgr_t *src_bgr, u64 **radr) +{ + unsigned vr = AS(cond).vr; + unsigned vl = AS(cond).vl; + unsigned dst_addr = AS(cond).address; + + DbgTC("load request vr=%d\n", vr); + + /* + * Calculate register's address + */ + if (!vr && !vl) { + /* + * Destination register to load is NULL + * We should load the value from address into "air" + */ + *radr = NULL; + DbgEXMMU(" is NULL register\n"); + } else if (!vl) { + panic("Invalid destination: 0x%x : vl is 0 %s(%d)\n", + AS(cond).dst, __FILE__, __LINE__); + } else if (dst_addr >= E2K_MAXNR_d - E2K_MAXGR_d && + dst_addr < E2K_MAXNR_d) { + /* + * Destination register to load is global register + * We should only set the global register + * to value from
+ * + * WARNING: if kernel will use global registers then + * we should save all global registers in pt_regs + * structure, write value from
to the + * appropriate item in the pt_regs.gregs[greg_num_d] + */ + *greg_recovery = true; + *radr = (u64 *)(-1); + *greg_num_d = dst_addr - (E2K_MAXNR_d - E2K_MAXGR_d); + + if (*greg_num_d >= E2K_GB_START_REG_NO_d && + *greg_num_d < E2K_GB_START_REG_NO_d + + E2K_GB_REGS_NUM_d) { + /* + * The global register to recovery is from + * rotatable area. We should save current state + * of BGR register and set the register to + * initial state (as no any rotation), because + * is absolute # in register file + * and we can recovery only by absolute # of + * global register. + */ + *rotatable_greg = true; + *src_bgr = native_read_BGR_reg(); + init_BGR_reg(); + DbgEXMMU(" is global rotatable register: " + "rnum_d = 0x%x (dg%d) BGR 0x%x\n", + dst_addr, *greg_num_d, AWP(src_bgr)); + } else { + DbgEXMMU(" is global register: rnum_d = 0x%x " + "(dg%d)\n", + dst_addr, *greg_num_d); + } + } else if (dst_addr < E2K_MAXSR_d) { + /* it need calculate address of register */ + /* into register file frame */ + return -1; + } else { + panic("Invalid destination register %d in the trap " + "cellar %s(%d)\n", + dst_addr, __FILE__, __LINE__); + } + + return 0; +} + +/** + * calculate_recovery_load_to_rf_frame - calculate the stack address + * of the register into registers file frame where the load was done. + * @dst_addr: trap cellar's "dst" field + * @radr: address of a "normal" register + * @load_to_rf: load to rf should be done + * + * This function calculates and sets @radr. + * + * Returns zero on success and value of type exec_mmu_ret on failure. 
+ */ +#define CHECK_PSHTP +static enum exec_mmu_ret calculate_recovery_load_to_rf_frame( + struct pt_regs *regs, tc_cond_t cond, + u64 **radr, bool *load_to_rf) +{ + unsigned dst_addr = AS(cond).address; + unsigned w_base_rnum_d; + u8 *ps_base = NULL; + unsigned rnum_offset_d; + e2k_psp_lo_t u_psp_lo; + e2k_psp_hi_t u_psp_hi; + unsigned long u_top; +#ifdef CHECK_PSHTP + register long lo_1, lo_2, hi_1, hi_2; + register long pshtp_tind_d = AS_STRUCT(regs->stacks.pshtp).tind / 8; + register long wd_base_d = AS_STRUCT(regs->wd).base / 8; + + if (!AS_STRUCT(regs->stacks.pshtp).tind) { + pr_err("%s(): PSHTP.tind is zero, PSHTP.ind is 0x%x\n", + __func__, regs->stacks.pshtp.PSHTP_ind); + return EXEC_MMU_SUCCESS; + } +#endif /* CHECK_PSHTP */ + + BUG_ON(!(dst_addr < E2K_MAXSR_d)); + + /* + * We can be sure that we search in right window, and we can be + * not afraid of nested calls, because we take as base registers + * that were saved when we entered in trap handler, these registers + * pointed to last window before interrupt. + * When we came to interrupt we have new window which is defined + * by WD (current window register) in double words which was saved + * in regs->wd and we use it: + * w_base_rnum_d = regs->wd; + * Window regs file (RF) is a ring buffer with size == E2K_MAXSR_d. 
+ * So w_base_rnum_d can be > or < then num of destination register + * (dst_addr): + * + * w_base_rnum_d > dst_addr: + * + * RF 0<----| PREV-WD | TRAP WD |----------->E2K_MAXSR_d + * ^dst_addr + * ^w_base_rnum_d + * + * w_base_rnum_d < dst_addr: + * + * RF 0E2K_MAXSR_d + * ^dst_addr + * ^w_base_rnum_d + * + * We done E2K_FLUSHCPU and PREV WD is now in psp stack: + * --|-----------| PREV WD |-------------- + * ^psp.base ^psp.ind + * + * First address of first empty byte of psp stack is + * ps_base = base + ind; + */ + + ps_base = (u8 *)(AS(regs->stacks.psp_hi).ind + + AS(regs->stacks.psp_lo).base); + + /* + * w_base_rnum_d is address of double reg + * NR_REA_d(regs->wd, 0) is eguivalent to: + * w_base_rnum_d = AS_STRUCT(regs->wd).base / 8; + */ + w_base_rnum_d = NR_REA_d(regs->wd, 0); + + /* + * Offset from beginning spilled quad-NR for our + * dst_addr is + * rnum_offset_d. + * We define rnum_offset_d for dst_addr from ps_base + * in terms of double. + * Note. dst_addr is double too. + */ +#ifdef CHECK_PSHTP + if (wd_base_d >= pshtp_tind_d) { + lo_2 = wd_base_d - pshtp_tind_d; + hi_2 = wd_base_d - 1; + lo_1 = lo_2; + hi_1 = hi_2; + } else { + lo_1 = 0; + hi_1 = wd_base_d - 1; + lo_2 = wd_base_d + E2K_MAXSR_d - pshtp_tind_d; + hi_2 = E2K_MAXSR_d - 1; + } + + if (dst_addr >= lo_1 && dst_addr <= hi_1) { + rnum_offset_d = w_base_rnum_d - dst_addr; + } else if (dst_addr >= lo_2 && dst_addr <= hi_2) { + rnum_offset_d = w_base_rnum_d + E2K_MAXSR_d - dst_addr; + } else { + return EXEC_MMU_SUCCESS; + } +#else + rnum_offset_d = (w_base_rnum_d - dst_addr + E2K_MAXSR_d) % E2K_MAXSR_d; +#endif + /* + * Window boundaries are aligned at least to quad-NR. + * When windows spill then quad-NR is spilled as minimum. + * Also, extantion of regs is spilled too. 
+ * So, each spilled quad-NR take 2*quad-NR size == 32 bytes + * So, bytes offset for our rnum_offset_d is + * (rnum_offset_d + 1) / 2) * 32 + * if it was uneven number we should add size of double: + * (rnum_offset_d % 2) * 8 + * starting from ISET V5 we should add size of quadro. + */ + *radr = (u64 *) (ps_base - ((rnum_offset_d + 1) / 2) * 32); + if (rnum_offset_d % 2) + *radr += ((machine.native_iset_ver < E2K_ISET_V5) ? 1 : 2); + DbgEXMMU(" is window " + "register: rnum_d = 0x%x offset 0x%x, " + "PS base 0x%px WD base = 0x%x, radr = 0x%px\n", + dst_addr, rnum_offset_d, ps_base, w_base_rnum_d, *radr); + + if (((unsigned long) *radr < AS(regs->stacks.psp_lo).base) || + ((unsigned long) *radr >= (u64)ps_base)) { + /* + * The load operation out of current + * register window frame (for example this + * load is placed in one long instruction with + * return. The load operationb should be ignored + */ + DbgEXMMU(" address of register window points " + "out of current procedure stack frame " + "0x%px >= 0x%px, load operation will be " + "ignored\n", + radr, ps_base); + return EXEC_MMU_SUCCESS; + } + + u_psp_lo = regs->stacks.psp_lo; + u_psp_hi = regs->stacks.psp_hi; + AS(u_psp_hi).ind -= GET_PSHTP_MEM_INDEX(regs->stacks.pshtp); + u_top = AS(u_psp_lo).base + AS(u_psp_hi).ind; + + /* + * Check if target register has been SPILLed to kernel + */ + if ((unsigned long) *radr < PAGE_OFFSET && + (unsigned long) *radr >= u_top) { + *radr = (u64 *) (((unsigned long) *radr - u_top) + + AS(current_thread_info()->k_psp_lo).base); + } + + *load_to_rf = true; + return 0; +} + +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT +static int is_MLT_mas(ldst_rec_op_t opcode) +{ + if (!opcode.root) + return 0; + + if ((int)machine.native_iset_ver >= ELBRUS_2S_ISET) { + unsigned int mas = opcode.mas; + + if (mas == MAS_LOAD_SEC_TRAP_ON_STORE || + mas == MAS_LOAD_SEC_TRAP_ON_LD_ST) + return 1; + } else { + unsigned int mod = (opcode.mas & MAS_MOD_MASK) >> MAS_MOD_SHIFT; + + if (mod == 
_MAS_MODE_LOAD_OP_TRAP_ON_STORE || + mod == _MAS_MODE_LOAD_OP_TRAP_ON_LD) + return 1; + } + + return 0; +} +#endif + +static void recovery_load_with_bytes(unsigned long address, + unsigned long address_hi, unsigned long address_hi_offset, + unsigned long reg_address, unsigned long reg_address_hi, + int vr, ldst_rec_op_t ld_rec_opc, int chan, int length, + tc_cond_t cond) +{ + int byte; + u32 first_time; + + ld_rec_opc.fmt = LDST_BYTE_FMT; + ld_rec_opc.fmt_h = 0; + + for (byte = 0; byte < length; byte++, address++, reg_address++) { + if (address_hi_offset && byte == address_hi_offset) + address = address_hi; + first_time = (byte == 0) ? 1 : 0; + if (byte == 8) + reg_address = reg_address_hi; + if (vr || byte >= 4) { + recovery_faulted_move(address, reg_address, 0, + 1 /* vr */, AW(ld_rec_opc), chan, + 0 /* qp_load */, 0 /* atomic_load */, + first_time /* is it first move? */, + cond); + } + } +} + +static void debug_print_recovery_load(unsigned long address, int fmt, + unsigned long radr, int chan, unsigned greg_recovery, + unsigned greg_num_d, ldst_rec_op_t ld_rec_opc) +{ + u64 val; + u8 tag = 0; +#ifdef CONFIG_ACCESS_CONTROL + e2k_upsr_t upsr_to_save; +#endif + + if (DEBUG_EXEC_MMU_OP) { + ACCESS_CONTROL_DISABLE_AND_SAVE(upsr_to_save); + if (!radr) { + recovery_faulted_load(address, &val, + &tag, AW(ld_rec_opc), 2, + (tc_cond_t) {.word = 0}); + } else if (greg_recovery) { + E2K_GET_DGREG_VAL_AND_TAG(greg_num_d, val, tag); + } else { + load_value_and_tagd((void *) radr, &val, &tag); + } + ACCESS_CONTROL_RESTORE(upsr_to_save); + + DbgEXMMU("do_recovery_load: load(fmt 0x%x) chan = %d " + "address = 0x%lx, %s = %d, rdata = 0x%llx tag = 0x%x\n", + fmt, chan, address, (greg_recovery) ? 
"greg" : "radr", + greg_num_d, val, tag); + } +} + +static enum exec_mmu_ret do_recovery_load(struct pt_regs *regs, + trap_cellar_t *tcellar, trap_cellar_t *next_tcellar, int zeroing, + unsigned long address, unsigned long address_hi_hva, + unsigned long radr, int fmt, int chan, unsigned greg_recovery, + unsigned greg_num_d, e2k_addr_t *adr, unsigned long hva_page_offset) +{ + ldst_rec_op_t ld_rec_opc; + unsigned vr = AS(tcellar->condition).vr; + int next_fmt, ldrd_fmt; +#ifdef CONFIG_ACCESS_CONTROL + e2k_upsr_t upsr_to_save; +#endif + bool aligned_16 = IS_ALIGNED(address, 16), q_load, qp_load, + atomic_qp_load, atomic_q_load, atomic_load; + + qp_load = (fmt == LDST_QP_FMT || fmt == TC_FMT_QPWORD_Q); + q_load = (fmt == LDST_QWORD_FMT || fmt == TC_FMT_QWORD_QP); + atomic_qp_load = (cpu_has(CPU_FEAT_ISET_V6) && aligned_16 && qp_load); + + /* FILL RF without FX is done with LDST_QWORD_FMT but + * with next_tcellar->address == tcellar->address + 16. + * Do not try to repeat this atomically. */ + next_fmt = next_tcellar ? 
TC_COND_FMT_FULL(next_tcellar->condition) : -1; + atomic_q_load = (cpu_has(CPU_FEAT_ISET_V6) && aligned_16 && q_load && + (chan == 0 || chan == 2) && next_fmt == fmt && + (next_tcellar->address % 16) == 8); + + atomic_load = (atomic_q_load || atomic_qp_load); + + if (DEBUG_EXEC_MMU_OP && radr) { + u64 val; + u8 tag; + + if (greg_recovery) { + E2K_GET_DGREG_VAL_AND_TAG(greg_num_d, val, tag); + } else { + load_value_and_tagd((void *) radr, + &val, &tag); + } + + DbgEXMMU("load from register file background register value 0x%llx tag 0x%x\n", + val, tag); + } + + /* + * #74018 Do not execute load operation if rp_ret != 0 + */ +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + if (unlikely(regs->trap->flags & TRAP_RP_FLAG)) { + DbgEXMMU("do_recovery_load: rp_ret != 0\n"); + return EXEC_MMU_SUCCESS; + } +#endif + + if (zeroing) { + if (!greg_recovery && radr) + store_tagged_dword((void *) radr, 0ULL, 0); + + return EXEC_MMU_SUCCESS; + } + + if (cpu_has(CPU_FEAT_ISET_V6) && q_load && (address % 16 == 8) && + (chan == 1 || chan == 3)) { + /* + * Second part of an atomic quadro load which takes + * up 2 records in cellar, actual memory access has + * been done already when handling the first record. + * + * We only support atomic quadro loads since iset v6 + * because we have to distinguish between ldrd/strd + * with fmtr="qword" and real quadro loads. 
+ */ + return EXEC_MMU_SUCCESS; + } + + /* BUG 79642: ignore AS(tcellar->condition).empt field */ + AW(ld_rec_opc) = 0; + ld_rec_opc.mas = get_recovery_mas(tcellar->condition, fmt); + ld_rec_opc.prot = !(AS(tcellar->condition).npsp); + ld_rec_opc.root = AS(tcellar->condition).root; + if (fmt == TC_FMT_QPWORD_Q || fmt == TC_FMT_DWORD_Q) + ldrd_fmt = LDST_QWORD_FMT; + else if (fmt == TC_FMT_QWORD_QP || fmt == TC_FMT_DWORD_QP) + ldrd_fmt = LDST_QP_FMT; + else + ldrd_fmt = fmt & 0x7; + ld_rec_opc.fmt = ldrd_fmt; + ld_rec_opc.fmt_h = !!atomic_load; + + ACCESS_CONTROL_DISABLE_AND_SAVE(upsr_to_save); + +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + if (is_MLT_mas(ld_rec_opc)) { + struct thread_info *ti = current_thread_info(); + u64 cr0_hi = AS_WORD(regs->crs.cr0_hi); + + WARN_ON(cr0_hi < ti->rp_start || cr0_hi >= ti->rp_end); + regs->trap->flags |= TRAP_RP_FLAG; + } +#endif + +#ifdef CONFIG_PROTECTED_MODE + if (adr) + *adr = radr; +#endif + + if (!greg_recovery) { + /* Load to %r/%b register - move data + * to register location in memory */ + unsigned long reg_address, reg_address_hi; + u64 fake_reg[2] __aligned(16); + + reg_address = radr ?: (unsigned long) fake_reg; + + if (!cpu_has(CPU_FEAT_QPREG) || qp_load) + reg_address_hi = reg_address + 8; + else + reg_address_hi = reg_address + 16; + + if (likely(!hva_page_offset)) { + recovery_faulted_move(address, reg_address, + reg_address_hi, vr, AW(ld_rec_opc), + chan, qp_load, atomic_load, 1, + tcellar->condition); + } else { + recovery_load_with_bytes(address, address_hi_hva, + hva_page_offset, reg_address, + reg_address_hi, vr, ld_rec_opc, chan, + tc_cond_to_size(tcellar->condition), + tcellar->condition); + } + } else { + /* Load to %g register */ + u64 *saved_greg_lo = NULL, *saved_greg_hi = NULL; + + if (KERNEL_GREGS_MASK != 0 && + (KERNEL_GREGS_MASK & (1UL << greg_num_d))) { + saved_greg_lo = current_thread_info()->k_gregs.g[ + greg_num_d - KERNEL_GREGS_PAIRS_START].xreg; + } else if 
(is_guest_kernel_gregs(current_thread_info(), + greg_num_d, &saved_greg_lo)) { + BUG_ON(saved_greg_lo == NULL); + } else { + saved_greg_lo = NULL; + } + if (saved_greg_lo) { + if (!atomic_q_load) + saved_greg_hi = &saved_greg_lo[1]; + else + saved_greg_hi = &saved_greg_lo[2]; + } + if (likely(!hva_page_offset)) { + recovery_faulted_load_to_greg(address, greg_num_d, vr, + AW(ld_rec_opc), chan, qp_load, + atomic_load, saved_greg_lo, + saved_greg_hi, tcellar->condition); + } else { + u64 tmp[2] __aligned(16); + recovery_load_with_bytes(address, address_hi_hva, hva_page_offset, + (unsigned long) (saved_greg_lo ?: &tmp[0]), + (unsigned long) (saved_greg_hi ?: &tmp[1]), + vr, ld_rec_opc, chan, + tc_cond_to_size(tcellar->condition), + tcellar->condition); + if (!saved_greg_lo) { + recovery_faulted_load_to_greg( + (unsigned long) tmp, + greg_num_d, vr, + AW(ld_rec_opc), chan, qp_load, + atomic_load, NULL, NULL, + tcellar->condition); + } + } + } + + ACCESS_CONTROL_RESTORE(upsr_to_save); + + debug_print_recovery_load(address, fmt, radr, chan, greg_recovery, + greg_num_d, ld_rec_opc); + + /* Make sure we finished recovery operations before reading flags */ + E2K_CMD_SEPARATOR; + + /* Nested exception appeared while do_recovery_load() */ + if (regs->flags.exec_mmu_op_nested) { + regs->flags.exec_mmu_op_nested = 0; + + if (fatal_signal_pending(current)) + return EXEC_MMU_STOP; + else + return EXEC_MMU_REPEAT; + } + + return EXEC_MMU_SUCCESS; +} + +static inline bool +check_spill_fill_recovery(tc_cond_t cond, e2k_addr_t address, bool s_f, + struct pt_regs *regs) +{ + bool store; + + store = AS(cond).store; + if (unlikely(AS(cond).s_f || s_f)) { + e2k_addr_t stack_base; + e2k_size_t stack_ind; + + /* + * Not completed SPILL operation should be completed here + * by data store + * Not completed FILL operation replaced by restore of saved + * filling data in trap handler + */ + + DbgEXMMU("completion of %s %s operation\n", + (AS(cond).sru) ? "PCS" : "PS", + (store) ? 
"SPILL" : "FILL"); + if (AS(cond).sru) { + stack_base = regs->stacks.pcsp_lo.PCSP_lo_base; + stack_ind = regs->stacks.pcsp_hi.PCSP_hi_ind; + } else { + stack_base = regs->stacks.psp_lo.PSP_lo_base; + stack_ind = regs->stacks.psp_hi.PSP_hi_ind; + } + if (address < stack_base || address >= stack_base + stack_ind) { + printk("%s(): invalid hardware stack addr 0x%lx < " + "stack base 0x%lx or >= current stack " + "offset 0x%lx\n", + __func__, address, stack_base, + stack_base + stack_ind); + BUG(); + } + if (!store && !AS(cond).sru) { + printk("execute_mmu_operations(): not completed PS FILL operation detected in TC (only PCS FILL operation can be dropped to TC)\n"); + BUG(); + } + return true; + } + return false; +} + +static enum exec_mmu_ret convert_pv_gva_to_hva(unsigned long *address_hva_p, + unsigned long address, size_t size, const struct pt_regs *regs) +{ + void *address_hva = guest_ptr_to_host((void *) address, size, regs); + + if (unlikely(IS_ERR(address_hva))) { + pr_err("%s(): could not convert page fault addr 0x%lx " + "to recovery format, error %ld\n", + __func__, address, PTR_ERR(address_hva)); + if (PTR_ERR(address_hva) == -EAGAIN) + return EXEC_MMU_REPEAT; + else + return EXEC_MMU_STOP; + } + + *address_hva_p = (unsigned long) address_hva; + + return EXEC_MMU_SUCCESS; +} + +enum exec_mmu_ret execute_mmu_operations(trap_cellar_t *tcellar, + trap_cellar_t *next_tcellar, struct pt_regs *regs, + int zeroing, e2k_addr_t *adr, + bool (*is_spill_fill_recovery)(tc_cond_t cond, + e2k_addr_t address, bool s_f, + struct pt_regs *regs), + enum exec_mmu_ret (*calculate_rf_frame)(struct pt_regs *regs, + tc_cond_t cond, u64 **radr, + bool *load_to_rf)) +{ + unsigned long flags, hva_page_offset = 0; + tc_cond_t cond = tcellar->condition; + e2k_addr_t address = tcellar->address, address_hi; + int chan, store, fmt, ret; + bool is_s_f; + + DbgEXMMU("started\n"); + DebugPtR(regs); + +#ifdef CONFIG_PROTECTED_MODE + /* + * for multithreading of protected mode + * (It needs 
to know address of register in chain stack + * to change SAP to AP for other threads) + */ + if (adr) + *adr = 0; +#endif /* CONFIG_PROTECTED_MODE */ + + regs->flags.exec_mmu_op = 1; + + fmt = TC_COND_FMT_FULL(cond); + BUG_ON(fmt == 6 || fmt == 0 || fmt > 7 && fmt < 0xd || fmt == 0xe || + fmt >= 0x10 && fmt < 0x14 || fmt >= 0x16 && fmt <= 0x1e || + fmt >= 0x20); + + /* + * If ld/st hits to page boundary page, page fault can occur on first + * or on second page. If page fault occurs on second page, we need to + * correct addr. In this case addr points to the end of touched area. + */ + if (AS(cond).num_align) { + if (fmt != LDST_QP_FMT && fmt != TC_FMT_QPWORD_Q) + address -= 8; + else + address -= 16; + } + + store = AS(cond).store; + + /* + * 1) In some case faulted address should be converted to some other + * one to enable recovery on the current MMU context. For example, + * the source paravirtualized guest faulted address should be converted + * to host user address mapped to: gva <-> hva + * 2) Guest user's loads and stores also can land on a page boundary + * and cause a page fault on guest's page table. After fixing the + * page table a hypercall is invoked to repeat the operation, and + * it is possible that hypercall will have to access not adjacent + * HVA pages. We could support this in hypercalls, but reusing + * code from 1) above is simpler (does not require duplicating + * functionality). + */ + if (host_test_intc_emul_mode(regs) && !(tcellar->flags & TC_IS_HVA_FLAG) || + IS_ENABLED(CONFIG_KVM_GUEST_KERNEL)) { + unsigned long address_lo_hva; + int size = tc_cond_to_size(cond); + int size_lo = min(PAGE_SIZE - offset_in_page(address), size); + ret = convert_pv_gva_to_hva(&address_lo_hva, address, size_lo, regs); + if (ret != EXEC_MMU_SUCCESS) + return ret; + + /* + * Check if ls/st really hits at page boundary. If guest ld/st hits + * at page boundary, gva may point to non-contigious area on the host + * side. 
So we need to split operation into two steps: + * 1. execute ld/st of low part of tcellar->data to the 1st page + * 2. execute ld/st of high part of tcellar->data to the 2nd page + */ + if (pf_on_page_boundary(address, cond) && + !is_spurious_qp_store(store, address, fmt, tcellar->mask, NULL)) { + unsigned long address_hi_hva; + ret = convert_pv_gva_to_hva(&address_hi_hva, + PAGE_ALIGN(address), size - size_lo, regs); + if (ret != EXEC_MMU_SUCCESS) + return ret; + + address_hi = address_hi_hva; + hva_page_offset = size_lo; + } + + address = address_lo_hva; + } + + if (likely(is_spill_fill_recovery == NULL)) { + is_s_f = check_spill_fill_recovery(cond, tcellar->address, + IS_SPILL(tcellar[0]), regs); + } else { + is_s_f = is_spill_fill_recovery(cond, address, + IS_SPILL(tcellar[0]), regs); + } + if (is_s_f) + store = 1; + + chan = AS(cond).chan; + BUG_ON((unsigned int) chan > 3 || store && !(chan & 1)); + + + raw_all_irq_save(flags); + if (store) { + /* + * Here performs dropped store operation, opcode.fmt contains + * size of data that must be stored, address it's address where + * data must be stored, data is data ;-) + */ + ret = do_recovery_store(regs, tcellar, next_tcellar, address, + address_hi, fmt, chan, hva_page_offset); + } else { + /* + * Here we perform a load operation which is more difficult + * than store, we know only the register's number in interrupted + * frame, so we need to SPILL register file to memory and then + * find the needed register in it; only then perform operation. 
+ */ + unsigned greg_num_d = -1; + bool greg_recovery = false; + bool rotatable_greg = false; + bool load_to_rf = false; + u64 *radr; + e2k_bgr_t src_bgr; + + ret = calculate_recovery_load_parameters(regs, cond, + &greg_num_d, &greg_recovery, + &rotatable_greg, &src_bgr, &radr); + if (ret < 0) { + if (likely(calculate_rf_frame == NULL)) { + ret = calculate_recovery_load_to_rf_frame(regs, + cond, &radr, &load_to_rf); + } else { + ret = calculate_rf_frame(regs, + cond, &radr, &load_to_rf); + } + } + + if (!ret) { + if (load_to_rf) + COPY_STACKS_TO_MEMORY(); + ret = do_recovery_load(regs, tcellar, next_tcellar, + zeroing, address, address_hi, + (unsigned long) radr, fmt, chan, + greg_recovery, greg_num_d, + adr, hva_page_offset); + + /* + * Restore BGR register to recover rotatable state + */ + if (rotatable_greg) + write_BGR_reg(src_bgr); + } + } + + raw_all_irq_restore(flags); + + regs->flags.exec_mmu_op = 0; + regs->flags.exec_mmu_op_nested = 0; + + return ret; +} diff --git a/arch/e2k/mm/hugetlbpage.c b/arch/e2k/mm/hugetlbpage.c new file mode 100644 index 000000000000..b97b08baf017 --- /dev/null +++ b/arch/e2k/mm/hugetlbpage.c @@ -0,0 +1,182 @@ +/* + * E2K Huge TLB page support. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#undef DEBUG_HUGETLB_MODE +#undef DebugHP +#define DEBUG_HUGETLB_MODE 0 /* Huge pages */ +#define DebugHP(...) DebugPrint(DEBUG_HUGETLB_MODE ,##__VA_ARGS__) + +pte_t * +huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + pgd = pgd_offset(mm, addr); + pud = pud_alloc(mm, pgd, addr); + if (pud == NULL) { + return NULL; + } + pmd = pmd_alloc(mm, pud, addr); + if (pmd == NULL) { + return NULL; + } + pte = (pte_t *)pmd; + + /* + * Large page pte should point to the first of two pmd's. 
+ */ + if (E2K_LARGE_PAGE_SIZE == E2K_4M_PAGE_SIZE) { + if (pte && pmd_index(addr) % 2) + pte--; + } + + BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte)); + + return pte; +} + +pte_t * +huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + pgd = pgd_offset(mm, addr); + if (pgd_none(*pgd)) + return NULL; + pud = pud_offset(pgd, addr); + if (pud_none(*pud)) + return NULL; + pmd = pmd_offset(pud, addr); + pte = (pte_t *)pmd; + + /* + * Large page pte should point to the first of two pmd's. + */ + if (E2K_LARGE_PAGE_SIZE == E2K_4M_PAGE_SIZE) { + if (pte && pmd_index(addr) % 2) + pte--; + } + + return pte; +} + +void +set_huge_pte_at(struct mm_struct *mm, unsigned long address, + pte_t *ptep, pte_t entry) +{ + /* + * In this case virtual page occupied two sequential entries in + * page table on 2-th level (PMD). + * All two pte's (pmd's) should be set to identical entries. + */ + DebugHP("will set pte 0x%px = 0x%lx\n", + ptep, pte_val(entry)); + set_pte_at(mm, address, ptep, entry); + if (E2K_LARGE_PAGE_SIZE == E2K_4M_PAGE_SIZE) + set_pte_at(mm, address, (++ptep), entry); +} + +pte_t +huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + pte_t entry = *ptep; + huge_pte_clear(mm, addr, ptep, + 0 /* unused now */); + return entry; +} + +/* Update this if adding upport for ARCH_ENABLE_HUGEPAGE_MIGRATION (see x86) */ +int pmd_huge(pmd_t pmd) +{ + return user_pmd_huge(pmd); +} + +int pud_huge(pud_t pud) +{ + BUG_ON(user_pud_huge(pud)); /* not implemented for user */ + return 0; +} + +#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA +unsigned long +hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct mm_struct *mm = current->mm; + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; + unsigned long begin, end; + unsigned long is_protected = 
TASK_IS_PROTECTED(current); + unsigned long is_32bit = (current->thread.flags & E2K_FLAG_32BIT) && + !is_protected; + + if (len & ~huge_page_mask(h)) + return -EINVAL; + + if (len > TASK_SIZE) + return -ENOMEM; + + if (flags & MAP_FIXED) { + if (!test_ts_flag(TS_KERNEL_SYSCALL) && + (addr >= USER_HW_STACKS_BASE || + addr + len >= USER_HW_STACKS_BASE)) + return -ENOMEM; + if (prepare_hugepage_range(file, addr, len)) + return -EINVAL; + return addr; + } + + begin = (addr) ?: mm->mmap_base; + if (!test_ts_flag(TS_KERNEL_SYSCALL)) { + if (is_32bit || is_protected && (flags & MAP_FIRST32)) + end = TASK32_SIZE; + else + end = TASK_SIZE; + end = min(end, USER_HW_STACKS_BASE); + } else { + end = TASK_SIZE; + } + +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + if (TASK_IS_BINCO(current) && ADDR_IN_SS(addr)) { + end = min(end, SS_ADDR_END); + /* Lower mremap() address for binary compiler + * must be >= ss_rmp_bottom */ + if (current_thread_info()->ss_rmp_bottom > addr) + begin = current_thread_info()->ss_rmp_bottom; + } +#endif + + info.flags = 0; + info.length = len; + info.low_limit = begin; + info.high_limit = end; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; + + return vm_unmapped_area(&info); +} +#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */ + diff --git a/arch/e2k/mm/init.c b/arch/e2k/mm/init.c new file mode 100644 index 000000000000..4590e368fc99 --- /dev/null +++ b/arch/e2k/mm/init.c @@ -0,0 +1,902 @@ +/* $Id: init.c,v 1.55 2009/11/11 08:17:56 thay_k Exp $ + * arch/e2k/mm/init.c + * + * Memory menegement initialization + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#undef DEBUG_INIT_MODE +#undef DebugB +#define DEBUG_INIT_MODE 0 /* Boot paging init */ +#define DebugB(...) 
DebugPrint(DEBUG_INIT_MODE ,##__VA_ARGS__) + +#undef DEBUG_MEMMAP_INIT_MODE +#undef DebugMI +#define DEBUG_MEMMAP_INIT_MODE 0 /* memory mapping init */ +#define DebugMI(...) DebugPrint(DEBUG_MEMMAP_INIT_MODE ,##__VA_ARGS__) + +#undef DEBUG_ZONE_SIZE_MODE +#undef DebugZS +#define DEBUG_ZONE_SIZE_MODE 0 /* zone size calculation */ +#define DebugZS(...) DebugPrint(DEBUG_ZONE_SIZE_MODE ,##__VA_ARGS__) + +#undef DEBUG_DISCONTIG_MODE +#undef DebugDM +#define DEBUG_DISCONTIG_MODE 0 /* discontig. memory */ +#define DebugDM(...) DebugPrint(DEBUG_DISCONTIG_MODE ,##__VA_ARGS__) + +#undef DEBUG_PAGE_VALID_MODE +#undef DebugPV +#define DEBUG_PAGE_VALID_MODE 0 /* checking: is page valid */ +#define DebugPV(...) DebugPrint(DEBUG_PAGE_VALID_MODE ,##__VA_ARGS__) + +#undef DEBUG_PAGE_VALID_ERR_MODE +#undef DebugPVE +#define DEBUG_PAGE_VALID_ERR_MODE 0 /* checking: is page valid */ +#define DebugPVE(...) DebugPrint(DEBUG_PAGE_VALID_ERR_MODE ,##__VA_ARGS__) + +#undef DEBUG_NUMA_MODE +#undef DebugNUMA +#define DEBUG_NUMA_MODE 0 /* NUMA supporting */ +#define DebugNUMA(...) DebugPrint(DEBUG_NUMA_MODE ,##__VA_ARGS__) + +#ifdef CONFIG_NEED_MULTIPLE_NODES +pg_data_t *node_data[MAX_NUMNODES]; +EXPORT_SYMBOL(node_data); +#endif +static e2k_size_t __read_mostly last_valid_pfn; + +struct page __read_mostly *zeroed_page = NULL; +/* for ext4 fs */ +EXPORT_SYMBOL(zeroed_page); + +u64 __read_mostly zero_page_nid_to_pfn[MAX_NUMNODES] = { + [0 ... MAX_NUMNODES-1] = 0 +}; +struct page __read_mostly *zero_page_nid_to_page[MAX_NUMNODES] = { + [0 ... MAX_NUMNODES-1] = 0 +}; + +int mem_init_done = 0; +static int init_bootmem_done = 0; + +/* This is only called until mem_init is done. 
*/ +static void __init *node_early_get_page(int node) +{ + void *p; + + if (init_bootmem_done) { + p = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node); + } else { + BOOT_BUG("is not implemented for boot-time mode"); + } + return p; +} +void __init *node_early_get_zeroed_page(int nid) +{ + void *p = node_early_get_page(nid); + + if (p == NULL) + return p; + clear_page(p); + return p; +} + +static void __init nodes_up(void) +{ + unsigned long node_mask = 0x1; + int node; + + nodes_clear(node_online_map); + + for (node = 0; node < L_MAX_MEM_NUMNODES; node++) { + if (phys_nodes_map & node_mask) { + node_set_online(node); + } else { + node_mask <<= 1; + continue; /* node not online */ + } + + node_mask <<= 1; + } + + if (phys_nodes_num != num_online_nodes()) + INIT_BUG("Number of online nodes %d is not the same as set %d", + num_online_nodes(), phys_nodes_num); +} + +static void notrace __init +bootmem_init(void) +{ + int cur_nodes_num = 0; + int node; + + nodes_up(); + + for (node = 0; node < L_MAX_MEM_NUMNODES; node++) { + node_phys_mem_t *node_mem = &boot_phys_mem[node]; + boot_phys_bank_t *phys_bank; + boot_phys_bank_t *node_banks; + e2k_addr_t end_pfn; + int bank; + + if (cur_nodes_num >= phys_mem_nodes_num) + break; /* no more nodes with memory */ + + if (!node_mem->pfns_num) + continue; /* node has not memory */ + + end_pfn = node_mem->start_pfn + node_mem->pfns_num; + if (end_pfn > max_low_pfn) { + max_pfn = end_pfn; + max_low_pfn = end_pfn; + } + + node_banks = node_mem->banks; + cur_nodes_num++; + + for (bank = node_mem->first_bank; bank >= 0; + bank = phys_bank->next) { + e2k_addr_t start_addr; + e2k_size_t size; + + phys_bank = &node_banks[bank]; + if (!phys_bank->pages_num) + /* bank in the list has not pages */ + INIT_BUG("Node #%d bank #%d at the list " + "has not memory pages", + node, bank); + + start_addr = phys_bank->base_addr; + size = phys_bank->pages_num * PAGE_SIZE; + + if (memblock_add_node(start_addr, size, node)) + INIT_BUG("Couldn't add node 
%d.", node); + + if (memblock_reserve(start_addr, size)) + INIT_BUG("Couldn't reserve node %d memory.", + node); + } + } +} + +#ifdef CONFIG_NEED_MULTIPLE_NODES +static void __init allocate_node_datas(void) +{ + int nid; + + for_each_online_node(nid) { + void *vaddr; + + vaddr = memblock_alloc_try_nid(sizeof(struct pglist_data), + SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS), + MEMBLOCK_ALLOC_ACCESSIBLE, nid); + if (!vaddr) + INIT_BUG("Cannot allocate pglist_data for node %d\n", + nid); + + NODE_DATA(nid) = vaddr; + memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); + } +} +#endif + +/* + * Order the physical memory areas in the bank on increase of addresses + */ +static void __init init_check_order_bank_areas(int node_id, + boot_phys_bank_t *phys_bank) +{ + e2k_busy_mem_t *busy_areai = NULL; + e2k_busy_mem_t *busy_areaj = NULL; + e2k_size_t start_page; + e2k_size_t end_page; + int i; + int j; + + DebugB("boot_order_bank_areas() started\n"); + for (i = phys_bank->first_area; i >= 0; i = busy_areai->next) { + busy_areai = __va(&phys_bank->busy_areas[i]); + if (busy_areai->pages_num == 0) { + INIT_BUG("Node #%d empty physical memory busy area #%d " + "cannot be in the list", + node_id, i); + continue; + } + start_page = busy_areai->start_page; + end_page = start_page + busy_areai->pages_num; + DebugB("Node #%d the reserved area #%d from page 0x%lx " + "to 0x%lx should be ordered\n", + node_id, i, + phys_bank->base_addr + (start_page << PAGE_SHIFT), + phys_bank->base_addr + (end_page << PAGE_SHIFT)); + for (j = busy_areai->next; j >= 0; j = busy_areaj->next) { + busy_areaj = __va(&phys_bank->busy_areas[j]); + if (busy_areaj->pages_num == 0) { + INIT_BUG("Node #%d empty physical memory busy " + "area #%d cannot be in the list", + node_id, j); + continue; + } + if (start_page < busy_areaj->start_page) { + if (end_page > busy_areaj->start_page) { + INIT_BUG("The area #%d end page 0x%lx " + "> start page 0x%lx of " + "area #%d", + i, end_page, + busy_areaj->start_page, j); 
+ } + continue; + } + if (start_page < busy_areaj->start_page + + busy_areaj->pages_num) { + INIT_BUG("The area #%d start page 0x%lx < end " + "page 0x%lx of area #%d", + i, start_page, + busy_areaj->start_page + + busy_areaj->pages_num, + j); + } + INIT_BUG("The reserved area #%d with start page " + "0x%lx should be exchanged with area #%d " + "with start page 0x%lx, sequence error\n", + i, start_page, + j, busy_areaj->start_page); +#ifdef CORRECT_SEQUENCE_ERROR + busy_areai->start_page = busy_areaj->start_page; + busy_areai->pages_num = busy_areaj->pages_num; + busy_areaj->start_page = start_page; + busy_areaj->pages_num = end_page - start_page; + start_page = busy_areai->start_page; + end_page = start_page + busy_areai->pages_num; +#endif /* CORRECT_SEQUENCE_ERROR */ + } + } +} + +static void __init register_free_bootmem(void) +{ + e2k_busy_mem_t *busy_area = NULL; + e2k_size_t size; + e2k_addr_t start_addr = -1; + e2k_size_t start_page; + long pages_num; + int nodes_num; + int cur_nodes_num = 0; + int node = 0; + int bank; + int area; + + nodes_num = phys_mem_nodes_num; + for (node = 0; node < L_MAX_MEM_NUMNODES; node++) { + node_phys_mem_t *node_mem = &boot_phys_mem[node]; + boot_phys_bank_t *node_banks; + boot_phys_bank_t *phys_bank; + + if (cur_nodes_num >= nodes_num) + break; /* no more nodes with memory */ + if (node_mem->pfns_num == 0) + continue; /* node has not memory */ + node_banks = node_mem->banks; + cur_nodes_num++; + for (bank = node_mem->first_bank; + bank >= 0; + bank = phys_bank->next) { + phys_bank = &node_banks[bank]; + + if (phys_bank->pages_num == 0) { + /* bank in the list has not pages */ + INIT_BUG("Node #%d bank #%d at the list " + "has not memory pages", + node, bank); + } + + if (phys_bank->busy_areas_num == 0) { + /* + * The bank is fully free + */ + start_addr = phys_bank->base_addr; + size = phys_bank->pages_num * PAGE_SIZE; + memblock_free(start_addr, size); + DebugB("Node #%d bank #%d register free memory " + "from 0x%lx to 
0x%lx\n", + node, bank, + start_addr, start_addr + size); + continue; + } + + /* + * Scan list of all busy areas of physical memory bank + * and collect the holes of contiguous free pages. + */ + start_page = 0; + start_addr = phys_bank->base_addr; + init_check_order_bank_areas(node, phys_bank); + for (area = phys_bank->first_area; + area >= 0; + area = busy_area->next) { + busy_area = __va(&phys_bank->busy_areas[area]); + if (busy_area->pages_num == 0) { + INIT_BUG("Node #%d bank #%d empty " + "physical memory busy area #%d " + "cannot be in the list", + node, bank, area); + continue; + } + if (busy_area->flags & + BOOT_RESERVED_TO_FREE_PHYS_MEM) + /* the area was reserved to free */ + /* it now */ + continue; + + pages_num = busy_area->start_page - start_page; + size = pages_num * PAGE_SIZE; + if (size != 0) { + memblock_free(start_addr, size); + DebugB("Node #%d bank #%d register " + "free memory from 0x%lx " + "to 0x%lx\n", + node, bank, + start_addr, start_addr + size); + } + start_page = busy_area->start_page + + busy_area->pages_num; + start_addr = phys_bank->base_addr + + start_page * PAGE_SIZE; + } + if (start_page < phys_bank->pages_num) { + pages_num = phys_bank->pages_num - start_page; + size = pages_num * PAGE_SIZE; + memblock_free(start_addr, size); + DebugB("Node #%d bank #%d register free " + "memory from 0x%lx to 0x%lx\n", + node, bank, + start_addr, start_addr + size); + } + + memblock_free((phys_addr_t)phys_bank->busy_areas, + BOOT_RESERVED_AREAS_SIZE); + DebugB("Node #%d bank #%d register free memory from 0x%lx to 0x%lx\n", + node, bank, phys_bank->busy_areas, + phys_bank->busy_areas + + BOOT_RESERVED_AREAS_SIZE); + } + } +} + +/* + * Initialize the boot-time allocator and register the available physical + * memory. + */ +static void __init notrace +setup_memory(void) +{ + /* + * Initialize the boot-time allocator. + */ + bootmem_init(); + + /* + * Register the available free physical memory with the + * allocator. 
+ */ + register_free_bootmem(); + +#ifdef CONFIG_BLK_DEV_INITRD + if (initrd_end > initrd_start) { + initrd_start = (long) phys_to_virt(initrd_start); + initrd_end = (long) phys_to_virt(initrd_end); + } +#endif + + memblock_set_current_limit(end_of_phys_memory); + +#ifdef CONFIG_NEED_MULTIPLE_NODES + allocate_node_datas(); +#endif + + init_bootmem_done = 1; + last_valid_pfn = end_of_phys_memory >> PAGE_SHIFT; +} + +static void __init +mark_memory_present(void) +{ + e2k_phys_bank_t *node_phys_banks; + e2k_phys_bank_t *phys_bank; + unsigned long bank_start_pfn, bank_end_pfn; + unsigned int nid = 0; + int bank; + + for_each_online_node(nid) { + node_phys_banks = nodes_phys_mem[nid].banks; + + for (bank = nodes_phys_mem[nid].first_bank; + bank >= 0; + bank = phys_bank->next) { + phys_bank = &node_phys_banks[bank]; + if (phys_bank->pages_num == 0) + break; /* no more memory on node */ + bank_start_pfn = phys_bank->base_addr >> PAGE_SHIFT; + bank_end_pfn = bank_start_pfn + phys_bank->pages_num; + + memory_present(nid, bank_start_pfn, bank_end_pfn); + } + } +} + +static void __init +zone_sizes_init(void) +{ + unsigned long max_zone_pfns[MAX_NR_ZONES]; + unsigned long max_dma_pfn; + +#if defined(CONFIG_ZONE_DMA32) || defined(CONFIG_HIGHMEM) + BUG(); +#endif + + memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); + + max_dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; + + max_zone_pfns[ZONE_DMA] = min(max_dma_pfn, max_low_pfn); + max_zone_pfns[ZONE_NORMAL] = max_low_pfn; + + free_area_init_nodes(max_zone_pfns); +} + +#ifdef CONFIG_MEMORY_HOTPLUG +int arch_add_memory(int nid, u64 start, u64 size, + struct mhp_restrictions *restrictions) +{ + BUG(); + return 0; +} + +void arch_remove_memory(int nid, u64 start, u64 size, + struct vmem_altmap *altmap) +{ + BUG(); +} + +#endif + +#ifdef CONFIG_SPARSEMEM_VMEMMAP +#ifdef CONFIG_MEMORY_HOTPLUG +void vmemmap_free(unsigned long start, unsigned long end, + struct vmem_altmap *altmap) +{ + BUG(); +} +#endif + +int __meminit 
vmemmap_populate(unsigned long start, unsigned long end, int node, + struct vmem_altmap *altmap) +{ + int ret; + + BUILD_BUG_ON(VMEMMAP_END > KERNEL_VPTB_BASE_ADDR || + VMEMMAP_END > NATIVE_HWBUG_WRITE_MEMORY_BARRIER_ADDRESS); + + ret = vmemmap_populate_basepages(start, end, node); + if (ret) { + pr_err("%s(): could not populate sparse memory VMEMMAP " + "from 0x%lx to 0x%lx, error %d\n", + __func__, start, end, ret); + return ret; + } + ret = all_other_nodes_map_vm_area(numa_node_id(), start, end - start); + if (ret) { + pr_err("%s(): node #%d could not populate on other nodes " + "sparse memory VMEMMAP from 0x%lx to 0x%lx, error %d\n", + __func__, numa_node_id(), start, end, ret); + } + return ret; +} +#endif /* CONFIG_SPARSEMEM_VMEMMAP */ + +/* + * CONFIG_SPARSEMEM_VMEMMAP has a drawback: it works only in big chunks of + * memory called "sections" with section size defined by SECTION_SIZE_BITS. + * It cannot be set low otherwise the memory usage by the sections array + * would be too high. + * + * As a result, VGA area [0xa0000-0xc0000] is reported as valid by pfn_valid() + * and attempted to be saved and restored by hibernation code. Work around + * this by explicitly marking all non RAM areas as nosave. + * + * If pfn_valid() gets rid of legacy stuff like section_early() then this + * workaround will probably become unnecessary. + */ +static int __init mark_nonram_nosave(void) +{ + unsigned long spfn, epfn, prev = 0; + int i; + + for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) { + if (prev && prev < spfn) + register_nosave_region(prev, spfn); + + prev = epfn; + } + return 0; +} + +/* + * Setup the page tables + */ +void __init notrace paging_init(void) +{ + int node; + + /* + * Setup the boot-time allocator. 
+ */ + DebugB("Start setup of boot-time allocator\n"); + setup_memory(); + + create_protection_map(protection_map); + + mark_memory_present(); + sparse_init(); + + zone_sizes_init(); + + mark_nonram_nosave(); + + for_each_node_has_dup_kernel(node) { + unsigned long addr = (unsigned long) empty_zero_page; + pgd_t *pgd = node_pgd_offset_kernel(node, addr); + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + pr_info("node%d kernel phys base: 0x%lx\n", + node, +#ifdef CONFIG_NUMA + node_kernel_phys_base[node]); +#else + kernel_phys_base); +#endif + + /* + * Protect the zero page from writing + */ + if (WARN_ON(pgd_none_or_clear_bad(pgd))) { + pr_warning("zero_page: pgd_none returned 1\n"); + continue; + } + if (kernel_pgd_huge(*pgd)) { + /* We cannot protect ZERO_PAGE from writing + * if it is mapped as part of a huge page. */ + pr_warning("WARNING zero_page is mapped with huge page " + "on node %d\n", + node); + continue; + } + pud = pud_offset(pgd, addr); + if (WARN_ON(pud_none_or_clear_bad(pud))) { + pr_warning("zero_page: pud_none returned 1\n"); + continue; + } + if (kernel_pud_huge(*pud)) { + /* We cannot protect ZERO_PAGE from writing + * if it is mapped as part of a huge page. */ + pr_warning("WARNING zero_page is mapped with huge page " + "on node %d\n", + node); + continue; + } + pmd = pmd_offset(pud, addr); + if (WARN_ON(pmd_none_or_clear_bad(pmd))) { + pr_warning("zero_page: pmd_none returned 1\n"); + continue; + } + if (WARN_ON(kernel_pmd_huge(*pmd))) { + /* We cannot protect ZERO_PAGE from writing + * if it is mapped as part of a huge page. 
*/ + pr_warning("WARNING zero_page is mapped with huge page " + "on node %d\n", + node); + continue; + } + pte = pte_offset_kernel(pmd, addr); + + if (WARN_ON(pte_none(*pte) || !pte_present(*pte))) { + pr_warning("zero_page: pte_none returned 1\n"); + continue; + } + + native_set_pte(pte, pte_wrprotect(*pte), false); + + /* + * Initialize the list of zero pages + */ + zero_page_nid_to_pfn[node] = pte_pfn(*pte); + zero_page_nid_to_page[node] = pte_page(*pte); + + /* + * zeroing the zero-page + */ + fast_tagged_memory_set( + (void *) __va(_PAGE_PFN_TO_PADDR(pte_val(*pte))), + 0, CLEAR_MEMORY_TAG, sizeof(empty_zero_page), + LDST_DWORD_FMT << LDST_REC_OPC_FMT_SHIFT); + } + + flush_TLB_page((unsigned long) empty_zero_page, E2K_KERNEL_CONTEXT); + + zeroed_page = phys_to_page(vpa_to_pa(kernel_va_to_pa(empty_zero_page))); +} + +void __init notrace mem_init(void) +{ + e2k_size_t total_pages_num = 0; + e2k_size_t valid_pages_num = 0; + e2k_size_t invalid_pages_num = 0; + + high_memory = __va(last_valid_pfn << PAGE_SHIFT); + + memblock_free_all(); + + set_secondary_space_MMU_state(); + + if (IS_CPU_ISET_V6()) { + if (MMU_IS_PT_V6()) { + pr_info("MMU: Page Table entries new format V6 " + "is used\n"); + } else { + pr_info("MMU: Page Table entries old legacy format " + "is used\n"); + } + } else { + pr_info("MMU: Page Table entries old format V1 is used\n"); + } + if (MMU_IS_SEPARATE_PT()) { + pr_info("MMU: Separate Page Tables for kernel and users\n"); + pr_info("Kernel page table virt base: 0x%lx\n", + MMU_SEPARATE_KERNEL_VPTB); + pr_info("User page table virt base: 0x%lx\n", + MMU_SEPARATE_USER_VPTB); + } else { + pr_info("MMU: United Page Tables for kernel and user\n"); + pr_info("kernel and users page table virt base: 0x%lx\n", + MMU_UNITED_KERNEL_VPTB); + } + pr_info("kernel virt base: 0x%lx, kernel virt end: 0x%lx\n", + KERNEL_BASE, KERNEL_END); + pr_info("Kernel virt base: %016lx, last valid phaddr: %016lx\n", + PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT)); + + 
total_pages_num = (end_of_phys_memory >> PAGE_SHIFT) - + (start_of_phys_memory >> PAGE_SHIFT); + valid_pages_num = pages_of_phys_memory; + invalid_pages_num = total_pages_num - valid_pages_num; + + pr_info("Memory total mapped pages number 0x%lx : valid 0x%lx, invalid 0x%lx\n", + total_pages_num, valid_pages_num, invalid_pages_num); + + mem_init_done = 1; + mem_init_print_info(NULL); +} + +void mark_rodata_ro(void) +{ + unsigned long size = __end_ro_after_init - __start_ro_after_init; + + set_memory_ro((unsigned long)__start_ro_after_init, + size >> PAGE_SHIFT); + + pr_info("Write protected read-only-after-init data: %luk\n", + size >> 10); +} + +/* The call to BOOT_TRACEPOINT and get_lt_timer is valid since it is done + * in the beginning, before .init section is freed. */ +__ref +void free_initmem(void) +{ +#if !defined(CONFIG_RECOVERY) && !defined(CONFIG_E2K_KEXEC) + e2k_addr_t addr; + e2k_addr_t stack_start; + e2k_size_t stack_size; + e2k_size_t pages; + int cpuid; +#endif + +#ifdef CONFIG_BOOT_TRACE + BOOT_TRACEPOINT("Boot trace finished"); + stop_boot_trace(); +#endif + + if (cpu_has(CPU_HWBUG_E8C_WATCHDOG)) { + get_lt_timer(); + writel(WD_EVENT, <_regs->wd_control); + writel(WD_SET_COUNTER_VAL(0), <_regs->wd_limit); + } + + free_reserved_area(__init_text_begin, __init_text_end, -1, "init text"); + free_reserved_area(__init_data_begin, __init_data_end, -1, "init data"); + +#if !defined(CONFIG_RECOVERY) && !defined(CONFIG_E2K_KEXEC) + /* + * Free boot-time hardware & sofware stacks to boot kernel + */ + for_each_online_cpu(cpuid) { + stack_start = kernel_boot_stack_virt_base(cpuid); + stack_size = kernel_boot_stack_size(cpuid); + pages = free_reserved_area((void *)stack_start, + (void *)(stack_start + stack_size), + -1, NULL); + pr_info("Freeing CPU%d boot-time data stack: %ldK (%lx - %lx)\n", + cpuid, (pages * E2K_KERNEL_US_PAGE_SIZE) >> 10, + stack_start, stack_start + stack_size); + + stack_start = kernel_boot_ps_virt_base(cpuid); + stack_size = 
kernel_boot_ps_size(cpuid); + pages = free_reserved_area((void *)stack_start, + (void *)(stack_start + stack_size), + -1, NULL); + pr_info("Freeing CPU%d boot-time procedure stack: %ldK (%lx - %lx)\n", + cpuid, (pages * E2K_KERNEL_PS_PAGE_SIZE) >> 10, + stack_start, stack_start + stack_size); + + stack_start = kernel_boot_pcs_virt_base(cpuid); + stack_size = kernel_boot_pcs_size(cpuid); + pages = free_reserved_area((void *)stack_start, + (void *)(stack_start + stack_size), + -1, NULL); + pr_info("Freeing CPU%d boot-time chain stack: %ldK (%lx - %lx)\n", + cpuid, (pages * E2K_KERNEL_PCS_PAGE_SIZE) >> 10, + stack_start, stack_start + stack_size); + } +#endif /* ! (CONFIG_RECOVERY) */ + +#ifdef CONFIG_DBG_CHAIN_STACK_PROC + if (kernel_symtab != NULL) { + printk("The kernel symbols table addr 0x%px size 0x%lx " + "(0x%lx ... 0x%lx)\n", + kernel_symtab, kernel_symtab_size, + ((long *)kernel_symtab)[0], + ((long *)kernel_symtab)[kernel_symtab_size / + sizeof (long) - 1]); + } + if (kernel_strtab != NULL) { + printk("The kernel strings table addr 0x%px size 0x%lx " + "(0x%lx ... 0x%lx)\n", + kernel_strtab, kernel_strtab_size, + ((long *)kernel_strtab)[0], + ((long *)kernel_strtab)[kernel_strtab_size / + sizeof (long) - 1]); + } +#endif /* CONFIG_DBG_CHAIN_STACK_PROC */ + + ide_info(USING_DMA); +} + +#ifdef CONFIG_BLK_DEV_INITRD +void free_initrd_mem(unsigned long start, unsigned long end) +{ + +/* Nothing to make 'free'. Init RD is now included in bootinfo data. */ +/* free_initmem() now clears the reservation flags for the bootinfo pages. 
*/ + +#if 0 + if (start < end) + printk("Freeing initrd memory: %ldk freed\n", + (end - start) >> 10); + + for (; start < end; start += PAGE_SIZE) { + struct page *p = virt_to_page(start); + + free_reserved_page(p); + } + +#endif + +} +#endif + +void __meminit memmap_init(unsigned long size, int nid, unsigned long zone, + unsigned long start_pfn) +{ + unsigned long real_start, real_end, end_pfn = start_pfn + size; + struct memblock_region *r; + + /* + * Calculate real (actually existing) start and end pfn + */ + real_start = end_pfn; + real_end = start_pfn; + for_each_memblock(memory, r) { + unsigned long region_base = memblock_region_memory_base_pfn(r); + unsigned long region_end = memblock_region_memory_end_pfn(r); + + if (r->nid != nid) + continue; + + if (region_base < real_start && region_end > start_pfn) + real_start = max(region_base, start_pfn); + if (region_end > real_end && region_base < end_pfn) + real_end = min(region_end, end_pfn); + + if (real_start == start_pfn && real_end == end_pfn) + break; + } + + if (real_start >= real_end) { + pr_info(" (Node %d: pfn range [0x%lx-0x%lx] reduced to nothing)\n", + nid, start_pfn, end_pfn); + return; + } + + /* + * Remove hole from the beginning of zone. Cannot remove + * hole from the end, otherwise the check for `zone_end' + * in defer_init() won't work and we lose the optmization + * of startup time by deferring the memory init. So we + * check and remove hole at the end only if: + * - either it is much bigger then the area size, + * - or the deferring won't work anyway (see the check for + * pgdat_end_pfn() in defer_init()). 
+ */ + if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)) || + (real_end - real_start) < (end_pfn - real_end) >> 6) { + pr_info(" (Node %d: pfn range [0x%lx-0x%lx] reduced to [0x%lx-0x%lx])\n", + nid, start_pfn, end_pfn, real_start, real_end); + memmap_init_zone(real_end - real_start, nid, zone, real_start, + MEMINIT_EARLY, NULL); + } else { + pr_info(" (Node %d: pfn range [0x%lx-0x%lx] reduced to [0x%lx-0x%lx], real end 0x%lx)\n", + nid, start_pfn, end_pfn, real_start, end_pfn, real_end); + memmap_init_zone(end_pfn - real_start, nid, zone, real_start, + MEMINIT_EARLY, NULL); + } +} + +/* + * System memory should not be in /proc/iomem but various tools expect it + * (eg kdump). + */ +static int __init add_system_ram_resources(void) +{ + struct memblock_region *reg; + + for_each_memblock(memory, reg) { + struct resource *res; + unsigned long base = reg->base; + unsigned long size = reg->size; + + res = kzalloc(sizeof(struct resource), GFP_KERNEL); + WARN_ON(!res); + + if (res) { + res->name = "System RAM"; + res->start = base; + res->end = base + size - 1; + res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + WARN_ON(request_resource(&iomem_resource, res) < 0); + } + } + + return 0; +} +subsys_initcall(add_system_ram_resources); diff --git a/arch/e2k/mm/ioremap.c b/arch/e2k/mm/ioremap.c new file mode 100644 index 000000000000..2b6284a80d6a --- /dev/null +++ b/arch/e2k/mm/ioremap.c @@ -0,0 +1,202 @@ +/* + * Same as arch/i386/mm/ioremap.c Special thanks to Linus Torvalds. + * + * Re-map IO memory to kernel address space so that we can access it. + * This is needed for high PCI addresses that aren't mapped in the + * 640k-1MB IO memory area + * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#undef DebugIO +#undef DEBUG_IO_REMAP_MODE +#define DEBUG_IO_REMAP_MODE 0 /* Remap IO memory */ +#define DebugIO(...) 
DebugPrint(DEBUG_IO_REMAP_MODE ,##__VA_ARGS__) + +int no_writecombine; +EXPORT_SYMBOL(no_writecombine); + +int __init no_writecombine_setup(char *str) +{ + no_writecombine = 1; + return 1; +} +__setup("no_writecombine", no_writecombine_setup); + +static int remap_area_pages(unsigned long start, + unsigned long phys_addr, unsigned long size, + pte_mem_type_t memory_type) +{ + unsigned long end = start + size; + pgprot_t prot; + + DebugIO("started for addr 0x%lx, size 0x%lx, phys addr 0x%lx, memory type %d\n", + start, size, phys_addr, memory_type); + BUG_ON(start >= end); + if (unlikely(no_writecombine)) + memory_type = EXT_CONFIG_MT; + prot = __pgprot(_PAGE_SET_MEM_TYPE(_PAGE_IO_MAP_BASE, memory_type)); + + return ioremap_page_range(start, end, phys_addr, prot); +} + +/* + * Generic mapping function (not visible outside): + * + * Remap an arbitrary physical address space into the kernel virtual + * address space. Needed when the kernel wants to access high IO (PCI) + * addresses directly. + * + * NOTE! We need to allow non-page-aligned mappings too: we will obviously + * have to convert them into an offset in a page-aligned mapping, but the + * caller shouldn't need to know that small detail. + */ +static void __iomem *__ioremap_caller(resource_size_t phys_addr, + unsigned long size, pte_mem_type_t memory_type, void *caller) +{ + struct vm_struct * area; + unsigned long offset, last_addr, vaddr; + struct page *page; + + DebugIO("started for phys addr 0x%llx, size 0x%lx, memory type %d\n", + phys_addr, size, memory_type); + + /* Don't allow wraparound or zero size */ + last_addr = phys_addr + size - 1; + if (!size || last_addr < phys_addr) + return NULL; + + /* + * Don't remap the low PCI/ISA area, it's always mapped.. + */ + if (phys_addr >= VGA_VRAM_PHYS_BASE && + last_addr < (VGA_VRAM_PHYS_BASE + VGA_VRAM_SIZE)) { + DebugIO("VGA VRAM phys. 
area, it's always mapped\n"); + return phys_to_virt(phys_addr); + } + + /* + * Don't allow anybody to remap normal RAM that we're using.. + */ + if (phys_addr_valid(phys_addr)) { + for (page = virt_to_page(__va(phys_addr)); + page <= virt_to_page(__va(last_addr)); page++) { + if (!PageReserved(page)) { + WARN_ONCE(1, "phys. area at %pa - %pa is not reserved and can not be remapped\n", + &phys_addr, &last_addr); + return NULL; + } + } + } + + /* + * Why would we need ioremap() that early in the boot process? + */ + BUG_ON(!mem_init_done); + + /* + * Mappings have to be page-aligned + */ + offset = phys_addr & ~PAGE_MASK; + phys_addr &= PAGE_MASK; + size = PAGE_ALIGN(last_addr+1) - phys_addr; + + /* + * Ok, go for it.. + */ + area = get_vm_area_caller(size, VM_IOREMAP, caller); + if (!area) + return NULL; + area->phys_addr = phys_addr; + vaddr = (unsigned long) area->addr; + DebugIO("get_vm_area() returned area from addr 0x%lx\n", vaddr); + + if (remap_area_pages(vaddr, phys_addr, size, memory_type)) { + free_vm_area(area); + return NULL; + } + + DebugIO("returns IO area from virt addr 0x%lx\n", vaddr + offset); + return (void *) (vaddr + offset); +} + +void __iomem *ioremap_nocache(resource_size_t address, unsigned long size) +{ + return __ioremap_caller(address, size, EXT_NON_PREFETCH_MT, + __builtin_return_address(0)); +} +EXPORT_SYMBOL(ioremap_nocache); + +void __iomem *ioremap_cache(resource_size_t address, unsigned long size) +{ + /* So, this is fragile. Driver *can* map its device memory + * with ioremap_cache() and later call set_memory_{wc/uc}() + * on it, in which case we must first set memory type to + * "GnC" (and it will work as expected) and then to "XnP/XP". + * + * But at the point when set_memory_{wc/uc} is called pte has + * no information that it belongs to a device so we can not + * possibly know that External memory type is needed. 
+ * + * Fix this by using software bit in PTE to track memory type + * ("External" vs "General" distinction) essentially creating + * a new purely software type: EXT_CACHE_MT. */ + return __ioremap_caller(address, size, EXT_CACHE_MT, + __builtin_return_address(0)); +} +EXPORT_SYMBOL(ioremap_cache); + +void __iomem *ioremap_wc(resource_size_t address, unsigned long size) +{ + return __ioremap_caller(address, size, EXT_PREFETCH_MT, + __builtin_return_address(0)); +} +EXPORT_SYMBOL(ioremap_wc); + +void iounmap(volatile void __iomem *addr) +{ + DebugIO("started for virtual addr 0x%px\n", addr); + + /* + * Don't unmap the VGA area, it's always mapped.. + */ + if (addr >= phys_to_virt(VGA_VRAM_PHYS_BASE) && + addr < phys_to_virt(VGA_VRAM_PHYS_BASE + VGA_VRAM_SIZE)) { + DebugIO("VGA VRAM phys. area, it's always mapped\n"); + return; + } + + addr = (volatile void __iomem *) + (PAGE_MASK & (unsigned long __force) addr); + vunmap((void __force *) addr); +} +EXPORT_SYMBOL(iounmap); + +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +int arch_ioremap_pud_supported(void) +{ + return 0; +} + +int arch_ioremap_pmd_supported(void) +{ + return !IS_MACHINE_ES2; +} + +int arch_ioremap_p4d_supported(void) +{ + return 0; +} +#endif diff --git a/arch/e2k/mm/memory.c b/arch/e2k/mm/memory.c new file mode 100644 index 000000000000..c904f9c7a776 --- /dev/null +++ b/arch/e2k/mm/memory.c @@ -0,0 +1,710 @@ +/* + * arch/e2k/mm/memory.c + * + * Memory management utilities + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#undef DEBUG_PT_MODE +#undef DebugPT +#define DEBUG_PT_MODE 0 /* Page table */ +#define DebugPT(...) 
DebugPrint(DEBUG_PT_MODE, ##__VA_ARGS__) + +#undef DEBUG_PTE_MODE +#undef DebugPTE +#define DEBUG_PTE_MODE 0 /* Page table */ +#define DebugPTE(...) DebugPrint(DEBUG_PTE_MODE, ##__VA_ARGS__) + +#undef DEBUG_PMD_MODE +#undef DebugPMD +#define DEBUG_PMD_MODE 0 /* Page table */ +#define DebugPMD(...) DebugPrint(DEBUG_PMD_MODE, ##__VA_ARGS__) + +#undef DEBUG_PTD_MODE +#undef DebugPTD +#define DEBUG_PTD_MODE 0 /* Page table */ +#define DebugPTD(...) DebugPrint(DEBUG_PTD_MODE, ##__VA_ARGS__) + +#undef DEBUG_NUMA_MODE +#undef DebugNUMA +#define DEBUG_NUMA_MODE 0 /* NUMA supporting */ +#define DebugNUMA(...) DebugPrint(DEBUG_NUMA_MODE, ##__VA_ARGS__) + +void print_va_tlb(e2k_addr_t addr, int large_page) +{ + tlb_line_state_t tlb; + tlb_set_state_t *set; + int set_no; + + get_va_tlb_state(&tlb, addr, large_page); + + for (set_no = 0; set_no < NATIVE_TLB_SETS_NUM; set_no++) { + tlb_tag_t tlb_tag; + pte_t tlb_entry; + + set = &tlb.sets[set_no]; + tlb_tag = set->tlb_tag; + tlb_entry = set->tlb_entry; + printk("TLB addr 0x%lx : set #%d tag 0x%016lx entry " + "0x%016lx\n", + addr, set_no, tlb_tag_val(tlb_tag), pte_val(tlb_entry)); + } +} + +pte_t *node_pte_alloc_kernel(int nid, pmd_t *pmd, e2k_addr_t address) +{ + struct mm_struct *mm = &init_mm; + pte_t *new; + struct page *page; + + page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO, 0); + if (!page) + return NULL; + new = page_address(page); + + spin_lock(&mm->page_table_lock); + if (!pmd_present(*pmd)) { + pmd_populate_kernel(mm, pmd, new); + new = NULL; + } + spin_unlock(&mm->page_table_lock); + if (new) + __free_page(page); + return pte_offset_kernel(pmd, address); +} + +pmd_t *node_pmd_alloc_kernel(int nid, pud_t *pud, e2k_addr_t address) +{ + struct mm_struct *mm = &init_mm; + pmd_t *new; + struct page *page; + + page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO, 0); + if (!page) + return NULL; + new = page_address(page); + + spin_lock(&mm->page_table_lock); + if (!pud_present(*pud)) { + pud_populate_kernel(mm, pud, 
new); + new = NULL; + } + spin_unlock(&mm->page_table_lock); + if (new) + __free_page(page); + return pmd_offset(pud, address); +} + +pud_t *node_pud_alloc_kernel(int nid, pgd_t *pgd, e2k_addr_t address) +{ + struct mm_struct *mm = &init_mm; + pud_t *new; + struct page *page; + + page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO, 0); + if (!page) + return NULL; + new = page_address(page); + + spin_lock(&mm->page_table_lock); + if (!pgd_present(*pgd)) { + node_pgd_populate_kernel(nid, mm, pgd, new); + new = NULL; + } + spin_unlock(&mm->page_table_lock); + if (new) + __free_page(page); + return pud_offset(pgd, address); +} + +#ifdef CONFIG_NUMA +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT +pgd_t * +node_pgd_offset_kernel(int nid, e2k_addr_t virt_addr) +{ + int node_cpu; + + if (!MMU_IS_SEPARATE_PT()) { + node_cpu = node_to_first_present_cpu(nid); + return the_cpu_pg_dir(node_cpu) + pgd_index(virt_addr); + } else { + return the_node_kernel_root_pt(nid) + pgd_index(virt_addr); + } +} + +/* + * Set pgd entry at root PTs of all CPUs of the node + */ +static void node_all_cpus_pgd_set_k(int the_node, int pgd_index, pgd_t pgd) +{ + pgd_t *pgdp; + cpumask_t node_cpus; + int cpu; + + node_cpus = node_to_present_cpumask(the_node); + DebugNUMA("node #%d online cpu mask 0x%lx pgd[0x%x] = 0x%lx\n", + the_node, node_cpus.bits[0], pgd_index, pgd_val(pgd)); + + for_each_cpu_of_node(the_node, cpu, node_cpus) { + pgdp = the_cpu_pg_dir(cpu); + DebugNUMA("set the node #%d CPU #%d pgd " + "entry 0x%px == 0x%lx\n", + the_node, cpu, &pgdp[pgd_index], pgd_val(pgd)); + if (!pgd_none(pgdp[pgd_index])) { + pr_err("node_pgd_set_k() pgd %px is not empty 0x%lx\n", + &pgdp[pgd_index], pgd_val(pgdp[pgd_index])); + BUG(); + } + pgdp[pgd_index] = pgd; + } +} + +/* + * Set specified kernel pgd entry to point to next-level page table PUD + * Need populate the pgd entry into follow root page tables: + * - all CPUs of the specified node; + * - all CPUs of other nodes which have not own copy of 
kernel image + * (DUP KERNEL) and use duplicated kernel of this node + */ +void node_pgd_set_k(int the_node, pgd_t *the_pgdp, pud_t *pudp) +{ + pgd_t pgd = mk_pgd_phys_k(pudp); + int pgd_index = pgd_to_index(the_pgdp); + int dup_node; + nodemask_t node_mask; + int node; + + if (!THERE_IS_DUP_KERNEL) { + kernel_root_pt[pgd_index] = pgd; + DebugNUMA("set kernel root PT pgd " + "entry 0x%px to pud 0x%px\n", + &kernel_root_pt[pgd_index], pudp); + return; + } + + DebugNUMA("node #%d pgd %px == 0x%lx, pudp at %px\n", + the_node, the_pgdp, pgd_val(*the_pgdp), pudp); + + dup_node = node_dup_kernel_nid(the_node); + if (dup_node == the_node) { + if (MMU_IS_SEPARATE_PT()) { + the_node_kernel_root_pt(the_node)[pgd_index] = pgd; + } else { + /* Set pgd entry at root PTs of all CPUs of the node */ + node_all_cpus_pgd_set_k(the_node, pgd_index, pgd); + } + } + + if (DUP_KERNEL_NUM >= phys_nodes_num) { + DebugNUMA("all %d nodes have duplicated " + "kernel so own root PT\n", + DUP_KERNEL_NUM); + return; + } + + /* + * Root and PTs of the node is on other node + * Set pgd entry at root PTs of all CPUs of other node on which + * this node has duplicated kernel + */ + if (dup_node != the_node) { + if (MMU_IS_SEPARATE_PT()) { + the_node_kernel_root_pt(dup_node)[pgd_index] = pgd; + } else { + node_all_cpus_pgd_set_k(dup_node, pgd_index, pgd); + } + } + + /* + * Set pgd entry at root PTs of all CPUs of all other nodes, + * which has duplicated kernel on the same node as this node + */ + for_each_node_has_not_dup_kernel(node, node_mask) { + if (node == the_node) + continue; + if (node_dup_kernel_nid(node) != dup_node) + continue; + if (MMU_IS_SEPARATE_PT()) { + the_node_kernel_root_pt(node)[pgd_index] = pgd; + } else { + node_all_cpus_pgd_set_k(node, pgd_index, pgd); + } + } +} +#else /* ! 
CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + +pgd_t * +node_pgd_offset_kernel(int nid, e2k_addr_t virt_addr) +{ + BUG_ON(!MMU_IS_SEPARATE_PT()); + return the_node_kernel_root_pt(nid) + pgd_index(virt_addr); +} + +/* + * Set specified kernel pgd entry to point to next-level page table PUD + * Need populate the pgd entry into follow root page tables: + * - PT of the specified node, if the node has duplicated kernel; + * - PT of node on which the node is duplicated + * - PTs of all other nodes which have not own copy of kernel image + * (DUP KERNEL) and use duplicated kernel of this node or + * are duplicated on the same node as this node + */ +void node_pgd_set_k(int the_node, pgd_t *the_pgdp, pud_t *pudp) +{ + pgd_t pgd = mk_pgd_phys_k(pudp); + int pgd_index = pgd_to_index(the_pgdp); + int dup_node; + nodemask_t node_mask; + int node; + + BUG_ON(!MMU_IS_SEPARATE_PT()); + + if (!THERE_IS_DUP_KERNEL) { + kernel_root_pt[pgd_index] = pgd; + DebugNUMA("set kernel root PT pgd entry 0x%px to pud 0x%px\n", + &kernel_root_pt[pgd_index], pudp); + return; + } + + DebugNUMA("node #%d pgd %px == 0x%lx, pudp at %px\n", + the_node, the_pgdp, pgd_val(*the_pgdp), pudp); + + dup_node = node_dup_kernel_nid(the_node); + if (dup_node == the_node) { + the_node_kernel_root_pt(the_node)[pgd_index] = pgd; + } + + if (DUP_KERNEL_NUM >= phys_nodes_num) { + DebugNUMA("all %d nodes have duplicated kernel so own PT\n", + DUP_KERNEL_NUM); + return; + } + + /* + * Root and PTs of the node is on other node + * Set pgd entry at root PTs on other node on which + * this node has duplicated kernel + */ + if (dup_node != the_node) { + the_node_kernel_root_pt(dup_node)[pgd_index] = pgd; + } + + /* + * Set pgd entry at root PTs of all CPUs of all other nodes, + * which has duplicated kernel on the same node as this node + */ + for_each_node_has_not_dup_kernel(node, node_mask) { + if (node == the_node) + continue; + if (node_dup_kernel_nid(node) != dup_node) + continue; + 
the_node_kernel_root_pt(node)[pgd_index] = pgd; + } +} +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ +#endif /* CONFIG_NUMA */ + +/* + * Simplistic page force to be valid + */ + +static int e2k_make_single_pmd_valid(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long address, unsigned long next, + int set_invalid, int hpage) +{ + struct mm_struct *mm = vma->vm_mm; + spinlock_t *ptl; + pmd_t pmdv; + + DebugPMD("started from 0x%lx for pmd 0x%lx to %s\n", + address, pmd, (set_invalid) ? "invalidate" : "validate"); + + if (hpage) { + pte_t *huge_pte = (pte_t *) pmd; + + if (E2K_LARGE_PAGE_SIZE == E2K_4M_PAGE_SIZE) { + if (huge_pte && pmd_index(address) % 2) + huge_pte--; + } + + ptl = huge_pte_lockptr(hstate_vma(vma), mm, huge_pte); + } else { + ptl = pmd_lockptr(mm, pmd); + } + + spin_lock(ptl); + + pmdv = *pmd; + + /* Test pmd again under spinlock */ + if (!hpage && !pmd_trans_huge(pmdv) && + (!pmd_none(pmdv) || next - address != PMD_SIZE)) { + spin_unlock(ptl); + return -EAGAIN; + } + + DebugPMD("sets pmd 0x%px to 0x%lx for address 0x%lx\n", + pmd, pmd_val(*pmd), address); + + /* + * We just set _PAGE_VALID. Do not use vm_page_prot to make sure + * that huge_pte_none()/pmd_none() still returns true for this pte. + */ + if (set_invalid) + pmdv = __pmd(_PAGE_CLEAR_VALID(pmd_val(pmdv))); + else + pmdv = __pmd(_PAGE_SET_VALID(pmd_val(pmdv))); + + validate_pmd_at(mm, address, pmd, pmdv); + + spin_unlock(ptl); + + return 0; +} + +static int e2k_make_pte_pages_valid(struct mm_struct *mm, pmd_t *pmd, + e2k_addr_t start_addr, e2k_addr_t end, int set_invalid) +{ + e2k_addr_t address = start_addr; + spinlock_t *ptl; + pte_t *pte, *orig_pte; + + DebugPTE("started from 0x%lx to 0x%lx to %s\n", start_addr, end, + (set_invalid) ? 
"invalidate" : "validate"); + + pte = pte_offset_map_lock(mm, pmd, address, &ptl); + orig_pte = pte; + +#pragma loop count (512) +#pragma unroll (4) + do { + pte_t ptev = *pte; + + DebugPTE("sets pte 0x%px to 0x%lx for address 0x%lx\n", + pte, pte_val(ptev), address); + +#if defined CONFIG_PARAVIRT_GUEST || defined CONFIG_KVM_GUEST_KERNEL + /* probably pte can be already validated/invalidated, + * for example, data stack for sys_execve() is validated + * from setup_arg_pages() -> expand_stack() + * stack validation is completed at get_user_main_c_stack() + * + * It is important to avoid such double validation and + * invalidation in host's shadow page tables. */ + if ((!set_invalid) && pte_valid(*pte)) { + DebugPTE("pte 0x%px == 0x%lx is already valid\n", + pte, pte_val(*pte)); + continue; + } + if (set_invalid && (!pte_valid(*pte))) { + DebugPTE("pte 0x%px == 0x%lx is already invalid\n", + pte, pte_val(*pte)); + continue; + } +#endif + + /* + * We change _PAGE_VALID only here. Do not use + * vm_page_prot to make sure that pte_none() + * still returns true for this pte. 
+ */ + if (set_invalid) + ptev = __pte(_PAGE_CLEAR_VALID(pte_val(ptev))); + else + ptev = __pte(_PAGE_SET_VALID(pte_val(ptev))); + + validate_pte_at(mm, address, pte, ptev); + } while (pte++, address += PAGE_SIZE, (address < end)); + + pte_unmap_unlock(orig_pte, ptl); + + DebugPTE("finished OK\n"); + return 0; +} + +static int +e2k_make_pmd_pages_valid(struct vm_area_struct *vma, pud_t *pud, + e2k_addr_t start_addr, e2k_addr_t end) +{ + unsigned long address = start_addr, next; + pmd_t *pmd = pmd_offset(pud, address); + int set_invalid, ret = 0, hpage = is_vm_hugetlb_page(vma); + struct mm_struct *mm = vma->vm_mm; + + DebugPTD("started from 0x%lx to 0x%lx\n", start_addr, end); + + set_invalid = !_PAGE_TEST_VALID(pgprot_val(vma->vm_page_prot)); + + do { + next = pmd_addr_end(address, end); + +again: + if (!hpage && !pmd_none_or_trans_huge_or_clear_bad(pmd)) { + /* + * pmd is stable and there is the next level + */ + ret = e2k_make_pte_pages_valid(mm, pmd, address, + next, set_invalid); + if (ret != 0) + return ret; + + continue; + } + + if (hpage || pmd_trans_huge(*pmd) || (pmd_none(*pmd) && + next - address == PMD_SIZE)) { + /* + * Set/clear the valid bit on the whole pmd + */ + ret = e2k_make_single_pmd_valid(vma, pmd, address, next, + set_invalid, hpage); + if (ret == -EAGAIN) + goto again; + if (ret) + return ret; + + continue; + } + + /* + * Use __pte_alloc instead of pte_alloc_map, because we can't + * run pte_offset_map on the pmd, if an huge pmd could + * materialize from under us from a different thread. 
+ */ + DebugPTD("will pte_alloc_map(0x%px) for addr 0x%lx\n", + pmd, address); + if (unlikely(__pte_alloc(vma->vm_mm, pmd))) { + DebugPTD("could not alloc pte page for addr 0x%lx\n", + address); + return -ENOMEM; + } + + goto again; + } while (pmd++, address = next, (address < end)); + + DebugPTD("finished OK\n"); + return 0; +} + +static int e2k_make_pud_pages_valid(struct vm_area_struct *vma, pgd_t *pgd, + e2k_addr_t start_addr, e2k_addr_t end) +{ + e2k_addr_t address = start_addr, next; + pud_t *pud = pud_offset(pgd, address); + bool make_pmd_valid; + int ret = 0, hpage = is_vm_hugetlb_page(vma); + + DebugPTD("started from 0x%lx to 0x%lx\n", start_addr, end); + + do { + make_pmd_valid = true; + + if (pud_none(*pud)) { + if (!hpage && (address & PUD_MASK) == address && + end >= pud_addr_bound(address)) { + DebugPTD("will make pud 0x%lx valid & !present for addr 0x%lx\n", + pud, address); + make_pmd_valid = false; + validate_pud_at(vma->vm_mm, address, pud); + } else { + DebugPTD("will pmd_alloc(0x%px) for addr 0x%lx\n", + pud, address); + if (!pmd_alloc(vma->vm_mm, pud, address)) { + DebugPTD("could not alloc pmd for addr 0x%lx\n", + address); + return -ENOMEM; + } + } + } else if (pud_bad(*pud)) { + pud_ERROR(*pud); + BUG(); + } + + next = pud_addr_end(address, end); + + if (make_pmd_valid) { + DebugPTD("will make pmd range pages valid from address 0x%lx to 0x%lx\n", + address, next); + ret = e2k_make_pmd_pages_valid(vma, pud, address, next); + if (ret) + return ret; + } + } while (pud++, address = next, (address < end)); + + DebugPTD("finished OK\n"); + return 0; +} + +static int e2k_make_vma_pages_valid(struct vm_area_struct *vma, + e2k_addr_t start_addr, e2k_addr_t end_addr, int flags) +{ + e2k_addr_t address = start_addr, next; + pgd_t *pgd = pgd_offset(vma->vm_mm, address); + bool make_pud_valid; + int ret = 0, hpage = is_vm_hugetlb_page(vma); + + DebugPT("started from 0x%lx to 0x%lx\n", start_addr, end_addr); + + do { + make_pud_valid = true; + + if 
(pgd_none(*pgd)) { + if (!hpage && (address & PGDIR_MASK) == address && + end_addr >= pgd_addr_bound(address)) { + DebugPTD("will make pgd 0x%lx valid & !present for addr 0x%lx\n", + pgd, address); + make_pud_valid = false; + pgd_populate_not_present(vma->vm_mm, + address, pgd); + } else { + DebugPTD("will pud_alloc(0x%px) for addr 0x%lx\n", + pgd, address); + if (!pud_alloc(vma->vm_mm, pgd, address)) { + DebugPTD("could not alloc pud for addr 0x%lx\n", + address); + return -ENOMEM; + } + } + } else if (pgd_bad(*pgd)) { + pgd_ERROR(*pgd); + BUG(); + } + + next = pgd_addr_end(address, end_addr); + + if (make_pud_valid) { + DebugPTD("will make pud range pages valid from address 0x%lx to 0x%lx\n", + address, next); + ret = e2k_make_pud_pages_valid(vma, pgd, address, next); + if (ret) + return ret; + } + } while (pgd++, address = next, (address < end_addr)); + + /* + * Semispeculative requests can access virtual addresses + * from this validated VM area while these addresses were not + * yet existed and write invalid TLB entry (valid bit = 0). + * So it's need to flush the same TLB entries for all VM areas. + * + * Invalid TLB entries can be created for any level of page table, + * so flush all 4 levels. 
+ */ + if (!ret && (flags & MV_FLUSH)) { + DebugPTD("flush TLB from 0x%lx to 0x%lx\n", + start_addr, end_addr); + flush_tlb_range_and_pgtables(vma->vm_mm, start_addr, end_addr); + } + + DebugPT("finished with %d\n", ret); + + return ret; +} + +int +make_vma_pages_valid(struct vm_area_struct *vma, + unsigned long start_addr, unsigned long end_addr) +{ + int ret; + + BUG_ON(end_addr < start_addr); + + DebugPT("started for VMA 0x%px from start addr 0x%lx to end addr 0x%lx\n", + vma, start_addr, end_addr); + + ret = e2k_make_vma_pages_valid(vma, start_addr, end_addr, MV_FLUSH); + if (ret != 0) { + DebugPT("finished with error %d\n", + ret); + return ret; + } + + DebugPT("finished OK\n"); + return 0; +} + +int +make_all_vma_pages_valid(struct vm_area_struct *vma, int flags) +{ + int ret; + + DebugPT("started for VMA 0x%px from start addr 0x%lx to end addr 0x%lx\n", + vma, vma->vm_start, vma->vm_end); + + ret = e2k_make_vma_pages_valid(vma, vma->vm_start, vma->vm_end, flags); + if (ret != 0) { + DebugPT("finished with error %d\n", + ret); + return ret; + } + + DebugPT("finished OK\n"); + return 0; +} + +int e2k_set_vmm_cui(struct mm_struct *mm, int cui, + unsigned long code_base, unsigned long code_end) +{ + struct vm_area_struct *vma, *prev; + int ret = -EINVAL; + unsigned long vm_flags; + unsigned long off = code_base; + + down_write(&mm->mmap_sem); + while (off < code_end) { + vma = find_vma_prev(mm, off, &prev); + if (!vma || (off == code_base && vma->vm_start > off)) { + pr_err("No vma for 0x%lx : 0x%lx (found vma %lx : %lx)\n", + off, code_end, vma ? vma->vm_start : 0, + vma ? vma->vm_end : 0); + ret = -EINVAL; + goto out; + } + if (off > vma->vm_start) + prev = vma; + vm_flags = (vma->vm_flags & ~VM_CUI) | + ((u64) cui << VM_CUI_SHIFT); + if (ret = mprotect_fixup(vma, &prev, + vma->vm_start < off ? off : vma->vm_start, + vma->vm_end > code_end ? 
code_end : vma->vm_end, + vm_flags)) + goto out; + + off = vma->vm_end; + } + +out: + up_write(&mm->mmap_sem); + return ret; +} diff --git a/arch/e2k/mm/mmap.c b/arch/e2k/mm/mmap.c new file mode 100644 index 000000000000..103953185aa1 --- /dev/null +++ b/arch/e2k/mm/mmap.c @@ -0,0 +1,173 @@ +#include +#include +#include +#include + +#ifdef CONFIG_DRM +#include +#endif /* CONFIG_DRM */ + +#include + +#include + + +#define DEBUG_MMP_MODE 0 /* Memory mapping in protected mode */ +#define DebugMMP(...) DebugPrint(DEBUG_MMP_MODE ,##__VA_ARGS__) + + +/* Get an address range which is currently unmapped. + * For mmap() without MAP_FIXED and shmat() with addr=0. + * + * Ugly calling convention alert: + * Return value with the low bits set means error value, + * ie + * if (ret & ~PAGE_MASK) + * error = ret; + * + * This function "knows" that -ENOMEM has the bits set. + */ +unsigned long +arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + struct mm_struct *mm = current->mm; + struct vm_unmapped_area_info info; + unsigned long begin, end, ret, hole_size; + unsigned long is_protected = TASK_IS_PROTECTED(current); + unsigned long is_32bit = (current->thread.flags & E2K_FLAG_32BIT) && + !is_protected; + + if (flags & MAP_FIXED) { + if (!test_ts_flag(TS_KERNEL_SYSCALL)) { + if (addr >= USER_HW_STACKS_BASE || + addr + len >= USER_HW_STACKS_BASE) + return -ENOMEM; + + if (!TASK_IS_BINCO(current) && is_32bit && + (addr >= TASK32_SIZE || + addr + len >= TASK32_SIZE)) + return -ENOMEM; + } + + return addr; + } + + begin = (addr) ?: mm->mmap_base; + if (!test_ts_flag(TS_KERNEL_SYSCALL)) { + if (is_32bit || is_protected && (flags & MAP_FIRST32)) + end = TASK32_SIZE; + else + end = TASK_SIZE; + end = min(end, USER_HW_STACKS_BASE); + } else { + end = TASK_SIZE; + } + +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + if (TASK_IS_BINCO(current) && ADDR_IN_SS(addr)) { + end = min(end, SS_ADDR_END); + /* Lower mremap() 
address for binary compiler + * must be >= ss_rmp_bottom */ + if (current_thread_info()->ss_rmp_bottom > addr) + begin = current_thread_info()->ss_rmp_bottom; + } +#endif + + hole_size = 0; +#ifdef CONFIG_DRM + if (cpu_has(CPU_HWBUG_PIO_READS)) { + if (filp && filp->f_op && (filp->f_op->open == &drm_open)) + hole_size = 15 * PAGE_SIZE; + } +#endif /* CONFIG_DRM */ + + info.flags = 0; + info.length = len + 2 * hole_size; + info.low_limit = begin; + info.high_limit = end; + info.align_mask = 0; + info.align_offset = 0; + + ret = vm_unmapped_area(&info); + if (!(ret & ~PAGE_MASK)) + ret += hole_size; + + return ret; +} + +unsigned long arch_mmap_rnd(void) +{ + unsigned long rnd; + +#ifdef CONFIG_COMPAT + if (current->thread.flags & (E2K_FLAG_32BIT | E2K_FLAG_PROTECTED_MODE)) + rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1); + else +#endif + rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1); + return rnd << PAGE_SHIFT; +} + +/* + * This function, called very early during the creation of a new + * process VM image, sets up which VM layout function to use: + */ +void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) +{ + unsigned long random_factor = 0UL; + + if (current->flags & PF_RANDOMIZE) + random_factor = arch_mmap_rnd(); + + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; + mm->get_unmapped_area = arch_get_unmapped_area; +} + +/* + * This function is based on vm_munmap() function. + */ +int vm_munmap_notkillable(unsigned long start, size_t len) +{ + struct mm_struct *mm = current->mm; + unsigned long ts_flag; + int ret; + + down_write(&mm->mmap_sem); + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = do_munmap(mm, start, len, NULL); + clear_ts_flag(ts_flag); + up_write(&mm->mmap_sem); + + return ret; +} + +/* + * This function is based on vm_mmap() function. 
+ */ +unsigned long vm_mmap_notkillable(struct file *file, unsigned long addr, + unsigned long len, unsigned long prot, + unsigned long flag, unsigned long offset) +{ + unsigned long ret, populate, ts_flag; + struct mm_struct *mm = current->mm; + + if (unlikely(offset + PAGE_ALIGN(len) < offset)) + return -EINVAL; + if (unlikely(offset_in_page(offset))) + return -EINVAL; + + ret = security_mmap_file(file, prot, flag); + if (!ret) { + down_write(&mm->mmap_sem); + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = do_mmap_pgoff(file, addr, len, prot, flag, + offset >> PAGE_SHIFT, &populate, NULL); + clear_ts_flag(ts_flag); + up_write(&mm->mmap_sem); + if (populate) + mm_populate(ret, populate); + } + return ret; +} diff --git a/arch/e2k/mm/mmu.c b/arch/e2k/mm/mmu.c new file mode 100644 index 000000000000..f917ba0e1d28 --- /dev/null +++ b/arch/e2k/mm/mmu.c @@ -0,0 +1,994 @@ +/* $Id: mmu.c,v 1.21 2009/08/05 16:11:10 kravtsunov_e Exp $ + * arch/e2k/mm/init.c + * + * MMU menegement (Instruction and Data caches, TLB, registers) + * + * Derived heavily from Linus's Alpha/AXP ASN code... + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef DEBUG_IC_MODE +#undef DebugIC +#define DEBUG_IC_MODE 0 /* Instruction Caches */ +#define DebugIC(...) DebugPrint(DEBUG_IC_MODE ,##__VA_ARGS__) + +#undef DEBUG_PT_MODE +#undef DebugPT +#define DEBUG_PT_MODE 0 /* Data Caches */ +#define DebugPT(...) DebugPrint(DEBUG_PT_MODE ,##__VA_ARGS__) + +#ifndef CONFIG_SMP +unsigned long mmu_last_context = CTX_FIRST_VERSION; +#endif /* !CONFIG_SMP */ + +/* + * Hardware MMUs page tables have some differences from one ISET to other + * moreover each MMU supports a few different page tables: + * native (primary) + * secondary page tables for sevral modes (VA32, VA48, PA32, PA48 ...) 
+ * The follow structure presents native page table structure + * + * Warning .boot_*() entries should be updated dinamicaly to point to + * physical addresses of functions for arch/e2k/p2v/ + */ +pt_struct_t __nodedata pgtable_struct = { + .type = E2K_PT_TYPE, + .pt_v6 = false, /* as default for compatibility */ + .pfn_mask = _PAGE_PFN_V2, + .accessed_mask = _PAGE_A_HW_V2, + .dirty_mask = _PAGE_D_V2, + .present_mask = _PAGE_P_V2, + .user_mask = 0ULL, + .priv_mask = _PAGE_PV_V2, + .non_exec_mask = _PAGE_NON_EX_V2, + .exec_mask = 0ULL, + .sw_bit1_mask = _PAGE_AVAIL_BIT_V2, + .sw_bit2_mask = _PAGE_A_SW_V2, + .levels_num = E2K_PT_LEVELS_NUM, + .levels = { + [E2K_PAGES_LEVEL_NUM] = { + .id = E2K_PAGES_LEVEL_NUM, + .page_size = PAGE_SIZE, + }, + [E2K_PTE_LEVEL_NUM] = { + .id = E2K_PTE_LEVEL_NUM, + .pt_size = PTE_SIZE, + .page_size = PAGE_SIZE, + .pt_shift = PTE_SHIFT, + .page_shift = PTE_SHIFT, + .pt_mask = PTE_MASK & _PAGE_PFN_V2, + .pt_offset = ~PTE_MASK & _PAGE_PFN_V2, + .pt_index_mask = PTE_MASK ^ PMD_MASK, + .page_mask = PTE_MASK, + .page_offset = ~PTE_MASK, + .ptrs_per_pt = PTRS_PER_PTE, + .is_pte = true, + .is_huge = false, + .dtlb_type = COMMON_DTLB_TYPE, + }, + [E2K_PMD_LEVEL_NUM] = { + .id = E2K_PMD_LEVEL_NUM, + .pt_size = PMD_SIZE, + .pt_shift = PMD_SHIFT, + .pt_mask = PMD_MASK & _PAGE_PFN_V2, + .pt_offset = ~PMD_MASK & _PAGE_PFN_V2, + .pt_index_mask = PMD_MASK ^ PUD_MASK, + .ptrs_per_pt = PTRS_PER_PMD, +#if CONFIG_CPU_ISET >= 3 + .page_size = E2K_2M_PAGE_SIZE, + .page_shift = PMD_SHIFT, + .page_offset = ~PMD_MASK, + .huge_ptes = 1, +#elif CONFIG_CPU_ISET >= 1 + .page_size = E2K_4M_PAGE_SIZE, + .page_shift = PMD_SHIFT + 1, + .page_offset = (E2K_4M_PAGE_SIZE - 1), + .huge_ptes = 2, + .boot_set_pte = &boot_set_double_pte, + .init_pte_clear = &init_double_pte_clear, + .boot_get_huge_pte = &boot_get_double_huge_pte, + .init_get_huge_pte = &init_get_double_huge_pte, + .split_pt_page = &split_multiple_pmd_page, +#elif CONFIG_CPU_ISET == 0 + /* page size and 
functions should be set dinamicaly */ + .page_size = -1, +#else /* CONFIG_CPU_ISET undefined or negative */ +# warning "Undefined CPU ISET VERSION #, PAGE SIZE not defined" + .page_size = -1, +#endif /* CONFIG_CPU_ISET 0-6 */ + .is_pte = false, + .is_huge = true, + .dtlb_type = COMMON_DTLB_TYPE, + }, + [E2K_PUD_LEVEL_NUM] = { + .id = E2K_PUD_LEVEL_NUM, + .pt_size = PUD_SIZE, + .page_size = PAGE_PUD_SIZE, + .pt_shift = PUD_SHIFT, + .page_shift = PUD_SHIFT, + .pt_mask = PUD_MASK & _PAGE_PFN_V2, + .pt_offset = ~PUD_MASK & _PAGE_PFN_V2, + .pt_index_mask = PUD_MASK ^ PGDIR_MASK, + .page_mask = PUD_MASK, + .page_offset = ~PUD_MASK, + .ptrs_per_pt = PTRS_PER_PUD, + .is_pte = false, +#if CONFIG_CPU_ISET >= 5 + .is_huge = true, + .huge_ptes = 1, + .dtlb_type = FULL_ASSOCIATIVE_DTLB_TYPE, +#elif CONFIG_CPU_ISET >= 1 + .is_huge = false, +#elif CONFIG_CPU_ISET == 0 + /* huge page enable should be set dinamicaly */ + .is_huge = false, +#else /* CONFIG_CPU_ISET undefined or negative */ +# warning "Undefined CPU ISET VERSION #, huge page enable not defined" + .is_huge = false, +#endif /* CONFIG_CPU_ISET 0-6 */ + +#if CONFIG_CPU_ISET == 1 || CONFIG_CPU_ISET == 2 + .map_pt_huge_page_to_prev_level = + &map_pud_huge_page_to_multiple_pmds, +#endif /* CONFIG_CPU_ISET 1-2 */ + }, + [E2K_PGD_LEVEL_NUM] = { + .id = E2K_PGD_LEVEL_NUM, + .pt_size = PGDIR_SIZE, + .page_size = PAGE_PGD_SIZE, + .pt_shift = PGDIR_SHIFT, + .page_shift = PGDIR_SHIFT, + .pt_mask = PGDIR_MASK & E2K_VA_MASK, + .pt_offset = ~PGDIR_MASK & E2K_VA_MASK, + .pt_index_mask = PGDIR_MASK & E2K_VA_MASK, + .page_mask = PGDIR_MASK, + .page_offset = ~PGDIR_MASK, + .ptrs_per_pt = PTRS_PER_PGD, + .is_pte = false, + .is_huge = false, + }, + }, +}; +EXPORT_SYMBOL(pgtable_struct); +/* + * TLB flushing: + */ + +/* + * Flush all processes TLBs of the processor + */ +void +__flush_tlb_all(void) +{ + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); + flush_TLB_all(); +} + +/* + * Flush just one specified address of current process. 
+ */ +void __flush_tlb_address(e2k_addr_t addr) +{ + unsigned long context; + + context = current->active_mm->context.cpumsk[raw_smp_processor_id()]; + + if (unlikely(context == 0)) { + /* See comment in __flush_tlb_range(). */ + __flush_tlb_mm(current->active_mm); + } else { + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); + flush_TLB_page(addr, CTX_HARDWARE(context)); + } +} + +/* + * Flush the TLB entries mapping the virtually mapped linear page + * table corresponding to specified address of current process. + */ +void __flush_tlb_address_pgtables(e2k_addr_t addr) +{ + unsigned long context; + + context = current->active_mm->context.cpumsk[raw_smp_processor_id()]; + + if (unlikely(context == 0)) { + /* See comment in __flush_tlb_range(). */ + __flush_tlb_mm(current->active_mm); + } else { + flush_TLB_page_begin(); + /* flush virtual mapping of PTE entry (third level) */ + __flush_TLB_page(pte_virt_offset(_PAGE_ALIGN_UP(addr, + PTE_SIZE)), + CTX_HARDWARE(context)); + /* flush virtual mapping of PMD entry (second level) */ + __flush_TLB_page(pmd_virt_offset(_PAGE_ALIGN_UP(addr, + PMD_SIZE)), + CTX_HARDWARE(context)); + /* flush virtual mapping of PUD entry (first level) */ + __flush_TLB_page(pud_virt_offset(_PAGE_ALIGN_UP(addr, + PUD_SIZE)), + CTX_HARDWARE(context)); + flush_TLB_page_end(); + } +} + +/* + * Flush just one page of a specified user. + */ +void +__flush_tlb_page(struct mm_struct *mm, e2k_addr_t addr) +{ + unsigned long context; + + context = mm->context.cpumsk[raw_smp_processor_id()]; + + if (unlikely(context == 0)) { + /* See comment in __flush_tlb_range(). 
*/ + __flush_tlb_mm(mm); + return; + } + + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); + + flush_TLB_page_begin(); + __flush_TLB_page(addr, CTX_HARDWARE(context)); + /* flush virtual mapping of PTE entry (third level) */ + __flush_TLB_page(pte_virt_offset(addr), CTX_HARDWARE(context)); + flush_TLB_page_end(); +} + +/* + * Flush a specified user mapping on the processor + */ +void +__flush_tlb_mm(struct mm_struct *mm) +{ + int cpu; + + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); + + if (mm == current->active_mm) { + unsigned long ctx, flags; + + /* Should update right now */ + DebugPT("mm context will be reloaded\n"); + raw_all_irq_save(flags); + cpu = smp_processor_id(); + ctx = get_new_mmu_context(mm, cpu); + reload_context_mask(ctx); + raw_all_irq_restore(flags); + + DebugPT("CPU #%d new mm context is 0x%lx\n", + cpu, mm->context.cpumsk[cpu]); + } else { + cpu = raw_smp_processor_id(); +#ifdef CONFIG_SMP + /* Remove this cpu from mm_cpumask. This might be + * needed, for example, after sys_io_setup() if the + * kernel thread which was using this mm received + * flush ipi (unuse_mm() does not clear mm_cpumask). + * And maybe there are other such places where + * a kernel thread uses user mm. */ + cpumask_clear_cpu(cpu, mm_cpumask(mm)); +#endif + mm->context.cpumsk[cpu] = 0; + } +} + + +/* + * Flush a specified range of pages + */ + +/* If the number of pages to be flushed is below this value, + * then only those pages will be flushed. + * + * Flushing one page takes ~150 cycles, flushing the whole mm + * takes ~400 cycles. Also note that __flush_tlb_range() may + * be called repeatedly for the same process so high values + * are bad. 
*/ +#define FLUSH_TLB_RANGE_MAX_PAGES 8 + +void __flush_tlb_range(struct mm_struct *const mm, + const e2k_addr_t start, const e2k_addr_t end) +{ + const long pages_num = (PAGE_ALIGN_DOWN(end) - PAGE_ALIGN_UP(start)) + / PAGE_SIZE; + + BUG_ON(start > end); + + DebugPT("range start 0x%lx end 0x%lx context 0x%lx mm 0x%px cnt 0x%lx CPU #%d\n", + PAGE_ALIGN_UP(start), PAGE_ALIGN_DOWN(end), + CTX_HARDWARE(mm->context.cpumsk[raw_smp_processor_id()]), + mm, mm->context.cpumsk[raw_smp_processor_id()], + raw_smp_processor_id()); + + if (pages_num <= FLUSH_TLB_RANGE_MAX_PAGES) { + unsigned long page, pmd_start, pmd_end; + unsigned long ctx = CTX_HARDWARE( + mm->context.cpumsk[raw_smp_processor_id()]); + + if (unlikely(ctx == 0)) { + /* We were trying to flush a range of pages, + * but someone is flushing the whole mm. + * Now we cannot flush pages (we do not know + * the context) so we have to flush the whole mm. + * + * Even if we will receive the flush ipi we will + * just end up flushing mm twice - which is OK + * considering how rare this case is. */ + goto flush_mm; + } + + count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, pages_num); + + flush_TLB_page_begin(); + for (page = PAGE_ALIGN_UP(start); page < end; + page += PAGE_SIZE) + __flush_TLB_page(page, ctx); + /* + * flush virtual mapping of PTE entry (third level) + * + * Needed because Linux assumes that flush_tlb_*() + * interfaces flush both pte and pmd levels (this + * may be changed in future versions, in which case + * this flush can be removed). + */ + pmd_start = pte_virt_offset(round_down(start, PMD_SIZE)); + pmd_end = pte_virt_offset(round_up(end, PMD_SIZE)); + for (page = round_down(pmd_start, PAGE_SIZE); + page < pmd_end; page += PAGE_SIZE) + __flush_TLB_page(page, ctx); + flush_TLB_page_end(); + } else { +flush_mm: + /* Too many pages to flush. + * It is faster to change the context instead. 
+ * If mm != current->active_mm then setting this + * CPU's mm context to 0 will do the trick, + * otherwise we duly increment it. */ + __flush_tlb_mm(mm); + } +} + +void __flush_pmd_tlb_range(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + long pages_num; + + BUG_ON(start > end); + + end = round_up(end, PMD_SIZE); + start = round_down(start, PMD_SIZE); + + pages_num = (end - start) / PMD_SIZE; + + if (pages_num <= FLUSH_TLB_RANGE_MAX_PAGES) { + unsigned long pmd_start, pmd_end; + e2k_addr_t page; + unsigned long ctx = CTX_HARDWARE( + mm->context.cpumsk[raw_smp_processor_id()]); + + if (unlikely(ctx == 0)) { + /* We were trying to flush a range of pages, + * but someone is flushing the whole mm. + * Now we cannot flush pages (we do not know + * the context) so we have to flush the whole mm. + * + * Even if we will receive the flush ipi we will + * just end up flushing mm twice - which is OK + * considering how rare this case is. */ + goto flush_mm; + } + + count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, + pages_num * (PMD_SIZE / PTE_SIZE)); + + flush_TLB_page_begin(); + for (page = start; page < end; page += PMD_SIZE) + __flush_TLB_page(page, ctx); + /* + * flush virtual mapping of PTE entry (third level). + * + * When flushing high order page table entries, + * we must also flush all links below it. E.g. when + * flushing PMD, also flush PMD->PTE link (i.e. DTLB + * entry for address 0xff8000000000|(address >> 9)). + * + * Otherwise the following can happen: + * 1) High-order page is allocated. + * 2) Someone accesses the PMD->PTE link (e.g. half-spec. load) + * and creates invalid entry in DTLB. + * 3) High-order page is split into 4 Kb pages. + * 4) Someone accesses the PMD->PTE link address (e.g. DTLB + * entry probe) and reads the invalid entry created earlier. 
+ */ + pmd_start = pte_virt_offset(round_down(start, PMD_SIZE)); + pmd_end = pte_virt_offset(round_up(end, PMD_SIZE)); + for (page = round_down(pmd_start, PAGE_SIZE); + page < pmd_end; page += PAGE_SIZE) + __flush_TLB_page(page, ctx); + flush_TLB_page_end(); + } else { +flush_mm: + /* Too many pages to flush. + * It is faster to change the context instead. + * If mm != current->active_mm then setting this + * CPU's mm context to 0 will do the trick, + * otherwise we duly increment it. */ + __flush_tlb_mm(mm); + } +} + +/* + * Flush the TLB entries mapping the virtually mapped linear page + * table corresponding to address range [start : end]. + */ +void __flush_tlb_pgtables(struct mm_struct *mm, e2k_addr_t start, + e2k_addr_t end) +{ + const long pages_num = (PAGE_ALIGN_DOWN(end) - PAGE_ALIGN_UP(start)) + / PAGE_SIZE; + + BUG_ON(start > end); + + DebugPT("range start 0x%lx end 0x%lx context 0x%lx mm 0x%px cnt 0x%lx CPU #%d\n", + PAGE_ALIGN_UP(start), PAGE_ALIGN_DOWN(end), + CTX_HARDWARE(mm->context.cpumsk[raw_smp_processor_id()]), + mm, mm->context.cpumsk[raw_smp_processor_id()], + raw_smp_processor_id()); + + if (pages_num <= FLUSH_TLB_RANGE_MAX_PAGES) { + e2k_addr_t page; + unsigned long range_begin, range_end; + unsigned long ctx = CTX_HARDWARE( + mm->context.cpumsk[raw_smp_processor_id()]); + + if (unlikely(ctx == 0)) { + /* We were trying to flush a range of pages, + * but someone is flushing the whole mm. + * Now we cannot flush pages (we do not know + * the context) so we have to flush the whole mm. + * + * Even if we will receive the flush ipi we will + * just end up flushing mm twice - which is OK + * considering how rare this case is. 
*/ + goto flush_mm; + } + + flush_TLB_page_begin(); + + /* flush virtual mapping of PTE entries (third level) */ + range_begin = pte_virt_offset(_PAGE_ALIGN_UP(start, PTE_SIZE)); + range_end = pte_virt_offset(_PAGE_ALIGN_DOWN(end, PTE_SIZE)); + for (page = PAGE_ALIGN_UP(range_begin); page < range_end; + page += PAGE_SIZE) + __flush_TLB_page(page, ctx); + + /* flush virtual mapping of PMD entries (second level) */ + range_begin = pmd_virt_offset(_PAGE_ALIGN_UP(start, PMD_SIZE)); + range_end = pmd_virt_offset(_PAGE_ALIGN_DOWN(end, PMD_SIZE)); + for (page = PAGE_ALIGN_UP(range_begin); page < range_end; + page += PAGE_SIZE) + __flush_TLB_page(page, ctx); + + /* flush virtual mapping of PUD entries (first level) */ + range_begin = pud_virt_offset(_PAGE_ALIGN_UP(start, PUD_SIZE)); + range_end = pud_virt_offset(_PAGE_ALIGN_DOWN(end, PUD_SIZE)); + for (page = PAGE_ALIGN_UP(range_begin); page < range_end; + page += PAGE_SIZE) + __flush_TLB_page(page, ctx); + + flush_TLB_page_end(); + } else { +flush_mm: + /* Too many pages to flush. + * It is faster to change the context instead. + * If mm != current->active_mm then setting this + * CPU's mm context to 0 will do the trick, + * otherwise we duly increment it. */ + __flush_tlb_mm(mm); + } +} + +/* + * Flush a specified range of pages and the TLB entries mapping the virtually + * mapped linear page table corresponding to address range [start : end]. + */ +void +__flush_tlb_range_and_pgtables(struct mm_struct *mm, e2k_addr_t start, + e2k_addr_t end) +{ + __flush_tlb_range(mm, start, end); + __flush_tlb_pgtables(mm, start, end); +} + +void __flush_tlb_page_and_pgtables(struct mm_struct *mm, unsigned long address) +{ + unsigned long page; + unsigned long start = address, end = address + E2K_MAX_FORMAT; + unsigned long range_begin, range_end; + unsigned long context = mm->context.cpumsk[raw_smp_processor_id()]; + + if (unlikely(context == 0)) { + /* See comment in __flush_tlb_range(). 
*/ + __flush_tlb_mm(mm); + return; + } + + context = CTX_HARDWARE(context); + + flush_TLB_page_begin(); + + /* flush virtual mapping of PUD entries (first level) */ + range_begin = pud_virt_offset(_PAGE_ALIGN_UP(start, PUD_SIZE)); + range_end = pud_virt_offset(_PAGE_ALIGN_DOWN(end, PUD_SIZE)); + for (page = PAGE_ALIGN_UP(range_begin); page < range_end; + page += PAGE_SIZE) + __flush_TLB_page(page, context); + + /* flush virtual mapping of PMD entries (second level) */ + range_begin = pmd_virt_offset(_PAGE_ALIGN_UP(start, PMD_SIZE)); + range_end = pmd_virt_offset(_PAGE_ALIGN_DOWN(end, PMD_SIZE)); + for (page = PAGE_ALIGN_UP(range_begin); page < range_end; + page += PAGE_SIZE) + __flush_TLB_page(page, context); + + /* flush virtual mapping of PTE entries (third level) */ + range_begin = pte_virt_offset(_PAGE_ALIGN_UP(start, PTE_SIZE)); + range_end = pte_virt_offset(_PAGE_ALIGN_DOWN(end, PTE_SIZE)); + for (page = PAGE_ALIGN_UP(range_begin); page < range_end; + page += PAGE_SIZE) + __flush_TLB_page(page, context); + + for (page = PAGE_ALIGN_UP(start); page < end; page += PAGE_SIZE) + __flush_TLB_page(page, context); + + flush_TLB_page_end(); +} + +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT +/* + * Update all user PGD entries of current active mm. + * PGDs are updated into CPU root page table from main user PGD table + */ +void +__flush_cpu_root_pt_mm(struct mm_struct *mm) +{ + if (MMU_IS_SEPARATE_PT()) + return; + if (!THERE_IS_DUP_KERNEL) + return; + if (current->active_mm != mm) + return; + copy_user_pgd_to_kernel_root_pt(mm->pgd); +} +/* + * Update all user PGD entries of current active mm. 
+ * PGDs are updated into CPU root page table from main user PGD table + */ +void +__flush_cpu_root_pt(void) +{ + if (MMU_IS_SEPARATE_PT()) + return; + if (!THERE_IS_DUP_KERNEL) + return; + if (current->active_mm == &init_mm || !current->active_mm) + return; + copy_user_pgd_to_kernel_root_pt(current->active_mm->pgd); +} +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + +/* + * CACHES flushing: + */ + +static void __write_back_cache_L3(void) +{ + unsigned long flags; + l3_ctrl_t l3_ctrl; + int node; + + raw_all_irq_save(flags); + node = numa_node_id(); + + /* Set bit of L3 control register to flush L3 */ + AW(l3_ctrl) = sic_read_node_nbsr_reg(node, SIC_l3_ctrl); + AS(l3_ctrl).fl = 1; + sic_write_node_nbsr_reg(node, SIC_l3_ctrl, AW(l3_ctrl)); + + /* Wait for flush completion */ + if (cpu_has(CPU_FEAT_ISET_V5)) { + do { + AW(l3_ctrl) = sic_read_node_nbsr_reg(node, SIC_l3_ctrl); + } while (AS(l3_ctrl).fl); + } else { + l3_reg_t l3_diag; + + l3_diag = sic_read_node_nbsr_reg(node, SIC_l3_b0_diag_dw); + l3_diag = sic_read_node_nbsr_reg(node, SIC_l3_b1_diag_dw); + l3_diag = sic_read_node_nbsr_reg(node, SIC_l3_b2_diag_dw); + l3_diag = sic_read_node_nbsr_reg(node, SIC_l3_b3_diag_dw); + l3_diag = sic_read_node_nbsr_reg(node, SIC_l3_b4_diag_dw); + l3_diag = sic_read_node_nbsr_reg(node, SIC_l3_b5_diag_dw); + l3_diag = sic_read_node_nbsr_reg(node, SIC_l3_b6_diag_dw); + l3_diag = sic_read_node_nbsr_reg(node, SIC_l3_b7_diag_dw); + + __E2K_WAIT_ALL; + } + + raw_all_irq_restore(flags); +} + +static void write_back_cache_all_ipi(void *unused) +{ + int cpu, node; + + write_back_CACHE_L12(); + + cpu = smp_processor_id(); + node = numa_node_id(); + if (machine.L3_enable && cpu == cpumask_first(cpumask_of_node(node))) + __write_back_cache_L3(); +} + +/* + * Write Back and Invalidate all caches in the system + */ +void write_back_cache_all(void) +{ + /* + * This is rather low-level function + * so do not use on_each_cpu() here. 
+ */ + nmi_on_each_cpu(write_back_cache_all_ipi, NULL, 1, 0); +} +EXPORT_SYMBOL(write_back_cache_all); + +void write_back_cache_range(unsigned long start, size_t size) +{ + /* Some arbitrary condition */ + if (size < SZ_64K) + flush_DCACHE_range((void *) start, size); + else + write_back_cache_all(); +} + + +/* + * Write Back and Invalidate all caches for current cpu + */ +void local_write_back_cache_all(void) +{ + migrate_disable(); + write_back_CACHE_L12(); + __write_back_cache_L3(); + migrate_enable(); +} + +void local_write_back_cache_range(unsigned long start, size_t size) +{ + /* Some arbitrary condition */ + if (size < SZ_64K) + flush_DCACHE_range((void *) start, size); + else + local_write_back_cache_all(); +} + +/* + * Invalidate all ICACHES of the host processor + */ +void native_flush_icache_all(void) +{ + DebugIC("started flush_icache_all()\n"); + flush_ICACHE_all(); +} + +/* + * Flush a specified range of addresses of specified context + * from ICACHE of the processor + */ +void +flush_icache_other_range(e2k_addr_t start, e2k_addr_t end, + unsigned long context) +{ + e2k_addr_t addr; + + preempt_disable(); + DebugIC("started: start 0x%lx end 0x%lx context 0x%lx\n", + start, end, context); + + /* + * It is better to flush_ICACHE_all() if flush range is very big. 
+ */ + if ((end - start) / E2K_ICACHE_SET_SIZE > E2K_ICACHE_LINES_NUM) { + DebugIC("will flush_ICACHE_all()\n"); + flush_ICACHE_all(); + preempt_enable(); + return; + } + + flush_ICACHE_line_begin(); + for (addr = round_down(start, E2K_ICACHE_SET_SIZE); + addr < round_up(end, E2K_ICACHE_SET_SIZE); + addr += E2K_ICACHE_SET_SIZE) { + DebugIC("will flush_ICACHE_line_sys() 0x%lx\n", + addr); + __flush_ICACHE_line_sys(addr, CTX_HARDWARE(context)); + } + flush_ICACHE_line_end(); + + DebugIC("finished: start 0x%lx end 0x%lx context 0x%lx\n", + start, end, context); + preempt_enable(); +} + +/* + * Flush a specified range of addresses of kernel from ICACHE + * of the processor + */ + +void native_flush_icache_range(e2k_addr_t start, e2k_addr_t end) +{ + e2k_addr_t addr; + + DebugIC("started: start 0x%lx end 0x%lx\n", start, end); + + start = round_down(start, E2K_ICACHE_SET_SIZE); + end = round_up(end, E2K_ICACHE_SET_SIZE); + + if (cpu_has(CPU_FEAT_FLUSH_DC_IC)) { + flush_DCACHE_line_begin(); + for (addr = start; addr < end; addr += E2K_ICACHE_SET_SIZE) { + DebugIC("will flush_DCACHE_line() 0x%lx\n", addr); + __flush_DCACHE_line(addr); + } + flush_DCACHE_line_end(); + } else { + flush_ICACHE_line_begin(); + for (addr = start; addr < end; addr += E2K_ICACHE_SET_SIZE) { + DebugIC("will flush_ICACHE_line_sys() 0x%lx\n", addr); + __flush_ICACHE_line_sys(addr, E2K_KERNEL_CONTEXT); + } + flush_ICACHE_line_end(); + } + + DebugIC("finished: start 0x%lx end 0x%lx\n", start, end); +} +EXPORT_SYMBOL(native_flush_icache_range); + +/* + * Flush an array of a specified range of addresses of specified context from + * ICACHE of the processor + */ + +void native_flush_icache_range_array(icache_range_array_t *icache_range_arr) +{ + int i; + unsigned long context; + int cpu = smp_processor_id(); + + context = icache_range_arr->mm->context.cpumsk[cpu]; + + DebugIC("started: icache_range_arr " + "0x%lx\n", + icache_range_arr); + if (context) { + for (i = 0; i < icache_range_arr->count; i++) { 
+ icache_range_t icache_range = + icache_range_arr->ranges[i]; + flush_icache_other_range( + icache_range.start, + icache_range.end, + context); + } + } else if (icache_range_arr->mm == current->active_mm) { + unsigned long ctx, flags; + + raw_all_irq_save(flags); + ctx = get_new_mmu_context(icache_range_arr->mm, cpu); + reload_context_mask(ctx); + raw_all_irq_restore(flags); + } + DebugIC("finished: icache_range_arr " + "0x%lx\n", + icache_range_arr); +} + +/* + * Flush just one specified page from ICACHE of all processors + */ +void native_flush_icache_page(struct vm_area_struct *vma, struct page *page) +{ + /* + * icache on all cpus can be flushed from current cpu + * on E2S + */ + if (cpu_has(CPU_FEAT_FLUSH_DC_IC)) { + unsigned long start = (e2k_addr_t) page_address(page); + + BUILD_BUG_ON(PAGE_SIZE != 16 * E2K_ICACHE_SET_SIZE); + flush_DCACHE_line_begin(); + __flush_DCACHE_line(start); + __flush_DCACHE_line_offset(start, E2K_ICACHE_SET_SIZE); + __flush_DCACHE_line_offset(start, 2 * E2K_ICACHE_SET_SIZE); + __flush_DCACHE_line_offset(start, 3 * E2K_ICACHE_SET_SIZE); + __flush_DCACHE_line_offset(start, 4 * E2K_ICACHE_SET_SIZE); + __flush_DCACHE_line_offset(start, 5 * E2K_ICACHE_SET_SIZE); + __flush_DCACHE_line_offset(start, 6 * E2K_ICACHE_SET_SIZE); + __flush_DCACHE_line_offset(start, 7 * E2K_ICACHE_SET_SIZE); + __flush_DCACHE_line_offset(start, 8 * E2K_ICACHE_SET_SIZE); + __flush_DCACHE_line_offset(start, 9 * E2K_ICACHE_SET_SIZE); + __flush_DCACHE_line_offset(start, 10 * E2K_ICACHE_SET_SIZE); + __flush_DCACHE_line_offset(start, 11 * E2K_ICACHE_SET_SIZE); + __flush_DCACHE_line_offset(start, 12 * E2K_ICACHE_SET_SIZE); + __flush_DCACHE_line_offset(start, 13 * E2K_ICACHE_SET_SIZE); + __flush_DCACHE_line_offset(start, 14 * E2K_ICACHE_SET_SIZE); + __flush_DCACHE_line_offset(start, 15 * E2K_ICACHE_SET_SIZE); + flush_DCACHE_line_end(); + + return; + } + + preempt_disable(); + DebugIC("started: VMA 0x%px page 0x%px\n", + vma, page); + if (vma->vm_flags & VM_EXEC) { + 
struct mm_struct *mm = vma->vm_mm; + /* + * invalid context will update + * while activating or switching to + */ + mm->context.cpumsk[raw_smp_processor_id()] = 0; + if (mm == current->active_mm) { + int num_cpu = raw_smp_processor_id(); + unsigned long ctx, flags; + + /* This is called, e.g., as a result of exec(). */ + /* Should update right now */ + DebugIC("mm context will be " + "reload\n"); + raw_all_irq_save(flags); + ctx = get_new_mmu_context(mm, num_cpu); + reload_context_mask(ctx); + raw_all_irq_restore(flags); + } else { + DebugIC("mm context will be " + "invalidate\n"); + } + } + DebugIC("finished: VMA 0x%px page 0x%px\n", + vma, page); + preempt_enable(); +} + +int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) +{ + mm_context_t *mmu, *oldmmu; + struct sival_ptr_list *oldlink; + + if (!oldmm) + return 0; + if (!mm) + return -EINVAL; + + oldmmu = &oldmm->context; + mmu = &mm->context; + + init_rwsem(&mmu->sival_ptr_list_sem); + INIT_LIST_HEAD(&mmu->sival_ptr_list_head); + + /* Duplicating oldmmu->sival_ptr_list: */ + down_read(&oldmmu->sival_ptr_list_sem); + list_for_each_entry(oldlink, &oldmmu->sival_ptr_list_head, link) { + struct sival_ptr_list *newlink; + + newlink = kmalloc(sizeof(*newlink), GFP_KERNEL); + if (!newlink) { + up_read(&oldmmu->sival_ptr_list_sem); + return -ENOMEM; + } + *newlink = *oldlink; + list_add(&newlink->link, &mmu->sival_ptr_list_head); + } + up_read(&oldmmu->sival_ptr_list_sem); + DebugIC(": sival_ptr_list duplicated 0x%px --> 0x%px\n", + oldmm, mm); + + mmu->pm_sc_debug_mode = oldmmu->pm_sc_debug_mode; + + return 0; +} + +void arch_exit_mmap(struct mm_struct *mm) +{ + struct sival_ptr_list *sival_ptr, *tmp; + + if (mm == NULL) + return; + + /* Release mmu->sival_ptr_list */ + list_for_each_entry_safe(sival_ptr, tmp, + &mm->context.sival_ptr_list_head, link) { + DebugIC(": kfree(%px)\n", sival_ptr); + list_del(&sival_ptr->link); + kfree(sival_ptr); + } + + /* Release hw_contexts */ + 
hw_contexts_destroy(&mm->context); +} + +/* + * Initialize a new mmu context. This is invoked when a new + * address space instance (unique or shared) is instantiated. + * This just needs to set mm->context[] to an invalid context. + */ +int __init_new_context(struct task_struct *p, struct mm_struct *mm, + mm_context_t *context) +{ + bool is_fork = p && (p != current); + int ret; + + memset(&context->cpumsk, 0, nr_cpu_ids * sizeof(context->cpumsk[0])); + + if (is_fork) { + /* + * Copy data on user fork + */ + mm_context_t *curr_context = ¤t->mm->context; + + /* + * Copy cut mask from the context of parent process + * to the context of new process + */ + mutex_lock(&curr_context->cut_mask_lock); + bitmap_copy((unsigned long *) &context->cut_mask, + (unsigned long *) &curr_context->cut_mask, + USER_CUT_AREA_SIZE/sizeof(e2k_cute_t)); + mutex_unlock(&curr_context->cut_mask_lock); + } else { + /* + * Initialize by zero cut_mask of new process + */ + mutex_init(&context->cut_mask_lock); + bitmap_zero((unsigned long *) &context->cut_mask, + USER_CUT_AREA_SIZE/sizeof(e2k_cute_t)); + } + + atomic_set(&context->tstart, 1); + + init_rwsem(&context->sival_ptr_list_sem); + INIT_LIST_HEAD(&context->sival_ptr_list_head); + + INIT_LIST_HEAD(&context->delay_free_stacks); + init_rwsem(&context->core_lock); + + INIT_LIST_HEAD(&context->cached_stacks); + spin_lock_init(&context->cached_stacks_lock); + context->cached_stacks_size = 0; + + if (mm == NULL) + return 0; + + ret = hw_contexts_init(p, context, is_fork); + return ret; +} diff --git a/arch/e2k/mm/node_vmap.c b/arch/e2k/mm/node_vmap.c new file mode 100644 index 000000000000..ab36277e04dc --- /dev/null +++ b/arch/e2k/mm/node_vmap.c @@ -0,0 +1,419 @@ +/* + * Mapping/unmapping of kernel virtual area on the NUMA node. 
+ * Each node can have own page table to access to own copy of + * kernel duplicated text and data + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#undef DebugNM +#undef DEBUG_NODE_MAP_MODE +#define DEBUG_NODE_MAP_MODE 0 /* Map kernel virtual memory on node */ +#define DebugNM(...) DebugPrint(DEBUG_NODE_MAP_MODE ,##__VA_ARGS__) + +#undef DebugNUMA +#undef DEBUG_NUMA_MODE +#define DEBUG_NUMA_MODE 0 /* NUMA */ +#define DebugNUMA(...) DebugPrint(DEBUG_NUMA_MODE ,##__VA_ARGS__) + +#undef DebugNUMAM +#undef DEBUG_NUMA_MAP_MODE +#define DEBUG_NUMA_MAP_MODE 0 /* NUMA mapping */ +#define DebugNUMAM(...) DebugPrint(DEBUG_NUMA_MAP_MODE ,##__VA_ARGS__) + +static inline pte_t *node_pte_alloc_k(int nid, pmd_t *pmd, + unsigned long address) +{ + if (pmd_none(*pmd)) { + if (mem_init_done) + return node_pte_alloc_kernel(nid, pmd, address); + else + return node_early_pte_alloc(nid, pmd, address); + } + return pte_offset_kernel(pmd, address); +} + +static inline pmd_t *node_pmd_alloc_k(int nid, pud_t *pud, + unsigned long address) +{ + if (pud_none(*pud)) { + if (mem_init_done) + return node_pmd_alloc_kernel(nid, pud, address); + else + return node_early_pmd_alloc(nid, pud, address); + } + return pmd_offset(pud, address); +} + +static inline pud_t *node_pud_alloc_k(int nid, pgd_t *pgd, + unsigned long address) +{ + if (pgd_none(*pgd)) { + if (mem_init_done) + return node_pud_alloc_kernel(nid, pgd, address); + else + return node_early_pud_alloc(nid, pgd, address); + } + return pud_offset(pgd, address); +} + +static inline int node_map_vm_area_pte(int nid_to, + pmd_t *pmd_from, pmd_t *pmd_to, + unsigned long address, unsigned long end) +{ + pte_t *pte_from, *pte_to; + + DebugNM("started for pmd from 0x%px == 0x%lx, " + "pmd to 0x%px = 0x%lx, addr 0x%lx, end 0x%lx\n", + pmd_from, pmd_val(*pmd_from), pmd_to, pmd_val(*pmd_to), + address, end); + pte_from = pte_offset_kernel(pmd_from, address); + if (pte_none(*pte_from)) { + printk("CPU #%d 
node_map_vm_area_pte() pte from is none " + "0x%px = 0x%lx\n", + smp_processor_id(), pte_from, pte_val(*pte_from)); + BUG(); + } + pte_to = node_pte_alloc_k(nid_to, pmd_to, address); + if (!pte_to) + return -ENOMEM; + if (address >= end) + BUG(); + do { + DebugNM("addr 0x%lx, pte from 0x%px = " + "0x%lx, pte to 0x%px = 0x%lx\n", + address, pte_from, pte_val(*pte_from), + pte_to, pte_val(*pte_to)); + if (!pte_none(*pte_to)) { + printk("node_map_vm_area_pte(): page already exists, " + "addr 0x%lx pte 0x%px == 0x%lx\n", + address, pte_to, pte_val(*pte_to)); + BUG(); + } + set_pte(pte_to, *pte_from); + } while (pte_from ++, pte_to ++, address += PAGE_SIZE, address != end); + return 0; +} + +static inline int node_map_vm_area_pmd(int nid_to, + pud_t * pud_from, pud_t *pud_to, + unsigned long address, unsigned long end) +{ + pmd_t *pmd_from, *pmd_to; + unsigned long next; + e2k_size_t page_size; + + DebugNM("started for pud from 0x%px == 0x%lx, pud to 0x%px = 0x%lx, " + "addr 0x%lx, end 0x%lx\n", + pud_from, pud_val(*pud_from), pud_to, pud_val(*pud_to), + address, end); + pmd_from = pmd_offset(pud_from, address); + if (pmd_none(*pmd_from)) + BUG(); + pmd_to = node_pmd_alloc_k(nid_to, pud_to, address); + if (!pmd_to) + return -ENOMEM; + if (address >= end) + BUG(); + page_size = get_pmd_level_page_size(); + do { + DebugNM("addr 0x%lx, pmd from 0x%px = 0x%lx, " + "pmd to 0x%px = 0x%lx\n", + address, pmd_from, pmd_val(*pmd_from), + pmd_to, pmd_val(*pmd_to)); + next = pmd_addr_end(address, end); + if (kernel_pmd_huge(*pmd_from)) { + pte_t *pte = (pte_t *) pmd_offset(pud_from, + (address & ~(page_size - 1))); + DebugNM("detected large page pmd 0x%px = 0x%lx " + "for address 0x%lx\n", + pte, pte_val(*pte), address); + set_pte((pte_t *)pmd_to, *pte); + continue; + } + if (node_map_vm_area_pte(nid_to, pmd_from, pmd_to, + address, next)) + return -ENOMEM; + } while (pmd_from++, pmd_to++, address = next, address != end); + return 0; +} + +static inline int node_map_vm_area_pud(int 
nid_to, + pgd_t *dir_from, pgd_t *dir_to, + unsigned long address, unsigned long end) +{ + pud_t *pud_from, *pud_to; + unsigned long next; + e2k_size_t page_size; + + DebugNM("started for pgd from 0x%px = 0x%lx, " + "pgd to 0x%px = 0x%lx, addr 0x%lx, end 0x%lx\n", + dir_from, pgd_val(*dir_from), dir_to, pgd_val(*dir_to), + address, end); + pud_from = pud_offset(dir_from, address); + if (pud_none(*pud_from)) + BUG(); + pud_to = node_pud_alloc_k(nid_to, dir_to, address); + if (!pud_to) + return -ENOMEM; + if (address >= end) + BUG(); + page_size = get_pud_level_page_size(); + do { + DebugNM("addr 0x%lx, pud from 0x%px = " + "0x%lx, pud to 0x%px = 0x%lx\n", + address, pud_from, pud_val(*pud_from), + pud_to, pud_val(*pud_to)); + next = pud_addr_end(address, end); + if (kernel_pud_huge(*pud_from)) { + pte_t *pte = (pte_t *) pud_offset(dir_from, + (address & ~(page_size - 1))); + DebugNM("detected large page pud 0x%px = 0x%lx " + "for address 0x%lx\n", + pte, pte_val(*pte), address); + set_pte((pte_t *)pud_to, *pte); + continue; + } + if (node_map_vm_area_pmd(nid_to, pud_from, pud_to, + address, next)) + return -ENOMEM; + } while (pud_from++, pud_to++, address = next, address != end); + return 0; +} + +static int __ref node_do_map_vm_area(int nid_from, int nid_to, + unsigned long address, unsigned long size) +{ + pgd_t *dir_from, *dir_to; + unsigned long end = address + size; + unsigned long next; + e2k_size_t page_size; + int ret = 0; + + DebugNUMA("started on node #%d, node from #%d " + "node to #%d, addr 0x%lx, size 0x%lx\n", + numa_node_id(), nid_from, nid_to, address, size); + if (address >= end) + BUG(); + if (nid_from == nid_to) + BUG(); + dir_from = node_pgd_offset_kernel(nid_from, address); + if (pgd_none(*dir_from)) + BUG(); + dir_to = node_pgd_offset_kernel(nid_to, address); + page_size = get_pgd_level_page_size(); + do { + DebugNM("addr 0x%lx, pgd from 0x%px = " + "0x%lx, pgd to 0x%px = 0x%lx\n", + address, dir_from, pgd_val(*dir_from), + dir_to, 
pgd_val(*dir_to)); + next = pgd_addr_end(address, end); + if (kernel_pgd_huge(*dir_from)) { + pte_t *pte = (pte_t *)node_pgd_offset_kernel(nid_from, + (address & ~(page_size - 1))); + DebugNM("detected large page pgd 0x%px = 0x%lx " + "for address 0x%lx\n", + pte, pte_val(*pte), address); + set_pte((pte_t *)dir_to, *pte); + continue; + } + if (node_map_vm_area_pud(nid_to, dir_from, dir_to, + address, next)) { + ret = -ENOMEM; + break; + } + } while (dir_from ++, dir_to ++, address = next, address != end); + return ret; +} + +int node_map_vm_area(int nid_from, nodemask_t nodes_to, + unsigned long address, unsigned long size) +{ + int dup_nid_from; + int nid_to; + int ret = 0; + + DebugNUMAM("started on node #%d to map from #%d " + "to 0x%lx addr 0x%lx, size 0x%lx\n", + numa_node_id(), nid_from, nodes_addr(nodes_to)[0], + address, size); + dup_nid_from = node_dup_kernel_nid(nid_from); + if (dup_nid_from != nid_from) { + DebugNUMAM("node #%d has not own copy and " + "use copy of node #%d\n", + nid_from, dup_nid_from); + } + for_each_node_mask(nid_to, nodes_to) { + if (!node_has_dup_kernel(nid_to)) + continue; + if (nid_to == dup_nid_from) + continue; + ret = node_do_map_vm_area(dup_nid_from, nid_to, address, size); + if (ret) + break; + } + return ret; +} + +static void node_unmap_vm_area_pte(int nid, pmd_t *pmd, + unsigned long addr, unsigned long end) +{ + pte_t *pte; + + pte = pte_offset_kernel(pmd, addr); + do { + pte_t ptent = ptep_get_and_clear((&init_mm), addr, pte); + WARN_ON(!pte_none(ptent) && !pte_present(ptent)); + } while (pte++, addr += PAGE_SIZE, addr != end); +} + +static inline void node_unmap_vm_area_pmd(int nid, pud_t *pud, + unsigned long addr, unsigned long end) +{ + pmd_t *pmd; + unsigned long next; + e2k_size_t page_size; + + pmd = pmd_offset(pud, addr); + page_size = get_pmd_level_page_size(); + do { + next = pmd_addr_end(addr, end); + if (pmd_none_or_clear_bad(pmd)) + continue; + if (kernel_pmd_huge(*pmd)) { + pte_t *pte = (pte_t *) 
pmd_offset(pud, + (addr & ~(page_size - 1))); + DebugNM("detected large page pmd 0x%px = 0x%lx " + "for address 0x%lx\n", + pte, pte_val(*pte), addr); + ptep_get_and_clear((&init_mm), addr, pte); + continue; + } + node_unmap_vm_area_pte(nid, pmd, addr, next); + } while (pmd++, addr = next, addr != end); +} + +static inline void node_unmap_vm_area_pud(int nid, pgd_t *pgd, + unsigned long addr, unsigned long end) +{ + pud_t *pud; + unsigned long next; + e2k_size_t page_size; + + pud = pud_offset(pgd, addr); + page_size = get_pud_level_page_size(); + do { + next = pud_addr_end(addr, end); + if (pud_none_or_clear_bad(pud)) + continue; + if (kernel_pud_huge(*pud)) { + pte_t *pte = (pte_t *) pud_offset(pgd, + (addr & ~(page_size - 1))); + DebugNM("detected large page pmd 0x%px = 0x%lx " + "for address 0x%lx\n", + pte, pte_val(*pte), addr); + ptep_get_and_clear((&init_mm), addr, pte); + continue; + } + node_unmap_vm_area_pmd(nid, pud, addr, next); + } while (pud++, addr = next, addr != end); +} + +static void +node_do_unmap_vm_area(int nid, unsigned long address, unsigned long size) +{ + pgd_t *pgd; + unsigned long next; + unsigned long addr = address; + unsigned long end = address + size; + e2k_size_t page_size; + + DebugNUMA("started on node #%d for " + "node #%d, addr 0x%lx, size 0x%lx\n", + numa_node_id(), nid, address, size); + pgd = node_pgd_offset_kernel(nid, addr); + page_size = get_pgd_level_page_size(); + do { + next = pgd_addr_end(addr, end); + if (pgd_none_or_clear_bad(pgd)) + continue; + if (kernel_pgd_huge(*pgd)) { + pte_t *pte = (pte_t *) node_pgd_offset_kernel(nid, + (addr & ~(page_size - 1))); + DebugNM("detected large page pgd 0x%px = 0x%lx " + "for address 0x%lx\n", + pte, pte_val(*pte), addr); + ptep_get_and_clear((&init_mm), addr, pte); + continue; + } + node_unmap_vm_area_pud(nid, pgd, addr, next); + } while (pgd++, addr = next, addr != end); +} + +nodemask_t inline get_node_dup_kernel_map(nodemask_t nodes) +{ + nodemask_t dup_nodes = nodes; + int 
nid; + int dup_nid; + + for_each_node_mask(nid, nodes) { + if (node_has_dup_kernel(nid)) + continue; + dup_nid = node_dup_kernel_nid(nid); + if (dup_nid != nid) { + node_clear(nid, dup_nodes); + node_set(dup_nid, dup_nodes); + } + } + return dup_nodes; +} + +void +node_unmap_kernel_vm_area_noflush(nodemask_t nodes, unsigned long start, + unsigned long end) +{ + unsigned long size = end - start; + int nid; + + BUG_ON(start >= end); + /* + * If the function will be used for arbitrary nodes map (not only + * whole node_has_dup_kernel_map or without current node + * then need uncomment following function call + */ + /* nodes = get_node_dup_kernel_map(nodes); */ + for_each_node_mask(nid, nodes) { + if (!node_has_dup_kernel(nid)) + continue; + node_do_unmap_vm_area(nid, start, size); + } +} + +void node_unmap_vm_area_noflush(nodemask_t nodes, struct vm_struct *area) +{ + unsigned long address = (unsigned long) area->addr; + unsigned long end = address + area->size; + + node_unmap_kernel_vm_area_noflush(nodes, address, end); +} + +void node_unmap_kmem_area(nodemask_t nodes, + unsigned long address, unsigned long size) +{ + unsigned long end = address + size; + int nid; + + BUG_ON(address >= end); + for_each_node_mask(nid, nodes) { + if (!node_has_dup_kernel(nid)) + continue; + node_do_unmap_vm_area(nid, address, size); + } +} diff --git a/arch/e2k/mm/page_io.c b/arch/e2k/mm/page_io.c new file mode 100644 index 000000000000..abef14cc78eb --- /dev/null +++ b/arch/e2k/mm/page_io.c @@ -0,0 +1,1032 @@ +/* + * page_io.c + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef DEBUG_TAG_PAGE +/*define DEBUG_TAG_PAGE 1 */ + +#undef DEBUG_M +/*#define DEBUG_M 1 */ + +#undef DEBUG_TAG_MODE +#undef DebugTM +#define DEBUG_TAG_MODE 0 /* Tag memory */ +#define DebugTM(...) 
DebugPrint(DEBUG_TAG_MODE, ##__VA_ARGS__) + +#undef DEBUG_TAG_MODE_W +#undef DebugTM_W +#define DEBUG_TAG_MODE_W 1 /* Tag memory */ +#define DebugTM_W(...) DebugPrint(DEBUG_TAG_MODE_W, ##__VA_ARGS__) + + +#define SetPageLocked __SetPageLocked +#define ClearPageLocked __ClearPageLocked + +static struct tags_swap_page_table tags_swap_table[MAX_SWAPFILES]; +static int was_swap_write_page = 0; +static int CLEAR = 0; +static int WRITING = 0; +static int READ_SWAP = 0; +static int READ_SWAP_WITH_TAGS = 0; +static int WRITE_SWAP = 0; +static int WRITE_SWAP_WITH_TAG = 0; +static int READ_SWAP_TAGS_FROM_CASH = 0; +static int PROCESSED_WRITE_SWAP_WITH_TAG = 0; +static int MIN_OFFSET = 0xfffffff; +static int MAX_OFFSET = 0; +static int FREE_ENTRY = 0; +static int LOOP_ENTRY = 0; +static int IO_LOOP_ENTRY = 0; +static int COUNT = 0; +static int WR = 0; + + +extern void wake_up_page(struct page *page, int bit); +extern struct swap_extent *offset_to_swap_extent(struct swap_info_struct *sis, + unsigned long offset); + +static void unlock_tag_page(struct page *page) +{ + page = compound_head(page); + clear_bit_unlock(PG_locked, &page->flags); + smp_mb__after_atomic(); + wake_up_page(page, PG_locked); +} + +static inline int is_busy_tag_page(struct page *page) +{ + return test_bit(PG_locked, &page->flags); +} + +static inline void set_buse_tag_page(struct page *page) +{ + return set_bit(PG_locked, &page->flags); +} + +/* + * similar to swap_info_get in swapfile.c + */ +static inline int get_swp_type(struct page *page) +{ + swp_entry_t entry; + + entry.val = page_private(page); + return swp_type(entry); +} + +static inline int get_swp_offset(struct page *page) +{ + swp_entry_t entry; + + entry.val = page_private(page); + return swp_offset(entry); +} + + +static struct swap_info_struct *e2k_page_swap_info(struct page *page) +{ + swp_entry_t swap = { .val = page_private(page) }; + + return swap_info[swp_type(swap)]; +} + +static struct swap_info_struct *e2k_swap_info_get(swp_entry_t 
entry) +{ + struct swap_info_struct *p; + unsigned long offset, type; + + if (!entry.val) + goto out; + type = swp_type(entry); + if (type >= nr_swapfiles) + goto bad_nofile; + p = swap_info[type]; + if (!(p->flags & SWP_USED)) + goto bad_device; + offset = swp_offset(entry); + if (offset >= p->max) + goto bad_offset; + if (!p->swap_map[offset]) + goto bad_free; + return p; + +bad_free: + goto out; +bad_offset: + pr_err("2 swap_free: %08lx\n", entry.val); + goto out; +bad_device: + pr_err("3 swap_free: %08lx\n", entry.val); + goto out; +bad_nofile: + pr_err("4 swap_free: %08lx\n", entry.val); +out: + return NULL; +} + +/* + * Now the data of pages and tags are located as follows: + * Firstly, the data of all swap pages is then their tags + * N sector for tag = (se->start_block + se->nr_pages )<<(PAGE_SHIFT - 9)+ + * (offset - se->start_page) + * N sector for data = (se->start_block + (offset - se->start_page)) + * << (PAGE_SHIFT - 9) + */ +void e2k_map_swap_page(struct page *page, struct bio *bio, + struct block_device **bdev) +{ + struct swap_info_struct *sis; + struct swap_extent *se; + swp_entry_t entry; + pgoff_t offset; + + if (!was_swap_write_page) + BUG(); + + entry.val = page_private(page); + offset = swp_offset(entry); + + sis = e2k_swap_info_get(entry); + if (!sis) + BUG(); + + *bdev = sis->bdev; + + se = offset_to_swap_extent(sis, offset); + + bio->bi_iter.bi_sector = + (((se->start_block + se->nr_pages) << (PAGE_SHIFT - 9)) + + (offset - se->start_page)); +} +#ifdef CONFIG_MCST_MEMORY_SANITIZE +EXPORT_SYMBOL(e2k_map_swap_page); /* for lkdm testing */ +#endif + + +static long e2k_real_map_swap_page1(struct page *page, struct bio *bio, int tag) +{ + struct swap_info_struct *sis; + struct swap_extent *se; + swp_entry_t entry; + pgoff_t offset; + long sector; + + if (!was_swap_write_page) + return 0; + + entry.val = page_private(page); + offset = swp_offset(entry); + + sis = e2k_swap_info_get(entry); + if (!sis) + return 0; + + se = 
offset_to_swap_extent(sis, offset); + + sector = (tag) ? + ((se->start_block + se->nr_pages) << (PAGE_SHIFT - 9) + + (offset - se->start_page)) : + (se->start_block + (offset - se->start_page)); + + return sector; +} + +static void set_clear_tag_page(struct page *page) +{ + if (is_busy_tag_page(page)) + unlock_tag_page(page); +} + +u32 save_tags_from_data(u64 *datap, u8 *tagp) +{ + u32 res = 0; + int i; + + for (i = 0; i < (int) TAGS_BYTES_PER_PAGE; i++) { + u64 data_lo, data_hi; + u8 tag_lo, tag_hi, tag; + + load_qvalue_and_tagq((unsigned long) &datap[2 * i], + &data_lo, &data_hi, &tag_lo, &tag_hi); + tag = tag_lo | (tag_hi << 4); + + tagp[i] = tag; + res |= tag; + } + + return res; +} + +static long save_tags_from_page(struct page *src_page, struct page *dst_page) +{ + u64 *data_addr; + u8 *tag_addr; + long res; + + DebugTM("Starting copying the tags from the " + "page 0x%px (addr 0x%px) to the tags page 0x%px (addr 0x%px)\n", + src_page, page_address(src_page), + dst_page, page_address(dst_page)); + + tag_addr = (u8 *)page_address(dst_page); + data_addr = (u64 *)page_address(src_page); + res = save_tags_from_data(data_addr, tag_addr); + return res; +} + +void restore_tags_for_data(u64 *datap, u8 *tagp) +{ + int i; + + for (i = 0; i < (int) TAGS_BYTES_PER_PAGE; i++) { + u64 data_lo = datap[2 * i], data_hi = datap[2 * i + 1]; + u32 tag = (u32) tagp[i]; + + store_tagged_dword(&datap[2 * i], data_lo, tag); + store_tagged_dword(&datap[2 * i + 1], data_hi, tag >> 4); + } +} + +static void restore_tags_from_page(struct page *dst_page, struct page *src_page) +{ + u64 *data_addr; + u8 *tag_addr; + + DebugTM("Starting copying the tags from the " + "page 0x%px (addr 0x%px) to the data page 0x%px (addr 0x%px)\n", + src_page, page_address(src_page), + dst_page, page_address(dst_page)); + + tag_addr = (u8 *)page_address(src_page); + data_addr = (u64 *)page_address(dst_page); + restore_tags_for_data(data_addr, tag_addr); + set_clear_tag_page(src_page); +} + +static struct bio 
*e2k_get_swap_bio(int gfp_flags, struct page *tag_page, + struct page *page, + bio_end_io_t end_io, int size) +{ int i, nr = hpage_nr_pages(page); + struct bio *bio; + + bio = bio_alloc(gfp_flags, nr); + if (bio) { + struct block_device *bdev; + + e2k_map_swap_page(page, bio, &bdev); + bio_set_dev(bio, bdev); + bio->bi_end_io = end_io; + + DebugTM(" bi_sector = %llx page=%px tag_page=%px\n", + bio->bi_iter.bi_sector, page, tag_page); + + for (i = 0; i < nr; i++) + bio_add_page(bio, tag_page + i, size, 0); + + VM_BUG_ON(bio->bi_iter.bi_size != PAGE_SIZE * nr); + } + + return bio; +} + +static void clear_tags_was_writing(struct page *page) +{ + struct swap_info_struct *sis = e2k_page_swap_info(page); + unsigned long offset; + swp_entry_t entry; + + CLEAR++; + if (!sis) { + DebugTM_W(" %s BAD0!!!! page=%px\n", __func__, page); + return; + } + entry.val = page_private(page); + offset = swp_offset(entry); + + clear_bit(2*offset + 1, sis->tag_swap_map); +} + +static void e2k_end_swap_bio_write(struct bio *bio) +{ + struct page *page = bio->bi_io_vec[0].bv_page; + + DebugTM(" page=%px bi_sector=0x%llx page_address=%px\n", + page, (unsigned long long)bio->bi_iter.bi_sector, + page_address(bio->bi_io_vec[0].bv_page)); + if (bio->bi_status) { + SetPageError(page); + /* + * We failed to write the page out to swap-space. + * Re-dirty the page in order to avoid it being reclaimed. + * Also print a dire warning that things will go BAD (tm) + * very quickly. 
+ * + * Also clear PG_reclaim to avoid rotate_reclaimable_page() + */ + set_page_dirty(page); + pr_info("%s: Write-error %d on swap-device (%u:%u:%llu)\n", + current->comm, bio->bi_status, + MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)), + (unsigned long long)bio->bi_iter.bi_sector); + ClearPageReclaim(page); + } + smp_mb__after_atomic(); + wake_up_page(page, PG_writeback); + bio_put(bio); + clear_tags_was_writing(page); + set_clear_tag_page(page); + PROCESSED_WRITE_SWAP_WITH_TAG++; +} + +#ifdef DEBUG_M +#define MAX_CMP_PAGE 8192 +struct page *WRITE_PAGE[MAX_CMP_PAGE]; +long OFFSET_PAGE[MAX_CMP_PAGE]; +int index_write_page; +raw_spinlock_t lock_write_pages; +long FIND_PAGE = 0, NOT_FIND_PAGE = 0; + +static void compare_page(struct page *page, struct page *write_page, int j) +{ + int i; + u64 data, write_data; + u8 tag, write_tag; + u64 *addr_page = (u64 *)page_address(page); + u64 *addr_write_page = (u64 *)page_address(write_page); + int once = 1; + static int BAD = 0; + + for (i = 0; i < PAGE_SIZE/8; i++) { + load_value_and_tagd(addr_page, &data, &tag); + load_value_and_tagd(addr_write_page, &write_data, &write_tag); + if (tag != write_tag || data != write_data) { + if (BAD++ > 10) + continue; + if (once) { + unsigned long offset; + swp_entry_t entry; + entry.val = page_private(page); + offset = swp_offset(entry); + once = 0; + pr_info(" ind =%d page=%px offset=0x%lx flags==0x%lx\n", + j, page, offset, page->flags); + tracing_off(); + dump_stack(); + } + pr_info(" i=%d tag=%d write_tag=%d data=0x%lx" + " write_data=0x%lx addr_page=0x%px" + " addr_write_page=%px\n", + i, tag, write_tag, data, write_data, addr_page, + addr_write_page); + + } + addr_page++; + addr_write_page++; + } +} + +static void init_cmp_page(void) +{ + int i; + struct page *page; + + raw_spin_lock_init(&lock_write_pages); + index_write_page = 0; + for (i = 0; i < MAX_CMP_PAGE; i++) { + page = alloc_page(GFP_KERNEL); + if (!page) + panic("Cannot allocate pages for init_cmp_page"); + get_page(page); + 
WRITE_PAGE[i] = page; + } +} + +static void save_page(struct page *page) +{ + struct page *res; + swp_entry_t entry; + int ind = 0; + unsigned long flags; + + entry.val = page_private(page); + raw_spin_lock_irqsave(&lock_write_pages, flags); + ind = index_write_page; + index_write_page++; + if (index_write_page >= MAX_CMP_PAGE) { + index_write_page = 0; + } + raw_spin_unlock_irqrestore(&lock_write_pages, flags); + res = WRITE_PAGE[ind]; + copy_tagged_page(page_address(res), page_address(page)); + OFFSET_PAGE[ind] = entry.val; + page_private(res) = entry.val; +} + +static void cmp_page(struct page *page) +{ + int i; + struct swap_info_struct *sis = e2k_page_swap_info(page); + unsigned long offset; + swp_entry_t entry; + unsigned long flags; + + entry.val = page_private(page); + offset = swp_offset(entry); + + raw_spin_lock_irqsave(&lock_write_pages, flags); + for (i = 0; i < MAX_CMP_PAGE; i++) { + if (offset == OFFSET_PAGE[i]) { + raw_spin_unlock_irqrestore(&lock_write_pages, flags); + DebugTM(" find OFFSET =0x%lx\n", offset); + compare_page(page, WRITE_PAGE[i], i); + FIND_PAGE++; + return; + } + } + raw_spin_unlock_irqrestore(&lock_write_pages, flags); + NOT_FIND_PAGE++; + DebugTM("NOT find OFFSET =0x%lx\n", offset); +} +#else /* !DEBUG_M */ +#define cmp_page(x) +#define save_page(x) +#define init_cmp_page() + +#endif /* DEBUG_M */ + +static bool tags_was_writing(struct page *page) +{ + struct swap_info_struct *sis = e2k_page_swap_info(page); + unsigned long offset; + swp_entry_t entry; + + if (!sis) { + DebugTM_W(" %s BAD!!!! page=%px\n", __func__, page); + return 1; + } + entry.val = page_private(page); + offset = swp_offset(entry); + return test_bit(2*offset + 1, sis->tag_swap_map); +} + +static int tags_swap_test(struct page *page) +{ + struct swap_info_struct *sis = e2k_page_swap_info(page); + unsigned long offset; + swp_entry_t entry; + + entry.val = page_private(page); + offset = swp_offset(entry); + if (!sis || offset == 0) { + DebugTM_W(" == %s BAD!!!! 
page=%px offset=0x%lx, sis=%px, entry=0x%lx\n", + __func__, page, offset, sis, entry.val); + return 11; + } + return test_bit(2*offset, sis->tag_swap_map); +} + +static void print_all_tag_swap_pages(void) +{ + int i, j; + + show_swap_cache_info(); + pr_info(" %s WRITE_SWAP=%d WRITE_SWAP_WITH_TAG=%d READ_SWAP=%d" + " READ_SWAP_WITH_TAGS=%d READ_SWAP_TAGS_FROM_CASH=%d" + " PROCESSED_WRITE_SWAP_WITH_TAG=%d MIN_OFFSET=%d" + " MAX_OFFSET=%d FREE_ENTRY=%d" + " LOOP_ENTRY=%d IO_LOOP_ENTRY=%d COUNT=%d WR=%d\n", + __func__, WRITE_SWAP, WRITE_SWAP_WITH_TAG, READ_SWAP, + READ_SWAP_WITH_TAGS, READ_SWAP_TAGS_FROM_CASH, + PROCESSED_WRITE_SWAP_WITH_TAG, MIN_OFFSET, MAX_OFFSET, + FREE_ENTRY, LOOP_ENTRY, IO_LOOP_ENTRY, COUNT, WR); + if (!WRITE_SWAP && !READ_SWAP) { + /* no swap */ + return; + } +return; +#ifdef DEBUG_M + pr_info(" %s FIND_PAGE=%ld NOT_FIND_PAGE=%ld\n", + __func__, FIND_PAGE, NOT_FIND_PAGE); +#endif /* DEBUG_M */ + return; + for (i = 0; i < MAX_SWAPFILES; i++) { + struct tags_swap_page_table *curr_tag_page_table = + &tags_swap_table[i]; + struct page *tag_page; + + if (curr_tag_page_table->size[1] != TAGS_PAGES || + curr_tag_page_table->size[0] != TAGS_READ_PAGES || + !curr_tag_page_table->pages) { + /* null table */ + continue; + } + pr_info("======= i = %d index=%d index_read=%d\n", + i, curr_tag_page_table->index, + curr_tag_page_table->index_read); + + for (j = 0; j < TAGS_PAGES; j++) { + tag_page = curr_tag_page_table->pages[j]; + pr_info(" j=%d tag_page=%px " + " is_busy_tag_page=%d tags_was_writing=%d" + " tags_swap_test=%d flags=0x%lx offset=%d\n", + j, tag_page, is_busy_tag_page(tag_page), + tags_was_writing(tag_page), + tags_swap_test(tag_page), tag_page->flags, + get_swp_offset(tag_page)); + } + } +} + +static struct page *find_swap_page(struct page *page) +{ + struct page *res; + int i; + struct tags_swap_page_table *curr_tag_page_table = + &tags_swap_table[get_swp_type(page)]; + struct page **ptr_page; + + ptr_page = curr_tag_page_table->pages; + for (i 
= 0; i < TAGS_PAGES; i++) { + res = ptr_page[i]; + if (page_private(page) == page_private(res)) { + DebugTM(" %s FOUND!!! res =%px\n", __func__, res); + return res; + } + } + if (tags_was_writing(page)) { + DebugTM(" %s BAD NO_FIND!!! CLEAR=%d WRITING=%d " + "page=%px SECTOR=0x%lx\n", __func__, + CLEAR, WRITING, page, + e2k_real_map_swap_page1(page, NULL, 1)); + } + return NULL; +} + +static struct page *get_tag_swap_page(int wr, struct page *page, int *NO_READ) +{ + struct page *res; + int i, ind; + struct tags_swap_page_table *curr_tag_page_table; + int *ptr_ind; + struct page **ptr_page; + int type = get_swp_type(page); + int size; + struct page *writing_page; + spinlock_t *lock; + unsigned long flags; + + curr_tag_page_table = &tags_swap_table[type]; + size = curr_tag_page_table->size[wr]; + if (wr == 0) { + /* read */ + ptr_ind = &curr_tag_page_table->index_read; + ptr_page = curr_tag_page_table->read_pages; + lock = &(curr_tag_page_table->lock_read_pages); + } else { + ptr_ind = &curr_tag_page_table->index; + ptr_page = curr_tag_page_table->pages; + lock = &(curr_tag_page_table->lock_pages); + } + COUNT = 0; + WR = wr; + while (1) { + spin_lock_irqsave(lock, flags); + ind = *ptr_ind; + for (i = 0; i < size; i++) { + res = ptr_page[ind]; + if (is_busy_tag_page(res)) { + /* change index for next get_tag_swap_page */ + ind = (ind + 1) % size; + } else { + /* found free page */ + *ptr_ind = (ind + 1) % size; + page_private(res) = page_private(page); + set_buse_tag_page(res); + if (wr == 0 && tags_was_writing(page)) { + /* tags for this page are writing in swap */ + writing_page = find_swap_page(page); + DebugTM(" %s AFTER WAIT cpu=%d page=%px " + " writing_page=%px\n", current->comm, + smp_processor_id(), page, writing_page); + memcpy(page_address(res), + page_address(writing_page), PAGE_SIZE/8); + *NO_READ = 1; + } + spin_unlock_irqrestore(lock, flags); +#ifdef DEBUG_TAG_PAGE + if (COUNT) { + print_all_tag_swap_pages(); + } +#endif /* DEBUG_TAG_PAGE */ + return 
res; + } + } + *ptr_ind = (ind + 1) % size; + spin_unlock_irqrestore(lock, flags); + LOOP_ENTRY++; + COUNT++; + if (wr && total_swapcache_pages() >= size) { + io_schedule_timeout(5*HZ); + IO_LOOP_ENTRY++; + } + wait_on_page_locked(res); + if (is_busy_tag_page(res)) { + wait_on_page_bit(res, PG_locked); + } + } + /* !!! unreachable place */ + spin_unlock_irqrestore(lock, flags); + panic(" +++ERRROR %s CLEAR=%d WRITING=%d\n", + current->comm, CLEAR, WRITING); + return NULL; + +} + +static void clear_tags_swap(struct page *page) +{ + struct swap_info_struct *sis = e2k_page_swap_info(page); + unsigned long offset; + swp_entry_t entry; + + entry.val = page_private(page); + offset = swp_offset(entry); + if (!sis || offset == 0) { + DebugTM_W(" %s BAD!!!! page=%px\n", __func__, page); + return; + } + clear_bit(2*offset, sis->tag_swap_map); +} + +static void set_tags_was_writing(struct page *page) +{ + struct swap_info_struct *sis = e2k_page_swap_info(page); + unsigned long offset; + swp_entry_t entry; + + WRITING++; + entry.val = page_private(page); + offset = swp_offset(entry); + if (!sis || offset == 0) { + DebugTM_W(" %s BAD!!!! page=%px\n", __func__, page); + return; + } + set_bit(2*offset + 1, sis->tag_swap_map); +} + +static void set_tags_swap(struct page *page) +{ + struct swap_info_struct *sis = e2k_page_swap_info(page); + unsigned long offset; + swp_entry_t entry; + + entry.val = page_private(page); + offset = swp_offset(entry); + + if (!sis || offset == 0) { + DebugTM_W(" %s BAD!!!! 
page=%px\n", __func__, page); + return; + } + set_bit(2*offset, sis->tag_swap_map); +} + +void tag_swap_write_page(struct page *page, struct writeback_control *wbc) +{ + struct page *dst_page; + long res; + struct bio *bio; + int size; + int NO_READ; + + was_swap_write_page++; + WRITE_SWAP++; + size = PAGE_SIZE / 8; + dst_page = get_tag_swap_page(1, page, &NO_READ); + if (!dst_page) { + DebugTM(" can't find pages for tags\n"); + panic("tag_swap_write_page can't find pages for tags"); + return; + } + res = save_tags_from_page(page, dst_page); + clear_tags_swap(page); + if (res) { + WRITE_SWAP_WITH_TAG++; + /* DEBUG only */ + save_page(page); + set_tags_swap(page); + set_tags_was_writing(dst_page); + bio = e2k_get_swap_bio(GFP_NOIO, dst_page, page, + e2k_end_swap_bio_write, size); + if (bio == NULL) { + BUG(); + return; + } + count_vm_event(PSWPOUT); + if (wbc->sync_mode == WB_SYNC_ALL) + bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC); + else + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); + submit_bio(bio); + } else { + /* if all tags equal 0 than - no save tags */ + set_clear_tag_page(dst_page); + } + return; +} + + +#ifdef CONFIG_MAGIC_SYSRQ +static void sysrq_handle_swap(int key) +{ + print_all_tag_swap_pages(); +} + +static struct sysrq_key_op sysrq_swap_op = { + .handler = sysrq_handle_swap, + .help_msg = "debug(g)", + .action_msg = "DEBUG", +}; +#endif + +void e2k_swap_setup(int type, int block_size) +{ + int i; + struct page *page; + struct tags_swap_page_table *curr_tag_page_table = + &tags_swap_table[type]; + + DebugTM("swap_setup() Initializing swap, type is %d, " + "block_size is %d\n", type, block_size); + curr_tag_page_table->size[1] = TAGS_PAGES; + spin_lock_init(&curr_tag_page_table->lock_pages); + curr_tag_page_table->pages = + kmalloc(TAGS_PAGES * sizeof(void *), GFP_KERNEL); +#ifdef CONFIG_MAGIC_SYSRQ + register_sysrq_key('x', &sysrq_swap_op); +#endif + + for (i = 0; i < TAGS_PAGES; i++) { + page = alloc_page(GFP_KERNEL); + get_page(page); + if (!page) + 
panic("Cannot allocate pages for swap"); + page->mapping = swapper_spaces[type]; + curr_tag_page_table->pages[i] = page; + } + /* + * for reading swap + */ + spin_lock_init(&curr_tag_page_table->lock_read_pages); + curr_tag_page_table->read_pages = + kmalloc(TAGS_READ_PAGES * sizeof(void *), GFP_KERNEL); + curr_tag_page_table->size[0] = TAGS_READ_PAGES; + for (i = 0; i < TAGS_READ_PAGES; i++) { + page = alloc_page(GFP_KERNEL); + get_page(page); + if (!page) + panic("Cannot allocate pages for swap"); + page->mapping = swapper_spaces[type]; + curr_tag_page_table->read_pages[i] = page; + } + curr_tag_page_table->index = 0; + curr_tag_page_table->index_read = 0; + /* DEBUG only */ + init_cmp_page(); +} + +static void tags_swap_free(struct swap_info_struct *sis) +{ + unsigned long *tag_swap_map = sis->tag_swap_map; + + BUG_ON(sis == NULL); + if (!tag_swap_map) + return; + DebugTM(" tags_swap_free sis->max =%d\n", sis->max); + sis->tag_swap_map = 0; + vfree(tag_swap_map); +} + +static struct page *swap_free_pages[TAGS_PAGES + TAGS_READ_PAGES]; + +void e2k_remove_swap(struct swap_info_struct *sis) +{ + int i; + struct page *page; + struct tags_swap_page_table *curr_tag_page_table = + &tags_swap_table[sis->type]; + int ind_swap_free_pages = 0; + struct page **tbl_write_pages; + struct page **tbl_read_pages; + + DebugTM("e2k_remove_swap\n"); + spin_lock(&(curr_tag_page_table->lock_pages)); + spin_lock(&(curr_tag_page_table->lock_read_pages)); + + for (i = 0; i < TAGS_PAGES; i++) { + page = curr_tag_page_table->pages[i]; + if (!page) { + continue; + } + curr_tag_page_table->pages[i] = NULL; + page->mapping = NULL; + ClearPageSwapCache(page); + clear_bit_unlock(PG_locked, &page->flags); + if (is_busy_tag_page(page)) { + wait_on_page_bit(page, PG_locked); + } + put_page(page); + swap_free_pages[ind_swap_free_pages++] = page; + } + for (i = 0; i < TAGS_READ_PAGES; i++) { + page = curr_tag_page_table->read_pages[i]; + if (!page) { + continue; + } + 
curr_tag_page_table->read_pages[i] = NULL; + if (is_busy_tag_page(page)) { + wait_on_page_bit(page, PG_locked); + } + page->mapping = NULL; + ClearPageSwapCache(page); + clear_bit_unlock(PG_locked, &page->flags); + put_page(page); + swap_free_pages[ind_swap_free_pages++] = page; + } + tbl_write_pages = curr_tag_page_table->pages; + curr_tag_page_table->pages = NULL; + tbl_read_pages = curr_tag_page_table->read_pages; + curr_tag_page_table->read_pages = NULL; + + spin_unlock(&(curr_tag_page_table->lock_read_pages)); + spin_unlock(&(curr_tag_page_table->lock_pages)); + /* can not use free() under spin_lock */ + for (i = 0; i < ind_swap_free_pages; i++) { + __free_page(swap_free_pages[i]); + } + kfree(tbl_read_pages); + kfree(tbl_write_pages); + tags_swap_free(sis); +} + +static void e2k_end_swap_bio_read(struct bio *bio) +{ + struct page *page = bio->bi_io_vec[0].bv_page; + + if (bio->bi_status) { + SetPageError(page); + ClearPageUptodate(page); + pr_info("Read-error on swap-device (%u:%u:%llu)\n", + MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)), + (unsigned long long)bio->bi_iter.bi_sector); + } + unlock_page(page); + bio_put(bio); +} + +static struct page *tag_swap_readpage(struct page *page) +{ + struct page *tag_page; + struct bio *bio; + int NO_READ = 0; + + tag_page = get_tag_swap_page(0, page, &NO_READ); + if (NO_READ) { + /* page was copyed already*/ + READ_SWAP_TAGS_FROM_CASH++; + ClearPageLocked(tag_page); + return tag_page; + } + if (!tag_page) { + DebugTM("tag_swap_readpage can't find pages for tags"); + return NULL; + } + bio = e2k_get_swap_bio(GFP_KERNEL, tag_page, page, + e2k_end_swap_bio_read, PAGE_SIZE / 8); + if (bio == NULL) { + unlock_page(tag_page); + panic("tag_swap_readpage can't find memory for bio"); + } + count_vm_event(PSWPIN); + bio_set_op_attrs(bio, REQ_OP_READ, 0); + submit_bio(bio); + + wait_on_page_locked(tag_page); + return tag_page; +} + +/* only for compress & decompress */ +#ifdef CONFIG_ZSWAP +u8 *alloc_page_with_tags(void) +{ + return 
kmalloc(PAGE_SIZE * 2, GFP_KERNEL); +} + +void free_page_with_tags(u8 *p) +{ + kfree(p); +} + +void get_page_with_tags(u8 *dst, u8 *src, int *tag_length) +{ + int res; + + dst = alloc_page_with_tags(); + BUG_ON(!dst); + + copy_tagged_page(dst, src); + res = save_tags_from_data((u64 *)dst, (u8 *)(dst+PAGE_SIZE)); + *tag_length = (res) ? TAGS_BYTES_PER_PAGE : 0; + + return; +} +#endif + +static int was_write_tag_page(struct page *page) +{ + return tags_swap_test(page); +} + +int e2k_swap_readpage(struct page *page) +{ + int ret = 0; + struct swap_info_struct *sis = e2k_page_swap_info(page); + struct page *tag_page; + + READ_SWAP++; + if (!was_write_tag_page(page)) { + ret = swap_readpage(page, false); + return ret; + } + READ_SWAP_WITH_TAGS++; + /* read tags */ + tag_page = tag_swap_readpage(page); + ret = swap_readpage(page, false); + wait_on_page_locked(page); + + if (!tag_page) { + DebugTM_W(" ###!!!! %s can't find tag_page\n", __func__); + return 0; + } + wait_on_page_locked(tag_page); + restore_tags_from_page(page, tag_page); + /* DEBUG only */ + cmp_page(page); + return ret; +} + +/* + * This is optimization for tags_pages + * It is similar to source code of frontswap .c + * It needs one bit for every page + * if bit =1 we must write and read tags + * owherwise NO WRITE, NO READ tags + */ + +/* + * Called when a swap device is swapon'd. 
+ */ +void tags_swap_init(unsigned type, unsigned long *map) +{ + struct swap_info_struct *sis = swap_info[type]; + + BUG_ON(sis == NULL); + BUG_ON(type >= MAX_SWAPFILES); + if (WARN_ON(!map)) + return; + sis->tag_swap_map = map; + DebugTM(" tags_swap_init sis->max =%d\n", sis->max); +} + +int check_tags(unsigned type, unsigned long beg, unsigned long end) +{ + unsigned long offset; + struct swap_info_struct *sis = swap_info[type]; + + for (offset = beg; offset <= end; offset++) { + if (test_bit(2*offset, sis->tag_swap_map)) { + return 1; + } + } + return 0; +} diff --git a/arch/e2k/mm/pageattr.c b/arch/e2k/mm/pageattr.c new file mode 100644 index 000000000000..fcf0b8e8371c --- /dev/null +++ b/arch/e2k/mm/pageattr.c @@ -0,0 +1,774 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +static void modify_pte_page(pte_t *ptep, enum sma_mode mode) +{ + pte_t new; + + switch (mode) { + case SMA_RO: + new = pte_wrprotect(*ptep); + break; + case SMA_RW: + new = pte_mkwrite(*ptep); + break; + case SMA_NX: + new = pte_mknotexec(*ptep); + break; + case SMA_X: + new = pte_mkexec(*ptep); + break; + case SMA_P: + new = pte_mk_present_valid(*ptep); + break; + case SMA_NP: + new = pte_mknot_present_valid(*ptep); + break; + case SMA_WB_MT: + new = pte_mk_wb(*ptep); + break; + case SMA_WC_MT: + new = pte_mk_wc(*ptep); + break; + case SMA_UC_MT: + new = pte_mk_uc(*ptep); + break; + default: + BUG(); + }; + + native_set_pte(ptep, new, false); +} + +static int pte_modified(pte_t pte, enum sma_mode mode) +{ + switch (mode) { + case SMA_RO: + return !pte_write(pte); + case SMA_RW: + return pte_write(pte); + case SMA_NX: + return !pte_exec(pte); + case SMA_X: + return pte_exec(pte); + case SMA_P: + return pte_present(pte); + case SMA_NP: + return !pte_present(pte); + case SMA_WB_MT: + return pte_wb(pte); + case SMA_WC_MT: + return pte_wc(pte); + case SMA_UC_MT: + return pte_uc(pte); + default: + BUG(); + }; + + return -EINVAL; +} + +static int 
walk_pte_level(pmd_t *pmd, unsigned long addr, unsigned long end, + enum sma_mode mode, int *need_flush) +{ + pte_t *ptep; + + ptep = pte_offset_kernel(pmd, addr); + do { + if (pte_none(*ptep)) + return -EINVAL; + if (!pte_modified(*ptep, mode)) { + *need_flush = 1; + modify_pte_page(ptep, mode); + } + ptep++; + addr += PAGE_SIZE; + } while (addr < end); + + return 0; +} + +static __ref void *sma_alloc_page(int node) +{ + void *addr = NULL; + + if (slab_is_available()) { + struct page *page = alloc_pages_node(node, + GFP_KERNEL|__GFP_NOWARN, 0); + if (page) + addr = page_address(page); + } else { + addr = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node); + } + + return addr; +} + +static __ref void sma_free_page(int node, void *addr) +{ + if (slab_is_available()) + free_page((unsigned long) addr); + else + memblock_free(__pa(addr), PAGE_SIZE); +} + +DEFINE_RAW_SPINLOCK(sma_lock); + +static void +map_pmd_huge_page_to_ptes(pte_t *pte_page, e2k_addr_t phys_page, + pgprot_t pgprot) +{ + int i; + + for (i = 0; i < PTRS_PER_PTE; i++) { + pte_page[i] = mk_pte_phys(phys_page, pgprot); + phys_page += PTE_SIZE; + } +} + +static void +split_one_pmd_page(pmd_t *pmdp, e2k_addr_t phys_page, pte_t *pte_page) +{ + pgprot_t pgprot; + pmd_t new; + + BUG_ON(pte_page == NULL); + pgprot_val(pgprot) = _PAGE_CLEAR(pmd_val(*pmdp), + UNI_PAGE_HUGE | UNI_PAGE_PFN); + map_pmd_huge_page_to_ptes(pte_page, phys_page, pgprot); + smp_wmb(); /* make pte visible before page table entry */ + new = mk_pmd_phys(__pa(pte_page), PAGE_KERNEL_PTE); + native_set_pmd(pmdp, new); +} +void split_simple_pmd_page(pgprot_t *ptp, pte_t *ptes[MAX_NUM_HUGE_PTES]) +{ + const pt_level_t *pmd_level = get_pt_level_on_id(E2K_PMD_LEVEL_NUM); + pte_t *ptep; + pte_t *pte_page; + e2k_addr_t phys_page; + + if (pmd_level->get_huge_pte != NULL) { + ptep = pmd_level->get_huge_pte(0, ptp); + } else { + ptep = (pte_t *)ptp; + } + phys_page = pte_pfn(*ptep) << PAGE_SHIFT; + pte_page = *ptes; + split_one_pmd_page((pmd_t *)ptep, 
phys_page, pte_page); +} +void split_multiple_pmd_page(pgprot_t *ptp, pte_t *ptes[MAX_NUM_HUGE_PTES]) +{ + const pt_level_t *pmd_level = get_pt_level_on_id(E2K_PMD_LEVEL_NUM); + pte_t *ptep; + pte_t *pte_page; + e2k_addr_t phys_page; + int ptes_num; + int no; + + ptes_num = get_pt_level_huge_ptes_num(pmd_level); + if (pmd_level->get_huge_pte != NULL) { + ptep = pmd_level->get_huge_pte(0, ptp); + } else { + /* first pte is always multiple ptes size */ + ptep = (pte_t *)(((e2k_addr_t)ptp) & + ~((sizeof(*ptp) * ptes_num) - 1)); + } + phys_page = pte_pfn(*ptep) << PAGE_SHIFT; + for (no = 0; no < ptes_num; no++) { + pte_page = ptes[no]; + split_one_pmd_page((pmd_t *)ptep, phys_page, pte_page); + /* next page table entry */ + ptep++; + phys_page += PMD_SIZE; + } +} + +static inline void +free_pmd_huge_ptes_pages(int node, pte_t *ptes[MAX_NUM_HUGE_PTES], int ptes_num) +{ + int no; + + for (no = 0; no < ptes_num; no++) { + sma_free_page(node, ptes[no]); + ptes[no] = NULL; + } +} +static inline int +alloc_pmd_huge_ptes_pages(int node, pte_t *ptes[MAX_NUM_HUGE_PTES]) +{ + int ptes_num; + int no; + + ptes_num = get_e2k_pt_level_huge_ptes_num(E2K_PMD_LEVEL_NUM); + BUG_ON(ptes_num > MAX_NUM_HUGE_PTES); + for (no = 0; no < ptes_num; no++) { + pte_t *ptep; + + ptep = sma_alloc_page(node); + if (unlikely(ptep == NULL)) + break; + ptes[no] = ptep; + } + if (likely(no >= ptes_num)) + return ptes_num; + + free_pmd_huge_ptes_pages(node, ptes, no); + return -ENOMEM; +} + +/* FIXME; split is not fully implemented for guest kernel */ +/* Guest kernel should register spliting on host */ +static int split_pmd_page(int node, pmd_t *pmdp) +{ + pte_t *ptes[MAX_NUM_HUGE_PTES]; + const pt_level_t *pmd_level = get_pt_level_on_id(E2K_PMD_LEVEL_NUM); + int ptes_num; + bool was_updated = false; + + ptes_num = alloc_pmd_huge_ptes_pages(node, ptes); + if (unlikely(ptes_num < 0)) + return ptes_num; + + /* Re-read `*pmdp' again under spinlock */ + raw_spin_lock(&sma_lock); + if 
(!kernel_pmd_huge(*pmdp)) { + was_updated = true; + } else { + + if (pmd_level->split_pt_page != NULL) { + pmd_level->split_pt_page((pgprot_t *)pmdp, ptes); + } else { + split_simple_pmd_page((pgprot_t *)pmdp, ptes); + } + } + raw_spin_unlock(&sma_lock); + + if (was_updated) + free_pmd_huge_ptes_pages(node, ptes, ptes_num); + + return 0; +} + +static void modify_pmd_page(pmd_t *pmdp, enum sma_mode mode) +{ + pmd_t new; + + switch (mode) { + case SMA_RO: + new = pmd_wrprotect(*pmdp); + break; + case SMA_RW: + new = pmd_mkwrite(*pmdp); + break; + case SMA_NX: + new = pmd_mknotexec(*pmdp); + break; + case SMA_X: + new = pmd_mkexec(*pmdp); + break; + case SMA_P: + new = pmd_mk_present_valid(*pmdp); + break; + case SMA_NP: + new = pmd_mknot_present_valid(*pmdp); + break; + case SMA_WB_MT: + new = pmd_mk_wb(*pmdp); + break; + case SMA_WC_MT: + new = pmd_mk_wc(*pmdp); + break; + case SMA_UC_MT: + new = pmd_mk_uc(*pmdp); + break; + default: + BUG(); + }; + + native_set_pmd(pmdp, new); +} + +static int pmd_modified(pmd_t pmd, enum sma_mode mode) +{ + switch (mode) { + case SMA_RO: + return !pmd_write(pmd); + case SMA_RW: + return pmd_write(pmd); + case SMA_NX: + return !pmd_exec(pmd); + case SMA_X: + return pmd_exec(pmd); + case SMA_P: + return pmd_present(pmd); + case SMA_NP: + return !pmd_present(pmd); + case SMA_WB_MT: + return pmd_wb(pmd); + case SMA_WC_MT: + return pmd_wc(pmd); + case SMA_UC_MT: + return pmd_uc(pmd); + default: + BUG(); + }; + + return -EINVAL; +} + +static int walk_pmd_level(int node, pud_t *pud, unsigned long addr, + unsigned long end, enum sma_mode mode, int *need_flush) +{ + unsigned long next; + pmd_t *pmdp; + e2k_size_t page_size; + int ret = 0; + + pmdp = pmd_offset(pud, addr); + do { + if (pmd_none(*pmdp)) + return -EINVAL; + next = pmd_addr_end(addr, end); + if (!kernel_pmd_huge(*pmdp)) { + ret = walk_pte_level(pmdp, addr, next, mode, + need_flush); + } else if (!pmd_modified(*pmdp, mode)) { + page_size = get_pmd_level_page_size(); + if (addr 
& (page_size - 1) || + addr + page_size > next) { + ret = split_pmd_page(node, pmdp); + continue; + } + *need_flush = 1; + modify_pmd_page(pmdp, mode); + } + ++pmdp; + addr = next; + } while (addr < end && !ret); + + return ret; +} + +void map_pud_huge_page_to_simple_pmds(pgprot_t *pmd_page, e2k_addr_t phys_page, + pgprot_t pgprot) +{ + int i; + + for (i = 0; i < PTRS_PER_PMD; i++) { + ((pmd_t *)pmd_page)[i] = mk_pmd_phys(phys_page, pgprot); + phys_page += PMD_SIZE; + } +} +void map_pud_huge_page_to_multiple_pmds(pgprot_t *pmd_page, + e2k_addr_t phys_page, pgprot_t pgprot) +{ + int ptes_num = get_e2k_pt_level_huge_ptes_num(E2K_PMD_LEVEL_NUM); + pmd_t pmd; + int i, no; + + for (i = 0; i < PTRS_PER_PMD; i += ptes_num) { + pmd = mk_pmd_phys(phys_page, pgprot); + for (no = 0; no < ptes_num; no++) { + ((pmd_t *)pmd_page)[i + no] = pmd; + } + phys_page += (PMD_SIZE * ptes_num); + } +} +static void +split_one_pud_page(pud_t *pudp, pmd_t *pmd_page) +{ + const pt_level_t *pud_level = get_pt_level_on_id(E2K_PUD_LEVEL_NUM); + e2k_addr_t phys_page; + pgprot_t pgprot; + pud_t new; + + phys_page = pud_pfn(*pudp) << PAGE_SHIFT; + pgprot_val(pgprot) = _PAGE_CLEAR(pud_val(*pudp), UNI_PAGE_PFN); + if (pud_level->map_pt_huge_page_to_prev_level != NULL) + pud_level->map_pt_huge_page_to_prev_level((pgprot_t *)pmd_page, + phys_page, pgprot); + else + map_pud_huge_page_to_simple_pmds((pgprot_t *)pmd_page, + phys_page, pgprot); + + smp_wmb(); /* make pmd visible before pud */ + new = mk_pud_phys(__pa(pmd_page), PAGE_KERNEL_PMD); + native_set_pud(pudp, new); +} + +/* FIXME; split is not fully implemented for guest kernel. 
*/ +/* Guest kernel should register spliting on host */ +static int split_pud_page(int node, pud_t *pudp) +{ + pmd_t *pmdp; + bool was_updated = false; + + pmdp = sma_alloc_page(node); + if (!pmdp) + return -ENOMEM; + + /* Re-read `*pudp' again under spinlock */ + raw_spin_lock(&sma_lock); + if (!kernel_pud_huge(*pudp)) { + was_updated = true; + } else { + split_one_pud_page(pudp, pmdp); + } + raw_spin_unlock(&sma_lock); + + if (was_updated) + sma_free_page(node, pmdp); + + return 0; +} + +static void modify_pud_page(pud_t *pudp, enum sma_mode mode) +{ + pud_t new; + + switch (mode) { + case SMA_RO: + new = pud_wrprotect(*pudp); + break; + case SMA_RW: + new = pud_mkwrite(*pudp); + break; + case SMA_NX: + new = pud_mknotexec(*pudp); + break; + case SMA_X: + new = pud_mkexec(*pudp); + break; + case SMA_P: + new = pud_mk_present_valid(*pudp); + break; + case SMA_NP: + new = pud_mknot_present_valid(*pudp); + break; + case SMA_WB_MT: + new = pud_mk_wb(*pudp); + break; + case SMA_WC_MT: + new = pud_mk_wc(*pudp); + break; + case SMA_UC_MT: + new = pud_mk_uc(*pudp); + break; + default: + BUG(); + } + + native_set_pud(pudp, new); +} + +static int pud_modified(pud_t pud, enum sma_mode mode) +{ + switch (mode) { + case SMA_RO: + return !pud_write(pud); + case SMA_RW: + return pud_write(pud); + case SMA_NX: + return !pud_exec(pud); + case SMA_X: + return pud_exec(pud); + case SMA_P: + return pud_present(pud); + case SMA_NP: + return !pud_present(pud); + case SMA_WB_MT: + return pud_wb(pud); + case SMA_WC_MT: + return pud_wc(pud); + case SMA_UC_MT: + return pud_uc(pud); + default: + BUG(); + }; + + return -EINVAL; +} +static int walk_pud_level(int node, pgd_t *pgd, unsigned long addr, + unsigned long end, enum sma_mode mode, int *need_flush) +{ + unsigned long next; + pud_t *pudp; + e2k_size_t page_size; + int ret = 0; + + pudp = pud_offset(pgd, addr); + do { + if (pud_none(*pudp)) + return -EINVAL; + next = pud_addr_end(addr, end); + if (!kernel_pud_huge(*pudp)) { + ret = 
walk_pmd_level(node, pudp, addr, next, mode, + need_flush); + } else if (!pud_modified(*pudp, mode)) { + page_size = get_pud_level_page_size(); + if (addr & (page_size - 1) || addr + page_size > next) { + ret = split_pud_page(node, pudp); + continue; + } + *need_flush = 1; + modify_pud_page(pudp, mode); + } + ++pudp; + addr = next; + } while (addr < end && !ret); + + return ret; +} + +static void sma_flush_tlb_ipi(void *unused) +{ + __flush_tlb_all(); +} + +static int set_memory_attr(unsigned long start, unsigned long end, + enum sma_mode mode) +{ + unsigned long addr, next; + int node, ret, need_flush = 0; + pgd_t *pgdp; + + if (end > E2K_MODULES_END && (start < VMALLOC_START || + end > VMALLOC_END)) + return -EINVAL; + + if (start >= end) + return 0; + + if (WARN_ON(!IS_ALIGNED(start, PAGE_SIZE))) + start = round_down(start, PAGE_SIZE); + if (WARN_ON(!IS_ALIGNED(end, PAGE_SIZE))) + end = round_up(end, PAGE_SIZE); + + /* + * Get rid of potentially aliasing lazily unmapped vm areas that may + * have permissions set that deviate from the ones we are setting here. + */ + vm_unmap_aliases(); + + for_each_node_has_dup_kernel(node) { + addr = start; + pgdp = node_pgd_offset_kernel(node, addr); + do { + if (pgd_none(*pgdp)) + return -EINVAL; + /* FIXME: should be implemented, */ + /* if pgd level can have PTEs */ + BUG_ON(kernel_pgd_huge(*pgdp)); + next = pgd_addr_end(addr, end); + ret = walk_pud_level(node, pgdp, addr, next, mode, + &need_flush); + if (ret) + return ret; + } while (pgdp++, addr = next, addr < end); + } + + if (IS_ENABLED(CONFIG_KVM_GUEST_MODE) && + !IS_ENABLED(CONFIG_KVM_SHADOW_PT) || need_flush) { + /* + * Sometimes allocators are called under closed + * interrupts, so do not use on_each_cpu() here. + */ + nmi_on_each_cpu(sma_flush_tlb_ipi, NULL, 1, 0); + + /* + * gpu-imgtec expects the caches to be dropped when remapping + * to WC/UC (see _ApplyOSPagesAttribute()). 
+ * + * Also (#134896): + * 1) When remapping memory from General/WB/WC to External/UC + * we must flush previous cache contents so that they won't + * overwrite RAM contents later. + * 2) When remapping memory from External/UC to General/WB/WC + * it is possible that hardware prefetcher has loaded some of + * its older contents into cache so it must be flushed. + */ + if (mode == SMA_UC_MT || mode == SMA_WC_MT || + cpu_has(CPU_FEAT_HW_PREFETCHER) && mode == SMA_WB_MT) + write_back_cache_range(start, end - start); + } + + return 0; +} + +int set_memory_ro(unsigned long addr, int numpages) +{ + addr &= PAGE_MASK; + return set_memory_attr(addr, addr + numpages * PAGE_SIZE, SMA_RO); +} + +int set_memory_rw(unsigned long addr, int numpages) +{ + addr &= PAGE_MASK; + return set_memory_attr(addr, addr + numpages * PAGE_SIZE, SMA_RW); +} + +int set_memory_nx(unsigned long addr, int numpages) +{ + addr &= PAGE_MASK; + return set_memory_attr(addr, addr + numpages * PAGE_SIZE, SMA_NX); +} + +int set_memory_x(unsigned long addr, int numpages) +{ + addr &= PAGE_MASK; + return set_memory_attr(addr, addr + numpages * PAGE_SIZE, SMA_X); +} + + +#ifdef CONFIG_DEBUG_PAGEALLOC +void __kernel_map_pages(struct page *page, int numpages, int enable) +{ + unsigned long addr = (unsigned long) page_address(page); + + set_memory_attr(addr, addr + numpages * PAGE_SIZE, + (enable) ? SMA_P : SMA_NP); +} + +# ifdef CONFIG_HIBERNATION +/* + * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function + * is used to determine if a linear map page has been marked as not-valid by + * CONFIG_DEBUG_PAGEALLOC. 
+ */ +bool kernel_page_present(struct page *page) +{ + unsigned long addr, entry_val; + probe_entry_t entry; + + addr = (unsigned long) page_address(page); + entry = get_MMU_DTLB_ENTRY(addr); + entry_val = probe_entry_val(entry); + + if ((entry_val & ~DTLB_EP_RES) || !(entry_val & DTLB_ENTRY_VVA)) + return false; + + return true; +} +# endif +#endif + +#define CPA_PAGES_ARRAY 1 + +static int change_page_attr(unsigned long addr, int numpages, + enum sma_mode mode, int flags, struct page **pages) +{ + int i = 0; + + if (!(flags & CPA_PAGES_ARRAY)) { + if (addr & ~PAGE_MASK) { + addr &= PAGE_MASK; + WARN_ON_ONCE(1); + } + + return set_memory_attr(addr, addr + numpages * PAGE_SIZE, mode); + } + + for (i; i < numpages; i++) { + int err; + + addr = (unsigned long)page_address(pages[i]); + + if (err = set_memory_attr(addr, addr + PAGE_SIZE, mode)) { + WARN_ON_ONCE(1); + return err; + } + } + + return 0; +} + +/* Mapping device memory as UC disables cache coherency since v6. + * + * Mapping RAM as UC keeps cache coherency on but beware that + * there must not exist any aliases for the remapped memory, + * otherwise speculative access at an alias address could load + * data into cache and consequent stores and loads will work with + * cache instead of memory. + * + * set_memory_uc() itself does NOT take care of cache flushing as + * on e2k everything is coherent including DMA, thus the flush is + * needed on one device only: Imagination video card on e2c3. 
*/ +int set_memory_uc(unsigned long addr, int numpages) +{ + return change_page_attr(addr, numpages, SMA_UC_MT, 0, NULL); +} +EXPORT_SYMBOL(set_memory_uc); + +int set_pages_uc(struct page *page, int numpages) +{ + unsigned long addr = (unsigned long)page_address(page); + + return set_memory_uc(addr, numpages); +} +EXPORT_SYMBOL(set_pages_uc); + +int set_pages_array_uc(struct page **pages, int addrinarray) +{ + return change_page_attr(0, addrinarray, SMA_UC_MT, + CPA_PAGES_ARRAY, pages); +} +EXPORT_SYMBOL(set_pages_array_uc); + +/* Mapping device memory as WC disables cache coherency since v6. + * + * Mapping RAM as WC keeps cache coherency on but beware that + * there must not exist any aliases for the remapped memory, + * otherwise speculative access at an alias address could load + * data into cache and consequent stores and loads will work with + * cache instead of memory. + * + * set_memory_wc() itself does NOT take care of cache flushing as + * on e2k everything is coherent including DMA, thus the flush is + * needed on one device only: Imagination video card on e2c3. 
*/ +int set_memory_wc(unsigned long addr, int numpages) +{ + return change_page_attr(addr, numpages, SMA_WC_MT, 0, NULL); +} +EXPORT_SYMBOL(set_memory_wc); + +int set_pages_wc(struct page *page, int numpages) +{ + unsigned long addr = (unsigned long)page_address(page); + + return set_memory_wc(addr, numpages); +} +EXPORT_SYMBOL(set_pages_wc); + +int set_pages_array_wc(struct page **pages, int addrinarray) +{ + return change_page_attr(0, addrinarray, SMA_WC_MT, + CPA_PAGES_ARRAY, pages); +} +EXPORT_SYMBOL(set_pages_array_wc); + +int set_memory_wb(unsigned long addr, int numpages) +{ + return change_page_attr(addr, numpages, SMA_WB_MT, 0, NULL); +} +EXPORT_SYMBOL(set_memory_wb); + +int set_pages_wb(struct page *page, int numpages) +{ + unsigned long addr = (unsigned long)page_address(page); + + return set_memory_wb(addr, numpages); +} +EXPORT_SYMBOL(set_pages_wb); + +int set_pages_array_wb(struct page **pages, int addrinarray) +{ + return change_page_attr(0, addrinarray, SMA_WB_MT, + CPA_PAGES_ARRAY, pages); +} +EXPORT_SYMBOL(set_pages_array_wb); diff --git a/arch/e2k/mm/pgtable.c b/arch/e2k/mm/pgtable.c new file mode 100644 index 000000000000..30b0e336a614 --- /dev/null +++ b/arch/e2k/mm/pgtable.c @@ -0,0 +1,151 @@ +#include +#include +#include + +#include +#include + +/* + * track_pfn_remap is called when a _new_ pfn mapping is being established + * by remap_pfn_range() for physical range indicated by pfn and size. + */ +int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, + unsigned long pfn, unsigned long addr, unsigned long size) +{ + pgprot_t old_prot = *prot; + + if (pfn_valid(pfn)) + *prot = set_general_mt(old_prot); + else + *prot = set_external_mt(old_prot); + + return 0; +} + +/* + * track_pfn_insert is called when a _new_ single pfn is established + * by vm_insert_pfn(). + * + * This does not cover vm_insert_page so if some bad driver decides + * to use it on I/O memory we could get into trouble. 
+ */ +void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn) +{ + pgprot_t old_prot = *prot; + + if (likely(pfn_valid(pfn_t_to_pfn(pfn)))) { + *prot = set_general_mt(old_prot); + } else { + VM_WARN_ON_ONCE(1); + *prot = set_external_mt(old_prot); + } +} + +/* + * Used to set accessed or dirty bits in the page table entries + * on other architectures. On e2k, the accessed and dirty bits + * are tracked by hardware. However, do_wp_page calls this function + * to also make the pte writeable at the same time the dirty bit is + * set. In that case we do actually need to write the PTE. + * + * This also fixes race in arch-independent ptep_set_access_flags() + * (see commit 66dbd6e6 + * "arm64: Implement ptep_set_access_flags() for hardware AF/DBM") + */ +int ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, + pte_t entry, int dirty) +{ + int changed = !pte_same(*ptep, entry); + + if (changed && dirty) { + set_pte_at(vma->vm_mm, address, ptep, entry); + flush_tlb_page(vma, address); + } + + return changed; +} + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +/* + * Same as ptep_set_access_flags() but for PMD + */ +int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty) +{ + int changed = !pmd_same(*pmdp, entry); + + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + + if (changed && dirty) { + set_pmd_at(vma->vm_mm, address, pmdp, entry); + flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + } + + return changed; +} +#endif + +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot) +{ + BUG_ON(phys & ~PMD_MASK); + BUG_ON(!pmd_none(*pmd)); + + set_pmd(pmd, pmd_mkhuge(mk_pmd_phys(phys, prot))); + + return 1; +} + +int pmd_clear_huge(pmd_t *pmd) +{ + if (!kernel_pmd_huge(*pmd)) + return 0; + pmd_clear(pmd); + return 1; +} + +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) +{ + pte_t *pte; + + //TODO remove this after 
upgrading - check is moved to arch-indep. code + if (!pmd_present(*pmd)) + return 1; + + pte = (pte_t *) pmd_page_vaddr(*pmd); + pmd_clear(pmd); + + flush_tlb_kernel_range(addr, addr + PMD_SIZE); + + pte_free_kernel(&init_mm, pte); + + return 1; +} + +int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot) +{ + /* Not supported (see arch_ioremap_pud_supported()) */ + BUG(); +} + +int pud_clear_huge(pud_t *pud) +{ + if (!kernel_pud_huge(*pud)) + return 0; + pud_clear(pud); + return 1; +} + +int pud_free_pmd_page(pud_t *pud, unsigned long addr) +{ + /* Not supported (see arch_ioremap_pud_supported()) */ + BUG(); +} + +int p4d_free_pud_page(p4d_t *p4d, unsigned long addr) +{ + return 0; +} +#endif diff --git a/arch/e2k/mm/tag_mem.c b/arch/e2k/mm/tag_mem.c new file mode 100644 index 000000000000..feb8ab68c98c --- /dev/null +++ b/arch/e2k/mm/tag_mem.c @@ -0,0 +1,1200 @@ +/* $Id: tag_mem.c,v 1.10 2009/12/10 17:34:00 kravtsunov_e Exp $ + * arch/e2k/mm/tag_mem.c + * + * Tag's memory management + * + * Copyright 2003 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#undef DEBUG_TAG_MODE +#undef DebugTM +#define DEBUG_TAG_MODE 0 /* Tag memory */ +#define DebugTM(...) DebugPrint(DEBUG_TAG_MODE ,##__VA_ARGS__) + + +/* Cache for swap_info_t structs */ +kmem_cache_t* swap_info_cache = NULL; + +atomic_t launder_count = ATOMIC_INIT(0); + +/* + * Create a new vma struct for the tags area map from the specified + * begining address of tags area to the end adress of this area. 
+ */ + +static struct vm_area_struct * +create_new_tag_mmap(struct mm_struct *mm, e2k_addr_t tag_start, + e2k_addr_t tag_end) +{ + struct vm_area_struct *tag_vma; + struct vm_area_struct *vma_start; + struct vm_area_struct *vma_end; + + /* + * Check an intersection with the previous and next tags areas maps + * It possible if the end of the previous data VM area and the + * begining of the new area are into the same page of tags and/or + * the begining of the next data VM area and the end of the new area + * are into one tags page. + */ + + DebugTM("will start find_vma() for tags area " + "start page 0x%lx\n", tag_start); + vma_start = find_vma(mm, tag_start); + if (vma_start && vma_start->vm_start <= tag_start) { + DebugTM("found VMA 0x%px intersected with " + "tags area start page\n", vma_start); + tag_start = vma_start->vm_end; + DebugTM("set new tag page start addr to " + "end of intersected area 0x%lx\n", tag_start); + if (tag_start >= tag_end) { + DebugTM("returns : tag page was " + "included to old tage page VMA\n"); + return vma_start; + } + } + DebugTM("will start find_vma() for tags area " + "end page 0x%lx\n", tag_end); + vma_end = find_vma(mm, tag_end); + if (vma_end && vma_end->vm_start < tag_end) { + DebugTM("found VMA 0x%px intersected with " + "tags area end page\n", vma_end); + tag_end = vma_end->vm_start; + DebugTM("set new tag page end addr to " + "start of intersected area 0x%lx\n", tag_end); + if (tag_start >= tag_end) { + DebugTM("returns : tag page was " + "included to old tage page VMA\n"); + return vma_end; + } + } + + /* + * Create a new vma struct for the tags area mapping + */ + + DebugTM("will start kmem_cache_alloc()\n"); + tag_vma = vm_area_alloc(mm); + DebugTM("kmem_cache_alloc() returned vma 0x%px\n", + tag_vma); + if (!tag_vma) + return NULL; /* no memory */ + + tag_vma->vm_start = tag_start; + tag_vma->vm_end = tag_end; + tag_vma->vm_flags = TAG_VM_FLAGS; + tag_vma->vm_page_prot = PAGE_TAG_MEMORY; + tag_vma->vm_ops = NULL; + 
DebugTM("vma->vm_start 0x%lx vm_end 0x%lx\n", + tag_vma->vm_start, tag_vma->vm_end); + + DebugTM("will start insert_vm_struct()\n"); + __insert_vm_struct(mm, tag_vma); + + /* + * Virtual memory for tags maps should not be taken + * into account* as it is imposed by kernel??? + */ + +// mm->total_vm += ((tag_end - tag_start) >> PAGE_SHIFT); +// DebugTM("create_new_tag_mmap() mm->total_vm 0x%lx\n", mm->total_vm); + +#ifdef CONFIG_MAKE_ALL_PAGES_VALID + if (tag_vma->vm_flags & VM_PAGESVALID) { + int ret; + DebugTM("starts make_vma_pages_valid() " + "for VMA 0x%px\n", tag_vma); + ret = make_vma_pages_valid(tag_vma, tag_start, tag_end); + if (ret != 0) { + DebugTM("make_vma_pages_valid() " + "finished with error %d\n", + ret); + vm_area_free(tag_vma); + return NULL; + } + DebugTM("make_vma_pages_valid() finished " + "OK\n"); + } +#endif /* CONFIG_MAKE_ALL_PAGES_VALID */ + + if (tag_vma->vm_flags & VM_LOCKED) { + /* + * Locked memory for tags maps should not be taken + * into account as it is imposed by kernel??? + */ +// mm->locked_vm += ((tag_end - tag_start) >> PAGE_SHIFT); + + DebugTM("will do __get_user_pages()\n"); + __get_user_pages(current, current->mm, tag_start, + (tag_end - tag_start) / PAGE_SIZE, + FOLL_TOUCH | FOLL_MLOCK | FOLL_WRITE | FOLL_FORCE, + NULL, NULL, NULL); + } + DebugTM("finished for tag area from start addr " + "0x%lx to end addr 0x%lx\n", tag_start, tag_end); + return tag_vma; +} + +/* + * Map tags memory area approoriate to specified virtual address of data. + * Function maps all VM area containing specified virtual address. + * One page of tags memory can contain tags from a few VM areas of data. + * In maximal case up to 16 pages from different VM areas (if VMA consists + * of one page). + * Here it is supposed that tags memories are not unmapped while unmapping + * the appropriate VM area of data. 
+ */ + +static struct vm_area_struct * +do_tag_mmap(struct mm_struct *mm, e2k_addr_t data_addr) +{ + e2k_addr_t tag_start; + e2k_addr_t tag_end; + e2k_addr_t prev_end = 0; + e2k_addr_t next_start = TASK_SIZE; + struct vm_area_struct *data_vma; + struct vm_area_struct *prev_vma; + struct vm_area_struct *tag_vma; + unsigned long grow; + int ret; + + DebugTM("started for addr 0x%lx\n", data_addr); + + /* + * Find VM area which contains address specified by arg. 'data_addr' + * Tags of full VM area should be mapped to appropriate virtual + * user space. + */ + + DebugTM("will start find_vma() for data addr 0x%lx\n", + data_addr); + data_vma = find_vma_prev(mm, data_addr, &prev_vma); + if (data_vma == NULL || data_vma->vm_start > data_addr) { + printk("do_tag_mmap(): find_vma() could not find VMA for " + "data addr 0x%lx\n", data_addr); + BUG(); + return NULL; + } + DebugTM("find_vma() returned VMA from 0x%lx to 0x%lx\n", + data_vma->vm_start, data_vma->vm_end); + if (prev_vma != NULL) + prev_end = prev_vma->vm_end; + if (data_vma->vm_next != NULL) + next_start = data_vma->vm_next->vm_start; + DebugTM("previous VMA end is 0x%lx, next VMA start is " + "0x%lx\n", prev_end, next_start); + + /* + * Transform the starting and final addresses of VM area of the data + * to appropriate addresses of tags VM area. 
+ * One page of tags memory consists of 16 pages of data + * VM area should be page aligned, so the starting address of tags + * VM area is aligned to the begining of the page and the final address + * is aligned to the end of page + */ + + tag_start = PAGE_ALIGN_UP(virt_to_tag(data_vma->vm_start)); + tag_end = PAGE_ALIGN_DOWN(virt_to_tag(data_vma->vm_end)); + DebugTM("tag page start addr 0x%lx, end addr 0x%lx\n", + tag_start, tag_end); + if (tag_start >= tag_end) { + printk("do_tag_mmap(): tag pages start addr 0x%lx >= end " + "addr 0x%lx\n", + tag_start, tag_end); + BUG(); + return NULL; + } + + /* + * If the tags of the data VM area were not mapped yet to virtual + * space then we should create new VM area of tags and clear old + * maps. Old maps will be cleared from end of the previous VMA to + * start of the next VMA to release the possible hanged old maps. + */ + + if (!(data_vma->vm_flags & VM_TAGMAPPED)) { + DebugTM("tags of the data VMA were not mapped " + "yet : call do_tag_munmap() to unmap tag memory of " + "the data from 0x%lx to 0x%lx\n", + prev_end, next_start); + ret = do_tag_munmap(mm, prev_end, next_start - prev_end); + if (ret != 0) { + DebugTM("do_tag_munmap() returned " + "error %d\n", ret); + return NULL; + } + DebugTM("do_tag_munmap() cleared old maps\n"); + DebugTM("is starting create_new_tag_mmap() " + "for tags area from 0x%lx to 0x%lx\n", + tag_start, tag_end); + tag_vma = create_new_tag_mmap(mm, tag_start, tag_end); + if (tag_vma != NULL) + data_vma->vm_flags |= VM_TAGMAPPED; + return tag_vma; + } + + /* + * Further a case when the tags of the data VM area were already + * mapped to virtual space. 
It means that data VM area was expanded + * from left (low addresses : do_brk()) and/or from right (high + * addresses : stack expansion), so it needs find existing VMA + * of tags area and expand it from left/right + */ + + DebugTM("tags of the data VMA were already mapped\n"); + + /* + * Clear the possible hanged old tags maps from end of the previous VMA + * to the start of the current VMA and from end of the current VMA to + * the start of the next VMA + */ + + if (data_vma->vm_start > prev_end) { + DebugTM("will start do_tag_munmap() to unmap " + "tag memory of the data from previous end 0x%lx to " + "start of our area 0x%lx\n", + prev_end, data_vma->vm_start); + ret = do_tag_munmap(mm, prev_end, data_vma->vm_start - + prev_end); + if (ret != 0) { + DebugTM("do_tag_munmap() returned " + "error %d\n", ret); + return NULL; + } + DebugTM("do_tag_munmap() cleared old left (low " + "addresses) maps\n"); + } + + if (next_start > data_vma->vm_end) { + DebugTM("will start do_tag_munmap() to unmap " + "tags memory of the data from our area end 0x%lx to " + "start of the next area 0x%lx\n", + data_vma->vm_end, next_start); + ret = do_tag_munmap(mm, data_vma->vm_end, + next_start - data_vma->vm_end); + if (ret != 0) { + DebugTM("do_tag_munmap() returned " + "error %d\n", ret); + return NULL; + } + DebugTM("do_tag_munmap() cleared old right " + "(high addresses) maps\n"); + } + + /* + * Find VMA of the tags area appropriate to the initial data area + * which it was expanded later. + * First search the new starting address of tags area, probably the + * data area was expanded from the start ('expand_stack()'). 
+ */ + + DebugTM("will start find_vma() for tags area " + "start addr 0x%lx\n", tag_start); + tag_vma = find_vma(mm, tag_start); + if (tag_vma == NULL) { + printk("do_tag_mmap(): find_vma() could not find VMA of " + "the mapped earlier tags area with starting addr " + "0x%lx\n", tag_start); + BUG(); + return NULL; + } + if (tag_vma->vm_start > tag_start) { + DebugTM("found VMA from 0x%lx to 0x%lx " + "not intersected with the tags area start addr\n", + tag_vma->vm_start, tag_vma->vm_end); + /* + * Similar the data area was expanded at the left + * (removed the starting address ('expand_stack()')) + * Expand VMA of tags area at the left + */ + grow = tag_vma->vm_start - tag_start; + DebugTM("similar the data area was expanded " + "at the left from 0x%lx to 0x%lx\n", + tag_vma->vm_start, tag_start); + tag_vma->vm_start = tag_start; + tag_vma->vm_pgoff -= grow; + DebugTM("tags area VMA expanded from " + "left ; start addr moved to 0x%lx\n", + tag_vma->vm_start); + } else { + DebugTM("found VMA from 0x%lx to 0x%lx " + "intersected with the tags area start addr\n", + tag_vma->vm_start, tag_vma->vm_end); + } + + /* + * If the next VMA intersects with new tags area then + * it will be considered as main VMA, which should be + * expanded + */ + if (tag_vma->vm_next == NULL) { + DebugTM("next VMA does not exist\n"); + prev_vma = NULL; + } else if (tag_vma->vm_next->vm_start < tag_end) { + /* + * The next VMA is considered as main VMA, which + * should be expanded + */ + DebugTM("next VMA from 0x%lx to 0x%lx " + "will be considered as main VMA to expand\n", + tag_vma->vm_next->vm_start, + tag_vma->vm_next->vm_end); + prev_vma = tag_vma; + tag_vma = tag_vma->vm_next; + } else { + DebugTM("next VMA exists but has not " + "intersection with our tags area\n"); + prev_vma = NULL; + } + if (prev_vma != NULL) { + grow = tag_vma->vm_start - prev_vma->vm_end; + if (grow > 0) { + /* + * There is a hole between previous VMA. 
where + * tags area starts and main VMA where tags area + * continues. + * Expand main VMA from left (low addresses) + */ + DebugTM("there is a hole between " + "previous VMA and main VMA\n"); + tag_vma->vm_start = prev_vma->vm_end; + tag_vma->vm_pgoff -= grow; + DebugTM("main VMA expanded from " + "left ; start addr moved to 0x%lx\n", + tag_vma->vm_start); + } + } + + /* + * Try to expand VMA of tags area from right (move the end), + * if it needs + */ + if (tag_vma->vm_end < tag_end) { + DebugTM("VMA of tags area should be " + "expanded from right : move end from 0x%lx " + "to 0x%lx\n", + tag_vma->vm_end, tag_end); + if (tag_vma->vm_next && tag_vma->vm_next->vm_start < tag_end) { + printk("do_tag_mmap(): ERROR there are at least" + " 3 VMAs which intersects with tags " + "area (3-th VMA from 0x%lx to 0x%lx\n", + tag_vma->vm_next->vm_start, + tag_vma->vm_next->vm_end); + BUG(); + return NULL; + } + tag_vma->vm_end = tag_end; + DebugTM("main VMA expanded from " + "right ; ennd addr moved to 0x%lx\n", + tag_vma->vm_end); + } + + return tag_vma; +} + +/* + * We special-case the C-O-W ZERO_PAGE, because it's such + * a common occurrence (no need to read the page to know + * that it's zero - better for the cache and memory subsystem). 
+ */ +static inline void +copy_cow_page(struct page * from, struct page * to, unsigned long address) +{ + if (is_zero_page(from)) { + clear_user_highpage(to, address); + return; + } + copy_user_highpage(to, from, address); +} + +/* + * Establish a new mapping: + * - flush the old one + * - update the page tables + * - inform the TLB about the new one + */ +static inline void +establish_tags_pte(struct vm_area_struct *vma, unsigned long address, + pte_t *page_table, pte_t entry) +{ + DebugTM("set pte 0x%px to 0x%lx for addr 0x%lx\n", + page_table, pte_val(entry), address); + set_pte_at(vma->vm_mm, address, page_table, entry); + flush_tlb_page(vma, address); + update_mmu_cache(vma, address, entry); +} + +static inline void +break_tags_cow(struct vm_area_struct *vma, struct page *old_page, + struct page *new_page, unsigned long address, pte_t *page_table) +{ + DebugTM("will copy page pte 0x%px == 0x%lx for addr " + "0x%lx\n", + page_table, pte_val(*page_table), address); + copy_cow_page(old_page, new_page, address); + flush_page_to_ram(new_page); + flush_cache_page(vma, address); + establish_tags_pte(vma, address, page_table, + pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)))); +} + +/* + * This routine handles present pages, when users try to write + * to a shared page. It is done by copying the page to a new address + * and decrementing the shared-page counter for the old page. + * + * Goto-purists beware: the only reason for goto's here is that it results + * in better assembly code.. The "default" path will see no jumps at all. + * + * Note that this routine assumes that the protection checks have been + * done by the caller (the low-level page fault routine in most cases). + * Thus we can safely just mark it writable once we've done any necessary + * COW. + * + * We also mark the page dirty at this point even though the page will + * change only once the write actually happens. This avoids a few races, + * and potentially makes it more efficient. 
+ * + * We enter with the page table read-lock held, and need to exit without + * it. + */ +static int +do_wp_tags_page(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pte_t *page_table, pte_t pte) +{ + struct page *old_page, *new_page; + + DebugTM("will do writable page pte 0x%px == 0x%lx for " + "addr 0x%lx\n", + page_table, pte_val(pte), address); + old_page = pte_page(pte); + if (!VALID_PAGE(old_page)) + goto bad_wp_page; + + /* + * We can avoid the copy if: + * - we're the only user (count == 1) + * - the only other user is the swap cache, + * and the only swap cache user is itself, + * in which case we can just continue to + * use the same swap cache (it will be + * marked dirty). + */ + switch (page_count(old_page)) { + case 2: + /* + * Lock the page so that no one can look it up from + * the swap cache, grab a reference and start using it. + * Can not do lock_page, holding page_table_lock. + */ + if (!PageSwapCache(old_page) || TryLockPage(old_page)) + break; + if (is_page_shared(old_page)) { + UnlockPage(old_page); + break; + } + UnlockPage(old_page); + /* FallThrough */ + case 1: + flush_cache_page(vma, address); + establish_tags_pte(vma, address, page_table, + pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)))); + return 1; /* Minor fault */ + } + + /* + * Ok, we need to copy. Oh, well.. + */ + new_page = page_cache_alloc(); + if (!new_page) + return -1; + + /* + * Re-check the pte - we dropped the lock + */ + if (pte_same(*page_table, pte)) { + if (PageReserved(old_page)) + ++mm->rss; + break_tags_cow(vma, old_page, new_page, address, page_table); + + /* Free the old page.. 
*/ + new_page = old_page; + } + page_cache_release(new_page); + return 1; /* Minor fault */ + +bad_wp_page: + printk("do_wp_page: bogus page at address %08lx (page 0x%lx)\n", + address, (unsigned long)old_page); + return -1; +} + +static int do_swap_tags_page(struct mm_struct * mm, + struct vm_area_struct * vma, unsigned long address, + pte_t * page_table, swp_entry_t entry, int write_access) +{ + struct page *page = lookup_swap_cache(entry); + pte_t pte; + + if (!page) { + lock_kernel(); + swapin_readahead(entry); + page = read_swap_cache(entry); + unlock_kernel(); + if (!page) + return -1; + + flush_page_to_ram(page); + flush_icache_page(vma, page); + } + + mm->rss++; + + pte = mk_pte(page, vma->vm_page_prot); + + /* + * Freeze the "shared"ness of the page, ie page_count + swap_count. + * Must lock page before transferring our swap count to already + * obtained page count. + */ + lock_page(page); + + if (PageWithSwapInfo(page)) remove_swap_info_from_page(page); + + swap_free(entry); + if (write_access && !is_page_shared(page)) + pte = pte_mkwrite(pte_mkdirty(pte)); + + UnlockPage(page); + set_pte_at(mm, address, page_table, pte); + /* No need to invalidate - it was non-present before */ + update_mmu_cache(vma, address, pte); + return 1; /* Minor fault */ +} + +/* + * do_no_page() tries to create a new page mapping. It aggressively + * tries to share with existing pages, but makes a separate copy if + * the "write_access" parameter is true in order to avoid the next + * page fault. + * + * As this is called only for pages that do not currently exist, we + * do not need to flush old virtual caches or the TLB. + * + * This is called with the MM semaphore held. 
+ */ +static int +do_no_anonymous_tags_page(struct mm_struct * mm, struct vm_area_struct * vma, + unsigned long address, int write_access, pte_t *page_table) +{ + struct page *page = NULL; + pte_t entry; + + DebugTM("started for pte 0x%px == 0x%lx " + "addr 0x%lx\n", + page_table, pte_val(*page_table), address); + entry = pte_wrprotect(mk_pte(ZERO_PAGE(address), vma->vm_page_prot)); + if (write_access) { + page = alloc_page(GFP_ATOMIC); + if (!page) + return -1; + clear_user_highpage(page, address); + entry = pte_mkwrite(pte_mkdirty(mk_pte(page, + vma->vm_page_prot))); + mm->rss++; + flush_page_to_ram(page); + } + DebugTM("set pte 0x%px to 0x%lx\n", + page_table, pte_val(entry)); + set_pte_at(mm, address, page_table, entry); + /* No need to invalidate - it was non-present before */ + update_mmu_cache(vma, address, entry); + return 1; /* Minor fault */ +} + +/* + * The function is same as 'handle_pte_fault()', but handles tags pages only + * Note the "page_table_lock" should be locked by caller (by kswapd). + * + * The adding of pages is protected by the MM semaphore, which does not hold, + * so we need to worry about a page being suddenly been added into + * our VM. 
+ */ +static inline int +handle_tags_pte_fault(struct mm_struct *mm, + struct vm_area_struct * vma, e2k_addr_t address, int write_access, + pte_t * pte) +{ + pte_t entry; + + DebugTM("started for address 0x%lx pte 0x%px == " + "0x%lx\n", address, pte, pte_val(*pte)); + entry = *pte; + if (!pte_present(entry)) { + DebugTM("pte 0x%px == 0x%lx not " + "present\n", pte, pte_val(*pte)); + if (pte_none(entry)) { + DebugTM("will start " + "do_no_anonymous_tags_page()\n"); + return do_no_anonymous_tags_page(mm, vma, address, + write_access, pte); + } + DebugTM("tags page is swapped out, address is 0x%lx\n", + address); + return do_swap_tags_page(mm, vma, address, pte, pte_to_swp_entry(entry), write_access); + } + + if (write_access) { + if (!pte_write(entry)) { + DebugTM("will start " + "do_wp_tags_page()\n"); + return do_wp_tags_page(mm, vma, address, pte, entry); + } + + DebugTM("will do pte_mkdirty()\n"); + entry = pte_mkdirty(entry); + } + DebugTM("will do pte_mkyoung()\n"); + entry = pte_mkyoung(entry); + DebugTM("will start establish_pte()\n"); + establish_tags_pte(vma, address, pte, entry); + DebugTM("returns 1\n"); + return 1; +} + +/* + * The function is the same as 'handle_mm_fault()', but makes present + * tags pages only. 
+ * By the time we get here, we already hold the 'page_table_lock' + */ +static struct page * +make_tags_page_present(struct mm_struct *mm, struct vm_area_struct *vma, + e2k_addr_t tags_addr, int write_access) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + int ret; + + DebugTM("started addr 0x%lx\n", tags_addr); + pgd = pgd_offset(mm, tags_addr); + pud = pud_alloc(pgd, tags_addr); + DebugTM("pud_alloc() returned PUD 0x%px\n", + pud); + if (pud == NULL) { + DebugTM("pud_alloc() could not " + "allocate PUD\n"); + return NULL; + } + pmd = pmd_alloc(pud, tags_addr); + DebugTM("pmd_alloc() returned PMD 0x%px\n", + pmd); + if (pmd == NULL) { + DebugTM("pmd_alloc() could not " + "allocate PMD\n"); + return NULL; + } + pte = pte_alloc_map(pmd, tags_addr); + DebugTM("pte_alloc_map() returned PTE 0x%px\n", + pte); + if (pte == NULL) { + DebugTM("pte_alloc_map() could not " + "allocate PTE\n"); + return NULL; + } +/* + if (pte_present(*pte)) { + DebugTM("pte 0x%px == 0x%lx : " + "tags page for addr 0x%lx is present already\n", + pte, pte_val(*pte), tags_addr); + return (pte_page(*pte)); + } +*/ + + DebugTM("will start handle_tags_pte_fault() " + "pte 0x%px for tags address 0x%lx\n", pte, tags_addr); + ret = handle_tags_pte_fault(mm, vma, tags_addr, write_access, pte); + if (ret <= 0) { + DebugTM("handle_tags_pte_fault() " + "returned error %d : could not make present page " + "for tags addr 0x%lx\n", ret, tags_addr); + return NULL; + } + DebugTM("returns with OK pte 0x%px == 0x%lx : " + "tags page for addr 0x%lx is present now\n", + pte, pte_val(*pte), tags_addr); + return pte_page(*pte); +} + +struct vm_area_struct * +create_tags_vma(struct mm_struct *mm, e2k_addr_t data_addr) +{ + e2k_addr_t tag_addr; + struct vm_area_struct *vma; + + DebugTM("started for data addr 0x%lx\n", data_addr); + + tag_addr = virt_to_tag(data_addr); + DebugTM("will start do_tag_mmap() for " + "tag addr 0x%lx\n", tag_addr); + vma = do_tag_mmap(mm, data_addr); + if (vma == NULL) { + 
DebugTM("do_tag_mmap() returned NULL for " + "tag addr 0x%lx : out of memory\n", tag_addr); + return NULL; + } + DebugTM("do_tag_mmap() returned VMA " + "0x%px for tag addr 0x%lx\n", vma, tag_addr); + DebugTM("will start find_vma() for tag addr " + "0x%lx\n", tag_addr); + vma = find_vma(mm, tag_addr); + if (vma == NULL || vma->vm_start > tag_addr) { + printk("create_tags_vma() : could not map tags " + "area for data address 0x%lx\n", + data_addr); + BUG(); + return NULL; + } else { + DebugTM("find_vma() returned VMA " + "0x%px\n", vma); + } + return vma; +} + +static struct vm_area_struct * +get_tags_addr_vma(struct mm_struct *mm, e2k_addr_t data_addr) +{ + e2k_addr_t tags_addr; + struct vm_area_struct *vma; + + DebugTM("started for data addr 0x%lx\n", data_addr); + + tags_addr = virt_to_tag(data_addr); + DebugTM("will start find_vma() for tags addr " + "0x%lx\n", tags_addr); + vma = find_vma(mm, tags_addr); + if (vma == NULL || vma->vm_start > tags_addr) { + DebugTM("will start create_tags_vma() for " + "tags addr 0x%lx\n", tags_addr); + vma = create_tags_vma(mm, data_addr); + if (vma == NULL) { + DebugTM("create_tags_vma() " + "returned NULL for tags addr 0x%lx : " + "out of memory\n", tags_addr); + return NULL; + } + DebugTM("create_tags_vma() returned VMA " + "0x%px for tags addr 0x%lx\n", vma, tags_addr); + } else { + DebugTM("find_vma() returned VMA " + "0x%px\n", vma); + } + DebugTM("returns VMA for data addr 0x%lx\n", + data_addr); + return vma; +} + +/* + * Get tags virtual memory address appropriate to specified virtual addresss + * of data. + * Second argument 'data_addr' should be quad-word aligned (16 bytes). 
+ * Pass start address of virtual page of data to get start address of tags area + */ + +struct page * +get_tags_page(struct mm_struct *mm, e2k_addr_t data_addr, int write_access) +{ + e2k_addr_t tags_addr; + struct vm_area_struct *vma; + struct page *tags_page; + + DebugTM("started for data addr 0x%lx\n", data_addr); + + tags_addr = virt_to_tag(data_addr); + DebugTM("will start get_tags_addr_vma() for tags addr " + "0x%lx\n", tags_addr); + vma = get_tags_addr_vma(mm, data_addr); + if (vma == NULL) { + DebugTM("get_tags_addr_vma() " + "returned NULL for tags addr 0x%lx : " + "out of memory\n", tags_addr); + return NULL; + } else { + DebugTM("get_tags_addr_vma() returned VMA " + "0x%px\n", vma); + } + DebugTM("will start make_tags_page_present() for " + "tags addr 0x%lx\n", tags_addr); + tags_page = make_tags_page_present(mm, vma, tags_addr, write_access); + if (tags_page != NULL) { + DebugTM("make_tags_page_present() returned " + "page 0x%px : tag addr 0x%lx is now present\n", + tags_page, tags_addr); + DebugTM("returns with OK for tags addr 0x%lx\n", + tags_addr); + return tags_page; + } + DebugTM("make_tags_page_present() returned NULL : " + "could not allocate tags page or it is swapped out\n"); + return NULL; +} + +/* + * Get tags virtual memory address appropriate to specified virtual addresss + * of data. + * Second argument 'data_addr' should be quad-word aligned (16 bytes). 
+ * Pass start address of virtual page of data to get start address of tags area + */ + +e2k_addr_t +get_tags_address(struct mm_struct *mm, e2k_addr_t data_addr, int write) +{ + e2k_addr_t tags_addr; + struct vm_area_struct *vma; + int ret; + + DebugTM("started for data addr 0x%lx\n", data_addr); + + tags_addr = virt_to_tag(data_addr); + DebugTM("will start get_tags_addr_vma() for tags " + "addr 0x%lx\n", tags_addr); + vma = get_tags_addr_vma(mm, data_addr); + if (vma == NULL) { + DebugTM("get_tags_addr_vma() " + "returned NULL for tags addr 0x%lx : " + "out of memory\n", tags_addr); + return (e2k_addr_t) 0; + } else { + DebugTM("get_tags_addr_vma() returned VMA " + "0x%px\n", vma); + } + DebugTM("will start handle_mm_fault() for " + "tags addr 0x%lx\n", tags_addr); + ret = handle_mm_fault(mm, vma, tags_addr, write); + if (ret > 0) { + DebugTM("handle_mm_fault() returned %d " + ": tags addr 0x%lx is now present\n", + ret, tags_addr); + DebugTM("returns tags addr 0x%lx\n", + tags_addr); + return tags_addr; + } + DebugTM("handle_mm_fault() returned %d : out of " + "memory\n", ret); + return (e2k_addr_t) 0; +} + +/* + * Unmap tags memory area appropriate to specified user virtual addresses + */ +int +do_tag_munmap(struct mm_struct *mm, e2k_addr_t data_addr, e2k_size_t data_len) +{ + e2k_addr_t data_end; + e2k_addr_t tag_start; + e2k_addr_t tag_end; + e2k_addr_t prev_end = 0; + e2k_addr_t next_start = TASK_SIZE; + struct vm_area_struct *data_vma; + struct vm_area_struct *prev_vma = NULL; + + DebugTM("started for addr 0x%lx size 0x%lx\n", + data_addr, data_len); + + if ((data_addr & ~PAGE_MASK) || data_addr > TASK_SIZE || + data_len > TASK_SIZE - data_addr) { + printk("do_tag_munmap() : bad data address 0x%lx or size " + "0x%lx\n", data_addr, data_len); + BUG(); + return -EINVAL; + } + + if ((data_len = PAGE_ALIGN(data_len)) == 0) { + printk("do_tag_munmap() : empty unmaped data area\n"); + BUG(); + return -EINVAL; + } + data_end = data_addr + data_len; + DebugTM("will 
start find_vma_prev() for data start " + "addr 0x%lx\n", data_addr); + data_vma = find_vma_prev(mm, data_addr, &prev_vma); + if (data_vma == NULL) { + DebugTM("find_vma_prev() could not find VMA " + "and returned NULL for data addr 0x%lx\n", data_addr); + if (prev_vma != NULL) + prev_end = prev_vma->vm_end; + } else if (data_vma->vm_start >= data_end) { + DebugTM("find_vma_prev() found VMA " + "but VMA has not any intersection with data area from " + "start addr 0x%lx to end addr 0x%lx\n", + data_addr, data_end); + if (prev_vma != NULL) + prev_end = prev_vma->vm_end; + next_start = data_vma->vm_start; + } else { + DebugTM("find_vma_prev() found VMA " + "intersected with data area from start addr 0x%lx " + "to end addr 0x%lx\n", + data_addr, data_end); + if (data_vma->vm_start >= data_addr) { + DebugTM("VMA start addr 0x%lx >= " + "data area start addr 0x%lx : take previous " + "VMA end\n", + data_vma->vm_start, data_addr); + if (prev_vma != NULL) + prev_end = prev_vma->vm_end; + } else { /* data_addr > data_vma->vm_start */ + DebugTM("VMA start addr 0x%lx < " + "data area start addr 0x%lx : could not be " + "hole from left\n", + data_vma->vm_start, data_addr); + prev_end = data_addr; + } + if (data_vma->vm_end > data_end) { + DebugTM("VMA end addr 0x%lx > " + "data area end addr 0x%lx : could not be " + "hole from right\n", + data_vma->vm_end, data_end); + next_start = data_end; + } else if (data_vma->vm_end == data_end) { + DebugTM("VMA end addr 0x%lx == " + "data area end addr 0x%lx : take next " + "VMA start\n", + data_vma->vm_start, data_end); + if (data_vma->vm_next != NULL) + next_start = data_vma->vm_next->vm_start; + } else { + DebugTM("will start find_vma() " + "for data end addr 0x%lx\n", data_end); + data_vma = find_vma(mm, data_end); + if (data_vma == NULL) { + DebugTM("find_vma() " + "could not find VMA and returned NULL " + "for data end addr 0x%lx\n", data_end); + } else if (data_vma->vm_start > data_end) { + DebugTM("VMA start addr 0x%lx " + ">= 
data area end addr 0x%lx : take " + "this VMA start addr\n", + data_vma->vm_start, data_end); + next_start = data_vma->vm_start; + } else { /* data_vma->vm_start <= data_end */ + DebugTM("VMA start addr 0x%lx " + "<= data area end addr 0x%lx : could " + "not be hole from right\n", + data_vma->vm_start, data_end); + next_start = data_end; + } + } + } + DebugTM("unmapped start addr 0x%lx, previous end addr " + "0x%lx, unmapped end addr 0x%lx, next start addr 0x%lx\n", + data_addr, prev_end, data_end, next_start); + if (PAGE_ALIGN_DOWN(virt_to_tag(prev_end)) <= + PAGE_ALIGN_UP(virt_to_tag(data_addr))) { + tag_start = PAGE_ALIGN_UP(virt_to_tag(data_addr)); + } else { + tag_start = PAGE_ALIGN_DOWN(virt_to_tag(data_addr)); + } + if (PAGE_ALIGN_DOWN(virt_to_tag(data_end)) <= + PAGE_ALIGN_UP(virt_to_tag(next_start))) { + tag_end = PAGE_ALIGN_DOWN(virt_to_tag(data_end)); + } else { + tag_end = PAGE_ALIGN_UP(virt_to_tag(data_end)); + } + DebugTM("unmapped tag memory start addr 0x%lx end " + "0x%lx\n", tag_start, tag_end); + if (tag_start >= tag_end) + return 0; + DebugTM("will start do_munmap() to unmap tag memory " + "from start addr 0x%lx to end addr 0x%lx\n", + tag_start, tag_end); + return do_munmap(mm, tag_start, tag_end - tag_start, NULL); +} + +int +save_swapped_page_tags(struct mm_struct * mm, struct page *swapped_page, + e2k_addr_t data_addr) +{ + e2k_addr_t tags_addr = virt_to_tag(data_addr); + struct page *tags_page; + struct vm_area_struct *tags_vma; + e2k_addr_t k_data_addr = (e2k_addr_t)page_address(swapped_page); + e2k_addr_t k_tags_addr; + + DebugTM("started for data addr 0x%lx\n", + data_addr); + + down(&mm->swap_info_sem); + + tags_page = get_tags_page(mm, data_addr, 1); + if (tags_page == NULL) { + DebugTM("get_tags_page() could not " + "get tags page : out of memory\n"); + up(&mm->swap_info_sem); + return -1; + } + DebugTM("get_tags_page() returned page " + "structure 0x%px\n", tags_page); + tags_vma = find_vma(mm, tags_addr); + if (tags_vma == NULL || 
tags_vma->vm_start > tags_addr) { + printk("save_swapped_page_tags() : could not find VMA for " + "existing tags addr 0x%lx\n", + tags_addr); + BUG(); + return -1; + } + k_tags_addr = (e2k_addr_t)page_address(tags_page) + + (tags_addr & ~PAGE_MASK); + DebugTM("wiil start save_mem_page_tags() " + "to save tags from addr 0x%lx to addr 0x%lx\n", + k_data_addr, k_tags_addr); + save_mem_page_tags(k_data_addr, k_tags_addr); + flush_page_to_ram(tags_page); + flush_cache_page(tags_vma, tags_addr); + DebugTM("returns with OK for data addr " + "0x%lx and tags addr 0x%lx\n", + data_addr, tags_addr); + up(&mm->swap_info_sem); + return 0; +} + +int +restore_swapped_page_tags(struct mm_struct * mm, struct page *swapped_page, + e2k_addr_t data_addr) +{ + e2k_addr_t tags_addr = virt_to_tag(data_addr); + struct page *tags_page; + struct vm_area_struct *tags_vma; + e2k_addr_t k_data_addr = (e2k_addr_t)page_address(swapped_page); + e2k_addr_t k_tags_addr; + + DebugTM("started for data addr 0x%lx\n", + data_addr); + DebugTM("will start get_tags_page()\n"); + + down(&mm->swap_info_sem); + tags_page = get_tags_page(mm, data_addr, 1); + if (tags_page == NULL) { + DebugTM("get_tags_page() could not " + "get tags page : out of memory\n"); + up(&mm->swap_info_sem); + return -1; + } + DebugTM("get_tags_page() returned page " + "structure 0x%px\n", tags_page); + tags_vma = find_vma(mm, tags_addr); + if (tags_vma == NULL || tags_vma->vm_start > tags_addr) { + printk("restore_swapped_page_tags() : could not find VMA for " + "existing tags addr 0x%lx\n", + tags_addr); + BUG(); + return -1; + } + k_tags_addr = (e2k_addr_t)page_address(tags_page) + + (tags_addr & ~PAGE_MASK); + DebugTM("will start " + "restore_mem_page_tags() to load tags from addr 0x%lx to " + "addr 0x%lx\n", + k_tags_addr, k_data_addr); + restore_mem_page_tags(k_data_addr, k_tags_addr); + flush_page_to_ram(tags_page); + flush_cache_page(tags_vma, tags_addr); + DebugTM("returns with OK for data addr " + "0x%lx and tags addr 
0x%lx\n", + data_addr, tags_addr); + up(&mm->swap_info_sem); + return 0; +} + + +void __init +swap_info_cache_init(void) +{ + swap_info_cache = kmem_cache_create("swp_pg_inf_strct", + sizeof(struct swap_page_info), 0, + SLAB_HWCACHE_ALIGN, NULL, NULL); + if (!swap_info_cache) + panic("Cannot create swap info structures SLAB cache"); + +} + +void +free_swap_info_struct(swap_page_info_t* info) +{ + kmem_cache_free(swap_info_cache, info); +} + + +int +add_swap_info_to_page(struct mm_struct* mm, struct page* page, e2k_addr_t addr) { + swap_page_info_t* page_info; + + if (PageWithSwapInfo(page)) { + printk("add_swap_info_to_page() Page 0x%px already has swap " + "info\n",page); + BUG(); + } + page_info = (swap_page_info_t*) kmem_cache_alloc(swap_info_cache, + SLAB_ATOMIC); + if (page_info == NULL) return -1; + DebugTM("Adding swap info to the page " + "0x%px\n",page); + page_info->mm = mm; + page_info->addr = addr; + page_info->next = NULL; + page->swap_info = page_info; + return 0; +} + +int +add_swap_info_to_page_next(struct mm_struct *mm, struct page *page, + e2k_addr_t addr) +{ + swap_page_info_t* page_info; + + if (!PageWithSwapInfo(page)) { + printk("add_swap_info_to_page_rec() Page 0x%px doesnt have " + "swap info\n",page); + BUG(); + } + page_info = (swap_page_info_t*) kmem_cache_alloc(swap_info_cache, + SLAB_ATOMIC); + if (page_info == NULL) return -1; + DebugTM("Adding next swap info to the " + "page 0x%px",page); + page_info->mm = mm; + page_info->addr = addr; + page_info->next = page->swap_info; + page->swap_info = page_info; + return 0; +} + +swap_page_info_t * +get_swap_info_from_page(struct page* page) +{ + swap_page_info_t *info = page->swap_info; + if (!PageWithSwapInfo(page)) { + printk("get_swap_info_from_page() Page 0x%px doesnt have swap " + "info\n", page); + BUG(); + } + DebugTM("Getting swap info from the page " + "0x%px\n",page); + page->swap_info = info->next; + return info; +} diff --git a/arch/e2k/p2v/Makefile b/arch/e2k/p2v/Makefile new file 
mode 100644
index 000000000000..81ff520b10ab
--- /dev/null
+++ b/arch/e2k/p2v/Makefile
@@ -0,0 +1,34 @@
#
# Makefile for "Physical-to-Virtual Switch".
#

subdir-ccflags-y := -Werror -Wswitch -Wenum-compare

# p2v code runs before virtual addressing is up; no gcov instrumentation
GCOV_PROFILE := n

# remove profile flags: -pg (ftrace mcount) must not be emitted for
# code executed before the kernel is fully initialized
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS)) -DE2K_P2V

#TODO LCC_125 LCC_126: remove fno-ld-spec everywhere through kernel and
#use -f[no-]semi-spec-ld -f[no-]loop-apb everywhere unconditionaly when
#removing support for lcc-1.25
ifeq ($(call cc-option-yn,-fno-semi-spec-ld -fno-spec-ld),y)
KBUILD_CFLAGS += -fno-semi-spec-ld -fno-spec-ld
else
KBUILD_CFLAGS += -fno-ld-spec
endif

# strip -fprofile-generate-kernel (surrounded by spaces) from the flags
empty:=
space:= $(empty) $(empty)
ORIG_CFLAGS := $(KBUILD_CFLAGS)
ORIG_CFLAGS := $(subst $(space)-fprofile-generate-kernel$(space),$(space),$(ORIG_CFLAGS))
KBUILD_CFLAGS = $(ORIG_CFLAGS)

obj-y = boot_head.o boot_init.o boot_map.o boot_phys.o boot_param.o \
	boot_string.o boot_find_bit.o boot_e2k_sic.o machdep.o \
	boot_printk/ cpu/

obj-$(CONFIG_SMP) += boot_smp.o
obj-$(CONFIG_RECOVERY) += boot_recovery.o
obj-$(CONFIG_BOOT_TRACE) += boot_profiling.o
diff --git a/arch/e2k/p2v/boot_e2k_sic.c b/arch/e2k/p2v/boot_e2k_sic.c
new file mode 100644
index 000000000000..7959fac9bcf8
--- /dev/null
+++ b/arch/e2k/p2v/boot_e2k_sic.c
@@ -0,0 +1,80 @@
/* NOTE(review): the four #include targets below were lost when the patch
 * was extracted (angle-bracket contents stripped) — restore from the
 * original patch before applying. */
#include
#include
#include
#include

/* Boot-time SIC (System Interface Controller) debug switch */
#undef BOOT_DEBUG_SIC_MODE
#undef BootDebugSIC
#define BOOT_DEBUG_SIC_MODE 0 /* SIC mapping & init */
#define BootDebugSIC(fmt, args...)
\
	({ if (BOOT_DEBUG_SIC_MODE) \
		dump_printk(fmt, ##args); })

#ifndef CONFIG_E2K_MACHINE
/*
 * Identify the e2k CPU model from the IDR register at boot time.
 * Only models allowed by CONFIG_E2K_MINVER are compiled into the
 * if/else ladder; each #if block ends with a dangling "else" that
 * chains into the next enabled block.
 * Returns a MACHINE_ID_* constant, or MACHINE_ID_NONE if the model
 * number is not recognized.
 */
int boot_get_e2k_machine_id(void)
{
	e2k_idr_t idr;
	int mdl;
	int mach_id;

	idr = boot_read_IDR_reg();
	mdl = idr.IDR_mdl;
	BootDebugSIC("boot_get_e2k_machine_id() CPU model is %d, IDR 0x%llx\n",
		mdl, idr.IDR_reg);
#if CONFIG_E2K_MINVER == 2
	if (mdl == IDR_ES2_DSP_MDL) {
		mach_id = MACHINE_ID_ES2_DSP;
	} else if (mdl == IDR_ES2_RU_MDL) {
		mach_id = MACHINE_ID_ES2_RU;
	} else
#endif
#if CONFIG_E2K_MINVER <= 3
	if (mdl == IDR_E2S_MDL) {
		mach_id = MACHINE_ID_E2S;
	} else
#endif
#if CONFIG_E2K_MINVER <= 4
	if (mdl == IDR_E8C_MDL) {
		mach_id = MACHINE_ID_E8C;
	} else if (mdl == IDR_E1CP_MDL) {
		mach_id = MACHINE_ID_E1CP;
	} else
#endif
#if CONFIG_E2K_MINVER <= 5
	if (mdl == IDR_E8C2_MDL) {
		mach_id = MACHINE_ID_E8C2;
	} else
#endif
#if CONFIG_E2K_MINVER <= 6
	if (mdl == IDR_E12C_MDL) {
		mach_id = MACHINE_ID_E12C;
	} else if (mdl == IDR_E16C_MDL) {
		mach_id = MACHINE_ID_E16C;
	} else if (mdl == IDR_E2C3_MDL) {
		mach_id = MACHINE_ID_E2C3;
	} else
#endif
	{
		BootDebugSIC("Undefined CPU model number %d\n", mdl);
		return MACHINE_ID_NONE;
	}

	return mach_id;
}
#endif

/*
 * Record the x86-style I/O area base/size for this machine's SIC type
 * (full vs. legacy) in boot_machine, and latch the native CPU revision
 * from IDR.  Machines without a SIC only get a diagnostic.
 */
void boot_e2k_sic_setup_arch(void)
{
	if (BOOT_HAS_MACHINE_E2K_FULL_SIC) {
		boot_machine.x86_io_area_base = E2K_FULL_SIC_IO_AREA_PHYS_BASE;
		boot_machine.x86_io_area_size = E2K_FULL_SIC_IO_AREA_SIZE;
	} else if (BOOT_HAS_MACHINE_E2K_LEGACY_SIC) {
		boot_machine.x86_io_area_base =
			E2K_LEGACY_SIC_IO_AREA_PHYS_BASE;
		boot_machine.x86_io_area_size = E2K_LEGACY_SIC_IO_AREA_SIZE;
	} else {
		do_boot_printk("boot_e2k_sic_setup_arch(): this machine does not have SIC\n");
	}
	boot_machine.native_rev = boot_read_IDR_reg().IDR_rev;
}

diff --git a/arch/e2k/p2v/boot_find_bit.c b/arch/e2k/p2v/boot_find_bit.c
new file mode 100644
index 000000000000..dcc0b6a5dfac
--- /dev/null
+++ b/arch/e2k/p2v/boot_find_bit.c
@@ -0,0 +1,48 @@
/* NOTE(review): the two #include targets below were lost when the patch
 * was extracted — restore from the original patch before applying. */
#include
#include


/*
 * This is a common helper function for find_next_bit and
 * find_next_zero_bit. The difference is the "invert" argument, which
 * is XORed with each fetched word before searching it for one bits.
 */
static unsigned long _find_next_bit(const unsigned long *addr,
		unsigned long nbits, unsigned long start, unsigned long invert)
{
	unsigned long tmp;

	if (!nbits || start >= nbits)
		return nbits;

	tmp = addr[start / BITS_PER_LONG] ^ invert;

	/* Handle 1st word: mask off bits below 'start'. */
	tmp &= BITMAP_FIRST_WORD_MASK(start);
	start = round_down(start, BITS_PER_LONG);

	while (!tmp) {
		start += BITS_PER_LONG;
		if (start >= nbits)
			return nbits;

		tmp = addr[start / BITS_PER_LONG] ^ invert;
	}

	/* min() clamps a hit in the (partial) last word back to nbits */
	return min(start + __ffs(tmp), nbits);
}

/*
 * Find the next set bit in a memory region.
 * Boot-time (physical addressing) clone of the generic find_next_bit().
 */
unsigned long boot_find_next_bit(const unsigned long *addr, unsigned long size,
		unsigned long offset)
{
	return _find_next_bit(addr, size, offset, 0UL);
}

/* Boot-time clone of find_next_zero_bit(): invert words, find a one. */
unsigned long boot_find_next_zero_bit(const unsigned long *addr, unsigned long size,
		unsigned long offset)
{
	return _find_next_bit(addr, size, offset, ~0UL);
}
diff --git a/arch/e2k/p2v/boot_head.c b/arch/e2k/p2v/boot_head.c
new file mode 100644
index 000000000000..f8091018aa99
--- /dev/null
+++ b/arch/e2k/p2v/boot_head.c
@@ -0,0 +1,952 @@
/* $Id: boot_head.c,v 1.41 2009/02/24 15:15:42 atic Exp $
 *
 * Control of boot-time initialization.
 *
 * Copyright (C) 2001 Salavat Guiliazov
 */

/* NOTE(review): the #include targets in this header block were lost when
 * the patch was extracted (angle-bracket contents stripped) — restore the
 * exact list from the original patch before applying. */
#include

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "boot_string.h"

#undef DEBUG_BOOT_MODE
#undef boot_printk
#undef DebugB
#undef DEBUG_BOOT_INFO_MODE
#define DEBUG_BOOT_MODE 0 /* Boot process */
#define DEBUG_BOOT_INFO_MODE 0 /* Boot info */
#define boot_printk if (DEBUG_BOOT_MODE) do_boot_printk
#define DebugB if (DEBUG_BOOT_MODE) printk

/* Number of CPUs that have entered boot; decremented for non-boot CPUs
 * in the !SMP case */
atomic_t boot_cpucount = ATOMIC_INIT(0);

#ifndef CONFIG_SMP
unsigned char boot_init_started = 0;	/* boot-time initialization */
					/* has been started */
unsigned char _va_support_on = 0;	/* virtual addressing support */
					/* has turned on */
#else
unsigned char boot_init_started[NR_CPUS] = { [0 ... (NR_CPUS-1)] = 0 };
					/* boot-time initialization */
					/* has been started on CPU */
unsigned char _va_support_on[NR_CPUS] = { [0 ... (NR_CPUS-1)] = 0 };
					/* virtual addressing support */
					/* has turned on on CPU */
#endif /* CONFIG_SMP */

bootblock_struct_t *bootblock_phys;	/* bootblock structure */
					/* physical pointer */
bootblock_struct_t *bootblock_virt;	/* bootblock structure */
					/* virtual pointer */
#ifdef CONFIG_SMP
/* one-shot events used to order BSP vs. application processors */
static atomic_t __initdata boot_bss_cleaning_finished = ATOMIC_INIT(0);
static atomic_t __initdata bootblock_checked = ATOMIC_INIT(0);
static atomic_t __initdata boot_info_setup_finished = ATOMIC_INIT(0);
#endif /* CONFIG_SMP */

/* set by the BSP once PV_OPS pointers are converted to physical */
bool pv_ops_is_set = false;
#define boot_pv_ops_is_set boot_native_get_vo_value(pv_ops_is_set)

/* SCALL 12 is used as a kernel jumpstart */
void notrace __section(.ttable_entry12)
ttable_entry12(int n, bootblock_struct_t *bootblock)
{
	bool bsp;

	/* CPU will stall if we have unfinished memory operations.
	 * This shows bootloader problems if they present */
	__E2K_WAIT_ALL;

	bsp = boot_early_pic_is_bsp();
	/* Convert virtual PV_OPS function addresses to physical;
	 * application processors spin until the BSP has done so */
	if (bsp) {
		native_pv_ops_to_boot_ops();
		boot_pv_ops_is_set = true;
	} else {
		while (!boot_pv_ops_is_set)
			native_cpu_relax();
	}

	/* Clear global registers and set current pointers to 0 */
	/* to indicate that current_thread_info() is not ready yet */
	BOOT_INIT_G_REGS();

	boot_startup(bsp, bootblock);
}

/*
 * Native/guest VM indicator: on iset > V2 read CORE_MODE.gmi; on V2 the
 * host reports hardware virtualization via IDR.hw_virt instead (the
 * CORE_MODE field is reserved there).
 */
static inline bool boot_is_guest_hv_vm(struct machdep *mach)
{
	if (likely(mach->native_iset_ver > E2K_ISET_V2)) {
		/* there is CPU register CORE_MODE to check 'gmi' */
		e2k_core_mode_t CORE;

		CORE.CORE_MODE_reg = boot_native_read_CORE_MODE_reg_value();
		return !!CORE.CORE_MODE_gmi;
	} else {
		/* host set IDR.hw_virt instead of CORE_MODE.gmi */
		/* (this field is reserved on iset V2) */
		e2k_idr_t IDR;

		IDR.IDR_reg = boot_native_read_IDR_reg_value();
		return !!IDR.hw_virt;
	}
}

/*
 * Run every registered cpuhas initcall (a table of feature-detection
 * callbacks between __cpuhas_initcalls and __cpuhas_initcalls_end).
 * Function pointers are converted virtual->physical because this runs
 * before virtual addressing is enabled.
 */
static void boot_setup_machine_cpu_features(struct machdep *machine)
{
	int cpu = machine->native_id & MACHINE_ID_CPU_TYPE_MASK;
	int revision = machine->native_rev;
	int iset_ver = machine->native_iset_ver;
	int guest_cpu;
	cpuhas_initcall_t *fn, *start, *end, *fnv;

#ifdef CONFIG_KVM_GUEST_KERNEL
	guest_cpu = machine->guest.id & MACHINE_ID_CPU_TYPE_MASK;
#else
	guest_cpu = cpu;
#endif

	start = (cpuhas_initcall_t *) __cpuhas_initcalls;
	end = (cpuhas_initcall_t *) __cpuhas_initcalls_end;
	fn = boot_vp_to_pp(start);
	for (fnv = start; fnv < end; fnv++, fn++)
		boot_func_to_pp(*fn)(cpu, revision, iset_ver, guest_cpu,
				machine);
}

/*
 * Fill in the machdep function-pointer table according to the native
 * instruction-set version (V2/V5/V6 variants of gregs, AAU, MLT etc.).
 */
void __init_recv boot_setup_iset_features(struct machdep *machine)
{
	/* Initialize this as early as possible (but after setting cpu
	 * id and revision and boot_machine.native_iset_ver) */
	boot_setup_machine_cpu_features(machine);

#ifdef CONFIG_GREGS_CONTEXT
	if (machine->native_iset_ver
< E2K_ISET_V5) { + machine->save_kernel_gregs = &save_kernel_gregs_v2; + machine->save_gregs = &save_gregs_v2; + machine->save_local_gregs = &save_local_gregs_v2; + machine->save_gregs_dirty_bgr = &save_gregs_dirty_bgr_v2; + machine->save_gregs_on_mask = &save_gregs_on_mask_v2; + machine->restore_gregs = &restore_gregs_v2; + machine->restore_local_gregs = &restore_local_gregs_v2; + machine->restore_gregs_on_mask = &restore_gregs_on_mask_v2; + } else { + machine->save_kernel_gregs = &save_kernel_gregs_v5; + machine->save_gregs = &save_gregs_v5; + machine->save_local_gregs = &save_local_gregs_v5; + machine->save_gregs_dirty_bgr = &save_gregs_dirty_bgr_v5; + machine->save_gregs_on_mask = &save_gregs_on_mask_v5; + machine->restore_gregs = &restore_gregs_v5; + machine->restore_local_gregs = &restore_local_gregs_v5; + machine->restore_gregs_on_mask = &restore_gregs_on_mask_v5; + } +#endif + +#ifdef CONFIG_USE_AAU + if (machine->native_iset_ver < E2K_ISET_V5) { + machine->calculate_aau_aaldis_aaldas = + &calculate_aau_aaldis_aaldas_v2; + machine->do_aau_fault = &do_aau_fault_v2; + machine->save_aaldi = &save_aaldi_v2; + machine->get_aau_context = &get_aau_context_v2; + } else if (machine->native_iset_ver == E2K_ISET_V5) { + machine->calculate_aau_aaldis_aaldas = + &calculate_aau_aaldis_aaldas_v5; + machine->do_aau_fault = &do_aau_fault_v5; + machine->save_aaldi = &save_aaldi_v5; + machine->get_aau_context = &get_aau_context_v5; + } else { + machine->calculate_aau_aaldis_aaldas = + &calculate_aau_aaldis_aaldas_v6; + machine->do_aau_fault = &do_aau_fault_v6; + machine->save_aaldi = &save_aaldi_v5; + machine->get_aau_context = &get_aau_context_v5; + } +#endif + +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + machine->flushts = ((machine->native_iset_ver < E2K_ISET_V3) ? 
+ NULL : &flushts_v3); +#endif + +#ifdef CONFIG_MLT_STORAGE + if (machine->native_iset_ver >= E2K_ISET_V6) { + machine->invalidate_MLT = &invalidate_MLT_v3; + machine->get_and_invalidate_MLT_context = + &get_and_invalidate_MLT_context_v6; + } else if (machine->native_iset_ver >= E2K_ISET_V3) { + machine->invalidate_MLT = &invalidate_MLT_v3; + machine->get_and_invalidate_MLT_context = + &get_and_invalidate_MLT_context_v3; + } else { + machine->invalidate_MLT = &invalidate_MLT_v2; + machine->get_and_invalidate_MLT_context = + &get_and_invalidate_MLT_context_v2; + } +#endif + + if (machine->native_iset_ver == E2K_ISET_V2) { + machine->rrd = &rrd_v2; + machine->rwd = &rwd_v2; + machine->boot_rrd = &boot_rrd_v2; + machine->boot_rwd = &boot_rwd_v2; + } else if (machine->native_iset_ver < E2K_ISET_V6) { + machine->rrd = &rrd_v3; + machine->rwd = &rwd_v3; + machine->boot_rrd = &boot_rrd_v3; + machine->boot_rwd = &boot_rwd_v3; + } else { + machine->rrd = &rrd_v6; + machine->rwd = &rwd_v6; + machine->boot_rrd = &boot_rrd_v6; + machine->boot_rwd = &boot_rwd_v6; + machine->save_kvm_context = &save_kvm_context_v6; + machine->restore_kvm_context = &restore_kvm_context_v6; + machine->save_dimtp = &save_dimtp_v6; + machine->restore_dimtp = &restore_dimtp_v6; + machine->clear_dimtp = &clear_dimtp_v6; + } + + if (machine->native_iset_ver < E2K_ISET_V5) { + machine->get_cu_hw1 = &native_get_cu_hw1_v2; + machine->set_cu_hw1 = &native_set_cu_hw1_v2; + } else { + machine->get_cu_hw1 = &native_get_cu_hw1_v5; + machine->set_cu_hw1 = &native_set_cu_hw1_v5; + } + + if (machine->native_iset_ver >= E2K_ISET_V6) { + machine->C1_enter = C1_enter_v6; + machine->C3_enter = C3_enter_v6; + } else if (machine->native_iset_ver >= E2K_ISET_V3) { + machine->C1_enter = C1_enter_v2; + machine->C3_enter = C3_enter_v3; + } else { + machine->C1_enter = C1_enter_v2; + } + +#ifdef CONFIG_SMP + if (machine->native_iset_ver >= E2K_ISET_V3) { + machine->clk_off = clock_off_v3; + machine->clk_on = clock_on_v3; + 
} +#endif +} + +void __init_recv +boot_common_setup_arch_mmu(struct machdep *machine, pt_struct_t *pt_struct) +{ + pt_level_t *pmd_level; + pt_level_t *pud_level; + + if (boot_machine_has(machine, CPU_HWBUG_PAGE_A)) + pt_struct->accessed_mask = _PAGE_A_SW_V2; + + pmd_level = &pt_struct->levels[E2K_PMD_LEVEL_NUM]; + pud_level = &pt_struct->levels[E2K_PUD_LEVEL_NUM]; + if (machine->native_iset_ver >= E2K_ISET_V3) { + pmd_level->page_size = E2K_2M_PAGE_SIZE; + pmd_level->page_shift = PMD_SHIFT; + pmd_level->page_offset = ~PMD_MASK; + pmd_level->huge_ptes = 1; + } else { + pmd_level->page_size = E2K_4M_PAGE_SIZE; + pmd_level->page_shift = PMD_SHIFT + 1; + pmd_level->page_offset = E2K_4M_PAGE_SIZE - 1; + pmd_level->huge_ptes = 2; + pmd_level->boot_set_pte = boot_vp_to_pp(&boot_set_double_pte); + pmd_level->boot_get_huge_pte = + boot_vp_to_pp(&boot_get_double_huge_pte); + pmd_level->init_pte_clear = &init_double_pte_clear; + pmd_level->init_get_huge_pte = &init_get_double_huge_pte; + pmd_level->split_pt_page = &split_multiple_pmd_page; + + pud_level->map_pt_huge_page_to_prev_level = + &map_pud_huge_page_to_multiple_pmds; + } + if (machine->native_iset_ver >= E2K_ISET_V5) { + + pud_level->is_huge = true; + pud_level->huge_ptes = 1; + pud_level->dtlb_type = FULL_ASSOCIATIVE_DTLB_TYPE; + } +} + +void boot_native_setup_machine_id(bootblock_struct_t *bootblock) +{ +#ifdef CONFIG_E2K_MACHINE +#if defined(CONFIG_E2K_ES2_DSP) || defined(CONFIG_E2K_ES2_RU) + boot_es2_setup_arch(); +#elif defined(CONFIG_E2K_E2S) + boot_e2s_setup_arch(); +#elif defined(CONFIG_E2K_E8C) + boot_e8c_setup_arch(); +#elif defined(CONFIG_E2K_E8C2) + boot_e8c2_setup_arch(); +#elif defined(CONFIG_E2K_E1CP) + boot_e1cp_setup_arch(); +#elif defined(CONFIG_E2K_E12C) + boot_e12c_setup_arch(); +#elif defined(CONFIG_E2K_E16C) + boot_e16c_setup_arch(); +#elif defined(CONFIG_E2K_E2C3) + boot_e2c3_setup_arch(); +#else +# error "E2K MACHINE type does not defined" +#endif +#else /* ! 
CONFIG_E2K_MACHINE */ + int simul_flag; + int iohub_flag; + int mach_id = 0; + + simul_flag = bootblock->info.mach_flags & SIMULATOR_MACH_FLAG; + iohub_flag = bootblock->info.mach_flags & IOHUB_MACH_FLAG; + if (simul_flag) + mach_id |= MACHINE_ID_SIMUL; + if (iohub_flag) + mach_id |= MACHINE_ID_E2K_IOHUB; + + mach_id |= boot_get_e2k_machine_id(); +#if CONFIG_E2K_MINVER == 2 + if (mach_id == MACHINE_ID_ES2_DSP_LMS || + mach_id == MACHINE_ID_ES2_RU_LMS || + mach_id == MACHINE_ID_ES2_DSP || + mach_id == MACHINE_ID_ES2_RU) { + boot_es2_setup_arch(); + } else +#endif +#if CONFIG_E2K_MINVER <= 3 + if (mach_id == MACHINE_ID_E2S_LMS || + mach_id == MACHINE_ID_E2S) { + boot_e2s_setup_arch(); + } else +#endif +#if CONFIG_E2K_MINVER <= 4 + if (mach_id == MACHINE_ID_E8C_LMS || + mach_id == MACHINE_ID_E8C) { + boot_e8c_setup_arch(); + } else if (mach_id == MACHINE_ID_E1CP_LMS || + mach_id == MACHINE_ID_E1CP) { + boot_e1cp_setup_arch(); + } else +#endif +#if CONFIG_E2K_MINVER <= 5 + if (mach_id == MACHINE_ID_E8C2_LMS || + mach_id == MACHINE_ID_E8C2) { + boot_e8c2_setup_arch(); + } else +#endif +#if CONFIG_E2K_MINVER <= 6 + if (mach_id == MACHINE_ID_E12C_LMS || + mach_id == MACHINE_ID_E12C) { + boot_e12c_setup_arch(); + } else if (mach_id == MACHINE_ID_E16C_LMS || + mach_id == MACHINE_ID_E16C) { + boot_e16c_setup_arch(); + } else if (mach_id == MACHINE_ID_E2C3_LMS || + mach_id == MACHINE_ID_E2C3) { + boot_e2c3_setup_arch(); + } +#endif /* CONFIG_E2K_MINVER */ + + boot_native_machine_id = mach_id; +#endif /* CONFIG_E2K_MACHINE */ + boot_machine.native_id = boot_native_machine_id; +} + +static void __init +boot_loader_type_banner(boot_info_t *boot_info) +{ + if (boot_info->signature == ROMLOADER_SIGNATURE) { + boot_printk("Boot information passed by ROMLOADER\n"); + } else if (boot_info->signature == X86BOOT_SIGNATURE) { + boot_printk("Boot information passed by BIOS (x86)\n"); + } else if (boot_info->signature == KVM_GUEST_SIGNATURE) { + boot_printk("Boot information passed by 
HOST kernel " + "to KVM GUEST\n"); + } else { + BOOT_BUG("Boot information passed by unknown loader\n"); + } +} + +static void __init +boot_setup(bool bsp, bootblock_struct_t *bootblock) +{ + register boot_info_t *boot_info = &bootblock->info; + register e2k_rwap_lo_struct_t reg_lo = {{ 0 }}; + register e2k_rwap_hi_struct_t reg_hi = {{ 0 }}; + register e2k_addr_t addr; + register e2k_size_t size; +#ifdef CONFIG_NUMA + unsigned int cpuid; +#endif + + /* + * Set 'data/bss' segment CPU registers OSGD & GD + * to kernel image unit + */ + + addr = (e2k_addr_t)_sdata; + BOOT_BUG_ON(addr & E2K_ALIGN_OS_GLOBALS_MASK, + "Kernel 'data' segment start address 0x%lx " + "is not aligned to mask 0x%lx\n", + addr, E2K_ALIGN_OS_GLOBALS_MASK); + addr = (e2k_addr_t)boot_vp_to_pp(&_sdata); + reg_lo.GD_lo_base = addr; + reg_lo._GD_lo_rw = E2K_GD_RW_PROTECTIONS; + + /* Assume that BSS is placed immediately after data */ + size = (e2k_addr_t)_edata_bss - (e2k_addr_t)_sdata; + size = ALIGN_TO_MASK(size, E2K_ALIGN_OS_GLOBALS_MASK); + reg_hi.GD_hi_size = size; + reg_hi._GD_hi_curptr = 0; + + BOOT_WRITE_GD_REG(reg_hi, reg_lo); + BOOT_WRITE_OSGD_REG(reg_hi, reg_lo); + + boot_printk("Kernel DATA/BSS segment pointers OSGD & GD are set to " + "base physical address 0x%lx size 0x%lx\n", + addr, size); + +#ifdef CONFIG_SMP + boot_printk("Kernel boot-time initialization in progress " + "on CPU %d PIC id %d\n", + boot_smp_processor_id(), + boot_early_pic_read_id()); +#endif /* CONFIG_SMP */ + + /* + * Clear kernel BSS segment (on BSP only) + */ +#ifdef CONFIG_SMP + if (bsp) { +#endif /* CONFIG_SMP */ + boot_clear_bss(); +#ifdef CONFIG_SMP + boot_set_event(&boot_bss_cleaning_finished); + } else { + boot_wait_for_event(&boot_bss_cleaning_finished); + } +#endif /* CONFIG_SMP */ + +#ifdef CONFIG_NUMA + /* + * Do initialization of CPUs possible and present masks again because + * these masks could be cleared while BSS cleaning + */ + cpuid = boot_smp_processor_id(); + boot_set_phys_cpu_present(cpuid); + 
+ boot___apicid_to_node[cpuid] = boot_numa_node_id(); +#endif + + /* + * Set 'text' segment CPU registers OSCUD & CUD + * to kernel image unit + */ + + addr = (e2k_addr_t)_start; + BOOT_BUG_ON(addr & E2K_ALIGN_OSCU_MASK, + "Kernel 'text' segment start address 0x%lx " + "is not aligned to mask 0x%lx\n", + addr, E2K_ALIGN_OSCU_MASK); + addr = (e2k_addr_t)boot_vp_to_pp(&_start); + reg_lo.CUD_lo_base = addr; + reg_lo.CUD_lo_c = E2K_CUD_CHECKED_FLAG; + reg_lo._CUD_lo_rw = E2K_CUD_RW_PROTECTIONS; + + size = (e2k_addr_t)_etext - (e2k_addr_t)_start; + size = ALIGN_TO_MASK(size, E2K_ALIGN_OSCU_MASK); + reg_hi.CUD_hi_size = size; + reg_hi._CUD_hi_curptr = 0; + + BOOT_WRITE_CUD_REG(reg_hi, reg_lo); + BOOT_WRITE_OSCUD_REG(reg_hi, reg_lo); + + boot_printk("Kernel TEXT segment pointers OSCUD & CUD are set to " + "base physical address 0x%lx size 0x%lx\n", + addr, size); + + if (BOOT_IS_BSP(bsp)) { + boot_check_bootblock(bsp, bootblock); +#ifdef CONFIG_SMP + boot_set_event(&bootblock_checked); + } else { + boot_wait_for_event(&bootblock_checked); +#endif /* CONFIG_SMP */ + } + + if (addr != boot_info->kernel_base) { + BOOT_WARNING("Kernel start address 0x%lx is not the same " + "as base address to load kernel in bootblock " + "structure 0x%lx\n", + addr, boot_info->kernel_base); + boot_info->kernel_base = addr; + } + BOOT_BUG_ON(size > boot_info->kernel_size, + "Kernel size 0x%lx is not the same " + "as size to load kernel in bootblock structure 0x%lx\n", + size, boot_info->kernel_size); + + /* + * Set Trap Cellar pointer and MMU register to kernel image area + * and reset Trap Counter register + * In NUMA mode now we set pointer to base trap cellar on + * bootstrap node + */ + + boot_set_MMU_TRAP_POINT(boot_trap_cellar); + boot_reset_MMU_TRAP_COUNT(); + + boot_printk("Kernel trap cellar set to physical address 0x%lx " + "MMU_TRAP_CELLAR_MAX_SIZE 0x%x kernel_trap_cellar 0x%lx\n", + boot_kernel_trap_cellar, MMU_TRAP_CELLAR_MAX_SIZE, + BOOT_KERNEL_TRAP_CELLAR); + + /* + * Remember 
phys. address of boot information block in + * an appropriate data structure. + */ + +#ifdef CONFIG_SMP + if (bsp) { +#endif /* CONFIG_SMP */ + boot_bootblock_phys = bootblock; + boot_bootinfo_phys_base = (e2k_addr_t)boot_bootblock_phys; + + boot_printk("Boot block physical address: 0x%lx\n", + boot_bootblock_phys); + + boot_loader_type_banner(boot_info); + if (DEBUG_BOOT_INFO_MODE) { + int i; + for (i = 0; i < sizeof(bootblock_struct_t) / 8; i ++) { + do_boot_printk("boot_info[%d] = 0x%lx\n", + i, ((u64 *)boot_info)[i]); + } + } +#ifdef CONFIG_SMP + boot_setup_smp_cpu_config(boot_info); + boot_set_event(&boot_info_setup_finished); + } else { + boot_wait_for_event(&boot_info_setup_finished); + if (boot_smp_processor_id() >= NR_CPUS) { + BOOT_BUG("CPU #%d : this processor number >= than max supported CPU number %d\n", + boot_smp_processor_id(), + NR_CPUS); + } + } +#endif /* CONFIG_SMP */ +} + +/* + * Sequel of process of initialization. This function is run into virtual + * space and controls farther system boot + */ +static void __init +boot_init_sequel(bool bsp, int cpuid, int cpus_to_sync) +{ + boot_set_kernel_MMU_state_after(); + + va_support_on = 1; + + /* + * SYNCHRONIZATION POINT #3 + * At this point all processors should complete switching to + * virtual memory + * After synchronization all processors can terminate + * boot-time initialization of virtual memory support + * + * No tracepoint calls before sync all processors should be. All cpus + * should end switching to virtual memory support to prevent accessing + * to memory by high and low physical addresses simultaneously inside + * boot tracepoint. It needs only in the case of + * CONFIG_ONLY_HIGH_PHYS_MEM enabled. 
+ */ +#if 0 + EARLY_BOOT_TRACEPOINT("SYNCHRONIZATION POINT #3"); +#endif + init_sync_all_processors(cpus_to_sync); + +#ifdef CONFIG_SMP + if (bsp) +#endif + EARLY_BOOT_TRACEPOINT("kernel boot-time init finished"); + + /* + * Reset processors number for recovery + */ + init_reset_smp_processors_num(); + + /* + * Initialize dump_printk() - simple printk() which + * outputs straight to the serial port. + */ +#if defined(CONFIG_SERIAL_PRINTK) + setup_serial_dump_console(&bootblock_virt->info); +#endif + + /* + * Show disabled caches + */ + +#ifdef CONFIG_SMP + if (bsp) { +#endif /* CONFIG_SMP */ + if (disable_caches != _MMU_CD_EN) { + if (disable_caches == _MMU_CD_D1_DIS) + pr_info("Disable L1 cache\n"); + else if (disable_caches == _MMU_CD_D_DIS) + pr_info("Disable L1 and L2 caches\n"); + else if (disable_caches == _MMU_CD_DIS) + pr_info("Disable L1, L2 and L3 caches\n"); + } + if (disable_secondary_caches) + pr_info("Disable secondary INTEL caches\n"); + if (disable_IP == _MMU_IPD_DIS) + pr_info("Disable IB prefetch\n"); + DebugB("MMU CR 0x%llx\n", READ_MMU_CR()); +#ifdef CONFIG_SMP + } +#endif /* CONFIG_SMP */ + + /* + * Terminate boot-time initialization and start kernel init + */ + init_terminate_boot_init(bsp, cpuid); + +#ifndef CONFIG_SMP +#undef cpuid +#endif /* CONFIG_SMP */ +} + +/* + * Control process of boot-time initialization. + * Loader or bootloader program should call this function to start boot + * process of the system. The function provide for virtual memory support + * and switching to execution into the virtual space. The following part + * of initialization should be made by 'boot_init_sequel()' function, which + * will be run with virtual environment support. 
+ */ +static void __init +boot_init(bool bsp, bootblock_struct_t *bootblock) +{ + register int cpuid; + + cpuid = boot_smp_get_processor_id(); + boot_smp_set_processor_id(cpuid); + boot_printk("boot_init() started on CPU #%d\n", cpuid); + +#ifndef CONFIG_SMP + if (!bsp) { + boot_atomic_dec(&boot_cpucount); + while (1) /* Idle if not boot CPU */ + boot_cpu_relax(); + } else { +#endif /* !CONFIG_SMP */ + boot_set_phys_cpu_present(cpuid); +#ifndef CONFIG_SMP + } +#endif /* !CONFIG_SMP */ + /* + * Preserve recursive call of boot, if some trap occured + * while trap table is not installed + */ + + if (boot_boot_init_started) { + if (boot_va_support_on) { + INIT_BUG("Recursive call of boot_init(), perhaps, " + "due to trap\n"); + } else { + BOOT_BUG("Recursive call of boot_init(), perhaps, " + "due to trap\n"); + } + } else { + boot_boot_init_started = 1; + } + + /* + * Initialize virtual memory support for farther system boot and + * switch sequel initialization to the function 'boot_init_sequel()' + * into the real virtual space. Should not be return here. + */ + + boot_printk("Kernel boot-time initialization started\n"); + boot_setup(bsp, bootblock); + boot_mem_init(bsp, cpuid, &bootblock->info, boot_init_sequel); +} + +void __ref +boot_startup(bool bsp, bootblock_struct_t *bootblock) +{ + boot_info_t *boot_info = NULL; + u16 signature; +#ifdef CONFIG_RECOVERY + int recovery = bootblock->kernel_flags & RECOVERY_BB_FLAG; +#else /* ! CONFIG_RECOVERY */ + #define recovery 0 +#endif /* CONFIG_RECOVERY */ + + /* CPU will stall if we have unfinished memory operations. + * This shows bootloader problems if they present */ + __E2K_WAIT_ALL; + + if (bsp) + EARLY_BOOT_TRACEPOINT("kernel boot-time init started"); + + /* + * An early parse of cmd line. 
+ */ +#ifdef CONFIG_SMP + if (bsp) { +#endif /* CONFIG_SMP */ + boot_machine.cmdline_iset_ver = false; + boot_parse_param(bootblock); +#ifdef CONFIG_SMP + } +#endif /* CONFIG_SMP */ + + if (!recovery && bsp) { + boot_setup_machine_id(bootblock); + boot_setup_iset_features(&boot_machine); + + /* set indicator of guest hardware virtualized VM */ + /* can be called only after 'boot_rrd' setup */ + boot_machine.gmi = boot_is_guest_hv_vm(&boot_machine); + + boot_common_setup_arch_mmu(&boot_machine, + boot_pgtable_struct_p); + } + + /* early setup CPU # */ + boot_smp_set_processor_id(boot_early_pic_read_id()); + + /* Try to determine automatically if we are under virtualization */ +#ifndef CONFIG_CPU_ES2 + if (bsp) { + e2k_core_mode_t core_mode; + + AW(core_mode) = NATIVE_READ_CORE_MODE_REG_VALUE(); + if (core_mode.gmi) + boot_machine.native_iset_ver = E2K_ISET_V6; + } +#endif + +#if defined(CONFIG_SERIAL_BOOT_PRINTK) + if (!recovery) { + boot_setup_serial_console(bsp, &bootblock->info); + } +#endif + +#ifdef CONFIG_EARLY_VIRTIO_CONSOLE + if (boot_paravirt_enabled()) { + /* only guest kernel can use VIRTIO HVC console */ +#ifdef CONFIG_SMP + if (!bsp) + while (!boot_early_virtio_cons_enabled) { + mb(); /* wait for all read completed */ + } + else +#endif /* CONFIG_SMP */ + boot_hvc_l_cons_init(bootblock->info.serial_base); + } +#endif /* CONFIG_EARLY_VIRTIO_CONSOLE */ + +#if defined(DEBUG_BOOT_INFO) && DEBUG_BOOT_INFO + if (bsp) { + /* + * Set boot strap CPU id to enable erly boot print with + * nodes and CPUs numbers + */ + int cpu_id = boot_early_pic_read_id(); + boot_smp_set_processor_id(cpu_id); + do_boot_printk("bootblock 0x%x, flags 0x%x\n", + bootblock, bootblock->kernel_flags); + } +#endif + + /* + * BIOS/x86 loader has following incompatibilities with kernel + * boot process assumption: + * 1. Not set USBR register to C stack high address + * 2. 
Set PSP register size to full procedure stack memory + * when this size should be without last page (last page + * used as guard to preserve stack overflow) + * 3. Set PCSP register size to full procedure chain stack memory + * when this size should be without last page (last page + * used as guard to preserve stack overflow) + */ + boot_info = &bootblock->info; + signature = boot_info->signature; + + if (signature == X86BOOT_SIGNATURE) { + e2k_usbr_t USBR = { {0} }; + usd_struct_t USD; + psp_struct_t PSP; + pcsp_struct_t PCSP; + + if (!recovery) { + boot_read_USD_reg(&USD); + USBR.USBR_base = PAGE_ALIGN_DOWN(USD.USD_base); + boot_write_USBR_reg(USBR); + + boot_read_PSP_reg(&PSP); + PSP.PSP_size -= PAGE_SIZE; + boot_write_PSP_reg(PSP); + + boot_read_PCSP_reg(&PCSP); + PCSP.PCSP_size -= PAGE_SIZE; + boot_write_PCSP_reg(PCSP); + } + } + + /* + * Set UPSR register in the initial state (where interrupts + * are disabled). NMI should be disabled too, because of spureous + * interrupts can be occur while booting time and kernel is not now + * ready to handle any traps and interrupts. + * Switch control from PSR register to UPSR if it needs + */ + BOOT_SET_KERNEL_UPSR(); + + /* + * Check supported CPUs number. Some structures and tables + * allocated support only NR_CPUS number of CPUs + */ + if (boot_smp_processor_id() >= NR_CPUS) { + static int printed = 0; + + /* Make sure the message gets out on !SMP kernels + * which have spinlocks compiled out. 
*/ + if (!xchg(boot_vp_to_pp(&printed), 1)) { + BOOT_BUG_POINT("boot_startup()"); + BOOT_BUG("CPU #%d : this processor number >= than max supported CPU number %d\n", + boot_smp_processor_id(), NR_CPUS); + } + + for (;;) + cpu_relax(); + } + +#ifdef CONFIG_RECOVERY + if (recovery) + boot_recovery(bootblock); + else +#endif /* CONFIG_RECOVERY */ + boot_init(bsp, bootblock); +} + +static int __init boot_set_iset(char *cmd) +{ + unsigned long iset; + + if (*cmd != 'v') { + boot_printk("Bad 'iset' kernel parameter value: \"%s\"\n", cmd); + return 1; + } + + ++cmd; + + iset = boot_simple_strtoul(cmd, &cmd, 0); + boot_printk("Setting machine iset version to %d\n", iset); + + boot_machine.native_iset_ver = iset; + boot_machine.cmdline_iset_ver = true; + + return 0; +} +boot_param("iset", boot_set_iset); + +/* + * Clear kernel BSS segment in native mode + */ +void __init boot_native_clear_bss(void) +{ + e2k_size_t size; + unsigned long *bss_p; + + bss_p = (unsigned long *)boot_vp_to_pp(&__bss_start); + size = (e2k_addr_t)__bss_stop - (e2k_addr_t)__bss_start; + boot_printk("Kernel BSS segment will be cleared from " + "physical address 0x%lx size 0x%lx\n", + bss_p, size); + boot_fast_memset(bss_p, 0, size); +} + +void __init +boot_native_check_bootblock(bool bsp, bootblock_struct_t *bootblock) +{ + /* nothing to check */ +} + +/* + * Start kernel initialization on bootstrap processor. + * Other processors will do some internal initialization and wait + * for commands from bootstrap processor. + */ +void __init init_start_kernel_init(bool bsp, int cpuid) +{ + setup_stack_print(); + + if (bsp) { + init_preempt_count_resched(INIT_PREEMPT_COUNT, false); + e2k_start_kernel(); + } else { + init_preempt_count_resched(PREEMPT_ENABLED, false); + e2k_start_secondary(cpuid); + } + + /* + * Never should be here + */ + BUG(); + boot_panic("BOOT: Return from start_kernel().\n"); + E2K_HALT_ERROR(-1); +} + +/* + * Sequel of process of initialization. 
 This function is run into virtual
+ * space and controls termination of boot-time init and start kernel init
+ * (@bsp: true on bootstrap processor, @cpuid: this CPU's id; tail-calls
+ * into init_start_kernel_init() and so never returns).
+ */
+void __init init_native_terminate_boot_init(bool bsp, int cpuid)
+{
+
+	/*
+	 * Flush instruction and data cashes to delete all physical
+	 * instruction and data pages
+	 */
+	flush_ICACHE_all();
+
+	/*
+	 * Terminate boot-time initialization of virtual memory support
+	 */
+	init_mem_term(cpuid);
+
+	/*
+	 * Start kernel initialization process
+	 */
+	init_start_kernel_init(bsp, cpuid);
+}
diff --git a/arch/e2k/p2v/boot_init.c b/arch/e2k/p2v/boot_init.c
new file mode 100644
index 000000000000..e0c247b311f1
--- /dev/null
+++ b/arch/e2k/p2v/boot_init.c
@@ -0,0 +1,4235 @@
+/* $Id: boot_init.c,v 1.56 2009/06/29 15:10:41 atic Exp $
+ *
+ * Boot-time initialization of Virtual memory support.
+ * Switch from boot execution on physical memory to continuation of boot
+ * on virtual memory
+ *
+ * Copyright (C) 2001 Salavat Guiliazov
+ */
+
+/* NOTE(review): the header names of the following #include lines were
+ * lost in this text extraction (bare "#include"); restore them from the
+ * original patch — this hunk does not compile as-is. */
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "boot_string.h"
+
+/* Per-subsystem boot-time debug print switches: each Debug* macro expands
+ * to a do_boot_printk guarded by its compile-time 0/1 flag. */
+#undef	DEBUG_BOOT_MODE
+#undef	boot_printk
+#define	DEBUG_BOOT_MODE	0	/* Boot process */
+#define	boot_printk	if (DEBUG_BOOT_MODE) do_boot_printk
+
+#undef	DEBUG_LO_TO_HI_MODE
+#undef	DebugLoHi
+#define	DEBUG_LO_TO_HI_MODE	0	/* Convertion low to high address */
+#define	DebugLoHi	if (DEBUG_LO_TO_HI_MODE) do_boot_printk
+
+#undef	DEBUG_PHYS_BANK_MODE
+#undef	DebugBank
+#define	DEBUG_PHYS_BANK_MODE	0	/* Physical memory bank management */
+#define	DebugBank	if (DEBUG_PHYS_BANK_MODE) do_boot_printk
+
+#undef	DEBUG_NUMA_MODE
+#undef	DebugNUMA
+#define	DEBUG_NUMA_MODE	0	/* Boot NUMA */
+#define	DebugNUMA	if (DEBUG_NUMA_MODE) do_boot_printk
+
+/*
+ * Array of 'BOOT_MAX_MEM_NUMNODES' of 'BOOT_MAX_MEM_NUMNODES' structures
+ * is statically allocated into the kernel image.
+ * The array of structures is used to hold the + * physical memory configuration of the machine. This is filled in + * 'boot_probe_memory()' and is later used by 'boot_mem_init()' to setup + * boot-time memory map and by 'mem_init()' to set up 'mem_map[]'. + */ + +node_phys_mem_t nodes_phys_mem[L_MAX_MEM_NUMNODES]; +EXPORT_SYMBOL(nodes_phys_mem); + +#ifdef PA_TO_HIGH_DINAMICALY +/* is enabled/disabled usage only of high partition of physical memory */ +/* and conversion addresses from low memory to high */ +bool pa_to_high_disabled = false; /* default is enabled */ +#endif /* PA_TO_HIGH_DINAMICALY */ + +/* + * The next structure contains list of descriptors of the memory areas + * used by boot-time initialization. + * All the used memory areas enumerate in this structure. If a some new + * area will be used, then it should be added to the list of already known ones. + */ + +bootmem_areas_t kernel_bootmem; +long phys_memory_mgb_size; + +#ifdef CONFIG_SMP +static atomic_t boot_physmem_maps_ready = ATOMIC_INIT(0); +static atomic_t __initdata_recv boot_pv_ops_switched = ATOMIC_INIT(0); +#ifndef CONFIG_NUMA +static atomic_t boot_mapping_ready = ATOMIC_INIT(0); +#endif /* ! CONFIG_NUMA */ +#endif /* CONFIG_SMP */ + +/* + * FIXME: Nodes number is limited by bits in unsigned long size - 64 + */ +int phys_nodes_num; +unsigned long phys_nodes_map; +int phys_mem_nodes_num; +unsigned long phys_mem_nodes_map; + +#ifdef CONFIG_NUMA +e2k_addr_t node_kernel_phys_base[MAX_NUMNODES] = { + [0 ... (MAX_NUMNODES-1)] = -1 + }; +static boot_spinlock_t __initdata boot_node_kernel_dup_lock[MAX_NUMNODES] = { + [0 ... 
(MAX_NUMNODES-1)] = __BOOT_SPIN_LOCK_UNLOCKED +}; +atomic_t early_node_has_dup_kernel_num = ATOMIC_INIT(0); +static int __initdata node_kernel_duplicated[MAX_NUMNODES] = { 0 }; +static int __initdata node_set_kernel_duplicated[MAX_NUMNODES] = { 0 }; +static int __initdata node_kernel_base_is_set[MAX_NUMNODES] = { 0 }; +#define boot_node_kernel_duplicated \ + boot_get_vo_value(node_kernel_duplicated[boot_numa_node_id()]) +#define boot_node_set_kernel_duplicated \ + boot_get_vo_value(node_set_kernel_duplicated[ \ + boot_numa_node_id()]) +#define boot_node_kernel_base_is_set \ + boot_get_vo_value(node_kernel_base_is_set[ \ + boot_numa_node_id()]) +boot_spinlock_t __initdata boot_node_map_lock[MAX_NUMNODES] = { + [0 ... (MAX_NUMNODES-1)] = __BOOT_SPIN_LOCK_UNLOCKED +}; +boot_spinlock_t __initdata_recv boot_node_flush_lock[MAX_NUMNODES] = { + [0 ... (MAX_NUMNODES-1)] = __BOOT_SPIN_LOCK_UNLOCKED +}; +int __initdata node_mem_mapped[MAX_NUMNODES] = { 0 }; +static int __initdata node_image_mapped[MAX_NUMNODES] = { 0 }; +static int __initdata node_io_mapped[MAX_NUMNODES] = { 0 }; +static int __initdata node_info_mapped[MAX_NUMNODES] = { 0 }; +static int __initdata node_ports_mapped[MAX_NUMNODES] = { 0 }; +static int __initdata node_hwbug_mapped[MAX_NUMNODES] = { 0 }; +#ifdef CONFIG_ONLY_HIGH_PHYS_MEM +static int __initdata_recv node_flushed[MAX_NUMNODES] = { 0 }; +# define boot_node_flushed \ + boot_get_vo_value(node_flushed[boot_numa_node_id()]) +#endif +#define boot_node_image_mapped \ + boot_get_vo_value(node_image_mapped[boot_numa_node_id()]) +#define boot_node_mem_mapped \ + boot_get_vo_value(node_mem_mapped[boot_numa_node_id()]) +#define boot_node_io_mapped \ + boot_get_vo_value(node_io_mapped[boot_numa_node_id()]) +#define boot_node_info_mapped \ + boot_get_vo_value(node_info_mapped[boot_numa_node_id()]) +#define boot_node_ports_mapped \ + boot_get_vo_value(node_ports_mapped[boot_numa_node_id()]) +#define boot_node_hwbug_mapped \ + 
boot_get_vo_value(node_hwbug_mapped[boot_numa_node_id()])
+#else /* ! CONFIG_NUMA */
+e2k_addr_t kernel_phys_base;
+#define	boot_node_image_mapped	0
+#define	boot_node_mem_mapped	0
+#define	boot_node_io_mapped	0
+#define	boot_node_info_mapped	0
+#define	boot_node_ports_mapped	0
+#define	boot_node_hwbug_mapped	0
+#define	boot_node_flushed	0
+#endif /* CONFIG_NUMA */
+
+static bool mmu_pt_v6 = false;
+#define	boot_mmu_pt_v6	boot_get_vo_value(mmu_pt_v6)
+
+static __init void boot_reserve_bootinfo_areas(boot_info_t *boot_info);
+
+/*
+ * Memory limit setup
+ */
+static e2k_size_t mem_limit = -1UL;
+#define	boot_mem_limit	boot_get_vo_value(mem_limit)
+
+/*
+ * "mem=" kernel parameter: total physical memory limit.
+ * Accepts an optional K/M/G suffix; the result is rounded down to a
+ * whole page.
+ */
+static int __init boot_mem_set(char *cmd)
+{
+	boot_mem_limit = boot_simple_strtoul(cmd, &cmd, 0);
+
+	if (*cmd == 'K' || *cmd == 'k')
+		boot_mem_limit <<= 10;
+	else if (*cmd == 'M' || *cmd == 'm')
+		boot_mem_limit <<= 20;
+	else if (*cmd == 'G' || *cmd == 'g')
+		boot_mem_limit <<= 30;
+
+	boot_mem_limit &= ~(PAGE_SIZE-1);
+
+	boot_printk("Physical memory limit set to 0x%lx\n", boot_mem_limit);
+
+	return 0;
+}
+boot_param("mem", boot_mem_set);
+
+static e2k_size_t node_mem_limit = -1UL;
+#define	boot_node_mem_limit	boot_get_vo_value(node_mem_limit)
+
+/*
+ * "nodemem=" kernel parameter: per-node physical memory limit.
+ * FIX: also accept the 'G'/'g' suffix — "mem=" above supported K/M/G
+ * while "nodemem=" silently ignored 'G', truncating the limit to the
+ * raw number of bytes.
+ */
+static int __init boot_node_mem_set(char *cmd)
+{
+	boot_node_mem_limit = boot_simple_strtoul(cmd, &cmd, 0);
+
+	if (*cmd == 'K' || *cmd == 'k')
+		boot_node_mem_limit <<= 10;
+	else if (*cmd == 'M' || *cmd == 'm')
+		boot_node_mem_limit <<= 20;
+	else if (*cmd == 'G' || *cmd == 'g')
+		boot_node_mem_limit <<= 30;
+
+	boot_node_mem_limit &= ~(PAGE_SIZE-1);
+
+	boot_printk("Node physical memory limit set to 0x%lx\n",
+		boot_node_mem_limit);
+
+	return 0;
+}
+boot_param("nodemem", boot_node_mem_set);
+
+/*
+ * "set_pt_v6" kernel parameter: try to turn on the new (V6) page table
+ * format; only honored when both CONFIG_MMU_PT_V6 and the MMU itself
+ * support it. The result is mirrored into boot_machine.mmu_pt_v6.
+ */
+static int __init boot_set_mmu_pt_v6(char *cmd)
+{
+	machdep_t *mach = &boot_machine;
+
+#ifndef	CONFIG_MMU_PT_V6
+	do_boot_printk("CONFIG_MMU_PT_V6 is disabled, so MMU PT_V6 cannot be "
+		"set\n");
+	boot_mmu_pt_v6 = false;
+#else	/* CONFIG_MMU_PT_V6 */
+	if (!mach->mmu_pt_v6) {
+		/* new format is not supported */
+		do_boot_printk("MMU doesn't support new format of page table, "
+			"so MMU PT_V6 cannot be set\n");
+		boot_mmu_pt_v6 = false;
+	} else {
+		boot_mmu_pt_v6 = true;
+	}
+#endif	/* ! CONFIG_MMU_PT_V6 */
+	mach->mmu_pt_v6 = boot_mmu_pt_v6;
+	return 0;
+}
+boot_param("set_pt_v6", boot_set_mmu_pt_v6);
+
+/*
+ * "reset_pt_v6" kernel parameter: force the old page table format;
+ * refused when CONFIG_MMU_PT_V6 is enabled statically for this MMU.
+ */
+static int __init boot_reset_mmu_pt_v6(char *cmd)
+{
+	machdep_t *mach = &boot_machine;
+
+#ifndef	CONFIG_MMU_PT_V6
+	do_boot_printk("CONFIG_MMU_PT_V6 is disabled, so MMU PT_V6 always is "
+		"OFF\n");
+	boot_mmu_pt_v6 = false;
+#else	/* CONFIG_MMU_PT_V6 */
+	if (!mach->mmu_pt_v6) {
+		/* new format is not supported */
+		boot_mmu_pt_v6 = false;
+	} else {
+		do_boot_printk("CONFIG_MMU_PT_V6 is enabled staticaly, "
+			"so MMU PT_V6 cannot be reset\n");
+		boot_mmu_pt_v6 = true;
+	}
+#endif	/* ! CONFIG_MMU_PT_V6 */
+	mach->mmu_pt_v6 = boot_mmu_pt_v6;
+	return 0;
+}
+boot_param("reset_pt_v6", boot_reset_mmu_pt_v6);
+
+/*
+ * "set_sep_pt" kernel parameter: request separate kernel/user page
+ * tables; only prints diagnostics when the mode is unavailable.
+ */
+static int __init boot_set_mmu_separate_pt(char *cmd)
+{
+	machdep_t *mach = &boot_machine;
+
+#ifndef	CONFIG_MMU_SEP_VIRT_SPACE
+	do_boot_printk("CONFIG_MMU_SEP_VIRT_SPACE is disabled, "
+		"so MMU SEPARATE_PT cannot be set\n");
+	mach->mmu_separate_pt = false;
+#else	/* CONFIG_MMU_SEP_VIRT_SPACE */
+	if (!mach->mmu_separate_pt) {
+		do_boot_printk("MMU doesn't support separate page tables mode, "
+			"so MMU SEPARATE_PT cannot be set\n");
+	}
+#endif	/* ! CONFIG_MMU_SEP_VIRT_SPACE */
+	return 0;
+}
+boot_param("set_sep_pt", boot_set_mmu_separate_pt);
+
+/*
+ * "reset_sep_pt" kernel parameter: request united page tables; refused
+ * when separate tables are enabled statically (non-dynamic config).
+ */
+static int __init boot_reset_mmu_separate_pt(char *cmd)
+{
+	machdep_t *mach = &boot_machine;
+
+#ifndef	CONFIG_MMU_SEP_VIRT_SPACE
+	do_boot_printk("CONFIG_MMU_SEP_VIRT_SPACE is disabled, "
+		"so MMU SEPARATE_PT always is OFF\n");
+	mach->mmu_separate_pt = false;
+#else	/* CONFIG_MMU_SEP_VIRT_SPACE */
+# ifdef	CONFIG_DYNAMIC_SEP_VIRT_SPACE
+	mach->mmu_separate_pt = false;
+# else	/* ! CONFIG_DYNAMIC_SEP_VIRT_SPACE */
+	if (!mach->mmu_separate_pt) {
+		/* MMU does not support this mode */
+	} else {
+		do_boot_printk("CONFIG_MMU_SEP_VIRT_SPACE is enabled "
+			"staticaly, so MMU SEPARATE_PT cannot be reset\n");
+	}
+# endif	/* CONFIG_DYNAMIC_SEP_VIRT_SPACE */
+#endif	/* ! CONFIG_MMU_SEP_VIRT_SPACE */
+	return 0;
+}
+boot_param("reset_sep_pt", boot_reset_mmu_separate_pt);
+
+/*
+ * Disabling caches setup
+ * disable_caches holds the strongest cache-disable level requested so
+ * far; each disL* handler only raises the level, never lowers it.
+ */
+
+unsigned long disable_caches = _MMU_CD_EN;
+#define	boot_disable_caches	boot_get_vo_value(disable_caches)
+
+static int __init boot_disable_L1_setup(char *cmd)
+{
+	if (boot_disable_caches < _MMU_CD_D1_DIS)
+		boot_disable_caches = _MMU_CD_D1_DIS;
+	return 0;
+}
+boot_param("disL1", boot_disable_L1_setup);
+
+static int __init boot_disable_L2_setup(char *cmd)
+{
+	if (boot_disable_caches < _MMU_CD_D_DIS)
+		boot_disable_caches = _MMU_CD_D_DIS;
+	return 0;
+}
+boot_param("disL2", boot_disable_L2_setup);
+
+static int __init boot_disable_L3_setup(char *cmd)
+{
+	if (boot_disable_caches < _MMU_CD_DIS)
+		boot_disable_caches = _MMU_CD_DIS;
+	return 0;
+}
+boot_param("disL3", boot_disable_L3_setup);
+
+unsigned long disable_secondary_caches = 0;
+#define	boot_disable_secondary_caches	\
+		boot_get_vo_value(disable_secondary_caches)
+
+static int __init boot_disable_LI_setup(char *cmd)
+{
+	boot_disable_secondary_caches = _MMU_CR_CR0_CD;
+	return 0;
+}
+boot_param("disLI", boot_disable_LI_setup);
+
+unsigned long disable_IP = _MMU_IPD_2_LINE;
+#define	boot_disable_IP	boot_get_vo_value(disable_IP)
+
+static int __init boot_disable_IP_setup(char *cmd)
+{
+	boot_disable_IP = _MMU_IPD_DIS;
+	return 0;
+}
+boot_param("disIP", boot_disable_IP_setup);
+
+static bool enable_l2_cint = false;
+#define	boot_enable_l2_cint	boot_get_vo_value(enable_l2_cint)
+
+static int __init boot_enable_L2_CINT_setup(char *str)
+{
+	boot_enable_l2_cint = true;
+	return 0;
+}
+boot_param("L2CINT", boot_enable_L2_CINT_setup);
+
+static inline void boot_native_set_l2_crc_state(bool
enable)
+{
+	unsigned long l2_cntr;
+	int l2_bank;
+
+	if (!enable)
+		return;
+	/* Set the "enable corrected error interrupt" bit in every L2 bank
+	 * control register; the trailing read after the wait re-fetches the
+	 * register (presumably to ensure the write completed — the value is
+	 * not checked). */
+	for (l2_bank = 0; l2_bank < E2K_L2_BANK_NUM; l2_bank++) {
+		l2_cntr = native_read_DCACHE_L2_CNTR_reg(l2_bank);
+		l2_cntr |= E2K_L2_CNTR_EN_CINT;
+		native_write_DCACHE_L2_CNTR_reg(l2_cntr, l2_bank);
+		__E2K_WAIT_ALL;
+		l2_cntr = native_read_DCACHE_L2_CNTR_reg(l2_bank);
+	}
+}
+
+/*
+ * bootblock.bios.banks_ex is extended area for all nodes. Firstly, we fill
+ * node_phys_mem.banks from bootblock.nodes_mem.banks, which presents for each
+ * node. If there are more than L_MAX_NODE_PHYS_BANKS_FUSTY phys banks for a
+ * node, we continue to fill node_phys_mem.banks from bootblock.bios.banks_ex,
+ * which is one for all nodes. Last element in bootblock.bios.banks_ex for a
+ * node, which uses it, should be with size = 0. If a node has only
+ * L_MAX_NODE_PHYS_BANKS_FUSTY phys banks, there should be element with size = 0
+ * in bootblock.bios.banks_ex for this node.
+ *
+ *	node_phys_mem.banks		bootblock.nodes_mem.banks
+ *		__________________________________________
+ *	______|_____________________________	__________|____________
+ *	|__________________|_________________|	|_______________________|
+ *					|
+ *	L_MAX_NODE_PHYS_BANKS_FUSTY	|	bootblock.bios.banks_ex
+ *	<------------------>		|_____________________
+ *					__________|____________
+ *					|_______________________|
+ *	L_MAX_NODE_PHYS_BANKS
+ *	<----------------------------------->
+ */
+
+/*
+ * Return the next non-empty bank info for @node, first from the per-node
+ * main partition, then from the shared extended partition; NULL when the
+ * node has no more banks. The two *ind pointers are cursors advanced on
+ * each call.
+ */
+static bank_info_t * __init_recv
+boot_do_get_next_node_bank(int node,	/* only for node # info */
+	bank_info_t *node_banks_info, bank_info_t *node_banks_info_ex,
+	int *node_banks_ind_p, int *node_banks_ind_ex_p)
+{
+	bank_info_t *bank_info;
+	e2k_size_t bank_size;
+	int bank = 0;
+
+	if (node_banks_info == NULL || node_banks_ind_p == NULL) {
+		/* no more main banks on node, switch to extended partition */
+		/* of nodes banks info */
+		DebugLoHi("no main banks info, it need at once switch to "
+			"extended partition of banks info\n");
+	} else if ((bank = *node_banks_ind_p) < L_MAX_NODE_PHYS_BANKS_FUSTY) {
+		bank_info = &node_banks_info[bank];
+		bank_size = bank_info->size;
+		if (bank_size == 0) {
+			DebugLoHi("Node #%d empty main bank #%d: no more "
+				"banks on node\n",
+				node, bank);
+			return NULL;	/* no more banks on node */
+		}
+
+		DebugLoHi("Node #%d main bank #%d: address 0x%lx, "
+			"size 0x%lx\n",
+			node, bank, bank_info->address, bank_size);
+
+		/* return current main bank and increment index to point */
+		/* to next bank of node */
+		*node_banks_ind_p = bank + 1;
+		return bank_info;
+	} else {
+		/* main banks info is completed, switch to extended partition */
+		/* of nodes banks info */
+		DebugLoHi("main banks info is completed, so switch to "
+			"extended partition of banks info\n");
+	}
+
+	if (unlikely(node_banks_info_ex == NULL ||
+			node_banks_ind_ex_p == NULL)) {
+		BOOT_BUG("No extended partition of phys. memory banks info\n");
+	/* FIX: the extended-partition cursor was assigned into 'node'
+	 * ("node = *node_banks_ind_ex_p"), which clobbered the node number
+	 * used in diagnostics while banks_ex[] was still indexed with the
+	 * stale main-partition 'bank'; assign the cursor into 'bank', which
+	 * is consistent with the bound check and the 'bank + 1' updates
+	 * below. */
+	} else if ((bank = *node_banks_ind_ex_p) < L_MAX_PHYS_BANKS_EX) {
+		bank_info = &node_banks_info_ex[bank];
+		bank_size = bank_info->size;
+		if (bank_size == 0) {
+			DebugLoHi("Node #%d empty extended bank #%d: no more "
+				"banks on node\n",
+				node, bank);
+			/* skip empty bank and set index of extended */
+			/* partition to next bank from which starts extended */
+			/* partition of new node */
+			*node_banks_ind_ex_p = bank + 1;
+			return NULL;	/* no more banks on node */
+		}
+
+		DebugLoHi("Node #%d extended bank #%d: address 0x%lx, "
+			"size 0x%lx\n",
+			node, bank, bank_info->address, bank_size);
+
+		/* return current extended bank and increment index to point */
+		/* to next extended bank of node */
+		*node_banks_ind_ex_p = bank + 1;
+		return bank_info;
+	} else {
+		/* extended partition of banks info is completed */
+		/* so cannot be any new banks info for this and other nodes */
+		DebugLoHi("extended partition of banks info is completed, "
+			"so no more any phys. memory banks\n");
+	}
+
+	return NULL;
+}
+/* True (non-NULL) when @node has at least one main-partition bank. */
+static inline bank_info_t * __init_recv
+boot_has_node_banks_info(boot_info_t *bootblock, int node)
+{
+	int node_banks_ind = 0;
+
+	return boot_do_get_next_node_bank(node,
+			bootblock->nodes_mem[node].banks, NULL,
+			&node_banks_ind, NULL);
+}
+/* Cursor-driven iterator over all (main + extended) banks of @node. */
+static inline bank_info_t * __init_recv
+boot_get_next_node_bank(boot_info_t *bootblock, int node,
+	int *node_banks_ind_p, int *node_banks_ind_ex_p)
+{
+	return boot_do_get_next_node_bank(node,
+			bootblock->nodes_mem[node].banks,
+			bootblock->bios.banks_ex,
+			node_banks_ind_p, node_banks_ind_ex_p);
+}
+
+/* True if any bank of @node ends inside the low physical memory range. */
+bool __init boot_has_node_low_memory(int node, boot_info_t *bootblock)
+{
+	bank_info_t *bank_info;
+	int banks_ind = 0;
+	int banks_ind_ex = 0;
+
+	while (bank_info = boot_get_next_node_bank(bootblock, node,
+				&banks_ind, &banks_ind_ex),
+			bank_info != NULL) {
+		e2k_addr_t bank_start, bank_end;
+
+		bank_start = bank_info->address;
+		bank_end = bank_start + bank_info->size;
+		if (is_addr_from_low_memory(bank_end -1))
+			/* found low memory bank */
+			return true;
+	}
+	return false;
+}
+
+/* True if any bank of @node starts in the high physical memory range. */
+bool __init boot_has_node_high_memory(int node, boot_info_t *bootblock)
+{
+	bank_info_t *bank_info;
+	int banks_ind = 0;
+	int banks_ind_ex = 0;
+
+	while (bank_info = boot_get_next_node_bank(bootblock, node,
+				&banks_ind, &banks_ind_ex),
+			bank_info != NULL) {
+		e2k_addr_t bank_start, bank_end;
+
+		bank_start = bank_info->address;
+		bank_end = bank_start + bank_info->size;
+		if (is_addr_from_high_memory(bank_start))
+			/* found high memory bank */
+			return true;
+	}
+	return false;
+}
+
+/* True if any node in the bootblock reports high physical memory. */
+bool __init_recv boot_has_high_memory(boot_info_t *bootblock)
+{
+	int node;
+
+	for (node = 0; node < L_MAX_MEM_NUMNODES; node++) {
+		bank_info_t *node_bank;
+
+		node_bank = boot_has_node_banks_info(bootblock, node);
+		if (node_bank == NULL)
+			continue;	/* node has not memory */
+		if (boot_has_node_high_memory(node, bootblock))
+			return true;
+	}
+	return false;
+}
+
+static inline short __init
+boot_get_free_phys_bank(int node, node_phys_mem_t *node_mem)
+{
+	e2k_phys_bank_t *phys_banks;
+	short bank;
+
+	/* A bank entry with pages_num == 0 is free; scan the fixed table. */
+	phys_banks = node_mem->banks;
+	for (bank = 0; bank < L_MAX_NODE_PHYS_BANKS; bank++) {
+		e2k_phys_bank_t *cur_phys_bank = &phys_banks[bank];
+
+		if (cur_phys_bank->pages_num == 0)
+			/* found empty entry at table */
+			return bank;
+	}
+	if (node_mem->banks_num >= L_MAX_NODE_PHYS_BANKS) {
+		BOOT_WARNING("Node #%d number of phys banks %d exceeds "
+			"permissible limit",
+			node, node_mem->banks_num);
+		return -1;
+	}
+	/* banks_num claims free slots exist but none was found:
+	 * the table and the counter are inconsistent. */
+	BOOT_BUG("Node #%d number of phys banks is only %d from %d, "
+		"but could not find empty entry at table",
+		node, node_mem->banks_num, L_MAX_NODE_PHYS_BANKS);
+	return -1;
+}
+
+/*
+ * Walk the singly linked list of node banks (heads at first_bank, links
+ * in ->next) until @bank is found; returns the index of the bank BEFORE
+ * it, or -1 when @bank is the list head. BOOT_BUGs if @bank is absent.
+ */
+static inline short __init
+boot_find_node_phys_bank(int node, node_phys_mem_t *node_mem, short bank)
+{
+	e2k_phys_bank_t *cur_phys_bank;
+	short prev_bank_ind = -1;
+	short cur_bank_ind;
+
+	for (cur_bank_ind = node_mem->first_bank;
+			cur_bank_ind >= 0;
+			cur_bank_ind = cur_phys_bank->next) {
+		if (cur_bank_ind == bank)
+			break;
+		prev_bank_ind = cur_bank_ind;
+		cur_phys_bank = &node_mem->banks[cur_bank_ind];
+	}
+	if (cur_bank_ind != bank) {
+		BOOT_BUG("Node #%d: could not find bank #%d at the list of "
+			"node banks\n",
+			node, bank);
+	}
+	return prev_bank_ind;
+}
+
+/*
+ * Insert an already initialized bank into the node's address-ordered
+ * bank list and extend the node's [start_pfn, start_pfn + pfns_num)
+ * span accordingly. Caller must hold the node bank management lock.
+ */
+void __init_recv boot_add_new_phys_bank(int node, node_phys_mem_t *node_mem,
+	e2k_phys_bank_t *new_phys_bank, short new_bank_ind)
+{
+	e2k_phys_bank_t *phys_banks;
+	e2k_phys_bank_t *cur_phys_bank;
+	short prev_bank_ind;
+	short cur_bank_ind;
+	e2k_addr_t new_bank_start;
+	e2k_addr_t new_bank_end;
+	e2k_size_t new_bank_size;
+	e2k_addr_t node_start;
+	e2k_addr_t node_end;
+
+	/* counter and list-head emptiness must agree */
+	if (node_mem->banks_num == 0 && node_mem->first_bank >= 0 ||
+		node_mem->banks_num != 0 && node_mem->first_bank < 0) {
+		BOOT_BUG("No physical banks on node #%d, but list of banks "
+			"is not empty or vice versa",
+			node);
+	}
+	if (node_mem->banks_num >= L_MAX_NODE_PHYS_BANKS) {
+		BOOT_BUG("Node #%d number of phys banks %d exceeds "
+			"permissible limit, ignored",
+			node, node_mem->banks_num);
+		return;
+	}
+
+	new_bank_start = new_phys_bank->base_addr;
+	new_bank_size = new_phys_bank->pages_num << PAGE_SHIFT;
+	if (new_bank_size == 0) {
+		BOOT_BUG("Node #%d empty physical memory bank #%d "
+			"cannot be added",
+			node, new_bank_ind);
+		return;
+	}
+	new_bank_end = new_bank_start + new_bank_size;
+	DebugBank("Node #%d : should be added new bank #%d from 0x%lx "
+		"to 0x%lx\n",
+		node, new_bank_ind, new_bank_start, new_bank_end);
+
+	/* find the insertion point: the list is kept sorted by address,
+	 * prev_bank_ind ends as the bank preceding the new one (-1 = head) */
+	prev_bank_ind = -1;
+	phys_banks = node_mem->banks;
+	for (cur_bank_ind = node_mem->first_bank;
+			cur_bank_ind >= 0;
+			cur_bank_ind = cur_phys_bank->next) {
+		e2k_addr_t cur_bank_start;
+
+		cur_phys_bank = &phys_banks[cur_bank_ind];
+		cur_bank_start = cur_phys_bank->base_addr;
+		if (cur_phys_bank->pages_num == 0) {
+			BOOT_BUG("Node #%d: empty physical memory bank #%d "
+				"cannot be at the node list",
+				node, cur_bank_ind);
+			return;
+		}
+		DebugBank("Node #%d bank cur #%d prev #%d from 0x%lx "
+			"new end 0x%lx\n",
+			node, cur_bank_ind, prev_bank_ind,
+			cur_bank_start, new_bank_end);
+		if (new_bank_end <= cur_bank_start)
+			/* new bank should be added before current */
+			break;
+		prev_bank_ind = cur_bank_ind;
+	}
+
+	if (node_mem->banks_num > 0) {
+		node_start = node_mem->start_pfn << PAGE_SHIFT;
+		node_end = node_start + (node_mem->pfns_num << PAGE_SHIFT);
+	} else {
+		/* empty node: sentinel values so any bank extends the span */
+		node_start = -1UL;
+		node_end = 0;
+	}
+	DebugBank("Node #%d : before add bunk #%d start 0x%lx end 0x%lx "
+		"pfns 0x%lx, banks %d\n",
+		node, new_bank_ind, node_start, node_end,
+		node_mem->pfns_num, node_mem->banks_num);
+	if (prev_bank_ind < 0) {
+		/* new bank should be first entry in the list */
+
+		if (node_mem->first_bank < 0) {
+			/* it is first bank on the node */
+			if (new_bank_start > node_start) {
+				BOOT_BUG("Node #%d : added bank #%d is first "
+					"on node, so its start 0x%lx should be "
+					"below node start 0x%lx\n",
+					node, new_bank_ind,
+					new_bank_start, node_start);
+			}
+			if (new_bank_end < node_end) {
+				BOOT_BUG("Node #%d : added bank #%d is first "
+					"on node, so its end 0x%lx should be "
+					"above node end 0x%lx\n",
+					node, new_bank_ind,
+					new_bank_end, node_end);
+			}
+			node_mem->start_pfn = new_bank_start >> PAGE_SHIFT;
+			node_mem->pfns_num = new_phys_bank->pages_num;
+			node_start = new_bank_start;
+			node_end = new_bank_end;
+			DebugBank("Node #%d : added bank #%d is first "
+				"on node, node is now from 0x%lx to 0x%lx "
+				"pfns 0x%lx\n",
+				node, new_bank_ind, node_start, node_end,
+				node_mem->pfns_num);
+		} else {
+			/* node already has banks: new bank becomes the new */
+			/* head, correct only node start and size */
+			e2k_phys_bank_t *next_phys_bank;
+
+			if (new_bank_end >= node_end) {
+				BOOT_BUG("Node #%d added bank #%d should be at "
+					"head of banks, but bank end 0x%lx "
+					"is above of node end 0x%lx\n",
+					node, new_bank_ind,
+					new_bank_end, node_end);
+			}
+			next_phys_bank = &node_mem->banks[node_mem->first_bank];
+			node_mem->start_pfn = new_bank_start >> PAGE_SHIFT;
+			node_mem->pfns_num +=
+				((next_phys_bank->base_addr -
+						new_bank_start) >> PAGE_SHIFT);
+			node_start = new_bank_start;
+			DebugBank("Node #%d : added bunk #%d is at the "
+				"head of node, next bank #%d, node "
+				"start 0x%lx end 0x%lx pfns 0x%lx\n",
+				node, new_bank_ind, node_mem->first_bank,
+				node_start, node_end, node_mem->pfns_num);
+		}
+		/* insert bank at the node list of all banks */
+		new_phys_bank->next = node_mem->first_bank;
+		node_mem->first_bank = new_bank_ind;
+	} else {
+		/* add new bank in the list after previous */
+		e2k_phys_bank_t *prev_phys_bank;
+
+		prev_phys_bank = &phys_banks[prev_bank_ind];
+
+		if (node_start >= new_bank_start) {
+			BOOT_BUG("Node #%d : added bank #%d from 0x%lx below "
+				"node start address 0x%lx\n",
+				node, new_bank_ind, new_bank_start, node_start);
+		}
+		if (prev_phys_bank->next < 0) {
+			/* added bank will be last bank on the node, */
+			/* correct node end or size */
+			if (node_end >= new_bank_end) {
+				BOOT_BUG("Node #%d : added bank #%d wiil be "
+					"last on node, so its end 0x%lx should "
+					"be above node end 0x%lx\n",
+					node, new_bank_ind,
+					new_bank_end, node_end);
+			}
+			node_mem->pfns_num +=
+				((new_bank_end - node_end) >> PAGE_SHIFT);
+			node_end = new_bank_end;
+			DebugBank("Node #%d : added bunk #%d is last "
+				"on node, previous bank #%d, node "
+				"start 0x%lx end 0x%lx pfns 0x%lx\n",
+				node, new_bank_ind, prev_bank_ind,
+				node_start, node_end, node_mem->pfns_num);
+		} else {
+			/* added bank is into middle of banks list, */
+			/* so node start and end should not change */
+			if (node_end <= new_bank_end) {
+				BOOT_BUG("Node #%d : added bank #%d is not "
+					"last on node, so its end 0x%lx should "
+					"be below node end 0x%lx\n",
+					node, new_bank_ind,
+					new_bank_end, node_end);
+			}
+			DebugBank("Node #%d : added bunk #%d is at midle "
+				"of bank list, previous bank #%d , next %d, "
+				"node start 0x%lx end 0x%lx pfns 0x%lx\n",
+				node, new_bank_ind, prev_bank_ind,
+				prev_phys_bank->next,
+				node_start, node_end, node_mem->pfns_num);
+		}
+		/* insert new bank at middle of the node list of banks */
+		new_phys_bank->next = prev_phys_bank->next;
+		prev_phys_bank->next = new_bank_ind;
+	}
+	node_mem->banks_num++;
+}
+
+/* node bank management lock should be taken by caller */
+short __init_recv boot_init_new_phys_bank(int node, node_phys_mem_t *node_mem,
+	e2k_addr_t bank_start, e2k_size_t bank_size)
+{
+	e2k_phys_bank_t *new_phys_bank;
+	short new_bank_ind;
+
+	new_bank_ind = boot_get_free_phys_bank(node, node_mem);
+	if (new_bank_ind < 0) {
+		BOOT_WARNING("Node #%d: could not find empty bank "
+			"entry to add one more physical memory bank",
+			node);
+		return new_bank_ind;
+	}
+	new_phys_bank = &node_mem->banks[new_bank_ind];
+	new_phys_bank->base_addr = bank_start;
+	new_phys_bank->pages_num = bank_size >> PAGE_SHIFT;
+	atomic64_set(&new_phys_bank->free_pages_num, new_phys_bank->pages_num);
+	/* busy_areas points (as a physical address) at the prereserved
+	 * per-bank array inside the bank descriptor itself */
+	new_phys_bank->busy_areas = boot_vp_to_pp((e2k_busy_mem_t *)
+					new_phys_bank->busy_areas_prereserved);
+
new_phys_bank->busy_areas_num = 0;
+	new_phys_bank->first_area = -1;
+
+	return new_bank_ind;
+}
+
+/* node bank management lock should be taken by caller */
+short __init boot_create_new_phys_bank(int node, node_phys_mem_t *node_mem,
+	e2k_addr_t bank_start, e2k_size_t bank_size)
+{
+	e2k_phys_bank_t *new_phys_bank;
+	short new_bank_ind;
+
+	new_bank_ind = boot_init_new_phys_bank(node, node_mem,
+					bank_start, bank_size);
+	if (new_bank_ind < 0)
+		/* could not find new empty bank */
+		return new_bank_ind;
+	new_phys_bank = &node_mem->banks[new_bank_ind];
+
+	boot_add_new_phys_bank(node, node_mem, new_phys_bank, new_bank_ind);
+
+	/* widen the global physical memory bounds to cover the new bank */
+	if (bank_start < boot_start_of_phys_memory)
+		boot_start_of_phys_memory = bank_start;
+	if (boot_end_of_phys_memory < bank_start + bank_size)
+		boot_end_of_phys_memory = bank_start + bank_size;
+
+	return new_bank_ind;
+}
+
+/* node bank management lock should be taken by caller */
+/* should return source bank index in the list of node banks */
+/* after deleting the bank from list its index should be -1 */
+/* as flag of free entry */
+static inline short __init
+boot_delete_phys_bank(int node_id, node_phys_mem_t *node_mem,
+	short bank, e2k_phys_bank_t *phys_bank)
+{
+	e2k_addr_t bank_start, bank_end;
+	e2k_addr_t node_start_pfn, node_end_pfn;
+	e2k_addr_t node_start, node_end;
+	short prev_bank_ind;
+
+	/* a bank with live busy areas must not be deleted */
+	if (phys_bank->busy_areas_num != 0) {
+		BOOT_BUG("Node #%d bank #%d: could not be deleted because of "
+			"is not empty (%d entries) list of busy areas\n",
+			node_id, bank, phys_bank->busy_areas_num);
+	}
+
+	bank_start = phys_bank->base_addr;
+	bank_end = bank_start + (phys_bank->pages_num << PAGE_SHIFT);
+	DebugBank("node #%d bank #%d from 0x%lx to 0x%lx: should be "
+		"deleted fully\n",
+		node_id, bank, bank_start, bank_end);
+
+	/* delete bank from list of all node banks */
+	prev_bank_ind = boot_find_node_phys_bank(node_id, node_mem, bank);
+	node_start_pfn = node_mem->start_pfn;
+	node_end_pfn = node_start_pfn + node_mem->pfns_num;
+	node_start = node_start_pfn << PAGE_SHIFT;
+	node_end = node_end_pfn << PAGE_SHIFT;
+
+	DebugBank("Node #%d : before delete bank #%d is from 0x%lx to 0x%lx, "
+		"pfns 0x%lx, banks num %d\n",
+		node_id, bank, node_start, node_end,
+		node_mem->pfns_num, node_mem->banks_num);
+
+	if (prev_bank_ind < 0) {
+		/* the deleted bank or part is at the head of the list */
+		if (node_mem->first_bank != bank) {
+			BOOT_BUG("Node #%d: head of list of node banks points "
+				"to bank #%d, but should point to #%d\n",
+				node_id, node_mem->first_bank, bank);
+		}
+		if (node_start != bank_start) {
+			BOOT_BUG("Node #%d : deleted bank #%d from 0x%lx "
+				"should start from node start 0x%lx\n",
+				node_id, bank, bank_start, node_start);
+		}
+		if (phys_bank->next < 0) {
+			/* it is last bank on the node */
+			if (node_end != bank_end) {
+				BOOT_BUG("Node #%d : deleted bank #%d is last "
+					"on node, so its end 0x%lx should be "
+					"equal to node end 0x%lx\n",
+					node_id, bank, bank_end, node_end);
+			}
+			/* node becomes empty: reset span to sentinel values */
+			node_mem->pfns_num = 0;
+			node_start = -1UL;
+			node_end = 0;
+		} else {
+			/* it is not last on the node, correct start and end */
+			/* to next banks */
+			e2k_phys_bank_t *next_phys_bank;
+
+			next_phys_bank = &node_mem->banks[phys_bank->next];
+			node_mem->start_pfn =
+				next_phys_bank->base_addr >> PAGE_SHIFT;
+			node_mem->pfns_num -=
+				((next_phys_bank->base_addr -
+						node_start) >> PAGE_SHIFT);
+			node_start = next_phys_bank->base_addr;
+			DebugBank("Node #%d : deleted bunk #%d is at the "
+				"head of node, new head is now bank #%d, node "
+				"start 0x%lx end 0x%lx pfns 0x%lx\n",
+				node_id, bank, phys_bank->next,
+				node_start, node_end, node_mem->pfns_num);
+		}
+		node_mem->first_bank = phys_bank->next;
+	} else {
+		/* the deleted bank is after current bank */
+		e2k_phys_bank_t *prev_phys_bank;
+
+		prev_phys_bank = &node_mem->banks[prev_bank_ind];
+		if (node_mem->first_bank == bank) {
+			BOOT_BUG("Node #%d: head of list of node banks points "
+				"to bank #%d, but should point to other\n",
+				node_id, node_mem->first_bank);
+		}
+		if (node_start >= bank_start) {
+			BOOT_BUG("Node #%d : deleted bank #%d from 0x%lx below "
+				"node start address 0x%lx\n",
+				node_id, bank, bank_start, node_start);
+		}
+		if (phys_bank->next < 0) {
+			/* deleted bank is last bank on the node, */
+			/* correct node end to previous bank */
+			if (node_end != bank_end) {
+				BOOT_BUG("Node #%d : deleted bank #%d is last "
+					"on node, so its end 0x%lx should be "
+					"equal to node end 0x%lx\n",
+					node_id, bank, bank_end, node_end);
+			}
+			node_mem->pfns_num =
+				((prev_phys_bank->base_addr -
+					node_start) >> PAGE_SHIFT) +
+						prev_phys_bank->pages_num;
+			node_end = prev_phys_bank->base_addr +
+					(prev_phys_bank->pages_num <<
+								PAGE_SHIFT);
+			DebugBank("Node #%d : deleted bunk #%d is last "
+				"on node, new last is now bank #%d, node "
+				"start 0x%lx end 0x%lx pfns 0x%lx\n",
+				node_id, bank, prev_bank_ind,
+				node_start, node_end, node_mem->pfns_num);
+		} else {
+			/* deleted area is into middle of banks list, */
+			/* so node start and end should not change */
+			if (node_end <= bank_end) {
+				BOOT_BUG("Node #%d : deleted bank #%d is not "
+					"last on node, so its end 0x%lx should "
+					"be below node end 0x%lx\n",
+					node_id, bank, bank_end, node_end);
+			}
+			DebugBank("Node #%d : deleted bunk #%d is at midle "
+				"of bank list, previous bank #%d , next %d, "
+				"node start 0x%lx end 0x%lx pfns 0x%lx\n",
+				node_id, bank, prev_bank_ind, phys_bank->next,
+				node_start, node_end, node_mem->pfns_num);
+		}
+		prev_phys_bank->next = phys_bank->next;
+	}
+	node_mem->banks_num--;
+	/* mark the bank descriptor free (pages_num == 0 is the free flag) */
+	phys_bank->next = -1;
+	phys_bank->pages_num = 0;
+
+	DebugBank("Node #%d : after delete bank #%d is from 0x%lx to 0x%lx, "
+		"pfns 0x%lx, banks num %d\n",
+		node_id, bank, node_start, node_end,
+		node_mem->pfns_num, node_mem->banks_num);
+
+	return -1;	/* the bank is deleted from list */
+}
+
+/* should return source low bank index, which can be updated while truncating */
+/* but now number should not be changed */
+short __init_recv
/*
 * Truncate the leading part [from_addr, to_addr) of physical memory bank
 * 'bank' on node 'node_id'.
 *
 * The range must start exactly at the bank base address.  If it covers the
 * whole bank, the bank is removed from the node list via
 * boot_delete_phys_bank().  Otherwise the bank base is advanced to 'to_addr',
 * all busy areas are rebased (they must lie entirely above the truncated
 * part), and the node start/size accounting is corrected.
 *
 * Returns the source bank index (currently unchanged) on partial truncation,
 * or a negative value if the bank was deleted completely.
 */
short __init_recv
boot_delete_phys_bank_part(int node_id,
		node_phys_mem_t *node_mem, short bank,
		e2k_phys_bank_t *phys_bank, e2k_addr_t from_addr,
		e2k_addr_t to_addr)
{
	e2k_addr_t bank_start, bank_end;
	e2k_addr_t node_start, node_end;
	e2k_size_t pages_del;
	e2k_busy_mem_t *busy_area;
	short prev_bank_ind;
	short area;

	if (from_addr >= to_addr) {
		BOOT_BUG("Node #%d bank #%d: area to truncate from 0x%lx "
			"is above or equal to 0x%lx\n",
			node_id, bank, from_addr, to_addr);
	}
	bank_start = phys_bank->base_addr;
	bank_end = bank_start + (phys_bank->pages_num << PAGE_SHIFT);
	/* only the head of a bank can be truncated by this function */
	if (from_addr != bank_start) {
		BOOT_BUG("Node #%d bank #%d: area to truncate from 0x%lx "
			"to 0x%lx is not started from bank base 0x%lx\n",
			node_id, bank, from_addr, to_addr, bank_start);
	}
	pages_del = (to_addr - from_addr) >> PAGE_SHIFT;
	if (pages_del > phys_bank->pages_num) {
		BOOT_BUG("Node #%d bank #%d: area to truncate from 0x%lx "
			"to 0x%lx is out of bank from 0x%lx to 0x%lx\n",
			node_id, bank, from_addr, to_addr,
			bank_start, bank_end);
	}
	if (pages_del == phys_bank->pages_num)
		/* bank should be deleted fully */
		return boot_delete_phys_bank(node_id,
				node_mem, bank, phys_bank);

	DebugBank("node #%d bank #%d from 0x%lx to 0x%lx: should be "
		"truncated partially from 0x%lx to 0x%lx\n",
		node_id, bank, bank_start, bank_end, from_addr, to_addr);

	/* loop on busy areas of bank to update start page of the area */
	/* because of bank base address change */
	for (area = phys_bank->first_area;
			area >= 0;
				area = busy_area->next) {
		busy_area = &phys_bank->busy_areas[area];
		DebugBank("Node #%d bank #%d busy area #%d from 0x%lx "
			"to 0x%lx\n",
			node_id, bank, area,
			bank_start + (busy_area->start_page << PAGE_SHIFT),
			bank_start + ((busy_area->start_page +
					busy_area->pages_num) << PAGE_SHIFT));
		if (busy_area->pages_num == 0) {
			BOOT_BUG("Node #%d bank #%d : empty physical memory "
				"busy area #%d cannot be in the list",
				node_id, bank, area);
			/* NOTE: unreachable if BOOT_BUG() halts the boot */
			continue;
		}
		/* busy areas must lie entirely above the truncated part */
		if (busy_area->start_page < pages_del) {
			BOOT_BUG("Node #%d bank #%d busy area #%d from 0x%lx "
				"to 0x%lx cannot intersect truncated part "
				"from 0x%lx to 0x%lx\n",
				node_id, bank, area,
				bank_start +
					(busy_area->start_page << PAGE_SHIFT),
				bank_start +
					((busy_area->start_page +
						busy_area->pages_num) <<
							PAGE_SHIFT),
				from_addr, to_addr);
			continue;
		}
		/* rebase the area: start_page is relative to the new base */
		busy_area->start_page -= pages_del;
		DebugBank("Node #%d bank #%d updated busy area #%d is now "
			"from 0x%lx to 0x%lx\n",
			node_id, bank, area,
			to_addr + (busy_area->start_page << PAGE_SHIFT),
			to_addr + ((busy_area->start_page +
					busy_area->pages_num) << PAGE_SHIFT));
	}

	/* now truncate beginning part of bank and correct bank & node */
	/* start, end or size */
	prev_bank_ind = boot_find_node_phys_bank(node_id, node_mem, bank);
	node_start = node_mem->start_pfn << PAGE_SHIFT;
	node_end = (node_mem->start_pfn + node_mem->pfns_num) << PAGE_SHIFT;
	if (prev_bank_ind < 0) {
		/* the truncated bank is at the head of the list */
		if (node_mem->first_bank != bank) {
			BOOT_BUG("Node #%d: head of list of node banks points "
				"to bank #%d, but should point to #%d\n",
				node_id, node_mem->first_bank, bank);
		}
		if (node_start != bank_start) {
			BOOT_BUG("Node #%d : truncated bank #%d from 0x%lx "
				"should start from node start 0x%lx\n",
				node_id, bank, bank_start, node_start);
		}
		if (phys_bank->next < 0) {
			/* it is last bank on the node */
			if (node_end != bank_end) {
				BOOT_BUG("Node #%d : truncated bank #%d is "
					"last on node, so its end 0x%lx should "
					"be equal to node end 0x%lx\n",
					node_id, bank, bank_end, node_end);
			}
		}
		/* correct start & size of node on truncated size */
		/* to point to new bank start and truncated pages number */
		node_mem->start_pfn += pages_del;
		node_mem->pfns_num -= pages_del;
		node_start = to_addr;
	} else {
		/* the truncated bank is after current bank */
		/* so truncated pages transform to hole and node start & end */
		/* are not changed */
		if (node_mem->first_bank == bank) {
			BOOT_BUG("Node #%d: head of list of node banks points "
				"to bank #%d, but should point to other\n",
				node_id, node_mem->first_bank);
		}
		if (node_start >= bank_start) {
			BOOT_BUG("Node #%d : truncated bank #%d from 0x%lx "
				"below node start address 0x%lx\n",
				node_id, bank, bank_start, node_start);
		}
		if (phys_bank->next < 0) {
			/* truncated bank is last bank on the node */
			if (node_end != bank_end) {
				BOOT_BUG("Node #%d : truncated bank #%d is "
					"last on node, so its end 0x%lx should "
					"be equal to node end 0x%lx\n",
					node_id, bank, bank_end, node_end);
			}
		} else {
			/* truncated area is into middle of banks list, */
			if (node_end <= bank_end) {
				BOOT_BUG("Node #%d : truncated bank #%d is not "
					"last on node, so its end 0x%lx should "
					"be below node end 0x%lx\n",
					node_id, bank, bank_end, node_end);
			}
		}
	}

	/* correct bank base address and size on truncated pages */
	/* truncated area can not be reserved, so number of free pages */
	/* should be decremented on truncated pages number */
	phys_bank->base_addr = to_addr;
	phys_bank->pages_num -= pages_del;
	atomic64_sub(pages_del, &phys_bank->free_pages_num);

	DebugBank("node #%d truncated bank #%d is now from 0x%lx to 0x%lx, "
		"free pages 0x%lx\n",
		node_id, bank, phys_bank->base_addr,
		phys_bank->base_addr + (phys_bank->pages_num << PAGE_SHIFT),
		atomic64_read(&phys_bank->free_pages_num));

	DebugBank("Node #%d : after truncating bank #%d is from 0x%lx "
		"to 0x%lx, pfns with holes 0x%lx\n",
		node_id, bank, node_start, node_end, node_mem->pfns_num);

	return bank;
}

/*
 * Split off the leading part [from_addr, to_addr) of bank 'bank' into a
 * freshly created bank, re-reserving any busy areas that fall inside the
 * range into the new bank, then delete that part from the source bank and
 * link the new bank into the node list.
 *
 * Returns the (possibly updated) source bank index, -1 when the range covers
 * the whole source bank (nothing to split), or a negative error from
 * boot_init_new_phys_bank() if no free bank entry is available.
 */
/* should return source low bank index, which can be updated while creation */
/* but now number should not be changed */
short __init boot_create_phys_bank_part(int node_id, node_phys_mem_t *node_mem,
			short bank, e2k_phys_bank_t *phys_bank,
			e2k_addr_t from_addr, e2k_addr_t to_addr)
{
	boot_phys_bank_t *node_banks;
	e2k_phys_bank_t *new_phys_bank;
	e2k_addr_t bank_start, bank_end;
	e2k_size_t pages;
	e2k_busy_mem_t *busy_area;
	short new_bank, old_bank;
	short area, next_area;

	if (from_addr >= to_addr) {
		BOOT_BUG("Node #%d bank #%d: area to create from 0x%lx "
			"is above or equal to 0x%lx\n",
			node_id, bank, from_addr, to_addr);
	}
	bank_start = phys_bank->base_addr;
	bank_end = bank_start + (phys_bank->pages_num << PAGE_SHIFT);
	if (from_addr != bank_start) {
		BOOT_BUG("Node #%d bank #%d: area to create from 0x%lx "
			"to 0x%lx is not started from bank base 0x%lx\n",
			node_id, bank, from_addr, to_addr, bank_start);
	}
	pages = (to_addr - from_addr) >> PAGE_SHIFT;
	if (pages > phys_bank->pages_num) {
		BOOT_BUG("Node #%d bank #%d: area to create from 0x%lx "
			"to 0x%lx is out of bank from 0x%lx to 0x%lx\n",
			node_id, bank, from_addr, to_addr,
			bank_start, bank_end);
	}
	if (pages == phys_bank->pages_num)
		/* new bank should be created from source bank fully */
		/* and source bank will be empty, so it does not need */
		/* divide source bank on two parts, create real new bank, */
		/* the source bank is now as new, the old source bank */
		/* is now empty and as deleted */
		return -1;	/* no longer source bank */

	new_bank = boot_init_new_phys_bank(node_id, node_mem,
				from_addr, to_addr - from_addr);
	if (new_bank < 0) {
		BOOT_WARNING("Node #%d: could not create new bank from 0x%lx "
			"to 0x%lx for unremapped area of bank #%d",
			node_id, from_addr, to_addr, bank);
		return new_bank;
	}
	node_banks = node_mem->banks;
	new_phys_bank = &node_banks[new_bank];
	DebugBank("Node #%d: created new bank #%d from 0x%lx to 0x%lx "
		"for unremapped area of bank #%d\n",
		node_id, new_bank, from_addr, to_addr, bank);

	/* loop on unremapable areas of memory bank to remap them */
	/* to created new memory bank */
	for (area = phys_bank->first_area; area >= 0; area = next_area) {
		e2k_size_t start, end;

		busy_area = &phys_bank->busy_areas[area];
		if (busy_area->pages_num == 0) {
			BOOT_BUG("Node #%d low bank #%d empty physical memory "
				"busy area #%d cannot be in the list",
				node_id, bank, area);
		}
		start = busy_area->start_page;
		end = start + busy_area->pages_num;
		/* areas are kept sorted: first area past 'to_addr' ends scan */
		if (from_addr + (start << PAGE_SHIFT) >= to_addr) {
			DebugBank("Node #%d bank #%d current area #%d "
				"from 0x%lx to 0x%lx is out of unremable "
				"range\n",
				node_id, bank, area,
				from_addr + (start << PAGE_SHIFT),
				from_addr + (end << PAGE_SHIFT));
			break;
		}
		DebugBank("Node #%d bank #%d current unremapable area #%d "
			"from 0x%lx to 0x%lx\n",
			node_id, bank, area,
			from_addr + (start << PAGE_SHIFT),
			from_addr + (end << PAGE_SHIFT));

		/* remapping of some area should delete it from list of areas */
		/* so save reference to next entry of the list before */
		next_area = busy_area->next;

		if (start >= phys_bank->pages_num ||
				end > phys_bank->pages_num) {
			BOOT_BUG("Node #%d bank #%d area #%d start 0x%lx "
				"or end 0x%lx is out of bank size 0x%lx\n",
				node_id, bank, area, start, end,
				phys_bank->pages_num);
		}

		boot_rereserve_bank_area(node_id, node_mem,
			bank, new_bank, area, busy_area);
	}

	/* now old bank (or part of bank) can be deleted */
	old_bank = boot_delete_phys_bank_part(node_id, node_mem,
				bank, phys_bank, from_addr, to_addr);
	if (old_bank < 0) {
		BOOT_BUG("Node #%d low bank #%d could not be empty after "
			"delete its part from 0x%lx to 0x%lx\n",
			node_id, bank, from_addr, to_addr);
	}

	/* insert new bank of unremapable low memory at the list */
	boot_add_new_phys_bank(node_id, node_mem, new_phys_bank, new_bank);

	return old_bank;
}
e2k_size_t bank_memory_size = *bank_memory_size_p; + int bank_num = 0; + int bank = 0; + + node_mem->first_bank = -1; /* initial state: empty list */ + + while (bank_info = boot_get_next_node_bank(bootblock, node, + &node_banks_ind, node_banks_ind_ex_p), + bank_info != NULL) { + e2k_size_t bank_size; + e2k_addr_t bank_start; + e2k_phys_bank_t *new_phys_bank; + short new_bank_ind; + + if (bank >= L_MAX_NODE_PHYS_BANKS) { + BOOT_WARNING("Node #%d number of phys banks %d exceeds " + "permissible limit, ignored", + node, bank); + bank++; + continue; + } + + if ((phys_memory_size + bank_memory_size) >= boot_mem_limit) { + BOOT_WARNING("Node #%d bank #%d: total memory " + "size 0x%lx exceeds permissible limit 0x%lx, " + "ignored", + node, bank, + phys_memory_size + bank_memory_size, + boot_mem_limit); + bank++; + continue; + } + if (bank_memory_size >= boot_node_mem_limit) { + BOOT_WARNING("Node #%d bank #%d memory size 0x%lx " + "exceeds permissible node limit 0x%lx, " + "ignored", + node, bank, + bank_memory_size, boot_node_mem_limit); + bank++; + continue; + } + + bank_start = bank_info->address; + bank_size = bank_info->size; + + if (bank_size == 0) { + BOOT_BUG("Node #%d empty bank #%d", node, bank); + bank_info = NULL; + break; + } + + if ((bank_size & (PAGE_SIZE - 1)) != 0) { + BOOT_BUG("Node #%d: phys bank #%d size 0x%lx " + "is not page aligned", + node, bank, bank_size); + bank_size &= ~(PAGE_SIZE - 1); + } + + if ((bank_start & (PAGE_SIZE - 1)) != 0) { + BOOT_BUG("Node #%d: phys bank #%d base address 0x%lx " + "is not page aligned", + node, bank, bank_start); + bank_size += (bank_start & (PAGE_SIZE - 1)); + bank_start &= ~(PAGE_SIZE - 1); + } + + if ((phys_memory_size + bank_memory_size + bank_size) > + boot_mem_limit) { + bank_size -= phys_memory_size + bank_memory_size + + bank_size - boot_mem_limit; + boot_printk("Node #%d: phys bank #%d size is reduced " + "to 0x%lx bytes\n", + node, bank, bank_size); + } + + if ((bank_memory_size + bank_size) > 
boot_node_mem_limit) { + bank_size -= bank_memory_size + bank_size - + boot_node_mem_limit; + boot_printk("Node #%d: phys bank #%d size is reduced " + "to 0x%lx bytes\n", + node, bank, bank_size); + } + + new_bank_ind = boot_create_new_phys_bank(node, node_mem, + bank_start, bank_size); + if (new_bank_ind < 0) { + BOOT_WARNING("Node #%d: could not find empty bank " + "entry to add one more physical memory bank", + node); + break; + } + new_phys_bank = &phys_banks[new_bank_ind]; + bank_num++; + bank_memory_size += bank_size; + boot_printk("Node #%d: phys bank #%d (list index %d) " + "address 0x%lx, size 0x%lx pages (0x%lx bytes)\n", + node, bank, new_bank_ind, + new_phys_bank->base_addr, new_phys_bank->pages_num, + new_phys_bank->pages_num * PAGE_SIZE); + bank++; + } + + *bank_memory_size_p = bank_memory_size; + + return bank_num; +} + +/* + * Probe physical memory configuration of the machine and fill the array of + * structures of physical memory banks 'e2k_phys_bank'. + * It is better to merge contiguous memory banks for allocation goals. + * Base address of a bank should be page aligned. 
+ */ + +int __init +boot_biosx86_probe_memory(node_phys_mem_t *nodes_phys_mem, + boot_info_t *bootblock) +{ + node_phys_mem_t *node_mem = nodes_phys_mem; + int nodes_banks_ind_ex = 0; + unsigned long nodes_map = 0; + int nodes_num = 0; + unsigned long node_mask = 0x1UL; + int boot_bank_num; + int bank_num = 0; + int node; + e2k_size_t phys_memory_size = 0; + +#ifndef CONFIG_SMP + boot_phys_nodes_num = 1; + boot_phys_nodes_map = 0x1; +#endif /* CONFIG_SMP */ + + for (node = 0; node < L_MAX_MEM_NUMNODES; node++) { + bank_info_t *node_bank; + e2k_size_t bank_memory_size = 0; + int node_bank_num = 0; + + if (phys_memory_size >= boot_mem_limit) + break; + + node_bank = boot_has_node_banks_info(bootblock, node); + if (!(boot_phys_nodes_map & node_mask) && + BOOT_HAS_MACHINE_L_SIC) { + if (node_bank != NULL) { + BOOT_WARNING("Node #%d is not online but " + "has not empty memory bank " + "address 0x%lx, size 0x%lx, ignored", + node, node_bank->address, + node_bank->size); + } + goto next_node; + } + if (node_bank == NULL) + goto next_node; /* node has not memory */ + if ((!BOOT_HAS_MACHINE_E2K_FULL_SIC) && node != 0) { + BOOT_WARNING("Machine can have only one node #0, " + "but memory node #%d has not empty phys bank " + "address 0x%lx, size 0x%lx, ignored", + node, node_bank->address, node_bank->size); + goto next_node; + } + + nodes_num++; + nodes_map |= node_mask; + + node_bank_num = boot_biosx86_probe_node_memory(bootblock, node, + node_mem, phys_memory_size, + &nodes_banks_ind_ex, + &bank_memory_size); + + phys_memory_size += bank_memory_size; + bank_num += node_bank_num; + boot_printk("Node #%d: banks num %d, first bank index %d " + "start pfn 0x%lx, size 0x%lx pfns\n", + node, + node_mem->banks_num, node_mem->first_bank, + node_mem->start_pfn, node_mem->pfns_num); + +next_node: + boot_printk("Node #%d: phys memory total size is %d Mgb\n", + node, bank_memory_size / (1024 * 1024)); + node_mem++; + node_mask <<= 1; + } + + boot_bank_num = bootblock->num_of_banks; + + 
if (boot_mem_limit != -1UL && boot_node_mem_limit != -1UL && + boot_bank_num != 0 && boot_bank_num != bank_num) { + BOOT_WARNING("Number of banks of physical memory passed " + "by boot loader %d is not the same as banks " + "at boot_info structure %d", + boot_bank_num, bank_num); + } + if (nodes_num == 0) { + BOOT_BUG("Empty online nodes map passed by boot loader " + "at boot_info structure"); + } + if (boot_phys_nodes_map && ((boot_phys_nodes_map & nodes_map) + != nodes_map)) { + BOOT_BUG("Calculated map of nodes with memory 0x%lx " + "contains node(s) out of total nodes map 0x%lx", + nodes_map, boot_phys_nodes_map); + } + if (boot_phys_nodes_map & ~((1 << L_MAX_MEM_NUMNODES) - 1)) { + BOOT_WARNING("Probably some nodes 0x%lx out of memory " + "max nodes range 0x%lx contain memory, " + "but cannot be accounted", + boot_phys_nodes_map, (1 << L_MAX_MEM_NUMNODES) - 1); + } + + boot_phys_mem_nodes_num = nodes_num; + boot_phys_mem_nodes_map = nodes_map; + boot_totalram_real_pages = phys_memory_size / PAGE_SIZE; + boot_printk("Phys memory total size is %d Mgb\n", + phys_memory_size / (1024 * 1024)); + return bank_num; +} + +static inline int __init +boot_romloader_probe_memory(node_phys_mem_t *nodes_phys_mem, + boot_info_t *bootblock) +{ + return boot_biosx86_probe_memory(nodes_phys_mem, bootblock); +} + +int __init +boot_native_loader_probe_memory(node_phys_mem_t *nodes_phys_mem, + boot_info_t *bootblock) +{ + int bank_num = 0; + + if (bootblock->signature == ROMLOADER_SIGNATURE) { + bank_num = boot_romloader_probe_memory(nodes_phys_mem, + bootblock); + } else if (bootblock->signature == X86BOOT_SIGNATURE) { + bank_num = boot_biosx86_probe_memory(nodes_phys_mem, bootblock); + } else { + BOOT_BUG_POINT("boot_native_loader_probe_memory"); + BOOT_BUG("Unknown type of Boot information structure"); + } + return bank_num; +} + +static void __init +boot_probe_memory(boot_info_t *boot_info) +{ + node_phys_mem_t *all_phys_banks = NULL; + int bank_num = 0; + + all_phys_banks = 
boot_vp_to_pp((node_phys_mem_t *)nodes_phys_mem); + boot_fast_memset(all_phys_banks, 0x00, sizeof(*all_phys_banks)); + + bank_num = boot_loader_probe_memory(all_phys_banks, boot_info); +} + +#ifdef CONFIG_ONLY_HIGH_PHYS_MEM + +static bank_info_t * __init_recv +boot_find_low_pa_bank(e2k_addr_t lo_pa, + boot_info_t *bootblock, int node, int *node_banks_ind_ex_p) +{ + bank_info_t *bank_info; + int node_banks_ind = 0; + + while (bank_info = boot_get_next_node_bank(bootblock, node, + &node_banks_ind, node_banks_ind_ex_p), + bank_info != NULL) { + e2k_addr_t bank_start; + e2k_addr_t bank_end; + + bank_start = bank_info->address; + bank_end = bank_start + bank_info->size; + + if (lo_pa >= bank_start && lo_pa < bank_end) + /* low address bank is found */ + return bank_info; + + } + return NULL; +} + +static bank_info_t * __init_recv +boot_find_high_pa_bank(bank_info_t *lo_bank_info, bool above, /* else below */ + boot_info_t *bootblock, int node, int node_banks_ind_ex) +{ + bank_info_t *bank_info; + int banks_ind = 0; + int banks_ind_ex = node_banks_ind_ex; + e2k_addr_t lo_start = lo_bank_info->address; + e2k_addr_t lo_end = lo_start + lo_bank_info->size; + + while (bank_info = boot_get_next_node_bank(bootblock, node, + &banks_ind, &banks_ind_ex), + bank_info != NULL) { + e2k_addr_t bank_start, bank_end; + e2k_addr_t lo_addr, hi_addr; + + bank_start = bank_info->address; + bank_end = bank_start + bank_info->size; + if (is_addr_from_low_memory(bank_end - 1)) + /* it is low memory bank, ignore */ + continue; + + if (above) { + /* contiguity should be from low end to high start */ + hi_addr = bank_start; + lo_addr = lo_end; + } else { + /* contiguity should be from high end to low start */ + hi_addr = bank_end; + lo_addr = lo_start; + } + lo_addr |= (hi_addr & ~LOW_PHYS_MEM_MASK); + if (lo_addr == hi_addr) + /* high address bank is found */ + return bank_info; + } + return NULL; +} +static inline bank_info_t * __init_recv +boot_find_above_high_pa_bank(bank_info_t 
*lo_bank_info, + boot_info_t *bootblock, int node, int node_banks_ind_ex) +{ + return boot_find_high_pa_bank(lo_bank_info, true, /* i.e. above */ + bootblock, node, node_banks_ind_ex); +} +static inline bank_info_t * __init_recv +boot_find_below_high_pa_bank(bank_info_t *lo_bank_info, + boot_info_t *bootblock, int node, int node_banks_ind_ex) +{ + return boot_find_high_pa_bank(lo_bank_info, false, /* i.e. below */ + bootblock, node, node_banks_ind_ex); +} + +static e2k_addr_t __init_recv +boot_node_pa_to_high_pa(e2k_addr_t pa, boot_info_t *bootblock) +{ + int nodes_banks_ind_ex = 0; + bank_info_t *lo_bank_info = NULL; + int node_lo_banks_ind_ex; + bank_info_t *below_hi_bank_info; + bank_info_t *above_hi_bank_info; + e2k_addr_t lo_pa_offset; + e2k_addr_t hi_pa; + int node; + + for (node = 0; node < L_MAX_MEM_NUMNODES; node++) { + bank_info_t *node_bank; + + node_bank = boot_has_node_banks_info(bootblock, node); + if (node_bank == NULL) + continue; /* node has not memory */ + + node_lo_banks_ind_ex = nodes_banks_ind_ex; + lo_bank_info = boot_find_low_pa_bank(pa, bootblock, node, + &nodes_banks_ind_ex); + if (lo_bank_info != NULL) + /* low address bank is found */ + break; + } + if (lo_bank_info == NULL) + /* could not find low memory bank for source low address */ + return -1; + lo_pa_offset = pa - lo_bank_info->address; + + below_hi_bank_info = boot_find_below_high_pa_bank(lo_bank_info, + bootblock, node, node_lo_banks_ind_ex); + above_hi_bank_info = boot_find_above_high_pa_bank(lo_bank_info, + bootblock, node, node_lo_banks_ind_ex); + if (below_hi_bank_info == NULL && above_hi_bank_info == NULL) + /* could not find high memory bank from which low area */ + /* was cut out */ + return -1; + if (below_hi_bank_info == NULL) { + /* low area was cut out from the very beginning of high bank */ + hi_pa = above_hi_bank_info->address - lo_bank_info->size + + lo_pa_offset; + if ((hi_pa - lo_pa_offset) + lo_bank_info->size + + above_hi_bank_info->size != + 
above_hi_bank_info->address + + above_hi_bank_info->size) { + BOOT_WARNING("high address calculated from begining " + "of the above area 0x%lx + low area size 0x%lx " + "+ high area size is not equal to address " + "of bank end 0x%lx\n", + hi_pa - lo_pa_offset, lo_bank_info->size, + above_hi_bank_info->size, + above_hi_bank_info->address + + above_hi_bank_info->size); + return -1; + } + return hi_pa; + } + if (above_hi_bank_info == NULL) { + /* low area was cut out from the very ending of high bank */ + hi_pa = below_hi_bank_info->address + below_hi_bank_info->size + + lo_pa_offset; + if ((hi_pa - lo_pa_offset) + lo_bank_info->size != + below_hi_bank_info->address + + below_hi_bank_info->size + lo_bank_info->size) { + BOOT_WARNING("high address calculated from ending " + "of the below area 0x%lx + low area size 0x%lx " + "is not equal to address " + "of bank merged end 0x%lx\n", + hi_pa - lo_pa_offset, lo_bank_info->size, + below_hi_bank_info->address + + below_hi_bank_info->size + + lo_bank_info->size); + return -1; + } + return hi_pa; + } + if (below_hi_bank_info == above_hi_bank_info) { + /* below and above banks are the same bank, */ + /* it can be if start of low area is aligned to the bank end */ + /* so low area was cut out from the beginning of the bank */ + hi_pa = above_hi_bank_info->address - lo_bank_info->size + + lo_pa_offset; + if ((hi_pa - lo_pa_offset) + lo_bank_info->size + + above_hi_bank_info->size != + above_hi_bank_info->address + + above_hi_bank_info->size) { + BOOT_WARNING("high address calculated from begining " + "of the same area 0x%lx + low area size 0x%lx " + "+ high area size is not equal to address " + "of bank end 0x%lx\n", + hi_pa - lo_pa_offset, lo_bank_info->size, + above_hi_bank_info->size, + above_hi_bank_info->address + + above_hi_bank_info->size); + return -1; + } + return hi_pa; + } + + /* low area was cut out from the middle of high bank */ + /* (from ending of below and to beginning of above) */ + hi_pa = 
below_hi_bank_info->address + below_hi_bank_info->size + + lo_pa_offset; + if (hi_pa != above_hi_bank_info->address - + lo_bank_info->size + lo_pa_offset) { + BOOT_WARNING("high address calculated from ending of below " + "area 0x%lx is not equal 0x%lx : address calculated " + "from beginning of above area\n", + hi_pa, above_hi_bank_info->address - + lo_bank_info->size + lo_pa_offset); + return -1; + } + return hi_pa; +} + +void * __init_recv boot_pa_to_high_pa(void *pa, boot_info_t *bootblock) +{ + e2k_addr_t lo_pa = (e2k_addr_t)pa; + e2k_addr_t hi_pa; + + if (likely(is_addr_from_high_memory(lo_pa))) { + /* address is already from high area */ + DebugLoHi("physical address 0x%lx is already from high " + "addresses range\n", + lo_pa); + return pa; + } + if (BOOT_LOW_MEMORY_ENABLED()) { + /* conversion is disabled. return source address */ + DebugLoHi("physical address 0x%lx conversion is disabled\n", + lo_pa); + return pa; + } + hi_pa = boot_node_pa_to_high_pa(lo_pa, bootblock); + if (hi_pa == (e2k_addr_t)-1) { + if (boot_has_high_memory(bootblock)) { + BOOT_WARNING("could not convert low physical " + "address 0x%lx to equivalent from high range", + lo_pa); + } + return pa; + } else { + DebugLoHi("low physical address 0x%lx is converted " + "to 0x%lx from high addresses range\n", + lo_pa, hi_pa); + } + return (void *)hi_pa; +} + +bool __init boot_has_lo_bank_remap_to_hi(boot_phys_bank_t *phys_bank, + boot_info_t *boot_info) +{ + e2k_addr_t bank_start, bank_end; + e2k_addr_t bank_start_hi, bank_end_hi; + + bank_start = phys_bank->base_addr; + bank_end = bank_start + (phys_bank->pages_num << PAGE_SHIFT); + bank_start_hi = (e2k_addr_t)boot_pa_to_high_pa((void *)bank_start, + boot_info); + bank_end_hi = (e2k_addr_t)boot_pa_end_to_high((void *)bank_end, + boot_info); + if (bank_start_hi == bank_start || bank_end_hi == bank_end) + return false; + return true; +} + +#endif /* CONFIG_ONLY_HIGH_PHYS_MEM */ + +e2k_size_t __init +boot_native_get_bootblock_size(boot_info_t 
*bblock) +{ + e2k_size_t area_size = 0; + + if (bblock->signature == ROMLOADER_SIGNATURE) { + area_size = sizeof(bootblock_struct_t); + } else if (bblock->signature == X86BOOT_SIGNATURE) { + area_size = sizeof(bootblock_struct_t); + } else { + BOOT_BUG_POINT("boot_native_get_bootblock_size"); + BOOT_BUG("Unknown type of Boot information structure"); + } + return area_size; +} + +static void __init +boot_reserve_0_phys_page(bool bsp, boot_info_t *boot_info) +{ + e2k_addr_t area_base; + e2k_size_t area_size; + int ret; + + if (BOOT_IS_BSP(bsp)) { + area_base = 0; + area_size = PAGE_SIZE; + ret = boot_reserve_physmem(area_base, area_size, + hw_reserved_mem_type, + BOOT_NOT_IGNORE_BUSY_BANK | + BOOT_IGNORE_BANK_NOT_FOUND); + if (ret != 0) { + BOOT_BUG("Could not reserve 0-page area: " + "base addr 0x%lx size 0x%lx page size 0x%x", + area_base, area_size, PAGE_SIZE); + } + boot_fast_memset((void *)0, 0x00, PAGE_SIZE); + boot_printk("The 0-page reserved area: " + "base addr 0x%lx size 0x%lx page size 0x%x\n", + area_base, area_size, PAGE_SIZE); + } +} + +void __init +boot_reserve_kernel_image(bool bsp, boot_info_t *boot_info) +{ + e2k_addr_t area_base; + e2k_size_t area_size; + oscud_struct_t OSCUD = { { {0} }, { {0} } }; + osgd_struct_t OSGD = { { {0} }, { {0} } }; + int ret; + + /* + * Reserve kernel image 'text/data/bss' segments. + * 'OSCUD' & 'OSGD' register-pointers describe these areas. + * 'text' and 'data/bss' segments can intersect or one can include + * other. 
+ */ + + if (BOOT_IS_BSP(bsp)) { + boot_read_OSCUD_reg(&OSCUD); + area_base = OSCUD.OSCUD_base; + area_size = OSCUD.OSCUD_size; + ret = boot_reserve_physmem(area_base, area_size, + kernel_image_mem_type, + BOOT_NOT_IGNORE_BUSY_BANK | + BOOT_CAN_BE_INTERSECTIONS); + if (ret != 0) { + BOOT_BUG("Could not reserve kernel 'text' segment: " + "base addr 0x%lx size 0x%lx page size 0x%x", + area_base, area_size, + BOOT_E2K_KERNEL_PAGE_SIZE); + } + boot_text_phys_base = area_base; + boot_text_size = area_size; + boot_printk("The kernel 'text' segment: base 0x%lx " + "size 0x%lx page size 0x%x\n", + boot_text_phys_base, boot_text_size, + BOOT_E2K_KERNEL_PAGE_SIZE); + + area_base = (e2k_addr_t)boot_vp_to_pp( + &__start_ro_after_init); + area_size = (e2k_addr_t)__end_ro_after_init - + (e2k_addr_t)__start_ro_after_init; + area_size = _PAGE_ALIGN_DOWN(area_size, PAGE_SIZE); + if (area_size != 0) { + ret = boot_reserve_physmem(area_base, area_size, + kernel_image_mem_type, + BOOT_IGNORE_BUSY_BANK | + BOOT_CAN_BE_INTERSECTIONS); + if (ret) { + BOOT_BUG("Could not reserve kernel " + "'.data.ro_after_init' segment: " + "base addr 0x%lx size 0x%lx " + "page size 0x%x", + area_base, area_size, PAGE_SIZE); + } + boot_printk("The kernel '.data.ro_after_init' segment: " + "base 0x%lx size 0x%lx page size 0x%x\n", + area_base, area_size, PAGE_SIZE); + } else { + boot_printk("The kernel '.data.ro_after_init' segment " + "is empty\n"); + } + +#ifndef CONFIG_NUMA + area_base = (u64) boot_vp_to_pp((void *) empty_zero_page); + area_size = PAGE_SIZE; + ret = boot_reserve_physmem(area_base, area_size, + kernel_image_mem_type, BOOT_NOT_IGNORE_BUSY_BANK); + if (ret) { + BOOT_BUG("Could not reserve kernel 'zero_page' segment: base addr 0x%lx size 0x%lx page size 0x%x", + area_base, area_size, PAGE_SIZE); + } +#endif + + boot_read_OSGD_reg(&OSGD); + area_base = OSGD.OSGD_base; + area_size = OSGD.OSGD_size; + ret = boot_reserve_physmem(area_base, area_size, + kernel_image_mem_type, + 
BOOT_IGNORE_BUSY_BANK | + BOOT_CAN_BE_INTERSECTIONS); + if (ret != 0) { + BOOT_BUG("Could not reserve kernel 'data/bss' " + "segments: base addr 0x%lx size 0x%lx " + "page size 0x%x", + area_base, area_size, + BOOT_E2K_KERNEL_PAGE_SIZE); + } + boot_data_phys_base = area_base; + boot_data_size = area_size; + boot_printk("The kernel 'data/bss' segment: " + "base addr 0x%lx size 0x%lx page size 0x%x\n", + area_base, area_size, BOOT_E2K_KERNEL_PAGE_SIZE); + + area_base = (e2k_addr_t)boot_vp_to_pp(&__init_begin); + area_size = (e2k_addr_t) (__init_end - __init_begin); + ret = boot_reserve_physmem(area_base, area_size, + kernel_image_mem_type, + BOOT_IGNORE_BUSY_BANK | + BOOT_CAN_BE_INTERSECTIONS); + if (ret) { + BOOT_BUG("Could not reserve kernel 'init' segment: " + "base addr 0x%lx size 0x%lx page size 0x%x", + area_base, area_size, PAGE_SIZE); + } + boot_printk("The kernel 'init' segment: base 0x%lx " + "size 0x%lx page size 0x%x\n", + area_base, area_size, PAGE_SIZE); + } +} + +void __init boot_reserve_stacks(boot_info_t *boot_info) +{ + e2k_addr_t area_base; + e2k_size_t area_size; + e2k_addr_t area_offset; + psp_struct_t PSP = { { {0} }, { {0} } }; + pcsp_struct_t PCSP = { { {0} }, { {0} } }; + e2k_usbr_t USBR = { {0} }; + usd_struct_t USD = { { {0} }, { {0} } }; + int ret; + + /* + * Reserve memory of boot-time hardware procedures stack (PS). + * 'PSP' register-pointer describes this area. 
/*
 * Reserve the boot-time hardware stacks of the calling CPU: the procedure
 * stack (PSP), the procedure chain stack (PCSP) and the data stack
 * (USBR + USD).  Bases and sizes are recorded in boot_boot_* globals.
 */
void __init boot_reserve_stacks(boot_info_t *boot_info)
{
	e2k_addr_t area_base;
	e2k_size_t area_size;
	e2k_addr_t area_offset;
	psp_struct_t PSP = { { {0} }, { {0} } };
	pcsp_struct_t PCSP = { { {0} }, { {0} } };
	e2k_usbr_t USBR = { {0} };
	usd_struct_t USD = { { {0} }, { {0} } };
	int ret;

	/*
	 * Reserve memory of boot-time hardware procedures stack (PS).
	 * 'PSP' register-pointer describes this area.
	 */

	boot_read_PSP_reg(&PSP);
	area_base = PSP.PSP_base;
	area_size = PSP.PSP_size;
	ret = boot_reserve_physmem(area_base, area_size, boot_loader_mem_type,
					BOOT_CAN_BE_INTERSECTIONS);
	if (ret != 0) {
		/* NOTE(review): the message reports area_size plus one
		 * stack page while only area_size was reserved — presumably
		 * accounting for a guard page; confirm */
		BOOT_BUG("Could not reserve kernel boot-time procedure stack: "
			"base addr 0x%lx size 0x%lx page size 0x%x",
			area_base, area_size + E2K_KERNEL_PS_PAGE_SIZE,
			E2K_KERNEL_PS_PAGE_SIZE);
	}
	boot_boot_ps_phys_base = area_base;
	boot_boot_ps_size = area_size;
	boot_printk("The kernel boot-time procedures stack: "
		"base addr 0x%lx size 0x%lx page size 0x%x\n",
		area_base, area_size + E2K_KERNEL_PS_PAGE_SIZE,
		E2K_KERNEL_PS_PAGE_SIZE);

	/*
	 * Reserve memory of boot-time hardware procedure chain stack (PCS).
	 * 'PCSP' register-pointer describes this area.
	 */

	boot_read_PCSP_reg(&PCSP);
	area_base = PCSP.PCSP_base;
	area_size = PCSP.PCSP_size;
	ret = boot_reserve_physmem(area_base, area_size, boot_loader_mem_type,
					BOOT_CAN_BE_INTERSECTIONS);
	if (ret != 0) {
		BOOT_BUG("Could not reserve kernel boot-time procedure chain "
			"stack: base addr 0x%lx size 0x%lx page size 0x%x",
			area_base, area_size + E2K_KERNEL_PCS_PAGE_SIZE,
			E2K_KERNEL_PCS_PAGE_SIZE);
	}
	boot_boot_pcs_phys_base = area_base;
	boot_boot_pcs_size = area_size;
	boot_printk("The kernel boot-time procedure chain stack: "
		"base addr 0x%lx size 0x%lx page size 0x%x\n",
		area_base, area_size + E2K_KERNEL_PCS_PAGE_SIZE,
		E2K_KERNEL_PCS_PAGE_SIZE);

	/*
	 * Reserve memory of boot-time kernel stack (user stack) (US).
	 * 'SBR + USD' registers describe this area.
	 */

	USBR = boot_read_USBR_reg();
	area_base = USBR.USBR_base;
	boot_read_USD_reg(&USD);
	boot_printk("The kernel boot-time data stack: "
		"USBR_base 0x%lx USD_base 0x%lx USD_size 0x%lx\n",
		USBR.USBR_base, USD.USD_base, USD.USD_size);
	/* the data stack grows down from USBR_base: the reserved area
	 * spans from the current USD frame bottom up to USBR_base */
	area_size = area_base - USD.USD_base;
	area_offset = USD.USD_size;
	area_size += area_offset;
	area_base -= area_size;
	ret = boot_reserve_physmem(area_base, area_size,
			boot_loader_mem_type,
			BOOT_CAN_BE_INTERSECTIONS);
	if (ret != 0) {
		BOOT_BUG("Could not reserve kernel boot-time data stack: "
			"base addr 0x%lx size 0x%lx USD offset 0x%lx page "
			"size 0x%x",
			area_base, area_size, area_offset,
			E2K_KERNEL_US_PAGE_SIZE);
	}
	boot_boot_stack_phys_base = area_base;
	boot_boot_stack_phys_offset = area_offset;
	boot_boot_stack_size = area_size;
	boot_printk("The kernel boot-time data stack: "
		"base addr 0x%lx size 0x%lx USD offset 0x%lx page size 0x%x\n",
		area_base, area_size, area_offset, E2K_KERNEL_US_PAGE_SIZE);
}

/*
 * Delete the legacy low VGA video memory window from the usable physical
 * memory map (BSP only) and record its base/size in boot_x86_hw_* globals.
 */
static void __init
boot_reserve_low_io_mem(bool bsp)
{
	e2k_addr_t area_base;
	e2k_size_t area_size;
	int ret;

	/*
	 * Reserve memory of low VGAMEM area.
	 */

	if (BOOT_IS_BSP(bsp)) {
		area_base = VGA_VRAM_PHYS_BASE;		/* VGA ... */
		area_size = VGA_VRAM_SIZE;
		ret = boot_delete_physmem(area_base, area_size);
		if (ret != 0) {
			BOOT_BUG("Could not delete low VGAMEM area: "
				"base addr 0x%lx size 0x%lx page size 0x%x",
				area_base, area_size, E2K_X86_HW_PAGE_SIZE);
		}
		boot_x86_hw_phys_base = area_base;
		boot_x86_hw_size = area_size;
		boot_printk("The low VGAMEM deleted area: "
			"base addr 0x%lx size 0x%lx page size 0x%x\n",
			area_base, area_size, E2K_X86_HW_PAGE_SIZE);
	}
}
*/ + area_size = 0; + area_size = boot_get_bootblock_size(boot_info); + ret = boot_reserve_physmem(area_base, area_size, + boot_loader_mem_type, + BOOT_CAN_BE_INTERSECTIONS); + if (ret != 0) { + BOOT_BUG("Could not reserve BOOTINFO area: " + "base addr 0x%lx size 0x%lx page size 0x%x", + area_base, area_size, E2K_BOOTINFO_PAGE_SIZE); + } + + boot_bootinfo_phys_base = area_base; + boot_bootinfo_size = area_size; + + boot_printk("The BOOTINFO reserved area: " + "base addr 0x%lx size 0x%lx page size 0x%x\n", + area_base, area_size, E2K_BOOTINFO_PAGE_SIZE); + + /* + * Reserve the needed areas from boot information records. + */ + + boot_reserve_bootinfo_areas(boot_info); + } +} + +static void __init +boot_reserve_boot_memory(bool bsp, boot_info_t *boot_info) +{ + e2k_addr_t area_base; + e2k_size_t area_size; + int bank; + int ret; + + /* + * Reserve memory used by BOOT (e2k boot-loader) + */ + + if (BOOT_IS_BSP(bsp)) { + for (bank = 0; bank < boot_info->num_of_busy; bank++) { + bank_info_t *busy_area; + busy_area = &boot_info->busy[bank]; + area_base = busy_area->address; + area_size = busy_area->size; + ret = boot_reserve_physmem(area_base, + area_size, + boot_loader_mem_type, + BOOT_IGNORE_BUSY_BANK | + BOOT_CAN_BE_INTERSECTIONS); + if (ret != 0) + BOOT_BUG("Could not reserve BIOS data " + "area #%d : base addr 0x%lx size 0x%lx " + "page size 0x%x", + bank, area_base, area_size, + PAGE_SIZE); + boot_printk("The BIOS data reserved area #%d : " + "base addr 0x%lx size 0x%lx page size " + "0x%x\n", + bank, area_base, area_size, PAGE_SIZE); + } + + /* FIXME: the BOOT should do this */ + if (boot_cpu_has(CPU_HWBUG_DMA_AT_APIC_ADDR)) { + area_base = APIC_DEFAULT_PHYS_BASE & 0x7fffFFFF; + area_size = PAGE_SIZE; + ret = boot_reserve_physmem(area_base, area_size, + hw_reserved_mem_type, + BOOT_ONLY_LOW_PHYS_MEM | + BOOT_IGNORE_AT_HIGH_PHYS_MEM | + BOOT_NOT_IGNORE_BUSY_BANK | + BOOT_IGNORE_BANK_NOT_FOUND); + if (ret != 0) { + BOOT_BUG_POINT("boot_reserve_boot_memory"); + 
BOOT_BUG("Could not reserve HW bug area : " + "base addr 0x%lx size 0x%lx page " + "size 0x%x", + area_base, area_size, PAGE_SIZE); + } + boot_printk("The HW bug reserved area : " + "base addr 0x%lx size 0x%lx page size 0x%x\n", + area_base, area_size, PAGE_SIZE); + } + + } +} + +/* + * Reserve the memory used by boot-time initialization. + * All the used memory areas enumerate below. If a some new area will be used, + * then it should be added to the list of already known ones. + */ +void __init +boot_native_reserve_all_bootmem(bool bsp, boot_info_t *boot_info) +{ + /* + * Reserve 0 phys page area for software fix of hardware bug: + * "page miss" for semi-speculative load for invalid address instead of + * diagnostic value because of "illegal page". + */ + boot_reserve_0_phys_page(bsp, boot_info); + + /* + * Reserve kernel image 'text/data/bss' segments. + * 'OSCUD' & 'OSGD' register-pointers describe these areas. + * 'text' and 'data/bss' segments can intersect or one can include + * other. + */ + boot_reserve_kernel_image(bsp, boot_info); + + /* + * Reserve memory of boot-time stacks. + */ + boot_reserve_stacks(boot_info); + + /* + * Reserve memory of PC reserved area (640K - 1M). + */ + boot_reserve_low_io_mem(bsp); + + /* + * SYNCHRONIZATION POINT #0.1 + * At this point all processors should complete reservation of + * themself used memory. + * Now boot loader busy area can be reserved, but only after + * this synchronization, because of this area can include all + * other before reserved areas (bug 101002) + */ + boot_sync_all_processors(); + + /* + * Reserve boot information records. 
+ */ + boot_reserve_bootblock(bsp, boot_info); + + /* + * Reserve memory used by BOOT (e2k boot-loader) + */ + boot_reserve_boot_memory(bsp, boot_info); +} + +#ifdef CONFIG_L_IO_APIC +/* + * Reserve the needed memory from MP - tables + */ + +static void __init +boot_reserve_mp_table(boot_info_t *bblock) +{ + e2k_addr_t area_base; + e2k_size_t area_size; + int ret; + struct intel_mp_floating *mpf; + + if (bblock->mp_table_base == (e2k_addr_t)0UL) + return; + + /* + * MP floating specification table + */ + + area_base = bblock->mp_table_base; + area_size = E2K_MPT_PAGE_SIZE; + ret = boot_reserve_physmem(area_base, area_size, + boot_loader_mem_type, + BOOT_IGNORE_BUSY_BANK | + BOOT_CAN_BE_INTERSECTIONS); + if (ret != 0) { + BOOT_BUG("Could not reserve MP floating table area: " + "base addr 0x%lx size 0x%lx page size 0x%x", + area_base, area_size, E2K_MPT_PAGE_SIZE); + } + boot_mpf_phys_base = area_base; + boot_mpf_size = area_size; + boot_printk("The MP floating table: " + "base addr 0x%lx size 0x%lx page size 0x%x\n", + area_base, area_size, E2K_MPT_PAGE_SIZE); + + mpf = (struct intel_mp_floating *)bblock->mp_table_base; + if (DEBUG_BOOT_MODE) { + int i; + for (i = 0; i < sizeof(struct intel_mp_floating) / 8; i++) { + do_boot_printk("mpf[%d] = 0x%lx\n", i, ((u64 *)mpf)[i]); + } + } + + /* + * MP configuration table + */ + + if (mpf->mpf_physptr != (e2k_addr_t)0UL) { + area_base = mpf->mpf_physptr; + area_size = E2K_MPT_PAGE_SIZE; + ret = boot_reserve_physmem(area_base, area_size, + boot_loader_mem_type, + BOOT_IGNORE_BUSY_BANK | + BOOT_CAN_BE_INTERSECTIONS); + if (ret != 0) { + BOOT_BUG("Could not reserve MP configuration table " + "area: base addr 0x%lx size 0x%lx " + "page size 0x%x", + area_base, area_size, E2K_MPT_PAGE_SIZE); + } + boot_mpc_phys_base = area_base; + boot_mpc_size = area_size; + boot_printk("The MP configuration table: " + "base addr 0x%lx size 0x%lx page size 0x%x\n", + area_base, area_size, E2K_MPT_PAGE_SIZE); + } else { + boot_mpc_size = 0; + 
boot_printk("The MP configuration table: is absent\n"); + } +} +#endif /* CONFIG_L_IO_APIC */ + +/* + * Reserve the needed memory from boot-info used by boot-time initialization. + * All the used memory areas from boot info enumerate below. + * If a some new area will be used, then it should be added to the list + * of already known ones. + */ + +static void __init +boot_reserve_bootinfo_areas(boot_info_t *boot_info) +{ +#ifdef CONFIG_BLK_DEV_INITRD + e2k_addr_t area_base; + e2k_size_t area_size; + int ret; +#endif /* CONFIG_BLK_DEV_INITRD */ + +#ifdef CONFIG_BLK_DEV_INITRD + + /* + * Reserve memory of initial ramdisk (initrd). + */ + + area_base = boot_info->ramdisk_base; /* INITRD_BASE and */ + area_size = boot_info->ramdisk_size; /* INITRD_SIZE */ + /* comes from Loader */ + if (area_size) { + ret = boot_reserve_physmem(area_base, area_size, + boot_loader_mem_type, + BOOT_CAN_BE_INTERSECTIONS); + if (ret != 0) { + BOOT_BUG("Could not reserve initial ramdisk area: " + "base addr 0x%lx size 0x%lx page size 0x%x", + area_base, area_size, E2K_INITRD_PAGE_SIZE); + } + boot_initrd_phys_base = area_base; + boot_initrd_size = area_size; + boot_printk("The initial ramdisk area: " + "base addr 0x%lx size 0x%lx page size 0x%x\n", + area_base, area_size, E2K_INITRD_PAGE_SIZE); + } else { + boot_printk("Initial ramdisk is empty\n"); + } +#endif /* CONFIG_BLK_DEV_INITRD */ + + /* + * Reserv MP configuration table + */ + +#ifdef CONFIG_L_IO_APIC + if (boot_info->mp_table_base != (e2k_addr_t)0UL) + boot_reserve_mp_table(boot_info); +#endif /* CONFIG_L_IO_APIC */ +} + +#ifdef CONFIG_NUMA +static void __init +boot_node_set_dup_kernel(void *dup_start) +{ + e2k_addr_t data_offset; + + if (dup_start == (void *)-1) + BOOT_BUG("Invalid or was not allocated duplicated " + "kernel base\n"); + boot_kernel_phys_base = (e2k_addr_t)dup_start; + DebugNUMA("boot_node_set_dup_kernel() set kernel base to 0x%lx\n", + (e2k_addr_t)dup_start); + + boot_text_phys_base = (e2k_addr_t)dup_start; + 
boot_data_phys_base = boot_node_data_phys_base(BOOT_BS_NODE_ID); + boot_data_size = boot_node_data_size(BOOT_BS_NODE_ID); + data_offset = (e2k_addr_t)__node_data_start - KERNEL_BASE; + boot_dup_data_phys_base = (e2k_addr_t)dup_start + data_offset; +} + +static void __init +boot_node_duplicate_kernel(boot_info_t *bootblock) +{ + e2k_addr_t area_base; + e2k_addr_t area_end; + e2k_size_t area_size; + e2k_size_t data_offset; + void *dup_start; + int node_id = boot_numa_node_id(); + + if (BOOT_TEST_AND_SET_NODE_LOCK(boot_node_kernel_dup_lock, + boot_node_kernel_duplicated)) { + DebugNUMA("boot_node_duplicate_kernel() kernel was " + "duplicated already on node\n"); + return; + } + area_base = boot_read_OSCUD_lo_reg().OSCUD_lo_base; + area_end = (e2k_addr_t)boot_vp_to_pp(&__node_data_end); + area_end = _PAGE_ALIGN_DOWN(area_end, PAGE_SIZE); + if (area_end <= area_base) + BOOT_BUG("Kernel node duplicate area end 0x%lx <= start 0x%lx", + area_end, area_base); + area_size = area_end - area_base; + data_offset = (e2k_addr_t)boot_vp_to_pp(&__node_data_start) - + area_base; + if (data_offset > area_size) + BOOT_BUG("Kernel node duplicate data offset 0x%lx > all area size 0x%lx", + data_offset, area_size); + boot_dup_data_size = area_size - data_offset; + if (!BOOT_IS_BS_NODE) { + dup_start = boot_the_node_try_alloc_pages(node_id, + area_size, BOOT_E2K_KERNEL_PAGE_SIZE, + kernel_image_mem_type); + boot_kernel_phys_base = (e2k_addr_t)dup_start; + boot_text_size = boot_node_text_size(BOOT_BS_NODE_ID); + if (dup_start == (void *)-1) { + BOOT_WARNING("Could not allocate memory on the node #%d to duplicate kernel text, size 0x%lx", + node_id, area_size); + } else { + boot_fast_memcpy(dup_start, (char *)area_base, + area_size); + boot_atomic_inc(&boot_early_node_has_dup_kernel_num); + DebugNUMA("boot_node_duplicate_kernel() allocated " + "area and duplicate to 0x%lx, size 0x%lx\n", + (e2k_addr_t)dup_start, area_size); + boot_node_set_dup_kernel(dup_start); + } + } else { + dup_start = 
(void *)boot_kernel_phys_base; + DebugNUMA("boot_node_duplicate_kernel() node " + "is BS NODE area 0x%lx, size 0x%lx\n", + (e2k_addr_t)dup_start, area_size); + boot_dup_data_phys_base = (e2k_addr_t)dup_start + data_offset; + } + BOOT_NODE_UNLOCK(boot_node_kernel_dup_lock, + boot_node_kernel_duplicated); +} + +static void __init +boot_node_set_duplicated_mode(void) +{ + int has_not_dup = 0; + int node_id = boot_numa_node_id(); + int dup_nid; + int nid; + int dup_nodes_num = 0; + + if (BOOT_TEST_AND_SET_NODE_LOCK(boot_node_kernel_dup_lock, + boot_node_set_kernel_duplicated)) { + DebugNUMA("boot_node_set_duplicated_mode() kernel was " + "set duplicated mode already on node\n"); + return; + } + if (!BOOT_EARLY_THE_NODE_HAS_DUP_KERNEL(node_id)) { + has_not_dup = 1; + dup_nid = boot_early_get_next_node_has_dup_kernel(node_id); + if (dup_nid >= MAX_NUMNODES || dup_nid < 0) + BOOT_BUG("Could not find node with duplicated kernel to share it\n"); + DebugNUMA("boot_node_set_duplicated_mode() node has not " + "own copy of kernel image and will use NODE #%d " + "image and page table\n", + dup_nid); + } else { + dup_nid = node_id; + DebugNUMA("boot_node_set_duplicated_mode() node has own " + "copy of kernel image from 0x%lx\n", + boot_kernel_phys_base); + } + if (BOOT_IS_BS_NODE) { + dup_nodes_num = boot_atomic_read( + &boot_early_node_has_dup_kernel_num); + } + + boot_for_each_node_has_online_mem(nid) { + if (!BOOT_EARLY_THE_NODE_HAS_DUP_KERNEL(nid)) + continue; + boot_the_node_dup_kernel_nid(nid)[node_id] = dup_nid; + DebugNUMA("boot_node_set_duplicated_mode() set " + "duplicated node id 0x%px to #%d on node #%d\n", + &(boot_the_node_dup_kernel_nid(nid)[node_id]), + boot_the_node_dup_kernel_nid(nid)[node_id], nid); + if (!has_not_dup) { + boot_the_node_set_has_dup_kernel(nid, node_id); + } + if (BOOT_IS_BS_NODE) { + boot_atomic_set(&boot_the_node_has_dup_kernel_num(nid), + dup_nodes_num); + DebugNUMA("boot_node_set_duplicated_mode() set " + "duplicated nodes number 0x%px to %d 
on " + "node #%d\n", + &(boot_the_node_has_dup_kernel_num(nid)), + boot_the_node_has_dup_kernel_num(nid), nid); + } + boot_the_node_pg_dir(nid)[node_id] = + __boot_va(boot_vpa_to_pa( + (e2k_addr_t)boot_the_node_root_pt(dup_nid))); + DebugNUMA("boot_node_set_duplicated_mode() set " + "pg_dir pointer 0x%px to 0x%lx on node #%d\n", + &(boot_the_node_pg_dir(nid)[node_id]), + boot_the_node_pg_dir(nid)[node_id], nid); + } + BOOT_NODE_UNLOCK(boot_node_kernel_dup_lock, + boot_node_set_kernel_duplicated); +} + +static void __init +boot_node_set_kernel_base(void) +{ + int dup_nid; + + if (BOOT_EARLY_NODE_HAS_DUP_KERNEL()) { + DebugNUMA("boot_node_set_kernel_base() node has own copy and " + "set already kernel base of copy\n"); + return; + } + if (BOOT_TEST_AND_SET_NODE_LOCK(boot_node_kernel_dup_lock, + boot_node_kernel_base_is_set)) { + DebugNUMA("boot_node_set_kernel_base() kernel base was " + "set already on node\n"); + return; + } + dup_nid = boot_my_node_dup_kernel_nid; + if (dup_nid >= MAX_NUMNODES || dup_nid < 0) + BOOT_BUG("Invalid duplicated kernel node id %d\n", dup_nid); + boot_node_set_dup_kernel((void *)boot_node_kernel_phys_base(dup_nid)); + BOOT_NODE_UNLOCK(boot_node_kernel_dup_lock, + boot_node_kernel_base_is_set); +} +#endif /* CONFIG_NUMA */ + +static int __init +boot_is_pfn_valid(e2k_size_t pfn) +{ + node_phys_mem_t *all_nodes_mem = NULL; + int nodes_num; + int cur_nodes_num = 0; + int node; + short bank; + + all_nodes_mem = boot_vp_to_pp((node_phys_mem_t *)boot_phys_mem); + nodes_num = boot_phys_mem_nodes_num; + for (node = 0; node < L_MAX_MEM_NUMNODES; node ++) { + node_phys_mem_t *node_mem = &all_nodes_mem[node]; + boot_phys_bank_t *node_banks; + + if (cur_nodes_num >= nodes_num) + break; /* no more nodes with memory */ + if (node_mem->pfns_num == 0) + continue; /* node has not memory */ + node_banks = node_mem->banks; + cur_nodes_num ++; + bank = node_mem->first_bank; + while (bank >= 0) { + boot_phys_bank_t *phys_bank = &node_banks[bank]; + e2k_addr_t 
bank_pfn; + + if (phys_bank->pages_num == 0) { + /* bank in the list has not pages */ + BOOT_BUG("Node #%d bank #%d at the list " + "has not memory pages", + node, bank); + } + bank_pfn = phys_bank->base_addr >> PAGE_SHIFT; + if (pfn >= bank_pfn && + pfn < bank_pfn + phys_bank->pages_num) + return 1; + bank = phys_bank->next; + } + if (cur_nodes_num >= nodes_num) + break; /* no more nodes with memory */ + } + return 0; +} + +/* + * Map into the virtual space all physical areas used by kernel while + * boot-time initialization and needed later. + * All the mapped areas enumerate below. If a some new area will be used, + * then it should be added to the list of already known ones. + */ + +void __init boot_map_kernel_image(bool populate_on_host) +{ + e2k_addr_t kernel_base; + e2k_addr_t bs_text_phys_base; + e2k_addr_t text_phys_base; + e2k_addr_t text_virt_base; + e2k_size_t text_size; + e2k_addr_t init_base; + e2k_size_t init_size; +#ifdef CONFIG_NUMA + e2k_addr_t dup_data_phys_base; + e2k_addr_t dup_data_virt_base; + e2k_size_t dup_data_size; + e2k_addr_t rem_text_phys_base = 0; + e2k_addr_t rem_text_virt_base = 0; + e2k_size_t rem_text_size; + e2k_addr_t rem_text_end; + pgprot_t rem_text_prot; +#endif /* CONFIG_NUMA */ +#if defined(CONFIG_NUMA) + e2k_size_t map_size; +#endif /* CONFIG_NUMA */ + e2k_addr_t data_phys_base; + e2k_addr_t data_virt_base; + e2k_size_t data_size; + e2k_addr_t area_base; + e2k_addr_t area_offset; + e2k_size_t area_size; + e2k_addr_t area_virt_base; + int is_bs_node = BOOT_IS_BS_NODE; + int ret; + + /* + * Map the kernel image 'text/data/bss' segments. + * 'text' and 'data/bss' segments can intersect or one can include + * other. 
+ */ + + if (!BOOT_TEST_AND_SET_NODE_LOCK(boot_node_map_lock, + boot_node_image_mapped)) { +#ifdef CONFIG_NUMA + if (!BOOT_EARLY_THERE_IS_DUP_KERNEL && !is_bs_node) { + DebugNUMA("boot_map_kernel_image() node " + "has not own page table and will use " + "BS image mapping\n"); + goto no_mapping; + } else { + DebugNUMA("boot_map_kernel_image() will map kernel " + "image\n"); + } +#endif /* CONFIG_NUMA */ + kernel_base = boot_kernel_phys_base; + text_phys_base = boot_text_phys_base; + text_size = boot_text_size; + bs_text_phys_base = text_phys_base; + DebugNUMA("boot_map_kernel_image() text phys base 0x%lx, size " + "0x%lx\n", + text_phys_base, text_size); + + data_phys_base = boot_data_phys_base; + data_size = boot_data_size; + DebugNUMA("boot_map_kernel_image() data phys base 0x%lx, size " + "0x%lx\n", + data_phys_base, data_size); + +#ifdef CONFIG_NUMA + dup_data_phys_base = boot_dup_data_phys_base; + dup_data_size = boot_dup_data_size; + if (is_bs_node) { + DebugNUMA("boot_map_kernel_image() node " + "is BS node, so does not duplicate kernel, " + "BS image from 0x%lx\n", + text_phys_base); + } else if (!boot_node_has_dup_kernel()) { + DebugNUMA("boot_map_kernel_image() node " + "has not duplicated kernel image and will use " + "image of node #%d from 0x%lx\n", + text_phys_base, boot_my_node_dup_kernel_nid); + goto no_mapping; + } else { + bs_text_phys_base = + boot_node_text_phys_base(BOOT_BS_NODE_ID); + DebugNUMA("boot_map_kernel_image() node " + "has duplicated kernel image from 0x%lx\n", + text_phys_base); + } + DebugNUMA("boot_map_kernel_image() dup data phys base 0x%lx, " + "size 0x%lx\n", + dup_data_phys_base, dup_data_size); +#else + bs_text_phys_base = text_phys_base; +#endif /* CONFIG_NUMA */ + + area_virt_base = KERNEL_BASE; + + if (is_bs_node && text_phys_base > data_phys_base) { + BOOT_BUG("The kernel 'text' segment base addr " + "0x%lx > 0x%lx 'data' segment base", + text_phys_base, data_phys_base); + } + + text_virt_base = area_virt_base; +#if 
defined(CONFIG_NUMA) + data_virt_base = _PAGE_ALIGN_UP(text_virt_base + + (data_phys_base - bs_text_phys_base), + E2K_SMALL_PAGE_SIZE); + data_phys_base = _PAGE_ALIGN_UP(data_phys_base, + E2K_SMALL_PAGE_SIZE); + DebugNUMA("boot_map_kernel_image() UP data phys base 0x%lx, " + "size 0x%lx\n", + data_phys_base, data_size); + dup_data_phys_base = _PAGE_ALIGN_UP(dup_data_phys_base, + E2K_SMALL_PAGE_SIZE); + DebugNUMA("boot_map_kernel_image() UP dup data phys base " + "0x%lx, size 0x%lx\n", + dup_data_phys_base, dup_data_size); +#else /* ! CONFIG_NUMA */ + data_virt_base = _PAGE_ALIGN_UP(text_virt_base + + (data_phys_base - bs_text_phys_base), + BOOT_E2K_KERNEL_PAGE_SIZE); + data_phys_base = _PAGE_ALIGN_UP(data_phys_base, + BOOT_E2K_KERNEL_PAGE_SIZE); +#endif /* CONFIG_NUMA */ + data_size += (boot_data_phys_base - data_phys_base); + DebugNUMA("boot_map_kernel_image() updated data size: phys " + "base 0x%lx, size 0x%lx\n", + data_phys_base, data_size); +#ifdef CONFIG_NUMA + if (is_bs_node && dup_data_phys_base != data_phys_base) { + BOOT_BUG("The kernel 'data' segment base " + "addr 0x%lx is not the same as node " + "duplicated data base 0x%lx", + data_phys_base, dup_data_phys_base); + } +#endif /* CONFIG_NUMA */ +#ifdef CONFIG_NUMA + dup_data_size = _PAGE_ALIGN_DOWN(dup_data_size, + E2K_SMALL_PAGE_SIZE); + dup_data_size += (boot_dup_data_phys_base - dup_data_phys_base); + dup_data_virt_base = data_virt_base; + DebugNUMA("boot_map_kernel_image() down dup data size: phys " + "base 0x%lx, size 0x%lx\n", + dup_data_phys_base, dup_data_size); + data_phys_base += dup_data_size; + data_virt_base += dup_data_size; + data_size -= dup_data_size; + DebugNUMA("boot_map_kernel_image() update data phys " + "base 0x%lx, size 0x%lx\n", + data_phys_base, data_size); + rem_text_end = text_phys_base + text_size; + rem_text_end = _PAGE_ALIGN_DOWN(rem_text_end, + BOOT_E2K_KERNEL_PAGE_SIZE); + rem_text_phys_base = _PAGE_ALIGN_UP(text_phys_base + text_size, + BOOT_E2K_KERNEL_PAGE_SIZE); + 
DebugNUMA("boot_map_kernel_image() rem text phys " + "base 0x%lx, end 0x%lx\n", + rem_text_phys_base, rem_text_end); + if (rem_text_end > dup_data_phys_base) { + /* + * Intersection of kernel text last page and + * duplicated data + */ + rem_text_size = dup_data_phys_base - rem_text_phys_base; + DebugNUMA("boot_map_kernel_image() rem text size " + "0x%lx\n", + rem_text_size); + text_size -= rem_text_size; + rem_text_virt_base = text_virt_base + + rem_text_phys_base - text_phys_base; + rem_text_prot = PAGE_KERNEL_TEXT; + DebugNUMA("boot_map_kernel_image() update text size: " + "phys base 0x%lx, size 0x%lx\n", + text_phys_base, text_size); + } else { + rem_text_size = 0; + DebugNUMA("boot_map_kernel_image() empty rem text size " + "0x%lx\n", + rem_text_size); + } +#endif /* CONFIG_NUMA */ + + ret = boot_map_phys_area(text_phys_base, text_size, + text_virt_base, + PAGE_KERNEL_TEXT, BOOT_E2K_KERNEL_PAGE_SIZE, + false, /* do not ignore if text mapping virtual */ + /* area is busy */ + populate_on_host); + if (ret <= 0) { + BOOT_BUG("Could not map kernel 'text' segment: " + "base addr 0x%lx size 0x%lx page size 0x%x to " + "virtual addr 0x%lx", + text_phys_base, text_size, + BOOT_E2K_KERNEL_PAGE_SIZE, + text_virt_base); + } + boot_text_virt_base = text_virt_base; + boot_printk("The kernel 'text' segment: " + "base addr 0x%lx size 0x%lx is mapped to %d virtual " + "page(s) base addr 0x%lx page size 0x%x\n", + text_phys_base, text_size, ret, text_virt_base, + BOOT_E2K_KERNEL_PAGE_SIZE); + +#ifdef CONFIG_NUMA + if (rem_text_size != 0) { + ret = boot_map_phys_area(rem_text_phys_base, + rem_text_size, + rem_text_virt_base, + rem_text_prot, E2K_SMALL_PAGE_SIZE, + false, /* do not ignore if data mapping */ + /* virtual area is busy */ + populate_on_host); + + if (ret <= 0) { + BOOT_BUG("Could not map kernel ending of " + "'text' segment: base addr 0x%lx size " + "0x%lx page size 0x%x to virtual addr " + "0x%lx", + rem_text_phys_base, rem_text_size, + E2K_SMALL_PAGE_SIZE, + 
rem_text_virt_base); + } + boot_printk("The kernel ending of 'text' segment: " + "base addr 0x%lx size 0x%lx is mapped to %d " + "virtual page(s) base addr 0x%lx page size " + "0x%x\n", + rem_text_phys_base, rem_text_size, ret, + rem_text_virt_base, E2K_SMALL_PAGE_SIZE); + } + if (dup_data_size != 0) { + ret = boot_map_phys_area(dup_data_phys_base, + dup_data_size, + dup_data_virt_base, + PAGE_KERNEL_DATA, E2K_SMALL_PAGE_SIZE, + false, /* do not ignore if data mapping */ + /* virtual area is busy */ + populate_on_host); + + if (ret <= 0) { + BOOT_BUG("Could not map kernel ' duplicated " + "data/bss' area: base addr 0x%lx size " + "0x%lx page size 0x%x to virtual addr " + "0x%lx", + dup_data_phys_base, dup_data_size, + E2K_SMALL_PAGE_SIZE, + dup_data_virt_base); + } + boot_dup_data_virt_base = dup_data_virt_base + + (boot_dup_data_phys_base - dup_data_phys_base); + boot_printk("The kernel 'duplicated data/bss' area: " + "base addr 0x%lx size 0x%lx is mapped to %d " + "virtual page(s) base addr 0x%lx page size " + "0x%x\n", + dup_data_phys_base, dup_data_size, ret, + dup_data_virt_base, + E2K_SMALL_PAGE_SIZE); + } +#endif /* CONFIG_NUMA */ + + area_virt_base = (e2k_addr_t)__start_ro_after_init; + if (area_virt_base < (e2k_addr_t)data_virt_base) { + BOOT_BUG("Kernel image segment '.data.ro_after_init' " + "start addr 0x%lx is out of common " + "data base 0x%lx\n", + area_virt_base, data_virt_base); + } + area_offset = area_virt_base - data_virt_base; + area_base = data_phys_base + area_offset; + area_size = __end_ro_after_init - + __start_ro_after_init; + if (area_size != 0) { + ret = boot_map_phys_area(area_base, area_size, + area_virt_base, + PAGE_KERNEL_DATA, PAGE_SIZE, + false, /* do not ignore if data mapping */ + /* virtual area is busy */ + populate_on_host); + if (ret <= 0) { + BOOT_BUG("Could not map kernel " + "'.data.ro_after_init' " + "segment: base addr 0x%lx size 0x%lx " + "page size 0x%x to virtual " + "addr 0x%lx\n", + area_base, area_size, PAGE_SIZE, 
+ area_virt_base); + } + boot_printk("The kernel '.data.ro_after_init' segment: " + "base addr 0x%lx size 0x%lx is mapped " + "to %d virtual page(s) base addr 0x%lx " + "page size 0x%x\n", + area_base, area_size, ret, + area_virt_base, PAGE_SIZE); + } + data_phys_base += area_size; + data_virt_base += area_size; + data_size -= area_size; + DebugNUMA("boot_map_kernel_image() update data phys " + "base 0x%lx, virt base 0x%lx, size 0x%lx\n", + data_phys_base, data_virt_base, data_size); + + area_virt_base = (e2k_addr_t)__init_text_begin; + if (area_virt_base < (e2k_addr_t)data_virt_base) { + BOOT_BUG("Kernel image segment '.init.text' " + "start addr 0x%lx is out of common " + "data base 0x%lx\n", + area_virt_base, data_virt_base); + } + area_offset = area_virt_base - data_virt_base; + init_base = data_phys_base + area_offset; + init_size = __init_text_end - __init_text_begin; + ret = boot_map_phys_area(init_base, init_size, + area_virt_base, + PAGE_KERNEL_TEXT, PAGE_SIZE, + false, /* do not ignore if data mapping */ + /* virtual area is busy */ + populate_on_host); + if (ret <= 0) { + BOOT_BUG("Could not map kernel '.init.text' segment: " + "base addr 0x%lx size 0x%lx page size 0x%x " + "to virtual addr 0x%lx\n", + init_base, init_size, PAGE_SIZE, + area_virt_base); + } + boot_printk("The kernel '.init.text' segment: " + "base addr 0x%lx size 0x%lx is mapped to %d virtual " + "page(s) base addr 0x%lx page size 0x%x\n", + init_base, init_size, ret, + area_virt_base, PAGE_SIZE); + + area_virt_base = (e2k_addr_t)__init_data_begin; + if (area_virt_base < (e2k_addr_t)data_virt_base) { + BOOT_BUG("Kernel image segment '.init.data' " + "start addr 0x%lx is out of common " + "data base 0x%lx\n", + area_virt_base, data_virt_base); + } + area_offset = area_virt_base - data_virt_base; + init_base = data_phys_base + area_offset; + init_size = __init_data_end - __init_data_begin; + ret = boot_map_phys_area(init_base, init_size, + area_virt_base, + PAGE_KERNEL_DATA, PAGE_SIZE, + 
false, /* do not ignore if data mapping */ + /* virtual area is busy */ + populate_on_host); + if (ret <= 0) { + BOOT_BUG("Could not map kernel '.init.data' segment: " + "base addr 0x%lx size 0x%lx page size 0x%x " + "to virtual addr 0x%lx", + init_base, init_size, PAGE_SIZE, + area_virt_base); + } + boot_printk("The kernel '.init.data' segment: " + "base addr 0x%lx size 0x%lx is mapped to %d virtual " + "page(s) base addr 0x%lx page size 0x%x\n", + init_base, init_size, ret, + area_virt_base, PAGE_SIZE); + + area_virt_base = (e2k_addr_t)__common_data_begin; + area_offset = area_virt_base - data_virt_base; + if (area_offset < 0) { + BOOT_BUG("The kernel 'common not duplicated data' " + "start addr 0x%lx is lower of data segment " + "virtual addr 0x%lx", + area_virt_base, data_virt_base); + } + data_size -= area_offset; + data_virt_base += area_offset; + data_phys_base += area_offset; + +#if defined(CONFIG_NUMA) + map_size = data_phys_base & (BOOT_E2K_KERNEL_PAGE_SIZE - 1); + if (map_size != 0) { + map_size = _PAGE_ALIGN_DOWN(map_size, + BOOT_E2K_KERNEL_PAGE_SIZE) - + map_size; + if (map_size > data_size) + map_size = data_size; + ret = boot_map_phys_area(data_phys_base, map_size, + data_virt_base, + PAGE_KERNEL_DATA, E2K_SMALL_PAGE_SIZE, + false, /* do not ignore if data mapping */ + /* virtual area is busy */ + populate_on_host); + + if (ret <= 0) { + BOOT_BUG("Could not map kernel 'data/bss' " + "segment: base addr 0x%lx size 0x%lx " + "page size 0x%x to virtual addr 0x%lx", + data_phys_base, map_size, + E2K_SMALL_PAGE_SIZE, + data_virt_base); + } + boot_printk("The kernel 'data/bss' segment: " + "base addr 0x%lx size 0x%lx is mapped to %d " + "virtual small page(s) base addr 0x%lx page " + "size 0x%x\n", + data_phys_base, map_size, ret, data_virt_base, + E2K_SMALL_PAGE_SIZE); + data_size -= map_size; + data_phys_base += map_size; + data_virt_base += map_size; + } +#endif /* CONFIG_NUMA */ + boot_data_virt_base = data_virt_base + + (boot_data_phys_base - 
data_phys_base); + if (data_size != 0) { + ret = boot_map_phys_area(data_phys_base, data_size, + data_virt_base, + PAGE_KERNEL_DATA, BOOT_E2K_KERNEL_PAGE_SIZE, +#if !defined(CONFIG_NUMA) + true, /* ignore if data mapping virtual */ + /* area is busy */ +#else /* CONFIG_NUMA */ + false, /* do not ignore if data mapping */ + /* virtual area is busy */ +#endif /* ! CONFIG_NUMA */ + populate_on_host); + if (ret <= 0) { + BOOT_BUG("Could not map kernel 'data/bss' " + "segment: base addr 0x%lx size 0x%lx " + "page size 0x%x to virtual addr 0x%lx", + data_phys_base, data_size, + BOOT_E2K_KERNEL_PAGE_SIZE, + data_virt_base); + } + boot_printk("The kernel 'data/bss' segment: " + "base addr 0x%lx size 0x%lx is mapped to %d " + "virtual page(s) base addr 0x%lx page size " + "0x%x\n", + data_phys_base, data_size, ret, data_virt_base, + BOOT_E2K_KERNEL_PAGE_SIZE); + } + +#ifndef CONFIG_NUMA + area_base = (u64) boot_vp_to_pp((void *) empty_zero_page); + area_virt_base = (unsigned long) empty_zero_page; + ret = boot_map_phys_area(area_base, PAGE_SIZE, area_virt_base, + PAGE_KERNEL_DATA, PAGE_SIZE, false, + populate_on_host); + if (ret <= 0) { + BOOT_BUG("Could not map kernel 'zero_page' segment: base addr 0x%lx size 0x%lx page size 0x%x to virtual addr 0x%lx", + area_base, PAGE_SIZE, PAGE_SIZE, + area_virt_base); + } + boot_printk("The kernel 'zero_page' segment: base addr 0x%lx size 0x%lx is mapped to %d virtual page(s) base addr 0x%lx page size 0x%x\n", + area_base, area_size, ret, area_virt_base, PAGE_SIZE); +#endif + + if (is_bs_node) { + area_virt_base = KERNEL_BASE; + area_size = KERNEL_END - KERNEL_BASE; + boot_kernel_image_size = area_size; + boot_printk("The kernel full image: " + "is mapped from base addr 0x%lx size 0x%lx\n", + area_virt_base, area_size); + } +#ifdef CONFIG_NUMA +no_mapping: +#endif /* CONFIG_NUMA */ + BOOT_NODE_UNLOCK(boot_node_map_lock, boot_node_image_mapped); + } +} + +void __init boot_map_kernel_boot_stacks(void) +{ + e2k_addr_t area_phys_base; + 
e2k_addr_t area_offset; + e2k_addr_t area_virt_base; + + /* + * Map the kernel boot-time hardware procedures stack (PS). + * The first PS maps to virtual space from the very begining + * of the area, dedicated for hardware kernel stacks. + * The following stacks are allocated from end of the previous stack. + */ + area_phys_base = boot_boot_ps_phys_base; + area_virt_base = (e2k_addr_t)__boot_va(boot_vpa_to_pa(area_phys_base)); + boot_boot_ps_virt_base = area_virt_base; + boot_printk("The kernel boot-time procedure stack: %d pages from 0x%lx\n", + boot_boot_ps_size / PAGE_SIZE, boot_boot_ps_virt_base); + + /* + * Map the kernel boot-time hardware procedure chain stack (PCS). + * PCS maps to virtual space right after PS + */ + area_phys_base = boot_boot_pcs_phys_base; + area_virt_base = (e2k_addr_t)__boot_va(boot_vpa_to_pa(area_phys_base)); + boot_boot_pcs_virt_base = area_virt_base; + boot_printk("The kernel boot-time chain stack: %d pages from 0x%lx\n", + boot_boot_pcs_size / PAGE_SIZE, boot_boot_pcs_virt_base); + + /* + * Map the kernel boot-time data stack (user stack) (US). + * The first stack maps to virtual space from the very begining + * of the area, dedicated for all data kernel stacks. + * The following stacks are allocated from end of the previous stack. 
+ */ + area_phys_base = boot_boot_stack_phys_base; + area_offset = boot_boot_stack_phys_offset; + area_virt_base = (e2k_addr_t)__boot_va(boot_vpa_to_pa(area_phys_base)); + boot_boot_stack_virt_base = area_virt_base; + boot_boot_stack_virt_offset = area_offset; + boot_printk("The kernel boot-time data stack: %d pages from 0x%lx\n", + boot_boot_stack_size / PAGE_SIZE, + boot_boot_stack_virt_base); +} + +void __init boot_map_all_phys_memory(void) +{ + long ret; + + /* + * Map the available physical memory into virtual space to direct + * access to physical memory using kernel pa <-> va translations + * All physical memory pages are mapped to virtual space starting + * from 'PAGE_OFFSET' + */ + + if (!BOOT_TEST_AND_SET_NODE_LOCK(boot_node_map_lock, + boot_node_mem_mapped)) { +#ifdef CONFIG_NUMA + if (!boot_node_has_dup_kernel()) { + DebugNUMA("boot_map_all_phys_memory() node " + "has not own page table and will use " + "node #%d physical memory mapping\n", + boot_my_node_dup_kernel_nid); + goto no_mem_mapping; + } else { + DebugNUMA("boot_map_all_phys_memory() will map all " + "physical memory\n"); + } +#endif /* CONFIG_NUMA */ + boot_printk("The physical memory start address 0x%lx, " + "end 0x%lx\n", + boot_start_of_phys_memory, + boot_end_of_phys_memory); + ret = boot_map_physmem(PAGE_MAPPED_PHYS_MEM, + IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) ? PAGE_SIZE : + 0 /* any max possible page size */); + if (ret <= 0) { + BOOT_BUG("Could not map all physical memory: error %ld", + ret); + } + boot_printk("All physical memory is mapped to %d virtual " + "pages from base offset 0x%lx\n", + ret, (e2k_addr_t)__boot_va(boot_start_of_phys_memory)); +#ifdef CONFIG_NUMA +no_mem_mapping: +#endif /* CONFIG_NUMA */ + BOOT_NODE_UNLOCK(boot_node_map_lock, boot_node_mem_mapped); + } +} + +static void __init +boot_map_low_io_memory(void) +{ + e2k_addr_t area_phys_base; + e2k_size_t area_size; + e2k_addr_t area_virt_base; + int ret; + + /* + * Map the low VGAMEM. 
+ */ + + if (!BOOT_TEST_AND_SET_NODE_LOCK(boot_node_map_lock, + boot_node_io_mapped)) { +#ifdef CONFIG_NUMA + if (!boot_node_has_dup_kernel()) { + goto no_io_mapping; + } +#endif /* CONFIG_NUMA */ + area_phys_base = VGA_VRAM_PHYS_BASE; + area_size = VGA_VRAM_SIZE; + area_virt_base = + (e2k_addr_t)__boot_va(area_phys_base); + ret = boot_map_phys_area(area_phys_base, area_size, + area_virt_base, + PAGE_X86_IO_PORTS, E2K_SMALL_PAGE_SIZE, + false, /* do not ignore if data mapping virtual */ + /* area is busy */ + false); /* populate map on host? */ + if (ret <= 0) { + BOOT_BUG("Could not map low VGAMEM area: " + "base addr 0x%lx size 0x%lx page size 0x%x to " + "virtual addr 0x%lx", + area_phys_base, area_size, E2K_SMALL_PAGE_SIZE, + area_virt_base); + } + boot_printk("The low VGAMEM area: " + "base addr 0x%lx size 0x%lx is mapped to %d virtual " + "page(s) base addr 0x%lx page size 0x%x\n", + area_phys_base, area_size, ret, area_virt_base, + E2K_SMALL_PAGE_SIZE); +#ifdef CONFIG_NUMA +no_io_mapping: +#endif /* CONFIG_NUMA */ + BOOT_NODE_UNLOCK(boot_node_map_lock, boot_node_io_mapped); + } +} + +static void __init +boot_map_high_io_memory(bool bsp) +{ + unsigned long first_base; + e2k_addr_t area_phys_base; + e2k_size_t area_size; + e2k_addr_t area_virt_base; + int ret, node; + + /* + * Map the PCI/IO ports area to allow IO operations on system console. 
+ */ + + if (!BOOT_TEST_AND_SET_NODE_LOCK(boot_node_map_lock, + boot_node_ports_mapped)) { +#ifdef CONFIG_NUMA + if (!boot_node_has_dup_kernel()) + goto no_ports_mapping; +#endif /* CONFIG_NUMA */ + area_phys_base = boot_machine.x86_io_area_base; + if (BOOT_HAS_MACHINE_E2K_FULL_SIC) + area_size = E2K_FULL_SIC_IO_AREA_SIZE; + else if (BOOT_HAS_MACHINE_E2K_LEGACY_SIC) + area_size = E2K_LEGACY_SIC_IO_AREA_SIZE; + else + BOOT_BUG("Unknown x86 I/O ports area size"); + area_virt_base = E2K_X86_IO_AREA_BASE; + ret = boot_map_phys_area(area_phys_base, area_size, + area_virt_base, + PAGE_X86_IO_PORTS, BOOT_E2K_X86_IO_PAGE_SIZE, + false, /* do not ignore if data mapping virtual */ + /* area is busy */ + false); /* populate map on host? */ + if (ret <= 0) { + BOOT_BUG("Could not map PCI/IO ports area: " + "base addr 0x%lx size 0x%lx page size 0x%x to " + "virtual addr 0x%lx", + area_phys_base, area_size, + BOOT_E2K_X86_IO_PAGE_SIZE, area_virt_base); + } + boot_printk("The PCI/IO ports area: " + "base addr 0x%lx size 0x%lx is mapped to %d virtual " + "page(s) base addr 0x%lx page size 0x%x\n", + area_phys_base, area_size, ret, area_virt_base, + BOOT_E2K_X86_IO_PAGE_SIZE); +#ifdef CONFIG_NUMA +no_ports_mapping: +#endif /* CONFIG_NUMA */ + BOOT_NODE_UNLOCK(boot_node_map_lock, boot_node_ports_mapped); + } + + if (!BOOT_TEST_AND_SET_NODE_LOCK(boot_node_map_lock, + boot_node_hwbug_mapped)) { +#ifdef CONFIG_NUMA + if (!boot_node_has_dup_kernel()) + goto no_hwbug_mapping; +#endif /* CONFIG_NUMA */ + /* + * Only 4 nodes on e8c with the problem + * Allocate and map 8 * 4 = 32 pages on every node. + * Then every core will have its own 4 pages: one on every node. 
+ */ + first_base = -1UL; + for (node = 0; node < 4; node++) { + if (!BOOT_IS_MACHINE_E8C || node >= MAX_NUMNODES || + !boot_node_has_online_mem(node)) { + if (first_base == -1UL) { + first_base = (u64) boot_alloc_phys_mem( + 8 * PAGE_SIZE, PAGE_SIZE, + hw_reserved_mem_type); + } + area_phys_base = first_base; + } else { + area_phys_base = (u64) boot_node_alloc_physmem( + node, 8 * PAGE_SIZE, PAGE_SIZE, + hw_reserved_mem_type); + if (first_base == -1UL) + first_base = area_phys_base; + } + if (area_phys_base == -1UL) + BOOT_BUG("Failed to allocate memory for hwbug workaround\n"); + + area_virt_base = node * 8 * PAGE_SIZE + + NATIVE_HWBUG_WRITE_MEMORY_BARRIER_ADDRESS; + + ret = boot_map_phys_area(area_phys_base, 8 * PAGE_SIZE, + area_virt_base, PAGE_USER_RO_ACCESSED, + PAGE_SIZE, + true, /* ignory busy mapping ? */ + false); /* populate map on host ? */ + if (ret <= 0) + BOOT_BUG("Could not map hwbug workaround area: pa 0x%lx to va 0x%lx", + area_phys_base, area_virt_base); + } +#ifdef CONFIG_NUMA +no_hwbug_mapping: +#endif /* CONFIG_NUMA */ + BOOT_NODE_UNLOCK(boot_node_map_lock, boot_node_hwbug_mapped); + } +} + +void __init boot_native_map_all_bootmem(bool bsp, boot_info_t *boot_info) +{ + + /* + * Map the kernel image 'text/data/bss' segments. + */ + boot_map_kernel_image(false); + + /* + * Map the kernel stacks + */ + boot_map_kernel_boot_stacks(); + + /* + * Map all available physical memory + */ + boot_map_all_phys_memory(); + + /* + * Map the low VGAMEM. + */ + boot_map_low_io_memory(); + + /* + * Map all needed physical areas from boot-info. + */ + boot_map_all_bootinfo_areas(boot_info); + + /* + * Map the PCI/IO ports area to allow IO operations on system console. 
+ */ + boot_map_high_io_memory(bsp); +} + +#ifdef CONFIG_L_IO_APIC +/* + * Map the needed memory from MP - tables + */ + +static void __init +boot_map_mp_table(boot_info_t *boot_info) +{ + e2k_addr_t area_phys_base; + e2k_addr_t area_virt_base; + e2k_size_t area_size; + e2k_size_t area_offset; + e2k_addr_t area_pfn; + int ret; + + if (boot_info->mp_table_base == (e2k_addr_t)0UL) + return; + + /* + * MP floating specification table + */ + + area_phys_base = _PAGE_ALIGN_UP(boot_mpf_phys_base, E2K_MPT_PAGE_SIZE); + area_pfn = boot_vpa_to_pa(area_phys_base) >> PAGE_SHIFT; + area_offset = boot_mpf_phys_base - area_phys_base; + area_size = boot_mpf_size + area_offset; + area_virt_base = (e2k_addr_t)__boot_va(boot_vpa_to_pa(area_phys_base)); + if (!boot_is_pfn_valid(area_pfn)) { + ret = boot_map_phys_area(area_phys_base, area_size, + area_virt_base, + PAGE_MPT, E2K_MPT_PAGE_SIZE, + false, /* do not ignore if data mapping virtual */ + /* area is busy */ + false); /* populate map on host? */ + if (ret <= 0) { + BOOT_BUG("Could not map MP floating table page(s): " + "base addr 0x%lx size 0x%lx page size 0x%x to " + "virtual addr 0x%lx", + area_phys_base, area_size, + E2K_MPT_PAGE_SIZE, + area_virt_base); + } + boot_printk("The MP floating table page(s): " + "base addr 0x%lx size 0x%lx is mapped to %d virtual " + "page(s) base addr 0x%lx page size 0x%x\n", + area_phys_base, area_size, ret, area_virt_base, + E2K_MPT_PAGE_SIZE); + } + boot_printk("The MP floating table: base addr 0x%lx size 0x%lx " + "is mapped to virtual base addr 0x%lx\n", + area_phys_base, area_size, area_virt_base); + + /* + * MP configuration table + */ + + if (boot_mpc_size == 0) + return; + + area_phys_base = _PAGE_ALIGN_UP(boot_mpc_phys_base, E2K_MPT_PAGE_SIZE); + area_pfn = boot_vpa_to_pa(area_phys_base) >> PAGE_SHIFT; + area_offset = boot_mpc_phys_base - area_phys_base; + area_size = boot_mpc_size + area_offset; + area_virt_base = (e2k_addr_t)__boot_va(boot_vpa_to_pa(area_phys_base)); + if 
(!boot_is_pfn_valid(area_pfn)) { + ret = boot_map_phys_area(area_phys_base, area_size, + area_virt_base, + PAGE_MPT, E2K_MPT_PAGE_SIZE, + true, /* ignore if data mapping virtual */ + /* area is busy */ + false); /* populate map on host? */ + if (ret <= 0) { + BOOT_BUG("Could not map MP configuration table " + "page(s): base addr 0x%lx size 0x%lx page " + "size 0x%x to virtual addr 0x%lx", + area_phys_base, area_size, + E2K_MPT_PAGE_SIZE, + area_virt_base); + } + boot_printk("The MP configuration table page(s): " + "base addr 0x%lx size 0x%lx is mapped to %d virtual " + "page(s) base addr 0x%lx page size 0x%x\n", + area_phys_base, area_size, ret, area_virt_base, + E2K_MPT_PAGE_SIZE); + } + boot_printk("The MP configuration table : base addr 0x%lx size 0x%lx " + "is mapped to virtual base addr 0x%lx\n", + area_phys_base, area_size, area_virt_base); +} +#endif /* CONFIG_L_IO_APIC */ + +/* + * Map into the virtual space all needed physical areas from boot-info. + * All the mapped areas enumerate below. If a some new area will be used, + * then it should be added to the list of already known ones. + */ + +void __init boot_map_all_bootinfo_areas(boot_info_t *boot_info) +{ + e2k_addr_t area_phys_base; + e2k_size_t area_size; + e2k_size_t area_offset; + e2k_addr_t area_pfn; + e2k_addr_t area_virt_base; + e2k_addr_t symtab_phys_base; + e2k_addr_t symtab_virt_base; + e2k_size_t symtab_size; + e2k_addr_t strtab_phys_base; + e2k_addr_t strtab_virt_base; + e2k_size_t strtab_size; + int ret = 0; + + + if (!BOOT_TEST_AND_SET_NODE_LOCK(boot_node_map_lock, + boot_node_info_mapped)) { +#ifdef CONFIG_NUMA + if (!boot_node_has_dup_kernel()) { + BOOT_NODE_UNLOCK(boot_node_map_lock, + boot_node_info_mapped); + return; + } +#endif /* CONFIG_NUMA */ + } else { + return; + } + + /* + * Map the bootinfo structure. 
+ */ + area_phys_base = _PAGE_ALIGN_UP(boot_bootinfo_phys_base, + E2K_BOOTINFO_PAGE_SIZE); + area_pfn = boot_vpa_to_pa(area_phys_base) >> PAGE_SHIFT; + area_offset = boot_bootinfo_phys_base - area_phys_base; + area_size = boot_bootinfo_size + area_offset; + area_virt_base = (e2k_addr_t)__boot_va(boot_vpa_to_pa( + area_phys_base)); + + if (!boot_is_pfn_valid(area_pfn)) { + ret = boot_map_phys_area(area_phys_base, area_size, + area_virt_base, + PAGE_BOOTINFO, E2K_BOOTINFO_PAGE_SIZE, + false, /* do not ignore if data mapping virtual */ + /* area is busy */ + false); /* populate map on host? */ + if (ret <= 0) { + BOOT_BUG("Could not map BOOTINFO structue: " + "base addr 0x%lx size 0x%lx page size 0x%x to " + "virtual addr 0x%lx", + area_phys_base, area_size, + E2K_BOOTINFO_PAGE_SIZE, + area_virt_base); + } + boot_printk("The BOOTINFO structure pages: " + "base addr 0x%lx size 0x%lx is mapped to %d virtual " + "page(s) base addr 0x%lx page size 0x%x\n", + area_phys_base, area_size, ret, area_virt_base, + E2K_BOOTINFO_PAGE_SIZE); + } + boot_bootblock_virt = + (bootblock_struct_t *)__boot_va(boot_vpa_to_pa( + boot_bootinfo_phys_base)); + boot_printk("The BOOTINFO structure pages: base addr 0x%lx size 0x%lx " + "is mapped to virtual base addr 0x%lx\n", + area_phys_base, area_size, area_virt_base); + +#ifdef CONFIG_BLK_DEV_INITRD + /* + * Map the memory of initial ramdisk (initrd). + */ + + area_phys_base = boot_initrd_phys_base; /* INITRD_BASE and */ + area_size = boot_initrd_size; /* INITRD_SIZE */ + /* comes from Loader */ + area_pfn = boot_vpa_to_pa(area_phys_base) >> PAGE_SHIFT; + if (area_size && !boot_is_pfn_valid(area_pfn)) { + area_virt_base = (e2k_addr_t)__boot_va(boot_vpa_to_pa( + area_phys_base)); + ret = boot_map_phys_area(area_phys_base, area_size, + area_virt_base, + PAGE_INITRD, E2K_INITRD_PAGE_SIZE, + false, /* do not ignore if data mapping virtual */ + /* area is busy */ + false); /* populate map on host? 
*/ + if (ret <= 0) { + BOOT_BUG("Could not map initial ramdisk area: " + "base addr 0x%lx size 0x%lx page size 0x%x to " + "virtual addr 0x%lx", + area_phys_base, area_size, + E2K_INITRD_PAGE_SIZE, + area_virt_base); + } + boot_printk("The initial ramdisk area: " + "base addr 0x%lx size 0x%lx is mapped to %d virtual " + "page(s) base addr 0x%lx page size 0x%x\n", + area_phys_base, area_size, ret, area_virt_base, + E2K_INITRD_PAGE_SIZE); + } +#endif /* CONFIG_BLK_DEV_INITRD */ + + boot_map_mp_table(boot_info); + + /* + * Map the kernel SYMTAB (symbols table). + */ + + symtab_phys_base = boot_symtab_phys_base; + symtab_size = boot_symtab_size; + + strtab_phys_base = boot_strtab_phys_base; + strtab_size = boot_strtab_size; + if (symtab_size != 0 || strtab_size != 0) + area_virt_base = E2K_KERNEL_NAMETAB_AREA_BASE; + else + area_virt_base = (e2k_addr_t)NULL; + + if (symtab_size == 0) { + symtab_virt_base = (e2k_addr_t)NULL; + } else { + symtab_phys_base = _PAGE_ALIGN_UP(symtab_phys_base, + E2K_NAMETAB_PAGE_SIZE); + symtab_size += (boot_symtab_phys_base - symtab_phys_base); + } + if (strtab_size == 0) { + strtab_virt_base = (e2k_addr_t)NULL; + } else { + strtab_phys_base = _PAGE_ALIGN_UP(strtab_phys_base, + E2K_NAMETAB_PAGE_SIZE); + strtab_size += (boot_strtab_phys_base - strtab_phys_base); + } + if (symtab_size != 0 && strtab_size != 0) { + if (symtab_phys_base <= strtab_phys_base) { + symtab_virt_base = area_virt_base; + strtab_virt_base = symtab_virt_base + + (strtab_phys_base - symtab_phys_base); + } else { + strtab_virt_base = area_virt_base; + symtab_virt_base = strtab_virt_base + + (symtab_phys_base - strtab_phys_base); + } + } else if (symtab_size == 0) { + symtab_virt_base = (e2k_addr_t)NULL; + strtab_virt_base = area_virt_base; + } else { + strtab_virt_base = (e2k_addr_t)NULL; + symtab_virt_base = area_virt_base; + } + + if (symtab_size != 0) { + ret = boot_map_phys_area(symtab_phys_base, symtab_size, + symtab_virt_base, PAGE_KERNEL_NAMETAB, + 
E2K_NAMETAB_PAGE_SIZE, + false, /* do not ignore if symbols table mapping */ + /* virtual area is busy */ + false); /* populate map on host? */ + if (ret <= 0) { + BOOT_BUG("Could not map kernel symbols table: " + "base addr 0x%lx size 0x%lx page size 0x%x to " + "virtual addr 0x%lx", + symtab_phys_base, symtab_size, + E2K_NAMETAB_PAGE_SIZE, + symtab_virt_base); + } + } + boot_symtab_virt_base = symtab_virt_base; + if (symtab_size != 0) { + boot_printk("The kernel symbols table: " + "base addr 0x%lx size 0x%lx is mapped to %d virtual " + "page(s) base addr 0x%lx page size 0x%x\n", + symtab_phys_base, symtab_size, ret, symtab_virt_base, + E2K_NAMETAB_PAGE_SIZE); + } else { + boot_printk("The kernel symbols table is empty\n"); + } + + if (strtab_size != 0) { + ret = boot_map_phys_area(strtab_phys_base, strtab_size, + strtab_virt_base, PAGE_KERNEL_NAMETAB, + E2K_NAMETAB_PAGE_SIZE, + true, /* ignore if strings table mapping virtual */ + /* area is busy */ + false); /* populate map on host? */ + + if (ret <= 0) { + BOOT_BUG("Could not map kernel strings table: " + "base addr 0x%lx size 0x%lx page size 0x%x to " + "virtual addr 0x%lx", + strtab_phys_base, strtab_size, + E2K_NAMETAB_PAGE_SIZE, + strtab_virt_base); + } + } + boot_strtab_virt_base = strtab_virt_base; + if (strtab_size != 0) { + boot_printk("The kernel strings table: " + "base addr 0x%lx size 0x%lx is mapped to %d virtual " + "page(s) base addr 0x%lx page size 0x%x\n", + strtab_phys_base, strtab_size, ret, strtab_virt_base, + E2K_NAMETAB_PAGE_SIZE); + } else { + boot_printk("The kernel strings table is empty\n"); + } + + boot_kernel_symtab = (void *)(symtab_virt_base + + (boot_symtab_phys_base & (E2K_NAMETAB_PAGE_SIZE - 1))); + boot_kernel_symtab_size = boot_symtab_size; + boot_printk("The kernel symbols table: addr 0x%lx size 0x%lx\n", + boot_kernel_symtab, boot_kernel_symtab_size); + boot_kernel_strtab = (void *)(strtab_virt_base + + (boot_strtab_phys_base & (E2K_NAMETAB_PAGE_SIZE - 1))); + 
boot_kernel_strtab_size = boot_strtab_size; + boot_printk("The kernel strings table: addr 0x%lx size 0x%lx\n", + boot_kernel_strtab, boot_kernel_strtab_size); + + BOOT_NODE_UNLOCK(boot_node_map_lock, boot_node_info_mapped); +} + +/* + * Switch kernel execution into the physical space to execution into the + * virtual space. This function should be coded very careful. + * Each the function operator should be weighted, what conseguences it will + * have. + */ + +static __always_inline void +boot_native_kernel_switch_to_virt(bool bsp, int cpuid, + void (*boot_init_sequel_func)(bool bsp, int cpuid, int cpus_to_sync)) +{ + bootmem_areas_t *bootmem = boot_kernel_bootmem; + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_cud_lo_t cud_lo; + e2k_gd_lo_t gd_lo; + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + e2k_usbr_t usbr; + unsigned long loc_disable_caches = boot_disable_caches; + unsigned long loc_disable_secondary_caches = + boot_disable_secondary_caches; + unsigned long loc_disable_IP = boot_disable_IP; + bool loc_enable_l2_cint = boot_enable_l2_cint; + unsigned long mmu_cr = _MMU_CR_KERNEL; +#ifdef CONFIG_SMP + int cpus_to_sync = boot_cpu_to_sync_num; + atomic_t *pv_ops_switched = boot_vp_to_pp(&boot_pv_ops_switched); + atomic_t *error_flag_p = boot_vp_to_pp(&boot_error_flag); +#endif /* CONFIG_SMP */ +#ifdef CONFIG_ONLY_HIGH_PHYS_MEM + register bool flush_caches; + register bool l3_enable = false; + register bool do_i_flush = false; + register int iset_ver; + register unsigned char *node_nbsr; +#endif /* CONFIG_ONLY_HIGH_PHYS_MEM */ + + /* + * Set all needed MMU registers before to turn on virtual addressing + * translation mode + */ + boot_set_kernel_MMU_state_before(); + + /* + * SYNCHRONIZATION POINT #2.1 + * At this point all processors should be here + * After synchronization BSP processor switch PV_OPS + */ + boot_sync_all_processors(); + + /* switch PV_OPS to virtual functions */ + /* WARNING: should not be 
PV_OPS usage from here to completion of */ + /* virtual space switching (call function boot_init_sequel_func()) */ + if (BOOT_IS_BSP(bsp)) { + native_boot_pv_ops_to_ops(); +#ifdef CONFIG_SMP + boot_set_boot_event(pv_ops_switched); + } else { + boot_wait_for_boot_event(pv_ops_switched, error_flag_p); +#endif /* CONFIG_SMP */ + } + + /* + * Calculate hardware procedure and chain stacks pointers + */ + + psp_lo.PSP_lo_half = 0; +#ifndef CONFIG_SMP + psp_lo.PSP_lo_base = bootmem->boot_ps.virt; +#else + psp_lo.PSP_lo_base = bootmem->boot_ps[cpuid].virt; +#endif /* CONFIG_SMP */ + psp_lo._PSP_lo_rw = E2K_PSP_RW_PROTECTIONS; + psp_hi.PSP_hi_half = 0; +#ifndef CONFIG_SMP + psp_hi.PSP_hi_size = bootmem->boot_ps.size; +#else + psp_hi.PSP_hi_size = bootmem->boot_ps[cpuid].size; +#endif /* CONFIG_SMP */ + psp_hi.PSP_hi_ind = 0; + + pcsp_lo.PCSP_lo_half = 0; +#ifndef CONFIG_SMP + pcsp_lo.PCSP_lo_base = bootmem->boot_pcs.virt; +#else + pcsp_lo.PCSP_lo_base = bootmem->boot_pcs[cpuid].virt; +#endif /* CONFIG_SMP */ + pcsp_lo._PCSP_lo_rw = E2K_PCSR_RW_PROTECTIONS; + pcsp_hi.PCSP_hi_half = 0; +#ifndef CONFIG_SMP + pcsp_hi.PCSP_hi_size = bootmem->boot_pcs.size; +#else + pcsp_hi.PCSP_hi_size = bootmem->boot_pcs[cpuid].size; +#endif /* CONFIG_SMP */ + pcsp_hi.PCSP_hi_ind = 0; + + /* + * Turn on virtual addressing translation mode and disable caches + * (write to the MMU control register enables TLB & TLU) + */ + + if (loc_disable_caches != _MMU_CD_EN) { + mmu_cr &= ~_MMU_CR_CD_MASK; + mmu_cr |= (loc_disable_caches & _MMU_CR_CD_MASK); + } + if (loc_disable_secondary_caches) { + mmu_cr &= ~_MMU_CR_CR0_CD; + mmu_cr |= (loc_disable_secondary_caches & _MMU_CR_CR0_CD); + } + if (loc_disable_IP == _MMU_IPD_DIS) { + mmu_cr &= ~_MMU_CR_IPD_MASK; + mmu_cr |= (loc_disable_IP & _MMU_CR_IPD_MASK); + } + + /* set L2 CRC control state */ + boot_native_set_l2_crc_state(loc_enable_l2_cint); + +#ifdef CONFIG_ONLY_HIGH_PHYS_MEM + /* low memory kernel data remapped to equal high memory */ + /* all virtual 
addresses to low data point now to high memory */ + /* so need flush all caches from low physical addresses */ + flush_caches = !BOOT_LOW_MEMORY_ENABLED(); + if (flush_caches) { + iset_ver = boot_machine.native_iset_ver; + if (iset_ver >= E2K_ISET_V4 && boot_machine.L3_enable) + l3_enable = true; + if (!BOOT_TEST_AND_SET_NODE_LOCK(boot_node_flush_lock, + boot_node_flushed)) { + do_i_flush = true; + if (l3_enable) + node_nbsr = BOOT_THE_NODE_NBSR_PHYS_BASE(0); + BOOT_NODE_UNLOCK(boot_node_flush_lock, + boot_node_flushed); + } + } +#endif /* CONFIG_ONLY_HIGH_PHYS_MEM */ + + /* + * Calculate Kernel 'text/data/bss' segment registers + * at virtual space addresses + */ + + cud_lo.CUD_lo_half = 0; +#ifndef CONFIG_NUMA + cud_lo.CUD_lo_base = bootmem->text.virt; +#else /* CONFIG_NUMA */ + cud_lo.CUD_lo_base = bootmem->text.nodes[BOOT_BS_NODE_ID].virt; +#endif /* ! CONFIG_NUMA */ + cud_lo._CUD_lo_rw = E2K_CUD_RW_PROTECTIONS; + cud_lo.CUD_lo_c = CUD_CFLAG_SET; + + gd_lo.GD_lo_half = 0; +#ifndef CONFIG_NUMA + gd_lo.GD_lo_base = bootmem->data.virt; +#else /* CONFIG_NUMA */ + gd_lo.GD_lo_base = bootmem->data.nodes[BOOT_BS_NODE_ID].virt; +#endif /* ! 
CONFIG_NUMA */ + gd_lo._GD_lo_rw = E2K_GD_RW_PROTECTIONS; + + /* + * calculate User LOcal data Stack registers at virtual space + */ + + usbr.USBR_reg = 0; +#ifndef CONFIG_SMP + usbr.USBR_base = bootmem->boot_stack.virt + bootmem->boot_stack.size; +#else + usbr.USBR_base = bootmem->boot_stack[cpuid].virt + + bootmem->boot_stack[cpuid].size; +#endif /* CONFIG_SMP */ + + usd_lo.USD_lo_half = 0; + usd_hi.USD_hi_half = 0; + +#ifndef CONFIG_SMP + usd_lo.USD_lo_base = bootmem->boot_stack.virt + + bootmem->boot_stack.virt_offset; + usd_hi.USD_hi_size = bootmem->boot_stack.virt_offset; +#else + usd_lo.USD_lo_base = bootmem->boot_stack[cpuid].virt + + bootmem->boot_stack[cpuid].virt_offset; + usd_hi.USD_hi_size = bootmem->boot_stack[cpuid].virt_offset; +#endif /* CONFIG_SMP */ + usd_lo.USD_lo_p = 0; + + /* + * SYNCHRONIZATION POINT #2.2 + * Before this synchronization all processors should calculate + * state of context registers and complete access to physical memory + * At this point all processors should be here + * After synchronization all variables can be accessed only from + * registers file + */ + boot_sync_all_processors(); + + /* + * Set Procedure Stack and Procedure Chain stack registers + * to begining virtual stacks addresses and collapse in that way + * previuos useless stack frames + */ + NATIVE_FLUSHCPU; + NATIVE_NV_WRITE_PSP_REG(psp_hi, psp_lo); + NATIVE_NV_WRITE_PCSP_REG(pcsp_hi, pcsp_lo); + + /* + * Enable control of PS & PCS stack guard + */ + boot_native_set_sge(); + + /* + * Switch User Stack registers to virtual kernel stack addresses + * The assumption is - stack allocation does not use GETSAP operation + * but uses SP and FP pointers and allocates stack from end. 
+ * Set stack pointer to the very begining of initial stack to collapse + * useless previuos stack frames + */ + + NATIVE_NV_WRITE_USBR_USD_REG(usbr, usd_hi, usd_lo); + + /* + * Set Kernel 'text/data/bss' segment registers to consistent + * virtual addresses + */ + + NATIVE_WRITE_CUD_LO_REG(cud_lo); + NATIVE_WRITE_OSCUD_LO_REG(cud_lo); + + NATIVE_WRITE_GD_LO_REG(gd_lo); + NATIVE_WRITE_OSGD_LO_REG(gd_lo); + + /* + * Set CPU registers to point to kernel CUT & index + */ + native_set_kernel_CUTD(); + + __E2K_WAIT_ALL; + +#ifdef CONFIG_ONLY_HIGH_PHYS_MEM + /* variable 'flush_caches' should be local register of function */ + if (flush_caches) { + native_raw_write_back_CACHE_L12(); + __E2K_WAIT_ALL; + if (l3_enable && do_i_flush) + boot_native_flush_L3(iset_ver, node_nbsr); + } +#endif /* CONFIG_ONLY_HIGH_PHYS_MEM */ + + E2K_CLEAR_CTPRS(); + __E2K_WAIT_ALL; + + NATIVE_WRITE_MMU_CR(mmu_cr); + __E2K_WAIT_ALL; + + /* + * The following call completes switching into the virtual execution. + * Now full virtual addressing support is enable. Should not be + * return here from this function. + */ + +#ifdef CONFIG_SMP + boot_init_sequel_func(bsp, cpuid, cpus_to_sync); +#else /* ! CONFIG_SMP */ + boot_init_sequel_func(bsp, 0, 0); +#endif /* CONFIG_SMP */ +} +noinline void __init_recv +boot_native_switch_to_virt(bool bsp, int cpuid, + void (*boot_init_sequel_func)(bool bsp, int cpuid, int cpus_to_sync)) +{ + boot_native_kernel_switch_to_virt(bsp, cpuid, boot_init_sequel_func); +} + +/* + * The funcrtion is fictitious, only to determine the size of previous function. + * The function should follow previous function 'boot_switch_to_virt()' + */ + +static void __init_recv +boot_native_switch_to_virt_end(void) +{ +} + +/* + * Map some necessary physical areas to the equal virtual addresses to + * switch kernel execution into the physical space to execution into the + * virtual space. 
+ * Sometime after turning on the TLB, translation of virtual addresses to physical + * becomes inevitable, some kernel text and data should be accessed on old + * physical addresses, which will be treated now as virtual addresses. + */ + +void __init_recv +boot_native_map_needful_to_equal_virt_area(e2k_addr_t stack_top_addr) +{ + e2k_addr_t area_base; + e2k_size_t area_size; + int ret; + + /* + * Map the function 'boot_native_switch_to_virt()' of kernel image + * 'text' segments. This function will make switching to virtual + * space. The first part of the function is executed into the + * physical space without any translation virtual addresses. + * But second part of one is executed into the equal virtual space. + */ + + area_base = (e2k_addr_t)boot_vp_to_pp(&boot_native_switch_to_virt); + area_size = (e2k_size_t)boot_native_switch_to_virt_end - + (e2k_size_t)boot_native_switch_to_virt; + + ret = boot_map_to_equal_virt_area(area_base, area_size, + PAGE_KERNEL_SWITCHING_TEXT, TLB_KERNEL_SWITCHING_TEXT, + BOOT_E2K_EQUAL_MAP_PAGE_SIZE, ITLB_ACCESS_MASK, 0); + if (ret <= 0) { + BOOT_BUG("Could not map to equal virtual space the kernel " + "function 'boot_switch_to_virt()': base addr 0x%lx " + "size 0x%lx page size 0x%x", + area_base, area_size, BOOT_E2K_KERNEL_PAGE_SIZE); + } + boot_printk("The kernel function 'boot_switch_to_virt()' : " + "base addr 0x%lx size 0x%lx is mapped to %d equal " + "virtual page(s) page size 0x%lx\n", + area_base, area_size, ret, + (e2k_size_t)BOOT_E2K_KERNEL_PAGE_SIZE); + + /* + * Map the structure 'kernel_bootmem', which contains all boot-time + * memory info. 
+ */ + + area_base = (e2k_addr_t)boot_kernel_bootmem; + area_size = sizeof(kernel_bootmem); + + ret = boot_map_to_equal_virt_area(area_base, area_size, + PAGE_KERNEL_SWITCHING_DATA, TLB_KERNEL_SWITCHING_DATA, + BOOT_E2K_EQUAL_MAP_PAGE_SIZE, ITLB_ACCESS_MASK, 0); + if (ret <= 0) { + BOOT_BUG("Could not map to equal virtual space the " + "structure 'kernel_bootmem': base addr 0x%lx " + "size 0x%lx page size 0x%x", + area_base, area_size, BOOT_E2K_KERNEL_PAGE_SIZE); + } + boot_printk("The kernel structure 'kernel_bootmem': base addr 0x%lx " + "size 0x%lx was mapped to %d equal virtual page(s) " + "page size 0x%lx\n", + area_base, area_size, ret, + (e2k_size_t)BOOT_E2K_KERNEL_PAGE_SIZE); + + /* + * Map the top of the kernel data stack to have access to some + * functions locals. + */ + + area_base = stack_top_addr - E2K_KERNEL_US_PAGE_SWITCHING_SIZE + + sizeof(long); + area_size = E2K_KERNEL_US_PAGE_SWITCHING_SIZE; + + ret = boot_map_to_equal_virt_area(area_base, area_size, + PAGE_KERNEL_SWITCHING_US_STACK, TLB_KERNEL_SWITCHING_US_STACK, + BOOT_E2K_EQUAL_MAP_PAGE_SIZE, ITLB_ACCESS_MASK, 0); + if (ret <= 0) { + BOOT_BUG("Could not map to equal virtual space the top of the kernel stack: base addr 0x%lx size 0x%lx page size 0x%x", + area_base, area_size, E2K_KERNEL_US_PAGE_SIZE); + } + boot_printk("The kernel top of the stack : " + "base addr 0x%lx size 0x%lx was mapped to %d equal virtual " + "page(s) page size 0x%lx\n", + area_base, area_size, ret, (e2k_size_t)E2K_KERNEL_US_PAGE_SIZE); +} + +static void boot_init_mmu_support(void) +{ + machdep_t *mach = &boot_machine; + e2k_core_mode_t core_mode; + + boot_mmu_pt_v6 = mach->mmu_pt_v6; + + if (mach->native_iset_ver < E2K_ISET_V6) { + boot_printk("MMU: old legacy Page Table entries format\n"); + return; + } + core_mode.CORE_MODE_reg = BOOT_READ_CORE_MODE_REG_VALUE(); + core_mode.CORE_MODE_sep_virt_space = 0; + if (mach->mmu_pt_v6) + core_mode.CORE_MODE_pt_v6 = 1; + else + core_mode.CORE_MODE_pt_v6 = 0; + 
BOOT_WRITE_CORE_MODE_REG_VALUE(core_mode.CORE_MODE_reg); + + core_mode.CORE_MODE_reg = BOOT_READ_CORE_MODE_REG_VALUE(); + if (core_mode.CORE_MODE_pt_v6) { + boot_printk("Set MMU Page Table entries format " + "to new V6 mode\n"); + } else { + boot_printk("Set MMU Page Table entries format " + "to old legacy mode\n"); + } + if (core_mode.CORE_MODE_sep_virt_space) { + boot_printk("Enable MMU Separate Page Tables mode\n"); + } else { + boot_printk("Disable MMU Separate Page Tables mode\n"); + } + boot_printk("CORE_MODE is set to: 0x%x\n", core_mode.CORE_MODE_reg); + + /* set flag of PT version at abstruct page table structure */ + boot_pgtable_struct_p->pt_v6 = mach->mmu_pt_v6; +} + +/* + * Control process of boot-time initialization of Virtual memory support. + * The main goal of the initialization is switching to further boot execution + * on virtual memory. + */ + +void __init +boot_mem_init(bool bsp, int cpuid, boot_info_t *boot_info, + void (*boot_init_sequel_func)(bool bsp, int cpuid, int cpus_to_sync)) +{ + e2k_size_t pages_num; + + if (BOOT_IS_BSP(bsp)) { + + /* + * Probe the system memory and fill the structures + * 'nodes_phys_mem' of physical memory configuration. + */ + boot_probe_memory(boot_info); + boot_kernel_phys_base = (e2k_addr_t)boot_vp_to_pp(KERNEL_BASE); + boot_printk("The kernel image physical address is 0x%lx\n", + boot_kernel_phys_base); + + /* + * Create the physical memory pages maps to support + * simple boot-time memory allocator. 
+ */ + pages_num = boot_create_physmem_maps(boot_info); + boot_printk("The physical memory size is 0x%lx " + "pages * 0x%x = 0x%lx bytes\n", + pages_num, PAGE_SIZE, pages_num * PAGE_SIZE); +#ifdef CONFIG_SMP + /* + * Bootstrap processor completed creation of simple + * boot-time memory allocator and all CPUs can start + * to reserve used physical memory + */ + boot_set_event(&boot_physmem_maps_ready); + } else { + + /* + * Other processors are waiting for completion of creation + * to start reservation of used memory by each CPU + */ + boot_wait_for_event(&boot_physmem_maps_ready); +#endif /* CONFIG_SMP */ + } + + /* + * Reserve the memory used now by boot-time initialization. + */ + boot_reserve_all_bootmem(bsp, boot_info); + + /* define MMU type and initial setup of MMU modes */ + boot_init_mmu_support(); + + /* + * SYNCHRONIZATION POINT #0.2 + * At this point all processors should complete reservation of + * used memory and all busy physical memory is known + * After synchronization any processor can remap reserved area + * from low to high physical memory range + */ + boot_sync_all_processors(); + + /* update common info about present physical memory */ + /* which can be changed after reserve & delete */ + if (!boot_has_high_memory(boot_info)) { + BOOT_SET_LOW_MEMORY_ENABLED(); + boot_printk("Nothing high memory on machine, so remapping " + "of low memory to high is impossible\n"); + } + if (BOOT_IS_BSP(bsp) && BOOT_LOW_MEMORY_ENABLED()) + boot_update_physmem_maps(boot_info); + + /* + * Remap the low memory to high addresses range, if need and possible. + */ + boot_remap_low_memory(bsp, boot_info); + + /* + * SYNCHRONIZATION POINT #0.3 + * At this point all changes in phys_banks busy_areas are completed. + */ + boot_sync_all_processors(); + + if (BOOT_IS_BSP(bsp)) + boot_expand_phys_banks_reserved_areas(); + + /* + * SYNCHRONIZATION POINT #0.4 + * At this point phys_banks busy_areas are expanded. 
+ * After synchronization any processor can allocate needed physical + * memory. + */ + boot_sync_all_processors(); + +#ifdef CONFIG_NUMA + boot_node_duplicate_kernel(boot_info); + + /* + * SYNCHRONIZATION POINT for NUMA #0.5 + * At this point all nodes should complete creation of + * own copy of kernel image and page tables + */ + boot_sync_all_processors(); + + /* + * After synchronization all nodes should switch to duplicated + * kernel mode and can use own copy of kernel image and page tables + */ + boot_node_set_duplicated_mode(); + + /* + * SYNCHRONIZATION POINT for NUMA #0.6 + * At this point all nodes should complete switch to duplicated + * kernel image and page tables + */ + boot_sync_all_processors(); + + /* + * After synchronization all nodes run on duplicated image + * but if node has not own copy and use some other node copy then + * it need change kernel image base from -1 to base address of used + * node's image. Base address -1 was used to early detection nodes + * without duplicated image + */ + boot_node_set_kernel_base(); + + /* + * Now for NUMA mode we can set Trap Cellar pointer and MMU + * register to own copy of kernel image area on each node + * and reset Trap Counter register + */ + boot_set_MMU_TRAP_POINT(boot_kernel_trap_cellar); + + boot_printk("Kernel trap cellar set to physical " + "address 0x%lx MMU_TRAP_CELLAR_MAX_SIZE 0x%x " + "kernel_trap_cellar 0x%lx\n", + boot_kernel_trap_cellar, MMU_TRAP_CELLAR_MAX_SIZE, + BOOT_KERNEL_TRAP_CELLAR); +#endif /* CONFIG_NUMA */ + +#ifndef CONFIG_NUMA + if (BOOT_IS_BSP(bsp)) { + + /* + * Init the boot-time support of physical areas mapping + * to virtual space + */ + + boot_init_mapping(); + +#ifdef CONFIG_SMP + /* + * Bootstrap processor completed initialization of support + * of physical areas mapping to virtual space + */ + boot_set_event(&boot_mapping_ready); + } else { + + /* + * Other processors are waiting for completion of + * initialization to start mapping + */ + 
boot_wait_for_event(&boot_mapping_ready); +#endif /* CONFIG_SMP */ + } +#else /* CONFIG_NUMA */ + /* + * Init the boot-time support of physical areas mapping + * to virtual space on each node. + * A node has own page table and own mapping of some kernel objects + */ + boot_node_init_mapping(); + + /* + * SYNCHRONIZATION POINT #0.7 + * Waiting for all nodes init mapping before pgd sets on + * cpus of same node + */ + boot_sync_all_processors(); +#endif /* ! CONFIG_NUMA */ + + /* + * Map the kernel memory areas used at boot-time + * into the virtual space. + */ + boot_map_all_bootmem(bsp, boot_info); + + /* + * SYNCHRONIZATION POINT #1 + * At this point all processors should complete map all + * used memory for each CPU and general (shared) memory + * After synchronization page table is completely constructed for + * switching on virtual addresses. + */ + boot_sync_all_processors(); + + /* + * Map some necessary physical areas to the equal virtual addresses to + * switch kernel execution into the physical space to execution + * into the virtual space. + */ + boot_map_needful_to_equal_virt_area( + BOOT_READ_USD_LO_REG().USD_lo_base); + + /* + * SYNCHRONIZATION POINT #2 + * At this point all processors maped necessary physical areas + * to the equal virtual addresses and bootstrap processor maped + * general (shared) physical areas. + * After synchronization all procxessors are ready to switching + */ + boot_sync_all_processors(); + + /* + * Switch kernel execution into the physical space to execution + * into the virtual space. All following initializations will be + * control by 'boot_init_sequel_func()' function. + * Should not be return here from this function. + */ + boot_kernel_switch_to_virt(bsp, cpuid, boot_init_sequel_func); +} + +/* + * Control process of termination of boot-time initialization of Virtual memory + * support. The function terminates this process and is executed on virtual + * memory. 
+ */ + +void __init +init_mem_term(int cpuid) +{ + + /* + * Flush the temporarly mapped areas to virtual space. + */ + + init_clear_temporary_ptes(ALL_TLB_ACCESS_MASK, cpuid); +} diff --git a/arch/e2k/p2v/boot_map.c b/arch/e2k/p2v/boot_map.c new file mode 100644 index 000000000000..a03abd35770c --- /dev/null +++ b/arch/e2k/p2v/boot_map.c @@ -0,0 +1,1611 @@ +/* $Id: boot_map.c,v 1.24 2009/01/22 17:04:21 atic Exp $ + * + * Boot-time support of mappings physical memory areas into + * the kernel virtual space. + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#include + +#include +#include +#include +#include +#include + +#undef DEBUG_BOOT_MAP_MODE +#undef boot_printk +#define DEBUG_BOOT_MAP_MODE 0 /* Boot map process */ +#define boot_printk if (DEBUG_BOOT_MAP_MODE) do_boot_printk + +#undef DEBUG_MAP_EQUAL_MODE +#undef DebugME +#define DEBUG_MAP_EQUAL_MODE 0 /* Map equal addresses */ +#define DebugME if (DEBUG_MAP_EQUAL_MODE) do_boot_printk + +#undef DEBUG_NUMA_MODE +#undef DebugNUMA +#define DEBUG_NUMA_MODE 0 /* Boot NUMA */ +#define DebugNUMA if (DEBUG_NUMA_MODE) do_boot_printk + +#undef DEBUG_MAP_AREA_MODE +#undef DebugMA +#define DEBUG_MAP_AREA_MODE 0 /* Map physical area to virtual */ +#define DebugMA if (DEBUG_MAP_AREA_MODE) do_boot_printk + +#undef DEBUG_MAP_VERBOSE_MODE +#undef DebugMAV +#define DEBUG_MAP_VERBOSE_MODE 0 /* Verbose mapping of physical area */ +#define DebugMAV if (DEBUG_MAP_VERBOSE_MODE) do_boot_printk + +/* + * The structure to simulate TLB contents + */ +#ifndef CONFIG_SMP +e2k_tlb_t __initdata_recv dtlb_contents; +e2k_tlb_t __initdata_recv itlb_contents; +#else +e2k_tlb_t __initdata_recv dtlb_contents[NR_CPUS]; +e2k_tlb_t __initdata_recv itlb_contents[NR_CPUS]; +#endif /* CONFIG_SMP */ + +#ifdef CONFIG_SMP +#ifndef CONFIG_NUMA +static boot_spinlock_t boot_page_table_lock = __BOOT_SPIN_LOCK_UNLOCKED; +#define boot_numa_node_spin_lock(lock) boot_spin_lock(&(lock)) +#define boot_numa_node_spin_unlock(lock) 
boot_spin_unlock(&(lock)) +#define init_numa_node_spin_lock(lock) init_spin_lock(&(lock)) +#define init_numa_node_spin_unlock(lock) init_spin_unlock(&(lock)) +#else /* CONFIG_NUMA */ +static boot_spinlock_t __initdata_recv boot_page_table_lock[MAX_NUMNODES] = { + [ 0 ... (MAX_NUMNODES-1) ] = __BOOT_SPIN_LOCK_UNLOCKED +}; +#define boot_numa_node_spin_lock(lock) boot_dup_node_spin_lock(lock) +#define boot_numa_node_spin_unlock(lock) boot_dup_node_spin_unlock(lock) +#define init_numa_node_spin_lock(lock) init_dup_node_spin_lock(lock) +#define init_numa_node_spin_unlock(lock) init_dup_node_spin_unlock(lock) +#endif /* ! CONFIG_NUMA */ +#else /* ! CONFIG_SMP */ +#define boot_page_table_lock +#define boot_numa_node_spin_lock(lock) +#define boot_numa_node_spin_unlock(lock) +#define init_numa_node_spin_lock(lock) +#define init_numa_node_spin_unlock(lock) +#endif /* CONFIG_SMP */ + +#ifdef CONFIG_NUMA +static boot_spinlock_t __initdata boot_node_init_map_lock[MAX_NUMNODES] = { + [ 0 ... (MAX_NUMNODES-1) ] = __BOOT_SPIN_LOCK_UNLOCKED +}; +static int __initdata node_map_inited[MAX_NUMNODES] = { 0 }; +#define boot_node_map_inited \ + boot_get_vo_value(node_map_inited[boot_numa_node_id()]) +#endif /* CONFIG_NUMA */ + +static inline int +boot_get_pt_level_id(const pt_level_t *pt_level) +{ + /* now PT level is number of the level */ + return get_pt_level_id(pt_level); +} + +static inline bool +boot_is_pt_level_of_page_size(e2k_size_t page_size, + const pt_struct_t *pt_struct, + int level) +{ + return pt_struct->levels[level].page_size == page_size; +} + +static inline const pt_level_t * +boot_find_pt_level_of_page_size(e2k_size_t page_size) +{ + const pt_struct_t *pt_struct = boot_pgtable_struct_p; + int level; + + for (level = pt_struct->levels_num; level > 0; level--) { + if (boot_is_pt_level_of_page_size(page_size, pt_struct, level)) + return &pt_struct->levels[level]; + } + return NULL; +} + +static inline bool +boot_is_huge_pte(pgprot_t *ptp, const pt_struct_t *pt_struct, 
int level) +{ + const pt_level_t *pt_level = &pt_struct->levels[level]; + pte_t pte = *(pte_t *)ptp; + + if (pt_level->is_huge) + return pte_huge(pte); + return false; +} + +static inline pte_t * +boot_get_huge_pte(e2k_addr_t virt_addr, pgprot_t *ptp, const pt_level_t *pt_level) +{ + if (unlikely(!pt_level->is_huge)) { + BOOT_BUG("Page table level #%d cannot contain page " + "table entries (pte)\n", + boot_get_pt_level_id(pt_level)); + return (pte_t *)-1; + } + if (likely(pt_level->boot_get_huge_pte == NULL)) + return (pte_t *)ptp; + return pt_level->boot_get_huge_pte(virt_addr, ptp); +} + +static inline bool +init_is_pt_level_of_page_size(e2k_size_t page_size, int level) +{ + return pgtable_struct.levels[level].page_size == page_size; +} + +static inline const pt_level_t * +init_find_pt_level_of_page_size(e2k_size_t page_size) +{ + int level; + + for (level = pgtable_struct.levels_num; level > 0; level--) { + if (init_is_pt_level_of_page_size(page_size, level)) + return &pgtable_struct.levels[level]; + } + return NULL; +} + +static inline pte_t * +init_get_huge_pte(e2k_addr_t virt_addr, pgprot_t *ptp, const pt_level_t *pt_level) +{ + if (unlikely(!pt_level->is_huge)) { + INIT_BUG("Page table level #%d cannot contain page " + "table entries (pte)\n", + get_pt_level_id(pt_level)); + return (pte_t *)-1; + } + if (likely(pt_level->init_get_huge_pte == NULL)) + return (pte_t *)ptp; + return pt_level->init_get_huge_pte(virt_addr, ptp); +} + +pte_t * __init_recv +boot_get_double_huge_pte(e2k_addr_t addr, pgprot_t *ptp) +{ + /* + * In this case virtual page occupied two sequential + * entries in page table directory level + */ + + /* first pte is always even */ + return (pte_t *)(((e2k_addr_t)ptp) & ~((sizeof(*ptp) * 2) - 1)); +} + +pte_t * __init_recv +boot_get_common_huge_pte(e2k_addr_t addr, pgprot_t *ptp) +{ + return (pte_t *)ptp; +} + +void __init_recv +boot_set_double_pte(e2k_addr_t addr, pte_t *ptep, pte_t pte, bool host_map) +{ + /* + * In this case virtual page 
occupied two sequential + * entries in page table directory level + * All two pte's (pmd's) should be set to identical + * entries + */ + DebugMAV("boot_set_double_pte() will set pte 0x%px to 0x%lx for " + "address 0x%lx %s mapping\n", + ptep, pte_val(pte), addr, + (host_map) ? "host" : "native"); + + /* first pte is always even */ + ptep = (pte_t *)(((e2k_addr_t)ptep) & ~((sizeof(*ptep) * 2) - 1)); + + boot_set_pte_kernel(addr, ptep, pte); + boot_set_pte_kernel(addr, ++ptep, pte); +} + +void __init_recv +boot_set_common_pte(e2k_addr_t addr, pte_t *ptep, pte_t pte, bool host_map) +{ + DebugMAV("boot_set_common_pte() will set pte 0x%px to 0x%lx for " + "address 0x%lx %s mapping\n", + ptep, pte_val(pte), addr, + (host_map) ? "host" : "native"); + boot_set_pte_kernel(addr, ptep, pte); +} + +static inline __init_recv void +boot_set_pte(e2k_addr_t addr, pte_t *ptep, pte_t pte, const pt_level_t *pt_level, + bool host_map) +{ + if (unlikely(!pt_level->is_pte && !pt_level->is_huge)) { + BOOT_BUG("Page table level #%d cannot contain page " + "table entries (pte)\n", + boot_get_pt_level_id(pt_level)); + } + if (pt_level->is_huge) + pte = pte_set_large_size(pte); + else + pte = pte_set_small_size(pte); + if (pt_level->boot_set_pte != NULL) { + pt_level->boot_set_pte(addr, ptep, pte, host_map); + } else { + boot_set_common_pte(addr, ptep, pte, host_map); + } +} + +pte_t * __init_recv +init_get_double_huge_pte(e2k_addr_t addr, pgprot_t *ptp) +{ + return boot_get_double_huge_pte(addr, ptp); +} + +pte_t * __init_recv +init_get_common_huge_pte(e2k_addr_t addr, pgprot_t *ptp) +{ + return boot_get_common_huge_pte(addr, ptp); +} + +/* + * Get virtual address entry in the third-level page table. 
+ */ + +static pte_t * __init_recv +init_get_pte(e2k_addr_t virt_addr, const pt_level_t *pt_level) +{ + e2k_size_t page_size = pt_level->page_size; + int level; + pgd_t *pgdp; + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep; + + /* + * Get entry in the 4-th root-level page table + */ + DebugMAV("init_get_pte() started for virt addr 0x%lx page size 0x%lx\n", + virt_addr, page_size); + + level = pgtable_struct.levels_num; + pgdp = pgd_offset_k(virt_addr); + DebugMAV("init_get_pte() pgd pointer is %px == 0x%lx\n", + pgdp, pgd_val(*pgdp)); + if (init_is_pt_level_of_page_size(page_size, level)) + return init_get_huge_pte(virt_addr, (pgprot_t *)pgdp, pt_level); + if (pgd_none(*pgdp)) + return NULL; + + /* + * Get entry in the 3-th level (high part of middle) page table + */ + level--; + pudp = pud_offset(pgdp, virt_addr); + if (init_is_pt_level_of_page_size(page_size, level)) + return init_get_huge_pte(virt_addr, (pgprot_t *)pudp, pt_level); + if (pud_none(*pudp)) + return NULL; + + /* + * Get entry in the 2-nd level (low part of middle) page table + */ + level--; + pmdp = pmd_offset(pudp, virt_addr); + if (init_is_pt_level_of_page_size(page_size, level)) + return init_get_huge_pte(virt_addr, (pgprot_t *)pmdp, pt_level); + if (pmd_none(*pmdp)) + return NULL; + + /* + * Get entry in the 1-st-level page table + */ + level--; + ptep = pte_offset_kernel(pmdp, virt_addr); + DebugMAV("init_get_pte() pte pointer is %px == 0x%lx\n", + ptep, pte_val(*ptep)); + return ptep; +} + +void __init_recv +init_double_pte_clear(pte_t *ptep) +{ + /* + * In this case virtual page occupied two sequential + * entries in page table directory level + * All two pte's (ptd's) should be cleared + */ + /* first pte is always even */ + ptep = (pte_t *)(((e2k_addr_t)ptep) & ~((sizeof(*ptep) * 2) - 1)); + + pte_clear_kernel(ptep); + pte_clear_kernel(++ptep); +} +void __init_recv +init_common_pte_clear(pte_t *ptep) +{ + pte_clear_kernel(ptep); +} + +static void inline +init_pte_clear(pte_t *ptep, const 
pt_level_t *pt_level) +{ + if (unlikely(!pt_level->is_pte && !pt_level->is_huge)) { + INIT_BUG("Page table level #%d cannot contain page " + "table entries (pte)\n", + get_pt_level_id(pt_level)); + } + if (pt_level->init_pte_clear != NULL) { + pt_level->init_pte_clear(ptep); + } else { + init_common_pte_clear(ptep); + } +} + +#ifdef CONFIG_NUMA +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT + +static void __init_recv +boot_all_cpus_pgd_set(int nid, int pgd_index, pgd_t pgd) +{ + pgd_t *pgdp; + int cpu; + cpumask_t node_cpus; + + node_cpus = boot_node_to_cpumask(nid); + DebugNUMA("boot_all_cpus_pgd_set() node #%d online cpu mask 0x%lx\n", + nid, cpumask_test_cpu(0, &node_cpus)); + boot_for_each_online_cpu_of_node(nid, cpu, node_cpus) { + pgdp = boot_node_cpu_pg_dir(nid, cpu); + DebugNUMA("boot_all_cpus_pgd_set() set own node CPU #%d pgd " + "entry 0x%lx to pud == 0x%lx\n", + cpu, &pgdp[pgd_index], pgd_val(pgd)); + pgdp[pgd_index] = pgd; + } +} + +/* + * Set specified pgd entry to point to next-level page table PUD + * Need populate the pgd entry into follow root page tables: + * - all CPUs of the current node; + * - all CPUs of other nodes which have not own copy of kernel image + * (DUP KERNEL) and use duplicated kernel of this node + */ +void __init_recv +boot_pgd_set(pgd_t *my_pgdp, pud_t *pudp, int user) +{ + pgd_t pgd; + int pgd_index = pgd_to_index(my_pgdp); + int my_node = boot_numa_node_id(); + int my_cpu = boot_smp_processor_id(); + int dup_node; + int node; + + DebugNUMA("boot_pgd_set_k() set own pgd entry 0x%lx to pud 0x%lx\n", + my_pgdp, pudp); + if (user) { + pgd = boot_mk_pgd_phys_u(pudp); + } else { + pgd = boot_mk_pgd_phys_k(pudp); + } + *my_pgdp = pgd; + if (!BOOT_NODE_THERE_IS_DUP_KERNEL()) { + DebugNUMA("boot_pgd_set_k() has not duplicated kernel, so " + "all CPUs use BS root PT\n"); + return; + } + + dup_node = boot_node_dup_kernel_nid(my_node); + if (dup_node == my_node) { + if (MMU_IS_SEPARATE_PT()) { + BOOT_BUG_ON(my_pgdp != 
&boot_node_root_pt[pgd_index], + "pgd should be on current the node\n"); + boot_node_root_pt[pgd_index] = pgd; + } else { + BOOT_BUG_ON(my_pgdp != + &boot_cpu_pg_dir(my_cpu)[pgd_index], + "pgd should be on current the node\n"); + boot_all_cpus_pgd_set(my_node, pgd_index, pgd); + } + } + if (BOOT_NODE_DUP_KERNEL_NUM() >= boot_phys_nodes_num) { + DebugNUMA("boot_pgd_set_k() all %d nodes have duplicated " + "kernel so own root PT\n", + BOOT_NODE_DUP_KERNEL_NUM()); + return; + } + if (dup_node != my_node) { + if (MMU_IS_SEPARATE_PT()) { + BOOT_BUG_ON(my_pgdp == &boot_node_root_pt[pgd_index], + "pgd cannot be on the node\n"); + boot_the_node_root_pt(dup_node)[pgd_index] = pgd; + } else { + BOOT_BUG_ON(my_pgdp == + &boot_cpu_pg_dir(my_cpu)[pgd_index], + "pgd cannot be on the node\n"); + boot_all_cpus_pgd_set(dup_node, pgd_index, pgd); + } + } + boot_for_each_node_has_not_dup_kernel(node) { + DebugNUMA("boot_pgd_set_k() check other node #%d\n", + node); + if (node == my_node) + continue; + if (boot_node_dup_kernel_nid(node) != dup_node) + continue; + if (MMU_IS_SEPARATE_PT()) { + boot_the_node_root_pt(node)[pgd_index] = pgd; + } else { + boot_all_cpus_pgd_set(node, pgd_index, pgd); + } + } +} +#else /* ! 
CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + +/* + * Set specified kernel pgd entry to point to next-level page table PUD + * Need populate the pgd entry into follow root page tables: + * - PT of the specified node, if the node has duplicated kernel; + * - PT of node on which the node is duplicated + * - PTs of all other nodes which have not own copy of kernel image + * (DUP KERNEL) and use duplicated kernel of this node or + * are duplicated on the same node as this node + */ +void __init_recv +boot_pgd_set(pgd_t *my_pgdp, pud_t *pudp, int user) +{ + pgd_t pgd; + int pgd_index = pgd_to_index(my_pgdp); + int my_node = boot_numa_node_id(); + int dup_node; + int node; + + DebugNUMA("boot_pgd_set_k() set own pgd entry 0x%lx to pud 0x%lx\n", + my_pgdp, pudp); + BOOT_BUG_ON(!MMU_IS_SEPARATE_PT(), + "function can be call only for separate PT mode\n") + if (user) { + pgd = boot_mk_pgd_phys_u(pudp); + } else { + pgd = boot_mk_pgd_phys_k(pudp); + } + *my_pgdp = pgd; + if (!BOOT_NODE_THERE_IS_DUP_KERNEL()) { + DebugNUMA("boot_pgd_set_k() has not duplicated kernel, so " + "all CPUs use BS root PT\n"); + return; + } + + dup_node = boot_node_dup_kernel_nid(my_node); + if (dup_node == my_node) { + BOOT_BUG_ON(my_pgdp != &boot_node_root_pt[pgd_index], + "pgd should be on current the node\n"); + boot_node_root_pt[pgd_index] = pgd; + } + if (BOOT_NODE_DUP_KERNEL_NUM() >= boot_phys_nodes_num) { + DebugNUMA("boot_pgd_set_k() all %d nodes have duplicated " + "kernel so own root PT\n", + BOOT_NODE_DUP_KERNEL_NUM()); + return; + } + if (dup_node != my_node) { + BOOT_BUG_ON(my_pgdp == &boot_node_root_pt[pgd_index], + "pgd cannot be on the node\n"); + boot_the_node_root_pt(dup_node)[pgd_index] = pgd; + } + boot_for_each_node_has_not_dup_kernel(node) { + DebugNUMA("boot_pgd_set_k() check other node #%d\n", + node); + if (node == my_node) + continue; + if (boot_node_dup_kernel_nid(node) != dup_node) + continue; + boot_the_node_root_pt(node)[pgd_index] = pgd; + } +} +#endif /* 
CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ +#endif /* CONFIG_NUMA */ + +/* + * Allocate memory for first-level (high part of middle) page table directory + */ +static pud_t * __init_recv +boot_pud_alloc(void) +{ + pud_t *pudp; + int entry; + + pudp = (pud_t *)boot_alloc_phys_mem(PUD_TABLE_SIZE, PAGE_SIZE, + boot_time_data_mem_type); + if (pudp == (pud_t *)-1) { + BOOT_BUG("Could not allocate memory for first-level page table (PUD)"); + return (pud_t *)-1; + } + for (entry = 0; entry < PTRS_PER_PUD; entry++) { + pud_clear_kernel(&pudp[entry]); + } + return pudp; +} + +/* + * Allocate memory for second-level (low part of middle) page table directory + */ +static pmd_t * __init_recv +boot_pmd_alloc(void) +{ + pmd_t *pmdp; + int entry; + + pmdp = (pmd_t *)boot_alloc_phys_mem(PMD_TABLE_SIZE, PAGE_SIZE, + boot_time_data_mem_type); + if (pmdp == (pmd_t *)-1) { + BOOT_BUG("Could not allocate memory for second-level page table (PMD)"); + return (pmd_t *)-1; + } + for (entry = 0; entry < PTRS_PER_PMD; entry++) { + pmd_clear_kernel(&pmdp[entry]); + + } + return pmdp; +} + +/* + * Allocate memory for third-level page table + */ +static pte_t * __init_recv +boot_pte_alloc(void) +{ + pte_t *ptep; + int entry; + + ptep = (pte_t *)boot_alloc_phys_mem(PTE_TABLE_SIZE, PAGE_SIZE, + boot_time_data_mem_type); + if (ptep == (pte_t *)-1) { + BOOT_BUG("Could not allocate memory for third-level page table (PTE)"); + return (pte_t *)-1; + } + for (entry = 0; entry < PTRS_PER_PTE; entry++) { + pte_clear_kernel(&ptep[entry]); + + } + return ptep; +} + +/* + * Get virtual address entry in the third-level page table. 
+ */ + +static pte_t * __init_recv +boot_get_pte(e2k_addr_t virt_addr, const pt_level_t *pt_level, int user, int va) +{ + const pt_struct_t *pt_struct = boot_pgtable_struct_p; + e2k_size_t page_size = pt_level->page_size; + int level; + pgd_t *pgdp; + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep; + + /* + * Get entry in the 4-th root-level page table + */ + DebugMAV("boot_get_pte() started for virt addr 0x%lx page size 0x%lx\n", + virt_addr, page_size); + + level = pt_struct->levels_num; + pgdp = boot_pgd_offset_k(virt_addr); + DebugMAV("boot_get_pte() pgd pointer is %px == 0x%lx\n", + pgdp, pgd_val(*pgdp)); + if (boot_is_pt_level_of_page_size(page_size, pt_struct, level)) + return boot_get_huge_pte(virt_addr, (pgprot_t *)pgdp, pt_level); + if (pgd_none(*pgdp)) { + if (va) { + BOOT_BUG("Could not find PGD for virtual address 0x%lx " + "while reset", + virt_addr); + return (pte_t *)-1; + + } + pudp = boot_pud_alloc(); + if (pudp == (pud_t *)-1) { + BOOT_BUG("Could not allocate memory for PUD to map " + "virtual address 0x%lx", + virt_addr); + return (pte_t *)-1; + } + if (user) { + boot_pgd_set_u(pgdp, pudp); + } else { + boot_pgd_set_k(pgdp, pudp); + } + } else if (boot_is_huge_pte((pgprot_t *)pgdp, pt_struct, level)) { + BOOT_BUG("Page table level #%d PGD 0x%lx contains already pte " + "0x%lx of other page size not 0x%lx", + level, pgdp, pgd_val(*pgdp), page_size); + return (pte_t *)-1; + } + + /* + * Get entry in the 3-th level (high part of middle) page table + */ + level--; + pudp = boot_pud_offset(pgdp, virt_addr); + if (boot_is_pt_level_of_page_size(page_size, pt_struct, level)) + return boot_get_huge_pte(virt_addr, (pgprot_t *)pudp, pt_level); + if (pud_none(*pudp)) { + if (va) { + BOOT_BUG("Could not find PUD for virtual address 0x%lx " + "while reset", + virt_addr); + return (pte_t *)-1; + + } + pmdp = boot_pmd_alloc(); + if (pmdp == (pmd_t *)-1) { + BOOT_BUG("Could not allocate memory for PMD to map " + "virtual address 0x%lx", + virt_addr); + return (pte_t 
*)-1; + } + if (user) { + boot_pud_set_u(pudp, pmdp); + } else { + boot_pud_set_k(pudp, pmdp); + } + } else if (boot_is_huge_pte((pgprot_t *)pudp, pt_struct, level)) { + BOOT_BUG("Page table level #%d PUD 0x%lx contains already pte " + "0x%lx of other page size not 0x%lx", + level, pudp, pud_val(*pudp), page_size); + return (pte_t *)-1; + } + + /* + * Get entry in the 2-nd level (low part of middle) page table + */ + level--; + pmdp = boot_pmd_offset(pudp, virt_addr); + if (boot_is_pt_level_of_page_size(page_size, pt_struct, level)) + return boot_get_huge_pte(virt_addr, (pgprot_t *)pmdp, pt_level); + if (pmd_none(*pmdp)) { + if (va) { + BOOT_BUG("Could not find PMD for virtual address 0x%lx " + "while reset", + virt_addr); + return (pte_t *)-1; + + } + ptep = boot_pte_alloc(); + if (ptep == (pte_t *)-1) { + BOOT_BUG("Could not allocate memory for PTE to map " + "virtual address 0x%lx", + virt_addr); + return (pte_t *)-1; + } + if (user) { + boot_pmd_set_u(pmdp, ptep); + } else { + boot_pmd_set_k(pmdp, ptep); + } + } else if (boot_is_huge_pte((pgprot_t *)pmdp, pt_struct, level)) { + BOOT_BUG("Page table level #%d PMD 0x%lx contains already pte " + "0x%lx of other page size not 0x%lx", + level, pmdp, pmd_val(*pmdp), page_size); + return (pte_t *)-1; + } + + /* + * Get entry in the 1-st-level page table + */ + level--; + ptep = boot_pte_offset(pmdp, virt_addr); + DebugMAV("boot_get_pte() pte pointer is %px == 0x%lx\n", + ptep, pte_val(*ptep)); + return ptep; +} + +/* + * Init. root-level page table directory + * All page tables is virtually mapped into the same virtual space as kernel + * Virtually mapped linear page table base address is passed as argument. + * The entry conforming to root page table is set to itself. 
+ */ +static void __init +boot_pgd_init(pgd_t *pgdp, e2k_addr_t vmlpt_base) +{ + int entry; + int root_pt_index; + + for (entry = 0; entry < PTRS_PER_PGD; entry ++) { + pgd_clear_kernel(&pgdp[entry]); + } + root_pt_index = pgd_index(vmlpt_base); + boot_vmlpt_pgd_set(&pgdp[root_pt_index], pgdp); +} + +/* + * Init. TLB structure to simulate it contents + */ +static void __init +boot_tlb_contents_simul_init(e2k_tlb_t *tlb) +{ + int line; + int set; + + for (line = 0; line < BOOT_NATIVE_TLB_LINES_NUM; line++) { + for (set = 0; set < BOOT_NATIVE_TLB_SETS_NUM; set++) { + tlb->lines[line].sets[set].virt_addr = 0; + tlb->lines[line].sets[set].valid_bit = 0; + } + tlb->lines[line].sets_num = 0; + } + tlb->entries_num = 0; +} + +/* + * Init. TLB structures (DTLB & ITLB) to simulate it contents + */ +#ifndef CONFIG_SMP +static void __init +boot_all_tlb_contents_simul_init(void) +{ + boot_tlb_contents_simul_init(boot_dtlb_contents); + boot_tlb_contents_simul_init(boot_itlb_contents); +} +#else /* CONFIG_SMP */ +static void __init +boot_cpu_tlb_contents_simul_init(int cpuid) +{ + boot_tlb_contents_simul_init(boot_vp_to_pp(&dtlb_contents[cpuid])); + boot_tlb_contents_simul_init(boot_vp_to_pp(&itlb_contents[cpuid])); +} + +#ifndef CONFIG_NUMA +static void __init +boot_all_tlb_contents_simul_init(void) +{ + int cpuid; + + for (cpuid = 0; cpuid < NR_CPUS; cpuid++) { + if (!boot_phys_cpu_present(cpuid)) + continue; + boot_cpu_tlb_contents_simul_init(cpuid); + } +} +#endif /* CONFIG_NUMA */ +#endif /* ! CONFIG_SMP */ + +/* + * Initialization of boot-time support of physical areas mapping + * to virtual space. 
+ */ + +#ifndef CONFIG_NUMA +void __init +boot_init_mapping(void) +{ + boot_pgd_init(boot_root_pt, KERNEL_VPTB_BASE_ADDR); + boot_all_tlb_contents_simul_init(); +} +#else /* CONFIG_NUMA */ +static void __init +boot_node_one_cpu_init_mapping(void) +{ + if (BOOT_TEST_AND_SET_NODE_LOCK(boot_node_init_map_lock, + boot_node_map_inited)) { + boot_cpu_tlb_contents_simul_init(boot_smp_processor_id()); + DebugNUMA("boot_node_init_mapping() init mapping " + "%s on node #%d CPU #%d\n", + (boot_node_map_inited) ? "completed already" + : + "no memory", + boot_numa_node_id(), boot_smp_processor_id()); + return; + } + if (!boot_node_has_dup_kernel()) { + goto no_init_mapping; + } + boot_pgd_init(boot_node_root_pt, KERNEL_VPTB_BASE_ADDR); + DebugNUMA("boot_node_init_mapping() init mapping on node #%d CPU #%d " + "root PT 0x%lx\n", + boot_numa_node_id(), boot_smp_processor_id(), + boot_node_root_pt); +no_init_mapping: + boot_cpu_tlb_contents_simul_init(boot_smp_processor_id()); + BOOT_NODE_UNLOCK(boot_node_init_map_lock, boot_node_map_inited); +} +void __init +boot_node_init_mapping(void) +{ + if (MMU_IS_SEPARATE_PT()) { + boot_node_one_cpu_init_mapping(); + } else { +#ifndef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT + boot_node_one_cpu_init_mapping(); +#else /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + if (!BOOT_NODE_THERE_IS_DUP_KERNEL() && !BOOT_IS_BS_NODE) { + DebugNUMA("boot_node_init_mapping() will use " + "BS root PT 0x%lx\n", + boot_cpu_kernel_root_pt); + return; + } + boot_pgd_init(boot_cpu_kernel_root_pt, KERNEL_VPTB_BASE_ADDR); + DebugNUMA("boot_node_init_mapping() init mapping on node #%d " + "CPU #%d root PT 0x%lx\n", + boot_numa_node_id(), boot_smp_processor_id(), + boot_cpu_kernel_root_pt); + boot_cpu_tlb_contents_simul_init(boot_smp_processor_id()); +#endif /* ! CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + } +} +#endif /* ! CONFIG_NUMA */ + +/* + * Clear PTEs of kernel virtual page in the fourth-level page table. 
+ */ + +static int __init_recv +init_clear_ptes(e2k_addr_t virt_addr, bool ignore_absence, int pt_level_id) +{ + const pt_level_t *pt_level; + pte_t *ptep; + + DebugME("init_clear_ptes() started for " + "virt addr 0x%lx page table level id %d\n", + virt_addr, pt_level_id); + + pt_level = get_pt_level_on_id(pt_level_id); + + /* + * Clear entry in the third-level page table + */ + init_numa_node_spin_lock(boot_page_table_lock); + ptep = init_get_pte(virt_addr, pt_level); + if (ptep == NULL || pte_none(*ptep)) { + if (!ignore_absence) { + init_numa_node_spin_unlock(boot_page_table_lock); + INIT_BUG("Third level PTE[0x%lx] of virtual" + "address 0x%lx is absent", + (long)ptep, virt_addr); + return (1); + } + DebugME("PTE[0x%lx} of virt addr 0x%lx is already clear\n", + (long)ptep, virt_addr); + } else { + init_pte_clear(ptep, pt_level); + DebugME("clear PTE[0x%lx] of virt addr 0x%lx\n", + (long)ptep, virt_addr); + } + init_numa_node_spin_unlock(boot_page_table_lock); + + return 0; +} + +/* + * Map the physical area to the kernel virtual space. 
+ * Function return number of mapped pages or 0, if error was detected, + * for example if some of the pages is already mapped + */ + +long __init_recv +boot_do_map_phys_area(e2k_addr_t phys_area_addr, e2k_size_t phys_area_size, + e2k_addr_t area_virt_addr, pgprot_t prot_flags, + const pt_level_t *pt_level, + bool ignore_busy, bool host_map) +{ + pte_t pte; + pgprot_t prot = prot_flags; + pte_t *ptep; + e2k_addr_t phys_addr; + e2k_addr_t virt_addr; + e2k_size_t page_size; + int pages_num; + int page; + + page_size = pt_level->page_size; + if (phys_area_addr == (e2k_addr_t)-1) { + phys_addr = 0; + pages_num = (_PAGE_ALIGN_DOWN(area_virt_addr + phys_area_size, + page_size) - area_virt_addr) / page_size; + prot = pgprot_present_flag_reset(prot); + } else { + if ((phys_area_addr & (page_size - 1)) != 0) { + BOOT_BUG("Physical adress 0x%lx is not page size 0x%lx " + "aligned", + phys_area_addr, page_size); + return -EINVAL; + } + phys_addr = _PAGE_ALIGN_UP(phys_area_addr, page_size); + pages_num = (_PAGE_ALIGN_DOWN(phys_area_addr + phys_area_size, + page_size) - phys_addr) / page_size; + } + if ((area_virt_addr & (page_size - 1)) != 0) { + BOOT_BUG("Virtual adress 0x%lx is not page size 0x%lx aligned", + area_virt_addr, page_size); + return -EINVAL; + } + virt_addr = area_virt_addr; + DebugMA("boot_map_phys_area() will map from phys addr 0x%lx, pages " + "num 0x%x to virtual base 0x%lx\n", + phys_addr, pages_num, virt_addr); + + boot_numa_node_spin_lock(boot_page_table_lock); + for (page = 0; page < pages_num; page++) { + ptep = boot_get_pte(virt_addr, pt_level, + 0, /* user ? */ + 0 /* va ? 
*/); + if (ptep == (pte_t *)-1) { + boot_numa_node_spin_unlock(boot_page_table_lock); + BOOT_BUG("Could not get PTE pointer to map virtual " + "address 0x%lx", + virt_addr); + return 0; + } + if (!pte_none(*ptep)) { + DebugMA("boot_map_phys_area() pte %px == 0x%lx is not " + "empty\n", + ptep, pte_val(*ptep)); + if (!ignore_busy) { + boot_numa_node_spin_unlock( + boot_page_table_lock); + boot_printk(" pte:%px pte_Val: 0x%px\n", + ptep, pte_val(*ptep)); + BOOT_BUG("The PTE entry is not empty to map " + "virtual address 0x%lx", + virt_addr); + } + pte = pte_restrict_prot(*ptep, prot); + pte = pte_reduce_prot(pte, prot); + } else if (pgprot_present(prot)) { + pte = mk_pte_phys(phys_addr, prot); + } else { + pte = mk_not_present_pte(prot); + } + boot_set_pte(virt_addr, ptep, pte, pt_level, host_map); + if (phys_area_addr != (e2k_addr_t)-1) + phys_addr += page_size; + virt_addr += page_size; + } + boot_numa_node_spin_unlock(boot_page_table_lock); + return page; +} + +long __init_recv +boot_map_phys_area(e2k_addr_t virt_phys_area_addr, e2k_size_t phys_area_size, + e2k_addr_t area_virt_addr, pgprot_t prot_flags, + e2k_size_t max_page_size, bool ignore_busy, bool host_map) +{ + const pt_level_t *pt_level; + e2k_addr_t phys_area_addr; + + if (virt_phys_area_addr == (e2k_addr_t) -1) { + phys_area_addr = virt_phys_area_addr; + } else { + phys_area_addr = boot_vpa_to_pa(virt_phys_area_addr); + + if (!IS_ALIGNED(phys_area_addr, max_page_size)) { + BOOT_WARNING("phys address 0x%lx isn't page size 0x%lx " + "aligned, so page size is reduced to 4K", + phys_area_addr, max_page_size); + max_page_size = PAGE_SIZE; + } + } + + if (!IS_ALIGNED(area_virt_addr, max_page_size)) { + BOOT_WARNING("virt address 0x%lx isn't page size 0x%lx " + "aligned, so page size is reduced to 4K", + phys_area_addr, max_page_size); + max_page_size = PAGE_SIZE; + } + + DebugMA("boot_map_phys_area() started for phys addr 0x%lx (0x%lx) " + "virt addr 0x%lx, size 0x%lx\n", + virt_phys_area_addr, 
		phys_area_addr, area_virt_addr,
		phys_area_size);

	/* Pick the page-table level whose page size matches the request. */
	pt_level = boot_find_pt_level_of_page_size(max_page_size);
	if (pt_level == NULL) {
		BOOT_BUG("Invalid page size 0x%lx", max_page_size);
		return -EINVAL;
	}

	return boot_do_map_phys_area(phys_area_addr, phys_area_size,
			area_virt_addr, prot_flags, pt_level,
			ignore_busy, host_map);
}

/*
 * Find the address in the list of pages currently mapped to the equal
 * virtual space into the TLB.
 *
 * @address:	virtual address to look up (equal-mapped, so == physical)
 * @tlb:	software model of the DTLB/ITLB contents kept at boot time
 * @pt_level:	page-table level describing the page size of the mapping
 *
 * Returns 0 when @address is already present in the simulated TLB,
 * 1 when it is not (the caller then has to create the entry).
 */

static int __init_recv
boot_find_equal_addr_tlb(e2k_addr_t address, e2k_tlb_t *tlb,
		const pt_level_t *pt_level)
{
	int line;
	e2k_tlb_line_t *tlb_line;
	int set;
	int entries = 0;	/* valid sets seen so far in the line */
	bool large_page_flag;

	/* Only pages of levels backed by the common DTLB can be tracked. */
	if (pt_level->dtlb_type != COMMON_DTLB_TYPE) {
		BOOT_BUG("Pages of page table level #%d cannot be placed "
			"into common DTLB\n",
			boot_get_pt_level_id(pt_level));
	}
	large_page_flag = pt_level->is_huge;

	DebugME("boot_find_equal_addr_tlb() started for addr 0x%lx "
		"TLB pointer is 0x%lx, large page flag is %d\n",
		address, tlb, large_page_flag);
	if (tlb->entries_num == 0) {
		DebugME("boot_find_equal_addr_tlb() TLB is empty: "
			"entries_num is %d, return (1)\n", tlb->entries_num);
		return (1);
	}
	/* The line index depends on the page size (large vs small pages). */
	line = BOOT_VADDR_TO_TLB_LINE_NUM(address, large_page_flag);
	tlb_line = &tlb->lines[line];
	DebugME("boot_find_equal_addr_tlb() TLB line is %d\n", line);
	if (tlb_line->sets_num == 0) {
		DebugME("boot_find_equal_addr_tlb() TLB line is empty: "
			"sets_num is %d, return (1)\n", tlb_line->sets_num);
		return (1);
	}
	for (set = 0; set < NATIVE_TLB_SETS_NUM; set++) {
		if (!tlb_line->sets[set].valid_bit) {
			DebugME("boot_find_equal_addr_tlb() TLB line set "
				"#%d is not valid, continue\n", set);
			continue;
		}
		entries ++;
		DebugME("boot_find_equal_addr_tlb() entries is now %d "
			"set #%d is valid, addr == 0x%lx\n",
			entries, set, tlb_line->sets[set].virt_addr);
		if (tlb_line->sets[set].virt_addr == address) {
			DebugME("boot_find_equal_addr_tlb() set "
				"#%d addr is the same, return(0)\n", set);
			return (0);
		}
		/* All occupied sets inspected: the address is not here. */
		if (entries >= tlb_line->sets_num) {
			DebugME("boot_find_equal_addr_tlb() entries %d "
				">= tlb_line->sets_num %d, return(1)\n",
				entries, tlb_line->sets_num);
			return (1);
		}
	}
	DebugME("boot_find_equal_addr_tlb() does not find virt addr 0x%lx "
		"into TLB, return(1)\n",
		address);
	return (1);
}

/*
 * Find the address in the list of pages currently mapped to the equal
 * virtual space.
 * All these pages are located into two TLBs contents simulation structures
 * (ITLB and DTLB models, per-CPU under CONFIG_SMP).
 *
 * Returns a mask of ITLB_ACCESS_MASK/DTLB_ACCESS_MASK bits for the TLBs
 * in which @address was found; bits of @tlb_mask not set in the result
 * still need a mapping to be written.
 */

static int __init_recv
boot_find_equal_addr(e2k_addr_t address, int tlb_mask,
		const pt_level_t *pt_level, int va)
{
	int mask = 0;
	int ret;

	if (tlb_mask & ITLB_ACCESS_MASK) {
		/* @va selects the virtual-addresses copy of the model */
		if (va)
#ifdef CONFIG_SMP
			ret = boot_find_equal_addr_tlb(address,
				&itlb_contents[boot_smp_processor_id()],
				pt_level);
#else	/* !CONFIG_SMP */
			ret = boot_find_equal_addr_tlb(address,
				&itlb_contents, pt_level);
#endif	/* CONFIG_SMP */
		else
			ret = boot_find_equal_addr_tlb(address,
				boot_itlb_contents, pt_level);
		if (ret == 0)
			mask |= ITLB_ACCESS_MASK;
	}
	if (tlb_mask & DTLB_ACCESS_MASK) {
		if (va)
#ifdef CONFIG_SMP
			ret = boot_find_equal_addr_tlb(address,
				&dtlb_contents[boot_smp_processor_id()],
				pt_level);
#else	/* !CONFIG_SMP */
			ret = boot_find_equal_addr_tlb(address,
				&dtlb_contents, pt_level);
#endif	/* CONFIG_SMP */
		else
			ret = boot_find_equal_addr_tlb(address,
				boot_dtlb_contents, pt_level);
		if (ret == 0)
			mask |= DTLB_ACCESS_MASK;
	}
	return mask;
}

/*
 * Get the TLB empty entry.
 *
 * Selects a free set in the TLB line of @address, marks it valid and
 * records the address and page-table level in the software TLB model.
 * Returns the chosen set number, or -1 when the line is full (or, for
 * large pages, the dedicated large-page set is occupied).
 */

static int __init_recv
boot_get_tlb_empty_set(e2k_addr_t address, e2k_tlb_t *tlb,
		const pt_level_t *pt_level)
{
	int line;
	e2k_tlb_line_t *tlb_line;
	int set;
	bool large_page_flag;

	if (pt_level->dtlb_type != COMMON_DTLB_TYPE) {
		BOOT_BUG("Pages of page table level #%d cannot be placed "
			"into common DTLB\n",
			boot_get_pt_level_id(pt_level));
	}
	large_page_flag = pt_level->is_huge;

	DebugME("boot_get_tlb_empty_set() started for addr 0x%lx "
		"large page flag is %d\n",
		address, large_page_flag);
	line = BOOT_VADDR_TO_TLB_LINE_NUM(address, large_page_flag);
	tlb_line = &tlb->lines[line];
	DebugME("boot_get_tlb_empty_set() TLB line is %d occupied sets "
		"num is %d\n",
		line, tlb_line->sets_num);
	if (tlb_line->sets_num >= NATIVE_TLB_SETS_NUM)
		return (-1);
	for (set = 0; set < NATIVE_TLB_SETS_NUM; set++) {
		/* NOTE(review): a duplicate address is only logged here and
		 * the walk continues, which may create a second entry for the
		 * same address in another set — presumably deliberate at
		 * boot time, but worth confirming. */
		if (tlb_line->sets[set].virt_addr == address) {
			DebugME("boot_get_tlb_empty_set() TLB line #%d set "
				"#%d was already occupied by the specified "
				"addr 0x%lx\n",
				line, set, address);
		}
		if (tlb_line->sets[set].valid_bit) {
			DebugME("boot_get_tlb_empty_set() TLB line #%d "
				"set #%d was already occupied by addr 0x%lx\n",
				line, set, tlb_line->sets[set].virt_addr);
			continue;
		}
		/* Large pages may live only in the dedicated set. */
		if (large_page_flag && set != NATIVE_TLB_LARGE_PAGE_SET_NO) {
			DebugME("boot_get_tlb_empty_set() TLB line #%d "
				"set #%d cannot be used for large page\n",
				line, set);
			continue;
		}
		tlb_line->sets[set].virt_addr = address;
		tlb_line->sets[set].valid_bit = 1;
		tlb_line->sets[set].pt_level_id =
			boot_get_pt_level_id(pt_level);
		tlb_line->sets_num ++;
		tlb->entries_num ++;
		DebugME("boot_get_tlb_empty_set() TLB line #%d "
			"set #%d is selected for addr 0x%lx\n",
			line, set, tlb_line->sets[set].virt_addr);
		return (set);
	}
	DebugME("boot_get_tlb_empty_set() could not find empty TLB set "
		"for addr 0x%lx\n",
		address);
	return (-1);
}

/*
 * Write PTE of the virtual address to Data TLB.
+ */ + +static int __init_recv +boot_write_pte_to_tlb(pte_t pte, tlb_tag_t prot_flags, e2k_addr_t virt_addr, + e2k_tlb_t *tlb, const pt_level_t *pt_level) +{ + int set_num; + tlb_addr_t tlb_addr; + tlb_tag_t tlb_tag; + bool large_page_flag; + + if (pt_level->dtlb_type != COMMON_DTLB_TYPE) { + BOOT_BUG("Pages of page table level #%d cannot be placed " + "into common DTLB\n", + boot_get_pt_level_id(pt_level)); + } + large_page_flag = pt_level->is_huge; + /* + * Create and write tag to the matching TLB tag register + */ + tlb_addr = tlb_addr_tag_access; + tlb_addr = boot_tlb_addr_set_vaddr_line_num(tlb_addr, virt_addr, + large_page_flag); + set_num = boot_get_tlb_empty_set(virt_addr, tlb, pt_level); + if (set_num < 0) { + BOOT_BUG("Could not find empty entry set of TLB for virtual address 0x%lx", + virt_addr); + return (1); + } + tlb_addr = boot_tlb_addr_set_set_num(tlb_addr, set_num); + tlb_tag = mk_tlb_tag_vaddr(virt_addr, prot_flags); + write_DTLB_tag_reg(tlb_addr, tlb_tag); + + /* + * Write pte to the matching TLB entry register + */ + tlb_addr = tlb_addr_set_entry_access(tlb_addr); + write_DTLB_entry_reg(tlb_addr, pte_val(pte)); + + return (0); +} + +/* + * Write PTE of the virtual address to Instruction TLB. 
+ * This operation is not implemented for ITLB, instead of this write PTE + * temporarly to page table entry + */ + +static int __init_recv +boot_write_pte_to_pt(pte_t pte, e2k_addr_t virt_addr, e2k_tlb_t *tlb, + const pt_level_t *pt_level, int va) +{ + pte_t *ptep; + int set_num; + + DebugME("boot_write_pte_to_pt() started for address 0x%lx and " + "pte == 0x%lx\n", + virt_addr, pte_val(pte)); + boot_numa_node_spin_lock(boot_page_table_lock); + ptep = boot_get_pte(virt_addr, pt_level, 1, va); + if (ptep == (pte_t *)-1) { + boot_numa_node_spin_unlock(boot_page_table_lock); + BOOT_BUG("Could not take PTE pointer to map virtual " + "address 0x%lx", + virt_addr); + return (1); + } + if (pt_level->is_huge) + pte = pte_set_large_size(pte); + else + pte = pte_set_small_size(pte); + DebugME("boot_write_pte_to_pt() ptep is 0x%lx == 0x%lx to write " + "new pte == 0x%lx\n", + ptep, pte_val(*ptep), pte_val(pte)); + if (!pte_none(*ptep)) { +#ifdef CONFIG_SMP + if (pte_val(*ptep) != pte_val(pte)) { + boot_numa_node_spin_unlock(boot_page_table_lock); +#endif /* CONFIG_SMP */ + BOOT_BUG("The PTE entry is not empty - virtual " + "address 0x%lx has been already occupied " + "by 0x%lx new pte is 0x%lx", + virt_addr, pte_val(*ptep), pte_val(pte)); + return (1); +#ifdef CONFIG_SMP + } else { + set_num = boot_get_tlb_empty_set(virt_addr, tlb, + pt_level); + boot_numa_node_spin_unlock(boot_page_table_lock); + if (set_num < 0) + DebugME("Could not find empty entry set " + "of TLB for virtual address 0x%lx", + virt_addr); + DebugME("boot_write_pte_to_pt() new pte is the " + "same as existed: return\n"); + return (0); + } +#endif /* CONFIG_SMP */ + } + set_num = boot_get_tlb_empty_set(virt_addr, tlb, pt_level); + if (set_num < 0) + DebugME("Could not find empty entry set of TLB for virtual address 0x%lx", + virt_addr); + + boot_set_pte(virt_addr, ptep, pte, pt_level, false); + + boot_numa_node_spin_unlock(boot_page_table_lock); + + DebugME("boot_write_pte_to_pt() set ptep 0x%lx to new pte 
0x%lx\n", + ptep, pte_val(*ptep)); + + return (0); +} + +/* + * Write PTE of page mapped to the equal virtual address to TLB or page table. + * Write to ITLB is not implemented - pte is temporarly written to page table + * entry. + */ + +static int __init_recv +boot_write_equal_addr_pte(pte_t pte, tlb_tag_t prot_flags, e2k_addr_t address, + int tlb_mask, const pt_level_t *pt_level, int va) +{ + int ret; + + if (tlb_mask & ITLB_ACCESS_MASK) { + if (va) +#ifdef CONFIG_SMP + ret = boot_write_pte_to_pt(pte, address, + &itlb_contents[boot_smp_processor_id()], + pt_level, va); +#else /* !CONFIG_SMP */ + ret = boot_write_pte_to_pt(pte, address, + &itlb_contents, pt_level, va); +#endif /* CONFIG_SMP */ + else + ret = boot_write_pte_to_pt( + pte, address, boot_itlb_contents, pt_level, va); + if (ret != 0) + return (ret); + } + if (tlb_mask & DTLB_ACCESS_MASK) { + if (va) +#ifdef CONFIG_SMP + ret = boot_write_pte_to_tlb(pte, prot_flags, address, + &dtlb_contents[boot_smp_processor_id()], + pt_level); +#else /* !CONFIG_SMP */ + ret = boot_write_pte_to_tlb(pte, prot_flags, address, + &dtlb_contents, pt_level); +#endif /* CONFIG_SMP */ + else + ret = boot_write_pte_to_tlb(pte, prot_flags, address, + boot_dtlb_contents, pt_level); + if (ret != 0) + return (ret); + } + return (0); +} + +/* + * Map the physical area to the equal virtual space. 
+ * area_addr == area_phys_addr == area_virt_addr + * PTEs of mapped pages should write only to TLB + */ + +int __init_recv +boot_map_to_equal_virt_area(e2k_addr_t area_addr, e2k_size_t area_size, + pgprot_t prot_flags, tlb_tag_t tlb_prot_flags, + e2k_size_t max_page_size, int tlb_mask, int va) +{ + const pt_level_t *pt_level; + pte_t pte; + pgprot_t prot; + e2k_addr_t phys_addr; + e2k_addr_t virt_addr; + int pages_num; + int page; + int cur_tlb_mask; + int ret; + + DebugME("boot_map_to_equal_virt_area() started for addr 0x%lx " + "size 0x%lx\n", + area_addr, area_size); + + pt_level = boot_find_pt_level_of_page_size(max_page_size); + if (pt_level == NULL) { + BOOT_BUG("Invalid page size 0x%lx", max_page_size); + return -EINVAL; + } + if (pt_level->is_huge) + prot = pgprot_large_size_set(prot_flags); + else + prot = pgprot_small_size_set(prot_flags); + phys_addr = _PAGE_ALIGN_UP(area_addr, max_page_size); + virt_addr = phys_addr; + pages_num = (_PAGE_ALIGN_DOWN(area_addr + area_size, + max_page_size) - phys_addr) / max_page_size; + DebugME("boot_map_to_equal_virt_area() virt addr will be 0x%lx " + "page size is 0x%lx\n", + virt_addr, max_page_size); + + for (page = 0; page < pages_num; page++) { + ret = boot_find_equal_addr(virt_addr, tlb_mask, pt_level, va); + DebugME("boot_map_to_equal_virt_area() " + "boot_find_equal_addr(0x%lx) returned mask 0x%x " + "source mask is 0x%x\n", + virt_addr, ret, tlb_mask); + if (ret == tlb_mask) + continue; + cur_tlb_mask = tlb_mask ^ ret; + pte = mk_pte_phys(phys_addr, prot); + DebugME("boot_map_to_equal_virt_area() will start " + "boot_write_equal_addr_pte() for addr 0x%lx " + "pte 0x%lx\n", + virt_addr, pte_val(pte)); + ret = boot_write_equal_addr_pte(pte, tlb_prot_flags, virt_addr, + cur_tlb_mask, pt_level, va); + if (ret != 0) { + BOOT_BUG("Could not write PTE 0x%lx of virtual " + "addr 0x%lx to TLB", + pte_val(pte), virt_addr); + return 0; + } + phys_addr += max_page_size; + virt_addr = phys_addr; + } + 
	DebugME("boot_map_to_equal_virt_area() returns with mapped "
		"pages num %d\n", page);
	return page;
}

/*
 * Flush the TLB entries mapping the virtually mapped linear page
 * table corresponding to address.
 */
void
init_flush_tlb_pgtable(e2k_addr_t address)
{

	/* flush virtual mapping of PTE entries (third level of page table) */
	flush_TLB_kernel_page(
		pte_virt_offset(_PAGE_ALIGN_UP(address, PTE_SIZE)));

	/* flush virtual mapping of PMD entries (second level of page table) */
	flush_TLB_kernel_page(
		pmd_virt_offset(_PAGE_ALIGN_UP(address, PMD_SIZE)));

	/* flush virtual mapping of PUD entries (first level of page table) */
	flush_TLB_kernel_page(
		pud_virt_offset(_PAGE_ALIGN_UP(address, PUD_SIZE)));
}

/*
 * Clear PTE of the virtual address into the TLB.
 * Always returns 0.
 */

static int __init_recv
init_clear_tlb_entry(e2k_addr_t virt_addr, int tlb_mask)
{

	/*
	 * Clear TLB entry
	 */
	flush_TLB_kernel_page(virt_addr);
	init_flush_tlb_pgtable(virt_addr);

	/*
	 * Clear ICACHE lines if TLB is ITLB
	 */
	if (tlb_mask & ITLB_ACCESS_MASK) {
		flush_ICACHE_kernel_line(virt_addr);
	}

	return 0;
}

/*
 * Clear all pages which were temporarily written to TLB only.
 * Walks the software DTLB model, flushes every recorded entry from the
 * hardware TLB and resets the model.  Returns 0 on success, 1 on failure.
 */
static int __init_recv
init_clear_temporary_tlb(e2k_tlb_t *tlb, int tlb_mask)
{
	int line;
	int set;
	e2k_tlb_line_t *tlb_line;
	int ret;

	if (tlb->entries_num <= 0)
		return (0);
	for (line = 0; line < NATIVE_TLB_LINES_NUM; line++) {
		tlb_line = &tlb->lines[line];
		if (tlb_line->sets_num == 0)
			continue;
		for (set = 0; set < NATIVE_TLB_SETS_NUM; set++) {
			if (!tlb_line->sets[set].valid_bit)
				continue;
			ret = init_clear_tlb_entry(
					tlb_line->sets[set].virt_addr,
					tlb_mask);
			if (ret != 0) {
				BOOT_BUG("Could not clear ITLB virtual address 0x%lx from line %d set %d",
					tlb_line->sets[set].virt_addr,
					line, set);
				return (1);
			}
			/* Reset the software-model entry. */
			tlb_line->sets[set].valid_bit = 0;
			tlb_line->sets[set].virt_addr = 0;
			tlb_line->sets[set].pt_level_id = 0;
			tlb_line->sets_num --;
			tlb->entries_num --;
			if (tlb_line->sets_num <= 0)
				break;
		}
		if (tlb->entries_num <= 0)
			break;
	}
	return (0);
}

/*
 * Clear all pages which were temporarily written to page table.
 * Write to ITLB is not implemented - PTEs are temporarily written to page
 * table entries, so both the PT entry and the TLB entry are cleared here.
 * Returns 0 on success, 1 on failure.
 */
static int __init_recv
init_clear_temporary_pt(e2k_tlb_t *tlb, int tlb_mask)
{
	int line;
	int set;
	e2k_tlb_line_t *tlb_line;
	int ret;

	if (tlb->entries_num <= 0)
		return 0;
	for (line = 0; line < NATIVE_TLB_LINES_NUM; line++) {
		tlb_line = &tlb->lines[line];
		if (tlb_line->sets_num == 0)
			continue;
		for (set = 0; set < NATIVE_TLB_SETS_NUM; set++) {
			if (!tlb_line->sets[set].valid_bit)
				continue;
			/* On SMP another CPU may already have cleared the
			 * shared PT entry, so its absence is tolerated. */
			ret = init_clear_ptes(tlb_line->sets[set].virt_addr,
#ifndef	CONFIG_SMP
				false,	/* do not ignore the page absence */
#else
				true,	/* ignore the page absence */
#endif	/* CONFIG_SMP */
				tlb_line->sets[set].pt_level_id);
			if (ret != 0) {
				BOOT_BUG("Could not clear PT virtual address 0x%lx from line %d set %d",
					tlb_line->sets[set].virt_addr,
					line, set);
				return 1;
			}
			ret = init_clear_tlb_entry(
					tlb_line->sets[set].virt_addr,
					tlb_mask);
			if (ret != 0) {
				BOOT_BUG("Could not clear ITLB virtual address 0x%lx from line %d set %d",
					tlb_line->sets[set].virt_addr,
					line, set);
				return (1);
			}
			tlb_line->sets[set].valid_bit = 0;
			tlb_line->sets[set].virt_addr = 0;
			tlb_line->sets[set].pt_level_id = 0;
			tlb_line->sets_num --;
			tlb->entries_num --;
			if (tlb_line->sets_num <= 0)
				break;
		}
		if (tlb->entries_num <= 0)
			break;
	}
	return (0);
}

/*
 * Clear all PTEs, which were temporarily written to TLB or page table.
+ * Write to ITLB is not implemented - PTEs are temporarly written to page table + * entries + */ + +int __init_recv +init_clear_temporary_ptes(int tlb_mask, int cpuid) +{ + int ret; + + if (tlb_mask & ITLB_ACCESS_MASK) { +#ifndef CONFIG_SMP + ret = init_clear_temporary_pt(&itlb_contents, ITLB_ACCESS_MASK); +#else + ret = init_clear_temporary_pt(&itlb_contents[cpuid], + ITLB_ACCESS_MASK); +#endif /* CONFIG_SMP */ + if (ret != 0) + return ret; + } + if (tlb_mask & DTLB_ACCESS_MASK) { +#ifndef CONFIG_SMP + ret = init_clear_temporary_tlb(&dtlb_contents, + DTLB_ACCESS_MASK); +#else + ret = init_clear_temporary_tlb(&dtlb_contents[cpuid], + DTLB_ACCESS_MASK); +#endif /* CONFIG_SMP */ + if (ret != 0) + return ret; + } + return (0); +} diff --git a/arch/e2k/p2v/boot_param.c b/arch/e2k/p2v/boot_param.c new file mode 100644 index 000000000000..3b789d393aff --- /dev/null +++ b/arch/e2k/p2v/boot_param.c @@ -0,0 +1,343 @@ +/* + * Boot-time command line parsing. + * + * Copyright (C) 2011-2013 Pavel V. Panteleev (panteleev_p@mcst.ru) + */ + +#include +#include +#include +#include + +#include + +#include "boot_string.h" + +/* + * One should store original cmdline passed by boot to restore it in + * setup_arch() function. 
 */
/* Raw copy of the boot command line, restored in setup_arch(). */
char __initdata_recv saved_boot_cmdline[COMMAND_LINE_SIZE] = {0};

/*
 * This function is based on simple_guess_base function from lib/vsprintf.c
 *
 * Guess the numeric base of the string @cp: "0x.." -> 16, "0.." -> 8,
 * anything else -> 10.
 */
static unsigned int __init_recv boot_simple_guess_base(const char *cp)
{
	if (cp[0] == '0') {
		if (BOOT_TOLOWER(cp[1]) == 'x' && boot_isxdigit(cp[2]))
			return 16;
		else
			return 8;
	} else {
		return 10;
	}
}

/*
 * This function is based on simple_strtoull function from lib/vsprintf.c
 *
 * boot_simple_strtoull - convert a string to an unsigned long long
 * @cp: The start of the string
 * @endp: A pointer to the end of the parsed string will be placed here
 * @base: The number base to use (0 means auto-detect from the prefix)
 */
unsigned long long __init_recv
boot_simple_strtoull(const char *cp, char **endp, unsigned int base)
{
	unsigned long long result = 0;

	if (!base)
		base = boot_simple_guess_base(cp);

	if (base == 16 && cp[0] == '0' && BOOT_TOLOWER(cp[1]) == 'x')
		cp += 2;

	/* isxdigit() accepts every digit of any supported base (<= 16);
	 * digits too large for @base terminate the loop below. */
	while (boot_isxdigit(*cp)) {
		unsigned int value;

		value = boot_isdigit(*cp) ?
			*cp - '0' : BOOT_TOLOWER(*cp) - 'a' + 10;
		if (value >= base)
			break;
		result = result * base + value;
		cp++;
	}
	if (endp)
		*endp = (char *)cp;

	return result;
}

/*
 * This function is based on simple_strtoul function from lib/vsprintf.c
 *
 * boot_simple_strtoul - convert a string to an unsigned long
 * @cp: The start of the string
 * @endp: A pointer to the end of the parsed string will be placed here
 * @base: The number base to use
 */
unsigned long __init_recv
boot_simple_strtoul(const char *cp, char **endp, unsigned int base)
{
	return boot_simple_strtoull(cp, endp, base);
}

/*
 * This function is based on simple_strtol function from lib/vsprintf.c
 *
 * boot_simple_strtol - convert a string to a signed long
 * @cp: The start of the string
 * @endp: A pointer to the end of the parsed string will be placed here
 * @base: The number base to use
 */
long __init_recv
boot_simple_strtol(const char *cp, char **endp, unsigned int base)
{
	if (*cp == '-')
		return -boot_simple_strtoul(cp + 1, endp, base);

	return boot_simple_strtoul(cp, endp, base);
}

/*
 * This function is based on simple_strtoll function from lib/vsprintf
 *
 * boot_simple_strtoll - convert a string to a signed long long
 * @cp: The start of the string
 * @endp: A pointer to the end of the parsed string will be placed here
 * @base: The number base to use
 */
long long __init_recv
boot_simple_strtoll(const char *cp, char **endp, unsigned int base)
{
	if (*cp == '-')
		return -boot_simple_strtoull(cp + 1, endp, base);

	return boot_simple_strtoull(cp, endp, base);
}

/*
 * This function is based on get_option function from lib/cmdline.c
 *
 * boot_get_option - Parse integer from an option string
 * @str: option string
 * @pint: (output) integer value parsed from @str
 *
 * Read an int from an option string; if available accept a subsequent
 * comma as well.
+ * + * Return values: + * 0 - no int in string + * 1 - int found, no subsequent comma + * 2 - int found including a subsequent comma + * 3 - hyphen found to denote a range + */ +int __init_recv boot_get_option(char **str, int *pint) +{ + char *cur = *str; + + if (!cur || !(*cur)) + return 0; + *pint = boot_simple_strtol (cur, str, 0); + if (cur == *str) + return 0; + if (**str == ',') { + (*str)++; + return 2; + } + if (**str == '-') + return 3; + + return 1; +} + +/* + * This function is based on skip_spaces function from lib/string.c + */ +char __init_recv *boot_skip_spaces(const char *str) +{ + while (boot_isspace(*str)) + ++str; + return (char *)str; +} + +/* + * This function is based on next_arg function from kernel/params.c + * + * It uses boot_skip_spaces and boot_isspace instead of original funcs. + * You can use " around spaces, but can't escape ". + * Hyphens and underscores equivalent in parameter names. + */ +static char __init_recv +*boot_next_arg(char *args, char **param, char **val) +{ + unsigned int i, equals = 0; + int in_quote = 0, quoted = 0; + char *next; + + if (*args == '"') { + args++; + in_quote = 1; + quoted = 1; + } + + for (i = 0; args[i]; i++) { + if (boot_isspace(args[i]) && !in_quote) + break; + if (equals == 0) { + if (args[i] == '=') + equals = i; + } + if (args[i] == '"') + in_quote = !in_quote; + } + + *param = args; + if (!equals) + *val = NULL; + else { + args[equals] = '\0'; + *val = args + equals + 1; + + /* Don't include quotes in value. */ + if (**val == '"') { + (*val)++; + if (args[i-1] == '"') + args[i-1] = '\0'; + } + if (quoted && args[i-1] == '"') + args[i-1] = '\0'; + } + + if (args[i]) { + args[i] = '\0'; + next = args + i + 1; + } else + next = args + i; + + /* Chew up trailing spaces. 
*/ + return boot_skip_spaces(next); +} + +/* + * This function is based on do_early_param function in init/main.c + */ +static int __init_recv +boot_do_param(char *param, char *val) +{ + boot_kernel_param_t *p; + boot_kernel_param_t *start = + boot_vp_to_pp((boot_kernel_param_t *)__boot_setup_start); + boot_kernel_param_t *end = + boot_vp_to_pp((boot_kernel_param_t *)__boot_setup_end); + + for (p = start; p < end; p++) { + if (boot_strcmp(param, boot_vp_to_pp(p->str)) == 0) { + int ret; + + ret = boot_func_to_pp(p->setup_func)(val); + if (ret != 0 && ret != 2) { + do_boot_printk(KERN_WARNING + "Malformed boot option '%s'\n", param); + return 1; + } else { + return ret; + } + } + } + + return 0; +} + +/* + * This function is based on parse_args function from kernel/params.c + * + * It uses boot_skip_spaces instead of skip_spaces. + * Args looks like "foo=bar,bar2 baz=fuz wiz". + */ +static int __init_recv +boot_parse_args(const char *name, char *args, char *boot_cmdline) +{ + char *param, *val, *args_start = args; + int del_cnt = 0; + + /* Chew leading spaces */ + args = boot_skip_spaces(args); + + while (*args) { + char *args_prev, *del_start, *del_end; + int cmdline_len, ret; + + args_prev = args; + args = boot_next_arg(args, ¶m, &val); + ret = boot_do_param(param, val); + + switch (ret) { + case 0: + /* boot param not found */ + break; + case 1: + /* + * boot param found: we should exclude it from kernel + * cmd line + */ + del_start = boot_cmdline + + (int)(args_prev - args_start) - del_cnt; + del_end = boot_cmdline + + (int)(args - args_start) - del_cnt; + cmdline_len = boot_strlen(boot_cmdline); + boot_fast_memcpy(del_start, del_end, + cmdline_len - (int)(del_end - boot_cmdline)); + boot_fast_memset(boot_cmdline + cmdline_len - + args + args_prev, + 0, 1); + del_cnt += args - args_prev; + break; + case 2: + /* boot param found, but do not exclude it from */ + /* kernel cmd line to parse it one more time later */ + break; + default: + do_boot_printk(KERN_ERR + 
"%s: `%s' invalid for parameter `%s'\n", + name, val ?: "", param); + return ret; + } + } + + /* All parsed OK. */ + return 0; +} + +void __init_recv +boot_native_parse_param(bootblock_struct_t *bootblock) +{ + char tmp_cmdline[COMMAND_LINE_SIZE]; + char *boot_cmdline; + int boot_cmdline_len; + +#ifdef CONFIG_CMDLINE_OVERRIDE + boot_strlcpy(bootblock->info.kernel_args_string, + BOOT_KERNEL_ARGS_STRING_EX_SIGNATURE, + KERNEL_ARGS_STRING_EX_SIGN_SIZE); + boot_strlcpy(bootblock->info.bios.kernel_args_string_ex, + boot_va_to_pa(CONFIG_CMDLINE), + KSTRMAX_SIZE_EX); +#endif + + if (!boot_strncmp(bootblock->info.kernel_args_string, + BOOT_KERNEL_ARGS_STRING_EX_SIGNATURE, + KERNEL_ARGS_STRING_EX_SIGN_SIZE)) { + /* Extended command line (512 bytes) */ + boot_cmdline = bootblock->info.bios.kernel_args_string_ex; + boot_cmdline_len = KSTRMAX_SIZE_EX; + } else { + /* Standart command line (128 bytes) */ + boot_cmdline = bootblock->info.kernel_args_string; + boot_cmdline_len = KSTRMAX_SIZE; + } + + boot_strlcpy(boot_saved_boot_cmdline, boot_cmdline, boot_cmdline_len); + boot_strlcpy(tmp_cmdline, boot_cmdline, boot_cmdline_len); + boot_parse_args("boot options", tmp_cmdline, boot_cmdline); +} diff --git a/arch/e2k/p2v/boot_phys.c b/arch/e2k/p2v/boot_phys.c new file mode 100644 index 000000000000..93517cb6fa0b --- /dev/null +++ b/arch/e2k/p2v/boot_phys.c @@ -0,0 +1,2617 @@ +/* $Id: boot_phys.c,v 1.17 2009/06/29 10:39:10 atic Exp $ + * + * Simple boot-time physical memory accounting and memory allocator. + * Discontiguous memory supports on memory banks level. + * + * Copyright 2001 Salavat S. 
Guiliazov (atic@mcst.ru) + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "boot_string.h" + +#undef DEBUG_BOOT_MODE +#undef boot_printk +#undef DebugB +#define DEBUG_BOOT_MODE 0 /* Boot process */ +#define boot_printk if (DEBUG_BOOT_MODE) do_boot_printk +#define DebugB if (DEBUG_BOOT_MODE) printk + +#undef DEBUG_PHYS_BANK_MODE +#undef DebugBank +#define DEBUG_PHYS_BANK_MODE 0 /* Reserve bank of memory */ +#define DebugBank if (DEBUG_PHYS_BANK_MODE) do_boot_printk + +#undef DEBUG_DELETE_MEM_MODE +#undef DebugDEL +#define DEBUG_DELETE_MEM_MODE 0 /* Delete area of memory bank */ +#define DebugDEL if (DEBUG_DELETE_MEM_MODE) do_boot_printk + +#undef DEBUG_PHYS_MAP_MODE +#undef DebugMAP +#define DEBUG_PHYS_MAP_MODE 0 /* Physical memory map */ +#define DebugMAP if (DEBUG_PHYS_MAP_MODE) do_boot_printk + +#undef DEBUG_MAP_PHYS_MEM_MODE +#undef DebugMP +#define DEBUG_MAP_PHYS_MEM_MODE 0 /* Physical memory mapping to virtual */ +#define DebugMP if (DEBUG_MAP_PHYS_MEM_MODE) do_boot_printk + +#undef DEBUG_REMAP_LOW_MEM_MODE +#undef DebugRML +#define DEBUG_REMAP_LOW_MEM_MODE 0 /* Low physical memory remapping */ +#define DebugRML if (DEBUG_REMAP_LOW_MEM_MODE) do_boot_printk + +#undef DEBUG_REMAP_SUM_MODE +#undef DebugRMLT +#define DEBUG_REMAP_SUM_MODE 0 /* Sum of low memory remapping */ +#define DebugRMLT if (DEBUG_REMAP_SUM_MODE) do_boot_printk + +#undef DEBUG_MEM_ALLOC_MODE +#undef DebugAM +#define DEBUG_MEM_ALLOC_MODE 0 /* Physical memory allocation */ +#define DebugAM if (DEBUG_MEM_ALLOC_MODE) do_boot_printk + +#undef DEBUG_NUMA_MODE +#undef DebugNUMA +#define DEBUG_NUMA_MODE 0 /* Boot NUMA */ +#define DebugNUMA if (DEBUG_NUMA_MODE) do_boot_printk + +/* + * The array 'boot_mem_bitmaps[]' is a buffer for boot maps of physical memory + * banks. The size of array is restricted by memory needed to boot tasks only. + * This size is constant described by '#define BOOT_MAX_PHYS_MEM_SIZE'. 
+ * It is needed to allocate the boot bitmap array statically into the kernel + * image. Creation of full physical memory map can be completed later, + * when virtual memory support will be ready. + */ + +#ifndef CONFIG_NUMA +#define boot_node_dma_low_mem_reseerved 0 +#define boot_node_low_mem_remapped 0 +#else /* CONFIG_NUMA */ +static boot_spinlock_t __initdata boot_node_low_mem_lock[MAX_NUMNODES] = { + [0 ... (MAX_NUMNODES-1)] = __BOOT_SPIN_LOCK_UNLOCKED +}; +static int __initdata node_dma_low_mem_reseerved[MAX_NUMNODES] = { 0 }; +#define boot_node_dma_low_mem_reseerved \ + boot_get_vo_value( \ + node_dma_low_mem_reseerved[boot_numa_node_id()]) +static int __initdata node_low_mem_remapped[MAX_NUMNODES] = { 0 }; +#define boot_node_low_mem_remapped \ + boot_get_vo_value(node_low_mem_remapped[boot_numa_node_id()]) +#endif /* ! CONFIG_NUMA */ + +e2k_addr_t start_of_phys_memory; /* start address of physical memory */ +e2k_addr_t end_of_phys_memory; /* end address + 1 of physical memory */ +e2k_size_t pages_of_phys_memory; /* number of pages of physical memory */ +e2k_addr_t kernel_image_size; /* size of full kernel image in the */ + /* memory ("text" + "data" + "bss") */ +#ifdef CONFIG_SMP +#ifndef CONFIG_NUMA +static boot_spinlock_t boot_phys_mem_lock = __BOOT_SPIN_LOCK_UNLOCKED; +#define boot_the_node_spin_lock(node, lock) boot_spin_lock(&(lock)) +#define boot_the_node_spin_unlock(node, lock) boot_spin_unlock(&(lock)) +#else /* CONFIG_NUMA */ +static boot_spinlock_t __initdata_recv boot_phys_mem_lock[MAX_NUMNODES] = { + [0 ... (MAX_NUMNODES-1)] = __BOOT_SPIN_LOCK_UNLOCKED +}; +#define boot_the_node_spin_lock(node, lock) \ + boot_spin_lock(&((lock)[node])) +#define boot_the_node_spin_unlock(node, lock) \ + boot_spin_unlock(&((lock)[node])) +#endif /* ! CONFIG_NUMA */ +#else /* ! 
CONFIG_SMP */ +#define boot_phys_mem_lock +#define boot_the_node_spin_lock(node, lock) +#define boot_the_node_spin_unlock(node, lock) +#endif /* CONFIG_SMP */ + +void boot_expand_phys_banks_reserved_areas(void) +{ + boot_phys_mem_t *all_nodes_mem = NULL; + int nodes_num; + int cur_nodes_num = 0; + int node; + int bank; + + all_nodes_mem = boot_vp_to_pp((boot_phys_mem_t *)boot_phys_mem); + nodes_num = boot_phys_mem_nodes_num; + + for (node = 0; node < L_MAX_MEM_NUMNODES; node++) { + node_phys_mem_t *node_mem = &all_nodes_mem[node]; + boot_phys_bank_t *node_banks; + boot_phys_bank_t *phys_bank; + + if (cur_nodes_num >= nodes_num) + break; /* no more nodes with memory */ + if (node_mem->pfns_num == 0) + continue; /* node has not memory */ + + node_banks = node_mem->banks; + cur_nodes_num++; + + for (bank = node_mem->first_bank; + bank >= 0; + bank = phys_bank->next) { + e2k_addr_t area_base; + + phys_bank = &node_banks[bank]; + + if (phys_bank->pages_num == 0) + BOOT_BUG("Node #%d bank #%d at the list " + "has not memory pages", + node, bank); + + area_base = (e2k_addr_t)boot_alloc_phys_mem( + BOOT_RESERVED_AREAS_SIZE, PAGE_SIZE, + boot_time_data_mem_type); + boot_memcpy((void *)area_base, phys_bank->busy_areas, + sizeof(phys_bank->busy_areas_prereserved)); + DebugBank("Node #%d bank #%d busy_areas array at " + "address 0x%lx and size 0x%lx moved to 0x%lx " + "and expanded to 0x%lx bytes\n", + node, bank, phys_bank->busy_areas, + sizeof(phys_bank->busy_areas_prereserved), + area_base, BOOT_RESERVED_AREAS_SIZE); + phys_bank->busy_areas = (e2k_busy_mem_t *)area_base; + } + } +} + +/* + * Create pages maps of physical memory banks. 
+ */ +e2k_size_t __init +boot_do_create_physmem_maps(boot_info_t *boot_info, bool create) +{ + boot_phys_mem_t *all_nodes_mem = NULL; + int nodes_num; + int cur_nodes_num = 0; + e2k_size_t pages_num = 0; + e2k_addr_t top_addr; + int node; + short bank; + + all_nodes_mem = boot_vp_to_pp((boot_phys_mem_t *)boot_phys_mem); + boot_start_of_phys_memory = 0xffffffffffffffffUL; + boot_end_of_phys_memory = 0x0000000000000000UL; + + nodes_num = boot_phys_mem_nodes_num; + for (node = 0; node < L_MAX_MEM_NUMNODES; node ++) { + node_phys_mem_t *node_mem = &all_nodes_mem[node]; + boot_phys_bank_t *node_banks; + boot_phys_bank_t *phys_bank; + + if (cur_nodes_num >= nodes_num) + break; /* no more nodes with memory */ + if (node_mem->pfns_num == 0) + continue; /* node has not memory */ + node_banks = node_mem->banks; + DebugMAP("Node #%d: physical memory banks number %d\n", + node, node_mem->banks_num); + cur_nodes_num ++; + for (bank = node_mem->first_bank; + bank >= 0; + bank = phys_bank->next) { + phys_bank = &node_banks[bank]; + if (phys_bank->pages_num == 0) { + /* bank in the list has not pages */ + BOOT_BUG("Node #%d bank #%d at the list " + "has not memory pages", + node, bank); + } + if (phys_bank->base_addr < boot_start_of_phys_memory) + boot_start_of_phys_memory = + phys_bank->base_addr; + top_addr = phys_bank->base_addr + + phys_bank->pages_num * PAGE_SIZE; + if (boot_end_of_phys_memory < top_addr) + boot_end_of_phys_memory = top_addr; + pages_num += phys_bank->pages_num; + if (create && is_addr_from_low_memory(top_addr -1)) { + /* it is low memory bank */ + phys_bank->maybe_remapped_to_hi = + boot_has_lo_bank_remap_to_hi(phys_bank, + boot_info); + } + DebugMAP("Node %d bank #%d: from addr 0x%lx to 0x%lx, " + "phys memory start 0x%lx end 0x%lx\n", + node, bank, phys_bank->base_addr, top_addr, + boot_start_of_phys_memory, + boot_end_of_phys_memory); + } + } + boot_pages_of_phys_memory = pages_num; + DebugMAP("Total phys memory pages number is 0x%lx on %d node(s), " + 
"start from 0x%lx to end 0x%lx\n", + pages_num, nodes_num, + boot_start_of_phys_memory, boot_end_of_phys_memory); + return pages_num; +} + +/* lock should be taken by caller */ +static inline void __init +boot_delete_busy_area(int node, e2k_phys_bank_t *phys_bank, + e2k_busy_mem_t *busy_area, short area_id, e2k_busy_mem_t *prev_area) +{ + if (prev_area == NULL) { + /* area should be at head of the list */ + if (phys_bank->first_area != area_id) { + BOOT_BUG("Node #%d busy area #%d from 0x%lx to 0x%lx " + "should be at head, but head point to area #%d", + node, area_id, + phys_bank->base_addr + + (busy_area->start_page << PAGE_SHIFT), + phys_bank->base_addr + + (busy_area->start_page + + busy_area->pages_num) << + PAGE_SHIFT); + } + phys_bank->first_area = busy_area->next; + } else { + /* previous area should point to the deleted area */ + if (prev_area->next != area_id) { + BOOT_BUG("Node #%d busy area #%d from 0x%lx to 0x%lx " + "should be pointed by previous area, " + "but it point to area #%d", + node, area_id, + phys_bank->base_addr + + (busy_area->start_page << PAGE_SHIFT), + phys_bank->base_addr + + (busy_area->start_page + + busy_area->pages_num) << + PAGE_SHIFT, + prev_area->next); + } + prev_area->next = busy_area->next; + } + phys_bank->busy_areas_num--; + /* busy area is now free, so increment number of free pages at bank */ + atomic64_add(busy_area->pages_num, &phys_bank->free_pages_num); + busy_area->next = -1; + busy_area->pages_num = 0; +} + +/* lock should be taken by caller */ +static inline short __init +boot_get_free_busy_area(int node, e2k_phys_bank_t *phys_bank) +{ + e2k_busy_mem_t *busy_areas; + short area; + + busy_areas = phys_bank->busy_areas; + for (area = 0; area < E2K_MAX_PRERESERVED_AREAS; area++) { + e2k_busy_mem_t *cur_busy_area = &busy_areas[area]; + + if (cur_busy_area->pages_num == 0) + /* found empty entry at table */ + return area; + } + if (phys_bank->busy_areas_num >= E2K_MAX_PRERESERVED_AREAS) { + BOOT_WARNING("Node #%d number 
of busy areas %d exceeds " + "permissible limit %d", + node, phys_bank->busy_areas_num, + E2K_MAX_PRERESERVED_AREAS); + return -1; + } + BOOT_BUG("Node #%d number of busy areas is only %d from %d, " + "but could not find empty entry at table", + node, phys_bank->busy_areas_num, E2K_MAX_PRERESERVED_AREAS); + return -1; +} + +static bool __init_recv +boot_try_merge_bank_area(int node_id, boot_phys_bank_t *phys_bank, + e2k_busy_mem_t *prev_busy_area, + e2k_size_t start_page, long pages_num, + busy_mem_type_t mem_type, unsigned short flags) +{ + if (!(flags & BOOT_MERGEABLE_ALLOC_MEM)) + return false; /* new area is not mergeable */ + + if (prev_busy_area == NULL) { + BOOT_BUG("Node #%d new reserved area from 0x%lx to 0x%lx " + "cannot be merged with empty previous area\n", + node_id, + phys_bank->base_addr + (start_page << PAGE_SHIFT), + phys_bank->base_addr + (start_page << PAGE_SHIFT) + + (pages_num << PAGE_SHIFT)); + } + if (!(prev_busy_area->flags & BOOT_MERGEABLE_ALLOC_MEM)) + return false; /* previous area is not mergeable */ + if (mem_type != prev_busy_area->type) + return false; /* areas with different memory type */ + /* cannot be merged */ + + if (pages_num > phys_bank->pages_num - start_page) + return false; /* merged area cannot intersect */ + /* bank boundaries */ + if (start_page != prev_busy_area->start_page + + prev_busy_area->pages_num) { + BOOT_BUG("Node #%d new merged area start 0x%lx is not " + "adjacent to the end 0x%lx of previous area\n", + node_id, + phys_bank->base_addr + (start_page << PAGE_SHIFT), + phys_bank->base_addr + + ((prev_busy_area->start_page + + prev_busy_area->pages_num) << + PAGE_SHIFT)); + } + + prev_busy_area->pages_num += pages_num; + DebugAM("Node #%d new area from 0x%lx to 0x%lx was merged and " + "total area is now from 0x%lx to 0x%lx\n", + node_id, + phys_bank->base_addr + (start_page << PAGE_SHIFT), + phys_bank->base_addr + ((start_page + pages_num) << PAGE_SHIFT), + phys_bank->base_addr + + (prev_busy_area->start_page 
<< PAGE_SHIFT), + phys_bank->base_addr + + ((prev_busy_area->start_page + + prev_busy_area->pages_num) << PAGE_SHIFT)); + + return true; /* new area is successfully merged */ +} + +static void __init_recv +boot_move_bank_busy_areas_part(int node_id, boot_phys_mem_t *node_mem, + short old_bank, short new_bank, + e2k_addr_t start_addr, e2k_addr_t end_addr, + unsigned short flags) +{ + boot_phys_bank_t *node_banks; + boot_phys_bank_t *phys_bank; + e2k_busy_mem_t *busy_area; + short area, next_area; + + node_banks = node_mem->banks; + phys_bank = &node_banks[old_bank]; + + /* loop on busy areas of old memory bank to move them */ + /* to created new memory bank */ + for (area = phys_bank->first_area; area >= 0; area = next_area) { + e2k_size_t start, end; + + busy_area = &phys_bank->busy_areas[area]; + if (busy_area->pages_num == 0) { + BOOT_BUG("Node #%d old bank #%d empty physical memory " + "busy area #%d cannot be in the list", + node_id, old_bank, area); + } + start = busy_area->start_page; + end = start + busy_area->pages_num; + DebugRML("Node #%d old bank #%d current busy area #%d " + "from 0x%lx to 0x%lx\n", + node_id, old_bank, area, + start_addr + (start << PAGE_SHIFT), + start_addr + (end << PAGE_SHIFT)); + if (start_addr + (start << PAGE_SHIFT) >= end_addr) + /* the area is out of moving range, so complete */ + break; + if (start_addr + (end << PAGE_SHIFT) > end_addr) { + BOOT_BUG("Node #%d old bank #%d busy area #%d " + "from 0x%lx to 0x%lx is partially out of " + "moving range from 0x%lx to 0x%lx\n", + node_id, old_bank, area, + start_addr + (start << PAGE_SHIFT), + start_addr + (end << PAGE_SHIFT), + start_addr, end_addr); + } + + /* moving of busy area should delete it from list of areas */ + /* so save reference to next entry of the list before */ + next_area = busy_area->next; + + if (start >= phys_bank->pages_num || + end > phys_bank->pages_num) { + BOOT_BUG("Node #%d old bank #%d area #%d start 0x%lx " + "or end 0x%lx is out of bank size 0x%lx\n", + 
node_id, old_bank, area, start, end, + phys_bank->pages_num); + } + if ((flags & BOOT_EXCLUDE_AT_HIGH_PHYS_MEM) && + (busy_area->flags & + BOOT_EXCLUDE_AT_HIGH_PHYS_MEM)) { + BOOT_BUG("Node #%d old bank #%d area #%d flags %04x: " + "cannot be remapped to high memory range\n", + node_id, old_bank, area, busy_area->flags); + } + if ((flags & BOOT_IGNORE_AT_HIGH_PHYS_MEM) && + (busy_area->flags & + BOOT_IGNORE_AT_HIGH_PHYS_MEM)) { + DebugRML("Node #%d old bank #%d busy area #%d " + "can not be remapped\n", + node_id, old_bank, area); + /* delete busy area from old memory range */ + boot_delete_busy_area(node_id, phys_bank, + busy_area, area, + NULL); /* rereserved area should be at head */ + /* of old bank */ + continue; + } + + boot_rereserve_bank_area(node_id, node_mem, + old_bank, new_bank, area, busy_area); + } +} + +/* should return source old bank index, which can be updated while moving */ +/* but now number should not be changed */ +/* parameter 'delete_size' is size of additional area which should be deleted */ +/* from bank memory truncated end */ +static short __init_recv +boot_move_node_bank_part(int node_id, boot_phys_mem_t *node_mem, short old_bank, + e2k_addr_t start_addr, e2k_addr_t end_addr, + e2k_addr_t delete_size) +{ + boot_phys_bank_t *node_banks; + boot_phys_bank_t *old_phys_bank, *new_phys_bank; + short new_bank, bank; + + node_banks = node_mem->banks; + old_phys_bank = &node_banks[old_bank]; + + new_bank = boot_init_new_phys_bank(node_id, node_mem, + start_addr, end_addr - start_addr); + if (new_bank < 0) { + boot_printk("Node #%d: could not create new bank " + "from 0x%lx to 0x%lx to move old bank #%d", + node_id, start_addr, end_addr, old_bank); + return old_bank; + } + new_phys_bank = &node_banks[new_bank]; + DebugRML("Node #%d: created new bank #%d from 0x%lx to 0x%lx " + "to remap old bank #%d\n", + node_id, new_bank, start_addr, end_addr, old_bank); + + boot_move_bank_busy_areas_part(node_id, node_mem, old_bank, new_bank, + start_addr, 
end_addr, 0); + + /* now old bank (or part of bank) can be deleted */ + bank = boot_delete_phys_bank_part(node_id, node_mem, + old_bank, old_phys_bank, start_addr, + end_addr + delete_size); + + boot_add_new_phys_bank(node_id, node_mem, new_phys_bank, new_bank); + + return bank; +} + +static long __init_recv +boot_delete_bank_area(int node_id, boot_phys_mem_t *node_mem, + short bank, boot_phys_bank_t *phys_bank, + e2k_addr_t area_start, e2k_size_t area_pages) +{ + e2k_addr_t bank_start, bank_end, new_end; + e2k_addr_t area_end; + e2k_size_t area_size; + short new_bank; + + bank_start = phys_bank->base_addr; + bank_end = bank_start + (phys_bank->pages_num << PAGE_SHIFT); + area_size = (area_pages << PAGE_SHIFT); + area_end = area_start + area_size; + + DebugDEL("Node #%d bank #%d from 0x%lx to 0x%lx, should delete " + "memory area from 0x%lx to 0x%lx\n", + node_id, bank, bank_start, bank_end, area_start, area_end); + + new_end = area_start; + if (new_end > bank_start) { + new_bank = boot_move_node_bank_part(node_id, node_mem, + bank, bank_start, new_end, area_size); + } else if (new_end == bank_start) { + /* deleted area from start of bank, truncate bank */ + new_bank = boot_delete_phys_bank_part(node_id, node_mem, + bank, phys_bank, bank_start, area_size); + } else { + BOOT_BUG("Node #%d bank #%d from 0x%lx to 0x%lx does not " + "contain memory area to delete from 0x%lx\n", + node_id, bank, bank_start, bank_end, new_end); + } + if (new_bank < 0) { + /* deleted area was right in the end of bank */ + /* the old bank was deleted and fully moved to new */ + DebugDEL("Node #%d bank #%d area from 0x%lx to 0x%lx " + "was deleted from end, new bank end 0x%lx\n", + node_id, bank, bank_start, bank_end, new_end); + return area_pages; + } + if (new_bank != bank) { + BOOT_BUG("Node #%d old bank #%d cannot be changed, but " + "after partially moveing to high range " + "the bank index is updated to #%d", + node_id, bank, new_bank); + } + /* source bank can be updated after 
moveing of part */ + /* of the old bank, so recalculate its parameters */ + bank_start = phys_bank->base_addr; + bank_end = bank_start + (phys_bank->pages_num << PAGE_SHIFT); + if (bank_start != area_end) { + BOOT_BUG("Node #%d bank #%d new base address 0x%lx should " + "start from end of deleted area 0x%lx", + node_id, bank, bank_start, area_end); + } + + DebugDEL("Node #%d bank #%d new base from 0x%lx to 0x%lx " + "after delete area from 0x%lx to 0x%lx\n", + node_id, bank, bank_start, bank_end, area_start, area_end); + + return area_pages; +} + +/* + * Reserve the physical memory area in the bank. + * This pages od area mark as unallocatable. + * The function returns the number of reserved pages in the bank + * or negative number of reserved pages, if any reserved page is already + * occupied. + */ +static long __init_recv +boot_reserve_bank_area(int node_id, boot_phys_mem_t *node_mem, + short bank, boot_phys_bank_t *phys_bank, + e2k_size_t start_page, long pages_num, + busy_mem_type_t mem_type, unsigned short flags) +{ + e2k_busy_mem_t *busy_area; + e2k_busy_mem_t *prev_busy_area; + e2k_addr_t start_addr; + e2k_size_t end_page; + long pages; + short area; + bool busy_flag; + bool area_changed; + bool mergeable; + + DebugBank("boot_reserve_bank_area() started: start page # 0x%lx " + "number of pages 0x%lx\n", + start_page, pages_num); + + start_addr = phys_bank->base_addr + (start_page << PAGE_SHIFT); + pages = phys_bank->pages_num - start_page; + if (pages_num < pages) + pages = pages_num; + end_page = start_page + pages; + + if ((flags & BOOT_IGNORE_AT_HIGH_PHYS_MEM) && + !phys_bank->maybe_remapped_to_hi) + flags |= BOOT_DELETE_PHYS_MEM; + if (flags & BOOT_DELETE_PHYS_MEM) { + DebugBank("Node #%d bank #%d: will delete area from " + "base 0x%lx to end 0x%lx\n", + node_id, bank, start_addr, + start_addr + ((end_page - start_page) << PAGE_SHIFT)); + } else { + DebugBank("Node #%d bank #%d: will reserve area from " + "base 0x%lx to end 0x%lx\n", + node_id, bank, 
start_addr, + start_addr + ((end_page - start_page) << PAGE_SHIFT)); + } + +again: + busy_flag = false; + area_changed = false; + mergeable = false; + prev_busy_area = NULL; + + for (area = phys_bank->first_area; + area >= 0; + prev_busy_area = busy_area, area = busy_area->next) { + busy_area = &phys_bank->busy_areas[area]; + + if (busy_area->pages_num == 0) { + BOOT_BUG("Node #%d empty physical memory busy area #%d " + "cannot be in the list", + node_id, area); + continue; + } + if (start_page > busy_area->start_page + busy_area->pages_num) + continue; + if (start_page == busy_area->start_page + + busy_area->pages_num) { + /* new area should be added after current busy area, */ + /* but can be intersections with next area, */ + /* so it need consider this case */ + mergeable = true; /* can be merged with current */ + continue; + } + if (end_page < busy_area->start_page) + /* cannot be intersections with residuary areas */ + /* at the tail, need add as new area after previous */ + break; + if (end_page == busy_area->start_page) + /* cannot be other intersections with residuary areas */ + /* at the tail, need add as new area after previous */ + break; + mergeable = false; /* there is intersections */ + if (!(flags & BOOT_IGNORE_BUSY_BANK)) { + BOOT_WARNING("The area from 0x%lx to 0x%lx or some " + "its part is reserved twice", + start_addr, start_addr + pages * PAGE_SIZE); + busy_flag = true; + } + if (flags & BOOT_DELETE_PHYS_MEM) { + /* area should be deleted and cannot intersect other */ + BOOT_BUG("Deleted area from 0x%lx to 0x%lx or some " + "its part intersects other busy area(s)", + start_addr, start_addr + pages * PAGE_SIZE); + break; + } + if (!(busy_area->flags & BOOT_CAN_BE_INTERSECTIONS)) { + BOOT_WARNING("The area from 0x%lx to 0x%lx " + "intersects with area from 0x%lx to 0x%lx " + "CANNOT INTERSECT", + start_addr, start_addr + pages * PAGE_SIZE, + busy_area->start_page << PAGE_SHIFT, + (busy_area->start_page + + busy_area->pages_num) << PAGE_SHIFT); + 
flags &= ~BOOT_CAN_BE_INTERSECTIONS; + } + if ((busy_area->flags & BOOT_ONLY_LOW_PHYS_MEM) != + (flags & BOOT_ONLY_LOW_PHYS_MEM)) { + BOOT_WARNING("The area from 0x%lx to 0x%lx %s " + "intersects with area from 0x%lx to 0x%lx %s", + start_addr, start_addr + pages * PAGE_SIZE, + (flags & BOOT_ONLY_LOW_PHYS_MEM) ? + "ONLY LOW MEM" : "ANY MEM", + busy_area->start_page << PAGE_SHIFT, + (busy_area->start_page + + busy_area->pages_num) << PAGE_SHIFT, + (busy_area->flags & BOOT_ONLY_LOW_PHYS_MEM) ? + "ONLY LOW MEM" : "ANY MEM"); + flags |= BOOT_ONLY_LOW_PHYS_MEM; + } + if ((busy_area->flags & BOOT_IGNORE_AT_HIGH_PHYS_MEM) != + (flags & BOOT_IGNORE_AT_HIGH_PHYS_MEM)) { + BOOT_WARNING("The area from 0x%lx to 0x%lx %s " + "intersects with area from 0x%lx to 0x%lx %s", + start_addr, start_addr + pages * PAGE_SIZE, + (flags & BOOT_IGNORE_AT_HIGH_PHYS_MEM) ? + "NEED NOT REMAP TO HIGH MEM" : + "CAN REMAP TO HIGH MEM", + busy_area->start_page << PAGE_SHIFT, + (busy_area->start_page + + busy_area->pages_num) << PAGE_SHIFT, + (busy_area->flags & + BOOT_IGNORE_AT_HIGH_PHYS_MEM) ? 
+ "NEED NOT REMAP TO HIGH MEM" : + "CAN REMAP TO HIGH MEM"); + flags |= BOOT_IGNORE_AT_HIGH_PHYS_MEM; + } + if (mem_type != busy_area->type) { + BOOT_WARNING("The area from 0x%lx to 0x%lx type %d " + "intersects with area from 0x%lx to 0x%lx " + "type %d", + start_addr, start_addr + pages * PAGE_SIZE, + mem_type, + busy_area->start_page << PAGE_SHIFT, + (busy_area->start_page + + busy_area->pages_num) << PAGE_SHIFT, + busy_area->type); + /* keep memory type of source busy area */ + mem_type = busy_area->type; + } + if (start_page < busy_area->start_page) { + DebugBank("The reserved area #%d start page will be " + "moved from 0x%lx to 0x%lx\n", + area, busy_area->start_page, start_page); + busy_area->pages_num += (busy_area->start_page - + start_page); + busy_area->start_page = start_page; + DebugBank("The reserved area #%d new size is " + "0x%lx pages\n", + area, busy_area->pages_num); + area_changed = true; + } + if (end_page > busy_area->start_page + busy_area->pages_num) { + DebugBank("The reserved area #%d finish page will be " + "moved from 0x%lx to 0x%lx\n", + area, + busy_area->start_page + busy_area->pages_num, + end_page); + busy_area->pages_num += (end_page - + (busy_area->start_page + + busy_area->pages_num)); + DebugBank("The reserved area new size is " + "0x%lx pages\n", + busy_area->pages_num); + area_changed = true; + } + if (area_changed) { + start_page = busy_area->start_page; + pages_num = busy_area->pages_num; + boot_delete_busy_area(node_id, phys_bank, + busy_area, area, prev_busy_area); + DebugBank("The reserved area #%d will be deleted as it" + " is included into new area from 0x%lx page" + " and size 0x%lx\n", + area, start_page, pages_num); + goto again; + } else { + /* probably area memory type or flags were changed, */ + /* update its */ + busy_area->type = mem_type; + busy_area->flags = flags; + DebugBank("The area from addr 0x%lx to addr 0x%lx " + "is included into the area #%d\n", + start_addr, start_addr + pages * PAGE_SIZE, + area); + 
} + return (busy_flag) ? (long)-pages : (long)pages; + } + if (flags & BOOT_DELETE_PHYS_MEM) { + return boot_delete_bank_area(node_id, node_mem, + bank, phys_bank, start_addr, pages); + } + if (mergeable) { + /* probably new area can be merged with previous */ + if (boot_try_merge_bank_area(node_id, phys_bank, prev_busy_area, + start_page, pages, mem_type, flags)) + /* yes, area has been merged */ + return pages; + } + area = boot_get_free_busy_area(node_id, phys_bank); + if (unlikely(area < 0)) { + BOOT_BUG("Node #%d: cannot prereserve busy area from 0x%lx " + "to 0x%lx, no empty entries in the tabale", + node_id, start_addr, start_addr + pages * PAGE_SIZE); + return 0; + } + busy_area = &phys_bank->busy_areas[area]; + busy_area->start_page = start_page; + busy_area->pages_num = pages; + busy_area->type = mem_type; + busy_area->flags = flags; + if (prev_busy_area == NULL) { + /* add new area to head of the list */ + busy_area->next = phys_bank->first_area; + phys_bank->first_area = area; + } else { + /* add new area after previous at the list */ + busy_area->next = prev_busy_area->next; + prev_busy_area->next = area; + } + phys_bank->busy_areas_num++; + DebugBank("The node #%d new reserved area #%d from 0x%lx to 0x%lx " + "was added to the list of occupied areas\n", + node_id, area, + start_addr, start_addr + pages * PAGE_SIZE); + + return (busy_flag) ? (long)-pages : (long)pages; +} + +/* + * Reserve the physical memory pages of the bank. + * This pages mark as unallocatable. + * The function returns the number of reserved pages in the bank + * or negative number of reserved pages, if any reserved page is already + * occupied. 
+ */ +static long __init_recv +boot_reserve_bank_physmem(int node_id, boot_phys_mem_t *node_mem, + short bank, boot_phys_bank_t *phys_bank, + e2k_addr_t phys_addr, long pages_num, + busy_mem_type_t mem_type, unsigned short flags) +{ + e2k_size_t start_page, end_page; + long pages; + + DebugBank("boot_reserve_bank_physmem() started for addr 0x%lx and " + "page(s) 0x%lx\n", + phys_addr, pages_num); + if (phys_addr < phys_bank->base_addr || + phys_addr >= phys_bank->base_addr + + phys_bank->pages_num * PAGE_SIZE) { + BOOT_BUG("The address 0x%lx is not in the range of " + "the physical memory bank addresses 0x%lx : 0x%lx", + phys_addr, phys_bank->base_addr, + phys_bank->base_addr + + phys_bank->pages_num * PAGE_SIZE); + } + start_page = (phys_addr - phys_bank->base_addr) / PAGE_SIZE; + end_page = start_page + pages_num; + if (end_page > phys_bank->pages_num) { + end_page = phys_bank->pages_num; + pages_num = end_page - start_page; + } + DebugBank("boot_reserve_bank_physmem() start pages from 0x%lx " + "to 0x%lx, number 0x%lx\n", + start_page, end_page, pages_num); + pages = boot_reserve_bank_area(node_id, node_mem, bank, phys_bank, + start_page, pages_num, mem_type, flags); + DebugBank("boot_reserve_bank_physmem() reserved 0x%lx page(s) " + "in the bank\n", + pages); + if (pages <= 0) + return pages; + + atomic64_sub(pages, &phys_bank->free_pages_num); + return pages; +} + +/* + * Find a bank including the physical address. + * Function returns the physical pointer of the bank description structure or + * NULL, if memory bank did not found. 
 */
static boot_phys_bank_t * __init_recv
boot_find_bank_of_addr(e2k_addr_t phys_addr, int *node_id, short *bank_index)
{
	boot_phys_mem_t *all_nodes_mem = NULL;
	int nodes_num;
	int cur_nodes_num = 0;	/* nodes with memory seen so far */
	int node;
	int bank;

	all_nodes_mem = boot_vp_to_pp((boot_phys_mem_t *)boot_phys_mem);
	nodes_num = boot_phys_mem_nodes_num;

	for (node = 0; node < L_MAX_MEM_NUMNODES; node++) {
		node_phys_mem_t *node_mem = &all_nodes_mem[node];
		boot_phys_bank_t *node_banks;
		boot_phys_bank_t *phys_bank;

		if (cur_nodes_num >= nodes_num)
			break;	/* no more nodes with memory */
		if (node_mem->pfns_num == 0)
			continue;	/* node has not memory */
		node_banks = node_mem->banks;
		cur_nodes_num++;
		/* the bank list is walked under the per-node lock; */
		/* the lock is dropped again before every exit path */
		boot_the_node_spin_lock(node, boot_phys_mem_lock);
		for (bank = node_mem->first_bank;
				bank >= 0;
				bank = phys_bank->next) {

			phys_bank = &node_banks[bank];
			if (phys_bank->pages_num == 0) {
				/* bank in the list has not pages */
				boot_the_node_spin_unlock(node,
						boot_phys_mem_lock);
				BOOT_BUG("Node #%d bank #%d at the list "
					"has not memory pages",
					node, bank);
			}
			if (phys_addr >= phys_bank->base_addr &&
				phys_addr < phys_bank->base_addr +
					phys_bank->pages_num * PAGE_SIZE) {
				/* found the bank: report its node and */
				/* index through the optional out-params */
				if (bank_index != NULL)
					*bank_index = bank;
				if (node_id != NULL)
					*node_id = node;
				boot_the_node_spin_unlock(node,
						boot_phys_mem_lock);
				return phys_bank;
			}
		}
		boot_the_node_spin_unlock(node, boot_phys_mem_lock);
	}
	/* not found: out-params are set to -1 */
	if (bank_index != NULL)
		*bank_index = -1;
	if (node_id != NULL)
		*node_id = -1;
	return (boot_phys_bank_t *)NULL;
}

/*
 * Reserve/delete a particular physical memory range. This range marks as
 * unallocatable.
 * Usable RAM might be used for boot-time allocations -
 * or it might get added to the free page pool later on.
 * The function returns 0 on reservation success and 1, if all or some part
 * of reserved memory range is already occupied and 'ignore_busy' is not set.
 */

int __init_recv boot_reserve_physmem(e2k_addr_t virt_phys_addr,
		e2k_size_t mem_size, busy_mem_type_t mem_type,
		unsigned short flags)
{
	e2k_addr_t phys_addr;
	e2k_addr_t base_addr;
	e2k_addr_t end_addr;
	boot_phys_mem_t *all_nodes_mem = NULL;
	boot_phys_bank_t *phys_bank = NULL;
	long pages_num;
	long bank_pages_num;
	int error_flag = 0;

	all_nodes_mem = boot_vp_to_pp((boot_phys_mem_t *)boot_phys_mem);
	phys_addr = boot_vpa_to_pa(virt_phys_addr);
	end_addr = phys_addr + mem_size;
	DebugBank("boot_reserve_physmem() started: mem addr 0x%lx "
		"size 0x%lx\n",
		phys_addr, mem_size);
	if (mem_size == 0)
		BOOT_BUG("Reserved memory area size %ld is empty", mem_size);

	/*
	 * Round up according to PAGE_SIZE, partially reserved pages are
	 * considered fully reserved.
	 * NOTE(review): _PAGE_ALIGN_UP is applied to the base and
	 * _PAGE_ALIGN_DOWN to the end - confirm these e2k macros align
	 * toward lower resp. higher addresses as the comment implies.
	 */

	base_addr = _PAGE_ALIGN_UP(phys_addr, PAGE_SIZE);
	end_addr = _PAGE_ALIGN_DOWN(end_addr, PAGE_SIZE);
	pages_num = (end_addr - base_addr) >> PAGE_SHIFT;

	/*
	 * The memory range can occupy a few contiguous physical banks.
	 * The pages bits set in all of these banks
	 */
	while (pages_num > 0) {
		node_phys_mem_t *node_mem;
		int node_id;
		short bank;

		phys_bank = boot_find_bank_of_addr(base_addr, &node_id, &bank);
		if (phys_bank == NULL) {
			DebugBank("boot_reserve_physmem() bank including "
				"address 0x%lx was not found\n",
				base_addr);
			if (flags & BOOT_IGNORE_BANK_NOT_FOUND)
				return 0;
			/* Some guest areas can be allocated by QEMU/host */
			/* into special address space to emulate hardware */
			if (!boot_paravirt_enabled()) {
				BOOT_BUG("Could not find the physical memory "
					"bank including reserved address 0x%lx",
					base_addr);
			}
			/* paravirt: skip the unknown page and carry on */
			base_addr += PAGE_SIZE;
			pages_num -= 1;
			DebugBank("boot_reserve_physmem go to the next "
				"page of reserved area with address 0x%lx",
				base_addr);
			continue;
		}
		node_mem = &all_nodes_mem[node_id];
		/* per-bank reservation is done under the node's lock */
		boot_the_node_spin_lock(node_id, boot_phys_mem_lock);
		bank_pages_num = boot_reserve_bank_physmem(node_id, node_mem,
						bank, phys_bank,
						base_addr, pages_num,
						mem_type, flags);
		boot_the_node_spin_unlock(node_id, boot_phys_mem_lock);
		DebugBank("boot_reserve_physmem() "
			"boot_reserve_bank_physmem() returned bank "
			"pages num 0x%lx\n",
			bank_pages_num);
		/* negative return means the pages were reserved but were */
		/* already busy; zero means nothing could be reserved */
		if (bank_pages_num <= 0) {
			error_flag = 1;
			if (bank_pages_num == 0)
				break;
			bank_pages_num = -bank_pages_num;
		}
		pages_num -= bank_pages_num;
		base_addr += (bank_pages_num << PAGE_SHIFT);
	}
	return error_flag;
}

/* Delete a physical memory range: either really delete it now, or mark */
/* it to be dropped when low memory is remapped to the high range. */
int __init_recv boot_delete_physmem(e2k_addr_t virt_phys_addr,
		e2k_size_t mem_size)
{
	unsigned short flags;

	if (BOOT_LOW_MEMORY_ENABLED()) {
		/* the area should be deleted really */
		flags = BOOT_DELETE_PHYS_MEM |
				BOOT_NOT_IGNORE_BUSY_BANK |
				BOOT_IGNORE_BANK_NOT_FOUND;
	} else {
		/* the area will be deleted while low memory will be remapped */
		/* to high, where hardware does not strip out any areas */
		flags = BOOT_ONLY_LOW_PHYS_MEM |
				BOOT_IGNORE_AT_HIGH_PHYS_MEM |
				BOOT_NOT_IGNORE_BUSY_BANK |
				BOOT_IGNORE_BANK_NOT_FOUND;
	}
	return boot_reserve_physmem(virt_phys_addr, mem_size,
				hw_stripped_mem_type, flags);
}

/* Re-reserve busy area 'area' of low bank 'bank' at the new (high) bank */
/* 'new_bank' with the same page range, type and flags, then delete it */
/* from the low bank. Lock is expected to be held by the caller. */
void __init_recv boot_rereserve_bank_area(int node_id,
		boot_phys_mem_t *node_mem, short bank,
		short new_bank, short area, e2k_busy_mem_t *busy_area)
{
	boot_phys_bank_t *node_banks;
	boot_phys_bank_t *phys_bank, *new_phys_bank;
	long pages;

	node_banks = node_mem->banks;
	phys_bank = &node_banks[bank];
	new_phys_bank = &node_banks[new_bank];

	/* copy busy area from low bank to high memory range */
	pages = boot_reserve_bank_area(node_id, node_mem,
			new_bank, new_phys_bank,
			busy_area->start_page, busy_area->pages_num,
			busy_area->type, busy_area->flags);
	if (pages != busy_area->pages_num) {
		BOOT_BUG("Node #%d bank #%d: could not rereserve area #%d "
			"from 0x%lx 0x%lx pages at new bank #%d\n",
			node_id, bank, area,
			busy_area->start_page, busy_area->pages_num,
			new_bank);
	}

	/* delete busy area from low memory range */
	boot_delete_busy_area(node_id, phys_bank, busy_area, area,
		NULL);	/* rereserved area should be at head of low bank */
}

/*
 * Need try use only high memory range addresses of physical memory
 * But in some case it is better to do it dynamically, so there is boot-time
 * parameter to force enabling of low memory range
 */
#ifdef CONFIG_ONLY_HIGH_PHYS_MEM
bool low_memory_enabled = LOW_MEMORY_ENABLED_DEFAULT;
#endif

/* "lowmem_disable" command line option: low memory stays enabled only */
/* on E1C+ machines, which cannot work without it */
static int __init boot_low_memory_setup(char *cmd)
{
	if (!BOOT_NATIVE_IS_MACHINE_E1CP)
		BOOT_SET_LOW_MEMORY_DISABLED();
	return 0;
}
boot_param("lowmem_disable", boot_low_memory_setup);

#ifdef CONFIG_ONLY_HIGH_PHYS_MEM

/*
 * IOMMU on/off setup
 *
 * Based on l_iommu_setup() from arch/l/kernel/iommu.c to determine
 * 'Will be IOMMMU turned ON or OFF' on early stage and need or not reserve
 * DMA bounce buffers while boot-time initialization.
+ * Real option parsing will be done by common function l_iommu_setup() + */ +static bool iommu_win_supported = true; +#define boot_iommu_win_supported boot_get_vo_value(iommu_win_supported) + +static int __init boot_iommu_win_setup(char *cmd) +{ + if (!boot_strcmp(cmd, "noprefetch")) { + /* unused while boot-time */ + return 2; + } else { + e2k_size_t iommu_win_size = boot_simple_strtoul(cmd, &cmd, 0); + + if (*cmd == 'K' || *cmd == 'k') + iommu_win_size <<= 10; + else if (*cmd == 'M' || *cmd == 'm') + iommu_win_size <<= 20; + + iommu_win_size &= ~PAGE_MASK; + + if (iommu_win_size == 0) { + boot_iommu_win_supported = false; + boot_printk("IOMMU will be turned OFF\n"); + } else { + boot_iommu_win_supported = true; + boot_printk("IOMMU window limit set to 0x%lx\n", + iommu_win_size); + } + } + return 2; +} +boot_param("iommu", boot_iommu_win_setup); + +static void __init +boot_reserve_dma_low_memory(boot_info_t *boot_info) +{ + e2k_size_t area_size; + e2k_size_t min_size; + e2k_size_t max_size; + e2k_size_t large_page_size; + void *dma_low_mem = NULL; + + /* reserve DMA bounce buffers, if it need */ + if (boot_l_iommu_supported() && boot_iommu_win_supported && + !boot_cpu_has(CPU_HWBUG_IOMMU)) + /* IOMMU will be ON, nothing DMA bounce buffers need */ + return; + + /* IOMMU cannot be used, DMA bounce buffers will be need */ + if (!boot_has_node_low_memory(boot_numa_node_id(), boot_info)) { + BOOT_WARNING("Node has not low memory to reserve " + "DMA bounce buffers area"); + return; + } + + if (BOOT_TEST_AND_SET_NODE_LOCK(boot_node_low_mem_lock, + boot_node_dma_low_mem_reseerved)) { + DebugNUMA("boot_reserve_dma_low_memory() DMA bounce buffers " + "was already reserved on node\n"); + return; + } + + large_page_size = BOOT_E2K_LARGE_PAGE_SIZE; + area_size = L_SWIOTLB_DEFAULT_SIZE; + min_size = L_SWIOTLB_MIN_SIZE; + area_size = ALIGN_TO_MASK(area_size, large_page_size); + min_size = ALIGN_TO_MASK(min_size, large_page_size); + max_size = area_size; + while (area_size >= 
min_size) { + dma_low_mem = boot_node_try_alloc_low_mem(area_size, + large_page_size, large_page_size, + dma32_mem_type); + if (dma_low_mem != (void *)-1) + break; + area_size = area_size >> 1; + } + if (dma_low_mem == (void *)-1) { + BOOT_WARNING("Could not allocate low memory to reserve " + "DMA bounce buffers area"); + } else if (area_size < max_size) { + BOOT_WARNING("Could allocate only 0x%lx Mb from 0x%lx Mb of " + "low memory to reserve DMA bounce buffers area", + area_size >> 20, max_size >> 20); + } else { + DebugMAP("Allocated 0x%lx Mb of low memory from 0x%lx " + "to reserve DMA bounce buffers area\n", + area_size >> 20, (e2k_addr_t)dma_low_mem); + } + + BOOT_NODE_UNLOCK(boot_node_low_mem_lock, + boot_node_dma_low_mem_reseerved); +} + +static void __init +boot_reserve_netdev_dma_memory(bool bsp, boot_info_t *boot_info) +{ + e2k_size_t area_size; + e2k_size_t large_page_size; + void *netdev_low_mem = NULL; + + /* FIXME TODO: drivers of l_e1000 & sunlance ethernet cards need */ + /* direct allocation DMA low memory */ + + if (!BOOT_IS_BSP(bsp)) + return; + + if (!boot_has_node_low_memory(boot_numa_node_id(), boot_info)) { + BOOT_WARNING("Node has not low memory to reserve " + "DMA memory for ethernet l_e1000 & sunlance " + "net devices"); + return; + } + + large_page_size = BOOT_E2K_LARGE_PAGE_SIZE; + area_size = 1 * large_page_size; /* one huge page */ + netdev_low_mem = boot_node_try_alloc_low_mem(area_size, + large_page_size, large_page_size, + dma32_mem_type); + if (netdev_low_mem == (void *)-1) { + BOOT_WARNING("Could not allocate low memory to reserve " + "DMA net devices structures"); + } else { + DebugMAP("Allocated 0x%lx Mb of low memory from 0x%lx " + "to reserve DMA net devices structures\n", + area_size >> 20, (e2k_addr_t)netdev_low_mem); + } +} + +/* + * In some cases memory areas should be allocated only at low addresses + * range (below 2**32). For example DMA bounce buffers. 
 * All such areas should be preliminarily reserved here while boot-time
 * initialization and will be realised while bootmem registration and freeing.
 */
static void __init
boot_reserve_low_memory(bool bsp, boot_info_t *boot_info)
{
	/* reserve DMA bounce buffers, if it need */
	boot_reserve_dma_low_memory(boot_info);

	/* FIXME TODO: reserve for DMA net devices structures */
	boot_reserve_netdev_dma_memory(bsp, boot_info);
}

/* Remap the low bank part [start_lo_addr, end_lo_addr) to the high */
/* memory range: create the mirrored high bank, move the busy areas */
/* there, then delete the low part. */
/* should return source low bank index, which can be updated while remapping */
/* but now number should not be changed */
static short __init
boot_remap_node_low_bank_area(boot_info_t *boot_info, int node_id,
		boot_phys_mem_t *node_mem, short lo_bank,
		e2k_addr_t start_lo_addr, e2k_addr_t end_lo_addr)
{
	boot_phys_bank_t *node_banks;
	boot_phys_bank_t *lo_phys_bank, *hi_phys_bank;
	e2k_addr_t start_hi_addr, end_hi_addr;
	short hi_bank, new_bank;

	node_banks = node_mem->banks;
	lo_phys_bank = &node_banks[lo_bank];

	start_hi_addr = (e2k_addr_t)boot_pa_to_high_pa((void *)start_lo_addr,
							boot_info);
	end_hi_addr = (e2k_addr_t)boot_pa_end_to_high((void *)end_lo_addr,
							boot_info);
	/* unchanged addresses mean the conversion failed, keep low bank */
	if (start_hi_addr == start_lo_addr || end_hi_addr == end_lo_addr) {
		if (boot_has_node_high_memory(node_id, boot_info)) {
			BOOT_WARNING("Could not convert addresses of low bank "
				"from 0x%lx to 0x%lx to remap to high memory "
				"range",
				start_hi_addr, end_hi_addr);
		}
		return lo_bank;
	}
	hi_bank = boot_init_new_phys_bank(node_id, node_mem,
			start_hi_addr, end_hi_addr - start_hi_addr);
	if (hi_bank < 0) {
		boot_printk("Node #%d: could not create high bank "
			"from 0x%lx to 0x%lx to remap low bank #%d",
			node_id, start_hi_addr, end_hi_addr, lo_bank);
		return lo_bank;
	}
	hi_phys_bank = &node_banks[hi_bank];
	DebugRML("Node #%d: created high bank #%d from 0x%lx to 0x%lx "
		"to remap low bank #%d\n",
		node_id, hi_bank, start_hi_addr, end_hi_addr, lo_bank);

	boot_move_bank_busy_areas_part(node_id, node_mem, lo_bank, hi_bank,
			start_lo_addr, end_lo_addr,
			BOOT_EXCLUDE_AT_HIGH_PHYS_MEM |
				BOOT_IGNORE_AT_HIGH_PHYS_MEM);

	/* now low bank (or part of bank) can be deleted */
	new_bank = boot_delete_phys_bank_part(node_id, node_mem,
			lo_bank, lo_phys_bank, start_lo_addr, end_lo_addr);

	boot_add_new_phys_bank(node_id, node_mem, hi_phys_bank, hi_bank);

	return new_bank;
}

/* Split off the leading run of busy areas that must NOT be remapped to */
/* high memory (BOOT_EXCLUDE_AT_HIGH_PHYS_MEM) into its own bank, so the */
/* rest of the low bank can be remapped. */
/* should return source low bank index, which can be updated while remapping */
/* but now number should not be changed */
static short __init
boot_unremap_node_low_bank_area(int node_id, boot_phys_mem_t *node_mem,
		short bank, boot_phys_bank_t *phys_bank)
{
	e2k_busy_mem_t *busy_area;
	e2k_addr_t bank_start, bank_end;
	e2k_addr_t start_addr, end_addr;
	e2k_addr_t prev_area_end;
	short area;

	bank_start = phys_bank->base_addr;
	bank_end = bank_start + (phys_bank->pages_num << PAGE_SHIFT);
	start_addr = -1;	/* -1 == no not-remappable area found yet */
	end_addr = start_addr;
	prev_area_end = -1;
	for (area = phys_bank->first_area; area >= 0; area = busy_area->next) {
		e2k_size_t area_start, area_end;

		/* loop on busy areas to find max bank contigous area */
		/* which cannot be remapped as high bank of memory */
		busy_area = &phys_bank->busy_areas[area];
		if (busy_area->pages_num == 0) {
			BOOT_BUG("Node #%d bank #%d empty physical memory "
				"busy area #%d cannot be in the list",
				node_id, bank, area);
			break;
		}
		area_start = busy_area->start_page;
		area_end = area_start + busy_area->pages_num;
		DebugRML("Node #%d bank #%d current busy area #%d "
			"from 0x%lx to 0x%lx\n",
			node_id, bank, area,
			bank_start + (area_start << PAGE_SHIFT),
			bank_start + (area_end << PAGE_SHIFT));
		if (area_start >= phys_bank->pages_num ||
				area_end > phys_bank->pages_num) {
			BOOT_BUG("Node #%d low bank #%d area #%d from 0x%lx "
				"to 0x%lx is out of bank from 0x%lx to 0x%lx\n",
				node_id, bank, area,
				bank_start + (area_start << PAGE_SHIFT),
				bank_start + (area_end << PAGE_SHIFT),
				bank_start, bank_end);
		}
		if (prev_area_end == -1) {
			prev_area_end = area_end;
		} else if (area_start != prev_area_end) {
			/* continuity is broken */
			DebugRML("Node #%d bank #%d area #%d continuity is "
				"broken: area start 0x%lx is not previous "
				"area end 0x%lx\n",
				node_id, bank, area,
				bank_start + (area_start << PAGE_SHIFT),
				bank_start + (prev_area_end << PAGE_SHIFT));
			break;
		}
		if (!(busy_area->flags & BOOT_EXCLUDE_AT_HIGH_PHYS_MEM))
			/* area can be remapped to high range */
			/* so it is end of areas which cannot be remapped */
			break;

		/* it is area which cannot be remapped, so account it */
		if (start_addr == -1)
			start_addr = bank_start + (area_start << PAGE_SHIFT);
		end_addr = bank_start + (area_end << PAGE_SHIFT);
		prev_area_end = area_end;
	}
	if (start_addr == -1 || start_addr == end_addr) {
		DebugRML("Node #%d low bank #%d : could not find any not "
			"remapped area\n",
			node_id, bank);
		return bank;
	}
	if (start_addr != bank_start) {
		BOOT_BUG("Node #%d low bank #%d not remapped areas starts "
			"from 0x%lx, but should starts from bank base 0x%lx\n",
			node_id, bank, start_addr, bank_start);
	}

	DebugRML("Node #%d bank #%d will create new bank "
		"from 0x%lx to 0x%lx\n",
		node_id, bank, start_addr, end_addr);
	return boot_create_phys_bank_part(node_id, node_mem,
			bank, phys_bank, start_addr, end_addr);
}

/* should return source low bank index, which can be updated while remapping */
/* but now number should not be changed */
static short __init
boot_remap_node_low_bank_mem(boot_info_t *boot_info, int node_id,
		short bank, boot_phys_mem_t *node_mem)
{
	boot_phys_bank_t *node_banks;
	boot_phys_bank_t *phys_bank;
	e2k_busy_mem_t *busy_area;
	e2k_addr_t bank_start, bank_end;
	e2k_addr_t start_addr, end_addr;
	short new_bank;
	short area, next_area;

	node_banks = node_mem->banks;
	phys_bank = &node_banks[bank];

	bank_start = phys_bank->base_addr;
	bank_end = bank_start + (phys_bank->pages_num << PAGE_SHIFT);

	start_addr = bank_start;
	end_addr = start_addr;
	for (area = phys_bank->first_area; area >= 0; area = next_area) {
		e2k_size_t area_start, area_end;

		/* loop on busy areas to find max bank contigous area */
		/* which can be remapped as high bank of memory */
		busy_area = &phys_bank->busy_areas[area];
		if (busy_area->pages_num == 0) {
			BOOT_BUG("Node #%d bank #%d empty physical memory "
				"busy area #%d cannot be in the list",
				node_id, bank, area);
			continue;
		}
		area_start = busy_area->start_page;
		area_end = area_start + busy_area->pages_num;
		DebugRML("Node #%d bank #%d current busy area #%d "
			"from 0x%lx to 0x%lx\n",
			node_id, bank, area,
			bank_start + (area_start << PAGE_SHIFT),
			bank_start + (area_end << PAGE_SHIFT));
		next_area = busy_area->next;
		if (!(busy_area->flags & BOOT_EXCLUDE_AT_HIGH_PHYS_MEM))
			/* area can be remapped to high range */
			/* so continue loop and search end of max area */
			continue;
		if (!phys_bank->maybe_remapped_to_hi)
			/* area cannot be remapped to high range, but */
			/* high range to remap is absent, so area should */
			/* stay at low bank */
			continue;

		/* found low busy area which cannot be remapped to high */
		/* so its start is end of current max bank contigous area */
		DebugRML("Node #%d bank #%d area #%d need not be remapped "
			"to high\n",
			node_id, bank, area);
		end_addr = bank_start + (area_start << PAGE_SHIFT);
		if (end_addr > start_addr) {
			new_bank = boot_remap_node_low_bank_area(boot_info,
					node_id, node_mem,
					bank, start_addr, end_addr);
			if (new_bank != bank) {
				BOOT_BUG("Node #%d low bank #%d cannot be "
					"changed, but after partially "
					"remapping to high range the bank "
					"index is updated to #%d\n",
					node_id, bank, new_bank);
			}
			/* source bank can be updated after remapping of part */
			/* of the low bank, so recalculate its parameters */
			bank_start = phys_bank->base_addr;
			bank_end = bank_start +
					(phys_bank->pages_num << PAGE_SHIFT);
		}
		/* now bank
starts from area which cannot be remapped */ + /* from low memory to high, create new bank for such areas */ + new_bank = boot_unremap_node_low_bank_area(node_id, + node_mem, bank, phys_bank); + if (new_bank < 0) + /* uremapped areas were right in the end of bank */ + /* the bank was deleted, so bank fully remapped */ + return new_bank; + if (new_bank != bank) { + BOOT_BUG("Node #%d low bank #%d cannot be changed, " + "but after creation of unremapping bank " + "the bank index is updated to #%d\n", + node_id, bank, new_bank); + } + /* source bank can be updated after creation of unremapping */ + /* the low bank, so recalculate its parameters */ + bank_start = phys_bank->base_addr; + bank_end = bank_start + (phys_bank->pages_num << PAGE_SHIFT); + next_area = phys_bank->first_area; + start_addr = bank_start; + end_addr = start_addr; + } + if (start_addr >= bank_end) + /* nothing pages at the end of bank */ + return bank; + end_addr = bank_end; + DebugRML("Node #%d bank #%d free area from 0x%lx to 0x%lx " + "at the end of bank\n", + node_id, bank, start_addr, end_addr); + new_bank = boot_remap_node_low_bank_area(boot_info, node_id, + node_mem, bank, start_addr, end_addr); + if (new_bank >= 0) { + if (boot_has_node_high_memory(node_id, boot_info)) { + BOOT_WARNING("Node #%d low bank #%d should be deleted, " + "but after full remapping to high range " + "bank there is #%d\n", + node_id, bank, new_bank); + } + } + return new_bank; +} +static void __init +boot_remap_node_low_memory(boot_info_t *boot_info, int node_id, + boot_phys_mem_t *node_mem) +{ + boot_phys_bank_t *node_banks; + boot_phys_bank_t *phys_bank; + short bank, next_bank, new_bank; + + boot_the_node_spin_lock(node_id, boot_phys_mem_lock); + node_banks = node_mem->banks; + for (bank = node_mem->first_bank; bank >= 0; bank = next_bank) { + e2k_addr_t bank_start; + + phys_bank = &node_banks[bank]; + if (phys_bank->pages_num == 0) { + /* bank in the list has not pages */ + BOOT_BUG("Node #%d bank #%d at the list 
has not " + "memory pages", + node_id, bank); + break; + } + bank_start = phys_bank->base_addr; + if (is_addr_from_high_memory(bank_start)) + /* bank is from high memory, no more banks from low */ + break; + DebugRMLT("Node #%d bank #%d from 0x%lx to 0x%lx will be " + "remapped to high memory\n", + node_id, bank, bank_start, + bank_start + (phys_bank->pages_num << PAGE_SHIFT)); + + /* the bank can be deleted, so remember next bank # */ + next_bank = phys_bank->next; + new_bank = boot_remap_node_low_bank_mem(boot_info, node_id, + bank, node_mem); + if (new_bank < 0) { + /* the bank was deleted, so bank fully remapped */ + } else if (new_bank != bank) { + BOOT_BUG("Node #%d low bank #%d cannot be changed, " + "but after creation of unremapping bank " + "the bank index is updated to #%d\n", + node_id, bank, new_bank); + } + } + boot_the_node_spin_unlock(node_id, boot_phys_mem_lock); +} +static void __init +boot_remap_low_to_high_memory(boot_info_t *boot_info) +{ + boot_phys_mem_t *all_nodes_mem; + boot_phys_mem_t *node_mem; + int node_id; + + if (BOOT_TEST_AND_SET_NODE_LOCK(boot_node_low_mem_lock, + boot_node_low_mem_remapped)) { + DebugNUMA("boot_remap_low_to_high_memory() low memory " + "was already remapped on node\n"); + return; + } + + all_nodes_mem = boot_vp_to_pp((boot_phys_mem_t *)boot_phys_mem); + + node_id = boot_numa_node_id(); + node_mem = &all_nodes_mem[node_id]; + if (node_mem->pfns_num != 0) + boot_remap_node_low_memory(boot_info, node_id, node_mem); + + BOOT_NODE_UNLOCK(boot_node_low_mem_lock, boot_node_low_mem_remapped); +} + +static e2k_busy_mem_t * __init_recv +boot_find_node_buse_area_of_addr(e2k_addr_t phys_addr, + int node_id, node_phys_mem_t *node_mem) +{ + boot_phys_bank_t *node_banks; + boot_phys_bank_t *phys_bank; + e2k_busy_mem_t *busy_area; + short bank; + + node_banks = node_mem->banks; + + for (bank = node_mem->first_bank; bank >= 0; bank = phys_bank->next) { + e2k_addr_t bank_start; + short area; + + phys_bank = &node_banks[bank]; + if 
(phys_bank->pages_num == 0) { + /* bank in the list has not pages */ + BOOT_BUG("Node #%d bank #%d at the list has not " + "memory pages\n", + node_id, bank); + } + bank_start = phys_bank->base_addr; + for (area = phys_bank->first_area; + area >= 0; + area = busy_area->next) { + e2k_size_t start, end; + + busy_area = &phys_bank->busy_areas[area]; + if (busy_area->pages_num == 0) { + BOOT_BUG("Node #%d low bank #%d empty physical " + "memory busy area #%d cannot be in " + "the list", + node_id, bank, area); + } + start = bank_start + (busy_area->start_page << + PAGE_SHIFT); + end = start + (busy_area->pages_num << PAGE_SHIFT); + DebugRML("Node #%d low bank #%d current busy area #%d " + "from 0x%lx to 0x%lx\n", + node_id, bank, area, start, end); + if (phys_addr >= start && phys_addr < end) + return busy_area; + } + } + return NULL; /* area is not found */ +} + +static e2k_busy_mem_t * __init_recv +boot_find_busy_area_of_addr(e2k_addr_t phys_addr) +{ + boot_phys_mem_t *all_nodes_mem = NULL; + e2k_busy_mem_t *area; + int nodes_num; + int cur_nodes_num = 0; + int node; + + all_nodes_mem = boot_vp_to_pp((boot_phys_mem_t *)boot_phys_mem); + nodes_num = boot_phys_mem_nodes_num; + + for (node = 0; node < L_MAX_MEM_NUMNODES; node++) { + node_phys_mem_t *node_mem = &all_nodes_mem[node]; + + if (cur_nodes_num >= nodes_num) + break; /* no more nodes with memory */ + if (node_mem->pfns_num == 0) + continue; /* node has not memory */ + cur_nodes_num++; + boot_the_node_spin_lock(node, boot_phys_mem_lock); + area = boot_find_node_buse_area_of_addr(phys_addr, + node, node_mem); + boot_the_node_spin_unlock(node, boot_phys_mem_lock); + if (area != NULL) + return area; /* area is found */ + } + return NULL; /* area is not found */ +} + +static e2k_addr_t __init +boot_get_remapped_area_addr(boot_info_t *boot_info, + e2k_addr_t old_addr, busy_mem_type_t mem_type) +{ + e2k_addr_t new_addr; + e2k_busy_mem_t *area; + + /* area could be remapped from low to high memory */ + /* so new 
address should be from high memory */ + + if (is_addr_from_high_memory(old_addr)) { + /* address is already from high memory */ + new_addr = old_addr; + } else { + new_addr = (e2k_addr_t)boot_pa_to_high_pa((void *)old_addr, + boot_info); + } + area = boot_find_busy_area_of_addr(new_addr); + if (unlikely(area == NULL)) { + BOOT_BUG("Could not find remapped from low to high busy area, " + "low address 0x%lx, high 0x%lx\n", + old_addr, new_addr); + } + if (area->type != mem_type) { + BOOT_WARNING("Memory type %d of remapped from low 0x%lx to " + "high 0x%lx area is not the same as source area " + "type %d\n", + area->type, old_addr, new_addr, mem_type); + } + return new_addr; +} + +static void __init +boot_update_kernel_image_addr(bool bsp, boot_info_t *boot_info) +{ + e2k_addr_t old_addr, new_addr; + + if (BOOT_IS_BSP(bsp)) { + /* kernel image base address */ + old_addr = boot_kernel_phys_base; + new_addr = boot_get_remapped_area_addr(boot_info, + old_addr, kernel_image_mem_type); + if (new_addr != old_addr) { + boot_kernel_phys_base = new_addr; + DebugRMLT("kernel image base address was remapped from " + "low memory 0x%lx to high 0x%lx\n", + old_addr, new_addr); + } else { + DebugRMLT("kernel image base address could not remap " + "from low memory 0x%lx to high\n", + old_addr); + } + + /* kernel image 'text' segment */ + old_addr = boot_text_phys_base; + new_addr = boot_get_remapped_area_addr(boot_info, + old_addr, kernel_image_mem_type); + if (new_addr != old_addr) { + boot_text_phys_base = new_addr; + DebugRMLT("kernel 'text' segment was remapped from " + "low memory 0x%lx to high 0x%lx\n", + old_addr, new_addr); + } else { + DebugRMLT("kernel 'text' segment could not remap from " + "low memory 0x%lx to high\n", + old_addr); + } + + /* kernel image 'data/bss' segment */ + old_addr = boot_data_phys_base; + new_addr = boot_get_remapped_area_addr(boot_info, + old_addr, kernel_image_mem_type); + if (new_addr != old_addr) { + boot_data_phys_base = new_addr; + 
DebugRMLT("kernel 'data/bss' segment was remapped from " + "low memory 0x%lx to high 0x%lx\n", + old_addr, new_addr); + } else { + DebugRMLT("kernel 'data/bss' segment could not remap " + "from low memory 0x%lx to high\n", + old_addr); + } + } + + /* kernel 'trap cellar' */ + old_addr = (e2k_addr_t)boot_trap_cellar; + new_addr = boot_get_remapped_area_addr(boot_info, + old_addr, kernel_image_mem_type); + if (new_addr != old_addr) { + boot_set_MMU_TRAP_POINT(boot_trap_cellar); + DebugRMLT("kernel 'trap cellar' area was remapped from " + "low memory 0x%lx to high 0x%lx\n", + old_addr, new_addr); + } else { + DebugRMLT("kernel 'trap cellar' area could not remap " + "from low memory 0x%lx to high\n", + old_addr); + } +} + +static void __init +boot_update_bootblock_addr(bool bsp, boot_info_t *boot_info) +{ + e2k_addr_t old_addr, new_addr; +#ifdef CONFIG_L_IO_APIC + struct intel_mp_floating *mpf; +#endif /* CONFIG_L_IO_APIC */ + + if (BOOT_IS_BSP(bsp)) { + /* kernel <-> boot loader BOOTINFO area */ + old_addr = boot_bootinfo_phys_base; + new_addr = boot_get_remapped_area_addr(boot_info, + old_addr, boot_loader_mem_type); + if (new_addr != old_addr) { + boot_bootinfo_phys_base = new_addr; + boot_bootblock_virt = + (bootblock_struct_t *) + __boot_va(boot_vpa_to_pa(new_addr)); + DebugRMLT("kernel <-> boot loader info was remapped " + "from low memory 0x%lx to high 0x%lx (0x%lx)\n", + old_addr, new_addr, boot_bootblock_virt); + } else { + DebugRMLT("kernel <-> boot loader info could not remap " + "from low memory 0x%lx to high\n", + old_addr); + } + +#ifdef CONFIG_BLK_DEV_INITRD + /* initial ramdisk INITRD */ + if (boot_info->ramdisk_size != 0) { + old_addr = boot_initrd_phys_base; + new_addr = boot_get_remapped_area_addr(boot_info, + old_addr, boot_loader_mem_type); + if (new_addr != old_addr) { + boot_initrd_phys_base = new_addr; + DebugRMLT("initial ramdisk INITRD was remapped " + "from low memory 0x%lx to high 0x%lx\n", + old_addr, new_addr); + } else { + 
DebugRMLT("initial ramdisk INITRD could not " + "remap from low memory 0x%lx to high\n", + old_addr); + } + } +#endif /* CONFIG_BLK_DEV_INITRD */ + +#ifdef CONFIG_L_IO_APIC + if (boot_info->mp_table_base == (e2k_addr_t)0UL) + /* nothing additional tables */ + return; + + mpf = (struct intel_mp_floating *)boot_info->mp_table_base; + + /* additional MP tables */ + old_addr = boot_info->mp_table_base; + new_addr = boot_get_remapped_area_addr(boot_info, + old_addr, boot_loader_mem_type); + if (new_addr != old_addr) { + boot_info->mp_table_base = new_addr; + boot_mpf_phys_base = new_addr; + DebugRMLT("MP floating table was remapped " + "from low memory 0x%lx to high 0x%lx\n", + old_addr, new_addr); + } else { + DebugRMLT("MP floating table could not remap " + "from low memory 0x%lx to high\n", + old_addr); + } + + /* MP configuration tables */ + if (mpf->mpf_physptr == (e2k_addr_t)0UL) + return; + + old_addr = mpf->mpf_physptr; + new_addr = boot_get_remapped_area_addr(boot_info, + old_addr, boot_loader_mem_type); + if (new_addr != old_addr) { + mpf->mpf_checksum = 0; + mpf->mpf_physptr = new_addr; + boot_mpc_phys_base = new_addr; + /* recalculate structure sum */ + mpf->mpf_checksum = + boot_mpf_do_checksum((unsigned char *)mpf, + sizeof(*mpf)); + DebugRMLT("MP configuration table was remapped " + "from low memory 0x%lx to high 0x%lx\n", + old_addr, new_addr); + } else { + DebugRMLT("MP configuration table could not remap " + "from low memory 0x%lx to high\n", + old_addr); + } +#endif /* CONFIG_L_IO_APIC */ + } +} + +static void __init +boot_update_boot_memory_addr(bool bsp, boot_info_t *boot_info) +{ + e2k_addr_t old_addr, new_addr; + + if (BOOT_IS_BSP(bsp)) { + int bank; + + /* boot loader busy areas */ + for (bank = 0; bank < boot_info->num_of_busy; bank++) { + bank_info_t *busy_area; + busy_area = &boot_info->busy[bank]; + old_addr = busy_area->address; + new_addr = boot_get_remapped_area_addr(boot_info, + old_addr, boot_loader_mem_type); + if (new_addr != old_addr) 
{ + busy_area->address = new_addr; + DebugRMLT("memory area occupied by boot loader " + "was remapped from low memory 0x%lx " + "to high 0x%lx\n", + old_addr, new_addr); + } else { + DebugRMLT("memory area occupied by boot loader " + "could not remap from low memory 0x%lx " + "to high\n", + old_addr); + } + } + } +} + +static void __init +boot_update_stacks_addr(boot_info_t *boot_info) +{ + e2k_addr_t old_addr, new_addr; + + /* kernel procedure stack */ + old_addr = boot_boot_ps_phys_base; + new_addr = boot_get_remapped_area_addr(boot_info, + old_addr, boot_loader_mem_type); + if (new_addr != old_addr) { + boot_boot_ps_phys_base = new_addr; + DebugRMLT("kernel procedure stack was remapped from " + "low memory 0x%lx to high 0x%lx\n", + old_addr, new_addr); + } else { + DebugRMLT("kernel procedure stack could not remap " + "from low memory 0x%lx to high\n", + old_addr); + } + + /* kernel procedure chain stack */ + old_addr = boot_boot_pcs_phys_base; + new_addr = boot_get_remapped_area_addr(boot_info, + old_addr, boot_loader_mem_type); + if (new_addr != old_addr) { + boot_boot_pcs_phys_base = new_addr; + DebugRMLT("kernel procedure chain stack was remapped from " + "low memory 0x%lx to high 0x%lx\n", + old_addr, new_addr); + } else { + DebugRMLT("kernel procedure chain stack could not remap " + "from low memory 0x%lx to high\n", + old_addr); + } + + /* kernel local data stack */ + old_addr = boot_boot_stack_phys_base; + new_addr = boot_get_remapped_area_addr(boot_info, + old_addr, boot_loader_mem_type); + if (new_addr != old_addr) { + boot_boot_stack_phys_base = new_addr; + DebugRMLT("kernel local data stack was remapped from " + "low memory 0x%lx to high 0x%lx\n", + old_addr, new_addr); + } else { + DebugRMLT("kernel local data stack could not remap " + "from low memory 0x%lx to high\n", + old_addr); + } +} + +static void __init +boot_update_reserved_areas_addr(bool bsp, boot_info_t *boot_info) +{ + /* Update kernel image 'text/data/bss' segments */ + 
boot_update_kernel_image_addr(bsp, boot_info); + + /* Update memory of boot-time local data & hardware stacks */ + boot_update_stacks_addr(boot_info); + + /* Update boot information records */ + boot_update_bootblock_addr(bsp, boot_info); + + /* Update pointers to memory used by BOOT (e2k boot-loader) */ + boot_update_boot_memory_addr(bsp, boot_info); +} + +void __init boot_remap_low_memory(bool bsp, boot_info_t *boot_info) +{ + if (BOOT_LOW_MEMORY_ENABLED()) { + boot_printk("Remapping low memory to high is disabled\n"); + return; + } + + /* + * Preliminarily reserve low memory, if it need + */ + boot_reserve_low_memory(bsp, boot_info); + + /* + * SYNCHRONIZATION POINT #0.0.1 + * At this point all processors should complete reserving of + * used low memory. + * After synchronization can start remapping of low memory to high + */ + boot_sync_all_processors(); + + /* + * Remap all low memory to high memory range on all nodes + */ + boot_remap_low_to_high_memory(boot_info); + + /* + * SYNCHRONIZATION POINT #0.0.2 + * At this point all processors should complete remapping of + * used low memory to high adrresses memory range. + * After synchronization need be updated common info about + * present physical memory + */ + boot_sync_all_processors(); + + /* update common info about present physical memory */ + if (BOOT_IS_BSP(bsp)) + boot_update_physmem_maps(boot_info); + + /* update addresses of remapped kernel boot-time data, structures, */ + /* images, tables and other allocated & reserved areas */ + boot_update_reserved_areas_addr(bsp, boot_info); +} +#endif /* CONFIG_ONLY_HIGH_PHYS_MEM */ + +/* + * Allocate memory area into the free physical memory space on the node. + * Start address of allocation should have the alignment 'align' and + * The memory area is allocated in terms of pages with size 'page_size'. + * Partially occupied pages (in terms of 'page_size') are considered fully + * reserved and can not be used for other memory request. 
+ * + * Alignment 'align' and 'page_size' has to be a power of 2 value. + * + * Function returns base address of allocated memory or (void *)-1, if + * allocation failed or no memory on the node + */ +static void * __init_recv +boot_alloc_node_physmem(int node_id, e2k_size_t mem_size, + e2k_size_t align, e2k_size_t page_size, + busy_mem_type_t mem_type, unsigned short flags) +{ + boot_phys_mem_t *all_nodes_mem; + boot_phys_mem_t *node_mem; + boot_phys_bank_t *node_banks; + boot_phys_bank_t *phys_bank = NULL; + e2k_size_t max_align; + short bank; + long mem_pages; + e2k_size_t start_page; + bool start_found; + e2k_addr_t start_addr = -1; + long bank_pages_num; + + DebugAM("boot_alloc_node_physmem() node #%d: mem size 0x%lx\n", + node_id, mem_size); + if (mem_size == 0) + BOOT_BUG("Allocated memory area size %ld is empty", mem_size); + + DebugAM("boot_alloc_node_physmem() page size 0x%lx\n", page_size); + if (page_size == 0) + BOOT_BUG("The page size to round up %ld is empty", page_size); + + if (align > page_size) + max_align = align; + else + max_align = page_size; + DebugAM("boot_alloc_node_physmem() max align 0x%lx\n", max_align); + mem_pages = (mem_size + (page_size-1)) / page_size; + mem_pages *= (page_size / PAGE_SIZE); + DebugAM("boot_alloc_node_physmem() mem pages 0x%lx\n", mem_pages); + + /* + * Scan the node physical memory banks and search an area of contiguous + * free pages, which satisfies to conditions of start address alignment, + * needed page size alignment and requested memory size. + * The allocated memory range can occupy a few contiguous physical + * banks. 
+ */ + + all_nodes_mem = boot_vp_to_pp((boot_phys_mem_t *)boot_phys_mem); + + start_found = false; + boot_the_node_spin_lock(node_id, boot_phys_mem_lock); + node_mem = &all_nodes_mem[node_id]; + if (node_mem->pfns_num == 0) { + goto no_memory; /* node has not memory */ + } + node_banks = node_mem->banks; + for (bank = node_mem->first_bank; bank >= 0; bank = phys_bank->next) { + e2k_addr_t bank_start; + e2k_busy_mem_t *busy_area; + short area; + + phys_bank = &node_banks[bank]; + bank_start = phys_bank->base_addr; + DebugAM("boot_alloc_node_physmem() current bank #%d is " + "from 0x%lx to 0x%lx\n", + bank, bank_start, + bank_start + (phys_bank->pages_num << PAGE_SHIFT)); + if (phys_bank->pages_num == 0) { + /* bank in the list has not pages */ + BOOT_BUG("Node #%d bank #%d at the list " + "has not memory pages", + node_id, bank); + break; + } + if ((flags & BOOT_ONLY_LOW_PHYS_MEM) && + is_addr_from_high_memory(bank_start)) { + DebugAM("boot_alloc_node_physmem() bank is from high " + "memory 0x%lx, need only low\n", + bank_start); + continue; + } + if ((flags & (BOOT_ONLY_HIGH_PHYS_MEM | + BOOT_FIRST_HIGH_PHYS_MEM)) && + is_addr_from_low_memory(bank_start + + (phys_bank->pages_num << PAGE_SHIFT) - 1)) { + DebugAM("boot_alloc_node_physmem() bank is from low " + "memory 0x%lx, need only or first high\n", + bank_start); + continue; + } + DebugAM("boot_alloc_node_physmem() node #%d bank #%d " + "free pages num is 0x%lx\n", + node_id, bank, + atomic64_read(&phys_bank->free_pages_num)); + if (atomic64_read(&phys_bank->free_pages_num) == 0) { + DebugAM("boot_alloc_node_physmem() node #%d bank #%d " + "has not free pages\n", + node_id, bank); + continue; + } + + /* + * Scan all busy areas of physical memory bank and + * search a suitable hole of contiguous free pages. 
+ */ + start_addr = phys_bank->base_addr; + start_addr = ALIGN_TO_SIZE(start_addr, max_align); + start_page = (start_addr - phys_bank->base_addr) >> PAGE_SHIFT; + if (start_page + mem_pages > phys_bank->pages_num) { + DebugAM("boot_alloc_node_physmem() node #%d bank #%d " + "has not enough memory from 0x%lx to 0x%lx\n", + node_id, bank, start_addr, + start_addr + (mem_pages << PAGE_SHIFT)); + continue; + } + for (area = phys_bank->first_area; + area >= 0; + area = busy_area->next) { + e2k_size_t area_start, area_end; + + busy_area = &phys_bank->busy_areas[area]; + if (busy_area->pages_num == 0) { + BOOT_BUG("Node #%d bank #%d empty physical " + "memory busy area #%d cannot be " + "in the list", + node_id, bank, area); + continue; + } + area_start = busy_area->start_page; + area_end = area_start + busy_area->pages_num; + DebugAM("boot_alloc_node_physmem() node #%d bank #%d " + "busy area #%d from 0x%lx to 0x%lx\n", + node_id, bank, area, + bank_start + (area_start << PAGE_SHIFT), + bank_start + (area_end << PAGE_SHIFT)); + if (start_page < area_start && + area_start - start_page >= mem_pages) { + /* suitable free area is found */ + start_found = true; + DebugAM("boot_alloc_node_physmem() node #%d " + "bank #%d area #%d found free hole " + "from 0x%lx to 0x%lx\n", + node_id, bank, area, start_addr, + start_addr + (mem_pages << PAGE_SHIFT)); + break; + } + if (start_page < area_start || + start_page < area_end) { + /* hole is too small or start into already */ + /* busy area, shift start address outside */ + /* the end of current area and goto next */ + start_page = area_end; + start_addr = bank_start + + (start_page << PAGE_SHIFT); + start_addr = ALIGN_TO_SIZE(start_addr, + max_align); + start_page = (start_addr - + bank_start) >> PAGE_SHIFT; + DebugAM("boot_alloc_node_physmem() node #%d " + "bank #%d area #%d shift start of " + "search to 0x%lx\n", + node_id, bank, area, start_addr); + if (start_page + mem_pages > + phys_bank->pages_num) { + 
DebugAM("boot_alloc_node_physmem() " + "node #%d bank #%d " + "has not enough memory " + "from 0x%lx to 0x%lx\n", + node_id, bank, start_addr, + start_addr +(mem_pages << + PAGE_SHIFT)); + break; + } + continue; + } + /* start address above current area, goto next */ + } + + if (start_found) + break; + + if (start_page + mem_pages <= phys_bank->pages_num) { + /* suitable free hole is found at bank end */ + start_found = true; + DebugAM("boot_alloc_node_physmem() node #%d " + "bank #%d found free hole from 0x%lx " + "to 0x%lx at bank end\n", + node_id, bank, start_addr, + start_addr + (mem_pages << PAGE_SHIFT)); + break; + } + DebugAM("boot_alloc_node_physmem() node #%d bank #%d " + "has not enough memory from 0x%lx to 0x%lx\n", + node_id, bank, start_addr, + start_addr + (mem_pages << PAGE_SHIFT)); + } + + if (!start_found) { +no_memory: + boot_the_node_spin_unlock(node_id, boot_phys_mem_lock); + DebugAM("boot_alloc_node_physmem() node #%d: could not find " + "free memory enough to allocate area: size 0x%lx " + "align 0x%lx page size 0x%lx\n", + node_id, mem_size, align, page_size); + return ((void *)-1); + } + + /* Reserve the area now */ + bank_pages_num = boot_reserve_bank_physmem(node_id, node_mem, + bank, phys_bank, + start_addr, mem_pages, + mem_type, flags); + + boot_the_node_spin_unlock(node_id, boot_phys_mem_lock); + + if (bank_pages_num <= 0) { + BOOT_BUG("Could not reserve allocated free memory area: " + "node #%d size %ld align 0x%lx page size 0x%lx", + node_id, mem_size, align, page_size); + return ((void *)-1); + } + + /* VCPUs are starting with virtual memory support ON, so all */ + /* guest "physical addresses" (gpa) should be virtual (vpa) */ + /* i.e. 
PAGE_OFFSET + gpa */ + start_addr = boot_pa_to_vpa(start_addr); + + return((void *)start_addr); +} + +void * __init_recv +boot_alloc_node_mem(int node_id, e2k_size_t mem_size, + e2k_size_t align, e2k_size_t page_size, + busy_mem_type_t mem_type, unsigned short flags) +{ + node_phys_mem_t *all_nodes_mem = NULL; + void *node_mem; + int cur_node = node_id; + int node; + int nodes_num; + int cur_nodes_num = 0; + int cur_try; + + DebugAM("boot_alloc_node_mem() node #%d: mem size 0x%lx %s\n", + node_id, mem_size, + (flags & BOOT_ONLY_ON_NODE_ALLOC_MEM) ? + "only on this node" : "may be on other node"); + nodes_num = boot_phys_mem_nodes_num; + all_nodes_mem = boot_vp_to_pp((boot_phys_mem_t *)boot_phys_mem); + for (cur_try = 0; cur_try < 3; cur_try ++) { + for (node = 0; node < L_MAX_MEM_NUMNODES; node ++) { + if (cur_nodes_num >= nodes_num) + goto next_try; /* no more nodes with memory */ + if (all_nodes_mem[cur_node].pfns_num == 0) { + if (flags & BOOT_ONLY_ON_NODE_ALLOC_MEM) + break; + goto next_node; /* node has not memory */ + } +node_next_try: + node_mem = boot_alloc_node_physmem(cur_node, mem_size, align, + page_size, mem_type, flags); + if (node_mem != (void *)-1) { + if (cur_node != node_id) { + BOOT_WARNING("Could allocate area on node #%d " + "insteed of #%d, addr 0x%lx size 0x%lx " + "align 0x%lx page size 0x%lx", + cur_node, node_id, node_mem, + mem_size, align, page_size); + } + DebugAM("boot_alloc_node_mem() node #%d: allocated " + "on node #%d from 0x%px, size 0x%lx\n", + node_id, cur_node, node_mem, mem_size); + return (node_mem); + } + if (flags & BOOT_ONLY_ON_NODE_ALLOC_MEM) { + if (flags & BOOT_FIRST_HIGH_PHYS_MEM) { + DebugAM("boot_alloc_node_mem() node #%d: could " + "not allocate high memory as first, " + "try allocate any only on node\n", + node_id); + flags &= ~BOOT_FIRST_HIGH_PHYS_MEM; + goto node_next_try; + } + break; + } + cur_nodes_num ++; +next_node: + cur_node ++; + if (cur_node >= L_MAX_MEM_NUMNODES) { + /* + * If there is not more nodes, 
we start new search + * from node #1 and only at last we take node #0 + * so same algorithm is used while building zone lists + * on each node (see mm/page_alloc.c) + */ +next_try: + if (cur_try == 0) { + cur_node = 1; + cur_nodes_num = 1; + break; + } else if (cur_try == 1) { + cur_node = 0; + cur_nodes_num = 0; + break; + } + } + } + if (flags & BOOT_ONLY_ON_NODE_ALLOC_MEM) + break; + if (flags & BOOT_FIRST_HIGH_PHYS_MEM) { + DebugAM("boot_alloc_node_mem() node #%d: could not " + "allocate high memory as first, try " + "allocate any on any node\n", + node_id); + flags &= ~BOOT_FIRST_HIGH_PHYS_MEM; + cur_node = node_id; + cur_nodes_num = 0; + } + } + if (!(flags & BOOT_IS_TRY_ALLOC_MEM)) { + BOOT_BUG("Could not find free memory enough to allocate area: " + "node #%d (%s) size %ld align 0x%lx page size 0x%lx", + node_id, + (flags & BOOT_ONLY_ON_NODE_ALLOC_MEM) ? + "only on this node" : "and on other node", + mem_size, align, page_size); + } + return ((void *)-1); +} + +/* + * Map the physical memory pages of the banks into the virtual pages + * The function returns the number of mapped pages in the bank + */ +static long __init_recv +boot_map_physmem_area(e2k_addr_t phys_start, e2k_addr_t phys_end, + pgprot_t prot_flags, e2k_size_t max_page_size, + pt_struct_t *pt_struct, int start_level) +{ + pt_level_t *pt_level; + e2k_addr_t level_start; + e2k_addr_t level_end; + e2k_size_t page_size; + e2k_addr_t map_virt_addr; + e2k_size_t map_size; + int level; + long pages = 0; + long ret; + + /* loop on all page table levels from possible max to min level, */ + /* it allows to map physical memory to virtual pages of max sizes */ + for (level = start_level; level > 0; level--) { + pt_level = &pt_struct->levels[level]; + if (!pt_level->is_huge && !pt_level->is_pte) + /* level cannot point to physical pages */ + /* (can be as pte) */ + continue; + page_size = pt_level->page_size; + if (max_page_size != 0 && page_size > max_page_size) + /* it is not level with specified page 
size */ + continue; + + level_start = _PAGE_ALIGN_DOWN(phys_start, page_size); + level_end = _PAGE_ALIGN_UP(phys_end, page_size); + if (level_start >= level_end) + continue; /* too big page size to map */ + + /* this lavel and page size is suitable to map */ + break; + } + if (unlikely(level <= 0)) { + BOOT_BUG("Could not find page table level to map physical " + "memory from addr 0x%lx to 0x%lx, specified max page " + "size 0x%lx", + phys_start, phys_end, max_page_size); + return -EINVAL; + } + + if (level_start != phys_start) { + /* there is area at beginning which can be mapped */ + /* only a smaller pages */ + pages += boot_map_physmem_area(phys_start, level_start, + prot_flags, max_page_size, + pt_struct, level - 1); + } + + map_virt_addr = (e2k_addr_t)__boot_va(level_start); + map_size = level_end - level_start; + ret = boot_do_map_phys_area(level_start, map_size, map_virt_addr, + prot_flags, pt_level, + false, /* ignore mapping virtual area is busy ? */ + false); /* populate map on host ? 
*/ + if (unlikely(ret <= 0)) { + BOOT_BUG("Could not map physical memory from addr 0x%lx " + "to 0x%lx, page size 0x%lx", + level_start, level_end, page_size); + return ret; + } + pages += (ret * (page_size >> PAGE_SHIFT)); + DebugMP("Map physical memory from addr 0x%lx to 0x%lx to virtual space " + "base 0x%lx, 0x%lx pages of size 0x%lx\n", + level_start, level_end, map_virt_addr, ret, page_size); + + if (level_end != phys_end) { + /* there is area at ending which can be mapped */ + /* only a smaller pages */ + pages += boot_map_physmem_area(level_end, phys_end, + prot_flags, max_page_size, + pt_struct, level - 1); + } + + return pages; +} +static long __init_recv +boot_map_banks_physmem(e2k_addr_t phys_start, e2k_addr_t phys_end, + pgprot_t prot_flags, e2k_size_t max_page_size) +{ + pt_struct_t *pt_struct = boot_pgtable_struct_p; + + DebugMP("will map physical area from 0x%lx to 0x%lx\n", + phys_start, phys_end); + return boot_map_physmem_area(phys_start, phys_end, + prot_flags, max_page_size, pt_struct, + pt_struct->levels_num); +} + +e2k_addr_t __init +boot_get_adjacent_phys_bank_addr(int start_node, short start_bank, + e2k_addr_t start_addr, + bool lowest /* if false then highest */) +{ + boot_phys_mem_t *all_phys_banks = NULL; + int my_node_id = boot_numa_node_id(); + int nodes_num; + int cur_nodes_num = 0; + e2k_addr_t bank_base; + e2k_addr_t bank_end; + e2k_addr_t new_start; + e2k_addr_t phys_addr; + int nodes; + int node; + short bank; + + all_phys_banks = boot_vp_to_pp((boot_phys_mem_t *)boot_phys_mem); + nodes_num = boot_phys_mem_nodes_num; + node = start_node; + for (nodes = 0; nodes < L_MAX_MEM_NUMNODES; nodes++) { + boot_phys_mem_t *node_mem = &all_phys_banks[node]; + boot_phys_bank_t *node_banks; + boot_phys_bank_t *phys_bank; + + if (cur_nodes_num >= nodes_num) + break; /* no more nodes with memory */ + if (node_mem->pfns_num == 0) + goto next_node; /* node has not memory */ + node_banks = node_mem->banks; + if (node == start_node) + bank = 
node_banks[start_bank].next; + else + bank = node_mem->first_bank; + cur_nodes_num++; + for (; bank >= 0; bank = phys_bank->next) { + phys_bank = &node_banks[bank]; + if (phys_bank->pages_num == 0) { + /* bank in the list has not pages */ + BOOT_BUG("Node #%d bank #%d at the list " + "has not memory pages", + node, bank); + } + DebugMP("Node #%d bank #%d: from 0x%lx to 0x%lx\n", + node, bank, phys_bank->base_addr, + phys_bank->base_addr + + (phys_bank->pages_num << PAGE_SHIFT)); + if (phys_bank->mapped[my_node_id]) { + /* bank already mapped */ + DebugMP("Node #%d bank #%d: already mapped\n", + node, bank); + continue; + } + bank_base = phys_bank->base_addr; + bank_end = bank_base + + (phys_bank->pages_num << PAGE_SHIFT); + if (start_addr > bank_base && start_addr < bank_end) { + BOOT_BUG("Node #%d bank #%d: start addr 0x%lx " + "is into bank range deom 0x%lx " + "to 0x%lx", + node, bank, start_addr, + bank_base, bank_end); + } + if (lowest) + /* contiguity should be to end */ + phys_addr = bank_end; + else + /* contiguity should be to begin */ + phys_addr = bank_base; + if (phys_addr == start_addr) { + if (lowest) + new_start = bank_base; + else + new_start = bank_end; + phys_bank->mapped[my_node_id] = true; + DebugMP("Node #%d bank #%d: there is " + "contiguity from %s, contigous bank %s " + "is now 0x%lx\n", + node, bank, + (lowest) ? "end" : "start", + (lowest) ? "start" : "end", + new_start); + return boot_get_adjacent_phys_bank_addr( + node, bank, new_start, lowest); + } + if (start_addr < phys_addr) + /* all other banks higher and cannot have */ + /* contiguity from start or end */ + break; + } +next_node: + node++; + if (node >= L_MAX_MEM_NUMNODES) + node = 0; + } + DebugMP("Node #%d bank #%d: there is not more contiguity from %s, so " + "contigous bank %s stay the same 0x%lx\n", + start_node, start_bank, + (lowest) ? "end" : "start", (lowest) ? 
"start" : "end", + start_addr); + return start_addr; +} +static inline e2k_addr_t __init +boot_get_lowest_phys_bank_base(int start_node, short start_bank, + e2k_addr_t start_addr) +{ + return boot_get_adjacent_phys_bank_addr(start_node, start_bank, + start_addr, + true /* lowest ? */); +} +static inline e2k_addr_t __init +boot_get_highest_phys_bank_end(int start_node, short start_bank, + e2k_addr_t start_addr) +{ + return boot_get_adjacent_phys_bank_addr(start_node, start_bank, + start_addr, + false /* lowest ? */); +} + +/* + * Map all physical memory into virtual space + * + * Function returns number of mapped physical pages + */ +long __init +boot_map_physmem(pgprot_t prot_flags, e2k_size_t max_page_size) +{ + boot_phys_mem_t *all_phys_banks = NULL; + int my_node_id = boot_numa_node_id(); + int nodes_num; + int cur_nodes_num = 0; + e2k_addr_t bank_base; + e2k_addr_t bank_end; + e2k_addr_t phys_addr; + e2k_addr_t phys_end; + long pages_num; + long mapped_pages = 0; + long all_pages_num = boot_pages_of_phys_memory; + int node; + short bank; + + all_phys_banks = boot_vp_to_pp((boot_phys_mem_t *)boot_phys_mem); + nodes_num = boot_phys_mem_nodes_num; + for (node = 0; node < L_MAX_MEM_NUMNODES; node++) { + boot_phys_mem_t *node_mem = &all_phys_banks[node]; + boot_phys_bank_t *node_banks; + boot_phys_bank_t *phys_bank; + + if (cur_nodes_num >= nodes_num) + break; /* no more nodes with memory */ + if (node_mem->pfns_num == 0) + continue; /* node has not memory */ + node_banks = node_mem->banks; + DebugMP("Node #%d: physical memory banks number %d\n", + node, node_mem->banks_num); + cur_nodes_num++; + for (bank = node_mem->first_bank; + bank >= 0; + bank = phys_bank->next) { + phys_bank = &node_banks[bank]; + if (phys_bank->pages_num == 0) { + /* bank in the list has not pages */ + BOOT_BUG("Node #%d bank #%d at the list " + "has not memory pages", + node, bank); + } + if (phys_bank->mapped[my_node_id]) + /* bank alread mapped */ + continue; + bank_base = 
phys_bank->base_addr; + bank_end = bank_base + + (phys_bank->pages_num << PAGE_SHIFT); + phys_bank->mapped[my_node_id] = true; + DebugMP("Node #%d bank #%d from base 0x%lx to 0x%lx " + "try expand continuously from start and end\n", + node, bank, bank_base, bank_end); + if (bank_base > boot_start_of_phys_memory) + phys_addr = boot_get_lowest_phys_bank_base( + node, bank, bank_base); + else + phys_addr = bank_base; + if (phys_addr != bank_base) { + DebugMP("Node #%d bank #%d base bank " + "addr 0x%lx was decrement to 0x%lx\n", + node, bank, bank_base, phys_addr); + } else { + DebugMP("Node #%d bank #%d base bank " + "addr 0x%lx was not changed\n", + node, bank, phys_addr); + } + if (bank_end < boot_end_of_phys_memory) + phys_end = boot_get_highest_phys_bank_end( + node, bank, bank_end); + else + phys_end = bank_end; + if (phys_end != bank_end) { + DebugMP("Node #%d bank #%d end bank " + "addr 0x%lx was increment to 0x%lx\n", + node, bank, bank_end, phys_end); + } else { + DebugMP("Node #%d bank #%d end bank " + "addr 0x%lx was not changed\n", + node, bank, phys_end); + } + pages_num = boot_map_banks_physmem(phys_addr, phys_end, + prot_flags, max_page_size); + DebugMP("Node #%d bank #%d: physical memory from 0x%lx " + "to 0x%lx mapped to 0x%lx pages\n", + node, bank, phys_addr, phys_end, pages_num); + mapped_pages += pages_num; + if (mapped_pages >= all_pages_num) + break; + } + if (mapped_pages >= all_pages_num) + break; + } + if (mapped_pages != all_pages_num) { + BOOT_BUG("Could not map all needed physical memory pages " + "only 0x%lx pages instead of 0x%lx", + mapped_pages, all_pages_num); + } + return mapped_pages; +} diff --git a/arch/e2k/p2v/boot_printk/Makefile b/arch/e2k/p2v/boot_printk/Makefile new file mode 100644 index 000000000000..f8cc28acc3cf --- /dev/null +++ b/arch/e2k/p2v/boot_printk/Makefile @@ -0,0 +1,5 @@ +GCOV_PROFILE := n + +obj-$(CONFIG_SERIAL_AM85C30_BOOT_CONSOLE) += am85c30.o +obj-$(CONFIG_BOOT_PRINTK) += console.o 
+obj-$(CONFIG_EARLY_VIRTIO_CONSOLE) += boot_hvc_l.o diff --git a/arch/e2k/p2v/boot_printk/am85c30.c b/arch/e2k/p2v/boot_printk/am85c30.c new file mode 100644 index 000000000000..dcd4392facfe --- /dev/null +++ b/arch/e2k/p2v/boot_printk/am85c30.c @@ -0,0 +1,137 @@ +/* + * COM port console AM85C30 support + */ + +#include + +#include +#include +#include +#include + +#include +#include +#include +#include + +#undef DEBUG_SC_MODE +#undef DebugSC +#define DEBUG_SC_MODE 0 /* serial console debug */ +#define DebugSC if (DEBUG_SC_MODE) do_boot_printk + +static unsigned long am85c30_com_port = 0; +extern serial_console_opts_t am85c30_serial_boot_console; + +#define boot_am85c30_com_port boot_get_vo_value(am85c30_com_port) +#define boot_am85c30_serial_boot_console \ + ((boot_get_vo_value(am85c30_serial_boot_console))) + +static inline void +am85c30_com_outb(u64 iomem_addr, u8 byte) +{ + boot_writeb(byte, (void __iomem *)iomem_addr); + wmb(); /* waiting for write to serial port completion */ +} + +static inline u8 +am85c30_com_inb(u64 iomem_addr) +{ + rmb(); /* waiting for read from serial port completion */ + return boot_readb((void __iomem *)iomem_addr); +} + +static inline u8 +am85c30_com_inb_command(u64 iomem_addr, u8 reg_num) +{ + boot_writeb(reg_num, (void __iomem *)iomem_addr); + wmb(); /* waiting for write to serial port completion */ + return boot_readb((void __iomem *)iomem_addr); +} + +static inline void +am85c30_com_outb_command(u64 iomem_addr, u8 reg_num, u8 val) +{ + boot_writeb(reg_num, (void __iomem *)iomem_addr); + wmb(); /* waiting for write to serial port completion */ + boot_writeb(val, (void __iomem *)iomem_addr); + wmb(); /* waiting for write to serial port completion */ +} + +static void +boot_am85c30_serial_putc(unsigned char c) +{ + unsigned long port; + u8 cmd_saved; + + port = boot_am85c30_com_port + 2 * boot_serial_boot_console_num; + cmd_saved = am85c30_com_inb_command(port, AM85C30_RR1); + + am85c30_com_outb_command(port, AM85C30_RR1, + cmd_saved 
& ~(AM85C30_EXT_INT_ENAB | AM85C30_TxINT_ENAB | + AM85C30_RxINT_MASK)); + + while ((am85c30_com_inb_command(port, AM85C30_RR0) & AM85C30_D2) == 0) + ; + am85c30_com_outb(port + 0x01, c); + + while ((am85c30_com_inb_command(port, AM85C30_RR0) & AM85C30_D2) == 0) + ; + am85c30_com_outb_command(port, AM85C30_RR0, AM85C30_RES_Tx_P); + am85c30_com_outb_command(port, AM85C30_RR1, cmd_saved); +} + +static unsigned char +boot_am85c30_serial_getc(void) +{ + unsigned long port; + + port = boot_am85c30_com_port + 2 * boot_serial_boot_console_num; + while (((am85c30_com_inb_command(port, AM85C30_RR0)) & AM85C30_D0) == 0) + ; + return am85c30_com_inb(port + 0x01); +} + +void __init_cons boot_debug_puts(char *s) +{ + if (boot_am85c30_com_port == 0) + return; + s = boot_vp_to_pp(s); + while (*s) + boot_am85c30_serial_putc(*s++); + boot_am85c30_serial_putc('\n'); +} + +static int __init +boot_am85c30_init(void *serial_base) +{ + DebugSC("boot_am85c30_init() started\n"); +#ifdef CONFIG_E2K + if (!BOOT_HAS_MACHINE_E2K_IOHUB) { + DebugSC("boot_am85c30_init() on this machine AM85C30 serial " + "device is not used\n"); + return (-ENODEV); + } +#endif /* CONFIG_E2K */ + + if (serial_base == NULL) { + do_boot_printk("boot_am85c30_init() Serial console base IO " + "address is not passed by BIOS\n"); + do_boot_printk("boot_am85c30_init() Serial console is not " + "enabled\n"); + return (-ENODEV); + } + boot_am85c30_com_port = (unsigned long)serial_base; + boot_am85c30_serial_boot_console.io_base = (unsigned long)serial_base; + DebugSC("boot_am85c30_init() enabled serial console at %p " + "IO memory base\n", serial_base); + return (0); +} + +/* AM85C30 serial console opts struct */ +serial_console_opts_t am85c30_serial_boot_console = { + .name = SERIAL_CONSOLE_AM85C30_NAME, + .io_base = 0, + .serial_putc = boot_am85c30_serial_putc, + .serial_getc = boot_am85c30_serial_getc, + .init = boot_am85c30_init, +}; diff --git a/arch/e2k/p2v/boot_printk/boot_hvc_l.c 
b/arch/e2k/p2v/boot_printk/boot_hvc_l.c new file mode 100644 index 000000000000..bb4d53ed971e --- /dev/null +++ b/arch/e2k/p2v/boot_printk/boot_hvc_l.c @@ -0,0 +1,92 @@ +/* + * l (Elbrus) console driver interface to hvc_console.c + * based on xen console driver + * + * (c) 2007 Gerd Hoffmann + * (c) 2013 Salavat Gilyazov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#undef DEBUG_EARLY_CONSOLE_MODE +#undef DebugBEC +#define DEBUG_EARLY_CONSOLE_MODE 0 /* early console debugging */ +#define DebugBEC(fmt, args...) 
\ +({ \ + if (DEBUG_EARLY_CONSOLE_MODE) \ + do_boot_printk("%s(): " fmt, __func__, ##args); \ +}) + +bool early_virtio_cons_enabled = false; + +static unsigned char buffer[512]; +static int buf_pos = 0; +#define boot_buffer boot_get_vo_value(buffer) +#define boot_buf_pos boot_get_vo_value(buf_pos) + +static int boot_raw_console_write(const char *str, int len) +{ + int count = len; + + while (len > 0) { + int rc = HYPERVISOR_console_io(CONSOLEIO_write, len, + (char *)str); + if (rc <= 0) + break; + + str += rc; + len -= rc; + } + return count - len; +} +void boot_hvc_l_raw_putc(unsigned char c) +{ + unsigned char *cur_buffer = boot_buffer; + int cur_pos = boot_buf_pos; + + cur_buffer[cur_pos] = c; + cur_pos++; + if (cur_pos >= sizeof(buffer) || c == '\n') { + boot_raw_console_write((const char *)cur_buffer, cur_pos); + cur_pos = 0; + } + boot_buf_pos = cur_pos; +} + +int __init boot_hvc_l_cons_init(e2k_addr_t console_base) +{ + if (!boot_paravirt_enabled()) + return -ENODEV; + +#ifdef CONFIG_EARLY_VIRTIO_CONSOLE + boot_early_virtio_cons_enabled = true; + DebugBEC("VIRTIO HVC Console interface will be used as " + "boot console\n"); +#endif /* CONFIG_EARLY_VIRTIO_CONSOLE */ + return 0; +} diff --git a/arch/e2k/p2v/boot_printk/console.c b/arch/e2k/p2v/boot_printk/console.c new file mode 100644 index 000000000000..ffc246ae995d --- /dev/null +++ b/arch/e2k/p2v/boot_printk/console.c @@ -0,0 +1,468 @@ +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "../boot_string.h" + +#undef DEBUG_SC_MODE +#undef DebugSC +#define DEBUG_SC_MODE 0 /* serial console debug */ +#define DebugSC if (DEBUG_SC_MODE) do_boot_printk + +#define FALSE 0 +#define TRUE 1 + +#define is_digit(c) ((c >= '0') && (c <= '9')) + +#ifdef CONFIG_SERIAL_BOOT_PRINTK +/* + * Serial dump console num setup + */ + +static int __init boot_dump_console_set(char *cmd) +{ + boot_serial_boot_console_num = boot_simple_strtoul(cmd, &cmd, 0); + return 0; +} 
+boot_param("dump_console", boot_dump_console_set); + +/* list of all enabled serial consoles, NULL terminated */ +static serial_console_opts_t* serial_boot_consoles[] = { +#if defined(CONFIG_SERIAL_AM85C30_BOOT_CONSOLE) + &am85c30_serial_boot_console, +#endif /* SERIAL AM85C30 CONSOLE */ + NULL, +}; + +static volatile int serial_boot_console_inited = 0; +serial_console_opts_t *serial_boot_console_opts = NULL; +#define boot_serial_boot_console_inited \ + boot_get_vo_value(serial_boot_console_inited) +#define boot_serial_boot_consoles \ + boot_vp_to_pp((serial_console_opts_t **)serial_boot_consoles) + +/* + * Iterates through the list of serial consoles, + * returning the first one that initializes successfully. + */ +void __init_recv +boot_setup_serial_console(bool bsp, boot_info_t *boot_info) +{ + serial_console_opts_t **consoles = boot_serial_boot_consoles; + serial_console_opts_t *console; + int i; + + DebugSC("boot_setup_serial_console() started for consoles " + "list 0x%lx\n", consoles); + +#ifdef CONFIG_SMP + if (!bsp) { + DebugSC("boot_setup_serial_console() CPU is not BSP " + "waiting for init completion\n"); + while(!boot_serial_boot_console_inited) + ; + DebugSC("boot_setup_serial_console() waiting for init " + "completed\n"); + return; + } +#endif /* CONFIG_SMP */ + + /* find most preferred working serial console */ + i = 0; + console = consoles[i]; + DebugSC("boot_setup_serial_console() start console is 0x%lx\n", + console); + while (console != NULL) { + int (*boot_init)(void *serial_base); + + boot_init = boot_opts_func_entry(console, init); + DebugSC("boot_setup_serial_console() console phys " + "init entry 0x%lx\n", boot_init); + if (boot_init != NULL) { + if (boot_init((void *)boot_info->serial_base) == 0) { + boot_serial_boot_console_opts = console; + boot_serial_boot_console_inited = 1; + DebugSC("boot_setup_serial_console() set " + "this console for using\n"); + return; + } + } + i++; + console = consoles[i]; + 
DebugSC("boot_setup_serial_console() next console " + "pointer 0x%lx\n", console); + } + do_boot_printk("boot_setup_serial_console() could not find working " + "serial console\n"); + boot_serial_boot_console_inited = -1; +} +#endif /* CONFIG_SERIAL_BOOT_PRINTK */ + +static void __init_cons +boot_putc(char c) +{ +#if defined(CONFIG_LMS_CONSOLE) + if (!BOOT_NATIVE_IS_MACHINE_SIM) { + /* LMS debug port can be used only on simulator */ + } else if (boot_debug_cons_inl(LMS_CONS_DATA_PORT) != 0xFFFFFFFF) { + + while (boot_debug_cons_inl(LMS_CONS_DATA_PORT)) + ; + + boot_debug_cons_outb(c, LMS_CONS_DATA_PORT); + boot_debug_cons_outb(0, LMS_CONS_DATA_PORT); + } +#endif /* CONFIG_LMS_CONSOLE */ + +#if defined(CONFIG_SERIAL_BOOT_PRINTK) + if (boot_serial_boot_console_opts != NULL) + boot_serial_boot_console_opts_func_entry(serial_putc)(c); +#endif /* serial console or LMS console or early printk */ + +#ifdef CONFIG_EARLY_VIRTIO_CONSOLE + if (boot_early_virtio_cons_enabled) { + boot_hvc_l_raw_putc(c); + } +#endif /* CONFIG_EARLY_VIRTIO_CONSOLE */ +} + +/* + * Write formatted output while booting process is in the progress and + * virtual memory support is not still ready + * All function pointer arguments consider as pointers to virtual addresses and + * convert to conforming physical pointers (These are the pointer of format + * 'fmt_v', pointer of operand list 'ap_v' and pointers in the operands list). 
+ * Therefore, all passed pointer arguments should be virtual (without any + * conversion) + */ + +static char boot_temp[80]; + +static int __init_cons +boot_cvt(unsigned long val, char *buf, long radix, char *digits) +{ + register char *temp = boot_vp_to_pp((char *)boot_temp); + register char *cp = temp; + register int length = 0; + + if (val == 0) { + /* Special case */ + *cp++ = '0'; + } else { + while (val) { + *cp++ = digits[val % radix]; + val /= radix; + } + } + while (cp != temp) { + *buf++ = *--cp; + length++; + } + *buf = '\0'; + return (length); +} + +static char boot_buf[32]; +static const char boot_all_dec[] = "0123456789"; +static const char boot_all_hex[] = "0123456789abcdef"; +static const char boot_all_HEX[] = "0123456789ABCDEF"; + +static void __init_cons +do_boot_vprintk(const char *fmt_v, va_list ap_v) +{ + register const char *fmt = boot_vp_to_pp(fmt_v); + register va_list ap = boot_vp_to_pp(ap_v); + register char c, sign, *cp; + register int left_prec, right_prec, zero_fill, var_size; + register int length = 0, pad, pad_on_right, always_blank_fill; + register char *buf = boot_vp_to_pp((char *)boot_buf); + register long long val = 0; + + while ((c = *fmt++) != 0) { + if (c == '%') { + c = *fmt++; + left_prec = right_prec = pad_on_right = var_size = 0; + if (c == '-') { + c = *fmt++; + pad_on_right++; + always_blank_fill = TRUE; + } else { + always_blank_fill = FALSE; + } + if (c == '0') { + zero_fill = TRUE; + c = *fmt++; + } else { + zero_fill = FALSE; + } + while (is_digit(c)) { + left_prec = (left_prec * 10) + (c - '0'); + c = *fmt++; + } + if (c == '.') { + c = *fmt++; + zero_fill++; + while (is_digit(c)) { + right_prec = (right_prec * 10) + + (c - '0'); + c = *fmt++; + } + } else { + right_prec = left_prec; + } + if (c == 'l' || c == 'L') { + var_size = sizeof(long); + c = *fmt++; + if (c == 'l' || c == 'L') { + var_size = sizeof(long long); + c = *fmt++; + } + } else if (c == 'h') { + c = *fmt++; + if (c == 'h') { + c = *fmt++; + var_size 
= sizeof(char); + } else { + var_size = sizeof(short); + } + } else if (c == 'z' || c == 'Z') { + c = *fmt++; + var_size = sizeof(size_t); + } else if (c == 't') { + c = *fmt++; + var_size = sizeof(ptrdiff_t); + } else { + var_size = 4; + } + if (c == 'p') { + var_size = sizeof(void *); + } + sign = '\0'; + if (c == 'd' || c == 'i' || c == 'u' ||\ + c == 'x' || c == 'X' || c == 'p') { + int var_signed = (c == 'd'|| c == 'i'); + switch (var_size) { + case sizeof(long long): + if (var_signed) + val = (long long) + va_arg(ap, long long); + else + val = (unsigned long long) + va_arg(ap, long long); + break; + case sizeof(int): + if (var_signed) + val = (int) va_arg(ap, int); + else + val = (unsigned int) + va_arg(ap, int); + break; + case sizeof(short): + if (var_signed) + val = (short) va_arg(ap, int); + else + val = (unsigned short) + va_arg(ap, int); + break; + case sizeof(char): + if (var_signed) + val = (char) va_arg(ap, int); + else + val = (unsigned char) + va_arg(ap, int); + break; + } + if (val < 0 && (c == 'd' || c == 'i')) { + sign = '-'; + val = -val; + } + if (c == 'd' || c == 'i' || c == 'u') { + length = boot_cvt(val, buf, 10, + boot_vp_to_pp((char *) + boot_all_dec)); + } else if (c == 'x' || c == 'p') { + length = boot_cvt(val, buf, 16, + boot_vp_to_pp((char *) + boot_all_hex)); + } else if (c == 'X') { + length = boot_cvt(val, buf, 16, + boot_vp_to_pp((char *) + boot_all_HEX)); + } + cp = buf; + } else if (c == 's') { + cp = va_arg(ap, char *); + cp = boot_vp_to_pp(cp); + length = boot_strlen(cp); + } else if (c == 'c') { + c = va_arg(ap, int); + boot_putc(c); + continue; + } else { + boot_putc('?'); + continue; + } + + pad = left_prec - length; + if (sign != '\0') { + pad--; + } + if (zero_fill && !always_blank_fill) { + c = '0'; + if (sign != '\0') { + boot_putc(sign); + sign = '\0'; + } + } else { + c = ' '; + } + if (!pad_on_right) { + while (pad-- > 0) { + boot_putc(c); + } + } + if (sign != '\0') { + boot_putc(sign); + } + while (length-- > 0) { 
+ boot_putc(c = *cp++); + if (c == '\n') { + boot_putc('\r'); + } + } + if (pad_on_right) { + if (zero_fill && !always_blank_fill) + c = '0'; + else + c = ' '; + + while (pad-- > 0) { + boot_putc(c); + } + } + } else { + boot_putc(c); + if (c == '\n') { + boot_putc('\r'); + } + } + } +} + + +static void __init_cons +boot_prefix_printk(char const *fmt_v, ...) +{ + register va_list ap; + + va_start(ap, fmt_v); + do_boot_vprintk(fmt_v, ap); + va_end(ap); +} + + +#ifndef CONFIG_L_EARLY_PRINTK +/* dump_printk() is not configured, so define + * the spinlock to synchronize print on SMP here. */ +boot_spinlock_t vprint_lock = __BOOT_SPIN_LOCK_UNLOCKED; +#endif /* !CONFIG_L_EARLY_PRINTK */ + +void __init_cons +boot_vprintk(const char *fmt_v, va_list ap_v) +{ + unsigned long flags; + + /* Disable NMIs as well as normal interrupts */ + boot_raw_all_irq_save(flags); + boot_spin_lock(&vprint_lock); + boot_prefix_printk("BOOT NODE %d CPU %d: ", + boot_numa_node_id(), boot_smp_processor_id()); + do_boot_vprintk(fmt_v, ap_v); + boot_spin_unlock(&vprint_lock); + boot_raw_all_irq_restore(flags); +} + +void __init_cons +boot_vprintk_no_prefix(const char *fmt_v, va_list ap_v) +{ + unsigned long flags; + + /* Disable NMIs as well as normal interrupts */ + boot_raw_all_irq_save(flags); + boot_spin_lock(&vprint_lock); + do_boot_vprintk(fmt_v, ap_v); + boot_spin_unlock(&vprint_lock); + boot_raw_all_irq_restore(flags); +} + +void __init_cons +do_boot_printk(char const *fmt_v, ...) +{ + register va_list ap; + + va_start(ap, fmt_v); + boot_vprintk(fmt_v, ap); + va_end(ap); +} + +void __init_cons +boot_puts(char *s) +{ + s = boot_vp_to_pp(s); + while (*s) + boot_putc(*s++); +} + + +/* + * Handler of boot-time errors. + * The error message is output on console and CPU goes to suspended state + * (executes infinite unmeaning cicle). + * In simulation mode CPU is halted with error sign. + */ + +void __init_recv +boot_bug(const char *fmt_v, ...) 
+{ + register va_list ap; + + va_start(ap, fmt_v); + boot_vprintk(fmt_v, ap); + va_end(ap); + boot_vprintk_no_prefix("\n\n\n", NULL); + +#ifdef CONFIG_SMP + boot_set_event(&boot_error_flag); +#endif /* CONFIG_SMP */ + + BOOT_E2K_HALT_ERROR(1); + + for (;;) + boot_cpu_relax(); +} + +/* + * Handler of boot-time warnings. + * The warning message is output on console and CPU continues execution of + * boot process. + */ + +void __init_recv +boot_warning(const char *fmt_v, ...) +{ + register va_list ap; + + va_start(ap, fmt_v); + boot_vprintk(fmt_v, ap); + va_end(ap); + boot_vprintk_no_prefix("\n", NULL); +} + diff --git a/arch/e2k/p2v/boot_profiling.c b/arch/e2k/p2v/boot_profiling.c new file mode 100644 index 000000000000..642ce1687e60 --- /dev/null +++ b/arch/e2k/p2v/boot_profiling.c @@ -0,0 +1,80 @@ +/* linux/arch/e2k/lib/boot_profiling.c. + * + * Copyright (C) 2011 MCST + */ + +#include + +#include +#include +#include +#include +#include +#include + +#include "boot_string.h" + +#define BL(list) ((struct list_head *) boot_vp_to_pp(list)) + +#ifdef CONFIG_RECOVERY +/* Makes sure events lists are cleared before saving a control point. 
*/ +void reinitialize_boot_trace_data(void) +{ + int i; + + for (i = 0; i < NR_CPUS; i++) { + BL(&boot_trace_cpu_events_list[i])->next = + &boot_trace_cpu_events_list[i]; + BL(&boot_trace_cpu_events_list[i])->prev = + &boot_trace_cpu_events_list[i]; + } + atomic_set((atomic_t *) boot_vp_to_pp(&boot_trace_top_event), -1); + boot_get_vo_value(boot_trace_enabled) = 1; +} +#endif + +void notrace __init_recv boot_add_boot_trace_event(char *name) +{ + struct boot_tracepoint *event; + static int overflow = 0; + unsigned int cpu; + long index; + + if (*(int *) boot_vp_to_pp(&boot_trace_enabled) == 0) + return; + + index = atomic_inc_return( + (atomic_t *) boot_vp_to_pp(&boot_trace_top_event)); + if (unlikely(index >= BOOT_TRACE_ARRAY_SIZE)) { + if (*(int *) boot_vp_to_pp(&overflow) == 0) { + *(int *) boot_vp_to_pp(&overflow) = 1; + do_boot_printk("WARNING Overflow of boot tracepoints array! Disabling it...\n"); + } + atomic_set((atomic_t *) boot_vp_to_pp(&boot_trace_top_event), + BOOT_TRACE_ARRAY_SIZE - 1); + return; + } + + event = (struct boot_tracepoint *) + boot_vp_to_pp(&boot_trace_events[index]); + boot_strcpy(event->name, (char *) boot_vp_to_pp(name)); + +#ifdef CONFIG_SMP + cpu = boot_early_pic_read_id(); + if ((unsigned) cpu >= NR_CPUS) + cpu = 0; +#else + cpu = 0; +#endif + + event->cpu = cpu; + + BL(BL(&boot_trace_cpu_events_list[cpu])->prev)->next = + &boot_trace_events[index].list; + event->list.next = &boot_trace_cpu_events_list[cpu]; + event->list.prev = BL(&boot_trace_cpu_events_list[cpu])->prev; + BL(&boot_trace_cpu_events_list[cpu])->prev = + &boot_trace_events[index].list; + + event->cycles = get_cycles(); +} diff --git a/arch/e2k/p2v/boot_recovery.c b/arch/e2k/p2v/boot_recovery.c new file mode 100644 index 000000000000..24590b464d4d --- /dev/null +++ b/arch/e2k/p2v/boot_recovery.c @@ -0,0 +1,399 @@ +/* $Id: boot_recovery.c,v 1.16 2009/06/29 10:37:05 atic Exp $ + * + * Architecture-specific recovery. + * + * Copyright 2001-2003 Salavat S. 
Guiliazov (atic@mcst.ru) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef boot_printk +#undef DebugR +#define DEBUG_RECOVERY_MODE 0 /* system recovery */ +#define boot_printk if (DEBUG_RECOVERY_MODE) do_boot_printk +#define DebugR(...) DebugPrint(DEBUG_RECOVERY_MODE ,##__VA_ARGS__) + + +#ifdef CONFIG_SMP +static atomic_t boot_info_recovery_finished = ATOMIC_INIT(0); +#endif /* CONFIG_SMP */ + + +static void +init_recovery_mem_term(int cpuid) +{ + /* + * Flush the temporarly mapped areas to virtual space. + */ + + DebugR("init_recovery_mem_term() will start init_clear_temporary_ptes() on CPU %d\n", + cpuid); + init_clear_temporary_ptes(ALL_TLB_ACCESS_MASK, cpuid); + + set_secondary_space_MMU_state(); +} + +static noinline void +init_switch_to_interrupted_process(void) +{ + struct task_struct *task; + + DebugR("init_switch_to_interrupted_process() started on CPU #%d\n", + raw_smp_processor_id()); + + task = task_to_recover; + set_current_thread_info(task_thread_info(task), task); + if (current->mm != NULL) { + reload_thread(current->mm); + } + + /* + * Restore state registers of current process to enable + * switching to the interrupted task as end of recovery of the system + */ + + NATIVE_FLUSHCPU; + NATIVE_RESTORE_TASK_REGS_TO_SWITCH(task, task_thread_info(task)); + + /* + * Return to interrupted point + */ + return; +} + +static void +init_recover_system(int cpuid) +{ + bool bsp = boot_early_pic_is_bsp(); + + DebugR("init_recover_system() entered.\n"); + + /* + * Start kernel recovery on bootstrap processor. + * Other processors will do some internal recovery and wait + * for commands from bootstrap processor. 
+ */ +#ifdef CONFIG_SMP + if (bsp) { +#endif /* CONFIG_SMP */ + DebugR("init_recover_system() will start recover_kernel() " + "on CPU #%d\n", cpuid); + recover_kernel(); +#ifdef CONFIG_BOOT_TRACE + BOOT_TRACEPOINT("Recovery trace finished"); + stop_boot_trace(); +#endif +#ifdef CONFIG_SMP + } else { + DebugR("STR is supported only for one CPU now.\n"); + BUG(); + } +#endif /* CONFIG_SMP */ + + /* + * Kernel and system recovery process complited + * Switch to interrupted processes on each CPU + */ + init_switch_to_interrupted_process(); + + /* + * Never should be here + */ + BUG(); +} + +/* + * Sequel of process of initialization. This function is run into virtual + * space and controls farther system boot + */ +static void +boot_recovery_sequel(bool bsp, int cpuid, int cpus_to_sync) +{ + int cpu; + + va_support_on = 1; + + /* + * SYNCHRONIZATION POINT #2 + * At this point all processors should complete switching to + * virtual memory + * After synchronization all processors can terminate + * boot-time recovery of virtual memory support + */ + init_sync_all_processors(cpus_to_sync); + +#ifdef CONFIG_SMP + if (bsp) +#endif + EARLY_BOOT_TRACEPOINT("kernel boot-time init finished"); + + cpu = cpuid_to_cpu(cpuid); + DebugR("CPU #%d has ID #%d\n", cpu, cpuid); + + /* __my_cpu_offset is now stored in g18, so we should to restore it */ + set_my_cpu_offset(__per_cpu_offset[cpu]); + + + /* + * Set pointer of current task structure to kernel restart task for + * this CPU + */ + set_current_thread_info(&task_to_restart[cpu].t.thread_info, + &task_to_restart[cpu].t); + DebugR("'current' task pointer is set to initial kernel task structure virtual address 0x%px size 0x%lx\n", + current_thread_info(), sizeof(union thread_union)); + + /* This also clears preempt_count and PREEMPT_NEED_RESCHED */ + E2K_SET_DGREG_NV(SMP_CPU_ID_GREG, 0); +#ifdef CONFIG_SMP + current->cpu = cpu; + set_smp_processor_id(cpu); + init_reset_smp_processors_num(); +#endif /* CONFIG_SMP */ + + /* + * Flush 
instruction and data cashes to delete all physical + * instruction and data pages + */ + flush_ICACHE_all(); + + /* + * Terminate boot-time recovery of virtual memory support + */ + DebugR("boot_recovery_sequel() will start init_recovery_mem_term() on CPU %d\n", + cpuid); + init_recovery_mem_term(cpuid); + + /* + * Start kernel recovery process + */ + init_recover_system(cpuid); +} + +static void +boot_recovery_mem_init(int cpuid, bootblock_struct_t *bootblock, + void (*boot_recovery_sequel_func)(bool bsp, int cpuid, int cpus)) +{ + bool bsp = boot_early_pic_is_bsp(); + + boot_printk("boot_recovery_mem_init() started()\n"); + + /* + * SYNCHRONIZATION POINT #0 + * At this point all processors should complete memory initialization + * After synchronization page table is completely constructed for + * switching on virtual addresses. + */ + boot_sync_all_processors(); +#ifdef CONFIG_SMP + boot_atomic_set(&boot_info_recovery_finished, 0); +#endif /* CONFIG_SMP */ + + /* + * Reset recovery flags into bootblock structure to avoid + * recursive recovery while check point to recovery is not ready + * Write back all new flags state from cache to memory, else if + * CPU restarts then caches will not be flushed and we can have + * old state of bootblock info and flags + */ +#ifdef CONFIG_SMP + if (bsp) { +#endif /* CONFIG_SMP */ + bootblock->kernel_flags &= + ~(RECOVERY_BB_FLAG | NO_READ_IMAGE_BB_FLAG); + bootblock->boot_flags &= + ~(RECOVERY_BB_FLAG | NO_READ_IMAGE_BB_FLAG); + write_back_CACHE_L12(); + __E2K_WAIT_ALL; + if (boot_machine.L3_enable) + boot_native_flush_L3(boot_machine.native_iset_ver, + BOOT_THE_NODE_NBSR_PHYS_BASE(0)); +#ifdef CONFIG_SMP + } +#endif /* CONFIG_SMP */ + + /* + * Map some necessary physical areas to the equal virtual addresses to + * switch kernel execution into the physical space to execution + * into the virtual space. 
+ */ + + boot_printk("boot_recovery_mem_init() will start " + "boot_map_needful_to_equal_virt_area()\n"); + boot_map_needful_to_equal_virt_area( + NATIVE_NV_READ_USD_LO_REG().USD_lo_base); + + /* + * SYNCHRONIZATION POINT #1 + * At this point all processors maped necessary physical areas + * to the equal virtual addresses and bootstrap processor maped + * general (shared) physical areas. + * After synchronization all processors are ready to switching + */ + boot_sync_all_processors(); + + /* + * Switch kernel execution into the physical space to execution + * into the virtual space. All following initializations will be + * control by 'boot_init_sequel_func()' function. + * Should not be return here from this function. + */ + + boot_printk("boot_recovery_mem_init() will start " + "boot_native_switch_to_virt()\n"); + boot_native_switch_to_virt(bsp, cpuid, boot_recovery_sequel_func); +} + +static void +boot_recovery_setup(bootblock_struct_t *bootblock) +{ + e2k_rwap_lo_struct_t reg_lo; + e2k_rwap_hi_struct_t reg_hi; + e2k_addr_t addr; + e2k_size_t size; + boot_info_t *recovery_info = &bootblock->info; + bool bsp = boot_early_pic_is_bsp(); + + /* + * Set 'text' segment CPU registers OSCUD & CUD + * to kernel image unit into the physical space + */ + +#ifndef CONFIG_NUMA + reg_lo.CUD_lo_base = boot_text_phys_base; +#else /* CONFIG_NUMA */ + reg_lo.CUD_lo_base = boot_node_text_phys_base(BOOT_BS_NODE_ID); +#endif /* !CONFIG_NUMA */ + reg_lo.CUD_lo_c = E2K_CUD_CHECKED_FLAG; + reg_lo._CUD_lo_rw = E2K_CUD_RW_PROTECTIONS; + + reg_hi.CUD_hi_size = boot_text_size; + reg_hi._CUD_hi_curptr = 0; + + NATIVE_WRITE_CUD_REG(reg_hi, reg_lo); + NATIVE_WRITE_OSCUD_REG(reg_hi, reg_lo); + + /* + * Set 'data/bss' segment CPU registers OSGD & GD + * to kernel image unit into the physical space + */ + + addr = boot_data_phys_base; + reg_lo.GD_lo_base = addr; + reg_lo._GD_lo_rw = E2K_GD_RW_PROTECTIONS; + + size = boot_data_size; + reg_hi.GD_hi_size = size; + reg_hi._GD_hi_curptr = 0; + + 
NATIVE_WRITE_GD_REG(reg_hi, reg_lo); + NATIVE_WRITE_OSGD_REG(reg_hi, reg_lo); + + boot_printk("Kernel TEXT segment pointers OSCUD & CUD are set to " + "base physical address 0x%lx size 0x%lx\n", + boot_text_phys_base, boot_text_size); + boot_printk("Kernel DATA/BSS segment pointers OSGD & GD are set to " + "base physical address 0x%lx size 0x%lx\n", + addr, size); + +#ifdef CONFIG_SMP + boot_printk("Kernel boot-time initialization in progress on CPU %d\n", + boot_smp_processor_id()); +#endif /* CONFIG_SMP */ + + /* + * Set Trap Cellar pointer and MMU register to kernel image area + * and reset Trap Counter register + */ + + boot_set_MMU_TRAP_POINT(boot_kernel_trap_cellar); + boot_reset_MMU_TRAP_COUNT(); + + boot_printk("Kernel trap cellar set to physical address 0x%lx " + "MMU_TRAP_CELLAR_MAX_SIZE 0x%x kernel_trap_cellar 0x%lx\n", + boot_kernel_trap_cellar, MMU_TRAP_CELLAR_MAX_SIZE, + BOOT_KERNEL_TRAP_CELLAR); + + /* + * Recover phys. address of boot information block in + * from appropriate data structure. 
+ */ + +#ifdef CONFIG_SMP + if (bsp) { +#endif /* CONFIG_SMP */ + boot_bootinfo_phys_base = + (e2k_addr_t)boot_pa_to_high_pa(bootblock, + recovery_info); + if (boot_bootinfo_phys_base != + (e2k_addr_t)boot_bootblock_phys) { + BOOT_BUG("Invalid address of bootblock 0x%lx != " + "source bootblock address 0x%lx\n", + boot_bootinfo_phys_base, + (e2k_addr_t)boot_bootblock_phys); + } + boot_printk("Recovery information physical address: 0x%lx\n", + boot_bootblock_phys); + + if (recovery_info->signature == ROMLOADER_SIGNATURE) { + boot_printk("Recovery information passed by ROMLOADER\n"); + } else if (recovery_info->signature == X86BOOT_SIGNATURE) { + boot_printk("Recovery information passed by BIOS (x86)\n"); + } else { + BOOT_BUG("Boot information passed by unknown loader\n"); + } +#ifdef CONFIG_SMP + boot_recover_smp_cpu_config(recovery_info); + boot_set_event(&boot_info_recovery_finished); + } else { + boot_wait_for_event(&boot_info_recovery_finished); + if (boot_smp_processor_id() >= NR_CPUS) { + BOOT_BUG("CPU #%d : this processor number >= than max supported CPU number %d\n", + boot_smp_processor_id(), + NR_CPUS); + } + } +#endif /* CONFIG_SMP */ +} + +void +boot_recovery(bootblock_struct_t *bootblock) +{ + int cpuid = 0; + +#ifdef CONFIG_SMP + cpuid = boot_smp_get_processor_id(); + boot_smp_set_processor_id(cpuid); +#endif /* CONFIG_SMP */ + + boot_printk("boot_recovery() started\n"); + +#ifdef CONFIG_BOOT_TRACE + reinitialize_boot_trace_data(); +#endif + + /* + * Initialize virtual memory support for farther system recovery and + * switch sequel recovery process to the function + * 'boot_recovery_sequel()' which will be executed into + * the virtual space. 
+ */ + + boot_recovery_setup(bootblock); + boot_recovery_mem_init(cpuid, bootblock, boot_recovery_sequel); + + /* + * Never should be here + */ + BUG(); +} diff --git a/arch/e2k/p2v/boot_smp.c b/arch/e2k/p2v/boot_smp.c new file mode 100644 index 000000000000..301d28fb1cbe --- /dev/null +++ b/arch/e2k/p2v/boot_smp.c @@ -0,0 +1,242 @@ +/* + * + * SMP mode of boot-time initialization helpers + * + * Copyright (C) 2001 Salavat Guiliazov + */ + +#include +#include +#include +#include + +#undef DEBUG_BOOT_SMP_MODE +#undef boot_printk +#define DEBUG_BOOT_SMP_MODE 0 /* Boot SMP process */ +#define boot_printk if (DEBUG_BOOT_SMP_MODE) do_boot_printk + +cpu_sync_count_t __cacheline_aligned_in_smp num_arrived = {.pad = 0}; + +/* error occured while boot-time initialization */ +atomic_t boot_error_flag = ATOMIC_INIT(0); + +int cpu_to_sync_num = NR_CPUS; + +void __boot_sync_all_processors(atomic_t *num_arrived) +{ + int phys_cpu_num = boot_cpu_to_sync_num; + int current_num_arrived, max_num_arrived; + + current_num_arrived = boot_atomic_inc_return(num_arrived); + + max_num_arrived = current_num_arrived / phys_cpu_num; + max_num_arrived += (current_num_arrived % phys_cpu_num) ? 1 : 0; + max_num_arrived *= phys_cpu_num; + + while (boot_atomic_read(num_arrived) < max_num_arrived) + boot_cpu_relax(); +} + +void __init_sync_all_processors(atomic_t *num_arrived, int cpus_to_sync) +{ + int current_num_arrived, max_num_arrived; + + current_num_arrived = atomic_inc_return(num_arrived); + + max_num_arrived = current_num_arrived / cpus_to_sync; + max_num_arrived += (current_num_arrived % cpus_to_sync) ? 
1 : 0; + max_num_arrived *= cpus_to_sync; + + while (atomic_read(num_arrived) < max_num_arrived) + cpu_relax(); +} + +/* + * Setup CPU configuration for boot-time initialization, + * passed by BIOS thru bootblock structure + */ + +int __init_recv +boot_biosx86_smp_cpu_config(boot_info_t *bootblock) +{ + int phys_cpu_num; + + phys_cpu_num = bootblock->num_of_cpus; + + if (phys_cpu_num <= 0) { + BOOT_WARNING("Boot info structure passed by BIOS does not " + "specify number of live physical CPUs\n"); + } else if (phys_cpu_num > NR_CPUS) { + BOOT_WARNING("Boot info structure passed by BIOS specifies " + "bad number of live physical CPUs %d\n", + phys_cpu_num); + phys_cpu_num = 0; + } + boot_phys_cpu_present_num = phys_cpu_num; + boot_cpu_to_sync_num = phys_cpu_num; + return (phys_cpu_num); +} + +static inline int __init_recv +boot_romloader_smp_cpu_config(boot_info_t *bootblock) +{ + return boot_biosx86_smp_cpu_config(bootblock); +} + +int __init_recv +boot_native_smp_cpu_config(boot_info_t *bootblock) +{ + int phys_cpu_num = 0; + + if (bootblock->signature == ROMLOADER_SIGNATURE) { + phys_cpu_num = boot_romloader_smp_cpu_config(bootblock); + } else if (bootblock->signature == X86BOOT_SIGNATURE) { + phys_cpu_num = boot_biosx86_smp_cpu_config(bootblock); + } else { + BOOT_BUG_POINT("boot_native_smp_cpu_config()"); + BOOT_BUG("Unknown type of Boot information structure"); + } + return phys_cpu_num; +} + +void __init_recv +boot_biosx86_smp_node_config(boot_info_t *bootblock) +{ + int boot_nodes_num = bootblock->num_of_nodes; + unsigned long boot_nodes_map = bootblock->nodes_map; + int nodes_num; + unsigned long node_mask; + + if (boot_nodes_num == 0) { + boot_nodes_num = 1; /* pure SMP or old boot loader */ + /* without nodes support */ + boot_nodes_map = 0x1UL; /* only node #0 */ + } else if (boot_nodes_num > L_MAX_MEM_NUMNODES) { + BOOT_WARNING("Too many nodes : max number can be %d, other %d will be ignored", + L_MAX_MEM_NUMNODES, + boot_nodes_num - L_MAX_MEM_NUMNODES); 
+ boot_nodes_num = L_MAX_MEM_NUMNODES; + } + node_mask = 0x1UL; + nodes_num = 0; + while (node_mask) { + if (boot_nodes_map & node_mask) + nodes_num ++; + if (nodes_num > L_MAX_MEM_NUMNODES) { + BOOT_WARNING("Too many nodes in node map : max number can be %d, map 0x%lx, following 0x%lx will be ignored", + L_MAX_MEM_NUMNODES, boot_nodes_map, + boot_nodes_map & ~(node_mask - 1)); + boot_nodes_map &= (node_mask - 1); + nodes_num = L_MAX_MEM_NUMNODES; + break; + } + node_mask <<= 1; + } + if (nodes_num != boot_nodes_num) { + BOOT_WARNING("Number of nodes passed by boot loader %d is not the same as nodes in the passed %d (map 0x%lx", + boot_nodes_num, nodes_num, boot_nodes_map); + } + + boot_phys_nodes_num = nodes_num; + boot_phys_nodes_map = boot_nodes_map; +} + +static inline void __init_recv +boot_romloader_smp_node_config(boot_info_t *bootblock) +{ + boot_biosx86_smp_node_config(bootblock); +} + +void __init_recv +boot_native_smp_node_config(boot_info_t *bootblock) +{ + if (bootblock->signature == ROMLOADER_SIGNATURE) { + boot_romloader_smp_node_config(bootblock); + } else if (bootblock->signature == X86BOOT_SIGNATURE) { + boot_biosx86_smp_node_config(bootblock); + } else { + BOOT_BUG_POINT("boot_native_smp_node_config()"); + BOOT_BUG("Unknown type of Boot information structure"); + } +} + +/* + * Setup CPU configuration for boot-time initialization + * Needed info passed by loader/BIOS thru bootinfo structure + */ + +void __init +boot_setup_smp_cpu_config(boot_info_t *boot_info) +{ + int phys_cpu_num = -1; + + phys_cpu_num = boot_smp_cpu_config(boot_info); + boot_smp_node_config(boot_info); + if (phys_cpu_num <= 0) { + BOOT_WARNING("Boot info structure (passed by loader/BIOS) " + "does not specify number of live physical CPUs\n"); + phys_cpu_num = boot_smp_processors_num(); + BOOT_WARNING("The number of live physical CPUs will be %d " + "(all CPU(s) started boot process)\n", + phys_cpu_num); + boot_phys_cpu_present_num = phys_cpu_num; + boot_cpu_to_sync_num = 
phys_cpu_num; + } else if (phys_cpu_num > NR_CPUS) { + BOOT_BUG("Number of live physical CPUs (passed thru boot info " + "structure) is %d > %d (NR_CPUS - max allowed number " + "of CPUs)\n", + phys_cpu_num, NR_CPUS); + } else if (boot_smp_processors_num() > phys_cpu_num) { + BOOT_BUG("Number of live physical CPUs (passed thru boot info " + "structure) is %d < %d (number of CPU(s) started boot " + "process\n", + phys_cpu_num, boot_smp_processors_num()); + } + boot_printk("Number of live physical CPU(s) is set to %d\n", + phys_cpu_num); +} + +#ifdef CONFIG_RECOVERY +/* + * Setup CPU configuration for boot-time recovery of the system + */ + +void boot_recover_smp_cpu_config(boot_info_t *boot_info) +{ + cpumask_t *cpu_mask; + int phys_cpu_num = -1; + int new_phys_cpu_num = -1; + + boot_printk("boot_recover_smp_cpu_config() started with %d live physical CPU(s)\n", + boot_phys_cpu_present_num); + phys_cpu_num = boot_phys_cpu_present_num; + new_phys_cpu_num = boot_smp_cpu_config(boot_info); + + /* From all global cpu masks (cpu_present_mask, cpu_online_mask, + * cpu_active_mask, cpu_possible_mask) only online mask is used + * for synchronization when recovering, so do not clear any other + * masks here. 
*/ + cpu_mask = boot_vp_to_pp(&boot_get_vo_value(__cpu_online_mask)); + cpumask_clear(cpu_mask); + if (new_phys_cpu_num <= 0) { + BOOT_WARNING("Boot info structure (passed by loader/BIOS) does not specify number of live physical CPUs\n"); + new_phys_cpu_num = boot_smp_processors_num(); + BOOT_WARNING("The number of live physical CPUs will be %d (all CPU(s) started recovery process)\n", + new_phys_cpu_num); + boot_phys_cpu_present_num = new_phys_cpu_num; + boot_cpu_to_sync_num = new_phys_cpu_num; + } else if (new_phys_cpu_num > NR_CPUS) { + BOOT_BUG("Number of live physical CPUs (passed thru boot info structure) is %d > %d (NR_CPUS - max allowed number of CPUs)\n", + new_phys_cpu_num, NR_CPUS); + } else if (boot_smp_processors_num() > new_phys_cpu_num) { + BOOT_BUG("Number of live physical CPUs (passed thru boot info structure) is %d < %d (number of CPU(s) started recovery process\n", + new_phys_cpu_num, boot_smp_processors_num()); + } + if (phys_cpu_num != new_phys_cpu_num) { + BOOT_BUG("Number of live physical CPUs started recovery process (%d) is not the same as were interrupted at the control point (%d)\n", + new_phys_cpu_num, phys_cpu_num); + } + boot_printk("Number of live physical CPU(s) is %d\n", + phys_cpu_num); +} +#endif /* CONFIG_RECOVERY */ diff --git a/arch/e2k/p2v/boot_string.c b/arch/e2k/p2v/boot_string.c new file mode 100644 index 000000000000..3d6bb5f4f01d --- /dev/null +++ b/arch/e2k/p2v/boot_string.c @@ -0,0 +1,334 @@ +/* + * boot-time initialization string library routines + * based on general lib/string.c + */ + +#include +#include +#include +#include +#include +#include +#include +#include "boot_string.h" + +/** + * strcpy - Copy a %NUL terminated string + * @dest: Where to copy the string to + * @src: Where to copy the string from + */ +char *boot_strcpy(char *dest_va, const char *src_va) +{ + char *dest = boot_vp_to_pp(dest_va); + const char *src = boot_vp_to_pp(src_va); + char *tmp = dest; + + while ((*dest++ = *src++) != '\0') + /* nothing 
*/; + return tmp; +} + +/** + * strncpy - Copy a length-limited, %NUL-terminated string + * @dest: Where to copy the string to + * @src: Where to copy the string from + * @count: The maximum number of bytes to copy + * + * The result is not %NUL-terminated if the source exceeds + * @count bytes. + * + * In the case where the length of @src is less than that of + * count, the remainder of @dest will be padded with %NUL. + * + */ +char *boot_strncpy(char *dest_va, const char *src_va, size_t count) +{ + char *dest = boot_vp_to_pp(dest_va); + const char *src = boot_vp_to_pp(src_va); + char *tmp = dest; + + while (count) { + if ((*tmp = *src) != 0) + src++; + tmp++; + count--; + } + return dest; +} + +/** + * strlcpy - Copy a %NUL terminated string into a sized buffer + * @dest: Where to copy the string to + * @src: Where to copy the string from + * @size: size of destination buffer + * + * Compatible with *BSD: the result is always a valid + * NUL-terminated string that fits in the buffer (unless, + * of course, the buffer size is zero). It does not pad + * out the result like strncpy() does. + */ +size_t boot_strlcpy(char *dest_va, const char *src_va, size_t size) +{ + char *dest = boot_vp_to_pp(dest_va); + const char *src = boot_vp_to_pp(src_va); + size_t ret = boot_strlen(src); + + if (size) { + size_t len = (ret >= size) ? size - 1 : ret; + boot_memcpy(dest, src, len); + dest[len] = '\0'; + } + return ret; +} +int boot_strcmp(const char *cs_va, const char *ct_va) +{ + const char *cs = boot_vp_to_pp(cs_va); + const char *ct = boot_vp_to_pp(ct_va); + unsigned char c1, c2; + + while (1) { + c1 = *cs++; + c2 = *ct++; + if (c1 != c2) + return c1 < c2 ? 
-1 : 1; + if (!c1) + break; + } + return 0; +} +/** + * strncmp - Compare two length-limited strings + * @cs: One string + * @ct: Another string + * @count: The maximum number of bytes to compare + */ +int boot_strncmp(const char *cs_va, const char *ct_va, size_t count) +{ + const char *cs = boot_vp_to_pp(cs_va); + const char *ct = boot_vp_to_pp(ct_va); + unsigned char c1, c2; + + while (count) { + c1 = *cs++; + c2 = *ct++; + if (c1 != c2) + return c1 < c2 ? -1 : 1; + if (!c1) + break; + count--; + } + return 0; +} +/** + * strlen - Find the length of a string + * @s: The string to be sized + */ +size_t boot_strlen(const char *s_va) +{ + const char *s = boot_vp_to_pp(s_va); + const char *sc; + + for (sc = s; *sc != '\0'; ++sc) + /* nothing */; + return sc - s; +} +/** + * strnlen - Find the length of a length-limited string + * @s: The string to be sized + * @count: The maximum number of bytes to search + */ +size_t boot_strnlen(const char *s_va, size_t count) +{ + const char *s = boot_vp_to_pp(s_va); + const char *sc; + + for (sc = s; count-- && *sc != '\0'; ++sc) + /* nothing */; + return sc - s; +} +/** + * memset - Fill a region of memory with the given value + * @s: Pointer to the start of the area. + * @c: The byte to fill the area with + * @count: The size of the area. + * + * Do not use memset() to access IO space, use memset_io() instead. + */ +void *boot_memset(void *s_va, int c, size_t count) +{ + void *s = boot_vp_to_pp(s_va); + char *xs = s; + + while (count--) + *xs++ = c; + return s; +} +/** + * memcpy - Copy one area of memory to another + * @dest: Where to copy to + * @src: Where to copy from + * @count: The size of the area. + * + * You should not use this function to access IO space, use memcpy_toio() + * or memcpy_fromio() instead. 
+ */ +void *boot_memcpy(void *dest_va, const void *src_va, size_t count) +{ + void *dest = boot_vp_to_pp(dest_va); + const void *src = boot_vp_to_pp(src_va); + char *tmp = dest; + const char *s = src; + + while (count--) + *tmp++ = *s++; + return dest; +} +/* + * The following function is same as arch/e2k/lib/string.c function __memset() + * but can operate with physical addresses + */ + +notrace void boot_fast_memset(void *s_va, long c, size_t count) +{ + void *s = boot_vp_to_pp(s_va); + unsigned long align = (unsigned long) s & 0x7, head, tail, + head1, head2, head3, head4, tail1, tail2, tail4, tail6; + + if (unlikely(count < 8)) + goto set_tail; + + /* Set the head */ + head = 8 - align; + + head1 = (unsigned long) s & 1; /* s & 1 == head & 1 */ + head2 = head & 2; + head3 = head & 3; + head4 = head & 4; + + if (head1) + WRITE_ONCE(*(u8 *) s, c); + if (head2) + WRITE_ONCE(*(u16 *) (s + head1), c); + if (head4) + WRITE_ONCE(*(u32 *) (s + head3), c); + + s = PTR_ALIGN(s, 8); + count -= head & 0x7; + + /* Bypass L1 cache - usually after memset memory is not accessed + * immediately since user knows its contents. + * + * Do NOT use WC memory access here - otherwise + * cpu_has() -> boot_cpu_has() in recovery_memset_8() + * might access uninitilized data when clearing kernel BSS. 
*/ + boot_fast_tagged_memory_set(s, c, 0, count & ~0x7UL, + LDST_DWORD_FMT << LDST_REC_OPC_FMT_SHIFT + | MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT); + + /* Set the tail */ + s += count & ~0x7UL; +set_tail: + tail = count; + + tail1 = tail & 1; + tail2 = tail & 2; + tail4 = tail & 4; + tail6 = tail & 6; + + if (tail4) + WRITE_ONCE(*(u32 *) s, c); + if (tail2) + WRITE_ONCE(*(u16 *) (s + tail4), c); + if (tail1) + WRITE_ONCE(*(u8 *) (s + tail6), c); +} + +/* Same as __memcpy, but assume "hwbug == true" */ +notrace void +boot_fast_memcpy(void *dst_va, const void *src_va, size_t n) +{ + void *dst = boot_vp_to_pp(dst_va); + void *src = boot_vp_to_pp(((void *)src_va)); + void *const orig_dst = dst; + unsigned long head, tail, head1, head2, head3, head4, + tail1, tail2, tail4, tail6; + u32 head_val4, tail_val4; + u16 head_val2, tail_val2; + u8 head_val1, tail_val1; + + if (unlikely(n < 8)) { + boot_memcpy(dst_va, src_va, n); + return; + } + + prefetch_nospec_range(src, n); + __E2K_WAIT(_ld_c); + + /* Copy the head */ + + head = 8 - ((unsigned long) dst & 0x7UL); + + head1 = (unsigned long) dst & 1; /* dst & 1 == head & 1 */ + head2 = head & 2; + head3 = head & 3; + head4 = head & 4; + + if (head1) + head_val1 = READ_ONCE(*(u8 *) src); + if (head2) + head_val2 = READ_ONCE(*(u16 *) (src + head1)); + if (head4) + head_val4 = READ_ONCE(*(u32 *) (src + head3)); + + src += head & 0x7; + dst = PTR_ALIGN(dst, 8); + n -= head & 0x7; + + /* Do the copy. Bypass L1 cache - usually after memcpy memory + * is not accessed immediately since user knows its contents */ + do { + size_t length = (n >= 2 * 8192) ? 8192 : (n & ~0x7UL); + + n -= length; + + /* Copy with tags. This is useful for access_process_vm. 
*/ + boot_fast_tagged_memory_copy(dst, src, length, + TAGGED_MEM_STORE_REC_OPC | + MAS_BYPASS_ALL_CACHES << LDST_REC_OPC_MAS_SHIFT, + TAGGED_MEM_LOAD_REC_OPC | + MAS_BYPASS_ALL_CACHES << LDST_REC_OPC_MAS_SHIFT, + 0); + + src += length; + dst += length; + } while (unlikely(n >= 8)); + + /* Copy the tail */ + tail = n; + + tail1 = tail & 1; + tail2 = tail & 2; + tail4 = tail & 4; + tail6 = tail & 6; + + if (tail4) + tail_val4 = READ_ONCE(*(u32 *) src); + if (tail2) + tail_val2 = READ_ONCE(*(u16 *) (src + tail4)); + if (tail1) + tail_val1 = READ_ONCE(*(u8 *) (src + tail6)); + + if (head1) + WRITE_ONCE(*(u8 *) orig_dst, head_val1); + if (head2) + WRITE_ONCE(*(u16 *) (orig_dst + head1), head_val2); + if (head4) + WRITE_ONCE(*(u32 *) (orig_dst + head3), head_val4); + + if (tail4) + WRITE_ONCE(*(u32 *) dst, tail_val4); + if (tail2) + WRITE_ONCE(*(u16 *) (dst + tail4), tail_val2); + if (tail1) + WRITE_ONCE(*(u8 *) (dst + tail6), tail_val1); +} diff --git a/arch/e2k/p2v/boot_string.h b/arch/e2k/p2v/boot_string.h new file mode 100644 index 000000000000..32c028b705c0 --- /dev/null +++ b/arch/e2k/p2v/boot_string.h @@ -0,0 +1,22 @@ +#ifndef _E2K_BOOT_STRING_H_ +#define _E2K_BOOT_STRING_H_ + +/* boot-time initialization string library functions */ + + +#include /* for inline */ +#include /* for size_t */ +#include /* for NULL */ +#include + +extern char *boot_strcpy(char *, const char *); +extern char *boot_strncpy(char *, const char *, __kernel_size_t); +extern size_t boot_strlcpy(char *, const char *, size_t); +extern int boot_strcmp(const char *, const char *); +extern int boot_strncmp(const char *, const char *, __kernel_size_t); +extern __kernel_size_t boot_strlen(const char *); +extern __kernel_size_t boot_strnlen(const char *, __kernel_size_t); +extern void *boot_memset(void *, int, __kernel_size_t); +extern void *boot_memcpy(void *, const void *, __kernel_size_t); + +#endif /* _E2K_BOOT_STRING_H_ */ diff --git a/arch/e2k/p2v/cpu/Makefile b/arch/e2k/p2v/cpu/Makefile new 
file mode 100644 index 000000000000..5cc8e38f05b2 --- /dev/null +++ b/arch/e2k/p2v/cpu/Makefile @@ -0,0 +1,41 @@ +GCOV_PROFILE := n + +obj-y := boot_iset_v2.o boot_iset_v3.o boot_iset_v6.o + +CFLAGS_REMOVE_boot_iset_v2.o = $(CFLAGS_ALL_CPUS) +CFLAGS_REMOVE_boot_iset_v3.o = $(CFLAGS_ALL_CPUS) +CFLAGS_REMOVE_boot_iset_v6.o = $(CFLAGS_ALL_CPUS) + +CFLAGS_boot_iset_v2.o := -march=elbrus-v2 +CFLAGS_boot_iset_v3.o := -march=elbrus-v3 +CFLAGS_boot_iset_v6.o := -march=elbrus-v6 + +ifeq ($(CONFIG_E2K_MACHINE),y) +obj-$(CONFIG_E2K_ES2_DSP) += es2.o +obj-$(CONFIG_E2K_ES2_RU) += es2.o +obj-$(CONFIG_E2K_E2S) += e2s.o +obj-$(CONFIG_E2K_E8C) += e8c.o +obj-$(CONFIG_E2K_E1CP) += e1cp.o +obj-$(CONFIG_E2K_E8C2) += e8c2.o +obj-$(CONFIG_E2K_E12C) += e12c.o +obj-$(CONFIG_E2K_E16C) += e16c.o +obj-$(CONFIG_E2K_E2C3) += e2c3.o +else +CFLAGS_es2.o = $(CFLAGS_ES2) +CFLAGS_e2s.o = $(CFLAGS_E2S) +CFLAGS_e8c.o = $(CFLAGS_E8C) +CFLAGS_e1cp.o = $(CFLAGS_E1CP) +CFLAGS_e8c2.o = $(CFLAGS_E8C2) +CFLAGS_e12c.o = $(CFLAGS_E12C) +CFLAGS_e16c.o = $(CFLAGS_E16C) +CFLAGS_e2c3.o = $(CFLAGS_E2C3) +obj-$(CONFIG_CPU_E2S) += e2s.o +obj-$(CONFIG_CPU_ES2) += es2.o +obj-$(CONFIG_CPU_E8C) += e8c.o +obj-$(CONFIG_CPU_E1CP) += e1cp.o +obj-$(CONFIG_CPU_E8C2) += e8c2.o +obj-$(CONFIG_CPU_E12C) += e12c.o +obj-$(CONFIG_CPU_E16C) += e16c.o +obj-$(CONFIG_CPU_E2C3) += e2c3.o +endif + diff --git a/arch/e2k/p2v/cpu/boot_iset_v2.c b/arch/e2k/p2v/cpu/boot_iset_v2.c new file mode 100644 index 000000000000..f40ede45724d --- /dev/null +++ b/arch/e2k/p2v/cpu/boot_iset_v2.c @@ -0,0 +1,17 @@ +#include +#include +#include + +unsigned long boot_rrd_v2(int reg) +{ + return 0; +} + +void boot_rwd_v2(int reg, unsigned long value) +{ +} + +notrace unsigned long boot_native_read_IDR_reg_value() +{ + return NATIVE_READ_IDR_REG_VALUE(); +} diff --git a/arch/e2k/p2v/cpu/boot_iset_v3.c b/arch/e2k/p2v/cpu/boot_iset_v3.c new file mode 100644 index 000000000000..90d869d0d2b0 --- /dev/null +++ b/arch/e2k/p2v/cpu/boot_iset_v3.c @@ -0,0 +1,22 @@ 
+#include +#include + +unsigned long boot_rrd_v3(int reg) +{ + switch (reg) { + case E2K_REG_CORE_MODE: + return NATIVE_READ_CORE_MODE_REG_VALUE(); + } + + return 0; +} + +void boot_rwd_v3(int reg, unsigned long value) +{ + switch (reg) { + case E2K_REG_CORE_MODE: + NATIVE_WRITE_CORE_MODE_REG_VALUE(value); + return; + } +} + diff --git a/arch/e2k/p2v/cpu/boot_iset_v6.c b/arch/e2k/p2v/cpu/boot_iset_v6.c new file mode 100644 index 000000000000..a8fcbf1968c4 --- /dev/null +++ b/arch/e2k/p2v/cpu/boot_iset_v6.c @@ -0,0 +1,92 @@ +#include +#include + +unsigned long boot_rrd_v6(int reg) +{ + switch (reg) { + case E2K_REG_CORE_MODE: + return NATIVE_READ_CORE_MODE_REG_VALUE(); + case E2K_REG_HCEM: + return READ_HCEM_REG(); + case E2K_REG_HCEB: + return READ_HCEB_REG(); + case E2K_REG_OSCUTD: + return NATIVE_READ_OSCUTD_REG_VALUE(); + case E2K_REG_OSCUIR: + return NATIVE_READ_OSCUIR_REG_VALUE(); + } + + return 0; +} + +void boot_rwd_v6(int reg, unsigned long value) +{ + switch (reg) { + case E2K_REG_CORE_MODE: + NATIVE_WRITE_CORE_MODE_REG_VALUE(value); + return; + case E2K_REG_HCEM: + WRITE_HCEM_REG(value); + return; + case E2K_REG_HCEB: + WRITE_HCEB_REG(value); + return; + case E2K_REG_OSCUTD: + NATIVE_WRITE_OSCUTD_REG_VALUE(value); + return; + case E2K_REG_OSCUIR: + NATIVE_WRITE_OSCUIR_REG_VALUE(value); + return; + } +} + +unsigned long light_hw_hypercall(unsigned long nr, + unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4, + unsigned long arg5, unsigned long arg6) +{ + unsigned long ret; + + ret = E2K_HCALL(LINUX_HCALL_LIGHT_TRAPNUM, nr, 6, + arg1, arg2, arg3, arg4, arg5, arg6); + return ret; +} + +unsigned long generic_hw_hypercall(unsigned long nr, + unsigned long arg1, unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5, unsigned long arg6, + unsigned long arg7) +{ + unsigned long ret; + + ret = E2K_HCALL(LINUX_HCALL_GENERIC_TRAPNUM, nr, 7, + arg1, arg2, arg3, arg4, arg5, arg6, arg7); + return ret; +} + 
+unsigned long boot_native_read_MMU_OS_PPTB_reg_value(void) +{ + return BOOT_NATIVE_READ_MMU_OS_PPTB_REG_VALUE(); +} +void boot_native_write_MMU_OS_PPTB_reg_value(unsigned long value) +{ + BOOT_NATIVE_WRITE_MMU_OS_PPTB_REG_VALUE(value); +} + +unsigned long boot_native_read_MMU_OS_VPTB_reg_value(void) +{ + return BOOT_NATIVE_READ_MMU_OS_VPTB_REG_VALUE(); +} +void boot_native_write_MMU_OS_VPTB_reg_value(unsigned long value) +{ + BOOT_NATIVE_WRITE_MMU_OS_VPTB_REG_VALUE(value); +} + +unsigned long boot_native_read_MMU_OS_VAB_reg_value(void) +{ + return BOOT_NATIVE_READ_MMU_OS_VAB_REG_VALUE(); +} +void boot_native_write_MMU_OS_VAB_reg_value(unsigned long value) +{ + BOOT_NATIVE_WRITE_MMU_OS_VAB_REG_VALUE(value); +} diff --git a/arch/e2k/p2v/cpu/e12c.c b/arch/e2k/p2v/cpu/e12c.c new file mode 100644 index 000000000000..5fd4674b1fcb --- /dev/null +++ b/arch/e2k/p2v/cpu/e12c.c @@ -0,0 +1,43 @@ +#include +#include + +void boot_e12c_setup_arch(void) +{ + boot_e2k_sic_setup_arch(); + + if (!boot_machine.cmdline_iset_ver) + boot_machine.native_iset_ver = ELBRUS_12C_ISET; +#ifdef CONFIG_MMU_PT_V6 + boot_machine.mmu_pt_v6 = true; +#else + boot_machine.mmu_pt_v6 = false; +#endif +#ifdef CONFIG_MMU_SEP_VIRT_SPACE + boot_machine.mmu_separate_pt = true; +#else + boot_machine.mmu_separate_pt = false; +#endif + boot_machine.L3_enable = true; + boot_machine.max_nr_node_cpus = E12C_MAX_NR_NODE_CPUS; + boot_machine.nr_node_cpus = E12C_NR_NODE_CPUS; + boot_machine.node_iolinks = E12C_NODE_IOLINKS; + boot_machine.pcicfg_area_phys_base = E12C_PCICFG_AREA_PHYS_BASE; + boot_machine.pcicfg_area_size = E12C_PCICFG_AREA_SIZE; + boot_machine.nsr_area_phys_base = E12C_NSR_AREA_PHYS_BASE; + boot_machine.nbsr_area_offset = E12C_NBSR_AREA_OFFSET; + boot_machine.nbsr_area_size = E12C_NBSR_AREA_SIZE; + boot_machine.copsr_area_phys_base = E12C_COPSR_AREA_PHYS_BASE; + boot_machine.copsr_area_size = E12C_COPSR_AREA_SIZE; + boot_machine.mlt_size = E12C_MLT_SIZE; + boot_machine.tlb_lines_bits_num = 
E12C_TLB_LINES_BITS_NUM; + boot_machine.tlb_addr_line_num = E12C_TLB_ADDR_LINE_NUM; + boot_machine.tlb_addr_line_num2 = E12C_TLB_ADDR_LINE_NUM2; + boot_machine.tlb_addr_line_num_shift2 = E12C_TLB_ADDR_LINE_NUM_SHIFT2; + boot_machine.tlb_addr_set_num = E12C_TLB_ADDR_SET_NUM; + boot_machine.tlb_addr_set_num_shift = E12C_TLB_ADDR_SET_NUM_SHIFT; + boot_machine.sic_mc_size = E12C_SIC_MC_SIZE; + boot_machine.sic_mc_count = E12C_SIC_MC_COUNT; + boot_machine.sic_mc1_ecc = 0; /* no MC1_ECC reg */ + boot_machine.sic_io_str1 = 0; /* no IO_STR1 reg */ +} + diff --git a/arch/e2k/p2v/cpu/e16c.c b/arch/e2k/p2v/cpu/e16c.c new file mode 100644 index 000000000000..2d9a10c910f2 --- /dev/null +++ b/arch/e2k/p2v/cpu/e16c.c @@ -0,0 +1,43 @@ +#include +#include + +void boot_e16c_setup_arch(void) +{ + boot_e2k_sic_setup_arch(); + + if (!boot_machine.cmdline_iset_ver) + boot_machine.native_iset_ver = ELBRUS_16C_ISET; +#ifdef CONFIG_MMU_PT_V6 + boot_machine.mmu_pt_v6 = true; +#else + boot_machine.mmu_pt_v6 = false; +#endif +#ifdef CONFIG_MMU_SEP_VIRT_SPACE + boot_machine.mmu_separate_pt = true; +#else + boot_machine.mmu_separate_pt = false; +#endif + boot_machine.L3_enable = true; + boot_machine.max_nr_node_cpus = E16C_MAX_NR_NODE_CPUS; + boot_machine.nr_node_cpus = E16C_NR_NODE_CPUS; + boot_machine.node_iolinks = E16C_NODE_IOLINKS; + boot_machine.pcicfg_area_phys_base = E16C_PCICFG_AREA_PHYS_BASE; + boot_machine.pcicfg_area_size = E16C_PCICFG_AREA_SIZE; + boot_machine.nsr_area_phys_base = E16C_NSR_AREA_PHYS_BASE; + boot_machine.nbsr_area_offset = E16C_NBSR_AREA_OFFSET; + boot_machine.nbsr_area_size = E16C_NBSR_AREA_SIZE; + boot_machine.copsr_area_phys_base = E16C_COPSR_AREA_PHYS_BASE; + boot_machine.copsr_area_size = E16C_COPSR_AREA_SIZE; + boot_machine.mlt_size = E12C_MLT_SIZE; + boot_machine.tlb_lines_bits_num = E16C_TLB_LINES_BITS_NUM; + boot_machine.tlb_addr_line_num = E16C_TLB_ADDR_LINE_NUM; + boot_machine.tlb_addr_line_num2 = E16C_TLB_ADDR_LINE_NUM2; + 
boot_machine.tlb_addr_line_num_shift2 = E16C_TLB_ADDR_LINE_NUM_SHIFT2; + boot_machine.tlb_addr_set_num = E16C_TLB_ADDR_SET_NUM; + boot_machine.tlb_addr_set_num_shift = E16C_TLB_ADDR_SET_NUM_SHIFT; + boot_machine.sic_mc_size = E16C_SIC_MC_SIZE; + boot_machine.sic_mc_count = E16C_SIC_MC_COUNT; + boot_machine.sic_mc1_ecc = 0; /* no MC1_ECC reg */ + boot_machine.sic_io_str1 = 0; /* no IO_STR1 reg */ +} + diff --git a/arch/e2k/p2v/cpu/e1cp.c b/arch/e2k/p2v/cpu/e1cp.c new file mode 100644 index 000000000000..10e85284b85c --- /dev/null +++ b/arch/e2k/p2v/cpu/e1cp.c @@ -0,0 +1,39 @@ +#include +#include +#include +#include + +void boot_e1cp_setup_arch(void) +{ + boot_e2k_sic_setup_arch(); + + if (!boot_machine.cmdline_iset_ver) + boot_machine.native_iset_ver = ELBRUS_1CP_ISET; + boot_machine.mmu_pt_v6 = false; + boot_machine.mmu_separate_pt = false; + boot_machine.max_nr_node_cpus = E1CP_MAX_NR_NODE_CPUS; + boot_machine.nr_node_cpus = E1CP_NR_NODE_CPUS; + boot_machine.node_iolinks = E1CP_NODE_IOLINKS; + boot_machine.pcicfg_area_phys_base = E1CP_PCICFG_AREA_PHYS_BASE; + boot_machine.pcicfg_area_size = E1CP_PCICFG_AREA_SIZE; + + /* should be only after machine.pcicfg_area-* setting */ + boot_machine.nsr_area_phys_base = boot_get_legacy_nbsr_base(); + + boot_machine.nbsr_area_offset = E1CP_NBSR_AREA_OFFSET; + boot_machine.nbsr_area_size = E1CP_NBSR_AREA_SIZE; + boot_machine.copsr_area_phys_base = 0; + boot_machine.copsr_area_size = 0; + boot_machine.mlt_size = E1CP_MLT_SIZE; + boot_machine.tlb_lines_bits_num = E1CP_TLB_LINES_BITS_NUM; + boot_machine.tlb_addr_line_num = E1CP_TLB_ADDR_LINE_NUM; + boot_machine.tlb_addr_line_num2 = E1CP_TLB_ADDR_LINE_NUM2; + boot_machine.tlb_addr_line_num_shift2 = E1CP_TLB_ADDR_LINE_NUM_SHIFT2; + boot_machine.tlb_addr_set_num = E1CP_TLB_ADDR_SET_NUM; + boot_machine.tlb_addr_set_num_shift = E1CP_TLB_ADDR_SET_NUM_SHIFT; + boot_machine.sic_mc_size = 0; + boot_machine.sic_mc_count = E1CP_SIC_MC_COUNT; + boot_machine.sic_mc1_ecc = E1CP_SIC_MC1_ECC; + 
boot_machine.sic_io_str1 = SIC_io_str_hi; +} + diff --git a/arch/e2k/p2v/cpu/e2c3.c b/arch/e2k/p2v/cpu/e2c3.c new file mode 100644 index 000000000000..e3e7958f9c95 --- /dev/null +++ b/arch/e2k/p2v/cpu/e2c3.c @@ -0,0 +1,43 @@ +#include +#include + +void boot_e2c3_setup_arch(void) +{ + boot_e2k_sic_setup_arch(); + + if (!boot_machine.cmdline_iset_ver) + boot_machine.native_iset_ver = ELBRUS_2C3_ISET; +#ifdef CONFIG_MMU_PT_V6 + boot_machine.mmu_pt_v6 = true; +#else + boot_machine.mmu_pt_v6 = false; +#endif +#ifdef CONFIG_MMU_SEP_VIRT_SPACE + boot_machine.mmu_separate_pt = true; +#else + boot_machine.mmu_separate_pt = false; +#endif + boot_machine.L3_enable = false; /* no cache L3 */ + boot_machine.max_nr_node_cpus = E2C3_MAX_NR_NODE_CPUS; + boot_machine.nr_node_cpus = E2C3_NR_NODE_CPUS; + boot_machine.node_iolinks = E2C3_NODE_IOLINKS; + boot_machine.pcicfg_area_phys_base = E2C3_PCICFG_AREA_PHYS_BASE; + boot_machine.pcicfg_area_size = E2C3_PCICFG_AREA_SIZE; + boot_machine.nsr_area_phys_base = E2C3_NSR_AREA_PHYS_BASE; + boot_machine.nbsr_area_offset = E2C3_NBSR_AREA_OFFSET; + boot_machine.nbsr_area_size = E2C3_NBSR_AREA_SIZE; + boot_machine.copsr_area_phys_base = E2C3_COPSR_AREA_PHYS_BASE; + boot_machine.copsr_area_size = E2C3_COPSR_AREA_SIZE; + boot_machine.mlt_size = E2C3_MLT_SIZE; + boot_machine.tlb_lines_bits_num = E2C3_TLB_LINES_BITS_NUM; + boot_machine.tlb_addr_line_num = E2C3_TLB_ADDR_LINE_NUM; + boot_machine.tlb_addr_line_num2 = E2C3_TLB_ADDR_LINE_NUM2; + boot_machine.tlb_addr_line_num_shift2 = E2C3_TLB_ADDR_LINE_NUM_SHIFT2; + boot_machine.tlb_addr_set_num = E2C3_TLB_ADDR_SET_NUM; + boot_machine.tlb_addr_set_num_shift = E2C3_TLB_ADDR_SET_NUM_SHIFT; + boot_machine.sic_mc_size = E2C3_SIC_MC_SIZE; + boot_machine.sic_mc_count = E2C3_SIC_MC_COUNT; + boot_machine.sic_mc1_ecc = 0; /* no MC1_ECC reg */ + boot_machine.sic_io_str1 = 0; /* no IO_STR1 reg */ +} + diff --git a/arch/e2k/p2v/cpu/e2s.c b/arch/e2k/p2v/cpu/e2s.c new file mode 100644 index 
000000000000..a8639551392d --- /dev/null +++ b/arch/e2k/p2v/cpu/e2s.c @@ -0,0 +1,35 @@ +#include +#include +#include + +void boot_e2s_setup_arch(void) +{ + boot_e2k_sic_setup_arch(); + + if (!boot_machine.cmdline_iset_ver) + boot_machine.native_iset_ver = ELBRUS_2S_ISET; + boot_machine.mmu_pt_v6 = false; + boot_machine.mmu_separate_pt = false; + boot_machine.max_nr_node_cpus = E2S_MAX_NR_NODE_CPUS; + boot_machine.nr_node_cpus = E2S_NR_NODE_CPUS; + boot_machine.node_iolinks = E2S_NODE_IOLINKS; + boot_machine.pcicfg_area_phys_base = E2S_PCICFG_AREA_PHYS_BASE; + boot_machine.pcicfg_area_size = E2S_PCICFG_AREA_SIZE; + boot_machine.nsr_area_phys_base = E2S_NSR_AREA_PHYS_BASE; + boot_machine.nbsr_area_offset = E2S_NBSR_AREA_OFFSET; + boot_machine.nbsr_area_size = E2S_NBSR_AREA_SIZE; + boot_machine.copsr_area_phys_base = E2S_COPSR_AREA_PHYS_BASE; + boot_machine.copsr_area_size = E2S_COPSR_AREA_SIZE; + boot_machine.mlt_size = E2S_MLT_SIZE; + boot_machine.tlb_lines_bits_num = E2S_TLB_LINES_BITS_NUM; + boot_machine.tlb_addr_line_num = E2S_TLB_ADDR_LINE_NUM; + boot_machine.tlb_addr_line_num2 = E2S_TLB_ADDR_LINE_NUM2; + boot_machine.tlb_addr_line_num_shift2 = E2S_TLB_ADDR_LINE_NUM_SHIFT2; + boot_machine.tlb_addr_set_num = E2S_TLB_ADDR_SET_NUM; + boot_machine.tlb_addr_set_num_shift = E2S_TLB_ADDR_SET_NUM_SHIFT; + boot_machine.sic_mc_size = E2S_SIC_MC_SIZE; + boot_machine.sic_mc_count = E2S_SIC_MC_COUNT; + boot_machine.sic_mc1_ecc = E2S_SIC_MC1_ECC; + boot_machine.sic_io_str1 = SIC_io_str_hi; +} + diff --git a/arch/e2k/p2v/cpu/e8c.c b/arch/e2k/p2v/cpu/e8c.c new file mode 100644 index 000000000000..fbe64894e83c --- /dev/null +++ b/arch/e2k/p2v/cpu/e8c.c @@ -0,0 +1,35 @@ +#include +#include + +void boot_e8c_setup_arch(void) +{ + boot_e2k_sic_setup_arch(); + + if (!boot_machine.cmdline_iset_ver) + boot_machine.native_iset_ver = ELBRUS_8C_ISET; + boot_machine.mmu_pt_v6 = false; + boot_machine.mmu_separate_pt = false; + boot_machine.L3_enable = true; + boot_machine.max_nr_node_cpus = 
E8C_MAX_NR_NODE_CPUS; + boot_machine.nr_node_cpus = E8C_NR_NODE_CPUS; + boot_machine.node_iolinks = E8C_NODE_IOLINKS; + boot_machine.pcicfg_area_phys_base = E8C_PCICFG_AREA_PHYS_BASE; + boot_machine.pcicfg_area_size = E8C_PCICFG_AREA_SIZE; + boot_machine.nsr_area_phys_base = E8C_NSR_AREA_PHYS_BASE; + boot_machine.nbsr_area_offset = E8C_NBSR_AREA_OFFSET; + boot_machine.nbsr_area_size = E8C_NBSR_AREA_SIZE; + boot_machine.copsr_area_phys_base = E8C_COPSR_AREA_PHYS_BASE; + boot_machine.copsr_area_size = E8C_COPSR_AREA_SIZE; + boot_machine.mlt_size = E8C_MLT_SIZE; + boot_machine.tlb_lines_bits_num = E8C_TLB_LINES_BITS_NUM; + boot_machine.tlb_addr_line_num = E8C_TLB_ADDR_LINE_NUM; + boot_machine.tlb_addr_line_num2 = E8C_TLB_ADDR_LINE_NUM2; + boot_machine.tlb_addr_line_num_shift2 = E8C_TLB_ADDR_LINE_NUM_SHIFT2; + boot_machine.tlb_addr_set_num = E8C_TLB_ADDR_SET_NUM; + boot_machine.tlb_addr_set_num_shift = E8C_TLB_ADDR_SET_NUM_SHIFT; + boot_machine.sic_mc_size = E8C_SIC_MC_SIZE; + boot_machine.sic_mc_count = E8C_SIC_MC_COUNT; + boot_machine.sic_mc1_ecc = E8C_SIC_MC1_ECC; + boot_machine.sic_io_str1 = 0; +} + diff --git a/arch/e2k/p2v/cpu/e8c2.c b/arch/e2k/p2v/cpu/e8c2.c new file mode 100644 index 000000000000..48c4170de7df --- /dev/null +++ b/arch/e2k/p2v/cpu/e8c2.c @@ -0,0 +1,35 @@ +#include +#include + +void boot_e8c2_setup_arch(void) +{ + boot_e2k_sic_setup_arch(); + + if (!boot_machine.cmdline_iset_ver) + boot_machine.native_iset_ver = ELBRUS_8C2_ISET; + boot_machine.mmu_pt_v6 = false; + boot_machine.mmu_separate_pt = false; + boot_machine.L3_enable = true; + boot_machine.max_nr_node_cpus = E8C2_MAX_NR_NODE_CPUS; + boot_machine.nr_node_cpus = E8C2_NR_NODE_CPUS; + boot_machine.node_iolinks = E8C2_NODE_IOLINKS; + boot_machine.pcicfg_area_phys_base = E8C2_PCICFG_AREA_PHYS_BASE; + boot_machine.pcicfg_area_size = E8C2_PCICFG_AREA_SIZE; + boot_machine.nsr_area_phys_base = E8C2_NSR_AREA_PHYS_BASE; + boot_machine.nbsr_area_offset = E8C2_NBSR_AREA_OFFSET; + 
boot_machine.nbsr_area_size = E8C2_NBSR_AREA_SIZE; + boot_machine.copsr_area_phys_base = E8C2_COPSR_AREA_PHYS_BASE; + boot_machine.copsr_area_size = E8C2_COPSR_AREA_SIZE; + boot_machine.mlt_size = E8C2_MLT_SIZE; + boot_machine.tlb_lines_bits_num = E8C2_TLB_LINES_BITS_NUM; + boot_machine.tlb_addr_line_num = E8C2_TLB_ADDR_LINE_NUM; + boot_machine.tlb_addr_line_num2 = E8C2_TLB_ADDR_LINE_NUM2; + boot_machine.tlb_addr_line_num_shift2 = E8C2_TLB_ADDR_LINE_NUM_SHIFT2; + boot_machine.tlb_addr_set_num = E8C2_TLB_ADDR_SET_NUM; + boot_machine.tlb_addr_set_num_shift = E8C2_TLB_ADDR_SET_NUM_SHIFT; + boot_machine.sic_mc_size = E8C2_SIC_MC_SIZE; + boot_machine.sic_mc_count = E8C2_SIC_MC_COUNT; + boot_machine.sic_mc1_ecc = E8C2_SIC_MC1_ECC; + boot_machine.sic_io_str1 = 0; +} + diff --git a/arch/e2k/p2v/cpu/es2.c b/arch/e2k/p2v/cpu/es2.c new file mode 100644 index 000000000000..fbf068747e3c --- /dev/null +++ b/arch/e2k/p2v/cpu/es2.c @@ -0,0 +1,35 @@ +#include +#include +#include + +void boot_es2_setup_arch(void) +{ + boot_e2k_sic_setup_arch(); + + if (!boot_machine.cmdline_iset_ver) + boot_machine.native_iset_ver = ELBRUS_S_ISET; + boot_machine.mmu_pt_v6 = false; + boot_machine.mmu_separate_pt = false; + boot_machine.max_nr_node_cpus = ES2_MAX_NR_NODE_CPUS; + boot_machine.nr_node_cpus = ES2_NR_NODE_CPUS; + boot_machine.node_iolinks = ES2_NODE_IOLINKS; + boot_machine.pcicfg_area_phys_base = ES2_PCICFG_AREA_PHYS_BASE; + boot_machine.pcicfg_area_size = ES2_PCICFG_AREA_SIZE; + boot_machine.nsr_area_phys_base = ES2_NSR_AREA_PHYS_BASE; + boot_machine.nbsr_area_offset = ES2_NBSR_AREA_OFFSET; + boot_machine.nbsr_area_size = ES2_NBSR_AREA_SIZE; + boot_machine.copsr_area_phys_base = ES2_COPSR_AREA_PHYS_BASE; + boot_machine.copsr_area_size = ES2_COPSR_AREA_SIZE; + boot_machine.mlt_size = ES2_MLT_SIZE; + boot_machine.tlb_lines_bits_num = ES2_TLB_LINES_BITS_NUM; + boot_machine.tlb_addr_line_num = ES2_TLB_ADDR_LINE_NUM; + boot_machine.tlb_addr_line_num2 = ES2_TLB_ADDR_LINE_NUM2; + 
boot_machine.tlb_addr_line_num_shift2 = ES2_TLB_ADDR_LINE_NUM_SHIFT2; + boot_machine.tlb_addr_set_num = ES2_TLB_ADDR_SET_NUM; + boot_machine.tlb_addr_set_num_shift = ES2_TLB_ADDR_SET_NUM_SHIFT; + boot_machine.sic_mc_size = 0; + boot_machine.sic_mc_count = ES2_SIC_MC_COUNT; + boot_machine.sic_mc1_ecc = ES2_SIC_MC1_ECC; + boot_machine.sic_io_str1 = SIC_io_str1; +} + diff --git a/arch/e2k/p2v/machdep.c b/arch/e2k/p2v/machdep.c new file mode 100644 index 000000000000..fea614d717b0 --- /dev/null +++ b/arch/e2k/p2v/machdep.c @@ -0,0 +1,2 @@ +#define BUILD_CPUHAS_INITIALIZERS +#include diff --git a/arch/e2k/pci/Makefile b/arch/e2k/pci/Makefile new file mode 100644 index 000000000000..62d163e2cf66 --- /dev/null +++ b/arch/e2k/pci/Makefile @@ -0,0 +1,2 @@ + +obj-$(CONFIG_PCI_ELBRUS) := pci.o diff --git a/arch/e2k/pci/pci.c b/arch/e2k/pci/pci.c new file mode 100644 index 000000000000..0bfb09ed7492 --- /dev/null +++ b/arch/e2k/pci/pci.c @@ -0,0 +1,553 @@ +/* + * Low-Level PCI Support for PC + * + * (c) 1999--2000 Martin Mares + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#undef DEBUG + +#ifdef DEBUG +#define DBG(x...) printk(x) +#else +#define DBG(x...) +#endif + +char *pcibios_setup(char *str) +{ + if (!strcmp(str, "off")) { + pci_probe = 0; + return NULL; + } + else if (!strcmp(str, "conf1")) { + pci_probe = PCI_PROBE_CONF1 | PCI_NO_CHECKS; + return NULL; + } + else if (!strcmp(str, "conf2")) { + pci_probe = PCI_PROBE_CONF2 | PCI_NO_CHECKS; + return NULL; + } + else if (!strcmp(str, "noacpi")) { + acpi_noirq_set(); + return NULL; + } + else if (!strcmp(str, "rom")) { + pci_probe |= PCI_ASSIGN_ROMS; + return NULL; + } else if (!strcmp(str, "assign-busses")) { + pci_probe |= PCI_ASSIGN_ALL_BUSSES; + return NULL; + } + return str; +} + +unsigned int pcibios_assign_all_busses(void) +{ + return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 
1 : 0; +} + +int pcibios_enable_device(struct pci_dev *dev, int mask) +{ + int err; + + if ((err = pci_enable_resources(dev, mask)) < 0) + return err; + + if (!pci_dev_msi_enabled(dev)) + return pcibios_enable_irq(dev); + return 0; +} + +void pcibios_disable_device (struct pci_dev *dev) +{ + if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq) + pcibios_disable_irq(dev); +} + +void __init pcibios_fixup_resources(struct pci_bus *pbus) +{ + /* Nothing to do */ +} + +/* + * Functions for accessing PCI configuration space with type 1 accesses + */ + +#define PCI_CONF1_ADDRESS(bus, devfn, reg) \ + (0x80000000 | (bus << 16) | (devfn << 8) | (reg & ~3)) + +static int pci_conf1_read(unsigned int seg, unsigned int bus, + unsigned int devfn, int reg, int len, u32 *value) +{ + unsigned long flags; + + if (!value || (bus > 255) || (devfn > 255) || (reg > 255)) + return -EINVAL; + + raw_spin_lock_irqsave(&pci_config_lock, flags); + + outl(PCI_CONF1_ADDRESS(bus, devfn, reg), 0xCF8); + + switch (len) { + case 1: + *value = inb(0xCFC + (reg & 3)); + break; + case 2: + *value = inw(0xCFC + (reg & 2)); + break; + case 4: + *value = inl(0xCFC); + break; + } + + raw_spin_unlock_irqrestore(&pci_config_lock, flags); + + return 0; +} + +static int pci_conf1_write(unsigned int seg, unsigned int bus, + unsigned int devfn, int reg, int len, u32 value) +{ + unsigned long flags; + + if ((bus > 255) || (devfn > 255) || (reg > 255)) + return -EINVAL; + + raw_spin_lock_irqsave(&pci_config_lock, flags); + + outl(PCI_CONF1_ADDRESS(bus, devfn, reg), 0xCF8); + + switch (len) { + case 1: + outb((u8)value, 0xCFC + (reg & 3)); + break; + case 2: + outw((u16)value, 0xCFC + (reg & 2)); + break; + case 4: + outl((u32)value, 0xCFC); + break; + } + + raw_spin_unlock_irqrestore(&pci_config_lock, flags); + + return 0; +} + +#undef PCI_CONF1_ADDRESS + +struct pci_raw_ops pci_direct_conf1 = { + .read = pci_conf1_read, + .write = pci_conf1_write, +}; + + +/* + * Functions for accessing PCI configuration space 
with type 2 accesses + */ + +#define PCI_CONF2_ADDRESS(dev, reg) (u16)(0xC000 | (dev << 8) | reg) + +static int pci_conf2_read(unsigned int seg, unsigned int bus, + unsigned int devfn, int reg, int len, u32 *value) +{ + unsigned long flags; + int dev, fn; + + if (!value || (bus > 255) || (devfn > 255) || (reg > 255)) + return -EINVAL; + + dev = PCI_SLOT(devfn); + fn = PCI_FUNC(devfn); + + if (dev & 0x10) + return PCIBIOS_DEVICE_NOT_FOUND; + + raw_spin_lock_irqsave(&pci_config_lock, flags); + + outb((u8)(0xF0 | (fn << 1)), 0xCF8); + outb((u8)bus, 0xCFA); + + switch (len) { + case 1: + *value = inb(PCI_CONF2_ADDRESS(dev, reg)); + break; + case 2: + *value = inw(PCI_CONF2_ADDRESS(dev, reg)); + break; + case 4: + *value = inl(PCI_CONF2_ADDRESS(dev, reg)); + break; + } + + outb(0, 0xCF8); + + raw_spin_unlock_irqrestore(&pci_config_lock, flags); + + return 0; +} + +static int pci_conf2_write(unsigned int seg, unsigned int bus, + unsigned int devfn, int reg, int len, u32 value) +{ + unsigned long flags; + int dev, fn; + + if ((bus > 255) || (devfn > 255) || (reg > 255)) + return -EINVAL; + + dev = PCI_SLOT(devfn); + fn = PCI_FUNC(devfn); + + if (dev & 0x10) + return PCIBIOS_DEVICE_NOT_FOUND; + + raw_spin_lock_irqsave(&pci_config_lock, flags); + + outb((u8)(0xF0 | (fn << 1)), 0xCF8); + outb((u8)bus, 0xCFA); + + switch (len) { + case 1: + outb((u8)value, PCI_CONF2_ADDRESS(dev, reg)); + break; + case 2: + outw((u16)value, PCI_CONF2_ADDRESS(dev, reg)); + break; + case 4: + outl((u32)value, PCI_CONF2_ADDRESS(dev, reg)); + break; + } + + outb(0, 0xCF8); + + raw_spin_unlock_irqrestore(&pci_config_lock, flags); + + return 0; +} + +#undef PCI_CONF2_ADDRESS + +static struct pci_raw_ops pci_direct_conf2 = { + .read = pci_conf2_read, + .write = pci_conf2_write, +}; + +/* + * Before we decide to use direct hardware access mechanisms, we try to do some + * trivial checks to ensure it at least _seems_ to be working -- we just test + * whether bus 00 contains a host bridge (this is 
similar to checking + * techniques used in XFree86, but ours should be more reliable since we + * attempt to make use of direct access hints provided by the PCI BIOS). + * + * This should be close to trivial, but it isn't, because there are buggy + * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID. + */ +static int __init pci_sanity_check(struct pci_raw_ops *o) +{ + u32 x = 0; + int devfn; + + if (pci_probe & PCI_NO_CHECKS) + return 1; + + for (devfn = 0; devfn < 0x100; devfn++) { + if (o->read(0, 0, devfn, PCI_CLASS_DEVICE, 2, &x)) + continue; + if (x == PCI_CLASS_BRIDGE_HOST || x == PCI_CLASS_DISPLAY_VGA) + return 1; + + if (o->read(0, 0, devfn, PCI_VENDOR_ID, 2, &x)) + continue; + if (x == PCI_VENDOR_ID_INTEL || x == PCI_VENDOR_ID_COMPAQ) + return 1; + } + + DBG("PCI: Sanity check failed\n"); + return 0; +} + +static int __init pci_check_type1(void) +{ + unsigned long flags; + unsigned int tmp; + int works = 0; + + raw_spin_lock_irqsave(&pci_config_lock, flags); + + outb(0x01, 0xCFB); + tmp = inl(0xCF8); + outl(0x80000000, 0xCF8); + + if (inl(0xCF8) == 0x80000000) { + raw_spin_unlock_irqrestore(&pci_config_lock, flags); + + if (pci_sanity_check(&pci_direct_conf1)) + works = 1; + + raw_spin_lock_irqsave(&pci_config_lock, flags); + } + outl(tmp, 0xCF8); + + raw_spin_unlock_irqrestore(&pci_config_lock, flags); + + return works; +} + +static int __init pci_check_type2(void) +{ + unsigned long flags; + int works = 0; + + raw_spin_lock_irqsave(&pci_config_lock, flags); + + outb(0x00, 0xCFB); + outb(0x00, 0xCF8); + outb(0x00, 0xCFA); + + if (inb(0xCF8) == 0x00 && inb(0xCFA) == 0x00) { + raw_spin_unlock_irqrestore(&pci_config_lock, flags); + + if (pci_sanity_check(&pci_direct_conf2)) + works = 1; + } else + raw_spin_unlock_irqrestore(&pci_config_lock, flags); + + return works; +} + +static int __init pci_direct_init(void) +{ + struct resource *region, *region2; + + pci_probe = PCI_PROBE_L; + if (!HAS_MACHINE_L_SIC) + pci_probe |= 
(PCI_PROBE_CONF1 | PCI_PROBE_CONF2); + + if ((pci_probe & PCI_PROBE_CONF1) == 0) + goto type2; + region = request_region(0xCF8, 8, "PCI conf1"); + if (!region) + goto type2; + + if (pci_check_type1()) { + printk(KERN_INFO "PCI: Using configuration type 1\n"); + raw_pci_ops = &pci_direct_conf1; + return 0; + } + release_resource(region); + + type2: + if ((pci_probe & PCI_PROBE_CONF2) == 0) + goto type_l; + region = request_region(0xCF8, 4, "PCI conf2"); + if (!region) + goto type_l; + region2 = request_region(0xC000, 0x1000, "PCI conf2"); + if (!region2) + goto fail2; + + if (pci_check_type2()) { + printk(KERN_INFO "PCI: Using configuration type 2\n"); + raw_pci_ops = &pci_direct_conf2; + return 0; + } + + release_resource(region2); + fail2: + release_resource(region); + + type_l: + if (HAS_MACHINE_L_SIC) + return l_pci_direct_init(); + return -1; +} + +int __init native_arch_pci_init(void) +{ + return pci_direct_init(); +} + +static int __init pci_init(void) +{ + return arch_pci_init(); +} + +arch_initcall(pci_init); + +/* + * We need to avoid collisions with `mirrored' VGA ports + * and other strange ISA hardware, so we always want the + * addresses to be allocated in the 0x000-0x0ff region + * modulo 0x400. + * + * Why? Because some silly external IO cards only decode + * the low 10 bits of the IO address. The 0x00-0xff region + * is reserved for motherboard devices that decode all 16 + * bits, so it's ok to allocate at, say, 0x2800-0x28ff, + * but we want to try to avoid allocating at 0x2900-0x2bff + * which might have be mirrored at 0x0100-0x03ff.. 
+ */ +resource_size_t +pcibios_align_resource(void *data, const struct resource *res, + resource_size_t size, resource_size_t align) +{ + resource_size_t start = res->start; + + if (res->flags & IORESOURCE_IO) { + if (start & 0x300) + start = (start + 0x3ff) & ~0x3ff; + } + + return start; +} + +void pcibios_set_master(struct pci_dev *dev) +{ + u8 lat; + pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat); + if (lat < 16) + lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency; + else if (lat > pcibios_max_latency) + lat = pcibios_max_latency; + else + return; + printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n", pci_name(dev), lat); + pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); +} + +int pci_mmap_page_range(struct pci_dev *dev, int bar, + struct vm_area_struct *vma, + enum pci_mmap_state mmap_state, int write_combine) +{ + unsigned long prot; + + /* I/O space cannot be accessed via normal processor loads and + * stores on this platform. + */ + if (mmap_state == pci_mmap_io) + return -EINVAL; + + /* Leave vm_pgoff as-is, the PCI space address is the physical + * address on this platform. + */ + + prot = pgprot_val(vma->vm_page_prot); + if (!write_combine || unlikely(no_writecombine)) + prot = _PAGE_SET_MEM_TYPE(prot, EXT_CONFIG_MT); + else + prot = _PAGE_SET_MEM_TYPE(prot, EXT_PREFETCH_MT); + vma->vm_page_prot = __pgprot(prot); + + if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) + return -EAGAIN; + + return 0; +} + +#if HAVE_PCI_LEGACY +/** + * pci_mmap_legacy_page_range - map legacy memory space to userland + * @bus: bus whose legacy space we're mapping + * @vma: vma passed in by mmap + * + * Map legacy memory space for this device back to userspace using a machine + * vector to get the base address. 
+ */ +int +pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma, + enum pci_mmap_state mmap_state) +{ + unsigned long size = vma->vm_end - vma->vm_start; + pgprot_t prot; + unsigned long addr = 0; + + /* We only support mmap'ing of legacy memory space */ + if (mmap_state != pci_mmap_mem) + return -ENOSYS; + + prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_pgoff += addr >> PAGE_SHIFT; + vma->vm_page_prot = prot; + + if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + size, vma->vm_page_prot)) + return -EAGAIN; + return 0; +} + +/** + * ia64_pci_legacy_read - read from legacy I/O space + * @bus: bus to read + * @port: legacy port value + * @val: caller allocated storage for returned value + * @size: number of bytes to read + * + * Simply reads @size bytes from @port and puts the result in @val. + * + * Again, this (and the write routine) are generic versions that can be + * overridden by the platform. This is necessary on platforms that don't + * support legacy I/O routing or that hard fail on legacy I/O timeouts. + */ +int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size) +{ + int ret = size; + switch (size) { + case 1: + *((u8 *)val) = inb(port); + break; + case 2: + *((u16 *)val) = inw(port); + break; + case 4: + *((u32 *)val) = inl(port); + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + +/** + * ia64_pci_legacy_write - perform a legacy I/O write + * @bus: bus pointer + * @port: port to write + * @val: value to write + * @size: number of bytes to write from @val + * + * Simply writes @size bytes of @val to @port. 
+ */ +int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size) +{ + int ret = size; + switch (size) { + case 1: + outb(val, port); + break; + case 2: + outw(val, port); + break; + case 4: + outl(val, port); + break; + default: + ret = -EINVAL; + break; + } + return ret; +} +#endif /*HAVE_PCI_LEGACY*/ diff --git a/arch/e2k/power/Makefile b/arch/e2k/power/Makefile new file mode 100644 index 000000000000..548dc860e8ef --- /dev/null +++ b/arch/e2k/power/Makefile @@ -0,0 +1,5 @@ +# __restore_processor_state() restores %gs after S3 resume and so should not +# itself be stack-protected + +obj-$(CONFIG_PM_SLEEP) += cpu.o +obj-$(CONFIG_HIBERNATION) += hibernate.o diff --git a/arch/e2k/power/cpu.c b/arch/e2k/power/cpu.c new file mode 100644 index 000000000000..ac8a69667c50 --- /dev/null +++ b/arch/e2k/power/cpu.c @@ -0,0 +1,27 @@ +/* + * Suspend support specific for e2k. + * + * Distribute under GPLv2 + * + * Copyright (c) 2011 Evgeny M. Kravtsunov + */ + +#include + +#include + +static unsigned long long suspended_sched_clock_value; + +void save_processor_state(void) +{ + if (use_sclkr_sched_clock()) + suspended_sched_clock_value = sched_clock(); +} + +void restore_processor_state(void) +{ + if (use_sclkr_sched_clock()) { + atomic64_set(&prev_sclkr.res, 0); + sclkr_sched_offset = suspended_sched_clock_value - raw_read_sclkr(); + } +} diff --git a/arch/e2k/power/hibernate.c b/arch/e2k/power/hibernate.c new file mode 100644 index 000000000000..b72f7fc60722 --- /dev/null +++ b/arch/e2k/power/hibernate.c @@ -0,0 +1,423 @@ +#include +#include +#include + +#include +#include +#include +#include + +#define TAGS_PER_PAGE (PAGE_SIZE / TAGS_BYTES_PER_PAGE) + +struct tags_info { + void *page[TAGS_PER_PAGE]; + void *tags; +}; + +struct tag_data { + struct tags_info tags_info[(PAGE_SIZE - 8) / + sizeof(struct tags_info)]; + struct tag_data *next; +}; + +static struct tag_data *e2k_tag_data, *tag_wp; +static unsigned long tag_in_page, tags_info_cnt; +static unsigned 
long *metadata_pfns, metadata_nr; + +/* + * Free e2k_tag_data list of arrays. + */ +noinline /* To make sure we use stacks restored in restore_image() */ +static void free_tag_pages(void) +{ + struct tag_data *pkd; + int i; + while (e2k_tag_data) { + pkd = e2k_tag_data; + e2k_tag_data = pkd->next; + for (i = 0; i < ARRAY_SIZE(pkd->tags_info); i++) + free_page((unsigned long)pkd->tags_info[i].tags); + free_page((unsigned long)pkd); + } + free_pages((unsigned long)metadata_pfns, get_order( + metadata_nr * sizeof(metadata_pfns))); +} + +static int cmplong(const void *a, const void *b) +{ + return *(long *)a - *(long *)b; +} + +/* + * Allocate e2k_tag_data list of arrays. + */ +int alloc_tag_pages(unsigned long pages, unsigned long *tags) +{ + long j, k; + pages += pages / 100; /* BUG: sometimes pages more, so add 1% */ + metadata_nr = DIV_ROUND_UP(pages, TAGS_PER_PAGE); + k = get_order(metadata_nr * sizeof(metadata_pfns)); + metadata_pfns = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, k); + if (!metadata_pfns) { + free_tag_pages(); + return -ENOMEM; + } + k = 1 << k; + + for (j = 0; j < metadata_nr; k++) { + struct tag_data *pk; + int i; + pk = (void *)get_zeroed_page(GFP_KERNEL); + if (!pk) { + free_tag_pages(); + return -ENOMEM; + } + for (i = 0; i < ARRAY_SIZE(pk->tags_info) && + j < metadata_nr; i++, j++) { + pk->tags_info[i].tags = (void *) + __get_free_page(GFP_KERNEL); + if (!pk->tags_info[i].tags) { + free_tag_pages(); + return -ENOMEM; + } + metadata_pfns[j] = page_to_pfn(virt_to_page( + pk->tags_info[i].tags)); + } + pk->next = e2k_tag_data; + e2k_tag_data = pk; + } + sort(metadata_pfns, metadata_nr, sizeof(long), cmplong, NULL); + tag_wp = e2k_tag_data; + *tags = metadata_nr + k; + + return 0; +} + +/* + * Save the tags. 
+ */ +void save_tag_for_pfn(unsigned long pfn) +{ + void *to, *r, *from = page_address(pfn_to_page(pfn)); + struct tags_info *t; + r = bsearch(&pfn, metadata_pfns, metadata_nr, sizeof(long), cmplong); + if (r) + return; + if (WARN_ON_ONCE(!tag_wp)) { + return; + } + t = &tag_wp->tags_info[tags_info_cnt]; + + if (!t->tags) { + tag_wp = tag_wp->next; + tag_in_page = 0; + tags_info_cnt = 0; + if (WARN_ON_ONCE(!tag_wp)) + return; + t = &tag_wp->tags_info[tags_info_cnt]; + if (WARN_ON_ONCE(!t->tags)) + return; + } + to = t->tags + tag_in_page * TAGS_BYTES_PER_PAGE; + + if (!save_tags_from_data(from, to)) { + /* No tags in the page, skip it when restoring tags */ + return; + } + + t->page[tag_in_page] = from; + tag_in_page++; + if (tag_in_page < ARRAY_SIZE(t->page)) + return; + + tag_in_page = 0; + tags_info_cnt++; + if (tags_info_cnt < ARRAY_SIZE(tag_wp->tags_info)) + return; + + tags_info_cnt = 0; + tag_wp = tag_wp->next; +} + +noinline /* To make sure we use stacks restored in restore_image() */ +static void restore_tags(void) +{ + struct tag_data *pk = e2k_tag_data; + for (pk = e2k_tag_data; pk; pk = pk->next) { + unsigned long i, j; + for (i = 0; i < ARRAY_SIZE(pk->tags_info); i++) { + struct tags_info *t = pk->tags_info; + for (j = 0; j < ARRAY_SIZE(t->page); j++) { + void *to = t->page[j]; + void *from = t->tags + j * TAGS_BYTES_PER_PAGE; + if (!to) + goto out; + restore_tags_for_data(to, from); + } + } + } +out:; +} + +/* + * pfn_is_nosave - check if given pfn is in the 'nosave' section + */ + +int pfn_is_nosave(unsigned long pfn) +{ + unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT; + unsigned long nosave_end_pfn = + PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT; + return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); +} + + +static struct task_struct *task_to_recover; +static struct sw_regs sw_regs_to_recover; + +int swsusp_arch_suspend(void) +{ + unsigned long flags; + BUILD_BUG_ON(sizeof(struct tag_data) > PAGE_SIZE); + + 
task_to_recover = current; + + raw_all_irq_save(flags); + NATIVE_SAVE_TASK_REGS_TO_SWITCH(current); + sw_regs_to_recover = current->thread.sw_regs; + raw_all_irq_restore(flags); + + return swsusp_save(); +} + +#define r64(_a) ({ \ + void *_v = (void *)NATIVE_READ_MAS_D(__pa(_a), MAS_LOAD_PA); \ + _v; }) +#define w64(_v, _a) NATIVE_WRITE_MAS_D(__pa(_a), _v, MAS_STORE_PA) + +static inline void copy_image(void) +{ + struct pbe *pbe; + for (pbe = restore_pblist; pbe; pbe = r64(&pbe->next)) { + u64 *to = r64(&pbe->orig_address); + u64 *from = r64(&pbe->address); + int i; + for (i = 0; i < PAGE_SIZE / sizeof(*to); i++, to++, from++) + w64(r64(from), to); + } +} + +__used +static void restore_image(pgd_t *resume_pg_dir, struct pbe *restore_pblist) +{ + extern int in_suspend; + struct task_struct *task; + struct sw_regs *to, *from; + + set_root_pt(resume_pg_dir); + native_raw_flush_TLB_all(); + + copy_image(); + + set_kernel_MMU_state(); + native_raw_flush_TLB_all(); + + task = task_to_recover; + from = &sw_regs_to_recover; + to = &task->thread.sw_regs; + + *to = *from; + + set_current_thread_info(task_thread_info(task), task); + if (task->mm != NULL) + reload_thread(task->mm); + + /* + * Restore state registers of current process to enable + * switching to the interrupted task as end of recovery of the system + */ + + NATIVE_RESTORE_TASK_REGS_TO_SWITCH(task, task_thread_info(task)); + + /* Start receiving NMIs again */ + raw_local_irq_disable(); + + /* tags restoring and freeing can have stack recursions possibly + * more than 1 page of safe procedure stack. So move this *after* + * hardware stacks are switched back. */ + restore_tags(); + free_tag_pages(); + + /* + * Tell the hibernation core that we've just restored + * the memory + */ + in_suspend = 0; +} + +/* Switch to a safe stack so that we don't accidentally SPILL into just + * restored data. 
+ * + * Note that we cannot switch stacks and restore image data in the same + * function because then SP would be initialized *before* the stack switch. + * + * Also note that 1 page for procedure stack is *very* little for e2k + * (just 128 quadro registers, and one function can use at maximum + * E2K_MAXSR_q == 112 quadro registers). That's why we use JUMP - to save + * some registers for callee. */ +static int switch_stacks_and_goto_restore_image(pgd_t *resume_pg_dir, + struct pbe *restore_pblist) +{ + unsigned long chain_stack, proc_stack, data_stack; + + chain_stack = get_safe_page(GFP_ATOMIC); + proc_stack = get_safe_page(GFP_ATOMIC); + data_stack = get_safe_page(GFP_ATOMIC); + if (!chain_stack || !proc_stack || !data_stack) + return -ENOMEM; + + /* POINT OF NO RETURN + * + * We have got enough memory and from now on we cannot recover. */ + + raw_all_irq_disable(); /* Protect ourselves from NMIs */ + NATIVE_SWITCH_TO_KERNEL_STACK(proc_stack, PAGE_SIZE, + chain_stack, PAGE_SIZE, data_stack, PAGE_SIZE); + + E2K_JUMP_WITH_ARGUMENTS(restore_image, 2, resume_pg_dir, restore_pblist); +} + +static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr) +{ + pte_t pte = *src_pte; + if (pte_valid(pte)) + set_pte(dst_pte, pte_mkwrite(pte)); +} + +static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start, + unsigned long end) +{ + pte_t *src_pte; + pte_t *dst_pte; + unsigned long addr = start; + + dst_pte = (pte_t *)get_safe_page(GFP_ATOMIC); + if (!dst_pte) + return -ENOMEM; + pmd_populate_kernel(&init_mm, dst_pmd, dst_pte); + dst_pte = pte_offset_kernel(dst_pmd, start); + + src_pte = pte_offset_kernel(src_pmd, start); + do { + _copy_pte(dst_pte, src_pte, addr); + } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end); + + return 0; +} + +#define pmd_table(x) (!kernel_pmd_huge(x)) +#define pud_table(x) (!kernel_pud_huge(x)) + +static int copy_pmd(pud_t *dst_pud, pud_t *src_pud, unsigned long start, + unsigned long end) +{ + pmd_t 
*src_pmd; + pmd_t *dst_pmd; + unsigned long next; + unsigned long addr = start; + + if (pud_none(*dst_pud)) { + dst_pmd = (pmd_t *)get_safe_page(GFP_ATOMIC); + if (!dst_pmd) + return -ENOMEM; + pud_populate(&init_mm, dst_pud, dst_pmd); + } + dst_pmd = pmd_offset(dst_pud, start); + + src_pmd = pmd_offset(src_pud, start); + do { + next = pmd_addr_end(addr, end); + if (pmd_none(*src_pmd)) + continue; + if (pmd_table(*src_pmd)) { + if (copy_pte(dst_pmd, src_pmd, addr, next)) + return -ENOMEM; + } else { + set_pmd(dst_pmd, pmd_mkwrite(*src_pmd)); + } + } while (dst_pmd++, src_pmd++, addr = next, addr != end); + + return 0; +} + + +#define my_pgd_populate(mm, pgdp, pudp) (*(pgdp) = mk_pgd_phys_k(pudp)) + +static int copy_pud(pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long start, + unsigned long end) +{ + pud_t *dst_pud; + pud_t *src_pud; + unsigned long next; + unsigned long addr = start; + + if (pgd_none(*dst_pgd)) { + dst_pud = (pud_t *)get_safe_page(GFP_ATOMIC); + if (!dst_pud) + return -ENOMEM; + my_pgd_populate(&init_mm, dst_pgd, dst_pud); + } + dst_pud = pud_offset(dst_pgd, start); + + src_pud = pud_offset(src_pgd, start); + do { + next = pud_addr_end(addr, end); + if (pud_none(*src_pud)) + continue; + if (pud_table(*(src_pud))) { + if (copy_pmd(dst_pud, src_pud, addr, next)) + return -ENOMEM; + } else { + set_pud(dst_pud, pud_mkwrite(*src_pud)); + } + } while (dst_pud++, src_pud++, addr = next, addr != end); + + return 0; +} + +static int copy_page_tables(pgd_t *dst_pgd, unsigned long start, + unsigned long end) +{ + unsigned long next; + unsigned long addr = start; + pgd_t *src_pgd = pgd_offset_k(start); + + dst_pgd = dst_pgd + pgd_index(start); + do { + next = pgd_addr_end(addr, end); + if (pgd_none(*src_pgd)) + continue; + if (copy_pud(dst_pgd, src_pgd, addr, next)) + return -ENOMEM; + } while (dst_pgd++, src_pgd++, addr = next, addr != end); + + return 0; +} + +int swsusp_arch_resume(void) +{ + int error; + pgd_t *resume_pg_dir; + + resume_pg_dir = (pgd_t 
*)get_safe_page(GFP_ATOMIC); + if (!resume_pg_dir) + return -ENOMEM; + + error = copy_page_tables(resume_pg_dir, + PAGE_OFFSET, 0x0001000000000000); + if (error) + return error; + + error = switch_stacks_and_goto_restore_image(resume_pg_dir, restore_pblist); + return error; +} diff --git a/arch/l/Kconfig b/arch/l/Kconfig new file mode 100644 index 000000000000..d9cf20cc6d29 --- /dev/null +++ b/arch/l/Kconfig @@ -0,0 +1,287 @@ +# +# For a description of the syntax of this configuration file, +# see Documentation/kbuild/kconfig-language.txt. +# +config IOMMU_HELPER + bool + default y + select GENERIC_ALLOCATOR + +config HAVE_DMA_ATTRS + bool + default y + +menu "Elbrus Architecture Linux Kernel Configuration" + +config EPIC + bool "Elbrus PIC support" + depends on E90S || (E2K && (CPU_ISET >= 6 || CPU_ISET = 0 || KVM_HOST_MODE)) + def_bool y + help + Elbrus Programmable Interrupt Controller supports multiprocessor + systems with up to 1024 cores and uses 10 bit vectors for + masked interrupts. + Controller implements hardware support for interrupt virtualization. + Consists of CEPIC (per core), PREPIC (per processor) and IOEPIC. + Say 'Y' to enable EPIC support in kernel + +menu "Boot/prom console support" + +config L_EARLY_PRINTK + def_bool n + +config SERIAL_PRINTK + bool "dump_printk() support" + depends on E2K || E90S + default y + select L_EARLY_PRINTK + help + dump_printk() outputs directly to serial port bypassing all + buffers and locks. It is useful for hardware debugging. + + Only two devices are supported currently: ns16550 (== 8550) + and l_zilog (== am85c30). + + On simulator dump_printk() also outputs to LMS console. + + Kernel parameter "boot_printk_all" is added. If set, dump_print() + will be used everywhere instead of printk(). + +config NVRAM_PANIC + bool "save panic output to nvram" + depends on E2K || E90S + default y + help + panic output is saved into nvram. 
It can be extracted from + nvram after reboot using /proc/sys/kernel/nvram_panic + +config SERIAL_AM85C30_CONSOLE + bool "Init-time serial console on Am85c30 zilog and compatible devices" + depends on SERIAL_PRINTK && (!E2K || SERIAL_AM85C30_BOOT_CONSOLE) + default y + help + Say Y to support simple console based on Am85c30 serial port. + +config EARLY_DUMP_CONSOLE + bool "Early serial console based on Am85c30/8250 or hvc" + depends on L_EARLY_PRINTK + default y + help + Say Y to enable early serial console support. + Useful when debugging kernel boot process. + +endmenu # "Boot/prom console support" + +config MCST + bool + default y + +config CLKR_CLOCKSOURCE + bool + default E2K || E90S + +config CLKR_SYNCHRONIZATION_WARNING + bool + depends on CLKR_CLOCKSOURCE && SMP + default n + help + Set to Y to enable warnings about CLKR registers synchronization + across several CPUs. + +config CLKR_OFFSET + bool + depends on CLKR_CLOCKSOURCE && SMP + default E2K && RECOVERY || E90S + help + Set to Y when different cpus' clock registers have the same + frequency but different initial values. Then per-cpu offset + will be added to each cpu's register value. + +config IOHUB_GPIO + tristate "IOHUB Gpiolib" + depends on GPIOLIB + default m + help + Gpiolib implementation for Elbrus IOHUB. + +config PIC + bool "PIC support" + depends on E2K + default n + help + A PIC (Programmable Interrupt Controller) is an old-style + interrupt controller. If you say Y here, the kernel will use the PIC + only during initialization to service early-time interrupts. + Later, after initialization of SMP mode and the local and IO APICs, + the kernel will switch all interrupts from the PIC to the IO-APIC. + If you say N (recommended), the PIC is never used and not even + initialized. + +config L_X86_64 + def_bool y + depends on L_LOCAL_APIC + ---help--- + This option helps compiling code copied from arch/x86/kernel/apic + with minimal changes to code (renamed X86_64 -> L_X86_64). 
+ Hopefully it will be easier to update the code this way. + +config L_UP_APIC + bool + depends on !SMP + default E2K || E90S + ---help--- + A local APIC (Advanced Programmable Interrupt Controller) is an + integrated interrupt controller in the CPU. If you have a single-CPU + system which has a processor with a local APIC, you can say Y here to + enable and use it. If you say Y here even though your machine doesn't + have a local APIC, then the kernel will still run with no slowdown at + all. The local APIC supports CPU-generated self-interrupts (timer, + performance counters), and the NMI watchdog which detects hard + lockups. + + If you have a system with several CPUs, you do not need to say Y + here: the local APIC will be used automatically. + +config L_UP_IOAPIC + bool + depends on L_UP_APIC + default E2K || E90S + ---help--- + An IO-APIC (I/O Advanced Programmable Interrupt Controller) is an + SMP-capable replacement for PC-style interrupt controllers. Most + SMP systems and many recent uniprocessor systems have one. + + If you have a single-CPU system with an IO-APIC, you can say Y here + to use it. If you say Y here even though your machine doesn't have + an IO-APIC, then the kernel will still run with no slowdown at all. 
+ +config L_LOCAL_APIC + def_bool E2K || E90S + depends on SMP || L_UP_APIC + +config L_IO_APIC + def_bool E2K || E90S + depends on SMP || L_UP_IOAPIC + +config L_PCI_QUIRKS + def_bool PCI_MSI + depends on L_IO_APIC + select PCI_QUIRKS + +config L_SIC_IPLINK_OFF + bool "MCST E2S: support for ipcc2 iplinks off" + depends on CPU_E2S + select PROC_FS + default y + help + Support for IPCC2 iplinks switching off + +config L_MMPD + bool "MMPD: support for test result field in kernel" + depends on E2K + select PROC_FS + default y + help + MMPD: support for test result field in kernel + provide /proc/mmpdstatus interface to hold + FPO error codes + +config L_PMC + tristate "Elbrus-1C+ Power Management Controller" + depends on E2K + select CPU_FREQ if !MCST_RT + select PM_OPP + default m + help + Power Management Controller for Elbrus-1C+ + +config S2_PMC + bool "R2000 Power Management Controller (S2 PMC)" + depends on E90S + select CPU_IDLE if !MCST_RT + select CPU_FREQ if !MCST_RT + default y # we need s2_get_freq_mult() for r2000 + help + Power Management Controller (PMC) for MCST R2000 (E90S/SPARC v9) CPU + +config I2C_SPI_RESET_CONTROLLER + bool "MCST I2C SPI Controller Support" + depends on PCI && (E2K || E90S) + default y + help + Support for MCST I2C SPI Controller + +config L_I2C_CONTROLLER + tristate "Elbrus I2C Controller" + depends on I2C && I2C_SPI_RESET_CONTROLLER + default y + help + If you say yes to this option, support will be + included for the Elbrus I2C controller that is + part of Elbrus IOHUB. + +config L_SPI_CONTROLLER + tristate "Elbrus SPI controller" + depends on SPI && I2C_SPI_RESET_CONTROLLER + default y + help + If you say yes to this option, support will be + included for the Elbrus SPI controller that is + part of Elbrus IOHUB. 
+ +config I2C_SPI_IRQ + bool + depends on L_SPI_CONTROLLER || L_I2C_CONTROLLER + default n + +config L_MTD_SPI_NOR + bool "Elbrus MTD support" + depends on L_SPI_CONTROLLER && MTD_SPI_NOR + default n + help + If you say yes to this option, embedded flash (S25FL064A or similar) driver will + be switched from SPIDEV to SPI-NOR. + Both can be used by flashrom userspace tool, but SPI-NOR/MTD also requires mtd-utils + package. SPIDEV is selected by default. + +config IPE2ST_POWER + tristate "Elbrus IPE2-ST Power Source" + depends on E2K && L_I2C_CONTROLLER && GPIO_PCA953X && ISL22317 + default m + help + If you say yes to this option, support will be included for the + Elbrus IPE2-ST Power Source. IPE2-ST uses GPIO_PCA953x and ISL22317 + by modprobing them from /etc. + +config ACPI_L_SPMC + bool "Processor-8 SPMC Controller (SCI, PM Timer, sleep states)" + default y + help + If you say yes to this option, support will be + included for the Processor-8 SPMC Controller (SCI, PM Timer, + sleep states) that is part of IOHUB-2. + +menu "Device Tree" +config OF + bool "Device Tree support" + default y + select DTC if E2K + select OF_EARLY_FLATTREE + select PROC_DEVICETREE + select OF_FLATTREE if E90S + select OF_ADDRESS if E90S + help + If you say yes to this option, support will be included for + Device Tree data structure. The tree will be displayed in + /proc/device-tree. +config DTB_L_TEST + bool "Use kernel's built-in dtb blob" + default n + depends on OF + help + If you say yes to this option, kernel will use built-in dtb blob + instead of trying to get it from bootloader. Built-in blob is an + array defined in arch/l/kernel/devtree.c. 
+ +endmenu +endmenu # "Elbrus Architecture Linux Kernel Configuration" diff --git a/arch/l/Kconfig.debug b/arch/l/Kconfig.debug new file mode 100644 index 000000000000..347b6ef83122 --- /dev/null +++ b/arch/l/Kconfig.debug @@ -0,0 +1,19 @@ +menu "Elbrus architecture kernel hacking" + +config BOOT_TRACE + bool "Profile boot process" + depends on ARCH_BOOT_TRACE_POSSIBLE + default n + ---help--- + If this option is enabled, some time statistics will be collected + during boot. They are available in the proc filesystem in + /proc/loadtime_kernel and /proc/loadtime. + +config BOOT_TRACE_THRESHOLD + int "Threshold in milliseconds" + depends on BOOT_TRACE + default 100 + ---help--- + Minimum delta between events for them to be printed. + +endmenu diff --git a/arch/l/Makefile b/arch/l/Makefile new file mode 100644 index 000000000000..a7e2ec2ceb34 --- /dev/null +++ b/arch/l/Makefile @@ -0,0 +1,13 @@ + +# +# l/Makefile +# +# This file is included by the Elbrus arches makefile to support common +# features of the architecture +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. 
+# + +obj-y += kernel/ diff --git a/arch/l/include/asm/acenv.h b/arch/l/include/asm/acenv.h new file mode 100644 index 000000000000..69935141fc64 --- /dev/null +++ b/arch/l/include/asm/acenv.h @@ -0,0 +1,14 @@ + +#ifndef _ASM_L_ACENV_H_ +#define _ASM_L_ACENV_H_ + +int __acpi_acquire_global_lock(unsigned int *lock); +int __acpi_release_global_lock(unsigned int *lock); + +#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \ + ((Acq) = __acpi_acquire_global_lock(&facs->global_lock)) + +#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \ + ((Acq) = __acpi_release_global_lock(&facs->global_lock)) + +#endif /* _ASM_L_ACENV_H_ */ diff --git a/arch/l/include/asm/acpi.h b/arch/l/include/asm/acpi.h new file mode 100644 index 000000000000..cab294b6a861 --- /dev/null +++ b/arch/l/include/asm/acpi.h @@ -0,0 +1,137 @@ +#ifndef _ASM_L_ACPI_H +#define _ASM_L_ACPI_H + +/* + * Copyright (C) 2001 Paul Diefenbaugh + * Copyright (C) 2001 Patrick Mochel + * Copuright (C) 2012 Evgeny Kravtsunov + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ +#include +#include + +#include + +#define COMPILER_DEPENDENT_INT64 long +#define COMPILER_DEPENDENT_UINT64 unsigned long + +/* + * Calling conventions: + * + * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) + * ACPI_EXTERNAL_XFACE - External ACPI interfaces + * ACPI_INTERNAL_XFACE - Internal ACPI interfaces + * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces + */ +#define ACPI_SYSTEM_XFACE +#define ACPI_EXTERNAL_XFACE +#define ACPI_INTERNAL_XFACE +#define ACPI_INTERNAL_VAR_XFACE + +/* Asm macros */ + +#define ACPI_ASM_MACROS +#define BREAKPOINT3 +#define ACPI_DISABLE_IRQS() raw_local_irq_disable() +#define ACPI_ENABLE_IRQS() raw_local_irq_enable() + + + +#ifdef CONFIG_ACPI +#include +enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT, + IDLE_POLL}; + +extern int acpi_lapic; +extern int acpi_ioapic; +extern int acpi_noirq; +extern int acpi_strict; +extern int acpi_disabled; +extern int acpi_ht; +extern int acpi_pci_disabled; +extern int acpi_skip_timer_override; +extern int acpi_use_timer_override; +extern int acpi_fix_pin2_polarity; + +extern u8 acpi_sci_flags; +extern int acpi_sci_override_gsi; +void acpi_pic_sci_set_trigger(unsigned int, u16); + +static inline void disable_acpi(void) +{ + acpi_disabled = 1; + acpi_ht = 0; + acpi_pci_disabled = 1; + acpi_noirq = 1; +} + +extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); + +static inline void acpi_noirq_set(void) { acpi_noirq = 1; } +static inline void acpi_disable_pci(void) +{ + acpi_pci_disabled = 1; + acpi_noirq_set(); +} + +/* routines for saving/restoring kernel state */ +extern int acpi_save_state_mem(void); +extern void acpi_restore_state_mem(void); + 
+extern unsigned long acpi_wakeup_address; + +/* + * Check if the CPU can handle C2 and deeper + */ +static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate) +{ + /* here check machine type taken from mptable */ + return 1; +} + +/* + * Elbrus won't implement _PDC as it is deprecated in ACPI4.0 in favor of _OSC + */ +static inline bool arch_has_acpi_pdc(void) +{ + return 0; +} + +static inline void arch_acpi_set_pdc_bits(u32 *buf) +{ + return; +} + +#else /* !CONFIG_ACPI */ + +#define acpi_lapic 0 +#define acpi_ioapic 0 +#define acpi_disable_cmcff 0 +static inline void acpi_noirq_set(void) { } +static inline void acpi_disable_pci(void) { } +static inline void disable_acpi(void) { } + +#endif /* !CONFIG_ACPI */ + +#define ARCH_HAS_POWER_INIT 0 + +#define acpi_unlazy_tlb(x) + +#endif /* _ASM_L_ACPI_H */ diff --git a/arch/l/include/asm/apic.h b/arch/l/include/asm/apic.h new file mode 100644 index 000000000000..e26698841344 --- /dev/null +++ b/arch/l/include/asm/apic.h @@ -0,0 +1,776 @@ +#ifndef _ASM_L_APIC_H +#define _ASM_L_APIC_H + +#include +#include + +#if 0 +#include +#include +#include +#include +#else +#include +#include +#endif +#include +#include +#include +#include +#include +#include + +#if defined CONFIG_E2K || defined CONFIG_E90S +# define cpu_has_tsc 1 +# define cpu_has_apic 1 +# define cpu_has_x2apic 0 + +# define READ_APIC_ID() GET_APIC_ID(arch_apic_read(APIC_ID)) + +extern int first_system_vector; +#endif + +#if 0 +#define ARCH_APICTIMER_STOPS_ON_C3 1 +#endif + +/* + * Debugging macros + */ +#define APIC_QUIET 0 +#define APIC_VERBOSE 1 +#define APIC_DEBUG 2 + +/* + * Define the default level of output to be very little + * This can be turned up by using apic=verbose for more + * information and apic=debug for _lots_ of information. + * apic_verbosity is defined in apic.c + */ +#define apic_printk(v, s, a...) 
do { \ + if ((v) <= apic_verbosity) \ + printk(s, ##a); \ + } while (0) + +extern unsigned int calibration_result; + +#if defined(CONFIG_L_LOCAL_APIC) && defined(CONFIG_L_X86_32) +extern void generic_apic_probe(void); +#else +static inline void generic_apic_probe(void) +{ +} +#endif + +#ifdef CONFIG_L_LOCAL_APIC + +# define READ_APIC_ID() GET_APIC_ID(arch_apic_read(APIC_ID)) +# define BOOT_READ_APIC_ID() GET_APIC_ID(boot_arch_apic_read(APIC_ID)) + +extern unsigned int apic_verbosity; +extern int local_apic_timer_c2_ok; + +#if 0 +extern int disable_apic; +#else +#define disable_apic 0 +#endif +extern unsigned int lapic_timer_frequency; + +#ifdef CONFIG_SMP +extern void __inquire_remote_apic(int apicid); +#else /* CONFIG_SMP */ +static inline void __inquire_remote_apic(int apicid) +{ +} +#endif /* CONFIG_SMP */ + +static inline void default_inquire_remote_apic(int apicid) +{ + if (apic_verbosity >= APIC_DEBUG) + __inquire_remote_apic(apicid); +} + +/* + * With 82489DX we can't rely on apic feature bit + * retrieved via cpuid but still have to deal with + * such an apic chip so we assume that SMP configuration + * is found from MP table (64bit case uses ACPI mostly + * which set smp presence flag as well so we are safe + * to use this helper too). + */ +static inline bool apic_from_smp_config(void) +{ + return smp_found_config && !disable_apic; +} + +/* + * Basic functions accessing APICs. 
+ */ +#ifdef CONFIG_PARAVIRT +#include +#endif + +#if 0 +#ifdef CONFIG_L_X86_64 +extern int is_vsmp_box(void); +#else +static inline int is_vsmp_box(void) +{ + return 0; +} +#endif +#else +# define is_vsmp_box() 0 +#endif +extern void xapic_wait_icr_idle(void); +extern u32 safe_xapic_wait_icr_idle(void); +extern void xapic_icr_write(u32, u32); +extern int setup_profiling_timer(unsigned int); + +#if 0 +static inline void native_apic_mem_write(u32 reg, u32 v) +{ + volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg); + + alternative_io("movl %0, %1", "xchgl %0, %1", X86_FEATURE_11AP, + ASM_OUTPUT2("=r" (v), "=m" (*addr)), + ASM_OUTPUT2("0" (v), "m" (*addr))); +} + +static inline u32 native_apic_mem_read(u32 reg) +{ + return *((volatile u32 *)(APIC_BASE + reg)); +} +#else +static inline void native_apic_mem_write(u32 reg, u32 v) +{ + arch_apic_write(reg, v); +} + +static inline u32 native_apic_mem_read(u32 reg) +{ + return arch_apic_read(reg); +} +#endif + +extern void native_apic_wait_icr_idle(void); +extern u32 native_safe_apic_wait_icr_idle(void); +extern void native_apic_icr_write(u32 low, u32 id); +extern u64 native_apic_icr_read(void); + +extern int x2apic_mode; + +#ifdef CONFIG_X86_X2APIC +/* + * Make previous memory operations globally visible before + * sending the IPI through x2apic wrmsr. We need a serializing instruction or + * mfence for this. 
+ */ +static inline void x2apic_wrmsr_fence(void) +{ + asm volatile("mfence" : : : "memory"); +} + +static inline void native_apic_msr_write(u32 reg, u32 v) +{ + if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR || + reg == APIC_LVR) + return; + + wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0); +} + +static inline void native_apic_msr_eoi_write(u32 reg, u32 v) +{ + wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0); +} + +static inline u32 native_apic_msr_read(u32 reg) +{ + u64 msr; + + if (reg == APIC_DFR) + return -1; + + rdmsrl(APIC_BASE_MSR + (reg >> 4), msr); + return (u32)msr; +} + +static inline void native_x2apic_wait_icr_idle(void) +{ + /* no need to wait for icr idle in x2apic */ + return; +} + +static inline u32 native_safe_x2apic_wait_icr_idle(void) +{ + /* no need to wait for icr idle in x2apic */ + return 0; +} + +static inline void native_x2apic_icr_write(u32 low, u32 id) +{ + wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low); +} + +static inline u64 native_x2apic_icr_read(void) +{ + unsigned long val; + + rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val); + return val; +} + +extern int x2apic_phys; +extern int x2apic_preenabled; +extern void check_x2apic(void); +extern void enable_x2apic(void); +extern void x2apic_icr_write(u32 low, u32 id); +static inline int x2apic_enabled(void) +{ + u64 msr; + + if (!cpu_has_x2apic) + return 0; + + rdmsrl(MSR_IA32_APICBASE, msr); + if (msr & X2APIC_ENABLE) + return 1; + return 0; +} + +#define x2apic_supported() (cpu_has_x2apic) +static inline void x2apic_force_phys(void) +{ + x2apic_phys = 1; +} +#else +static inline void disable_x2apic(void) +{ +} +static inline void check_x2apic(void) +{ +} +static inline void enable_x2apic(void) +{ +} +static inline int x2apic_enabled(void) +{ + return 0; +} +static inline void x2apic_force_phys(void) +{ +} + +#define nox2apic 0 +#define x2apic_preenabled 0 +#define x2apic_supported() 0 +#endif + +extern void enable_IR_x2apic(void); + +extern int 
get_physical_broadcast(void); + +extern int lapic_get_maxlvt(void); +extern void clear_local_APIC(void); +extern void connect_bsp_APIC(void); +extern void disconnect_bsp_APIC(int virt_wire_setup); +extern void disable_local_APIC(void); + +#ifdef CONFIG_E2K +extern void clear_local_APIC(void); +#endif /* CONFIG_E2K */ + +extern void lapic_shutdown(void); +extern int verify_local_APIC(void); +extern void sync_Arb_IDs(void); +extern void init_bsp_APIC(void); +extern void setup_local_APIC(void); +extern void end_local_APIC_setup(void); +extern void bsp_end_local_APIC_setup(void); +extern void init_apic_mappings(void); +void register_lapic_address(unsigned long address); +extern void setup_boot_APIC_clock(void); +extern void setup_secondary_APIC_clock(void); +extern int APIC_init_uniprocessor(void); +extern int apic_force_enable(unsigned long addr); + +/* + * On 32bit this is mach-xxx local + */ +#ifdef CONFIG_L_X86_64 +extern int apic_is_clustered_box(void); +#else +static inline int apic_is_clustered_box(void) +{ + return 0; +} +#endif + +extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask); + +#else /* !CONFIG_L_LOCAL_APIC */ +static inline void lapic_shutdown(void) { } +#define local_apic_timer_c2_ok 1 +static inline void init_apic_mappings(void) { } +static inline void disable_local_APIC(void) { } + +#ifdef CONFIG_E2K +static inline void clear_local_APIC(void) { } +#endif /* CONFIG_E2K */ + +# define setup_boot_APIC_clock x86_init_noop +# define setup_secondary_APIC_clock x86_init_noop +#endif /* !CONFIG_L_LOCAL_APIC */ + +#ifdef CONFIG_L_X86_64 +#define SET_APIC_ID(x) (apic->set_apic_id(x)) +#else + +#endif + +/* + * Copyright 2004 James Cleverdon, IBM. + * Subject to the GNU Public License, v.2 + * + * Generic APIC sub-arch data struct. + * + * Hacked for x86-64 by James Cleverdon from i386 architecture code by + * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and + * James Cleverdon. 
+ */ +struct apic { + char *name; + + int (*probe)(void); + int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); + int (*apic_id_valid)(int apicid); + int (*apic_id_registered)(void); + + u32 irq_delivery_mode; + u32 irq_dest_mode; + + const struct cpumask *(*target_cpus)(void); + + int disable_esr; + + int dest_logical; + unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid); + unsigned long (*check_apicid_present)(int apicid); + + void (*vector_allocation_domain)(int cpu, struct cpumask *retmask, + const struct cpumask *mask); + void (*init_apic_ldr)(void); + + void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap); + + void (*setup_apic_routing)(void); + int (*multi_timer_check)(int apic, int irq); + int (*cpu_present_to_apicid)(int mps_cpu); + void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap); + void (*setup_portio_remap)(void); + int (*check_phys_apicid_present)(int phys_apicid); + void (*enable_apic_mode)(void); + int (*phys_pkg_id)(int cpuid_apic, int index_msb); + + /* + * When one of the next two hooks returns 1 the apic + * is switched to this. 
Essentially they are additional + * probe functions: + */ + int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid); + + unsigned int (*get_apic_id)(unsigned long x); + unsigned long (*set_apic_id)(unsigned int id); + unsigned long apic_id_mask; + + int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, + const struct cpumask *andmask, + unsigned int *apicid); + + /* ipi */ + void (*send_IPI_mask)(const struct cpumask *mask, int vector); + void (*send_IPI_mask_allbutself)(const struct cpumask *mask, + int vector); + void (*send_IPI_allbutself)(int vector); + void (*send_IPI_all)(int vector); + void (*send_IPI_self)(int vector); + + /* wakeup_secondary_cpu */ + int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip); + + int trampoline_phys_low; + int trampoline_phys_high; + + void (*wait_for_init_deassert)(atomic_t *deassert); + void (*smp_callin_clear_local_apic)(void); + void (*inquire_remote_apic)(int apicid); + + /* apic ops */ + u32 (*read)(u32 reg); + void (*write)(u32 reg, u32 v); + /* + * ->eoi_write() has the same signature as ->write(). + * + * Drivers can support both ->eoi_write() and ->write() by passing the same + * callback value. Kernel can override ->eoi_write() and fall back + * on write for EOI. + */ + void (*eoi_write)(u32 reg, u32 v); + u64 (*icr_read)(void); + void (*icr_write)(u32 low, u32 high); + void (*wait_icr_idle)(void); + u32 (*safe_wait_icr_idle)(void); + +#ifdef CONFIG_L_X86_32 + /* + * Called very early during boot from get_smp_config(). It should + * return the logical apicid. x86_[bios]_cpu_to_apicid is + * initialized before this function is called. + * + * If logical apicid can't be determined that early, the function + * may return BAD_APICID. Logical apicid will be configured after + * init_apic_ldr() while bringing up CPUs. Note that NUMA affinity + * won't be applied properly during early boot in this case. 
+ */ + int (*x86_32_early_logical_apicid)(int cpu); + + /* + * Optional method called from setup_local_APIC() after logical + * apicid is guaranteed to be known to initialize apicid -> node + * mapping if NUMA initialization hasn't done so already. Don't + * add new users. + */ + int (*x86_32_numa_cpu_node)(int cpu); +#endif +}; + +/* + * Pointer to the local APIC driver in use on this system (there's + * always just one such driver in use - the kernel decides via an + * early probing process which one it picks - and then sticks to it): + */ +extern struct apic *apic; + +/* + * APIC drivers are probed based on how they are listed in the .apicdrivers + * section. So the order is important and enforced by the ordering + * of different apic driver files in the Makefile. + * + * For the files having two apic drivers, we use apic_drivers() + * to enforce the order with in them. + */ +#define apic_driver(sym) \ + static const struct apic *__apicdrivers_##sym __used \ + __aligned(sizeof(struct apic *)) \ + __section(.apicdrivers) = { &sym } + +#define apic_drivers(sym1, sym2) \ + static struct apic *__apicdrivers_##sym1##sym2[2] __used \ + __aligned(sizeof(struct apic *)) \ + __section(.apicdrivers) = { &sym1, &sym2 } + +extern struct apic *__apicdrivers[], *__apicdrivers_end[]; + +/* + * APIC functionality to boot other CPUs - only used on SMP: + */ +#ifdef CONFIG_SMP +extern atomic_t init_deasserted; +extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip); +#endif + +#ifdef CONFIG_L_LOCAL_APIC + +static inline u32 apic_read(u32 reg) +{ + return apic->read(reg); +} + +static inline void apic_write(u32 reg, u32 val) +{ + apic->write(reg, val); +} + +static inline void apic_eoi(void) +{ + apic->eoi_write(APIC_EOI, APIC_EOI_ACK); +} + +static inline u64 apic_icr_read(void) +{ + return apic->icr_read(); +} + +static inline void apic_icr_write(u32 low, u32 high) +{ + apic->icr_write(low, high); +} + +static inline void apic_wait_icr_idle(void) +{ + 
apic->wait_icr_idle(); +} + +static inline u32 safe_apic_wait_icr_idle(void) +{ + return apic->safe_wait_icr_idle(); +} + +extern void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)); + +#else /* CONFIG_L_LOCAL_APIC */ + +static inline u32 apic_read(u32 reg) { return 0; } +static inline void apic_write(u32 reg, u32 val) { } +static inline void apic_eoi(void) { } +static inline u64 apic_icr_read(void) { return 0; } +static inline void apic_icr_write(u32 low, u32 high) { } +static inline void apic_wait_icr_idle(void) { } +static inline u32 safe_apic_wait_icr_idle(void) { return 0; } +static inline void apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)) {} + +#endif /* CONFIG_L_LOCAL_APIC */ + +static inline void ack_APIC_irq(void) +{ + /* + * ack_APIC_irq() actually gets compiled as a single instruction + * ... yummie. + */ + apic_eoi(); +} + +static inline unsigned default_get_apic_id(unsigned long x) +{ + unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR)); + + if (APIC_XAPIC(ver)/* || boot_cpu_has(X86_FEATURE_EXTD_APICID)*/) + return (x >> 24) & 0xFF; + else + return (x >> 24) & 0x0F; +} + +/* + * Warm reset vector default position: + */ +#define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467 +#define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469 + +#ifdef CONFIG_L_X86_64 +extern int default_acpi_madt_oem_check(char *, char *); + +extern void apic_send_IPI_self(int vector); + +#endif + +static inline void default_wait_for_init_deassert(atomic_t *deassert) +{ + while (!atomic_read(deassert)) + cpu_relax(); + return; +} + +extern void generic_bigsmp_probe(void); + + +#ifdef CONFIG_L_LOCAL_APIC + +#if 0 +#include +#endif + +#define APIC_DFR_VALUE (APIC_DFR_FLAT) + +static inline const struct cpumask *default_target_cpus(void) +{ +#ifdef CONFIG_SMP + return cpu_online_mask; +#else + return cpumask_of(0); +#endif +} + +static inline const struct cpumask *online_target_cpus(void) +{ + return cpu_online_mask; +} + +DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, 
x86_bios_cpu_apicid); +DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid); +#ifdef CONFIG_SMP +#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) +#else +#define cpu_physical_id(cpu) boot_cpu_physical_apicid +#endif + + +static inline unsigned int read_apic_id(void) +{ + unsigned int reg; + + reg = apic_read(APIC_ID); + + return apic->get_apic_id(reg); +} + +static inline int default_apic_id_valid(int apicid) +{ + return (apicid < 255); +} + +extern void default_setup_apic_routing(void); + +extern struct apic apic_noop; + +#ifdef CONFIG_L_X86_32 + +static inline int noop_x86_32_early_logical_apicid(int cpu) +{ + return BAD_APICID; +} + +/* + * Set up the logical destination ID. + * + * Intel recommends to set DFR, LDR and TPR before enabling + * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel + * document number 292116). So here it goes... + */ +extern void default_init_apic_ldr(void); + +static inline int default_apic_id_registered(void) +{ + return physid_isset(read_apic_id(), phys_cpu_present_map); +} + +static inline int default_phys_pkg_id(int cpuid_apic, int index_msb) +{ + return cpuid_apic >> index_msb; +} + +#endif + +static inline int +flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask, + unsigned int *apicid) +{ + unsigned long cpu_mask = cpumask_bits(cpumask)[0] & + cpumask_bits(andmask)[0] & + cpumask_bits(cpu_online_mask)[0] & + APIC_ALL_CPUS; + + if (likely(cpu_mask)) { + *apicid = (unsigned int)cpu_mask; + return 0; + } else { + return -EINVAL; + } +} + +extern int +default_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask, + unsigned int *apicid); + +static inline void +flat_vector_allocation_domain(int cpu, struct cpumask *retmask, + const struct cpumask *mask) +{ + /* Careful. Some cpus do not strictly honor the set of cpus + * specified in the interrupt destination when using lowest + * priority interrupt delivery mode. 
+ * + * In particular there was a hyperthreading cpu observed to + * deliver interrupts to the wrong hyperthread when only one + * hyperthread was specified in the interrupt desitination. + */ + cpumask_clear(retmask); + cpumask_bits(retmask)[0] = APIC_ALL_CPUS; +} + +static inline void +default_vector_allocation_domain(int cpu, struct cpumask *retmask, + const struct cpumask *mask) +{ + cpumask_copy(retmask, cpumask_of(cpu)); +} + +static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid) +{ + return physid_isset(apicid, *map); +} + +static inline unsigned long default_check_apicid_present(int bit) +{ + return physid_isset(bit, phys_cpu_present_map); +} + +static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) +{ + *retmap = *phys_map; +} + +static inline int __default_cpu_present_to_apicid(int mps_cpu) +{ + if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu)) + return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); + else + return BAD_APICID; +} + +static inline int +__default_check_phys_apicid_present(int phys_apicid) +{ + return physid_isset(phys_apicid, phys_cpu_present_map); +} + +/* #ifdef CONFIG_L_X86_32 */ +#if 1 +static inline int default_cpu_present_to_apicid(int mps_cpu) +{ + return __default_cpu_present_to_apicid(mps_cpu); +} + +static inline int +default_check_phys_apicid_present(int phys_apicid) +{ + return __default_check_phys_apicid_present(phys_apicid); +} +#else +extern int default_cpu_present_to_apicid(int mps_cpu); +extern int default_check_phys_apicid_present(int phys_apicid); +#endif + +#endif /* CONFIG_L_LOCAL_APIC */ + +static inline void entering_irq(void) +{ + l_irq_enter(); + exit_idle(); +} + +static inline void entering_ack_irq(void) +{ + entering_irq(); + ack_APIC_irq(); +} + +static inline void exiting_irq(void) +{ + l_irq_exit(); +} + +static inline void exiting_ack_irq(void) +{ + l_irq_exit(); + /* Ack only at the end to avoid potential reentry */ + ack_APIC_irq(); +} + 
+extern void ioapic_zap_locks(void); +struct irq_data; +extern void ack_apic_edge(struct irq_data *data); +#endif /* _ASM_L_APIC_H */ diff --git a/arch/l/include/asm/apicdef.h b/arch/l/include/asm/apicdef.h new file mode 100644 index 000000000000..eb0890785d55 --- /dev/null +++ b/arch/l/include/asm/apicdef.h @@ -0,0 +1,529 @@ +#ifndef _ASM_L_APICDEF_H +#define _ASM_L_APICDEF_H + +/* + * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) + * + * Alan Cox , 1995. + * Ingo Molnar , 1999, 2000 + */ + +#define IO_APIC_DEFAULT_PHYS_BASE 0xfec00000UL +#define APIC_DEFAULT_PHYS_BASE 0xfee00000UL + +/* + * This is the IO-APIC register space as specified + * by Intel docs: + */ +#define IO_APIC_SLOT_SIZE 1024 +#define APIC_REGS_SIZE 0x1000 + +#define APIC_BSP 0x10 +#define APIC_BSP_ENABLE 0x00000800 +#define APIC_BSP_IS_BSP 0x00000100 +#define APIC_ENABLE(x) ((x) & APIC_BSP_ENABLE) +#define BootStrap(x) ((x) & APIC_BSP_IS_BSP) +#define APIC_ID 0x20 +#define APIC_ID_SHIFT 24 +#define APIC_ID_SIZE 8 +#define APIC_ID_BIT_MASK ((1 << APIC_ID_SIZE) - 1) +#define APIC_ID_MASK (APIC_ID_BIT_MASK << \ + APIC_ID_SHIFT) +#define GET_APIC_ID(x) (((x) >> APIC_ID_SHIFT) & \ + APIC_ID_BIT_MASK) +#define APIC_LVR 0x30 +#define APIC_LVR_MASK 0xFF00FF +#define APIC_LVR_DIRECTED_EOI (1 << 24) +#define APIC_MAXLVT 0x03 +#define APIC_VERSION 0x10 +#define GET_APIC_VERSION(x) ((x) & 0xFFu) +#define GET_APIC_MAXLVT(x) (((x) >> 16) & 0xFFu) +#define SET_APIC_VERSION(x) ((x) & 0xFF) +#define SET_APIC_MAXLVT(x) (((x) & 0xff) << 16) +#if 0 +# define APIC_INTEGRATED(x) ((x) & 0xF0u) +#else +# define APIC_INTEGRATED(x) (1) +#endif +#define APIC_XAPIC(x) ((x) >= 0x14) +#define APIC_EXT_SPACE(x) ((x) & 0x80000000) +#define APIC_TASKPRI 0x80 +#define APIC_TPRI_MASK 0xFFu +#define APIC_ARBPRI 0x90 +#define APIC_ARBPRI_MASK 0xFFu +#define APIC_PROCPRI 0xA0 +#define APIC_EOI 0xB0 +#define APIC_EOI_ACK 0x0 +#define APIC_RRR 0xC0 +#define APIC_LDR 0xD0 +#define APIC_LDR_MASK (0xFFu << 24) +#define 
GET_APIC_LOGICAL_ID(x) (((x) >> 24) & 0xFFu) +#define SET_APIC_LOGICAL_ID(x) (((x) << 24)) +#define APIC_ALL_CPUS 0xFFu +#define APIC_DFR 0xE0 +#define GET_APIC_DLVR_MODE(x) (((x) >> 28) & 0xF) +#define APIC_DFR_CLUSTER 0x0FFFFFFFul +#define APIC_DFR_FLAT 0xFFFFFFFFul +#define APIC_SPIV 0xF0 +#define APIC_SPIV_DIRECTED_EOI (1 << 12) +#define APIC_SPIV_FOCUS_DISABLED (1 << 9) +#define APIC_SPIV_APIC_ENABLED (1 << 8) +#define APIC_SOFT_ENABLED(x) ((x) & APIC_SPIV_APIC_ENABLED) +#define APIC_FOCUS_DISABLED(x) ((x) & APIC_SPIV_FOCUS_DISABLED) +#define APIC_SPIV_SPURIOUS_VECT 0x000FF +#define GET_SPURIOUS_VECTOR(x) ((x) & APIC_SPIV_SPURIOUS_VECT) +#define SET_SPURIOUS_VECTOR(x) ((x) & APIC_SPIV_SPURIOUS_VECT) +#define APIC_ISR 0x100 +#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */ +#define APIC_TMR 0x180 +#define APIC_IRR 0x200 +#define APIC_ESR 0x280 +#define APIC_ESR_SEND_CS 0x00001 +#define APIC_ESR_RECV_CS 0x00002 +#define APIC_ESR_SEND_ACC 0x00004 +#define APIC_ESR_RECV_ACC 0x00008 +#define APIC_ESR_SENDILL 0x00020 +#define APIC_ESR_RECVILL 0x00040 +#define APIC_ESR_ILLREGA 0x00080 +#define APIC_LVTCMCI 0x2f0 +#define APIC_ICR 0x300 +#define APIC_DEST_SELF 0x40000 +#define APIC_DEST_ALLINC 0x80000 +#define APIC_DEST_ALLBUT 0xC0000 +#define APIC_ICR_RR_MASK 0x30000 +#define APIC_ICR_RR_INVALID 0x00000 +#define APIC_ICR_RR_INPROG 0x10000 +#define APIC_ICR_RR_VALID 0x20000 +#define APIC_INT_LEVELTRIG 0x08000 +#define APIC_INT_ASSERT 0x04000 +#define APIC_ICR_BUSY 0x01000 +#define APIC_DEST_LOGICAL 0x00800 +#define APIC_DEST_PHYSICAL 0x00000 +#define APIC_DM_FIXED 0x00000 +#define APIC_DM_LOWEST 0x00100 +#define APIC_DM_SMI 0x00200 +#define APIC_DM_REMRD 0x00300 +#define APIC_DM_NMI 0x00400 +#define APIC_DM_INIT 0x00500 +#define APIC_DM_STARTUP 0x00600 +#define APIC_DM_EXTINT 0x00700 +#define APIC_VECTOR_MASK 0x000FF +#define APIC_ICR2 0x310 +#define GET_APIC_DEST_FIELD(x) (((x) >> 24) & 0xFF) +#define SET_APIC_DEST_FIELD(x) ((x) << 24) +#define APIC_LVTT 
0x320 +#define APIC_LVTTHMR 0x330 +#define APIC_LVTPC 0x340 +#define APIC_LVT0 0x350 +#define APIC_LVT_TIMER_BASE_MASK (0x3 << 18) +#define GET_APIC_TIMER_BASE(x) (((x) >> 18) & 0x3) +#define SET_APIC_TIMER_BASE(x) (((x) << 18)) +#define APIC_TIMER_BASE_CLKIN 0x0 +#define APIC_TIMER_BASE_TMBASE 0x1 +#define APIC_TIMER_BASE_DIV 0x2 +#define APIC_LVT_TIMER_PERIODIC (1 << 17) +#define APIC_LVT_MASKED (1 << 16) +#define APIC_LVT_LEVEL_TRIGGER (1 << 15) +#define APIC_LVT_REMOTE_IRR (1 << 14) +#define APIC_INPUT_POLARITY (1 << 13) +#define APIC_SEND_PENDING (1 << 12) +#define APIC_MODE_MASK 0x700 +#define GET_APIC_DELIVERY_MODE(x) (((x) >> 8) & 0x7) +#define SET_APIC_DELIVERY_MODE(x, y) (((x) & ~0x700) | ((y) << 8)) +#define APIC_MODE_FIXED 0x0 +#define APIC_MODE_NMI 0x4 +#define APIC_MODE_EXTINT 0x7 +#define APIC_LVT1 0x360 +#define APIC_LVTERR 0x370 +#define APIC_TMICT 0x380 +#define APIC_TMCCT 0x390 +#define APIC_TDCR 0x3E0 +#define APIC_SELF_IPI 0x3F0 +#define APIC_TDR_DIV_TMBASE (1 << 2) +#define APIC_TDR_DIV_1 0xB +#define APIC_TDR_DIV_2 0x0 +#define APIC_TDR_DIV_4 0x1 +#define APIC_TDR_DIV_8 0x2 +#define APIC_TDR_DIV_16 0x3 +#define APIC_TDR_DIV_32 0x8 +#define APIC_TDR_DIV_64 0x9 +#define APIC_TDR_DIV_128 0xA +#if 0 +#define APIC_EFEAT 0x400 +#define APIC_ECTRL 0x410 +#define APIC_EILVTn(n) (0x500 + 0x10 * n) +#define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */ +#define APIC_EILVT_NR_AMD_10H 4 +#define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF) +#define APIC_EILVT_MSG_FIX 0x0 +#define APIC_EILVT_MSG_SMI 0x2 +#define APIC_EILVT_MSG_NMI 0x4 +#define APIC_EILVT_MSG_EXT 0x7 +#define APIC_EILVT_MASKED (1 << 16) +#endif +#define APIC_NM_TIMER_LVTT 0xf00 +#define APIC_NM_TIMER_INIT_COUNT 0xf10 +#define APIC_NM_TIMER_CURRENT_COUNT 0xf20 +#define APIC_NM_TIMER_DIVIDER 0xf30 +#define APIC_LVT2 0xf40 +#define APIC_LVT3 0xf50 +#define APIC_DSP APIC_LVT3 +#define APIC_LVT4 0xf60 +#define APIC_M_ERM 0xfc0 +#define APIC_NM_WATCHDOG 0x80000000 +#define APIC_NM_WATCHDOG1 
0x40000000 +#define APIC_NM_SPECIAL 0x20000 +#define APIC_NM_TIMER 0x10000 +#define APIC_NM_NMI_DEBUG_MASK 0x8000 +#define APIC_NM_INTQLAPIC_MASK 0x4000 +#define APIC_NM_INT_VIOLAT_MASK 0x2000 +#define APIC_NM 0xfe0 +#define APIC_NM_BIT_MASK 0x7ff00 +#define APIC_NM_PCI 0x40000 +#define APIC_NM_SPECIAL 0x20000 +#define APIC_NM_TIMER 0x10000 +#define APIC_NM_NMI_DEBUG 0x8000 +#define APIC_NM_INTQLAPIC 0x4000 +#define APIC_NM_INT_VIOLAT 0x2000 +#define APIC_NM_STARTUP 0x1000 +#define APIC_NM_INIT 0x0800 +#define APIC_NM_NMI 0x0400 +#define APIC_NM_SMI 0x0200 +#define APIC_NM_EXTINT 0x0100 +#define APIC_NM_STARTUP_ADDR 0x00ff +#define GET_APIC_STARTUP_ADDR(x) ((x) & APIC_NM_STARTUP_ADDR) +#define APIC_NM_MASK(x) ((x) & APIC_NM_BIT_MASK) +#define GET_APIC_NM_BITS(x) (((x) & APIC_NM_BIT_MASK) >> 9) +#define APIC_NM_IS_STRATUP(x) ((x) & APIC_NM_STARTUP) +#define APIC_NM_IS_INIT(x) ((x) & APIC_NM_INIT) +#define APIC_NM_IS_NMI(x) ((x) & APIC_NM_NMI) +#define APIC_NM_IS_SMI(x) ((x) & APIC_NM_SMI) +#define APIC_VECT 0xff0 +#define APIC_VECT_VECTOR_MASK 0x000000ff +#define APIC_VECT_EXTINT (1 << 31) +#define APIC_VECT_VECTOR(x) ((x) & APIC_VECT_VECTOR_MASK) +#define APIC_VECT_IS_EXTINT(x) ((x) & APIC_VECT_EXTINT) + +#if 0 +#define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) +#define APIC_BASE_MSR 0x800 +#else +#define APIC_BASE 0x00000000fee00000UL +#endif +#define X2APIC_ENABLE (1UL << 10) + +/* + * a maximum number of IO-APICs depends on the following: + * each IO link can have IOHUB with IO-APIC + * each node can have embedded IO-APIC + */ +#define MAX_IO_APICS (MAX_NUMIOLINKS + MAX_NUMNODES) +#define MAX_LOCAL_APIC MAX_APICS +#if 0 +#ifdef CONFIG_L_X86_32 +# define MAX_IO_APICS 64 +# define MAX_LOCAL_APIC 256 +#else +# define MAX_IO_APICS 128 +# define MAX_LOCAL_APIC 32768 +#endif +#endif + +/* + * All x86-64 systems are xAPIC compatible. + * In the following, "apicid" is a physical APIC ID. 
+ */ +#define XAPIC_DEST_CPUS_SHIFT 4 +#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1) +#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT) +#define APIC_CLUSTER(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) +#define APIC_CLUSTERID(apicid) (APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT) +#define APIC_CPUID(apicid) ((apicid) & XAPIC_DEST_CPUS_MASK) +#define NUM_APIC_CLUSTERS ((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT) + +#if 0 +#ifndef __ASSEMBLY__ +/* + * the local APIC register structure, memory mapped. Not terribly well + * tested, but we might eventually use this one in the future - the + * problem why we cannot use it right now is the P5 APIC, it has an + * errata which cannot take 8-bit reads and writes, only 32-bit ones ... + */ +#define u32 unsigned int + +struct local_apic { + +/*000*/ struct { u32 __reserved[4]; } __reserved_01; + +/*010*/ struct { u32 __reserved[4]; } __reserved_02; + +/*020*/ struct { /* APIC ID Register */ + u32 __reserved_1 : 24, + phys_apic_id : 4, + __reserved_2 : 4; + u32 __reserved[3]; + } id; + +/*030*/ const + struct { /* APIC Version Register */ + u32 version : 8, + __reserved_1 : 8, + max_lvt : 8, + __reserved_2 : 8; + u32 __reserved[3]; + } version; + +/*040*/ struct { u32 __reserved[4]; } __reserved_03; + +/*050*/ struct { u32 __reserved[4]; } __reserved_04; + +/*060*/ struct { u32 __reserved[4]; } __reserved_05; + +/*070*/ struct { u32 __reserved[4]; } __reserved_06; + +/*080*/ struct { /* Task Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } tpr; + +/*090*/ const + struct { /* Arbitration Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } apr; + +/*0A0*/ const + struct { /* Processor Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } ppr; + +/*0B0*/ struct { /* End Of Interrupt Register */ + u32 eoi; + u32 __reserved[3]; + } eoi; + +/*0C0*/ struct { u32 
__reserved[4]; } __reserved_07; + +/*0D0*/ struct { /* Logical Destination Register */ + u32 __reserved_1 : 24, + logical_dest : 8; + u32 __reserved_2[3]; + } ldr; + +/*0E0*/ struct { /* Destination Format Register */ + u32 __reserved_1 : 28, + model : 4; + u32 __reserved_2[3]; + } dfr; + +/*0F0*/ struct { /* Spurious Interrupt Vector Register */ + u32 spurious_vector : 8, + apic_enabled : 1, + focus_cpu : 1, + __reserved_2 : 22; + u32 __reserved_3[3]; + } svr; + +/*100*/ struct { /* In Service Register */ +/*170*/ u32 bitfield; + u32 __reserved[3]; + } isr [8]; + +/*180*/ struct { /* Trigger Mode Register */ +/*1F0*/ u32 bitfield; + u32 __reserved[3]; + } tmr [8]; + +/*200*/ struct { /* Interrupt Request Register */ +/*270*/ u32 bitfield; + u32 __reserved[3]; + } irr [8]; + +/*280*/ union { /* Error Status Register */ + struct { + u32 send_cs_error : 1, + receive_cs_error : 1, + send_accept_error : 1, + receive_accept_error : 1, + __reserved_1 : 1, + send_illegal_vector : 1, + receive_illegal_vector : 1, + illegal_register_address : 1, + __reserved_2 : 24; + u32 __reserved_3[3]; + } error_bits; + struct { + u32 errors; + u32 __reserved_3[3]; + } all_errors; + } esr; + +/*290*/ struct { u32 __reserved[4]; } __reserved_08; + +/*2A0*/ struct { u32 __reserved[4]; } __reserved_09; + +/*2B0*/ struct { u32 __reserved[4]; } __reserved_10; + +/*2C0*/ struct { u32 __reserved[4]; } __reserved_11; + +/*2D0*/ struct { u32 __reserved[4]; } __reserved_12; + +/*2E0*/ struct { u32 __reserved[4]; } __reserved_13; + +/*2F0*/ struct { u32 __reserved[4]; } __reserved_14; + +/*300*/ struct { /* Interrupt Command Register 1 */ + u32 vector : 8, + delivery_mode : 3, + destination_mode : 1, + delivery_status : 1, + __reserved_1 : 1, + level : 1, + trigger : 1, + __reserved_2 : 2, + shorthand : 2, + __reserved_3 : 12; + u32 __reserved_4[3]; + } icr1; + +/*310*/ struct { /* Interrupt Command Register 2 */ + union { + u32 __reserved_1 : 24, + phys_dest : 4, + __reserved_2 : 4; + u32 
__reserved_3 : 24, + logical_dest : 8; + } dest; + u32 __reserved_4[3]; + } icr2; + +/*320*/ struct { /* LVT - Timer */ + u32 vector : 8, + __reserved_1 : 4, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + timer_mode : 1, + __reserved_3 : 14; + u32 __reserved_4[3]; + } lvt_timer; + +/*330*/ struct { /* LVT - Thermal Sensor */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + u32 __reserved_4[3]; + } lvt_thermal; + +/*340*/ struct { /* LVT - Performance Counter */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + u32 __reserved_4[3]; + } lvt_pc; + +/*350*/ struct { /* LVT - LINT0 */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + polarity : 1, + remote_irr : 1, + trigger : 1, + mask : 1, + __reserved_2 : 15; + u32 __reserved_3[3]; + } lvt_lint0; + +/*360*/ struct { /* LVT - LINT1 */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + polarity : 1, + remote_irr : 1, + trigger : 1, + mask : 1, + __reserved_2 : 15; + u32 __reserved_3[3]; + } lvt_lint1; + +/*370*/ struct { /* LVT - Error */ + u32 vector : 8, + __reserved_1 : 4, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + u32 __reserved_4[3]; + } lvt_error; + +/*380*/ struct { /* Timer Initial Count Register */ + u32 initial_count; + u32 __reserved_2[3]; + } timer_icr; + +/*390*/ const + struct { /* Timer Current Count Register */ + u32 curr_count; + u32 __reserved_2[3]; + } timer_ccr; + +/*3A0*/ struct { u32 __reserved[4]; } __reserved_16; + +/*3B0*/ struct { u32 __reserved[4]; } __reserved_17; + +/*3C0*/ struct { u32 __reserved[4]; } __reserved_18; + +/*3D0*/ struct { u32 __reserved[4]; } __reserved_19; + +/*3E0*/ struct { /* Timer Divide Configuration Register */ + u32 divisor : 4, + __reserved_1 : 28; + u32 __reserved_2[3]; + } 
timer_dcr; + +/*3F0*/ struct { u32 __reserved[4]; } __reserved_20; + +} __attribute__ ((packed)); + +#undef u32 +#endif /* __ASSEMBLY__ */ +#endif + +#if 0 +#ifdef CONFIG_L_X86_32 + #define BAD_APICID 0xFFu +#else + #define BAD_APICID 0xFFFFu +#endif +#else + #define BAD_APICID 0xFFu +#endif + +#ifndef __ASSEMBLY__ +enum ioapic_irq_destination_types { + dest_Fixed = 0, + dest_LowestPrio = 1, + dest_SMI = 2, + dest__reserved_1 = 3, + dest_NMI = 4, + dest_INIT = 5, + dest__reserved_2 = 6, + dest_ExtINT = 7 +}; +#endif + +#endif /* _ASM_L_APICDEF_H */ diff --git a/arch/l/include/asm/boot_profiling.h b/arch/l/include/asm/boot_profiling.h new file mode 100644 index 000000000000..a03efb9c620f --- /dev/null +++ b/arch/l/include/asm/boot_profiling.h @@ -0,0 +1,35 @@ +#ifndef _ASM_L_BOOT_PROFILING_H +#define _ASM_L_BOOT_PROFILING_H + +#ifdef CONFIG_BOOT_TRACE + +#include + +extern void notrace add_boot_trace_event(const char *fmt, ...); +extern struct boot_tracepoint *boot_trace_prev_event(int cpu, + struct boot_tracepoint *event); +extern struct boot_tracepoint *boot_trace_next_event(int cpu, + struct boot_tracepoint *event); +extern void stop_boot_trace(void); + +# define BOOT_TRACE_ARRAY_SIZE (1500 + 20 * NR_CPUS) + +struct boot_tracepoint { + char name[81]; + unsigned int cpu; + u64 cycles; + struct list_head list; +}; + +extern struct boot_tracepoint boot_trace_events[BOOT_TRACE_ARRAY_SIZE]; +extern struct list_head boot_trace_cpu_events_list[]; +extern atomic_t boot_trace_top_event; +extern int boot_trace_enabled; +# define BOOT_TRACEPOINT(...) add_boot_trace_event(__VA_ARGS__) + +#else /* !CONFIG_BOOT_TRACE */ +# define BOOT_TRACEPOINT(...) 
do { } while(0) +#endif /* CONFIG_BOOT_TRACE */ + +#endif /* _ASM_L_BOOT_PROFILING_H */ + diff --git a/arch/l/include/asm/bootinfo.h b/arch/l/include/asm/bootinfo.h new file mode 100644 index 000000000000..0ff5c6435baa --- /dev/null +++ b/arch/l/include/asm/bootinfo.h @@ -0,0 +1,315 @@ +#ifndef _L_BOOTINFO_H_ +#define _L_BOOTINFO_H_ + +#if defined(__KERNEL__) || defined(__KVM_BOOTINFO_SUPPORT__) + +/* + * 0x0: + * 0x1: extended command line + */ +#define BOOTBLOCK_VER 0x1 + +#define KSTRMAX_SIZE 128 +#define KSTRMAX_SIZE_EX 512 +#define BIOS_INFO_SIGN_SIZE 8 +#define KERNEL_ARGS_STRING_EX_SIGN_SIZE 22 +#define BOOT_VER_STR_SIZE 128 +#define BOOTBLOCK_SIZE 0x1000 /* 1 PAGE_SIZE */ +#define X86BOOT_SIGNATURE 0x8086 +#define ROMLOADER_SIGNATURE 0xe200 +#define KVM_GUEST_SIGNATURE 0x20e2 +#define BIOS_INFO_SIGNATURE "E2KBIOS" +#define KVM_INFO_SIGNATURE "E2KKVM" +#define KERNEL_ARGS_STRING_EX_SIGNATURE "KERNEL_ARGS_STRING_EX" +#define BOOT_KERNEL_ARGS_STRING_EX_SIGNATURE \ + boot_va_to_pa(KERNEL_ARGS_STRING_EX_SIGNATURE) + +/* + * Below is boot information that comes out of the x86 code of Linux/E2K + * loader proto. + */ + +/* L_MAX_NODE_PHYS_BANKS = 4 sometimes is not enough, so we increase it to + * an arbitary value (8 now). The old L_MAX_NODE_PHYS_BANKS we rename to + * L_MAX_NODE_PHYS_BANKS_FUSTY and take in mind for boot_info compatibility. + * + * L_MAX_NODE_PHYS_BANKS_FUSTY and L_MAX_MEM_NUMNODES describe max size of + * array of memory banks on all nodes and should be in accordance with old value + * of L_MAX_PHYS_BANKS for compatibility with boot_info old structure (bank) + * size, so L_MAX_NODE_PHYS_BANKS_FUSTY * L_MAX_MEM_NUMNODES should be + * equal to 32. 
+ */ +#define L_MAX_NODE_PHYS_BANKS 64 /* max number of memory banks */ + /* on one node */ +#define L_MAX_NODE_PHYS_BANKS_FUSTY 4 /* fusty max number of memory */ + /* banks on one node */ +#define L_MAX_PHYS_BANKS_EX 64 /* max number of memory banks */ + /* in banks_ex field of */ + /* boot_info */ +#define L_MAX_MEM_NUMNODES 8 /* max number of nodes in the */ + /* list of memory banks on */ + /* each node */ +#define L_MAX_BUSY_AREAS 4 /* max number of busy areas */ + /* occupied by BIOS and should be */ + /* kept unchanged by kernel to */ + /* support recovery mode */ + +#ifndef __ASSEMBLY__ + +typedef struct bank_info { + __u64 address; /* start address of bank */ + __u64 size; /* size of bank in bytes */ +} bank_info_t; + +typedef struct node_banks { + bank_info_t banks[L_MAX_NODE_PHYS_BANKS_FUSTY]; /* memory banks array */ + /* of a node */ +} node_banks_t; + +typedef struct boot_times { + __u64 arch; + __u64 unpack; + __u64 pci; + __u64 drivers1; + __u64 drivers2; + __u64 menu; + __u64 sm; + __u64 kernel; + __u64 reserved[8]; +} boot_times_t; + +typedef struct bios_info { + __u8 signature[BIOS_INFO_SIGN_SIZE]; /* signature, */ + /* 'E2KBIOS' */ + __u8 boot_ver[BOOT_VER_STR_SIZE]; /* boot version */ + __u8 mb_type; /* mother board type */ + __u8 chipset_type; /* chipset type */ + __u8 cpu_type; /* cpu type */ + __u8 kernel_args_string_ex[KSTRMAX_SIZE_EX]; /* extended command */ + /* line of kernel */ + /* used to pass */ + /* command line */ + /* from e2k BIOS */ + __u8 reserved1; /* reserved1 */ + __u32 cache_lines_damaged; /* number of damaged */ + /* cache lines */ + __u64 nodes_mem_slabs_deprecated[52]; /* array of slabs */ + /* accessible memory */ + /* on each node */ + /* accessible memory */ + /* on each node */ + bank_info_t banks_ex[L_MAX_PHYS_BANKS_EX]; /* extended array of */ + /* descriptors of */ + /* banks of available */ + /* physical memory */ + __u64 devtree; /* devtree pointer */ + __u32 bootlog_addr; /* bootlog address */ + __u32 
bootlog_len; /* bootlog length */ + __u8 uuid[16]; /* UUID boot device */ +} bios_info_t; + +typedef struct boot_info { + __u16 signature; /* signature, 0x8086 */ + __u8 target_mdl; /* target cpu model number */ + __u8 reserved1; /* reserved1 */ + __u16 reserved2; /* reserved2 */ + __u8 vga_mode; /* vga mode */ + __u8 num_of_banks; /* number of available physical memory banks */ + /* see below bank array */ + /* total number on all nodes or 0 */ + __u64 kernel_base; /* base address to load kernel image */ + /* if 0 then BIOS can load at any address */ + /* but address should be large page size */ + /* aligned - 4 Mb */ + __u64 kernel_size; /* kernel image byte's size */ + __u64 ramdisk_base; /* base address to load RAM-disk */ + /* now not used */ + __u64 ramdisk_size; /* RAM-disk byte's size */ + + __u16 num_of_cpus; /* number of started physical CPU(s) */ + __u16 mach_flags; /* machine identifacition flags */ + /* should be set by our romloader and BIOS */ + __u16 num_of_busy; /* number of busy areas occupied by BIOS */ + /* see below busy array */ + __u16 num_of_nodes; /* number of nodes on NUMA system */ + __u64 mp_table_base; /* MP-table base address */ + __u64 serial_base; /* base address of serial port for Am85c30 */ + /* Used for debugging purpose */ + __u64 nodes_map; /* online nodes map */ + __u64 mach_serialn; /* serial number of the machine */ + __u8 mac_addr[6]; /* base MAC address for ethernet cards */ + __u16 reserved3; /* reserved3 */ + + char kernel_args_string[KSTRMAX_SIZE]; /* command line of kernel */ + /* used to pass command line */ + /* from e2k BIOS */ + node_banks_t nodes_mem[L_MAX_MEM_NUMNODES]; /* array of */ + /* descriptors of banks of */ + /* available physical memory */ + /* on each node */ + bank_info_t busy[L_MAX_BUSY_AREAS]; /* descriptors of areas */ + /* occupied by BIOS, all this */ + /* shoud be kept in system */ + /* recovery mode */ + u64 cntp_info_deprecated[32]; /* control points */ + /* info to save and */ + /* restore 
them state */ + u64 dmp_deprecated[20]; /* Info for future work of */ + /* dump analyzer */ + __u64 reserved4[13]; /* reserved4 */ + __u8 mb_name[16]; /* Motherboard product name */ + __u32 reserved5; /* reserved5 */ + __u32 kernel_csum; /* kernel image control sum */ + bios_info_t bios; /* extended BIOS info */ + /* SHOULD BE LAST ITEM into this */ + /* structure */ +} boot_info_t; + +typedef struct bootblock_struct { + boot_info_t info; /* general kernel<->BIOS info */ + __u8 /* zip area to make size of */ + /* bootblock struct - constant */ + gap[BOOTBLOCK_SIZE - + sizeof (boot_info_t) - + sizeof (boot_times_t) - + 1 - /* u8 : bootblock_ver */ + 4 - /* u32 : reserved1 */ + 2 - /* u16 : kernel_flags */ + 1 - /* u8 : reserved2 */ + 5 - /* u8 : number of cnt points */ + /* u8 : current # of cnt point */ + /* u8 : number of cnt points */ + /* ready in the memory */ + /* u8 : number of cnt points */ + /* saved on the disk */ + /* u8 : all control points */ + /* is created */ + 8 - /* u64 : dump sector */ + 8 - /* u64 : cnt point sector */ + 2 - /* u16 : dump device */ + 2 - /* u16 : cnt point device */ + 2 - /* u16 : boot_flags */ + 2]; /* u16 : x86_marker */ + __u8 bootblock_ver; /* bootblock version number */ + __u32 reserved1; /* reserved1 */ + boot_times_t boot_times; /* boot load times */ + __u16 kernel_flags; /* kernel flags, boot should */ + /* not modify it */ + __u8 reserved2; /* reserved2 */ + + __u8 cnt_points_num_deprecated; /* number of control points */ + /* all memory will be devided */ + /* on this number of parts */ + __u8 cur_cnt_point_deprecated; /* current # of active */ + /* control point (running */ + /* part) */ + __u8 mem_cnt_points_deprecated; /* number of started control */ + /* points (ready in the memory) */ + __u8 disk_cnt_points_deprecated; /* number of control points */ + /* saved on the disk (ready */ + /* to be loaded from disk) */ + __u8 cnt_points_created_deprecated; /* all control points created */ + /* in the memory and on disk */ 
+ __u64 dump_sector_deprecated; /* start sector # to dump */ + /* physical memory */ + __u64 cnt_point_sector_deprecated; /* start sector # to save */ + /* restore control points */ + __u16 dump_dev_deprecated; /* disk # to dump memory */ + __u16 cnt_point_dev_deprecated; /* disk # for save/restore */ + /* control point */ + + __u16 boot_flags; /* boot flags: if non */ + /* zero then this structure */ + /* is recovery info */ + /* structure instead of boot */ + /* info structure */ + __u16 x86_marker; /* marker of the end of x86 */ + /* boot block (0xAA55) */ +} bootblock_struct_t; + +extern bootblock_struct_t *bootblock_virt; /* bootblock structure */ + /* virtual pointer */ +#endif /* ! __ASSEMBLY__ */ + +/* + * Boot block flags to elaborate boot modes + */ + +#define RECOVERY_BB_FLAG 0x0001 /* recovery flag: if non zero then */ + /* this structure is recovery info */ + /* structure instead of boot info */ + /* structure */ + /* BIOS should not clear memory */ + /* and should keep current state of */ + /* physical memory */ +#define CNT_POINT_BB_FLAG 0x0002 /* kernel restarted in the mode of */ + /* control point creation */ + /* BIOS should read kernel image from */ + /* the disk to the specified area of */ + /* the memory and start kernel (this */ + /* flag should be with */ + /* RECOVERY_BB_FLAG flag) */ +#define NO_READ_IMAGE_BB_FLAG 0x0004 /* BIOS should not read kernel image */ + /* from disk and start current */ + /* image in the specified area of */ + /* the memory (this flag should be */ + /* with RECOVERY_BB_FLAG flag) */ +#define DUMP_ANALYZE_BB_FLAG 0x0008 /* This flag is used only by kernel */ + /* to indicate dump analyzer mode */ +#define MEMORY_DUMP_BB_FLAG 0x0010 /* BIOS should dump all physical */ + /* memory before start all other */ + /* actions */ + +/* + * The machine identification flags + */ + +#define SIMULATOR_MACH_FLAG 0x0001 /* system is running on */ + /* simulator */ +#define PROTOTYPE_MACH_FLAG_DEPRECATED 0x0002 /* machine is 
prototype */ +#define IOHUB_MACH_FLAG 0x0004 /* machine has IOHUB */ +#define OLDMGA_MACH_FLAG 0x0008 /* MGA card has old firmware */ +#define MULTILINK_MACH_FLAG 0x0010 /* some nodes are connected */ + /* by sevral IP links */ +#define MSI_MACH_FLAG 0x0020 /* boot inits right values in */ + /* apic to support MSI. */ + /* Meanfull for e2k only. For */ + /* v9 it always true */ +#define KVM_GUEST_MACH_FLAG 0x0100 /* system is running */ + /* as KVM guest */ + +/* + * The chipset types + */ + +#define CHIPSET_TYPE_PIIX4 0x01 /* PIIX4 */ +#define CHIPSET_TYPE_IOHUB 0x02 /* IOHUB */ + +/* + * The chipset types names + */ + +#define GET_CHIPSET_TYPE_NAME(type) \ +({ \ + char *name; \ + \ + switch (type) { \ + case CHIPSET_TYPE_PIIX4: \ + name = "PIIX4"; \ + break; \ + case CHIPSET_TYPE_IOHUB: \ + name = "IOHUB"; \ + break; \ + default: \ + name = "?????"; \ + } \ + \ + name; \ +}) + +extern char *mcst_mb_name; + +#endif /* __KERNEL__ || __KVM_BOOTINFO_SUPPORT__ */ + +#endif /* _L_BOOTINFO_H_ */ + diff --git a/arch/l/include/asm/clk_rt.h b/arch/l/include/asm/clk_rt.h new file mode 100644 index 000000000000..b1aea1d26340 --- /dev/null +++ b/arch/l/include/asm/clk_rt.h @@ -0,0 +1,22 @@ +#ifndef _ASM_L_CLK_RT_H +#define _ASM_L_CLK_RT_H + +#define CLK_RT_NO 0 +#define CLK_RT_RTC 1 +#define CLK_RT_EXT 2 +#define CLK_RT_RESUME 3 + +extern struct clocksource clocksource_clk_rt; + +extern int clk_rt_mode; +extern atomic_t num_clk_rt_register; +extern int clk_rt_register(void *); +extern struct clocksource clocksource_clk_rt; +extern int proc_clk_rt(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern int read_clk_rt_freq(void); +extern void clk_rt_set_mode(void *mode_arg); +extern u64 raw_read_clk_rt(void); +extern struct clocksource lt_cs; +extern struct clocksource *curr_clocksource; +#endif diff --git a/arch/l/include/asm/clkr.h b/arch/l/include/asm/clkr.h new file mode 100644 index 000000000000..c6136a4cd1e6 --- /dev/null +++ b/arch/l/include/asm/clkr.h @@ 
-0,0 +1,6 @@ +#ifndef _ASM_L_CLKR_H +#define _ASM_L_CLKR_H + +extern struct clocksource clocksource_clkr; + +#endif diff --git a/arch/l/include/asm/console.h b/arch/l/include/asm/console.h new file mode 100644 index 000000000000..3c4c90047ae1 --- /dev/null +++ b/arch/l/include/asm/console.h @@ -0,0 +1,59 @@ + +#ifndef _L_CONSOLE_H_ +#define _L_CONSOLE_H_ + +#ifndef __ASSEMBLY__ +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_SERIAL_PRINTK +# ifdef CONFIG_SERIAL_AM85C30_CONSOLE +extern serial_console_opts_t am85c30_serial_console; +# endif + +extern serial_console_opts_t *serial_console_opts; +# define opts_entry(opts, member) opts->member +# define serial_console_opts_entry(entry) opts_entry(serial_console_opts, entry) + +extern unsigned char serial_dump_console_num; + +extern void *get_serial_console_io_base(void); + +extern void setup_serial_dump_console(boot_info_t *); +#endif /* CONFIG_SERIAL_PRINTK */ + +#ifdef CONFIG_L_EARLY_PRINTK +extern void dump_printk(char const *fmt_v, ...); +extern void dump_vprintk(char const *fmt, va_list ap); +extern void dump_puts(const char *s); +extern void dump_putns(const char *s, int n); + +# ifdef CONFIG_EARLY_DUMP_CONSOLE +extern void register_early_dump_console(void); +# else +static inline void register_early_dump_console(void) { }; +# endif + +# ifdef CONFIG_EARLY_PRINTK +extern int switch_to_early_dump_console(void); +extern void switch_from_early_dump_console(void); +# endif + +#else /* !CONFIG_L_EARLY_PRINTK */ +# define dump_printk printk +# define dump_vprintk vprintk +# define dump_puts(s) printk("%s", (s)) +static inline void register_early_dump_console(void) { }; + +#endif /* CONFIG_L_EARLY_PRINTK */ + +#if defined(CONFIG_SERIAL_AM85C30_CONSOLE) && defined(CONFIG_SERIAL_L_ZILOG) +extern raw_spinlock_t *uap_a_reg_lock; +#endif + +#endif /* __ASSEMBLY__ */ +#endif /* _L_CONSOLE_H_ */ diff --git a/arch/l/include/asm/console_types.h b/arch/l/include/asm/console_types.h new file mode 100644 
index 000000000000..c27a654ef55a --- /dev/null +++ b/arch/l/include/asm/console_types.h @@ -0,0 +1,31 @@ +#ifndef _L_CONSOLE_TYPES_H_ +#define _L_CONSOLE_TYPES_H_ + +#ifndef __ASSEMBLY__ +#include +#ifdef CONFIG_E2K +# include +extern boot_spinlock_t vprint_lock; +#endif + +#define L_LMS_CONS_DATA_PORT LMS_CONS_DATA_PORT +#define L_LMS_CONS_STATUS_PORT LMS_CONS_STATUS_PORT + +#define SERIAL_CONSOLE_8250_NAME "8250" + +#if defined CONFIG_SERIAL_PRINTK || defined CONFIG_SERIAL_BOOT_PRINTK +# define SERIAL_CONSOLE_16550_NAME "ns16550" +# define SERIAL_CONSOLE_AM85C30_NAME "AM85C30" + +typedef struct serial_console_opts_ { + char* name; + unsigned long long io_base; + unsigned char (*serial_getc)(void); + int (*serial_tstc)(void); + int (*init)(void *serial_io_base); + void (*serial_putc)(unsigned char c); +} serial_console_opts_t; +#endif /* SERIAL_PRINTK || SERIAL_BOOT_PRINTK */ + +#endif /* __ASSEMBLY__ */ +#endif /* _L_CONSOLE_H_ */ diff --git a/arch/l/include/asm/devtree.h b/arch/l/include/asm/devtree.h new file mode 100644 index 000000000000..9727c8627839 --- /dev/null +++ b/arch/l/include/asm/devtree.h @@ -0,0 +1,13 @@ +#ifndef _ASM_L_DEVTREE_H +#define _ASM_L_DEVTREE_H +#include +int device_tree_init(void); +void get_dtb_from_boot(u8*, u32); +u32 get_dtb_size(void); +extern int devtree_detected; + +#ifdef CONFIG_DTB_L_TEST +extern unsigned char test_blob[]; +#endif + +#endif /* _ASM_L_DEVTREE_H */ diff --git a/arch/l/include/asm/dma-direct.h b/arch/l/include/asm/dma-direct.h new file mode 100644 index 000000000000..2dfbc53801b2 --- /dev/null +++ b/arch/l/include/asm/dma-direct.h @@ -0,0 +1,29 @@ +#ifndef ___ASM_L_DMA_DIRECT_H +#define ___ASM_L_DMA_DIRECT_H + +static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) +{ + if (!dev) /* caller knows better */ + return true; + if (!dev->dma_mask) + return false; +#if defined(CONFIG_E2K) && defined(CONFIG_NUMA) + if (cpu_has(CPU_HWBUG_CANNOT_DO_DMA_IN_NEIGHBOUR_NODE)) { + if 
(page_to_nid(phys_to_page(addr)) != dev_to_node(dev)) + return false; + } +#endif + return addr + size - 1 <= *dev->dma_mask; +} + +static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr) +{ + return paddr; +} + +static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr) +{ + return daddr; +} + +#endif /* ___ASM_L_DMA_DIRECT_H */ diff --git a/arch/l/include/asm/dma-mapping.h b/arch/l/include/asm/dma-mapping.h new file mode 100644 index 000000000000..5211538dba61 --- /dev/null +++ b/arch/l/include/asm/dma-mapping.h @@ -0,0 +1,21 @@ +#ifndef ___ASM_L_DMA_MAPPING_H +#define ___ASM_L_DMA_MAPPING_H + +#include +#include +#include + +/* + * No easy way to get cache size on all processors + * so return the maximum possible to be safe. + */ +#define ARCH_DMA_MINALIGN (1 << INTERNODE_CACHE_SHIFT) + +extern const struct dma_map_ops *dma_ops; + +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) +{ + return dma_ops; +} + +#endif /* ___ASM_L_DMA_MAPPING_H */ diff --git a/arch/l/include/asm/epic.h b/arch/l/include/asm/epic.h new file mode 100644 index 000000000000..f1eaa0eba911 --- /dev/null +++ b/arch/l/include/asm/epic.h @@ -0,0 +1,117 @@ +#ifndef __ASM_L_EPIC_H +#define __ASM_L_EPIC_H + +#ifdef __KERNEL__ +#include +#include + +extern unsigned int early_prepic_node_read_w(int node, unsigned int reg); +extern void early_prepic_node_write_w(int node, unsigned int reg, + unsigned int v); +extern unsigned int prepic_node_read_w(int node, unsigned int reg); +extern void prepic_node_write_w(int node, unsigned int reg, unsigned int v); + +/* + * Verbosity can be turned on by passing 'epic_debug' cmdline parameter + * epic_debug is defined in epic.c + */ +extern bool epic_debug; +#define epic_printk(s, a...) 
do { \ + if (epic_debug) \ + printk(s, ##a); \ + } while (0) + +extern bool epic_bgi_mode; +extern unsigned int cepic_timer_delta; +extern void setup_boot_epic_clock(void); +extern void __init setup_bsp_epic(void); + +/* + * CEPIC_ID register has 10 valid bits: 2 for prepicn (node) and 8 for + * cepicn (core in node). Since currently kernel does not support NR_CPUS > 64, + * we ignore 4 most significant bits of cepicn. + * + * For example, core 0 on node 1 will have full cepic id = 256 and short cepic + * id = 16 + */ +static inline unsigned int cepic_id_full_to_short(unsigned int reg_value) +{ + union cepic_id reg_id; + + reg_id.raw = reg_value; + reg_id.bits.cepicn_reserved = 0; + return reg_id.bits.prepicn << CEPIC_ID_SHORT_VALID_BITS + | reg_id.bits.cepicn; +} + +static inline unsigned int cepic_id_short_to_full(unsigned int cepic_id) +{ + union cepic_id reg_id; + + reg_id.raw = 0; + reg_id.bits.cepicn = cepic_id & CEPIC_ID_SHORT_VALID_MASK; + reg_id.bits.prepicn = cepic_id >> CEPIC_ID_SHORT_VALID_BITS; + return reg_id.raw; +} + +static inline unsigned int read_epic_id(void) +{ + return cepic_id_full_to_short(epic_read_w(CEPIC_ID)); +} + +static inline bool read_epic_bsp(void) +{ + union cepic_ctrl reg; + + reg.raw = epic_read_w(CEPIC_CTRL); + return reg.bits.bsp_core; +} + +static inline u32 epic_vector_prio(u32 vector) +{ + return 1 + ((vector >> 8) & 0x3); +} + +extern void __init_recv setup_prepic(void); +extern void ack_epic_irq(void); +extern void epic_send_IPI(unsigned int dest_id, int vector); +extern void epic_send_IPI_mask(const struct cpumask *mask, int vector); +extern void epic_send_IPI_self(int vector); +extern void epic_send_IPI_mask_allbutself(const struct cpumask *mask, + int vector); +extern void epic_wait_icr_idle(void); +extern void clear_cepic(void); + +extern bool pcsm_adjust_enable; + +struct pcs_handle { + void (*pcs_interrupt)(void); +}; + +extern void register_pcs_handle(const struct pcs_handle *handle); +extern void 
unregister_pcs_handle(void); + +extern __visible void epic_smp_timer_interrupt(struct pt_regs *regs); +extern __visible void epic_smp_spurious_interrupt(struct pt_regs *regs); +extern __visible void epic_smp_error_interrupt(struct pt_regs *regs); +extern __visible void prepic_smp_error_interrupt(struct pt_regs *regs); +extern __visible void epic_smp_irq_work_interrupt(struct pt_regs *regs); +extern __visible void cepic_epic_interrupt(struct pt_regs *regs); +extern __visible void epic_hc_emerg_interrupt(struct pt_regs *regs); +extern __visible void epic_iommu_interrupt(struct pt_regs *regs); +extern __visible void epic_uncore_interrupt(struct pt_regs *regs); +extern __visible void epic_ipcc_interrupt(struct pt_regs *regs); +extern __visible void epic_hc_interrupt(struct pt_regs *regs); +extern __visible void epic_pcs_interrupt(struct pt_regs *regs); +#ifdef CONFIG_KVM_ASYNC_PF +extern __visible void epic_pv_apf_wake(struct pt_regs *regs); +#endif /* CONFIG_KVM_ASYNC_PF */ +#ifdef CONFIG_SMP +extern __visible void epic_smp_irq_move_cleanup_interrupt(struct pt_regs *regs); +extern __visible void epic_smp_reschedule_interrupt(struct pt_regs *regs); +extern __visible void epic_smp_call_function_interrupt(struct pt_regs *regs); +extern __visible void epic_smp_call_function_single_interrupt( + struct pt_regs *regs); +#endif +#endif /* __KERNEL__ */ +#endif /* __ASM_L_EPIC_H */ diff --git a/arch/l/include/asm/epic_regs.h b/arch/l/include/asm/epic_regs.h new file mode 100644 index 000000000000..f1c1eded9e91 --- /dev/null +++ b/arch/l/include/asm/epic_regs.h @@ -0,0 +1,669 @@ +#ifndef __ASM_L_EPIC_REGS_H +#define __ASM_L_EPIC_REGS_H + +#include + +#ifndef __ASSEMBLY__ +#ifdef __LITTLE_ENDIAN +union cepic_ctrl { + u32 raw; + struct { + u32 __reserved1 : 8, + bsp_core : 1, + __reserved2 : 1, + soft_en : 1, + __reserved3 : 21; + } __packed bits; +}; + +/* Ignore 4 bits of CEPIC (core) ID so that physical core ID is <= 64 */ +union cepic_id { + u32 raw; + struct { + u32 cepicn : 
4, + cepicn_reserved : 4, + prepicn : 2, + __reserved2 : 22; + } __packed bits; +}; + +union cepic_ctrl2 { + u32 raw; + struct { + u32 mi_gst_blk : 1, + nmi_gst_blk : 1, + int_hv : 1, + __reserved1 : 1, + clear_gst : 1, + __reserved2 : 3, + timer_stop : 1, + __reserved3 : 23; + } __packed bits; +}; + +union cepic_dat { + u64 raw; + struct { + u64 __reserved1 : 6, + dat_cop : 2, + __reserved2 : 4, + stat : 1, + __reserved3 : 7, + index : 10, + __reserved4 : 2, + __reserved5 : 8, + gst_dst : 10, + __reserved6 : 2, + gst_id : 12; + } __packed bits; +}; + +union cepic_epic_int { + u32 raw; + struct { + u32 vect : 10, + __reserved1 : 2, + stat : 1, + __reserved2 : 3, + mask : 1, + __reserved3 : 15; + } __packed bits; +}; + +union cepic_epic_int2 { + u64 raw; + struct { + u64 vect : 10, + dst_sh : 2, + __reserved1 : 1, + dlvm : 3, + __reserved2 : 4, + gst_id : 12, + __reserved3 : 8, + gst_dst : 10, + __reserved4 : 14; + } __packed bits; +}; + +union cepic_cpr { + u32 raw; + struct { + u32 __reserved1 : 8, + cpr : 3, + __reserved2 : 21; + } __packed bits; +}; + +union cepic_esr { + u32 raw; + struct { + u32 __reserved1 : 5, + rq_addr_err : 1, + rq_virt_err : 1, + rq_cop_err : 1, + ms_gstid_err : 1, + ms_virt_err : 1, + ms_err : 1, + ms_icr_err : 1, + __reserved2 : 20; + } __packed bits; +}; + +union cepic_esr2 { + u32 raw; + struct { + u32 vect : 10, + __reserved1 : 2, + stat : 1, + __reserved2 : 3, + mask : 1, + __reserved3 : 15; + } __packed bits; +}; + +union cepic_eoi { + u32 raw; + struct { + u32 __reserved1 : 16, + rcpr : 3, + __reserved2 : 13; + } __packed bits; +}; + +union cepic_cir { + u32 raw; + struct { + u32 vect : 10, + __reserved1 : 2, + stat : 1, + __reserved2 : 19; + } __packed bits; +}; + +union cepic_gstbase_hi { + u32 raw; + struct { + u32 gstbase_hi : 4, + __reserved : 28; + } __packed bits; +}; + +union cepic_gstid { + u32 raw; + struct { + u32 gstid : 12, + __reserved : 20; + } __packed bits; +}; + +union cepic_pnmirr { + u32 raw; + struct { + u32 
startup_entry : 8, + __reserved1 : 1, + smi : 1, + nmi : 1, + init : 1, + startup : 1, + int_violat : 1, + __reserved2 : 2, + nm_timer : 1, + nm_special : 1, + __reserved3 : 14; + } __packed bits; +}; + +union cepic_icr { + u64 raw; + struct { + u64 vect : 10, + dst_sh : 2, + stat : 1, + dlvm : 3, + __reserved1 : 4, + gst_id : 12, + __reserved2 : 8, + dst : 10, + __reserved3 : 14; + } __packed bits; +}; + +union cepic_timer_lvtt { + u32 raw; + struct { + u32 vect : 10, + __reserved1 : 2, + stat : 1, + __reserved2 : 3, + mask : 1, + mode : 1, + __reserved3 : 14; + } __packed bits; +}; + +union cepic_timer_div { + u32 raw; + struct { + u32 divider : 4, + __reserved1 : 28; + } __packed bits; +}; + +union cepic_nm_timer_lvtt { + u32 raw; + struct { + u32 __reserved1 : 17, + mode : 1, + __reserved2 : 14; + } __packed bits; +}; + +union cepic_nm_timer_div { + u32 raw; + struct { + u32 divider : 4, + __reserved1 : 28; + } __packed bits; +}; + +union cepic_svr { + u32 raw; + struct { + u32 vect : 10, + __reserved1 : 22; + } __packed bits; +}; + +union cepic_pnmirr_mask { + u32 raw; + struct { + u32 __reserved1 : 9, + smi : 1, + nmi : 1, + __reserved2 : 2, + int_violat : 1, + __reserved3 : 2, + nm_timer : 1, + nm_special : 1, + __reserved4 : 14; + } __packed bits; +}; + +union cepic_vect_inta { + u32 raw; + struct { + u32 vect : 10, + __reserved1 : 6, + cpr : 3, + __reserved2 : 13; + } __packed bits; +}; + +union prepic_ctrl { + u32 raw; + struct { + u32 __reserved1 : 8, + bsp : 1, + __reserved2 : 2, + epic_en : 1, + __reserved3 : 20; + } __packed bits; +}; + +union prepic_id { + u32 raw; + struct { + u32 __reserved1 : 8, + prepicn : 2, + __reserved2 : 22; + } __packed bits; +}; + +union prepic_ctrl2 { + u32 raw; + struct { + u32 __reserved1 : 9, + bgi_mode : 1, + __reserved2 : 2, + virt_en : 1, + __reserved3 : 19; + } __packed bits; +}; + +union prepic_err_int { + u32 raw; + struct { + u32 vect : 10, + __reserved1 : 2, + stat : 1, + dlvm : 3, + mask : 1, + __reserved2 : 3, 
+ dst : 10, + __reserved3 : 2; + } __packed bits; +}; + +union prepic_linpn { + u32 raw; + struct { + u32 vect : 10, + __reserved1 : 2, + stat : 1, + dlvm : 3, + mask : 1, + __reserved2 : 3, + dst : 10, + __reserved3 : 2; + } __packed bits; +}; + +typedef struct kvm_epic_page { +/*000*/ u32 ctrl; + u32 id; + u32 cpr; + u32 esr; + union cepic_esr2 esr2; + union cepic_cir cir; + atomic_t esr_new; + u32 svr; + union cepic_icr icr; + union cepic_timer_lvtt timer_lvtt; + u32 timer_init; + u32 timer_cur; + u32 timer_div; + u32 nm_timer_lvtt; + u32 nm_timer_init; + u32 nm_timer_cur; + u32 nm_timer_div; + u32 pnmirr_mask; +/*04c*/ u32 __reserved1[45]; +/*100*/ atomic64_t pmirr[CEPIC_PMIRR_NR_DREGS]; +/*180*/ u32 __reserved2[24]; +/*1e0*/ atomic_t pnmirr; + u32 __reserved3[263]; +/*600*/ u8 pnmirr_byte[16]; +/*610*/ u32 __reserved4[124]; +/*800*/ u8 pmirr_byte[CEPIC_PMIRR_NR_BITS]; +} epic_page_t; + +#elif defined(__BIG_ENDIAN) + +union cepic_ctrl { + u32 raw; + struct { + u32 __reserved3 : 21, + soft_en : 1, + __reserved2 : 1, + bsp_core : 1, + __reserved1 : 8; + } __packed bits; +}; + +/* Ignore 4 bits of CEPIC (core) ID so that physical core ID is <= 64 */ +union cepic_id { + u32 raw; + struct { + u32 __reserved2 : 22, + prepicn : 2, + cepicn_reserved : 4, + cepicn : 4; + } __packed bits; +}; + +union cepic_ctrl2 { + u32 raw; + struct { + u32 __reserved3 : 23, + timer_stop : 1, + __reserved2 : 3, + clear_gst : 1, + __reserved1 : 1, + int_hv : 1, + nmi_gst_blk : 1, + mi_gst_blk : 1; + } __packed bits; +}; + +union cepic_dat { + u64 raw; + struct { + u64 gst_id : 12, + __reserved6 : 2, + gst_dst : 10, + __reserved5 : 8, + __reserved4 : 2, + index : 10, + __reserved3 : 7, + stat : 1, + __reserved2 : 4, + dat_cop : 2, + __reserved1 : 6; + } __packed bits; +}; + +union cepic_epic_int { + u32 raw; + struct { + u32 __reserved3 : 15, + mask : 1, + __reserved2 : 3, + stat : 1, + __reserved1 : 2, + vect : 10; + } __packed bits; +}; + +union cepic_epic_int2 { + u64 raw; + struct { 
+ u64 __reserved4 : 14, + gst_dst : 10, + __reserved3 : 8, + gst_id : 12, + __reserved2 : 4, + dlvm : 3, + __reserved1 : 1, + dst_sh : 2, + vect : 10; + } __packed bits; +}; + +union cepic_cpr { + u32 raw; + struct { + u32 __reserved2 : 21, + cpr : 3, + __reserved1 : 8; + } __packed bits; +}; + +union cepic_esr { + u32 raw; + struct { + u32 __reserved2 : 20, + ms_icr_err : 1, + ms_err : 1, + ms_virt_err : 1, + ms_gstid_err : 1, + rq_cop_err : 1, + rq_virt_err : 1, + rq_addr_err : 1, + __reserved1 : 5; + } __packed bits; +}; + +union cepic_esr2 { + u32 raw; + struct { + u32 __reserved3 : 15, + mask : 1, + __reserved2 : 3, + stat : 1, + __reserved1 : 2, + vect : 10; + } __packed bits; +}; + +union cepic_eoi { + u32 raw; + struct { + u32 __reserved2 : 13, + rcpr : 3, + __reserved1 : 16; + } __packed bits; +}; + +union cepic_cir { + u32 raw; + struct { + u32 __reserved2 : 19, + stat : 1, + __reserved1 : 2, + vect : 10; + } __packed bits; +}; + +union cepic_gstbase_hi { + u32 raw; + struct { + u32 __reserved : 28, + gstbase_hi : 4; + } __packed bits; +}; + +union cepic_gstid { + u32 raw; + struct { + u32 __reserved : 20, + gstid : 12; + } __packed bits; +}; + +union cepic_pnmirr { + u32 raw; + struct { + u32 __reserved3 : 14, + nm_special : 1, + nm_timer : 1, + __reserved2 : 2, + int_violat : 1, + startup : 1, + init : 1, + nmi : 1, + smi : 1, + __reserved1 : 1, + startup_entry : 8; + } __packed bits; +}; + +union cepic_icr { + u64 raw; + struct { + u64 __reserved3 : 14, + dst : 10, + __reserved2 : 8, + gst_id : 12, + __reserved1 : 4, + dlvm : 3, + stat : 1, + dst_sh : 2, + vect : 10; + } __packed bits; +}; + +union cepic_timer_lvtt { + u32 raw; + struct { + u32 __reserved3 : 14, + mode : 1, + mask : 1, + __reserved2 : 3, + stat : 1, + __reserved1 : 2, + vect : 10; + } __packed bits; +}; + +union cepic_timer_div { + u32 raw; + struct { + u32 __reserved1 : 28, + divider : 4; + } __packed bits; +}; + +union cepic_nm_timer_lvtt { + u32 raw; + struct { + u32 __reserved2 : 
14, + mode : 1, + __reserved1 : 17; + } __packed bits; +}; + +union cepic_nm_timer_div { + u32 raw; + struct { + u32 __reserved1 : 28, + divider : 4; + } __packed bits; +}; + +union cepic_svr { + u32 raw; + struct { + u32 __reserved1 : 22, + vect : 10; + } __packed bits; +}; + +union cepic_pnmirr_mask { + u32 raw; + struct { + u32 __reserved4 : 14, + nm_special : 1, + nm_timer : 1, + __reserved3 : 2, + int_violat : 1, + __reserved2 : 2, + nmi : 1, + smi : 1, + __reserved1 : 9; + } __packed bits; +}; + +union cepic_vect_inta { + u32 raw; + struct { + u32 __reserved2 : 13, + cpr : 3, + __reserved1 : 6, + vect : 10; + } __packed bits; +}; + +union prepic_ctrl { + u32 raw; + struct { + u32 __reserved3 : 20, + epic_en : 1, + __reserved2 : 2, + bsp : 1, + __reserved1 : 8; + } __packed bits; +}; + +union prepic_id { + u32 raw; + struct { + u32 __reserved2 : 22, + prepicn : 2, + __reserved1 : 8; + } __packed bits; +}; + +union prepic_ctrl2 { + u32 raw; + struct { + u32 __reserved3 : 19, + virt_en : 1, + __reserved2 : 2, + bgi_mode : 1, + __reserved1 : 9; + } __packed bits; +}; + +union prepic_err_int { + u32 raw; + struct { + u32 __reserved3 : 2, + dst : 10, + __reserved2 : 3, + mask : 1, + dlvm : 3, + stat : 1, + __reserved1 : 2, + vect : 10; + } __packed bits; +}; + +union prepic_linpn { + u32 raw; + struct { + u32 __reserved3 : 2, + dst : 10, + __reserved2 : 3, + mask : 1, + dlvm : 3, + stat : 1, + __reserved1 : 2, + vect : 10; + } __packed bits; +}; + +#else /*__BIG_ENDIAN*/ +# error FIXME +#endif +#endif /* !(__ASSEMBLY__) */ +#endif /* __ASM_L_EPIC_REGS_H */ diff --git a/arch/l/include/asm/epicdef.h b/arch/l/include/asm/epicdef.h new file mode 100644 index 000000000000..4ea0ef864e3b --- /dev/null +++ b/arch/l/include/asm/epicdef.h @@ -0,0 +1,92 @@ +#ifndef _ASM_L_EPICDEF_H +#define _ASM_L_EPICDEF_H + +/* + * Constants for EPICs (CEPIC, IOEPIC) + */ + +#define MAX_EPICS_ORDER 10 + +#define EPIC_REGS_SIZE 0x2000 +#define IO_EPIC_REGS_SIZE 0x100000 + + +/* CEPIC 
registers */ +#define CEPIC_CTRL 0x0 +#define CEPIC_CTRL_BSP_CORE 0x100 +#define CEPIC_ID 0x10 +#define CEPIC_ID_BIT_MASK 0x3ff +#define CEPIC_ID_SHORT_VALID_BITS 4 +#define CEPIC_ID_SHORT_VALID_MASK 0xf +#define CEPIC_CPR 0x70 +#define CEPIC_CPR_CORE_PRIORITY_SHIFT 8 +#define CEPIC_ESR 0x80 +#define CEPIC_ESR_BIT_MASK 0x7e0 +#define CEPIC_ESR2 0x90 +#define CEPIC_EOI 0xa0 +#define CEPIC_CIR 0xb0 + +#define CEPIC_PMIRR 0x100 +#define CEPIC_PMIRR_NR_BITS 0x400 +#define CEPIC_PMIRR_NR_REGS 0x20 +#define CEPIC_PMIRR_NR_DREGS 0x10 +#define CEPIC_PNMIRR 0x1e0 +#define CEPIC_PNMIRR_BIT_MASK 0x33e00 +#define CEPIC_PNMIRR_NMI 0x400 +#define CEPIC_PNMIRR_STARTUP 0x1000 +#define CEPIC_PNMIRR_STARTUP_ENTRY 0xff +#define CEPIC_ESR_NEW 0x1f0 + +#define CEPIC_ICR 0x200 +#define CEPIC_ICR_DST_FULL 0 +#define CEPIC_ICR_DST_SELF 1 +#define CEPIC_ICR_DST_ALLBUT 2 +#define CEPIC_ICR_DST_ALLINC 3 +#define CEPIC_ICR_DLVM_FIXED_EXT 0 +#define CEPIC_ICR_DLVM_FIXED_IPI 1 +#define CEPIC_ICR_DLVM_SMI 2 +#define CEPIC_ICR_DLVM_NM_SPECIAL 3 +#define CEPIC_ICR_DLVM_NMI 4 +#define CEPIC_ICR_DLVM_INIT 5 +#define CEPIC_ICR_DLVM_STARTUP 6 +#define CEPIC_ICR2 0x204 +#define CEPIC_TIMER_LVTT 0x220 +#define CEPIC_TIMER_INIT 0x230 +#define CEPIC_TIMER_CUR 0x240 +#define CEPIC_TIMER_DIV 0x250 +#define CEPIC_TIMER_DIV_1 0xb +#define CEPIC_NM_TIMER_LVTT 0x260 +#define CEPIC_NM_TIMER_INIT 0x270 +#define CEPIC_NM_TIMER_CUR 0x280 +#define CEPIC_NM_TIMER_DIV 0x290 +#define CEPIC_SVR 0x2a0 +#define CEPIC_PNMIRR_MASK 0x2d0 +#define CEPIC_VECT_INTA 0x2f0 +#define CEPIC_VECT_INTA_VMASK 0x3ff +#define CEPIC_VECT_INTA_PRI_SHIFT 16 + +/* CEPIC (HP) registers */ +#define CEPIC_GUEST 0x1000 + +#define CEPIC_CTRL2 0x1820 +#define CEPIC_DAT 0x1830 +#define CEPIC_DAT_READ 0 +#define CEPIC_DAT_INVALIDATE 2 +#define CEPIC_DAT_WRITE 3 +#define CEPIC_DAT2 0x1834 +#define CEPIC_EPIC_INT 0x1850 +#define CEPIC_EPIC_INT2 0x1860 +#define CEPIC_EPIC_INT3 0x1864 +#define CEPIC_GSTBASE_LO 0x18c0 +#define CEPIC_GSTBASE_HI 0x18c4 
+#define CEPIC_GSTID 0x18d0 + +#define CEPIC_PMIRR_OR 0x1900 +#define CEPIC_PNMIRR_OR 0x19e0 +#define CEPIC_ESR_NEW_OR 0x19f0 + +#define CEPIC_PNMIRR_INT_VIOLAT_BIT 13 + +#define BAD_EPICID 0xffff + +#endif /* _ASM_L_EPICDEF_H */ diff --git a/arch/l/include/asm/gpio.h b/arch/l/include/asm/gpio.h new file mode 100644 index 000000000000..be5509108f3d --- /dev/null +++ b/arch/l/include/asm/gpio.h @@ -0,0 +1,50 @@ +/* + * arch/l/include/gpio.h + * + * Copyright (C) 2012 Evgeny Kravtsunov + * + * AC97-GPIO Controller (part of Elbrus IOHUB). + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ASM_ARCH_GPIO_H_ +#define __ASM_ARCH_GPIO_H_ + +#include + +/* IOHUB GPIO pins */ +#define IOUHB_GPIO_0 0 +#define IOHUB_GPIO_1 1 +#define IOHUB_GPIO_2 2 +#define IOHUB_GPIO_3 3 +#define IOHUB_GPIO_4 4 +#define IOHUB_GPIO_5 5 +#define IOHUB_GPIO_6 6 +#define IOHUB_GPIO_7 7 +#define IOHUB_GPIO_8 8 +#define IOHUB_GPIO_9 9 +#define IOHUB_GPIO_10 10 +#define IOHUB_GPIO_11 11 +#define IOHUB_GPIO_12 12 +#define IOHUB_GPIO_13 13 +#define IOHUB_GPIO_14 14 +#define IOHUB_GPIO_15 15 + +/* Amount of iohub's own gpios: */ +#define ARCH_NR_IOHUB_GPIOS 16 +#define ARCH_NR_IOHUB2_GPIOS 32 +#define ARCH_MAX_NR_OWN_GPIOS ARCH_NR_IOHUB2_GPIOS + +#if IS_ENABLED(CONFIG_INPUT_LTC2954) +#define LTC2954_IRQ_GPIO_PIN IOHUB_GPIO_3 +#define LTC2954_KILL_GPIO_PIN IOHUB_GPIO_4 +#endif /* CONFIG_INPUT_LTC2954 */ + +#ifdef CONFIG_GPIOLIB +#include +#endif /* CONFIG_GPIOLIB */ + +#endif diff --git a/arch/l/include/asm/hardirq.h b/arch/l/include/asm/hardirq.h new file mode 100644 index 000000000000..816611f4ae0d --- /dev/null +++ b/arch/l/include/asm/hardirq.h @@ -0,0 +1,55 @@ +#ifndef __ASM_L_HARDIRQ_H +#define __ASM_L_HARDIRQ_H + +#include +#include + +typedef struct { + unsigned int __softirq_pending; + unsigned int __nmi_count; /* arch dependent */ 
+#ifdef CONFIG_L_LOCAL_APIC + unsigned int apic_timer_irqs; /* arch dependent */ + unsigned int irq_spurious_count; + unsigned int icr_read_retry_count; + unsigned int apic_irq_work_irqs; +#endif +#ifdef CONFIG_SMP + unsigned int irq_resched_count; + unsigned int irq_call_count; +# ifdef CONFIG_E2K + /* + * irq_tlb_count is double-counted in irq_call_count, so it must be + * subtracted from irq_call_count when displaying irq_call_count + */ + unsigned int irq_tlb_count; +# endif +#endif +#if (IS_ENABLED(CONFIG_RDMA) || IS_ENABLED(CONFIG_RDMA_SIC) || \ + IS_ENABLED(CONFIG_RDMA_NET)) + unsigned int irq_rdma_count; +#endif +#ifdef CONFIG_E2K +#if IS_ENABLED(CONFIG_ELDSP) + unsigned int irq_eldsp_count; +#endif +#endif +} ____cacheline_aligned irq_cpustat_t; + +DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); + +extern void ack_bad_irq(unsigned int irq); + +#define __ARCH_IRQ_STAT +#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member) + +#define inc_irq_stat(member) __IRQ_STAT(raw_smp_processor_id(), member) ++ + +extern u64 arch_irq_stat_cpu(unsigned int cpu); +#define arch_irq_stat_cpu arch_irq_stat_cpu + +extern u64 arch_irq_stat(void); +#define arch_irq_stat arch_irq_stat + +#include + +#endif /* __ASM_L_HARDIRQ_H */ diff --git a/arch/l/include/asm/hw_irq.h b/arch/l/include/asm/hw_irq.h new file mode 100644 index 000000000000..709e4b53eb66 --- /dev/null +++ b/arch/l/include/asm/hw_irq.h @@ -0,0 +1,141 @@ +#ifndef _ASM_L_HW_IRQ_H +#define _ASM_L_HW_IRQ_H + +/* required by linux/irq.h */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_L_LOCAL_APIC +#ifdef CONFIG_PIC +# define platform_legacy_irq(irq) ((irq) < 16) +#else +# define platform_legacy_irq(irq) 0 +#endif +#endif + +/* + * Various low-level irq details needed by irq.c, process.c, + * time.c, io_apic.c and smp.c + * + * Interrupt entry/exit code at both C and assembly level + */ + +extern atomic_t irq_err_count; + +/* IOAPIC */ +#ifdef 
CONFIG_PIC +# define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs)) +extern unsigned long io_apic_irqs; +#else +# define IO_APIC_IRQ(x) 1 +#endif + +extern void disable_IO_APIC(void); + +struct io_apic_irq_attr { + int ioapic; + int ioapic_pin; + int trigger; + int polarity; +}; + +static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr, + int ioapic, int ioapic_pin, + int trigger, int polarity) +{ + irq_attr->ioapic = ioapic; + irq_attr->ioapic_pin = ioapic_pin; + irq_attr->trigger = trigger; + irq_attr->polarity = polarity; +} + +/* + * This is performance-critical, we want to do it O(1) + * + * Most irqs are mapped 1:1 with pins. + */ +struct irq_cfg { + struct irq_pin_list *irq_2_pin; + cpumask_var_t domain; + cpumask_var_t old_domain; + u8 vector; + u8 move_in_progress : 1; +#ifdef CONFIG_INTR_REMAP + struct irq_2_iommu irq_2_iommu; +#endif +}; + +extern int IO_APIC_get_PCI_irq_vector(int domain, int bus, int devfn, int pin, + struct io_apic_irq_attr *irq_attr); +extern int IO_APIC_get_fix_irq_vector(int domain, int bus, int slot, int func, + int irq); + +extern void (*interrupt[NR_VECTORS])(struct pt_regs *regs); +#ifdef CONFIG_TRACING +#define trace_interrupt interrupt +#endif + +#define VECTOR_UNDEFINED -1 +#define VECTOR_RETRIGGERED -2 + +typedef int vector_irq_t[NR_VECTORS]; +DECLARE_PER_CPU(vector_irq_t, vector_irq); + +extern void lock_vector_lock(void); +extern void unlock_vector_lock(void); +extern void __setup_vector_irq(int cpu); + +#define IO_APIC_VECTOR(irq) ({ \ + struct irq_cfg *__cfg = irq_cfg(irq); \ + (__cfg) ? 
__cfg->vector : 0; \ +}) + +extern void setup_ioapic_dest(void); + +/* Statistics */ +extern atomic_t irq_err_count; +extern atomic_t irq_mis_count; + +/* EISA */ +extern void eisa_set_level_irq(unsigned int irq); + +/* SMP */ +extern __visible void smp_apic_timer_interrupt(struct pt_regs *); +extern __visible void smp_spurious_interrupt(struct pt_regs *); +extern __visible void smp_error_interrupt(struct pt_regs *); +extern __visible void smp_irq_move_cleanup_interrupt(struct pt_regs *); +extern __visible void smp_irq_work_interrupt(struct pt_regs *); +#ifdef CONFIG_SMP +extern __visible void smp_reschedule_interrupt(struct pt_regs *regs); +extern __visible void smp_call_function_interrupt(struct pt_regs *regs); +extern __visible void smp_call_function_single_interrupt(struct pt_regs *regs); +#endif + +#ifdef CONFIG_TRACING +/* Interrupt handlers registered during init_IRQ */ +extern void smp_trace_apic_timer_interrupt(struct pt_regs *regs); +extern void smp_trace_error_interrupt(struct pt_regs *regs); +extern void smp_trace_irq_work_interrupt(struct pt_regs *regs); +extern void smp_trace_spurious_interrupt(struct pt_regs *regs); +extern void smp_trace_reschedule_interrupt(struct pt_regs *regs); +extern void smp_trace_call_function_interrupt(struct pt_regs *regs); +extern void smp_trace_call_function_single_interrupt(struct pt_regs *regs); +#define trace_irq_move_cleanup_interrupt irq_move_cleanup_interrupt +#endif /* CONFIG_TRACING */ + +extern void do_nmi(struct pt_regs * regs); +extern void l_init_system_handlers_table(void); +extern void epic_init_system_handlers_table(void); +extern void setup_PIC_vector_handler(int vector, + void (*handler)(struct pt_regs *), bool system, char *name); +extern void do_IRQ(struct pt_regs * regs, unsigned int vector); + +#endif /* _ASM_L_HW_IRQ_H */ diff --git a/arch/l/include/asm/i2c-spi.h b/arch/l/include/asm/i2c-spi.h new file mode 100644 index 000000000000..105c25704c1e --- /dev/null +++ b/arch/l/include/asm/i2c-spi.h @@ 
-0,0 +1,42 @@ +#ifndef __L_ASM_SPI_H__ +#define __L_ASM_SPI_H__ + +#include +#include + +/* PCI registers definitions for reset */ + +#define PCI_RESET_CONTROL 0x60 +#define L_SOFTWARE_RESET_TO_HARD 0x00000004 /* software reset */ + /* to hardware reset */ +#define L_WATCHDOG_RESET_TO_HARD 0x00000008 /* watchdog reset */ + /* to hardware reset */ +#define L_SOFTWARE_RESET_TO_SOFT 0x00000010 /* software reset */ + /* to soft reset */ +#define L_WATCHDOG_RESET_TO_SOFT 0x00000020 /* watchdog reset */ + /* to soft reset */ +#define L_RED_RESET_OUT 0x80000080 /* Led control */ +#define PCI_SOFT_RESET_CONTROL 0x64 +#define L_SOFTWARE_RESET 0x00000001 +#define L_SOFTWARE_RESET_DONE 0x00000002 +#define L_LAST_RESET_INFO 0x000000fc /* last reset type */ +#define PCI_SOFT_RESET_DURATION 0x68 +#define L_IOHUB_SOFT_RESET_DURATION 0x0000ffff +#define L_IOHUB2_SOFT_RESET_DURATION 0x00ffffff + +/* Common SPI & I2C definitions */ + +#define I2C_SPI_CNTRL_AREA_SIZE 0x40 +#define I2C_SPI_DATA_AREA_SIZE 0x40 + +#define I2C_SPI_DEFAULT_IRQ 23 + +#define I2C_MAX_BUSSES 5 +#define I2C_DST_BUSSES 4 + +#ifdef CONFIG_E2K +extern int iohub_i2c_line_id; +#else +#define iohub_i2c_line_id 0 +#endif +#endif /* __L_ASM_SPI_H__ */ diff --git a/arch/l/include/asm/idle.h b/arch/l/include/asm/idle.h new file mode 100644 index 000000000000..0cf10bb31c89 --- /dev/null +++ b/arch/l/include/asm/idle.h @@ -0,0 +1,7 @@ +#ifndef _ASM_L_IDLE_H +#define _ASM_L_IDLE_H + +static inline void enter_idle(void) { } +static inline void exit_idle(void) { } + +#endif /* _ASM_L_IDLE_H */ diff --git a/arch/l/include/asm/io_apic.h b/arch/l/include/asm/io_apic.h new file mode 100644 index 000000000000..abc67d13bf4e --- /dev/null +++ b/arch/l/include/asm/io_apic.h @@ -0,0 +1,307 @@ +#ifndef _ASM_L_IO_APIC_H +#define _ASM_L_IO_APIC_H + +#include +#include +#include +#include +#if 0 +#include +#endif +/* + * Intel IO-APIC support for SMP and UP systems. 
+ * + * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar + */ + +/* I/O Unit Redirection Table */ +#define IO_APIC_REDIR_VECTOR_MASK 0x000FF +#define IO_APIC_REDIR_DEST_LOGICAL 0x00800 +#define IO_APIC_REDIR_DEST_PHYSICAL 0x00000 +#define IO_APIC_REDIR_SEND_PENDING (1 << 12) +#define IO_APIC_REDIR_REMOTE_IRR (1 << 14) +#define IO_APIC_REDIR_LEVEL_TRIGGER (1 << 15) +#define IO_APIC_REDIR_MASKED (1 << 16) + +#if 0 +/* + * The structure of the IO-APIC: + */ +union IO_APIC_reg_00 { + u32 raw; + struct { + u32 __reserved_2 : 14, + LTS : 1, + delivery_type : 1, + __reserved_1 : 8, + ID : 8; + } __attribute__ ((packed)) bits; +}; + +union IO_APIC_reg_01 { + u32 raw; + struct { + u32 version : 8, + __reserved_2 : 7, + PRQ : 1, + entries : 8, + __reserved_1 : 8; + } __attribute__ ((packed)) bits; +}; + +union IO_APIC_reg_02 { + u32 raw; + struct { + u32 __reserved_2 : 24, + arbitration : 4, + __reserved_1 : 4; + } __attribute__ ((packed)) bits; +}; + +union IO_APIC_reg_03 { + u32 raw; + struct { + u32 boot_DT : 1, + __reserved_1 : 31; + } __attribute__ ((packed)) bits; +}; + +struct IO_APIC_route_entry { + __u32 vector : 8, + delivery_mode : 3, /* 000: FIXED + * 001: lowest prio + * 111: ExtINT + */ + dest_mode : 1, /* 0: physical, 1: logical */ + delivery_status : 1, + polarity : 1, + irr : 1, + trigger : 1, /* 0: edge, 1: level */ + mask : 1, /* 0: enabled, 1: disabled */ + __reserved_2 : 15; + + __u32 __reserved_3 : 24, + dest : 8; +} __attribute__ ((packed)); + +struct IR_IO_APIC_route_entry { + __u64 vector : 8, + zero : 3, + index2 : 1, + delivery_status : 1, + polarity : 1, + irr : 1, + trigger : 1, + mask : 1, + reserved : 31, + format : 1, + index : 15; +} __attribute__ ((packed)); +#endif + +#define IOAPIC_AUTO -1 +#define IOAPIC_EDGE 0 +#define IOAPIC_LEVEL 1 + +#ifdef CONFIG_L_IO_APIC + +extern DECLARE_BITMAP(used_vectors, NR_VECTORS); + +/* + * # of IO-APICs and # of IRQ routing registers + */ +extern int nr_ioapics; + +extern int mpc_ioapic_id(int ioapic); 
+extern unsigned long mpc_ioapic_addr(int ioapic); +extern struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic); + +#define MP_MAX_IOAPIC_PIN 127 + +/* # of MP IRQ source entries */ +extern int mp_irq_entries; + +/* MP IRQ source entries */ +extern struct mpc_intsrc mp_irqs[]; + +/* non-0 if default (table-less) MP configuration */ +extern int mpc_default_type; + +/* Older SiS APIC requires we rewrite the index register */ +extern int sis_apic_bug; + +/* 1 if "noapic" boot option passed */ +extern int skip_ioapic_setup; + +/* 1 if "noapic" boot option passed */ +extern int noioapicquirk; + +/* -1 if "noapic" boot option passed */ +extern int noioapicreroute; + +/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */ +extern int timer_through_8259; + +/* + * If we use the IO-APIC for IRQ routing, disable automatic + * assignment of PCI IRQ's. + */ +#ifdef CONFIG_PIC +#define io_apic_assign_pci_irqs \ + (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) +#else +#define io_apic_assign_pci_irqs \ + (mp_irq_entries && !skip_ioapic_setup) +#endif + +extern void setup_IO_APIC(void); +extern void enable_IO_APIC(void); + +struct io_apic_irq_attr; +struct irq_cfg; +struct device; +extern int io_apic_set_pci_routing(struct device *dev, int irq, + struct io_apic_irq_attr *irq_attr); +void setup_IO_APIC_irq_extra(u32 gsi); +extern void ioapic_insert_resources(void); + +extern int native_setup_ioapic_entry(int, struct IO_APIC_route_entry *, + unsigned int, int, + struct io_apic_irq_attr *); +extern int native_setup_ioapic_entry(int, struct IO_APIC_route_entry *, + unsigned int, int, + struct io_apic_irq_attr *); +extern void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg); + +struct pci_dev; +struct msi_msg; +extern void native_compose_msi_msg(struct pci_dev *pdev, + unsigned int irq, unsigned int dest, + struct msi_msg *msg, u8 hpet_id); +extern void native_eoi_ioapic_pin(int apic, int pin, int vector); +int io_apic_setup_irq_pin_once(unsigned int irq, int 
node, struct io_apic_irq_attr *attr); + +extern int save_ioapic_entries(void); +extern void mask_ioapic_entries(void); +extern int restore_ioapic_entries(void); + +extern void probe_nr_irqs_gsi(void); +extern int get_nr_irqs_gsi(void); +extern int set_ioapic_affinity_irq(unsigned int, const struct cpumask *); + +extern void setup_ioapic_ids_from_mpc(void); +extern void setup_ioapic_ids_from_mpc_nocheck(void); + +struct mp_ioapic_gsi{ + u32 gsi_base; + u32 gsi_end; +}; +extern struct mp_ioapic_gsi mp_gsi_routing[]; +extern u32 gsi_top; +int mp_find_ioapic(u32 gsi); +int mp_find_ioapic_pin(int ioapic, u32 gsi); +#if defined CONFIG_E2K || defined CONFIG_E90S +void __init mp_register_ioapic(int id, unsigned long address, u32 gsi_base); +#else +void __init mp_register_ioapic(int id, u32 address, u32 gsi_base); +#endif +extern void __init pre_init_apic_IRQ0(void); + +extern void mp_save_irq(struct mpc_intsrc *m); + +extern void disable_ioapic_support(void); + +extern void __init native_io_apic_init_mappings(void); +extern unsigned int native_io_apic_read(unsigned int apic, unsigned int reg); +extern void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int val); +extern void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int val); +extern void native_disable_io_apic(void); +extern void native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries); +extern void intel_ir_io_apic_print_entries(unsigned int apic, unsigned int nr_entries); +struct irq_data; +extern int native_ioapic_set_affinity(struct irq_data *, + const struct cpumask *, + bool); + +static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) +{ +#if 0 + return x86_io_apic_ops.read(apic, reg); +#else + return native_io_apic_read(apic, reg); +#endif +} + +static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value) +{ +#if 0 + x86_io_apic_ops.write(apic, reg, value); +#else + native_io_apic_write(apic, reg, value); 
+#endif +} +static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value) +{ +#if 0 + x86_io_apic_ops.modify(apic, reg, value); +#else + native_io_apic_modify(apic, reg, value); +#endif +} + +extern void io_apic_eoi(unsigned int apic, unsigned int vector); + +extern unsigned int __create_irqs(unsigned int from, unsigned int count, + int node); +extern void destroy_irqs(unsigned int irq, unsigned int count); +extern int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, + struct msi_msg *msg, u8 hpet_id); +extern int ioapic_retrigger_irq(struct irq_data *data); +extern int __ioapic_set_affinity(struct irq_data *data, + const struct cpumask *mask, + unsigned int *dest_id); +extern int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, + int pin); +extern unsigned int ioapic_cfg_get_pin(struct irq_cfg *cfg); +extern unsigned int ioapic_cfg_get_idx(struct irq_cfg *cfg); + +#else /* !CONFIG_L_IO_APIC */ + +#define io_apic_assign_pci_irqs 0 +#define setup_ioapic_ids_from_mpc x86_init_noop +static const int timer_through_8259 = 0; +static inline void ioapic_insert_resources(void) { } +#define gsi_top (NR_IRQS_LEGACY) +static inline int mp_find_ioapic(u32 gsi) { return 0; } + +struct io_apic_irq_attr; +static inline int io_apic_set_pci_routing(struct device *dev, int irq, + struct io_apic_irq_attr *irq_attr) { return 0; } + +static inline int save_ioapic_entries(void) +{ + return -ENOMEM; +} + +static inline void mask_ioapic_entries(void) { } +static inline int restore_ioapic_entries(void) +{ + return -ENOMEM; +} + +static inline void mp_save_irq(struct mpc_intsrc *m) { }; +static inline void disable_ioapic_support(void) { } +#define native_io_apic_init_mappings NULL +#define native_io_apic_read NULL +#define native_io_apic_write NULL +#define native_io_apic_modify NULL +#define native_disable_io_apic NULL +#define native_io_apic_print_entries NULL +#define native_ioapic_set_affinity NULL +#define native_setup_ioapic_entry NULL 
+#define native_compose_msi_msg NULL +#define native_eoi_ioapic_pin NULL +#endif +extern int __init calibrate_APIC_clock(void); + +#endif /* _ASM_L_IO_APIC_H */ diff --git a/arch/l/include/asm/io_epic.h b/arch/l/include/asm/io_epic.h new file mode 100644 index 000000000000..947cdc46e877 --- /dev/null +++ b/arch/l/include/asm/io_epic.h @@ -0,0 +1,77 @@ +#ifndef _ASM_L_IO_EPIC_H +#define _ASM_L_IO_EPIC_H + +#include +#include +#include +#include + +#define IOEPIC_ID 0x0 +#define IOEPIC_VERSION 0x4 +#define IOEPIC_INT_RID(pin) (0x800 + 0x4 * pin) +#define IOEPIC_TABLE_INT_CTRL(pin) (0x20 + 0x1000 * pin) +#define IOEPIC_TABLE_MSG_DATA(pin) (0x24 + 0x1000 * pin) +#define IOEPIC_TABLE_ADDR_HIGH(pin) (0x28 + 0x1000 * pin) +#define IOEPIC_TABLE_ADDR_LOW(pin) (0x2c + 0x1000 * pin) + +#define MAX_IO_EPICS (MAX_NUMIOLINKS + MAX_NUMNODES) + +#define IOEPIC_AUTO -1 +#define IOEPIC_EDGE 0 +#define IOEPIC_LEVEL 1 + +#define IOEPIC_VERSION_1 1 +#define IOEPIC_VERSION_2 2 /* Fast level EOI (without reading int_ctrl) */ + +extern int nr_ioepics; +extern void setup_io_epic(void); +extern void __init mp_register_ioepic(int ver, int id, int node, + unsigned long address, u32 gsi_base); +extern int ioepic_pin_to_irq(unsigned int pin, struct pci_dev *dev); + +struct mp_ioepic_gsi { + unsigned int gsi_base; + unsigned int gsi_end; +}; + +/* + * cpumask fields 'domain' and 'old_domain' from APIC irq_cfg are replaced with + * int dest here. Similar to APIC in physical addressing mode, there is + * no need for a cpumask, if only one CPU bit is set in it at all times + */ +struct epic_irq_cfg { + unsigned short pin; + unsigned short epic; + unsigned short old_dest; + unsigned short dest; + unsigned short vector; + unsigned char move_in_progress : 1; +#ifdef CONFIG_INTR_REMAP + struct irq_2_iommu irq_2_iommu; +#endif +}; + +#define IO_EPIC_VECTOR(irq) ({ \ + struct epic_irq_cfg *__cfg = irq_get_chip_data(irq); \ + (__cfg) ? 
__cfg->vector : 0; \ +}) + +struct io_epic_irq_attr { + int ioepic; + int ioepic_pin; + int trigger; + int rid; +}; + +struct irq_chip; +extern struct irq_chip ioepic_chip; +extern unsigned long used_vectors[]; + +extern unsigned long io_epic_base_node(int node); +/* FIXME should be removed after proper passthrough implementation */ +extern unsigned int io_epic_read(unsigned int epic, unsigned int reg); +extern void io_epic_write(unsigned int epic, unsigned int reg, + unsigned int value); +extern int pirq_enable_irq(struct pci_dev *dev); + +#endif /* _ASM_L_IO_EPIC_H */ diff --git a/arch/l/include/asm/io_epic_regs.h b/arch/l/include/asm/io_epic_regs.h new file mode 100644 index 000000000000..7e6ef45394f8 --- /dev/null +++ b/arch/l/include/asm/io_epic_regs.h @@ -0,0 +1,147 @@ +#ifndef __ASM_L_IO_EPIC_REGS_H +#define __ASM_L_IO_EPIC_REGS_H + +#include + +#ifdef __LITTLE_ENDIAN +/* The structure of the IO-EPIC */ +union IO_EPIC_ID { + u32 raw; + struct { + u32 id : 16, + nodeid : 16; + } __packed bits; +}; + +union IO_EPIC_VERSION { + u32 raw; + struct { + u32 version : 8, + __reserved2 : 8, + entries : 8, + __reserved1 : 8; + } __packed bits; +}; + +union IO_EPIC_INT_CTRL { + u32 raw; + struct { + u32 __reserved3 : 12, + delivery_status : 1, + software_int : 1, + __reserved2 : 1, + trigger : 1, /* 0: edge, 1: level */ + mask : 1, /* 0: enabled, 1: disabled */ + __reserved1 : 15; + } __packed bits; +}; + +union IO_EPIC_MSG_DATA { + u32 raw; + struct { + u32 vector : 10, + __reserved2 : 3, + dlvm : 3, + __reserved1 : 16; + } __packed bits; +}; + +union IO_EPIC_MSG_ADDR_LOW { + u32 raw; + struct { + u32 __reserved3 : 2, + msg_type : 3, + __reserved2 : 1, + dst : 10, + __reserved1 : 4, + MSI : 12; + } __packed bits; +}; + +union IO_EPIC_REQ_ID { + u32 raw; + struct { + u32 fn : 3, + dev : 5, + bus : 8, + __reserved1 : 16; + } __packed bits; +}; +#elif defined(__BIG_ENDIAN) +/* The structure of the IO-EPIC */ +union IO_EPIC_ID { + u32 raw; + struct { + u32 nodeid : 16, + 
id : 16; + } __packed bits; +}; + +union IO_EPIC_VERSION { + u32 raw; + struct { + u32 __reserved1 : 8, + entries : 8, + __reserved2 : 8, + version : 8; + } __packed bits; +}; + +union IO_EPIC_INT_CTRL { + u32 raw; + struct { + u32 __reserved1 : 15, + mask : 1, /* 0: enabled, 1: disabled */ + trigger : 1, /* 0: edge, 1: level */ + __reserved2 : 1, + software_int : 1, + delivery_status : 1, + __reserved3 : 12; + } __packed bits; +}; + +union IO_EPIC_MSG_DATA { + u32 raw; + struct { + u32 __reserved1 : 16, + dlvm : 3, + __reserved2 : 3, + vector : 10; + } __packed bits; +}; + +union IO_EPIC_MSG_ADDR_LOW { + u32 raw; + struct { + u32 MSI : 12, + __reserved1 : 4, + dst : 10, + __reserved2 : 1, + msg_type : 3, + __reserved3 : 2; + } __packed bits; +}; + +union IO_EPIC_REQ_ID { + u32 raw; + struct { + u32 __reserved1 : 16, + bus : 8, + dev : 5, + fn : 3; + } __packed bits; +}; + +#else /*__BIG_ENDIAN*/ +# error What is the endianess? +#endif + +struct IO_EPIC_route_entry { + union IO_EPIC_INT_CTRL int_ctrl; + union IO_EPIC_MSG_DATA msg_data; + u32 addr_high; + union IO_EPIC_MSG_ADDR_LOW addr_low; + union IO_EPIC_REQ_ID rid; +} __packed; + +#endif /* __ASM_L_IO_EPIC_REGS_H */ diff --git a/arch/l/include/asm/io_pic.h b/arch/l/include/asm/io_pic.h new file mode 100644 index 000000000000..72806e3efc50 --- /dev/null +++ b/arch/l/include/asm/io_pic.h @@ -0,0 +1,122 @@ +#ifndef __ASM_L_IO_PIC_H +#define __ASM_L_IO_PIC_H + +/* + * Choose between IO-PICs in arch/l. If CONFIG_EPIC=n, IO-APIC is chosen + * statically. 
If CONFIG_EPIC=y (only on e2k), use both IO-APIC and IO-EPIC + * calls, depending on nr_ioapics and nr_ioepics variables + */ + +#ifdef CONFIG_EPIC + +#include +#include + +struct io_apic_irq_attr; +extern int io_epic_get_PCI_irq_vector(int bus, int devfn, int pin); +extern int IO_APIC_get_PCI_irq_vector(int domain, int bus, int devfn, int pin, + struct io_apic_irq_attr *irq_attr); +static inline int IO_PIC_get_PCI_irq_vector(int domain, int bus, int slot, + int pin, struct io_apic_irq_attr *irq_attr) +{ + int pic_irq = -1; + + if (nr_ioepics) + pic_irq = io_epic_get_PCI_irq_vector(bus, slot, pin); + + if (pic_irq == -1 && nr_ioapics) + pic_irq = IO_APIC_get_PCI_irq_vector(domain, bus, slot, pin, + irq_attr); + return pic_irq; +} + +extern int io_epic_get_fix_irq_vector(int domain, int bus, int slot, int func, + int irq); +extern int IO_APIC_get_fix_irq_vector(int domain, int bus, int slot, int func, + int irq); +static inline int IO_PIC_get_fix_irq_vector(int domain, int bus, int slot, + int func, int irq) +{ + int pic_irq = -1; + + if (nr_ioepics) + pic_irq = io_epic_get_fix_irq_vector(domain, bus, slot, func, + irq); + + if (pic_irq == -1 && nr_ioapics) + pic_irq = IO_APIC_get_fix_irq_vector(domain, bus, slot, func, + irq); + return pic_irq; +} + +extern void __epic_setup_vector_irq(int cpu); +extern void __apic_setup_vector_irq(int cpu); +static inline void __pic_setup_vector_irq(int cpu) +{ + if (nr_ioepics) + __epic_setup_vector_irq(cpu); + if (nr_ioapics) + __apic_setup_vector_irq(cpu); +} + +extern void fixup_irqs_epic(void); +extern void fixup_irqs_apic(void); +static inline void fixup_irqs_pic(void) +{ + if (nr_ioepics) + fixup_irqs_epic(); + if (nr_ioapics) + fixup_irqs_apic(); +} + +extern void print_IO_APICs(void); +extern void print_IO_EPICs(void); +static inline void print_IO_PICs(void) +{ + if (nr_ioepics) + print_IO_EPICs(); + if (nr_ioapics) + print_IO_APICs(); +} + +#else /* !(CONFIG_EPIC) */ + +#include + +struct io_apic_irq_attr; +extern int 
IO_APIC_get_PCI_irq_vector(int domain, int bus, int devfn, int pin, + struct io_apic_irq_attr *irq_attr); +static inline int IO_PIC_get_PCI_irq_vector(int domain, int bus, int slot, + int pin, struct io_apic_irq_attr *irq_attr) +{ + return IO_APIC_get_PCI_irq_vector(domain, bus, slot, pin, irq_attr); +} + +extern int IO_APIC_get_fix_irq_vector(int domain, int bus, int slot, int func, + int irq); +static inline int IO_PIC_get_fix_irq_vector(int domain, int bus, int slot, + int func, int irq) +{ + return IO_APIC_get_fix_irq_vector(domain, bus, slot, func, irq); +} + +extern void __apic_setup_vector_irq(int cpu); +static inline void __pic_setup_vector_irq(int cpu) +{ + __apic_setup_vector_irq(cpu); +} + +extern void fixup_irqs_apic(void); +static inline void fixup_irqs_pic(void) +{ + fixup_irqs_apic(); +} + +extern void print_IO_APICs(void); +static inline void print_IO_PICs(void) +{ + print_IO_APICs(); +} + +#endif /* !(CONFIG_EPIC) */ +#endif /* __ASM_L_IO_PIC_H */ diff --git a/arch/l/include/asm/iolinkmask.h b/arch/l/include/asm/iolinkmask.h new file mode 100644 index 000000000000..bc83a3f9ffcd --- /dev/null +++ b/arch/l/include/asm/iolinkmask.h @@ -0,0 +1,606 @@ +#ifndef __ASM_L_IOLINKMASK_H +#define __ASM_L_IOLINKMASK_H + +/* + * Based on include/linux/nodemask.h + * IOLINKmasks provide a bitmap suitable for representing the + * set of IOLINK's in a system, one bit position per IOLINK domain number. + * + * IOLINK can be represented by global domain number and as + * pair: node and local link number on the node, + * So main macroses and functions operate with domain number and + * can have appropriate macroses to operate with pair of node and link #, + * for axample: + * iolink_set(domain, ...) + * node_iolink_set(node, link, ...) 
+ * + * IOLINK is common name of IO management and can be connected to IOHUB + * (controller of peripheral interfaces) or RDMA (DMA with remoute systems) + * So macroses have alternative to operate with IOLINKS as IOHUBs and RDMAs, + * for example: + * iolink_set(...) + * iohub_set(...) + * rdma_set(...) + * + * See detailed comments in the file linux/bitmap.h describing the + * data type on which these iolinkmasks are based. + * + * For details of iolinkmask_scnprintf() and iolinkmask_parse(), + * see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c. + * For details of iolinklist_scnprintf() and iolinklist_parse(), see + * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c. + * + * The available iolinkmask operations are: + * + * void iolink_set(iolink, mask) turn on bit 'iolink' in mask + * void iolink_clear(iolink, mask) turn off bit 'iolink' in mask + * void iolinks_setall(mask) set all bits + * void iolinks_clear(mask) clear all bits + * int iolink_isset(iolink, mask) true iff bit 'iolink' set in mask + * int iolink_test_and_set(iolink, mask) test and set bit 'iolink' in mask + * + * void iolinks_and(dst, src1, src2) dst = src1 & src2 [intersection] + * void iolinks_or(dst, src1, src2) dst = src1 | src2 [union] + * void iolinks_xor(dst, src1, src2) dst = src1 ^ src2 + * void iolinks_andnot(dst, src1, src2) dst = src1 & ~src2 + * void iolinks_complement(dst, src) dst = ~src + * + * int iolinks_equal(mask1, mask2) Does mask1 == mask2? + * int iolinks_intersects(mask1, mask2) Do mask1 and mask2 intersect? + * int iolinks_subset(mask1, mask2) Is mask1 a subset of mask2? + * int iolinks_empty(mask) Is mask empty (no bits sets)? + * int iolinks_full(mask) Is mask full (all bits sets)? 
+ * int iolinks_weight(mask) Hamming weight - number of set bits + * + * void iolinks_shift_right(dst, src, n) Shift right + * void iolinks_shift_left(dst, src, n) Shift left + * + * int first_iolink(mask) Number lowest set bit, or MAX_NUMIOLINKS + * int next_iolink(iolink, mask) Next iolink past 'iolink', or MAX_NUMIOLINKS + * int first_unset_iolink(mask) First iolink not set in mask, or + * MAX_NUMIOLINKS. + * + * iolinkmask_t iolinkmask_of_iolink(iolink) Return iolinkmask with bit 'iolink' set + * IOLINK_MASK_ALL Initializer - all bits set + * IOLINK_MASK_NONE Initializer - no bits set + * unsigned long *iolinks_addr(mask) Array of unsigned long's in mask + * + * int iolinkmask_scnprintf(buf, len, mask) Format iolinkmask for printing + * int iolinkmask_parse(ubuf, ulen, mask) Parse ascii string as iolinkmask + * int iolinklist_scnprintf(buf, len, mask) Format iolinkmask as list for printing + * int iolinklist_parse(buf, map) Parse ascii string as iolinklist + * + * for_each_iolink_mask(iolink, mask) for-loop iolink over mask + * + * int num_online_iolinks() Number of online IOLINKs + * int num_possible_iolinks() Number of all possible IOLINKs + * + * int iolink_online(iolink) Is some iolink domain online? + * int iolink_possible(iolink) Is some iolink domain possible? + * + * iolink_set_online(iolink) set bit 'iolink' in iolink_online_map + * iolink_set_offline(iolink) clear bit 'iolink' in iolink_online_map + * + * for_each_iolink(iolink) for-loop iolink over iolink_possible_map + * for_each_online_iolink(iolink) for-loop iolink over iolink_online_map + * + * Subtlety: + * 1) The 'type-checked' form of iolink_isset() causes gcc (3.3.2, anyway) + * to generate slightly worse code. So use a simple one-line #define + * for iolink_isset(), instead of wrapping an inline inside a macro, the + * way we do the other calls. 
+ */ + +#include +#include +#include +#include +#include +#include + +#define MAX_NUMIOLINKS MACH_MAX_NUMIOLINKS +#define MAX_NUMIOHUBS MAX_NUMIOLINKS +#define NODE_NUMIOLINKS MACH_NODE_NUMIOLINKS + +typedef struct { DECLARE_BITMAP(bits, MAX_NUMIOLINKS); } iolinkmask_t; +extern iolinkmask_t _unused_iolinkmask_arg_; + +#define iolink_set(domain, dst) __iolink_set((domain), &(dst)) +#define node_iolink_set(node, link, dst) \ + iolink_set(node_iolink_to_domain((node), (link)), (dst)) +#define iohub_set(domain, dst) iolink_set((domain), (dst)) +#define node_iohub_set(node, link, dst) \ + iohub_set(node_iohub_to_domain((node), (link)), (dst)) +#define rdma_set(domain, dst) iolink_set((domain), (dst)) +#define node_rdma_set(node, link, dst) \ + rdma_set(node_rdma_to_domain((node), (link)), (dst)) +static inline void __iolink_set(int domain, volatile iolinkmask_t *dstp) +{ + set_bit(domain, dstp->bits); +} + +#define iolink_clear(domain, dst) __iolink_clear((domain), &(dst)) +#define node_iolink_clear(node, link, dst) \ + iolink_clear(node_iolink_to_domain((node), (link)), (dst)) +#define iohub_clear(domain, dst) iolink_clear((domain), (dst)) +#define node_iohub_clear(node, link, dst) \ + iohub_clear(node_iohub_to_domain((node), (link)), (dst)) +#define rdma_clear(domain, dst) iolink_clear((domain), (dst)) +#define node_rdma_clear(node, link, dst) \ + rdma_clear(node_rdma_to_domain((node), (link)), (dst)) +static inline void __iolink_clear(int domain, volatile iolinkmask_t *dstp) +{ + clear_bit(domain, dstp->bits); +} + +#define iolinks_setall(dst) __iolinks_setall(&(dst), MAX_NUMIOLINKS) +static inline void __iolinks_setall(iolinkmask_t *dstp, int nbits) +{ + bitmap_fill(dstp->bits, nbits); +} + +#define iolinks_clear(dst) __iolinks_clear(&(dst), MAX_NUMIOLINKS) +static inline void __iolinks_clear(iolinkmask_t *dstp, int nbits) +{ + bitmap_zero(dstp->bits, nbits); +} + +/* No static inline type checking - see Subtlety (1) above. 
*/ +#define iolink_isset(domain, iolinkmask) test_bit((domain), (iolinkmask).bits) +#define node_iolink_isset(node, link, iolinkmask) \ + iolink_isset(node_iolink_to_domain((node), (link)), \ + (iolinkmask).bits) +#define iohub_isset(domain, iolinkmask) iolink_isset((domain), (iolinkmask)) +#define node_iohub_isset(node, link, iolinkmask) \ + iohub_isset(node_iohub_to_domain((node), (link)), \ + (iolinkmask).bits) +#define rdma_isset(domain, iolinkmask) iolink_isset((domain), (iolinkmask)) +#define node_rdma_isset(node, link, iolinkmask) \ + rdma_isset(node_rdma_to_domain((node), (link)), \ + (iolinkmask).bits) + +#define iolink_test_and_set(domain, iolinkmask) \ + __iolink_test_and_set((domain), &(iolinkmask)) +#define node_iolink_test_and_set(node, link, iolinkmask) \ + iolink_test_and_set(node_iolink_to_domain((node), (link)), \ + (iolinkmask)) +#define iohub_test_and_set(domain, iolinkmask) \ + iolink_test_and_set((domain), (iolinkmask)) +#define node_iohub_test_and_set(node, link, iolinkmask) \ + iohub_test_and_set(node_iohub_to_domain((node), (link)), \ + (iolinkmask)) +#define rdma_test_and_set(domain, iolinkmask) \ + iolink_test_and_set((domain), (iolinkmask)) +#define node_rdma_test_and_set(node, link, iolinkmask) \ + rdma_test_and_set(node_rdma_to_domain((node), (link)), \ + (iolinkmask)) +static inline int __iolink_test_and_set(int domain, iolinkmask_t *addr) +{ + return test_and_set_bit(domain, addr->bits); +} + +#define iolinks_and(dst, src1, src2) \ + __iolinks_and(&(dst), &(src1), &(src2), MAX_NUMIOLINKS) +static inline void __iolinks_and(iolinkmask_t *dstp, const iolinkmask_t *src1p, + const iolinkmask_t *src2p, int nbits) +{ + bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); +} + +#define iolinks_or(dst, src1, src2) \ + __iolinks_or(&(dst), &(src1), &(src2), MAX_NUMIOLINKS) +static inline void __iolinks_or(iolinkmask_t *dstp, const iolinkmask_t *src1p, + const iolinkmask_t *src2p, int nbits) +{ + bitmap_or(dstp->bits, src1p->bits, 
src2p->bits, nbits); +} + +#define iolinks_xor(dst, src1, src2) \ + __iolinks_xor(&(dst), &(src1), &(src2), MAX_NUMIOLINKS) +static inline void __iolinks_xor(iolinkmask_t *dstp, const iolinkmask_t *src1p, + const iolinkmask_t *src2p, int nbits) +{ + bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); +} + +#define iolinks_andnot(dst, src1, src2) \ + __iolinks_andnot(&(dst), &(src1), &(src2), MAX_NUMIOLINKS) +static inline void __iolinks_andnot(iolinkmask_t *dstp, const iolinkmask_t *src1p, + const iolinkmask_t *src2p, int nbits) +{ + bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); +} + +#define iolinks_complement(dst, src) \ + __iolinks_complement(&(dst), &(src), MAX_NUMIOLINKS) +static inline void __iolinks_complement(iolinkmask_t *dstp, + const iolinkmask_t *srcp, int nbits) +{ + bitmap_complement(dstp->bits, srcp->bits, nbits); +} + +#define iolinks_equal(src1, src2) \ + __iolinks_equal(&(src1), &(src2), MAX_NUMIOLINKS) +static inline int __iolinks_equal(const iolinkmask_t *src1p, + const iolinkmask_t *src2p, int nbits) +{ + return bitmap_equal(src1p->bits, src2p->bits, nbits); +} + +#define iolinks_intersects(src1, src2) \ + __iolinks_intersects(&(src1), &(src2), MAX_NUMIOLINKS) +static inline int __iolinks_intersects(const iolinkmask_t *src1p, + const iolinkmask_t *src2p, int nbits) +{ + return bitmap_intersects(src1p->bits, src2p->bits, nbits); +} + +#define iolinks_subset(src1, src2) \ + __iolinks_subset(&(src1), &(src2), MAX_NUMIOLINKS) +static inline int __iolinks_subset(const iolinkmask_t *src1p, + const iolinkmask_t *src2p, int nbits) +{ + return bitmap_subset(src1p->bits, src2p->bits, nbits); +} + +#define iolinks_empty(src) __iolinks_empty(&(src), MAX_NUMIOLINKS) +static inline int __iolinks_empty(const iolinkmask_t *srcp, int nbits) +{ + return bitmap_empty(srcp->bits, nbits); +} + +#define iolinks_full(iolinkmask) __iolinks_full(&(iolinkmask), MAX_NUMIOLINKS) +static inline int __iolinks_full(const iolinkmask_t *srcp, int nbits) +{ + 
return bitmap_full(srcp->bits, nbits); +} + +#define iolinks_weight(iolinkmask) __iolinks_weight(&(iolinkmask), MAX_NUMIOLINKS) +static inline int __iolinks_weight(const iolinkmask_t *srcp, int nbits) +{ + return bitmap_weight(srcp->bits, nbits); +} + +#define iolinks_shift_right(dst, src, n) \ + __iolinks_shift_right(&(dst), &(src), (n), MAX_NUMIOLINKS) +static inline void __iolinks_shift_right(iolinkmask_t *dstp, + const iolinkmask_t *srcp, int n, int nbits) +{ + bitmap_shift_right(dstp->bits, srcp->bits, n, nbits); +} + +#define iolinks_shift_left(dst, src, n) \ + __iolinks_shift_left(&(dst), &(src), (n), MAX_NUMIOLINKS) +static inline void __iolinks_shift_left(iolinkmask_t *dstp, + const iolinkmask_t *srcp, int n, int nbits) +{ + bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); +} + +/* FIXME: better would be to fix all architectures to never return + > MAX_NUMIOLINKS, then the silly min_ts could be dropped. */ + +#define first_iolink(src) __first_iolink(&(src)) +static inline int __first_iolink(const iolinkmask_t *srcp) +{ + return min_t(int, MAX_NUMIOLINKS, find_first_bit(srcp->bits, MAX_NUMIOLINKS)); +} + +#define next_iolink(n, src) __next_iolink((n), &(src)) +static inline int __next_iolink(int n, const iolinkmask_t *srcp) +{ + return min_t(int, MAX_NUMIOLINKS, find_next_bit(srcp->bits, + MAX_NUMIOLINKS, n+1)); +} + +#define iolinkmask_of_iolink(domain) \ +({ \ + typeof(_unused_iolinkmask_arg_) m; \ + if (sizeof(m) == sizeof(unsigned long)) { \ + m.bits[0] = 1UL<<(domain); \ + } else { \ + iolinks_clear(m); \ + iolink_set((domain), m); \ + } \ + m; \ +}) +#define iolinkmask_of_node_iolink(node, link) \ + iolinkmask_of_iolink(node_iohub_to_domain((node), (link))) + +#define first_unset_iolink(mask) __first_unset_iolink(&(mask)) +static inline int __first_unset_iolink(const iolinkmask_t *maskp) +{ + return min_t(int,MAX_NUMIOLINKS, + find_first_zero_bit(maskp->bits, MAX_NUMIOLINKS)); +} + +#define IOLINK_MASK_LAST_WORD 
BITMAP_LAST_WORD_MASK(MAX_NUMIOLINKS) + +#if MAX_NUMIOLINKS <= BITS_PER_LONG + +#define IOLINK_MASK_ALL \ +((iolinkmask_t) { { \ + [BITS_TO_LONGS(MAX_NUMIOLINKS)-1] = IOLINK_MASK_LAST_WORD \ +} }) + +#else + +#define IOLINK_MASK_ALL \ +((iolinkmask_t) { { \ + [0 ... BITS_TO_LONGS(MAX_NUMIOLINKS)-2] = ~0UL, \ + [BITS_TO_LONGS(MAX_NUMIOLINKS)-1] = IOLINK_MASK_LAST_WORD \ +} }) + +#endif + +#define IOLINK_MASK_NONE \ +((iolinkmask_t) { { \ + [0 ... BITS_TO_LONGS(MAX_NUMIOLINKS)-1] = 0UL \ +} }) + +#define CHUNKSZ 32 +#define nbits_to_hold_value(val) fls(val) +#define BASEDEC 10 /* fancier cpuset lists input in decimal */ + +/** + * bitmap_scnprintf - convert bitmap to an ASCII hex string. + * @buf: byte buffer into which string is placed + * @buflen: reserved size of @buf, in bytes + * @maskp: pointer to bitmap to convert + * @nmaskbits: size of bitmap, in bits + * + * Exactly @nmaskbits bits are displayed. Hex digits are grouped into + * comma-separated sets of eight digits per set. Returns the number of + * characters which were written to *buf, excluding the trailing \0. + */ +static int bitmap_scnprintf(char *buf, unsigned int buflen, + const unsigned long *maskp, int nmaskbits) +{ + int i, word, bit, len = 0; + unsigned long val; + const char *sep = ""; + int chunksz; + u32 chunkmask; + + chunksz = nmaskbits & (CHUNKSZ - 1); + if (chunksz == 0) + chunksz = CHUNKSZ; + + i = ALIGN(nmaskbits, CHUNKSZ) - CHUNKSZ; + for (; i >= 0; i -= CHUNKSZ) { + chunkmask = ((1ULL << chunksz) - 1); + word = i / BITS_PER_LONG; + bit = i % BITS_PER_LONG; + val = (maskp[word] >> bit) & chunkmask; + len += scnprintf(buf+len, buflen-len, "%s%0*lx", sep, + (chunksz+3)/4, val); + chunksz = CHUNKSZ; + sep = ","; + } + return len; +} +#undef CHUNKSZ + +/* + * bscnl_emit(buf, buflen, rbot, rtop, bp) + * + * Helper routine for bitmap_scnlistprintf(). Write decimal number + * or range to buf, suppressing output past buf+buflen, with optional + * comma-prefix. 
Return len of what was written to *buf, excluding the + * trailing \0. + */ +static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len) +{ + if (len > 0) + len += scnprintf(buf + len, buflen - len, ","); + if (rbot == rtop) + len += scnprintf(buf + len, buflen - len, "%d", rbot); + else + len += scnprintf(buf + len, buflen - len, "%d-%d", rbot, rtop); + return len; +} + +/** + * bitmap_scnlistprintf - convert bitmap to list format ASCII string + * @buf: byte buffer into which string is placed + * @buflen: reserved size of @buf, in bytes + * @maskp: pointer to bitmap to convert + * @nmaskbits: size of bitmap, in bits + * + * Output format is a comma-separated list of decimal numbers and + * ranges. Consecutively set bits are shown as two hyphen-separated + * decimal numbers, the smallest and largest bit numbers set in + * the range. Output format is compatible with the format + * accepted as input by bitmap_parselist(). + * + * The return value is the number of characters which were written to *buf + * excluding the trailing '\0', as per ISO C99's scnprintf. 
+ */ +static int bitmap_scnlistprintf(char *buf, unsigned int buflen, + const unsigned long *maskp, int nmaskbits) +{ + int len = 0; + /* current bit is 'cur', most recently seen range is [rbot, rtop] */ + int cur, rbot, rtop; + + if (buflen == 0) + return 0; + buf[0] = 0; + + cur = find_first_bit(maskp, nmaskbits); + rbot = cur; + while (cur < nmaskbits) { + rtop = cur; + cur = find_next_bit(maskp, nmaskbits, cur+1); + if (cur >= nmaskbits || cur > rtop + 1) { + len = bscnl_emit(buf, buflen, rbot, rtop, len); + rbot = cur; + } + } + return len; +} + +#define iolinks_addr(src) ((src).bits) + +#define iolinkmask_scnprintf(buf, len, src) \ + __iolinkmask_scnprintf((buf), (len), &(src), MAX_NUMIOLINKS) +static inline int __iolinkmask_scnprintf(char *buf, int len, + const iolinkmask_t *srcp, int nbits) +{ + return bitmap_scnprintf(buf, len, srcp->bits, nbits); +} + +#define iolinkmask_parse(ubuf, ulen, dst) \ + __iolinkmask_parse((ubuf), (ulen), &(dst), MAX_NUMIOLINKS) +static inline int __iolinkmask_parse(const char __user *buf, int len, + iolinkmask_t *dstp, int nbits) +{ + return bitmap_parse(buf, len, dstp->bits, nbits); +} + +#define iolinklist_scnprintf(buf, len, src) \ + __iolinklist_scnprintf((buf), (len), &(src), MAX_NUMIOLINKS) +static inline int __iolinklist_scnprintf(char *buf, int len, + const iolinkmask_t *srcp, int nbits) +{ + return bitmap_scnlistprintf(buf, len, srcp->bits, nbits); +} + +#define iolinklist_parse(buf, dst) __iolinklist_parse((buf), &(dst), MAX_NUMIOLINKS) +static inline int __iolinklist_parse(const char *buf, iolinkmask_t *dstp, int nbits) +{ + return bitmap_parselist(buf, dstp->bits, nbits); +} + +#if defined(CONFIG_IOHUB_DOMAINS) && MAX_NUMIOLINKS > 1 +#define for_each_iolink_mask(domain, mask) \ + for ((domain) = first_iolink(mask); \ + (domain) < MAX_NUMIOLINKS; \ + (domain) = next_iolink((domain), (mask))) +#define for_each_node_iolink_mask(domain, node, link, mask) \ + for ((domain) = first_iolink(mask), \ + (node) = 
iolink_domain_to_node((domain)), \ + (link) = iolink_domain_to_link((domain)); \ + (domain) < MAX_NUMIOLINKS; \ + (domain) = next_iolink((domain), (mask)), \ + (node) = iolink_domain_to_node((domain)), \ + (link) = iolink_domain_to_link((domain))) +#else /* MAX_NUMIOLINKS == 1 */ +#define for_each_iolink_mask(domain, mask) \ + if (HAS_MACHINE_E2K_IOHUB) \ + for ((domain) = 0; (domain) < 1; (domain)++) +#define for_each_node_iolink_mask(domain, node, link, mask) \ + if (HAS_MACHINE_E2K_IOHUB) \ + for ((domain) = 0, (node) = 0, (link) = 0; \ + (domain) < 1; (domain)++) +#endif /* MAX_NUMIOLINKS */ + +/* + * The following particular system iolinkmasks and operations + * on them manage all possible and online iolinks. + */ + +#if defined(CONFIG_IOHUB_DOMAINS) && MAX_NUMIOLINKS > 1 +extern int iolinks_num; +extern iolinkmask_t iolink_iohub_map; +extern iolinkmask_t iolink_online_iohub_map; +extern int iolink_iohub_num; +extern int iolink_online_iohub_num; +extern iolinkmask_t iolink_rdma_map; +extern iolinkmask_t iolink_online_rdma_map; +extern int iolink_rdma_num; +extern int iolink_online_rdma_num; + +#define num_online_iolinks() (num_online_iohubs() + num_online_rdmas()) +#define num_possible_iolinks() iolinks_num +#define num_online_iohubs() iolink_online_iohub_num +#define num_possible_iohubs() iolink_iohub_num +#define num_online_rdmas() iolink_online_rdma_num +#define num_possible_rdmas() iolink_rdma_num +#define iolink_online(domain) (iohub_online(domain) || rdma_online(domain)) +#define iolink_possible(domain) (iohab_possible(domain) || \ + rdma_possible(domain)) +#define node_iolink_online(node, link) \ + iolink_online(node_iolink_to_domain(node, link)) +#define node_iolink_possible(node, link) \ + iolink_possible(node_iolink_to_domain(node, link)) +#define iohub_online(domain) iolink_isset((domain), iolink_online_iohub_map) +#define iohab_possible(domain) iolink_isset((domain), iolink_iohub_map) +#define node_iohub_online(node, link) \ + 
iohub_online(node_iohub_to_domain(node, link)) +#define node_iohub_possible(node, link) \ + iohab_possible(node_iohub_to_domain(node, link)) +#define first_iohub_online() first_iolink(iolink_online_iohub_map) +#define rdma_online(domain) iolink_isset((domain), iolink_online_rdma_map) +#define rdma_possible(domain) iolink_isset((domain), iolink_rdma_map) +#define node_rdma_online(node, link) \ + rdma_online(node_rdma_to_domain(node, link)) +#define node_rdma_possible(node, link) \ + rdma_possible(node_rdma_to_domain(node, link)) +#else +#define iolinks_num 1 +#define iolink_iohub_num 1 +#define num_online_iolinks() 1 +#define num_possible_iolinks() 1 +#define num_online_iohubs() 1 +#define num_possible_iohubs() 1 +#define num_online_rdmas() 0 +#define num_possible_rdmas() 0 +#define iolink_online(domain) ((domain) == 0) +#define iolink_possible(domain) ((domain) == 0) +#define node_iolink_online(node, link) \ + ((node) == 0 && (link) == 0) +#define node_iolink_possible(node, link) \ + ((node) == 0 && (link) == 0) +#define iohub_online(domain) ((domain) == 0) +#define iohab_possible(domain) ((domain) == 0) +#define node_iohub_online(node, link) \ + ((node) == 0 && (link) == 0) +#define node_iohub_possible(node, link) \ + ((node) == 0 && (link) == 0) +#define first_iohub_online() 0 +#define rdma_online(domain) 0 +#define rdma_possible(domain) 0 +#define node_rdma_online(node, link) 0 +#define node_rdma_possible(node, link) 0 +#endif + +#define iohub_set_online(domain) \ + set_bit((domain), iolink_online_iohub_map.bits) +#define iohub_set_offline(domain) \ + clear_bit((domain), iolink_online_iohub_map.bits) +#define node_iohub_set_online(node, link) \ + iohub_set_online(node_iohub_to_domain((node), (link)) +#define node_iohub_set_offline(node, link) \ + iohub_set_offline(node_iohub_to_domain((node), (link)) +#define rdma_set_online(domain) \ + set_bit((domain), iolink_online_rdma_map.bits) +#define rdma_set_offline(domain) \ + clear_bit((domain), 
iolink_online_rdma_map.bits) +#define node_rdma_set_online(node, link) \ + rdma_set_online(node_rdma_to_domain((node), (link)) +#define node_rdma_set_offline(node, link) \ + rdma_set_offline(node_rdma_to_domain((node), (link)) + +#define for_each_iohub(domain) \ + for_each_iolink_mask((domain), iolink_iohub_map) +#define for_each_online_iohub(domain) \ + for_each_iolink_mask((domain), iolink_online_iohub_map) +#define for_each_node_iohub(domain, node, link) \ + for_each_node_iolink_mask((domain), (node), (link), \ + iolink_iohub_map) +#define for_each_online_node_iohub(domain, node, link) \ + for_each_node_iolink_mask((domain), (node), (link), \ + iolink_online_iohub_map) +#define for_each_rdma(domain) \ + for_each_iolink_mask((domain), iolink_rdma_map) +#define for_each_online_rdma(domain) \ + for_each_iolink_mask((domain), iolink_online_rdma_map) +#define for_each_node_rdma(domain, node, link) \ + for_each_node_iolink_mask((domain), (node), (link), \ + iolink_rdma_map) +#define for_each_online_node_rdma(domain, node, link) \ + for_each_node_iolink_mask((domain), (node), (link), \ + iolink_online_rdma_map) + +#endif /* __ASM_L_IOLINKMASK_H */ diff --git a/arch/l/include/asm/ipi.h b/arch/l/include/asm/ipi.h new file mode 100644 index 000000000000..9209adce018e --- /dev/null +++ b/arch/l/include/asm/ipi.h @@ -0,0 +1,164 @@ +#ifndef _ASM_L_IPI_H +#define _ASM_L_IPI_H + +#ifdef CONFIG_L_LOCAL_APIC + +/* + * Copyright 2004 James Cleverdon, IBM. + * Subject to the GNU Public License, v.2 + * + * Generic APIC InterProcessor Interrupt code. + * + * Moved to include file by James Cleverdon from + * arch/x86-64/kernel/smp.c + * + * Copyrights from kernel/smp.c: + * + * (c) 1995 Alan Cox, Building #3 + * (c) 1998-99, 2000 Ingo Molnar + * (c) 2002,2003 Andi Kleen, SuSE Labs. + * Subject to the GNU Public License, v.2 + */ + +#include +#include +#include + +/* + * the following functions deal with sending IPIs between CPUs. 
+ * + * We use 'broadcast', CPU->CPU IPIs and self-IPIs too. + */ + +static inline unsigned int __prepare_ICR(unsigned int shortcut, int vector, + unsigned int dest) +{ + unsigned int icr = shortcut | dest; + + switch (vector) { + default: + icr |= APIC_DM_FIXED | vector; + break; + case NMI_VECTOR: + icr |= APIC_DM_NMI; + break; + } + return icr; +} + +static inline int __prepare_ICR2(unsigned int mask) +{ + return SET_APIC_DEST_FIELD(mask); +} + +static inline void __xapic_wait_icr_idle(void) +{ + while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY) + cpu_relax(); +} + +static inline void +__default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest) +{ + /* + * Subtle. In the case of the 'never do double writes' workaround + * we have to lock out interrupts to be safe. As we don't care + * of the value read we use an atomic rmw access to avoid costly + * cli/sti. Otherwise we use an even cheaper single atomic write + * to the APIC. + */ + unsigned int cfg; + + /* + * Wait for idle. + */ + __xapic_wait_icr_idle(); + + /* + * No need to touch the target chip field + */ + cfg = __prepare_ICR(shortcut, vector, dest); + + /* + * Send the IPI. The write to APIC_ICR fires this off. + */ + native_apic_mem_write(APIC_ICR, cfg); +} + +/* + * This is used to send an IPI with no shorthand notation (the destination is + * specified in bits 56 to 63 of the ICR). + */ +static inline void + __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest) +{ + unsigned long cfg; + + /* + * Wait for idle. + */ + if (unlikely(vector == NMI_VECTOR)) + safe_apic_wait_icr_idle(); + else + __xapic_wait_icr_idle(); + + /* + * prepare target chip field + */ + cfg = __prepare_ICR2(mask); + native_apic_mem_write(APIC_ICR2, cfg); + + /* + * program the ICR + */ + cfg = __prepare_ICR(0, vector, dest); + + /* + * Send the IPI. The write to APIC_ICR fires this off. 
+ */ + native_apic_mem_write(APIC_ICR, cfg); +} + +extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, + int vector); +extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, + int vector); +#if 0 +extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, + int vector); +extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, + int vector); +#endif + +/* Avoid include hell */ +#define NMI_VECTOR 0x02 + +extern int no_broadcast; + +static inline void __default_local_send_IPI_allbutself(int vector) +{ + if (no_broadcast || vector == NMI_VECTOR) + apic->send_IPI_mask_allbutself(cpu_online_mask, vector); + else + __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical); +} + +static inline void __default_local_send_IPI_all(int vector) +{ + if (no_broadcast || vector == NMI_VECTOR) + apic->send_IPI_mask(cpu_online_mask, vector); + else + __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical); +} + +#ifdef CONFIG_L_X86_32 +extern void default_send_IPI_mask_logical(const struct cpumask *mask, + int vector); +extern void default_send_IPI_allbutself(int vector); +extern void default_send_IPI_all(int vector); +extern void default_send_IPI_self(int vector); +#endif + +#endif + +#endif /* _ASM_L_IPI_H */ diff --git a/arch/l/include/asm/irq_numbers.h b/arch/l/include/asm/irq_numbers.h new file mode 100644 index 000000000000..a2f34e381185 --- /dev/null +++ b/arch/l/include/asm/irq_numbers.h @@ -0,0 +1,9 @@ +#ifndef _ASM_L_IRQ_NUMBERS_H +#define _ASM_L_IRQ_NUMBERS_H + +#include + +/* Number of additional (chained) interrupts */ +#define I2C_SPI_IRQS_NUM 2 + +#endif diff --git a/arch/l/include/asm/irq_remapping.h b/arch/l/include/asm/irq_remapping.h new file mode 100644 index 000000000000..d0d264e39ed8 --- /dev/null +++ b/arch/l/include/asm/irq_remapping.h @@ -0,0 +1,6 @@ +#ifndef _ASM_L_IRQ_REMAPPING_H +#define _ASM_L_IRQ_REMAPPING_H + +#define 
IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8) + +#endif /* _ASM_L_IRQ_REMAPPING_H */ diff --git a/arch/l/include/asm/irq_vectors.h b/arch/l/include/asm/irq_vectors.h new file mode 100644 index 000000000000..2141f821282b --- /dev/null +++ b/arch/l/include/asm/irq_vectors.h @@ -0,0 +1,201 @@ +#ifndef _ASM_L_IRQ_VECTORS_H +#define _ASM_L_IRQ_VECTORS_H + +#include + +/* + * Linux IRQ vector layout. + * + * There are 256 IDT entries (per CPU - each entry is 8 bytes) which can + * be defined by Linux. They are used as a jump table by the CPU when a + * given vector is triggered - by a CPU-external, CPU-internal or + * software-triggered event. + * + * Linux sets the kernel code address each entry jumps to early during + * bootup, and never changes them. This is the general layout of the + * IDT entries: + * + * Vectors 0 ... 31 : system traps and exceptions - hardcoded events + * Vectors 32 ... 127 : device interrupts + * Vector 128 : legacy int80 syscall interface + * Vectors 129 ... 237 : device interrupts + * Vectors 238 ... 255 : special interrupts + * + * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table. + * + * This file enumerates the exact layout of them: + */ + +#define NMI_VECTOR 0x02 +#define MCE_VECTOR 0x12 + +/* + * IDT vectors usable for external interrupt sources start + * at 0x20: + */ +#define FIRST_EXTERNAL_VECTOR 0x20 +#if 0 +/* + * We start allocating at 0x21 to spread out vectors evenly between + * priority levels. (0x80 is the syscall vector) + */ +#define VECTOR_OFFSET_START 1 +#else +#define VECTOR_OFFSET_START 0 +#endif + +#if 0 +#ifdef CONFIG_X86_32 +# define SYSCALL_VECTOR 0x80 +# define IA32_SYSCALL_VECTOR 0x80 +#else +# define IA32_SYSCALL_VECTOR 0x80 +#endif +#endif + +/* + * Reserve the lowest usable priority level 0x20 - 0x2f for triggering + * cleanup after irq migration. + */ +#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR + +/* + * Vectors 0x30-0x3f are used for ISA interrupts. 
+ */ +#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10) + +#define IRQ1_VECTOR (IRQ0_VECTOR + 1) +#define IRQ2_VECTOR (IRQ0_VECTOR + 2) +#define IRQ3_VECTOR (IRQ0_VECTOR + 3) +#define IRQ4_VECTOR (IRQ0_VECTOR + 4) +#define IRQ5_VECTOR (IRQ0_VECTOR + 5) +#define IRQ6_VECTOR (IRQ0_VECTOR + 6) +#define IRQ7_VECTOR (IRQ0_VECTOR + 7) +#define IRQ8_VECTOR (IRQ0_VECTOR + 8) +#define IRQ9_VECTOR (IRQ0_VECTOR + 9) +#define IRQ10_VECTOR (IRQ0_VECTOR + 10) +#define IRQ11_VECTOR (IRQ0_VECTOR + 11) +#define IRQ12_VECTOR (IRQ0_VECTOR + 12) +#define IRQ13_VECTOR (IRQ0_VECTOR + 13) +#define IRQ14_VECTOR (IRQ0_VECTOR + 14) +#define IRQ15_VECTOR (IRQ0_VECTOR + 15) + +/* + * Special IRQ vectors used by the SMP architecture, 0xf0-0xff + * + * some of the following vectors are 'rare', they are merged + * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. + * TLB, reschedule and local APIC vectors are performance-critical. + */ + +#define SPURIOUS_APIC_VECTOR 0xff +/* + * Sanity check + */ +#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F) +# error SPURIOUS_APIC_VECTOR definition error +#endif + +#if 0 +#define ERROR_APIC_VECTOR 0xfe +#define RESCHEDULE_VECTOR 0xfd +#define CALL_FUNCTION_VECTOR 0xfc +#define CALL_FUNCTION_SINGLE_VECTOR 0xfb +#define THERMAL_APIC_VECTOR 0xfa +#define THRESHOLD_APIC_VECTOR 0xf9 +#define REBOOT_VECTOR 0xf8 + +/* f0-f7 used for spreading out TLB flushes: */ +#define INVALIDATE_TLB_VECTOR_END 0xf7 +#define INVALIDATE_TLB_VECTOR_START 0xf0 +#define NUM_INVALIDATE_TLB_VECTORS 8 + +/* + * Local APIC timer IRQ vector is on a different priority level, + * to work around the 'lost local interrupt if more than 2 IRQ + * sources per level' errata. 
+ */ +#define LOCAL_TIMER_VECTOR 0xef + +/* + * Generic system vector for platform specific use + */ +#define X86_PLATFORM_IPI_VECTOR 0xed + +/* + * Performance monitoring pending work vector: + */ +#define LOCAL_PENDING_VECTOR 0xec + +#define UV_BAU_MESSAGE 0xea + +/* + * Self IPI vector for machine checks + */ +#define MCE_SELF_VECTOR 0xeb +#endif + +/* + * First APIC vector available to drivers: (vectors 0x30-0xee) we + * start at 0x31(0x41) to spread out vectors evenly between priority + * levels. (0x80 is the syscall vector) + */ +#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2) + +#ifdef CONFIG_EPIC +#define NR_VECTORS 1024 +#else +#define NR_VECTORS 256 +#endif +#define NR_VECTORS_APIC 256 + +#define FPU_IRQ 13 + +#define FIRST_VM86_IRQ 3 +#define LAST_VM86_IRQ 15 + +#ifndef __ASSEMBLY__ +static inline int invalid_vm86_irq(int irq) +{ + return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ; +} +#endif + +/* + * Size the maximum number of interrupts. + * + * If the irq_desc[] array has a sparse layout, we can size things + * generously - it scales up linearly with the maximum number of CPUs, + * and the maximum number of IO-APICs, whichever is higher. + * + * In other cases we size more conservatively, to not create too large + * static arrays. + */ + +#if 0 +#define NR_IRQS_LEGACY 16 +#else +#define NR_IRQS_LEGACY 0 +#endif + +#define CPU_VECTOR_LIMIT ( 8 * NR_CPUS ) +#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS ) + +#ifdef CONFIG_L_IO_APIC +# ifdef CONFIG_SPARSE_IRQ +# define NR_IRQS \ + (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? 
\ + (NR_VECTORS + CPU_VECTOR_LIMIT) : \ + (NR_VECTORS + IO_APIC_VECTOR_LIMIT)) +# else +# if NR_CPUS < MAX_IO_APICS +# define NR_IRQS (NR_VECTORS + 4*CPU_VECTOR_LIMIT) +# else +# define NR_IRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT) +# endif +# endif +#else /* !CONFIG_L_IO_APIC: */ +# define NR_IRQS NR_IRQS_LEGACY +#endif + +#endif /* _ASM_L_IRQ_VECTORS_H */ diff --git a/arch/l/include/asm/irq_work.h b/arch/l/include/asm/irq_work.h new file mode 100644 index 000000000000..5ecb04c02562 --- /dev/null +++ b/arch/l/include/asm/irq_work.h @@ -0,0 +1,16 @@ +#ifndef _ASM_L_IRQ_WORK_H +#define _ASM_L_IRQ_WORK_H + +static inline bool arch_irq_work_has_interrupt(void) +{ + //TODO only arm does it this way! (see bug 120742) +#ifdef CONFIG_SMP + return true; +#else + return false; +#endif +} + +extern void arch_irq_work_raise(void); + +#endif /* _ASM_L_IRQ_WORK_H */ diff --git a/arch/l/include/asm/irqdomain.h b/arch/l/include/asm/irqdomain.h new file mode 100644 index 000000000000..d26075b52885 --- /dev/null +++ b/arch/l/include/asm/irqdomain.h @@ -0,0 +1,63 @@ +#ifndef _ASM_IRQDOMAIN_H +#define _ASM_IRQDOMAIN_H + +#include +#include + +#ifdef CONFIG_X86_LOCAL_APIC +enum { + /* Allocate contiguous CPU vectors */ + X86_IRQ_ALLOC_CONTIGUOUS_VECTORS = 0x1, +}; + +extern struct irq_domain *x86_vector_domain; + +extern void init_irq_alloc_info(struct irq_alloc_info *info, + const struct cpumask *mask); +extern void copy_irq_alloc_info(struct irq_alloc_info *dst, + struct irq_alloc_info *src); +#endif /* CONFIG_X86_LOCAL_APIC */ + +#ifdef CONFIG_X86_IO_APIC +struct device_node; +struct irq_data; + +enum ioapic_domain_type { + IOAPIC_DOMAIN_INVALID, + IOAPIC_DOMAIN_LEGACY, + IOAPIC_DOMAIN_STRICT, + IOAPIC_DOMAIN_DYNAMIC, +}; + +struct ioapic_domain_cfg { + enum ioapic_domain_type type; + const struct irq_domain_ops *ops; + struct device_node *dev; +}; + +extern const struct irq_domain_ops mp_ioapic_irqdomain_ops; + +extern int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int 
virq, + unsigned int nr_irqs, void *arg); +extern void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs); +extern void mp_irqdomain_activate(struct irq_domain *domain, + struct irq_data *irq_data); +extern void mp_irqdomain_deactivate(struct irq_domain *domain, + struct irq_data *irq_data); +extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain); +#endif /* CONFIG_X86_IO_APIC */ + +#ifdef CONFIG_PCI_MSI +extern void arch_init_msi_domain(struct irq_domain *domain); +#else +static inline void arch_init_msi_domain(struct irq_domain *domain) { } +#endif + +#ifdef CONFIG_HT_IRQ +extern void arch_init_htirq_domain(struct irq_domain *domain); +#else +static inline void arch_init_htirq_domain(struct irq_domain *domain) { } +#endif + +#endif diff --git a/arch/l/include/asm/l-uncached.h b/arch/l/include/asm/l-uncached.h new file mode 100644 index 000000000000..5ab2e6cb0861 --- /dev/null +++ b/arch/l/include/asm/l-uncached.h @@ -0,0 +1,11 @@ +#ifndef _L_UNCACHED_H +#define _L_UNCACHED_H + +void *l_alloc_uncached(struct device *dev, size_t size, + phys_addr_t *phys_addr, gfp_t gfp); +void l_free_uncached(struct device *dev, size_t size, void *cpu_addr); + +int l_init_uncached_pool(void); +void l_destroy_uncached_pool(void); + +#endif /* !(_L_UNCACHED_H) */ diff --git a/arch/l/include/asm/l_ide.h b/arch/l/include/asm/l_ide.h new file mode 100644 index 000000000000..5b96ab33f99b --- /dev/null +++ b/arch/l/include/asm/l_ide.h @@ -0,0 +1,13 @@ +#ifndef _L_IDE_H_ +#define _L_IDE_H_ + +#include + +static void l_init_iops (ide_hwif_t *hwif) +{ +} + +#define L_FORCE_NATIVE_MODE 1 +#define L_DEAULT_IDE_DMA_MODE ATA_UDMA5 /* default max UDMA capable */ + +#endif /*_L_IDE_H_*/ diff --git a/arch/l/include/asm/l_pmc.h b/arch/l/include/asm/l_pmc.h new file mode 100644 index 000000000000..f9006a9d82c2 --- /dev/null +++ b/arch/l/include/asm/l_pmc.h @@ -0,0 +1,140 @@ +#ifndef __L_ASM_PMC_H__ +#define __L_ASM_PMC_H__ + +#include +#include +#include 
+#include + +#define PMC_L_MAX_IDLE_STATES 4 + +#define PMC_L_TEMP_RG_CUR_REG_0 0x20 +#define PMC_L_TEMP_RG_CUR_REG_1 0x24 +#define PMC_L_GPE0_STS_REG 0x28 +#define PMC_L_GPE0_STS_CLR 0xf +#define PMC_L_GPE0_EN_REG 0x2c +#define PMC_L_TEMP_RG0_REG 0x30 +#define PMC_L_TEMP_RG1_REG 0x34 +#define PMC_L_TEMP_RG2_REG 0x38 +#define PMC_L_TEMP_RG3_REG 0x3c +#define PMC_L_TEMP_RG_CUR_REG_2 0x40 +#define PMC_L_TEMP_RGX_FALL (0x0 << 12) +#define PMC_L_TEMP_RGX_RISE (0x3 << 12) +#define PMC_L_PC_S0_REG 0x100 +#define PMC_L_PC_S1_REG 0x104 + +#define PMC_L_COFVID_3D_STATUS_REG 0x140 +#define PMC_L_P_STATE_3D_CNTRL_REG 0x148 +#define PMC_L_P_STATE_3D_STATUS_REG 0x14c +#define PMC_L_P_STATE_3D_VALUE_0_REG 0x150 +#define PMC_L_P_STATE_3D_VALUE_1_REG 0x154 +#define PMC_L_P_STATE_3D_VALUE_2_REG 0x158 +#define PMC_L_P_STATE_3D_VALUE_3_REG 0x15c +#define PMC_L_C_STATE_3D_REG 0x160 +#define PMC_L_2D_FC_REG 0x164 + +#define PMC_L_REGS_AREA_SIZE 0x168 + +/* Bits in PMC registers: */ +/* P_State_value_X (RW): */ +#define PMC_L_P_STATE_VALUE_VID_MASK 0x0000fe00 +#define PMC_L_P_STATE_VALUE_VID_SHIFT 9 +#define PMC_L_P_STATE_VALUE_DID_MASK 0x000001f0 +#define PMC_L_P_STATE_VALUE_DID_SHIFT 4 +#define PMC_L_P_STATE_VALUE_FID_MASK 0x0000000f +#define PMC_L_P_STATE_VALUE_FID_SHIFT 0 + +/* P_State_Cntrl (RW): */ +#define PMC_L_P_STATE_CNTRL_MASK 0x3 +#define PMC_L_P_STATE_CNTRL_SHIFT 0 +#define PMC_L_P_STATE_CNTRL_P0_VAL 0x0 +#define PMC_L_P_STATE_CNTRL_P1_VAL 0x1 +#define PMC_L_P_STATE_CNTRL_P2_VAL 0x2 +#define PMC_L_P_STATE_CNTRL_P3_VAL 0x3 + +/* P_State_status (RO): */ +#define PMC_L_P_STATE_STATUS_MASK 0x3 +#define PMC_L_P_STATE_STATUS_SHIFT 0 + +/* P_State_3D_Cntrl (RW): */ +#define PMC_L_P_STATE_3D_CNTRL_MASK 0x3 +#define PMC_L_P_STATE_3D_CNTRL_SHIFT 0 +#define PMC_L_P_STATE_3D_CNTRL_P0_VAL 0x0 +#define PMC_L_P_STATE_3D_CNTRL_P1_VAL 0x1 +#define PMC_L_P_STATE_3D_CNTRL_P2_VAL 0x2 +#define PMC_L_P_STATE_3D_CNTRL_P3_VAL 0x3 + +/* COVFID_status (contains RW, Status, RM, RO bits): */ +#define 
PMC_L_COVFID_STATUS_PMCEN_VAL 0x0000000000000001 /* RW - 0 Bit */ +#define PMC_L_COVFID_STATUS_RMWEN_VAL 0x4000000000000000 /* RM - 62 Bit */ +#define PMC_L_COVFID_STATUS_VMAX_MASK 0x3f80000000000000 /* RM - 61:55 Bits */ +#define PMC_L_COVFID_STATUS_VMAX_SHIFT 55 +#define PMC_L_COVFID_STATUS_VMIN_MASK 0x007f000000000000 /* RM - 54:48 Bits */ +#define PMC_L_COVFID_STATUS_VMIN_SHIFT 48 +#define PMC_L_COVFID_STATUS_FMAX_MASK 0x0000ff0000000000 /* RM - 26:20 Bits */ +#define PMC_L_COVFID_STATUS_FMAX_SHIFT 40 +#define PMC_L_COVFID_STATUS_TRANS_VAL 0x0000000000000002 /* RO - 1 Bit */ +#define PMC_L_COVFID_STATUS_PNUM_MASK 0x000000000000000c /* RO - 3:2 Bits */ +#define PMC_L_COVFID_STATUS_PNUM_SHIFT 2 +#define PMC_L_COVFID_STATUS_VID_MASK 0x000000000003f000 /* RO - 18:12 Bits */ +#define PMC_L_COVFID_STATUS_VID_SHIFT 12 +#define PMC_L_COVFID_STATUS_FID_MASK 0x0000000000000ff0 /* RO - 11:4 Bits */ +#define PMC_L_COVFID_STATUS_FID_SHIFT 4 + +#define PMC_L_COVFID_RM_MASK (PMC_L_COVFID_STATUS_VMAX_MASK | \ + PMC_L_COVFID_STATUS_VMIN_MASK | \ + PMC_L_COVFID_STATUS_FMAX_MASK) + +#define PMC_L_MAX_PSTATES 4 +#define PMC_L_PRECISION 10 +#define MAX_NUM_PMCS 1 +#define SPMC_TEMP_BAD_VALUE -1000 + +/* The driver supports 1 passive trip point and 1 critical trip point */ +enum l_pmc_thermal_trip { + LPMC_TRIP_PASSIVE, + LPMC_TRIP_CRITICAL, + LPMC_TRIP_NUM, +}; + +#define LPMC_TRIP_POINTS_MSK ((1 << LPMC_TRIP_NUM) - 1) + +struct l_pmc { + unsigned char type; + unsigned char version; + void __iomem *cntrl_base; + void __iomem *data_base; + unsigned long vrange; /* VMAX, VMIN, FMAX */ + unsigned int data_size; + unsigned int p_state[PMC_L_MAX_PSTATES]; /* VID, + * DID, + * FID + */ + unsigned int freq; /* Frequency in KHz */ + struct pci_dev *pdev; + struct platform_device *i2c_chan; + struct thermal_zone_device *thermal; + enum thermal_device_mode thermal_mode; + int trip_temp[LPMC_TRIP_NUM]; + int trip_hyst[LPMC_TRIP_NUM]; + raw_spinlock_t thermal_lock; + struct 
thermal_cooling_device *cdev; + struct cpufreq_policy *policy; +}; + +extern struct l_pmc l_pmc[MAX_NUM_PMCS]; + +#if defined(CONFIG_L_PMC_MODULE) || defined(CONFIG_L_PMC) || defined(CONFIG_S2_PMC) +extern int spmc_get_temp_cur0(void); +int pmc_l_gpufreq_set_scale(unsigned char scale); +int pmc_l_gpufreq_get_scale(void); +int pmc_l_gpufreq_get_frequency(void); +extern unsigned int load_threshold; +#else +int spmc_get_temp_cur0(void) { return SPMC_TEMP_BAD_VALUE; } +#endif /* CONFIG_L_PMC || CONFIG_S2_PMC */ + + + +#endif /* __L_ASM_PMC_H__ */ + diff --git a/arch/l/include/asm/l_spmc.h b/arch/l/include/asm/l_spmc.h new file mode 100644 index 000000000000..22327e9e1a26 --- /dev/null +++ b/arch/l/include/asm/l_spmc.h @@ -0,0 +1,14 @@ +#ifndef __L_ASM_SPMC_H__ +#define __L_ASM_SPMC_H__ + +#ifdef CONFIG_ACPI_L_SPMC +extern void do_spmc_halt(void); +#else +static inline void do_spmc_halt(void) { + printk(KERN_ERR "Board does not use KPI-2: SPMC is not present.\n"); + return; +} +#endif + +#endif /* __L_ASM_SPMC_H__ */ + diff --git a/arch/l/include/asm/l_timer.h b/arch/l/include/asm/l_timer.h new file mode 100644 index 000000000000..6afbc5286e7c --- /dev/null +++ b/arch/l/include/asm/l_timer.h @@ -0,0 +1,104 @@ +#ifndef _L_ASM_L_TIMER_H +#define _L_ASM_L_TIMER_H + +#include + +/* + * Elbrus timer + */ + +extern struct clock_event_device *global_clock_event; +extern int get_lt_timer(void); +extern u32 lt_read(void); +extern struct clocksource lt_cs; + +/* New timer registers */ +#define PIT_COUNTER_LIMIT 0x00 +#define PIT_COUNTER_START_VALUE 0x04 +#define PIT_COUNTER 0x08 +#define PIT_COUNTER_CONTROL 0x0c +#define PIT_WD_COUNTER 0x10 +#define PIT_WD_COUNTER_LOW PIT_WD_COUNTER +#define PIT_WD_COUNTER_HIGH (PIT_WD_COUNTER_LOW + 0x04) +#define PIT_WD_LIMIT 0x18 +#define PIT_POWER_COUNTER 0x1c +#define PIT_POWER_COUNTER_LOW PIT_POWER_COUNTER +#define PIT_POWER_COUNTER_HIGH (PIT_POWER_COUNTER_LOW + 0x04) +#define PIT_WD_CONTROL 0x24 +#define PIT_RESET_COUNTER 0x28 +#define 
PIT_RESET_COUNTER_LOW PIT_RESET_COUNTER +#define PIT_RESET_COUNTER_HIGH (PIT_RESET_COUNTER_LOW + 0x04) + +typedef struct lt_regs { + u32 counter_limit; /* timer counter limit value */ + u32 counter_start; /* start value of counter */ + u32 counter; /* timer counter */ + u32 counter_cntr; /* timer control register */ + u32 wd_counter; /* watchdog counter */ + u32 wd_prescaler; /* watchdog prescaler */ + u32 wd_limit; /* watchdog limit */ + u32 power_counter_lo; /* power counter low bits */ + u32 power_counter_hi; /* power counter high bits */ + u32 wd_control; /* watchdog control register */ + u32 reset_counter_lo; /* reset counter low bits */ + u32 reset_counter_hi; /* reset counter low bits */ +} lt_regs_t; + +extern unsigned long long lt_phys_base; +extern lt_regs_t *lt_regs; +extern long lt_clock_rate; + +extern void setup_lt_timer(void); +extern int __init init_lt_clocksource(void); + +/* counters registers structure */ +#define LT_COUNTER_SHIFT 9 /* [30: 9] counters value */ +#define LT_COUNTER_LIMIT_SHIFT 31 /* [31] Limit bit */ +#define LT_COUNTER_LIMIT_BIT (1 << LT_COUNTER_LIMIT_SHIFT) + +#define LT_WRITE_COUNTER_VALUE(count) ((count) << LT_COUNTER_SHIFT) +#define LT_READ_COUNTER_VALUE(count) ((count) >> LT_COUNTER_SHIFT) +#define LT_NSEC_PER_COUNTER_INCR 100 /* 10 MHz == 100 nunosec */ + +/* counter control register structure */ +#define LT_COUNTER_CNTR_START 0x00000001 /* start/stop timer */ +#define LT_COUNTER_CNTR_INVERTL 0x00000002 /* invert limit bit */ +#define LT_COUNTER_CNTR_LINIT 0x00000004 /* Limit bit initial state */ + /* 1 - limit bit set to 1 */ + +#define LT_COUNTER_CNTR_LAUNCH (LT_COUNTER_CNTR_START) +#define LT_INVERT_COUNTER_CNTR_LAUNCH (LT_COUNTER_CNTR_LAUNCH | \ + LT_COUNTER_CNTR_INVERTL | \ + LT_COUNTER_CNTR_LINIT) +#define LT_COUNTER_CNTR_STOP (0) + +#define WD_CLOCK_TICK_RATE 10000000L +#define WD_LATCH(tick_rate) (((tick_rate) + HZ/2) / HZ) +#define WD_LIMIT_SHIFT 12 +#define WD_WRITE_COUNTER_VALUE(count) (count) +#define 
WD_READ_COUNTER_VALUE(count) ((count) << WD_LIMIT_SHIFT) +#define WD_SET_COUNTER_VAL(sek) \ + (WD_WRITE_COUNTER_VALUE(WD_CLOCK_TICK_RATE * (sek))) + +#define WD_INTR_MODE 0x1 +#define WD_ENABLE 0x2 +#define WD_EVENT 0x4 + +#define WD_COUNTER_BASE 0x10 + +/* System timer Registers (structure see asm/l_timer_regs.h) */ + +#define COUNTER_LIMIT 0x00 +#define COUNTER_START_VALUE 0x04 +#define L_COUNTER 0x08 +#define COUNTER_CONTROL 0x0c +#define WD_COUNTER_L 0x10 +#define WD_COUNTER_H 0x14 +#define WD_LIMIT 0x18 +#define POWER_COUNTER_L 0x1c +#define POWER_COUNTER_H 0x20 +#define WD_CONTROL 0x24 +#define RESET_COUNTER_L 0x28 +#define RESET_COUNTER_H 0x2c + +#endif /* _L_ASM_L_TIMER_H */ diff --git a/arch/l/include/asm/l_timer_regs.h b/arch/l/include/asm/l_timer_regs.h new file mode 100644 index 000000000000..1846cbd7ab78 --- /dev/null +++ b/arch/l/include/asm/l_timer_regs.h @@ -0,0 +1,119 @@ +#ifndef _L_ASM_L_TIMER_REGS_H +#define _L_ASM_L_TIMER_REGS_H + +#include + +/* + * Elbrus System timer Registers + */ + +#define COUNTER_LIMIT 0x00 +typedef struct counter_limit_fields { + u32 unused : 9; /* [8:0] */ + u32 c_l : 22; /* [30:9] */ + u32 l : 1; /* [31] */ +} counter_limit_fields_t; +typedef union counter_limit { + u32 word; + counter_limit_fields_t fields; +} counter_limit_t; +#define COUNTER_START_VALUE 0x04 +typedef struct counter_st_v_fields { + u32 unused : 9; /* [8:0] */ + u32 c_st_v : 22; /* [30:9] */ + u32 l : 1; /* [31] */ +} counter_st_v_fields_t; +typedef union counter_st_v { + u32 word; + counter_st_v_fields_t fields; +} counter_st_v_t; +#define COUNTER 0x08 +typedef struct counter_fields { + u32 unused : 9; /* [8:0] */ + u32 c : 22; /* [30:9] */ + u32 l : 1; /* [31] */ +} counter_fields_t; +typedef union counter { + u32 word; + counter_fields_t fields; +} counter_t; +#define COUNTER_CONTROL 0x0c +typedef struct counter_control_fields { + u32 s_s : 1; /* [0] */ + u32 inv_l : 1; /* [1] */ + u32 l_ini : 1; /* [2] */ + u32 unused : 29; /* [31:3] */ +} 
counter_control_fields_t; +typedef union counter_control { + u32 word; + counter_control_fields_t fields; +} counter_control_t; +#define WD_COUNTER_L 0x10 +typedef struct wd_counter_l_fields { + u32 wd_c : 32; /* [31:0] */ +} wd_counter_l_fields_t; +typedef union wd_counter_l { + u32 word; + wd_counter_l_fields_t fields; +} wd_counter_l_t; +#define WD_COUNTER_H 0x14 +typedef struct wd_counter_h_fields { + u32 wd_c : 32; /* [31:0] */ +} wd_counter_h_fields_t; +typedef union wd_counter_h { + u32 word; + wd_counter_h_fields_t fields; +} wd_counter_h_t; +#define WD_LIMIT 0x18 +typedef struct wd_limit_fields { + u32 wd_l : 32; /* [31:0] */ +} wd_limit_fields_t; +typedef union wd_limit { + u32 word; + wd_limit_fields_t fields; +} wd_limit_t; +#define POWER_COUNTER_L 0x1c +typedef struct power_counter_l_fields { + u32 pw_c : 32; /* [31:0] */ +} power_counter_l_fields_t; +typedef union power_counter_l { + u32 word; + power_counter_l_fields_t fields; +} power_counter_l_t; +#define POWER_COUNTER_H 0x20 +typedef struct power_counter_h_fields { + u32 pw_c : 32; /* [31:0] */ +} power_counter_h_fields_t; +typedef union power_counter_h { + u32 word; + power_counter_h_fields_t fields; +} power_counter_h_t; +#define WD_CONTROL 0x24 +typedef struct wd_control_fields { + u32 w_m : 1; /* [0] */ + u32 w_out_e : 1; /* [1] */ + u32 w_evn : 1; /* [2] */ + u32 unused : 29; /* [31:3] */ +} wd_control_fields_t; +typedef union wd_control { + u32 word; + wd_control_fields_t fields; +} wd_control_t; +#define RESET_COUNTER_L 0x28 +typedef struct reset_counter_l_fields { + u32 rst : 32; /* [31:0] */ +} reset_counter_l_fields_t; +typedef union reset_counter_l { + u32 word; + reset_counter_l_fields_t fields; +} reset_counter_l_t; +#define RESET_COUNTER_H 0x2c +typedef struct reset_counter_h_fields { + u32 rst : 32; /* [31:0] */ +} reset_counter_h_fields_t; +typedef union reset_counter_h { + u32 word; + reset_counter_h_fields_t fields; +} reset_counter_h_t; + +#endif /* _L_ASM_L_TIMER_REGS_H */ diff 
--git a/arch/l/include/asm/mpspec.h b/arch/l/include/asm/mpspec.h new file mode 100644 index 000000000000..20087e82ba57 --- /dev/null +++ b/arch/l/include/asm/mpspec.h @@ -0,0 +1,635 @@ +#ifndef __L_ASM_MPSPEC_H +#define __L_ASM_MPSPEC_H + +/* + * Structure definitions for SMP machines following the + * Intel Multiprocessing Specification 1.1 and 1.4. + */ + +#ifndef __ASSEMBLY__ + +#include + +#include + +#include +#include +#ifdef CONFIG_E2K +#include +#endif + + +/* + * This tag identifies where the SMP configuration + * information is. + */ +#ifdef __LITTLE_ENDIAN +#define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_') +#elif __BIG_ENDIAN +#define SMP_MAGIC_IDENT ('_'|('P'<<8)|('M'<<16)|('_'<<24)) +#else +#error not byte order defined +#endif /*__BIG_ENDIAN*/ + +/* + * a maximum of NR_CPUS APICs with the current APIC ID architecture. + * a maximum of IO-APICs is summary: + * each IO link can have IOHUB with IO-APIC + * each node can have embeded IO-APIC + */ +#define MAX_LOCAL_APICS (NR_CPUS * 2) /* apic numbering can be with holes */ +#define MAX_IO_APICS (MAX_NUMIOLINKS + MAX_NUMNODES) +#define MAX_APICS MAX_LOCAL_APICS + +#define SMP_FLOATING_TABLE_LEN sizeof(struct intel_mp_floating) + +struct intel_mp_floating +{ + char mpf_signature[4]; /* "_MP_" */ + unsigned long mpf_physptr; /* Configuration table address */ + unsigned char mpf_length; /* Our length (paragraphs) */ + unsigned char mpf_specification;/* Specification version */ + unsigned char mpf_checksum; /* Checksum (makes sum 0) */ + unsigned char mpf_feature1; /* Standard or configuration ? 
*/ + unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */ + unsigned char mpf_feature3; /* Unused (0) */ + unsigned char mpf_feature4; /* Unused (0) */ + unsigned char mpf_feature5; /* Unused (0) */ +}; + +#define MPF_64_BIT_SPECIFICATION 8 /* MPF specification describe */ + /* new MP table compatible */ + /* with 64-bits arch */ +#define MP_SPEC_ADDR_ALIGN 4 /* addresses can be */ + /* word-aligned */ +#define MP_NEW_ADDR_ALIGN 8 /* all addresses should be */ + /* double-word aligned */ + +#define ALIGN_BYTES_DOWN(addr, bytes) (((addr) / (bytes)) * (bytes)) +#define ALIGN_BYTES_UP(addr, bytes) ((((addr) + (bytes)-1) / (bytes)) * \ + (bytes)) +#define MP_ALIGN_BYTES(addr, bytes) ALIGN_BYTES_UP(addr, bytes) + +#define IS_64_BIT_MP_SPECS() \ + (boot_mpf_found->mpf_specification == MPF_64_BIT_SPECIFICATION) +#define MP_ADDR_ALIGN(addr) \ + (unsigned char *)(MP_ALIGN_BYTES((unsigned long long)(addr), \ + (IS_64_BIT_MP_SPECS()) ? MP_NEW_ADDR_ALIGN : \ + MP_SPEC_ADDR_ALIGN)) +#define MP_SIZE_ALIGN(addr) \ + MP_ALIGN_BYTES((unsigned long long)(addr), \ + (IS_64_BIT_MP_SPECS()) ? 
MP_NEW_ADDR_ALIGN : \ + MP_SPEC_ADDR_ALIGN) +#define enable_update_mptable 0 + +struct mpc_table +{ + char mpc_signature[4]; +#define MPC_SIGNATURE "PCMP" + unsigned short mpc_length; /* Size of table */ + char mpc_spec; /* 0x01 */ + char mpc_checksum; + char mpc_oem[8]; + char mpc_productid[12]; + unsigned int mpc_oemptr; /* 0 if not present */ + unsigned short mpc_oemsize; /* 0 if not present */ + unsigned short mpc_oemcount; + unsigned int mpc_lapic; /* APIC address */ + unsigned short mpe_length; /* Extended Table size */ + unsigned char mpe_checksum; /* Extended Table checksum */ + unsigned char reserved; +}; + +/* Followed by entries */ + +#define MP_PROCESSOR 0 +#define MP_BUS 1 +#define MP_IOAPIC 2 +#define MP_INTSRC 3 +#define MP_LINTSRC 4 +#define MP_TIMER 5 +#define MP_I2C_SPI 6 +#define MP_IOLINK 7 +#define MP_PMC 8 +#define MP_BDEV 9 +#define MP_GPIO_ACT 10 +#define MP_IOEPIC 11 + +struct mpc_config_processor +{ + unsigned char mpc_type; /* MP_PROCESSOR */ + unsigned char mpc_apicid; /* Local APIC number */ + unsigned char mpc_apicver; /* Its versions */ + unsigned char mpc_cpuflag; +#define CPU_ENABLED 1 /* Processor is available */ +#define CPU_BOOTPROCESSOR 2 /* Processor is the BP */ + unsigned int mpc_cpufeature; +#define CPU_STEPPING_MASK 0x0F +#define CPU_MODEL_MASK 0xF0 +#define CPU_FAMILY_MASK 0xF00 + unsigned int mpc_featureflag; /* CPUID feature value */ + unsigned int mpc_cepictimerfreq; /* Frequency of CEPIC timer */ + unsigned int mpc_reserved; +}; + +struct mpc_config_bus +{ + unsigned char mpc_type; /* MP_BUS */ + unsigned char mpc_busid; + unsigned char mpc_bustype[6]; +}; + +/* List of Bus Type string values, Intel MP Spec. 
*/ +#define BUSTYPE_EISA "EISA" +#define BUSTYPE_ISA "ISA" +#define BUSTYPE_INTERN "INTERN" /* Internal BUS */ +#define BUSTYPE_MCA "MCA" +#define BUSTYPE_VL "VL" /* Local bus */ +#define BUSTYPE_PCI "PCI" +#define BUSTYPE_PCMCIA "PCMCIA" +#define BUSTYPE_CBUS "CBUS" +#define BUSTYPE_CBUSII "CBUSII" +#define BUSTYPE_FUTURE "FUTURE" +#define BUSTYPE_MBI "MBI" +#define BUSTYPE_MBII "MBII" +#define BUSTYPE_MPI "MPI" +#define BUSTYPE_MPSA "MPSA" +#define BUSTYPE_NUBUS "NUBUS" +#define BUSTYPE_TC "TC" +#define BUSTYPE_VME "VME" +#define BUSTYPE_XPRESS "XPRESS" + +struct mpc_ioapic +{ + unsigned char type; /* MP_IOAPIC */ + unsigned char apicid; + unsigned char apicver; + unsigned char flags; +#define MPC_APIC_USABLE 0x01 + unsigned long apicaddr; +}; + +struct mpc_ioepic { + unsigned char type; /* MP_IOEPIC */ + unsigned char epicver; + unsigned short epicid; + unsigned short nodeid; + unsigned char reserved[2]; + unsigned long epicaddr; +} __packed; + +#define MPC_IOIRQFLAG_PO_BS 0x0 /* Bus specific */ +#define MPC_IOIRQFLAG_PO_AH 0x1 /* Active high */ +#define MPC_IOIRQFLAG_PO_RES 0x2 /* Reserved */ +#define MPC_IOIRQFLAG_PO_AL 0x3 /* Active low */ + +#define MPC_IOIRQFLAG_EL_BS 0x0 /* Bus specific */ +#define MPC_IOIRQFLAG_EL_FS 0x4 /* Trigger by front */ +#define MPC_IOIRQFLAG_EL_RES 0x8 /* Reserved */ +#define MPC_IOIRQFLAG_EL_LS 0xC /* Trigger by level */ + +struct mpc_intsrc +{ + unsigned char type; /* MP_INTSRC */ + unsigned char irqtype; + unsigned short irqflag; + unsigned char srcbus; + unsigned char srcbusirq; + unsigned char dstapic; + unsigned char dstirq; +}; + +enum mp_irq_source_types { + mp_INT = 0, + mp_NMI = 1, + mp_SMI = 2, + mp_ExtINT = 3, + mp_FixINT = 4 /* fixed interrupt pin for PCI */ +}; + +#define MP_IRQDIR_DEFAULT 0 +#define MP_IRQDIR_HIGH 1 +#define MP_IRQDIR_LOW 3 + +#ifdef CONFIG_BIOS +#define MP_IRQ_POLARITY_DEFAULT 0x0 +#define MP_IRQ_POLARITY_HIGH 0x1 +#define MP_IRQ_POLARITY_LOW 0x3 +#define MP_IRQ_POLARITY_MASK 0x3 +#define 
MP_IRQ_TRIGGER_DEFAULT 0x0 +#define MP_IRQ_TRIGGER_EDGE 0x4 +#define MP_IRQ_TRIGGER_LEVEL 0xc +#define MP_IRQ_TRIGGER_MASK 0xc +#endif /* CONFIG_BIOS */ + + +struct mpc_config_lintsrc +{ + unsigned char mpc_type; /* MP_LINTSRC */ + unsigned char mpc_irqtype; + unsigned short mpc_irqflag; + unsigned char mpc_srcbusid; + unsigned char mpc_srcbusirq; + unsigned char mpc_destapic; +#define MP_APIC_ALL 0xFF + unsigned char mpc_destapiclint; +}; + +/* + * Default configurations + * + * 1 2 CPU ISA 82489DX + * 2 2 CPU EISA 82489DX neither IRQ 0 timer nor IRQ 13 DMA chaining + * 3 2 CPU EISA 82489DX + * 4 2 CPU MCA 82489DX + * 5 2 CPU ISA+PCI + * 6 2 CPU EISA+PCI + * 7 2 CPU MCA+PCI + */ + +#define MAX_IRQ_SOURCES (128 * MAX_NUMIOHUBS) + +/* (32 * nodes) for PCI, and one number is a special case */ +#define MAX_MP_BUSSES 256 + +enum mp_bustype { + MP_BUS_ISA = 1, + MP_BUS_EISA, + MP_BUS_PCI, + MP_BUS_MCA +}; + +/* + * IO link configurations + */ + +#define MAX_NUMIOLINKS MACH_MAX_NUMIOLINKS +#define MAX_NUMIOHUBS MAX_NUMIOLINKS +#define NODE_NUMIOLINKS MACH_NODE_NUMIOLINKS + +typedef struct mpc_config_iolink { + unsigned char mpc_type; /* type is MP_IOLINK */ + unsigned char mpc_iolink_type; /* type of IO link: IOHUB or RDMA */ + unsigned short mpc_iolink_ver; /* version of IOHUB or RDMA */ + unsigned int mpc_reserved; /* reserved */ + int node; /* number od node: 0 - 3 */ + int link; /* local number of link on node: 0-1 */ + short bus_min; /* number of root bus on IOHUB */ + short bus_max; /* number of max bus on IOHUB */ + short apicid; /* IO-APIC id connected to the */ + /* IOHUB */ + short mpc_reserv16; /* reserved 16-bits value */ + unsigned long pci_mem_start; /* PCI mem area for IOMMU v6 */ + unsigned long pci_mem_end; +} mpc_config_iolink_t; + +enum mp_iolink_type { + MP_IOLINK_IOHUB = 1, /* IO link is IOHUB */ + MP_IOLINK_RDMA /* IO link is RDMA controller */ +}; + +enum mp_iolink_ver { + MP_IOHUB_FPGA_VER = 0x10, /* IOHUB implemented on FPGA (Altera) */ +}; 
+#define MAX_MP_TIMERS 4 + +typedef struct mpc_config_timer { + unsigned char mpc_type; /* MP_TIMER */ + unsigned char mpc_timertype; + unsigned char mpc_timerver; + unsigned char mpc_timerflags; + unsigned long mpc_timeraddr; +} mpc_config_timer_t; + +enum mp_timertype { + MP_PIT_TYPE, /* programmed interval timer */ + MP_LT_TYPE, /* Elbrus iohub timer */ + MP_HPET_TYPE, /* High presicion eventualy timer */ + MP_RTC_TYPE, /* real time clock */ + MP_PM_TYPE /* power managment timer */ +}; + +#define MP_LT_VERSION 1 +#define MP_LT_FLAGS 0 + +#define MP_RTC_VER_CY14B101P 2 +#define MP_RTC_FLAG_SYNCINTR 0x01 + +typedef struct mpc_config_i2c { + unsigned char mpc_type; /* MP_I2C_SPI */ + unsigned char mpc_max_channel; + unsigned char mpc_i2c_irq; + unsigned char mpc_revision; + unsigned long mpc_i2ccntrladdr; + unsigned long mpc_i2cdataaddr; +} mpc_config_i2c_t; + +typedef struct mpc_config_pmc { + unsigned char mpc_type; /* MP_PMC */ + unsigned char mpc_pmc_type; /* Izumrud or Processor-2 */ + unsigned char mpc_pmc_version; + unsigned char mpc_pmc_vmax; /* VMAX: bits 40:34 in l_pmc.vrange */ + unsigned char mpc_pmc_vmin; /* VMIN: bits 33:27 in l_pmc.vrange */ + unsigned char mpc_pmc_fmax; /* FMAX: bits 26:20 in l_pmc.vrange */ + unsigned char reserved[2]; + unsigned long mpc_pmc_cntrl_addr; /* base of pmc regs */ + unsigned long mpc_pmc_data_addr; + unsigned int mpc_pmc_data_size; + unsigned int mpc_pmc_p_state[4]; /* VID 15:9, DID 8:4, FID 3:0 */ + unsigned int mpc_pmc_freq; /* Frequency in KHz */ +} mpc_config_pmc_t; + + + +typedef struct mpc_bdev { + unsigned char mpc_type; /* MP_BDEV */ + unsigned char mpc_bustype; /* I2C or SPI */ + unsigned char mpc_nodeid; + unsigned char mpc_linkid; + unsigned char mpc_busid; + unsigned char mpc_baddr; + unsigned char mpc_bdev_name[16]; +} mpc_bdev_t; + +#define MPC_BDEV_DTYPE_I2C 1 +#define MPC_BDEV_DTYPE_SPI 2 + +typedef struct mpc_gpio_act { + unsigned char mpc_type; /* MP_GPIO_ACT */ + unsigned char mpc_nodeid; + unsigned 
char mpc_linkid; + unsigned char mpc_busid; + unsigned char mpc_gpio_pin; + unsigned char mpc_pin_direction; + unsigned char mpc_gpio_act_name[16]; +} mpc_gpio_act_t; + +#define MP_GPIO_ACT_DIRECTION_IN 1 +#define MP_GPIO_ACT_DIRECTION_OUT 2 + +#ifdef __KERNEL__ +struct iohub_sysdata; +void mp_pci_add_resources(struct list_head *resources, + struct iohub_sysdata *sd); +extern int __init mp_ioepic_find_bus(int ioepic_id); +#ifdef CONFIG_IOHUB_DOMAINS +struct iohub_sysdata; +extern int mp_find_iolink_root_busnum(int node, int link); +extern int mp_find_iolink_io_apicid(int node, int link); +extern int mp_fix_io_apicid(unsigned int src_apicid, unsigned int new_apicid); +void mp_pci_add_resources(struct list_head *resources, + struct iohub_sysdata *sd); +#else +static inline int mp_fix_io_apicid(unsigned int src_apicid, + unsigned int new_apicid) +{ + return 0; +} +#endif /* CONFIG_IOHUB_DOMAINS */ +extern int get_bus_to_io_apicid(int busnum); + +#if defined(CONFIG_MCA) || defined(CONFIG_EISA) +extern int mp_bus_id_to_type [MAX_MP_BUSSES]; +#endif + +extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); + +extern struct mpc_ioapic mp_ioapics[MAX_IO_APICS]; +extern unsigned int boot_cpu_physical_apicid; +extern int smp_found_config; +extern void find_smp_config(boot_info_t *bblock); +extern void get_smp_config(void); +extern int nr_ioapics; +extern int apic_version[MAX_LOCAL_APIC]; +extern int mp_irq_entries; +extern struct mpc_intsrc mp_irqs []; +extern int mpc_default_type; +extern unsigned long mp_lapic_addr; +extern int pic_mode; +extern int using_apic_timer; +extern mpc_config_timer_t mp_timers[MAX_MP_TIMERS]; +extern int nr_timers; +extern int rtc_model; +extern int rtc_syncintr; + +#define early_iohub_online(node, link) mach_early_iohub_online((node), (link)) +#define early_sic_init() mach_early_sic_init() +#endif /* __KERNEL__ */ + +#ifdef CONFIG_ENABLE_BIOS_MPTABLE +#define MPE_SYSTEM_ADDRESS_SPACE 0x80 +#define MPE_BUS_HIERARCHY 0x81 +#define 
MPE_COMPATIBILITY_ADDRESS_SPACE 0x82 + +struct mp_exten_config { + unsigned char mpe_type; + unsigned char mpe_length; +}; + +typedef struct mp_exten_config *mpe_t; + +struct mp_exten_system_address_space { + unsigned char mpe_type; + unsigned char mpe_length; + unsigned char mpe_busid; + unsigned char mpe_address_type; +#define ADDRESS_TYPE_IO 0 +#define ADDRESS_TYPE_MEM 1 +#define ADDRESS_TYPE_PREFETCH 2 + unsigned int mpe_address_base_low; + unsigned int mpe_address_base_high; + unsigned int mpe_address_length_low; + unsigned int mpe_address_length_high; +}; + +struct mp_exten_bus_hierarchy { + unsigned char mpe_type; + unsigned char mpe_length; + unsigned char mpe_busid; + unsigned char mpe_bus_info; +#define BUS_SUBTRACTIVE_DECODE 1 + unsigned char mpe_parent_busid; + unsigned char reserved[3]; +}; + +struct mp_exten_compatibility_address_space { + unsigned char mpe_type; + unsigned char mpe_length; + unsigned char mpe_busid; + unsigned char mpe_address_modifier; +#define ADDRESS_RANGE_SUBTRACT 1 +#define ADDRESS_RANGE_ADD 0 + unsigned int mpe_range_list; +#define RANGE_LIST_IO_ISA 0 + /* X100 - X3FF + * X500 - X7FF + * X900 - XBFF + * XD00 - XFFF + */ +#define RANGE_LIST_IO_VGA 1 + /* X3B0 - X3BB + * X3C0 - X3DF + * X7B0 - X7BB + * X7C0 - X7DF + * XBB0 - XBBB + * XBC0 - XBDF + * XFB0 - XFBB + * XFC0 - XCDF + */ +}; + +/* Default local apic addr */ +#define LAPIC_ADDR 0xFEE00000 + +#ifdef __KERNEL__ +void *smp_next_mpc_entry(struct mpc_table *mc); +void *smp_next_mpe_entry(struct mpc_table *mc); + +void smp_write_processor(struct mpc_table *mc, + unsigned char apicid, unsigned char apicver, + unsigned char cpuflag, unsigned int cpufeature, + unsigned int featureflag, unsigned int cepictimerfreq); +void smp_write_processors(struct mpc_table *mc, + unsigned int phys_cpu_num); +void smp_write_bus(struct mpc_table *mc, + unsigned char id, unsigned char *bustype); +void smp_write_ioapic(struct mpc_table *mc, + unsigned char id, unsigned char ver, + unsigned long 
apicaddr); +void smp_write_ioepic(struct mpc_table *mc, + unsigned short id, unsigned short nodeid, + unsigned char ver, unsigned long epicaddr); +void smp_write_iolink(struct mpc_table *mc, + int node, int link, + short bus_min, short bus_max, + short picid, + unsigned long pci_mem_start, unsigned long pci_mem_end); +void smp_write_intsrc(struct mpc_table *mc, + unsigned char irqtype, unsigned short irqflag, + unsigned char srcbus, unsigned char srcbusirq, + unsigned char dstapic, unsigned char dstirq); +void smp_write_lintsrc(struct mpc_table *mc, + unsigned char irqtype, unsigned short irqflag, + unsigned char srcbusid, unsigned char srcbusirq, + unsigned char destapic, unsigned char destapiclint); +void smp_write_address_space(struct mpc_table *mc, + unsigned char busid, unsigned char address_type, + unsigned int address_base_low, unsigned int address_base_high, + unsigned int address_length_low, unsigned int address_length_high); +void smp_write_bus_hierarchy(struct mpc_table *mc, + unsigned char busid, unsigned char bus_info, + unsigned char parent_busid); +void smp_write_compatibility_address_space(struct mpc_table *mc, + unsigned char busid, unsigned char address_modifier, + unsigned int range_list); +unsigned char smp_compute_checksum(void *v, int len); +void smp_write_floating_table(struct intel_mp_floating *mpf); +unsigned int write_smp_table(struct intel_mp_floating *mpf, unsigned int phys_cpu_num); +void smp_i2c_spi_timer(struct mpc_table *mc, + unsigned char timertype, unsigned char timerver, + unsigned char timerflags, unsigned long timeraddr); +void smp_i2c_spi_dev(struct mpc_table *mc, unsigned char max_channel, + unsigned char irq, unsigned long i2cdevaddr); +//#define MAX_CPUS 16 /* 16 way CPU system */ +#endif /* __KERNEL__ */ + +/* A table (per mainboard) listing the initial apicid of each cpu. 
*/ +//extern unsigned int initial_apicid[MAX_CPUS]; +#endif /* CONFIG_ENABLE_BIOS_MPTABLE */ + +int generic_processor_info(int apicid, int version); + +#ifdef __KERNEL__ +extern void print_bootblock(bootblock_struct_t *bootblock); +#endif /* __KERNEL__ */ + +#ifdef CONFIG_ACPI +extern void mp_register_ioapic(int id, unsigned long address, u32 gsi_base); +extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, + u32 gsi); +extern void mp_config_acpi_legacy_irqs(void); +struct device; +extern int mp_register_gsi(struct device *dev, u32 gsi, int edge_level, + int active_high_low); +extern int acpi_probe_gsi(void); +#ifdef CONFIG_L_IO_APIC +extern int mp_find_ioapic(u32 gsi); +extern int mp_find_ioapic_pin(int ioapic, u32 gsi); +#endif +#else /* !CONFIG_ACPI: */ +static inline int acpi_probe_gsi(void) +{ + return 0; +} +#endif /* CONFIG_ACPI */ + +/* physid definitions */ +/* + * On e2k and sparc lapics number is the same as cpus number + * IO-APICs number is defined by MAX_IO_APICS + * IO-APICs IDs can be placed higher than local APICs IDs or at its hole + * so physid_t cannot be a synonim to cpumask_t. 
+ */ +#include + +#define MAX_PHYSID_NUM (NR_CPUS + MAX_IO_APICS) +typedef struct physid_mask { + DECLARE_BITMAP(bits, MAX_PHYSID_NUM); +} physid_mask_t; + +#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_PHYSID_NUM) + +#define physid_set(physid, map) set_bit((physid), (map).bits) +#define physid_clear(physid, map) clear_bit((physid), (map).bits) +#define physid_isset(physid, map) test_bit((physid), (map).bits) +#define physid_test_and_set(physid, map) test_and_set_bit((physid), (map).bits) + +#define physids_and(dstp, src1, src2) \ + bitmap_and((dst).bits, (src1).bits, (src2).bits, MAX_PHYSID_NUM) + +#define physids_or(dst, src1, src2) \ + bitmap_or((dst).bits, (src1).bits, (src2).bits, MAX_PHYSID_NUM) + +#define physids_clear(map) \ + bitmap_zero((map).bits, MAX_PHYSID_NUM) + +#define physids_complement(dst, src) \ + bitmap_complement((dst).bits, (src).bits, MAX_PHYSID_NUM) + +#define physids_empty(map) \ + bitmap_empty((map).bits, MAX_PHYSID_NUM) + +#define physids_equal(map1, map2) \ + bitmap_equal((map1).bits, (map2).bits, MAX_PHYSID_NUM) + +#define physids_weight(map) \ + bitmap_weight((map).bits, MAX_PHYSID_NUM) + +#define physids_shift_left(dst, src, n) \ + bitmap_shift_left((dst).bits, (src).bits, (n), MAX_PHYSID_NUM) + +static inline unsigned long physids_coerce(physid_mask_t *map) +{ + return map->bits[0]; +} + +static inline void physids_promote(unsigned long physids, physid_mask_t *map) +{ + physids_clear(*map); + map->bits[0] = physids; +} + +static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map) +{ + physids_clear(*map); + physid_set(physid, *map); +} + +#define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} } +#define PHYSID_MASK_NONE { {[0 ... 
PHYSID_ARRAY_SIZE-1] = 0UL} } + +extern physid_mask_t phys_cpu_present_map; + +#endif /* __ASSEMBLY__ */ + +#endif /* __L_ASM_MPSPEC_H */ diff --git a/arch/l/include/asm/msidef.h b/arch/l/include/asm/msidef.h new file mode 100644 index 000000000000..b3bd65decb07 --- /dev/null +++ b/arch/l/include/asm/msidef.h @@ -0,0 +1,53 @@ +#ifndef _ASM_L_MSIDEF_H +#define _ASM_L_MSIDEF_H + +/* + * Constants for Intel APIC based MSI messages. + */ + +/* + * Shifts for MSI data + */ + +#define MSI_DATA_VECTOR_SHIFT 0 +#define MSI_DATA_VECTOR_MASK 0x000000ff +#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & \ + MSI_DATA_VECTOR_MASK) + +#define MSI_DATA_DELIVERY_MODE_SHIFT 8 +#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT) +#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT) + +#define MSI_DATA_LEVEL_SHIFT 14 +#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT) +#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT) + +#define MSI_DATA_TRIGGER_SHIFT 15 +#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT) +#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT) + +/* + * Shift/mask fields for msi address + */ + +#define MSI_ADDR_DEST_MODE_SHIFT 2 +#define MSI_ADDR_DEST_MODE_PHYSICAL (0 << MSI_ADDR_DEST_MODE_SHIFT) +#define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT) + +#define MSI_ADDR_REDIRECTION_SHIFT 3 +#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) + /* dedicated cpu */ +#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) + /* lowest priority */ + +#define MSI_ADDR_DEST_ID_SHIFT 12 +#define MSI_ADDR_DEST_ID_MASK 0x00ffff0 +#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \ + MSI_ADDR_DEST_ID_MASK) +#define MSI_ADDR_EXT_DEST_ID(dest) ((dest) & 0xffffff00) + +#define MSI_ADDR_IR_EXT_INT (1 << 4) +#define MSI_ADDR_IR_SHV (1 << 3) +#define MSI_ADDR_IR_INDEX1(index) ((index & 0x8000) >> 13) +#define MSI_ADDR_IR_INDEX2(index) ((index & 
0x7fff) << 5) +#endif /* _ASM_L_MSIDEF_H */ diff --git a/arch/l/include/asm/nmi.h b/arch/l/include/asm/nmi.h new file mode 100644 index 000000000000..8d5aedfcd92e --- /dev/null +++ b/arch/l/include/asm/nmi.h @@ -0,0 +1,22 @@ +#ifndef _ASM_L_NMI_H +#define _ASM_L_NMI_H + +#include +#include + +#ifdef ARCH_HAS_NMI_WATCHDOG +extern unsigned int nmi_watchdog; +#define NMI_NONE 0 +#define NMI_IO_APIC 1 +#define NMI_LOCAL_APIC 2 +#define NMI_INVALID 3 +#endif + +void lapic_watchdog_stop(void); +int lapic_watchdog_init(unsigned nmi_hz); +int lapic_wd_event(unsigned nmi_hz); +unsigned lapic_adjust_nmi_hz(unsigned hz); +void stop_nmi(void); +void restart_nmi(void); + +#endif /* _ASM_L_NMI_H */ diff --git a/arch/l/include/asm/of_device.h b/arch/l/include/asm/of_device.h new file mode 100644 index 000000000000..32a43a1180ea --- /dev/null +++ b/arch/l/include/asm/of_device.h @@ -0,0 +1,47 @@ + +#ifndef _ASM_L_OF_DEVICE_H +#define _ASM_L_OF_DEVICE_H +#ifdef __KERNEL__ + +#include +#include +#include +#include + +/* + * The of_device is a kind of "base class" that is a superset of + * struct device for use by devices attached to an OF node and + * probed using OF properties. 
+ */ +struct of_device; +struct of_device +{ + char name[32]; + struct of_device *parent; + struct device dev; + struct device_node *node; + struct resource resource[PROMREG_MAX]; + unsigned int irqs[PROMINTR_MAX]; + int num_irqs; + struct proc_dir_entry *pde; /* this node's proc directory */ + int registered; +// void *sysdata; + + int p2s_id; +// int slot; +// int portid; + int clock_freq; +}; + +extern void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name); +extern void of_iounmap(struct resource *res, void __iomem *base, unsigned long size); + +extern struct device_node **l_allnodes; + +#if 0 +extern int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus); +extern void of_unregister_driver(struct of_platform_driver *drv); +#endif + +#endif /* __KERNEL__ */ +#endif /* _ASM_L_OF_DEVI */ diff --git a/arch/l/include/asm/pci.h b/arch/l/include/asm/pci.h new file mode 100644 index 000000000000..a1eb4fb21edc --- /dev/null +++ b/arch/l/include/asm/pci.h @@ -0,0 +1,170 @@ +#ifndef _L_PCI_H +#define _L_PCI_H + +#if !defined ___ASM_SPARC_PCI_H && !defined _E2K_PCI_H +# error Do not include "asm-l/pci.h" directly, use "linux/pci.h" instead +#endif + +#include +#include +#include + +#ifdef __KERNEL__ + +#define PCI_PROBE_BIOS 0x0001 +#define PCI_PROBE_CONF1 0x0002 +#define PCI_PROBE_CONF2 0x0004 +#define PCI_PROBE_MMCONF 0x0008 +#define PCI_PROBE_L 0x0010 +#define PCI_PROBE_MASK 0x001f + +#define PCI_NO_SORT 0x0100 +#define PCI_BIOS_SORT 0x0200 +#define PCI_NO_CHECKS 0x0400 +#define PCI_USE_PIRQ_MASK 0x0800 +#define PCI_ASSIGN_ROMS 0x1000 +#define PCI_BIOS_IRQ_SCAN 0x2000 +#define PCI_ASSIGN_ALL_BUSSES 0x4000 + +#undef CONFIG_CMD +#define CONFIG_CMD(bus, devfn, where) \ + ((bus&0xFF)<<20)|((devfn&0xFF)<<12)|(where&0xFFF) + +#define L_IOHUB_ROOT_BUS_NUM 0x00 +#define L_IOHUB_ROOT_SLOT 0x00 /* BSP IOHUB start slot (devfn) */ + /* on root bus 0 */ +#define SLOTS_PER_L_IOHUB 4 /* number of slots reserved per */ 
+ /* each IOHUB */ + +#ifndef L_IOHUB_SLOTS_NUM +#define L_IOHUB_SLOTS_NUM 2 /* number of slots (devfns) for */ + /* each IOHUB on root bus */ +#endif + +extern int IOHUB_revision; +static inline int is_prototype(void) +{ + return IOHUB_revision >= 0xf0; +} + +extern unsigned long pirq_table_addr; +struct e2k_iommu; +struct pci_dev; +struct pci_bus; +enum pci_mmap_state; +struct pci_ops; + +typedef struct iohub_sysdata { +#ifdef CONFIG_IOHUB_DOMAINS + int domain; /* IOHUB (PCI) domain */ + int node; /* NUMA node */ + int link; /* local number of IO link on the node */ +#endif /* CONFIG_IOHUB_DOMAINS */ + u32 pci_msi_addr_lo; /* MSI transaction address */ + u32 pci_msi_addr_hi;/* MSI transaction upper address */ + /*IOHUB can be connected to EIOHUB and vice versa */ + bool has_iohub; + u8 iohub_revision; /* IOHUB revision */ + u8 iohub_generation; /* IOHUB generation */ + bool has_eioh; + u8 eioh_generation; /* EIOHUB generation */ + u8 eioh_revision; /* EIOHUB revision */ + + struct resource mem_space; /* pci registers memory */ + void *l_iommu; +} iohub_sysdata_t; + +bool l_eioh_device(struct pci_dev *pdev); + +#define iohub_revision(pdev) ({ \ + struct iohub_sysdata *sd = pdev->bus->sysdata; \ + u8 rev = l_eioh_device(pdev) ? sd->eioh_revision : \ + sd->iohub_revision; \ + (rev >> 1); \ +}) + +#define iohub_generation(pdev) ({ \ + struct iohub_sysdata *sd = pdev->bus->sysdata; \ + (l_eioh_device(pdev) ? 
sd->eioh_generation : \ + sd->iohub_generation); \ +}) + +#ifdef CONFIG_IOHUB_DOMAINS + +#define pci_domain_nr(bus) ({ \ + struct iohub_sysdata *sd = bus->sysdata; \ + sd->domain; \ +}) + +#define pci_proc_domain(bus) pci_domain_nr(bus) + +static inline int pci_iohub_domain_to_slot(const int domain) +{ + return L_IOHUB_ROOT_SLOT + domain * SLOTS_PER_L_IOHUB; +} +/* Returns the node based on pci bus */ +#define __pcibus_to_node(bus) ({ \ + const struct iohub_sysdata *sd = bus->sysdata; \ + sd->node; \ +}) +#define __pcibus_to_link(bus) ({ \ + const struct iohub_sysdata *sd = bus->sysdata; \ + sd->link; \ +}) + +#else /* ! CONFIG_IOHUB_DOMAINS */ +#define __pcibus_to_node(bus) 0 /* only one IOHUB on node #0 */ +#define __pcibus_to_link(bus) 0 +#endif /* CONFIG_IOHUB_DOMAINS */ + +/* Can be used to override the logic in pci_scan_bus for skipping + already-configured bus numbers - to be used for buggy BIOSes + or architectures with incomplete PCI setup by the loader */ + +#ifdef CONFIG_PCI +extern unsigned int pcibios_assign_all_busses(void); +#else +#define pcibios_assign_all_busses() 0 +#endif +#define pcibios_scan_all_fns(a, b) 0 + +/* the next function placed at drivers/pci/probe.c and updated only to */ +/* support commonroot bus domains */ +unsigned int pci_scan_root_child_bus(struct pci_bus *bus); + +struct pci_bus * pcibios_scan_root(int bus); + +/* scan a bus after allocating a iohub_sysdata for it */ +extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops, + int node); + +void __init pcibios_fixup_resources(struct pci_bus *pbus); +int pcibios_enable_resources(struct pci_dev *, int); + +void pcibios_set_master(struct pci_dev *dev); +void pcibios_penalize_isa_irq(int irq, int active); +int l_pci_direct_init(void); + +extern int (*pcibios_enable_irq)(struct pci_dev *dev); +extern void (*pcibios_disable_irq)(struct pci_dev *dev); + +extern raw_spinlock_t pci_config_lock; + +extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, 
+ size_t count); +extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, + size_t count); +extern int pci_mmap_legacy_page_range(struct pci_bus *bus, + struct vm_area_struct *vma, + enum pci_mmap_state mmap_state); + +#ifndef L_IOPORT_RESOURCE_OFFSET +#define L_IOPORT_RESOURCE_OFFSET 0UL +#endif +#ifndef L_IOMEM_RESOURCE_OFFSET +#define L_IOMEM_RESOURCE_OFFSET 0UL +#endif + +#endif /* __KERNEL__ */ + +#endif /* _L_PCI_H */ diff --git a/arch/l/include/asm/pci_l.h b/arch/l/include/asm/pci_l.h new file mode 100644 index 000000000000..c63a36740927 --- /dev/null +++ b/arch/l/include/asm/pci_l.h @@ -0,0 +1,7 @@ +#ifndef _L_PCI_L_H +#define _L_PCI_L_H + +extern unsigned int pci_probe; + +#endif + diff --git a/arch/l/include/asm/pcie_fixup.h b/arch/l/include/asm/pcie_fixup.h new file mode 100644 index 000000000000..f0e69efab95d --- /dev/null +++ b/arch/l/include/asm/pcie_fixup.h @@ -0,0 +1,34 @@ +#ifndef _ASM_L_PCIE_FIXUP_H_ +#define _ASM_L_PCIE_FIXUP_H_ + +#undef memset_io +#define memset_io(a,b,c) \ +({ \ + u64 i; \ + for (i = 0; i != (c); i++) { \ + writeb((b), (u8 *)(a) + i); \ + readb((u8 *)(a) + i); \ + } \ +}) + +#undef memcpy_fromio +#define memcpy_fromio(a,b,c) \ +({ \ + u64 i; \ + for (i = 0; i != (c); i++) { \ + u8 t = readb((u8 *)(b) + i); \ + *((u8 *)(a) + i) = t; \ + } \ +}) + +#undef memcpy_toio +#define memcpy_toio(a,b,c) \ +({ \ + u64 i; \ + for (i = 0; i != (c); i++) { \ + writeb(*((u8 *)(b) + i), (u8 *)(a) + i); \ + readb((u8 *)(a) + i); \ + } \ +}) + +#endif /*_ASM_L_PCIE_FIXUP_H_*/ diff --git a/arch/l/include/asm/percpu.h b/arch/l/include/asm/percpu.h new file mode 100644 index 000000000000..1a0304548827 --- /dev/null +++ b/arch/l/include/asm/percpu.h @@ -0,0 +1,69 @@ +#ifndef _ASM_L_PERCPU_H_ +#define _ASM_L_PERCPU_H_ + +#ifdef CONFIG_SMP + +/* + * Define the "EARLY_PER_CPU" macros. These are used for some per_cpu + * variables that are initialized and accessed before there are per_cpu + * areas allocated. 
+ */ + +#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \ + DEFINE_PER_CPU(_type, _name) = _initvalue; \ + __typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \ + { [0 ... NR_CPUS-1] = _initvalue }; \ + __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map + +#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \ + DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue; \ + __typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \ + { [0 ... NR_CPUS-1] = _initvalue }; \ + __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map + +#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \ + EXPORT_PER_CPU_SYMBOL(_name); \ + EXPORT_PER_CPU_SYMBOL(_name##_early_ptr); \ + EXPORT_PER_CPU_SYMBOL(_name##_early_map); + +#define DECLARE_EARLY_PER_CPU(_type, _name) \ + DECLARE_PER_CPU(_type, _name); \ + extern __typeof__(_type) *_name##_early_ptr; \ + extern __typeof__(_type) _name##_early_map[] + +#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \ + DECLARE_PER_CPU_READ_MOSTLY(_type, _name); \ + extern __typeof__(_type) *_name##_early_ptr; \ + extern __typeof__(_type) _name##_early_map[] + +#define early_per_cpu_ptr(_name) (_name##_early_ptr) +#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx]) +#define early_per_cpu(_name, _cpu) \ + *(early_per_cpu_ptr(_name) ? 
\ + &early_per_cpu_ptr(_name)[_cpu] : \ + &per_cpu(_name, _cpu)) + +#else /* !CONFIG_SMP */ +#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \ + DEFINE_PER_CPU(_type, _name) = _initvalue + +#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \ + DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue + +#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \ + EXPORT_PER_CPU_SYMBOL(_name) + +#define DECLARE_EARLY_PER_CPU(_type, _name) \ + DECLARE_PER_CPU(_type, _name) + +#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \ + DECLARE_PER_CPU_READ_MOSTLY(_type, _name) + +#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu) +#define early_per_cpu_ptr(_name) NULL +/* no early_per_cpu_map() */ + +#endif /* !CONFIG_SMP */ + +#endif /* _ASM_L_PERCPU_H_ */ + diff --git a/arch/l/include/asm/pic.h b/arch/l/include/asm/pic.h new file mode 100644 index 000000000000..a0c0a8b428ba --- /dev/null +++ b/arch/l/include/asm/pic.h @@ -0,0 +1,370 @@ +#ifndef __ASM_L_PIC_H +#define __ASM_L_PIC_H + +/* + * Choose between PICs in arch/l. If CONFIG_EPIC=n, APIC is chosen statically + * If CONFIG_EPIC=y (only on e2k), choose dynamically based on CPU_FEAT_EPIC + */ + +extern int first_system_vector; +extern int apic_get_vector(void); + +#ifdef CONFIG_EPIC + +#include +#include +#include + +#define pic_printk(v, s, a...) 
\ +do { \ + if (cpu_has_epic()) \ + epic_printk(s, a); \ + else \ + apic_printk(v, s, a); \ +} while (0) + +static inline unsigned int read_pic_id(void) +{ + if (cpu_has_epic()) + return read_epic_id(); + else + return read_apic_id(); +} + +extern void epic_processor_info(int epicid, int version, + unsigned int cepic_freq); +extern int generic_processor_info(int apicid, int version); +static inline void pic_processor_info(int picid, int picver, unsigned int freq) +{ + if (cpu_has_epic()) + epic_processor_info(picid, picver, freq); + else + generic_processor_info(picid, picver); +} + +extern int get_cepic_timer_frequency(void); +static inline int get_pic_timer_frequency(void) +{ + if (cpu_has_epic()) + return get_cepic_timer_frequency(); + else + return -1; /* standard constant value */ +} + + +/* IO-APIC definitions */ +struct irq_data; +extern void ioapic_ack_epic_edge(struct irq_data *data); +extern void ack_apic_edge(struct irq_data *data); +static inline void ioapic_ack_pic_edge(struct irq_data *data) +{ + if (cpu_has_epic()) + ioapic_ack_epic_edge(data); + else + ack_apic_edge(data); +} + +extern void ioapic_ack_epic_level(struct irq_data *data); +extern void ack_apic_level(struct irq_data *data); +static inline void ioapic_ack_pic_level(struct irq_data *data) +{ + if (cpu_has_epic()) + ioapic_ack_epic_level(data); + else + ack_apic_level(data); +} + +struct irq_chip; +extern struct irq_chip ioepic_to_apic_chip; +static inline bool irqchip_is_ioepic_to_apic(struct irq_chip *chip) +{ + return chip == &ioepic_to_apic_chip; +} + +/* IRQ definitions */ +#ifdef CONFIG_IRQ_WORK +extern void epic_irq_work_raise(void); +extern void apic_irq_work_raise(void); +static inline void pic_irq_work_raise(void) +{ + if (cpu_has_epic()) + epic_irq_work_raise(); + else + apic_irq_work_raise(); +} +#endif + +#ifdef CONFIG_SMP +extern void epic_send_call_function_ipi_mask(const struct cpumask *mask); +extern void apic_send_call_function_ipi_mask(const struct cpumask *mask); 
+static inline void pic_send_call_function_ipi_mask(const struct cpumask *mask) +{ + if (cpu_has_epic()) + epic_send_call_function_ipi_mask(mask); + else + apic_send_call_function_ipi_mask(mask); +} + +extern void epic_send_call_function_single_ipi(int cpu); +extern void apic_send_call_function_single_ipi(int cpu); +static inline void pic_send_call_function_single_ipi(int cpu) +{ + if (cpu_has_epic()) + epic_send_call_function_single_ipi(cpu); + else + apic_send_call_function_single_ipi(cpu); +} + +extern void epic_smp_send_reschedule(int cpu); +extern void apic_smp_send_reschedule(int cpu); +static inline void pic_send_reschedule(int cpu) +{ + if (cpu_has_epic()) + epic_smp_send_reschedule(cpu); + else + apic_smp_send_reschedule(cpu); +} + +struct irq_desc; +extern void apic_irq_force_complete_move(struct irq_desc *desc); +extern void epic_irq_force_complete_move(struct irq_desc *desc); +static inline void pic_irq_force_complete_move(struct irq_desc *desc) +{ + if (cpu_has_epic()) + epic_irq_force_complete_move(desc); + else + apic_irq_force_complete_move(desc); +} +#endif + +struct pt_regs; +extern noinline notrace void epic_do_nmi(struct pt_regs *regs); +extern noinline notrace void apic_do_nmi(struct pt_regs *regs); +static inline void pic_do_nmi(struct pt_regs *regs) +{ + if (cpu_has_epic()) + epic_do_nmi(regs); + else + apic_do_nmi(regs); +} + +static inline void ack_pic_irq(void) +{ + if (cpu_has_epic()) + ack_epic_irq(); + else + ack_APIC_irq(); +} + +/* For do_postpone_tick() */ +extern void cepic_timer_interrupt(void); +extern void local_apic_timer_interrupt(void); +static inline void local_pic_timer_interrupt(void) +{ + if (cpu_has_epic()) + cepic_timer_interrupt(); + else + local_apic_timer_interrupt(); +} + +extern int print_local_APICs(bool force); +extern int print_epics(bool force); +static inline int print_local_pics(bool force) +{ + if (cpu_has_epic()) + return print_epics(force); + else + return print_local_APICs(force); +} + +struct pci_dev; 
+extern int native_setup_msi_irqs_epic(struct pci_dev *dev, int nvec, int type); +extern int native_setup_msi_irqs_apic(struct pci_dev *dev, int nvec, int type); +static inline int setup_msi_irqs_pic(struct pci_dev *dev, int nvec, int type) +{ + if (cpu_has_epic()) + return native_setup_msi_irqs_epic(dev, nvec, type); + else + return native_setup_msi_irqs_apic(dev, nvec, type); +} + +extern void native_teardown_msi_irq_epic(unsigned int irq); +extern void native_teardown_msi_irq_apic(unsigned int irq); +static inline void teardown_msi_irq_pic(unsigned int irq) +{ + if (cpu_has_epic()) + native_teardown_msi_irq_epic(irq); + else + native_teardown_msi_irq_apic(irq); +} + +extern void __init_recv setup_secondary_epic_clock(void); +extern void setup_secondary_APIC_clock(void); +static inline void __init_recv setup_secondary_pic_clock(void) +{ + if (cpu_has_epic()) + setup_secondary_epic_clock(); + else + setup_secondary_APIC_clock(); +} + +extern int epic_get_vector(void); +static inline int pic_get_vector(void) +{ + if (cpu_has_epic()) + return epic_get_vector(); + else + return apic_get_vector(); +} + +extern int ioepic_pin_to_irq_num(unsigned int pin, struct pci_dev *dev); +extern int ioepic_pin_to_msi_ioapic_irq(unsigned int pin, struct pci_dev *dev); +static inline int ioepic_pin_to_irq_pic(unsigned int pin, struct pci_dev *dev) +{ + if (cpu_has_epic()) + return ioepic_pin_to_irq_num(pin, dev); + else + return ioepic_pin_to_msi_ioapic_irq(pin, dev); +} + +static inline void __init setup_boot_pic_clock(void) +{ + if (cpu_has_epic()) + setup_boot_epic_clock(); + else + setup_boot_APIC_clock(); +} + +extern void __init init_apic_mappings(void); +static inline void __init init_pic_mappings(void) +{ + if (!cpu_has_epic()) + return init_apic_mappings(); +} + +extern void setup_cepic(void); + +#else /* !(CONFIG_EPIC) */ + +#include + +static inline unsigned int read_pic_id(void) +{ + return read_apic_id(); +} + +extern int generic_processor_info(int apicid, int version); 
+static inline void pic_processor_info(int picid, int picver, unsigned int freq) +{ + generic_processor_info(picid, picver); +} + +static inline int get_pic_timer_frequency(void) +{ + return -1; /* standard constant value */ +} + +/* IO-APIC definitions */ +struct irq_data; +extern void ack_apic_edge(struct irq_data *data); +static inline void ioapic_ack_pic_edge(struct irq_data *data) +{ + ack_apic_edge(data); +} + +extern void ack_apic_level(struct irq_data *data); +static inline void ioapic_ack_pic_level(struct irq_data *data) +{ + ack_apic_level(data); +} + +struct irq_chip; +static inline bool irqchip_is_ioepic_to_apic(struct irq_chip *chip) +{ + return 0; +} + +/* IRQ definitions */ +extern void apic_irq_work_raise(void); +static inline void pic_irq_work_raise(void) +{ + apic_irq_work_raise(); +} + +#ifdef CONFIG_SMP +extern void apic_send_call_function_ipi_mask(const struct cpumask *mask); +static inline void pic_send_call_function_ipi_mask(const struct cpumask *mask) +{ + apic_send_call_function_ipi_mask(mask); +} + +extern void apic_send_call_function_single_ipi(int cpu); +static inline void pic_send_call_function_single_ipi(int cpu) +{ + apic_send_call_function_single_ipi(cpu); +} + +extern void apic_smp_send_reschedule(int cpu); +static inline void pic_send_reschedule(int cpu) +{ + apic_smp_send_reschedule(cpu); +} + +struct irq_desc; +extern void apic_irq_force_complete_move(struct irq_desc *desc); +static inline void pic_irq_force_complete_move(struct irq_desc *desc) +{ + apic_irq_force_complete_move(desc); +} +#endif /* CONFIG_SMP */ + +struct pt_regs; +extern noinline notrace void apic_do_nmi(struct pt_regs *regs); +static inline void pic_do_nmi(struct pt_regs *regs) +{ + apic_do_nmi(regs); +} + +static inline void ack_pic_irq(void) +{ + ack_APIC_irq(); +} + +/* For do_postpone_tick() */ +extern void local_apic_timer_interrupt(void); +static inline void local_pic_timer_interrupt(void) +{ + local_apic_timer_interrupt(); +} + +extern int 
print_local_APICs(bool force); +static inline int print_local_pics(bool force) +{ + return print_local_APICs(force); +} + +struct pci_dev; +extern int native_setup_msi_irqs_apic(struct pci_dev *dev, int nvec, int type); +static inline int setup_msi_irqs_pic(struct pci_dev *dev, int nvec, int type) +{ + return native_setup_msi_irqs_apic(dev, nvec, type); +} + +extern void native_teardown_msi_irq_apic(unsigned int irq); +static inline void teardown_msi_irq_pic(unsigned int irq) +{ + native_teardown_msi_irq_apic(irq); +} + +static inline void __init setup_boot_pic_clock(void) +{ + setup_boot_APIC_clock(); +} + +extern void __init init_apic_mappings(void); +static inline void __init init_pic_mappings(void) +{ + return init_apic_mappings(); +} +#endif /* !(CONFIG_EPIC) */ +#endif /* __ASM_L_PIC_H */ diff --git a/arch/l/include/asm/serial.h b/arch/l/include/asm/serial.h new file mode 100644 index 000000000000..c5f7f33ce8fb --- /dev/null +++ b/arch/l/include/asm/serial.h @@ -0,0 +1,79 @@ +/* + * include/asm-l/serial.h + */ +#ifndef _L_SERIAL_H +#define _L_SERIAL_H + +/* + * This assumes you have a 1.8432 MHz clock for your UART. + * + * It'd be nice if someone built a serial card with a 24.576 MHz + * clock, since the 16550A is capable of handling a top speed of 1.5 + * megabits/second; but this requires the faster clock. 
+ */ + +#define BASE_BAUD ( 1843200 / 16 ) + +/* Standard COM flags (except for COM4, because of the 8514 problem) */ +#ifdef CONFIG_SERIAL_DETECT_IRQ +#define STD_COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_AUTO_IRQ) +#define STD_COM4_FLAGS (UPF_BOOT_AUTOCONF | UPF_AUTO_IRQ) +#else +#define STD_COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST) +#define STD_COM4_FLAGS UPF_BOOT_AUTOCONF +#endif + +#ifdef CONFIG_E2K +#define SERIAL_PORT_DFNS \ + /* UART CLK PORT IRQ FLAGS */ \ + { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \ + { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \ + { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \ + { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */ +#endif + +#define AM85C30_RES_Tx_P 0x28 +#define AM85C30_EXT_INT_ENAB 0x01 +#define AM85C30_TxINT_ENAB 0x02 +#define AM85C30_RxINT_MASK 0x18 + +/* AM85C30 WRITE Registers */ + +#define AM85C30_WR0 0x00 +#define AM85C30_WR1 0x01 +#define AM85C30_WR2 0x02 +#define AM85C30_WR3 0x03 +#define AM85C30_WR4 0x04 +#define AM85C30_WR5 0x05 +#define AM85C30_WR6 0x06 +#define AM85C30_WR7 0x07 +#define AM85C30_WR8 0x08 +#define AM85C30_WR9 0x09 +#define AM85C30_WR10 0x0a +#define AM85C30_WR11 0x0b +#define AM85C30_WR12 0x0c +#define AM85C30_WR13 0x0d +#define AM85C30_WR14 0x0e +#define AM85C30_WR15 0x0f + +/* READ (Status) Registers */ + +#define AM85C30_RR0 0x00 +#define AM85C30_RR1 0x01 +#define AM85C30_RR2 0x02 +#define AM85C30_RR3 0x03 +#define AM85C30_RR8 0x08 +#define AM85C30_RR10 0x0a +#define AM85C30_RR12 0x0c +#define AM85C30_RR13 0x0d + +#define AM85C30_D0 (0x01 << 0) +#define AM85C30_D1 (0x01 << 1) +#define AM85C30_D2 (0x01 << 2) +#define AM85C30_D3 (0x01 << 3) +#define AM85C30_D4 (0x01 << 4) +#define AM85C30_D5 (0x01 << 5) +#define AM85C30_D6 (0x01 << 6) +#define AM85C30_D7 (0x01 << 7) + +#endif /* ! 
_L_SERIAL_H */ diff --git a/arch/l/include/asm/setup.h b/arch/l/include/asm/setup.h new file mode 100644 index 000000000000..62fe6fbd1af7 --- /dev/null +++ b/arch/l/include/asm/setup.h @@ -0,0 +1,12 @@ +#ifndef _L_SETUP_H +#define _L_SETUP_H + +#include + +int l_set_ethernet_macaddr(struct pci_dev *pdev, char *macaddr); +extern int (*l_set_boot_mode)(int); + +int l_setup_arch(void); +void l_setup_vga(void); +unsigned long measure_cpu_freq(int cpu); +#endif /* _L_SETUP_H */ diff --git a/arch/l/include/asm/sic_regs.h b/arch/l/include/asm/sic_regs.h new file mode 100644 index 000000000000..c468b7fd6638 --- /dev/null +++ b/arch/l/include/asm/sic_regs.h @@ -0,0 +1,332 @@ + +#ifndef _L_SIC_REGS_H_ +#define _L_SIC_REGS_H_ + +#ifdef __KERNEL__ + +#include + +#include +#include + +#undef DEBUG_ERALY_NBSR_MODE +#undef DebugENBSR +#define DEBUG_ERALY_NBSR_MODE 0 /* early NBSR access */ +#ifndef CONFIG_BOOT_E2K +#define DebugENBSR(fmt, args...) \ + ({ if (DEBUG_ERALY_NBSR_MODE) \ + printk(fmt, ##args); }) +#else /* CONFIG_BOOT_E2K */ +#define DebugENBSR(fmt, args...) \ + ({ if (DEBUG_ERALY_NBSR_MODE) \ + rom_printk(fmt, ##args); }) +#endif /* ! CONFIG_BOOT_E2K */ + +#undef DEBUG_NBSR_MODE +#undef DebugNBSR +#define DEBUG_NBSR_MODE 0 /* NBSR access */ +#define DebugNBSR(fmt, args...) 
\ + ({ if (DEBUG_NBSR_MODE) \ + printk(fmt, ##args); }) + +#ifndef __ASSEMBLY__ + +static inline unsigned int +early_sic_read_node_nbsr_reg(int node_id, int reg_offset) +{ + unsigned char *node_nbsr; + unsigned char *addr; + unsigned int reg_value; + + node_nbsr = THE_NODE_NBSR_PHYS_BASE(node_id); + addr = node_nbsr + reg_offset; + reg_value = nbsr_early_read(addr); + DebugENBSR("early_sic_read_node_nbsr_reg() node %d reg 0x%x read 0x%x " + "from 0x%px\n", + node_id, reg_offset, reg_value, addr); + return reg_value; +} + +static inline void +early_sic_write_node_nbsr_reg(int node_id, int reg_offset, unsigned int reg_val) +{ + unsigned char *node_nbsr; + unsigned char *addr; + + node_nbsr = THE_NODE_NBSR_PHYS_BASE(node_id); + DebugENBSR("early_sic_write_node_nbsr_reg() node NBSR is %px\n", + node_nbsr); + addr = node_nbsr + reg_offset; + nbsr_early_write(reg_val, addr); + DebugENBSR("early_sic_write_node_nbsr_reg() node %d reg 0x%x write " + "0x%x to 0x%px\n", + node_id, reg_offset, reg_val, addr); +} + +static inline unsigned int +early_sic_read_node_iolink_nbsr_reg(int node_id, int io_link, int reg_offset) +{ + unsigned int reg_value; + +#ifndef CONFIG_BOOT_E2K + if (io_link < 0 || io_link >= MACH_NODE_NUMIOLINKS) { + printk(KERN_ERR "sic_read_node_iolink_nbsr_reg() bad IO link " + "# %d (< 0 or >= max %d)\n", + io_link, MACH_NODE_NUMIOLINKS); + return (unsigned int)-1; + } +#endif /* ! CONFIG_BOOT_E2K */ + reg_value = early_sic_read_node_nbsr_reg(node_id, + SIC_io_reg_offset(io_link, reg_offset)); + return reg_value; +} + +static inline void +early_sic_write_node_iolink_nbsr_reg(int node_id, int io_link, int reg_offset, + unsigned int reg_value) +{ +#ifndef CONFIG_BOOT_E2K + if (io_link < 0 || io_link >= MACH_NODE_NUMIOLINKS) { + printk(KERN_ERR "early_sic_write_node_iolink_nbsr_reg() bad " + "IO link # %d (< 0 or >= max %d)\n", + io_link, MACH_NODE_NUMIOLINKS); + return; + } +#endif /* ! 
CONFIG_BOOT_E2K */ + early_sic_write_node_nbsr_reg(node_id, + SIC_io_reg_offset(io_link, reg_offset), reg_value); +} + +static inline unsigned int +sic_read_node_nbsr_reg(int node_id, int reg_offset) +{ + unsigned char *node_nbsr; + unsigned int reg_value; + + node_nbsr = sic_get_node_nbsr_base(node_id); + if (node_nbsr == NULL) { + panic("sic_read_node_nbsr_reg() node #%d has not mapping " + "to SIC(NBSR) registers\n", node_id); + } + reg_value = nbsr_read(&node_nbsr[reg_offset]); + DebugNBSR("sic_read_node_nbsr_reg() node %d reg 0x%x read 0x%x " + "from 0x%px\n", + node_id, reg_offset, reg_value, + &node_nbsr[reg_offset]); + return reg_value; +} + +static inline unsigned long +sic_readll_node_nbsr_reg(int node_id, int reg_offset) +{ + unsigned char *node_nbsr; + unsigned long reg_value; + + node_nbsr = sic_get_node_nbsr_base(node_id); + if (node_nbsr == NULL) { + panic("sic_readll_node_nbsr_reg() node #%d has not mapping " + "to SIC(NBSR) registers\n", node_id); + } + reg_value = nbsr_readll(&node_nbsr[reg_offset]); + DebugNBSR("sic_readll_node_nbsr_reg() node %d reg 0x%x read 0x%lx " + "from 0x%px\n", + node_id, reg_offset, reg_value, + &node_nbsr[reg_offset]); + return reg_value; +} + +static inline u16 +sic_readw_node_nbsr_reg(int node_id, int reg_offset) +{ + unsigned char *node_nbsr; + u16 reg_value; + + node_nbsr = sic_get_node_nbsr_base(node_id); + if (node_nbsr == NULL) { + panic("sic_readw_node_nbsr_reg() node #%d has not mapping " + "to SIC(NBSR) registers\n", node_id); + } + reg_value = nbsr_readw(&node_nbsr[reg_offset]); + DebugNBSR("sic_readw_node_nbsr_reg() node %d reg 0x%x read 0x%x " + "from 0x%px\n", + node_id, reg_offset, reg_value, + &node_nbsr[reg_offset]); + return reg_value; +} + +static inline unsigned int +sic_read_node_iolink_nbsr_reg(int node_id, int io_link, int reg_offset) +{ + unsigned int reg_value; + + if (!HAS_MACHINE_L_SIC) { + printk(KERN_ERR "sic_read_node_iolink_nbsr_reg() machine has " + "not SIC\n"); + return (unsigned 
int)-1; + } + if (io_link < 0 || io_link >= MACH_NODE_NUMIOLINKS) { + printk(KERN_ERR "sic_read_node_iolink_nbsr_reg() bad IO link " + "# %d (< 0 or >= max %d)\n", + io_link, MACH_NODE_NUMIOLINKS); + return (unsigned int)-1; + } + reg_value = sic_read_node_nbsr_reg(node_id, + SIC_io_reg_offset(io_link, reg_offset)); + return reg_value; +} + +static inline unsigned long +sic_readll_node_iolink_nbsr_reg(int node_id, int io_link, int reg_offset) +{ + unsigned long reg_value; + + if (!HAS_MACHINE_L_SIC) { + printk(KERN_ERR "sic_readll_node_iolink_nbsr_reg() machine has " + "not SIC\n"); + return (unsigned int)-1; + } + if (io_link < 0 || io_link >= MACH_NODE_NUMIOLINKS) { + printk(KERN_ERR "sic_readll_node_iolink_nbsr_reg() bad IO link " + "# %d (< 0 or >= max %d)\n", + io_link, MACH_NODE_NUMIOLINKS); + return (unsigned int)-1; + } + reg_value = sic_readll_node_nbsr_reg(node_id, + SIC_io_reg_offset(io_link, reg_offset)); + return reg_value; +} + +static inline void +sic_write_node_nbsr_reg(int node_id, int reg_offset, unsigned int reg_value) +{ + unsigned char *node_nbsr; + + node_nbsr = sic_get_node_nbsr_base(node_id); + if (node_nbsr == NULL) { + panic("sic_write_node_nbsr_reg() node #%d has not mapping " + "to SIC(NBSR) registers\n", node_id); + } + nbsr_write(reg_value, &node_nbsr[reg_offset]); + DebugNBSR("sic_write_node_nbsr_reg() node %d reg 0x%x writenn 0x%x to " + "0x%px\n", + node_id, reg_offset, reg_value, &node_nbsr[reg_offset]); +} + +static inline void sic_write_node_nbsr_reg_relaxed(int node_id, int reg_offset, + unsigned int reg_value) +{ + unsigned char *node_nbsr; + + node_nbsr = sic_get_node_nbsr_base(node_id); + if (node_nbsr == NULL) { + panic("sic_write_node_nbsr_reg() node #%d has not mapping " + "to SIC(NBSR) registers\n", node_id); + } + nbsr_write_relaxed(reg_value, &node_nbsr[reg_offset]); + DebugNBSR("sic_write_node_nbsr_reg() node %d reg 0x%x writenn 0x%x to " + "0x%px\n", + node_id, reg_offset, reg_value, &node_nbsr[reg_offset]); +} + 
+static inline void +sic_writell_node_nbsr_reg(int node_id, int reg_offset, unsigned long reg_value) +{ + unsigned char *node_nbsr; + + node_nbsr = sic_get_node_nbsr_base(node_id); + if (node_nbsr == NULL) { + panic("sic_writell_node_nbsr_reg() node #%d has not mapping " + "to SIC(NBSR) registers\n", node_id); + } + nbsr_writell(reg_value, &node_nbsr[reg_offset]); + DebugNBSR("sic_writell_node_nbsr_reg() node %d reg 0x%x written 0x%lx to " + "0x%px\n", + node_id, reg_offset, reg_value, &node_nbsr[reg_offset]); +} + +static inline void +sic_writew_node_nbsr_reg(int node_id, int reg_offset, u16 reg_value) +{ + unsigned char *node_nbsr; + + node_nbsr = sic_get_node_nbsr_base(node_id); + if (node_nbsr == NULL) { + panic("sic_writew_node_nbsr_reg() node #%d has not mapping " + "to SIC(NBSR) registers\n", node_id); + } + nbsr_writew(reg_value, &node_nbsr[reg_offset]); + DebugNBSR("sic_writew_node_nbsr_reg() node %d reg 0x%x written 0x%x to " + "0x%px\n", + node_id, reg_offset, reg_value, &node_nbsr[reg_offset]); +} + +static inline void +sic_write_node_iolink_nbsr_reg(int node_id, int io_link, int reg_offset, + unsigned int reg_value) +{ + if (!HAS_MACHINE_L_SIC) { + printk(KERN_ERR "sic_write_node_iolink_nbsr_reg() machine has " + "not SIC\n"); + return; + } + if (io_link < 0 || io_link >= MACH_NODE_NUMIOLINKS) { + printk(KERN_ERR "sic_write_node_iolink_nbsr_reg() bad IO link " + "# %d (< 0 or >= max %d)\n", + io_link, MACH_NODE_NUMIOLINKS); + return; + } + sic_write_node_nbsr_reg(node_id, + SIC_io_reg_offset(io_link, reg_offset), reg_value); +} + +static inline void +sic_writell_node_iolink_nbsr_reg(int node_id, int io_link, int reg_offset, + unsigned long reg_value) +{ + if (!HAS_MACHINE_L_SIC) { + printk(KERN_ERR "sic_writell_node_iolink_nbsr_reg() machine has " + "not SIC\n"); + return; + } + if (io_link < 0 || io_link >= MACH_NODE_NUMIOLINKS) { + printk(KERN_ERR "sic_writell_node_iolink_nbsr_reg() bad IO link " + "# %d (< 0 or >= max %d)\n", + io_link, 
MACH_NODE_NUMIOLINKS); + return; + } + sic_writell_node_nbsr_reg(node_id, + SIC_io_reg_offset(io_link, reg_offset), reg_value); +} + + +static inline unsigned int +sic_read_nbsr_reg(int reg_offset) +{ + return sic_read_node_nbsr_reg(numa_node_id(), reg_offset); +} + +static inline unsigned int +sic_read_iolink_nbsr_reg(int io_link, int reg_offset) +{ + return sic_read_node_iolink_nbsr_reg(numa_node_id(), io_link, + reg_offset); +} + +static inline void +sic_write_nbsr_reg(int reg_offset, unsigned int reg_value) +{ + sic_write_node_nbsr_reg(numa_node_id(), reg_offset, reg_value); +} + +static inline void +sic_write_iolink_nbsr_reg(int io_link, int reg_offset, unsigned int reg_value) +{ + sic_write_node_iolink_nbsr_reg(numa_node_id(), io_link, reg_offset, + reg_value); +} + +#endif /* ! __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* _L_SIC_REGS_H_ */ diff --git a/arch/l/include/asm/smp.h b/arch/l/include/asm/smp.h new file mode 100644 index 000000000000..12ef529c2da3 --- /dev/null +++ b/arch/l/include/asm/smp.h @@ -0,0 +1,8 @@ +#ifndef _ASM_L_SMP_H +#define _ASM_L_SMP_H + +extern unsigned int mp_num_processors; +extern unsigned int num_processors; +extern unsigned int disabled_cpus; + +#endif /* _ASM_L_SMP_H */ diff --git a/arch/l/include/asm/swiotlb.h b/arch/l/include/asm/swiotlb.h new file mode 100644 index 000000000000..27594bf32399 --- /dev/null +++ b/arch/l/include/asm/swiotlb.h @@ -0,0 +1,3 @@ +#pragma once + +extern int l_use_swiotlb; diff --git a/arch/l/include/asm/tree_entry.h b/arch/l/include/asm/tree_entry.h new file mode 100644 index 000000000000..2cbbd7a3cf2e --- /dev/null +++ b/arch/l/include/asm/tree_entry.h @@ -0,0 +1,42 @@ +#ifndef __TREE_ENTRY_H +#define __TREE_ENTRY_H + +#define MAX_PROPERTY 8 +#define ATTRIB_NAME 0 + +struct prom_property { + const char *name; + void *value; + int size; +}; +struct tree_entry { + struct tree_entry *sibling; + struct tree_entry *child; + int node; + struct prom_property prop[MAX_PROPERTY]; /*NULEWOE 
SWOJSTWO D.B. IMENEM */ +}; + +extern struct tree_entry *sbus_root_node; + +extern void scan_sbus(struct tree_entry *root, unsigned long start_addr, + int slot_len, int slot_num); +extern void init_known_nodes(struct tree_entry *root); +extern struct tree_entry *get_te_by_node(int node); +extern struct tree_entry *copy_sbus_dev(struct tree_entry *dev); +extern void free_sbus_dev(struct tree_entry *dev); + +extern int prom_getchild(int node); +extern int prom_getproperty(int node, const char *prop, char *buffer, int bufsize); +extern int prom_node_has_property(int node, char *prop); +extern int prom_getproplen(int node, const char *prop); +extern int prom_setprop(int node, const char *pname, char *value, int size); +extern char * prom_firstprop(int node, char *bufer); +extern char * prom_nextprop(int node, char *oprop, char *buffer); +extern int prom_searchsiblings(int node_start, char *nodename); +extern int prom_getsibling(int node); +extern int prom_getint(int node, char *prop); +extern int prom_getbool(int node, char *prop); +extern int prom_getintdefault(int node, char *property, int deflt); +extern void prom_getstring(int node, char *prop, char *user_buf, int ubuf_size); + +#endif /* __TREE_ENTRY_H */ diff --git a/arch/l/kernel/Makefile b/arch/l/kernel/Makefile new file mode 100644 index 000000000000..196306e15225 --- /dev/null +++ b/arch/l/kernel/Makefile @@ -0,0 +1,33 @@ +# +# Makefile for the Elbrus linux kernel. 
+# /arch/l/kernel +# +obj-y += l-i2c2.o l-iommu.o procshow.o mpparse.o setup.o procregs.o \ + l-mcmonitor.o l-uncached.o +obj-y += apic/ +obj-y += pmc/ +obj-y += pic_irq.o cpufreq.o + +obj-$(CONFIG_L_EARLY_PRINTK) += console.o +obj-$(CONFIG_NVRAM_PANIC) += panic2nvram.o +obj-$(CONFIG_L_LOCAL_APIC) += lt.o +obj-$(CONFIG_L_SIC_IPLINK_OFF) += procipcc2.o +obj-$(CONFIG_L_MMPD) += procmmpddiag.o +obj-$(CONFIG_SERIAL_AM85C30_CONSOLE) += am85c30.o +obj-$(CONFIG_CLKR_CLOCKSOURCE) += clkr.o +obj-y += clk_rt.o + +obj-$(CONFIG_IOHUB_GPIO) += gpio.o +obj-$(CONFIG_BOOT_TRACE) += boot_profiling.o +obj-$(CONFIG_I2C_SPI_RESET_CONTROLLER) += i2c-spi/ +obj-$(CONFIG_ACPI) += acpi/ +obj-$(CONFIG_ACPI_L_SPMC) += acpi_tainted/ +obj-$(CONFIG_OF) += devtree.o +obj-$(CONFIG_EPIC) += epic/ + +ifdef CONFIG_FUNCTION_TRACER +# Do not profile debug and lowlevel utilities + CFLAGS_REMOVE_console.o = -pg + CFLAGS_REMOVE_am85c30.o = -pg + CFLAGS_REMOVE_ns16550.o = -pg +endif diff --git a/arch/l/kernel/acpi/Makefile b/arch/l/kernel/acpi/Makefile new file mode 100644 index 000000000000..823f66b6ff1c --- /dev/null +++ b/arch/l/kernel/acpi/Makefile @@ -0,0 +1,3 @@ +obj-y += boot.o +obj-$(CONFIG_ACPI_SLEEP) += sleep.o +obj-y += cstate.o diff --git a/arch/l/kernel/acpi/boot.c b/arch/l/kernel/acpi/boot.c new file mode 100644 index 000000000000..f1ae2a52fc2a --- /dev/null +++ b/arch/l/kernel/acpi/boot.c @@ -0,0 +1,1692 @@ +/* + * boot.c - Architecture-Specific Low-Level ACPI Boot Support + * + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * Copyright (C) 2001 Jun Nakajima + * Copyright (C) 2012 Evgeny Kravtsunov + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +int sbf_port __initdata = -1; /* set via acpi_boot_init() */ + +static int __initdata acpi_force = 0; +u32 acpi_rsdt_forced; +int acpi_disabled; +EXPORT_SYMBOL(acpi_disabled); + +#define BAD_MADT_ENTRY(entry, end) ( \ + (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ + ((struct acpi_subtable_header *)entry)->length < sizeof(*entry)) + +#define PREFIX "ACPI: " + +int acpi_noirq; /* skip ACPI IRQ initialization */ +int acpi_pci_disabled; /* skip ACPI PCI scan and IRQ initialization */ +EXPORT_SYMBOL(acpi_pci_disabled); +int acpi_ht __initdata = 1; /* enable HT */ + +int acpi_lapic; +int acpi_ioapic; +int acpi_strict; + +u8 acpi_sci_flags __initdata; +int acpi_sci_override_gsi __initdata; +int acpi_skip_timer_override __initdata; +int acpi_use_timer_override __initdata; +int acpi_fix_pin2_polarity __initdata; + +#ifdef CONFIG_L_LOCAL_APIC +static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; +#endif + +#ifndef __HAVE_ARCH_CMPXCHG +#warning ACPI uses CMPXCHG +#endif + +/* -------------------------------------------------------------------------- + Boot-time Configuration + -------------------------------------------------------------------------- */ + +/* + * The default interrupt routing model is PIC (8259). 
This gets + * overridden if IOAPICs are enumerated (below). + */ +enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC; + + +/* + * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END, + * to map the target physical address. The problem is that set_fixmap() + * provides a single page, and it is possible that the page is not + * sufficient. + * By using this area, we can map up to MAX_IO_APICS pages temporarily, + * i.e. until the next __va_range() call. + * + * Important Safety Note: The fixed I/O APIC page numbers are *subtracted* + * from the fixed base. That's why we start at FIX_IO_APIC_BASE_END and + * count idx down while incrementing the phys address. + */ +char *__init __acpi_map_table(unsigned long phys, unsigned long size) +{ + + if (!phys || !size) + return NULL; + + return ioremap(phys, size); +} +void __init __acpi_unmap_table(char *map, unsigned long size) +{ +} + +#ifdef CONFIG_L_LOCAL_APIC +static int __init acpi_parse_madt(struct acpi_table_header *table) +{ + struct acpi_table_madt *madt = NULL; + + if (!cpu_has_apic) + return -EINVAL; + + madt = (struct acpi_table_madt *)table; + if (!madt) { + pr_warning(PREFIX "Unable to map MADT\n"); + return -ENODEV; + } + + if (madt->address) { + acpi_lapic_addr = (u64) madt->address; + + pr_debug(PREFIX "Local APIC address 0x%08x\n", + madt->address); + } + + default_acpi_madt_oem_check(madt->header.oem_id, + madt->header.oem_table_id); + + return 0; +} + +static void acpi_register_lapic(int id, u8 enabled) +{ + unsigned int ver = 0; + + if (!enabled) { + ++disabled_cpus; + return; + } + + if (boot_cpu_physical_apicid != -1U) + ver = apic_version[boot_cpu_physical_apicid]; + + generic_processor_info(id, ver); +} + +static int __init +acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end) +{ + struct acpi_madt_local_x2apic *processor = NULL; + + processor = (struct acpi_madt_local_x2apic *)header; + + if (BAD_MADT_ENTRY(processor, end)) + return -EINVAL; + + 
acpi_table_print_madt_entry(header); + pr_warning(PREFIX "x2apic entry ignored\n"); + + return 0; +} + +static int __init +acpi_parse_lapic(struct acpi_subtable_header *header, const unsigned long end) +{ + struct acpi_madt_local_apic *processor = NULL; + + processor = (struct acpi_madt_local_apic *)header; + + if (BAD_MADT_ENTRY(processor, end)) + return -EINVAL; + + acpi_table_print_madt_entry(header); + + /* + * We need to register disabled CPU as well to permit + * counting disabled CPUs. This allows us to size + * cpus_possible_map more accurately, to permit + * to not preallocating memory for all NR_CPUS + * when we use CPU hotplug. + */ + acpi_register_lapic(processor->id, /* APIC ID */ + processor->lapic_flags & ACPI_MADT_ENABLED); + + return 0; +} + +static int __init +acpi_parse_sapic(struct acpi_subtable_header *header, const unsigned long end) +{ + struct acpi_madt_local_sapic *processor = NULL; + + processor = (struct acpi_madt_local_sapic *)header; + + if (BAD_MADT_ENTRY(processor, end)) + return -EINVAL; + + acpi_table_print_madt_entry(header); + + acpi_register_lapic((processor->id << 8) | processor->eid,/* APIC ID */ + processor->lapic_flags & ACPI_MADT_ENABLED); + + return 0; +} + +static int __init +acpi_parse_lapic_addr_ovr(struct acpi_subtable_header *header, + const unsigned long end) +{ + struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL; + + lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header; + + if (BAD_MADT_ENTRY(lapic_addr_ovr, end)) + return -EINVAL; + + acpi_lapic_addr = lapic_addr_ovr->address; + + return 0; +} + +static int __init +acpi_parse_x2apic_nmi(struct acpi_subtable_header *header, + const unsigned long end) +{ + struct acpi_madt_local_x2apic_nmi *x2apic_nmi = NULL; + + x2apic_nmi = (struct acpi_madt_local_x2apic_nmi *)header; + + if (BAD_MADT_ENTRY(x2apic_nmi, end)) + return -EINVAL; + + acpi_table_print_madt_entry(header); + + if (x2apic_nmi->lint != 1) + pr_warning(PREFIX "NMI not connected to LINT 
1!\n"); + + return 0; +} + +static int __init +acpi_parse_lapic_nmi(struct acpi_subtable_header *header, + const unsigned long end) +{ + struct acpi_madt_local_apic_nmi *lapic_nmi = NULL; + + lapic_nmi = (struct acpi_madt_local_apic_nmi *)header; + + if (BAD_MADT_ENTRY(lapic_nmi, end)) + return -EINVAL; + + acpi_table_print_madt_entry(header); + + if (lapic_nmi->lint != 1) + pr_warning(PREFIX "NMI not connected to LINT 1!\n"); + + return 0; +} + +#endif /*CONFIG_L_LOCAL_APIC */ + +#ifdef CONFIG_L_IO_APIC + +static int __init +acpi_parse_ioapic(struct acpi_subtable_header *header, const unsigned long end) +{ + struct acpi_madt_io_apic *ioapic = NULL; + + ioapic = (struct acpi_madt_io_apic *)header; + + if (BAD_MADT_ENTRY(ioapic, end)) + return -EINVAL; + + acpi_table_print_madt_entry(header); + + mp_register_ioapic(ioapic->id, + ioapic->address, ioapic->global_irq_base); + + return 0; +} + +/* + * Parse Interrupt Source Override for the ACPI SCI + */ +static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger) +{ + if (trigger == 0) /* compatible SCI trigger is level */ + trigger = 3; + + if (polarity == 0) /* compatible SCI polarity is low */ + polarity = 3; + + /* Command-line over-ride via acpi_sci= */ + if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) + trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2; + + if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK) + polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK; + + /* + * mp_config_acpi_legacy_irqs() already setup IRQs < 16 + * If GSI is < 16, this will update its flags, + * else it will create a new mp_irqs[] entry. 
+ */ + mp_override_legacy_irq(gsi, polarity, trigger, gsi); + + /* + * stash over-ride to indicate we've been here + * and for later update of acpi_gbl_FADT + */ + acpi_sci_override_gsi = gsi; + return; +} + +static int __init +acpi_parse_int_src_ovr(struct acpi_subtable_header *header, + const unsigned long end) +{ + struct acpi_madt_interrupt_override *intsrc = NULL; + + intsrc = (struct acpi_madt_interrupt_override *)header; + + if (BAD_MADT_ENTRY(intsrc, end)) + return -EINVAL; + + acpi_table_print_madt_entry(header); + + if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) { + acpi_sci_ioapic_setup(intsrc->global_irq, + intsrc->inti_flags & ACPI_MADT_POLARITY_MASK, + (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2); + return 0; + } + + if (intsrc->source_irq == 0 && intsrc->global_irq == 2) { + if (acpi_skip_timer_override) { + printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); + return 0; + } + if (acpi_fix_pin2_polarity && + (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) { + intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK; + printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n"); + } + } + + mp_override_legacy_irq(intsrc->source_irq, + intsrc->inti_flags & ACPI_MADT_POLARITY_MASK, + (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2, + intsrc->global_irq); + + return 0; +} + +static int __init +acpi_parse_nmi_src(struct acpi_subtable_header *header, const unsigned long end) +{ + struct acpi_madt_nmi_source *nmi_src = NULL; + + nmi_src = (struct acpi_madt_nmi_source *)header; + + if (BAD_MADT_ENTRY(nmi_src, end)) + return -EINVAL; + + acpi_table_print_madt_entry(header); + + /* TBD: Support nimsrc entries? 
*/ + + return 0; +} + +#endif /* CONFIG_L_IO_APIC */ + +/* + * acpi_pic_sci_set_trigger() + * + * use ELCR to set PIC-mode trigger type for SCI + * + * If a PIC-mode SCI is not recognized or gives spurious IRQ7's + * it may require Edge Trigger -- use "acpi_sci=edge" + * + * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers + * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge. + * ECLR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0) + * ECLR2 is IRQs 8-15 (IRQ 8, 13 must be 0) + */ + +void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger) +{ + unsigned int mask = 1 << irq; + unsigned int old, new; + + /* Real old ELCR mask */ + old = inb(0x4d0) | (inb(0x4d1) << 8); + + /* + * If we use ACPI to set PCI IRQs, then we should clear ELCR + * since we will set it correctly as we enable the PCI irq + * routing. + */ + new = acpi_noirq ? old : 0; + + /* + * Update SCI information in the ELCR, it isn't in the PCI + * routing tables.. + */ + switch (trigger) { + case 1: /* Edge - clear */ + new &= ~mask; + break; + case 3: /* Level - set */ + new |= mask; + break; + } + + if (old == new) + return; + + printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old); + outb(new, 0x4d0); + outb(new >> 8, 0x4d1); +} + +int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) +{ + *irq = gsi; + +#ifdef CONFIG_L_IO_APIC + if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) + setup_IO_APIC_irq_extra(gsi); +#endif + + return 0; +} + +/* + * success: return IRQ number (>=0) + * failure: return < 0 + */ +int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) +{ + unsigned int irq; + unsigned int plat_gsi = gsi; + +#ifdef CONFIG_PCI + /* + * Make sure all (legacy) PCI IRQs are set as level-triggered. 
+ */ + if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) { + if (trigger == ACPI_LEVEL_SENSITIVE) + eisa_set_level_irq(gsi); + } +#endif + +#ifdef CONFIG_L_IO_APIC + if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) { + plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity); + } +#endif + irq = plat_gsi; + + return irq; +} + +/* + * ACPI based hotplug support for CPU + */ +#ifdef CONFIG_ACPI_HOTPLUG_CPU + +static int _acpi_map_lsapic(acpi_handle handle, int *pcpu) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj; + struct acpi_madt_local_apic *lapic; + cpumask_var_t tmp_map, new_map; + u8 physid; + int cpu; + int retval = -ENOMEM; + + if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) + return -EINVAL; + + if (!buffer.length || !buffer.pointer) + return -EINVAL; + + obj = buffer.pointer; + if (obj->type != ACPI_TYPE_BUFFER || + obj->buffer.length < sizeof(*lapic)) { + kfree(buffer.pointer); + return -EINVAL; + } + + lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer; + + if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC || + !(lapic->lapic_flags & ACPI_MADT_ENABLED)) { + kfree(buffer.pointer); + return -EINVAL; + } + + physid = lapic->id; + + kfree(buffer.pointer); + buffer.length = ACPI_ALLOCATE_BUFFER; + buffer.pointer = NULL; + + if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL)) + goto out; + + if (!alloc_cpumask_var(&new_map, GFP_KERNEL)) + goto free_tmp_map; + + cpumask_copy(tmp_map, cpu_present_mask); + acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED); + + /* + * If mp_register_lapic successfully generates a new logical cpu + * number, then the following will get us exactly what was mapped + */ + cpumask_andnot(new_map, cpu_present_mask, tmp_map); + if (cpumask_empty(new_map)) { + pr_warning("Unable to map lapic to logical cpu number\n"); + retval = -EINVAL; + goto free_new_map; + } + + cpu = cpumask_first(new_map); + + *pcpu = cpu; + retval = 0; + +free_new_map: + 
free_cpumask_var(new_map); +free_tmp_map: + free_cpumask_var(tmp_map); +out: + return retval; +} + +/* wrapper to silence section mismatch warning */ +int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) +{ + return _acpi_map_lsapic(handle, pcpu); +} +EXPORT_SYMBOL(acpi_map_lsapic); + +int acpi_unmap_lsapic(int cpu) +{ + /* Do _not_ clear cpu to apicid map here as it is used to bring back + * an offline cpu (see clock_off_v3 and nmi_call_function_offline()). */ + set_cpu_present(cpu, false); + num_processors--; + + return 0; +} +EXPORT_SYMBOL(acpi_unmap_lsapic); +#endif /* CONFIG_ACPI_HOTPLUG_CPU */ + +int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) +{ + /* TBD */ + return -EINVAL; +} +EXPORT_SYMBOL(acpi_register_ioapic); + +int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base) +{ + /* TBD */ + return -EINVAL; +} +EXPORT_SYMBOL(acpi_unregister_ioapic); + +static int __init acpi_parse_sbf(struct acpi_table_header *table) +{ + struct acpi_table_boot *sb; + + sb = (struct acpi_table_boot *)table; + if (!sb) { + pr_warning(PREFIX "Unable to map SBF\n"); + return -ENODEV; + } + + sbf_port = sb->cmos_index; /* Save CMOS port */ + + return 0; +} + +#ifdef CONFIG_HPET_TIMER +#include + +static struct __initdata resource *hpet_res; + +static int __init acpi_parse_hpet(struct acpi_table_header *table) +{ + struct acpi_table_hpet *hpet_tbl; + + hpet_tbl = (struct acpi_table_hpet *)table; + if (!hpet_tbl) { + pr_warning(PREFIX "Unable to map HPET\n"); + return -ENODEV; + } + + if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) { + pr_warning(PREFIX "HPET timers must be located in " + "memory.\n"); + return -1; + } + + hpet_address = hpet_tbl->address.address; + hpet_blockid = hpet_tbl->sequence; + + /* + * Some broken BIOSes advertise HPET at 0x0. We really do not + * want to allocate a resource there. 
+ */ + if (!hpet_address) { + pr_warning(PREFIX + "HPET id: %#x base: %#lx is invalid\n", + hpet_tbl->id, hpet_address); + return 0; + } +#ifdef CONFIG_X86_64 + /* + * Some even more broken BIOSes advertise HPET at + * 0xfed0000000000000 instead of 0xfed00000. Fix it up and add + * some noise: + */ + if (hpet_address == 0xfed0000000000000UL) { + if (!hpet_force_user) { + pr_warning(PREFIX "HPET id: %#x " + "base: 0xfed0000000000000 is bogus\n " + "try hpet=force on the kernel command line to " + "fix it up to 0xfed00000.\n", hpet_tbl->id); + hpet_address = 0; + return 0; + } + pr_warning(PREFIX + "HPET id: %#x base: 0xfed0000000000000 fixed up " + "to 0xfed00000.\n", hpet_tbl->id); + hpet_address >>= 32; + } +#endif + pr_info(PREFIX "HPET id: %#x base: %#lx\n", + hpet_tbl->id, hpet_address); + + /* + * Allocate and initialize the HPET firmware resource for adding into + * the resource tree during the lateinit timeframe. + */ +#define HPET_RESOURCE_NAME_SIZE 9 + hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE); + + hpet_res->name = (void *)&hpet_res[1]; + hpet_res->flags = IORESOURCE_MEM; + snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, "HPET %u", + hpet_tbl->sequence); + + hpet_res->start = hpet_address; + hpet_res->end = hpet_address + (1 * 1024) - 1; + + return 0; +} + +/* + * hpet_insert_resource inserts the HPET resources used into the resource + * tree. + */ +static __init int hpet_insert_resource(void) +{ + if (!hpet_res) + return 1; + + return insert_resource(&iomem_resource, hpet_res); +} + +late_initcall(hpet_insert_resource); + +#else +#define acpi_parse_hpet NULL +#endif + +static int __init acpi_parse_fadt(struct acpi_table_header *table) +{ + +#ifdef CONFIG_X86_PM_TIMER + /* detect the location of the ACPI PM Timer */ + if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) { + /* FADT rev. 
2 */ + if (acpi_gbl_FADT.xpm_timer_block.space_id != + ACPI_ADR_SPACE_SYSTEM_IO) + return 0; + + pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address; + /* + * "X" fields are optional extensions to the original V1.0 + * fields, so we must selectively expand V1.0 fields if the + * corresponding X field is zero. + */ + if (!pmtmr_ioport) + pmtmr_ioport = acpi_gbl_FADT.pm_timer_block; + } else { + /* FADT rev. 1 */ + pmtmr_ioport = acpi_gbl_FADT.pm_timer_block; + } + if (pmtmr_ioport) + pr_info(PREFIX "PM-Timer IO Port: %#x\n", + pmtmr_ioport); +#endif + return 0; +} + +#ifdef CONFIG_L_LOCAL_APIC +/* + * Parse LAPIC entries in MADT + * returns 0 on success, < 0 on error + */ + +static void __init acpi_register_lapic_address(unsigned long address) +{ + mp_lapic_addr = address; +} + +static int __init early_acpi_parse_madt_lapic_addr_ovr(void) +{ + int count; + + if (!cpu_has_apic) + return -ENODEV; + + /* + * Note that the LAPIC address is obtained from the MADT (32-bit value) + * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value). + */ + + count = + acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, + acpi_parse_lapic_addr_ovr, 0); + if (count < 0) { + pr_err(PREFIX + "Error parsing LAPIC address override entry\n"); + return count; + } + + acpi_register_lapic_address(acpi_lapic_addr); + + return count; +} + +static int __init acpi_parse_madt_lapic_entries(void) +{ + int count; + int x2count = 0; + + if (!cpu_has_apic) + return -ENODEV; + + /* + * Note that the LAPIC address is obtained from the MADT (32-bit value) + * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value). 
+ */ + + count = + acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, + acpi_parse_lapic_addr_ovr, 0); + if (count < 0) { + pr_err(PREFIX + "Error parsing LAPIC address override entry\n"); + return count; + } + + acpi_register_lapic_address(acpi_lapic_addr); + + count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, + acpi_parse_sapic, MAX_APICS); + + if (!count) { + x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC, + acpi_parse_x2apic, MAX_APICS); + count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, + acpi_parse_lapic, MAX_APICS); + } + if (!count && !x2count) { + pr_err(PREFIX "No LAPIC entries present\n"); + /* TBD: Cleanup to allow fallback to MPS */ + return -ENODEV; + } else if (count < 0 || x2count < 0) { + pr_err(PREFIX "Error parsing LAPIC entry\n"); + /* TBD: Cleanup to allow fallback to MPS */ + return count; + } + + x2count = + acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC_NMI, + acpi_parse_x2apic_nmi, 0); + count = + acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, + acpi_parse_lapic_nmi, 0); + if (count < 0 || x2count < 0) { + pr_err(PREFIX "Error parsing LAPIC NMI entry\n"); + /* TBD: Cleanup to allow fallback to MPS */ + return count; + } + return 0; +} +#endif /* CONFIG_L_LOCAL_APIC */ + +#ifdef CONFIG_L_IO_APIC +#define MP_ISA_BUS 0 + +#ifdef CONFIG_X86_ES7000 +extern int es7000_plat; +#endif + +int __init acpi_probe_gsi(void) +{ + int idx; + int gsi; + int max_gsi = 0; + + if (acpi_disabled) + return 0; + + if (!acpi_ioapic) + return 0; + + max_gsi = 0; + for (idx = 0; idx < nr_ioapics; idx++) { + gsi = mp_gsi_routing[idx].gsi_end; + + if (gsi > max_gsi) + max_gsi = gsi; + } + + return max_gsi + 1; +} + +static void assign_to_mp_irq(struct mpc_intsrc *m, + struct mpc_intsrc *mp_irq) +{ + memcpy(mp_irq, m, sizeof(struct mpc_intsrc)); +} + +static int mp_irq_cmp(struct mpc_intsrc *mp_irq, + struct mpc_intsrc *m) +{ + return memcmp(mp_irq, m, sizeof(struct mpc_intsrc)); +} + +static void save_mp_irq(struct 
mpc_intsrc *m) +{ + int i; + + for (i = 0; i < mp_irq_entries; i++) { + if (!mp_irq_cmp(&mp_irqs[i], m)) + return; + } + + assign_to_mp_irq(m, &mp_irqs[mp_irq_entries]); + if (++mp_irq_entries == MAX_IRQ_SOURCES) + panic("Max # of irq sources exceeded!!\n"); +} + +void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) +{ + int ioapic; + int pin; + struct mpc_intsrc mp_irq; + + /* + * Convert 'gsi' to 'ioapic.pin'. + */ + ioapic = mp_find_ioapic(gsi); + if (ioapic < 0) + return; + pin = mp_find_ioapic_pin(ioapic, gsi); + + /* + * TBD: This check is for faulty timer entries, where the override + * erroneously sets the trigger to level, resulting in a HUGE + * increase of timer interrupts! + */ + if ((bus_irq == 0) && (trigger == 3)) + trigger = 1; + + mp_irq.type = MP_INTSRC; + mp_irq.irqtype = mp_INT; + mp_irq.irqflag = (trigger << 2) | polarity; + mp_irq.srcbus = MP_ISA_BUS; + mp_irq.srcbusirq = bus_irq; /* IRQ */ + mp_irq.dstapic = mpc_ioapic_id(ioapic); /* APIC ID */ + mp_irq.dstirq = pin; /* INTIN# */ + + save_mp_irq(&mp_irq); +} + +void __init mp_config_acpi_legacy_irqs(void) +{ + int i; + int ioapic; + unsigned int dstapic; + struct mpc_intsrc mp_irq; + +#if defined(CONFIG_MCA) || defined(CONFIG_EISA) + /* + * Fabricate the legacy ISA bus (bus #31). + */ + mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; +#endif + set_bit(MP_ISA_BUS, mp_bus_not_pci); + pr_debug("Bus #%d is ISA\n", MP_ISA_BUS); + +#ifdef CONFIG_X86_ES7000 + /* + * Older generations of ES7000 have no legacy identity mappings + */ + if (es7000_plat == 1) + return; +#endif + + /* + * Locate the IOAPIC that manages the ISA IRQs (0-15). + */ + ioapic = mp_find_ioapic(0); + if (ioapic < 0) + return; + dstapic = mpc_ioapic_id(ioapic); + + /* + * Use the default configuration for the IRQs 0-15. Unless + * overridden by (MADT) interrupt source override entries. 
+ */ + for (i = 0; i < 16; i++) { + int idx; + + for (idx = 0; idx < mp_irq_entries; idx++) { + struct mpc_intsrc *irq = mp_irqs + idx; + + /* Do we already have a mapping for this ISA IRQ? */ + if (irq->srcbus == MP_ISA_BUS && irq->srcbusirq == i) + break; + + /* Do we already have a mapping for this IOAPIC pin */ + if (irq->dstapic == dstapic && irq->dstirq == i) + break; + } + + if (idx != mp_irq_entries) { + pr_debug("ACPI: IRQ%d used by override.\n", i); + continue; /* IRQ already used */ + } + + mp_irq.type = MP_INTSRC; + mp_irq.irqflag = 0; /* Conforming */ + mp_irq.srcbus = MP_ISA_BUS; + mp_irq.dstapic = dstapic; + mp_irq.irqtype = mp_INT; + mp_irq.srcbusirq = i; /* Identity mapped */ + mp_irq.dstirq = i; + + save_mp_irq(&mp_irq); + } +} + +static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger, + int polarity) +{ +#ifdef CONFIG_X86_MPPARSE + struct mpc_intsrc mp_irq; + struct pci_dev *pdev; + unsigned char number; + unsigned int devfn; + int ioapic; + u8 pin; + + if (!acpi_ioapic) + return 0; + if (!dev) + return 0; + if (dev->bus != &pci_bus_type) + return 0; + + pdev = to_pci_dev(dev); + number = pdev->bus->number; + devfn = pdev->devfn; + pin = pdev->pin; + /* print the entry should happen on mptable identically */ + mp_irq.type = MP_INTSRC; + mp_irq.irqtype = mp_INT; + mp_irq.irqflag = (trigger == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) | + (polarity == ACPI_ACTIVE_HIGH ? 
1 : 3); + mp_irq.srcbus = number; + mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3); + ioapic = mp_find_ioapic(gsi); + mp_irq.dstapic = mp_ioapics[ioapic].apicid; + mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi); + + save_mp_irq(&mp_irq); +#endif + return 0; +} + +int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) +{ + int ioapic; + int ioapic_pin; + struct io_apic_irq_attr irq_attr; + + if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC) + return gsi; + + /* Don't set up the ACPI SCI because it's already set up */ + if (acpi_gbl_FADT.sci_interrupt == gsi) + return gsi; + + ioapic = mp_find_ioapic(gsi); + if (ioapic < 0) { + pr_warning("No IOAPIC for GSI %u\n", gsi); + return gsi; + } + + ioapic_pin = mp_find_ioapic_pin(ioapic, gsi); + +#ifdef CONFIG_X86_32 + if (ioapic_renumber_irq) + gsi = ioapic_renumber_irq(ioapic, gsi); +#endif + + if (ioapic_pin > MP_MAX_IOAPIC_PIN) { + pr_err("Invalid reference to IOAPIC pin " + "%d-%d\n", mpc_ioapic_id(ioapic), + ioapic_pin); + return gsi; + } + + if (enable_update_mptable) + mp_config_acpi_gsi(dev, gsi, trigger, polarity); + + set_io_apic_irq_attr(&irq_attr, ioapic, ioapic_pin, + trigger == ACPI_EDGE_SENSITIVE ? 0 : 1, + polarity == ACPI_ACTIVE_HIGH ? 0 : 1); + io_apic_set_pci_routing(dev, gsi, &irq_attr); + + return gsi; +} + +/* + * Parse IOAPIC related entries in MADT + * returns 0 on success, < 0 on error + */ +static int __init acpi_parse_madt_ioapic_entries(void) +{ + int count; + + /* + * ACPI interpreter is required to complete interrupt setup, + * so if it is off, don't enumerate the io-apics with ACPI. 
+ * If MPS is present, it will handle them, + * otherwise the system will stay in PIC mode + */ + if (acpi_disabled || acpi_noirq) + return -ENODEV; + + if (!cpu_has_apic) + return -ENODEV; + + /* + * if "noapic" boot option, don't look for IO-APICs + */ + if (skip_ioapic_setup) { + pr_info(PREFIX "Skipping IOAPIC probe " + "due to 'noapic' option.\n"); + return -ENODEV; + } + + count = + acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic, + MAX_IO_APICS); + if (!count) { + pr_err(PREFIX "No IOAPIC entries present\n"); + return -ENODEV; + } else if (count < 0) { + pr_err(PREFIX "Error parsing IOAPIC entry\n"); + return count; + } + + count = acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, + acpi_parse_int_src_ovr, + nr_irqs); + if (count < 0) { + pr_err(PREFIX + "Error parsing interrupt source overrides entry\n"); + /* TBD: Cleanup to allow fallback to MPS */ + return count; + } + + /* + * If BIOS did not supply an INT_SRC_OVR for the SCI + * pretend we got one so we can set the SCI flags. + */ + if (!acpi_sci_override_gsi) + acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0); + + /* Fill in identity legacy mappings where no override */ + mp_config_acpi_legacy_irqs(); + + count = + acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, + nr_irqs); + if (count < 0) { + pr_err(PREFIX "Error parsing NMI SRC entry\n"); + /* TBD: Cleanup to allow fallback to MPS */ + return count; + } + + return 0; +} +#else +static inline int acpi_parse_madt_ioapic_entries(void) +{ + return -1; +} +#endif /* !CONFIG_L_IO_APIC */ + +static void __init early_acpi_process_madt(void) +{ +#ifdef CONFIG_L_LOCAL_APIC + int error; + + if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) { + + /* + * Parse MADT LAPIC entries + */ + error = early_acpi_parse_madt_lapic_addr_ovr(); + if (!error) { + acpi_lapic = 1; + smp_found_config = 1; + } + if (error == -EINVAL) { + /* + * Dell Precision Workstation 410, 610 come here. 
+ */ + pr_err(PREFIX + "Invalid BIOS MADT, disabling ACPI\n"); + disable_acpi(); + } + } +#endif +} + +static void __init acpi_process_madt(void) +{ +#ifdef CONFIG_L_LOCAL_APIC + int error; + + if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) { + + /* + * Parse MADT LAPIC entries + */ + error = acpi_parse_madt_lapic_entries(); + if (!error) { + acpi_lapic = 1; + + /* + * Parse MADT IO-APIC entries + */ + error = acpi_parse_madt_ioapic_entries(); + if (!error) { + acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC; + acpi_ioapic = 1; + + smp_found_config = 1; + } + } + if (error == -EINVAL) { + /* + * Dell Precision Workstation 410, 610 come here. + */ + pr_err(PREFIX + "Invalid BIOS MADT, disabling ACPI\n"); + disable_acpi(); + } + } else { + /* + * ACPI found no MADT, and so ACPI wants UP PIC mode. + * In the event an MPS table was found, forget it. + * Boot with "acpi=off" to use MPS on such a system. + */ + if (smp_found_config) { + pr_warning(PREFIX + "No APIC-table, disabling MPS\n"); + smp_found_config = 0; + } + } + + /* + * ACPI supports both logical (e.g. Hyper-Threading) and physical + * processors, where MPS only supports physical. 
+ */ + if (acpi_lapic && acpi_ioapic) + pr_info("Using ACPI (MADT) for SMP configuration " + "information\n"); + else if (acpi_lapic) + pr_info("Using ACPI for processor (LAPIC) " + "configuration information\n"); +#endif + return; +} + +static int __init disable_acpi_irq(const struct dmi_system_id *d) +{ + if (!acpi_force) { + pr_notice("%s detected: force use of acpi=noirq\n", + d->ident); + acpi_noirq_set(); + } + return 0; +} + +static int __init disable_acpi_pci(const struct dmi_system_id *d) +{ + if (!acpi_force) { + pr_notice("%s detected: force use of pci=noacpi\n", + d->ident); + acpi_disable_pci(); + } + return 0; +} + +static int __init dmi_disable_acpi(const struct dmi_system_id *d) +{ + if (!acpi_force) { + pr_notice("%s detected: acpi off\n", d->ident); + disable_acpi(); + } else { + pr_notice( + "Warning: DMI blacklist says broken, but acpi forced\n"); + } + return 0; +} + +/* + * Limit ACPI to CPU enumeration for HT + */ +static int __init force_acpi_ht(const struct dmi_system_id *d) +{ + if (!acpi_force) { + pr_notice("%s detected: force use of acpi=ht\n", + d->ident); + disable_acpi(); + acpi_ht = 1; + } else { + pr_notice( + "Warning: acpi=force overrules DMI blacklist: acpi=ht\n"); + } + return 0; +} + +/* + * Force ignoring BIOS IRQ0 pin2 override + */ +static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d) +{ + /* + * The ati_ixp4x0_rev() early PCI quirk should have set + * the acpi_skip_timer_override flag already: + */ + if (!acpi_skip_timer_override) { + WARN(1, KERN_ERR "ati_ixp4x0 quirk not complete.\n"); + pr_notice("%s detected: Ignoring BIOS IRQ0 pin2 override\n", + d->ident); + acpi_skip_timer_override = 1; + } + return 0; +} + +/* + * If your system is blacklisted here, but you find that acpi=force + * works for you, please contact linux-acpi@vger.kernel.org + */ +static struct dmi_system_id __initdata acpi_dmi_table[] = { + /* + * Boxes that need ACPI disabled + */ + { + .callback = dmi_disable_acpi, + 
.ident = "IBM Thinkpad", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), + DMI_MATCH(DMI_BOARD_NAME, "2629H1G"), + }, + }, + + /* + * Boxes that need acpi=ht + */ + { + .callback = force_acpi_ht, + .ident = "FSC Primergy T850", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"), + }, + }, + { + .callback = force_acpi_ht, + .ident = "HP VISUALIZE NT Workstation", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"), + }, + }, + { + .callback = force_acpi_ht, + .ident = "Compaq Workstation W8000", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), + DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"), + }, + }, + { + .callback = force_acpi_ht, + .ident = "ASUS CUR-DLS", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"), + }, + }, + { + .callback = force_acpi_ht, + .ident = "ABIT i440BX-W83977", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ABIT "), + DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"), + }, + }, + { + .callback = force_acpi_ht, + .ident = "IBM Bladecenter", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), + DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"), + }, + }, + { + .callback = force_acpi_ht, + .ident = "IBM eServer xSeries 360", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), + DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"), + }, + }, + { + .callback = force_acpi_ht, + .ident = "IBM eserver xSeries 330", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), + DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"), + }, + }, + { + .callback = force_acpi_ht, + .ident = "IBM eserver xSeries 440", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), + DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"), + }, + }, + + /* + * Boxes that need ACPI PCI IRQ routing disabled + */ + { + .callback = disable_acpi_irq, + .ident = "ASUS A7V", + .matches = 
{ + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"), + DMI_MATCH(DMI_BOARD_NAME, ""), + /* newer BIOS, Revision 1011, does work */ + DMI_MATCH(DMI_BIOS_VERSION, + "ASUS A7V ACPI BIOS Revision 1007"), + }, + }, + { + /* + * Latest BIOS for IBM 600E (1.16) has bad pcinum + * for LPC bridge, which is needed for the PCI + * interrupt links to work. DSDT fix is in bug 5966. + * 2645, 2646 model numbers are shared with 600/600E/600X + */ + .callback = disable_acpi_irq, + .ident = "IBM Thinkpad 600 Series 2645", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), + DMI_MATCH(DMI_BOARD_NAME, "2645"), + }, + }, + { + .callback = disable_acpi_irq, + .ident = "IBM Thinkpad 600 Series 2646", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), + DMI_MATCH(DMI_BOARD_NAME, "2646"), + }, + }, + /* + * Boxes that need ACPI PCI IRQ routing and PCI scan disabled + */ + { /* _BBN 0 bug */ + .callback = disable_acpi_pci, + .ident = "ASUS PR-DLS", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"), + DMI_MATCH(DMI_BIOS_VERSION, + "ASUS PR-DLS ACPI BIOS Revision 1010"), + DMI_MATCH(DMI_BIOS_DATE, "03/21/2003") + }, + }, + { + .callback = disable_acpi_pci, + .ident = "Acer TravelMate 36x Laptop", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), + }, + }, + {} +}; + +/* second table for DMI checks that should run after early-quirks */ +static struct dmi_system_id __initdata acpi_dmi_table_late[] = { + /* + * HP laptops which use a DSDT reporting as HP/SB400/10000, + * which includes some code which overrides all temperature + * trip points to 16C if the INTIN2 input of the I/O APIC + * is enabled. This input is incorrectly designated the + * ISA IRQ 0 via an interrupt source override even though + * it is wired to the output of the master 8259A and INTIN0 + * is not connected at all. Force ignoring BIOS IRQ0 pin2 + * override in that cases. 
+ */ + { + .callback = dmi_ignore_irq0_timer_override, + .ident = "HP nx6115 laptop", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6115"), + }, + }, + { + .callback = dmi_ignore_irq0_timer_override, + .ident = "HP NX6125 laptop", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6125"), + }, + }, + { + .callback = dmi_ignore_irq0_timer_override, + .ident = "HP NX6325 laptop", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"), + }, + }, + { + .callback = dmi_ignore_irq0_timer_override, + .ident = "HP 6715b laptop", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"), + }, + }, + {} +}; + +/* + * acpi_boot_table_init() and acpi_boot_init() + * called from setup_arch(), always. + * 1. checksums all tables + * 2. enumerates lapics + * 3. enumerates io-apics + * + * acpi_table_init() is separate to allow reading SRAT without + * other side effects. + * + * side effects of acpi_boot_init: + * acpi_lapic = 1 if LAPIC found + * acpi_ioapic = 1 if IOAPIC found + * if (acpi_lapic && acpi_ioapic) smp_found_config = 1; + * if acpi_blacklisted() acpi_disabled = 1; + * acpi_irq_model=... + * ... + */ + +void __init acpi_boot_table_init(void) +{ + dmi_check_system(acpi_dmi_table); + + /* + * If acpi_disabled, bail out + * One exception: acpi=ht continues far enough to enumerate LAPICs + */ + if (acpi_disabled && !acpi_ht) + return; + + /* + * Initialize the ACPI boot-time table parser. 
+ */ + if (acpi_table_init()) { + disable_acpi(); + return; + } + + acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf); +} + +int __init early_acpi_boot_init(void) +{ + /* + * If acpi_disabled, bail out + * One exception: acpi=ht continues far enough to enumerate LAPICs + */ + if (acpi_disabled && !acpi_ht) + return 1; + + /* + * Process the Multiple APIC Description Table (MADT), if present + */ + early_acpi_process_madt(); + + return 0; +} + +int __init acpi_boot_init(void) +{ + /* those are executed after early-quirks are executed */ + dmi_check_system(acpi_dmi_table_late); + + /* + * If acpi_disabled, bail out + * One exception: acpi=ht continues far enough to enumerate LAPICs + */ + if (acpi_disabled && !acpi_ht) + return 1; + + acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf); + + /* + * set sci_int and PM timer address + */ + acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt); + + /* + * Process the Multiple APIC Description Table (MADT), if present + */ + acpi_process_madt(); + + acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet); + + return 0; +} + +static int __init parse_acpi(char *arg) +{ + if (!arg) + return -EINVAL; + + /* "acpi=off" disables both ACPI table parsing and interpreter */ + if (strcmp(arg, "off") == 0) { + disable_acpi(); + } + /* acpi=force to over-ride black-list */ + else if (strcmp(arg, "force") == 0) { + acpi_force = 1; + acpi_ht = 1; + acpi_disabled = 0; + } + /* acpi=strict disables out-of-spec workarounds */ + else if (strcmp(arg, "strict") == 0) { + acpi_strict = 1; + } + /* Limit ACPI just to boot-time to enable HT */ + else if (strcmp(arg, "ht") == 0) { + if (!acpi_force) + disable_acpi(); + acpi_ht = 1; + } + /* acpi=rsdt use RSDT instead of XSDT */ + else if (strcmp(arg, "rsdt") == 0) { + acpi_rsdt_forced = 1; + } + /* "acpi=noirq" disables ACPI interrupt routing */ + else if (strcmp(arg, "noirq") == 0) { + acpi_noirq_set(); + } else { + /* Core will printk when we return error. 
*/ + return -EINVAL; + } + return 0; +} +early_param("acpi", parse_acpi); + +/* FIXME: Using pci= for an ACPI parameter is a travesty. */ +static int __init parse_pci(char *arg) +{ + if (arg && strcmp(arg, "noacpi") == 0) + acpi_disable_pci(); + return 0; +} +early_param("pci", parse_pci); + +int __init acpi_mps_check(void) +{ +#if defined(CONFIG_L_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE) +/* mptable code is not built-in*/ + if (acpi_disabled || acpi_noirq) { + pr_warning("MPS support code is not built-in.\n" + "Using acpi=off or acpi=noirq or pci=noacpi " + "may have problem\n"); + return 1; + } +#endif + return 0; +} + +#ifdef CONFIG_L_IO_APIC +static int __init parse_acpi_skip_timer_override(char *arg) +{ + acpi_skip_timer_override = 1; + return 0; +} +early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override); + +static int __init parse_acpi_use_timer_override(char *arg) +{ + acpi_use_timer_override = 1; + return 0; +} +early_param("acpi_use_timer_override", parse_acpi_use_timer_override); +#endif /* CONFIG_L_IO_APIC */ + +static int __init setup_acpi_sci(char *s) +{ + if (!s) + return -EINVAL; + if (!strcmp(s, "edge")) + acpi_sci_flags = ACPI_MADT_TRIGGER_EDGE | + (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK); + else if (!strcmp(s, "level")) + acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL | + (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK); + else if (!strcmp(s, "high")) + acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH | + (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK); + else if (!strcmp(s, "low")) + acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW | + (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK); + else + return -EINVAL; + return 0; +} +early_param("acpi_sci", setup_acpi_sci); + +/* + * We are out of acpi tables support in boot, there is no need to sync with + * boot. Global lock is not supported in hardware. 
+ */ + +int __acpi_acquire_global_lock(unsigned int *lock) +{ + return -1; +} + +int __acpi_release_global_lock(unsigned int *lock) +{ + return 1; +} diff --git a/arch/l/kernel/acpi/cstate.c b/arch/l/kernel/acpi/cstate.c new file mode 100644 index 000000000000..c466f1ede48a --- /dev/null +++ b/arch/l/kernel/acpi/cstate.c @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2012 MCST Evgeny Kravtsunov + * + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +/* + * Initialize bm_flags based on the CPU cache properties + * On SMP it depends on cache configuration + * - When cache is not shared among all CPUs, we flush cache + * before entering C3. + * - When cache is shared among all CPUs, we use bm_check + * mechanism as in UP case + * + * This routine is called only after all the CPUs are online + */ +void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags, + unsigned int cpu) +{ + return; +} +EXPORT_SYMBOL(acpi_processor_power_init_bm_check); + +int acpi_processor_ffh_cstate_probe(unsigned int cpu, + struct acpi_processor_cx *cx, struct acpi_power_register *reg) +{ + return 0; +} +EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); + +void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx) +{ + return; +} +EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter); + +static int __init ffh_cstate_init(void) +{ + return 0; +} + +static void __exit ffh_cstate_exit(void) +{ +} + +arch_initcall(ffh_cstate_init); +__exitcall(ffh_cstate_exit); diff --git a/arch/l/kernel/acpi/sleep.c b/arch/l/kernel/acpi/sleep.c new file mode 100644 index 000000000000..59e4dbcf82f8 --- /dev/null +++ b/arch/l/kernel/acpi/sleep.c @@ -0,0 +1,61 @@ +/* + * sleep.c - Elbrus-specific ACPI sleep support. 
+ * + * Copyright (C) 2001-2003 Patrick Mochel + * Copyright (C) 2001-2003 Pavel Machek + * Copyright (C) 2012 Evgeny Kravtsunov + */ + +#include + +unsigned long acpi_wakeup_address; +unsigned long acpi_realmode_flags; + +/** + * acpi_save_state_mem - save kernel state + */ +int acpi_save_state_mem(void) +{ + return 0; +} + +/* + * acpi_restore_state - undo effects of acpi_save_state_mem + */ +void acpi_restore_state_mem(void) +{ +} + +static int __init acpi_sleep_setup(char *str) +{ + while ((str != NULL) && (*str != '\0')) { + if (strncmp(str, "s3_bios", 7) == 0) + acpi_realmode_flags |= 1; + if (strncmp(str, "s3_mode", 7) == 0) + acpi_realmode_flags |= 2; + if (strncmp(str, "s3_beep", 7) == 0) + acpi_realmode_flags |= 4; +#ifdef CONFIG_HIBERNATION + if (strncmp(str, "s4_nohwsig", 10) == 0) + acpi_no_s4_hw_signature(); + if (strncmp(str, "s4_nonvs", 8) == 0) + acpi_s4_no_nvs(); +#endif + if (strncmp(str, "old_ordering", 12) == 0) + acpi_old_suspend_ordering(); + str = strchr(str, ','); + if (str != NULL) + str += strspn(str, ", \t"); + } + return 1; +} + +__setup("acpi_sleep=", acpi_sleep_setup); + +/* + * do_suspend_lowlevel() + */ +void do_suspend_lowlevel(void) +{ + return; +} diff --git a/arch/l/kernel/acpi_tainted/Makefile b/arch/l/kernel/acpi_tainted/Makefile new file mode 100644 index 000000000000..3b00bf917f55 --- /dev/null +++ b/arch/l/kernel/acpi_tainted/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_ACPI_L_SPMC) += l_spmc.o diff --git a/arch/l/kernel/acpi_tainted/l_spmc.c b/arch/l/kernel/acpi_tainted/l_spmc.c new file mode 100644 index 000000000000..eaaa7c610695 --- /dev/null +++ b/arch/l/kernel/acpi_tainted/l_spmc.c @@ -0,0 +1,1035 @@ +/* + * arch/l/kernel/acpi/l_spmc.c + * + * Copyright (C) 2015 Evgeny Kravtsunov MCST. + * + * Driver for SPMC controller that is part of IOHub-2/EIOHub. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#ifdef CONFIG_E2K +#include +#endif + +/* Offsets from BAR for ACPI-MCST registers */ +#define ACPI_SPMC_DEVICE_ID 0x00 + +/* ACPI 4.0 regs: */ +#define ACPI_SPMC_PM_TMR 0x40 +#define ACPI_SPMC_PM1_STS 0x44 +#define ACPI_SPMC_PM1_EN 0x48 +#define ACPI_SPMC_PM1_CNT 0x4c + +/* Additional regs: */ +#define ACPI_SPMC_ATNSUS_CNT 0x50 +#define ACPI_SPMC_PURST_CNT 0x54 +#define ACPI_SPMC_USB_CNTRL 0x58 + +/* Control area size: */ +#define ACPI_SPMC_CNTRL_AREA_SIZE 0x5c + +/* Define bit shifts for regs */ +/* PM1_STS: */ +#define SPMC_PM1_STS_TMR_STS 0 /* R/W1C */ +#define SPMC_PM1_STS_AC_PWR_STATE 1 /* RO */ +#define SPMC_PM1_STS_AC_PWR_STS 2 /* R/W1C */ +#define SPMC_PM1_STS_BATLOW_STATE 3 /* RO */ +#define SPMC_PM1_STS_BATLOW_STS 4 /* R/W1C */ +#define SPMC_PM1_STS_ATN_STS 5 /* RO */ +#define SPMC_PM1_STS_PWRBTN_STS 8 /* R/W1C */ +#define SPMC_PM1_STS_WAK_STS 15 /* R/W1C */ + +/* PM1_EN: */ +#define SPMC_PM1_EN_TMR_EN 0 /* WO */ +#define SPMC_PM1_EN_TMR_32 1 /* RW */ +#define SPMC_PM1_EN_AC_PWR_EN 2 /* WO */ +#define SPMC_PM1_EN_BATLOW_EN 4 /* WO */ +#define SPMC_PM1_EN_PWRBTN_EN 8 /* WO */ + +/* PM1_CNT: */ +#define 
SPMC_PM1_CNT_SCI_EN 0 /* RW */ +#define SPMC_PM1_CNT_SLP_TYP 10 /* RW */ +#define SPMC_PM1_CNT_SLP_EN 13 /* WO */ + +/* Sleep types: */ +#define SLP_TYP_S0 0x0 +#define SLP_TYP_S1 SLP_TYP_S0 +#define SLP_TYP_S2 SLP_TYP_S0 +#define SLP_TYP_S3 0x3 +#define SLP_TYP_S4 0x4 +#define SLP_TYP_S5 0x5 +#define SLP_TYP_S6 SLP_TYP_S0 +#define SLP_TYP_S7 SLP_TYP_S0 + +/* USB_CNTRL: */ +/* Place then here. */ +#define ACPI_SPMC_USB_CNTRL_WAKEUP_EN (3 << 2) +#define ACPI_SPMC_USB_ISOL_CNTRL (3 << 0) + +/* Bit access helpers: */ +#define ACPI_SPMC_ONE_MASK(x) (1 << (x)) +#define ACPI_SPMC_ZERO_MASK(x) (~(1 << (x))) +#define ACPI_SPMC_SET_SLP_TYP(x) ((x & 7) << SPMC_PM1_CNT_SLP_TYP) +#define ACPI_SPMC_GET_SLP_TYP(x) ((x >> SPMC_PM1_CNT_SLP_TYP) & 7) + +/* Initial configuration values: */ +#define SPMC_LEGACY_PM1_CNT_DEF ACPI_SPMC_ONE_MASK(SPMC_PM1_CNT_SCI_EN) +#define SPMC_TMR32_PM1_EN_DEF 0x00000002 + +#define DRV_NAME "acpi-spmc" +#define IOAPIC_SPMC_IRQ 1 + +struct acpi_spmc_data { + struct pci_dev *pdev; + raw_spinlock_t lock; +}; + +static struct acpi_spmc_data *gdata; + + +/* ACPI tainted interfaces and variables */ + +struct kobject *acpi_kobj; + +#define ACPI_BUS_FILE_ROOT "acpi" +struct proc_dir_entry *acpi_root_dir; +#define ACPI_MAX_STRING 80 + +/* Global vars for handling event proc entry */ +static DEFINE_SPINLOCK(acpi_system_event_lock); +int event_is_open = 0; +static DEFINE_SPINLOCK(acpi_bus_event_lock); + +LIST_HEAD(acpi_bus_event_list); +DECLARE_WAIT_QUEUE_HEAD(acpi_bus_event_queue); + +typedef char acpi_bus_id[8]; +typedef char acpi_device_name[40]; +typedef char acpi_device_class[20]; + +struct acpi_bus_event { + struct list_head node; + acpi_device_class device_class; + acpi_bus_id bus_id; + u32 type; + u32 data; +}; + +/* Event related staff */ +#define ACPI_AC_EVENT 0x1 +#define ACPI_BATTERY_EVENT 0x2 +#define ACPI_BUTTON_EVENT 0x3 +#define ACPI_PMTIMER_EVENT 0x4 +#define ACPI_UNKNOWN_EVENT 0xff + +#define ACPI_AC_CLASS "ac_adapter" +#define 
ACPI_BATTERY_CLASS "battery" +#define ACPI_BUTTON_CLASS "button" +#define ACPI_PMTIMER_CLASS "pmtimer" + +#define ACPI_BUSID_CLASS "spmc" + +#define ACPI_FIXED_HARDWARE_EVENT 0x00 + +int acpi_bus_generate_proc_event(const char *device_class, const char *bus_id, u8 type, int data) +{ + struct acpi_bus_event *event; + unsigned long flags = 0; + + /* drop event on the floor if no one's listening */ + if (!event_is_open) + return 0; + + event = kmalloc(sizeof(struct acpi_bus_event), GFP_ATOMIC); + if (!event) + return -ENOMEM; + + strcpy(event->device_class, device_class); + strcpy(event->bus_id, bus_id); + event->type = type; + event->data = data; + + spin_lock_irqsave(&acpi_bus_event_lock, flags); + list_add_tail(&event->node, &acpi_bus_event_list); + spin_unlock_irqrestore(&acpi_bus_event_lock, flags); + + wake_up_interruptible(&acpi_bus_event_queue); + + return 0; +} + +int acpi_bus_receive_event(struct acpi_bus_event *event) +{ + unsigned long flags = 0; + struct acpi_bus_event *entry = NULL; + + DECLARE_WAITQUEUE(wait, current); + + if (!event) + return -EINVAL; + + if (list_empty(&acpi_bus_event_list)) { + + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&acpi_bus_event_queue, &wait); + + if (list_empty(&acpi_bus_event_list)) + schedule(); + + remove_wait_queue(&acpi_bus_event_queue, &wait); + set_current_state(TASK_RUNNING); + + if (signal_pending(current)) + return -ERESTARTSYS; + } + + spin_lock_irqsave(&acpi_bus_event_lock, flags); + if (!list_empty(&acpi_bus_event_list)) { + entry = list_entry(acpi_bus_event_list.next, + struct acpi_bus_event, node); + list_del(&entry->node); + } + spin_unlock_irqrestore(&acpi_bus_event_lock, flags); + + if (!entry) + return -ENODEV; + + memcpy(event, entry, sizeof(struct acpi_bus_event)); + + kfree(entry); + + return 0; +} + +static int acpi_system_open_event(struct inode *inode, struct file *file) +{ + spin_lock_irq(&acpi_system_event_lock); + + if (event_is_open) + goto out_busy; + + event_is_open = 1; + + 
spin_unlock_irq(&acpi_system_event_lock); + return 0; + + out_busy: + spin_unlock_irq(&acpi_system_event_lock); + return -EBUSY; +} + +static ssize_t +acpi_system_read_event(struct file *file, char __user * buffer, size_t count, + loff_t * ppos) +{ + int result = 0; + struct acpi_bus_event event; + static char str[ACPI_MAX_STRING]; + static int chars_remaining = 0; + static char *ptr; + + if (!chars_remaining) { + memset(&event, 0, sizeof(struct acpi_bus_event)); + + if ((file->f_flags & O_NONBLOCK) + && (list_empty(&acpi_bus_event_list))) + return -EAGAIN; + + result = acpi_bus_receive_event(&event); + if (result) + return result; + + chars_remaining = sprintf(str, "%s %s %08x %08x\n", + event.device_class ? event. + device_class : "", + event.bus_id ? event. + bus_id : "", event.type, + event.data); + ptr = str; + } + + if (chars_remaining < count) { + count = chars_remaining; + } + + if (copy_to_user(buffer, ptr, count)) + return -EFAULT; + + *ppos += count; + chars_remaining -= count; + ptr += count; + + return count; +} + +static int acpi_system_close_event(struct inode *inode, struct file *file) +{ + spin_lock_irq(&acpi_system_event_lock); + event_is_open = 0; + spin_unlock_irq(&acpi_system_event_lock); + return 0; +} + +static unsigned int acpi_system_poll_event(struct file *file, poll_table * wait) +{ + poll_wait(file, &acpi_bus_event_queue, wait); + if (!list_empty(&acpi_bus_event_list)) + return POLLIN | POLLRDNORM; + return 0; +} + +static const struct file_operations acpi_system_event_ops = { + .owner = THIS_MODULE, + .open = acpi_system_open_event, + .read = acpi_system_read_event, + .release = acpi_system_close_event, + .poll = acpi_system_poll_event, +}; + +/* handler for irq line 1 (acpi-spmc) */ +static irqreturn_t acpi_spmc_irq_handler(int irq, void *dev_id) +{ + unsigned long flags; + unsigned int x; + + unsigned int event_id = ACPI_UNKNOWN_EVENT; + unsigned int event_data = 0; + + + struct acpi_spmc_data *c = (struct acpi_spmc_data *)dev_id; + + 
raw_spin_lock_irqsave(&c->lock, flags); + pci_read_config_dword(c->pdev, ACPI_SPMC_PM1_STS, &x); + + /* Get the source of interrupt */ + if (x & ACPI_SPMC_ONE_MASK(SPMC_PM1_STS_TMR_STS)) { + /* SCI interrupt form PM timer */ + /* handle it here */ + /* printk(KERN_ERR "SCI interrupt from PM timer.\n"); */ + event_id = ACPI_PMTIMER_EVENT; + } else if (x & ACPI_SPMC_ONE_MASK(SPMC_PM1_STS_AC_PWR_STS)) { + /* SCI interrupt due change of ac_power_psnt */ + /* handle it here */ + /* printk(KERN_ERR "SCI interrupt from ac_power_psnt.\n"); */ + /* 1) check power source ac or battery */ + event_id = ACPI_AC_EVENT; + if (x & ACPI_SPMC_ONE_MASK(SPMC_PM1_STS_AC_PWR_STATE)) { + event_data = 1; /* ac on */ +#ifdef CONFIG_CPU_FREQ_GOV_PSTATES + set_cpu_pwr_limit(battery_pwr); +#endif + } else { + event_data = 0; /* ac off */ +#ifdef CONFIG_CPU_FREQ_GOV_PSTATES + set_cpu_pwr_limit(init_cpu_pwr_limit); +#endif + } + } else if (x & ACPI_SPMC_ONE_MASK(SPMC_PM1_STS_BATLOW_STS)) { + /* SCI interrupt due change of ac_power_psnt */ + /* handle it here */ + /* printk(KERN_ERR "SCI interrupt from batlow.\n"); */ + event_id = ACPI_BATTERY_EVENT; + if (x & ACPI_SPMC_ONE_MASK(SPMC_PM1_STS_BATLOW_STATE)) { + event_data = 1; /* battery low */ + } else { + event_data = 0; /* battery ok */ + } + } else if (x & ACPI_SPMC_ONE_MASK(SPMC_PM1_STS_PWRBTN_STS)) { + /* SCI interrupt due to power button */ + /* handle it here */ + /* printk(KERN_ERR "SCI interrupt from power button.\n"); */ + event_id = ACPI_BUTTON_EVENT; + } else if (x & ACPI_SPMC_ONE_MASK(SPMC_PM1_STS_WAK_STS)) { + /* SCI interrupt due to wakeup event */ + /* handle it here */ + /* printk(KERN_ERR "SCI interrupt from wakeup event.\n"); */ + } + + pci_write_config_dword(c->pdev, ACPI_SPMC_PM1_STS, x); + raw_spin_unlock_irqrestore(&c->lock, flags); + + /* notify acpid on event */ + if (event_id == ACPI_PMTIMER_EVENT) { + acpi_bus_generate_proc_event(ACPI_PMTIMER_CLASS, + ACPI_BUSID_CLASS, + ACPI_FIXED_HARDWARE_EVENT, + 1); + } else if 
(event_id == ACPI_BUTTON_EVENT) { + acpi_bus_generate_proc_event(ACPI_BUTTON_CLASS, + ACPI_BUSID_CLASS, + ACPI_FIXED_HARDWARE_EVENT, + 1); + } else if (event_id == ACPI_AC_EVENT) { + acpi_bus_generate_proc_event(ACPI_AC_CLASS, + ACPI_BUSID_CLASS, + ACPI_FIXED_HARDWARE_EVENT, + event_data); + } else if (event_id == ACPI_BATTERY_EVENT) { + acpi_bus_generate_proc_event(ACPI_BATTERY_CLASS, + ACPI_BUSID_CLASS, + ACPI_FIXED_HARDWARE_EVENT, + event_data); + } + + return IRQ_HANDLED; +} + +/* Sysfs layer */ +/* sci */ +static ssize_t spmc_show_sci(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long flags; + unsigned int x; + unsigned int val; + struct acpi_spmc_data *c = gdata; + + raw_spin_lock_irqsave(&c->lock, flags); + pci_read_config_dword(c->pdev, ACPI_SPMC_PM1_CNT, &x); + raw_spin_unlock_irqrestore(&c->lock, flags); + + if ( x & ACPI_SPMC_ONE_MASK(SPMC_PM1_CNT_SCI_EN)) + val = 1; + else + val = 0; + + return sprintf(buf, "%i\n", val); +} + +static ssize_t spmc_store_sci(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long flags; + unsigned int x; + unsigned long val; + struct acpi_spmc_data *c = gdata; + + if ((kstrtoul(buf, 10, &val) < 0) || (val > 1)) + return -EINVAL; + + raw_spin_lock_irqsave(&c->lock, flags); + pci_read_config_dword(c->pdev, ACPI_SPMC_PM1_CNT, &x); + if (val) { + x |= ACPI_SPMC_ONE_MASK(SPMC_PM1_CNT_SCI_EN); + } else { + x &= ~(ACPI_SPMC_ONE_MASK(SPMC_PM1_CNT_SCI_EN)); + } + pci_write_config_dword(c->pdev, ACPI_SPMC_PM1_CNT, x); + raw_spin_unlock_irqrestore(&c->lock, flags); + + return count; +} + +/* tmr */ +static ssize_t spmc_store_tmr(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long flags; + unsigned int x; + unsigned long val; + struct acpi_spmc_data *c = gdata; + + if ((kstrtoul(buf, 10, &val) < 0) || (val > 1)) + return -EINVAL; + + raw_spin_lock_irqsave(&c->lock, flags); + 
pci_read_config_dword(c->pdev, ACPI_SPMC_PM1_EN, &x); + if (val) { + x |= ACPI_SPMC_ONE_MASK(SPMC_PM1_EN_TMR_EN); + } else { + x &= ~(ACPI_SPMC_ONE_MASK(SPMC_PM1_EN_TMR_EN)); + } + pci_write_config_dword(c->pdev, ACPI_SPMC_PM1_EN, x); + raw_spin_unlock_irqrestore(&c->lock, flags); + + return count; +} + +/* tmr32 */ +static ssize_t spmc_show_tmr32(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long flags; + unsigned int x; + unsigned int val; + struct acpi_spmc_data *c = gdata; + + raw_spin_lock_irqsave(&c->lock, flags); + pci_read_config_dword(c->pdev, ACPI_SPMC_PM1_EN, &x); + raw_spin_unlock_irqrestore(&c->lock, flags); + + if ( x & ACPI_SPMC_ONE_MASK(SPMC_PM1_EN_TMR_32)) + val = 1; + else + val = 0; + + return sprintf(buf, "%i\n", val); +} + +static ssize_t spmc_store_tmr32(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long flags; + unsigned int x; + unsigned long val; + struct acpi_spmc_data *c = gdata; + + if ((kstrtoul(buf, 10, &val) < 0) || (val > 1)) + return -EINVAL; + + raw_spin_lock_irqsave(&c->lock, flags); + pci_read_config_dword(c->pdev, ACPI_SPMC_PM1_EN, &x); + if (val) { + x |= ACPI_SPMC_ONE_MASK(SPMC_PM1_EN_TMR_32); + } else { + x &= ~(ACPI_SPMC_ONE_MASK(SPMC_PM1_EN_TMR_32)); + } + pci_write_config_dword(c->pdev, ACPI_SPMC_PM1_EN, x); + raw_spin_unlock_irqrestore(&c->lock, flags); + + return count; +} + +/* ac_pwr */ +static ssize_t spmc_store_ac_pwr(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long flags; + unsigned int x; + unsigned long val; + struct acpi_spmc_data *c = gdata; + + if ((kstrtoul(buf, 10, &val) < 0) || (val > 1)) + return -EINVAL; + + raw_spin_lock_irqsave(&c->lock, flags); + pci_read_config_dword(c->pdev, ACPI_SPMC_PM1_EN, &x); + if (val) { + x |= ACPI_SPMC_ONE_MASK(SPMC_PM1_EN_AC_PWR_EN); + } else { + x &= ~(ACPI_SPMC_ONE_MASK(SPMC_PM1_EN_AC_PWR_EN)); + } + pci_write_config_dword(c->pdev, 
ACPI_SPMC_PM1_EN, x); + raw_spin_unlock_irqrestore(&c->lock, flags); + + return count; +} + +/* batlow */ +static ssize_t spmc_store_batlow(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long flags; + unsigned int x; + unsigned long val; + struct acpi_spmc_data *c = gdata; + + if ((kstrtoul(buf, 10, &val) < 0) || (val > 1)) + return -EINVAL; + + raw_spin_lock_irqsave(&c->lock, flags); + pci_read_config_dword(c->pdev, ACPI_SPMC_PM1_EN, &x); + if (val) { + x |= ACPI_SPMC_ONE_MASK(SPMC_PM1_EN_BATLOW_EN); + } else { + x &= ~(ACPI_SPMC_ONE_MASK(SPMC_PM1_EN_BATLOW_EN)); + } + pci_write_config_dword(c->pdev, ACPI_SPMC_PM1_EN, x); + raw_spin_unlock_irqrestore(&c->lock, flags); + + return count; +} + +/* pwrbtn */ +static ssize_t spmc_store_pwrbtn(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long flags; + unsigned int x; + unsigned long val; + struct acpi_spmc_data *c = gdata; + + if ((kstrtoul(buf, 10, &val) < 0) || (val > 1)) + return -EINVAL; + + raw_spin_lock_irqsave(&c->lock, flags); + pci_read_config_dword(c->pdev, ACPI_SPMC_PM1_EN, &x); + if (val) { + x |= ACPI_SPMC_ONE_MASK(SPMC_PM1_EN_PWRBTN_EN); + } else { + x &= ~(ACPI_SPMC_ONE_MASK(SPMC_PM1_EN_PWRBTN_EN)); + } + pci_write_config_dword(c->pdev, ACPI_SPMC_PM1_EN, x); + raw_spin_unlock_irqrestore(&c->lock, flags); + + return count; +} + +/* slptyp */ +static ssize_t spmc_show_slptyp(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long flags; + unsigned int x; + unsigned int val; + struct acpi_spmc_data *c = gdata; + + raw_spin_lock_irqsave(&c->lock, flags); + pci_read_config_dword(c->pdev, ACPI_SPMC_PM1_CNT, &x); + raw_spin_unlock_irqrestore(&c->lock, flags); + + val = ACPI_SPMC_GET_SLP_TYP(x); + + return sprintf(buf, "%i\n", val); +} + +static ssize_t spmc_store_slptyp(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long flags; 
+ unsigned int x; + unsigned long val; + struct acpi_spmc_data *c = gdata; + + if ((kstrtoul(buf, 10, &val) < SLP_TYP_S0) || + (val > SLP_TYP_S5)) + return -EINVAL; + + + raw_spin_lock_irqsave(&c->lock, flags); + pci_read_config_dword(c->pdev, ACPI_SPMC_PM1_CNT, &x); + x &= ~(ACPI_SPMC_SET_SLP_TYP(0x7)); + x |= (ACPI_SPMC_SET_SLP_TYP((unsigned int)val)); + x |= (ACPI_SPMC_ONE_MASK(SPMC_PM1_CNT_SLP_EN)); + pci_write_config_dword(c->pdev, ACPI_SPMC_PM1_CNT, x); + raw_spin_unlock_irqrestore(&c->lock, flags); + + return count; +} + +/* pm_tmr */ +static ssize_t spmc_show_pm_tmr(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long flags; + unsigned int x; + struct acpi_spmc_data *c = gdata; + + raw_spin_lock_irqsave(&c->lock, flags); + pci_read_config_dword(c->pdev, ACPI_SPMC_PM_TMR, &x); + raw_spin_unlock_irqrestore(&c->lock, flags); + + return sprintf(buf, "0x%x\n", x); +} + +/* pm1_sts */ +static ssize_t spmc_show_pm1_sts(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long flags; + unsigned int x; + struct acpi_spmc_data *c = gdata; + + raw_spin_lock_irqsave(&c->lock, flags); + pci_read_config_dword(c->pdev, ACPI_SPMC_PM1_STS, &x); + raw_spin_unlock_irqrestore(&c->lock, flags); + + return sprintf(buf, "0x%x\n", x); +} + +/* pm1_en */ +static ssize_t spmc_show_pm1_en(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long flags; + unsigned int x; + struct acpi_spmc_data *c = gdata; + + raw_spin_lock_irqsave(&c->lock, flags); + pci_read_config_dword(c->pdev, ACPI_SPMC_PM1_EN, &x); + raw_spin_unlock_irqrestore(&c->lock, flags); + + return sprintf(buf, "0x%x\n", x); +} + +/* pm1_cnt */ +static ssize_t spmc_show_pm1_cnt(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long flags; + unsigned int x; + struct acpi_spmc_data *c = gdata; + + raw_spin_lock_irqsave(&c->lock, flags); + pci_read_config_dword(c->pdev, ACPI_SPMC_PM1_CNT, &x); + 
raw_spin_unlock_irqrestore(&c->lock, flags); + + return sprintf(buf, "0x%x\n", x); +} + +static DEVICE_ATTR(sci, S_IWUSR | S_IRUGO, spmc_show_sci, spmc_store_sci); +static DEVICE_ATTR(tmr, S_IWUSR, NULL, spmc_store_tmr); +static DEVICE_ATTR(tmr32, S_IWUSR | S_IRUGO, spmc_show_tmr32, spmc_store_tmr32); +static DEVICE_ATTR(ac_pwr, S_IWUSR, NULL, spmc_store_ac_pwr); +static DEVICE_ATTR(batlow, S_IWUSR, NULL, spmc_store_batlow); +static DEVICE_ATTR(pwrbtn, S_IWUSR, NULL, spmc_store_pwrbtn); +static DEVICE_ATTR(slptyp, S_IWUSR | S_IRUGO, spmc_show_slptyp, spmc_store_slptyp); + +/* Debug monitors */ +static DEVICE_ATTR(pm_tmr, S_IRUGO, spmc_show_pm_tmr, NULL); +static DEVICE_ATTR(pm1_sts, S_IRUGO, spmc_show_pm1_sts, NULL); +static DEVICE_ATTR(pm1_en, S_IRUGO, spmc_show_pm1_en, NULL); +static DEVICE_ATTR(pm1_cnt, S_IRUGO, spmc_show_pm1_cnt, NULL); + +static struct attribute *acpi_spmc_attributes[] = { + &dev_attr_sci.attr, + &dev_attr_tmr.attr, + &dev_attr_tmr32.attr, + &dev_attr_ac_pwr.attr, + &dev_attr_batlow.attr, + &dev_attr_pwrbtn.attr, + &dev_attr_slptyp.attr, + &dev_attr_pm_tmr.attr, + &dev_attr_pm1_sts.attr, + &dev_attr_pm1_en.attr, + &dev_attr_pm1_cnt.attr, + NULL +}; + +static const struct attribute_group acpi_spmc_attr_group = { + .attrs = acpi_spmc_attributes, +}; + +#ifdef CONFIG_SUSPEND +/* S3 (STR support) */ + +static struct pci_dev *l_spmc_pdev; + +static int l_spmc_suspend_valid(suspend_state_t state) +{ + return state == PM_SUSPEND_TO_IDLE || state == PM_SUSPEND_MEM; +} + +static void l_spmc_s3_enter(void *arg) +{ + unsigned x; + struct pci_dev *pdev = arg; + + pci_read_config_dword(pdev, ACPI_SPMC_ATNSUS_CNT, &x); + + pci_read_config_dword(pdev, ACPI_SPMC_PM1_CNT, &x); + x |= ACPI_SPMC_ONE_MASK(SPMC_PM1_CNT_SCI_EN); + pci_write_config_dword(pdev, ACPI_SPMC_PM1_CNT, x); + + /* Here write S3 state to SPMC config space */ + x &= ~(ACPI_SPMC_SET_SLP_TYP(0x7)); + x |= ACPI_SPMC_SET_SLP_TYP(SLP_TYP_S3); + x |= ACPI_SPMC_ONE_MASK(SPMC_PM1_CNT_SLP_EN); + + 
pci_write_config_dword(pdev, ACPI_SPMC_PM1_CNT, x); + pci_read_config_dword(pdev, ACPI_SPMC_PM1_CNT, &x); +} + +static int l_spmc_suspend_enter(suspend_state_t state) +{ + int ret = 0; +#ifdef CONFIG_E2K + restart_system(l_spmc_s3_enter, l_spmc_pdev); +#else + /*TODO:*/ + l_spmc_s3_enter(l_spmc_pdev); +#endif + return ret; +} + +static const struct platform_suspend_ops l_spmc_suspend_ops = { + .valid = l_spmc_suspend_valid, + .enter = l_spmc_suspend_enter, +}; + + +static int l_power_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + int ret = 0; + +#ifdef CONFIG_E2K + /* Suspend-to-ram requires support from both boot and + * hardware; hardware has support since iset v4 but + * boot has support only for e1cp. So for everything + * but e1cp we allow only suspend-to-disk. */ + if (!IS_MACHINE_E1CP) { + if (event != PM_HIBERNATION_PREPARE && + event != PM_POST_HIBERNATION && + event != PM_RESTORE_PREPARE && + event != PM_POST_RESTORE) + ret = -EOPNOTSUPP; + } +#endif + return notifier_from_errno(ret); +} + +static struct notifier_block l_power_notifier = { + .notifier_call = l_power_event, +}; + +#endif /*CONFIG_SUSPEND*/ + +static int __init acpi_spmc_probe(struct pci_dev *pdev, + struct acpi_spmc_data *c) +{ + struct device_node *np; + int err, ret; + char *dsc = "SCI"; + unsigned x; + u32 prop; + + err = pci_enable_device(pdev); + if (err) + return err; + + c->pdev = pdev; + + raw_spin_lock_init(&(c->lock)); + + /* Default settings: */ + /* 1) ACPI (SCI enable or disable) & force S0 state */ + pci_write_config_dword(pdev, ACPI_SPMC_PM1_CNT, + ACPI_SPMC_SET_SLP_TYP(SLP_TYP_S0) | + ACPI_SPMC_ONE_MASK(SPMC_PM1_CNT_SLP_EN) | + SPMC_LEGACY_PM1_CNT_DEF); + + /* 2) TMR_32 */ + pci_write_config_dword(pdev, ACPI_SPMC_PM1_EN, + SPMC_TMR32_PM1_EN_DEF); + + /* 3) enable wakeup from usb */ + pci_read_config_dword(pdev, ACPI_SPMC_USB_CNTRL, &x); + pci_write_config_dword(pdev, ACPI_SPMC_USB_CNTRL, + x | ACPI_SPMC_USB_CNTRL_WAKEUP_EN); + + np = 
of_find_node_by_name(NULL, "acpi-spmc"); + if (np) { + /* 5) SCI value from device tree (enable or disable) */ + ret = of_property_read_u32(np, "sci", &prop); + if ((!ret) && (prop < 2)) { + pci_read_config_dword(c->pdev, ACPI_SPMC_PM1_CNT, &x); + if (prop) + x |= ACPI_SPMC_ONE_MASK(SPMC_PM1_CNT_SCI_EN); + else + x &= ~(ACPI_SPMC_ONE_MASK(SPMC_PM1_CNT_SCI_EN)); + pci_write_config_dword(c->pdev, ACPI_SPMC_PM1_CNT, x); + } + /* 6) PWRBTN value from device tree (enable or disable) */ + ret = of_property_read_u32(np, "pwrbtn", &prop); + if ((!ret) && (prop < 2)) { + pci_read_config_dword(c->pdev, ACPI_SPMC_PM1_EN, &x); + if (prop) + x |= ACPI_SPMC_ONE_MASK(SPMC_PM1_EN_PWRBTN_EN); + else + x &= ~(ACPI_SPMC_ONE_MASK(SPMC_PM1_EN_PWRBTN_EN)); + pci_write_config_dword(c->pdev, ACPI_SPMC_PM1_EN, x); + } + /* 7) SLPTYP value from device tree (0-5) */ + ret = of_property_read_u32(np, "slptyp", &prop); + if ((!ret) && (prop <= SLP_TYP_S5)) { + pci_read_config_dword(c->pdev, ACPI_SPMC_PM1_CNT, &x); + x &= ~(ACPI_SPMC_SET_SLP_TYP(0x7)); + x |= (ACPI_SPMC_SET_SLP_TYP((unsigned int)prop)); + x |= (ACPI_SPMC_ONE_MASK(SPMC_PM1_CNT_SLP_EN)); + pci_write_config_dword(c->pdev, ACPI_SPMC_PM1_CNT, x); + } + } else { + /* 8) PWRBTN enable without device tree */ + pci_read_config_dword(c->pdev, ACPI_SPMC_PM1_EN, &x); + x |= ACPI_SPMC_ONE_MASK(SPMC_PM1_EN_PWRBTN_EN); + pci_write_config_dword(c->pdev, ACPI_SPMC_PM1_EN, x); + } + + /* register sysfs entries */ + err = sysfs_create_group(&pdev->dev.kobj, &acpi_spmc_attr_group); + if (err) + goto done; + + /* SCI IRQ, Line 1: */ + err = request_irq(IOAPIC_SPMC_IRQ, acpi_spmc_irq_handler, + IRQF_ONESHOT | IRQF_SHARED, + dsc, c); + if (err) { + dev_err(&pdev->dev, + "ACPI-SPMC: unable to claim irq %d; err %d\n", + IOAPIC_SPMC_IRQ, err); + goto cleanup; + } + + dev_info(&pdev->dev, + DRV_NAME ": ACPI-SPMC support successfully loaded.\n"); + +#ifdef CONFIG_SUSPEND + suspend_set_ops(&l_spmc_suspend_ops); + l_spmc_pdev = pdev; +#endif + + return 0; + 
+cleanup: + sysfs_remove_group(&pdev->dev.kobj, &acpi_spmc_attr_group); + +done: + return err; +} + +static void __exit acpi_spmc_remove(struct acpi_spmc_data *p) +{ + struct pci_dev *pdev = p->pdev; + + free_irq(IOAPIC_SPMC_IRQ, p); + sysfs_remove_group(&pdev->dev.kobj, &acpi_spmc_attr_group); +} + +static int __init acpi_spmc_init(void) +{ + struct pci_dev *pdev = NULL; + int err = -ENODEV; + struct acpi_spmc_data *idata; + struct proc_dir_entry *entry; + +#ifdef CONFIG_SUSPEND + err = register_pm_notifier(&l_power_notifier); + if (err) + return err; +#endif + /* Implementation for single IOHUB-2 on board (no domains) */ + + pdev = pci_get_device(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_SPMC, + pdev); + if (!pdev) + return 0; + + if (!(idata = kzalloc(sizeof(*idata), GFP_KERNEL))) + return -ENOMEM; + + err = acpi_spmc_probe(pdev, idata); + if (err) { + pci_dev_put(pdev); + return err; + } + + gdata = idata; + + /* Long initialization process of ACPI tainted interfaces */ + + /* ACPI sysfs top dir */ + acpi_kobj = kobject_create_and_add("acpi", firmware_kobj); + if (!acpi_kobj) { + printk(KERN_WARNING "%s: kset create error\n", __func__); + acpi_kobj = NULL; + } + + /* Create the top ACPI proc directory */ + acpi_root_dir = proc_mkdir(ACPI_BUS_FILE_ROOT, NULL); + + /* /proc/acpi/event [R] */ + entry = proc_create("event", S_IRUSR, acpi_root_dir, + &acpi_system_event_ops); + if (!entry) { + + pci_dev_put(pdev); + gdata = NULL; + return -ENODEV; + } + return err; +} + +static void __exit acpi_spmc_exit(void) +{ + proc_remove(acpi_root_dir); + kobject_put(acpi_kobj); + acpi_spmc_remove(gdata); + pci_dev_put(gdata->pdev); + kfree(gdata); + gdata = NULL; +} + +device_initcall(acpi_spmc_init); +/* module_init(acpi_spmc_init); */ +/* module_exit(acpi_spmc_exit); */ + + +/* If board contains IOHUB-2, SPMC can be used for implementing "halt" + * by writing S5 to slptyp. This function is to be called from + * l_halt_machine(). 
+ */ + +void do_spmc_halt(void) +{ + unsigned int x; + unsigned long val0 = SLP_TYP_S0; + unsigned long val5 = SLP_TYP_S5; + struct acpi_spmc_data *c = gdata; + if (!c) + return; + + pci_read_config_dword(c->pdev, ACPI_SPMC_PM1_CNT, &x); + x |= ACPI_SPMC_ONE_MASK(SPMC_PM1_CNT_SCI_EN); + pci_write_config_dword(c->pdev, ACPI_SPMC_PM1_CNT, x); + + pci_read_config_dword(c->pdev, ACPI_SPMC_PM1_CNT, &x); + x &= ~(ACPI_SPMC_SET_SLP_TYP(0x7)); + x |= (ACPI_SPMC_SET_SLP_TYP((unsigned int)val0)); + x |= (ACPI_SPMC_ONE_MASK(SPMC_PM1_CNT_SLP_EN)); + pci_write_config_dword(c->pdev, ACPI_SPMC_PM1_CNT, x); + + pci_read_config_dword(c->pdev, ACPI_SPMC_PM1_CNT, &x); + x &= ~(ACPI_SPMC_SET_SLP_TYP(0x7)); + x |= (ACPI_SPMC_SET_SLP_TYP((unsigned int)val5)); + x |= (ACPI_SPMC_ONE_MASK(SPMC_PM1_CNT_SLP_EN)); + pci_write_config_dword(c->pdev, ACPI_SPMC_PM1_CNT, x); + while (1) {} +} +EXPORT_SYMBOL(do_spmc_halt); + +MODULE_AUTHOR("Evgeny Kravtsunov "); +MODULE_DESCRIPTION("IOHub-2/EIOHub SPMC driver"); +MODULE_LICENSE("GPL"); diff --git a/arch/l/kernel/am85c30.c b/arch/l/kernel/am85c30.c new file mode 100644 index 000000000000..bd1b709ccdd2 --- /dev/null +++ b/arch/l/kernel/am85c30.c @@ -0,0 +1,175 @@ +/* + * COM port console AM85C30 support + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#undef DEBUG_SC_MODE +#undef DebugSC +#define DEBUG_SC_MODE 0 /* serial console debug */ +#define DebugSC if (DEBUG_SC_MODE) dump_printk + +static unsigned long am85c30_com_port = 0; +extern serial_console_opts_t am85c30_serial_console; + +static inline void +am85c30_com_outb(u64 iomem_addr, u8 byte) +{ + boot_writeb(byte, (void __iomem *)iomem_addr); + wmb(); /* waiting for write to serial port completion */ +} + +static inline u8 +am85c30_com_inb(u64 iomem_addr) +{ + rmb(); /* waiting for read from serial port completion */ + return boot_readb((void __iomem *)iomem_addr); +} + +static inline u8 +am85c30_com_inb_command(u64 iomem_addr, u8 
reg_num) +{ + boot_writeb(reg_num, (void __iomem *)iomem_addr); + wmb(); /* waiting for write to serial port completion */ + return boot_readb((void __iomem *)iomem_addr); +} + +static inline void +am85c30_com_outb_command(u64 iomem_addr, u8 reg_num, u8 val) +{ + boot_writeb(reg_num, (void __iomem *)iomem_addr); + wmb(); /* waiting for write to serial port completion */ + boot_writeb(val, (void __iomem *)iomem_addr); + wmb(); /* waiting for write to serial port completion */ +} + +#if defined(CONFIG_SERIAL_L_ZILOG) +static inline unsigned long lock_l_zilog(void) +{ + unsigned long flags; + + if (uap_a_reg_lock) { +# ifdef CONFIG_E2K + raw_all_irq_save(flags); +# else + raw_local_irq_save(flags); +# endif + arch_spin_lock(&uap_a_reg_lock->raw_lock); + } else { + raw_local_save_flags(flags); + } + + return flags; +} + +static inline void unlock_l_zilog(unsigned long flags) +{ + if (uap_a_reg_lock) { + arch_spin_unlock(&uap_a_reg_lock->raw_lock); +# ifdef CONFIG_E2K + raw_all_irq_restore(flags); +# else + raw_local_irq_restore(flags); +# endif + } +} +#else +static inline unsigned long lock_l_zilog(void) +{ + return 0; +} + +static inline unsigned long unlock_l_zilog(unsigned long flags) +{ +} +#endif + +static __interrupt void am85c30_serial_putc(unsigned char c) +{ + unsigned long port = am85c30_com_port + 2 * serial_dump_console_num; + unsigned long flags; + u8 cmd_saved; + + flags = lock_l_zilog(); + + cmd_saved = am85c30_com_inb_command(port, AM85C30_RR1); + + am85c30_com_outb_command(port, AM85C30_RR1, + cmd_saved & ~(AM85C30_EXT_INT_ENAB | AM85C30_TxINT_ENAB | + AM85C30_RxINT_MASK)); + + while ((am85c30_com_inb_command(port, AM85C30_RR0) & AM85C30_D2) == 0) + ; + am85c30_com_outb(port + 0x01, c); + + while ((am85c30_com_inb_command(port, AM85C30_RR0) & AM85C30_D2) == 0) + ; + am85c30_com_outb_command(port, AM85C30_RR0, AM85C30_RES_Tx_P); + am85c30_com_outb_command(port, AM85C30_RR1, cmd_saved); + + unlock_l_zilog(flags); +} + +static __interrupt unsigned char 
am85c30_serial_getc(void) +{ + unsigned long port; + unsigned long flags; + u8 ret; + + flags = lock_l_zilog(); + + port = am85c30_com_port + 2 * serial_dump_console_num; + while (((am85c30_com_inb_command(port, AM85C30_RR0)) & AM85C30_D0) == 0) + ; + ret = am85c30_com_inb(port + 0x01); + + unlock_l_zilog(flags); + + return ret; +} + +static int __init +am85c30_init(void *serial_base) +{ + DebugSC("boot_am85c30_init() started\n"); +#ifdef CONFIG_E2K + if (!HAS_MACHINE_E2K_IOHUB) { + DebugSC("boot_am85c30_init() on this machine AM85C30 serial " + "device is not used\n"); + return (-ENODEV); + } +#endif /* CONFIG_E2K */ + + if (serial_base == NULL) { + dump_printk("am85c30_init() Serial console base IO " + "address is not passed by BIOS\n"); + dump_printk("am85c30_init() Serial console is not " + "enabled\n"); + return (-ENODEV); + } + am85c30_com_port = (unsigned long)serial_base; + am85c30_serial_console.io_base = (unsigned long)serial_base; + DebugSC("am85c30_init() enabled serial console at %px " + "IO memory base\n", serial_base); + return (0); +} + +/* AM85C30 serial console opts struct */ +serial_console_opts_t am85c30_serial_console = { + .name = SERIAL_CONSOLE_AM85C30_NAME, + .io_base = 0, + .serial_putc = am85c30_serial_putc, + .serial_getc = am85c30_serial_getc, + .init = am85c30_init, +}; diff --git a/arch/l/kernel/apic/Makefile b/arch/l/kernel/apic/Makefile new file mode 100644 index 000000000000..020aa07f8566 --- /dev/null +++ b/arch/l/kernel/apic/Makefile @@ -0,0 +1,9 @@ +# +# Makefile for local APIC drivers and for the IO-APIC code +# + +obj-$(CONFIG_L_LOCAL_APIC) += apic.o ipi.o irq.o probe_64.o +obj-$(CONFIG_L_IO_APIC) += io_apic.o +obj-$(CONFIG_L_X86_64) += apic_flat_64.o +obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_IRQ_WORK) += irq_work.o diff --git a/arch/l/kernel/apic/apic.c b/arch/l/kernel/apic/apic.c new file mode 100644 index 000000000000..f6becdcfd822 --- /dev/null +++ b/arch/l/kernel/apic/apic.c @@ -0,0 +1,2939 @@ +/* + * Local APIC handling, 
local APIC timers + * + * (c) 1999, 2000, 2009 Ingo Molnar + * + * Fixes + * Maciej W. Rozycki : Bits for genuine 82489DX APICs; + * thanks to Eric Gilmore + * and Rolf G. Tews + * for testing these extensively. + * Maciej W. Rozycki : Various updates and fixes. + * Mikael Pettersson : Power Management for UP-APIC. + * Pavel Machek and + * Mikael Pettersson : PM converted to driver model. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef CONFIG_E90S +#include +#endif +#include +#include +#include +#include +#include +#include +#if 0 +#include +#include +#include +#endif +#include +#include +#include +#include +#ifdef CONFIG_PIC +#include +#endif +#if !defined CONFIG_E2K && !defined CONFIG_E90S +#include +#include +#include +#include +#include +#include +#include +#endif +#include +#include +#include +#include + +#include +#include +#include +#include + +unsigned int num_processors; + +unsigned disabled_cpus; + +/* Processor that is doing the boot up */ +unsigned int boot_cpu_physical_apicid = -1U; +EXPORT_SYMBOL_GPL(boot_cpu_physical_apicid); + +/* + * The highest APIC ID seen during enumeration. + */ +unsigned int max_physical_apicid; + +/* + * Bitmask of physically existing CPUs: + */ +physid_mask_t phys_cpu_present_map; + +/* + * Processor to be disabled specified by kernel parameter + * disable_cpu_apicid=, mostly used for the kdump 2nd kernel to + * avoid undefined behaviour caused by sending INIT from AP to BSP. 
+ */ +static unsigned int disabled_cpu_apicid __read_mostly = BAD_APICID; + +/* + * Map cpu index to physical APIC ID + */ +DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID); +DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid, BAD_APICID); +EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid); +EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid); + +#ifdef CONFIG_L_X86_32 + +/* + * On x86_32, the mapping between cpu and logical apicid may vary + * depending on apic in use. The following early percpu variable is + * used for the mapping. This is where the behaviors of x86_64 and 32 + * actually diverge. Let's keep it ugly for now. + */ +DEFINE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid, BAD_APICID); + +/* Local APIC was disabled by the BIOS and enabled by the kernel */ +static int enabled_via_apicbase; + +/* + * Handle interrupt mode configuration register (IMCR). + * This register controls whether the interrupt signals + * that reach the BSP come from the master PIC or from the + * local APIC. Before entering Symmetric I/O Mode, either + * the BIOS or the operating system must switch out of + * PIC Mode by changing the IMCR. + */ +static inline void imcr_pic_to_apic(void) +{ + /* select IMCR register */ + outb(0x70, 0x22); + /* NMI and 8259 INTR go through APIC */ + outb(0x01, 0x23); +} + +static inline void imcr_apic_to_pic(void) +{ + /* select IMCR register */ + outb(0x70, 0x22); + /* NMI and 8259 INTR go directly to BSP */ + outb(0x00, 0x23); +} +#endif + +#if 0 +/* + * Knob to control our willingness to enable the local APIC. 
+ * + * +1=force-enable + */ +static int force_enable_local_apic __initdata; +/* + * APIC command line parameters + */ +static int __init parse_lapic(char *arg) +{ + if (config_enabled(CONFIG_L_X86_32) && !arg) + force_enable_local_apic = 1; + else if (arg && !strncmp(arg, "notscdeadline", 13)) + setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); + return 0; +} +early_param("lapic", parse_lapic); + +#ifdef CONFIG_L_X86_64 +static int apic_calibrate_pmtmr __initdata; +static __init int setup_apicpmtimer(char *s) +{ + apic_calibrate_pmtmr = 1; + notsc_setup(NULL); + return 0; +} +__setup("apicpmtimer", setup_apicpmtimer); +#endif +#endif + +int x2apic_mode; +#ifdef CONFIG_X86_X2APIC +/* x2apic enabled before OS handover */ +int x2apic_preenabled; +static int x2apic_disabled; +static int nox2apic; +static __init int setup_nox2apic(char *str) +{ + if (x2apic_enabled()) { + int apicid = native_apic_msr_read(APIC_ID); + + if (apicid >= 255) { + pr_warning("Apicid: %08x, cannot enforce nox2apic\n", + apicid); + return 0; + } + + pr_warning("x2apic already enabled. will disable it\n"); + } else + setup_clear_cpu_cap(X86_FEATURE_X2APIC); + + nox2apic = 1; + + return 0; +} +early_param("nox2apic", setup_nox2apic); +#endif + +unsigned long mp_lapic_addr; +#if 0 +int disable_apic; +#endif +/* Disable local APIC timer from the kernel commandline or via dmi quirk */ +#ifndef CONFIG_VIRTUALIZATION +static __initdata bool disable_apic_timer; +#else /* CONFIG_VIRTUALIZATION */ +/* Variable should be global to can to disable on guest kernel */ +__initdata bool disable_apic_timer; +#endif /* ! 
CONFIG_VIRTUALIZATION */ + +#if 0 +/* Local APIC timer works in C2 */ +int local_apic_timer_c2_ok; +EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); +#endif + +/* + * Debug level, exported for io_apic.c + */ +unsigned int apic_verbosity; + +int pic_mode; + +/* Have we found an MP table */ +int smp_found_config; + +static struct resource lapic_resource = { + .name = "Local APIC", + .flags = IORESOURCE_MEM | IORESOURCE_BUSY, +}; + +unsigned int lapic_timer_frequency = 0; + +static void apic_pm_activate(void); + +static unsigned long apic_phys; + +#if 0 +/* + * Get the LAPIC version + */ +static inline int lapic_get_version(void) +{ + return GET_APIC_VERSION(apic_read(APIC_LVR)); +} +#endif + +/* + * Check, if the APIC is integrated or a separate chip + */ +static inline int lapic_is_integrated(void) +{ +#ifdef CONFIG_L_X86_64 + return 1; +#else + return APIC_INTEGRATED(lapic_get_version()); +#endif +} + +#if 0 +/* + * Check, whether this is a modern or a first generation APIC + */ +static int modern_apic(void) +{ + /* AMD systems use old APIC versions, so check the CPU */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && + boot_cpu_data.x86 >= 0xf) + return 1; + return lapic_get_version() >= 0x14; +} + +/* + * right after this call apic become NOOP driven + * so apic->write/read doesn't do anything + */ +static void __init apic_disable(void) +{ + pr_info("APIC: switched to apic NOOP\n"); + apic = &apic_noop; +} +#endif + +void native_apic_wait_icr_idle(void) +{ + while (apic_read(APIC_ICR) & APIC_ICR_BUSY) + cpu_relax(); +} + +u32 native_safe_apic_wait_icr_idle(void) +{ + u32 send_status; + int timeout; + + timeout = 0; + do { + send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY; + if (!send_status) + break; + inc_irq_stat(icr_read_retry_count); + udelay(100); + } while (timeout++ < 1000); + + return send_status; +} + +void native_apic_icr_write(u32 low, u32 id) +{ + apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id)); + apic_write(APIC_ICR, low); +} + +u64 
native_apic_icr_read(void) +{ + u32 icr1, icr2; + + icr2 = apic_read(APIC_ICR2); + icr1 = apic_read(APIC_ICR); + + return icr1 | ((u64)icr2 << 32); +} + +#ifdef CONFIG_L_X86_32 +/** + * get_physical_broadcast - Get number of physical broadcast IDs + */ +int get_physical_broadcast(void) +{ + return modern_apic() ? 0xff : 0xf; +} +#endif + +/** + * lapic_get_maxlvt - get the maximum number of local vector table entries + */ +int lapic_get_maxlvt(void) +{ + unsigned int v; + + v = apic_read(APIC_LVR); + /* + * - we always have APIC integrated on 64bit mode + * - 82489DXs do not report # of LVT entries + */ + return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2; +} + +/* + * Local APIC timer + */ + +#if 0 +/* Clock divisor */ +#define APIC_DIVISOR 16 +#else +/* Clock divisor. + * + * APIC clock speed is approximated by two integers: 'mult' and + * 'shift'. They work as follows: + * + * APIC_clocks = (mult * nanoseconds) >> shift + * + * Thus the inherent error of this computation is: + * + * error = 0.5 / mult = nanoseconds / (2 * (APIC_clocks << shift)) + * + * Now let's denote the ratio of APIC bus speed to CPU clock speed + * as X. Then: + * + * APIC_clocks = X * nanoseconds + * + * nanoseconds = APIC_clocks / X + * + * error = APIC_clocks / (2 * X * (APIC_clocks << shift)) = + * = APIC_clocks / (2 * X * APIC_clocks * (2^shift)) = + * = 1 / (2 * X * (2^shift)) + * + * Thus increasing X (which is backwards proportional to APIC_DIVISOR) + * or shift will reduce the approximation error. + * + * Reducing APIC_DIVISOR will decrease the max_delta_ns. But when + * increasing the shift we must make sure that nothing will overflow + * 64-bits values. + * + * log2(mult * nanoseconds) = log2(APIC_clocks << shift) <= 64 + * + * (Actually under log2() here and below I mean rounded up + * logarythm, i.e. the number of bits needed to hold the value). 
+ * After simple conversion we get: + * + * log2(APIC_clocks) + shift <= 64 + * + * Maximum value of log2(APIC_clocks) is 32 as TMICT and TMCCT + * registers are 32-bits long. We want to use the maximum value + * to allow the system to go idle for a long times, so we get: + * + * shift <= 32 + * + * So if we increase the shift we will have to decrease APIC_clocks, + * that is, decrease max_delta_ns. + * + * In other words - nothing can be done... + * + * Some numbers: + * + * APIC clock (MHz) | APIC_DIVISOR | max delta (seconds) + * ----------------------------------------------------- + * 10 (LMS) | 16 | 6871 + * 10 (LMS) | 1 | 429 + * 1000 (E90S) | 16 | 69 + * 1000 (E90S) | 1 | 4 + * + * So it is OK to have divisor of 1 + */ +#define APIC_DIVISOR 1 +#endif +#define TSC_DIVISOR 32 + +/* + * This function sets up the local APIC timer, with a timeout of + * 'clocks' APIC bus clock. During calibration we actually call + * this function twice on the boot CPU, once with a bogus timeout + * value, second time for real. The other (noncalibrating) CPUs + * call this function only once, with the real, calibrated value. + * + * We do reads before writes even if unnecessary, to get around the + * P5 APIC double write bug. 
+ */ +static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) +{ + unsigned int lvtt_value, tmp_value; + + lvtt_value = LOCAL_TIMER_VECTOR; + if (!oneshot) + lvtt_value |= APIC_LVT_TIMER_PERIODIC; +#if 0 + else if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) + lvtt_value |= APIC_LVT_TIMER_TSCDEADLINE; +#endif + + if (!lapic_is_integrated()) + lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV); + + if (!irqen) + lvtt_value |= APIC_LVT_MASKED; + + apic_write(APIC_LVTT, lvtt_value); + apic_printk(APIC_DEBUG, KERN_DEBUG "__setup_APIC_LVTT() APIC_LVTT == " + "0x%x (w) 0x%x (r)\n", lvtt_value, (int) apic_read(APIC_LVTT)); + +#if 0 + if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) { + printk_once(KERN_DEBUG "TSC deadline timer enabled\n"); + return; + } + + /* + * Divide PICLK by 16 + */ + tmp_value = apic_read(APIC_TDCR); + apic_write(APIC_TDCR, + (tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) | + APIC_TDR_DIV_16); +#else + /* + * Do not divide APIC clock. + */ + tmp_value = apic_read(APIC_TDCR); + apic_write(APIC_TDCR, + (tmp_value & ~APIC_TDR_DIV_TMBASE) | APIC_TDR_DIV_1); + apic_printk(APIC_DEBUG, KERN_DEBUG "__setup_APIC_LVTT() APIC_TDCR == 0x%x\n", + (int) apic_read(APIC_TDCR)); +#endif + + if (!oneshot) + apic_write(APIC_TMICT, clocks / APIC_DIVISOR); + + apic_printk(APIC_DEBUG, KERN_DEBUG "__setup_APIC_LVTT() APIC_TMICT == %d\n", + (int) apic_read(APIC_TMICT)); +} + +#if 0 +/* + * Setup extended LVT, AMD specific + * + * Software should use the LVT offsets the BIOS provides. The offsets + * are determined by the subsystems using it like those for MCE + * threshold or IBS. On K8 only offset 0 (APIC500) and MCE interrupts + * are supported. Beginning with family 10h at least 4 offsets are + * available. + * + * Since the offsets must be consistent for all cores, we keep track + * of the LVT offsets in software and reserve the offset for the same + * vector also to be used on other cores. 
An offset is freed by + * setting the entry to APIC_EILVT_MASKED. + * + * If the BIOS is right, there should be no conflicts. Otherwise a + * "[Firmware Bug]: ..." error message is generated. However, if + * software does not properly determines the offsets, it is not + * necessarily a BIOS bug. + */ + +static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX]; + +static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new) +{ + return (old & APIC_EILVT_MASKED) + || (new == APIC_EILVT_MASKED) + || ((new & ~APIC_EILVT_MASKED) == old); +} + +static unsigned int reserve_eilvt_offset(int offset, unsigned int new) +{ + unsigned int rsvd, vector; + + if (offset >= APIC_EILVT_NR_MAX) + return ~0; + + rsvd = atomic_read(&eilvt_offsets[offset]); + do { + vector = rsvd & ~APIC_EILVT_MASKED; /* 0: unassigned */ + if (vector && !eilvt_entry_is_changeable(vector, new)) + /* may not change if vectors are different */ + return rsvd; + rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new); + } while (rsvd != new); + + rsvd &= ~APIC_EILVT_MASKED; + if (rsvd && rsvd != vector) + pr_info("LVT offset %d assigned for vector 0x%02x\n", + offset, rsvd); + + return new; +} + +/* + * If mask=1, the LVT entry does not generate interrupts while mask=0 + * enables the vector. See also the BKDGs. Must be called with + * preemption disabled. 
+ */ + +int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask) +{ + unsigned long reg = APIC_EILVTn(offset); + unsigned int new, old, reserved; + + new = (mask << 16) | (msg_type << 8) | vector; + old = apic_read(reg); + reserved = reserve_eilvt_offset(offset, new); + + if (reserved != new) { + pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for " + "vector 0x%x, but the register is already in use for " + "vector 0x%x on another cpu\n", + smp_processor_id(), reg, offset, new, reserved); + return -EINVAL; + } + + if (!eilvt_entry_is_changeable(old, new)) { + pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for " + "vector 0x%x, but the register is already in use for " + "vector 0x%x on this cpu\n", + smp_processor_id(), reg, offset, new, old); + return -EBUSY; + } + + apic_write(reg, new); + + return 0; +} +EXPORT_SYMBOL_GPL(setup_APIC_eilvt); +#endif + +/* + * Program the next event, relative to now + */ +static int lapic_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + apic_write(APIC_TMICT, delta); + return 0; +} + +#if 0 +static int lapic_next_deadline(unsigned long delta, + struct clock_event_device *evt) +{ + u64 tsc; + + rdtscll(tsc); + wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR)); + return 0; +} +#endif + +static int lapic_timer_shutdown(struct clock_event_device *evt) +{ + unsigned int v; + + v = apic_read(APIC_LVTT); + v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); + apic_write(APIC_LVTT, v); + apic_write(APIC_TMICT, 0); + return 0; +} + +static inline int +lapic_timer_set_periodic_oneshot(struct clock_event_device *evt, bool oneshot) +{ +#ifdef CONFIG_E2K + __setup_APIC_LVTT(lapic_timer_frequency, + cpu_has(CPU_HWBUG_LAPIC_TIMER) ? 
0 : oneshot, 1); +#else + __setup_APIC_LVTT(lapic_timer_frequency, oneshot, 1); +#endif + return 0; +} + +static int lapic_timer_set_periodic(struct clock_event_device *evt) +{ + return lapic_timer_set_periodic_oneshot(evt, false); +} + +static int lapic_timer_set_oneshot(struct clock_event_device *evt) +{ + return lapic_timer_set_periodic_oneshot(evt, true); +} + +/* + * Local APIC timer broadcast function + */ +static void lapic_timer_broadcast(const struct cpumask *mask) +{ +#ifdef CONFIG_SMP + apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR); +#endif +} + + +/* + * The local apic timer can be used for any function which is CPU local. + */ +static struct clock_event_device lapic_clockevent = { + .name = "lapic", + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .shift = 32, + .set_state_shutdown = lapic_timer_shutdown, + .set_state_periodic = lapic_timer_set_periodic, + .set_state_oneshot = lapic_timer_set_oneshot, + .set_next_event = lapic_next_event, + .broadcast = lapic_timer_broadcast, + .rating = 100, + .irq = -1, +}; +static DEFINE_PER_CPU(struct clock_event_device, lapic_events); + +/* + * In this functions we calibrate APIC bus clocks to the external timer. + * + * We want to do the calibration only once since we want to have local timer + * irqs syncron. CPUs connected by the same APIC bus have the very same bus + * frequency. + * + * This was previously done by reading the PIT/HPET and waiting for a wrap + * around to find out, that a tick has elapsed. I have a box, where the PIT + * readout is broken, so it never gets out of the wait loop again. This was + * also reported by others. + * + * Monitoring the jiffies value is inaccurate and the clockevents + * infrastructure allows us to do a simple substitution of the interrupt + * handler. + * + * The calibration routine also uses the pm_timer when possible, as the PIT + * happens to run way too slow (factor 2.3 on my VAIO CoreDuo, which goes + * back to normal later in the boot process). 
+ */ + +#if 0 +#define LAPIC_CAL_LOOPS (HZ/10) +#else +/* Prototypes have HZ set to 10 */ +#define LAPIC_CAL_LOOPS (HZ/5) +#endif + +static __initdata int lapic_cal_loops = -1; +static __initdata long lapic_cal_t1, lapic_cal_t2; +static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2; +static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2; +static __initdata unsigned long lapic_cal_j1, lapic_cal_j2; +static u32 levt_freq; + +void (*real_handler)(struct clock_event_device *dev); + +/* + * Setup the local APIC timer for this CPU. Copy the initialized values + * of the boot CPU and register the clock event in the framework. + */ +static void setup_APIC_timer(void) +{ + struct clock_event_device *levt = this_cpu_ptr(&lapic_events); + +#if 0 + if (this_cpu_has(X86_FEATURE_ARAT)) { + lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP; + /* Make LAPIC timer preferrable over percpu HPET */ + lapic_clockevent.rating = 150; + } +#endif + + memcpy(levt, &lapic_clockevent, sizeof(*levt)); + levt->cpumask = cpumask_of(smp_processor_id()); + + clockevents_config_and_register(levt, levt_freq, 0xF, 0xFFFFFFFF); +} + +/* + * Temporary interrupt handler. 
+ */ +static noinline void __init lapic_cal_handler(struct clock_event_device *dev) +{ + unsigned long long tsc = 0; + long tapic = apic_read(APIC_TMCCT); + unsigned long pm = acpi_pm_read_early(); + + if (paravirt_enabled()) { + /* real handler should be called too */ + /* restore handler into structure, because of */ + /* tick_handle_periodic() check handler function */ + /* see kernel/time/tick-common.c */ + dev->event_handler = real_handler; + real_handler(dev); + dev->event_handler = lapic_cal_handler; + } + if (cpu_has_tsc) + tsc = get_cycles(); + + switch (lapic_cal_loops++) { + case 0: + lapic_cal_t1 = tapic; + lapic_cal_tsc1 = tsc; + lapic_cal_pm1 = pm; + lapic_cal_j1 = jiffies; + break; + + case LAPIC_CAL_LOOPS: + lapic_cal_t2 = tapic; + lapic_cal_tsc2 = tsc; + if (pm < lapic_cal_pm1) + pm += ACPI_PM_OVRRUN; + lapic_cal_pm2 = pm; + lapic_cal_j2 = jiffies; + break; + } +} + +#ifdef CONFIG_X86_PM_TIMER +static int __init +calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc) +{ + const long pm_100ms = PMTMR_TICKS_PER_SEC / 10; + const long pm_thresh = pm_100ms / 100; + unsigned long mult; + u64 res; + +#ifndef CONFIG_X86_PM_TIMER + return -1; +#endif + + apic_printk(APIC_VERBOSE, "... PM-Timer delta = %ld\n", deltapm); + + /* Check, if the PM timer is available */ + if (!deltapm) + return -1; + + mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22); + + if (deltapm > (pm_100ms - pm_thresh) && + deltapm < (pm_100ms + pm_thresh)) { + apic_printk(APIC_VERBOSE, "... 
PM-Timer result ok\n"); + return 0; + } + + res = (((u64)deltapm) * mult) >> 22; + do_div(res, 1000000); + pr_warning("APIC calibration not consistent " + "with PM-Timer: %ldms instead of 100ms\n",(long)res); + + /* Correct the lapic counter value */ + res = (((u64)(*delta)) * pm_100ms); + do_div(res, deltapm); + pr_info("APIC delta adjusted to PM-Timer: " + "%lu (%ld)\n", (unsigned long)res, *delta); + *delta = (long)res; + + /* Correct the tsc counter value */ + if (cpu_has_tsc) { + res = (((u64)(*deltatsc)) * pm_100ms); + do_div(res, deltapm); + apic_printk(APIC_VERBOSE, "TSC delta adjusted to " + "PM-Timer: %lu (%ld)\n", + (unsigned long)res, *deltatsc); + *deltatsc = (long)res; + } + + return 0; +} +#endif + +int __init calibrate_APIC_clock(void) +{ + struct clock_event_device *levt = this_cpu_ptr(&lapic_events); +#ifdef CONFIG_X86_PM_TIMER + unsigned long deltaj; + int pm_referenced = 0; +#endif + long delta, deltatsc; + + /** + * check if lapic timer has already been calibrated by platform + * specific routine, such as tsc calibration code. if so, we just fill + * in the clockevent structure and return. + */ +#if 0 + if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) { + return 0; + } else +#endif + if (lapic_timer_frequency) { + apic_printk(APIC_VERBOSE, "lapic timer already calibrated %d\n", + lapic_timer_frequency); + lapic_clockevent.mult = div_sc(lapic_timer_frequency/APIC_DIVISOR, + TICK_NSEC, lapic_clockevent.shift); + lapic_clockevent.max_delta_ns = + clockevent_delta2ns(0x7FFFFF, &lapic_clockevent); + lapic_clockevent.min_delta_ns = + clockevent_delta2ns(0xF, &lapic_clockevent); + return 0; + } + + apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n" + "calibrating APIC timer ...\n"); + + local_irq_disable(); + + /* + * Setup the APIC counter to maximum. 
There is no way the lapic + * can underflow in the 100ms detection time frame + */ + __setup_APIC_LVTT(0xffffffff, 0, 0); + + /* Replace the global interrupt handler */ + real_handler = global_clock_event->event_handler; + global_clock_event->event_handler = lapic_cal_handler; + + /* Let the interrupts run */ + local_irq_enable(); + + while (lapic_cal_loops <= LAPIC_CAL_LOOPS) + cpu_relax(); + + local_irq_disable(); + + /* Restore the real event handler */ + global_clock_event->event_handler = real_handler; + + /* Build delta t1-t2 as apic timer counts down */ + delta = lapic_cal_t1 - lapic_cal_t2; + apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta); + + deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1); + +#ifdef CONFIG_X86_PM_TIMER + /* we trust the PM based calibration if possible */ + pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1, + &delta, &deltatsc); +#endif + +#if 0 + /* Calculate the scaled math multiplication factor */ + lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS, + lapic_clockevent.shift); + lapic_clockevent.max_delta_ns = + clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent); + lapic_clockevent.min_delta_ns = + clockevent_delta2ns(0xF, &lapic_clockevent); +#endif + + lapic_timer_frequency = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS; + levt_freq = (lapic_cal_t1 - lapic_cal_t2) * (HZ / LAPIC_CAL_LOOPS); + + apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta); + apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult); + apic_printk(APIC_VERBOSE, "..... shift: %u\n", lapic_clockevent.shift); + apic_printk(APIC_VERBOSE, "..... calibration result: %u\n", + lapic_timer_frequency); + + if (cpu_has_tsc) { + apic_printk(APIC_VERBOSE, "..... CPU clock speed is %ld.%04ld MHz.\n", + (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ), + (deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ)); + } + + apic_printk(APIC_VERBOSE, "..... 
host bus clock speed is " + "%u.%04u MHz.\n", + lapic_timer_frequency / (1000000 / HZ), + lapic_timer_frequency % (1000000 / HZ)); + + /* + * Do a sanity check on the APIC calibration result + */ + if (lapic_timer_frequency < (1000000 / HZ)) { + local_irq_enable(); + pr_warning("APIC frequency too slow, disabling apic timer\n"); + return -1; + } + + /* On Elbrus it makes no sense to check timer with external timer + * since it was calibrated with the very same external timer. */ +#ifdef CONFIG_X86_PM_TIMER + /* + * PM timer calibration failed or not turned on + * so lets try APIC timer based calibration + */ + if (!pm_referenced) { + apic_printk(APIC_VERBOSE, "... verify APIC timer\n"); + + /* + * Setup the apic timer manually + */ + levt->event_handler = lapic_cal_handler; + lapic_timer_set_periodic(levt); + lapic_cal_loops = -1; + + /* Let the interrupts run */ + local_irq_enable(); + + while (lapic_cal_loops <= LAPIC_CAL_LOOPS) + cpu_relax(); + + /* Stop the lapic timer */ + local_irq_disable(); + lapic_timer_shutdown(levt); + + /* Jiffies delta */ + deltaj = lapic_cal_j2 - lapic_cal_j1; + apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj); + } else +#endif + local_irq_enable(); + + return 0; +} +EXPORT_SYMBOL(calibrate_APIC_clock); + +/* + * Setup the boot APIC + * + * Calibrate and verify the result. + */ +void __init setup_boot_APIC_clock(void) +{ + /* + * The local apic timer can be disabled via the kernel + * commandline or from the CPU detection code. Register the lapic + * timer as a dummy clock event source on SMP systems, so the + * broadcast mechanism is used. On UP systems simply ignore it. + */ + if (disable_apic_timer) { + pr_info("Disabling APIC timer\n"); + /* No broadcast on UP ! */ + /* Local APIC support on guest is not ready at present time */ + if (num_possible_cpus() > 1 && !paravirt_enabled()) { + lapic_clockevent.mult = 1; + setup_APIC_timer(); + } + return; + } + + if (calibrate_APIC_clock()) { + /* No broadcast on UP ! 
*/ + if (num_possible_cpus() > 1) + setup_APIC_timer(); + return; + } + + /* Setup the lapic or request the broadcast */ + setup_APIC_timer(); +} + +void setup_secondary_APIC_clock(void) +{ + setup_APIC_timer(); +} + +#ifdef CONFIG_L_WDT +void (*wd_reset_ask)(void) = NULL; +#endif + +/* + * The guts of the apic timer interrupt + */ +void local_apic_timer_interrupt(void) +{ + int cpu = smp_processor_id(); + struct clock_event_device *evt = &per_cpu(lapic_events, cpu); + +#ifdef CONFIG_L_WDT + if (wd_reset_ask) + wd_reset_ask(); +#endif + + /* + * Normally we should not be here till LAPIC has been initialized but + * in some cases like kdump, its possible that there is a pending LAPIC + * timer interrupt from previous kernel's context and is delivered in + * new kernel the moment interrupts are enabled. + * + * Interrupts are enabled early and LAPIC is setup much later, hence + * its possible that when we get here evt->event_handler is NULL. + * Check for event_handler being NULL and discard the interrupt as + * spurious. + */ + if (!evt->event_handler) { + pr_warning("Spurious LAPIC timer interrupt on cpu %d\n", cpu); + /* Switch it off */ + lapic_timer_shutdown(evt); + return; + } + + /* + * the NMI deadlock-detector uses this. + */ + inc_irq_stat(apic_timer_irqs); + + evt->event_handler(evt); +} + +#ifdef CONFIG_MCST +#define DELTA_NS (NSEC_PER_SEC / HZ / 2) +#endif + +/* + * Local APIC timer interrupt. This is the most natural way for doing + * local interrupts, but local timer interrupts can be emulated by + * broadcast interrupts too. [in case the hw doesn't support APIC timers] + * + * [ if a single-CPU system runs an SMP kernel then we call the local + * interrupt as well. Thus we cannot inline the local irq ... 
] + */ +__visible void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs) +{ + struct pt_regs *old_regs = set_irq_regs(regs); +#ifdef CONFIG_MCST + int cpu; + long long cur_time; + long long next_time; + + cpu = smp_processor_id(); + next_time = per_cpu(next_rt_intr, cpu); +#if 0 + trace_printk("POSTT next_time=%lld cur=%lld\n", + next_time, ktime_to_ns(ktime_get())); +#endif + if (next_time) { + cur_time = ktime_to_ns(ktime_get()); + if (cur_time > next_time + DELTA_NS) { + per_cpu(next_rt_intr, cpu) = 0; + } else if (cur_time > next_time - DELTA_NS && + cur_time < next_time + DELTA_NS) { + /* set 1 -- must do timer later + * in do_postpone_tick() */ + per_cpu(next_rt_intr, cpu) = 1; + set_irq_regs(old_regs); + ack_APIC_irq(); + /* if do_postpone_tick() will not called: */ + apic_write(APIC_TMICT, + usecs_2cycles(USEC_PER_SEC / HZ)); + return; + } + } +#endif + + /* + * NOTE! We'd better ACK the irq immediately, + * because timer handling can be slow. + * + * update_process_times() expects us to have done l_irq_enter(). + * Besides, if we don't timer interrupts ignore the global + * interrupt lock, which is the WrongThing (tm) to do. + */ + entering_ack_irq(); + local_apic_timer_interrupt(); + exiting_irq(); + + set_irq_regs(old_regs); +} + +#if 0 +__visible void __irq_entry smp_trace_apic_timer_interrupt(struct pt_regs *regs) +{ + struct pt_regs *old_regs = set_irq_regs(regs); + + /* + * NOTE! We'd better ACK the irq immediately, + * because timer handling can be slow. + * + * update_process_times() expects us to have done l_irq_enter(). + * Besides, if we don't timer interrupts ignore the global + * interrupt lock, which is the WrongThing (tm) to do. 
+ */ + entering_ack_irq(); + trace_local_timer_entry(LOCAL_TIMER_VECTOR); + local_apic_timer_interrupt(); + trace_local_timer_exit(LOCAL_TIMER_VECTOR); + exiting_irq(); + + set_irq_regs(old_regs); +} +#endif + +int setup_profiling_timer(unsigned int multiplier) +{ + return -EINVAL; +} + +/* + * Local APIC start and shutdown + */ + +/** + * clear_local_APIC - shutdown the local APIC + * + * This is called, when a CPU is disabled and before rebooting, so the state of + * the local APIC has no dangling leftovers. Also used to cleanout any BIOS + * leftovers during boot. + */ +void clear_local_APIC(void) +{ + int maxlvt; + u32 v; +#ifdef CONFIG_E2K + int cpu = boot_smp_processor_id(); +#endif + + /* APIC hasn't been mapped yet */ + if (!x2apic_mode && !apic_phys) + return; + + maxlvt = lapic_get_maxlvt(); + /* + * Masking an LVT entry can trigger a local APIC error + * if the vector is zero. Mask LVTERR first to prevent this. + */ + if (maxlvt >= 3) { + v = ERROR_APIC_VECTOR; /* any non-zero vector will do */ + apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); + } + /* + * Careful: we have to set masks only first to deassert + * any level-triggered sources. 
+ */ + v = apic_read(APIC_LVTT); + apic_write(APIC_LVTT, v | APIC_LVT_MASKED); + v = apic_read(APIC_LVT0); + apic_write(APIC_LVT0, v | APIC_LVT_MASKED); + v = apic_read(APIC_LVT1); + apic_write(APIC_LVT1, v | APIC_LVT_MASKED); + if (maxlvt >= 4) { + v = apic_read(APIC_LVTPC); + apic_write(APIC_LVTPC, v | APIC_LVT_MASKED); + } + + /* lets not touch this if we didn't frob it */ +#ifdef CONFIG_X86_THERMAL_VECTOR + if (maxlvt >= 5) { + v = apic_read(APIC_LVTTHMR); + apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED); + } +#endif +#ifdef CONFIG_X86_MCE_INTEL + if (maxlvt >= 6) { + v = apic_read(APIC_LVTCMCI); + if (!(v & APIC_LVT_MASKED)) + apic_write(APIC_LVTCMCI, v | APIC_LVT_MASKED); + } +#endif + + /* + * Clean APIC state for other OSs: + */ + apic_write(APIC_LVTT, APIC_LVT_MASKED); + apic_write(APIC_LVT0, APIC_LVT_MASKED); + apic_write(APIC_LVT1, APIC_LVT_MASKED); + if (maxlvt >= 3) + apic_write(APIC_LVTERR, APIC_LVT_MASKED); + if (maxlvt >= 4) + apic_write(APIC_LVTPC, APIC_LVT_MASKED); + + /* Integrated APIC (!82489DX) ? */ + if (lapic_is_integrated()) { + if (maxlvt > 3) + /* Clear ESR due to Pentium errata 3AP and 11AP */ + apic_write(APIC_ESR, 0); + apic_read(APIC_ESR); + } +} + +/** + * disable_local_APIC - clear and disable the local APIC + */ +void disable_local_APIC(void) +{ + unsigned int value; + + /* APIC hasn't been mapped yet */ + if (!x2apic_mode && !apic_phys) + return; + + clear_local_APIC(); + + /* + * Disable APIC (implies clearing of registers + * for 82489DX!). + */ + value = apic_read(APIC_SPIV); + value &= ~APIC_SPIV_APIC_ENABLED; + apic_write(APIC_SPIV, value); + +#ifdef CONFIG_L_X86_32 + /* + * When LAPIC was disabled by the BIOS and enabled by the kernel, + * restore the disabled state. 
+ */ + if (enabled_via_apicbase) { + unsigned int l, h; + + rdmsr(MSR_IA32_APICBASE, l, h); + l &= ~MSR_IA32_APICBASE_ENABLE; + wrmsr(MSR_IA32_APICBASE, l, h); + } +#endif +} + +#if 0 +/* + * If Linux enabled the LAPIC against the BIOS default disable it down before + * re-entering the BIOS on shutdown. Otherwise the BIOS may get confused and + * not power-off. Additionally clear all LVT entries before disable_local_APIC + * for the case where Linux didn't enable the LAPIC. + */ +void lapic_shutdown(void) +{ + unsigned long flags; + + if (!cpu_has_apic && !apic_from_smp_config()) + return; + + local_irq_save(flags); + +#ifdef CONFIG_L_X86_32 + if (!enabled_via_apicbase) + clear_local_APIC(); + else +#endif + disable_local_APIC(); + + + local_irq_restore(flags); +} +#endif + +/* + * This is to verify that we're looking at a real local APIC. + * Check these against your board if the CPUs aren't getting + * started for no apparent reason. + */ +int __init_recv verify_local_APIC(void) +{ + unsigned int reg0, reg1; + + /* + * The version register is read-only in a real APIC. + */ + reg0 = apic_read(APIC_LVR); + apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0); + apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK); + reg1 = apic_read(APIC_LVR); + apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1); + + /* + * The two version reads above should print the same + * numbers. If the second one is different, then we + * poke at a non-APIC. + */ + if (reg1 != reg0) + return 0; + + /* + * Check if the version looks reasonably. + */ + reg1 = GET_APIC_VERSION(reg0); + if (reg1 == 0x00 || reg1 == 0xff) + return 0; + reg1 = lapic_get_maxlvt(); + if (reg1 < 0x02 || reg1 == 0xff) + return 0; + + /* The following check might break our LAPIC + * (bug 46759 comment 44, bug 47853) */ +#if !defined CONFIG_E2K && !defined CONFIG_E90S + /* + * The ID register is read/write in a real APIC. 
+ */ + reg0 = apic_read(APIC_ID); + apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0); + apic_write(APIC_ID, reg0 ^ apic->apic_id_mask); + reg1 = apic_read(APIC_ID); + apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1); + apic_write(APIC_ID, reg0); + if (reg1 != (reg0 ^ apic->apic_id_mask)) + return 0; +#endif + +#if 0 + /* + * The next two are just to see if we have sane values. + * They're only really relevant if we're in Virtual Wire + * compatibility mode, but most boxes are anymore. + */ + reg0 = apic_read(APIC_LVT0); + apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0); + reg1 = apic_read(APIC_LVT1); + apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1); +#endif + + return 1; +} + +#if 0 +/** + * sync_Arb_IDs - synchronize APIC bus arbitration IDs + */ +void __init sync_Arb_IDs(void) +{ + /* + * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 And not + * needed on AMD. + */ + if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + return; + + /* + * Wait for idle. + */ + apic_wait_icr_idle(); + + apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n"); + apic_write(APIC_ICR, APIC_DEST_ALLINC | + APIC_INT_LEVELTRIG | APIC_DM_INIT); +} +#endif + +/* + * An initial setup of the virtual wire mode. + */ +void __init_recv init_bsp_APIC(void) +{ + unsigned int value; + + /* + * Don't do the setup now if we have a SMP BIOS as the + * through-I/O-APIC virtual wire mode might be active. + */ + if (smp_found_config || !cpu_has_apic) + return; + + /* + * Do not trust the local APIC being empty at bootup. + */ + clear_local_APIC(); + + /* + * Enable APIC. 
+ */ + value = apic_read(APIC_SPIV); + value &= ~APIC_VECTOR_MASK; + value |= APIC_SPIV_APIC_ENABLED; + +#ifdef CONFIG_L_X86_32 + /* This bit is reserved on P4/Xeon and should be cleared */ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && + (boot_cpu_data.x86 == 15)) + value &= ~APIC_SPIV_FOCUS_DISABLED; + else +#endif + value |= APIC_SPIV_FOCUS_DISABLED; + value |= SPURIOUS_APIC_VECTOR; + apic_write(APIC_SPIV, value); + + /* + * Set up the virtual wire mode. + */ + apic_write(APIC_LVT0, APIC_DM_EXTINT); + value = APIC_DM_NMI; + if (!lapic_is_integrated()) /* 82489DX */ + value |= APIC_LVT_LEVEL_TRIGGER; + apic_write(APIC_LVT1, value); +} + +static void lapic_setup_esr(void) +{ + unsigned int oldvalue, value, maxlvt; + + if (!lapic_is_integrated()) { + pr_info("No ESR for 82489DX.\n"); + return; + } + + if (apic->disable_esr) { + /* + * Something untraceable is creating bad interrupts on + * secondary quads ... for the moment, just leave the + * ESR disabled - we can't do anything useful with the + * errors anyway - mbligh + */ + pr_info("Leaving ESR disabled.\n"); + return; + } + + maxlvt = lapic_get_maxlvt(); + if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ + apic_write(APIC_ESR, 0); + oldvalue = apic_read(APIC_ESR); + + /* enables sending errors */ + value = ERROR_APIC_VECTOR; + apic_write(APIC_LVTERR, value); + + /* + * spec says clear errors after enabling vector. + */ + if (maxlvt > 3) + apic_write(APIC_ESR, 0); + value = apic_read(APIC_ESR); + if (value != oldvalue) + apic_printk(APIC_VERBOSE, "ESR value before enabling " + "vector: 0x%08x after: 0x%08x\n", + oldvalue, value); +} + +int apic_get_vector(void) +{ + int vector; + + vector = arch_apic_read(APIC_VECT); + + if (unlikely(APIC_VECT_IS_EXTINT(vector))) { + pr_emerg("Received ExtINT interrupt on kernel without PIC! 
" + "Read vector is %x\n", vector); + vector = SPURIOUS_APIC_VECTOR; + } + + return APIC_VECT_VECTOR(vector); +} + +/** + * setup_local_APIC - setup the local APIC + * + * Used to setup local APIC while initializing BSP or bringin up APs. + * Always called with preemption disabled. + */ +void setup_local_APIC(void) +{ + int cpu = smp_processor_id(); + unsigned int value; + int i, j, acked = 0; + + if (disable_apic) { + disable_ioapic_support(); + return; + } + + /* + * If this comes from kexec/kcrash the APIC might be enabled in + * SPIV. Soft disable it before doing further initialization. + */ + value = apic_read(APIC_SPIV); + value &= ~APIC_SPIV_APIC_ENABLED; + apic_write(APIC_SPIV, value); + + /* + * Double-check whether this APIC is really registered. + * This is meaningless in clustered apic mode, so we skip it. + */ + BUG_ON(!apic->apic_id_registered()); + + apic->init_apic_ldr(); + + /* + * After a crash, we no longer service the interrupts and a pending + * interrupt from previous kernel might still have ISR bit set. + */ + for (i = APIC_ISR_NR - 1; i >= 0; i--) { + value = apic_read(APIC_ISR + i*0x10); + if (!value) + continue; + for (j = 31; j >= 0; j--) { + if (value & (1<= 0; i--) { + if ((value = apic_read(APIC_IRR + i*0x10))) + break; + } + if (value) { + apic_get_vector(); + ack_APIC_irq(); + acked++; + } + } while (value && acked <= 256); + + if (acked > 256) + pr_err("LAPIC pending interrupts after %d EOI\n", acked); + + /* + * Set Task Priority to 'accept all'. We never change this + * later on. + */ + value = apic_read(APIC_TASKPRI); + value &= ~APIC_TPRI_MASK; + apic_write(APIC_TASKPRI, value); + + /* + * Now that we are all set up, enable the APIC + */ + value = apic_read(APIC_SPIV); + value &= ~APIC_VECTOR_MASK; + /* + * Enable APIC + */ + value |= APIC_SPIV_APIC_ENABLED; + +#ifdef CONFIG_L_X86_32 + /* + * Some unknown Intel IO/APIC (or APIC) errata is biting us with + * certain networking cards. 
If high frequency interrupts are + * happening on a particular IOAPIC pin, plus the IOAPIC routing + * entry is masked/unmasked at a high rate as well then sooner or + * later IOAPIC line gets 'stuck', no more interrupts are received + * from the device. If focus CPU is disabled then the hang goes + * away, oh well :-( + * + * [ This bug can be reproduced easily with a level-triggered + * PCI Ne2000 networking cards and PII/PIII processors, dual + * BX chipset. ] + */ + /* + * Actually disabling the focus CPU check just makes the hang less + * frequent as it makes the interrupt distributon model be more + * like LRU than MRU (the short-term load is more even across CPUs). + * See also the comment in end_level_ioapic_irq(). --macro + */ + + /* + * - enable focus processor (bit==0) + * - 64bit mode always use processor focus + * so no need to set it + */ + value &= ~APIC_SPIV_FOCUS_DISABLED; +#endif + + /* + * Set spurious IRQ vector + */ + value |= SPURIOUS_APIC_VECTOR; + apic_write(APIC_SPIV, value); + + /* + * Set up LVT0, LVT1: + * + * set up through-local-APIC on the BP's LINT0. This is not + * strictly necessary in pure symmetric-IO mode, but sometimes + * we delegate interrupts to the 8259A. + */ + /* + * TODO: set up through-local-APIC from through-I/O-APIC? --macro + */ + value = apic_read(APIC_LVT0) & APIC_LVT_MASKED; +#ifdef CONFIG_PIC + if (!cpu && (pic_mode || !value)) { + value = APIC_DM_EXTINT; + apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu); + } else { +#endif + /* For some reason on E2K a spurious ExtINT interrupt arrives + * if we do not mask it now... Probably configuration + * option X86_REROUTE_FOR_BROKEN_BOOT_IRQS has something + * to do with it (but our PCI code does not have any of + * the necessary workarounds and this spurious interrupt + * is most likely unharmful anyway). 
*/ + value = APIC_DM_EXTINT | APIC_LVT_MASKED; + apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", cpu); +#ifdef CONFIG_PIC + } +#endif +#ifdef CONFIG_E90S + /*e90s uses vector field */ + value |= apic_read(APIC_LVT0); +#endif + apic_write(APIC_LVT0, value); + +#if (defined(CONFIG_E90S) || defined(CONFIG_E2K)) && \ + (defined(CONFIG_RDMA) || defined(CONFIG_RDMA_MODULE) || \ + defined(CONFIG_RDMA_SIC) || defined(CONFIG_RDMA_SIC_MODULE) || \ + defined(CONFIG_RDMA_NET) || defined(CONFIG_RDMA_NET_MODULE)) + if ((!smp_processor_id() || HAS_MACHINE_E2K_FULL_SIC) +#if defined(CONFIG_E90S) + && (e90s_get_cpu_type() == E90S_CPU_R1000) +#endif + ) { + unsigned int value_lvt2; + unsigned int apic_id, cpu; + int node; + + value_lvt2 = apic_read(APIC_LVT2); +#ifdef CONFIG_E2K + if (!HAS_MACHINE_E2K_FULL_SIC && value_lvt2 != 0x00018000) { + rdma_apic_init = 0; + } else { +#endif + cpu = smp_processor_id(); + node = cpu_to_node(cpu); + rdma_apic_init = 1; + if (!rdma_node[node]) { + apic_id = GET_APIC_ID(apic_read(APIC_ID)); + /* Enable LVT2 */ + value_lvt2 = APIC_LVT_LEVEL_TRIGGER | + APIC_DM_FIXED | + RDMA_INTERRUPT_VECTOR | + SET_APIC_DEST_FIELD(apic_id); +#ifdef CONFIG_E2K + if (IS_MACHINE_E2S) + rdma_node[node] = 1; +#endif + } else { + /* Disable LVT2 */ + value_lvt2 = APIC_LVT_MASKED; + } + apic_write(APIC_LVT2, value_lvt2); +#ifdef CONFIG_E2K + } +#endif + } +#endif + +#ifdef CONFIG_E2K + if(l_iommu_supported() +#if defined(CONFIG_ELDSP) || defined(CONFIG_ELDSP_MODULE) + || IS_MACHINE_ES2 +#endif + ) { + unsigned int value; + unsigned int apic_id; + + value = apic_read(APIC_LVT3); + apic_id = read_apic_id(); + value = APIC_LVT_LEVEL_TRIGGER | APIC_DM_FIXED | + LVT3_INTERRUPT_VECTOR | + SET_APIC_DEST_FIELD(apic_id); + apic_write(APIC_LVT3, value); + } + + if (IS_MACHINE_E2S || IS_MACHINE_E1CP || IS_MACHINE_E8C || + IS_MACHINE_E8C2) { + unsigned int value; + unsigned int apic_id; + + value = apic_read(APIC_LVT4); + apic_id = read_apic_id(); + value = 
APIC_LVT_LEVEL_TRIGGER | APIC_DM_FIXED | + LVT4_INTERRUPT_VECTOR | + SET_APIC_DEST_FIELD(apic_id); + apic_write(APIC_LVT4, value); + } +#endif /*CONFIG_E2K*/ + + /* + * only the BP should see the LINT1 NMI signal, obviously. + */ + if (!cpu) + value = APIC_DM_NMI; + else + value = APIC_DM_NMI | APIC_LVT_MASKED; + if (!lapic_is_integrated()) /* 82489DX */ + value |= APIC_LVT_LEVEL_TRIGGER; + apic_write(APIC_LVT1, value); + +#ifdef CONFIG_X86_MCE_INTEL + /* Recheck CMCI information after local APIC is up on CPU #0 */ + if (!cpu) + cmci_recheck(); +#endif +} + +void end_local_APIC_setup(void) +{ + lapic_setup_esr(); + +#ifdef CONFIG_L_X86_32 + { + unsigned int value; + /* Disable the local apic timer */ + value = apic_read(APIC_LVTT); + value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); + apic_write(APIC_LVTT, value); + } +#endif + + apic_pm_activate(); +} + +void __init_recv bsp_end_local_APIC_setup(void) +{ + end_local_APIC_setup(); + +#if 0 + /* + * Now that local APIC setup is completed for BP, configure the fault + * handling for interrupt remapping. 
+ */ + irq_remap_enable_fault_handling(); +#endif +} + +#ifdef CONFIG_X86_X2APIC +/* + * Need to disable xapic and x2apic at the same time and then enable xapic mode + */ +static inline void __disable_x2apic(u64 msr) +{ + wrmsrl(MSR_IA32_APICBASE, + msr & ~(X2APIC_ENABLE | XAPIC_ENABLE)); + wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE); +} + +static __init void disable_x2apic(void) +{ + u64 msr; + + if (!cpu_has_x2apic) + return; + + rdmsrl(MSR_IA32_APICBASE, msr); + if (msr & X2APIC_ENABLE) { + u32 x2apic_id = read_apic_id(); + + if (x2apic_id >= 255) + panic("Cannot disable x2apic, id: %08x\n", x2apic_id); + + pr_info("Disabling x2apic\n"); + __disable_x2apic(msr); + + if (nox2apic) { + clear_cpu_cap(&cpu_data(0), X86_FEATURE_X2APIC); + setup_clear_cpu_cap(X86_FEATURE_X2APIC); + } + + x2apic_disabled = 1; + x2apic_mode = 0; + + register_lapic_address(mp_lapic_addr); + } +} + +void check_x2apic(void) +{ + if (x2apic_enabled()) { + pr_info("x2apic enabled by BIOS, switching to x2apic ops\n"); + x2apic_preenabled = x2apic_mode = 1; + } +} + +void enable_x2apic(void) +{ + u64 msr; + + rdmsrl(MSR_IA32_APICBASE, msr); + if (x2apic_disabled) { + __disable_x2apic(msr); + return; + } + + if (!x2apic_mode) + return; + + if (!(msr & X2APIC_ENABLE)) { + printk_once(KERN_INFO "Enabling x2apic\n"); + wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE); + } +} +#endif /* CONFIG_X86_X2APIC */ + +int __init enable_IR(void) +{ +#ifdef CONFIG_IRQ_REMAP + if (!irq_remapping_supported()) { + pr_debug("intr-remapping not supported\n"); + return -1; + } + + if (!x2apic_preenabled && skip_ioapic_setup) { + pr_info("Skipped enabling intr-remap because of skipping " + "io-apic setup\n"); + return -1; + } + + return irq_remapping_enable(); +#endif + return -1; +} + +#ifdef CONFIG_X86_X2APIC +void __init enable_IR_x2apic(void) +{ + unsigned long flags; + int ret, x2apic_enabled = 0; + int hardware_init_ret; + + /* Make sure irq_remap_ops are initialized */ + setup_irq_remapping_ops(); + + 
hardware_init_ret = irq_remapping_prepare(); + if (hardware_init_ret && !x2apic_supported()) + return; + + ret = save_ioapic_entries(); + if (ret) { + pr_info("Saving IO-APIC state failed: %d\n", ret); + return; + } + + local_irq_save(flags); + legacy_pic->mask_all(); + mask_ioapic_entries(); + + if (x2apic_preenabled && nox2apic) + disable_x2apic(); + + if (hardware_init_ret) + ret = -1; + else + ret = enable_IR(); + + if (!x2apic_supported()) + goto skip_x2apic; + + if (ret < 0) { + /* IR is required if there is APIC ID > 255 even when running + * under KVM + */ + if (max_physical_apicid > 255 || + !hypervisor_x2apic_available()) { + if (x2apic_preenabled) + disable_x2apic(); + goto skip_x2apic; + } + /* + * without IR all CPUs can be addressed by IOAPIC/MSI + * only in physical mode + */ + x2apic_force_phys(); + } + + if (ret == IRQ_REMAP_XAPIC_MODE) { + pr_info("x2apic not enabled, IRQ remapping is in xapic mode\n"); + goto skip_x2apic; + } + + x2apic_enabled = 1; + + if (x2apic_supported() && !x2apic_mode) { + x2apic_mode = 1; + enable_x2apic(); + pr_info("Enabled x2apic\n"); + } + +skip_x2apic: + if (ret < 0) /* IR enabling failed */ + restore_ioapic_entries(); + legacy_pic->restore_mask(); + local_irq_restore(flags); +} +#endif + +#ifdef CONFIG_L_X86_64 +/* + * Detect and enable local APICs on non-SMP boards. + * Original code written by Keir Fraser. + * On AMD64 we trust the BIOS - if it says no APIC it is likely + * not correctly set up (usually the APIC timer won't work etc.) 
+ */ +static int __init detect_init_APIC(void) +{ + if (!cpu_has_apic) { + pr_info("No local APIC present\n"); + return -1; + } + + mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; + return 0; +} +#else + +static int __init apic_verify(void) +{ + u32 features, h, l; + + /* + * The APIC feature bit should now be enabled + * in `cpuid' + */ + features = cpuid_edx(1); + if (!(features & (1 << X86_FEATURE_APIC))) { + pr_warning("Could not enable APIC!\n"); + return -1; + } + set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC); + mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; + + /* The BIOS may have set up the APIC at some other address */ + if (boot_cpu_data.x86 >= 6) { + rdmsr(MSR_IA32_APICBASE, l, h); + if (l & MSR_IA32_APICBASE_ENABLE) + mp_lapic_addr = l & MSR_IA32_APICBASE_BASE; + } + + pr_info("Found and enabled local APIC!\n"); + return 0; +} + +int __init apic_force_enable(unsigned long addr) +{ + u32 h, l; + + if (disable_apic) + return -1; + + /* + * Some BIOSes disable the local APIC in the APIC_BASE + * MSR. This can only be done in software for Intel P6 or later + * and AMD K7 (Model > 1) or later. + */ + if (boot_cpu_data.x86 >= 6) { + rdmsr(MSR_IA32_APICBASE, l, h); + if (!(l & MSR_IA32_APICBASE_ENABLE)) { + pr_info("Local APIC disabled by BIOS -- reenabling.\n"); + l &= ~MSR_IA32_APICBASE_BASE; + l |= MSR_IA32_APICBASE_ENABLE | addr; + wrmsr(MSR_IA32_APICBASE, l, h); + enabled_via_apicbase = 1; + } + } + return apic_verify(); +} + +/* + * Detect and initialize APIC + */ +static int __init detect_init_APIC(void) +{ + /* Disabled by kernel option? 
*/ + if (disable_apic) + return -1; + + switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_AMD: + if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) || + (boot_cpu_data.x86 >= 15)) + break; + goto no_apic; + case X86_VENDOR_INTEL: + if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 || + (boot_cpu_data.x86 == 5 && cpu_has_apic)) + break; + goto no_apic; + default: + goto no_apic; + } + + if (!cpu_has_apic) { + /* + * Over-ride BIOS and try to enable the local APIC only if + * "lapic" specified. + */ + if (!force_enable_local_apic) { + pr_info("Local APIC disabled by BIOS -- " + "you can enable it with \"lapic\"\n"); + return -1; + } + if (apic_force_enable(APIC_DEFAULT_PHYS_BASE)) + return -1; + } else { + if (apic_verify()) + return -1; + } + + apic_pm_activate(); + + return 0; + +no_apic: + pr_info("No local APIC present or hardware disabled\n"); + return -1; +} +#endif + +/** + * init_apic_mappings - initialize APIC mappings + */ +void __init init_apic_mappings(void) +{ + unsigned int new_apicid; + + if (x2apic_mode) { + boot_cpu_physical_apicid = read_apic_id(); + return; + } + + /* If no local APIC can be found return early */ + if (!smp_found_config && detect_init_APIC()) { +#if 0 + /* lets NOP'ify apic operations */ + pr_info("APIC: disable apic facility\n"); + apic_disable(); +#else + panic("No APIC?!"); +#endif + } else { + apic_phys = mp_lapic_addr; + + /* + * acpi lapic path already maps that address in + * acpi_register_lapic_address() + */ + if (!acpi_lapic && !smp_found_config) + register_lapic_address(apic_phys); + } + + /* + * Fetch the APIC ID of the BSP in case we have a + * default configuration (or the MP table is broken). 
+ */ + new_apicid = read_apic_id(); + if (boot_cpu_physical_apicid != new_apicid) { + boot_cpu_physical_apicid = new_apicid; + /* + * yeah -- we lie about apic_version + * in case if apic was disabled via boot option + * but it's not a problem for SMP compiled kernel + * since smp_sanity_check is prepared for such a case + * and disable smp mode + */ + apic_version[new_apicid] = + GET_APIC_VERSION(apic_read(APIC_LVR)); + } +} + +void __init register_lapic_address(unsigned long address) +{ + mp_lapic_addr = address; + + if (!x2apic_mode) { +#if 0 + set_fixmap_nocache(FIX_APIC_BASE, address); +#endif + apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n", + APIC_BASE, mp_lapic_addr); + } + if (boot_cpu_physical_apicid == -1U) { + boot_cpu_physical_apicid = read_apic_id(); + apic_version[boot_cpu_physical_apicid] = + GET_APIC_VERSION(apic_read(APIC_LVR)); + } +} + +/* + * This initializes the IO-APIC and APIC hardware if this is + * a UP kernel. + */ +int apic_version[MAX_LOCAL_APIC]; + +#if 0 +int __init APIC_init_uniprocessor(void) +{ + if (disable_apic) { + pr_info("Apic disabled\n"); + return -1; + } +#ifdef CONFIG_L_X86_64 + if (!cpu_has_apic) { + disable_apic = 1; + pr_info("Apic disabled by BIOS\n"); + return -1; + } +#else + if (!smp_found_config && !cpu_has_apic) + return -1; + + /* + * Complain if the BIOS pretends there is one. + */ + if (!cpu_has_apic && + APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { + pr_err("BIOS bug, local APIC 0x%x not detected!...\n", + boot_cpu_physical_apicid); + return -1; + } +#endif + + default_setup_apic_routing(); + + verify_local_APIC(); + connect_bsp_APIC(); + +#ifdef CONFIG_L_X86_64 + apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid)); +#else + /* + * Hack: In case of kdump, after a crash, kernel might be booting + * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid + * might be zero if read from MP tables. Get it from LAPIC. 
+ */ +# ifdef CONFIG_CRASH_DUMP + boot_cpu_physical_apicid = read_apic_id(); +# endif +#endif + physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); + setup_local_APIC(); + +#ifdef CONFIG_X86_IO_APIC + /* + * Now enable IO-APICs, actually call clear_IO_APIC + * We need clear_IO_APIC before enabling error vector + */ + if (!skip_ioapic_setup && nr_ioapics) + enable_IO_APIC(); +#endif + + bsp_end_local_APIC_setup(); + +#ifdef CONFIG_X86_IO_APIC + if (smp_found_config && !skip_ioapic_setup && nr_ioapics) + setup_IO_APIC(); + else { + nr_ioapics = 0; + } +#endif + + x86_init.timers.setup_percpu_clockev(); + return 0; +} +#endif + +/* + * Local APIC interrupts + */ + +/* + * This interrupt should _never_ happen with our APIC/SMP architecture + */ +static inline void __smp_spurious_interrupt(void) +{ + u32 v; + + /* + * Check if this really is a spurious interrupt and ACK it + * if it is a vectored one. Just in case... + * Spurious interrupts should not be ACKed. + */ + v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1)); + if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f))) + ack_APIC_irq(); + + inc_irq_stat(irq_spurious_count); + + /* see sw-dev-man vol 3, chapter 7.4.13.5 */ + pr_info("spurious APIC interrupt on CPU#%d, " + "should never happen.\n", smp_processor_id()); +} + +__visible void smp_spurious_interrupt(struct pt_regs *regs) +{ + entering_irq(); + __smp_spurious_interrupt(); + exiting_irq(); +} + +#if 0 +__visible void smp_trace_spurious_interrupt(struct pt_regs *regs) +{ + entering_irq(); + trace_spurious_apic_entry(SPURIOUS_APIC_VECTOR); + __smp_spurious_interrupt(); + trace_spurious_apic_exit(SPURIOUS_APIC_VECTOR); + exiting_irq(); +} +#endif + +/* + * This interrupt should never happen with our APIC/SMP architecture + */ +static inline void __smp_error_interrupt(void) +{ + u32 v; + u32 i = 0; + static const char * const error_interrupt_reason[] = { + "Send CS error", /* APIC Error Bit 0 */ + "Receive CS error", /* APIC Error 
Bit 1 */ + "Send accept error", /* APIC Error Bit 2 */ + "Receive accept error", /* APIC Error Bit 3 */ + "Redirectable IPI", /* APIC Error Bit 4 */ + "Send illegal vector", /* APIC Error Bit 5 */ + "Received illegal vector", /* APIC Error Bit 6 */ + "Illegal register address", /* APIC Error Bit 7 */ + }; + + /* First tickle the hardware, only then report what went on. -- REW */ + apic_write(APIC_ESR, 0); + v = apic_read(APIC_ESR); + ack_APIC_irq(); + atomic_inc(&irq_err_count); + + apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x", + smp_processor_id(), v); + + v = v & 0xff; + while (v) { + if (v & 0x1) + apic_printk(APIC_DEBUG, KERN_CONT " : %s", error_interrupt_reason[i]); + i++; + v >>= 1; + } + + apic_printk(APIC_DEBUG, KERN_CONT "\n"); +} + +__visible void smp_error_interrupt(struct pt_regs *regs) +{ + entering_irq(); + __smp_error_interrupt(); + exiting_irq(); +} + +#if 0 +__visible void smp_trace_error_interrupt(struct pt_regs *regs) +{ + entering_irq(); + trace_error_apic_entry(ERROR_APIC_VECTOR); + __smp_error_interrupt(); + trace_error_apic_exit(ERROR_APIC_VECTOR); + exiting_irq(); +} +#endif + +/** + * connect_bsp_APIC - attach the APIC to the interrupt system + */ +void __init_recv connect_bsp_APIC(void) +{ +#ifdef CONFIG_L_X86_32 + if (pic_mode) { + /* + * Do not trust the local APIC being empty at bootup. + */ + clear_local_APIC(); + /* + * PIC mode, enable APIC mode in the IMCR, i.e. connect BSP's + * local APIC to INT and NMI lines. + */ + apic_printk(APIC_VERBOSE, "leaving PIC mode, " + "enabling APIC mode.\n"); + imcr_pic_to_apic(); + } +#endif + if (apic->enable_apic_mode) + apic->enable_apic_mode(); +} + +/** + * disconnect_bsp_APIC - detach the APIC from the interrupt system + * @virt_wire_setup: indicates, whether virtual wire mode is selected + * + * Virtual wire mode is necessary to deliver legacy interrupts even when the + * APIC is disabled. 
+ */ +void disconnect_bsp_APIC(int virt_wire_setup) +{ + unsigned int value; + +#ifdef CONFIG_L_X86_32 + if (pic_mode) { + /* + * Put the board back into PIC mode (has an effect only on + * certain older boards). Note that APIC interrupts, including + * IPIs, won't work beyond this point! The only exception are + * INIT IPIs. + */ + apic_printk(APIC_VERBOSE, "disabling APIC mode, " + "entering PIC mode.\n"); + imcr_apic_to_pic(); + return; + } +#endif + + /* Go back to Virtual Wire compatibility mode */ + + /* For the spurious interrupt use vector F, and enable it */ + value = apic_read(APIC_SPIV); + value &= ~APIC_VECTOR_MASK; + value |= APIC_SPIV_APIC_ENABLED; + value |= 0xf; + apic_write(APIC_SPIV, value); + + if (!virt_wire_setup) { + /* + * For LVT0 make it edge triggered, active high, + * external and enabled + */ + value = apic_read(APIC_LVT0); + value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING | + APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | + APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED); + value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING; + value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT); + apic_write(APIC_LVT0, value); + } else { + /* Disable LVT0 */ + apic_write(APIC_LVT0, APIC_LVT_MASKED); + } + + /* + * For LVT1 make it edge triggered, active high, + * nmi and enabled + */ + value = apic_read(APIC_LVT1); + value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING | + APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | + APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED); + value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING; + value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI); + apic_write(APIC_LVT1, value); +} + +/* + * The number of allocated logical CPU IDs. Since logical CPU IDs are allocated + * contiguously, it equals to current allocated max logical CPU ID plus 1. + * All allocated CPU IDs should be in the [0, nr_logical_cpuids) range, + * so the maximum of nr_logical_cpuids is nr_cpu_ids. + * + * NOTE: Reserve 0 for BSP. 
+ */ +static int nr_logical_cpuids = 1; + +/* + * Used to store mapping between logical CPU IDs and APIC IDs. + */ +static int cpuid_to_apicid[] = { + [0 ... NR_CPUS - 1] = -1, +}; + +/* + * Should use this API to allocate logical CPU IDs to keep nr_logical_cpuids + * and cpuid_to_apicid[] synchronized. + */ +static int allocate_logical_cpuid(int apicid) +{ + int i; + + /* + * cpuid <-> apicid mapping is persistent, so when a cpu is up, + * check if the kernel has allocated a cpuid for it. + */ + for (i = 0; i < nr_logical_cpuids; i++) { + if (cpuid_to_apicid[i] == apicid) + return i; + } + + /* Allocate a new cpuid. */ + if (nr_logical_cpuids >= nr_cpu_ids) { + WARN_ONCE(1, "APIC: NR_CPUS/possible_cpus limit of %u reached. " + "Processor %d/0x%x and the rest are ignored.\n", + nr_cpu_ids, nr_logical_cpuids, apicid); + return -EINVAL; + } + + cpuid_to_apicid[nr_logical_cpuids] = apicid; + return nr_logical_cpuids++; +} + +int generic_processor_info(int apicid, int version) +{ + int cpu, max = nr_cpu_ids; + bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid, + phys_cpu_present_map); + + /* + * boot_cpu_physical_apicid is designed to have the apicid + * returned by read_apic_id(), i.e, the apicid of the + * currently booting-up processor. However, on some platforms, + * it is temporarily modified by the apicid reported as BSP + * through MP table. Concretely: + * + * - arch/x86/kernel/mpparse.c: MP_processor_info() + * - arch/x86/mm/amdtopology.c: amd_numa_init() + * - arch/x86/platform/visws/visws_quirks.c: MP_processor_info() + * + * This function is executed with the modified + * boot_cpu_physical_apicid. So, disabled_cpu_apicid kernel + * parameter doesn't work to disable APs on kdump 2nd kernel. + * + * Since fixing handling of boot_cpu_physical_apicid requires + * another discussion and tests on each platform, we leave it + * for now and here we use read_apic_id() directly in this + * function, generic_processor_info(). 
+ */ + if (disabled_cpu_apicid != BAD_APICID && + disabled_cpu_apicid != read_apic_id() && + disabled_cpu_apicid == apicid) { + int thiscpu = num_processors + disabled_cpus; + + pr_warning("APIC: Disabling requested cpu." + " Processor %d/0x%x ignored.\n", + thiscpu, apicid); + + disabled_cpus++; + return -ENODEV; + } + + /* + * If boot cpu has not been detected yet, then only allow upto + * nr_cpu_ids - 1 processors and keep one slot free for boot cpu + */ + if (!boot_cpu_detected && num_processors >= nr_cpu_ids - 1 && + apicid != boot_cpu_physical_apicid) { + int thiscpu = max + disabled_cpus - 1; + + pr_warning( + "ACPI: NR_CPUS/possible_cpus limit of %i almost" + " reached. Keeping one slot for boot cpu." + " Processor %d/0x%x ignored.\n", max, thiscpu, apicid); + + disabled_cpus++; + return -ENODEV; + } + + if (num_processors >= nr_cpu_ids) { + int thiscpu = max + disabled_cpus; + + pr_warning( + "ACPI: NR_CPUS/possible_cpus limit of %i reached." + " Processor %d/0x%x ignored.\n", max, thiscpu, apicid); + + disabled_cpus++; + return -EINVAL; + } + + num_processors++; + + if (apicid == boot_cpu_physical_apicid) { + /* Logical cpuid 0 is reserved for BSP. 
*/ + cpu = 0; + cpuid_to_apicid[0] = apicid; + } else { + cpu = allocate_logical_cpuid(apicid); + } + + /* + * Validate version + */ + if (version == 0x0) { + pr_warning("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n", + cpu, apicid); + version = APIC_VERSION; + } + apic_version[apicid] = version; + + if (version != apic_version[boot_cpu_physical_apicid]) { + pr_warning("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n", + apic_version[boot_cpu_physical_apicid], cpu, version); + } + + physid_set(apicid, phys_cpu_present_map); + if (apicid > max_physical_apicid) + max_physical_apicid = apicid; + +#if defined(CONFIG_SMP) || defined(CONFIG_L_X86_64) + early_per_cpu(x86_cpu_to_apicid, cpu) = apicid; + early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid; +#endif +#ifdef CONFIG_L_X86_32 + early_per_cpu(x86_cpu_to_logical_apicid, cpu) = + apic->x86_32_early_logical_apicid(cpu); +#endif + set_cpu_possible(cpu, true); + set_cpu_present(cpu, true); + + return cpu; +} + +#if 0 +void default_init_apic_ldr(void) +{ + unsigned long val; + + apic_write(APIC_DFR, APIC_DFR_VALUE); + val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; + val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); + apic_write(APIC_LDR, val); +} +#endif + +int default_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask, + unsigned int *apicid) +{ + unsigned int cpu; + + for_each_cpu_and(cpu, cpumask, andmask) { + if (cpumask_test_cpu(cpu, cpu_online_mask)) + break; + } + + if (likely(cpu < nr_cpu_ids)) { + *apicid = per_cpu(x86_cpu_to_apicid, cpu); + return 0; + } + + return -EINVAL; +} + +/* + * Override the generic EOI implementation with an optimized version. + * Only called during early boot when only one CPU is active and with + * interrupts disabled, so we know this does not race with actual APIC driver + * use. 
+ */ +void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)) +{ + struct apic **drv; + + for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) { + /* Should happen once for each apic */ + WARN_ON((*drv)->eoi_write == eoi_write); + (*drv)->eoi_write = eoi_write; + } +} + +/* + * Power management + */ +#ifdef CONFIG_PM + +static struct { + /* + * 'active' is true if the local APIC was enabled by us and + * not the BIOS; this signifies that we are also responsible + * for disabling it before entering apm/acpi suspend + */ + int active; + /* r/w apic fields */ + unsigned int apic_id; + unsigned int apic_taskpri; + unsigned int apic_ldr; + unsigned int apic_dfr; + unsigned int apic_spiv; + unsigned int apic_lvtt; + unsigned int apic_lvtpc; + unsigned int apic_lvt0; + unsigned int apic_lvt1; + unsigned int apic_lvterr; + unsigned int apic_tmict; + unsigned int apic_tdcr; + unsigned int apic_thmr; +} apic_pm_state; + +static int lapic_suspend(void) +{ + unsigned long flags; + int maxlvt; + + if (!apic_pm_state.active) + return 0; + + maxlvt = lapic_get_maxlvt(); + + apic_pm_state.apic_id = apic_read(APIC_ID); + apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI); + apic_pm_state.apic_ldr = apic_read(APIC_LDR); + apic_pm_state.apic_dfr = apic_read(APIC_DFR); + apic_pm_state.apic_spiv = apic_read(APIC_SPIV); + apic_pm_state.apic_lvtt = apic_read(APIC_LVTT); + if (maxlvt >= 4) + apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC); + apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0); + apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1); + apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR); + apic_pm_state.apic_tmict = apic_read(APIC_TMICT); + apic_pm_state.apic_tdcr = apic_read(APIC_TDCR); +#ifdef CONFIG_X86_THERMAL_VECTOR + if (maxlvt >= 5) + apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR); +#endif + + local_irq_save(flags); + mask_ioapic_entries(); + disable_local_APIC(); + local_irq_restore(flags); + + return 0; +} + +static void lapic_resume(void) +{ +#if 0 + 
unsigned int l, h; +#endif + unsigned long flags; + int maxlvt; + + if (!apic_pm_state.active) + return; + + local_irq_save(flags); + + /* + * IO-APIC and PIC have their own resume routines. + * We just mask them here to make sure the interrupt + * subsystem is completely quiet while we enable x2apic + * and interrupt-remapping. + */ + mask_ioapic_entries(); +#if 0 + legacy_pic->mask_all(); +#endif + + if (x2apic_mode) + enable_x2apic(); +#if 0 + else { + /* + * Make sure the APICBASE points to the right address + * + * FIXME! This will be wrong if we ever support suspend on + * SMP! We'll need to do this as part of the CPU restore! + */ + if (boot_cpu_data.x86 >= 6) { + rdmsr(MSR_IA32_APICBASE, l, h); + l &= ~MSR_IA32_APICBASE_BASE; + l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; + wrmsr(MSR_IA32_APICBASE, l, h); + } + } +#endif + + maxlvt = lapic_get_maxlvt(); + apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED); + apic_write(APIC_ID, apic_pm_state.apic_id); + apic_write(APIC_DFR, apic_pm_state.apic_dfr); + apic_write(APIC_LDR, apic_pm_state.apic_ldr); + apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri); + apic_write(APIC_SPIV, apic_pm_state.apic_spiv); + apic_write(APIC_LVT0, apic_pm_state.apic_lvt0); + apic_write(APIC_LVT1, apic_pm_state.apic_lvt1); +#if defined(CONFIG_X86_MCE_INTEL) + if (maxlvt >= 5) + apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr); +#endif + if (maxlvt >= 4) + apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc); + apic_write(APIC_LVTT, apic_pm_state.apic_lvtt); + apic_write(APIC_TDCR, apic_pm_state.apic_tdcr); + apic_write(APIC_TMICT, apic_pm_state.apic_tmict); + apic_write(APIC_ESR, 0); + apic_read(APIC_ESR); + apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr); + apic_write(APIC_ESR, 0); + apic_read(APIC_ESR); + +#if 0 + irq_remapping_reenable(x2apic_mode); +#endif + + local_irq_restore(flags); +} + +/* + * This device has no shutdown method - fully functioning local APICs + * are needed on every CPU up until 
machine_halt/restart/poweroff. + */ + +static struct syscore_ops lapic_syscore_ops = { + .resume = lapic_resume, + .suspend = lapic_suspend, +}; + +static void apic_pm_activate(void) +{ + apic_pm_state.active = 1; +} + +static int __init init_lapic_sysfs(void) +{ + /* XXX: remove suspend/resume procs if !apic_pm_state.active? */ + if (cpu_has_apic) + register_syscore_ops(&lapic_syscore_ops); + + return 0; +} + +/* local apic needs to resume before other devices access its registers. */ +core_initcall(init_lapic_sysfs); + +#else /* CONFIG_PM */ + +static void apic_pm_activate(void) { } + +#endif /* CONFIG_PM */ + +#if 0 +#ifdef CONFIG_L_X86_64 + +static int apic_cluster_num(void) +{ + int i, clusters, zeros; + unsigned id; + u16 *bios_cpu_apicid; + DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS); + + bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid); + bitmap_zero(clustermap, NUM_APIC_CLUSTERS); + + for (i = 0; i < nr_cpu_ids; i++) { + /* are we being called early in kernel startup? */ + if (bios_cpu_apicid) { + id = bios_cpu_apicid[i]; + } else if (i < nr_cpu_ids) { + if (cpu_present(i)) + id = per_cpu(x86_bios_cpu_apicid, i); + else + continue; + } else + break; + + if (id != BAD_APICID) + __set_bit(APIC_CLUSTERID(id), clustermap); + } + + /* Problem: Partially populated chassis may not have CPUs in some of + * the APIC clusters they have been allocated. Only present CPUs have + * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap. + * Since clusters are allocated sequentially, count zeros only if + * they are bounded by ones. 
+ */ + clusters = 0; + zeros = 0; + for (i = 0; i < NUM_APIC_CLUSTERS; i++) { + if (test_bit(i, clustermap)) { + clusters += 1 + zeros; + zeros = 0; + } else + ++zeros; + } + + return clusters; +} + +static int multi_checked; +static int multi; + +static int set_multi(const struct dmi_system_id *d) +{ + if (multi) + return 0; + pr_info("APIC: %s detected, Multi Chassis\n", d->ident); + multi = 1; + return 0; +} + +static const struct dmi_system_id multi_dmi_table[] = { + { + .callback = set_multi, + .ident = "IBM System Summit2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "IBM"), + DMI_MATCH(DMI_PRODUCT_NAME, "Summit2"), + }, + }, + {} +}; + +static void dmi_check_multi(void) +{ + if (multi_checked) + return; + + dmi_check_system(multi_dmi_table); + multi_checked = 1; +} + +/* + * apic_is_clustered_box() -- Check if we can expect good TSC + * + * Thus far, the major user of this is IBM's Summit2 series: + * Clustered boxes may have unsynced TSC problems if they are + * multi-chassis. + * Use DMI to check them + */ +int apic_is_clustered_box(void) +{ + dmi_check_multi(); + if (multi) + return 1; + + if (!is_vsmp_box()) + return 0; + + /* + * ScaleMP vSMPowered boxes have one cluster per board and TSCs are + * not guaranteed to be synced between boards + */ + if (apic_cluster_num() > 1) + return 1; + + return 0; +} +#endif +#endif + +/* + * APIC command line parameters + */ +#if 0 +static int __init setup_disableapic(char *arg) +{ + disable_apic = 1; + setup_clear_cpu_cap(X86_FEATURE_APIC); + return 0; +} +early_param("disableapic", setup_disableapic); + +/* same as disableapic, for compatibility */ +static int __init setup_nolapic(char *arg) +{ + return setup_disableapic(arg); +} +early_param("nolapic", setup_nolapic); + +static int __init parse_lapic_timer_c2_ok(char *arg) +{ + local_apic_timer_c2_ok = 1; + return 0; +} +early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok); +#endif + +static int __init parse_disable_apic_timer(char *arg) +{ + 
disable_apic_timer = true; + return 0; +} +early_param("noapictimer", parse_disable_apic_timer); + +static int __init parse_nolapic_timer(char *arg) +{ + disable_apic_timer = true; + return 0; +} +early_param("nolapic_timer", parse_nolapic_timer); + +static int __init apic_set_verbosity(char *arg) +{ + if (!arg) { +#ifdef CONFIG_L_X86_64 + skip_ioapic_setup = 0; + return 0; +#endif + return -EINVAL; + } + + if (strcmp("debug", arg) == 0) + apic_verbosity = APIC_DEBUG; + else if (strcmp("verbose", arg) == 0) + apic_verbosity = APIC_VERBOSE; + else { + pr_warning("APIC Verbosity level %s not recognised" + " use apic=verbose or apic=debug\n", arg); + return -EINVAL; + } + + return 0; +} +early_param("apic", apic_set_verbosity); + +static int __init lapic_insert_resource(void) +{ + if (!apic_phys) + return -1; + + /* Put local APIC into the resource map. */ + lapic_resource.start = apic_phys; + lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1; + insert_resource(&iomem_resource, &lapic_resource); + + return 0; +} + +/* + * need call insert after e820_reserve_resources() + * that is using request_resource + */ +late_initcall(lapic_insert_resource); + +static int __init apic_set_disabled_cpu_apicid(char *arg) +{ + if (!arg || !get_option(&arg, &disabled_cpu_apicid)) + return -EINVAL; + + return 0; +} +early_param("disable_cpu_apicid", apic_set_disabled_cpu_apicid); diff --git a/arch/l/kernel/apic/apic_flat_64.c b/arch/l/kernel/apic/apic_flat_64.c new file mode 100644 index 000000000000..f13ad80fe48f --- /dev/null +++ b/arch/l/kernel/apic/apic_flat_64.c @@ -0,0 +1,379 @@ +/* + * Copyright 2004 James Cleverdon, IBM. + * Subject to the GNU Public License, v.2 + * + * Flat APIC subarch code. + * + * Hacked for x86-64 by James Cleverdon from i386 architecture code by + * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and + * James Cleverdon. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static struct apic apic_physflat; +static struct apic apic_flat; + +struct apic __read_mostly *apic = &apic_physflat; +EXPORT_SYMBOL_GPL(apic); + +static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ + return 1; +} + +/* + * Set up the logical destination ID. + * + * Intel recommends to set DFR, LDR and TPR before enabling + * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel + * document number 292116). So here it goes... + */ +void flat_init_apic_ldr(void) +{ + unsigned long val; + unsigned long num, id; + + num = smp_processor_id(); + id = 1UL << num; + apic_write(APIC_DFR, APIC_DFR_FLAT); + val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; + val |= SET_APIC_LOGICAL_ID(id); + apic_write(APIC_LDR, val); +} + +static inline void _flat_send_IPI_mask(unsigned long mask, int vector) +{ + unsigned long flags; + + local_irq_save(flags); + __default_send_IPI_dest_field(mask, vector, apic->dest_logical); + local_irq_restore(flags); +} + +static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector) +{ + unsigned long mask = cpumask_bits(cpumask)[0]; + + _flat_send_IPI_mask(mask, vector); +} + +static void +flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) +{ + unsigned long mask = cpumask_bits(cpumask)[0]; + int cpu = smp_processor_id(); + + if (cpu < BITS_PER_LONG) + clear_bit(cpu, &mask); + + _flat_send_IPI_mask(mask, vector); +} + +static void flat_send_IPI_allbutself(int vector) +{ + int cpu = smp_processor_id(); +#ifdef CONFIG_HOTPLUG_CPU + int hotplug = 1; +#else + int hotplug = 0; +#endif + if (hotplug || vector == NMI_VECTOR) { + if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) { + unsigned long mask = cpumask_bits(cpu_online_mask)[0]; + + if (cpu < BITS_PER_LONG) + clear_bit(cpu, &mask); + + _flat_send_IPI_mask(mask, vector); + } + } else if (num_online_cpus() > 1) { + 
__default_send_IPI_shortcut(APIC_DEST_ALLBUT, + vector, apic->dest_logical); + } +} + +static void flat_send_IPI_all(int vector) +{ + if (vector == NMI_VECTOR) { + flat_send_IPI_mask(cpu_online_mask, vector); + } else { + __default_send_IPI_shortcut(APIC_DEST_ALLINC, + vector, apic->dest_logical); + } +} + +static unsigned int flat_get_apic_id(unsigned long x) +{ + unsigned int id; + + id = (((x)>>24) & 0xFFu); + + return id; +} + +static unsigned long set_apic_id(unsigned int id) +{ + unsigned long x; + + x = ((id & 0xFFu)<<24); + return x; +} + +static unsigned int read_xapic_id(void) +{ + unsigned int id; + + id = flat_get_apic_id(apic_read(APIC_ID)); + return id; +} + +static int flat_apic_id_registered(void) +{ + return physid_isset(read_xapic_id(), phys_cpu_present_map); +} + +static int flat_phys_pkg_id(int initial_apic_id, int index_msb) +{ + return initial_apic_id >> index_msb; +} + +static int flat_probe(void) +{ +#if defined(CONFIG_E2K) || defined(CONFIG_E90S) + /* On e90s and e2k machines flat logical addresation + * still is not supported (bug #35585, bug #57594). + * + * IMPORTANT! There is another bug on some APICs: if + * interrupts arrive on non-boot CPU we might lose them. + * Physical addressation by default delivers all interrupts + * to the boot CPU, but logical flat addressation requires + * an explicit workaround. 
(bugs #46675, #48128) */ + return 0; +#else + return 1; +#endif +} + +static struct apic apic_flat = { + .name = "flat", + .probe = flat_probe, + .acpi_madt_oem_check = flat_acpi_madt_oem_check, + .apic_id_valid = default_apic_id_valid, + .apic_id_registered = flat_apic_id_registered, + + .irq_delivery_mode = dest_LowestPrio, + .irq_dest_mode = 1, /* logical */ + + .target_cpus = online_target_cpus, + .disable_esr = 0, + .dest_logical = APIC_DEST_LOGICAL, +#if defined CONFIG_E2K || defined CONFIG_E90S + .check_apicid_used = default_check_apicid_used, +#else + .check_apicid_used = NULL, +#endif + .check_apicid_present = NULL, + + .vector_allocation_domain = flat_vector_allocation_domain, + .init_apic_ldr = flat_init_apic_ldr, +#if defined CONFIG_E2K || defined CONFIG_E90S + .ioapic_phys_id_map = default_ioapic_phys_id_map, +#else + .ioapic_phys_id_map = NULL, +#endif + .setup_apic_routing = NULL, + .multi_timer_check = NULL, + .cpu_present_to_apicid = default_cpu_present_to_apicid, +#if defined CONFIG_E2K || defined CONFIG_E90S + .apicid_to_cpu_present = physid_set_mask_of_physid, +#else + .apicid_to_cpu_present = NULL, +#endif + .setup_portio_remap = NULL, + .check_phys_apicid_present = default_check_phys_apicid_present, + .enable_apic_mode = NULL, + .phys_pkg_id = flat_phys_pkg_id, + .mps_oem_check = NULL, + + .get_apic_id = flat_get_apic_id, + .set_apic_id = set_apic_id, + .apic_id_mask = 0xFFu << 24, + + .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and, + + .send_IPI_mask = flat_send_IPI_mask, + .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself, + .send_IPI_allbutself = flat_send_IPI_allbutself, + .send_IPI_all = flat_send_IPI_all, + .send_IPI_self = apic_send_IPI_self, + + .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, + .wait_for_init_deassert = NULL, + .smp_callin_clear_local_apic = NULL, + .inquire_remote_apic = default_inquire_remote_apic, + + .read = native_apic_mem_read, + .write = 
native_apic_mem_write, + .eoi_write = native_apic_mem_write, + .icr_read = native_apic_icr_read, + .icr_write = native_apic_icr_write, + .wait_icr_idle = native_apic_wait_icr_idle, + .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, +}; + +/* + * Physflat mode is used when there are more than 8 CPUs on a system. + * We cannot use logical delivery in this case because the mask + * overflows, so use physical mode. + */ +static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ +#ifdef CONFIG_ACPI + /* + * Quirk: some x86_64 machines can only use physical APIC mode + * regardless of how many processors are present (x86_64 ES7000 + * is an example). + */ + if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID && + (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) { + printk(KERN_DEBUG "system APIC only can use physical flat"); + return 1; + } + + if (!strncmp(oem_id, "IBM", 3) && !strncmp(oem_table_id, "EXA", 3)) { + printk(KERN_DEBUG "IBM Summit detected, will use apic physical"); + return 1; + } +#endif + + return 0; +} + +static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector) +{ + default_send_IPI_mask_sequence_phys(cpumask, vector); +} + +static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask, + int vector) +{ + default_send_IPI_mask_allbutself_phys(cpumask, vector); +} + +static void physflat_send_IPI_allbutself(int vector) +{ + default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector); +} + +static void physflat_send_IPI_all(int vector) +{ + physflat_send_IPI_mask(cpu_online_mask, vector); +} + +static int physflat_probe(void) +{ + +#if defined CONFIG_E2K || defined CONFIG_E90S + return 1; +#else + if (apic == &apic_physflat || num_possible_cpus() > 8) + return 1; + +#endif + return 0; +} + +#if defined CONFIG_E2K || defined CONFIG_E90S +static void noop_init_apic_ldr(void) { } +#endif + +static struct apic apic_physflat = { + + .name = "physical flat", + .probe = physflat_probe, + 
.acpi_madt_oem_check = physflat_acpi_madt_oem_check, + .apic_id_valid = default_apic_id_valid, + .apic_id_registered = flat_apic_id_registered, + + .irq_delivery_mode = dest_Fixed, + .irq_dest_mode = 0, /* physical */ + + .target_cpus = online_target_cpus, + .disable_esr = 0, + .dest_logical = 0, +#if defined CONFIG_E2K || defined CONFIG_E90S + .check_apicid_used = default_check_apicid_used, +#else + .check_apicid_used = NULL, +#endif + .check_apicid_present = NULL, + + .vector_allocation_domain = default_vector_allocation_domain, +#if defined CONFIG_E2K || defined CONFIG_E90S + .init_apic_ldr = noop_init_apic_ldr, + + .ioapic_phys_id_map = default_ioapic_phys_id_map, +#else + /* not needed, but shouldn't hurt: */ + .init_apic_ldr = flat_init_apic_ldr, + + .ioapic_phys_id_map = NULL, +#endif + .setup_apic_routing = NULL, + .multi_timer_check = NULL, + .cpu_present_to_apicid = default_cpu_present_to_apicid, +#if defined CONFIG_E2K || defined CONFIG_E90S + .apicid_to_cpu_present = physid_set_mask_of_physid, +#else + .apicid_to_cpu_present = NULL, +#endif + .setup_portio_remap = NULL, + .check_phys_apicid_present = default_check_phys_apicid_present, + .enable_apic_mode = NULL, + .phys_pkg_id = flat_phys_pkg_id, + .mps_oem_check = NULL, + + .get_apic_id = flat_get_apic_id, + .set_apic_id = set_apic_id, + .apic_id_mask = 0xFFu << 24, + + .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and, + + .send_IPI_mask = physflat_send_IPI_mask, + .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself, + .send_IPI_allbutself = physflat_send_IPI_allbutself, + .send_IPI_all = physflat_send_IPI_all, + .send_IPI_self = apic_send_IPI_self, + + .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, + .wait_for_init_deassert = NULL, + .smp_callin_clear_local_apic = NULL, + .inquire_remote_apic = default_inquire_remote_apic, + + .read = native_apic_mem_read, + .write = native_apic_mem_write, + .eoi_write = 
native_apic_mem_write, + .icr_read = native_apic_icr_read, + .icr_write = native_apic_icr_write, + .wait_icr_idle = native_apic_wait_icr_idle, + .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, +}; + +/* + * We need to check for physflat first, so this order is important. + */ +apic_drivers(apic_physflat, apic_flat); diff --git a/arch/l/kernel/apic/io_apic.c b/arch/l/kernel/apic/io_apic.c new file mode 100644 index 000000000000..8974301d62a6 --- /dev/null +++ b/arch/l/kernel/apic/io_apic.c @@ -0,0 +1,4591 @@ +/* + * Intel IO-APIC support for multi-Pentium hosts. + * + * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo + * + * Many thanks to Stig Venaas for trying out countless experimental + * patches and reporting/debugging problems patiently! + * + * (c) 1999, Multiple IO-APIC support, developed by + * Ken-ichi Yaku and + * Hidemi Kishimoto , + * further tested and cleaned up by Zach Brown + * and Ingo Molnar + * + * Fixes + * Maciej W. Rozycki : Bits for genuine 82489DX APICs; + * thanks to Eric Gilmore + * and Rolf G. Tews + * for testing these extensively + * Paul Diefenbaugh : Added full ACPI support + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* time_after() */ +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#if 0 +#include +#include +#include +#include +#include +#include +#include +#endif +#include +#include +#ifdef CONFIG_PIC +#include +#endif +#include +#include + +#include +#include + +#include +#include +#include +#include + +#ifdef CONFIG_E2K +#include +#endif + +#ifdef CONFIG_E90S +#include +# define acpi_ioapic 0 +#endif + +#define __apicdebuginit(type) static type + +#define for_each_irq_pin(entry, head) \ + for (entry = head; entry; entry = entry->next) + +#if 0 +/* + * Is the SiS APIC rmw bug present ? 
+ * -1 = don't know, 0 = no, 1 = yes + */ +int sis_apic_bug = -1; +#endif + +static DEFINE_RAW_SPINLOCK(ioapic_lock); +static DEFINE_RAW_SPINLOCK(vector_lock); + +DECLARE_BITMAP(used_vectors, NR_VECTORS); + +static struct ioapic { + /* + * # of IRQ routing registers + */ + int nr_registers; + /* + * Saved state during suspend/resume, or while enabling intr-remap. + */ + struct IO_APIC_route_entry *saved_registers; + /* I/O APIC config */ + struct mpc_ioapic mp_config; + /* IO APIC gsi routing info */ + struct mp_ioapic_gsi gsi_config; + DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1); +} ioapics[MAX_IO_APICS]; + +#define mpc_ioapic_ver(ioapic_idx) ioapics[ioapic_idx].mp_config.apicver + +int mpc_ioapic_id(int ioapic_idx) +{ + return ioapics[ioapic_idx].mp_config.apicid; +} + +unsigned long mpc_ioapic_addr(int ioapic_idx) +{ + return ioapics[ioapic_idx].mp_config.apicaddr; +} + +struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic_idx) +{ + return &ioapics[ioapic_idx].gsi_config; +} + +int nr_ioapics; + +/* The one past the highest gsi number used */ +u32 gsi_top; + +/* MP IRQ source entries */ +struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES]; + +/* # of MP IRQ source entries */ +int mp_irq_entries; + +/* GSI interrupts */ +static int nr_irqs_gsi = NR_IRQS_LEGACY; + +#ifdef CONFIG_EISA +int mp_bus_id_to_type[MAX_MP_BUSSES]; +#endif + +DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); + +int skip_ioapic_setup; + +/** + * disable_ioapic_support() - disables ioapic support at runtime + */ +void disable_ioapic_support(void) +{ +#ifdef CONFIG_PCI + noioapicquirk = 1; + noioapicreroute = -1; +#endif + skip_ioapic_setup = 1; +} + +#if 0 +static int __init parse_noapic(char *str) +{ + /* disable IO-APIC */ + disable_ioapic_support(); + return 0; +} +early_param("noapic", parse_noapic); +#endif +#ifdef CONFIG_E2K +int e2k_msi_disabled = 0; +#endif + +/* Will be called in mpparse/acpi/sfi codes for saving IRQ info */ +void mp_save_irq(struct mpc_intsrc *m) +{ + int i; + + 
apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x," + " IRQ %02x, APIC ID %x, APIC INT %02x\n", + m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus, + m->srcbusirq, m->dstapic, m->dstirq); + + for (i = 0; i < mp_irq_entries; i++) { + if (!memcmp(&mp_irqs[i], m, sizeof(*m))) + return; + } + + memcpy(&mp_irqs[mp_irq_entries], m, sizeof(*m)); + if (++mp_irq_entries == MAX_IRQ_SOURCES) + panic("Max # of irq sources exceeded!!\n"); +} + +struct irq_pin_list { + int apic, pin; + struct irq_pin_list *next; +}; + +static struct irq_pin_list *alloc_irq_pin_list(int node) +{ + return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node); +} + + +/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ +static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY]; + +int __init arch_early_irq_init(void) +{ + struct irq_cfg *cfg; + int count, node, i; + +#ifdef CONFIG_PIC + if (!legacy_pic->nr_legacy_irqs) + io_apic_irqs = ~0UL; +#endif + + for (i = 0; i < nr_ioapics; i++) { + ioapics[i].saved_registers = + kzalloc(sizeof(struct IO_APIC_route_entry) * + ioapics[i].nr_registers, GFP_KERNEL); + if (!ioapics[i].saved_registers) + pr_err("IOAPIC %d: suspend/resume impossible!\n", i); + } + + cfg = irq_cfgx; + count = ARRAY_SIZE(irq_cfgx); +#if 0 + node = cpu_to_node(0); +#else + node = cpu_to_node(boot_cpu_physical_apicid); +#endif + +#ifdef CONFIG_PIC + /* Make sure the legacy interrupts are marked in the bitmap */ + irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs); +#endif + + for (i = 0; i < count; i++) { + irq_set_chip_data(i, &cfg[i]); + zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node); + zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node); +#ifdef CONFIG_PIC + /* + * For legacy IRQ's, start with assigning irq0 to irq15 to + * IRQ0_VECTOR to IRQ15_VECTOR for all cpu's. 
+ */ + if (i < legacy_pic->nr_legacy_irqs) { + cfg[i].vector = IRQ0_VECTOR + i; + cpumask_setall(cfg[i].domain); + } +#endif + } + + return 0; +} + +static struct irq_cfg *irq_cfg(unsigned int irq) +{ + return irq_get_chip_data(irq); +} + +static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) +{ + struct irq_cfg *cfg; + + cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node); + if (!cfg) + return NULL; + if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node)) + goto out_cfg; + if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node)) + goto out_domain; + return cfg; +out_domain: + free_cpumask_var(cfg->domain); +out_cfg: + kfree(cfg); + return NULL; +} + +static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) +{ + if (!cfg) + return; + irq_set_chip_data(at, NULL); + free_cpumask_var(cfg->domain); + free_cpumask_var(cfg->old_domain); + kfree(cfg); +} + +static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node) +{ + int res = irq_alloc_desc_at(at, node); + struct irq_cfg *cfg; + + if (res < 0) { + if (res != -EEXIST) + return NULL; + cfg = irq_get_chip_data(at); + if (cfg) + return cfg; + } + + cfg = alloc_irq_cfg(at, node); + if (cfg) + irq_set_chip_data(at, cfg); + else + irq_free_desc(at); + return cfg; +} + +static int alloc_irqs_from(unsigned int from, unsigned int count, int node) +{ + return irq_alloc_descs_from(from, count, node); +} + +static void free_irq_at(unsigned int at, struct irq_cfg *cfg) +{ + free_irq_cfg(at, cfg); + irq_free_desc(at); +} + + +struct io_apic { + unsigned int index; + unsigned int unused[3]; + unsigned int data; + unsigned int unused2[11]; + unsigned int eoi; +}; + +static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx) +{ +#if defined CONFIG_E2K || defined CONFIG_E90S + return (struct io_apic __iomem *) mpc_ioapic_addr(idx); +#else + return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx) + + (mpc_ioapic_addr(idx) & ~PAGE_MASK); +#endif +} + +/* HACK! 
In arch/l we do not map this area anywhere, so we + * have to access them directly by their physical address, + * to do that we redefine writel/readl. */ +#ifdef CONFIG_E2K +#define my_writel boot_writel +#define my_readl boot_readl +#elif defined CONFIG_E90S +#define my_writel writel +#define my_readl readl +#else +# error Unsupported architecture! +#endif + +void io_apic_eoi(unsigned int apic, unsigned int vector) +{ + struct io_apic __iomem *io_apic = io_apic_base(apic); + my_writel(vector, &io_apic->eoi); +} + +unsigned int native_io_apic_read(unsigned int apic, unsigned int reg) +{ + struct io_apic __iomem *io_apic = io_apic_base(apic); + my_writel(reg, &io_apic->index); + return my_readl(&io_apic->data); +} + +void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value) +{ + struct io_apic __iomem *io_apic = io_apic_base(apic); + + my_writel(reg, &io_apic->index); + my_writel(value, &io_apic->data); +} + +/* + * Re-write a value: to be used for read-modify-write + * cycles where the read already set up the index register. 
+ * + * Older SiS APIC requires we rewrite the index register + */ +void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value) +{ + struct io_apic __iomem *io_apic = io_apic_base(apic); + +#if 0 + if (sis_apic_bug) + writel(reg, &io_apic->index); +#endif + my_writel(value, &io_apic->data); +} + + +union entry_union { + struct { u32 w1, w2; }; + struct IO_APIC_route_entry entry; +}; + +static struct IO_APIC_route_entry __ioapic_read_entry(int apic, int pin) +{ + union entry_union eu; + + eu.w1 = io_apic_read(apic, 0x10 + 2 * pin); + eu.w2 = io_apic_read(apic, 0x11 + 2 * pin); + + return eu.entry; +} + +static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin) +{ + union entry_union eu; + unsigned long flags; + + raw_spin_lock_irqsave(&ioapic_lock, flags); + eu.entry = __ioapic_read_entry(apic, pin); + raw_spin_unlock_irqrestore(&ioapic_lock, flags); + + return eu.entry; +} + +/* + * When we write a new IO APIC routing entry, we need to write the high + * word first! If the mask bit in the low word is clear, we will enable + * the interrupt, and we need to make sure the entry is fully populated + * before that happens. + */ +static void __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) +{ + union entry_union eu = {{0, 0}}; + + eu.entry = e; + io_apic_write(apic, 0x11 + 2*pin, eu.w2); + io_apic_write(apic, 0x10 + 2*pin, eu.w1); +} + +static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&ioapic_lock, flags); + __ioapic_write_entry(apic, pin, e); + raw_spin_unlock_irqrestore(&ioapic_lock, flags); +} + +/* + * When we mask an IO APIC routing entry, we need to write the low + * word first, in order to set the mask bit before we change the + * high bits! 
+ */ +static void ioapic_mask_entry(int apic, int pin) +{ + unsigned long flags; + union entry_union eu = { .entry.mask = 1 }; + + raw_spin_lock_irqsave(&ioapic_lock, flags); + io_apic_write(apic, 0x10 + 2*pin, eu.w1); + io_apic_write(apic, 0x11 + 2*pin, eu.w2); + raw_spin_unlock_irqrestore(&ioapic_lock, flags); +} + +/* + * The common case is 1:1 IRQ<->pin mappings. Sometimes there are + * shared ISA-space IRQs, so we have to support them. We are super + * fast in the common case, and fast for shared ISA-space IRQs. + */ +int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) +{ + struct irq_pin_list **last, *entry; + + /* don't allow duplicates */ + last = &cfg->irq_2_pin; + for_each_irq_pin(entry, cfg->irq_2_pin) { + if (entry->apic == apic && entry->pin == pin) + return 0; + last = &entry->next; + } + + entry = alloc_irq_pin_list(node); + if (!entry) { + pr_err("can not alloc irq_pin_list (%d,%d,%d)\n", + node, apic, pin); + return -ENOMEM; + } + entry->apic = apic; + entry->pin = pin; + + *last = entry; + return 0; +} + +#ifdef CONFIG_PIC +static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) +{ + if (__add_pin_to_irq_node(cfg, node, apic, pin)) + panic("IO-APIC: failed to add irq-pin. Can not proceed\n"); +} + +/* + * Reroute an IRQ to a different pin. + */ +static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node, + int oldapic, int oldpin, + int newapic, int newpin) +{ + struct irq_pin_list *entry; + + for_each_irq_pin(entry, cfg->irq_2_pin) { + if (entry->apic == oldapic && entry->pin == oldpin) { + entry->apic = newapic; + entry->pin = newpin; + /* every one is different, right? 
*/ + return; + } + } + + /* old apic/pin didn't exist, so just add new ones */ + add_pin_to_irq_node(cfg, node, newapic, newpin); +} +#endif + +static void __io_apic_modify_irq(struct irq_pin_list *entry, + int mask_and, int mask_or, + void (*final)(struct irq_pin_list *entry)) +{ + unsigned int reg, pin; + + pin = entry->pin; + reg = io_apic_read(entry->apic, 0x10 + pin * 2); + reg &= mask_and; + reg |= mask_or; + io_apic_modify(entry->apic, 0x10 + pin * 2, reg); + if (final) + final(entry); +} + +static void io_apic_modify_irq(struct irq_cfg *cfg, + int mask_and, int mask_or, + void (*final)(struct irq_pin_list *entry)) +{ + struct irq_pin_list *entry; + + for_each_irq_pin(entry, cfg->irq_2_pin) + __io_apic_modify_irq(entry, mask_and, mask_or, final); +} + +static void io_apic_sync(struct irq_pin_list *entry) +{ + /* + * Synchronize the IO-APIC and the CPU by doing + * a dummy read from the IO-APIC + */ + struct io_apic __iomem *io_apic; + + io_apic = io_apic_base(entry->apic); + my_readl(&io_apic->data); +} + +static void mask_ioapic(struct irq_cfg *cfg) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&ioapic_lock, flags); + io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); + raw_spin_unlock_irqrestore(&ioapic_lock, flags); +} + +static void mask_ioapic_irq(struct irq_data *data) +{ + mask_ioapic(data->chip_data); +} + +static void __unmask_ioapic(struct irq_cfg *cfg) +{ + io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); +} + +static void unmask_ioapic(struct irq_cfg *cfg) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&ioapic_lock, flags); + __unmask_ioapic(cfg); + raw_spin_unlock_irqrestore(&ioapic_lock, flags); +} + +static void unmask_ioapic_irq(struct irq_data *data) +{ + unmask_ioapic(data->chip_data); +} + +/* + * IO-APIC versions below 0x20 don't support EOI register. 
+ * For the record, here is the information about various versions: + * 0Xh 82489DX + * 1Xh I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant + * 2Xh I/O(x)APIC which is PCI 2.2 Compliant + * 30h-FFh Reserved + * + * Some of the Intel ICH Specs (ICH2 to ICH5) documents the io-apic + * version as 0x2. This is an error with documentation and these ICH chips + * use io-apic's of version 0x20. + * + * For IO-APIC's with EOI register, we use that to do an explicit EOI. + * Otherwise, we simulate the EOI message manually by changing the trigger + * mode to edge and then back to level, with RTE being masked during this. + */ +void native_eoi_ioapic_pin(int apic, int pin, int vector) +{ + if (mpc_ioapic_ver(apic) >= 0x20) { + io_apic_eoi(apic, vector); + } else { + struct IO_APIC_route_entry entry, entry1; + + entry = entry1 = __ioapic_read_entry(apic, pin); + + /* + * Mask the entry and change the trigger mode to edge. + */ + entry1.mask = 1; + entry1.trigger = IOAPIC_EDGE; + + __ioapic_write_entry(apic, pin, entry1); + + /* + * Restore the previous level triggered entry. + */ + __ioapic_write_entry(apic, pin, entry); + } +} + +void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) +{ + struct irq_pin_list *entry; + unsigned long flags; + + raw_spin_lock_irqsave(&ioapic_lock, flags); + for_each_irq_pin(entry, cfg->irq_2_pin) +#if 0 + x86_io_apic_ops.eoi_ioapic_pin(entry->apic, entry->pin, + cfg->vector); +#else + native_eoi_ioapic_pin(entry->apic, entry->pin, cfg->vector); +#endif + raw_spin_unlock_irqrestore(&ioapic_lock, flags); +} + +static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) +{ + struct IO_APIC_route_entry entry; + + /* Check delivery_mode to be sure we're not clearing an SMI pin */ + entry = ioapic_read_entry(apic, pin); + if (entry.delivery_mode == dest_SMI) + return; + + /* + * Make sure the entry is masked and re-read the contents to check + * if it is a level triggered pin and if the remote-IRR is set. 
+ */ + if (!entry.mask) { + entry.mask = 1; + ioapic_write_entry(apic, pin, entry); + entry = ioapic_read_entry(apic, pin); + } + + if (entry.irr) { + unsigned long flags; + + /* + * Make sure the trigger mode is set to level. Explicit EOI + * doesn't clear the remote-IRR if the trigger mode is not + * set to level. + */ + if (!entry.trigger) { + entry.trigger = IOAPIC_LEVEL; + ioapic_write_entry(apic, pin, entry); + } + + raw_spin_lock_irqsave(&ioapic_lock, flags); +#if 0 + x86_io_apic_ops.eoi_ioapic_pin(apic, pin, entry.vector); +#else + native_eoi_ioapic_pin(apic, pin, entry.vector); +#endif + raw_spin_unlock_irqrestore(&ioapic_lock, flags); + } + + /* + * Clear the rest of the bits in the IO-APIC RTE except for the mask + * bit. + */ + ioapic_mask_entry(apic, pin); + entry = ioapic_read_entry(apic, pin); + if (entry.irr) + pr_err("Unable to reset IRR for apic: %d, pin :%d\n", + mpc_ioapic_id(apic), pin); +} + +static void clear_IO_APIC (void) +{ + int apic, pin; + + for (apic = 0; apic < nr_ioapics; apic++) + for (pin = 0; pin < ioapics[apic].nr_registers; pin++) + clear_IO_APIC_pin(apic, pin); +} + +#ifdef CONFIG_L_X86_32 +/* + * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to + * specific CPU-side IRQs. + */ + +#define MAX_PIRQS 8 +static int pirq_entries[MAX_PIRQS] = { + [0 ... MAX_PIRQS - 1] = -1 +}; + +static int __init ioapic_pirq_setup(char *str) +{ + int i, max; + int ints[MAX_PIRQS+1]; + + get_options(str, ARRAY_SIZE(ints), ints); + + apic_printk(APIC_VERBOSE, KERN_INFO + "PIRQ redirection, working around broken MP-BIOS.\n"); + max = MAX_PIRQS; + if (ints[0] < MAX_PIRQS) + max = ints[0]; + + for (i = 0; i < max; i++) { + apic_printk(APIC_VERBOSE, KERN_DEBUG + "... PIRQ%d -> IRQ %d\n", i, ints[i+1]); + /* + * PIRQs are mapped upside down, usually. 
+ */ + pirq_entries[MAX_PIRQS-i-1] = ints[i+1]; + } + return 1; +} + +__setup("pirq=", ioapic_pirq_setup); +#endif /* CONFIG_L_X86_32 */ + +/* + * Saves all the IO-APIC RTE's + */ +int save_ioapic_entries(void) +{ + int apic, pin; + int err = 0; + + for (apic = 0; apic < nr_ioapics; apic++) { + if (!ioapics[apic].saved_registers) { + err = -ENOMEM; + continue; + } + + for (pin = 0; pin < ioapics[apic].nr_registers; pin++) + ioapics[apic].saved_registers[pin] = + ioapic_read_entry(apic, pin); + } + + return err; +} + +/* + * Mask all IO APIC entries. + */ +void mask_ioapic_entries(void) +{ + int apic, pin; + + for (apic = 0; apic < nr_ioapics; apic++) { + if (!ioapics[apic].saved_registers) + continue; + + for (pin = 0; pin < ioapics[apic].nr_registers; pin++) { + struct IO_APIC_route_entry entry; + + entry = ioapics[apic].saved_registers[pin]; + if (!entry.mask) { + entry.mask = 1; + ioapic_write_entry(apic, pin, entry); + } + } + } +} + +/* + * Restore IO APIC entries which was saved in the ioapic structure. + */ +int restore_ioapic_entries(void) +{ + int apic, pin; + + for (apic = 0; apic < nr_ioapics; apic++) { + if (!ioapics[apic].saved_registers) + continue; + + for (pin = 0; pin < ioapics[apic].nr_registers; pin++) + ioapic_write_entry(apic, pin, + ioapics[apic].saved_registers[pin]); + } + return 0; +} + +/* + * Find the IRQ entry number of a certain pin. 
+ */ +static int find_irq_entry(int ioapic_idx, int pin, int type) +{ + int i; + + for (i = 0; i < mp_irq_entries; i++) + if (mp_irqs[i].irqtype == type && + (mp_irqs[i].dstapic == mpc_ioapic_id(ioapic_idx) || + mp_irqs[i].dstapic == MP_APIC_ALL) && + mp_irqs[i].dstirq == pin) + return i; + + return -1; +} + +#ifdef CONFIG_PIC +/* + * Find the pin to which IRQ[irq] (ISA) is connected + */ +static int __init find_isa_irq_pin(int irq, int type) +{ + int i; + + for (i = 0; i < mp_irq_entries; i++) { + int lbus = mp_irqs[i].srcbus; + + if (test_bit(lbus, mp_bus_not_pci) && + (mp_irqs[i].irqtype == type) && + (mp_irqs[i].srcbusirq == irq)) + + return mp_irqs[i].dstirq; + } + return -1; +} + +static int __init find_isa_irq_apic(int irq, int type) +{ + int i; + + for (i = 0; i < mp_irq_entries; i++) { + int lbus = mp_irqs[i].srcbus; + + if (test_bit(lbus, mp_bus_not_pci) && + (mp_irqs[i].irqtype == type) && + (mp_irqs[i].srcbusirq == irq)) + break; + } + + if (i < mp_irq_entries) { + int ioapic_idx; + + for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) + if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic) + return ioapic_idx; + } + + return -1; +} +#endif + +#ifdef CONFIG_EISA +/* + * EISA Edge/Level control register, ELCR + */ +static int EISA_ELCR(unsigned int irq) +{ + if (irq < legacy_pic->nr_legacy_irqs) { + unsigned int port = 0x4d0 + (irq >> 3); + return (inb(port) >> (irq & 7)) & 1; + } + apic_printk(APIC_VERBOSE, KERN_INFO + "Broken MPtable reports ISA irq %d\n", irq); + return 0; +} + +#endif + +/* ISA interrupts are always polarity zero edge triggered, + * when listed as conforming in the MP table. */ + +#define default_ISA_trigger(idx) (0) +#define default_ISA_polarity(idx) (0) + +/* EISA interrupts are always polarity zero and can be edge or level + * trigger depending on the ELCR value. 
If an interrupt is listed as + * EISA conforming in the MP table, that means its trigger type must + * be read in from the ELCR */ + +#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].srcbusirq)) +#define default_EISA_polarity(idx) default_ISA_polarity(idx) + +/* PCI interrupts are always polarity one level triggered, + * when listed as conforming in the MP table. */ + +#define default_PCI_trigger(idx) (1) +#define default_PCI_polarity(idx) (1) + +static int irq_polarity(int idx) +{ + int bus = mp_irqs[idx].srcbus; + int polarity; + + /* + * Determine IRQ line polarity (high active or low active): + */ + switch (mp_irqs[idx].irqflag & 3) + { + case 0: /* conforms, ie. bus-type dependent polarity */ + if (test_bit(bus, mp_bus_not_pci)) + polarity = default_ISA_polarity(idx); + else + polarity = default_PCI_polarity(idx); + break; + case 1: /* high active */ + { + polarity = 0; + break; + } + case 2: /* reserved */ + { + pr_warn("broken BIOS!!\n"); + polarity = 1; + break; + } + case 3: /* low active */ + { + polarity = 1; + break; + } + default: /* invalid */ + { + pr_warn("broken BIOS!!\n"); + polarity = 1; + break; + } + } + return polarity; +} + +static int irq_trigger(int idx) +{ + int bus = mp_irqs[idx].srcbus; + int trigger; + + /* + * Determine IRQ trigger mode (edge or level sensitive): + */ + switch ((mp_irqs[idx].irqflag>>2) & 3) + { + case 0: /* conforms, ie. 
bus-type dependent */ + if (test_bit(bus, mp_bus_not_pci)) + trigger = default_ISA_trigger(idx); + else + trigger = default_PCI_trigger(idx); +#ifdef CONFIG_EISA + switch (mp_bus_id_to_type[bus]) { + case MP_BUS_ISA: /* ISA pin */ + { + /* set before the switch */ + break; + } + case MP_BUS_EISA: /* EISA pin */ + { + trigger = default_EISA_trigger(idx); + break; + } + case MP_BUS_PCI: /* PCI pin */ + { + /* set before the switch */ + break; + } + default: + { + pr_warn("broken BIOS!!\n"); + trigger = 1; + break; + } + } +#endif + break; + case 1: /* edge */ + { + trigger = 0; + break; + } + case 2: /* reserved */ + { + pr_warn("broken BIOS!!\n"); + trigger = 1; + break; + } + case 3: /* level */ + { + trigger = 1; + break; + } + default: /* invalid */ + { + pr_warn("broken BIOS!!\n"); + trigger = 0; + break; + } + } + return trigger; +} + +static int pin_2_irq(int idx, int apic, int pin) +{ + int irq; + int bus = mp_irqs[idx].srcbus; + struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(apic); + + /* + * Debugging check, we are in big trouble if this message pops up! + */ + if (mp_irqs[idx].dstirq != pin) + pr_err("broken BIOS or MPTABLE parser, ayiee!!\n"); + +#if defined CONFIG_E2K || defined CONFIG_E90S + /* ISA device interrupts are allowed only for the IO-APIC + * on BSP. if boot passes such interrupts for other IO-APICs + * then their IRQ numbers are calculated as for PCI devices. + * For example, system timer interrupt number is 26 on + * the second IO APIC but it is masked. */ + if (test_bit(bus, mp_bus_not_pci) && apic == 0 && !cpu_has_epic()) { +#else + if (test_bit(bus, mp_bus_not_pci)) { +#endif + irq = mp_irqs[idx].srcbusirq; + } else { + u32 gsi = gsi_cfg->gsi_base + pin; + + if (gsi >= NR_IRQS_LEGACY) + irq = gsi; + else + irq = gsi_top + gsi; + } + +#ifdef CONFIG_L_X86_32 + /* + * PCI IRQ command line redirection. Yes, limits are hardcoded. 
+ */ + if ((pin >= 16) && (pin <= 23)) { + if (pirq_entries[pin-16] != -1) { + if (!pirq_entries[pin-16]) { + apic_printk(APIC_VERBOSE, KERN_DEBUG + "disabling PIRQ%d\n", pin-16); + } else { + irq = pirq_entries[pin-16]; + apic_printk(APIC_VERBOSE, KERN_DEBUG + "using PIRQ%d -> IRQ %d\n", + pin-16, irq); + } + } + } +#endif + + return irq; +} + +/* + * Find a specific PCI IRQ entry. + * Not an __init, possibly needed by modules + */ +#if defined CONFIG_E2K || defined CONFIG_E90S +int IO_APIC_get_PCI_irq_vector(int domain, int bus, int slot, int pin, + struct io_apic_irq_attr *irq_attr) +#else +int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin, + struct io_apic_irq_attr *irq_attr) +#endif +{ + int ioapic_idx, i, best_guess = -1; + + apic_printk(APIC_DEBUG, + "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n", + bus, slot, pin); + if (test_bit(bus, mp_bus_not_pci)) { + apic_printk(APIC_VERBOSE, + "PCI BIOS passed nonexistent PCI bus %d!\n", bus); + return -1; + } + + for (i = 0; i < mp_irq_entries; i++) { + int lbus = mp_irqs[i].srcbus, found = 0; + +#if defined CONFIG_E2K || defined CONFIG_E90S + apic_printk(APIC_DEBUG, + "MP entry #%d src bus #%d PCI = %d dst APIC id %d " + "irq type %d src bus irq %d dst bus irq %d\n", + i, lbus, test_bit(lbus, mp_bus_not_pci), + mp_irqs[i].dstapic, mp_irqs[i].irqtype, + mp_irqs[i].srcbusirq, mp_irqs[i].dstirq); +#endif + for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) + if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic || + mp_irqs[i].dstapic == MP_APIC_ALL) { + found = 1; + break; + } + if (!found) + continue; + + if (!test_bit(lbus, mp_bus_not_pci) && + !mp_irqs[i].irqtype && + (bus == lbus) && + (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) { + int irq = pin_2_irq(i, ioapic_idx, mp_irqs[i].dstirq); + + apic_printk(APIC_DEBUG, "Found our bus & pin -> IRQ %d\n", + irq); + if (!(ioapic_idx || IO_APIC_IRQ(irq))) { + apic_printk(APIC_DEBUG, "Continue APIC %d & IRQ %d are 0\n", + ioapic_idx, 
IO_APIC_IRQ(irq)); + continue; + } + + if (pin == (mp_irqs[i].srcbusirq & 3)) { + set_io_apic_irq_attr(irq_attr, ioapic_idx, + mp_irqs[i].dstirq, + irq_trigger(i), + irq_polarity(i)); + apic_printk(APIC_DEBUG, "pin %d == src bus irg %d, return IRQ %d\n", + pin, (mp_irqs[i].srcbusirq & 3), + irq); + return irq; + } + /* + * Use the first all-but-pin matching entry as a + * best-guess fuzzy result for broken mptables. + */ + if (best_guess < 0) { + set_io_apic_irq_attr(irq_attr, ioapic_idx, + mp_irqs[i].dstirq, + irq_trigger(i), + irq_polarity(i)); + apic_printk(APIC_DEBUG, "Use the first all-but-pin matching entry as a best-guess fuzzy result IRQ %d\n", + irq); + best_guess = irq; + } + } + } + apic_printk(APIC_DEBUG, "IO_APIC_get_PCI_irq_vector() Return IRQ %d\n", + best_guess); + return best_guess; +} +EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); + +#if defined CONFIG_E2K || defined CONFIG_E90S +int IO_APIC_get_fix_irq_vector(int domain, int bus, int slot, int func, int irq) +{ + int i; + + apic_printk(APIC_DEBUG, "IO_APIC_get_fix_irq_vector() domain:%d, " + "bus:%d, irq:%d\n", + domain, bus, irq); + + for (i = 0; i < mp_irq_entries; i++) { + int lbus = mp_irqs[i].srcbus, found = 0, ioapic_idx; + + apic_printk(APIC_DEBUG, "MP entry #%d src bus #%d PCI = %d " + "dst APIC id %d irq type %d src bus irq %d dst bus " + "irq %d\n", + i, lbus, test_bit(lbus, mp_bus_not_pci), + mp_irqs[i].dstapic, mp_irqs[i].irqtype, + mp_irqs[i].srcbusirq, mp_irqs[i].dstirq); + + for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) + if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic || + mp_irqs[i].dstapic == MP_APIC_ALL) { + apic_printk(APIC_DEBUG, + "IO APIC id %d found as #%d\n", + mp_irqs[i].dstapic, ioapic_idx); + found = 1; + break; + } + if (!found) + continue; + + if ((!test_bit(lbus, mp_bus_not_pci)) && + (mp_irqs[i].irqtype == mp_FixINT) && + (bus == lbus) && + (PCI_SLOT(mp_irqs[i].srcbusirq) == slot) && + (PCI_FUNC(mp_irqs[i].srcbusirq) == func) && + ((irq == 
mp_irqs[i].dstirq) || irq == 0)) { + irq = pin_2_irq(i, ioapic_idx, mp_irqs[i].dstirq); + apic_printk(APIC_DEBUG, "Found our bus %d slot %d " + "func %d IRQ %d\n", + lbus, slot, func, irq); + return (irq); + } else if ((((test_bit(lbus, mp_bus_not_pci)) && + (test_bit(bus, mp_bus_not_pci)) && + (mp_irqs[i].irqtype == mp_FixINT)) || + ((test_bit(lbus, mp_bus_not_pci)) && + (mp_irqs[i].irqtype == mp_INT) && + (irq != 0))) && + (irq == mp_irqs[i].srcbusirq)) { + irq = pin_2_irq(i, ioapic_idx, mp_irqs[i].dstirq); + apic_printk(APIC_DEBUG, "Found our bus %d, src IRQ " + "%d -> dst IRQ %d\n", + lbus, mp_irqs[i].srcbusirq, irq); + return (irq); + } + } + apic_printk(APIC_DEBUG, "IO_APIC_get_fix_irq_vector() could not " + "find IRQ\n"); + return (-1); +} +EXPORT_SYMBOL(IO_APIC_get_fix_irq_vector); +#endif + +void lock_vector_lock(void) +{ + /* Used to the online set of cpus does not change + * during assign_irq_vector. + */ + raw_spin_lock(&vector_lock); +} + +void unlock_vector_lock(void) +{ + raw_spin_unlock(&vector_lock); +} + +static int +__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) +{ + /* + * NOTE! The local APIC isn't very good at handling + * multiple interrupts at the same interrupt level. + * As the interrupt level is determined by taking the + * vector number and shifting that right by 4, we + * want to spread these out a bit so that they don't + * all fall in the same interrupt level. + * + * Also, we've got to be careful not to trash gate + * 0x80, because int 0x80 is hm, kind of importantish. 
;) + */ + static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START; + static int current_offset = VECTOR_OFFSET_START % 16; + int cpu, err; + cpumask_var_t tmp_mask; + + if (cfg->move_in_progress) + return -EBUSY; + + if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC)) + return -ENOMEM; + + /* Only try and allocate irqs on cpus that are present */ + err = -ENOSPC; + cpumask_clear(cfg->old_domain); + cpu = cpumask_first_and(mask, cpu_online_mask); + while (cpu < nr_cpu_ids) { + int new_cpu, vector, offset; + + apic->vector_allocation_domain(cpu, tmp_mask, mask); + + if (cpumask_subset(tmp_mask, cfg->domain)) { + err = 0; + if (cpumask_equal(tmp_mask, cfg->domain)) + break; + /* + * New cpumask using the vector is a proper subset of + * the current in use mask. So cleanup the vector + * allocation for the members that are not used anymore. + */ + cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask); + cfg->move_in_progress = + cpumask_intersects(cfg->old_domain, cpu_online_mask); + cpumask_and(cfg->domain, cfg->domain, tmp_mask); + if (cfg->move_in_progress) { + apic_printk(APIC_DEBUG, KERN_DEBUG "Moving vector %d: reduced CPU set\n", + cfg->vector); + } + + break; + } + + vector = current_vector; + offset = current_offset; +next: + vector += 16; + if (vector >= first_system_vector || + vector >= NR_VECTORS_APIC) { + offset = (offset + 1) % 16; + vector = FIRST_EXTERNAL_VECTOR + offset; + } + + if (unlikely(current_vector == vector)) { + cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask); + cpumask_andnot(tmp_mask, mask, cfg->old_domain); + cpu = cpumask_first_and(tmp_mask, cpu_online_mask); + continue; + } + + /* + * Bug in IOH and IOH2. Can't use (vector & 4) == 1. + * Bug fixed in IOH2 rev5. + */ +#ifdef CONFIG_E90S + /* FIXME: it's too early to check iohub generation. 
*/ + if (e90s_get_cpu_type() == E90S_CPU_R1000) +#endif + if (vector & 4) { + goto next; + } + if (test_bit(vector, used_vectors)) + goto next; + + for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) { + if (per_cpu(vector_irq, new_cpu)[vector] > VECTOR_UNDEFINED) + goto next; + } + /* Found one! */ + current_vector = vector; + current_offset = offset; + if (cfg->vector) { + cpumask_copy(cfg->old_domain, cfg->domain); + cfg->move_in_progress = + cpumask_intersects(cfg->old_domain, cpu_online_mask); + if (cfg->move_in_progress) { + apic_printk(APIC_DEBUG, KERN_DEBUG "Started moving vector %d to vector %d\n", + cfg->vector, vector); + } + } + for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) + per_cpu(vector_irq, new_cpu)[vector] = irq; + cfg->vector = vector; + cpumask_copy(cfg->domain, tmp_mask); + err = 0; + break; + } + free_cpumask_var(tmp_mask); + return err; +} + +int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) +{ + int err; + unsigned long flags; + + raw_spin_lock_irqsave(&vector_lock, flags); + err = __assign_irq_vector(irq, cfg, mask); + raw_spin_unlock_irqrestore(&vector_lock, flags); + return err; +} + +static void __clear_irq_vector(int irq, struct irq_cfg *cfg) +{ + int cpu, vector; + + BUG_ON(!cfg->vector); + + vector = cfg->vector; + for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) + per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; + + cfg->vector = 0; + cpumask_clear(cfg->domain); + + if (likely(!cfg->move_in_progress)) + return; + for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { + for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { + if (per_cpu(vector_irq, cpu)[vector] != irq) + continue; + per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; + break; + } + } + cfg->move_in_progress = 0; +} + +static bool irqchip_is_ioapic(struct irq_chip *chip); + +void __apic_setup_vector_irq(int cpu) +{ + /* Initialize vector_irq on a new cpu */ + int irq, vector; + struct irq_cfg *cfg; + + /* 
+ * vector_lock will make sure that we don't run into irq vector + * assignments that might be happening on another cpu in parallel, + * while we setup our initial vector to irq mappings. + */ + raw_spin_lock(&vector_lock); + /* Mark the inuse vectors */ + for_each_active_irq(irq) { + if (!irqchip_is_ioapic(irq_get_chip(irq))) + continue; + + cfg = irq_get_chip_data(irq); + if (!cfg) + continue; + + if (!cpumask_test_cpu(cpu, cfg->domain)) + continue; + vector = cfg->vector; + per_cpu(vector_irq, cpu)[vector] = irq; + } + /* Mark the free vectors */ + for (vector = 0; vector < NR_VECTORS; ++vector) { + irq = per_cpu(vector_irq, cpu)[vector]; + if (irq <= VECTOR_UNDEFINED) + continue; + + if (!irqchip_is_ioapic(irq_get_chip(irq))) + continue; + + cfg = irq_cfg(irq); + if (!cpumask_test_cpu(cpu, cfg->domain)) + per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; + } + raw_spin_unlock(&vector_lock); +} + +static struct irq_chip ioapic_chip; + +#ifdef CONFIG_L_X86_32 +static inline int IO_APIC_irq_trigger(int irq) +{ + int apic, idx, pin; + + for (apic = 0; apic < nr_ioapics; apic++) { + for (pin = 0; pin < ioapics[apic].nr_registers; pin++) { + idx = find_irq_entry(apic, pin, mp_INT); + if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin))) + return irq_trigger(idx); + } + } + /* + * nonexistent IRQs are edge default + */ + return 0; +} +#else +static inline int IO_APIC_irq_trigger(int irq) +{ + return 1; +} +#endif + +static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg, + unsigned long trigger) +{ + struct irq_chip *chip = &ioapic_chip; + irq_flow_handler_t hdl; + bool fasteoi; + + if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || + trigger == IOAPIC_LEVEL) { + irq_set_status_flags(irq, IRQ_LEVEL); + fasteoi = true; + } else { + irq_clear_status_flags(irq, IRQ_LEVEL); + fasteoi = false; + } + +#if 0 + if (setup_remapped_irq(irq, cfg, chip)) + fasteoi = trigger != 0; +#endif + + hdl = fasteoi ? 
handle_fasteoi_irq : handle_edge_irq; + irq_set_chip_and_handler_name(irq, chip, hdl, + fasteoi ? "fasteoi" : "edge"); +} + +int native_setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry, + unsigned int destination, int vector, + struct io_apic_irq_attr *attr) +{ + memset(entry, 0, sizeof(*entry)); + + entry->delivery_mode = apic->irq_delivery_mode; + entry->dest_mode = apic->irq_dest_mode; + entry->dest = destination; + entry->vector = vector; + entry->mask = 0; /* enable IRQ */ + entry->trigger = attr->trigger; + entry->polarity = attr->polarity; + + /* + * Mask level triggered irqs. + * Use IRQ_DELAYED_DISABLE for edge triggered irqs. + */ + if (attr->trigger) + entry->mask = 1; + + return 0; +} + +static int setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg, + struct io_apic_irq_attr *attr) +{ + struct IO_APIC_route_entry entry; + unsigned int dest; + int ret; + + if (!IO_APIC_IRQ(irq)) + return -EINVAL; + + if ((ret = assign_irq_vector(irq, cfg, apic->target_cpus()))) + return ret; + + if (apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus(), + &dest)) { + pr_warn("Failed to obtain apicid for ioapic %d, pin %d\n", + mpc_ioapic_id(attr->ioapic), attr->ioapic_pin); + __clear_irq_vector(irq, cfg); + + return -EINVAL; + } + + apic_printk(APIC_VERBOSE,KERN_DEBUG + "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " + "IRQ %d Mode:%i Active:%i Dest:%d)\n", + attr->ioapic, mpc_ioapic_id(attr->ioapic), attr->ioapic_pin, + cfg->vector, irq, attr->trigger, attr->polarity, dest); + +#if 0 + if (x86_io_apic_ops.setup_entry(irq, &entry, dest, cfg->vector, attr)) { +#else + if (native_setup_ioapic_entry(irq, &entry, dest, cfg->vector, attr)) { +#endif + pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n", + mpc_ioapic_id(attr->ioapic), attr->ioapic_pin); + __clear_irq_vector(irq, cfg); + + return -EINVAL; + } + + ioapic_register_intr(irq, cfg, attr->trigger); +#ifdef CONFIG_PIC + if (irq < legacy_pic->nr_legacy_irqs) + 
legacy_pic->mask(irq); +#endif + + ioapic_write_entry(attr->ioapic, attr->ioapic_pin, entry); + + return ret; +} + +static bool __init_recv io_apic_pin_not_connected(int idx, int ioapic_idx, + int pin) +{ + if (idx != -1) + return false; + + apic_printk(APIC_VERBOSE, KERN_DEBUG " apic %d pin %d not connected\n", + mpc_ioapic_id(ioapic_idx), pin); + return true; +} + +static int +io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr) +{ + struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node); + int ret; + + if (!cfg) + return -EINVAL; + ret = __add_pin_to_irq_node(cfg, node, attr->ioapic, attr->ioapic_pin); + if (!ret) + return setup_ioapic_irq(irq, cfg, attr); + return ret; +} + +static void __init __io_apic_setup_irqs(unsigned int ioapic_idx) +{ + int idx, node = cpu_to_node(0); + struct io_apic_irq_attr attr; + unsigned int pin, irq, ret; + + for (pin = 0; pin < ioapics[ioapic_idx].nr_registers; pin++) { + idx = find_irq_entry(ioapic_idx, pin, mp_INT); +#if defined CONFIG_E2K || defined CONFIG_E90S + if (idx == -1) { + idx = find_irq_entry(ioapic_idx, pin, mp_FixINT); + +#if defined(CONFIG_ACPI_L_SPMC) || defined(CONFIG_ACPI_L_SPMC_MODULE) + if (idx == -1) { + /* Boot should provide this data for P8 */ + if ( pin == 1 ) { + /* Then according to doc: + * irq = 1; + * trigger = 1; + * polarity = 0; + */ + irq = 1; + + set_io_apic_irq_attr( + &attr, ioapic_idx, pin, 1, 0); + + io_apic_setup_irq_pin(irq, node, &attr); + + continue; + } + } +#endif /* CONFIG_ACPI_L_SPMC */ + } +#endif + if (io_apic_pin_not_connected(idx, ioapic_idx, pin)) + continue; + + irq = pin_2_irq(idx, ioapic_idx, pin); + + /* We have more interrupts than Intel */ +#if !defined(CONFIG_E2K) && !defined(CONFIG_E90S) + if ((ioapic_idx > 0) && (irq > 16)) + continue; +#endif + + /* + * Skip the timer IRQ if there's a quirk handler + * installed and if it returns 1: + */ + if (apic->multi_timer_check && + apic->multi_timer_check(ioapic_idx, irq)) + continue; + + 
set_io_apic_irq_attr(&attr, ioapic_idx, pin, irq_trigger(idx), + irq_polarity(idx)); + + if ((ret = io_apic_setup_irq_pin(irq, node, &attr))) + pr_warn("Failed (%d) to setup irq for" + " ioapic %d, pin %d\n", + ret, mpc_ioapic_id(ioapic_idx), pin); + } +} + +static void __init setup_IO_APIC_irqs(void) +{ + unsigned int ioapic_idx; + + apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); + + for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) + __io_apic_setup_irqs(ioapic_idx); +} + +/* + * for the gsit that is not in first ioapic + * but could not use acpi_register_gsi() + * like some special sci in IBM x3330 + */ +void setup_IO_APIC_irq_extra(u32 gsi) +{ + int ioapic_idx = 0, pin, idx, irq, node = cpu_to_node(0); + struct io_apic_irq_attr attr; + + /* + * Convert 'gsi' to 'ioapic.pin'. + */ + ioapic_idx = mp_find_ioapic(gsi); + if (ioapic_idx < 0) + return; + + pin = mp_find_ioapic_pin(ioapic_idx, gsi); + idx = find_irq_entry(ioapic_idx, pin, mp_INT); + if (idx == -1) + return; + + irq = pin_2_irq(idx, ioapic_idx, pin); + + /* Only handle the non legacy irqs on secondary ioapics */ + if (ioapic_idx == 0 || irq < NR_IRQS_LEGACY) + return; + + set_io_apic_irq_attr(&attr, ioapic_idx, pin, irq_trigger(idx), + irq_polarity(idx)); + + io_apic_setup_irq_pin_once(irq, node, &attr); +} + +#ifdef CONFIG_PIC +/* + * Set up the timer pin, possibly with the 8259A-master behind. + */ +static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx, + unsigned int pin, int vector) +{ + struct IO_APIC_route_entry entry; + unsigned int dest; + + memset(&entry, 0, sizeof(entry)); + + /* + * We use logical delivery to get the timer IRQ + * to the first CPU. 
+ */ + if (unlikely(apic->cpu_mask_to_apicid_and(apic->target_cpus(), + apic->target_cpus(), &dest))) + dest = BAD_APICID; + + entry.dest_mode = apic->irq_dest_mode; + entry.mask = 0; /* don't mask IRQ for edge */ + entry.dest = dest; + entry.delivery_mode = apic->irq_delivery_mode; + entry.polarity = 0; + entry.trigger = 0; + entry.vector = vector; + + /* + * The timer IRQ doesn't have to know that behind the + * scene we may have a 8259A-master in AEOI mode ... + */ + irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, + "edge"); + + /* + * Add it to the IO-APIC irq-routing table: + */ + ioapic_write_entry(ioapic_idx, pin, entry); +} +#endif + +void native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries) +{ + int i; + + pr_info(" NR Dst Mask Trig IRR Pol Stat Dmod Deli Vect:\n"); + + for (i = 0; i <= nr_entries; i++) { + struct IO_APIC_route_entry entry; + + entry = ioapic_read_entry(apic, i); + + pr_info(" %02x %02X ", i, entry.dest); + pr_cont("%1d %1d %1d %1d %1d " + "%1d %1d %02X\n", + entry.mask, + entry.trigger, + entry.irr, + entry.polarity, + entry.delivery_status, + entry.dest_mode, + entry.delivery_mode, + entry.vector); + } +} + +void intel_ir_io_apic_print_entries(unsigned int apic, + unsigned int nr_entries) +{ + int i; + + pr_debug(" NR Indx Fmt Mask Trig IRR Pol Stat Indx2 Zero Vect:\n"); + + for (i = 0; i <= nr_entries; i++) { + struct IR_IO_APIC_route_entry *ir_entry; + struct IO_APIC_route_entry entry; + + entry = ioapic_read_entry(apic, i); + + ir_entry = (struct IR_IO_APIC_route_entry *)&entry; + + pr_debug(" %02x %04X ", i, ir_entry->index); + pr_cont("%1d %1d %1d %1d %1d " + "%1d %1d %X %02X\n", + ir_entry->format, + ir_entry->mask, + ir_entry->trigger, + ir_entry->irr, + ir_entry->polarity, + ir_entry->delivery_status, + ir_entry->index2, + ir_entry->zero, + ir_entry->vector); + } +} + +void ioapic_zap_locks(void) +{ + raw_spin_lock_init(&ioapic_lock); +} + +__apicdebuginit(void) print_IO_APIC(int ioapic_idx) +{ 
+ union IO_APIC_reg_00 reg_00; + union IO_APIC_reg_01 reg_01; + union IO_APIC_reg_02 reg_02; + union IO_APIC_reg_03 reg_03; + unsigned long flags; + + raw_spin_lock_irqsave(&ioapic_lock, flags); + reg_00.raw = io_apic_read(ioapic_idx, 0); + reg_01.raw = io_apic_read(ioapic_idx, 1); + if (reg_01.bits.version >= 0x10) + reg_02.raw = io_apic_read(ioapic_idx, 2); + if (reg_01.bits.version >= 0x20) + reg_03.raw = io_apic_read(ioapic_idx, 3); + raw_spin_unlock_irqrestore(&ioapic_lock, flags); + + printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(ioapic_idx)); + printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); + printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID); + printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type); + printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS); + + printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)®_01); + printk(KERN_DEBUG "....... : max redirection entries: %02X\n", + reg_01.bits.entries); + + printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ); + printk(KERN_DEBUG "....... : IO APIC version: %02X\n", + reg_01.bits.version); + + /* + * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02, + * but the value of reg_02 is read as the previous read register + * value, so ignore it if reg_02 == reg_01. + */ + if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) { + printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw); + printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration); + } + + /* + * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02 + * or reg_03, but the value of reg_0[23] is read as the previous read + * register value, so ignore it if reg_03 == reg_0[12]. + */ + if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw && + reg_03.raw != reg_01.raw) { + printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw); + printk(KERN_DEBUG "....... 
: Boot DT : %X\n", reg_03.bits.boot_DT); + } + + printk(KERN_DEBUG ".... IRQ redirection table:\n"); + +#if 0 + x86_io_apic_ops.print_entries(ioapic_idx, reg_01.bits.entries); +#else + native_io_apic_print_entries(ioapic_idx, reg_01.bits.entries); +#endif +} + +#ifdef CONFIG_E90S +static void print_IO_APIC_E90S(void) +{ +#define PCI_SCBA_0 0xf0 /* System commutator base address [31:00] */ +#define A2_BA0 0x440 /* 32/0xffffffe0 "ioapic" Mem Base Address + * Register 0 m:2:1{0x40-0x43} + */ +#define A2_BUA0 0x444 /* 32/0xffffffff "ioapic" Mem Base Address 0 + * Upper 32 bits m:2:1{0x44-0x47} + */ +#define A2_BA1 0x448 /* 32/0xfffffffc "ioapic" Mem Base Address + * Register 1 m:2:1{0x48-0x4b} + */ +#define A2_BUA1 0x44c /* 32/0xffffffff "ioapic" Mem Base Address 1 + * Upper 32 bits m:2:1{0x4c-0x4f} + */ +#define A2_BA2 0x450 /* 32/0xfffff000 "ioapic" Mem Base Address + * Register 2 m:2:1{0x50-0x53} + */ +#define A2_BUA2 0x454 /* 32/0xffffffff "ioapic" Mem Base Address 2 + * Upper 32 bits m:2:1{0x54-0x57} + */ + + struct pci_dev *dev = NULL; + int i; + printk (KERN_DEBUG "Interrupt Subsystem Configuration\n"); + for_each_online_node(i) { + u64 nodeid = 0xFE00007000 | (i << 28); + u64 nodeconfig = 0xFE00007004 | (i << 28); + u64 ioapic_message_base = 0xFE00001078 | (i << 28); + u64 lapic_message_base = 0xFE0000107c | (i << 28); + + printk(KERN_DEBUG "node %d: NodeId = 0x%08x;" + "Nodeconfig = 0x%08x\n", + i, __raw_readl(&nodeid), __raw_readl((const volatile void *)nodeconfig)); + printk(KERN_DEBUG "node %d: IOAPICMESSAGEBASE = 0x%08x\n", + i, __raw_readl((const volatile void *)ioapic_message_base)); + printk(KERN_DEBUG "node %d: LAPICMESSAGEBASE = 0x%08x\n", + i, __raw_readl( + (const volatile void *)lapic_message_base)); + } + + while ((dev = pci_get_device(PCI_VENDOR_ID_ELBRUS, + PCI_DEVICE_ID_MCST_VIRT_PCI_BRIDGE, dev))) { + u32 addr; + void __iomem * scrb; + pci_read_config_dword(dev, PCI_SCBA_0, &addr); + addr &= ~3; + scrb = ioremap(addr, 0x1000); + 
printk(KERN_DEBUG "%s: SCRB at %08x:\n", pci_name(dev), addr); + printk(KERN_DEBUG " A2_BA0 : %08x\n", readl(scrb + A2_BA0)); + printk(KERN_DEBUG " A2_BUA0: %08x\n", readl(scrb + A2_BUA0)); + printk(KERN_DEBUG " A2_BA1 : %08x\n", readl(scrb + A2_BA1)); + printk(KERN_DEBUG " A2_BUA1: %08x\n", readl(scrb + A2_BUA1)); + printk(KERN_DEBUG " A2_BA2 : %08x\n", readl(scrb + A2_BA2)); + printk(KERN_DEBUG " A2_BUA2: %08x\n", readl(scrb + A2_BUA2)); + iounmap(scrb); + } + dev = NULL; + while ((dev = pci_get_device(PCI_VENDOR_ID_ELBRUS, + PCI_DEVICE_ID_MCST_I2CSPI, dev))) { + u32 v, v2; + pci_read_config_dword(dev, 0x50, &v); + pci_read_config_dword(dev, 0x54, &v2); + printk(KERN_DEBUG "%s: LAPIC Message Base & Upper Base Address:" + " %08x, %08x\n", pci_name(dev), v, v2); + pci_read_config_dword(dev, 0x6c, &v); + pci_read_config_dword(dev, 0x70, &v2); + printk(KERN_DEBUG "%s: IOAPIC Message Base & Upper Base Address:" + " %08x, %08x\n", pci_name(dev), v, v2); + } +} +#else +static inline void print_IO_APIC_E90S(void) { } +#endif /* CONFIG_E90S */ + +void print_IO_APICs(void) +{ + int ioapic_idx; + struct irq_cfg *cfg; + unsigned int irq; + struct irq_chip *chip; + + print_IO_APIC_E90S(); + + printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); + for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) + printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", + mpc_ioapic_id(ioapic_idx), + ioapics[ioapic_idx].nr_registers); + + /* + * We are a bit conservative about what we expect. We have to + * know about every hardware change ASAP. 
+ */ + printk(KERN_INFO "testing the IO APIC.......................\n"); + + for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) + print_IO_APIC(ioapic_idx); + + printk(KERN_DEBUG "IRQ to pin mappings:\n"); + for_each_active_irq(irq) { + struct irq_pin_list *entry; + + chip = irq_get_chip(irq); + if (chip != &ioapic_chip) + continue; + + cfg = irq_get_chip_data(irq); + if (!cfg) + continue; + entry = cfg->irq_2_pin; + if (!entry) + continue; + printk(KERN_DEBUG "IRQ%d ", irq); + for_each_irq_pin(entry, cfg->irq_2_pin) + pr_cont("-> %d:%d", entry->apic, entry->pin); + pr_cont("\n"); + } + + printk(KERN_INFO ".................................... done.\n"); +} + +__apicdebuginit(void) save_APIC_field(int base, u32 saved_reg[]) +{ + int i; + + for (i = 0; i < 8; i++) + saved_reg[i] = apic_read(base + i*0x10); +} + +__apicdebuginit(void) print_saved_APIC_field(const u32 saved_reg[]) +{ + int i, j, bit = 0; + u32 reg; + + for (i = 0; i < 8; i++) { + reg = saved_reg[i]; + for (j = 0; j < 32; j++) { + if (reg & 1) + pr_cont("0x%x ", bit); + reg = reg >> 1; + bit++; + } + } + pr_cont("\n"); +} + +__apicdebuginit(void) print_APIC_field(int base) +{ + int i, j, bit = 0; + u32 reg; + + for (i = 0; i < 8; i++) { + reg = apic_read(base + i*0x10); + for (j = 0; j < 32; j++) { + if (reg & 1) + pr_cont("0x%x ", bit); + reg = reg >> 1; + bit++; + } + } + pr_cont("\n"); +} + +struct saved_apic_regs { + bool valid; + int hard_cpu; + int maxlvt; + u64 icr; + u32 ver; + u32 apic_id; + u32 apic_lvr; + u32 apic_taskpri; + u32 apic_arbpri; + u32 apic_procpri; + u32 apic_ldr; + u32 apic_dfr; + u32 apic_spiv; + u32 apic_esr; + u32 apic_lvtt; + u32 apic_lvtpc; + u32 apic_lvt0; + u32 apic_lvt1; + u32 apic_lvterr; + u32 apic_tmict; + u32 apic_tmcct; + u32 apic_tdcr; + u32 apic_isr[8]; + u32 apic_tmr[8]; + u32 apic_irr[8]; +}; + +__apicdebuginit(void) print_saved_local_APIC(int cpu, + const struct saved_apic_regs *regs) +{ + pr_info("printing local APIC contents on CPU#%d/%d:\n", + cpu, 
regs->hard_cpu); + pr_info("... APIC ID: %08x (%01x)\n", regs->apic_id, + apic->get_apic_id(regs->apic_id)); + pr_info("... APIC VERSION: %08x\n", regs->apic_lvr); + pr_info("... APIC TASKPRI: %08x (%02x)\n", regs->apic_taskpri, + regs->apic_taskpri & APIC_TPRI_MASK); + + if (APIC_INTEGRATED(regs->ver)) { /* !82489DX */ + if (!APIC_XAPIC(regs->ver)) { + pr_info("... APIC ARBPRI: %08x (%02x)\n", + regs->apic_arbpri, + regs->apic_arbpri & APIC_ARBPRI_MASK); + } + pr_info("... APIC PROCPRI: %08x\n", regs->apic_procpri); + } + + pr_info("... APIC LDR: %08x\n", regs->apic_ldr); + if (!x2apic_enabled()) + pr_info("... APIC DFR: %08x\n", regs->apic_dfr); + pr_info("... APIC SPIV: %08x\n", regs->apic_spiv); + + pr_info("... APIC ISR field: "); + print_saved_APIC_field(regs->apic_isr); + pr_info("... APIC TMR field: "); + print_saved_APIC_field(regs->apic_tmr); + pr_info("... APIC IRR field: "); + print_saved_APIC_field(regs->apic_irr); + + if (APIC_INTEGRATED(ver)) /* !82489DX */ + pr_info("... APIC ESR: %08x\n", regs->apic_esr); + + pr_info("... APIC ICR: %08x\n", (u32) regs->icr); + pr_info("... APIC ICR2: %08x\n", (u32) (regs->icr >> 32)); + + pr_info("... APIC LVTT: %08x\n", regs->apic_lvtt); + + if (regs->maxlvt > 3) /* PC is LVT#4. */ + pr_info("... APIC LVTPC: %08x\n", regs->apic_lvtpc); + pr_info("... APIC LVT0: %08x\n", regs->apic_lvt0); + pr_info("... APIC LVT1: %08x\n", regs->apic_lvt1); + + if (regs->maxlvt > 2) /* ERR is LVT#3. */ + pr_info("... APIC LVTERR: %08x\n", regs->apic_lvterr); + + pr_info("... APIC TMICT: %08x\n", regs->apic_tmict); + pr_info("... APIC TMCCT: %08x\n", regs->apic_tmcct); + pr_info("... 
APIC TDCR: %08x\n", regs->apic_tdcr); +} + +__apicdebuginit(void) save_local_APIC(void *apic_regs) +{ + struct saved_apic_regs *regs = apic_regs; + + regs->hard_cpu = hard_smp_processor_id(); + regs->apic_id = apic_read(APIC_ID); + regs->apic_lvr = apic_read(APIC_LVR); + regs->ver = GET_APIC_VERSION(regs->apic_lvr); + /* Note that we don't have APIC_RRR even though maxlvt is 3 */ + regs->maxlvt = lapic_get_maxlvt(); + + regs->apic_taskpri = apic_read(APIC_TASKPRI); + + if (APIC_INTEGRATED(regs->ver)) { /* !82489DX */ + if (!APIC_XAPIC(regs->ver)) + regs->apic_arbpri = apic_read(APIC_ARBPRI); + regs->apic_procpri = apic_read(APIC_PROCPRI); + } + + regs->apic_ldr = apic_read(APIC_LDR); + if (!x2apic_enabled()) + regs->apic_dfr = apic_read(APIC_DFR); + regs->apic_spiv = apic_read(APIC_SPIV); + + save_APIC_field(APIC_ISR, regs->apic_isr); + save_APIC_field(APIC_TMR, regs->apic_tmr); + save_APIC_field(APIC_IRR, regs->apic_irr); + + if (APIC_INTEGRATED(regs->ver)) { /* !82489DX */ + if (regs->maxlvt > 3) /* Due to the Pentium erratum 3AP. */ + apic_write(APIC_ESR, 0); + + regs->apic_esr = apic_read(APIC_ESR); + } + + regs->icr = apic_icr_read(); + + regs->apic_lvtt = apic_read(APIC_LVTT); + + if (regs->maxlvt > 3) /* PC is LVT#4. */ + regs->apic_lvtpc = apic_read(APIC_LVTPC); + regs->apic_lvt0 = apic_read(APIC_LVT0); + regs->apic_lvt1 = apic_read(APIC_LVT1); + + if (regs->maxlvt > 2) /* ERR is LVT#3. */ + regs->apic_lvterr = apic_read(APIC_LVTERR); + + regs->apic_tmict = apic_read(APIC_TMICT); + regs->apic_tmcct = apic_read(APIC_TMCCT); + regs->apic_tdcr = apic_read(APIC_TDCR); + + regs->valid = true; +} + +__apicdebuginit(void) print_local_APIC(void *dummy) +{ + unsigned int v, ver, maxlvt; + u64 icr; + + pr_info("printing local APIC contents on CPU#%d/%d:\n", + smp_processor_id(), hard_smp_processor_id()); + v = apic_read(APIC_ID); + pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id()); + v = apic_read(APIC_LVR); + pr_info("... 
APIC VERSION: %08x\n", v); + ver = GET_APIC_VERSION(v); + /* Note that we don't have RRR even though maxlvt is 3 */ + maxlvt = lapic_get_maxlvt(); + + v = apic_read(APIC_TASKPRI); + pr_info("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK); + + if (APIC_INTEGRATED(ver)) { /* !82489DX */ + if (!APIC_XAPIC(ver)) { + v = apic_read(APIC_ARBPRI); + pr_info("... APIC ARBPRI: %08x (%02x)\n", v, + v & APIC_ARBPRI_MASK); + } + v = apic_read(APIC_PROCPRI); + pr_info("... APIC PROCPRI: %08x\n", v); + } + + v = apic_read(APIC_LDR); + pr_info("... APIC LDR: %08x\n", v); + if (!x2apic_enabled()) { + v = apic_read(APIC_DFR); + pr_info("... APIC DFR: %08x\n", v); + } + v = apic_read(APIC_SPIV); + pr_info("... APIC SPIV: %08x\n", v); + + pr_info("... APIC ISR field: "); + print_APIC_field(APIC_ISR); + pr_info("... APIC TMR field: "); + print_APIC_field(APIC_TMR); + pr_info("... APIC IRR field: "); + print_APIC_field(APIC_IRR); + + if (APIC_INTEGRATED(ver)) { /* !82489DX */ + if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ + apic_write(APIC_ESR, 0); + + v = apic_read(APIC_ESR); + pr_info("... APIC ESR: %08x\n", v); + } + + icr = apic_icr_read(); + pr_info("... APIC ICR: %08x\n", (u32)icr); + pr_info("... APIC ICR2: %08x\n", (u32)(icr >> 32)); + + v = apic_read(APIC_LVTT); + pr_info("... APIC LVTT: %08x\n", v); + + if (maxlvt > 3) { /* PC is LVT#4. */ + v = apic_read(APIC_LVTPC); + pr_info("... APIC LVTPC: %08x\n", v); + } + v = apic_read(APIC_LVT0); + pr_info("... APIC LVT0: %08x\n", v); + v = apic_read(APIC_LVT1); + pr_info("... APIC LVT1: %08x\n", v); + + if (maxlvt > 2) { /* ERR is LVT#3. */ + v = apic_read(APIC_LVTERR); + pr_info("... APIC LVTERR: %08x\n", v); + } + + v = apic_read(APIC_TMICT); + pr_info("... APIC TMICT: %08x\n", v); + v = apic_read(APIC_TMCCT); + pr_info("... APIC TMCCT: %08x\n", v); + v = apic_read(APIC_TDCR); + pr_info("... 
APIC TDCR: %08x\n", v); +} + +#ifdef CONFIG_PIC +__apicdebuginit(void) print_PIC(void) +{ + unsigned int v; + unsigned long flags; + + if (!legacy_pic->nr_legacy_irqs) + return; + + printk(KERN_DEBUG "\nprinting PIC contents\n"); + + raw_spin_lock_irqsave(&i8259A_lock, flags); + + v = inb(0xa1) << 8 | inb(0x21); + printk(KERN_DEBUG "... PIC IMR: %04x\n", v); + + v = inb(0xa0) << 8 | inb(0x20); + printk(KERN_DEBUG "... PIC IRR: %04x\n", v); + + outb(0x0b,0xa0); + outb(0x0b,0x20); + v = inb(0xa0) << 8 | inb(0x20); + outb(0x0a,0xa0); + outb(0x0a,0x20); + + raw_spin_unlock_irqrestore(&i8259A_lock, flags); + + printk(KERN_DEBUG "... PIC ISR: %04x\n", v); + + v = inb(0x4d1) << 8 | inb(0x4d0); + printk(KERN_DEBUG "... PIC ELCR: %04x\n", v); +} +#endif + +int print_local_APICs(bool force) +{ + int cpu; + + if (!force && apic_verbosity == APIC_QUIET) + return 1; + +#ifdef CONFIG_PIC + print_PIC(); +#endif + + /* don't print out if apic is not there */ + if (!cpu_has_apic && !apic_from_smp_config()) + return 0; + + preempt_disable(); + for_each_online_cpu(cpu) { + struct saved_apic_regs regs; + + if (cpu == smp_processor_id()) { + print_local_APIC(NULL); + continue; + } + + regs.valid = false; +#ifdef CONFIG_E2K + /* This function can be called through SysRq under + * disabled interrupts, so we have to be careful + * and use nmi_call_function() with a timeout + * instead of smp_call_function(). 
*/ + nmi_call_function_single(cpu, save_local_APIC, ®s, 1, 30000); +#else + smp_call_function_single(cpu, save_local_APIC, ®s, 1); +#endif + if (regs.valid) + print_saved_local_APIC(cpu, ®s); + } + preempt_enable(); + + return 0; +} + + +#ifdef CONFIG_PIC +/* Where if anywhere is the i8259 connect in external int mode */ +static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; +#endif + +void __init_recv enable_IO_APIC(void) +{ +#ifdef CONFIG_PIC + int i8259_apic, i8259_pin; + int apic; + + if (!legacy_pic->nr_legacy_irqs) + return; + + for(apic = 0; apic < nr_ioapics; apic++) { + int pin; + /* See if any of the pins is in ExtINT mode */ + for (pin = 0; pin < ioapics[apic].nr_registers; pin++) { + struct IO_APIC_route_entry entry; + entry = ioapic_read_entry(apic, pin); + + /* If the interrupt line is enabled and in ExtInt mode + * I have found the pin where the i8259 is connected. + */ + if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) { + ioapic_i8259.apic = apic; + ioapic_i8259.pin = pin; + goto found_i8259; + } + } + } + found_i8259: + /* Look to see what if the MP table has reported the ExtINT */ + /* If we could not find the appropriate pin by looking at the ioapic + * the i8259 probably is not connected the ioapic but give the + * mptable a chance anyway. 
+ */ + i8259_pin = find_isa_irq_pin(0, mp_ExtINT); + i8259_apic = find_isa_irq_apic(0, mp_ExtINT); + /* Trust the MP table if nothing is setup in the hardware */ + if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) { + printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n"); + ioapic_i8259.pin = i8259_pin; + ioapic_i8259.apic = i8259_apic; + } + /* Complain if the MP table and the hardware disagree */ + if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) && + (i8259_pin >= 0) && (ioapic_i8259.pin >= 0)) + { + printk(KERN_WARNING "ExtINT in hardware and MP table differ\n"); + } +#endif + +#if 0 + /* + * Do not trust the IO-APIC being empty at bootup + */ + clear_IO_APIC(); +#endif +} + +void native_disable_io_apic(void) +{ +#ifdef CONFIG_PIC + /* + * If the i8259 is routed through an IOAPIC + * Put that IOAPIC in virtual wire mode + * so legacy interrupts can be delivered. + */ + if (ioapic_i8259.pin != -1) { + struct IO_APIC_route_entry entry; + + memset(&entry, 0, sizeof(entry)); + entry.mask = 0; /* Enabled */ + entry.trigger = 0; /* Edge */ + entry.irr = 0; + entry.polarity = 0; /* High */ + entry.delivery_status = 0; + entry.dest_mode = 0; /* Physical */ + entry.delivery_mode = dest_ExtINT; /* ExtInt */ + entry.vector = 0; + entry.dest = read_apic_id(); + + /* + * Add it to the IO-APIC irq-routing table: + */ + ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry); + } +#endif + + if (cpu_has_apic || apic_from_smp_config()) +#ifdef CONFIG_PIC + disconnect_bsp_APIC(ioapic_i8259.pin != -1); +#else + disconnect_bsp_APIC(0); +#endif +} + +/* + * Not an __init, needed by the reboot code + */ +void disable_IO_APIC(void) +{ + /* + * Clear the IO-APIC before rebooting: + */ + clear_IO_APIC(); + +#ifdef CONFIG_PIC + if (!legacy_pic->nr_legacy_irqs) + return; +#endif + +#if 0 + x86_io_apic_ops.disable(); +#else + native_disable_io_apic(); +#endif +} + +#if defined CONFIG_L_X86_32 || defined CONFIG_E2K || defined 
CONFIG_E90S +# if defined CONFIG_E2K || defined CONFIG_E90S +int get_physical_broadcast(void) +{ + return 0xff; +} +# endif +/* + * function to set the IO-APIC physical IDs based on the + * values stored in the MPC table. + * + * by Matt Domsch Tue Dec 21 12:25:05 CST 1999 + */ +void __init_recv setup_ioapic_ids_from_mpc_nocheck(void) +{ + union IO_APIC_reg_00 reg_00; + physid_mask_t phys_id_present_map; + int ioapic_idx; + int i; + unsigned char old_id; + unsigned long flags; + + /* + * This is broken; anything with a real cpu count has to + * circumvent this idiocy regardless. + */ + apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map); + + /* + * Set the IOAPIC ID to the value stored in the MPC table. + */ + for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) { + /* Read the register 0 value */ + raw_spin_lock_irqsave(&ioapic_lock, flags); + reg_00.raw = io_apic_read(ioapic_idx, 0); + raw_spin_unlock_irqrestore(&ioapic_lock, flags); + + old_id = mpc_ioapic_id(ioapic_idx); + + if (mpc_ioapic_id(ioapic_idx) >= get_physical_broadcast()) { + printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n", + ioapic_idx, mpc_ioapic_id(ioapic_idx)); + printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", + reg_00.bits.ID); + ioapics[ioapic_idx].mp_config.apicid = reg_00.bits.ID; + } + + /* + * Sanity check, is the ID really free? Every APIC in a + * system must have a unique ID or we get lots of nice + * 'stuck on smp_invalidate_needed IPI wait' messages. + */ + if (apic->check_apicid_used(&phys_id_present_map, + mpc_ioapic_id(ioapic_idx))) { + printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", + ioapic_idx, mpc_ioapic_id(ioapic_idx)); + for (i = 0; i < get_physical_broadcast(); i++) + if (!physid_isset(i, phys_id_present_map)) + break; + if (i >= get_physical_broadcast()) + panic("Max APIC ID exceeded!\n"); + printk(KERN_ERR "... fixing up to %d. 
(tell your hw vendor)\n", + i); + physid_set(i, phys_id_present_map); + ioapics[ioapic_idx].mp_config.apicid = i; + } else { + physid_mask_t tmp; + apic->apicid_to_cpu_present(mpc_ioapic_id(ioapic_idx), + &tmp); + apic_printk(APIC_VERBOSE, "Setting %d in the " + "phys_id_present_map\n", + mpc_ioapic_id(ioapic_idx)); + physids_or(phys_id_present_map, phys_id_present_map, tmp); + } + +#if defined CONFIG_E2K || defined CONFIG_E90S + /* + * Adjust the IOLINK table if the ID changed. + */ + if (old_id != mpc_ioapic_id(ioapic_idx)) + mp_fix_io_apicid(old_id, mpc_ioapic_id(ioapic_idx)); +#endif + + /* + * We need to adjust the IRQ routing table + * if the ID changed. + */ + if (old_id != mpc_ioapic_id(ioapic_idx)) + for (i = 0; i < mp_irq_entries; i++) + if (mp_irqs[i].dstapic == old_id) + mp_irqs[i].dstapic + = mpc_ioapic_id(ioapic_idx); + + /* + * Update the ID register according to the right value + * from the MPC table if they are different. + */ + if (mpc_ioapic_id(ioapic_idx) == reg_00.bits.ID) + continue; + + apic_printk(APIC_VERBOSE, KERN_INFO + "...changing IO-APIC physical APIC ID to %d ...", + mpc_ioapic_id(ioapic_idx)); + + reg_00.bits.ID = mpc_ioapic_id(ioapic_idx); + raw_spin_lock_irqsave(&ioapic_lock, flags); + io_apic_write(ioapic_idx, 0, reg_00.raw); + raw_spin_unlock_irqrestore(&ioapic_lock, flags); + + /* + * Sanity check + */ + raw_spin_lock_irqsave(&ioapic_lock, flags); + reg_00.raw = io_apic_read(ioapic_idx, 0); + raw_spin_unlock_irqrestore(&ioapic_lock, flags); + if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx)) + pr_cont("could not set ID!\n"); + else + apic_printk(APIC_VERBOSE, " ok.\n"); + } +} + +#if 0 +void __init setup_ioapic_ids_from_mpc(void) +{ + + if (acpi_ioapic) + return; + /* + * Don't check I/O APIC IDs for xAPIC systems. They have + * no meaning without the serial APIC bus. 
+ */ + if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) + || APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) + return; + setup_ioapic_ids_from_mpc_nocheck(); +} +#endif +#endif + +#ifdef CONFIG_PIC +int no_timer_check __initdata; + +static int __init notimercheck(char *s) +{ + no_timer_check = 1; + return 1; +} +__setup("no_timer_check", notimercheck); + +/* + * There is a nasty bug in some older SMP boards, their mptable lies + * about the timer IRQ. We do the following to work around the situation: + * + * - timer IRQ defaults to IO-APIC IRQ + * - if this function detects that timer IRQs are defunct, then we fall + * back to ISA timer IRQs + */ +static int __init timer_irq_works(void) +{ + unsigned long t1 = jiffies; + unsigned long flags; + + if (no_timer_check) + return 1; + + local_save_flags(flags); + local_irq_enable(); + /* Let ten ticks pass... */ + mdelay((10 * 1000) / HZ); + local_irq_restore(flags); + + /* + * Expect a few ticks at least, to be sure some possible + * glue logic does not lock up after one or two first + * ticks in a non-ExtINT mode. Also the local APIC + * might have cached one ExtINT interrupt. Finally, at + * least one tick may be lost due to delays. + */ + + /* jiffies wrap? */ + if (time_after(jiffies, t1 + 4)) + return 1; + return 0; +} +#endif + +/* + * In the SMP+IOAPIC case it might happen that there are an unspecified + * number of pending IRQ events unhandled. These cases are very rare, + * so we 'resend' these IRQs via IPIs, to the same CPU. It's much + * better to do it this way as thus we do not have to be aware of + * 'pending' interrupts in the IRQ path, except at this point. + */ +/* + * Edge triggered needs to resend any interrupt + * that was delayed but this is now handled in the device + * independent code. + */ + +/* + * Starting up a edge-triggered IO-APIC interrupt is + * nasty - we need to make sure that we get the edge. 
+ * If it is already asserted for some reason, we need + * return 1 to indicate that is was pending. + * + * This is not complete - we should be able to fake + * an edge even if it isn't on the 8259A... + */ + +static unsigned int startup_ioapic_irq(struct irq_data *data) +{ + int was_pending = 0; +#ifdef CONFIG_PIC + int irq = data->irq; +#endif + unsigned long flags; + + apic_printk(APIC_DEBUG, KERN_DEBUG "Starting up IO-APIC irq %u\n", + data->irq); + raw_spin_lock_irqsave(&ioapic_lock, flags); +#ifdef CONFIG_PIC + if (irq < legacy_pic->nr_legacy_irqs) { + legacy_pic->mask(irq); + if (legacy_pic->irq_pending(irq)) + was_pending = 1; + } +#endif + __unmask_ioapic(data->chip_data); + raw_spin_unlock_irqrestore(&ioapic_lock, flags); + + return was_pending; +} + +int ioapic_retrigger_irq(struct irq_data *data) +{ + struct irq_cfg *cfg = data->chip_data; + unsigned long flags; + int cpu; + + raw_spin_lock_irqsave(&vector_lock, flags); + cpu = cpumask_first_and(cfg->domain, cpu_online_mask); + apic->send_IPI_mask(cpumask_of(cpu), cfg->vector); + raw_spin_unlock_irqrestore(&vector_lock, flags); + + return 1; +} + +/* + * Level and edge triggered IO-APIC interrupts need different handling, + * so we use two separate IRQ descriptors. Edge triggered IRQs can be + * handled with the level-triggered descriptor, but that one has slightly + * more overhead. Level-triggered interrupts cannot be handled with the + * edge-triggered handler, without risking IRQ storms and other ugly + * races. 
+ */ + +#ifdef CONFIG_SMP +static void send_cleanup_vector(struct irq_cfg *cfg) +{ + cpumask_var_t cleanup_mask; + + if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { + unsigned int i; + for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) + apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); + } else { + cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); + apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); + free_cpumask_var(cleanup_mask); + } + apic_printk(APIC_DEBUG, KERN_DEBUG "Finished moving vector %d\n", + cfg->vector); + cfg->move_in_progress = 0; +} + +asmlinkage void smp_irq_move_cleanup_interrupt(struct pt_regs *regs) +{ + unsigned vector, me; + + ack_APIC_irq(); + l_irq_enter(); + exit_idle(); + + me = smp_processor_id(); + for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { + int irq; + unsigned int irr; + struct irq_desc *desc; + struct irq_cfg *cfg; + irq = __this_cpu_read(vector_irq[vector]); + + if (irq <= VECTOR_UNDEFINED) + continue; + + desc = irq_to_desc(irq); + if (!desc) + continue; + + cfg = irq_cfg(irq); + if (!cfg) + continue; + + raw_spin_lock(&desc->lock); + + /* + * Check if the irq migration is in progress. If so, we + * haven't received the cleanup request yet for this irq. + */ + if (cfg->move_in_progress) + goto unlock; + + if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) + goto unlock; + + irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); + /* + * Check if the vector that needs to be cleanedup is + * registered at the cpu's IRR. If so, then this is not + * the best time to clean it up. Lets clean it up in the + * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR + * to myself. 
+ */ + if (irr & (1 << (vector % 32))) { + apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR); + goto unlock; + } + __this_cpu_write(vector_irq[vector], -1); +unlock: + raw_spin_unlock(&desc->lock); + } + + l_irq_exit(); +} + +static void irq_complete_move_vector(struct irq_cfg *cfg, unsigned vector) +{ + unsigned me; + + if (likely(!cfg->move_in_progress)) + return; + + me = smp_processor_id(); + + if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) + send_cleanup_vector(cfg); +} + +static void irq_complete_move(struct irq_cfg *cfg) +{ +#if defined CONFIG_E2K + irq_complete_move_vector(cfg, (unsigned int) + get_irq_regs()->interrupt_vector); +#elif defined CONFIG_E90S + irq_complete_move_vector(cfg, (unsigned int) + e90s_irq_pending[smp_processor_id()].vector); +#else + + irq_complete_move_vector(cfg, ~get_irq_regs()->orig_ax); +#endif +} + +void apic_irq_force_complete_move(struct irq_desc *desc) +{ + struct irq_data *data = irq_desc_get_irq_data(desc); + struct irq_cfg *cfg; + unsigned int irq; + + if (!data) + return; + + irq = data->irq; + cfg = irq_data_get_irq_chip_data(data); + if (cfg) + irq_complete_move_vector(cfg, cfg->vector); +} +#else +static inline void irq_complete_move(struct irq_cfg *cfg) { } +#endif + +static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) +{ + int apic, pin; + struct irq_pin_list *entry; + u8 vector = cfg->vector; + + for_each_irq_pin(entry, cfg->irq_2_pin) { + unsigned int reg; + + apic = entry->apic; + pin = entry->pin; + +#if defined CONFIG_E2K || defined CONFIG_E90S + /* We cannot write the whole entry as once, only 32 bits + * at a time. So mask the IRQ while changing entry to + * avoid races in half-changed entries. 
*/ + reg = io_apic_read(apic, 0x10 + pin*2); + reg &= ~IO_APIC_REDIR_VECTOR_MASK; + reg |= vector; + io_apic_modify(apic, 0x10 + pin*2, reg | IO_APIC_REDIR_MASKED); + io_apic_write(apic, 0x11 + pin*2, dest); + if (!(reg & IO_APIC_REDIR_MASKED)) + io_apic_write(apic, 0x10 + pin*2, reg); +#else + io_apic_write(apic, 0x11 + pin*2, dest); + reg = io_apic_read(apic, 0x10 + pin*2); + reg &= ~IO_APIC_REDIR_VECTOR_MASK; + reg |= vector; + io_apic_modify(apic, 0x10 + pin*2, reg); +#endif + } +} + +/* + * Either sets data->affinity to a valid value, and returns + * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and + * leaves data->affinity untouched. + */ +int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, + unsigned int *dest_id) +{ + struct irq_cfg *cfg = data->chip_data; + unsigned int irq = data->irq; + int err; + + if (!IS_ENABLED(CONFIG_SMP)) + return -1; + + if (!cpumask_intersects(mask, cpu_online_mask)) + return -EINVAL; + + err = assign_irq_vector(irq, cfg, mask); + if (err) + return err; + + err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id); + if (err) { + if (assign_irq_vector(irq, cfg, + irq_data_get_affinity_mask(data))) + pr_err("Failed to recover vector for irq %d\n", irq); + return err; + } + + cpumask_copy(irq_data_get_affinity_mask(data), mask); + + return 0; +} + + +int native_ioapic_set_affinity(struct irq_data *data, + const struct cpumask *mask, + bool force) +{ + unsigned int dest, irq = data->irq; + unsigned long flags; + int ret; + + if (!IS_ENABLED(CONFIG_SMP)) + return -1; + + raw_spin_lock_irqsave(&ioapic_lock, flags); + ret = __ioapic_set_affinity(data, mask, &dest); + if (!ret) { + /* Only the high 8 bits are valid. 
*/ + dest = SET_APIC_LOGICAL_ID(dest); + __target_IO_APIC_irq(irq, dest, data->chip_data); + ret = IRQ_SET_MASK_OK_NOCOPY; + } + raw_spin_unlock_irqrestore(&ioapic_lock, flags); + return ret; +} + +void ack_apic_edge(struct irq_data *data) +{ + irq_complete_move(data->chip_data); + irq_move_irq(data); + ack_APIC_irq(); +} + +atomic_t irq_mis_count; + +#ifdef CONFIG_GENERIC_PENDING_IRQ +static bool io_apic_level_ack_pending(struct irq_cfg *cfg) +{ + struct irq_pin_list *entry; + unsigned long flags; + + raw_spin_lock_irqsave(&ioapic_lock, flags); + for_each_irq_pin(entry, cfg->irq_2_pin) { + unsigned int reg; + int pin; + + pin = entry->pin; + reg = io_apic_read(entry->apic, 0x10 + pin*2); + /* Is the remote IRR bit set? */ + if (reg & IO_APIC_REDIR_REMOTE_IRR) { + raw_spin_unlock_irqrestore(&ioapic_lock, flags); + return true; + } + } + raw_spin_unlock_irqrestore(&ioapic_lock, flags); + + return false; +} + +static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg) +{ + /* If we are moving the irq we need to mask it */ + if (unlikely(irqd_is_setaffinity_pending(data))) { + if (!irqd_irq_masked(data)) + mask_ioapic(cfg); + return true; + } + return false; +} + +static inline void ioapic_irqd_unmask(struct irq_data *data, + struct irq_cfg *cfg, bool masked) +{ + if (unlikely(masked)) { + /* Only migrate the irq if the ack has been received. + * + * On rare occasions the broadcast level triggered ack gets + * delayed going to ioapics, and if we reprogram the + * vector while Remote IRR is still set the irq will never + * fire again. + * + * To prevent this scenario we read the Remote IRR bit + * of the ioapic. This has two effects. + * - On any sane system the read of the ioapic will + * flush writes (and acks) going to the ioapic from + * this cpu. + * - We get to see if the ACK has actually been delivered. 
+ * + * Based on failed experiments of reprogramming the + * ioapic entry from outside of irq context starting + * with masking the ioapic entry and then polling until + * Remote IRR was clear before reprogramming the + * ioapic I don't trust the Remote IRR bit to be + * completey accurate. + * + * However there appears to be no other way to plug + * this race, so if the Remote IRR bit is not + * accurate and is causing problems then it is a hardware bug + * and you can go talk to the chipset vendor about it. + */ + if (!io_apic_level_ack_pending(cfg)) + irq_move_masked_irq(data); + /* If the IRQ is masked in the core, leave it: */ + if (!irqd_irq_masked(data)) + unmask_ioapic(cfg); + } +} +#else +static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg) +{ + return false; +} +static inline void ioapic_irqd_unmask(struct irq_data *data, + struct irq_cfg *cfg, bool masked) +{ +} +#endif + +void ack_apic_level(struct irq_data *data) +{ + struct irq_cfg *cfg = data->chip_data; + int i, irq = data->irq, eoi_bug = false; + unsigned long v, flags = 0; + bool masked; + +#ifdef CONFIG_E2K + eoi_bug = cpu_has(CPU_HWBUG_LEVEL_EOI); +#elif CONFIG_E90S + eoi_bug = get_cpu_revision() <= 0x11; +#endif + + irq_complete_move(cfg); + masked = ioapic_irqd_mask(data, cfg); + + /* + * It appears there is an erratum which affects at least version 0x11 + * of I/O APIC (that's the 82093AA and cores integrated into various + * chipsets). Under certain conditions a level-triggered interrupt is + * erroneously delivered as edge-triggered one but the respective IRR + * bit gets set nevertheless. As a result the I/O unit expects an EOI + * message but it will never arrive and further interrupts are blocked + * from the source. The exact reason is so far unknown, but the + * phenomenon was observed when two consecutive interrupt requests + * from a given source get delivered to the same CPU and the source is + * temporarily disabled in between. 
+ * + * A workaround is to simulate an EOI message manually. We achieve it + * by setting the trigger mode to edge and then to level when the edge + * trigger mode gets detected in the TMR of a local APIC for a + * level-triggered interrupt. We mask the source for the time of the + * operation to prevent an edge-triggered interrupt escaping meanwhile. + * The idea is from Manfred Spraul. --macro + * + * Also in the case when cpu goes offline, fixup_irqs() will forward + * any unhandled interrupt on the offlined cpu to the new cpu + * destination that is handling the corresponding interrupt. This + * interrupt forwarding is done via IPI's. Hence, in this case also + * level-triggered io-apic interrupt will be seen as an edge + * interrupt in the IRR. And we can't rely on the cpu's EOI + * to be broadcasted to the IO-APIC's which will clear the remoteIRR + * corresponding to the level-triggered interrupt. Hence on IO-APIC's + * supporting EOI register, we do an explicit EOI to clear the + * remote IRR and on IO-APIC's which don't have an EOI register, + * we use the above logic (mask+edge followed by unmask+level) from + * Manfred Spraul to clear the remote IRR. + */ + i = cfg->vector; + v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); + + if (eoi_bug) + raw_local_irq_save(flags); + + /* + * We must acknowledge the irq before we move it or the acknowledge will + * not propagate properly. + */ + ack_APIC_irq(); + + /* + * Tail end of clearing remote IRR bit (either by delivering the EOI + * message via io-apic EOI register write or simulating it using + * mask+edge followed by unnask+level logic) manually when the + * level triggered interrupt is seen as the edge triggered interrupt + * at the cpu. 
+ */ + if (!(v & (1 << (i & 0x1f)))) { + atomic_inc(&irq_mis_count); + + eoi_ioapic_irq(irq, cfg); + } + + if (eoi_bug) { + do { + v = apic_read(APIC_ISR + ((i & ~0x1f) >> 1)); + } while (v & (1 << (i & 0x1f))); + raw_local_irq_restore(flags); + } + + ioapic_irqd_unmask(data, cfg, masked); +} + +#ifdef CONFIG_EPIC +void ioapic_ack_epic_edge(struct irq_data *data) +{ + irq_complete_move(data->chip_data); + irq_move_irq(data); + ack_epic_irq(); +} + +void ioapic_ack_epic_level(struct irq_data *data) +{ + struct irq_cfg *cfg = data->chip_data; + bool masked; + + irq_complete_move(cfg); + masked = ioapic_irqd_mask(data, cfg); + + ack_epic_irq(); + + /* + * To send a message from CEPIC to IOAPIC we need to write HC_IOAPIC_EOI + * SIC register + */ + epic_ioapic_eoi(cfg->vector); + + ioapic_irqd_unmask(data, cfg, masked); +} +#endif + +static struct irq_chip ioapic_chip __read_mostly = { + .name = "IO-APIC", + .irq_startup = startup_ioapic_irq, + .irq_mask = mask_ioapic_irq, + .irq_unmask = unmask_ioapic_irq, + .irq_ack = ioapic_ack_pic_edge, + .irq_eoi = ioapic_ack_pic_level, + .irq_set_affinity = native_ioapic_set_affinity, + .irq_retrigger = ioapic_retrigger_irq, +}; + +static inline void init_IO_APIC_traps(void) +{ + struct irq_cfg *cfg; + unsigned int irq; + + /* + * NOTE! The local APIC isn't very good at handling + * multiple interrupts at the same interrupt level. + * As the interrupt level is determined by taking the + * vector number and shifting that right by 4, we + * want to spread these out a bit so that they don't + * all fall in the same interrupt level. + * + * Also, we've got to be careful not to trash gate + * 0x80, because int 0x80 is hm, kind of importantish. ;) + */ + for_each_active_irq(irq) { + if (!irqchip_is_ioapic(irq_get_chip(irq))) + continue; + + cfg = irq_get_chip_data(irq); + if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { +#ifdef CONFIG_PIC + /* + * Hmm.. 
We don't have an entry for this, + * so default to an old-fashioned 8259 + * interrupt if we can.. + */ + if (irq < legacy_pic->nr_legacy_irqs) + legacy_pic->make_irq(irq); + else +#endif + /* Strange. Oh, well.. */ + irq_set_chip(irq, &no_irq_chip); + } + } +} + +/* + * The local APIC irq-chip implementation: + */ + +#ifdef CONFIG_PIC +static void mask_lapic_irq(struct irq_data *data) +{ + unsigned long v; + + v = apic_read(APIC_LVT0); + apic_write(APIC_LVT0, v | APIC_LVT_MASKED); +} + +static void unmask_lapic_irq(struct irq_data *data) +{ + unsigned long v; + + v = apic_read(APIC_LVT0); + apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); +} + +static void ack_lapic_irq(struct irq_data *data) +{ + ack_APIC_irq(); +} + +static struct irq_chip lapic_chip __read_mostly = { + .name = "local-APIC", + .irq_mask = mask_lapic_irq, + .irq_unmask = unmask_lapic_irq, + .irq_ack = ack_lapic_irq, +}; + +static void lapic_register_intr(int irq) +{ + irq_clear_status_flags(irq, IRQ_LEVEL); + irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, + "edge"); +} + +/* + * This looks a bit hackish but it's about the only one way of sending + * a few INTA cycles to 8259As and any associated glue logic. ICR does + * not support the ExtINT mode, unfortunately. We need to send these + * cycles as some i82489DX-based boards have glue logic that keeps the + * 8259A interrupt line asserted until INTA. 
--macro + */ +static inline void __init unlock_ExtINT_logic(void) +{ + int apic, pin, i; + struct IO_APIC_route_entry entry0, entry1; + unsigned char save_control, save_freq_select; + + pin = find_isa_irq_pin(8, mp_INT); + if (pin == -1) { + WARN_ON_ONCE(1); + return; + } + apic = find_isa_irq_apic(8, mp_INT); + if (apic == -1) { + WARN_ON_ONCE(1); + return; + } + + entry0 = ioapic_read_entry(apic, pin); + clear_IO_APIC_pin(apic, pin); + + memset(&entry1, 0, sizeof(entry1)); + + entry1.dest_mode = 0; /* physical delivery */ + entry1.mask = 0; /* unmask IRQ now */ + entry1.dest = hard_smp_processor_id(); + entry1.delivery_mode = dest_ExtINT; + entry1.polarity = entry0.polarity; + entry1.trigger = 0; + entry1.vector = 0; + + ioapic_write_entry(apic, pin, entry1); + + save_control = CMOS_READ(RTC_CONTROL); + save_freq_select = CMOS_READ(RTC_FREQ_SELECT); + CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6, + RTC_FREQ_SELECT); + CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL); + + i = 100; + while (i-- > 0) { + mdelay(10); + if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF) + i -= 10; + } + + CMOS_WRITE(save_control, RTC_CONTROL); + CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); + clear_IO_APIC_pin(apic, pin); + + ioapic_write_entry(apic, pin, entry0); +} + +static int disable_timer_pin_1 __initdata; +/* Actually the next is obsolete, but keep it for paranoid reasons -AK */ +static int __init disable_timer_pin_setup(char *arg) +{ + disable_timer_pin_1 = 1; + return 0; +} +early_param("disable_timer_pin_1", disable_timer_pin_setup); + +int timer_through_8259 __initdata; + +/* + * This code may look a bit paranoid, but it's supposed to cooperate with + * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ + * is so screwy. Thanks to Brian Perkins for testing/hacking this beast + * fanatically on his truly buggy board. + * + * FIXME: really need to revamp this for all platforms. 
+ */ +static inline void __init check_timer(void) +{ + struct irq_cfg *cfg = irq_get_chip_data(0); + int node = cpu_to_node(0); + int apic1, pin1, apic2, pin2; + unsigned long flags; + int no_pin1 = 0; + + local_irq_save(flags); + + /* + * get/set the timer IRQ vector: + */ + legacy_pic->mask(0); + assign_irq_vector(0, cfg, apic->target_cpus()); + + /* + * As IRQ0 is to be enabled in the 8259A, the virtual + * wire has to be disabled in the local APIC. Also + * timer interrupts need to be acknowledged manually in + * the 8259A for the i82489DX when using the NMI + * watchdog as that APIC treats NMIs as level-triggered. + * The AEOI mode will finish them in the 8259A + * automatically. + */ + apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); + legacy_pic->init(1); + + pin1 = find_isa_irq_pin(0, mp_INT); + apic1 = find_isa_irq_apic(0, mp_INT); + pin2 = ioapic_i8259.pin; + apic2 = ioapic_i8259.apic; + + apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X " + "apic1=%d pin1=%d apic2=%d pin2=%d\n", + cfg->vector, apic1, pin1, apic2, pin2); + + /* + * Some BIOS writers are clueless and report the ExtINTA + * I/O APIC input from the cascaded 8259A as the timer + * interrupt input. So just in case, if only one pin + * was found above, try it both directly and through the + * 8259A. + */ + if (pin1 == -1) { + panic_if_irq_remap("BIOS bug: timer not connected to IO-APIC"); + pin1 = pin2; + apic1 = apic2; + no_pin1 = 1; + } else if (pin2 == -1) { + pin2 = pin1; + apic2 = apic1; + } + + if (pin1 != -1) { + /* + * Ok, does IRQ0 through the IOAPIC work? + */ + if (no_pin1) { + add_pin_to_irq_node(cfg, node, apic1, pin1); + setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); + } else { + /* for edge trigger, setup_ioapic_irq already + * leave it unmasked. + * so only need to unmask if it is level-trigger + * do we really have level trigger timer? 
+ */ + int idx; + idx = find_irq_entry(apic1, pin1, mp_INT); + if (idx != -1 && irq_trigger(idx)) + unmask_ioapic(cfg); + } + if (timer_irq_works()) { + if (disable_timer_pin_1 > 0) + clear_IO_APIC_pin(0, pin1); + goto out; + } + panic_if_irq_remap("timer doesn't work through Interrupt-remapped IO-APIC"); + local_irq_disable(); + clear_IO_APIC_pin(apic1, pin1); + if (!no_pin1) + apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " + "8254 timer not connected to IO-APIC\n"); + + apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer " + "(IRQ0) through the 8259A ...\n"); + apic_printk(APIC_QUIET, KERN_INFO + "..... (found apic %d pin %d) ...\n", apic2, pin2); + /* + * legacy devices should be connected to IO APIC #0 + */ + replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); + setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); + legacy_pic->unmask(0); + if (timer_irq_works()) { + apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); + timer_through_8259 = 1; + goto out; + } + /* + * Cleanup, just in case ... + */ + local_irq_disable(); + legacy_pic->mask(0); + clear_IO_APIC_pin(apic2, pin2); + apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); + } + + apic_printk(APIC_QUIET, KERN_INFO + "...trying to set up timer as Virtual Wire IRQ...\n"); + + lapic_register_intr(0); + apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ + legacy_pic->unmask(0); + + if (timer_irq_works()) { + apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); + goto out; + } + local_irq_disable(); + legacy_pic->mask(0); + apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); + apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); + + apic_printk(APIC_QUIET, KERN_INFO + "...trying to set up timer as ExtINT IRQ...\n"); + + legacy_pic->init(0); + legacy_pic->make_irq(0); + apic_write(APIC_LVT0, APIC_DM_EXTINT); + + unlock_ExtINT_logic(); + + if (timer_irq_works()) { + apic_printk(APIC_QUIET, KERN_INFO "..... 
works.\n"); + goto out; + } + local_irq_disable(); + apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n"); + if (x2apic_preenabled) + apic_printk(APIC_QUIET, KERN_INFO + "Perhaps problem with the pre-enabled x2apic mode\n" + "Try booting with x2apic and interrupt-remapping disabled in the bios.\n"); + panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " + "report. Then try booting with the 'noapic' option.\n"); +out: + local_irq_restore(flags); +} +#endif + +/* + * Traditionally ISA IRQ2 is the cascade IRQ, and is not available + * to devices. However there may be an I/O APIC pin available for + * this interrupt regardless. The pin may be left unconnected, but + * typically it will be reused as an ExtINT cascade interrupt for + * the master 8259A. In the MPS case such a pin will normally be + * reported as an ExtINT interrupt in the MP table. With ACPI + * there is no provision for ExtINT interrupts, and in the absence + * of an override it would be treated as an ordinary ISA I/O APIC + * interrupt, that is edge-triggered and unmasked by default. We + * used to do this, but it caused problems on some systems because + * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using + * the same ExtINT cascade interrupt to drive the local APIC of the + * bootstrap processor. Therefore we refrain from routing IRQ2 to + * the I/O APIC in all cases now. No actual device should request + * it anyway. --macro + */ +#define PIC_IRQS (1UL << PIC_CASCADE_IR) + +void __init setup_IO_APIC(void) +{ +#ifdef CONFIG_PIC + /* + * calling enable_IO_APIC() is moved to setup_local_APIC for BP + */ + io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL; +#endif + + apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); + /* + * Set up IO-APIC IRQ routing. 
+ */ +#if defined CONFIG_E2K || defined CONFIG_E90S + setup_ioapic_ids_from_mpc_nocheck(); +#endif +#if 0 + x86_init.mpparse.setup_ioapic_ids(); + + sync_Arb_IDs(); +#endif + setup_IO_APIC_irqs(); + init_IO_APIC_traps(); +#ifdef CONFIG_PIC + if (legacy_pic->nr_legacy_irqs) + check_timer(); +#endif +} + +/* + * Called after all the initialization is done. If we didn't find any + * APIC bugs then we can allow the modify fast path + */ + +static int __init io_apic_bug_finalize(void) +{ +#if 0 + if (sis_apic_bug == -1) + sis_apic_bug = 0; +#endif + return 0; +} + +late_initcall(io_apic_bug_finalize); + +static void resume_ioapic_id(int ioapic_idx) +{ + unsigned long flags; + union IO_APIC_reg_00 reg_00; + + raw_spin_lock_irqsave(&ioapic_lock, flags); + reg_00.raw = io_apic_read(ioapic_idx, 0); + if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx)) { + reg_00.bits.ID = mpc_ioapic_id(ioapic_idx); + io_apic_write(ioapic_idx, 0, reg_00.raw); + } + raw_spin_unlock_irqrestore(&ioapic_lock, flags); +} + +static void ioapic_resume(void) +{ + int ioapic_idx; + + for (ioapic_idx = nr_ioapics - 1; ioapic_idx >= 0; ioapic_idx--) + resume_ioapic_id(ioapic_idx); + + restore_ioapic_entries(); +} + +static struct syscore_ops ioapic_syscore_ops = { + .suspend = save_ioapic_entries, + .resume = ioapic_resume, +}; + +static int __init ioapic_init_ops(void) +{ + register_syscore_ops(&ioapic_syscore_ops); + + return 0; +} + +device_initcall(ioapic_init_ops); + +/* + * Dynamic irq allocate and deallocation + */ +unsigned int __create_irqs(unsigned int from, unsigned int count, int node) +{ + struct irq_cfg **cfg; + unsigned long flags; + int irq, i; + + if (from < nr_irqs_gsi) + from = nr_irqs_gsi; + + cfg = kzalloc_node(count * sizeof(cfg[0]), GFP_KERNEL, node); + if (!cfg) + return 0; + + irq = alloc_irqs_from(from, count, node); + if (irq < 0) + goto out_cfgs; + + for (i = 0; i < count; i++) { + cfg[i] = alloc_irq_cfg(irq + i, node); + if (!cfg[i]) + goto out_irqs; + } + + 
raw_spin_lock_irqsave(&vector_lock, flags); + for (i = 0; i < count; i++) + if (__assign_irq_vector(irq + i, cfg[i], apic->target_cpus())) + goto out_vecs; + raw_spin_unlock_irqrestore(&vector_lock, flags); + + for (i = 0; i < count; i++) { + irq_set_chip_data(irq + i, cfg[i]); + irq_clear_status_flags(irq + i, IRQ_NOREQUEST); + } + + kfree(cfg); + return irq; + +out_vecs: + for (i--; i >= 0; i--) + __clear_irq_vector(irq + i, cfg[i]); + raw_spin_unlock_irqrestore(&vector_lock, flags); +out_irqs: + for (i = 0; i < count; i++) + free_irq_at(irq + i, cfg[i]); +out_cfgs: + kfree(cfg); + return 0; +} + +unsigned int create_irq_nr(unsigned int from, int node) +{ + return __create_irqs(from, 1, node); +} + +int create_irq(void) +{ + int node = cpu_to_node(0); + unsigned int irq_want; + int irq; + + irq_want = nr_irqs_gsi; + irq = create_irq_nr(irq_want, node); + + if (irq == 0) + irq = -1; + + return irq; +} + +void destroy_irq(unsigned int irq) +{ + struct irq_cfg *cfg = irq_get_chip_data(irq); + unsigned long flags; + + irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); + +#if 0 + free_remapped_irq(irq); +#endif + + raw_spin_lock_irqsave(&vector_lock, flags); + __clear_irq_vector(irq, cfg); + raw_spin_unlock_irqrestore(&vector_lock, flags); + free_irq_at(irq, cfg); +} + +void destroy_irqs(unsigned int irq, unsigned int count) +{ + unsigned int i; + + for (i = 0; i < count; i++) + destroy_irq(irq + i); +} + +/* + * MSI message composition + */ +void native_compose_msi_msg(struct pci_dev *pdev, + unsigned int irq, unsigned int dest, + struct msi_msg *msg, u8 hpet_id) +{ + struct irq_cfg *cfg = irq_cfg(irq); + struct iohub_sysdata *sd = pdev->bus->sysdata; + msg->address_hi = sd->pci_msi_addr_hi; + + if (x2apic_enabled()) + msg->address_hi |= MSI_ADDR_EXT_DEST_ID(dest); + + msg->address_lo = + sd->pci_msi_addr_lo | + ((apic->irq_dest_mode == 0) ? + MSI_ADDR_DEST_MODE_PHYSICAL: + MSI_ADDR_DEST_MODE_LOGICAL) | + ((apic->irq_delivery_mode != dest_LowestPrio) ? 
+ MSI_ADDR_REDIRECTION_CPU: + MSI_ADDR_REDIRECTION_LOWPRI) | + MSI_ADDR_DEST_ID(dest); +#if 0 + /* IOH and IOH2 have a bug. We must duplicate + * destination into data. Fortunately it's possible + */ + msg->data = + MSI_DATA_TRIGGER_EDGE | + MSI_DATA_LEVEL_ASSERT | + ((apic->irq_delivery_mode != dest_LowestPrio) ? + MSI_DATA_DELIVERY_FIXED: + MSI_DATA_DELIVERY_LOWPRI) | + MSI_DATA_VECTOR(cfg->vector); +#else + /* IOH and IOH2 have a bug. We must duplicate + * destination into data. Fortunately it's possible + */ + msg->data = + MSI_ADDR_DEST_ID(dest) | + MSI_DATA_VECTOR(cfg->vector); +#endif +#ifdef DEBUG_MCST_MSI + printk("MSI interrupt for %s: irq %d : address_lo = 0x%08x," + "data = 0x%08x\n", pdev->bus->name, irq, + msg->address_lo, msg->data); +#endif +} + +#ifdef CONFIG_PCI_MSI +int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, + struct msi_msg *msg, u8 hpet_id) +{ + struct irq_cfg *cfg; + int err; + unsigned dest; + +#ifdef CONFIG_E2K + if (e2k_msi_disabled) { + return -ENXIO; + } +#endif + + if (disable_apic) + return -ENXIO; + + cfg = irq_cfg(irq); + err = assign_irq_vector(irq, cfg, apic->target_cpus()); + if (err) + return err; + + err = apic->cpu_mask_to_apicid_and(cfg->domain, + apic->target_cpus(), &dest); + if (err) + return err; +#if 1 + native_compose_msi_msg(pdev, irq, dest, msg, hpet_id); +#else + x86_msi.compose_msi_msg(pdev, irq, dest, msg, hpet_id); +#endif + return 0; +} + +static int +msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) +{ + struct irq_cfg *cfg = data->chip_data; + struct msi_msg msg; + unsigned int dest; + + if (__ioapic_set_affinity(data, mask, &dest)) + return -1; + + __get_cached_msi_msg(data->common->msi_desc, &msg); + +#if 0 + /* IOH and IOH2 have a bug. We must duplicate + * destination into data. Fortunately it's possible + */ + msg.data &= ~MSI_DATA_VECTOR_MASK; + msg.data |= MSI_DATA_VECTOR(cfg->vector); +#else + /* IOH and IOH2 have a bug. 
We must duplicate + * destination into data. Fortunately it's possible + */ + msg.data = MSI_DATA_VECTOR(cfg->vector) | MSI_ADDR_DEST_ID(dest); +#endif + msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; + msg.address_lo |= MSI_ADDR_DEST_ID(dest); + + pci_write_msi_msg(data->irq, &msg); + + return IRQ_SET_MASK_OK_NOCOPY; +} + +static void l_msi_irq_ack(struct irq_data *data) +{ + struct msi_desc *desc = irq_data_get_msi_desc(data); + struct pci_dev *dev = msi_desc_to_pci_dev(desc); + + if (iohub_generation(dev) < 2) { + u32 mask; + /* interrupt can come ahead of DMA, + so flush DMA with register read */ + if (desc->msi_attrib.is_msix) + readl(desc->mask_base); + else + pci_read_config_dword(dev, desc->mask_pos, &mask); + } + + ioapic_ack_pic_edge(data); +} + +/* + * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices, + * which implement the MSI or MSI-X Capability Structure. + */ +static struct irq_chip msi_chip = { + .name = "PCI-MSI", + .irq_unmask = pci_msi_unmask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_ack = l_msi_irq_ack, + .irq_set_affinity = msi_set_affinity, + .irq_retrigger = ioapic_retrigger_irq, +}; + +int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, + unsigned int irq_base, unsigned int irq_offset) +{ + struct irq_chip *chip = &msi_chip; + struct msi_msg msg; + unsigned int irq = irq_base + irq_offset; + int ret; + ret = msi_compose_msg(dev, irq, &msg, -1); + if (ret < 0) + return ret; + + irq_set_msi_desc_off(irq_base, irq_offset, msidesc); + + /* + * MSI-X message is written per-IRQ, the offset is always 0. + * MSI message denotes a contiguous group of IRQs, written for 0th IRQ. 
+ */ + if (!irq_offset) + pci_write_msi_msg(irq, &msg); +#ifdef CONFIG_IRQ_REMAP + setup_remapped_irq(irq, irq_get_chip_data(irq), chip); +#endif + irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); + + dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq); + + return 0; +} + +int native_setup_msi_irqs_apic(struct pci_dev *dev, int nvec, int type) +{ + unsigned int irq, irq_want; + struct msi_desc *msidesc; + int node, ret; + + /* Multiple MSI vectors only supported with interrupt remapping */ + if (type == PCI_CAP_ID_MSI && nvec > 1) + return 1; + + node = dev_to_node(&dev->dev); + irq_want = nr_irqs_gsi; + list_for_each_entry(msidesc, dev_to_msi_list(&dev->dev), list) { + irq = create_irq_nr(irq_want, node); + if (irq == 0) + return -ENOSPC; + + irq_want = irq + 1; + + ret = setup_msi_irq(dev, msidesc, irq, 0); + if (ret < 0) + goto error; + } + return 0; + +error: + destroy_irq(irq); + return ret; +} + +void native_teardown_msi_irq_apic(unsigned int irq) +{ + destroy_irq(irq); +} + +static bool irqchip_is_ioapic(struct irq_chip *chip) +{ + return chip == &ioapic_chip || chip == &msi_chip || + irqchip_is_ioepic_to_apic(chip); +} + +#ifdef CONFIG_DMAR_TABLE +static int +dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, + bool force) +{ + struct irq_cfg *cfg = data->chip_data; + unsigned int dest, irq = data->irq; + struct msi_msg msg; + + if (__ioapic_set_affinity(data, mask, &dest)) + return -1; + + dmar_msi_read(irq, &msg); + + msg.data &= ~MSI_DATA_VECTOR_MASK; + msg.data |= MSI_DATA_VECTOR(cfg->vector); + msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; + msg.address_lo |= MSI_ADDR_DEST_ID(dest); + msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest); + + dmar_msi_write(irq, &msg); + + return IRQ_SET_MASK_OK_NOCOPY; +} + +static struct irq_chip dmar_msi_type = { + .name = "DMAR_MSI", + .irq_unmask = dmar_msi_unmask, + .irq_mask = dmar_msi_mask, + .irq_ack = ioapic_ack_pic_edge, + .irq_set_affinity = 
dmar_msi_set_affinity, + .irq_retrigger = ioapic_retrigger_irq, +}; + +int arch_setup_dmar_msi(unsigned int irq) +{ + int ret; + struct msi_msg msg; + + ret = msi_compose_msg(NULL, irq, &msg, -1); + if (ret < 0) + return ret; + dmar_msi_write(irq, &msg); + irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, + "edge"); + return 0; +} +#endif + +#ifdef CONFIG_HPET_TIMER + +static int hpet_msi_set_affinity(struct irq_data *data, + const struct cpumask *mask, bool force) +{ + struct irq_cfg *cfg = data->chip_data; + struct msi_msg msg; + unsigned int dest; + + if (__ioapic_set_affinity(data, mask, &dest)) + return -1; + + hpet_msi_read(data->handler_data, &msg); + + msg.data &= ~MSI_DATA_VECTOR_MASK; + msg.data |= MSI_DATA_VECTOR(cfg->vector); + msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; + msg.address_lo |= MSI_ADDR_DEST_ID(dest); + + hpet_msi_write(data->handler_data, &msg); + + return IRQ_SET_MASK_OK_NOCOPY; +} + +static struct irq_chip hpet_msi_type = { + .name = "HPET_MSI", + .irq_unmask = hpet_msi_unmask, + .irq_mask = hpet_msi_mask, + .irq_ack = ioapic_ack_pic_edge, + .irq_set_affinity = hpet_msi_set_affinity, + .irq_retrigger = ioapic_retrigger_irq, +}; + +int default_setup_hpet_msi(unsigned int irq, unsigned int id) +{ + struct irq_chip *chip = &hpet_msi_type; + struct msi_msg msg; + int ret; + + ret = msi_compose_msg(NULL, irq, &msg, id); + if (ret < 0) + return ret; + + hpet_msi_write(irq_get_handler_data(irq), &msg); + irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); + setup_remapped_irq(irq, irq_get_chip_data(irq), chip); + + irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); + return 0; +} +#endif + +#endif /* CONFIG_PCI_MSI */ + +int io_apic_setup_irq_pin_once(unsigned int irq, int node, + struct io_apic_irq_attr *attr) +{ + unsigned int ioapic_idx = attr->ioapic, pin = attr->ioapic_pin; + int ret; + struct IO_APIC_route_entry orig_entry; + + /* Avoid redundant programming */ + if (test_bit(pin, 
ioapics[ioapic_idx].pin_programmed)) { + pr_debug("Pin %d-%d already programmed\n", mpc_ioapic_id(ioapic_idx), pin); + orig_entry = ioapic_read_entry(attr->ioapic, pin); + if (attr->trigger == orig_entry.trigger && attr->polarity == orig_entry.polarity) + return 0; + return -EBUSY; + } + ret = io_apic_setup_irq_pin(irq, node, attr); + if (!ret) + set_bit(pin, ioapics[ioapic_idx].pin_programmed); + return ret; +} + +static int __init io_apic_get_redir_entries(int ioapic) +{ + union IO_APIC_reg_01 reg_01; + unsigned long flags; + + raw_spin_lock_irqsave(&ioapic_lock, flags); + reg_01.raw = io_apic_read(ioapic, 1); + raw_spin_unlock_irqrestore(&ioapic_lock, flags); + + /* The register returns the maximum index redir index + * supported, which is one less than the total number of redir + * entries. + */ + return reg_01.bits.entries + 1; +} + +void __init probe_nr_irqs_gsi(void) +{ + int nr; + + nr = gsi_top + NR_IRQS_LEGACY; + if (nr > nr_irqs_gsi) + nr_irqs_gsi = nr; + + printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); +} + +int get_nr_irqs_gsi(void) +{ + return nr_irqs_gsi; +} +EXPORT_SYMBOL(get_nr_irqs_gsi); + +int __init arch_probe_nr_irqs(void) +{ + int nr; + + if (nr_irqs > (NR_VECTORS * nr_cpu_ids)) + nr_irqs = NR_VECTORS * nr_cpu_ids; + +#if 0 + nr = nr_irqs_gsi + 8 * nr_cpu_ids; +#else + /* + * We have nr_irqs_gsi pins, I2C_SPI_IRQS_NUM chained + * interrupts in I2C-SPI controller and + * ARCH_NR_OWN_GPIOS*MAX_NUMIOLINKS chained interrupts + * in GPIO controller (if CONFIG_IOHUB_DOMAINS - it can be + * MAX_NUMIOLINKS controllers on board). 
+ * + * GPIO irqs will belong + * [nr_irqs_gsi, nr_irqs_gsi + ARCH_MAX_NR_OWN_GPIOS) + * + * And I2C_SPI irqs: + * [nr_irqs_gsi + ARCH_MAX_NR_OWN_GPIOS, + * nr_irqs_gsi + ARCH_MAX_NR_OWN_GPIOS + I2C_SPI_IRQS_NUM) + */ +# ifdef CONFIG_IOHUB_DOMAINS + nr = nr_irqs_gsi + ARCH_MAX_NR_OWN_GPIOS * num_online_iohubs() + + I2C_SPI_IRQS_NUM; +# else + nr = nr_irqs_gsi + ARCH_MAX_NR_OWN_GPIOS + I2C_SPI_IRQS_NUM; +# endif +#endif + +#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ) + /* + * for MSI and HT dyn irq + */ + nr += nr_irqs_gsi * 16; +#endif + if (nr < nr_irqs) + nr_irqs = nr; + + return NR_IRQS_LEGACY; +} + +/* TODO just a reminder: pin numbers here and in current arch/l/pci + * implementation are different, see commit 878f2e50. + */ +int io_apic_set_pci_routing(struct device *dev, int irq, + struct io_apic_irq_attr *irq_attr) +{ + int node; + + if (!IO_APIC_IRQ(irq)) { + apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", + irq_attr->ioapic); + return -EINVAL; + } + + node = dev ? dev_to_node(dev) : cpu_to_node(0); + + return io_apic_setup_irq_pin_once(irq, node, irq_attr); +} + +#ifdef CONFIG_L_X86_32 +static int __init io_apic_get_unique_id(int ioapic, int apic_id) +{ + union IO_APIC_reg_00 reg_00; + static physid_mask_t apic_id_map = PHYSID_MASK_NONE; + physid_mask_t tmp; + unsigned long flags; + int i = 0; + + /* + * The P4 platform supports up to 256 APIC IDs on two separate APIC + * buses (one for LAPICs, one for IOAPICs), where predecessors only + * supports up to 16 on one shared APIC bus. + * + * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full + * advantage of new APIC bus architecture. 
+ */ + + if (physids_empty(apic_id_map)) + apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map); + + raw_spin_lock_irqsave(&ioapic_lock, flags); + reg_00.raw = io_apic_read(ioapic, 0); + raw_spin_unlock_irqrestore(&ioapic_lock, flags); + + if (apic_id >= get_physical_broadcast()) { + printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " + "%d\n", ioapic, apic_id, reg_00.bits.ID); + apic_id = reg_00.bits.ID; + } + + /* + * Every APIC in a system must have a unique ID or we get lots of nice + * 'stuck on smp_invalidate_needed IPI wait' messages. + */ + if (apic->check_apicid_used(&apic_id_map, apic_id)) { + + for (i = 0; i < get_physical_broadcast(); i++) { + if (!apic->check_apicid_used(&apic_id_map, i)) + break; + } + + if (i == get_physical_broadcast()) + panic("Max apic_id exceeded!\n"); + + printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, " + "trying %d\n", ioapic, apic_id, i); + + apic_id = i; + } + + apic->apicid_to_cpu_present(apic_id, &tmp); + physids_or(apic_id_map, apic_id_map, tmp); + + if (reg_00.bits.ID != apic_id) { + reg_00.bits.ID = apic_id; + + raw_spin_lock_irqsave(&ioapic_lock, flags); + io_apic_write(ioapic, 0, reg_00.raw); + reg_00.raw = io_apic_read(ioapic, 0); + raw_spin_unlock_irqrestore(&ioapic_lock, flags); + + /* Sanity check */ + if (reg_00.bits.ID != apic_id) { + pr_err("IOAPIC[%d]: Unable to change apic_id!\n", + ioapic); + return -1; + } + } + + apic_printk(APIC_VERBOSE, KERN_INFO + "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); + + return apic_id; +} + +static u8 __init io_apic_unique_id(u8 id) +{ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && + !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) + return io_apic_get_unique_id(nr_ioapics, id); + else + return id; +} +#else +static u8 __init io_apic_unique_id(u8 id) +{ + int i; + DECLARE_BITMAP(used, 256); + + bitmap_zero(used, 256); + for (i = 0; i < nr_ioapics; i++) { + __set_bit(mpc_ioapic_id(i), used); + } + if (!test_bit(id, used)) + 
return id; + return find_first_zero_bit(used, 256); +} +#endif + +static int __init io_apic_get_version(int ioapic) +{ + union IO_APIC_reg_01 reg_01; + unsigned long flags; + + raw_spin_lock_irqsave(&ioapic_lock, flags); + reg_01.raw = io_apic_read(ioapic, 1); + raw_spin_unlock_irqrestore(&ioapic_lock, flags); + + return reg_01.bits.version; +} + +#ifdef CONFIG_X86_IO_APIC +int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) +{ + int ioapic, pin, idx; + + if (skip_ioapic_setup) + return -1; + + ioapic = mp_find_ioapic(gsi); + if (ioapic < 0) + return -1; + + pin = mp_find_ioapic_pin(ioapic, gsi); + if (pin < 0) + return -1; + + idx = find_irq_entry(ioapic, pin, mp_INT); + if (idx < 0) + return -1; + + *trigger = irq_trigger(idx); + *polarity = irq_polarity(idx); + return 0; +} +#endif + +/* + * This function currently is only a helper for the i386 smp boot process where + * we need to reprogram the ioredtbls to cater for the cpus which have come online + * so mask in all cases should simply be apic->target_cpus() + */ +#ifdef CONFIG_SMP +void __init_recv setup_ioapic_dest(void) +{ + int pin, ioapic, irq, irq_entry; + const struct cpumask *mask; + struct irq_data *idata; + + if (skip_ioapic_setup == 1) + return; + + for (ioapic = 0; ioapic < nr_ioapics; ioapic++) + for (pin = 0; pin < ioapics[ioapic].nr_registers; pin++) { + irq_entry = find_irq_entry(ioapic, pin, mp_INT); + if (irq_entry == -1) + continue; + irq = pin_2_irq(irq_entry, ioapic, pin); + + if ((ioapic > 0) && (irq > 16)) + continue; + + idata = irq_get_irq_data(irq); + + /* + * Honour affinities which have been set in early boot + */ + if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata)) + mask = irq_data_get_affinity_mask(idata); + else + mask = apic->target_cpus(); + +#if 0 + x86_io_apic_ops.set_affinity(idata, mask, false); +#else + native_ioapic_set_affinity(idata, mask, false); +#endif + } + +} +#endif + +#if 0 +#define IOAPIC_RESOURCE_NAME_SIZE 11 + +static struct resource 
*ioapic_resources; + +static struct resource * __init ioapic_setup_resources(int nr_ioapics) +{ + unsigned long n; + struct resource *res; + char *mem; + int i; + + if (nr_ioapics <= 0) + return NULL; + + n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource); + n *= nr_ioapics; + + mem = alloc_bootmem(n); + res = (void *)mem; + + mem += sizeof(struct resource) * nr_ioapics; + + for (i = 0; i < nr_ioapics; i++) { + res[i].name = mem; + res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; + snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); + mem += IOAPIC_RESOURCE_NAME_SIZE; + } + + ioapic_resources = res; + + return res; +} + +void __init native_io_apic_init_mappings(void) +{ + unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0; + struct resource *ioapic_res; + int i; + + ioapic_res = ioapic_setup_resources(nr_ioapics); + for (i = 0; i < nr_ioapics; i++) { + if (smp_found_config) { + ioapic_phys = mpc_ioapic_addr(i); +#ifdef CONFIG_L_X86_32 + if (!ioapic_phys) { + printk(KERN_ERR + "WARNING: bogus zero IO-APIC " + "address found in MPTABLE, " + "disabling IO/APIC support!\n"); + smp_found_config = 0; + skip_ioapic_setup = 1; + goto fake_ioapic_page; + } +#endif + } else { +#ifdef CONFIG_L_X86_32 +fake_ioapic_page: +#endif + ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); + ioapic_phys = __pa(ioapic_phys); + } + set_fixmap_nocache(idx, ioapic_phys); + apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n", + __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK), + ioapic_phys); + idx++; + + ioapic_res->start = ioapic_phys; + ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1; + ioapic_res++; + } + + probe_nr_irqs_gsi(); +} + +void __init ioapic_insert_resources(void) +{ + int i; + struct resource *r = ioapic_resources; + + if (!r) { + if (nr_ioapics > 0) + printk(KERN_ERR + "IO APIC resources couldn't be allocated.\n"); + return; + } + + for (i = 0; i < nr_ioapics; i++) { + insert_resource(&iomem_resource, r); + r++; + } +} +#endif + +int 
mp_find_ioapic(u32 gsi) +{ + int i = 0; + + if (nr_ioapics == 0) + return -1; + + /* Find the IOAPIC that manages this GSI. */ + for (i = 0; i < nr_ioapics; i++) { + struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(i); + if ((gsi >= gsi_cfg->gsi_base) + && (gsi <= gsi_cfg->gsi_end)) + return i; + } + + printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); + return -1; +} + +int mp_find_ioapic_pin(int ioapic, u32 gsi) +{ + struct mp_ioapic_gsi *gsi_cfg; + + if (WARN_ON(ioapic == -1)) + return -1; + + gsi_cfg = mp_ioapic_gsi_routing(ioapic); + if (WARN_ON(gsi > gsi_cfg->gsi_end)) + return -1; + + return gsi - gsi_cfg->gsi_base; +} + +static __init int bad_ioapic(unsigned long address) +{ + if (nr_ioapics >= MAX_IO_APICS) { + pr_warn("WARNING: Max # of I/O APICs (%d) exceeded (found %d), skipping\n", + MAX_IO_APICS, nr_ioapics); + return 1; + } + if (!address) { + pr_warn("WARNING: Bogus (zero) I/O APIC address found in table, skipping!\n"); + return 1; + } + return 0; +} + +static __init int bad_ioapic_register(int idx) +{ + union IO_APIC_reg_00 reg_00; + union IO_APIC_reg_01 reg_01; + union IO_APIC_reg_02 reg_02; + + reg_00.raw = io_apic_read(idx, 0); + reg_01.raw = io_apic_read(idx, 1); + reg_02.raw = io_apic_read(idx, 2); + + if (reg_00.raw == -1 && reg_01.raw == -1 && reg_02.raw == -1) { + pr_warn("I/O APIC 0x%lx registers return all ones, skipping!\n", + mpc_ioapic_addr(idx)); + return 1; + } + + return 0; +} + +#if defined CONFIG_E2K || defined CONFIG_E90S +void __init mp_register_ioapic(int id, unsigned long address, u32 gsi_base) +#else +void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) +#endif +{ + int idx = 0; + int entries; + struct mp_ioapic_gsi *gsi_cfg; + + if (bad_ioapic(address)) + return; + + idx = nr_ioapics; + + ioapics[idx].mp_config.type = MP_IOAPIC; + ioapics[idx].mp_config.flags = MPC_APIC_USABLE; + ioapics[idx].mp_config.apicaddr = address; + +#if 0 + set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); 
+#endif + + if (bad_ioapic_register(idx)) { +#if 0 + clear_fixmap(FIX_IO_APIC_BASE_0 + idx); +#endif + return; + } + + ioapics[idx].mp_config.apicid = io_apic_unique_id(id); + ioapics[idx].mp_config.apicver = io_apic_get_version(idx); + + /* + * Build basic GSI lookup table to facilitate gsi->io_apic lookups + * and to prevent reprogramming of IOAPIC pins (PCI GSIs). + */ + entries = io_apic_get_redir_entries(idx); + gsi_cfg = mp_ioapic_gsi_routing(idx); + gsi_cfg->gsi_base = gsi_base; + gsi_cfg->gsi_end = gsi_base + entries - 1; + + /* + * The number of IO-APIC IRQ registers (== #pins): + */ + ioapics[idx].nr_registers = entries; + + if (gsi_cfg->gsi_end >= gsi_top) + gsi_top = gsi_cfg->gsi_end + 1; + + pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, GSI %d-%d\n", + idx, mpc_ioapic_id(idx), + mpc_ioapic_ver(idx), mpc_ioapic_addr(idx), + gsi_cfg->gsi_base, gsi_cfg->gsi_end); + + nr_ioapics++; +} + +#if defined CONFIG_PCI_MSI && defined CONFIG_PCI_QUIRKS +#define MSI_LO_ADDRESS 0x48 +#define MSI_HI_ADDRESS 0x4c + + +void l_request_msi_addresses_window(struct pci_dev *pdev) +{ + u32 lo; + u32 hi; + u64 a; + if (pdev->device != PCI_DEVICE_ID_MCST_I2C_SPI) { + return; + } + if ((pdev->vendor != PCI_VENDOR_ID_ELBRUS) && + (pdev->vendor != PCI_VENDOR_ID_MCST_TMP)) { + return; + } + /* this is the place where it's possible to insert resource for APIC + * MSI window address + */ + pci_read_config_dword(pdev, MSI_LO_ADDRESS, &lo); + pci_read_config_dword(pdev, MSI_HI_ADDRESS, &hi); + a = ((u64)hi << 32) | lo; + if (__request_region(&iomem_resource, a, 0x100000, "MSI", + IORESOURCE_MEM)) { + pr_info("MSI window 0x%llx + 0x100000. Reserved\n", a); + } else { + pr_info("MSI window 0x%llx + 0x100000. 
Could not reserve\n", a); + } +} + +static void quirk_pci_msi(struct pci_dev *pdev) +{ + struct iohub_sysdata *sd = pdev->bus->sysdata; + int gen, rev = pdev->revision; + + if (pdev->device == PCI_DEVICE_ID_MCST_I2CSPI) { + gen = 0; + sd->iohub_generation = gen; + sd->has_iohub = true; + sd->iohub_revision = rev; + } else if (pdev->device == PCI_DEVICE_ID_MCST_I2C_SPI) { + gen = 1; + sd->iohub_generation = gen; + sd->has_iohub = true; + sd->iohub_revision = rev; + } else { + gen = 2; + sd->eioh_generation = gen; + sd->has_eioh = true; + sd->eioh_revision = rev; + } + /* + * If IOHub2 is connected to EIOHub, use RT_MSI address instead of + * the address from IOAPIC BARs + */ + if (cpu_has_epic()) { + get_io_epic_msi(dev_to_node(&pdev->dev), + &sd->pci_msi_addr_lo, &sd->pci_msi_addr_hi); + } else if (gen < 2) { + pci_read_config_dword(pdev, MSI_LO_ADDRESS, + &sd->pci_msi_addr_lo); + pci_read_config_dword(pdev, MSI_HI_ADDRESS, + &sd->pci_msi_addr_hi); + } + dev_info(&pdev->dev, "MSI address at: %x; IOHUB generation: %d, " + "revision: %x\n", sd->pci_msi_addr_lo, gen, rev); +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ELBRUS, + PCI_DEVICE_ID_MCST_I2CSPI, quirk_pci_msi); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_I2C_SPI, quirk_pci_msi); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_I2C_SPI_EPIC, quirk_pci_msi); + +#elif defined CONFIG_PCI_MSI +#error fixme +#endif /*CONFIG_PCI_QUIRKS*/ + +unsigned int ioapic_cfg_get_pin(struct irq_cfg *cfg) +{ + return cfg->irq_2_pin->pin; +} + +unsigned int ioapic_cfg_get_idx(struct irq_cfg *cfg) +{ + return cfg->irq_2_pin->apic; +} + +void fixup_irqs_apic(void) +{ + unsigned int vector; + + /* + * We can remove mdelay() and then send spuriuous interrupts to + * new cpu targets for all the irqs that were handled previously by + * this cpu. While it works, I have seen spurious interrupt messages + * (nothing wrong but still...). 
+ * + * So for now, retain mdelay(1) and check the IRR and then send those + * interrupts to new targets as this cpu is already offlined... + */ + mdelay(1); + + for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { + unsigned int irr; + + if (__this_cpu_read(vector_irq[vector]) < 0) + continue; + + irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); + if (irr & (1 << (vector % 32))) { + unsigned int irq = __this_cpu_read(vector_irq[vector]); + struct irq_desc *desc = irq_to_desc(irq); + struct irq_data *data = irq_desc_get_irq_data(desc); + struct irq_chip *chip = irq_data_get_irq_chip(data); + raw_spin_lock(&desc->lock); + if (chip->irq_retrigger) + chip->irq_retrigger(data); + raw_spin_unlock(&desc->lock); + } + __this_cpu_write(vector_irq[vector], -1); + } +} diff --git a/arch/l/kernel/apic/ipi.c b/arch/l/kernel/apic/ipi.c new file mode 100644 index 000000000000..c10209241360 --- /dev/null +++ b/arch/l/kernel/apic/ipi.c @@ -0,0 +1,168 @@ +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#if 0 +#include +#endif +#include + +void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector) +{ + unsigned long query_cpu; + unsigned long flags; + + /* + * Hack. The clustered APIC addressing mode doesn't allow us to send + * to an arbitrary mask, so I do a unicast to each CPU instead. 
+ * - mbligh + */ + local_irq_save(flags); + for_each_cpu(query_cpu, mask) { + __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, + query_cpu), vector, APIC_DEST_PHYSICAL); + } + local_irq_restore(flags); +} + +void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, + int vector) +{ + unsigned int this_cpu = smp_processor_id(); + unsigned int query_cpu; + unsigned long flags; + + /* See Hack comment above */ + + local_irq_save(flags); + for_each_cpu(query_cpu, mask) { + if (query_cpu == this_cpu) + continue; + __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, + query_cpu), vector, APIC_DEST_PHYSICAL); + } + local_irq_restore(flags); +} + +#ifdef CONFIG_L_X86_32 + +void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, + int vector) +{ + unsigned long flags; + unsigned int query_cpu; + + /* + * Hack. The clustered APIC addressing mode doesn't allow us to send + * to an arbitrary mask, so I do a unicasts to each CPU instead. This + * should be modified to do 1 message per cluster ID - mbligh + */ + + local_irq_save(flags); + for_each_cpu(query_cpu, mask) + __default_send_IPI_dest_field( + early_per_cpu(x86_cpu_to_logical_apicid, query_cpu), + vector, apic->dest_logical); + local_irq_restore(flags); +} + +void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, + int vector) +{ + unsigned long flags; + unsigned int query_cpu; + unsigned int this_cpu = smp_processor_id(); + + /* See Hack comment above */ + + local_irq_save(flags); + for_each_cpu(query_cpu, mask) { + if (query_cpu == this_cpu) + continue; + __default_send_IPI_dest_field( + early_per_cpu(x86_cpu_to_logical_apicid, query_cpu), + vector, apic->dest_logical); + } + local_irq_restore(flags); +} + +/* + * This is only used on smaller machines. 
+ */ +void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector) +{ + unsigned long mask = cpumask_bits(cpumask)[0]; + unsigned long flags; + + if (!mask) + return; + + local_irq_save(flags); + WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); + __default_send_IPI_dest_field(mask, vector, apic->dest_logical); + local_irq_restore(flags); +} + +void default_send_IPI_allbutself(int vector) +{ + /* + * if there are no other CPUs in the system then we get an APIC send + * error if we try to broadcast, thus avoid sending IPIs in this case. + */ + if (!(num_online_cpus() > 1)) + return; + + __default_local_send_IPI_allbutself(vector); +} + +void default_send_IPI_all(int vector) +{ + __default_local_send_IPI_all(vector); +} + +void default_send_IPI_self(int vector) +{ + __default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical); +} + +/* must come after the send_IPI functions above for inlining */ +static int convert_apicid_to_cpu(int apic_id) +{ + int i; + + for_each_possible_cpu(i) { + if (per_cpu(x86_cpu_to_apicid, i) == apic_id) + return i; + } + return -1; +} + +int safe_smp_processor_id(void) +{ + int apicid, cpuid; + + if (!cpu_has_apic) + return 0; + + apicid = hard_smp_processor_id(); + if (apicid == BAD_APICID) + return 0; + + cpuid = convert_apicid_to_cpu(apicid); + + return cpuid >= 0 ? cpuid : 0; +} +#endif diff --git a/arch/l/kernel/apic/irq.c b/arch/l/kernel/apic/irq.c new file mode 100644 index 000000000000..3e9ee4cb4b53 --- /dev/null +++ b/arch/l/kernel/apic/irq.c @@ -0,0 +1,236 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include + +/* + * This file holds code that is common for e2k and e90s APIC implementation + * but which did not originate in arch/x86/kernel/apic/ folder. 
+ * + * Corresponding declarations can be found in asm-l/hw_irq.h + */ + +#if IS_ENABLED(CONFIG_RDMA) || IS_ENABLED(CONFIG_RDMA_SIC) || \ + IS_ENABLED(CONFIG_RDMA_NET) +#ifdef CONFIG_NUMA +int rdma_node[MAX_NUMNODES] = {0}; +#else +int rdma_node[1] = {0}; +#endif +int rdma_apic_init; +EXPORT_SYMBOL(rdma_apic_init); + +void (*rdma_interrupt_p)(struct pt_regs *regs) = NULL; +EXPORT_SYMBOL(rdma_interrupt_p); + +static void rdma_interrupt(struct pt_regs *regs) +{ + static int int_rdma_error = 0; + + ack_APIC_irq(); + l_irq_enter(); + if (rdma_interrupt_p) + rdma_interrupt_p(regs); + else { + if (!int_rdma_error) + printk("rdma: attempt calling null handler\n"); + int_rdma_error++; + } + inc_irq_stat(irq_rdma_count); + l_irq_exit(); +} +#endif + +__init_recv +void l_init_system_handlers_table(void) +{ + /* + * Initialize interrupt[] array of system interrupts' handlers. + */ + +#ifdef CONFIG_SMP + /* + * The reschedule interrupt is a CPU-to-CPU reschedule-helper + * IPI, driven by wakeup. + */ + setup_PIC_vector_handler(RESCHEDULE_VECTOR, + smp_reschedule_interrupt, 1, + "smp_reschedule_interrupt"); + + /* IPI for generic function call */ + setup_PIC_vector_handler(CALL_FUNCTION_VECTOR, + smp_call_function_interrupt, 1, + "smp_call_function_interrupt"); + + /* IPI for generic single function call */ + setup_PIC_vector_handler(CALL_FUNCTION_SINGLE_VECTOR, + smp_call_function_single_interrupt, 1, + "smp_call_function_single_interrupt"); + + /* Low priority IPI to cleanup after moving an irq. 
*/ + setup_PIC_vector_handler(IRQ_MOVE_CLEANUP_VECTOR, + smp_irq_move_cleanup_interrupt, 0, + "smp_irq_move_cleanup_interrupt"); + +#endif + /* self generated IPI for local APIC timer */ + setup_PIC_vector_handler(LOCAL_TIMER_VECTOR, + smp_apic_timer_interrupt, 1, + "smp_apic_timer_interrupt"); + + /* IPI vectors for APIC spurious and error interrupts */ + setup_PIC_vector_handler(SPURIOUS_APIC_VECTOR, + smp_spurious_interrupt, 1, + "smp_spurious_interrupt"); + setup_PIC_vector_handler(ERROR_APIC_VECTOR, + smp_error_interrupt, 1, + "smp_error_interrupt"); + +#if IS_ENABLED(CONFIG_RDMA) || IS_ENABLED(CONFIG_RDMA_SIC) || \ + IS_ENABLED(CONFIG_RDMA_NET) + setup_PIC_vector_handler(RDMA_INTERRUPT_VECTOR, + rdma_interrupt, 1, + "rdma_interrupt"); +#endif + + setup_PIC_vector_handler(IRQ_WORK_VECTOR, + smp_irq_work_interrupt, 1, + "smp_irq_work_interrupt"); +} + +static void unknown_nmi_error(unsigned int reason, struct pt_regs *regs) +{ + printk("Uhhuh. NMI received for unknown reason %x on CPU %d.\n", + reason, smp_processor_id()); + printk("Dazed and confused, but trying to continue\n"); +} + + +/* + * How NMIs work: + * + * 1) After receiving NMI corresponding bit in APIC_NM is set. + * + * 2) An exception is passed to CPU as soon as the following + * condition holds true: + * + * APIC_NM != 0 && (!PSR.unmie && PSR.nmie || PSR.unmie && UPSR.nmie) + * + * 3) CPU reads APIC_NM register which has a bit set for each + * successfully received NMI. At this moment all further NMI + * exceptions are blocked until APIC_NMI is written with any value. + * + * 4) CPU writes APIC_NM thus allowing receive of next NMI and + * also clearing corresponding bits: + * + * APIC_NM &= ~written_value + */ +noinline notrace void apic_do_nmi(struct pt_regs *regs) +{ + unsigned int reason; +#ifdef CONFIG_EARLY_PRINTK + int console_switched; +#endif + + reason = arch_apic_read(APIC_NM); + + /* + * Immediately allow receiving of next NM interrupts. 
+ * Must be done before handling to avoid losing interrupts like this: + * + * cpu0 cpu1 + * -------------------------------------------- + * set flag for cpu 0 + * and send an NMI + * enter handler and + * clear the flag + * because flag is cleared, + * set it again and send + * the next NMI + * clear APIC_NM + * + * In this example cpu0 will never receive the second NMI. + */ + arch_apic_write(APIC_NM, APIC_NM_BIT_MASK); + +#ifdef CONFIG_EARLY_PRINTK + /* We should not use normal printk() from inside the NMI handler */ + console_switched = switch_to_early_dump_console(); +#endif + + if (reason & APIC_NM_NMI) { +#ifdef CONFIG_E2K + /* NMI IPIs are used only by nmi_call_function() */ + nmi_call_function_interrupt(); +#endif + reason &= ~APIC_NM_NMI; + } + + if (APIC_NM_MASK(reason) != 0) + unknown_nmi_error(reason, regs); + +#ifdef CONFIG_EARLY_PRINTK + if (console_switched) + switch_from_early_dump_console(); +#endif +} + +#ifdef CONFIG_SMP +void __inquire_remote_apic(int apicid) +{ + unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 }; + char *names[] = { "ID", "VERSION", "SPIV" }; + int timeout; + u32 status; + + printk(KERN_INFO "Inquiring remote APIC 0x%x...\n", apicid); + + for (i = 0; i < ARRAY_SIZE(regs); i++) { + printk(KERN_INFO "... APIC 0x%x %s: ", apicid, names[i]); + + /* + * Wait for idle. 
+ */ + status = safe_apic_wait_icr_idle(); + if (status) + printk(KERN_CONT + "a previous APIC delivery may have failed\n"); + + apic_icr_write(APIC_DM_REMRD | regs[i], apicid); + + timeout = 0; + do { + udelay(100); + status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK; + } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000); + + switch (status) { + case APIC_ICR_RR_VALID: + status = apic_read(APIC_RRR); + printk(KERN_CONT "%08x\n", status); + break; + default: + printk(KERN_CONT "failed\n"); + } + } +} +#endif diff --git a/arch/l/kernel/apic/irq_work.c b/arch/l/kernel/apic/irq_work.c new file mode 100644 index 000000000000..e87de86def36 --- /dev/null +++ b/arch/l/kernel/apic/irq_work.c @@ -0,0 +1,42 @@ +#include +#include +#include +#include +#include +#include + +static inline void irq_work_entering_irq(void) +{ + l_irq_enter(); + ack_APIC_irq(); +} + +static inline void __smp_irq_work_interrupt(void) +{ + inc_irq_stat(apic_irq_work_irqs); + irq_work_run(); +} + +__visible void smp_irq_work_interrupt(struct pt_regs *regs) +{ + irq_work_entering_irq(); + __smp_irq_work_interrupt(); + exiting_irq(); +} + +#if 0 +__visible void smp_trace_irq_work_interrupt(struct pt_regs *regs) +{ + irq_work_entering_irq(); + trace_irq_work_entry(IRQ_WORK_VECTOR); + __smp_irq_work_interrupt(); + trace_irq_work_exit(IRQ_WORK_VECTOR); + exiting_irq(); +} +#endif + +void apic_irq_work_raise(void) +{ + apic->send_IPI_self(IRQ_WORK_VECTOR); + apic_wait_icr_idle(); +} diff --git a/arch/l/kernel/apic/msi.c b/arch/l/kernel/apic/msi.c new file mode 100644 index 000000000000..bbc8d6c8011f --- /dev/null +++ b/arch/l/kernel/apic/msi.c @@ -0,0 +1,365 @@ +/* + * Support of MSI, HPET and DMAR interrupts. + * + * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo + * Moved from arch/x86/kernel/apic/io_apic.c. 
+ * Jiang Liu + * Convert to hierarchical irqdomain + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_HPET_TIMER +#include +#endif +#include +#include +#include + +static struct irq_domain *msi_default_domain; + +static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct irq_cfg *cfg = irqd_cfg(data); + +#ifdef CONFIG_E2K + if (e2k_msi_disabled) { + return -ENXIO; + } +#endif + msg->address_hi = MSI_ADDR_BASE_HI; + + if (x2apic_enabled()) + msg->address_hi |= MSI_ADDR_EXT_DEST_ID(cfg->dest_apicid); + + msg->address_lo = + MSI_ADDR_BASE_LO | + ((apic->irq_dest_mode == 0) ? + MSI_ADDR_DEST_MODE_PHYSICAL : + MSI_ADDR_DEST_MODE_LOGICAL) | + ((apic->irq_delivery_mode != dest_LowestPrio) ? + MSI_ADDR_REDIRECTION_CPU : + MSI_ADDR_REDIRECTION_LOWPRI) | + MSI_ADDR_DEST_ID(cfg->dest_apicid); + + msg->data = + MSI_DATA_TRIGGER_EDGE | + MSI_DATA_LEVEL_ASSERT | + ((apic->irq_delivery_mode != dest_LowestPrio) ? + MSI_DATA_DELIVERY_FIXED : + MSI_DATA_DELIVERY_LOWPRI) | + MSI_DATA_VECTOR(cfg->vector); +} + +/* + * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices, + * which implement the MSI or MSI-X Capability Structure. 
+ */ +static struct irq_chip pci_msi_controller = { + .name = "PCI-MSI", + .irq_unmask = pci_msi_unmask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_ack = irq_chip_ack_parent, + .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_compose_msi_msg = irq_msi_compose_msg, + .flags = IRQCHIP_SKIP_SET_WAKE, +}; + +int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) +{ + struct irq_domain *domain; + struct irq_alloc_info info; + + init_irq_alloc_info(&info, NULL); + info.type = X86_IRQ_ALLOC_TYPE_MSI; + info.msi_dev = dev; + + domain = irq_remapping_get_irq_domain(&info); + if (domain == NULL) + domain = msi_default_domain; + if (domain == NULL) + return -ENOSYS; + + return pci_msi_domain_alloc_irqs(domain, dev, nvec, type); +} + +void native_teardown_msi_irq(unsigned int irq) +{ + irq_domain_free_irqs(irq, 1); +} + +static irq_hw_number_t pci_msi_get_hwirq(struct msi_domain_info *info, + msi_alloc_info_t *arg) +{ + return arg->msi_hwirq; +} + +static int pci_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *arg) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct msi_desc *desc = first_pci_msi_entry(pdev); + + init_irq_alloc_info(arg, NULL); + arg->msi_dev = pdev; + if (desc->msi_attrib.is_msix) { + arg->type = X86_IRQ_ALLOC_TYPE_MSIX; + } else { + arg->type = X86_IRQ_ALLOC_TYPE_MSI; + arg->flags |= X86_IRQ_ALLOC_CONTIGUOUS_VECTORS; + } + + return 0; +} + +static void pci_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) +{ + arg->msi_hwirq = pci_msi_domain_calc_hwirq(arg->msi_dev, desc); +} + +static struct msi_domain_ops pci_msi_domain_ops = { + .get_hwirq = pci_msi_get_hwirq, + .msi_prepare = pci_msi_prepare, + .set_desc = pci_msi_set_desc, +}; + +static struct msi_domain_info pci_msi_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSIX, + .ops = &pci_msi_domain_ops, + .chip = &pci_msi_controller, + .handler = handle_edge_irq, + .handler_name = "edge", 
+}; + +void arch_init_msi_domain(struct irq_domain *parent) +{ + if (disable_apic) + return; + + msi_default_domain = pci_msi_create_irq_domain(NULL, + &pci_msi_domain_info, parent); + if (!msi_default_domain) + pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n"); +} + +#ifdef CONFIG_IRQ_REMAP +static struct irq_chip pci_msi_ir_controller = { + .name = "IR-PCI-MSI", + .irq_unmask = pci_msi_unmask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_ack = irq_chip_ack_parent, + .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_set_vcpu_affinity = irq_chip_set_vcpu_affinity_parent, + .flags = IRQCHIP_SKIP_SET_WAKE, +}; + +static struct msi_domain_info pci_msi_ir_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX, + .ops = &pci_msi_domain_ops, + .chip = &pci_msi_ir_controller, + .handler = handle_edge_irq, + .handler_name = "edge", +}; + +struct irq_domain *arch_create_msi_irq_domain(struct irq_domain *parent) +{ + return pci_msi_create_irq_domain(NULL, &pci_msi_ir_domain_info, parent); +} +#endif + +#ifdef CONFIG_DMAR_TABLE +static void dmar_msi_write_msg(struct irq_data *data, struct msi_msg *msg) +{ + dmar_msi_write(data->irq, msg); +} + +static struct irq_chip dmar_msi_controller = { + .name = "DMAR-MSI", + .irq_unmask = dmar_msi_unmask, + .irq_mask = dmar_msi_mask, + .irq_ack = irq_chip_ack_parent, + .irq_set_affinity = msi_domain_set_affinity, + .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_compose_msi_msg = irq_msi_compose_msg, + .irq_write_msi_msg = dmar_msi_write_msg, + .flags = IRQCHIP_SKIP_SET_WAKE, +}; + +static irq_hw_number_t dmar_msi_get_hwirq(struct msi_domain_info *info, + msi_alloc_info_t *arg) +{ + return arg->dmar_id; +} + +static int dmar_msi_init(struct irq_domain *domain, + struct msi_domain_info *info, unsigned int virq, + irq_hw_number_t hwirq, msi_alloc_info_t *arg) +{ + irq_domain_set_info(domain, virq, arg->dmar_id, info->chip, NULL, + handle_edge_irq, 
arg->dmar_data, "edge"); + + return 0; +} + +static struct msi_domain_ops dmar_msi_domain_ops = { + .get_hwirq = dmar_msi_get_hwirq, + .msi_init = dmar_msi_init, +}; + +static struct msi_domain_info dmar_msi_domain_info = { + .ops = &dmar_msi_domain_ops, + .chip = &dmar_msi_controller, +}; + +static struct irq_domain *dmar_get_irq_domain(void) +{ + static struct irq_domain *dmar_domain; + static DEFINE_MUTEX(dmar_lock); + + mutex_lock(&dmar_lock); + if (dmar_domain == NULL) + dmar_domain = msi_create_irq_domain(NULL, &dmar_msi_domain_info, + x86_vector_domain); + mutex_unlock(&dmar_lock); + + return dmar_domain; +} + +int dmar_alloc_hwirq(int id, int node, void *arg) +{ + struct irq_domain *domain = dmar_get_irq_domain(); + struct irq_alloc_info info; + + if (!domain) + return -1; + + init_irq_alloc_info(&info, NULL); + info.type = X86_IRQ_ALLOC_TYPE_DMAR; + info.dmar_id = id; + info.dmar_data = arg; + + return irq_domain_alloc_irqs(domain, 1, node, &info); +} + +void dmar_free_hwirq(int irq) +{ + irq_domain_free_irqs(irq, 1); +} +#endif + +/* + * MSI message composition + */ +#ifdef CONFIG_HPET_TIMER +static inline int hpet_dev_id(struct irq_domain *domain) +{ + struct msi_domain_info *info = msi_get_domain_info(domain); + + return (int)(long)info->data; +} + +static void hpet_msi_write_msg(struct irq_data *data, struct msi_msg *msg) +{ + hpet_msi_write(irq_data_get_irq_handler_data(data), msg); +} + +static struct irq_chip hpet_msi_controller = { + .name = "HPET-MSI", + .irq_unmask = hpet_msi_unmask, + .irq_mask = hpet_msi_mask, + .irq_ack = irq_chip_ack_parent, + .irq_set_affinity = msi_domain_set_affinity, + .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_compose_msi_msg = irq_msi_compose_msg, + .irq_write_msi_msg = hpet_msi_write_msg, + .flags = IRQCHIP_SKIP_SET_WAKE, +}; + +static irq_hw_number_t hpet_msi_get_hwirq(struct msi_domain_info *info, + msi_alloc_info_t *arg) +{ + return arg->hpet_index; +} + +static int hpet_msi_init(struct irq_domain 
*domain, + struct msi_domain_info *info, unsigned int virq, + irq_hw_number_t hwirq, msi_alloc_info_t *arg) +{ + irq_set_status_flags(virq, IRQ_MOVE_PCNTXT); + irq_domain_set_info(domain, virq, arg->hpet_index, info->chip, NULL, + handle_edge_irq, arg->hpet_data, "edge"); + + return 0; +} + +static void hpet_msi_free(struct irq_domain *domain, + struct msi_domain_info *info, unsigned int virq) +{ + irq_clear_status_flags(virq, IRQ_MOVE_PCNTXT); +} + +static struct msi_domain_ops hpet_msi_domain_ops = { + .get_hwirq = hpet_msi_get_hwirq, + .msi_init = hpet_msi_init, + .msi_free = hpet_msi_free, +}; + +static struct msi_domain_info hpet_msi_domain_info = { + .ops = &hpet_msi_domain_ops, + .chip = &hpet_msi_controller, +}; + +struct irq_domain *hpet_create_irq_domain(int hpet_id) +{ + struct irq_domain *parent; + struct irq_alloc_info info; + struct msi_domain_info *domain_info; + + if (x86_vector_domain == NULL) + return NULL; + + domain_info = kzalloc(sizeof(*domain_info), GFP_KERNEL); + if (!domain_info) + return NULL; + + *domain_info = hpet_msi_domain_info; + domain_info->data = (void *)(long)hpet_id; + + init_irq_alloc_info(&info, NULL); + info.type = X86_IRQ_ALLOC_TYPE_HPET; + info.hpet_id = hpet_id; + parent = irq_remapping_get_ir_irq_domain(&info); + if (parent == NULL) + parent = x86_vector_domain; + else + hpet_msi_controller.name = "IR-HPET-MSI"; + + return msi_create_irq_domain(NULL, domain_info, parent); +} + +int hpet_assign_irq(struct irq_domain *domain, struct hpet_dev *dev, + int dev_num) +{ + struct irq_alloc_info info; + + init_irq_alloc_info(&info, NULL); + info.type = X86_IRQ_ALLOC_TYPE_HPET; + info.hpet_data = dev; + info.hpet_id = hpet_dev_id(domain); + info.hpet_index = dev_num; + + return irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info); +} +#endif + diff --git a/arch/l/kernel/apic/probe_64.c b/arch/l/kernel/apic/probe_64.c new file mode 100644 index 000000000000..06fa9cb9b9c4 --- /dev/null +++ b/arch/l/kernel/apic/probe_64.c @@ -0,0 
+1,75 @@ +/* + * Copyright 2004 James Cleverdon, IBM. + * Subject to the GNU Public License, v.2 + * + * Generic APIC sub-arch probe layer. + * + * Hacked for x86-64 by James Cleverdon from i386 architecture code by + * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and + * James Cleverdon. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* + * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode. + */ +void __init_recv default_setup_apic_routing(void) +{ + struct apic **drv; + +#ifdef CONFIG_X86_X2APIC + enable_IR_x2apic(); +#endif + + for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) { + if ((*drv)->probe && (*drv)->probe()) { + if (apic != *drv) { + apic = *drv; + pr_info("Switched APIC routing to %s.\n", + apic->name); + } + break; + } + } +#if 0 + if (x86_platform.apic_post_init) + x86_platform.apic_post_init(); +#endif +} + +/* Same for both flat and physical. */ + +void apic_send_IPI_self(int vector) +{ + __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); +} + +int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ + struct apic **drv; + + for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) { + if ((*drv)->acpi_madt_oem_check(oem_id, oem_table_id)) { + if (apic != *drv) { + apic = *drv; + pr_info("Setting APIC routing to %s.\n", + apic->name); + } + return 1; + } + } + return 0; +} diff --git a/arch/l/kernel/apic/smp.c b/arch/l/kernel/apic/smp.c new file mode 100644 index 000000000000..fe5f51793755 --- /dev/null +++ b/arch/l/kernel/apic/smp.c @@ -0,0 +1,131 @@ +/* + * SMP IPI Support + */ + +#include +#include +#include + +#include + +/* + * the following functions deal with sending IPIs between CPUs. + * + * We use 'broadcast', CPU->CPU IPIs and self-IPIs too. 
+ */ + +void apic_send_call_function_ipi_mask(const struct cpumask *mask) +{ + apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR); +} + +void apic_send_call_function_single_ipi(int cpu) +{ + apic->send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR); +} + +/* + * this function sends a 'reschedule' IPI to another CPU. + * it goes straight through and wastes no time serializing + * anything. Worst case is that we lose a reschedule ... + */ +void apic_smp_send_reschedule(int cpu) +{ + if (unlikely(cpu_is_offline(cpu))) { + WARN_ON(1); + return; + } + current->intr_sc = get_cycles(); + apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR); +} + + +/* + * Reschedule call back. Nothing to do, + * all the work is done automatically when + * we return from the interrupt. + */ +static inline void __smp_reschedule_interrupt(void) +{ + inc_irq_stat(irq_resched_count); + scheduler_ipi(); +} + +__visible void smp_reschedule_interrupt(struct pt_regs *regs) +{ + l_irq_enter(); + ack_APIC_irq(); + __smp_reschedule_interrupt(); + l_irq_exit(); +} + +static inline void smp_entering_irq(void) +{ + l_irq_enter(); + ack_APIC_irq(); +} + +#if 0 +__visible void smp_trace_reschedule_interrupt(struct pt_regs *regs) +{ + /* + * Need to call l_irq_enter() before calling the trace point. + * __smp_reschedule_interrupt() calls l_irq_enter/exit() too (in + * scheduler_ipi(). This is OK, since those functions are allowed + * to nest. 
+ */ + smp_entering_irq(); + trace_reschedule_entry(RESCHEDULE_VECTOR); + __smp_reschedule_interrupt(); + trace_reschedule_exit(RESCHEDULE_VECTOR); + exiting_irq(); +} +#endif + +static inline void __smp_call_function_interrupt(void) +{ + generic_smp_call_function_interrupt(); + inc_irq_stat(irq_call_count); +} + +__visible void smp_call_function_interrupt(struct pt_regs *regs) +{ + smp_entering_irq(); + __smp_call_function_interrupt(); + exiting_irq(); +} + +#if 0 +__visible void smp_trace_call_function_interrupt(struct pt_regs *regs) +{ + smp_entering_irq(); + trace_call_function_entry(CALL_FUNCTION_VECTOR); + __smp_call_function_interrupt(); + trace_call_function_exit(CALL_FUNCTION_VECTOR); + exiting_irq(); +} +#endif + +static inline void __smp_call_function_single_interrupt(void) +{ + generic_smp_call_function_single_interrupt(); + inc_irq_stat(irq_call_count); +} + +__visible void smp_call_function_single_interrupt(struct pt_regs *regs) +{ + smp_entering_irq(); + __smp_call_function_single_interrupt(); + exiting_irq(); +} + +#if 0 +__visible void smp_trace_call_function_single_interrupt(struct pt_regs *regs) +{ + smp_entering_irq(); + trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR); + __smp_call_function_single_interrupt(); + trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR); + exiting_irq(); +} +#endif diff --git a/arch/l/kernel/boot_profiling.c b/arch/l/kernel/boot_profiling.c new file mode 100644 index 000000000000..479adbbf286b --- /dev/null +++ b/arch/l/kernel/boot_profiling.c @@ -0,0 +1,191 @@ +/* linux/arch/e2k/lib/boot_profiling.c. + * + * Copyright (C) 2011 MCST + */ + +#include +#include + +#include +#include +#include +#include + +/* This assignment makes sure that this array is not put + * into BSS segment and cleared by kernel while being in use. + * This is also the reason to use '-1' for the 'top_event' + * and '1' for 'boot_trace_enabled' initial values. 
*/ +struct boot_tracepoint boot_trace_events[BOOT_TRACE_ARRAY_SIZE] = { + [0].cpu = 1, + [BOOT_TRACE_ARRAY_SIZE - 1].cpu = 1 +}; +atomic_t boot_trace_top_event = ATOMIC_INIT(-1); + +int boot_trace_enabled = 1; + + +#if NR_CPUS > 64 +# error please initialize boot_trace_cpu_events_list[] for other cpus... +#endif + +#if NR_CPUS > 48 +# define BOOT_TRACE_LIST_SIZE NR_CPUS +#elif NR_CPUS > 32 +# define BOOT_TRACE_LIST_SIZE 48 +#elif NR_CPUS > 16 +# define BOOT_TRACE_LIST_SIZE 32 +#elif NR_CPUS > 4 +# define BOOT_TRACE_LIST_SIZE 16 +#elif NR_CPUS > 1 +# define BOOT_TRACE_LIST_SIZE 4 +#else +# define BOOT_TRACE_LIST_SIZE 1 +#endif +struct list_head boot_trace_cpu_events_list[BOOT_TRACE_LIST_SIZE] = { + [0] = LIST_HEAD_INIT(boot_trace_cpu_events_list[0]), +#if NR_CPUS > 1 + [1] = LIST_HEAD_INIT(boot_trace_cpu_events_list[1]), + [2] = LIST_HEAD_INIT(boot_trace_cpu_events_list[2]), + [3] = LIST_HEAD_INIT(boot_trace_cpu_events_list[3]), +#if NR_CPUS > 4 + [4] = LIST_HEAD_INIT(boot_trace_cpu_events_list[4]), + [5] = LIST_HEAD_INIT(boot_trace_cpu_events_list[5]), + [6] = LIST_HEAD_INIT(boot_trace_cpu_events_list[6]), + [7] = LIST_HEAD_INIT(boot_trace_cpu_events_list[7]), + [8] = LIST_HEAD_INIT(boot_trace_cpu_events_list[8]), + [9] = LIST_HEAD_INIT(boot_trace_cpu_events_list[9]), + [10] = LIST_HEAD_INIT(boot_trace_cpu_events_list[10]), + [11] = LIST_HEAD_INIT(boot_trace_cpu_events_list[11]), + [12] = LIST_HEAD_INIT(boot_trace_cpu_events_list[12]), + [13] = LIST_HEAD_INIT(boot_trace_cpu_events_list[13]), + [14] = LIST_HEAD_INIT(boot_trace_cpu_events_list[14]), + [15] = LIST_HEAD_INIT(boot_trace_cpu_events_list[15]), +#if NR_CPUS > 16 + [16] = LIST_HEAD_INIT(boot_trace_cpu_events_list[16]), + [17] = LIST_HEAD_INIT(boot_trace_cpu_events_list[17]), + [18] = LIST_HEAD_INIT(boot_trace_cpu_events_list[18]), + [19] = LIST_HEAD_INIT(boot_trace_cpu_events_list[19]), + [20] = LIST_HEAD_INIT(boot_trace_cpu_events_list[20]), + [21] = LIST_HEAD_INIT(boot_trace_cpu_events_list[21]), + [22] = 
LIST_HEAD_INIT(boot_trace_cpu_events_list[22]), + [23] = LIST_HEAD_INIT(boot_trace_cpu_events_list[23]), + [24] = LIST_HEAD_INIT(boot_trace_cpu_events_list[24]), + [25] = LIST_HEAD_INIT(boot_trace_cpu_events_list[25]), + [26] = LIST_HEAD_INIT(boot_trace_cpu_events_list[26]), + [27] = LIST_HEAD_INIT(boot_trace_cpu_events_list[27]), + [28] = LIST_HEAD_INIT(boot_trace_cpu_events_list[28]), + [29] = LIST_HEAD_INIT(boot_trace_cpu_events_list[29]), + [30] = LIST_HEAD_INIT(boot_trace_cpu_events_list[30]), + [31] = LIST_HEAD_INIT(boot_trace_cpu_events_list[31]), +#if NR_CPUS > 32 + [32] = LIST_HEAD_INIT(boot_trace_cpu_events_list[32]), + [33] = LIST_HEAD_INIT(boot_trace_cpu_events_list[33]), + [34] = LIST_HEAD_INIT(boot_trace_cpu_events_list[34]), + [35] = LIST_HEAD_INIT(boot_trace_cpu_events_list[35]), + [36] = LIST_HEAD_INIT(boot_trace_cpu_events_list[36]), + [37] = LIST_HEAD_INIT(boot_trace_cpu_events_list[37]), + [38] = LIST_HEAD_INIT(boot_trace_cpu_events_list[38]), + [39] = LIST_HEAD_INIT(boot_trace_cpu_events_list[39]), + [40] = LIST_HEAD_INIT(boot_trace_cpu_events_list[40]), + [41] = LIST_HEAD_INIT(boot_trace_cpu_events_list[41]), + [42] = LIST_HEAD_INIT(boot_trace_cpu_events_list[42]), + [43] = LIST_HEAD_INIT(boot_trace_cpu_events_list[43]), + [44] = LIST_HEAD_INIT(boot_trace_cpu_events_list[44]), + [45] = LIST_HEAD_INIT(boot_trace_cpu_events_list[45]), + [46] = LIST_HEAD_INIT(boot_trace_cpu_events_list[46]), + [47] = LIST_HEAD_INIT(boot_trace_cpu_events_list[47]), +#if NR_CPUS > 48 + [48] = LIST_HEAD_INIT(boot_trace_cpu_events_list[48]), + [49] = LIST_HEAD_INIT(boot_trace_cpu_events_list[49]), + [50] = LIST_HEAD_INIT(boot_trace_cpu_events_list[50]), + [51] = LIST_HEAD_INIT(boot_trace_cpu_events_list[51]), + [52] = LIST_HEAD_INIT(boot_trace_cpu_events_list[52]), + [53] = LIST_HEAD_INIT(boot_trace_cpu_events_list[53]), + [54] = LIST_HEAD_INIT(boot_trace_cpu_events_list[54]), + [55] = LIST_HEAD_INIT(boot_trace_cpu_events_list[55]), + [56] = 
LIST_HEAD_INIT(boot_trace_cpu_events_list[56]), + [57] = LIST_HEAD_INIT(boot_trace_cpu_events_list[57]), + [58] = LIST_HEAD_INIT(boot_trace_cpu_events_list[58]), + [59] = LIST_HEAD_INIT(boot_trace_cpu_events_list[59]), + [60] = LIST_HEAD_INIT(boot_trace_cpu_events_list[60]), + [61] = LIST_HEAD_INIT(boot_trace_cpu_events_list[61]), + [62] = LIST_HEAD_INIT(boot_trace_cpu_events_list[62]), + [63] = LIST_HEAD_INIT(boot_trace_cpu_events_list[63]), +#endif /* NR_CPUS > 48 */ +#endif /* NR_CPUS > 32 */ +#endif /* NR_CPUS > 16 */ +#endif /* NR_CPUS > 4 */ +#endif /* NR_CPUS > 1 */ +}; + + +__init_recv void notrace add_boot_trace_event(const char *fmt, ...) +{ + va_list ap; + struct boot_tracepoint *event; + unsigned long flags; + long index; + unsigned int cpu; + + if (!boot_trace_enabled) + return; + + index = atomic_inc_return(&boot_trace_top_event); + if (unlikely(index >= BOOT_TRACE_ARRAY_SIZE)) { + WARN_ONCE(1, "WARNING Overflow of boot tracepoints array! " + "Disabling it...\n"); + atomic_set(&boot_trace_top_event, BOOT_TRACE_ARRAY_SIZE - 1); + return; + } + + event = &boot_trace_events[index]; + va_start(ap, fmt); + vscnprintf(event->name, 81, fmt, ap); + va_end(ap); + + raw_local_irq_save(flags); + cpu = raw_smp_processor_id(); + + event->cpu = cpu; + list_add_tail(&event->list, &boot_trace_cpu_events_list[cpu]); + + event->cycles = boot_trace_get_cycles(); + raw_local_irq_restore(flags); +} + +struct boot_tracepoint *boot_trace_prev_event(int cpu, + struct boot_tracepoint *event) +{ + struct boot_tracepoint *prev; + + prev = list_entry(event->list.prev, struct boot_tracepoint, list); + + if (&prev->list == &boot_trace_cpu_events_list[cpu]) + return NULL; + else + return prev; +} + +struct boot_tracepoint *boot_trace_next_event(int cpu, + struct boot_tracepoint *event) +{ + struct boot_tracepoint *next; + + if (cpu >= NR_CPUS) { + WARN_ON(1); + return NULL; + } + + next = list_entry(event->list.next, struct boot_tracepoint, list); + + if (&next->list == 
&boot_trace_cpu_events_list[cpu]) + return NULL; + else + return next; +} + +void stop_boot_trace() +{ + boot_trace_enabled = 0; +} + diff --git a/arch/l/kernel/clk_rt.c b/arch/l/kernel/clk_rt.c new file mode 100644 index 000000000000..afa8c48dd4ba --- /dev/null +++ b/arch/l/kernel/clk_rt.c @@ -0,0 +1,301 @@ +/* + * arch/e2k/kernel/clk_rt.c + * + * This file contains implementation of clk_rt clocksource. + * + * Copyright (C) MCST 2018 Leonid Ananiev (leoan@mcst.ru) + */ + +#if defined(CONFIG_E90S) +#include +#include +#include +#include +#include +#include +#include +#define DBG_CLK_RT 0 + +#define clk_rt_clocksource_register() \ + __clocksource_register(&clocksource_clk_rt) +#define MASK_32 0xffffffff +#define NPT_MASK 0x8000000000000000LL +#define SOFT_OK_MASK 0x4000000000000000LL + +int clk_rt_mode = CLK_RT_RTC; +EXPORT_SYMBOL(clk_rt_mode); +/* for single clk_rt_register thread run if multiple RTC */ +atomic_t num_clk_rt_register = ATOMIC_INIT(-1); +EXPORT_SYMBOL(num_clk_rt_register); + +static int __init clk_rt_setup(char *s) +{ + int error; + static struct task_struct *reg_task; + if (!s || (strcmp(s, "no") && strcmp(s, "rtc") && + strcmp(s, "ext") && strcmp(s, "int"))) { + pr_err(KERN_ERR "Possible sclkr cmdline modes are:\n" + "no, ext, rtc, int\n"); + return -EINVAL; + } + + if (get_cpu_revision() >= 0x10 && s) { + if (!strcmp(s, "ext")) { + reg_task = kthread_run(clk_rt_register, + (void *)CLK_RT_EXT, "clk_rt_register"); + if (IS_ERR(reg_task)) { + error = PTR_ERR(reg_task); + pr_err(KERN_ERR "Failed to start" + " clk_rt register" + " thread, error: %d\n", error); + return error; + } + } + if (!strcmp(s, "rtc")) { + clk_rt_mode = CLK_RT_RTC; + } + if (!strcmp(s, "no")) { + clk_rt_mode = CLK_RT_NO; + } + } + return 0; +} +__setup("clk_rt=", clk_rt_setup); +static inline unsigned long read_rt_tick(void) +{ + u64 rt_tick; /* RT_TICK ASR 0x1e */ + __asm__ __volatile__ ("rd %%asr30, %0\n\t" : "=r"(rt_tick)); + return rt_tick; +}; +static inline unsigned long 
read_rt_div(void) +{ + u64 rt_div; /* RT_DIV ASR 0x1f */ + __asm__ __volatile__ ("rd %%asr31, %0\n\t" : "=r"(rt_div)); + return rt_div; +}; +static inline void write_rt_tick(u64 rt_tick_v) +{ + __asm__ __volatile__("wr %0, 0, %%asr30" + : /* no outputs */ + : "r" (rt_tick_v)); +}; +static inline void write_rt_div(u64 rt_div_v) +{ + __asm__ __volatile__("wr %0, 0, %%asr31" + : /* no outputs */ + : "r" (rt_div_v)); +}; + +static inline int soft_ok(void) +{ + if (get_cpu_revision() >= 0x12) + return ((read_rt_div() & SOFT_OK_MASK) != 0); + else + return ((read_rt_div() & NPT_MASK) == 0); +} + +/* Use an aligned structure to make it occupy a whole cache line */ +struct { + u64 res; +} ____cacheline_aligned_in_smp prev_clk_rt = { 0 }; + +u64 clk_rt_old = 0; +#if DBG_CLK_RT +int cpu_before = 0; +int num_bmc = 0, pr_sec_bf = 0; +#endif +static u64 read_clk_rt(struct clocksource *cs) +{ + u64 clk_rt_lo, clk_rt_sec, clk_rt_v; + int freq; + u64 loc_clk_rt_old = 0; + unsigned long flags; + u64 res = 0; + + raw_local_irq_save(flags); + clk_rt_v = read_rt_tick(); + clk_rt_lo = clk_rt_v & MASK_32; + clk_rt_sec = clk_rt_v >> 32; + freq = read_rt_div() & MASK_32; + + if (!soft_ok()) { + pr_err_once("ERROR: clocksource clk_rt is not initialised" + " clk_rt_sec=%lld clk_rt_lo=%lld read_rt_div=0x%lx\n", + clk_rt_sec, clk_rt_lo, read_rt_div()); + raw_local_irq_restore(flags); + return 0; + } + res = clk_rt_sec * NSEC_PER_SEC + + clk_rt_lo * NSEC_PER_SEC / freq; +#pragma loop count(1) + while (1) { + loc_clk_rt_old = clk_rt_old; + if (res > loc_clk_rt_old) { + if (cmpxchg(&clk_rt_old, loc_clk_rt_old, + res) == loc_clk_rt_old) + break; + } else { +#if DBG_CLK_RT + if (num_bmc < 100 || pr_sec_bf != clk_rt_sec) { + num_bmc++; + pr_sec_bf = clk_rt_sec; + pr_warn("clk_rt old>res cpu=%d" + " cpu_old=%d" + " resold %10lld.%10lld" + " res %10lld.%10lld" + " %10lld.%10lld\n", + raw_smp_processor_id(), + cpu_before, + clk_rt_old / 1000000000, + clk_rt_old % 1000000000, + res / 1000000000, + 
res%1000000000, + clk_rt_old / 1000000000 - res / + 1000000000, + clk_rt_old % 1000000000 - res % + 1000000000); + } + cpu_before = raw_smp_processor_id(); +#endif + res = clk_rt_old; + break; + } + } + raw_local_irq_restore(flags); + return res; +} +static void susp_clk_rt(struct clocksource *clocksource) +{ + pr_warn("DEBUG: clocksource clk_rt suspend.\n"); + if (strcmp(curr_clocksource->name, "clk_rt") == 0) { + if (timekeeping_notify(<_cs)) { + pr_warn("susp_clk_rt: can't set lt clocksourse\n"); + } + } +} +static void resume_clk_rt(struct clocksource *clocksource) +{ + pr_crit("DEBUG: clocksource clk_rt resume is not need\n"); +} + +#define SCLK_CSOUR_SHFT 20 +/* ns = (cyc * mult) >> shift + * for clk_rt cyc==ns then 1 = (1 * mult) >> shift */ +struct clocksource clocksource_clk_rt = { + .name = "clk_rt", + .rating = 400, + .read = read_clk_rt, + .suspend = susp_clk_rt, + .resume = resume_clk_rt, + .mask = CLOCKSOURCE_MASK(64 - SCLK_CSOUR_SHFT), + .shift = SCLK_CSOUR_SHFT, + .mult = 1 << SCLK_CSOUR_SHFT, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; +EXPORT_SYMBOL(clocksource_clk_rt); + +void clk_rt_wr_seconds(void *arg) +{ + u64 w_clk_rt_sec = (u64) arg; +#if DBG_CLK_RT + pr_warn("clk_rt bf clk_rt_wr_seconds cpu %d %10ld.%9ld" + " w_clk_rt_sec %lld %llx\n", + raw_smp_processor_id(), + read_rt_tick() >> 32, read_rt_tick() & MASK_32, + w_clk_rt_sec, w_clk_rt_sec << 32); +#endif + write_rt_tick(w_clk_rt_sec << 32); +#if DBG_CLK_RT + pr_warn("clk_rt af clk_rt_wr_seconds cpu %d %10ld.%9ld\n", + raw_smp_processor_id(), + read_rt_tick() >> 32, read_rt_tick() & MASK_32); +#endif +} + +void set_soft_ok(void *arg) +{ + if (get_cpu_revision() >= 0x12) { + write_rt_div(SOFT_OK_MASK); + } else { /* set NPT to zero - clk_rt init is OK */ + u64 rt_div_v = read_rt_div(); + write_rt_div(read_rt_div() & MASK_32); + } +} + +noinline int clk_rt_register(void *new_clk_rt_src_arg) +{ + u64 clk_rt_lo, clk_rt_sec, clk_rt_v; + unsigned int freq, safe_lo, safe_lo2; + struct timespec ts; 
+ unsigned long flags; + int i; + + /* FIXME add call register_cpu_notifier() for cpu hotplug case */ + /* We want to be far from beginning of next second. + */ + if (clk_rt_mode == CLK_RT_RTC) { + for (i = 0; i < 5; i++) { + if (read_rt_tick() >> 32) + break; + schedule_timeout_interruptible(HZ); + } + } + migrate_disable(); + raw_local_irq_save(flags); + clk_rt_v = read_rt_tick(); + clk_rt_lo = clk_rt_v & MASK_32; + clk_rt_sec = clk_rt_v >> 32; + freq = read_rt_div() & MASK_32; + if (clk_rt_sec == 0 || freq == 0) { + pr_err("CLK_RT: There is no pulse per second signal.\n"); + pr_err("CLK_RT: sec = %lld freq = %d\n", clk_rt_sec, freq); + return 1; + } + /* before smp_call_function() wait we are far from PPS */ + safe_lo = (freq >> 2) + (freq >> 3); + safe_lo2 = freq - (freq >> 2); + while (clk_rt_lo < safe_lo || clk_rt_lo > safe_lo2) { + cpu_relax(); + clk_rt_lo = read_rt_tick() & MASK_32; + /* ? schedule_timeout_interruptible(HZ / 2); */ + } + raw_local_irq_restore(flags); + migrate_enable(); + getnstimeofday(&ts); + while (system_state != SYSTEM_RUNNING) { +#if DBG_CLK_RT + pr_warn("clk_rt before wr_seconds" + " cpu=%d rt_tick= %10ld.%9ld" + " tod %ld system_state=%d\n", + raw_smp_processor_id(), + read_rt_tick() >> 32, read_rt_tick() & MASK_32, + ts.tv_sec, system_state); +#endif + schedule_timeout_interruptible(3 * HZ); + getnstimeofday(&ts); + } + smp_call_function(clk_rt_wr_seconds, (void *) ts.tv_sec, 1); + clk_rt_wr_seconds((void *) ts.tv_sec); +#if DBG_CLK_RT + pr_warn("clk_rt after wr_seconds " + " cpu=%d rt_tick= %10ld.%9ld" + " tod %ld system_state=%d\n", + raw_smp_processor_id(), + read_rt_tick() >> 32, read_rt_tick() & MASK_32, + ts.tv_sec, system_state); +#endif + pr_info("clk_rt clocksource registation at cpu %d " + "clk_rt=%lld.%09llu sec, getnstod =%ld.%09ld div=%u Hz\n", + raw_smp_processor_id(), clk_rt_sec, + (unsigned long long)clk_rt_lo * NSEC_PER_SEC / freq, + ts.tv_sec, ts.tv_nsec, freq); + /* timeout 2 second after seconds writing into 
CLK_RT + until rt_div will be correct */ + schedule_timeout_interruptible(3 * HZ); + smp_call_function(set_soft_ok, NULL, 1); + set_soft_ok(NULL); + clk_rt_clocksource_register(); + return 0; +} +EXPORT_SYMBOL(clk_rt_register); +#endif /* E90S */ diff --git a/arch/l/kernel/clkr.c b/arch/l/kernel/clkr.c new file mode 100644 index 000000000000..d88b6f4434a3 --- /dev/null +++ b/arch/l/kernel/clkr.c @@ -0,0 +1,293 @@ +/* + * arch/e2k/kernel/clkr.c + * + * This file contains implementation of clkr clocksource. + * + * Copyright (C) 2011 Pavel V. Panteleev (panteleev_p@mcst.ru) + */ + +/* includes */ +#include +#include +#include +#include + +#include +#ifdef CONFIG_E2K +# include +#endif + + +/* definitions */ + +/* See comment before __cycles_2_ns() */ +#define CYC2NS_SCALE 22 +/* CPU frequency must be greater than this to avoid overflows on conversions */ +#define CYC2NS_MIN_CPU_FREQ \ + ((NSEC_PER_SEC << CYC2NS_SCALE) / ((1UL << 32) - 1UL)) + +/* globals */ + +struct clocksource clocksource_clkr; + +/* locals */ + +u64 last_clkr; + +static bool clkr_unreliable = true; + +/* + * Offset of the CPU's clkr. Currently it might be needed in these cases: + * 1) Different CPUs start at a different time. + * 2) A processor core has been disabled for some time. + * 3) Different processor on NUMA use different clock generators. + */ +DEFINE_PER_CPU(u64, clkr_offset) = 0; + +/* + * Used when converting cycles to nanoseconds in sched_clock() + * with the following formula: + * nanoseconds = (cycles * mult) >> CYC2NS_SCALE + * + * Since sched_clock() tolerates small errors and all CPUs + * are running at the same frequency, we use the same 'mult' + * for all CPUs. + */ +static u64 __read_mostly mult; + + + +/* Until this function comlpetes sched_clock() will always + * return 0 due to 'mult' being set to 0. 
*/ +static int __init e2k_sched_clock_init(void) +{ + u64 freq = cpu_freq_hz; + + /* Set multiplication factor for sched_clock() */ + mult = ((NSEC_PER_SEC << CYC2NS_SCALE) + freq / 2) / freq; + if (unlikely(mult >= (1UL << 32))) { + /* Cannot use math with scaling + * because of overflows. */ + pr_warning("CPU frequency is too low, sched_clock() " + "will be a bit imprecise\n"); + } + + return 0; +} +pure_initcall(e2k_sched_clock_init); + +/* + * Here is the math (NSEC_PER_SEC = 10^9): + * + * nanoseconds = cycles * coeff + * 10^9 = cpufreq * coeff + * coeff = 10^9 / cpufreq <=== cpufreq ~= 10^9, so this won't do + * coeff = (10^9 * 2^scale) / (cpufreq * 2^scale) + * coeff = ((10^9 << scale) / cpufreq) >> scale + * + * We want to avoid doing division on the hot path, so we precompute: + * mult = (10^9 << scale) / cpufreq + * nanoseconds = (cycles * mult) >> scale + * + * The rounding error when computing mult is (assuming cpufreq <= 10^9): + * error = cpufreq / (2 * 10^9 << scale) <= 1 / (2 ^ (scale + 1)) + * For scale of 22 error will be 10^-7 (0,000012 %). + * + * To avoid overflows use special math. Let's denote with cyc_l + * the left (biggest) 32-bits part of cyc and with cyc_r the + * right 32-bits part of cyc. We assume that mult can be held in + * 32-bits integer (this is true for cpufreq >= 976563 Hz if + * scale equals 22). + * + * ns = cyc * mult >> scale + * ns = (cyc_r + (cyc_l << 32)) * mult >> scale + * ns = cyc_r * mult >> scale + cyc_l * mult << (32 - scale) + */ +static inline unsigned long long __cycles_2_ns(cycles_t cyc) +{ + const u64 freq = cpu_freq_hz; + u64 cyc_l, cyc_r, ns; + +#ifdef CONFIG_CLKR_OFFSET + /* Add per-cpu offset */ + cyc += per_cpu(clkr_offset, smp_processor_id()); +#endif + /* Ensure monotonicity (and protect from clkr stops) */ + if (unlikely(cyc < last_clkr)) + cyc = last_clkr; + /* Do not update last_clkr and offset here as sched_clock() + * tolerates small errors and must be as fast as possible. 
*/ + + /* Split cyc in two 32-bits parts */ + cyc_l = cyc >> 32; + cyc_r = cyc & 0xFFFFFFFFUL; + + if (unlikely(mult >= (1UL << 32))) { + /* Too bad. Can't do the scaled math, but the frequency + * should be rather low for this to happen (not greater + * than CYC2NS_MIN_CPU_FREQ) so use normal math with 0 + * scale (slow case which should not happen). */ + ns = (cyc_r * NSEC_PER_SEC + freq/2) / freq + + (((cyc_l * NSEC_PER_SEC + freq/2) / freq) << 32); + } else { + static bool warned; + + /* Compute nanoseconds without 64-bits overflows */ + ns = ((cyc_r * mult) >> CYC2NS_SCALE) + + ((cyc_l * mult) << (32 - CYC2NS_SCALE)); + + if (unlikely(freq <= CYC2NS_MIN_CPU_FREQ) && freq && !warned && + system_state == SYSTEM_RUNNING) { + /* Whoops, should not happen since we already + * have checked mult. Looks like there is + * something wrong with mathematics or our stack. */ + warned = true; + + pr_err("Looks like there is an error in mathematics " + "in sched_clock() or someone is doing " + "something very bad!\nfreq = %lld <= " + "CYC2NS_SCALE = %d, CYC2NS_MIN_CPU_FREQ = %ld," + " cpu = %d, mult = %lld, cycles = 0x%lx, " + "cyc_r = 0x%llx, cyc_l = 0x%llx, offset = %lld\n", + freq, CYC2NS_SCALE, CYC2NS_MIN_CPU_FREQ, + smp_processor_id(), mult, cyc, cyc_r, cyc_l, + per_cpu(clkr_offset, smp_processor_id())); + WARN_ON(1); + } + } + + return ns; +} + +/* + * Scheduler clock - returns current time in nanosec units. + */ +unsigned long long sched_clock(void) +{ + unsigned long long ns; +#ifdef CONFIG_CLKR_OFFSET + unsigned long flags; +#endif + +#ifdef CONFIG_E2K +# ifdef CONFIG_SCLKR_CLOCKSOURCE + if (use_sclkr_sched_clock()) + return sclkr_sched_offset + raw_read_sclkr(); +# endif + if (clkr_unreliable) + return (unsigned long long)(jiffies - INITIAL_JIFFIES) + * (NSEC_PER_SEC / HZ); +#endif + +#ifdef CONFIG_CLKR_OFFSET + /* Close interrupts to make sure that cpu does not + * change after reading cycles. 
*/ + raw_local_irq_save(flags); +#endif + ns = __cycles_2_ns(get_cycles()); +#ifdef CONFIG_CLKR_OFFSET + raw_local_irq_restore(flags); +#endif + return ns; +} + +static u64 read_clkr(struct clocksource *cs) +{ + unsigned long flags; + u64 before, now; + + raw_local_irq_save(flags); + before = last_clkr; + /* Make sure we read 'last_clkr' before CLKR register */ +#ifdef CONFIG_SPARC64 + __asm__ __volatile__("membar #Sync"); +#else + smp_rmb(); +#endif + now = get_cycles(); + +#ifdef CONFIG_CLKR_OFFSET + now += per_cpu(clkr_offset, smp_processor_id()); +#endif + if (unlikely(now < before)) { +#if defined CONFIG_CLKR_SYNCHRONIZATION_WARNING || defined CONFIG_CLKR_OFFSET + unsigned int cpu = smp_processor_id(); +#endif + + /* Time is going backwards. This must be because of + * clkr drift (or someone disabling CPUs... in which + * case offset should be corrected in resume()). */ +#ifdef CONFIG_CLKR_SYNCHRONIZATION_WARNING + printk(KERN_DEBUG "CLKR on CPU%d is behind: clkr = %llu, " + "last read value = %llu\n", + cpu, now, before); +# ifdef CONFIG_CLKR_OFFSET + printk(KERN_DEBUG "offset = %llu\n", per_cpu(clkr_offset, cpu)); +# endif +#endif +#ifdef CONFIG_CLKR_OFFSET + per_cpu(clkr_offset, cpu) += before - now; +#endif + now = before; + } else { + last_clkr = now; + } + raw_local_irq_restore(flags); + + return now; +} + +static void resume_clkr(struct clocksource *cs) +{ + pr_crit("WARNING: clocksource clkr resume not implemented. 
" + "You should probably adjust offset here.\n"); +} + +struct clocksource clocksource_clkr = { + .name = "clkr", +#ifdef CONFIG_E2K + .rating = 300, +#else + .rating = 100, +#endif + .read = read_clkr, + .resume = resume_clkr, + .mask = CLOCKSOURCE_MASK(64), + .shift = 22, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static int __init clkr_init(void) +{ +#ifdef CONFIG_E2K + u8 mb_type = bootblock_virt->info.bios.mb_type; + + /* SCLKR should be used on systems that support it.*/ + if (machine.native_iset_ver >= E2K_ISET_V3) + return 0; + + /* Sivuch has multiple motherboards without clock synchronization. */ + if (mb_type != MB_TYPE_ES2_RTC_CY14B101P_MULTICLOCK) + clkr_unreliable = false; +#endif + + /* Sivuch has multiple motherboards without clock synchronization. */ + if (num_online_nodes() <= 1) + clkr_unreliable = false; + + if (clkr_unreliable) { + clear_sched_clock_stable(); + return 0; + } + +#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET + pr_warning("Warning: clkr clocksource is disabled because " + "ARCH_USES_GETTIMEOFFSET was enabled in " + "kernel configuration.\n"); +#else + clocksource_register_hz(&clocksource_clkr, cpu_freq_hz); +#endif + + return 0; +} +arch_initcall(clkr_init); diff --git a/arch/l/kernel/console.c b/arch/l/kernel/console.c new file mode 100644 index 000000000000..e5f6c16c5cb1 --- /dev/null +++ b/arch/l/kernel/console.c @@ -0,0 +1,841 @@ +#ifdef CONFIG_EARLY_DUMP_CONSOLE +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_E2K +#include +#endif +#include + +#ifdef CONFIG_E2K +# include +#else +# define boot_spinlock_t arch_spinlock_t +# define arch_boot_spin_lock arch_spin_lock +# define arch_boot_spin_unlock arch_spin_unlock +# define __BOOT_SPIN_LOCK_UNLOCKED __ARCH_SPIN_LOCK_UNLOCKED +#endif + +#undef DEBUG_SC_MODE +#undef DebugSC +#define DEBUG_SC_MODE 0 /* serial console debug */ +#define DebugSC if (DEBUG_SC_MODE) dump_printk + +#ifdef CONFIG_SERIAL_PRINTK +/* list of all enabled 
serial consoles, NULL terminated */ +static serial_console_opts_t* serial_dump_consoles[] = { +#if defined(CONFIG_SERIAL_AM85C30_CONSOLE) + &am85c30_serial_console, +#endif /* SERIAL AM85C30 CONSOLE */ + NULL, +}; + +static volatile int serial_console_inited = 0; +serial_console_opts_t *serial_console_opts = NULL; +static void *serial_console_io_base = NULL; +unsigned char serial_dump_console_num = 0; + +static void __init_recv setup_serial_console_io_base(boot_info_t *boot_info) +{ + serial_console_io_base = (void *)boot_info->serial_base; +} + +void *get_serial_console_io_base(void) +{ + return serial_console_io_base; +} + +/* + * Iterates through the list of serial consoles, + * returning the first one that initializes successfully. + */ +void __init_recv setup_serial_dump_console(boot_info_t *boot_info) +{ + serial_console_opts_t **consoles = serial_dump_consoles; + serial_console_opts_t *console; + int i; + + if (serial_dump_console_num) + return; + + DebugSC("setup_serial_dump_console() started for consoles " + "list 0x%lx\n", consoles); + + setup_serial_console_io_base(boot_info); + +#ifdef CONFIG_E2K +#ifdef CONFIG_SMP + if (!read_pic_bsp()) { + DebugSC("setup_serial_dump_console() CPU is not BSP " + "waiting for init completion\n"); + while (!serial_console_inited) + ; + DebugSC("setup_serial_dump_console() waiting for init " + "completed\n"); + return; + } +#endif /* CONFIG_SMP */ +#endif /* CONFIG_E2K */ + + /* find most preferred working serial console */ + i = 0; + console = consoles[i]; + DebugSC("setup_serial_dump_console() start console is 0x%lx\n", + console); + while (console != NULL) { + DebugSC("setup_serial_dump_console() console " + "init entry 0x%lx\n", console->init); + if (console->init != NULL) { + if (console->init(serial_console_io_base) == 0) { + serial_console_opts = console; + serial_console_inited = 1; + DebugSC("setup_serial_dump_console() set " + "this console for using\n"); + return; + } + } + i++; + console = consoles[i]; + 
DebugSC("setup_serial_dump_console() next console " + "pointer 0x%lx\n", console); + } + dump_printk("setup_serial_dump_console() could not find working " + "serial console\n"); + serial_console_inited = -1; +} +#endif /* CONFIG_SERIAL_PRINTK */ + +#define FALSE 0 +#define TRUE 1 + +#define is_digit(c) ((c >= '0') && (c <= '9')) + + +static char temp[80]; +static int __init_cons +cvt(unsigned long val, char *buf, long radix, char *digits) +{ + register char *cp = temp; + register int length = 0; + + if (val == 0) { + /* Special case */ + *cp++ = '0'; + } else { + while (val) { + *cp++ = digits[val % radix]; + val /= radix; + } + } + while (cp != temp) { + *buf++ = *--cp; + length++; + } + *buf = '\0'; + return (length); +} + +static const char all_dec[] = "0123456789"; +static const char all_hex[] = "0123456789abcdef"; +static const char all_HEX[] = "0123456789ABCDEF"; + +/* spin lock to synchronize print on SMP */ +boot_spinlock_t vprint_lock = __BOOT_SPIN_LOCK_UNLOCKED; + +static void do_dump_vprintk(const char *fmt_v, va_list ap_v); +void dump_vprintk(const char *fmt_v, va_list ap_v); + + +/* + * procedures for dump_kernel + * they may be called from trap and sys_rq + * those proc are the same as boot_proc but used only virt memory + */ + +void +dump_printk(char const *fmt_v, ...) 
+{ + va_list ap; + + va_start(ap, fmt_v); + dump_vprintk(fmt_v, ap); + va_end(ap); +} + +#if defined(CONFIG_LMS_CONSOLE) +static __interrupt void outb_nostack(unsigned char byte, unsigned long port) +{ + debug_cons_outb(byte, port); +} + +static __interrupt u32 inl_nostack(unsigned long port) +{ + return debug_cons_inl(port); +} +#endif + +#ifdef CONFIG_SERIAL_PRINTK +static void serial_dump_putc(char c) +{ + if (serial_console_opts != NULL) { + serial_console_opts->serial_putc(c); + return; + } +} +#else /* !CONFIG_SERIAL_PRINTK */ +static void serial_dump_putc(char c) +{ +} +#endif /* CONFIG_SERIAL_PRINTK */ + +#ifdef CONFIG_LMS_CONSOLE +static void LMS_dump_putc(char c) +{ + if (!NATIVE_IS_MACHINE_SIM) { + /* LMS debug port can be used only on simulator */ + } else if (inl_nostack(LMS_CONS_DATA_PORT) != 0xffffffff) { + + while (inl_nostack(LMS_CONS_DATA_PORT)) + ; + + outb_nostack(c, LMS_CONS_DATA_PORT); + outb_nostack(0, LMS_CONS_DATA_PORT); + } +} +#else /* !CONFIG_LMS_CONSOLE */ +static void LMS_dump_putc(char c) +{ +} +#endif /* CONFIG_LMS_CONSOLE */ + +void early_serial_write(struct console *con, const char *s, + unsigned int count) +{ + int i; + + for (i = 0; i < count; i++) { + if (s[i] == '\n') + serial_dump_putc('\r'); + serial_dump_putc(s[i]); + } +} + +#ifdef CONFIG_LMS_CONSOLE +static void early_LMS_write(struct console *con, const char *s, + unsigned int count) +{ + int i; + + for (i = 0; i < count; i++) { + if (s[i] == '\n') + LMS_dump_putc('\r'); + LMS_dump_putc(s[i]); + } +} +#endif /* CONFIG_LMS_CONSOLE */ + +static inline void dump_putc(char c) +{ + LMS_dump_putc(c); + + serial_dump_putc(c); + + /* guest kernel virtual console support */ + virt_console_dump_putc(c); +} + +__interrupt void dump_vprintk(const char *fmt_v, va_list ap_v) +{ + unsigned long flags; + + /* Disable NMIs as well as normal interrupts + * (to avoid deadlock since dump_printk() might be + * called from NMI handler). 
*/ +#ifdef CONFIG_E2K + raw_all_irq_save(flags); +#else + raw_local_irq_save(flags); +#endif + arch_boot_spin_lock(&vprint_lock); + do_dump_vprintk(fmt_v, ap_v); + arch_boot_spin_unlock(&vprint_lock); +#ifdef CONFIG_E2K + raw_all_irq_restore(flags); +#else + raw_local_irq_restore(flags); +#endif +} + +static char buf[32]; +static __interrupt void do_dump_vprintk(const char *fmt_v, va_list ap_v) +{ + register char *fmt = (char *)fmt_v; +#ifdef CONFIG_E90S + va_list ap = ap_v; +#else + register va_list ap = ap_v; +#endif + register char c, sign, *cp; + register int left_prec, right_prec, zero_fill, var_size; + register int length = 0, pad, pad_on_right, always_blank_fill; + register long long val = 0; + + /* Strip loglevel from the string? */ + if (fmt[0] == KERN_SOH_ASCII && fmt[1]) { + switch (fmt[1]) { + case '0' ... '7': + case 'd': + fmt += 2; + break; + } + } + + while ((c = *fmt++) != 0) { + if (c == '%') { + c = *fmt++; + left_prec = right_prec = pad_on_right = var_size = 0; + if (c == '-') { + c = *fmt++; + pad_on_right++; + always_blank_fill = TRUE; + } else { + always_blank_fill = FALSE; + } + if (c == '0') { + zero_fill = TRUE; + c = *fmt++; + } else { + zero_fill = FALSE; + } + while (is_digit(c)) { + left_prec = (left_prec * 10) + (c - '0'); + c = *fmt++; + } + if (c == '.') { + c = *fmt++; + zero_fill++; + while (is_digit(c)) { + right_prec = (right_prec * 10) + + (c - '0'); + c = *fmt++; + } + } else { + right_prec = left_prec; + } + if (c == 'l' || c == 'L') { + var_size = sizeof(long); + c = *fmt++; + if (c == 'l' || c == 'L') { + var_size = sizeof(long long); + c = *fmt++; + } + } else if (c == 'h') { + c = *fmt++; + if (c == 'h') { + c = *fmt++; + var_size = sizeof(char); + } else { + var_size = sizeof(short); + } + } else if (c == 'z' || c == 'Z') { + c = *fmt++; + var_size = sizeof(size_t); + } else if (c == 't') { + c = *fmt++; + var_size = sizeof(ptrdiff_t); + } else { + var_size = 4; + } + if (c == 'p') { + var_size = sizeof(void *); + } + 
sign = '\0'; + if (c == 'd' || c == 'i' || c == 'u' ||\ + c == 'x' || c == 'X' || c == 'p') { + int var_signed = (c == 'd'|| c == 'i'); + switch (var_size) { + case sizeof(long long): + if (var_signed) + val = (long long) + va_arg(ap, long long); + else + val = (unsigned long long) + va_arg(ap, long long); + break; + case sizeof(int): + if (var_signed) + val = (int) va_arg(ap, int); + else + val = (unsigned int) + va_arg(ap, int); + break; + case sizeof(short): + if (var_signed) + val = (short) va_arg(ap, int); + else + val = (unsigned short) + va_arg(ap, int); + break; + case sizeof(char): + if (var_signed) + val = (char) va_arg(ap, int); + else + val = (unsigned char) + va_arg(ap, int); + break; + } + if (val < 0 && (c == 'd' || c == 'i')) { + sign = '-'; + val = -val; + } + if (c == 'd' || c == 'i' || c == 'u') { + length = cvt(val, buf, 10, + (char*)all_dec); + } else if (c == 'x' || c == 'p') { + length = cvt(val, buf, 16, + (char*)all_hex); + } else if (c == 'X') { + length = cvt(val, buf, 16, + (char*)all_HEX); + } + cp = buf; + } else if (c == 's') { + cp = va_arg(ap, char *); + cp = cp; + length = strlen(cp); + } else if (c == 'c') { + c = va_arg(ap, int); + dump_putc(c); + continue; + } else { + dump_putc('?'); + continue; + } + + pad = left_prec - length; + if (sign != '\0') { + pad--; + } + if (zero_fill && !always_blank_fill) { + c = '0'; + if (sign != '\0') { + dump_putc(sign); + sign = '\0'; + } + } else { + c = ' '; + } + if (!pad_on_right) { + while (pad-- > 0) { + dump_putc(c); + } + } + if (sign != '\0') { + dump_putc(sign); + } + while (length-- > 0) { + dump_putc(c = *cp++); + if (c == '\n') { + dump_putc('\r'); + } + } + if (pad_on_right) { + if (zero_fill && !always_blank_fill) + c = '0'; + else + c = ' '; + + while (pad-- > 0) { + dump_putc(c); + } + } + } else { + dump_putc(c); + if (c == '\n') { + dump_putc('\r'); + } + } + } +} + +__interrupt void dump_putns(const char *s, int n) +{ + unsigned long flags; + + /* Disable NMIs as well as 
normal interrupts + * (to avoid deadlock since dump_printk() might be + * called from NMI handler). */ +#ifdef CONFIG_E2K + raw_all_irq_save(flags); +#else + raw_local_irq_save(flags); +#endif + arch_boot_spin_lock(&vprint_lock); + + while (n--) { + if (*s == '\n') + dump_putc('\r'); + dump_putc(*s++); + } + + arch_boot_spin_unlock(&vprint_lock); +#ifdef CONFIG_E2K + raw_all_irq_restore(flags); +#else + raw_local_irq_restore(flags); +#endif + +} + +__interrupt void dump_puts(const char *s) +{ + unsigned long flags; + + /* Disable NMIs as well as normal interrupts + * (to avoid deadlock since dump_printk() might be + * called from NMI handler). */ +#ifdef CONFIG_E2K + raw_all_irq_save(flags); +#else + raw_local_irq_save(flags); +#endif + arch_boot_spin_lock(&vprint_lock); + + while (*s) { + if (*s == '\n') + dump_putc('\r'); + dump_putc(*s++); + } + + arch_boot_spin_unlock(&vprint_lock); +#ifdef CONFIG_E2K + raw_all_irq_restore(flags); +#else + raw_local_irq_restore(flags); +#endif +} + + +#ifdef CONFIG_EARLY_DUMP_CONSOLE +static void early_dump_write(struct console *con, const char *s, + unsigned int count) +{ + unsigned long flags, i; + + /* Disable NMIs as well as normal interrupts + * (to avoid deadlock since dump_printk() might be + * called from NMI handler). 
*/ +# ifdef CONFIG_E2K + raw_all_irq_save(flags); +# else + raw_local_irq_save(flags); +# endif + arch_boot_spin_lock(&vprint_lock); + for (i = 0; i < count; i++) { + if (s[i] == '\n') + dump_putc('\r'); + dump_putc(s[i]); + } + arch_boot_spin_unlock(&vprint_lock); +# ifdef CONFIG_E2K + raw_all_irq_restore(flags); +# else + raw_local_irq_restore(flags); +# endif +} + +static struct console early_serial_console = { + .name = "early-ttyS", + .write = early_serial_write, + .flags = CON_BOOT | CON_PRINTBUFFER | CON_ANYTIME, + .index = -1, + .device = 0 +}; + +#ifdef CONFIG_LMS_CONSOLE +static struct console early_LMS_console = { + .name = "early-ttyLMS", + .write = early_LMS_write, + .flags = CON_BOOT | CON_PRINTBUFFER | CON_ANYTIME, + .index = -1, + .device = 0 +}; +#endif /* CONFIG_LMS_CONSOLE */ + +static struct console early_dump_console = { + .name = "early-dump", + .write = early_dump_write, + .flags = CON_BOOT | CON_PRINTBUFFER | CON_ANYTIME, + .index = -1, + .device = 0 +}; + +/* + * FIXME: The next function and its call with support functions should + * be deleted to use only interface of early printk consoles registration. + * (see bellow the function setup_early_printk()). But that means the mandatory + * presence of early console option on command line. + * The dump_printk() interface can be kept to have output to direct console. + */ +__init void register_early_dump_console(void) +{ + if (early_console) + return; + + register_console(&early_dump_console); + +# ifdef CONFIG_EARLY_PRINTK + early_console = &early_dump_console; +# endif +} + +static __init void register_early_console(struct console *con, int keep_early) +{ + if (con == NULL) { + pr_err("ERROR: earlyprintk=... 
cannot init console\n"); + return; + } + if (con->index != -1) { + printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n", + con->name); + return; + } + early_console = con; + early_console->flags |= CON_BOOT; + register_console(early_console); +} + +#define DEFAULT_BAUD 115200 + +static __init char *early_serial_init(char *s, int *idx, char **options) +{ + unsigned long baud = DEFAULT_BAUD; + char *e; + + /* syntax: ttyS, : examples ttyS0/ttyS0,115200 ... */ + if (*s == ',') { + ++s; + *options = s; + } + + if (*s) { + int port; + + port = simple_strtoul(s, &e, 10); + if (s != e) { + *idx = port; + } + s += strcspn(s, ","); + if (*s == ',') { + s++; + *options = s; + } + } + + if (*s) { + baud = simple_strtoull(s, &e, 0); + if (baud == 0 || s == e) + baud = DEFAULT_BAUD; + s = e; + } + return s; +} + +#ifdef CONFIG_LMS_CONSOLE +static __init char *early_LMS_init(char *s) +{ + char *e; + + /* syntax: ttyLMS : examples ttyLMS/ttyLMS0,ttyLMS1 ... */ + if (*s) { + unsigned port; + + port = simple_strtoul(s, &e, 10); + s = e; + } + + return s; +} +#endif /* CONFIG_LMS_CONSOLE */ + +#ifdef CONFIG_EARLY_VIRTIO_CONSOLE +static __init char *early_hvc_init(char *s, int *idx) +{ + char *e; + + /* syntax: hvc : examples hvc/hvc0 */ + if (*s) { + int port; + + port = simple_strtoul(s, &e, 10); + if (s != e) { + *idx = port; + } + s = e; + } + + return s; +} +#endif /* CONFIG_EARLY_VIRTIO_CONSOLE */ + +typedef struct early_console { + char *name; + bool keep; + int idx; + char *options; +} early_console_t; + +#define MAX_EARLY_CONSELES_NUM 3 /* ttyS, ttyLMS, hvc */ + +static int __init setup_early_printk(char *buf) +{ + bool keep; + early_console_t consoles[MAX_EARLY_CONSELES_NUM]; + early_console_t *console = &consoles[0]; + int consoles_num = 0, c; + + if (!buf) + return 0; + + if (early_console) { + /* early console has been already registered otherwise */ + return 0; + } + + /* WARNING: keep option applies to all 'earlyprintk=' consoles */ + keep = (strstr(buf, "keep") != 
NULL); + + while (*buf != '\0') { + bool found; + + found = false; + + if (!strncmp(buf, "ttyS", 4)) { + int sidx = 0; + char *soptions = NULL; + + buf = early_serial_init(buf + 4, &sidx, &soptions); + register_early_console(&early_serial_console, keep); + console->name = "ttyS"; + console->keep = keep; + console->idx = sidx; + console->options = soptions; + consoles_num++; + if (consoles_num >= MAX_EARLY_CONSELES_NUM) { + break; + } + console++; + found = true; + } + +#ifdef CONFIG_LMS_CONSOLE + if (!strncmp(buf, "ttyLMS", 6)) { + buf = early_LMS_init(buf + 6); + register_early_console(&early_LMS_console, keep); + console->name = "ttyLMS"; + console->keep = keep; + console->idx = 0; + console->options = NULL; + consoles_num++; + if (consoles_num >= MAX_EARLY_CONSELES_NUM) { + break; + } + console++; + found = true; + } +#endif /* CONFIG_LMS_CONSOLE */ + +#ifdef CONFIG_EARLY_VIRTIO_CONSOLE + if (!strncmp(buf, "hvc", 3)) {\ + struct console *hvc_con; + int hvc_idx = 0; + + buf = early_hvc_init(buf + 3, &hvc_idx); + hvc_con = hvc_l_early_cons_init(hvc_idx); + if (hvc_con == NULL) { + pr_err("%s(): could not create early HVC " + "console. 
ignore the hvc console\n", + __func__); + } else { + register_early_console(hvc_con, keep); + console->name = "hvc"; + console->keep = keep; + console->idx = hvc_idx; + console->options = NULL; + consoles_num++; + if (consoles_num >= MAX_EARLY_CONSELES_NUM) { + break; + } + console++; + } + found = true; + } +#endif /* CONFIG_EARLY_VIRTIO_CONSOLE */ + + if (!found) { + buf++; + } + } + + for (c = 0; c < consoles_num; c++) { + /* WARNING: prefered consoles have to be added */ + /* only after registration of all early consoles */ + /* the order of consoles is important and should be kept */ + console = &consoles[c]; + if (console->keep) { + add_preferred_console(console->name, + console->idx, console->options); + } + } + return 0; +} +early_param("earlyprintk", setup_early_printk); + +# ifdef CONFIG_EARLY_PRINTK +int switch_to_early_dump_console() +{ + return 0; +} + +void switch_from_early_dump_console() +{ +} +# endif + +#endif /* CONFIG_EARLY_DUMP_CONSOLE */ + + +/* + * Temporary dumper until RealTime patch gains + * proper support for console_flush_on_panic(). 
+ */ +#include + +static void kmsg_dumper_stdout(struct kmsg_dumper *dumper, + enum kmsg_dump_reason reason) +{ + static char line[1024]; + size_t len = 0; + + dump_puts("kmsg_dump (dump of the whole printk buffer on panic(), can have some lines doubled but will probably output more messages including KERN_DEBUG ones and those that just had no time to be printed before panic()):\n"); + while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len)) { + line[len] = '\0'; + dump_puts(line); + } +} + +static struct kmsg_dumper kmsg_dumper = { + .dump = kmsg_dumper_stdout +}; + +int __init kmsg_dumper_stdout_init(void) +{ + return kmsg_dump_register(&kmsg_dumper); +} +arch_initcall(kmsg_dumper_stdout_init); diff --git a/arch/l/kernel/cpufreq.c b/arch/l/kernel/cpufreq.c new file mode 100644 index 000000000000..e128da24c2c7 --- /dev/null +++ b/arch/l/kernel/cpufreq.c @@ -0,0 +1,89 @@ +#include +#include +#include +#include + +#include +#include + +#ifdef CONFIG_E2K +# ifdef CONFIG_CPU_IDLE +static void wait_C2_exit(void) +{ + cycles_t lt_tick_before = lt_read(); + /* Wait for 100us to make sure this CPU has exited from C2 state */ + while (lt_read() - lt_tick_before < lt_clock_rate / 10000) + barrier(); +} +# else +static void wait_C2_exit(void) { } +# endif +#endif + +static void measure_cpu_freq_ipi(void *arg) +{ +#ifdef CONFIG_E2K + unsigned long flags; +#endif + u64 *freq = arg; + volatile cycles_t cpu_tick_before, cpu_tick_after; + volatile u32 lt_tick_before, lt_tick_after; + +#ifdef CONFIG_E2K + wait_C2_exit(); + + /* Make sure NMIs do not mess up our calculation */ + raw_all_irq_save(flags); +#endif + lt_tick_before = lt_read(); + cpu_tick_before = get_cycles(); +#ifdef CONFIG_E2K + raw_all_irq_restore(flags); +#endif + + while (lt_read() - lt_tick_before < lt_clock_rate / 1000) + barrier(); + +#ifdef CONFIG_E2K + raw_all_irq_save(flags); +#endif + lt_tick_after = lt_read(); + cpu_tick_after = get_cycles(); +#ifdef CONFIG_E2K + raw_all_irq_restore(flags); 
+#endif + + *freq = (cpu_tick_after - cpu_tick_before) * lt_clock_rate / + (lt_tick_after - lt_tick_before); +} + +static DEFINE_PER_CPU(u64, cpu_freq); + +unsigned long measure_cpu_freq(int cpu) +{ + u64 freq; + + /* First try querying the cpufreq driver */ + freq = 1000 * cpufreq_quick_get(cpu); + if (freq) + return freq; + + /* cpufreq is disabled so there is no need to re-measure frequency */ + if ((freq = per_cpu(cpu_freq, cpu))) + return freq; + + /* + * Workaround for paravirtualization: do not call smp_call_function from + * e2k_start_secondary: it leads to a WARN_ON(cpu_online && irqs_disabled) + */ + if (cpu == smp_processor_id()) { + measure_cpu_freq_ipi(&freq); + } else { + /* If cpufreq failed, then calibrate using lt timer from iohub */ + smp_call_function_single(cpu, measure_cpu_freq_ipi, &freq, true); + } + + per_cpu(cpu_freq, cpu) = freq; + + return freq; +} diff --git a/arch/l/kernel/devtree.c b/arch/l/kernel/devtree.c new file mode 100644 index 000000000000..cd7c0c3225fa --- /dev/null +++ b/arch/l/kernel/devtree.c @@ -0,0 +1,42 @@ +#include +#include + +#include + + +#ifdef CONFIG_PCI +int of_get_pci_domain_nr(struct device_node *node); + +struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus) +{ + struct device_node *np; + for_each_node_by_name(np, "pci") { + int domain = of_get_pci_domain_nr(np); + if (domain == pci_domain_nr(bus)) + return np; + } + return NULL; +} +#endif /*CONFIG_PCI*/ + +#ifdef CONFIG_DTB_L_TEST +unsigned char test_blob[] = { + 0xd0, 0x0d, 0xfe, 0xed, 0x00, 0x00, 0x00, 0xbd, 0x00, 0x00, 0x00, 0x38, + 0x00, 0x00, 0x00, 0xa8, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x11, + 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, + 0x00, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x6d, 0x70, 
0x6c, 0x65, 0x2d, 0x62, + 0x75, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x18, + 0x00, 0x00, 0x00, 0x0b, 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, + 0x64, 0x74, 0x62, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x00, 0x01, 0x64, 0x73, 0x70, 0x00, + 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x00, + 0x6d, 0x63, 0x73, 0x74, 0x2c, 0x65, 0x6c, 0x64, 0x73, 0x70, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x09, + 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x00, 0x73, + 0x6f, 0x6d, 0x65, 0x5f, 0x74, 0x65, 0x78, 0x74, 0x00 +}; +#endif /*CONFIG_DTB_L_TEST*/ + diff --git a/arch/l/kernel/epic/Makefile b/arch/l/kernel/epic/Makefile new file mode 100644 index 000000000000..ca4e0a1a7625 --- /dev/null +++ b/arch/l/kernel/epic/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for EPIC (CEPIC, PREPIC, IO-EPIC) drivers +# + +obj-$(CONFIG_EPIC) += epic.o ipi.o irq.o io_epic.o +obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_IRQ_WORK) += irq_work.o diff --git a/arch/l/kernel/epic/epic.c b/arch/l/kernel/epic/epic.c new file mode 100644 index 000000000000..9e9e69b482bc --- /dev/null +++ b/arch/l/kernel/epic/epic.c @@ -0,0 +1,1051 @@ +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_E2K +#include +#include +#include +#include +#include +#endif + +/* + * TODO Although boot_cpu_physical_apicid and phys_cpu_present_map are defined + * in apic.c, they are used in several other files. 
Since EPIC is always + * compiled along with APIC, those variables are referenced here directly + */ + +static unsigned int epic_num_processors; + +/* Disable CEPIC timer from kernel cmdline */ +bool disable_epic_timer; + +/* Enable CEPIC debugging from kernel cmdline */ +bool epic_debug = false; + +bool epic_bgi_mode; + +/* Enable pcsm_adjust daemon from kernel cmdline */ +bool pcsm_adjust_enable; +EXPORT_SYMBOL(pcsm_adjust_enable); + +/* + * The value written to CEPIC_TIMER_INIT register, that corresponds to HZ timer + * interrupt frequency + */ +static unsigned int cepic_timer_freq; + +#define EPIC_DIVISOR 1 + +/* Accessing PREPIC registers */ +unsigned int early_prepic_node_read_w(int node, unsigned int reg) +{ + return early_sic_read_node_nbsr_reg(node, reg); +} + +void early_prepic_node_write_w(int node, unsigned int reg, unsigned int v) +{ + early_sic_write_node_nbsr_reg(node, reg, v); +} + +/* FIXME Use early_sic_read in guest to avoid mas 0x13 reads/writes in guest */ +unsigned int prepic_node_read_w(int node, unsigned int reg) +{ + if (paravirt_enabled()) + return early_prepic_node_read_w(node, reg); + else + return sic_read_node_nbsr_reg(node, reg); +} + +void prepic_node_write_w(int node, unsigned int reg, unsigned int v) +{ + if (paravirt_enabled()) + early_prepic_node_write_w(node, reg, v); + else + sic_write_node_nbsr_reg(node, reg, v); +} + +#if 0 +static inline unsigned int prepic_read_w(unsigned int reg) +{ + if (paravirt_enabled()) + return early_prepic_read_w(reg); + else + return sic_read_nbsr_reg(reg); +} + +static inline void prepic_write_w(unsigned int reg, unsigned int v) +{ + if (paravirt_enabled()) + early_prepic_write_w(reg, v); + else + sic_write_nbsr_reg(reg, v); +} +#endif + +static int cepic_timer_set_periodic(struct clock_event_device *evt) +{ + union cepic_timer_lvtt reg_lvtt; + union cepic_timer_div reg_div; + + reg_lvtt.raw = 0; + reg_lvtt.bits.mode = 1; + reg_lvtt.bits.vect = CEPIC_TIMER_VECTOR; + epic_write_w(CEPIC_TIMER_LVTT, 
reg_lvtt.raw); + + /* Do not divide EPIC timer frequency */ + reg_div.raw = 0; + reg_div.bits.divider = CEPIC_TIMER_DIV_1; + epic_write_w(CEPIC_TIMER_DIV, reg_div.raw); + + epic_write_w(CEPIC_TIMER_INIT, cepic_timer_freq / HZ); + + epic_printk("set EPIC timer to periodic mode on CPU #%d: HZ %d Mhz", + smp_processor_id(), HZ); + + return 0; +} + +static int cepic_timer_set_oneshot(struct clock_event_device *evt) +{ + union cepic_timer_lvtt reg_lvtt; + union cepic_timer_div reg_div; + + reg_lvtt.raw = 0; + reg_lvtt.bits.vect = CEPIC_TIMER_VECTOR; + epic_write_w(CEPIC_TIMER_LVTT, reg_lvtt.raw); + + /* Do not divide EPIC timer frequency */ + reg_div.raw = 0; + reg_div.bits.divider = CEPIC_TIMER_DIV_1; + epic_write_w(CEPIC_TIMER_DIV, reg_div.raw); + + epic_printk("set EPIC timer to oneshot mode on CPU #%d", + smp_processor_id()); + + return 0; +} + +/* + * Program the next event, relative to now + */ +static int cepic_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + epic_write_w(CEPIC_TIMER_INIT, delta); + return 0; +} + +/* Stop generating timer interrupts and mask them */ +static int cepic_timer_shutdown(struct clock_event_device *evt) +{ + union cepic_timer_lvtt reg; + + reg.raw = epic_read_w(CEPIC_TIMER_LVTT); + reg.bits.mask = 1; + epic_write_w(CEPIC_TIMER_LVTT, reg.raw); + epic_write_w(CEPIC_TIMER_INIT, 0); + + return 0; +} + +/* + * The cepic timer can be used for any function which is CPU local. + * Broadcast is not supported + */ +static struct clock_event_device cepic_clockevent = { + .name = "cepic", + .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, + .shift = 32, + .set_state_shutdown = cepic_timer_shutdown, + .set_state_periodic = cepic_timer_set_periodic, + .set_state_oneshot = cepic_timer_set_oneshot, + .set_next_event = cepic_next_event, + .broadcast = NULL, + .rating = 100, + .irq = -1, +}; +static DEFINE_PER_CPU(struct clock_event_device, cepic_events); + +/* + * Setup the CEPIC timer for this CPU. 
Copy the initialized values + * of the boot CPU and register the clock event in the framework. + */ +static void setup_epic_timer(void) +{ + struct clock_event_device *levt = this_cpu_ptr(&cepic_events); + + memcpy(levt, &cepic_clockevent, sizeof(*levt)); + levt->cpumask = cpumask_of(smp_processor_id()); + clockevents_config_and_register(levt, cepic_timer_freq, + 0xF, 0xFFFFFFFF); +} + +/* + * Setup the boot EPIC timer + * + * cepic_timer_freq is set from MP table. No need for calibration + */ +void __init setup_boot_epic_clock(void) +{ + /* + * The CEPIC timer can be disabled via the kernel cmdline. Ignore it. + */ + if (disable_epic_timer) { + pr_info("Disabling EPIC timer\n"); + return; + } + + /* Register CEPIC timer clockevent */ + setup_epic_timer(); +} + +void setup_secondary_epic_clock(void) +{ + if (disable_epic_timer) + pr_info("Disabling EPIC timer\n"); + else + setup_epic_timer(); +} + +static void __epic_smp_spurious_interrupt(void) +{ + ack_epic_irq(); + inc_irq_stat(irq_spurious_count); + + pr_info("Spurious EPIC interrupt on CPU#%d\n", smp_processor_id()); +} + +__visible void epic_smp_spurious_interrupt(struct pt_regs *regs) +{ + l_irq_enter(); + __epic_smp_spurious_interrupt(); + l_irq_exit(); +} + +/* Write 0 to CEPIC_ESR before reading it */ +static void __epic_smp_error_interrupt(void) +{ + union cepic_esr reg; + + epic_write_w(CEPIC_ESR, 0); + reg.raw = epic_read_w(CEPIC_ESR); + + ack_epic_irq(); + atomic_inc(&irq_err_count); + + printk(KERN_INFO "EPIC error on CPU%d: 0x%x", smp_processor_id(), reg.raw); + + if (reg.bits.rq_addr_err) + printk(KERN_CONT " : Illegal regsiter address"); + + if (reg.bits.rq_virt_err) + printk(KERN_CONT " : Illegal virt request (virt disabled)"); + + if (reg.bits.rq_cop_err) + printk(KERN_CONT " : Illegal opcode"); + + if (reg.bits.ms_gstid_err) + printk(KERN_CONT " : Illegal guest id"); + + if (reg.bits.ms_virt_err) + printk(KERN_CONT " : Illegal virt message (virt disabled)"); + + if (reg.bits.ms_err) + 
printk(KERN_CONT " : Illegal message"); + + if (reg.bits.ms_icr_err) + printk(KERN_CONT " : Illegal write to CEPIC_ICR"); + + printk(KERN_CONT "\n"); +} + +__visible void epic_smp_error_interrupt(struct pt_regs *regs) +{ + l_irq_enter(); + __epic_smp_error_interrupt(); + l_irq_exit(); +} + +static void __prepic_smp_error_interrupt(void) +{ + unsigned int stat, msg_hi, msg_lo; + int node; + for_each_online_node(node) { + stat = prepic_node_read_w(node, SIC_prepic_err_stat); + if (!stat) + continue; + + msg_hi = prepic_node_read_w(node, SIC_prepic_err_msg_hi); + msg_lo = prepic_node_read_w(node, SIC_prepic_err_msg_lo); + prepic_node_write_w(node, SIC_prepic_err_stat, stat); + + pr_err("PREPIC#%d err: stat 0x%x, msg_hi 0x%x, msg_lo 0x%x\n", + node, stat, msg_hi, msg_lo); + } + + ack_epic_irq(); + atomic_inc(&irq_err_count); +} + +__visible void prepic_smp_error_interrupt(struct pt_regs *regs) +{ + l_irq_enter(); + __prepic_smp_error_interrupt(); + l_irq_exit(); +} + +#ifdef CONFIG_KVM_ASYNC_PF +__visible void epic_pv_apf_wake(struct pt_regs *regs) +{ + l_irq_enter(); + + if (pv_apf_read_and_reset_reason() == KVM_APF_PAGE_READY) + pv_apf_wake(); + else + pr_err("Guest: async_pf, got spurious " + "ASYNC_PF_WAKE_VECTOR exception\n"); + + ack_epic_irq(); + + l_irq_exit(); +} +#endif /* CONFIG_KVM_ASYNC_PF */ + +static void set_cepic_timer_frequency(unsigned int freq) +{ + /* + * Boot should have passed CEPIC timer frequency in MP table + * Assume 100 MHz, if it didn't, and passed 0 instead + */ + if (!freq) { + pr_warn("Boot did not pass CEPIC timer frequency\n"); + freq = 100000000; /* 100 MHz */ + } + + pr_info_once("EPIC timer frequency is %d.%d MHz\n", + freq / 1000000, freq % 1000000 / 100000); + cepic_timer_freq = freq; +} + +int get_cepic_timer_frequency(void) +{ + return cepic_timer_freq; +} + +/* + * E2K depends on the "hard" cpu number to determine NUMA node, + * so we must exclude the influence of the order in which all + * processors get here. 
+ */ +void epic_processor_info(int epicid, int version, unsigned int cepic_freq) +{ + unsigned int bsp_id = read_epic_id(); + bool boot_cpu_detected = physid_isset(bsp_id, + phys_cpu_present_map); + + boot_cpu_physical_apicid = bsp_id; + + /* + * If boot cpu has not been detected yet, then only allow upto + * nr_cpu_ids - 1 processors and keep one slot free for boot cpu + */ + if (!boot_cpu_detected && epic_num_processors >= nr_cpu_ids - 1 && + epicid != bsp_id) { + pr_warn("NR_CPUS=%d limit was reached", nr_cpu_ids); + pr_warn("Ignoring CPU#%d to keep a slot for boot CPU", epicid); + return; + } + + if (epic_num_processors >= nr_cpu_ids) { + pr_warn("NR_CPUS=%d limit was reached", nr_cpu_ids); + pr_warn("Ignoring CPU#%d", epicid); + return; + } + + epic_num_processors++; + + if (epicid >= MAX_PHYSID_NUM) + panic("EPIC id from MP table exceeds %d\n", MAX_PHYSID_NUM); + + physid_set(epicid, phys_cpu_present_map); + + early_per_cpu(x86_cpu_to_apicid, epicid) = epicid; + early_per_cpu(x86_bios_cpu_apicid, epicid) = epicid; + + set_cpu_possible(epicid, true); + set_cpu_present(epicid, true); + + set_cepic_timer_frequency(cepic_freq); +} + +/* + * The guts of the cepic timer interrupt + */ +void cepic_timer_interrupt(void) +{ + int cpu = smp_processor_id(); + struct clock_event_device *evt = &per_cpu(cepic_events, cpu); + + /* + * the NMI deadlock-detector uses this. + */ + inc_irq_stat(apic_timer_irqs); + + evt->event_handler(evt); +} + +#define DELTA_NS (NSEC_PER_SEC / HZ / 2) + +/* + * CEPIC timer interrupt. This is the most natural way for doing + * local interrupts, but local timer interrupts can be emulated by + * broadcast interrupts too. [in case the hw doesn't support CEPIC timers] + * + * [ if a single-CPU system runs an SMP kernel then we call the local + * interrupt as well. Thus we cannot inline the local irq ... 
] + */ +__visible void __irq_entry epic_smp_timer_interrupt(struct pt_regs *regs) +{ + struct pt_regs *old_regs = set_irq_regs(regs); + int cpu; + long long cur_time; + long long next_time; + + cpu = smp_processor_id(); + next_time = per_cpu(next_rt_intr, cpu); + if (next_time) { + cur_time = ktime_to_ns(ktime_get()); + if (cur_time > next_time + DELTA_NS) { + per_cpu(next_rt_intr, cpu) = 0; + } else if (cur_time > next_time - DELTA_NS && + cur_time < next_time + DELTA_NS) { + /* + * set 1 -- must do timer later + * in do_postpone_tick() + */ + per_cpu(next_rt_intr, cpu) = 1; + set_irq_regs(old_regs); + ack_epic_irq(); + /* if do_postpone_tick() will not called: */ + epic_write_w(CEPIC_TIMER_INIT, + usecs_2cycles(USEC_PER_SEC / HZ)); + return; + } + } + + /* + * NOTE! We'd better ACK the irq immediately, + * because timer handling can be slow. + * + * update_process_times() expects us to have done l_irq_enter(). + * Besides, if we don't timer interrupts ignore the global + * interrupt lock, which is the WrongThing (tm) to do. + */ + l_irq_enter(); + ack_epic_irq(); + cepic_timer_interrupt(); + l_irq_exit(); + + set_irq_regs(old_regs); +} + +/* + * TODO clear_cepic - shutdown CEPIC + * + * This is called, when a CPU is disabled and before rebooting, so the state of + * the CEPIC has no dangling leftovers. Also used to cleanout any BIOS + * leftovers during boot. + */ +void clear_cepic(void) +{ +} + +/* + * A fake APIC driver, provided by EPIC for compatibility with existing code + * (mostly IOAPIC). 
The uninitialized fields should not be used + */ +static struct apic epic = { + .name = "epic", + + .irq_delivery_mode = dest_Fixed, + .irq_dest_mode = 0, + + .target_cpus = online_target_cpus, + .check_apicid_used = default_check_apicid_used, + + .vector_allocation_domain = default_vector_allocation_domain, + + .ioapic_phys_id_map = default_ioapic_phys_id_map, + .multi_timer_check = NULL, + .apicid_to_cpu_present = physid_set_mask_of_physid, + .check_phys_apicid_present = default_check_phys_apicid_present, + + .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and, + + .send_IPI_mask = epic_send_IPI_mask, + .send_IPI_self = epic_send_IPI_self, + .send_IPI_mask_allbutself = epic_send_IPI_mask_allbutself +}; + +/* + * TODO Placeholder for various EPIC sanity checks + */ +void __init_recv verify_epic(void) +{ +} + +/* + * Used to setup CEPIC while initializing BSP or bringing up APs + * Always called with preemption disabled + */ +void setup_cepic(void) +{ + union cepic_ctrl reg_ctrl; + union cepic_svr reg_svr; + union cepic_esr2 reg_esr2; + unsigned int epic_id = read_epic_id(); + + /* Enable CEPIC */ + reg_ctrl.raw = epic_read_w(CEPIC_CTRL); + reg_ctrl.bits.soft_en = 1; + epic_write_w(CEPIC_CTRL, reg_ctrl.raw); + + /* Set up spurious IRQ vector */ + reg_svr.raw = 0; + reg_svr.bits.vect = SPURIOUS_EPIC_VECTOR; + epic_write_w(CEPIC_SVR, reg_svr.raw); + + /* Set up Error Status Register */ + reg_esr2.raw = 0; + reg_esr2.bits.vect = ERROR_EPIC_VECTOR; + epic_write_w(CEPIC_ESR2, reg_esr2.raw); + + epic_printk("CEPIC %d is set up\n", epic_id); +} + +/* Set first cepic on node as destination for LINP and ERR and unmask them */ +static void __init_recv __setup_prepic(unsigned int node) +{ +#ifdef CONFIG_E2K + union prepic_linpn reg; + union prepic_err_int reg_err; + unsigned int dest = node_to_first_cpu(node); + + if (dest >= nr_cpu_ids) { + pr_err("Failed to find online cpu on node %d. 
PREPIC err and linp are routed to bsp\n", + node); + dest = boot_cpu_physical_apicid; + } + + dest = cepic_id_short_to_full(dest); + + /* + * Setting up and unmasking PREPIC interrupts: + * - PREPIC error interrupt + * - LINP0 - emergency interrupt from HC + * - LINP1 - IOMMU interrupt + * - LINP2 - Uncore interrupt + * - LINP3 - IPCC interrupt + * - LINP4 - non-emergency interrupt from HC + * - LINP5 - Power Control (PCS) interrupt + */ + + reg_err.raw = 0; + reg_err.bits.dst = dest; + reg_err.bits.vect = PREPIC_ERROR_VECTOR; + prepic_node_write_w(node, SIC_prepic_err_int, reg_err.raw); + + reg.raw = 0; + reg.bits.dst = dest; + reg.bits.vect = LINP0_INTERRUPT_VECTOR; + prepic_node_write_w(node, SIC_prepic_linp0, reg.raw); + + reg.raw = 0; + reg.bits.dst = dest; + reg.bits.vect = LINP1_INTERRUPT_VECTOR; + prepic_node_write_w(node, SIC_prepic_linp1, reg.raw); + + reg.raw = 0; + reg.bits.dst = dest; + reg.bits.vect = LINP2_INTERRUPT_VECTOR; + prepic_node_write_w(node, SIC_prepic_linp2, reg.raw); + + reg.raw = 0; + reg.bits.dst = dest; + reg.bits.vect = LINP3_INTERRUPT_VECTOR; + prepic_node_write_w(node, SIC_prepic_linp3, reg.raw); + + reg.raw = 0; + reg.bits.dst = dest; + reg.bits.vect = LINP4_INTERRUPT_VECTOR; + prepic_node_write_w(node, SIC_prepic_linp4, reg.raw); + + reg.raw = 0; + reg.bits.dst = dest; + reg.bits.vect = LINP5_INTERRUPT_VECTOR; + prepic_node_write_w(node, SIC_prepic_linp5, reg.raw); +#endif + epic_printk("PREPIC %d is set up\n", node); +} + +void __init_recv setup_prepic(void) +{ + unsigned int node; + + for_each_online_node(node) + __setup_prepic(node); +} + +void __init setup_bsp_epic(void) +{ + /* + * Fake APIC driver for compatibility with existing IOAPIC code + */ + apic = &epic; + + /* Various EPIC sanity checks */ + verify_epic(); + + setup_cepic(); +} + +struct saved_cepic_regs { + bool valid; + u32 cepic_id; + u32 cepic_cpr; + u32 cepic_esr; + u32 cepic_esr2; + u32 cepic_cir; + u32 cepic_icr; + u32 cepic_icr2; + u32 cepic_timer_lvtt; + u32 
cepic_timer_init; + u32 cepic_timer_cur; + u32 cepic_timer_div; + u32 cepic_nm_timer_lvtt; + u32 cepic_nm_timer_init; + u32 cepic_nm_timer_cur; + u32 cepic_nm_timer_div; + u32 cepic_svr; + u32 cepic_pnmirr_mask; +}; + +static void save_cepic(void *cepic_regs) +{ + struct saved_cepic_regs *regs = cepic_regs; + + regs->cepic_id = epic_read_w(CEPIC_ID); + regs->cepic_cpr = epic_read_w(CEPIC_CPR); + regs->cepic_esr = epic_read_w(CEPIC_ESR); + regs->cepic_esr2 = epic_read_w(CEPIC_ESR2); + + /* CEPIC_EOI is write-only */ + + regs->cepic_cir = epic_read_w(CEPIC_CIR); + + /* Reading CEPIC_PNMIRR starts NMI handling */ + + regs->cepic_icr = epic_read_w(CEPIC_ICR); + regs->cepic_icr2 = epic_read_w(CEPIC_ICR2); + regs->cepic_timer_lvtt = epic_read_w(CEPIC_TIMER_LVTT); + regs->cepic_timer_init = epic_read_w(CEPIC_TIMER_INIT); + regs->cepic_timer_cur = epic_read_w(CEPIC_TIMER_CUR); + regs->cepic_timer_div = epic_read_w(CEPIC_TIMER_DIV); + regs->cepic_nm_timer_lvtt = epic_read_w(CEPIC_NM_TIMER_LVTT); + regs->cepic_nm_timer_init = epic_read_w(CEPIC_NM_TIMER_INIT); + regs->cepic_nm_timer_cur = epic_read_w(CEPIC_NM_TIMER_CUR); + regs->cepic_nm_timer_div = epic_read_w(CEPIC_NM_TIMER_DIV); + regs->cepic_svr = epic_read_w(CEPIC_SVR); + regs->cepic_pnmirr_mask = epic_read_w(CEPIC_PNMIRR_MASK); + + regs->valid = true; +} + +static void print_saved_cepic(int cpu, struct saved_cepic_regs *regs) +{ + pr_info("Printing CEPIC contents on CPU#%d:\n", cpu); + pr_info("... CEPIC_ID: 0x%x\n", regs->cepic_id); + pr_info("... CEPIC_CPR: 0x%x\n", regs->cepic_cpr); + pr_info("... CEPIC_ESR: 0x%x\n", regs->cepic_esr); + pr_info("... CEPIC_ESR2: 0x%x\n", regs->cepic_esr2); + + /* CEPIC_EOI is write-only */ + + pr_info("... CEPIC_CIR: 0x%x\n", regs->cepic_cir); + + /* Reading CEPIC_PNMIRR starts NMI handling */ + + pr_info("... CEPIC_ICR: 0x%x\n", regs->cepic_icr); + pr_info("... CEPIC_ICR2: 0x%x\n", regs->cepic_icr2); + pr_info("... CEPIC_TIMER_LVTT: 0x%x\n", regs->cepic_timer_lvtt); + pr_info("... 
CEPIC_TIMER_INIT: 0x%x\n", regs->cepic_timer_init); + pr_info("... CEPIC_TIMER_CUR: 0x%x\n", regs->cepic_timer_cur); + pr_info("... CEPIC_TIMER_DIV: 0x%x\n", regs->cepic_timer_div); + pr_info("... CEPIC_NM_TIMER_LVTT: 0x%x\n", + regs->cepic_nm_timer_lvtt); + pr_info("... CEPIC_NM_TIMER_INIT: 0x%x\n", + regs->cepic_nm_timer_init); + pr_info("... CEPIC_NM_TIMER_CUR: 0x%x\n", regs->cepic_nm_timer_cur); + pr_info("... CEPIC_NM_TIMER_DIV: 0x%x\n", regs->cepic_nm_timer_div); + pr_info("... CEPIC_SVR: 0x%x\n", regs->cepic_svr); + pr_info("... CEPIC_PNMIRR_MASK: 0x%x\n", regs->cepic_pnmirr_mask); +} + +static void print_cepic(void *dummy) +{ + unsigned int v; + + pr_info("Printing CEPIC contents on CPU#%d:\n", + smp_processor_id()); + v = epic_read_w(CEPIC_ID); + pr_info("... CEPIC_ID: 0x%x\n", v); + + v = epic_read_w(CEPIC_CPR); + pr_info("... CEPIC_CPR: 0x%x\n", v); + + v = epic_read_w(CEPIC_ESR); + pr_info("... CEPIC_ESR: 0x%x\n", v); + + v = epic_read_w(CEPIC_ESR2); + pr_info("... CEPIC_ESR2: 0x%x\n", v); + + /* CEPIC_EOI is write-only */ + + v = epic_read_w(CEPIC_CIR); + pr_info("... CEPIC_CIR: 0x%x\n", v); + + /* Reading CEPIC_PNMIRR starts NMI handling */ + + v = epic_read_w(CEPIC_ICR); + pr_info("... CEPIC_ICR: 0x%x\n", v); + + v = epic_read_w(CEPIC_ICR2); + pr_info("... CEPIC_ICR2: 0x%x\n", v); + + v = epic_read_w(CEPIC_TIMER_LVTT); + pr_info("... CEPIC_TIMER_LVTT: 0x%x\n", v); + + v = epic_read_w(CEPIC_TIMER_INIT); + pr_info("... CEPIC_TIMER_INIT: 0x%x\n", v); + + v = epic_read_w(CEPIC_TIMER_CUR); + pr_info("... CEPIC_TIMER_CUR: 0x%x\n", v); + + v = epic_read_w(CEPIC_TIMER_DIV); + pr_info("... CEPIC_TIMER_DIV: 0x%x\n", v); + + v = epic_read_w(CEPIC_NM_TIMER_LVTT); + pr_info("... CEPIC_NM_TIMER_LVTT: 0x%x\n", v); + + v = epic_read_w(CEPIC_NM_TIMER_INIT); + pr_info("... CEPIC_NM_TIMER_INIT: 0x%x\n", v); + + v = epic_read_w(CEPIC_NM_TIMER_CUR); + pr_info("... CEPIC_NM_TIMER_CUR: 0x%x\n", v); + + v = epic_read_w(CEPIC_NM_TIMER_DIV); + pr_info("... 
CEPIC_NM_TIMER_DIV: 0x%x\n", v); + + v = epic_read_w(CEPIC_SVR); + pr_info("... CEPIC_SVR: 0x%x\n", v); + + v = epic_read_w(CEPIC_PNMIRR_MASK); + pr_info("... CEPIC_PNMIRR_MASK: 0x%x\n", v); +} + +static void print_prepics(void) +{ + int node; + unsigned int v; + + for_each_online_node(node) { + pr_info("Printing PREPIC#%d:\n", node); + + v = prepic_node_read_w(node, SIC_prepic_version); + pr_info("... PREPIC_VERSION: 0x%x\n", v); + + v = prepic_node_read_w(node, SIC_prepic_ctrl); + pr_info("... PREPIC_CTRL: 0x%x\n", v); + + v = prepic_node_read_w(node, SIC_prepic_id); + pr_info("... PREPIC_ID: 0x%x\n", v); + + v = prepic_node_read_w(node, SIC_prepic_ctrl2); + pr_info("... PREPIC_CTRL2: 0x%x\n", v); + + v = prepic_node_read_w(node, SIC_prepic_err_int); + pr_info("... PREPIC_ERR_INT: 0x%x\n", v); +#ifdef CONFIG_E2K + v = prepic_node_read_w(node, SIC_prepic_linp0); + pr_info("... PREPIC_LINP0: 0x%x\n", v); + + v = prepic_node_read_w(node, SIC_prepic_linp1); + pr_info("... PREPIC_LINP1: 0x%x\n", v); + + v = prepic_node_read_w(node, SIC_prepic_linp2); + pr_info("... PREPIC_LINP2: 0x%x\n", v); + + v = prepic_node_read_w(node, SIC_prepic_linp3); + pr_info("... PREPIC_LINP3: 0x%x\n", v); + + v = prepic_node_read_w(node, SIC_prepic_linp4); + pr_info("... PREPIC_LINP4: 0x%x\n", v); + + v = prepic_node_read_w(node, SIC_prepic_linp5); + pr_info("... PREPIC_LINP5: 0x%x\n", v); +#endif + } +} + +int print_epics(bool force) +{ + int cpu; + + if (!force && !epic_debug) + return 1; + + preempt_disable(); + for_each_online_cpu(cpu) { + struct saved_cepic_regs regs; + + if (cpu == smp_processor_id()) { + print_cepic(NULL); + continue; + } + + regs.valid = false; +#ifdef CONFIG_E2K + /* This function can be called through SysRq under + * disabled interrupts, so we have to be careful + * and use nmi_call_function() with a timeout + * instead of smp_call_function(). 
*/ + nmi_call_function_single(cpu, save_cepic, ®s, 1, 30000); +#else + smp_call_function_single(cpu, save_cepic, ®s, 1); +#endif + if (regs.valid) + print_saved_cepic(cpu, ®s); + } + preempt_enable(); + + print_prepics(); + + return 0; +} + +static int __init epic_set_debug(char *arg) +{ + epic_debug = true; + return 0; +} +early_param("epic_debug", epic_set_debug); + +static int __init epic_set_bgi_mode(char *arg) +{ + epic_bgi_mode = true; + return 0; +} +early_param("epic_bgi_mode", epic_set_bgi_mode); + +static int __init pcsm_set_adjust(char *arg) +{ + pcsm_adjust_enable = true; + return 0; +} +early_param("pcsm_adjust", pcsm_set_adjust); + +/* + * EPIC Masked interrupt handling starts with reading CEPIC_VECT_INTA. + * Value read from CEPIC_VECT_INTA also contains Core Priority bits, + * which have to be saved to be written to CEPIC_EOI later + */ +int epic_get_vector(void) +{ + union cepic_vect_inta reg; + + reg.raw = epic_read_w(CEPIC_VECT_INTA); + + set_current_epic_core_priority(reg.bits.cpr); + + return reg.bits.vect; +} + +/* Core priority is read from CEPIC_VECT_INTA in native_do_interrupt */ +void ack_epic_irq(void) +{ + union cepic_eoi reg; + + reg.raw = 0; + reg.bits.rcpr = get_current_epic_core_priority(); + epic_write_w(CEPIC_EOI, reg.raw); +} + +__visible void __irq_entry cepic_epic_interrupt(struct pt_regs *regs) +{ + l_irq_enter(); + +#ifdef CONFIG_E2K + kvm_deliver_cepic_epic_interrupt(); +#endif + ack_epic_irq(); + l_irq_exit(); +} + +__visible void epic_hc_emerg_interrupt(struct pt_regs *regs) +{ + l_irq_enter(); + + pr_err("EPIC: received emergency hc interrupt on core %d\n", + smp_processor_id()); + + ack_epic_irq(); + l_irq_exit(); +} + +__visible void epic_iommu_interrupt(struct pt_regs *regs) +{ + l_irq_enter(); + +#ifdef CONFIG_E2K + e2k_iommu_error_interrupt(); +#endif + + ack_epic_irq(); + l_irq_exit(); +} + +__visible void epic_uncore_interrupt(struct pt_regs *regs) +{ + l_irq_enter(); + +#ifdef CONFIG_E2K + do_sic_error_interrupt(); 
+#endif + + panic("EPIC: received uncore interrupt on core %d\n", + smp_processor_id()); + + ack_epic_irq(); + l_irq_exit(); +} + +__visible void epic_ipcc_interrupt(struct pt_regs *regs) +{ + l_irq_enter(); + + pr_err("EPIC: received ipcc interrupt on core %d\n", + smp_processor_id()); + + ack_epic_irq(); + l_irq_exit(); +} + +__visible void epic_hc_interrupt(struct pt_regs *regs) +{ + l_irq_enter(); + + pr_err("EPIC: received hc interrupt on core %d\n", + smp_processor_id()); + + ack_epic_irq(); + l_irq_exit(); +} + +static const struct pcs_handle *pcs_handle_epic; + +void register_pcs_handle(const struct pcs_handle *handle) +{ + if (pcs_handle_epic) { + pr_err("PCS: handle is already registered\n"); + return; + } + + pcs_handle_epic = handle; +} +EXPORT_SYMBOL(register_pcs_handle); + +void unregister_pcs_handle(void) +{ + pcs_handle_epic = NULL; +} +EXPORT_SYMBOL(unregister_pcs_handle); + +__visible void epic_pcs_interrupt(struct pt_regs *regs) +{ + l_irq_enter(); + + if (pcs_handle_epic) + pcs_handle_epic->pcs_interrupt(); + + if (epic_debug) + pr_err("EPIC: received pcs interrupt on core %d\n", + smp_processor_id()); + + ack_epic_irq(); + l_irq_exit(); +} + + +/* + * Power management + */ +#ifdef CONFIG_PM +static int cepic_suspend(void) +{ + union cepic_ctrl reg_ctrl; + unsigned long flags; + + local_irq_save(flags); + + /* Disable CEPIC */ + reg_ctrl.raw = epic_read_w(CEPIC_CTRL); + reg_ctrl.bits.soft_en = 0; + epic_write_w(CEPIC_CTRL, reg_ctrl.raw); + + local_irq_restore(flags); + + return 0; +} + +static void cepic_resume(void) +{ + union cepic_ctrl reg_ctrl; + unsigned long flags; + + local_irq_save(flags); + + /* Enable CEPIC */ + reg_ctrl.raw = epic_read_w(CEPIC_CTRL); + reg_ctrl.bits.soft_en = 1; + epic_write_w(CEPIC_CTRL, reg_ctrl.raw); + + local_irq_restore(flags); +} + +static struct syscore_ops cepic_syscore_ops = { + .resume = cepic_resume, + .suspend = cepic_suspend, +}; + +static int __init init_cepic_sysfs(void) +{ + /* XXX: remove 
suspend/resume procs if !apic_pm_state.active? */ + if (cpu_has_epic()) + register_syscore_ops(&cepic_syscore_ops); + + return 0; +} + +/* local apic needs to resume before other devices access its registers. */ +core_initcall(init_cepic_sysfs); +#endif /* CONFIG_PM */ diff --git a/arch/l/kernel/epic/io_epic.c b/arch/l/kernel/epic/io_epic.c new file mode 100644 index 000000000000..51f7ebe576fe --- /dev/null +++ b/arch/l/kernel/epic/io_epic.c @@ -0,0 +1,2394 @@ +/* + * IO-EPIC support + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +static DEFINE_RAW_SPINLOCK(ioepic_lock); +static DEFINE_RAW_SPINLOCK(vector_lock); + +static struct ioepic { + /* Number of IRQ routing registers */ + int nr_registers; + /* IO-EPIC config */ + struct { + unsigned char epicver; + unsigned short epicid; + unsigned short nodeid; + unsigned short bus; + unsigned long epicaddr; + } mp_config; + /* IO-EPIC gsi routing info */ + struct mp_ioepic_gsi gsi_config; + /* Saved state during suspend/resume */ + struct IO_EPIC_route_entry *saved_registers; + /* For APIC + EIOHub machines */ + struct pci_dev *ioepic; +} ioepics[MAX_IO_EPICS]; + +#define IO_EPIC_NR_REGS 32 + +/* 0: edge, 1: level */ +static int pin_to_trigger[IO_EPIC_NR_REGS] = { + 1, /* 0 - IPMB */ + 1, /* 1 - SCI */ + 0, /* 2 - System Timer */ + 1, /* 3 - Ethernet0_tx0 */ + 1, /* 4 - Ethernet0_tx1 */ + 1, /* 5 - Ethernet0_rx0 */ + 1, /* 6 - Ethernet0_rx1 */ + 1, /* 7 - Ethernet0_sys */ + 1, /* 8 - HDA */ + 1, /* 9 - Mpv_timers0 */ + 1, /* 10 - Mpv_timers1 */ + 1, /* 11 - Mpv_timers2 */ + 1, /* 12 - GPIO0 */ + 1, /* 13 - GPIO1 */ + 1, /* 14 - Serial Port */ + 1, /* 15 - I2C/SPI */ + 1, /* 16 - PCI IRQ A */ + 1, /* 17 - PCI IRQ B */ + 1, /* 18 - PCI IRQ C */ + 1, /* 19 - PCI IRQ D */ + 1, /* 20 - WD Timer */ + 1, 
/* 21 - SATA */ + 1, /* 22 - SERR */ + 1, /* 23 - Ethernet1_tx0 */ + 1, /* 24 - Ethernet1_tx1 */ + 1, /* 25 - Ethernet1_rx0 */ + 1, /* 26 - Ethernet1_rx1 */ + 1, /* 27 - Ethernet1_sys */ + 1, /* 28 - USB */ + 0, /* 29 - WLCC */ + 1, /* 30 - Reserved */ + 1 /* 31 - Reserved */ +}; + +int mpc_ioepic_id(int ioepic_idx) +{ + return ioepics[ioepic_idx].mp_config.epicid; +} + +static int mpc_ioepic_version(int ioepic_idx) +{ + return ioepics[ioepic_idx].mp_config.epicver; +} + +static bool ioepic_has_fast_eoi(int ioepic_idx) +{ + return mpc_ioepic_version(ioepic_idx) >= IOEPIC_VERSION_2; +} + +int mpc_ioepic_nodeid(int ioepic_idx) +{ + return ioepics[ioepic_idx].mp_config.nodeid; +} + +int mpc_ioepic_bus(int ioepic_idx) +{ + return ioepics[ioepic_idx].mp_config.bus; +} + +unsigned long mpc_ioepic_addr(int ioepic_idx) +{ + return ioepics[ioepic_idx].mp_config.epicaddr; +} + +unsigned int mp_ioepic_gsi_base(int ioepic_idx) +{ + return ioepics[ioepic_idx].gsi_config.gsi_base; +} + +int nr_ioepics; + +static inline unsigned long io_epic_base(int idx) +{ + return mpc_ioepic_addr(idx); +} + +unsigned long io_epic_base_node(int node) +{ + int i; + + for (i = 0; i < nr_ioepics; i++) { + if (mpc_ioepic_nodeid(i) == node) + return mpc_ioepic_addr(i); + } + + pr_err("%s(): could not find IOEPIC on node %d\n", __func__, node); + + return 0; +} + +void io_epic_write(unsigned int epic, unsigned int reg, unsigned int value) +{ + boot_writel(value, (void __iomem *) (io_epic_base(epic) + reg)); +} + +unsigned int io_epic_read(unsigned int epic, unsigned int reg) +{ + return boot_readl((void __iomem *) (io_epic_base(epic) + reg)); +} + +static inline void set_io_epic_irq_attr(struct io_epic_irq_attr *irq_attr, + int ioepic, int ioepic_pin, + int trigger, int rid) +{ + irq_attr->ioepic = ioepic; + irq_attr->ioepic_pin = ioepic_pin; + irq_attr->trigger = trigger; + irq_attr->rid = rid; +} + +union io_epic_entry_union { + struct { u32 w1, w2, w3, w4, w5; }; + struct IO_EPIC_route_entry 
entry; +}; + +/* Write interrupt control word last, as it contains the mask bit */ +static void __ioepic_write_entry(int epic, int pin, + struct IO_EPIC_route_entry e) +{ + union io_epic_entry_union eu; + union IO_EPIC_INT_CTRL reg; + + eu.entry = e; + io_epic_write(epic, IOEPIC_TABLE_MSG_DATA(pin), eu.w2); + io_epic_write(epic, IOEPIC_TABLE_ADDR_HIGH(pin), eu.w3); + io_epic_write(epic, IOEPIC_TABLE_ADDR_LOW(pin), eu.w4); + io_epic_write(epic, IOEPIC_INT_RID(pin), eu.w5); + + reg.raw = eu.w1; + /* do not reset RWC1 bits */ + reg.bits.delivery_status = 0; + reg.bits.software_int = 0; + + io_epic_write(epic, IOEPIC_TABLE_INT_CTRL(pin), reg.raw); +} + +static struct IO_EPIC_route_entry __ioepic_read_entry(int epic, int pin) +{ + union io_epic_entry_union eu; + + eu.w1 = io_epic_read(epic, IOEPIC_TABLE_INT_CTRL(pin)); + eu.w2 = io_epic_read(epic, IOEPIC_TABLE_MSG_DATA(pin)); + eu.w3 = io_epic_read(epic, IOEPIC_TABLE_ADDR_HIGH(pin)); + eu.w4 = io_epic_read(epic, IOEPIC_TABLE_ADDR_LOW(pin)); + eu.w5 = io_epic_read(epic, IOEPIC_INT_RID(pin)); + + return eu.entry; +} + +void ioepic_write_entry(int epic, int pin, struct IO_EPIC_route_entry e) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&ioepic_lock, flags); + __ioepic_write_entry(epic, pin, e); + raw_spin_unlock_irqrestore(&ioepic_lock, flags); +} + +struct IO_EPIC_route_entry ioepic_read_entry(int epic, int pin) +{ + unsigned long flags; + struct IO_EPIC_route_entry e; + + raw_spin_lock_irqsave(&ioepic_lock, flags); + e = __ioepic_read_entry(epic, pin); + raw_spin_unlock_irqrestore(&ioepic_lock, flags); + + return e; +} + +int native_setup_ioepic_entry(int ioepic_idx, int irq, + struct IO_EPIC_route_entry *entry, + unsigned int destination, int vector, + struct io_epic_irq_attr *attr) +{ + u32 lo, hi; + if (ioepics[ioepic_idx].ioepic) { + struct iohub_sysdata *sd = + ioepics[ioepic_idx].ioepic->bus->sysdata; + lo = sd->pci_msi_addr_lo; + hi = sd->pci_msi_addr_hi; + } else { + 
get_io_epic_msi(ioepics[ioepic_idx].mp_config.nodeid, + &lo, &hi); + } + memset(entry, 0, sizeof(*entry)); + + entry->msg_data.bits.vector = vector; + /* + * Area defined by RT_MSI is aligned to 1 Mb, so lower 20 bits are + * reused to encode destination core ID + */ + entry->addr_high = hi; + entry->addr_low.bits.MSI = lo >> 20; + entry->addr_low.bits.dst = cepic_id_short_to_full(destination); + + /* req_id is currently not used */ + + entry->int_ctrl.bits.trigger = attr->trigger; + if (attr->trigger) + entry->int_ctrl.bits.mask = 1; + + entry->rid.raw = attr->rid; + + return 0; +} + +/* Return 0, if the pin was not already masked, and 1, if it was */ +static bool __mask_ioepic_pin(unsigned short epic, unsigned short pin) +{ + union IO_EPIC_INT_CTRL reg_ctrl; + + reg_ctrl.raw = io_epic_read(epic, IOEPIC_TABLE_INT_CTRL(pin)); + + if (reg_ctrl.bits.mask) + return 0; + + /* do not reset RWC1 bits */ + reg_ctrl.bits.delivery_status = 0; + reg_ctrl.bits.software_int = 0; + + reg_ctrl.bits.mask = 1; + io_epic_write(epic, IOEPIC_TABLE_INT_CTRL(pin), reg_ctrl.raw); + + /* + * Synchronize the IO-EPIC and the CPU by doing + * a dummy read from the IO-EPIC + */ + io_epic_read(epic, IOEPIC_ID); + + return 1; +} + +static bool __mask_ioepic_irq(struct epic_irq_cfg *cfg) +{ + return __mask_ioepic_pin(cfg->epic, cfg->pin); +} + +static void mask_ioepic_irq(struct irq_data *data) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&ioepic_lock, flags); + __mask_ioepic_irq(data->chip_data); + raw_spin_unlock_irqrestore(&ioepic_lock, flags); +} + +static void __unmask_ioepic_pin(unsigned short epic, unsigned short pin) +{ + union IO_EPIC_INT_CTRL reg_ctrl; + + reg_ctrl.raw = io_epic_read(epic, IOEPIC_TABLE_INT_CTRL(pin)); + + /* do not reset RWC1 bits */ + reg_ctrl.bits.delivery_status = 0; + reg_ctrl.bits.software_int = 0; + + reg_ctrl.bits.mask = 0; + io_epic_write(epic, IOEPIC_TABLE_INT_CTRL(pin), reg_ctrl.raw); +} + +static void __unmask_ioepic_irq(struct epic_irq_cfg *cfg) +{ 
+ __unmask_ioepic_pin(cfg->epic, cfg->pin); +} + +static void unmask_ioepic_irq(struct irq_data *data) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&ioepic_lock, flags); + __unmask_ioepic_irq(data->chip_data); + raw_spin_unlock_irqrestore(&ioepic_lock, flags); +} + +/* + * When listed as conforming in the MP table, ISA interupts are edge triggered, + * and PCI interrupts are level triggered + */ +static int irq_trigger(int idx) +{ + int bus = mp_irqs[idx].srcbus; + int trigger; + + /* + * Determine IRQ trigger mode (edge or level sensitive): + */ + switch ((mp_irqs[idx].irqflag>>2) & 3) { + case 0: /* conforms, ie. bus-type dependent */ + if (test_bit(bus, mp_bus_not_pci)) + trigger = 0; + else + trigger = 1; + break; + case 1: /* edge */ + { + trigger = 0; + break; + } + case 2: /* reserved */ + { + pr_warn("broken BIOS!!\n"); + trigger = 1; + break; + } + case 3: /* level */ + { + trigger = 1; + break; + } + default: /* invalid */ + { + pr_warn("broken BIOS!!\n"); + trigger = 0; + break; + } + } + return trigger; +} + +/* Change destination core id in IO-EPIC routing table */ +static void __target_IO_EPIC_irq(struct epic_irq_cfg *cfg, unsigned int dest) +{ + bool masked; + unsigned int vector = cfg->vector; + unsigned int epic = cfg->epic; + unsigned int pin = cfg->pin; + union IO_EPIC_MSG_DATA reg_data; + union IO_EPIC_MSG_ADDR_LOW reg_addr; + + /* Mask the pin */ + masked = __mask_ioepic_pin(epic, pin); + + /* Set vector */ + reg_data.raw = io_epic_read(epic, IOEPIC_TABLE_MSG_DATA(pin)); + reg_data.bits.vector = vector; + io_epic_write(epic, IOEPIC_TABLE_MSG_DATA(pin), reg_data.raw); + + /* Set destination core id */ + reg_addr.raw = io_epic_read(epic, IOEPIC_TABLE_ADDR_LOW(pin)); + reg_addr.bits.dst = cepic_id_short_to_full(dest); + io_epic_write(epic, IOEPIC_TABLE_ADDR_LOW(pin), reg_addr.raw); + + /* Unmask the pin, if it was masked here */ + if (masked) + __unmask_ioepic_pin(epic, pin); +} + +static bool irqchip_is_ioepic(struct irq_chip *chip); + 
+#ifdef CONFIG_SMP +static void irq_complete_move_vector(struct epic_irq_cfg *cfg, unsigned int vector) +{ + if (likely(!cfg->move_in_progress)) + return; + + /* + * When the first interrupt reaches the new CPU destination, we can + * safely clean up the table on the old one + */ + if (vector == cfg->vector && smp_processor_id() == cfg->dest) { + cfg->move_in_progress = 0; + epic_send_IPI(cfg->old_dest, IRQ_MOVE_CLEANUP_VECTOR); + epic_printk("Finished moving vector 0x%x to CPU %d\n", + cfg->vector, cfg->dest); + } +} + +static void irq_complete_move(struct epic_irq_cfg *cfg) +{ +#if defined CONFIG_E2K + irq_complete_move_vector(cfg, get_irq_regs()->interrupt_vector); +#elif defined CONFIG_E90S + irq_complete_move_vector(cfg, e90s_irq_pending[smp_processor_id()].vector); +#else +#error fixme +#endif +} + +void epic_irq_force_complete_move(struct irq_desc *desc) +{ + struct irq_data *data = irq_desc_get_irq_data(desc); + struct epic_irq_cfg *cfg; + unsigned int irq; + + if (!data) + return; + + irq = data->irq; + cfg = irq_data_get_irq_chip_data(data); + if (cfg) + irq_complete_move_vector(cfg, cfg->vector); +} + +/* Handler of IRQ move cleanup */ +asmlinkage void epic_smp_irq_move_cleanup_interrupt(struct pt_regs *regs) +{ + unsigned int vector, me; + + ack_epic_irq(); + l_irq_enter(); + + me = smp_processor_id(); + for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { + int irq; + unsigned int irr; + struct irq_desc *desc; + struct irq_chip *chip; + struct irq_cfg *apic_cfg; + struct epic_irq_cfg *epic_cfg; + + irq = __this_cpu_read(vector_irq[vector]); + + if (irq <= VECTOR_UNDEFINED) + continue; + + desc = irq_to_desc(irq); + if (!desc) + continue; + + chip = irq_get_chip(irq); + if (irqchip_is_ioepic(chip)) { + /* IO-EPIC IRQ */ + epic_cfg = irq_get_chip_data(irq); + if (!epic_cfg) + continue; + + raw_spin_lock(&desc->lock); + + /* + * Check if the irq migration is in progress. 
If so, we + * haven't received the cleanup request yet for this irq + */ + if (epic_cfg->move_in_progress) + goto unlock; + + if (vector == epic_cfg->vector && me == epic_cfg->dest) + goto unlock; + } else { + /* IO-APIC IRQ */ + apic_cfg = irq_get_chip_data(irq); + if (!apic_cfg) + continue; + + raw_spin_lock(&desc->lock); + + /* + * Check if the irq migration is in progress. If so, we + * haven't received the cleanup request yet for this irq + */ + if (apic_cfg->move_in_progress) + goto unlock; + + if (vector == apic_cfg->vector && + cpumask_test_cpu(me, apic_cfg->domain)) + goto unlock; + } + + irr = epic_read_w(CEPIC_PMIRR + vector / 32 * 0x4); + /* + * Check if the vector that needs to be cleaned up is + * registered at the cpu's IRR. If so, then this is not + * the best time to clean it up. Lets clean it up in the + * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR + * to myself. + */ + if (irr & (1 << (vector % 32))) { + epic_send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR); + goto unlock; + } + __this_cpu_write(vector_irq[vector], -1); + epic_printk("Cleanup finished freeing vector 0x%x\n", vector); +unlock: + raw_spin_unlock(&desc->lock); + } + + l_irq_exit(); +} +#else +static inline void irq_complete_move(struct epic_irq_cfg *cfg) { } +#endif /* CONFIG_SMP */ + +static void ack_epic_edge(struct irq_data *data) +{ + irq_complete_move(data->chip_data); + irq_move_irq(data); + ack_epic_irq(); +} + +static void ioepic_level_eoi_slow(int epic, int pin) +{ + unsigned long flags; + union IO_EPIC_INT_CTRL reg; + + raw_spin_lock_irqsave(&ioepic_lock, flags); + + reg.raw = io_epic_read(epic, IOEPIC_TABLE_INT_CTRL(pin)); + reg.bits.delivery_status = 1; + io_epic_write(epic, IOEPIC_TABLE_INT_CTRL(pin), reg.raw); + + raw_spin_unlock_irqrestore(&ioepic_lock, flags); +} + +/* Writing W1C bits of int_ctrl does not change the RW bits (IOEPIC version 2) */ +static void ioepic_level_eoi_fast(int epic, int pin) +{ + union IO_EPIC_INT_CTRL reg; + + reg.raw = 0; + 
reg.bits.delivery_status = 1; + io_epic_write(epic, IOEPIC_TABLE_INT_CTRL(pin), reg.raw); +} + +static void ioepic_level_eoi(int epic, int pin) +{ + if (ioepic_has_fast_eoi(epic)) + ioepic_level_eoi_fast(epic, pin); + else + ioepic_level_eoi_slow(epic, pin); +} + +#ifdef CONFIG_GENERIC_PENDING_IRQ +static bool io_epic_level_ack_pending(struct epic_irq_cfg *cfg) +{ + unsigned long flags; + union IO_EPIC_INT_CTRL reg; + + raw_spin_lock_irqsave(&ioepic_lock, flags); + + reg.raw = io_epic_read(cfg->epic, + IOEPIC_TABLE_INT_CTRL(cfg->pin)); + /* Is the remote IRR bit set? */ + if (reg.bits.delivery_status) { + raw_spin_unlock_irqrestore(&ioepic_lock, flags); + return true; + } + + raw_spin_unlock_irqrestore(&ioepic_lock, flags); + + return false; +} + +static inline bool ioepic_irqd_mask(struct irq_data *data) +{ + /* If we are moving the irq we need to mask it */ + if (unlikely(irqd_is_setaffinity_pending(data) && + !irqd_irq_inprogress(data))) { + mask_ioepic_irq(data); + return true; + } + return false; +} + +static inline void ioepic_irqd_unmask(struct irq_data *data, + struct epic_irq_cfg *cfg, bool masked) +{ + if (unlikely(masked)) { + /* Only migrate the irq if the ack has been received. + * + * On rare occasions the broadcast level triggered ack gets + * delayed going to ioepics, and if we reprogram the + * vector while Remote IRR is still set the irq will never + * fire again. + * + * To prevent this scenario we read the Remote IRR bit + * of the ioepic. This has two effects. + * - On any sane system the read of the ioepic will + * flush writes (and acks) going to the ioepic from + * this cpu. + * - We get to see if the ACK has actually been delivered. 
+ */ + if (!io_epic_level_ack_pending(cfg)) + irq_move_masked_irq(data); + unmask_ioepic_irq(data); + } +} +#else +static inline bool ioepic_irqd_mask(struct irq_data *data) +{ + return false; +} +static inline void ioepic_irqd_unmask(struct irq_data *data, + struct epic_irq_cfg *cfg, bool masked) +{ +} +#endif + +static void ack_epic_level(struct irq_data *data) +{ + struct epic_irq_cfg *cfg = irq_data_get_irq_chip_data(data); + bool masked; + + irq_complete_move(cfg); + masked = ioepic_irqd_mask(data); + + ack_epic_irq(); + ioepic_level_eoi(cfg->epic, cfg->pin); + + ioepic_irqd_unmask(data, cfg, masked); +} + +/* Assign IRQ to one of the CPUs from mask */ +static int __epic_assign_irq_vector(int irq, struct epic_irq_cfg *cfg, + const struct cpumask *mask) +{ + /* + * Start distributing vectors sequentially from + * FIRST_EXTERNAL_VECTOR + 1 + */ + static int current_vector = FIRST_EXTERNAL_VECTOR; + int cpu, err; + + if (cfg->move_in_progress) + return -EBUSY; + + /* Return ENOSPC if vectors on all CPUs in mask are already taken */ + err = -ENOSPC; + + /* This IRQ is already assigned to the CPU from mask. Exit quietly */ + if (cfg->dest != BAD_EPICID && cpumask_test_cpu(cfg->dest, mask) && + cpumask_test_cpu(cfg->dest, cpu_online_mask)) + return 0; + + /* Only try and allocate irqs on cpus that are present */ + for_each_cpu_and(cpu, mask, cpu_online_mask) { + int vector; + + /* Start the search from the current_vector */ + vector = current_vector; +next: + vector += 1; + + /* + * No good vectors found between current_vector and + * first_system_vector. Continue search from + * FIRST_EXTERNAL_VECTOR + */ + if (vector >= first_system_vector) + vector = FIRST_EXTERNAL_VECTOR; + + /* + * Ran out of availible vectors on current CPU. 
Search on the + * next one from mask + */ + if (vector == current_vector) + continue; + + /* This vector was already taken by setup_PIC_vector_handler */ + if (test_bit(vector, used_vectors)) + goto next; + + /* This vector was previously taken by this function */ + if (per_cpu(vector_irq, cpu)[vector] > VECTOR_UNDEFINED) + goto next; + + /* Found one! Next time start searching from this vector */ + current_vector = vector; + + /* This IRQ was previously assigned to a different CPU */ + if (cfg->vector) { + cfg->old_dest = cfg->dest; + if (cpumask_test_cpu(cfg->old_dest, cpu_online_mask)) + cfg->move_in_progress = 1; + if (cfg->move_in_progress) { + epic_printk("Started move vect 0x%x to 0x%x\n", + cfg->vector, vector); + } + } + per_cpu(vector_irq, cpu)[vector] = irq; + cfg->vector = vector; + cfg->dest = cpu; + err = 0; + break; + } + return err; +} + +static int epic_assign_irq_vector(int irq, struct epic_irq_cfg *cfg, + const struct cpumask *mask) +{ + int err; + unsigned long flags; + + raw_spin_lock_irqsave(&vector_lock, flags); + err = __epic_assign_irq_vector(irq, cfg, mask); + raw_spin_unlock_irqrestore(&vector_lock, flags); + + return err; +} + +int __ioepic_set_affinity(struct irq_data *data, const struct cpumask *mask, + unsigned int *dest_id) +{ + struct epic_irq_cfg *cfg = irq_data_get_irq_chip_data(data); + unsigned int irq = data->irq; + int err; + + if (!IS_ENABLED(CONFIG_SMP)) + return -1; + + if (!cpumask_intersects(mask, cpu_online_mask)) + return -EINVAL; + + err = epic_assign_irq_vector(irq, cfg, mask); + if (err) + return err; + + *dest_id = cfg->dest; + + cpumask_copy(irq_data_get_affinity_mask(data), mask); + + return 0; +} + +static int native_ioepic_set_affinity(struct irq_data *data, + const struct cpumask *mask, + bool force) +{ + unsigned int dest; + unsigned long flags; + int ret; + + if (!IS_ENABLED(CONFIG_SMP)) + return -1; + + raw_spin_lock_irqsave(&ioepic_lock, flags); + ret = __ioepic_set_affinity(data, mask, &dest); + if (!ret) 
{ + epic_printk("native_ioepic_set_affinity: writing to IO-EPIC\n"); + __target_IO_EPIC_irq(data->chip_data, dest); + ret = IRQ_SET_MASK_OK_NOCOPY; + } + raw_spin_unlock_irqrestore(&ioepic_lock, flags); + return ret; +} + +static int ioepic_retrigger_irq(struct irq_data *data) +{ + struct epic_irq_cfg *cfg = irq_data_get_irq_chip_data(data); + unsigned long flags; + + raw_spin_lock_irqsave(&vector_lock, flags); + if (cpumask_test_cpu(cfg->dest, cpu_online_mask)) + epic_send_IPI(cfg->dest, cfg->vector); + else + pr_warn("Tried to retrigger IRQ %d, but CPU is offline\n", + data->irq); + raw_spin_unlock_irqrestore(&vector_lock, flags); + + return 1; +} + +static unsigned int startup_ioepic_irq(struct irq_data *data) +{ + epic_printk("Starting up IO-EPIC irq %u\n", data->irq); + + if (!data->chip_data) + epic_printk("ERROR: No chip data on this IRQ!\n"); + + unmask_ioepic_irq(data); + + return 0; +} + +struct irq_chip ioepic_chip __read_mostly = { + .name = "IO-EPIC", + .irq_startup = startup_ioepic_irq, + .irq_mask = mask_ioepic_irq, + .irq_unmask = unmask_ioepic_irq, + .irq_ack = ack_epic_edge, + .irq_eoi = ack_epic_level, + .irq_set_affinity = native_ioepic_set_affinity, + .irq_retrigger = ioepic_retrigger_irq +}; + +void __epic_setup_vector_irq(int cpu) +{ + /* Initialize vector_irq on a new cpu */ + int irq, vector; + struct epic_irq_cfg *cfg; + + /* + * vector_lock will make sure that we don't run into irq vector + * assignments that might be happening on another cpu in parallel, + * while we setup our initial vector to irq mappings. 
+ */ + raw_spin_lock(&vector_lock); + /* Mark the inuse vectors */ + for_each_active_irq(irq) { + if (!irqchip_is_ioepic(irq_get_chip(irq))) + continue; + + cfg = irq_get_chip_data(irq); + if (!cfg) + continue; + + if (cpu != cfg->dest) + continue; + vector = cfg->vector; + per_cpu(vector_irq, cpu)[vector] = irq; + } + /* Mark the free vectors */ + for (vector = 0; vector < NR_VECTORS; ++vector) { + irq = per_cpu(vector_irq, cpu)[vector]; + if (irq <= VECTOR_UNDEFINED) + continue; + + if (!irqchip_is_ioepic(irq_get_chip(irq))) + continue; + + cfg = irq_get_chip_data(irq); + if (cpu != cfg->dest) + per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; + } + raw_spin_unlock(&vector_lock); +} + +/* + * Function to set the IO-EPIC physical IDs based on the values stored in the + * MP table. Panic if these values are invalid for any reason. + */ +void __init_recv setup_ioepic_ids_from_mpc_nocheck(void) +{ + int ioepic_idx; + int mpc_id; + int mpc_node; + union IO_EPIC_ID reg_id; + unsigned long flags; + + /* + * Set the IOEPIC ID to the value stored in the MP table. + */ + for (ioepic_idx = 0; ioepic_idx < nr_ioepics; ioepic_idx++) { + /* Read the IOEPIC ID register value */ + raw_spin_lock_irqsave(&ioepic_lock, flags); + reg_id.raw = io_epic_read(ioepic_idx, IOEPIC_ID); + raw_spin_unlock_irqrestore(&ioepic_lock, flags); + + mpc_id = mpc_ioepic_id(ioepic_idx); + mpc_node = mpc_ioepic_nodeid(ioepic_idx); + + /* + * Update the ID register according to the right value + * from the MPC table if they are different. 
+ */ + if (mpc_id == reg_id.bits.id && mpc_node == reg_id.bits.nodeid) + continue; + + epic_printk("Changing IO-EPIC physical ID to %d, node to %d\n", + mpc_id, mpc_node); + + reg_id.bits.id = mpc_id; + reg_id.bits.nodeid = mpc_node; + raw_spin_lock_irqsave(&ioepic_lock, flags); + io_epic_write(ioepic_idx, IOEPIC_ID, reg_id.raw); + raw_spin_unlock_irqrestore(&ioepic_lock, flags); + } +} + +static struct epic_irq_cfg *alloc_irq_cfg(int node) +{ + struct epic_irq_cfg *cfg; + + cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node); + if (!cfg) + return NULL; + + /* 0 is a valid destination id, can't use it */ + cfg->dest = BAD_EPICID; + + return cfg; +} + +static struct epic_irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node) +{ + int res = irq_alloc_desc_at(at, node); + struct epic_irq_cfg *cfg; + + if (res < 0) { + if (res != -EEXIST) + return NULL; + cfg = irq_get_chip_data(at); + if (cfg) + return cfg; + } + + cfg = alloc_irq_cfg(node); + if (cfg) + irq_set_chip_data(at, cfg); + else + irq_free_desc(at); + + return cfg; +} + +static void __epic_clear_irq_vector(int irq, struct epic_irq_cfg *cfg) +{ + int vector; + + BUG_ON(!cfg->vector); + + vector = cfg->vector; + if (cpumask_test_cpu(cfg->dest, cpu_online_mask)) + per_cpu(vector_irq, cfg->dest)[vector] = VECTOR_UNDEFINED; + + cfg->vector = 0; + cfg->dest = BAD_EPICID; + + if (likely(!cfg->move_in_progress)) + return; + + cfg->move_in_progress = 0; +} + +static void ioepic_register_intr(unsigned int irq, struct epic_irq_cfg *cfg, + unsigned long trigger) +{ + struct irq_chip *chip = &ioepic_chip; + irq_flow_handler_t hdl; + bool fasteoi; + + if (trigger == IOEPIC_AUTO || trigger == IOEPIC_LEVEL) { + irq_set_status_flags(irq, IRQ_LEVEL); + fasteoi = true; + } else { + irq_clear_status_flags(irq, IRQ_LEVEL); + fasteoi = false; + } + + hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq; + irq_set_chip_and_handler_name(irq, chip, hdl, + fasteoi ? 
"fasteoi" : "edge"); +} + +static void setup_ioepic_irq(unsigned int irq, struct epic_irq_cfg *cfg, + struct io_epic_irq_attr *attr) +{ + struct IO_EPIC_route_entry entry; + + if (epic_assign_irq_vector(irq, cfg, cpu_online_mask)) + return; + if (cfg->dest == BAD_EPICID) { + pr_warn("Failed to obtain dest epicid for ioepic %d, pin %d\n", + mpc_ioepic_id(attr->ioepic), attr->ioepic_pin); + __epic_clear_irq_vector(irq, cfg); + return; + } + + epic_printk("IOEPIC[%d]: Set routing entry (%d-%d -> 0x%x -> IRQ %d Mode:%i Dest:%d SID:0x%x)\n", + attr->ioepic, mpc_ioepic_id(attr->ioepic), attr->ioepic_pin, + cfg->vector, irq, attr->trigger, cfg->dest, attr->rid); + + if (native_setup_ioepic_entry(attr->ioepic, irq, &entry, cfg->dest, + cfg->vector, attr)) { + pr_warn("Failed to setup ioepic entry for ioepic %d, pin %d\n", + mpc_ioepic_id(attr->ioepic), attr->ioepic_pin); + __epic_clear_irq_vector(irq, cfg); + + return; + } + + ioepic_register_intr(irq, cfg, attr->trigger); + + ioepic_write_entry(attr->ioepic, attr->ioepic_pin, entry); +} + +static int +io_epic_setup_irq_pin(unsigned int irq, int node, struct io_epic_irq_attr *attr) +{ + struct epic_irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node); + + if (!cfg) + return -EINVAL; + + cfg->epic = attr->ioepic; + cfg->pin = attr->ioepic_pin; + + setup_ioepic_irq(irq, cfg, attr); + + return 0; +} + +/* + * Find the IRQ entry number of a certain pin + */ +static int find_irq_entry(int ioepic_idx, int pin, int type) +{ + int i; + + for (i = 0; i < mp_irq_entries; i++) + if (mp_irqs[i].irqtype == type && + (mp_irqs[i].dstapic == mpc_ioepic_id(ioepic_idx) && + mp_irqs[i].dstirq == pin)) + return i; + + return -1; +} + +static int pin_2_irq(int idx, int epic, int pin) +{ + int irq; + int bus = mp_irqs[idx].srcbus; + unsigned int gsi_base = mp_ioepic_gsi_base(epic); + + /* + * Debugging check, we are in big trouble if this message pops up! 
+ */ + if (mp_irqs[idx].dstirq != pin) + pr_err("broken BIOS or MPTABLE parser, ayiee!!\n"); + + /* + * ISA device interrupts are allowed only for the IO-EPIC + * on BSP. If boot passes such interrupts for other IO-EPICs + * then their IRQ numbers are calculated as for PCI devices. + * For example, system timer interrupt number is 26 on + * the second IO EPIC but it is masked + */ + if (test_bit(bus, mp_bus_not_pci) && epic == 0) { + irq = mp_irqs[idx].srcbusirq; + } else { + u32 gsi = gsi_base + pin; + + if (gsi >= NR_IRQS_LEGACY) + irq = gsi; + else + irq = gsi_top + gsi; + } + + return irq; +} + +/* + * Only two types of MP-table interrupts are supported: mp_FixINT and mp_INT. + * mp_FixINT are interrupts from EIOHub devices, directly connected to IOEPIC. + * mp_INT are PCI INTx (boot passes them for each bus, QEMU also passes them for virtio). + * Boot also passes mp_INT for system timer. + * + * mp_FixINT always passes correct bus in srcbus and devfn in srcbusirq fields. + * mp_INT may pass incorrect bus (ISA for system timer) or incorrect devfn (for INTx). 
+ */ + +#define I2C_SPI_IOEPIC_DEVFN PCI_DEVFN(2, 1) +static unsigned int irq_requester_id(int idx, int epic) +{ + int bus, devfn; + union IO_EPIC_REQ_ID rid; + + if (mp_irqs[idx].irqtype == mp_FixINT) { + bus = mp_irqs[idx].srcbus; + devfn = mp_irqs[idx].srcbusirq; + } else { + bus = mpc_ioepic_bus(epic); + devfn = I2C_SPI_IOEPIC_DEVFN; + } + + rid.raw = 0; + rid.bits.bus = bus; + rid.bits.dev = PCI_SLOT(devfn); + rid.bits.fn = PCI_FUNC(devfn); + + return rid.raw; +} + +static void __init io_epic_reset_pin(unsigned int epic, unsigned int pin) +{ + /* These registers are not reset by hardware */ + io_epic_write(epic, IOEPIC_INT_RID(pin), 0); + io_epic_write(epic, IOEPIC_TABLE_MSG_DATA(pin), 0); + io_epic_write(epic, IOEPIC_TABLE_ADDR_HIGH(pin), 0); + io_epic_write(epic, IOEPIC_TABLE_ADDR_LOW(pin), 0); +} + +static void __init __setup_io_epic_irqs(unsigned int ioepic_idx) +{ + int idx; + struct io_epic_irq_attr attr; + unsigned int pin, irq, trigger, req_id; + + for (pin = 0; pin < ioepics[ioepic_idx].nr_registers; pin++) { + idx = find_irq_entry(ioepic_idx, pin, mp_INT); + if (idx == -1) + idx = find_irq_entry(ioepic_idx, pin, mp_FixINT); + + if (idx == -1) { + io_epic_reset_pin(ioepic_idx, pin); + continue; + } + + irq = pin_2_irq(idx, ioepic_idx, pin); + trigger = irq_trigger(idx); + req_id = irq_requester_id(idx, ioepic_idx); + + if (pin < IO_EPIC_NR_REGS && trigger != pin_to_trigger[pin]) + epic_printk("IOEPIC%d, pin %d: trigger type mismatch\n", + ioepic_idx, pin); + + set_io_epic_irq_attr(&attr, ioepic_idx, pin, trigger, req_id); + + io_epic_setup_irq_pin(irq, mpc_ioepic_nodeid(ioepic_idx), + &attr); + } +} + +static void __init setup_io_epic_irqs(void) +{ + unsigned int ioepic_idx; + + epic_printk("Initializing IO-EPIC IRQs\n"); + + for (ioepic_idx = 0; ioepic_idx < nr_ioepics; ioepic_idx++) + __setup_io_epic_irqs(ioepic_idx); +} + +/* + * In IO-APIC this is done in arch_early_irq_init. There is no reason to + * allocate saved_registers that early though. 
saved_registers can only be used + * after the device initcall "ioepic_init_ops", which comes much later than + * init_IRQ() + */ +static inline void alloc_ioepic_saved_registers(void) +{ + int i; + + for (i = 0; i < nr_ioepics; i++) { + ioepics[i].saved_registers = + kzalloc(sizeof(struct IO_EPIC_route_entry) * + ioepics[i].nr_registers, GFP_KERNEL); + if (!ioepics[i].saved_registers) + pr_err("IOEPIC %d: suspend/resume impossible!\n", i); + } +} + +void __init setup_io_epic(void) +{ + /* + * Set up IO-EPIC IRQ routing. + */ + setup_ioepic_ids_from_mpc_nocheck(); + setup_io_epic_irqs(); + alloc_ioepic_saved_registers(); + /* FIXME skipping pcibios_irq_init() on guest (for passthrough) */ + if (paravirt_enabled()) + pcibios_enable_irq = pirq_enable_irq; +} + +void __init mp_register_ioepic(int ver, int id, int node, unsigned long address, + u32 gsi_base) +{ + int idx = nr_ioepics; + unsigned long flags; + struct mp_ioepic_gsi *gsi_cfg; + union IO_EPIC_ID reg_id; + union IO_EPIC_VERSION reg_version; + + + if (nr_ioepics > MAX_IO_EPICS) { + pr_warn("Max # of IO-EPICs (%d) reached, ignoring %d\n", + MAX_IO_EPICS, nr_ioepics); + return; + } + + if (!address) { + pr_warn("NULL IO-EPIC address in MP-table, ignoring %d", + nr_ioepics); + return; + } + + ioepics[idx].mp_config.epicaddr = address; + + raw_spin_lock_irqsave(&ioepic_lock, flags); + reg_id.raw = io_epic_read(idx, IOEPIC_ID); + reg_version.raw = io_epic_read(idx, IOEPIC_VERSION); + raw_spin_unlock_irqrestore(&ioepic_lock, flags); + + if (reg_id.raw == -1 && reg_version.raw == -1) + pr_warn("IO-EPIC (mpc_id %d) is unusable\n", id); + + /* Get id, node and PCI bus from MP-table; get version from the register */ + ioepics[idx].mp_config.epicid = id; + ioepics[idx].mp_config.nodeid = node; + ioepics[idx].mp_config.epicver = reg_version.bits.version; + ioepics[idx].mp_config.bus = mp_ioepic_find_bus(id); + + /* + * Build basic GSI lookup table to facilitate gsi->io_epic lookups + * and to prevent reprogramming of 
IO-EPIC pins (PCI GSIs). + */ + gsi_cfg = &ioepics[idx].gsi_config; + gsi_cfg->gsi_base = gsi_base; + gsi_cfg->gsi_end = gsi_base + reg_version.bits.entries - 1; + + /* + * The number of IO-EPIC IRQ registers (== #pins): + */ + ioepics[idx].nr_registers = reg_version.bits.entries; + + if (gsi_cfg->gsi_end >= gsi_top) + gsi_top = gsi_cfg->gsi_end + 1; + + pr_info("IOEPIC[%d]: epic_id %d, node %d, bus %d, version %d, address 0x%lx, entries %d, GSI %d-%d\n", + idx, id, node, mpc_ioepic_bus(idx), reg_version.bits.version, address, + reg_version.bits.entries, gsi_cfg->gsi_base, gsi_cfg->gsi_end); + + nr_ioepics++; +} + +/* + * Only used for mp_INT intsrc MP-table entries. Currently only passed by qemu + * for virtio + */ +int io_epic_get_PCI_irq_vector(int bus, int slot, int pin) +{ + int ioepic_idx, i, best_guess = -1; + + epic_printk("io_epic_get_PCI_irq_vector() bus:%d, slot:%d, pin:%d\n", + bus, slot, pin); + if (test_bit(bus, mp_bus_not_pci)) { + epic_printk("PCI BIOS passed nonexistent PCI bus %d!\n", bus); + return -1; + } + + for (i = 0; i < mp_irq_entries; i++) { + int lbus = mp_irqs[i].srcbus, found = 0; + + for (ioepic_idx = 0; ioepic_idx < nr_ioepics; ioepic_idx++) + if (mpc_ioepic_id(ioepic_idx) == mp_irqs[i].dstapic) { + found = 1; + break; + } + if (!found) + continue; + + if (!test_bit(lbus, mp_bus_not_pci) && + !mp_irqs[i].irqtype && + (bus == lbus) && + (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) { + int irq = pin_2_irq(i, ioepic_idx, mp_irqs[i].dstirq); + + epic_printk("Found our bus & pin -> IRQ %d\n", irq); + + if (pin == (mp_irqs[i].srcbusirq & 3)) { + epic_printk("pin == src bus irq == %d\n", pin); + return irq; + } + /* + * Use the first all-but-pin matching entry as a + * best-guess fuzzy result for broken mptables. 
+ */ + if (best_guess < 0) { + epic_printk("Use the first all-but-pin matching entry as a best-guess fuzzy result IRQ %d\n", + irq); + best_guess = irq; + } + } + } + epic_printk("io_epic_get_PCI_irq_vector() Return IRQ %d\n", + best_guess); + return best_guess; +} +EXPORT_SYMBOL(io_epic_get_PCI_irq_vector); + +int io_epic_get_fix_irq_vector(int domain, int bus, int slot, int func, int irq) +{ + int i; + + epic_printk("io_epic_get_fix_irq_vector() bus:%d, irq:%d\n", + bus, irq); + + for (i = 0; i < mp_irq_entries; i++) { + int lbus = mp_irqs[i].srcbus, found = 0, ioepic_idx; + + for (ioepic_idx = 0; ioepic_idx < nr_ioepics; ioepic_idx++) + if (mpc_ioepic_id(ioepic_idx) == mp_irqs[i].dstapic && + mpc_ioepic_nodeid(ioepic_idx) == domain) { + found = 1; + break; + } + if (!found) + continue; + + if ((!test_bit(lbus, mp_bus_not_pci)) && + (mp_irqs[i].irqtype == mp_FixINT) && + (bus == lbus) && + (PCI_SLOT(mp_irqs[i].srcbusirq) == slot) && + (PCI_FUNC(mp_irqs[i].srcbusirq) == func) && + ((irq == mp_irqs[i].dstirq) || irq == 0)) { + irq = pin_2_irq(i, ioepic_idx, mp_irqs[i].dstirq); + epic_printk("Found our bus %d slot %d func %d IRQ %d\n", + lbus, slot, func, irq); + return irq; + } else if ((((test_bit(lbus, mp_bus_not_pci)) && + (test_bit(bus, mp_bus_not_pci)) && + (mp_irqs[i].irqtype == mp_FixINT)) || + ((test_bit(lbus, mp_bus_not_pci)) && + (mp_irqs[i].irqtype == mp_INT) && + (irq != 0))) && + (irq == mp_irqs[i].srcbusirq)) { + irq = pin_2_irq(i, ioepic_idx, mp_irqs[i].dstirq); + epic_printk("Found our bus %d, src IRQ %d -> dst IRQ %d\n", + lbus, mp_irqs[i].srcbusirq, irq); + return irq; + } + } + epic_printk("io_epic_get_fix_irq_vector() could not find IRQ\n"); + return -1; +} +EXPORT_SYMBOL(io_epic_get_fix_irq_vector); + +int ioepic_suspend(void) +{ + int epic, pin; + int err = 0; + + for (epic = 0; epic < nr_ioepics; epic++) { + if (!ioepics[epic].saved_registers) { + err = -ENOMEM; + continue; + } + + for (pin = 0; pin < ioepics[epic].nr_registers; pin++) + 
ioepics[epic].saved_registers[pin] = + ioepic_read_entry(epic, pin); + } + + return err; +} + +static void ioepic_resume_id(int ioepic_idx) +{ + unsigned long flags; + union IO_EPIC_ID reg_id; + + raw_spin_lock_irqsave(&ioepic_lock, flags); + reg_id.raw = io_epic_read(ioepic_idx, 0); + if (reg_id.bits.id != mpc_ioepic_id(ioepic_idx)) { + reg_id.bits.id = mpc_ioepic_id(ioepic_idx); + io_epic_write(ioepic_idx, 0, reg_id.raw); + } + raw_spin_unlock_irqrestore(&ioepic_lock, flags); +} + +/* + * Restore IO EPIC entries which was saved in the ioepic structure. + */ +int restore_ioepic_entries(void) +{ + int epic, pin; + + for (epic = 0; epic < nr_ioepics; epic++) { + if (!ioepics[epic].saved_registers) + continue; + + for (pin = 0; pin < ioepics[epic].nr_registers; pin++) + ioepic_write_entry(epic, pin, + ioepics[epic].saved_registers[pin]); + } + return 0; +} + +static void ioepic_resume(void) +{ + int ioepic_idx; + + for (ioepic_idx = nr_ioepics - 1; ioepic_idx >= 0; ioepic_idx--) + ioepic_resume_id(ioepic_idx); + + restore_ioepic_entries(); +} + +static struct syscore_ops ioepic_syscore_ops = { + .suspend = ioepic_suspend, + .resume = ioepic_resume, +}; + +static int __init ioepic_init_ops(void) +{ + register_syscore_ops(&ioepic_syscore_ops); + + return 0; +} + +device_initcall(ioepic_init_ops); + +void native_io_epic_print_entries(unsigned int epic, unsigned int nr_entries) +{ + int i; + + pr_info("NR Dest Mask Trig Stat Deli Vect Sid\n"); + + for (i = 0; i < nr_entries; i++) { + struct IO_EPIC_route_entry entry; + + entry = ioepic_read_entry(epic, i); + + pr_info("%-2d %-4d %1d %1d %1d %1d 0x%-3x 0x%-4x\n", + i, + cepic_id_full_to_short(entry.addr_low.bits.dst), + entry.int_ctrl.bits.mask, + entry.int_ctrl.bits.trigger, + entry.int_ctrl.bits.delivery_status, + entry.msg_data.bits.dlvm, + entry.msg_data.bits.vector, + entry.rid.raw); + } +} + +void print_IO_EPIC(int ioepic_idx) +{ + union IO_EPIC_ID reg_id; + union IO_EPIC_VERSION reg_version; + unsigned long flags; 
+ + raw_spin_lock_irqsave(&ioepic_lock, flags); + reg_id.raw = io_epic_read(ioepic_idx, IOEPIC_ID); + reg_version.raw = io_epic_read(ioepic_idx, IOEPIC_VERSION); + raw_spin_unlock_irqrestore(&ioepic_lock, flags); + + pr_info("Printing the registers of IO-EPIC#%d:\n", + mpc_ioepic_id(ioepic_idx)); + pr_info(".... IOEPIC_ID: 0x%x\n", reg_id.raw); + pr_info("....... physical IOEPIC id: %d\n", reg_id.bits.id); + pr_info("....... node id: %d\n", reg_id.bits.nodeid); + + pr_info(".... IOEPIC_VERSION: 0x%x\n", reg_version.raw); + pr_info("....... max redirection entries: %d\n", + reg_version.bits.entries); + pr_info("....... IO EPIC version: 0x%x\n", + reg_version.bits.version); + + pr_info(".... IRQ redirection table:\n"); + + native_io_epic_print_entries(ioepic_idx, + ioepics[ioepic_idx].nr_registers); +} + +void print_IO_EPICs(void) +{ + int ioepic_idx; + struct epic_irq_cfg *cfg; + unsigned int irq; + struct irq_chip *chip; + + pr_info("Number of MP IRQ sources: %d\n", mp_irq_entries); + for (ioepic_idx = 0; ioepic_idx < nr_ioepics; ioepic_idx++) + pr_info("Number of IO-EPIC #%d registers: %d.\n", + mpc_ioepic_id(ioepic_idx), + ioepics[ioepic_idx].nr_registers); + + for (ioepic_idx = 0; ioepic_idx < nr_ioepics; ioepic_idx++) + print_IO_EPIC(ioepic_idx); + + pr_info("IRQ -> ioepic:pin\n"); + for_each_active_irq(irq) { + chip = irq_get_chip(irq); + if (chip != &ioepic_chip) + continue; + + cfg = irq_get_chip_data(irq); + if (!cfg) + continue; + pr_info("%d -> %d:%d\n", irq, cfg->epic, cfg->pin); + } +} + +static inline void mask_msi_ioepic_irq(struct irq_data *data) +{ + unsigned long flags; + struct irq_cfg *cfg = irq_data_get_irq_chip_data(data); + + raw_spin_lock_irqsave(&ioepic_lock, flags); + __mask_ioepic_pin(ioapic_cfg_get_idx(cfg), ioapic_cfg_get_pin(cfg)); + raw_spin_unlock_irqrestore(&ioepic_lock, flags); +} + +static inline void unmask_msi_ioepic_irq(struct irq_data *data) +{ + unsigned long flags; + struct irq_cfg *cfg = irq_data_get_irq_chip_data(data); + + 
raw_spin_lock_irqsave(&ioepic_lock, flags); + __unmask_ioepic_pin(ioapic_cfg_get_idx(cfg), ioapic_cfg_get_pin(cfg)); + raw_spin_unlock_irqrestore(&ioepic_lock, flags); + +} + +void ack_msi_ioepic_edge(struct irq_data *data) +{ + ack_apic_edge(data); +} + +void ack_msi_ioepic_level(struct irq_data *data) +{ + struct irq_cfg *cfg = irq_data_get_irq_chip_data(data); + + ack_apic_edge(data); + ioepic_level_eoi(ioapic_cfg_get_idx(cfg), ioapic_cfg_get_pin(cfg)); +} + +static void msi_ioepic_set_affinity_pin(unsigned int epic, unsigned int pin, + unsigned int vector, unsigned int dest) +{ + unsigned int msg_data; + unsigned int addr_low; + bool masked; + + /* Mask the interrupt in IOEPIC */ + masked = __mask_ioepic_pin(epic, pin); + + msg_data = io_epic_read(epic, IOEPIC_TABLE_MSG_DATA(pin)); + addr_low = io_epic_read(epic, IOEPIC_TABLE_ADDR_LOW(pin)); + + /* + * IOH and IOH2 have a bug. We must duplicate + * destination into data. Fortunately it's possible + */ + msg_data = MSI_DATA_VECTOR(vector) | MSI_ADDR_DEST_ID(dest); + + addr_low &= ~MSI_ADDR_DEST_ID_MASK; + addr_low |= MSI_ADDR_DEST_ID(dest); + + io_epic_write(epic, IOEPIC_TABLE_MSG_DATA(pin), msg_data); + io_epic_write(epic, IOEPIC_TABLE_ADDR_LOW(pin), addr_low); + + /* Unmask the interrupt, if it was masked here */ + if (masked) + __unmask_ioepic_pin(epic, pin); + +} + +static int +msi_ioepic_set_affinity(struct irq_data *data, const struct cpumask *mask, + bool force) +{ + unsigned int dest; + unsigned long flags; + struct irq_cfg *cfg = irq_data_get_irq_chip_data(data); + unsigned int pin = ioapic_cfg_get_pin(cfg); + unsigned int epic = ioapic_cfg_get_idx(cfg); + int ret; + + /* Let IOAPIC find a new vector and dest id */ + if ((ret = __ioapic_set_affinity(data, mask, &dest))) { + pr_err("%s(): IOAPIC driver failed %d\n", __func__, ret); + return ret; + } + + raw_spin_lock_irqsave(&ioepic_lock, flags); + + /* Write changes to IOEPIC */ + msi_ioepic_set_affinity_pin(epic, pin, cfg->vector, dest); + + 
raw_spin_unlock_irqrestore(&ioepic_lock, flags); + + return IRQ_SET_MASK_OK_NOCOPY; +} + +/* + * IRQ Chip for MSI from IOEPIC to IOAPIC + */ +struct irq_chip ioepic_to_apic_chip = { + .name = "MSI-IOEPIC", + .irq_unmask = unmask_msi_ioepic_irq, + .irq_mask = mask_msi_ioepic_irq, + .irq_ack = ack_msi_ioepic_edge, + .irq_eoi = ack_msi_ioepic_level, + .irq_set_affinity = msi_ioepic_set_affinity, + .irq_retrigger = ioapic_retrigger_irq +}; + +/* + * io_epic_get_PCI_irq_vector is broken (srcbusirq >> 2 instead of 3) + * Also selects incorrect irq if one bus:dev:func has several irqs + */ +#if 0 +int ioepic_pin_to_irq_num(unsigned int pin, struct pci_dev *dev) +{ + return io_epic_get_PCI_irq_vector(dev->bus->number, + PCI_SLOT(dev->devfn), pin); +} +#else +int ioepic_pin_to_irq_num(unsigned int pin, struct pci_dev *dev) +{ + unsigned int dev_node = dev_to_node(&dev->dev); + unsigned int ioepic_idx; + + for (ioepic_idx = 0; ioepic_idx < nr_ioepics; ioepic_idx++) + if (dev_node == mpc_ioepic_nodeid(ioepic_idx)) + return mp_ioepic_gsi_base(ioepic_idx) + pin; + + return -1; +} +#endif + +/* Skip MP-table lookup on APIC machines */ +int ioepic_pin_to_msi_ioapic_irq(unsigned int pin, struct pci_dev *dev) +{ + unsigned int dev_node = dev_to_node(&dev->dev); + unsigned int ioepic_idx; + + for (ioepic_idx = 0; ioepic_idx < nr_ioepics; ioepic_idx++) + if (dev_node == mpc_ioepic_nodeid(ioepic_idx)) + return mp_ioepic_gsi_base(ioepic_idx) + pin; + + return -1; +} +/* + * Drivers should call this before request_irq() to find out the real IRQ they + * are working with. 
+ */ +int ioepic_pin_to_irq(unsigned int pin, struct pci_dev *dev) +{ + int irq = ioepic_pin_to_irq_pic(pin, dev); + + epic_printk("Device %x:%x devfn 0x%x requested IOEPIC pin %d to IRQ %d\n", + dev->vendor, dev->device, dev->devfn, pin, irq); + + return irq; +} +EXPORT_SYMBOL(ioepic_pin_to_irq); + + +static void __init msi_ioepic_write_entry(unsigned int epic, unsigned int pin, + unsigned int trigger, struct msi_msg *msg) +{ + struct IO_EPIC_route_entry entry; + + memset(&entry, 0, sizeof(entry)); + + entry.msg_data.raw = msg->data; + entry.addr_high = msg->address_hi; + entry.addr_low.raw = msg->address_lo; + + entry.int_ctrl.bits.trigger = trigger; + entry.int_ctrl.bits.mask = 1; + + ioepic_write_entry(epic, pin, entry); +} + +static int __init setup_msi_ioepic_irq(struct pci_dev *pdev, + unsigned int epic, unsigned int pin) +{ + struct msi_msg msg; + int ret; + irq_flow_handler_t hdl; + struct irq_cfg *cfg; + unsigned int irq = mp_ioepic_gsi_base(epic) + pin; + unsigned int trigger = pin_to_trigger[pin]; + + /* Add IOEPIC pin to IOAPIC's chip data */ + cfg = irq_get_chip_data(irq); + __add_pin_to_irq_node(cfg, 0, epic, pin); + + ret = msi_compose_msg(pdev, irq, &msg, -1); + if (ret < 0) + return ret; + + msi_ioepic_write_entry(epic, pin, trigger, &msg); + + hdl = trigger ? handle_fasteoi_irq : handle_edge_irq; + + irq_set_chip_and_handler_name(irq, &ioepic_to_apic_chip, hdl, + trigger ? 
"level" : "edge"); + + epic_printk("EIOH: IOEPIC irq %d for MSI/MSI-X\n", irq); + + return 0; +} + +static int __init alloc_msi_ioepic_irqs(unsigned int idx) +{ + unsigned int irq; + + /* Alloc an array of IOAPIC MSI IRQs on 0 node */ + irq = __create_irqs(get_nr_irqs_gsi(), IO_EPIC_NR_REGS, 0); + if (!irq) { + destroy_irqs(irq, IO_EPIC_NR_REGS); + return 1; + } + + ioepics[idx].gsi_config.gsi_base = irq; + ioepics[idx].gsi_config.gsi_end = irq + IO_EPIC_NR_REGS - 1; + + return 0; +} + +static void __init fixup_eioh_dev_irq(struct pci_dev *eioh_dev) +{ + unsigned int devfn = eioh_dev->devfn; + unsigned short dev_vendor = eioh_dev->vendor; + unsigned short dev_id = eioh_dev->device; + unsigned short vendor = PCI_VENDOR_ID_MCST_TMP; + unsigned short id; + int pin; + int irq; + + /* pin == -1: skip fixup for this device */ + switch (devfn) { + case PCI_DEVFN(0, 0): /* USB 3.0 */ + id = PCI_DEVICE_ID_MCST_USB_3_0; + pin = 28; + break; + case PCI_DEVFN(1, 0): /* Ethernet1G_0 */ + id = PCI_DEVICE_ID_MCST_MGB; + pin = 3; /* 3 - 7 */ + break; + case PCI_DEVFN(1, 1): /* Ethernet1G_1 */ + id = PCI_DEVICE_ID_MCST_MGB; + pin = 23; /* 23 - 27 */ + break; + case PCI_DEVFN(2, 0): /* GPIO/MPV */ + id = PCI_DEVICE_ID_MCST_GPIO_MPV_EIOH; + pin = 9; /* 9 - 13 */ + break; + case PCI_DEVFN(2, 1): /* I2C/SPI */ + id = PCI_DEVICE_ID_MCST_I2C_SPI_EPIC; + pin = 15; /* Also 0 (IPMB), 20 (WD Timer), 2 (System Timer) */ + break; + case PCI_DEVFN(2, 2): /* Serial Port */ + id = PCI_DEVICE_ID_MCST_SERIAL; + pin = 14; + break; + case PCI_DEVFN(2, 3): /* HDA */ + id = PCI_DEVICE_ID_MCST_HDA; + pin = 8; + break; + case PCI_DEVFN(3, 0): /* SATA 3.0 */ + id = PCI_DEVICE_ID_MCST_SATA; + pin = 21; + break; + case PCI_DEVFN(4, 0): /* Ethernet10G */ + id = PCI_DEVICE_ID_MCST_XGBE; + pin = -1; + break; + case PCI_DEVFN(5, 0): /* PCIe 3.0 x16 (x8/x4) */ + id = PCI_DEVICE_ID_MCST_PCIE_X16; + pin = -1; + break; + case PCI_DEVFN(6, 0): /* PCIe 3.0 x8 (x4) */ + id = PCI_DEVICE_ID_MCST_PCIE_X16; + pin = -1; + 
break; + case PCI_DEVFN(7, 0): /* PCIe 3.0 x4 */ + id = PCI_DEVICE_ID_MCST_PCIE_X4; + pin = -1; + break; + case PCI_DEVFN(8, 0): /* PCIe 3.0 x4 */ + id = PCI_DEVICE_ID_MCST_PCIE_X4; + pin = -1; + break; + case PCI_DEVFN(9, 0): /* SPMC */ + id = PCI_DEVICE_ID_MCST_SPMC; + pin = 1; + break; + case PCI_DEVFN(10, 0): /* IOHub2 (WLCC) */ + /* + * WLCC controller doesn't have PCI config space + * Instead we should see IOHub2's PCI bridge + */ + id = PCI_DEVICE_ID_MCST_PCI_BRIDGE; + pin = -1; + break; + default: + pr_err("EIOH: Found unknown device %x:%x, devfn 0x%x\n", + dev_vendor, dev_id, devfn); + return; + } + + if (dev_vendor != vendor || dev_id != id) + pr_err("EIOH: PCI vendor/device id mismatch. Expected %x:%x, got %x:%x. devfn 0x%x\n", + vendor, id, dev_vendor, dev_id, devfn); + else + epic_printk("EIOH: Found device %x:%x, devfn 0x%x\n", + vendor, id, devfn); + + /* Fixup pin and irq in pci_dev */ + if (pin >= 0) { + irq = ioepic_pin_to_irq(pin, eioh_dev); + if (irq >= 0) { + epic_printk("EIOH: Fixup pin %d->%d and IRQ %d->%d\n", + eioh_dev->pin, pin, eioh_dev->irq, irq); + eioh_dev->pin = pin; + eioh_dev->irq = irq; + return; + } + } + + epic_printk("EIOH: Default pin %d, IRQ %d\n", eioh_dev->pin, + eioh_dev->irq); +} + +static int __init __setup_msi_ioepic(void) +{ + struct pci_dev *pdev_ioepic = NULL; + struct pci_dev *pdev_ioapic = NULL; + unsigned int ioepic_idx; + unsigned int pin; + unsigned int ioepic_base_addr; + + /* Find all IOEPICs and save their base addresses in mp_config */ + while ((pdev_ioepic = pci_get_device(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_I2C_SPI_EPIC, pdev_ioepic))) { + union IO_EPIC_VERSION reg_version; + + pci_set_master(pdev_ioepic); + /* Read IOEPIC base address from BAR3 of I2C/SPI */ + pci_read_config_dword(pdev_ioepic, PCI_BASE_ADDRESS_3, + &ioepic_base_addr); + epic_printk("EIOH: Found IOEPIC#%d base: 0x%x\n", nr_ioepics, + ioepic_base_addr); + ioepics[nr_ioepics].mp_config.epicaddr = ioepic_base_addr; + 
ioepics[nr_ioepics].mp_config.nodeid = + dev_to_node(&pdev_ioepic->dev); + ioepics[nr_ioepics].ioepic = pdev_ioepic; + + reg_version.raw = io_epic_read(nr_ioepics, IOEPIC_VERSION); + ioepics[nr_ioepics].mp_config.epicver = reg_version.bits.version; + + nr_ioepics++; + } + + if (!nr_ioepics) { + epic_printk("EIOH: Could not find I2C/SPI IOEPIC device\n"); + return 1; + } + + /* Find IOAPIC I2C/SPI device on node 0 */ + pdev_ioapic = NULL; + while ((pdev_ioapic = pci_get_device(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_I2C_SPI, pdev_ioapic))) { + if (dev_to_node(&pdev_ioapic->dev)) { + epic_printk("EIOH: Skip IOAPIC I2C/SPI on node %d\n", + dev_to_node(&pdev_ioapic->dev)); + continue; + } + break; + } + + if (!pdev_ioapic) { + while ((pdev_ioapic = pci_get_device(PCI_VENDOR_ID_ELBRUS, + PCI_DEVICE_ID_MCST_I2CSPI, pdev_ioapic))) { + if (dev_to_node(&pdev_ioapic->dev)) { + epic_printk("EIOH: Skip IOAPIC I2C/SPI on node %d\n", + dev_to_node(&pdev_ioapic->dev)); + continue; + } + break; + } + } + + if (!pdev_ioapic) { + epic_printk("EIOH: Could not find I2C/SPI IOAPIC device\n"); + return 1; + } + + + for (ioepic_idx = 0; ioepic_idx < nr_ioepics; ioepic_idx++) { + if (alloc_msi_ioepic_irqs(ioepic_idx)) { + epic_printk("EIOH: Failed to alloc MSI IRQs for IOEPIC#%d\n", + ioepic_idx); + return 1; + } + + for (pin = 0; pin < IO_EPIC_NR_REGS; pin++) + setup_msi_ioepic_irq(pdev_ioapic, ioepic_idx, pin); + } + + for (ioepic_idx = 0; ioepic_idx < nr_ioepics; ioepic_idx++) { + struct pci_dev *dev; + for_each_pci_dev(dev) { + if (dev->bus->number == + ioepics[ioepic_idx].ioepic->bus->number) + fixup_eioh_dev_irq(dev); + } + } + + return 0; +} + +/* Only setup IOEPIC this way, if this is an APIC system */ +static int __init setup_msi_ioepic(void) +{ + if (!cpu_has_epic()) + return __setup_msi_ioepic(); + else + return 0; +} +subsys_initcall_sync(setup_msi_ioepic); + +static void __init fixup_iohub2_dev_irq(struct pci_dev *iohub2_dev) +{ + unsigned int devfn = iohub2_dev->devfn; 
+ unsigned short dev_vendor = iohub2_dev->vendor; + unsigned short dev_id = iohub2_dev->device; + unsigned short vendor = PCI_VENDOR_ID_MCST_TMP; + unsigned short id; + int pin; + int irq; + + /* pin == -1: skip fixup for this device */ + switch (devfn) { + case PCI_DEVFN(0, 0): /* PCI bridge */ + id = PCI_DEVICE_ID_MCST_PCI_BRIDGE; + pin = -1; + break; + case PCI_DEVFN(1, 0): /* Ethernet_0 */ + id = PCI_DEVICE_ID_MCST_ETH; + pin = 4; + break; + case PCI_DEVFN(1, 1): /* Ethernet_1 */ + id = PCI_DEVICE_ID_MCST_ETH; + pin = 10; + break; + case PCI_DEVFN(1, 2): /* Ethernet_2 */ + id = PCI_DEVICE_ID_MCST_ETH; + pin = 14; + break; + case PCI_DEVFN(2, 0): /* IDE */ + id = PCI_DEVICE_ID_MCST_IDE_SDHCI; + pin = 11; + break; + case PCI_DEVFN(2, 1): /* I2C/SPI+IOAPIC */ + id = PCI_DEVICE_ID_MCST_I2C_SPI; + pin = 15; /* Also 0 (PIC), 8 (WD Timer), 2 (System Timer) */ + break; + case PCI_DEVFN(2, 2): /* Serial Port (ieee1284 + rs232) */ + id = PCI_DEVICE_ID_MCST_PARALLEL_SERIAL; + pin = 3; + break; + case PCI_DEVFN(2, 3): /* HDA */ + id = PCI_DEVICE_ID_MCST_HDA; + pin = 5; + break; + case PCI_DEVFN(2, 4): /* GPIO+MPV */ + id = PCI_DEVICE_ID_MCST_GPIO_MPV; + pin = 6; /* Also 9 (Mpv_timers12), 7 (GPIO0), 11 (GPIO1) */ + break; + case PCI_DEVFN(3, 0): /* SATA 3.0 */ + id = PCI_DEVICE_ID_MCST_SATA; + pin = 20; + break; + case PCI_DEVFN(3, 1): /* SATA 3.0 */ + id = PCI_DEVICE_ID_MCST_SATA; + pin = 21; + break; + case PCI_DEVFN(4, 0): /* PCIe 2.0 x4/x1/x1 */ + id = PCI_DEVICE_ID_MCST_PCIe1; + pin = -1; + break; + case PCI_DEVFN(5, 0): /* PCIe 2.0 x2/x1 */ + id = PCI_DEVICE_ID_MCST_PCIe1; + pin = -1; + break; + case PCI_DEVFN(6, 0): /* PCIe 2.0 x1 */ + id = PCI_DEVICE_ID_MCST_PCIe1; + pin = -1; + break; + case PCI_DEVFN(7, 0): /* PCIe 2.0 x1 */ + id = PCI_DEVICE_ID_MCST_PCIe1; + pin = -1; + break; + case PCI_DEVFN(8, 0): /* PCIe 2.0 x16/x8 */ + id = PCI_DEVICE_ID_MCST_PCIe8; + pin = -1; + break; + case PCI_DEVFN(9, 0): /* PCIe 2.0 x8 */ + id = PCI_DEVICE_ID_MCST_PCIe8; + pin = 1; + 
break; + case PCI_DEVFN(10, 0): /* OHCI-USB 2.0 */ + id = PCI_DEVICE_ID_MCST_OHCI; + pin = 12; + break; + case PCI_DEVFN(10, 1): /* EHCI-USB 2.0 */ + id = PCI_DEVICE_ID_MCST_EHCI; + pin = 12; + break; + case PCI_DEVFN(11, 0): /* OHCI-USB 2.0 */ + id = PCI_DEVICE_ID_MCST_OHCI; + pin = 13; + break; + case PCI_DEVFN(11, 1): /* EHCI-USB 2.0 */ + id = PCI_DEVICE_ID_MCST_EHCI; + pin = 13; + break; + case PCI_DEVFN(12, 0): /* SPMC */ + id = PCI_DEVICE_ID_MCST_SPMC; + pin = -1; + break; + default: + pr_err("IOHUB2: Found unknown device %x:%x, devfn 0x%x\n", + dev_vendor, dev_id, devfn); + return; + } + + if (dev_vendor != vendor || dev_id != id) + pr_err("IOHUB2: PCI vendor/device id mismatch. Expected %x:%x, got %x:%x. devfn 0x%x\n", + vendor, id, dev_vendor, dev_id, devfn); + else + epic_printk("IOHUB2: Found device %x:%x, devfn 0x%x\n", + vendor, id, devfn); + + /* Fixup pin and irq in pci_dev */ + if (pin >= 0) { + irq = mp_ioapic_gsi_routing(0)->gsi_base + pin; + epic_printk("IOHUB2: Fixup pin %d->%d and IRQ %d->%d\n", + iohub2_dev->pin, pin, iohub2_dev->irq, irq); + iohub2_dev->pin = pin; + iohub2_dev->irq = irq; + return; + } + + epic_printk("IOHUB2: Default pin %d, IRQ %d\n", iohub2_dev->pin, + iohub2_dev->irq); +} + + +/* TODO This currently only works with one IOHUB2 / IOAPIC */ +static int __init __setup_epic_ioapic(void) +{ + struct pci_dev *pdev_ioapic = NULL; + struct pci_dev *dev = NULL; + + /* Find IOAPIC I2C/SPI device */ + pdev_ioapic = NULL; + while ((pdev_ioapic = pci_get_device(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_I2C_SPI, pdev_ioapic))) + break; + + if (!pdev_ioapic) { + epic_printk("IOHUB2: Could not find I2C/SPI IOAPIC device\n"); + return 1; + } + + pci_set_master(pdev_ioapic); + + for_each_pci_dev(dev) { + if (dev->bus->number == pdev_ioapic->bus->number) + fixup_iohub2_dev_irq(dev); + } + + return 0; +} +/* Only setup IOAPIC this way, if this is an EPIC system */ +static int __init setup_epic_ioapic(void) +{ + if (cpu_has_epic()) + return 
__setup_epic_ioapic(); + else + return 0; +} +subsys_initcall_sync(setup_epic_ioapic); + +/* + * Dynamic irq allocate and deallocation + */ +unsigned int __epic_create_irqs(unsigned int from, unsigned int count, int node) +{ + struct epic_irq_cfg **cfg; + unsigned long flags; + int irq, i; + int gsi_irqs = get_nr_irqs_gsi(); + + if (from < gsi_irqs) + from = gsi_irqs; + + cfg = kzalloc_node(count * sizeof(cfg[0]), GFP_KERNEL, node); + if (!cfg) + return 0; + + irq = irq_alloc_descs_from(from, count, node); + if (irq < 0) + goto out_cfgs; + + for (i = 0; i < count; i++) { + cfg[i] = alloc_irq_cfg(node); + if (!cfg[i]) + goto out_irqs; + } + + raw_spin_lock_irqsave(&vector_lock, flags); + for (i = 0; i < count; i++) + if (__epic_assign_irq_vector(irq + i, cfg[i], cpu_online_mask)) + goto out_vecs; + raw_spin_unlock_irqrestore(&vector_lock, flags); + + for (i = 0; i < count; i++) { + irq_set_chip_data(irq + i, cfg[i]); + irq_clear_status_flags(irq + i, IRQ_NOREQUEST); + } + + kfree(cfg); + return irq; + +out_vecs: + for (i--; i >= 0; i--) + __epic_clear_irq_vector(irq + i, cfg[i]); + raw_spin_unlock_irqrestore(&vector_lock, flags); +out_irqs: + for (i = 0; i < count; i++) { + if (cfg[i]) { + irq_set_chip_data(irq + i, NULL); + kfree(cfg[i]); + } + irq_free_desc(irq + i); + } +out_cfgs: + kfree(cfg); + return 0; +} + +unsigned int epic_create_irq_nr(unsigned int from, int node) +{ + return __epic_create_irqs(from, 1, node); +} + +void epic_destroy_irq(unsigned int irq) +{ + struct epic_irq_cfg *cfg = irq_get_chip_data(irq); + unsigned long flags; + + irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); + + raw_spin_lock_irqsave(&vector_lock, flags); + __epic_clear_irq_vector(irq, cfg); + raw_spin_unlock_irqrestore(&vector_lock, flags); + + if (cfg) { + irq_set_chip_data(irq, NULL); + kfree(cfg); + } + irq_free_desc(irq); +} + +void epic_destroy_irqs(unsigned int irq, unsigned int count) +{ + unsigned int i; + + for (i = 0; i < count; i++) + epic_destroy_irq(irq + i); 
+} + +/* + * MSI message composition + */ +void native_epic_compose_msi_msg(struct pci_dev *pdev, + unsigned int irq, + struct msi_msg *msg) +{ + struct iohub_sysdata *sd = pdev->bus->sysdata; + struct epic_irq_cfg *cfg = irq_get_chip_data(irq); + union IO_EPIC_MSG_ADDR_LOW addr_low; + union IO_EPIC_MSG_DATA data; + + addr_low.raw = 0; + addr_low.bits.MSI = sd->pci_msi_addr_lo >> 20; + addr_low.bits.dst = cepic_id_short_to_full(cfg->dest); + + data.raw = 0; + data.bits.vector = cfg->vector; + + msg->address_hi = sd->pci_msi_addr_hi; + msg->address_lo = addr_low.raw; + msg->data = data.raw; + + epic_printk("MSI for %s: irq %d : addr_lo = 0x%08x, data = 0x%08x\n", + pdev->bus->name, irq, msg->address_lo, msg->data); +} + +int epic_msi_compose_msg(struct pci_dev *pdev, unsigned int irq, + struct msi_msg *msg) +{ + struct epic_irq_cfg *cfg; + int err; + +#ifdef CONFIG_E2K + if (e2k_msi_disabled) + return -ENXIO; +#endif + cfg = irq_get_chip_data(irq); + err = epic_assign_irq_vector(irq, cfg, cpu_online_mask); + if (err) + return err; + + native_epic_compose_msi_msg(pdev, irq, msg); + + return 0; +} + +static int +epic_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, + bool force) +{ + struct epic_irq_cfg *cfg = irq_data_get_irq_chip_data(data); + struct msi_msg msg; + unsigned int dest; + + union IO_EPIC_MSG_ADDR_LOW addr_low; + union IO_EPIC_MSG_DATA msg_data; + + if (__ioepic_set_affinity(data, mask, &dest)) + return -1; + + __get_cached_msi_msg(data->common->msi_desc, &msg); + + addr_low.raw = msg.address_lo; + addr_low.bits.dst = cepic_id_short_to_full(cfg->dest); + + msg_data.raw = msg.data; + msg_data.bits.vector = cfg->vector; + + msg.address_lo = addr_low.raw; + msg.data = msg_data.raw; + + pci_write_msi_msg(data->irq, &msg); + + return IRQ_SET_MASK_OK_NOCOPY; +} + +/* + * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices, + * which implement the MSI or MSI-X Capability Structure. 
+ */ +static struct irq_chip msi_chip = { + .name = "PCI-MSI", + .irq_unmask = pci_msi_unmask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_ack = ack_epic_edge, + .irq_set_affinity = epic_msi_set_affinity, + .irq_retrigger = ioepic_retrigger_irq, +}; + +int epic_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, + unsigned int irq_base, unsigned int irq_offset) +{ + struct irq_chip *chip = &msi_chip; + struct msi_msg msg; + unsigned int irq = irq_base + irq_offset; + int ret; + + ret = epic_msi_compose_msg(dev, irq, &msg); + if (ret < 0) + return ret; + + irq_set_msi_desc_off(irq_base, irq_offset, msidesc); + + /* + * MSI-X message is written per-IRQ, the offset is always 0. + * MSI message denotes a contiguous group of IRQs, written for 0th IRQ. + */ + if (!irq_offset) + pci_write_msi_msg(irq, &msg); + + irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); + + dev_dbg(&dev->dev, "irq %d for MSI/MSI-X\n", irq); + + return 0; +} + +int native_setup_msi_irqs_epic(struct pci_dev *dev, int nvec, int type) +{ + unsigned int irq, irq_want; + struct msi_desc *msidesc; + int node, ret; + + /* Multiple MSI vectors only supported with interrupt remapping */ + if (type == PCI_CAP_ID_MSI && nvec > 1) + return 1; + + node = dev_to_node(&dev->dev); + irq_want = get_nr_irqs_gsi(); + list_for_each_entry(msidesc, dev_to_msi_list(&dev->dev), list) { + irq = epic_create_irq_nr(irq_want, node); + if (irq == 0) + return -ENOSPC; + + irq_want = irq + 1; + + ret = epic_setup_msi_irq(dev, msidesc, irq, 0); + if (ret < 0) + goto error; + } + return 0; + +error: + epic_destroy_irq(irq); + return ret; +} + +void native_teardown_msi_irq_epic(unsigned int irq) +{ + epic_destroy_irq(irq); +} + +static bool irqchip_is_ioepic(struct irq_chip *chip) +{ + return chip == &ioepic_chip || chip == &msi_chip; +} + +void fixup_irqs_epic(void) +{ + unsigned int vector; + + /* + * We can remove mdelay() and then send spuriuous interrupts to + * new cpu targets for all the irqs that 
were handled previously by + * this cpu. While it works, I have seen spurious interrupt messages + * (nothing wrong but still...). + * + * So for now, retain mdelay(1) and check the IRR and then send those + * interrupts to new targets as this cpu is already offlined... + */ + mdelay(1); + + for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { + unsigned int irr; + + if (__this_cpu_read(vector_irq[vector]) < 0) + continue; + + irr = epic_read_w(CEPIC_PMIRR + vector / 32 * 0x4); + if (irr & (1 << (vector % 32))) { + unsigned int irq = __this_cpu_read(vector_irq[vector]); + struct irq_desc *desc = irq_to_desc(irq); + struct irq_data *data = irq_desc_get_irq_data(desc); + struct irq_chip *chip = irq_data_get_irq_chip(data); + raw_spin_lock(&desc->lock); + if (chip->irq_retrigger) + chip->irq_retrigger(data); + raw_spin_unlock(&desc->lock); + } + __this_cpu_write(vector_irq[vector], -1); + } +} diff --git a/arch/l/kernel/epic/ipi.c b/arch/l/kernel/epic/ipi.c new file mode 100644 index 000000000000..070205e142f6 --- /dev/null +++ b/arch/l/kernel/epic/ipi.c @@ -0,0 +1,125 @@ +#include +#include + +#include + +/* + * The following functions deal with sending EPIC IPIs between CPUs + * We use 'broadcast', CPU->CPU IPIs and self-IPIs too + */ +void epic_wait_icr_idle(void) +{ + union cepic_icr reg; + + reg.raw = epic_read_d(CEPIC_ICR); + while (reg.bits.stat) { + cpu_relax(); + reg.raw = epic_read_d(CEPIC_ICR); + } +} + +static unsigned int epic_safe_wait_icr_idle(void) +{ + union cepic_icr reg; + int timeout; + + timeout = 0; + do { + reg.raw = epic_read_d(CEPIC_ICR); + if (!reg.bits.stat) + break; + inc_irq_stat(icr_read_retry_count); + udelay(100); + } while (timeout++ < 1000); + + return reg.bits.stat; +} + +/* + * Send an IPI to another CPU. 
Destination is specified in CEPIC_ICR2 + */ +void epic_send_IPI(unsigned int dest_id, int vector) +{ + union cepic_icr reg; + + /* + * Wait if other IPI is currently being delivered + */ + if (unlikely(vector == NMI_VECTOR)) { + if (epic_safe_wait_icr_idle()) + pr_err("ERROR : CEPIC : ICR safe 1 sec wait failed\n"); + } else { + epic_wait_icr_idle(); + } + + /* + * Set destination in CEPIC_ICR2 + */ + reg.raw = 0; + reg.bits.dst = cepic_id_short_to_full(dest_id); + + reg.bits.dst_sh = CEPIC_ICR_DST_FULL; + + if (vector != NMI_VECTOR) { + reg.bits.dlvm = CEPIC_ICR_DLVM_FIXED_IPI; + reg.bits.vect = vector; + } else { + reg.bits.dlvm = CEPIC_ICR_DLVM_NMI; + } + + /* + * Send the IPI by writing to CEPIC_ICR + */ + epic_write_d(CEPIC_ICR, reg.raw); +} + +void epic_send_IPI_mask(const struct cpumask *mask, int vector) +{ + unsigned long query_cpu; + unsigned long flags; + + local_irq_save(flags); + for_each_cpu(query_cpu, mask) { + epic_send_IPI(query_cpu, vector); + } + local_irq_restore(flags); +} + +void epic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) +{ + unsigned int this_cpu = smp_processor_id(); + unsigned long query_cpu; + unsigned long flags; + + local_irq_save(flags); + for_each_cpu(query_cpu, mask) { + if (query_cpu == this_cpu) + continue; + epic_send_IPI(query_cpu, vector); + } + local_irq_restore(flags); +} + +static inline void epic_send_IPI_shortcut(unsigned int shortcut, int vector) +{ + union cepic_icr reg; + + epic_wait_icr_idle(); + + reg.raw = 0; + reg.bits.dst_sh = shortcut; + + if (vector != NMI_VECTOR) { + reg.bits.dlvm = CEPIC_ICR_DLVM_FIXED_IPI; + reg.bits.vect = vector; + } else { + reg.bits.dlvm = CEPIC_ICR_DLVM_NMI; + } + + epic_write_d(CEPIC_ICR, reg.raw); +} + +void epic_send_IPI_self(int vector) +{ + epic_send_IPI_shortcut(CEPIC_ICR_DST_SELF, vector); +} diff --git a/arch/l/kernel/epic/irq.c b/arch/l/kernel/epic/irq.c new file mode 100644 index 000000000000..f1d425cac85a --- /dev/null +++ b/arch/l/kernel/epic/irq.c @@ 
-0,0 +1,173 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include + +__init_recv +void epic_init_system_handlers_table(void) +{ + /* + * Initialize interrupt[] array of system interrupts' handlers. + */ + +#ifdef CONFIG_SMP + /* + * The reschedule interrupt is a CPU-to-CPU reschedule-helper + * IPI, driven by wakeup. + */ + setup_PIC_vector_handler(EPIC_RESCHEDULE_VECTOR, + epic_smp_reschedule_interrupt, 1, + "epic_smp_reschedule_interrupt"); + + /* IPI for generic function call */ + setup_PIC_vector_handler(EPIC_CALL_FUNCTION_VECTOR, + epic_smp_call_function_interrupt, 1, + "epic_smp_call_function_interrupt"); + + /* IPI for generic single function call */ + setup_PIC_vector_handler(EPIC_CALL_FUNCTION_SINGLE_VECTOR, + epic_smp_call_function_single_interrupt, 1, + "epic_smp_call_function_single_interrupt"); + + /* Low priority IPI to cleanup after moving an irq (IOAPIC) */ + setup_PIC_vector_handler(IRQ_MOVE_CLEANUP_VECTOR, + epic_smp_irq_move_cleanup_interrupt, 0, + "epic_smp_irq_move_cleanup_interrupt"); +#endif + /* self generated IPI for CEPIC timer */ + setup_PIC_vector_handler(CEPIC_TIMER_VECTOR, + epic_smp_timer_interrupt, 1, + "epic_smp_timer_interrupt"); + + /* IPI vectors for EPIC spurious and error interrupts */ + setup_PIC_vector_handler(SPURIOUS_EPIC_VECTOR, + epic_smp_spurious_interrupt, 1, + "epic_smp_spurious_interrupt"); + setup_PIC_vector_handler(ERROR_EPIC_VECTOR, + epic_smp_error_interrupt, 1, + "epic_smp_error_interrupt"); + + setup_PIC_vector_handler(EPIC_IRQ_WORK_VECTOR, + epic_smp_irq_work_interrupt, 1, + "epic_smp_irq_work_interrupt"); + + /* PREPIC error handler */ + setup_PIC_vector_handler(PREPIC_ERROR_VECTOR, + prepic_smp_error_interrupt, 1, + "prepic_error_interrupt"); + + /* IPI delivery to inactive guest (virtualization only) */ + setup_PIC_vector_handler(CEPIC_EPIC_INT_VECTOR, + 
cepic_epic_interrupt, 1, + "cepic_epic_interrupt"); + +#ifdef CONFIG_KVM_ASYNC_PF + if (cpu_has(CPU_FEAT_ISET_V6) && READ_CORE_MODE_REG().gmi) { + setup_PIC_vector_handler(ASYNC_PF_WAKE_VECTOR, + epic_pv_apf_wake, 1, + "async_pf_wake_interrupt"); + } +#endif /* CONFIG_KVM_ASYNC_PF */ + +#ifdef CONFIG_E2K + /* PREPIC LINP handlers */ + setup_PIC_vector_handler(LINP0_INTERRUPT_VECTOR, + epic_hc_emerg_interrupt, 1, + "hc_emerg_interrupt"); + + setup_PIC_vector_handler(LINP1_INTERRUPT_VECTOR, + epic_iommu_interrupt, 1, + "iommu_interrupt"); + + setup_PIC_vector_handler(LINP2_INTERRUPT_VECTOR, + epic_uncore_interrupt, 1, + "uncore_interrupt"); + + setup_PIC_vector_handler(LINP3_INTERRUPT_VECTOR, + epic_ipcc_interrupt, 1, + "ipcc_interrupt"); + + setup_PIC_vector_handler(LINP4_INTERRUPT_VECTOR, + epic_hc_interrupt, 1, + "hc_interrupt"); + + setup_PIC_vector_handler(LINP5_INTERRUPT_VECTOR, + epic_pcs_interrupt, 1, + "pcs_interrupt"); +#endif +} + +static void unknown_nmi_error(unsigned int reason, struct pt_regs *regs) +{ + pr_warn("NMI received for unknown reason %x on CPU %d.\n", + reason, smp_processor_id()); +} + +noinline notrace void epic_do_nmi(struct pt_regs *regs) +{ + union cepic_pnmirr reason; +#ifdef CONFIG_EARLY_PRINTK + int console_switched; +#endif + + reason.raw = epic_read_w(CEPIC_PNMIRR); + + /* + * Immediately allow receiving of next NM interrupts. + * Must be done before handling to avoid losing interrupts like this: + * + * cpu0 cpu1 + * -------------------------------------------- + * set flag for cpu 0 + * and send an NMI + * enter handler and + * clear the flag + * because flag is cleared, + * set it again and send + * the next NMI + * clear CEPIC_PNMIRR + * + * In this example cpu0 will never receive the second NMI. 
+ */ + epic_write_w(CEPIC_PNMIRR, CEPIC_PNMIRR_BIT_MASK); + +#ifdef CONFIG_EARLY_PRINTK + /* We should not use normal printk() from inside the NMI handler */ + console_switched = switch_to_early_dump_console(); +#endif + + if (reason.bits.nmi) { +#ifdef CONFIG_E2K + /* NMI IPIs are used only by nmi_call_function() */ + nmi_call_function_interrupt(); +#endif + reason.bits.nmi = 0; + } + + if (reason.raw & CEPIC_PNMIRR_BIT_MASK) + unknown_nmi_error(reason.raw, regs); + +#ifdef CONFIG_EARLY_PRINTK + if (console_switched) + switch_from_early_dump_console(); +#endif +} diff --git a/arch/l/kernel/epic/irq_work.c b/arch/l/kernel/epic/irq_work.c new file mode 100644 index 000000000000..b4972304d4f2 --- /dev/null +++ b/arch/l/kernel/epic/irq_work.c @@ -0,0 +1,26 @@ +#include +#include +#include +#include +#include +#include + +static inline void __epic_smp_irq_work_interrupt(void) +{ + inc_irq_stat(apic_irq_work_irqs); + irq_work_run(); +} + +__visible void epic_smp_irq_work_interrupt(struct pt_regs *regs) +{ + l_irq_enter(); + ack_epic_irq(); + __epic_smp_irq_work_interrupt(); + l_irq_exit(); +} + +void epic_irq_work_raise(void) +{ + epic_send_IPI_self(EPIC_IRQ_WORK_VECTOR); + epic_wait_icr_idle(); +} diff --git a/arch/l/kernel/epic/smp.c b/arch/l/kernel/epic/smp.c new file mode 100644 index 000000000000..700fe7339c09 --- /dev/null +++ b/arch/l/kernel/epic/smp.c @@ -0,0 +1,85 @@ +/* + * SMP IPI Support + */ + +#include +#include +#include + +#include + +/* + * the following functions deal with sending IPIs between CPUs. + * + * We use 'broadcast', CPU->CPU IPIs and self-IPIs too. + */ + +void epic_send_call_function_ipi_mask(const struct cpumask *mask) +{ + epic_send_IPI_mask(mask, EPIC_CALL_FUNCTION_VECTOR); +} + +void epic_send_call_function_single_ipi(int cpu) +{ + epic_send_IPI(cpu, EPIC_CALL_FUNCTION_SINGLE_VECTOR); +} + +/* + * this function sends a 'reschedule' IPI to another CPU. + * it goes straight through and wastes no time serializing + * anything. 
Worst case is that we lose a reschedule ... + */ +void epic_smp_send_reschedule(int cpu) +{ + if (unlikely(cpu_is_offline(cpu))) { + WARN_ON(1); + return; + } + current->intr_sc = get_cycles(); + epic_send_IPI(cpu, EPIC_RESCHEDULE_VECTOR); +} + +/* + * Reschedule call back. Nothing to do, + * all the work is done automatically when + * we return from the interrupt. + */ +static inline void __epic_smp_reschedule_interrupt(void) +{ + inc_irq_stat(irq_resched_count); + scheduler_ipi(); +} + +__visible void epic_smp_reschedule_interrupt(struct pt_regs *regs) +{ + ack_epic_irq(); + __epic_smp_reschedule_interrupt(); +} + +static inline void __epic_smp_call_function_interrupt(void) +{ + generic_smp_call_function_interrupt(); + inc_irq_stat(irq_call_count); +} + +__visible void epic_smp_call_function_interrupt(struct pt_regs *regs) +{ + ack_epic_irq(); + l_irq_enter(); + __epic_smp_call_function_interrupt(); + l_irq_exit(); +} + +static inline void __epic_smp_call_function_single_interrupt(void) +{ + generic_smp_call_function_single_interrupt(); + inc_irq_stat(irq_call_count); +} + +__visible void epic_smp_call_function_single_interrupt(struct pt_regs *regs) +{ + ack_epic_irq(); + l_irq_enter(); + __epic_smp_call_function_single_interrupt(); + l_irq_exit(); +} diff --git a/arch/l/kernel/gpio.c b/arch/l/kernel/gpio.c new file mode 100644 index 000000000000..3118735acc1a --- /dev/null +++ b/arch/l/kernel/gpio.c @@ -0,0 +1,662 @@ +/* + * arch/l/kernel/gpio.c + * + * Copyright (C) 2012 Evgeny Kravtsunov MCST. + * + * GPIOLIB implementation for MCST-GPIO controller (part of Elbrus MCST). + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if IS_ENABLED(CONFIG_INPUT_LTC2954) +#include +#include +#include +#endif /* CONFIG_INPUT_LTC2954 */ + +#include +#include +#include +#include + +#include +/* Offsets from BAR for MCST GPIO registers */ +#define L_GPIO_CNTRL 0x00 +#define L_GPIO_DATA 0x04 +#define L_GPIO_INT_CLS 0x08 +#define L_GPIO_INT_LVL 0x0c +#define L_GPIO_INT_EN 0x10 +#define L_GPIO_INT_STS 0x14 + +#define L_GPIO_ONE_MASK(x) (1 << (x)) +#define L_GPIO_ZERO_MASK(x) (~(1 << (x))) + +/* Configuration values */ +#define L_GPIO_CNTRL_IN 0x00000000 /* Input mode for all pins */ +#define L_GPIO_CNTRL_OUT 0x0000ffff /* Output mode for all pins */ +#define L_GPIO_INT_ENABLE 0x0000ffff /* Interrupts enabled for all */ +#define L_GPIO_INT_DISABLE 0x00000000 /* Interrupts disabled for all*/ +#define L_GPIO_INT_CLS_LVL 0x00000000 /* Enable level interrupts */ +#define L_GPIO_INT_CLS_EDGE 0x0000ffff /* Enable edge interrupts */ +#define L_GPIO_INT_LVL_RISE 0x0000ffff /* Rising edge detection (0->1)*/ +#define L_GPIO_INT_LVL_FALL 0x00000000 /* Falling edeg detection */ + +/* Predefined default configuration values */ +/* Input mode for all pins by default: */ +#define L_GPIO_CNTRL_DEF L_GPIO_CNTRL_IN +/* Interrupts from all pins disabled by default: */ +#define L_GPIO_INT_EN_DEF L_GPIO_INT_DISABLE +/* Interrupt mode for all pins - egde interrupts: */ +#define L_GPIO_INT_CLS_DEF L_GPIO_INT_CLS_EDGE +/* Interrupt mode for all pins - falling egde detection: */ +#define L_GPIO_INT_LVL_DEF L_GPIO_INT_LVL_FALL + +/* Sets of gpios */ +#define IOHUB_IRQ0_GPIO_START 0 +#define IOHUB_IRQ0_GPIO_END 7 +#define IOHUB_IRQ1_GPIO_START 8 
+#define IOHUB_IRQ1_GPIO_END 15 + +#define DRV_NAME "l-gpio" + + #define L_GPIO_MAX_IRQS 2 + +struct l_gpio_data { + int bar; + int lines; + struct { + int nr; + int start; + int end; + } irq[L_GPIO_MAX_IRQS]; +}; + + +struct l_gpio { + struct gpio_chip chip; /*Must be the first*/ + resource_size_t base; + void __iomem *base_ioaddr; + struct pci_dev *pdev; + raw_spinlock_t lock; + unsigned int irq_base; + struct l_gpio *next; + struct l_gpio_data data; +}; + +/* Registering gpio-bound devices on board. This is embedded style. */ +#if IS_ENABLED(CONFIG_INPUT_LTC2954) + +struct gpio_keys_button ltc2954_descr = { + .code = KEY_SLEEP, + .gpio = LTC2954_IRQ_GPIO_PIN, + .active_low = 0, + .type = EV_KEY, + .wakeup = 0, + .debounce_interval = 0, +}; + +static struct gpio_keys_platform_data ltc2954_button_pdata = { + .buttons = <c2954_descr, + .nbuttons = 1, + .rep = 0, +}; + +static struct platform_device ltc2954_dev = { + .name = "ltc2954", + .id = -1, + .num_resources = 0, + .dev = { + .platform_data = <c2954_button_pdata, + }, +}; +#endif /* CONFIG_INPUT_LTC2954 */ + +static int register_l_gpio_bound_devices(void) +{ + + int err = 0; + + if (HAS_MACHINE_E2K_IOHUB) { +#if IS_ENABLED(CONFIG_INPUT_LTC2954) + /* Only power button is available today: */ + + err = platform_device_register(<c2954_dev); + if (err < 0) + printk(KERN_ERR "failed to register " + "ltc2954 device\n"); +#endif /* CONFIG_INPUT_LTC2954_BUTTON */ + } + + return err; +} + +/* Generic GPIO interface */ + +/* + * Set the state of an output GPIO line. 
+ */ +static void l_gpio_set_value(struct gpio_chip *c, + unsigned int offset, int state) +{ + struct l_gpio *chip = (struct l_gpio *)c; + unsigned long flags; + unsigned int x; + + raw_spin_lock_irqsave(&chip->lock, flags); + x = readl(chip->base_ioaddr + L_GPIO_DATA); + if (state) + x |= L_GPIO_ONE_MASK(offset); + else + x &= L_GPIO_ZERO_MASK(offset); + + writel(x, chip->base_ioaddr + L_GPIO_DATA); + raw_spin_unlock_irqrestore(&chip->lock, flags); +} + +/* + * Read the state of a GPIO line. + */ +static int __l_gpio_get_value(struct gpio_chip *c, unsigned int offset) +{ + struct l_gpio *chip = (struct l_gpio *)c; + unsigned int x = readl(chip->base_ioaddr + L_GPIO_DATA); + + return (x & L_GPIO_ONE_MASK(offset)) ? 1 : 0; +} + +static int l_gpio_get_value(struct gpio_chip *c, unsigned int offset) +{ + struct l_gpio *chip = (struct l_gpio *)c; + unsigned long flags; + int x; + + raw_spin_lock_irqsave(&chip->lock, flags); + x = __l_gpio_get_value(c, offset); + raw_spin_unlock_irqrestore(&chip->lock, flags); + + return x; +} + +/* + * Configure the GPIO line as an input. + */ +static int l_gpio_direction_input(struct gpio_chip *c, unsigned offset) +{ + struct l_gpio *chip = (struct l_gpio *)c; + unsigned long flags; + unsigned int x; + + raw_spin_lock_irqsave(&chip->lock, flags); + x = readl(chip->base_ioaddr + L_GPIO_CNTRL); + x &= L_GPIO_ZERO_MASK(offset); + writel(x, chip->base_ioaddr + L_GPIO_CNTRL); + raw_spin_unlock_irqrestore(&chip->lock, flags); + + return 0; +} + +/* + * Configure the GPIO line as an output. 
+ */ +static int l_gpio_direction_output(struct gpio_chip *c, unsigned offset, + int val) +{ + struct l_gpio *chip = (struct l_gpio *)c; + unsigned long flags; + unsigned int x; + + raw_spin_lock_irqsave(&chip->lock, flags); + x = readl(chip->base_ioaddr + L_GPIO_CNTRL); + x |= L_GPIO_ONE_MASK(offset); + writel(x, chip->base_ioaddr + L_GPIO_CNTRL); + raw_spin_unlock_irqrestore(&chip->lock, flags); + l_gpio_set_value(c, offset, val); + + return 0; +} + +/* + * Map GPIO line to IRQ number. + */ +static int l_gpio_to_irq(struct gpio_chip *c, unsigned int pin) +{ + struct l_gpio *chip = (struct l_gpio *)c; + + return (chip->irq_base + pin); +} + +/* GPIOLIB interface */ +static struct l_gpio *l_gpios_set; + +/* + * GPIO IRQ + */ + +static int irq_to_gpio(unsigned int irq) +{ + struct l_gpio *chip = irq_get_chip_data(irq); + + return (irq - chip->irq_base); +} + +static void l_gpio_irq_disable(struct irq_data *irq_data) +{ + unsigned long flags; + unsigned int x; + unsigned int irq = irq_data->irq; + struct l_gpio *chip = irq_get_chip_data(irq); + int offset = irq_to_gpio(irq); + + raw_spin_lock_irqsave(&chip->lock, flags); + x = readl(chip->base_ioaddr + L_GPIO_INT_EN); + x &= L_GPIO_ZERO_MASK(offset); + writel(x, chip->base_ioaddr + L_GPIO_INT_EN); + raw_spin_unlock_irqrestore(&chip->lock, flags); + + return; +} + +static void l_gpio_irq_enable(struct irq_data *irq_data) +{ + unsigned long flags; + unsigned int x; + unsigned int irq = irq_data->irq; + struct l_gpio *chip = irq_get_chip_data(irq); + int offset = irq_to_gpio(irq); + + raw_spin_lock_irqsave(&chip->lock, flags); + x = readl(chip->base_ioaddr + L_GPIO_INT_EN); + x |= L_GPIO_ONE_MASK(offset); + writel(x, chip->base_ioaddr + L_GPIO_INT_EN); + raw_spin_unlock_irqrestore(&chip->lock, flags); + + return; +} + +static int l_gpio_irq_type(struct irq_data *irq_data, unsigned type) +{ + unsigned long flags; + unsigned int irq = irq_data->irq; + struct l_gpio *chip = irq_get_chip_data(irq); + int offset = 
irq_to_gpio(irq); + unsigned int cls, lvl; + + if (offset < 0 || offset > chip->chip.ngpio) { + return -EINVAL; + } + + raw_spin_lock_irqsave(&chip->lock, flags); + + cls = readl(chip->base_ioaddr + L_GPIO_INT_CLS); + lvl = readl(chip->base_ioaddr + L_GPIO_INT_LVL); + + switch (type) { + case IRQ_TYPE_EDGE_BOTH: + cls |= L_GPIO_ONE_MASK(offset); + /* + * Since the hardware doesn't support interrupts on both edges, + * emulate it in the software by setting the single edge + * interrupt and switching to the opposite edge while ACKing + * the interrupt + */ + if (__l_gpio_get_value(&chip->chip, offset)) + lvl &= L_GPIO_ZERO_MASK(offset); /* falling */ + else + lvl |= L_GPIO_ONE_MASK(offset); /* rising */ + break; + case IRQ_TYPE_EDGE_RISING: + cls |= L_GPIO_ONE_MASK(offset); + lvl |= L_GPIO_ONE_MASK(offset); + break; + case IRQ_TYPE_EDGE_FALLING: + cls |= L_GPIO_ONE_MASK(offset); + lvl &= L_GPIO_ZERO_MASK(offset); + break; + case IRQ_TYPE_LEVEL_HIGH: + cls &= L_GPIO_ZERO_MASK(offset); + lvl |= L_GPIO_ONE_MASK(offset); + break; + case IRQ_TYPE_LEVEL_LOW: + cls &= L_GPIO_ZERO_MASK(offset); + lvl &= L_GPIO_ZERO_MASK(offset); + break; + default: + break; + } + writel(lvl, chip->base_ioaddr + L_GPIO_INT_LVL); + writel(cls, chip->base_ioaddr + L_GPIO_INT_CLS); + + raw_spin_unlock_irqrestore(&chip->lock, flags); + + return 0; +} + +static int l_gpio_irq_set_affinity(struct irq_data *irq_data, + const struct cpumask *mask, bool force) +{ +#ifdef CONFIG_SMP + int ret = 0, i; + unsigned int irq = irq_data->irq; + struct l_gpio *chip = irq_get_chip_data(irq); + for (i = 0; chip->data.irq[i].nr && ret == 0; i++) { + struct irq_data *idata = + irq_get_irq_data(chip->data.irq[i].nr); + if (idata) + ret = native_ioapic_set_affinity(idata, mask, force); + else + pr_alert("Error: gpio: could not set IRQ#%d affinity. 
Did not boot pass info about it?\n", + chip->data.irq[i].nr); + } + return ret; +#else + return IRQ_SET_MASK_OK; +#endif +} + +static void l_gpio_irq_handler(struct irq_desc *desc) +{ + int irq = irq_desc_get_irq(desc); + unsigned int x; + unsigned int i; + struct irq_chip *ch = irq_desc_get_chip(desc); + struct l_gpio *chip = irq_desc_get_handler_data(desc); + unsigned start = 0, end = chip->chip.ngpio; + chained_irq_enter(ch, desc); + + x = readl(chip->base_ioaddr + L_GPIO_INT_STS); + + for (i = 0; chip->data.irq[i].nr && + i < ARRAY_SIZE(chip->data.irq); i++) { + if (chip->data.irq[i].nr == irq && chip->data.irq[i].end) { + start = chip->data.irq[i].start; + end = chip->data.irq[i].end; + break; + } + } + for (i = start; i <= end; i++) { + int pin_irq = chip->irq_base + i; + u32 type = irq_get_trigger_type(pin_irq); + if (!(x & (1 << i))) + continue; + /* + * Switch the interrupt edge to the opposite edge + * of the interrupt which got triggered for the case + * of emulating both edges + */ + if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) { + l_gpio_irq_type(irq_get_irq_data(pin_irq), + IRQ_TYPE_EDGE_BOTH); + } + generic_handle_irq(pin_irq); + } + + writel(x, chip->base_ioaddr + L_GPIO_INT_STS); + chained_irq_exit(ch, desc); +} + +static struct irq_chip l_gpio_irqchip = { + .name = "l-gpio-irqchip", + .irq_enable = l_gpio_irq_enable, + .irq_disable = l_gpio_irq_disable, + .irq_unmask = l_gpio_irq_enable, + .irq_mask = l_gpio_irq_disable, + .irq_set_type = l_gpio_irq_type, + .irq_set_affinity = l_gpio_irq_set_affinity, +}; + +static int __init l_gpio_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_id, + struct l_gpio *c) +{ + int err; + int i, bar = c->data.bar; + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "can't enable l-gpio device MEM\n"); + goto done; + } + + /* set up the driver-specific struct */ + c->base = pci_resource_start(pdev, bar); + c->base_ioaddr = pci_iomap(pdev, bar, 0); + c->pdev = pdev; + 
raw_spin_lock_init(&(c->lock)); + + c->irq_base = get_nr_irqs_gsi() + c->base; + + dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", bar, + (unsigned long long)c->base); + + /* Default settings: */ + /* Default Input/Output mode for all pins: */ + writel(L_GPIO_CNTRL_DEF, c->base_ioaddr + L_GPIO_CNTRL); + /* Default interrupt enable/disable for all pins: */ + writel(L_GPIO_INT_EN_DEF, c->base_ioaddr + L_GPIO_INT_EN); + /* Default interrupt mode level/edge for all pins: */ + writel(L_GPIO_INT_CLS_DEF, c->base_ioaddr + L_GPIO_INT_CLS); + /* Default rising/falling edge detection for all pins (if edge): */ + writel(L_GPIO_INT_LVL_DEF, c->base_ioaddr + L_GPIO_INT_LVL); + + /* finally, register with the generic GPIO API */ + err = gpiochip_add(&(c->chip)); + if (err) + goto release_region; + + c->irq_base = irq_alloc_descs_from(get_nr_irqs_gsi(), (c->chip).ngpio, + pcibus_to_node(pdev->bus)); + if (!c->irq_base) { + dev_err(&pdev->dev, "could not reserve %d irq numbers for l-gpio\n", + (c->chip).ngpio); + goto release_chip; + } + + for (i = 0; c->data.irq[i].nr && + i < ARRAY_SIZE(c->data.irq); i++) { + irq_set_handler_data(c->data.irq[i].nr, c); + irq_set_chained_handler(c->data.irq[i].nr, + l_gpio_irq_handler); + } + + /* To virtual irq_desc's: */ + for (i = 0; i < (c->chip).ngpio; i++) { + irq_set_chip_and_handler(i + c->irq_base, + &l_gpio_irqchip, + handle_simple_irq); + irq_set_chip_data(i + c->irq_base, c); + } + + dev_info(&pdev->dev, + DRV_NAME ": L-GPIO support successfully loaded.\n"); + + return 0; + +release_chip: + gpiochip_remove(&(c->chip)); +release_region: + pci_iounmap(pdev, c->base_ioaddr); + pci_release_region(pdev, c->data.bar); +done: + return err; +} + +static void __exit l_gpio_remove(struct l_gpio *p) +{ + struct pci_dev *pdev = p->pdev; + int bar = p->data.bar; + + gpiochip_remove(&(p->chip)); + pci_iounmap(pdev, p->base_ioaddr); + pci_release_region(pdev, bar); +} + +static struct l_gpio_data l_iohub_private_data = { + .bar = 1, + 
	.lines = ARCH_NR_IOHUB_GPIOS,
	.irq = {
		{ .nr = 8, .start = 0, .end = 7, },
		{ .nr = 9, .start = 8, .end = ARCH_NR_IOHUB_GPIOS - 1, }
	},
};

/* Plain PCI GPIO card: 16 lines, no parent IRQs described. */
static struct l_gpio_data l_pci_private_data = {
	.bar = 0,
	.lines = 16,
};

static struct l_gpio_data l_iohub2_private_data = {
	.bar = 0,
	.lines = ARCH_NR_IOHUB2_GPIOS,
	.irq = {
		{ .nr = 7 }
	},
};

static struct l_gpio_data l_iohub3_private_data = {
	.bar = 0,
	.lines = 16,
};

/* Match table; ->driver_data points at the controller description above. */
static struct pci_device_id __initdata l_gpio_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_GPIO_MPV_EIOH),
	.driver_data = (unsigned long)&l_iohub3_private_data},
	{PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_GPIO_MPV),
	.driver_data = (unsigned long)&l_iohub2_private_data},
	{PCI_DEVICE(PCI_AC97GPIO_VENDOR_ID_ELBRUS,
			PCI_AC97GPIO_DEVICE_ID_ELBRUS),
	.driver_data = (unsigned long)&l_iohub_private_data},
	{PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_GPIO),
	.driver_data = (unsigned long)&l_pci_private_data},
	{},
};

MODULE_DEVICE_TABLE(pci, l_gpio_pci_tbl);

#ifdef CONFIG_OF_GPIO
/* Look up the device-tree node "/l_gpio@<numa node>" for IOHUB/IOHUB2
 * controllers; other flavours have no DT representation. */
static struct device_node *l_gpio_get_of_node(struct pci_dev *pdev,
			struct l_gpio_data *data)
{
	int node = dev_to_node(&pdev->dev);
	char path[32];

	if (data != &l_iohub_private_data && data != &l_iohub2_private_data)
		return NULL;

	if (node < 0)
		node = 0;
	sprintf(path, "/l_gpio@%d", node);

	return of_find_node_by_path(path);
}
#endif

/*
 * We can't use the standard PCI driver registration stuff here, since
 * that allows only one driver to bind to each PCI device (and we want
 * multiple drivers to be able to bind to the device: AC97 and GPIO).
 * Instead, manually scan for the PCI device, request a single region,
 * and keep track of the devices that we're using.
+ */ + +static int __init l_gpio_init(void) +{ + struct pci_dev *pdev = NULL; + int err = -ENODEV; + int i, j = 0, base = 0; + struct l_gpio *next, *old = NULL; + + for (i = 0; i < ARRAY_SIZE(l_gpio_pci_tbl) - 1; i++) { + while ((pdev = pci_get_device(l_gpio_pci_tbl[i].vendor, + l_gpio_pci_tbl[i].device, + pdev))) { + struct l_gpio_data *d = (struct l_gpio_data *) + l_gpio_pci_tbl[i].driver_data; + struct gpio_chip *c; + if (!(next = kzalloc(sizeof(*next), GFP_KERNEL))) + return -ENOMEM; + c = (struct gpio_chip *)next; + c->owner = THIS_MODULE; + c->label = DRV_NAME; + c->direction_input = l_gpio_direction_input; + c->direction_output = l_gpio_direction_output; + c->get = l_gpio_get_value; + c->set = l_gpio_set_value; + c->to_irq = l_gpio_to_irq; + c->base = base; + c->ngpio = d->lines; + c->can_sleep = 0; +#ifdef CONFIG_OF_GPIO + c->of_node = l_gpio_get_of_node(pdev, d); +#endif + + /* + * GPIO/MPV in IOHub2 has 4 IOAPIC IRQs: + * 6 and 9 - for MPV + * 7 and 11 - for GPIO + * On EPIC systems IRQs are recalculated in + * fixup_iohub2_dev_irq(), and pdev->irq has the + * IRQ for MPV. Add 1 to get IRQ for GPIO. 
+ */ + if (cpu_has_epic() && d == &l_iohub2_private_data) + d->irq[0].nr = pdev->irq + 1; + + memcpy(&next->data, d, sizeof(*d)); + err = l_gpio_probe(pdev, &l_gpio_pci_tbl[i], next); + + if (err) + pci_dev_put(pdev); + if (old) + old->next = next; + else + l_gpios_set = next; + old = next; + base += d->lines; + j++; + } + } + + if (l_gpios_set) + err = register_l_gpio_bound_devices(); + + return err; +} + +static void __exit l_gpio_exit(void) +{ + struct l_gpio *p; + for (p = l_gpios_set; p; p = p->next) { + l_gpio_remove(p); + pci_dev_put(p->pdev); + } + +} + +module_init(l_gpio_init); +module_exit(l_gpio_exit); + +MODULE_AUTHOR("Evgeny Kravtsunov "); +MODULE_DESCRIPTION("Elbrus MCST GPIO driver"); +MODULE_LICENSE("GPL"); diff --git a/arch/l/kernel/i2c-spi/Makefile b/arch/l/kernel/i2c-spi/Makefile new file mode 100644 index 000000000000..8dc4aeb3946d --- /dev/null +++ b/arch/l/kernel/i2c-spi/Makefile @@ -0,0 +1,3 @@ +obj-y += core.o reset.o +obj-$(CONFIG_L_I2C_CONTROLLER) += i2c.o i2c-devices.o +obj-$(CONFIG_L_SPI_CONTROLLER) += spi.o spi-devices.o diff --git a/arch/l/kernel/i2c-spi/core.c b/arch/l/kernel/i2c-spi/core.c new file mode 100644 index 000000000000..d683bcd2dc86 --- /dev/null +++ b/arch/l/kernel/i2c-spi/core.c @@ -0,0 +1,171 @@ +/* + * Elbrus I2C_SPI controller support + * + * Copyright (C) 2012 MCST + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * 2012-05-29 Created + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * Elbrus I2C-SPI and Reset Controller that is part of Elbrus IOHUB + * and is implemented as a pci device in iohub. 
+ */ + +static struct pci_device_id i2c_spi_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_ELBRUS, PCI_DEVICE_ID_MCST_I2CSPI) }, + { PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_SM) }, + { PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_I2C_SPI) }, + { PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_IOEPIC_I2C_SPI) }, + { 0, } +}; + +struct i2c_spi_priv { + struct platform_device *i2c; + struct platform_device *spi; +}; + +static void i2c_spi_clenup(struct pci_dev *pdev, int stage) +{ + struct i2c_spi_priv *priv = pci_get_drvdata(pdev); + switch (stage) { + case 4: + platform_device_unregister(priv->i2c); + case 3: + platform_device_unregister(priv->spi); + case 2: + /* don't do this. ioapic also will be disabled.*/ + /*pci_disable_device(pdev);*/ + case 1: + kfree(priv); + } +} + +static int i2c_spi_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int ret = 0, stage = 0; + /* HACK: boot sets irq for watchdog, so we have to fix it */ + int irq = pdev->device == PCI_DEVICE_ID_MCST_I2CSPI ? 15 : + pdev->device == PCI_DEVICE_ID_MCST_I2C_SPI ? 
7 : 0; + struct resource res[] = { + { + .flags = IORESOURCE_MEM, + .start = pci_resource_start(pdev, 0), + .end = pci_resource_end(pdev, 0), + }, { + .flags = IORESOURCE_MEM, + .start = pci_resource_start(pdev, 1), + .end = pci_resource_end(pdev, 1), + }, { + .flags = IORESOURCE_IRQ, + .start = pdev->irq + irq, + .end = pdev->irq + irq, + }, + }; + struct i2c_spi_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL); + int pdev_id = dev_to_node(&pdev->dev); + if (pdev_id < 0) + pdev_id = 0; +#ifdef CONFIG_EPIC + if ((cpu_has_epic() && pdev->device != + PCI_DEVICE_ID_MCST_IOEPIC_I2C_SPI) || + (!cpu_has_epic() && pdev->device == + PCI_DEVICE_ID_MCST_IOEPIC_I2C_SPI)) { + pdev_id += MAX_NUMNODES; + } +#endif + if (!priv) + return -ENOMEM; + stage++; + ret = pci_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, + "Failed to setup Elbrus reset control " + "in i2c-iohub: Unable to make enable device\n"); + goto out; + } + stage++; + + priv->i2c = platform_device_register_resndata(&pdev->dev, + "l_i2c", pdev_id, res, ARRAY_SIZE(res), NULL, 0); + if (IS_ERR(priv->i2c)) { + ret = PTR_ERR(priv->i2c); + goto out; + } + + stage++; + priv->spi = platform_device_register_resndata(&pdev->dev, "l_spi", + pdev_id, res, ARRAY_SIZE(res), NULL, 0); + if (IS_ERR(priv->spi)) { + ret = PTR_ERR(priv->spi); + goto out; + } + stage++; + pci_set_drvdata(pdev, priv); +out: + if (ret) + i2c_spi_clenup(pdev, stage); + return ret; +} + +static void i2c_spi_remove(struct pci_dev *pdev) +{ + i2c_spi_clenup(pdev, 4); + pci_set_drvdata(pdev, NULL); +} + +static int i2c_spi_suspend(struct pci_dev *dev, pm_message_t state) +{ + /*Do not disable DMA: ioapic will not be able to send interrupts*/ + return 0; +} + +static int i2c_spi_resume(struct pci_dev *dev) +{ + return 0; +} + +static struct pci_driver i2c_spi_driver = { + .name = "i2c_spi", + .id_table = i2c_spi_ids, + .probe = i2c_spi_probe, + .remove = i2c_spi_remove, + .suspend = i2c_spi_suspend, + .resume = i2c_spi_resume, +}; + +__init +static 
int i2c_spi_init(void)
{
	return pci_register_driver(&i2c_spi_driver);
}
module_init(i2c_spi_init);

__exit
static void i2c_spi_exit(void)
{
	pci_unregister_driver(&i2c_spi_driver);
}
module_exit(i2c_spi_exit);

MODULE_AUTHOR("Evgeny Kravtsunov ");
MODULE_DESCRIPTION("Elbrus I2C-SPI SMBus driver");
MODULE_LICENSE("GPL");
diff --git a/arch/l/kernel/i2c-spi/i2c-devices.c b/arch/l/kernel/i2c-spi/i2c-devices.c
new file mode 100644
index 000000000000..51da23a8d837
--- /dev/null
+++ b/arch/l/kernel/i2c-spi/i2c-devices.c
@@ -0,0 +1,283 @@
/* Board-specific I2C device tables, registered at boot when there is no
 * device tree.
 * NOTE(review): the <header> names of these #include directives were lost
 * in the patch mangling. */
#include
#include
#include
#include
#include
#include
#include

#define BUTTERFLY_PCA953X_LINES_NR 8
#ifdef CONFIG_E2K
#if IS_ENABLED(CONFIG_IPE2ST_POWER)
/* pca953x expander lines are numbered after all IOHUB-own GPIOs. */
static struct pca953x_platform_data e2k_i2c_board_pdata[] = {
	[0] = { .gpio_base = ARCH_NR_IOHUB_GPIOS * MAX_NUMIOHUBS, },
};

static struct pca953x_platform_data e8c_i2c_board_pdata[] = {
	[0] = { .gpio_base = ARCH_MAX_NR_OWN_GPIOS * MAX_NUMIOHUBS, },
};

static struct i2c_board_info __initdata e2k_i2c_board_info1[] = {
#if IS_ENABLED(CONFIG_LTC4306)
	{
		I2C_BOARD_INFO("ltc4306", 0x44),
	},
#endif
#if IS_ENABLED(CONFIG_GPIO_PCA953X)
	{
		I2C_BOARD_INFO("pca9536", 0x41),
		.platform_data = &e2k_i2c_board_pdata[0],
	},
#endif
#if IS_ENABLED(CONFIG_ISL22317)
	{
		I2C_BOARD_INFO("isl22317", 0x2a),
	},
#endif
#if IS_ENABLED(CONFIG_SENSORS_LTC4151)
	{
		I2C_BOARD_INFO("ltc4151", 0x6a),
	},
#endif

#if IS_ENABLED(CONFIG_SENSORS_LTC2978)
	{
		I2C_BOARD_INFO("ltm4676", 0x40),
	},
	{
		I2C_BOARD_INFO("ltm4676", 0x4f),
	},
#endif

};

static struct i2c_board_info __initdata e2k_i2c_board_info_e8c[] = {
#if IS_ENABLED(CONFIG_LTC4306)
	{
		I2C_BOARD_INFO("ltc4306", 0x4e),
	},
#endif
#if IS_ENABLED(CONFIG_GPIO_PCA953X)
	{
		I2C_BOARD_INFO("pca9534", 0x24),
		.platform_data = &e8c_i2c_board_pdata[0],
	},
#endif
#if IS_ENABLED(CONFIG_ISL22317)
	{
		I2C_BOARD_INFO("isl22317", 0x2a),
	},
#endif
#if
IS_ENABLED(CONFIG_SENSORS_LTC4151)
	{
		I2C_BOARD_INFO("ltc4151", 0x6a),
	},
#endif

#if IS_ENABLED(CONFIG_SENSORS_LTC2978)
	{
		I2C_BOARD_INFO("ltm4676", 0x40),
	},
	{
		I2C_BOARD_INFO("ltm4676", 0x4f),
	},
#endif

};
#endif /* CONFIG_IPE2ST_POWER */
/* Names for the GPIO lines behind the IOHub2 pca9534 expanders. */
static const char * const iohub2_spmc[] = {
	"spmc.0", "spmc.1", "spmc.2", "spmc.3",
	"spmc.4", "spmc.5", "spmc.6", "spmc.7",
};
static const char * const iohub2_pci_req[] = {
	"pci-req.0", "pci-req.1", "pci-req.2", "pci-req.3",
	"pci-req.4", "pci-req.5", "pci-req.6", "pci-req.7",
};
static const char * const iohub2_pe0[] = {
	"pe0-ctrl.0", "pe0-ctrl.1", "pe0-ctrl.2", "pe0-ctrl.3",
	"pe0-ctrl.4", "pe0-ctrl.5", "pe0-ctrl.6", "pe0-ctrl.7",
};
static const char * const iohub2_pe1[] = {
	"pe1-ctrl.0", "pe1-ctrl.1", "pe1-ctrl.2", "pe1-ctrl.3",
	"pe1-ctrl.4", "pe1-ctrl.5", "pe1-ctrl.6", "pe1-ctrl.7",
};

static struct pca953x_platform_data iohub2_i2c_board_pdata[] = {
	[0] = { .gpio_base = ARCH_NR_IOHUB2_GPIOS,
		.names = iohub2_spmc,
	},
	[1] = { .gpio_base = 1 * BUTTERFLY_PCA953X_LINES_NR +
			ARCH_NR_IOHUB2_GPIOS,
		.names = iohub2_pci_req,
	},
	[2] = { .gpio_base = 2 * BUTTERFLY_PCA953X_LINES_NR +
			ARCH_NR_IOHUB2_GPIOS,
		.names = iohub2_pe0,
	},
	[3] = { .gpio_base = 3 * BUTTERFLY_PCA953X_LINES_NR +
			ARCH_NR_IOHUB2_GPIOS,
		.names = iohub2_pe1,
	},
};

static struct i2c_board_info __initdata iohub2_i2c_devices_bus1[] = {
	{
		I2C_BOARD_INFO("pca9534", 0x20),
		.platform_data = &iohub2_i2c_board_pdata[0],
	},
};

static struct i2c_board_info __initdata iohub2_i2c_devices_bus2[] = {
	{
		I2C_BOARD_INFO("pca9534", 0x20),
		.platform_data = &iohub2_i2c_board_pdata[1],
	},
	{
		I2C_BOARD_INFO("pca9534", 0x21),
		.platform_data = &iohub2_i2c_board_pdata[2],
	},
	{
		I2C_BOARD_INFO("pca9534", 0x22),
		.platform_data = &iohub2_i2c_board_pdata[3],
	},
};

static struct i2c_board_info __initdata iohub2_i2c_devices_bus3[] = {
	{
		I2C_BOARD_INFO("pdt012", 0x10),
	},
	{
		I2C_BOARD_INFO("pdt012", 0x14),
	},
};


static struct i2c_board_info __initdata pmc_i2c_devices_bus[] = {
	{
		I2C_BOARD_INFO("udt020", 0x10),
	},
	{
		I2C_BOARD_INFO("udt020", 0x13),
	},
	{
		I2C_BOARD_INFO("pdt012", 0x14),
	},
};

#endif /* CONFIG_E2K */

/* Occupy gpios after iohub's ones */
static struct pca953x_platform_data butterfly_pca953x_pdata[] = {
	[0] = { .gpio_base = ARCH_NR_IOHUB_GPIOS * MAX_NUMIOHUBS + 0 * 8, },
	[1] = { .gpio_base = ARCH_NR_IOHUB_GPIOS * MAX_NUMIOHUBS + 1 * 8, },
	[2] = { .gpio_base = ARCH_NR_IOHUB_GPIOS * MAX_NUMIOHUBS + 2 * 8, },
	[3] = { .gpio_base = ARCH_NR_IOHUB_GPIOS * MAX_NUMIOHUBS + 3 * 8, },
};

static struct i2c_board_info __initdata butterfly_i2c_devices_bus0[] = {
	{
		I2C_BOARD_INFO("pca9534", 0x20),
		.platform_data = &butterfly_pca953x_pdata[0],
	},
	{
		I2C_BOARD_INFO("ucd9080", 0x60),
	},
};
static struct i2c_board_info __initdata butterfly_i2c_devices_bus1[] = {
	{
		I2C_BOARD_INFO("pca9534", 0x20),
		.platform_data = &butterfly_pca953x_pdata[1],
	},
};
static struct i2c_board_info __initdata butterfly_i2c_devices_bus2[] = {
	{
		I2C_BOARD_INFO("pca9534", 0x20),
		.platform_data = &butterfly_pca953x_pdata[2],
	},
};
static struct i2c_board_info __initdata butterfly_i2c_devices_bus3[] = {
	{
		I2C_BOARD_INFO("pca9534", 0x20),
		.platform_data = &butterfly_pca953x_pdata[3],
	},
};

/* Select the device table matching the motherboard type reported by the
 * boot loader and hand it to the I2C core.  A detected device tree takes
 * precedence and disables these static tables. */
static int __init i2c_board_info_init(void)
{
#ifdef CONFIG_OF
	if (devtree_detected)
		return 0;
#endif

#ifdef CONFIG_E2K
	if (bootblock_virt->info.bios.mb_type == MB_TYPE_ES2_BUTTERFLY) {
#else
	if (1) {
#endif
		int i;
		/* Recompute gpio_base for the number of IOHUBs actually
		 * online (the static initializer assumed the maximum). */
		for (i = 0; i < ARRAY_SIZE(butterfly_pca953x_pdata); i++)
			butterfly_pca953x_pdata[i].gpio_base =
				i * BUTTERFLY_PCA953X_LINES_NR +
				ARCH_NR_IOHUB_GPIOS * num_online_iohubs();

		i2c_register_board_info(0, butterfly_i2c_devices_bus0,
				ARRAY_SIZE(butterfly_i2c_devices_bus0));
		i2c_register_board_info(1, butterfly_i2c_devices_bus1,
				ARRAY_SIZE(butterfly_i2c_devices_bus1));
		i2c_register_board_info(2, butterfly_i2c_devices_bus2,
				ARRAY_SIZE(butterfly_i2c_devices_bus2));
		i2c_register_board_info(3, butterfly_i2c_devices_bus3,
				ARRAY_SIZE(butterfly_i2c_devices_bus3));
	} else {
#ifdef CONFIG_E2K

		if (bootblock_virt->info.bios.mb_type ==
				MB_TYPE_E1CP_IOHUB2_RAZBRAKOVSCHIK
				) {
			i2c_register_board_info(1, iohub2_i2c_devices_bus1,
				ARRAY_SIZE(iohub2_i2c_devices_bus1));
			i2c_register_board_info(2, iohub2_i2c_devices_bus2,
				ARRAY_SIZE(iohub2_i2c_devices_bus2));
			i2c_register_board_info(3, iohub2_i2c_devices_bus3,
				ARRAY_SIZE(iohub2_i2c_devices_bus3));
		} else if (bootblock_virt->info.bios.mb_type == MB_TYPE_E1CP_PMC ||
				bootblock_virt->info.bios.mb_type ==
						MB_TYPE_MBE1C_PC) {
			i2c_register_board_info(3, iohub2_i2c_devices_bus3,
				ARRAY_SIZE(iohub2_i2c_devices_bus3));
			i2c_register_board_info(4, pmc_i2c_devices_bus,
				ARRAY_SIZE(pmc_i2c_devices_bus));
		} else if (bootblock_virt->info.bios.mb_type ==
				MB_TYPE_E8C) {
			int j;
			for (j = 0; j < I2C_MAX_BUSSES; j++)
				i2c_register_board_info(j,
					e2k_i2c_board_info_e8c,
					ARRAY_SIZE(e2k_i2c_board_info_e8c));
		} else {

#if IS_ENABLED(CONFIG_IPE2ST_POWER)
			if (iohub_i2c_line_id) {
				i2c_register_board_info(iohub_i2c_line_id,
					e2k_i2c_board_info1,
					ARRAY_SIZE(e2k_i2c_board_info1));
			} else {
				/* if adapter number is not given through kernel
				 * command line - create ipe2st devices on all
				 * adapters.
				 */
				int ii;
				for (ii = 0; ii < I2C_MAX_BUSSES; ii++) {
					i2c_register_board_info(ii,
						e2k_i2c_board_info1,
						ARRAY_SIZE(e2k_i2c_board_info1));
				}
			}
#endif /* CONFIG_IPE2ST_POWER */
		}
#endif
	}
	return 0;
}

module_init(i2c_board_info_init);
diff --git a/arch/l/kernel/i2c-spi/i2c.c b/arch/l/kernel/i2c-spi/i2c.c
new file mode 100644
index 000000000000..0d5e4e937e95
--- /dev/null
+++ b/arch/l/kernel/i2c-spi/i2c.c
@@ -0,0 +1,784 @@
/*
 * Elbrus I2C controller support
 *
 * Copyright (C) 2011-2012 Evgeny Kravstunov ,
 * Pavel Panteleev
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */
/* NOTE(review): header names of the #include directives below were lost
 * in the patch mangling. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include
#include
#include

/* I2C definitions for Elbrus I2C-SPI Controller (part of IOHUB) */

#define I2C_IOHUB_BAR0_OFFSET	0x14
#define I2C_CONTROL	0x00
#define I2C_STATUS	0x04 /* offset from i2c_cbase */
#define I2C_MODE	0x08 /* offset from i2c_cbase */
#define I2C_TIMEOUTVAL	0x0c /* offset from i2c_cbase */

/* I2C_CONTROL field */

#define I2C_TRANSACTION_TYPE_WRITE	0x1
#define I2C_MAX_TRANS_BYTES	64
#define I2C_TRANS_SIZE_SHIFT	1
/* Encode a 1..63 byte transfer length into bits 6:1 (0 if out of range). */
#define I2C_TRANS_SIZE(size) \
({ \
	unsigned __ret = 0, __size = size; \
	if (__size > 0 && __size < I2C_MAX_TRANS_BYTES) \
		__ret = (__size << I2C_TRANS_SIZE_SHIFT);\
	__ret; \
})
#define I2C_TRANS_SIZE_MASK	0x7E /* bits 6:1 */
#define I2C_10BIT_ADDR_SHIFT	7
#define I2C_10BIT_ADDR_MASK	0x1FFF80 /* bits 21:7 */
#define I2C_7BIT_ADDR_SHIFT	15
#define I2C_7BIT_ADDR_MASK	0x3f8000 /* bits 21:15 */
#define I2C_10BIT_ADDR_MODE_SHIFT	22
#define I2C_10BIT_ADDR_MODE	(1 << I2C_10BIT_ADDR_MODE_SHIFT)
#define I2C_DATA_PHASE_PRESENT_SHIFT	23
#define
I2C_DATA_PHASE_PRESENT	(1 << I2C_DATA_PHASE_PRESENT_SHIFT)
#define I2C_DST_BUS_NUMBER_SHIFT	24
/* Bus 0 is null in bits 25:24 */

/* Encode the destination bus id (busses 1-3 in bits 25:24, bus 4 via its
 * own flag bit). */
#define I2C_DST_BUS(bus_id) \
({ \
	unsigned __ret = 0, __bus_id = bus_id; \
	if (__bus_id > 0 && __bus_id < I2C_DST_BUSSES) \
		__ret = (__bus_id << I2C_DST_BUS_NUMBER_SHIFT); \
	else if (__bus_id == 4) \
		__ret = (1 << I2C_BUS_4_SHIFT); \
	__ret; \
})
#define I2C_START_BYTE_ON_SHIFT	26
#define I2C_START_BYTE_ON	(1 << I2C_START_BYTE_ON_SHIFT)
#define I2C_KILL_SHIFT	27
#define I2C_KILL	(1 << I2C_KILL_SHIFT)
#define I2C_START_EXEC_SHIFT	28
#define I2C_START_EXEC	(1 << I2C_START_EXEC_SHIFT)
#define I2C_BUS_4_SHIFT	30
#define I2C_CONTROL_MASK	0x1fffffff /* bits 31 not used
						(const zeros) */

/* I2C_STATUS field */

#define I2C_CONTROLLER_BUSY	0x1
#define I2C_INTERRUPT	0x2
#define I2C_FAILED	0x4
#define I2C_BUS_COLLISION	0x8
#define I2C_TRANS_TIMEOUT	0x10
#define I2C_STATUS_MASK	0x1f /* bits 31:5 not used (const zeros) */

/* I2C_MODE field */

#define I2C_BUS_0_MODE_MASK	0x3 /* bits 1:0 */
#define I2C_BUS_0_FAST	0x1
#define I2C_BUS_0_FASTPLUS	0x2
#define I2C_BUS_1_MODE_SHIFT	2
#define I2C_BUS_1_MODE_MASK	(I2C_BUS_0_MODE_MASK << I2C_BUS_1_MODE_SHIFT)
#define I2C_BUS_2_MODE_SHIFT	4
#define I2C_BUS_2_MODE_MASK	(I2C_BUS_0_MODE_MASK << I2C_BUS_2_MODE_SHIFT)
#define I2C_BUS_3_MODE_SHIFT	6
#define I2C_BUS_3_MODE_MASK	(I2C_BUS_0_MODE_MASK << I2C_BUS_3_MODE_SHIFT)
#define I2C_INTERRUPT_ENABLE_SHIFT	8
#define I2C_INTERRUPT_ENABLE	(1 << I2C_INTERRUPT_ENABLE_SHIFT)
#define I2C_BUS_4_MODE_SHIFT	9
#define I2C_BUS_4_MODE_MASK	(I2C_BUS_0_MODE_MASK << I2C_BUS_4_MODE_SHIFT)
#define I2C_MODE_MASK	0x1ff /* bits 31:11 not used (const zeros) */

/* I2C_TIMEOUTVAL field */

#define I2C_TIMEOUTVAL_ASIC	0x0ee6b280
#define I2C_TIMEOUTVAL_ALTERA	0x05f5e100
#define I2C_TIMEOUTVAL_MASK	0xffffffff


/* IOHUB SMBus address offsets (expect a local 'l_i2c' in scope) */
#define SMBCONTROL	(I2C_CONTROL + l_i2c->cbase + I2C_IOHUB_BAR0_OFFSET)
#define SMBSTATUS	(I2C_STATUS + l_i2c->cbase + I2C_IOHUB_BAR0_OFFSET)
#define SMBMODE	(I2C_MODE + l_i2c->cbase + I2C_IOHUB_BAR0_OFFSET)
#define SMBTIMEOUT	(I2C_TIMEOUTVAL + l_i2c->cbase + I2C_IOHUB_BAR0_OFFSET)
#define SMBDATA	(l_i2c->dbase)

/* IOHUB constants */
#define IOHUB_QUICK	0x00
#define IOHUB_BYTE	0x04
#define IOHUB_BYTE_DATA	0x08
#define IOHUB_WORD_DATA	0x0C
#define IOHUB_BLOCK_DATA	0x14
#define IOHUB_I2C_BLOCK_DATA	0x1C

/* Address of manufacturer id register for
 * majority of lm sensors. Use it to emulate
 * SMBUS_QUICK that is absent in IOHUB. */
#define HWMON_MAN_ID	0xfe
static int i2c_adapters_per_controller = 4;

/* Multimaster i2c configuration can require l_xfer retry in case of
 * bus collision. Limit number of retries. */
#define MAX_RETRIES	100

/* Per-controller state: one i2c_adapter per downstream bus. */
struct l_i2c {
	struct i2c_adapter adapter[I2C_MAX_BUSSES];
	unsigned bus_speed[I2C_MAX_BUSSES];
	struct platform_device *pdev;
	void __iomem *cbase;	/* control/status register window */
	void __iomem *dbase;	/* data buffer window */
	struct completion xfer_complete;
	bool no_irq;	/* poll for completion instead of using the IRQ */
};
#define __r_i2c(__addr)	readl(__addr)
#define __w_i2c(__v, __addr)	writel(__v, __addr)

/* Register accessors with dev_dbg() tracing of every access. */
#define r_i2c(__offset) \
({ \
	unsigned __val = __r_i2c(__offset); \
	dev_dbg(&l_i2c->pdev->dev, "r: %8x: %s\t\t%s:%d\n", \
			__val, # __offset, __func__, __LINE__); \
	__val; \
})

#define w_i2c(__val, __offset) do { \
	unsigned __val2 = __val; \
	dev_dbg(&l_i2c->pdev->dev, "w: %8x: %s\t\t%s:%d\n", \
			__val2, # __offset, __func__, __LINE__);\
	__w_i2c(__val2, __offset); \
} while (0)

/* Completion interrupt: ack I2C_INTERRUPT and wake the waiting xfer. */
static irqreturn_t l_i2c_irq_handler(int irq, void *devid)
{
	struct l_i2c *l_i2c = devid;
	u32 s = r_i2c(SMBSTATUS);
	if (!(s & I2C_INTERRUPT))
		return IRQ_NONE;

	complete(&l_i2c->xfer_complete);
	w_i2c(I2C_INTERRUPT, SMBSTATUS);
	return IRQ_HANDLED;
}

/* Kick off the transaction programmed in SMBCONTROL, wait for it to
 * finish (IRQ or polling) and translate the status bits to an errno. */
static int l_i2c_transaction(struct i2c_adapter *adap)
{
	unsigned s = 0;
	int ret = 0;
	struct l_i2c *l_i2c = i2c_get_adapdata(adap);

	/* Make sure the SMBus host is ready to start transmitting */
	s =
r_i2c(SMBSTATUS);
	if (s & I2C_CONTROLLER_BUSY) {
		dev_err(&adap->dev, "Controller is busy! (%02x)\n", s);
		return -EBUSY;
	}

	reinit_completion(&l_i2c->xfer_complete);
	/* start the transaction by setting bit 28 */
	w_i2c(r_i2c(SMBCONTROL) | I2C_START_EXEC, SMBCONTROL);

	if (l_i2c->no_irq) {
		/* Polling mode: spin until idle or the adapter timeout. */
		unsigned long timeout = jiffies + adap->timeout;
		while (time_before(jiffies, timeout) &&
				r_i2c(SMBSTATUS) & I2C_CONTROLLER_BUSY)
			mdelay(1);
	} else if (!wait_for_completion_timeout(&l_i2c->xfer_complete,
			adap->timeout)) {
		dev_warn(&adap->dev, "SMBus Timeout! status: %x\n",
			r_i2c(SMBSTATUS));
	}
	s = r_i2c(SMBSTATUS);
	/* If the SMBus is still busy, we give up */
	if (s & I2C_CONTROLLER_BUSY) {
		dev_err(&adap->dev, "l_i2c is still busy (status: %x)!\n", s);
		ret = -ETIMEDOUT;
		goto out;
	}
	if (s & I2C_FAILED) {
		ret = -EIO;
		dev_err(&adap->dev, "Error: Failed bus transaction\n");
		goto out;
	}

	if (s & I2C_BUS_COLLISION) {
		ret = -EAGAIN;
		/* well, try to fixup it later in l_xfer */
		goto out;
	}

	if (s & I2C_TRANS_TIMEOUT) {
		ret = -ENXIO;
		/* I2C_TRANS_TIMEOUT is legitimate for multimaster. */
		/* dev_err(&adap->dev, "Error: no response!\n"); */
		goto out;
	}
out:
	/* Reset status register */
	w_i2c(r_i2c(SMBSTATUS), SMBSTATUS);

	if ((s = r_i2c(SMBSTATUS)) != 0) {
		dev_err(&adap->dev, "Failed reset at end of "
				"transaction (%02x)\n", s);
	}

	return ret;
}

/* Serialize against the SPI half that shares this controller. */
static void lock_companion(int id)
{
	struct spi_master *master = spi_busnum_to_master(id);
	if (WARN_ON(!master))
		return;
	spi_master_get(master);
	spi_bus_lock(master);
}

static void unlock_companion(int id)
{
	struct spi_master *master = spi_busnum_to_master(id);
	if (WARN_ON(!master))
		return;
	spi_bus_unlock(master);
	spi_master_put(master);
}

/* Translate one SMBus operation into an IOHUB controller transaction. */
static s32 __l_smbus_xfer(struct i2c_adapter *adap, u16 addr,
		unsigned short i2c_flags, char read_write,
		u8 command, int size, union i2c_smbus_data *data)
{

	struct l_i2c *l_i2c = i2c_get_adapdata(adap);
	int ret = 0;
	int i, len = 0;
	int bus_id = ((adap->nr) % i2c_adapters_per_controller);
	unsigned int value;
	unsigned char quick = 0;
	void __iomem *daddr;

	value = (unsigned int) addr;

	/* Place the slave address into the CONTROL word (7- or 10-bit). */
	if (i2c_flags & I2C_CLIENT_TEN) {
		value <<= I2C_10BIT_ADDR_SHIFT;
		value &= I2C_10BIT_ADDR_MASK;
		value |= I2C_10BIT_ADDR_MODE;
	} else {
		value <<= I2C_7BIT_ADDR_SHIFT;
		value &= I2C_7BIT_ADDR_MASK;
	}

	if ((read_write == I2C_SMBUS_WRITE) && (size != I2C_SMBUS_QUICK))
		value |= I2C_TRANSACTION_TYPE_WRITE;

	value |= I2C_DST_BUS(bus_id);
	value &= ~I2C_START_BYTE_ON;

	daddr = SMBDATA;

	switch (size) {
	case I2C_SMBUS_QUICK:
		/* iohub i2c-spi controller does not support QUICK.
		 * We emulate QUICK by BYTE_DATA, assuming QUICK
		 * will be used ONLY for detecting hwmon sensors
		 * on motherboard. Hwmon sensors (lm95231 etc.)
		 * fortunately have MANUFACTURER_ID and REVISION_ID
		 * registers. Other possible chips (isl22317, pca953x etc.)
		 * are to be instantiated explicitly in
		 * instantiate_i2c_bus(busid).
*/ + value |= I2C_TRANS_SIZE(1); + value |= I2C_TRANSACTION_TYPE_WRITE; + value |= I2C_DATA_PHASE_PRESENT; + w_i2c(value, SMBCONTROL); + writeb(HWMON_MAN_ID, daddr); + ret = l_i2c_transaction(adap); + if (ret) + goto out; + + value &= ~(I2C_TRANSACTION_TYPE_WRITE); + w_i2c(value, SMBCONTROL); + size = IOHUB_QUICK; + break; + case I2C_SMBUS_BYTE: + if (read_write == I2C_SMBUS_WRITE) { + /* Write */ + value |= I2C_TRANS_SIZE(1); + value |= I2C_DATA_PHASE_PRESENT; + w_i2c(value, SMBCONTROL); + writeb(command, daddr); + } else { + /* Read */ + value |= I2C_TRANSACTION_TYPE_WRITE; + value &= ~(I2C_DATA_PHASE_PRESENT); + ret = l_i2c_transaction(adap); + if (ret) + goto out; + + value &= ~(I2C_TRANSACTION_TYPE_WRITE); + w_i2c(value, SMBCONTROL); + } + size = IOHUB_BYTE; + break; + case I2C_SMBUS_BYTE_DATA: + if (read_write == I2C_SMBUS_WRITE) { + /* Write */ + value |= I2C_TRANS_SIZE(2); + value |= I2C_DATA_PHASE_PRESENT; + w_i2c(value, SMBCONTROL); + writeb(command, daddr); + writeb(data->byte, (daddr + 1)); + } else { + /* Read */ + /* Use 10bit address mode to send command + in the low byte of address */ + value |= ((unsigned int)command) + << I2C_10BIT_ADDR_SHIFT; + value |= I2C_10BIT_ADDR_MODE; + value |= I2C_TRANS_SIZE(1); + value |= I2C_DATA_PHASE_PRESENT; + w_i2c(value, SMBCONTROL); + } + size = IOHUB_BYTE_DATA; + break; + case I2C_SMBUS_WORD_DATA: + if (read_write == I2C_SMBUS_WRITE) { + /* Write */ + value |= I2C_TRANS_SIZE(3); + value |= I2C_DATA_PHASE_PRESENT; + w_i2c(value, SMBCONTROL); + writeb(command, daddr); + writeb((u8)data->word, (daddr + 1)); + writeb((u8)(data->word >> 8), (daddr + 2)); + + } else { + /* Read */ + /* Use 10bit address mode to send command + in the low byte of address */ + value |= ((unsigned int)command) + << I2C_10BIT_ADDR_SHIFT; + value |= I2C_10BIT_ADDR_MODE; + value |= I2C_TRANS_SIZE(2); + value |= I2C_DATA_PHASE_PRESENT; + w_i2c(value, SMBCONTROL); + } + size = IOHUB_WORD_DATA; + break; + case I2C_SMBUS_BLOCK_DATA: + if 
(read_write == I2C_SMBUS_WRITE) { /* Write */ + len = data->block[0]; + if (len == 0 || len > (I2C_SMBUS_BLOCK_MAX - 1)) { + ret = -EINVAL; + goto out; + } + value |= I2C_TRANS_SIZE(len+1); + value |= I2C_DATA_PHASE_PRESENT; + w_i2c(value, SMBCONTROL); + writeb(command, daddr); + for (i = 1; i <= len; i++) { + writeb(data->block[i], (daddr + i)); + } + } else { /* Read */ + /* iohub controller does not support SMBUS_BLOCK. + * Length comes in first byte so try to read max block + */ + len = I2C_SMBUS_BLOCK_MAX; + /* Use 10bit address mode to send command + in the low byte of address */ + if (i2c_flags & I2C_CLIENT_TEN) { + ret = -EADDRNOTAVAIL; + goto out; + } + value |= ((unsigned int)command) + << I2C_10BIT_ADDR_SHIFT; + value |= I2C_10BIT_ADDR_MODE; + value |= I2C_TRANS_SIZE(len); + value |= I2C_DATA_PHASE_PRESENT; + w_i2c(value, SMBCONTROL); + } + size = IOHUB_BLOCK_DATA; + break; + case I2C_SMBUS_I2C_BLOCK_DATA: + if (read_write == I2C_SMBUS_WRITE) { + /* Write */ + len = data->block[0]; + if (len == 0 || len > (I2C_SMBUS_BLOCK_MAX - 1)) { + ret = -EINVAL; + goto out; + } + value |= I2C_TRANS_SIZE(len+1); + value |= I2C_DATA_PHASE_PRESENT; + w_i2c(value, SMBCONTROL); + writeb(command, daddr); + for (i = 1; i <= len; i++) { + writeb(data->block[i], (daddr + 1 + i)); + } + } else { + /* Read */ + len = data->block[0]; + if (len == 0 || len > I2C_SMBUS_BLOCK_MAX) { + ret = -EINVAL; + goto out; + } + + value |= I2C_TRANS_SIZE(len); + value &= ~(I2C_TRANSACTION_TYPE_WRITE); + value |= I2C_DATA_PHASE_PRESENT; + + w_i2c(value, SMBCONTROL); + } + size = IOHUB_I2C_BLOCK_DATA; + break; + + default: + dev_warn(&adap->dev, "Unsupported transaction %d\n", size); + ret = -EOPNOTSUPP; + goto out; + } + + ret = l_i2c_transaction(adap); + if (ret) + goto out; + + if ((read_write == I2C_SMBUS_WRITE) && (size != IOHUB_QUICK)) { + ret = 0; + goto out; + } + + switch (size) { + case IOHUB_QUICK: + quick = readb(daddr); + break; + case IOHUB_BYTE: + case IOHUB_BYTE_DATA: + 
data->byte = readb(daddr); + break; + case IOHUB_WORD_DATA: + data->word = readw(daddr); + break; + case IOHUB_BLOCK_DATA: + len = readb(daddr); + data->block[0] = (unsigned char) len; + if (len == 0 || len > I2C_SMBUS_BLOCK_MAX) { + ret = -EPROTO; + goto out; + } + + for (i = 1; i <= len; i++) { + data->block[i] = readb(daddr + i); + } + break; + case IOHUB_I2C_BLOCK_DATA: + data->block[0] = (unsigned char) len; + for (i = 0; i < len; i++) { + data->block[i + 1] = readb(daddr + i); + } + break; + } +out: + return ret; +} + +static s32 l_smbus_xfer(struct i2c_adapter *adap, u16 addr, + unsigned short i2c_flags, char read_write, + u8 command, int size, union i2c_smbus_data *data) +{ + struct l_i2c *l_i2c = i2c_get_adapdata(adap); + int retries = 0, ret = 0; + do { + /* Lock spi if we are going to use the common buffer + * which may be in use by other I2C adapters or + * by SPI controller. */ + lock_companion(l_i2c->pdev->id); + ret = __l_smbus_xfer(adap, addr, i2c_flags, + read_write, command, size, data); + unlock_companion(l_i2c->pdev->id); + retries++; + } while (ret == -EAGAIN && retries < MAX_RETRIES); + + if (ret == -EAGAIN) + dev_err(&adap->dev, "l_i2c_xfer: Failed to fix i2c bus " + "collisions. 
Retries %d\n", retries); + return ret; +} + +static s32 l_i2c_xfer_one_msg(struct i2c_adapter *adap, struct i2c_msg *m) +{ + struct l_i2c *l_i2c = i2c_get_adapdata(adap); + int bus_id = ((adap->nr) % i2c_adapters_per_controller); + int ret = 0, i; + int f = m->flags, len = m->len; + u32 v = m->addr; + u8 *buf = m->buf; + if (WARN_ON_ONCE(f & I2C_M_RECV_LEN)) + return -EOPNOTSUPP; + + if (f & I2C_M_TEN) { + v <<= I2C_10BIT_ADDR_SHIFT; + v &= I2C_10BIT_ADDR_MASK; + v |= I2C_10BIT_ADDR_MODE; + } else { + v <<= I2C_7BIT_ADDR_SHIFT; + v &= I2C_7BIT_ADDR_MASK; + } + + v |= I2C_DST_BUS(bus_id); + v &= ~I2C_START_BYTE_ON; + v |= I2C_TRANS_SIZE(len); + + if (len) + v |= I2C_DATA_PHASE_PRESENT; + if (!(f & I2C_M_RD)) + v |= I2C_TRANSACTION_TYPE_WRITE; + + for (i = 0; i < len; i++) + writeb(buf[i], SMBDATA + i); + + w_i2c(v, SMBCONTROL); + + ret = l_i2c_transaction(adap); + if (ret) + goto out; + + if (!(f & I2C_M_RD)) + goto out; + + for (i = 0; i < len; i++) + buf[i] = readb(SMBDATA + i); + +out: + return ret; +} + +static int __l_i2c_xfer(struct i2c_adapter *adap, + struct i2c_msg *p, int num) +{ + int i, ret = 0; + /* Controller can't send pmsg in a single transaction, + * so split it into num transactions in hope + * that slave will handle them. + */ + for (i = 0; i < num && ret == 0; i++, p++) { + ret = l_i2c_xfer_one_msg(adap, p); + dev_dbg(&adap->dev, + "master_xfer[%d] %c, addr=0x%02x, len=%d%s:%d\n", + i, (p->flags & I2C_M_RD) ? 'R' : 'W', + p->addr, p->len, + (p->flags & I2C_M_RECV_LEN) ? "+" : "", ret); + } + + if (ret) + return ret; + else + return num; +} + +static int l_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *pmsg, int num) +{ + struct l_i2c *l_i2c = i2c_get_adapdata(adap); + int retries = 0, ret = 0; + do { + /* Lock spi if we are going to use the common buffer + * which may be in use by other I2C adapters or + * by SPI controller. 
*/ + lock_companion(l_i2c->pdev->id); + ret = __l_i2c_xfer(adap, pmsg, num); + unlock_companion(l_i2c->pdev->id); + retries++; + } while (ret == -EAGAIN && retries < MAX_RETRIES); + + if (ret == -EAGAIN) + dev_err(&adap->dev, "l_i2c_xfer: Failed to fix i2c bus" + "collision. Retries %d\n", retries); + return ret; +} + +static u32 l_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | + I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | + I2C_FUNC_SMBUS_I2C_BLOCK | I2C_FUNC_SMBUS_BLOCK_DATA | + I2C_FUNC_10BIT_ADDR; +} + +static const struct i2c_algorithm l_i2c_algorithm = { + .smbus_xfer = l_smbus_xfer, + .master_xfer = l_i2c_xfer, + .functionality = l_i2c_func, +}; + +static const struct i2c_adapter_quirks l_i2c_quirks = { + .max_write_len = I2C_MAX_TRANS_BYTES, + .max_read_len = I2C_MAX_TRANS_BYTES, +}; + +static void l_i2c_init_hw(struct l_i2c *l_i2c) +{ + int i; + unsigned mode = I2C_INTERRUPT_ENABLE; + /* Reset status bits: write ones to RW1C bits of I2C Status. */ + w_i2c(r_i2c(SMBSTATUS), SMBSTATUS); + + for (i = 0; i < i2c_adapters_per_controller; i++) { + unsigned speed = l_i2c->bus_speed[i], m = 0; + int k = i == 4 ? 
1 : 0; + if (speed >= 1000 * 1000) + m = I2C_BUS_0_FASTPLUS; + else if (speed >= 400 * 1000) + m = I2C_BUS_0_FAST; + mode |= m << (k + i * I2C_BUS_1_MODE_SHIFT); + } + + w_i2c(mode, SMBMODE); +} + +static int l_i2c_probe(struct platform_device *pdev) +{ + int ret = 0; + int i; + int id; + struct resource *r; + struct l_i2c *l_i2c = kzalloc(sizeof(*l_i2c), GFP_KERNEL); + if (!l_i2c) + return -ENOMEM; + + if (to_pci_dev(pdev->dev.parent)->device == PCI_DEVICE_ID_MCST_IOEPIC_I2C_SPI) { + i2c_adapters_per_controller = 5; + } + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + l_i2c->cbase = devm_ioremap(&pdev->dev, r->start, resource_size(r)); + if (IS_ERR(l_i2c->cbase)) + return PTR_ERR(l_i2c->cbase); + + r = platform_get_resource(pdev, IORESOURCE_MEM, 1); + l_i2c->dbase = devm_ioremap(&pdev->dev, r->start, resource_size(r)); + if (IS_ERR(l_i2c->dbase)) + return PTR_ERR(l_i2c->dbase); + + r = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if ((ret = devm_request_irq(&pdev->dev, r->start, + l_i2c_irq_handler, 0, "l_i2c", l_i2c))) { + l_i2c->no_irq = true; + } + + l_i2c->pdev = pdev; + init_completion(&l_i2c->xfer_complete); + + for (i = 0; i < i2c_adapters_per_controller; i++) { + char s[64]; + struct i2c_adapter *i2c = &l_i2c->adapter[i]; + id = pdev->id * i2c_adapters_per_controller + i; + /* set up the sysfs linkage to our parent device */ + i2c->dev.parent = &pdev->dev; + sprintf(s, "/l_i2c@%d/i2c@%d", pdev->id, i); + i2c->dev.of_node = of_find_node_by_path(s); + /* init adapter himself */ + i2c->owner = THIS_MODULE; + i2c->class = (I2C_CLASS_HWMON | I2C_CLASS_SPD); + i2c->algo = &l_i2c_algorithm; + i2c->quirks = &l_i2c_quirks; + i2c->nr = id; + /* Max. transaction should take: + * (I2C_MAX_TRANS_BYTES + 2) * 10 bit / 100kHz = 6600 us. + * Round it up to 10 ms. 
*/ + i2c->timeout = msecs_to_jiffies(10); + of_property_read_u32(i2c->dev.of_node, + "clock-frequency", &l_i2c->bus_speed[i]); + strlcpy(i2c->name, "l_i2c", sizeof(i2c->name)); + + if ((ret = i2c_add_numbered_adapter(i2c))) { + dev_err(&pdev->dev, "failed to register " + "I2C adapter %d!\n", id); + goto cleanup; + } + + i2c_set_adapdata(i2c, l_i2c); + + dev_info(&pdev->dev, "I2C adapter %d registered\n", id); + } + + platform_set_drvdata(pdev, l_i2c); + l_i2c_init_hw(l_i2c); + + return ret; + +cleanup: + for (i = 0; i < i2c_adapters_per_controller; i++) + i2c_del_adapter(&l_i2c->adapter[i]); + return ret; +} + +static int l_i2c_remove(struct platform_device *pdev) +{ + struct l_i2c *l_i2c = platform_get_drvdata(pdev); + int i; + for (i = 0; i < i2c_adapters_per_controller; i++) + i2c_del_adapter(&l_i2c->adapter[i]); + kfree(l_i2c); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int l_i2c_suspend(struct device *dev) +{ + return 0; +} + +static int l_i2c_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct l_i2c *l_i2c = platform_get_drvdata(pdev); + l_i2c_init_hw(l_i2c); + return 0; +} + +static const struct dev_pm_ops l_i2c_pm_ops = { + .suspend = l_i2c_suspend, + .resume = l_i2c_resume, +}; +#endif + +static struct platform_driver l_i2c_driver = { + .driver = { + .name = "l_i2c", + .owner = THIS_MODULE, +#ifdef CONFIG_PM_SLEEP + .pm = &l_i2c_pm_ops, +#endif + }, + .probe = l_i2c_probe, + .remove = l_i2c_remove, +}; + +__init +static int l_i2c_init(void) +{ + return platform_driver_register(&l_i2c_driver); +} +module_init(l_i2c_init); + +__exit +static void l_i2c_exit(void) +{ + platform_driver_unregister(&l_i2c_driver); +} +module_exit(l_i2c_exit); + +MODULE_AUTHOR("Evgeny Kravtsunov "); +MODULE_DESCRIPTION("Elbrus I2C controller driver"); +MODULE_LICENSE("GPL"); diff --git a/arch/l/kernel/i2c-spi/reset.c b/arch/l/kernel/i2c-spi/reset.c new file mode 100644 index 000000000000..db733306ef5d --- /dev/null +++ 
b/arch/l/kernel/i2c-spi/reset.c @@ -0,0 +1,211 @@ +/* + * Elbrus reset control driver + * + * Copyright (C) 2012 MCST + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * 2012-05-29 Created + */ + +#include +#include +#include + +#ifdef CONFIG_E2K +#include +#else +#include +#endif +#include +#include +#include + +#include +#include +#include + +#if IS_ENABLED(CONFIG_INPUT_LTC2954) +#include +#endif /* CONFIG_INPUT_LTC2954 */ + +#include + + +#undef DEBUG_RESET_MODE +#undef DebugRS +#define DEBUG_RESET_MODE 0 /* Elbrus reset debug */ +#define DebugRS if (DEBUG_RESET_MODE) printk + +#define BOOT_MODE_BOOTCMD 0xAAAA5500 + +static struct pci_dev *l_reset_device = NULL; + +int (*l_set_boot_mode)(int); + + +static void l_reset_pcie(void) +{ + int domain; + for_each_online_iohub(domain) { + struct pci_bus *bus = pci_find_bus(domain, 0); + u16 vid = 0, did = 0; + pci_bus_read_config_word(bus, 0, PCI_VENDOR_ID, &vid); + pci_bus_read_config_word(bus, 0, PCI_DEVICE_ID, &did); + if (vid != PCI_VENDOR_ID_MCST_PCIE_BRIDGE || + did != PCI_DEVICE_ID_MCST_PCIE_BRIDGE) + break; + pci_bus_write_config_word(bus, 0, PCI_BRIDGE_CONTROL, + PCI_BRIDGE_CTL_BUS_RESET); + } +} + +static void l_set_soft_reset_state(void) +{ + unsigned int reg; + pci_write_config_dword(l_reset_device, PCI_RESET_CONTROL, + L_SOFTWARE_RESET_TO_SOFT | L_WATCHDOG_RESET_TO_SOFT); + if (DEBUG_RESET_MODE) { + pci_read_config_dword(l_reset_device, + PCI_RESET_CONTROL, ®); + DebugRS("l_set_soft_reset_state() set Reset Control " + "to 0x%x\n", reg); + } +} + +static void l_set_hard_reset_state(void) +{ + unsigned int reg; + pci_write_config_dword(l_reset_device, PCI_RESET_CONTROL, + L_SOFTWARE_RESET_TO_HARD | L_WATCHDOG_RESET_TO_HARD); + if (DEBUG_RESET_MODE) { + pci_read_config_dword(l_reset_device, + 
PCI_RESET_CONTROL, ®); + DebugRS("l_set_hard_reset_state() set Reset Control " + "to 0x%x\n", reg); + } +} + +static int l_hard_reset = 1; + +static int l_reset_setup(char *str) +{ + l_hard_reset = 0; + return 1; +} + +__setup("softreset", l_reset_setup); + +static void l_reset_machine(char *cmd) +{ + if (iohub_generation(l_reset_device) == 0) { + /* system reset doesn't reset pcie */ + l_reset_pcie(); + } + + if (cmd && !strcmp(cmd, "bootcmd") && l_set_boot_mode) + l_set_boot_mode(BOOT_MODE_BOOTCMD); + + DebugRS("l_reset_machine() write to:0x%x val:0x%x\n", + PCI_SOFT_RESET_CONTROL, L_SOFTWARE_RESET); + pci_write_config_dword(l_reset_device, PCI_SOFT_RESET_CONTROL, + L_SOFTWARE_RESET); +} + +static void l_halt_machine(void) +{ +#if IS_ENABLED(CONFIG_INPUT_LTC2954) + char *desc = "ltc2954_kill"; + int err = gpio_request(LTC2954_KILL_GPIO_PIN, desc); + if (err < 0) + goto spmc_halt; + + err = gpio_direction_output(LTC2954_KILL_GPIO_PIN, 1); + if (err < 0) + goto spmc_halt; + + /* Never back from this guy: */ + gpio_set_value(LTC2954_KILL_GPIO_PIN, 0); +spmc_halt: +#endif /* CONFIG_INPUT_LTC2954 */ + /* If here - try to use SPMC for halting: */ + pr_info("l_halt_machine: trying to halt using spmc...\n"); + do_spmc_halt(); + + /* + * Machine halting is motherboard - dependent, so can be done + * only through interface kernel <-> boot + */ + pr_info("Hardware power off is not until implemented by " + "boot/kernel, so use manual mode\n"); + while (1); +} + + +static void l_reset_set_control_func(struct pci_dev *dev) +{ +#ifdef CONFIG_E2K + int nid; + for_each_node_has_dup_kernel(nid) { + the_node_machine(nid)->arch_reset = &l_reset_machine; + the_node_machine(nid)->arch_halt = &l_halt_machine; + } +#else + machine.arch_reset = &l_reset_machine; + machine.arch_halt = &l_halt_machine; +#endif + l_reset_device = dev; +} + +static struct pci_dev *get_i2c_spi_dev(void) +{ + struct pci_dev *dev = NULL; + if (cpu_has_epic()) + dev = pci_get_device(PCI_VENDOR_ID_MCST_TMP, + 
PCI_DEVICE_ID_MCST_IOEPIC_I2C_SPI, NULL); + if (!dev) + dev = pci_get_device(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_I2C_SPI, NULL); + if (!dev) + dev = pci_get_device(PCI_VENDOR_ID_ELBRUS, + PCI_DEVICE_ID_MCST_I2CSPI, NULL); + + return dev; +} + +static int l_reset_init(void) +{ + struct pci_dev *dev = get_i2c_spi_dev(); + u32 duration; + + if (!dev) + return 0; + + l_reset_set_control_func(dev); + + duration = iohub_generation(l_reset_device) == 1 ? + L_IOHUB2_SOFT_RESET_DURATION : L_IOHUB_SOFT_RESET_DURATION; + pci_write_config_dword(dev, PCI_SOFT_RESET_DURATION, + duration); + + if (DEBUG_RESET_MODE) { + unsigned int reg; + pci_read_config_dword(l_reset_device, + PCI_SOFT_RESET_DURATION, ®); + DebugRS("l_set_soft_reset_state() set Software Reset Duration " + "to 0x%x\n", reg); + } + + if (l_hard_reset) + l_set_hard_reset_state(); + else + l_set_soft_reset_state(); + + dev_info(&dev->dev, "probed\n"); + + return 0; +} +device_initcall(l_reset_init); diff --git a/arch/l/kernel/i2c-spi/spi-devices.c b/arch/l/kernel/i2c-spi/spi-devices.c new file mode 100644 index 000000000000..1e1006abbe5c --- /dev/null +++ b/arch/l/kernel/i2c-spi/spi-devices.c @@ -0,0 +1,99 @@ +#include +#include +#include +#include +#include +#include +#include + +#if defined(CONFIG_RTC_DRV_CY14B101P) +static struct spi_board_info spi_rtc_cy14b101p = { + .modalias = "rtc-cy14b101p", + .max_speed_hz = 16 * 1000 * 1000, /* 16 MHz */ + .mode = SPI_MODE_0, + .bus_num = 0, /* Matches 'id' of spi_controller device */ + .chip_select = 1 +}; +#endif /* CONFIG_RTC_DRV_CY14B101P */ + +static struct spi_board_info spi_rtc_fm33256 = { + .modalias = "rtc-fm33256", + .max_speed_hz = 16 * 1000 * 1000, /* 16 MHz */ + .mode = SPI_MODE_0, + .bus_num = 0, /* Matches 'id' of spi_controller device */ + .chip_select = 1 +}; + +static struct spi_board_info spi_rom_s25fl064a = { +#ifdef CONFIG_L_MTD_SPI_NOR + .modalias = "spi-nor", +#else + .modalias = "spidev", +#endif + /* Actually 50 MHz is supported, but not 
for the READ + * command which is usually used by userspace. */ + .max_speed_hz = 25 * 1000 * 1000, /* 25 MHz */ + .mode = SPI_MODE_0, + .bus_num = 0, + .chip_select = 0 +}; + +static int is_cy14b101p_exist(void) +{ + int mbtype = bootblock_virt->info.bios.mb_type; + + /* At first try to use explist definition */ + if (rtc_model) { + return (rtc_model == MP_RTC_VER_CY14B101P); + } + switch (mbtype) { + case 0: /* use cy14b101p by default */ +#ifdef CONFIG_E2K + case MB_TYPE_ES2_MBCUB_C: + case MB_TYPE_ES2_PLATO1: + case MB_TYPE_ES2_RTC_CY14B101P: + case MB_TYPE_ES2_RTC_CY14B101P_MULTICLOCK: + case MB_TYPE_E1CP_IOHUB2_RAZBRAKOVSCHIK: + case MB_TYPE_E1CP_PMC: + case MB_TYPE_ES2_EL2S4: +#endif +#ifdef CONFIG_E90S + case MB_TYPE_E90S_SIVUCH2: + case MB_TYPE_E90S_ATX: +#endif + return 1; + default: +#ifdef CONFIG_E2K + if (mbtype >= MB_TYPE_ES2_EL2S4) { + return 1; + } +#endif +#ifdef CONFIG_E90S + if (mbtype >= MB_TYPE_E90S_CY14B101P) { + return 1; + } +#endif + } + return 0; +} + +static int register_spi_devices(void) +{ +#ifdef CONFIG_OF + if (devtree_detected) + return 0; +#endif + /* Declare SPI devices to the SPI core */ + if (!is_cy14b101p_exist()) + spi_register_board_info(&spi_rtc_fm33256, 1); +# ifdef CONFIG_RTC_DRV_CY14B101P + else + spi_register_board_info(&spi_rtc_cy14b101p, 1); +# endif + + spi_register_board_info(&spi_rom_s25fl064a, 1); + + return 0; +} + +module_init(register_spi_devices); diff --git a/arch/l/kernel/i2c-spi/spi.c b/arch/l/kernel/i2c-spi/spi.c new file mode 100644 index 000000000000..8b6d5f5f5272 --- /dev/null +++ b/arch/l/kernel/i2c-spi/spi.c @@ -0,0 +1,779 @@ +/* + * Elbrus SPI controller driver + * + * Copyright (C) 2012 MCST + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
+ * + * 2012-05-29 Created + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include + +/* SPI definitions for Elbrus I2C-SPI Controller (part of IOHUB) */ + +#define L_SPI_CONTROL 0x00 +#define L_SPI_DEVICE_SHIFT 0 +/* Maximum number of connected devices */ +#define L_SPI_MAX_DEVICES 4 +#define L_SPI_DEVICE_0 0 +#define L_SPI_DEVICE_1 1 +#define L_SPI_DEVICE_2 2 +#define L_SPI_DEVICE_3 3 +#define L_SPI_ADDRESS_SIZE_SHIFT 2 +#define L_SPI_ADDRESS_SIZE_8 (0 << L_SPI_ADDRESS_SIZE_SHIFT) +#define L_SPI_ADDRESS_SIZE_16 (1 << L_SPI_ADDRESS_SIZE_SHIFT) +#define L_SPI_ADDRESS_SIZE_24 (2 << L_SPI_ADDRESS_SIZE_SHIFT) +#define L_SPI_ADDRESS_SIZE_32 (3 << L_SPI_ADDRESS_SIZE_SHIFT) +#define L_SPI_DATA_SIZE_SHIFT 4 +/* Maximum size of transaction */ +#define L_SPI_MAX_BYTES 64 +#define L_SPI_ADDRESS_PHASE_SHIFT 11 +#define L_SPI_ADDRESS_PHASE_ENABLE (1 << L_SPI_ADDRESS_PHASE_SHIFT) +#define L_SPI_ADDRESS_PHASE_DISABLE (0 << L_SPI_ADDRESS_PHASE_SHIFT) +#define L_SPI_DATA_PHASE_SHIFT 12 +#define L_SPI_DATA_PHASE_ENABLE (1 << L_SPI_DATA_PHASE_SHIFT) +#define L_SPI_DATA_PHASE_DISABLE (0 << L_SPI_DATA_PHASE_SHIFT) +#define L_SPI_TRANS_TYPE_SHIFT 13 +#define L_SPI_TRANS_READ (0 << L_SPI_TRANS_TYPE_SHIFT) +#define L_SPI_TRANS_WRITE (1 << L_SPI_TRANS_TYPE_SHIFT) +#define L_SPI_START_SHIFT 14 +#define L_SPI_START (1 << L_SPI_START_SHIFT) +#define L_SPI_KILL_SHIFT 15 +#define L_SPI_KILL (1 << L_SPI_KILL_SHIFT) +#define L_SPI_STATUS 0x04 +#define L_SPI_STATUS_BUSY_SHIFT 0 +#define L_SPI_STATUS_BUSY (1 << L_SPI_STATUS_BUSY_SHIFT) +#define L_SPI_STATUS_INTR_SHIFT 1 +#define L_SPI_STATUS_INTR (1 << L_SPI_STATUS_INTR_SHIFT) +#define L_SPI_STATUS_FAIL_SHIFT 2 +#define L_SPI_STATUS_FAIL (1 << L_SPI_STATUS_FAIL_SHIFT) +#define L_SPI_OPCODE 0x08 +#define L_SPI_ADDRESS 0x0c +#define L_SPI_MODE 0x10 +#define L_SPI_MODE_MASK 1 +#define L_SPI_MODE_0 0 +#define L_SPI_MODE_3 1 +#define L_SPI_FREQ_CHANGED_SHIFT 1 +#define L_SPI_FREQ_CHANGED (1 << 
L_SPI_FREQ_CHANGED_SHIFT) +#define L_SPI_DIVIDER_SHIFT 2 +#define L_SPI_DIVIDER_2 (0 << L_SPI_DIVIDER_SHIFT) +#define L_SPI_DIVIDER_4 (1 << L_SPI_DIVIDER_SHIFT) +#define L_SPI_DIVIDER_8 (2 << L_SPI_DIVIDER_SHIFT) +#define L_SPI_DIVIDER_16 (3 << L_SPI_DIVIDER_SHIFT) +#define L_SPI_DIVIDER_MASK 0xc +#define L_SPI_MODE_INTR_SHIFT 4 +#define L_SPI_MODE_INTR (1 << L_SPI_MODE_INTR_SHIFT) +#define L_SPI_MODE_IVN_CLK_SHIFT 6 +#define L_SPI_MODE_IVN_CLK (1 << L_SPI_MODE_IVN_CLK_SHIFT) + +/* To avoid recursion when dump panic to nvram */ +#ifdef CONFIG_NVRAM_PANIC +#undef DEBUG +#endif + + +struct l_spi { + int mode; + void __iomem *cntrl; + void __iomem *data; + struct device *dev; + struct platform_device *pdev; + struct spi_master *master; + u32 speed_hz_min; + u32 speed_hz_max; + u32 baseclk; +}; + + +/* Reading/writing SPI controller registers */ + +static u32 l_spi_read(struct l_spi *l_spi, int reg) +{ + u32 res; + + res = readl(l_spi->cntrl + reg); + + dev_dbg(l_spi->dev, "reading register %s addr %px + reg 0x%x, got %x\n", + (reg == L_SPI_CONTROL) ? "CONTROL" : + (reg == L_SPI_STATUS) ? "STATUS" : + (reg == L_SPI_ADDRESS) ? "ADDRESS" : + (reg == L_SPI_MODE) ? "MODE" : + (reg == L_SPI_OPCODE) ? "OPCODE" : + "UNKNOWN", + l_spi->cntrl, reg, + res); + + return res; +} + +static void l_spi_write(struct l_spi *l_spi, u32 val, int reg) +{ + dev_dbg(l_spi->dev, "writing register %s, value %x\n", + (reg == L_SPI_CONTROL) ? "CONTROL" : + (reg == L_SPI_STATUS) ? "STATUS" : + (reg == L_SPI_ADDRESS) ? "ADDRESS" : + (reg == L_SPI_MODE) ? "MODE" : + (reg == L_SPI_OPCODE) ? 
"OPCODE" : + "UNKNOWN", + val); + + writel(val, l_spi->cntrl + reg); +} + + +#define MAX_BUSY_WAIT_LOOPS 10000000 + +static void l_spi_wait_busy(struct l_spi *l_spi) +{ + int status; + u32 loops = 0; + + do { + status = l_spi_read(l_spi, L_SPI_STATUS); + if (loops++ == MAX_BUSY_WAIT_LOOPS) { + dev_err(l_spi->dev, "Timed out waiting for the SPI controller!\n"); + break; + } + } while (status & L_SPI_STATUS_BUSY); + + /* Reset interrupt & fail bits */ + l_spi_write(l_spi, status, L_SPI_STATUS); +} + +static void l_spi_wait_freq(struct l_spi *l_spi) +{ + u32 mode; + u32 loops = 0; + + do { + mode = l_spi_read(l_spi, L_SPI_MODE); + if (loops++ == MAX_BUSY_WAIT_LOOPS) { + dev_err(l_spi->dev, "Timed out waiting for the SPI controller to change frequency!\n"); + break; + } + } while (!(mode & L_SPI_FREQ_CHANGED)); + + /* Reset 'freq' field */ + l_spi_write(l_spi, mode, L_SPI_MODE); +} + +static u32 l_spi_wait_completion(struct l_spi *l_spi) +{ + u32 status; + u32 loops = 0; + + do { + status = l_spi_read(l_spi, L_SPI_STATUS); + if (loops++ == MAX_BUSY_WAIT_LOOPS) { + dev_err(l_spi->dev, "Timed out waiting for the SPI controller to finish transaction!\n"); + break; + } + } while ((status & (L_SPI_STATUS_INTR | L_SPI_STATUS_FAIL)) == 0); + + /* Reset interrupt & fail bits */ + l_spi_write(l_spi, status, L_SPI_STATUS); + + return status; +} + + +/* Change bus speed or mode as requested. 
*/ +static int l_spi_set_mode_and_freq(struct spi_device *spi, + int spi_mode, int spi_freq) +{ + struct l_spi *l_spi = spi_master_get_devdata(spi->master); + struct device *dev = l_spi->dev; + int n, prev_n; + u32 mode; + dev_dbg(dev, "Setting mode %x and freq %d Hz, current mode %x and current speeds: %u - %u\n", + spi_mode, spi_freq, l_spi->mode, + l_spi->speed_hz_min, l_spi->speed_hz_max); + + if (spi_mode == l_spi->mode + && (!spi_freq || (spi_freq < l_spi->speed_hz_max && + spi_freq >= l_spi->speed_hz_min))) + return 0; + + mode = l_spi_read(l_spi, L_SPI_MODE); + prev_n = mode & L_SPI_DIVIDER_MASK; + + if (spi_freq) { + n = DIV_ROUND_UP(l_spi->baseclk, spi_freq); + if (n <= 2) { + n = L_SPI_DIVIDER_2; + l_spi->speed_hz_min = l_spi->baseclk / 2; + l_spi->speed_hz_max = UINT_MAX; + } else if (n <= 4) { + n = L_SPI_DIVIDER_4; + l_spi->speed_hz_min = l_spi->baseclk / 4; + l_spi->speed_hz_max = l_spi->baseclk / 2; + } else if (n <= 8) { + n = L_SPI_DIVIDER_8; + l_spi->speed_hz_min = l_spi->baseclk / 8; + l_spi->speed_hz_max = l_spi->baseclk / 4; + } else if (n <= 16) { + n = L_SPI_DIVIDER_16; + l_spi->speed_hz_min = 0; + l_spi->speed_hz_max = l_spi->baseclk / 8; + } else { + dev_dbg(dev, "requested speed %d is not supported\n", + spi_freq); + return -EINVAL; + } + } else { + /* No speed was specified so do not change it */ + n = prev_n; + } + + dev_dbg(dev, "previous divider %d, new divider %d\n", prev_n, n); + + if (n != prev_n || spi_mode != l_spi->mode) { + l_spi->mode = spi_mode; + + /* Frequency divider or SPI mode has changed */ + mode &= ~L_SPI_DIVIDER_MASK; + mode |= n; + mode &= ~(L_SPI_MODE_MASK | L_SPI_MODE_IVN_CLK); + if (spi_mode == SPI_MODE_0) { + mode |= L_SPI_MODE_0; + } else if (spi_mode == SPI_MODE_1) { + mode |= L_SPI_MODE_3; + mode |= L_SPI_MODE_IVN_CLK; + } else { + mode |= L_SPI_MODE_3; + } + + dev_dbg(dev, "set divider to %d and mode to %d\n", + (n == L_SPI_DIVIDER_2) ? 2 : + (n == L_SPI_DIVIDER_4) ? 4 : + (n == L_SPI_DIVIDER_8) ? 
8 : + (n == L_SPI_DIVIDER_16) ? 16 : 0, + (spi_mode == SPI_MODE_0) ? 0 : 3); + + l_spi_write(l_spi, mode, L_SPI_MODE); + + /* Wait until the new frequency + * divider is set */ + if (n != prev_n) + l_spi_wait_freq(l_spi); + } + + return 0; +} + +static int l_spi_transfer(struct spi_device *spi, struct spi_message *m) +{ + struct l_spi *l_spi = spi_master_get_devdata(spi->master); + struct device *dev = l_spi->dev; + struct spi_transfer *t; + int last = 1, ret = 0; +#define REG_BYTES 5 +#define BUF_BYTES L_SPI_MAX_BYTES + u8 reg[REG_BYTES], buf[BUF_BYTES], *rbuf; + unsigned long reg_i, buf_i, buf_write; + u32 speed_hz; + u16 delay_usecs; + u32 status, cmd; + + m->actual_length = 0; + + if (unlikely(spi->chip_select >= L_SPI_MAX_DEVICES)) { + dev_err(dev, "spi transfer: bad spi device number %d\n", + spi->chip_select); + return -EINVAL; + } + + /* check each transfer's parameters */ + list_for_each_entry(t, &m->transfers, transfer_list) { + u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word; + speed_hz = t->speed_hz ? : spi->max_speed_hz; + + bits_per_word = bits_per_word ? : 8; + + if (unlikely(bits_per_word != 8)) { + dev_err(dev, "spi transfer: requested bits_per_word %d " + "is not supported\n", bits_per_word); + return -EINVAL; + } + + if (unlikely(speed_hz < l_spi->baseclk / 16)) { + dev_err(dev, "spi transfer: requested speed %d " + "is lower than %d\n", speed_hz, + l_spi->baseclk / 16); + return -EINVAL; + } + + if (unlikely(!t->tx_buf && !t->rx_buf && t->len)) { + dev_err(dev, "spi transfer: message is not empty but " + "no buffers were provided\n"); + return -EINVAL; + } + + if (unlikely(!t->len)) { + dev_err(dev, "spi transfer: chip selecting is not " + "supported\n"); + return -EIO; + } + } + + /* l_spi controller can do only one type of transactions: + * + * Write 1, 2, 3, 4 or 5 bytes followed by either write + * or read of up to 64 bytes. 
+ * + * Chipselect is automatically asserted at the beginning + * of the transaction and de-asserted at its end. + * + * So we have to merge transfers in the message into + * this transaction and fail if thansfers do not follow + * the pattern. */ + + t = list_entry(&m->transfers, struct spi_transfer, transfer_list); +next_transaction: + rbuf = NULL; + speed_hz = 0; + reg_i = 0; + buf_i = 0; + buf_write = 0; + delay_usecs = 0; + cmd = 0; + + list_for_each_entry_continue(t, &m->transfers, transfer_list) { + unsigned int len = t->len; + + last = (t->transfer_list.next == &m->transfers); + if (t->speed_hz && (speed_hz == 0 || t->speed_hz < speed_hz)) + speed_hz = t->speed_hz; + + if (t->tx_buf) { + unsigned long tx_i = 0; + const char *tbuf = t->tx_buf; + + if (unlikely(buf_i && !buf_write)) { + dev_err(dev, "write-read-write sequence\n"); + ret = -EIO; + goto out; + } + if (unlikely(len > BUF_BYTES - buf_i + + REG_BYTES - reg_i)) { + dev_err(dev, "write size is too big: " + "%ld bytes\n", len + reg_i + buf_i); + ret = -EIO; + goto out; + } + if (len > REG_BYTES - reg_i) { + buf_write = 1; + rbuf = NULL; + } + while (tx_i < len && reg_i < REG_BYTES) + reg[reg_i++] = tbuf[tx_i++]; + while (tx_i < len && buf_i < BUF_BYTES) + buf[buf_i++] = tbuf[tx_i++]; + } + + if (t->rx_buf) { + if (unlikely(!reg_i)) { + dev_err(dev, "sequence starts with a read\n"); + ret = -EIO; + goto out; + } + if (unlikely(buf_i && buf_write)) { + dev_err(dev, "write more than 5 bytes - read " + "sequence\n"); + ret = -EIO; + goto out; + } + if (unlikely(buf_i)) { + dev_err(dev, "read-read sequence\n"); + ret = -EIO; + goto out; + } + if (unlikely(len > BUF_BYTES - buf_i)) { + dev_err(dev, "read size is too big: " + "%ld bytes\n", len + buf_i); + ret = -EIO; + goto out; + } + buf_write = 0; + rbuf = t->rx_buf; + buf_i += len; + } + + if (last || t->cs_change) { + /* If this is the last trnasfer we'll ignore the flag. 
+ * And if this is not the last transfer, we must + * deselect chip after it is done. Either way, + * the transaction has been formed. */ + delay_usecs = t->delay_usecs; + break; + } else if (unlikely(t->delay_usecs)) { + dev_err(dev, "delay in the middle of transaction " + "will be ignored\n"); + } + } + + /* Change bus speed or mode if requested. */ + ret = l_spi_set_mode_and_freq(spi, spi->mode, speed_hz); + if (ret) + goto out; + + /* Now prepare control register and start the transaction */ + BUG_ON(!reg_i); + + dev_dbg(dev, "writing first %ld byte(s) from " + "\"0x%02hhx 0x%02hhx 0x%02hhx 0x%02hhx 0x%02hhx\", " + "%s %ld byte(s) %s buffer, device %d\n", + reg_i, reg[0], reg[1], reg[2], reg[3], reg[4], + buf_write ? "writing" : "reading", buf_i, + buf_write ? "to" : "from", spi->chip_select); + + l_spi_write(l_spi, reg[0], L_SPI_OPCODE); + + if (reg_i > 1) { + u32 addr = 0; + cmd |= L_SPI_ADDRESS_PHASE_ENABLE; + switch (reg_i - 1) { + case 1: + addr = reg[1]; + cmd |= L_SPI_ADDRESS_SIZE_8; + break; + case 2: + addr = (reg[1] << 8) | reg[2]; + cmd |= L_SPI_ADDRESS_SIZE_16; + break; + case 3: + addr = (reg[1] << 16) | (reg[2] << 8) | reg[3]; + cmd |= L_SPI_ADDRESS_SIZE_24; + break; + case 4: + /* This is the correct order for 32 bit address. */ + addr = (reg[1] << 16) | (reg[2] << 8) | + reg[3] | (reg[4] << 24); + cmd |= L_SPI_ADDRESS_SIZE_32; + break; + } + l_spi_write(l_spi, addr, L_SPI_ADDRESS); + } else { + cmd |= L_SPI_ADDRESS_PHASE_DISABLE; + } + + if (buf_i) { + u32 data_size = (buf_i == 64) ? 0 : buf_i; + cmd |= L_SPI_DATA_PHASE_ENABLE | + (data_size << L_SPI_DATA_SIZE_SHIFT); + } else { + cmd |= L_SPI_DATA_PHASE_DISABLE; + } + + cmd |= buf_write ? L_SPI_TRANS_WRITE : L_SPI_TRANS_READ; + cmd |= spi->chip_select << L_SPI_DEVICE_SHIFT; + cmd |= L_SPI_START; + + if (buf_i) { + /* i2c-driver will do buffer locking. 
*/ + + if (buf_write) + /* Prepare data to be sent */ + memcpy_toio(l_spi->data, buf, buf_i); + } + l_spi_write(l_spi, cmd, L_SPI_CONTROL); + + status = l_spi_wait_completion(l_spi); + + if (buf_i) { + /* Receive data */ + if (!buf_write) { +#ifdef DEBUG + int i; +#endif + memcpy_fromio(rbuf, l_spi->data, buf_i); +#ifdef DEBUG + dev_dbg(dev, "read data:"); + for (i = 0; i < buf_i; i++) + pr_debug(" %02hhx", rbuf[i]); + pr_debug("\n"); +#endif + } + } + + if (status & L_SPI_STATUS_FAIL) { + dev_err(dev, "write operation failed\n"); + ret = -EIO; + goto out; + } + + m->actual_length += reg_i + buf_i; + if (delay_usecs) + udelay(delay_usecs); + + if (!last) + /* More transactions to do */ + goto next_transaction; + +out: + + dev_dbg(dev, "spi_transfer: status %d\n", ret); + + m->status = ret; + m->complete(m->context); + + return ret; +} + +#if 0 +ifdef CONFIG_NVRAM_PANIC + +static int l_raw_wait_for_comletion(struct l_spi *l_spi) +{ + int loops = 0; + u32 status; + do { + if (loops++ == MAX_BUSY_WAIT_LOOPS) { +printk("l_raw_write_panic_to_nvram: wait for completion failed\n"); + return 1; + } + status = readl(l_spi->cntrl + L_SPI_STATUS); + } while ((status & (L_SPI_STATUS_INTR | L_SPI_STATUS_FAIL)) == 0); + /* Reset interrupt & fail bits */ + writel(status, l_spi->cntrl + L_SPI_STATUS); + return 0; +} + +int l_raw_write_panic_to_nvram(struct spi_device *spi, int rst, int wren, int wrcmd, + u_int off) +{ + u32 cmd; + struct l_spi *l_spi = spi_master_get_devdata(spi->master); + int r = 0; + char c; + int i; + static int first = 1; + + if (first) { + l_spi_set_mode_and_freq(spi, 0, 0x1000000); + first = 0; + } + cmd = (1 << L_SPI_DATA_SIZE_SHIFT) | L_SPI_DATA_PHASE_ENABLE | + (spi->chip_select << L_SPI_DEVICE_SHIFT) | L_SPI_START; + for (i = 0; i < 100; i++) { + writel(rst, l_spi->cntrl + L_SPI_OPCODE); + memcpy_fromio(l_spi->data, &c, 1); + if ((c & 1) == 0) + break; + udelay(100); + } + if (i == 100) { + goto abort; + } + + writel(wren, l_spi->cntrl + L_SPI_OPCODE); 
+ cmd = (spi->chip_select << L_SPI_DEVICE_SHIFT) | L_SPI_START; +printk("l_raw_write_panic_to_nvram: code = %d, cmd = 0x%08x\n", wren, cmd); + writel(cmd, l_spi->cntrl + L_SPI_CONTROL); + udelay(100); + + writel(wrcmd, l_spi->cntrl + L_SPI_OPCODE); + writel(off, l_spi->cntrl + L_SPI_ADDRESS); + cmd = L_SPI_ADDRESS_PHASE_ENABLE | L_SPI_ADDRESS_SIZE_32 | + L_SPI_START; + cmd |= spi->chip_select << L_SPI_DEVICE_SHIFT; +printk("l_raw_write_panic_to_nvram: code = %d, off = 0x%08x, cmd = 0x%08x\n", wrcmd, off, cmd); + writel(cmd, l_spi->cntrl + L_SPI_CONTROL); + /* wait for completion */ + if (l_raw_wait_for_comletion(l_spi)) { + goto abort; + } + + r = 1; +abort: + return r; +} + + +#endif +static bool l_spi_mode1_supported(void) +{ + return cpu_has_epic(); +} + +static int l_spi_setup(struct spi_device *spi) +{ + struct l_spi *l_spi = spi_master_get_devdata(spi->master); + struct device *dev = l_spi->dev; + + /* Sanity checks */ + + dev_dbg(dev, "%s setup\n", spi->modalias); + + if (spi->mode != SPI_MODE_0 && spi->mode != SPI_MODE_3 && + (spi->mode != SPI_MODE_1 || !l_spi_mode1_supported())) { + dev_err(dev, "mode %d is not supported\n", spi->mode); + return -EINVAL; + } + + if (spi->bits_per_word != 8) { + dev_err(dev, "bits_per_word %d is not supported\n", + spi->bits_per_word); + return -EINVAL; + } + + if (spi->max_speed_hz < l_spi->baseclk / 16) { + dev_err(dev, "requested bus speed %d is not supported\n", + spi->max_speed_hz); + return -EINVAL; + } + + /* Set SPI speed and mode */ + + return l_spi_set_mode_and_freq(spi, spi->mode, spi->max_speed_hz); +} + +static void l_spi_cleanup(struct spi_device *spi) +{ + struct l_spi *l_spi = spi_master_get_devdata(spi->master); + struct device *dev = l_spi->dev; + + dev_dbg(dev, "%s cleanup\n", spi->modalias); +} + +static size_t l_spi_max_transfer_size(struct spi_device *spi) +{ + return L_SPI_MAX_BYTES; +} + +static int l_spi_probe(struct platform_device *pdev) +{ + int ret; + struct spi_master *master; + struct l_spi 
*l_spi; + u32 mode; + int freq_changed; + struct resource *res; + char s[64]; + + master = spi_alloc_master(&pdev->dev, sizeof(struct l_spi)); + if (!master) + return -ENOMEM; + + l_spi = spi_master_get_devdata(master); + platform_set_drvdata(pdev, l_spi); + + /* init l_spi */ + l_spi->baseclk = 100 * 1000 * 1000; /* 100 MHz */ + l_spi->master = master; + l_spi->dev = &pdev->dev; + l_spi->pdev = pdev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + l_spi->cntrl = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (IS_ERR(l_spi->cntrl)) + return PTR_ERR(l_spi->cntrl); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + l_spi->data = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (IS_ERR(l_spi->data)) + return PTR_ERR(l_spi->data); + + master->bus_num = pdev->id; + sprintf(s, "/l_spi@%d", pdev->id); + master->dev.of_node = of_find_node_by_path(s); + + master->num_chipselect = L_SPI_MAX_DEVICES; + + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA; + + master->setup = l_spi_setup; + master->transfer = l_spi_transfer; + master->cleanup = l_spi_cleanup; + master->max_transfer_size = l_spi_max_transfer_size; + + /* Wait for controller */ + l_spi_wait_busy(l_spi); + + /* Initialize SPI speed and mode */ + /* NOTE: we are not working around hardware bug when SPI bus + * has 50 MHz speed after reset althouth SPI_MODE register + * is set to 12.5 MHz since it's being worked around by boot. 
*/ + mode = l_spi_read(l_spi, L_SPI_MODE); + if (mode & L_SPI_FREQ_CHANGED) /* Reset 'freq' field if it was set */ + l_spi_write(l_spi, mode, L_SPI_MODE); + mode &= ~L_SPI_FREQ_CHANGED; + mode = (mode & ~L_SPI_MODE_MASK) | L_SPI_MODE_0; + l_spi->mode = SPI_MODE_0; + freq_changed = ((mode & L_SPI_DIVIDER_MASK) != L_SPI_DIVIDER_8); + mode = (mode & ~L_SPI_DIVIDER_MASK) | L_SPI_DIVIDER_8; + l_spi->speed_hz_min = l_spi->baseclk / 8; + l_spi->speed_hz_max = l_spi->baseclk / 4; + l_spi_write(l_spi, mode, L_SPI_MODE); + + if (freq_changed) + l_spi_wait_freq(l_spi); + + ret = spi_register_master(master); + if (ret < 0) { + dev_err(&pdev->dev, "spi_register_master error.\n"); + goto error; + } + + dev_info(&pdev->dev, "probed\n"); + + return 0; + +error: + spi_master_put(master); + + dev_info(&pdev->dev, "probed with errors (%d)\n", ret); + + return ret; +} + +static int l_spi_remove(struct platform_device *pdev) +{ + struct l_spi *l_spi = dev_get_drvdata(&pdev->dev); + + spi_unregister_master(l_spi->master); + + return 0; +} + + +static struct platform_driver l_spi_driver = { + .driver = { + .name = "l_spi", + .owner = THIS_MODULE, + }, + .probe = l_spi_probe, + .remove = l_spi_remove +}; + +static __init +int l_spi_init(void) +{ + return platform_driver_register(&l_spi_driver); +} +module_init(l_spi_init); + +static __exit +void l_spi_exit(void) +{ + platform_driver_unregister(&l_spi_driver); +} +module_exit(l_spi_exit); + +MODULE_AUTHOR("Alexander Fyodorov"); +MODULE_DESCRIPTION("Elbrus SPI controller driver"); +MODULE_LICENSE("GPL"); diff --git a/arch/l/kernel/l-i2c2.c b/arch/l/kernel/l-i2c2.c new file mode 100644 index 000000000000..19fd08e34188 --- /dev/null +++ b/arch/l/kernel/l-i2c2.c @@ -0,0 +1,428 @@ +/* + * linux/arch/l/kernel/l_i2c2.c + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * Implementation of Elbrus I2C master. + */ +#define DEBUG +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + + +/******************************************************************************* + * I2C Registers + ******************************************************************************* + */ +#define I2C_REG_PRER_LO (0x00) /* Clock Prescale register lo-byte (RW) */ +#define I2C_REG_PRER_HI (0x04) /* Clock Prescale register hi-byte (RW) */ +#define I2C_REG_CTR (0x08) /* Control Register (RW) */ +#define I2C_REG_TXR (0x0c) /* Transmit Register (W) */ +#define I2C_REG_RXR (0x0c) /* Receive Register (R) */ +#define I2C_REG_CR (0x10) /* Command Register (W) */ +#define I2C_REG_SR (0x18) /* Status Register (R) */ +#define I2C_REG_AUX (0x1c) /* Reset Register */ + +/* Prescaler divider evaluates as (PCICLK/(5*SCLK))-1 */ +#define NORMAL_SCL 0x3F + +/* Control Register bits */ +#define I2C_CTR_EN (1 << 7) /* I2C core enable bit */ +#define I2C_CTR_IEN (1 << 6) /* I2C core interrupt enable bit */ + +/* Command Register bits */ +#define I2C_CR_STA (1 << 7) /* generate (repeated) start condition */ +#define I2C_CR_STO (1 << 6) /* generate stop condition */ +#define I2C_CR_RD (1 << 5) /* read from slave */ +#define I2C_CR_WR (1 << 4) /* write to slave */ +#define I2C_CR_NACK (1 << 3) /* when a receiver, sent I2C_CR_NACK */ + /* Interrupt acknowledge. When set, clears pending interrrupt */ +#define I2C_CR_IACK (1 << 0) + +/* Status Register bits */ +/* Receive acknowledge from slave. '1' - no acknowledge received */ +#define I2C_SR_RxACK (1 << 7) +/* I2C bus busy. '1' after START, '0' after STOP */ +#define I2C_SR_BUSY (1 << 6) +#define I2C_SR_AL (1 << 5) /* Arbitration lost */ +/* Transfer in progress. 
'1' when transferring data */ +#define I2C_SR_TIP (1 << 1) +#define I2C_SR_IF (1 << 0) /* Interrupt flag */ + +/* Transmit Register operations */ +#define I2C_READ_OP 0x01 /* Reading from slave (x << 1 | I2C_READ_OP) */ +#define I2C_WRITE_OP 0xFE /* Writing to slave (x << 1 & I2C_WRITE_OP) */ + +struct l_i2c2 { + struct i2c_adapter adap; + struct platform_device *pdev; + void __iomem *regs; +}; + +# define EXTPLLI2C_RD (0 << 31) +# define EXTPLLI2C_WR (1 << 31) + +static inline void +st2_i2c_write(void __iomem *regs, unsigned long reg, u8 val) +{ + writel(EXTPLLI2C_WR | (reg / 4 << 8) | val, regs); +} + +static inline u8 st2_i2c_read(void __iomem *regs, unsigned long reg) +{ + uint32_t result = 0; + writel(EXTPLLI2C_RD | (reg / 4 << 8), regs); + result = readl(regs); + return result; +} + +static inline void +raw_i2c_write(void __iomem *regs, unsigned long reg, u8 val) +{ + __raw_writel(val, regs + reg); +} + +static inline u8 raw_i2c_read(void __iomem *regs, unsigned long reg) +{ + unsigned int result = 0; + result = __raw_readl(regs + reg); + return result; +} + +static inline void +i2c_write(struct l_i2c2 *i2c, unsigned long reg, u8 val) +{ + struct l_i2c2_platform_data *pdata = dev_get_platdata(&i2c->pdev->dev); + if (pdata->two_stage_register_access) + st2_i2c_write(i2c->regs, reg, val); + else + raw_i2c_write(i2c->regs, reg, val); +} + +static inline u8 i2c_read(struct l_i2c2 *i2c, unsigned long reg) +{ + struct l_i2c2_platform_data *pdata = dev_get_platdata(&i2c->pdev->dev); + unsigned int r = 0; + if (pdata->two_stage_register_access) + r = st2_i2c_read(i2c->regs, reg); + else + r = raw_i2c_read(i2c->regs, reg); + return r; +} + +static void set_prescaler(struct l_i2c2 *i2c, int value) +{ + i2c_write(i2c, I2C_REG_PRER_LO, value & 0xFF); + i2c_write(i2c, I2C_REG_PRER_HI, (value >> 8) & 0xFF); +} + + +#define PMC_I2C_TIMEOUT_USEC (1000 * 1000) + +static int i2c_send(struct l_i2c2 *i2c, int cmd, int data) +{ + int i; + + if (cmd & I2C_CR_WR) + i2c_write(i2c, 
I2C_REG_TXR, data); + + i2c_write(i2c, I2C_REG_CR, cmd); + + for (i = 0; i < PMC_I2C_TIMEOUT_USEC; i++) { + unsigned status = i2c_read(i2c, I2C_REG_SR); + if (status & I2C_SR_AL) { + dev_dbg(&i2c->adap.dev, "i2c_send: busy: arbitration lost\n"); + return -EAGAIN; + } + if (!(status & I2C_SR_TIP)) + return 0; + udelay(1); + } + dev_err(&i2c->adap.dev, "i2c_send: timeout: transfer in progress.\n"); + return -ETIME; +} + +static int l_i2c2_read(struct i2c_adapter *adap, unsigned char *buf, + int length, int flags, int stop_bit) +{ + int ret = 0; + struct l_i2c2 *i2c = i2c_get_adapdata(adap); + if (flags & I2C_M_RECV_LEN) { + dev_err(&adap->dev, "%s: FIXME: I2C_M_RECV_LEN not " + "supported.\n", adap->name); + return -ENOTSUPP; + } + while (length--) { + int ret; + int v = I2C_CR_RD; + + if (length == 0) { + if (!(flags & I2C_M_NO_RD_ACK)) + v |= I2C_CR_NACK; + if (stop_bit) + v |= I2C_CR_STO; + } + ret = i2c_send(i2c, v, 0); + if (ret) + break; + *buf++ = i2c_read(i2c, I2C_REG_RXR); + } + return ret; +} + +static int l_i2c2_write(struct i2c_adapter *adap, unsigned char *buf, + int length, int flags, int stop_bit) +{ + int ret = 0; + struct l_i2c2 *i2c = i2c_get_adapdata(adap); + while (length--) { + int v = I2C_CR_WR; + + if (length == 0) { + if (!(flags & I2C_M_NO_RD_ACK)) + v |= I2C_CR_NACK; + if (stop_bit) + v |= I2C_CR_STO; + } + ret = i2c_send(i2c, v, *buf++); + if (ret) + break; + if (!(flags & I2C_M_IGNORE_NAK)) { + if (i2c_read(i2c, I2C_REG_SR) & I2C_SR_RxACK) { + dev_dbg(&adap->dev, + "no acknowledge from slave.\n"); + ret = -EIO; + break; + } + } + } + + return ret; +} + +static int +l_i2c2_xfer(struct i2c_adapter *adap, struct i2c_msg *pmsg, int num) +{ + int i, ret = 0; + struct l_i2c2 *i2c = i2c_get_adapdata(adap); + int last_msg = num - 1; + + if (0) + dev_dbg(&adap->dev, "%s: processing %d messages:\n", + adap->name, num); + + for (i = 0; i < num; i++, pmsg++) { + int addr; + int flags = pmsg->flags; + int start = !(flags & I2C_M_NOSTART); + int nak = 
!(flags & I2C_M_IGNORE_NAK); + int stop_bit = i == last_msg ? 1 : 0; + if (0) + dev_dbg(&adap->dev, + " #%d: %sing %d byte%s %s 0x%02x\n", i, + pmsg->flags & I2C_M_RD ? "read" : "writ", + pmsg->len, pmsg->len > 1 ? "s" : "", + pmsg->flags & I2C_M_RD ? "from" : "to", + pmsg->addr); + + if (flags & I2C_M_TEN) { /* a ten bit address */ + dev_err(&adap->dev, "FIXME: a ten bit address not " + "supported.\n"); + ret = -ENOTSUPP; + break; + } else { /* normal 7bit address */ + addr = pmsg->addr << 1; + if (flags & I2C_M_RD) + addr |= 1; + if (flags & I2C_M_REV_DIR_ADDR) + addr ^= 1; + } + + if (start) { /* Sending device address */ + ret = i2c_send(i2c, I2C_CR_STA | I2C_CR_WR, addr); + if (ret) + break; + } + + if (nak && i2c_read(i2c, I2C_REG_SR) & I2C_SR_RxACK) { + dev_dbg(&adap->dev, "no acknowledge from slave.\n"); + ret = -ENXIO; + break; + } + /* check for bus probe */ + if ((num == 1) && (pmsg->len == 0)) { + i = 1; + break; + } + + ret = flags & I2C_M_RD ? + l_i2c2_read(adap, pmsg->buf, + pmsg->len, flags, stop_bit) : + l_i2c2_write(adap, pmsg->buf, + pmsg->len, flags, stop_bit); + + if (ret) + break; + } + if (ret) + return ret; + else + return i; +} + +static u32 l_i2c2_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm l_i2c2_algo = { + .master_xfer = l_i2c2_xfer, + .functionality = l_i2c2_func, +}; + +static void l_i2c2_init_hw(struct l_i2c2 *i2c) +{ + struct l_i2c2_platform_data *pdata = dev_get_platdata(&i2c->pdev->dev); + + /* Prescaler divider evaluates as (BASE_FREQ/(4*SCLK))-1 */ + set_prescaler(i2c, pdata->base_freq_hz / 4 / + pdata->desired_freq_hz - 1); + + /* Enable I2C core */ + i2c_write(i2c, I2C_REG_CTR, I2C_CTR_EN); +} + +static int l_i2c2_probe(struct platform_device *pdev) +{ + struct l_i2c2 *i2c; + int ret; + char of_path[] = "/pmc_i2c"; + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +#if 0 /* + * It calls request_mem_region(), which fails if 
registers of two devices + * lay in the same physical page. + */ + void __iomem *regs = devm_ioremap_resource(&pdev->dev, res); +#else + void __iomem *regs = devm_ioremap(&pdev->dev, + res->start, resource_size(res)); +#endif + struct l_i2c2_platform_data *pdata = dev_get_platdata(&pdev->dev); + + if (IS_ERR(regs)) + return PTR_ERR(regs); + + i2c = devm_kzalloc(&pdev->dev, sizeof(struct l_i2c2), GFP_KERNEL); + if (!i2c) + return -ENOMEM; + + platform_set_drvdata(pdev, i2c); + i2c->pdev = pdev; + i2c->regs = regs; + + i2c->adap.owner = THIS_MODULE; + i2c->adap.class = I2C_CLASS_DDC; + i2c_set_adapdata(&i2c->adap, i2c); + snprintf(i2c->adap.name, sizeof(i2c->adap.name), + "Elbrus %s i2c bus", pdev->name); + if (strcmp("pmc-i2c", pdev->name)) + of_path[0] = 0; + + i2c->adap.dev.parent = &pdev->dev; + i2c->adap.dev.of_node = of_find_node_by_path(of_path); + i2c->adap.nr = pdata->bus_nr; /* Fix pmc i2c master number */ + i2c->adap.algo = &l_i2c2_algo; + + l_i2c2_init_hw(i2c); + + ret = i2c_add_numbered_adapter(&i2c->adap); + if (ret) { + dev_err(&pdev->dev, "Failed to register i2c\n"); + goto out; + } + +out: + return ret; +} + +static int l_i2c2_remove(struct platform_device *pdev) +{ + struct l_i2c2 *i2c = platform_get_drvdata(pdev); + /* Disable I2C core */ + i2c_write(i2c, I2C_REG_CTR, 0); + i2c_del_adapter(&i2c->adap); + + return 0; +} + + +#ifdef CONFIG_PM_SLEEP +static int l_i2c2_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct l_i2c2 *i2c = platform_get_drvdata(pdev); + /* Disable I2C core */ + i2c_write(i2c, I2C_REG_CTR, 0); + return 0; +} + +static int l_i2c2_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct l_i2c2 *i2c = platform_get_drvdata(pdev); + l_i2c2_init_hw(i2c); + return 0; +} + +static const struct dev_pm_ops l_i2c2_pm_ops = { + .suspend = l_i2c2_suspend, + .resume = l_i2c2_resume, +}; +#endif + +/* driver device registration */ +static const struct 
platform_device_id l_i2c2_driver_ids[] = { + { + .name = "pmc-i2c", + }, { + .name = "mga2-i2c", + }, + { /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(platform, l_i2c2_driver_ids); + +static struct platform_driver l_i2c2_driver = { + .probe = l_i2c2_probe, + .remove = l_i2c2_remove, + .id_table = l_i2c2_driver_ids, + .driver = { + .name = "l-i2c2", +#ifdef CONFIG_PM_SLEEP + .pm = &l_i2c2_pm_ops, +#endif + }, +}; + +module_platform_driver(l_i2c2_driver); + +MODULE_AUTHOR("Dmitriy E. Cherednichenko "); +MODULE_DESCRIPTION("i2c driver for Elbrus processors"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.0"); diff --git a/arch/l/kernel/l-iommu.c b/arch/l/kernel/l-iommu.c new file mode 100644 index 000000000000..dde9a2c68da1 --- /dev/null +++ b/arch/l/kernel/l-iommu.c @@ -0,0 +1,1142 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#ifndef IOMMU_TABLES_NR +#define IOMMU_TABLES_NR 1 +#define IOMMU_LOW_TABLE 0 +#define IOMMU_HIGH_TABLE 0 +#endif + +int __initdata l_use_swiotlb = 0; +int l_iommu_no_numa_bug = 0; +EXPORT_SYMBOL(l_iommu_no_numa_bug); + +int l_iommu_force_numa_bug_on = 0; +EXPORT_SYMBOL(l_iommu_force_numa_bug_on); +unsigned long l_iommu_win_sz = DFLT_IOMMU_WINSIZE; + +static int __initdata l_not_use_prefetch = 0; +static const struct iommu_ops l_iommu_ops; + + +#ifndef l_prefetch_iopte_supported +#define l_prefetch_iopte_supported() 0 +#define l_prefetch_iopte(iopte, prefetch) do {} while (0) +#endif + +#ifndef l_iommu_has_numa_bug +#define l_iommu_has_numa_bug() 0 +#endif + +#ifndef l_has_devices_with_iommu +#define l_has_devices_with_iommu() 0 +#endif + +#ifndef l_iommu_enable_embedded_iommus +#define l_iommu_enable_embedded_iommus(node) do {} while (0) +#endif + +/* iohub, iohub2 supports only 56-bit of virtual address */ +#define L_IOMMU_VA_MASK ((1UL << 56) - 1) + +/* + * These give mapping 
size of each iommu pte/tlb. + */ +#define IO_PAGE_SIZE (1UL << IO_PAGE_SHIFT) +#define IO_PAGE_MASK (~(IO_PAGE_SIZE-1)) +#define IO_PAGE_ALIGN(addr) ALIGN(addr, IO_PAGE_SIZE) + +#define IOMMU_CTRL_IMPL 0xf0000000 /* Implementation */ +#define IOMMU_CTRL_VERS 0x0f000000 /* Version */ +#define IOMMU_CTRL_PREFETCH_EN 0x00000040 /* enable prefeth TTE */ +#define IOMMU_CTRL_CASHABLE_TTE 0x00000020 /* Cachable TTE */ +#define IOMMU_CTRL_RNGE 0x0000001c /* Mapping RANGE */ +#define IOMMU_CTRL_ENAB 0x00000001 /* IOMMU Enable */ + +#define IOMMU_RNGE_OFF 2 + +struct l_iommu { + struct list_head list; /* list of all iommu */ + int node; + unsigned regs_offset; + unsigned companion_regs_offset; + struct mutex mutex; + + struct l_iommu_table { + iopte_t *pgtable; + unsigned long pgtable_pa; + unsigned long map_base; + } table[IOMMU_TABLES_NR]; + + unsigned prefetch_supported: 1; + struct iommu_group *default_group; + + struct iommu_device iommu; /* IOMMU core handle */ +}; + +static int l_dev_to_node(struct device *dev) +{ + return dev && dev_to_node(dev) >= 0 ? 
+ dev_to_node(dev) : 0; +} + +static void l_iommu_write(struct l_iommu *iommu, unsigned val, unsigned addr) +{ + __l_iommu_write(iommu->node, val, addr + iommu->regs_offset); + if (iommu->companion_regs_offset) { + __l_iommu_write(iommu->node, val, + addr + iommu->companion_regs_offset); + } +} +#ifdef __l_iommu_set_ba +static inline void l_iommu_set_ba(struct l_iommu *iommu, unsigned long *ba) +{ + __l_iommu_set_ba(iommu->node, ba); +} +#else +static inline void l_iommu_set_ba(struct l_iommu *iommu, unsigned long *ba) +{ + l_iommu_write(iommu, (u32)pa_to_iopte(ba[0]), L_IOMMU_BA); +} +#endif + +static void iommu_flushall(struct l_iommu *iommu) +{ + l_iommu_write(iommu, 0, L_IOMMU_FLUSH_ALL); +} + +static inline void iommu_flush(struct l_iommu *iommu, dma_addr_t addr) +{ + l_iommu_write(iommu, addr_to_flush(addr), L_IOMMU_FLUSH_ADDR); +} + +static unsigned long l_iommu_prot_to_pte(int prot) +{ + unsigned long pte_prot = IOPTE_CACHE; + if (prot & IOMMU_READ) + pte_prot |= IOPTE_VALID; + + if (prot & IOMMU_WRITE) + pte_prot |= IOPTE_VALID | IOPTE_WRITE; + return pte_prot; +} + +struct l_iommu_domain { + struct l_iommu *iommu; + struct iommu_domain domain; /* generic domain data structure */ +}; + +static struct l_iommu_domain *to_l_domain(struct iommu_domain *dom) +{ + return container_of(dom, struct l_iommu_domain, domain); +} + +static struct l_iommu_table *l_iommu_to_table(struct l_iommu *i, + unsigned long iova) +{ + return i->table + l_iommu_get_table(iova); +} + +static unsigned l_iommu_page_indx(struct l_iommu_table *t, unsigned long iova) +{ + return (iova - t->map_base) / IO_PAGE_SIZE; +} + +static iopte_t *l_iommu_iopte(struct l_iommu *i, unsigned long iova) +{ + struct l_iommu_table *t = l_iommu_to_table(i, iova); + return t->pgtable + l_iommu_page_indx(t, iova); +} + +static bool l_dom_iova_hi(unsigned long iova) +{ + return iova & (~0UL << 32) ? 
true : false; +} + +static unsigned l_dom_page_indx(struct iommu_domain *d, unsigned long iova) +{ + if (!l_dom_iova_hi(iova)) + return iova / IO_PAGE_SIZE; + + return (iova - d->map_base) / IO_PAGE_SIZE; +} + +static int l_add_buffer(struct iommu_domain *d, + phys_addr_t phys, unsigned long iova) +{ + int ret; + unsigned long flags; + unsigned i = l_dom_page_indx(d, iova); + if (!l_dom_iova_hi(iova)) { + WARN_ON(d->orig_phys_lo[i]); + d->orig_phys_lo[i] = phys; + return 0; + } + + idr_preload(GFP_ATOMIC); + write_lock_irqsave(&d->lock_hi, flags); + ret = idr_alloc(&d->idr_hi, (void *)phys, i, i + 1, GFP_NOWAIT); + write_unlock_irqrestore(&d->lock_hi, flags); + idr_preload_end(); + + return ret; +} + +static void l_remove_buffer(struct iommu_domain *d, unsigned long iova) +{ + unsigned long flags; + unsigned i = l_dom_page_indx(d, iova); + if (!l_dom_iova_hi(iova)) { + WARN_ON(!d->orig_phys_lo[i]); + d->orig_phys_lo[i] = 0; + return; + } + write_lock_irqsave(&d->lock_hi, flags); + WARN_ON(idr_remove(&d->idr_hi, i) == NULL); + write_unlock_irqrestore(&d->lock_hi, flags); +} + +static phys_addr_t l_dom_lookup_buffer(struct iommu_domain *d, + unsigned long iova) +{ + void *p; + unsigned long flags; + unsigned i = l_dom_page_indx(d, iova); + if (!l_dom_iova_hi(iova)) + return d->orig_phys_lo[i]; + + read_lock_irqsave(&d->lock_hi, flags); + p = idr_find(&d->idr_hi, i); + read_unlock_irqrestore(&d->lock_hi, flags); + + return (phys_addr_t)p; +} + +static phys_addr_t l_alloc_buffer(struct iommu_domain *d, phys_addr_t orig_phys, + size_t size, unsigned long iova, int node) +{ + int ret; + int npages = iommu_num_pages(orig_phys, size, IO_PAGE_SIZE); + gfp_t gfp_mask = __GFP_THISNODE | GFP_ATOMIC | __GFP_NOWARN; + int order = get_order(npages * IO_PAGE_SIZE); + struct page *page = alloc_pages_node(node, gfp_mask, order); + if (!page) + return 0; + ret = l_add_buffer(d, orig_phys, iova); + if (ret < 0) { + __free_pages(page, order); + return 0; + } + return 
page_to_phys(page); +} + +static void l_free_buffer(struct iommu_domain *d, phys_addr_t phys, + size_t size, unsigned long iova) +{ + phys_addr_t orig_paddr = l_dom_lookup_buffer(d, iova); + int npages = iommu_num_pages(phys, size, IO_PAGE_SIZE); + int order = get_order(npages * IO_PAGE_SIZE); + if (!orig_paddr) + return; + __free_pages(phys_to_page(phys), order); + l_remove_buffer(d, iova); +} + +static struct pci_dev *l_dev_to_parent_pcidev(struct device *dev) +{ + while (dev && !dev_is_pci(dev)) + dev = dev->parent; + BUG_ON(!dev); + BUG_ON(!dev_is_pci(dev)); + return to_pci_dev(dev); +} + +/* + * This function checks if the driver got a valid device from the caller to + * avoid dereferencing invalid pointers. + */ +static bool l_iommu_check_device(struct device *dev) +{ + if (!dev || !dev->dma_mask) + return false; + + while (dev && !dev_is_pci(dev)) + dev = dev->parent; + + if (!dev || !dev_is_pci(dev)) + return false; + return true; +} + +static void l_iommu_init_hw(struct l_iommu *iommu, unsigned long win_sz) +{ + int i; + unsigned long pa[ARRAY_SIZE(iommu->table)]; + unsigned long range = ilog2(win_sz) - ilog2(MIN_IOMMU_WINSIZE); + range <<= IOMMU_RNGE_OFF; /* Virtual DMA Address Range */ + for (i = 0; i < ARRAY_SIZE(iommu->table); i++) + pa[i] = iommu->table[i].pgtable_pa; + + l_iommu_set_ba(iommu, pa); + + if (iommu->prefetch_supported) + range |= IOMMU_CTRL_PREFETCH_EN; + + l_iommu_write(iommu, range | IOMMU_CTRL_CASHABLE_TTE | + IOMMU_CTRL_ENAB, L_IOMMU_CTRL); + iommu_flushall(iommu); +} + +static int l_iommu_init_table(struct l_iommu_table *t, unsigned long win_sz, + int node) +{ + int win_bits = ilog2(win_sz); + size_t sz = win_sz / IO_PAGE_SIZE * sizeof(iopte_t); + void *p; + if (t->pgtable) + return 0; + p = kzalloc_node(sz, GFP_KERNEL, node); + if (!p) + goto fail; + t->pgtable_pa = __pa(p); + + t->pgtable = l_iommu_map_table(t->pgtable_pa, win_sz); + if (!t->pgtable) + goto fail; + + t->map_base = (~0UL) << win_bits; + t->map_base &= 
L_IOMMU_VA_MASK; + if (win_bits <= 32) + t->map_base &= 0xFFFFffff; + + return 0; +fail: + return -1; +} + +static void l_iommu_free_table(struct l_iommu_table *t) +{ + if (t->pgtable == NULL) + return; + + t->pgtable = l_iommu_unmap_table(t->pgtable); + kfree(t->pgtable); + t->pgtable = NULL; +} + +struct l_iommu_device { + unsigned regs_offset; + unsigned companion_regs_offset; +}; + +static int l_iommu_init_tables(struct l_iommu *iommu) +{ + unsigned long win_sz = l_iommu_win_sz; + int node = iommu->node; + int n = ARRAY_SIZE(iommu->table), i, ret; + if (win_sz <= (1UL << 32)) + n = 1; + if (n == 2) { + ret = l_iommu_init_table(&iommu->table[IOMMU_LOW_TABLE], + MIN_IOMMU_WINSIZE, node); + if (ret) + goto fail; + ret = l_iommu_init_table(&iommu->table[IOMMU_HIGH_TABLE], + win_sz, node); + } else { + ret = l_iommu_init_table(&iommu->table[IOMMU_LOW_TABLE], + win_sz, node); + } + if (ret) + goto fail; + return ret; +fail: + for (i = 0; i < ARRAY_SIZE(iommu->table); i++) + l_iommu_free_table(iommu->table); + return ret; +} + +static void l_iommu_cleanup_one(struct l_iommu *iommu, int stage) +{ + int i; + for (i = 0; i < ARRAY_SIZE(iommu->table); i++) + l_iommu_free_table(iommu->table); + + switch (stage) { + case 4: + iommu_device_unregister(&iommu->iommu); + case 3: + iommu_device_sysfs_remove(&iommu->iommu); + case 2: + iommu_group_put(iommu->default_group); + case 1: + kfree(iommu); + } +} + +static __init struct l_iommu *l_iommu_init_one(int node, struct device *parent, + const struct l_iommu_device *desc) +{ + int ret, stage = 0; + struct l_iommu *i = kzalloc_node(sizeof(*i), GFP_KERNEL, node); + + if (!i) + return ERR_PTR(-ENOMEM); + stage++; + i->node = node; + i->regs_offset = desc->regs_offset; + i->companion_regs_offset = desc->companion_regs_offset; + + mutex_init(&i->mutex); + + if (l_prefetch_iopte_supported() && !l_not_use_prefetch) + i->prefetch_supported = 1; + + i->default_group = iommu_group_alloc(); + if (IS_ERR(i->default_group)) { + ret = 
PTR_ERR(i->default_group); + goto fail; + } + stage++; + + ret = iommu_device_sysfs_add(&i->iommu, parent, NULL, + "iommu%x", i->regs_offset ? i->regs_offset : node); + if (ret) + goto fail; + stage++; + iommu_device_set_ops(&i->iommu, &l_iommu_ops); + ret = iommu_device_register(&i->iommu); + if (ret) + goto fail; + return i; +fail: + l_iommu_cleanup_one(i, stage); + return ERR_PTR(ret); +} + + +static void l_quirk_enable_local_iommu(struct pci_dev *pdev) +{ + struct l_iommu *i = pdev->dev.archdata.iommu; + WARN_ON(l_iommu_init_tables(i)); + l_iommu_init_hw(i, l_iommu_win_sz); +} +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_MGA26, l_quirk_enable_local_iommu); +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_3D_VIVANTE_R2000P, l_quirk_enable_local_iommu); +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_VP9_BIGEV2_R2000P, l_quirk_enable_local_iommu); +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_VP9_G2_R2000P, l_quirk_enable_local_iommu); + +static const struct pci_device_id l_devices_with_iommu[] = { + { PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_3D_VIVANTE_R2000P)}, + { PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_MGA26)}, + { PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_VP9_BIGEV2_R2000P)}, + { PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_VP9_G2_R2000P)}, + { } /* terminate list */ +}; +/* must correspond to l_devices_with_iommu[] */ +static const unsigned l_iommu_devices_iommu_offset[] = { + 0x2000, + 0x2800, + 0x2c00, + 0x2c00, +}; + +static const struct l_iommu_device l_iommu_devices[] = { + { 0x2000, 0x2400 }, + { 0x2800 }, + { 0x2c00 }, +}; + +static LIST_HEAD(l_iommus); +static struct l_iommu *l_node_to_iommu[MAX_NUMNODES]; + +#define for_each_iommu(iommu) \ + list_for_each_entry(iommu, &l_iommus, list) + +static struct l_iommu *l_iommu_get_iommu_for_device(struct device *dev) +{ + int i; + unsigned o; + struct l_iommu *iommu; + 
const struct pci_device_id *id; + struct pci_dev *pdev; + if (!l_has_devices_with_iommu()) + return NULL; + pdev = l_dev_to_parent_pcidev(dev); + id = pci_match_id(l_devices_with_iommu, pdev); + if (!id) + return NULL; + if (pdev->bus->number != 0) + return NULL; + + i = id - l_devices_with_iommu; + o = l_iommu_devices_iommu_offset[i]; + + for_each_iommu(iommu) { + if (l_dev_to_node(dev) == iommu->node && + iommu->regs_offset == o) { + return iommu; + } + } + BUG(); /* unreachable */ + return NULL; +} + +static struct l_iommu *l_find_iommu(struct device *dev) +{ + struct l_iommu *i; + if (!l_iommu_check_device(dev)) + return NULL; + i = l_iommu_get_iommu_for_device(dev); + if (i) + return i; + return l_node_to_iommu[l_dev_to_node(dev)]; +} + +static void l_iommu_cleaup(void) +{ + struct l_iommu *i, *ii; + list_for_each_entry_safe(i, ii, &l_iommus, list) { + list_del(&i->list); + l_iommu_cleanup_one(i, 4); + } + memset(l_node_to_iommu, 0, sizeof(l_node_to_iommu)); +} + +static __init int __l_iommu_init(int node, struct device *parent) +{ + int j; + struct l_iommu_device default_desc = {}; + struct l_iommu *i; + i = l_iommu_init_one(node, parent, &default_desc); + if (IS_ERR(i)) + goto fail; + list_add(&i->list, &l_iommus); + l_node_to_iommu[node] = i; + if (l_iommu_init_tables(i)) { + i = ERR_PTR(-ENOMEM); + goto fail; + } + l_iommu_init_hw(i, l_iommu_win_sz); + + if (!l_has_devices_with_iommu()) + return 0; + l_iommu_enable_embedded_iommus(node); + + for (j = 0; j < ARRAY_SIZE(l_iommu_devices); j++) { + i = l_iommu_init_one(node, parent, &l_iommu_devices[j]); + if (IS_ERR(i)) + goto fail; + list_add(&i->list, &l_iommus); + } + return 0; +fail: + l_iommu_cleaup(); + return PTR_ERR(i); +} + +static int __init l_iommu_debugfs_init(void) +{ +#if defined CONFIG_IOMMU_DEBUGFS + /*TODO:*/; + return 0; +#else /* CONFIG_IOMMU_DEBUGFS */ + return 0; +#endif /* CONFIG_IOMMU_DEBUGFS */ +} + +/* IOMMU API */ +static int l_iommu_map(struct iommu_domain *iommu_domain, + unsigned 
long iova, phys_addr_t phys, size_t size, + int iommu_prot) +{ + unsigned long prot; + phys_addr_t orig_phys = phys; + struct l_iommu_domain *d = to_l_domain(iommu_domain); + iopte_t *ptep = l_iommu_iopte(d->iommu, iova); + int node = d->iommu->node; + bool copy = l_iommu_has_numa_bug() && + page_to_nid(phys_to_page(phys)) != node; + + if (WARN_ON(!IS_ALIGNED(phys, size))) + return -EINVAL; + if (WARN_ON(!IS_ALIGNED(iova, size))) + return -EINVAL; + if (WARN_ON(size ^ L_PGSIZE_BITMAP)) + return -EINVAL; + if (WARN_ON(!d->iommu->table[IOMMU_LOW_TABLE].pgtable)) + return -ENODEV; + + /* If no access, then nothing to do */ + if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) + return 0; + + if (copy) { + phys = l_alloc_buffer(iommu_domain, orig_phys, size, iova, node); + if (phys == 0) + return -ENOMEM; + } + + prot = l_iommu_prot_to_pte(iommu_prot); + + if (iopte_val(*ptep)) { + panic("iommu: %lx -> %llx: pte (%x) is not empty\n", + iova, phys, iopte_val(*ptep)); + } + + iopte_val(*ptep) = prot | pa_to_iopte(phys); + + return 0; +} + +static size_t l_iommu_unmap(struct iommu_domain *iommu_domain, + unsigned long iova, size_t size, + struct iommu_iotlb_gather *gather) +{ + struct l_iommu_domain *d = to_l_domain(iommu_domain); + iopte_t *ptep = l_iommu_iopte(d->iommu, iova); + + if (WARN_ON(!IS_ALIGNED(iova, size))) + return 0; + if (WARN_ON(size ^ L_PGSIZE_BITMAP)) + return 0; + if (l_iommu_has_numa_bug()) { + l_free_buffer(iommu_domain, + iopte_to_pa(iopte_val(*ptep)), size, iova); + } + + iopte_val(*ptep) = 0; + /* Clear out TSB entry. 
*/ + wmb(); + /*TODO: iotlb_sync */ + iommu_flush(d->iommu, iova); + + return size; +} + +static phys_addr_t l_iommu_iova_to_phys(struct iommu_domain *iommu_domain, + dma_addr_t iova) +{ + struct l_iommu_domain *d = to_l_domain(iommu_domain); + iopte_t *ptep = l_iommu_iopte(d->iommu, iova); + return iopte_to_pa(iopte_val(*ptep)); +} + +static void l_iommu_detach_device(struct iommu_domain *iommu_domain, + struct device *dev) +{ +} + +static int l_iommu_attach_device(struct iommu_domain *iommu_domain, + struct device *dev) +{ + int ret = 0; + unsigned o; + struct page *p; + struct l_iommu_domain *d = to_l_domain(iommu_domain); + struct l_iommu *i = dev->archdata.iommu; + mutex_lock(&i->mutex); + if (l_iommu_has_numa_bug() && !iommu_domain->orig_phys_lo) { + o = get_order(MIN_IOMMU_WINSIZE / IO_PAGE_SIZE * + sizeof(*iommu_domain->orig_phys_lo)); + p = alloc_pages_node(i->node, + __GFP_ZERO | GFP_KERNEL, o); + + if (p) + iommu_domain->orig_phys_lo = page_address(p); + else + ret = -ENOMEM; + } + mutex_unlock(&i->mutex); + + d->iommu = i; + return ret; +} + +static struct iommu_domain *__l_iommu_domain_alloc(unsigned type, int node) +{ + struct l_iommu_domain *d = kzalloc_node(sizeof(*d), GFP_KERNEL, node); + int win_bits = ilog2(l_iommu_win_sz); + unsigned long start = ~0UL << win_bits; + unsigned long end = ~0UL; + if (!d) + return NULL; + + if (type == IOMMU_DOMAIN_DMA) { + if (iommu_get_dma_cookie(&d->domain) != 0) + goto err_pgtable; + } else if (type != IOMMU_DOMAIN_UNMANAGED) { + goto err_pgtable; + } + if (win_bits <= 32) { + start &= 0xffffFFFF; + end &= 0xffffFFFF; + } else { + start = 0; + end &= L_IOMMU_VA_MASK; + } + d->domain.geometry.aperture_start = start; + d->domain.geometry.aperture_end = end; + d->domain.geometry.force_aperture = true; + + idr_init(&d->domain.idr_hi); + rwlock_init(&d->domain.lock_hi); + d->domain.map_base = (~0UL) << win_bits; + d->domain.map_base &= L_IOMMU_VA_MASK; + + return &d->domain; + +err_pgtable: + kfree(d); + return NULL; 
+} + +static struct iommu_domain *l_iommu_domain_alloc(unsigned type) +{ + return __l_iommu_domain_alloc(type, -1); +} + +static void l_iommu_domain_free(struct iommu_domain *iommu_domain) +{ + struct l_iommu_domain *d = to_l_domain(iommu_domain); + iommu_put_dma_cookie(iommu_domain); + idr_destroy(&d->domain.idr_hi); + kfree(d); +} + +static int l_iommu_add_device(struct device *dev) +{ + struct iommu_group *group; + struct l_iommu *i; + if (!l_iommu_check_device(dev)) + return -ENODEV; + i = l_find_iommu(dev); + if (!i) + return -ENODEV; + dev->archdata.iommu = i; + group = iommu_group_get_for_dev(dev); + if (IS_ERR(group)) { + dev->archdata.iommu = NULL; + return PTR_ERR(group); + } + + iommu_group_put(group); + dev->archdata.iommu = i; + iommu_device_link(&i->iommu, dev); + iommu_setup_dma_ops(dev, 0, dma_get_mask(dev) + 1); + + return 0; +} + +static void l_iommu_remove_device(struct device *dev) +{ + struct l_iommu *i = dev->archdata.iommu; + dev->archdata.iommu = NULL; + iommu_device_unlink(&i->iommu, dev); + iommu_group_remove_device(dev); +} + +static struct iommu_group *l_iommu_device_group(struct device *dev) +{ + struct l_iommu *i; + if (!l_iommu_check_device(dev)) + return NULL; + i = l_find_iommu(dev); + if (!i) + return NULL; + return iommu_group_ref_get(i->default_group); +} + +static bool l_iommu_capable(enum iommu_cap cap) +{ + switch (cap) { + case IOMMU_CAP_CACHE_COHERENCY: + return true; + case IOMMU_CAP_INTR_REMAP: + return true; /* MSIs are just memory writes */ + case IOMMU_CAP_NOEXEC: + return true; + default: + return false; + } +} + +#define VGA_MEMORY_OFFSET 0x000A0000 +#define VGA_MEMORY_SIZE 0x00020000 +#define RT_MSI_MEMORY_SIZE 0x100000 /* 1 Mb */ +static void l_iommu_get_resv_regions(struct device *dev, + struct list_head *head) +{ + struct iommu_resv_region *region; + int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; + struct iohub_sysdata *sd; + struct pci_dev *pdev = l_dev_to_parent_pcidev(dev); + + + if (l_iommu_win_sz > (1UL 
<< 32)) { + unsigned long start = 1UL << 32; + unsigned long sz = L_IOMMU_VA_MASK - l_iommu_win_sz + 1; + /* remove space beetween 0xffffFFFF and map_base */ + region = iommu_alloc_resv_region(start, sz, + prot, IOMMU_RESV_RESERVED); + if (!region) + return; + list_add_tail(®ion->list, head); + } + + sd = pdev->bus->sysdata; + if (!sd->pci_msi_addr_lo) + return; + + region = iommu_alloc_resv_region(((u64)sd->pci_msi_addr_hi) + << 32 | sd->pci_msi_addr_lo, RT_MSI_MEMORY_SIZE, + prot, IOMMU_RESV_MSI); + if (!region) + return; + list_add_tail(®ion->list, head); + + region = iommu_alloc_resv_region(VGA_MEMORY_OFFSET, VGA_MEMORY_SIZE, + prot, IOMMU_RESV_RESERVED); + if (!region) + return; + list_add_tail(®ion->list, head); + + iommu_dma_get_resv_regions(dev, head); +} + +static void l_iommu_put_resv_regions(struct device *dev, + struct list_head *head) +{ + struct iommu_resv_region *entry, *next; + + list_for_each_entry_safe(entry, next, head, list) + kfree(entry); +} + +static const struct iommu_ops l_iommu_ops = { + .map = l_iommu_map, + .unmap = l_iommu_unmap, + .iova_to_phys = l_iommu_iova_to_phys, + + .domain_alloc = l_iommu_domain_alloc, + .domain_free = l_iommu_domain_free, + .attach_dev = l_iommu_attach_device, + .detach_dev = l_iommu_detach_device, + .add_device = l_iommu_add_device, + .remove_device = l_iommu_remove_device, + .device_group = l_iommu_device_group, + .capable = l_iommu_capable, + + .get_resv_regions = l_iommu_get_resv_regions, + .put_resv_regions = l_iommu_put_resv_regions, + + .pgsize_bitmap = L_PGSIZE_BITMAP, +}; + +#ifdef CONFIG_SWIOTLB + +static int l_dma_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs) +{ + int ret = -ENXIO; + unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; + unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr)); + unsigned long off = vma->vm_pgoff; + + if (attrs) + 
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + + if (off >= count || user_count > (count - off)) + return -ENXIO; + if (is_vmalloc_addr(cpu_addr)) { + ret = remap_vmalloc_range(vma, cpu_addr, off); + } else { + ret = remap_pfn_range(vma, vma->vm_start, + pfn + off, + user_count << PAGE_SHIFT, + vma->vm_page_prot); + } + + return ret; +} + +static void *l_swiotlb_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs) +{ + if (attrs & DMA_ATTR_NON_CONSISTENT) { + void *va = l_alloc_uncached(dev, size, dma_handle, gfp); + if (va) + memset(va, 0, size); + return va; + } + return dma_direct_alloc(dev, size, dma_handle, gfp, attrs); +} + +static void l_swiotlb_free_coherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle, + unsigned long attrs) +{ + if (attrs & DMA_ATTR_NON_CONSISTENT) + l_free_uncached(dev, size, cpu_addr); + else + dma_direct_free(dev, size, cpu_addr, dma_handle, attrs); +} + +static const struct dma_map_ops l_swiotlb_dma_ops = { + .alloc = l_swiotlb_alloc_coherent, + .free = l_swiotlb_free_coherent, + .map_page = dma_direct_map_page, + .unmap_page = dma_direct_unmap_page, + .map_sg = dma_direct_map_sg, + .unmap_sg = dma_direct_unmap_sg, + .mmap = l_dma_mmap, + .get_sgtable = dma_common_get_sgtable, + .sync_single_for_cpu = dma_direct_sync_single_for_cpu, + .sync_single_for_device = dma_direct_sync_single_for_device, + .sync_sg_for_cpu = dma_direct_sync_sg_for_cpu, + .sync_sg_for_device = dma_direct_sync_sg_for_device, + .dma_supported = dma_direct_supported, +}; + +/* Built-in e1cp devices work bypassing iommu. They must work + * using swiotlb. 
+ */ +static void l_quirk_iommu_bypass_devices(struct pci_dev *pdev) +{ + set_dma_ops(&pdev->dev, &l_swiotlb_dma_ops); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_MGA2, + l_quirk_iommu_bypass_devices); +#endif /* CONFIG_SWIOTLB */ + +#ifdef CONFIG_PM_SLEEP +void l_iommu_stop_all(void) +{ + struct l_iommu *i; + if (paravirt_enabled()) + return; + for_each_iommu(i) + l_iommu_write(i, 0, L_IOMMU_CTRL); +} + +static int l_iommu_suspend(void) +{ + return 0; +} + +static void l_iommu_resume(void) +{ + struct l_iommu *i; + for_each_iommu(i) + l_iommu_init_hw(i, l_iommu_win_sz); +} + +static void l_iommu_shutdown(void) +{ + l_iommu_stop_all(); +} + +static struct syscore_ops l_iommu_syscore_ops = { + .resume = l_iommu_resume, + .suspend = l_iommu_suspend, + .shutdown = l_iommu_shutdown, +}; + +static void __init l_iommu_init_pm_ops(void) +{ + register_syscore_ops(&l_iommu_syscore_ops); +} + +#else +static inline void l_iommu_stop_all(void) {} +static inline void l_iommu_init_pm_ops(void) {} +#endif /* CONFIG_PM_SLEEP */ + + +static void l_get_max_resource(struct pci_bus *bus, resource_size_t *start, resource_size_t *end) +{ + int i; + struct resource *res; + resource_size_t e = 0, s = ~0ULL; + struct pci_dev *dev = bus->self; + + pci_bus_for_each_resource(bus, res, i) { + if (!res || !res->flags || res->start > res->end) + continue; + if (!(res->flags & IORESOURCE_MEM)) + continue; + if (res->start < s) + s = res->start; + if (res->end > e) + e = res->end; + } + for (i = 0; dev && i < DEVICE_COUNT_RESOURCE; i++) { + res = dev->resource + i; + if (!res || !res->flags || res->start > res->end) + continue; + if (!(res->flags & IORESOURCE_MEM)) + continue; + if (res->start < s) + s = res->start; + if (res->end > e) + e = res->end; + } + *start = s; + *end = e; +} + +static void l_trim_pci_window(struct pci_bus *root_bus) +{ + struct pci_bus *b; + resource_size_t start = ~0ULL, end = 0; + struct resource_entry *window; + struct pci_host_bridge *bridge 
= pci_find_host_bridge(root_bus); + + list_for_each_entry(b, &root_bus->children, node) { + resource_size_t s, e; + l_get_max_resource(b, &s, &e); + if (s < start) + start = s; + if (e > end) + end = e; + } + + resource_list_for_each_entry(window, &bridge->windows) { + struct resource *res; + if (window->res != &iomem_resource) + continue; + /* + * Fixup for iova_reserve_pci_windows(): trim the window if + * the boot didn't pass us pci memory ranges + * (see mp_pci_add_resources()). + */ + res = kzalloc(sizeof(*res), GFP_KERNEL); + BUG_ON(res == NULL); + res->name = "PCI mem"; + res->flags = IORESOURCE_MEM; + res->start = start; + res->end = end; + window->res = res; + pr_info("iommu: trim bridge window: %pR\n", window->res); + break; + } +} + +const struct dma_map_ops *dma_ops; +EXPORT_SYMBOL(dma_ops); + +static int __init l_iommu_setup(char *str) +{ + unsigned long win_sz = DFLT_IOMMU_WINSIZE; + + if (!strcmp(str, "force-numa-bug-on")) { + l_iommu_force_numa_bug_on = 1; + } else if (!strcmp(str, "no-numa-bug")) { + l_iommu_no_numa_bug = 1; + } else if (!strcmp(str, "noprefetch")) { + l_not_use_prefetch = 1; + } else { + win_sz = memparse(str, &str); + if (win_sz == 0) + l_use_swiotlb = 1; + } + if (l_iommu_has_numa_bug() && ( +#ifdef CONFIG_E2K + !IS_MACHINE_E16C && +#endif + num_online_nodes() > 1)) { + /*swiotlb does not support numa*/ + l_use_swiotlb = 0; + } + + win_sz = roundup_pow_of_two(win_sz); + if (win_sz > MAX_IOMMU_WINSIZE) + win_sz = MAX_IOMMU_WINSIZE; + else if (win_sz < MIN_IOMMU_WINSIZE) + win_sz = MIN_IOMMU_WINSIZE; + l_iommu_win_sz = win_sz; + + return 1; +} +__setup("iommu=", l_iommu_setup); + +static int __init l_iommu_init(void) +{ + int ret; + struct pci_bus *b; + size_t idr_sz = 1UL + INT_MAX; + size_t tbl_sz = l_iommu_win_sz / IO_PAGE_SIZE * sizeof(iopte_t); + + WARN_ON(l_init_uncached_pool()); +#if defined CONFIG_SWIOTLB || defined CONFIG_E2K + if (HAS_MACHINE_E2K_IOMMU && !l_use_swiotlb) + return 0; + + if (!l_iommu_supported() || 
l_use_swiotlb) { + extern int swiotlb_late_init_with_default_size(size_t size); + swiotlb_late_init_with_default_size(L_SWIOTLB_DEFAULT_SIZE); + dma_ops = &l_swiotlb_dma_ops; + l_iommu_stop_all(); + pr_info("iommu disabled\n"); + return 0; + } +#endif /* CONFIG_SWIOTLB */ + if (tbl_sz > PAGE_SIZE << (MAX_ORDER - 1)) { + tbl_sz = PAGE_SIZE << (MAX_ORDER - 1); + l_iommu_win_sz = tbl_sz / sizeof(iopte_t) * IO_PAGE_SIZE; + } + if (l_iommu_has_numa_bug() && l_iommu_win_sz > idr_sz * PAGE_SIZE) + l_iommu_win_sz = idr_sz * PAGE_SIZE; + + list_for_each_entry(b, &pci_root_buses, node) { + int node = 0; +#ifdef CONFIG_IOHUB_DOMAINS + struct iohub_sysdata *sd = b->sysdata; + node = sd->node; +#endif + l_trim_pci_window(b); + ret = __l_iommu_init(node, &b->dev); + if (ret) + return ret; + pr_info("iommu:%d: enabled; window size %lu MiB\n", + node, l_iommu_win_sz / (1024 * 1024)); + } + ret = bus_set_iommu(&pci_bus_type, &l_iommu_ops); + if (ret) + return ret; + ret = bus_set_iommu(&platform_bus_type, &l_iommu_ops); + if (ret) + return ret; + + l_iommu_init_pm_ops(); + l_iommu_debugfs_init(); + return ret; +} + +/* + * Needs to be done after pci initialization which are subsys_initcall. 
+ */ +subsys_initcall_sync(l_iommu_init); diff --git a/arch/l/kernel/l-mcmonitor.c b/arch/l/kernel/l-mcmonitor.c new file mode 100644 index 000000000000..747bd5adca9d --- /dev/null +++ b/arch/l/kernel/l-mcmonitor.c @@ -0,0 +1,187 @@ +#include +#include +#include +#include + +#include + + +static struct delayed_work l_mcmonitor; +static unsigned int l_mcmonitor_period; + + +#ifdef L_MCMONITOR_TEST_SIZE +static bool l_mcmonitor_test; + +static int __init l_mcmonitor_test_setup(char *str) +{ + l_mcmonitor_test = true; + return 1; +} +__setup("mcmonitor_test", l_mcmonitor_test_setup); + +static int l_mcmonitor_flush_cache(void) +{ + unsigned long a = __get_free_pages(GFP_KERNEL, MAX_ORDER - 1); + + if (!a) + return -ENOMEM; + + memset((void *)a, 0, 1UL << (MAX_ORDER - 1 + PAGE_SHIFT)); + free_pages(a, MAX_ORDER - 1); + + return 0; +} + +static int l_mcmonitor_ecc_test(void) +{ + int ret = 0; + unsigned long flags; + + u64 *a = kzalloc_node(L_MCMONITOR_TEST_SIZE, GFP_KERNEL, 0); + if (!a) + return -ENOMEM; + + if ((ret = l_mcmonitor_flush_cache())) + return ret; + + raw_local_irq_save(flags); + l_mcmonitor_fill_data(a, true); + raw_local_irq_restore(flags); + + ret = l_mcmonitor_cmp(a); + if (ret) + print_hex_dump(KERN_INFO, "b:", DUMP_PREFIX_OFFSET, 32, 8, + a, L_MCMONITOR_TEST_SIZE, 0); + + /* restore ecc */ + raw_local_irq_save(flags); + l_mcmonitor_fill_data(a, false); + raw_local_irq_restore(flags); + + kfree(a); + + return ret; +} +#endif + +static int __init l_mcmonitor_period_setup(char *str) +{ + l_mcmonitor_period = simple_strtoul(str, NULL, 0); + return 1; +} +__setup("mcmonitor_period=", l_mcmonitor_period_setup); + +static void do_l_mcmonitor(struct work_struct *work) +{ + static u16 last_MC_ECC[MAX_NUMNODES][SIC_MAX_MC_COUNT] = {}; + int node, i; + +#ifdef L_MCMONITOR_TEST_SIZE + if (l_mcmonitor_test) { + if (num_online_cpus() != 1) { + pr_err("l-mcmonitor: can't run test\n"); + } else { + int ret = l_mcmonitor_ecc_test(); + + pr_info("l-mcmonitor: test 
%s: %d\n", + ret ? "failed" : "passed", ret); + } + + l_mcmonitor_test = 0; + } +#endif + + for_each_online_node(node) { + for (i = 0; i < SIC_MC_COUNT; i++) { + char s[256]; + l_mc_ecc_struct_t ecc; + u32 cnt = l_mc_get_error_cnt(&ecc, node, i); + + if ((cnt - last_MC_ECC[node][i]) == 0) + continue; + + last_MC_ECC[node][i] = cnt; + + pr_warning("MC error DETECTED on node%d: %s\n", + node, l_mc_get_error_str(&ecc, i, s, + sizeof(s))); + } + } + + if (l_mcmonitor_period) + queue_delayed_work(system_power_efficient_wq, &l_mcmonitor, + l_mcmonitor_period * HZ); +} + +#ifdef CONFIG_SYSCTL +static int proc_do_l_mcmonitor_period(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + unsigned long old_period = l_mcmonitor_period; + int res; + + res = proc_douintvec(table, write, buffer, lenp, ppos); + + if (write && !res && !old_period && l_mcmonitor_period) + queue_delayed_work(system_power_efficient_wq, &l_mcmonitor, 0); + + return res; +} + +/* Place file period in /proc/sys/dev/mc */ +static ctl_table period_table[] = { + { + .procname = "period", + .data = &l_mcmonitor_period, + .maxlen = sizeof(l_mcmonitor_period), + .mode = 0644, + .proc_handler = proc_do_l_mcmonitor_period, + }, + {} +}; + +static ctl_table mc_table[] = { + { + .procname = "mc", + .maxlen = 0, + .mode = 0555, + .child = period_table, + }, + {} +}; + +/* Make sure that /proc/sys/dev is there */ +static ctl_table root_table[] = { + { + .procname = "dev", + .maxlen = 0, + .mode = 0555, + .child = mc_table, + }, + {} +}; +#endif + +static int __init l_mcmonitor_init(void) +{ + if (!l_mcmonitor_supported()) { /* XXX: see bug 116361. 
*/ + pr_notice("l-mcmonitor: not supported\n"); + return 0; + } + + if (!l_mcmonitor_eec_enabled()) + pr_notice("l-mcmonitor: ecc not enabled\n"); + + INIT_DEFERRABLE_WORK(&l_mcmonitor, do_l_mcmonitor); + +#ifdef CONFIG_SYSCTL + register_sysctl_table(root_table); +#endif + + if (l_mcmonitor_period) + queue_delayed_work(system_power_efficient_wq, &l_mcmonitor, 0); + + return 0; +} +arch_initcall(l_mcmonitor_init); diff --git a/arch/l/kernel/l-uncached.c b/arch/l/kernel/l-uncached.c new file mode 100644 index 000000000000..06b4ffed826e --- /dev/null +++ b/arch/l/kernel/l-uncached.c @@ -0,0 +1,179 @@ +#include +#include +#include +#include +#include +#include + +#include +#include +#include + + +static struct gen_pool *l_uncached_pool; +#define L_POOL_ORDER (MAX_ORDER - 1) + +static void *l_vmap_wc(phys_addr_t start, size_t size) +{ + pgprot_t prot = pgprot_writecombine(PAGE_KERNEL); + struct page **pages; + phys_addr_t page_start; + unsigned int page_count; + unsigned int i; + void *vaddr; + + page_start = start - offset_in_page(start); + page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE); + + + pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL); + if (!pages) + return NULL; + + for (i = 0; i < page_count; i++) { + phys_addr_t addr = page_start + i * PAGE_SIZE; + pages[i] = pfn_to_page(addr >> PAGE_SHIFT); + } + vaddr = vmap(pages, page_count, VM_MAP, prot); + kfree(pages); + + return vaddr; +} + +/* + * Add a new chunk of uncached memory pages to the specified pool. + * + * @pool: pool to add new chunk of uncached memory to + * @nid: node id of node to allocate memory from, or -1 + * + * This is accomplished by first allocating a granule of cached memory pages + * and then converting them to uncached memory pages. 
+ */ +static int l_uncached_add_chunk(struct gen_pool *uc_pool, int nid) +{ + int ret; + void *va; + unsigned long pa; + size_t sz = PAGE_SIZE << L_POOL_ORDER; + struct page *page = __alloc_pages_node(nid, GFP_KERNEL | __GFP_THISNODE, + L_POOL_ORDER); + if (!page) + return -1; + split_page(page, L_POOL_ORDER); + + pa = page_to_phys(page); + va = l_vmap_wc(pa, sz); + if (!va) + goto failed; + ret = gen_pool_add_virt(uc_pool, (unsigned long)va, pa, sz, nid); + if (ret) + goto failed; + /*FIXME: NUMA */ +#ifdef CONFIG_E90S + e90s_flush_l2_cache(); +#elif defined(CONFIG_E2K) + write_back_cache_all(); +#else + WARN("FIXME: add flush cache\n"); +#endif + return 0; +failed: + iounmap(va); + __free_pages(page, L_POOL_ORDER); + return -1; +} + +/* + * l_uncached_alloc_page + * + * @nid: node id, or -1 + * @n_pages: number of contiguous pages to allocate + * + * Allocate the specified number of contiguous uncached pages on the + * the requested node. + */ +static unsigned long l_uncached_alloc_page(int nid, int n_pages, + phys_addr_t *phys_addr) +{ + unsigned long uc_addr; + struct gen_pool *uc_pool = l_uncached_pool; + + do { + uc_addr = gen_pool_alloc(uc_pool, n_pages * PAGE_SIZE); + if (uc_addr != 0) { + *phys_addr = gen_pool_virt_to_phys(uc_pool, uc_addr); + return uc_addr; + } + } while (l_uncached_add_chunk(uc_pool, nid) == 0); + + return 0; +} + +/* + * uncached_free_page + * + * @uc_addr: uncached address of first page to free + * @n_pages: number of contiguous pages to free + * + * Free the specified number of uncached pages. 
+ */ +static void l_uncached_free_page(unsigned long uc_addr, int n_pages) +{ + struct gen_pool *pool = l_uncached_pool; + gen_pool_free(pool, uc_addr, n_pages * PAGE_SIZE); +} + +void *l_alloc_uncached(struct device *dev, size_t size, + phys_addr_t *phys_addr, gfp_t gfp) +{ + int pages = PAGE_ALIGN(size) / PAGE_SIZE; + void *v = (void *)l_uncached_alloc_page(dev_to_node(dev), pages, + phys_addr); + if (!v) + return v; + return v; +} + +void l_free_uncached(struct device *dev, size_t size, void *cpu_addr) +{ + int pages = PAGE_ALIGN(size) / PAGE_SIZE; + l_uncached_free_page((unsigned long)cpu_addr, pages); +} + +static void l_free_chunk(struct gen_pool *pool, + struct gen_pool_chunk *chunk, void *data) +{ + vunmap((void *)chunk->start_addr); + free_pages(chunk->phys_addr, L_POOL_ORDER); +} + +static void l_pool_destroy(struct gen_pool *pool, struct device *dev) +{ + if (!pool) + return; + /* this is quite ugly but no better idea */ + gen_pool_for_each_chunk(pool, l_free_chunk, dev); + gen_pool_destroy(pool); +} + +void l_destroy_uncached_pool(void) +{ + l_pool_destroy(l_uncached_pool, NULL); +} + +int l_init_uncached_pool(void) +{ + struct gen_pool *p; + int ret = 0; + + p = gen_pool_create(PAGE_SHIFT, 0); + if (!p) { + ret = -ENOMEM; + goto error; + } + l_uncached_pool = p; + gen_pool_set_algo(p, gen_pool_first_fit_order_align, NULL); + +error: + return ret; +} diff --git a/arch/l/kernel/lt.c b/arch/l/kernel/lt.c new file mode 100644 index 000000000000..814df8f8f27e --- /dev/null +++ b/arch/l/kernel/lt.c @@ -0,0 +1,252 @@ +/* + * lt.c E2K_SIC timer based/lt functions + * + * Copyright (C) 2010,2011,2012,2013,2014 MCST (os@mcst.ru) + */ +#include +#include +#include +#include +#include + +#include +#include + +#undef DEBUG_LT_MODE +#undef DebugLT +#define DEBUG_LT_MODE 0 /* Elbrus timer */ +#define DebugLT(fmt, args...) 
\ +({ \ + if (DEBUG_LT_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +long lt_clock_rate = 10000000; + +lt_regs_t *lt_regs = NULL; + +/* Points to the installed clock event device */ +struct clock_event_device *global_clock_event; + +/* + * Initialize the LT timer. + */ +static mpc_config_timer_t * __init +find_lt_in_mp_timers(void) +{ + mpc_config_timer_t *mp_timer; + int tm; + + if (nr_timers == 0) { + DebugLT("empty MP timers table entry\n"); + return (NULL); + } + mp_timer = &mp_timers[0]; + for (tm = 0; tm < nr_timers; tm ++) { + if (mp_timer->mpc_timertype == MP_LT_TYPE) { + DebugLT("found Elbrus timer at entry #%d\n", tm); + return (mp_timer); + } + DebugLT("entry #%d is %d type timer is not Elbrus timer\n", + tm, mp_timer->mpc_timertype); + mp_timer ++; + } + DebugLT("MP timers table has not Elbrus timer\n"); + return (NULL); +} + +int __init +get_lt_timer(void) +{ + mpc_config_timer_t *lt; + + if (lt_regs) + return 0; + + DebugLT("started\n"); + /* check clock override */ + if (!L_TIMER_IS_ALLOWED()) { + DebugLT("on this machine Elbrus timer is not " + "implemented\n"); + return (-ENODEV); + } + lt = find_lt_in_mp_timers(); + if (lt == NULL) { + DebugLT("on this machine Elbrus timer is not " + "found\n"); + return (-ENODEV); + } + if (lt->mpc_timeraddr == 0) { + pr_err("%s(): Elbrus timer registers base address " + "is not passed\n", __func__); + return (-ENODEV); + } + lt_regs = ioremap(lt->mpc_timeraddr, sizeof (*lt_regs)); + if (lt_regs == NULL) { + pr_err("%s(): could not map Elbrus timer registers " + "base address to virtual space\n", __func__); + return (-ENODEV); + } + DebugLT("Elbrus-timers registers 0x%lx mapped to IO virtual " + "space 0x%px\n", lt->mpc_timeraddr, lt_regs); + + return 0; + +} + +/* + * We use invertation clk_state to program timer device. This is historical + * choise, which is inherited from kernel 2.6.14. 
We used lt interrupt + * to move jiffies, lt counter to know the offset and invertional clk_state + * to differ a condition of overflow. + * + * We do not use counter offset anymore, but we continue use this clk_state. + * It's known as working everythere and good tested. Nobody wants + * to test direct clk_state on whole zoo of our machines. + * + * In invertional clk_state we have following interrupt diagram: + * + #1 + #2 + * 1 ---------- ---------- ---------- + * | | | | + * 0 ---------- ---------- + * We have two phase of interrupt counter: + * in the first phase limit bit set to 1 and when limit will + * be reached, then no interrupts will be occured only timer will switch + * to second phase with limit bit 0 + * in the second phase limit bit set to 0 and when limit will + * be reached, then interrupt will ve occured and timer switch to + * first phase again. + * So we should set timer to half value of interrupt period (HZ / 2) + * and programmed IOAPIC pin to receive interrupt on edge from 0 to 1 + */ + +#define LT_LATCH ((lt_clock_rate + HZ/2) / HZ) /* For divider */ +static int +lt_set_periodic(struct clock_event_device *evt) +{ + DebugLT("started\n"); + /* counter start value is from 1 to limit, so +1 */ + writel(LT_WRITE_COUNTER_VALUE(LT_LATCH / 2 + 1), <_regs->counter_limit); + writel(LT_INVERT_COUNTER_CNTR_LAUNCH, <_regs->counter_cntr); + return 0; +} + +static int +lt_shutdown(struct clock_event_device *evt) +{ + unsigned int cntr; + + DebugLT("started\n"); + cntr = readl(<_regs->counter_cntr); + cntr &= ~LT_INVERT_COUNTER_CNTR_LAUNCH; + writel(cntr, <_regs->counter_cntr); + return 0; +} + +/* + * The profiling and update capabilities are switched off once the local apic is + * registered. 
This mechanism replaces the previous #ifdef LOCAL_APIC - + * !using_apic_timer decisions in do_timer_interrupt_hook() + */ +static struct clock_event_device lt_ce = { + .name = "lt", + .features = CLOCK_EVT_FEAT_PERIODIC, + .set_state_periodic = lt_set_periodic, + .set_state_shutdown = lt_shutdown, + .shift = 32, + .irq = 0, +}; + +/* + * Initialize the conversion factor and the min/max deltas of the clock event + * structure and register the clock event source with the framework. + */ +void __init setup_lt_timer(void) +{ + DebugLT("started\n"); + if (get_lt_timer()) { + pr_err("%s(): could not get access to Elbrus-timer\n", + __func__); + return; + } + + if (is_prototype()) { + if (IS_ENABLED(CONFIG_E2K)) + lt_clock_rate = 500000; + } + + /* cpu_possible_mask() ? */ + lt_ce.cpumask = cpumask_of(smp_processor_id()); + lt_ce.mult = div_sc(lt_clock_rate, NSEC_PER_SEC, lt_ce.shift); + lt_ce.max_delta_ns = clockevent_delta2ns(0xF423F, <_ce); + lt_ce.min_delta_ns = clockevent_delta2ns(0xF, <_ce); + + clockevents_register_device(<_ce); + global_clock_event = <_ce; + DebugLT("clockevents device Elbrus-timer was registered\n"); +} + +u32 lt_read(void) +{ + if (WARN_ON_ONCE(!lt_regs)) + return 0; + + /* + * We read low bytes only. So we don't need any lock + * and clocksource's mask is 32 bit. + */ + return readl(<_regs->reset_counter_lo); +} + + +static u64 lt_read_cs(struct clocksource *cs) +{ + /* + * We read low bytes only. So we don't need any lock + * and clocksource's mask is 32 bit. 
+ */ + return readl(<_regs->reset_counter_lo); +} + +struct clocksource lt_cs = { + .name = "lt", + .rating = 110, + .read = lt_read_cs, + .mask = CLOCKSOURCE_MASK(32), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .mult = 0, + .shift = 20, +}; +#if defined(CONFIG_SCLKR_CLOCKSOURCE) +EXPORT_SYMBOL(lt_cs); +#endif + +int __init init_lt_clocksource(void) +{ + int ret; + + DebugLT("started\n"); +#ifdef __e2k__ + if (!L_TIMER_IS_ALLOWED()) { + ret = -ENODEV; + goto out; + } +#endif /* __e2k__ */ + + if (!lt_regs) { + ret = -ENODEV; + goto out; + } + + ret = clocksource_register_hz(<_cs, lt_clock_rate); + if (ret != 0) { + pr_err("%s(): clocksource registration failed, error %d\n", + __func__, ret); + } + +out: + DebugLT("completed with return value %d\n", ret); + return ret; +} +arch_initcall(init_lt_clocksource); + diff --git a/arch/l/kernel/mpparse.c b/arch/l/kernel/mpparse.c new file mode 100644 index 000000000000..e050ff0c87b9 --- /dev/null +++ b/arch/l/kernel/mpparse.c @@ -0,0 +1,1410 @@ +/* + * Intel Multiprocessor Specificiation 1.1 and 1.4 + * compliant MP-table parsing routines. + * + * Given from i386 architecture mpparse.c implementation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __e2k__ +#include +#include +#endif +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#undef DEBUG_MPT_MODE +#undef DebugMPT +#define DEBUG_MPT_MODE 0 /* MP-table parsing */ +#define DebugMPT if (DEBUG_MPT_MODE) printk + +static struct intel_mp_floating *mpf_found = NULL; +static boot_info_t *mpf_boot_info = NULL; +unsigned int __initdata maxcpus = NR_CPUS; +int __initdata max_iolinks = MAX_NUMIOLINKS; +int __initdata max_node_iolinks = 1; + +#ifdef CONFIG_E2K +# define boot_mpf_found boot_get_vo_value(mpf_found) +#else +# define boot_mpf_found mpf_found +#endif + +/* + * Various Linux-internal data structures created from the + * MP-table. 
+ */ +static mpc_config_iolink_t mp_iolinks[MAX_NUMIOLINKS]; +static int mp_iolinks_num = 0; +static int mp_iohubs_num = 0; +static int mp_rdmas_num = 0; + +mpc_config_timer_t mp_timers[MAX_MP_TIMERS]; +int rtc_model = 0; +int rtc_syncintr = 0; +int nr_timers = 0; + +int IOHUB_revision = 0; +EXPORT_SYMBOL(IOHUB_revision); + /* CPU present map (passed by */ + /* BIOS thru MP table) */ +int phys_cpu_present_num = 0; /* number of present CPUs */ + /* (passed by BIOS thru */ + /* MP table) */ + +/* Processor count in MP configuration table */ +unsigned int mp_num_processors; + +#ifdef CONFIG_IOHUB_DOMAINS +static int src_irq_entries; +#endif /* CONFIG_IOHUB_DOMAINS */ + +/* + * Checksum an MP configuration block. + */ + +static int __init +mpf_checksum(unsigned char *mp, int len) +{ + int sum = 0; + + while (len--) + sum += *mp++; + + return sum & 0xFF; +} + +static void __init +MP_processor_info (struct mpc_config_processor *m) +{ + if (!(m->mpc_cpuflag & CPU_ENABLED)) + return; + + printk("Processor %s ID #%d version %d\n", + cpu_has_epic() ? "EPIC" : "APIC", + m->mpc_apicid, + m->mpc_apicver); + + if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { + DebugMPT(" Bootup CPU\n"); + boot_cpu_physical_apicid = m->mpc_apicid; + } + + if (mp_num_processors >= NR_CPUS) { + printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." + " Processor ignored.\n", NR_CPUS); + return; + } + + if (mp_num_processors >= maxcpus) { + printk(KERN_WARNING "WARNING: maxcpus limit of %i reached." + " Processor ignored.\n", maxcpus); + return; + } + mp_num_processors++; + + if (m->mpc_apicid > MAX_APICS) { + printk("Processor #%d INVALID. 
(Max ID: %d).\n", + m->mpc_apicid, MAX_APICS); + return; + } + + pic_processor_info(m->mpc_apicid, m->mpc_apicver, + m->mpc_cepictimerfreq); +} + +static void __init +MP_iolink_info (struct mpc_config_iolink *m) +{ + printk("IO link #%d on node %d, version 0x%02x,", + m->link, m->node, m->mpc_iolink_ver); + if (m->mpc_iolink_type == MP_IOLINK_IOHUB) { + printk(" connected to IOHUB: min bus #%d max bus #%d IO %s " + "ID %d\n", m->bus_min, m->bus_max, + cpu_has_epic() ? "EPIC" : "APIC", m->apicid); + } else { + printk(" is RDMA controller\n"); + } + if (mp_iolinks_num >= max_iolinks) { + printk(KERN_WARNING "WARNING: IO links limit of %i reached." + " IO link ignored.\n", max_iolinks); + return; + } +#if defined(CONFIG_E90S) && !defined(CONFIG_NUMA) + if (m->node >= MAX_NUMIOLINKS) { +#else /* E2K or NUMA */ + if (m->node >= MAX_NUMNODES) { +#endif /* CONFIG_E90S && ! CONFIG_NUMA */ + printk(KERN_WARNING "WARNING: invalid node #%d (>= max %d)." + " IO link ignored.\n", m->node, MAX_NUMNODES); + if (nr_ioapics > mp_iolinks_num) + nr_ioapics = mp_iolinks_num; + return; + } + + if (m->link >= NODE_NUMIOLINKS) { + printk(KERN_WARNING "WARNING: invalid local link #%d " + "(>= max %d). IO link ignored.\n", + m->link, NODE_NUMIOLINKS); + return; + } + memcpy(&mp_iolinks[mp_iolinks_num], m, sizeof(*m)); + mp_iolinks_num ++; + if (m->mpc_iolink_type == MP_IOLINK_IOHUB) + mp_iohubs_num ++; + else + mp_rdmas_num ++; +} + +static void __init MP_bus_info (struct mpc_config_bus *m) +{ + char str[7]; +#if MAX_MP_BUSSES < 256 + if (m->mpc_busid >= MAX_MP_BUSSES) { + WARN(1, "MP table busid value (%d) for bustype %s is too large, max. 
supported is %d\n", + m->mpc_busid, str, MAX_MP_BUSSES - 1); + return; + } +#endif + memcpy(str, m->mpc_bustype, 6); + str[6] = 0; + DebugMPT("Bus #%d is %s\n", m->mpc_busid, str); + + if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) { + set_bit(m->mpc_busid, mp_bus_not_pci); +#if defined(CONFIG_EISA) || defined(CONFIG_MCA) + mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; +#endif + } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) { + clear_bit(m->mpc_busid, mp_bus_not_pci); +#if defined(CONFIG_EISA) || defined(CONFIG_MCA) + mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; + } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) { + mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; + } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) { + mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA; +#endif + } else + printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str); +} + +static void __init MP_ioapic_info (struct mpc_ioapic *m) +{ + unsigned long apicaddr; + if (!(m->flags & MPC_APIC_USABLE)) + return; + +#ifdef CONFIG_L_IO_APIC + if (nr_ioapics >= max_iolinks) { + pr_warning("Max # of I/O APICs (IO links) " + "(%d) limit reached. IO APIC ignored.\n", + max_iolinks); + return; + } + apicaddr = get_unaligned(&m->apicaddr); + mp_register_ioapic(m->apicid, apicaddr, gsi_top); +#endif +} + +#ifdef CONFIG_EPIC +static void __init MP_ioepic_info(struct mpc_ioepic *m) +{ + mp_register_ioepic(m->epicver, m->epicid, m->nodeid, m->epicaddr, gsi_top); +} + +/* + * Find an mpc_iolink structure with matching IO-EPIC id. Get PCI bus of EIOHub / IOEPIC + * from bus_min field. + * This requires boot to pass all mpc_iolinks before mpc_ioepics. 
+ */ +int __init mp_ioepic_find_bus(int ioepic_id) +{ + mpc_config_iolink_t *iolink; + int i; + + for (i = 0; i < mp_iolinks_num; i++) { + iolink = &mp_iolinks[i]; + if (iolink->apicid == ioepic_id) + return iolink->bus_min; + } + + pr_warn("%s(): failed to find PCI bus of IOEPIC id %d\n", __func__, ioepic_id); + return 1; +} +#else +static void __init MP_ioepic_info(struct mpc_ioepic *m) +{ + pr_warn("Received MP_IOEPIC from boot on kernel without EPIC support\n"); +} +#endif + +static void __init +MP_timer_info(mpc_config_timer_t *m) +{ + unsigned long timeraddr = get_unaligned(&m->mpc_timeraddr); + printk(KERN_INFO "System timer type %d Version %d at 0x%lX.\n", + m->mpc_timertype, m->mpc_timerver, timeraddr); + /* try to find out explicit definition of RTC*/ + if (m->mpc_timertype == MP_RTC_TYPE) { + rtc_model = m->mpc_timerver; + rtc_syncintr = m->mpc_timerflags & MP_RTC_FLAG_SYNCINTR; + } + if (nr_timers >= MAX_MP_TIMERS) { + printk(KERN_CRIT "Max # of System timers (%d) exceeded " + "(found %d).\n", + MAX_MP_TIMERS, nr_timers); + panic("Recompile kernel with bigger MAX_MP_TIMERS!.\n"); + } + if (!timeraddr) { + printk(KERN_ERR "WARNING: bogus zero System timer address" + " found in MP table, skipping!\n"); + return; + } + memcpy(&mp_timers[nr_timers], m, sizeof(*m)); + nr_timers++; +} + +static void MP_i2c_spi_info(struct mpc_config_i2c *mpc) +{ + void *i2ccntrladdr = (void *)get_unaligned(&mpc->mpc_i2ccntrladdr); + void *i2cdataaddr = (void *)get_unaligned(&mpc->mpc_i2cdataaddr); + IOHUB_revision = mpc->mpc_revision; + printk("i2c_spi_info: control base addr = %px, data base addr = " + "%px, IRQ %d IOHUB revision %02x\n", + i2ccntrladdr, i2cdataaddr, + mpc->mpc_i2c_irq, + IOHUB_revision); +} + +static void __init MP_intsrc_info (struct mpc_intsrc *m) +{ +#ifdef CONFIG_L_IO_APIC + mp_irqs [mp_irq_entries] = *m; + DebugMPT("Int: type %d, pol %d, trig %d, bus %d," + " IRQ %02x, APIC ID %x, APIC INT %02x\n", + m->irqtype, m->irqflag & 3, + (m->irqflag >> 2) & 3, 
m->srcbus, + m->srcbusirq, m->dstapic, m->dstirq); + if (++mp_irq_entries == MAX_IRQ_SOURCES) + panic("Max # of irq sources exceeded!!\n"); +#endif +} + +static void __init construct_default_ioirq_mptable(int mpc_default_type) +{ + struct mpc_intsrc intsrc; + int i; + + intsrc.type = MP_INTSRC; + intsrc.irqflag = 0; /* conforming */ + intsrc.srcbus = 0; +#ifdef CONFIG_L_IO_APIC + intsrc.dstapic = mpc_ioapic_id(0); +#else + intsrc.dstapic = 0; +#endif + + intsrc.irqtype = mp_INT; + for (i = 0; i < 16; i++) { + switch (mpc_default_type) { + case 2: + if (i == 0 || i == 13) + continue; /* IRQ0 & IRQ13 not connected */ + /* fall through */ + default: + if (i == 2) + continue; /* IRQ2 is never connected */ + } + + intsrc.srcbusirq = i; + intsrc.dstirq = i ? i : 2; /* IRQ0 to INTIN2 */ +// intsrc.dstirq = i; /* */ + MP_intsrc_info(&intsrc); + } + + intsrc.irqtype = mp_ExtINT; + intsrc.srcbusirq = 0; + intsrc.dstirq = 0; /* 8259A to INTIN0 */ + MP_intsrc_info(&intsrc); +} + +static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m) +{ + DebugMPT("Lint: type %d, pol %d, trig %d, bus %d," + " IRQ %02x, APIC ID %x, APIC LINT %02x\n", + m->mpc_irqtype, m->mpc_irqflag & 3, + (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid, + m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint); + /* + * Well it seems all SMP boards in existence + * use ExtINT/LVT1 == LINT0 and + * NMI/LVT2 == LINT1 - the following check + * will show us if this assumptions is false. + * Until then we do not have to add baggage. 
+ */ + if ((m->mpc_irqtype == mp_ExtINT) && + (m->mpc_destapiclint != 0)) + BUG(); + if ((m->mpc_irqtype == mp_NMI) && + (m->mpc_destapiclint != 1)) + BUG(); +} + +/* + * Read/parse the MPC + */ + +static int __init smp_read_mpc(struct mpc_table *mpc) +{ + char str[16]; + int count = MP_SIZE_ALIGN(sizeof(*mpc)); + unsigned char *mpt= MP_ADDR_ALIGN(((unsigned char *)mpc) + count); + + if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) + { + panic("SMP mptable: bad signature [%c%c%c%c]!\n", + mpc->mpc_signature[0], + mpc->mpc_signature[1], + mpc->mpc_signature[2], + mpc->mpc_signature[3]); + return 1; + } + if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) + { + panic("SMP mptable: checksum error!\n"); + return 1; + } + if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04 && mpc->mpc_spec!=0x08) + { + printk("Bad Config Table version (%d)!!\n",mpc->mpc_spec); + return 1; + } + memcpy(str,mpc->mpc_oem,8); + str[8]=0; + printk("OEM ID: %s ",str); + + memcpy(str,mpc->mpc_productid,12); + str[12]=0; + printk("Product ID: %s ",str); + + printk("%s at: 0x%X\n", cpu_has_epic() ? "EPIC" : "APIC", + mpc->mpc_lapic); + + /* save the local APIC address, it might be non-default */ + mp_lapic_addr = mpc->mpc_lapic; + + /* + * Now process the configuration blocks. 
+ */ + count = MP_SIZE_ALIGN(count); + while (count < MP_SIZE_ALIGN(mpc->mpc_length)) { + switch(*mpt) { + case MP_PROCESSOR: + { + struct mpc_config_processor *m= + (struct mpc_config_processor *)mpt; + MP_processor_info(m); + mpt += MP_SIZE_ALIGN(sizeof(*m)); + count += MP_SIZE_ALIGN(sizeof(*m)); + break; + } + case MP_IOLINK: + { + struct mpc_config_iolink *m= + (struct mpc_config_iolink *)mpt; + MP_iolink_info(m); + mpt += MP_SIZE_ALIGN(sizeof(*m)); + count += MP_SIZE_ALIGN(sizeof(*m)); + break; + } + case MP_BUS: + { + struct mpc_config_bus *m= + (struct mpc_config_bus *)mpt; + MP_bus_info(m); + mpt += MP_SIZE_ALIGN(sizeof(*m)); + count += MP_SIZE_ALIGN(sizeof(*m)); + break; + } + case MP_IOAPIC: + { + struct mpc_ioapic *m= + (struct mpc_ioapic *)mpt; + MP_ioapic_info(m); + mpt += MP_SIZE_ALIGN(sizeof(*m)); + count += MP_SIZE_ALIGN(sizeof(*m)); + break; + } + case MP_IOEPIC: + { + struct mpc_ioepic *m = + (struct mpc_ioepic *)mpt; + MP_ioepic_info(m); + mpt += MP_SIZE_ALIGN(sizeof(*m)); + count += MP_SIZE_ALIGN(sizeof(*m)); + break; + } + case MP_INTSRC: + { + struct mpc_intsrc *m = + (struct mpc_intsrc *) mpt; + + MP_intsrc_info(m); + mpt += MP_SIZE_ALIGN( sizeof(*m)); + count += MP_SIZE_ALIGN(sizeof(*m)); + break; + } + case MP_LINTSRC: + { + struct mpc_config_lintsrc *m= + (struct mpc_config_lintsrc *)mpt; + MP_lintsrc_info(m); + mpt += MP_SIZE_ALIGN( sizeof(*m)); + count += MP_SIZE_ALIGN( sizeof(*m)); + break; + } + case MP_I2C_SPI: + { + struct mpc_config_i2c *m= + (struct mpc_config_i2c *)mpt; + MP_i2c_spi_info(m); + mpt += MP_SIZE_ALIGN(sizeof(*m)); + count += MP_SIZE_ALIGN(sizeof(*m)); + break; + } + case MP_TIMER: + { + mpc_config_timer_t *m= + (mpc_config_timer_t *)mpt; + MP_timer_info(m); + mpt += MP_SIZE_ALIGN(sizeof(*m)); + count += MP_SIZE_ALIGN(sizeof(*m)); + break; + } + case MP_PMC: + { + mpc_config_pmc_t *m = + (mpc_config_pmc_t *)mpt; + /* Do nothing: as pmc is a pci host bridge */ + mpt += MP_SIZE_ALIGN(sizeof(*m)); + count += 
MP_SIZE_ALIGN(sizeof(*m)); + break; + } + default : + { + printk("smp_read_mpc() undefined MP table " + "item type %d\n", *mpt); + } + } + } + return mp_num_processors; +} + +#ifdef CONFIG_IOHUB_DOMAINS + +#ifdef __e2k__ +static int __init +MP_construct_dup_ioapic(int node, int link) +{ + struct mpc_ioapic ioapic; + + pr_info("BOOT did not pass IO-APIC info for node %d link %d, " + "construct duplicated table\n", node, link); + + ioapic.type = MP_IOAPIC; + ioapic.flags = MPC_APIC_USABLE; + ioapic.apicver = 0x11; + + if (nr_ioapics <= link) { + ioapic.apicid = NR_CPUS + link; + ioapic.apicaddr = 0xfec00000 + link * 0x1000; + } else { + ioapic.apicid = mpc_ioapic_id(link) + (nr_ioapics - link); + ioapic.apicaddr = mpc_ioapic_addr(link) + + (node * NODE_NUMIOLINKS) * 0x1000; + } + + MP_ioapic_info(&ioapic); + + return ioapic.apicid; +} + +static void __init +MP_construct_dup_intsrc(int apicid, int node, int link) +{ + struct mpc_intsrc intsrc; + int src_apicid; + int ent; + + if (nr_ioapics <= link) + panic("MP_construct_dup_intsrc() nothing IO APICs detected in " + "MP table\n"); + src_apicid = mpc_ioapic_id(link); + for (ent = 0; ent < src_irq_entries; ent ++) { + if (mp_irqs[ent].dstapic != src_apicid) + continue; + intsrc = mp_irqs[ent]; + intsrc.dstapic = apicid; + MP_intsrc_info(&intsrc); + } +} +#endif /* __e2k__ */ + +static void __init +MP_construct_default_iolinks(void) +{ + mpc_config_iolink_t mp_iolink; + boot_info_t *boot_info = mpf_boot_info; + unsigned long node_mask; + int node; + int iolinks_count; + unsigned int apicid; + + early_sic_init(); + + printk("BOOT did not pass IOLINKs info, construct default table\n"); +#ifdef __e2k__ + if (!(HAS_MACHINE_E2K_IOHUB) || boot_info == NULL) { + /* only one IO controller (south bridge) PIIX4 */ + /* on single node # 0 */ + mp_iolink.mpc_type = MP_IOLINK; + mp_iolink.mpc_iolink_type = MP_IOLINK_IOHUB; + mp_iolink.mpc_iolink_ver = MP_IOHUB_FPGA_VER; + mp_iolink.node = 0; + mp_iolink.link = 0; + mp_iolink.bus_min 
= 0; + mp_iolink.bus_max = 7; + if (nr_ioapics < 1) + mp_iolink.apicid = -1; + else + mp_iolink.apicid = mpc_ioapic_id(0); + MP_iolink_info(&mp_iolink); + return; + } +#endif /* __e2k__ */ + node_mask = boot_info->nodes_map; + iolinks_count = 0; + src_irq_entries = mp_irq_entries; + mp_iolink.mpc_type = MP_IOLINK; + +#if defined(CONFIG_E90S) && !defined(CONFIG_NUMA) + for (node = 0; node < MAX_NUMIOLINKS; node++) { +#else /* E2K or NUMA */ + for (node = 0; node < MAX_NUMNODES; node++) { + if (!(node_mask & (1 << node))) + continue; +#endif /* CONFIG_E90S && ! CONFIG_NUMA */ + if (!early_iohub_online(node, 0)) + continue; + if (iolinks_count >= max_iolinks) + break; + + mp_iolink.mpc_iolink_type = MP_IOLINK_IOHUB; + mp_iolink.mpc_iolink_ver = MP_IOHUB_FPGA_VER; + mp_iolink.node = node; + mp_iolink.link = 0; + mp_iolink.bus_min = 1; + mp_iolink.bus_max = 3; + if (nr_ioapics <= iolinks_count) { +#ifdef __e2k__ + apicid = MP_construct_dup_ioapic(node, 0); + MP_construct_dup_intsrc(apicid, node, 0); +#else /* e90s */ + pr_info("BOOT did not pass IO-APIC info for IOLINK #%d " + "on node #%d, ignore IO link\n", + iolinks_count, node); + break; +#endif /* __e2k__ */ + } else { + apicid = mpc_ioapic_id(iolinks_count); + } + mp_iolink.apicid = apicid; + iolinks_count ++; + MP_iolink_info(&mp_iolink); +#ifdef __e2k__ + if (IS_MACHINE_ES2) { + /* there is second IO link on each node */ + if (!early_iohub_online(node, 1)) + continue; + if (iolinks_count >= max_iolinks) + break; + if (max_node_iolinks <= 1) + continue; + mp_iolink.link = 1; + mp_iolink.bus_min = 1; + mp_iolink.bus_max = 1; + if (nr_ioapics <= iolinks_count) { + apicid = MP_construct_dup_ioapic(node, 1); + MP_construct_dup_intsrc(apicid, node, 1); + } else { + apicid = mpc_ioapic_id(iolinks_count); + } + mp_iolink.apicid = apicid; + iolinks_count ++; + MP_iolink_info(&mp_iolink); + } +#endif /* __e2k__ */ + } +} + +static int +mp_fix_iolinks_io_apicid(unsigned int src_apicid, unsigned int new_apicid) +{ + 
mpc_config_iolink_t *iolink; + int i; + + if (mp_iolinks_num <= 0) + return 0; + for (i = 0; i < mp_iolinks_num; i++) { + iolink = &mp_iolinks[i]; + if (iolink->mpc_iolink_type != MP_IOLINK_IOHUB) + continue; + if (iolink->apicid == src_apicid) { + iolink->apicid = new_apicid; + pr_err("... IOLINK node #%d link #%d IO-APIC ID " + "fixing up to %d\n", + iolink->node, iolink->link, new_apicid); + return 0; + } + } + pr_err("BIOS MP table bug: could not find IOLINK this IO-APIC ID %d\n", + src_apicid); + return -1; +} + +int mp_fix_intsrc_io_apicid(unsigned int src_apicid, unsigned int new_apicid) +{ + struct mpc_intsrc *m; + int count = 0; + int i; + + for (i = 0; i < mp_irq_entries; i++) { + m = &mp_irqs[i]; + if (m->dstapic == src_apicid) { + m->dstapic = new_apicid; + count++; + pr_err("... BUS #%d IRQ %d IO-APIC ID " + "fixing up to %d\n", + m->srcbus, m->srcbusirq, new_apicid); + } + } + if (count <= 0) { + pr_err("BIOS MP table bug: none IRQ entry for IO-APIC ID %d\n", + src_apicid); + return -1; + } + return 0; +} + +int mp_fix_io_apicid(unsigned int src_apicid, unsigned int new_apicid) +{ + int ret = 0; + + if (mp_iolinks_num > 0) + ret += mp_fix_iolinks_io_apicid(src_apicid, new_apicid); +/* ret += mp_fix_intsrc_io_apicid(src_apicid, new_apicid); */ + return ret; +} + +int mp_find_iolink_root_busnum(int node, int link) +{ + mpc_config_iolink_t *iolink; + int i; + + for (i = 0; i < mp_iolinks_num; i ++) { + iolink = &mp_iolinks[i]; + if (iolink->mpc_iolink_type != MP_IOLINK_IOHUB) + continue; + if (iolink->node == node && iolink->link == link) + return (iolink->bus_min); + } + return (-1); +} + +int mp_find_iolink_io_apicid(int node, int link) +{ + mpc_config_iolink_t *iolink; + int i; + + for (i = 0; i < mp_iolinks_num; i ++) { + iolink = &mp_iolinks[i]; + if (iolink->mpc_iolink_type != MP_IOLINK_IOHUB) + continue; + if (iolink->node == node && iolink->link == link) + return (iolink->apicid); + } + return (-1); +} +#else /* ! 
CONFIG_IOHUB_DOMAINS */ +#define MP_construct_default_iolinks() +#endif /* CONFIG_IOHUB_DOMAINS */ + +void mp_pci_add_resources(struct list_head *resources, + struct iohub_sysdata *sd) +{ + mpc_config_iolink_t *iolink = NULL; + struct resource *mem; + +#ifdef CONFIG_IOHUB_DOMAINS + int i; + + for (i = 0; i < mp_iolinks_num; i++) { + iolink = &mp_iolinks[i]; + if (iolink->mpc_iolink_type != MP_IOLINK_IOHUB) + continue; + if (iolink->node == sd->node && iolink->link == sd->link) + break; + } + BUG_ON(i == mp_iolinks_num); +#else + iolink = &mp_iolinks[0]; +#endif + sd->mem_space.name = "PCI mem"; + sd->mem_space.flags = IORESOURCE_MEM; + if (iolink->pci_mem_end) { + sd->mem_space.start = iolink->pci_mem_start; + sd->mem_space.end = iolink->pci_mem_end - 1; + WARN_ON(request_resource(&iomem_resource, &sd->mem_space)); + mem = &sd->mem_space; + } else { + mem = &iomem_resource; + } + pci_add_resource_offset(resources, &ioport_resource, + L_IOPORT_RESOURCE_OFFSET); + pci_add_resource_offset(resources, mem, + L_IOMEM_RESOURCE_OFFSET); +} + +static int mp_find_srcbus_io_apicid(int busnum) +{ + int i; + + for (i = 0; i < mp_irq_entries; i++) { + int lbus = mp_irqs[i].srcbus; + + if (busnum == lbus) + return mp_irqs[i].dstapic; + } + return -1; +} + +int get_bus_to_io_apicid(int busnum) +{ + mpc_config_iolink_t *iolink; + int i; + + if (mp_iolinks_num <= 0) { + printk(KERN_WARNING "Bogus boot: none IO links info " + "in MP table\n"); + return mp_find_srcbus_io_apicid(busnum); + } + for (i = 0; i < mp_iolinks_num; i++) { + iolink = &mp_iolinks[i]; + if (iolink->mpc_iolink_type != MP_IOLINK_IOHUB) + continue; + if (busnum >= iolink->bus_min && busnum <= iolink->bus_max) + return iolink->apicid; + } + return -1; +} + +static inline void __init +MP_construct_default_timer(void) +{ + mpc_config_timer_t mp_timer; +#ifdef CONFIG_E2K + if (get_machine_id() != MACHINE_ID_ES2_DSP_LMS && + get_machine_id() != MACHINE_ID_ES2_RU_LMS) + return; +#endif + mp_timer.mpc_type = MP_TIMER; + 
mp_timer.mpc_timertype = MP_LT_TYPE; + mp_timer.mpc_timerver = MP_LT_VERSION; + mp_timer.mpc_timerflags = MP_LT_FLAGS; + mp_timer.mpc_timeraddr = 0; + MP_timer_info(&mp_timer); +} + +static inline void __init construct_default_ISA_mptable(int mpc_default_type) +{ + struct mpc_config_processor processor; + struct mpc_config_bus bus; + struct mpc_ioapic ioapic; + struct mpc_config_lintsrc lintsrc; + int linttypes[2] = { mp_ExtINT, mp_NMI }; + int i; + + /* + * local APIC has default address + */ + mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; + + /* + * 2 CPUs, numbered 0 & 1. + */ + processor.mpc_type = MP_PROCESSOR; + /* Either an integrated APIC or a discrete 82489DX. */ + processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; + processor.mpc_cpuflag = CPU_ENABLED; + + /* + * 111 Indicates a processor that is not a Intel architecture- + * compatible processor. + */ + processor.mpc_cpufeature = 0x0f; + processor.mpc_featureflag = 0x0; + processor.mpc_cepictimerfreq = 0; + processor.mpc_reserved = 0; + + /* + * Default configuration must be set for all live processors. 
+ */ + for (i = 0; i < phys_cpu_present_num; i++) { + processor.mpc_apicid = i; + MP_processor_info(&processor); + } + if (mp_num_processors != phys_cpu_present_num) { + printk("BIOS bug, Number of processors from BIOS is %d " + "!= %d (number of processors in MP table)\n", + phys_cpu_present_num, mp_num_processors); + } + + MP_construct_default_iolinks(); + + bus.mpc_type = MP_BUS; + bus.mpc_busid = 0; + switch (mpc_default_type) { + default: + printk("???\nUnknown standard configuration %d\n", + mpc_default_type); + /* fall through */ + case 1: + case 5: + memcpy(bus.mpc_bustype, "ISA ", 6); + break; + case 2: + case 6: + case 3: + memcpy(bus.mpc_bustype, "EISA ", 6); + break; + case 4: + case 7: + memcpy(bus.mpc_bustype, "MCA ", 6); + } + MP_bus_info(&bus); + if (mpc_default_type > 4) { + bus.mpc_busid = 1; + memcpy(bus.mpc_bustype, "PCI ", 6); + MP_bus_info(&bus); + } + + ioapic.type = MP_IOAPIC; + ioapic.apicid = 2; + ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01; + ioapic.flags = MPC_APIC_USABLE; + ioapic.apicaddr = 0xFEC00000; + MP_ioapic_info(&ioapic); + + /* + * We set up most of the low 16 IO-APIC pins according to MPS rules. + */ + construct_default_ioirq_mptable(mpc_default_type); + + lintsrc.mpc_type = MP_LINTSRC; + lintsrc.mpc_irqflag = 0; /* conforming */ + lintsrc.mpc_srcbusid = 0; + lintsrc.mpc_srcbusirq = 0; + lintsrc.mpc_destapic = MP_APIC_ALL; + for (i = 0; i < 2; i++) { + lintsrc.mpc_irqtype = linttypes[i]; + lintsrc.mpc_destapiclint = i; + MP_lintsrc_info(&lintsrc); + } + MP_construct_default_timer(); +} + +/* + * Scan the memory blocks for an SMP configuration block. 
+ */ +void __init +get_smp_config(void) +{ + struct intel_mp_floating *mpf = mpf_found; + if (!smp_found_config || mpf == NULL) { + printk("MultiProcessor Specification could not find\n"); + return; + } + printk("MultiProcessor Specification v1.%d\n", mpf->mpf_specification); + if (mpf->mpf_feature2 & (1<<7)) { + printk(" IMCR and PIC compatibility mode.\n"); + panic("PIC cannot be used by this kernel\n"); + } else { + printk(" Virtual Wire compatibility mode.\n"); + pic_mode = 0; + } + + /* + * Now see if we need to read further. + */ + if (mpf->mpf_feature1 != 0) { + + printk("Default MP configuration #%d\n", mpf->mpf_feature1); + construct_default_ISA_mptable(mpf->mpf_feature1); + + } else if (mpf->mpf_physptr) { + /* + * Read the physical hardware table. Anything here will + * override the defaults. + */ + smp_read_mpc(mpc_addr_to_virt(mpf->mpf_physptr)); + + /* + * If there are no explicit MP IRQ entries, then we are + * broken. We set up most of the low 16 IO-APIC pins to + * ISA defaults and hope it will work. + */ + if (!mp_irq_entries) { + struct mpc_config_bus bus; + + printk("BIOS bug, no explicit IRQ entries, " + "using default mptable. " + "(tell your hw vendor)\n"); + + bus.mpc_type = MP_BUS; + bus.mpc_busid = 0; + memcpy(bus.mpc_bustype, "ISA ", 6); + MP_bus_info(&bus); + + construct_default_ioirq_mptable(0); + } + if (mp_iolinks_num <= 0) + MP_construct_default_iolinks(); + } else + BUG(); + + printk("Processors: %d\n", mp_num_processors); + /* + * Only use the first configuration found. 
+ */ +} + +void __init +find_smp_config(boot_info_t *bblock) +{ + u32 *bp; + struct intel_mp_floating *mpf; + + mpf_boot_info = bblock; + mpf = (struct intel_mp_floating *) + mpc_addr_to_virt(bblock->mp_table_base); + + if (mpf == NULL) + return; + + bp = (u32 *)mpf; + DebugMPT("mpf->mpf_signature = 0x%x SMP_MAGIC_IDENT = 0x%x\n", + *bp, SMP_MAGIC_IDENT); + DebugMPT("mpf->mpf_length = %d should be 1\n", + mpf->mpf_length); + DebugMPT("mpf->mpf_checksum = 0x%x mpf_checksum() = 0x%x\n", + mpf->mpf_checksum, + mpf_checksum((unsigned char *)bp, sizeof(*mpf))); + DebugMPT("mpf->mpf_specification = %d should be 1/4 or 8\n", + mpf->mpf_specification); + if ((*bp == SMP_MAGIC_IDENT) && + (mpf->mpf_length == 1) && + !mpf_checksum((unsigned char *)bp, sizeof(*mpf)) && + ((mpf->mpf_specification == 1) || + (mpf->mpf_specification == 4) || + (mpf->mpf_specification == 8)) ) { + + smp_found_config = 1; + printk("found SMP MP-table\n"); + mpf_found = mpf; + } +} + +#define APIC_ADD_MASK 0x000000FFFFFFFFFF /* as physical address */ + +#if 0 +static void __init_kexec print_lintsrc_info(struct mpc_config_lintsrc *m) +{ + dump_printk("------- Lintsrc info entry\n"); + dump_printk("lintsrc entry: word 1 (32 bit) 0x%x\n", *(int *)m); + dump_printk("lintsrc entry: word 2 (32 bit) 0x%x\n", *(int *)(m + 1)); + dump_printk("Lint: type %d, pol %d, trig %d, bus %d," + " IRQ %02x,\n\t\t\t APIC ID %x, APIC LINT %02x\n", + m->mpc_irqtype, m->mpc_irqflag & 3, + (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid, + m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint); +} + +static void __init_kexec print_intsrc_info(struct mpc_intsrc *m) +{ + dump_printk("------- Intsrc info entry\n"); + dump_printk("intsrc entry: word 1 (32 bit) 0x%x\n", *(int *)m); + dump_printk("intsrc entry: word 2 (32 bit) 0x%x\n", *(int *)(m + 1)); + + dump_printk("Int: type %d, pol %d, trig %d, bus %d," + " IRQ %02x,\n\t\t\t APIC ID %x, APIC INT %02x\n", + m->irqtype, m->irqflag & 3, + (m->irqflag >> 2) & 3, m->srcbus, + 
m->srcbusirq, m->dstapic, m->dstirq); +} + +static void __init_kexec print_iolink_info(struct mpc_config_iolink *m) +{ + dump_printk("------- I/O link entry\n"); + dump_printk("io apic entry: word 1 (32 bit) 0x%x\n", *(int *)m); + dump_printk("io apic entry: word 2 (32 bit) 0x%x\n", *(int *)(m + 1)); + dump_printk("io apic entry: word 3 (32 bit) 0x%x\n", *(int *)(m + 2)); + dump_printk("io apic entry: word 4 (32 bit) 0x%x\n", *(int *)(m + 3)); + dump_printk("io apic entry: word 5 (32 bit) 0x%x\n", *(int *)(m + 4)); + dump_printk("io apic entry: word 6 (32 bit) 0x%x\n", *(int *)(m + 5)); + + dump_printk("IO link #%d on node %d, version 0x%02x,", + m->link, m->node, m->mpc_iolink_ver); + if (m->mpc_iolink_type == MP_IOLINK_IOHUB) { + dump_printk(" connected to IOHUB: min bus #%d max bus #%d " + "IO APIC ID %d\n", + m->bus_min, m->bus_max, m->apicid); + } else { + dump_printk(" is RDMA controller\n"); + } +} + +static void __init_kexec print_ioapic_info(struct mpc_ioapic *m) +{ + dump_printk("------- I/O apic entry\n"); + dump_printk("io apic entry: word 1 (32 bit) 0x%x\n", *(int *)m); + dump_printk("io apic entry: word 2 (32 bit) 0x%x\n", *(int *)(m + 1)); + + if (!(m->flags & MPC_APIC_USABLE)) { + dump_printk("i/o apic is unusable\n"); + return; + } + dump_printk("I/O APIC ID #%d Version %d at 0x%x.\n", m->apicid, + m->apicver, m->apicaddr & APIC_ADD_MASK); +} + +static void __init_kexec +print_timer_info(mpc_config_timer_t *m) +{ + + dump_printk("------- System timer entry\n"); + dump_printk("timer type %d Version %d at 0x%lX.\n", + m->mpc_timertype, m->mpc_timerver, m->mpc_timeraddr); +} + +static void __init_kexec +print_i2c_spi_info(struct mpc_config_i2c *m){ + dump_printk("------- i2c/spi controller\n"); + dump_printk("device %d revision %02x control base addr = 0x%lx, " + "data base addr = 0x%lx, IRQ = %d\n", + m->mpc_max_channel, m->mpc_revision, + m->mpc_i2ccntrladdr, m->mpc_i2cdataaddr, + m->mpc_i2c_irq); +} + +static void __init_kexec print_bus_info(struct 
mpc_config_bus *m) +{ + char str[7]; + + dump_printk("------- Bus entry\n"); + memcpy(str, m->mpc_bustype, 6); + + dump_printk("bus entry: word 1 (32 bit) 0x%x\n", *(int *)m); + dump_printk("bus entry: word 2 (32 bit) 0x%x\n", *(int *)(m + 1)); + dump_printk("Bus #%d is %s\n", m->mpc_busid, str); + +} + +static void __init_kexec print_processor_info(struct mpc_config_processor *m) +{ + dump_printk("------- Processor entry\n"); + dump_printk("processor entry: word 1 (32 bit) 0x%x\n", *(int *)m); + dump_printk("processor entry: word 2 (32 bit) 0x%x\n", + m->mpc_cpufeature); + dump_printk("processor entry: word 3 (32 bit) 0x%x\n", + m->mpc_featureflag); + + dump_printk("Proc: lapic id %d, lapic version %d,\n" + "\t cpuflags(bit 1 - cpu enable, bit 2 - bootstrap) 0x%x\n" + "\t\t signature 0x%x flags 0x%x\n", m->mpc_apicid, m->mpc_apicver, + m->mpc_cpuflag & 0x3, m->mpc_cpufeature, m->mpc_featureflag); +} + +static void __init_kexec print_entries(char type, char *mpt) +{ + if (type == MP_BUS) { + struct mpc_config_bus *m = (struct mpc_config_bus *)mpt; + print_bus_info(m); + } else if (type == MP_IOLINK) { + struct mpc_config_iolink *m = (struct mpc_config_iolink *)mpt; + print_iolink_info(m); + } else if (type == MP_IOAPIC) { + struct mpc_ioapic *m = (struct mpc_ioapic *)mpt; + print_ioapic_info(m); + } else if (type == MP_INTSRC) { + struct mpc_intsrc *m = (struct mpc_intsrc *)mpt; + print_intsrc_info(m); + } else if (type == MP_LINTSRC) { + struct mpc_config_lintsrc *m = (struct mpc_config_lintsrc *)mpt; + print_lintsrc_info(m); + } else if (type == MP_TIMER) { + mpc_config_timer_t *m = (mpc_config_timer_t *)mpt; + print_timer_info(m); + } else if (type == MP_I2C_SPI) { + struct mpc_config_i2c *m = (struct mpc_config_i2c *)mpt; + print_i2c_spi_info(m); + } else { + dump_printk("print_entries() invalid MP table entry type " + "%d\n", type); + } +} + +static void __init_kexec print_mptable(struct intel_mp_floating *mpf) +{ + char str[16]; + struct mpc_table *mpc = 
(struct mpc_table *) + mpc_addr(mpf->mpf_physptr); + int count = MP_SIZE_ALIGN(sizeof(*mpc)); + unsigned char *mpt = MP_ADDR_ALIGN(((unsigned char *)mpc) + count); + + dump_printk("\n\nMP CONFIGURATION TABLE HEADER:\n\n"); + dump_printk("mpf->mpf_feature1 = %d\n", mpf->mpf_feature1); + + if (mpf->mpf_feature1 != 0) { + dump_printk(".......construct_default_ISA_mptable\n"); + return; + } + + if (!mpf->mpf_physptr) { + dump_printk("null mptable address pointer\n"); + return; + } + + dump_printk("SMP mptable: signature [%c%c%c%c]!\n", + mpc->mpc_signature[0], + mpc->mpc_signature[1], + mpc->mpc_signature[2], + mpc->mpc_signature[3]); + + dump_printk("SMP mptable: mpc->mpc_length %d\n", mpc->mpc_length); + if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) + { + dump_printk("SMP mptable: checksum error!\n"); + return; + } + if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04 && mpc->mpc_spec!=0x08) + { + dump_printk("Bad Config Table version (%d)!!\n", mpc->mpc_spec); + return; + } + memcpy(str,mpc->mpc_oem,8); + str[8]=0; + dump_printk("OEM ID: %s\n", str); + + memcpy(str, mpc->mpc_productid, 12); + str[12]=0; + dump_printk("Product ID: %s\n", str); + + dump_printk("APIC at: 0x%lx\n", mpc->mpc_lapic & APIC_ADD_MASK); + + dump_printk("\n\nMP TABLE CONFIGURATION ENTRIES:\n\n"); + + while (count < MP_SIZE_ALIGN(mpc->mpc_length)) { + if (*mpt == MP_PROCESSOR) { + struct mpc_config_processor *m= + (struct mpc_config_processor *)mpt; + print_processor_info(m); + mpt += MP_SIZE_ALIGN(sizeof(*m)); + count += MP_SIZE_ALIGN(sizeof(*m)); + } else if (*mpt == MP_IOLINK) { + struct mpc_config_iolink *m= + (struct mpc_config_iolink *)mpt; + print_iolink_info(m); + mpt += MP_SIZE_ALIGN(sizeof(*m)); + count += MP_SIZE_ALIGN(sizeof(*m)); + } else if ((*mpt == MP_BUS) || (*mpt == MP_INTSRC) || + (*mpt == MP_LINTSRC)) { + print_entries(*mpt, (char *) mpt); + mpt += MP_SIZE_ALIGN(8); + count += MP_SIZE_ALIGN(8); + } else if (*mpt == MP_IOAPIC) { + struct mpc_ioapic *m = + (struct 
mpc_ioapic *)mpt; + print_ioapic_info(m); + mpt += MP_SIZE_ALIGN(sizeof(*m)); + count += MP_SIZE_ALIGN(sizeof(*m)); + } else if (*mpt == MP_TIMER) { + print_timer_info((mpc_config_timer_t *)mpt); + mpt += MP_SIZE_ALIGN(sizeof(mpc_config_timer_t)); + count += MP_SIZE_ALIGN(sizeof(mpc_config_timer_t)); + } else if (*mpt == MP_I2C_SPI) { + struct mpc_config_i2c *m = + (struct mpc_config_i2c *)mpt; + print_i2c_spi_info(m); + mpt += MP_SIZE_ALIGN(sizeof(*m)); + count += MP_SIZE_ALIGN(sizeof(*m)); + } else { + dump_printk("unrecognized entry: %c ", *mpt); + mpt += 8; count += 8; + } + } + return; + +} + +static void __init_kexec print_floating_point(struct intel_mp_floating *mpf) +{ + u32 *bp; + + bp = (u32 *)mpf; + + dump_printk("\n\nFLOATING POINT STRUCTURE:\n\n"); + dump_printk("floating point: word 1 (32 bit) 0x%08x\n", *bp); + dump_printk("floating point: word 2 (32 bit) 0x%08x\n", *(bp+1)); + dump_printk("floating point: word 3 (32 bit) 0x%08x\n", *(bp+2)); + dump_printk("floating point: word 4 (32 bit) 0x%08x\n\n", *(bp+3)); + + dump_printk("mpf->mpf_signature = [%c%c%c%c]\n", + mpf->mpf_signature[0], mpf->mpf_signature[1], + mpf->mpf_signature[2], mpf->mpf_signature[3]); + dump_printk("mpf->mpf_signature = 0x%x SMP_MAGIC_IDENT = 0x%x\n", + *bp, SMP_MAGIC_IDENT); + dump_printk("mpf->mpf_length = %d should be 1\n", + mpf->mpf_length); + dump_printk("mpf->mpf_checksum = 0x%x check sum() = 0x%x\n", + mpf->mpf_checksum, + mpf_checksum((unsigned char *)bp, sizeof(*mpf))); + dump_printk("mpf->mpf_specification = %d should be 1/4 or 8\n", + mpf->mpf_specification); + + if ((*bp == SMP_MAGIC_IDENT) && + (mpf->mpf_length == 1) && + !mpf_checksum((unsigned char *)bp, sizeof(*mpf)) && + ((mpf->mpf_specification == 1) || + (mpf->mpf_specification == 4) || + (mpf->mpf_specification == 8)) ) { + dump_printk("found floating pointer structure at 0x%lx\n", + mpf); + print_mptable(mpf); + } else { + dump_printk("error floating pointer structur at 0x%lx\n", + mpf); + + } +} + 
+static void __init_kexec print_boot_info(boot_info_t *boot_info) +{ + int node; + int bank; + int total_banks = 0; + + dump_printk("signature 0x%x\n", boot_info->signature); + dump_printk("vga_mode %d\n", boot_info->vga_mode); + dump_printk("num_of_banks %d\n", boot_info->num_of_banks); + dump_printk("num_of_busy areas %d\n", boot_info->num_of_busy); + dump_printk("kernel_base 0x%lx\n", boot_info->kernel_base); + dump_printk("kernel_size 0x%lx\n", boot_info->kernel_size); + + dump_printk("ramdisk_base 0x%lx\n", boot_info->ramdisk_base); + dump_printk("ramdisk_size 0x%lx\n", boot_info->ramdisk_size); + dump_printk("num_of_cpus %d\n", boot_info->num_of_cpus); + dump_printk("machine flags 0x%04x\n", boot_info->mach_flags); + dump_printk("mp_table_base 0x%lx\n", boot_info->mp_table_base); + dump_printk("serial base 0x%x\n", boot_info->serial_base); + + if (!strncmp(boot_info->kernel_args_string, + KERNEL_ARGS_STRING_EX_SIGNATURE, + KERNEL_ARGS_STRING_EX_SIGN_SIZE)) + dump_printk("kernel string %s\n", + boot_info->bios.kernel_args_string_ex); + else + dump_printk("kernel string %s\n", + boot_info->kernel_args_string); + + dump_printk("mach_serialn 0x%lx\n", boot_info->mach_serialn); + dump_printk("kernel_csum 0x%lx\n", boot_info->kernel_csum); + + dump_printk("num_of_nodes %d\n", boot_info->num_of_nodes); + dump_printk("nodes_map %d\n", boot_info->nodes_map); + + for (node = 0; node < L_MAX_MEM_NUMNODES; node ++) { + bank_info_t *cur_bank; + + cur_bank = boot_info->nodes_mem[node].banks; + if (cur_bank->size == 0) { + if (boot_info->nodes_map & (1 << node)) { + dump_printk("Node #%d has not physical " + "memory\n", node); + } else { + dump_printk("Node #%d is not online\n", node); + } + continue; /* node has not memory */ + } else if (!(boot_info->nodes_map & (1 << node))) { + dump_printk("BUG : Node #%d is not online, but has " + "physical memory\n", node); + } + + dump_printk("Node #%d physical memory banks: ", node); + for (bank = 0; bank < L_MAX_NODE_PHYS_BANKS; 
bank ++) { + if (cur_bank->size) { + dump_printk(" [%d] : address 0x%x, " + "size 0x%x\n", + bank, cur_bank->address, + cur_bank->size); + } else + break; /* no more memory on node */ + cur_bank ++; + total_banks ++; + } + } + if (boot_info->num_of_banks && + (boot_info->num_of_banks != total_banks)) { + dump_printk("BUG : boot_info->num_of_banks %d != " + "number of banks at boot_info->nodes_mem %d\n", + boot_info->num_of_banks, total_banks); + } + for (bank = 0; bank < boot_info->num_of_busy; bank ++) { + dump_printk("boot_info->busy[%d].address 0x%x\n", + bank, boot_info->busy[bank].address); + dump_printk("boot_info->busy[%d].size 0x%x\n", + bank, boot_info->busy[bank].size); + } + + if (boot_info->mp_table_base) + print_floating_point((struct intel_mp_floating *) + mpc_addr(boot_info->mp_table_base)); + else + dump_printk("null mp floating structure pointer\n"); +} + +void __init_kexec print_bootblock(bootblock_struct_t *bootblock) +{ + boot_info_t *boot_info = &bootblock->info; + + dump_printk("BOOT_INFO *******************************************:\n"); + print_boot_info(boot_info); + dump_printk("BOOT_INFO *******************************************:\n"); +} +#endif diff --git a/arch/l/kernel/panic2nvram.c b/arch/l/kernel/panic2nvram.c new file mode 100644 index 000000000000..c7e9dd744f15 --- /dev/null +++ b/arch/l/kernel/panic2nvram.c @@ -0,0 +1,247 @@ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +unsigned int start_nvram_panic_area; +unsigned int size_nvram_panic_area = 0; +int (*panic2nvram_read) (unsigned int off, unsigned char *addr, int sz); +void (*panic2nvram_write) (unsigned int off, unsigned char *addr, int sz); +int (*panic2nvram_raw_write) (unsigned int off, unsigned char *addr, int sz); + + +static unsigned int cur_nvram_panic = 0; +static u_char output_header[] = "This is nvram panic output\n"; +static u_char Output_header[] = "This is nvram panic 
Output\n"; + + + +int inline read_from_nvram(unsigned int off, unsigned char *addr, int sz) +{ + if (panic2nvram_read) + return panic2nvram_read(off, addr, sz); + else + printk("NO panic2nvram_read\n"); + return -1; +} + +void inline write_to_nvram(unsigned int off, unsigned char *addr, int sz) +{ + if (!panic2nvram_write) { + goto no_write; + } + if (off < start_nvram_panic_area) { + goto no_write; + } + if (off + sz > start_nvram_panic_area + size_nvram_panic_area) { + goto no_write; + } + panic2nvram_write(off, addr, sz); + return; +no_write: + printk("NO panic2nvram_write =%px, off = 0x%x, sz = 0x%x\n", + panic2nvram_write, off, sz); +} + + +static inline int raw_write_to_nvram(u_int off, u_char *addr, int sz) +{ + if (panic2nvram_write) { + panic2nvram_write(off, addr, sz); + return sz; + } +// if (panic2nvram_raw_write) { +// return panic2nvram_raw_write(off, addr, sz); +// } + return 0; +} + + +void write_to_nvram_panic_area(const char *str, int len) +{ + if (len + cur_nvram_panic >= size_nvram_panic_area) { + return; + } + if (!cur_nvram_panic) { + cur_nvram_panic += + raw_write_to_nvram(start_nvram_panic_area, + output_header, strlen(output_header)); + } + cur_nvram_panic += + raw_write_to_nvram(start_nvram_panic_area + cur_nvram_panic, + (u_char *)str, len); + +} + + + +static void *nvram_panic_seq_start(struct seq_file *f, loff_t *pos) +{ + + /* The pointer we are returning is arbitrary, + * it just has to be non-NULL and not IS_ERR + * in the success case. + */ + return *pos == 0 ? 
&nvram_panic_seq_start: NULL; +} + +static void *nvram_panic_seq_next(struct seq_file *f, void *v, loff_t *pos) +{ + ++*pos; + return nvram_panic_seq_start(f, pos); +} + +static void nvram_panic_seq_stop(struct seq_file *f, void *v) +{ + /* Nothing to do */ +} + +static ssize_t nvram_panic_seq_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + char c; + unsigned char cc[3]; + if (count == 0) { + return 0; + } + if (get_user(c, buf)) { + return -EFAULT; + } + if (c == 'c') { + int l; + long zeroes = 0; + for (l = 0; l < size_nvram_panic_area; l += sizeof(long)) { + write_to_nvram(start_nvram_panic_area + l, + (char *)&zeroes, sizeof(long)); + } + cur_nvram_panic = 0; + return count; + } + if (c == 'p') { + panic("This is juct panic call\n"); + return count; + } +#if 0 + if (c == 't') { +// int i = 0; + char c = 'G'; + int s = strlen(output_header); + cur_nvram_panic = 0; +// for (i = 0; i < size_nvram_panic_area + s; i += s) + write_to_nvram(start_nvram_panic_area, &c, 1); + return count; + } +#endif + if (c == 'T') { + int i = 0; + int s = strlen(Output_header); + cur_nvram_panic = 0; + for (i = 0; i < size_nvram_panic_area + s; i += s) + write_to_nvram_panic_area(Output_header, s); + return count; + } + if (c == '0') { + cur_nvram_panic = 0; + return count; + } + + cc[0] = c; + cc[1] = '\n'; + cc[2] = 0; + write_to_nvram(start_nvram_panic_area + cur_nvram_panic, cc, 3); + cur_nvram_panic++; + + return count; +// return -EINVAL; +} + + + +int show_nvram_panic(struct seq_file *p, void *v) +{ + char *data = kmalloc(size_nvram_panic_area, GFP_KERNEL); + char *l; + + if (data == NULL) { + return -ENOMEM; + } + read_from_nvram(start_nvram_panic_area, data, size_nvram_panic_area); + *(data + size_nvram_panic_area -1) = 0; +// read_from_nvram(start_nvram_panic_area, data, 32); +// *(data + 30) = '\n'; +// *(data + 31) = 0; +printk("show_nvram_panic: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", *((int *)data), *((int *)data+4), *((int *)data+8), 
*((int *)data+12)); + l = memchr(data, 0, size_nvram_panic_area); + seq_write(p, data, l - data); + kfree(data); + return 0; +} + +static const struct seq_operations nvram_panic_seq_ops = { + .start = nvram_panic_seq_start, + .next = nvram_panic_seq_next, + .stop = nvram_panic_seq_stop, + .show = show_nvram_panic, +}; + +static int nvram_panic_open(struct inode *inode, struct file *filp) +{ + return seq_open(filp, &nvram_panic_seq_ops); +} + +static const struct file_operations proc_nvram_panic_operations = { + .open = nvram_panic_open, + .read = seq_read, + .write = nvram_panic_seq_write, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int __init start_nvram_panic_area_setup(char *str) +{ + start_nvram_panic_area = memparse(str, &str); + return 1; +} + +__setup("panic2nvram-start=", start_nvram_panic_area_setup); + + +static int __init size_nvram_panic_area_setup(char *str) +{ + size_nvram_panic_area = memparse(str, &str); + return 1; +} + +__setup("panic2nvram-size=", size_nvram_panic_area_setup); + + +static int __init nvram_panic_init(void) +{ + char buf[32]; + buf[0] = 0; + read_from_nvram(start_nvram_panic_area, buf, 32); + if (!strncmp(buf, output_header, strlen(output_header))) { + if (proc_create("driver/nvram_panic", S_IWUSR | S_IROTH, + NULL, &proc_nvram_panic_operations) == NULL) { + pr_warn("%s: Could not create " + "/proc/sys/kernel/nvram_panic\n", __func__); + return -EINVAL; + } + } + + return 0; +} + +late_initcall(nvram_panic_init); + + diff --git a/arch/l/kernel/pic_irq.c b/arch/l/kernel/pic_irq.c new file mode 100644 index 000000000000..1c267a5c120d --- /dev/null +++ b/arch/l/kernel/pic_irq.c @@ -0,0 +1,396 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include +#include +#include + +/* + * This file holds code that is common for 1) e2k APIC, 2) e90s APIC and + * 3) e2k EPIC 
implementations. + * + * Corresponding declarations can be found in asm-l/hw_irq.h and asm-l/pic.h + */ + +DEFINE_PER_CPU(vector_irq_t, vector_irq) = { + [0 ... NR_VECTORS - 1] = -1 +}; + +/* + * Array of handlers for system interrupts (local timer, IPI, etc). + */ +void (*interrupt[NR_VECTORS])(struct pt_regs *regs) = { + [0 ... NR_VECTORS - 1] = NULL +}; + +atomic_t irq_err_count; +DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp; +EXPORT_PER_CPU_SYMBOL(irq_stat); + +int first_system_vector = NR_VECTORS - 1; + +__init_recv +void setup_PIC_vector_handler(int vector, + void (*handler)(struct pt_regs *), bool system, char *name) +{ + if (test_bit(vector, used_vectors) || interrupt[vector]) + BUG(); + + set_bit(vector, used_vectors); + if (system && first_system_vector > vector) + first_system_vector = vector; + interrupt[vector] = handler; +} + +#define irq_stats(cpu) (&per_cpu(irq_stat, cpu)) + +/* + * /proc/interrupts printing: + */ +int arch_show_interrupts(struct seq_file *p, int prec) +{ + int j; + + seq_printf(p, "%*s: ", prec, "NMI"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", irq_stats(j)->__nmi_count); + seq_printf(p, " Non-maskable interrupts\n"); + seq_printf(p, "%*s: ", prec, "LOC"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs); + seq_printf(p, " Local timer interrupts\n"); + + seq_printf(p, "%*s: ", prec, "SPU"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count); + seq_printf(p, " Spurious interrupts\n"); +//TODO +#if 0 + seq_printf(p, "%*s: ", prec, "IWI"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs); + seq_printf(p, " IRQ work interrupts\n"); +#endif + seq_printf(p, "%*s: ", prec, "RTR"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count); + seq_printf(p, " read retries\n"); +#ifdef CONFIG_SMP + seq_printf(p, "%*s: ", prec, "RES"); + for_each_online_cpu(j) + 
seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count); + seq_printf(p, " Rescheduling interrupts\n"); +# ifdef CONFIG_E2K + seq_printf(p, "%*s: ", prec, "CAL"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", irq_stats(j)->irq_call_count - + irq_stats(j)->irq_tlb_count); + seq_printf(p, " Function call interrupts\n"); + seq_printf(p, "%*s: ", prec, "TLB"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count); + seq_printf(p, " TLB shootdowns\n"); +# else + seq_printf(p, "%*s: ", prec, "CAL"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", irq_stats(j)->irq_call_count); + seq_printf(p, " Function call interrupts\n"); +# endif +#endif +#ifdef CONFIG_E2K +# if IS_ENABLED(CONFIG_RDMA) || IS_ENABLED(CONFIG_RDMA_SIC) || \ + IS_ENABLED(CONFIG_RDMA_NET) + seq_printf(p, "v%*d: ", prec - 1, RDMA_INTERRUPT_VECTOR); + for_each_online_cpu(j) + seq_printf(p, "%10u ", irq_stats(j)->irq_rdma_count); + seq_printf(p, " RDMA interrupts\n"); +# endif +# if IS_ENABLED(CONFIG_ELDSP) + if (IS_MACHINE_ES2) { + seq_printf(p, "v%*d: ", prec - 1, LVT3_INTERRUPT_VECTOR); + for_each_online_cpu(j) + seq_printf(p, "%10u ", irq_stats(j)->irq_eldsp_count); + seq_printf(p, " Elbrus DSP interrupts\n"); + } +# endif +#endif + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count)); + return 0; +} + + +/* + * do_IRQ handles all normal device IRQ's (the special + * SMP cross-CPU interrupts have their own specific + * handlers). 
+ */ +void do_IRQ(struct pt_regs * regs, unsigned int vector) +{ + struct pt_regs *old_regs = set_irq_regs(regs); + struct irq_desc *desc; + int irq; + + irq = __this_cpu_read(vector_irq[vector]); + +#ifdef CONFIG_E2K + /*It works under CONFIG_PROFILING flag only */ + store_do_irq_ticks(); +#endif + + l_irq_enter(); + + desc = irq_to_desc(irq); + + if (likely(desc)) { + generic_handle_irq_desc(desc); + } else { + ack_pic_irq(); + if (printk_ratelimit()) + pr_emerg("%s: %d No irq handler for vector " + "0x%x (irq %d)\n", __func__, + smp_processor_id(), vector, irq); + } + +#ifdef CONFIG_E2K + /*It works under CONFIG_PROFILING flag only */ + define_time_of_do_irq(irq); +#endif + + l_irq_exit(); + + set_irq_regs(old_regs); +} + +void ack_bad_irq(unsigned int irq) +{ + printk("unexpected IRQ trap at vector %02x\n", irq); + /* + * Currently unexpected vectors happen only on SMP and APIC. + * We _must_ ack these because every local APIC has only N + * irq slots per priority level, and a 'hanging, unacked' IRQ + * holds up an irq slot - in excessive cases (when multiple + * unexpected vectors occur) that might lock up the APIC + * completely. 
+ */ + ack_pic_irq(); +} + +/* + * /proc/stat helpers + */ +u64 arch_irq_stat_cpu(unsigned int cpu) +{ + u64 sum = irq_stats(cpu)->__nmi_count; + + sum += irq_stats(cpu)->apic_timer_irqs; + sum += irq_stats(cpu)->irq_spurious_count; +#ifdef CONFIG_SMP + sum += irq_stats(cpu)->irq_resched_count; + sum += irq_stats(cpu)->irq_call_count; +#endif +#ifdef CONFIG_E2K +# if IS_ENABLED(CONFIG_RDMA) || IS_ENABLED(CONFIG_RDMA_SIC) || \ + IS_ENABLED(CONFIG_RDMA_NET) + sum += irq_stats(cpu)->irq_rdma_count; +# endif +# if IS_ENABLED(CONFIG_ELDSP) + if (IS_MACHINE_ES2) { + sum += irq_stats(cpu)->irq_eldsp_count; + } +# endif +#endif + + return sum; +} + +u64 arch_irq_stat(void) +{ + u64 sum = atomic_read(&irq_err_count) + atomic_read(&irq_mis_count); + + return sum; +} + +static ssize_t show_irq_table(struct device *dev, struct device_attribute *attr, + char *buf) +{ + u64 ret = 0; + int *vector_table = per_cpu(vector_irq, dev->id); + int i; + + for (i = 0; i < NR_VECTORS; i++) { + if (i % 16 == 0) { + ret += scnprintf(buf + ret, PAGE_SIZE - ret, + "0x%02x ", i); + } + + if (*interrupt[i]) { + ret += scnprintf(buf + ret, PAGE_SIZE - ret, " * "); + } else { + ret += scnprintf(buf + ret, PAGE_SIZE - ret, + "%3d ", vector_table[i]); + } + + if (i % 16 == 15) { + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); + } + } + + + return ret; +} + +const struct device_attribute irq_dev_attr = { + .attr = { + .name = "irq", + .mode = S_IRUGO, + + }, + .show = show_irq_table, + .store = NULL, +}; + +static int __init irq_sysfs_init(void) +{ + int ret = 0; + int cpu; + + for_each_online_cpu(cpu) { + ret = device_create_file(get_cpu_device(cpu), &irq_dev_attr); + + if (ret) + return ret; + } + + return ret; +} +late_initcall(irq_sysfs_init); + +#ifdef CONFIG_IRQ_WORK +void arch_irq_work_raise(void) +{ + pic_irq_work_raise(); +} +#endif + +#ifdef CONFIG_SMP +void arch_send_call_function_ipi_mask(const struct cpumask *mask) +{ + pic_send_call_function_ipi_mask(mask); +} + +void 
arch_send_call_function_single_ipi(int cpu) +{ + pic_send_call_function_single_ipi(cpu); +} + +void smp_send_reschedule(int cpu) +{ + pic_send_reschedule(cpu); +} + +void irq_force_complete_move(struct irq_desc *desc) +{ + pic_irq_force_complete_move(desc); +} +#endif + +noinline notrace void do_nmi(struct pt_regs *regs) +{ + pic_do_nmi(regs); +} + +DEFINE_PER_CPU(long long, next_rt_intr) = 0; +EXPORT_SYMBOL(next_rt_intr); + +void __ref do_postpone_tick(int to_next_rt_ns) +{ + int cpu; + long long cur_time = ktime_to_ns(ktime_get()); + long long next_tm; + unsigned long flags; + struct pt_regs regs_new; + struct pt_regs *old_regs; + + local_irq_save(flags); + cpu = smp_processor_id(); + next_tm = per_cpu(next_rt_intr, cpu); + if (to_next_rt_ns) { + per_cpu(next_rt_intr, cpu) = cur_time + to_next_rt_ns; + } else{ + per_cpu(next_rt_intr, cpu) = 0; + } +#if 0 + trace_printk("DOPOSTP old_nx-cur=%lld cur=%lld nx=%lld\n", + next_tm - cur_time, cur_time, cur_time + to_next_rt_ns); +#endif + if (next_tm == 1) { + /* FIXME next line has long run time and may be deleted */ + memset(®s_new, 0, sizeof(struct pt_regs)); + /* need to get answer to user_mod() only */ +#ifdef CONFIG_E90S + regs_new.tstate = TSTATE_PRIV; +#else + regs_new.stacks.top = NATIVE_NV_READ_SBR_REG_VALUE(); + regs_new.next = NULL; +#endif + old_regs = set_irq_regs(®s_new); + l_irq_enter(); + local_pic_timer_interrupt(); + l_irq_exit(); + set_irq_regs(old_regs); + } + local_irq_restore(flags); +} +EXPORT_SYMBOL(do_postpone_tick); + +static int print_ICs(void) +{ + /* print_local_pics() returns 1, if apic/epic verbosity is off */ + if (print_local_pics(false)) + return 0; + + print_IO_PICs(); + + return 0; +} +late_initcall(print_ICs); + +/* MSI arch specific hooks */ +int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) +{ + return setup_msi_irqs_pic(dev, nvec, type); +} + +void arch_teardown_msi_irq(unsigned int irq) +{ + teardown_msi_irq_pic(irq); +} + +int hard_smp_processor_id(void) +{ + 
return read_pic_id(); +} + +void __setup_vector_irq(int cpu) +{ + __pic_setup_vector_irq(cpu); +} \ No newline at end of file diff --git a/arch/l/kernel/pmc/Makefile b/arch/l/kernel/pmc/Makefile new file mode 100644 index 000000000000..b47ed8908540 --- /dev/null +++ b/arch/l/kernel/pmc/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_L_PMC) := pmc.o +pmc-$(CONFIG_L_PMC) := pmc_drv.o pmc_hwmon.o pmc_e1cp.o +obj-$(CONFIG_S2_PMC) := pmc.o +pmc-$(CONFIG_S2_PMC) := pmc_drv.o pmc_hwmon.o pmc_s2.o diff --git a/arch/l/kernel/pmc/pmc.h b/arch/l/kernel/pmc/pmc.h new file mode 100644 index 000000000000..7b1a90688466 --- /dev/null +++ b/arch/l/kernel/pmc/pmc.h @@ -0,0 +1,196 @@ +#ifndef _PMC_H_ +#define _PMC_H_ + +#include + +/* FID's and BFS bypass */ +#define HB_BFS_PCI_CONF_REG 0x70 +#define HB_BFS_BYPASS_MASK 0x80 + +/* + * S2: Global storage for NBSR_NODE_CFG_INFO, same for all nodes, + * initialized in s2_pmc_init for + * E1CP: global for HB_BFS_PCI_CONF_REG, initialized in + * pmc_l_cpufreq_init throug pci read + */ +extern unsigned int bfs_bypass_val; + +enum PStateValRegs { + PMC_PSTATEVAL_REG0, /* 0 */ + PMC_PSTATEVAL_REG1, /* 1 */ + PMC_PSTATEVAL_REG2, /* 2 */ + PMC_PSTATEVAL_REG3, /* 3 */ + PMC_MAX_STATES, /* 4 */ +}; + +#define Fcpu(fbfs, mii_inv, nii) ((fbfs) * 16 / ((mii_inv) * (nii))) +#define Mii_inv(fid) ((((fid) & 0xc0) >> 6) ? 
\ + 1 << ((((fid) & 0xc0) >> 6) - 1) : 0) +#define Nii(fid) ((fid) & 0x3f) + +#ifdef CONFIG_E90S /* E90S */ +/* From R2000 (S2) */ + +/* We consider that 100 MHz is base freq for all R2000 boards */ +#define S2_BASE_FREQ 100 +#define S2_MAX_PSTATES (PMC_L_MAX_PSTATES + 1) +#define S2_MAX_AV_PSTATES 60 +#define S2_MAX_AV_ID (S2_MAX_AV_PSTATES-1) +#define S2_MAX_FREQ 2000 +#define S2_MIN_FREQ 200 + +#define S2_TRANSITION_LATENCY 6000 +#define PMC_TRANSITION_LATENCY S2_TRANSITION_LATENCY + +#define BASE_FREQ S2_BASE_FREQ +#define MAX_PSTATES S2_MAX_PSTATES +#define MAX_AV_PSTATES S2_MAX_AV_PSTATES +#define MAX_AV_ID S2_MAX_AV_ID +#define MAX_FREQ S2_MAX_FREQ +#define MIN_FREQ S2_MIN_FREQ + +/* return 0 if cpu in CL0 and 1 if cpu in CL1 */ +#define cpu_to_cluster(cpu) (((cpu) & 0x4) ? 1 : 0) +#define Fbfs(fref, cfgclksys) ((fref) * ((cfgclksys) + 10)) + +#else /* E2K */ + +/* From E1CP */ +/* PMC's bar is 2 in host bridge */ +#define E1CP_PMC_BAR 2 +#define E1CP_BASE_FREQ 100 +#define E1CP_MAX_PSTATES (PMC_L_MAX_PSTATES + 1) +#define E1CP_MAX_AV_PSTATES 60 +#define E1CP_MAX_AV_ID (E1CP_MAX_AV_PSTATES-1) +#define E1CP_MAX_FREQ 1100 +#define E1CP_MIN_FREQ 143 +#define E1CP_MAX_3D_CLK1X_FREQ 533 +#define E1CP_MIN_3D_CLK1X_FREQ 143 +#define E1CP_MAX_3D_CLKSH_FREQ 800 +#define E1CP_MIN_3D_CLKSH_FREQ 143 + +#define E1CP_TRANSITION_LATENCY 60000 + +/* Bypass FIDs */ +#define E1CP_BYPASS_FID_P0 0xe0 +#define E1CP_BYPASS_FID_P1 0x00 +#define E1CP_BYPASS_FID_P2 0x20 +#define E1CP_BYPASS_FID_P3 0x60 + +#define BASE_FREQ E1CP_BASE_FREQ +#define MAX_FREQ E1CP_MAX_FREQ +#define MIN_FREQ E1CP_MIN_FREQ +#define MAX_PSTATES E1CP_MAX_PSTATES +#define MAX_AV_PSTATES E1CP_MAX_AV_PSTATES +#define MAX_AV_ID E1CP_MAX_AV_ID +#define PMC_TRANSITION_LATENCY E1CP_TRANSITION_LATENCY + +#define Fbfs(fref, cfgclksys) (((fref) * (cfgclksys + 10)) / 2) + +/* cpufreq subsystem: */ +extern struct cpufreq_frequency_table pmc_l_3d_clkSh_freqs[E1CP_MAX_PSTATES]; +extern struct cpufreq_frequency_table 
pmc_l_3d_clk1x_freqs[E1CP_MAX_PSTATES]; +/* available frequencies */ +extern struct cpufreq_frequency_table + pmc_l_3d_clk1x_available_freqs[E1CP_MAX_AV_PSTATES]; +extern struct cpufreq_frequency_table + pmc_l_3d_clkSh_available_freqs[E1CP_MAX_AV_PSTATES]; +/* dvfs subsystem */ +extern struct regulator *vout_regulator; + +#endif /* CONFIG_E90S */ + + +/* Moortec temperature sensor values */ +#define PMC_MOORTEC_TEMP_VALID 0x1000 +#define PMC_MOORTEC_TEMP_VALUE_MASK 0xfff +#define PMC_MOORTEC_TEMP_K 1083 +#define PMC_MOORTEC_TEMP_VALUE_SHIFT 12 + +/* 3D core target registers flags */ +#define PMC_L_TARGET_CLK1X 1 +#define PMC_L_TARGET_CLKSH 2 + +/* PMC I2C master */ +#define PMC_I2C_REGS_BASE 0x1000 +/* From E1CP */ + +#ifdef CONFIG_E90S +typedef enum pmc_access_regs { + PMC_L_COVFID_STATUS_REG, /* 0 */ + PMC_L_P_STATE_CNTRL_REG, /* 1 */ + PMC_L_P_STATE_STATUS_REG, /* 2 */ + PMC_L_P_STATE_VALUE_0_REG, /* 3 */ + PMC_L_P_STATE_VALUE_1_REG, /* 4 */ + PMC_L_P_STATE_VALUE_2_REG, /* 5 */ + PMC_L_P_STATE_VALUE_3_REG, /* 6 */ +} pmc_reg; + +unsigned int s2_reg_to_addr(pmc_reg reg, unsigned int cpu); +void __iomem *__pmc_regs(int node); +unsigned s2_get_freq_mult(int cpu); +#else /* E2K */ +extern void __iomem *pmc_cbase; /*Global for e1cp, init in pmc_l_cpufreq_init*/ +void __iomem *__pmc_regs(int node); + +#endif /* CONFIG_E90S */ + +/* PMC common function */ +#if 0 +int pmc_l_cpufreq_get_state(unsigned int cpu); +void pmc_l_cpufreq_set_state(unsigned int state, unsigned int cpu); +int pmc_l_cpufreq_set_target(struct cpufreq_policy *policy, + unsigned int target_freq, unsigned int relation); +void pmc_l_cpufreq_update_state_reg(unsigned int state, + unsigned int fid, unsigned int cpu); +int pmc_l_cpufreq_verify_policy(struct cpufreq_policy *policy); +unsigned int pmc_l_cpufreq_get(unsigned int cpu); +unsigned int pmc_l_cpufreq_resolve_freq(struct cpufreq_policy *policy, + unsigned int target_freq); +#endif +int pmc_l_cpufreq_init(struct cpufreq_policy *policy); +int 
pmc_l_calc_freq_tables(struct cpufreq_policy *policy, + unsigned int cfgclksys); + +/* hwmon */ +int pmc_hwmon_init(void); +void pmc_hwmon_exit(void); + +/* legacy sensors interface */ +int pmc_temp_sensors_init(void); +void pmc_temp_sensors_exit(void); + +#ifdef CONFIG_E90S +#define pmc_reg_readl(__reg, __cpu) \ +({ \ + int __node = cpu_to_node(__cpu); \ + void __iomem *__pmc_cbase = __pmc_regs(__node); \ + unsigned int __reg_addr = s2_reg_to_addr(__reg, __cpu); \ + unsigned int __ret = __raw_readl(__pmc_cbase + __reg_addr); \ + __ret; \ +}) +#define pmc_reg_writel(__val, __reg, __cpu) do { \ + int __node = cpu_to_node(__cpu); \ + void __iomem *__pmc_cbase = __pmc_regs(__node); \ + unsigned int __reg_addr = s2_reg_to_addr(__reg, __cpu); \ + __raw_writel(__val, __pmc_cbase + __reg_addr); \ +} while (0) +#else /* E2K */ +#define pmc_reg_readl(__reg, __cpu) __raw_readl(pmc_cbase + __reg); +#define pmc_reg_writel(__val, __reg, __cpu) __raw_writel(__val, pmc_cbase + __reg); +#endif /* CONFIG_E90S */ + +/* cpufreq subsystem: */ +extern struct cpufreq_frequency_table pmc_l_freqs[MAX_PSTATES]; +/* available frequencies */ +extern struct cpufreq_frequency_table + pmc_l_available_freqs[MAX_AV_PSTATES]; + +#ifndef CONFIG_E90S +extern struct cpufreq_frequency_table + pmc_l_3d_clk1x_available_freqs[E1CP_MAX_AV_PSTATES]; +extern struct cpufreq_frequency_table + pmc_l_3d_clkSh_available_freqs[E1CP_MAX_AV_PSTATES]; +#endif /* !CONFIG_E90S */ + +#endif /* _PMC_H_ */ diff --git a/arch/l/kernel/pmc/pmc_drv.c b/arch/l/kernel/pmc/pmc_drv.c new file mode 100644 index 000000000000..5ce49f7210e2 --- /dev/null +++ b/arch/l/kernel/pmc/pmc_drv.c @@ -0,0 +1,684 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_E90S +#include +#endif + +#include "pmc.h" + +#undef DEBUG_PMC +#undef DebugPMC +#define DEBUG_PMC 0 +#if DEBUG_PMC +#define DebugPMC(...) 
pr_debug(##__VA_ARGS__) +#else /* DEBUG_PMC */ +#define DebugPMC(...) do {} while (0) +#endif /* DEBUG_PMC */ + + +/*******/ +void __iomem *pmc_cbase; /*Global for e1cp*/ + +#ifndef CONFIG_E90S /* E2K */ +struct l_pmc l_pmc[MAX_NUM_PMCS]; +#endif + +#ifdef CONFIG_E90S +void __iomem *__pmc_regs(int node) +{ + node = node >= 0 ? node : 0; + return (void *)(BASE_NODE0 + + (BASE_NODE1 - BASE_NODE0) * node + 0x9000); +} +#else /* E2K */ +void __iomem *__pmc_regs(int node) +{ + return pmc_cbase; +} +#endif /* CONFIG_E90S */ + +#ifdef CONFIG_CPU_FREQ +static int pmc_l_cpufreq_exit(struct cpufreq_policy *policy); + +#ifndef CONFIG_E90S /* E2K */ +/* cpufreq subsystem: */ +struct cpufreq_frequency_table pmc_l_3d_clkSh_freqs[E1CP_MAX_PSTATES]; +struct cpufreq_frequency_table pmc_l_3d_clk1x_freqs[E1CP_MAX_PSTATES]; +/* available frequencies */ +struct cpufreq_frequency_table + pmc_l_3d_clk1x_available_freqs[E1CP_MAX_AV_PSTATES]; +struct cpufreq_frequency_table + pmc_l_3d_clkSh_available_freqs[E1CP_MAX_AV_PSTATES]; +#endif + +int pmc_l_calc_freq_tables(struct cpufreq_policy *policy, + unsigned int cfgclksys) +{ + unsigned int Fbfs, Fboot, Fcpu; + unsigned int boots_fid, fid; + unsigned int Mii_inv, Nii; + int idx = 0; +#ifndef CONFIG_E90S + struct device *cpu_dev; + struct dev_pm_opp *opp; + int idx_3d_clk1x = 0; + int idx_3d_clkSh = 0; + + cpu_dev = get_cpu_device(policy->cpu); +#endif /* CONFIG_E90S */ + /* P-state 0 is taken from boot's jumper. 
*/ + Fbfs = Fbfs(BASE_FREQ, cfgclksys); + boots_fid = pmc_reg_readl(PMC_L_P_STATE_VALUE_0_REG, policy->cpu); + Mii_inv = Mii_inv(boots_fid); + Nii = Nii(boots_fid); + Fboot = Fcpu(Fbfs, Mii_inv, Nii); + + pmc_l_freqs[PMC_PSTATEVAL_REG0].frequency = Fboot * 1000; + pmc_l_freqs[PMC_PSTATEVAL_REG0].driver_data = boots_fid; + pr_debug("set pmc_l_freqs[PMC_PSTATEVAL_REG0]: FID 0x%x FREQ %u Mhz.\n", + pmc_l_freqs[PMC_PSTATEVAL_REG0].driver_data, + pmc_l_freqs[PMC_PSTATEVAL_REG0].frequency / 1000); + + WARN_ON(Fboot > MAX_FREQ); + WARN_ON(Fboot < MIN_FREQ); + + for (fid = 0; fid <= 0xff; fid++) { + Mii_inv = Mii_inv(fid); + Nii = Nii(fid); + + + if (Mii_inv == 0) + continue; + if (Nii < 8 || Nii > 32) + continue; + + Fcpu = Fcpu(Fbfs, Mii_inv, Nii); + +#ifndef CONFIG_E90S + if (dev_pm_opp_get_opp_count(cpu_dev) > 0) { + opp = dev_pm_opp_find_freq_exact(cpu_dev, Fcpu * 1000000, true); + if (IS_ERR(opp)) { + continue; + } + } +#endif + + if ( + Fcpu <= Fboot && Fcpu >= MIN_FREQ + && ((idx > 0 && (Fcpu * 1000) < + pmc_l_available_freqs[idx-1].frequency) || idx == 0) + && idx < MAX_AV_ID) { + pmc_l_available_freqs[idx].driver_data = fid; + pmc_l_available_freqs[idx].frequency = Fcpu * 1000; + DebugPMC("Available freq [%d]: FID %d FREQ %u MHz\n", + idx, + pmc_l_available_freqs[idx].driver_data, + pmc_l_available_freqs[idx].frequency / 1000); + idx++; + } + +#ifndef CONFIG_E90S /* E1CP GPU */ + if ( + Fcpu <= E1CP_MAX_3D_CLK1X_FREQ && Fcpu >= E1CP_MIN_3D_CLK1X_FREQ + && ((idx_3d_clk1x > 0 && (Fcpu * 1000) < + pmc_l_3d_clk1x_available_freqs[idx_3d_clk1x-1].frequency) || idx_3d_clk1x == 0) + && idx_3d_clk1x < E1CP_MAX_AV_ID) { + pmc_l_3d_clk1x_available_freqs[idx_3d_clk1x].driver_data = fid; + pmc_l_3d_clk1x_available_freqs[idx_3d_clk1x].frequency = + Fcpu * 1000; + idx_3d_clk1x++; + } + + if ( + Fcpu <= E1CP_MAX_3D_CLKSH_FREQ && Fcpu >= E1CP_MIN_3D_CLKSH_FREQ + && ((idx_3d_clkSh > 0 && (Fcpu * 1000) < + pmc_l_3d_clkSh_available_freqs[idx_3d_clkSh-1].frequency) || idx_3d_clkSh == 
0) + && idx_3d_clkSh < E1CP_MAX_AV_ID) { + pmc_l_3d_clkSh_available_freqs[idx_3d_clkSh].driver_data = fid; + pmc_l_3d_clkSh_available_freqs[idx_3d_clkSh].frequency = + Fcpu * 1000; + idx_3d_clkSh++; + } +#endif /* CONFIG_E90S */ + } + + pmc_l_available_freqs[idx].driver_data = 0; + pmc_l_available_freqs[idx].frequency = CPUFREQ_TABLE_END; +#ifndef CONFIG_E90S /* E1CP GPU */ + pmc_l_3d_clk1x_available_freqs[idx_3d_clk1x].driver_data = 0; + pmc_l_3d_clk1x_available_freqs[idx_3d_clk1x].frequency = + CPUFREQ_TABLE_END; + pmc_l_3d_clkSh_available_freqs[idx_3d_clkSh].driver_data = 0; + pmc_l_3d_clkSh_available_freqs[idx_3d_clkSh].frequency = + CPUFREQ_TABLE_END; +#endif /* CONFIG_E90S */ + + BUG_ON(idx == 0); + + pmc_l_freqs[PMC_PSTATEVAL_REG1].driver_data = + pmc_l_available_freqs[idx / 3].driver_data; + pmc_l_freqs[PMC_PSTATEVAL_REG1].frequency = + pmc_l_available_freqs[idx / 3].frequency; + pr_debug("set pmc_l_freqs[PMC_PSTATEVAL_REG1]: [idx %d] FID 0x%x FREQ %u KHz.\n", + idx / 3, + pmc_l_freqs[PMC_PSTATEVAL_REG1].driver_data, + pmc_l_freqs[PMC_PSTATEVAL_REG1].frequency); + + pmc_l_freqs[PMC_PSTATEVAL_REG2].driver_data = + pmc_l_available_freqs[(idx * 2) / 3].driver_data; + pmc_l_freqs[PMC_PSTATEVAL_REG2].frequency = + pmc_l_available_freqs[(idx * 2) / 3].frequency; + pr_debug("set pmc_l_freqs[PMC_PSTATEVAL_REG2]: [idx %d] FID 0x%x FREQ %u KHz.\n", + (idx * 2) / 3, + pmc_l_freqs[PMC_PSTATEVAL_REG2].driver_data, + pmc_l_freqs[PMC_PSTATEVAL_REG2].frequency); + + pmc_l_freqs[PMC_PSTATEVAL_REG3].driver_data = + pmc_l_available_freqs[idx - 1].driver_data; + pmc_l_freqs[PMC_PSTATEVAL_REG3].frequency = + pmc_l_available_freqs[idx - 1].frequency; + pr_debug("set pmc_l_freqs[PMC_PSTATEVAL_REG3]: [idx %d] FID 0x%x FREQ %u KHz.\n", + idx - 1, + pmc_l_freqs[PMC_PSTATEVAL_REG3].driver_data, + pmc_l_freqs[PMC_PSTATEVAL_REG3].frequency); + + pmc_l_freqs[MAX_PSTATES - 1].driver_data = 0; + pmc_l_freqs[MAX_PSTATES - 1].frequency = CPUFREQ_TABLE_END; + +#ifndef CONFIG_E90S /* E1CP GPU 
*/ + pmc_l_3d_clk1x_freqs[PMC_PSTATEVAL_REG0].driver_data = + pmc_l_3d_clk1x_available_freqs[0].driver_data; + pmc_l_3d_clk1x_freqs[PMC_PSTATEVAL_REG0].frequency = + pmc_l_3d_clk1x_available_freqs[0].frequency; + pmc_l_3d_clk1x_freqs[PMC_PSTATEVAL_REG1].driver_data = + pmc_l_3d_clk1x_available_freqs[(idx_3d_clk1x / 3)].driver_data; + pmc_l_3d_clk1x_freqs[PMC_PSTATEVAL_REG1].frequency = + pmc_l_3d_clk1x_available_freqs[(idx_3d_clk1x / 3)].frequency; + pmc_l_3d_clk1x_freqs[PMC_PSTATEVAL_REG2].driver_data = + pmc_l_3d_clk1x_available_freqs[(idx_3d_clk1x / 3) * 2].driver_data; + pmc_l_3d_clk1x_freqs[PMC_PSTATEVAL_REG2].frequency = + pmc_l_3d_clk1x_available_freqs[(idx_3d_clk1x / 3) * 2].frequency; + pmc_l_3d_clk1x_freqs[PMC_PSTATEVAL_REG3].driver_data = + pmc_l_3d_clk1x_available_freqs[(idx_3d_clk1x - 1)].driver_data; + pmc_l_3d_clk1x_freqs[PMC_PSTATEVAL_REG3].frequency = + pmc_l_3d_clk1x_available_freqs[(idx_3d_clk1x - 1)].frequency; + pmc_l_3d_clk1x_freqs[E1CP_MAX_PSTATES - 1].driver_data = 0; + pmc_l_3d_clk1x_freqs[E1CP_MAX_PSTATES - 1].frequency = CPUFREQ_TABLE_END; + + pmc_l_3d_clkSh_freqs[PMC_PSTATEVAL_REG0].driver_data = + pmc_l_3d_clkSh_available_freqs[0].driver_data; + pmc_l_3d_clkSh_freqs[PMC_PSTATEVAL_REG0].frequency = + pmc_l_3d_clkSh_available_freqs[0].frequency; + pmc_l_3d_clkSh_freqs[PMC_PSTATEVAL_REG1].driver_data = + pmc_l_3d_clkSh_available_freqs[(idx_3d_clkSh / 3)].driver_data; + pmc_l_3d_clkSh_freqs[PMC_PSTATEVAL_REG1].frequency = + pmc_l_3d_clkSh_available_freqs[(idx_3d_clkSh / 3)].frequency; + pmc_l_3d_clkSh_freqs[PMC_PSTATEVAL_REG2].driver_data = + pmc_l_3d_clkSh_available_freqs[(idx_3d_clkSh / 3) * 2].driver_data; + pmc_l_3d_clkSh_freqs[PMC_PSTATEVAL_REG2].frequency = + pmc_l_3d_clkSh_available_freqs[(idx_3d_clkSh / 3) * 2].frequency; + pmc_l_3d_clkSh_freqs[PMC_PSTATEVAL_REG3].driver_data = + pmc_l_3d_clkSh_available_freqs[(idx_3d_clkSh - 1)].driver_data; + pmc_l_3d_clkSh_freqs[PMC_PSTATEVAL_REG3].frequency = + 
pmc_l_3d_clkSh_available_freqs[(idx_3d_clkSh - 1)].frequency; + pmc_l_3d_clkSh_freqs[E1CP_MAX_PSTATES - 1].driver_data = 0; + pmc_l_3d_clkSh_freqs[E1CP_MAX_PSTATES - 1].frequency = CPUFREQ_TABLE_END; +#endif /* CONFIG_E90S */ + return 0; +} + + +static int pmc_l_cpufreq_get_state(unsigned int cpu) +{ + unsigned int state = 0; + + state = pmc_reg_readl(PMC_L_P_STATE_CNTRL_REG, cpu); + DebugPMC("PMC_L_P_STATE_CNTRL_REG=0x%x\n", state); + state >>= PMC_L_P_STATE_CNTRL_SHIFT; + state &= PMC_L_P_STATE_CNTRL_MASK; + DebugPMC("state=0x%x\n", state); + + return state; +} + +static void pmc_l_cpufreq_set_state(unsigned int state, unsigned int cpu) +{ + unsigned int st = (state << PMC_L_P_STATE_CNTRL_SHIFT) & + PMC_L_P_STATE_CNTRL_MASK; + DebugPMC("state=0x%x st=0x%x\n", state, st); + pmc_reg_writel(st, PMC_L_P_STATE_CNTRL_REG, cpu); +} +static void pmc_l_cpufreq_update_state_reg(unsigned int state, + unsigned int fid, unsigned int cpu) +{ + if (state == PMC_PSTATEVAL_REG0) { + pmc_reg_writel(fid, PMC_L_P_STATE_VALUE_0_REG, cpu); + } else if (state == PMC_PSTATEVAL_REG1) { + pmc_reg_writel(fid, PMC_L_P_STATE_VALUE_1_REG, cpu); + } else if (state == PMC_PSTATEVAL_REG2) { + pmc_reg_writel(fid, PMC_L_P_STATE_VALUE_2_REG, cpu); + } else if (state == PMC_PSTATEVAL_REG3) { + pmc_reg_writel(fid, PMC_L_P_STATE_VALUE_3_REG, cpu); + } +} + + +static unsigned int pmc_l_cpufreq_resolve_freq(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + unsigned int resolved_freq; + int index; + + index = cpufreq_frequency_table_target(policy, target_freq, + CPUFREQ_RELATION_L); + if (index < 0) { + pr_err("%s: returned frequency index is %d.\n", + __func__, index); + return index; + } + + resolved_freq = policy->freq_table[index].frequency; + policy->cached_target_freq = resolved_freq; + + return resolved_freq; +} + +static unsigned int pmc_l_cpufreq_get(unsigned int cpu) +{ + return pmc_l_freqs[pmc_l_cpufreq_get_state(cpu)].frequency; +} + +/** + * pmc_l_cpufreq_verify_policy - 
verifies a new CPUFreq policy + * @policy: new policy + * + * Limit must be within low_freq and high_freq, with at least + * one border included. + */ +static int pmc_l_cpufreq_verify_policy(struct cpufreq_policy_data *policy) +{ + return cpufreq_frequency_table_verify(policy, pmc_l_freqs); +} + + +/** + * pmc_l_cpufreq_set_target - set a new CPUFreq policy + * @policy: new policy + * @target_freq: new freq + * @relation: + * + * Sets a new CPUFreq policy/freq. + */ +static int pmc_l_cpufreq_set_target(struct cpufreq_policy *policy, + unsigned int target_freq, unsigned int relation) +{ + int ii = 0; + unsigned int i; + unsigned int newstate = 0; + int ffound = 0; + struct cpufreq_freqs freqs; +#ifndef CONFIG_E90S /* E2K */ + unsigned long volt = 0; + struct dev_pm_opp *opp; + int ret = 0; + struct device *cpu_dev; +#endif + + /* Select fequency from pmc_l_available_freqs only when bfs is disabled. + * In other case (bfs) - use only fixed values of FIDs, determined + * in pmc_l_freq table during init. + */ + if (bfs_bypass_val & HB_BFS_BYPASS_MASK) { /* use pmc_l_freqs here */ + if (policy->freq_table != pmc_l_freqs) { /* switch tables */ + + pr_err("%s: policy->freq_table is supposed " + "to be set properly already. Has bfs_bypass_val" + " changed since it was initialized?\n", + __func__); + + /* Use cpufreq_generic_init() here: it allows + * to switch policy's frequency table + * and transition latency at the same time + * using cpufreq interface. 
We don't care about + * setting all cpus in the policy's mask as we + * check it's presence later with cpu_online() + * anyway */ + cpufreq_generic_init(policy, pmc_l_freqs, + PMC_TRANSITION_LATENCY); + } + + newstate = cpufreq_frequency_table_target(policy, + target_freq, relation); + if (newstate < 0) + return newstate; + + freqs.new = pmc_l_freqs[newstate].frequency; + } else { /* use pmc_l_available_freqs table here */ + if (policy->freq_table != pmc_l_available_freqs) { /* switch */ + + pr_err("%s: policy->freq_table is supposed " + "to be set properly already. Has bfs_bypass_val" + " changed since it was initialized?\n", + __func__); + + /* Use cpufreq_generic_init() here: see above */ + cpufreq_generic_init(policy, pmc_l_available_freqs, + PMC_TRANSITION_LATENCY); + } + + newstate = cpufreq_frequency_table_target(policy, + target_freq, relation); + if (newstate < 0) + return newstate; + + freqs.new = pmc_l_available_freqs[newstate].frequency; + } + + freqs.old = pmc_l_freqs[pmc_l_cpufreq_get_state(policy->cpu)].frequency; + + if (freqs.old == freqs.new) + return 0; + + /* + * 0) If bfs - skip 1) and 2) as newstate is valid. + */ + if (!(bfs_bypass_val & HB_BFS_BYPASS_MASK)) { + + /* + * 1) Check if wanted frequency is in pmc_l_freqs: if so select + * index of state and update newstate. + */ + + for (ii = 0; ii < PMC_MAX_STATES; ii++) { + if (pmc_l_freqs[ii].frequency == freqs.new) { + newstate = ii; + ffound = 1; + break; + } + } + + /* + * 2) If frequency is not presented - userspace governor probably + * is used: update pmc_l_freqs table and PStateValueX reg, + * update newstate by index of updated entry. 
+ */ + if (!ffound) { + unsigned int fid; + unsigned int idx; + unsigned int delta1; + unsigned int delta2; + + if (freqs.new > + pmc_l_freqs[PMC_PSTATEVAL_REG0].frequency) { + idx = PMC_PSTATEVAL_REG0; + } else if (freqs.new < + pmc_l_freqs[PMC_PSTATEVAL_REG3].frequency) { + idx = PMC_PSTATEVAL_REG3; + } else if (freqs.new < + pmc_l_freqs[PMC_PSTATEVAL_REG0].frequency && + freqs.new > + pmc_l_freqs[PMC_PSTATEVAL_REG1].frequency) { + idx = PMC_PSTATEVAL_REG1; + } else if (freqs.new < + pmc_l_freqs[PMC_PSTATEVAL_REG2].frequency && + freqs.new > + pmc_l_freqs[PMC_PSTATEVAL_REG3].frequency) { + idx = PMC_PSTATEVAL_REG2; + } else { + delta1 = freqs.new - + pmc_l_freqs[PMC_PSTATEVAL_REG2].frequency; + delta2 = + pmc_l_freqs[PMC_PSTATEVAL_REG1].frequency + - freqs.new; + + if (delta1 > delta2) { + idx = PMC_PSTATEVAL_REG1; + } else { + idx = PMC_PSTATEVAL_REG2; + } + } + + pmc_l_freqs[idx].frequency = freqs.new; + fid = pmc_l_available_freqs[newstate].driver_data; + pmc_l_freqs[idx].driver_data = fid; + newstate = idx; + pmc_l_cpufreq_update_state_reg(newstate, fid, policy->cpu); + +#if 0 + /* TODO: This function is non-exported */ + cpufreq_frequency_table_cpuinfo(policy, pmc_l_freqs); +#endif + } + } + + +#ifndef CONFIG_E90S /* E2K */ + if (vout_regulator) { + cpu_dev = get_cpu_device(policy->cpu); + + opp = dev_pm_opp_find_freq_exact( + cpu_dev, freqs.new * 1000, true); + + if (IS_ERR(opp)) { + opp = NULL; + } else { + volt = dev_pm_opp_get_voltage(opp); + dev_pm_opp_put(opp); + } + } + + if ((volt > 0) && (freqs.new > freqs.old)) { + ret = regulator_set_voltage_tol( + vout_regulator, volt, 0); + } +#endif + + /* One PMC, many CPUs */ + for_each_cpu(i, policy->cpus) { + if (!cpu_online(i)) + continue; + cpufreq_freq_transition_begin(policy, &freqs); + pmc_l_cpufreq_set_state(newstate, policy->cpu); /* Can't catch a failure */ + cpufreq_freq_transition_end(policy, &freqs, 0); /* here */ + } + +#ifndef CONFIG_E90S /* E2K */ + if ((volt > 0) && (freqs.new < freqs.old)) { 
+ ret = regulator_set_voltage_tol( + vout_regulator, volt, 0); + } +#endif + + return 0; +} + + +static struct freq_attr *pmc_l_cpufreq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +struct cpufreq_driver pmc_l_cpufreq_driver = { + .init = pmc_l_cpufreq_init, + .verify = pmc_l_cpufreq_verify_policy, + .target = pmc_l_cpufreq_set_target, + .resolve_freq = pmc_l_cpufreq_resolve_freq, + .exit = pmc_l_cpufreq_exit, + .get = pmc_l_cpufreq_get, + .name = "pmc_l_cpufreq", + .attr = pmc_l_cpufreq_attr, +}; + +#endif /* CONFIG_CPU_FREQ */ + + +#ifndef CONFIG_E90S +static int get_pmc_cbase(void) +{ + int result = -ENODEV; + struct pci_dev *pdev = NULL; + unsigned long covfid_status; + + struct resource r[] = { + { + .flags = IORESOURCE_MEM, + .start = PMC_I2C_REGS_BASE, + .end = PMC_I2C_REGS_BASE + 0x20 - 1 + }, + }; + struct l_i2c2_platform_data pmc_i2c = { + .bus_nr = -1, + .base_freq_hz = 100 * 1000 * 1000, + .desired_freq_hz = 100 * 1000, + }; + + pdev = pci_get_device(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_HB, + pdev); + if (!pdev) + return result; + + result = pci_enable_device_mem(pdev); + + if (result) { + pci_dev_put(pdev); + pr_err("pmc_init:" + " failed to enable pci mem device\n"); + return result; + } + + l_pmc[0].cntrl_base = pci_iomap(pdev, E1CP_PMC_BAR, 0); + l_pmc[0].pdev = pdev; + + pr_err("pmc_init: l_pmc[0]=%p", &l_pmc[0]); + pr_err("pmc_init: l_pmc[0].cntrl_base=%p\n", + l_pmc[0].cntrl_base); + + pmc_cbase = l_pmc[0].cntrl_base; + + covfid_status = __raw_readl(pmc_cbase + PMC_L_COVFID_STATUS_REG); + pr_err("pmc_init: covfid_status_lo = %lx\n", + covfid_status); + + /* Initialize I2C master */ + r[0].start += pci_resource_start(pdev, E1CP_PMC_BAR); + r[0].end += pci_resource_start(pdev, E1CP_PMC_BAR); + + l_pmc[0].i2c_chan = + platform_device_register_resndata(&l_pmc[0].pdev->dev, + "pmc-i2c", PLATFORM_DEVID_AUTO, r, + ARRAY_SIZE(r), + &pmc_i2c, sizeof(pmc_i2c)); + if (l_pmc[0].i2c_chan == NULL) { + pr_err("pmc_init:" + " 
failed to initialize pmc_i2c master\n"); + } + + return result; +} +#endif + +static int __init pmc_init(void) +{ + int res = 0; +#ifdef CONFIG_E90S + int node; + unsigned int node_cfg_info[MAX_NUMNODES] = {0}; +#endif + +#ifdef CONFIG_E90S + if (e90s_get_cpu_type() != E90S_CPU_R2000) { + return 0; + } + /* Check configurations of nodes and exit if it differs */ + for_each_online_node(node) { + node_cfg_info[node] = nbsr_readl(NBSR_NODE_CFG_INFO, node); + DebugPMC("node=%d cfg = 0x%x\n", node, node_cfg_info[node]); + if (node > 0 && node_cfg_info[node] != node_cfg_info[node-1]) { + DebugPMC("s2_pmc_init error: \ + (node_cfg_info[%d] = 0x%x) != \ + (node_cfg_info[%d] = 0x%x)", \ + node, node_cfg_info[node], \ + node-1, node_cfg_info[node-1]); + return -ENODEV; + } + } + + bfs_bypass_val = node_cfg_info[0]; +#else + res = get_pmc_cbase(); + if (res) { + pr_err("PMC: failed to get pmc_cbase err = %d\n", res); + return res; + } +#endif + +#ifdef CONFIG_CPU_FREQ + res = cpufreq_register_driver(&pmc_l_cpufreq_driver); + if (res) { + pr_err("PMC: failed to register cpufreq err = %d\n", res); + return res; + } +#endif + res = pmc_temp_sensors_init(); + if (res) { + pr_err("PMC: failed to init temp sensors err = %d\n", res); + return res; + } + res = pmc_hwmon_init(); + if (res) { + pr_err("PMC: failed to hwmon err = %d\n", res); + return res; + } + return res; +} + +#ifdef CONFIG_CPU_FREQ +static int pmc_l_cpufreq_exit(struct cpufreq_policy *policy) +{ +#ifndef CONFIG_E90S /* E2K */ + pmc_cbase = NULL; +#endif + return 0; +} +#endif + +static void __exit pmc_exit(void) +{ +#ifdef CONFIG_E90S + if (e90s_get_cpu_type() != E90S_CPU_R2000) { + return; + } +#endif + pmc_hwmon_exit(); + +#ifdef CONFIG_CPU_FREQ + pmc_temp_sensors_exit(); + + cpufreq_unregister_driver(&pmc_l_cpufreq_driver); + +#ifndef CONFIG_E90S /* E2K */ + regulator_put(vout_regulator); +#endif +#endif +} + +module_init(pmc_init); +module_exit(pmc_exit); + +MODULE_AUTHOR("Evgeny Kravtsunov"); 
+MODULE_DESCRIPTION("PMC driver"); +MODULE_LICENSE("GPL"); +MODULE_SOFTDEP("pre: max20730"); diff --git a/arch/l/kernel/pmc/pmc_e1cp.c b/arch/l/kernel/pmc/pmc_e1cp.c new file mode 100644 index 000000000000..7f93b60d287d --- /dev/null +++ b/arch/l/kernel/pmc/pmc_e1cp.c @@ -0,0 +1,487 @@ +/* + * linux/arch/l/kernel/pmc.c + * + * Copyright (C) 2013 Evgeny Kravtsunov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * PMC (Power Management Controller) for e2k (Processor-2) + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_CPU_FREQ + +#include +#include +#ifdef CONFIG_E90S +#include +#endif + +#include "pmc.h" + +#undef DEBUG_PMC +#undef DebugPMC +#define DEBUG_PMC 0 +#if DEBUG_PMC +#define DebugPMC(...) pr_debug(##__VA_ARGS__) +#else /* DEBUG_PMC */ +#define DebugPMC(...) 
do {} while (0) +#endif /* DEBUG_PMC */ + + +unsigned int bfs_bypass_val; +static unsigned int gpu_scale; + +/* global cpufreq subsystem: */ +struct cpufreq_frequency_table pmc_l_freqs[MAX_PSTATES]; +/* global available frequencies */ +struct cpufreq_frequency_table + pmc_l_available_freqs[MAX_AV_PSTATES]; + +struct regulator *vout_regulator; + +static int pmc_l_gpufreq_get_state(void) +{ + unsigned int state = 0; + + state = __raw_readl(pmc_cbase + PMC_L_P_STATE_3D_CNTRL_REG); + + state >>= PMC_L_P_STATE_3D_CNTRL_SHIFT; + state &= PMC_L_P_STATE_3D_CNTRL_MASK; + + return state; +} + +int pmc_l_gpufreq_get_scale(void) +{ + return gpu_scale; +} +EXPORT_SYMBOL(pmc_l_gpufreq_get_scale); + +int pmc_l_gpufreq_get_frequency(void) +{ + int state = pmc_l_gpufreq_get_state(); + return pmc_l_3d_clk1x_freqs[state].frequency; +} +EXPORT_SYMBOL(pmc_l_gpufreq_get_frequency); + +void pmc_l_gpufreq_set_state(unsigned int state) +{ + unsigned int st; + + st = (state << PMC_L_P_STATE_3D_CNTRL_SHIFT) & + PMC_L_P_STATE_3D_CNTRL_MASK; + + __raw_writel(st, pmc_cbase + PMC_L_P_STATE_3D_CNTRL_REG); +} + +static void pmc_l_gpufreq_update_state_reg(unsigned int state, + unsigned int fid_clk1x, + unsigned int fid_clkSh, + unsigned int target) +{ +/* Here we want to write fid to both FID1 and FID2 */ + if (target & PMC_L_TARGET_CLK1X) + switch (state) { + case PMC_PSTATEVAL_REG0: + __raw_writel(fid_clk1x, pmc_cbase + + PMC_L_P_STATE_3D_VALUE_0_REG); + break; + case PMC_PSTATEVAL_REG1: + __raw_writel(fid_clk1x, pmc_cbase + + PMC_L_P_STATE_3D_VALUE_1_REG); + break; + case PMC_PSTATEVAL_REG2: + __raw_writel(fid_clk1x, pmc_cbase + + PMC_L_P_STATE_3D_VALUE_2_REG); + break; + case PMC_PSTATEVAL_REG3: + __raw_writel(fid_clk1x, pmc_cbase + + PMC_L_P_STATE_3D_VALUE_3_REG); + break; + } + if (target & PMC_L_TARGET_CLKSH) + switch (state) { + case PMC_PSTATEVAL_REG0: + __raw_writel(fid_clkSh, pmc_cbase + + PMC_L_P_STATE_3D_VALUE_0_REG+1); + break; + case PMC_PSTATEVAL_REG1: + __raw_writel(fid_clkSh, 
pmc_cbase + + PMC_L_P_STATE_3D_VALUE_1_REG+1); + break; + case PMC_PSTATEVAL_REG2: + __raw_writel(fid_clkSh, pmc_cbase + + PMC_L_P_STATE_3D_VALUE_2_REG+1); + break; + case PMC_PSTATEVAL_REG3: + __raw_writel(fid_clkSh, pmc_cbase + + PMC_L_P_STATE_3D_VALUE_3_REG+1); + break; + } +} + +static int pmc_l_gpufreq_set_target(unsigned int target_clk1x_freq, + unsigned int target_clkSh_freq, + unsigned int relation) +{ + int ii = 0, j; + unsigned int target_freq, transition_latency; + unsigned int newstate, clk1x_newstate, clkSh_newstate; + int ffound, clk1x_ffound, clkSh_ffound; + struct cpufreq_frequency_table *target_table, *available_freqs; + struct cpufreq_freqs freqs, clk1x_freqs = {}, clkSh_freqs; + struct cpufreq_policy policy; + + newstate = 0; clk1x_newstate = 0; clkSh_newstate = 0; + ffound = 0; clk1x_ffound = 0; clkSh_ffound = 0; + policy.cpu = 0; /* This value is used only for debug output + in cpufreq_frequency_table_target(). So it's not so awful + if we just don't care. */ + policy.min = E1CP_MIN_FREQ * 1000; + policy.max = E1CP_MAX_FREQ * 1000; + + /* Select fequency from pmc_l_available_freqs only when bfs is disabled. + * In other case (bfs) - use only fixed values of FIDs, determined + * in pmc_l_3d_freq table during init. 
+ */ + + for (j = 0; j < 2; j++) { + if (!j) { + target_table = pmc_l_3d_clk1x_freqs; + available_freqs = pmc_l_3d_clk1x_available_freqs; + target_freq = target_clk1x_freq; + transition_latency = E1CP_TRANSITION_LATENCY; + } else { + target_table = pmc_l_3d_clkSh_freqs; + available_freqs = pmc_l_3d_clkSh_available_freqs; + target_freq = target_clkSh_freq; + transition_latency = E1CP_TRANSITION_LATENCY; + } + + if (bfs_bypass_val & HB_BFS_BYPASS_MASK) { /* target_table */ + + policy.freq_table = target_table; + policy.cpuinfo.transition_latency = transition_latency; + newstate = cpufreq_frequency_table_target(&policy, + target_freq, relation); + if (newstate < 0) + return newstate; + + freqs.new = target_table[newstate].frequency; + } else { /* available_freqs table */ + + policy.freq_table = available_freqs; + policy.cpuinfo.transition_latency = transition_latency; + newstate = cpufreq_frequency_table_target(&policy, + target_freq, relation); + if (newstate < 0) + return newstate; + + freqs.new = available_freqs[newstate].frequency; + } + + freqs.old = target_table[pmc_l_gpufreq_get_state()].frequency; + + /* + * 0) If bfs - skip 1) and 2) as newstate is valid. + */ + if (!(bfs_bypass_val & HB_BFS_BYPASS_MASK)) { + + /* + * 1) Check if wanted frequency is in target_table: if so select + * index of state and update newstate. 
+ */ + + for (ii = 0; ii < PMC_MAX_STATES; ii++) { + if (target_table[ii].frequency == freqs.new) { + newstate = ii; + ffound = 1; + break; + } + } + } + + if (!j) { + clk1x_freqs.old = freqs.old; + clk1x_freqs.new = freqs.new; + clk1x_ffound = ffound; + clk1x_newstate = newstate; + } else { + clkSh_freqs.old = freqs.old; + clkSh_freqs.new = freqs.new; + clkSh_ffound = ffound; + clkSh_newstate = newstate; + } + } + + if (clk1x_freqs.old == clk1x_freqs.new && + clkSh_freqs.old == clkSh_freqs.new) + return 0; + + /* + * 2) If frequency is not presented - userspace governor probably + * is used: update target_table table and PStateValueX reg, + * update newstate by index of updated entry. + */ + if (!(clk1x_ffound || clkSh_ffound)) { + unsigned int clk1x_fid; + unsigned int clkSh_fid; + unsigned int idx; + unsigned int delta1; + unsigned int delta2; + + target_table = pmc_l_3d_clk1x_freqs; + freqs.new = clk1x_freqs.new; + freqs.old = clk1x_freqs.old; + + if (freqs.new > + target_table[PMC_PSTATEVAL_REG0].frequency) { + idx = PMC_PSTATEVAL_REG0; + } else if (freqs.new < + target_table[PMC_PSTATEVAL_REG3].frequency) { + idx = PMC_PSTATEVAL_REG3; + } else if (freqs.new < + target_table[PMC_PSTATEVAL_REG0].frequency && + freqs.new > + target_table[PMC_PSTATEVAL_REG1].frequency) { + idx = PMC_PSTATEVAL_REG1; + } else if (freqs.new < + target_table[PMC_PSTATEVAL_REG2].frequency && + freqs.new > + target_table[PMC_PSTATEVAL_REG3].frequency) { + idx = PMC_PSTATEVAL_REG2; + } else { + delta1 = freqs.new - + target_table[PMC_PSTATEVAL_REG2].frequency; + delta2 = + target_table[PMC_PSTATEVAL_REG1].frequency + - freqs.new; + + if (delta1 > delta2) { + idx = PMC_PSTATEVAL_REG1; + } else { + idx = PMC_PSTATEVAL_REG2; + } + } + + target_table[idx].frequency = freqs.new; + clk1x_fid = pmc_l_3d_clk1x_available_freqs[clk1x_newstate].driver_data; + clkSh_fid = pmc_l_3d_clkSh_available_freqs[clkSh_newstate].driver_data; + newstate = idx; + pmc_l_gpufreq_update_state_reg(newstate, 
clk1x_fid, + clkSh_fid, PMC_L_TARGET_CLK1X | PMC_L_TARGET_CLKSH); + } else { + unsigned int clk1x_fid; + unsigned int clkSh_fid; + if (clkSh_ffound) { + newstate = clkSh_newstate; + if (!clk1x_ffound) + clk1x_fid = pmc_l_3d_clk1x_available_freqs[clk1x_newstate].driver_data; + else + clk1x_fid = pmc_l_3d_clk1x_freqs[clk1x_newstate].driver_data; + pmc_l_gpufreq_update_state_reg(newstate, clk1x_fid, 0, PMC_L_TARGET_CLK1X); + } else { + newstate = clk1x_newstate; + clkSh_fid = pmc_l_3d_clkSh_available_freqs[clkSh_newstate].driver_data; + pmc_l_gpufreq_update_state_reg(newstate, 0, clkSh_fid, PMC_L_TARGET_CLKSH); + } + } + /* should we notify anyone here about this trainsition? */ + pmc_l_gpufreq_set_state(newstate); + return 0; +} + +/* pmc_l_gpufreq_set_scale - set GPU frequency according to @scale + * @scale - a real number <1 - 64> + */ +int pmc_l_gpufreq_set_scale(unsigned char scale) +{ + unsigned int target_clk1x_freq, target_clkSh_freq; + gpu_scale = scale; + /* A convertation from @scale to an actual (approximate) + * frequency. 
+ */ + target_clk1x_freq = (1000 * E1CP_MIN_3D_CLK1X_FREQ) + + ((E1CP_MAX_3D_CLK1X_FREQ - E1CP_MIN_3D_CLK1X_FREQ) * 1000 * (scale - 1)) / 63; + target_clkSh_freq = (1000 * E1CP_MIN_3D_CLKSH_FREQ) + + ((E1CP_MAX_3D_CLKSH_FREQ - E1CP_MIN_3D_CLKSH_FREQ) * 1000 * (scale - 1)) / 63; + return pmc_l_gpufreq_set_target(target_clk1x_freq, target_clkSh_freq, + CPUFREQ_RELATION_L); +} +EXPORT_SYMBOL(pmc_l_gpufreq_set_scale); + +static int pmc_l_init_wa_freq_tables(void) +{ + pmc_l_freqs[PMC_PSTATEVAL_REG0].driver_data = E1CP_BYPASS_FID_P0; + pmc_l_freqs[PMC_PSTATEVAL_REG0].frequency = PMC_L_FREQUENCY_1; + pmc_l_freqs[PMC_PSTATEVAL_REG1].driver_data = E1CP_BYPASS_FID_P1; + pmc_l_freqs[PMC_PSTATEVAL_REG1].frequency = PMC_L_FREQUENCY_2; + pmc_l_freqs[PMC_PSTATEVAL_REG2].driver_data = E1CP_BYPASS_FID_P2; + pmc_l_freqs[PMC_PSTATEVAL_REG2].frequency = PMC_L_FREQUENCY_3; + pmc_l_freqs[PMC_PSTATEVAL_REG3].driver_data = E1CP_BYPASS_FID_P3; + pmc_l_freqs[PMC_PSTATEVAL_REG3].frequency = PMC_L_FREQUENCY_4; + pmc_l_freqs[E1CP_MAX_PSTATES - 1].driver_data = 0; + pmc_l_freqs[E1CP_MAX_PSTATES - 1].frequency = CPUFREQ_TABLE_END; + + pmc_l_3d_clk1x_freqs[PMC_PSTATEVAL_REG0].driver_data = E1CP_BYPASS_FID_P0; + pmc_l_3d_clk1x_freqs[PMC_PSTATEVAL_REG0].frequency = PMC_L_FREQUENCY_1; + pmc_l_3d_clk1x_freqs[PMC_PSTATEVAL_REG1].driver_data = E1CP_BYPASS_FID_P1; + pmc_l_3d_clk1x_freqs[PMC_PSTATEVAL_REG1].frequency = PMC_L_FREQUENCY_2; + pmc_l_3d_clk1x_freqs[PMC_PSTATEVAL_REG2].driver_data = E1CP_BYPASS_FID_P2; + pmc_l_3d_clk1x_freqs[PMC_PSTATEVAL_REG2].frequency = PMC_L_FREQUENCY_3; + pmc_l_3d_clk1x_freqs[PMC_PSTATEVAL_REG3].driver_data = E1CP_BYPASS_FID_P3; + pmc_l_3d_clk1x_freqs[PMC_PSTATEVAL_REG3].frequency = PMC_L_FREQUENCY_4; + pmc_l_3d_clk1x_freqs[E1CP_MAX_PSTATES - 1].driver_data = 0; + pmc_l_3d_clk1x_freqs[E1CP_MAX_PSTATES - 1].frequency = CPUFREQ_TABLE_END; + + pmc_l_3d_clkSh_freqs[PMC_PSTATEVAL_REG0].driver_data = E1CP_BYPASS_FID_P0; + pmc_l_3d_clkSh_freqs[PMC_PSTATEVAL_REG0].frequency = 
PMC_L_FREQUENCY_1; + pmc_l_3d_clkSh_freqs[PMC_PSTATEVAL_REG1].driver_data = E1CP_BYPASS_FID_P1; + pmc_l_3d_clkSh_freqs[PMC_PSTATEVAL_REG1].frequency = PMC_L_FREQUENCY_2; + pmc_l_3d_clkSh_freqs[PMC_PSTATEVAL_REG2].driver_data = E1CP_BYPASS_FID_P2; + pmc_l_3d_clkSh_freqs[PMC_PSTATEVAL_REG2].frequency = PMC_L_FREQUENCY_3; + pmc_l_3d_clkSh_freqs[PMC_PSTATEVAL_REG3].driver_data = E1CP_BYPASS_FID_P3; + pmc_l_3d_clkSh_freqs[PMC_PSTATEVAL_REG3].frequency = PMC_L_FREQUENCY_4; + pmc_l_3d_clkSh_freqs[E1CP_MAX_PSTATES - 1].driver_data = 0; + pmc_l_3d_clkSh_freqs[E1CP_MAX_PSTATES - 1].frequency = CPUFREQ_TABLE_END; + + return 0; +} + +int pmc_l_cpufreq_init(struct cpufreq_policy *policy) +{ + int result = -ENODEV; + unsigned int hb_syscfg_val; + struct device *cpu_dev; + struct dev_pm_opp *opp; + + /* Dvfs init */ + cpu_dev = get_cpu_device(policy->cpu); + + vout_regulator = regulator_get_exclusive(cpu_dev, "vout"); + if (IS_ERR(vout_regulator)) { + pr_warn("didn't find vout regulator\n"); + vout_regulator = NULL; + } + + result = dev_pm_opp_of_add_table(cpu_dev); + if (result) + pr_warn("no OPP table for cpu%d\n", policy->cpu); + + + /* Initialize P_State_value_X: + * 1) Check BFS bypass bit value in host brigde pci config space; + * 2) Initialize FID values for both cases + * (0 - normal case, 1 - bypass; + * case is determined by jumper on board). + */ + pci_read_config_dword(l_pmc[0].pdev, HB_BFS_PCI_CONF_REG, + &bfs_bypass_val); + pr_err("BFS val = 0x%x bypass bit: 0x%x\n", + bfs_bypass_val, (bfs_bypass_val & HB_BFS_BYPASS_MASK)); + + hb_syscfg_val = bfs_bypass_val; + pr_err("HB SYSCFG val = 0x%x, Frequency is %d MHz\n", hb_syscfg_val, + ((E1CP_BASE_FREQ * ((hb_syscfg_val & 0xf) + 10)) / 2)); + + if (bfs_bypass_val & HB_BFS_BYPASS_MASK) { + /* WA case. */ + pr_err("WA case\n"); + pmc_l_init_wa_freq_tables(); + } else { + /* Normal case. */ + pr_err("Normal case\n"); + + /* + * Calculate FIDs - Frequencies table. 
+ */ + pmc_l_calc_freq_tables(policy, hb_syscfg_val & 0xf); + } + + /* + * Write FID values to P_State_value_X registers + * (NOTE: we do not touch state P0 on init, - use value from boot. + * P0 can be updated when using userspace governor. + */ + __raw_writel(pmc_l_freqs[PMC_PSTATEVAL_REG1].driver_data, + pmc_cbase + PMC_L_P_STATE_VALUE_1_REG); + __raw_writel(pmc_l_freqs[PMC_PSTATEVAL_REG2].driver_data, + pmc_cbase + PMC_L_P_STATE_VALUE_2_REG); + __raw_writel(pmc_l_freqs[PMC_PSTATEVAL_REG3].driver_data, + pmc_cbase + PMC_L_P_STATE_VALUE_3_REG); + + /* Also write FID values to P_State_3D_value_X register */ + __raw_writel(pmc_l_3d_clk1x_freqs[PMC_PSTATEVAL_REG0].driver_data, + pmc_cbase + PMC_L_P_STATE_3D_VALUE_0_REG); + __raw_writel(pmc_l_3d_clk1x_freqs[PMC_PSTATEVAL_REG1].driver_data, + pmc_cbase + PMC_L_P_STATE_3D_VALUE_1_REG); + __raw_writel(pmc_l_3d_clk1x_freqs[PMC_PSTATEVAL_REG2].driver_data, + pmc_cbase + PMC_L_P_STATE_3D_VALUE_2_REG); + __raw_writel(pmc_l_3d_clk1x_freqs[PMC_PSTATEVAL_REG3].driver_data, + pmc_cbase + PMC_L_P_STATE_3D_VALUE_3_REG); + + __raw_writel(pmc_l_3d_clkSh_freqs[PMC_PSTATEVAL_REG0].driver_data, + pmc_cbase + PMC_L_P_STATE_3D_VALUE_0_REG + 1); + __raw_writel(pmc_l_3d_clkSh_freqs[PMC_PSTATEVAL_REG1].driver_data, + pmc_cbase + PMC_L_P_STATE_3D_VALUE_1_REG + 1); + __raw_writel(pmc_l_3d_clkSh_freqs[PMC_PSTATEVAL_REG2].driver_data, + pmc_cbase + PMC_L_P_STATE_3D_VALUE_2_REG + 1); + __raw_writel(pmc_l_3d_clkSh_freqs[PMC_PSTATEVAL_REG3].driver_data, + pmc_cbase + PMC_L_P_STATE_3D_VALUE_3_REG + 1); + + /* A little trick to save development time */ + pmc_l_gpufreq_set_scale(64); + gpu_scale = 64; + + /* Get boot's frequency, that was set up by jumpers */ + policy->cur = pmc_l_freqs[PMC_PSTATEVAL_REG0].frequency; + + if (!result) { + opp = dev_pm_opp_find_freq_exact( + cpu_dev, policy->cur * 1000, true); + + if (!IS_ERR(opp)) { + if (vout_regulator) { + regulator_set_voltage_tol(vout_regulator, + dev_pm_opp_get_voltage(opp), 0); + } + 
dev_pm_opp_put(opp); + } + } + + if (bfs_bypass_val & HB_BFS_BYPASS_MASK) + cpufreq_generic_init(policy, pmc_l_freqs, + E1CP_TRANSITION_LATENCY); + else + cpufreq_generic_init(policy, pmc_l_available_freqs, + E1CP_TRANSITION_LATENCY); + + return 0; +} + +#endif /* CONFIG_CPU_FREQ */ diff --git a/arch/l/kernel/pmc/pmc_hwmon.c b/arch/l/kernel/pmc/pmc_hwmon.c new file mode 100644 index 000000000000..b55d9aeecc81 --- /dev/null +++ b/arch/l/kernel/pmc/pmc_hwmon.c @@ -0,0 +1,905 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_E90S +#include +#endif + +#define PMC_HWMON +#ifdef PMC_HWMON +#include +#include +#include +#endif /* PMC_HWMON */ + +#include "pmc.h" + +#ifdef PMC_HWMON +struct pmcmon_data { + struct device *hdev; + int node; +}; +struct pmcmon_data *pmcmon_dev; +#endif /* PMC_HWMON */ + +static struct pmc_temp_coeff { + long y, k; +} pmc_temp_coeff[] = { + {344700, 108300}, /*e1cp*/ + {237700, 79925}, /*r2000*/ +}; +static int pmc_temp_coeff_index; + +#ifdef CONFIG_NUMA +static void __iomem *hwmon_pmc_regs(struct device *dev) +{ + struct pmcmon_data *pmcmon = dev_get_drvdata(dev); + return __pmc_regs(pmcmon->node); +} + +static void __iomem *pmc_regs(struct device *dev) +{ + return __pmc_regs(dev_to_node(dev)); +} +#else +#define pmc_regs(dev) __pmc_regs(0) +#define hwmon_pmc_regs(dev) __pmc_regs(0) +#endif /* CONFIG_NUMA */ + +static long spmc_input_to_celsius_millidegrees(unsigned int in) +{ + struct pmc_temp_coeff *c = &pmc_temp_coeff[pmc_temp_coeff_index]; + + return in * c->y / 4096 - c->k; +} + +/* Additional sysfs interface for Moortec temp sensor */ +static ssize_t spmc_show_temp_cur0(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned int x; + int temp; + unsigned int frac; + void __iomem *regs = pmc_regs(dev); + + x = __raw_readl(regs + PMC_L_TEMP_RG_CUR_REG_0); + if (x & PMC_MOORTEC_TEMP_VALID) { + x &= PMC_MOORTEC_TEMP_VALUE_MASK; + 
temp = spmc_input_to_celsius_millidegrees(x); + frac = abs(temp % 1000); + temp /= 1000; + return sprintf(buf, "%d.%d\n", temp, frac); + } + return sprintf(buf, "Bad value\n"); +} + +int spmc_get_temp_cur0(void) +{ + unsigned int x; + int temp; + unsigned int frac; + void __iomem *regs = __pmc_regs(numa_node_id()); + + x = __raw_readl(regs + PMC_L_TEMP_RG_CUR_REG_0); + if (x & PMC_MOORTEC_TEMP_VALID) { + x &= PMC_MOORTEC_TEMP_VALUE_MASK; + temp = spmc_input_to_celsius_millidegrees(x); + frac = abs(temp % 1000); + temp /= 1000; + if (frac >= 500) + temp++; + + return temp; + } + + return SPMC_TEMP_BAD_VALUE; +} +EXPORT_SYMBOL(spmc_get_temp_cur0); + +static ssize_t spmc_show_temp_cur1(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned int x; + int temp; + unsigned int frac; + void __iomem *regs = pmc_regs(dev); + + x = __raw_readl(regs + PMC_L_TEMP_RG_CUR_REG_1); + if (x & PMC_MOORTEC_TEMP_VALID) { + x &= PMC_MOORTEC_TEMP_VALUE_MASK; + temp = spmc_input_to_celsius_millidegrees(x); + frac = abs(temp % 1000); + temp /= 1000; + return sprintf(buf, "%d.%d\n", temp, frac); + } + return sprintf(buf, "Bad value\n"); +} + +#ifdef CONFIG_E90S +static ssize_t spmc_show_temp_cur2(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned int x; + int temp; + unsigned int frac; + void __iomem *regs = pmc_regs(dev); + + x = __raw_readl(regs + PMC_L_TEMP_RG_CUR_REG_2); + if (x & PMC_MOORTEC_TEMP_VALID) { + x &= PMC_MOORTEC_TEMP_VALUE_MASK; + temp = spmc_input_to_celsius_millidegrees(x); + frac = abs(temp % 1000); + temp /= 1000; + return sprintf(buf, "%d.%d\n", temp, frac); + } + return sprintf(buf, "Bad value\n"); +} +#endif /* CONFIG_E90S */ + +static ssize_t spmc_show_nbs0(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned int x; + unsigned int temp; + void __iomem *regs = pmc_regs(dev); + + x = __raw_readl(regs + PMC_L_TEMP_RG_CUR_REG_0); + temp = x; + if (temp & PMC_MOORTEC_TEMP_VALID) { + temp &= 
PMC_MOORTEC_TEMP_VALUE_MASK; + return sprintf(buf, "%d\n", temp); + } + return sprintf(buf, "Bad value\n"); +} + +static ssize_t spmc_show_nbs1(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned int x; + unsigned int temp; + void __iomem *regs = pmc_regs(dev); + + x = __raw_readl(regs + PMC_L_TEMP_RG_CUR_REG_1); + temp = x; + if (temp & PMC_MOORTEC_TEMP_VALID) { + temp &= PMC_MOORTEC_TEMP_VALUE_MASK; + return sprintf(buf, "%d\n", temp); + } + return sprintf(buf, "Bad value\n"); +} + +#ifdef CONFIG_E90S +static ssize_t spmc_show_nbs2(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned int x; + unsigned int temp; + void __iomem *regs = pmc_regs(dev); + + x = __raw_readl(regs + PMC_L_TEMP_RG_CUR_REG_2); + temp = x; + if (temp & PMC_MOORTEC_TEMP_VALID) { + temp &= PMC_MOORTEC_TEMP_VALUE_MASK; + return sprintf(buf, "%d\n", temp); + } + return sprintf(buf, "Bad value\n"); +} +#endif /* CONFIG_E90S */ + +unsigned int load_threshold = 63; +EXPORT_SYMBOL(load_threshold); + +static ssize_t spmc_show_load_threshold(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", load_threshold); +} + +static ssize_t spmc_store_load_threshold(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long input; + + if (kstrtoul(buf, 10, &input) > 63) + return -EINVAL; + + load_threshold = (unsigned int)input; + + return count; +} + +static DEVICE_ATTR(temp_cur0, S_IRUGO, spmc_show_temp_cur0, NULL); +static DEVICE_ATTR(temp_cur1, S_IRUGO, spmc_show_temp_cur1, NULL); +#ifdef CONFIG_E90S +static DEVICE_ATTR(temp_cur2, S_IRUGO, spmc_show_temp_cur2, NULL); +#endif /* CONFIG_E90S */ +static DEVICE_ATTR(nbs0, S_IRUGO, spmc_show_nbs0, NULL); +static DEVICE_ATTR(nbs1, S_IRUGO, spmc_show_nbs1, NULL); +#ifdef CONFIG_E90S +static DEVICE_ATTR(nbs2, S_IRUGO, spmc_show_nbs2, NULL); +#endif /* CONFIG_E90S */ +static DEVICE_ATTR(load_threshold, S_IRUGO|S_IWUSR, 
spmc_show_load_threshold, + spmc_store_load_threshold); + +static struct attribute *pmc_tmoortec_attributes[] = { + &dev_attr_temp_cur0.attr, /* 0 */ + &dev_attr_temp_cur1.attr, /* 1 */ + &dev_attr_nbs0.attr, /* 2 */ + &dev_attr_nbs1.attr, /* 3 */ + &dev_attr_load_threshold.attr, /* 4 */ + NULL, /* 5: for: dev_attr_temp_cur2 */ + NULL, /* 6: for: dev_attr_nbs2 */ + NULL +}; + +static const struct attribute_group pmc_tmoortec_attr_group = { + .attrs = pmc_tmoortec_attributes, +}; + + +#ifdef PMC_HWMON +static int hwmon_read_temp(struct device *dev, int idx) +{ + unsigned int x; + int temp; + void __iomem *regs = hwmon_pmc_regs(dev); + + switch (idx) { + case 0: + x = __raw_readl(regs + PMC_L_TEMP_RG_CUR_REG_0); + break; + case 1: + x = __raw_readl(regs + PMC_L_TEMP_RG_CUR_REG_1); + break; +#ifdef CONFIG_E90S + case 2: + x = __raw_readl(regs + PMC_L_TEMP_RG_CUR_REG_2); + break; +#endif /* CONFIG_E90S */ + default: + return 0; + } + + if (x & PMC_MOORTEC_TEMP_VALID) { + x &= PMC_MOORTEC_TEMP_VALUE_MASK; + temp = spmc_input_to_celsius_millidegrees(x); + return temp; + } + return 0; /* Bad value */ +} /* hwmon_read_temp */ + +static ssize_t hwmon_show_temp(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", + hwmon_read_temp(dev, to_sensor_dev_attr(attr)->index)); +} /* hwmon_show_temp */ + +static ssize_t hwmon_show_label(struct device *dev, + struct device_attribute *attr, char *buf) +{ +#ifndef CONFIG_E90S /* E1CP */ + switch (to_sensor_dev_attr(attr)->index) { + case 0: + return sprintf(buf, "Core\n"); + case 1: + return sprintf(buf, "GPU\n"); + } +#else /* CONFIG_E90S - R2000 */ + switch (to_sensor_dev_attr(attr)->index) { + case 0: + return sprintf(buf, "NB\n"); + case 1: + return sprintf(buf, "Core 0-3\n"); + case 2: + return sprintf(buf, "Core 4-7\n"); + } +#endif /* CONFIG_E90S */ + return sprintf(buf, "temp%d\n", to_sensor_dev_attr(attr)->index); +} /* hwmon_show_label */ + +static ssize_t hwmon_show_type(struct 
device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", 1); /* 1: CPU embedded diode */ +} /* hwmon_show_type */ + +static ssize_t show_node(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pmcmon_data *pmcmon_dev = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", pmcmon_dev->node); +} /* show_node */ + + +SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, hwmon_show_temp, NULL, 0); +SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, hwmon_show_temp, NULL, 1); +#ifdef CONFIG_E90S +SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, hwmon_show_temp, NULL, 2); +#endif /* CONFIG_E90S */ + +SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, hwmon_show_label, NULL, 0); +SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, hwmon_show_label, NULL, 1); +#ifdef CONFIG_E90S +SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, hwmon_show_label, NULL, 2); +#endif /* CONFIG_E90S */ + +SENSOR_DEVICE_ATTR(temp1_type, S_IRUGO, hwmon_show_type, NULL, 0); +SENSOR_DEVICE_ATTR(temp2_type, S_IRUGO, hwmon_show_type, NULL, 1); +#ifdef CONFIG_E90S +SENSOR_DEVICE_ATTR(temp3_type, S_IRUGO, hwmon_show_type, NULL, 2); +#endif /* CONFIG_E90S */ +SENSOR_DEVICE_ATTR(node, S_IRUGO, show_node, NULL, 3); + +static struct attribute *pmcmon_attrs[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp2_input.dev_attr.attr, +#ifdef CONFIG_E90S + &sensor_dev_attr_temp3_input.dev_attr.attr, +#endif /* CONFIG_E90S */ + + &sensor_dev_attr_temp1_label.dev_attr.attr, + &sensor_dev_attr_temp2_label.dev_attr.attr, +#ifdef CONFIG_E90S + &sensor_dev_attr_temp3_label.dev_attr.attr, +#endif /* CONFIG_E90S */ + + &sensor_dev_attr_temp1_type.dev_attr.attr, + &sensor_dev_attr_temp2_type.dev_attr.attr, +#ifdef CONFIG_E90S + &sensor_dev_attr_temp3_type.dev_attr.attr, +#endif /* CONFIG_E90S */ + &sensor_dev_attr_node.dev_attr.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(pmcmon); + +#endif /* PMC_HWMON */ + +#define LPMC_POLLING_DELAY 0 +#define LPMC_PASSIVE_DELAY 0 /* millisecond */ + +#define 
LPMC_TEMP_PASSIVE 100000 /* millicelsius */ +#define LPMC_TEMP_CRITICAL 105000 + +#define LPMC_TEMP_HYSTERESIS 1500 + +#define l_pmc_read(__offset) \ +({ \ + unsigned int __val = __raw_readl(l_pmc->cntrl_base + __offset); \ + dev_dbg(&l_pmc->pdev->dev, "R:%x:%x: %s\t%s:%d\n", \ + __offset, __val, # __offset, __func__, __LINE__); \ + __val; \ +}) + +#define l_pmc_write(__val, __offset) do { \ + unsigned int __val2 = __val; \ + dev_dbg(&l_pmc->pdev->dev, "W:%x:%x: %s\t%s:%d\n", \ + __offset, __val2, # __offset, __func__, __LINE__);\ + __raw_writel(__val2, l_pmc->cntrl_base + __offset); \ +} while (0) + +static int pmc_l_raw_to_millicelsius(unsigned v) +{ + v &= PMC_MOORTEC_TEMP_VALUE_MASK; + return (int)(v * 344700 / 4096) - 108300; +} + +static unsigned int pmc_l_millicelsius_to_raw(int t) +{ + return (t + 108300) * 4096 / 344700; +} + +static int l_pmc_get_temp(struct thermal_zone_device *tz, int *ptemp) +{ + struct l_pmc *l_pmc = tz->devdata; + unsigned int x = l_pmc_read(PMC_L_TEMP_RG_CUR_REG_0); +#ifdef DEBUG + l_pmc_read(PMC_L_TEMP_RG_CUR_REG_1); +#endif + if (!(x & PMC_MOORTEC_TEMP_VALID)) + return -1; + *ptemp = pmc_l_raw_to_millicelsius(x); + dev_dbg(&l_pmc->pdev->dev, "t: %d mC\n", *ptemp); + +#ifdef DEBUG + l_pmc_read(PMC_L_GPE0_STS_REG); + l_pmc_read(PMC_L_GPE0_EN_REG); +#endif + return 0; +} + +static int l_pmc_get_mode(struct thermal_zone_device *tz, + enum thermal_device_mode *mode) +{ + struct l_pmc *l_pmc = tz->devdata; + *mode = l_pmc->thermal_mode; + return 0; +} + +static int l_pmc_set_mode(struct thermal_zone_device *tz, + enum thermal_device_mode mode) +{ + struct l_pmc *l_pmc = tz->devdata; + unsigned long flags; + + raw_spin_lock_irqsave(&l_pmc->thermal_lock, flags); + + l_pmc_write(0, PMC_L_GPE0_EN_REG); + if (mode != THERMAL_DEVICE_ENABLED) + goto out; + + l_pmc_write(0xf, PMC_L_GPE0_EN_REG); + l_pmc_read(PMC_L_GPE0_STS_REG); +out: + l_pmc->thermal_mode = mode; + raw_spin_unlock_irqrestore(&l_pmc->thermal_lock, flags); + 
thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); + + return 0; +} + +static int l_pmc_get_trip_type(struct thermal_zone_device *tz, int trip, + enum thermal_trip_type *type) +{ + *type = (trip == LPMC_TRIP_PASSIVE) ? THERMAL_TRIP_PASSIVE : + THERMAL_TRIP_CRITICAL; + return 0; +} + +static int l_pmc_get_trip_temp(struct thermal_zone_device *tz, int trip, + int *temp) +{ + struct l_pmc *l_pmc = tz->devdata; + + if (trip >= LPMC_TRIP_NUM) + return -EINVAL; + *temp = l_pmc->trip_temp[trip]; + return 0; +} + +static void pmc_l_set_alarm(struct thermal_zone_device *tz, int nr) +{ + struct l_pmc *l_pmc = tz->devdata; + unsigned int e; + unsigned int temp1 = pmc_l_millicelsius_to_raw(l_pmc->trip_temp[nr] - + l_pmc->trip_hyst[nr]); + unsigned int temp2 = pmc_l_millicelsius_to_raw(l_pmc->trip_temp[nr]); + unsigned long flags; + + raw_spin_lock_irqsave(&l_pmc->thermal_lock, flags); + e = l_pmc_read(PMC_L_GPE0_EN_REG); + + l_pmc_write(0, PMC_L_GPE0_EN_REG); + + temp1 |= PMC_L_TEMP_RGX_FALL; + temp2 |= PMC_L_TEMP_RGX_RISE; + + l_pmc_write(temp1, PMC_L_TEMP_RG0_REG + nr * 2 * 4); + l_pmc_write(temp2, PMC_L_TEMP_RG0_REG + nr * 2 * 4 + 4); + l_pmc_write(PMC_L_GPE0_STS_CLR, PMC_L_GPE0_STS_REG); + + l_pmc_write(e, PMC_L_GPE0_EN_REG); + l_pmc_read(PMC_L_GPE0_STS_REG); + raw_spin_unlock_irqrestore(&l_pmc->thermal_lock, flags); +} + +static int l_pmc_set_trip_temp(struct thermal_zone_device *tz, int trip, + int temp) +{ + struct l_pmc *l_pmc = tz->devdata; + + l_pmc->trip_temp[trip] = temp; + pmc_l_set_alarm(tz, trip); + return 0; +} + +static int l_pmc_get_crit_temp(struct thermal_zone_device *tz, int *temp) +{ + /* use the local trip index, not enum thermal_trip_type (value 3) */ + return l_pmc_get_trip_temp(tz, LPMC_TRIP_CRITICAL, temp); +} + +static int l_pmc_bind(struct thermal_zone_device *tz, + struct thermal_cooling_device *cdev) +{ + int ret; + + ret = thermal_zone_bind_cooling_device(tz, LPMC_TRIP_PASSIVE, cdev, + THERMAL_NO_LIMIT, + THERMAL_NO_LIMIT, + THERMAL_WEIGHT_DEFAULT); + if (ret) { + dev_err(&tz->device, + "binding zone %s 
with cdev %s failed:%d\n", + tz->type, cdev->type, ret); + return ret; + } + + return 0; +} + +static int l_pmc_unbind(struct thermal_zone_device *tz, + struct thermal_cooling_device *cdev) +{ + int ret; + + ret = thermal_zone_unbind_cooling_device(tz, LPMC_TRIP_PASSIVE, cdev); + if (ret) { + dev_err(&tz->device, + "unbinding zone %s with cdev %s failed:%d\n", + tz->type, cdev->type, ret); + return ret; + } + + return 0; +} + + +static int l_pmc_get_trip_hyst(struct thermal_zone_device *tz, int trip, + int *hyst) +{ + struct l_pmc *l_pmc = tz->devdata; + *hyst = l_pmc->trip_hyst[trip]; + return 0; +} + +static int l_pmc_set_trip_hyst(struct thermal_zone_device *tz, int trip, + int hyst) +{ + struct l_pmc *l_pmc = tz->devdata; + + l_pmc->trip_hyst[trip] = hyst; + pmc_l_set_alarm(tz, trip); + return 0; +} + +static irqreturn_t l_pmc_thermal_alarm_irq(int irq, void *dev) +{ + struct l_pmc *l_pmc = dev; + struct thermal_zone_device *tz = l_pmc->thermal; + int t = 0, i, ret = IRQ_HANDLED; + unsigned int s, e; + unsigned long flags; + + raw_spin_lock_irqsave(&l_pmc->thermal_lock, flags); + + s = l_pmc_read(PMC_L_GPE0_STS_REG); + e = l_pmc_read(PMC_L_GPE0_EN_REG); + + l_pmc_get_temp(tz, &t); + for (i = 0; i < 4; i++) { + if (!(s & (1 << i))) + continue; + + if (!(i & 1)) { /*falling threshold*/ + if (t >= l_pmc->trip_temp[i / 2] - + l_pmc->trip_hyst[i / 2]) + continue; + e &= ~(1 << i); + e |= 1 << (i + 1); + + if (i / 2 == LPMC_TRIP_PASSIVE) + tz->passive_delay = 0; + } else { + if (t < l_pmc->trip_temp[i / 2]) + continue; + e &= ~(1 << i); + e |= 1 << (i - 1); + + if (i / 2 == LPMC_TRIP_PASSIVE) + tz->passive_delay = LPMC_PASSIVE_DELAY; + + dev_crit(&l_pmc->pdev->dev, + "THERMAL ALARM: T %d > %d mC\n", + t, l_pmc->trip_temp[i / 2]); + } + ret = IRQ_WAKE_THREAD; + } + + l_pmc_write(0, PMC_L_GPE0_EN_REG); + l_pmc_write(PMC_L_GPE0_STS_CLR, PMC_L_GPE0_STS_REG); + l_pmc_write(e, PMC_L_GPE0_EN_REG); + l_pmc_read(PMC_L_GPE0_STS_REG); + 
raw_spin_unlock_irqrestore(&l_pmc->thermal_lock, flags); + + return ret; +} + +static irqreturn_t l_pmc_thermal_alarm_irq_thread(int irq, void *dev) +{ + struct l_pmc *l_pmc = dev; + + pr_debug("%s:THERMAL ALARM\n", pci_name(l_pmc->pdev)); + + thermal_zone_device_update(l_pmc->thermal, THERMAL_EVENT_UNSPECIFIED); + + return IRQ_HANDLED; +} + +static int thermal_get_trend(struct thermal_zone_device *tz, + int trip, enum thermal_trend *trend) +{ + int trip_temp; + + if (tz->ops->get_trip_temp(tz, trip, &trip_temp)) + return -EINVAL; + + if (tz->temperature > trip_temp) { + *trend = THERMAL_TREND_RAISE_FULL; + return 0; + } else { + *trend = THERMAL_TREND_DROP_FULL; + return 0; + } + + if (tz->temperature > tz->last_temperature) + *trend = THERMAL_TREND_RAISING; + else if (tz->temperature < tz->last_temperature) + *trend = THERMAL_TREND_DROPPING; + else + *trend = THERMAL_TREND_STABLE; + + return 0; +} + +static struct thermal_zone_device_ops l_pmc_tz_ops = { + .bind = l_pmc_bind, + .unbind = l_pmc_unbind, + .get_temp = l_pmc_get_temp, + .get_mode = l_pmc_get_mode, + .set_mode = l_pmc_set_mode, + .get_trend = thermal_get_trend, + .get_trip_type = l_pmc_get_trip_type, + .get_trip_temp = l_pmc_get_trip_temp, + .get_crit_temp = l_pmc_get_crit_temp, + .set_trip_temp = l_pmc_set_trip_temp, + .get_trip_hyst = l_pmc_get_trip_hyst, + .set_trip_hyst = l_pmc_set_trip_hyst, +}; + +static int pmc_l_thermal_probe(struct l_pmc *l_pmc) +{ + int ret = 0; + struct pci_dev *pdev = l_pmc->pdev; + + raw_spin_lock_init(&l_pmc->thermal_lock); + + l_pmc->trip_temp[LPMC_TRIP_PASSIVE] = LPMC_TEMP_PASSIVE; + l_pmc->trip_temp[LPMC_TRIP_CRITICAL] = LPMC_TEMP_CRITICAL; + l_pmc->trip_hyst[LPMC_TRIP_PASSIVE] = LPMC_TEMP_HYSTERESIS; + l_pmc->trip_hyst[LPMC_TRIP_CRITICAL] = LPMC_TEMP_HYSTERESIS; + + l_pmc_write(0, PMC_L_GPE0_EN_REG); + + l_pmc->policy = cpufreq_cpu_get(cpumask_first(cpu_online_mask)); + if (!l_pmc->policy) { + dev_err(&pdev->dev, "CPUFreq policy not found\n"); + return -EPROBE_DEFER; 
+ } + + l_pmc->cdev = cpufreq_cooling_register(l_pmc->policy); + if (IS_ERR(l_pmc->cdev)) { + ret = PTR_ERR(l_pmc->cdev); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, + "failed to register cpufreq cooling device: %d\n", + ret); + goto out_policy; + } + + l_pmc->thermal = thermal_zone_device_register("l_thermal", + LPMC_TRIP_NUM, LPMC_TRIP_POINTS_MSK, + l_pmc, &l_pmc_tz_ops, NULL, 0, 0); + if (IS_ERR(l_pmc->thermal)) { + dev_err(&pdev->dev, + "Failed to register thermal zone device\n"); + ret = PTR_ERR(l_pmc->thermal); + goto out_cooling; + } + + ret = devm_request_threaded_irq(&pdev->dev, pdev->irq, + l_pmc_thermal_alarm_irq, l_pmc_thermal_alarm_irq_thread, + 0, "l_pmc_thermal", l_pmc); + if (ret < 0) { + dev_err(&pdev->dev, "failed to request alarm irq %d: %d\n", + pdev->irq, ret); + goto out_dev; + } + + pmc_l_set_alarm(l_pmc->thermal, LPMC_TRIP_CRITICAL); + pmc_l_set_alarm(l_pmc->thermal, LPMC_TRIP_PASSIVE); + + l_pmc_set_mode(l_pmc->thermal, THERMAL_DEVICE_ENABLED); + + return ret; + +out_dev: + thermal_zone_device_unregister(l_pmc->thermal); +out_cooling: + cpufreq_cooling_unregister(l_pmc->cdev); +out_policy: + cpufreq_cpu_put(l_pmc->policy); + return ret; +} + +static void pmc_l_thermal_remove(struct l_pmc *l_pmc) +{ + cpufreq_cooling_unregister(l_pmc->cdev); + thermal_zone_device_unregister(l_pmc->thermal); +} + +static int pmc_l_probe(struct pci_dev *dev, + const struct pci_device_id *ent) +{ + int ret; + + ret = sysfs_create_group(&(l_pmc[0].pdev)->dev.kobj, + &pmc_tmoortec_attr_group); + if (ret) { + return ret; + } + ret = pmc_l_thermal_probe(&l_pmc[0]); + return ret; +} + +static void pmc_l_remove(struct pci_dev *dev) +{ + pmc_l_thermal_remove(&l_pmc[0]); +} + + +static const struct pci_device_id pmc_l_devices[] = { + { PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_HB) }, + { 0, } +}; + +static struct pci_driver pmc_l_driver = { + .name = "l-pmc", + .id_table = pmc_l_devices, + .probe = pmc_l_probe, + .remove = pmc_l_remove, +}; + + 
+#ifdef PMC_HWMON +int pmc_hwmon_init(void) +{ + int node; + struct device *dev; + struct platform_device *vdev; + struct device *hwmon_dev; + +/* + * R2000 has 3 temperature sensors (Core 0-3, Core 4-7, NB) + * embedded in chips's die, access to sensors (and other + * PMC functions) provided by NB registers; + * + * E1C+ has PMC device embedded in root hub of + * SoC, access to two temp. sensors (Core & GPU) + * performed through PCI. + * + */ + for_each_online_node(node) { +#ifdef CONFIG_E90S /* R2000 */ +#ifdef CONFIG_NUMA + dev = &node_devices[node]->dev; +#else + dev = cpu_subsys.dev_root; +#endif /* CONFIG_NUMA */ +#else /* E1C+ */ + dev = &(l_pmc[0].pdev)->dev; +#endif /* CONFIG_E90S */ + + /* Create platform device to be parent of hwmon dev */ + vdev = platform_device_register_data(dev, "pmc_hwmon", node, + NULL, 0); + if (IS_ERR(vdev)) { + dev_err(dev, "failed to create PMC platform device"); + return PTR_ERR(vdev); + } + + pmcmon_dev = devm_kzalloc(dev, sizeof(*pmcmon_dev), GFP_KERNEL); + if (!pmcmon_dev) + return -ENOMEM; + + pmcmon_dev->node = node; + hwmon_dev = devm_hwmon_device_register_with_groups( + &vdev->dev, + KBUILD_MODNAME, + pmcmon_dev, + pmcmon_groups); + if (IS_ERR(hwmon_dev)) { + dev_err(dev, "failed to create PMC hwmon device"); + return PTR_ERR(hwmon_dev); + } + pmcmon_dev->hdev = hwmon_dev; + hwmon_dev->init_name = "pmcmon"; + dev_info(hwmon_dev, "node %d hwmon device enabled - %s", + pmcmon_dev->node, dev_name(pmcmon_dev->hdev)); + + } + return 0; +} + +void pmc_hwmon_exit(void) +{ + return; +} +#endif /* PMC_HWMON */ + +int pmc_temp_sensors_init(void) +{ +#ifdef CONFIG_E90S + int err = 0; + int node; + struct resource r[] = { + { + .flags = IORESOURCE_MEM, + }, + }; + struct l_i2c2_platform_data pmc_i2c = { + .base_freq_hz = 100 * 1000 * 1000, + .desired_freq_hz = 100 * 1000, + }; + + pmc_temp_coeff_index = 1; + pmc_tmoortec_attributes[5] = &dev_attr_temp_cur2.attr; + pmc_tmoortec_attributes[6] = &dev_attr_nbs2.attr; + + 
for_each_online_node(node) { +#ifdef CONFIG_NUMA + struct device *d = &node_devices[node]->dev; +#else + struct device *d = cpu_subsys.dev_root; +#endif /* CONFIG_NUMA */ + struct platform_device *a; + + r[0].start = BASE_NODE0 + + (BASE_NODE1 - BASE_NODE0) * node + 0x9400; + r[0].end = r[0].start + 20 - 1; + pmc_i2c.bus_nr = node + 16; /* after KPI2 i2c controllers */ + a = platform_device_register_resndata(NULL, + "pmc-i2c", node, r, + ARRAY_SIZE(r), + &pmc_i2c, sizeof(pmc_i2c)); + if (IS_ERR(a)) + continue; + /* Create sysfs interface for Moortec temp sensor */ + err = sysfs_create_group(&d->kobj, + &pmc_tmoortec_attr_group); + if (err) { + platform_device_unregister(a); + return err; + } + } + return 0; +#else /* E1C+ */ + return pci_register_driver(&pmc_l_driver); +#endif /* CONFIG_E90S */ +} + +void pmc_temp_sensors_exit(void) +{ + if (l_pmc[0].i2c_chan) { + platform_device_unregister(l_pmc[0].i2c_chan); + l_pmc[0].i2c_chan = NULL; + } + sysfs_remove_group(&(l_pmc[0].pdev)->dev.kobj, + &pmc_tmoortec_attr_group); + pci_dev_put(l_pmc[0].pdev); + l_pmc[0].pdev = NULL; + + pci_unregister_driver(&pmc_l_driver); +} diff --git a/arch/l/kernel/pmc/pmc_s2.c b/arch/l/kernel/pmc/pmc_s2.c new file mode 100644 index 000000000000..70333dc79cc2 --- /dev/null +++ b/arch/l/kernel/pmc/pmc_s2.c @@ -0,0 +1,199 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pmc.h" + +#define S2_PMC_DEBUG 0 +#if S2_PMC_DEBUG +#define DebugPMC(x, ...) do { \ + pr_err("S2-PMC DEBUG: %s: %d: " x, __func__, \ + __LINE__, ##__VA_ARGS__); \ +} while (0) +#else +#define DebugPMC(...) 
do { } while (0) +#endif /* S2_PMC_DEBUG */ + +struct l_pmc l_pmc[MAX_NUM_PMCS]; + +/* + * Global storage for NBSR_NODE_CFG_INFO, same for all nodes, + * initialized in s2_pmc_init + */ +unsigned int bfs_bypass_val; + +/** + * s2_reg_to_addr - convert PMC register name to matching address + * @reg: PMC register + * @cpu: + * + * S2 (R2000) PMC specific function, converts PMC register to + * corresponding address which depends on CPU's cluster (CL0 or CL1) + * + */ +unsigned int s2_reg_to_addr(pmc_reg reg, unsigned int cpu) +{ + switch (reg) { + case PMC_L_COVFID_STATUS_REG: + if (cpu_to_cluster(cpu)) + return PMC_L_COVFID_STATUS_REG_CL1; + else + return PMC_L_COVFID_STATUS_REG_CL0; + case PMC_L_P_STATE_CNTRL_REG: + if (cpu_to_cluster(cpu)) + return PMC_L_P_STATE_CNTRL_REG_CL1; + else + return PMC_L_P_STATE_CNTRL_REG_CL0; + case PMC_L_P_STATE_STATUS_REG: + if (cpu_to_cluster(cpu)) + return PMC_L_P_STATE_STATUS_REG_CL1; + else + return PMC_L_P_STATE_STATUS_REG_CL0; + case PMC_L_P_STATE_VALUE_0_REG: + if (cpu_to_cluster(cpu)) + return PMC_L_P_STATE_VALUE_0_REG_CL1; + else + return PMC_L_P_STATE_VALUE_0_REG_CL0; + case PMC_L_P_STATE_VALUE_1_REG: + if (cpu_to_cluster(cpu)) + return PMC_L_P_STATE_VALUE_1_REG_CL1; + else + return PMC_L_P_STATE_VALUE_1_REG_CL0; + case PMC_L_P_STATE_VALUE_2_REG: + if (cpu_to_cluster(cpu)) + return PMC_L_P_STATE_VALUE_2_REG_CL1; + else + return PMC_L_P_STATE_VALUE_2_REG_CL0; + case PMC_L_P_STATE_VALUE_3_REG: + if (cpu_to_cluster(cpu)) + return PMC_L_P_STATE_VALUE_3_REG_CL1; + else + return PMC_L_P_STATE_VALUE_3_REG_CL0; + } + return 0; +} + +unsigned s2_get_freq_mult(int cpu) +{ + unsigned int freq, Fbfs, Mii_inv, Nii; + unsigned int fid = 0; + fid = pmc_reg_readl(PMC_L_P_STATE_VALUE_0_REG, cpu); + Mii_inv = Mii_inv(fid); + Nii = Nii(fid); + Fbfs = Fbfs(BASE_FREQ, bfs_bypass_val & 0xf); + freq = Fcpu(Fbfs, Mii_inv, Nii); + return freq; +} + +#ifdef CONFIG_CPU_FREQ + +/* cpufreq subsystem: */ +struct cpufreq_frequency_table 
pmc_l_freqs[MAX_PSTATES]; +/* available frequencies */ +struct cpufreq_frequency_table + pmc_l_available_freqs[MAX_AV_PSTATES]; + +int pmc_l_cpufreq_init(struct cpufreq_policy *policy) +{ + int result = 0; + unsigned long cpu_mask; + unsigned int hb_syscfg_val; + int i = 0; + int node = cpu_to_node(policy->cpu); + void __iomem *pmc_cbase = __pmc_regs(node); + unsigned int reg_addr; + /* + * Set masks for groups of cpu, that share the same frequency. + * Same freqs have cpus situated on the same node cluster, + * R2000 cpu has two cluster of 4 cores (8 cores in total) per each node + */ + for (i = 0; i < 8; i++) { + if (1UL << policy->cpu & 0xffUL << (i * 8)) { + if (policy->cpu & 0x4) { + cpu_mask = 0xf0UL<<(i*8); + } else { + cpu_mask = 0xfUL<<(i*8); + } + memcpy(policy->cpus, &cpu_mask, sizeof(cpu_mask)); + } + } + + hb_syscfg_val = bfs_bypass_val; + + if (bfs_bypass_val & HB_BFS_BYPASS_MASK) { + /* + * WA case. + * pmc_l_init_wa_freq_tables() should be here, but... + * S2 1st iteration has a bug in bypass mode, so we can`t use it now + */ + return -ENODEV; + } else { /* Normal case. */ + + /* + * Calculate FIDs - Frequencies table. + */ + pmc_l_calc_freq_tables(policy, hb_syscfg_val & 0xf); + } + + /* + * Write FID values to P_State_value_X registers + * (NOTE: we do not touch state P0 on init, - use value from boot. + * P0 can be updated when using userspace governor. 
+ */ + reg_addr = s2_reg_to_addr(PMC_L_P_STATE_VALUE_1_REG, policy->cpu); + __raw_writel(pmc_l_freqs[PMC_PSTATEVAL_REG1].driver_data, + pmc_cbase + reg_addr); + reg_addr = s2_reg_to_addr(PMC_L_P_STATE_VALUE_2_REG, policy->cpu); + __raw_writel(pmc_l_freqs[PMC_PSTATEVAL_REG2].driver_data, + pmc_cbase + reg_addr); + reg_addr = s2_reg_to_addr(PMC_L_P_STATE_VALUE_3_REG, policy->cpu); + __raw_writel(pmc_l_freqs[PMC_PSTATEVAL_REG3].driver_data, + pmc_cbase + reg_addr); + + /* Get boot's frequency, that was set up by jumpers */ + policy->cur = pmc_l_freqs[PMC_PSTATEVAL_REG0].frequency; + + if (bfs_bypass_val & HB_BFS_BYPASS_MASK) { + /* + * The code below should be used for bypass mode, + * but nowdays there is no any R2000 with working bypass + */ +#if 0 + result = cpufreq_table_validate_and_show(policy, pmc_l_freqs); + if (result) { + pr_err("%s: invalid frequency table: %d\n", __func__, result); + } + policy->cpuinfo.transition_latency = S2_TRANSITION_LATENCY; +#endif + return -ENODEV; + } else { + /* + * We can't use cpufreq_generic_init on R2000, its only for SMP + * with same frequency on cores, so let's do init explictly + */ +#if 0 + result = cpufreq_table_validate_and_show(policy, pmc_l_available_freqs); + if (result) { + pr_err("%s: invalid frequency table: %d\n", __func__, result); + } +#endif + result = cpufreq_frequency_table_cpuinfo(policy, pmc_l_available_freqs); + if (result) { + pr_err("%s: invalid frequency table: %d\n", __func__, result); + return result; + } + policy->freq_table = pmc_l_available_freqs; + policy->cpuinfo.transition_latency = S2_TRANSITION_LATENCY; + } + return result; +} +#endif /* CONFIG_CPU_FREQ */ diff --git a/arch/l/kernel/procipcc2.c b/arch/l/kernel/procipcc2.c new file mode 100644 index 000000000000..0f243e2f92f4 --- /dev/null +++ b/arch/l/kernel/procipcc2.c @@ -0,0 +1,104 @@ +/* + * arch/l/kernel/procipcc2.c + * + * Support for iplink switching off/on through IPCC2 write + * available registers. 
This works for E2K machines, that + * have SIC on board. + * + * Copyright (C) 2014 Evgeny M. Kravtsunov (kravtsunov_e@mcst.ru) + */ + +#include +#include +#include +#include + +#include +#include + +#define IPLINKMASK_FILENAME "iplinkmask" +static struct proc_dir_entry *iplinkmask_entry; + +/* LTSSM states */ +#define IPCC2_LTSSM_POWEROFF 0x000 +#define IPCC2_LTSSM_DISABLE 0x001 +#define IPCC2_LTSSM_SLEEP 0x010 +#define IPCC2_LTSSM_LINKUP 0x011 +#define IPCC2_LTSSM_SERVICE 0x100 +#define IPCC2_LTSSM_REINIT 0x101 + +static ssize_t write_iplinkmask_ipcc2(struct file *file, + const char __user *buffer, size_t count, loff_t *data) +{ + char val[10]; + int nid; + e2k_ipcc_csr_struct_t ipcc_csr; + int i; + + if (copy_from_user(val, buffer, count)) + return -EFAULT; + + nid = cpu_to_node(raw_smp_processor_id()); + + for (i = 1; i < SIC_IPCC_LINKS_COUNT + 1; i++) { + ipcc_csr.E2K_IPCC_CSR_reg = sic_get_ipcc_csr(nid, i); + if (val[0] == '0') { + /* Switch off iplinks (power off) */ + ipcc_csr.E2K_IPCC_CSR_cmd_code = IPCC2_LTSSM_POWEROFF; + } else if (val[0] == '1') { + /* Switch off iplinks (disable) */ + ipcc_csr.E2K_IPCC_CSR_cmd_code = IPCC2_LTSSM_DISABLE; + } else if (val[0] == '2') { + /* Switch off iplinks (sleep) */ + ipcc_csr.E2K_IPCC_CSR_cmd_code = IPCC2_LTSSM_SLEEP; + } else { + /* Reinit iplinks */ + ipcc_csr.E2K_IPCC_CSR_cmd_code = IPCC2_LTSSM_REINIT; + } + sic_set_ipcc_csr(nid, i, ipcc_csr.E2K_IPCC_CSR_reg); + } + + return count; +} + +static int iplinkmask_proc_show(struct seq_file *m, void *data) +{ + /* Not implemented */ + return 0; +} + +static int iplinkmask_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, iplinkmask_proc_show, NULL); +} + +static const struct file_operations iplinkmask_proc_fops = { + .open = iplinkmask_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = write_iplinkmask_ipcc2 +}; + +static int __init init_procipcc2(void) +{ + if (IS_MACHINE_E2S) { + 
iplinkmask_entry = proc_create(IPLINKMASK_FILENAME, + S_IFREG | S_IWUGO, NULL, + &iplinkmask_proc_fops); + if (!iplinkmask_entry) + return -ENOMEM; + } + + return 0; +} +module_init(init_procipcc2); + +static void __exit exit_procipcc2(void) +{ + if (IS_MACHINE_E2S) { + proc_remove(iplinkmask_entry); + } +} +module_exit(exit_procipcc2); + diff --git a/arch/l/kernel/procmmpddiag.c b/arch/l/kernel/procmmpddiag.c new file mode 100644 index 000000000000..f28888cd8644 --- /dev/null +++ b/arch/l/kernel/procmmpddiag.c @@ -0,0 +1,101 @@ +/* + * arch/l/kernel/procmmpddiag.c + * + * Support for 32-bit mmpd test and diag status value. + * 32-bit value is stored in kernel's ram and available + * for reading and writing from userspace (from init and FPO) + * through /proc/mmpddiag interface. + * + * Copyright (C) 2014 Evgeny M. Kravtsunov (kravtsunov_e@mcst.ru) + */ + +#include +#include +#include + +#define MMPDDIAG_FILENAME "mmpddiag" +static struct proc_dir_entry *dir_mmpddiag; + +/* mmpdstatus value format: + * + * bit 0: 1 - was hw reset; 0 - was sw reset; + * bit 1: 1/0 - mem test passed/failed; + * bit 2: 1/0 - cpu test passed/failed; + * bits 3-31: reserved + * + */ +static uint32_t mmpdstatus = 0; + +/* Status bits */ +#define MMPD_RESET_HW 0x00000001 +#define MMPD_MEM_BIT 0x00000002 +#define MMPD_CPU_BIT 0x00000004 + +static ssize_t write_mmpddiag(struct file *file, const char *buf, + size_t len, loff_t *off) +{ + char val[10] = {0}; + ssize_t ret = 0; + ret = simple_write_to_buffer(&val, sizeof(val), + off, buf, sizeof(val)); + if (ret < 0) + return -EINVAL; + /* + * 000(bin) = 0(val): SW reset ("hot start"), no tests + * 001(bin) = 1(val): HW reset, tests mem and cpu failed + * 011(bin) = 3(val): HW reset, test mem passed, test cpu failed + * 111(bin) = 7(val): HW reset, test mem passed, test cpu passed + */ + if (val[0] == '0') { + /* SW reset ("hot start"), no tests */ + mmpdstatus = 0; + } else if (val[0] == '1') { + /* HW reset, tests mem and cpu failed */ + 
mmpdstatus = MMPD_RESET_HW; + } else if (val[0] == '3') { + /* HW reset, test mem passed, test cpu failed */ + mmpdstatus = (MMPD_MEM_BIT | MMPD_RESET_HW); + } else if (val[0] == '5') { + /* HW reset, test cpu passed, test mem failed */ + mmpdstatus = (MMPD_CPU_BIT | MMPD_RESET_HW); + } else if (val[0] == '7') { + /* HW reset, test mem passed, test cpu failed */ + mmpdstatus = (MMPD_CPU_BIT | MMPD_MEM_BIT | MMPD_RESET_HW); + } else { + /* Invalid case */ + return -EFAULT; + } + return ret; +} + +static ssize_t read_mmpddiag(struct file *file, char *buf, size_t len, + loff_t *off) +{ + return simple_read_from_buffer(buf, len, off, &mmpdstatus, + sizeof(mmpdstatus)); +} + + +static const struct file_operations mmpddiag_proc_fops = { + .owner = THIS_MODULE, + .read = read_mmpddiag, + .write = write_mmpddiag, +}; + +static int __init init_mmpddiag(void) +{ + dir_mmpddiag = proc_create(MMPDDIAG_FILENAME, + S_IFREG | S_IWUGO | S_IRUGO, NULL, &mmpddiag_proc_fops); + + if (!dir_mmpddiag) + return -ENOMEM; + return 0; +} + +static void __exit exit_mmpddiag(void) +{ + remove_proc_entry(MMPDDIAG_FILENAME, NULL); +} +module_init(init_mmpddiag); +module_exit(exit_mmpddiag); + diff --git a/arch/l/kernel/procregs.c b/arch/l/kernel/procregs.c new file mode 100644 index 000000000000..26342a2f69aa --- /dev/null +++ b/arch/l/kernel/procregs.c @@ -0,0 +1,487 @@ +/* + * arch/e2k/kernel/procsic.c + * + * This file contains implementation of functions to read and write hw + * registers through proc fs. + * + * Copyright (C) 2010-2018 Pavel V. 
Panteleev (panteleev_p@mcst.ru) + */ + +#include +#include +#include +#include + +#include +#ifdef CONFIG_E2K +#include +#endif + + +#define L2READ_FILENAME "l2cacheread" +#define L2WRITE_FILENAME "l2cahcewrite" +#define SICREAD_FILENAME "sicread" +#define SICWRITE_FILENAME "sicwrite" +#define LDRD_FILENAME "ldrd" + +#define L2READ_STR_MAX_SIZE 32 +#define L2WRITE_STR_MAX_SIZE 64 +#define SICREAD_STR_MAX_SIZE 16 +#define SICWRITE_STR_MAX_SIZE 32 +#define LDRD_STR_MAX_SIZE 32 + + +enum { + SICREG_FORMAT_W, + SICREG_FORMAT_L +}; + + +static raw_spinlock_t sicreg_lock; +static u32 sicreg_offset; +static u32 sicreg_format; + +#ifdef CONFIG_E2K +static atomic64_t l2addr; + +/* + * Use KERNEL_BASE as default for ldrd_val to prevent panic due sporadic access + * from stupid user utilities + */ +static atomic64_t ldrd_val = ATOMIC_INIT(KERNEL_BASE); + + +/* + * L2 cache register read + */ + +static int l2read_proc_show(struct seq_file *s, void *v) +{ + u64 val = NATIVE_READ_MAS_D(atomic64_read(&l2addr), MAS_DCACHE_L2_REG); + + seq_printf(s, "0x%llx", val); + + return 0; +} + +static int l2read_proc_open(struct inode *inode, struct file *file) +{ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + return single_open(file, l2read_proc_show, NULL); +} + +static inline void l2read_write_reg(char *str) +{ + u64 addr; + int res; + + res = sscanf(str, "0x%llX\n", &addr); + if (res != 1) { + pr_err("Failed to save L2 cache address to read (invalid string).\n"); + return; + } + + atomic64_set(&l2addr, addr); +} + +static ssize_t l2read_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) +{ + char l2read_buffer[L2READ_STR_MAX_SIZE]; + long ret; + + memset(l2read_buffer, 0, sizeof(char) * L2READ_STR_MAX_SIZE); + + if (count > L2READ_STR_MAX_SIZE - 1) { + pr_err("Failed to save L2 cache address to read (too long string).\n"); + ret = -EINVAL; + } else if (copy_from_user(l2read_buffer, buffer, count)) { + pr_err("Failed to save L2 cache address to read 
(kernel error).\n"); + ret = -EFAULT; + } else { + l2read_write_reg(l2read_buffer); + ret = count; + } + + return ret; +} + +static const struct file_operations l2read_proc_fops = { + .owner = THIS_MODULE, + .open = l2read_proc_open, + .write = l2read_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release +}; + + +/* + * L2 cache register write + */ + +static inline void l2write_write_reg(char *str) +{ + u64 addr, val; + int res; + + res = sscanf(str, "0x%llX 0x%llX\n", &addr, &val); + if (res != 2) { + pr_err("Failed to write L2 cache address (invalid string).\n"); + return; + } + + NATIVE_WRITE_MAS_D(addr, val, MAS_DCACHE_L2_REG); +} + +static ssize_t l2write_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) +{ + char l2write_buffer[SICWRITE_STR_MAX_SIZE]; + long ret; + + memset(l2write_buffer, 0, sizeof(char) * L2WRITE_STR_MAX_SIZE); + + if (count + 1 > L2WRITE_STR_MAX_SIZE) { + pr_err("Failed to write L2 cache address (too long string).\n"); + ret = -EINVAL; + } else if (copy_from_user(l2write_buffer, buffer, count)) { + pr_err("Failed to write L2 cache address (kernel error).\n"); + ret = -EFAULT; + } else { + l2write_write_reg(l2write_buffer); + ret = count; + } + + return ret; +} + +static int l2write_proc_show(struct seq_file *s, void *v) +{ + return 0; +} + +static int l2write_proc_open(struct inode *inode, struct file *file) +{ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + return single_open(file, l2write_proc_show, NULL); +} + +static const struct file_operations l2write_proc_fops = { + .owner = THIS_MODULE, + .open = l2write_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .write = l2write_write, + .release = single_release +}; + + +/* + * LDRD read + */ + +static int ldrd_proc_show(struct seq_file *s, void *v) +{ + seq_printf(s, "0x%lx", LDRD(atomic64_read(&ldrd_val))); + return 0; +} + +static int ldrd_proc_open(struct inode *inode, struct file *file) +{ + if (!capable(CAP_SYS_ADMIN)) 
+ return -EPERM; + + return single_open(file, ldrd_proc_show, NULL); +} + +static inline void ldrd_write_reg(char *str) +{ + int ldrd, res; + + res = sscanf(str, "0x%X\n", &ldrd); + if (res != 1) { + pr_err("Failed to save LDRD value (invalid string).\n"); + return; + } + + atomic64_set(&ldrd_val, ldrd); +} + +static ssize_t ldrd_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) +{ + char ldrd_buffer[LDRD_STR_MAX_SIZE]; + long ret; + + memset(ldrd_buffer, 0, sizeof(char) * LDRD_STR_MAX_SIZE); + + if (count > LDRD_STR_MAX_SIZE - 1) { + pr_err("Failed to save LDRD value (too long string).\n"); + ret = -EINVAL; + } else if (copy_from_user(ldrd_buffer, buffer, count)) { + pr_err("Failed to save LDRD value (kernel error).\n"); + ret = -EFAULT; + } else { + ldrd_write_reg(ldrd_buffer); + ret = count; + } + + return ret; +} + +static const struct file_operations ldrd_proc_fops = { + .owner = THIS_MODULE, + .open = ldrd_proc_open, + .write = ldrd_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release +}; + +#endif + + +/* + * SIC read + */ + +static int sicread_seq_show(struct seq_file *s, void *v) +{ + int node = (int)(*((loff_t *)v)); + int offset, format; + unsigned int val; + unsigned long flags; + + raw_spin_lock_irqsave(&sicreg_lock, flags); + offset = sicreg_offset; + format = sicreg_format; + raw_spin_unlock_irqrestore(&sicreg_lock, flags); + + switch (format) { + case SICREG_FORMAT_W: + val = sic_readw_node_nbsr_reg(node, offset); + break; + case SICREG_FORMAT_L: + val = sic_read_node_nbsr_reg(node, offset); + break; + default: + pr_err("Failed to write SIC register (invalid format).\n"); + return 0; + } + + seq_printf(s, "node: %d reg (0x%X): 0x%X\n", node, offset, val); + + return 0; +} + +static void *sicread_seq_start(struct seq_file *s, loff_t *pos) +{ + if (!node_online(*pos)) + *pos = next_online_node(*pos); + if (*pos == MAX_NUMNODES) + return 0; + return (void *)pos; +} + +static void 
*sicread_seq_next(struct seq_file *s, void *v, loff_t *pos) +{ + *pos = next_online_node(*pos); + if (*pos == MAX_NUMNODES) + return 0; + return (void *)pos; +} + +static void sicread_seq_stop(struct seq_file *s, void *v) +{ +} + +static const struct seq_operations sicread_seq_ops = { + .start = sicread_seq_start, + .next = sicread_seq_next, + .stop = sicread_seq_stop, + .show = sicread_seq_show +}; + +static int sicread_proc_open(struct inode *inode, struct file *file) +{ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + return seq_open(file, &sicread_seq_ops); +} + +static inline void sicread_write_reg(char *str) +{ + int offset, format; + char format_sym; + unsigned long flags; + + if (sscanf(str, "0x%X%c\n", &offset, &format_sym) != 2) { + pr_err("Failed to save SIC register to read (invalid string).\n"); + return; + } + + switch (format_sym) { + case 'w': + format = SICREG_FORMAT_W; + break; + case 'l': + case '\n': + format = SICREG_FORMAT_L; + break; + default: + pr_err("Failed to save SIC register to read (invalid format).\n"); + return; + } + + raw_spin_lock_irqsave(&sicreg_lock, flags); + sicreg_offset = offset; + sicreg_format = format; + raw_spin_unlock_irqrestore(&sicreg_lock, flags); +} + +static ssize_t sicread_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) +{ + char sicread_buffer[SICREAD_STR_MAX_SIZE]; + long ret; + + memset(sicread_buffer, 0, sizeof(char) * SICREAD_STR_MAX_SIZE); + + if (count + 1 > SICREAD_STR_MAX_SIZE) { + pr_err("Failed to save SIC register to read (too long string).\n"); + ret = -EINVAL; + } else if (copy_from_user(sicread_buffer, buffer, + min(sizeof(sicread_buffer), count))) { + pr_err("Failed to save SIC register to read (kernel error).\n"); + ret = -EFAULT; + } else { + sicread_write_reg(sicread_buffer); + ret = count; + } + + return ret; +} + +static const struct file_operations sicread_proc_fops = { + .owner = THIS_MODULE, + .open = sicread_proc_open, + .write = sicread_write, + .read 
= seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + + +/* + * SIC write + */ + +static inline void sicwrite_write_reg(char *str) +{ + int node, reg, val, res; + char format_sym, space[2]; + + res = sscanf(str, "%d 0x%X%c%1[ ] 0x%X\n", + &node, ®, &format_sym, space, &val); + if (res != 5) { + res = sscanf(str, "%d 0x%X 0x%X\n", &node, ®, &val); + if (res != 3) { + pr_err("Failed to write SIC register (invalid string).\n"); + return; + } + format_sym = 'l'; + } + + if (!node_online(node)) { + pr_err("Failed to write SIC register (invalid node number).\n"); + return; + } + + switch (format_sym) { + case 'w': + sic_writew_node_nbsr_reg(node, reg, val); + break; + case 'l': + sic_write_node_nbsr_reg(node, reg, val); + break; + default: + pr_err("Failed to write SIC register (invalid format).\n"); + return; + } +} + +static ssize_t sicwrite_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) +{ + char sicwrite_buffer[SICWRITE_STR_MAX_SIZE]; + long ret; + + memset(sicwrite_buffer, 0, sizeof(char) * SICWRITE_STR_MAX_SIZE); + + if (count + 1 > SICWRITE_STR_MAX_SIZE) { + pr_err("Failed to write SIC register (too long string).\n"); + ret = -EINVAL; + } else if (copy_from_user(sicwrite_buffer, buffer, + min(sizeof(sicwrite_buffer), count))) { + pr_err("Failed to write SIC register (kernel error).\n"); + ret = -EFAULT; + } else { + sicwrite_write_reg(sicwrite_buffer); + ret = count; + } + + return ret; +} + +static int sicwrite_proc_show(struct seq_file *s, void *v) +{ + return 0; +} + +static int sicwrite_proc_open(struct inode *inode, struct file *file) +{ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + return single_open(file, sicwrite_proc_show, NULL); +} + +static const struct file_operations sicwrite_proc_fops = { + .owner = THIS_MODULE, + .open = sicwrite_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .write = sicwrite_write, + .release = single_release +}; + + +/* + * Init + */ + +static int __init 
init_procregs(void) +{ + if (HAS_MACHINE_L_SIC) { + proc_create(SICREAD_FILENAME, S_IRUGO, NULL, + &sicread_proc_fops); + proc_create(SICWRITE_FILENAME, S_IRUGO, NULL, + &sicwrite_proc_fops); + } + +#ifdef CONFIG_E2K + proc_create(L2READ_FILENAME, S_IRUGO, NULL, &l2read_proc_fops); + proc_create(L2WRITE_FILENAME, S_IRUGO, NULL, &l2write_proc_fops); + proc_create(LDRD_FILENAME, S_IRUGO, NULL, &ldrd_proc_fops); +#endif + + return 0; +} + +module_init(init_procregs); diff --git a/arch/l/kernel/procshow.c b/arch/l/kernel/procshow.c new file mode 100644 index 000000000000..cc611fc744d6 --- /dev/null +++ b/arch/l/kernel/procshow.c @@ -0,0 +1,760 @@ +/* + * arch/l/kernel/procshow.c + * + * This file contains implementation of functions to show different data + * through proc fs. + * + * Copyright (C) 2010-2014 Pavel V. Panteleev (panteleev_p@mcst.ru) + */ + +#include +#include +#include + +#include + +#ifdef CONFIG_BOOT_TRACE +#include +#endif /* CONFIG_BOOT_TRACE */ + +#ifdef CONFIG_E2K +#include +#include +#endif /* CONFIG_E2K */ + +#ifdef CONFIG_E90S +#include +#endif + +#if defined(CONFIG_E2K) || defined(CONFIG_E90S) +#include +#endif + + +#define BOOTDATA_FILENAME "bootdata" +#define LOADTIME_FILENAME "loadtime" + + +#ifdef CONFIG_BOOT_TRACE + +#define LOADTIMEKERN_FILENAME "loadtime_kernel" + +typedef struct loadtime_tpnt { + char *name; + char *keyword; +} loadtime_tpnt_t; + +#define LOADTIME_TPNT_NUM 4 + +static loadtime_tpnt_t loadtime_tpnt_arr[LOADTIME_TPNT_NUM] = { + {"KernelBoottimeInit", "boot-time" }, + {"KernelMemInit", "mm_init" }, + {"KernelPagingInit", "paging_init" }, + {"KernelInitcalls", "do_initcalls"}, +}; +#endif /* CONFIG_BOOT_TRACE */ + + +#ifdef CONFIG_E2K +#define LDSP_FILENAME "dspinfo" +struct proc_dir_entry *ldsp_entry = NULL; +EXPORT_SYMBOL(ldsp_entry); +const struct file_operations *ldsp_proc_fops_pointer = NULL; +EXPORT_SYMBOL(ldsp_proc_fops_pointer); +#endif + + +#if defined(CONFIG_E2K) || defined(CONFIG_E90S) +#define RDMA_FILENAME 
"rdmainfo" +struct proc_dir_entry *rdma_entry = NULL; +EXPORT_SYMBOL(rdma_entry); +const struct file_operations *rdma_proc_fops_pointer = NULL; +EXPORT_SYMBOL(rdma_proc_fops_pointer); + +#define NODES_FILENAME "nodesinfo" +struct proc_dir_entry *nodes_entry = NULL; +EXPORT_SYMBOL(nodes_entry); +const struct file_operations *nodes_proc_fops_pointer = NULL; +EXPORT_SYMBOL(nodes_proc_fops_pointer); +#endif + + +#define BOOTLOG_FILENAME "bootlog" +#define BOOTLOG_BLOCK_SIZE 1024 + +#define BOOTLOG_BLOCKS_COUNT \ + ((bootblock_virt->info.bios.bootlog_len / BOOTLOG_BLOCK_SIZE) + \ + ((bootblock_virt->info.bios.bootlog_len % BOOTLOG_BLOCK_SIZE) ? \ + 1 : 0)) + +static void get_uuid(__u8 *uuid, char *uuidstr) +{ + int i; + uuidstr[0] = 0; + for (i = 0; i < 16; i++) { + if (uuid[i] != 0) { + snprintf(uuidstr, 64, + "uuid='%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x'\n", + uuid[0], uuid[1], uuid[2], uuid[3], + uuid[4], uuid[5], uuid[6], uuid[7], + uuid[8], uuid[9], uuid[10], uuid[11], + uuid[12], uuid[13], uuid[14], uuid[15]); + break; + } + } +} + +static void get_macaddr(__u8 *mac_addr, char *macstr) +{ + macstr[0] = 0; + if (mac_addr[3] != 0 && mac_addr[4] != 0 && mac_addr[5] != 0) { + snprintf(macstr, 32, + "mac='%02X:%02X:%02X:%02X:%02X:%02X'\n", + mac_addr[0], mac_addr[1], + mac_addr[2], mac_addr[3], + mac_addr[4], mac_addr[5]); + } +} + +static void get_sernum(__u64 mach_serialn, char *serstr) +{ + serstr[0] = 0; + if (mach_serialn != 0) { + snprintf(serstr, 32, "serial='%llu'\n", mach_serialn); + } +} + +static int bootdata_proc_show(struct seq_file *m, void *data) +{ + char serstr[32]; + char macstr[32]; + char uuidstr[64]; + + get_sernum(bootblock_virt->info.mach_serialn, serstr); + get_macaddr(bootblock_virt->info.mac_addr, macstr); + get_uuid(bootblock_virt->info.bios.uuid, uuidstr); + + seq_printf(m, + "boot_ver='%s'\n" + "mb_type='%s' (0x%x)\n" + "chipset_type='%s'\n" + "cpu_type='%s'\n" + "cache_lines_damaged=%lu\n" + "%s%s%s", + 
bootblock_virt->info.bios.boot_ver, + mcst_mb_name, + bootblock_virt->info.bios.mb_type, + GET_CHIPSET_TYPE_NAME(bootblock_virt->info.bios.chipset_type), + GET_CPU_TYPE_NAME(bootblock_virt->info.bios.cpu_type), + (unsigned long)bootblock_virt->info.bios.cache_lines_damaged, + strlen(uuidstr) ? uuidstr : "", + strlen(macstr) ? macstr : "", + strlen(serstr) ? serstr : ""); + + return 0; +} + +#ifdef CONFIG_BOOT_TRACE +#ifdef CONFIG_E2K +static u64 boot_loadtime_show(struct seq_file *m) +{ + boot_times_t t = bootblock_virt->boot_times; + u64 arch = t.arch * MSEC_PER_SEC / cpu_freq_hz; + u64 unpack = (t.unpack - t.arch) * MSEC_PER_SEC / cpu_freq_hz; + u64 pci = (t.pci - t.unpack) * MSEC_PER_SEC / cpu_freq_hz; + u64 drivers1 = (t.drivers1 - t.pci) * MSEC_PER_SEC / cpu_freq_hz; + u64 drivers2 = (t.drivers2 - t.drivers1) * MSEC_PER_SEC / cpu_freq_hz; + u64 menu = (t.menu - t.drivers2) * MSEC_PER_SEC / cpu_freq_hz; + u64 sm = (t.sm - t.menu) * MSEC_PER_SEC / cpu_freq_hz; + u64 kernel = (t.kernel - t.sm) * MSEC_PER_SEC / cpu_freq_hz; + u64 total = 0; + + if (arch + unpack + pci + drivers1 + drivers2 + menu + sm + kernel) { + seq_printf(m, + "BootArch: %llu ms\nBootUnpack: %llu ms\n" + "BootPci: %llu ms\nBootDrivers1: %llu ms\n" + "BootDrivers2: %llu ms\nBootMenu: %llu ms\n" + "BootSm: %llu ms\nBootKernel: %llu ms\n", + arch, unpack, pci, drivers1, drivers2, menu, sm, + kernel); + total = boot_cycles_to_ms(t.kernel); + } else { + seq_printf(m, "Boot: %llu ms\n", + boot_cycles_to_ms(boot_trace_events[0].cycles)); + total = boot_cycles_to_ms(boot_trace_events[0].cycles); + } + + return total; +} +#else /* !CONFIG_E2K */ +static u64 boot_loadtime_show(struct seq_file *m) +{ + seq_printf(m, "Boot: %llu ms\n", + boot_cycles_to_ms(boot_trace_events[0].cycles)); + return boot_cycles_to_ms(boot_trace_events[0].cycles); +} +#endif /* CONFIG_E2K */ + +static u64 kernel_loadtime_show(struct seq_file *m) +{ + int i; + u64 kernel_common_time = 0; + u64 kernel_traced_time = 0; + u64 
events_count = atomic_read(&boot_trace_top_event) + 1; + + for (i = 0; i < events_count - 1; i++) { + struct boot_tracepoint *curr = &boot_trace_events[i]; + int j; + + for (j = 0; j < LOADTIME_TPNT_NUM; j++) { + loadtime_tpnt_t elem = loadtime_tpnt_arr[j]; + u64 time = 0; + u64 k; + + if (!strstr(curr->name, elem.keyword)) + continue; + + for (k = i + 1; k < events_count; k++) { + struct boot_tracepoint *next = + &boot_trace_events[k]; + u64 delta; + + if (!strstr(next->name, elem.keyword)) + continue; + + delta = next->cycles - curr->cycles; + time = boot_cycles_to_ms(delta); + + break; + } + + kernel_traced_time += time; + + if (time) + seq_printf(m, "%s: %llu ms\n", + elem.name, time); + } + } + + if (atomic_read(&boot_trace_top_event) != -1) { + int top_event = atomic_read(&boot_trace_top_event); + u64 start, end; + + start = boot_trace_events[0].cycles; + end = boot_trace_events[top_event].cycles; + + kernel_common_time = boot_cycles_to_ms(end - start); + seq_printf(m, "KernelOther: %llu ms\n", + kernel_common_time - kernel_traced_time); + } + + return kernel_common_time; +} +#endif /* CONFIG_BOOT_TRACE */ + +static int loadtime_proc_show(struct seq_file *m, void *data) +{ + u64 total_time = 0; + +#ifdef CONFIG_BOOT_TRACE + total_time += boot_loadtime_show(m); + total_time += kernel_loadtime_show(m); +#endif + + seq_printf(m, "Total: %llu ms\n", total_time); + + return 0; +} + +#ifdef CONFIG_BOOT_TRACE +static void show_cpu_indentation(struct seq_file *s, int num) +{ + int i; + + for (i = 0; i < num; i++) + seq_printf(s, "\t"); +} + +static int loadtimekern_seq_show(struct seq_file *s, void *v) +{ + long pos = (struct boot_tracepoint *)v - boot_trace_events; + struct boot_tracepoint *event = &boot_trace_events[pos]; + unsigned int cpu = event->cpu; + struct boot_tracepoint *next = boot_trace_next_event(cpu, event); + struct boot_tracepoint *prev = boot_trace_prev_event(cpu, event); + struct boot_tracepoint *next_next = next ? 
+ boot_trace_next_event(cpu, next) : NULL; + struct boot_tracepoint *prev_prev = prev ? + boot_trace_prev_event(cpu, prev) : NULL; + u64 delta_next = (next ? next->cycles : event->cycles) - event->cycles; + u64 delta_prev = event->cycles - (prev ? prev->cycles : 0); + u64 delta_ms_next = boot_cycles_to_ms(delta_next); + u64 delta_ms_prev = boot_cycles_to_ms(delta_prev); + int i, printed; + + if (pos == 0) { + int top = atomic_read(&boot_trace_top_event); + u64 delta, sec, msec; + + delta = boot_trace_events[top].cycles + - boot_trace_events[0].cycles; + delta = boot_cycles_to_ms(delta); + + msec = do_div(delta, MSEC_PER_SEC); + sec = delta; + + seq_printf(s, + "Boot trace finished, kernel booted in %llu.%.3llu s,\n" + "%d events were collected.\n" + "Output format is:\n\tabsolute time; time passed " + "after the last event; the event name\n", + sec, msec, top + 1); + + seq_printf(s, "----------------------------------------" + "-------------------------------\nCPU0"); + + printed = 1; + for (i = 1; i < NR_CPUS; i++) { + seq_printf(s, "\tCPU%d", i); + ++printed; + if (printed == num_online_cpus()) + break; + } + + seq_printf(s, "\n----------------------------------------" + "-------------------------------\n"); + } + + /* Print only the first two and the last two events + * and events with big enough delta. */ + if (!prev || !next || !next_next || !prev_prev || + delta_ms_next >= CONFIG_BOOT_TRACE_THRESHOLD || + delta_ms_prev >= CONFIG_BOOT_TRACE_THRESHOLD) { + /* Print this event */ + show_cpu_indentation(s, cpu); + seq_printf(s, "%3llu ms (delta %3llu ms) %s\n", + boot_cycles_to_ms(event->cycles), + boot_cycles_to_ms(delta_prev), + event->name); + } else { + /* Skip this event. If this is the first or the last + * skipped event in a row then output < ... >. 
*/ + u64 delta_cycles_next_next = next_next->cycles - next->cycles; + u64 delta_cycles_prev_prev = prev->cycles - prev_prev->cycles; + u64 delta_ms_next_next = boot_cycles_to_ms(delta_cycles_next_next); + u64 delta_ms_prev_prev = boot_cycles_to_ms(delta_cycles_prev_prev); + + if ((delta_ms_next_next >= CONFIG_BOOT_TRACE_THRESHOLD + && delta_ms_next < + CONFIG_BOOT_TRACE_THRESHOLD) + || (delta_ms_prev_prev >= + CONFIG_BOOT_TRACE_THRESHOLD + && delta_ms_prev < + CONFIG_BOOT_TRACE_THRESHOLD)) { + /* Skip this event and inform about it. */ + show_cpu_indentation(s, cpu); + seq_printf(s, "< ... >\n"); + } else { + /* Skip this event and do nothing */ + } + } + + return 0; +} + +static void *loadtimekern_seq_start(struct seq_file *s, loff_t *pos) +{ + long count = atomic_read(&boot_trace_top_event); + if (*pos > count || count == -1) + return 0; + return (&boot_trace_events[*pos]); +} + +static void *loadtimekern_seq_next(struct seq_file *s, void *v, loff_t *pos) +{ + (*pos)++; + if (*pos > atomic_read(&boot_trace_top_event)) + return 0; + return (&boot_trace_events[*pos]); +} + +static void loadtimekern_seq_stop(struct seq_file *s, void *v) +{ +} + +static const struct seq_operations loadtimekern_seq_ops = { + .start = loadtimekern_seq_start, + .next = loadtimekern_seq_next, + .stop = loadtimekern_seq_stop, + .show = loadtimekern_seq_show +}; + +static int loadtimekern_proc_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &loadtimekern_seq_ops); +} + +static const struct file_operations loadtime_kernel_proc_fops = { + .owner = THIS_MODULE, + .open = loadtimekern_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; +#endif /* CONFIG_BOOT_TRACE */ + +#if defined(CONFIG_E2K) || defined(CONFIG_E90S) +static int rdma_seq_show(struct seq_file *s, void *v) +{ + int node = (int)(*((loff_t *)v)); + int i = 0; + + seq_printf(s, " node: %d\n", node); + for (i = 0; i < NODE_NUMIOLINKS; i++) { + if (node_rdma_possible(node, i)) { 
+ seq_printf(s, " link: %d - %s\n", + i, + node_rdma_online(node, i) ? "on" : "off"); + } else { + seq_printf(s, " link: %d - none\n", i); + } + } + + return 0; +} + +static void *rdma_seq_start(struct seq_file *s, loff_t *pos) +{ + if (!node_online(*pos)) + *pos = next_online_node(*pos); + if (*pos == MAX_NUMNODES) + return 0; + seq_printf(s, "- RDMA device info - number: %d, online: %d.\n", + num_possible_rdmas(), num_online_rdmas()); + seq_printf(s, " Module not loaded.\n"); + seq_printf(s, " Status for each node:\n"); + return (void *)pos; +} + +static void *rdma_seq_next(struct seq_file *s, void *v, loff_t *pos) +{ + *pos = next_online_node(*pos); + if (*pos == MAX_NUMNODES) + return 0; + return (void *)pos; +} + +static void rdma_seq_stop(struct seq_file *s, void *v) +{ +} + +static const struct seq_operations rdma_seq_ops = { + .start = rdma_seq_start, + .next = rdma_seq_next, + .stop = rdma_seq_stop, + .show = rdma_seq_show +}; + +static int rdma_proc_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &rdma_seq_ops); +} + +static const struct file_operations rdma_proc_fops = { + .owner = THIS_MODULE, + .open = rdma_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + +static int nodes_seq_show(struct seq_file *s, void *v) +{ + unsigned int node1; + unsigned int node2; + unsigned int node3; + +#ifdef CONFIG_E2K + /* Check vp and vio bits of RT_LCFG SIC register */ + node1 = sic_read_node_nbsr_reg(0, SIC_rt_lcfg1) & 9; + node2 = sic_read_node_nbsr_reg(0, SIC_rt_lcfg2) & 9; + node3 = sic_read_node_nbsr_reg(0, SIC_rt_lcfg3) & 9; +#endif + +#ifdef CONFIG_E90S + node1 = sic_read_node_iolink_nbsr_reg(0, 0, NBSR_LINK0_CSR); + node2 = sic_read_node_iolink_nbsr_reg(0, 0, NBSR_LINK1_CSR); + node3 = sic_read_node_iolink_nbsr_reg(0, 0, NBSR_LINK2_CSR); +#endif + + seq_printf(s, "node0: on\n"); + seq_printf(s, "node1: %s\n", node1 != 0 ? "on" : "off"); + seq_printf(s, "node2: %s\n", node2 != 0 ? 
"on" : "off"); + seq_printf(s, "node3: %s\n", node3 != 0 ? "on" : "off"); + + return 0; +} + +static void *nodes_seq_start(struct seq_file *s, loff_t *pos) +{ + if (*pos != 0) + return 0; + return (void *)pos; +} + +static void *nodes_seq_next(struct seq_file *s, void *v, loff_t *pos) +{ + (*pos) = 1; + return 0; +} + +static void nodes_seq_stop(struct seq_file *s, void *v) +{ +} + +static const struct seq_operations nodes_seq_ops = { + .start = nodes_seq_start, + .next = nodes_seq_next, + .stop = nodes_seq_stop, + .show = nodes_seq_show +}; + +static int nodes_proc_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &nodes_seq_ops); +} + +static const struct file_operations nodes_proc_fops = { + .owner = THIS_MODULE, + .open = nodes_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + +#endif /* CONFIG_E2K || CONFIG_E90S */ + +#ifdef CONFIG_E2K +static int ldsp_seq_show(struct seq_file *s, void *v) +{ + int node = (int)(*((loff_t *)v)); + e2k_pwr_mgr_t pwr; + + seq_printf(s, " node: %d\n", node); + pwr.word = sic_read_node_nbsr_reg(node, SIC_pwr_mgr); + seq_printf(s, " state: %s\n", + (pwr.fields.ic_clk) ? "on" : "off"); + + return 0; +} + +extern int eldsp_disable; +static void *ldsp_seq_start(struct seq_file *s, loff_t *pos) +{ + int node = 0, dsp_on = 0; + e2k_pwr_mgr_t pwr; + + if (!node_online(*pos)) + *pos = next_online_node(*pos); + + for_each_online_node(node) { + pwr.word = sic_read_node_nbsr_reg(node, SIC_pwr_mgr); + if (pwr.fields.ic_clk) + dsp_on++; + } + + if (eldsp_disable) + seq_printf(s, "! 
ELDSP device disabled " \ + "from cmdline (\"eldsp-off\").\n"); + + seq_printf(s, "- ELDSP device info - number: %d, online: %d.\n", + num_online_nodes() * 4, + dsp_on * 4); + seq_printf(s, " Module not loaded.\n"); + if (*pos == MAX_NUMNODES) + return 0; + seq_printf(s, " Status for each node:\n"); + return (void *)pos; +} + +static void *ldsp_seq_next(struct seq_file *s, void *v, loff_t *pos) +{ + *pos = next_online_node(*pos); + if (*pos == MAX_NUMNODES) + return 0; + return (void *)pos; +} + +static void ldsp_seq_stop(struct seq_file *s, void *v) +{ +} + +static const struct seq_operations ldsp_seq_ops = { + .start = ldsp_seq_start, + .next = ldsp_seq_next, + .stop = ldsp_seq_stop, + .show = ldsp_seq_show +}; + +static int ldsp_proc_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &ldsp_seq_ops); +} + +static const struct file_operations ldsp_proc_fops = { + .owner = THIS_MODULE, + .open = ldsp_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; +#endif /* __e2k__ */ + +static int loadtime_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, loadtime_proc_show, NULL); +} + +static const struct file_operations loadtime_proc_fops = { + .owner = THIS_MODULE, + .open = loadtime_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int bootdata_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, bootdata_proc_show, NULL); +} + +static const struct file_operations bootdata_proc_fops = { + .owner = THIS_MODULE, + .open = bootdata_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int bootlog_seq_show(struct seq_file *s, void *v) +{ + int block_num = *((loff_t *)v); + u64 start_addr, end_addr, current_addr, len; + + start_addr = (u64)__va(bootblock_virt->info.bios.bootlog_addr); + end_addr = start_addr + bootblock_virt->info.bios.bootlog_len; + current_addr = start_addr + 
block_num * BOOTLOG_BLOCK_SIZE; + + len = (end_addr - current_addr < BOOTLOG_BLOCK_SIZE) ? + (end_addr - current_addr) : BOOTLOG_BLOCK_SIZE; + + seq_write(s, (void *)current_addr, len); + + return 0; +} + +static void *bootlog_seq_start(struct seq_file *s, loff_t *pos) +{ + if (*pos >= BOOTLOG_BLOCKS_COUNT) + return 0; + return (void *)pos; +} + +static void *bootlog_seq_next(struct seq_file *s, void *v, loff_t *pos) +{ + (*pos)++; + if (*pos >= BOOTLOG_BLOCKS_COUNT) + return 0; + return (void *)pos; +} + +static void bootlog_seq_stop(struct seq_file *s, void *v) +{ +} + +static const struct seq_operations bootlog_seq_ops = { + .start = bootlog_seq_start, + .next = bootlog_seq_next, + .stop = bootlog_seq_stop, + .show = bootlog_seq_show +}; + +static int bootlog_proc_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &bootlog_seq_ops); +} + +static const struct file_operations bootlog_proc_fops = { + .owner = THIS_MODULE, + .open = bootlog_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + +static int __init init_procshow(void) +{ + const char *signature; + + if (bootblock_virt == NULL) { + return -EINVAL; + } + + signature = (char *) bootblock_virt->info.bios.signature; + if (!strcmp(signature, BIOS_INFO_SIGNATURE)) { + if (!proc_create(BOOTDATA_FILENAME, S_IRUGO, NULL, + &bootdata_proc_fops)) + return -ENOMEM; + } + + if (!proc_create(LOADTIME_FILENAME, S_IRUGO, NULL, &loadtime_proc_fops)) + return -ENOMEM; + +#ifdef CONFIG_BOOT_TRACE + if (!proc_create(LOADTIMEKERN_FILENAME, S_IRUGO, NULL, + &loadtime_kernel_proc_fops)) + return -ENOMEM; +#endif /* CONFIG_BOOT_TRACE */ + +#ifdef CONFIG_E2K + if (HAS_MACHINE_E2K_DSP) { + ldsp_proc_fops_pointer = &ldsp_proc_fops; + ldsp_entry = proc_create(LDSP_FILENAME, S_IRUGO, + NULL, ldsp_proc_fops_pointer); + if (!ldsp_entry) { + ldsp_proc_fops_pointer = NULL; + return -ENOMEM; + } + } +#endif + +#if defined(CONFIG_E2K) || defined(CONFIG_E90S) + if (num_possible_rdmas()) 
{ + rdma_proc_fops_pointer = &rdma_proc_fops; + rdma_entry = proc_create(RDMA_FILENAME, S_IRUGO, + NULL, rdma_proc_fops_pointer); + if (!rdma_entry) { + rdma_proc_fops_pointer = NULL; + return -ENOMEM; + } + } + + nodes_proc_fops_pointer = &nodes_proc_fops; + nodes_entry = proc_create(NODES_FILENAME, S_IRUGO, + NULL, nodes_proc_fops_pointer); + if (!nodes_entry) { + nodes_proc_fops_pointer = NULL; + return -ENOMEM; + } +#endif + + if (bootblock_virt->info.bios.bootlog_len) { + if (!proc_create(BOOTLOG_FILENAME, S_IRUGO, NULL, + &bootlog_proc_fops)) + return -ENOMEM; + } + + return 0; +} + +module_init(init_procshow); diff --git a/arch/l/kernel/setup.c b/arch/l/kernel/setup.c new file mode 100644 index 000000000000..7cc97d64544a --- /dev/null +++ b/arch/l/kernel/setup.c @@ -0,0 +1,303 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct screen_info screen_info = { + .orig_x = 0, + .orig_y = 25, + .orig_video_page = 0, + .orig_video_mode = 7, + .orig_video_cols = 80, + .orig_video_lines = 25, + .orig_video_isVGA = 1, + .orig_video_points = 16 +}; +EXPORT_SYMBOL(screen_info); + +#define VGA_FB_PHYS 0xA0000 +#define VGA_FB_PHYS_LEN 65536 + +int fb_is_primary_device(struct fb_info *info) +{ + struct device *device = info->device; + struct pci_dev *pci_dev = NULL; + struct pci_dev *default_device = vga_default_device(); + struct resource *res = NULL; + + if (device && dev_is_pci(device)) + pci_dev = to_pci_dev(device); + + if (!pci_dev) { + struct apertures_struct *gen_aper = info->apertures; + if (gen_aper && gen_aper->count && + gen_aper->ranges[0].base == VGA_FB_PHYS) + return 1; + return 0; + } + + if (default_device) { + if (pci_dev == default_device) + return 1; + else + return 0; + } + + res = &pci_dev->resource[PCI_ROM_RESOURCE]; + + if (res && res->flags & IORESOURCE_ROM_SHADOW) + return 1; + return 0; +} +EXPORT_SYMBOL(fb_is_primary_device); 
+MODULE_LICENSE("GPL"); + +void __init l_setup_vga(void) +{ + boot_info_t *boot_info = &bootblock_virt->info; +#ifdef CONFIG_VT +#ifdef CONFIG_VGA_CONSOLE + struct device_node *root; + const char *model = ""; +#endif +#ifdef CONFIG_DUMMY_CONSOLE + conswitchp = &dummy_con; +#endif +#ifdef CONFIG_VGA_CONSOLE + if (memblock_is_region_memory(VGA_FB_PHYS, VGA_FB_PHYS_LEN)) { + pr_info("Legacy VGA MMIO range routes to system memory."); + return; + } + + root = of_find_node_by_path("/"); + of_property_read_string(root, "model", &model); + /* tablet displays garbage */ + if (!strcmp(model, "e1c+,mcst,e1cmt,tablet")) { + of_node_put(root); + return; + } + of_node_put(root); + conswitchp = &vga_con; +#endif /*CONFIG_VGA_CONSOLE*/ +#endif /*CONFIG_VT*/ + if (boot_info->vga_mode != 0xe2) /* new boot */ + screen_info.orig_video_mode = boot_info->vga_mode; +} + + +#define L_MAC_MAX 32 +static unsigned char l_base_mac_addr[6] = {0}; +static int l_mac_last_nr = 0; + +static const struct pci_device_id l_iohub_eth_devices[] = { + { PCI_DEVICE(PCI_VENDOR_ID_ELBRUS, PCI_DEVICE_ID_MCST_E1000) }, + { PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_ETH) }, + { PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_MGB) }, + { PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_XGBE) }, + { } /* terminate list */ +}; + +static struct l_pdev_mac { + unsigned char depth, domain, bus, slot, func; +} *l_pdev_mac; + +static int l_cmp_pdev_mac(const void *_a, const void *_b) +{ + const struct l_pdev_mac *a = _a, *b = _b; + if (a->depth != b->depth) + return (int)a->depth - (int)b->depth; + if (a->domain != b->domain) + return (int)a->domain - (int)b->domain; + if (a->bus != b->bus) + return (int)a->bus - (int)b->bus; + if (a->slot != b->slot) + return (int)a->slot - (int)b->slot; + if (a->func != b->func) + return (int)a->func - (int)b->func; + WARN_ON(1); + return 0; +} + +static int l_get_depth(struct pci_dev *pdev) +{ + int i = 0; + while ((pdev = pci_upstream_bridge(pdev))) + i++; + 
return i; +} + +static int __init l_ethernet_mac_addr_init(void) +{ + int i = 0; + struct pci_dev *pdev = NULL; + const struct pci_device_id *ent; + struct l_pdev_mac *m = kmalloc(sizeof(*m) * L_MAC_MAX, GFP_KERNEL); + if (!m) + return -ENOMEM; + for_each_pci_dev(pdev) { + ent = pci_match_id(l_iohub_eth_devices, pdev); + if (!ent) + continue; + m[i].depth = l_get_depth(pdev); + m[i].domain = pci_domain_nr(pdev->bus); + m[i].bus = pdev->bus->number; + m[i].slot = PCI_SLOT(pdev->devfn); + m[i].func = PCI_FUNC(pdev->devfn); + i++; + if (WARN_ON(i == L_MAC_MAX)) + break; + } + sort(m, i, sizeof(struct l_pdev_mac), l_cmp_pdev_mac, NULL); + l_mac_last_nr = i; + l_pdev_mac = m; + return 0; +} +/* Needs to be done after pci initialization which are subsys_initcall. */ +subsys_initcall_sync(l_ethernet_mac_addr_init); + +int l_set_ethernet_macaddr(struct pci_dev *pdev, char *macaddr) +{ + static DEFINE_SPINLOCK(lock); + struct l_pdev_mac *m = l_pdev_mac; + int i, ret = 0; + for (i = 0; i < 6; i++) + macaddr[i] = l_base_mac_addr[i]; + + spin_lock_irq(&lock); + if (pdev) { + /* Find reserved mac-address for device */ + for (i = 0; i < l_mac_last_nr; i++) { + if (m[i].domain == pci_domain_nr(pdev->bus) && + m[i].bus == pdev->bus->number && + m[i].slot == PCI_SLOT(pdev->devfn) && + m[i].func == PCI_FUNC(pdev->devfn)) { + macaddr[5] += i; + goto out; + } + } + } + /*Try to assign mac-address */ + if (l_mac_last_nr >= L_MAC_MAX) { + ret = -ENOSPC; + goto out; + } + i = l_mac_last_nr; + if (pdev) { + m[i].depth = l_get_depth(pdev); + m[i].domain = pci_domain_nr(pdev->bus); + m[i].bus = pdev->bus->number; + m[i].slot = PCI_SLOT(pdev->devfn); + m[i].func = PCI_FUNC(pdev->devfn); + } + macaddr[5] += i; + l_mac_last_nr++; +out: + spin_unlock_irq(&lock); + return ret; +} +EXPORT_SYMBOL(l_set_ethernet_macaddr); + +static int get_long_option(char **str, u64 *pint) +{ + char *cur = *str; + + if (!cur || !(*cur)) + return 0; + *pint = simple_strtoull(cur, str, 0); + if (cur == *str) + 
return 0; + return 1; +} + +static int __init machine_mac_addr_setup(char *str) +{ + u64 machine_mac_addr; + if (get_long_option(&str, &machine_mac_addr)) { + u64 tmp = be64_to_cpu(machine_mac_addr); + memcpy(l_base_mac_addr, ((u8 *)&tmp) + 2, + sizeof(l_base_mac_addr)); + + printk("machine_mac_addr_setup: " + "New MAC address is %06llx\n" + "Base MAC addr for ethernet: %pm\n", + machine_mac_addr, l_base_mac_addr); + } + return 1; +} +__setup("mach_mac=", machine_mac_addr_setup); + + +#define MB_NAME_BODY_SZ 32 +static char mb_name_body[MB_NAME_BODY_SZ]; +char *mcst_mb_name; +EXPORT_SYMBOL(mcst_mb_name); + +int __init l_setup_arch(void) +{ + unsigned char *ma; + u64 tsc = get_cycles(); + if(!bootblock_virt) + return -1; + + /* Random ticks after booting */ + add_device_randomness(&tsc, sizeof(tsc)); + /* Some info from boot */ + add_device_randomness(bootblock_virt->info.bios.boot_ver, + strlen(bootblock_virt->info.bios.boot_ver)); + add_device_randomness(&bootblock_virt->info.mach_serialn, + sizeof(bootblock_virt->info.mach_serialn)); + + ma = bootblock_virt->info.mac_addr; + + if (!ma[0] && !ma[1] && !ma[2] && !ma[3] && !ma[4] && !ma[5]) { + l_base_mac_addr[0] = 0x08; + l_base_mac_addr[1] = 0x00; +#ifdef __e2k__ + l_base_mac_addr[2] = 0x30; +#else + l_base_mac_addr[2] = 0x20; +#endif + l_base_mac_addr[3] = (bootblock_virt->info.mach_serialn >> 8) & 0xff; + l_base_mac_addr[4] = bootblock_virt->info.mach_serialn & 0xff; + } else { + memcpy(l_base_mac_addr, ma, 6); + } + pr_info("Base MAC address %02x:%02x:%02x:%02x:%02x:%02x\n", + l_base_mac_addr[0], l_base_mac_addr[1], l_base_mac_addr[2], + l_base_mac_addr[3], l_base_mac_addr[4], l_base_mac_addr[5]); + /* Set mb_name. If boot provides it from FRUiD, set it. + * If no, try to guess it by mb_version + * Max name len = 14. bootblock_virt->info.mb_name[15] + * is a revision of board + */ + ma = bootblock_virt->info.mb_name; + if (ma[0]) { + /* FRUiD name is valid */ + if (ma[15]) { + /* we have valid revision. 
compose name */ + ma[14] = 0; /* for safety */ + sprintf(mb_name_body, "%s v.%u", ma, ma[15]); + mcst_mb_name = mb_name_body; + } else { + mcst_mb_name = ma; + } + } else { + mcst_mb_name = + GET_MB_TYPE_NAME(bootblock_virt->info.bios.mb_type); + } + + register_early_dump_console(); + return 0; +} diff --git a/arch/l/pci/Kconfig b/arch/l/pci/Kconfig new file mode 100644 index 000000000000..2ab01ef5afbd --- /dev/null +++ b/arch/l/pci/Kconfig @@ -0,0 +1,28 @@ +# +# For a description of the syntax of this configuration file, +# see Documentation/kbuild/kconfig-language.txt. +# + +menu "Elbrus chipset PCI support" + +config PCI_ELBRUS + bool "Elbrus (e2k/e90s) PCI controller support" + depends on (E2K || E90S) && PCI + default y + help + Elbrus PCI controller designed for machines based on both + microprocessors arch: e2k and e90s + +config IOHUB_DOMAINS + bool "Elbrus PCI controller domain support (multiple IOHUB)" + depends on PCI_ELBRUS && NUMA && (E2K || E90S) + select PCI_DOMAINS + default y + help + Each node on Elbrus NUMA based machine has a few IO links + so system can have multiple IOHUBs to support PCI domains + If say Y then PCI controllers will be scaned on each online + node + If say N then only one root IOHUB will be enabled on node 0 + +endmenu # "Elbrus chipset PCI support" diff --git a/arch/l/pci/Makefile b/arch/l/pci/Makefile new file mode 100644 index 000000000000..bf5ea89ca65d --- /dev/null +++ b/arch/l/pci/Makefile @@ -0,0 +1,12 @@ +CFLAGS_pci.o += -Wno-deprecated-declarations +CFLAGS_irq.o += -Wno-deprecated-declarations + +obj-$(CONFIG_PCI_ELBRUS) += direct.o +obj-$(CONFIG_PCI_ELBRUS) += l_pci.o + +obj-$(CONFIG_PCI_ELBRUS) += pci.o +obj-$(CONFIG_ACPI) += acpi.o +obj-$(CONFIG_IOHUB_DOMAINS) += numa.o +obj-$(CONFIG_PCI_ELBRUS) += irq.o + +obj-$(CONFIG_PCI_ELBRUS) += common.o diff --git a/arch/l/pci/acpi.c b/arch/l/pci/acpi.c new file mode 100644 index 000000000000..45ebbc9e20da --- /dev/null +++ b/arch/l/pci/acpi.c @@ -0,0 +1,20 @@ +#include 
+#include +#include +#include "pci.h" + +struct pci_bus *pci_acpi_scan_root(struct acpi_device *device, int domain, int busnum) +{ + if (domain != 0) { + printk(KERN_WARNING "PCI: Multiple domains not supported\n"); + return NULL; + } + + return pcibios_scan_root(busnum); +} + +static int __init pci_acpi_init(void) +{ + return 0; +} +subsys_initcall(pci_acpi_init); diff --git a/arch/l/pci/common.c b/arch/l/pci/common.c new file mode 100644 index 000000000000..1d96428d455e --- /dev/null +++ b/arch/l/pci/common.c @@ -0,0 +1,149 @@ +/* + * Low-Level PCI Support for Elbrus/Intel chipset + * + * (c) 1999--2000 Martin Mares + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pci.h" + +#undef DEBUG_PCI_MODE +#undef DebugPCI +#define DEBUG_PCI_MODE 0 /* PCI init */ +#define DebugPCI if (DEBUG_PCI_MODE) printk + +int noioapicquirk; +#ifdef CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS +int noioapicreroute = 0; +#else +int noioapicreroute = 1; +#endif + +unsigned int pci_probe = 0; + +int pcibios_last_bus = -1; +unsigned long pirq_table_addr; +struct pci_bus *pci_root_bus = NULL; +struct pci_raw_ops *raw_pci_ops = NULL; + +int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, + int reg, int len, u32 *val) +{ + return raw_pci_ops->read(domain, bus, devfn, reg, len, val); +} + +int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, + int reg, int len, u32 val) +{ + return raw_pci_ops->write(domain, bus, devfn, reg, len, val); +} + +static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 *value) +{ + return raw_pci_read(pci_domain_nr(bus), bus->number, + devfn, where, size, value); +} + +static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 value) +{ + return raw_pci_write(pci_domain_nr(bus), bus->number, + devfn, where, size, value); +} + +struct pci_ops pci_root_ops = { + .read = pci_read, + .write = pci_write, +}; 
+ +/* + * legacy, numa, and acpi all want to call pcibios_scan_root + * from their initcalls. This flag prevents that. + */ +int pcibios_scanned; + +/* + * This interrupt-safe spinlock protects all accesses to PCI + * configuration space. + */ +DEFINE_RAW_SPINLOCK(pci_config_lock); + +struct pci_bus *pcibios_scan_root(int busnum) +{ + struct pci_bus *bus = NULL; + struct iohub_sysdata *sd; + LIST_HEAD(resources); + + DebugPCI("pcibios_scan_root() started for bus # %d\n", busnum); + while ((bus = pci_find_next_bus(bus)) != NULL) { + DebugPCI("pcibios_scan_root() find next bus # %d\n", + bus->number); + if (bus->number == busnum) { + /* Already scanned */ + DebugPCI("pcibios_scan_root() bus # %d already " + "scanned\n", busnum); + return bus; + } + } + + /* Allocate per-root-bus (not per bus) arch-specific data. + * TODO: leak; this memory is never freed. + * It's arguable whether it's worth the trouble to care. + */ + sd = kzalloc(sizeof(*sd), GFP_KERNEL); + if (!sd) { + printk(KERN_ERR "PCI: OOM, not probing PCI bus %02x\n", busnum); + return NULL; + } + printk("PCI: Probing PCI hardware (bus %02x)\n", busnum); + + mp_pci_add_resources(&resources, sd); + + return pci_scan_root_bus(NULL, busnum, &pci_root_ops, sd, &resources); +} + +extern u8 pci_cache_line_size; + +static int __init pcibios_init(void) +{ + if (!raw_pci_ops) { + printk("PCI: System does not support PCI\n"); + return 0; + } + /* lock consoles to prevent output to pci consoles while scanning */ + console_lock(); + if (pci_root_bus == NULL) { + pci_root_bus = pcibios_scan_root(0); + if (pci_root_bus) + pci_bus_add_devices(pci_root_bus); + } + pcibios_irq_init(); + + /* + * Assume PCI cacheline size of 32 bytes for all x86s except K7/K8 + * and P4. It's also good for 386/486s (which actually have 16) + * as quite a few PCI devices do not support smaller values. 
+ */ + pci_cache_line_size = PCI_ARCH_CACHE_LINE_SIZE >> 2; + + pcibios_resource_survey(); + + if (paravirt_enabled()) + pci_assign_unassigned_resources(); + + console_unlock(); + + return 0; +} + +subsys_initcall(pcibios_init); diff --git a/arch/l/pci/direct.c b/arch/l/pci/direct.c new file mode 100644 index 000000000000..ee61f20033f6 --- /dev/null +++ b/arch/l/pci/direct.c @@ -0,0 +1,18 @@ +/* + * direct.c - Low-level direct PCI config space access + */ + +#include +#include +#include "pci.h" + +int __init l_pci_direct_init(void) +{ + if ((pci_probe & PCI_PROBE_L) == 0) + return (-1); + if (pci_check_type_l()) { + printk(KERN_INFO "PCI: Using Elbrus configuration type\n"); + return (0); + } + return (-1); +} diff --git a/arch/l/pci/irq.c b/arch/l/pci/irq.c new file mode 100644 index 000000000000..0b53f9844ff8 --- /dev/null +++ b/arch/l/pci/irq.c @@ -0,0 +1,1157 @@ +/* + * Low-Level PCI Support for PC -- Routing of Interrupts + * + * (c) 1999--2000 Martin Mares + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pci.h" + +#define PIRQ_SIGNATURE (('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24)) +#define PIRQ_VERSION 0x0100 + +#undef DEBUG_IRQ_MODE +#undef DebugIRQ +#define DEBUG_IRQ_MODE 0 /* PCI IRQ */ +#define DebugIRQ if (DEBUG_IRQ_MODE) printk + +int broken_hp_bios_irq9; + +static struct irq_routing_table *pirq_table; + +/* + * Never use: 0, 1, 2 (timer, keyboard, and cascade) + * Avoid using: 13, 14 and 15 (FP error and IDE). 
+ * Penalize: 3, 4, 6, 7, 12 (known ISA uses: serial, floppy, parallel and mouse) + */ +unsigned int pcibios_irq_mask = 0xfff8; + +static int pirq_penalty[16] = { + 1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000, + 0, 0, 0, 0, 1000, 100000, 100000, 100000 +}; + +struct irq_router { + char *name; + u16 vendor, device; + int (*get)(struct pci_dev *router, struct pci_dev *dev, int pirq); + int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq, int new); +}; + +struct irq_router_handler { + u16 vendor; + int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device); +}; + +int (*pcibios_enable_irq)(struct pci_dev *dev) = NULL; +void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL; + +/* + * Check passed address for the PCI IRQ Routing Table signature + * and perform checksum verification. + */ + +static inline struct irq_routing_table * pirq_check_routing_table(u8 *addr) +{ + struct irq_routing_table *rt; + int i; + u8 sum; + + rt = (struct irq_routing_table *) addr; + if (rt->signature != PIRQ_SIGNATURE || + rt->version != PIRQ_VERSION || + rt->size % 16 || + rt->size < sizeof(struct irq_routing_table)) + return NULL; + sum = 0; + for (i=0; i < rt->size; i++) + sum += addr[i]; + if (!sum) { + DBG("PCI: Interrupt Routing Table found at 0x%px\n", rt); + return rt; + } + return NULL; +} + + + +/* + * Search 0xf0000 -- 0xfffff for the PCI IRQ Routing Table. 
+ */ + +static struct irq_routing_table * __init pirq_find_routing_table(void) +{ + u8 *addr; + struct irq_routing_table *rt; +#if defined(CONFIG_E90S) || defined(__e2k__) + /* We don't have such table */ + return NULL; +#endif + + if (pirq_table_addr) { + rt = pirq_check_routing_table((u8 *) __va(pirq_table_addr)); + if (rt) + return rt; + printk(KERN_WARNING "PCI: PIRQ table NOT found at pirqaddr\n"); + } + for(addr = (u8 *) __va(0xf0000); addr < (u8 *) __va(0x100000); addr += 16) { + rt = pirq_check_routing_table(addr); + if (rt) + return rt; + } + return NULL; +} + +/* + * If we have a IRQ routing table, use it to search for peer host + * bridges. It's a gross hack, but since there are no other known + * ways how to get a list of buses, we have to go this way. + */ + +static void __init pirq_peer_trick(void) +{ + struct irq_routing_table *rt = pirq_table; + u8 busmap[256]; + int i; + struct irq_info *e; + + memset(busmap, 0, sizeof(busmap)); + for(i=0; i < (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); i++) { + e = &rt->slots[i]; +#ifdef DEBUG + { + int j; + DBG("%02x:%02x slot=%02x", e->bus, e->devfn/8, e->slot); + for(j=0; j<4; j++) + DBG(" %d:%02x/%04x", j, e->irq[j].link, e->irq[j].bitmap); + DBG("\n"); + } +#endif + busmap[e->bus] = 1; + } + for(i = 1; i < 256; i++) { + if (!busmap[i] || pci_find_bus(0, i)) + continue; + if (pci_scan_bus(i, &pci_root_ops, NULL)) + printk(KERN_INFO "PCI: Discovered primary peer bus %02x [IRQ]\n", i); + } + pcibios_last_bus = -1; +} + +/* + * Code for querying and setting of IRQ routes on various interrupt routers. + */ + +void eisa_set_level_irq(unsigned int irq) +{ + unsigned char mask = 1 << (irq & 7); + unsigned int port = 0x4d0 + (irq >> 3); + unsigned char val = inb(port); + + if (!(val & mask)) { + DBG(" -> edge"); + outb(val | mask, port); + } +} + +/* + * Common IRQ routing practice: nybbles in config space, + * offset by some magic constant. 
+ */ +static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr) +{ + u8 x; + unsigned reg = offset + (nr >> 1); + + pci_read_config_byte(router, reg, &x); + return (nr & 1) ? (x >> 4) : (x & 0xf); +} + +static void write_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr, unsigned int val) +{ + u8 x; + unsigned reg = offset + (nr >> 1); + + pci_read_config_byte(router, reg, &x); + x = (nr & 1) ? ((x & 0x0f) | (val << 4)) : ((x & 0xf0) | val); + pci_write_config_byte(router, reg, x); +} + +/* + * ALI pirq entries are damn ugly, and completely undocumented. + * This has been figured out from pirq tables, and it's not a pretty + * picture. + */ +static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq) +{ + static unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 }; + + return irqmap[read_config_nybble(router, 0x48, pirq-1)]; +} + +static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) +{ + static unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 }; + unsigned int val = irqmap[irq]; + + if (val) { + write_config_nybble(router, 0x48, pirq-1, val); + return 1; + } + return 0; +} + +/* + * The Intel PIIX4 pirq rules are fairly simple: "pirq" is + * just a pointer to the config space. + */ +static int pirq_piix_get(struct pci_dev *router, struct pci_dev *dev, int pirq) +{ + u8 x; + + pci_read_config_byte(router, pirq, &x); + return (x < 16) ? x : 0; +} + +static int pirq_piix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) +{ + pci_write_config_byte(router, pirq, irq); + return 1; +} + +/* + * The VIA pirq rules are nibble-based, like ALI, + * but without the ugly irq number munging. + * However, PIRQD is in the upper instead of lower 4 bits. + */ +static int pirq_via_get(struct pci_dev *router, struct pci_dev *dev, int pirq) +{ + return read_config_nybble(router, 0x55, pirq == 4 ? 
5 : pirq); +} + +static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) +{ + write_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq, irq); + return 1; +} + +/* + * The VIA pirq rules are nibble-based, like ALI, + * but without the ugly irq number munging. + * However, for 82C586, nibble map is different . + */ +static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq) +{ + static unsigned int pirqmap[4] = { 3, 2, 5, 1 }; + return read_config_nybble(router, 0x55, pirqmap[pirq-1]); +} + +static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) +{ + static unsigned int pirqmap[4] = { 3, 2, 5, 1 }; + write_config_nybble(router, 0x55, pirqmap[pirq-1], irq); + return 1; +} + +/* + * ITE 8330G pirq rules are nibble-based + * FIXME: pirqmap may be { 1, 0, 3, 2 }, + * 2+3 are both mapped to irq 9 on my system + */ +static int pirq_ite_get(struct pci_dev *router, struct pci_dev *dev, int pirq) +{ + static unsigned char pirqmap[4] = { 1, 0, 2, 3 }; + return read_config_nybble(router,0x43, pirqmap[pirq-1]); +} + +static int pirq_ite_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) +{ + static unsigned char pirqmap[4] = { 1, 0, 2, 3 }; + write_config_nybble(router, 0x43, pirqmap[pirq-1], irq); + return 1; +} + +/* + * OPTI: high four bits are nibble pointer.. + * I wonder what the low bits do? 
+ */ +static int pirq_opti_get(struct pci_dev *router, struct pci_dev *dev, int pirq) +{ + return read_config_nybble(router, 0xb8, pirq >> 4); +} + +static int pirq_opti_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) +{ + write_config_nybble(router, 0xb8, pirq >> 4, irq); + return 1; +} + +/* + * Cyrix: nibble offset 0x5C + * 0x5C bits 7:4 is INTB bits 3:0 is INTA + * 0x5D bits 7:4 is INTD bits 3:0 is INTC + */ +static int pirq_cyrix_get(struct pci_dev *router, struct pci_dev *dev, int pirq) +{ + return read_config_nybble(router, 0x5C, (pirq-1)^1); +} + +static int pirq_cyrix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) +{ + write_config_nybble(router, 0x5C, (pirq-1)^1, irq); + return 1; +} + +/* + * PIRQ routing for SiS 85C503 router used in several SiS chipsets. + * We have to deal with the following issues here: + * - vendors have different ideas about the meaning of link values + * - some onboard devices (integrated in the chipset) have special + * links and are thus routed differently (i.e. not via PCI INTA-INTD) + * - different revision of the router have a different layout for + * the routing registers, particularly for the onchip devices + * + * For all routing registers the common thing is we have one byte + * per routeable link which is defined as: + * bit 7 IRQ mapping enabled (0) or disabled (1) + * bits [6:4] reserved (sometimes used for onchip devices) + * bits [3:0] IRQ to map to + * allowed: 3-7, 9-12, 14-15 + * reserved: 0, 1, 2, 8, 13 + * + * The config-space registers located at 0x41/0x42/0x43/0x44 are + * always used to route the normal PCI INT A/B/C/D respectively. + * Apparently there are systems implementing PCI routing table using + * link values 0x01-0x04 and others using 0x41-0x44 for PCI INTA..D. + * We try our best to handle both link mappings. + * + * Currently (2003-05-21) it appears most SiS chipsets follow the + * definition of routing registers from the SiS-5595 southbridge. 
+ * According to the SiS 5595 datasheets the revision id's of the + * router (ISA-bridge) should be 0x01 or 0xb0. + * + * Furthermore we've also seen lspci dumps with revision 0x00 and 0xb1. + * Looks like these are used in a number of SiS 5xx/6xx/7xx chipsets. + * They seem to work with the current routing code. However there is + * some concern because of the two USB-OHCI HCs (original SiS 5595 + * had only one). YMMV. + * + * Onchip routing for router rev-id 0x01/0xb0 and probably 0x00/0xb1: + * + * 0x61: IDEIRQ: + * bits [6:5] must be written 01 + * bit 4 channel-select primary (0), secondary (1) + * + * 0x62: USBIRQ: + * bit 6 OHCI function disabled (0), enabled (1) + * + * 0x6a: ACPI/SCI IRQ: bits 4-6 reserved + * + * 0x7e: Data Acq. Module IRQ - bits 4-6 reserved + * + * We support USBIRQ (in addition to INTA-INTD) and keep the + * IDE, ACPI and DAQ routing untouched as set by the BIOS. + * + * Currently the only reported exception is the new SiS 65x chipset + * which includes the SiS 69x southbridge. Here we have the 85C503 + * router revision 0x04 and there are changes in the register layout + * mostly related to the different USB HCs with USB 2.0 support. + * + * Onchip routing for router rev-id 0x04 (try-and-error observation) + * + * 0x60/0x61/0x62/0x63: 1xEHCI and 3xOHCI (companion) USB-HCs + * bit 6-4 are probably unused, not like 5595 + */ + +#define PIRQ_SIS_IRQ_MASK 0x0f +#define PIRQ_SIS_IRQ_DISABLE 0x80 +#define PIRQ_SIS_USB_ENABLE 0x40 + +static int pirq_sis_get(struct pci_dev *router, struct pci_dev *dev, int pirq) +{ + u8 x; + int reg; + + reg = pirq; + if (reg >= 0x01 && reg <= 0x04) + reg += 0x40; + pci_read_config_byte(router, reg, &x); + return (x & PIRQ_SIS_IRQ_DISABLE) ? 
0 : (x & PIRQ_SIS_IRQ_MASK); +} + +static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) +{ + u8 x; + int reg; + + reg = pirq; + if (reg >= 0x01 && reg <= 0x04) + reg += 0x40; + pci_read_config_byte(router, reg, &x); + x &= ~(PIRQ_SIS_IRQ_MASK | PIRQ_SIS_IRQ_DISABLE); + x |= irq ? irq: PIRQ_SIS_IRQ_DISABLE; + pci_write_config_byte(router, reg, x); + return 1; +} + + +/* + * VLSI: nibble offset 0x74 - educated guess due to routing table and + * config space of VLSI 82C534 PCI-bridge/router (1004:0102) + * Tested on HP OmniBook 800 covering PIRQ 1, 2, 4, 8 for onboard + * devices, PIRQ 3 for non-pci(!) soundchip and (untested) PIRQ 6 + * for the busbridge to the docking station. + */ + +static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq) +{ + if (pirq > 8) { + printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq); + return 0; + } + return read_config_nybble(router, 0x74, pirq-1); +} + +static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) +{ + if (pirq > 8) { + printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq); + return 0; + } + write_config_nybble(router, 0x74, pirq-1, irq); + return 1; +} + +/* + * ServerWorks: PCI interrupts mapped to system IRQ lines through Index + * and Redirect I/O registers (0x0c00 and 0x0c01). The Index register + * format is (PCIIRQ## | 0x10), e.g.: PCIIRQ10=0x1a. The Redirect + * register is a straight binary coding of desired PIC IRQ (low nibble). + * + * The 'link' value in the PIRQ table is already in the correct format + * for the Index register. There are some special index values: + * 0x00 for ACPI (SCI), 0x01 for USB, 0x02 for IDE0, 0x04 for IDE1, + * and 0x03 for SMBus. 
+ */ +static int pirq_serverworks_get(struct pci_dev *router, struct pci_dev *dev, int pirq) +{ + outb_p(pirq, 0xc00); + return inb(0xc01) & 0xf; +} + +static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) +{ + outb_p(pirq, 0xc00); + outb_p(irq, 0xc01); + return 1; +} + +/* Support for AMD756 PCI IRQ Routing + * Jhon H. Caicedo + * Jun/21/2001 0.2.0 Release, fixed to use "nybble" functions... (jhcaiced) + * Jun/19/2001 Alpha Release 0.1.0 (jhcaiced) + * The AMD756 pirq rules are nibble-based + * offset 0x56 0-3 PIRQA 4-7 PIRQB + * offset 0x57 0-3 PIRQC 4-7 PIRQD + */ +static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq) +{ + u8 irq; + irq = 0; + if (pirq <= 4) + { + irq = read_config_nybble(router, 0x56, pirq - 1); + } + printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d get irq : %2d\n", + dev->vendor, dev->device, pirq, irq); + return irq; +} + +static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) +{ + printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d SET irq : %2d\n", + dev->vendor, dev->device, pirq, irq); + if (pirq <= 4) + { + write_config_nybble(router, 0x56, pirq - 1, irq); + } + return 1; +} + +static __init int intel_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) +{ +#if 0 /* Let's see what chip this is supposed to be ... */ + /* We must not touch 440GX even if we have tables. 
440GX has + different IRQ routing weirdness */ + if ( pci_find_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_82443GX_0, NULL) || + pci_find_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_82443GX_2, NULL)) + return 0; +#endif + + switch(device) + { + case PCI_DEVICE_ID_INTEL_82371FB_0: + case PCI_DEVICE_ID_INTEL_82371SB_0: + case PCI_DEVICE_ID_INTEL_82371AB_0: + case PCI_DEVICE_ID_INTEL_82371MX: + case PCI_DEVICE_ID_INTEL_82443MX_0: + case PCI_DEVICE_ID_INTEL_82801AA_0: + case PCI_DEVICE_ID_INTEL_82801AB_0: + case PCI_DEVICE_ID_INTEL_82801BA_0: + case PCI_DEVICE_ID_INTEL_82801BA_10: + case PCI_DEVICE_ID_INTEL_82801CA_0: + case PCI_DEVICE_ID_INTEL_82801CA_12: + case PCI_DEVICE_ID_INTEL_82801DB_0: + case PCI_DEVICE_ID_INTEL_82801E_0: + case PCI_DEVICE_ID_INTEL_82801EB_0: + case PCI_DEVICE_ID_INTEL_ESB_1: + case PCI_DEVICE_ID_INTEL_ICH6_0: + case PCI_DEVICE_ID_INTEL_ICH6_1: + case PCI_DEVICE_ID_INTEL_ICH7_0: + case PCI_DEVICE_ID_INTEL_ICH7_1: + case PCI_DEVICE_ID_INTEL_ICH7_30: + case PCI_DEVICE_ID_INTEL_ICH7_31: + case PCI_DEVICE_ID_INTEL_ESB2_0: + r->name = "PIIX/ICH"; + r->get = pirq_piix_get; + r->set = pirq_piix_set; + return 1; + } + return 0; +} + +static __init int via_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) +{ + /* FIXME: We should move some of the quirk fixup stuff here */ + + if (router->device == PCI_DEVICE_ID_VIA_82C686 && + device == PCI_DEVICE_ID_VIA_82C586_0) { + /* Asus k7m bios wrongly reports 82C686A as 586-compatible */ + device = PCI_DEVICE_ID_VIA_82C686; + } + + switch(device) + { + case PCI_DEVICE_ID_VIA_82C586_0: + r->name = "VIA"; + r->get = pirq_via586_get; + r->set = pirq_via586_set; + return 1; + case PCI_DEVICE_ID_VIA_82C596: + case PCI_DEVICE_ID_VIA_82C686: + case PCI_DEVICE_ID_VIA_8231: + /* FIXME: add new ones for 8233/5 */ + r->name = "VIA"; + r->get = pirq_via_get; + r->set = pirq_via_set; + return 1; + } + return 0; +} + +static __init int vlsi_router_probe(struct irq_router *r, struct pci_dev 
*router, u16 device) +{ + switch(device) + { + case PCI_DEVICE_ID_VLSI_82C534: + r->name = "VLSI 82C534"; + r->get = pirq_vlsi_get; + r->set = pirq_vlsi_set; + return 1; + } + return 0; +} + + +static __init int serverworks_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) +{ + switch(device) + { + case PCI_DEVICE_ID_SERVERWORKS_OSB4: + case PCI_DEVICE_ID_SERVERWORKS_CSB5: + r->name = "ServerWorks"; + r->get = pirq_serverworks_get; + r->set = pirq_serverworks_set; + return 1; + } + return 0; +} + +static __init int sis_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) +{ + if (device != PCI_DEVICE_ID_SI_503) + return 0; + + r->name = "SIS"; + r->get = pirq_sis_get; + r->set = pirq_sis_set; + return 1; +} + +static __init int cyrix_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) +{ + switch(device) + { + case PCI_DEVICE_ID_CYRIX_5520: + r->name = "NatSemi"; + r->get = pirq_cyrix_get; + r->set = pirq_cyrix_set; + return 1; + } + return 0; +} + +static __init int opti_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) +{ + switch(device) + { + case PCI_DEVICE_ID_OPTI_82C700: + r->name = "OPTI"; + r->get = pirq_opti_get; + r->set = pirq_opti_set; + return 1; + } + return 0; +} + +static __init int ite_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) +{ + switch(device) + { + case PCI_DEVICE_ID_ITE_IT8330G_0: + r->name = "ITE"; + r->get = pirq_ite_get; + r->set = pirq_ite_set; + return 1; + } + return 0; +} + +static __init int ali_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) +{ + switch(device) + { + case PCI_DEVICE_ID_AL_M1533: + r->name = "ALI"; + r->get = pirq_ali_get; + r->set = pirq_ali_set; + return 1; + /* Should add 156x some day */ + } + return 0; +} + +static __init int amd_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) +{ + switch(device) + { + case PCI_DEVICE_ID_AMD_VIPER_740B: + r->name = "AMD756"; + 
break; + case PCI_DEVICE_ID_AMD_VIPER_7413: + r->name = "AMD766"; + break; + case PCI_DEVICE_ID_AMD_VIPER_7443: + r->name = "AMD768"; + break; + default: + return 0; + } + r->get = pirq_amd756_get; + r->set = pirq_amd756_set; + return 1; +} + +static __initdata struct irq_router_handler pirq_routers[] = { + { PCI_VENDOR_ID_INTEL, intel_router_probe }, + { PCI_VENDOR_ID_AL, ali_router_probe }, + { PCI_VENDOR_ID_ITE, ite_router_probe }, + { PCI_VENDOR_ID_VIA, via_router_probe }, + { PCI_VENDOR_ID_OPTI, opti_router_probe }, + { PCI_VENDOR_ID_SI, sis_router_probe }, + { PCI_VENDOR_ID_CYRIX, cyrix_router_probe }, + { PCI_VENDOR_ID_VLSI, vlsi_router_probe }, + { PCI_VENDOR_ID_SERVERWORKS, serverworks_router_probe }, + { PCI_VENDOR_ID_AMD, amd_router_probe }, + /* Someone with docs needs to add the ATI Radeon IGP */ + { 0, NULL } +}; +static struct irq_router pirq_router; +static struct pci_dev *pirq_router_dev; + + +/* + * FIXME: should we have an option to say "generic for + * chipset" ? + */ + +static void __init pirq_find_router(struct irq_router *r) +{ + struct irq_routing_table *rt = pirq_table; + struct irq_router_handler *h; + + /* Default unless a driver reloads it */ + r->name = "default"; + r->get = NULL; + r->set = NULL; + + DBG("PCI: Attempting to find IRQ router for %04x:%04x\n", + rt->rtr_vendor, rt->rtr_device); + + pirq_router_dev = pci_get_domain_bus_and_slot(0, rt->rtr_bus, rt->rtr_devfn); + if (!pirq_router_dev) { + DBG("PCI: Interrupt router not found at %02x:%02x\n", rt->rtr_bus, rt->rtr_devfn); + return; + } + + for( h = pirq_routers; h->vendor; h++) { + /* First look for a router match */ + if (rt->rtr_vendor == h->vendor && h->probe(r, pirq_router_dev, rt->rtr_device)) + break; + /* Fall back to a device match */ + if (pirq_router_dev->vendor == h->vendor && h->probe(r, pirq_router_dev, pirq_router_dev->device)) + break; + } + printk(KERN_INFO "PCI: Using IRQ router %s [%04x/%04x] at %s\n", + pirq_router.name, + pirq_router_dev->vendor, + 
pirq_router_dev->device, + pci_name(pirq_router_dev)); +} + +static struct irq_info *pirq_get_info(struct pci_dev *dev) +{ + struct irq_routing_table *rt = pirq_table; + int entries = (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); + struct irq_info *info; + + for (info = rt->slots; entries--; info++) + if (info->bus == dev->bus->number && PCI_SLOT(info->devfn) == PCI_SLOT(dev->devfn)) + return info; + return NULL; +} + +static int pcibios_lookup_irq(struct pci_dev *dev, int assign) +{ + u8 pin; + struct irq_info *info; + int i, pirq, newirq; + int irq = 0; + u32 mask; + struct irq_router *r = &pirq_router; + struct pci_dev *dev2 = NULL; + char *msg = NULL; + + DebugIRQ("%s: pcibios_lookup_irq() started IRQ %d\n", + pci_name(dev), dev->irq); + /* Find IRQ pin */ + pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); + DebugIRQ("%s: pcibios_lookup_irq() pin # %d\n", + pci_name(dev), pin); + if (!pin) { + DebugIRQ("%s: pcibios_lookup_irq() -> no interrupt pin\n", + pci_name(dev)); + return 0; + } + pin = pin - 1; + + /* Find IRQ routing entry */ + + if (!pirq_table) { + DebugIRQ("%s: pcibios_lookup_irq() returns : PIRQ table " + "is empty\n", pci_name(dev)); + return 0; + } + + DebugIRQ("%s: pcibios_lookup_irq() PIN %d", pci_name(dev), pin); + info = pirq_get_info(dev); + if (!info) { + DebugIRQ("%s: pcibios_lookup_irq() -> not found in " + "routing table\n", pci_name(dev)); + return 0; + } + pirq = info->irq[pin].link; + mask = info->irq[pin].bitmap; + if (!pirq) { + DebugIRQ("%s: pcibios_lookup_irq() -> not routed\n", + pci_name(dev)); + return 0; + } + DebugIRQ("%s: pcibios_lookup_irq() -> PIRQ %02x, mask %04x, " + "excl %04x\n", + pci_name(dev), pirq, mask, pirq_table->exclusive_irqs); + mask &= pcibios_irq_mask; + + /* Work around broken HP Pavilion Notebooks which assign USB to + IRQ 9 even though it is actually wired to IRQ 11 */ + + if (broken_hp_bios_irq9 && pirq == 0x59 && dev->irq == 9) { + dev->irq = 11; + 
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11); + r->set(pirq_router_dev, dev, pirq, 11); + } + + /* + * Find the best IRQ to assign: use the one + * reported by the device if possible. + */ + newirq = dev->irq; + DebugIRQ("%s: pcibios_lookup_irq() device reported IRQ %d\n", + pci_name(dev), newirq); + if (!((1 << newirq) & mask)) { + if ( pci_probe & PCI_USE_PIRQ_MASK) newirq = 0; + else printk(KERN_WARNING "PCI: IRQ %i for device %s doesn't match PIRQ mask - try pci=usepirqmask\n", newirq, pci_name(dev)); + } + if (!newirq && assign) { + for (i = 0; i < 16; i++) { + if (!(mask & (1 << i))) + continue; + if (pirq_penalty[i] < pirq_penalty[newirq] && + can_request_irq(i, IRQF_SHARED)) + newirq = i; + } + } + DebugIRQ("%s: pcibios_lookup_irq() -> newirq=%d", + pci_name(dev), newirq); + + /* Check if it is hardcoded */ + if ((pirq & 0xf0) == 0xf0) { + irq = pirq & 0xf; + DebugIRQ("%s: pcibios_lookup_irq() -> hardcoded IRQ %d\n", + pci_name(dev), irq); + msg = "Hardcoded"; + } else if ( r->get && (irq = r->get(pirq_router_dev, dev, pirq)) && \ + ((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask)) ) { + DebugIRQ("%s: pcibios_lookup_irq() -> got IRQ %d\n", + pci_name(dev), irq); + msg = "Found"; + } else if (newirq && r->set && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) { + DebugIRQ("%s: pcibios_lookup_irq() -> assigning IRQ %d", + pci_name(dev), newirq); + if (r->set(pirq_router_dev, dev, pirq, newirq)) { + eisa_set_level_irq(newirq); + DebugIRQ(" ... OK\n"); + msg = "Assigned"; + irq = newirq; + } + } + + if (!irq) { + DebugIRQ(" ... 
failed\n"); + if (newirq && mask == (1 << newirq)) { + msg = "Guessed"; + irq = newirq; + } else + return 0; + } + printk(KERN_INFO "PCI: %s IRQ %d for device %s\n", msg, irq, pci_name(dev)); + + /* Update IRQ for all devices with the same pirq value */ + for_each_pci_dev(dev2) { + pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin); + DebugIRQ("%s: pcibios_lookup_irq() will update IRQ for all " + "devices with same PIRQ\n", + pci_name(dev)); + if (!pin) + continue; + pin--; + info = pirq_get_info(dev2); + if (!info) + continue; + if (info->irq[pin].link == pirq) { + /* We refuse to override the dev->irq information. Give a warning! */ + if ( dev2->irq && dev2->irq != irq && \ + (!(pci_probe & PCI_USE_PIRQ_MASK) || \ + ((1 << dev2->irq) & mask)) ) { +#ifndef CONFIG_PCI_USE_VECTOR + printk(KERN_INFO "IRQ routing conflict for %s, have irq %d, want irq %d\n", + pci_name(dev2), dev2->irq, irq); +#endif + continue; + } + dev2->irq = irq; + pirq_penalty[irq]++; + if (dev != dev2) + printk(KERN_INFO "PCI: Sharing IRQ %d with %s\n", irq, pci_name(dev2)); + } + } + return 1; +} + +static void __init pcibios_fixup_irqs(void) +{ + struct pci_dev *dev = NULL; + u8 pin; + + DebugIRQ("pcibios_fixup_irqs() started\n"); + for_each_pci_dev(dev) { + /* + * If the BIOS has set an out of range IRQ number, just ignore it. + * Also keep track of which IRQ's are already in use. 
+ */ + DebugIRQ("%s: pcibios_fixup_irqs() IRQ is %d\n", + pci_name(dev), dev->irq); + + if ((dev->irq >= 16 && dev->irq <= 19) || + ((dev->irq & 0xf0) == 0xf0)) { /* Hardcoded int line */ + DebugIRQ("%s: pcibios_fixup_irqs() ignoring bogus " + "IRQ %d\n", pci_name(dev), dev->irq); + dev->irq = 0; + } + if (dev->irq >= 16) + continue; + /* If the IRQ is already assigned to a PCI device, ignore its ISA use penalty */ + if (pirq_penalty[dev->irq] >= 100 && pirq_penalty[dev->irq] < 100000) + pirq_penalty[dev->irq] = 0; + pirq_penalty[dev->irq]++; + } + + dev = NULL; + for_each_pci_dev(dev) { + pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); +#ifdef CONFIG_L_IO_APIC + /* + * Recalculate IRQ numbers if we use the I/O APIC. + */ + DebugIRQ("%s: pcibios_fixup_irqs() recalculates IRQ %d, " + "pin %d\n", pci_name(dev), dev->irq, pin); + if (io_apic_assign_pci_irqs) + { + int irq; + + if (pin && dev->irq <= 0) { + struct io_apic_irq_attr irq_attr; + + pin--; /* interrupt pins are numbered starting from 1 */ + irq = IO_PIC_get_PCI_irq_vector( + pci_domain_nr(dev->bus), + dev->bus->number, + PCI_SLOT(dev->devfn), + pin, &irq_attr); + DebugIRQ("%s: pcibios_fixup_irqs() IO APIC " + "get IRQ %d\n", + pci_name(dev), irq); + /* + * Busses behind bridges are typically not listed in the MP-table. + * In this case we have to look up the IRQ based on the parent bus, + * parent slot, and pin number. The SMP code detects such bridged + * busses itself so we should get into this branch reliably. 
+ */ + if (irq < 0 && dev->bus->parent) { /* go back to the bridge */ + struct pci_dev * bridge = dev->bus->self; + + DebugIRQ("%s: pcibios_fixup_irqs() " + "go back to the bridge %s\n", + pci_name(dev), + pci_name(bridge)); + pin = (pin + PCI_SLOT(dev->devfn)) % 4; + irq = IO_PIC_get_PCI_irq_vector( + pci_domain_nr(bridge->bus), + bridge->bus->number, + PCI_SLOT(bridge->devfn), + pin, &irq_attr); + DebugIRQ("%s: pcibios_fixup_irqs() " + "new slot pin %d, IRQ %d\n", + pci_name(dev), pin, irq); + if (irq >= 0) + printk(KERN_WARNING "PCI: using PPB(B%d,I%d,P%d) to get irq %d\n", + bridge->bus->number, PCI_SLOT(bridge->devfn), pin, irq); + } + if (irq >= 0) { + printk(KERN_INFO "PCI->APIC IRQ transform: (D%d, B%d,S%d,F%d, P%d) -> %d\n", + pci_domain_nr(dev->bus), + dev->bus->number, + PCI_SLOT(dev->devfn), + PCI_FUNC(dev->devfn), + pin, irq); + dev->irq = irq; + } + if (dev->irq > 0) + continue; + } + if (dev->irq >= 0) { + irq = IO_PIC_get_fix_irq_vector( + pci_domain_nr(dev->bus), + dev->bus->number, + PCI_SLOT(dev->devfn), + PCI_FUNC(dev->devfn), + 0); + DebugIRQ("%s: pcibios_fixup_irqs() IO APIC " + "get fixed IRQ %d\n", + pci_name(dev), irq); + dev->irq = irq; + pci_write_config_byte(dev, PCI_INTERRUPT_PIN, 1); + pci_write_config_byte(dev, PCI_INTERRUPT_LINE, + irq); + } + } +#endif + /* + * Still no IRQ? Try to lookup one... 
+ */ + if (pin && !dev->irq) + pcibios_lookup_irq(dev, 0); + } +} + +int __init pcibios_irq_init(void) +{ + DebugIRQ("PCI: IRQ init\n"); + + if (pcibios_enable_irq || raw_pci_ops == NULL) + return 0; + + pirq_table = pirq_find_routing_table(); + + if (pirq_table) { + pirq_peer_trick(); + pirq_find_router(&pirq_router); + if (pirq_table->exclusive_irqs) { + int i; + for (i=0; i<16; i++) + if (!(pirq_table->exclusive_irqs & (1 << i))) + pirq_penalty[i] += 100; + } + /* If we're using the I/O APIC, avoid using the PCI IRQ routing table */ + if (io_apic_assign_pci_irqs) + pirq_table = NULL; + } + + pcibios_enable_irq = pirq_enable_irq; + + pcibios_fixup_irqs(); + return 0; +} + +static void pirq_penalize_isa_irq(int irq, int active) +{ + /* + * If any ISAPnP device reports an IRQ in its list of possible + * IRQ's, we try to avoid assigning it to PCI devices. + */ + if (irq < 16) { + if (active) + pirq_penalty[irq] += 1000; + else + pirq_penalty[irq] += 100; + } +} + +void pcibios_penalize_isa_irq(int irq, int active) +{ +#ifdef CONFIG_ACPI_PCI + if (!acpi_noirq) + acpi_penalize_isa_irq(irq, active); + else +#endif + pirq_penalize_isa_irq(irq, active); +} + +int pirq_enable_irq(struct pci_dev *dev) +{ + u8 pin; + struct pci_dev *temp_dev; + + DebugIRQ("%s: pirq_enable_irq() started IRQ %d\n", + pci_name(dev), dev->irq); + pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); + DebugIRQ("%s: pirq_enable_irq() pin # %d\n", + pci_name(dev), pin); + if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) { + char *msg = ""; + + pin--; /* interrupt pins are numbered starting from 1 */ + + if (io_apic_assign_pci_irqs) { + int irq; + struct io_apic_irq_attr irq_attr; + + irq = IO_PIC_get_PCI_irq_vector( + pci_domain_nr(dev->bus), + dev->bus->number, + PCI_SLOT(dev->devfn), + pin, &irq_attr); + DebugIRQ("%s: pirq_enable_irq() IO APIC get IRQ %d\n", + pci_name(dev), irq); + /* + * Busses behind bridges are typically not listed in the MP-table. 
+ * In this case we have to look up the IRQ based on the parent bus, + * parent slot, and pin number. The SMP code detects such bridged + * busses itself so we should get into this branch reliably. + */ + temp_dev = dev; + while (irq < 0 && dev->bus->parent) { /* go back to the bridge */ + struct pci_dev * bridge = dev->bus->self; + + DebugIRQ("%s: pirq_enable_irq() go back to " + "the bridge %s\n", + pci_name(dev), pci_name(bridge)); + pin = (pin + PCI_SLOT(dev->devfn)) % 4; + DebugIRQ("%s: pirq_enable_irq() PCI slot pin " + "is now %d\n", + pci_name(dev), pin); + irq = IO_PIC_get_PCI_irq_vector( + pci_domain_nr(bridge->bus), + bridge->bus->number, + PCI_SLOT(bridge->devfn), pin, &irq_attr); + DebugIRQ("%s: pirq_enable_irq() IO APIC get " + "now IRQ %d\n", + pci_name(dev), irq); + if (irq >= 0) + printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n", + pci_name(bridge), 'A' + pin, irq); + dev = bridge; + } + dev = temp_dev; + if (irq >= 0) { +#ifdef CONFIG_PCI_USE_VECTOR + irq = IO_APIC_VECTOR(irq); +#endif + printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n", + pci_name(dev), 'A' + pin, irq); + dev->irq = irq; + return 0; + } else { + msg = " Probably buggy MP table."; + } + } else if (pci_probe & PCI_BIOS_IRQ_SCAN) + msg = ""; + else + msg = " Please try using pci=biosirq."; + + /* With IDE legacy devices the IRQ lookup failure is not a problem.. 
*/ + if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE && !(dev->class & 0x5)) { + DebugIRQ("%s: pirq_enable_irq() device is IDE in " + "legacy mode\n", + pci_name(dev)); + return 0; + } + + printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of device %s.%s\n", + 'A' + pin, pci_name(dev), msg); + } + DebugIRQ("%s: pirq_enable_irq() returns with IRQ # %d\n", + pci_name(dev), dev->irq); + return 0; +} + +int pci_vector_resources(int last, int nr_released) +{ + int count = nr_released; + printk("pci_vector_resources: isn't implemented\n"); + return count; +} diff --git a/arch/l/pci/l_pci.c b/arch/l/pci/l_pci.c new file mode 100644 index 000000000000..d56b590fd792 --- /dev/null +++ b/arch/l/pci/l_pci.c @@ -0,0 +1,170 @@ +/* + * Low-Level PCI Support for Elbrus + * + */ +#include +#include "pci.h" +#include +#include + +#undef __KERNEL__ + +/**************************** DEBUG DEFINES *****************************/ +#undef DEBUG_PCI_MODE +#undef DebugPCI +#define DEBUG_PCI_MODE 0 /* PCI init */ +#define DebugPCI if (DEBUG_PCI_MODE) printk +/************************************************************************/ + + +/* + * Direct access to PCI hardware... 
+ */ + +/* + * Functions for accessing PCI configuration space + */ + +static inline int +l_pci_read_config_byte(unsigned int domain, unsigned int bus, + int devfn, int where, u8 *value) +{ + conf_inb(domain, bus, CONFIG_CMD(bus, devfn, where), value); + DebugPCI("l_pci_read_config_byte: domain %d, bus %d, devfn %d," + "where %d read value 0x%x\n", + domain, bus, devfn, where, *value); + return 0; +} + +static inline int +l_pci_read_config_word(unsigned int domain, unsigned int bus, + int devfn, int where, u16 *value) +{ + conf_inw(domain, bus, CONFIG_CMD(bus, devfn, where), value); + DebugPCI("l_pci_read_config_word: domain %d, bus %d, devfn %d," + "where %d read value 0x%x\n", + domain, bus, devfn, where, *value); + return 0; +} + +static inline int +l_pci_read_config_dword(unsigned int domain, unsigned int bus, + int devfn, int where, u32 *value) +{ + conf_inl(domain, bus, CONFIG_CMD(bus, devfn, where), value); + DebugPCI("l_pci_read_config_dword: domain %d, bus %d, devfn %d," + "where %d read value 0x%x\n", + domain, bus, devfn, where, *value); + return 0; +} + + +static inline int +l_pci_write_config_byte(unsigned int domain, unsigned int bus, + int devfn, int where, u8 value) +{ + conf_outb(domain, bus, CONFIG_CMD(bus, devfn, where), value); + DebugPCI("l_pci_write_config_byte: domain %d, bus %d, devfn %d," + "where %d write value 0x%x\n", + domain, bus, devfn, where, value); + return 0; +} + +static inline int +l_pci_write_config_word(unsigned int domain, unsigned int bus, + int devfn, int where, u16 value) +{ + conf_outw(domain, bus, CONFIG_CMD(bus, devfn, where), value); + DebugPCI("l_pci_write_config_word: domain %d, bus %d, devfn %d," + "where %d write value 0x%x\n", + domain, bus, devfn, where, value); + return 0; +} + +static inline int +l_pci_write_config_dword(unsigned int domain, unsigned int bus, int devfn, + int where, u32 value) +{ + conf_outl(domain, bus, CONFIG_CMD(bus, devfn, where), value); + DebugPCI("l_pci_write_config_dword: domain %d, bus 
%d, devfn %d, " + "where %d write value 0x%x\n", + domain, bus, devfn, where, value); + return 0; +} + +static int +l_pci_read(unsigned int domain, unsigned int bus, + unsigned int devfn, int reg, int len, u32 *value) +{ + unsigned long flags; + u16 tmp16; + u8 tmp8; + + if (!value || (bus > 0xff) || (devfn > 0xff) || (reg > 0xfff)) + return -EINVAL; + + raw_spin_lock_irqsave(&pci_config_lock, flags); + + switch (len) { + case 1: + l_pci_read_config_byte(domain, bus, devfn, reg, &tmp8); + *value = (u32)tmp8; + break; + case 2: + l_pci_read_config_word(domain, bus, devfn, reg, &tmp16); + *value = (u32)tmp16; + break; + case 4: + l_pci_read_config_dword(domain, bus, devfn, reg, value); + break; + } + + raw_spin_unlock_irqrestore(&pci_config_lock, flags); + + return 0; +} + +static int +l_pci_write(unsigned int domain, unsigned int bus, + unsigned int devfn, int reg, int len, u32 value) +{ + unsigned long flags; + + if ((bus > 0xff) || (devfn > 0xff) || (reg > 0xfff)) + return -EINVAL; + + raw_spin_lock_irqsave(&pci_config_lock, flags); + + switch (len) { + case 1: + l_pci_write_config_byte(domain, bus, devfn, reg, (u8)value); + break; + case 2: + l_pci_write_config_word(domain, bus, devfn, reg, (u16)value); + break; + case 4: + l_pci_write_config_dword(domain, bus, devfn, reg, value); + break; + } + + raw_spin_unlock_irqrestore(&pci_config_lock, flags); + + return 0; +} + +struct pci_raw_ops l_pci_direct_ops = { + .read = l_pci_read, + .write = l_pci_write, +}; + +int __init +pci_check_type_l(void) +{ + if (!HAS_MACHINE_L_SIC) + return (0); + + raw_pci_ops = &l_pci_direct_ops; + + return (1); +} + diff --git a/arch/l/pci/numa.c b/arch/l/pci/numa.c new file mode 100644 index 000000000000..d2b903442c07 --- /dev/null +++ b/arch/l/pci/numa.c @@ -0,0 +1,374 @@ +/* + * numa.c - Low-level PCI access for NUMA-Q machines + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "pci.h" + 
+/**************************** DEBUG DEFINES *****************************/ +#undef DEBUG_PCI_MODE +#undef DebugPCI +#define DEBUG_PCI_MODE 0 /* PCI init */ +#define DebugPCI if (DEBUG_PCI_MODE) printk +/************************************************************************/ + +/* + * IO Links of all nodes configuration + */ +int iolinks_num = 0; +iolinkmask_t iolink_iohub_map = IOLINK_MASK_NONE; +iolinkmask_t iolink_online_iohub_map = IOLINK_MASK_NONE; +int iolink_iohub_num = 0; +int iolink_online_iohub_num = 0; +iolinkmask_t iolink_rdma_map = IOLINK_MASK_NONE; +iolinkmask_t iolink_online_rdma_map = IOLINK_MASK_NONE; +int iolink_rdma_num = 0; +int iolink_online_rdma_num = 0; + +/* Add for rdma_sic module */ +EXPORT_SYMBOL(iolinks_num); +EXPORT_SYMBOL(iolink_iohub_map); +EXPORT_SYMBOL(iolink_online_iohub_map); +EXPORT_SYMBOL(iolink_iohub_num); +EXPORT_SYMBOL(iolink_online_iohub_num); +EXPORT_SYMBOL(iolink_rdma_map); +EXPORT_SYMBOL(iolink_online_rdma_map); +EXPORT_SYMBOL(iolink_rdma_num); +EXPORT_SYMBOL(iolink_online_rdma_num); + + +int get_domain_to_root_busnum(int domain) +{ + int busnum; + int node = iohub_domain_to_node(domain); + int link = iohub_domain_to_link(domain); + + if (domain < 0 || domain >= MAX_NUMIOLINKS) { + printk(KERN_ERR "get_domain_to_root_busnum() invalid domain " + "# %d (< 0 or >= max %d)\n", + domain, MAX_NUMIOLINKS); + return (-1); + } + busnum = mp_find_iolink_root_busnum(node, link); +// if (busnum >= 0) +// return (busnum); + if (domain == first_iohub_online()) { + busnum = 0; + } else { + busnum = 0; + } + + return (busnum); +} + +struct pci_bus *pcibios_scan_root_domain(int domain, int busnum) +{ + struct pci_bus *bus = NULL; + struct iohub_sysdata *sd; + int node, link; + LIST_HEAD(resources); + + node = iohub_domain_to_node(domain); + link = iohub_domain_to_link(domain); + DebugPCI("pcibios_scan_root_domain(): root bus # %d on IOHUB " + "domain #%d (node %d, link %d)\n", + busnum, domain, node, link); + while ((bus = 
pci_find_next_bus(bus)) != NULL) { + DebugPCI("pcibios_scan_root_domain() find next bus # %d\n", + bus->number); + if (bus->number == busnum) { + /* Already scanned */ + DebugPCI("pcibios_scan_root_domain() bus # %d already " + "scanned\n", busnum); + return bus; + } + } + + /* Allocate per-root-bus (not per bus) arch-specific data. + * TODO: leak; this memory is never freed. + * It's arguable whether it's worth the trouble to care. + */ + sd = kzalloc(sizeof(*sd), GFP_KERNEL); + if (!sd) { + printk(KERN_ERR "PCI: OOM, not probing PCI bus %02x\n", busnum); + return NULL; + } + + sd->domain = domain; + sd->node = node; + sd->link = link; + + printk(KERN_INFO "PCI: Probing PCI hardware (bus %02x)\n", busnum); + mp_pci_add_resources(&resources, sd); + bus = pci_scan_root_bus(NULL, busnum, &pci_root_ops, sd, &resources); + if (!bus) { + DebugPCI("pcibios_scan_root_domain() scanning failed\n"); + pci_free_resource_list(&resources); + kfree(sd); + } else { + DebugPCI("pcibios_scan_root_domain() scanning returned bus " + "#%d %s\n", bus->number, bus->name); + } + + return bus; +} + +struct pci_bus *pci_scan_bus_on_domain(int busno, struct pci_ops *ops, int domain) +{ + struct pci_bus *bus = NULL; + struct iohub_sysdata *sd; + int node, link; + LIST_HEAD(resources); + + node = iohub_domain_to_node(domain); + link = iohub_domain_to_link(domain); + DebugPCI("pci_scan_bus_on_domain(): bus # %d on IOHUB " + "domain #%d (node %d, link %d)\n", + busno, domain, node, link); + /* + * Allocate per-root-bus (not per bus) arch-specific data. + * TODO: leak; this memory is never freed. + * It's arguable whether it's worth the trouble to care. 
+ */ + sd = kzalloc(sizeof(*sd), GFP_KERNEL); + if (!sd) { + printk(KERN_ERR "PCI: OOM, skipping PCI bus %02x\n", busno); + return NULL; + } + sd->domain = domain; + sd->node = node; + sd->link = link; + mp_pci_add_resources(&resources, sd); + bus = pci_scan_root_bus(NULL, busno, ops, sd, &resources); + if (bus) { + DebugPCI("pci_scan_bus_on_domain() scanning returned bus " + "#%d %s\n", bus->number, bus->name); + pci_scan_child_bus(bus); + } else { + DebugPCI("pci_scan_bus_on_domain() scanning failed\n"); + kfree(sd); + } + + return bus; +} + +#ifdef HAVE_MULTIROOT_BUS_PCI_DOMAINS +/* + * Root bus is separate for each PCI domain (as on e2k) + */ +static int __init pci_scan_multiroot_bus_domains(void) +{ + int root_domain; + int domain = 0; + int root_busnum; + int node, link; + + DebugPCI("pci_scan_multiroot_bus_domains() started\n"); + root_domain = first_iohub_online(); + if (root_domain < 0 || root_domain >= MAX_NUMIOLINKS) { + printk("PCI: none IOHUB found at the system\n"); + return (-1); + } + node = iohub_domain_to_node(domain); + link = iohub_domain_to_link(domain); + DebugPCI("pci_scan_multiroot_bus_domains() root IOHUB: domain #%d " + "(node %d, link %d)\n", + root_domain, node, link); + root_busnum = get_domain_to_root_busnum(root_domain); + if (root_busnum < 0 || root_domain >= 256) { + printk("PCI: invalid root bus # %d for IOHUB domain #%d " + "(node %d, link %d)\n", + root_busnum, root_domain, node, link); + return (-1); + } + DebugPCI("pci_scan_multiroot_bus_domains() root IOHUB: root bus %d\n", + root_busnum); + pci_root_bus = pcibios_scan_root_domain(root_domain, root_busnum); + if (pci_root_bus) { + pci_bus_add_devices(pci_root_bus); + DebugPCI("pci_scan_multiroot_bus_domains() root IOHUB: root " + "bus %s devices was added\n", + pci_root_bus->name); + } + if (num_online_iohubs() <= 1) { + DebugPCI("pci_scan_multiroot_bus_domains() only one IOHUB " + "detected\n"); + return (0); + } + for_each_online_iohub(domain) { + int busnum, node, link; + 
struct pci_bus *domain_root_bus; + + if (domain == root_domain) + continue; + busnum = get_domain_to_root_busnum(domain); + node = iohub_domain_to_node(domain); + link = iohub_domain_to_link(domain); + if (busnum < 0 || busnum >= 256) { + printk("PCI: invalid root bus # %d for IOHUB " + "domain #%d (node %d, link %d)\n", + busnum, domain, node, link); + continue; + } + printk(KERN_INFO "Scanning PCI root bus %d of IOHUB domain #%d " + "(node %d, link %d)\n", + busnum, domain, node, link); + domain_root_bus = pci_scan_bus_on_domain(busnum, &pci_root_ops, + domain); + if (domain_root_bus) { + pci_bus_add_devices(domain_root_bus); + printk(KERN_INFO "PCI: created domain #%d (node %d, " + "link %d) from root bus %s\n", + domain, node, link, domain_root_bus->name); + } else { + printk(KERN_INFO "PCI: empty domain #%d (node %d, " + "link %d) will ignore\n", + domain, node, link); + } + } + return 0; +} +#endif /* HAVE_MULTIROOT_BUS_PCI_DOMAINS */ + +#ifdef HAVE_COMMONROOT_BUS_PCI_DOMAINS +struct pci_bus *pci_scan_root_bus_domain(int domain) +{ + struct pci_bus *bus = NULL; + struct iohub_sysdata *sd; + int node, link; + int root_slot, slot; + unsigned char subordinate; + LIST_HEAD(resources); + + node = iohub_domain_to_node(domain); + link = iohub_domain_to_link(domain); + DebugPCI("pci_scan_root_bus_domain(): IOHUB domain #%d (node %d, " + "link %d)\n", + domain, node, link); + sd = kzalloc(sizeof(*sd), GFP_KERNEL); + if (!sd) { + printk(KERN_ERR "PCI: OOM, not probing root PCI domain %d\n", + domain); + return NULL; + } + + sd->domain = domain; + sd->node = node; + sd->link = link; + mp_pci_add_resources(&resources, sd); + + bus = pci_create_root_bus(NULL, L_IOHUB_ROOT_BUS_NUM, &pci_root_ops, sd, + &resources); + if (!bus) { + pr_err("PCI: could not create root PCI bus on " + "domain %d (node %d, link %d)\n", + domain, node, link); + kfree(sd); + return NULL; + } + DebugPCI("pci_scan_root_bus_domain() created root PCI bus %s\n", + bus->name); + + root_slot = 
pci_iohub_domain_to_slot(domain); + for (slot = 0; slot < L_IOHUB_SLOTS_NUM; slot++) { + int devfn = PCI_DEVFN(root_slot + slot, 0); + int devs = pci_scan_slot(bus, devfn); + DebugPCI("pci_scan_root_bus_domain() detected %d devices " + "on root PCI slot #%d\n", + devs, root_slot + slot); + } + subordinate = pci_scan_root_child_bus(bus); + pci_bus_add_devices(bus); + DebugPCI("pci_scan_root_bus_domain() max subordinated bus on PCI " + "domain %d is 0x%02x\n", domain, subordinate); + return bus; +} + +/* + * Root bus is common for all PCI domains (as on e90s) + */ +static int pci_scan_commonroot_bus_domains(void) +{ + int root_domain; + int domain = 0; + int node, link; + + DebugPCI("pci_scan_commonroot_bus_domains() started\n"); + root_domain = first_iohub_online(); + if (root_domain < 0 || root_domain >= MAX_NUMIOLINKS) { + pr_info("PCI: none IOHUB found at the system\n"); + return -EINVAL; + } + node = iohub_domain_to_node(root_domain); + link = iohub_domain_to_link(root_domain); + DebugPCI("pci_scan_commonroot_bus_domains() root IOHUB: domain #%d " + "(node %d, link %d)\n", + root_domain, node, link); + + pci_root_bus = pci_scan_root_bus_domain(root_domain); + if (!pci_root_bus) { + printk(KERN_ERR "PCI: could not create root PCI bus " + "on domain %d\n", root_domain); + return -ENOMEM; + } + if (num_online_iohubs() <= 1) { + DebugPCI("pci_scan_commonroot_bus_domains() only one IOHUB " + "detected\n"); + return 0; + } + for_each_online_iohub(domain) { + int node, link; + struct pci_bus *domain_root_bus; + + if (domain == root_domain) + continue; + node = iohub_domain_to_node(domain); + link = iohub_domain_to_link(domain); + pr_info("Scanning PCI root bus of IOHUB domain #%d " + "(node %d, link %d)\n", + domain, node, link); + domain_root_bus = pci_scan_root_bus_domain(domain); + if (domain_root_bus) { + pr_info("PCI: created domain #%d (node %d, " + "link %d)\n", + domain, node, link); + } else { + pr_info("PCI: empty domain #%d (node %d, " + "link %d)\n", + 
domain, node, link); + } + } + return 0; +} +#endif /* HAVE_COMMONROOT_BUS_PCI_DOMAINS */ + +static int __init pci_numa_init(void) +{ + int ret; + /* lock consoles to prevent output to pci consoles while scanning */ + console_lock(); +#if defined(HAVE_MULTIROOT_BUS_PCI_DOMAINS) + ret = pci_scan_multiroot_bus_domains(); +#elif defined(HAVE_COMMONROOT_BUS_PCI_DOMAINS) + ret = pci_scan_commonroot_bus_domains(); +#else + #error "PCI domain root bus type is undefined" + ret = -ENODEV; +#endif + console_unlock(); + return ret; +} + +subsys_initcall(pci_numa_init); diff --git a/arch/l/pci/pci.c b/arch/l/pci/pci.c new file mode 100644 index 000000000000..2f27a68ba47e --- /dev/null +++ b/arch/l/pci/pci.c @@ -0,0 +1,744 @@ +/* + * Low-Level PCI Access for i386 machines + * + * Copyright 1993, 1994 Drew Eckhardt + * Visionary Computing + * (Unix and Linux consulting and custom programming) + * Drew@Colorado.EDU + * +1 (303) 786-7975 + * + * Drew's work was sponsored by: + * iX Multiuser Multitasking Magazine + * Hannover, Germany + * hm@ix.de + * + * Copyright 1997--2000 Martin Mares + * + * For more information, please consult the following manuals (look at + * http://www.pcisig.com/ for how to get them): + * + * PCI BIOS Specification + * PCI Local Bus Specification + * PCI to PCI Bridge Specification + * PCI System Design Guide + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pci.h" + +#undef DEBUG_PCI_MODE +#undef DebugPCI +#define DEBUG_PCI_MODE 0 /* PCI init */ +#define DebugPCI if (DEBUG_PCI_MODE) printk +#define DebugBUSINFO if (DEBUG_PCI_MODE) Debug_BUS_INFO +#define DebugRESINFO if (DEBUG_PCI_MODE) Debug_RES_INFO +#define DebugALLRESINFO if (DEBUG_PCI_MODE) Debug_ALL_RES_INFO +#define DebugDEVINFO if (DEBUG_PCI_MODE) Debug_DEV_INFO + +void +Debug_RES_INFO(struct resource *resource, int num) +{ + printk("RESOURCE 0x%px #%d %s\n", resource, num, + (resource->name) ? 
resource->name : "???"); + printk(" from 0x%llx to 0x%llx flags 0x%lx\n", + resource->start, resource->end, resource->flags); +} + +void +Debug_ALL_RES_INFO(struct resource *resource, int num) +{ + int i; + + printk("ALL RESOURCES 0x%px number of resources %d\n", + resource, num); + for (i = 0; i < num; i ++) { + DebugRESINFO(&resource[i], i); + } +} + +void +Debug_DEV_INFO(struct pci_dev *dev, int res) +{ + printk("DEV 0x%px BUS 0x%px bus this device bridges to 0x%px\n", + dev, dev->bus, dev->subordinate); + printk(" %s devfn %x vendor %x device %x class (base,sub,prog-if) " + "%06x\n", pci_name(dev), + dev->devfn, dev->vendor, dev->device, dev->class); + printk(" config space size 0x%x IRQ %d\n", + dev->cfg_size, dev->irq); + if (res) { + printk("I/O and memory regions + expansion ROMs :\n"); + DebugALLRESINFO(dev->resource, PCI_NUM_RESOURCES); + } +} + +void +Debug_BUS_INFO(struct pci_bus *bus, int self, int res) +{ + int i; + + printk("BUS 0x%px parent 0x%px self 0x%px\n", + bus, bus->parent, bus->self); + printk(" %s # %02x primary %02x\n", + bus->name, bus->number, bus->primary); + if (self && bus->self) { + printk("Bridge device as seen by parent:\n"); + DebugDEVINFO(bus->self, res); + } + if (res && bus->resource) { + printk("Address space routed to this bus:\n"); + for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i ++) { + if (bus->resource[i]) { + printk(" SPACE #%d\n", i); + DebugRESINFO(bus->resource[i], i); + } + } + } +} + + +/* + * Handle resources of PCI devices. If the world were perfect, we could + * just allocate all the resource regions and do nothing more. It isn't. + * On the other hand, we cannot just re-allocate all devices, as it would + * require us to know lots of host bridge internals. So we attempt to + * keep as much of the original configuration as possible, but tweak it + * when it's found to be wrong. 
+ * + * Known BIOS problems we have to work around: + * - I/O or memory regions not configured + * - regions configured, but not enabled in the command register + * - bogus I/O addresses above 64K used + * - expansion ROMs left enabled (this may sound harmless, but given + * the fact the PCI specs explicitly allow address decoders to be + * shared between expansion ROMs and other resource regions, it's + * at least dangerous) + * + * Our solution: + * (1) Allocate resources for all buses behind PCI-to-PCI bridges. + * This gives us fixed barriers on where we can allocate. + * (2) Allocate resources for all enabled devices. If there is + * a collision, just mark the resource as unallocated. Also + * disable expansion ROMs during this step. + * (3) Try to allocate resources for disabled devices. If the + * resources were assigned correctly, everything goes well, + * if they weren't, they won't disturb allocation of other + * resources. + * (4) Assign new addresses to resources which were either + * not configured at all or misconfigured. If explicitly + * requested by the user, configure expansion ROM address + * as well. 
+ */ + +static void __init pcibios_allocate_bus_resources(struct list_head *bus_list) +{ + struct list_head *ln; + struct pci_bus *bus; + struct pci_dev *dev; + int idx; + struct resource *r, *pr; + + /* Depth-First Search on bus tree */ + DebugPCI("pcibios_allocate_bus_resources() started for bus list\n"); + for (ln=bus_list->next; ln != bus_list; ln=ln->next) { + bus = pci_bus_b(ln); + DebugBUSINFO(bus, 0, 0); + if ((dev = bus->self)) { + DebugDEVINFO(dev, 0); + for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) { + r = &dev->resource[idx]; + DebugRESINFO(r, idx); + if (!r->flags) + continue; + pr = pci_find_parent_resource(dev, r); + DebugPCI("pcibios_allocate_bus_resources() " + "parent resource is 0x%px : \n", pr); + if (pr) { + DebugPCI("PARENT "); + DebugRESINFO(pr, 0); + } + if ((!r->start && !r->end) || !pr || request_resource(pr, r) < 0) { + printk(KERN_ERR "PCI: Cannot allocate resource region %d of bridge %s\n", idx, pci_name(dev)); + /* Something is wrong with the region. + Invalidate the resource to prevent child + resource allocations in this range. 
 */
				r->flags = 0;
			}
		}
		/* Depth-first recursion into child buses */
		pcibios_allocate_bus_resources(&bus->children);
	}
}

extern void l_request_msi_addresses_window(struct pci_dev *pdev);

/*
 * Claim the BARs that firmware already programmed, in two passes:
 * pass 0 handles devices whose decode is enabled in PCI_COMMAND (and
 * switches off any enabled expansion ROM), pass 1 handles the rest.
 * A BAR that cannot be claimed is reset to start 0 so that a fresh
 * address is assigned later by the allocator.
 */
static void __init pcibios_allocate_resources(int pass)
{
	struct pci_dev *dev = NULL;
	int idx, disabled;
	u16 command;
	struct resource *r;

	DebugPCI("pcibios_allocate_resources() started for pass %d\n", pass);
	for_each_pci_dev(dev) {
		if (!pass) {
			/* Here is the only place where we can withdraw
			 * MSI addresses addresses from possible pci
			 * addresses range
			 */
			l_request_msi_addresses_window(dev);
		}
		DebugDEVINFO(dev, 0);
		pci_read_config_word(dev, PCI_COMMAND, &command);
		/* The six standard BARs; the ROM BAR is handled below */
		for(idx = 0; idx < 6; idx++) {
			r = &dev->resource[idx];
			DebugRESINFO(r, idx);
			if (r->parent) {	/* Already allocated */
				DebugPCI("pcibios_allocate_resources() "
					"Already allocated\n");
				continue;
			}
			if (!r->start && !r->end) {	/* Address not assigned at all */
				DebugPCI("pcibios_allocate_resources() "
					"Address not assigned at all\n");
				continue;
			}
			if (r->flags & IORESOURCE_IO)
				disabled = !(command & PCI_COMMAND_IO);
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			/* pass 0 <=> decode enabled, pass 1 <=> disabled */
			if (pass == disabled) {
				DebugPCI("PCI: Resource %08llx-%08llx (f=%lx, "
					"disabled=%d, pass=%d)\n",
					r->start, r->end, r->flags,
					disabled, pass);
				if (pci_claim_resource(dev, idx) < 0) {
					printk(KERN_ERR "PCI: Cannot allocate "
						"resource region %d of device "
						"%s\n", idx, pci_name(dev));
					/* We'll assign a new address later */
					r->end -= r->start;
					r->start = 0;
				}
			}
		}
		if (!pass) {
			r = &dev->resource[PCI_ROM_RESOURCE];
			if (r->flags & PCI_ROM_ADDRESS_ENABLE) {
				/* Turn the ROM off, leave the resource region,
				 * but keep it unregistered.
				 */
				u32 reg;
				DebugPCI("PCI: Switching off ROM of %s\n", pci_name(dev));
				r->flags &= ~PCI_ROM_ADDRESS_ENABLE;
				pci_read_config_dword(dev, dev->rom_base_reg, &reg);
				pci_write_config_dword(dev, dev->rom_base_reg, reg & ~PCI_ROM_ADDRESS_ENABLE);
			}
		}
	}
}

/* Claim firmware-assigned resources first, then the remaining ones */
void __init pcibios_resource_survey(void)
{
	DBG("PCI: Allocating resources\n");
	pcibios_allocate_bus_resources(&pci_root_buses);
	pcibios_allocate_resources(0);
	pcibios_allocate_resources(1);
}

/*
 * Enable I/O and/or memory decoding for the resources selected by @mask.
 * Returns -EINVAL when a requested memory resource could not be placed
 * (start 0 with nonzero end marks a collision), 0 otherwise.
 */
int pcibios_enable_resources(struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	int idx;
	struct resource *r;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;
	for(idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
		/* Only set up the requested stuff */
		if (!(mask & (1<<idx)))
			continue;
		r = &dev->resource[idx];
		if (!r->start && r->end && !(r->flags & IORESOURCE_IO)) {
			printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
			return -EINVAL;
		}
		if (r->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	/* An assigned ROM BAR also needs memory decoding enabled */
	if (dev->resource[PCI_ROM_RESOURCE].start)
		cmd |= PCI_COMMAND_MEMORY;
	if (cmd != old_cmd) {
		printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}

/* "slink=<freq>" boot parameter: requested SLINK frequency (parsed
 * with memparse, so k/M/G suffixes are accepted); 0 = leave default */
static unsigned long l_slink_freq;

static int __init l_slink_freq_setup(char *str)
{
	l_slink_freq = memparse(str, &str);
	return 1;
}

__setup("slink=", l_slink_freq_setup);

/* SLINK PLL control registers in the VPPB bridge's config space */
#define SLINK_PLL_MULTIPLIER	0x6c
#define SLINK_PLL_STROB		0x6e
# define SLPLLM_STB_UP		0xa5
#define SLINK_PLLSTS		0x6f
# define SLINK_PLLSTS_0_LOCKED	0x3

/* Per-board PLL parameters; frequency = (multiplier + 1) * mult / div */
struct l_slink_freqs {
	unsigned long dflt, min, max, mult, div;
} l_slink_freqs[] = {
	{
	.dflt = 14, .min = 7, .max = 25, .mult = 50 * 1000 * 1000, .div = 3}, {
	.dflt = 19, .min = 9, .max = 25, .mult = 25 * 1000 * 1000, .div = 1}
};

/* PLL lock poll limit: 100000 iterations of 10us each (~1 s) */
#define SLINK_TIMEOUT_10USEC (1 * 1000 * 100)
static void
l_quirk_slink_freq(struct pci_dev *pdev)
{
	/* Reprogram the SLINK PLL multiplier to reach the frequency
	 * requested with the "slink=" boot parameter.  The multiplier
	 * is moved one step at a time towards the target; after each
	 * step we wait for the PLL to relock before stepping again.
	 * No-op when "slink=" was not given. */
	struct l_slink_freqs *f;
	int i;
	unsigned short v, curr;
	if (l_slink_freq == 0)
		return;
	/* Strobe must be deasserted before touching the multiplier */
	pci_write_config_byte(pdev, SLINK_PLL_STROB, 0);
	pci_read_config_word(pdev, SLINK_PLL_MULTIPLIER, &curr);
	/* Identify the board flavour by its default multiplier value */
	for (i = 0; i < ARRAY_SIZE(l_slink_freqs); i++) {
		f = &l_slink_freqs[i];
		if (f->dflt == curr)
			break;
	}
	if (i == ARRAY_SIZE(l_slink_freqs)) {
		pr_err("slink: unknown default multiplier: %d\n", curr);
		return;
	}
	/* Frequency of multiplier m is (m + 1) * mult / div */
	if (l_slink_freq > (f->max + 1) * f->mult / f->div ||
		l_slink_freq < (f->min +1) * f->mult / f->div) {
		pr_err("slink: requested frequency out of bounds: %ld\n",
			l_slink_freq);
		return;
	}
	/* Target multiplier, rounded to the nearest achievable step */
	v = ((l_slink_freq + (f->mult / f->div / 2)) * f->div) / f->mult - 1;
	do {
		unsigned short last;
		if (v == curr) {
			pr_info("slink: frequency set to: %ld (SLPLLM=%d)\n",
				(v + 1) * f->mult / f->div, v);
			break;
		}
		/* Step the multiplier one unit towards the target.
		 * NOTE(review): step direction is chosen relative to
		 * f->dflt rather than to the current value, which
		 * assumes curr never crosses over the target — confirm
		 * against the SLINK PLL documentation. */
		if (v < f->dflt)
			curr--;
		else
			curr++;
		pci_write_config_byte(pdev, SLINK_PLL_STROB, 0);
		pci_write_config_word(pdev, SLINK_PLL_MULTIPLIER, curr);
		pci_write_config_byte(pdev, SLINK_PLL_STROB, SLPLLM_STB_UP);
		/* Poll the lock status bits, 10us per iteration (~1 s max) */
		for (i = 0; i < SLINK_TIMEOUT_10USEC; i++) {
			unsigned char sts;
			pci_read_config_byte(pdev, SLINK_PLLSTS, &sts);
			if ((sts & SLINK_PLLSTS_0_LOCKED) ==
					SLINK_PLLSTS_0_LOCKED)
				break;
			udelay(10);
		}
		if (i == SLINK_TIMEOUT_10USEC) {
			panic("slink: timeout\n");
		}
		/* Read the multiplier back to verify the write took effect */
		pci_write_config_byte(pdev, SLINK_PLL_STROB, 0);
		pci_read_config_word(pdev, SLINK_PLL_MULTIPLIER, &last);
		if (last != curr) {
			panic("slink: failed to set frequency to: %ld (SLPLLM=%d)\n",
				(curr + 1) * f->mult / f->div, curr);
			break;
		}
	} while (1);

}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_VPPB,
			l_quirk_slink_freq);

static void fixup_milandr(struct pci_dev *pdev)
{
	struct resource *r = &pdev->resource[0];
	/* Milandr shows wrong bar size: force BAR0 to its real 16 MiB span */
	if (r->flags)
		r->end = r->start + (16 << 20) - 1;

}
DECLARE_PCI_FIXUP_HEADER(0x16c3, 0xabcd,
fixup_milandr); +DECLARE_PCI_FIXUP_HEADER(0x16c3, 0x0bad, fixup_milandr); + +#define PCI_SCBA_0 0xf0 /* System commutator base address [31:00] */ +#define B0_BCTRL 0x13e /* 8/0x03 PCIe bridge control 0:N:0{0x3e} */ +#define B1_BCTRL 0x23e /* 8/0x1c PCI bridge control m:0:0{0x3e} */ + +static const struct pci_device_id l_iohub_bridges[] = { + { + PCI_DEVICE(PCI_VENDOR_ID_MCST_PCIE_BRIDGE, + PCI_DEVICE_ID_MCST_PCIE_BRIDGE) + }, + { + PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_PCI_BRIDGE), + }, + {} +}; +static int l_check_iohub_vga_enable(struct pci_dev *pdev) +{ + int ret = 0, reg_off = 0; + struct pci_dev *dev; + struct pci_bus *bus; + + /* iohub errata: + * ERR 48 - PCI-Express root hub (pcie) + * vga-enable bit is missing from PCI-Express root hub's + * configuration space + */ + + bus = pdev->bus; + + while (bus) { + struct pci_dev *bridge = bus->self; + const struct pci_device_id *id; + if (!bridge) + goto next; + + id = pci_match_id(l_iohub_bridges, bridge); + if (!id) + goto next; + if (id->device == PCI_DEVICE_ID_MCST_PCIE_BRIDGE) { + reg_off = B0_BCTRL; + break; + } else if (id->device == PCI_DEVICE_ID_MCST_PCI_BRIDGE) { + reg_off = B1_BCTRL; + break; + } else { + u16 l; + pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, + &l); + if (!(l & PCI_BRIDGE_CTL_VGA)) + goto out; + } +next: + bus = bus->parent; + } + if (reg_off == 0) + goto out; + + dev = NULL; + if ((dev = pci_get_device(PCI_VENDOR_ID_ELBRUS, + PCI_DEVICE_ID_MCST_VIRT_PCI_BRIDGE, dev))) { + u32 addr; + void __iomem *scrb; + pci_read_config_dword(dev, PCI_SCBA_0, &addr); + addr &= ~3; + scrb = ioremap(addr, 0x1000); + if (readb(scrb + reg_off) & PCI_BRIDGE_CTL_VGA) + ret = 1; + iounmap(scrb); + } +out: + return ret; +} + +static const struct pci_device_id l_iohub2_bridges[] = { + { + PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_PCIe1), + }, + { + PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_PCIe8), + }, + { + PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, + 
PCI_DEVICE_ID_MCST_VPPB),
+	},
+	{}
+};
+
+/*
+ * Check that every bridge between pdev and the root forwards VGA
+ * accesses.  Returns 1 when the whole path is (or has been made)
+ * VGA-enabled, 0 otherwise.
+ */
+static int l_check_iohub2_vga_enable(struct pci_dev *pdev)
+{
+	int ret = 1;
+	struct pci_bus *bus;
+
+	/* iohub2 errata: ERR 02 - vga_mode (pcie):
+	 * PCIE Root Complexes do not support VGA Mode which means
+	 * that "VGA Enable" and "VGA 16 bit Decode" registers are
+	 * read-only and set to 0 by default.
+	 * So set up the first device we come across.
+	 */
+
+	bus = pdev->bus;
+
+	while (bus) {
+		struct pci_dev *bridge = bus->self;
+		const struct pci_device_id *id;
+		u16 l;
+		if (!bridge)
+			goto next;
+
+		pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &l);
+
+		id = pci_match_id(l_iohub2_bridges, bridge);
+		if (id) {
+			/* Known iohub2 bridge: force VGA forwarding on. */
+			l |= PCI_BRIDGE_CTL_VGA;
+			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, l);
+		} else {
+			/* Other bridges must already forward VGA. */
+			if (!(l & PCI_BRIDGE_CTL_VGA)) {
+				ret = 0;
+				goto out;
+			}
+		}
+next:
+		bus = bus->parent;
+	}
+out:
+	return ret;
+}
+
+/*
+ * Class fixup: make the first fully usable VGA controller the default
+ * VGA device.  The device must decode I/O and memory and, depending on
+ * the iohub generation, the bridge path toward it must be VGA-enabled.
+ */
+static void fixup_vga(struct pci_dev *pdev)
+{
+	u16 cmd;
+
+	if (vga_default_device())
+		return;
+
+	if (!HAS_MACHINE_E2K_IOHUB)
+		return;
+
+	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+	if ((cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) !=
+			(PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
+		return;
+
+	if ((iohub_generation(pdev) == 0 &&
+		l_check_iohub_vga_enable(pdev)) ||
+			(iohub_generation(pdev) == 1 &&
+				l_check_iohub2_vga_enable(pdev))) {
+
+		vga_set_default_device(pdev);
+	}
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
+			PCI_CLASS_DISPLAY_VGA, 8, fixup_vga);
+
+#define MGA2_REGS_SIZE	(512 * 1024)
+#define MGA2_DC0_CTRL	0x00800
+# define MGA2_DC_CTRL_NATIVEMODE	(1 << 0)
+# define MGA2_DC_CTRL_DIS_VGAREGS	(1 << 1)
+# define MGA2_DC_CTRL_SOFT_RESET	(1 << 31)
+
+#define PCI_VCFG	0x40
+# define PCI_MGA2_TRANSACTIONS_PENDING	(1 << 3)
+
+/* BAR2 mapping and saved control register, shared by freeze/thaw. */
+static void __iomem *mga2_regs_base;
+static u32 mga2_ctrl_val;
+/*
+ * Early fixup for the MGA2 graphics controller: save the DC0 control
+ * register, assert soft reset with VGA registers disabled, then drop
+ * bus mastering and poll (up to 200 ms in 100 us steps) until no
+ * transactions are pending.  thaw_mga2() undoes this at header-fixup
+ * time.
+ */
+static void freeze_mga2(struct pci_dev *dev)
+{
+	int i;
+	u32 r;
+	u16 cmd, vcfg;
+
+	pci_read_config_dword(dev, PCI_BASE_ADDRESS_2, &r);
+	r &= ~(MGA2_REGS_SIZE - 1);
+	if (r == 0) {
+		/* BAR is not allocated, ignore for now */
+		pr_err("%s: MGA2 registers BAR #2 is not allocated\n",
+				pci_name(dev));
+		return;
+	}
+	mga2_regs_base = ioremap_nocache(r, MGA2_REGS_SIZE);
+	if (WARN_ON(mga2_regs_base == NULL))
+		return;
+
+	mga2_ctrl_val = readl(mga2_regs_base + MGA2_DC0_CTRL);
+	writel(mga2_ctrl_val | MGA2_DC_CTRL_SOFT_RESET | MGA2_DC_CTRL_NATIVEMODE |
+		MGA2_DC_CTRL_DIS_VGAREGS,
+		mga2_regs_base + MGA2_DC0_CTRL);
+
+	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+	pci_write_config_word(dev, PCI_COMMAND, cmd & ~PCI_COMMAND_MASTER);
+	for (i = 0; i < 200 * 10; i++) {
+		pci_read_config_word(dev, PCI_VCFG, &vcfg);
+		if (!(vcfg & PCI_MGA2_TRANSACTIONS_PENDING))
+			break;
+		udelay(100);
+	}
+	WARN_ON(i == 200 * 10);	/* timed out waiting for idle */
+	pci_write_config_word(dev, PCI_COMMAND, cmd);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_MGA2, freeze_mga2);
+
+/* Restore the DC0 control register saved by freeze_mga2(). */
+static void thaw_mga2(struct pci_dev *dev)
+{
+	if (!mga2_regs_base)
+		return;
+	writel(mga2_ctrl_val, mga2_regs_base + MGA2_DC0_CTRL);
+	iounmap(mga2_regs_base);
+	mga2_regs_base = NULL;
+	dev_info(&dev->dev, "mga2 fixup done\n");
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_MGA2, thaw_mga2);
+
+#define PCI_MCST_CFG	0x40
+#define PCI_MCST_RESET		(1 << 6)
+#define PCI_MCST_IOMMU_DSBL	(1 << 5)
+
+#define MGA2_INTLEVEL	0x00008
+#define MGA2_INTMODE	0x0000C
+# define MGA2_INT_B_SETRST	(1U << 31)
+# define MGA25_INT_B_HDA1	(1 << 28)
+# define MGA25_INT_B_HDA2	(1 << 29)
+
+/*
+ * disable iommu translation to prevent iommu fault at vga-console
+*/
+static void mga25_disable_iommu_translation(struct pci_dev *dev)
+{
+	u32 v = MGA2_INT_B_SETRST | MGA25_INT_B_HDA1 | MGA25_INT_B_HDA2;
+	/* NOTE(review): raw resource address used with boot_writel()
+	 * without ioremap() — presumably valid for this platform's
+	 * early boot I/O helpers; confirm. */
+	void __iomem *b = (void *)pci_resource_start(dev, 0) + 0x1c00;
+	u8 tmp;
+	pci_read_config_byte(dev, PCI_MCST_CFG, &tmp);
+	pci_write_config_byte(dev, PCI_MCST_CFG, tmp | PCI_MCST_IOMMU_DSBL);
+
+	/*enable hda interrupts*/
+	boot_writel(v, b + MGA2_INTLEVEL);
+	boot_writel(v, b + MGA2_INTMODE);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_MGA25,
+			mga25_disable_iommu_translation);
+
+/*
+ * pci_walk_bus() callback: if the device advertises PCIe Extended Tags
+ * in DEVCAP and currently has them enabled in DEVCTL, clear the enable
+ * bit.  Always returns 0 so the walk visits every device.
+ */
+static int pci_disable_extended_tags(struct pci_dev *dev, void *ign)
+{
+	u32 cap;
+	u16 ctl;
+	int ret;
+
+	if (!pci_is_pcie(dev))
+		return 0;
+
+	ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
+	if (ret)
+		return 0;
+
+	if (!(cap & PCI_EXP_DEVCAP_EXT_TAG))
+		return 0;
+
+	ret = pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
+	if (ret)
+		return 0;
+
+	if (ctl & PCI_EXP_DEVCTL_EXT_TAG) {
+		pci_info(dev, "iohub2: disabling Extended Tags\n");
+		pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
+				PCI_EXP_DEVCTL_EXT_TAG);
+	}
+	return 0;
+}
+
+/*
+ * iohub generation 1, revision <= 3: disable Extended Tags on the
+ * port itself and on every device below its subordinate bus.
+ */
+static void l_quirk_no_ext_tags(struct pci_dev *pdev)
+{
+	if (iohub_generation(pdev) != 1 || iohub_revision(pdev) > 3)
+		return;
+
+	pci_disable_extended_tags(pdev, NULL);
+	if (pdev->subordinate) {
+		pci_walk_bus(pdev->subordinate,
+			pci_disable_extended_tags, NULL);
+	}
+}
+/* Must override pci_configure_device() */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_PCIe1, l_quirk_no_ext_tags);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MCST_TMP, PCI_DEVICE_ID_MCST_PCIe8, l_quirk_no_ext_tags);
+
+
+/*
+ * Called after each bus is probed, but before its children
+ * are examined.
+ */
+void pcibios_fixup_bus(struct pci_bus *b)
+{
+	pci_read_bridge_bases(b);
+}
+
+/*
+ * pcibios_add_bus resets flags for MCST PCI-E bridges which
+ * have been inherited from parent MCST PCI bridge
+ */
+void pcibios_add_bus(struct pci_bus *bus)
+{
+	struct pci_dev *dev = bus->self;
+	if (dev) {
+		if ((dev->vendor == PCI_VENDOR_ID_MCST_TMP) &&
+			((dev->device == PCI_DEVICE_ID_MCST_PCIe1) ||
+			(dev->device == PCI_DEVICE_ID_MCST_PCIe8) ||
+			(dev->device == PCI_DEVICE_ID_MCST_PCIE_X4) ||
+			(dev->device == PCI_DEVICE_ID_MCST_PCIE_X16))) {
+			/* These PCIe bridges do have extended config space. */
+			bus->bus_flags &= ~PCI_BUS_FLAGS_NO_EXTCFG;
+			dev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
+		}
+	}
+}
+
+/* Devices treated as non-EIOHub roots by __l_eioh_device(). */
+static const struct pci_device_id l_iohub_root_devices[] = {
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_ELBRUS,
+			PCI_DEVICE_ID_MCST_VIRT_PCI_BRIDGE),
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_MCST_PCIE_BRIDGE,
+			PCI_DEVICE_ID_MCST_PCIE_BRIDGE)
+	},
+	{}
+};
+
+/*
+ * Walk up the bridge tree looking for an MCST VPPB bridge; revision
+ * >= 0x10 marks an EIOHub.  A match in l_iohub_root_devices means a
+ * plain IOHub.  On a root bus the host bridge at devfn 0 is probed
+ * directly via config-space reads.
+ */
+static bool __l_eioh_device(struct pci_dev *pdev)
+{
+	struct pci_bus *b = pdev->bus;
+	if (pdev->vendor == PCI_VENDOR_ID_MCST_TMP &&
+			pdev->device == PCI_DEVICE_ID_MCST_VPPB) {
+		return pdev->revision >= 0x10 ? true : false;
+	} else if (pci_match_id(l_iohub_root_devices, pdev)) {
+		return false;
+	}
+	if (pci_is_root_bus(b)) {
+		u16 vid = 0, did = 0;
+		u8 rev;
+		pci_bus_read_config_word(b, 0, PCI_VENDOR_ID, &vid);
+		pci_bus_read_config_word(b, 0, PCI_DEVICE_ID, &did);
+		pci_bus_read_config_byte(b, 0, PCI_REVISION_ID, &rev);
+		if (vid == PCI_VENDOR_ID_MCST_TMP &&
+				did == PCI_DEVICE_ID_MCST_VPPB) {
+			return rev >= 0x10 ?
+					true : false;
+		}
+		return false;
+	}
+	return __l_eioh_device(b->self);
+}
+
+/*
+ * True when pdev belongs to an EIOHub.  The per-domain sysdata flags
+ * answer the unambiguous cases; mixed domains (both IOHub and EIOHub
+ * present) fall back to the bridge-tree walk above.
+ */
+bool l_eioh_device(struct pci_dev *pdev)
+{
+	struct iohub_sysdata *sd = pdev->bus->sysdata;
+	if (!sd->has_eioh)
+		return false;
+	if (!sd->has_iohub)
+		return true;
+	return __l_eioh_device(pdev);
+}
+EXPORT_SYMBOL(l_eioh_device);
diff --git a/arch/l/pci/pci.h b/arch/l/pci/pci.h
new file mode 100644
index 000000000000..d764b73af872
--- /dev/null
+++ b/arch/l/pci/pci.h
@@ -0,0 +1,93 @@
+/*
+ * Low-Level PCI Access for i386 machines.
+ *
+ * (c) 1999 Martin Mares
+ */
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+#define PCI_PROBE_BIOS		0x0001
+#define PCI_PROBE_CONF1		0x0002
+#define PCI_PROBE_CONF2		0x0004
+#define PCI_PROBE_MMCONF	0x0008
+#define PCI_PROBE_L		0x0010
+#define PCI_PROBE_MASK		0x001f
+
+#define PCI_NO_SORT		0x0100
+#define PCI_BIOS_SORT		0x0200
+#define PCI_NO_CHECKS		0x0400
+#define PCI_USE_PIRQ_MASK	0x0800
+#define PCI_ASSIGN_ROMS	0x1000
+#define PCI_BIOS_IRQ_SCAN	0x2000
+#define PCI_ASSIGN_ALL_BUSSES	0x4000
+
+extern unsigned int pci_probe;
+extern unsigned long pirq_table_addr;
+
+/* pci-i386.c */
+
+extern unsigned int pcibios_max_latency;
+
+void pcibios_resource_survey(void);
+
+/* Elbrus */
+
+extern int __init pci_check_type_l(void);
+
+/* pci-pc.c */
+
+extern int pcibios_last_bus;
+extern struct pci_bus *pci_root_bus;
+extern struct pci_ops pci_root_ops;
+
+/* pci-irq.c */
+
+struct irq_info {
+	u8 bus, devfn;		/* Bus, device and function */
+	struct {
+		u8 link;	/* IRQ line ID, chipset dependent, 0=not routed */
+		u16 bitmap;	/* Available IRQs */
+	} __attribute__((packed)) irq[4];
+	u8 slot;		/* Slot number, 0=onboard */
+	u8 rfu;
+} __attribute__((packed));
+
+struct irq_routing_table {
+	u32 signature;		/* PIRQ_SIGNATURE should be here */
+	u16 version;		/* PIRQ_VERSION */
+	u16 size;		/* Table size in bytes */
+	u8 rtr_bus, rtr_devfn;	/* Where the interrupt router lies */
+	u16 exclusive_irqs;	/* IRQs devoted
exclusively to PCI usage */ + u16 rtr_vendor, rtr_device; /* Vendor and device ID of interrupt router */ + u32 miniport_data; /* Crap */ + u8 rfu[11]; + u8 checksum; /* Modulo 256 checksum must give zero */ + struct irq_info slots[0]; +} __attribute__((packed)); + +extern unsigned int pcibios_irq_mask; + +extern int pcibios_scanned; + +int pirq_enable_irq(struct pci_dev *dev); + +extern int __init pcibios_irq_init(void); +extern int (*pcibios_enable_irq)(struct pci_dev *dev); +extern void (*pcibios_disable_irq)(struct pci_dev *dev); + +#ifndef __e2k__ +struct pci_raw_ops { + int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, + int reg, int len, u32 *val); + int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn, + int reg, int len, u32 val); +}; + +extern struct pci_raw_ops *raw_pci_ops; +#endif diff --git a/arch/sparc/Kbuild b/arch/sparc/Kbuild index c9e574906a9b..097b314ac01d 100644 --- a/arch/sparc/Kbuild +++ b/arch/sparc/Kbuild @@ -8,4 +8,6 @@ obj-y += mm/ obj-y += math-emu/ obj-y += net/ obj-y += crypto/ +ifndef CONFIG_E90S obj-$(CONFIG_SPARC64) += vdso/ +endif diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 84a7fef54a7c..ee8b773c9aa6 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -14,25 +14,27 @@ config SPARC bool default y select ARCH_MIGHT_HAVE_PC_PARPORT if SPARC64 && PCI - select ARCH_MIGHT_HAVE_PC_SERIO - select OF - select OF_PROMTREE + select ARCH_MIGHT_HAVE_PC_SERIO if !E90S + select ARCH_SUPPORTS_NUMA_BALANCING if E90S + select ARCH_HAS_DMA_WRITE_COMBINE if E90S + select ARCH_WANTS_PROT_NUMA_PROT_NONE if E90S + select OF_PROMTREE if !E90S select HAVE_ASM_MODVERSIONS select HAVE_IDE select HAVE_OPROFILE - select HAVE_ARCH_KGDB if !SMP || SPARC64 + select HAVE_ARCH_KGDB if !E90S && (!SMP || SPARC64) select HAVE_ARCH_TRACEHOOK select HAVE_EXIT_THREAD select HAVE_PCI select SYSCTL_EXCEPTION_TRACE select RTC_CLASS - select RTC_DRV_M48T59 + select RTC_DRV_M48T59 if !E90S select RTC_SYSTOHC select 
HAVE_ARCH_JUMP_LABEL if SPARC64 select GENERIC_IRQ_SHOW select ARCH_WANT_IPC_PARSE_VERSION select GENERIC_PCI_IOMAP - select HAVE_NMI_WATCHDOG if SPARC64 + select HAVE_NMI_WATCHDOG if (SPARC64 && !E90S) select HAVE_CBPF_JIT if SPARC32 select HAVE_EBPF_JIT if SPARC64 select HAVE_DEBUG_BUGVERBOSE @@ -40,6 +42,7 @@ config SPARC select GENERIC_CLOCKEVENTS select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER + select GENERIC_PENDING_IRQ if SMP select MODULES_USE_ELF_RELA select PCI_SYSCALL if PCI select ODD_RT_SIGACTION @@ -74,10 +77,10 @@ config SPARC64 select HAVE_DEBUG_KMEMLEAK select IOMMU_HELPER select SPARSE_IRQ - select RTC_DRV_CMOS - select RTC_DRV_BQ4802 - select RTC_DRV_SUN4V - select RTC_DRV_STARFIRE + select RTC_DRV_CMOS if !E90S + select RTC_DRV_BQ4802 if !E90S + select RTC_DRV_SUN4V if !E90S + select RTC_DRV_STARFIRE if !E90S select HAVE_PERF_EVENTS select PERF_USE_VMALLOC select IRQ_PREFLOW_FASTEOI @@ -85,15 +88,23 @@ config SPARC64 select HAVE_C_RECORDMCOUNT select HAVE_ARCH_AUDITSYSCALL select ARCH_SUPPORTS_ATOMIC_RMW + select ARCH_SUPPORTS_RT if E90S select HAVE_NMI select HAVE_REGS_AND_STACK_ACCESS_API select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS - select GENERIC_TIME_VSYSCALL + select GENERIC_TIME_VSYSCALL if !E90S select ARCH_CLOCKSOURCE_DATA select ARCH_HAS_PTE_SPECIAL - select PCI_DOMAINS if PCI + select PCI_DOMAINS if PCI && IOHUB_DOMAINS select ARCH_HAS_GIGANTIC_PAGE + select IRQ_FORCED_THREADING if E90S + select HAVE_UNSTABLE_SCHED_CLOCK if E90S && NUMA + select IRQ_DOMAIN if E90S + select IOMMU_DMA if E90S + select IOMMU_API if E90S + select DMA_REMAP if E90S + select HAVE_PREEMPT_LAZY if E90S config ARCH_DEFCONFIG string @@ -134,6 +145,9 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK config NEED_PER_CPU_PAGE_FIRST_CHUNK def_bool y if SPARC64 +config ARCH_SUSPEND_POSSIBLE + def_bool y + config MMU bool default y @@ -156,6 +170,33 @@ config ARCH_SUPPORTS_DEBUG_PAGEALLOC config PGTABLE_LEVELS default 4 if 64BIT default 3 +if 
SPARC64 +menu "Elbrus90S-micro machine setup" +config E90S + bool "Elbrus90-micro machine support" + default y + help + SPARC E90S is a family of RISC microprocessors designed and marketed by + MCST, incorporated. + + +config RMO + bool "Relaxed memory order (RMO) model" + ---help--- + Unlike Ultra, E90S has a slow Total Store Order (TSO) memory model + implementation. + default y if E90S + +config E90S_SERIALIZE_IO + bool "Serialize io operations (Ch2 iteration-1 bug workaround)" + depends on E90S + default n +endmenu + +config PCI_ELBRUS + def_bool y if E90S && PCI + +endif config ARCH_SUPPORTS_UPROBES def_bool y if SPARC64 @@ -183,7 +224,15 @@ config SMP available at . If you don't know what to do here, say N. - +if E90S +config NR_CPUS + int "Maximum number of CPUs" + depends on SMP + range 8 84 + default 8 if !NUMA + default 64 if NUMA +endif +if !E90S config NR_CPUS int "Maximum number of CPUs" depends on SMP @@ -191,6 +240,7 @@ config NR_CPUS range 2 4096 if SPARC64 default 32 if SPARC32 default 4096 if SPARC64 +endif source "kernel/Kconfig.hz" @@ -222,12 +272,12 @@ config SPARC32_SMP config SPARC64_SMP bool default y - depends on SPARC64 && SMP + depends on SPARC64 && SMP && !E90S config EARLYFB bool "Support for early boot text console" default y - depends on SPARC64 + depends on SPARC64 && !E90S help Say Y here to enable a faster early framebuffer boot console. @@ -262,7 +312,7 @@ endif config US3_MC tristate "UltraSPARC-III Memory Controller driver" - depends on SPARC64 + depends on SPARC64 && !E90S default y help This adds a driver for the UltraSPARC-III memory controller. @@ -282,14 +332,22 @@ config NUMA bool "NUMA support" depends on SPARC64 && SMP +if !E90S config NODES_SHIFT - int "Maximum NUMA Nodes (as a power of 2)" - range 4 5 if SPARC64 - default "5" - depends on NEED_MULTIPLE_NODES + int + default "4" + depends on NEED_MULTIPLE_NODES help Specify the maximum number of NUMA Nodes available on the target system.
Increases memory reserved to accommodate various tables. +endif + +if E90S +config NODES_SHIFT + int + default "2" + depends on NEED_MULTIPLE_NODES +endif # Some NUMA nodes have memory ranges that span # other nodes. Even though a pfn is valid and @@ -306,10 +364,11 @@ config ARCH_SPARSEMEM_ENABLE config ARCH_SPARSEMEM_DEFAULT def_bool y if SPARC64 + select SPARSEMEM_MANUAL config FORCE_MAX_ZONEORDER int "Maximum zone order" - default "13" + default "15" help The kernel memory allocator divides physically contiguous memory blocks into "zones", where each zone is a power of two number of @@ -327,7 +386,7 @@ endif config SCHED_SMT bool "SMT (Hyperthreading) scheduler support" - depends on SPARC64 && SMP + depends on SPARC64 && SMP && !E90S default y help SMT scheduler support improves the CPU scheduler's decision making @@ -453,15 +512,17 @@ endmenu menu "Bus options (PCI etc.)" config SBUS bool + depends on !E90S default y config SBUSCHAR bool + depends on !E90S default y config SUN_LDOMS bool "Sun Logical Domains support" - depends on SPARC64 + depends on SPARC64 && !E90S help Say Y here is you want to support virtual devices via Logical Domains. @@ -494,7 +555,10 @@ config SPARC_GRPCI2 help Say Y here to include the GRPCI2 Host Bridge Driver. 
+source "arch/l/pci/Kconfig" + config SUN_OPENPROMFS + depends on !E90S tristate "Openprom tree appears in /proc/openprom" help If you say Y, the OpenPROM device tree will be available as a @@ -511,7 +575,7 @@ config SUN_OPENPROMFS config SPARC64_PCI bool default y - depends on SPARC64 && PCI + depends on SPARC64 && PCI && !E90S config SPARC64_PCI_MSI bool @@ -520,6 +584,13 @@ config SPARC64_PCI_MSI endmenu +source "fs/Kconfig.binfmt" + +config EARLY_PRINTK + bool + default y + depends on EARLY_DUMP_CONSOLE + config COMPAT bool depends on SPARC64 @@ -534,4 +605,20 @@ config SYSVIPC_COMPAT depends on COMPAT && SYSVIPC default y +menu "Power management options (ACPI, APM)" + +source "kernel/power/Kconfig" + +source "drivers/acpi/Kconfig" + +config PM + bool + depends on E90S && !MCST_RT + default y + +source "drivers/cpuidle/Kconfig" +endmenu + source "drivers/sbus/char/Kconfig" + +source "arch/l/Kconfig" diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile index 4a0919581697..80b5012923e3 100644 --- a/arch/sparc/Makefile +++ b/arch/sparc/Makefile @@ -9,10 +9,10 @@ # Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz) # We are not yet configured - so test on arch -ifeq ($(ARCH),sparc64) - KBUILD_DEFCONFIG := sparc64_defconfig +ifeq ($(ARCH),sparc) + KBUILD_DEFCONFIG ?= sparc32_defconfig else - KBUILD_DEFCONFIG := sparc32_defconfig + KBUILD_DEFCONFIG ?= sparc64_defconfig endif ifeq ($(CONFIG_SPARC32),y) @@ -44,9 +44,16 @@ KBUILD_LDFLAGS := -m elf64_sparc export BITS := 64 UTS_MACHINE := sparc64 -KBUILD_CFLAGS += -m64 -pipe -mno-fpu -mcpu=ultrasparc -mcmodel=medlow +KBUILD_CFLAGS += -m64 -pipe -mno-fpu -mcpu=ultrasparc3 -mcmodel=medlow KBUILD_CFLAGS += -ffixed-g4 -ffixed-g5 -fcall-used-g7 -Wno-sign-compare -KBUILD_CFLAGS += -Wa,--undeclared-regs +ifeq ($(call cc-lcc-yn),y) + KBUILD_CFLAGS += -fkernel + KBUILD_CFLAGS += -Wno-unused-result -Wno-reduced-alignment + KBUILD_CFLAGS += -Wno-builtin-functions-redefined -Wa,--undeclared-regs + KBUILD_CFLAGS += 
-Wno-alignment-reduction-ignored +# to reduce image size + KBUILD_CFLAGS += -fno-align-functions -finline-functions-called-once +endif KBUILD_CFLAGS += $(call cc-option,-mtune=ultrasparc3) KBUILD_AFLAGS += -m64 -mcpu=ultrasparc -Wa,--undeclared-regs @@ -61,7 +68,24 @@ head-y := arch/sparc/kernel/head_$(BITS).o # See arch/sparc/Kbuild for the core part of the kernel core-y += arch/sparc/ -libs-y += arch/sparc/prom/ +ifdef CONFIG_E90S +# Elbrus common modules +core-y += arch/l/ +drivers-$(CONFIG_PCI) += arch/l/pci/ +# e90s can boot an image no larger than 16MiB +define e90s_check_image_size + e=$$($(AWK) '$$3 == "_end" { print "0x"$$1 }' System.map); \ + d=$$((e - 0x1400000)); \ + [ $$d -le 0 ] || { echo "Linux image too large (by $$d bytes)" && \ + false; } +endef +else +e90s_check_image_size = true +endif + +ifndef CONFIG_E90S +libs-$(CONFIG_OF) += arch/sparc/prom/ +endif libs-y += arch/sparc/lib/ drivers-$(CONFIG_PM) += arch/sparc/power/ @@ -74,6 +98,8 @@ all: zImage image zImage uImage tftpboot.img vmlinux.aout: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + $(Q)$(call e90s_check_image_size) + install: $(Q)$(MAKE) $(build)=$(boot) $@ @@ -84,9 +110,14 @@ archclean: archheaders: $(Q)$(MAKE) $(build)=arch/sparc/kernel/syscalls all +build-install: FORCE + $(CONFIG_SHELL) scripts/gen-osl-build -l $(srctree) -m $(MODLIB) + +ifndef CONFIG_E90S PHONY += vdso_install vdso_install: $(Q)$(MAKE) $(build)=arch/sparc/vdso $@ +endif # This is the image used for packaging KBUILD_IMAGE := $(boot)/zImage diff --git a/arch/sparc/boot/dts/Makefile b/arch/sparc/boot/dts/Makefile new file mode 100644 index 000000000000..813f68a44e75 --- /dev/null +++ b/arch/sparc/boot/dts/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +dtb-$(CONFIG_MCST) += r2000_m1r-uvp.dtb diff --git a/arch/sparc/boot/dts/include/dt-bindings b/arch/sparc/boot/dts/include/dt-bindings new file mode 120000 index 000000000000..08c00e4972fa --- /dev/null +++ b/arch/sparc/boot/dts/include/dt-bindings @@ 
-0,0 +1 @@ +../../../../../include/dt-bindings \ No newline at end of file diff --git a/arch/sparc/boot/dts/r2000_m1r-uvp.dts b/arch/sparc/boot/dts/r2000_m1r-uvp.dts new file mode 100644 index 000000000000..7360676e29cf --- /dev/null +++ b/arch/sparc/boot/dts/r2000_m1r-uvp.dts @@ -0,0 +1,183 @@ +/* + * M1R-UVP + * TVGI.469555.384 ver.1 + * (UVM_KMI) + */ +/dts-v1/; + +/ { + /* version = ""; */ + model = "r2000,mcst,m1r-uvp"; + compatible = "mcst,m1r-uvp"; + #address-cells = <1>; + #size-cells = <0>; + + /* IOHUB2 I2C */ + l_i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + compatible = "l_i2c"; + label = "iohub2 i2c"; + + i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + lm96163@4c { + /* hwmon/lm63.ko (DD19:LM96163CISD) */ + compatible = "lm63"; + reg = <0x4c>; + label = "IOHUB Temp Sensor"; + + temp1_label = "Internal Temp"; + temp2_label = "IOHUB Temp"; + /* no fan control */ + }; + + /* config SENSORS_PMBUS */ + pndt012@38 { + /* hwmon/pmbus/pmbus.ko (DA1:PNDT012A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x38>; + label = "+0V9_CL0 R2000 (in +3V3)"; + }; + pndt012@39 { + /* hwmon/pmbus/pmbus.ko (DA2:PNDT012A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x39>; + label = "+0V9_CL1 R2000 (in +3V3)"; + }; + pndt012@3A { + /* hwmon/pmbus/pmbus.ko (DA3:PNDT012A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x3A>; + label = "+0V9_UN R2000 (in +5V)"; + }; + pndt006@3C { + /* hwmon/pmbus/pmbus.ko (DA5:PNDT006A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x3C>; + label = "+1V2 R2000 (in +5V)"; + }; + pndt012@3B { + /* hwmon/pmbus/pmbus.ko (DA4:PNDT012A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x3B>; + label = "+1V0 IOHUB2 (in +5V)"; + }; + pndt006@3E { + /* hwmon/pmbus/pmbus.ko (DA6:PNDT006A0X3-SRZ) */ + compatible = "pmbus"; + reg = <0x3E>; + label = "+2V5 IOHUB2 (in +5V)"; + }; + }; + i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + pndt006@38 { + /* hwmon/pmbus/pmbus.ko (PK-UVP:DA2:PNDT006A0X3-SRZ) */ + 
compatible = "pmbus"; + reg = <0x38>; + label = "+1V2 (in +3V3)"; + }; + + /* KSZ9896CTX@64 from PK-UVP:I2C_1 (M2E:i2c@1) */ + /* + * reg = <0x64>; + * DD2:KSZ9896CTX) "Ethernet Switch" ?@5F + */ + + /* TODO: (DD3:NH82580EB) "Ethernet 0" */ + }; + i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + + /* KSZ9896CTX@64 from PK-UVP:I2C_2 (M2E:i2c@2) */ + /* + * reg = <0x64>; + * DD1:KSZ9896CTX) "Ethernet Switch" ?@5F + */ + + /* TODO: (DD4:NH82580EB) "Ethernet 1" */ + }; + i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + + /* empty */ + }; + }; + + /* IOHUB2 SPI */ + l_spi@0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "l_spi"; + reg = <0>; + label = "iohub2 spi"; + + boot@0 { + /* (DD21:S25FL128SAGNFI001) */ + compatible = "mcst,boot"; + reg = <0>; + spi-max-frequency = <25000000>; + }; + rtc-cy14b101p@1 { + /* (DD20:CY14B101PA-SFXI) */ + compatible = "rtc-cy14b101p"; + reg = <1>; + spi-max-frequency = <12500000>; + }; + }; + + /* IOHUB2 GPIO */ + gpio0:l_gpio@0 { + compatible = "mcst,gpio"; + reg = <0x0>; + label = "L-GPIO"; + gpio-controller; + ngpios = <32>; + #gpio-cells = <2>; + + gpio-line-names = + /* 0.. 3 */ "LED_RED", "LED_GRN", "LED_YLW", "M2_RST", + /* 4.. 
7 */ "GA0", "GA1", "nc", "nc", + /* 8..11 */ "nc", "nc", "nc", "nc", + /* 12..15 */ "nc", "nc", "nc", "nc", + /* 16..19 */ "nc", "I2C_0_ALERTn", "nc", "nc", + /* 20..23 */ "nc", "nc", "nc", "nc", + /* 24..27 */ "nc", "nc", "nc", "nc", + /* 28..31 */ "nc", "nc", "nc", "nc"; + }; + + gpio_leds { + compatible = "gpio-leds"; + + pwm_r_led { + gpios = <&gpio0 0 0>; /* LED_RED */ + linux,default-trigger = "none"; + default-state = "off"; + label = "red"; + }; + pwm_g_led { + gpios = <&gpio0 1 0>; /* LED_GRN */ + linux,default-trigger = "none"; + default-state = "on"; + label = "green"; + }; + tpad_off_led { + gpios = <&gpio0 2 0>; /* LED_YLW */ + linux,default-trigger = "none"; + default-state = "off"; + label = "yellow"; + }; + }; +}; diff --git a/arch/sparc/configs/build-config b/arch/sparc/configs/build-config new file mode 100644 index 000000000000..cb6b0f7f85fd --- /dev/null +++ b/arch/sparc/configs/build-config @@ -0,0 +1,131 @@ +#!/bin/bash +scname=`basename $0` +scdir=`dirname $0` +conf_script=./scripts/config +conf_path=./arch/sparc/configs +[ "`uname -m`" != "sparc64" ] && export ARCH=sparc64 +Confs() +{ +cat <s:" +Confs +} +usage() +{ + echo "incorrect param: $1" + echo "usage: $scname [--def] --conf [--file _defconfig] [CC=]" >&2 + exit 1 +} +Usage() { + cat >&2 < [--output_directory/-o ] [--file _defconfig] + +Generate .config or|and files for some kernel configuration +from arch/sparc/configs/sparc64_defconfig | arch/sparc/configs/rt_sparc64_defconfig + +: ./arch/sparc/configs/_defconfig + (default: ./arch/sparc/configs/gen__defconfig) + +1. To generate file + $scname --def --conf [--file _defconfig] +2. To generate .config and + $scname --conf --file _defconfig +3. To generate .config only + $scname --conf +4. 
To generate .config only in separate directory + $scname --conf -o + +EOL +KnownConfs +exit 0 +} +tmpf=tmp_$$_defconfig +cleanup() +{ + rm -f $conf_path/$tmpf +} +localversion() +{ + l=`echo $DCONF | sed -e 's/_/-/' -e 's/nort//' -e 's/-$//'` + $conf_script --file $OUTPUT_DIR/.config --set-str CONFIG_LOCALVERSION "-${l}" \ + || exit 1 +} +############################################################################### +conf_nn() +{ + $conf_script $OPTFILE \ + -d CONFIG_NUMA -u CONFIG_NUMA_BALANCING -u CONFIG_NUMA_BALANCING_DEFAULT_ENABLED \ + || exit 1 +} +e90s_nort() { :; } +e90s_rt() { :; } +e90s_nn() { conf_nn; } +e90s_nnrt() { conf_nn; } +############################################################################### +trap "cleanup" 1 2 3 6 15 EXIT +OUTPUT_DIR=. +while [ "$1" != "" ] ; do + opt="$1" + shift + case "$opt" in + --def|-d) def_flg=0;; + --conf|-c) [ x"$DCONF" != x ] && usage; DCONF="$1"; shift;; + --file|-f) [ x"$DFILE" != x ] && usage; DFILE="$1"; shift;; + --output_directory|-o) OUTPUT_DIR="$1"; shift;; + CC=*) optcc="$opt";; + V=*) optv="$opt";; + --help|-h) Usage;; + *) echo "incorrect param: $opt"; Usage;; + esac +done +[ x"$DCONF" == x ] && Usage +if [[ "$CONFS" != *" $DCONF "* ]]; then + echo "unknown : $DCONF" + KnownConfs + exit 1 +fi +# additional configuration files to use +ADDITIONAL_CONFIGS="mcst.config" +if [[ "$DCONF" == *"_rt"* ]] || [[ "$DCONF" == *"_nnrt"* ]]; then + ADDITIONAL_CONFIGS+=" mcst_rt.config " +fi +# src defconfig +SFILE=$conf_path/sparc64_defconfig +# dst defconfig +if [ x"$DFILE" == x ]; then + DFILENAME=$tmpf + [ x$def_flg == x0 ] && DFILENAME=gen_${DCONF}_defconfig +else + if [[ $DFILE != *"_defconfig" ]]; then + usage "incorrect defconfig file name: $DFILE" + fi + DFILENAME=$DFILE +fi +DFILE=$conf_path/$DFILENAME +OPTFILE="--file $DFILE" + +cd $scdir/../../.. 
|| exit 1 +pwd +echo "ARCH=$ARCH" +echo "$DCONF $DFILENAME $DFILE $SFILE" +echo "$OPTFILE" +#exit 0 + +# Generate defconfig +cp $SFILE $DFILE || exit 1 +$DCONF +[ x$def_flg == x0 ] && exit 0 + +# Generate .config; silence configs merging output by adding >/dev/null +make O=$OUTPUT_DIR $DFILENAME $ADDITIONAL_CONFIGS $optcc $optv >/dev/null || exit 1 +localversion +make O=$OUTPUT_DIR olddefconfig $optcc $optv +exit $? diff --git a/arch/sparc/configs/mcst_rt.config b/arch/sparc/configs/mcst_rt.config new file mode 100644 index 000000000000..4047149d1dd5 --- /dev/null +++ b/arch/sparc/configs/mcst_rt.config @@ -0,0 +1 @@ +CONFIG_LOCALVERSION="-e90s-rt" diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index bde4d21a8ac8..44662e8e3a21 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig @@ -1,238 +1,4133 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/sparc64 4.9.33-0.xx-virt Kernel Configuration +# CONFIG_64BIT=y -# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_MMU=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +# CONFIG_RWSEM_GENERIC_SPINLOCK is not set +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_IOMMU_HELPER=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_SYS_SUPPORTS_HUGETLBFS=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_GENERIC_GPIO=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_MIGRATION=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_SPARSEMEM=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_IRQ_WORK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="" +CONFIG_LOCALVERSION="-e90s" 
+CONFIG_LOCALVERSION_AUTO=y +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +CONFIG_FHANDLE=y +CONFIG_USELIB=y +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_WATCH=y +CONFIG_AUDIT_TREE=y +CONFIG_INTEGRITY_AUDIT=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +# CONFIG_IRQ_DOMAIN_DEBUG is not set +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CMOS_UPDATE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_HZ_PERIODIC=y +# CONFIG_HZ_IDLE is not set +# CONFIG_HZ_FULL is not set +# CONFIG_NO_HZ is not set +CONFIG_HIGH_RES_TIMERS=y + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y + +CONFIG_BUILD_BIN2C=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=18 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_NUMA=y +CONFIG_NODES_SHIFT=2 +CONFIG_NODES_SPAN_OTHER_NODES=y +# CONFIG_NUMA_BALANCING is not set +# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_CGROUPS=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +# CONFIG_RT_GROUP_SCHED is not set +# CONFIG_CGROUP_PIDS is not set +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +# CONFIG_CHECKPOINT_RESTORE is not set +CONFIG_NAMESPACES=y 
+CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +# CONFIG_SCHED_AUTOGROUP is not set +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_BPF=y +CONFIG_EXPERT=y +CONFIG_MULTIUSER=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_SYSCTL_SYSCALL=y +# CONFIG_KALLSYMS is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +# CONFIG_BPF_SYSCALL is not set +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_ADVISE_SYSCALLS=y +# CONFIG_USERFAULTFD is not set +CONFIG_PCI_QUIRKS=y +CONFIG_MEMBARRIER=y +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y + +# +# Kernel Performance Events And Counters +# CONFIG_PERF_EVENTS=y +CONFIG_VM_EVENT_COUNTERS=y # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLAB_MERGE_DEFAULT is not set +CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_SYSTEM_DATA_VERIFICATION is not set CONFIG_PROFILING=y -CONFIG_OPROFILE=m +CONFIG_TRACEPOINTS=y +CONFIG_E90S_CPUIDLE=y CONFIG_KPROBES=y +# CONFIG_UPROBES is not set +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +# CONFIG_CC_STACKPROTECTOR is not set +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_EXIT_THREAD=y +# CONFIG_HAVE_ARCH_HASH is not set +# 
CONFIG_ISA_BUS_API is not set +# CONFIG_HAVE_ARCH_VMAP_STACK is not set + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_LTTNG=m +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +# CONFIG_MODULE_SIG is not set +# CONFIG_MODULE_COMPRESS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_LBDAF=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_CMDLINE_PARSER is not set +CONFIG_BLK_WBT=y +CONFIG_BLK_WBT_SQ=y +CONFIG_BLK_WBT_MQ=y + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +CONFIG_AIX_PARTITION=y +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +CONFIG_LDM_PARTITION=y +CONFIG_SUN_PARTITION=y +CONFIG_EFI_PARTITION=y +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_IOSCHED_BFQ=m +CONFIG_IOSCHED_WBT=m +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_CFQ_GROUP_IOSCHED=y +CONFIG_MQ_IOSCHED_KYBER=m +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +CONFIG_FREEZER=y + +# +# Processor type and features +# CONFIG_SMP=y -CONFIG_HZ_100=y -CONFIG_HOTPLUG_CPU=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_NUMA=y +CONFIG_HZ_1000=y +CONFIG_HZ=1000 +CONFIG_SCHED_HRTICK=y +CONFIG_NR_CPUS=32 + +# +# Elbrus Architecture Linux Kernel Configuration +# + +# +# Boot/prom console support +# +CONFIG_SERIAL_PRINTK=y +CONFIG_SERIAL_AM85C30_CONSOLE=y +CONFIG_EARLY_DUMP_CONSOLE=y +CONFIG_CLKR_CLOCKSOURCE=y +# CONFIG_CLKR_SYNCHRONIZATION_WARNING is not set +CONFIG_CLKR_OFFSET=y +CONFIG_IOHUB_GPIO=y +CONFIG_L_X86_64=y +CONFIG_L_LOCAL_APIC=y 
+CONFIG_L_IO_APIC=y +CONFIG_L_PCI_QUIRKS=y +CONFIG_I2C_SPI_RESET_CONTROLLER=y +CONFIG_L_I2C_CONTROLLER=y +CONFIG_L_SPI_CONTROLLER=y +CONFIG_FRAME_VECTOR=y +CONFIG_MMU_NOTIFIER=y +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_STAT_DETAILS=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_ARCH_SPARSEMEM_ENABLE=y CONFIG_DEFAULT_MMAP_MIN_ADDR=8192 +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +CONFIG_COMPACTION=y +CONFIG_RADIX_TREE_MULTIORDER=y +CONFIG_HAVE_MEMORY_PRESENT=y + +CONFIG_MCST=y CONFIG_PREEMPT_VOLUNTARY=y -CONFIG_SUN_LDOMS=y + +# +# Bus options (PCI etc.) 
+# + +# +# Elbrus chipset PCI support +# +CONFIG_PCI_ELBRUS=y CONFIG_PCI=y +# CONFIG_PCIEPORTBUS is not set +CONFIG_PCI_BUS_ADDR_T_64BIT=y CONFIG_PCI_MSI=y -CONFIG_SUN_OPENPROMFS=m +CONFIG_PCI_MSI_IRQ_DOMAIN=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +# CONFIG_PCI_STUB is not set +# CONFIG_PCI_IOV is not set +# CONFIG_PCI_PRI is not set +# CONFIG_PCI_PASID is not set +CONFIG_PCI_LABEL=y +# CONFIG_HOTPLUG_PCI is not set + +# +# PCI host controller drivers +# +# CONFIG_PCCARD is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +# CONFIG_HAVE_AOUT is not set CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +CONFIG_EARLY_PRINTK=y +CONFIG_COMPAT=y CONFIG_NET=y +CONFIG_COMPAT_NETLINK_MESSAGES=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y + +# +# Networking options +# CONFIG_PACKET=y +# CONFIG_PACKET_DIAG is not set CONFIG_UNIX=y +# CONFIG_UNIX_DIAG is not set +CONFIG_XFRM=y +CONFIG_XFRM_ALGO=m CONFIG_XFRM_USER=m +CONFIG_XFRM_INTERFACE=m +# CONFIG_XFRM_SUB_POLICY is not set +CONFIG_XFRM_MIGRATE=y +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=m CONFIG_NET_KEY=m CONFIG_NET_KEY_MIGRATE=y CONFIG_INET=y CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +# CONFIG_IP_FIB_TRIE_STATS is not set +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m CONFIG_NET_IPGRE=m CONFIG_NET_IPGRE_BROADCAST=y CONFIG_IP_MROUTE=y +# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y -CONFIG_ARPD=y CONFIG_SYN_COOKIES=y -CONFIG_INET_AH=y -CONFIG_INET_ESP=y -CONFIG_INET_IPCOMP=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y +# CONFIG_NET_UDP_TUNNEL is not set +# 
CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_XFRM_MODE_TRANSPORT=y +CONFIG_INET_XFRM_MODE_TUNNEL=y +CONFIG_INET_XFRM_MODE_BEET=y +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +CONFIG_INET_UDP_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +# CONFIG_TCP_CONG_HSTCP is not set +# CONFIG_TCP_CONG_HYBLA is not set +# CONFIG_TCP_CONG_VEGAS is not set +# CONFIG_TCP_CONG_SCALABLE is not set +# CONFIG_TCP_CONG_LP is not set +# CONFIG_TCP_CONG_VENO is not set +# CONFIG_TCP_CONG_YEAH is not set +# CONFIG_TCP_CONG_ILLINOIS is not set +# CONFIG_TCP_CONG_DCTCP is not set +# CONFIG_TCP_CONG_CDG is not set +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +# CONFIG_IPV6_MIP6 is not set +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_INET6_XFRM_MODE_TRANSPORT=m +CONFIG_INET6_XFRM_MODE_TUNNEL=m +CONFIG_INET6_XFRM_MODE_BEET=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +# CONFIG_IPV6_VTI is not set +CONFIG_IPV6_SIT=m +# CONFIG_IPV6_SIT_6RD is not set +CONFIG_IPV6_NDISC_NODETYPE=y CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +# CONFIG_IPV6_MULTIPLE_TABLES is not set +# CONFIG_IPV6_MROUTE is not set +CONFIG_NETLABEL=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_COMMON=m +CONFIG_NF_CONNTRACK_MARK=y 
+CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +# CONFIG_NF_CONNTRACK_TIMESTAMP is not set +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_NAT=m +CONFIG_NF_NAT_NEEDED=y +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +# CONFIG_NF_TABLES is not set +CONFIG_NETFILTER_XTABLES=m + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +# CONFIG_NETFILTER_XT_TARGET_SECMARK is not set +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +# CONFIG_NETFILTER_XT_MATCH_BPF is not set +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +# CONFIG_NETFILTER_XT_MATCH_IPVS is not set +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_VS=m +CONFIG_IP_VS_TAB_BITS=12 +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y 
+CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +# CONFIG_IP_VS_FO is not set +# CONFIG_IP_VS_OVF is not set +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +# CONFIG_IP_VS_PE_SIP is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +# CONFIG_NF_DUP_IPV4 is not set +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_IPV4=m +CONFIG_NF_NAT_MASQUERADE_IPV4=y +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PROTO_GRE=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +# CONFIG_IP_NF_TARGET_SYNPROXY is not set +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +# CONFIG_IP_NF_SECURITY is not set +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_CONNTRACK_IPV6=m +# CONFIG_NF_DUP_IPV6 is not set +# CONFIG_NF_REJECT_IPV6 is not set +CONFIG_NF_LOG_IPV6=m +CONFIG_NF_NAT_IPV6=m +# CONFIG_NF_NAT_MASQUERADE_IPV6 is not set +CONFIG_IP6_NF_IPTABLES=m +# CONFIG_IP6_NF_MATCH_AH is not set +# CONFIG_IP6_NF_MATCH_EUI64 is not set +# CONFIG_IP6_NF_MATCH_FRAG is not set +# CONFIG_IP6_NF_MATCH_OPTS is not set +# 
CONFIG_IP6_NF_MATCH_HL is not set +# CONFIG_IP6_NF_MATCH_IPV6HEADER is not set +# CONFIG_IP6_NF_MATCH_MH is not set +# CONFIG_IP6_NF_MATCH_RPFILTER is not set +# CONFIG_IP6_NF_MATCH_RT is not set +# CONFIG_IP6_NF_MATCH_SRH is not set +# CONFIG_IP6_NF_TARGET_HL is not set +# CONFIG_IP6_NF_FILTER is not set +# CONFIG_IP6_NF_TARGET_SYNPROXY is not set +CONFIG_IP6_NF_MANGLE=m +# CONFIG_IP6_NF_RAW is not set +# CONFIG_IP6_NF_SECURITY is not set +# CONFIG_IP6_NF_NAT is not set +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_OBJCNT is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +# CONFIG_SCTP_COOKIE_HMAC_SHA1 is not set +CONFIG_INET_SCTP_DIAG=m +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +# CONFIG_L2TP is not set +CONFIG_STP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +# CONFIG_BRIDGE_VLAN_FILTERING is not set +CONFIG_HAVE_NET_DSA=y CONFIG_VLAN_8021Q=m +# CONFIG_VLAN_8021Q_GVRP is not set +# CONFIG_VLAN_8021Q_MVRP is not set +# CONFIG_DECNET is not set +CONFIG_LLC=m +CONFIG_LLC2=m +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# 
CONFIG_PHONET is not set +# CONFIG_6LOWPAN is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_ATM=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +# CONFIG_NET_SCH_SFB is not set +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_CHOKE is not set +# CONFIG_NET_SCH_QFQ is not set +# CONFIG_NET_SCH_CODEL is not set +# CONFIG_NET_SCH_FQ_CODEL is not set +# CONFIG_NET_SCH_FQ is not set +# CONFIG_NET_SCH_HHF is not set +# CONFIG_NET_SCH_PIE is not set +CONFIG_NET_SCH_INGRESS=m +# CONFIG_NET_SCH_PLUG is not set + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +# CONFIG_NET_CLS_BPF is not set +# CONFIG_NET_CLS_FLOWER is not set +# CONFIG_NET_CLS_MATCHALL is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +# CONFIG_NET_ACT_CSUM is not set +# CONFIG_NET_ACT_VLAN is not set +# CONFIG_NET_ACT_BPF is not set +# CONFIG_NET_ACT_CONNMARK is not set +# CONFIG_NET_ACT_SKBMOD is not set +# CONFIG_NET_ACT_IFE is not set +# CONFIG_NET_ACT_TUNNEL_KEY is not set +CONFIG_NET_CLS_IND=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set 
+CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +# CONFIG_OPENVSWITCH_GENEVE is not set +# CONFIG_VSOCKETS is not set +CONFIG_NETLINK_DIAG=m +# CONFIG_MPLS is not set +# CONFIG_HSR is not set +# CONFIG_NET_SWITCHDEV is not set +# CONFIG_NET_L3_MASTER_DEV is not set +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# CONFIG_NET_PKTGEN=m -CONFIG_NET_TCPPROBE=m -# CONFIG_PREVENT_FIRMWARE_BUILD is not set -CONFIG_CONNECTOR=m +# CONFIG_NET_DROP_MONITOR is not set +CONFIG_CAN=m +CONFIG_CAN_VCAN=m +CONFIG_CAN_SLCAN=m +# CONFIG_IRDA is not set +CONFIG_BT=m +CONFIG_BT_BREDR=y +CONFIG_BT_RFCOMM=m +# CONFIG_BT_RFCOMM_TTY is not set +CONFIG_BT_BNEP=m +# CONFIG_BT_BNEP_MC_FILTER is not set +# CONFIG_BT_BNEP_PROTO_FILTER is not set +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_LE=y +# CONFIG_BT_LEDS is not set +# CONFIG_BT_SELFTEST is not set +CONFIG_BT_DEBUGFS=y + +# +# Bluetooth device drivers +# +CONFIG_BT_INTEL=m +CONFIG_BT_BCM=m +CONFIG_BT_RTL=m +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_BCM=y +CONFIG_BT_HCIBTUSB_RTL=y +# CONFIG_BT_HCIBTSDIO is not set +# CONFIG_BT_HCIUART is not set +# CONFIG_BT_HCIBCM203X is not set +# CONFIG_BT_HCIBFUSB is not set +# CONFIG_BT_HCIVHCI is not set +CONFIG_BT_MRVL=m +# CONFIG_BT_MRVL_SDIO is not set +# CONFIG_BT_ATH3K is not set +CONFIG_AF_RXRPC=m +# CONFIG_AF_RXRPC_IPV6 is not set +# CONFIG_AF_RXRPC_INJECT_LOSS is not set +# CONFIG_AF_RXRPC_DEBUG is not set +# CONFIG_RXKAD is not set +# CONFIG_AF_KCM is not set +# CONFIG_STREAM_PARSER is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_CERTIFICATION_ONUS is not set +CONFIG_CFG80211_DEFAULT_PS=y +# 
CONFIG_CFG80211_DEBUGFS is not set +# CONFIG_CFG80211_INTERNAL_REGDB is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +CONFIG_CFG80211_WEXT=y +# CONFIG_LIB80211 is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_MINSTREL_HT=y +# CONFIG_MAC80211_RC_MINSTREL_VHT is not set +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +# CONFIG_MAC80211_DEBUGFS is not set +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +# CONFIG_WIMAX is not set +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +CONFIG_CEPH_LIB_PRETTYDEBUG=y +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +# CONFIG_LWTUNNEL is not set +CONFIG_DST_CACHE=y +CONFIG_NET_DEVLINK=y +CONFIG_MAY_USE_DEVLINK=y + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +CONFIG_FIRMWARE_IN_KERNEL=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set +CONFIG_WANT_DEV_COREDUMP=y +CONFIG_ALLOW_DEV_COREDUMP=y +CONFIG_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_GENERIC_CPU_DEVICES is not set +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_SPI=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_FENCE_TRACE is not set + +# +# Elbrus MCST Device Drivers +# +CONFIG_DRM_MGA2=m +# CONFIG_AGAT_NIC is not set +CONFIG_AGAT_SCSI=m +CONFIG_GPIO=m +CONFIG_MPV=m +# CONFIG_MGPM is not set +CONFIG_MMRM=m +CONFIG_MOKM=m +CONFIG_RDMA_SIC=m +CONFIG_RDMA_M=m +CONFIG_MOKX=m +CONFIG_LPTOUTS=m +CONFIG_M2MLC=m 
+CONFIG_WD=y +CONFIG_ELDSP=m + +# +# MCST Vivante GPU support (galcore v6.2.4p3) +# +CONFIG_MCST_GPU_VIV=m + +# +# Bus devices +# +CONFIG_CONNECTOR=y +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +CONFIG_MTD_OF_PARTS=m +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +# CONFIG_MTD_BLOCK is not set +# CONFIG_MTD_BLOCK_RO is not set +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_SPI_NOR is not set +# CONFIG_MTD_UBI is not set +CONFIG_DTC=y +CONFIG_OF=y +CONFIG_OF_FLATTREE=y 
+CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_ADDRESS_PCI=y +CONFIG_OF_NET=y +CONFIG_OF_MDIO=m +CONFIG_OF_PCI=y +CONFIG_OF_GPIO=y +# CONFIG_OF_IRQ is not set +# CONFIG_OF_OVERLAY is not set +CONFIG_PARPORT=m +CONFIG_PARPORT_MCST=m +# CONFIG_PARPORT_GSC is not set +# CONFIG_PARPORT_AX88796 is not set +CONFIG_PARPORT_1284=y +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_NULL_BLK is not set +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +# CONFIG_BLK_DEV_COW_COMMON is not set CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 CONFIG_BLK_DEV_CRYPTOLOOP=m +# CONFIG_BLK_DEV_DRBD is not set CONFIG_BLK_DEV_NBD=m +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=4096 CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 CONFIG_CDROM_PKTCDVD_WCACHE=y CONFIG_ATA_OVER_ETH=m -CONFIG_SUNVDC=m -CONFIG_IDE=y +# CONFIG_BLK_DEV_HD is not set +CONFIG_BLK_DEV_RBD=m +# CONFIG_BLK_DEV_RSXX is not set +CONFIG_BLK_DEV_NVME=y + +# +# Misc devices +# +# CONFIG_SENSORS_LIS3LV02D is not set +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +# CONFIG_SGI_IOC4 is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29020 is not set +CONFIG_ISL22317=m +# CONFIG_ISL29003 is not set +# CONFIG_LTC4306 is not set +CONFIG_UCD9080=m +CONFIG_I2C_P2PMC=m +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_USB_SWITCH_FSA9480 is not set +# CONFIG_SRAM is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +CONFIG_EEPROM_AT24=m +CONFIG_EEPROM_LEGACY=m +# 
CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +CONFIG_EEPROM_EE1004=m +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_SENSORS_LIS3_I2C is not set + +# +# Altera FPGA firmware download module +# +# CONFIG_ALTERA_STAPL is not set + +# +# Intel MIC Bus Driver +# + +# +# SCIF Bus Driver +# + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# +CONFIG_HAVE_IDE=y +CONFIG_IDE=m + +# +# Please see Documentation/ide/ide.txt for help/info on IDE drives +# +CONFIG_IDE_XFER_MODE=y +CONFIG_IDE_ATAPI=y +# CONFIG_BLK_DEV_IDE_SATA is not set +CONFIG_IDE_GD=y +CONFIG_IDE_GD_ATA=y +# CONFIG_IDE_GD_ATAPI is not set CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_ALI15X3=y +CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y +# CONFIG_BLK_DEV_IDETAPE is not set +# CONFIG_IDE_TASK_IOCTL is not set +CONFIG_IDE_PROC_FS=y + +# +# IDE chipset support/bugfixes +# +# CONFIG_BLK_DEV_PLATFORM is not set +CONFIG_BLK_DEV_IDEDMA_SFF=y + +# +# PCI IDE chipsets support +# +CONFIG_BLK_DEV_IDEPCI=y +CONFIG_IDEPCI_PCIBUS_ORDER=y +# CONFIG_BLK_DEV_OFFBOARD is not set +CONFIG_BLK_DEV_GENERIC=y +# CONFIG_BLK_DEV_OPTI621 is not set +CONFIG_BLK_DEV_IDEDMA_PCI=y +# CONFIG_BLK_DEV_AEC62XX is not set +# CONFIG_BLK_DEV_ALI15X3 is not set +# CONFIG_BLK_DEV_AMD74XX is not set +# CONFIG_BLK_DEV_CMD64X is not set +# CONFIG_BLK_DEV_TRIFLEX is not set +# CONFIG_BLK_DEV_HPT366 is not set +# CONFIG_BLK_DEV_JMICRON is not set +CONFIG_BLK_DEV_PIIX=y +CONFIG_BLK_DEV_ELBRUS=y +# CONFIG_BLK_DEV_IT8172 is not set +# CONFIG_BLK_DEV_IT8213 is not set +# CONFIG_BLK_DEV_IT821X is not set +# CONFIG_BLK_DEV_NS87415 is not set +# CONFIG_BLK_DEV_PDC202XX_OLD is not set +# CONFIG_BLK_DEV_PDC202XX_NEW is not set +# CONFIG_BLK_DEV_SVWKS is not set +# CONFIG_BLK_DEV_SIIMAGE is not set +# CONFIG_BLK_DEV_SLC90E66 is not set +# CONFIG_BLK_DEV_TRM290 is not set +# CONFIG_BLK_DEV_VIA82CXXX 
is not set +# CONFIG_BLK_DEV_TC86C001 is not set +CONFIG_BLK_DEV_IDEDMA=y + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y CONFIG_RAID_ATTRS=m CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +# CONFIG_SCSI_MQ_DEFAULT is not set +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# CONFIG_BLK_DEV_SD=y -CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_ST=m +# CONFIG_CHR_DEV_OSST is not set +CONFIG_BLK_DEV_SR=y CONFIG_CHR_DEV_SG=m -CONFIG_SCSI_MULTI_LUN=y +# CONFIG_CHR_DEV_SCH is not set CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# CONFIG_SCSI_SPI_ATTRS=y -CONFIG_SCSI_FC_ATTRS=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +# CONFIG_SCSI_CXGB3_ISCSI is not set +# CONFIG_SCSI_CXGB4_ISCSI is not set +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_BE2ISCSI is not set +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +# CONFIG_SCSI_HPSA is not set +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +CONFIG_SCSI_AACRAID=m +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +CONFIG_SCSI_MVSAS=m +CONFIG_SCSI_MVSAS_DEBUG=y +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=m +CONFIG_MEGARAID_MAILBOX=m +CONFIG_MEGARAID_LEGACY=m +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_SMARTPQI=m +# CONFIG_SCSI_UFSHCD is not set +# CONFIG_SCSI_HPTIOP is not set +CONFIG_LIBFC=m +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FUTURE_DOMAIN is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set 
+# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +CONFIG_SCSI_QLA_FC=m +CONFIG_TCM_QLA2XXX=m +# CONFIG_SCSI_QLA_ISCSI is not set +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +CONFIG_SCSI_BFA_FC=m +# CONFIG_SCSI_DH is not set +# CONFIG_SCSI_OSD_INITIATOR is not set +CONFIG_ATA=y +# CONFIG_ATA_NONSTANDARD is not set +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +# CONFIG_SATA_AHCI_PLATFORM is not set +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +CONFIG_SATA_SIL24=y +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=y +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +CONFIG_SATA_SIL=y +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 
is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_ATA_GENERIC is not set +# CONFIG_PATA_LEGACY is not set CONFIG_MD=y -CONFIG_BLK_DEV_MD=m +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y CONFIG_MD_LINEAR=m CONFIG_MD_RAID0=m CONFIG_MD_RAID1=m CONFIG_MD_RAID10=m CONFIG_MD_RAID456=m CONFIG_MD_MULTIPATH=m +# CONFIG_MD_FAULTY is not set +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y CONFIG_BLK_DEV_DM=m +# CONFIG_DM_MQ_DEFAULT is not set +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +# CONFIG_DM_ERA is not set CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +# CONFIG_DM_FLAKEY is not set +# CONFIG_DM_VERITY is not set +# CONFIG_DM_SWITCH is not set +# CONFIG_DM_LOG_WRITES is not set +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +# CONFIG_LOOPBACK_TARGET is not set +CONFIG_TCM_FC=m +CONFIG_ISCSI_TARGET=m +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# +CONFIG_FIREWIRE=m +CONFIG_FIREWIRE_OHCI=m +CONFIG_FIREWIRE_SBP2=m 
+CONFIG_FIREWIRE_NET=m +CONFIG_FIREWIRE_NOSY=m CONFIG_NETDEVICES=y -CONFIG_NET_ETHERNET=y -CONFIG_MII=m -CONFIG_SUNLANCE=m -CONFIG_HAPPYMEAL=m -CONFIG_SUNGEM=m -CONFIG_SUNVNET=m -CONFIG_LDMVSW=m -CONFIG_NET_PCI=y +CONFIG_MII=y +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +# CONFIG_EQUALIZER is not set +# CONFIG_NET_FC is not set +CONFIG_IFB=m +# CONFIG_NET_TEAM is not set +CONFIG_MACVLAN=m +CONFIG_VXLAN=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +CONFIG_TUN=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +# CONFIG_NLMON is not set +# CONFIG_ARCNET is not set +CONFIG_ATM_DRIVERS=y +CONFIG_ATM_DUMMY=m +CONFIG_ATM_TCP=m +CONFIG_ATM_LANAI=m +CONFIG_ATM_ENI=m +# CONFIG_ATM_ENI_DEBUG is not set +CONFIG_ATM_ENI_TUNE_BURST=y +# CONFIG_ATM_ENI_BURST_TX_16W is not set +# CONFIG_ATM_ENI_BURST_TX_8W is not set +# CONFIG_ATM_ENI_BURST_TX_4W is not set +# CONFIG_ATM_ENI_BURST_TX_2W is not set +# CONFIG_ATM_ENI_BURST_RX_16W is not set +# CONFIG_ATM_ENI_BURST_RX_8W is not set +# CONFIG_ATM_ENI_BURST_RX_4W is not set +# CONFIG_ATM_ENI_BURST_RX_2W is not set +# CONFIG_ATM_NICSTAR is not set +# CONFIG_ATM_IDT77252 is not set +# CONFIG_ATM_IA is not set +# CONFIG_ATM_FORE200E is not set +# CONFIG_ATM_HE is not set +# CONFIG_ATM_SOLOS is not set + +# +# CAIF transport drivers +# + +# +# Distributed Switch Architecture drivers +# +CONFIG_ETHERNET=y +CONFIG_MDIO=m +CONFIG_NET_VENDOR_3COM=y +CONFIG_VORTEX=m +# CONFIG_TYPHOON is not set +CONFIG_NET_VENDOR_ADAPTEC=y +CONFIG_NET_VENDOR_AGERE=y +# CONFIG_ET131X is not set +CONFIG_NET_VENDOR_ALTEON=y +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +CONFIG_NET_VENDOR_ARC=y +CONFIG_NET_VENDOR_ATHEROS=y +# CONFIG_NET_VENDOR_AURORA is not set +CONFIG_NET_CADENCE=y +CONFIG_NET_VENDOR_BROADCOM=y +CONFIG_BNX2=m +CONFIG_BNXT=m +# CONFIG_TIGON3 is not set +CONFIG_NET_VENDOR_BROCADE=y +CONFIG_NET_VENDOR_CAVIUM=y +CONFIG_NET_VENDOR_CHELSIO=y 
+CONFIG_NET_VENDOR_CISCO=y +CONFIG_NET_VENDOR_CIRRUS=y +CONFIG_NET_VENDOR_DEC=y +CONFIG_NET_TULIP=y +# CONFIG_DE2104X is not set +# CONFIG_TULIP is not set +CONFIG_WINBOND_840=m +CONFIG_NET_VENDOR_DLINK=y +CONFIG_NET_VENDOR_EMULEX=y +CONFIG_NET_VENDOR_EZCHIP=y +CONFIG_NET_VENDOR_EXAR=y +CONFIG_NET_VENDOR_FUJITSU=y +CONFIG_NET_VENDOR_HP=y +CONFIG_NET_VENDOR_INTEL=y +CONFIG_E100=m CONFIG_E1000=m CONFIG_E1000E=m -CONFIG_TIGON3=m -CONFIG_BNX2=m -CONFIG_NIU=m -# CONFIG_WLAN is not set +CONFIG_E1000E_HWTS=y +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +# CONFIG_IGBVF is not set +# CONFIG_IXGB is not set +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCB=y +# CONFIG_IXGBEVF is not set +CONFIG_I40E=m +# CONFIG_I40EVF is not set +# CONFIG_FM10K is not set +CONFIG_NET_VENDOR_I825XX=y +# CONFIG_JME is not set +CONFIG_NET_VENDOR_MARVELL=y +CONFIG_SKY2=y +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +CONFIG_MLX5_CORE=m +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_CORE_EN_DCB=y +# CONFIG_MLXSW_CORE is not set +CONFIG_NET_VENDOR_MICREL=y +CONFIG_NET_VENDOR_MICROCHIP=y +CONFIG_NET_VENDOR_MYRI=y +# CONFIG_FEALNX is not set +CONFIG_NET_VENDOR_NATSEMI=y +CONFIG_NET_VENDOR_NETRONOME=y +# CONFIG_NFP_NETVF is not set +CONFIG_NET_VENDOR_8390=y +CONFIG_NE2K_PCI=m +CONFIG_NET_VENDOR_NVIDIA=y +CONFIG_FORCEDETH=m +CONFIG_NET_VENDOR_OKI=y +# CONFIG_PCH_GBE is not set +# CONFIG_ETHOC is not set +CONFIG_NET_PACKET_ENGINE=y +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_NET_VENDOR_QUALCOMM=y +CONFIG_NET_VENDOR_REALTEK=y +# CONFIG_8139CP is not set +CONFIG_8139TOO=m +CONFIG_R8169=m +CONFIG_NET_VENDOR_RENESAS=y +CONFIG_NET_VENDOR_RDC=y +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_NET_VENDOR_SAMSUNG=y +# CONFIG_SXGBE_ETH is not set +CONFIG_PCI_SUNLANCE=m +CONFIG_ETH1000=y +CONFIG_RDMA_NET=m +CONFIG_NET_VENDOR_SEEQ=y +CONFIG_NET_VENDOR_SILAN=y +CONFIG_NET_VENDOR_SIS=y +CONFIG_NET_VENDOR_SMSC=y +CONFIG_NET_VENDOR_STMICRO=y +CONFIG_NET_VENDOR_SUN=y 
+CONFIG_NET_VENDOR_SYNOPSYS=y +CONFIG_NET_VENDOR_TEHUTI=y +CONFIG_NET_VENDOR_TI=y +CONFIG_NET_VENDOR_VIA=y +CONFIG_VIA_VELOCITY=m +CONFIG_VIA_RHINE=m +CONFIG_VIA_RHINE_MMIO=y +CONFIG_NET_VENDOR_WIZNET=y +CONFIG_NET_VENDOR_XIRCOM=y +CONFIG_FDDI=m +CONFIG_PHYLIB=y +CONFIG_MDIO_DEVICE=m +CONFIG_MDIO_BUS=m +CONFIG_SWPHY=y + +# +# MDIO bus device drivers +# +# CONFIG_MDIO_BCM_UNIMAC is not set +CONFIG_MDIO_BITBANG=m +CONFIG_MDIO_GPIO=m + +# +# MII PHY device drivers +# +# CONFIG_AQUANTIA_PHY is not set +# CONFIG_AT803X_PHY is not set +# CONFIG_BCM7XXX_PHY is not set +# CONFIG_BCM87XX_PHY is not set +CONFIG_DP83867_PHY=y +CONFIG_FIXED_PHY=m +# CONFIG_ICPLUS_PHY is not set +# CONFIG_LSI_ET1011C_PHY is not set +CONFIG_MARVELL_PHY=y +CONFIG_MICREL_PHY=y +# CONFIG_MICROCHIP_PHY is not set +# CONFIG_MICROSEMI_PHY is not set +CONFIG_NATIONAL_PHY=y +# CONFIG_QSEMI_PHY is not set +# CONFIG_REALTEK_PHY is not set +# CONFIG_SMSC_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_TERANETICS_PHY is not set +# CONFIG_VITESSE_PHY is not set +# CONFIG_XILINX_GMII2RGMII is not set CONFIG_PPP=m -CONFIG_PPP_MULTILINK=y +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +# CONFIG_PPPOATM is not set +CONFIG_PPPOE=m +# CONFIG_PPTP is not set CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_MPPE=m -CONFIG_PPPOE=m -CONFIG_INPUT_EVDEV=y -CONFIG_KEYBOARD_LKKBD=m -CONFIG_KEYBOARD_SUNKBD=y -CONFIG_MOUSE_SERIAL=y +CONFIG_SLIP=m +CONFIG_SLHC=m +# CONFIG_SLIP_COMPRESSED is not set +# CONFIG_SLIP_SMART is not set +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y +# CONFIG_USB_CATC is not set +# CONFIG_USB_KAWETH is not set +# CONFIG_USB_PEGASUS is not set +# CONFIG_USB_RTL8150 is not set +# CONFIG_USB_RTL8152 is not set +# CONFIG_USB_LAN78XX is not set +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m 
+CONFIG_USB_NET_CDC_NCM=m +# CONFIG_USB_NET_HUAWEI_CDC_NCM is not set +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SR9700=m +# CONFIG_USB_NET_SR9800 is not set +# CONFIG_USB_NET_SMSC75XX is not set +CONFIG_USB_NET_SMSC95XX=m +# CONFIG_USB_NET_GL620A is not set +CONFIG_USB_NET_NET1080=m +# CONFIG_USB_NET_PLUSB is not set +# CONFIG_USB_NET_MCS7830 is not set +# CONFIG_USB_NET_RNDIS_HOST is not set +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +# CONFIG_USB_ALI_M5632 is not set +# CONFIG_USB_AN2720 is not set +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +# CONFIG_USB_EPSON2888 is not set +# CONFIG_USB_KC2190 is not set +CONFIG_USB_NET_ZAURUS=m +# CONFIG_USB_NET_CX82310_ETH is not set +# CONFIG_USB_NET_KALMIA is not set +# CONFIG_USB_NET_QMI_WWAN is not set +# CONFIG_USB_HSO is not set +# CONFIG_USB_IPHETH is not set +CONFIG_WLAN=y +CONFIG_WLAN_VENDOR_ADMTEK=y +# CONFIG_ADM8211 is not set +CONFIG_ATH_COMMON=m +CONFIG_WLAN_VENDOR_ATH=y +# CONFIG_ATH_DEBUG is not set +# CONFIG_ATH5K is not set +# CONFIG_ATH5K_PCI is not set +CONFIG_ATH9K_HW=m +CONFIG_ATH9K_COMMON=m +CONFIG_ATH9K_BTCOEX_SUPPORT=y +CONFIG_ATH9K=m +CONFIG_ATH9K_PCI=y +CONFIG_ATH9K_AHB=y +# CONFIG_ATH9K_DEBUGFS is not set +# CONFIG_ATH9K_DYNACK is not set +CONFIG_ATH9K_RFKILL=y +# CONFIG_ATH9K_CHANNEL_CONTEXT is not set +CONFIG_ATH9K_PCOEM=y +CONFIG_ATH9K_HTC=m +# CONFIG_ATH9K_HTC_DEBUGFS is not set +# CONFIG_CARL9170 is not set +# CONFIG_ATH6KL is not set +# CONFIG_AR5523 is not set +# CONFIG_WIL6210 is not set +# CONFIG_ATH10K is not set +# CONFIG_WCN36XX is not set +CONFIG_WLAN_VENDOR_ATMEL=y +# CONFIG_ATMEL is not set +# CONFIG_AT76C50X_USB is not set +CONFIG_WLAN_VENDOR_BROADCOM=y +# CONFIG_B43 is not set +# CONFIG_B43LEGACY is not set +# CONFIG_BRCMSMAC is not set +# CONFIG_BRCMFMAC is not set +CONFIG_WLAN_VENDOR_CISCO=y +CONFIG_WLAN_VENDOR_INTEL=y +# CONFIG_IPW2100 is not set +# CONFIG_IPW2200 is not set +# CONFIG_IWL4965 is not set +# CONFIG_IWL3945 is not set 
+# CONFIG_IWLWIFI is not set +CONFIG_WLAN_VENDOR_INTERSIL=y +CONFIG_HOSTAP=m +# CONFIG_HERMES is not set +# CONFIG_P54_COMMON is not set +# CONFIG_PRISM54 is not set +CONFIG_WLAN_VENDOR_MARVELL=y +# CONFIG_LIBERTAS is not set +# CONFIG_LIBERTAS_THINFIRM is not set +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_MWIFIEX_USB=m +# CONFIG_MWL8K is not set +CONFIG_WLAN_VENDOR_MEDIATEK=y +# CONFIG_MT7601U is not set +CONFIG_WLAN_VENDOR_RALINK=y +# CONFIG_RT2X00 is not set +CONFIG_WLAN_VENDOR_REALTEK=y +CONFIG_RTL8180=m +CONFIG_RTL8187=m +CONFIG_RTL8187_LEDS=y +CONFIG_RTL_CARDS=m +CONFIG_RTL8192CE=m +CONFIG_RTL8192SE=m +CONFIG_RTL8192DE=m +CONFIG_RTL8723AE=m +CONFIG_RTL8723BE=m +CONFIG_RTL8188EE=m +CONFIG_RTL8192EE=m +CONFIG_RTL8821AE=m +CONFIG_RTL8192CU=m +CONFIG_RTLWIFI=m +CONFIG_RTLWIFI_PCI=m +CONFIG_RTLWIFI_USB=m +CONFIG_RTLWIFI_DEBUG=y +CONFIG_RTL8192C_COMMON=m +CONFIG_RTL8723_COMMON=m +CONFIG_RTLBTCOEXIST=m +CONFIG_RTL8XXXU=m +CONFIG_WLAN_VENDOR_RSI=y +# CONFIG_RSI_91X is not set +CONFIG_WLAN_VENDOR_ST=y +# CONFIG_CW1200 is not set +CONFIG_WLAN_VENDOR_TI=y +# CONFIG_WL1251 is not set +# CONFIG_WL12XX is not set +# CONFIG_WL18XX is not set +# CONFIG_WLCORE is not set +CONFIG_WLAN_VENDOR_ZYDAS=y +# CONFIG_USB_ZD1201 is not set +# CONFIG_ZD1211RW is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +# CONFIG_VMXNET3 is not set +# CONFIG_ISDN is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=m +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_POLLDEV=y +CONFIG_INPUT_SPARSEKMAP=m +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=m +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is 
not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_LKKBD is not set +CONFIG_KEYBOARD_GPIO=m +CONFIG_KEYBOARD_GPIO_POLLED=m +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +CONFIG_KEYBOARD_SUNKBD=m +# CONFIG_KEYBOARD_XTKBD is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_LIFEBOOK=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +# CONFIG_MOUSE_PS2_ELANTECH is not set +# CONFIG_MOUSE_PS2_SENTELIC is not set +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_SERIAL=m +# CONFIG_MOUSE_APPLETOUCH is not set +# CONFIG_MOUSE_BCM5974 is not set +# CONFIG_MOUSE_CYAPA is not set +# CONFIG_MOUSE_ELAN_I2C is not set +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_MOUSE_SYNAPTICS_I2C is not set +# CONFIG_MOUSE_SYNAPTICS_USB is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_INPUT_TABLET=y +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_PROPERTIES=y +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +CONFIG_TOUCHSCREEN_EETI=m +CONFIG_TOUCHSCREEN_EGALAX=m +CONFIG_TOUCHSCREEN_EGALAX_SERIAL=m +# CONFIG_TOUCHSCREEN_FUJITSU is not set +CONFIG_TOUCHSCREEN_GOODIX=m +# CONFIG_TOUCHSCREEN_ILI210X is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_ELAN is not set 
+CONFIG_TOUCHSCREEN_ELO=m +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_WACOM_I2C is not set +# CONFIG_TOUCHSCREEN_MAX11801 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MMS114 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_PIXCIR is not set +# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set +# CONFIG_TOUCHSCREEN_WM97XX is not set +CONFIG_TOUCHSCREEN_USB_COMPOSITE=m +CONFIG_TOUCHSCREEN_USB_EGALAX=y +CONFIG_TOUCHSCREEN_USB_PANJIT=y +CONFIG_TOUCHSCREEN_USB_3M=y +CONFIG_TOUCHSCREEN_USB_ITM=y +CONFIG_TOUCHSCREEN_USB_ETURBO=y +CONFIG_TOUCHSCREEN_USB_GUNZE=y +CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y +CONFIG_TOUCHSCREEN_USB_IRTOUCH=y +CONFIG_TOUCHSCREEN_USB_IDEALTEK=y +CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y +CONFIG_TOUCHSCREEN_USB_GOTOP=y +CONFIG_TOUCHSCREEN_USB_JASTEC=y +CONFIG_TOUCHSCREEN_USB_ELO=y +CONFIG_TOUCHSCREEN_USB_E2I=y +CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y +CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y +CONFIG_TOUCHSCREEN_USB_NEXIO=y +CONFIG_TOUCHSCREEN_USB_EASYTOUCH=y +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC_SERIO is not set +# CONFIG_TOUCHSCREEN_TSC2004 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_SX8654 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set CONFIG_INPUT_MISC=y -CONFIG_INPUT_SPARCSPKR=y -# CONFIG_SERIO_SERPORT is not set -CONFIG_SERIO_PCIPS2=m -CONFIG_SERIO_RAW=m -# CONFIG_DEVKMEM is not set -CONFIG_SERIAL_SUNSU=y -CONFIG_SERIAL_SUNSU_CONSOLE=y -CONFIG_SERIAL_SUNSAB=y -CONFIG_SERIAL_SUNSAB_CONSOLE=y -CONFIG_SERIAL_SUNHV=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_BMA150 is not set +# 
CONFIG_INPUT_E3X0_BUTTON is not set +# CONFIG_INPUT_MMA8450 is not set +# CONFIG_INPUT_MPU3050 is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +CONFIG_INPUT_LTC2954=m +# CONFIG_INPUT_KXTJ9 is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +CONFIG_INPUT_UINPUT=m +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_IMS_PCU is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_SERIO_SERPORT=m +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +# CONFIG_SERIO_RAW is not set +# CONFIG_SERIO_ALTERA_PS2 is not set +# CONFIG_SERIO_PS2MULT is not set +# CONFIG_SERIO_ARC_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y # CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +CONFIG_MOXA_SMARTIO=m +# CONFIG_NOZOMI is not set +# CONFIG_N_GSM is not set +# CONFIG_TRACE_SINK is not set +CONFIG_DEVMEM=y +CONFIG_DEVKMEM=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=m +CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y +CONFIG_SERIAL_8250_PNP=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=32 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +# CONFIG_SERIAL_8250_RSA is not set +# CONFIG_SERIAL_8250_FSL is not set +# CONFIG_SERIAL_8250_DW is not set +# CONFIG_SERIAL_8250_RT288X is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y 
+CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +CONFIG_SERIAL_L_ZILOG=y +CONFIG_SERIAL_L_ZILOG_CONSOLE=y +# CONFIG_SERIAL_FSL_LPUART is not set +CONFIG_PRINTER=m +CONFIG_PPDEV=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +# CONFIG_IPMI_SSIF is not set +# CONFIG_IPMI_WATCHDOG is not set +CONFIG_IPMI_POWEROFF=m +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +CONFIG_DEVPORT=y +# CONFIG_XILLYBUS is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOBIT=y + +# +# Multiplexer I2C Chip support +# +CONFIG_I2C_MUX_LTC4306=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_PXA_PCI is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_DIOLAN_U2C is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB 
is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +CONFIG_SPI_MASTER=y +CONFIG_SPI_SPIDEV=m +# CONFIG_SPMI is not set +# CONFIG_HSI is not set + +# +# PPS support +# +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set +CONFIG_NTP_PPS=y + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y + +# +# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. +# +CONFIG_GPIOLIB=y +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_PCA953X=m +# CONFIG_W1 is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +CONFIG_BATTERY_SBS=m +CONFIG_CHARGER_SBS=m +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_SMB347 is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +CONFIG_SENSORS_ADT7475=m +# CONFIG_SENSORS_ASC7621 is not set 
+# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_IT87 is not set +CONFIG_SENSORS_JC42=m +# CONFIG_SENSORS_POWR1220 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LTC2945 is not set +CONFIG_SENSORS_LTC4151=m +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4222 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4260 is not set +# CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MCP3021 is not set +CONFIG_SENSORS_LM63=m +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +CONFIG_SENSORS_LM95231=m +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +CONFIG_SENSORS_LM95245=m +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +CONFIG_SENSORS_NTC_THERMISTOR=y +# CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_PCF8591 is not 
set +CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +CONFIG_SENSORS_LTC2978=m +CONFIG_SENSORS_TPS53679=m +CONFIG_SENSORS_PWM_FAN=m +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_SCH56XX_COMMON is not set +# CONFIG_SENSORS_SCH5627 is not set +# CONFIG_SENSORS_SCH5636 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +# CONFIG_SENSORS_ADS1015 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_TC74 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set + +CONFIG_THERMAL=y +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_BANG_BANG=y +CONFIG_THERMAL_GOV_USER_SPACE=y 
+CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y +CONFIG_CPU_THERMAL=y +# CONFIG_THERMAL_EMULATION is not set + +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set + +# +# Watchdog Device Drivers +# +# CONFIG_SOFT_WATCHDOG is not set +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +# CONFIG_ALIM7101_WDT is not set +# CONFIG_I6300ESB_WDT is not set +CONFIG_L_WDT=y + +# +# PCI-based Watchdog Cards +# +# CONFIG_PCIPCWATCHDOG is not set +# CONFIG_WDTPCI is not set + +# +# USB-based Watchdog Cards +# +# CONFIG_USBPCWATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y + +# +# Broadcom specific AMBA +# +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RTSX_PCI is not set +# 
CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RTSX_USB is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +CONFIG_REGULATOR=y +CONFIG_MEDIA_SUPPORT=m + +# +# Multimedia core support +# +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_ANALOG_TV_SUPPORT=y +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y +CONFIG_MEDIA_RADIO_SUPPORT=y +# CONFIG_MEDIA_SDR_SUPPORT is not set +# CONFIG_MEDIA_RC_SUPPORT is not set +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_DEV=m +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_VIDEO_V4L2=m +# CONFIG_VIDEO_ADV_DEBUG is not set +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set +CONFIG_VIDEOBUF2_CORE=m +CONFIG_VIDEOBUF2_MEMOPS=m +CONFIG_VIDEOBUF2_VMALLOC=m +CONFIG_DVB_CORE=m +CONFIG_DVB_NET=y +# CONFIG_TTPCI_EEPROM is not set +CONFIG_DVB_MAX_ADAPTERS=8 +# CONFIG_DVB_DYNAMIC_MINORS is not set + +# +# Media drivers +# +CONFIG_MEDIA_USB_SUPPORT=y + +# +# Webcam devices +# +CONFIG_USB_VIDEO_CLASS=m +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y +CONFIG_USB_GSPCA=m +CONFIG_USB_PWC=m +# CONFIG_USB_PWC_DEBUG is not set +CONFIG_USB_PWC_INPUT_EVDEV=y 
+ +# +# Analog TV USB devices +# +# CONFIG_VIDEO_PVRUSB2 is not set +# CONFIG_VIDEO_HDPVR is not set +# CONFIG_VIDEO_USBVISION is not set +# CONFIG_VIDEO_STK1160_COMMON is not set +# CONFIG_VIDEO_GO7007 is not set + +# +# Analog/digital TV USB devices +# +# CONFIG_VIDEO_AU0828 is not set + +# +# Digital TV USB devices +# +# CONFIG_DVB_USB_V2 is not set +# CONFIG_DVB_TTUSB_BUDGET is not set +# CONFIG_DVB_TTUSB_DEC is not set +# CONFIG_SMS_USB_DRV is not set +# CONFIG_DVB_B2C2_FLEXCOP_USB is not set +# CONFIG_DVB_AS102 is not set + +# +# Webcam, TV (analog/digital) USB devices +# +# CONFIG_VIDEO_EM28XX is not set +# CONFIG_MEDIA_PCI_SUPPORT is not set +CONFIG_V4L_PLATFORM_DRIVERS=y +# CONFIG_VIDEO_CAFE_CCIC is not set +# CONFIG_SOC_CAMERA is not set +# CONFIG_V4L_MEM2MEM_DRIVERS is not set +# CONFIG_V4L_TEST_DRIVERS is not set +# CONFIG_DVB_PLATFORM_DRIVERS is not set + +# +# Supported MMC/SDIO adapters +# +# CONFIG_SMS_SDIO_DRV is not set +CONFIG_RADIO_ADAPTERS=y +# CONFIG_RADIO_SI470X is not set +# CONFIG_RADIO_SI4713 is not set +# CONFIG_USB_MR800 is not set +# CONFIG_USB_DSBR is not set +# CONFIG_RADIO_MAXIRADIO is not set +# CONFIG_RADIO_SHARK is not set +# CONFIG_RADIO_SHARK2 is not set +# CONFIG_USB_KEENE is not set +# CONFIG_USB_RAREMONO is not set +# CONFIG_USB_MA901 is not set +# CONFIG_RADIO_TEA5764 is not set +# CONFIG_RADIO_SAA7706H is not set +# CONFIG_RADIO_TEF6862 is not set +# CONFIG_RADIO_WL1273 is not set + +# +# Texas Instruments WL128x FM driver (ST based) +# +# CONFIG_CYPRESS_FIRMWARE is not set + +# +# Media ancillary drivers (tuners, sensors, i2c, spi, frontends) +# +CONFIG_MEDIA_SUBDRV_AUTOSELECT=y +CONFIG_MEDIA_ATTACH=y + +# +# Audio decoders, processors and mixers +# + +# +# RDS decoders +# + +# +# Video decoders +# + +# +# Video and audio decoders +# + +# +# Video encoders +# + +# +# Camera sensor devices +# + +# +# Flash devices +# + +# +# Video improvement chips +# + +# +# Audio/Video compression chips +# + +# +# Miscellaneous helper 
chips +# + +# +# Sensors used on soc_camera driver +# +CONFIG_MEDIA_TUNER=m +CONFIG_MEDIA_TUNER_SIMPLE=m +CONFIG_MEDIA_TUNER_TDA8290=m +CONFIG_MEDIA_TUNER_TDA827X=m +CONFIG_MEDIA_TUNER_TDA18271=m +CONFIG_MEDIA_TUNER_TDA9887=m +CONFIG_MEDIA_TUNER_TEA5761=m +CONFIG_MEDIA_TUNER_TEA5767=m +CONFIG_MEDIA_TUNER_MT20XX=m +CONFIG_MEDIA_TUNER_XC2028=m +CONFIG_MEDIA_TUNER_XC5000=m +CONFIG_MEDIA_TUNER_XC4000=m +CONFIG_MEDIA_TUNER_MC44S803=m + +# +# Multistandard (satellite) frontends +# + +# +# Multistandard (cable + terrestrial) frontends +# + +# +# DVB-S (satellite) frontends +# + +# +# DVB-T (terrestrial) frontends +# +# CONFIG_DVB_AS102_FE is not set +# CONFIG_DVB_GP8PSK_FE is not set + +# +# DVB-C (cable) frontends +# + +# +# ATSC (North American/Korean Terrestrial/Cable DTV) frontends +# + +# +# ISDB-T (terrestrial) frontends +# + +# +# ISDB-S (satellite) & ISDB-T (terrestrial) frontends +# + +# +# Digital terrestrial only tuners/PLL +# + +# +# SEC control devices for DVB-S +# + +# +# Tools to develop new frontends +# +# CONFIG_DVB_DUMMY_FE is not set + +# +# Graphics support +# +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=16 +CONFIG_DRM=m +# CONFIG_DRM_DP_AUX_CHARDEV is not set +CONFIG_DRM_KMS_HELPER=m +CONFIG_DRM_KMS_FB_HELPER=y +CONFIG_DRM_FBDEV_EMULATION=y +# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set +CONFIG_DRM_TTM=m + +# +# I2C encoder or helper chips +# +# CONFIG_DRM_I2C_CH7006 is not set +CONFIG_DRM_I2C_SIL164=m +# CONFIG_DRM_I2C_NXP_TDA998X is not set +CONFIG_DRM_RADEON=m +# CONFIG_DRM_RADEON_USERPTR is not set +CONFIG_DRM_AMDGPU=m +CONFIG_DRM_AMDGPU_SI=y +CONFIG_DRM_AMDGPU_CIK=y + +# +# ACP (Audio CoProcessor) Configuration +# +CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +# CONFIG_DRM_VGEM is not set +CONFIG_DRM_VIVANTE=m +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +# CONFIG_DRM_MCST is not set +# CONFIG_DRM_MGAG200 is not set +# CONFIG_DRM_CIRRUS_QEMU is not set +# CONFIG_DRM_QXL is not set +# 
CONFIG_DRM_BOCHS is not set +CONFIG_DRM_PANEL=y +CONFIG_DRM_PANEL_LVDS=m +CONFIG_DRM_BRIDGE=y + +# +# Display Interface Bridges +# +CONFIG_DRM_DW_HDMI=y +CONFIG_DRM_NXP_PTN3460=m +CONFIG_DRM_IMX_HDMI=m + +# +# Frame buffer Devices +# CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB_DDC=m +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +CONFIG_FB_SYS_FILLRECT=m +CONFIG_FB_SYS_COPYAREA=m +CONFIG_FB_SYS_IMAGEBLIT=m +CONFIG_FB_FOREIGN_ENDIAN=y +CONFIG_FB_BOTH_ENDIAN=y +CONFIG_FB_SYS_FOPS=m +CONFIG_FB_DEFERRED_IO=y +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +CONFIG_FB_BACKLIGHT=y +CONFIG_FB_MODE_HELPERS=y CONFIG_FB_TILEBLITTING=y -CONFIG_FB_SBUS=y -CONFIG_FB_CG6=y -CONFIG_FB_FFB=y -CONFIG_FB_XVR500=y -CONFIG_FB_XVR2500=y -CONFIG_FB_XVR1000=y -CONFIG_FB_RADEON=y -# CONFIG_FB_RADEON_BACKLIGHT is not set -CONFIG_FB_ATY=y -CONFIG_FB_ATY_GX=y -# CONFIG_FB_ATY_BACKLIGHT is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +CONFIG_FB_VGA16=y +# CONFIG_FB_UVESA is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +CONFIG_FB_RADEON=m +CONFIG_FB_RADEON_I2C=y +CONFIG_FB_RADEON_BACKLIGHT=y +# CONFIG_FB_RADEON_DEBUG is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is 
not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +# CONFIG_FB_AUO_K190X is not set +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SM712 is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=m +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_BACKLIGHT_GENERIC=y +CONFIG_BACKLIGHT_PWM=m +# CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3639 is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +CONFIG_VGASTATE=y +CONFIG_HDMI=y +CONFIG_FB_MGAM83=y +CONFIG_PCI_FB_MGAM83=y +CONFIG_MGA_HWCOPYAREA=y +CONFIG_MGA_HWIMAGEBLIT=y +CONFIG_FB_MGA3D=m +CONFIG_FB_LYNXFB=y +CONFIG_FB_LYNXFB_DOMAINS=y + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y -CONFIG_FONTS=y -CONFIG_FONT_SUN8x16=y -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set -# CONFIG_LOGO_LINUX_CLUT224 is not set +# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set +# CONFIG_LOGO is not set CONFIG_SOUND=m +CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y CONFIG_SND=m +CONFIG_SND_TIMER=m +CONFIG_SND_PCM=m +CONFIG_SND_HWDEP=m +CONFIG_SND_RAWMIDI=m +CONFIG_SND_JACK=y +CONFIG_SND_JACK_INPUT_DEV=y CONFIG_SND_SEQUENCER=m CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m +CONFIG_SND_PCM_OSS_PLUGINS=y +CONFIG_SND_PCM_TIMER=y CONFIG_SND_SEQUENCER_OSS=y +CONFIG_SND_HRTIMER=y +CONFIG_SND_SEQ_HRTIMER_DEFAULT=y +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_MAX_CARDS=32 +CONFIG_SND_SUPPORT_OLD_API=y +CONFIG_SND_PROC_FS=y +CONFIG_SND_VERBOSE_PROCFS=y +# CONFIG_SND_VERBOSE_PRINTK is not set +# CONFIG_SND_DEBUG is not set +CONFIG_SND_VMASTER=y 
+CONFIG_SND_DMA_SGBUF=y +CONFIG_SND_RAWMIDI_SEQ=m +CONFIG_SND_OPL3_LIB_SEQ=m +# CONFIG_SND_OPL4_LIB_SEQ is not set +# CONFIG_SND_SBAWE_SEQ is not set +CONFIG_SND_EMU10K1=m +CONFIG_SND_MPU401_UART=m +CONFIG_SND_OPL3_LIB=m +CONFIG_SND_AC97_CODEC=m +CONFIG_SND_DRIVERS=y CONFIG_SND_DUMMY=m +# CONFIG_SND_ALOOP is not set CONFIG_SND_VIRMIDI=m CONFIG_SND_MTPAV=m -CONFIG_SND_ALI5451=m -CONFIG_SND_SUN_CS4231=m +# CONFIG_SND_SERIAL_U16550 is not set +CONFIG_SND_MPU401=m +# CONFIG_SND_AC97_POWER_SAVE is not set +CONFIG_SND_PCI=y +# CONFIG_SND_AD1889 is not set +# CONFIG_SND_ATIIXP is not set +# CONFIG_SND_ATIIXP_MODEM is not set +# CONFIG_SND_AU8810 is not set +# CONFIG_SND_AU8820 is not set +# CONFIG_SND_AU8830 is not set +# CONFIG_SND_AW2 is not set +# CONFIG_SND_BT87X is not set +# CONFIG_SND_CA0106 is not set +# CONFIG_SND_CMIPCI is not set +# CONFIG_SND_OXYGEN is not set +CONFIG_SND_CS4281=m +# CONFIG_SND_CS46XX is not set +CONFIG_SND_CTXFI=m +# CONFIG_SND_DARLA20 is not set +# CONFIG_SND_GINA20 is not set +# CONFIG_SND_LAYLA20 is not set +# CONFIG_SND_DARLA24 is not set +# CONFIG_SND_GINA24 is not set +# CONFIG_SND_LAYLA24 is not set +# CONFIG_SND_MONA is not set +# CONFIG_SND_MIA is not set +# CONFIG_SND_ECHO3G is not set +# CONFIG_SND_INDIGO is not set +# CONFIG_SND_INDIGOIO is not set +# CONFIG_SND_INDIGODJ is not set +# CONFIG_SND_INDIGOIOX is not set +# CONFIG_SND_INDIGODJX is not set +CONFIG_SND_EMU10K1X=m +# CONFIG_SND_ENS1370 is not set +CONFIG_SND_ENS1371=m +CONFIG_SND_ES1938=m +# CONFIG_SND_FM801 is not set +# CONFIG_SND_HDSP is not set +# CONFIG_SND_HDSPM is not set +# CONFIG_SND_ICE1724 is not set +# CONFIG_SND_INTEL8X0 is not set +# CONFIG_SND_INTEL8X0M is not set +# CONFIG_SND_KORG1212 is not set +# CONFIG_SND_LOLA is not set +# CONFIG_SND_LX6464ES is not set +# CONFIG_SND_MIXART is not set +# CONFIG_SND_NM256 is not set +# CONFIG_SND_PCXHR is not set +# CONFIG_SND_RIPTIDE is not set +# CONFIG_SND_RME32 is not set +# CONFIG_SND_RME96 is not set +# 
CONFIG_SND_RME9652 is not set +# CONFIG_SND_SE6X is not set +# CONFIG_SND_VIA82XX is not set +# CONFIG_SND_VIA82XX_MODEM is not set +# CONFIG_SND_VIRTUOSO is not set +# CONFIG_SND_VX222 is not set +# CONFIG_SND_YMFPCI is not set + +# +# HD-Audio +# +CONFIG_SND_HDA=m +CONFIG_SND_HDA_INTEL=m +CONFIG_SND_HDA_HWDEP=y +# CONFIG_SND_HDA_RECONFIG is not set +# CONFIG_SND_HDA_INPUT_BEEP is not set +# CONFIG_SND_HDA_PATCH_LOADER is not set +CONFIG_SND_HDA_CODEC_REALTEK=m +CONFIG_SND_HDA_CODEC_ANALOG=m +CONFIG_SND_HDA_CODEC_SIGMATEL=m +CONFIG_SND_HDA_CODEC_VIA=m +CONFIG_SND_HDA_CODEC_HDMI=m +CONFIG_SND_HDA_CODEC_CIRRUS=m +CONFIG_SND_HDA_CODEC_CONEXANT=m +CONFIG_SND_HDA_CODEC_CA0110=m +CONFIG_SND_HDA_CODEC_CA0132=m +# CONFIG_SND_HDA_CODEC_CA0132_DSP is not set +CONFIG_SND_HDA_CODEC_CMEDIA=m +CONFIG_SND_HDA_CODEC_SI3054=m +CONFIG_SND_HDA_GENERIC=m +CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 +CONFIG_SND_HDA_CORE=m +CONFIG_SND_SPI=y +CONFIG_SND_HDA_PREALLOC_SIZE=2048 +CONFIG_SND_USB=y +CONFIG_SND_USB_AUDIO=m +# CONFIG_SND_USB_UA101 is not set +# CONFIG_SND_USB_CAIAQ is not set +# CONFIG_SND_USB_6FIRE is not set +# CONFIG_SND_USB_HIFACE is not set +# CONFIG_SND_BCD2000 is not set +# CONFIG_SND_USB_POD is not set +# CONFIG_SND_USB_PODHD is not set +# CONFIG_SND_USB_TONEPORT is not set +# CONFIG_SND_USB_VARIAX is not set +CONFIG_SND_SOC=m +CONFIG_SND_SOC_TLV320AIC26=m +CONFIG_SND_SIMPLE_CARD=m +# CONFIG_SOUND_PRIME is not set +CONFIG_AC97_BUS=m + +# +# HID support +# +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +CONFIG_HIDRAW=y +# CONFIG_UHID is not set +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=m +# CONFIG_HID_ACRUX is not set +CONFIG_HID_APPLE=m +# CONFIG_HID_APPLEIR is not set +# CONFIG_HID_AUREAL is not set +CONFIG_HID_BELKIN=m +# CONFIG_HID_BETOP_FF is not set +CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +# CONFIG_HID_CORSAIR is not set +# CONFIG_HID_PRODIKEYS is not set +CONFIG_HID_CYPRESS=m +# CONFIG_HID_DRAGONRISE is not set +# 
CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_ELO is not set +CONFIG_HID_EZKEY=m +# CONFIG_HID_GEMBIRD is not set +# CONFIG_HID_GFRM is not set +# CONFIG_HID_HOLTEK is not set +# CONFIG_HID_GT683R is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_UCLOGIC is not set +# CONFIG_HID_WALTOP is not set +CONFIG_HID_GYRATION=m +# CONFIG_HID_ICADE is not set +# CONFIG_HID_TWINHAN is not set +CONFIG_HID_KENSINGTON=m +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LENOVO is not set +CONFIG_HID_LOGITECH=y +# CONFIG_HID_LOGITECH_HIDPP is not set +CONFIG_LOGITECH_FF=y +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +CONFIG_LOGIWHEELS_FF=y +# CONFIG_HID_MAGICMOUSE is not set +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NTRIG=m +# CONFIG_HID_ORTEK is not set +CONFIG_HID_PANTHERLORD=m +CONFIG_PANTHERLORD_FF=y +# CONFIG_HID_PENMOUNT is not set +CONFIG_HID_PETALYNX=m +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PLANTRONICS is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_ROCCAT is not set +# CONFIG_HID_SAITEK is not set +CONFIG_HID_SAMSUNG=m +CONFIG_HID_SONY=m +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_STEELSERIES is not set +CONFIG_HID_SUNPLUS=m +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +CONFIG_HID_TOPSEED=m +# CONFIG_HID_THINGM is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_WACOM is not set +# CONFIG_HID_WIIMOTE is not set +# CONFIG_HID_XINMO is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_HID_SENSOR_HUB is not set + +# +# USB HID support +# +CONFIG_USB_HID=y +CONFIG_HID_PID=y CONFIG_USB_HIDDEV=y -CONFIG_HID_DRAGONRISE=y -CONFIG_HID_GYRATION=y -CONFIG_HID_TWINHAN=y -CONFIG_HID_NTRIG=y -CONFIG_HID_ORTEK=y -CONFIG_HID_PANTHERLORD=y -CONFIG_HID_PETALYNX=y -CONFIG_HID_SAMSUNG=y -CONFIG_HID_SONY=y 
-CONFIG_HID_SUNPLUS=y -CONFIG_HID_GREENASIA=y -CONFIG_HID_SMARTJOYPLUS=y -CONFIG_HID_TOPSEED=y -CONFIG_HID_THRUSTMASTER=y -CONFIG_HID_ZEROPLUS=y + +# +# I2C HID support +# +# CONFIG_I2C_HID is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_ARCH_HAS_HCD=y CONFIG_USB=y -CONFIG_USB_EHCI_HCD=m -# CONFIG_USB_EHCI_TT_NEWSCHED is not set +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG_WHITELIST is not set +CONFIG_USB_MON=y +# CONFIG_USB_WUSB_CBAF is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=m +CONFIG_USB_XHCI_PCI=m +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_HCD_PLATFORM is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_ISP1362_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set CONFIG_USB_UHCI_HCD=m -CONFIG_USB_STORAGE=m -CONFIG_SUN_OPENPROMIO=y -CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT2_FS_POSIX_ACL=y -CONFIG_EXT2_FS_SECURITY=y +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=y +# CONFIG_USB_STORAGE_DEBUG is not set +# CONFIG_USB_STORAGE_REALTEK is not set +# CONFIG_USB_STORAGE_DATAFAB is not set +# CONFIG_USB_STORAGE_FREECOM is not set +# CONFIG_USB_STORAGE_ISD200 is not set +# CONFIG_USB_STORAGE_USBAT is not set +# CONFIG_USB_STORAGE_SDDR09 is not set +# CONFIG_USB_STORAGE_SDDR55 is not set 
+# CONFIG_USB_STORAGE_JUMPSHOT is not set +# CONFIG_USB_STORAGE_ALAUDA is not set +# CONFIG_USB_STORAGE_ONETOUCH is not set +# CONFIG_USB_STORAGE_KARMA is not set +# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set +# CONFIG_USB_STORAGE_ENE_UB6250 is not set +CONFIG_USB_UAS=m + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set +# CONFIG_USBIP_CORE is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +# CONFIG_USB_USS720 is not set +CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_GENERIC=y +# CONFIG_USB_SERIAL_SIMPLE is not set +# CONFIG_USB_SERIAL_AIRCABLE is not set +# CONFIG_USB_SERIAL_ARK3116 is not set +# CONFIG_USB_SERIAL_BELKIN is not set +CONFIG_USB_SERIAL_CH341=m +# CONFIG_USB_SERIAL_WHITEHEAT is not set +# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set +CONFIG_USB_SERIAL_CP210X=m +# CONFIG_USB_SERIAL_CYPRESS_M8 is not set +# CONFIG_USB_SERIAL_EMPEG is not set +CONFIG_USB_SERIAL_FTDI_SIO=m +# CONFIG_USB_SERIAL_VISOR is not set +# CONFIG_USB_SERIAL_IPAQ is not set +# CONFIG_USB_SERIAL_IR is not set +# CONFIG_USB_SERIAL_EDGEPORT is not set +# CONFIG_USB_SERIAL_EDGEPORT_TI is not set +# CONFIG_USB_SERIAL_F81232 is not set +CONFIG_USB_SERIAL_GARMIN=m +# CONFIG_USB_SERIAL_IPW is not set +# CONFIG_USB_SERIAL_IUU is not set +# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set +# CONFIG_USB_SERIAL_KEYSPAN is not set +# CONFIG_USB_SERIAL_KLSI is not set +# CONFIG_USB_SERIAL_KOBIL_SCT is not set +# CONFIG_USB_SERIAL_MCT_U232 is not set +# CONFIG_USB_SERIAL_METRO is not set +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7840=m +# CONFIG_USB_SERIAL_MXUPORT is not set +# CONFIG_USB_SERIAL_NAVMAN is not set +CONFIG_USB_SERIAL_PL2303=m +# CONFIG_USB_SERIAL_OTI6858 is not set +# CONFIG_USB_SERIAL_QCAUX is not set +# CONFIG_USB_SERIAL_QUALCOMM is not set +# CONFIG_USB_SERIAL_SPCP8X5 is not set +# 
CONFIG_USB_SERIAL_SAFE is not set +# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set +# CONFIG_USB_SERIAL_SYMBOL is not set +# CONFIG_USB_SERIAL_TI is not set +# CONFIG_USB_SERIAL_CYBERJACK is not set +# CONFIG_USB_SERIAL_XIRCOM is not set +# CONFIG_USB_SERIAL_OPTION is not set +# CONFIG_USB_SERIAL_OMNINET is not set +# CONFIG_USB_SERIAL_OPTICON is not set +# CONFIG_USB_SERIAL_XSENS_MT is not set +# CONFIG_USB_SERIAL_WISHBONE is not set +# CONFIG_USB_SERIAL_SSU100 is not set +# CONFIG_USB_SERIAL_QT2 is not set +# CONFIG_USB_SERIAL_DEBUG is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_RIO500 is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_USB_SISUSBVGA is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_YUREX is not set +# CONFIG_USB_EZUSB_FX2 is not set +# CONFIG_USB_HSIC_USB3503 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set + +# +# USB Physical Layer drivers +# +# CONFIG_USB_PHY is not set +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_ISP1301 is not set +CONFIG_USB_GADGET=m +# CONFIG_USB_LED_TRIG is not set +# CONFIG_UWB is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set + +# +# USB Peripheral Controller +# +CONFIG_USB_CONFIGFS=m +CONFIG_USB_CONFIGFS_ECM=y +CONFIG_USB_CONFIGFS_ECM_SUBSET=y + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=8 +CONFIG_MMC_BLOCK_BOUNCE=y +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +CONFIG_MMC_SDHCI=m 
+CONFIG_MMC_SDHCI_PCI=m +# CONFIG_MMC_RICOH_MMC is not set +# CONFIG_MMC_SDHCI_PLTFM is not set +# CONFIG_MMC_TIFM_SD is not set +# CONFIG_MMC_SPI is not set +# CONFIG_MMC_CB710 is not set +# CONFIG_MMC_VIA_SDMMC is not set +# CONFIG_MMC_VUB300 is not set +# CONFIG_MMC_USHC is not set +# CONFIG_MMC_USDHI6ROL0 is not set +# CONFIG_MMC_TOSHIBA_PCI is not set +# CONFIG_MMC_MTK is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=m +# CONFIG_LEDS_CLASS_FLASH is not set + +# +# LED drivers +# +# CONFIG_LEDS_LM3530 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_PCA9532 is not set +CONFIG_LEDS_GPIO=m +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +# CONFIG_LEDS_LP5562 is not set +# CONFIG_LEDS_LP8501 is not set +# CONFIG_LEDS_LP8860 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +# CONFIG_LEDS_BLINKM is not set + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_DISK=y +# CONFIG_LEDS_TRIGGER_TIMER is not set +# CONFIG_LEDS_TRIGGER_ONESHOT is not set +# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +# CONFIG_LEDS_TRIGGER_CPU is not set +CONFIG_LEDS_TRIGGER_GPIO=m +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_LEDS_TRIGGER_TRANSIENT is not set +CONFIG_LEDS_TRIGGER_CAMERA=m +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_MTHCA=m +CONFIG_INFINIBAND_MTHCA_DEBUG=y +CONFIG_MLX4_INFINIBAND=m 
+CONFIG_MLX5_INFINIBAND=m +# CONFIG_INFINIBAND_NES is not set +# CONFIG_INFINIBAND_OCRDMA is not set +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +# CONFIG_INFINIBAND_SRPT is not set +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +# CONFIG_INFINIBAND_RDMAVT is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABX80X is not set +CONFIG_RTC_DRV_DS1307=y +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8523 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV8803 is not set + +# +# SPI RTC drivers +# +CONFIG_RTC_DRV_CY14B101P=y +CONFIG_RTC_DRV_FM33256=y +CONFIG_RTC_DRV_MCP795=m +CONFIG_RTC_I2C_AND_SPI=y + + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_DS2404 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# 
CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +# CONFIG_DMADEVICES is not set + +# CONFIG_AUXDISPLAY is not set +CONFIG_UIO=m +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_VIRT_DRIVERS is not set + +# +# Virtio drivers +# +# CONFIG_VIRTIO_PCI is not set +# CONFIG_VIRTIO_MMIO is not set + +# +# Microsoft Hyper-V guest support +# +CONFIG_STAGING=y +CONFIG_R8188EU=m +CONFIG_VT6656=m +CONFIG_CRYSTALHD=m + +# +# Hardware Spinlock drivers +# + +# +# Clock Source drivers +# +# CONFIG_ATMEL_PIT is not set +# CONFIG_SH_TIMER_CMT is not set +# CONFIG_SH_TIMER_MTU2 is not set +# CONFIG_SH_TIMER_TMU is not set +# CONFIG_EM_TIMER_STI is not set +# CONFIG_MAILBOX is not set +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# + +# +# Remoteproc drivers +# +# CONFIG_STE_MODEM_RPROC is not set + +# +# Rpmsg drivers +# + +# +# SOC (System On Chip) specific Drivers +# +# CONFIG_SUNXI_SRAM is not set +# CONFIG_SOC_TI is not set +# CONFIG_PM_DEVFREQ is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +CONFIG_IIO=m +CONFIG_OPT3001=m +CONFIG_INV_MPU6050_IIO=m +CONFIG_INV_MPU6050_I2C=m +CONFIG_AK8975=m +CONFIG_BMP280=m +CONFIG_BMP280_I2C=m +CONFIG_BMP280_SPI=m +CONFIG_PWM=y +CONFIG_ARM_GIC_MAX_NR=1 +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set +# CONFIG_FMC is not set + +# +# PHY Subsystem +# +# CONFIG_GENERIC_PHY is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +CONFIG_RAS=y +# CONFIG_THUNDERBOLT is not set + +# +# Android +# +# CONFIG_ANDROID is not set +CONFIG_NVMEM=y +# CONFIG_STM is not set +# 
CONFIG_INTEL_TH is not set + +# +# FPGA Configuration Support +# +# CONFIG_FPGA is not set + +# +# File systems +# +CONFIG_FS_IOMAP=y +# CONFIG_EXT2_FS_XATTR is not set CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set CONFIG_EXT3_FS_POSIX_ACL=y CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_ENCRYPTION is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +CONFIG_REISERFS_FS=m +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +# CONFIG_JFS_SECURITY is not set +# CONFIG_JFS_DEBUG is not set +# CONFIG_JFS_STATISTICS is not set +CONFIG_XFS_FS=m +# CONFIG_XFS_QUOTA is not set +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is not set +CONFIG_BTRFS_FS=m +CONFIG_ORANGEFS_FS=m +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=m +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +CONFIG_OVERLAY_FS_INDEX=y +CONFIG_OVERLAY_FS_XINO_AUTO=y +CONFIG_OVERLAY_FS_METACOPY=y + +# +# Caches +# +CONFIG_FSCACHE=y + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_UDF_NLS=y + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_FAT_DEFAULT_UTF8 is not set 
+CONFIG_NTFS_FS=m +# CONFIG_NTFS_DEBUG is not set +CONFIG_NTFS_RW=y + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +# CONFIG_PROC_CHILDREN is not set +CONFIG_KERNFS=y +CONFIG_SYSFS=y CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_CONFIGFS_FS=m +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_LOGFS is not set +# CONFIG_CRAMFS is not set +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +# CONFIG_SQUASHFS_XATTR is not set +CONFIG_SQUASHFS_ZLIB=y +# CONFIG_SQUASHFS_LZ4 is not set +# CONFIG_SQUASHFS_LZO is not set +# CONFIG_SQUASHFS_XZ is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +CONFIG_SQUASHFS_EMBEDDED=y +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +CONFIG_MINIX_FS=m +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +CONFIG_ROMFS_FS=m +CONFIG_ROMFS_BACKED_BY_BLOCK=y +# CONFIG_ROMFS_BACKED_BY_MTD is not set +# CONFIG_ROMFS_BACKED_BY_BOTH is not set +CONFIG_ROMFS_ON_BLOCK=y +# CONFIG_PSTORE is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V2=y +CONFIG_NFS_V3=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=y +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_ROOT_NFS=y +CONFIG_NFS_FSCACHE=y +# 
CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFSD=m +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +# CONFIG_NFSD_SCSILAYOUT is not set +# CONFIG_NFSD_FLEXFILELAYOUT is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +# CONFIG_NFSD_FAULT_INJECTION is not set +CONFIG_GRACE_PERIOD=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +CONFIG_SUNRPC_GSS=y +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_SUNRPC_SWAP=y +CONFIG_RPCSEC_GSS_KRB5=m +# CONFIG_SUNRPC_DEBUG is not set +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CIFS=m +# CONFIG_CIFS_STATS is not set +# CONFIG_CIFS_WEAK_PW_HASH is not set +# CONFIG_CIFS_UPCALL is not set +# CONFIG_CIFS_XATTR is not set +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DFS_UPCALL is not set +# CONFIG_CIFS_SMB2 is not set +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=m +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +CONFIG_NLS_CODEPAGE_855=m +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +CONFIG_NLS_CODEPAGE_866=m +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +# 
CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +CONFIG_NLS_ISO8859_5=m +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +# CONFIG_NLS_MAC_ROMAN is not set +# CONFIG_NLS_MAC_CELTIC is not set +# CONFIG_NLS_MAC_CENTEURO is not set +# CONFIG_NLS_MAC_CROATIAN is not set +CONFIG_NLS_MAC_CYRILLIC=m +# CONFIG_NLS_MAC_GAELIC is not set +# CONFIG_NLS_MAC_GREEK is not set +# CONFIG_NLS_MAC_ICELAND is not set +# CONFIG_NLS_MAC_INUIT is not set +# CONFIG_NLS_MAC_ROMANIAN is not set +# CONFIG_NLS_MAC_TURKISH is not set +CONFIG_NLS_UTF8=y + +# +# Kernel hacking +# +CONFIG_TRACE_IRQFLAGS_SUPPORT=y + +# +# printk and dmesg options +# CONFIG_PRINTK_TIME=y +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +# CONFIG_DYNAMIC_DEBUG is not set + +# +# Compile-time checks and compiler options +# +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=8192 +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_FRAME_POINTER=y CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_KERNEL=y -CONFIG_LOCKUP_DETECTOR=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +# CONFIG_DEBUG_KERNEL is not set + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_MEMORY_INIT=y + +# +# Debug Lockups and Hangs +# +# CONFIG_LOCKUP_DETECTOR is not set CONFIG_DETECT_HUNG_TASK=y -# CONFIG_SCHED_DEBUG is not set -CONFIG_SCHEDSTATS=y -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_UPROBE_EVENTS=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_PANIC_ON_OOPS is 
not set +CONFIG_PANIC_ON_OOPS_VALUE=0 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_SCHED_INFO=y +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_STACKTRACE=y +CONFIG_HAVE_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_BUGVERBOSE=y + +# +# RCU Debugging +# +# CONFIG_PROVE_RCU is not set +# CONFIG_SPARSE_RCU_POINTER is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_TORTURE_TEST is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_HWLAT_TRACER is not set +# CONFIG_MISSED_TIMER_OFFSETS_HIST is not set +# CONFIG_FTRACE_SYSCALLS is not set +# CONFIG_TRACER_SNAPSHOT is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +CONFIG_DYNAMIC_FTRACE=y +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_TRACE_ENUM_MAP_FILE is not set +CONFIG_TRACING_EVENTS_GPIO=y + +# +# Runtime Testing +# +CONFIG_LKDTM=m +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_ASYNC_RAID6_TEST is not set +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_DMA_API_DEBUG is 
not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_MEMTEST is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_SAMPLES is not set +# CONFIG_ARCH_WANTS_UBSAN_NO_NULL is not set +# CONFIG_UBSAN is not set +CONFIG_BOOT_TRACE=y +CONFIG_BOOT_TRACE_THRESHOLD=100 + +# +# Security options +# CONFIG_KEYS=y +CONFIG_BIG_KEYS=y +# CONFIG_PERSISTENT_KEYRINGS is not set +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +# CONFIG_SECURITYFS is not set +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_LSM_MMAP_MIN_ADDR=65536 +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1 +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +CONFIG_SECURITY_APPARMOR=y +# CONFIG_SECURITY_LOADPIN is not set +# CONFIG_SECURITY_YAMA is not set +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_IMA_TRUSTED_KEYRING=n +CONFIG_IMA=y +CONFIG_IMA_AUDIT=y +CONFIG_IMA_APPRAISE=y +CONFIG_IMA_TRUSTED_KEYRING=n +CONFIG_IMA_WRITE_POLICY=y +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_DEFAULT_HASH_SHA256=y +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_DEFAULT_SECURITY_SELINUX is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_KEYS_COMPAT=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=m +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y 
+CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=m +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=m +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_KPP2=y +# CONFIG_CRYPTO_RSA is not set +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_USER is not set +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +CONFIG_CRYPTO_GF128MUL=m CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_NULL2=y +# CONFIG_CRYPTO_PCRYPT is not set +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=m +# CONFIG_CRYPTO_MCRYPTD is not set +CONFIG_CRYPTO_AUTHENC=m CONFIG_CRYPTO_TEST=m + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m +# CONFIG_CRYPTO_CHACHA20POLY1305 is not set +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_ECHAINIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CTR=m +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_XTS=m -CONFIG_CRYPTO_XCBC=y -CONFIG_CRYPTO_MD4=y -CONFIG_CRYPTO_MICHAEL_MIC=m +# CONFIG_CRYPTO_KEYWRAP is not set + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=m +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_CRC32 is not set +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_GHASH=m +# CONFIG_CRYPTO_POLY1305 is not set +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=m CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_AES=m + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m CONFIG_CRYPTO_FCRYPT=m 
CONFIG_CRYPTO_KHAZAD=m +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_CHACHA20 is not set CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=m +# CONFIG_CRYPTO_LZO is not set +# CONFIG_CRYPTO_842 is not set +# CONFIG_CRYPTO_LZ4 is not set +# CONFIG_CRYPTO_LZ4HC is not set +# +# Random Number Generation +# # CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRC16=m +CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_DRBG_HMAC=y +# CONFIG_CRYPTO_DRBG_HASH is not set +# CONFIG_CRYPTO_DRBG_CTR is not set +CONFIG_CRYPTO_DRBG=m +CONFIG_CRYPTO_JITTERENTROPY=m +CONFIG_CRYPTO_USER_API=m +CONFIG_CRYPTO_USER_API_HASH=m +CONFIG_CRYPTO_USER_API_SKCIPHER=m +# CONFIG_CRYPTO_USER_API_RNG is not set +# CONFIG_CRYPTO_USER_API_AEAD is not set +CONFIG_CRYPTO_HW=y +# CONFIG_ASYMMETRIC_KEY_TYPE is not set + +# +# Certificates for signature checking +# +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_BITREVERSE=y +# CONFIG_HAVE_ARCH_BITREVERSE is not set +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IO=y +CONFIG_CRC_CCITT=m +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC7 is not set CONFIG_LIBCRC32C=m -CONFIG_VCC=m +# CONFIG_CRC8 is not set +# CONFIG_AUDIT_ARCH_COMPAT_GENERIC is not set +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=m +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_DECOMPRESS=y +CONFIG_DECOMPRESS_GZIP=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_INTERVAL_TREE=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y 
+CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +# CONFIG_CORDIC is not set +# CONFIG_DDR is not set +CONFIG_IRQ_POLL=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_FONT_SUPPORT=y +CONFIG_FONT_8x16=y +# CONFIG_SG_SPLIT is not set +CONFIG_SG_POOL=y +CONFIG_SBITMAP=y +CONFIG_HAVE_DMA_ATTRS=y diff --git a/arch/sparc/include/asm-l b/arch/sparc/include/asm-l new file mode 120000 index 000000000000..55881a560a9f --- /dev/null +++ b/arch/sparc/include/asm-l @@ -0,0 +1 @@ +../../l/include/asm \ No newline at end of file diff --git a/arch/sparc/include/asm/acpi.h b/arch/sparc/include/asm/acpi.h new file mode 100644 index 000000000000..ef2d4f943030 --- /dev/null +++ b/arch/sparc/include/asm/acpi.h @@ -0,0 +1,8 @@ +#ifndef __ASM_SPARC_ACPI_H +#define __ASM_SPARC_ACPI_H + +#define ACPI_FLUSH_CPU_CACHE() + +#include + +#endif /*__ASM_SPARC_ACPI_H*/ diff --git a/arch/sparc/include/asm/apic.h b/arch/sparc/include/asm/apic.h new file mode 100644 index 000000000000..8d85aadc9906 --- /dev/null +++ b/arch/sparc/include/asm/apic.h @@ -0,0 +1,40 @@ +#ifndef __ASM_APIC_H +#define __ASM_APIC_H + +#include +#include +#include +#include +#include +#include + +#ifndef __ASSEMBLY__ + +/* + * Basic functions accessing APICs. 
+ */ +static inline void arch_apic_write(unsigned long reg, unsigned long v) +{ + writel_asi(v, reg, ASI_LAPIC); +} +static inline unsigned long arch_apic_read(unsigned long reg) +{ + return readl_asi(reg, ASI_LAPIC); +} + +static __inline int logical_smp_processor_id(void) +{ + return (GET_APIC_LOGICAL_ID(arch_apic_read(APIC_LDR))); +} + +#endif /* !(__ASSEMBLY__) */ + +#if (defined(CONFIG_E90S)) && \ + (defined(CONFIG_RDMA_SIC) || defined(CONFIG_RDMA_SIC_MODULE) || defined(CONFIG_RDMA_NET) || defined(CONFIG_RDMA_NET_MODULE)) +extern int rdma_apic_init; +extern int rdma_node[]; +#endif + +#include + +#endif /*__ASM_APIC_H*/ diff --git a/arch/sparc/include/asm/apic_regs.h b/arch/sparc/include/asm/apic_regs.h new file mode 100644 index 000000000000..2627c599c87c --- /dev/null +++ b/arch/sparc/include/asm/apic_regs.h @@ -0,0 +1,276 @@ +#ifndef __ASM_APIC_REGS_H +#define __ASM_APIC_REGS_H + + +#ifndef __ASSEMBLY__ + +/* + * the local APIC register structure, memory mapped. Not terribly well + * tested, but we might eventually use this one in the future - the + * problem why we cannot use it right now is the P5 APIC, it has an + * errata which cannot take 8-bit reads and writes, only 32-bit ones ... 
+ */ +#define u32 unsigned int + +struct local_apic { + +/*000*/ struct { u32 __reserved[4]; } __reserved_01; + +/*010*/ struct { u32 __reserved_3 : 20, + apic_enable : 1, + __reserved_2 : 2, + boot_strap : 1, + __reserved_1 : 8; + u32 __reserved[3]; + } bsp; + +/*020*/ struct { /* APIC ID Register */ + u32 __reserved_2 : 4, + phys_apic_id : 4, + __reserved_1 : 24; + u32 __reserved[3]; + } id; + +/*030*/ const + struct { /* APIC Version Register */ + u32 __reserved_2 : 8, + max_lvt : 8, + __reserved_1 : 8, + version : 8; + u32 __reserved[3]; + } version; + +/*040*/ struct { u32 __reserved[4]; } __reserved_03; + +/*050*/ struct { u32 __reserved[4]; } __reserved_04; + +/*060*/ struct { u32 __reserved[4]; } __reserved_05; + +/*070*/ struct { u32 __reserved[4]; } __reserved_06; + +/*080*/ struct { /* Task Priority Register */ + u32 __reserved_1 : 24, + priority : 8; + u32 __reserved_2[3]; + } tpr; + +/*090*/ const + struct { /* Arbitration Priority Register */ + u32 __reserved_1 : 24, + priority : 8; + u32 __reserved_2[3]; + } apr; + +/*0A0*/ const + struct { /* Processor Priority Register */ + u32 __reserved_1 : 24, + priority : 8; + u32 __reserved_2[3]; + } ppr; + +/*0B0*/ struct { /* End Of Interrupt Register */ + u32 eoi; + u32 __reserved[3]; + } eoi; + +/*0C0*/ struct { u32 __reserved[4]; } __reserved_07; + +/*0D0*/ struct { /* Logical Destination Register */ + u32 logical_dest : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } ldr; + +/*0E0*/ struct { /* Destination Format Register */ + u32 model : 4, + __reserved_1 : 28; + u32 __reserved_2[3]; + } dfr; + +/*0F0*/ struct { /* Spurious Interrupt Vector Register */ + u32 __reserved_2 : 22, + focus_cpu : 1, + apic_enabled : 1, + spurious_vector : 8; + u32 __reserved_3[3]; + } svr; + +/*100*/ struct { /* In Service Register */ +/*170*/ u32 bitfield; + u32 __reserved[3]; + } isr [8]; + +/*180*/ struct { /* Trigger Mode Register */ +/*1F0*/ u32 bitfield; + u32 __reserved[3]; + } tmr [8]; + +/*200*/ struct { /* 
Interrupt Request Register */ +/*270*/ u32 bitfield; + u32 __reserved[3]; + } irr [8]; + +/*280*/ union { /* Error Status Register */ + struct { + u32 __reserved_2 : 24, + illegal_register_address : 1, + receive_illegal_vector : 1, + send_illegal_vector : 1, + __reserved_1 : 1, + receive_accept_error : 1, + send_accept_error : 1, + receive_cs_error : 1, + send_cs_error : 1; + u32 __reserved_3[3]; + } error_bits; + struct { + u32 errors; + u32 __reserved_3[3]; + } all_errors; + } esr; + +/*290*/ struct { u32 __reserved[4]; } __reserved_08; + +/*2A0*/ struct { u32 __reserved[4]; } __reserved_09; + +/*2B0*/ struct { u32 __reserved[4]; } __reserved_10; + +/*2C0*/ struct { u32 __reserved[4]; } __reserved_11; + +/*2D0*/ struct { u32 __reserved[4]; } __reserved_12; + +/*2E0*/ struct { u32 __reserved[4]; } __reserved_13; + +/*2F0*/ struct { u32 __reserved[4]; } __reserved_14; + +/*300*/ struct { /* Interrupt Command Register 1 */ + u32 __reserved_3 : 12, + shorthand : 2, + __reserved_2 : 2, + trigger : 1, + level : 1, + __reserved_1 : 1, + delivery_status : 1, + destination_mode : 1, + delivery_mode : 3, + vector : 8; + u32 __reserved_4[3]; + } icr1; + +/*310*/ struct { /* Interrupt Command Register 2 */ + union { + u32 __reserved_2 : 4, + phys_dest : 4, + __reserved_1 : 24; + u32 logical_dest : 8, + __reserved_3 : 24; + } dest; + u32 __reserved_4[3]; + } icr2; + +/*320*/ struct { /* LVT - Timer */ + u32 __reserved_3 : 14, + timer_mode : 1, + mask : 1, + __reserved_2 : 3, + delivery_status : 1, + __reserved_1 : 4, + vector : 8; + u32 __reserved_4[3]; + } lvt_timer; + +/*330*/ struct { u32 __reserved[4]; } __reserved_15; + +/*340*/ struct { /* LVT - Performance Counter */ + u32 __reserved_3 : 15, + mask : 1, + __reserved_2 : 3, + delivery_status : 1, + __reserved_1 : 1, + delivery_mode : 3, + vector : 8; + u32 __reserved_4[3]; + } lvt_pc; + +/*350*/ struct { /* LVT - LINT0 */ + u32 __reserved_2 : 15, + mask : 1, + trigger : 1, + remote_irr : 1, + polarity : 1, + 
delivery_status : 1, + __reserved_1 : 1, + delivery_mode : 3, + vector : 8; + u32 __reserved_3[3]; + } lvt_lint0; + +/*360*/ struct { /* LVT - LINT1 */ + u32 __reserved_2 : 15, + mask : 1, + trigger : 1, + remote_irr : 1, + polarity : 1, + delivery_status : 1, + __reserved_1 : 1, + delivery_mode : 3, + vector : 8; + u32 __reserved_3[3]; + } lvt_lint1; + +/*370*/ struct { /* LVT - Error */ + u32 __reserved_3 : 15, + mask : 1, + __reserved_2 : 3, + delivery_status : 1, + __reserved_1 : 4, + vector : 8; + u32 __reserved_4[3]; + } lvt_error; + +/*380*/ struct { /* Timer Initial Count Register */ + u32 initial_count; + u32 __reserved_2[3]; + } timer_icr; + +/*390*/ const + struct { /* Timer Current Count Register */ + u32 curr_count; + u32 __reserved_2[3]; + } timer_ccr; + +/*3A0*/ struct { u32 __reserved[4]; } __reserved_16; + +/*3B0*/ struct { u32 __reserved[4]; } __reserved_17; + +/*3C0*/ struct { u32 __reserved[4]; } __reserved_18; + +/*3D0*/ struct { u32 __reserved[4]; } __reserved_19; + +/*3E0*/ struct { /* Timer Divide Configuration Register */ + u32 __reserved_1 : 28, + divisor : 4; + u32 __reserved_2[3]; + } timer_dcr; + +/*3F0*/ struct { u32 __reserved[4]; } __reserved_20; +#if 0 +/*3F0*/ struct { u32 __reserved[764]; } __reserved_20; +/*FE0*/ struct { /* Vector from PIC or APIC in nmi */ + u32 __reserved : 24, + nm_vector : 8; + u32 __reserved[3]; + } nm_vect; +/*FF0*/ struct { /* Vector */ + u32 __reserved_1 : 24, + vector : 8; + u32 __reserved[3]; + } vect; +#endif +} __attribute__ ((packed)); + +#undef u32 + +#endif /* !(__ASSEMBLY__) */ + +#endif /* __ASM_APIC_REGS_H */ diff --git a/arch/sparc/include/asm/apicdef.h b/arch/sparc/include/asm/apicdef.h new file mode 100644 index 000000000000..347dc3a1a0c3 --- /dev/null +++ b/arch/sparc/include/asm/apicdef.h @@ -0,0 +1,9 @@ +#ifndef __ASM_SPARC_APICDEF_H +#define __ASM_SPARC_APICDEF_H + +#ifdef __KERNEL__ +#include +#include +#endif + +#endif /*__ASM_SPARC_APICDEF_H */ diff --git 
a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index b60448397d4f..dce359c5f4ed 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h @@ -64,4 +64,23 @@ static inline int atomic_xchg(atomic_t *v, int new) s64 atomic64_dec_if_positive(atomic64_t *v); #define atomic64_dec_if_positive atomic64_dec_if_positive +#ifdef CONFIG_RMO +#ifdef CONFIG_SMP +#define smp_mb__before_atomic_dec() membar_storeload_loadload(); +#define smp_mb__after_atomic_dec() membar_storeload_storestore(); +#define smp_mb__before_atomic_inc() membar_storeload_loadload(); +#define smp_mb__after_atomic_inc() membar_storeload_storestore(); +#else +#define smp_mb__before_atomic_dec() barrier() +#define smp_mb__after_atomic_dec() barrier() +#define smp_mb__before_atomic_inc() barrier() +#define smp_mb__after_atomic_inc() barrier() +#endif +#else /* CONFIG_RMO */ +#define smp_mb__before_atomic_dec() barrier() +#define smp_mb__after_atomic_dec() barrier() +#define smp_mb__before_atomic_inc() barrier() +#define smp_mb__after_atomic_inc() barrier() +#endif /* CONFIG_RMO */ + #endif /* !(__ARCH_SPARC64_ATOMIC__) */ diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h index 9fb148bd3c97..ac61e0369fe5 100644 --- a/arch/sparc/include/asm/barrier_64.h +++ b/arch/sparc/include/asm/barrier_64.h @@ -2,6 +2,17 @@ #ifndef __SPARC64_BARRIER_H #define __SPARC64_BARRIER_H +#define membar_sync() do{__asm__ __volatile__("membar #Sync\n\t");}while(0) + +#ifdef CONFIG_E90S +#define membar_sync() do{__asm__ __volatile__("membar #Sync\n\t");}while(0) +#define membar_safe(type) \ +do { __asm__ __volatile__( \ + " membar " type "\n" \ + : : : "memory"); \ +} while (0) + +#else /*CONFIG_E90S*/ /* These are here in an effort to more fully work around Spitfire Errata * #51. Essentially, if a memory barrier occurs soon after a mispredicted * branch, the chip can stop executing instructions until a trap occurs. 
@@ -23,13 +34,16 @@ * the memory barrier explicitly into a "branch always, predicted taken" * delay slot to avoid the problem case. */ + #define membar_safe(type) \ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \ " membar " type "\n" \ "1:\n" \ : : : "memory"); \ } while (0) +#endif /*CONFIG_E90S*/ +#ifndef CONFIG_RMO /* The kernel always executes in TSO memory model these days, * and furthermore most sparc64 chips implement more stringent * memory ordering than required by the specifications. @@ -37,6 +51,83 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \ #define mb() membar_safe("#StoreLoad") #define rmb() __asm__ __volatile__("":::"memory") #define wmb() __asm__ __volatile__("":::"memory") +#else /* CONFIG_RMO */ +#define mb() \ + membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad") +#define rmb() \ + membar_safe("#LoadLoad") +#define wmb() \ + membar_safe("#StoreStore") +#define membar_storeload() \ + membar_safe("#StoreLoad") +#define membar_storeload_storestore() \ + membar_safe("#StoreLoad | #StoreStore") +#define membar_storeload_loadload() \ + membar_safe("#StoreLoad | #LoadLoad") +#define membar_storestore_loadstore() \ + membar_safe("#StoreStore | #LoadStore") +#endif /* CONFIG_RMO */ + +#define dma_rmb() rmb() +#define dma_wmb() wmb() + +#define read_barrier_depends() do { } while(0) +#ifndef CONFIG_RMO +#define set_mb(__var, __value) \ + do { __var = __value; membar_safe("#StoreLoad"); } while(0) +#else /* CONFIG_RMO */ +#define set_mb(__var, __value) \ + do { __var = __value; membar_storeload_storestore(); } while(0) +#endif /* CONFIG_RMO */ + +#ifdef CONFIG_RMO +#ifdef CONFIG_SMP +#define smp_mb() mb() +#define smp_rmb() rmb() +#define smp_wmb() wmb() +#else +#define smp_mb() __asm__ __volatile__("":::"memory") +#define smp_rmb() __asm__ __volatile__("":::"memory") +#define smp_wmb() __asm__ __volatile__("":::"memory") +#endif +#define smp_store_mb(__var, __value) set_mb(__var, __value) + + + +#define __smp_store_release(p, v) \ 
+do { \ + compiletime_assert_atomic_type(*p); \ + membar_storeload_storestore(); \ + WRITE_ONCE(*p, v); \ +} while (0) + +#define __smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = READ_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + membar_storeload_loadload(); \ + ___p1; \ +}) + +#define __smp_mb__before_atomic() membar_storeload_loadload() +#define __smp_mb__after_atomic() membar_storeload_storestore() + +#else /* CONFIG_RMO */ +#define smp_store_mb(__var, __value) \ + do { WRITE_ONCE(__var, __value); membar_safe("#StoreLoad"); } while(0) + +#ifdef CONFIG_SMP +#define smp_mb() mb() +#define smp_rmb() rmb() +#define smp_wmb() wmb() +#else +#define smp_mb() __asm__ __volatile__("":::"memory") +#define smp_rmb() __asm__ __volatile__("":::"memory") +#define smp_wmb() __asm__ __volatile__("":::"memory") +#endif + +#define read_barrier_depends() do { } while (0) +#define smp_read_barrier_depends() do { } while (0) #define __smp_store_release(p, v) \ do { \ @@ -55,6 +146,7 @@ do { \ #define __smp_mb__before_atomic() barrier() #define __smp_mb__after_atomic() barrier() +#endif /* CONFIG_RMO */ #include diff --git a/arch/sparc/include/asm/bitops_64.h b/arch/sparc/include/asm/bitops_64.h index ca7ea5913494..1c6a9d00de69 100644 --- a/arch/sparc/include/asm/bitops_64.h +++ b/arch/sparc/include/asm/bitops_64.h @@ -23,11 +23,23 @@ void set_bit(unsigned long nr, volatile unsigned long *addr); void clear_bit(unsigned long nr, volatile unsigned long *addr); void change_bit(unsigned long nr, volatile unsigned long *addr); -int fls(unsigned int word); -int __fls(unsigned long word); - #include +#ifdef CONFIG_RMO +#ifdef CONFIG_SMP +#define smp_mb__before_clear_bit() membar_storeload_loadload() +#define smp_mb__after_clear_bit() membar_storeload_storestore() +#else +#define smp_mb__before_clear_bit() barrier() +#define smp_mb__after_clear_bit() barrier() +#endif +#else /* CONFIG_RMO */ +#define smp_mb__before_clear_bit() barrier() +#define smp_mb__after_clear_bit() barrier() 
+#endif /* CONFIG_RMO */ + +#include +#include #include #ifdef __KERNEL__ diff --git a/arch/sparc/include/asm/bootinfo.h b/arch/sparc/include/asm/bootinfo.h new file mode 100644 index 000000000000..65d269409573 --- /dev/null +++ b/arch/sparc/include/asm/bootinfo.h @@ -0,0 +1,279 @@ +#ifndef _SPARC64_BOOTINFO_H_ +#define _SPARC64_BOOTINFO_H_ + +#ifdef __KERNEL__ +#include + +extern bootblock_struct_t *bootblock; /* bootblock structure pointer */ + /* passed by boot (bios/prom) */ + +#define bootblock_virt bootblock + +#if defined(CONFIG_E90S) && defined(NEEDS_GET_DCR) +static inline unsigned long get_dcr(void) +{ + unsigned long dcr; + __asm__ __volatile__("rd %%dcr, %0" + : "=r" (dcr) + : "i" (ASI_DCU_CONTROL_REG)); + return dcr; +} +#endif +#endif + +/* The cpu type names */ + +#define CPU_TYPE_MIN 0 +#define CPU_TYPE_R150 0x0 /* R150 */ +#define CPU_TYPE_R500 0x1 /* R500 */ +#define CPU_TYPE_R500S 0x2 /* R500S */ +#define CPU_TYPE_4R 0x3 /* 4R */ +#define CPU_TYPE_SIMUL 0xfe /* Simulator */ +#define CPU_TYPE_MAX 0xff + + +#if defined(CONFIG_E90S) +#define GET_CPU_TYPE_NAME(type) \ +({ \ + char *name; \ + \ + switch (e90s_get_cpu_type()) { \ + case E90S_CPU_R1000: \ + name = "R1000"; \ + break; \ + case E90S_CPU_R2000: \ + name = "R2000"; \ + break; \ + default: \ + name = "unknown"; \ + } \ + \ + name; \ +}) + +#else /*CONFIG_E90S*/ + +#define GET_CPU_TYPE_NAME(type) \ +({ \ + char *name; \ + \ + switch (type) { \ + case CPU_TYPE_R150: \ + name = "R150"; \ + break; \ + case CPU_TYPE_R500: \ + name = "R500"; \ + break; \ + case CPU_TYPE_R500S: \ + name = "R500S"; \ + break; \ + case CPU_TYPE_4R: \ + name = "R1000"; \ + break; \ + case CPU_TYPE_SIMUL: \ + name = "SIMUL"; \ + break; \ + default: \ + name = "unknown"; \ + } \ + \ + name; \ +}) +#endif /*CONFIG_E90S*/ + +/* Motherboard type names */ +#ifdef CONFIG_E90 + +#define MB_TYPE_E90_BASE 0x00 +#define MB_TYPE_MIN 0 + +#define MB_TYPE_E90_NO_PCI 0 +#define MB_TYPE_E90_CPCI 1 +#define MB_TYPE_E90_MBC 
MB_TYPE_E90_CPCI +#define MB_TYPE_E90_VK3201 (MB_TYPE_E90_CPCI | (1 << 3)) +#define MB_TYPE_E90_MBCC (MB_TYPE_E90_CPCI | (2 << 3)) +#define MB_TYPE_E90_PMC (MB_TYPE_E90_CPCI | (3 << 3)) +#define MB_TYPE_E90_MYPC (MB_TYPE_E90_CPCI | (4 << 3)) +#define MB_TYPE_E90_NOTEBOOK (MB_TYPE_E90_CPCI | (5 << 3)) +#define MB_TYPE_E90_MVC 3 +#define MB_TYPE_E90_MB (3 | (1 << 3)) +#define MB_TYPE_E90_PCPCI 4 +#define MB_TYPE_E90_MPJA1 5 +#define MB_TYPE_E90_COUSIN 6 +#define MB_TYPE_E90_THINCLIENT 7 + +#define MB_TYPE_MAX 127 + + +#define mb_type_pci_mask 0x7 + + +#define GET_MB_TYPE_NAME(type) \ +({ \ + char *name; \ + switch (type) { \ + case MB_TYPE_E90_NO_PCI: \ + name = "MB-1"; \ + break; \ + case MB_TYPE_E90_MBC: \ + name = "MB/C"; \ + break; \ + case MB_TYPE_E90_VK3201: \ + name = "VK32-01"; \ + break; \ + case MB_TYPE_E90_PMC: \ + name = "PMC"; \ + break; \ + case MB_TYPE_E90_MB: \ + name = "MB"; \ + break; \ + case MB_TYPE_E90_MPJA1: \ + name = "MPY1"; \ + break; \ + case MB_TYPE_E90_MVC: \ + name = "MV/C"; \ + break; \ + case MB_TYPE_E90_PCPCI: \ + name = "VK32"; \ + break; \ + case MB_TYPE_E90_NOTEBOOK: \ + name = "MPY2"; \ + break; \ + case MB_TYPE_E90_MBCC: \ + name = "MBC/C"; \ + break; \ + case MB_TYPE_E90_COUSIN: \ + name = "OLD-COUSIN"; \ + break; \ + case MB_TYPE_E90_MYPC: \ + name = "MYP/C"; \ + break; \ + case MB_TYPE_E90_THINCLIENT: \ + name = "TY-R500S"; \ + break; \ + default: \ + name = "unknown"; \ + } \ + name; \ +}) + + +#define GET_MB_USED_IN(type) \ +({ \ + char *name; \ + switch(type) { \ + case MB_TYPE_E90_THINCLIENT: \ + name = "APM VK-1"; \ + break; \ + case MB_TYPE_E90_MVC: \ + name = "VK-27.02, VK-27.03, VK-27.04"; \ + break; \ + case MB_TYPE_E90_MBC: \ + name = "VK-27, VK-27.01"; \ + break; \ + case MB_TYPE_E90_MBCC: \ + name = "ELBRUS-90 MICRO - 52"; \ + break; \ + case MB_TYPE_E90_MYPC: \ + name = "Management module"; \ + break; \ + default : \ + name = NULL; \ + } \ + name; \ +}) +#endif + +#ifdef CONFIG_E90S + +#define MB_TYPE_MIN 128 
+#define MB_TYPE_E90S 128 +#define MB_TYPE_E90S_BUTTERFLY 128 +#define MB_TYPE_E90S_CPCI 129 +#define MB_TYPE_E90S_PC 130 +#define MB_TYPE_E90S_ATX 131 +#define MB_TYPE_E90S_NT 132 +#define MB_TYPE_E90S_SIVUCH2 133 +#define MB_TYPE_E90S_MBC4_1_C 134 +#define MB_TYPE_E90S_MPU3_C 135 +#define MB_TYPE_E90S_MPU6_C 136 +#define MB_TYPE_E90S_MPU_COM 137 +#define MB_TYPE_E90S_MPU_MPC 138 +#define MB_TYPE_E90S_IZUMRUD 139 +#define MB_TYPE_E90S_REJECTOR 140 +#define MB_TYPE_E90S_CY14B101P 141 +/* By default all mb_versions > MB_TYPE_E90S_REJECTOR + * have cy14b101p rt clock. If no correct is_cy14b101p_exist() + * in arch/l/kernel/i2c-spi/core.c + */ + +#define MB_TYPE_MAX 150 + +#define GET_MB_TYPE_NAME(type) \ +({ \ + char *name; \ + switch (type) { \ + case MB_TYPE_E90S_BUTTERFLY: \ + name = "MB90C/C"; \ + break; \ + case MB_TYPE_E90S_CPCI: \ + name = "MBC4/C"; \ + break; \ + case MB_TYPE_E90S_PC: \ + name = "MBC4-PC"; \ + break; \ + case MB_TYPE_E90S_MPU6_C: \ + name = "MPU6/C"; \ + break; \ + case MB_TYPE_E90S_MPU3_C: \ + name = "MPU3/C"; \ + break; \ + case MB_TYPE_E90S_ATX: \ + name = "MPU-ATX"; \ + break; \ + case MB_TYPE_E90S_MPU_MPC: \ + name = "MPU-MPC"; \ + break; \ + case MB_TYPE_E90S_NT: \ + name = "NT-MCST4R"; \ + break; \ + case MB_TYPE_E90S_SIVUCH2: \ + name = "MP1C2/V"; \ + break; \ + case MB_TYPE_E90S_MBC4_1_C: \ + name = "MMBC4_1/C"; \ + break; \ + case MB_TYPE_E90S_IZUMRUD: \ + name = "IZUMRUD"; \ + break; \ + case MB_TYPE_E90S_REJECTOR: \ + name = "REJECTOR"; \ + break; \ + default: \ + name = "unknown"; \ + } \ + name; \ +}) + + + +#define GET_MB_USED_IN(type) \ +({ \ + char *name; \ + switch (type) { \ + case MB_TYPE_E90S_BUTTERFLY: \ + name = "ELBRUS-90C"; \ + break; \ + case MB_TYPE_E90S_MBC4_1_C: \ + name = "VK 27.05"; \ + break; \ + default: \ + name = NULL; \ + } \ + name; \ +}) + + +#endif +#endif /* _SPARC64_BOOTINFO_H_ */ + diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h index dcfd58118c11..00ec4bcdd95a 100644 --- 
a/arch/sparc/include/asm/cache.h +++ b/arch/sparc/include/asm/cache.h @@ -10,8 +10,13 @@ #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) +#ifdef CONFIG_E90S +#define L1_CACHE_SHIFT 6 +#else #define L1_CACHE_SHIFT 5 -#define L1_CACHE_BYTES 32 +#endif + +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #ifdef CONFIG_SPARC32 #define SMP_CACHE_BYTES_SHIFT 5 diff --git a/arch/sparc/include/asm/cacheflush_64.h b/arch/sparc/include/asm/cacheflush_64.h index e7517434d1fa..1c08aeed906d 100644 --- a/arch/sparc/include/asm/cacheflush_64.h +++ b/arch/sparc/include/asm/cacheflush_64.h @@ -12,14 +12,28 @@ #define flushw_all() __asm__ __volatile__("flushw") void __flushw_user(void); -#define flushw_user() __flushw_user() +#ifdef CONFIG_MCST +#define flushw_user() synchronize_user_stack() +#define flush_user_windows __flushw_user + +void e90s_flush_l2_cache(void); +#else +#define flushw_user() __flushw_user() #define flush_user_windows flushw_user +#endif + #define flush_register_windows flushw_all /* These are the same regardless of whether this is an SMP kernel or not. 
*/ +#ifdef CONFIG_MCST +#define flush_cache_mm(__mm) \ + do { if ((__mm) == current->mm) flush_user_windows(); } while(0) +#else #define flush_cache_mm(__mm) \ do { if ((__mm) == current->mm) flushw_user(); } while(0) +#endif + #define flush_cache_dup_mm(mm) flush_cache_mm(mm) #define flush_cache_range(vma, start, end) \ flush_cache_mm((vma)->vm_mm) diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h index 316faa0130ba..59fab1420597 100644 --- a/arch/sparc/include/asm/cmpxchg_64.h +++ b/arch/sparc/include/asm/cmpxchg_64.h @@ -10,10 +10,16 @@ static inline unsigned long __cmpxchg_u32(volatile int *m, int old, int new) { +#ifndef CONFIG_RMO __asm__ __volatile__("cas [%2], %3, %0" - : "=&r" (new) - : "0" (new), "r" (m), "r" (old) - : "memory"); +#else /* CONFIG_RMO */ + __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n" + "cas [%2], %3, %0\n\t" + "membar #StoreLoad | #StoreStore" +#endif /* CONFIG_RMO */ + : "=&r" (new) + : "0" (new), "r" (m), "r" (old) + : "memory"); return new; } @@ -23,12 +29,18 @@ static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int va unsigned long tmp1, tmp2; __asm__ __volatile__( +#ifdef CONFIG_RMO +" membar #StoreLoad | #LoadLoad\n" +#endif /* CONFIG_RMO */ " mov %0, %1\n" "1: lduw [%4], %2\n" " cas [%4], %2, %0\n" " cmp %2, %0\n" " bne,a,pn %%icc, 1b\n" " mov %1, %0\n" +#ifdef CONFIG_RMO +" membar #StoreLoad | #StoreStore\n" +#endif /* CONFIG_RMO */ : "=&r" (val), "=&r" (tmp1), "=&r" (tmp2) : "0" (val), "r" (m) : "cc", "memory"); @@ -40,12 +52,18 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long unsigned long tmp1, tmp2; __asm__ __volatile__( +#ifdef CONFIG_RMO +" membar #StoreLoad | #LoadLoad\n" +#endif /* CONFIG_RMO */ " mov %0, %1\n" "1: ldx [%4], %2\n" " casx [%4], %2, %0\n" " cmp %2, %0\n" " bne,a,pn %%xcc, 1b\n" " mov %1, %0\n" +#ifdef CONFIG_RMO +" membar #StoreLoad | #StoreStore\n" +#endif /* CONFIG_RMO */ : "=&r" (val), "=&r" (tmp1), 
"=&r" (tmp2) : "0" (val), "r" (m) : "cc", "memory"); @@ -110,11 +128,16 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, #include - static inline unsigned long __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new) { +#ifndef CONFIG_RMO __asm__ __volatile__("casx [%2], %3, %0" +#else /* CONFIG_RMO */ + __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n" + "casx [%2], %3, %0\n\t" + "membar #StoreLoad | #StoreStore" +#endif /* CONFIG_RMO */ : "=&r" (new) : "0" (new), "r" (m), "r" (old) : "memory"); @@ -128,7 +151,7 @@ __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new) * The XOR is handy for reversing the bits for big-endian byte order */ static inline unsigned long -__cmpxchg_u8(volatile unsigned char *m, unsigned char old, unsigned char new) + __cmpxchg_u8(volatile unsigned char *m, unsigned char old, unsigned char new) { unsigned long maddr = (unsigned long)m; int bit_shift = (((unsigned long)m & 3) ^ 3) << 3; @@ -142,7 +165,7 @@ __cmpxchg_u8(volatile unsigned char *m, unsigned char old, unsigned char new) old32 = (load32 & ~mask) | (old << bit_shift); load32 = __cmpxchg_u32(ptr, old32, new32); if (load32 == old32) - return old; + return old; load = (load32 & mask) >> bit_shift; } while (load == old); diff --git a/arch/sparc/include/asm/console.h b/arch/sparc/include/asm/console.h new file mode 100644 index 000000000000..a351584028e5 --- /dev/null +++ b/arch/sparc/include/asm/console.h @@ -0,0 +1,24 @@ + +#ifndef _SPARC64_CONSOLE_H_ +#define _SPARC64_CONSOLE_H_ + +#ifndef __ASSEMBLY__ +#include +#include + +#define early_virtio_cons_enabled false /* cannot be used */ + +static inline void +virt_console_dump_putc(char c) +{ + /* virtual console is actual only on guest */ +} + +#include + +#define LMS_CONS_DATA_PORT (0x300 + PCI_IO) +#define LMS_CONS_STATUS_PORT (0x301 + PCI_IO) + +#endif /* __ASSEMBLY__ */ + +#endif /* _SPARC64_CONSOLE_H_ */ diff --git a/arch/sparc/include/asm/device.h 
b/arch/sparc/include/asm/device.h index a797d5e86406..a38f3b433c7e 100644 --- a/arch/sparc/include/asm/device.h +++ b/arch/sparc/include/asm/device.h @@ -5,6 +5,21 @@ #ifndef _ASM_SPARC_DEVICE_H #define _ASM_SPARC_DEVICE_H +#if defined(CONFIG_E90S) + +struct dev_archdata { +#ifdef CONFIG_IOMMU_API + void *iommu; /* private IOMMU data */ +#endif +}; + +struct pdev_archdata { +}; + +#define dev_to_link(dev) 0 +#define set_dev_link(dev, link) do { } while (0) + +#else /*CONFIG_E90S*/ #include struct device_node; @@ -25,5 +40,5 @@ struct pdev_archdata { unsigned int irqs[PROMINTR_MAX]; int num_irqs; }; - +#endif /*CONFIG_E90S*/ #endif /* _ASM_SPARC_DEVICE_H */ diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index ed32845bd2d2..d0fec09fce0e 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -2,6 +2,10 @@ #ifndef ___ASM_SPARC_DMA_MAPPING_H #define ___ASM_SPARC_DMA_MAPPING_H +#if defined(CONFIG_E90) || defined(CONFIG_E90S) +#include +#else /*CONFIG_E90 || CONFIG_E90S*/ + #include extern const struct dma_map_ops *dma_ops; @@ -21,4 +25,22 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return dma_ops; } +#define HAVE_ARCH_DMA_SET_MASK 1 + +static inline int dma_set_mask(struct device *dev, u64 mask) +{ +#ifdef CONFIG_PCI + if (dev->bus == &pci_bus_type) { + if (!dev->dma_mask || !dma_supported(dev, mask)) + return -EINVAL; + *dev->dma_mask = mask; + return 0; + } +#endif + return -EINVAL; +} + +#include +#endif /*CONFIG_E90 || CONFIG_E90S*/ + #endif diff --git a/arch/sparc/include/asm/e90s.h b/arch/sparc/include/asm/e90s.h new file mode 100644 index 000000000000..f075df5f6b9a --- /dev/null +++ b/arch/sparc/include/asm/e90s.h @@ -0,0 +1,467 @@ +#ifndef _SPARC64_E90_H +#define _SPARC64_E90_H + + +#include +#include + +/* */ +#define E90S_DCACHE_SIZE (32 * 1024) +#define E90S_DCACHE_LINE_SHIFT 6 +#define E90S_DCACHE_LINE_SIZE (1 << E90S_DCACHE_LINE_SHIFT) 
+#define E90S_ICACHE_SIZE (16 * 1024) +#define E90S_ICACHE_LINE_SIZE (64) +#define E90S_ECACHE_SIZE (2 * 1024 * 1024) +#define E90S_ECACHE_LINE_SIZE (64) + +/* MMU MCNTL */ + +#define E90S_MCNTL 0x8 + +/* Reset */ +#define E90S_MCNTL_IRM_8K (0<<16) /* RW 00 sITLB RAM Mode ? , sITLB. */ +#define E90S_MCNTL_IRM_64K (1<<16) /* DRM01. */ +#define E90S_MCNTL_IRM_512K (2<<16) /* sITLB TTE.size*/ +#define E90S_MCNTL_IRM_1M (3<<16) /* */ + +#define E90S_MCNTL_FW_FITLB (1<<15) /* RW 0 ITLB Data \ + In Register fITLB \ + TTE*/ +#define E90S_MCNTL_FW_FDTLB (1<<14) /* RW 0 DTLB Data \ + In Register fITLB \ + TTE**/ +#define E90S_MCNTL_DRM23_8K (0<<12) /* RW 00 sDTLB RAM Mode 23 ? , */ +#define E90S_MCNTL_DRM23_64K (1<<12) /* sDTLB.*/ +#define E90S_MCNTL_DRM23_512K (2<<12) /* DRM01*/ +#define E90S_MCNTL_DRM23_4M (3<<12) /* */ +#define E90S_MCNTL_DRM23_MSK (3<<12) + +#define E90S_MCNTL_DRM01_8K (0<<10) /* RW 00 sDTLB RAM Mode 01 ? , */ +#define E90S_MCNTL_DRM01_64K (1<<10) /* sDTLB*/ +#define E90S_MCNTL_DRM01_512K (2<<10) /* */ +#define E90S_MCNTL_DRM01_4M (3<<10) /* */ +#define E90S_MCNTL_DRM01_MSK (3<<10) + +#define E90S_MCNTL_NC_CACHE (1<<9) /* RW 0 ޣ L1I. 
.*/ +#define E90S_MCNTL_JPS1_TSBP (1<<8) /* RW 0 TSB JPS1 Ultra I/II*/ + +#if PAGE_SHIFT == 13 +#define E90S_MCNTL_IRM_DEFAULT E90S_MCNTL_IRM_8K +#define E90S_MCNTL_DRM01_DEFAULT E90S_MCNTL_DRM01_8K +#define E90S_MCNTL_DRM23_DEFAULT E90S_MCNTL_DRM23_8K +#elif PAGE_SHIFT == 16 +#define E90S_MCNTL_IRM_DEFAULT E90S_MCNTL_IRM_64K +#define E90S_MCNTL_DRM01_DEFAULT E90S_MCNTL_DRM01_64K +#define E90S_MCNTL_DRM23_DEFAULT E90S_MCNTL_DRM23_64K +#elif PAGE_SHIFT == 19 +#define E90S_MCNTL_IRM_DEFAULT E90S_MCNTL_IRM_512K +#define E90S_MCNTL_DRM01_DEFAULT E90S_MCNTL_DRM01_512K +#define E90S_MCNTL_DRM23_DEFAULT E90S_MCNTL_DRM23_512K +#elif PAGE_SHIFT == 22 +#define E90S_MCNTL_IRM_DEFAULT E90S_MCNTL_IRM_4M +#define E90S_MCNTL_DRM01_DEFAULT E90S_MCNTL_DRM01_4M +#define E90S_MCNTL_DRM23_DEFAULT E90S_MCNTL_DRM23_4M +#else +#error No page size specified in kernel configuration +#endif + + +#define E90S_ICJR 0x10 + +#define ICJR_GHR_SHIFT 10 +#define ICJR_GHR_MASK 0x3ff + + +#define E90S_DBGJMP 0x20 + +#define DBGJMP_MM_MASK (3<<22) +#define DBGJMP_MM_PSTATE_MM (0<<22) +#define DBGJMP_MM_TSO (1<<22) +#define DBGJMP_MM_RMO (2<<22) + + + /* */ + +/* */ +#define BASE_DRAM _AC(0,UL) /* */ +#define BASE_PCIMEM _AC(0,UL) /* PCI MMIO */ + + +/* PCI */ +#define BASE_BOOT _AC(0xFFFF000000,UL) /*256M */ + /* */ +#define BASE_PCIIO _AC(0xFF20000000,UL) /*256M PCIIO.*/ + /* IO hub I/O Read I/O Write.*/ +#define BASE_PEXCFG _AC(0xFF10000000,UL) /*256M PCI Express.*/ + /* 7.2.2*/ + /* PCI Express.*/ + +#define PCI_CONFIG_BASE BASE_PEXCFG + +/* + * + * Nodes configuration area (NODESREG) + */ + +#define NODES_CONF_AREA_BASE _AC(0xFE00000000, UL) /* base of area */ +#define NODES_CONF_AREA_SIZE _AC(0x0100000000, UL) /* size of area */ +#define NODE_CONF_AREA_SIZE _AC(0x0010000000, UL) /* node area size */ +#define NODE_OFF NODE_CONF_AREA_SIZE +#define NODE_CONF_AREA_BASE(nodeid) /* the node conf area base */ \ + (NODES_CONF_AREA_BASE + (NODE_CONF_AREA_SIZE * (nodeid))) +#define NODE0_CONF_AREA_BASE 
NODE_CONF_AREA_BASE(0) +#define NODE1_CONF_AREA_BASE NODE_CONF_AREA_BASE(1) +#define NODE2_CONF_AREA_BASE NODE_CONF_AREA_BASE(2) +#define NODE3_CONF_AREA_BASE NODE_CONF_AREA_BASE(3) +#define BASE_NODE0 NODE0_CONF_AREA_BASE /* node #0*/ +#define BASE_NODE1 NODE1_CONF_AREA_BASE /* node #1*/ +#define BASE_NODE2 NODE2_CONF_AREA_BASE /* node #2*/ +#define BASE_NODE3 NODE3_CONF_AREA_BASE /* node #3*/ + +#define NODES_PFREG_AREA_BASE _AC(0xFD00000000, UL) /* base of area */ +#define NODES_PFREG_AREA_SIZE _AC(0x0100000000, UL) /* size of area */ +#define NODE_PFREG_AREA_SIZE _AC(0x0010000000, UL) /* node area size */ +#define NODE_PFREG_AREA_BASE(nodeid) /* the node PFREG area base */ \ + (NODES_PFREG_AREA_BASE + (NODE_PFREG_AREA_SIZE * (nodeid))) +#define NODE0_PFREG_AREA_BASE NODE_PFREG_AREA_BASE(0) +#define NODE1_PFREG_AREA_BASE NODE_PFREG_AREA_BASE(1) +#define NODE2_PFREG_AREA_BASE NODE_PFREG_AREA_BASE(2) +#define NODE3_PFREG_AREA_BASE NODE_PFREG_AREA_BASE(3) + +#ifndef __ASSEMBLY__ +/* + * Nodes processor system registers (north bridge) + * NBSR = { NBSR0 ... NBSRj ... } + * NBSR is some part of node system registers area NSR + */ +#define NODE_NBSR_OFFSET 0x0000000 /* offset of NBSR base into */ + /* node configuration area */ +#define NODE_NBSR_SIZE 0x0100000 /* 1 Mb - size of NBSR area */ +#define THE_NODE_NBSR_PHYS_BASE(node) \ + ((unsigned char *)(NODE_CONF_AREA_BASE(node) + \ + NODE_NBSR_OFFSET)) + +#ifdef CONFIG_NUMA +static inline unsigned char * +sic_get_node_nbsr_base(int node_id) +{ + return THE_NODE_NBSR_PHYS_BASE(node_id); +} +#else /* ! CONFIG_NUMA */ +/* + * NUMA mode is not supported, but each node can has online IO link + * IO links have numeration same as nodes: + * IO link #0 is link on node 0 (CPUs 0-3) + * IO link #1 is link on node 1 (CPUs 4-7) + * ... 
+ * So in this case node # is always only 0 and IO link # can be considered + * as node # + */ +static inline unsigned char * +sic_get_node_nbsr_base(int link) +{ + return THE_NODE_NBSR_PHYS_BASE(link); +} +extern void __init create_nodes_config(void); +#endif /* CONFIG_NUMA */ + +extern int __init e90s_sic_init(void); +extern int __init e90s_early_iohub_online(int node, int link); + +#endif /* ! __ASSEMBLY__ */ + +/* + * NBSR + */ + /* [15:0] () */ + + /* */ +#define NBSR_MEM_EN 0x0000 /* 4 Memory Enable */ +#define NBSR_MEM_DEL 0x0004 /* 4 Memory Delay */ +#define NBSR_FAULT_ST 0x0008 /* 4 Fault Status */ +#define NBSR_VIDEO_CONF 0x000c /* 4 Video Configuration */ +#define NBSR_FAULT0 0x0010 /* 4 Fault Address 0 */ +#define NBSR_FAULT1 0x0014 /* 4 Fault Address 1 */ +#define NBSR_ECC_DIAG 0x0018 /* 4 ECC Diagnostics */ +#define NBSR_EX_MEM_CTRL0 0x0100 /* 4 Extended Memory Control 0 */ +#define NBSR_EX_MEM_CTRL1 0x0104 /* 4 Extended Memory Control 1 */ +#define NBSR_EX_MEM_CTRL2 0x0108 /* 4 Extended Memory Control 2 */ +#define NBSR_EX_MEM_CTRL3 0x010c /* 4 Extended Memory Control 3 */ +#define NBSR_SPD_ST 0x0110 /* 4 SPD status */ +#define NBSR_SPD_DATA0 0x0200 /* 128 SPD Data slot_0 */ +#define NBSR_SPD_DATA1 0x0280 /* 128 SPD Data slot_1 */ + + /* address mapping */ +#define NBSR_DRAM_BASE0 0x1000 /* 4 DRAM Base 0 */ +#define NBSR_DRAM_LIMIT0 0x1004 /* 4 DRAM Limit 0 */ +#define NBSR_DRAM_BASE1 0x1008 /* 4 DRAM Base 1 */ +#define NBSR_DRAM_LIMIT1 0x100c /* 4 DRAM Limit 1 */ +#define NBSR_DRAM_BASE2 0x1010 /* 4 DRAM Base 2 */ +#define NBSR_DRAM_LIMIT2 0x1014 /* 4 DRAM Limit 2 */ +#define NBSR_DRAM_BASE3 0x1018 /* 4 DRAM Base 3 */ +#define NBSR_DRAM_LIMIT3 0x101c /* 4 DRAM Limit 3 */ +#define NBSR_MMIO_BASE0 0x1020 /* 4 MMIO Base 0 */ +#define NBSR_MMIO_LIMIT0 0x1024 /* 4 MMIO Limit 0 */ +#define NBSR_MMIO_BASE1 0x1028 /* 4 MMIO Base 1 */ +#define NBSR_MMIO_LIMIT1 0x102c /* 4 MMIO Limit 1 */ +#define NBSR_MMIO_BASE2 0x1030 /* 4 MMIO Base 2 */ +#define 
NBSR_MMIO_LIMIT2 0x1034 /* 4 MMIO Limit 2 */ +#define NBSR_MMIO_BASE3 0x1038 /* 4 MMIO Base 3 */ +#define NBSR_MMIO_LIMIT3 0x103c /* 4 MMIO Limit 3 */ +#define NBSR_PCIIO_BASE0 0x1040 /* 4 PCIIO Base 0 */ +#define NBSR_PCIIO_LIMIT0 0x1044 /* 4 PCIIO Limit 0 */ +#define NBSR_PCIIO_BASE1 0x1048 /* 4 PCIIO Base 1 */ +#define NBSR_PCIIO_LIMIT1 0x104c /* 4 PCIIO Limit 1 */ +#define NBSR_PCIIO_BASE2 0x1050 /* 4 PCIIO Base 2 */ +#define NBSR_PCIIO_LIMIT2 0x1054 /* 4 PCIIO Limit 2 */ +#define NBSR_PCIIO_BASE3 0x1058 /* 4 PCIIO Base 3 */ +#define NBSR_PCIIO_LIMIT3 0x105c /* 4 PCIIO Limit 3 */ +#define NBSR_PEXCFG0 0x1060 /* 4 PEXCFG Base and Limit 0 */ +#define NBSR_PEXCFG1 0x1064 /* 4 PEXCFG Base and Limit 1 */ +#define NBSR_PEXCFG2 0x1068 /* 4 PEXCFG Base and Limit 2 */ +#define NBSR_PEXCFG3 0x106c /* 4 PEXCFG Base and Limit 3 */ +#define NBSR_DRAM_HOLE_BASE 0x1070 /* 4 DRAM Hole Base */ +#define NBSR_DRAM_HOLE_LIMIT 0x1074 /* 4 DRAM Hole Limit */ +#define NBSR_IOAPIC_BASE 0x1078 /* 4 IOAPIC Message base */ +#define NBSR_LAPIC_BASE 0x107c /* 4 LAPIC Message base */ +#define NBSR_MC_CONFIG 0x10a0 /* 4 MC Configuration */ +# define NBSR_MC_INTERLEAVE_BIT_OFFSET 4 +# define NBSR_MC_INTERLEAVE_BIT_MASK 0xf +# define NBSR_MC_INTERLEAVE_BIT_MIN 6 +# define NBSR_MC_INTERLEAVE_BIT_MAX 23 +# define NBSR_MC_ENABLE_MC1 (1 << 1) +# define NBSR_MC_ENABLE_MC0 (1 << 0) +#define NBSR_EPIC_UP_MSG_BASE 0x10a4 /* 4 EPIC Upstream Message Base */ + #define NBSR_EPIC_UP_MSG_BASE_TO_ADDR(__v) (__v << (20 - 12)) + + /* */ +#define NBSR_IO_VID 0x2000 /* 4 IO Channel VID (IO_VID) */ +#define NBSR_IO_CSR 0x2004 /* 4 IO Control/Status Register (IO_CSR) */ +#define NBSR_IO_TMR 0x2008 /* 4 IO Timer Register (IO_TMR) */ +#define NBSR_IO_STR 0x200c /* 4 IO Statistic Register (IO_STR) */ +#define NBSR_IO_FHR0 0x2104 /* 4 IO Fault Header Register0 (IO_FHR0) */ +#define NBSR_IO_FHR1 0x2108 /* 4 IO Fault Header Register1 (IO_FHR1) */ +#define NBSR_IO_FHR2 0x210c /* 4 IO Fault Header Register2 (IO_FHR2) */ 
+#define NBSR_IO_FHR3 0x2110 /* 4 IO Fault Header Register3 (IO_FHR3) */ +#define NBSR_VID 0x3080 /* 4 RDMA VID VID */ +#define NBSR_CH_IDT 0x3084 /* 4 RDMAChannel ID/Type (CH_IDT) */ +#define NBSR_CS 0x3088 /* 4 RDMA Control/Status (CS) */ +#define NBSR_DD_ID 0x3000 /* 4 Data Destination ID (DD_ID) */ +#define NBSR_IDDMD_ID 0x3004 /* 4 Data_Message Destination (IDDMD_ID) */ +#define NBSR_N_IDT 0x3008 /* 4 Neighbour ID/Type (N_IDT) */ +#define NBSR_ES 0x300c /* 4 Event Status (ES) */ +#define NBSR_IRQ_MC 0x3010 /* 4 Interrupt Mask Control (IRQ_MC) */ +#define NBSR_DMA_TCS 0x3014 /* 4 DMA Tx Control/Status (DMA_TCS) */ +#define NBSR_DMA_TSA 0x3018 /* 4 DMA Tx Start Address (DMA_TSA) */ +#define NBSR_DMA_TBC 0x301c /* 4 DMA Tx Byte Counter (DMA_TBC) */ +#define NBSR_DMA_RCS 0x3020 /* 4 DMA Rx Control/Status(DMA_RCS) */ +#define NBSR_DMA_RSA 0x3024 /* 4 DMA Rx Start Address(DMA_RSA) */ +#define NBSR_DMA_RBC 0x3028 /* 4 DMA Rx Byte Counter (DMA_RBC) */ +#define NBSR_MSG_SC 0x302c /* 4 Message Control/Status (MSG_CS) */ +#define NBSR_TDMSG 0x3030 /* 4 Tx Data_Message Buffer (TDMSG) */ +#define NBSR_RDMSG 0x3034 /* 4 Rx Data_Message Buffer (RDMSG) */ +#define NBSR_CAM 0x3038 /* 4 Channel Alive Management (CAM) */ + +#define NBSR_LINK0_VID 0x4000 /* 4 Link0 Channel VID (LNK0_VID) */ +#define NBSR_LINK0_CSR 0x4004 /* 4 Link0 Control/Status Register (LNK0_CSR) */ +#define NBSR_LINK0_TMR 0x4008 /* 4 Link0 Timer Register (LNK0_TMR) */ +#define NBSR_LINK0_STR 0x4100 /* 4 Link0 Statistic Register (LNK0_STR) */ +#define NBSR_LINK0_FHR0 0x4104 /* 4 Link0 Fault Header Register0 (LNK0_FHR0) */ +#define NBSR_LINK0_FHR1 0x4108 /* 4 Link0 Fault Header Register1 (LNK0_FHR1) */ +#define NBSR_LINK0_FHR2 0x410c /* 4 Link0 Fault Header Register2 (LNK0_FHR2) */ +#define NBSR_LINK0_FHR3 0x4110 /* 4 Link0 Fault Header Register3 (LNK0_FHR3) */ +#define NBSR_LINK1_VID 0x5000 /* 4 Link1 Channel VID (LNK1_VID) */ +#define NBSR_LINK1_CSR 0x5004 /* 4 Link1 Control/Status Register (LNK1_CSR) */ 
+#define NBSR_LINK1_TMR 0x5008 /* 4 Link1 Timer Register (LNK1_TMR) */ +#define NBSR_LINK1_STR 0x5100 /* 4 Link1 Statistic Register (LNK1_STR) */ +#define NBSR_LINK1_FHR0 0x5104 /* 4 Link1 Fault Header Register0 (LNK1_FHR0) */ +#define NBSR_LINK1_FHR1 0x5108 /* 4 Link1 Fault Header Register1 (LNK1_FHR1) */ +#define NBSR_LINK1_FHR2 0x510c /* 4 Link1 Fault Header Register2 (LNK1_FHR2) */ +#define NBSR_LINK1_FHR3 0x5110 /* 4 Link1 Fault Header Register3 (LNK1_FHR3) */ +#define NBSR_LINK2_VID 0x6000 /* 4 Link2 Channel VID (LNK2_VID) */ +#define NBSR_LINK2_CSR 0x6004 /* 4 Link2 Control/Status Register (LNK2_CSR) */ +#define NBSR_LINK2_TMR 0x6008 /* 4 Link2 Timer Register (LNK2_TMR) */ +#define NBSR_LINK2_STR 0x6100 /* 4 Link2 Statistic Register (LNK2_STR) */ +#define NBSR_LINK2_FHR0 0x6104 /* 4 Link2 Fault Header Register0 (LNK2_FHR0) */ +#define NBSR_LINK2_FHR1 0x6108 /* 4 Link2 Fault Header Register1 (LNK2_FHR1) */ +#define NBSR_LINK2_FHR2 0x610c /* 4 Link2 Fault Header Register2 (LNK2_FHR2) */ +#define NBSR_LINK2_FHR3 0x6110 /* 4 Link2 Fault Header Register3 (LNK2_FHR3) */ + + /* */ +#define NBSR_NODE_ID 0x7000 /* 4 NodeId */ +#define NBSR_NODE_CFG 0x7004 /* 4 NodeConfig */ +#define NBSR_ROUTE_TBL0 0x7010 /* 4 RouteTbl 0 */ +#define NBSR_ROUTE_TBL1 0x7014 /* 4 RouteTbl 1 */ +#define NBSR_ROUTE_TBL2 0x7018 /* 4 RouteTbl 2 */ +#define NBSR_ROUTE_TBL3 0x701c /* 4 RouteTbl 3 */ +#define NBSR_INT_CFG 0x7080 /* 4 Node Interrupt Configuration */ +#define NBSR_NODE_CFG_INFO 0x7088 /* 4 Node Config Information */ +#define NBSR_JUMPER 0x70b0 /* 4 Node Jumper Register */ +# define NBSR_JUMPER_R2000P_JmpIommuMirrorEn (1 << 12) +#define NBSR_NODE_CFG2 0x70b4 /* 4 NodeConfig2 */ + +/* e90s has only one IO link on each node */ +#define SIC_io_reg_offset(io_link, reg) ((reg)) + + /* IOMMU */ +#define NBSR_IOMMU_CTRL 0x8000 /* 4 IOMMU Control */ +#define NBSR_IOMMU_BA 0x8004 /* 4 IOMMU Base Address */ +#define NBSR_IOMMU_FLUSH_ALL 0x8014 /* 4 Flush All TLB Entries */ +#define 
NBSR_IOMMU_FLUSH_ADDR 0x8018 /* 4 Flush on Address Match */ + /*0x8100-0x813f*/ /* 4 IOMMU CAM */ +#define NBSR_IOMMU_VA 0x8140 /* 4 Virtual Address */ +#define NBSR_IOMMU_TLB_COMPR 0x8150 /* 4 TLB Comparator */ +#define NBSR_IOMMU_FSR 0x8160 /* 4 Fault Status Register */ +#define NBSR_IOMMU_FAULT_SOURCE_ID 0x8164 /* 4 Fault Source ID. + Exists at cpu > r2000+ */ +#define NBSR_IOMMU_FAH 0x8170 /* 4 Fault Address High */ +#define NBSR_IOMMU_FAL 0x8174 /* 4 Fault Address Low */ + /*0x8200-0x823f*/ /* 4 IOMMU RAM */ + +#define NBSR_IOMMU_1_OFFSET (0xA000 - NBSR_IOMMU_CTRL) +#define NBSR_IOMMU_2TO4_OFFSET (0x400) + +#define NBSR_NODE_CFG_CPU_MASK 0xf + + +#define IOMMU_FSR_MUTIPLE_ERR (1 << 4) /* */ +#define IOMMU_FSR_MULTIHIT (1 << 3) /* IOMMU tlb.*/ +#define IOMMU_FSR_WRITE_PROTECTION (1 << 2) /* */ +#define IOMMU_FSR_PAGE_MISS (1 << 1) /* TTE . */ +#define IOMMU_FSR_ADDR_RNG_VIOLATION (1 << 0) /* */ + /*( ,*/ + /* -). */ +#define IOMMU_FSR_ERR_MASK (IOMMU_FSR_MULTIHIT | IOMMU_FSR_MULTIHIT | \ + IOMMU_FSR_WRITE_PROTECTION| IOMMU_FSR_PAGE_MISS | \ + IOMMU_FSR_ADDR_RNG_VIOLATION) + +/* FIXME: PREPIC */ +#define SIC_prepic_version 0x8000 +#define SIC_prepic_ctrl 0x8010 +#define SIC_prepic_id 0x8020 +#define SIC_prepic_ctrl2 0x8030 +#define SIC_prepic_err_stat 0x8040 +#define SIC_prepic_err_msg_lo 0x8050 +#define SIC_prepic_err_msg_hi 0x8054 +#define SIC_prepic_err_int 0x8060 +#define SIC_prepic_mcr 0x8070 +#define SIC_prepic_mid 0x8074 +#define SIC_prepic_mar0_lo 0x8080 +#define SIC_prepic_mar0_hi 0x8084 +#define SIC_prepic_mar1_lo 0x8090 +#define SIC_prepic_mar1_hi 0x8094 +#define SIC_prepic_linp 0x8c00 + +#ifndef __ASSEMBLY__ + +#define E90S_LMS_HALT_OK \ +({ \ + asm volatile (".word \t0xff680000"); \ +}) +#define E90S_LMS_HALT_ERROR(err_no) \ +({ \ + asm volatile (".word \t0xff680000 | %0" \ + : \ + : "i" (err_no)); \ +}) + +#define IS_MACHINE_HW 1 + +/* + * IO links and IO controllers specifications + * E90S machines use IO links and own chipset. 
+ * Main IO buses controller is IOHUB. + */ + +#ifdef CONFIG_NUMA + +#define E90S_MAX_NODE_IOLINKS 1 /* each node can has only 1 IO link */ + /* connected to IOHUB or RDMA */ +#define MACH_NODE_NUMIOLINKS E90S_MAX_NODE_IOLINKS +#define MACH_MAX_NUMIOLINKS (E90S_MAX_NODE_IOLINKS * MAX_NUMNODES) +#define mach_early_sic_init() + +#else /* ! CONFIG_NUMA */ + +#define E90S_MAX_NODE_IOLINKS 1 /* all IO links are considered */ + /* as links on single node # 0 */ +#define MACH_NODE_NUMIOLINKS 1 +#define MACH_MAX_NUMIOLINKS (E90S_MAX_NODE_IOLINKS * MACH_NODE_NUMIOLINKS) +#define mach_early_sic_init() create_nodes_config() + +#endif /* CONFIG_NUMA */ + +#define for_each_iolink_of_node(link) \ + for ((link) = 0; (link) < MACH_NODE_NUMIOLINKS; (link)++) + + +static inline unsigned get_cpu_revision(void) +{ +#ifdef CONFIG_E90S + unsigned long ver; + __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver)); +#endif + return (ver >> 24) & 0xff; +} + +#define E90S_CPU_R1000 1 +#define E90S_CPU_R2000 2 +#define E90S_CPU_R2000P 3 + +static inline int e90s_get_cpu_type(void) +{ + unsigned rev = get_cpu_revision(); + if (rev < 0x10) + return E90S_CPU_R1000; + else if (rev < 0x20) + return E90S_CPU_R2000; + + return E90S_CPU_R2000P; +} + +#define E90S_R1000_MAX_NR_NODE_CPUS 4 + +static inline int e90s_max_nr_node_cpus(void) +{ + switch (e90s_get_cpu_type()) { + case E90S_CPU_R1000: + return E90S_R1000_MAX_NR_NODE_CPUS; + case E90S_CPU_R2000: + return 16; + case E90S_CPU_R2000P: + return 2; + } + return 0; +} + +static inline bool cpu_has_epic(void) +{ + return e90s_get_cpu_type() >= E90S_CPU_R2000P; +} + +static inline bool has_external_iohub(void) +{ + return e90s_get_cpu_type() < E90S_CPU_R2000P; +} + + +#define HAS_MACHINE_E90S_SIC (1) +#define HAS_MACHINE_E2K_SIC HAS_MACHINE_E90S_SIC +#define HAS_MACHINE_E2K_FULL_SIC HAS_MACHINE_E90S_SIC +#define HAS_MACHINE_E2K_IOHUB (1) +#define HAS_MACHINE_L_SIC HAS_MACHINE_E90S_SIC +#define HAS_MACHINE_L_FULL_SIC HAS_MACHINE_E2K_FULL_SIC +#define 
HAS_MACHINE_L_IOHUB HAS_MACHINE_E2K_IOHUB + +extern void flush_locked_tte(void); +extern void smp_synchronize_one_tick(int cpu); +extern long long delta_ticks[]; +extern long long do_sync_cpu_clocks; + +extern void __init e90s_late_time_init(void); +#endif /*__ASSEMBLY__*/ + +#define CYCL_SYNC_GAP_BIT 11 +#define CYCL_SYNC_GAP (1 << CYCL_SYNC_GAP_BIT) + +#endif /*_SPARC64_E90_H*/ diff --git a/arch/sparc/include/asm/el_posix.h b/arch/sparc/include/asm/el_posix.h new file mode 100644 index 000000000000..6952f70acef0 --- /dev/null +++ b/arch/sparc/include/asm/el_posix.h @@ -0,0 +1,10 @@ +#ifndef _ASM_EL_POSIX_H +#define _ASM_EL_POSIX_H + +#if defined(__sparc__) && defined(__arch64__) +#include +#else +#include +#endif + +#endif diff --git a/arch/sparc/include/asm/el_posix_64.h b/arch/sparc/include/asm/el_posix_64.h new file mode 100644 index 000000000000..6abf96088c60 --- /dev/null +++ b/arch/sparc/include/asm/el_posix_64.h @@ -0,0 +1,93 @@ + +#define ARCH_HAS_GET_CYCLES + +#define ARCH_HAS_ATOMIC_CMPXCHG + +#define el_atomic_cmpxchg_acq(x, uaddr, oldval, newval) \ + __el_atomic_cmpxchg_acq(&x, uaddr, oldval, newval) +static inline int __el_atomic_cmpxchg_acq(int *x, int *uaddr, int oldval, + int newval) +{ + int ret; + + __asm__ __volatile__( + "\n1: casa [%4] %%asi, %3, %1\n" + "2:\n" + " .section .fixup,#alloc,#execinstr\n" + " .align 4\n" + "3: sethi %%hi(2b), %0\n" + " jmpl %0 + %%lo(2b), %%g0\n" + " mov %5, %0\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .align 4\n" + " .word 1b, 3b\n" + " .previous\n" + : "=&r" (ret), "=r" (newval) + : "1" (newval), "r" (oldval), "r" (uaddr), + "i" (-EFAULT), "0" (0) + : "memory"); + smp_mb(); + *x = newval; + + return ret; +} + +#define el_atomic_cmpxchg_rel(x, uaddr, oldval, newval) \ + __el_atomic_cmpxchg_rel(&x, uaddr, oldval, newval) +static inline int __el_atomic_cmpxchg_rel(int *x, int *uaddr, int oldval, + int newval) +{ + int ret; + + smp_mb(); + __asm__ __volatile__( + "\n1: casa [%4] %%asi, %3, %1\n" + 
"2:\n" + " .section .fixup,#alloc,#execinstr\n" + " .align 4\n" + "3: sethi %%hi(2b), %0\n" + " jmpl %0 + %%lo(2b), %%g0\n" + " mov %5, %0\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .align 4\n" + " .word 1b, 3b\n" + " .previous\n" + : "=&r" (ret), "=r" (newval) + : "1" (newval), "r" (oldval), "r" (uaddr), + "i" (-EFAULT), "0" (0) + : "memory"); + *x = newval; + + return ret; +} + +#define el_atomic_xchg_acq(x, uaddr, value) \ + __el_atomic_xchg_acq(&x, uaddr, value) +static inline int __el_atomic_xchg_acq(int *x, int *uaddr, int val) +{ + int ret; + + __asm__ __volatile__( + "\n1: swapa [%3] %%asi, %1\n\t" + "2:\n" + " .section .fixup,#alloc,#execinstr\n" + " .align 4\n" + "3: sethi %%hi(2b), %0\n" + " jmpl %0 + %%lo(2b), %%g0\n" + " mov %4, %0\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .align 4\n" + " .word 1b, 3b\n" + " .previous\n" + : "=&r" (ret), "=&r" (val) + : "1" (val), "r" (uaddr), "i" (-EFAULT), "0" (0) + : "memory"); + smp_mb(); + + *x = val; + + return ret; +} diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h index 7e078bc73ef5..fbb71946d63d 100644 --- a/arch/sparc/include/asm/elf_64.h +++ b/arch/sparc/include/asm/elf_64.h @@ -59,6 +59,9 @@ #define R_SPARC_7 43 #define R_SPARC_5 44 #define R_SPARC_6 45 +#ifdef CONFIG_MCST +#define R_SPARC_UA64 54 +#endif /*CONFIG_MCST*/ /* Bits present in AT_HWCAP, primarily for Sparc32. 
*/ #define HWCAP_SPARC_FLUSH 0x00000001 @@ -214,6 +217,15 @@ do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ extern unsigned int vdso_enabled; +#ifdef CONFIG_E90S +#define ARCH_DLINFO \ +do { \ + extern struct adi_config adi_state; \ + NEW_AUX_ENT(AT_ADI_BLKSZ, adi_state.caps.blksz); \ + NEW_AUX_ENT(AT_ADI_NBITS, adi_state.caps.nbits); \ + NEW_AUX_ENT(AT_ADI_UEONADI, adi_state.caps.ue_on_adi); \ +} while (0) +#else #define ARCH_DLINFO \ do { \ extern struct adi_config adi_state; \ @@ -224,10 +236,12 @@ do { \ NEW_AUX_ENT(AT_ADI_NBITS, adi_state.caps.nbits); \ NEW_AUX_ENT(AT_ADI_UEONADI, adi_state.caps.ue_on_adi); \ } while (0) +#endif struct linux_binprm; - +#ifndef CONFIG_E90S #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 extern int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp); +#endif #endif /* !(__ASM_SPARC64_ELF_H) */ diff --git a/arch/sparc/include/asm/epic.h b/arch/sparc/include/asm/epic.h new file mode 100644 index 000000000000..b20fe6b548f5 --- /dev/null +++ b/arch/sparc/include/asm/epic.h @@ -0,0 +1,41 @@ +#ifndef __ASM_E90S_EPIC_H +#define __ASM_E90S_EPIC_H + +#ifdef __KERNEL__ +#include + + +static inline unsigned get_current_epic_core_priority(void) +{ + return current_thread_info()->epic_core_priority; +} + +static inline void set_current_epic_core_priority(unsigned p) +{ + current_thread_info()->epic_core_priority = p; +} + +static inline void epic_write_w(unsigned int reg, unsigned int v) +{ + writel_asi(v, reg, ASI_EPIC); +} + +static inline unsigned int epic_read_w(unsigned int reg) +{ + return readl_asi(reg, ASI_EPIC); +} + +static inline void epic_write_d(unsigned int reg, unsigned long v) +{ + writeq_asi(v, reg, ASI_EPIC); +} + +static inline unsigned long epic_read_d(unsigned int reg) +{ + return readq_asi(reg, ASI_EPIC); +} + +#include + +#endif /* __KERNEL__ */ +#endif /* __ASM_E90S_EPIC_H */ diff --git a/arch/sparc/include/asm/epic_regs.h b/arch/sparc/include/asm/epic_regs.h new file mode 100644 index 
000000000000..593b21ef1217 --- /dev/null +++ b/arch/sparc/include/asm/epic_regs.h @@ -0,0 +1,4 @@ +#ifndef __ASM_E90S_EPIC_REGS_H +#define __ASM_E90S_EPIC_REGS_H +#include +#endif /* __ASM_E90S_EPIC_REGS_H */ diff --git a/arch/sparc/include/asm/epicdef.h b/arch/sparc/include/asm/epicdef.h new file mode 100644 index 000000000000..d80dfdd1a135 --- /dev/null +++ b/arch/sparc/include/asm/epicdef.h @@ -0,0 +1,4 @@ +#ifndef _ASM_E90S_EPICDEF_H +#define _ASM_E90S_EPICDEF_H +#include +#endif /* _ASM_E90S_EPICDEF_H */ diff --git a/arch/sparc/include/asm/fb.h b/arch/sparc/include/asm/fb.h index f699962e9ddf..3645e787eae9 100644 --- a/arch/sparc/include/asm/fb.h +++ b/arch/sparc/include/asm/fb.h @@ -15,6 +15,9 @@ static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, #endif } +#ifdef CONFIG_E90S +extern int fb_is_primary_device(struct fb_info *info); +#else static inline int fb_is_primary_device(struct fb_info *info) { struct device *dev = info->device; @@ -27,8 +30,8 @@ static inline int fb_is_primary_device(struct fb_info *info) if (node && node == of_console_device) return 1; - return 0; } +#endif /*CONFIG_E90S*/ #endif /* _SPARC_FB_H_ */ diff --git a/arch/sparc/include/asm/futex_64.h b/arch/sparc/include/asm/futex_64.h index 0865ce77ec00..0dec5ac157c1 100644 --- a/arch/sparc/include/asm/futex_64.h +++ b/arch/sparc/include/asm/futex_64.h @@ -6,6 +6,33 @@ #include #include +#if CONFIG_RMO +#define __futex_cas_op(insn, ret, oldval, uaddr, oparg) \ + __asm__ __volatile__( \ + "\n membar #LoadLoad | #LoadStore\n" \ + "1: lduwa [%3] %%asi, %2\n" \ + " " insn "\n" \ + "2: casa [%3] %%asi, %2, %1\n" \ + " cmp %2, %1\n" \ + " bne,pn %%icc, 1b\n" \ + " mov 0, %0\n" \ + " membar #LoadStore | #StoreStore\n" \ + "3:\n" \ + " .section .fixup,#alloc,#execinstr\n" \ + " .align 4\n" \ + "4: sethi %%hi(3b), %0\n" \ + " jmpl %0 + %%lo(3b), %%g0\n" \ + " mov %5, %0\n" \ + " .previous\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 4\n" \ + " .word 1b, 4b\n" \ + " 
.word 2b, 4b\n" \ + " .previous\n" \ + : "=&r" (ret), "=&r" (oldval), "=&r" (tem) \ + : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \ + : "memory") +#else #define __futex_cas_op(insn, ret, oldval, uaddr, oparg) \ __asm__ __volatile__( \ "\n1: lduwa [%3] %%asi, %2\n" \ @@ -29,6 +56,7 @@ : "=&r" (ret), "=&r" (oldval), "=&r" (tem) \ : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \ : "memory") +#endif static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) @@ -75,7 +103,13 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, int ret = 0; __asm__ __volatile__( +#ifdef CONFIG_RMO + "membar #LoadLoad | #LoadStore\n" +#endif "\n1: casa [%4] %%asi, %3, %1\n" +#ifdef CONFIG_RMO + "membar #StoreLoad | #StoreStore\n" +#endif "2:\n" " .section .fixup,#alloc,#execinstr\n" " .align 4\n" diff --git a/arch/sparc/include/asm/gpio.h b/arch/sparc/include/asm/gpio.h new file mode 100644 index 000000000000..1213826883b0 --- /dev/null +++ b/arch/sparc/include/asm/gpio.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SPARC64_E90S_GPIO_H +#define _ASM_SPARC64_E90S_GPIO_H + +#include + +#endif /* _ASM_SPARC64_E90S_GPIO_H */ diff --git a/arch/sparc/include/asm/hardirq.h b/arch/sparc/include/asm/hardirq.h index a185f6647348..a0d7ec37bc4a 100644 --- a/arch/sparc/include/asm/hardirq.h +++ b/arch/sparc/include/asm/hardirq.h @@ -2,7 +2,11 @@ #ifndef ___ASM_SPARC_HARDIRQ_H #define ___ASM_SPARC_HARDIRQ_H #if defined(__sparc__) && defined(__arch64__) +#ifdef CONFIG_E90S +#include +#else #include +#endif #else #include #endif diff --git a/arch/sparc/include/asm/head_64.h b/arch/sparc/include/asm/head_64.h index 69a2062d992c..0947bcfb4458 100644 --- a/arch/sparc/include/asm/head_64.h +++ b/arch/sparc/include/asm/head_64.h @@ -16,9 +16,15 @@ #define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ) -#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE) -#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV) -#define RTRAP_PSTATE_AG_IRQOFF 
(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG) +#ifndef CONFIG_RMO +#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE) +#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV) +#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG) +#else /* CONFIG_RMO */ +#define RTRAP_PSTATE (PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE) +#define RTRAP_PSTATE_IRQOFF (PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV) +#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG) +#endif /* CONFIG_RMO */ #define __CHEETAH_ID 0x003e0014 #define __JALAPENO_ID 0x003e0016 diff --git a/arch/sparc/include/asm/hw_irq.h b/arch/sparc/include/asm/hw_irq.h index 8d30a7694be2..b66683985525 100644 --- a/arch/sparc/include/asm/hw_irq.h +++ b/arch/sparc/include/asm/hw_irq.h @@ -1,6 +1,8 @@ #ifndef __ASM_SPARC_HW_IRQ_H #define __ASM_SPARC_HW_IRQ_H -/* Dummy include. */ +#ifdef CONFIG_E90S +#include +#endif #endif diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h index f4afa301954a..c4711af1254f 100644 --- a/arch/sparc/include/asm/io_64.h +++ b/arch/sparc/include/asm/io_64.h @@ -9,10 +9,235 @@ #include /* IO address mapping routines need this */ #include #include +#ifdef CONFIG_E90S +#include +#endif /* BIO layer definitions. 
*/ extern unsigned long kern_base, kern_size; + +#ifndef CONFIG_E90S +static inline u8 _inb(unsigned long addr) +{ + u8 ret; + + __asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_inb */" + : "=r" (ret) + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); + + return ret; +} + +static inline u16 _inw(unsigned long addr) +{ + u16 ret; + + __asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_inw */" + : "=r" (ret) + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); + + return ret; +} + +static inline u32 _inl(unsigned long addr) +{ + u32 ret; + + __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_inl */" + : "=r" (ret) + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); + + return ret; +} + +static inline void _outb(u8 b, unsigned long addr) +{ + __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_outb */" + : /* no outputs */ + : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); +} + +static inline void _outw(u16 w, unsigned long addr) +{ + __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_outw */" + : /* no outputs */ + : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); +} + +static inline void _outl(u32 l, unsigned long addr) +{ + __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_outl */" + : /* no outputs */ + : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); +} +#endif /*CONFIG_E90S*/ + +/* Memory functions, same as I/O accesses on Ultra. 
*/ +static inline u8 _readb(const volatile void __iomem *addr) +{ u8 ret; + + __asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_readb */" + : "=r" (ret) + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); + return ret; +} + +static inline u16 _readw(const volatile void __iomem *addr) +{ u16 ret; + + __asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_readw */" + : "=r" (ret) + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); + + return ret; +} + +static inline u32 _readl(const volatile void __iomem *addr) +{ u32 ret; + + __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_readl */" + : "=r" (ret) + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); + + return ret; +} + +static inline u64 _readq(const volatile void __iomem *addr) +{ u64 ret; + + __asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_readq */" + : "=r" (ret) + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); + + return ret; +} + +static inline void _writeb(u8 b, volatile void __iomem *addr) +{ + __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_writeb */" + : /* no outputs */ + : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); +} + +static inline void _writew(u16 w, volatile void __iomem *addr) +{ + __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_writew */" + : /* no outputs */ + : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); +} + +static inline void _writel(u32 l, volatile void __iomem *addr) +{ + __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_writel */" + : /* no outputs */ + : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); +} + +static inline void _writeq(u64 q, volatile void __iomem *addr) +{ + __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_writeq */" + : /* no outputs */ + : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); +} +#ifdef CONFIG_SBUS +/* Now, SBUS variants, only difference from PCI is that we do + * not use little-endian ASIs. 
+ */ +static inline u8 _sbus_readb(const volatile void __iomem *addr) +{ + u8 ret; + + __asm__ __volatile__("lduba\t[%1] %2, %0\t/* sbus_readb */" + : "=r" (ret) + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E) + : "memory"); + + return ret; +} + +static inline u16 _sbus_readw(const volatile void __iomem *addr) +{ + u16 ret; + + __asm__ __volatile__("lduha\t[%1] %2, %0\t/* sbus_readw */" + : "=r" (ret) + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E) + : "memory"); + + return ret; +} + +static inline u32 _sbus_readl(const volatile void __iomem *addr) +{ + u32 ret; + + __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* sbus_readl */" + : "=r" (ret) + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E) + : "memory"); + + return ret; +} + +static inline u64 _sbus_readq(const volatile void __iomem *addr) +{ + u64 ret; + + __asm__ __volatile__("ldxa\t[%1] %2, %0\t/* sbus_readq */" + : "=r" (ret) + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E) + : "memory"); + + return ret; +} + +static inline void _sbus_writeb(u8 b, volatile void __iomem *addr) +{ + __asm__ __volatile__("stba\t%r0, [%1] %2\t/* sbus_writeb */" + : /* no outputs */ + : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E) + : "memory"); +} + +static inline void _sbus_writew(u16 w, volatile void __iomem *addr) +{ + __asm__ __volatile__("stha\t%r0, [%1] %2\t/* sbus_writew */" + : /* no outputs */ + : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E) + : "memory"); +} + +static inline void _sbus_writel(u32 l, volatile void __iomem *addr) +{ + __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* sbus_writel */" + : /* no outputs */ + : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E) + : "memory"); +} + +static inline void _sbus_writeq(u64 l, volatile void __iomem *addr) +{ + __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* sbus_writeq */" + : /* no outputs */ + : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E) + : "memory"); +} +#endif /*CONFIG_SBUS*/ /* __raw_{read,write}{b,w,l,q} uses direct access. 
* Access the memory as big endian bypassing the cache * by using ASI_PHYS_BYPASS_EC_E @@ -65,6 +290,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) return ret; } +#define __raw_readll __raw_readq + #define __raw_writeb __raw_writeb static inline void __raw_writeb(u8 b, const volatile void __iomem *addr) { @@ -97,6 +324,8 @@ static inline void __raw_writeq(u64 q, const volatile void __iomem *addr) : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); } +#define __raw_writell __raw_writeq + /* Memory functions, same as I/O accesses on Ultra. * Access memory as little endian bypassing * the cache by using ASI_PHYS_BYPASS_EC_E_L @@ -192,6 +421,46 @@ static inline void writeq(u64 q, volatile void __iomem *addr) : "memory"); } +#ifdef CONFIG_E90S +#define inb inb +u8 inb(unsigned long addr); + +#define inw inw +u16 inw(unsigned long addr); + +#define inl inl +u32 inl(unsigned long addr); +#define outb outb +void outb(u8 b, unsigned long addr); + +#define outw outw +void outw(u16 w, unsigned long addr); + +#define outl outl +void outl(u32 l, unsigned long addr); + +static inline u32 nbsr_readl(int reg, int node) +{ + void __iomem *base; + node = node >= 0 ? node : 0; + base = NULL + BASE_NODE0 + NODE_CONF_AREA_SIZE * node; + return __raw_readl(base + reg); +} +static inline u64 nbsr_readq(int reg, int node) +{ + void __iomem *base; + node = node >= 0 ? 
node : 0; + base = NULL + BASE_NODE0 + NODE_CONF_AREA_SIZE * node; + return __raw_readq(base + reg); +} +static inline void nbsr_writel(u32 val, unsigned long addr, int node) +{ + __raw_writel(val, (void *)(BASE_NODE0 + + NODE_CONF_AREA_SIZE * node + addr)); +} + +#else /*CONFIG_E90S*/ + #define inb inb static inline u8 inb(unsigned long addr) { @@ -227,7 +496,7 @@ static inline void outl(u32 l, unsigned long addr) { writel(l, (volatile void __iomem *)addr); } - +#endif /*CONFIG_E90S*/ #define inb_p(__addr) inb(__addr) #define outb_p(__b, __addr) outb(__b, __addr) @@ -243,91 +512,128 @@ void insb(unsigned long, void *, unsigned long); void insw(unsigned long, void *, unsigned long); void insl(unsigned long, void *, unsigned long); -static inline void readsb(void __iomem *port, void *buf, unsigned long count) +static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count) { insb((unsigned long __force)port, buf, count); } -static inline void readsw(void __iomem *port, void *buf, unsigned long count) +static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count) { insw((unsigned long __force)port, buf, count); } -static inline void readsl(void __iomem *port, void *buf, unsigned long count) +static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count) { insl((unsigned long __force)port, buf, count); } -static inline void writesb(void __iomem *port, const void *buf, unsigned long count) +static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count) { outsb((unsigned long __force)port, buf, count); } -static inline void writesw(void __iomem *port, const void *buf, unsigned long count) +static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count) { outsw((unsigned long __force)port, buf, count); } -static inline void writesl(void __iomem *port, const void *buf, unsigned long count) +static inline void iowrite32_rep(void __iomem *port, const void 
*buf, unsigned long count) { outsl((unsigned long __force)port, buf, count); } -#define ioread8_rep(p,d,l) readsb(p,d,l) -#define ioread16_rep(p,d,l) readsw(p,d,l) -#define ioread32_rep(p,d,l) readsl(p,d,l) -#define iowrite8_rep(p,d,l) writesb(p,d,l) -#define iowrite16_rep(p,d,l) writesw(p,d,l) -#define iowrite32_rep(p,d,l) writesl(p,d,l) +/* + * Read/write from/to PCI IO memory on sparc64 arch used + * by common Elbrus arch functions + */ +#define boot_readb(__addr) readb(__addr) +#define boot_readw(__addr) readw(__addr) +#define boot_readl(__addr) readl(__addr) +#define boot_readq(__addr) readq(__addr) +#define boot_readb_relaxed(__addr) readb(__addr) +#define boot_readw_relaxed(__addr) readw(__addr) +#define boot_readl_relaxed(__addr) readl(__addr) +#define boot_readq_relaxed(__addr) readq(__addr) +#define boot_writeb(__b, __addr) writeb(__b, __addr) +#define boot_writew(__w, __addr) writew(__w, __addr) +#define boot_writel(__l, __addr) writel(__l, __addr) +#define boot_writeq(__q, __addr) writeq(__q, __addr) + +#define readb_asi(__reg, asi) \ +({ u8 __ret; \ +__asm__ __volatile__("lduba [%1] %2, %0" \ + : "=r" (__ret) \ + : "r" (__reg), "i" (asi) \ + : "memory"); \ + __ret; \ +}) + +#define readw_asi(__reg, asi) \ +({ u16 __ret; \ +__asm__ __volatile__("lduha [%1] %2, %0" \ + : "=r" (__ret) \ + : "r" (__reg), "i" (asi) \ + : "memory"); \ + __ret; \ +}) +#define readl_asi(__reg, asi) \ +({ u32 __ret; \ +__asm__ __volatile__("lduwa [%1] %2, %0" \ + : "=r" (__ret) \ + : "r" (__reg), "i" (asi) \ + : "memory"); \ + __ret; \ +}) + +#define readq_asi(__reg, asi) \ +({ u64 __ret; \ +__asm__ __volatile__("ldxa [%1] %2, %0" \ + : "=r" (__ret) \ + : "r" (__reg), "i" (asi) \ + : "memory"); \ + __ret; \ +}) + +#define writeb_asi(__val, __reg, asi) \ +({ __asm__ __volatile__("stba %0, [%1] %2" \ + : /* no outputs */ \ + : "r" (__val), "r" (__reg), "i" (asi) \ + : "memory"); }) + +#define writew_asi(__val, __reg, asi) \ +({ __asm__ __volatile__("stha %0, [%1] %2" \ + : /* no 
outputs */ \ + : "r" (__val), "r" (__reg), "i" (asi) \ + : "memory"); }) + +#define writel_asi(__val, __reg, asi) \ +({ __asm__ __volatile__("stwa %0, [%1] %2" \ + : /* no outputs */ \ + : "r" (__val), "r" (__reg), "i" (asi) \ + : "memory"); }) + +#define writeq_asi(__val, __reg, asi) \ +({ __asm__ __volatile__("stxa %0, [%1] %2" \ + : /* no outputs */ \ + : "r" (__val), "r" (__reg), "i" (asi) \ + : "memory"); }) /* Valid I/O Space regions are anywhere, because each PCI bus supported * can live in an arbitrary area of the physical address range. */ #define IO_SPACE_LIMIT 0xffffffffffffffffUL -/* Now, SBUS variants, only difference from PCI is that we do - * not use little-endian ASIs. - */ -static inline u8 sbus_readb(const volatile void __iomem *addr) -{ - return __raw_readb(addr); -} +#ifdef CONFIG_SBUS -static inline u16 sbus_readw(const volatile void __iomem *addr) -{ - return __raw_readw(addr); -} +#define sbus_readb(__addr) _sbus_readb(__addr) +#define sbus_readw(__addr) _sbus_readw(__addr) +#define sbus_readl(__addr) _sbus_readl(__addr) +#define sbus_readq(__addr) _sbus_readq(__addr) +#define sbus_writeb(__b, __addr) _sbus_writeb(__b, __addr) +#define sbus_writew(__w, __addr) _sbus_writew(__w, __addr) +#define sbus_writel(__l, __addr) _sbus_writel(__l, __addr) +#define sbus_writeq(__l, __addr) _sbus_writeq(__l, __addr) -static inline u32 sbus_readl(const volatile void __iomem *addr) -{ - return __raw_readl(addr); -} - -static inline u64 sbus_readq(const volatile void __iomem *addr) -{ - return __raw_readq(addr); -} - -static inline void sbus_writeb(u8 b, volatile void __iomem *addr) -{ - __raw_writeb(b, addr); -} - -static inline void sbus_writew(u16 w, volatile void __iomem *addr) -{ - __raw_writew(w, addr); -} - -static inline void sbus_writel(u32 l, volatile void __iomem *addr) -{ - __raw_writel(l, addr); -} - -static inline void sbus_writeq(u64 q, volatile void __iomem *addr) -{ - __raw_writeq(q, addr); -} - -static inline void sbus_memset_io(volatile 
void __iomem *dst, int c, __kernel_size_t n) +static inline void _sbus_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n) { while(n--) { sbus_writeb(c, dst); @@ -335,15 +641,7 @@ static inline void sbus_memset_io(volatile void __iomem *dst, int c, __kernel_si } } -static inline void memset_io(volatile void __iomem *dst, int c, __kernel_size_t n) -{ - volatile void __iomem *d = dst; - - while (n--) { - writeb(c, d); - d++; - } -} +#define sbus_memset_io(d,c,sz) _sbus_memset_io(d,c,sz) static inline void sbus_memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n) @@ -357,7 +655,6 @@ static inline void sbus_memcpy_fromio(void *dst, const volatile void __iomem *sr } } - static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n) { @@ -394,6 +691,18 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, writeb(tmp, d); d++; } +#endif /*CONFIG_SBUS*/ + +/* + * String version of IO memory access ops: + */ +extern void memcpy_fromio(void *, const volatile void __iomem *, long); +extern void memcpy_toio(volatile void __iomem *, const void *, long); +extern void _memset_c_io(volatile void __iomem *, unsigned long, long); + +static inline void memset_io(volatile void __iomem *addr, u8 c, long len) +{ + _memset_c_io(addr, 0x0101010101010101UL * c, len); } #ifdef __KERNEL__ @@ -414,7 +723,6 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size) static inline void iounmap(volatile void __iomem *addr) { } - #define ioread8 readb #define ioread16 readw #define ioread16be __raw_readw @@ -432,8 +740,10 @@ void ioport_unmap(void __iomem *); /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */ struct pci_dev; +void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); void pci_iounmap(struct pci_dev *dev, void __iomem *); +#ifdef CONFIG_SBUS static inline int sbus_can_dma_64bit(void) { return 1; @@ -444,6 +754,7 @@ static inline int 
sbus_can_burst64(void) } struct device; void sbus_set_sbus64(struct device *, int); +#endif /*CONFIG_SBUS*/ /* * Convert a physical pointer to a virtual kernel pointer for /dev/mem @@ -456,6 +767,12 @@ void sbus_set_sbus64(struct device *, int); */ #define xlate_dev_kmem_ptr(p) p +#ifdef CONFIG_E90S +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE +int valid_phys_addr_range(unsigned long addr, size_t size); +int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); +#endif /*CONFIG_E90S*/ + #endif #endif /* !(__SPARC64_IO_H) */ diff --git a/arch/sparc/include/asm/io_apic.h b/arch/sparc/include/asm/io_apic.h new file mode 100644 index 000000000000..be92d7cd2f97 --- /dev/null +++ b/arch/sparc/include/asm/io_apic.h @@ -0,0 +1,7 @@ +#ifndef __ASM_E90S_IO_APIC_H +#define __ASM_E90S_IO_APIC_H + +#include +#include + +#endif diff --git a/arch/sparc/include/asm/io_apic_regs.h b/arch/sparc/include/asm/io_apic_regs.h new file mode 100644 index 000000000000..88405659ff85 --- /dev/null +++ b/arch/sparc/include/asm/io_apic_regs.h @@ -0,0 +1,89 @@ +#ifndef __ASM_IO_APIC_REGS_H +#define __ASM_IO_APIC_REGS_H + +#include +#include + + +/* + * Intel IO-APIC support for SMP and UP systems. 
+ * + * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar + */ + + +/* + * The structure of the IO-APIC: + */ +union IO_APIC_reg_00 { + u32 raw; + struct { + u32 ID : 8, + __reserved_1 : 8, + delivery_type : 1, + LTS : 1, + __reserved_2 : 14; + } __attribute__ ((packed)) bits; +}; + +union IO_APIC_reg_01 { + u32 raw; + struct { + u32 __reserved_1 : 8, + entries : 8, + PRQ : 1, + __reserved_2 : 7, + version : 8; + } __attribute__ ((packed)) bits; +}; + +union IO_APIC_reg_02 { + u32 raw; + struct { + u32 __reserved_1 : 4, + arbitration : 4, + __reserved_2 : 24; + } __attribute__ ((packed)) bits; +}; + +union IO_APIC_reg_03 { + u32 raw; + struct { + u32 __reserved_1 : 31, + boot_DT : 1; + } __attribute__ ((packed)) bits; +}; + +struct IO_APIC_route_entry { + __u32 __reserved_2 : 15, + mask : 1, /* 0: enabled, 1: disabled */ + trigger : 1, /* 0: edge, 1: level */ + irr : 1, + polarity : 1, + delivery_status : 1, + dest_mode : 1, /* 0: physical, 1: logical */ + delivery_mode : 3, /* 000: FIXED + * 001: lowest prio + * 111: ExtINT + */ + vector : 8; + + __u32 dest : 8, + __reserved_3 : 24; +} __attribute__ ((packed)); + +struct IR_IO_APIC_route_entry { + __u64 index : 15, + format : 1, + reserved : 31, + mask : 1, + trigger : 1, + irr : 1, + polarity : 1, + delivery_status : 1, + index2 : 1, + zero : 3, + vector : 8; +} __attribute__ ((packed)); + +#endif /* __ASM_IO_APIC_REGS_H */ diff --git a/arch/sparc/include/asm/io_epic.h b/arch/sparc/include/asm/io_epic.h new file mode 100644 index 000000000000..d9fad8adb968 --- /dev/null +++ b/arch/sparc/include/asm/io_epic.h @@ -0,0 +1,24 @@ +#ifndef _ASM_E90S_IO_EPIC_H +#define _ASM_E90S_IO_EPIC_H + +#define E90S_EPIC_EOI_BASE 0x02000000 +#define E90S_EPIC_APIC_EOI_BASE 0x01000000 + +static inline void epic_ioapic_eoi(u8 vector) +{ + unsigned v = vector << 8; + v |= 0x5; + writel_asi(v, E90S_EPIC_APIC_EOI_BASE, ASI_EPIC); +} + +static inline void get_io_epic_msi(int node, u32 *lo, u32 *hi) +{ + u64 v = 
nbsr_readl(NBSR_EPIC_UP_MSG_BASE, node); + v = NBSR_EPIC_UP_MSG_BASE_TO_ADDR(v); + *lo = v; + *hi = v >> 32; +} + +#include + +#endif /* _ASM_E90S_IO_EPIC_H */ diff --git a/arch/sparc/include/asm/io_epic_regs.h b/arch/sparc/include/asm/io_epic_regs.h new file mode 100644 index 000000000000..956987077f8c --- /dev/null +++ b/arch/sparc/include/asm/io_epic_regs.h @@ -0,0 +1,4 @@ +#ifndef __ASM_E90S_IO_EPIC_REGS_H +#define __ASM_E90S_IO_EPIC_REGS_H +#include +#endif /* __ASM_E90S_IO_EPIC_REGS_H */ diff --git a/arch/sparc/include/asm/iolinkmask.h b/arch/sparc/include/asm/iolinkmask.h new file mode 100644 index 000000000000..ed87a67d7364 --- /dev/null +++ b/arch/sparc/include/asm/iolinkmask.h @@ -0,0 +1,7 @@ +#ifndef __ASM_IOHUBMASK_H +#define __ASM_IOHUBMASK_H + +#include +#include + +#endif /* __LINUX_IOHUBMASK_H */ diff --git a/arch/sparc/include/asm/iommu.h b/arch/sparc/include/asm/iommu.h index 37935cb34865..583e20229b8c 100644 --- a/arch/sparc/include/asm/iommu.h +++ b/arch/sparc/include/asm/iommu.h @@ -2,7 +2,9 @@ #ifndef ___ASM_SPARC_IOMMU_H #define ___ASM_SPARC_IOMMU_H #if defined(__sparc__) && defined(__arch64__) +#ifndef CONFIG_E90S #include +#endif #else #include #endif diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h index 4d748e93b974..d403444e3b76 100644 --- a/arch/sparc/include/asm/irq_64.h +++ b/arch/sparc/include/asm/irq_64.h @@ -11,10 +11,25 @@ #include #include #include -#include #include #include +#ifdef CONFIG_E90S + + +#include + +struct e90s_irq_pending { + unsigned vector; +} __aligned(SMP_CACHE_BYTES); +extern struct e90s_irq_pending e90s_irq_pending[NR_CPUS]; + +#define irq_canonicalize(irq) (irq) + +#define l_irq_enter() +#define l_irq_exit() + +#else /*CONFIG_E90S*/ /* IMAP/ICLR register defines */ #define IMAP_VALID 0x80000000UL /* IRQ Enabled */ #define IMAP_TID_UPA 0x7c000000UL /* UPA TargetID */ @@ -61,6 +76,7 @@ void sun4u_destroy_msi(unsigned int irq); unsigned int irq_alloc(unsigned int dev_handle, 
unsigned int dev_ino); void irq_free(unsigned int irq); +#endif /*CONFIG_E90S*/ void __init init_IRQ(void); void fixup_irqs(void); diff --git a/arch/sparc/include/asm/irq_remapping.h b/arch/sparc/include/asm/irq_remapping.h new file mode 100644 index 000000000000..87576f595c7f --- /dev/null +++ b/arch/sparc/include/asm/irq_remapping.h @@ -0,0 +1,6 @@ +#ifndef __SPARC_IRQ_REMAPPING_H +#define __SPARC_IRQ_REMAPPING_H + +#include + +#endif /*__SPARC_IRQ_REMAPPING_H*/ diff --git a/arch/sparc/include/asm/irq_vectors.h b/arch/sparc/include/asm/irq_vectors.h new file mode 100644 index 000000000000..24664b5094a7 --- /dev/null +++ b/arch/sparc/include/asm/irq_vectors.h @@ -0,0 +1,41 @@ +#ifndef _ASM_SPARC_IRQ_VECTORS_H +#define _ASM_SPARC_IRQ_VECTORS_H + +#define ERROR_APIC_VECTOR 0xfe +#define INVALIDATE_TLB_VECTOR 0xfd +#define CYCLES_SYNC_VECTOR 0xfc +#define RESCHEDULE_VECTOR 0xec +#define CALL_FUNCTION_VECTOR 0xeb +#define CALL_FUNCTION_SINGLE_VECTOR 0xea +#define RDMA_INTERRUPT_VECTOR 0xe9 +#define IRQ_WORK_VECTOR 0xe8 +#define NMI_PSEUDO_VECTOR 0x100 + +/* + * Local APIC timer IRQ vector is on a different priority level, + * to work around the 'lost local interrupt if more than 2 IRQ + * sources per level' errata. 
+ */ +#define LOCAL_TIMER_VECTOR 0xdf + + +#ifdef CONFIG_EPIC +#define CEPIC_TIMER_VECTOR 0x2ea +#define CEPIC_EPIC_INT_VECTOR 0x2fb +#define EPIC_IRQ_WORK_VECTOR 0x2fc +#define EPIC_CALL_FUNCTION_SINGLE_VECTOR 0x2fd +#define EPIC_CALL_FUNCTION_VECTOR 0x2fe +#define EPIC_RESCHEDULE_VECTOR 0x2ff + +#define EPIC_CYCLES_SYNC_VECTOR 0x3fb +#define EPIC_INVALIDATE_TLB_VECTOR 0x3fc +#define PREPIC_ERROR_VECTOR 0x3fd +#define ERROR_EPIC_VECTOR 0x3fe +#define SPURIOUS_EPIC_VECTOR 0x3ff +#endif + + +#include + +#endif /* _ASM_SPARC_IRQ_VECTORS_H */ + diff --git a/arch/sparc/include/asm/irq_work.h b/arch/sparc/include/asm/irq_work.h new file mode 100644 index 000000000000..a289af5209c1 --- /dev/null +++ b/arch/sparc/include/asm/irq_work.h @@ -0,0 +1,10 @@ +#ifndef _ASM_IRQ_WORK_H +#define _ASM_IRQ_WORK_H + +#ifdef CONFIG_E90S +# include +#else +# include +#endif + +#endif /* _ASM_IRQ_WORK_H */ diff --git a/arch/sparc/include/asm/irqdomain.h b/arch/sparc/include/asm/irqdomain.h new file mode 100644 index 000000000000..c81446b25fc3 --- /dev/null +++ b/arch/sparc/include/asm/irqdomain.h @@ -0,0 +1,6 @@ +#ifndef __ASM_IRQDOMAIN_H +#define __ASM_IRQDOMAIN_H + +#include + +#endif diff --git a/arch/sparc/include/asm/l-iommu.h b/arch/sparc/include/asm/l-iommu.h new file mode 100644 index 000000000000..155489859c15 --- /dev/null +++ b/arch/sparc/include/asm/l-iommu.h @@ -0,0 +1,74 @@ +#ifndef _SPARC64_IOMMU_E90S_H +#define _SPARC64_IOMMU_E90S_H + +#include +#include +#include + +#include + +#define L_IOMMU_CTRL NBSR_IOMMU_CTRL +#define L_IOMMU_BA NBSR_IOMMU_BA +#define L_IOMMU_FLUSH_ALL NBSR_IOMMU_FLUSH_ALL +#define L_IOMMU_FLUSH_ADDR NBSR_IOMMU_FLUSH_ADDR + +#define IO_PAGE_SHIFT 13 + +#define MIN_IOMMU_WINSIZE (32*1024*1024UL) +#define MAX_IOMMU_WINSIZE (4*1024*1024*1024UL) +#define DFLT_IOMMU_WINSIZE (2*1024*1024*1024UL) + +#define addr_to_flush(__addr) (__addr) + +#define IOPTE_PAGE_MASK 0xffffffe0 +#define IOPTE_CACHE 0x00000008 /* Cached */ +#define IOPTE_WRITE 0x00000004 /* 
Writeable */ +#define IOPTE_VALID 0x00000002 /* IOPTE is valid */ + +#define pa_to_iopte(addr) (((unsigned long)(addr) >> (IO_PAGE_SHIFT - 5)) \ + & IOPTE_PAGE_MASK) +#define iopte_to_pa(iopte) (((unsigned long)(iopte) & IOPTE_PAGE_MASK) \ + << (IO_PAGE_SHIFT - 5)) + +static inline void __l_iommu_write(unsigned node, u32 val, unsigned long addr) +{ + nbsr_writel(val, addr, node); +} + +static inline void *l_iommu_map_table(unsigned long pa, unsigned long size) +{ + return __va(pa); +} + +static inline void *l_iommu_unmap_table(void *va) +{ + return va; +} + +static inline int l_iommu_get_table(unsigned long iova) +{ + return 0; +} + +#define l_iommu_supported() 1 +#define l_has_devices_with_iommu() (e90s_get_cpu_type() == E90S_CPU_R2000P) + +#define l_iommu_enable_embedded_iommus l_iommu_enable_embedded_iommus +static inline void l_iommu_enable_embedded_iommus(int node) +{ + unsigned v; + if (!l_has_devices_with_iommu()) + return; + v = nbsr_readl(NBSR_JUMPER, node); + v &= ~NBSR_JUMPER_R2000P_JmpIommuMirrorEn; + nbsr_writel(v, NBSR_JUMPER, node); +} + +#define L_PGSIZE_BITMAP SZ_8K + +/* software MMU support */ + +#define E90S_SWIOTLB_DEFAULT_SIZE (64 * 1024 * 1024) +#define L_SWIOTLB_DEFAULT_SIZE E90S_SWIOTLB_DEFAULT_SIZE + +#endif /* !(_SPARC64_IOMMU_E90S_H) */ diff --git a/arch/sparc/include/asm/l-mcmonitor.h b/arch/sparc/include/asm/l-mcmonitor.h new file mode 100644 index 000000000000..4042cfd50c6d --- /dev/null +++ b/arch/sparc/include/asm/l-mcmonitor.h @@ -0,0 +1,153 @@ +#ifndef _SPARC_L_MCMONITOR_H_ +#define _SPARC_L_MCMONITOR_H_ + +#include + +#define CC0_MC_ECC(node) (NODE_PFREG_AREA_BASE(node) | (1 << 25) | (0 << 8)) +#define CC1_MC_ECC(node) (CC0_MC_ECC(node) | (1 << 26)) + +#define CC_MC_ERROR_COUNTER_SHIFT 32 +#define CC_MC_ERROR_COUNTER_MASK 0xffffFFFF +#define CC_MC_ECC_SYN_SHIFT 12 +#define CC_MC_ECC_SYN_MASK 0xff +#define CC_MC_ECC_CB_SHIFT 4 +#define CC_MC_ECC_CB_MASK 0xff +#define CC_MC_EN_ECC_DMODE (1 << 3) +#define CC_MC_EN_ECC_CINT (1 << 2) 
+#define CC_MC_EN_ECC_CORR (1 << 1) +#define CC_MC_EN_ECC_DET (1 << 0) + +typedef u64 l_mc_ecc_struct_t; + +static inline u32 l_mc_get_error_cnt(l_mc_ecc_struct_t *ecc, int node, + int nr) +{ + u64 base = CC0_MC_ECC(node); + if (nr) + base = CC1_MC_ECC(node); + *ecc = __raw_readq((void *)base); + return *ecc >> CC_MC_ERROR_COUNTER_SHIFT; +} + +static inline char *l_mc_get_error_str(l_mc_ecc_struct_t *ecc, int nr, + char *error_msg, int error_msg_len) +{ + snprintf(error_msg, error_msg_len, + "error counter: %u (CC_MC_ECC%d: %08llx)", + (u32)(*ecc >> CC_MC_ERROR_COUNTER_SHIFT), nr, *ecc); + return error_msg; +} + +static inline bool l_mcmonitor_eec_enabled(void) +{ + u64 base = CC0_MC_ECC(0); + u64 ecc = __raw_readq((void *)base); + return (ecc & (CC_MC_EN_ECC_CORR | CC_MC_EN_ECC_CORR)) == + (CC_MC_EN_ECC_CORR | CC_MC_EN_ECC_CORR); +} + +#define l_mcmonitor_supported() ((e90s_get_cpu_type() == E90S_CPU_R2000) && \ + get_cpu_revision() > 0x11 /*Bug 107359*/) + +#define SIC_MAX_MC_COUNT 2 +#define SIC_MC_COUNT SIC_MAX_MC_COUNT + +/* CC handles 32 bytes at a time */ +#define L_MC_ECC_WORDS_NR 4 +#define L_MCMONITOR_TEST_SIZE (sizeof(l_good_data) * L_MC_ECC_WORDS_NR) + +static const u64 l_good_data[] = + { 0x51645e44ab98f0c9, 0x18ed950f0e82621f, 0x28a2a0a02fde054a, +0x8f597eec33ffb8ab, 0xeabad43b2da24553, 0x4d5bc4ff390179de, 0x7491662d7943d276, +0x1ae94cd46bf79bf3, 0xce17a4dcd95642db, 0xde21d28b154b9e48, 0x4bb2eac2723eda75, +0x04f25cde13c7964c, 0xeb63eb28a11747b4, 0x70a536ec16eca674, 0xbbb5e3dcab0054d9, +0xbfb86f11d2353c3b, 0x4c279041fc8ae329, 0x7bd9c3aa2cd41b5c, 0x4f09234d8a5290a2, +0x38f66a35fc4c7fdb, 0xd778dd1a0ff3e7e0, 0xf32d05401a82e1fa, 0x2817eb5785511580, +0xeb23563ddccf25df, 0xc127724c4a08eef5, 0x68d332bf14e49583, 0x046089a8b5e85fc9, +0x676433bdd0cc82d8, 0x4ea9d6422f75b83a, 0x9725c84b9b895d92, 0x7708451d0bb02872, +0xa08a665679547105, 0x31d5e812ce0fa38e, 0xa441944c6605dc6e, 0xe22fa272ae353c2b, +0x85e6833a211168ca, 0x00306aa862f1a9be, 0xf18743885c486792, 
0xd7e2b28462e7886d, +0xfeb71e0bd9e6f2c1, 0x40dd36f387338753, 0x504526ac03f70700, 0x425191625d758895, +0x9f6f188abed4584f, 0x119440623aa8820b, 0xb0eb9f67d7dfdd33, 0x5d7dc9b790f8bcfb, +0xa623d31fad61ab4a, 0x0fdd5b441eac6264, 0xeac3ab5bdd599c59, 0xd57a3d69d16da623, +0x21333bad63220509, 0x43b415a94f05e5ad, 0x393c0ef347304b8e, 0x416ccb868b2ff6fc, +0xb0146df9b0e80803, 0x173e4ff32321237d, 0x95189d4247e070f2, 0xc466fc3aa5b651ff, +0x8716a93a5bb4b830, 0x68805a107190fda4, 0x7c3b6d80fef7a7cb, 0x25519a8c3836cdf9, +0x973828a29d19cd95, }; + +static u8 const l_good_ecc[] = + { 0xe6, 0x91, 0xda, 0x20, 0x79, 0xa5, 0x65, 0xb2, 0xd4, 0x06, 0xec, +0xfd, 0x1a, 0xaf, 0xa5, 0x03, 0xff, 0x65, 0xba, 0x34, 0x3f, 0xfa, 0x4a, 0x76, 0x84, +0x57, 0xf0, 0x2c, 0x2c, 0xd6, 0x4d, 0xdc, 0xa2, 0x0d, 0x67, 0x88, 0xab, 0x0c, 0x18, +0x3a, 0x01, 0x21, 0x13, 0x37, 0xce, 0x86, 0x55, 0x10, 0x08, 0xb6, 0xc8, 0xbb, 0xab, +0x08, 0x2c, 0x2b, 0xd2, 0x99, 0xf8, 0xe4, 0x3b, 0x0f, 0x7a, 0xee, }; + +static inline void __l_mcmonitor_fill_data(u64 *a, const u64 *good, + const u8 *ecc, int sz, bool make_error) +{ + void __iomem *mc_cfg = NULL + BASE_NODE0 + NBSR_MC_CONFIG; + void __iomem *mc_ecc0 = (void *)CC0_MC_ECC(0); + void __iomem *mc_ecc1 = (void *)CC1_MC_ECC(0); + u64 v = __raw_readl(mc_cfg), ecc0, ecc1; + bool interleaving = (v & (NBSR_MC_ENABLE_MC1 | NBSR_MC_ENABLE_MC0)) == + (NBSR_MC_ENABLE_MC1 | NBSR_MC_ENABLE_MC0); + u64 stride = (v >> NBSR_MC_INTERLEAVE_BIT_OFFSET) & + NBSR_MC_INTERLEAVE_BIT_MASK; + int i, j; + + if (stride > NBSR_MC_INTERLEAVE_BIT_MAX) + stride = NBSR_MC_INTERLEAVE_BIT_MAX; + if (stride < NBSR_MC_INTERLEAVE_BIT_MIN) + stride = NBSR_MC_INTERLEAVE_BIT_MIN; + stride = 1 << stride; + a = (void *)__pa(a); + + ecc0 = __raw_readq(mc_ecc0); + ecc1 = __raw_readq(mc_ecc1); + v = ecc0 & ~(CC_MC_EN_ECC_CORR | CC_MC_EN_ECC_DET); + __raw_writeq(v, mc_ecc0); + __raw_writeq(v, mc_ecc1); + mb(); + for (i = 0; i < sz; i++, a += L_MC_ECC_WORDS_NR) { + u8 e = ecc[i]; + u64 d = good[i]; + void __iomem *mc_ecc 
= interleaving && ((u64)a & stride) ? + mc_ecc1 : mc_ecc0; + __raw_writeq(v | (e << CC_MC_ECC_CB_SHIFT) | + CC_MC_EN_ECC_DMODE, mc_ecc); + mb(); + if (make_error) + d ^= (1UL << (i % 64)); + for (j = 0; j < L_MC_ECC_WORDS_NR; j++) + __raw_writeq(d, a + j); + mb(); + } + __raw_writeq(ecc0, mc_ecc0); + __raw_writeq(ecc1, mc_ecc1); + mb(); +} + +static inline void l_mcmonitor_fill_data(u64 *a, bool make_error) +{ + __l_mcmonitor_fill_data(a, l_good_data, l_good_ecc, + ARRAY_SIZE(l_good_ecc), make_error); +} + + +static inline int __l_mcmonitor_cmp(u64 *a, const u64 *good, int sz) +{ + int i, j; + for (i = 0; i < sz; i++, a += L_MC_ECC_WORDS_NR) { + for (j = 0; j < L_MC_ECC_WORDS_NR; j++) { + if (a[j] != good[i]) + return -EFAULT; + } + } + return 0; +} + +static inline int l_mcmonitor_cmp(u64 *a) +{ + return __l_mcmonitor_cmp(a, l_good_data, ARRAY_SIZE(l_good_data)); +} +#endif /* _SPARC_L_MCMONITOR_H_ */ diff --git a/arch/sparc/include/asm/l_ide.h b/arch/sparc/include/asm/l_ide.h new file mode 100644 index 000000000000..e487f2aaceba --- /dev/null +++ b/arch/sparc/include/asm/l_ide.h @@ -0,0 +1,15 @@ +#ifndef ___ASM_SPARC_L_IDE_H +#define ___ASM_SPARC_L_IDE_H + +#ifndef CONFIG_E90S +#if defined(__sparc__) && defined(__arch64__) +#include +#else +#include +#endif +#else /* !CONFIG_E90S */ +#include +#endif /* CONFIG_E90S */ + +#endif + diff --git a/arch/sparc/include/asm/l_ide32.h b/arch/sparc/include/asm/l_ide32.h new file mode 100644 index 000000000000..8bb9b1e22cd4 --- /dev/null +++ b/arch/sparc/include/asm/l_ide32.h @@ -0,0 +1,354 @@ +#ifndef _E90_L_IDE_H_ +#define _E90_L_IDE_H_ + +#define TRACE_E90_IDE_FLAG 0 +#define TRACE_E90_IDE_FLAG_ADDR 0 + + +#define TRACE_E90_IDE if (TRACE_E90_IDE_FLAG) printk +#define TRACE_E90_IDE_ADDR if (TRACE_E90_IDE_FLAG_ADDR && !TRACE_E90_IDE_FLAG) printk + +static inline u8 e90_inb(unsigned long port) +{ + return readb_asi(port, ASI_M_PCI); +} + +static inline u16 e90_inw (unsigned long port) +{ + return le16_to_cpu(readw_asi(port, 
ASI_M_PCI)); +} + +static inline void e90_insw(unsigned long port, void *dst, u32 count) +{ + u16 *ps = dst; + u32 *pi; + + if(((unsigned long)ps) & 0x2) { + *ps++ = readw_asi(port, ASI_M_PCI); + count--; + } + pi = (u32 *)ps; + while(count >= 2) { + u32 w; + w = readw_asi(port, ASI_M_PCI) << 16; + w |= readw_asi(port, ASI_M_PCI); + *pi++ = w; + count -= 2; + } + ps = (u16 *)pi; + if(count) + *ps++ = readw_asi(port, ASI_M_PCI); + +} + + +static inline void e90_outb(u8 val, unsigned long port) +{ + writeb_asi(val, port, ASI_M_PCI); +} + +static inline void e90_outw (u16 val, unsigned long port) +{ + writew_asi(cpu_to_le16(val), port, ASI_M_PCI); +} + +static inline void e90_outsw(unsigned long port, void *src, u32 count) +{ + const u16 *ps = src; + const u32 *pi; + + if(((unsigned long)src) & 0x2) { + writew_asi(*ps++, port, ASI_M_PCI); + count--; + } + pi = (const u32 *)ps; + while(count >= 2) { + u32 w = *pi++; + writew_asi(w >> 16, port, ASI_M_PCI); + writew_asi(w, port, ASI_M_PCI); + count -= 2; + } + ps = (const u16 *)pi; + if(count) + writew_asi(*ps, port, ASI_M_PCI); +} + +static inline void e90_outl(u32 val, unsigned long port) +{ + writel_asi(cpu_to_le32(val), port, ASI_M_PCI); +} + + +static void e90_ide_exec_command(ide_hwif_t *hwif, u8 cmd) +{ + TRACE_E90_IDE("%s: e90_ide_exec_command 0x%x; ", hwif->cur_dev->name, cmd); + e90_outb(cmd, hwif->io_ports.command_addr); + TRACE_E90_IDE("Done\n"); +} + +static u8 e90_ide_read_status(ide_hwif_t *hwif) +{ + u8 r; + TRACE_E90_IDE("%s: e90_ide_read_status: ", hwif->cur_dev->name); + r = e90_inb(hwif->io_ports.status_addr); + TRACE_E90_IDE(" Done = 0x%x\n", r); + return r; +} + +static u8 e90_ide_read_altstatus(ide_hwif_t *hwif) +{ + u8 r; + TRACE_E90_IDE("%s: e90_ide_read_alt_status: ", hwif->cur_dev->name); + r = e90_inb(hwif->io_ports.ctl_addr); + TRACE_E90_IDE(" Done = 0x%x\n", r); + return r; +} + +static void e90_ide_write_devctl(ide_hwif_t *hwif, u8 ctl) +{ + TRACE_E90_IDE("%s: e90_ide_write_devctl 0x%x ", 
hwif->cur_dev->name, ctl); + e90_outb(ctl, hwif->io_ports.ctl_addr); + TRACE_E90_IDE("Done\n"); +} + + +static void e90_ide_dev_select(ide_drive_t *drive) +{ + ide_hwif_t *hwif = drive->hwif; + u8 select = drive->select | ATA_DEVICE_OBS; + TRACE_E90_IDE("%s: e90_ide_dev_select 0x%x ", hwif->cur_dev->name, select); + e90_outb(select, hwif->io_ports.device_addr); + TRACE_E90_IDE("Done\n"); +} + + +static void e90_ide_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid) +{ + ide_hwif_t *hwif = drive->hwif; + struct ide_io_ports *io_ports = &hwif->io_ports; + + TRACE_E90_IDE("%s: e90_ide_tf_load: valid=0x%x; feature=0x%02x, nsect=0x%02x," + " lbal=0x%02x, lbam=0x%02x, lbah=0x%02x, dev=0x%02x ", hwif->cur_dev->name, + valid, tf->feature, tf->nsect, tf->lbal, + tf->lbam, tf->lbah, tf->device); + TRACE_E90_IDE_ADDR("%s: e90_ide_tf_load: valid=0x%x; feature=0x%02x, nsect=0x%02x," + " lbal=0x%02x, lbam=0x%02x, lbah=0x%02x, dev=0x%02x\n", hwif->cur_dev->name, + valid, tf->feature, tf->nsect, tf->lbal, + tf->lbam, tf->lbah, tf->device); + if (valid & IDE_VALID_FEATURE) + e90_outb(tf->feature, io_ports->feature_addr); + if (valid & IDE_VALID_NSECT) + e90_outb(tf->nsect, io_ports->nsect_addr); + if (valid & IDE_VALID_LBAL) + e90_outb(tf->lbal, io_ports->lbal_addr); + if (valid & IDE_VALID_LBAM) + e90_outb(tf->lbam, io_ports->lbam_addr); + if (valid & IDE_VALID_LBAH) + e90_outb(tf->lbah, io_ports->lbah_addr); + if (valid & IDE_VALID_DEVICE) + e90_outb(tf->device, io_ports->device_addr); + TRACE_E90_IDE("Done\n"); +} + + +static void e90_ide_tf_read(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid) +{ + ide_hwif_t *hwif = drive->hwif; + struct ide_io_ports *io_ports = &hwif->io_ports; + + TRACE_E90_IDE("%s: e90_ide_tf_read ", hwif->cur_dev->name); + if (valid & IDE_VALID_ERROR) + tf->error = e90_inb(io_ports->feature_addr); + if (valid & IDE_VALID_NSECT) + tf->nsect = e90_inb(io_ports->nsect_addr); + if (valid & IDE_VALID_LBAL) + tf->lbal = 
e90_inb(io_ports->lbal_addr); + if (valid & IDE_VALID_LBAM) + tf->lbam = e90_inb(io_ports->lbam_addr); + if (valid & IDE_VALID_LBAH) + tf->lbah = e90_inb(io_ports->lbah_addr); + if (valid & IDE_VALID_DEVICE) + tf->device = e90_inb(io_ports->device_addr); + TRACE_E90_IDE("Done: valid=0x%x; feature=0x%02x, nsect=0x%02x," + " lbal=0x%02x, lbam=0x%02x, lbah=0x%02x, dev=0x%02x\n", + valid, tf->feature, tf->nsect, tf->lbal, + tf->lbam, tf->lbah, tf->device); +} + +static void e90_ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, + unsigned int len) +{ + ide_hwif_t *hwif = drive->hwif; + struct ide_io_ports *io_ports = &hwif->io_ports; + unsigned long data_addr = io_ports->data_addr; + unsigned int words = (len + 1) >> 1; + + TRACE_E90_IDE("%s: e90_ide_input_data: len = 0x%x ", hwif->cur_dev->name, len); + e90_insw(data_addr, buf, words); + TRACE_E90_IDE("Done\n"); +} + +static void e90_ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, + unsigned int len) +{ + ide_hwif_t *hwif = drive->hwif; + struct ide_io_ports *io_ports = &hwif->io_ports; + unsigned long data_addr = io_ports->data_addr; + unsigned int words = (len + 1) >> 1; + + TRACE_E90_IDE("%s: e90_ide_output_data: len = 0x%x ", hwif->cur_dev->name, len); + e90_outsw(data_addr, buf, words); + TRACE_E90_IDE("Done\n"); +} + + +const struct ide_tp_ops e90_tp_ops = { + .exec_command = e90_ide_exec_command, + .read_status = e90_ide_read_status, + .read_altstatus = e90_ide_read_altstatus, + .write_devctl = e90_ide_write_devctl, + + .dev_select = e90_ide_dev_select, + .tf_load = e90_ide_tf_load, + .tf_read = e90_ide_tf_read, + + .input_data = e90_ide_input_data, + .output_data = e90_ide_output_data, +}; + +static void l_init_iops (ide_hwif_t *hwif) +{ + hwif->tp_ops = &e90_tp_ops; +} + + + + /* DMA handling interface */ + +static u8 e90_ide_dma_sff_read_status(ide_hwif_t *hwif) +{ + u8 r; + TRACE_E90_IDE("%s: e90_ide_dma_sff_read_status ", hwif->cur_dev->name); + r = 
e90_inb(hwif->dma_base + ATA_DMA_STATUS); + TRACE_E90_IDE("Done = 0x%02x\n", r); + return r; +} + +static void e90_ide_dma_sff_write_status(ide_hwif_t *hwif, u8 val) +{ + TRACE_E90_IDE("%s: e90_ide_dma_sff_write_status 0x%02x ", hwif->cur_dev->name, val); + e90_outb(val, hwif->dma_base + ATA_DMA_STATUS); + TRACE_E90_IDE(" Done\n"); +} + +/** + * ide_dma_host_set - Enable/disable DMA on a host + * @drive: drive to control + * + * Enable/disable DMA on an IDE controller following generic + * bus-mastering IDE controller behaviour. + */ + +static void e90_ide_dma_host_set(ide_drive_t *drive, int on) +{ + ide_hwif_t *hwif = drive->hwif; + u8 unit = drive->dn & 1; + u8 dma_stat = e90_ide_dma_sff_read_status(hwif); + + TRACE_E90_IDE("%s: e90_ide_dma_host_set\n", drive->name); + if (on) + dma_stat |= (1 << (5 + unit)); + else + dma_stat &= ~(1 << (5 + unit)); + + e90_ide_dma_sff_write_status(hwif, dma_stat); +} + +int e90_ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd) +{ + ide_hwif_t *hwif = drive->hwif; + u8 rw = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 0 : ATA_DMA_WR; + u8 dma_stat; + + /* fall back to pio! */ + TRACE_E90_IDE("%s: e90_ide_dma_setup ", drive->name); + if (ide_build_dmatable(drive, cmd) == 0) { + ide_map_sg(drive, cmd); + TRACE_E90_IDE(" ide_build_dmatable failed\n"); + return 1; + } + + /* PRD table */ + e90_outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS); + TRACE_E90_IDE("PRD table = 0x%08x; ", hwif->dmatable_dma); + /* specify r/w */ + e90_outb(rw, hwif->dma_base + ATA_DMA_CMD); + TRACE_E90_IDE(" rw = 0x%02x\n", rw); + /* read DMA status for INTR & ERROR flags */ + dma_stat = e90_ide_dma_sff_read_status(hwif); + + /* clear INTR & ERROR flags */ + e90_ide_dma_sff_write_status(hwif, dma_stat | ATA_DMA_ERR | ATA_DMA_INTR); + + return 0; +} + +void e90_ide_dma_start(ide_drive_t *drive) +{ + ide_hwif_t *hwif = drive->hwif; + u8 dma_cmd; + + /* Note that this is done *after* the cmd has + * been issued to the drive, as per the BM-IDE spec. 
+ * The Promise Ultra33 doesn't work correctly when + * we do this part before issuing the drive cmd. + */ + TRACE_E90_IDE("%s: e90_ide_dma_start ", drive->name); + dma_cmd = e90_inb(hwif->dma_base + ATA_DMA_CMD); + TRACE_E90_IDE(" read dma_cmd = 0x%02x; ", dma_cmd); + e90_outb(dma_cmd | ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD); + TRACE_E90_IDE(" write dma_cmd = 0x%02x. Done\n", dma_cmd | ATA_DMA_START); +} + +/* returns 1 on error, 0 otherwise */ +int e90_ide_dma_end(ide_drive_t *drive) +{ + ide_hwif_t *hwif = drive->hwif; + u8 dma_stat = 0, dma_cmd = 0; + + /* stop DMA */ + TRACE_E90_IDE("%s: e90_ide_dma_end ", drive->name); + dma_cmd = e90_inb(hwif->dma_base + ATA_DMA_CMD); + TRACE_E90_IDE(" read dma_cmd = 0x%02x; ", dma_cmd); + e90_outb(dma_cmd & ~ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD); + TRACE_E90_IDE(" write dma_cmd = 0x%02x.\n", dma_cmd | ATA_DMA_START); + + /* get DMA status */ + dma_stat = e90_ide_dma_sff_read_status(hwif); + + /* clear INTR & ERROR bits */ + e90_ide_dma_sff_write_status(hwif, dma_stat | ATA_DMA_ERR | ATA_DMA_INTR); + +#define CHECK_DMA_MASK (ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR) + TRACE_E90_IDE("Done e90_ide_dma_end\n"); + /* verify good DMA status */ + if ((dma_stat & CHECK_DMA_MASK) != ATA_DMA_INTR) + return 0x10 | dma_stat; + return 0; +} + +const struct ide_dma_ops e90_dma_ops = { + .dma_host_set = e90_ide_dma_host_set, + .dma_setup = e90_ide_dma_setup, + .dma_start = e90_ide_dma_start, + .dma_end = e90_ide_dma_end, + .dma_test_irq = ide_dma_test_irq, + .dma_lost_irq = ide_dma_lost_irq, + .dma_timer_expiry = ide_dma_sff_timer_expiry, + .dma_sff_read_status = e90_ide_dma_sff_read_status, +}; + +#define L_FORCE_NATIVE_MODE 1 +#define L_DEAULT_IDE_DMA_MODE 0 + +#endif /*_E90_L_IDE_H_*/ diff --git a/arch/sparc/include/asm/l_pmc.h b/arch/sparc/include/asm/l_pmc.h new file mode 100644 index 000000000000..fba6aca639f0 --- /dev/null +++ b/arch/sparc/include/asm/l_pmc.h @@ -0,0 +1,26 @@ +#ifndef _ARCH_PMC_H_ +#define 
_ARCH_PMC_H_ +#ifdef CONFIG_E90S + +/* PMC registers */ +#define PMC_L_COVFID_STATUS_REG_CL0 0x0 +#define PMC_L_P_STATE_STATUS_REG_CL0 0xc +#define PMC_L_P_STATE_CNTRL_REG_CL0 0x8 +#define PMC_L_P_STATE_VALUE_0_REG_CL0 0x10 +#define PMC_L_P_STATE_VALUE_1_REG_CL0 0x14 +#define PMC_L_P_STATE_VALUE_2_REG_CL0 0x18 +#define PMC_L_P_STATE_VALUE_3_REG_CL0 0x1c +#define PMC_L_COVFID_STATUS_REG_CL1 0x168 +#define PMC_L_P_STATE_STATUS_REG_CL1 0x170 +#define PMC_L_P_STATE_CNTRL_REG_CL1 0x174 +#define PMC_L_P_STATE_VALUE_0_REG_CL1 0x178 +#define PMC_L_P_STATE_VALUE_1_REG_CL1 0x17c +#define PMC_L_P_STATE_VALUE_2_REG_CL1 0x180 +#define PMC_L_P_STATE_VALUE_3_REG_CL1 0x184 + + +unsigned s2_get_freq_mult(int cpu); + +#include +#endif /* CONFIG_E90S */ +#endif /*_ARCH_PMC_H_*/ diff --git a/arch/sparc/include/asm/l_spmc.h b/arch/sparc/include/asm/l_spmc.h new file mode 100644 index 000000000000..a6f46941644a --- /dev/null +++ b/arch/sparc/include/asm/l_spmc.h @@ -0,0 +1,8 @@ +#ifndef _ARCH_SPMC_H_ +#define _ARCH_SPMC_H_ +#ifdef CONFIG_E90S + +#include + +#endif /* CONFIG_E90S */ +#endif /*_ARCH_SPMC_H_*/ diff --git a/arch/sparc/include/asm/l_timer.h b/arch/sparc/include/asm/l_timer.h new file mode 100644 index 000000000000..22a963d2be0c --- /dev/null +++ b/arch/sparc/include/asm/l_timer.h @@ -0,0 +1,16 @@ +#ifndef _ASM_L_TIMER_H +#define _ASM_L_TIMER_H + +#include + +#ifdef CONFIG_E90S +#define L_TIMER_IS_ALLOWED() 1 /* E90S use this timer */ +#else +#define L_TIMER_IS_ALLOWED() 0 /* other sparc64 machine not use */ +#endif /* CONFIG_E90S */ + +#define SET_CLOCK_TICK_RATE() /* clock rate not set dinamicaly */ + +#include + +#endif /* _ASM_L_TIMER_H */ diff --git a/arch/sparc/include/asm/l_timer_regs.h b/arch/sparc/include/asm/l_timer_regs.h new file mode 100644 index 000000000000..6857d369e9e1 --- /dev/null +++ b/arch/sparc/include/asm/l_timer_regs.h @@ -0,0 +1,107 @@ +#ifndef _L_ASM_L_TIMER_REGS_H +#define _L_ASM_L_TIMER_REGS_H + +#include + +/* + * Elbrus System timer Registers 
(big endian) + */ + +typedef struct counter_limit_fields { + u32 l : 1; /* [31] */ + u32 c_l : 22; /* [30:9] */ + u32 unused : 9; /* [8:0] */ +} counter_limit_fields_t; +typedef union counter_limit { + u32 word; + counter_limit_fields_t fields; +} counter_limit_t; +typedef struct counter_st_v_fields { + u32 l : 1; /* [31] */ + u32 c_st_v : 22; /* [30:9] */ + u32 unused : 9; /* [8:0] */ +} counter_st_v_fields_t; +typedef union counter_st_v { + u32 word; + counter_st_v_fields_t fields; +} counter_st_v_t; +typedef struct counter_fields { + u32 l : 1; /* [31] */ + u32 c : 22; /* [30:9] */ + u32 unused : 9; /* [8:0] */ +} counter_fields_t; +typedef union counter { + u32 word; + counter_fields_t fields; +} counter_t; +typedef struct counter_control_fields { + u32 unused : 29; /* [31:3] */ + u32 l_ini : 1; /* [2] */ + u32 inv_l : 1; /* [1] */ + u32 s_s : 1; /* [0] */ +} counter_control_fields_t; +typedef union counter_control { + u32 word; + counter_control_fields_t fields; +} counter_control_t; +typedef struct wd_counter_l_fields { + u32 wd_c : 32; /* [31:0] */ +} wd_counter_l_fields_t; +typedef union wd_counter_l { + u32 word; + wd_counter_l_fields_t fields; +} wd_counter_l_t; +typedef struct wd_counter_h_fields { + u32 wd_c : 32; /* [31:0] */ +} wd_counter_h_fields_t; +typedef union wd_counter_h { + u32 word; + wd_counter_h_fields_t fields; +} wd_counter_h_t; +typedef struct wd_limit_fields { + u32 wd_l : 32; /* [31:0] */ +} wd_limit_fields_t; +typedef union wd_limit { + u32 word; + wd_limit_fields_t fields; +} wd_limit_t; +typedef struct power_counter_l_fields { + u32 pw_c : 32; /* [31:0] */ +} power_counter_l_fields_t; +typedef union power_counter_l { + u32 word; + power_counter_l_fields_t fields; +} power_counter_l_t; +typedef struct power_counter_h_fields { + u32 pw_c : 32; /* [31:0] */ +} power_counter_h_fields_t; +typedef union power_counter_h { + u32 word; + power_counter_h_fields_t fields; +} power_counter_h_t; +typedef struct wd_control_fields { + u32 unused : 
29; /* [31:3] */ + u32 w_evn : 1; /* [2] */ + u32 w_out_e : 1; /* [1] */ + u32 w_m : 1; /* [0] */ +} wd_control_fields_t; +typedef union wd_control { + u32 word; + wd_control_fields_t fields; +} wd_control_t; +typedef struct reset_counter_l_fields { + u32 rst : 32; /* [31:0] */ +} reset_counter_l_fields_t; +typedef union reset_counter_l { + u32 word; + reset_counter_l_fields_t fields; +} reset_counter_l_t; +typedef struct reset_counter_h_fields { + u32 rst : 32; /* [31:0] */ +} reset_counter_h_fields_t; +typedef union reset_counter_h { + u32 word; + reset_counter_h_fields_t fields; +} reset_counter_h_t; + +#endif /* _L_ASM_L_TIMER_REGS_H */ diff --git a/arch/sparc/include/asm/machdep.h b/arch/sparc/include/asm/machdep.h new file mode 100644 index 000000000000..fb6efae502b5 --- /dev/null +++ b/arch/sparc/include/asm/machdep.h @@ -0,0 +1,16 @@ + +#ifndef _SPARC64_RESET_H_ +#define _SPARC64_RESET_H_ + +#ifndef __ASSEMBLY__ + +typedef struct machdep { + void (*arch_reset)(char *cmd); + void (*arch_halt)(void); +} machdep_t; + +extern machdep_t machine; + +#endif /* __ASSEMBLY__ */ + +#endif /* _SPARC64_RESET_H_ */ diff --git a/arch/sparc/include/asm/mman.h b/arch/sparc/include/asm/mman.h index 274217e7ed70..ffcb84d86a18 100644 --- a/arch/sparc/include/asm/mman.h +++ b/arch/sparc/include/asm/mman.h @@ -30,6 +30,9 @@ static inline void ipi_set_tstate_mcde(void *arg) #define arch_calc_vm_prot_bits(prot, pkey) sparc_calc_vm_prot_bits(prot) static inline unsigned long sparc_calc_vm_prot_bits(unsigned long prot) { + if (prot & PROT_INVEND) + return VM_INVEND; + if (adi_capable() && (prot & PROT_ADI)) { struct pt_regs *regs; @@ -49,13 +52,15 @@ static inline unsigned long sparc_calc_vm_prot_bits(unsigned long prot) #define arch_vm_get_page_prot(vm_flags) sparc_vm_get_page_prot(vm_flags) static inline pgprot_t sparc_vm_get_page_prot(unsigned long vm_flags) { - return (vm_flags & VM_SPARC_ADI) ? __pgprot(_PAGE_MCD_4V) : __pgprot(0); + return (vm_flags & VM_INVEND) ? 
__pgprot(_PAGE_IE) : + (vm_flags & VM_SPARC_ADI) ? __pgprot(_PAGE_MCD_4V) : __pgprot(0); } #define arch_validate_prot(prot, addr) sparc_validate_prot(prot, addr) static inline int sparc_validate_prot(unsigned long prot, unsigned long addr) { - if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_ADI)) + if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_ADI | + PROT_INVEND)) return 0; return 1; } diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h index 7e2704c770e9..83bb8f5fe1cd 100644 --- a/arch/sparc/include/asm/mmu_64.h +++ b/arch/sparc/include/asm/mmu_64.h @@ -105,7 +105,12 @@ typedef struct { } tag_storage_desc_t; typedef struct { +#ifdef CONFIG_MCST + raw_spinlock_t lock; + unsigned char is_exit_mmap:1; +#else spinlock_t lock; +#endif unsigned long sparc64_ctx_val; unsigned long hugetlb_pte_count; unsigned long thp_pte_count; diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h index 312fcee8df2b..01f9f86115f1 100644 --- a/arch/sparc/include/asm/mmu_context_64.h +++ b/arch/sparc/include/asm/mmu_context_64.h @@ -16,11 +16,20 @@ #include #include +#ifdef CONFIG_E90S +#include +#include +#endif /*CONFIG_E90S*/ + static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { } +#ifdef CONFIG_MCST +extern raw_spinlock_t ctx_alloc_lock; +#else extern spinlock_t ctx_alloc_lock; +#endif extern unsigned long tlb_context_cache; extern unsigned long mmu_context_bmap[]; @@ -38,6 +47,29 @@ void __tsb_context_switch(unsigned long pgd_pa, static inline void tsb_context_switch_ctx(struct mm_struct *mm, unsigned long ctx) { +#ifdef CONFIG_E90S +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) + u64 o = readq_asi(E90S_MCNTL, ASI_DCU_CONTROL_REG); + u64 n = o & ~E90S_MCNTL_DRM23_MSK; + /* setup 2 and 3 sDTLB sets to store huge pages ttes */ + n |= mm->context.tsb_block[MM_TSB_HUGE].tsb ? 
+ E90S_MCNTL_DRM23_4M : E90S_MCNTL_DRM23_DEFAULT; + if (n != o) { + unsigned long flags; + local_save_flags(flags); + local_irq_restore((unsigned long)PIL_NMI); + if ((o & E90S_MCNTL_DRM23_MSK) != E90S_MCNTL_DRM23_4M) { + /* flush kernel to avoid tlb double hit */ + writeq_asi(0, 0x20, ASI_DMMU_DEMAP); + membar_sync(); + writeq_asi(n, E90S_MCNTL, ASI_DCU_CONTROL_REG); + membar_sync(); + } + local_irq_restore(flags); + } +#endif +#endif /*CONFIG_E90S*/ + __tsb_context_switch(__pa(mm->pgd), &mm->context.tsb_block[MM_TSB_BASE], #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) @@ -87,7 +119,11 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str if (unlikely(mm == &init_mm)) return; +#ifdef CONFIG_MCST + raw_spin_lock_irqsave(&mm->context.lock, flags); +#else spin_lock_irqsave(&mm->context.lock, flags); +#endif ctx_valid = CTX_VALID(mm->context); if (!ctx_valid) get_new_mmu_context(mm); @@ -133,7 +169,11 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); } +#ifdef CONFIG_MCST + raw_spin_unlock_irqrestore(&mm->context.lock, flags); +#else spin_unlock_irqrestore(&mm->context.lock, flags); +#endif } #define deactivate_mm(tsk,mm) do { } while (0) @@ -142,6 +182,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str #define __HAVE_ARCH_START_CONTEXT_SWITCH static inline void arch_start_context_switch(struct task_struct *prev) { +#ifndef CONFIG_E90S /* Save the current state of MCDPER register for the process * we are switching from */ @@ -159,11 +200,13 @@ static inline void arch_start_context_switch(struct task_struct *prev) else clear_tsk_thread_flag(prev, TIF_MCDPER); } +#endif } #define finish_arch_post_lock_switch finish_arch_post_lock_switch static inline void finish_arch_post_lock_switch(void) { +#ifndef CONFIG_E90S /* Restore the state of MCDPER register for the new process * just switched 
to. */ @@ -185,6 +228,7 @@ static inline void finish_arch_post_lock_switch(void) regs->tstate |= TSTATE_MCDE; } } +#endif } #endif /* !(__ASSEMBLY__) */ diff --git a/arch/sparc/include/asm/mpspec.h b/arch/sparc/include/asm/mpspec.h new file mode 100644 index 000000000000..164d65d5089c --- /dev/null +++ b/arch/sparc/include/asm/mpspec.h @@ -0,0 +1,16 @@ +#ifndef __ASM_MPSPEC_H +#define __ASM_MPSPEC_H + +#include +#ifdef CONFIG_E90S +#include +#endif + +#include + +/* all addresses in MP table is virtual so do not change them */ +#define mpc_addr_to_virt(addr) ((void *)(addr)) +#define mpc_addr_to_phys(addr) (addr) +#define mpc_addr(addr) (addr) + +#endif /* __ASM_MPSPEC_H */ diff --git a/arch/sparc/include/asm/msidef.h b/arch/sparc/include/asm/msidef.h new file mode 100644 index 000000000000..dba1666a6a03 --- /dev/null +++ b/arch/sparc/include/asm/msidef.h @@ -0,0 +1,6 @@ +#ifndef __ASM_MSIDEF_H +#define __ASM_MSIDEF_H + +#include + +#endif diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h index a67abebd4359..5dfcc86d4f17 100644 --- a/arch/sparc/include/asm/oplib_64.h +++ b/arch/sparc/include/asm/oplib_64.h @@ -100,7 +100,12 @@ unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size); void prom_console_write_buf(const char *buf, int len); /* Prom's internal routines, don't use in kernel/boot code. */ +#ifdef CONFIG_E90S +#define prom_printf printk +#else __printf(1, 2) void prom_printf(const char *fmt, ...); +#endif /*CONFIG_E90S*/ + void prom_write(const char *buf, unsigned int len); /* Multiprocessor operations... 
*/ diff --git a/arch/sparc/include/asm/page.h b/arch/sparc/include/asm/page.h index 5e44cdf2a8f2..53d47e211cfa 100644 --- a/arch/sparc/include/asm/page.h +++ b/arch/sparc/include/asm/page.h @@ -3,6 +3,7 @@ #define ___ASM_SPARC_PAGE_H #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) +#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys))) #if defined(__sparc__) && defined(__arch64__) #include diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h index e80f2d5bf62f..4ed99966f7d1 100644 --- a/arch/sparc/include/asm/page_64.h +++ b/arch/sparc/include/asm/page_64.h @@ -12,7 +12,7 @@ /* Flushing for D-cache alias handling is only needed if * the page size is smaller than 16K. */ -#if PAGE_SHIFT < 14 +#if PAGE_SHIFT < 14 && !defined(CONFIG_E90S) #define DCACHE_ALIASING_POSSIBLE #endif @@ -31,6 +31,8 @@ #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT)) #define HUGE_MAX_HSTATE 5 +#else +#define HPAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) #endif #ifndef __ASSEMBLY__ @@ -67,7 +69,11 @@ void copy_highpage(struct page *to, struct page *from); #ifdef STRICT_MM_TYPECHECKS /* These are used to make use of C type-checking.. */ typedef struct { unsigned long pte; } pte_t; +#ifndef CONFIG_E90S typedef struct { unsigned long iopte; } iopte_t; +#else +typedef struct { unsigned iopte; } iopte_t; +#endif typedef struct { unsigned long pmd; } pmd_t; typedef struct { unsigned long pud; } pud_t; typedef struct { unsigned long pgd; } pgd_t; @@ -90,7 +96,11 @@ typedef struct { unsigned long pgprot; } pgprot_t; #else /* .. 
while these make it easier on the compiler */ typedef unsigned long pte_t; +#ifndef CONFIG_E90S typedef unsigned long iopte_t; +#else +typedef unsigned iopte_t; +#endif typedef unsigned long pmd_t; typedef unsigned long pud_t; typedef unsigned long pgd_t; @@ -137,7 +147,11 @@ extern unsigned long PAGE_OFFSET; * largest value we can support is whatever "KPGD_SHIFT + KPTE_BITS" * evaluates to. */ +#ifdef CONFIG_E90S +#define MAX_PHYS_ADDRESS_BITS 40 +#else #define MAX_PHYS_ADDRESS_BITS 53 +#endif #define ILOG2_4MB 22 #define ILOG2_256MB 28 diff --git a/arch/sparc/include/asm/pci.h b/arch/sparc/include/asm/pci.h index 4deddf430e5d..28c398c2463c 100644 --- a/arch/sparc/include/asm/pci.h +++ b/arch/sparc/include/asm/pci.h @@ -1,6 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef ___ASM_SPARC_PCI_H #define ___ASM_SPARC_PCI_H +#ifdef CONFIG_E90S +#include +#else /* Can be used to override the logic in pci_scan_bus for skipping @@ -48,5 +51,5 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) #else #include #endif - -#endif /* ___ASM_SPARC_PCI_H */ +#endif /* CONFIG_E90S */ +#endif /* ___ASM_SPARC_PCI_H */ diff --git a/arch/sparc/include/asm/pci_e90s.h b/arch/sparc/include/asm/pci_e90s.h new file mode 100644 index 000000000000..d0f8c407dbe7 --- /dev/null +++ b/arch/sparc/include/asm/pci_e90s.h @@ -0,0 +1,66 @@ +#ifndef __SPARC64_PCI_E90S_H +#define __SPARC64_PCI_E90S_H + +#ifdef __KERNEL__ +#include + +#define L_IOHUB_SLOTS_NUM \ + (e90s_get_cpu_type() == E90S_CPU_R1000 ? 2 : \ + e90s_get_cpu_type() == E90S_CPU_R2000P ? 
32 : \ + SLOTS_PER_L_IOHUB) + +#define HAVE_PCI_MMAP +#define HAVE_PCI_LEGACY 1 +#define HAVE_COMMONROOT_BUS_PCI_DOMAINS 1 /* all IOHUBs accessed */ + /* through common root */ + /* bus #0 */ +struct resource; +struct pci_bus_region; + +extern void +pcibios_resource_to_bus(struct pci_bus *dev, struct pci_bus_region *region, + struct resource *res); +extern void +pcibios_bus_to_resource(struct pci_bus *dev, struct resource *res, + struct pci_bus_region *region); + +#define PCIBIOS_MIN_IO 0UL +#define PCIBIOS_MIN_MEM 0UL + +extern void pci_config_read8(u8 *addr, u8 *ret); +extern void pci_config_read16(u16 *addr, u16 *ret); +extern void pci_config_read32(u32 *addr, u32 *ret); +extern void pci_config_write8(u8 *addr, u8 val); +extern void pci_config_write16(u16 *addr, u16 val); +extern void pci_config_write32(u32 *addr, u32 val); +#define conf_inb(domain, bus, port, val) \ + pci_config_read8((u8 *)(PCI_CONFIG_BASE + (port)), val) +#define conf_inw(domain, bus, port, val) \ + pci_config_read16((u16 *)(PCI_CONFIG_BASE + (port)), val) +#define conf_inl(domain, bus, port, val) \ + pci_config_read32((u32 *)(PCI_CONFIG_BASE + (port)), val) +#define conf_outb(domain, bus, port, val) \ + pci_config_write8((u8 *)(PCI_CONFIG_BASE + (port)), val) +#define conf_outw(domain, bus, port, val) \ + pci_config_write16((u16 *)(PCI_CONFIG_BASE + (port)), val) +#define conf_outl(domain, bus, port, val) \ + pci_config_write32((u32 *)(PCI_CONFIG_BASE + (port)), val) + + +#define PCI_ARCH_CACHE_LINE_SIZE SMP_CACHE_BYTES_SHIFT +#define PCI_DMA_BUS_IS_PHYS (0) +#define PCI_IRQ_NONE 0xffffffff + +static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) +{ + return PCI_IRQ_NONE; +} + + +#define L_IOPORT_RESOURCE_OFFSET BASE_PCIIO + +#include + +#endif /* __KERNEL__ */ + +#endif /* __SPARC64_PCI_E90S_H */ diff --git a/arch/sparc/include/asm/pcr.h b/arch/sparc/include/asm/pcr.h index da834ffbe75d..769f6c8b505b 100644 --- a/arch/sparc/include/asm/pcr.h +++ 
b/arch/sparc/include/asm/pcr.h @@ -48,4 +48,84 @@ void schedule_deferred_pcr_work(void); int pcr_arch_init(void); +#ifdef CONFIG_E90S +#define E90S_PIC_NR 4 + + +#define E90S_PCR_PRIV_SHIFT 0 + +#define E90S_PCR_SYS_SHIFT 1 +#define E90S_PCR_USR_SHIFT 2 + +#define E90S_PCR_PICL_SHIFT 4 +#define E90S_PCR_PICU_MASK 0x3f +#define E90S_PCR_PICU_SHIFT 11 + +#define E90S_PCR_ULRO_SHIFT 3 +#define E90S_PCR_SC_SHIFT 18 +#define E90S_PCR_SC_MASK 0x7UL +#define E90S_PCR_NC_SHIFT 22 +#define E90S_PCR_NC_MASK 0x7UL +#define E90S_PCR_OVRO_SHIFT 26 +#define E90S_PCR_OVF_SHIFT 32 +#define E90S_PCR_OVF_MASK 0xfUL + +#define E90S_PCR_PRIV (1UL << E90S_PCR_PRIV_SHIFT) +#define E90S_PCR_SYS (1UL << E90S_PCR_SYS_SHIFT) +#define E90S_PCR_USR (1UL << E90S_PCR_USR_SHIFT) +#define E90S_PCR_ULRO (1UL << E90S_PCR_ULRO_SHIFT) +#define E90S_PCR_OVRO (1UL << E90S_PCR_OVRO_SHIFT) +#define E90S_PCR_OVF (E90S_PCR_OVF_MASK << \ + E90S_PCR_OVF_SHIFT) + +#define E90S_NOP_EVENT (0x3fUL << E90S_PCR_PICU_SHIFT) + +/* Performance counter register access. 
*/ +#define rd_pcr(__p) __asm__ __volatile__("rd %%pcr, %0" : "=r" (__p)) +#define wr_pcr(__p) __asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (__p)) +#define rd_pic(__p) __asm__ __volatile__("rd %%pic, %0" : "=r" (__p)) +#define wr_pic(__p) __asm__ __volatile__("wr %0, 0x0, %%pic" : : "r" (__p)) + +static inline void read_and_stop_perfctrs(__u64 *cntrs) +{ + unsigned long pcr, pic; + int i; + pcr = E90S_PCR_ULRO | E90S_PCR_OVRO; + for(i=0; i < E90S_PIC_NR; i++) { + wr_pcr(pcr | i << E90S_PCR_SC_SHIFT); + rd_pic(pic); + cntrs[i] = pic; + } +} + +static inline void read_perfctrs(__u64 *cntrs) +{ + unsigned long pcr, old_pcr, pic; + int i; + rd_pcr(old_pcr); + pcr = old_pcr; + pcr &= ~(E90S_PCR_USR | E90S_PCR_SYS | + (E90S_PCR_SC_MASK << E90S_PCR_SC_SHIFT)) + | E90S_PCR_ULRO | E90S_PCR_OVRO; + for(i=0; i < E90S_PIC_NR; i++) { + wr_pcr(pcr | i << E90S_PCR_SC_SHIFT); + rd_pic(pic); + cntrs[i] = pic; + } + wr_pcr(old_pcr); +} + +static inline void write_perfctrs(__u64 *pcrs, __u64 *cntrs) +{ + int i; + for(i=0; i < E90S_PIC_NR; i++) { + wr_pcr((pcrs[i] & (~(E90S_PCR_USR | E90S_PCR_SYS))) + | E90S_PCR_OVRO); + wr_pic(cntrs[i]); + } + wr_pcr(pcrs[E90S_PIC_NR-1] & ~(E90S_PCR_OVF | E90S_PCR_OVRO)); +} + +#endif /* CONFIG_E90S */ + #endif /* __PCR_H */ diff --git a/arch/sparc/include/asm/percpu_64.h b/arch/sparc/include/asm/percpu_64.h index 32ef6f05cc56..f35dcdbfe033 100644 --- a/arch/sparc/include/asm/percpu_64.h +++ b/arch/sparc/include/asm/percpu_64.h @@ -22,4 +22,9 @@ register unsigned long __local_per_cpu_offset asm("g5"); #include +#if defined CONFIG_E90S +/* For EARLY_PER_CPU_* definitions */ +# include +#endif + #endif /* __ARCH_SPARC64_PERCPU__ */ diff --git a/arch/sparc/include/asm/perf_event.h b/arch/sparc/include/asm/perf_event.h index c2aec0c7f4f5..ff6ed19180b7 100644 --- a/arch/sparc/include/asm/perf_event.h +++ b/arch/sparc/include/asm/perf_event.h @@ -25,6 +25,11 @@ do { \ (regs)->u_regs[UREG_I6] = _fp; \ (regs)->u_regs[UREG_I7] = _i7; \ } while (0) + 
+#ifdef CONFIG_E90S +struct pt_regs; +extern int perf_event_nmi_handler(struct pt_regs *regs); +#endif #endif #endif diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 6ae8016ef4ec..d701c5b554f6 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -126,8 +126,13 @@ bool kern_addr_valid(unsigned long addr); #define _PAGE_IE_4U _AC(0x0800000000000000,UL) /* Invert Endianness */ #define _PAGE_SOFT2_4U _AC(0x07FC000000000000,UL) /* Software bits, set 2 */ #define _PAGE_SPECIAL_4U _AC(0x0200000000000000,UL) /* Special page */ -#define _PAGE_PMD_HUGE_4U _AC(0x0100000000000000,UL) /* Huge page */ +#define _PAGE_PMD_HUGE_4U _AC(0x0100000000000000,UL) /* Huge page */ */ +#ifdef CONFIG_E90S +#define _PAGE_E90S_WC_4U _AC(0x0002000000000000,UL) /* Write-combine */ + +#else #define _PAGE_RES1_4U _AC(0x0002000000000000,UL) /* Reserved */ +#endif #define _PAGE_SZ32MB_4U _AC(0x0001000000000000,UL) /* (Panther) 32MB page */ #define _PAGE_SZ256MB_4U _AC(0x2001000000000000,UL) /* (Panther) 256MB page */ #define _PAGE_SZALL_4U _AC(0x6001000000000000,UL) /* All pgsz bits */ @@ -141,6 +146,11 @@ bool kern_addr_valid(unsigned long addr); #define _PAGE_READ_4U _AC(0x0000000000000200,UL) /* Readable SW Bit */ #define _PAGE_WRITE_4U _AC(0x0000000000000100,UL) /* Writable SW Bit */ #define _PAGE_PRESENT_4U _AC(0x0000000000000080,UL) /* Present */ +#ifdef CONFIG_E90S +#define _PAGE_PRESENT _PAGE_PRESENT_4U +#define _PAGE_ACCESSED _PAGE_ACCESSED_4U +#define _PAGE_PROTNONE _AC(0x0000000000000200, UL) /* For numa balancing */ +#endif #define _PAGE_L_4U _AC(0x0000000000000040,UL) /* Locked TTE */ #define _PAGE_CP_4U _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */ #define _PAGE_CV_4U _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */ @@ -189,7 +199,6 @@ bool kern_addr_valid(unsigned long addr); #define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U #define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V - /* These are actually filled in at 
boot time by sun4{u,v}_pgprot_init() */ #define __P000 __pgprot(0) #define __P001 __pgprot(0) @@ -210,6 +219,12 @@ bool kern_addr_valid(unsigned long addr); #define __S111 __pgprot(0) #ifndef __ASSEMBLY__ +#ifdef CONFIG_MCST +#define pte_flags pte_val +#define pmd_flags pmd_val +typedef unsigned long pteval_t; +typedef unsigned long pmdval_t; +#endif pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long); @@ -438,6 +453,30 @@ static inline bool is_hugetlb_pte(pte_t pte) } #endif +#ifdef CONFIG_E90S +/* We don't need patching for SUN4V */ + +static inline pte_t pte_mkdirty(pte_t pte) +{ + unsigned long val = pte_val(pte) | (_PAGE_MODIFIED_4U | _PAGE_W_4U); + return __pte(val); +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + unsigned long val = pte_val(pte) & ~(_PAGE_MODIFIED_4U | _PAGE_W_4U); + return __pte(val); +} + +#define pgprot_writecombine pgprot_writecombine +static inline pgprot_t pgprot_writecombine(pgprot_t prot) +{ + unsigned long val = pgprot_val(prot); + val &= ~(_PAGE_CP_4U | _PAGE_CV_4U); + val |= _PAGE_E90S_WC_4U; + return __pgprot(val); +} +#else /* CONFIG_E90S */ static inline pte_t pte_mkdirty(pte_t pte) { unsigned long val = pte_val(pte), tmp; @@ -485,7 +524,7 @@ static inline pte_t pte_mkclean(pte_t pte) return __pte(val); } - +#endif /* CONFIG_E90S */ static inline pte_t pte_mkwrite(pte_t pte) { unsigned long val = pte_val(pte), mask; @@ -603,7 +642,13 @@ static inline unsigned long pte_young(pte_t pte) return (pte_val(pte) & mask); } - +#ifdef CONFIG_E90S +/* We don't need patching for SUN4V */ +static inline unsigned long pte_dirty(pte_t pte) +{ + return (pte_val(pte) & _PAGE_MODIFIED_4U); +} +#else static inline unsigned long pte_dirty(pte_t pte) { unsigned long mask; @@ -621,7 +666,7 @@ static inline unsigned long pte_dirty(pte_t pte) return (pte_val(pte) & mask); } - +#endif /* CONFIG_E90S */ static inline unsigned long pte_write(pte_t pte) { unsigned long mask; @@ -656,6 +701,40 @@ static inline unsigned long pte_exec(pte_t 
pte) return (pte_val(pte) & mask); } +#define pte_accessible pte_accessible +static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a) +{ + return pte_val(a) & (_PAGE_VALID); +} +#ifdef CONFIG_E90S +#ifdef CONFIG_NUMA_BALANCING +#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) +/* + * These return true for PAGE_NONE too but the kernel does not care. + * See the comment in include/asm-generic/pgtable.h + */ +static inline int pte_protnone(pte_t pte) +{ + return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE)) == + _PAGE_PROTNONE; +} +static inline int pmd_protnone(pmd_t pmd) +{ + return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE)) == + _PAGE_PROTNONE; +} + +static inline unsigned long pte_present(pte_t pte) +{ + return (pte_val(pte) & (_PAGE_PRESENT_4U | _PAGE_PROTNONE)); +} +#else /* !CONFIG_NUMA_BALANCING */ +static inline unsigned long pte_present(pte_t pte) +{ + return (pte_val(pte) & (_PAGE_PRESENT_4U)); +} +#endif /* CONFIG_NUMA_BALANCING */ +#else /* !CONFIG_E90S */ static inline unsigned long pte_present(pte_t pte) { unsigned long val = pte_val(pte); @@ -668,15 +747,14 @@ static inline unsigned long pte_present(pte_t pte) " .previous\n" : "=r" (val) : "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V)); - return val; } -#define pte_accessible pte_accessible static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a) { return pte_val(a) & _PAGE_VALID; } +#endif /* CONFIG_E90S */ static inline unsigned long pte_special(pte_t pte) { @@ -804,8 +882,18 @@ static inline int pmd_present(pmd_t pmd) * the top bits outside of the range of any physical address size we * support are clear as well. We also validate the physical itself. 
*/ +#if defined( CONFIG_E90S) && defined(CONFIG_NUMA_BALANCING) +static inline int pmd_bad(pmd_t pmd) +{ + /* pmd_numa check */ + if ((pmd_val(pmd) & (_PAGE_PROTNONE|_PAGE_PRESENT_4U)) == _PAGE_PROTNONE) + return 0; + return (pmd_val(pmd) & ~PAGE_MASK); +} +#else +#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK) +#endif #define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK) - #define pud_none(pud) (!pud_val(pud)) #define pud_bad(pud) (pud_val(pud) & ~PAGE_MASK) @@ -1049,9 +1137,10 @@ static inline void arch_do_swap_page(struct mm_struct *mm, */ if (pte_none(oldpte)) return; - +#ifndef CONFIG_E90S if (adi_state.enabled && (pte_val(pte) & _PAGE_MCD_4V)) adi_restore_tags(mm, vma, addr, pte); +#endif } #define __HAVE_ARCH_UNMAP_ONE @@ -1059,8 +1148,10 @@ static inline int arch_unmap_one(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pte_t oldpte) { +#ifndef CONFIG_E90S if (adi_state.enabled && (pte_val(oldpte) & _PAGE_MCD_4V)) return adi_save_tags(mm, vma, addr, oldpte); +#endif return 0; } diff --git a/arch/sparc/include/asm/pil.h b/arch/sparc/include/asm/pil.h index 4003c35304bd..e4f0002804e2 100644 --- a/arch/sparc/include/asm/pil.h +++ b/arch/sparc/include/asm/pil.h @@ -25,6 +25,9 @@ #define PIL_SMP_CALL_FUNC_SNGL 6 #define PIL_DEFERRED_PCR_WORK 7 #define PIL_KGDB_CAPTURE 8 +#ifdef CONFIG_E90S +#define PIL_E90S_ASYNC_ERR 13 +#endif /*CONFIG_E90S*/ #define PIL_NORMAL_MAX 14 #define PIL_NMI 15 diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h index 3c4bc2189092..a33334edb106 100644 --- a/arch/sparc/include/asm/processor_32.h +++ b/arch/sparc/include/asm/processor_32.h @@ -26,6 +26,10 @@ struct task_struct; #ifdef __KERNEL__ + +/* virtualization does not support */ +#define paravirt_enabled() false + struct fpq { unsigned long *insn_addr; unsigned long insn; diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h index 5cf145f18f36..fcde65d272ce 100644 --- 
a/arch/sparc/include/asm/processor_64.h +++ b/arch/sparc/include/asm/processor_64.h @@ -47,6 +47,9 @@ #ifndef __ASSEMBLY__ +/* virtualization does not support */ +#define paravirt_enabled() false + typedef struct { unsigned char seg; } mm_segment_t; @@ -86,6 +89,10 @@ struct thread_struct { struct task_struct; +#ifdef CONFIG_E90S +#define default_idle() +#endif + /* On Uniprocessor, even in RMO processes see TSO semantics */ #ifdef CONFIG_SMP #define TSTATE_INITIAL_MM TSTATE_TSO @@ -252,6 +259,50 @@ static inline void prefetchw(const void *x) int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap); +#ifdef CONFIG_E90S +#define NUM_DUMP_FRAMES 64 +#endif /*CONFIG_E90S*/ + +#define SET_UNALIGN_CTL(task, value) ({ \ + __u32 status = task_thread_info(task)->status & \ + ~(TS_UNALIGN_NOPRINT | TS_UNALIGN_SIGBUS); \ + if (value & PR_UNALIGN_NOPRINT) \ + status |= TS_UNALIGN_NOPRINT; \ + if (value & PR_UNALIGN_SIGBUS) \ + status |= TS_UNALIGN_SIGBUS; \ + task_thread_info(task)->status = status; \ + 0; }) + +#define GET_UNALIGN_CTL(task, value) ({ \ + __u32 status = task_thread_info(task)->status; \ + __u32 res = 0; \ + if (status & TS_UNALIGN_NOPRINT) \ + res |= PR_UNALIGN_NOPRINT; \ + if (status & TS_UNALIGN_SIGBUS) \ + res |= PR_UNALIGN_SIGBUS; \ + put_user(res, (int __user *)(value)); \ + }) + +#if defined(CONFIG_E90S) && defined(CONFIG_SMP) +#include +typedef struct { + char comm[16]; + long pid; + long state; + long need_resched; + long cpu; + long t_pc; + long t_npc; + long prio; + long fp[NUM_DUMP_FRAMES]; + long tpc[NUM_DUMP_FRAMES]; +} cpu_bt_buf_t; +DECLARE_PER_CPU(cpu_bt_buf_t, cpu_bt_buf); + +extern void smp_show_backtrace_all_cpus(void); + +#endif /*CONFIG_SMP && CONFIG_E90S*/ + #endif /* !(__ASSEMBLY__) */ #endif /* !(__ASM_SPARC64_PROCESSOR_H) */ diff --git a/arch/sparc/include/asm/sections.h b/arch/sparc/include/asm/sections.h index 08f833453ab3..b7666c0dc3d6 100644 --- a/arch/sparc/include/asm/sections.h +++ 
b/arch/sparc/include/asm/sections.h @@ -5,6 +5,42 @@ /* nothing to see, move along */ #include +#ifndef CONFIG_RECOVERY +#define __init_recv __init +#define __initdata_recv __initdata +#else +#define __init_recv +#define __initdata_recv +#endif /* ! (CONFIG_RECOVERY) */ + +#if !defined(CONFIG_RECOVERY) && !defined(CONFIG_SERIAL_PRINTK) && \ + !defined(CONFIG_LMS_CONSOLE) +#define __init_cons __init +#else +#define __init_cons +#endif /* boot console used after init completion */ + +#define __interrupt +#define __init_kexec __init + +#ifdef CONFIG_MCST +#ifndef CONFIG_RECOVERY +#define __init_recv __init +#else +#define __init_recv +#endif /* ! (CONFIG_RECOVERY) */ + +#if !defined(CONFIG_RECOVERY) && !defined(CONFIG_SERIAL_PRINTK) && \ + !defined(CONFIG_LMS_CONSOLE) +#define __init_cons __init +#else +#define __init_cons +#endif /* boot console used after init completion */ + +#define __interrupt +#define __init_kexec __init +#endif /* CONFIG_MCST */ + /* sparc entry point */ extern char _start[]; diff --git a/arch/sparc/include/asm/serial.h b/arch/sparc/include/asm/serial.h new file mode 100644 index 000000000000..667284ba9bf6 --- /dev/null +++ b/arch/sparc/include/asm/serial.h @@ -0,0 +1,10 @@ +#ifndef __SPARC_SERIAL_H +#define __SPARC_SERIAL_H + +#ifndef CONFIG_E90S +#define BASE_BAUD ( 1843200 / 16 ) +#else +#include +#endif + +#endif /* __SPARC_SERIAL_H */ diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h index 72205684e51e..a891affdcbbb 100644 --- a/arch/sparc/include/asm/setup.h +++ b/arch/sparc/include/asm/setup.h @@ -9,6 +9,10 @@ #include +#ifdef CONFIG_E90S +#include +#endif + extern char reboot_command[]; #ifdef CONFIG_SPARC32 @@ -49,11 +53,13 @@ unsigned long safe_compute_effective_address(struct pt_regs *, unsigned int); #endif #ifdef CONFIG_SPARC64 +#ifndef CONFIG_E90S void __init start_early_boot(void); +#endif /* unaligned_64.c */ int handle_ldf_stq(u32 insn, struct pt_regs *regs); -void handle_ld_nf(u32 insn, struct 
pt_regs *regs); +int handle_ld_nf(u32 insn, struct pt_regs *regs); /* init_64.c */ extern atomic_t dcpage_flushes; diff --git a/arch/sparc/include/asm/sic_regs.h b/arch/sparc/include/asm/sic_regs.h new file mode 100644 index 000000000000..c38531f78ee2 --- /dev/null +++ b/arch/sparc/include/asm/sic_regs.h @@ -0,0 +1,247 @@ + +#ifndef _SPARC64_SIC_REGS_H_ +#define _SPARC64_SIC_REGS_H_ + +#include +#include +#include +#include +#include + +#ifndef __ASSEMBLY__ + +/* + * IO controller vendor ID + */ +typedef unsigned int e90s_io_vid_t; /* single word (32 bits) */ +typedef struct e90s_io_vid_fields { + e90s_io_vid_t unused : 16; /* [31:16] */ + e90s_io_vid_t vid : 16; /* [15:0] */ +} e90s_io_vid_fields_t; +typedef union e90s_io_vid_struct { /* Structure of word */ + e90s_io_vid_fields_t fields; /* as fields */ + e90s_io_vid_t word; /* as entier register */ +} e90s_io_vid_struct_t; +#define NBSR_IO_VID_vid(REG) \ + ((REG).fields.vid) /* vendor ID */ +#define NBSR_IO_VID_reg(REG) \ + ((REG).word) + +/* + * IO controller state register + */ +typedef unsigned int e90s_io_csr_t; /* single word (32 bits) */ +typedef struct e90s_io_csr_fields { + e90s_io_csr_t ch_on : 1; /* [31] */ + e90s_io_csr_t link_tu : 1; /* [30] */ + e90s_io_csr_t unused3 : 15; /* [29:15] */ + e90s_io_csr_t to_ev : 1; /* [14] */ + e90s_io_csr_t err_ev : 1; /* [13] */ + e90s_io_csr_t bsy_ev : 1; /* [12] */ + e90s_io_csr_t unused2 : 5; /* [11:7] */ + e90s_io_csr_t to_ie : 1; /* [6] */ + e90s_io_csr_t err_ie : 1; /* [5] */ + e90s_io_csr_t bsy_ie : 1; /* [4] */ + e90s_io_csr_t unused1 : 3; /* [3:1] */ + e90s_io_csr_t srst : 1; /* [0] */ +} e90s_io_csr_fields_t; +typedef union e90s_io_csr_struct { /* Structure of word */ + e90s_io_csr_fields_t fields; /* as fields */ + e90s_io_csr_t word; /* as entier register */ +} e90s_io_csr_struct_t; + +#define NBSR_IO_CSR_srst(REG) \ + ((REG).fields.srst) /* sofrware reset flag */ +#define NBSR_IO_CSR_bsy_ie(REG) \ + ((REG).fields.bsy_ie) /* flag of interrupt enable */ + 
/* on receiver busy */ +#define NBSR_IO_CSR_err_ie(REG) \ + ((REG).fields.err_ie) /* flag of interrupt enable */ + /* on CRC-error */ +#define NBSR_IO_CSR_to_ie(REG) \ + ((REG).fields.to_ie) /* flag of interrupt enable */ + /* on timeout */ +#define NBSR_IO_CSR_bsy_ev(REG) \ + ((REG).fields.bsy_ev) /* flag of interrupt */ + /* on receiver busy */ +#define NBSR_IO_CSR_err_ev(REG) \ + ((REG).fields.err_ev) /* flag of interrupt */ + /* on CRC-error */ +#define NBSR_IO_CSR_to_ev(REG) \ + ((REG).fields.to_ev) /* flag of interrupt */ + /* on timeout */ +#define NBSR_IO_CSR_link_tu(REG) \ + ((REG).fields.link_tu) /* flag of trening */ + /* in progress */ +#define NBSR_IO_CSR_ch_on(REG) \ + ((REG).fields.ch_on) /* flag of chanel */ + /* is ready and online */ +#define NBSR_IO_CSR_reg(REG) \ + ((REG).word) +#define IO_IS_ON_IO_CSR 1 /* IO controller is ready */ + /* and online */ + +/* + * Node Configuration + */ +typedef unsigned int e90s_ncfg_t; /* single word (32 bits) */ +typedef struct e90s_ncfg_fields { + e90s_ncfg_t unused1 : 8; /* [31:24] */ + e90s_ncfg_t ApicIoPresentMask : 4; /* [23:20] */ + e90s_ncfg_t ApicNodePresentMask : 4; /* [19:16] */ + e90s_ncfg_t unused2 : 2; /* [15:14] */ + e90s_ncfg_t CoreCmpMode : 1; /* [13] */ + e90s_ncfg_t CohModeHb : 1; /* [12] */ + e90s_ncfg_t CoreHardMask : 4; /* [11:8] */ + e90s_ncfg_t IoLinkRdmaMode : 1; /* [7] */ + e90s_ncfg_t Bootstrap : 1; /* [6] */ + e90s_ncfg_t BootMode : 1; /* [5] */ + e90s_ncfg_t CohModeL2 : 1; /* [4] */ + e90s_ncfg_t CoreSoftMask : 4; /* [3:0] */ +} e90s_ncfg_fields_t; +typedef union e90s_ncfg_struct { /* Structure of word */ + e90s_ncfg_fields_t fields; /* as fields */ + e90s_ncfg_t word; /* as entier register */ +} e90s_ncfg_struct_t; +#define NBSR_NCFG_ApicIoPresentMask(REG) \ + ((REG).fields.ApicIoPresentMask) /* present IO */ + /* links mask */ +#define NBSR_NCFG_ApicNodePresentMask(REG) \ + ((REG).fields.ApicNodePresentMask) /* present */ + /* CPUS link */ + /* mask */ +#define 
NBSR_NCFG_CoreCmpMode(REG) \ + ((REG).fields.CoreCmpMode) /* core comparision */ + /* mode flag */ +#define NBSR_NCFG_CohModeHb(REG) \ + ((REG).fields.CohModeHb) /* IO coherent mode */ +#define NBSR_NCFG_CoreHardMask(REG) \ + ((REG).fields.CoreHardMask) /* present core */ + /* hardware mask */ +#define NBSR_NCFG_IoLinkRdmaMode(REG) \ + ((REG).fields.IoLinkRdmaMode) /* IO link is RDMA */ +#define NBSR_NCFG_Bootstrap(REG) \ + ((REG).fields.Bootstrap) /* bootstrap CPU */ +#define NBSR_NCFG_BootMode(REG) \ + ((REG).fields.BootMode) /* boot mode */ +#define NBSR_NCFG_CohModeL2(REG) \ + ((REG).fields.CohModeL2) /* L2 coherent mode */ +#define NBSR_NCFG_CoreSoftMask(REG) \ + ((REG).fields.CoreSoftMask) /* present core */ + /* software mask */ +#define NBSR_NCFG_reg(REG) \ + ((REG).word) + +#define IOHUB_IOL_MODE 0 /* controller is IO HUB */ +#define RDMA_IOL_MODE 1 /* controller is RDMA */ + +/* + * Node Configuration Information + */ +typedef unsigned int e90s_nc_info_t; /* single word (32 bits) */ +typedef struct e90s_nc_info_fields { + e90s_nc_info_t unused1 : 6; /* [31:26] */ + e90s_nc_info_t IoccLinkTu : 1; /* [25] */ + e90s_nc_info_t IoccLinkUp : 1; /* [24] */ + e90s_nc_info_t unused2 : 1; /* [23] */ + e90s_nc_info_t IoccLinkRtype : 7; /* [22:16] */ + e90s_nc_info_t unused3 : 8; /* [15:8] */ + e90s_nc_info_t ClkDiv : 8; /* [7:0] */ +} e90s_nc_info_fields_t; +typedef union e90s_nc_info_struct { /* Structure of word */ + e90s_nc_info_fields_t fields; /* as fields */ + e90s_nc_info_t word; /* as entier register */ +} e90s_nc_info_struct_t; +#define NBSR_NC_INFO_IoccLinkTu(REG) \ + ((REG).fields.IoccLinkTu) /* training flag */ +#define NBSR_NC_INFO_IoccLinkUp(REG) \ + ((REG).fields.IoccLinkUp) /* IO link UP */ +#define NBSR_NC_INFO_IoccLinkRtype(REG) \ + ((REG).fields.IoccLinkRtype) /* abonent type */ +#define NBSR_NC_INFO_ClkDiv(REG) \ + ((REG).fields.ClkDiv) +#define NBSR_NC_INFO_reg(REG) \ + ((REG).word) + +#define IOHUB_ONLY_IOL_ABTYPE 1 /* abonent has only IO HUB 
*/ + /* controller */ +#define RDMA_ONLY_IOL_ABTYPE 2 /* abonent has only RDMA */ + /* controller */ +#define RDMA_IOHUB_IOL_ABTYPE 3 /* abonent has RDMA and */ + /* IO HUB controller */ + +/* + * RDMA controller vendor ID + */ +typedef unsigned int e90s_rdma_vid_t; /* single word (32 bits) */ +typedef struct e90s_rdma_vid_fields { + e90s_rdma_vid_t unused : 16; /* [31:16] */ + e90s_rdma_vid_t vid : 16; /* [15:0] */ +} e90s_rdma_vid_fields_t; +typedef union e90s_rdma_vid_struct { /* Structure of word */ + e90s_rdma_vid_fields_t fields; /* as fields */ + e90s_rdma_vid_t word; /* as entier register */ +} e90s_rdma_vid_struct_t; + +#define NBSR_RDMA_VID_vid(REG) \ + ((REG).fields.vid) /* vendor ID */ +#define NBSR_RDMA_VID_reg(REG) \ + ((REG).word) + +/* + * RDMA controller state register + */ +typedef unsigned int e90s_rdma_cs_t; /* single word (32 bits) */ +typedef struct e90s_rdma_cs_fields { + e90s_rdma_cs_t ch_on : 1; /* [31] */ + e90s_rdma_cs_t link_tu : 1; /* [30] */ + e90s_rdma_cs_t fch_on : 1; /* [29] */ + e90s_rdma_cs_t mow : 1; /* [28] */ + e90s_rdma_cs_t mor : 1; /* [27] */ + e90s_rdma_cs_t srst : 1; /* [26] */ + e90s_rdma_cs_t unused1 : 10; /* [25:16] */ + e90s_rdma_cs_t ptocl : 16; /* [15:0] */ +} e90s_rdma_cs_fields_t; +typedef union e90s_rdma_cs_struct { /* Structure of word */ + e90s_rdma_cs_fields_t fields; /* as fields */ + e90s_rdma_cs_t word; /* as entier register */ +} e90s_rdma_cs_struct_t; + +#define NBSR_RDMA_CS_ptocl(REG) \ + ((REG).fields.ptocl) /* timeout clock */ +#define NBSR_RDMA_CS_srst(REG) \ + ((REG).fields.srst) /* sofrware reset flag */ +#define NBSR_RDMA_CS_mor(REG) \ + ((REG).fields.mor) /* flag of not completed */ + /* readings */ +#define NBSR_RDMA_CS_mow(REG) \ + ((REG).fields.mow) /* flag of not completed */ + /* writings */ +#define NBSR_RDMA_CS_fch_on(REG) \ + ((REG).fields.fch_on) /* flag of chanel */ + /* forced set on */ +#define NBSR_RDMA_CS_link_tu(REG) \ + ((REG).fields.link_tu) /* flag of trenning */ + /* in progress 
*/ +#define NBSR_RDMA_CS_ch_on(REG) \ + ((REG).fields.ch_on) /* flag of chanel */ + /* is ready and online */ +#define NBSR_RDMA_CS_reg(REG) \ + ((REG).word) + +#endif /* ! __ASSEMBLY__ */ + +#define nbsr_early_read(addr) __raw_readl((addr)) +#define nbsr_early_write(value, addr) __raw_writel((value), (addr)) + +#define nbsr_read(addr) __raw_readl((addr)) +#define nbsr_readll(addr) __raw_readll((addr)) +#define nbsr_readw(addr) __raw_readw((addr)) +#define nbsr_write(value, addr) __raw_writel((value), (addr)) +#define nbsr_write_relaxed(value, addr) __raw_writel((value), (addr)) +#define nbsr_writell(value, addr) __raw_writeq((value), (addr)) +#define nbsr_writew(value, addr) __raw_writew((value), (addr)) + +#include + +#endif /* _SPARC64_SIC_REGS_H_ */ diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h index e75783b6abc4..26e81d6ec7a3 100644 --- a/arch/sparc/include/asm/smp_64.h +++ b/arch/sparc/include/asm/smp_64.h @@ -40,10 +40,13 @@ void scheduler_poke(void); void arch_send_call_function_single_ipi(int cpu); void arch_send_call_function_ipi_mask(const struct cpumask *mask); +#if defined(CONFIG_E90S) && defined(CONFIG_SMP) +extern void smp_show_backtrace_all_cpus(void); +#endif + /* * General functions that each host system must provide. 
*/ - int hard_smp_processor_id(void); #define raw_smp_processor_id() (current_thread_info()->cpu) diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h index e9b7d25b29fa..653b77159648 100644 --- a/arch/sparc/include/asm/spitfire.h +++ b/arch/sparc/include/asm/spitfire.h @@ -38,7 +38,11 @@ #define SPITFIRE_HIGHEST_LOCKED_TLBENT (64 - 1) #define CHEETAH_HIGHEST_LOCKED_TLBENT (16 - 1) +#ifdef CONFIG_E90S +#define L1DCACHE_SIZE 0x8000 +#else #define L1DCACHE_SIZE 0x4000 +#endif #define SUN4V_CHIP_INVALID 0x00 #define SUN4V_CHIP_NIAGARA1 0x01 diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h index b1d4e2e3210f..422200f9f82d 100644 --- a/arch/sparc/include/asm/switch_to_64.h +++ b/arch/sparc/include/asm/switch_to_64.h @@ -9,6 +9,35 @@ do { \ flushw_all(); \ } while (0) +#ifdef CONFIG_E90S +#define PERFCTR_END() do { \ + if (test_thread_flag(TIF_PERFCTR)) { \ + if (!test_thread_flag(TIF_FIRST_READ_PIC)) \ + read_and_stop_perfctrs(current_thread_info()->kernel_cnt);\ + else { \ + clear_thread_flag(TIF_FIRST_READ_PIC); \ + } \ +}} while(0) +#define PERFCTR_BEGIN() do { \ + if (test_thread_flag(TIF_PERFCTR)) { \ + clear_thread_flag(TIF_FIRST_READ_PIC); \ + write_perfctrs(current_thread_info()->pcr_regs, \ + current_thread_info()->kernel_cnt); \ + } else { \ + wr_pcr(0); \ + } \ +} while(0) + +#endif /* CONFIG_E90S */ + +#ifdef CONFIG_PREEMPTION +void check_lazy_mmu_end(void); +void check_lazy_mmu_begin(void); +#else /*!CONFIG_PREEMPTION*/ +static inline void check_lazy_mmu_end(void) {} +static inline void check_lazy_mmu_begin(void) {} +#endif /*CONFIG_PREEMPTION*/ + /* See what happens when you design the chip correctly? * * We tell gcc we clobber all non-fixed-usage registers except @@ -19,7 +48,9 @@ do { \ * and 2 stores in this critical code path. 
-DaveM */ #define switch_to(prev, next, last) \ -do { save_and_clear_fpu(); \ +do { check_lazy_mmu_end(); \ + PERFCTR_END(); \ + save_and_clear_fpu(); \ /* If you are tempted to conditionalize the following */ \ /* so that ASI is only written if it changes, think again. */ \ __asm__ __volatile__("wr %%g0, %0, %%asi" \ @@ -64,6 +95,8 @@ do { save_and_clear_fpu(); \ "l1", "l2", "l3", "l4", "l5", "l6", "l7", \ "i0", "i1", "i2", "i3", "i4", "i5", \ "o0", "o1", "o2", "o3", "o4", "o5", "o7"); \ + check_lazy_mmu_begin(); \ + PERFCTR_BEGIN(); \ } while(0) void synchronize_user_stack(void); diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h index 548b366165dd..8861454338ac 100644 --- a/arch/sparc/include/asm/thread_info_32.h +++ b/arch/sparc/include/asm/thread_info_32.h @@ -49,6 +49,9 @@ struct thread_info { struct reg_window32 reg_window[NSWINS]; /* align for ldd! */ unsigned long rwbuf_stkptrs[NSWINS]; unsigned long w_saved; +#ifdef CONFIG_MCST + long long irq_enter_clk; +#endif }; /* diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h index 20255471e653..7b63de366769 100644 --- a/arch/sparc/include/asm/thread_info_64.h +++ b/arch/sparc/include/asm/thread_info_64.h @@ -30,6 +30,9 @@ #include #include +#ifdef CONFIG_E90S +#include +#endif /* CONFIG_E90S */ struct task_struct; @@ -60,6 +63,17 @@ struct thread_info { struct pt_regs *kern_una_regs; unsigned int kern_una_insn; + int preempt_lazy_count; /* 0 => lazy preemptable + <0 => BUG */ +#ifdef CONFIG_MCST + unsigned int epic_core_priority; + long long irq_enter_clk; +#endif +#ifdef CONFIG_E90S + __u64 kernel_cnt[E90S_PIC_NR]; + __u64 pcr_regs[E90S_PIC_NR]; + +#endif /* CONFIG_E90S */ unsigned long fpregs[(7 * 256) / sizeof(unsigned long)] __attribute__ ((aligned(64))); }; @@ -90,7 +104,12 @@ struct thread_info { #define TI_XFSR 0x00000430 #define TI_KUNA_REGS 0x00000468 #define TI_KUNA_INSN 0x00000470 -#define TI_FPREGS 0x00000480 
+#define TI_LAZY_COUNT 0x00000474 +#ifndef CONFIG_E90S +#define TI_FPREGS 0x000004c0 +#else /* CONFIG_E90S */ +#define TI_FPREGS 0x00000500 +#endif /* CONFIG_E90S */ /* We embed this in the uppermost byte of thread_info->flags */ #define FAULT_CODE_WRITE 0x01 /* Write access, implies D-TLB */ @@ -113,12 +132,24 @@ struct thread_info { */ #ifndef __ASSEMBLY__ +#ifdef CONFIG_MCST #define INIT_THREAD_INFO(tsk) \ { \ .task = &tsk, \ .current_ds = ASI_P, \ .preempt_count = INIT_PREEMPT_COUNT, \ + .preempt_lazy_count = 0, \ + .status = TS_UNALIGN_SIGBUS, \ } +#else /* !CONFIG_MCST */ +#define INIT_THREAD_INFO(tsk) \ +{ \ + .task = &tsk, \ + .current_ds = ASI_P, \ + .preempt_count = INIT_PREEMPT_COUNT, \ + .preempt_lazy_count = 0, \ +} +#endif /* how to get the thread information struct from C */ #ifndef BUILD_VDSO @@ -180,7 +211,11 @@ extern struct thread_info *current_thread_info(void); #define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ #define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ +#ifndef CONFIG_E90S /* flag bit 4 is available */ +#else /* CONFIG_E90S */ +#define TIF_PERFCTR 4 /* performance counters active */ +#endif /* CONFIG_E90S */ #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */ #define TIF_UPROBE 6 /* breakpointed or singlestepped */ #define TIF_32BIT 7 /* 32-bit binary */ @@ -192,9 +227,17 @@ extern struct thread_info *current_thread_info(void); * in using in assembly, else we can't use the mask as * an immediate value in instructions such as andcc. 
*/ -#define TIF_MCDPER 12 /* Precise MCD exception */ +#ifndef CONFIG_E90S +/* flag bit 12 is available */ +#else /* CONFIG_E90S */ +#define TIF_FIRST_READ_PIC 12 /* first read pic counters */ +#endif /* CONFIG_E90S */ #define TIF_MEMDIE 13 /* is terminating due to OOM killer */ #define TIF_POLLING_NRFLAG 14 +#define TIF_NEED_RESCHED_LAZY 15 /* lazy rescheduling necessary */ +#ifdef CONFIG_MCST +#define TIF_NAPI_WORK 31 /* napi_wq_worker() is running MCST addition */ +#endif #define _TIF_SYSCALL_TRACE (1< #else diff --git a/arch/sparc/include/asm/timex_64.h b/arch/sparc/include/asm/timex_64.h index 076c44f6845d..d2a71df7fd8e 100644 --- a/arch/sparc/include/asm/timex_64.h +++ b/arch/sparc/include/asm/timex_64.h @@ -8,7 +8,43 @@ #define _ASMsparc64_TIMEX_H #include +#include +#ifdef CONFIG_E90S + +#define CLOCK_TICK_RATE 10000000 +/* Getting on the cycle counter on sparc64. */ +typedef unsigned long cycles_t; +#define ARCH_HAS_READ_CURRENT_TIMER +static inline cycles_t get_cycles (void) +{ + unsigned long ret; + __asm__ __volatile__("rd %%stick, %0" + : "=r" (ret)); + return ret; + +} + +extern u32 cpu_freq_hz; +#define UNSET_CPU_FREQ ((u32)(-1)) +static inline cycles_t get_cycles_rate(void) +{ + return (cycles_t)local_cpu_data().clock_tick; +} +static inline long long cycles_2nsec(long long cycles) +{ + return cycles * 1000 / (local_cpu_data().clock_tick / 1000000); +} +static inline long long cycles_2usec(long long cycles) +{ + return cycles * 1000 / (local_cpu_data().clock_tick / 1000); +} +static inline cycles_t usecs_2cycles(long long usecs) +{ + return usecs * (local_cpu_data().clock_tick / 1000) / 1000; +} + +#else /*CONFIG_E90S*/ #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ /* Getting on the cycle counter on sparc64. 
*/ @@ -16,5 +52,6 @@ typedef unsigned long cycles_t; #define get_cycles() tick_ops->get_tick() #define ARCH_HAS_READ_CURRENT_TIMER +#endif /*CONFIG_E90S*/ #endif diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h index 8b8cdaa69272..848105f42ab8 100644 --- a/arch/sparc/include/asm/tlbflush_64.h +++ b/arch/sparc/include/asm/tlbflush_64.h @@ -38,13 +38,16 @@ static inline void flush_tlb_range(struct vm_area_struct *vma, } void flush_tlb_kernel_range(unsigned long start, unsigned long end); +void flush_tlb_pending(void); +#ifndef CONFIG_MCST_RT #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE -void flush_tlb_pending(void); void arch_enter_lazy_mmu_mode(void); void arch_leave_lazy_mmu_mode(void); #define arch_flush_lazy_mmu_mode() do {} while (0) +#endif + /* Local cpu only. */ void __flush_tlb_all(void); diff --git a/arch/sparc/include/asm/topology.h b/arch/sparc/include/asm/topology.h index ba7b9d9d91cf..b648f846fc47 100644 --- a/arch/sparc/include/asm/topology.h +++ b/arch/sparc/include/asm/topology.h @@ -2,7 +2,11 @@ #ifndef ___ASM_SPARC_TOPOLOGY_H #define ___ASM_SPARC_TOPOLOGY_H #if defined(__sparc__) && defined(__arch64__) +#ifdef CONFIG_E90S +#include +#else #include +#endif #else #include #endif diff --git a/arch/sparc/include/asm/topology_e90s.h b/arch/sparc/include/asm/topology_e90s.h new file mode 100644 index 000000000000..0fdf1df0e721 --- /dev/null +++ b/arch/sparc/include/asm/topology_e90s.h @@ -0,0 +1,137 @@ +#ifndef _ASM_SPARC64_TOPOLOGY_E90S_H +#define _ASM_SPARC64_TOPOLOGY_E90S_H + +#include +#include +#include +/* + * IO links/controllers/buses topology: + * each node of e90s machines can have from 1 to MAX_NODE_IOLINKS IO links + * which can be connected to IOHUB or RDMA + */ +#define MAX_NODE_IOLINKS MACH_NODE_NUMIOLINKS +/* + * IOLINK can be represented by global domain number (unique at system and + * corresponds to bit number at iolinkmask_t bit map structure) + * and as pair: node # and local link number on the 
node. + * It needs convert from one presentation to other + * Supporing of many IOHUBs not yet implemented on sprac arch + */ + +#define node_iolink_to_domain(node, link) \ + ((node) * (MACH_NODE_NUMIOLINKS) + (link)) +#define node_iohub_to_domain(node, link) node_iolink_to_domain((node), (link)) +#define node_rdma_to_domain(node, link) node_iolink_to_domain((node), (link)) +#define iolink_domain_to_node(domain) \ + ((domain) / (MACH_NODE_NUMIOLINKS)) +#define iolink_domain_to_link(domain) \ + ((domain) % (MACH_NODE_NUMIOLINKS)) +#define iohub_domain_to_node(domain) iolink_domain_to_node(domain) +#define iohub_domain_to_link(domain) iolink_domain_to_link(domain) +#define rdma_domain_to_node(domain) iolink_domain_to_node(domain) +#define rdma_domain_to_link(domain) iolink_domain_to_link(domain) + +#define pcibus_to_link(bus) __pcibus_to_link(bus) + +#define mach_early_iohub_online(node, link) \ + e90s_early_iohub_online((node), (link)) + + +#define __node_to_cpumask_and(node, cpu_mask) \ +({ \ + cpumask_t cpumask = *cpumask_of_node(node); \ + cpumask_and(&cpumask, &cpumask, &cpu_mask); \ + cpumask; \ +}) + +#define node_to_cpumask(node) \ + __node_to_cpumask_and(node, *cpu_online_mask) +#define node_to_present_cpumask(node) \ + __node_to_cpumask_and(node, *cpu_present_mask) + +#define __node_to_first_cpu(node, cpu_mask) \ +({ \ + cpumask_t node_cpumask; \ + node_cpumask = __node_to_cpumask_and(node, cpu_mask); \ + cpumask_first((const struct cpumask *)&node_cpumask); \ +}) + +#define node_to_first_cpu(node) \ + __node_to_first_cpu(node, *cpu_online_mask) +#define node_to_first_present_cpu(node) \ + __node_to_first_cpu(node, *cpu_present_mask) + +#ifdef CONFIG_NUMA + +#include + +static inline int cpu_to_node(int cpu) +{ + return numa_cpu_lookup_table[cpu]; +} + +#define parent_node(node) (node) + +#define cpumask_of_node(node) ((node) == -1 ? 
\ + cpu_all_mask : \ + &numa_cpumask_lookup_table[node]) + +struct pci_bus; +#ifdef CONFIG_PCI +extern int pcibus_to_node(struct pci_bus *pbus); +#else +static inline int pcibus_to_node(struct pci_bus *pbus) +{ + return -1; +} +#endif + +#define cpumask_of_pcibus(bus) \ + (pcibus_to_node(bus) == -1 ? \ + cpu_all_mask : \ + cpumask_of_node(pcibus_to_node(bus))) + +#define SD_NODE_INIT (struct sched_domain) { \ + .min_interval = 8, \ + .max_interval = 32, \ + .busy_factor = 32, \ + .imbalance_pct = 125, \ + .cache_nice_tries = 2, \ + .busy_idx = 3, \ + .idle_idx = 2, \ + .newidle_idx = 0, \ + .wake_idx = 0, \ + .forkexec_idx = 0, \ + .flags = SD_LOAD_BALANCE \ + | SD_BALANCE_FORK \ + | SD_BALANCE_EXEC \ + | SD_SERIALIZE, \ + .last_balance = jiffies, \ + .balance_interval = 1, \ +} + +#else /* CONFIG_NUMA */ + +#include + +#endif /* !(CONFIG_NUMA) */ + +#ifdef CONFIG_SMP +#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id) +#define topology_core_id(cpu) (cpu_data(cpu).core_id) +#define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) +#define mc_capable() (sparc64_multi_core) +#define smt_capable() (sparc64_multi_core) +#endif /* CONFIG_SMP */ + +static inline void arch_fix_phys_package_id(int num, u32 slot) +{ +} + +extern cpumask_t cpu_core_map[NR_CPUS]; +static inline const struct cpumask *cpu_coregroup_mask(int cpu) +{ + return &cpu_core_map[cpu]; +} + +#endif /* _ASM_SPARC64_TOPOLOGY_E90S_H */ diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h index 0f6d0c4f6683..460f85cc72b5 100644 --- a/arch/sparc/include/asm/trap_block.h +++ b/arch/sparc/include/asm/trap_block.h @@ -61,10 +61,14 @@ unsigned long real_hard_smp_processor_id(void); struct cpuid_patch_entry { unsigned int addr; +#ifdef CONFIG_E90S + unsigned int r2000p[3]; +#else unsigned int cheetah_safari[4]; unsigned int cheetah_jbus[4]; unsigned int starfire[4]; unsigned int sun4v[4]; +#endif }; extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end; 
@@ -115,6 +119,27 @@ extern struct sun4v_2insn_patch_entry __sun_m7_2insn_patch, #include + +#ifdef CONFIG_E90S +#ifdef __KERNEL__ +#include +#endif + +#define __GET_CPUID(REG) \ + /* r1000 implementation (default). */ \ +661: mov APIC_LVT0, REG; \ + lduwa [REG] ASI_LAPIC, REG; \ + and REG, APIC_VECTOR_MASK, REG; \ + .section .cpuid_patch, "ax"; \ + /* Instruction location. */ \ + .word 661b; \ + /* r2000p implementation. */ \ + set CEPIC_ID, REG; \ + lduwa [REG] ASI_EPIC, REG; \ + nop; \ + .previous; + +#else /*CONFIG_E90S*/ #define __GET_CPUID(REG) \ /* Spitfire implementation (default). */ \ 661: ldxa [%g0] ASI_UPA_CONFIG, REG; \ @@ -145,6 +170,7 @@ extern struct sun4v_2insn_patch_entry __sun_m7_2insn_patch, nop; \ nop; \ .previous; +#endif /*CONFIG_E90S*/ #ifdef CONFIG_SMP diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h index 522a677e050d..9fc711f3cdc4 100644 --- a/arch/sparc/include/asm/tsb.h +++ b/arch/sparc/include/asm/tsb.h @@ -51,6 +51,18 @@ #define TSB_TAG_INVALID_BIT 46 #define TSB_TAG_INVALID_HIGH (1 << (TSB_TAG_INVALID_BIT - 32)) +#ifdef CONFIG_RMO +#define TSB_MEMBAR membar #StoreStore +#else /* CONFIG_RMO */ +#define TSB_MEMBAR +#endif /* CONFIG_RMO */ + +#ifdef CONFIG_RMO +#define TSB_MEMBAR membar #StoreStore +#else /* CONFIG_RMO */ +#define TSB_MEMBAR +#endif /* CONFIG_RMO */ + /* Some cpus support physical address quad loads. We want to use * those if possible so we don't need to hard-lock the TSB mapping * into the TLB. We encode some instruction patching in order to @@ -127,11 +139,13 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; cmp REG1, REG2; \ bne,pn %icc, 99b; \ nop; \ + TSB_MEMBAR #define TSB_WRITE(TSB, TTE, TAG) \ add TSB, 0x8, TSB; \ TSB_STORE(TSB, TTE); \ sub TSB, 0x8, TSB; \ + TSB_MEMBAR; \ TSB_STORE(TSB, TAG); /* Do a kernel page table walk. 
Leaves valid PTE value in @@ -151,7 +165,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; * bit 23, for 8MB per PMD) we must propagate bit 22 for a * 4MB huge page. For huge PUDs (which fall on bit 33, for * 8GB per PUD), we have to accommodate 256MB and 2GB huge - * pages. So for those we propagate bits 32 to 28. + * pages. So for those we propagate bits 32 to 22. */ #define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL) \ sethi %hi(swapper_pg_dir), REG1; \ @@ -170,7 +184,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; brz,pn REG1, FAIL_LABEL; \ sllx REG2, 32, REG2; \ andcc REG1, REG2, %g0; \ - sethi %hi(0xf8000000), REG2; \ + sethi %hi(0xffe00000), REG2; \ bne,pt %xcc, 697f; \ sllx REG2, 1, REG2; \ sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \ diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index bf9d330073b2..2383267a1407 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -8,8 +8,10 @@ #include #include +#include #include #include +#include #include #include @@ -176,18 +178,34 @@ __asm__ __volatile__( \ int __get_user_bad(void); unsigned long __must_check raw_copy_from_user(void *to, - const void __user *from, - unsigned long size); + const void __user *from, + unsigned long size); +#ifdef CONFIG_MCST +unsigned long __must_check _raw_copy_to_user(void __user *to, + const void *from, + unsigned long size); + +unsigned long mcst_copy_to_user(char __user *to, + void *from, + unsigned long size); + +static inline unsigned long __must_check raw_copy_to_user(void __user *to, + const void *from, + unsigned long size){ + return mcst_copy_to_user(to, (void *)from, size); +} +#else unsigned long __must_check raw_copy_to_user(void __user *to, - const void *from, - unsigned long size); + const void *from, + unsigned long size); +#endif /* CONFIG_MCST */ #define INLINE_COPY_FROM_USER #define INLINE_COPY_TO_USER unsigned long 
__must_check raw_copy_in_user(void __user *to, - const void __user *from, - unsigned long size); + const void __user *from, + unsigned long size); unsigned long __must_check __clear_user(void __user *, unsigned long); @@ -196,8 +214,9 @@ unsigned long __must_check __clear_user(void __user *, unsigned long); __must_check long strnlen_user(const char __user *str, long n); struct pt_regs; -unsigned long compute_effective_address(struct pt_regs *, +int compute_effective_address(struct pt_regs *, unsigned int insn, - unsigned int rd); + unsigned int rd, + unsigned long *addr); #endif /* _ASM_UACCESS_H */ diff --git a/arch/sparc/include/uapi/asm/asi.h b/arch/sparc/include/uapi/asm/asi.h index fbb30a5b082f..1f590962b20e 100644 --- a/arch/sparc/include/uapi/asm/asi.h +++ b/arch/sparc/include/uapi/asm/asi.h @@ -224,6 +224,11 @@ #define ASI_IC_INSTR 0x66 /* Insn cache instrucion ram diag */ #define ASI_IC_TAG 0x67 /* Insn cache tag/valid ram diag */ #define ASI_IC_STAG 0x68 /* (III) Insn cache snoop tag ram */ +#ifdef CONFIG_E90S +#define ASI_LAPIC 0x68 +#define ASI_EPIC ASI_LAPIC +#define ASI_CONFIG 0x69 /* (e90s) access to local resources*/ +#endif /*CONFIG_E90S*/ #define ASI_IC_PRE_DECODE 0x6e /* Insn cache pre-decode ram diag */ #define ASI_IC_NEXT_FIELD 0x6f /* Insn cache next-field ram diag */ #define ASI_BRPRED_ARRAY 0x6f /* (III) Branch Prediction RAM diag*/ diff --git a/arch/sparc/include/uapi/asm/ioctls.h b/arch/sparc/include/uapi/asm/ioctls.h index 7fd2f5873c9e..9aa1d2c40297 100644 --- a/arch/sparc/include/uapi/asm/ioctls.h +++ b/arch/sparc/include/uapi/asm/ioctls.h @@ -124,6 +124,9 @@ #define TIOCSERSETMULTI 0x545B /* Set multiport config */ #define TIOCMIWAIT 0x545C /* Wait for change on serial input line(s) */ #define TIOCGICOUNT 0x545D /* Read serial port inline interrupt counts */ +#ifdef CONFIG_MCST +#define TIODUMPREGS 0x54F6 /* return array of dev regs */ +#endif /* Kernel definitions */ diff --git a/arch/sparc/include/uapi/asm/mman.h 
b/arch/sparc/include/uapi/asm/mman.h index cec9f4109687..e4d468267712 100644 --- a/arch/sparc/include/uapi/asm/mman.h +++ b/arch/sparc/include/uapi/asm/mman.h @@ -4,6 +4,8 @@ #include +#define PROT_INVEND 0x00010000 /* Invert Endianness */ + /* SunOS'ified... */ #define PROT_ADI 0x10 /* ADI enabled */ diff --git a/arch/sparc/include/uapi/asm/perfctr.h b/arch/sparc/include/uapi/asm/perfctr.h index 316b837bcb8f..99e3cb2ac593 100644 --- a/arch/sparc/include/uapi/asm/perfctr.h +++ b/arch/sparc/include/uapi/asm/perfctr.h @@ -52,7 +52,32 @@ enum perfctr_opcode { /* Store in pointer given in ARG0 the current PCR register value * being used. */ - PERFCTR_GETPCR + PERFCTR_GETPCR, +#ifdef CONFIG_E90S + /* Requests for E90S. Initial requests disabled. + * Signal SIGTRAP is sent in case of process performance counter overflow + */ + + /* Write to perfctr and pic registers. + * ARG0 - pid of task to profile. + * ARG1 - pointer to initial value set of 4 64-bit counters. + * ARG2 - pointer to set of 4 64-bit PCR register values. + */ + E90S_PERFCTR_WRITE_AND_ON, + + /* Disable performance counter + * ARG0 - pid of task to profile. + */ + E90S_PERFCTR_OFF, + + /* Read from perfctr and pic registers. + * ARG0 - pid of task to profile. + * ARG1 - pointer to initial value set of 4 64-bit counters. + * ARG2 - pointer to set of 4 64-bit PCR register values. + * ARG1 and/or ARG2 can be empty (set to NULL). 
+ */ + E90S_PERFCTR_READ, +#endif /* CONFIG_E90S */ }; #define PRIV 0x00000001 diff --git a/arch/sparc/include/uapi/asm/ptrace.h b/arch/sparc/include/uapi/asm/ptrace.h index abe640037a55..7ba76c861470 100644 --- a/arch/sparc/include/uapi/asm/ptrace.h +++ b/arch/sparc/include/uapi/asm/ptrace.h @@ -329,11 +329,20 @@ struct sparc_stackf { /* Stuff for the ptrace system call */ #define PTRACE_SPARC_DETACH 11 +#ifdef CONFIG_MCST /* bug 135261 */ + /* do not pollute user space, define as glibc does */ +#undef PTRACE_DETACH +#define PTRACE_DETACH PTRACE_SPARC_DETACH +#endif #define PTRACE_GETREGS 12 #define PTRACE_SETREGS 13 #define PTRACE_GETFPREGS 14 #define PTRACE_SETFPREGS 15 +#ifdef CONFIG_MCST /* 16 used in linux/ptrace.h, define as glibc does */ +#define PTRACE_READDATA PTRACE_READTEXT +#else #define PTRACE_READDATA 16 +#endif #define PTRACE_WRITEDATA 17 #define PTRACE_READTEXT 18 #define PTRACE_WRITETEXT 19 diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h index 7f5d773b8cfc..d7f136cfa8cf 100644 --- a/arch/sparc/include/uapi/asm/unistd.h +++ b/arch/sparc/include/uapi/asm/unistd.h @@ -30,4 +30,4 @@ /* Bitmask values returned from kern_features system call. 
*/ #define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 -#endif /* _UAPI_SPARC_UNISTD_H */ +#endif /* _UAPI_SPARC_UNISTD_H */ \ No newline at end of file diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 97c0e19263d1..c00d1e688c5f 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -5,7 +5,7 @@ # asflags-y := -ansi -ccflags-y := -Werror +#ccflags-y := -Werror extra-y := head_$(BITS).o @@ -29,7 +29,14 @@ obj-$(CONFIG_SPARC32) += rtrap_32.o obj-y += traps_$(BITS).o # IRQ +ifdef CONFIG_E90S +obj-y += irq_e90s.o +obj-y += e90s.o +obj-y += e90s_sic.o +obj-y += procpfreg_e90s.o +else obj-y += irq_$(BITS).o +endif obj-$(CONFIG_SPARC32) += sun4m_irq.o sun4d_irq.o obj-y += process_$(BITS).o @@ -37,26 +44,33 @@ obj-y += signal_$(BITS).o obj-y += sigutil_$(BITS).o obj-$(CONFIG_SPARC32) += ioport.o obj-y += setup_$(BITS).o -obj-y += idprom.o obj-y += sys_sparc_$(BITS).o obj-$(CONFIG_SPARC32) += systbls_32.o +ifdef CONFIG_E90S +obj-y += time_e90s.o +else obj-y += time_$(BITS).o +obj-y += idprom.o +endif obj-$(CONFIG_SPARC32) += windows.o obj-y += cpu.o -obj-$(CONFIG_SPARC64) += vdso.o +obj-$(CONFIG_GENERIC_TIME_VSYSCALL) += vdso.o obj-$(CONFIG_SPARC32) += devices.o obj-y += ptrace_$(BITS).o obj-y += unaligned_$(BITS).o obj-y += una_asm_$(BITS).o +ifndef CONFIG_E90S +obj-$(CONFIG_OF) += of_device_common.o +obj-$(CONFIG_OF) += of_device_$(BITS).o obj-y += prom_common.o obj-y += prom_$(BITS).o -obj-y += of_device_common.o -obj-y += of_device_$(BITS).o obj-$(CONFIG_SPARC64) += prom_irqtrans.o +endif obj-$(CONFIG_SPARC32) += leon_kernel.o obj-$(CONFIG_SPARC32) += leon_pmc.o +ifndef CONFIG_E90S obj-$(CONFIG_SPARC64) += reboot.o obj-$(CONFIG_SPARC64) += sysfs.o obj-$(CONFIG_SPARC64) += iommu.o iommu-common.o @@ -69,21 +83,27 @@ obj-$(CONFIG_SPARC64) += visemul.o obj-$(CONFIG_SPARC64) += hvapi.o obj-$(CONFIG_SPARC64) += sstate.o obj-$(CONFIG_SPARC64) += mdesc.o -obj-$(CONFIG_SPARC64) += adi_64.o obj-$(CONFIG_SPARC64) += pcr.o 
obj-$(CONFIG_SPARC64) += nmi.o obj-$(CONFIG_SPARC64_SMP) += cpumap.o +obj-y += dma.o +endif + obj-$(CONFIG_PCIC_PCI) += pcic.o obj-$(CONFIG_LEON_PCI) += leon_pci.o obj-$(CONFIG_SPARC_GRPCI2)+= leon_pci_grpci2.o obj-$(CONFIG_SPARC_GRPCI1)+= leon_pci_grpci1.o +ifdef CONFIG_E90S +obj-$(CONFIG_SMP) += trampoline_e90s.o smp_e90s.o +else obj-$(CONFIG_SMP) += trampoline_$(BITS).o smp_$(BITS).o +obj-y += auxio_$(BITS).o +endif obj-$(CONFIG_SPARC32_SMP) += sun4m_smp.o sun4d_smp.o leon_smp.o obj-$(CONFIG_SPARC64_SMP) += hvtramp.o -obj-y += auxio_$(BITS).o obj-$(CONFIG_SUN_PM) += apc.o pmc.o obj-$(CONFIG_MODULES) += module.o @@ -101,7 +121,11 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_SPARC64_PCI) += pci.o pci_common.o psycho_common.o obj-$(CONFIG_SPARC64_PCI) += pci_psycho.o pci_sabre.o pci_schizo.o obj-$(CONFIG_SPARC64_PCI) += pci_sun4v.o pci_sun4v_asm.o pci_fire.o +ifdef CONFIG_E90S +obj-$(CONFIG_PCI) += pci_e90s.o +else obj-$(CONFIG_SPARC64_PCI_MSI) += pci_msi.o +endif obj-$(CONFIG_COMPAT) += sys32.o sys_sparc32.o signal32.o @@ -114,8 +138,15 @@ obj-$(CONFIG_AUDIT) += audit.o audit--$(CONFIG_AUDIT) := compat_audit.o obj-$(CONFIG_COMPAT) += $(audit--y) +ifdef CONFIG_E90S +pc--$(CONFIG_PERF_EVENTS) := perf_event_e90s.o +else pc--$(CONFIG_PERF_EVENTS) := perf_event.o +endif + obj-$(CONFIG_SPARC64) += $(pc--y) obj-$(CONFIG_UPROBES) += uprobes.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o + +obj-$(CONFIG_OF) += devtree.o diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c index 4401dee30018..7cdf87229158 100644 --- a/arch/sparc/kernel/cpu.c +++ b/arch/sparc/kernel/cpu.c @@ -21,6 +21,11 @@ #include #include #include +#ifdef CONFIG_E90S +#define NEEDS_GET_DCR +#include +#endif + #include "kernel.h" #include "entry.h" @@ -369,12 +374,26 @@ unsigned int icache_parity_tl1_occurred; static int show_cpuinfo(struct seq_file *m, void *__unused) { +#ifdef CONFIG_E90S + unsigned long ver; + __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver)); +#endif + seq_printf(m, 
+#ifdef CONFIG_E90S + "vendor_id\t: %s\n" + "model name\t: %s\n" + "cpu\t\t: %s\n" + "revision\t: %lu\n" + "dcr\t\t: 0x%lx\n" + "type\t\t: e90s\n" +#else "cpu\t\t: %s\n" "fpu\t\t: %s\n" "pmu\t\t: %s\n" "prom\t\t: %s\n" "type\t\t: %s\n" +#endif "ncpus probed\t: %d\n" "ncpus active\t: %d\n" "D$ parity tl1\t: %u\n" @@ -383,6 +402,13 @@ static int show_cpuinfo(struct seq_file *m, void *__unused) "Cpu0ClkTck\t: %016lx\n" #endif , +#ifdef CONFIG_E90S + mcst_mb_name, + GET_CPU_TYPE_NAME(bootblock->info.bios.cpu_type), + GET_CPU_TYPE_NAME(bootblock->info.bios.cpu_type), + (ver >> 24) & 0xff, + get_dcr(), +#else sparc_cpu_type, sparc_fpu_type, sparc_pmu_type, @@ -390,6 +416,7 @@ static int show_cpuinfo(struct seq_file *m, void *__unused) ((tlb_type == hypervisor) ? "sun4v" : "sun4u"), +#endif ncpus_probed, num_online_cpus(), dcache_parity_tl1_occurred, @@ -526,8 +553,10 @@ static void __init sun4v_cpu_probe(void) break; default: +#ifndef CONFIG_E90S printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n", prom_cpu_compatible); +#endif sparc_cpu_type = "Unknown SUN4V CPU"; sparc_fpu_type = "Unknown SUN4V FPU"; sparc_pmu_type = "Unknown SUN4V PMU"; diff --git a/arch/sparc/kernel/devtree.c b/arch/sparc/kernel/devtree.c new file mode 100644 index 000000000000..eef3927f4885 --- /dev/null +++ b/arch/sparc/kernel/devtree.c @@ -0,0 +1,122 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +int devtree_detected = 1; + +void __init early_init_dt_add_memory_arch(u64 base, u64 size) +{ + memblock_add(base, size); +} + +void __init *early_init_dt_alloc_memory_arch(u64 size, u64 align) +{ + return memblock_alloc(size, align); +} + +/* + * This function will create device nodes in sysfs coresponding to nodes + * described in dtb. 
+ */ +int __init sparc_publish_devices(void) +{ + if (!of_have_populated_dt()) { + return 0; + } + return of_platform_populate(NULL, of_default_bus_match_table, + NULL, NULL); +} +device_initcall(sparc_publish_devices); + +static u64 get_device_tree_addr(void) +{ + /* + * Atention! Hack! + * Old versions of boot (2019 year include), have a bug. + * Boot writes virtual memory address in ....bios.devtree field. + * But we need phys memory address. + * Boot-writers claim that phys address high bits + * are same as in ....kernel_base field. + * So, for fix it, we need to change this high bytes. + */ + if ((bootblock->info.bios.devtree & 0xf0000000) == + (bootblock->info.kernel_base & 0xf0000000)) + return bootblock->info.bios.devtree; + return (bootblock->info.bios.devtree & 0xffffff) | 0x81000000; +} + +u32 get_dtb_size(void) +{ + u32 *blob_addr = (u32 *)get_device_tree_addr(); + u32 dtb_data = __raw_readl(blob_addr); + u32 magic = OF_DT_HEADER; + + /* left for possible debugging */ + /* + printk(KERN_ERR "DevTree: %llx ==> %llx\n", + bootblock->info.bios.devtree, get_device_tree_addr()); + printk(KERN_ERR "DevTree[0] DevTree[1]: %x %x\n", + __raw_readl(blob_addr), __raw_readl(blob_addr + 1)); + printk(KERN_ERR "Kern phys: 0x%llx\n", bootblock->info.kernel_base); + printk(KERN_ERR "BootLog addr: %x\n", + bootblock->info.bios.bootlog_addr); + */ + if (be32_to_cpu(dtb_data) != magic) { + printk(KERN_ERR "DevTree: disabled (incorrect magic): %x\n", + dtb_data); + return 0; + } + dtb_data = __raw_readl(blob_addr+1); + + return __be32_to_cpu(dtb_data); +} + +void get_dtb_from_boot(u8 *blob, u32 len) +{ + u32 *blob_addr = (u32 *)get_device_tree_addr(); + u8 *dtb_ptr = (u8 *)blob_addr; + int i; + + for (i = 0; i < len; i++) { + u8 dt = __raw_readb(dtb_ptr); + blob[i] = dt; + dtb_ptr++; + } + + return; +} + +int __init device_tree_init(void) +{ + u8 *dt; +#ifdef CONFIG_DTB_L_TEST + initial_boot_params = (struct boot_param_header *)test_blob; +#else + u32 sz = get_dtb_size(); 
+ if (sz == 0) { + printk(KERN_ERR "DevTree: device tree size is 0\n"); + devtree_detected = 0; + return -1; + } else { + printk(KERN_INFO "DevTree: device tree size is %d\n", sz); + } + + dt = memblock_alloc(sz, SMP_CACHE_BYTES); + if (dt == NULL) { + printk(KERN_ERR "DevTree: not enough memory\n"); + devtree_detected = 0; + return -2; + } + + get_dtb_from_boot(dt, sz); + + initial_boot_params = (struct boot_param_header *)dt; +#endif + unflatten_device_tree(); + return 0; +} diff --git a/arch/sparc/kernel/e90s.c b/arch/sparc/kernel/e90s.c new file mode 100644 index 000000000000..981bdb77ab15 --- /dev/null +++ b/arch/sparc/kernel/e90s.c @@ -0,0 +1,166 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int this_is_starfire = 0; +int scons_pwroff = 1; +int sun4v_chip_type = SUN4V_CHIP_INVALID; +EXPORT_SYMBOL(sun4v_chip_type); + +machdep_t machine; +EXPORT_SYMBOL(machine); + +static char command_line[COMMAND_LINE_SIZE]; +struct adi_config adi_state; +EXPORT_SYMBOL(adi_state); + +char * __init prom_getbootargs(void) +{ +#ifdef CONFIG_CMDLINE_BOOL + return CONFIG_CMDLINE; +#endif + if (bootblock != NULL && + bootblock->info.kernel_args_string != NULL) { + + if (strncmp(bootblock->info.kernel_args_string, + KERNEL_ARGS_STRING_EX_SIGNATURE, + KERNEL_ARGS_STRING_EX_SIGN_SIZE)) { + strncpy(command_line, bootblock->info.kernel_args_string, + KSTRMAX_SIZE); + } else { // long cmd line 512 b + strncpy(command_line, bootblock->info.bios.kernel_args_string_ex, + KSTRMAX_SIZE_EX); + } + command_line[COMMAND_LINE_SIZE-1] = '\0'; /* for safety */ + prom_printf("prom_getbootargs() command line 0x%p : %s\n", + command_line, command_line); + return (command_line); + } + +#ifdef CONFIG_PCI +#ifdef CONFIG_CMDLINE_BOOL + return CONFIG_CMDLINE; +#else + return "root=/dev/hda1 console=ttyS0"; +#endif +#else + return "root=/dev/nfs ip=bootp lpj=1000000000"; 
+#endif + return NULL; +} + +void machine_restart(char *cmd) +{ + if (machine.arch_reset != NULL) + machine.arch_reset(cmd); +} + +void machine_halt(void) +{ + if (machine.arch_halt != NULL) + machine.arch_halt(); +} + +void machine_power_off(void) +{ + machine_halt(); +} + +void prom_halt(void) +{ + panic(__func__); + for(;;); +} + +/* This isn't actually used, it exists merely to satisfy the + * reference in kernel/sys.c + */ +void (*pm_power_off)(void) = machine_power_off; +EXPORT_SYMBOL(pm_power_off); + + +#define L2_CTRL ((2UL << 32) | (0 << 8)) +# define CMD_FLUSH_L2 (1 << 8) + +static void e90s_local_flush_l2_cache(void) +{ + u64 v = readq_asi(L2_CTRL, ASI_CONFIG); + writeq_asi(v | CMD_FLUSH_L2, L2_CTRL, ASI_CONFIG); +} + +void e90s_flush_l2_cache(void) +{ + if (e90s_get_cpu_type() < E90S_CPU_R2000) /* unsupported */ + return; + WARN_ONCE(e90s_get_cpu_type() != E90S_CPU_R2000P, + "FIXME: add smp flush_cache_all()\n"); + e90s_local_flush_l2_cache(); +} +EXPORT_SYMBOL(e90s_flush_l2_cache); + +u8 inb(unsigned long addr) +{ + /*we have to OR BASE_PCIIO because vga drivers ignore it*/ + return readb((void __iomem *)(addr | BASE_PCIIO)); +} +EXPORT_SYMBOL(inb); + +u16 inw(unsigned long addr) +{ + return readw((void __iomem *)(addr | BASE_PCIIO)); +} +EXPORT_SYMBOL(inw); + +u32 inl(unsigned long addr) +{ + return readl((void __iomem *)(addr | BASE_PCIIO)); +} +EXPORT_SYMBOL(inl); + +void outb(u8 b, unsigned long addr) +{ + writeb(b, (void __iomem *)(addr | BASE_PCIIO)); +} +EXPORT_SYMBOL(outb); + +void outw(u16 w, unsigned long addr) +{ + writew(w, (void __iomem *)(addr | BASE_PCIIO)); +} +EXPORT_SYMBOL(outw); + +void outl(u32 l, unsigned long addr) +{ + writel(l, (void __iomem *)(addr | BASE_PCIIO)); +} +EXPORT_SYMBOL(outl); + +static DEFINE_PER_CPU(struct cpu, cpu_devices); + +static int __init topology_init(void) +{ + int i; + for_each_online_node(i) + register_one_node(i); + for_each_present_cpu(i) + register_cpu(&per_cpu(cpu_devices, i), i); + + return 0; +} + 
+subsys_initcall(topology_init); diff --git a/arch/sparc/kernel/e90s_sic.c b/arch/sparc/kernel/e90s_sic.c new file mode 100644 index 000000000000..e1a031db609b --- /dev/null +++ b/arch/sparc/kernel/e90s_sic.c @@ -0,0 +1,272 @@ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#undef DEBUG_SIC_MODE +#undef DebugSIC +#define DEBUG_SIC_MODE 0 /* SIC mapping & init */ +#define DebugSIC(fmt, args...) \ + ({ if (DEBUG_SIC_MODE) \ + pr_info(fmt, ##args); }) + +#ifndef CONFIG_IOHUB_DOMAINS +/* + * IO Links of all nodes configuration + */ +#undef iolinks_num +#undef iolink_iohub_num + +int iolinks_num = 0; +iolinkmask_t iolink_iohub_map = IOLINK_MASK_NONE; +iolinkmask_t iolink_online_iohub_map = IOLINK_MASK_NONE; +int iolink_iohub_num = 0; +int iolink_online_iohub_num = 0; +iolinkmask_t iolink_rdma_map = IOLINK_MASK_NONE; +iolinkmask_t iolink_online_rdma_map = IOLINK_MASK_NONE; +int iolink_rdma_num = 0; +int iolink_online_rdma_num = 0; +#endif /* CONFIG_IOHUB_DOMAINS */ + +static void create_nodes_io_config(void); + +#ifndef CONFIG_NUMA + +unsigned long nodes_present_map = 0; +unsigned long iolinks_present_map = 0; + +void __init create_nodes_config(void) +{ + e90s_ncfg_struct_t io_link; + + NBSR_NCFG_reg(io_link) = early_sic_read_node_iolink_nbsr_reg(0, 0, + NBSR_NODE_CFG); + nodes_present_map = NBSR_NCFG_ApicNodePresentMask(io_link); + iolinks_present_map = NBSR_NCFG_ApicIoPresentMask(io_link); + printk(KERN_INFO "Nodes present map 0x%lx IO links maap 0x%lx\n", + nodes_present_map, iolinks_present_map); +} +#endif /* ! CONFIG_NUMA */ + +int __init +e90s_early_iohub_online(int node, int link) +{ + e90s_nc_info_struct_t nc_info; + e90s_ncfg_struct_t io_link; + int iohub_on = 0; + +#ifdef CONFIG_NUMA + if (!node_online(node)) + return 0; +#else /* ! 
CONFIG_NUMA */ + if (!(nodes_present_map & (1 << node))) + return 0; +#endif /* CONFIG_NUMA */ + NBSR_NC_INFO_reg(nc_info) = early_sic_read_node_nbsr_reg(node, + NBSR_NODE_CFG_INFO); + if (!NBSR_NC_INFO_IoccLinkUp(nc_info)) + return 0; + NBSR_NCFG_reg(io_link) = early_sic_read_node_nbsr_reg(node, + NBSR_NODE_CFG); + if (NBSR_NCFG_IoLinkRdmaMode(io_link) == IOHUB_IOL_MODE) { + iohub_on = 1; + } + DebugENBSR("e90s_early_iohub_online() IOHUB of node %d link %d %s\n", + node, link, (iohub_on) ? "ON" : "OFF"); + return iohub_on; +} +/* + * NBSR area mapping and init + */ + +int __init +e90s_sic_init(void) +{ + DebugSIC("e90s_sic_init() started\n"); + if (!HAS_MACHINE_E90S_SIC) { + pr_info("e90s_sic_init() the arch has not NBSR\n"); + return -ENODEV; + } +#ifndef CONFIG_NUMA + create_nodes_config(); +#endif /* ! CONFIG_NUMA */ + create_nodes_io_config(); + return 0; +} + +#ifdef CHECK_IOLINKS +/* + * IO Links of all nodes configuration + */ + +static void check_iolink_config(int node, int link) +{ + e90s_nc_info_struct_t nc_info; + int link_on; + int ab_type; + + link_on = 0; + + NBSR_NC_INFO_reg(nc_info) = sic_read_node_iolink_nbsr_reg( + node, link, NBSR_NODE_CFG_INFO); + link_on = NBSR_NC_INFO_IoccLinkUp(nc_info); + ab_type = NBSR_NC_INFO_IoccLinkRtype(nc_info); + pr_info("Node #%d IO LINK #%d is", node, link); + if (ab_type == IOHUB_ONLY_IOL_ABTYPE) { + node_iohub_set(node, link, iolink_iohub_map); + iolink_iohub_num++; + pr_cont(" IO HUB controller"); + if (link_on) { + node_iohub_set(node, link, iolink_online_iohub_map); + iolink_online_iohub_num++; + pr_cont(" ON"); + } else { + pr_cont(" OFF"); + } + } else if (ab_type == RDMA_ONLY_IOL_ABTYPE || + ab_type == RDMA_IOHUB_IOL_ABTYPE) { + node_rdma_set(node, link, iolink_rdma_map); + iolink_rdma_num++; + pr_cont(" RDMA controller"); + if (link_on) { + node_rdma_set(node, link, iolink_online_rdma_map); + iolink_online_rdma_num++; + pr_cont(" ON"); + } else { + pr_cont(" OFF"); + } + } else { + pr_cont(" unknown 
controller"); + if (link_on) { + pr_cont(" ON"); + } else { + pr_cont(" OFF"); + } + } + if (link_on) { + pr_cont(" connected to"); + switch (ab_type) { + case IOHUB_ONLY_IOL_ABTYPE: + pr_cont(" IO HUB controller"); + break; + case RDMA_ONLY_IOL_ABTYPE: + pr_cont(" RDMA controller"); + break; + case RDMA_IOHUB_IOL_ABTYPE: + pr_cont(" IO HUB/RDMA controller"); + break; + default: + pr_cont(" unknown controller"); + break; + } + } + pr_cont("\n"); +} +#endif /* CHECK_IOLINKS */ + +void create_iolink_config(int node, int link) +{ + e90s_nc_info_struct_t nc_info = {}; + e90s_ncfg_struct_t io_link = {}; + e90s_io_csr_struct_t io_hub = {}; + e90s_rdma_cs_struct_t rdma = {}; + int ab_type; + int link_on = 0; + + if (has_external_iohub()) { + NBSR_NC_INFO_reg(nc_info) = sic_read_node_iolink_nbsr_reg( + node, link, NBSR_NODE_CFG_INFO); + NBSR_NCFG_reg(io_link) = sic_read_node_iolink_nbsr_reg( + node, link, NBSR_NODE_CFG); + NBSR_IO_CSR_reg(io_hub) = + sic_read_node_iolink_nbsr_reg(node, link, NBSR_IO_CSR); + } else { + NBSR_NC_INFO_IoccLinkRtype(nc_info) = IOHUB_ONLY_IOL_ABTYPE; + NBSR_NCFG_IoLinkRdmaMode(io_link) = IOHUB_IOL_MODE; + NBSR_IO_CSR_ch_on(io_hub) = 1; + } + ab_type = NBSR_NC_INFO_IoccLinkRtype(nc_info); + + pr_info("Node #%d IO LINK #%d is", node, link); + if (NBSR_NCFG_IoLinkRdmaMode(io_link) == IOHUB_IOL_MODE) { + node_iohub_set(node, link, iolink_iohub_map); + iolink_iohub_num++; + pr_cont(" IO HUB controller"); + if (NBSR_IO_CSR_ch_on(io_hub)) { + node_iohub_set(node, link, iolink_online_iohub_map); + iolink_online_iohub_num++; + link_on = 1; + pr_cont(" ON"); + } else { + pr_cont(" OFF"); + } + } else { + node_rdma_set(node, link, iolink_rdma_map); + iolink_rdma_num++; + pr_cont(" RDMA controller"); + NBSR_RDMA_CS_reg(rdma) = + sic_read_node_iolink_nbsr_reg(node, link, NBSR_CS); + if (NBSR_RDMA_CS_ch_on(rdma)) { + node_rdma_set(node, link, iolink_online_rdma_map); + iolink_online_rdma_num++; + link_on = 1; + pr_cont(" ON"); + } else { + pr_cont(" OFF"); + 
} + } + if (link_on) { + pr_cont(" connected to"); + switch (ab_type) { + case IOHUB_ONLY_IOL_ABTYPE: + pr_cont(" IO HUB controller"); + break; + case RDMA_ONLY_IOL_ABTYPE: + pr_cont(" RDMA controller"); + break; + case RDMA_IOHUB_IOL_ABTYPE: + pr_cont(" IO HUB/RDMA controller"); + break; + default: + pr_cont(" unknown controller"); + break; + } + } + pr_cont("\n"); +} + +static void create_nodes_io_config(void) +{ + int node; + int link; + + DebugSIC("create_nodes_io_config() started\n"); + for_each_online_node(node) { + DebugSIC("create_nodes_io_config() on node #%d\n", node); + for_each_iolink_of_node(link) { +#ifndef CONFIG_NUMA + if (!(nodes_present_map & (1 << link))) + continue; +#endif /* ! CONFIG_NUMA */ + DebugSIC("create_nodes_io_config() on link #%d\n", + link); + iolinks_num++; +#ifdef CONFIG_NUMA + create_iolink_config(node, link); +#else /* ! CONFIG_NUMA */ + create_iolink_config(link, 0); +#endif /* CONFIG_NUMA */ + } + } + if (iolinks_num > 1) { + printk(KERN_INFO "Total IO links %d: IOHUBs %d, RDMAs %d\n", + iolinks_num, iolink_iohub_num, iolink_rdma_num); + } +} diff --git a/arch/sparc/kernel/etrap_64.S b/arch/sparc/kernel/etrap_64.S index 08cc41f64725..d7094f536925 100644 --- a/arch/sparc/kernel/etrap_64.S +++ b/arch/sparc/kernel/etrap_64.S @@ -108,8 +108,8 @@ etrap_save: save %g2, -STACK_BIAS, %sp sll %g2, 3, %g2 /* Set TI_SYS_FPDEPTH to 1 and clear TI_SYS_NOERROR. */ - mov 1, %l5 - sth %l5, [%l6 + TI_SYS_NOERROR] + mov 1, %l5 + sth %l5, [%l6 + TI_SYS_NOERROR] 661: wrpr %g3, 0, %otherwin .section .fast_win_ctrl_1insn_patch, "ax" @@ -151,32 +151,11 @@ etrap_save: save %g2, -STACK_BIAS, %sp stx %g6, [%sp + PTREGS_OFF + PT_V9_G6] stx %g7, [%sp + PTREGS_OFF + PT_V9_G7] or %l7, %l0, %l7 -661: sethi %hi(TSTATE_TSO | TSTATE_PEF), %l0 - /* If userspace is using ADI, it could potentially pass - * a pointer with version tag embedded in it. To maintain - * the ADI security, we must enable PSTATE.mcde. 
Userspace - * would have already set TTE.mcd in an earlier call to - * kernel and set the version tag for the address being - * dereferenced. Setting PSTATE.mcde would ensure any - * access to userspace data through a system call honors - * ADI and does not allow a rogue app to bypass ADI by - * using system calls. Setting PSTATE.mcde only affects - * accesses to virtual addresses that have TTE.mcd set. - * Set PMCDPER to ensure any exceptions caused by ADI - * version tag mismatch are exposed before system call - * returns to userspace. Setting PMCDPER affects only - * writes to virtual addresses that have TTE.mcd set and - * have a version tag set as well. - */ - .section .sun_m7_1insn_patch, "ax" - .word 661b - sethi %hi(TSTATE_TSO | TSTATE_PEF | TSTATE_MCDE), %l0 - .previous -661: nop - .section .sun_m7_1insn_patch, "ax" - .word 661b - .word 0xaf902001 /* wrpr %g0, 1, %pmcdper */ - .previous +#ifndef CONFIG_RMO + sethi %hi(TSTATE_TSO | TSTATE_PEF), %l0 +#else /* CONFIG_RMO */ + sethi %hi(TSTATE_RMO | TSTATE_PEF), %l0 +#endif /* CONFIG_RMO */ or %l7, %l0, %l7 wrpr %l2, %tnpc wrpr %l7, (TSTATE_PRIV | TSTATE_IE), %tstate diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index 540bfc98472c..399bfef135d2 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S @@ -33,8 +33,9 @@ #include #include #include -#include +#include +#ifndef CONFIG_E90S /* This section from from _start to sparc64_boot_end should fit into * 0x0000000000404000 to 0x0000000000408000. */ @@ -882,6 +883,379 @@ setup_tba: ret restore +#else /*CONFIG_E90S*/ +#include +#include +#include +#include + +/* This section from from _start to sparc64_boot_end should fit into + * 0x0000000000404000 to 0x0000000000408000. + */ + .text + .globl start, _start, stext, _stext +_start: +start: +_stext: +stext: +! 
0x0000000000404000 + b sparc64_boot + nop + +/* This stuff has to be in sync with SILO and other potential boot loaders + * Fields should be kept upward compatible and whenever any change is made, + * HdrS version should be incremented. + */ + .global root_flags, ram_flags, root_dev + .global sparc_ramdisk_image, sparc_ramdisk_size + .global sparc_ramdisk_image64, is_sun4v + .global bootblock; + + .ascii "HdrS" + .word LINUX_VERSION_CODE + + /* History: + * + * 0x0300 : Supports being located at other than 0x4000 + * 0x0202 : Supports kernel params string + * 0x0201 : Supports reboot_command + */ + .half 0x0301 /* HdrS version */ +is_sun4v: + .word 0 +root_flags: + .half 1 +root_dev: + .half 0 +ram_flags: + .half 0 +.align 8 +bootblock: + .xword 0 +sparc_ramdisk_image: + .word 0 +sparc_ramdisk_size: + .word 0 + .xword reboot_command +sparc_ramdisk_image64: + .xword 0 + .word _end + +sparc64_boot: +#ifdef CONFIG_SMP + rdpr %ver, %g1 + srlx %g1, 24, %g1 + and %g1, 0xff, %g1 + cmp %g1, 0x20 + bl 2f + nop + set CEPIC_CTRL, %g1 + lduwa [%g1]ASI_EPIC, %g1 + andcc %g1, CEPIC_CTRL_BSP_CORE, %g0 + bz 1f + nop + b 3f + nop +2: set APIC_BSP, %g1 + lduwa [%g1]ASI_LAPIC, %g1 + andcc %g1, APIC_BSP_IS_BSP, %g0 + bz 1f + nop +3: +#endif + /* Boot E90S pass bootblock pointer in %o2, save it */ + set bootblock, %g1 + stx %o2, [%g1] + +1: + /* Initialize processor registers */ + wrpr 0, %tl + wrpr PSTATE_PRIV, %pstate + wr %g0, 1<<2, %fprs + wr %g0, 0, %ccr + wr %g0, 0, %asi +#ifndef CONFIG_MCST + setx 0, %g1, %g2 + wrpr %g2, %tick + setx 0, %g1, %g2 + mov %g2, %asr24 +#endif + mov 0x1, %g2 + sllx %g2, 63, %g2 + mov %g2, %asr23 + mov %g2, %asr25 + mov %g0, %y + wrpr 0xf, %pil + wrpr 0, %cwp + wrpr 6, %cansave + wrpr 0, %canrestore + wrpr 0, %otherwin + wrpr 7, %cleanwin + wrpr 7, %wstate + + mov PRIMARY_CONTEXT, %g1 + stxa %g0, [%g1] ASI_DMMU + membar #Sync + + mov SECONDARY_CONTEXT, %g1 + stxa %g0, [%g1] ASI_DMMU + membar #Sync + + mov TSB_REG, %g1 + stxa %g0, [%g1] ASI_DMMU + stxa 
%g0, [%g1] ASI_IMMU + membar #Sync + + mov TSB_EXTENSION_P, %g1 + stxa %g0, [%g1] ASI_DMMU + stxa %g0, [%g1] ASI_IMMU + membar #Sync + + mov TSB_EXTENSION_S, %g1 + stxa %g0, [%g1] ASI_DMMU + membar #Sync + + mov TSB_EXTENSION_N, %g1 + stxa %g0, [%g1] ASI_DMMU + stxa %g0, [%g1] ASI_IMMU + +/* Reset MMU error registers */ + mov TLB_SFSR, %g1 + stxa %g0, [%g1]ASI_IMMU + stxa %g0, [%g1]ASI_DMMU + + + stxa %g0, [%g0] ASI_AFSR + stxa %g0, [%g0] ASI_AFAR + +/* Clear tlb*/ + mov 0x80, %g1 + stxa %g0, [%g1]ASI_IMMU_DEMAP + stxa %g0, [%g1]ASI_DMMU_DEMAP + + /* Set ctx 0 */ + mov PRIMARY_CONTEXT, %g1 + stxa %g0, [%g1] ASI_DMMU + membar #Sync + + mov SECONDARY_CONTEXT, %g1 + stxa %g0, [%g1] ASI_DMMU + membar #Sync + + mov TSB_REG, %g1 + stxa %g0, [%g1] ASI_DMMU + stxa %g0, [%g1] ASI_IMMU + membar #Sync + + mov TSB_EXTENSION_P, %g1 + stxa %g0, [%g1] ASI_DMMU + stxa %g0, [%g1] ASI_IMMU + membar #Sync + + mov TSB_EXTENSION_S, %g1 + stxa %g0, [%g1] ASI_DMMU + membar #Sync + + mov TSB_EXTENSION_N, %g1 + stxa %g0, [%g1] ASI_DMMU + stxa %g0, [%g1] ASI_IMMU + + setx DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE|DCU_DM|DCU_IM|DCU_DC|DCU_IC, %g2, %g7 + set 1f, %l0 + call %l0 + stxa %g7, [%g0] ASI_DCU_CONTROL_REG +1: + membar #Sync + +cheetah_boot: + /* + * Make sure we are in privileged mode, have address masking, + * using the ordinary globals and have enabled floating + * point. + * + * Again, typically PROM has left %pil at 13 or similar, and + * (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE) in %pstate. 
+ */ + wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate + wr %g0, 0, %fprs + +#ifdef CONFIG_SMP + rdpr %ver, %g1 + srlx %g1, 24, %g1 + and %g1, 0xff, %g1 + cmp %g1, 0x20 + bl 2f + nop + set CEPIC_CTRL, %g1 + lduwa [%g1]ASI_EPIC, %g1 + andcc %g1, CEPIC_CTRL_BSP_CORE, %g0 + bnz 1f + nop + b 3f + nop +2: set APIC_BSP, %g1 + lduwa [%g1]ASI_LAPIC, %g1 + andcc %g1, APIC_BSP_IS_BSP, %g0 + bnz 1f + nop +3: call sparc64_cpu_startup + nop +#endif + +1: call cheetah_tlb_fixup + nop + + __REF +cheetah_tlb_fixup: + mov 2, %g2 /* Set TLB type to cheetah_plus. */ + sethi %hi(tlb_type), %g1 + stw %g2, [%g1 + %lo(tlb_type)] + + rdpr %ver, %o1 + srlx %o1, 24, %o1 + and %o1, 0xff, %o1 + cmp %o1, 0x11 /* r2000, revision 1 has bug */ + bne 1f /* in block store operation */ + nop + + call generic_patch_copyops + nop + call generic_patch_bzero + nop + call generic_patch_pageops + nop + b,a 2f +1: /* Patch copy/page operations to cheetah optimized versions. */ + call cheetah_patch_copyops + nop + call cheetah_patch_copy_page + nop +2: call cheetah_patch_cachetlbops + nop + + sethi %hi(init_thread_union), %g6 + or %g6, %lo(init_thread_union), %g6 + ldx [%g6 + TI_TASK], %g4 + mov %sp, %l6 + mov %o4, %l7 + + wr %g0, ASI_P, %asi + mov 1, %g1 + sllx %g1, THREAD_SHIFT, %g1 + sub %g1, (STACKFRAME_SZ + STACK_BIAS), %g1 + add %g6, %g1, %sp + mov 0, %fp + + /* Set per-cpu pointer initially to zero, this makes + * the boot-cpu use the in-kernel-image per-cpu areas + * before setup_per_cpu_area() is invoked. + */ + clr %g5 + + wrpr %g0, 0, %wstate + wrpr %g0, 0x0, %tl + + /* Clear the bss */ + sethi %hi(__bss_start), %o0 + or %o0, %lo(__bss_start), %o0 + sethi %hi(_end), %o1 + or %o1, %lo(_end), %o1 + call __bzero + sub %o1, %o0, %o1 + + call setup_trap_table + nop + + /* Off we go.... */ + call start_early_boot + nop + /* Not reached... */ + +.globl setup_trap_table +setup_trap_table: + save %sp, -192, %sp + + /* Force interrupts to be disabled. 
*/ + rdpr %pstate, %o1 + andn %o1, PSTATE_IE, %o1 + wrpr %o1, 0x0, %pstate + wrpr %g0, 15, %pil + + /* + * Set MCNTL register to enable STLB for N Kb pages and + * E90S TSBRP type + */ + set E90S_MCNTL_IRM_DEFAULT | E90S_MCNTL_DRM01_DEFAULT | E90S_MCNTL_DRM23_DEFAULT, %g1 + set E90S_MCNTL, %g2 + stxa %g1, [%g2]ASI_DCU_CONTROL_REG + + /* Set jump prediction area size */ + set 0x10, %g1 + ldxa [%g1]ASI_IIU_INST_TRAP, %g2 +#ifdef CONFIG_SPARC64_PAGE_SIZE_64KB + or %g2, 1 << 8, %g2 +#else + andn %g2, 1 << 8, %g2 +#endif + stxa %g2, [%g1]ASI_IIU_INST_TRAP + stxa %g0, [%g0] ASI_IC_TAG + +#ifndef CONFIG_RMO + set E90S_DBGJMP, %g1 + ldxa [%g1]ASI_DCU_CONTROL_REG, %g2 + set DBGJMP_MM_MASK, %g3 + andn %g2, %g3, %g2 + or %g2, DBGJMP_MM_PSTATE_MM, %g2 + membar #Sync + stxa %g2, [%g1]ASI_DCU_CONTROL_REG + membar #Sync +#endif /*CONFIG_RMO*/ + + set sparc64_ttable_tl0, %o0 + wrpr %o0, %tba + + /* Start using proper page size encodings in ctx register. */ + sethi %hi(sparc64_kern_pri_context), %g3 + ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 + mov PRIMARY_CONTEXT, %g1 + stxa %g2, [%g1] ASI_DMMU + membar #Sync + + lduh [%g6 + TI_CPU], %o0 + rdpr %pstate, %o1 + wrpr %o1, PSTATE_IG, %pstate + + set e90s_irq_pending, %g6 + sllx %o0, SMP_CACHE_BYTES_SHIFT, %g1 + add %g6, %g1, %g6 + + wrpr %o1, 0, %pstate + + /* Kill PROM timer */ + sethi %hi(0x80000000), %o2 + sllx %o2, 32, %o2 + wr %o2, 0, %tick_cmpr + + /* Disable STICK_INT interrupts. */ + + sethi %hi(0x80000000), %o2 + sllx %o2, 32, %o2 + wr %o2, %asr25 + + /* Now we can turn interrupts back on. 
*/ +#if 1 /* hardware error workaround: timer interrupts are active after */ + /* machine reset/power on */ + rdpr %pstate, %o1 + or %o1, PSTATE_IE, %o1 + wrpr %o1, 0, %pstate +#endif /* 0 hardware error workaround: timer interrupts are active after */ + /* machine reset/power on */ + wrpr %g0, 0x0, %pil + + ret + restore + +#endif /*CONFIG_E90S*/ sparc64_boot_end: #include "etrap_64.S" @@ -896,9 +1270,11 @@ sparc64_boot_end: #include "misctrap.S" #include "syscalls.S" #include "helpers.S" -#include "sun4v_tlb_miss.S" #include "sun4v_mcd.S" +#ifdef CONFIG_SPARC64_SUN4V +#include "sun4v_tlb_miss.S" #include "sun4v_ivec.S" +#endif /*CONFIG_SPARC64_SUN4V*/ #include "ktlb.S" #include "tsb.S" diff --git a/arch/sparc/kernel/helpers.S b/arch/sparc/kernel/helpers.S index 9b3f74706cfb..04c51d4c332b 100644 --- a/arch/sparc/kernel/helpers.S +++ b/arch/sparc/kernel/helpers.S @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ - .align 32 +#include +.align 32 .globl __flushw_user .type __flushw_user,#function __flushw_user: @@ -48,6 +48,7 @@ stack_trace_flush: nop .size stack_trace_flush,.-stack_trace_flush +#ifndef CONFIG_E90S #ifdef CONFIG_SMP .globl hard_smp_processor_id .type hard_smp_processor_id,#function @@ -64,3 +65,5 @@ real_hard_smp_processor_id: #endif .size real_hard_smp_processor_id,.-real_hard_smp_processor_id EXPORT_SYMBOL_GPL(real_hard_smp_processor_id) + +#endif /*CONFIG_E90S*/ diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c index e8452be5123b..bec55d7bb123 100644 --- a/arch/sparc/kernel/irq_32.c +++ b/arch/sparc/kernel/irq_32.c @@ -219,6 +219,10 @@ void handler_irq(unsigned int pil, struct pt_regs *regs) { struct pt_regs *old_regs; struct irq_bucket *p; +#ifdef CONFIG_MCST + struct thread_info *ti = current_thread_info(); + ti->irq_enter_clk = get_cycles(); +#endif BUG_ON(pil > 15); old_regs = set_irq_regs(regs); diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index eb21682abfcb..7353b0ff3547 100644 --- 
a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -816,6 +816,10 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs) unsigned long pstate, bucket_pa; struct pt_regs *old_regs; void *orig_sp; +#ifdef CONFIG_MCST + struct thread_info *ti = current_thread_info(); + ti->irq_enter_clk = get_cycles(); +#endif clear_softint(1 << pil); diff --git a/arch/sparc/kernel/irq_e90s.c b/arch/sparc/kernel/irq_e90s.c new file mode 100644 index 000000000000..1090d08f3fcc --- /dev/null +++ b/arch/sparc/kernel/irq_e90s.c @@ -0,0 +1,176 @@ +/* $Id: irq_e90.c,v 1.14 2009/02/24 16:12:16 atic Exp $ + * irq.c: UltraSparc IRQ handling/init/registry. + * + * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be) + * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kstack.h" + +#define DEBUG_IRQ_MODE 0 /* interrupts */ +#if DEBUG_IRQ_MODE +# define DebugIRQ(...) printk(__VA_ARGS__) +#else +# define DebugIRQ(...) 
+#endif + +atomic_t nmi_active = ATOMIC_INIT(0); +EXPORT_SYMBOL(nmi_active); + +void *hardirq_stack[NR_CPUS]; +void *softirq_stack[NR_CPUS]; + +unsigned long ivector_table_pa; + +notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) +{ + unsigned long pcr, i; + + rd_pcr(pcr); + if(!(pcr & E90S_PCR_OVF)) { + do_nmi(regs); + clear_softint(1 << irq); + return; + } + + nmi_enter(); + inc_irq_stat(__nmi_count); + + if(test_thread_flag(TIF_PERFCTR)) { + for(i = 0; i < E90S_PIC_NR; i++) { + current_thread_info()->pcr_regs[i] |= pcr & E90S_PCR_OVF; + } + wr_pcr(pcr & ~(E90S_PCR_OVF | E90S_PCR_OVRO)); + send_sig(SIGTRAP, current, 0); + + } else { +#ifdef CONFIG_PERF_EVENTS + perf_event_nmi_handler(regs); +#endif + } + + clear_softint(1 << irq); + + nmi_exit(); +} + +struct e90s_irq_pending e90s_irq_pending[NR_CPUS]; + +void handler_irq(int irq, struct pt_regs *regs) +{ + struct pt_regs *old_regs; + void *orig_sp; + unsigned vector, cpu = smp_processor_id(); +#ifdef CONFIG_MCST + struct thread_info *ti = current_thread_info(); + ti->irq_enter_clk = get_cycles(); + __this_cpu_write(prev_intr_clock, + __this_cpu_read(last_intr_clock)); + __this_cpu_write(last_intr_clock, get_cycles()); +#endif + + clear_softint(1 << irq); + old_regs = set_irq_regs(regs); + irq_enter(); + orig_sp = set_hardirq_stack(); + + set_current_epic_core_priority(e90s_irq_pending[cpu].vector >> + CEPIC_VECT_INTA_PRI_SHIFT); + e90s_irq_pending[cpu].vector &= CEPIC_VECT_INTA_VMASK; + vector = e90s_irq_pending[cpu].vector; + + DebugIRQ("CPU #%d (%d) will start %pS on vector %02x (IRQ %d) (task %d %s)\n", + smp_processor_id(), apicid, interrupt[vector], + vector, __raw_get_cpu_var(vector_irq)[vector], + current->pid, current->comm); + BUG_ON(!irqs_disabled()); + if (*interrupt[vector]) + (*interrupt[vector])(regs); + else + do_IRQ(regs, vector); + + restore_hardirq_stack(orig_sp); + irq_exit(); + set_irq_regs(old_regs); +} + +#ifndef CONFIG_PREEMPT_RT +void do_softirq_own_stack(void) +{ + void 
*orig_sp, *sp = softirq_stack[smp_processor_id()]; + BUG_ON(!irqs_disabled()); + + sp += THREAD_SIZE - 192 - STACK_BIAS; + + __asm__ __volatile__("mov %%sp, %0\n\t" + "mov %1, %%sp" + : "=&r" (orig_sp) + : "r" (sp)); + __do_softirq(); + __asm__ __volatile__("mov %0, %%sp" + : : "r" (orig_sp)); +} +#endif + +static void __init epic_init_IRQ(void) +{ + /* + * Initialize interrupt[] array of system interrupts' handlers. + */ + epic_init_system_handlers_table(); + setup_bsp_epic(); + + /* + * Initialize both IO-APICs and IO-EPICs + */ + if (nr_ioapics) + setup_IO_APIC(); + if (nr_ioepics) + setup_io_epic(); +} + +static void __init apic_init_IRQ(void) +{ + init_bsp_APIC(); + /* Initialize interrupt[] array of system interrupts' handlers. */ + l_init_system_handlers_table(); +#ifdef CONFIG_X86_X2APIC + enable_IR_x2apic(); +#endif + default_setup_apic_routing(); + if (!verify_local_APIC()) + pr_emerg("Your LAPIC is broken, trying to continue...\n"); + connect_bsp_APIC(); + setup_local_APIC(); + /* Enable IO APIC before setting up error vector. 
*/ + enable_IO_APIC(); + end_local_APIC_setup(); + if (apic->setup_portio_remap) + apic->setup_portio_remap(); + setup_IO_APIC(); +} + +void __init init_IRQ(void) +{ + BUILD_BUG_ON(sizeof(e90s_irq_pending[0]) != SMP_CACHE_BYTES); + if (cpu_has_epic()) + return epic_init_IRQ(); + else + return apic_init_IRQ(); +} diff --git a/arch/sparc/kernel/ivec.S b/arch/sparc/kernel/ivec.S index 94ba2c3a29c1..9fb58e8f001d 100644 --- a/arch/sparc/kernel/ivec.S +++ b/arch/sparc/kernel/ivec.S @@ -12,6 +12,105 @@ .align 32 .globl do_ivec .type do_ivec,#function +#ifdef CONFIG_E90S +#include + .globl r2000p_do_ivec, r2000p_rtrap_irq + .type r2000p_do_ivec,#function +do_ivec: + mov APIC_VECT, %g2 + lduwa [%g2] ASI_LAPIC, %g5 + mov APIC_EOI, %g2 +#ifdef CONFIG_SMP + cmp %g5, INVALIDATE_TLB_VECTOR + be,a do_ivec_xcall_e90s + stwa %g0, [%g2] ASI_LAPIC + cmp %g5, CYCLES_SYNC_VECTOR + be,a do_ivec_cycl_sync_e90s + stwa %g0, [%g2] ASI_LAPIC +#endif + cmp %g5, APIC_SPIV_SPURIOUS_VECT + be,a do_ivec_spurious_e90s + stwa %g0, [%g2] ASI_LAPIC + + mov APIC_SPIV_SPURIOUS_VECT - 0x10, %g3 + mov APIC_TASKPRI, %g1 + stwa %g3, [%g1] ASI_LAPIC + membar #Sync + + stw %g5, [%g6] + set 1<>1, %g2 /* to round up */ + and %g2, ~(CYCL_SYNC_GAP - 1), %g2 + /* add %g2, for more precicsion we could add interrupt time cycles */ + wrpr %g2, 0, %tick + set go_cycl_sync, %g3 + stx %g2, [%g3] /* set any !=0 value; cycles - for debug */ +do_ivec_spurious_e90s: + membar #Sync + retry +#else /*CONFIG_E90S*/ do_ivec: mov 0x40, %g3 ldxa [%g3 + %g0] ASI_INTR_R, %g3 @@ -50,3 +149,5 @@ do_ivec_xcall: 1: jmpl %g3, %g0 nop .size do_ivec,.-do_ivec + +#endif /*CONFIG_E90S*/ diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c index dfbca2470536..b5b9c66e2cd3 100644 --- a/arch/sparc/kernel/kprobes.c +++ b/arch/sparc/kernel/kprobes.c @@ -347,7 +347,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) case KPROBE_HIT_SSDONE: /* * We increment the nmissed count for accounting, - * we can also 
use npre/npostfault count for accounting + * we can also use npre/npostfault count for accouting * these specific fault cases. */ kprobes_inc_nmissed_count(cur); diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S index 1cf91c05e275..acfec2261a1c 100644 --- a/arch/sparc/kernel/ktlb.S +++ b/arch/sparc/kernel/ktlb.S @@ -55,11 +55,14 @@ kvmap_itlb_vmalloc_addr: TSB_WRITE(%g1, %g5, %g6) /* fallthrough to TLB load */ - +#ifdef CONFIG_MCST + nop /* Sivuch bug: retry instruction must be aligned at 64 bytes */ +#endif kvmap_itlb_load: 661: stxa %g5, [%g0] ASI_ITLB_DATA_IN retry +#ifdef CONFIG_SPARC64_SUN4V .section .sun4v_2insn_patch, "ax" .word 661b nop @@ -80,16 +83,19 @@ kvmap_itlb_load: */ ba,pt %xcc, sun4v_itlb_load mov %g5, %g3 +#endif /*CONFIG_SPARC64_SUN4V*/ kvmap_itlb_longpath: 661: rdpr %pstate, %g5 wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate +#ifdef CONFIG_SPARC64_SUN4V .section .sun4v_2insn_patch, "ax" .word 661b SET_GL(1) nop .previous +#endif /*CONFIG_SPARC64_SUN4V*/ rdpr %tpc, %g5 ba,pt %xcc, sparc64_realfault_common @@ -164,6 +170,7 @@ kvmap_dtlb_vmalloc_addr: KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath) TSB_LOCK_TAG(%g1, %g2, %g7) + TSB_WRITE(%g1, %g5, %g6) /* fallthrough to TLB load */ @@ -172,6 +179,7 @@ kvmap_dtlb_load: 661: stxa %g5, [%g0] ASI_DTLB_DATA_IN ! 
Reload TLB retry +#ifdef CONFIG_SPARC64_SUN4V .section .sun4v_2insn_patch, "ax" .word 661b nop @@ -192,6 +200,7 @@ kvmap_dtlb_load: */ ba,pt %xcc, sun4v_dtlb_load mov %g5, %g3 +#endif /*CONFIG_SPARC64_SUN4V*/ #ifdef CONFIG_SPARSEMEM_VMEMMAP kvmap_vmemmap: @@ -243,22 +252,26 @@ kvmap_dtlb_longpath: 661: rdpr %pstate, %g5 wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate +#ifdef CONFIG_SPARC64_SUN4V .section .sun4v_2insn_patch, "ax" .word 661b SET_GL(1) ldxa [%g0] ASI_SCRATCHPAD, %g5 .previous +#endif /*CONFIG_SPARC64_SUN4V*/ rdpr %tl, %g3 cmp %g3, 1 661: mov TLB_TAG_ACCESS, %g4 ldxa [%g4] ASI_DMMU, %g5 +#ifdef CONFIG_SPARC64_SUN4V .section .sun4v_2insn_patch, "ax" .word 661b ldx [%g5 + HV_FAULT_D_ADDR_OFFSET], %g5 nop .previous +#endif /*CONFIG_SPARC64_SUN4V*/ /* The kernel executes in context zero, therefore we do not * need to clear the context ID bits out of %g5 here. diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c index df39580f398d..cc58c4bedb9e 100644 --- a/arch/sparc/kernel/module.c +++ b/arch/sparc/kernel/module.c @@ -117,6 +117,9 @@ int apply_relocate_add(Elf_Shdr *sechdrs, break; #ifdef CONFIG_SPARC64 case R_SPARC_64: +#ifdef CONFIG_MCST + case R_SPARC_UA64: +#endif /*CONFIG_MCST*/ location[0] = v >> 56; location[1] = v >> 48; location[2] = v >> 40; diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c index 5a9f86b1d4e7..0014f3661700 100644 --- a/arch/sparc/kernel/of_device_64.c +++ b/arch/sparc/kernel/of_device_64.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include "of_device_common.h" @@ -699,10 +700,18 @@ static void __init scan_tree(struct device_node *dp, struct device *parent) } } +static struct of_device_id __initdata e90s_ids[] = { + { .compatible = "simple-bus", }, + {}, +}; + static int __init scan_of_devices(void) { struct device_node *root = of_find_node_by_path("/"); struct platform_device *parent; +#ifdef CONFIG_E90S + return of_platform_bus_probe(NULL, e90s_ids, NULL); +#endif 
parent = scan_one_device(root, NULL); if (!parent) diff --git a/arch/sparc/kernel/of_device_common.c b/arch/sparc/kernel/of_device_common.c index b32cc5610712..3e20dd7956f6 100644 --- a/arch/sparc/kernel/of_device_common.c +++ b/arch/sparc/kernel/of_device_common.c @@ -24,6 +24,7 @@ unsigned int irq_of_parse_and_map(struct device_node *node, int index) } EXPORT_SYMBOL(irq_of_parse_and_map); +#ifndef CONFIG_E90S int of_address_to_resource(struct device_node *node, int index, struct resource *r) { @@ -50,6 +51,7 @@ void __iomem *of_iomap(struct device_node *node, int index) return of_ioremap(r, 0, resource_size(r), (char *) r->name); } EXPORT_SYMBOL(of_iomap); +#endif /* Take the archdata values for IOMMU, STC, and HOSTDATA found in * BUS and propagate to all child platform_device objects. diff --git a/arch/sparc/kernel/pci_e90s.c b/arch/sparc/kernel/pci_e90s.c new file mode 100644 index 000000000000..1ca6d4c1f852 --- /dev/null +++ b/arch/sparc/kernel/pci_e90s.c @@ -0,0 +1,361 @@ +/* + * Low-Level PCI Support for PC + * + * (c) 1999--2000 Martin Mares + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#undef DEBUG_PCI_MODE +#undef DebugPCI +#define DEBUG_PCI_MODE 0 /* PCI init */ +#define DebugPCI if (DEBUG_PCI_MODE) printk + + +volatile int pci_poke_in_progress; +volatile int pci_poke_cpu = -1; +volatile int pci_poke_faulted; + +static DEFINE_RAW_SPINLOCK(pci_poke_lock); + +void pci_config_read8(u8 *addr, u8 *ret) +{ + unsigned long flags; + u8 byte; + + raw_spin_lock_irqsave(&pci_poke_lock, flags); + pci_poke_cpu = smp_processor_id(); + pci_poke_in_progress = 1; + pci_poke_faulted = 0; + __asm__ __volatile__("membar #Sync\n\t" + "lduba [%1] %2, %0\n\t" + "membar #Sync" + : "=r" (byte) + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); + pci_poke_in_progress = 0; + pci_poke_cpu = -1; + if (!pci_poke_faulted) + *ret = byte; + else + *ret = ~0; + raw_spin_unlock_irqrestore(&pci_poke_lock, flags); +} + +void 
pci_config_read16(u16 *addr, u16 *ret) +{ + unsigned long flags; + u16 word; + + raw_spin_lock_irqsave(&pci_poke_lock, flags); + pci_poke_cpu = smp_processor_id(); + pci_poke_in_progress = 1; + pci_poke_faulted = 0; + __asm__ __volatile__("membar #Sync\n\t" + "lduha [%1] %2, %0\n\t" + "membar #Sync" + : "=r" (word) + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); + pci_poke_in_progress = 0; + pci_poke_cpu = -1; + if (!pci_poke_faulted) + *ret = word; + else + *ret = ~0; + raw_spin_unlock_irqrestore(&pci_poke_lock, flags); +} + +void pci_config_read32(u32 *addr, u32 *ret) +{ + unsigned long flags; + u32 dword; + + raw_spin_lock_irqsave(&pci_poke_lock, flags); + pci_poke_cpu = smp_processor_id(); + pci_poke_in_progress = 1; + pci_poke_faulted = 0; + __asm__ __volatile__("membar #Sync\n\t" + "lduwa [%1] %2, %0\n\t" + "membar #Sync" + : "=r" (dword) + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); + pci_poke_in_progress = 0; + pci_poke_cpu = -1; + if (!pci_poke_faulted) + *ret = dword; + else + *ret = ~0; + raw_spin_unlock_irqrestore(&pci_poke_lock, flags); +} + +void pci_config_write8(u8 *addr, u8 val) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&pci_poke_lock, flags); + pci_poke_cpu = smp_processor_id(); + pci_poke_in_progress = 1; + pci_poke_faulted = 0; + __asm__ __volatile__("membar #Sync\n\t" + "stba %0, [%1] %2\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); + pci_poke_in_progress = 0; + pci_poke_cpu = -1; + raw_spin_unlock_irqrestore(&pci_poke_lock, flags); +} + +void pci_config_write16(u16 *addr, u16 val) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&pci_poke_lock, flags); + pci_poke_cpu = smp_processor_id(); + pci_poke_in_progress = 1; + pci_poke_faulted = 0; + __asm__ __volatile__("membar #Sync\n\t" + "stha %0, [%1] %2\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); + pci_poke_in_progress = 
0; + pci_poke_cpu = -1; + raw_spin_unlock_irqrestore(&pci_poke_lock, flags); +} + +void pci_config_write32(u32 *addr, u32 val) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&pci_poke_lock, flags); + pci_poke_cpu = smp_processor_id(); + pci_poke_in_progress = 1; + pci_poke_faulted = 0; + __asm__ __volatile__("membar #Sync\n\t" + "stwa %0, [%1] %2\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) + : "memory"); + pci_poke_in_progress = 0; + pci_poke_cpu = -1; + raw_spin_unlock_irqrestore(&pci_poke_lock, flags); +} + +char * __init pcibios_setup(char *str) +{ + if (!strcmp(str, "off")) { + pci_probe = 0; + return NULL; + } else if (!strcmp(str, "assign-busses")) { + pci_probe |= PCI_ASSIGN_ALL_BUSSES; + return NULL; + } + return str; +} + +unsigned int pcibios_assign_all_busses(void) +{ + return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0; +} + +int pcibios_enable_device(struct pci_dev *dev, int mask) +{ + int err; + + if ((err = pcibios_enable_resources(dev, mask)) < 0) + return err; + + return pcibios_enable_irq(dev); +} + +void pcibios_disable_device (struct pci_dev *dev) +{ + if (pcibios_disable_irq) + pcibios_disable_irq(dev); +} + +/* + * We need to avoid collisions with `mirrored' VGA ports + * and other strange ISA hardware, so we always want the + * addresses to be allocated in the 0x000-0x0ff region + * modulo 0x400. + * + * Why? Because some silly external IO cards only decode + * the low 10 bits of the IO address. The 0x00-0xff region + * is reserved for motherboard devices that decode all 16 + * bits, so it's ok to allocate at, say, 0x2800-0x28ff, + * but we want to try to avoid allocating at 0x2900-0x2bff + * which might have be mirrored at 0x0100-0x03ff.. 
+ */ +resource_size_t pcibios_align_resource(void *data, const struct resource *res, + resource_size_t size, resource_size_t align) +{ + return res->start; +} + +void pcibios_set_master(struct pci_dev *dev) +{ + u8 lat; + pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat); + if (lat < 16) + lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency; + else if (lat > pcibios_max_latency) + lat = pcibios_max_latency; + else + return; + printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n", + pci_name(dev), lat); + pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); +} + +/* Perform the actual remap of the pages for a PCI device mapping, as appropriate + * for this architecture. The region in the process to map is described by vm_start + * and vm_end members of VMA, the base physical address is found in vm_pgoff. + * The pci device structure is provided so that architectures may make mapping + * decisions on a per-device or per-bus basis. + * + * Returns a negative error code on failure, zero on success. + */ +int pci_mmap_page_range(struct pci_dev *pdev, int bar, + struct vm_area_struct *vma, + enum pci_mmap_state mmap_state, int write_combine) +{ + /* We only support mmap'ing of legacy memory space */ + if (mmap_state != pci_mmap_mem) + return -ENOSYS; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + return io_remap_pfn_range(vma, vma->vm_start, + vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); +} + +#if HAVE_PCI_LEGACY + +/** + * pci_mmap_legacy_page_range - map legacy memory space to userland + * @bus: bus whose legacy space we're mapping + * @vma: vma passed in by mmap + * + * Map legacy memory space for this device back to userspace using a machine + * vector to get the base address. 
+ */ +int +pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma, + enum pci_mmap_state mmap_state) +{ + unsigned long size = vma->vm_end - vma->vm_start; + pgprot_t prot; + unsigned long addr = 0; + + /* We only support mmap'ing of legacy memory space */ + if (mmap_state != pci_mmap_mem) + return -ENOSYS; + + prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_pgoff += addr >> PAGE_SHIFT; + vma->vm_page_prot = prot; + if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + size, vma->vm_page_prot)) + return -EAGAIN; + return 0; +} + +/** + * ia64_pci_legacy_read - read from legacy I/O space + * @bus: bus to read + * @port: legacy port value + * @val: caller allocated storage for returned value + * @size: number of bytes to read + * + * Simply reads @size bytes from @port and puts the result in @val. + * + * Again, this (and the write routine) are generic versions that can be + * overridden by the platform. This is necessary on platforms that don't + * support legacy I/O routing or that hard fail on legacy I/O timeouts. + */ +int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size) +{ + int ret = size; + + switch (size) { + case 1: + *((u8 *)val) = inb(L_IOPORT_RESOURCE_OFFSET + port); + break; + case 2: + *((u16 *)val) = inw(L_IOPORT_RESOURCE_OFFSET + port); + break; + case 4: + *((u32 *)val) = inl(L_IOPORT_RESOURCE_OFFSET + port); + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + +/** + * ia64_pci_legacy_write - perform a legacy I/O write + * @bus: bus pointer + * @port: port to write + * @val: value to write + * @size: number of bytes to write from @val + * + * Simply writes @size bytes of @val to @port. 
+ */ +int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size) +{ + int ret = size; + + switch (size) { + case 1: + outb(val, L_IOPORT_RESOURCE_OFFSET + port); + break; + case 2: + outw(val, L_IOPORT_RESOURCE_OFFSET + port); + break; + case 4: + outl(val, L_IOPORT_RESOURCE_OFFSET + port); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} +#endif /*HAVE_PCI_LEGACY*/ + +#ifdef CONFIG_NUMA +int pcibus_to_node(struct pci_bus *pbus) +{ + return __pcibus_to_node(pbus); +} +EXPORT_SYMBOL(pcibus_to_node); +#endif + +static int __init pci_direct_init(void) +{ + pci_probe = PCI_PROBE_L; + return l_pci_direct_init(); +} + +arch_initcall(pci_direct_init); diff --git a/arch/sparc/kernel/pcr_e90s.c b/arch/sparc/kernel/pcr_e90s.c new file mode 100644 index 000000000000..87b300b61fe6 --- /dev/null +++ b/arch/sparc/kernel/pcr_e90s.c @@ -0,0 +1,79 @@ +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + + +static u64 e90s_pcr_read(unsigned long reg_num) +{ + u64 val; + rd_pcr(val); + val &= ~(E90S_PCR_SC_MASK << E90S_PCR_SC_SHIFT); + val |= reg_num << E90S_PCR_SC_SHIFT; + return val; +} + +static void e90s_pcr_write(unsigned long reg_num, u64 val) +{ + val &= ~(E90S_PCR_SC_MASK << E90S_PCR_SC_SHIFT); + val |= reg_num << E90S_PCR_SC_SHIFT; + wr_pcr(val); +} + +static u64 e90s_pic_read(unsigned long reg_num) +{ + unsigned long pcr, old_pcr, pic; + rd_pcr(old_pcr); + pcr = old_pcr; + pcr &= ~(E90S_PCR_USR | E90S_PCR_SYS | + (E90S_PCR_SC_MASK << E90S_PCR_SC_SHIFT)) + | E90S_PCR_ULRO | E90S_PCR_OVRO; + + wr_pcr(pcr | reg_num << E90S_PCR_SC_SHIFT); + rd_pic(pic); + wr_pcr(old_pcr); + return pic; +} + +static void e90s_pic_write(unsigned long reg_num, u64 val) +{ + unsigned long pcr, old_pcr; + rd_pcr(old_pcr); + pcr = old_pcr; + pcr &= ~(E90S_PCR_USR | E90S_PCR_SYS | + (E90S_PCR_SC_MASK << E90S_PCR_SC_SHIFT)) + | E90S_PCR_ULRO | E90S_PCR_OVRO; + + wr_pcr(pcr | reg_num << 
E90S_PCR_SC_SHIFT); + wr_pic(val); + wr_pcr(old_pcr); +} + +static u64 e90s_picl_value(unsigned int nmi_hz) +{ + u32 delta = local_cpu_data().clock_tick / nmi_hz; + return ((u64)((0 - delta) & 0xffffffff)) << 32; +} + +static const struct pcr_ops e90s_pcr_ops = { + .read_pcr = e90s_pcr_read, + .write_pcr = e90s_pcr_write, + .read_pic = e90s_pic_read, + .write_pic = e90s_pic_write, + .nmi_picl_value = e90s_picl_value, + .pcr_nmi_enable = (E90S_PCR_PRIV | E90S_PCR_SYS | E90S_PCR_USR | PCR_UTRACE), + .pcr_nmi_disable = E90S_PCR_PRIV, +}; + + +const struct pcr_ops *pcr_ops = &e90s_pcr_ops; +EXPORT_SYMBOL_GPL(pcr_ops); diff --git a/arch/sparc/kernel/perf_event_e90s.c b/arch/sparc/kernel/perf_event_e90s.c new file mode 100644 index 000000000000..b520fa6baf24 --- /dev/null +++ b/arch/sparc/kernel/perf_event_e90s.c @@ -0,0 +1,804 @@ +/* Performance event support for sparc64. + * + */ +/*#define DEBUG*/ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "kernel.h" +#include "kstack.h" + +#define E90S_MAX_HWEVENTS E90S_PIC_NR +#define E90S_MAX_HWEVENTS_MASK ((1 << E90S_MAX_HWEVENTS) - 1) + +/* An event map describes the characteristics of a performance + * counter event. In particular it gives the encoding as well as + * a mask telling which counters the event can be measured on. + * + */ +struct perf_event_map { + u16 encoding; + u8 pic_mask; + +#define PIC_E90S_0 0x01 +#define PIC_E90S_1 0x02 +#define PIC_E90S_2 0x04 +#define PIC_E90S_3 0x08 +}; + +/* Encode a perf_event_map entry into a long. 
*/ +static inline unsigned long perf_event_encode(const struct perf_event_map *pmap) +{ + return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask; +} + +static inline u8 perf_event_get_msk(unsigned long val) +{ + return val & 0xff; +} + +static inline u64 perf_event_get_enc(unsigned long val) +{ + return val >> 16; +} + +static inline u64 nop_for_index(int idx) +{ + return E90S_NOP_EVENT | (idx << E90S_PCR_SC_SHIFT); +} + +#define C(x) PERF_COUNT_HW_CACHE_##x + +#define CACHE_OP_UNSUPPORTED 0xfffe +#define CACHE_OP_NONSENSE 0xffff + +typedef struct perf_event_map cache_map_t + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; + +static const struct perf_event_map e90s_perfmon_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = { 0x0, PIC_E90S_0 | PIC_E90S_1 + | PIC_E90S_2 | PIC_E90S_3 }, +}; + +static const struct perf_event_map *e90s_event_map(int event_id) +{ + return &e90s_perfmon_event_map[event_id]; +} + +static const cache_map_t e90s_cache_map = { +[C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x2d, PIC_E90S_0 }, + [C(RESULT_MISS)] = { 0x2d, PIC_E90S_1 }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x2d, PIC_E90S_2 }, + [C(RESULT_MISS)] = { 0x2d, PIC_E90S_3 }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x2c, PIC_E90S_1 }, + [C(RESULT_MISS)] = { 0x2c, PIC_E90S_2 }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x28, PIC_E90S_2 }, + [C(RESULT_MISS)] = { 0x2d, PIC_E90S_3 }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x2a, PIC_E90S_0 }, + [C(RESULT_MISS)] = { 0x2a, PIC_E90S_1 }, + }, + 
[C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x25, PIC_E90S_3 }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x25, PIC_E90S_2 }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x20, PIC_E90S_2 }, + [C(RESULT_MISS)] = { 0x20, PIC_E90S_3 }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +}; + +/* For tracking PMCs and the hw events they monitor on each CPU. */ +struct cpu_hw_events { + + /* Events currently scheduled. 
*/ + struct perf_event *event[E90S_MAX_HWEVENTS]; + u64 pcr[E90S_MAX_HWEVENTS]; + unsigned long config_base; + unsigned long used_mask; +}; +DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); + +static inline u64 __pcr_read(void) +{ + u64 val; + rd_pcr(val); + return val; +} + +static inline void __pcr_write(u64 val) +{ + wr_pcr(val); +} + +static inline u64 __pic_read(unsigned long reg_num) +{ + unsigned long pcr, old_pcr, pic; + rd_pcr(old_pcr); + pcr = old_pcr; + pcr &= ~(E90S_PCR_USR | E90S_PCR_SYS | + (E90S_PCR_SC_MASK << E90S_PCR_SC_SHIFT)); + + pcr |= E90S_PCR_ULRO | E90S_PCR_OVRO; + + wr_pcr(pcr | (reg_num << E90S_PCR_SC_SHIFT)); + rd_pic(pic); + wr_pcr(old_pcr); + return pic; +} + +static inline void __pic_write(unsigned long reg_num, u64 val) +{ + unsigned long pcr, old_pcr; + rd_pcr(old_pcr); + pcr = old_pcr; + pcr &= ~(E90S_PCR_USR | E90S_PCR_SYS | + (E90S_PCR_SC_MASK << E90S_PCR_SC_SHIFT)); + + pcr |= E90S_PCR_ULRO | E90S_PCR_OVRO; + + wr_pcr(pcr | (reg_num << E90S_PCR_SC_SHIFT)); + wr_pic(val); + wr_pcr(old_pcr); +} + +#define pcr_read() \ +({ \ + u64 __val = __pcr_read(); \ + pr_debug("pcrR:%llx\t%s:%d\n", __val, \ + __func__, __LINE__); \ + __val; \ +}) + +#define pcr_write(__val) do { \ + pr_debug("pcrW:%llx\t%s:%d\n", __val, \ + __func__, __LINE__); \ + __pcr_write(__val); \ +} while (0) + + +#define pic_read(__reg_num) \ +({ \ + u64 __val = __pic_read(__reg_num); \ + pr_debug("picR:%x:%llx\t%s:%d\n", __reg_num, __val, \ + __func__, __LINE__); \ + __val; \ +}) + +#define pic_write(__reg_num, __val) do { \ + pr_debug("picW:%x:%llx\t%s:%d\n", __reg_num, __val, \ + __func__, __LINE__); \ + __pic_write(__reg_num, __val); \ +} while (0) + + +static u64 event_encoding(u64 event_id, int idx) +{ + return (idx << E90S_PCR_SC_SHIFT) | + (perf_event_get_enc(event_id) << E90S_PCR_PICU_SHIFT); + +} + +static u64 sparc_perf_event_update(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = event->hw.idx; + u64 pcr = pcr_read(); + 
u64 prev_raw_count, new_raw_count; + s64 delta; + int user_overflow; + user_overflow = (pcr & (1UL << (idx + E90S_PCR_OVF_SHIFT))) && + ((pcr & (E90S_PCR_USR | E90S_PCR_SYS)) == E90S_PCR_USR); + +again: + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = user_overflow ? 0 : pic_read(idx); + + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + delta = new_raw_count - prev_raw_count; + + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); + + return new_raw_count; +} + +static int sparc_perf_event_set_period(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = event->hw.idx; + s64 left = local64_read(&hwc->period_left); + s64 period = hwc->sample_period; + int ret = 0; + + /* + * If we are way outside a reasonable range then just skip forward: + */ + if (unlikely(left <= -period)) { + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (unlikely(left <= 0)) { + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + /* + * The hw event starts counting from this event offset, + * mark it to be able to extra future deltas: + */ + local64_set(&hwc->prev_count, (u64)-left); + + pic_write(idx, (u64)(-left)); + + perf_event_update_userpage(event); + + return ret; +} + +static void sparc_pmu_enable(struct pmu *pmu) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + u64 pcr = pcr_read(); + pcr |= cpuc->config_base; + pcr_write(pcr); +} + +static void sparc_pmu_disable(struct pmu *pmu) +{ + u64 pcr = pcr_read(); + pcr &= ~(E90S_PCR_USR | E90S_PCR_SYS); + pcr |= E90S_PCR_ULRO | E90S_PCR_OVRO; + pcr_write(pcr); +} + +static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, + struct perf_event *event, int idx) +{ + struct hw_perf_event *hwc = &event->hw; + u64 val = hwc->config_base; + + val |= event_encoding(hwc->event_base, 
idx); + cpuc->pcr[idx] = val; + cpuc->event[idx] = event; + /* prevent interrupt */ + mb(); + pcr_write(val); +} + +static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, + struct hw_perf_event *hwc, int idx) +{ + cpuc->pcr[idx] = nop_for_index(idx); + pcr_write(cpuc->pcr[idx]); + /* prevent interrupt */ + mb(); + cpuc->event[idx] = NULL; +} + +static void sparc_pmu_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + if (flags & PERF_EF_RELOAD) { + WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); + sparc_perf_event_set_period(event); + } + hwc->state = 0; + sparc_pmu_enable_event(cpuc, event, hwc->idx); +} + +static void sparc_pmu_stop(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + int idx = event->hw.idx; + + if (!(event->hw.state & PERF_HES_STOPPED)) { + sparc_pmu_disable_event(cpuc, &event->hw, idx); + event->hw.state |= PERF_HES_STOPPED; + } + + if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) { + sparc_perf_event_update(event); + event->hw.state |= PERF_HES_UPTODATE; + } +} + +static void sparc_pmu_del(struct perf_event *event, int _flags) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + sparc_pmu_stop(event, PERF_EF_UPDATE); + __clear_bit(event->hw.idx, &cpuc->used_mask); + cpuc->event[event->hw.idx] = NULL; + perf_event_update_userpage(event); +} + +static void sparc_pmu_read(struct perf_event *event) +{ + sparc_perf_event_update(event); +} + +static const struct perf_event_map *sparc_map_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result; + const struct perf_event_map *pmap; + + cache_type = (config >> 0) & 0xff; + cache_op = (config >> 8) & 0xff; + cache_result = (config >> 16) & 0xff; + + pr_debug("event[%d][%d][%d]\n", + cache_type, cache_op, cache_result); + + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return 
ERR_PTR(-EINVAL); + + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return ERR_PTR(-EINVAL); + + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return ERR_PTR(-EINVAL); + + + pmap = &e90s_cache_map[cache_type][cache_op][cache_result]; + + if (pmap->encoding == CACHE_OP_UNSUPPORTED) + return ERR_PTR(-ENOENT); + + if (pmap->encoding == CACHE_OP_NONSENSE) + return ERR_PTR(-EINVAL); + + return pmap; +} + +static int e90s_get_event_idx(struct cpu_hw_events *cpuc, + struct hw_perf_event *hwc) +{ + int idx; + unsigned long msk = perf_event_get_msk(hwc->event_base); + unsigned long free_msk = (~cpuc->used_mask) & E90S_MAX_HWEVENTS_MASK; + free_msk &= msk; + idx = find_first_bit(&free_msk, E90S_MAX_HWEVENTS); + + if (idx >= E90S_MAX_HWEVENTS) + return -EAGAIN; + + set_bit(idx, &cpuc->used_mask); + return idx; +} + +static int sparc_pmu_add(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx; + int err = 0; + + perf_pmu_disable(event->pmu); + + /* If we don't have a space for the counter then finish early. */ + idx = e90s_get_event_idx(cpuc, hwc); + if (idx < 0) { + err = idx; + goto out; + } + + /* + * If there is an event in the counter we are going to use then make + * sure it is disabled. + */ + event->hw.idx = idx; + cpuc->event[idx] = event; + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + if (flags & PERF_EF_START) + sparc_pmu_start(event, PERF_EF_RELOAD); + + /* Propagate our changes to the userspace mapping. 
*/ + perf_event_update_userpage(event); + +out: + perf_pmu_enable(event->pmu); + return err; +} + +static int validate_event(struct cpu_hw_events *hw_events, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct pmu *leader_pmu = event->group_leader->pmu; + + if (is_software_event(event)) + return 1; + + if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) + return 1; + + if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) + return 1; + + return hw_events->config_base == hwc->config_base && + e90s_get_event_idx(hw_events, hwc) >= 0; +} + +static int validate_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct cpu_hw_events fake_pmu; + + /* + * Initialise the fake PMU. We only need to populate the + * used_mask and config for the purposes of validation. + */ + fake_pmu.used_mask = 0; + fake_pmu.config_base = leader->hw.config_base; + + if (!validate_event(&fake_pmu, leader)) + return -EINVAL; + + for_each_sibling_event(sibling, leader) { + if (!validate_event(&fake_pmu, sibling)) + return -EINVAL; + } + + if (!validate_event(&fake_pmu, event)) + return -EINVAL; + + return 0; +} + +static int sparc_pmu_event_init(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + struct hw_perf_event *hwc = &event->hw; + const struct perf_event_map *pmap; + + /* does not support taken branch sampling */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + + switch (attr->type) { + case PERF_TYPE_HARDWARE: + if (attr->config >= ARRAY_SIZE(e90s_perfmon_event_map)) + return -EINVAL; + pmap = e90s_event_map(attr->config); + break; + + case PERF_TYPE_HW_CACHE: + pmap = sparc_map_cache_event(attr->config); + if (IS_ERR(pmap)) + return PTR_ERR(pmap); + break; + + case PERF_TYPE_RAW: + pmap = NULL; + break; + + default: + return -ENOENT; + + } + + if (pmap) { + hwc->event_base = perf_event_encode(pmap); + } else { + /* + * User gives us "(encoding 
<< 16) | pic_mask" for + * PERF_TYPE_RAW events. + */ + hwc->event_base = attr->config; + } + + if (!attr->exclude_user) + hwc->config_base |= E90S_PCR_USR; + if (!attr->exclude_kernel) + hwc->config_base |= E90S_PCR_SYS; + + if (event->group_leader != event) { + if (validate_group(event)) + return -EINVAL; + } + + if (!hwc->sample_period) { + hwc->last_period = hwc->sample_period; + local64_set(&hwc->period_left, hwc->sample_period); + } + + return 0; +} + +static struct pmu pmu = { + .pmu_enable = sparc_pmu_enable, + .pmu_disable = sparc_pmu_disable, + .event_init = sparc_pmu_event_init, + .add = sparc_pmu_add, + .del = sparc_pmu_del, + .start = sparc_pmu_start, + .stop = sparc_pmu_stop, + .read = sparc_pmu_read, +}; + +int perf_event_nmi_handler(struct pt_regs *regs) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + int i; + u64 pcr = pcr_read(); + + for (i = 0; i < E90S_MAX_HWEVENTS; i++) { + struct perf_sample_data data; + struct hw_perf_event *hwc; + struct perf_event *event = cpuc->event[i]; + if (!(pcr & (1ULL << (i + E90S_PCR_OVF_SHIFT)))) + continue; + if (!event) { + pr_err("perf_event_nmi_handler:" + " event[%d] == NULL\n", i); + continue; + } + hwc = &event->hw; + sparc_perf_event_update(event); + perf_sample_data_init(&data, 0, hwc->last_period); + if (!sparc_perf_event_set_period(event)) + continue; + + if (perf_event_overflow(event, &data, regs)) + sparc_pmu_stop(event, 0); + } + pcr = pcr_read(); + pcr_write(pcr & ~(pcr & E90S_PCR_OVF)); + + return NOTIFY_STOP; +} + +static int __init init_hw_perf_events(void) +{ + perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); + return 0; +} + +pure_initcall(init_hw_perf_events); + +void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + unsigned long ksp, fp; +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + int graph = 0; +#endif + + stack_trace_flush(); + + perf_callchain_store(entry, regs->tpc); + + ksp = regs->u_regs[UREG_I6]; + fp = ksp + STACK_BIAS; + do { + 
struct sparc_stackf *sf; + struct pt_regs *regs; + unsigned long pc; + + if (!kstack_valid(current_thread_info(), fp)) + break; + + sf = (struct sparc_stackf *) fp; + regs = (struct pt_regs *) (sf + 1); + + if (kstack_is_trap_frame(current_thread_info(), regs)) { + if (user_mode(regs)) + break; + pc = regs->tpc; + fp = regs->u_regs[UREG_I6] + STACK_BIAS; + } else { + pc = sf->callers_pc; + fp = (unsigned long)sf->fp + STACK_BIAS; + } + perf_callchain_store(entry, pc); +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + if ((pc + 8UL) == (unsigned long) &return_to_handler) { + int index = current->curr_ret_stack; + if (current->ret_stack && index >= graph) { + pc = current->ret_stack[index - graph].ret; + perf_callchain_store(entry, pc); + graph++; + } + } +#endif + } while (entry->nr < entry->max_stack); +} + +static inline int +valid_user_frame(const void __user *fp, unsigned long size) +{ + /* addresses should be at least 4-byte aligned */ + if (((unsigned long) fp) & 3) + return 0; + + return (__range_not_ok(fp, size, TASK_SIZE) == 0); +} + +static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + unsigned long ufp; + + ufp = regs->u_regs[UREG_FP] + STACK_BIAS; + do { + struct sparc_stackf __user *usf; + struct sparc_stackf sf; + unsigned long pc; + + usf = (struct sparc_stackf __user *)ufp; + if (!valid_user_frame(usf, sizeof(sf))) + break; + + if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) + break; + + pc = sf.callers_pc; + ufp = (unsigned long)sf.fp + STACK_BIAS; + perf_callchain_store(entry, pc); + } while (entry->nr < entry->max_stack); +} + +static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + unsigned long ufp; + + ufp = regs->u_regs[UREG_FP] & 0xffffffffUL; + do { + unsigned long pc; + + if (thread32_stack_is_64bit(ufp)) { + struct sparc_stackf __user *usf; + struct sparc_stackf sf; + + ufp += STACK_BIAS; + usf = (struct sparc_stackf __user *)ufp; + if 
(__copy_from_user_inatomic(&sf, usf, sizeof(sf))) + break; + pc = sf.callers_pc & 0xffffffff; + ufp = ((unsigned long) sf.fp) & 0xffffffff; + } else { + struct sparc_stackf32 __user *usf; + struct sparc_stackf32 sf; + usf = (struct sparc_stackf32 __user *)ufp; + if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) + break; + pc = sf.callers_pc; + ufp = (unsigned long)sf.fp; + } + perf_callchain_store(entry, pc); + } while (entry->nr < entry->max_stack); +} + +void +perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) +{ + u64 saved_fault_address = current_thread_info()->fault_address; + u8 saved_fault_code = get_thread_fault_code(); + mm_segment_t old_fs; + + perf_callchain_store(entry, regs->tpc); + + if (!current->mm) + return; + + old_fs = get_fs(); + set_fs(USER_DS); + + flushw_user(); + + pagefault_disable(); + + if (test_thread_flag(TIF_32BIT)) + perf_callchain_user_32(entry, regs); + else + perf_callchain_user_64(entry, regs); + + pagefault_enable(); + + set_fs(old_fs); + set_thread_fault_code(saved_fault_code); + current_thread_info()->fault_address = saved_fault_address; +} diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 4282116e28e7..2ad342a8dab2 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -61,6 +61,9 @@ /* Idle loop support on sparc64. 
*/ void arch_cpu_idle(void) { +#ifdef CONFIG_E90S + local_irq_enable(); +#else if (tlb_type != hypervisor) { touch_nmi_watchdog(); local_irq_enable(); @@ -95,6 +98,7 @@ void arch_cpu_idle(void) : "=&r" (pstate) : "i" (PSTATE_IE)); } +#endif /*CONFIG_E90S*/ } #ifdef CONFIG_HOTPLUG_CPU @@ -198,8 +202,60 @@ void show_regs(struct pt_regs *regs) show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]); } +#if defined(CONFIG_SMP) && defined(CONFIG_E90S) +DEFINE_PER_CPU(cpu_bt_buf_t, cpu_bt_buf); + +void dump_backtrace_smp(void) +{ + unsigned long fp; + int i; + struct reg_window *rw; + unsigned long thread_base; + cpu_bt_buf_t *bt_buf = this_cpu_ptr(&cpu_bt_buf); + unsigned long flags; + + /* Protect against xcall ipis which might lead to livelock on the lock */ + __asm__ __volatile__("rdpr %%pstate, %0\n\t" + "wrpr %0, %1, %%pstate" + : "=r" (flags) + : "i" (PSTATE_IE)); + + flushw_all(); + __asm__ __volatile__("mov %%i6, %0" : "=r" (fp)); // get fp + fp += STACK_BIAS; + bt_buf->pid = current->pid; + bt_buf->t_pc = current_thread_info()->kregs->tpc; + bt_buf->t_npc = current_thread_info()->kregs->tnpc; + bt_buf->prio = current->prio; + bt_buf->need_resched = need_resched(); + + for (i = 0; i < 16; i++) { + bt_buf->comm[i] = current->comm[i]; + } + thread_base = (unsigned long) current_thread_info(); + for (i = 0; i < NUM_DUMP_FRAMES; i++) { + /* Bogus frame pointer? 
*/ + if (fp < (thread_base + sizeof(struct thread_info)) || + fp >= (thread_base + THREAD_SIZE)) + break; + rw = (struct reg_window *)fp; + bt_buf->tpc[i] = *((long *)(fp + PT_V9_TPC)); + bt_buf->fp[i] = rw->ins[7]; + fp = rw->ins[6] + STACK_BIAS; + } + if (i < NUM_DUMP_FRAMES) { + bt_buf->fp[i] = 0; + } + + __asm__ __volatile__("wrpr %0, 0, %%pstate" + : : "r" (flags)); +} +#endif /*CONFIG_SMP && CONFIG_E90S*/ + union global_cpu_snapshot global_cpu_snapshot[NR_CPUS]; +#ifndef CONFIG_MCST static DEFINE_SPINLOCK(global_cpu_snapshot_lock); +#endif static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs, int this_cpu) @@ -248,18 +304,32 @@ static void __global_reg_poll(struct global_reg_snapshot *gp) udelay(1); } } +#ifdef CONFIG_MCST +static unsigned long backtrace_flag; +#endif void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) { struct thread_info *tp = current_thread_info(); struct pt_regs *regs = get_irq_regs(); +#if !defined(CONFIG_MCST) unsigned long flags; +#endif int this_cpu, cpu; if (!regs) regs = tp->kregs; - +#ifdef CONFIG_MCST + if (test_and_set_bit(0, &backtrace_flag)) + /* + * If there is already a trigger_all_cpu_backtrace() in progress + * (backtrace_flag == 1), don't output double cpu dump infos. 
+ */ + return; +#else spin_lock_irqsave(&global_cpu_snapshot_lock, flags); +#endif + memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); this_cpu = raw_smp_processor_id(); @@ -302,8 +372,12 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) } memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); - +#ifdef CONFIG_MCST + clear_bit(0, &backtrace_flag); + smp_mb__after_clear_bit(); +#else spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags); +#endif } #ifdef CONFIG_MAGIC_SYSRQ @@ -319,6 +393,7 @@ static struct sysrq_key_op sparc_globalreg_op = { .action_msg = "Show Global CPU Regs", }; +#ifndef CONFIG_E90S static void __global_pmu_self(int this_cpu) { struct global_pmu_snapshot *pp; @@ -393,13 +468,15 @@ static struct sysrq_key_op sparc_globalpmu_op = { .help_msg = "global-pmu(x)", .action_msg = "Show Global PMU Regs", }; +#endif /*CONFIG_E90S*/ static int __init sparc_sysrq_init(void) { int ret = register_sysrq_key('y', &sparc_globalreg_op); - +#ifndef CONFIG_E90S if (!ret) ret = register_sysrq_key('x', &sparc_globalpmu_op); +#endif /*CONFIG_E90S*/ return ret; } @@ -418,6 +495,12 @@ void exit_thread(struct task_struct *tsk) else t->utraps[0]--; } +#ifdef CONFIG_E90S + if (test_and_clear_thread_flag(TIF_PERFCTR)) { + memset(t->pcr_regs, 0, sizeof(t->pcr_regs)); + wr_pcr(0); + } +#endif /* CONFIG_E90S */ } void flush_thread(void) @@ -433,6 +516,12 @@ void flush_thread(void) /* Clear FPU register state. */ t->fpsaved[0] = 0; +#ifdef CONFIG_E90S + if (test_and_clear_thread_flag(TIF_PERFCTR)) { + memset(t->pcr_regs, 0, sizeof(t->pcr_regs)); + wr_pcr(0); + } +#endif /* CONFIG_E90S */ } /* It's a bit more tricky when 64-bit tasks are involved... 
*/ @@ -582,6 +671,9 @@ asmlinkage long sparc_do_fork(unsigned long clone_flags, unsigned long orig_i1 = regs->u_regs[UREG_I1]; long ret; +#ifdef CONFIG_MCST + synchronize_user_stack(); +#endif #ifdef CONFIG_COMPAT if (test_thread_flag(TIF_32BIT)) { parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]); @@ -592,7 +684,6 @@ asmlinkage long sparc_do_fork(unsigned long clone_flags, parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2]; child_tid_ptr = (int __user *) regs->u_regs[UREG_I4]; } - ret = do_fork(clone_flags, stack_start, stack_size, parent_tid_ptr, child_tid_ptr); @@ -682,6 +773,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, */ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { +#ifndef CONFIG_E90S if (adi_capable()) { register unsigned long tmp_mcdper; @@ -696,7 +788,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) else clear_thread_flag(TIF_MCDPER); } - +#endif *dst = *src; return 0; } @@ -798,3 +890,32 @@ unsigned long get_wchan(struct task_struct *task) out: return ret; } + +#ifdef CONFIG_MCST +unsigned long mcst_copy_to_user(char __user *to, void *from, + unsigned long size) +{ + unsigned long sz; + unsigned long a; + + if (unlikely(size == 0)) + return 0; + sz = PAGE_SIZE - ((unsigned long)to) % PAGE_SIZE; + sz = min(size, sz); + do { + if (unlikely(__put_user(0, to) != 0)) { + return size; + } + a = _raw_copy_to_user(to, from, sz); + if (unlikely(a)) { + return size - sz + a; + } + to += sz; + from += sz; + size -= sz; + sz = min(size, PAGE_SIZE); + } while (size > 0); + return 0; +} +EXPORT_SYMBOL(mcst_copy_to_user); +#endif /* CONFIG_MCST */ diff --git a/arch/sparc/kernel/procpfreg_e90s.c b/arch/sparc/kernel/procpfreg_e90s.c new file mode 100644 index 000000000000..5ba4a647c0ed --- /dev/null +++ b/arch/sparc/kernel/procpfreg_e90s.c @@ -0,0 +1,246 @@ +/* + * arch/sparc/kernel/procpfreg_e90s.c + * + * This file contains implementation of functions to read and write PFREG + * 
registers through proc fs. + * + * Copyright (C) 2010-2015 Pavel V. Panteleev (panteleev_p@mcst.ru) + */ + +#include +#include +#include +#include +#include + + +#define PFREGREGS_FILENAME "pfregs" +#define PFREGREAD_FILENAME "pfread" +#define PFREGWRITE_FILENAME "pfwrite" + +#define PFREGREAD_STR_MAX_SIZE 16 +#define PFREGWRITE_STR_MAX_SIZE 32 + +static DEFINE_RAW_SPINLOCK(pfregreg_lock); +static int pfregreg_offset = 0; + + +/* + * PFREG read + */ + +static int pfregregs_seq_show(struct seq_file *s, void *v) +{ + int node = (int)(*((loff_t *)v)); + int offset; + unsigned long val; + unsigned long flags; + + raw_spin_lock_irqsave(&pfregreg_lock, flags); + offset = pfregreg_offset; + raw_spin_unlock_irqrestore(&pfregreg_lock, flags); + + if (offset > NODE_PFREG_AREA_SIZE) + return -EINVAL; + + val = __raw_readq((void *)NODE_PFREG_AREA_BASE(node) + offset); + + seq_printf(s, "node: %d reg (0x%X): 0x%lX\n", node, offset, val); + + return 0; +} + +static void *pfregregs_seq_start(struct seq_file *s, loff_t *pos) +{ + if (!node_online(*pos)) + *pos = next_online_node(*pos); + if (*pos == MAX_NUMNODES) + return 0; + return (void *)pos; +} + +static void *pfregregs_seq_next(struct seq_file *s, void *v, loff_t *pos) +{ + *pos = next_online_node(*pos); + if (*pos == MAX_NUMNODES) + return 0; + return (void *)pos; +} + +static void pfregregs_seq_stop(struct seq_file *s, void *v) +{ +} + +static const struct seq_operations pfregregs_seq_ops = { + .start = pfregregs_seq_start, + .next = pfregregs_seq_next, + .stop = pfregregs_seq_stop, + .show = pfregregs_seq_show +}; + +static int pfregregs_proc_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &pfregregs_seq_ops); +} + +static const struct file_operations pfregregs_proc_fops = { + .owner = THIS_MODULE, + .open = pfregregs_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + + +/* + * PFREG adjust read + */ + +static inline void pfregread_write_reg(char *str) +{ + int 
offset, res; + unsigned long flags; + + res = sscanf(str, "0x%X\n", &offset); + if (res != 1) { + pr_err("Failed to save PFREG register to read (invalid string).\n"); + return; + } + + raw_spin_lock_irqsave(&pfregreg_lock, flags); + pfregreg_offset = offset; + raw_spin_unlock_irqrestore(&pfregreg_lock, flags); +} + +static ssize_t pfregread_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) +{ + char pfregread_buffer[PFREGREAD_STR_MAX_SIZE]; + long ret; + + memset(pfregread_buffer, 0, sizeof(char) * PFREGREAD_STR_MAX_SIZE); + + if (count + 1 > PFREGREAD_STR_MAX_SIZE) { + pr_err("Failed to save PFREG register to read (too long string).\n"); + ret = -EINVAL; + } else if (copy_from_user(pfregread_buffer, buffer, + min(sizeof(pfregread_buffer), count))) { + pr_err("Failed to save PFREG register to read (kernel error).\n"); + ret = -EFAULT; + } else { + pfregread_write_reg(pfregread_buffer); + ret = count; + } + + return ret; +} + +static int pfregread_proc_show(struct seq_file *m, void *v) +{ + return 0; +} + +static int pfregread_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, pfregread_proc_show, NULL); +} + +static const struct file_operations pfregread_proc_fops = { + .owner = THIS_MODULE, + .open = pfregread_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .write = pfregread_write, + .release = seq_release +}; + + +/* + * PFREG write + */ + +static inline void pfregwrite_write_reg(char *str) +{ + int node, reg, res; + unsigned long val; + + if (!capable(CAP_SYS_ADMIN)) { + pr_err("Failed to write PFREG register (no permissions).\n"); + return; + } + + res = sscanf(str, "%d 0x%X 0x%lX\n", &node, ®, &val); + if (res != 3) { + pr_err("Failed to write PFREG register (invalid string).\n"); + return; + } else if (!node_online(node)) { + pr_err("Failed to write PFREG register (invalid node number).\n"); + return; + } else if (reg > NODE_PFREG_AREA_SIZE) { + pr_err("Failed to write PFREG register 
(invalid register).\n"); + return; + } + __raw_writeq(val, (void *)NODE_PFREG_AREA_BASE(node) + reg); + +} + +static ssize_t pfregwrite_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) +{ + char pfregwrite_buffer[PFREGWRITE_STR_MAX_SIZE]; + long ret; + + memset(pfregwrite_buffer, 0, sizeof(char) * PFREGWRITE_STR_MAX_SIZE); + + if (count + 1 > PFREGWRITE_STR_MAX_SIZE) { + pr_err("Failed to write PFREG register (too long string).\n"); + ret = -EINVAL; + } else if (copy_from_user(pfregwrite_buffer, buffer, + min(sizeof(pfregwrite_buffer), count))) { + pr_err("Failed to write PFREG register (kernel error).\n"); + ret = -EFAULT; + } else { + pfregwrite_write_reg(pfregwrite_buffer); + ret = count; + } + + return ret; +} + +static int pfregwrite_proc_show(struct seq_file *m, void *v) +{ + return 0; +} + +static int pfregwrite_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, pfregwrite_proc_show, NULL); +} + +static const struct file_operations pfregwrite_proc_fops = { + .owner = THIS_MODULE, + .open = pfregwrite_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .write = pfregwrite_write, + .release = seq_release +}; + + +/* + * Init + */ + +static int __init init_procpfreg(void) +{ + if (e90s_get_cpu_type() != E90S_CPU_R2000) + return 0; + + proc_create(PFREGREGS_FILENAME, S_IRUGO, NULL, &pfregregs_proc_fops); + proc_create(PFREGREAD_FILENAME, S_IRUGO, NULL, &pfregread_proc_fops); + proc_create(PFREGWRITE_FILENAME, S_IRUGO, NULL, &pfregwrite_proc_fops); + + return 0; +} + +module_init(init_procpfreg); diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c index 3f5930bfab06..59b54d249e5d 100644 --- a/arch/sparc/kernel/ptrace_64.c +++ b/arch/sparc/kernel/ptrace_64.c @@ -972,7 +972,9 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, break; case PTRACE_READTEXT: +#ifndef CONFIG_MCST /* bug 135261 */ case PTRACE_READDATA: +#endif ret = ptrace_readdata(child, 
addr, (char __user *)addr2, data); if (ret == data) @@ -1069,7 +1071,9 @@ long arch_ptrace(struct task_struct *child, long request, break; case PTRACE_READTEXT: +#ifndef CONFIG_MCST /* bug 135261 */ case PTRACE_READDATA: +#endif ret = ptrace_readdata(child, addr, addr2p, data); if (ret == data) ret = 0; diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index c5fd4b450d9b..1ccce6002daf 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S @@ -110,6 +110,10 @@ rtrap_nmi: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 .align 64 .globl rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall rtrap_irq: +#ifdef CONFIG_E90S + mov APIC_TASKPRI, %l0 + stwa %g0, [%l0] ASI_LAPIC +#endif rtrap: /* mm/ultra.S:xcall_report_regs KNOWS about this load. */ ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 @@ -200,11 +204,13 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 /* Normal globals are restored, go to trap globals. */ 661: wrpr %g0, RTRAP_PSTATE_AG_IRQOFF, %pstate nop +#ifdef CONFIG_SPARC64_SUN4V .section .sun4v_2insn_patch, "ax" .word 661b wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate SET_GL(1) .previous +#endif /*CONFIG_SPARC64_SUN4V*/ mov %l2, %g6 @@ -269,7 +275,13 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 .previous ldx [%g6 + TI_FLAGS], %g3 +#ifndef CONFIG_MCST + /* This is unnecessary (ASI_AIUS is in %asi already) + * and may cause page fault deadlock in signal handler + * after user_rtt_fill_fixup_common procedure. 
+ */ wr %g0, ASI_AIUP, %asi +#endif rdpr %cwp, %g1 andcc %g3, _TIF_32BIT, %g0 sub %g1, 1, %g1 @@ -315,15 +327,42 @@ to_kernel: brnz %l5, kern_fpucheck ldx [%g6 + TI_FLAGS], %l5 andcc %l5, _TIF_NEED_RESCHED, %g0 +#ifdef CONFIG_PREEMPT_LAZY + bne,pn %xcc, do_preempt + nop + ldsw [%g6 + TI_LAZY_COUNT], %l5 + brnz %l5, kern_fpucheck + ldx [%g6 + TI_FLAGS], %l5 + set _TIF_NEED_RESCHED_LAZY, %g1 + andcc %l5, %g1, %g0 + be,pt %xcc, kern_fpucheck +do_preempt: +#else be,pt %xcc, kern_fpucheck nop +#endif cmp %l4, 0 bne,pn %xcc, kern_fpucheck +#ifdef CONFIG_MCST_RT + rdpr %pil, %l6 + wrpr %g0, PIL_NORMAL_MAX, %pil + ldx [%g6 + TI_FLAGS], %l5 + andcc %l5, _TIF_NEED_RESCHED, %g0 + be,pt %xcc, 1f + nop + call preempt_schedule_irq + nop + wrpr %g0, %l6, %pil + ba,pt %xcc, rtrap + nop +1: wrpr %g0, %l6, %pil +#else nop call preempt_schedule_irq nop ba,pt %xcc, rtrap #endif +#endif kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5 brz,pt %l5, rt_continue srl %l5, 1, %o0 diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index fd2182a5c32d..600645cfc68b 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -5,7 +5,6 @@ * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ - #include #include #include @@ -60,6 +59,17 @@ #include #endif +#ifdef CONFIG_E90S +#include +#include +#include +#include +#include +#include +#include +#include +#endif + #include "entry.h" #include "kernel.h" @@ -69,6 +79,7 @@ DEFINE_SPINLOCK(ns87303_lock); EXPORT_SYMBOL(ns87303_lock); +#ifndef CONFIG_E90S struct screen_info screen_info = { 0, 0, /* orig-x, orig-y */ 0, /* unused */ @@ -80,15 +91,25 @@ struct screen_info screen_info = { 0, /* orig-video-isVGA */ 16 /* orig-video-points */ }; +#endif +/* Exported for mm/init.c:paging_init. 
*/ +unsigned long cmdline_memory_size = 0; + + +/* Global for RDMA drivers*/ +#ifdef CONFIG_E90S +int rdma_present = 0; +EXPORT_SYMBOL(rdma_present); +#endif + +#if defined CONFIG_OF && !defined CONFIG_E90S static void prom_console_write(struct console *con, const char *s, unsigned int n) { prom_write(s, n); } -/* Exported for mm/init.c:paging_init. */ -unsigned long cmdline_memory_size = 0; static struct console prom_early_console = { .name = "earlyprom", @@ -96,6 +117,7 @@ static struct console prom_early_console = { .flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME, .index = -1, }; +#endif /*CONFIG_OF*/ /* * Process kernel command line switches that are specific to the @@ -111,9 +133,11 @@ static void __init process_switch(char c) prom_printf("boot_flags_init: Halt!\n"); prom_halt(); break; +#if defined CONFIG_OF && !defined CONFIG_E90S case 'p': prom_early_console.flags &= ~CON_BOOT; break; +#endif /*CONFIG_OF*/ case 'P': /* Force UltraSPARC-III P-Cache on. */ if (tlb_type != cheetah) { @@ -168,6 +192,26 @@ char reboot_command[COMMAND_LINE_SIZE]; static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 }; +#ifdef CONFIG_E90S +static void __init per_cpu_patch(void) +{ + struct cpuid_patch_entry *p; + if (!cpu_has_epic()) + return; + + for (p = &__cpuid_patch; p < &__cpuid_patch_end; p++) { + int i; + unsigned *insns = p->r2000p; + unsigned *addr = (unsigned *)(long)p->addr; + for (i = 0; i < ARRAY_SIZE(p->r2000p); i++) { + addr[i] = insns[i]; + wmb(); + __asm__ __volatile__( + "flush %0" : : "r" (addr + i)); + } + } +} +#else static void __init per_cpu_patch(void) { struct cpuid_patch_entry *p; @@ -188,7 +232,6 @@ static void __init per_cpu_patch(void) while (p < &__cpuid_patch_end) { unsigned long addr = p->addr; unsigned int *insns; - switch (tlb_type) { case spitfire: insns = &p->starfire[0]; @@ -227,6 +270,7 @@ static void __init per_cpu_patch(void) p++; } } +#endif void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start, struct 
sun4v_1insn_patch_entry *end) @@ -278,6 +322,7 @@ void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *start, } } +#ifndef CONFIG_E90S static void __init sun4v_patch(void) { extern void sun4v_hvapi_init(void); @@ -311,6 +356,7 @@ static void __init sun4v_patch(void) sun4v_hvapi_init(); } +#endif static void __init popc_patch(void) { @@ -365,6 +411,20 @@ static void __init pause_patch(void) } } +#ifdef CONFIG_E90S +void __init start_early_boot(void) +{ + if (!cpu_has_epic()) { + /* use APIC_LVT0 to store cpuid for __GET_CPUID() */ + u32 v = apic_read(APIC_LVT0); + v &= ~APIC_VECTOR_MASK; + v |= smp_processor_id(); + apic_write(APIC_LVT0, v); + } + per_cpu_patch(); + start_kernel(); +} +#else /*CONFIG_E90S*/ void __init start_early_boot(void) { int cpu; @@ -386,6 +446,7 @@ void __init start_early_boot(void) prom_init_report(); start_kernel(); } +#endif /*CONFIG_E90S*/ /* On Ultra, we support all of the v8 capabilities. */ unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | @@ -415,6 +476,9 @@ void cpucap_info(struct seq_file *m) { unsigned long caps = sparc64_elf_hwcap; int i, printed = 0; +#ifdef CONFIG_E90S + caps &= ~(AV_SPARC_VIS | AV_SPARC_VIS2); +#endif seq_puts(m, "cpucaps\t\t: "); for (i = 0; i < ARRAY_SIZE(hwcaps); i++) { @@ -438,6 +502,12 @@ void cpucap_info(struct seq_file *m) } } } +#ifdef CONFIG_E90S + if ((e90s_get_cpu_type() == E90S_CPU_R2000) && + ((1 << 4) & sic_read_node_nbsr_reg(0, NBSR_NODE_CFG2))) { + seq_printf(m, ",pf"); /* probe-filter is on */ + } +#endif /*CONFIG_E90S*/ seq_putc(m, '\n'); } @@ -482,6 +552,7 @@ static void __init report_hwcaps(unsigned long caps) printk(KERN_CONT "]\n"); } +#if defined CONFIG_OF && !defined CONFIG_E90S static unsigned long __init mdesc_cpu_hwcap_list(void) { struct mdesc_handle *hp; @@ -527,6 +598,12 @@ out: mdesc_release(hp); return caps; } +#else /* CONFIG_OF */ +static unsigned long __init mdesc_cpu_hwcap_list(void) +{ + return 0; +} +#endif /* CONFIG_OF */ /* This yields a 
mask that user programs can use to figure out what * instruction set this cpu supports. @@ -615,6 +692,30 @@ static void __init init_sparc64_elf_hwcap(void) pause_patch(); } +#if defined CONFIG_OF && !defined CONFIG_E90S +static inline void register_prom_console(void) +{ +#ifdef CONFIG_EARLY_PRINTK + early_console = &prom_early_console; +#endif + register_console(&prom_early_console); +} +#endif /*CONFIG_OF*/ + +#ifdef CONFIG_E90S +static void __init e90s_late_init(void) +{ + if (HAS_MACHINE_E90S_SIC) { + int ret = e90s_sic_init(); + if (ret != 0) { + panic("e90s_late_time_init() could not init access " + "to NBSR registers, error %d\n", ret); + } + } + e90s_late_time_init(); +} +#endif /* CONFIG_E90S */ + void __init alloc_irqstack_bootmem(void) { unsigned int i, node; @@ -646,8 +747,13 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_EARLYFB if (btext_find_display()) #endif - register_console(&prom_early_console); +#if defined CONFIG_OF && !defined CONFIG_E90S + register_prom_console(); +#endif /*CONFIG_OF*/ +#ifdef CONFIG_SERIAL_PRINTK + setup_serial_dump_console(&bootblock->info); +#endif if (tlb_type == hypervisor) pr_info("ARCH: SUN4V\n"); else @@ -657,7 +763,9 @@ void __init setup_arch(char **cmdline_p) conswitchp = &dummy_con; #endif +#ifndef CONFIG_E90S idprom_init(); +#endif if (!root_flags) root_mountflags &= ~MS_RDONLY; @@ -670,7 +778,7 @@ void __init setup_arch(char **cmdline_p) task_thread_info(&init_task)->kregs = &fake_swapper_regs; -#ifdef CONFIG_IP_PNP +#if defined(CONFIG_IP_PNP) && defined(CONFIG_OF) && !defined(CONFIG_E90S) if (!ic_set_manually) { phandle chosen = prom_finddevice("/chosen"); u32 cl, sv, gw; @@ -693,19 +801,60 @@ void __init setup_arch(char **cmdline_p) /* Get boot processor trap_block[] setup. 
*/ init_cur_cpu_trap(current_thread_info()); +#ifdef CONFIG_E90S + if ((e90s_get_cpu_type() == E90S_CPU_R2000)) { + extern int max_iolinks; + max_iolinks += MAX_NUMNODES; // + RDMA IOAPICS + printk("max_iolinks corrected = %d\n", max_iolinks); + } +# ifdef CONFIG_NET + { + extern int e1000; + e1000 = 1; + } +# endif + l_setup_arch(); + /* + * Find (but now set) boot-time smp configuration. + * Like in i386 arch. used MP Floating Pointer Structure. + */ + find_smp_config(&bootblock->info); + /* Set entries of MP Configuration tables(but now one processor system). */ + get_smp_config(); + + init_pic_mappings(); + + probe_nr_irqs_gsi(); + + late_time_init = e90s_late_init; + +#endif + paging_init(); init_sparc64_elf_hwcap(); +#ifndef CONFIG_E90S smp_fill_in_cpu_possible_map(); +#endif + /* * Once the OF device tree and MDESC have been setup and nr_cpus has * been parsed, we know the list of possible cpus. Therefore we can * allocate the IRQ stacks. */ alloc_irqstack_bootmem(); + +#ifdef CONFIG_E90S +#ifdef CONFIG_OF + device_tree_init(); +#endif + /* Must be called after paging_init() & device_tree_init() */ + l_setup_vga(); +#endif } extern int stop_a_enabled; +#ifndef CONFIG_E90S void sun_do_break(void) { if (!stop_a_enabled) @@ -717,6 +866,7 @@ void sun_do_break(void) prom_cmdline(); } EXPORT_SYMBOL(sun_do_break); +#endif /*CONFIG_E90S*/ int stop_a_enabled = 1; EXPORT_SYMBOL(stop_a_enabled); diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 69ae814b7e90..23c6aeee0b4a 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -51,6 +51,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs) int err; synchronize_user_stack(); + if (get_thread_wsaved() || (((unsigned long)ucp) & (sizeof(unsigned long)-1)) || (!__access_ok(ucp, sizeof(*ucp)))) diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index aa81c25b44cf..5475bab1cb60 100644 --- a/arch/sparc/kernel/smp_64.c +++ 
b/arch/sparc/kernel/smp_64.c @@ -179,7 +179,11 @@ static inline long get_delta (long *rt, long *master) for (i = 0; i < NUM_ITERS; i++) { t0 = tick_ops->get_tick(); go[MASTER] = 1; +#ifndef CONFIG_RMO membar_safe("#StoreLoad"); +#else /* CONFIG_RMO */ + membar_storeload(); +#endif /* CONFIG_RMO */ while (!(tm = go[SLAVE])) rmb(); go[SLAVE] = 0; @@ -271,7 +275,11 @@ static void smp_synchronize_one_tick(int cpu) /* now let the client proceed into his loop */ go[MASTER] = 0; +#ifndef CONFIG_RMO membar_safe("#StoreLoad"); +#else /* CONFIG_RMO */ + membar_storeload(); +#endif /* CONFIG_RMO */ raw_spin_lock_irqsave(&itc_sync_lock, flags); { @@ -281,7 +289,11 @@ static void smp_synchronize_one_tick(int cpu) go[MASTER] = 0; wmb(); go[SLAVE] = tick_ops->get_tick(); +#ifndef CONFIG_RMO membar_safe("#StoreLoad"); +#else /* CONFIG_RMO */ + membar_storeload(); +#endif /* CONFIG_RMO */ } } raw_spin_unlock_irqrestore(&itc_sync_lock, flags); @@ -1142,6 +1154,9 @@ void smp_capture(void) smp_processor_id()); #endif penguins_are_doing_time = 1; +#ifdef CONFIG_RMO + membar_storestore_loadstore(); +#endif /* CONFIG_RMO */ atomic_inc(&smp_capture_registry); smp_cross_call(&xcall_capture, 0, 0, 0); while (atomic_read(&smp_capture_registry) != ncpus) @@ -1161,7 +1176,11 @@ void smp_release(void) smp_processor_id()); #endif penguins_are_doing_time = 0; +#ifndef CONFIG_RMO membar_safe("#StoreLoad"); +#else /* CONFIG_RMO */ + membar_storeload_storestore(); +#endif /* CONFIG_RMO */ atomic_dec(&smp_capture_registry); } } @@ -1180,7 +1199,11 @@ void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs) __asm__ __volatile__("flushw"); prom_world(1); atomic_inc(&smp_capture_registry); +#ifndef CONFIG_RMO membar_safe("#StoreLoad"); +#else /* CONFIG_RMO */ + membar_storeload_storestore(); +#endif /* CONFIG_RMO */ while (penguins_are_doing_time) rmb(); atomic_dec(&smp_capture_registry); @@ -1444,48 +1467,10 @@ void smp_send_reschedule(int cpu) if (cpu == smp_processor_id()) { 
WARN_ON_ONCE(preemptible()); set_softint(1 << PIL_SMP_RECEIVE_SIGNAL); - return; + } else { + xcall_deliver((u64) &xcall_receive_signal, + 0, 0, cpumask_of(cpu)); } - - /* Use cpu poke to resume idle cpu if supported. */ - if (cpu_poke && idle_cpu(cpu)) { - unsigned long ret; - - ret = send_cpu_poke(cpu); - if (ret == HV_EOK) - return; - } - - /* Use IPI in following cases: - * - cpu poke not supported - * - cpu not idle - * - send_cpu_poke() returns with error - */ - send_cpu_ipi(cpu); -} - -void smp_init_cpu_poke(void) -{ - unsigned long major; - unsigned long minor; - int ret; - - if (tlb_type != hypervisor) - return; - - ret = sun4v_hvapi_get(HV_GRP_CORE, &major, &minor); - if (ret) { - pr_debug("HV_GRP_CORE is not registered\n"); - return; - } - - if (major == 1 && minor >= 6) { - /* CPU POKE is registered. */ - cpu_poke = true; - return; - } - - pr_debug("CPU_POKE not supported\n"); } void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) diff --git a/arch/sparc/kernel/smp_e90s.c b/arch/sparc/kernel/smp_e90s.c new file mode 100644 index 000000000000..e327e2a6c954 --- /dev/null +++ b/arch/sparc/kernel/smp_e90s.c @@ -0,0 +1,868 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEBUG_SMP_BOOT_MODE 0 /* SMP Booting process */ +#if DEBUG_SMP_BOOT_MODE +# define DebugSMPB(...) printk(__VA_ARGS__) +#else +# define DebugSMPB(...) +#endif + +int sparc64_multi_core __read_mostly; + +cpumask_t cpu_core_map[NR_CPUS] __read_mostly = { + [0 ... 
NR_CPUS - 1] = CPU_MASK_NONE +}; +EXPORT_SYMBOL(cpu_core_map); + +long long do_sync_cpu_clocks = 1; /* 0 - watch only; 1 - do_sync(modify); */ + +static void smp_store_cpu_info(int cpu) +{ + cpu_data(cpu).clock_tick = loops_per_jiffy * HZ; + cpu_data(cpu).dcache_size = E90S_DCACHE_SIZE; + cpu_data(cpu).dcache_line_size = E90S_DCACHE_LINE_SIZE; + cpu_data(cpu).icache_size = E90S_ICACHE_SIZE; + cpu_data(cpu).icache_line_size = E90S_ICACHE_LINE_SIZE; + cpu_data(cpu).ecache_size = E90S_ECACHE_SIZE; + cpu_data(cpu).ecache_line_size = E90S_ECACHE_LINE_SIZE; + cpu_data(cpu).core_id = cpu; + cpu_data(cpu).proc_id = cpu_to_node(cpu); + + printk("CPU[%d]: Caches " + "D[sz(%d):line_sz(%d)] " + "I[sz(%d):line_sz(%d)] " + "E[sz(%d):line_sz(%d)]\n", + cpu, + cpu_data(cpu).dcache_size, cpu_data(cpu).dcache_line_size, + cpu_data(cpu).icache_size, cpu_data(cpu).icache_line_size, + cpu_data(cpu).ecache_size, cpu_data(cpu).ecache_line_size); + +} + +void __init smp_fill_in_sib_core_maps(void) +{ + unsigned int i; + + for_each_present_cpu(i) { + unsigned int j; + ncpus_probed++; + cpumask_clear(&cpu_core_map[i]); + + for_each_present_cpu(j) { + if (cpu_data(i).proc_id == cpu_data(j).proc_id) + cpumask_set_cpu(j, &cpu_core_map[i]); + } + } +} + +void __init smp_prepare_cpus(unsigned int max_cpus) +{ + DebugSMPB("smp_prepare_cpus entered, max_cpus = %d\n", max_cpus); +} + +void __init smp_prepare_boot_cpu(void) +{ + sparc64_multi_core = 1; + smp_store_cpu_info(smp_processor_id()); +} + +void smp_info(struct seq_file *m) +{ + int i; + + seq_printf(m, "State:\n"); + for_each_online_cpu(i) + seq_printf(m, "CPU%d:\t\tonline\n", i); +} + +void smp_bogo(struct seq_file *m) +{ + int i; + unsigned long ctick; + for_each_online_cpu(i) { + switch (e90s_get_cpu_type()) { + case E90S_CPU_R2000: + ctick = s2_get_freq_mult(i) * + cpu_data(i).clock_tick; + break; + default: + ctick = cpu_data(i).clock_tick; + } + seq_printf(m, + "Cpu%dClkTck\t: %016lx\n" + "Cpu%d MHz\t: %lu.%02lu\n", + i, ctick, + i, 
ctick / 1000000, + ctick % 1000000); + } +} + +struct thread_info *cpu_new_thread = NULL; +static volatile unsigned long callin_flag = 0; + +static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle) +{ + int timeout, ret, node; + int reg; + callin_flag = 0; + cpu_new_thread = task_thread_info(idle); + + cpu = cpu_physical_id(cpu); + node = cpu / e90s_max_nr_node_cpus(); + cpu %= e90s_max_nr_node_cpus(); + + reg = cpu < E90S_R1000_MAX_NR_NODE_CPUS ? + NBSR_NODE_CFG : NBSR_NODE_CFG2; + cpu %= E90S_R1000_MAX_NR_NODE_CPUS; + + nbsr_writel(nbsr_readl(reg, node) | (1 << cpu), reg, node); + + for (timeout = 0; timeout < 50000; timeout++) { + if (callin_flag) + break; + udelay(100); + } + if (callin_flag) { + ret = 0; + } else { + panic("Processor %d is stuck.\n", cpu); + ret = -ENODEV; + } + cpu_new_thread = NULL; + DebugSMPB("smp_boot_one_cpu finished for cpu%d\n", cpu); + + return ret; +} + +int __cpu_up(unsigned int cpu, struct task_struct *tidle) +{ + int ret = smp_boot_one_cpu(cpu, tidle); + + if (!ret) { + while (!cpu_online(cpu)) + mb(); + if (!cpu_online(cpu)) { + ret = -ENODEV; + } + } + return ret; +} + +void smp_callin(void) +{ + int cpuid = smp_processor_id(); + + if (!cpu_has_epic()) { + /* use APIC_LVT0 to store cpuid for __GET_CPUID() */ + u32 v = apic_read(APIC_LVT0); + v &= ~APIC_VECTOR_MASK; + v |= cpuid; + apic_write(APIC_LVT0, v); + } + DebugSMPB("smp calling entered on CPU %d\n", cpuid); + + init_cur_cpu_trap(current_thread_info()); + + __local_per_cpu_offset = __per_cpu_offset(cpuid); + + __flush_tlb_all(); + + calibrate_delay(); + smp_store_cpu_info(cpuid); + + /* Let the user get at STICK too. */ + __asm__ __volatile__(" rd %%stick, %%g2\n" + " andn %%g2, %0, %%g2\n" + " wr %%g2, 0, %%stick" + : /* no outputs */ + : "r" (TICK_PRIV_BIT) + : "g1", "g2"); + /* Let the user get at TICK too. 
+ * If you will set TICK_PRIV_BIT add + * 'return ret & ~TICK_PRIV_BIT' in get_cycles() */ + __asm__ __volatile__(" rd %%tick, %%g2\n" + " andn %%g2, %0, %%g2\n" + " wrpr %%g2, 0, %%tick" + : /* no outputs */ + : "r" (TICK_PRIV_BIT) + : "g1", "g2"); + + if (cpu_has_epic()) { + setup_cepic(); + } else { + if (apic->smp_callin_clear_local_apic) + apic->smp_callin_clear_local_apic(); + setup_local_APIC(); + end_local_APIC_setup(); + } + + setup_secondary_pic_clock(); + flush_locked_tte(); + + callin_flag = 1; + __asm__ __volatile__("membar #Sync\n\t" "flush %%g6" : : : "memory"); + + /* Clear this or we will die instantly when we + * schedule back to this idler... + */ + current_thread_info()->new_child = 0; + + /* Attach to the address space of init_task. */ + atomic_inc(&init_mm.mm_count); + current->active_mm = &init_mm; + + /* inform the notifiers about the new cpu */ + notify_cpu_starting(cpuid); + + __setup_vector_irq(cpuid); + set_cpu_online(cpuid, true); + + /* idle thread is expected to have preempt disabled */ + preempt_disable(); + + local_irq_enable(); + + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); +} + +void cpu_panic(void) +{ + printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id()); + panic("SMP bolixed\n"); +} + +extern unsigned long xcall_flush_tlb_page; +extern unsigned long xcall_flush_tlb_mm; +extern unsigned long xcall_flush_tlb_kernel_range; +extern unsigned long xcall_fetch_glob_regs; +extern unsigned long xcall_receive_signal; +extern unsigned long xcall_new_mmu_context_version; +#ifdef CONFIG_KGDB +extern unsigned long xcall_kgdb_capture; +#endif + +static DEFINE_RAW_SPINLOCK(tlb_call_lock); + +struct tlb_call_data_struct { + unsigned long *func; + u64 data0; + u64 data1; + u64 data2; +} tlb_call_data; +atomic_t tlb_call_finished; + +static int smp_tlb_call_function(struct tlb_call_data_struct *info, + const cpumask_t *cpu_mask) +{ + int cpus = 0, this_cpu, i, print_once = 1; + cpumask_t mask = *cpu_mask; + int vec = cpu_has_epic() ? 
+ EPIC_INVALIDATE_TLB_VECTOR : INVALIDATE_TLB_VECTOR; + + /* We don't use raw_spin_lock_irqsave here on 2.6.14 too */ + raw_spin_lock(&tlb_call_lock); + memcpy(&tlb_call_data, info, sizeof(tlb_call_data)); + atomic_set(&tlb_call_finished, 0); + this_cpu = smp_processor_id(); + cpumask_clear_cpu(this_cpu, &mask); + cpus = cpumask_weight(&mask); + + if (!cpus) { + raw_spin_unlock(&tlb_call_lock); + return 0; + } + + /* Send a message to all other CPUs and wait for them to respond */ + apic->send_IPI_mask((const struct cpumask *)&mask, vec); + + while (1) { + for (i = 0; i < loops_per_jiffy * HZ; i++) { + if (atomic_read(&tlb_call_finished) == cpus) { + goto out; + } + cpu_relax(); + } + if (print_once) { + pr_err("smp_tlb_call_function lock up on CPU#%d\n", + this_cpu); + dump_stack(); + print_once = 0; + } + } +out: + raw_spin_unlock(&tlb_call_lock); + return 0; +} + +/* This tick register synchronization scheme is taken entirely from + * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit. + * + * The only change I've made is to rework it so that the master + * initiates the synchonization instead of the slave. 
-DaveM + */ + +#define MASTER 0 +#define SLAVE (SMP_CACHE_BYTES/sizeof(unsigned long)) + +#define NUM_ROUNDS 64 /* magic value */ +#define NUM_ITERS 5 /* likewise */ + +static DEFINE_RAW_SPINLOCK(itc_sync_lock); +unsigned long go_cycl_sync[SLAVE + 1]; +long long delta_ticks[NR_CPUS]; + +#define DEBUG_TICK_SYNC 0 + +static unsigned long tick_add_tick(unsigned long adj) +{ + unsigned long new_tick; + + __asm__ __volatile__("rd %%stick, %0\n\t" + "add %0, %1, %0\n\t" + "wr %0, 0, %%stick\n\t" + : "=&r"(new_tick) + : "r"(adj)); + return new_tick; +} + +static inline long get_delta(long *rt, long *master) +{ + unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0; + unsigned long tcenter, t0, t1, tm; + int i; + + for (i = 0; i < NUM_ITERS; i++) { + t0 = get_cycles(); + go_cycl_sync[MASTER] = 1; + membar_safe("#StoreLoad"); + while (!(tm = go_cycl_sync[SLAVE])) + rmb(); + go_cycl_sync[SLAVE] = 0; + wmb(); + t1 = get_cycles(); + + if (t1 - t0 < best_t1 - best_t0) + best_t0 = t0, best_t1 = t1, best_tm = tm; + } + + *rt = best_t1 - best_t0; + *master = best_tm - best_t0; + + /* average best_t0 and best_t1 without overflow: */ + tcenter = (best_t0 / 2 + best_t1 / 2); + if (best_t0 % 2 + best_t1 % 2 == 2) + tcenter++; + return tcenter - best_tm; +} + +void smp_synchronize_tick_client(void) +{ + long i, delta, adj, adjust_latency = 0, done = 0; + unsigned long flags, rt, master_time_stamp; + int do_sync = do_sync_cpu_clocks; +#if DEBUG_TICK_SYNC + struct { + long rt; /* roundtrip time */ + long master; /* master's timestamp */ + long diff; /* difference between midpoint and master's timestamp */ + long lat; /* estimate of itc adjustment latency */ + } t[NUM_ROUNDS]; +#endif + + go_cycl_sync[MASTER] = 1; + wmb(); /* */ + + while (go_cycl_sync[MASTER]) + rmb(); + + local_irq_save(flags); + { + for (i = 0; i < NUM_ROUNDS; i++) { + delta = get_delta(&rt, &master_time_stamp); + if (delta == 0) { + done = 1; /* let's lock on to this... 
*/ + } + + if (!done) { + if (i > 0) { + adjust_latency += -delta; + adj = -delta + adjust_latency / 4; + } else + adj = -delta; + if (do_sync) + tick_add_tick(adj); + } +#if DEBUG_TICK_SYNC + t[i].rt = rt; + t[i].master = master_time_stamp; + t[i].diff = delta; + t[i].lat = adjust_latency / 4; +#endif + } + if (do_sync) { + /* This %tick register synchronization step avoids + * memory reading which may have jitter. + * Run master which will send IPI CYCLES_SYNC + * when master have 0 in his %tick low order bits. */ + go_cycl_sync[MASTER] = 1; + /* wait for IPI CYCLES_SYNC which will + * set slave %tick register low order bits to 0 + * (with round up) */ + while (go_cycl_sync[MASTER]) + rmb(); /* */ + } + } + local_irq_restore(flags); + +#if DEBUG_TICK_SYNC + for (i = 0; i < NUM_ROUNDS; i++) + printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n", + t[i].rt, t[i].master, t[i].diff, t[i].lat); +#endif + + if (!do_sync) { + delta_ticks[smp_processor_id()] = delta; + return; + } + printk(KERN_INFO "CPU %d: synchronized TICK with master CPU " + "(last diff %ld cycles, maxerr %lu cycles)\n", + smp_processor_id(), delta, rt); +} + +void smp_synchronize_one_tick(int cpu) +{ + unsigned long flags; + int i; + int vec = cpu_has_epic() ? 
EPIC_CYCLES_SYNC_VECTOR + : CYCLES_SYNC_VECTOR; + + go_cycl_sync[MASTER] = 0; + + smp_call_function_many(get_cpu_mask(cpu), + (smp_call_func_t)smp_synchronize_tick_client, NULL, 0); + + /* wait for client to be ready */ + while (!go_cycl_sync[MASTER]) + rmb(); + + /* now let the client proceed into his loop */ + go_cycl_sync[MASTER] = 0; + membar_safe("#StoreLoad"); + + raw_spin_lock_irqsave(&itc_sync_lock, flags); + { + for (i = 0; i < NUM_ROUNDS * NUM_ITERS; i++) { + while (!go_cycl_sync[MASTER]) + rmb(); + go_cycl_sync[MASTER] = 0; + wmb(); + go_cycl_sync[SLAVE] = get_cycles(); + membar_safe("#StoreLoad"); + } + } + if (do_sync_cpu_clocks) { + while (!go_cycl_sync[MASTER]) + rmb(); /* */ + go_cycl_sync[MASTER] = 0; + wmb(); /* */ + /* prepare to catch 0 in low order CYCL_SYNC_GAP bits */ + while (!(get_cycles() & (CYCL_SYNC_GAP >> 1))) { + ; + } + /* catch 0 in low order bits or just after it was */ + while (get_cycles() & (CYCL_SYNC_GAP >> 1)) { + ; + } + apic->send_IPI_mask(get_cpu_mask(cpu), vec); + while (!go_cycl_sync[MASTER]) + rmb(); /* */ + if ((go_cycl_sync[MASTER] & ~(CYCL_SYNC_GAP - 1)) != + (get_cycles() & ~(CYCL_SYNC_GAP - 1))) { + pr_err("CYCLES_SYNC ERR cpu%d: slv=0x%lx mst=0x%lx\n", + cpu, go_cycl_sync[MASTER], + get_cycles()); + } + } + raw_spin_unlock_irqrestore(&itc_sync_lock, flags); +} + +void __init smp_cpus_done(unsigned int max_cpus) +{ + int i; + int this_cpu = smp_processor_id(); + + for_each_online_cpu(i) { + if (i != this_cpu) + smp_synchronize_one_tick(i); + } + setup_ioapic_dest(); + smp_fill_in_sib_core_maps(); +} + +void smp_fetch_global_regs(void) +{ + struct tlb_call_data_struct t = { &xcall_fetch_glob_regs }; + smp_tlb_call_function(&t, cpu_online_mask); +} + +extern unsigned long xcall_dump_stack_chain; +void smp_show_backtrace_all_cpus(void) +{ + struct tlb_call_data_struct t = { &xcall_dump_stack_chain }; + preempt_disable(); + smp_tlb_call_function(&t, cpu_online_mask); + preempt_enable(); +} + +void smp_flush_tlb_mm(struct 
mm_struct *mm) +{ + u32 ctx = CTX_HWBITS(mm->context); + int cpu = get_cpu(); + struct tlb_call_data_struct t = { + &xcall_flush_tlb_mm, ctx + }; + + if (atomic_read(&mm->mm_users) == 1) { + cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); + goto local_flush_and_out; + } + + smp_tlb_call_function(&t, mm_cpumask(mm)); + +local_flush_and_out: + __flush_tlb_mm(ctx, SECONDARY_CONTEXT); + + put_cpu(); +} + +struct tlb_pending_info { + unsigned long ctx; + unsigned long nr; + unsigned long *vaddrs; +}; + +static void tlb_pending_func(void *info) +{ + struct tlb_pending_info *t = info; + + __flush_tlb_pending(t->ctx, t->nr, t->vaddrs); +} + +void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) +{ + u32 ctx = CTX_HWBITS(mm->context); + struct tlb_pending_info info; + int cpu = get_cpu(); + + info.ctx = ctx; + info.nr = nr; + info.vaddrs = vaddrs; + + if (mm == current->mm && atomic_read(&mm->mm_users) == 1) + cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); + else + smp_call_function_many(mm_cpumask(mm), tlb_pending_func, + &info, 1); + + __flush_tlb_pending(ctx, nr, vaddrs); + + put_cpu(); +} + +void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) +{ + unsigned long context = CTX_HWBITS(mm->context); + int cpu = get_cpu(); + struct tlb_call_data_struct r = { + &xcall_flush_tlb_page, context, vaddr, 0, + }; + + if (mm == current->mm && atomic_read(&mm->mm_users) == 1) + cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); + else + smp_tlb_call_function(&r, mm_cpumask(mm)); + __flush_tlb_page(context, vaddr); + + put_cpu(); +} + +void smp_flush_tlb_kernel_range(unsigned long _start, unsigned long _end) +{ + unsigned long start = _start & PAGE_MASK; + unsigned long end = PAGE_ALIGN(_end); + struct tlb_call_data_struct r = { + &xcall_flush_tlb_kernel_range, 0, start, end + }; + if (start != end) { + smp_tlb_call_function(&r, cpu_online_mask); + __flush_tlb_kernel_range(start, end); + } +} + +#ifdef CONFIG_KGDB +void 
kgdb_roundup_cpus(unsigned long flags) +{ + struct tlb_call_data_struct r = { &xcall_kgdb_capture }; + smp_tlb_call_function(&r, cpu_online_mask); +} +#endif + +void smp_flush_dcache_page_impl(struct page *page, int cpu) +{ +} + +void flush_dcache_page_all(struct mm_struct *mm, struct page *page) +{ +} + +static void tsb_sync(void *info) +{ + struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()]; + struct mm_struct *mm = info; + + /* It is not valid to test "currrent->active_mm == mm" here. + * + * The value of "current" is not changed atomically with + * switch_mm(). But that's OK, we just need to check the + * current cpu's trap block PGD physical address. + */ + if (tp->pgd_paddr == __pa(mm->pgd)) + tsb_context_switch(mm); +} + +void smp_tsb_sync(struct mm_struct *mm) +{ +#ifdef CONFIG_MCST_RT + preempt_disable(); + smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1); + preempt_enable(); +#else + smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1); +#endif +} + +static void stop_this_cpu(void *dummy) +{ + /* Remove this CPU */ + set_cpu_online(smp_processor_id(), false); + + local_irq_disable(); + while (1) + ; +} + +void smp_send_stop(void) +{ + smp_call_function(stop_this_cpu, NULL, 0); +} + +/** + * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu + * @cpu: cpu to allocate for + * @size: size allocation in bytes + * @align: alignment + * + * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper + * does the right thing for NUMA regardless of the current + * configuration. + * + * RETURNS: + * Pointer to the allocated area on success, NULL on failure. 
+ */ + +static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size, + size_t align) +{ + const unsigned long goal = __pa(MAX_DMA_ADDRESS); +#ifdef CONFIG_NEED_MULTIPLE_NODES + int node = cpu_to_node(cpu); + void *ptr; + + if (!node_online(node) || !NODE_DATA(node)) { + ptr = memblock_alloc_from(size, align, goal); + pr_info("cpu %d has no node %d or node-local memory\n", + cpu, node); + pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n", + cpu, size, __pa(ptr)); + } else { + ptr = memblock_alloc_try_nid(size, align, goal, + MEMBLOCK_ALLOC_ACCESSIBLE, node); + pr_debug("per cpu data for cpu%d %lu bytes on node%d at " + "%016lx\n", cpu, size, node, __pa(ptr)); + } + return ptr; +#else + return memblock_alloc_from(size, align, goal); +#endif +} + + +static void __init pcpu_free_bootmem(void *ptr, size_t size) +{ + memblock_free(__pa(ptr), size); +} + +static int __init pcpu_cpu_distance(unsigned int from, unsigned int to) +{ + if (cpu_to_node(from) == cpu_to_node(to)) + return LOCAL_DISTANCE; + else + return REMOTE_DISTANCE; +} + +static void __init pcpu_populate_pte(unsigned long addr) +{ + pgd_t *pgd = pgd_offset_k(addr); + pud_t *pud; + pmd_t *pmd; + + if (pgd_none(*pgd)) { + pud_t *new; + + new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); + if (!new) + goto err_alloc; + pgd_populate(&init_mm, pgd, new); + } + + pud = pud_offset(pgd, addr); + if (pud_none(*pud)) { + pmd_t *new; + + new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); + if (!new) + goto err_alloc; + pud_populate(&init_mm, pud, new); + } + + pmd = pmd_offset(pud, addr); + if (!pmd_present(*pmd)) { + pte_t *new; + + new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); + if (!new) + goto err_alloc; + pmd_populate_kernel(&init_mm, pmd, new); + } + + return; + +err_alloc: + panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); +} + +void __init setup_per_cpu_areas(void) +{ + unsigned long delta; + unsigned int 
cpu; + int rc = -EINVAL; + + if (pcpu_chosen_fc != PCPU_FC_PAGE) { + rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, + PERCPU_DYNAMIC_RESERVE, 4 << 20, + pcpu_cpu_distance, + pcpu_alloc_bootmem, + pcpu_free_bootmem); + if (rc) + pr_warning("PERCPU: %s allocator failed (%d), " + "falling back to page size\n", + pcpu_fc_names[pcpu_chosen_fc], rc); + } + if (rc < 0) + rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, + pcpu_alloc_bootmem, + pcpu_free_bootmem, + pcpu_populate_pte); + if (rc < 0) + panic("cannot initialize percpu area (err=%d)", rc); + + delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; + for_each_possible_cpu(cpu) + __per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu]; + + /* Setup %g5 for the boot cpu. */ + __local_per_cpu_offset = __per_cpu_offset(smp_processor_id()); + +#ifdef CONFIG_L_LOCAL_APIC + for_each_possible_cpu(cpu) { + per_cpu(x86_cpu_to_apicid, cpu) = + early_per_cpu_map(x86_cpu_to_apicid, cpu); + per_cpu(x86_bios_cpu_apicid, cpu) = + early_per_cpu_map(x86_bios_cpu_apicid, cpu); + } +#endif + /* alrighty, percpu areas up and running */ +} +#ifdef CONFIG_HOTPLUG_CPU +void cpu_play_dead(void) +{ + int cpu = smp_processor_id(); + unsigned long node; + int reg; + +#if 0 + idle_task_exit(); +#endif + cpu = cpu_physical_id(cpu); + node = cpu / e90s_max_nr_node_cpus(); + cpu %= e90s_max_nr_node_cpus(); + reg = cpu < E90S_R1000_MAX_NR_NODE_CPUS ? 
+ NBSR_NODE_CFG : NBSR_NODE_CFG2; + cpu %= E90S_R1000_MAX_NR_NODE_CPUS; +#if 0 + cpumask_clear_cpu(cpu, &smp_commenced_mask); +#endif + membar_safe("#StoreLoad"); + local_irq_disable(); + e90s_flush_l2_cache(); + + nbsr_writel(nbsr_readl(reg, node) | ~(1 << cpu), reg, node); + while (1) + barrier(); +} + +int __cpu_disable(void) +{ + lock_vector_lock(); + set_cpu_online(raw_smp_processor_id(), false); + unlock_vector_lock(); +#if 0 + fixup_irqs(); +#endif + + return 0; +} + +void __cpu_die(unsigned int cpu) +{ + /* They ack this in play_dead() by setting CPU_DEAD */ + if (cpu_wait_death(cpu, 5)) { + if (system_state == SYSTEM_RUNNING) + pr_info("CPU %u is now offline\n", cpu); + } else { + pr_err("CPU %u didn't die...\n", cpu); + } +} +#endif diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S index 489ffab918a8..50d24bfc2125 100644 --- a/arch/sparc/kernel/sys32.S +++ b/arch/sparc/kernel/sys32.S @@ -238,4 +238,4 @@ do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */ .word 61b, __retl_efault, 62b, __retl_efault .word 63b, __retl_efault, 64b, __retl_efault .word 65b, __retl_efault, 66b, __retl_efault - .previous + .previous \ No newline at end of file diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c index b5da3bfdc225..d693ca63f2a4 100644 --- a/arch/sparc/kernel/sys_sparc32.c +++ b/arch/sparc/kernel/sys_sparc32.c @@ -11,9 +11,9 @@ #include #include #include -#include -#include -#include +#include +#include +#include #include #include #include @@ -236,3 +236,192 @@ COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode, u32, offhi, u32, offlo, return ksys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo, ((loff_t)lenhi << 32) | lenlo); } + +#ifdef CONFIG_MCST + +#define ATOMIC_HASH_SIZE 256 +#define ATOMIC_HASH(a) (&__sys_atomic_hash[((unsigned long) a) &\ + (ATOMIC_HASH_SIZE-1)]) + +static raw_spinlock_t __sys_atomic_hash[ATOMIC_HASH_SIZE] = { + [0 ... 
(ATOMIC_HASH_SIZE - 1)] = __RAW_SPIN_LOCK_UNLOCKED(sys_atomic) +}; + +enum { + ATOMIC_XCHG = 0, + ATOMIC_CMPXCHG = 1, + ATOMIC_ADD = 2 +}; + +static void force_sigbus_at(void __user *addr) +{ + force_sig_fault(SIGBUS, BUS_ADRERR, addr, 0); +} + +static void force_sigsegv_at(void __user *addr) +{ + force_sig_fault(SIGSEGV, 0, addr, 0); +} + +asmlinkage long sys_atomic(int req, int size, void __user *addr, + s32 arg1, s32 arg2, void __user *old) +{ + struct mm_struct *mm = current->mm; + unsigned long flags; + long rval; + s32 val32 = 0; + s16 val16 = 0; + s8 val8 = 0; + + __chk_user_ptr(addr); + __chk_user_ptr(old); + + if (unlikely(!access_ok(addr, size))) { + force_sigsegv_at(addr); + + return -EFAULT; + } + + if (unlikely(!access_ok(old, size))) { + force_sigsegv_at(old); + + return -EFAULT; + } + + if (unlikely(!IS_ALIGNED((unsigned long)addr, size) || + !IS_ALIGNED((unsigned long)old, size))) { + force_sigbus_at(IS_ALIGNED((unsigned long)addr, size) ? + old : addr); + + return -EFAULT; + } + +again: + raw_spin_lock_irqsave(ATOMIC_HASH(addr), flags); + + switch (__builtin_expect(size, 4)) { + case 1: + switch (req) { + case ATOMIC_XCHG: + if (unlikely(__get_user(val8, (s8 *) addr) + || __put_user((s8) arg1, (s8 *) addr))) + goto handle_fault; + break; + case ATOMIC_CMPXCHG: + if (unlikely(__get_user(val8, (s8 *) addr))) + goto handle_fault; + + if (val8 == (s8) arg1) + if (unlikely(__put_user((s8) arg2, + (s8 *) addr))) + goto handle_fault; + break; + case ATOMIC_ADD: + if (unlikely(__get_user(val8, (s8 *) addr) + || __put_user(val8 + (s8) arg1, + (s8 *) addr))) + goto handle_fault; + break; + default: + val8 = -EINVAL; + break; + } + break; + case 2: + switch (req) { + case ATOMIC_XCHG: + if (unlikely(__get_user(val16, (s16 *) addr) || + __put_user((s16) arg1, (s16 *) addr))) + goto handle_fault; + break; + case ATOMIC_CMPXCHG: + if (unlikely(__get_user(val16, (s16 *) addr))) + goto handle_fault; + + if (val16 == (s16) arg1) + if (unlikely(__put_user((s16) 
arg2, + (s16 *) addr))) + goto handle_fault; + break; + case ATOMIC_ADD: + if (unlikely(__get_user(val16, (s16 *) addr) + || __put_user(val16 + (s16) arg1, + (s16 *) addr))) + goto handle_fault; + break; + default: + val16 = -EINVAL; + break; + } + break; + case 4: + switch (req) { + case ATOMIC_XCHG: + if (unlikely(__get_user(val32, (s32 *) addr) || + __put_user((s32) arg1, (s32 *) addr))) + goto handle_fault; + break; + case ATOMIC_CMPXCHG: + if (unlikely(__get_user(val32, (s32 *) addr))) + goto handle_fault; + + if (val32 == (s32) arg1) + if (unlikely(__put_user((s32) arg2, + (s32 *) addr))) + goto handle_fault; + break; + case ATOMIC_ADD: + if (unlikely(__get_user(val32, (s32 *) addr) + || __put_user(val32 + (s32) arg1, + (s32 *) addr))) + goto handle_fault; + break; + default: + val32 = -EINVAL; + break; + } + break; + default: + raw_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags); + return -EINVAL; + } + raw_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags); + + switch (size) { + case 1: + rval = put_user(val8, (s8 *) old); + break; + case 2: + rval = put_user(val16, (s16 *) old); + break; + case 4: + rval = put_user(val32, (s32 *) old); + break; + default: + pr_notice_ratelimited("sys_atomic: bad size %d passed\n", size); + rval = -EINVAL; + break; + } + + if (rval) + force_sigsegv_at(old); + + return rval; + +handle_fault: + raw_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags); + + down_read(&mm->mmap_sem); + rval = get_user_pages((unsigned long)addr, + 1, FOLL_WRITE, NULL, NULL); + up_read(&mm->mmap_sem); + + if (rval < 0) { + force_sigsegv_at(addr); + + return rval; + } + + goto again; +} +#endif /* CONFIG_MCST */ diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 9f41a6f5a032..ca004d09dd44 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -33,6 +33,9 @@ #include #include +#ifdef CONFIG_E90S +#include +#endif /* CONFIG_E90S */ #include "entry.h" #include "kernel.h" @@ -432,6 +435,27 
@@ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality) return ret; } +#ifdef CONFIG_MCST +/* ltp tests required such error code, as other archs */ +int sparc_mmap_check(unsigned long addr, unsigned long len) +{ + if (test_thread_flag(TIF_32BIT)) { + if (len >= STACK_TOP32) + return -ENOMEM; + + if (addr > STACK_TOP32 - len) + return -ENOMEM; + } else { + if (len >= VA_EXCLUDE_START) + return -ENOMEM; + + if (invalid_64bit_range(addr, len)) + return -ENOMEM; + } + + return 0; +} +#else int sparc_mmap_check(unsigned long addr, unsigned long len) { if (test_thread_flag(TIF_32BIT)) { @@ -450,7 +474,7 @@ int sparc_mmap_check(unsigned long addr, unsigned long len) return 0; } - +#endif /* Linux version of mmap */ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, @@ -702,6 +726,91 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act, return ret; } +#ifdef CONFIG_E90S +static int kill_ok_by_cred(struct task_struct *t) +{ + const struct cred *cred = current_cred(); + const struct cred *tcred = __task_cred(t); + + if (uid_eq(cred->euid, tcred->suid) || + uid_eq(cred->euid, tcred->uid) || + uid_eq(cred->uid, tcred->suid) || + uid_eq(cred->uid, tcred->uid)) + return 1; + + if (ns_capable(tcred->user_ns, CAP_KILL)) + return 1; + + return 0; +} + +SYSCALL_DEFINE4(perfctr, int, opcode, unsigned long, arg0, + unsigned long, arg1, unsigned long, arg2) +{ + int err = 0; + struct task_struct *t; + struct thread_info *ti; + + rcu_read_lock(); + t = find_task_by_vpid(arg0); + rcu_read_unlock(); + if(!t) + return -EINVAL; + if (!kill_ok_by_cred(t)) + return -EPERM; + + ti = task_thread_info(t); + + switch(opcode) { + case E90S_PERFCTR_WRITE_AND_ON: + clear_ti_thread_flag(ti, TIF_PERFCTR); + err |= copy_from_user(ti->kernel_cnt, (void __user *)arg1, + sizeof(ti->kernel_cnt)); + err |= copy_from_user(ti->pcr_regs, (void __user *)arg2, + sizeof(ti->pcr_regs)); + if(ti == 
current_thread_info()) { + write_perfctrs(ti->pcr_regs, ti->kernel_cnt); + } else { + set_ti_thread_flag(ti, TIF_FIRST_READ_PIC); + } + set_ti_thread_flag(ti, TIF_PERFCTR); + break; + + case E90S_PERFCTR_OFF: + err = -EINVAL; + if (test_ti_thread_flag(ti, TIF_PERFCTR)) { + if(ti == current_thread_info()) + wr_pcr(0); + clear_ti_thread_flag(ti, TIF_PERFCTR); + clear_ti_thread_flag(ti, TIF_FIRST_READ_PIC); + memset(ti->pcr_regs, 0, sizeof(ti->pcr_regs)); + err = 0; + } + break; + + case E90S_PERFCTR_READ: { + if (!test_ti_thread_flag(ti, TIF_PERFCTR)) { + err = -EINVAL; + break; + } + if(ti == current_thread_info()) + read_perfctrs(ti->kernel_cnt); + if(arg1) + err |= copy_to_user((void __user *)arg1, ti->kernel_cnt, + sizeof(ti->kernel_cnt)); + if(arg2) + err |= copy_to_user((void __user *)arg2, ti->pcr_regs, + sizeof(ti->pcr_regs)); + break; + } + default: + err = -EINVAL; + break; + }; + return err; +} +#endif /* CONFIG_E90S */ + SYSCALL_DEFINE0(kern_features) { return KERN_FEATURE_MIXED_MODE_STACK; diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl index 8c8cc7537fb2..877013024af3 100644 --- a/arch/sparc/kernel/syscalls/syscall.tbl +++ b/arch/sparc/kernel/syscalls/syscall.tbl @@ -29,7 +29,7 @@ 16 32 lchown sys_lchown16 16 64 lchown sys_lchown 17 common brk sys_brk -18 common perfctr sys_nis_syscall +18 common perfctr sys_perfctr 19 common lseek sys_lseek compat_sys_lseek 20 common getpid sys_getpid 21 common capget sys_capget @@ -481,3 +481,4 @@ 433 common fspick sys_fspick 434 common pidfd_open sys_pidfd_open # 435 reserved for clone3 +498 common el_posix sys_el_posix diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c index 89fb05f90609..3c3d4c5fa845 100644 --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c @@ -69,17 +69,17 @@ static void tick_disable_protection(void) { /* Set things up so user can access tick register for profiling * purposes. 
Also workaround BB_ERRATA_1 by doing a dummy - * read back of %tick after writing it. + * read back of %stick after writing it. */ __asm__ __volatile__( " ba,pt %%xcc, 1f\n" " nop\n" " .align 64\n" - "1: rd %%tick, %%g2\n" + "1: rd %%stick, %%g2\n" " add %%g2, 6, %%g2\n" " andn %%g2, %0, %%g2\n" - " wrpr %%g2, 0, %%tick\n" - " rdpr %%tick, %%g0" + " wrpr %%g2, 0, %%stick\n" + " rdpr %%stick, %%g0" : /* no outputs */ : "r" (TICK_PRIV_BIT) : "g2"); @@ -107,7 +107,7 @@ static unsigned long long tick_get_tick(void) { unsigned long ret; - __asm__ __volatile__("rd %%tick, %0\n\t" + __asm__ __volatile__("rd %%stick, %0\n\t" "mov %0, %0" : "=r" (ret)); @@ -118,7 +118,7 @@ static int tick_add_compare(unsigned long adj) { unsigned long orig_tick, new_tick, new_compare; - __asm__ __volatile__("rd %%tick, %0" + __asm__ __volatile__("rd %%stick, %0" : "=r" (orig_tick)); orig_tick &= ~TICKCMP_IRQ_BIT; @@ -141,7 +141,7 @@ static int tick_add_compare(unsigned long adj) : "=r" (new_compare) : "r" (orig_tick), "r" (adj)); - __asm__ __volatile__("rd %%tick, %0" + __asm__ __volatile__("rd %%stick, %0" : "=r" (new_tick)); new_tick &= ~TICKCMP_IRQ_BIT; @@ -153,9 +153,9 @@ static unsigned long tick_add_tick(unsigned long adj) unsigned long new_tick; /* Also need to handle Blackbird bug here too. */ - __asm__ __volatile__("rd %%tick, %0\n\t" + __asm__ __volatile__("rd %%stick, %0\n\t" "add %0, %1, %0\n\t" - "wrpr %0, 0, %%tick\n\t" + "wrpr %0, 0, %%stick\n\t" : "=&r" (new_tick) : "r" (adj)); @@ -226,9 +226,9 @@ static void stick_init_tick(void) /* Let the user get at STICK too. 
*/ __asm__ __volatile__( - " rd %%asr24, %%g2\n" + " rd %%stick, %%g2\n" " andn %%g2, %0, %%g2\n" - " wr %%g2, 0, %%asr24" + " wr %%g2, 0, %%stick" : /* no outputs */ : "r" (TICK_PRIV_BIT) : "g1", "g2"); @@ -241,7 +241,7 @@ static unsigned long long stick_get_tick(void) { unsigned long ret; - __asm__ __volatile__("rd %%asr24, %0" + __asm__ __volatile__("rd %%stick, %0" : "=r" (ret)); return ret & ~TICK_PRIV_BIT; @@ -251,9 +251,9 @@ static unsigned long stick_add_tick(unsigned long adj) { unsigned long new_tick; - __asm__ __volatile__("rd %%asr24, %0\n\t" + __asm__ __volatile__("rd %%stick, %0\n\t" "add %0, %1, %0\n\t" - "wr %0, 0, %%asr24\n\t" + "wr %0, 0, %%stick\n\t" : "=&r" (new_tick) : "r" (adj)); @@ -264,7 +264,7 @@ static int stick_add_compare(unsigned long adj) { unsigned long orig_tick, new_tick; - __asm__ __volatile__("rd %%asr24, %0" + __asm__ __volatile__("rd %%stick, %0" : "=r" (orig_tick)); orig_tick &= ~TICKCMP_IRQ_BIT; @@ -272,7 +272,7 @@ static int stick_add_compare(unsigned long adj) : /* no outputs */ : "r" (orig_tick + adj)); - __asm__ __volatile__("rd %%asr24, %0" + __asm__ __volatile__("rd %%stick, %0" : "=r" (new_tick)); new_tick &= ~TICKCMP_IRQ_BIT; diff --git a/arch/sparc/kernel/time_e90s.c b/arch/sparc/kernel/time_e90s.c new file mode 100644 index 000000000000..914dee54ad82 --- /dev/null +++ b/arch/sparc/kernel/time_e90s.c @@ -0,0 +1,175 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern int using_apic_timer; + +DEFINE_SPINLOCK(rtc_lock); +EXPORT_SYMBOL(rtc_lock); + +static char clock_override[10] __initdata; + +static int __init clock_setup(char* str) +{ + if (str) + strlcpy(clock_override, str, sizeof(clock_override)); + return 1; +} +__setup("clock=", clock_setup); + +#ifdef CONFIG_SMP +unsigned long profile_pc(struct pt_regs *regs) +{ + unsigned long pc = instruction_pointer(regs); + + if 
(in_lock_functions(pc)) + return regs->u_regs[UREG_RETPC]; + return pc; +} +EXPORT_SYMBOL(profile_pc); +#endif + +static irqreturn_t timer_interrupt(int irq, void *dev_id) +{ + global_clock_event->event_handler(global_clock_event); + return IRQ_HANDLED; +} + +static struct irqaction irq0 = { + .handler = timer_interrupt, + .flags = IRQF_TIMER, + .name = "timer", +}; + +void __init e90s_late_time_init(void) +{ + /* + * Now that the external timer is enabled we can + * set up the local PIC timer on boot CPU. + * + * Since setup_boot_pic_clock() will enable interrupts + * it should not be called from time_init(). + */ + setup_boot_pic_clock(); + + /* We cannot initialize clock_tick as early as other fields + * (other fields are required earlier in the boot process). */ + cpu_data(0).clock_tick = measure_cpu_freq(raw_smp_processor_id()); + cpu_freq_hz = cpu_data(0).clock_tick; +} + +void __init time_init(void) +{ + int ret; + + /* Let the user get at STICK too. */ + __asm__ __volatile__( + " rd %%stick, %%g2\n" + " andn %%g2, %0, %%g2\n" + " wr %%g2, 0, %%asr24" + : /* no outputs */ + : "r" (TICK_PRIV_BIT) + : "g1", "g2"); + /* Let the user get at TICK too. 
+ * If you will set TICK_PRIV_BIT add + * 'return ret & ~TICK_PRIV_BIT' in get_cycles() */ + __asm__ __volatile__( + " rd %%tick, %%g2\n" + " andn %%g2, %0, %%g2\n" + " wrpr %%g2, 0, %%tick" + : /* no outputs */ + : "r" (TICK_PRIV_BIT) + : "g1", "g2"); + + setup_lt_timer(); + + ret = setup_irq(0, &irq0); + if (ret) { + printk("Could not setup IRQ #%02x as timer interrupt, error " + "%d\n", 0, ret); + return; + } + +} + +#ifdef ARCH_HAS_READ_CURRENT_TIMER +static inline unsigned long long tick_get_tick(void) +{ + unsigned long ret; + + __asm__ __volatile__("rd %%stick, %0" + : "=r" (ret)); + return ret; +} + +void __delay(unsigned long loops) +{ + unsigned long bclock, now; + + bclock = tick_get_tick(); + do { + now = tick_get_tick(); + } while ((now-bclock) < loops); +} +EXPORT_SYMBOL(__delay); + + +int read_current_timer(unsigned long *timer_val) +{ + *timer_val = tick_get_tick(); + return 0; +} +#else + +void __delay(unsigned long loops) +{ + __asm__ __volatile__( +" b,pt %%xcc, 1f\n" +" cmp %0, 0\n" +" .align 32\n" +"1:\n" +" bne,pt %%xcc, 1b\n" +" subcc %0, 1, %0\n" + : "=&r" (loops) + : "0" (loops) + : "cc"); +} +#endif /*ARCH_HAS_READ_CURRENT_TIMER*/ + + +void udelay(unsigned long loops) +{ + migrate_disable(); + __delay(loops * (local_cpu_data().clock_tick / USEC_PER_SEC)); + migrate_enable(); +} +EXPORT_SYMBOL(udelay); + +int update_persistent_clock(struct timespec now) +{ + int ret = -1; +#ifdef CONFIG_RTC + /* Everything uses /dev/rtc0 interface. 
*/ + struct rtc_device *rtc = rtc_class_open("rtc0"); + + if (rtc) { + ret = rtc_set_mmss(rtc, now.tv_sec); + rtc_class_close(rtc); + } +#endif + return ret; +} diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S index fe59122d257d..b1b41645d1a2 100644 --- a/arch/sparc/kernel/trampoline_64.S +++ b/arch/sparc/kernel/trampoline_64.S @@ -107,6 +107,9 @@ startup_continue: */ sethi %hi(prom_entry_lock), %g2 1: ldstub [%g2 + %lo(prom_entry_lock)], %g1 +#ifdef CONFIG_RMO + membar #StoreLoad | #StoreStore +#endif /* CONFIG_RMO */ brnz,pn %g1, 1b nop @@ -213,6 +216,9 @@ startup_continue: sethi %hi(prom_entry_lock), %g2 stb %g0, [%g2 + %lo(prom_entry_lock)] +#ifdef CONFIG_RMO + membar #StoreStore | #StoreLoad +#endif /* CONFIG_RMO */ ba,pt %xcc, after_lock_tlb nop @@ -327,6 +333,9 @@ after_lock_tlb: sethi %hi(prom_entry_lock), %g2 1: ldstub [%g2 + %lo(prom_entry_lock)], %g1 +#ifdef CONFIG_RMO + membar #StoreLoad | #StoreStore +#endif /* CONFIG_RMO */ brnz,pn %g1, 1b nop @@ -390,6 +399,9 @@ after_lock_tlb: 3: sethi %hi(prom_entry_lock), %g2 stb %g0, [%g2 + %lo(prom_entry_lock)] +#ifdef CONFIG_RMO + membar #StoreStore | #StoreLoad +#endif /* CONFIG_RMO */ ldx [%l0], %g6 ldx [%g6 + TI_TASK], %g4 diff --git a/arch/sparc/kernel/trampoline_e90s.S b/arch/sparc/kernel/trampoline_e90s.S new file mode 100644 index 000000000000..4d211c7fdd70 --- /dev/null +++ b/arch/sparc/kernel/trampoline_e90s.S @@ -0,0 +1,39 @@ +#include +#include +#include +#include +#include +#include + + + .align 8 + .globl sparc64_cpu_startup, sparc64_cpu_startup_end +sparc64_cpu_startup: + + setx cpu_new_thread, %g4, %g6 + ldx [%g6], %g6 + + wrpr %g0, 0, %wstate + wrpr %g0, 0x0, %tl + + call setup_trap_table + nop + + wr %g0, ASI_P, %asi + mov 1, %g1 + sllx %g1, THREAD_SHIFT, %g1 + sub %g1, (STACKFRAME_SZ + STACK_BIAS), %g1 + add %g6, %g1, %sp + mov 0, %fp + + ldx [%g6 + TI_TASK], %g4 + + call smp_callin + nop + call cpu_panic + nop +1: b,a,pt %xcc, 1b + + + .align 8 
+sparc64_cpu_startup_end: diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index f2b22c496fb9..4714336dd4ca 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -23,6 +23,8 @@ #include #include #include +#include +#include #include #include @@ -46,6 +48,7 @@ #include #include #include +#include #include "entry.h" #include "kernel.h" @@ -114,7 +117,7 @@ void bad_trap(struct pt_regs *regs, long lvl) void bad_trap_tl1(struct pt_regs *regs, long lvl) { char buffer[36]; - + if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs, 0, lvl, SIGTRAP) == NOTIFY_STOP) return; @@ -146,10 +149,16 @@ static int sprintf_dimm(int synd_code, unsigned long paddr, char *buf, int bufle if (dimm_handler) { ret = dimm_handler(synd_code, paddr, buf, buflen); } else if (tlb_type == spitfire) { +#ifdef CONFIG_OF +#ifndef CONFIG_E90S if (prom_getunumber(synd_code, paddr, buf, buflen) == -1) ret = -EINVAL; else ret = 0; +#endif /* E90S */ +#else + ret = -ENODEV; +#endif /* CONFIG_OF */ } else ret = -ENODEV; spin_unlock_irqrestore(&dimm_handler_lock, flags); @@ -406,7 +415,7 @@ void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, u sun4v_data_access_exception(regs, addr, type_ctx); } -#ifdef CONFIG_PCI +#ifdef CONFIG_SPARC64_PCI #include "pci_impl.h" #endif @@ -582,7 +591,7 @@ void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, u udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT; udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT; -#ifdef CONFIG_PCI +#ifdef CONFIG_SPARC64_PCI if (tt == TRAP_TYPE_DAE && pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) { spitfire_clean_and_reenable_l1_caches(); @@ -1429,6 +1438,9 @@ static int cheetah_fix_ce(unsigned long physaddr) __asm__ __volatile__("ldxa [%0] %3, %%g0\n\t" "ldxa [%1] %3, %%g0\n\t" "casxa [%2] %3, %%g0, %%g0\n\t" +#ifdef CONFIG_RMO + "membar #StoreLoad | #StoreStore\n\t" +#endif /* CONFIG_RMO */ "ldxa [%0] 
%3, %%g0\n\t" "ldxa [%1] %3, %%g0\n\t" "membar #Sync" @@ -1575,7 +1587,7 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned struct cheetah_err_info local_snapshot, *p; int recoverable, is_memory; -#ifdef CONFIG_PCI +#ifdef CONFIG_SPARC64_PCI /* Check for the special PCI poke sequence. */ if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) { cheetah_flush_icache(); @@ -1763,7 +1775,7 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned * Bit1: 0=recoverable,1=unrecoverable * * The hardware has disabled both the I-cache and D-cache in - * the %dcr register. + * the %dcr register. */ void cheetah_plus_parity_error(int type, struct pt_regs *regs) { @@ -1799,6 +1811,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs) printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc); } +#ifdef CONFIG_SPARC64_SUN4V struct sun4v_error_entry { /* Unique error handle */ /*0x00*/u64 err_handle; @@ -1956,7 +1969,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs) insn = *(unsigned int *) regs->tpc; - addr = compute_effective_address(regs, insn, 0); + compute_effective_address(regs, insn, 0, &addr); printk("%s: insn effective address [0x%016llx]\n", pfx, addr); @@ -2140,12 +2153,14 @@ void sun4v_resum_overflow(struct pt_regs *regs) static unsigned long sun4v_get_vaddr(struct pt_regs *regs) { unsigned int insn; + unsigned long addr = 0; if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) { return compute_effective_address(regs, insn, - (insn >> 25) & 0x1f); + (insn >> 25) & 0x1f, + &addr); } - return 0; + return addr; } /* Attempt to handle non-resumable errors generated from userspace. @@ -2222,7 +2237,7 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset) return; } -#ifdef CONFIG_PCI +#ifdef CONFIG_SPARC64_PCI /* Check for the special PCI poke sequence. 
*/ if (pci_poke_in_progress && pci_poke_cpu == cpu) { pci_poke_faulted = 1; @@ -2313,6 +2328,7 @@ void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op) printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n", err, op); } +#endif /*CONFIG_SPARC64_SUN4V*/ static void do_fpe_common(struct pt_regs *regs) { @@ -2438,10 +2454,10 @@ static void user_instruction_dump(unsigned int __user *pc) { int i; unsigned int buf[9]; - + if ((((unsigned long) pc) & 3)) return; - + if (copy_from_user(buf, pc - 3, sizeof(buf))) return; @@ -2474,8 +2490,14 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) flushw_all(); fp = ksp + STACK_BIAS; - +#ifdef CONFIG_MCST + if (tsk == current) { + pr_info("%s", linux_banner); + } + pr_info("Call Trace %d - %s:\n", tsk->pid, tsk->comm); +#else printk("Call Trace:\n"); +#endif do { struct sparc_stackf *sf; struct pt_regs *regs; @@ -2508,7 +2530,11 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) } } #endif +#if defined(CONFIG_E90S) + } while (++count < NUM_DUMP_FRAMES); +#else } while (++count < 16); +#endif } static inline struct reg_window *kernel_stack_up(struct reg_window *rw) @@ -2521,11 +2547,72 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw) return (struct reg_window *) (fp + STACK_BIAS); } +#ifdef CONFIG_MCST +static int __die_if_kernel(char *str, struct pt_regs *regs) +{ + static int die_counter; + + /* Amuse the user. */ + pr_alert( +" \\|/ ____ \\|/\n" +" \"@'/ .. 
\\`@\"\n" +" /_| \\__/ |_\\\n" +" \\__U_/\n"); + + pr_alert("%s(%d): %s [#%d]\n", current->comm, + task_pid_nr(current), str, ++die_counter); + + if (notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV) == NOTIFY_STOP) + return 1; + __asm__ __volatile__("flushw"); + show_regs(regs); + if (regs->tstate & TSTATE_PRIV) { + instruction_dump((unsigned int *) regs->tpc); + } else { + if (test_thread_flag(TIF_32BIT)) { + regs->tpc &= 0xffffffff; + regs->tnpc &= 0xffffffff; + } + user_instruction_dump((unsigned int __user *) regs->tpc); + } + if (regs->tstate & TSTATE_PRIV) + show_state(); + return 0; +} + +void __noreturn die_if_kernel(char *str, struct pt_regs *regs) +{ + unsigned long flags; + int ret; + static DEFINE_RAW_SPINLOCK(die_lock); + + oops_enter(); + raw_spin_lock_irqsave(&die_lock, flags); + console_verbose(); + bust_spinlocks(1); + ret = __die_if_kernel(str, regs); + bust_spinlocks(0); + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); + raw_spin_unlock_irqrestore(&die_lock, flags); + oops_exit(); + + if (in_nmi()) + panic("Fatal exception in non-maskable interrupt"); + if (in_interrupt()) + panic("Fatal exception in interrupt"); + if (panic_on_oops) + panic("Fatal exception"); + + if (regs->tstate & TSTATE_PRIV) + do_exit(SIGKILL); + do_exit(SIGSEGV); +} +#else void __noreturn die_if_kernel(char *str, struct pt_regs *regs) { static int die_counter; int count = 0; - + /* Amuse the user. 
*/ printk( " \\|/ ____ \\|/\n" @@ -2554,41 +2641,349 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs) rw = kernel_stack_up(rw); } - instruction_dump ((unsigned int *) regs->tpc); + instruction_dump((unsigned int *) regs->tpc); } else { if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } - user_instruction_dump ((unsigned int __user *) regs->tpc); + user_instruction_dump((unsigned int __user *) regs->tpc); } - if (panic_on_oops) - panic("Fatal exception"); - if (regs->tstate & TSTATE_PRIV) + if (regs->tstate & TSTATE_PRIV) { do_exit(SIGKILL); + } do_exit(SIGSEGV); } +#endif EXPORT_SYMBOL(die_if_kernel); #define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19)) #define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19)) +#ifdef CONFIG_MCST + +static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2, + unsigned int rd, int from_kernel) +{ + if (rs2 >= 16 || rs1 >= 16 || rd >= 16) { + if (from_kernel != 0) + __asm__ __volatile__("flushw"); + else + flushw_user(); + } +} + +static inline int fetch_reg(struct pt_regs *regs, int reg, + unsigned long *val) +{ + unsigned long value = 0, fp; + int ret = 0; + + if (reg < 16) { + *val = !reg ? 
0 : regs->u_regs[reg]; + return 0; + } + + fp = regs->u_regs[UREG_FP]; + + if (regs->tstate & TSTATE_PRIV) { + struct reg_window *win; + win = (struct reg_window *)(fp + STACK_BIAS); + value = win->locals[reg - 16]; + } else if (!test_thread_64bit_stack(fp)) { + struct reg_window32 __user *win32; + win32 = (struct reg_window32 __user *) + ((unsigned long)((u32)fp)); + ret = get_user(value, &win32->locals[reg - 16]); + } else { + struct reg_window __user *win; + win = (struct reg_window __user *)(fp + STACK_BIAS); + ret = get_user(value, &win->locals[reg - 16]); + } + + *val = value; + return ret; +} + +static inline int put_reg(struct pt_regs *regs, int reg, + unsigned long val) +{ + int err = 0; + unsigned long fp = regs->u_regs[UREG_FP]; + if (reg < 16) { + if (reg) + regs->u_regs[reg] = val; + } else if (regs->tstate & TSTATE_PRIV) { + struct reg_window *win; + win = (struct reg_window *)(fp + STACK_BIAS); + win->locals[reg - 16] = val; + } else if (!test_thread_64bit_stack(fp)) { + struct reg_window32 __user *win32; + win32 = (struct reg_window32 __user *) + ((unsigned long)((u32)fp)); + err = put_user(val, &win32->locals[reg - 16]); + } else { + struct reg_window __user *win; + win = (struct reg_window __user *)(fp + STACK_BIAS); + err = put_user(val, &win->locals[reg - 16]); + } + return err; +} + +static uint32_t e90s___div64_32(uint64_t *n, uint32_t base) +{ + uint64_t rem = *n; + uint64_t b = base; + uint64_t res, d = 1; + uint32_t high = rem >> 32; + + /* Reduce the thing a bit first */ + res = 0; + if (high >= base) { + high /= base; + res = (uint64_t) high << 32; + rem -= (uint64_t) (high*base) << 32; + } + + while ((int64_t)b > 0 && b < rem) { + b = b+b; + d = d+d; + } + + do { + if (rem >= b) { + rem -= b; + res += d; + } + b >>= 1; + d >>= 1; + } while (d); + + *n = res; + return rem; +} + +# define e90s_do_div(n, base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + (void)(((typeof((n)) *)0) == ((uint64_t *)0)); \ + if (likely(((n) >> 32) 
== 0)) { \ + __rem = (uint32_t)(n) % __base; \ + (n) = (uint32_t)(n) / __base; \ + } else \ + __rem = e90s___div64_32(&(n), __base); \ + __rem; \ +}) + +static inline u64 e90s_div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) +{ + *remainder = e90s_do_div(dividend, divisor); + return dividend; +} + +static inline u64 e90s_div_u64(u64 dividend, u32 divisor) +{ + u32 remainder; + return e90s_div_u64_rem(dividend, divisor, &remainder); +} + +static u64 e90s_div64_u64(u64 dividend, u64 divisor) +{ + u32 high = divisor >> 32; + u64 quot; + + if (high == 0) { + quot = e90s_div_u64(dividend, divisor); + } else { + int n = 1 + fls(high); + quot = e90s_div_u64(dividend >> n, divisor >> n); + + if (quot != 0) + quot--; + if ((dividend - quot * divisor) >= divisor) + quot++; + } + + return quot; +} + +static s64 e90s_div64_s64(s64 dividend, s64 divisor) +{ + s64 quot, t; + + quot = e90s_div64_u64(abs(dividend), abs(divisor)); + t = (dividend ^ divisor) >> 63; + + return (quot ^ t) - t; +} + +static inline void advance(struct pt_regs *regs) +{ + regs->tpc = regs->tnpc; + regs->tnpc += 4; + if (test_thread_flag(TIF_32BIT)) { + regs->tpc &= 0xffffffff; + regs->tnpc &= 0xffffffff; + } +} + +#define OP_ARITH 2 + +#define OP3_UDIVX 0x0d +#define OP3_SDIVX 0x2d + +#define OP3_UDIV 0x0e +#define OP3_SDIV 0x0f +#define OP3_UDIVCC 0x1e +#define OP3_SDIVCC 0x1f + +/* sysctl hooks */ +int instruction_emulation_warning; + +static int __init parse_instruction_emulation_warning(char *arg) +{ + instruction_emulation_warning = 1; + return 0; +} +early_param("emu_warn", parse_instruction_emulation_warning); + +static int e90s_emu_div(struct pt_regs *regs, u32 insn) +{ + static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); + int err, overflow = 0; + unsigned long tstate = + regs->tstate & ~(TSTATE_XCC | TSTATE_ICC); + unsigned long pc = regs->tpc; + unsigned long dividend = 0, divisor = 0, result; + unsigned op3 = (insn >> 19) & 0x3f; + int sign = (op3 
== OP3_UDIVX) || (op3 == OP3_UDIV) || + (op3 == OP3_UDIVCC) ? 0 : 1; + int cc = (op3 == OP3_UDIVCC) || (op3 == OP3_SDIVCC) ? 1 : 0; + int div64_32 = cc || (op3 == OP3_UDIV) || (op3 == OP3_UDIV) ? 1 : 0; + int rd = (insn >> 25) & 0x1f; + int rs1 = (insn >> 14) & 0x1f; + int rs2 = (insn >> 13) & 1 ? rs1 : insn & 0x1f; + int from_kernel = regs->tstate & TSTATE_PRIV ? 1 : 0; + + maybe_flush_windows(rs1, rs2, rd, from_kernel); + + if ((err = fetch_reg(regs, rs1, ÷nd))) + goto out; + + if (div64_32) + dividend |= (unsigned long)regs->y << 32; + + if ((insn >> 13) & 1) { + divisor = insn & 0x1fff; + } else { + if ((err = fetch_reg(regs, rs2, &divisor))) + goto out; + } + if (divisor == 0) { + err = SIGFPE; + goto out; + } + result = sign ? e90s_div64_s64(dividend, divisor) : + e90s_div64_u64(dividend, divisor); + + if (div64_32) { + if (sign) { + long r = (long)result; + if (r > (1L << 31) - 1) { + overflow = 1; + result = (1L << 31) - 1; + } else if (r < -(1L << 31)) { + overflow = 1; + result = -(1L << 31); + } + } else { + if (result >= (1L << 32) - 1) { + overflow = 1; + result = (1L << 32) - 1; + } + } + } + + if (cc) { + unsigned long r = result; + if (r & (1 << 31)) + tstate |= TSTATE_INEG; + if ((r & 0xffffFFFF) == 0) + tstate |= TSTATE_IZERO; + if (overflow) + tstate |= TSTATE_IOVFL; + + if (r & (1UL << 61)) + tstate |= TSTATE_XNEG; + if (r == 0) + tstate |= TSTATE_XZERO; + } + + if (instruction_emulation_warning && __ratelimit(&rs)) { + pr_warn("%s illegal div instruction trap:" + " pc: %lx insn:%08x\n" + "\t\t\tsign:%d rd:%d: " + "%016lx / %016lx = %lx\n", + regs->tstate & TSTATE_PRIV ? 
"Kernel" : "User", + pc, insn, + sign, rd, + dividend, divisor, result); + show_regs(regs); + } + + if (!(err = put_reg(regs, rd, result))) + advance(regs); + regs->tstate = tstate; +out: + return err; +} +#endif /*CONFIG_MCST*/ + void do_illegal_instruction(struct pt_regs *regs) { enum ctx_state prev_state = exception_enter(); unsigned long pc = regs->tpc; unsigned long tstate = regs->tstate; u32 insn; +#ifdef CONFIG_MCST + u32 optype, op3; +#endif if (notify_die(DIE_TRAP, "illegal instruction", regs, 0, 0x10, SIGILL) == NOTIFY_STOP) goto out; - if (tstate & TSTATE_PRIV) + if (tstate & TSTATE_PRIV) { +#ifdef CONFIG_MCST + int r; + insn = *((u32 *)pc); + optype = (insn >> 30) & 0x3; + op3 = (insn >> 19) & 0x3f; + if (optype == OP_ARITH) switch (op3) { + case OP3_SDIVX: + case OP3_UDIVX: + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, + 1, regs, 0); + r = e90s_emu_div(regs, insn); + if (r == 0) + goto out; + if (r == SIGFPE) + die_if_kernel("Kernel divide by zero", regs); + break; + } + + if (insn == 0) { + show_state(); + } +#endif die_if_kernel("Kernel illegal instruction", regs); + } if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { + optype = (insn >> 30) & 0x3; + op3 = (insn >> 19) & 0x3f; if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ { if (handle_popc(insn, regs)) goto out; @@ -2596,6 +2991,7 @@ void do_illegal_instruction(struct pt_regs *regs) if (handle_ldf_stq(insn, regs)) goto out; } else if (tlb_type == hypervisor) { +#ifdef CONFIG_SPARC64_SUN4V if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) { if (!vis_emul(regs, insn)) goto out; @@ -2610,13 +3006,41 @@ void do_illegal_instruction(struct pt_regs *regs) if (do_mathemu(regs, f, true)) goto out; } +#endif /*CONFIG_SPARC64_SUN4V*/ } +#ifdef CONFIG_MCST + else if (optype == OP_ARITH) { + int r; + switch (op3) { + case OP3_SDIVX: + case OP3_UDIVX: + case OP3_SDIV: + case OP3_UDIV: + case OP3_SDIVCC: + case OP3_UDIVCC: + 
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, + 1, regs, 0); + r = e90s_emu_div(regs, insn); + if (r == 0) + goto out; + if (r == SIGFPE) { + force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)pc, 0); + goto out; + } + break; + } + } +#endif } force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)pc, 0); out: exception_exit(prev_state); } +#ifdef CONFIG_MCST +int user_unaligned_trap(struct pt_regs *regs, unsigned int insn, + void __user *addr); +#endif void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) { enum ctx_state prev_state = exception_enter(); @@ -2628,10 +3052,18 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo if (regs->tstate & TSTATE_PRIV) { kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc)); goto out; - } - if (is_no_fault_exception(regs)) - return; +#ifdef CONFIG_MCST + } else { + unsigned int insn; + if (!get_user(insn, (u32 __user *)regs->tpc) && + !user_unaligned_trap(regs, insn, + (void __user *)sfar)) { + goto out; + } +#endif + + } force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)sfar, 0); out: exception_exit(prev_state); @@ -2832,7 +3264,11 @@ EXPORT_SYMBOL(trap_block); */ void notrace init_cur_cpu_trap(struct thread_info *t) { +#ifdef CONFIG_E90S + int cpu = smp_processor_id(); +#else int cpu = hard_smp_processor_id(); +#endif struct trap_per_cpu *p = &trap_block[cpu]; p->thread = t; @@ -2871,6 +3307,8 @@ void __init trap_init(void) kern_una_regs) || TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) || + TI_LAZY_COUNT != offsetof(struct thread_info, + preempt_lazy_count) || TI_FPREGS != offsetof(struct thread_info, fpregs) || (TI_FPREGS & (64 - 1))); diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S index eaed39ce8938..61f7d05ae6f5 100644 --- a/arch/sparc/kernel/tsb.S +++ b/arch/sparc/kernel/tsb.S @@ -58,11 +58,13 @@ tsb_miss_page_table_walk: 661: ldx [%g7 + TRAP_PER_CPU_TSB_HUGE], %g5 nop +#ifdef CONFIG_SPARC64_SUN4V .section 
.sun4v_2insn_patch, "ax" .word 661b mov SCRATCHPAD_UTSBREG2, %g5 ldxa [%g5] ASI_SCRATCHPAD, %g5 .previous +#endif /*CONFIG_SPARC64_SUN4V*/ cmp %g5, -1 be,pt %xcc, 80f @@ -120,10 +122,10 @@ tsb_miss_page_table_walk_sun4v_fastpath: #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) sethi %uhi(_PAGE_PMD_HUGE | _PAGE_PUD_HUGE), %g7 sllx %g7, 32, %g7 - andcc %g5, %g7, %g0 be,pt %xcc, 60f - nop + nop + /* It is a huge page, use huge page TSB entry address we * calculated above. If the huge page TSB has not been @@ -146,11 +148,13 @@ tsb_miss_page_table_walk_sun4v_fastpath: 661: rdpr %pstate, %g5 wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate +#ifdef CONFIG_SPARC64_SUN4V .section .sun4v_2insn_patch, "ax" .word 661b SET_GL(1) nop .previous +#endif /*CONFIG_SPARC64_SUN4V*/ rdpr %tl, %g7 cmp %g7, 1 @@ -186,6 +190,7 @@ tsb_dtlb_load: 661: stxa %g5, [%g0] ASI_DTLB_DATA_IN retry +#ifdef CONFIG_SPARC64_SUN4V .section .sun4v_2insn_patch, "ax" .word 661b nop @@ -206,22 +211,26 @@ tsb_dtlb_load: */ ba,pt %xcc, sun4v_dtlb_load mov %g5, %g3 +#endif /*CONFIG_SPARC64_SUN4V*/ tsb_itlb_load: /* Executable bit must be set. */ 661: sethi %hi(_PAGE_EXEC_4U), %g4 andcc %g5, %g4, %g0 +#ifdef CONFIG_SPARC64_SUN4V .section .sun4v_2insn_patch, "ax" .word 661b andcc %g5, _PAGE_EXEC_4V, %g0 nop .previous +#endif /*CONFIG_SPARC64_SUN4V*/ be,pn %xcc, tsb_do_fault nop 661: stxa %g5, [%g0] ASI_ITLB_DATA_IN retry +#ifdef CONFIG_SPARC64_SUN4V .section .sun4v_2insn_patch, "ax" .word 661b nop @@ -246,6 +255,7 @@ tsb_itlb_load: /* No valid entry in the page tables, do full fault * processing. 
*/ +#endif /*CONFIG_SPARC64_SUN4V*/ .globl tsb_do_fault tsb_do_fault: @@ -253,11 +263,13 @@ tsb_do_fault: 661: rdpr %pstate, %g5 wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate +#ifdef CONFIG_SPARC64_SUN4V .section .sun4v_2insn_patch, "ax" .word 661b SET_GL(1) ldxa [%g0] ASI_SCRATCHPAD, %g4 .previous +#endif /*CONFIG_SPARC64_SUN4V*/ bne,pn %xcc, tsb_do_itlb_fault nop @@ -268,11 +280,13 @@ tsb_do_dtlb_fault: 661: mov TLB_TAG_ACCESS, %g4 ldxa [%g4] ASI_DMMU, %g5 +#ifdef CONFIG_SPARC64_SUN4V .section .sun4v_2insn_patch, "ax" .word 661b ldx [%g4 + HV_FAULT_D_ADDR_OFFSET], %g5 nop .previous +#endif /*CONFIG_SPARC64_SUN4V*/ /* Clear context ID bits. */ srlx %g5, PAGE_SHIFT, %g5 @@ -341,7 +355,11 @@ tsb_flush: srlx %g1, 32, %o3 andcc %o3, %g2, %g0 bne,pn %icc, 1b +#ifndef CONFIG_RMO nop +#else /* CONFIG_RMO */ + membar #LoadLoad +#endif /* CONFIG_RMO */ cmp %g1, %o1 mov 1, %o3 bne,pt %xcc, 2f @@ -351,7 +369,11 @@ tsb_flush: bne,pn %xcc, 1b nop 2: retl +#ifndef CONFIG_RMO nop +#else /* CONFIG_RMO */ + TSB_MEMBAR +#endif /* CONFIG_RMO */ .size tsb_flush, .-tsb_flush /* Reload MMU related context switch state at @@ -517,7 +539,11 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size nop retl +#ifndef CONFIG_RMO nop +#else /* CONFIG_RMO */ + TSB_MEMBAR +#endif /* CONFIG_RMO */ .size copy_tsb, .-copy_tsb /* Set the invalid bit in all TSB entries. 
*/ diff --git a/arch/sparc/kernel/ttable_64.S b/arch/sparc/kernel/ttable_64.S index 86e737e59c7e..dcfea70ad86f 100644 --- a/arch/sparc/kernel/ttable_64.S +++ b/arch/sparc/kernel/ttable_64.S @@ -12,15 +12,26 @@ .globl tl0_cee, tl1_cee .globl tl0_iae, tl1_iae .globl tl0_dae, tl1_dae +#ifdef CONFIG_E90S + .globl tl0_ivec, tl1_ivec +#endif sparc64_ttable_tl0: tl0_resv000: BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3) tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7) tl0_iax: membar #Sync TRAP_NOSAVE_7INSNS(__spitfire_insn_access_exception) +#ifdef CONFIG_SPARC64_SUN4V tl0_itsb_4v: SUN4V_ITSB_MISS +#else +tl0_itsb_4v: BTRAP(0x9) +#endif /*CONFIG_SPARC64_SUN4V*/ +#ifndef CONFIG_E90S tl0_iae: membar #Sync TRAP_NOSAVE_7INSNS(__spitfire_access_error) +#else +tl0_iae: TRAP(do_e90s_insn_access_error) +#endif tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf) tl0_ill: membar #Sync TRAP_7INSNS(do_illegal_instruction) @@ -39,21 +50,40 @@ tl0_div0: TRAP(do_div0) tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e) tl0_resv02f: BTRAP(0x2f) tl0_dax: TRAP_NOSAVE(__spitfire_data_access_exception) +#ifdef CONFIG_SPARC64_SUN4V tl0_dtsb_4v: SUN4V_DTSB_MISS +#else +tl0_dtsb_4v: BTRAP(0x31) +#endif /*CONFIG_SPARC64_SUN4V*/ +#ifndef CONFIG_E90S tl0_dae: membar #Sync TRAP_NOSAVE_7INSNS(__spitfire_access_error) +#else +tl0_dae: TRAP(do_e90s_data_access_error) +#endif tl0_resv033: BTRAP(0x33) tl0_mna: TRAP_NOSAVE(do_mna) tl0_lddfmna: TRAP_NOSAVE(do_lddfmna) tl0_stdfmna: TRAP_NOSAVE(do_stdfmna) tl0_privact: TRAP_NOSAVE(__do_privact) tl0_resv038: BTRAP(0x38) BTRAP(0x39) BTRAP(0x3a) BTRAP(0x3b) BTRAP(0x3c) BTRAP(0x3d) -tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40) +tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) +#ifndef CONFIG_E90S + BTRAP(0x40) +#else +tl0_async_de: TRAP(do_async_data_error) +#endif #ifdef CONFIG_SMP +#ifndef CONFIG_E90S tl0_irq1: TRAP_IRQ(smp_call_function_client, 1) tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2) tl0_irq3: 
TRAP_IRQ(smp_penguin_jailcell, 3) -tl0_irq4: BTRAP(0x44) +#else +tl0_irq1: BTRAP(0x41) +tl0_irq2: BTRAP(0x42) +tl0_irq3: BTRAP(0x43) +#endif +tl0_irq4: BTRAP(0x44) #else tl0_irq1: BTRAP(0x41) tl0_irq2: BTRAP(0x42) @@ -61,20 +91,29 @@ tl0_irq3: BTRAP(0x43) tl0_irq4: BTRAP(0x44) #endif tl0_irq5: TRAP_IRQ(handler_irq, 5) -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) && !defined(CONFIG_E90S) tl0_irq6: TRAP_IRQ(smp_call_function_single_client, 6) #else tl0_irq6: BTRAP(0x46) #endif +#ifndef CONFIG_E90S tl0_irq7: TRAP_IRQ(deferred_pcr_work_irq, 7) +#else +tl0_irq7: BTRAP(0x47) +#endif #if defined(CONFIG_KGDB) && defined(CONFIG_SMP) tl0_irq8: TRAP_IRQ(smp_kgdb_capture_client, 8) #else tl0_irq8: BTRAP(0x48) #endif tl0_irq9: BTRAP(0x49) -tl0_irq10: BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d) +tl0_irq10: BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) +tl0_irq13: BTRAP(0x4d) +#ifndef CONFIG_E90S tl0_irq14: TRAP_IRQ(timer_interrupt, 14) +#else +tl0_irq14: BTRAP(0x4e) +#endif tl0_irq15: TRAP_NMI_IRQ(perfctr_irq, 15) tl0_resv050: BTRAP(0x50) BTRAP(0x51) BTRAP(0x52) BTRAP(0x53) BTRAP(0x54) BTRAP(0x55) tl0_resv056: BTRAP(0x56) BTRAP(0x57) BTRAP(0x58) BTRAP(0x59) BTRAP(0x5a) BTRAP(0x5b) @@ -95,10 +134,14 @@ tl0_dcpe: BTRAP(0x71) /* D-cache Parity Error on Cheetah+ */ tl0_icpe: BTRAP(0x72) /* I-cache Parity Error on Cheetah+ */ tl0_resv073: BTRAP(0x73) BTRAP(0x74) BTRAP(0x75) tl0_resv076: BTRAP(0x76) BTRAP(0x77) BTRAP(0x78) BTRAP(0x79) BTRAP(0x7a) BTRAP(0x7b) +#ifdef CONFIG_SPARC64_SUN4V tl0_cpu_mondo: TRAP_NOSAVE(sun4v_cpu_mondo) tl0_dev_mondo: TRAP_NOSAVE(sun4v_dev_mondo) tl0_res_mondo: TRAP_NOSAVE(sun4v_res_mondo) tl0_nres_mondo: TRAP_NOSAVE(sun4v_nonres_mondo) +#else +tl0_resv07b: BTRAP(0x7b) BTRAP(0x7c) BTRAP(0x7d) BTRAP(0x7e) +#endif /*CONFIG_SPARC64_SUN4V*/ tl0_s0n: SPILL_0_NORMAL tl0_s1n: SPILL_1_NORMAL tl0_s2n: SPILL_2_NORMAL @@ -185,9 +228,17 @@ sparc64_ttable_tl1: tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3) tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) 
BTRAPTL1(0x6) BTRAPTL1(0x7) tl1_iax: TRAP_NOSAVE(__spitfire_insn_access_exception_tl1) +#ifdef CONFIG_SPARC64_SUN4V tl1_itsb_4v: SUN4V_ITSB_MISS +#else +tl1_itsb_4v: BTRAP(0x9) +#endif /*CONFIG_SPARC64_SUN4V*/ +#ifndef CONFIG_E90S tl1_iae: membar #Sync TRAP_NOSAVE_7INSNS(__spitfire_access_error) +#else +tl1_iae: TRAPTL1(do_e90s_insn_access_error) +#endif tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf) tl1_ill: TRAPTL1(do_ill_tl1) tl1_privop: BTRAPTL1(0x11) @@ -204,9 +255,17 @@ tl1_div0: TRAPTL1(do_div0_tl1) tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c) tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f) tl1_dax: TRAP_NOSAVE(__spitfire_data_access_exception_tl1) +#ifdef CONFIG_SPARC64_SUN4V tl1_dtsb_4v: SUN4V_DTSB_MISS +#else +tl1_dtsb_4v: BTRAP(0x9) +#endif /*CONFIG_SPARC64_SUN4V*/ +#ifndef CONFIG_E90S tl1_dae: membar #Sync TRAP_NOSAVE_7INSNS(__spitfire_access_error) +#else +tl1_dae: TRAPTL1(do_e90s_data_access_error) +#endif tl1_resv033: BTRAPTL1(0x33) tl1_mna: TRAP_NOSAVE(do_mna) tl1_lddfmna: TRAPTL1(do_lddfmna_tl1) @@ -214,7 +273,11 @@ tl1_stdfmna: TRAPTL1(do_stdfmna_tl1) tl1_privact: BTRAPTL1(0x37) tl1_resv038: BTRAPTL1(0x38) BTRAPTL1(0x39) BTRAPTL1(0x3a) BTRAPTL1(0x3b) tl1_resv03c: BTRAPTL1(0x3c) BTRAPTL1(0x3d) BTRAPTL1(0x3e) BTRAPTL1(0x3f) +#ifndef CONFIG_E90S tl1_resv040: BTRAPTL1(0x40) +#else +tl1_async_de: TRAPTL1(do_async_data_error) +#endif tl1_irq1: TRAP_IRQ(do_irq_tl1, 1) TRAP_IRQ(do_irq_tl1, 2) TRAP_IRQ(do_irq_tl1, 3) tl1_irq4: TRAP_IRQ(do_irq_tl1, 4) TRAP_IRQ(do_irq_tl1, 5) TRAP_IRQ(do_irq_tl1, 6) tl1_irq7: TRAP_IRQ(do_irq_tl1, 7) TRAP_IRQ(do_irq_tl1, 8) TRAP_IRQ(do_irq_tl1, 9) diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c index 23db2efda570..c7d64b0b4ac8 100644 --- a/arch/sparc/kernel/unaligned_64.c +++ b/arch/sparc/kernel/unaligned_64.c @@ -71,6 +71,7 @@ static inline int decode_access_size(struct pt_regs *regs, unsigned int insn) else if (tmp == 
2) return 2; else { + return 0; /* it is possible (r1000 bug 47278) */ printk("Impossible unaligned trap. insn=%08x\n", insn); die_if_kernel("Byte sized unaligned access?!?!", regs); @@ -117,12 +118,16 @@ static inline long sign_extend_imm13(long imm) return imm << 51 >> 51; } -static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) +static inline int fetch_reg(struct pt_regs *regs, int reg, + unsigned long *val) { - unsigned long value, fp; - - if (reg < 16) - return (!reg ? 0 : regs->u_regs[reg]); + unsigned long value = 0, fp; + int ret = 0; + + if (reg < 16) { + *val = !reg ? 0 : regs->u_regs[reg]; + return 0; + } fp = regs->u_regs[UREG_FP]; @@ -132,14 +137,17 @@ static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) value = win->locals[reg - 16]; } else if (!test_thread_64bit_stack(fp)) { struct reg_window32 __user *win32; - win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); - get_user(value, &win32->locals[reg - 16]); + win32 = (struct reg_window32 __user *) + ((unsigned long)((u32)fp)); + ret = get_user(value, &win32->locals[reg - 16]); } else { struct reg_window __user *win; win = (struct reg_window __user *)(fp + STACK_BIAS); - get_user(value, &win->locals[reg - 16]); + ret = get_user(value, &win->locals[reg - 16]); } - return value; + + *val = value; + return ret; } static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs) @@ -166,26 +174,39 @@ static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs) } } -unsigned long compute_effective_address(struct pt_regs *regs, - unsigned int insn, unsigned int rd) +int compute_effective_address(struct pt_regs *regs, + unsigned int insn, unsigned int rd, + unsigned long *address) { int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; unsigned int rs1 = (insn >> 14) & 0x1f; unsigned int rs2 = insn & 0x1f; - unsigned long addr; + unsigned long addr, offset; + int ret; if (insn & 0x2000) { - maybe_flush_windows(rs1, 0, rd, from_kernel); 
- addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn)); + maybe_flush_windows(rs1, 0, 0, from_kernel); + ret = fetch_reg(regs, rs1, &addr); + if (ret) + goto out; + addr += sign_extend_imm13(insn); } else { - maybe_flush_windows(rs1, rs2, rd, from_kernel); - addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs)); + maybe_flush_windows(rs1, rs2, 0, from_kernel); + ret = fetch_reg(regs, rs1, &addr); + if (ret) + goto out; + ret = fetch_reg(regs, rs2, &offset); + if (ret) + goto out; + addr += offset; } if (!from_kernel && test_thread_flag(TIF_32BIT)) addr &= 0xffffffff; - return addr; + *address = addr; +out: + return ret; } /* This is just to make gcc think die_if_kernel does return... */ @@ -196,22 +217,28 @@ static void __used unaligned_panic(char *str, struct pt_regs *regs) extern int do_int_load(unsigned long *dest_reg, int size, unsigned long *saddr, int is_signed, int asi); - + extern int __do_int_store(unsigned long *dst_addr, int size, unsigned long src_val, int asi); static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr, - struct pt_regs *regs, int asi, int orig_asi) + struct pt_regs *regs, int asi, int orig_asi) { unsigned long zero = 0; unsigned long *src_val_p = &zero; unsigned long src_val; + int ret = 0; if (size == 16) { + unsigned long v; size = 8; - zero = (((long)(reg_num ? 
- (unsigned int)fetch_reg(reg_num, regs) : 0)) << 32) | - (unsigned int)fetch_reg(reg_num + 1, regs); + ret = fetch_reg(regs, reg_num, &zero); + if (ret) + goto out; + fetch_reg(regs, reg_num + 1, &v); + if (ret) + goto out; + zero = (zero << 32) | v; } else if (reg_num) { src_val_p = fetch_reg_addr(reg_num, regs); } @@ -234,6 +261,8 @@ static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr, } } return __do_int_store(dst_addr, size, src_val, asi); +out: + return ret; } static inline void advance(struct pt_regs *regs) @@ -264,10 +293,10 @@ static void kernel_mna_trap_fault(int fixup_tstate_asi) entry = search_exception_tables(regs->tpc); if (!entry) { - unsigned long address; + unsigned long address = 0; - address = compute_effective_address(regs, insn, - ((insn >> 25) & 0x1f)); + compute_effective_address(regs, insn, + ((insn >> 25) & 0x1f), &address); if (address < PAGE_SIZE) { printk(KERN_ALERT "Unable to handle kernel NULL " "pointer dereference in mna handler"); @@ -303,6 +332,348 @@ static void log_unaligned(struct pt_regs *regs) } } + +static inline int do_user_int_store(int reg_num, int size, + unsigned long *dst_addr, struct pt_regs *regs, + int asi, int orig_asi) +{ + unsigned long zero = 0; + unsigned long *src_val_p = &zero; + unsigned long src_val; + int ret = 0; + + if (size == 16) { + unsigned long v; + size = 8; + ret = fetch_reg(regs, reg_num, &zero); + if (ret) + goto out; + fetch_reg(regs, reg_num + 1, &v); + if (ret) + goto out; + zero = (zero << 32) | v; + src_val = *src_val_p; + } else if (reg_num) { + src_val_p = fetch_reg_addr(reg_num, regs); + ret = get_user(src_val, src_val_p); + if (ret) + goto out; + } + if (unlikely(asi != orig_asi)) { + switch (size) { + case 2: + src_val = swab16(src_val); + break; + case 4: + src_val = swab32(src_val); + break; + case 8: + src_val = swab64(src_val); + break; + case 16: + default: + BUG(); + break; + } + } + return __do_int_store(dst_addr, size, src_val, asi); +out: + return ret; 
+} + +static int write_user(unsigned long val, void __user *addr, + int size, int swab) +{ + int err = 0; + switch (size) { + case 1: + val <<= 56; + break; + case 2: + if (swab) + val = swab16(val); + val <<= 48; + break; + case 4: + if (swab) + val = swab32(val); + val <<= 32; + break; + case 8: + if (swab) + val = swab64(val); + break; + default: + err = -EOPNOTSUPP; + break; + } + if (err) + return err; + + return copy_to_user(addr, &val, size); +} + +static int read_user(unsigned long *dest, void __user *addr, + int size, int swab, int sign) +{ + int err = 0; + unsigned long x = 0; + err = copy_from_user(&x, addr, size); + if (err) + goto out; + + switch (size) { + case 1: + x >>= 56; + if (sign) + x = (s64)((s8) x); + break; + case 2: + x >>= 48; + if (swab) + x = swab16(x); + if (sign) + x = (s64)((s16) x); + break; + case 4: + x >>= 32; + if (swab) + x = swab32(x); + if (sign) + x = (s64)((s32) x); + break; + case 8: + if (swab) + x = swab64(x); + break; + default: + err = -EOPNOTSUPP; + break; + } + if (!err) + *dest = x; +out: + return err; +} + +static inline int put_reg(struct pt_regs *regs, int rd, + unsigned long val) +{ + int err = 0; + if (rd < 16) { + if (rd) + regs->u_regs[rd] = val; + } else { + unsigned long fp = regs->u_regs[UREG_FP]; + + if (!test_thread_64bit_stack(fp)) { + struct reg_window32 __user *win32; + win32 = (struct reg_window32 __user *) + ((unsigned long)((u32)fp)); + err = put_user(val, &win32->locals[rd - 16]); + } else { + struct reg_window __user *win; + win = (struct reg_window __user *)(fp + STACK_BIAS); + err = put_user(val, &win->locals[rd - 16]); + } + } + return err; +} + +static int put_fp_reg(struct pt_regs *regs, int freg, + unsigned long value, int sz) +{ + struct fpustate *f = FPUSTATE; + struct thread_info *t = current_thread_info(); + int err = 0; + int flag = (freg < 32) ? 
FPRS_DL : FPRS_DU; + + save_and_clear_fpu(); + + if (!(t->fpsaved[0] & FPRS_FEF)) { + t->fpsaved[0] = FPRS_FEF; + t->gsr[0] = 0; + } + if (!(t->fpsaved[0] & flag)) { + if (freg < 32) + memset(f->regs, 0, 32 * sizeof(u32)); + else + memset(f->regs + 32, 0, 32 * sizeof(u32)); + } + switch (sz) { + case 4: + f->regs[freg] = (u32)value; + break; + case 8: + *(u64 *)(f->regs + freg) = value; + break; + default: + err = -1; + break; + } + t->fpsaved[0] |= flag; + return err; +} + +static int fetch_fp_reg(struct pt_regs *regs, int freg, + unsigned long *value, int sz) +{ + struct fpustate *f = FPUSTATE; + int err = 0; + + save_and_clear_fpu(); + + switch (sz) { + case 4: + *value = f->regs[freg]; + break; + case 8: + *value = *(u64 *)(f->regs + freg); + break; + default: + err = -1; + break; + } + return err; +} + +static int __decode_access_size(struct pt_regs *regs, unsigned int insn) +{ + int sz = -1; + int flt = (insn >> 24) & 1; + int rd = (insn >> 25) & 0x1f; + if (flt) { + switch ((insn >> 19) & 3) { /* map size bits to a number */ + case 0: + sz = 4; + break; /* ldf{a}/stf{a} */ + case 1: + if (rd == 0) + sz = 4; /* ldfsr/stfsr */ + else if (rd == 1) + sz = 8; /* ldxfsr/stxfsr */ + else + break; + sz = -1; + break; + case 2: + sz = 16; + sz = -1; + break; /* ldqf{a}/stqf{a} */ + case 3: + sz = 8; + break; /* lddf{a}/stdf{a} */ + } + } else { + sz = decode_access_size(regs, insn); + } + return sz; +} + +asmlinkage int user_unaligned_trap(struct pt_regs *regs, unsigned int insn, + void __user *addr) +{ + enum direction dir; + int asi, sign, rd, sz, flt, swab = 0; + int err = 0; + unsigned long val; + + rd = (insn >> 25) & 0x1f; + flt = (insn >> 24) & 1; + asi = decode_asi(insn, regs); + dir = decode_direction(insn); + sign = flt ? 
0 : decode_signedness(insn); + sz = __decode_access_size(regs, insn); + + if (flt) { + if (sz > 4) { + if ((rd & 1) == 1) + rd = (rd & 0x1e) | 0x20; + if ((sz == 16) && ((rd & 0x1) != 0)) { + err = -1; + goto kill_user; + } + } + } + /* Check the UAC bits to decide what the user wants us to do + with the unaliged access. */ + if (!(current_thread_info()->status & TS_UNALIGN_NOPRINT)) { + pr_info_ratelimited("%s(%d): unaligned trap:" + " addr:%p insn:%x asi:%x dir:%d\n" + "\t\t\tpc:%lx sign:%x sz:%d flt:%d rd:%d\n", + current->comm, task_pid_nr(current), + addr, insn, asi, dir, + regs->tpc, sign, sz, flt, rd); + } + + if ((current_thread_info()->status & TS_UNALIGN_SIGBUS)) + goto kill_user; + if (sz <= 0 || sz > 8) + goto kill_user; + + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, + regs, (unsigned long)addr); + + switch (asi) { + case ASI_PL: + swab = 1; + break; + case ASI_P: + break; + default: + goto kill_user; + break; + } + + maybe_flush_windows(0, 0, rd, 0); + + switch (dir) { + case load: + if (read_user(&val, addr, sz, swab, sign)) + goto kill_user; + if (flt) { + if (put_fp_reg(regs, rd, val, sz)) + goto kill_user; + } else { + if (put_reg(regs, rd, val)) + goto kill_user; + } + break; + + case store: + if (flt) { + if (fetch_fp_reg(regs, rd, &val, sz)) + goto kill_user; + } else { + if (fetch_reg(regs, rd, &val)) + goto kill_user; + } + if (write_user(val, addr, sz, swab)) + goto kill_user; + break; + + case both: + pr_info("Unaligned SWAP unsupported.\n"); + err = -EFAULT; + break; + + default: + unaligned_panic("Impossible user unaligned trap.", regs); + goto out; + } + if (err) + goto kill_user; + else + advance(regs); + goto out; + + +kill_user: + return -1; +out: + return 0; +} + asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) { enum direction dir = decode_direction(insn); @@ -318,12 +689,14 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) * just signal a fault and do not log the 
event. */ if (asi == ASI_AIUS) { - kernel_mna_trap_fault(0); - return; + if ((current_thread_info()->status & TS_UNALIGN_SIGBUS)) { + kernel_mna_trap_fault(0); + return; + } + } else { + log_unaligned(regs); } - log_unaligned(regs); - if (!ok_for_kernel(insn) || dir == both) { printk("Unsupported unaligned load/store trap for kernel " "at <%016lx>.\n", regs->tpc); @@ -335,8 +708,18 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) unsigned long addr, *reg_addr; int err; - addr = compute_effective_address(regs, insn, - ((insn >> 25) & 0x1f)); + err = compute_effective_address(regs, insn, + ((insn >> 25) & 0x1f), &addr); + if (err) + goto err; + + if (asi == ASI_AIUS && + !(current_thread_info()->status & TS_UNALIGN_NOPRINT)) { + pr_info_ratelimited("%s(%d): {get,put}_user()" + " unaligned access at address %lx pc: %lx\n", + current->comm, task_pid_nr(current), + addr, regs->tpc); + } perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); switch (asi) { case ASI_NL: @@ -386,10 +769,15 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) panic("Impossible kernel unaligned trap."); /* Not reached... 
*/ } - if (unlikely(err)) - kernel_mna_trap_fault(1); - else +err: + if (unlikely(err)) { + if (asi == ASI_AIUS) + kernel_mna_trap_fault(0); + else + kernel_mna_trap_fault(1); + } else { advance(regs); + } } } @@ -397,15 +785,16 @@ int handle_popc(u32 insn, struct pt_regs *regs) { int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; int ret, rd = ((insn >> 25) & 0x1f); - u64 value; - + unsigned long value; + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); if (insn & 0x2000) { maybe_flush_windows(0, 0, rd, from_kernel); value = sign_extend_imm13(insn); } else { maybe_flush_windows(0, insn & 0x1f, rd, from_kernel); - value = fetch_reg(insn & 0x1f, regs); + if (fetch_reg(regs, insn & 0x1f, &value)) + return 0; } ret = hweight64(value); if (rd < 16) { @@ -417,11 +806,13 @@ int handle_popc(u32 insn, struct pt_regs *regs) if (!test_thread_64bit_stack(fp)) { struct reg_window32 __user *win32; win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); - put_user(ret, &win32->locals[rd - 16]); + if (put_user(ret, &win32->locals[rd - 16])) + return 0; } else { struct reg_window __user *win; win = (struct reg_window __user *)(fp + STACK_BIAS); - put_user(ret, &win->locals[rd - 16]); + if (put_user(ret, &win->locals[rd - 16])) + return 0; } } advance(regs); @@ -436,12 +827,15 @@ extern void sun4v_data_access_exception(struct pt_regs *regs, int handle_ldf_stq(u32 insn, struct pt_regs *regs) { - unsigned long addr = compute_effective_address(regs, insn, 0); + unsigned long addr; int freg; struct fpustate *f = FPUSTATE; int asi = decode_asi(insn, regs); - int flag; + int flag, ret; + ret = compute_effective_address(regs, insn, 0, &addr); + if (ret) + return 0; perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); save_and_clear_fpu(); @@ -449,7 +843,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) if (insn & 0x200000) { /* STQ */ u64 first = 0, second = 0; - + freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20); flag = (freg < 32) ? 
FPRS_DL : FPRS_DU; if (freg & 3) { @@ -469,11 +863,11 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) case ASI_P: case ASI_S: break; case ASI_PL: - case ASI_SL: + case ASI_SL: { /* Need to convert endians */ u64 tmp = __swab64p(&first); - + first = __swab64p(&second); second = tmp; break; @@ -524,7 +918,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) for (i = 0; i < size; i++) data[i] = 0; - + err = get_user (data[0], (u32 __user *) addr); if (!err) { for (i = 1; i < size; i++) @@ -567,12 +961,13 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) return 1; } -void handle_ld_nf(u32 insn, struct pt_regs *regs) +int handle_ld_nf(u32 insn, struct pt_regs *regs) { int rd = ((insn >> 25) & 0x1f); int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; unsigned long *reg; - + int ret = 0; + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); maybe_flush_windows(0, 0, rd, from_kernel); @@ -582,15 +977,24 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs) if ((insn & 0x780000) == 0x180000) reg[1] = 0; } else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) { - put_user(0, (int __user *) reg); + ret = put_user(0, (int __user *) reg); + if (ret) + return ret; if ((insn & 0x780000) == 0x180000) - put_user(0, ((int __user *) reg) + 1); + ret = put_user(0, ((int __user *) reg) + 1); + if (ret) + return ret; } else { - put_user(0, (unsigned long __user *) reg); + ret = put_user(0, (unsigned long __user *) reg); + if (ret) + return ret; if ((insn & 0x780000) == 0x180000) - put_user(0, (unsigned long __user *) reg + 1); + ret = put_user(0, (unsigned long __user *) reg + 1); + if (ret) + return ret; } advance(regs); + return ret; } void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) @@ -606,10 +1010,19 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr if (tstate & TSTATE_PRIV) die_if_kernel("lddfmna from kernel", regs); + + if (!(current_thread_info()->status & TS_UNALIGN_NOPRINT)) { + 
pr_info_ratelimited("%s(%d): unaligned load trap at pc: %lx\n", + current->comm, task_pid_nr(current), pc); + } + + if ((current_thread_info()->status & TS_UNALIGN_SIGBUS)) + goto kill_user; + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; - if (get_user(insn, (u32 __user *) pc) != -EFAULT) { + if (!get_user(insn, (u32 __user *) pc)) { int asi = decode_asi(insn, regs); u32 first, second; int err; @@ -653,6 +1066,9 @@ daex: goto out; } advance(regs); + goto out; +kill_user: + force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)sfar, 0); out: exception_exit(prev_state); } @@ -670,10 +1086,19 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr if (tstate & TSTATE_PRIV) die_if_kernel("stdfmna from kernel", regs); + + if (!(current_thread_info()->status & TS_UNALIGN_NOPRINT)) { + pr_info_ratelimited("%s(%d): unaligned store trap at pc: %lx\n", + current->comm, task_pid_nr(current), pc); + } + + if ((current_thread_info()->status & TS_UNALIGN_SIGBUS)) + goto kill_user; + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; - if (get_user(insn, (u32 __user *) pc) != -EFAULT) { + if (!get_user(insn, (u32 __user *) pc)) { int asi = decode_asi(insn, regs); freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20); value = 0; @@ -688,7 +1113,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr case ASI_P: case ASI_S: break; case ASI_PL: - case ASI_SL: + case ASI_SL: value = __swab64p(&value); break; default: goto daex; } @@ -704,6 +1129,9 @@ daex: goto out; } advance(regs); + goto out; +kill_user: + force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)sfar, 0); out: exception_exit(prev_state); } diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index 59b6df13ddea..09a8eeb2ed06 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -83,6 +83,15 
@@ SECTIONS . = ALIGN(PAGE_SIZE); __init_begin = ALIGN(PAGE_SIZE); INIT_TEXT_SECTION(PAGE_SIZE) + +#ifdef CONFIG_E90S + . = ALIGN(8); + .apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) { + __apicdrivers = .; + *(.apicdrivers); + __apicdrivers_end = .; + } +#endif /*CONFIG_E90S*/ __init_text_end = .; INIT_DATA_SECTION(16) diff --git a/arch/sparc/lib/GENpatch.S b/arch/sparc/lib/GENpatch.S index 1ec1f02c8b7b..457ee63a1fcd 100644 --- a/arch/sparc/lib/GENpatch.S +++ b/arch/sparc/lib/GENpatch.S @@ -28,7 +28,11 @@ generic_patch_copyops: GEN_DO_PATCH(memcpy, GENmemcpy) GEN_DO_PATCH(raw_copy_from_user, GENcopy_from_user) +#ifdef CONFIG_MCST + GEN_DO_PATCH(_raw_copy_to_user, GENcopy_to_user) +#else GEN_DO_PATCH(raw_copy_to_user, GENcopy_to_user) +#endif /* CONFIG_MCST */ retl nop .size generic_patch_copyops,.-generic_patch_copyops diff --git a/arch/sparc/lib/M7patch.S b/arch/sparc/lib/M7patch.S index 9000b7bc5f2b..f83749d00bed 100644 --- a/arch/sparc/lib/M7patch.S +++ b/arch/sparc/lib/M7patch.S @@ -28,7 +28,11 @@ ENTRY(m7_patch_copyops) NG_DO_PATCH(memcpy, M7memcpy) NG_DO_PATCH(raw_copy_from_user, M7copy_from_user) +#ifdef CONFIG_MCST + NG_DO_PATCH(_raw_copy_to_user, M7copy_to_user) +#else NG_DO_PATCH(raw_copy_to_user, M7copy_to_user) +#endif /* CONFIG_MCST */ retl nop ENDPROC(m7_patch_copyops) diff --git a/arch/sparc/lib/NG2patch.S b/arch/sparc/lib/NG2patch.S index 72431b24491a..1adc38a2231d 100644 --- a/arch/sparc/lib/NG2patch.S +++ b/arch/sparc/lib/NG2patch.S @@ -28,7 +28,11 @@ niagara2_patch_copyops: NG_DO_PATCH(memcpy, NG2memcpy) NG_DO_PATCH(raw_copy_from_user, NG2copy_from_user) +#ifdef CONFIG_MCST + NG_DO_PATCH(_raw_copy_to_user, NG2copy_to_user) +#else NG_DO_PATCH(raw_copy_to_user, NG2copy_to_user) +#endif /* CONFIG_MCST */ retl nop .size niagara2_patch_copyops,.-niagara2_patch_copyops diff --git a/arch/sparc/lib/NG4clear_page.S b/arch/sparc/lib/NG4clear_page.S index d91d6b5f2444..11c7dd010c3b 100644 --- a/arch/sparc/lib/NG4clear_page.S +++ 
b/arch/sparc/lib/NG4clear_page.S @@ -28,3 +28,5 @@ NG4clear_user_page: /* %o0=dest, %o1=vaddr */ nop .size NG4clear_page,.-NG4clear_page .size NG4clear_user_page,.-NG4clear_user_page + + diff --git a/arch/sparc/lib/NG4patch.S b/arch/sparc/lib/NG4patch.S index 37866175c921..9cc968ba23fd 100644 --- a/arch/sparc/lib/NG4patch.S +++ b/arch/sparc/lib/NG4patch.S @@ -30,7 +30,11 @@ niagara4_patch_copyops: NG_DO_PATCH(memcpy, NG4memcpy) NG_DO_PATCH(raw_copy_from_user, NG4copy_from_user) +#ifdef CONFIG_MCST + NG_DO_PATCH(_raw_copy_to_user, NG4copy_to_user) +#else NG_DO_PATCH(raw_copy_to_user, NG4copy_to_user) +#endif /* CONFIG_MCST */ retl nop .size niagara4_patch_copyops,.-niagara4_patch_copyops diff --git a/arch/sparc/lib/NGpatch.S b/arch/sparc/lib/NGpatch.S index e9f843f1063e..1307070ee71f 100644 --- a/arch/sparc/lib/NGpatch.S +++ b/arch/sparc/lib/NGpatch.S @@ -28,7 +28,11 @@ niagara_patch_copyops: NG_DO_PATCH(memcpy, NGmemcpy) NG_DO_PATCH(raw_copy_from_user, NGcopy_from_user) +#ifdef CONFIG_MCST + NG_DO_PATCH(_raw_copy_to_user, NGcopy_to_user) +#else NG_DO_PATCH(raw_copy_to_user, NGcopy_to_user) +#endif /* CONFIG_MCST */ retl nop .size niagara_patch_copyops,.-niagara_patch_copyops diff --git a/arch/sparc/lib/PeeCeeI.c b/arch/sparc/lib/PeeCeeI.c index cde4c9a51b2e..2c0f20c8741d 100644 --- a/arch/sparc/lib/PeeCeeI.c +++ b/arch/sparc/lib/PeeCeeI.c @@ -210,3 +210,191 @@ void insl(unsigned long __addr, void *dst, unsigned long count) } EXPORT_SYMBOL(insl); + + +#ifdef __arch64__ +/* + * Copy data from IO memory space to "real" memory space. + * This needs to be optimized. + */ +void memcpy_fromio(void *to, const volatile void __iomem *from, long count) +{ + /* Optimize aligned transfers. Everything else gets handled + a byte at a time. 
*/ + +#ifdef __arch64__ + if (count >= 8 && !(((long)to & 7) || ((long)from & 7))) { + count -= 8; + do { + *(u64 *)to = __raw_readq(from); + count -= 8; + to += 8; + from += 8; + } while (count >= 0); + count += 8; + } +#endif + if (count >= 4 && !(((long)to & 3) || ((long)from & 3))) { + count -= 4; + do { + *(u32 *)to = __raw_readl(from); + count -= 4; + to += 4; + from += 4; + } while (count >= 0); + count += 4; + } + + if (count >= 2 && !(((long)to & 1) || ((long)from & 1))) { + count -= 2; + do { + *(u16 *)to = __raw_readw(from); + count -= 2; + to += 2; + from += 2; + } while (count >= 0); + count += 2; + } + + while (count > 0) { + *(u8 *) to = __raw_readb(from); + count--; + to++; + from++; + } + mb(); +} + +EXPORT_SYMBOL(memcpy_fromio); + + +/* + * Copy data from "real" memory space to IO memory space. + * This needs to be optimized. + */ +void memcpy_toio(volatile void __iomem *to, const void *from, long count) +{ + /* Optimize aligned transfers. Everything else gets handled + a byte at a time. */ + +#ifdef __arch64__ + if (count >= 8 && !(((long)to & 7) || ((long)from & 7))) { + count -= 8; + do { + __raw_writeq(*(const u64 *)from, to); + count -= 8; + to += 8; + from += 8; + } while (count >= 0); + count += 8; + } +#endif + + if (count >= 4 && !(((long)to & 3) || ((long)from & 3))) { + count -= 4; + do { + __raw_writel(*(const u32 *)from, to); + count -= 4; + to += 4; + from += 4; + } while (count >= 0); + count += 4; + } + + if (count >= 2 && !(((long)to & 1) || ((long)from & 1))) { + count -= 2; + do { + __raw_writew(*(const u16 *)from, to); + count -= 2; + to += 2; + from += 2; + } while (count >= 0); + count += 2; + } + + while (count > 0) { + __raw_writeb(*(const u8 *) from, to); + count--; + to++; + from++; + } + mb(); +} + +EXPORT_SYMBOL(memcpy_toio); + + +/* + * "memset" on IO memory space. 
+ */ +void _memset_c_io(volatile void __iomem *to, unsigned long c, long count) +{ + /* Handle any initial odd byte */ + if (count > 0 && ((long)to & 1)) { + __raw_writeb(c, to); + to++; + count--; + } + + /* Handle any initial odd halfword */ + if (count >= 2 && ((long)to & 2)) { + __raw_writew(c, to); + to += 2; + count -= 2; + } + +#ifdef __arch64__ + /* Handle any initial odd word */ + if (count >= 4 && ((long)to & 4)) { + __raw_writel(c, to); + to += 4; + count -= 4; + } + + /* Handle all full-sized quadwords: we're aligned + (or have a small count) */ + count -= 8; + if (count >= 0) { + do { + __raw_writeq(c, to); + to += 8; + count -= 8; + } while (count >= 0); + } + count += 8; + + /* The tail is word-aligned if we still have count >= 4 */ + if (count >= 4) { + __raw_writel(c, to); + to += 4; + count -= 4; + } +#else /*__arch64__*/ + count -= 4; + if (count >= 0) { + do { + __raw_writel(c, to); + to += 4; + count -= 4; + } while (count >= 0); + } + count += 4; +#endif /*__arch64__*/ + + /* The tail is half-word aligned if we have count >= 2 */ + if (count >= 2) { + __raw_writew(c, to); + to += 2; + count -= 2; + } + + /* And finally, one last byte.. 
*/ + if (count) { + __raw_writeb(c, to); + } + mb(); +} + +EXPORT_SYMBOL(_memset_c_io); + +#endif /*__arch64__*/ diff --git a/arch/sparc/lib/U1copy_to_user.S b/arch/sparc/lib/U1copy_to_user.S index 15169851e7ab..acfa90dfec7f 100644 --- a/arch/sparc/lib/U1copy_to_user.S +++ b/arch/sparc/lib/U1copy_to_user.S @@ -20,7 +20,11 @@ .text; \ .align 4; +#ifdef CONFIG_MCST +#define FUNC_NAME _raw_copy_to_user +#else #define FUNC_NAME raw_copy_to_user +#endif /* CONFIG_MCST */ #define STORE(type,src,addr) type##a src, [addr] ASI_AIUS #define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS #define EX_RETVAL(x) 0 diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S index 9248d59c734c..0c37d6a40b6a 100644 --- a/arch/sparc/lib/U3memcpy.S +++ b/arch/sparc/lib/U3memcpy.S @@ -268,6 +268,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2) subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE +#ifdef CONFIG_MCST + /* %o2 must contain the remainder so that U3_retl_o2_plus_o3_sll_6* + * code returns correct value */ + and %o2, 0x3f, %o2 +#endif add %o1, 0x40, %o1 bgu,pt %XCC, 1f srl GLOBAL_SPARE, 6, %o3 @@ -336,7 +341,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ * Also notice how this code is careful not to perform a * load past the end of the src buffer. 
*/ +#ifndef CONFIG_MCST /* see comment above */ and %o2, 0x3f, %o2 +#endif andcc %o2, 0x38, %g2 be,pn %XCC, 2f subcc %g2, 0x8, %g2 diff --git a/arch/sparc/lib/U3patch.S b/arch/sparc/lib/U3patch.S index 9a888088f3c9..3692f665101f 100644 --- a/arch/sparc/lib/U3patch.S +++ b/arch/sparc/lib/U3patch.S @@ -28,7 +28,11 @@ cheetah_patch_copyops: ULTRA3_DO_PATCH(memcpy, U3memcpy) ULTRA3_DO_PATCH(raw_copy_from_user, U3copy_from_user) +#ifdef CONFIG_MCST + ULTRA3_DO_PATCH(_raw_copy_to_user, U3copy_to_user) +#else ULTRA3_DO_PATCH(raw_copy_to_user, U3copy_to_user) +#endif /* CONFIG_MCST */ retl nop .size cheetah_patch_copyops,.-cheetah_patch_copyops diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S index 456b65a30ecf..59c6b5473b5f 100644 --- a/arch/sparc/lib/atomic_64.S +++ b/arch/sparc/lib/atomic_64.S @@ -9,6 +9,19 @@ #include #include +#if defined(CONFIG_MCST) && defined(CONFIG_RMO) && defined(CONFIG_SMP) + /* On SMP we need to use memory barriers to ensure + * correct memory operation ordering, nop these out + * for uniprocessor. 
+ */ +#define ATOMIC_PRE_BARRIER membar #StoreLoad | #LoadLoad; +#define ATOMIC_POST_BARRIER \ + membar #StoreLoad | #StoreStore; +#else +#define ATOMIC_PRE_BARRIER +#define ATOMIC_POST_BARRIER +#endif /* CONFIG_RMO & CONFIG_SMP*/ + .text /* Three versions of the atomic routines, one that @@ -36,12 +49,14 @@ EXPORT_SYMBOL(atomic_##op); #define ATOMIC_OP_RETURN(op) \ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ BACKOFF_SETUP(%o2); \ + ATOMIC_PRE_BARRIER \ 1: lduw [%o1], %g1; \ op %g1, %o0, %g7; \ cas [%o1], %g1, %g7; \ cmp %g1, %g7; \ bne,pn %icc, BACKOFF_LABEL(2f, 1b); \ op %g1, %o0, %g1; \ + ATOMIC_POST_BARRIER \ retl; \ sra %g1, 0, %o0; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ @@ -102,12 +117,14 @@ EXPORT_SYMBOL(atomic64_##op); #define ATOMIC64_OP_RETURN(op) \ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ BACKOFF_SETUP(%o2); \ + ATOMIC_PRE_BARRIER \ 1: ldx [%o1], %g1; \ op %g1, %o0, %g7; \ casx [%o1], %g1, %g7; \ cmp %g1, %g7; \ bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \ nop; \ + ATOMIC_POST_BARRIER \ retl; \ op %g1, %o0, %o0; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ @@ -116,6 +133,7 @@ EXPORT_SYMBOL(atomic64_##op##_return); #define ATOMIC64_FETCH_OP(op) \ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ + ATOMIC_PRE_BARRIER \ BACKOFF_SETUP(%o2); \ 1: ldx [%o1], %g1; \ op %g1, %o0, %g7; \ @@ -123,6 +141,7 @@ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ cmp %g1, %g7; \ bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \ nop; \ + ATOMIC_POST_BARRIER \ retl; \ mov %g1, %o0; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ @@ -152,6 +171,7 @@ ATOMIC64_FETCH_OP(xor) ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */ BACKOFF_SETUP(%o2) + ATOMIC_PRE_BARRIER 1: ldx [%o0], %g1 brlez,pn %g1, 3f sub %g1, 1, %g7 @@ -159,6 +179,7 @@ ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */ cmp %g1, %g7 bne,pn %xcc, BACKOFF_LABEL(2f, 1b) nop + ATOMIC_POST_BARRIER 3: retl sub %g1, 1, %o0 2: BACKOFF_SPIN(%o2, %o3, 1b) diff --git 
a/arch/sparc/lib/bitops.S b/arch/sparc/lib/bitops.S index 9d647f977618..3477a5052cd2 100644 --- a/arch/sparc/lib/bitops.S +++ b/arch/sparc/lib/bitops.S @@ -10,9 +10,27 @@ #include .text +#ifdef CONFIG_RMO + /* On SMP we need to use memory barriers to ensure + * correct memory operation ordering, nop these out + * for uniprocessor. + */ +#ifdef CONFIG_SMP +#define BITOP_PRE_BARRIER membar #StoreLoad | #LoadLoad +#define BITOP_POST_BARRIER \ + membar #StoreLoad | #StoreStore +#else +#define BITOP_PRE_BARRIER +#define BITOP_POST_BARRIER +#endif + +#endif /* CONFIG_RMO */ ENTRY(test_and_set_bit) /* %o0=nr, %o1=addr */ BACKOFF_SETUP(%o3) +#ifdef CONFIG_RMO + BITOP_PRE_BARRIER +#endif /* CONFIG_RMO */ srlx %o0, 6, %g1 mov 1, %o2 sllx %g1, 3, %g3 @@ -27,6 +45,9 @@ ENTRY(test_and_set_bit) /* %o0=nr, %o1=addr */ and %g7, %o2, %g2 clr %o0 movrne %g2, 1, %o0 +#ifdef CONFIG_RMO + BITOP_POST_BARRIER +#endif /* CONFIG_RMO */ retl nop 2: BACKOFF_SPIN(%o3, %o4, 1b) @@ -35,6 +56,9 @@ EXPORT_SYMBOL(test_and_set_bit) ENTRY(test_and_clear_bit) /* %o0=nr, %o1=addr */ BACKOFF_SETUP(%o3) +#ifdef CONFIG_RMO + BITOP_PRE_BARRIER +#endif /* CONFIG_RMO */ srlx %o0, 6, %g1 mov 1, %o2 sllx %g1, 3, %g3 @@ -49,6 +73,9 @@ ENTRY(test_and_clear_bit) /* %o0=nr, %o1=addr */ and %g7, %o2, %g2 clr %o0 movrne %g2, 1, %o0 +#ifdef CONFIG_RMO + BITOP_POST_BARRIER +#endif /* CONFIG_RMO */ retl nop 2: BACKOFF_SPIN(%o3, %o4, 1b) @@ -57,6 +84,9 @@ EXPORT_SYMBOL(test_and_clear_bit) ENTRY(test_and_change_bit) /* %o0=nr, %o1=addr */ BACKOFF_SETUP(%o3) +#ifdef CONFIG_RMO + BITOP_PRE_BARRIER +#endif /* CONFIG_RMO */ srlx %o0, 6, %g1 mov 1, %o2 sllx %g1, 3, %g3 @@ -71,6 +101,9 @@ ENTRY(test_and_change_bit) /* %o0=nr, %o1=addr */ and %g7, %o2, %g2 clr %o0 movrne %g2, 1, %o0 +#ifdef CONFIG_RMO + BITOP_POST_BARRIER +#endif /* CONFIG_RMO */ retl nop 2: BACKOFF_SPIN(%o3, %o4, 1b) @@ -79,6 +112,9 @@ EXPORT_SYMBOL(test_and_change_bit) ENTRY(set_bit) /* %o0=nr, %o1=addr */ BACKOFF_SETUP(%o3) +#if defined CONFIG_MCST && 
defined CONFIG_RMO + BITOP_PRE_BARRIER +#endif /* CONFIG_RMO */ srlx %o0, 6, %g1 mov 1, %o2 sllx %g1, 3, %g3 @@ -99,6 +135,9 @@ EXPORT_SYMBOL(set_bit) ENTRY(clear_bit) /* %o0=nr, %o1=addr */ BACKOFF_SETUP(%o3) +#if defined CONFIG_MCST && defined CONFIG_RMO + BITOP_PRE_BARRIER +#endif /* CONFIG_RMO */ srlx %o0, 6, %g1 mov 1, %o2 sllx %g1, 3, %g3 diff --git a/arch/sparc/math-emu/math_64.c b/arch/sparc/math-emu/math_64.c index 1379dee26a65..86f213c1dc8a 100644 --- a/arch/sparc/math-emu/math_64.c +++ b/arch/sparc/math-emu/math_64.c @@ -67,6 +67,11 @@ #define FITOS 0x0c4 /* Only Ultra-III generates this. */ #endif #define FITOD 0x0c8 /* Only Ultra-III generates this. */ +#ifdef CONFIG_E90S +#define PFADDS 0x040 +#define PFSUBS 0x044 +#define PFMULS 0x048 +#endif /* FPOP2 */ #define FCMPQ 0x053 #define FCMPEQ 0x057 @@ -82,6 +87,22 @@ #define FMOVQNZ 0x0a7 #define FMOVQGZ 0x0c7 #define FMOVQGE 0x0e7 +#ifdef CONFIG_E90S + /* FMA-fused opcode */ +#define FMADDS ((0<<7 | 1<<5) >> 5) +#define FMADDD ((0<<7 | 2<<5) >> 5) +#define FMSUBS ((1<<7 | 1<<5) >> 5) +#define FMSUBD ((1<<7 | 2<<5) >> 5) +#define FNMADDS ((3<<7 | 1<<5) >> 5) +#define FNMADDD ((3<<7 | 2<<5) >> 5) +#define FNMSUBS ((2<<7 | 1<<5) >> 5) +#define FNMSUBD ((2<<7 | 2<<5) >> 5) + +#define PFMADDS ((0<<7) >> 5) +#define PFMSUBS ((1<<7) >> 5) +#define PFNMADDS ((3<<7) >> 5) +#define PFNMSUBS ((2<<7) >> 5) +#endif #define FSR_TEM_SHIFT 23UL #define FSR_TEM_MASK (0x1fUL << FSR_TEM_SHIFT) @@ -256,6 +277,47 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap) case FITOS: TYPE(2,1,1,1,0,0,0); break; #endif case FITOD: TYPE(2,2,1,1,0,0,0); break; +#ifdef CONFIG_E90S + default:{ + int s1 = (insn >> 14) & 0x1f; + int s2 = (insn >> 0) & 0x1f; + int d = (insn >> 25) & 0x1f; + FP_DECL_S(SA2); FP_DECL_S(SB2); FP_DECL_S(SR2); + unsigned op = (insn >> 5) & 0x1ff; + if(op != PFADDS && op != PFSUBS && op != PFMULS) + break; + s1 = ((s1 & 1) << 5) | (s1 & 0x1e); + s2 = ((s2 & 1) << 5) | (s2 & 0x1e); 
+ d = ((d & 1) << 5) | (d & 0x1e); + argp rs1 = (argp)&f->regs[s1]; + argp rs2 = (argp)&f->regs[s2]; + argp rd = (argp)&f->regs[d]; + argp rs12 = (argp)&f->regs[s1 + 1]; + argp rs22 = (argp)&f->regs[s2 + 1]; + argp rd2 = (argp)&f->regs[d + 1]; + + FP_UNPACK_SP(SA, rs1); FP_UNPACK_SP(SB, rs2); + FP_UNPACK_SP(SA2, rs12); FP_UNPACK_SP(SB2, rs22); + + switch (op) { + case PFADDS: + FP_ADD_S(SR, SA, SB); FP_ADD_S(SR2, SA2, SB2); break; + case PFSUBS: + FP_SUB_S(SR, SA, SB); FP_SUB_S(SR2, SA2, SB2); break; + case PFMULS: + FP_MUL_S(SR, SA, SB); FP_MUL_S(SR2, SA2, SB2); break; + default: + goto err; + } + + if (!FP_INHIBIT_RESULTS) { + FP_PACK_SP (rd, SR); FP_PACK_SP (rd2, SR2); + } + flags = (d < 32) ? FPRS_DL : FPRS_DU; + current_thread_info()->fpsaved[0] |= flags; + goto success; + } +#endif /*CONFIG_E90S*/ } } else if ((insn & 0xc1f80000) == 0x81a80000) /* FPOP2 */ { @@ -354,6 +416,177 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap) TYPE(3,3,0,3,0,0,0); } } +#ifdef CONFIG_E90S + else if ((insn & 0xc1f80000) == 0x81b80000) /* FMA-fused opcode */ { + FP_DECL_S(SC); FP_DECL_S(SE); + FP_DECL_S(SA2); FP_DECL_S(SB2); FP_DECL_S(SC2); FP_DECL_S(SR2); + FP_DECL_D(DC); FP_DECL_D(DE); + argp rs12, rs22, rs32, rd2; + u64 e; + int s1 = (insn >> 14) & 0x1f; + int s2 = (insn >> 0) & 0x1f; + int s3 = (insn >> 9) & 0x1f; + int d = (insn >> 25) & 0x1f; + argp rs1 = (argp)&f->regs[s1]; + argp rs2 = (argp)&f->regs[s2]; + argp rs3 = (argp)&f->regs[s3]; + argp rd = (argp)&f->regs[d]; + argp re = (argp)&e; + + switch ((insn >> 5) & 3) { + case 2: + s1 = ((s1 & 1) << 5) | (s1 & 0x1e); + s2 = ((s2 & 1) << 5) | (s2 & 0x1e); + s3 = ((s3 & 1) << 5) | (s3 & 0x1e); + d = ((d & 1) << 5) | (d & 0x1e); + rs1 = (argp)&f->regs[s1]; + rs2 = (argp)&f->regs[s2]; + rs3 = (argp)&f->regs[s3]; + rd = (argp)&f->regs[d]; + FP_UNPACK_DP (DA, rs1); FP_UNPACK_DP (DB, rs2); + FP_UNPACK_DP (DC, rs3); + break; + + case 1: FP_UNPACK_SP (SA, rs1); FP_UNPACK_SP (SB, rs2); + 
FP_UNPACK_SP (SC, rs3); break; + case 0: /* Only packed operation. We should treat reg + number as a double */ + s1 = ((s1 & 1) << 5) | (s1 & 0x1e); + s2 = ((s2 & 1) << 5) | (s2 & 0x1e); + s3 = ((s3 & 1) << 5) | (s3 & 0x1e); + d = ((d & 1) << 5) | (d & 0x1e); + rs1 = (argp)&f->regs[s1]; + rs2 = (argp)&f->regs[s2]; + rs3 = (argp)&f->regs[s3]; + rd = (argp)&f->regs[d]; + rs12 = (argp)&f->regs[s1 + 1]; + rs22 = (argp)&f->regs[s2 + 1]; + rs32 = (argp)&f->regs[s3 + 1]; + rd2 = (argp)&f->regs[d + 1]; + FP_UNPACK_SP (SA, rs1); FP_UNPACK_SP (SB, rs2); FP_UNPACK_SP (SC, rs3); + FP_UNPACK_SP (SA2, rs12); FP_UNPACK_SP (SB2, rs22); FP_UNPACK_SP (SC2, rs32); + break; + default: + goto err; + } + + switch ((insn >> 5) & 0xf) { + case FMADDS: + FP_MUL_S(SE, SA, SB); + FP_PACK_SP(re, SE); /* round SE */ + FP_UNPACK_SP(SE, re); + FP_ADD_S(SR, SE, SC); + break; + case FMADDD: + FP_MUL_D(DE, DA, DB); + FP_PACK_DP(re, DE); /* round DE */ + FP_UNPACK_DP(DE, re); + FP_ADD_D(DR, DE, DC); + break; + case FMSUBS: + FP_MUL_S(SE, SA, SB); + FP_PACK_SP(re, SE); /* round SE */ + FP_UNPACK_SP(SE, re); + FP_SUB_S(SR, SE, SC); + break; + case FMSUBD: + FP_MUL_D(DE, DA, DB); + FP_PACK_DP(re, DE); /* round DE */ + FP_UNPACK_DP(DE, re); + FP_SUB_D(DR, DE, DC); + break; + case FNMADDS: + FP_MUL_S(SE, SA, SB); + FP_NEG_S(SE, SE); + FP_PACK_SP(re, SE); /* round SE */ + FP_UNPACK_SP(SE, re); + FP_SUB_S(SR, SE, SC); + break; + case FNMADDD: + FP_MUL_D(DE, DA, DB); + FP_NEG_D(DE, DE); + FP_PACK_DP(re, DE); /* round DE */ + FP_UNPACK_DP(DE, re); + FP_SUB_D(DR, DE, DC); + break; + case FNMSUBS: + FP_MUL_S(SE, SA, SB); + FP_NEG_S(SE, SE); + FP_PACK_SP(re, SE); /* round SE */ + FP_UNPACK_SP(SE, re); + FP_ADD_S(SR, SE, SC); + break; + case FNMSUBD: + FP_MUL_D(DE, DA, DB); + FP_NEG_D(DE, DE); + FP_PACK_DP(re, DE); /* round DE */ + FP_UNPACK_DP(DE, re); + FP_ADD_D(DR, DE, DC); + break; + case PFMADDS: + FP_MUL_S(SE, SA, SB); + FP_PACK_SP(re, SE); /* round SE */ + FP_UNPACK_SP(SE, re); + FP_ADD_S(SR, SE, SC); + + 
FP_MUL_S(SE, SA2, SB2); + FP_PACK_SP(re, SE); /* round SE */ + FP_UNPACK_SP(SE, re); + FP_ADD_S(SR2, SE, SC2); + break; + case PFMSUBS: + FP_MUL_S(SE, SA, SB); + FP_PACK_SP(re, SE); /* round SE */ + FP_UNPACK_SP(SE, re); + FP_SUB_S(SR, SE, SC); + + FP_MUL_S(SE, SA2, SB2); + FP_PACK_SP(re, SE); /* round SE */ + FP_UNPACK_SP(SE, re); + FP_SUB_S(SR2, SE, SC2); + break; + case PFNMADDS: /* rd <- -rs1 x rs2 - rs3 (sic!) */ + FP_MUL_S(SE, SA, SB); + FP_NEG_S(SE, SE); + FP_PACK_SP(re, SE); /* round SE */ + FP_UNPACK_SP(SE, re); + FP_SUB_S(SR, SE, SC); + + FP_MUL_S(SE, SA2, SB2); + FP_NEG_S(SE, SE); + FP_PACK_SP(re, SE); /* round SE */ + FP_UNPACK_SP(SE, re); + FP_SUB_S(SR2, SE, SC2); + break; + case PFNMSUBS: /* rd <- -rs1 x rs2 + rs3 (sic!) */ + FP_MUL_S(SE, SA, SB); + FP_NEG_S(SE, SE); + FP_PACK_SP(re, SE); /* round SE */ + FP_UNPACK_SP(SE, re); + FP_ADD_S(SR, SE, SC); + + FP_MUL_S(SE, SA2, SB2); + FP_NEG_S(SE, SE); + FP_PACK_SP(re, SE); /* round SE */ + FP_UNPACK_SP(SE, re); + FP_ADD_S(SR2, SE, SC2); + break; + default: + goto err; + } + + if(!FP_INHIBIT_RESULTS) switch((insn >> 5) & 3) { + case 2: FP_PACK_DP(rd, DR); break; + case 1: FP_PACK_SP(rd, SR); break; + case 0: FP_PACK_SP(rd, SR); FP_PACK_SP(rd2, SR2); break; + default: + goto err; + } + flags = (d < 32) ? 
FPRS_DL : FPRS_DU; + current_thread_info()->fpsaved[0] |= flags; + goto success; + } +#endif /*CONFIG_E90S*/ } if (type) { argp rs1 = NULL, rs2 = NULL, rd = NULL; @@ -511,7 +744,9 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap) case 7: FP_PACK_QP (rd, QR); break; } } - +#ifdef CONFIG_E90S +success: +#endif if(_fex != 0) return record_exception(regs, _fex); diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile index b078205b70e0..01ab4c75aa63 100644 --- a/arch/sparc/mm/Makefile +++ b/arch/sparc/mm/Makefile @@ -3,11 +3,15 @@ # asflags-y := -ansi -ccflags-y := -Werror +#ccflags-y := -Werror obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o obj-y += fault_$(BITS).o +ifdef CONFIG_E90S +obj-y += init_e90s.o +else obj-y += init_$(BITS).o +endif obj-$(CONFIG_SPARC32) += extable.o srmmu.o iommu.o io-unit.o obj-$(CONFIG_SPARC32) += srmmu_access.o obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 2371fb6b97e4..fe02f157e063 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -35,6 +35,11 @@ #include #include #include +#include +#ifdef CONFIG_MCST_RT +#include +#include +#endif int show_unhandled_signals = 1; @@ -155,7 +160,7 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs, unsigned long fault_addr, unsigned int insn, int fault_code) { - unsigned long addr; + unsigned long addr = 0xdeadbeef; if (fault_code & FAULT_CODE_ITLB) { addr = regs->tpc; @@ -165,7 +170,7 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs, * time provided address which may only have page granularity. */ if (insn) - addr = compute_effective_address(regs, insn, 0); + compute_effective_address(regs, insn, 0, &addr); else addr = fault_addr; } @@ -223,7 +228,6 @@ static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code, return; } } - /* Is this in ex_table? 
*/ if (regs->tstate & TSTATE_PRIV) { const struct exception_table_entry *entry; @@ -524,3 +528,227 @@ do_sigbus: if (regs->tstate & TSTATE_PRIV) goto handle_kernel_fault; } + +#ifdef CONFIG_E90S + +extern volatile int pci_poke_in_progress; +extern volatile int pci_poke_cpu; +extern volatile int pci_poke_faulted; + +#include +#include + +/* AFSR*/ +/* Bits Identifier Value Access mode Reset Comment */ +/*63:32 Reserved RO X Reserved */ +#define AFSR_L1_WAY (F << 28) /*RW X L1 way mask: L1 columns with error */ +/*27:25 Reserved RO X Reserved */ +#define AFSR_L2_ERR (3 << 23) /*RW X L2 error code */ + /* + * 00 ? No error + * 01 ? Data error + * 10 ? Address doesn't exist + * 11 ? Protocol error + */ +#define AFSR_L2_SIZE (3 << 21) /*RW X L2 size - for accesses with size */ +#define AFSR_L2_CFG (1 << 20) /*RW X L2 configuration access */ +#define AFSR_L2_OP (F << 16) /*RW L2 operation code */ +/*14:15 Reserved RO 0 Reserved */ +#define AFSR_ERR_IOCC (1 << 13) /*RW X Error in IOCC .*/ +#define AFSR_ERR_SC (1 << 12) /*RW X Error in SC */ +#define AFSR_ERR_IOMMU (1 << 11) /*RW X Error in IOMMU */ +#define AFSR_ERR_IC_SNOOP_MULTIHIT (1 << 10) /*RW X Multihit while snooping IC.*/ +#define AFSR_ERR_IC_SNOOP (1 << 9) /*RW X IC snoop error */ +#define AFSR_ERR_RB_DATA (1 << 8) /*RW X Error L1 repeat data read - error when reading data while repeating query from repeat buff. */ +#define AFSR_ERR_RB_TAG (1 << 7) /*RW X Error L1 repeat tag read - error when reading tags while repeating query from repeat buff. 
*/ +#define AFSR_ERR_SNOOP_DATA (1 << 6) /*RW X Error L1 snoop data read */ +#define AFSR_ERR_SNOOP_TAG (1 << 5) /*RW X Error L1 snoop tag read */ +#define AFSR_ERR_CWB (1 << 4) /*RW X Error L1 write-back - error while reading data to evict from L1 */ +#define AFSR_ERR_L2_WR (1 << 3) /*RW X Error L2 write */ +#define AFSR_ERR_L2_RD (1 << 2) /*RW X Error L2 read */ +#define AFSR_OW (1 << 1) /*RW X Overwrite - Set by exception if FV bit is set already */ +#define AFSR_FV (1 << 0) /*RW 0 Fault Valid - set if register contents are valid. Is not set by fast_data_access_MMU_miss exception */ + + +#define L2_FAR ((2UL << 32) | (3 << 8)) +#define L2_FSR ((2UL << 32) | (4 << 8)) +static void __e90s_iommu_error_interrupt(char *str, int len, int iommu, + unsigned fsr, unsigned long far, unsigned src_id) +{ + int bus, slot, func; + char *err = fsr & IOMMU_FSR_MULTIHIT ? "Multihit" + : fsr & IOMMU_FSR_WRITE_PROTECTION ? "Write protection error" + : fsr & IOMMU_FSR_PAGE_MISS ? "Page miss" + : fsr & IOMMU_FSR_ADDR_RNG_VIOLATION ? "Address range violation" + : "Unknown error"; + char *s = fsr & IOMMU_FSR_MUTIPLE_ERR ? " (Mutiple error)" : ""; + if (src_id == ~0) + src_id = 0; + bus = (src_id >> 8) & 0xff; + slot = PCI_SLOT(src_id); + func = PCI_FUNC(src_id); + + snprintf(str, len, "\tIOMMU%d: %s%s at address 0x%lx\n" + "\t\t (device: %02x:%02x.%d: fsr: 0x%x)\n", + iommu, err, s, far, + bus, slot, func, fsr); +} + + +static void instruction_dump(char *str, int len, unsigned int *pc) +{ + int i; + if ((((unsigned long) pc) & 3)) + return; + len += snprintf(str + len, len, "Instruction DUMP:"); + for (i = -3; i < 6; i++) { + len += snprintf(str + len, len, + "%c%08x%c", i ? ' ' : '<', pc[i], i ? 
' ' : '>'); + } + len += snprintf(str + len, len, "\n"); +} + +static void user_instruction_dump(char *str, int len, unsigned int __user *pc) +{ + int i; + unsigned int instr; + if ((((unsigned long) pc) & 3)) + return; + len += snprintf(str + len, len, "Instruction DUMP:"); + for (i = -3; i < 6; i++) { + instr = get_user_insn((unsigned long) pc + i * 4); + len += snprintf(str + len, len, + "%c%08x%c", i ? ' ' : '<', instr, i ? ' ' : '>'); + } + len += snprintf(str + len, len, "\n"); +} + +asmlinkage void do_async_data_error(struct pt_regs *regs) +{ + enum ctx_state prev_state = exception_enter(); + unsigned long asfr = readq_asi(0, ASI_AFSR); + unsigned long afar = readq_asi(0, ASI_AFAR); + unsigned long l2_fsr = readq_asi(L2_FSR, ASI_CONFIG); + unsigned long l2_far = readq_asi(L2_FAR, ASI_CONFIG); + char s[1024]; + int len, n, cpu = smp_processor_id(); + + char *err = + asfr & AFSR_ERR_IOCC ? "Error in IOCC" + : asfr & AFSR_ERR_SC ? "Error in SC" + : asfr & AFSR_ERR_IOMMU ? "Error in IOMMU" + : asfr & AFSR_ERR_IC_SNOOP_MULTIHIT ? "IC snoop multihit" + : asfr & AFSR_ERR_IC_SNOOP ? "Error IC snoop" + : asfr & AFSR_ERR_RB_DATA ? "Error L1 repeat data read" + : asfr & AFSR_ERR_RB_TAG ? "Error L1 repeat tag read" + : asfr & AFSR_ERR_SNOOP_DATA ? "Error L1 snoop data read" + : asfr & AFSR_ERR_SNOOP_TAG ? "Error L1 snoop tag read" + : asfr & AFSR_ERR_RB_DATA ? "Error L1 repeat data read" + : asfr & AFSR_ERR_RB_TAG ? "Error L1 repeat tag read" + : asfr & AFSR_ERR_SNOOP_DATA ? "Error L1 snoop data read" + : asfr & AFSR_ERR_SNOOP_TAG ? "Error L1 snoop tag read" + : asfr & AFSR_ERR_CWB ? "Error L1 write-back" + : asfr & AFSR_ERR_L2_WR ? "Error L2 write" + : asfr & AFSR_ERR_L2_RD ? 
"Error L2 read" + : "Unknown error"; + + if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) { + writeq_asi(0, 0, ASI_AFSR); + writeq_asi(0, 0, ASI_AFAR); + writeq_asi(0, L2_FSR, ASI_CONFIG); + writeq_asi(0, L2_FAR, ASI_CONFIG); + pci_poke_faulted = 1; + regs->tpc += 4; + regs->tnpc = regs->tpc + 4; + return; + } + + len = snprintf(s, sizeof(s), "cpu %d:async data error: %s\n" + "\t(afsr: 0x%lx far: 0x%lx).\n" + "\tpc: %lx, L2 fsr: 0x%lx, far: 0x%lx\n", + cpu, err, asfr, afar, + regs->tpc, l2_fsr, l2_far); + + if (e90s_get_cpu_type() == E90S_CPU_R2000) + for_each_online_node(n) { + int reg = 1 << 25; + unsigned long cc_fsr = nbsr_readq(reg | (2 << 8), n); + unsigned long cc_far = nbsr_readq(reg | (1 << 8), n); + len += snprintf(s + len, sizeof(s) - len, + "\tCC%d.0 fsr: 0x%lx, far: 0x%lx\n", n, cc_fsr, cc_far); + reg |= 1 << 26; + cc_fsr = nbsr_readq(reg | (2 << 8), n); + cc_far = nbsr_readq(reg | (1 << 8), n); + len += snprintf(s + len, sizeof(s) - len, + "\tCC%d.1 fsr: 0x%lx, far: 0x%lx\n", n, cc_fsr, cc_far); + } + + if (asfr & AFSR_ERR_IOMMU) { + int i, base; + int iommu_nr = e90s_get_cpu_type() == E90S_CPU_R2000P ? 5 : 1; + for (base = i = 0; i < iommu_nr; i++, + base += i == 1 ? 
NBSR_IOMMU_1_OFFSET : + NBSR_IOMMU_2TO4_OFFSET) { + unsigned long far; + unsigned src_id; + int node = cpu_to_node(cpu); + unsigned fsr = nbsr_readl(NBSR_IOMMU_FSR + base, node); + + if (fsr == ~0 || !(fsr & IOMMU_FSR_ERR_MASK)) + continue; + src_id = nbsr_readl(NBSR_IOMMU_FAULT_SOURCE_ID + base, + node); + far = nbsr_readl(NBSR_IOMMU_FAH + base, node); + far = (far << 32) | + nbsr_readl(NBSR_IOMMU_FAL + base, node); + __e90s_iommu_error_interrupt(s + len, sizeof(s) - len, + i, fsr, far, src_id); + break; + } + } else { + show_regs(regs); + + if (regs->tstate & TSTATE_PRIV) { + instruction_dump(s + len, sizeof(s) - len, (unsigned int *)regs->tpc); + } else { + if (test_thread_flag(TIF_32BIT)) { + regs->tpc &= 0xffffffff; + regs->tnpc &= 0xffffffff; + } + user_instruction_dump(s + len, sizeof(s) - len, + (unsigned int __user *) regs->tpc); + writeq_asi(0, 0, ASI_AFSR); + writeq_asi(0, 0, ASI_AFAR); + writeq_asi(0, L2_FSR, ASI_CONFIG); + writeq_asi(0, L2_FAR, ASI_CONFIG); + /*goto kill_user; FIXME: after bug 131913*/ + } + } + panic(s); +/*kill_user:*/ + die_if_kernel(s, regs); + exception_exit(prev_state); +} + +asmlinkage void do_e90s_data_access_error(struct pt_regs *regs) +{ + int cpu = smp_processor_id(); + char s[128]; + unsigned long fsr = readq_asi(TLB_SFSR, ASI_DMMU); + unsigned long far = readq_asi(DMMU_SFAR, ASI_DMMU); + sprintf(s, "cpu %d: data access error: dsfsr: 0x%lx dfar: 0x%lx", + cpu, fsr, far); + panic(s); +} + +asmlinkage void do_e90s_insn_access_error(struct pt_regs *regs) +{ + int cpu = smp_processor_id(); + char s[128]; + unsigned long fsr = readq_asi(TLB_SFSR, ASI_IMMU); + sprintf(s, "cpu %d: instruction access error: isfsr: 0x%lx", + cpu, fsr); + panic(s); +} + +#endif /*CONFIG_E90S*/ diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index f78793a06bbd..03550ac8135c 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -134,6 +134,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long 
addr, static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift) { +#ifdef CONFIG_MCST + switch (shift) { + case HPAGE_16GB_SHIFT: + pte_val(entry) |= _PAGE_PUD_HUGE; + break; + case HPAGE_2GB_SHIFT: + case HPAGE_256MB_SHIFT: + case HPAGE_SHIFT: + pte_val(entry) |= _PAGE_PMD_HUGE; + break; + case HPAGE_64K_SHIFT: + break; + default: + WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift); + } +#endif return entry; } diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 28b9ffd85db0..e6f18456b195 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -235,6 +235,9 @@ static inline void set_dcache_dirty(struct page *page, int this_cpu) "or %%g1, %0, %%g1\n\t" "casx [%2], %%g7, %%g1\n\t" "cmp %%g7, %%g1\n\t" +#ifdef CONFIG_RMO + "membar #StoreLoad | #StoreStore\n\t" +#endif /* CONFIG_RMO */ "bne,pn %%xcc, 1b\n\t" " nop" : /* no outputs */ @@ -256,6 +259,9 @@ static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu) " andn %%g7, %1, %%g1\n\t" "casx [%2], %%g7, %%g1\n\t" "cmp %%g7, %%g1\n\t" +#ifdef CONFIG_RMO + "membar #StoreLoad | #StoreStore\n\t" +#endif /* CONFIG_RMO */ "bne,pn %%xcc, 1b\n\t" " nop\n" "2:" @@ -433,7 +439,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * if (!pte_accessible(mm, pte)) return; - spin_lock_irqsave(&mm->context.lock, flags); + raw_spin_lock_irqsave(&mm->context.lock, flags); is_huge_tsb = false; #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) @@ -469,7 +475,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT, address, pte_val(pte)); - spin_unlock_irqrestore(&mm->context.lock, flags); + raw_spin_unlock_irqrestore(&mm->context.lock, flags); } void flush_dcache_page(struct page *page) @@ -769,7 +775,7 @@ void __flush_dcache_range(unsigned long start, unsigned long end) EXPORT_SYMBOL(__flush_dcache_range); /* get_new_mmu_context() 
uses "cache + 1". */ -DEFINE_SPINLOCK(ctx_alloc_lock); +DEFINE_RAW_SPINLOCK(ctx_alloc_lock); unsigned long tlb_context_cache = CTX_FIRST_VERSION; #define MAX_CTX_NR (1UL << CTX_NR_BITS) #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR) @@ -822,7 +828,7 @@ static void mmu_context_wrap(void) } } } - + /* Caller does TLB context flushing on local CPU if necessary. * The caller also ensures that CTX_VALID(mm->context) is false. * @@ -838,7 +844,7 @@ void get_new_mmu_context(struct mm_struct *mm) unsigned long ctx, new_ctx; unsigned long orig_pgsz_bits; - spin_lock(&ctx_alloc_lock); + raw_spin_lock(&ctx_alloc_lock); retry: /* wrap might have happened, test again if our context became valid */ if (unlikely(CTX_VALID(mm->context))) @@ -860,7 +866,7 @@ retry: tlb_context_cache = new_ctx; mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; out: - spin_unlock(&ctx_alloc_lock); + raw_spin_unlock(&ctx_alloc_lock); } static int numa_enabled = 1; @@ -3006,10 +3012,9 @@ void hugetlb_setup(struct pt_regs *regs) * the Data-TLB for huge pages. */ if (tlb_type == cheetah_plus) { - bool need_context_reload = false; unsigned long ctx; - spin_lock_irq(&ctx_alloc_lock); + raw_spin_lock(&ctx_alloc_lock); ctx = mm->context.sparc64_ctx_val; ctx &= ~CTX_PGSZ_MASK; ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT; @@ -3028,29 +3033,26 @@ void hugetlb_setup(struct pt_regs *regs) * also executing in this address space. 
*/ mm->context.sparc64_ctx_val = ctx; - need_context_reload = true; - } - spin_unlock_irq(&ctx_alloc_lock); - - if (need_context_reload) on_each_cpu(context_reload, mm, 0); + } + raw_spin_unlock(&ctx_alloc_lock); } } #endif static struct resource code_resource = { .name = "Kernel code", - .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM + .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; static struct resource data_resource = { .name = "Kernel data", - .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM + .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; static struct resource bss_resource = { .name = "Kernel bss", - .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM + .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; static inline resource_size_t compute_kern_paddr(void *addr) diff --git a/arch/sparc/mm/init_64.h b/arch/sparc/mm/init_64.h index d920a75b5f14..2f329ede84df 100644 --- a/arch/sparc/mm/init_64.h +++ b/arch/sparc/mm/init_64.h @@ -24,7 +24,11 @@ struct linux_prom_translation { }; /* Exported for kernel TLB miss handling in ktlb.S */ +#ifdef CONFIG_E90S +extern struct linux_prom_translation prom_trans[64]; +#else extern struct linux_prom_translation prom_trans[512]; +#endif extern unsigned int prom_trans_ents; /* Exported for SMP bootup purposes. */ diff --git a/arch/sparc/mm/init_e90s.c b/arch/sparc/mm/init_e90s.c new file mode 100644 index 000000000000..45ed74b9e9f3 --- /dev/null +++ b/arch/sparc/mm/init_e90s.c @@ -0,0 +1,2153 @@ +/* + * arch/sparc64/mm/init.c + * + * Copyright (C) 1996-1999 David S. 
Miller (davem@caip.rutgers.edu) + * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#include "init_64.h" + +unsigned long kern_linear_pte_xor[4] __read_mostly; + +#ifndef CONFIG_DEBUG_PAGEALLOC +/* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings. + * Space is allocated for this right after the trap table in + * arch/sparc64/kernel/head.S + */ +extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES]; +#endif +extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; + +static unsigned long cpu_pgsz_mask; + +/* Kernel physical address base and size in bytes. 
*/ +unsigned long kern_base __read_mostly; +unsigned long kern_size __read_mostly; + +/* Initial ramdisk setup */ +extern unsigned long sparc_ramdisk_image64; +extern unsigned int sparc_ramdisk_image; +extern unsigned int sparc_ramdisk_size; + +struct page *mem_map_zero __read_mostly; +EXPORT_SYMBOL(mem_map_zero); + +unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly; + +unsigned long sparc64_kern_pri_context __read_mostly; +unsigned long sparc64_kern_pri_nuc_bits __read_mostly; +unsigned long sparc64_kern_sec_context __read_mostly; + +int num_kernel_image_mappings; + +#ifdef CONFIG_DEBUG_DCFLUSH +atomic_t dcpage_flushes = ATOMIC_INIT(0); +#ifdef CONFIG_SMP +atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0); +#endif +#endif + +inline void flush_dcache_page_impl(struct page *page) +{ + BUG_ON(tlb_type == hypervisor); +#ifdef CONFIG_DEBUG_DCFLUSH + atomic_inc(&dcpage_flushes); +#endif + +#ifdef DCACHE_ALIASING_POSSIBLE + __flush_dcache_page(page_address(page), + ((tlb_type == spitfire) && + page_mapping_file(page) != NULL)); +#else + if (page_mapping_file(page) != NULL && + tlb_type == spitfire) + __flush_icache_page(__pa(page_address(page))); +#endif +} + +#define PG_dcache_dirty PG_arch_1 +#define PG_dcache_cpu_shift 32UL +#define PG_dcache_cpu_mask \ + ((1UL<flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask) + +static inline void set_dcache_dirty(struct page *page, int this_cpu) +{ + unsigned long mask = this_cpu; + unsigned long non_cpu_bits; + + non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift); + mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty); + + __asm__ __volatile__("1:\n\t" + "ldx [%2], %%g7\n\t" + "and %%g7, %1, %%g1\n\t" + "or %%g1, %0, %%g1\n\t" + "casx [%2], %%g7, %%g1\n\t" + "cmp %%g7, %%g1\n\t" +#ifdef CONFIG_RMO + "membar #StoreLoad | #StoreStore\n\t" +#endif /* CONFIG_RMO */ + "bne,pn %%xcc, 1b\n\t" + " nop" + : /* no outputs */ + : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags) + : "g1", "g7"); +} + +static 
inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu) +{ + unsigned long mask = (1UL << PG_dcache_dirty); + + __asm__ __volatile__("! test_and_clear_dcache_dirty\n" + "1:\n\t" + "ldx [%2], %%g7\n\t" + "srlx %%g7, %4, %%g1\n\t" + "and %%g1, %3, %%g1\n\t" + "cmp %%g1, %0\n\t" + "bne,pn %%icc, 2f\n\t" + " andn %%g7, %1, %%g1\n\t" + "casx [%2], %%g7, %%g1\n\t" + "cmp %%g7, %%g1\n\t" +#ifdef CONFIG_RMO + "membar #StoreLoad | #StoreStore\n\t" +#endif /* CONFIG_RMO */ + "bne,pn %%xcc, 1b\n\t" + " nop\n" + "2:" + : /* no outputs */ + : "r" (cpu), "r" (mask), "r" (&page->flags), + "i" (PG_dcache_cpu_mask), + "i" (PG_dcache_cpu_shift) + : "g1", "g7"); +} + +static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte) +{ + unsigned long tsb_addr = (unsigned long) ent; + + if (tlb_type == cheetah_plus || tlb_type == hypervisor) + tsb_addr = __pa(tsb_addr); + + __tsb_insert(tsb_addr, tag, pte); +} + +unsigned long _PAGE_ALL_SZ_BITS __read_mostly; + +static void flush_dcache(unsigned long pfn) +{ + struct page *page; + + page = pfn_to_page(pfn); + if (page) { + unsigned long pg_flags; + + pg_flags = page->flags; + if (pg_flags & (1UL << PG_dcache_dirty)) { + int cpu = ((pg_flags >> PG_dcache_cpu_shift) & + PG_dcache_cpu_mask); + int this_cpu = get_cpu(); + + /* This is just to optimize away some function calls + * in the SMP case. 
+ */ + if (cpu == this_cpu) + flush_dcache_page_impl(page); + else + smp_flush_dcache_page_impl(page, cpu); + + clear_dcache_dirty_cpu(page, cpu); + + put_cpu(); + } + } +} + +/* mm->context.lock must be held */ +static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index, + unsigned long tsb_hash_shift, unsigned long address, + unsigned long tte) +{ + struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb; + unsigned long tag; + + if (unlikely(!tsb)) + return; + + tsb += ((address >> tsb_hash_shift) & + (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL)); + tag = (address >> 22UL); + tsb_insert(tsb, tag, tte); +} + +#ifdef CONFIG_HUGETLB_PAGE +static void __init add_huge_page_size(unsigned long size) +{ + unsigned int order; + + if (size_to_hstate(size)) + return; + + order = ilog2(size) - PAGE_SHIFT; + hugetlb_add_hstate(order); +} + +static int __init hugetlbpage_init(void) +{ + add_huge_page_size(1UL << HPAGE_64K_SHIFT); + add_huge_page_size(1UL << HPAGE_SHIFT); + add_huge_page_size(1UL << HPAGE_256MB_SHIFT); + add_huge_page_size(1UL << HPAGE_2GB_SHIFT); + + return 0; +} + +arch_initcall(hugetlbpage_init); + +static void __init pud_huge_patch(void) +{ + struct pud_huge_patch_entry *p; + unsigned long addr; + + p = &__pud_huge_patch; + addr = p->addr; + *(unsigned int *)addr = p->insn; + + __asm__ __volatile__("flush %0" : : "r" (addr)); +} + +static int __init setup_hugepagesz(char *string) +{ + unsigned long long hugepage_size; + unsigned int hugepage_shift; + unsigned short hv_pgsz_idx; + unsigned int hv_pgsz_mask; + int rc = 0; + + hugepage_size = memparse(string, &string); + hugepage_shift = ilog2(hugepage_size); + + switch (hugepage_shift) { + case HPAGE_16GB_SHIFT: + hv_pgsz_mask = HV_PGSZ_MASK_16GB; + hv_pgsz_idx = HV_PGSZ_IDX_16GB; + pud_huge_patch(); + break; + case HPAGE_2GB_SHIFT: + hv_pgsz_mask = HV_PGSZ_MASK_2GB; + hv_pgsz_idx = HV_PGSZ_IDX_2GB; + break; + case HPAGE_256MB_SHIFT: + hv_pgsz_mask = HV_PGSZ_MASK_256MB; + 
hv_pgsz_idx = HV_PGSZ_IDX_256MB; + break; + case HPAGE_SHIFT: + hv_pgsz_mask = HV_PGSZ_MASK_4MB; + hv_pgsz_idx = HV_PGSZ_IDX_4MB; + break; + case HPAGE_64K_SHIFT: + hv_pgsz_mask = HV_PGSZ_MASK_64K; + hv_pgsz_idx = HV_PGSZ_IDX_64K; + break; + default: + hv_pgsz_mask = 0; + } + + if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) { + hugetlb_bad_size(); + pr_err("hugepagesz=%llu not supported by MMU.\n", + hugepage_size); + goto out; + } + + add_huge_page_size(hugepage_size); + rc = 1; + +out: + return rc; +} +__setup("hugepagesz=", setup_hugepagesz); +#endif /* CONFIG_HUGETLB_PAGE */ + +void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) +{ + struct mm_struct *mm; + unsigned long flags; + bool is_huge_tsb; + pte_t pte = *ptep; + + if (tlb_type != hypervisor) { + unsigned long pfn = pte_pfn(pte); + + if (pfn_valid(pfn)) + flush_dcache(pfn); + } + + mm = vma->vm_mm; + + /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */ + if (!pte_accessible(mm, pte)) + return; + + raw_spin_lock_irqsave(&mm->context.lock, flags); + + is_huge_tsb = false; +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) + if (mm->context.hugetlb_pte_count || mm->context.thp_pte_count) { + unsigned long hugepage_size = PAGE_SIZE; + + if (is_vm_hugetlb_page(vma)) + hugepage_size = huge_page_size(hstate_vma(vma)); + + if (hugepage_size >= PUD_SIZE) { + unsigned long mask = 0x1ffc00000UL; + + /* Transfer bits [32:22] from address to resolve + * at 4M granularity. + */ + pte_val(pte) &= ~mask; + pte_val(pte) |= (address & mask); + } else if (hugepage_size >= PMD_SIZE) { + /* We are fabricating 8MB pages using 4MB + * real hw pages. 
+ */ + pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT)); + } + + if (hugepage_size >= PMD_SIZE) { + __update_mmu_tsb_insert(mm, MM_TSB_HUGE, + REAL_HPAGE_SHIFT, address, pte_val(pte)); + is_huge_tsb = true; + } + } +#endif + if (!is_huge_tsb) + __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT, + address, pte_val(pte)); + + raw_spin_unlock_irqrestore(&mm->context.lock, flags); +} + +void flush_dcache_page(struct page *page) +{ + struct address_space *mapping; + int this_cpu; + + if (tlb_type == hypervisor) + return; + + /* Do not bother with the expensive D-cache flush if it + * is merely the zero page. The 'bigcore' testcase in GDB + * causes this case to run millions of times. + */ + if (page == ZERO_PAGE(0)) + return; + + this_cpu = get_cpu(); + + mapping = page_mapping_file(page); + if (mapping && !mapping_mapped(mapping)) { + int dirty = test_bit(PG_dcache_dirty, &page->flags); + if (dirty) { + int dirty_cpu = dcache_dirty_cpu(page); + + if (dirty_cpu == this_cpu) + goto out; + smp_flush_dcache_page_impl(page, dirty_cpu); + } + set_dcache_dirty(page, this_cpu); + } else { + /* We could delay the flush for the !page_mapping + * case too. But that case is for exec env/arg + * pages and those are %99 certainly going to get + * faulted into the tlb (and thus flushed) anyways. + */ + flush_dcache_page_impl(page); + } + +out: + put_cpu(); +} +EXPORT_SYMBOL(flush_dcache_page); + +void __kprobes flush_icache_range(unsigned long start, unsigned long end) +{ + /* Cheetah and Hypervisor platform cpus have coherent I-cache. */ + if (tlb_type == spitfire) { + unsigned long kaddr; + + /* This code only runs on Spitfire cpus so this is + * why we can assume _PAGE_PADDR_4U. 
+ */ + for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) { + unsigned long paddr, mask = _PAGE_PADDR_4U; + + if (kaddr >= PAGE_OFFSET) + paddr = kaddr & mask; + else { + pgd_t *pgdp = pgd_offset_k(kaddr); + pud_t *pudp = pud_offset(pgdp, kaddr); + pmd_t *pmdp = pmd_offset(pudp, kaddr); + pte_t *ptep = pte_offset_kernel(pmdp, kaddr); + + paddr = pte_val(*ptep) & mask; + } + __flush_icache_page(paddr); + } + } +} +EXPORT_SYMBOL(flush_icache_range); + +void mmu_info(struct seq_file *m) +{ + static const char *pgsz_strings[] = { + "8K", "64K", "512K", "4MB", "32MB", + "256MB", "2GB", "16GB", + }; + int i, printed; + + if (tlb_type == cheetah) + seq_printf(m, "MMU Type\t: Cheetah\n"); + else if (tlb_type == cheetah_plus) + seq_printf(m, "MMU Type\t: Cheetah+\n"); + else if (tlb_type == spitfire) + seq_printf(m, "MMU Type\t: Spitfire\n"); + else if (tlb_type == hypervisor) + seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n"); + else + seq_printf(m, "MMU Type\t: ???\n"); + + seq_printf(m, "MMU PGSZs\t: "); + printed = 0; + for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) { + if (cpu_pgsz_mask & (1UL << i)) { + seq_printf(m, "%s%s", + printed ? 
"," : "", pgsz_strings[i]); + printed++; + } + } + seq_putc(m, '\n'); + +#ifdef CONFIG_DEBUG_DCFLUSH + seq_printf(m, "DCPageFlushes\t: %d\n", + atomic_read(&dcpage_flushes)); +#ifdef CONFIG_SMP + seq_printf(m, "DCPageFlushesXC\t: %d\n", + atomic_read(&dcpage_flushes_xcall)); +#endif /* CONFIG_SMP */ +#endif /* CONFIG_DEBUG_DCFLUSH */ +} + +struct linux_prom_translation prom_trans[64] __read_mostly; +unsigned int prom_trans_ents __read_mostly; + +unsigned long kern_locked_tte_data; + +static int cmp_ptrans(const void *a, const void *b) +{ + const struct linux_prom_translation *x = a, *y = b; + + if (x->virt > y->virt) + return 1; + if (x->virt < y->virt) + return -1; + return 0; +} + +#define BOOT_PAGE_SZ 0x400000 +static void __init read_obp_translations(void) +{ + int i, j; + struct boot_info *b = &bootblock->info; + for (i = j = 0; i < b->num_of_busy; i++) { + int n = b->busy[i].size / BOOT_PAGE_SZ, k; + for (k = 0; k < n; k++) { + if (WARN_ON(j + k == sizeof(prom_trans))) + break; + prom_trans[j + k].virt = + b->busy[i].address + k * BOOT_PAGE_SZ; + prom_trans[j + k].size = BOOT_PAGE_SZ; + prom_trans[j + k].data = prom_trans[j].virt + | _PAGE_VALID | _PAGE_SZ4MB_4U | _PAGE_CP_4U + | _PAGE_CV_4U | _PAGE_P_4U | _PAGE_W_4U; + } + j += k; + } + prom_trans_ents = j; + sort(prom_trans, j, sizeof(prom_trans[0]), cmp_ptrans, NULL); + for (i = 0; i < prom_trans_ents; i++) { + unsigned long low_phys = prom_trans[0].virt; + /*consider bootloader maps itself linearly */ + prom_trans[i].virt = LOW_OBP_ADDRESS - low_phys + + prom_trans[i].virt; + } +} + +static unsigned long kern_large_tte(unsigned long paddr); + +static void __init remap_kernel(void) +{ + unsigned long phys_page, tte_vaddr, tte_data; + int tlb_ent = sparc64_highest_locked_tlbent(); + + tte_vaddr = (unsigned long) KERNBASE; + phys_page = bootblock->info.kernel_base; + tte_data = kern_large_tte(phys_page); + + kern_locked_tte_data = tte_data; + + sparc64_highest_unlocked_tlb_ent = tlb_ent - 
num_kernel_image_mappings; + + if (tlb_type == cheetah_plus) { + sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | + CTX_CHEETAH_PLUS_NUC); + sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC; + sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0; + } +} + + +static void __init inherit_prom_mappings(void) +{ + /* Now fixup OBP's idea about where we really are mapped. */ + printk("Remapping the kernel... "); + remap_kernel(); + printk("done.\n"); +} + +void prom_world(int enter) +{ + if (!enter) + set_fs(get_fs()); + + __asm__ __volatile__("flushw"); +} + +void __flush_dcache_range(unsigned long start, unsigned long end) +{ + unsigned long va; + + if (tlb_type == spitfire) { + int n = 0; + + for (va = start; va < end; va += 32) { + spitfire_put_dcache_tag(va & 0x3fe0, 0x0); + if (++n >= 512) + break; + } + } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { + start = __pa(start); + end = __pa(end); + for (va = start; va < end; va += 32) + __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (va), + "i" (ASI_DCACHE_INVALIDATE)); + } +} +EXPORT_SYMBOL(__flush_dcache_range); + +/* get_new_mmu_context() uses "cache + 1". 
*/ +DEFINE_RAW_SPINLOCK(ctx_alloc_lock); +unsigned long tlb_context_cache = CTX_FIRST_VERSION; +#define MAX_CTX_NR (1UL << CTX_NR_BITS) +#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR) +DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR); +DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0}; + +static void mmu_context_wrap(void) +{ + unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK; + unsigned long new_ver, new_ctx, old_ctx; + struct mm_struct *mm; + int cpu; + + bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS); + + /* Reserve kernel context */ + set_bit(0, mmu_context_bmap); + + new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION; + if (unlikely(new_ver == 0)) + new_ver = CTX_FIRST_VERSION; + tlb_context_cache = new_ver; + + /* + * Make sure that any new mm that are added into per_cpu_secondary_mm, + * are going to go through get_new_mmu_context() path. + */ + mb(); + + /* + * Updated versions to current on those CPUs that had valid secondary + * contexts + */ + for_each_online_cpu(cpu) { + /* + * If a new mm is stored after we took this mm from the array, + * it will go into get_new_mmu_context() path, because we + * already bumped the version in tlb_context_cache. + */ + mm = per_cpu(per_cpu_secondary_mm, cpu); + + if (unlikely(!mm || mm == &init_mm)) + continue; + + old_ctx = mm->context.sparc64_ctx_val; + if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) { + new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver; + set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap); + mm->context.sparc64_ctx_val = new_ctx; + } + } +} + +/* Caller does TLB context flushing on local CPU if necessary. + * The caller also ensures that CTX_VALID(mm->context) is false. + * + * We must be careful about boundary cases so that we never + * let the user have CTX 0 (nucleus) or we ever use a CTX + * version of zero (and thus NO_CONTEXT would not be caught + * by version mis-match tests in mmu_context.h). + * + * Always invoked with interrupts disabled. 
+ */ +void get_new_mmu_context(struct mm_struct *mm) +{ + unsigned long ctx, new_ctx; + unsigned long orig_pgsz_bits; + + raw_spin_lock(&ctx_alloc_lock); +retry: + /* wrap might have happened, test again if our context became valid */ + if (unlikely(CTX_VALID(mm->context))) + goto out; + orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); + ctx = (tlb_context_cache + 1) & CTX_NR_MASK; + new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); + if (new_ctx >= (1 << CTX_NR_BITS)) { + new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); + if (new_ctx >= ctx) { + mmu_context_wrap(); + goto retry; + } + } + if (mm->context.sparc64_ctx_val) + cpumask_clear(mm_cpumask(mm)); + mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63)); + new_ctx |= (tlb_context_cache & CTX_VERSION_MASK); + tlb_context_cache = new_ctx; + mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; +out: + raw_spin_unlock(&ctx_alloc_lock); +} + +#ifdef CONFIG_NUMA +static int numa_enabled = 1; +#else +static int numa_enabled = 0; +#endif +static int numa_debug; + +static int __init early_numa(char *p) +{ + if (!p) + return 0; + + if (strstr(p, "off")) + numa_enabled = 0; + + if (strstr(p, "debug")) + numa_debug = 1; + + return 0; +} +early_param("numa", early_numa); + +#define numadbg(f, a...) 
\ +do { if (numa_debug) \ + printk(KERN_INFO f, ## a); \ +} while (0) + +static void __init find_ramdisk(void) +{ +#ifdef CONFIG_BLK_DEV_INITRD + struct boot_info *bi = &bootblock->info; + if (bi->ramdisk_size) { + numadbg("Found ramdisk at physical address 0x%llx, size %llx\n", + bi->ramdisk_base, bi->ramdisk_size); + initrd_start = bi->ramdisk_base; + initrd_end = bi->ramdisk_base + bi->ramdisk_size; + memblock_reserve(initrd_start, bi->ramdisk_size); + initrd_start += PAGE_OFFSET; + initrd_end += PAGE_OFFSET; + } +#endif +} + +#ifdef CONFIG_NUMA +int numa_cpu_lookup_table[NR_CPUS]; +EXPORT_SYMBOL(numa_cpu_lookup_table); + +cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES]; +EXPORT_SYMBOL(numa_cpumask_lookup_table); + +struct pglist_data *node_data[MAX_NUMNODES]; +EXPORT_SYMBOL(node_data); +#endif + +/* This must be invoked after performing all of the necessary + * memblock_set_node() calls for 'nid'. We need to be able to get + * correct data from get_pfn_range_for_nid(). + */ +static void __init allocate_node_data(int nid) +{ + struct pglist_data *p; + unsigned long start_pfn, end_pfn; +#ifdef CONFIG_NEED_MULTIPLE_NODES + void *a; + + a = memblock_alloc_node(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid); + if (!a) { + prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid); + prom_halt(); + } + NODE_DATA(nid) = a; + memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); + + NODE_DATA(nid)->node_id = nid; +#endif + + p = NODE_DATA(nid); + + get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); + p->node_start_pfn = start_pfn; + p->node_spanned_pages = end_pfn - start_pfn; +} + +static int __init bootmem_init_numa(void) +{ +#ifdef CONFIG_NUMA + unsigned long cpu, node; + for (cpu = 0; cpu < ARRAY_SIZE(numa_cpu_lookup_table); cpu++) { + int id; + if (!cpu_present(cpu)) { + numa_cpu_lookup_table[cpu] = NUMA_NO_NODE; + continue; + } + id = cpu_physical_id(cpu); + node = id / e90s_max_nr_node_cpus(); + if (!node_online(node)) + node = 0; + 
numa_cpu_lookup_table[cpu] = node; + cpumask_set_cpu(cpu, &numa_cpumask_lookup_table[node]); + } +#endif + return 0; +} + +static unsigned long __init bootmem_init(void) +{ + boot_info_t *bi = &bootblock->info; + int i, nodes = numa_enabled ? L_MAX_MEM_NUMNODES : 1; + unsigned long end_pfn; + /* setup resource limits */ + ioport_resource.start = BASE_PCIIO; + + memblock_reserve(kern_base, kern_size); + + for (i = 0; i < bi->num_of_busy; i++) + memblock_reserve(bi->busy[i].address, bi->busy[i].size); + + find_ramdisk(); + for (i = 0; i < nodes; i++) { + int j; + node_banks_t *boot_nodes = bi->nodes_mem + i; + bank_info_t *banks = boot_nodes->banks; + if (!(bi->nodes_map & (1 << i))) + continue; + node_set_online(i); + for (j = 0; j < L_MAX_NODE_PHYS_BANKS; j++) { + bank_info_t *b = &banks[j]; + if (!b->size) + break; /* no more banks on i */ + memblock_add_node(b->address, b->size, i); + } + } + memblock_enforce_memory_limit(cmdline_memory_size); + + memblock_allow_resize(); + end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; + max_pfn = max_low_pfn = end_pfn; + min_low_pfn = memblock_start_of_DRAM() >> PAGE_SHIFT; + + for (i = 0; i < nodes; i++) { + if (!(bi->nodes_map & (1 << i))) + continue; + allocate_node_data(i); + } + bootmem_init_numa(); + + /* Dump memblock with node info. 
*/ + memblock_dump_all(); + + sparse_memory_present_with_active_regions(MAX_NUMNODES); + sparse_init(); + + return end_pfn; +} + +static unsigned long max_phys_bits = 40; + +bool kern_addr_valid(unsigned long addr) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + if ((long)addr < 0L) { + unsigned long pa = __pa(addr); + + if ((pa >> max_phys_bits) != 0UL) + return false; + + return pfn_valid(pa >> PAGE_SHIFT); + } + + if (addr >= (unsigned long) KERNBASE && + addr < (unsigned long)&_end) + return true; + + pgd = pgd_offset_k(addr); + if (pgd_none(*pgd)) + return 0; + + pud = pud_offset(pgd, addr); + if (pud_none(*pud)) + return 0; + + if (pud_large(*pud)) + return pfn_valid(pud_pfn(*pud)); + + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) + return 0; + + if (pmd_large(*pmd)) + return pfn_valid(pmd_pfn(*pmd)); + + pte = pte_offset_kernel(pmd, addr); + if (pte_none(*pte)) + return 0; + + return pfn_valid(pte_pfn(*pte)); +} +EXPORT_SYMBOL(kern_addr_valid); + +static unsigned long __ref kernel_map_hugepud(unsigned long vstart, + unsigned long vend, + pud_t *pud) +{ + const unsigned long mask16gb = (1UL << 34) - 1UL; + u64 pte_val = vstart; + + /* Each PUD is 8GB */ + if ((vstart & mask16gb) || + (vend - vstart <= mask16gb)) { + pte_val ^= kern_linear_pte_xor[2]; + pud_val(*pud) = pte_val | _PAGE_PUD_HUGE; + + return vstart + PUD_SIZE; + } + + pte_val ^= kern_linear_pte_xor[3]; + pte_val |= _PAGE_PUD_HUGE; + + vend = vstart + mask16gb + 1UL; + while (vstart < vend) { + pud_val(*pud) = pte_val; + + pte_val += PUD_SIZE; + vstart += PUD_SIZE; + pud++; + } + return vstart; +} + +static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend, + bool guard) +{ + if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE) + return true; + + return false; +} + +static unsigned long __ref kernel_map_hugepmd(unsigned long vstart, + unsigned long vend, + pmd_t *pmd) +{ + const unsigned long mask256mb = (1UL << 28) - 1UL; + const unsigned long 
mask2gb = (1UL << 31) - 1UL; + u64 pte_val = vstart; + + /* Each PMD is 8MB */ + if ((vstart & mask256mb) || + (vend - vstart <= mask256mb)) { + pte_val ^= kern_linear_pte_xor[0]; + pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE; + + return vstart + PMD_SIZE; + } + + if ((vstart & mask2gb) || + (vend - vstart <= mask2gb)) { + pte_val ^= kern_linear_pte_xor[1]; + pte_val |= _PAGE_PMD_HUGE; + vend = vstart + mask256mb + 1UL; + } else { + pte_val ^= kern_linear_pte_xor[2]; + pte_val |= _PAGE_PMD_HUGE; + vend = vstart + mask2gb + 1UL; + } + + while (vstart < vend) { + pmd_val(*pmd) = pte_val; + + pte_val += PMD_SIZE; + vstart += PMD_SIZE; + pmd++; + } + + return vstart; +} + +static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend, + bool guard) +{ + if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE) + return true; + + return false; +} + +static unsigned long __ref kernel_map_range(unsigned long pstart, + unsigned long pend, pgprot_t prot, + bool use_huge) +{ + unsigned long vstart = PAGE_OFFSET + pstart; + unsigned long vend = PAGE_OFFSET + pend; + unsigned long alloc_bytes = 0UL; + + if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) { + prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n", + vstart, vend); + prom_halt(); + } + + while (vstart < vend) { + unsigned long this_end, paddr = __pa(vstart); + pgd_t *pgd = pgd_offset_k(vstart); + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + if (pgd_none(*pgd)) { + pud_t *new; + + new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, + PAGE_SIZE); + if (!new) + goto err_alloc; + alloc_bytes += PAGE_SIZE; + pgd_populate(&init_mm, pgd, new); + } + pud = pud_offset(pgd, vstart); + if (pud_none(*pud)) { + pmd_t *new; + + if (kernel_can_map_hugepud(vstart, vend, use_huge)) { + vstart = kernel_map_hugepud(vstart, vend, pud); + continue; + } + new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, + PAGE_SIZE); + if (!new) + goto err_alloc; + alloc_bytes += PAGE_SIZE; + pud_populate(&init_mm, pud, new); + } + + pmd 
= pmd_offset(pud, vstart); + if (pmd_none(*pmd)) { + pte_t *new; + + if (kernel_can_map_hugepmd(vstart, vend, use_huge)) { + vstart = kernel_map_hugepmd(vstart, vend, pmd); + continue; + } + new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, + PAGE_SIZE); + if (!new) + goto err_alloc; + alloc_bytes += PAGE_SIZE; + pmd_populate_kernel(&init_mm, pmd, new); + } + + pte = pte_offset_kernel(pmd, vstart); + this_end = (vstart + PMD_SIZE) & PMD_MASK; + if (this_end > vend) + this_end = vend; + + while (vstart < this_end) { + pte_val(*pte) = (paddr | pgprot_val(prot)); + + vstart += PAGE_SIZE; + paddr += PAGE_SIZE; + pte++; + } + } + + return alloc_bytes; + +err_alloc: + panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); + return -ENOMEM; +} + +static void __init flush_all_kernel_tsbs(void) +{ + int i; + + for (i = 0; i < KERNEL_TSB_NENTRIES; i++) { + struct tsb *ent = &swapper_tsb[i]; + + ent->tag = (1UL << TSB_TAG_INVALID_BIT); + } +#ifndef CONFIG_DEBUG_PAGEALLOC + for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) { + struct tsb *ent = &swapper_4m_tsb[i]; + + ent->tag = (1UL << TSB_TAG_INVALID_BIT); + } +#endif +} + +extern unsigned int kvmap_linear_patch[1]; + +static void __init kernel_physical_mapping_init(void) +{ + unsigned long mem_alloced = 0UL; + bool use_huge = true; + unsigned long start_pfn, end_pfn; + int i, nid; + +#ifdef CONFIG_DEBUG_PAGEALLOC + use_huge = false; +#endif + for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { + mem_alloced += kernel_map_range(start_pfn << PAGE_SHIFT, + end_pfn << PAGE_SHIFT, PAGE_KERNEL, use_huge); + } + + printk("Allocated %ld bytes for kernel page tables.\n", + mem_alloced); + + kvmap_linear_patch[0] = 0x01000000; /* nop */ + flushi(&kvmap_linear_patch[0]); + + flush_all_kernel_tsbs(); + + __flush_tlb_all(); +} + +#ifdef CONFIG_DEBUG_PAGEALLOC +void __kernel_map_pages(struct page *page, int numpages, int enable) +{ + unsigned long phys_start = 
page_to_pfn(page) << PAGE_SHIFT; + unsigned long phys_end = phys_start + (numpages * PAGE_SIZE); + + kernel_map_range(phys_start, phys_end, + (enable ? PAGE_KERNEL : __pgprot(0)), false); + + flush_tsb_kernel_range(PAGE_OFFSET + phys_start, + PAGE_OFFSET + phys_end); + + /* we should perform an IPI and flush all tlbs, + * but that can deadlock->flush only current cpu. + */ + __flush_tlb_kernel_range(PAGE_OFFSET + phys_start, + PAGE_OFFSET + phys_end); +} +#endif + +unsigned long __init find_ecache_flush_span(unsigned long size) +{ + return ~0UL; +} + +/* e90s support a full 64-bit virtual + * address, so we can use all that our page tables + * support. + */ +unsigned long sparc64_va_hole_top = 0xfff0000000000000UL; +unsigned long sparc64_va_hole_bottom = 0x0010000000000000UL; + +unsigned long PAGE_OFFSET = 0xfff0000000000000UL; +EXPORT_SYMBOL(PAGE_OFFSET); + +unsigned long VMALLOC_END = ((0x0010000000000000UL >> 1) + + (0x0010000000000000UL >> 2)); +EXPORT_SYMBOL(VMALLOC_END); + +static void __init tsb_phys_patch(void) +{ + struct tsb_ldquad_phys_patch_entry *pquad; + struct tsb_phys_patch_entry *p; + + pquad = &__tsb_ldquad_phys_patch; + while (pquad < &__tsb_ldquad_phys_patch_end) { + unsigned long addr = pquad->addr; + + if (tlb_type == hypervisor) + *(unsigned int *) addr = pquad->sun4v_insn; + else + *(unsigned int *) addr = pquad->sun4u_insn; + wmb(); + __asm__ __volatile__("flush %0" + : /* no outputs */ + : "r" (addr)); + + pquad++; + } + + p = &__tsb_phys_patch; + while (p < &__tsb_phys_patch_end) { + unsigned long addr = p->addr; + + *(unsigned int *) addr = p->insn; + wmb(); + __asm__ __volatile__("flush %0" + : /* no outputs */ + : "r" (addr)); + + p++; + } +} + +/* Don't mark as init, we give this to the Hypervisor. 
*/ +#ifndef CONFIG_DEBUG_PAGEALLOC +#define NUM_KTSB_DESCR 2 +#else +#define NUM_KTSB_DESCR 1 +#endif + +/* The swapper TSBs are loaded with a base sequence of: + * + * sethi %uhi(SYMBOL), REG1 + * sethi %hi(SYMBOL), REG2 + * or REG1, %ulo(SYMBOL), REG1 + * or REG2, %lo(SYMBOL), REG2 + * sllx REG1, 32, REG1 + * or REG1, REG2, REG1 + * + * When we use physical addressing for the TSB accesses, we patch the + * first four instructions in the above sequence. + */ + +static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa) +{ + unsigned long high_bits, low_bits; + + high_bits = (pa >> 32) & 0xffffffff; + low_bits = (pa >> 0) & 0xffffffff; + + while (start < end) { + unsigned int *ia = (unsigned int *)(unsigned long)*start; + + ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10); + __asm__ __volatile__("flush %0" : : "r" (ia)); + + ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10); + __asm__ __volatile__("flush %0" : : "r" (ia + 1)); + + ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff); + __asm__ __volatile__("flush %0" : : "r" (ia + 2)); + + ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff); + __asm__ __volatile__("flush %0" : : "r" (ia + 3)); + + start++; + } +} + +static void ktsb_phys_patch(void) +{ + extern unsigned int __swapper_tsb_phys_patch; + extern unsigned int __swapper_tsb_phys_patch_end; + unsigned long ktsb_pa; + + ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); + patch_one_ktsb_phys(&__swapper_tsb_phys_patch, + &__swapper_tsb_phys_patch_end, ktsb_pa); +#ifndef CONFIG_DEBUG_PAGEALLOC + { + extern unsigned int __swapper_4m_tsb_phys_patch; + extern unsigned int __swapper_4m_tsb_phys_patch_end; + ktsb_pa = (kern_base + + ((unsigned long)&swapper_4m_tsb[0] - KERNBASE)); + patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch, + &__swapper_4m_tsb_phys_patch_end, ktsb_pa); + } +#endif +} + +static void __init sun4u_linear_pte_xor_finalize(void) +{ +#ifndef CONFIG_DEBUG_PAGEALLOC + /* This is where we would add Panther support 
for + * 32MB and 256MB pages. + */ +#endif +} + +/* paging_init() sets up the page tables */ + +static unsigned long last_valid_pfn; + +static void sun4u_pgprot_init(void); +static void sun4v_pgprot_init(void); + +#define KERNEL_PAGE_SZ 0x400000 +void flush_locked_tte(void) +{ + int i; + unsigned long va; + for (i = num_kernel_image_mappings; i < 4; i++) { + va = KERNBASE + i * KERNEL_PAGE_SZ; + spitfire_flush_dtlb_nucleus_page(va); + spitfire_flush_itlb_nucleus_page(va); + } + + for (va = LOW_OBP_ADDRESS; va < HI_OBP_ADDRESS; va += KERNEL_PAGE_SZ) { + spitfire_flush_dtlb_nucleus_page(va); + spitfire_flush_itlb_nucleus_page(va); + } +} + +void __init paging_init(void) +{ + unsigned long end_pfn, shift; + unsigned long real_end; + + /* These build time checks make sure that the dcache_dirty_cpu() + * page->flags usage will work. + * + * When a page gets marked as dcache-dirty, we store the + * cpu number starting at bit 32 in the page->flags. Also, + * functions like clear_dcache_dirty_cpu use the cpu mask + * in 13-bit signed-immediate instruction fields. + */ + + /* + * Page flags must not reach into upper 32 bits that are used + * for the cpu number + */ + BUILD_BUG_ON(NR_PAGEFLAGS > 32); + + /* + * The bit fields placed in the high range must not reach below + * the 32 bit boundary. Otherwise we cannot place the cpu field + * at the 32 bit boundary. + */ + BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH + + ilog2(roundup_pow_of_two(NR_CPUS)) > 32); + + BUILD_BUG_ON(NR_CPUS > 4096); + + kern_base = bootblock->info.kernel_base; + kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; + + /* Invalidate both kernel TSBs. 
*/ + memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); +#ifndef CONFIG_DEBUG_PAGEALLOC + memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); +#endif + + if (tlb_type == hypervisor) + sun4v_pgprot_init(); + else + sun4u_pgprot_init(); + + if (tlb_type == cheetah_plus || + tlb_type == hypervisor) { + tsb_phys_patch(); + ktsb_phys_patch(); + } + + read_obp_translations(); + + set_bit(0, mmu_context_bmap); + + shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); + + real_end = (unsigned long)_end; + num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB); + printk("Kernel: Using %d locked TLB entries for main kernel image.\n", + num_kernel_image_mappings); + + /* Set kernel pgd to upper alias so physical page computations + * work. + */ + init_mm.pgd += ((shift) / (sizeof(pgd_t))); + + memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir)); + + inherit_prom_mappings(); + + { + unsigned long impl, ver; + + cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K | + HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB); + + __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver)); + impl = ((ver >> 32) & 0xffff); + if (impl == PANTHER_IMPL) + cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB | + HV_PGSZ_MASK_256MB); + + sun4u_linear_pte_xor_finalize(); + } + + flush_locked_tte(); + + /* Flush the TLBs and the 4M TSB so that the updated linear + * pte XOR settings are realized for all mappings. + */ + __flush_tlb_all(); +#ifndef CONFIG_DEBUG_PAGEALLOC + memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); +#endif + __flush_tlb_all(); + + /* Setup bootmem... 
*/ + last_valid_pfn = end_pfn = bootmem_init(); + + kernel_physical_mapping_init(); + + { + unsigned long max_zone_pfns[MAX_NR_ZONES]; + + memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); + + max_zone_pfns[ZONE_NORMAL] = end_pfn; + + free_area_init_nodes(max_zone_pfns); + } + + printk("Booting Linux...\n"); +} + +static void __init register_page_bootmem_info(void) +{ +#ifdef CONFIG_NEED_MULTIPLE_NODES + int i; + + for_each_online_node(i) + if (NODE_DATA(i)->node_spanned_pages) + register_page_bootmem_info_node(NODE_DATA(i)); +#endif +} +void __init mem_init(void) +{ + high_memory = __va(last_valid_pfn << PAGE_SHIFT); + + memblock_free_all(); + + /* + * Must be done after boot memory is put on freelist, because here we + * might set fields in deferred struct pages that have not yet been + * initialized, and memblock_free_all() initializes all the reserved + * deferred pages for us. + */ + register_page_bootmem_info(); + + /* + * Set up the zero page, mark it reserved, so that page count + * is not manipulated when freeing the page from user ptes. + */ + mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0); + if (mem_map_zero == NULL) { + prom_printf("paging_init: Cannot alloc zero page.\n"); + prom_halt(); + } + mark_page_reserved(mem_map_zero); + + totalram_real_pages = get_num_physpages(); + + mem_init_print_info(NULL); + +} + +void free_initmem(void) +{ + unsigned long addr, initend; + int do_free = 1; + + /* If the physical memory maps were trimmed by kernel command + * line options, don't even try freeing this initmem stuff up. + * The kernel image could have been in the trimmed out region + * and if so the freeing below will free invalid page structs. + */ + if (cmdline_memory_size) + do_free = 0; + + /* + * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes. 
+ */ + addr = PAGE_ALIGN((unsigned long)(__init_begin)); + initend = (unsigned long)(__init_end) & PAGE_MASK; + for (; addr < initend; addr += PAGE_SIZE) { + unsigned long page; + + page = (addr + + ((unsigned long) __va(kern_base)) - + ((unsigned long) KERNBASE)); + memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); + + if (do_free) + free_reserved_page(virt_to_page(page)); + } +} + +#ifdef CONFIG_BLK_DEV_INITRD +void free_initrd_mem(unsigned long start, unsigned long end) +{ + free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, + "initrd"); +} +#endif + +#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) +#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) +#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) +#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V) + +#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_R) +#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R) + +pgprot_t PAGE_KERNEL __read_mostly; +EXPORT_SYMBOL(PAGE_KERNEL); + +pgprot_t PAGE_KERNEL_LOCKED __read_mostly; +pgprot_t PAGE_COPY __read_mostly; + +pgprot_t PAGE_SHARED __read_mostly; +EXPORT_SYMBOL(PAGE_SHARED); + +unsigned long pg_iobits __read_mostly; + +unsigned long _PAGE_IE __read_mostly; +EXPORT_SYMBOL(_PAGE_IE); + +unsigned long _PAGE_E __read_mostly; +EXPORT_SYMBOL(_PAGE_E); + +unsigned long _PAGE_CACHE __read_mostly; +EXPORT_SYMBOL(_PAGE_CACHE); + +#ifdef CONFIG_SPARSEMEM_VMEMMAP +int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend, + int node, struct vmem_altmap *altmap) +{ + unsigned long pte_base; + + pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U | + _PAGE_CP_4U | _PAGE_CV_4U | + _PAGE_P_4U | _PAGE_W_4U); + + pte_base |= _PAGE_PMD_HUGE; + + vstart = vstart & PMD_MASK; + vend = ALIGN(vend, PMD_SIZE); + for (; vstart < vend; vstart += PMD_SIZE) { + pgd_t *pgd = vmemmap_pgd_populate(vstart, node); + unsigned long pte; + pud_t *pud; + pmd_t *pmd; + + if (!pgd) + return -ENOMEM; + + pud = 
vmemmap_pud_populate(pgd, vstart, node); + if (!pud) + return -ENOMEM; + + pmd = pmd_offset(pud, vstart); + pte = pmd_val(*pmd); + if (!(pte & _PAGE_VALID)) { + void *block = vmemmap_alloc_block(PMD_SIZE, node); + + if (!block) + return -ENOMEM; + + pmd_val(*pmd) = pte_base | __pa(block); + } + } + + return 0; +} + +void vmemmap_free(unsigned long start, unsigned long end, + struct vmem_altmap *altmap) +{ +} +#endif /* CONFIG_SPARSEMEM_VMEMMAP */ + +static void prot_init_common(unsigned long page_none, + unsigned long page_shared, + unsigned long page_copy, + unsigned long page_readonly, + unsigned long page_exec_bit) +{ + PAGE_COPY = __pgprot(page_copy); + PAGE_SHARED = __pgprot(page_shared); + + protection_map[0x0] = __pgprot(page_none); + protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit); + protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit); + protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit); + protection_map[0x4] = __pgprot(page_readonly); + protection_map[0x5] = __pgprot(page_readonly); + protection_map[0x6] = __pgprot(page_copy); + protection_map[0x7] = __pgprot(page_copy); + protection_map[0x8] = __pgprot(page_none); + protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit); + protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit); + protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit); + protection_map[0xc] = __pgprot(page_readonly); + protection_map[0xd] = __pgprot(page_readonly); + protection_map[0xe] = __pgprot(page_shared); + protection_map[0xf] = __pgprot(page_shared); +} + +static void __init sun4u_pgprot_init(void) +{ + unsigned long page_none, page_shared, page_copy, page_readonly; + unsigned long page_exec_bit; + int i; + + PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | + _PAGE_CACHE_4U | _PAGE_P_4U | + __ACCESS_BITS_4U | __DIRTY_BITS_4U | + _PAGE_EXEC_4U); + PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | + _PAGE_CACHE_4U | _PAGE_P_4U | + __ACCESS_BITS_4U | 
__DIRTY_BITS_4U | + _PAGE_EXEC_4U | _PAGE_L_4U); + + _PAGE_IE = _PAGE_IE_4U; + _PAGE_E = _PAGE_E_4U; + _PAGE_CACHE = _PAGE_CACHE_4U; + + pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U | + __ACCESS_BITS_4U | _PAGE_E_4U); + +#ifdef CONFIG_DEBUG_PAGEALLOC + kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET; +#else + kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ + PAGE_OFFSET; +#endif + kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U | + _PAGE_P_4U | _PAGE_W_4U); + + for (i = 1; i < 4; i++) + kern_linear_pte_xor[i] = kern_linear_pte_xor[0]; + + _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U | + _PAGE_SZ64K_4U | _PAGE_SZ8K_4U | + _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U); + +#ifdef CONFIG_NUMA_BALANCING + page_none = _PAGE_PROTNONE | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U; +#else + page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U; +#endif + page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | + __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U); + page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | + __ACCESS_BITS_4U | _PAGE_EXEC_4U); + page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | + __ACCESS_BITS_4U | _PAGE_EXEC_4U); + + page_exec_bit = _PAGE_EXEC_4U; + + prot_init_common(page_none, page_shared, page_copy, page_readonly, + page_exec_bit); +} + +static void __init sun4v_pgprot_init(void) +{ + unsigned long page_none, page_shared, page_copy, page_readonly; + unsigned long page_exec_bit; + int i; + + PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID | + _PAGE_CACHE_4V | _PAGE_P_4V | + __ACCESS_BITS_4V | __DIRTY_BITS_4V | + _PAGE_EXEC_4V); + PAGE_KERNEL_LOCKED = PAGE_KERNEL; + + _PAGE_IE = _PAGE_IE_4V; + _PAGE_E = _PAGE_E_4V; + _PAGE_CACHE = _PAGE_CACHE_4V; + +#ifdef CONFIG_DEBUG_PAGEALLOC + kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET; +#else + kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ + PAGE_OFFSET; +#endif + kern_linear_pte_xor[0] |= (_PAGE_CP_4V | 
_PAGE_CV_4V | + _PAGE_P_4V | _PAGE_W_4V); + + for (i = 1; i < 4; i++) + kern_linear_pte_xor[i] = kern_linear_pte_xor[0]; + + pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V | + __ACCESS_BITS_4V | _PAGE_E_4V); + + _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V | + _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V | + _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V | + _PAGE_SZ64K_4V | _PAGE_SZ8K_4V); + + page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V; + page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | + __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V); + page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | + __ACCESS_BITS_4V | _PAGE_EXEC_4V); + page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | + __ACCESS_BITS_4V | _PAGE_EXEC_4V); + + page_exec_bit = _PAGE_EXEC_4V; + + prot_init_common(page_none, page_shared, page_copy, page_readonly, + page_exec_bit); +} + +unsigned long pte_sz_bits(unsigned long sz) +{ + if (tlb_type == hypervisor) { + switch (sz) { + case 8 * 1024: + default: + return _PAGE_SZ8K_4V; + case 64 * 1024: + return _PAGE_SZ64K_4V; + case 512 * 1024: + return _PAGE_SZ512K_4V; + case 4 * 1024 * 1024: + return _PAGE_SZ4MB_4V; + } + } else { + switch (sz) { + case 8 * 1024: + default: + return _PAGE_SZ8K_4U; + case 64 * 1024: + return _PAGE_SZ64K_4U; + case 512 * 1024: + return _PAGE_SZ512K_4U; + case 4 * 1024 * 1024: + return _PAGE_SZ4MB_4U; + } + } +} + +pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size) +{ + pte_t pte; + + pte_val(pte) = page | pgprot_val(pgprot_noncached(prot)); + pte_val(pte) |= (((unsigned long)space) << 32); + pte_val(pte) |= pte_sz_bits(page_size); + + return pte; +} + +static unsigned long kern_large_tte(unsigned long paddr) +{ + unsigned long val; + + val = (_PAGE_VALID | _PAGE_SZ4MB_4U | + _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U | + _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U); + if (tlb_type == hypervisor) + val = (_PAGE_VALID | _PAGE_SZ4MB_4V 
| + _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | + _PAGE_EXEC_4V | _PAGE_W_4V); + + return val | paddr; +} + +/* If not locked, zap it. */ +void __flush_tlb_all(void) +{ + unsigned long pstate; + int i; + + __asm__ __volatile__("flushw\n\t" + "rdpr %%pstate, %0\n\t" + "wrpr %0, %1, %%pstate" + : "=r" (pstate) + : "i" (PSTATE_IE)); + if (tlb_type == spitfire) { + for (i = 0; i < 64; i++) { + /* Spitfire Errata #32 workaround */ + /* NOTE: Always runs on spitfire, so no + * cheetah+ page size encodings. + */ + __asm__ __volatile__("stxa %0, [%1] %2\n\t" + "flush %%g6" + : /* No outputs */ + : "r" (0), + "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); + + if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) { + __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); + spitfire_put_dtlb_data(i, 0x0UL); + } + + /* Spitfire Errata #32 workaround */ + /* NOTE: Always runs on spitfire, so no + * cheetah+ page size encodings. + */ + __asm__ __volatile__("stxa %0, [%1] %2\n\t" + "flush %%g6" + : /* No outputs */ + : "r" (0), + "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); + + if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) { + __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); + spitfire_put_itlb_data(i, 0x0UL); + } + } + } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { + cheetah_flush_dtlb_all(); + cheetah_flush_itlb_all(); + } + __asm__ __volatile__("wrpr %0, 0, %%pstate" + : : "r" (pstate)); +} + +pte_t *pte_alloc_one_kernel(struct mm_struct *mm) +{ + struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO); + pte_t *pte = NULL; + + if (page) + pte = (pte_t *) page_address(page); + + return pte; +} + +pgtable_t pte_alloc_one(struct mm_struct *mm) +{ + struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!page) + return NULL; + if (!pgtable_pte_page_ctor(page)) { + free_unref_page(page); + return NULL; + } + return (pte_t *) 
page_address(page); +} + +void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +{ + free_page((unsigned long)pte); +} + +static void __pte_free(pgtable_t pte) +{ + struct page *page = virt_to_page(pte); + + pgtable_pte_page_dtor(page); + __free_page(page); +} + +void pte_free(struct mm_struct *mm, pgtable_t pte) +{ + __pte_free(pte); +} + +void pgtable_free(void *table, bool is_page) +{ + if (is_page) + __pte_free(table); + else + kmem_cache_free(pgtable_cache, table); +} + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t *pmd) +{ + unsigned long pte, flags; + struct mm_struct *mm; + pmd_t entry = *pmd; + + if (!pmd_large(entry) || !pmd_young(entry)) + return; + + pte = pmd_val(entry); + + /* Don't insert a non-valid PMD into the TSB, we'll deadlock. */ + if (!(pte & _PAGE_VALID)) + return; + + /* We are fabricating 8MB pages using 4MB real hw pages. */ + pte |= (addr & (1UL << REAL_HPAGE_SHIFT)); + + mm = vma->vm_mm; + + raw_spin_lock_irqsave(&mm->context.lock, flags); + + if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) + __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT, + addr, pte); + + raw_spin_unlock_irqrestore(&mm->context.lock, flags); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) +static void context_reload(void *__data) +{ + struct mm_struct *mm = __data; + + if (mm == current->mm) + load_secondary_context(mm); +} + +void hugetlb_setup(struct pt_regs *regs) +{ + struct mm_struct *mm = current->mm; + struct tsb_config *tp; + + if (faulthandler_disabled() || !mm) { + const struct exception_table_entry *entry; + + entry = search_exception_tables(regs->tpc); + if (entry) { + regs->tpc = entry->fixup; + regs->tnpc = regs->tpc + 4; + return; + } + pr_alert("Unexpected HugeTLB setup in atomic context.\n"); + die_if_kernel("HugeTSB in atomic", regs); + } + + tp = &mm->context.tsb_block[MM_TSB_HUGE]; + if 
(likely(tp->tsb == NULL)) + tsb_grow(mm, MM_TSB_HUGE, 0); + + tsb_context_switch(mm); + smp_tsb_sync(mm); + + /* On UltraSPARC-III+ and later, configure the second half of + * the Data-TLB for huge pages. + */ + if (tlb_type == cheetah_plus) { + unsigned long ctx, flags; + bool need_context_reload = false; + + raw_spin_lock_irqsave(&ctx_alloc_lock, flags); + ctx = mm->context.sparc64_ctx_val; + ctx &= ~CTX_PGSZ_MASK; + ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT; + ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT; + + if (ctx != mm->context.sparc64_ctx_val) { + /* When changing the page size fields, we + * must perform a context flush so that no + * stale entries match. This flush must + * occur with the original context register + * settings. + */ + do_flush_tlb_mm(mm); + + /* Reload the context register of all processors + * also executing in this address space. + */ + mm->context.sparc64_ctx_val = ctx; + need_context_reload = true; + } + raw_spin_unlock_irqrestore(&ctx_alloc_lock, flags); + + if (need_context_reload) + on_each_cpu(context_reload, mm, 0); + } +} +#endif + +#ifdef CONFIG_SMP +#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range +#else +#define do_flush_tlb_kernel_range __flush_tlb_kernel_range +#endif + +void flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) { + if (start < LOW_OBP_ADDRESS) { + flush_tsb_kernel_range(start, LOW_OBP_ADDRESS); + do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS); + } + if (end > HI_OBP_ADDRESS) { + flush_tsb_kernel_range(HI_OBP_ADDRESS, end); + do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end); + } + } else { + flush_tsb_kernel_range(start, end); + do_flush_tlb_kernel_range(start, end); + } +} + + +void copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma) +{ + char *vfrom, *vto; + + vfrom = kmap_atomic(from); + vto = kmap_atomic(to); + copy_user_page(vto, vfrom, vaddr, to); + kunmap_atomic(vto); + 
kunmap_atomic(vfrom); + + /* If this page has ADI enabled, copy over any ADI tags + * as well + */ + if (vma->vm_flags & VM_SPARC_ADI) { + unsigned long pfrom, pto, i, adi_tag; + + pfrom = page_to_phys(from); + pto = page_to_phys(to); + + for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) { + asm volatile("ldxa [%1] %2, %0\n\t" + : "=r" (adi_tag) + : "r" (i), "i" (ASI_MCD_REAL)); + asm volatile("stxa %0, [%1] %2\n\t" + : + : "r" (adi_tag), "r" (pto), + "i" (ASI_MCD_REAL)); + pto += adi_blksize(); + } + asm volatile("membar #Sync\n\t"); + } +} +EXPORT_SYMBOL(copy_user_highpage); + +void copy_highpage(struct page *to, struct page *from) +{ + char *vfrom, *vto; + + vfrom = kmap_atomic(from); + vto = kmap_atomic(to); + copy_page(vto, vfrom); + kunmap_atomic(vto); + kunmap_atomic(vfrom); + + /* If this platform is ADI enabled, copy any ADI tags + * as well + */ + if (adi_capable()) { + unsigned long pfrom, pto, i, adi_tag; + + pfrom = page_to_phys(from); + pto = page_to_phys(to); + + for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) { + asm volatile("ldxa [%1] %2, %0\n\t" + : "=r" (adi_tag) + : "r" (i), "i" (ASI_MCD_REAL)); + asm volatile("stxa %0, [%1] %2\n\t" + : + : "r" (adi_tag), "r" (pto), + "i" (ASI_MCD_REAL)); + pto += adi_blksize(); + } + asm volatile("membar #Sync\n\t"); + } +} +EXPORT_SYMBOL(copy_highpage); + +int valid_phys_addr_range(unsigned long addr, size_t size) +{ + int i; + int pfn = PFN_DOWN(addr); + size = PFN_UP(size); + for (i = 0; i < size; i++) { + if (!page_is_ram(pfn + i)) + return 0; + } + return 1; +} + +static int mmap_phys_addr_enable = 0; +static int __init mmap_phys_addr_setup(char *str) +{ + mmap_phys_addr_enable = 1; + return 1; +} +__setup("mmap-devmem-enable", mmap_phys_addr_setup); + +int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) +{ + int i; + if (mmap_phys_addr_enable) + return 1; + + size = PFN_UP(size); + for (i = 0; i < size; i++) { + if (!page_is_ram(pfn + i)) + return 0; + } + return 1; +} 
+ +static struct resource code_resource = { + .name = "Kernel code", + .flags = IORESOURCE_BUSY | IORESOURCE_MEM +}; + +static struct resource data_resource = { + .name = "Kernel data", + .flags = IORESOURCE_BUSY | IORESOURCE_MEM +}; + +static struct resource bss_resource = { + .name = "Kernel bss", + .flags = IORESOURCE_BUSY | IORESOURCE_MEM +}; + +static inline resource_size_t compute_kern_paddr(void *addr) +{ + return (resource_size_t) (addr - KERNBASE + kern_base); +} + +static void __init kernel_lds_init(void) +{ + code_resource.start = compute_kern_paddr(_text); + code_resource.end = compute_kern_paddr(_etext - 1); + data_resource.start = compute_kern_paddr(_etext); + data_resource.end = compute_kern_paddr(_edata - 1); + bss_resource.start = compute_kern_paddr(__bss_start); + bss_resource.end = compute_kern_paddr(_end - 1); +} + +/* + * System memory should not be in /proc/iomem but various tools expect it + * (eg kdump). + */ +static int __init add_system_ram_resources(void) +{ + struct memblock_region *reg; + + kernel_lds_init(); + + for_each_memblock(memory, reg) { + struct resource *res; + unsigned long base = reg->base; + unsigned long size = reg->size; + + res = kzalloc(sizeof(struct resource), GFP_KERNEL); + if (WARN_ON(!res)) + continue; + + res->name = "System RAM"; + res->start = base; + res->end = base + size - 1; + res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + if (WARN_ON(request_resource(&iomem_resource, res) < 0)) + continue; + + if (code_resource.start >= res->start && + code_resource.end <= res->end) + request_resource(res, &code_resource); + if (data_resource.start >= res->start && + data_resource.end <= res->end) + request_resource(res, &data_resource); + if (bss_resource.start >= res->start && + bss_resource.end <= res->end) + request_resource(res, &bss_resource); + + } + + return 0; +} +subsys_initcall(add_system_ram_resources); diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index 9e256d4d1f4c..eeffb804c467 100644 --- 
a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -106,6 +106,61 @@ static struct bit_map srmmu_nocache_map; static inline int srmmu_pmd_none(pmd_t pmd) { return !(pmd_val(pmd) & 0xFFFFFFF); } +static inline int srmmu_pmd_bad(pmd_t pmd) +{ return !(pmd_val(pmd) & (SRMMU_ET_PTD | SRMMU_ET_PTE)); } + +static inline int srmmu_pmd_present(pmd_t pmd) +{ return ((pmd_val(pmd) & (SRMMU_ET_PTD | SRMMU_ET_PTE))); } + +static inline void srmmu_pmd_clear(pmd_t *pmdp) { + int i; + for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) + srmmu_set_pte((pte_t *)&pmdp->pmdv[i], __pte(0)); +} + +static inline int srmmu_pgd_none(pgd_t pgd) +{ return !(pgd_val(pgd) & 0xFFFFFFF); } + +static inline int srmmu_pgd_bad(pgd_t pgd) +{ return !(pgd_val(pgd) & (SRMMU_ET_PTD | SRMMU_ET_PTE)); } + +static inline int srmmu_pgd_present(pgd_t pgd) +{ return ((pgd_val(pgd) & (SRMMU_ET_PTD | SRMMU_ET_PTE))); } + +static inline void srmmu_pgd_clear(pgd_t * pgdp) +{ srmmu_set_pte((pte_t *)pgdp, __pte(0)); } + +static inline pte_t srmmu_pte_wrprotect(pte_t pte) +{ return __pte(pte_val(pte) & ~SRMMU_WRITE);} + +static inline pte_t srmmu_pte_mkclean(pte_t pte) +{ return __pte(pte_val(pte) & ~SRMMU_DIRTY);} + +static inline pte_t srmmu_pte_mkold(pte_t pte) +{ return __pte(pte_val(pte) & ~SRMMU_REF);} + +static inline pte_t srmmu_pte_mkwrite(pte_t pte) +{ return __pte(pte_val(pte) | SRMMU_WRITE);} + +static inline pte_t srmmu_pte_mkdirty(pte_t pte) +{ return __pte(pte_val(pte) | SRMMU_DIRTY);} + +static inline pte_t srmmu_pte_mkyoung(pte_t pte) +{ return __pte(pte_val(pte) | SRMMU_REF);} + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. 
+ */ +static pte_t srmmu_mk_pte(struct page *page, pgprot_t pgprot) +{ return __pte(pfn_to_pte(page_to_pfn(page)) | pgprot_val(pgprot)); } + +static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot) +{ return __pte(phys_addr_to_pte(page) | pgprot_val(pgprot)); } + +static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space) +{ return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot_noncached(pgprot))); } + /* XXX should we hyper_flush_whole_icache here - Anton */ static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp) { diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index 3d72d2deb13b..52e228410818 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -26,6 +26,9 @@ void flush_tlb_pending(void) struct tlb_batch *tb = &get_cpu_var(tlb_batch); struct mm_struct *mm = tb->mm; +#ifdef CONFIG_WATCH_PREEMPT + this_cpu_or(nowatch_set, NOPWATCH_EXITMM); +#endif if (!tb->tlb_nr) goto out; @@ -51,10 +54,13 @@ out: put_cpu_var(tlb_batch); } + +#ifndef CONFIG_MCST_RT void arch_enter_lazy_mmu_mode(void) { - struct tlb_batch *tb = this_cpu_ptr(&tlb_batch); - + struct tlb_batch *tb; + migrate_disable(); + tb = this_cpu_ptr(&tlb_batch); tb->active = 1; } @@ -65,11 +71,42 @@ void arch_leave_lazy_mmu_mode(void) if (tb->tlb_nr) flush_tlb_pending(); tb->active = 0; + migrate_enable(); } +#endif /* CONFIG_MCST_RT */ + +#ifdef CONFIG_PREEMPTION +void check_lazy_mmu_end(void) +{ + struct tlb_batch *tb = this_cpu_ptr(&tlb_batch); + if (tb->active) { + current_thread_info()->status |= TS_LAZY_MMU; + tb->active = 0; + } +} + +void check_lazy_mmu_begin(void) +{ + if (current_thread_info()->status & TS_LAZY_MMU) { + struct tlb_batch *tb = this_cpu_ptr(&tlb_batch); + current_thread_info()->status &= ~TS_LAZY_MMU; + tb->active = 1; + } +} +#endif /*CONFIG_PREEMPTION*/ + + static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, bool exec, unsigned int hugepage_shift) { +#ifdef CONFIG_MCST_RT + vaddr &= PAGE_MASK; + if (exec) 
+ vaddr |= 0x1UL; + flush_tsb_user_page(mm, vaddr, hugepage_shift); + global_flush_tlb_page(mm, vaddr); +#else struct tlb_batch *tb = &get_cpu_var(tlb_batch); unsigned long nr; @@ -108,6 +145,7 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, out: put_cpu_var(tlb_batch); +#endif } void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, @@ -138,7 +176,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, } no_cache_flush: - if (!fullmm) + if (!fullmm && !mm->context.is_exit_mmap) tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift); } @@ -246,7 +284,11 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, { pmd_t old, entry; +#if defined(CONFIG_E90S) && defined(CONFIG_NUMA_BALANCING) + entry = __pmd(pmd_val(*pmdp) & ~(_PAGE_VALID | _PAGE_PROTNONE)); +#else entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID); +#endif old = pmdp_establish(vma, address, pmdp, entry); flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index f5edc28aa3a5..c1f7e65e03c0 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -16,6 +16,9 @@ #include #include #include +#ifdef CONFIG_MCST_RT +#include +#endif extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; @@ -121,7 +124,7 @@ void flush_tsb_user(struct tlb_batch *tb) struct mm_struct *mm = tb->mm; unsigned long nentries, base, flags; - spin_lock_irqsave(&mm->context.lock, flags); + raw_spin_lock_irqsave(&mm->context.lock, flags); if (tb->hugepage_shift < REAL_HPAGE_SHIFT) { base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; @@ -142,11 +145,10 @@ void flush_tsb_user(struct tlb_batch *tb) nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; if (tlb_type == cheetah_plus || tlb_type == hypervisor) base = __pa(base); - __flush_huge_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries, - tb->hugepage_shift); + __flush_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries); } #endif - 
spin_unlock_irqrestore(&mm->context.lock, flags); + raw_spin_unlock_irqrestore(&mm->context.lock, flags); } void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, @@ -154,7 +156,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, { unsigned long nentries, base, flags; - spin_lock_irqsave(&mm->context.lock, flags); + raw_spin_lock_irqsave(&mm->context.lock, flags); if (hugepage_shift < REAL_HPAGE_SHIFT) { base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; @@ -180,7 +182,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, nentries, hugepage_shift); } #endif - spin_unlock_irqrestore(&mm->context.lock, flags); + raw_spin_unlock_irqrestore(&mm->context.lock, flags); } #define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K @@ -408,6 +410,9 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss) new_cache_index = 0; for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) { new_rss_limit = tsb_size_to_rss_limit(new_size); +#ifdef CONFIG_MCST_RT + if (!rts_act_mask) +#endif if (new_rss_limit > rss) break; new_cache_index++; @@ -470,7 +475,7 @@ retry_tsb_alloc: * the lock and ask all other cpus running this address space * to run tsb_context_switch() to see the new TSB table. */ - spin_lock_irqsave(&mm->context.lock, flags); + raw_spin_lock_irqsave(&mm->context.lock, flags); old_tsb = mm->context.tsb_block[tsb_index].tsb; old_cache_index = @@ -485,7 +490,7 @@ retry_tsb_alloc: */ if (unlikely(old_tsb && (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) { - spin_unlock_irqrestore(&mm->context.lock, flags); + raw_spin_unlock_irqrestore(&mm->context.lock, flags); kmem_cache_free(tsb_caches[new_cache_index], new_tsb); return; @@ -509,12 +514,19 @@ retry_tsb_alloc: copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size, tsb_index == MM_TSB_BASE ? 
PAGE_SHIFT : REAL_HPAGE_SHIFT); +#ifdef CONFIG_MCST_RT + if (rts_act_mask) { + pr_warn("tsb_grow() %7lu->%7lu cpu %d in %s-%d\n", + old_size, new_size, smp_processor_id(), + current->comm, current->pid); + } +#endif } mm->context.tsb_block[tsb_index].tsb = new_tsb; setup_tsb_params(mm, tsb_index, new_size); - spin_unlock_irqrestore(&mm->context.lock, flags); + raw_spin_unlock_irqrestore(&mm->context.lock, flags); /* If old_tsb is NULL, we're being invoked for the first time * from init_new_context(). @@ -542,12 +554,11 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) #endif unsigned int i; - spin_lock_init(&mm->context.lock); + raw_spin_lock_init(&mm->context.lock); mm->context.sparc64_ctx_val = 0UL; - mm->context.tag_store = NULL; - spin_lock_init(&mm->context.tag_lock); + mm->context.is_exit_mmap = 0; #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) /* We reset them to zero because the fork() page copying @@ -606,30 +617,12 @@ void destroy_context(struct mm_struct *mm) for (i = 0; i < MM_NUM_TSBS; i++) tsb_destroy_one(&mm->context.tsb_block[i]); - spin_lock_irqsave(&ctx_alloc_lock, flags); + raw_spin_lock_irqsave(&ctx_alloc_lock, flags); if (CTX_VALID(mm->context)) { unsigned long nr = CTX_NRBITS(mm->context); mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63)); } - spin_unlock_irqrestore(&ctx_alloc_lock, flags); - - /* If ADI tag storage was allocated for this task, free it */ - if (mm->context.tag_store) { - tag_storage_desc_t *tag_desc; - unsigned long max_desc; - unsigned char *tags; - - tag_desc = mm->context.tag_store; - max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t); - for (i = 0; i < max_desc; i++) { - tags = tag_desc->tags; - tag_desc->tags = NULL; - kfree(tags); - tag_desc++; - } - kfree(mm->context.tag_store); - mm->context.tag_store = NULL; - } + raw_spin_unlock_irqrestore(&ctx_alloc_lock, flags); } diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index d220b6848746..3bc572ab30ee 100644 --- 
a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S @@ -17,6 +17,11 @@ #include #include #include +#ifdef CONFIG_E90S +#include +#include +#include +#endif /*CONFIG_E90S*/ /* Basically, most of the Spitfire vs. Cheetah madness * has to do with the fact that Cheetah does not support @@ -52,6 +57,9 @@ __flush_tlb_mm: /* 19 insns */ nop nop nop +#ifdef CONFIG_MCST + nop +#endif .align 32 .globl __flush_tlb_page @@ -190,6 +198,9 @@ __spitfire_flush_tlb_mm_slow: .align 32 .globl __flush_icache_page __flush_icache_page: /* %o0 = phys_page */ +#ifdef CONFIG_RMO + membar #StoreStore +#endif /* CONFIG_RMO */ srlx %o0, PAGE_SHIFT, %o0 sethi %hi(PAGE_OFFSET), %g1 sllx %o0, PAGE_SHIFT, %o0 @@ -268,6 +279,63 @@ __cheetah_flush_tlb_mm: /* 19 insns */ retl wrpr %g7, 0x0, %pstate +#ifdef CONFIG_MCST +__e90s_r2000_with_bug_flush_tlb_mm: /* 20 insns */ + /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ + rdpr %pstate, %g7 + andn %g7, PSTATE_IE, %g2 + wrpr %g2, 0x0, %pstate + wrpr %g0, 1, %tl + mov PRIMARY_CONTEXT, %o2 + mov 0x40, %g3 + mov 0x90, %o3 + ldxa [%o2] ASI_DMMU, %g2 + srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o1 + sllx %o1, CTX_PGSZ1_NUC_SHIFT, %o1 + or %o0, %o1, %o0 /* Preserve nucleus page size fields */ + stxa %o0, [%o2] ASI_DMMU + stxa %g0, [%g3] ASI_DMMU_DEMAP + stxa %g0, [%o3] ASI_IMMU_DEMAP + stxa %g2, [%o2] ASI_DMMU + sethi %hi(KERNBASE), %o2 + flush %o2 + wrpr %g0, 0, %tl + retl + wrpr %g7, 0x0, %pstate + +e90_fix_fast_instruction_access_MMU_miss_hw_bug: /* 3 * 8 insns */ + rdpr %tl, %g1 + cmp %g1, 2 + bne 1f + nop + wrpr 1, %tl + nop + retry + nop + + nop + nop + nop + nop + nop + nop + nop + nop + +1: BTRAPTL1(0x64) + +e90_fix_tt0_hw_bug: /* 8 insns */ + rdpr %tpc, %g7 + brz %g7, 1f + nop + retry + + nop +1: udiv %g0, %g0, %g0 + nop + nop +#endif + __cheetah_flush_tlb_page: /* 22 insns */ /* %o0 = context, %o1 = vaddr */ rdpr %pstate, %g7 @@ -284,7 +352,7 @@ __cheetah_flush_tlb_page: /* 22 insns */ be,pn %icc, 1f andn %o1, 1, %o3 stxa %g0, [%o3] ASI_IMMU_DEMAP 
-1: stxa %g0, [%o3] ASI_DMMU_DEMAP +1: stxa %g0, [%o3] ASI_DMMU_DEMAP membar #Sync stxa %g2, [%o4] ASI_DMMU sethi %hi(KERNBASE), %o4 @@ -312,7 +380,7 @@ __cheetah_flush_tlb_pending: /* 27 insns */ be,pn %icc, 2f andn %o3, 1, %o3 stxa %g0, [%o3] ASI_IMMU_DEMAP -2: stxa %g0, [%o3] ASI_DMMU_DEMAP +2: stxa %g0, [%o3] ASI_DMMU_DEMAP membar #Sync brnz,pt %o1, 1b nop @@ -372,6 +440,7 @@ __cheetah_flush_dcache_page: /* 11 insns */ nop #endif /* DCACHE_ALIASING_POSSIBLE */ +#ifdef CONFIG_SPARC64_SUN4V /* Hypervisor specific versions, patched at boot time. */ __hypervisor_tlb_tl0_error: save %sp, -192, %sp @@ -500,6 +569,7 @@ __hypervisor_flush_dcache_page: /* 2 insns */ retl nop #endif +#endif /*CONFIG_SPARC64_SUN4V*/ tlb_patch_one: 1: lduw [%o1], %g1 @@ -521,7 +591,7 @@ tlb_patch_one: * %g1 address arg 1 (tlb page and range flushes) * %g7 address arg 2 (tlb range flush only) * - * %g6 scratch 1 + * %g6 scratch 1 (unusable for e90s) * %g2 scratch 2 * %g3 scratch 3 * %g4 scratch 4 @@ -554,6 +624,32 @@ xcall_flush_tlb_mm: /* 24 insns */ nop nop +#ifdef CONFIG_E90S + .align 32 +__e90s_r2000_with_bug_xcall_flush_tlb_mm: /* 21 insns */ + mov PRIMARY_CONTEXT, %g2 + ldxa [%g2] ASI_DMMU, %g3 + srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4 + sllx %g4, CTX_PGSZ1_NUC_SHIFT, %g4 + or %g5, %g4, %g5 /* Preserve nucleus page size fields */ + stxa %g5, [%g2] ASI_DMMU + mov 0x40, %g4 + mov 0x90, %g5 + stxa %g0, [%g4] ASI_DMMU_DEMAP + stxa %g0, [%g5] ASI_IMMU_DEMAP + stxa %g3, [%g2] ASI_DMMU + retry + nop + nop + nop + nop + nop + nop + nop + nop + nop +#endif + .globl xcall_flush_tlb_page xcall_flush_tlb_page: /* 20 insns */ /* %g5=context, %g1=vaddr */ @@ -633,11 +729,13 @@ xcall_sync_tick: 661: rdpr %pstate, %g2 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate +#ifdef CONFIG_SPARC64_SUN4V .section .sun4v_2insn_patch, "ax" .word 661b nop nop .previous +#endif /*CONFIG_SPARC64_SUN4V*/ rdpr %pil, %g2 wrpr %g0, PIL_NORMAL_MAX, %pil @@ -680,6 +778,9 @@ xcall_fetch_glob_regs: sllx %g2, TRAP_BLOCK_SZ_SHIFT, %g2 
add %g7, %g2, %g7 ldx [%g7 + TRAP_PER_CPU_THREAD], %g3 +#ifdef CONFIG_RMO + membar #StoreStore +#endif /* CONFIG_RMO */ stx %g3, [%g1 + GR_SNAP_THREAD] retry @@ -838,6 +939,7 @@ xcall_flush_dcache_page_spitfire: /* %g1 == physical page address nop nop +#ifdef CONFIG_SPARC64_SUN4V /* %g5: error * %g6: tlb op */ @@ -892,64 +994,43 @@ __hypervisor_xcall_flush_tlb_page: /* 20 insns */ sllx %o0, PAGE_SHIFT, %o0 ta HV_MMU_UNMAP_ADDR_TRAP mov HV_MMU_UNMAP_ADDR_TRAP, %g6 - brnz,a,pn %o0, 1f + brnz,a,pn %o0, __hypervisor_tlb_xcall_error mov %o0, %g5 mov %g2, %o0 mov %g3, %o1 mov %g4, %o2 membar #Sync retry -1: sethi %hi(__hypervisor_tlb_xcall_error), %g4 - jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0 - nop .globl __hypervisor_xcall_flush_tlb_kernel_range -__hypervisor_xcall_flush_tlb_kernel_range: /* 44 insns */ +__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */ /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */ sethi %hi(PAGE_SIZE - 1), %g2 or %g2, %lo(PAGE_SIZE - 1), %g2 andn %g1, %g2, %g1 andn %g7, %g2, %g7 sub %g7, %g1, %g3 - srlx %g3, 18, %g7 add %g2, 1, %g2 sub %g3, %g2, %g3 mov %o0, %g2 mov %o1, %g4 - brnz,pn %g7, 2f - mov %o2, %g7 + mov %o2, %g7 1: add %g1, %g3, %o0 /* ARG0: virtual address */ mov 0, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ ta HV_MMU_UNMAP_ADDR_TRAP mov HV_MMU_UNMAP_ADDR_TRAP, %g6 - brnz,pn %o0, 1f + brnz,pn %o0, __hypervisor_tlb_xcall_error mov %o0, %g5 sethi %hi(PAGE_SIZE), %o2 brnz,pt %g3, 1b sub %g3, %o2, %g3 -5: mov %g2, %o0 + mov %g2, %o0 mov %g4, %o1 mov %g7, %o2 membar #Sync retry -1: sethi %hi(__hypervisor_tlb_xcall_error), %g4 - jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0 - nop -2: mov %o3, %g1 - mov %o5, %g3 - mov 0, %o0 /* ARG0: CPU lists unimplemented */ - mov 0, %o1 /* ARG1: CPU lists unimplemented */ - mov 0, %o2 /* ARG2: mmu context == nucleus */ - mov HV_MMU_ALL, %o3 /* ARG3: flags */ - mov HV_FAST_MMU_DEMAP_CTX, %o5 - ta HV_FAST_TRAP - mov %g1, %o3 - brz,pt %o0, 5b - mov %g3, %o5 - mov 
HV_FAST_MMU_DEMAP_CTX, %g6 - ba,pt %xcc, 1b - clr %g5 +#endif /*CONFIG_SPARC64_SUN4V*/ /* These just get rescheduled to PIL vectors. */ .globl xcall_call_function @@ -985,13 +1066,70 @@ xcall_kgdb_capture: cheetah_patch_cachetlbops: save %sp, -128, %sp +#ifdef CONFIG_E90S + rdpr %ver, %o3 + srlx %o3, 24, %o3 + and %o3, 0xff, %o3 + cmp %o3, 0x11 /* r2000, revision 1 */ + bne 1f + set __flush_tlb_mm, %o0 + set __e90s_r2000_with_bug_flush_tlb_mm, %o1 + call tlb_patch_one + mov 20, %o2 +#ifdef CONFIG_SMP + set xcall_flush_tlb_mm, %o0 + set __e90s_r2000_with_bug_xcall_flush_tlb_mm, %o1 + call tlb_patch_one + mov 21, %o2 +#endif + set sparc64_ttable_tl0, %o0 + set e90_fix_tt0_hw_bug, %o1 + call tlb_patch_one + mov 8, %o2 + + b,a 2f +1: + cmp %o3, 0x20 /* r2000p */ + bl 3f + nop + set rtrap_irq, %o0 + set r2000p_rtrap_irq, %o1 + call tlb_patch_one + mov 2, %o2 + +#define BRANCH_ALWAYS 0x10680000 +#define NOP 0x01000000 +#define GEN_DO_PATCH(OLD, NEW) \ + sethi %hi(NEW), %g1; \ + or %g1, %lo(NEW), %g1; \ + sethi %hi(OLD), %g2; \ + or %g2, %lo(OLD), %g2; \ + sub %g1, %g2, %g1; \ + sethi %hi(BRANCH_ALWAYS), %g3; \ + sll %g1, 11, %g1; \ + srl %g1, 11 + 2, %g1; \ + or %g3, %lo(BRANCH_ALWAYS), %g3; \ + or %g3, %g1, %g3; \ + stw %g3, [%g2]; \ + sethi %hi(NOP), %g3; \ + or %g3, %lo(NOP), %g3; \ + stw %g3, [%g2 + 0x4]; \ + flush %g2; + + GEN_DO_PATCH(tl0_ivec, r2000p_do_ivec) + GEN_DO_PATCH(tl1_ivec, r2000p_do_ivec) + +3: +#endif /*CONFIG_E90S*/ sethi %hi(__flush_tlb_mm), %o0 or %o0, %lo(__flush_tlb_mm), %o0 sethi %hi(__cheetah_flush_tlb_mm), %o1 or %o1, %lo(__cheetah_flush_tlb_mm), %o1 call tlb_patch_one mov 19, %o2 - +#ifdef CONFIG_E90S +2: +#endif sethi %hi(__flush_tlb_page), %o0 or %o0, %lo(__flush_tlb_page), %o0 sethi %hi(__cheetah_flush_tlb_page), %o1 @@ -1034,6 +1172,7 @@ cheetah_patch_cachetlbops: ret restore +#ifdef CONFIG_SPARC64_SUN4V .globl hypervisor_patch_cachetlbops hypervisor_patch_cachetlbops: save %sp, -128, %sp @@ -1100,3 +1239,23 @@ 
hypervisor_patch_cachetlbops: ret restore +#endif /*CONFIG_SPARC64_SUN4V*/ + + +#ifdef CONFIG_E90S + .globl xcall_dump_stack_chain +xcall_dump_stack_chain: + rdpr %pstate, %g2 + wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate + rdpr %pil, %g2 + wrpr %g0, 15, %pil + sethi %hi(110f), %g7 + b,pt %xcc, etrap_irq +110: or %g7, %lo(110b), %g7 + call dump_backtrace_smp + add %sp, PTREGS_OFF, %o0 + clr %l6 + /* Has to be a non-v9 branch due to the large distance. */ + b rtrap_xcall + ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 +#endif /*CONFIG_E90S*/ diff --git a/arch/sparc/prom/printf.c b/arch/sparc/prom/printf.c index dcee3dfa6de9..8c6e837e01ca 100644 --- a/arch/sparc/prom/printf.c +++ b/arch/sparc/prom/printf.c @@ -56,7 +56,7 @@ void notrace prom_write(const char *buf, unsigned int n) raw_spin_unlock_irqrestore(&console_write_lock, flags); } - +#ifndef CONFIG_E90S void notrace prom_printf(const char *fmt, ...) { va_list args; @@ -68,3 +68,4 @@ void notrace prom_printf(const char *fmt, ...) prom_write(ppbuf, i); } +#endif diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 2f951c7828b7..e2911613c0a2 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -228,6 +228,7 @@ config X86 select VIRT_TO_BUS select X86_FEATURE_NAMES if PROC_FS select PROC_PID_ARCH_STATUS if PROC_FS + select MCST config INSTRUCTION_DECODER def_bool y @@ -1067,6 +1068,10 @@ config SCHED_MC_PRIO If unsure say Y here. 
+config MCST + bool "MCST additions" + default y + config UP_LATE_INIT def_bool y depends on !SMP && X86_LOCAL_APIC diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 69f0cb01c666..e75bdb29b8c8 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -4,12 +4,12 @@ # select defconfig based on actual architecture ifeq ($(ARCH),x86) ifeq ($(shell uname -m),x86_64) - KBUILD_DEFCONFIG := x86_64_defconfig + KBUILD_DEFCONFIG ?= x86_64_defconfig else - KBUILD_DEFCONFIG := i386_defconfig + KBUILD_DEFCONFIG ?= i386_defconfig endif else - KBUILD_DEFCONFIG := $(ARCH)_defconfig + KBUILD_DEFCONFIG ?= $(ARCH)_defconfig endif # For gcc stack alignment is specified with -mpreferred-stack-boundary, @@ -296,6 +296,10 @@ PHONY += install install: $(Q)$(MAKE) $(build)=$(boot) $@ +PHONY += build-install +build-install: + $(CONFIG_SHELL) scripts/gen-osl-build -l $(srctree) -m $(MODLIB) + PHONY += vdso_install vdso_install: $(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@ diff --git a/arch/x86/configs/mcst.config b/arch/x86/configs/mcst.config new file mode 100644 index 000000000000..dbcea4de27ca --- /dev/null +++ b/arch/x86/configs/mcst.config @@ -0,0 +1,3202 @@ +CONFIG_LOCALVERSION="-x86-64" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT_VOLUNTARY=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_BLK_CGROUP=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_COMPAT_BRK=y +CONFIG_PROFILING=y +CONFIG_SMP=y +CONFIG_X86_VSMP=y 
+CONFIG_X86_AMD_PLATFORM_DEVICE=y +CONFIG_IOSF_MBI=m +CONFIG_CALGARY_IOMMU=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +CONFIG_X86_MCE_INJECT=m +CONFIG_PERF_EVENTS_INTEL_RAPL=m +CONFIG_PERF_EVENTS_INTEL_CSTATE=m +CONFIG_I8K=m +CONFIG_MICROCODE_AMD=y +CONFIG_MICROCODE_OLD_INTERFACE=y +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +CONFIG_NUMA=y +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +CONFIG_MTRR=y +CONFIG_MTRR_SANITIZER=y +CONFIG_EFI=y +CONFIG_EFI_STUB=y +# CONFIG_EFI_MIXED is not set +CONFIG_HZ_1000=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +CONFIG_LEGACY_VSYSCALL_EMULATE=y +CONFIG_HIBERNATION=y +CONFIG_PM_DEBUG=y +CONFIG_PM_TRACE_RTC=y +CONFIG_ACPI_EC_DEBUGFS=m +CONFIG_ACPI_VIDEO=y +CONFIG_ACPI_DOCK=y +# CONFIG_ACPI_BGRT is not set +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_PROCESSOR_AGGREGATOR=m +CONFIG_ACPI_SBS=m +CONFIG_ACPI_NFIT=m +CONFIG_DPTF_POWER=m +CONFIG_ACPI_EXTLOG=m +CONFIG_ACPI_CONFIGFS=m +CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_X86_ACPI_CPUFREQ=y +CONFIG_X86_AMD_FREQ_SENSITIVITY=m +CONFIG_X86_P4_CLOCKMOD=m +CONFIG_IA32_EMULATION=y +CONFIG_DMI_SYSFS=m +CONFIG_ISCSI_IBFT=m +CONFIG_FW_CFG_SYSFS=m +CONFIG_EFI_VARS=y +CONFIG_EFI_BOOTLOADER_CONTROL=m +CONFIG_EFI_CAPSULE_LOADER=m +CONFIG_EFI_TEST=m +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_OPROFILE=m +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_UNUSED_SYMBOLS=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +CONFIG_ATARI_PARTITION=y +CONFIG_MAC_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_SGI_PARTITION=y +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_MQ_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_KYBER=m +CONFIG_IOSCHED_BFQ=m +CONFIG_BINFMT_MISC=m +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_TRANSPARENT_HUGEPAGE=y 
+CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_XFRM_USER=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_NET_IPVTI=m +CONFIG_NET_FOU_IP_TUNNELS=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_DIAG=y +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +CONFIG_INET_DIAG_DESTROY=y +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=m +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_NETDEV=m +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y 
+CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NETFILTER_XTABLES=m +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m 
+CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m 
+CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_PE_SIP=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_LOG_ARP=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_DECNET_NF_GRABULATOR=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_LOG_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m 
+CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_IP_DCCP=m +CONFIG_IP_SCTP=m +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_RDS=m +CONFIG_RDS_RDMA=m +CONFIG_RDS_TCP=m +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_NET_DSA=m +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_DECNET=m +CONFIG_LLC2=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_X25=m +CONFIG_LAPB=m +CONFIG_PHONET=m +CONFIG_6LOWPAN=m +CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m +CONFIG_6LOWPAN_GHC_UDP=m +CONFIG_6LOWPAN_GHC_ICMPV6=m +CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m +CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m +CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m +CONFIG_IEEE802154=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_ATM=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m 
+CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_CANID=m +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_CONNMARK=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +CONFIG_BATMAN_ADV=m +CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_DEBUGFS=y +CONFIG_VSOCKETS=m +CONFIG_VMWARE_VMCI_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_HYPERV_VSOCKETS=m +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_L3_MASTER_DEV=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=y +CONFIG_HAMRADIO=y +CONFIG_AX25=m +CONFIG_NETROM=m +CONFIG_ROSE=m +CONFIG_MKISS=m +CONFIG_6PACK=m +CONFIG_BPQETHER=m +CONFIG_BAYCOM_SER_FDX=m +CONFIG_BAYCOM_SER_HDX=m +CONFIG_BAYCOM_PAR=m +CONFIG_YAM=m +CONFIG_CAN=m +CONFIG_CAN_VXCAN=m +CONFIG_CAN_JANZ_ICAN3=m +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PLATFORM=m +CONFIG_CAN_C_CAN_PCI=m +CONFIG_CAN_CC770=m +CONFIG_CAN_CC770_ISA=m +CONFIG_CAN_CC770_PLATFORM=m +CONFIG_CAN_IFI_CANFD=m +CONFIG_CAN_M_CAN=m +CONFIG_CAN_PEAK_PCIEFD=m +CONFIG_CAN_SJA1000=m +CONFIG_CAN_EMS_PCI=m +CONFIG_CAN_EMS_PCMCIA=m +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PEAK_PCMCIA=m +CONFIG_CAN_PLX_PCI=m +CONFIG_CAN_SJA1000_ISA=m +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_SOFTING=m +CONFIG_CAN_SOFTING_CS=m +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_EMS_USB=m +CONFIG_CAN_ESD_USB2=m +CONFIG_CAN_GS_USB=m +CONFIG_CAN_KVASER_USB=m +CONFIG_CAN_MCBA_USB=m +CONFIG_CAN_PEAK_USB=m +CONFIG_BT=m +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m 
+CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_HIDP=m +CONFIG_BT_6LOWPAN=m +CONFIG_BT_HCIBTSDIO=m +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_ATH3K=y +CONFIG_BT_HCIUART_INTEL=y +CONFIG_BT_HCIUART_AG6XX=y +CONFIG_BT_HCIBCM203X=m +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIDTL1=m +CONFIG_BT_HCIBT3C=m +CONFIG_BT_HCIBLUECARD=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m +CONFIG_AF_RXRPC=m +CONFIG_AF_RXRPC_IPV6=y +CONFIG_RXKAD=y +CONFIG_AF_KCM=m +CONFIG_CFG80211=m +CONFIG_CFG80211_WEXT=y +CONFIG_MAC80211=m +CONFIG_MAC80211_MESH=y +CONFIG_MAC80211_LEDS=y +CONFIG_WIMAX=m +CONFIG_RFKILL=m +CONFIG_RFKILL_INPUT=y +CONFIG_RFKILL_GPIO=m +CONFIG_NET_9P=m +CONFIG_NET_9P_VIRTIO=m +CONFIG_NET_9P_RDMA=m +CONFIG_CAIF=m +CONFIG_CAIF_USB=m +CONFIG_NFC=m +CONFIG_NFC_DIGITAL=m +CONFIG_NFC_NCI=m +CONFIG_NFC_NCI_UART=m +CONFIG_NFC_HCI=m +CONFIG_NFC_MEI_PHY=m +CONFIG_NFC_SIM=m +CONFIG_NFC_PORT100=m +CONFIG_NFC_FDP=m +CONFIG_NFC_FDP_I2C=m +CONFIG_NFC_PN544_MEI=m +CONFIG_NFC_PN533_USB=m +CONFIG_NFC_PN533_I2C=m +CONFIG_NFC_MICROREAD_MEI=m +CONFIG_NFC_MRVL_USB=m +CONFIG_NFC_MRVL_UART=m +CONFIG_NFC_MRVL_I2C=m +CONFIG_NFC_ST_NCI_I2C=m +CONFIG_NFC_NXP_NCI=m +CONFIG_NFC_NXP_NCI_I2C=m +CONFIG_NFC_S3FWRN5_I2C=m +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCI_STUB=m +CONFIG_PCI_HYPERV=m +CONFIG_HOTPLUG_PCI=y +CONFIG_VMD=m +CONFIG_PCCARD=y +CONFIG_PCMCIA=m +CONFIG_YENTA=y +CONFIG_PD6729=m +CONFIG_I82092=m +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_DEBUG_DEVRES=y +CONFIG_CONNECTOR=y +CONFIG_MTD=m +CONFIG_MTD_AR7_PARTS=m +CONFIG_MTD_CMDLINE_PARTS=m +CONFIG_MTD_REDBOOT_PARTS=m +CONFIG_MTD_BLOCK=m +CONFIG_MTD_BLOCK_RO=m +CONFIG_FTL=m +CONFIG_NFTL=m +CONFIG_INFTL=m +CONFIG_RFD_FTL=m +CONFIG_SSFDC=m +CONFIG_SM_FTL=m +CONFIG_MTD_OOPS=m +CONFIG_MTD_SWAP=m +CONFIG_MTD_CFI=m +CONFIG_MTD_JEDECPROBE=m +CONFIG_MTD_CFI_INTELEXT=m 
+CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_ROM=m +CONFIG_MTD_ABSENT=m +CONFIG_MTD_PHYSMAP=m +CONFIG_MTD_AMD76XROM=m +CONFIG_MTD_ICHXROM=m +CONFIG_MTD_ESB2ROM=m +CONFIG_MTD_CK804XROM=m +CONFIG_MTD_SCB2_FLASH=m +CONFIG_MTD_NETtel=m +CONFIG_MTD_L440GX=m +CONFIG_MTD_INTEL_VR_NOR=m +CONFIG_MTD_PLATRAM=m +CONFIG_MTD_PMC551=m +CONFIG_MTD_SLRAM=m +CONFIG_MTD_PHRAM=m +CONFIG_MTD_MTDRAM=m +CONFIG_MTD_BLOCK2MTD=m +CONFIG_MTD_DOCG3=m +CONFIG_MTD_ONENAND=m +CONFIG_MTD_ONENAND_GENERIC=m +CONFIG_MTD_LPDDR=m +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_GLUEBI=m +CONFIG_OF=y +CONFIG_PARPORT=m +CONFIG_PARPORT_PC=m +CONFIG_PARPORT_SERIAL=m +CONFIG_PARPORT_PC_PCMCIA=m +CONFIG_PARPORT_AX88796=m +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_BLK_DEV_FD=m +CONFIG_PARIDE=m +CONFIG_PARIDE_PD=m +CONFIG_PARIDE_PCD=m +CONFIG_PARIDE_PF=m +CONFIG_PARIDE_PT=m +CONFIG_PARIDE_PG=m +CONFIG_PARIDE_ATEN=m +CONFIG_PARIDE_BPCK=m +CONFIG_PARIDE_COMM=m +CONFIG_PARIDE_DSTR=m +CONFIG_PARIDE_FIT2=m +CONFIG_PARIDE_FIT3=m +CONFIG_PARIDE_EPAT=m +CONFIG_PARIDE_EPIA=m +CONFIG_PARIDE_FRIQ=m +CONFIG_PARIDE_FRPW=m +CONFIG_PARIDE_KBIC=m +CONFIG_PARIDE_KTTI=m +CONFIG_PARIDE_ON20=m +CONFIG_PARIDE_ON26=m +CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m +CONFIG_BLK_DEV_UMEM=m +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_BLK_DEV_DRBD=m +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_SKD=m +CONFIG_BLK_DEV_SX8=m +CONFIG_BLK_DEV_RAM=m +CONFIG_CDROM_PKTCDVD=m +CONFIG_ATA_OVER_ETH=m +CONFIG_VIRTIO_BLK=m +CONFIG_BLK_DEV_RSXX=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +CONFIG_NVME_TARGET=m +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_TCP=m +CONFIG_AD525X_DPOT=m +CONFIG_AD525X_DPOT_I2C=m +CONFIG_DUMMY_IRQ=m +CONFIG_IBM_ASM=m +CONFIG_PHANTOM=m +CONFIG_ICS932S401=m +CONFIG_ENCLOSURE_SERVICES=m +CONFIG_HP_ILO=m +CONFIG_APDS9802ALS=m +CONFIG_ISL29020=m +CONFIG_ISL29003=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_APDS990X=m +CONFIG_HMC6352=m 
+CONFIG_DS1682=m +CONFIG_VMWARE_BALLOON=m +CONFIG_PVPANIC=m +CONFIG_C2PORT=m +CONFIG_C2PORT_DURAMAR_2150=m +CONFIG_EEPROM_AT24=m +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_IDT_89HPESX=m +CONFIG_EEPROM_EE1004=m +CONFIG_TI_ST=m +CONFIG_SENSORS_LIS3_I2C=m +CONFIG_ALTERA_STAPL=m +CONFIG_INTEL_MEI_ME=m +CONFIG_INTEL_MEI_TXE=m +CONFIG_VMWARE_VMCI=m +CONFIG_INTEL_MIC_BUS=m +CONFIG_SCIF_BUS=m +CONFIG_VOP_BUS=m +CONFIG_INTEL_MIC_HOST=m +CONFIG_INTEL_MIC_CARD=m +CONFIG_SCIF=m +CONFIG_MIC_COSM=m +CONFIG_VOP=m +CONFIG_GENWQE=m +CONFIG_ECHO=m +CONFIG_MISC_RTSX_PCI=m +CONFIG_MISC_RTSX_USB=m +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=m +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_LOWLEVEL=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_AACRAID=m +CONFIG_SCSI_AIC7XXX=m +CONFIG_SCSI_AIC79XX=m +CONFIG_SCSI_AIC94XX=m +CONFIG_SCSI_MVSAS=m +CONFIG_SCSI_MVUMI=m +CONFIG_SCSI_DPT_I2O=m +CONFIG_SCSI_ADVANSYS=m +CONFIG_SCSI_ARCMSR=m +CONFIG_SCSI_ESAS2R=m +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_SMARTPQI=m +CONFIG_SCSI_UFSHCD=m +CONFIG_SCSI_UFSHCD_PCI=m +CONFIG_SCSI_UFS_DWC_TC_PCI=m +CONFIG_SCSI_UFSHCD_PLATFORM=m +CONFIG_SCSI_UFS_DWC_TC_PLATFORM=m +CONFIG_SCSI_HPTIOP=m +CONFIG_SCSI_BUSLOGIC=m +CONFIG_VMWARE_PVSCSI=m +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_FCOE=m +CONFIG_FCOE_FNIC=m +CONFIG_SCSI_SNIC=m +CONFIG_SCSI_DMX3191D=m +CONFIG_SCSI_GDTH=m +CONFIG_SCSI_ISCI=m +CONFIG_SCSI_IPS=m +CONFIG_SCSI_INITIO=m +CONFIG_SCSI_INIA100=m +CONFIG_SCSI_PPA=m +CONFIG_SCSI_IMM=m +CONFIG_SCSI_STEX=m +CONFIG_SCSI_SYM53C8XX_2=m +CONFIG_SCSI_IPR=m +CONFIG_SCSI_QLOGIC_1280=m +CONFIG_SCSI_QLA_FC=m +CONFIG_TCM_QLA2XXX=m +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_QEDI=m +CONFIG_QEDF=m +CONFIG_SCSI_LPFC=m +CONFIG_SCSI_DC395x=m +CONFIG_SCSI_AM53C974=m +CONFIG_SCSI_WD719X=m +CONFIG_SCSI_DEBUG=m +CONFIG_SCSI_PMCRAID=m +CONFIG_SCSI_PM8001=m 
+CONFIG_SCSI_BFA_FC=m +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI_PLATFORM=m +CONFIG_SATA_INIC162X=m +CONFIG_SATA_ACARD_AHCI=m +CONFIG_SATA_SIL24=m +CONFIG_PDC_ADMA=m +CONFIG_SATA_QSTOR=m +CONFIG_SATA_SX4=m +CONFIG_ATA_PIIX=y +CONFIG_SATA_DWC=m +CONFIG_SATA_MV=m +CONFIG_SATA_NV=y +CONFIG_SATA_PROMISE=m +CONFIG_SATA_SIL=m +CONFIG_SATA_SIS=m +CONFIG_SATA_SVW=m +CONFIG_SATA_ULI=m +CONFIG_SATA_VIA=m +CONFIG_SATA_VITESSE=m +CONFIG_PATA_ALI=m +CONFIG_PATA_AMD=y +CONFIG_PATA_ARTOP=m +CONFIG_PATA_ATIIXP=m +CONFIG_PATA_ATP867X=m +CONFIG_PATA_CMD64X=m +CONFIG_PATA_CYPRESS=m +CONFIG_PATA_EFAR=m +CONFIG_PATA_HPT366=m +CONFIG_PATA_HPT37X=m +CONFIG_PATA_HPT3X2N=m +CONFIG_PATA_HPT3X3=m +CONFIG_PATA_IT8213=m +CONFIG_PATA_IT821X=m +CONFIG_PATA_JMICRON=y +CONFIG_PATA_MARVELL=m +CONFIG_PATA_NETCELL=m +CONFIG_PATA_NINJA32=m +CONFIG_PATA_NS87415=m +CONFIG_PATA_OLDPIIX=y +CONFIG_PATA_OPTIDMA=m +CONFIG_PATA_PDC2027X=m +CONFIG_PATA_PDC_OLD=m +CONFIG_PATA_RADISYS=m +CONFIG_PATA_RDC=m +CONFIG_PATA_SCH=y +CONFIG_PATA_SERVERWORKS=m +CONFIG_PATA_SIL680=m +CONFIG_PATA_SIS=m +CONFIG_PATA_TOSHIBA=m +CONFIG_PATA_TRIFLEX=m +CONFIG_PATA_VIA=m +CONFIG_PATA_WINBOND=m +CONFIG_PATA_CMD640_PCI=m +CONFIG_PATA_MPIIX=m +CONFIG_PATA_NS87410=m +CONFIG_PATA_OPTI=m +CONFIG_PATA_PCMCIA=m +CONFIG_PATA_PLATFORM=m +CONFIG_PATA_RZ1000=m +CONFIG_PATA_ACPI=m +CONFIG_PATA_LEGACY=m +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_MD_CLUSTER=m +CONFIG_BCACHE=m +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m 
+CONFIG_DM_VERITY=m +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_TCM_FC=m +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +CONFIG_SBP_TARGET=m +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +CONFIG_FUSION_FC=m +CONFIG_FUSION_SAS=m +CONFIG_FUSION_CTL=m +CONFIG_MACINTOSH_DRIVERS=y +CONFIG_MAC_EMUMOUSEBTN=y +CONFIG_NETDEVICES=y +CONFIG_VXLAN=m +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_EQUALIZER=m +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_GENEVE=m +CONFIG_GTP=m +CONFIG_MACSEC=m +CONFIG_NTB_NETDEV=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +CONFIG_ARCNET=m +CONFIG_ARCNET_1201=m +CONFIG_ARCNET_1051=m +CONFIG_ARCNET_RAW=m +CONFIG_ARCNET_CAP=m +CONFIG_ARCNET_COM90xx=m +CONFIG_ARCNET_COM90xxIO=m +CONFIG_ARCNET_RIM_I=m +CONFIG_ARCNET_COM20020=m +CONFIG_ARCNET_COM20020_PCI=m +CONFIG_ARCNET_COM20020_CS=m +CONFIG_ATM_DUMMY=m +CONFIG_ATM_TCP=m +CONFIG_ATM_LANAI=m +CONFIG_ATM_ENI=m +CONFIG_ATM_FIRESTREAM=m +CONFIG_ATM_ZATM=m +CONFIG_ATM_NICSTAR=m +CONFIG_ATM_IDT77252=m +CONFIG_ATM_AMBASSADOR=m +CONFIG_ATM_HORIZON=m +CONFIG_ATM_IA=m +CONFIG_ATM_FORE200E=m +CONFIG_ATM_HE=m +CONFIG_ATM_SOLOS=m +CONFIG_CAIF_TTY=m +CONFIG_CAIF_SPI_SLAVE=m +CONFIG_CAIF_HSI=m +CONFIG_CAIF_VIRTIO=m +CONFIG_B53=m +CONFIG_B53_MDIO_DRIVER=m +CONFIG_B53_MMAP_DRIVER=m +CONFIG_B53_SRAB_DRIVER=m +CONFIG_NET_DSA_MT7530=m +CONFIG_NET_DSA_MV88E6060=m +CONFIG_NET_DSA_MV88E6XXX=m +CONFIG_NET_DSA_QCA8K=m +CONFIG_NET_DSA_SMSC_LAN9303_I2C=m +CONFIG_NET_DSA_SMSC_LAN9303_MDIO=m +CONFIG_PCMCIA_3C574=m +CONFIG_PCMCIA_3C589=m +CONFIG_VORTEX=m +CONFIG_TYPHOON=m +CONFIG_ADAPTEC_STARFIRE=m +CONFIG_ET131X=m 
+CONFIG_SLICOSS=m +CONFIG_ACENIC=m +CONFIG_ALTERA_TSE=m +CONFIG_ENA_ETHERNET=m +CONFIG_AMD8111_ETH=m +CONFIG_PCNET32=y +CONFIG_PCMCIA_NMCLAN=m +CONFIG_AMD_XGBE=m +CONFIG_AQTION=m +CONFIG_ATL2=m +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +CONFIG_AURORA_NB8800=m +CONFIG_B44=m +CONFIG_TIGON3=y +CONFIG_BNX2X=m +CONFIG_BNA=m +CONFIG_MACB=m +CONFIG_MACB_PCI=m +CONFIG_THUNDER_NIC_PF=m +CONFIG_THUNDER_NIC_VF=m +# CONFIG_CAVIUM_PTP is not set +CONFIG_LIQUIDIO=m +CONFIG_LIQUIDIO_VF=m +CONFIG_CHELSIO_T1=m +CONFIG_CHELSIO_T4VF=m +CONFIG_CX_ECAT=m +CONFIG_DNET=m +CONFIG_NET_TULIP=y +CONFIG_DE2104X=m +CONFIG_TULIP=m +CONFIG_DE4X5=m +CONFIG_WINBOND_840=m +CONFIG_DM9102=m +CONFIG_ULI526X=m +CONFIG_PCMCIA_XIRCOM=m +CONFIG_DL2K=m +CONFIG_SUNDANCE=m +CONFIG_PCMCIA_FMVJ18X=m +CONFIG_HP100=m +CONFIG_HINIC=m +CONFIG_E100=m +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGBVF=m +CONFIG_IXGB=m +CONFIG_IXGBE=m +CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_I40EVF=m +CONFIG_FM10K=m +CONFIG_JME=m +CONFIG_MVMDIO=m +CONFIG_SKGE=m +CONFIG_SKY2=y +CONFIG_MLX4_EN=m +CONFIG_MLX5_CORE=m +CONFIG_MLX5_CORE_EN=y +CONFIG_MLXFW=m +CONFIG_MLXSW_CORE=m +CONFIG_KS8842=m +CONFIG_KS8851_MLL=m +CONFIG_KSZ884X_PCI=m +CONFIG_MYRI10GE=m +CONFIG_FEALNX=m +CONFIG_NATSEMI=m +CONFIG_NS83820=m +CONFIG_S2IO=m +CONFIG_VXGE=m +CONFIG_NFP=m +CONFIG_PCMCIA_AXNET=m +CONFIG_NE2K_PCI=m +CONFIG_PCMCIA_PCNET=m +CONFIG_FORCEDETH=m +CONFIG_ETHOC=m +CONFIG_HAMACHI=m +CONFIG_YELLOWFIN=m +CONFIG_QLA3XXX=m +CONFIG_QLCNIC=m +CONFIG_NETXEN_NIC=m +CONFIG_QED=m +CONFIG_QEDE=m +CONFIG_QCOM_EMAC=m +CONFIG_RMNET=m +CONFIG_R6040=m +CONFIG_ATP=m +CONFIG_8139CP=m +CONFIG_8139TOO=m +CONFIG_R8169=m +CONFIG_ROCKER=m +CONFIG_SXGBE_ETH=m +CONFIG_SFC=m +CONFIG_SFC_FALCON=m +CONFIG_SC92031=m +CONFIG_SIS900=m +CONFIG_SIS190=m +CONFIG_PCMCIA_SMC91C92=m +CONFIG_EPIC100=m +CONFIG_SMSC911X=m +CONFIG_SMSC9420=m +CONFIG_STMMAC_ETH=m +CONFIG_HAPPYMEAL=m +CONFIG_SUNGEM=m +CONFIG_CASSINI=m +CONFIG_NIU=m +CONFIG_DWC_XLGMAC=m 
+CONFIG_DWC_XLGMAC_PCI=m +CONFIG_TEHUTI=m +CONFIG_TLAN=m +CONFIG_VIA_RHINE=m +CONFIG_VIA_RHINE_MMIO=y +CONFIG_VIA_VELOCITY=m +CONFIG_WIZNET_W5100=m +CONFIG_WIZNET_W5300=m +CONFIG_PCMCIA_XIRC2PS=m +CONFIG_FDDI=m +CONFIG_REALTEK_PHY=m +CONFIG_USB_USBNET=m +CONFIG_DEFXX=m +CONFIG_SKFP=m +CONFIG_NET_SB1000=m +CONFIG_MDIO_BITBANG=m +CONFIG_MDIO_GPIO=m +CONFIG_AMD_PHY=m +CONFIG_AQUANTIA_PHY=m +CONFIG_AT803X_PHY=m +CONFIG_BCM7XXX_PHY=m +CONFIG_BCM87XX_PHY=m +CONFIG_BROADCOM_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_CORTINA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_INTEL_XWAY_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=m +CONFIG_MICREL_PHY=m +CONFIG_MICROSEMI_PHY=m +CONFIG_NATIONAL_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_RENESAS_PHY=m +CONFIG_ROCKCHIP_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_VITESSE_PHY=m +CONFIG_XILINX_GMII2RGMII=m +CONFIG_PLIP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_MPPE=m +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_USB_NET_DRIVERS=m +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SR9700=m +CONFIG_USB_NET_SR9800=m +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_CDC_PHONET=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +CONFIG_ADM8211=m +CONFIG_ATH5K=m +CONFIG_ATH9K=m +CONFIG_ATH9K_HTC=m +CONFIG_CARL9170=m +CONFIG_ATH6KL=m +CONFIG_ATH6KL_SDIO=m +CONFIG_ATH6KL_USB=m +CONFIG_AR5523=m +CONFIG_WIL6210=m 
+CONFIG_ATH10K=m +CONFIG_ATH10K_PCI=m +CONFIG_ATH10K_SDIO=m +CONFIG_ATH10K_USB=m +CONFIG_WCN36XX=m +CONFIG_ATMEL=m +CONFIG_PCI_ATMEL=m +CONFIG_PCMCIA_ATMEL=m +CONFIG_AT76C50X_USB=m +CONFIG_B43=m +CONFIG_B43LEGACY=m +CONFIG_BRCMSMAC=m +CONFIG_BRCMFMAC=m +CONFIG_AIRO=m +CONFIG_AIRO_CS=m +CONFIG_IPW2100=m +CONFIG_IPW2200=m +CONFIG_IWL4965=m +CONFIG_IWL3945=m +CONFIG_IWLWIFI=m +CONFIG_IWLDVM=m +CONFIG_IWLMVM=m +CONFIG_HOSTAP=m +CONFIG_HOSTAP_PLX=m +CONFIG_HOSTAP_PCI=m +CONFIG_HOSTAP_CS=m +CONFIG_HERMES=m +CONFIG_PLX_HERMES=m +CONFIG_TMD_HERMES=m +CONFIG_NORTEL_HERMES=m +CONFIG_PCMCIA_HERMES=m +CONFIG_PCMCIA_SPECTRUM=m +CONFIG_ORINOCO_USB=m +CONFIG_P54_COMMON=m +CONFIG_P54_USB=m +CONFIG_P54_PCI=m +CONFIG_LIBERTAS=m +CONFIG_LIBERTAS_USB=m +CONFIG_LIBERTAS_CS=m +CONFIG_LIBERTAS_SDIO=m +CONFIG_LIBERTAS_THINFIRM=m +CONFIG_LIBERTAS_THINFIRM_USB=m +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_SDIO=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_MWIFIEX_USB=m +CONFIG_MWL8K=m +CONFIG_MT7601U=m +CONFIG_RT2X00=m +CONFIG_RT2400PCI=m +CONFIG_RT2500PCI=m +CONFIG_RT61PCI=m +CONFIG_RT2800PCI=m +CONFIG_RT2500USB=m +CONFIG_RT73USB=m +CONFIG_RT2800USB=m +CONFIG_INPUT_LEDS=m +CONFIG_RSI_91X=m +CONFIG_CW1200=m +CONFIG_CW1200_WLAN_SDIO=m +CONFIG_WL1251=m +CONFIG_WL1251_SDIO=m +CONFIG_WL12XX=m +CONFIG_WL18XX=m +CONFIG_WLCORE_SDIO=m +CONFIG_USB_ZD1201=m +CONFIG_ZD1211RW=m +CONFIG_PCMCIA_RAYCS=m +CONFIG_PCMCIA_WL3501=m +CONFIG_MAC80211_HWSIM=m +CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_WIMAX_I2400M_USB=m +CONFIG_IEEE802154_FAKELB=m +CONFIG_IEEE802154_ATUSB=m +CONFIG_VMXNET3=m +CONFIG_FUJITSU_ES=m +CONFIG_THUNDERBOLT_NET=m +CONFIG_HYPERV_NET=m +CONFIG_INPUT_POLLDEV=y +CONFIG_INPUT_SPARSEKMAP=m +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=m +CONFIG_KEYBOARD_ADC=m +CONFIG_KEYBOARD_ADP5588=m +CONFIG_KEYBOARD_ADP5589=m +CONFIG_KEYBOARD_QT1070=m +CONFIG_KEYBOARD_QT2160=m +CONFIG_KEYBOARD_DLINK_DIR685=m +CONFIG_KEYBOARD_LKKBD=m +CONFIG_KEYBOARD_TCA6416=m 
+CONFIG_KEYBOARD_TCA8418=m +CONFIG_KEYBOARD_MATRIX=m +CONFIG_KEYBOARD_LM8323=m +CONFIG_KEYBOARD_LM8333=m +CONFIG_KEYBOARD_MAX7359=m +CONFIG_KEYBOARD_MCS=m +CONFIG_KEYBOARD_MPR121=m +CONFIG_KEYBOARD_NEWTON=m +CONFIG_KEYBOARD_OPENCORES=m +CONFIG_KEYBOARD_SAMSUNG=m +CONFIG_KEYBOARD_STOWAWAY=m +CONFIG_KEYBOARD_TM2_TOUCHKEY=m +CONFIG_KEYBOARD_XTKBD=m +CONFIG_KEYBOARD_CROS_EC=m +CONFIG_MOUSE_PS2=m +CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_CYAPA=m +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_VSXXXAA=m +CONFIG_MOUSE_GPIO=m +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_ANALOG=m +CONFIG_JOYSTICK_A3D=m +CONFIG_JOYSTICK_ADI=m +CONFIG_JOYSTICK_COBRA=m +CONFIG_JOYSTICK_GF2K=m +CONFIG_JOYSTICK_GRIP=m +CONFIG_JOYSTICK_GRIP_MP=m +CONFIG_JOYSTICK_GUILLEMOT=m +CONFIG_JOYSTICK_INTERACT=m +CONFIG_JOYSTICK_SIDEWINDER=m +CONFIG_JOYSTICK_TMDC=m +CONFIG_JOYSTICK_IFORCE=m +CONFIG_JOYSTICK_WARRIOR=m +CONFIG_JOYSTICK_MAGELLAN=m +CONFIG_JOYSTICK_SPACEORB=m +CONFIG_JOYSTICK_SPACEBALL=m +CONFIG_JOYSTICK_STINGER=m +CONFIG_JOYSTICK_TWIDJOY=m +CONFIG_JOYSTICK_ZHENHUA=m +CONFIG_JOYSTICK_DB9=m +CONFIG_JOYSTICK_GAMECON=m +CONFIG_JOYSTICK_TURBOGRAFX=m +CONFIG_JOYSTICK_AS5011=m +CONFIG_JOYSTICK_JOYDUMP=m +CONFIG_JOYSTICK_XPAD=m +CONFIG_JOYSTICK_WALKERA0701=m +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=m +CONFIG_TABLET_USB_AIPTEK=m +CONFIG_TABLET_USB_GTCO=m +CONFIG_TABLET_USB_HANWANG=m +CONFIG_TABLET_USB_KBTAB=m +CONFIG_TABLET_USB_PEGASUS=m +CONFIG_TABLET_SERIAL_WACOM4=m +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_AD7879=m +CONFIG_TOUCHSCREEN_AD7879_I2C=m +CONFIG_TOUCHSCREEN_ATMEL_MXT=m +CONFIG_TOUCHSCREEN_AUO_PIXCIR=m +CONFIG_TOUCHSCREEN_BU21013=m +CONFIG_TOUCHSCREEN_CY8CTMG110=m +CONFIG_TOUCHSCREEN_CYTTSP_CORE=m +CONFIG_TOUCHSCREEN_CYTTSP_I2C=m +CONFIG_TOUCHSCREEN_CYTTSP4_CORE=m +CONFIG_TOUCHSCREEN_CYTTSP4_I2C=m +CONFIG_TOUCHSCREEN_DYNAPRO=m +CONFIG_TOUCHSCREEN_HAMPSHIRE=m 
+CONFIG_TOUCHSCREEN_EXC3000=m +CONFIG_TOUCHSCREEN_FUJITSU=m +CONFIG_TOUCHSCREEN_HIDEEP=m +CONFIG_TOUCHSCREEN_ILI210X=m +CONFIG_TOUCHSCREEN_S6SY761=m +CONFIG_TOUCHSCREEN_GUNZE=m +CONFIG_TOUCHSCREEN_EKTF2127=m +CONFIG_TOUCHSCREEN_WACOM_W8001=m +CONFIG_TOUCHSCREEN_WACOM_I2C=m +CONFIG_TOUCHSCREEN_MAX11801=m +CONFIG_TOUCHSCREEN_MCS5000=m +CONFIG_TOUCHSCREEN_MMS114=m +CONFIG_TOUCHSCREEN_MELFAS_MIP4=m +CONFIG_TOUCHSCREEN_MTOUCH=m +CONFIG_TOUCHSCREEN_INEXIO=m +CONFIG_TOUCHSCREEN_MK712=m +CONFIG_TOUCHSCREEN_PENMOUNT=m +CONFIG_TOUCHSCREEN_EDT_FT5X06=m +CONFIG_TOUCHSCREEN_TOUCHRIGHT=m +CONFIG_TOUCHSCREEN_TOUCHWIN=m +CONFIG_TOUCHSCREEN_TI_AM335X_TSC=m +CONFIG_TOUCHSCREEN_UCB1400=m +CONFIG_TOUCHSCREEN_PIXCIR=m +CONFIG_TOUCHSCREEN_WDT87XX_I2C=m +CONFIG_TOUCHSCREEN_WM97XX=m +CONFIG_TOUCHSCREEN_MC13783=m +CONFIG_TOUCHSCREEN_TOUCHIT213=m +CONFIG_TOUCHSCREEN_TSC_SERIO=m +CONFIG_TOUCHSCREEN_TSC2004=m +CONFIG_TOUCHSCREEN_TSC2007=m +CONFIG_TOUCHSCREEN_RM_TS=m +CONFIG_TOUCHSCREEN_SILEAD=m +CONFIG_TOUCHSCREEN_SIS_I2C=m +CONFIG_TOUCHSCREEN_ST1232=m +CONFIG_TOUCHSCREEN_STMFTS=m +CONFIG_TOUCHSCREEN_SX8654=m +CONFIG_TOUCHSCREEN_TPS6507X=m +CONFIG_TOUCHSCREEN_ZET6223=m +CONFIG_TOUCHSCREEN_ZFORCE=m +CONFIG_TOUCHSCREEN_ROHM_BU21023=m +CONFIG_INPUT_MISC=y +CONFIG_INPUT_88PM80X_ONKEY=m +CONFIG_INPUT_AD714X=m +CONFIG_INPUT_ARIZONA_HAPTICS=m +CONFIG_INPUT_BMA150=m +CONFIG_INPUT_E3X0_BUTTON=m +CONFIG_INPUT_PCSPKR=m +CONFIG_INPUT_MC13783_PWRBUTTON=m +CONFIG_INPUT_MMA8450=m +CONFIG_INPUT_APANEL=m +CONFIG_INPUT_GP2A=m +CONFIG_INPUT_GPIO_BEEPER=m +CONFIG_INPUT_GPIO_DECODER=m +CONFIG_INPUT_ATLAS_BTNS=m +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +CONFIG_INPUT_KXTJ9=m +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m +CONFIG_INPUT_REGULATOR_HAPTIC=m +CONFIG_INPUT_RETU_PWRBUTTON=m +CONFIG_INPUT_AXP20X_PEK=m +CONFIG_INPUT_PCF50633_PMU=m +CONFIG_INPUT_PCF8574=m +CONFIG_INPUT_PWM_BEEPER=m +CONFIG_INPUT_PWM_VIBRA=m +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m 
+CONFIG_INPUT_DA9063_ONKEY=m +CONFIG_INPUT_ADXL34X=m +CONFIG_INPUT_IMS_PCU=m +CONFIG_INPUT_CMA3000=m +CONFIG_INPUT_CMA3000_I2C=m +CONFIG_INPUT_IDEAPAD_SLIDEBAR=m +CONFIG_INPUT_SOC_BUTTON_ARRAY=m +CONFIG_INPUT_DRV260X_HAPTICS=m +CONFIG_INPUT_DRV2665_HAPTICS=m +CONFIG_INPUT_DRV2667_HAPTICS=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SMB=m +CONFIG_SERIO_SERPORT=m +CONFIG_LEGACY_PTYS=y +CONFIG_SERIO_CT82C710=m +CONFIG_SERIO_PARKBD=m +CONFIG_SERIO_PCIPS2=m +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +CONFIG_SERIO_PS2MULT=m +CONFIG_SERIO_ARC_PS2=m +CONFIG_SERIO_GPIO_PS2=m +CONFIG_USERIO=m +CONFIG_GAMEPORT_NS558=m +CONFIG_GAMEPORT_L4=m +CONFIG_GAMEPORT_EMU10K1=m +CONFIG_GAMEPORT_FM801=m +CONFIG_SERIAL_NONSTANDARD=y +CONFIG_ROCKETPORT=m +CONFIG_CYCLADES=m +CONFIG_MOXA_INTELLIO=m +CONFIG_SYNCLINK=m +CONFIG_SYNCLINKMP=m +CONFIG_SYNCLINK_GT=m +CONFIG_NOZOMI=m +CONFIG_ISI=m +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +CONFIG_TRACE_ROUTER=m +CONFIG_TRACE_SINK=m +CONFIG_DEVKMEM=y +CONFIG_SERIAL_8250=m +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_DETECT_IRQ=y +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DW=m +CONFIG_SERIAL_UARTLITE=m +CONFIG_SERIAL_JSM=m +CONFIG_SERIAL_SC16IS7XX=m +CONFIG_SERIAL_ALTERA_JTAGUART=m +CONFIG_SERIAL_ALTERA_UART=m +CONFIG_SERIAL_ARC=m +CONFIG_SERIAL_RP2=m +CONFIG_SERIAL_FSL_LPUART=m +CONFIG_SERIAL_MEN_Z135=m +CONFIG_PRINTER=m +CONFIG_PPDEV=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_INTEL=y +CONFIG_HW_RANDOM_AMD=y +CONFIG_NVRAM=y +CONFIG_APPLICOM=m +CONFIG_SYNCLINK_CS=m +CONFIG_CARDMAN_4000=m +CONFIG_CARDMAN_4040=m +CONFIG_SCR24X=m +CONFIG_IPWIRELESS=m +CONFIG_MWAVE=m +CONFIG_RAW_DRIVER=m +CONFIG_HPET=y +CONFIG_HPET_MMAP=y +CONFIG_HANGCHECK_TIMER=m +CONFIG_TCG_TIS_I2C_ATMEL=m +CONFIG_TCG_TIS_I2C_INFINEON=m +CONFIG_TCG_TIS_I2C_NUVOTON=m +CONFIG_TCG_NSC=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m 
+CONFIG_TCG_VTPM_PROXY=m +CONFIG_TCG_TIS_ST33ZP24_I2C=m +CONFIG_TELCLOCK=m +CONFIG_XILLYBUS=m +CONFIG_XILLYBUS_PCIE=m +CONFIG_I2C=y +CONFIG_I2C_MUX_GPIO=m +CONFIG_I2C_MUX_PCA9541=m +CONFIG_I2C_MUX_PCA954x=m +CONFIG_I2C_MUX_REG=m +CONFIG_I2C_MUX_MLXCPLD=m +CONFIG_I2C_ALI1535=m +CONFIG_I2C_ALI1563=m +CONFIG_I2C_ALI15X3=m +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD756_S4882=m +CONFIG_I2C_AMD8111=m +CONFIG_I2C_I801=y +CONFIG_I2C_ISCH=m +CONFIG_I2C_ISMT=m +CONFIG_I2C_PIIX4=m +CONFIG_I2C_NFORCE2=m +CONFIG_I2C_NFORCE2_S4985=m +CONFIG_I2C_SIS5595=m +CONFIG_I2C_SIS630=m +CONFIG_I2C_SIS96X=m +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m +CONFIG_I2C_SCMI=m +CONFIG_I2C_CBUS_GPIO=m +CONFIG_I2C_DESIGNWARE_PCI=m +CONFIG_I2C_GPIO=m +CONFIG_I2C_KEMPLD=m +CONFIG_I2C_OCORES=m +CONFIG_I2C_PCA_PLATFORM=m +CONFIG_I2C_SIMTEC=m +CONFIG_I2C_XILINX=m +CONFIG_I2C_DIOLAN_U2C=m +CONFIG_I2C_DLN2=m +CONFIG_I2C_PARPORT=m +CONFIG_I2C_PARPORT_LIGHT=m +CONFIG_I2C_ROBOTFUZZ_OSIF=m +CONFIG_I2C_TAOS_EVM=m +CONFIG_I2C_TINY_USB=m +CONFIG_I2C_VIPERBOARD=m +CONFIG_I2C_MLXCPLD=m +CONFIG_I2C_CROS_EC_TUNNEL=m +CONFIG_I2C_STUB=m +CONFIG_SPI=y +CONFIG_SPMI=m +CONFIG_HSI=m +CONFIG_HSI_CHAR=m +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m +CONFIG_PPS_CLIENT_GPIO=m +CONFIG_PTP_1588_CLOCK_KVM=m +CONFIG_PINCTRL_MCP23S08=m +CONFIG_PINCTRL_BROXTON=m +CONFIG_PINCTRL_CEDARFORK=m +CONFIG_PINCTRL_DENVERTON=m +CONFIG_PINCTRL_GEMINILAKE=m +CONFIG_PINCTRL_LEWISBURG=m +CONFIG_PINCTRL_SUNRISEPOINT=m +# CONFIG_PINCTRL_MT6397 is not set +CONFIG_GPIO_AMDPT=m +CONFIG_GPIO_DWAPB=m +CONFIG_GPIO_EXAR=m +CONFIG_GPIO_GENERIC_PLATFORM=m +CONFIG_GPIO_ICH=m +CONFIG_GPIO_MB86S7X=m +CONFIG_GPIO_MENZ127=m +CONFIG_GPIO_VX855=m +CONFIG_GPIO_F7188X=m +CONFIG_GPIO_IT87=m +CONFIG_GPIO_SCH=m +CONFIG_GPIO_SCH311X=m +CONFIG_GPIO_WS16C48=m +CONFIG_GPIO_ADP5588=m +CONFIG_GPIO_MAX7300=m +CONFIG_GPIO_MAX732X=m +CONFIG_GPIO_PCA953X=m +CONFIG_GPIO_PCF857X=m +CONFIG_GPIO_TPIC2810=m +CONFIG_GPIO_ARIZONA=m +CONFIG_GPIO_BD9571MWV=m +CONFIG_GPIO_DLN2=m 
+CONFIG_GPIO_JANZ_TTL=m +CONFIG_GPIO_KEMPLD=m +CONFIG_GPIO_LP3943=m +CONFIG_GPIO_LP873X=m +CONFIG_GPIO_TPS65086=m +CONFIG_GPIO_UCB1400=m +CONFIG_GPIO_WHISKEY_COVE=m +CONFIG_GPIO_WM8994=m +CONFIG_GPIO_AMD8111=m +CONFIG_GPIO_ML_IOH=m +CONFIG_GPIO_PCI_IDIO_16=m +CONFIG_GPIO_RDC321X=m +CONFIG_GPIO_VIPERBOARD=m +CONFIG_W1_MASTER_MATROX=m +CONFIG_W1_MASTER_DS2490=m +CONFIG_W1_MASTER_DS2482=m +CONFIG_W1_MASTER_DS1WM=m +CONFIG_W1_MASTER_GPIO=m +CONFIG_W1_SLAVE_THERM=m +CONFIG_W1_SLAVE_SMEM=m +CONFIG_W1_SLAVE_DS2405=m +CONFIG_W1_SLAVE_DS2408=m +CONFIG_W1_SLAVE_DS2413=m +CONFIG_W1_SLAVE_DS2406=m +CONFIG_W1_SLAVE_DS2423=m +CONFIG_W1_SLAVE_DS2805=m +CONFIG_W1_SLAVE_DS2431=m +CONFIG_W1_SLAVE_DS2433=m +CONFIG_W1_SLAVE_DS2438=m +CONFIG_W1_SLAVE_DS28E04=m +CONFIG_W1_SLAVE_DS28E17=m +CONFIG_PDA_POWER=m +CONFIG_GENERIC_ADC_BATTERY=m +CONFIG_TEST_POWER=m +CONFIG_BATTERY_DS2760=m +CONFIG_BATTERY_DS2780=m +CONFIG_BATTERY_DS2781=m +CONFIG_BATTERY_DS2782=m +CONFIG_MANAGER_SBS=m +CONFIG_BATTERY_BQ27XXX=m +CONFIG_CHARGER_DA9150=m +CONFIG_BATTERY_DA9150=m +CONFIG_CHARGER_AXP20X=m +CONFIG_BATTERY_AXP20X=m +CONFIG_AXP20X_POWER=m +CONFIG_AXP288_CHARGER=m +CONFIG_AXP288_FUEL_GAUGE=m +CONFIG_BATTERY_MAX17040=m +CONFIG_BATTERY_MAX17042=m +CONFIG_BATTERY_MAX1721X=m +CONFIG_CHARGER_PCF50633=m +CONFIG_CHARGER_ISP1704=m +CONFIG_CHARGER_MAX8903=m +CONFIG_CHARGER_LP8727=m +CONFIG_CHARGER_GPIO=m +CONFIG_CHARGER_BQ2415X=m +CONFIG_CHARGER_BQ24190=m +CONFIG_CHARGER_BQ24257=m +CONFIG_CHARGER_BQ24735=m +CONFIG_CHARGER_BQ25890=m +CONFIG_CHARGER_SMB347=m +CONFIG_BATTERY_GAUGE_LTC2941=m +CONFIG_BATTERY_RT5033=m +CONFIG_CHARGER_RT9455=m +CONFIG_SENSORS_ABITUGURU=m +CONFIG_SENSORS_ABITUGURU3=m +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1021=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7410=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m 
+CONFIG_SENSORS_ADT7475=m +CONFIG_SENSORS_ASC7621=m +CONFIG_SENSORS_K8TEMP=m +CONFIG_SENSORS_K10TEMP=m +CONFIG_SENSORS_FAM15H_POWER=m +CONFIG_SENSORS_APPLESMC=m +CONFIG_SENSORS_ASB100=m +CONFIG_SENSORS_ASPEED=m +CONFIG_SENSORS_ATXP1=m +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_DS1621=m +CONFIG_SENSORS_I5K_AMB=m +CONFIG_SENSORS_F71805F=m +CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +CONFIG_SENSORS_MC13783_ADC=m +CONFIG_SENSORS_FSCHMD=m +CONFIG_SENSORS_FTSTEUTATES=m +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_G760A=m +CONFIG_SENSORS_G762=m +CONFIG_SENSORS_HIH6130=m +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +CONFIG_SENSORS_IIO_HWMON=m +CONFIG_SENSORS_I5500=m +CONFIG_SENSORS_CORETEMP=m +CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_JC42=m +CONFIG_SENSORS_POWR1220=m +CONFIG_SENSORS_LINEAGE=m +CONFIG_SENSORS_LTC2945=m +CONFIG_SENSORS_LTC2990=m +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m +CONFIG_SENSORS_LTC4222=m +CONFIG_SENSORS_LTC4245=m +CONFIG_SENSORS_LTC4260=m +CONFIG_SENSORS_LTC4261=m +CONFIG_SENSORS_MAX16065=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +CONFIG_SENSORS_MAX6621=m +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6642=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6697=m +CONFIG_SENSORS_MAX31790=m +CONFIG_SENSORS_MCP3021=m +CONFIG_SENSORS_TC654=m +CONFIG_SENSORS_MENF21BMC_HWMON=m +CONFIG_SENSORS_LM63=m +CONFIG_SENSORS_LM73=m +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LM95234=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_NTC_THERMISTOR=m +CONFIG_SENSORS_NCT6683=m +CONFIG_SENSORS_NCT6775=m +CONFIG_SENSORS_NCT7802=m +CONFIG_SENSORS_NCT7904=m +CONFIG_SENSORS_PCF8591=m +CONFIG_PMBUS=m +CONFIG_SENSORS_ADM1275=m 
+CONFIG_SENSORS_IBM_CFFPS=m +CONFIG_SENSORS_IR35221=m +CONFIG_SENSORS_LM25066=m +CONFIG_SENSORS_LTC2978=m +CONFIG_SENSORS_LTC3815=m +CONFIG_SENSORS_MAX16064=m +CONFIG_SENSORS_MAX20751=m +CONFIG_SENSORS_MAX31785=m +CONFIG_SENSORS_MAX34440=m +CONFIG_SENSORS_MAX8688=m +CONFIG_SENSORS_TPS40422=m +CONFIG_SENSORS_TPS53679=m +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +CONFIG_SENSORS_ZL6100=m +CONFIG_SENSORS_PWM_FAN=m +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +CONFIG_SENSORS_SHT3x=m +CONFIG_SENSORS_SHTC1=m +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +CONFIG_SENSORS_EMC2103=m +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +CONFIG_SENSORS_STTS751=m +CONFIG_SENSORS_SMM665=m +CONFIG_SENSORS_ADC128D818=m +CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m +CONFIG_SENSORS_INA3221=m +CONFIG_SENSORS_TC74=m +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +CONFIG_SENSORS_TMP103=m +CONFIG_SENSORS_TMP108=m +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +CONFIG_SENSORS_VIA_CPUTEMP=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m +CONFIG_SENSORS_W83795=m +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m +CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m +CONFIG_SENSORS_XGENE=m +CONFIG_SENSORS_ACPI_POWER=m +CONFIG_SENSORS_ATK0110=m +CONFIG_INTEL_POWERCLAMP=m +CONFIG_INTEL_SOC_DTS_THERMAL=m +CONFIG_INT340X_THERMAL=m +CONFIG_INT3406_THERMAL=m +CONFIG_INTEL_BXT_PMIC_THERMAL=m +CONFIG_INTEL_PCH_THERMAL=m +CONFIG_GENERIC_ADC_THERMAL=m +CONFIG_WATCHDOG=y +CONFIG_SOFT_WATCHDOG=m +CONFIG_DA9052_WATCHDOG=m +CONFIG_DA9055_WATCHDOG=m +CONFIG_DA9063_WATCHDOG=m +CONFIG_DA9062_WATCHDOG=m +CONFIG_MENF21BMC_WATCHDOG=m +CONFIG_WDAT_WDT=m 
+CONFIG_XILINX_WATCHDOG=m +CONFIG_ZIIRAVE_WATCHDOG=m +CONFIG_CADENCE_WATCHDOG=m +CONFIG_DW_WATCHDOG=m +CONFIG_MAX63XX_WATCHDOG=m +CONFIG_RETU_WATCHDOG=m +CONFIG_ACQUIRE_WDT=m +CONFIG_ADVANTECH_WDT=m +CONFIG_ALIM1535_WDT=m +CONFIG_ALIM7101_WDT=m +CONFIG_EBC_C384_WDT=m +CONFIG_F71808E_WDT=m +CONFIG_SP5100_TCO=m +CONFIG_SBC_FITPC2_WATCHDOG=m +CONFIG_EUROTECH_WDT=m +CONFIG_IB700_WDT=m +CONFIG_IBMASR=m +CONFIG_WAFER_WDT=m +CONFIG_I6300ESB_WDT=m +CONFIG_IE6XX_WDT=m +CONFIG_ITCO_WDT=m +CONFIG_IT8712F_WDT=m +CONFIG_IT87_WDT=m +CONFIG_HP_WATCHDOG=m +CONFIG_KEMPLD_WDT=m +CONFIG_SC1200_WDT=m +CONFIG_PC87413_WDT=m +CONFIG_NV_TCO=m +CONFIG_60XX_WDT=m +CONFIG_CPU5_WDT=m +CONFIG_SMSC_SCH311X_WDT=m +CONFIG_SMSC37B787_WDT=m +CONFIG_VIA_WDT=m +CONFIG_W83627HF_WDT=m +CONFIG_W83877F_WDT=m +CONFIG_W83977F_WDT=m +CONFIG_MACHZ_WDT=m +CONFIG_SBC_EPX_C3_WATCHDOG=m +CONFIG_INTEL_MEI_WDT=m +CONFIG_NI903X_WDT=m +CONFIG_NIC7018_WDT=m +CONFIG_MEN_A21_WDT=m +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m +CONFIG_USBPCWATCHDOG=m +CONFIG_MFD_BCM590XX=m +CONFIG_MFD_BD9571MWV=m +CONFIG_MFD_AXP20X_I2C=m +CONFIG_MFD_DA9062=m +CONFIG_MFD_DA9150=m +CONFIG_MFD_DLN2=m +CONFIG_MFD_MC13XXX_I2C=m +CONFIG_HTC_PASIC3=m +CONFIG_MFD_INTEL_QUARK_I2C_GPIO=m +CONFIG_INTEL_SOC_PMIC_BXTWC=m +CONFIG_INTEL_SOC_PMIC_CHTDC_TI=m +CONFIG_MFD_INTEL_LPSS_ACPI=m +CONFIG_MFD_INTEL_LPSS_PCI=m +CONFIG_MFD_JANZ_CMODIO=m +CONFIG_MFD_KEMPLD=m +CONFIG_MFD_88PM800=m +CONFIG_MFD_88PM805=m +CONFIG_MFD_MAX8907=m +CONFIG_MFD_MT6397=m +CONFIG_MFD_MENF21BMC=m +CONFIG_MFD_VIPERBOARD=m +CONFIG_MFD_RETU=m +CONFIG_MFD_PCF50633=m +CONFIG_PCF50633_ADC=m +CONFIG_PCF50633_GPIO=m +CONFIG_UCB1400_CORE=m +CONFIG_MFD_RT5033=m +CONFIG_MFD_SI476X_CORE=m +CONFIG_MFD_SM501=m +CONFIG_MFD_SKY81452=m +# CONFIG_ABX500_CORE is not set +CONFIG_MFD_TI_AM335X_TSCADC=m +CONFIG_MFD_LP3943=m +CONFIG_MFD_TI_LMU=m +CONFIG_TPS6105X=m +CONFIG_TPS65010=m +CONFIG_TPS6507X=m +CONFIG_MFD_TPS65086=m +CONFIG_MFD_TI_LP873X=m +CONFIG_MFD_WL1273_CORE=m +CONFIG_MFD_LM3533=m 
+CONFIG_MFD_ARIZONA_I2C=m +CONFIG_MFD_WM8994=m +CONFIG_REGULATOR_VIRTUAL_CONSUMER=m +CONFIG_REGULATOR_USERSPACE_CONSUMER=m +CONFIG_REGULATOR_88PM800=m +CONFIG_REGULATOR_ACT8865=m +CONFIG_REGULATOR_AD5398=m +CONFIG_REGULATOR_ANATOP=m +CONFIG_REGULATOR_ARIZONA_LDO1=m +CONFIG_REGULATOR_ARIZONA_MICSUPP=m +CONFIG_REGULATOR_AXP20X=m +CONFIG_REGULATOR_BCM590XX=m +CONFIG_REGULATOR_BD9571MWV=m +CONFIG_REGULATOR_DA9062=m +CONFIG_REGULATOR_DA9210=m +CONFIG_REGULATOR_DA9211=m +CONFIG_REGULATOR_FAN53555=m +CONFIG_REGULATOR_GPIO=m +CONFIG_REGULATOR_ISL9305=m +CONFIG_REGULATOR_ISL6271A=m +CONFIG_REGULATOR_LM363X=m +CONFIG_REGULATOR_LP3971=m +CONFIG_REGULATOR_LP3972=m +CONFIG_REGULATOR_LP872X=m +CONFIG_REGULATOR_LP8755=m +CONFIG_REGULATOR_LTC3589=m +CONFIG_REGULATOR_LTC3676=m +CONFIG_REGULATOR_MAX1586=m +CONFIG_REGULATOR_MAX8649=m +CONFIG_REGULATOR_MAX8660=m +CONFIG_REGULATOR_MAX8907=m +CONFIG_REGULATOR_MAX8952=m +CONFIG_REGULATOR_MC13783=m +CONFIG_REGULATOR_MC13892=m +CONFIG_REGULATOR_MT6311=m +CONFIG_REGULATOR_MT6323=m +CONFIG_REGULATOR_MT6397=m +CONFIG_REGULATOR_PCF50633=m +CONFIG_REGULATOR_PFUZE100=m +CONFIG_REGULATOR_PV88060=m +CONFIG_REGULATOR_PV88080=m +CONFIG_REGULATOR_PV88090=m +CONFIG_REGULATOR_PWM=m +CONFIG_REGULATOR_QCOM_SPMI=m +CONFIG_REGULATOR_RT5033=m +CONFIG_REGULATOR_SKY81452=m +CONFIG_REGULATOR_TPS51632=m +CONFIG_REGULATOR_TPS62360=m +CONFIG_REGULATOR_TPS65023=m +CONFIG_REGULATOR_TPS6507X=m +CONFIG_REGULATOR_TPS65086=m +CONFIG_REGULATOR_TPS65132=m +CONFIG_REGULATOR_WM8994=m +CONFIG_RC_CORE=m +CONFIG_MEDIA_SUPPORT=m +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_V4L2_FLASH_LED_CLASS=m +CONFIG_CYPRESS_FIRMWARE=m +CONFIG_VIDEO_TVAUDIO=m +CONFIG_VIDEO_TDA7432=m +CONFIG_VIDEO_TDA9840=m +CONFIG_VIDEO_TEA6415C=m +CONFIG_VIDEO_TEA6420=m +CONFIG_VIDEO_MSP3400=m +CONFIG_VIDEO_CS3308=m +CONFIG_VIDEO_CS5345=m +CONFIG_VIDEO_CS53L32A=m +CONFIG_VIDEO_UDA1342=m +CONFIG_VIDEO_WM8775=m +CONFIG_VIDEO_WM8739=m +CONFIG_VIDEO_VP27SMPX=m +CONFIG_VIDEO_SONY_BTF_MPX=m +CONFIG_VIDEO_SAA6588=m 
+CONFIG_VIDEO_ADV7604=m +CONFIG_VIDEO_ADV7842=m +CONFIG_VIDEO_BT819=m +CONFIG_VIDEO_BT856=m +CONFIG_VIDEO_BT866=m +CONFIG_VIDEO_KS0127=m +CONFIG_VIDEO_SAA7110=m +CONFIG_VIDEO_SAA711X=m +CONFIG_VIDEO_TVP5150=m +CONFIG_VIDEO_TW2804=m +CONFIG_VIDEO_TW9903=m +CONFIG_VIDEO_TW9906=m +CONFIG_VIDEO_VPX3220=m +CONFIG_VIDEO_SAA717X=m +CONFIG_VIDEO_CX25840=m +CONFIG_VIDEO_SAA7127=m +CONFIG_VIDEO_SAA7185=m +CONFIG_VIDEO_ADV7170=m +CONFIG_VIDEO_ADV7175=m +CONFIG_VIDEO_ADV7511=m +CONFIG_VIDEO_OV2640=m +CONFIG_VIDEO_OV7640=m +CONFIG_VIDEO_OV7670=m +CONFIG_VIDEO_MT9M111=m +CONFIG_VIDEO_MT9V011=m +CONFIG_VIDEO_UPD64031A=m +CONFIG_VIDEO_UPD64083=m +CONFIG_VIDEO_SAA6752HS=m +CONFIG_VIDEO_M52790=m +CONFIG_AGP=y +CONFIG_AGP_AMD64=y +CONFIG_AGP_INTEL=y +CONFIG_AGP_SIS=m +CONFIG_DRM=m +CONFIG_DRM_I2C_CH7006=m +CONFIG_DRM_I2C_SIL164=m +CONFIG_DRM_RADEON=m +CONFIG_DRM_AMDGPU=m +CONFIG_DRM_AMDGPU_SI=y +CONFIG_DRM_AMDGPU_CIK=y +CONFIG_DRM_NOUVEAU=m +CONFIG_DRM_I915=m +CONFIG_DRM_VGEM=m +CONFIG_DRM_VMWGFX=m +CONFIG_DRM_GMA500=m +CONFIG_DRM_MGAG200=m +CONFIG_DRM_CIRRUS_QEMU=m +CONFIG_DRM_QXL=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN=m +CONFIG_DRM_ANALOGIX_ANX78XX=m +CONFIG_DRM_VBOXVIDEO=m +CONFIG_DRM_I810=m +CONFIG_FB=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y +CONFIG_FB_CIRRUS=m +CONFIG_FB_PM2=m +CONFIG_FB_CYBER2000=m +CONFIG_FB_ARC=m +CONFIG_FB_VGA16=m +CONFIG_FB_UVESA=m +CONFIG_FB_EFI=y +CONFIG_FB_N411=m +CONFIG_FB_HGA=m +CONFIG_FB_OPENCORES=m +CONFIG_FB_S1D13XXX=m +CONFIG_FB_NVIDIA=m +CONFIG_FB_RIVA=m +CONFIG_FB_I740=m +CONFIG_FB_LE80578=m +CONFIG_FB_CARILLO_RANCH=m +CONFIG_FB_INTEL=m +CONFIG_FB_MATROX=m +CONFIG_FB_MATROX_I2C=m +CONFIG_FB_RADEON=m +CONFIG_FB_ATY128=m +CONFIG_FB_ATY=m +CONFIG_FB_S3=m +CONFIG_FB_SAVAGE=m +CONFIG_FB_SIS=m +CONFIG_FB_VIA=m +CONFIG_FB_NEOMAGIC=m +CONFIG_FB_KYRO=m +CONFIG_FB_3DFX=m +CONFIG_FB_VOODOO1=m +CONFIG_FB_VT8623=m +CONFIG_FB_TRIDENT=m +CONFIG_FB_ARK=m +CONFIG_FB_PM3=m +CONFIG_FB_CARMINE=m +CONFIG_FB_SM501=m 
+CONFIG_FB_SMSCUFX=m +CONFIG_FB_UDL=m +CONFIG_FB_METRONOME=m +CONFIG_FB_MB862XX=m +CONFIG_FB_BROADSHEET=m +CONFIG_FB_HYPERV=m +CONFIG_FB_SM712=m +CONFIG_LCD_CLASS_DEVICE=m +CONFIG_LCD_PLATFORM=m +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_BACKLIGHT_GENERIC=m +CONFIG_BACKLIGHT_LM3533=m +CONFIG_BACKLIGHT_CARILLO_RANCH=m +CONFIG_BACKLIGHT_APPLE=m +CONFIG_BACKLIGHT_PM8941_WLED=m +CONFIG_BACKLIGHT_SAHARA=m +CONFIG_BACKLIGHT_ADP8860=m +CONFIG_BACKLIGHT_ADP8870=m +CONFIG_BACKLIGHT_PCF50633=m +CONFIG_BACKLIGHT_LM3630A=m +CONFIG_BACKLIGHT_LM3639=m +CONFIG_BACKLIGHT_LP855X=m +CONFIG_BACKLIGHT_SKY81452=m +CONFIG_BACKLIGHT_GPIO=m +CONFIG_BACKLIGHT_LV5207LP=m +CONFIG_BACKLIGHT_BD6107=m +CONFIG_BACKLIGHT_ARCXCNN=m +CONFIG_VGACON_SOFT_SCROLLBACK=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_LOGO=y +CONFIG_LOGO_LINUX_MONO=y +CONFIG_LOGO_LINUX_VGA16=y +CONFIG_SOUND=m +CONFIG_SND=m +CONFIG_SND_OSSEMUL=y +CONFIG_SND_MIXER_OSS=m +CONFIG_SND_PCM_OSS=m +CONFIG_SND_HRTIMER=m +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=m +CONFIG_SND_PCSP=m +CONFIG_SND_DUMMY=m +CONFIG_SND_ALOOP=m +CONFIG_SND_VIRMIDI=m +CONFIG_SND_MTPAV=m +CONFIG_SND_MTS64=m +CONFIG_SND_SERIAL_U16550=m +CONFIG_SND_PORTMAN2X4=m +CONFIG_SND_AD1889=m +CONFIG_SND_ALS300=m +CONFIG_SND_ALS4000=m +CONFIG_SND_ALI5451=m +CONFIG_SND_ASIHPI=m +CONFIG_SND_ATIIXP=m +CONFIG_SND_ATIIXP_MODEM=m +CONFIG_SND_AU8810=m +CONFIG_SND_AU8820=m +CONFIG_SND_AU8830=m +CONFIG_SND_AW2=m +CONFIG_SND_AZT3328=m +CONFIG_SND_BT87X=m +CONFIG_SND_CA0106=m +CONFIG_SND_CMIPCI=m +CONFIG_SND_OXYGEN=m +CONFIG_SND_CS4281=m +CONFIG_SND_CS46XX=m +CONFIG_SND_DARLA20=m +CONFIG_SND_GINA20=m +CONFIG_SND_LAYLA20=m +CONFIG_SND_DARLA24=m +CONFIG_SND_GINA24=m +CONFIG_SND_LAYLA24=m +CONFIG_SND_MONA=m +CONFIG_SND_MIA=m +CONFIG_SND_ECHO3G=m +CONFIG_SND_INDIGO=m +CONFIG_SND_INDIGOIO=m +CONFIG_SND_INDIGODJ=m +CONFIG_SND_INDIGOIOX=m +CONFIG_SND_INDIGODJX=m +CONFIG_SND_ENS1370=m +CONFIG_SND_ES1968=m +CONFIG_SND_FM801=m +CONFIG_SND_HDSP=m +CONFIG_SND_HDSPM=m 
+CONFIG_SND_ICE1712=m +CONFIG_SND_ICE1724=m +CONFIG_SND_INTEL8X0=m +CONFIG_SND_INTEL8X0M=m +CONFIG_SND_KORG1212=m +CONFIG_SND_LOLA=m +CONFIG_SND_LX6464ES=m +CONFIG_SND_MAESTRO3=m +CONFIG_SND_MIXART=m +CONFIG_SND_NM256=m +CONFIG_SND_PCXHR=m +CONFIG_SND_RIPTIDE=m +CONFIG_SND_RME32=m +CONFIG_SND_RME96=m +CONFIG_SND_RME9652=m +CONFIG_SND_SONICVIBES=m +CONFIG_SND_TRIDENT=m +CONFIG_SND_VIA82XX=m +CONFIG_SND_VIA82XX_MODEM=m +CONFIG_SND_VIRTUOSO=m +CONFIG_SND_VX222=m +CONFIG_SND_YMFPCI=m +CONFIG_SND_HDA_INTEL=m +CONFIG_SND_HDA_HWDEP=y +CONFIG_SND_HDA_INPUT_BEEP=y +CONFIG_SND_HDA_PATCH_LOADER=y +CONFIG_SND_HDA_CODEC_ANALOG=m +CONFIG_SND_HDA_CODEC_CA0132=m +CONFIG_SND_HDA_CODEC_CA0132_DSP=y +CONFIG_SND_USB_AUDIO=m +CONFIG_SND_USB_UA101=m +CONFIG_SND_USB_USX2Y=m +CONFIG_SND_USB_CAIAQ=m +CONFIG_SND_USB_CAIAQ_INPUT=y +CONFIG_SND_USB_US122L=m +CONFIG_SND_USB_6FIRE=m +CONFIG_SND_USB_HIFACE=m +CONFIG_SND_BCD2000=m +CONFIG_SND_USB_POD=m +CONFIG_SND_USB_PODHD=m +CONFIG_SND_USB_TONEPORT=m +CONFIG_SND_USB_VARIAX=m +CONFIG_SND_DICE=m +CONFIG_SND_OXFW=m +CONFIG_SND_ISIGHT=m +CONFIG_SND_FIREWORKS=m +CONFIG_SND_BEBOB=m +CONFIG_SND_FIREWIRE_DIGI00X=m +CONFIG_SND_FIREWIRE_TASCAM=m +CONFIG_SND_FIREWIRE_MOTU=m +CONFIG_SND_FIREFACE=m +CONFIG_SND_VXPOCKET=m +CONFIG_SND_PDAUDIOCF=m +CONFIG_SND_SOC=m +CONFIG_SND_SOC_AMD_ACP=m +CONFIG_SND_SOC_AMD_CZ_RT5645_MACH=m +CONFIG_SND_ATMEL_SOC=m +CONFIG_SND_DESIGNWARE_I2S=m +CONFIG_SND_SOC_FSL_ASRC=m +CONFIG_SND_SOC_FSL_SAI=m +CONFIG_SND_SOC_FSL_SSI=m +CONFIG_SND_SOC_FSL_SPDIF=m +CONFIG_SND_SOC_FSL_ESAI=m +CONFIG_SND_SOC_IMX_AUDMUX=m +CONFIG_SND_I2S_HI6210_I2S=m +CONFIG_SND_SOC_INTEL_HASWELL=m +CONFIG_SND_SOC_INTEL_SKYLAKE=m +CONFIG_SND_SOC_INTEL_HASWELL_MACH=m +CONFIG_SND_SOC_INTEL_BDW_RT5677_MACH=m +CONFIG_SND_SOC_INTEL_BROADWELL_MACH=m +CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH=m +CONFIG_SND_SOC_INTEL_BYTCR_RT5651_MACH=m +CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH=m +CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH=m +CONFIG_SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH=m 
+CONFIG_SND_SOC_INTEL_BYT_CHT_DA7213_MACH=m +CONFIG_SND_SOC_INTEL_BYT_CHT_ES8316_MACH=m +CONFIG_SND_SOC_INTEL_SKL_RT286_MACH=m +CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH=m +CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH=m +CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH=m +CONFIG_SND_SOC_INTEL_BXT_RT298_MACH=m +CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH=m +CONFIG_SND_SOC_XTFPGA_I2S=m +CONFIG_ZX_TDM=m +CONFIG_SND_SOC_TLV320AIC26=m +CONFIG_SND_ATMEL_SOC=m +CONFIG_SND_SIMPLE_CARD=m +CONFIG_HDMI_LPE_AUDIO=m +CONFIG_HID=m +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_A4TECH=m +CONFIG_HID_ACCUTOUCH=m +CONFIG_HID_ACRUX=m +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +CONFIG_HID_ASUS=m +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m +CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +CONFIG_HID_PRODIKEYS=m +CONFIG_HID_CMEDIA=m +CONFIG_HID_CP2112=m +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +CONFIG_HID_EMS_FF=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +CONFIG_HID_EZKEY=m +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +CONFIG_HID_HOLTEK=m +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +CONFIG_HID_GYRATION=m +# CONFIG_HID_ITE is not set +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LOGITECH=y +CONFIG_LOGITECH_FF=y +# CONFIG_HID_REDRAGON is not set +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=m +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PLANTRONICS=m +CONFIG_HID_PRIMAX=m +CONFIG_HID_RETRODE=m +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +CONFIG_HID_SONY=m +CONFIG_HID_SPEEDLINK=m +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +CONFIG_HID_HYPERV_MOUSE=m +CONFIG_HID_SMARTJOYPLUS=m +CONFIG_HID_TIVO=m 
+CONFIG_HID_TOPSEED=m +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +CONFIG_HID_UDRAW_PS3=m +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=m +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m +CONFIG_HID_ALPS=m +CONFIG_USB_HID=m +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_KBD=m +CONFIG_USB_MOUSE=m +CONFIG_I2C_HID=m +CONFIG_INTEL_ISH_HID=m +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_MON=y +CONFIG_USB_C67X00_HCD=m +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_OXU210HP_HCD=m +CONFIG_USB_ISP116X_HCD=m +CONFIG_USB_ISP1362_HCD=m +CONFIG_USB_FOTG210_HCD=m +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_U132_HCD=m +CONFIG_USB_SL811_HCD=m +CONFIG_USB_SL811_CS=m +CONFIG_USB_R8A66597_HCD=m +CONFIG_USB_HCD_BCMA=m +CONFIG_USB_HCD_SSB=m +CONFIG_USB_PRINTER=m +CONFIG_USB_TMC=m +CONFIG_USB_STORAGE=y +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +CONFIG_USBIP_CORE=m +CONFIG_USBIP_VHCI_HCD=m +CONFIG_USBIP_HOST=m +CONFIG_USBIP_VUDC=m +CONFIG_USB_MUSB_HDRC=m +CONFIG_USB_DWC3=m +CONFIG_USB_CHIPIDEA=m +CONFIG_USB_ISP1760=m +CONFIG_USB_USS720=m +CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_SIMPLE=m +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m 
+CONFIG_USB_SERIAL_EDGEPORT_TI=m +CONFIG_USB_SERIAL_F81232=m +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +CONFIG_USB_SERIAL_METRO=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_XIRCOM=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +CONFIG_USB_SERIAL_WISHBONE=m +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +CONFIG_USB_SERIAL_DEBUG=m +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +CONFIG_USB_CYPRESS_CY7C63=m +CONFIG_USB_CYTHERM=m +CONFIG_USB_IDMOUSE=m +CONFIG_USB_FTDI_ELAN=m +CONFIG_USB_APPLEDISPLAY=m +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_LD=m +CONFIG_USB_TRANCEVIBRATOR=m +CONFIG_USB_IOWARRIOR=m +CONFIG_USB_TEST=m +CONFIG_USB_EHSET_TEST_FIXTURE=m +CONFIG_USB_ISIGHTFW=m +CONFIG_USB_YUREX=m +CONFIG_USB_HUB_USB251XB=m +CONFIG_USB_HSIC_USB3503=m +CONFIG_USB_HSIC_USB4604=m +CONFIG_USB_LINK_LAYER_TEST=m +CONFIG_USB_CHAOSKEY=m +CONFIG_USB_ATM=m +CONFIG_USB_SPEEDTOUCH=m +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m +CONFIG_NOP_USB_XCEIV=m +CONFIG_USB_GPIO_VBUS=m +CONFIG_TAHVO_USB=m +CONFIG_USB_ISP1301=m +CONFIG_USB_GADGET=m +CONFIG_USB_FOTG210_UDC=m +CONFIG_USB_GR_UDC=m +CONFIG_USB_R8A66597=m +CONFIG_USB_PXA27X=m +CONFIG_USB_MV_UDC=m +CONFIG_USB_MV_U3D=m +CONFIG_USB_BDC_UDC=m +CONFIG_USB_AMD5536UDC=m +CONFIG_USB_NET2272=m +CONFIG_USB_NET2280=m +CONFIG_USB_GOKU=m +CONFIG_USB_EG20T=m +CONFIG_USB_ZERO=m 
+CONFIG_USB_AUDIO=m +CONFIG_USB_ETH=m +CONFIG_USB_G_NCM=m +CONFIG_USB_GADGETFS=m +CONFIG_USB_FUNCTIONFS=m +CONFIG_USB_MASS_STORAGE=m +CONFIG_USB_GADGET_TARGET=m +CONFIG_USB_G_SERIAL=m +CONFIG_USB_MIDI_GADGET=m +CONFIG_USB_G_PRINTER=m +CONFIG_USB_CDC_COMPOSITE=m +CONFIG_USB_G_NOKIA=m +CONFIG_USB_G_ACM_MS=m +CONFIG_USB_G_HID=m +CONFIG_USB_G_DBGP=m +CONFIG_USB_G_WEBCAM=m +CONFIG_TYPEC=m +CONFIG_TYPEC_TCPM=m +CONFIG_TYPEC_TCPCI=m +CONFIG_TYPEC_FUSB302=m +CONFIG_TYPEC_UCSI=m +CONFIG_UCSI_ACPI=m +CONFIG_TYPEC_TPS6598X=m +CONFIG_MMC=y +CONFIG_MMC_BLOCK=m +CONFIG_SDIO_UART=m +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_PCI=m +CONFIG_LEDS_CLASS=m +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +CONFIG_MMC_WBSD=m +CONFIG_MMC_TIFM_SD=m +CONFIG_MMC_SDRICOH_CS=m +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +CONFIG_MMC_USDHI6ROL0=m +CONFIG_MMC_TOSHIBA_PCI=m +CONFIG_MMC_MTK=m +CONFIG_MMC_SDHCI_XENON=m +CONFIG_MEMSTICK=m +CONFIG_MSPRO_BLOCK=m +CONFIG_MS_BLOCK=m +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_LEDS_CLASS_FLASH=m +CONFIG_LEDS_APU=m +CONFIG_LEDS_AS3645A=m +CONFIG_LEDS_LM3530=m +CONFIG_LEDS_LM3533=m +CONFIG_LEDS_LM3642=m +CONFIG_LEDS_MT6323=m +CONFIG_LEDS_PCA9532=m +CONFIG_LEDS_LP3944=m +CONFIG_LEDS_LP3952=m +CONFIG_LEDS_LP5521=m +CONFIG_LEDS_LP5523=m +CONFIG_LEDS_LP5562=m +CONFIG_LEDS_LP8501=m +CONFIG_LEDS_LP8860=m +CONFIG_LEDS_CLEVO_MAIL=m +CONFIG_LEDS_PCA955X=m +CONFIG_LEDS_PCA963X=m +CONFIG_LEDS_PWM=m +CONFIG_LEDS_REGULATOR=m +CONFIG_LEDS_BD2802=m +CONFIG_LEDS_INTEL_SS4200=m +CONFIG_LEDS_LT3593=m +CONFIG_LEDS_MC13783=m +CONFIG_LEDS_TCA6507=m +CONFIG_LEDS_TLC591XX=m +CONFIG_LEDS_LM355x=m +CONFIG_LEDS_MENF21BMC=m +CONFIG_LEDS_BLINKM=m +CONFIG_LEDS_MLXCPLD=m +CONFIG_LEDS_USER=m +CONFIG_LEDS_NIC78BX=m +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_ACTIVITY=m +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m +CONFIG_LEDS_TRIGGER_TRANSIENT=m 
+CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_MTHCA=m +CONFIG_INFINIBAND_QIB=m +CONFIG_INFINIBAND_CXGB3=m +CONFIG_INFINIBAND_CXGB4=m +CONFIG_INFINIBAND_I40IW=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_INFINIBAND_OCRDMA=m +CONFIG_INFINIBAND_VMWARE_PVRDMA=m +CONFIG_INFINIBAND_USNIC=m +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_HFI1=m +CONFIG_INFINIBAND_QEDR=m +CONFIG_INFINIBAND_RDMAVT=m +CONFIG_RDMA_RXE=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +CONFIG_INFINIBAND_OPA_VNIC=m +CONFIG_EDAC=y +CONFIG_EDAC_DECODE_MCE=m +CONFIG_EDAC_AMD64=m +CONFIG_EDAC_E752X=m +CONFIG_EDAC_I82975X=m +CONFIG_EDAC_I3000=m +CONFIG_EDAC_I3200=m +CONFIG_EDAC_IE31200=m +CONFIG_EDAC_X38=m +CONFIG_EDAC_I5400=m +CONFIG_EDAC_I7CORE=m +CONFIG_EDAC_I5000=m +CONFIG_EDAC_I5100=m +CONFIG_EDAC_I7300=m +CONFIG_EDAC_SBRIDGE=m +CONFIG_EDAC_SKX=m +CONFIG_EDAC_PND2=m +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_DRV_DS1374=m +CONFIG_RTC_DRV_DS1672=m +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_MAX8907=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_RTC_DRV_PCF85063=m +CONFIG_RTC_DRV_PCF85363=m +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_BQ32K=m +CONFIG_RTC_DRV_S35390A=m +CONFIG_RTC_DRV_FM3130=m +CONFIG_RTC_DRV_RX8010=m +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +CONFIG_RTC_DRV_RV8803=m +CONFIG_RTC_DRV_S5M=m +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_PCF2127=m +CONFIG_RTC_DRV_RV3029C2=m +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +CONFIG_RTC_DRV_DS1685_FAMILY=m +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_DA9063=m +CONFIG_RTC_DRV_STK17TA8=m +CONFIG_RTC_DRV_M48T86=m +CONFIG_RTC_DRV_M48T35=m 
+CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_BQ4802=m +CONFIG_RTC_DRV_RP5C01=m +CONFIG_RTC_DRV_V3020=m +CONFIG_RTC_DRV_PCF50633=m +CONFIG_RTC_DRV_FTRTC010=m +CONFIG_RTC_DRV_MC13XXX=m +CONFIG_RTC_DRV_MT6397=m +CONFIG_RTC_DRV_HID_SENSOR_TIME=m +CONFIG_DMADEVICES=y +CONFIG_ALTERA_MSGDMA=m +CONFIG_INTEL_IDMA64=m +CONFIG_INTEL_IOATDMA=m +CONFIG_INTEL_MIC_X100_DMA=m +CONFIG_QCOM_HIDMA_MGMT=m +CONFIG_QCOM_HIDMA=m +CONFIG_DW_DMAC=m +CONFIG_DW_DMAC_PCI=m +CONFIG_HD44780=m +CONFIG_KS0108=m +CONFIG_CFAG12864B=m +CONFIG_IMG_ASCII_LCD=m +CONFIG_PANEL=m +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_NETX=m +CONFIG_UIO_PRUSS=m +CONFIG_UIO_MF624=m +CONFIG_UIO_HV_GENERIC=m +CONFIG_VFIO=m +CONFIG_VFIO_PCI=m +CONFIG_VFIO_MDEV=m +CONFIG_VFIO_MDEV_DEVICE=m +CONFIG_VIRTIO_INPUT=m +CONFIG_HYPERV=m +CONFIG_HYPERV_UTILS=m +CONFIG_HYPERV_BALLOON=m +CONFIG_GREYBUS=m +CONFIG_GREYBUS_ES2=m +CONFIG_ACER_WMI=m +CONFIG_ACERHDF=m +CONFIG_ALIENWARE_WMI=m +CONFIG_ASUS_LAPTOP=m +CONFIG_DCDBAS=m +CONFIG_DELL_SMBIOS=m +CONFIG_DELL_LAPTOP=m +CONFIG_DELL_WMI=m +CONFIG_DELL_WMI_AIO=m +CONFIG_DELL_WMI_LED=m +CONFIG_DELL_SMO8800=m +CONFIG_DELL_RBTN=m +CONFIG_DELL_RBU=m +CONFIG_FUJITSU_LAPTOP=m +CONFIG_FUJITSU_TABLET=m +CONFIG_AMILO_RFKILL=m +CONFIG_HP_ACCEL=m +CONFIG_HP_WIRELESS=m +CONFIG_HP_WMI=m +CONFIG_MSI_LAPTOP=m +CONFIG_PANASONIC_LAPTOP=m +CONFIG_COMPAL_LAPTOP=m +CONFIG_SONY_LAPTOP=m +CONFIG_IDEAPAD_LAPTOP=m +CONFIG_THINKPAD_ACPI=m +CONFIG_SENSORS_HDAPS=m +CONFIG_INTEL_MENLOW=m +CONFIG_EEEPC_LAPTOP=m +CONFIG_ASUS_WMI=m +CONFIG_ASUS_NB_WMI=m +CONFIG_EEEPC_WMI=m +CONFIG_ASUS_WIRELESS=m +CONFIG_INTEL_WMI_THUNDERBOLT=m +CONFIG_MSI_WMI=m +CONFIG_PEAQ_WMI=m +CONFIG_TOPSTAR_LAPTOP=m +CONFIG_ACPI_TOSHIBA=m +CONFIG_TOSHIBA_BT_RFKILL=m +CONFIG_TOSHIBA_HAPS=m +CONFIG_ACPI_CMPC=m +CONFIG_INTEL_INT0002_VGPIO=m +CONFIG_INTEL_HID_EVENT=m +CONFIG_INTEL_VBTN=m +CONFIG_INTEL_IPS=m +CONFIG_IBM_RTL=m +CONFIG_SAMSUNG_LAPTOP=m 
+CONFIG_INTEL_OAKTRAIL=m +CONFIG_SAMSUNG_Q10=m +CONFIG_APPLE_GMUX=m +CONFIG_INTEL_RST=m +CONFIG_INTEL_SMARTCONNECT=m +CONFIG_INTEL_PMC_IPC=m +CONFIG_INTEL_BXTWC_PMIC_TMU=m +CONFIG_SURFACE_PRO3_BUTTON=m +CONFIG_SURFACE_3_BUTTON=m +CONFIG_INTEL_PUNIT_IPC=m +CONFIG_INTEL_TELEMETRY=m +CONFIG_MLX_PLATFORM=m +CONFIG_MFD_CROS_EC=m +CONFIG_CHROMEOS_LAPTOP=m +CONFIG_CHROMEOS_PSTORE=m +CONFIG_CROS_EC_LPC=m +CONFIG_CROS_KBD_LED_BACKLIGHT=m +CONFIG_COMMON_CLK_SI5351=m +CONFIG_COMMON_CLK_CDCE706=m +CONFIG_COMMON_CLK_CS2000_CP=m +CONFIG_COMMON_CLK_S2MPS11=m +CONFIG_COMMON_CLK_PWM=m +CONFIG_ALTERA_MBOX=m +CONFIG_AMD_IOMMU=y +CONFIG_INTEL_IOMMU=y +CONFIG_INTEL_IOMMU_DEFAULT_ON=y +CONFIG_RPMSG_CHAR=m +CONFIG_RPMSG_QCOM_GLINK_RPM=m +CONFIG_RPMSG_VIRTIO=m +CONFIG_EXTCON_ADC_JACK=m +CONFIG_EXTCON_ARIZONA=m +CONFIG_EXTCON_AXP288=m +CONFIG_EXTCON_GPIO=m +CONFIG_EXTCON_INTEL_INT3496=m +CONFIG_EXTCON_MAX3355=m +CONFIG_EXTCON_RT8973A=m +CONFIG_EXTCON_SM5502=m +CONFIG_EXTCON_USB_GPIO=m +CONFIG_EXTCON_USBC_CROS_EC=m +CONFIG_IIO=m +CONFIG_IIO_SW_DEVICE=m +CONFIG_IIO_SW_TRIGGER=m +CONFIG_BMA180=m +CONFIG_BMC150_ACCEL=m +CONFIG_DA280=m +CONFIG_DA311=m +CONFIG_DMARD09=m +CONFIG_DMARD10=m +CONFIG_HID_SENSOR_ACCEL_3D=m +CONFIG_IIO_CROS_EC_ACCEL_LEGACY=m +CONFIG_IIO_ST_ACCEL_3AXIS=m +CONFIG_KXSD9=m +CONFIG_KXCJK1013=m +CONFIG_MC3230=m +CONFIG_MMA7455_I2C=m +CONFIG_MMA7660=m +CONFIG_MMA8452=m +CONFIG_MMA9551=m +CONFIG_MMA9553=m +CONFIG_MXC4005=m +CONFIG_MXC6255=m +CONFIG_STK8312=m +CONFIG_STK8BA50=m +CONFIG_AD7291=m +CONFIG_AD7606_IFACE_PARALLEL=m +CONFIG_AD799X=m +CONFIG_AXP20X_ADC=m +CONFIG_AXP288_ADC=m +CONFIG_CC10001_ADC=m +CONFIG_DA9150_GPADC=m +CONFIG_DLN2_ADC=m +CONFIG_HX711=m +CONFIG_INA2XX_ADC=m +CONFIG_LTC2471=m +CONFIG_LTC2485=m +CONFIG_LTC2497=m +CONFIG_MAX1363=m +CONFIG_MAX9611=m +CONFIG_MCP3422=m +CONFIG_MEN_Z188_ADC=m +CONFIG_NAU7802=m +CONFIG_QCOM_SPMI_IADC=m +CONFIG_QCOM_SPMI_VADC=m +CONFIG_TI_ADC081C=m +CONFIG_TI_ADS1015=m +CONFIG_TI_AM335X_ADC=m +CONFIG_VIPERBOARD_ADC=m 
+CONFIG_ATLAS_PH_SENSOR=m +CONFIG_CCS811=m +CONFIG_IAQCORE=m +CONFIG_VZ89X=m +CONFIG_IIO_CROS_EC_SENSORS_CORE=m +CONFIG_IIO_CROS_EC_SENSORS=m +CONFIG_AD5064=m +CONFIG_AD5380=m +CONFIG_AD5446=m +CONFIG_AD5593R=m +CONFIG_DS4424=m +CONFIG_M62332=m +CONFIG_MAX517=m +CONFIG_MCP4725=m +CONFIG_IIO_SIMPLE_DUMMY=m +CONFIG_BMG160=m +CONFIG_HID_SENSOR_GYRO_3D=m +CONFIG_MPU3050_I2C=m +CONFIG_IIO_ST_GYRO_3AXIS=m +CONFIG_ITG3200=m +CONFIG_AFE4404=m +CONFIG_MAX30100=m +CONFIG_MAX30102=m +CONFIG_AM2315=m +CONFIG_DHT11=m +CONFIG_HDC100X=m +CONFIG_HID_SENSOR_HUMIDITY=m +CONFIG_HTS221=m +CONFIG_HTU21=m +CONFIG_SI7005=m +CONFIG_SI7020=m +CONFIG_BMI160_I2C=m +CONFIG_KMX61=m +CONFIG_INV_MPU6050_I2C=m +CONFIG_IIO_ST_LSM6DSX=m +CONFIG_ACPI_ALS=m +CONFIG_ADJD_S311=m +CONFIG_AL3320A=m +CONFIG_APDS9300=m +CONFIG_APDS9960=m +CONFIG_BH1750=m +CONFIG_BH1780=m +CONFIG_CM32181=m +CONFIG_CM3232=m +CONFIG_CM3323=m +CONFIG_CM36651=m +CONFIG_IIO_CROS_EC_LIGHT_PROX=m +CONFIG_GP2AP020A00F=m +CONFIG_SENSORS_ISL29018=m +CONFIG_SENSORS_ISL29028=m +CONFIG_ISL29125=m +CONFIG_HID_SENSOR_ALS=m +CONFIG_HID_SENSOR_PROX=m +CONFIG_JSA1212=m +CONFIG_RPR0521=m +CONFIG_SENSORS_LM3533=m +CONFIG_LTR501=m +CONFIG_MAX44000=m +CONFIG_PA12203001=m +CONFIG_SI1145=m +CONFIG_STK3310=m +CONFIG_TCS3414=m +CONFIG_TCS3472=m +CONFIG_SENSORS_TSL2563=m +CONFIG_TSL2583=m +CONFIG_TSL4531=m +CONFIG_US5182D=m +CONFIG_VCNL4000=m +CONFIG_VEML6070=m +CONFIG_VL6180=m +CONFIG_AK09911=m +CONFIG_BMC150_MAGN_I2C=m +CONFIG_MAG3110=m +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +CONFIG_MMC35240=m +CONFIG_IIO_ST_MAGN_3AXIS=m +CONFIG_SENSORS_HMC5843_I2C=m +CONFIG_HID_SENSOR_INCLINOMETER_3D=m +CONFIG_HID_SENSOR_DEVICE_ROTATION=m +CONFIG_IIO_HRTIMER_TRIGGER=m +CONFIG_IIO_INTERRUPT_TRIGGER=m +CONFIG_IIO_TIGHTLOOP_TRIGGER=m +CONFIG_IIO_SYSFS_TRIGGER=m +CONFIG_DS1803=m +CONFIG_MCP4531=m +CONFIG_TPL0102=m +CONFIG_LMP91000=m +CONFIG_ABP060MG=m +CONFIG_IIO_CROS_EC_BARO=m +CONFIG_HID_SENSOR_PRESS=m +CONFIG_HP03=m +CONFIG_MPL115_I2C=m +CONFIG_MPL3115=m 
+CONFIG_MS5611=m +CONFIG_MS5611_I2C=m +CONFIG_MS5637=m +CONFIG_IIO_ST_PRESS=m +CONFIG_T5403=m +CONFIG_HP206C=m +CONFIG_ZPA2326=m +CONFIG_LIDAR_LITE_V2=m +CONFIG_RFD77402=m +CONFIG_SRF04=m +CONFIG_SX9500=m +CONFIG_SRF08=m +CONFIG_HID_SENSOR_TEMP=m +CONFIG_MLX90614=m +CONFIG_TMP006=m +CONFIG_TMP007=m +CONFIG_TSYS01=m +CONFIG_TSYS02D=m +CONFIG_NTB=m +CONFIG_NTB_IDT=m +CONFIG_NTB_INTEL=m +CONFIG_NTB_SWITCHTEC=m +CONFIG_NTB_PINGPONG=m +CONFIG_NTB_TOOL=m +CONFIG_NTB_PERF=m +CONFIG_NTB_TRANSPORT=m +CONFIG_PWM=y +CONFIG_PWM_CROS_EC=m +CONFIG_PWM_LP3943=m +CONFIG_PWM_LPSS_PCI=m +CONFIG_PWM_LPSS_PLATFORM=m +CONFIG_PWM_PCA9685=m +CONFIG_IPACK_BUS=m +CONFIG_BOARD_TPCI200=m +CONFIG_SERIAL_IPOCTAL=m +CONFIG_RESET_TI_SYSCON=m +CONFIG_BCM_KONA_USB2_PHY=m +CONFIG_PHY_PXA_28NM_HSIC=m +CONFIG_PHY_PXA_28NM_USB2=m +CONFIG_PHY_CPCAP_USB=m +# CONFIG_PHY_ATH79_USB is not set +CONFIG_PHY_QCOM_USB_HS=m +CONFIG_PHY_QCOM_USB_HSIC=m +CONFIG_PHY_TUSB1210=m +CONFIG_MCB=m +CONFIG_MCB_PCI=m +CONFIG_MCB_LPC=m +CONFIG_THUNDERBOLT=m +CONFIG_DEV_DAX=m +CONFIG_STM=m +CONFIG_STM_DUMMY=m +CONFIG_STM_SOURCE_CONSOLE=m +CONFIG_STM_SOURCE_HEARTBEAT=m +CONFIG_INTEL_TH=m +CONFIG_INTEL_TH_PCI=m +CONFIG_INTEL_TH_GTH=m +CONFIG_INTEL_TH_STH=m +CONFIG_INTEL_TH_MSU=m +CONFIG_INTEL_TH_PTI=m +CONFIG_FPGA=m +CONFIG_ALTERA_PR_IP_CORE=m +CONFIG_FPGA_MGR_ALTERA_CVP=m +CONFIG_FSI=m +CONFIG_FSI_MASTER_GPIO=m +CONFIG_FSI_MASTER_HUB=m +CONFIG_FSI_SCOM=m +CONFIG_UNISYS_VISORBUS=m +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_REISERFS_FS=m +CONFIG_REISERFS_CHECK=y +CONFIG_REISERFS_PROC_INFO=y +CONFIG_REISERFS_FS_XATTR=y +CONFIG_REISERFS_FS_POSIX_ACL=y +CONFIG_REISERFS_FS_SECURITY=y +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_XFS_FS=m +CONFIG_XFS_POSIX_ACL=y +CONFIG_GFS2_FS=m +CONFIG_OCFS2_FS=m +CONFIG_BTRFS_FS=m +CONFIG_NILFS2_FS=m +CONFIG_F2FS_FS=m +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y 
+CONFIG_PRINT_QUOTA_WARNING=y +CONFIG_QFMT_V2=y +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_OVERLAY_FS=m +CONFIG_FSCACHE=m +CONFIG_CACHEFILES=m +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=y +CONFIG_NTFS_FS=m +CONFIG_PROC_KCORE=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_ORANGEFS_FS=m +CONFIG_ADFS_FS=m +CONFIG_AFFS_FS=m +CONFIG_HFS_FS=m +CONFIG_HFSPLUS_FS=m +CONFIG_BEFS_FS=m +CONFIG_BFS_FS=m +CONFIG_EFS_FS=m +CONFIG_JFFS2_FS=m +CONFIG_UBIFS_FS=m +CONFIG_CRAMFS=m +CONFIG_VXFS_FS=m +CONFIG_MINIX_FS=m +CONFIG_OMFS_FS=m +CONFIG_HPFS_FS=m +CONFIG_QNX4FS_FS=m +CONFIG_QNX6FS_FS=m +CONFIG_ROMFS_FS=m +CONFIG_SYSV_FS=m +CONFIG_UFS_FS=m +CONFIG_NFS_FS=y +CONFIG_NFS_V2=m +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_NFS_USE_LEGACY_DNS=y +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_CEPH_FS=m +CONFIG_CIFS=m +CONFIG_CODA_FS=m +CONFIG_AFS_FS=m +CONFIG_9P_FS=m +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m 
+CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +CONFIG_DLM=m +CONFIG_SECURITY=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +CONFIG_SECURITY_APPARMOR=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_TRUSTED_KEYRING=n +CONFIG_IMA=y +CONFIG_IMA_DEFAULT_HASH_SHA256=y +CONFIG_IMA_WRITE_POLICY=y +CONFIG_IMA_APPRAISE=y +CONFIG_IMA_TRUSTED_KEYRING=n +CONFIG_EVM=y +CONFIG_CRYPTO_RNG_DEFAULT=m +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_ECHAINIV=m +CONFIG_CRYPTO_CTR=m +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_GHASH=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_JITTERENTROPY=m +CONFIG_CRYPTO_USER_API_SKCIPHER=m +CONFIG_CRC_CCITT=m +CONFIG_PRINTK_TIME=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_STACK_USAGE=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_SCHED_DEBUG=y +CONFIG_SCHEDSTATS=y +CONFIG_RCU_PERF_TEST=m +CONFIG_NOTIFIER_ERROR_INJECTION=m +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +CONFIG_EARLY_PRINTK_DBGP=y +CONFIG_DEBUG_BOOT_PARAMS=y +CONFIG_UNWINDER_FRAME_POINTER=y diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index 15908eb9b17e..a26c77aa9a97 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -440,3 +440,5 @@ 433 i386 fspick sys_fspick __ia32_sys_fspick 434 i386 pidfd_open sys_pidfd_open 
__ia32_sys_pidfd_open 435 i386 clone3 sys_clone3 __ia32_sys_clone3 +# MCST extensions +400 i386 el_posix sys_el_posix compat_sys_el_posix diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index c29976eca4a8..cf038a92040b 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -358,6 +358,9 @@ 434 common pidfd_open __x64_sys_pidfd_open 435 common clone3 __x64_sys_clone3/ptregs +# MCST extensions +400 common el_posix __x64_sys_el_posix + # # x32-specific system call numbers start at 512 to avoid cache impact # for native 64-bit operation. The __x32_compat_sys stubs are created diff --git a/arch/x86/include/asm-l b/arch/x86/include/asm-l new file mode 120000 index 000000000000..55881a560a9f --- /dev/null +++ b/arch/x86/include/asm-l @@ -0,0 +1 @@ +../../l/include/asm \ No newline at end of file diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index adc6cc86b062..cb9238b7e968 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -10,6 +10,7 @@ #ifdef CONFIG_SMP +#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) extern void prefill_possible_map(void); #else /* CONFIG_SMP */ diff --git a/arch/x86/include/asm/gpio.h b/arch/x86/include/asm/gpio.h new file mode 100644 index 000000000000..9414a7c50a3d --- /dev/null +++ b/arch/x86/include/asm/gpio.h @@ -0,0 +1,21 @@ +/* + * Generic GPIO API implementation for e2k. + * + * Derived from the generic GPIO API for x86: + * + * Copyright (c) 2012 MCST. + * + * Author: Evgeny Kravtsunov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef _ASM_X86_GPIO_H +#define _ASM_X86_GPIO_H + +#include + +#endif /* _ASM_X86_GPIO_H */ diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index 172f9749dbb2..b2b0af1bbb8b 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -60,8 +60,8 @@ void kvm_slot_page_track_add_page(struct kvm *kvm, void kvm_slot_page_track_remove_page(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, enum kvm_page_track_mode mode); -bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, - enum kvm_page_track_mode mode); +bool kvm_page_track_is_active(struct kvm *kvm, struct kvm_memory_slot *slot, + gfn_t gfn, enum kvm_page_track_mode mode); void kvm_page_track_register_notifier(struct kvm *kvm, diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index e662f987dfa2..63e51b52fb2c 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h @@ -14,6 +14,13 @@ #ifdef __KERNEL__ + +#ifdef CONFIG_MCST +#define iohub_revision(pdev) 255 +#define iohub_generation(pdev) 255 +#define is_prototype() 0 +#endif + struct pci_sysdata { int domain; /* PCI domain */ int node; /* NUMA node */ diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index ed8ec011a9fd..bcaff8583567 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -9,6 +9,7 @@ #include #include + #ifdef __i386__ #include @@ -29,6 +30,9 @@ #ifndef __ASSEMBLY__ #include #include +#ifdef CONFIG_MCST +extern int l_set_ethernet_macaddr(struct pci_dev *pdev, char *macaddr); +#endif extern u64 relocated_ramdisk; diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 911d261a450d..0c38cafb9422 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -58,6 +58,9 @@ struct thread_info { u32 status; /* thread synchronous flags */ int preempt_lazy_count; /* 0 => lazy preemptable <0 => BUG */ +#ifdef 
CONFIG_MCST + long long irq_enter_clk; /* CPU clock when irq enter was */ +#endif }; #define INIT_THREAD_INFO(tsk) \ @@ -111,6 +114,9 @@ struct thread_info { #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */ #define TIF_X32 30 /* 32-bit native x86-64 binary */ #define TIF_FSCHECK 31 /* Check FS is USER_DS on return */ +#ifdef CONFIG_MCST +#define TIF_NAPI_WORK 32 /* napi_wq_worker() is running MCST addition */ +#endif #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) @@ -141,6 +147,9 @@ struct thread_info { #define _TIF_ADDR32 (1 << TIF_ADDR32) #define _TIF_X32 (1 << TIF_X32) #define _TIF_FSCHECK (1 << TIF_FSCHECK) +#ifdef CONFIG_MCST +#define _TIF_NAPI_WORK (1 << TIF_NAPI_WORK) +#endif /* * work to do in syscall_trace_enter(). Also includes TIF_NOHZ for diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index 8a0c25c6bf09..dcead43eec7a 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h @@ -29,6 +29,45 @@ static inline cycles_t get_cycles(void) return rdtsc(); } +#ifdef CONFIG_MCST +#define UNSET_CPU_FREQ ((u32)(-1)) +extern u32 cpu_freq_hz; + +static inline long long cycles_2nsec(cycles_t cycles) +{ +#if BITS_PER_LONG == 64 + return cycles * 1000 / (cpu_freq_hz / 1000000); +#else + long long long_res = cycles * 1000; + do_div(long_res, (cpu_freq_hz / 1000000)); + return long_res; +#endif /* BITS_PER_LONG */ +} + +static inline long long cycles_2usec(cycles_t cycles) +{ +#if BITS_PER_LONG == 64 + return cycles / (cpu_freq_hz / 1000000); +#else + long long long_res = cycles; + do_div(long_res, (cpu_freq_hz / 1000000)); + return long_res; +#endif /* BITS_PER_LONG */ +} + +static inline cycles_t usecs_2cycles(long long usecs) +{ +#if BITS_PER_LONG == 64 + return usecs * cpu_freq_hz / 1000000; +#else + long long long_res = usecs * cpu_freq_hz; + do_div(long_res, 1000000); + return long_res; +#endif /* BITS_PER_LONG */ +} + +#endif /* CONFIG_MCST */ + extern struct 
system_counterval_t convert_art_to_tsc(u64 art); extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns); diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 3578ad248bc9..7d6e8b41ad46 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -140,6 +140,9 @@ obj-$(CONFIG_UNWINDER_ORC) += unwind_orc.o obj-$(CONFIG_UNWINDER_FRAME_POINTER) += unwind_frame.o obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o +# MCST extension +obj-$(CONFIG_MCST) += mcst_eth_mac.o + ### # 64 bit specific files ifeq ($(CONFIG_X86_64),y) diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 4e4476b832be..10124dba97c0 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1123,6 +1123,50 @@ static void local_apic_timer_interrupt(void) evt->event_handler(evt); } +#ifdef CONFIG_MCST +DEFINE_PER_CPU(long long, next_rt_intr) = 0; +EXPORT_SYMBOL(next_rt_intr); + +#define DELTA_NS (NSEC_PER_SEC / HZ / 2) + +void do_postpone_tick(int to_next_rt_ns) +{ + int cpu; + long long cur_time = ktime_to_ns(ktime_get()); + long long next_tm; + unsigned long flags; + struct pt_regs regs_new; + struct pt_regs *old_regs; + + local_irq_save(flags); + cpu = smp_processor_id(); + next_tm = per_cpu(next_rt_intr, cpu); + if (to_next_rt_ns) { + per_cpu(next_rt_intr, cpu) = cur_time + to_next_rt_ns; + } else{ + per_cpu(next_rt_intr, cpu) = 0; + } +#if 0 + trace_printk("DOPOSTP old_nx-cur=%lld cur=%lld nx=%lld\n", + next_tm - cur_time, cur_time, cur_time + to_next_rt_ns); +#endif + if (next_tm == 1) { + /* FIXME next line has long run time and may be deleted */ + memset(®s_new, 0, sizeof(struct pt_regs)); + /* need to get answer to user_mod() only */ + regs_new.cs = 3; + old_regs = set_irq_regs(®s_new); + irq_enter(); + local_apic_timer_interrupt(); + irq_exit(); + set_irq_regs(old_regs); + } + local_irq_restore(flags); +} +EXPORT_SYMBOL(do_postpone_tick); + +#endif + /* * Local APIC timer interrupt. 
This is the most natural way for doing * local interrupts, but local timer interrupts can be emulated by @@ -1135,6 +1179,32 @@ __visible void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); +#ifdef CONFIG_MCST + int cpu; + long long cur_time; + long long next_time; + + cpu = smp_processor_id(); + next_time = per_cpu(next_rt_intr, cpu); + + if (next_time) { + cur_time = ktime_to_ns(ktime_get()); + if (cur_time > next_time + DELTA_NS) { + per_cpu(next_rt_intr, cpu) = 0; + } else if (cur_time > next_time - DELTA_NS && + cur_time < next_time + DELTA_NS) { + /* set 1 -- must do timer later + * in do_postpone_tick() */ + per_cpu(next_rt_intr, cpu) = 1; + /* if do_postpone_tick() will not called: */ + ack_APIC_irq(); + apic_write(APIC_TMICT, + usecs_2cycles(USEC_PER_SEC / HZ)); + return; + } + } +#endif + /* * NOTE! We'd better ACK the irq immediately, * because timer handling can be slow. diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 7dfd0185767c..f60e46af9248 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -237,6 +237,10 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs) /* high bit used in ret_from_ code */ unsigned vector = ~regs->orig_ax; +#ifdef CONFIG_MCST + current_thread_info()->irq_enter_clk = get_cycles(); +#endif + entering_irq(); /* entering_irq() tells RCU that we're not quiescent. Check it. 
*/ diff --git a/arch/x86/kernel/mcst_eth_mac.c b/arch/x86/kernel/mcst_eth_mac.c new file mode 100644 index 000000000000..400ae930e6ce --- /dev/null +++ b/arch/x86/kernel/mcst_eth_mac.c @@ -0,0 +1,50 @@ +#include +#include +#include +#include +#include + +static unsigned char l_base_mac_addr[6] = {0}; + + +static int __init machine_mac_addr_setup(char *str) +{ + char *cur = str; + int i; + for (i = 0; i < 6; i++) { + l_base_mac_addr[i] = + (unsigned char)simple_strtoull(cur, &cur, 16); + if (*cur != ':') { + break; + } + cur++; + } + pr_info("MCST_base_mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", + l_base_mac_addr[0], + l_base_mac_addr[1], + l_base_mac_addr[2], + l_base_mac_addr[3], + l_base_mac_addr[4], + l_base_mac_addr[5]); + return 0; +} +__setup("mcst_mac=", machine_mac_addr_setup); + +int l_set_ethernet_macaddr(struct pci_dev *pdev, char *macaddr) +{ + static raw_spinlock_t my_spinlock = + __RAW_SPIN_LOCK_UNLOCKED(my_spinlock); + static int l_cards_without_mac = 1; + int i; + for (i = 0; i < 6; i++) { + macaddr[i] = l_base_mac_addr[i]; + } + raw_spin_lock_irq(&my_spinlock); + macaddr[5] += l_cards_without_mac & 0xff; + l_cards_without_mac ++; + raw_spin_unlock_irq(&my_spinlock); + return 0; +} +EXPORT_SYMBOL(l_set_ethernet_macaddr); + + diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 7e322e2daaf5..daa3202a49d1 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -1411,6 +1411,10 @@ static bool __init determine_cpu_tsc_frequencies(bool early) cpu_khz = pit_hpet_ptimer_calibrate_cpu(); } +#ifdef CONFIG_MCST + cpu_freq_hz = cpu_khz * 1000; +#endif + /* * Trust non-zero tsc_khz as authoritative, * and use it to sanity check cpu_khz, diff --git a/config-elbrus-def b/config-elbrus-def new file mode 100644 index 000000000000..c2a32b3cc0ce --- /dev/null +++ b/config-elbrus-def @@ -0,0 +1,7869 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# Linux/e2k 5.4.40-elbrus-def-alt1.3.0 Kernel Configuration +# + +# +# Compiler: lcc:1.25.07:Aug--1-2020:e2k-v4-linux +# +CONFIG_GCC_VERSION=0 +CONFIG_CLANG_VERSION=0 +CONFIG_IRQ_WORK=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZ4 is not set +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +CONFIG_USELIB=y +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_IRQ_DOMAIN=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_IRQ_MSI_IOMMU=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CMOS_UPDATE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +CONFIG_HAVE_PREEMPT_LAZY=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set +# CONFIG_PREEMPT_RT is not set + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +# CONFIG_PSI is not set +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# 
+# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +# end of RCU Subsystem + +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_IKHEADERS=m +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 + +# +# Scheduler features +# +CONFIG_UCLAMP_TASK=y +CONFIG_UCLAMP_BUCKETS_COUNT=5 +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +# CONFIG_RT_GROUP_SCHED is not set +CONFIG_UCLAMP_TASK_GROUP=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +# CONFIG_SCHED_AUTOGROUP is not set +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_BPF=y +CONFIG_EXPERT=y +CONFIG_MULTIUSER=y +# CONFIG_SGETMASK_SYSCALL is not set +CONFIG_SYSFS_SYSCALL=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y 
+CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_HAVE_FUTEX_CMPXCHG=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_BPF_SYSCALL=y +CONFIG_USERFAULTFD=y +CONFIG_RSEQ=y +# CONFIG_DEBUG_RSEQ is not set +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# CONFIG_WATCH_PREEMPT is not set +# end of Kernel Performance Events And Counters + +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_MEMCG_SYSFS_ON is not set +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +CONFIG_SLAB_MERGE_DEFAULT=y +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SLAB_FREELIST_HARDENED is not set +CONFIG_SHUFFLE_PAGE_ALLOCATOR=y +CONFIG_SLUB_CPU_PARTIAL=y +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +CONFIG_LTTNG=m +# end of General setup + +CONFIG_E2K=y +CONFIG_SWIOTLB=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ZONE_DMA=y +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_GENERIC_FIND_FIRST_BIT=y +CONFIG_GENERIC_FIND_NEXT_BIT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_EARLY_PRINTK=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_GREGS_CONTEXT=y +CONFIG_GENERIC_IOMAP=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_HAVE_GENERIC_GUP=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_SYS_SUPPORTS_HUGETLBFS=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_BOOT_TRACE_POSSIBLE=y +CONFIG_HOTPLUG_CPU=y +CONFIG_GENERIC_GPIO=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y 
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_DEFERRED_STRUCT_PAGE_INIT=y +CONFIG_HOLES_IN_ZONE=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 + +# +# Processor type and features +# +CONFIG_E2K_MACHINE_SIC=y +CONFIG_E2K_MACHINE_IOHUB=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +# CONFIG_E2K_MACHINE is not set +CONFIG_CPU_E2S=y +CONFIG_CPU_E8C=y +CONFIG_CPU_E1CP=y +CONFIG_CPU_E8C2=y +CONFIG_CPU_E12C=y +CONFIG_CPU_E16C=y +CONFIG_CPU_E2C3=y +CONFIG_CPU_ISET=0 +CONFIG_CPU_HWBUG_IBRANCH=y +# CONFIG_E2K_MINVER_V2 is not set +CONFIG_E2K_MINVER_V3=y +# CONFIG_E2K_MINVER_V4 is not set +# CONFIG_E2K_MINVER_V5 is not set +# CONFIG_E2K_MINVER_V6 is not set +CONFIG_E2K_MINVER=3 + +# +# MMU Page Tables features +# +CONFIG_MMU_PT_V6=y +# CONFIG_MMU_SEP_VIRT_SPACE is not set +CONFIG_SECCOMP=y +# end of MMU Page Tables features + +CONFIG_ENABLE_EXTMEM=y +CONFIG_E16_CORE_SUPPORT=y +# CONFIG_HZ_100 is not set +# CONFIG_HZ_250 is not set +# CONFIG_HZ_300 is not set +CONFIG_HZ_1000=y +# CONFIG_HZ_10 is not set +# CONFIG_HZ_20 is not set +CONFIG_HZ=1000 +CONFIG_SCHED_HRTICK=y +CONFIG_GLOBAL_CONTEXT=y +CONFIG_SECONDARY_SPACE_SUPPORT=y +CONFIG_MLT_STORAGE=y +CONFIG_E2K_PA_BITS=40 +CONFIG_ONLY_HIGH_PHYS_MEM=y +# CONFIG_ONLY_BSP_MEMORY is not set +CONFIG_FORCE_MAX_ZONEORDER=16 +CONFIG_NODES_SHIFT=2 +CONFIG_NODES_SPAN_OTHER_NODES=y +CONFIG_NUMA=y +CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT=y +# CONFIG_KTHREAD_ON_CPU is not set +CONFIG_BOOT_PRINTK=y +CONFIG_SERIAL_BOOT_PRINTK=y +CONFIG_SERIAL_AM85C30_BOOT_CONSOLE=y +CONFIG_BOOT_SERIAL_BAUD=115200 +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_MEMLIMIT=64 +CONFIG_EXT_MEMLIMIT=2048 +CONFIG_VRAM_SIZE_128=y +# CONFIG_VRAM_SIZE_256 is not set +# CONFIG_VRAM_SIZE_512 is not set +# CONFIG_VRAM_SIZE_1024 is not set +# CONFIG_VRAM_DISABLE is not set +# 
CONFIG_USR_CONTROL_INTERRUPTS is not set +# CONFIG_KERNEL_TIMES_ACCOUNT is not set +# CONFIG_CLI_CHECK_TIME is not set +# CONFIG_CMDLINE_PROMPT is not set +CONFIG_MAKE_ALL_PAGES_VALID=y +CONFIG_USE_AAU=y +CONFIG_DATA_STACK_WINDOW=y +CONFIG_BINFMT_ELF32=y +CONFIG_COMPAT=y +CONFIG_PROTECTED_MODE=y +# CONFIG_DBG_RTL_TRACE is not set +CONFIG_CLW_ENABLE=y +# CONFIG_IPD_DISABLE is not set +CONFIG_TC_STORAGE=y +# CONFIG_IGNORE_MEM_LOCK_AS is not set +CONFIG_RECOVERY=y +CONFIG_MONITORS=y +CONFIG_E2K_KEXEC=y +CONFIG_DUMP_ALL_STACKS=y +CONFIG_CMDLINE="root=/dev/sda3 console=ttyLMS0 console=ttyS0,115200 sclkr=no init=/bin/bash transparent_hugepage=madvise iommu=0" +# CONFIG_CMDLINE_OVERRIDE is not set +CONFIG_SMP=y +CONFIG_NR_CPUS=64 +CONFIG_SCHED_MC=y +CONFIG_MCST=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_GENERIC_TIME=y +CONFIG_SCLKR_CLOCKSOURCE=y +CONFIG_BIOS=y +CONFIG_ENABLE_BIOS_MPTABLE=y +CONFIG_CEPIC_TIMER_FREQUENCY=100 +CONFIG_ENABLE_ELBRUS_PCIBIOS=y +CONFIG_ENABLE_IDE=y +CONFIG_ENABLE_KEYBOARD=y +CONFIG_ENABLE_MOUSE=y +CONFIG_ENABLE_FLOPPY=y +CONFIG_ENABLE_MGA=y +CONFIG_ENABLE_RTC=y +CONFIG_ENABLE_SERIAL=y +CONFIG_ENABLE_PARALLEL_PORT=y +CONFIG_ENABLE_IOAPIC=y +# CONFIG_ADC_DISABLE is not set +CONFIG_OPTIMIZE_REGISTERS_ACCESS=y +# CONFIG_E2K_STACKS_TRACER is not set +CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y +# CONFIG_EPROF_KERNEL is not set +# end of Processor type and features + +CONFIG_IOMMU_HELPER=y +CONFIG_HAVE_DMA_ATTRS=y + +# +# Elbrus Architecture Linux Kernel Configuration +# +CONFIG_EPIC=y + +# +# Boot/prom console support +# +CONFIG_L_EARLY_PRINTK=y +CONFIG_SERIAL_PRINTK=y +CONFIG_NVRAM_PANIC=y +CONFIG_SERIAL_AM85C30_CONSOLE=y +CONFIG_EARLY_DUMP_CONSOLE=y +# end of Boot/prom console support + +CONFIG_CLKR_CLOCKSOURCE=y +CONFIG_CLKR_OFFSET=y +CONFIG_IOHUB_GPIO=y +# CONFIG_PIC is not set +CONFIG_L_X86_64=y +CONFIG_L_LOCAL_APIC=y +CONFIG_L_IO_APIC=y +CONFIG_L_PCI_QUIRKS=y +CONFIG_L_SIC_IPLINK_OFF=y +CONFIG_L_MMPD=y +CONFIG_L_PMC=y 
+CONFIG_I2C_SPI_RESET_CONTROLLER=y +CONFIG_L_I2C_CONTROLLER=y +CONFIG_L_SPI_CONTROLLER=y +CONFIG_IPE2ST_POWER=m +CONFIG_ACPI_L_SPMC=y + +# +# Device Tree +# +CONFIG_OF=y +# CONFIG_DTB_L_TEST is not set +# end of Device Tree +# end of Elbrus Architecture Linux Kernel Configuration + +# +# Power management options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_SUSPEND_SKIP_SYNC is not set +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_GENERIC_DOMAINS=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y +CONFIG_PM_GENERIC_DOMAINS_OF=y +# CONFIG_ENERGY_MODEL is not set + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_PSTATES is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=m +CONFIG_CPU_FREQ_GOV_USERSPACE=m +CONFIG_CPU_FREQ_GOV_ONDEMAND=m +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_CPU_FREQ_GOV_PSTATES=m + +# +# CPU frequency scaling drivers +# +CONFIG_E2K_PCS_CPUFREQ=m +# end of CPU Frequency scaling + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_CPU_IDLE_GOV_TEO is not set + +# +# E2K CPU Idle Drivers +# +CONFIG_E2K_CPUIDLE=y +# end of E2K CPU Idle Drivers +# end of CPU Idle +# end of Power management options + +# +# Bus options (PCI, ISA) +# + +# +# Elbrus chipset PCI support +# 
+CONFIG_PCI_ELBRUS=y +CONFIG_IOHUB_DOMAINS=y +# end of Elbrus chipset PCI support + +# CONFIG_PCI_USE_VECTOR is not set +# CONFIG_ISA is not set +# end of Bus options (PCI, ISA) + +CONFIG_SYSVIPC_COMPAT=y +# CONFIG_VIRTUALIZATION is not set +# CONFIG_VIRTIO_MENU is not set + +# +# General architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_KPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_THREAD_STACK_ALLOCATOR=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_ARCH_WEAK_RELEASE_ACQUIRE=y +CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_CC_HAS_STACKPROTECTOR_NONE=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=28 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 +CONFIG_HAVE_COPY_THREAD_TLS=y +CONFIG_64BIT_TIME=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_ARCH_SUPPORTS_RT=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_ARCH_HAS_PHYS_TO_DMA=y +# CONFIG_REFCOUNT_FULL is not set +CONFIG_HAVE_ARCH_COMPILER_H=y +# CONFIG_LOCK_EVENT_COUNTS is not set + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# end of GCOV-based kernel 
profiling +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +# CONFIG_MODULE_SIG is not set +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_RQ_ALLOC_TIME=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +CONFIG_BLK_DEV_THROTTLING_LOW=y +# CONFIG_BLK_CMDLINE_PARSER is not set +CONFIG_BLK_WBT=y +CONFIG_BLK_CGROUP_IOLATENCY=y +CONFIG_BLK_CGROUP_IOCOST=y +CONFIG_BLK_WBT_MQ=y +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +CONFIG_BLK_SED_OPAL=y + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_EFI_PARTITION=y +# end of Partition Types + +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_RDMA=y +CONFIG_BLK_PM=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=m +CONFIG_IOSCHED_BFQ=m +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_BFQ_CGROUP_DEBUG is not set +# end of IO Schedulers + +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_READ_UNLOCK=y +CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_SPIN_UNLOCK_BH=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y 
+CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_BH=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_BH=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_ARCH_KEEP_MEMBLOCK=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_SPARSE=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_BOUNCE=y +CONFIG_MMU_NOTIFIER=y +# CONFIG_KSM is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_MEMORY_SANITIZE=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +# CONFIG_CLEANCACHE is not set +# CONFIG_FRONTSWAP is not set +# CONFIG_CMA is not set +# CONFIG_ZPOOL is not set +# CONFIG_ZBUD is not set +# CONFIG_ZSMALLOC is not set +# CONFIG_IDLE_PAGE_TRACKING is not set +CONFIG_FRAME_VECTOR=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not set +CONFIG_READ_ONLY_THP_FOR_FS=y 
+CONFIG_ARCH_HAS_PTE_SPECIAL=y +# end of Memory Management options + +CONFIG_NET=y +CONFIG_COMPAT_NETLINK_MESSAGES=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_NET_REDIRECT=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_UNIX_DIAG=y +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=m +CONFIG_XFRM_USER=m +CONFIG_XFRM_INTERFACE=m +# CONFIG_XFRM_SUB_POLICY is not set +CONFIG_XFRM_MIGRATE=y +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +# CONFIG_IP_FIB_TRIE_STATS is not set +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +CONFIG_NET_FOU=m +CONFIG_NET_FOU_IP_TUNNELS=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +CONFIG_INET_DIAG_DESTROY=y +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m 
+CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_FOU=m +CONFIG_IPV6_FOU_TUNNEL=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_NETLABEL=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_COMMON=m +CONFIG_NF_LOG_NETDEV=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m 
+CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_SET=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +# CONFIG_NFT_FIB_INET is not set +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +# CONFIG_NFT_FIB_NETDEV is not set +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XTABLES=m + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m 
+CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_FLOW_TABLE_IPV4=m +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m 
+CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_FLOW_TABLE_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_MATCH_SRH=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m + +# +# DECnet: Netfilter Configuration +# +# CONFIG_DECNET_NF_GRABULATOR is not set +# end of DECnet: Netfilter Configuration + +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_LOG_BRIDGE=m +CONFIG_NF_CONNTRACK_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m 
+CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BPFILTER=y +CONFIG_IP_DCCP=m +CONFIG_INET_DCCP_DIAG=m + +# +# DCCP CCIDs Configuration +# +# CONFIG_IP_DCCP_CCID2_DEBUG is not set +CONFIG_IP_DCCP_CCID3=y +# CONFIG_IP_DCCP_CCID3_DEBUG is not set +CONFIG_IP_DCCP_TFRC_LIB=y +# end of DCCP CCIDs Configuration + +# +# DCCP Kernel Hacking +# +# CONFIG_IP_DCCP_DEBUG is not set +# end of DCCP Kernel Hacking + +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_OBJCNT is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +CONFIG_RDS=m +CONFIG_RDS_RDMA=m +CONFIG_RDS_TCP=m +# CONFIG_RDS_DEBUG is not set +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_DECNET=m +CONFIG_DECNET_ROUTER=y +CONFIG_LLC=m +CONFIG_LLC2=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_X25=m +CONFIG_LAPB=m +CONFIG_PHONET=m +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_DEBUGFS is not set +CONFIG_6LOWPAN_NHC=m +CONFIG_6LOWPAN_NHC_DEST=m +CONFIG_6LOWPAN_NHC_FRAGMENT=m +CONFIG_6LOWPAN_NHC_HOP=m +CONFIG_6LOWPAN_NHC_IPV6=m +CONFIG_6LOWPAN_NHC_MOBILITY=m 
+CONFIG_6LOWPAN_NHC_ROUTING=m +CONFIG_6LOWPAN_NHC_UDP=m +CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m +CONFIG_6LOWPAN_GHC_UDP=m +CONFIG_6LOWPAN_GHC_ICMPV6=m +CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m +CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m +CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m +CONFIG_IEEE802154=m +CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y +CONFIG_IEEE802154_SOCKET=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_ATM=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_TAPRIO=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set +# CONFIG_DEFAULT_FQ_CODEL is not set +# CONFIG_DEFAULT_SFQ is not set +CONFIG_DEFAULT_PFIFO_FAST=y +CONFIG_DEFAULT_NET_SCH="pfifo_fast" + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_CANID=m +CONFIG_NET_EMATCH_IPSET=m 
+CONFIG_NET_EMATCH_IPT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_MPLS=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_CONNMARK=m +CONFIG_NET_ACT_CTINFO=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_NET_TC_SKB_EXT=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +CONFIG_BATMAN_ADV=m +CONFIG_BATMAN_ADV_BATMAN_V=y +CONFIG_BATMAN_ADV_BLA=y +CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_MCAST=y +# CONFIG_BATMAN_ADV_DEBUGFS is not set +# CONFIG_BATMAN_ADV_DEBUG is not set +CONFIG_BATMAN_ADV_SYSFS=y +# CONFIG_BATMAN_ADV_TRACING is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +# CONFIG_VSOCKETS is not set +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=m +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=m +CONFIG_HSR=m +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +CONFIG_NET_NCSI=y +CONFIG_NCSI_OEM_CMD_GET_MAC=y +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=y +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set +CONFIG_CAN=m +CONFIG_CAN_RAW=m +CONFIG_CAN_BCM=m +CONFIG_CAN_GW=m +# CONFIG_CAN_J1939 is not set + +# +# CAN Device Drivers +# +CONFIG_CAN_VCAN=m +# CONFIG_CAN_VXCAN is not set +# CONFIG_CAN_SLCAN is not set +CONFIG_CAN_DEV=m +CONFIG_CAN_CALC_BITTIMING=y +# CONFIG_CAN_FLEXCAN 
is not set +# CONFIG_CAN_GRCAN is not set +# CONFIG_CAN_KVASER_PCIEFD is not set +# CONFIG_CAN_C_CAN is not set +# CONFIG_CAN_CC770 is not set +# CONFIG_CAN_IFI_CANFD is not set +# CONFIG_CAN_M_CAN is not set +# CONFIG_CAN_ELCAN is not set +# CONFIG_CAN_PEAK_PCIEFD is not set +# CONFIG_CAN_SJA1000 is not set +# CONFIG_CAN_SOFTING is not set + +# +# CAN SPI interfaces +# +# CONFIG_CAN_HI311X is not set +# CONFIG_CAN_MCP251X is not set +# end of CAN SPI interfaces + +# +# CAN USB interfaces +# +# CONFIG_CAN_8DEV_USB is not set +# CONFIG_CAN_EMS_USB is not set +# CONFIG_CAN_ESD_USB2 is not set +# CONFIG_CAN_GS_USB is not set +# CONFIG_CAN_KVASER_USB is not set +# CONFIG_CAN_MCBA_USB is not set +# CONFIG_CAN_PEAK_USB is not set +# CONFIG_CAN_UCAN is not set +# end of CAN USB interfaces + +# CONFIG_CAN_DEBUG_DEVICES is not set +# end of CAN Device Drivers + +CONFIG_BT=m +CONFIG_BT_BREDR=y +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_LE=y +CONFIG_BT_6LOWPAN=m +CONFIG_BT_LEDS=y +# CONFIG_BT_SELFTEST is not set +CONFIG_BT_DEBUGFS=y + +# +# Bluetooth device drivers +# +CONFIG_BT_INTEL=m +CONFIG_BT_BCM=m +CONFIG_BT_RTL=m +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +CONFIG_BT_HCIBTUSB_BCM=y +CONFIG_BT_HCIBTUSB_MTK=y +CONFIG_BT_HCIBTUSB_RTL=y +# CONFIG_BT_HCIBTSDIO is not set +# CONFIG_BT_HCIUART is not set +CONFIG_BT_HCIBCM203X=m +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +# CONFIG_BT_HCIVHCI is not set +CONFIG_BT_MRVL=m +# CONFIG_BT_MRVL_SDIO is not set +# CONFIG_BT_ATH3K is not set +# CONFIG_BT_MTKSDIO is not set +CONFIG_BT_HCIRSI=m +# end of Bluetooth device drivers + +CONFIG_AF_RXRPC=m +CONFIG_AF_RXRPC_IPV6=y +# CONFIG_AF_RXRPC_INJECT_LOSS is not set +# CONFIG_AF_RXRPC_DEBUG is not set +CONFIG_RXKAD=y +CONFIG_AF_KCM=m +CONFIG_STREAM_PARSER=y +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WIRELESS_EXT=y +CONFIG_WEXT_CORE=y 
+CONFIG_WEXT_PROC=y +CONFIG_WEXT_SPY=y +CONFIG_WEXT_PRIV=y +CONFIG_CFG80211=m +CONFIG_NL80211_TESTMODE=y +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +CONFIG_CFG80211_CERTIFICATION_ONUS=y +# CONFIG_CFG80211_REQUIRE_SIGNED_REGDB is not set +CONFIG_CFG80211_REG_CELLULAR_HINTS=y +CONFIG_CFG80211_REG_RELAX_NO_IR=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +CONFIG_CFG80211_WEXT=y +CONFIG_CFG80211_WEXT_EXPORT=y +CONFIG_LIB80211=m +CONFIG_LIB80211_CRYPT_WEP=m +CONFIG_LIB80211_CRYPT_CCMP=m +CONFIG_LIB80211_CRYPT_TKIP=m +# CONFIG_LIB80211_DEBUG is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +CONFIG_MAC80211_MESH=y +CONFIG_MAC80211_LEDS=y +# CONFIG_MAC80211_DEBUGFS is not set +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +CONFIG_WIMAX=m +CONFIG_WIMAX_DEBUG_LEVEL=8 +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +CONFIG_RFKILL_GPIO=m +CONFIG_NET_9P=m +CONFIG_NET_9P_RDMA=m +# CONFIG_NET_9P_DEBUG is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +CONFIG_CEPH_LIB_PRETTYDEBUG=y +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +# CONFIG_FAILOVER is not set + +# +# Device Drivers +# +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +# CONFIG_PCIEPORTBUS is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_STUB is not set +# CONFIG_PCI_IOV is not set +# CONFIG_PCI_PRI is not set +# CONFIG_PCI_PASID is not set +# CONFIG_HOTPLUG_PCI is not set + +# +# PCI controller drivers +# + +# +# Cadence PCIe controllers support +# +# 
CONFIG_PCIE_CADENCE_HOST is not set +# end of Cadence PCIe controllers support + +# CONFIG_PCI_FTPCI100 is not set +# CONFIG_PCI_HOST_GENERIC is not set +# CONFIG_PCIE_XILINX is not set + +# +# DesignWare PCI Core Support +# +# end of DesignWare PCI Core Support +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# CONFIG_PCCARD is not set +# CONFIG_RAPIDIO is not set + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_FW_LOADER_PAGED_BUF=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +CONFIG_FW_LOADER_COMPRESS=y +# end of Firmware loader + +CONFIG_WANT_DEV_COREDUMP=y +CONFIG_ALLOW_DEV_COREDUMP=y +CONFIG_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=m +CONFIG_REGMAP_SPI=m +CONFIG_REGMAP_MMIO=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +# end of Generic Driver Options + +# +# Elbrus MCST Device Drivers +# +CONFIG_DRM_MGA2=y +CONFIG_MGA2_PWM=m +CONFIG_MGA2_GPIO=m +CONFIG_MSPS=m +CONFIG_MPV=m +# CONFIG_MGPM is not set +CONFIG_MMRM=m +CONFIG_MOKM=m +CONFIG_RDMA=m +CONFIG_RDMA_SIC=m +CONFIG_RDMA_M=m +CONFIG_MOKX=m +CONFIG_WD=y +# CONFIG_DMP_ASSIST is not set +CONFIG_LPTOUTS=m +CONFIG_M2MLC=m +CONFIG_APKPWR=m +CONFIG_MEM2ALLOC=m +CONFIG_HANTRODEC=m +CONFIG_BIGE=m +CONFIG_E8CPCS=m +CONFIG_PCSM=m +CONFIG_IMGTEC=m +CONFIG_BUS_MASTERING=y +CONFIG_VXD_FPGA=y + +# +# MCST support of Imagination`s GPU (Rogue_DDK_Linux_WS_REL_1.13@5824814) +# +CONFIG_MCST_GPU_IMGTEC=m +# 
CONFIG_MCST_GPU_IMGTEC_PDUMP is not set +# CONFIG_MCST_GPU_IMGTEC_DEBUG is not set +CONFIG_MCST_GPU_IMGTEC_CONTIGUOUS_FW=y +# end of MCST support of Imagination`s GPU (Rogue_DDK_Linux_WS_REL_1.13@5824814) + +# +# MCST Vivante GPU support (galcore v6.2.4p3) +# +CONFIG_MCST_GPU_VIV=m +# end of MCST Vivante GPU support (galcore v6.2.4p3) + +CONFIG_DRM_SMI=m +CONFIG_DRM_SMI_HDMI=y +CONFIG_DRM_SMI_AUDIO=y +CONFIG_DRM_SMI_PRIME=y +CONFIG_SMI_PWM=m +CONFIG_SMI_GPIO=m +CONFIG_SENSORS_EMC2305=m +# end of Elbrus MCST Device Drivers + +# +# Bus devices +# +# CONFIG_MOXTET is not set +# CONFIG_SIMPLE_PM_BUS is not set +# end of Bus devices + +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_GNSS is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set + +# +# Partition parsers +# +# CONFIG_MTD_AR7_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +CONFIG_MTD_OF_PARTS=m +# CONFIG_MTD_REDBOOT_PARTS is not set +# end of Partition parsers + +# +# User Modules And Translation Layers +# +# CONFIG_MTD_BLOCK is not set +# CONFIG_MTD_BLOCK_RO is not set +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set +# end of RAM/ROM/Flash chip drivers + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set +# end of Mapping drivers for chip access + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# 
CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# end of Self-contained MTD device drivers + +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_RAW_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# end of LPDDR & LPDDR2 PCM memory drivers + +# CONFIG_MTD_SPI_NOR is not set +# CONFIG_MTD_UBI is not set +# CONFIG_MTD_HYPERBUS is not set +CONFIG_DTC=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_KOBJ=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_MDIO=y +CONFIG_OF_RESERVED_MEM=y +# CONFIG_OF_OVERLAY is not set +CONFIG_PARPORT=m +CONFIG_PARPORT_MCST=m +# CONFIG_PARPORT_AX88796 is not set +CONFIG_PARPORT_1284=y +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_NULL_BLK is not set +# CONFIG_BLK_DEV_FD is not set +CONFIG_CDROM=y +CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_BLK_DEV_DRBD=m +# CONFIG_DRBD_FAULT_INJECTION is not set +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_SKD=m +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=4096 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +CONFIG_CDROM_PKTCDVD_WCACHE=y +CONFIG_ATA_OVER_ETH=m +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_RSXX=m + +# +# NVME Support +# +CONFIG_NVME_CORE=y +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +CONFIG_NVME_TARGET=m +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_NVME_TARGET_TCP=m +# end of NVME Support + +# +# Misc devices +# +# CONFIG_AD525X_DPOT is not set +# 
CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29020 is not set +CONFIG_ISL22317=m +# CONFIG_ISL29003 is not set +# CONFIG_LTC4306 is not set +CONFIG_UCD9080=m +CONFIG_I2C_P2PMC=m +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_XILINX_SDFEC is not set +# CONFIG_PVPANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +CONFIG_EEPROM_AT24=m +# CONFIG_EEPROM_AT25 is not set +CONFIG_EEPROM_LEGACY=m +# CONFIG_EEPROM_MAX6875 is not set +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +CONFIG_EEPROM_EE1004=m +# end of EEPROM support + +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +# end of Texas Instruments shared transport line discipline + +# CONFIG_SENSORS_LIS3_SPI is not set +# CONFIG_SENSORS_LIS3_I2C is not set +CONFIG_ALTERA_STAPL=m + +# +# Intel MIC & related support +# + +# +# Intel MIC Bus Driver +# + +# +# SCIF Bus Driver +# + +# +# VOP Bus Driver +# +# CONFIG_VOP_BUS is not set + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +# end of Intel MIC & related support + +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_MISC_ALCOR_PCI is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_MISC_RTSX_USB is not set +# CONFIG_HABANA_AI is not set +# end of Misc devices + +CONFIG_HAVE_IDE=y +CONFIG_IDE=m + +# +# Please see Documentation/ide/ide.rst for help/info on IDE drives +# 
+CONFIG_IDE_XFER_MODE=y +CONFIG_IDE_ATAPI=y +# CONFIG_BLK_DEV_IDE_SATA is not set +CONFIG_IDE_GD=m +CONFIG_IDE_GD_ATA=y +# CONFIG_IDE_GD_ATAPI is not set +CONFIG_BLK_DEV_IDECD=m +CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y +# CONFIG_BLK_DEV_IDETAPE is not set +# CONFIG_IDE_TASK_IOCTL is not set +CONFIG_IDE_PROC_FS=y + +# +# IDE chipset support/bugfixes +# +# CONFIG_BLK_DEV_PLATFORM is not set +CONFIG_BLK_DEV_IDEDMA_SFF=y + +# +# PCI IDE chipsets support +# +CONFIG_BLK_DEV_IDEPCI=y +# CONFIG_BLK_DEV_OFFBOARD is not set +CONFIG_BLK_DEV_GENERIC=m +# CONFIG_BLK_DEV_OPTI621 is not set +CONFIG_BLK_DEV_IDEDMA_PCI=y +# CONFIG_BLK_DEV_AEC62XX is not set +# CONFIG_BLK_DEV_ALI15X3 is not set +# CONFIG_BLK_DEV_AMD74XX is not set +# CONFIG_BLK_DEV_CMD64X is not set +# CONFIG_BLK_DEV_TRIFLEX is not set +# CONFIG_BLK_DEV_HPT366 is not set +# CONFIG_BLK_DEV_JMICRON is not set +CONFIG_BLK_DEV_PIIX=m +CONFIG_BLK_DEV_ELBRUS=m +# CONFIG_BLK_DEV_IT8172 is not set +# CONFIG_BLK_DEV_IT8213 is not set +# CONFIG_BLK_DEV_IT821X is not set +# CONFIG_BLK_DEV_NS87415 is not set +# CONFIG_BLK_DEV_PDC202XX_OLD is not set +# CONFIG_BLK_DEV_PDC202XX_NEW is not set +# CONFIG_BLK_DEV_SVWKS is not set +# CONFIG_BLK_DEV_SIIMAGE is not set +# CONFIG_BLK_DEV_SLC90E66 is not set +# CONFIG_BLK_DEV_TRM290 is not set +# CONFIG_BLK_DEV_VIA82CXXX is not set +# CONFIG_BLK_DEV_TC86C001 is not set +CONFIG_BLK_DEV_IDEDMA=y + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=m +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y 
+CONFIG_SCSI_SRP_ATTRS=m +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_SCSI_CXGB4_ISCSI=m +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_SCSI_BNX2X_FCOE is not set +CONFIG_BE2ISCSI=m +CONFIG_BLK_DEV_3W_XXXX_RAID=m +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_3W_9XXX=m +CONFIG_SCSI_3W_SAS=m +CONFIG_SCSI_ACARD=m +CONFIG_SCSI_AACRAID=m +CONFIG_SCSI_AIC7XXX=m +CONFIG_AIC7XXX_CMDS_PER_DEVICE=32 +CONFIG_AIC7XXX_RESET_DELAY_MS=5000 +CONFIG_AIC7XXX_DEBUG_ENABLE=y +CONFIG_AIC7XXX_DEBUG_MASK=0 +CONFIG_AIC7XXX_REG_PRETTY_PRINT=y +CONFIG_SCSI_AIC79XX=m +CONFIG_AIC79XX_CMDS_PER_DEVICE=32 +CONFIG_AIC79XX_RESET_DELAY_MS=5000 +CONFIG_AIC79XX_DEBUG_ENABLE=y +CONFIG_AIC79XX_DEBUG_MASK=0 +CONFIG_AIC79XX_REG_PRETTY_PRINT=y +CONFIG_SCSI_AIC94XX=m +CONFIG_AIC94XX_DEBUG=y +CONFIG_SCSI_MVSAS=m +CONFIG_SCSI_MVSAS_DEBUG=y +# CONFIG_SCSI_MVSAS_TASKLET is not set +CONFIG_SCSI_MVUMI=m +CONFIG_SCSI_ADVANSYS=m +CONFIG_SCSI_ARCMSR=m +CONFIG_SCSI_ESAS2R=m +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=m +CONFIG_MEGARAID_MAILBOX=m +CONFIG_MEGARAID_LEGACY=m +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +# CONFIG_SCSI_MPT2SAS is not set +CONFIG_SCSI_SMARTPQI=m +# CONFIG_SCSI_UFSHCD is not set +CONFIG_SCSI_HPTIOP=m +CONFIG_SCSI_MYRB=m +CONFIG_SCSI_MYRS=m +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_FCOE=m +# CONFIG_SCSI_SNIC is not set +CONFIG_SCSI_DMX3191D=m +CONFIG_SCSI_FDOMAIN=m +CONFIG_SCSI_FDOMAIN_PCI=m +CONFIG_SCSI_GDTH=m +CONFIG_SCSI_IPS=m +CONFIG_SCSI_INITIO=m +CONFIG_SCSI_INIA100=m +CONFIG_SCSI_STEX=m +CONFIG_SCSI_SYM53C8XX_2=m +CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1 +CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 +CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 +CONFIG_SCSI_SYM53C8XX_MMIO=y +CONFIG_SCSI_IPR=m +CONFIG_SCSI_IPR_TRACE=y +CONFIG_SCSI_IPR_DUMP=y +CONFIG_SCSI_QLOGIC_1280=m +CONFIG_SCSI_QLA_FC=m +CONFIG_TCM_QLA2XXX=m +# CONFIG_TCM_QLA2XXX_DEBUG is not set 
+CONFIG_SCSI_QLA_ISCSI=m +CONFIG_QEDI=m +CONFIG_QEDF=m +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +CONFIG_SCSI_DC395x=m +CONFIG_SCSI_AM53C974=m +CONFIG_SCSI_WD719X=m +CONFIG_SCSI_DEBUG=m +CONFIG_SCSI_PMCRAID=m +CONFIG_SCSI_PM8001=m +CONFIG_SCSI_BFA_FC=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=m +CONFIG_SCSI_DH_HP_SW=m +CONFIG_SCSI_DH_EMC=m +CONFIG_SCSI_DH_ALUA=m +# end of SCSI device support + +CONFIG_ATA=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 +# CONFIG_SATA_AHCI_PLATFORM is not set +# CONFIG_AHCI_CEVA is not set +# CONFIG_AHCI_QORIQ is not set +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +CONFIG_SATA_SIL24=y +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=y +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +CONFIG_SATA_SIL=y +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# 
CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_ATA_GENERIC is not set +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +# CONFIG_BCACHE_DEBUG is not set +# CONFIG_BCACHE_CLOSURES_DEBUG is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +CONFIG_DM_UNSTRIPED=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_CLONE=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_DUST=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y +# CONFIG_DM_VERITY_FEC is not set +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_DM_ZONED=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m 
+CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_TCM_FC=m +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +CONFIG_SBP_TARGET=m +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +CONFIG_FUSION_FC=m +CONFIG_FUSION_SAS=m +CONFIG_FUSION_MAX_SGE=128 +CONFIG_FUSION_CTL=m +CONFIG_FUSION_LAN=m +CONFIG_FUSION_LOGGING=y + +# +# IEEE 1394 (FireWire) support +# +CONFIG_FIREWIRE=m +CONFIG_FIREWIRE_OHCI=m +CONFIG_FIREWIRE_SBP2=m +CONFIG_FIREWIRE_NET=m +CONFIG_FIREWIRE_NOSY=m +# end of IEEE 1394 (FireWire) support + +CONFIG_NETDEVICES=y +CONFIG_MII=y +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_EQUALIZER=m +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN_L3S=y +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +CONFIG_GTP=m +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_SUNGEM_PHY=m +CONFIG_ARCNET=m +CONFIG_ARCNET_1201=m +CONFIG_ARCNET_1051=m +CONFIG_ARCNET_RAW=m +CONFIG_ARCNET_CAP=m +CONFIG_ARCNET_COM90xx=m +CONFIG_ARCNET_COM90xxIO=m +CONFIG_ARCNET_RIM_I=m +CONFIG_ARCNET_COM20020=m +CONFIG_ARCNET_COM20020_PCI=m +CONFIG_ATM_DRIVERS=y +CONFIG_ATM_DUMMY=m +CONFIG_ATM_TCP=m +CONFIG_ATM_LANAI=m +CONFIG_ATM_ENI=m +# CONFIG_ATM_ENI_DEBUG is not set +CONFIG_ATM_ENI_TUNE_BURST=y +# CONFIG_ATM_ENI_BURST_TX_16W is not set +# CONFIG_ATM_ENI_BURST_TX_8W is not set +# CONFIG_ATM_ENI_BURST_TX_4W is not set +# CONFIG_ATM_ENI_BURST_TX_2W is not set +# CONFIG_ATM_ENI_BURST_RX_16W is not set +# CONFIG_ATM_ENI_BURST_RX_8W is not set +# CONFIG_ATM_ENI_BURST_RX_4W is not set +# CONFIG_ATM_ENI_BURST_RX_2W is not set +CONFIG_ATM_NICSTAR=m 
+CONFIG_ATM_NICSTAR_USE_SUNI=y +CONFIG_ATM_NICSTAR_USE_IDT77105=y +CONFIG_ATM_IDT77252=m +# CONFIG_ATM_IDT77252_DEBUG is not set +CONFIG_ATM_IDT77252_RCV_ALL=y +CONFIG_ATM_IDT77252_USE_SUNI=y +CONFIG_ATM_IA=m +# CONFIG_ATM_IA_DEBUG is not set +CONFIG_ATM_FORE200E=m +CONFIG_ATM_FORE200E_USE_TASKLET=y +CONFIG_ATM_FORE200E_TX_RETRY=16 +CONFIG_ATM_FORE200E_DEBUG=0 +CONFIG_ATM_HE=m +CONFIG_ATM_HE_USE_SUNI=y +CONFIG_ATM_SOLOS=m + +# +# CAIF transport drivers +# + +# +# Distributed Switch Architecture drivers +# +# end of Distributed Switch Architecture drivers + +CONFIG_ETHERNET=y +CONFIG_MDIO=m +CONFIG_NET_VENDOR_3COM=y +CONFIG_VORTEX=m +CONFIG_TYPHOON=m +CONFIG_NET_VENDOR_ADAPTEC=y +CONFIG_ADAPTEC_STARFIRE=m +CONFIG_NET_VENDOR_AGERE=y +# CONFIG_ET131X is not set +CONFIG_NET_VENDOR_ALACRITECH=y +# CONFIG_SLICOSS is not set +CONFIG_NET_VENDOR_ALTEON=y +CONFIG_ACENIC=m +# CONFIG_ACENIC_OMIT_TIGON_I is not set +CONFIG_ALTERA_TSE=m +CONFIG_NET_VENDOR_AMAZON=y +CONFIG_ENA_ETHERNET=m +CONFIG_NET_VENDOR_AMD=y +CONFIG_AMD8111_ETH=m +CONFIG_PCNET32=m +CONFIG_NET_VENDOR_AQUANTIA=y +CONFIG_NET_VENDOR_ARC=y +CONFIG_NET_VENDOR_ATHEROS=y +CONFIG_ATL2=m +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +CONFIG_NET_VENDOR_AURORA=y +CONFIG_AURORA_NB8800=m +CONFIG_NET_VENDOR_BROADCOM=y +CONFIG_B44=m +CONFIG_B44_PCI_AUTOSELECT=y +CONFIG_B44_PCICORE_AUTOSELECT=y +CONFIG_B44_PCI=y +CONFIG_BCMGENET=m +CONFIG_BNX2=m +# CONFIG_CNIC is not set +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +# CONFIG_BNX2X is not set +CONFIG_SYSTEMPORT=m +CONFIG_BNXT=m +CONFIG_BNXT_FLOWER_OFFLOAD=y +CONFIG_BNXT_DCB=y +CONFIG_BNXT_HWMON=y +CONFIG_NET_VENDOR_BROCADE=y +CONFIG_BNA=m +CONFIG_NET_VENDOR_CADENCE=y +CONFIG_NET_VENDOR_CAVIUM=y +# CONFIG_THUNDER_NIC_PF is not set +CONFIG_THUNDER_NIC_VF=m +# CONFIG_THUNDER_NIC_BGX is not set +# CONFIG_THUNDER_NIC_RGX is not set +CONFIG_CAVIUM_PTP=m +CONFIG_LIQUIDIO=m +CONFIG_LIQUIDIO_VF=m +CONFIG_NET_VENDOR_CHELSIO=y +CONFIG_CHELSIO_T1=m +CONFIG_CHELSIO_T1_1G=y 
+CONFIG_CHELSIO_T3=m +CONFIG_CHELSIO_T4=m +CONFIG_CHELSIO_T4_DCB=y +CONFIG_CHELSIO_T4_FCOE=y +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +CONFIG_NET_VENDOR_CISCO=y +# CONFIG_ENIC is not set +CONFIG_NET_VENDOR_CORTINA=y +CONFIG_GEMINI_ETHERNET=m +CONFIG_DNET=m +CONFIG_NET_VENDOR_DEC=y +CONFIG_NET_TULIP=y +CONFIG_DE2104X=m +CONFIG_DE2104X_DSL=0 +CONFIG_TULIP=m +# CONFIG_TULIP_MWI is not set +# CONFIG_TULIP_MMIO is not set +CONFIG_TULIP_NAPI=y +CONFIG_TULIP_NAPI_HW_MITIGATION=y +CONFIG_WINBOND_840=m +CONFIG_DM9102=m +CONFIG_ULI526X=m +CONFIG_NET_VENDOR_DLINK=y +CONFIG_DL2K=m +CONFIG_SUNDANCE=m +CONFIG_SUNDANCE_MMIO=y +CONFIG_NET_VENDOR_EMULEX=y +CONFIG_BE2NET=m +CONFIG_BE2NET_HWMON=y +CONFIG_BE2NET_BE2=y +CONFIG_BE2NET_BE3=y +CONFIG_BE2NET_LANCER=y +CONFIG_BE2NET_SKYHAWK=y +CONFIG_NET_VENDOR_EZCHIP=y +CONFIG_NET_VENDOR_GOOGLE=y +# CONFIG_GVE is not set +CONFIG_NET_VENDOR_HP=y +CONFIG_HP100=m +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_NET_VENDOR_I825XX=y +CONFIG_NET_VENDOR_INTEL=y +CONFIG_E100=m +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=m +CONFIG_IXGB=m +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBE_IPSEC=y +CONFIG_IXGBEVF=m +CONFIG_IXGBEVF_IPSEC=y +CONFIG_I40E=m +CONFIG_I40E_DCB=y +CONFIG_IAVF=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_FM10K=m +CONFIG_IGC=m +CONFIG_JME=m +CONFIG_NET_VENDOR_MARVELL=y +CONFIG_MVMDIO=m +CONFIG_SKGE=m +# CONFIG_SKGE_DEBUG is not set +CONFIG_SKGE_GENESIS=y +CONFIG_SKY2=m +# CONFIG_SKY2_DEBUG is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +CONFIG_MLX4_CORE_GEN2=y +CONFIG_MLX5_CORE=m +CONFIG_MLX5_ACCEL=y +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y +CONFIG_MLX5_ESWITCH=y +CONFIG_MLX5_CORE_EN_DCB=y +CONFIG_MLX5_CORE_IPOIB=y +CONFIG_MLX5_FPGA_IPSEC=y +CONFIG_MLX5_EN_IPSEC=y +CONFIG_MLX5_FPGA_TLS=y +CONFIG_MLX5_TLS=y +CONFIG_MLX5_EN_TLS=y 
+CONFIG_MLX5_SW_STEERING=y +CONFIG_MLXSW_CORE=m +CONFIG_MLXSW_CORE_HWMON=y +CONFIG_MLXSW_CORE_THERMAL=y +CONFIG_MLXSW_PCI=m +CONFIG_MLXSW_I2C=m +CONFIG_MLXSW_SWITCHIB=m +CONFIG_MLXSW_SWITCHX2=m +CONFIG_MLXSW_SPECTRUM=m +CONFIG_MLXSW_SPECTRUM_DCB=y +CONFIG_MLXSW_MINIMAL=m +CONFIG_MLXFW=m +CONFIG_NET_VENDOR_MICREL=y +CONFIG_KS8851=m +CONFIG_KS8851_MLL=m +CONFIG_KSZ884X_PCI=m +CONFIG_NET_VENDOR_MICROCHIP=y +CONFIG_ENC28J60=m +# CONFIG_ENC28J60_WRITEVERIFY is not set +CONFIG_ENCX24J600=m +CONFIG_LAN743X=m +CONFIG_NET_VENDOR_MICROSEMI=y +CONFIG_MSCC_OCELOT_SWITCH=m +CONFIG_NET_VENDOR_MYRI=y +CONFIG_MYRI10GE=m +CONFIG_FEALNX=m +CONFIG_NET_VENDOR_NATSEMI=y +CONFIG_NATSEMI=m +CONFIG_NS83820=m +CONFIG_NET_VENDOR_NETERION=y +CONFIG_S2IO=m +CONFIG_VXGE=m +# CONFIG_VXGE_DEBUG_TRACE_ALL is not set +CONFIG_NET_VENDOR_NETRONOME=y +CONFIG_NFP=m +CONFIG_NFP_APP_FLOWER=y +CONFIG_NFP_APP_ABM_NIC=y +# CONFIG_NFP_DEBUG is not set +CONFIG_NET_VENDOR_NI=y +CONFIG_NI_XGE_MANAGEMENT_ENET=m +CONFIG_NET_VENDOR_8390=y +CONFIG_NE2K_PCI=m +CONFIG_NET_VENDOR_NVIDIA=y +CONFIG_FORCEDETH=m +CONFIG_NET_VENDOR_OKI=y +CONFIG_ETHOC=m +CONFIG_NET_VENDOR_PACKET_ENGINES=y +CONFIG_HAMACHI=m +CONFIG_YELLOWFIN=m +CONFIG_NET_VENDOR_PENSANDO=y +CONFIG_IONIC=m +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_QLA3XXX=m +CONFIG_QLCNIC=m +CONFIG_QLCNIC_DCB=y +CONFIG_QLCNIC_HWMON=y +CONFIG_NETXEN_NIC=m +CONFIG_QED=m +CONFIG_QED_LL2=y +CONFIG_QEDE=m +CONFIG_QED_RDMA=y +CONFIG_QED_ISCSI=y +CONFIG_QED_FCOE=y +CONFIG_QED_OOO=y +CONFIG_NET_VENDOR_QUALCOMM=y +CONFIG_QCA7000=m +CONFIG_QCA7000_SPI=m +CONFIG_QCOM_EMAC=m +CONFIG_RMNET=m +CONFIG_NET_VENDOR_RDC=y +CONFIG_R6040=m +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_8139CP=m +CONFIG_8139TOO=m +CONFIG_8139TOO_PIO=y +CONFIG_8139TOO_TUNE_TWISTER=y +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_R8169=m +CONFIG_NET_VENDOR_RENESAS=y +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_ROCKER=m +CONFIG_NET_VENDOR_SAMSUNG=y +CONFIG_SXGBE_ETH=m +CONFIG_PCI_SUNLANCE=m +CONFIG_MCST_PCC=m 
+CONFIG_RDMA_NET=m +CONFIG_ETH1000=y +CONFIG_MXGBE=m +CONFIG_MGB=m +CONFIG_NET_VENDOR_SEEQ=y +CONFIG_NET_VENDOR_SOLARFLARE=y +CONFIG_SFC=m +CONFIG_SFC_MTD=y +CONFIG_SFC_MCDI_MON=y +CONFIG_SFC_MCDI_LOGGING=y +CONFIG_SFC_FALCON=m +CONFIG_SFC_FALCON_MTD=y +CONFIG_NET_VENDOR_SILAN=y +# CONFIG_SC92031 is not set +CONFIG_NET_VENDOR_SIS=y +CONFIG_SIS900=m +CONFIG_SIS190=m +CONFIG_NET_VENDOR_SMSC=y +CONFIG_EPIC100=m +CONFIG_SMSC911X=m +CONFIG_SMSC9420=m +CONFIG_NET_VENDOR_SOCIONEXT=y +CONFIG_NET_VENDOR_STMICRO=y +CONFIG_STMMAC_ETH=m +# CONFIG_STMMAC_SELFTESTS is not set +CONFIG_STMMAC_PLATFORM=m +CONFIG_DWMAC_DWC_QOS_ETH=m +CONFIG_DWMAC_GENERIC=m +CONFIG_NET_VENDOR_SUN=y +CONFIG_HAPPYMEAL=m +CONFIG_SUNGEM=m +CONFIG_CASSINI=m +CONFIG_NIU=m +CONFIG_NET_VENDOR_SYNOPSYS=y +CONFIG_DWC_XLGMAC=m +CONFIG_DWC_XLGMAC_PCI=m +CONFIG_NET_VENDOR_TEHUTI=y +CONFIG_TEHUTI=m +CONFIG_NET_VENDOR_TI=y +# CONFIG_TI_CPSW_PHY_SEL is not set +CONFIG_TLAN=m +CONFIG_NET_VENDOR_VIA=y +CONFIG_VIA_RHINE=m +CONFIG_VIA_RHINE_MMIO=y +CONFIG_VIA_VELOCITY=m +CONFIG_NET_VENDOR_WIZNET=y +CONFIG_WIZNET_W5100=m +CONFIG_WIZNET_W5300=m +# CONFIG_WIZNET_BUS_DIRECT is not set +# CONFIG_WIZNET_BUS_INDIRECT is not set +CONFIG_WIZNET_BUS_ANY=y +CONFIG_WIZNET_W5100_SPI=m +CONFIG_FDDI=m +CONFIG_DEFXX=m +CONFIG_DEFXX_MMIO=y +CONFIG_SKFP=m +CONFIG_HIPPI=y +CONFIG_ROADRUNNER=m +CONFIG_ROADRUNNER_LARGE_RINGS=y +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +CONFIG_MDIO_BCM_UNIMAC=m +CONFIG_MDIO_BITBANG=m +# CONFIG_MDIO_BUS_MUX_GPIO is not set +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set +# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set +CONFIG_MDIO_GPIO=m +# CONFIG_MDIO_HISI_FEMAC is not set +# CONFIG_MDIO_MSCC_MIIM is not set +# CONFIG_MDIO_OCTEON is not set +# CONFIG_MDIO_THUNDER is not set +CONFIG_PHYLINK=m +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +# CONFIG_LED_TRIGGER_PHY is not set + +# +# MII PHY device drivers +# +# CONFIG_SFP is not set +# CONFIG_ADIN_PHY is not set +# CONFIG_AMD_PHY is not set +# CONFIG_AQUANTIA_PHY is not set +# 
CONFIG_AX88796B_PHY is not set +# CONFIG_AT803X_PHY is not set +CONFIG_BCM7XXX_PHY=m +# CONFIG_BCM87XX_PHY is not set +CONFIG_BCM_NET_PHYLIB=m +# CONFIG_BROADCOM_PHY is not set +# CONFIG_CICADA_PHY is not set +# CONFIG_CORTINA_PHY is not set +# CONFIG_DAVICOM_PHY is not set +# CONFIG_DP83822_PHY is not set +# CONFIG_DP83TC811_PHY is not set +# CONFIG_DP83848_PHY is not set +CONFIG_DP83867_PHY=y +CONFIG_FIXED_PHY=y +# CONFIG_ICPLUS_PHY is not set +# CONFIG_INTEL_XWAY_PHY is not set +# CONFIG_LSI_ET1011C_PHY is not set +# CONFIG_LXT_PHY is not set +CONFIG_MARVELL_PHY=y +# CONFIG_MARVELL_10G_PHY is not set +CONFIG_MICREL_PHY=y +CONFIG_MICROCHIP_PHY=m +# CONFIG_MICROCHIP_T1_PHY is not set +# CONFIG_MICROSEMI_PHY is not set +CONFIG_NATIONAL_PHY=y +# CONFIG_NXP_TJA11XX_PHY is not set +# CONFIG_QSEMI_PHY is not set +CONFIG_REALTEK_PHY=m +# CONFIG_RENESAS_PHY is not set +# CONFIG_ROCKCHIP_PHY is not set +CONFIG_SMSC_PHY=m +# CONFIG_STE10XP is not set +# CONFIG_TERANETICS_PHY is not set +# CONFIG_VITESSE_PHY is not set +# CONFIG_XILINX_GMII2RGMII is not set +# CONFIG_MICREL_KS8995MA is not set +# CONFIG_PLIP is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +CONFIG_SLIP_MODE_SLIP6=y +CONFIG_USB_NET_DRIVERS=m +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SR9700=m +CONFIG_USB_NET_SR9800=m +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m 
+CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +CONFIG_USB_NET_ZAURUS=m +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_CDC_PHONET=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +CONFIG_USB_NET_AQC111=m +CONFIG_WLAN=y +# CONFIG_WIRELESS_WDS is not set +CONFIG_WLAN_VENDOR_ADMTEK=y +CONFIG_ADM8211=m +CONFIG_ATH_COMMON=m +CONFIG_WLAN_VENDOR_ATH=y +# CONFIG_ATH_DEBUG is not set +CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS=y +# CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING is not set +CONFIG_ATH5K=m +# CONFIG_ATH5K_DEBUG is not set +# CONFIG_ATH5K_TRACER is not set +CONFIG_ATH5K_PCI=y +CONFIG_ATH5K_TEST_CHANNELS=y +CONFIG_ATH9K_HW=m +CONFIG_ATH9K_COMMON=m +CONFIG_ATH9K_BTCOEX_SUPPORT=y +CONFIG_ATH9K=m +CONFIG_ATH9K_PCI=y +CONFIG_ATH9K_AHB=y +# CONFIG_ATH9K_DEBUGFS is not set +# CONFIG_ATH9K_DFS_CERTIFIED is not set +CONFIG_ATH9K_DYNACK=y +CONFIG_ATH9K_WOW=y +CONFIG_ATH9K_RFKILL=y +CONFIG_ATH9K_CHANNEL_CONTEXT=y +CONFIG_ATH9K_PCOEM=y +CONFIG_ATH9K_PCI_NO_EEPROM=m +CONFIG_ATH9K_HTC=m +# CONFIG_ATH9K_HTC_DEBUGFS is not set +CONFIG_CARL9170=m +CONFIG_CARL9170_LEDS=y +CONFIG_CARL9170_WPC=y +# CONFIG_ATH6KL is not set +CONFIG_AR5523=m +CONFIG_WIL6210=m +CONFIG_WIL6210_ISR_COR=y +# CONFIG_WIL6210_TRACING is not set +CONFIG_WIL6210_DEBUGFS=y +CONFIG_ATH10K=m +CONFIG_ATH10K_CE=y +CONFIG_ATH10K_PCI=m +CONFIG_ATH10K_AHB=y +CONFIG_ATH10K_SDIO=m +CONFIG_ATH10K_USB=m +# CONFIG_ATH10K_DEBUG is not set +# CONFIG_ATH10K_DEBUGFS is not set +# CONFIG_ATH10K_TRACING is not set +CONFIG_ATH10K_DFS_CERTIFIED=y +CONFIG_WCN36XX=m +# CONFIG_WCN36XX_DEBUGFS is not set +CONFIG_WLAN_VENDOR_ATMEL=y +CONFIG_ATMEL=m 
+CONFIG_PCI_ATMEL=m +CONFIG_AT76C50X_USB=m +CONFIG_WLAN_VENDOR_BROADCOM=y +CONFIG_B43=m +CONFIG_B43_BCMA=y +CONFIG_B43_SSB=y +CONFIG_B43_BUSES_BCMA_AND_SSB=y +# CONFIG_B43_BUSES_BCMA is not set +# CONFIG_B43_BUSES_SSB is not set +CONFIG_B43_PCI_AUTOSELECT=y +CONFIG_B43_PCICORE_AUTOSELECT=y +CONFIG_B43_SDIO=y +CONFIG_B43_BCMA_PIO=y +CONFIG_B43_PIO=y +CONFIG_B43_PHY_G=y +CONFIG_B43_PHY_N=y +CONFIG_B43_PHY_LP=y +CONFIG_B43_PHY_HT=y +CONFIG_B43_LEDS=y +# CONFIG_B43_DEBUG is not set +CONFIG_B43LEGACY=m +CONFIG_B43LEGACY_PCI_AUTOSELECT=y +CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y +CONFIG_B43LEGACY_LEDS=y +CONFIG_B43LEGACY_DEBUG=y +CONFIG_B43LEGACY_DMA=y +CONFIG_B43LEGACY_PIO=y +CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y +# CONFIG_B43LEGACY_DMA_MODE is not set +# CONFIG_B43LEGACY_PIO_MODE is not set +CONFIG_BRCMUTIL=m +CONFIG_BRCMSMAC=m +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_PROTO_BCDC=y +CONFIG_BRCMFMAC_PROTO_MSGBUF=y +CONFIG_BRCMFMAC_SDIO=y +CONFIG_BRCMFMAC_USB=y +CONFIG_BRCMFMAC_PCIE=y +# CONFIG_BRCM_TRACING is not set +# CONFIG_BRCMDBG is not set +CONFIG_WLAN_VENDOR_CISCO=y +CONFIG_WLAN_VENDOR_INTEL=y +CONFIG_IPW2100=m +CONFIG_IPW2100_MONITOR=y +# CONFIG_IPW2100_DEBUG is not set +CONFIG_IPW2200=m +CONFIG_IPW2200_MONITOR=y +CONFIG_IPW2200_RADIOTAP=y +CONFIG_IPW2200_PROMISCUOUS=y +CONFIG_IPW2200_QOS=y +# CONFIG_IPW2200_DEBUG is not set +CONFIG_LIBIPW=m +# CONFIG_LIBIPW_DEBUG is not set +CONFIG_IWLEGACY=m +CONFIG_IWL4965=m +CONFIG_IWL3945=m + +# +# iwl3945 / iwl4965 Debugging Options +# +# CONFIG_IWLEGACY_DEBUG is not set +# end of iwl3945 / iwl4965 Debugging Options + +CONFIG_IWLWIFI=m +CONFIG_IWLWIFI_LEDS=y +CONFIG_IWLDVM=m +CONFIG_IWLMVM=m +CONFIG_IWLWIFI_OPMODE_MODULAR=y +# CONFIG_IWLWIFI_BCAST_FILTERING is not set + +# +# Debugging Options +# +# CONFIG_IWLWIFI_DEBUG is not set +CONFIG_IWLWIFI_DEVICE_TRACING=y +# end of Debugging Options + +CONFIG_WLAN_VENDOR_INTERSIL=y +CONFIG_HOSTAP=m +CONFIG_HOSTAP_FIRMWARE=y +CONFIG_HOSTAP_FIRMWARE_NVRAM=y +CONFIG_HOSTAP_PLX=m 
+CONFIG_HOSTAP_PCI=m +CONFIG_HERMES=m +CONFIG_HERMES_PRISM=y +CONFIG_HERMES_CACHE_FW_ON_INIT=y +CONFIG_PLX_HERMES=m +CONFIG_TMD_HERMES=m +CONFIG_NORTEL_HERMES=m +CONFIG_PCI_HERMES=m +CONFIG_ORINOCO_USB=m +CONFIG_P54_COMMON=m +CONFIG_P54_USB=m +CONFIG_P54_PCI=m +CONFIG_P54_SPI=m +# CONFIG_P54_SPI_DEFAULT_EEPROM is not set +CONFIG_P54_LEDS=y +CONFIG_PRISM54=m +CONFIG_WLAN_VENDOR_MARVELL=y +CONFIG_LIBERTAS=m +CONFIG_LIBERTAS_USB=m +CONFIG_LIBERTAS_SDIO=m +CONFIG_LIBERTAS_SPI=m +# CONFIG_LIBERTAS_DEBUG is not set +CONFIG_LIBERTAS_MESH=y +CONFIG_LIBERTAS_THINFIRM=m +# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set +CONFIG_LIBERTAS_THINFIRM_USB=m +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_SDIO=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_MWIFIEX_USB=m +CONFIG_MWL8K=m +CONFIG_WLAN_VENDOR_MEDIATEK=y +CONFIG_MT7601U=m +CONFIG_MT76_CORE=m +CONFIG_MT76_LEDS=y +CONFIG_MT76_USB=m +CONFIG_MT76x02_LIB=m +CONFIG_MT76x02_USB=m +CONFIG_MT76x0_COMMON=m +CONFIG_MT76x0U=m +CONFIG_MT76x0E=m +CONFIG_MT76x2_COMMON=m +CONFIG_MT76x2E=m +CONFIG_MT76x2U=m +CONFIG_MT7603E=m +CONFIG_MT7615E=m +CONFIG_WLAN_VENDOR_RALINK=y +CONFIG_RT2X00=m +CONFIG_RT2400PCI=m +CONFIG_RT2500PCI=m +CONFIG_RT61PCI=m +CONFIG_RT2800PCI=m +CONFIG_RT2800PCI_RT33XX=y +CONFIG_RT2800PCI_RT35XX=y +CONFIG_RT2800PCI_RT53XX=y +CONFIG_RT2800PCI_RT3290=y +CONFIG_RT2500USB=m +CONFIG_RT73USB=m +CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT33XX=y +CONFIG_RT2800USB_RT35XX=y +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800USB_UNKNOWN=y +CONFIG_RT2800_LIB=m +CONFIG_RT2800_LIB_MMIO=m +CONFIG_RT2X00_LIB_MMIO=m +CONFIG_RT2X00_LIB_PCI=m +CONFIG_RT2X00_LIB_USB=m +CONFIG_RT2X00_LIB=m +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +# CONFIG_RT2X00_DEBUG is not set +CONFIG_WLAN_VENDOR_REALTEK=y +CONFIG_RTL8180=m +CONFIG_RTL8187=m +CONFIG_RTL8187_LEDS=y +CONFIG_RTL_CARDS=m +CONFIG_RTL8192CE=m +CONFIG_RTL8192SE=m +CONFIG_RTL8192DE=m +CONFIG_RTL8723AE=m +CONFIG_RTL8723BE=m +CONFIG_RTL8188EE=m 
+CONFIG_RTL8192EE=m +CONFIG_RTL8821AE=m +CONFIG_RTL8192CU=m +CONFIG_RTLWIFI=m +CONFIG_RTLWIFI_PCI=m +CONFIG_RTLWIFI_USB=m +CONFIG_RTLWIFI_DEBUG=y +CONFIG_RTL8192C_COMMON=m +CONFIG_RTL8723_COMMON=m +CONFIG_RTLBTCOEXIST=m +CONFIG_RTL8XXXU=m +CONFIG_RTL8XXXU_UNTESTED=y +CONFIG_RTW88=m +CONFIG_RTW88_CORE=m +CONFIG_RTW88_PCI=m +CONFIG_RTW88_8822BE=y +CONFIG_RTW88_8822CE=y +CONFIG_RTW88_DEBUG=y +CONFIG_RTW88_DEBUGFS=y +CONFIG_WLAN_VENDOR_RSI=y +CONFIG_RSI_91X=m +CONFIG_RSI_DEBUGFS=y +CONFIG_RSI_SDIO=m +CONFIG_RSI_USB=m +CONFIG_RSI_COEX=y +CONFIG_WLAN_VENDOR_ST=y +CONFIG_CW1200=m +CONFIG_CW1200_WLAN_SDIO=m +CONFIG_CW1200_WLAN_SPI=m +CONFIG_WLAN_VENDOR_TI=y +CONFIG_WL1251=m +CONFIG_WL1251_SPI=m +CONFIG_WL1251_SDIO=m +CONFIG_WL12XX=m +CONFIG_WL18XX=m +CONFIG_WLCORE=m +CONFIG_WLCORE_SPI=m +CONFIG_WLCORE_SDIO=m +CONFIG_WILINK_PLATFORM_DATA=y +CONFIG_WLAN_VENDOR_ZYDAS=y +CONFIG_USB_ZD1201=m +CONFIG_ZD1211RW=m +# CONFIG_ZD1211RW_DEBUG is not set +CONFIG_WLAN_VENDOR_QUANTENNA=y +CONFIG_QTNFMAC=m +CONFIG_QTNFMAC_PCIE=m +# CONFIG_MAC80211_HWSIM is not set +CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_VIRT_WIFI=m + +# +# WiMAX Wireless Broadband devices +# +CONFIG_WIMAX_I2400M=m +CONFIG_WIMAX_I2400M_USB=m +CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8 +# end of WiMAX Wireless Broadband devices + +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +CONFIG_HDLC_RAW_ETH=m +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m +CONFIG_HDLC_X25=m +CONFIG_PCI200SYN=m +CONFIG_WANXL=m +CONFIG_PC300TOO=m +CONFIG_FARSYNC=m +CONFIG_DLCI=m +CONFIG_DLCI_MAX=8 +CONFIG_LAPBETHER=m +CONFIG_X25_ASY=m +CONFIG_IEEE802154_DRIVERS=m +CONFIG_IEEE802154_FAKELB=m +CONFIG_IEEE802154_AT86RF230=m +# CONFIG_IEEE802154_AT86RF230_DEBUGFS is not set +CONFIG_IEEE802154_MRF24J40=m +CONFIG_IEEE802154_CC2520=m +CONFIG_IEEE802154_ATUSB=m +CONFIG_IEEE802154_ADF7242=m +CONFIG_IEEE802154_MCR20A=m +# CONFIG_IEEE802154_HWSIM is not set +# CONFIG_VMXNET3 is not set +# CONFIG_NETDEVSIM is not set +# CONFIG_NET_FAILOVER is not set +# CONFIG_ISDN 
is not set +CONFIG_NVM=y +CONFIG_NVM_PBLK=m +# CONFIG_NVM_PBLK_DEBUG is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=m +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_POLLDEV=y +CONFIG_INPUT_SPARSEKMAP=m +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=m +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADC is not set +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +CONFIG_KEYBOARD_GPIO=m +CONFIG_KEYBOARD_GPIO_POLLED=m +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +CONFIG_KEYBOARD_SUNKBD=m +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_CAP11XX is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +CONFIG_MOUSE_PS2_ELANTECH=y +# CONFIG_MOUSE_PS2_ELANTECH_SMBUS is not set +CONFIG_MOUSE_PS2_SENTELIC=y +CONFIG_MOUSE_PS2_TOUCHKIT=y +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_SMBUS=y +CONFIG_MOUSE_SERIAL=m +# 
CONFIG_MOUSE_APPLETOUCH is not set +# CONFIG_MOUSE_BCM5974 is not set +# CONFIG_MOUSE_CYAPA is not set +# CONFIG_MOUSE_ELAN_I2C is not set +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_MOUSE_GPIO is not set +# CONFIG_MOUSE_SYNAPTICS_I2C is not set +# CONFIG_MOUSE_SYNAPTICS_USB is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_ANALOG=m +CONFIG_JOYSTICK_A3D=m +CONFIG_JOYSTICK_ADI=m +CONFIG_JOYSTICK_COBRA=m +CONFIG_JOYSTICK_GF2K=m +CONFIG_JOYSTICK_GRIP=m +CONFIG_JOYSTICK_GRIP_MP=m +CONFIG_JOYSTICK_GUILLEMOT=m +CONFIG_JOYSTICK_INTERACT=m +CONFIG_JOYSTICK_SIDEWINDER=m +CONFIG_JOYSTICK_TMDC=m +CONFIG_JOYSTICK_IFORCE=m +CONFIG_JOYSTICK_IFORCE_USB=m +CONFIG_JOYSTICK_IFORCE_232=m +CONFIG_JOYSTICK_WARRIOR=m +CONFIG_JOYSTICK_MAGELLAN=m +CONFIG_JOYSTICK_SPACEORB=m +CONFIG_JOYSTICK_SPACEBALL=m +CONFIG_JOYSTICK_STINGER=m +CONFIG_JOYSTICK_TWIDJOY=m +CONFIG_JOYSTICK_ZHENHUA=m +CONFIG_JOYSTICK_DB9=m +CONFIG_JOYSTICK_GAMECON=m +CONFIG_JOYSTICK_TURBOGRAFX=m +CONFIG_JOYSTICK_AS5011=m +CONFIG_JOYSTICK_JOYDUMP=m +CONFIG_JOYSTICK_XPAD=m +CONFIG_JOYSTICK_XPAD_FF=y +CONFIG_JOYSTICK_XPAD_LEDS=y +CONFIG_JOYSTICK_WALKERA0701=m +CONFIG_JOYSTICK_PSXPAD_SPI=m +CONFIG_JOYSTICK_PSXPAD_SPI_FF=y +CONFIG_JOYSTICK_PXRC=m +CONFIG_JOYSTICK_FSIA6B=m +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=m +CONFIG_TABLET_USB_AIPTEK=m +CONFIG_TABLET_USB_GTCO=m +CONFIG_TABLET_USB_HANWANG=m +CONFIG_TABLET_USB_KBTAB=m +# CONFIG_TABLET_USB_PEGASUS is not set +# CONFIG_TABLET_SERIAL_WACOM4 is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_PROPERTIES=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_ADC is not set +# CONFIG_TOUCHSCREEN_AR1021_I2C is not set +# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set +# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_BU21029 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8318 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set 
+# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +CONFIG_TOUCHSCREEN_EETI=m +CONFIG_TOUCHSCREEN_EGALAX=m +CONFIG_TOUCHSCREEN_EGALAX_SERIAL=m +# CONFIG_TOUCHSCREEN_EXC3000 is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +CONFIG_TOUCHSCREEN_GOODIX=m +# CONFIG_TOUCHSCREEN_HIDEEP is not set +# CONFIG_TOUCHSCREEN_ILI210X is not set +# CONFIG_TOUCHSCREEN_S6SY761 is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_EKTF2127 is not set +# CONFIG_TOUCHSCREEN_ELAN is not set +CONFIG_TOUCHSCREEN_ELO=m +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_WACOM_I2C is not set +# CONFIG_TOUCHSCREEN_MAX11801 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MMS114 is not set +# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_PIXCIR is not set +# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set +# CONFIG_TOUCHSCREEN_WM97XX is not set +CONFIG_TOUCHSCREEN_USB_COMPOSITE=m +CONFIG_TOUCHSCREEN_USB_EGALAX=y +CONFIG_TOUCHSCREEN_USB_PANJIT=y +CONFIG_TOUCHSCREEN_USB_3M=y +CONFIG_TOUCHSCREEN_USB_ITM=y +CONFIG_TOUCHSCREEN_USB_ETURBO=y +CONFIG_TOUCHSCREEN_USB_GUNZE=y +CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y +CONFIG_TOUCHSCREEN_USB_IRTOUCH=y +CONFIG_TOUCHSCREEN_USB_IDEALTEK=y +CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y +CONFIG_TOUCHSCREEN_USB_GOTOP=y +CONFIG_TOUCHSCREEN_USB_JASTEC=y +CONFIG_TOUCHSCREEN_USB_ELO=y +CONFIG_TOUCHSCREEN_USB_E2I=y +CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y +CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y +CONFIG_TOUCHSCREEN_USB_NEXIO=y +CONFIG_TOUCHSCREEN_USB_EASYTOUCH=y 
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC_SERIO is not set +# CONFIG_TOUCHSCREEN_TSC2004 is not set +# CONFIG_TOUCHSCREEN_TSC2005 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_RM_TS is not set +# CONFIG_TOUCHSCREEN_SILEAD is not set +# CONFIG_TOUCHSCREEN_SIS_I2C is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_STMFTS is not set +# CONFIG_TOUCHSCREEN_SUR40 is not set +# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set +# CONFIG_TOUCHSCREEN_SX8654 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +# CONFIG_TOUCHSCREEN_ZET6223 is not set +# CONFIG_TOUCHSCREEN_ZFORCE is not set +# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set +# CONFIG_TOUCHSCREEN_IQS5XX is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATMEL_CAPTOUCH is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +# CONFIG_INPUT_MSM_VIBRATOR is not set +# CONFIG_INPUT_MMA8450 is not set +# CONFIG_INPUT_GP2A is not set +# CONFIG_INPUT_GPIO_BEEPER is not set +# CONFIG_INPUT_GPIO_DECODER is not set +# CONFIG_INPUT_GPIO_VIBRA is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +CONFIG_INPUT_LTC2954=m +# CONFIG_INPUT_KXTJ9 is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +# CONFIG_INPUT_REGULATOR_HAPTIC is not set +CONFIG_INPUT_UINPUT=m +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_PWM_BEEPER is not set +# CONFIG_INPUT_PWM_VIBRA is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_IMS_PCU is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_DRV260X_HAPTICS is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +CONFIG_RMI4_CORE=m +# CONFIG_RMI4_I2C is not set +# CONFIG_RMI4_SPI is not set +# CONFIG_RMI4_SMB is not set +CONFIG_RMI4_F03=y +CONFIG_RMI4_F03_SERIO=m 
+CONFIG_RMI4_2D_SENSOR=y +CONFIG_RMI4_F11=y +CONFIG_RMI4_F12=y +CONFIG_RMI4_F30=y +# CONFIG_RMI4_F34 is not set +# CONFIG_RMI4_F54 is not set +# CONFIG_RMI4_F55 is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_SERIO_SERPORT=m +# CONFIG_SERIO_PARKBD is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +# CONFIG_SERIO_RAW is not set +# CONFIG_SERIO_ALTERA_PS2 is not set +# CONFIG_SERIO_PS2MULT is not set +# CONFIG_SERIO_ARC_PS2 is not set +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +CONFIG_GAMEPORT=m +# CONFIG_GAMEPORT_NS558 is not set +# CONFIG_GAMEPORT_L4 is not set +# CONFIG_GAMEPORT_EMU10K1 is not set +# CONFIG_GAMEPORT_FM801 is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_ROCKETPORT is not set +# CONFIG_CYCLADES is not set +# CONFIG_MOXA_INTELLIO is not set +CONFIG_MOXA_SMARTIO=m +# CONFIG_SYNCLINKMP is not set +# CONFIG_SYNCLINK_GT is not set +# CONFIG_NOZOMI is not set +# CONFIG_ISI is not set +# CONFIG_N_HDLC is not set +# CONFIG_N_GSM is not set +# CONFIG_TRACE_SINK is not set +# CONFIG_NULL_TTY is not set +CONFIG_LDISC_AUTOLOAD=y +CONFIG_DEVMEM=y +CONFIG_DEVKMEM=y + +# +# Serial drivers +# +CONFIG_SERIAL_8250=m +CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_PCI=m +CONFIG_SERIAL_8250_EXAR=m +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=32 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +# CONFIG_SERIAL_8250_ASPEED_VUART is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +# CONFIG_SERIAL_8250_RSA is not set +# CONFIG_SERIAL_8250_DW is not set +# 
CONFIG_SERIAL_8250_RT288X is not set +# CONFIG_SERIAL_OF_PLATFORM is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_SIFIVE is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_LMS_CONSOLE is not set +CONFIG_SERIAL_L_ZILOG=y +CONFIG_SERIAL_L_ZILOG_CONSOLE=y +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# end of Serial drivers + +CONFIG_SERIAL_MCTRL_GPIO=m +# CONFIG_SERIAL_DEV_BUS is not set +# CONFIG_TTY_PRINTK is not set +CONFIG_PRINTER=m +# CONFIG_LP_CONSOLE is not set +CONFIG_PPDEV=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_PLAT_DATA=y +# CONFIG_IPMI_PANIC_EVENT is not set +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +# CONFIG_IPMI_SSIF is not set +# CONFIG_IPMI_WATCHDOG is not set +CONFIG_IPMI_POWEROFF=m +# CONFIG_APPLICOM is not set +# CONFIG_RAW_DRIVER is not set +CONFIG_TCG_TPM=y +# CONFIG_TCG_TIS is not set +# CONFIG_TCG_TIS_SPI is not set +# CONFIG_TCG_TIS_I2C_ATMEL is not set +# CONFIG_TCG_TIS_I2C_INFINEON is not set +# CONFIG_TCG_TIS_I2C_NUVOTON is not set +# CONFIG_TCG_ATMEL is not set +# CONFIG_TCG_VTPM_PROXY is not set +# CONFIG_TCG_TIS_ST33ZP24_I2C is not set +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set +CONFIG_DEVPORT=y +# CONFIG_XILLYBUS is not set +# end of Character devices + +# CONFIG_RANDOM_TRUST_BOOTLOADER is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +# 
CONFIG_I2C_ARB_GPIO_CHALLENGE is not set +# CONFIG_I2C_MUX_GPIO is not set +CONFIG_I2C_MUX_LTC4306=m +# CONFIG_I2C_MUX_GPMUX is not set +# CONFIG_I2C_MUX_PCA9541 is not set +# CONFIG_I2C_MUX_PCA954x is not set +# CONFIG_I2C_MUX_REG is not set +# CONFIG_I2C_MUX_MLXCPLD is not set +# end of Multiplexer I2C Chip support + +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOBIT=y + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CBUS_GPIO is not set +# CONFIG_I2C_DESIGNWARE_PLATFORM is not set +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_DIOLAN_U2C is not set +# CONFIG_I2C_PARPORT is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set + +# +# Other I2C/SMBus bus drivers +# +# end of I2C Hardware Bus support + +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MEM is not set + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is 
not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_BUTTERFLY is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_NXP_FLEXSPI is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_LM70_LLP is not set +# CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_ROCKCHIP is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_SIFIVE is not set +# CONFIG_SPI_MXIC is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not set + +# +# SPI Protocol Masters +# +CONFIG_SPI_SPIDEV=m +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set +CONFIG_NTP_PPS=y + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_PARPORT is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +# CONFIG_DP83640_PHY is not set +# end of PTP clock support + +# CONFIG_PINCTRL is not set +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_OF_GPIO=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set +# CONFIG_GPIO_CADENCE is not set +# CONFIG_GPIO_DWAPB is not set +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_FTGPIO010 is not set +# CONFIG_GPIO_GENERIC_PLATFORM is not set +# CONFIG_GPIO_GRGPIO is not set +# CONFIG_GPIO_HLWD is not set +# CONFIG_GPIO_MB86S7X is not set +# CONFIG_GPIO_SAMA5D2_PIOBU is not set +# CONFIG_GPIO_SYSCON is not set +# CONFIG_GPIO_XILINX is not set +# CONFIG_GPIO_AMD_FCH is not set +# end of Memory mapped GPIO drivers + +# +# I2C GPIO expanders +# +# 
CONFIG_GPIO_ADP5588 is not set +# CONFIG_GPIO_ADNP is not set +# CONFIG_GPIO_GW_PLD is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +CONFIG_GPIO_PCA953X=m +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set +# end of I2C GPIO expanders + +# +# MFD GPIO expanders +# +# end of MFD GPIO expanders + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set +# end of PCI GPIO expanders + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_74X164 is not set +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set +# end of SPI GPIO expanders + +# +# USB GPIO expanders +# +# end of USB GPIO expanders + +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +# CONFIG_POWER_RESET is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_PDA_POWER is not set +# CONFIG_GENERIC_ADC_BATTERY is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_LEGO_EV3 is not set +CONFIG_BATTERY_SBS=m +CONFIG_CHARGER_SBS=m +# CONFIG_MANAGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_ISP1704 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_MANAGER is not set +# CONFIG_CHARGER_LT3651 is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24190 is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_SMB347 
is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_BATTERY_RT5033 is not set +# CONFIG_CHARGER_RT9455 is not set +# CONFIG_CHARGER_UCS1002 is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7314 is not set +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7310 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +CONFIG_SENSORS_ADT7475=m +# CONFIG_SENSORS_AS370 is not set +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_ASPEED is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_FTSTEUTATES is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_IBMAEM is not set +# CONFIG_SENSORS_IBMPEX is not set +# CONFIG_SENSORS_IIO_HWMON is not set +# CONFIG_SENSORS_IT87 is not set +CONFIG_SENSORS_JC42=m +# CONFIG_SENSORS_POWR1220 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2990 is not set +CONFIG_SENSORS_LTC4151=m +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4222 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4260 is not set +# CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_MAX1111 is not set +# 
CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX6621 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_ADCXX is not set +CONFIG_SENSORS_LM63=m +# CONFIG_SENSORS_LM70 is not set +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +CONFIG_SENSORS_LM95231=m +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +CONFIG_SENSORS_LM95245=m +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +CONFIG_SENSORS_NTC_THERMISTOR=m +# CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_PCF8591 is not set +CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +# CONFIG_SENSORS_ADM1275 is not set +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_INSPUR_IPSPS is not set +# CONFIG_SENSORS_IR35221 is not set +# CONFIG_SENSORS_IR38064 is not set +# CONFIG_SENSORS_IRPS5401 is not set +# CONFIG_SENSORS_ISL68137 is not set +# CONFIG_SENSORS_LM25066 is not set +CONFIG_SENSORS_LTC2978=m +CONFIG_SENSORS_LTC2978_REGULATOR=y +# CONFIG_SENSORS_LTC3815 is not set +# CONFIG_SENSORS_MAX16064 is not set +CONFIG_SENSORS_MAX20730=m +# CONFIG_SENSORS_MAX20751 is not set +# CONFIG_SENSORS_MAX31785 is not set +# 
CONFIG_SENSORS_MAX34440 is not set +# CONFIG_SENSORS_MAX8688 is not set +# CONFIG_SENSORS_PXE1610 is not set +# CONFIG_SENSORS_TPS40422 is not set +# CONFIG_SENSORS_TPS53679 is not set +# CONFIG_SENSORS_UCD9000 is not set +# CONFIG_SENSORS_UCD9200 is not set +# CONFIG_SENSORS_ZL6100 is not set +# CONFIG_SENSORS_PWM_FAN is not set +# CONFIG_SENSORS_SHT15 is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_SCH5627 is not set +# CONFIG_SENSORS_SCH5636 is not set +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_ADS7871 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83773G is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +CONFIG_THERMAL=y +# CONFIG_THERMAL_STATISTICS is not set 
+CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_BANG_BANG=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y +CONFIG_CPU_THERMAL=y +# CONFIG_THERMAL_EMULATION is not set +# CONFIG_THERMAL_MMIO is not set +# CONFIG_QORIQ_THERMAL is not set +# CONFIG_GENERIC_ADC_THERMAL is not set +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_OPEN_TIMEOUT=0 +# CONFIG_WATCHDOG_SYSFS is not set + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set + +# +# Watchdog Device Drivers +# +# CONFIG_SOFT_WATCHDOG is not set +# CONFIG_GPIO_WATCHDOG is not set +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +# CONFIG_ALIM7101_WDT is not set +# CONFIG_I6300ESB_WDT is not set +# CONFIG_MEN_A21_WDT is not set +CONFIG_L_WDT=y + +# +# PCI-based Watchdog Cards +# +# CONFIG_PCIPCWATCHDOG is not set +# CONFIG_WDTPCI is not set + +# +# USB-based Watchdog Cards +# +# CONFIG_USBPCWATCHDOG is not set +CONFIG_SSB_POSSIBLE=y +CONFIG_SSB=m +CONFIG_SSB_SPROM=y +CONFIG_SSB_BLOCKIO=y +CONFIG_SSB_PCIHOST_POSSIBLE=y +CONFIG_SSB_PCIHOST=y +CONFIG_SSB_B43_PCI_BRIDGE=y +CONFIG_SSB_SDIOHOST_POSSIBLE=y +CONFIG_SSB_SDIOHOST=y +CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y +CONFIG_SSB_DRIVER_PCICORE=y +# CONFIG_SSB_DRIVER_GPIO is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_BLOCKIO=y +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set 
+CONFIG_BCMA_DRIVER_PCI=y +# CONFIG_BCMA_DRIVER_GMAC_CMN is not set +# CONFIG_BCMA_DRIVER_GPIO is not set +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=m +# CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77620 is not set +# CONFIG_MFD_MAX77650 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_CPCAP is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_UCB1400_CORE is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_RK808 is not set +# CONFIG_MFD_RN5T618 is 
not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_STMPE is not set +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +CONFIG_MFD_WL1273_CORE=m +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TQMX86 is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_LOCHNAGAR is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ROHM_BD718XX is not set +# CONFIG_MFD_ROHM_BD70528 is not set +# CONFIG_MFD_STPMIC1 is not set +# CONFIG_MFD_STMFX is not set +# end of Multifunction device drivers + +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_DEBUG is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_88PG86X is not set +# CONFIG_REGULATOR_ACT8865 is not set +# CONFIG_REGULATOR_AD5398 is not set +# CONFIG_REGULATOR_ANATOP is not set +# CONFIG_REGULATOR_DA9210 is not set +# 
CONFIG_REGULATOR_DA9211 is not set +# CONFIG_REGULATOR_FAN53555 is not set +# CONFIG_REGULATOR_GPIO is not set +# CONFIG_REGULATOR_ISL9305 is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +# CONFIG_REGULATOR_LP872X is not set +# CONFIG_REGULATOR_LP8755 is not set +# CONFIG_REGULATOR_LTC3589 is not set +# CONFIG_REGULATOR_LTC3676 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_MAX8973 is not set +# CONFIG_REGULATOR_MCP16502 is not set +# CONFIG_REGULATOR_MT6311 is not set +# CONFIG_REGULATOR_PFUZE100 is not set +# CONFIG_REGULATOR_PV88060 is not set +# CONFIG_REGULATOR_PV88080 is not set +# CONFIG_REGULATOR_PV88090 is not set +# CONFIG_REGULATOR_PWM is not set +# CONFIG_REGULATOR_SLG51000 is not set +# CONFIG_REGULATOR_SY8106A is not set +# CONFIG_REGULATOR_SY8824X is not set +# CONFIG_REGULATOR_TPS51632 is not set +# CONFIG_REGULATOR_TPS62360 is not set +# CONFIG_REGULATOR_TPS65023 is not set +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_TPS65132 is not set +# CONFIG_REGULATOR_TPS6524X is not set +# CONFIG_REGULATOR_VCTRL is not set +CONFIG_CEC_CORE=y +CONFIG_CEC_NOTIFIER=y +CONFIG_RC_CORE=m +CONFIG_RC_MAP=m +CONFIG_LIRC=y +CONFIG_RC_DECODERS=y +CONFIG_IR_NEC_DECODER=m +CONFIG_IR_RC5_DECODER=m +CONFIG_IR_RC6_DECODER=m +CONFIG_IR_JVC_DECODER=m +CONFIG_IR_SONY_DECODER=m +CONFIG_IR_SANYO_DECODER=m +CONFIG_IR_SHARP_DECODER=m +CONFIG_IR_MCE_KBD_DECODER=m +CONFIG_IR_XMP_DECODER=m +CONFIG_IR_IMON_DECODER=m +CONFIG_IR_RCMM_DECODER=m +CONFIG_RC_DEVICES=y +CONFIG_RC_ATI_REMOTE=m +CONFIG_IR_HIX5HD2=m +CONFIG_IR_IMON=m +CONFIG_IR_IMON_RAW=m +CONFIG_IR_MCEUSB=m +CONFIG_IR_REDRAT3=m +# CONFIG_IR_SPI is not set +CONFIG_IR_STREAMZAP=m +CONFIG_IR_IGORPLUGUSB=m +CONFIG_IR_IGUANA=m +CONFIG_IR_TTUSBIR=m +# CONFIG_RC_LOOPBACK is not set +# CONFIG_IR_GPIO_CIR 
is not set +# CONFIG_IR_GPIO_TX is not set +# CONFIG_IR_PWM_TX is not set +CONFIG_IR_SERIAL=m +CONFIG_IR_SERIAL_TRANSMITTER=y +CONFIG_IR_SIR=m +CONFIG_RC_XBOX_DVD=m +CONFIG_MEDIA_SUPPORT=m + +# +# Multimedia core support +# +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_ANALOG_TV_SUPPORT=y +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y +CONFIG_MEDIA_RADIO_SUPPORT=y +CONFIG_MEDIA_SDR_SUPPORT=y +CONFIG_MEDIA_CEC_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_MEDIA_CONTROLLER_DVB=y +CONFIG_VIDEO_DEV=m +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_VIDEO_V4L2=m +CONFIG_VIDEO_V4L2_I2C=y +# CONFIG_VIDEO_ADV_DEBUG is not set +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set +CONFIG_VIDEO_TUNER=m +CONFIG_V4L2_MEM2MEM_DEV=m +# CONFIG_V4L2_FLASH_LED_CLASS is not set +CONFIG_V4L2_FWNODE=m +CONFIG_VIDEOBUF_GEN=m +CONFIG_VIDEOBUF_DMA_SG=m +CONFIG_VIDEOBUF_VMALLOC=m +CONFIG_DVB_CORE=m +CONFIG_DVB_MMAP=y +CONFIG_DVB_NET=y +CONFIG_TTPCI_EEPROM=m +CONFIG_DVB_MAX_ADAPTERS=8 +CONFIG_DVB_DYNAMIC_MINORS=y +# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set +# CONFIG_DVB_ULE_DEBUG is not set + +# +# Media drivers +# +CONFIG_MEDIA_USB_SUPPORT=y + +# +# Webcam devices +# +CONFIG_USB_VIDEO_CLASS=m +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y +CONFIG_USB_GSPCA=m +CONFIG_USB_M5602=m +CONFIG_USB_STV06XX=m +CONFIG_USB_GL860=m +CONFIG_USB_GSPCA_BENQ=m +CONFIG_USB_GSPCA_CONEX=m +CONFIG_USB_GSPCA_CPIA1=m +CONFIG_USB_GSPCA_DTCS033=m +CONFIG_USB_GSPCA_ETOMS=m +CONFIG_USB_GSPCA_FINEPIX=m +CONFIG_USB_GSPCA_JEILINJ=m +CONFIG_USB_GSPCA_JL2005BCD=m +CONFIG_USB_GSPCA_KINECT=m +CONFIG_USB_GSPCA_KONICA=m +CONFIG_USB_GSPCA_MARS=m +CONFIG_USB_GSPCA_MR97310A=m +CONFIG_USB_GSPCA_NW80X=m +CONFIG_USB_GSPCA_OV519=m +CONFIG_USB_GSPCA_OV534=m +CONFIG_USB_GSPCA_OV534_9=m +CONFIG_USB_GSPCA_PAC207=m +CONFIG_USB_GSPCA_PAC7302=m +CONFIG_USB_GSPCA_PAC7311=m +CONFIG_USB_GSPCA_SE401=m +CONFIG_USB_GSPCA_SN9C2028=m +CONFIG_USB_GSPCA_SN9C20X=m +CONFIG_USB_GSPCA_SONIXB=m +CONFIG_USB_GSPCA_SONIXJ=m +CONFIG_USB_GSPCA_SPCA500=m +CONFIG_USB_GSPCA_SPCA501=m 
+CONFIG_USB_GSPCA_SPCA505=m +CONFIG_USB_GSPCA_SPCA506=m +CONFIG_USB_GSPCA_SPCA508=m +CONFIG_USB_GSPCA_SPCA561=m +CONFIG_USB_GSPCA_SPCA1528=m +CONFIG_USB_GSPCA_SQ905=m +CONFIG_USB_GSPCA_SQ905C=m +CONFIG_USB_GSPCA_SQ930X=m +CONFIG_USB_GSPCA_STK014=m +CONFIG_USB_GSPCA_STK1135=m +CONFIG_USB_GSPCA_STV0680=m +CONFIG_USB_GSPCA_SUNPLUS=m +CONFIG_USB_GSPCA_T613=m +CONFIG_USB_GSPCA_TOPRO=m +CONFIG_USB_GSPCA_TOUPTEK=m +CONFIG_USB_GSPCA_TV8532=m +CONFIG_USB_GSPCA_VC032X=m +CONFIG_USB_GSPCA_VICAM=m +CONFIG_USB_GSPCA_XIRLINK_CIT=m +CONFIG_USB_GSPCA_ZC3XX=m +CONFIG_USB_PWC=m +# CONFIG_USB_PWC_DEBUG is not set +CONFIG_USB_PWC_INPUT_EVDEV=y +CONFIG_VIDEO_CPIA2=m +CONFIG_USB_ZR364XX=m +CONFIG_USB_STKWEBCAM=m +CONFIG_USB_S2255=m +CONFIG_VIDEO_USBTV=m + +# +# Analog TV USB devices +# +CONFIG_VIDEO_PVRUSB2=m +CONFIG_VIDEO_PVRUSB2_SYSFS=y +CONFIG_VIDEO_PVRUSB2_DVB=y +# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set +CONFIG_VIDEO_HDPVR=m +CONFIG_VIDEO_USBVISION=m +CONFIG_VIDEO_STK1160_COMMON=m +CONFIG_VIDEO_STK1160=m +CONFIG_VIDEO_GO7007=m +CONFIG_VIDEO_GO7007_USB=m +CONFIG_VIDEO_GO7007_LOADER=m +CONFIG_VIDEO_GO7007_USB_S2250_BOARD=m + +# +# Analog/digital TV USB devices +# +CONFIG_VIDEO_AU0828=m +CONFIG_VIDEO_AU0828_V4L2=y +CONFIG_VIDEO_AU0828_RC=y +CONFIG_VIDEO_CX231XX=m +CONFIG_VIDEO_CX231XX_RC=y +CONFIG_VIDEO_CX231XX_ALSA=m +CONFIG_VIDEO_CX231XX_DVB=m +CONFIG_VIDEO_TM6000=m +CONFIG_VIDEO_TM6000_ALSA=m +CONFIG_VIDEO_TM6000_DVB=m + +# +# Digital TV USB devices +# +CONFIG_DVB_USB=m +# CONFIG_DVB_USB_DEBUG is not set +CONFIG_DVB_USB_DIB3000MC=m +CONFIG_DVB_USB_A800=m +CONFIG_DVB_USB_DIBUSB_MB=m +CONFIG_DVB_USB_DIBUSB_MB_FAULTY=y +CONFIG_DVB_USB_DIBUSB_MC=m +CONFIG_DVB_USB_DIB0700=m +CONFIG_DVB_USB_UMT_010=m +CONFIG_DVB_USB_CXUSB=m +CONFIG_DVB_USB_CXUSB_ANALOG=y +CONFIG_DVB_USB_M920X=m +CONFIG_DVB_USB_DIGITV=m +CONFIG_DVB_USB_VP7045=m +CONFIG_DVB_USB_VP702X=m +CONFIG_DVB_USB_GP8PSK=m +CONFIG_DVB_USB_NOVA_T_USB2=m +CONFIG_DVB_USB_TTUSB2=m +CONFIG_DVB_USB_DTT200U=m +CONFIG_DVB_USB_OPERA1=m 
+CONFIG_DVB_USB_AF9005=m +CONFIG_DVB_USB_AF9005_REMOTE=m +CONFIG_DVB_USB_PCTV452E=m +CONFIG_DVB_USB_DW2102=m +CONFIG_DVB_USB_CINERGY_T2=m +CONFIG_DVB_USB_DTV5100=m +CONFIG_DVB_USB_AZ6027=m +CONFIG_DVB_USB_TECHNISAT_USB2=m +CONFIG_DVB_USB_V2=m +CONFIG_DVB_USB_AF9015=m +CONFIG_DVB_USB_AF9035=m +CONFIG_DVB_USB_ANYSEE=m +CONFIG_DVB_USB_AU6610=m +CONFIG_DVB_USB_AZ6007=m +CONFIG_DVB_USB_CE6230=m +CONFIG_DVB_USB_EC168=m +CONFIG_DVB_USB_GL861=m +CONFIG_DVB_USB_LME2510=m +CONFIG_DVB_USB_MXL111SF=m +CONFIG_DVB_USB_RTL28XXU=m +CONFIG_DVB_USB_DVBSKY=m +CONFIG_DVB_USB_ZD1301=m +CONFIG_DVB_TTUSB_BUDGET=m +CONFIG_DVB_TTUSB_DEC=m +CONFIG_SMS_USB_DRV=m +CONFIG_DVB_B2C2_FLEXCOP_USB=m +# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set +CONFIG_DVB_AS102=m + +# +# Webcam, TV (analog/digital) USB devices +# +CONFIG_VIDEO_EM28XX=m +CONFIG_VIDEO_EM28XX_V4L2=m +CONFIG_VIDEO_EM28XX_ALSA=m +CONFIG_VIDEO_EM28XX_DVB=m +CONFIG_VIDEO_EM28XX_RC=m + +# +# Software defined radio USB devices +# +CONFIG_USB_AIRSPY=m +CONFIG_USB_HACKRF=m +CONFIG_USB_MSI2500=m + +# +# USB HDMI CEC adapters +# +CONFIG_USB_PULSE8_CEC=m +CONFIG_USB_RAINSHADOW_CEC=m +CONFIG_MEDIA_PCI_SUPPORT=y + +# +# Media capture support +# +CONFIG_VIDEO_SOLO6X10=m +CONFIG_VIDEO_TW5864=m +CONFIG_VIDEO_TW68=m +CONFIG_VIDEO_TW686X=m + +# +# Media capture/analog TV support +# +CONFIG_VIDEO_IVTV=m +# CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS is not set +CONFIG_VIDEO_IVTV_ALSA=m +CONFIG_VIDEO_FB_IVTV=m +CONFIG_VIDEO_HEXIUM_GEMINI=m +CONFIG_VIDEO_HEXIUM_ORION=m +CONFIG_VIDEO_MXB=m +CONFIG_VIDEO_DT3155=m + +# +# Media capture/analog/hybrid TV support +# +CONFIG_VIDEO_CX18=m +CONFIG_VIDEO_CX18_ALSA=m +CONFIG_VIDEO_CX23885=m +CONFIG_MEDIA_ALTERA_CI=m +CONFIG_VIDEO_CX25821=m +CONFIG_VIDEO_CX25821_ALSA=m +CONFIG_VIDEO_CX88=m +CONFIG_VIDEO_CX88_ALSA=m +CONFIG_VIDEO_CX88_BLACKBIRD=m +CONFIG_VIDEO_CX88_DVB=m +CONFIG_VIDEO_CX88_ENABLE_VP3054=y +CONFIG_VIDEO_CX88_VP3054=m +CONFIG_VIDEO_CX88_MPEG=m +CONFIG_VIDEO_BT848=m +CONFIG_DVB_BT8XX=m +CONFIG_VIDEO_SAA7134=m 
+CONFIG_VIDEO_SAA7134_ALSA=m +CONFIG_VIDEO_SAA7134_RC=y +CONFIG_VIDEO_SAA7134_DVB=m +CONFIG_VIDEO_SAA7134_GO7007=m +CONFIG_VIDEO_SAA7164=m + +# +# Media digital TV PCI Adapters +# +CONFIG_DVB_AV7110_IR=y +CONFIG_DVB_AV7110=m +CONFIG_DVB_AV7110_OSD=y +CONFIG_DVB_BUDGET_CORE=m +CONFIG_DVB_BUDGET=m +CONFIG_DVB_BUDGET_CI=m +CONFIG_DVB_BUDGET_AV=m +CONFIG_DVB_BUDGET_PATCH=m +CONFIG_DVB_B2C2_FLEXCOP_PCI=m +# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set +CONFIG_DVB_PLUTO2=m +CONFIG_DVB_DM1105=m +CONFIG_DVB_PT1=m +CONFIG_DVB_PT3=m +CONFIG_MANTIS_CORE=m +CONFIG_DVB_MANTIS=m +CONFIG_DVB_HOPPER=m +CONFIG_DVB_NGENE=m +CONFIG_DVB_DDBRIDGE=m +# CONFIG_DVB_DDBRIDGE_MSIENABLE is not set +CONFIG_DVB_SMIPCIE=m +CONFIG_DVB_NETUP_UNIDVB=m +# CONFIG_V4L_PLATFORM_DRIVERS is not set +CONFIG_V4L_MEM2MEM_DRIVERS=y +CONFIG_VIDEO_MEM2MEM_DEINTERLACE=m +# CONFIG_VIDEO_SH_VEU is not set +# CONFIG_V4L_TEST_DRIVERS is not set +# CONFIG_DVB_PLATFORM_DRIVERS is not set +# CONFIG_CEC_PLATFORM_DRIVERS is not set +# CONFIG_SDR_PLATFORM_DRIVERS is not set + +# +# Supported MMC/SDIO adapters +# +CONFIG_SMS_SDIO_DRV=m +CONFIG_RADIO_ADAPTERS=y +CONFIG_RADIO_TEA575X=m +CONFIG_RADIO_SI470X=m +CONFIG_USB_SI470X=m +CONFIG_I2C_SI470X=m +CONFIG_RADIO_SI4713=m +CONFIG_USB_SI4713=m +CONFIG_PLATFORM_SI4713=m +CONFIG_I2C_SI4713=m +CONFIG_USB_MR800=m +CONFIG_USB_DSBR=m +CONFIG_RADIO_MAXIRADIO=m +CONFIG_RADIO_SHARK=m +CONFIG_RADIO_SHARK2=m +CONFIG_USB_KEENE=m +CONFIG_USB_RAREMONO=m +CONFIG_USB_MA901=m +CONFIG_RADIO_TEA5764=m +CONFIG_RADIO_SAA7706H=m +CONFIG_RADIO_TEF6862=m +CONFIG_RADIO_WL1273=m + +# +# Texas Instruments WL128x FM driver (ST based) +# +# end of Texas Instruments WL128x FM driver (ST based) + +# +# Supported FireWire (IEEE 1394) Adapters +# +CONFIG_DVB_FIREDTV=m +CONFIG_DVB_FIREDTV_INPUT=y +CONFIG_MEDIA_COMMON_OPTIONS=y + +# +# common driver options +# +CONFIG_VIDEO_CX2341X=m +CONFIG_VIDEO_TVEEPROM=m +CONFIG_CYPRESS_FIRMWARE=m +CONFIG_VIDEOBUF2_CORE=m +CONFIG_VIDEOBUF2_V4L2=m 
+CONFIG_VIDEOBUF2_MEMOPS=m +CONFIG_VIDEOBUF2_DMA_CONTIG=m +CONFIG_VIDEOBUF2_VMALLOC=m +CONFIG_VIDEOBUF2_DMA_SG=m +CONFIG_VIDEOBUF2_DVB=m +CONFIG_DVB_B2C2_FLEXCOP=m +CONFIG_VIDEO_SAA7146=m +CONFIG_VIDEO_SAA7146_VV=m +CONFIG_SMS_SIANO_MDTV=m +CONFIG_SMS_SIANO_RC=y +# CONFIG_SMS_SIANO_DEBUGFS is not set + +# +# Media ancillary drivers (tuners, sensors, i2c, spi, frontends) +# +CONFIG_MEDIA_SUBDRV_AUTOSELECT=y +CONFIG_MEDIA_ATTACH=y +CONFIG_VIDEO_IR_I2C=m + +# +# I2C Encoders, decoders, sensors and other helper chips +# + +# +# Audio decoders, processors and mixers +# +CONFIG_VIDEO_TVAUDIO=m +CONFIG_VIDEO_TDA7432=m +CONFIG_VIDEO_TDA9840=m +CONFIG_VIDEO_TEA6415C=m +CONFIG_VIDEO_TEA6420=m +CONFIG_VIDEO_MSP3400=m +CONFIG_VIDEO_CS3308=m +CONFIG_VIDEO_CS5345=m +CONFIG_VIDEO_CS53L32A=m +# CONFIG_VIDEO_TLV320AIC23B is not set +CONFIG_VIDEO_UDA1342=m +CONFIG_VIDEO_WM8775=m +CONFIG_VIDEO_WM8739=m +CONFIG_VIDEO_VP27SMPX=m +CONFIG_VIDEO_SONY_BTF_MPX=m + +# +# RDS decoders +# +CONFIG_VIDEO_SAA6588=m + +# +# Video decoders +# +# CONFIG_VIDEO_ADV7180 is not set +# CONFIG_VIDEO_ADV7183 is not set +# CONFIG_VIDEO_ADV748X is not set +# CONFIG_VIDEO_ADV7604 is not set +# CONFIG_VIDEO_ADV7842 is not set +# CONFIG_VIDEO_BT819 is not set +# CONFIG_VIDEO_BT856 is not set +# CONFIG_VIDEO_BT866 is not set +# CONFIG_VIDEO_KS0127 is not set +# CONFIG_VIDEO_ML86V7667 is not set +# CONFIG_VIDEO_SAA7110 is not set +CONFIG_VIDEO_SAA711X=m +# CONFIG_VIDEO_TC358743 is not set +# CONFIG_VIDEO_TVP514X is not set +CONFIG_VIDEO_TVP5150=m +# CONFIG_VIDEO_TVP7002 is not set +CONFIG_VIDEO_TW2804=m +CONFIG_VIDEO_TW9903=m +CONFIG_VIDEO_TW9906=m +# CONFIG_VIDEO_TW9910 is not set +# CONFIG_VIDEO_VPX3220 is not set + +# +# Video and audio decoders +# +CONFIG_VIDEO_SAA717X=m +CONFIG_VIDEO_CX25840=m + +# +# Video encoders +# +CONFIG_VIDEO_SAA7127=m +# CONFIG_VIDEO_SAA7185 is not set +# CONFIG_VIDEO_ADV7170 is not set +# CONFIG_VIDEO_ADV7175 is not set +# CONFIG_VIDEO_ADV7343 is not set +# CONFIG_VIDEO_ADV7393 is 
not set +# CONFIG_VIDEO_AD9389B is not set +# CONFIG_VIDEO_AK881X is not set +# CONFIG_VIDEO_THS8200 is not set + +# +# Camera sensor devices +# +# CONFIG_VIDEO_IMX214 is not set +# CONFIG_VIDEO_IMX258 is not set +# CONFIG_VIDEO_IMX274 is not set +# CONFIG_VIDEO_IMX319 is not set +# CONFIG_VIDEO_IMX355 is not set +CONFIG_VIDEO_OV2640=m +# CONFIG_VIDEO_OV2659 is not set +# CONFIG_VIDEO_OV2680 is not set +# CONFIG_VIDEO_OV2685 is not set +# CONFIG_VIDEO_OV5640 is not set +# CONFIG_VIDEO_OV5645 is not set +# CONFIG_VIDEO_OV5647 is not set +# CONFIG_VIDEO_OV6650 is not set +# CONFIG_VIDEO_OV5670 is not set +# CONFIG_VIDEO_OV5675 is not set +# CONFIG_VIDEO_OV5695 is not set +# CONFIG_VIDEO_OV7251 is not set +# CONFIG_VIDEO_OV772X is not set +CONFIG_VIDEO_OV7640=m +# CONFIG_VIDEO_OV7670 is not set +# CONFIG_VIDEO_OV7740 is not set +# CONFIG_VIDEO_OV8856 is not set +# CONFIG_VIDEO_OV9640 is not set +# CONFIG_VIDEO_OV9650 is not set +# CONFIG_VIDEO_OV13858 is not set +# CONFIG_VIDEO_VS6624 is not set +# CONFIG_VIDEO_MT9M001 is not set +# CONFIG_VIDEO_MT9M032 is not set +# CONFIG_VIDEO_MT9M111 is not set +# CONFIG_VIDEO_MT9P031 is not set +# CONFIG_VIDEO_MT9T001 is not set +# CONFIG_VIDEO_MT9T112 is not set +CONFIG_VIDEO_MT9V011=m +# CONFIG_VIDEO_MT9V032 is not set +# CONFIG_VIDEO_MT9V111 is not set +# CONFIG_VIDEO_SR030PC30 is not set +# CONFIG_VIDEO_NOON010PC30 is not set +# CONFIG_VIDEO_M5MOLS is not set +# CONFIG_VIDEO_RJ54N1 is not set +# CONFIG_VIDEO_S5K6AA is not set +# CONFIG_VIDEO_S5K6A3 is not set +# CONFIG_VIDEO_S5K4ECGX is not set +# CONFIG_VIDEO_S5K5BAF is not set +# CONFIG_VIDEO_ET8EK8 is not set +# CONFIG_VIDEO_S5C73M3 is not set + +# +# Lens drivers +# +# CONFIG_VIDEO_AD5820 is not set +# CONFIG_VIDEO_AK7375 is not set +# CONFIG_VIDEO_DW9714 is not set +# CONFIG_VIDEO_DW9807_VCM is not set + +# +# Flash devices +# +# CONFIG_VIDEO_ADP1653 is not set +# CONFIG_VIDEO_LM3560 is not set +# CONFIG_VIDEO_LM3646 is not set + +# +# Video improvement chips +# 
+CONFIG_VIDEO_UPD64031A=m +CONFIG_VIDEO_UPD64083=m + +# +# Audio/Video compression chips +# +CONFIG_VIDEO_SAA6752HS=m + +# +# SDR tuner chips +# +# CONFIG_SDR_MAX2175 is not set + +# +# Miscellaneous helper chips +# +# CONFIG_VIDEO_THS7303 is not set +CONFIG_VIDEO_M52790=m +# CONFIG_VIDEO_I2C is not set +# CONFIG_VIDEO_ST_MIPID02 is not set +# end of I2C Encoders, decoders, sensors and other helper chips + +# +# SPI helper chips +# +# CONFIG_VIDEO_GS1662 is not set +# end of SPI helper chips + +# +# Media SPI Adapters +# +# CONFIG_CXD2880_SPI_DRV is not set +# end of Media SPI Adapters + +CONFIG_MEDIA_TUNER=m + +# +# Customize TV tuners +# +CONFIG_MEDIA_TUNER_SIMPLE=m +CONFIG_MEDIA_TUNER_TDA18250=m +CONFIG_MEDIA_TUNER_TDA8290=m +CONFIG_MEDIA_TUNER_TDA827X=m +CONFIG_MEDIA_TUNER_TDA18271=m +CONFIG_MEDIA_TUNER_TDA9887=m +CONFIG_MEDIA_TUNER_TEA5761=m +CONFIG_MEDIA_TUNER_TEA5767=m +CONFIG_MEDIA_TUNER_MSI001=m +CONFIG_MEDIA_TUNER_MT20XX=m +CONFIG_MEDIA_TUNER_MT2060=m +CONFIG_MEDIA_TUNER_MT2063=m +CONFIG_MEDIA_TUNER_MT2266=m +CONFIG_MEDIA_TUNER_MT2131=m +CONFIG_MEDIA_TUNER_QT1010=m +CONFIG_MEDIA_TUNER_XC2028=m +CONFIG_MEDIA_TUNER_XC5000=m +CONFIG_MEDIA_TUNER_XC4000=m +CONFIG_MEDIA_TUNER_MXL5005S=m +CONFIG_MEDIA_TUNER_MXL5007T=m +CONFIG_MEDIA_TUNER_MC44S803=m +CONFIG_MEDIA_TUNER_MAX2165=m +CONFIG_MEDIA_TUNER_TDA18218=m +CONFIG_MEDIA_TUNER_FC0011=m +CONFIG_MEDIA_TUNER_FC0012=m +CONFIG_MEDIA_TUNER_FC0013=m +CONFIG_MEDIA_TUNER_TDA18212=m +CONFIG_MEDIA_TUNER_E4000=m +CONFIG_MEDIA_TUNER_FC2580=m +CONFIG_MEDIA_TUNER_M88RS6000T=m +CONFIG_MEDIA_TUNER_TUA9001=m +CONFIG_MEDIA_TUNER_SI2157=m +CONFIG_MEDIA_TUNER_IT913X=m +CONFIG_MEDIA_TUNER_R820T=m +CONFIG_MEDIA_TUNER_MXL301RF=m +CONFIG_MEDIA_TUNER_QM1D1C0042=m +CONFIG_MEDIA_TUNER_QM1D1B0004=m +# end of Customize TV tuners + +# +# Customise DVB Frontends +# + +# +# Multistandard (satellite) frontends +# +CONFIG_DVB_STB0899=m +CONFIG_DVB_STB6100=m +CONFIG_DVB_STV090x=m +CONFIG_DVB_STV0910=m +CONFIG_DVB_STV6110x=m +CONFIG_DVB_STV6111=m 
+CONFIG_DVB_MXL5XX=m +CONFIG_DVB_M88DS3103=m + +# +# Multistandard (cable + terrestrial) frontends +# +CONFIG_DVB_DRXK=m +CONFIG_DVB_TDA18271C2DD=m +CONFIG_DVB_SI2165=m +CONFIG_DVB_MN88472=m +CONFIG_DVB_MN88473=m + +# +# DVB-S (satellite) frontends +# +CONFIG_DVB_CX24110=m +CONFIG_DVB_CX24123=m +CONFIG_DVB_MT312=m +CONFIG_DVB_ZL10036=m +CONFIG_DVB_ZL10039=m +CONFIG_DVB_S5H1420=m +CONFIG_DVB_STV0288=m +CONFIG_DVB_STB6000=m +CONFIG_DVB_STV0299=m +CONFIG_DVB_STV6110=m +CONFIG_DVB_STV0900=m +CONFIG_DVB_TDA8083=m +CONFIG_DVB_TDA10086=m +CONFIG_DVB_TDA8261=m +CONFIG_DVB_VES1X93=m +CONFIG_DVB_TUNER_ITD1000=m +CONFIG_DVB_TUNER_CX24113=m +CONFIG_DVB_TDA826X=m +CONFIG_DVB_TUA6100=m +CONFIG_DVB_CX24116=m +CONFIG_DVB_CX24117=m +CONFIG_DVB_CX24120=m +CONFIG_DVB_SI21XX=m +CONFIG_DVB_TS2020=m +CONFIG_DVB_DS3000=m +CONFIG_DVB_MB86A16=m +CONFIG_DVB_TDA10071=m + +# +# DVB-T (terrestrial) frontends +# +CONFIG_DVB_SP8870=m +CONFIG_DVB_SP887X=m +CONFIG_DVB_CX22700=m +CONFIG_DVB_CX22702=m +# CONFIG_DVB_S5H1432 is not set +CONFIG_DVB_DRXD=m +CONFIG_DVB_L64781=m +CONFIG_DVB_TDA1004X=m +CONFIG_DVB_NXT6000=m +CONFIG_DVB_MT352=m +CONFIG_DVB_ZL10353=m +CONFIG_DVB_DIB3000MB=m +CONFIG_DVB_DIB3000MC=m +CONFIG_DVB_DIB7000M=m +CONFIG_DVB_DIB7000P=m +# CONFIG_DVB_DIB9000 is not set +CONFIG_DVB_TDA10048=m +CONFIG_DVB_AF9013=m +CONFIG_DVB_EC100=m +CONFIG_DVB_STV0367=m +CONFIG_DVB_CXD2820R=m +CONFIG_DVB_CXD2841ER=m +CONFIG_DVB_RTL2830=m +CONFIG_DVB_RTL2832=m +CONFIG_DVB_RTL2832_SDR=m +CONFIG_DVB_SI2168=m +CONFIG_DVB_AS102_FE=m +CONFIG_DVB_ZD1301_DEMOD=m +CONFIG_DVB_GP8PSK_FE=m +# CONFIG_DVB_CXD2880 is not set + +# +# DVB-C (cable) frontends +# +CONFIG_DVB_VES1820=m +CONFIG_DVB_TDA10021=m +CONFIG_DVB_TDA10023=m +CONFIG_DVB_STV0297=m + +# +# ATSC (North American/Korean Terrestrial/Cable DTV) frontends +# +CONFIG_DVB_NXT200X=m +CONFIG_DVB_OR51211=m +CONFIG_DVB_OR51132=m +CONFIG_DVB_BCM3510=m +CONFIG_DVB_LGDT330X=m +CONFIG_DVB_LGDT3305=m +CONFIG_DVB_LGDT3306A=m +CONFIG_DVB_LG2160=m +CONFIG_DVB_S5H1409=m 
+CONFIG_DVB_AU8522=m +CONFIG_DVB_AU8522_DTV=m +CONFIG_DVB_AU8522_V4L=m +CONFIG_DVB_S5H1411=m + +# +# ISDB-T (terrestrial) frontends +# +CONFIG_DVB_S921=m +CONFIG_DVB_DIB8000=m +CONFIG_DVB_MB86A20S=m + +# +# ISDB-S (satellite) & ISDB-T (terrestrial) frontends +# +CONFIG_DVB_TC90522=m +# CONFIG_DVB_MN88443X is not set + +# +# Digital terrestrial only tuners/PLL +# +CONFIG_DVB_PLL=m +CONFIG_DVB_TUNER_DIB0070=m +CONFIG_DVB_TUNER_DIB0090=m + +# +# SEC control devices for DVB-S +# +CONFIG_DVB_DRX39XYJ=m +CONFIG_DVB_LNBH25=m +# CONFIG_DVB_LNBH29 is not set +CONFIG_DVB_LNBP21=m +CONFIG_DVB_LNBP22=m +CONFIG_DVB_ISL6405=m +CONFIG_DVB_ISL6421=m +CONFIG_DVB_ISL6423=m +CONFIG_DVB_A8293=m +# CONFIG_DVB_LGS8GL5 is not set +CONFIG_DVB_LGS8GXX=m +CONFIG_DVB_ATBM8830=m +CONFIG_DVB_TDA665x=m +CONFIG_DVB_IX2505V=m +CONFIG_DVB_M88RS2000=m +CONFIG_DVB_AF9033=m +CONFIG_DVB_HORUS3A=m +CONFIG_DVB_ASCOT2E=m +CONFIG_DVB_HELENE=m + +# +# Common Interface (EN50221) controller drivers +# +CONFIG_DVB_CXD2099=m +CONFIG_DVB_SP2=m + +# +# Tools to develop new frontends +# +CONFIG_DVB_DUMMY_FE=m +# end of Customise DVB Frontends + +# +# Graphics support +# +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=16 +CONFIG_DRM=y +CONFIG_DRM_MIPI_DBI=m +CONFIG_DRM_MIPI_DSI=y +CONFIG_DRM_DP_AUX_CHARDEV=y +# CONFIG_DRM_DEBUG_MM is not set +# CONFIG_DRM_DEBUG_SELFTEST is not set +CONFIG_DRM_KMS_HELPER=y +CONFIG_DRM_KMS_FB_HELPER=y +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DP_CEC=y +CONFIG_DRM_TTM=m +CONFIG_DRM_VRAM_HELPER=m +CONFIG_DRM_GEM_CMA_HELPER=y +CONFIG_DRM_KMS_CMA_HELPER=y +CONFIG_DRM_GEM_SHMEM_HELPER=y +CONFIG_DRM_VM=y +CONFIG_DRM_SCHED=m + +# +# I2C encoder or helper chips +# +CONFIG_DRM_I2C_CH7006=m +CONFIG_DRM_I2C_SIL164=y +CONFIG_DRM_I2C_NXP_TDA998X=m +CONFIG_DRM_I2C_NXP_TDA9950=m +# end of I2C encoder or helper chips + +# +# ARM devices +# +# end of ARM devices + +CONFIG_DRM_RADEON=m +# 
CONFIG_DRM_RADEON_USERPTR is not set +CONFIG_DRM_AMDGPU=m +CONFIG_DRM_AMDGPU_SI=y +CONFIG_DRM_AMDGPU_CIK=y +# CONFIG_DRM_AMDGPU_USERPTR is not set +# CONFIG_DRM_AMDGPU_GART_DEBUGFS is not set + +# +# ACP (Audio CoProcessor) Configuration +# +CONFIG_DRM_AMD_ACP=y +# end of ACP (Audio CoProcessor) Configuration + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +# CONFIG_DEBUG_KERNEL_DC is not set +# end of Display Engine Configuration + +CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT=y +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +# CONFIG_NOUVEAU_DEBUG_MMU is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +CONFIG_DRM_VGEM=m +CONFIG_DRM_VIVANTE=m +CONFIG_DRM_VKMS=m +CONFIG_DRM_ATI_PCIGART=y +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +# CONFIG_DRM_MGAG200 is not set +# CONFIG_DRM_CIRRUS_QEMU is not set +# CONFIG_DRM_RCAR_DW_HDMI is not set +# CONFIG_DRM_RCAR_LVDS is not set +# CONFIG_DRM_QXL is not set +# CONFIG_DRM_BOCHS is not set +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +CONFIG_DRM_PANEL_ARM_VERSATILE=m +CONFIG_DRM_PANEL_LVDS=y +CONFIG_DRM_PANEL_SIMPLE=m +CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D=m +CONFIG_DRM_PANEL_ILITEK_IL9322=m +CONFIG_DRM_PANEL_ILITEK_ILI9881C=m +CONFIG_DRM_PANEL_INNOLUX_P079ZCA=m +CONFIG_DRM_PANEL_JDI_LT070ME05000=m +CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04=m +CONFIG_DRM_PANEL_SAMSUNG_LD9040=m +CONFIG_DRM_PANEL_LG_LB035Q02=m +CONFIG_DRM_PANEL_LG_LG4573=m +CONFIG_DRM_PANEL_NEC_NL8048HL11=m +CONFIG_DRM_PANEL_NOVATEK_NT39016=m +CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO=m +CONFIG_DRM_PANEL_ORISETECH_OTM8009A=m +CONFIG_DRM_PANEL_OSD_OSD101T2587_53TS=m +CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00=m +CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN=m +CONFIG_DRM_PANEL_RAYDIUM_RM67191=m +CONFIG_DRM_PANEL_RAYDIUM_RM68200=m +CONFIG_DRM_PANEL_ROCKTECH_JH057N00900=m +CONFIG_DRM_PANEL_RONBO_RB070D30=m +CONFIG_DRM_PANEL_SAMSUNG_S6D16D0=m +CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2=m +CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03=m +CONFIG_DRM_PANEL_SAMSUNG_S6E63M0=m 
+CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m +CONFIG_DRM_PANEL_SEIKO_43WVF1G=m +CONFIG_DRM_PANEL_SHARP_LQ101R1SX01=m +CONFIG_DRM_PANEL_SHARP_LS037V7DW01=m +CONFIG_DRM_PANEL_SHARP_LS043T1LE01=m +CONFIG_DRM_PANEL_SITRONIX_ST7701=m +CONFIG_DRM_PANEL_SITRONIX_ST7789V=m +CONFIG_DRM_PANEL_SONY_ACX565AKM=m +CONFIG_DRM_PANEL_TPO_TD028TTEC1=m +CONFIG_DRM_PANEL_TPO_TD043MTEA1=m +CONFIG_DRM_PANEL_TPO_TPG110=m +CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA=m +# end of Display Panels + +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +CONFIG_DRM_ANALOGIX_ANX78XX=m +# CONFIG_DRM_CDNS_DSI is not set +CONFIG_DRM_DUMB_VGA_DAC=m +CONFIG_DRM_LVDS_ENCODER=m +CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW=m +CONFIG_DRM_NXP_PTN3460=m +CONFIG_DRM_PARADE_PS8622=m +CONFIG_DRM_SIL_SII8620=m +CONFIG_DRM_SII902X=m +CONFIG_DRM_SII9234=m +CONFIG_DRM_THINE_THC63LVD1024=m +CONFIG_DRM_TOSHIBA_TC358764=m +CONFIG_DRM_TOSHIBA_TC358767=m +CONFIG_DRM_TI_TFP410=m +CONFIG_DRM_TI_SN65DSI86=m +CONFIG_DRM_I2C_ADV7511=m +CONFIG_DRM_I2C_ADV7533=y +CONFIG_DRM_I2C_ADV7511_CEC=y +CONFIG_DRM_DW_HDMI=y +CONFIG_DRM_DW_HDMI_AHB_AUDIO=m +CONFIG_DRM_DW_HDMI_CEC=y +# end of Display Interface Bridges + +CONFIG_DRM_IMX_HDMI=y +CONFIG_DRM_ETNAVIV=m +CONFIG_DRM_ETNAVIV_THERMAL=y +# CONFIG_DRM_ARCPGU is not set +CONFIG_DRM_GM12U320=m +CONFIG_TINYDRM_HX8357D=m +CONFIG_TINYDRM_ILI9225=m +CONFIG_TINYDRM_ILI9341=m +CONFIG_TINYDRM_MI0283QT=m +CONFIG_TINYDRM_REPAPER=m +CONFIG_TINYDRM_ST7586=m +CONFIG_TINYDRM_ST7735R=m +CONFIG_DRM_LEGACY=y +# CONFIG_DRM_TDFX is not set +# CONFIG_DRM_R128 is not set +# CONFIG_DRM_MGA is not set +# CONFIG_DRM_VIA is not set +# CONFIG_DRM_SAVAGE is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_DDC=m +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# 
CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_BACKLIGHT=m +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +CONFIG_FB_VGA16=y +# CONFIG_FB_UVESA is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +CONFIG_FB_RADEON=m +CONFIG_FB_RADEON_I2C=y +CONFIG_FB_RADEON_BACKLIGHT=y +# CONFIG_FB_RADEON_DEBUG is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SSD1307 is not set +# CONFIG_FB_SM712 is not set +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +# CONFIG_LCD_PLATFORM is not set +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_BACKLIGHT_GENERIC=y 
+CONFIG_BACKLIGHT_PWM=y +# CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set +# CONFIG_BACKLIGHT_LP855X is not set +# CONFIG_BACKLIGHT_GPIO is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# end of Backlight & LCD device support + +CONFIG_VGASTATE=y +CONFIG_VIDEOMODE_HELPERS=y +CONFIG_HDMI=y +CONFIG_FB_MGAM83=m +CONFIG_PCI_FB_MGAM83=m +CONFIG_MGA_HWCOPYAREA=y +CONFIG_MGA_HWIMAGEBLIT=y +CONFIG_FB_MGA3D=m +CONFIG_FB_LYNXFB=m +CONFIG_FB_LYNXFB_DOMAINS=y + +# +# Console display driver support +# +CONFIG_VGA_CONSOLE=y +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + +# CONFIG_LOGO is not set +# end of Graphics support + +CONFIG_SOUND=m +CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y +CONFIG_SND=m +CONFIG_SND_TIMER=m +CONFIG_SND_PCM=m +CONFIG_SND_PCM_ELD=y +CONFIG_SND_PCM_IEC958=y +CONFIG_SND_HWDEP=m +CONFIG_SND_SEQ_DEVICE=m +CONFIG_SND_RAWMIDI=m +CONFIG_SND_JACK=y +CONFIG_SND_JACK_INPUT_DEV=y +CONFIG_SND_OSSEMUL=y +CONFIG_SND_MIXER_OSS=m +CONFIG_SND_PCM_OSS=m +CONFIG_SND_PCM_OSS_PLUGINS=y +CONFIG_SND_PCM_TIMER=y +CONFIG_SND_HRTIMER=m +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_MAX_CARDS=32 +CONFIG_SND_SUPPORT_OLD_API=y +CONFIG_SND_PROC_FS=y +CONFIG_SND_VERBOSE_PROCFS=y +# CONFIG_SND_VERBOSE_PRINTK is not set +# CONFIG_SND_DEBUG is not set +CONFIG_SND_VMASTER=y +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=m +CONFIG_SND_SEQ_HRTIMER_DEFAULT=y +CONFIG_SND_SEQ_MIDI_EVENT=m +CONFIG_SND_SEQ_MIDI=m +CONFIG_SND_SEQ_MIDI_EMUL=m +CONFIG_SND_SEQ_VIRMIDI=m 
+CONFIG_SND_MPU401_UART=m +CONFIG_SND_OPL3_LIB=m +CONFIG_SND_OPL3_LIB_SEQ=m +CONFIG_SND_VX_LIB=m +CONFIG_SND_AC97_CODEC=m +CONFIG_SND_DRIVERS=y +CONFIG_SND_DUMMY=m +CONFIG_SND_ALOOP=m +CONFIG_SND_VIRMIDI=m +CONFIG_SND_MTPAV=m +CONFIG_SND_MTS64=m +CONFIG_SND_SERIAL_U16550=m +CONFIG_SND_MPU401=m +CONFIG_SND_PORTMAN2X4=m +CONFIG_SND_AC97_POWER_SAVE=y +CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0 +CONFIG_SND_PCI=y +CONFIG_SND_AD1889=m +CONFIG_SND_ALS300=m +CONFIG_SND_ALI5451=m +CONFIG_SND_ATIIXP=m +CONFIG_SND_ATIIXP_MODEM=m +CONFIG_SND_AU8810=m +CONFIG_SND_AU8820=m +CONFIG_SND_AU8830=m +CONFIG_SND_AW2=m +CONFIG_SND_AZT3328=m +CONFIG_SND_BT87X=m +CONFIG_SND_BT87X_OVERCLOCK=y +CONFIG_SND_CA0106=m +CONFIG_SND_CMIPCI=m +CONFIG_SND_OXYGEN_LIB=m +CONFIG_SND_OXYGEN=m +CONFIG_SND_CS4281=m +CONFIG_SND_CS46XX=m +CONFIG_SND_CS46XX_NEW_DSP=y +CONFIG_SND_CTXFI=m +CONFIG_SND_DARLA20=m +CONFIG_SND_GINA20=m +CONFIG_SND_LAYLA20=m +CONFIG_SND_DARLA24=m +CONFIG_SND_GINA24=m +CONFIG_SND_LAYLA24=m +CONFIG_SND_MONA=m +CONFIG_SND_MIA=m +CONFIG_SND_ECHO3G=m +CONFIG_SND_INDIGO=m +CONFIG_SND_INDIGOIO=m +CONFIG_SND_INDIGODJ=m +CONFIG_SND_INDIGOIOX=m +CONFIG_SND_INDIGODJX=m +CONFIG_SND_EMU10K1=m +CONFIG_SND_EMU10K1_SEQ=m +CONFIG_SND_EMU10K1X=m +CONFIG_SND_ENS1370=m +CONFIG_SND_ENS1371=m +CONFIG_SND_ES1938=m +CONFIG_SND_ES1968=m +CONFIG_SND_ES1968_INPUT=y +CONFIG_SND_ES1968_RADIO=y +CONFIG_SND_FM801=m +CONFIG_SND_FM801_TEA575X_BOOL=y +CONFIG_SND_HDSP=m +CONFIG_SND_HDSPM=m +CONFIG_SND_ICE1712=m +CONFIG_SND_ICE1724=m +CONFIG_SND_INTEL8X0=m +CONFIG_SND_INTEL8X0M=m +CONFIG_SND_KORG1212=m +CONFIG_SND_LOLA=m +CONFIG_SND_LX6464ES=m +CONFIG_SND_MAESTRO3=m +CONFIG_SND_MAESTRO3_INPUT=y +CONFIG_SND_MIXART=m +CONFIG_SND_NM256=m +CONFIG_SND_PCXHR=m +CONFIG_SND_RIPTIDE=m +CONFIG_SND_RME32=m +CONFIG_SND_RME96=m +CONFIG_SND_RME9652=m +CONFIG_SND_SONICVIBES=m +CONFIG_SND_TRIDENT=m +CONFIG_SND_VIA82XX=m +CONFIG_SND_VIA82XX_MODEM=m +CONFIG_SND_VIRTUOSO=m +CONFIG_SND_VX222=m +CONFIG_SND_YMFPCI=m + +# +# HD-Audio +# 
+CONFIG_SND_HDA=m +CONFIG_SND_HDA_INTEL=m +# CONFIG_SND_HDA_INTEL_DETECT_DMIC is not set +CONFIG_SND_HDA_HWDEP=y +CONFIG_SND_HDA_RECONFIG=y +CONFIG_SND_HDA_INPUT_BEEP=y +CONFIG_SND_HDA_INPUT_BEEP_MODE=1 +CONFIG_SND_HDA_PATCH_LOADER=y +CONFIG_SND_HDA_CODEC_REALTEK=m +CONFIG_SND_HDA_CODEC_ANALOG=m +CONFIG_SND_HDA_CODEC_SIGMATEL=m +CONFIG_SND_HDA_CODEC_VIA=m +CONFIG_SND_HDA_CODEC_HDMI=m +CONFIG_SND_HDA_CODEC_CIRRUS=m +CONFIG_SND_HDA_CODEC_CONEXANT=m +CONFIG_SND_HDA_CODEC_CA0110=m +CONFIG_SND_HDA_CODEC_CA0132=m +CONFIG_SND_HDA_CODEC_CA0132_DSP=y +CONFIG_SND_HDA_CODEC_CMEDIA=m +CONFIG_SND_HDA_CODEC_SI3054=m +CONFIG_SND_HDA_GENERIC=m +CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 +# end of HD-Audio + +CONFIG_SND_HDA_CORE=m +CONFIG_SND_HDA_DSP_LOADER=y +CONFIG_SND_HDA_COMPONENT=y +CONFIG_SND_HDA_PREALLOC_SIZE=2048 +# CONFIG_SND_SPI is not set +CONFIG_SND_USB=y +CONFIG_SND_USB_AUDIO=m +CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER=y +CONFIG_SND_USB_UA101=m +CONFIG_SND_USB_CAIAQ=m +CONFIG_SND_USB_CAIAQ_INPUT=y +CONFIG_SND_USB_6FIRE=m +CONFIG_SND_USB_HIFACE=m +CONFIG_SND_BCD2000=m +CONFIG_SND_USB_LINE6=m +CONFIG_SND_USB_POD=m +CONFIG_SND_USB_PODHD=m +CONFIG_SND_USB_TONEPORT=m +CONFIG_SND_USB_VARIAX=m +CONFIG_SND_FIREWIRE=y +# CONFIG_SND_DICE is not set +# CONFIG_SND_OXFW is not set +# CONFIG_SND_ISIGHT is not set +# CONFIG_SND_FIREWORKS is not set +# CONFIG_SND_BEBOB is not set +# CONFIG_SND_FIREWIRE_DIGI00X is not set +# CONFIG_SND_FIREWIRE_TASCAM is not set +# CONFIG_SND_FIREWIRE_MOTU is not set +# CONFIG_SND_FIREFACE is not set +# CONFIG_SND_SOC is not set +CONFIG_SND_SYNTH_EMUX=m +CONFIG_AC97_BUS=m + +# +# HID support +# +CONFIG_HID=y +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=m +CONFIG_HID_ACCUTOUCH=m +CONFIG_HID_ACRUX=m +CONFIG_HID_ACRUX_FF=y +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +CONFIG_HID_ASUS=m +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m +CONFIG_HID_BIGBEN_FF=m 
+CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +CONFIG_HID_COUGAR=m +CONFIG_HID_MACALLY=m +CONFIG_HID_PRODIKEYS=m +CONFIG_HID_CMEDIA=m +CONFIG_HID_CP2112=m +CONFIG_HID_CREATIVE_SB0540=m +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +CONFIG_DRAGONRISE_FF=y +CONFIG_HID_EMS_FF=m +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +CONFIG_HID_EZKEY=m +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +CONFIG_HID_HOLTEK=m +CONFIG_HOLTEK_FF=y +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +CONFIG_HID_VIEWSONIC=m +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LED=m +CONFIG_HID_LENOVO=m +CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_LOGITECH_HIDPP=m +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_LOGIWHEELS_FF=y +CONFIG_HID_MAGICMOUSE=m +CONFIG_HID_MALTRON=m +CONFIG_HID_MAYFLASH=m +CONFIG_HID_REDRAGON=m +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=m +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PICOLCD_FB=y +CONFIG_HID_PICOLCD_BACKLIGHT=y +CONFIG_HID_PICOLCD_LCD=y +CONFIG_HID_PICOLCD_LEDS=y +CONFIG_HID_PICOLCD_CIR=y +CONFIG_HID_PLANTRONICS=m +CONFIG_HID_PRIMAX=m +CONFIG_HID_RETRODE=m +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +CONFIG_HID_STEAM=m +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +CONFIG_GREENASIA_FF=y +CONFIG_HID_SMARTJOYPLUS=m +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +CONFIG_THRUSTMASTER_FF=y +CONFIG_HID_UDRAW_PS3=m +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m 
+CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +CONFIG_ZEROPLUS_FF=y +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=m +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m +CONFIG_HID_ALPS=m +# end of Special HID drivers + +# +# USB HID support +# +CONFIG_USB_HID=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +# end of USB HID support + +# +# I2C HID support +# +# CONFIG_I2C_HID is not set +# end of I2C HID support +# end of HID support + +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_LED_TRIG=y +CONFIG_USB_ULPI_BUS=m +# CONFIG_USB_CONN_GPIO is not set +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_DYNAMIC_MINORS is not set +CONFIG_USB_OTG=y +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +CONFIG_USB_OTG_FSM=m +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_AUTOSUSPEND_DELAY=2 +CONFIG_USB_MON=m + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=m +# CONFIG_USB_XHCI_DBGCAP is not set +CONFIG_USB_XHCI_PCI=m +CONFIG_USB_XHCI_PLATFORM=m +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_FSL is not set +# CONFIG_USB_EHCI_HCD_PLATFORM is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +CONFIG_USB_UHCI_HCD=m +# CONFIG_USB_U132_HCD is not set +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_SSB is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +CONFIG_USB_TMC=m + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may 
+# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +CONFIG_USBIP_CORE=m +CONFIG_USBIP_VHCI_HCD=m +CONFIG_USBIP_VHCI_HC_PORTS=8 +CONFIG_USBIP_VHCI_NR_HCS=1 +CONFIG_USBIP_HOST=m +CONFIG_USBIP_VUDC=m +# CONFIG_USBIP_DEBUG is not set +CONFIG_USB_CDNS3=m +CONFIG_USB_CDNS3_GADGET=y +CONFIG_USB_CDNS3_HOST=y +CONFIG_USB_MUSB_HDRC=m +# CONFIG_USB_MUSB_HOST is not set +# CONFIG_USB_MUSB_GADGET is not set +CONFIG_USB_MUSB_DUAL_ROLE=y + +# +# Platform Glue Layer +# + +# +# MUSB DMA mode +# +# CONFIG_MUSB_PIO_ONLY is not set +CONFIG_USB_DWC3=m +CONFIG_USB_DWC3_ULPI=y +# CONFIG_USB_DWC3_HOST is not set +# CONFIG_USB_DWC3_GADGET is not set +CONFIG_USB_DWC3_DUAL_ROLE=y + +# +# Platform Glue Driver Support +# +CONFIG_USB_DWC3_HAPS=m +CONFIG_USB_DWC2=m +# CONFIG_USB_DWC2_HOST is not set + +# +# Gadget/Dual-role mode requires USB Gadget support to be enabled +# +# CONFIG_USB_DWC2_PERIPHERAL is not set +CONFIG_USB_DWC2_DUAL_ROLE=y +CONFIG_USB_DWC2_PCI=m +# CONFIG_USB_DWC2_DEBUG is not set +# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set +CONFIG_USB_CHIPIDEA=m +CONFIG_USB_CHIPIDEA_OF=m +CONFIG_USB_CHIPIDEA_PCI=m +CONFIG_USB_CHIPIDEA_UDC=y +CONFIG_USB_CHIPIDEA_HOST=y +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +# CONFIG_USB_USS720 is not set +CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_SIMPLE=m +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m 
+CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +CONFIG_USB_SERIAL_F81232=m +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +CONFIG_USB_SERIAL_METRO=m +CONFIG_USB_SERIAL_MOS7720=m +# CONFIG_USB_SERIAL_MOS7715_PARPORT is not set +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_XIRCOM=m +CONFIG_USB_SERIAL_WWAN=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +CONFIG_USB_SERIAL_WISHBONE=m +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +CONFIG_USB_SERIAL_DEBUG=m + +# +# USB Miscellaneous drivers +# +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +CONFIG_USB_CYPRESS_CY7C63=m +CONFIG_USB_CYTHERM=m +CONFIG_USB_IDMOUSE=m +CONFIG_USB_FTDI_ELAN=m +CONFIG_USB_APPLEDISPLAY=m +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_LD=m +CONFIG_USB_TRANCEVIBRATOR=m +CONFIG_USB_IOWARRIOR=m +CONFIG_USB_TEST=m +CONFIG_USB_EHSET_TEST_FIXTURE=m +CONFIG_USB_ISIGHTFW=m +CONFIG_USB_YUREX=m +CONFIG_USB_EZUSB_FX2=m 
+CONFIG_USB_HUB_USB251XB=m +CONFIG_USB_HSIC_USB3503=m +CONFIG_USB_HSIC_USB4604=m +CONFIG_USB_LINK_LAYER_TEST=m +CONFIG_USB_ATM=m +CONFIG_USB_SPEEDTOUCH=m +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m + +# +# USB Physical Layer drivers +# +CONFIG_USB_PHY=y +CONFIG_NOP_USB_XCEIV=m +CONFIG_USB_GPIO_VBUS=m +CONFIG_USB_ISP1301=m +# end of USB Physical Layer drivers + +CONFIG_USB_GADGET=m +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=2 +CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 + +# +# USB Peripheral Controller +# +# CONFIG_USB_FOTG210_UDC is not set +# CONFIG_USB_GR_UDC is not set +# CONFIG_USB_R8A66597 is not set +# CONFIG_USB_PXA27X is not set +# CONFIG_USB_MV_UDC is not set +# CONFIG_USB_MV_U3D is not set +# CONFIG_USB_SNP_UDC_PLAT is not set +# CONFIG_USB_M66592 is not set +# CONFIG_USB_BDC_UDC is not set +# CONFIG_USB_AMD5536UDC is not set +# CONFIG_USB_NET2272 is not set +# CONFIG_USB_NET2280 is not set +# CONFIG_USB_GOKU is not set +# CONFIG_USB_EG20T is not set +# CONFIG_USB_GADGET_XILINX is not set +# CONFIG_USB_DUMMY_HCD is not set +# end of USB Peripheral Controller + +CONFIG_USB_LIBCOMPOSITE=m +CONFIG_USB_U_ETHER=m +CONFIG_USB_F_ECM=m +CONFIG_USB_F_SUBSET=m +CONFIG_USB_CONFIGFS=m +# CONFIG_USB_CONFIGFS_SERIAL is not set +# CONFIG_USB_CONFIGFS_ACM is not set +# CONFIG_USB_CONFIGFS_OBEX is not set +# CONFIG_USB_CONFIGFS_NCM is not set +CONFIG_USB_CONFIGFS_ECM=y +CONFIG_USB_CONFIGFS_ECM_SUBSET=y +# CONFIG_USB_CONFIGFS_RNDIS is not set +# CONFIG_USB_CONFIGFS_EEM is not set +# CONFIG_USB_CONFIGFS_PHONET is not set +# CONFIG_USB_CONFIGFS_MASS_STORAGE is not set +# CONFIG_USB_CONFIGFS_F_LB_SS is not set +# CONFIG_USB_CONFIGFS_F_FS is not set +# CONFIG_USB_CONFIGFS_F_UAC1 is not set +# CONFIG_USB_CONFIGFS_F_UAC1_LEGACY is not set +# CONFIG_USB_CONFIGFS_F_UAC2 is not set +# CONFIG_USB_CONFIGFS_F_MIDI is not set +# CONFIG_USB_CONFIGFS_F_HID is 
not set +# CONFIG_USB_CONFIGFS_F_UVC is not set +# CONFIG_USB_CONFIGFS_F_PRINTER is not set +# CONFIG_USB_CONFIGFS_F_TCM is not set +CONFIG_TYPEC=m +CONFIG_TYPEC_TCPM=m +CONFIG_TYPEC_TCPCI=m +CONFIG_TYPEC_RT1711H=m +CONFIG_TYPEC_FUSB302=m +CONFIG_TYPEC_UCSI=m +CONFIG_UCSI_CCG=m +CONFIG_TYPEC_TPS6598X=m + +# +# USB Type-C Multiplexer/DeMultiplexer Switch support +# +CONFIG_TYPEC_MUX_PI3USB30532=m +# end of USB Type-C Multiplexer/DeMultiplexer Switch support + +# +# USB Type-C Alternate Mode drivers +# +CONFIG_TYPEC_DP_ALTMODE=m +CONFIG_TYPEC_NVIDIA_ALTMODE=m +# end of USB Type-C Alternate Mode drivers + +CONFIG_USB_ROLE_SWITCH=m +CONFIG_MMC=y +CONFIG_PWRSEQ_EMMC=y +CONFIG_PWRSEQ_SD8787=m +CONFIG_PWRSEQ_SIMPLE=y +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=8 +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_IO_ACCESSORS=y +CONFIG_MMC_SDHCI_PCI=m +# CONFIG_MMC_RICOH_MMC is not set +# CONFIG_MMC_SDHCI_PLTFM is not set +# CONFIG_MMC_TIFM_SD is not set +# CONFIG_MMC_SPI is not set +# CONFIG_MMC_CB710 is not set +# CONFIG_MMC_VIA_SDMMC is not set +# CONFIG_MMC_VUB300 is not set +# CONFIG_MMC_USHC is not set +# CONFIG_MMC_USDHI6ROL0 is not set +CONFIG_MMC_CQHCI=m +# CONFIG_MMC_TOSHIBA_PCI is not set +# CONFIG_MMC_MTK is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=m +CONFIG_LEDS_CLASS_FLASH=m +CONFIG_LEDS_BRIGHTNESS_HW_CHANGED=y + +# +# LED drivers +# +# CONFIG_LEDS_AN30259A is not set +# CONFIG_LEDS_AS3645A is not set +# CONFIG_LEDS_BCM6328 is not set +# CONFIG_LEDS_BCM6358 is not set +# CONFIG_LEDS_CR0014114 is not set +# CONFIG_LEDS_LM3530 is not set +# CONFIG_LEDS_LM3532 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_LM3692X is not set +# CONFIG_LEDS_LM3601X is not set +# CONFIG_LEDS_PCA9532 is not set +CONFIG_LEDS_GPIO=m +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP3952 is not set +# 
CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +# CONFIG_LEDS_LP5562 is not set +# CONFIG_LEDS_LP8501 is not set +# CONFIG_LEDS_LP8860 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_LT3593 is not set +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_KTD2692 is not set +# CONFIG_LEDS_IS31FL319X is not set +# CONFIG_LEDS_IS31FL32XX is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +# CONFIG_LEDS_BLINKM is not set +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set +# CONFIG_LEDS_SPI_BYTE is not set +# CONFIG_LEDS_TI_LMU_COMMON is not set + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +CONFIG_LEDS_TRIGGER_DISK=y +CONFIG_LEDS_TRIGGER_MTD=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +CONFIG_LEDS_TRIGGER_CPU=y +CONFIG_LEDS_TRIGGER_ACTIVITY=m +CONFIG_LEDS_TRIGGER_GPIO=m +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +CONFIG_LEDS_TRIGGER_PANIC=y +CONFIG_LEDS_TRIGGER_NETDEV=m +CONFIG_LEDS_TRIGGER_PATTERN=m +CONFIG_LEDS_TRIGGER_AUDIO=m +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_EXP_LEGACY_VERBS_NEW_UAPI=y +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_VIRT_DMA=y +CONFIG_INFINIBAND_MTHCA=m +CONFIG_INFINIBAND_MTHCA_DEBUG=y +CONFIG_INFINIBAND_CXGB3=m +CONFIG_INFINIBAND_CXGB4=m +CONFIG_INFINIBAND_EFA=m +CONFIG_INFINIBAND_I40IW=m +CONFIG_MLX4_INFINIBAND=m 
+CONFIG_MLX5_INFINIBAND=m +CONFIG_INFINIBAND_OCRDMA=m +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_QEDR=m +CONFIG_RDMA_RXE=m +CONFIG_RDMA_SIW=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=m +CONFIG_EDAC_LEGACY_SYSFS=y +# CONFIG_EDAC_DEBUG is not set +CONFIG_EDAC_E2K=m +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABEOZ9 is not set +# CONFIG_RTC_DRV_ABX80X is not set +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_HYM8563 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_ISL12026 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8523 is not set +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8010 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV8803 is not set +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# +# 
CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +CONFIG_RTC_DRV_CY14B101P=y +CONFIG_RTC_DRV_FM33256=y +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RX4581 is not set +# CONFIG_RTC_DRV_RX6110 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_CMOS is not set +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_DS2404 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set +# CONFIG_RTC_DRV_ZYNQMP is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_CADENCE is not set +# CONFIG_RTC_DRV_FTRTC010 is not set +# CONFIG_RTC_DRV_SNVS is not set +# CONFIG_RTC_DRV_R7301 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +# CONFIG_DMADEVICES is not set + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_SELFTESTS is not set +# end of DMABUF options + +# CONFIG_AUXDISPLAY is not set +# CONFIG_PANEL is not set +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_AEC=m 
+CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +CONFIG_UIO_NETX=m +CONFIG_UIO_PRUSS=m +CONFIG_UIO_MF624=m +# CONFIG_VFIO is not set +# CONFIG_VIRT_DRIVERS is not set + +# +# Microsoft Hyper-V guest support +# +# end of Microsoft Hyper-V guest support + +# CONFIG_GREYBUS is not set +CONFIG_STAGING=y +CONFIG_PRISM2_USB=m +# CONFIG_COMEDI is not set +CONFIG_RTL8192U=m +CONFIG_RTLLIB=m +CONFIG_RTLLIB_CRYPTO_CCMP=m +CONFIG_RTLLIB_CRYPTO_TKIP=m +CONFIG_RTLLIB_CRYPTO_WEP=m +CONFIG_RTL8192E=m +# CONFIG_RTL8723BS is not set +CONFIG_R8712U=m +CONFIG_R8188EU=m +CONFIG_88EU_AP_MODE=y +CONFIG_RTS5208=m +CONFIG_VT6655=m +CONFIG_VT6656=m + +# +# IIO staging drivers +# + +# +# Accelerometers +# +# CONFIG_ADIS16203 is not set +# CONFIG_ADIS16240 is not set +# end of Accelerometers + +# +# Analog to digital converters +# +# CONFIG_AD7816 is not set +# CONFIG_AD7192 is not set +# CONFIG_AD7280 is not set +# end of Analog to digital converters + +# +# Analog digital bi-direction converters +# +# CONFIG_ADT7316 is not set +# end of Analog digital bi-direction converters + +# +# Capacitance to digital converters +# +# CONFIG_AD7150 is not set +# CONFIG_AD7746 is not set +# end of Capacitance to digital converters + +# +# Direct Digital Synthesis +# +# CONFIG_AD9832 is not set +# CONFIG_AD9834 is not set +# end of Direct Digital Synthesis + +# +# Network Analyzer, Impedance Converters +# +# CONFIG_AD5933 is not set +# end of Network Analyzer, Impedance Converters + +# +# Active energy metering IC +# +# CONFIG_ADE7854 is not set +# end of Active energy metering IC + +# +# Resolver to digital converters +# +# CONFIG_AD2S1210 is not set +# end of Resolver to digital converters +# end of IIO staging drivers + +# CONFIG_FB_SM750 is not set +CONFIG_CRYSTALHD=m + +# +# Speakup console speech +# +# CONFIG_SPEAKUP is not set +# end of Speakup console speech + +# CONFIG_STAGING_MEDIA is not set + +# +# Android +# +# end of Android + +CONFIG_LTE_GDM724X=m +# CONFIG_FIREWIRE_SERIAL is not set +# 
CONFIG_GS_FPGABOOT is not set +# CONFIG_UNISYSSPAR is not set +# CONFIG_FB_TFT is not set +# CONFIG_WILC1000_SDIO is not set +# CONFIG_WILC1000_SPI is not set +# CONFIG_MOST is not set +# CONFIG_KS7010 is not set +# CONFIG_PI433 is not set + +# +# Gasket devices +# +# end of Gasket devices + +# CONFIG_XIL_AXIS_FIFO is not set +# CONFIG_FIELDBUS_DEV is not set +# CONFIG_KPC2000 is not set +CONFIG_USB_WUSB=m +CONFIG_USB_WUSB_CBAF=m +# CONFIG_USB_WUSB_CBAF_DEBUG is not set +CONFIG_USB_WHCI_HCD=m +CONFIG_USB_HWA_HCD=m +CONFIG_UWB=m +CONFIG_UWB_HWA=m +CONFIG_UWB_WHCI=m +CONFIG_UWB_I1480U=m +CONFIG_EXFAT_FS=m +CONFIG_EXFAT_DONT_MOUNT_VFAT=y +CONFIG_EXFAT_DISCARD=y +CONFIG_EXFAT_DELAYED_SYNC=y +# CONFIG_EXFAT_KERNEL_DEBUG is not set +# CONFIG_EXFAT_DEBUG_MSG is not set +CONFIG_EXFAT_DEFAULT_CODEPAGE=866 +CONFIG_EXFAT_DEFAULT_IOCHARSET="utf8" +CONFIG_QLGE=m +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +# end of Clock Source drivers + +# CONFIG_MAILBOX is not set +CONFIG_IOMMU_IOVA=y +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_OF_IOMMU=y +CONFIG_IOMMU_DMA=y + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Aspeed SoC drivers +# +# end of Aspeed SoC drivers + +# +# Broadcom SoC drivers +# +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# end of NXP/Freescale QorIQ SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Qualcomm SoC drivers +# +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# CONFIG_XILINX_VCU is not set +# 
end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +# CONFIG_PM_DEVFREQ is not set +CONFIG_EXTCON=y + +# +# Extcon Device Drivers +# +# CONFIG_EXTCON_ADC_JACK is not set +# CONFIG_EXTCON_FSA9480 is not set +# CONFIG_EXTCON_GPIO is not set +# CONFIG_EXTCON_MAX3355 is not set +# CONFIG_EXTCON_PTN5150 is not set +# CONFIG_EXTCON_RT8973A is not set +# CONFIG_EXTCON_SM5502 is not set +# CONFIG_EXTCON_USB_GPIO is not set +# CONFIG_MEMORY is not set +CONFIG_IIO=m +CONFIG_IIO_BUFFER=y +# CONFIG_IIO_BUFFER_CB is not set +# CONFIG_IIO_BUFFER_HW_CONSUMER is not set +CONFIG_IIO_KFIFO_BUF=m +CONFIG_IIO_TRIGGERED_BUFFER=m +# CONFIG_IIO_CONFIGFS is not set +CONFIG_IIO_TRIGGER=y +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 +# CONFIG_IIO_SW_DEVICE is not set +# CONFIG_IIO_SW_TRIGGER is not set + +# +# Accelerometers +# +# CONFIG_ADIS16201 is not set +# CONFIG_ADIS16209 is not set +# CONFIG_ADXL345_I2C is not set +# CONFIG_ADXL345_SPI is not set +# CONFIG_ADXL372_SPI is not set +# CONFIG_ADXL372_I2C is not set +# CONFIG_BMA180 is not set +# CONFIG_BMA220 is not set +# CONFIG_BMC150_ACCEL is not set +# CONFIG_DA280 is not set +# CONFIG_DA311 is not set +# CONFIG_DMARD06 is not set +# CONFIG_DMARD09 is not set +# CONFIG_DMARD10 is not set +# CONFIG_HID_SENSOR_ACCEL_3D is not set +# CONFIG_IIO_ST_ACCEL_3AXIS is not set +# CONFIG_KXSD9 is not set +# CONFIG_KXCJK1013 is not set +# CONFIG_MC3230 is not set +# CONFIG_MMA7455_I2C is not set +# CONFIG_MMA7455_SPI is not set +# CONFIG_MMA7660 is not set +# CONFIG_MMA8452 is not set +# CONFIG_MMA9551 is not set +# CONFIG_MMA9553 is not set +# CONFIG_MXC4005 is not set +# CONFIG_MXC6255 is not set +# CONFIG_SCA3000 is not set +# CONFIG_STK8312 is not set +# CONFIG_STK8BA50 is not set +# end of Accelerometers + +# +# Analog to digital converters +# +# CONFIG_AD7124 is not set +# CONFIG_AD7266 is not set +# CONFIG_AD7291 is not set +# CONFIG_AD7298 is not set +# CONFIG_AD7476 is not set +# CONFIG_AD7606_IFACE_PARALLEL is not set 
+# CONFIG_AD7606_IFACE_SPI is not set +# CONFIG_AD7766 is not set +# CONFIG_AD7768_1 is not set +# CONFIG_AD7780 is not set +# CONFIG_AD7791 is not set +# CONFIG_AD7793 is not set +# CONFIG_AD7887 is not set +# CONFIG_AD7923 is not set +# CONFIG_AD7949 is not set +# CONFIG_AD799X is not set +# CONFIG_ENVELOPE_DETECTOR is not set +# CONFIG_HI8435 is not set +# CONFIG_HX711 is not set +# CONFIG_INA2XX_ADC is not set +# CONFIG_LTC2471 is not set +# CONFIG_LTC2485 is not set +# CONFIG_LTC2497 is not set +# CONFIG_MAX1027 is not set +# CONFIG_MAX11100 is not set +# CONFIG_MAX1118 is not set +# CONFIG_MAX1363 is not set +# CONFIG_MAX9611 is not set +# CONFIG_MCP320X is not set +# CONFIG_MCP3422 is not set +# CONFIG_MCP3911 is not set +# CONFIG_NAU7802 is not set +# CONFIG_SD_ADC_MODULATOR is not set +# CONFIG_TI_ADC081C is not set +# CONFIG_TI_ADC0832 is not set +# CONFIG_TI_ADC084S021 is not set +# CONFIG_TI_ADC12138 is not set +# CONFIG_TI_ADC108S102 is not set +# CONFIG_TI_ADC128S052 is not set +# CONFIG_TI_ADC161S626 is not set +# CONFIG_TI_ADS1015 is not set +# CONFIG_TI_ADS7950 is not set +# CONFIG_TI_ADS8344 is not set +# CONFIG_TI_ADS8688 is not set +# CONFIG_TI_ADS124S08 is not set +# CONFIG_TI_TLC4541 is not set +# CONFIG_VF610_ADC is not set +# CONFIG_XILINX_XADC is not set +# end of Analog to digital converters + +# +# Analog Front Ends +# +# CONFIG_IIO_RESCALE is not set +# end of Analog Front Ends + +# +# Amplifiers +# +# CONFIG_AD8366 is not set +# end of Amplifiers + +# +# Chemical Sensors +# +# CONFIG_ATLAS_PH_SENSOR is not set +# CONFIG_BME680 is not set +# CONFIG_CCS811 is not set +# CONFIG_IAQCORE is not set +# CONFIG_SENSIRION_SGP30 is not set +# CONFIG_SPS30 is not set +# CONFIG_VZ89X is not set +# end of Chemical Sensors + +# +# Hid Sensor IIO Common +# +# CONFIG_HID_SENSOR_IIO_COMMON is not set +# end of Hid Sensor IIO Common + +# +# SSP Sensor Common +# +# CONFIG_IIO_SSP_SENSORHUB is not set +# end of SSP Sensor Common + +# +# Digital to analog 
converters +# +# CONFIG_AD5064 is not set +# CONFIG_AD5360 is not set +# CONFIG_AD5380 is not set +# CONFIG_AD5421 is not set +# CONFIG_AD5446 is not set +# CONFIG_AD5449 is not set +# CONFIG_AD5592R is not set +# CONFIG_AD5593R is not set +# CONFIG_AD5504 is not set +# CONFIG_AD5624R_SPI is not set +# CONFIG_LTC1660 is not set +# CONFIG_LTC2632 is not set +# CONFIG_AD5686_SPI is not set +# CONFIG_AD5696_I2C is not set +# CONFIG_AD5755 is not set +# CONFIG_AD5758 is not set +# CONFIG_AD5761 is not set +# CONFIG_AD5764 is not set +# CONFIG_AD5791 is not set +# CONFIG_AD7303 is not set +# CONFIG_AD8801 is not set +# CONFIG_DPOT_DAC is not set +# CONFIG_DS4424 is not set +# CONFIG_M62332 is not set +# CONFIG_MAX517 is not set +# CONFIG_MAX5821 is not set +# CONFIG_MCP4725 is not set +# CONFIG_MCP4922 is not set +# CONFIG_TI_DAC082S085 is not set +# CONFIG_TI_DAC5571 is not set +# CONFIG_TI_DAC7311 is not set +# CONFIG_TI_DAC7612 is not set +# CONFIG_VF610_DAC is not set +# end of Digital to analog converters + +# +# IIO dummy driver +# +# end of IIO dummy driver + +# +# Frequency Synthesizers DDS/PLL +# + +# +# Clock Generator/Distribution +# +# CONFIG_AD9523 is not set +# end of Clock Generator/Distribution + +# +# Phase-Locked Loop (PLL) frequency synthesizers +# +# CONFIG_ADF4350 is not set +# CONFIG_ADF4371 is not set +# end of Phase-Locked Loop (PLL) frequency synthesizers +# end of Frequency Synthesizers DDS/PLL + +# +# Digital gyroscope sensors +# +# CONFIG_ADIS16080 is not set +# CONFIG_ADIS16130 is not set +# CONFIG_ADIS16136 is not set +# CONFIG_ADIS16260 is not set +# CONFIG_ADXRS450 is not set +# CONFIG_BMG160 is not set +# CONFIG_FXAS21002C is not set +# CONFIG_HID_SENSOR_GYRO_3D is not set +# CONFIG_MPU3050_I2C is not set +# CONFIG_IIO_ST_GYRO_3AXIS is not set +# CONFIG_ITG3200 is not set +# end of Digital gyroscope sensors + +# +# Health Sensors +# + +# +# Heart Rate Monitors +# +# CONFIG_AFE4403 is not set +# CONFIG_AFE4404 is not set +# 
CONFIG_MAX30100 is not set +# CONFIG_MAX30102 is not set +# end of Heart Rate Monitors +# end of Health Sensors + +# +# Humidity sensors +# +# CONFIG_AM2315 is not set +# CONFIG_DHT11 is not set +# CONFIG_HDC100X is not set +# CONFIG_HID_SENSOR_HUMIDITY is not set +# CONFIG_HTS221 is not set +# CONFIG_HTU21 is not set +# CONFIG_SI7005 is not set +# CONFIG_SI7020 is not set +# end of Humidity sensors + +# +# Inertial measurement units +# +# CONFIG_ADIS16400 is not set +# CONFIG_ADIS16460 is not set +# CONFIG_ADIS16480 is not set +# CONFIG_BMI160_I2C is not set +# CONFIG_BMI160_SPI is not set +# CONFIG_KMX61 is not set +CONFIG_INV_MPU6050_IIO=m +CONFIG_INV_MPU6050_I2C=m +# CONFIG_INV_MPU6050_SPI is not set +# CONFIG_IIO_ST_LSM6DSX is not set +# end of Inertial measurement units + +# +# Light sensors +# +# CONFIG_ADJD_S311 is not set +# CONFIG_AL3320A is not set +# CONFIG_APDS9300 is not set +# CONFIG_APDS9960 is not set +# CONFIG_BH1750 is not set +# CONFIG_BH1780 is not set +# CONFIG_CM32181 is not set +# CONFIG_CM3232 is not set +# CONFIG_CM3323 is not set +# CONFIG_CM3605 is not set +# CONFIG_CM36651 is not set +# CONFIG_GP2AP020A00F is not set +# CONFIG_SENSORS_ISL29018 is not set +# CONFIG_SENSORS_ISL29028 is not set +# CONFIG_ISL29125 is not set +# CONFIG_HID_SENSOR_ALS is not set +# CONFIG_HID_SENSOR_PROX is not set +# CONFIG_JSA1212 is not set +# CONFIG_RPR0521 is not set +# CONFIG_LTR501 is not set +# CONFIG_LV0104CS is not set +# CONFIG_MAX44000 is not set +# CONFIG_MAX44009 is not set +# CONFIG_NOA1305 is not set +CONFIG_OPT3001=m +# CONFIG_PA12203001 is not set +# CONFIG_SI1133 is not set +# CONFIG_SI1145 is not set +# CONFIG_STK3310 is not set +# CONFIG_ST_UVIS25 is not set +# CONFIG_TCS3414 is not set +# CONFIG_TCS3472 is not set +# CONFIG_SENSORS_TSL2563 is not set +# CONFIG_TSL2583 is not set +# CONFIG_TSL2772 is not set +# CONFIG_TSL4531 is not set +# CONFIG_US5182D is not set +# CONFIG_VCNL4000 is not set +# CONFIG_VCNL4035 is not set +# 
CONFIG_VEML6070 is not set +# CONFIG_VL6180 is not set +# CONFIG_ZOPT2201 is not set +# end of Light sensors + +# +# Magnetometer sensors +# +# CONFIG_AK8974 is not set +CONFIG_AK8975=m +# CONFIG_AK09911 is not set +# CONFIG_BMC150_MAGN_I2C is not set +# CONFIG_BMC150_MAGN_SPI is not set +# CONFIG_MAG3110 is not set +# CONFIG_HID_SENSOR_MAGNETOMETER_3D is not set +# CONFIG_MMC35240 is not set +# CONFIG_IIO_ST_MAGN_3AXIS is not set +# CONFIG_SENSORS_HMC5843_I2C is not set +# CONFIG_SENSORS_HMC5843_SPI is not set +# CONFIG_SENSORS_RM3100_I2C is not set +# CONFIG_SENSORS_RM3100_SPI is not set +# end of Magnetometer sensors + +# +# Multiplexers +# +# CONFIG_IIO_MUX is not set +# end of Multiplexers + +# +# Inclinometer sensors +# +# CONFIG_HID_SENSOR_INCLINOMETER_3D is not set +# CONFIG_HID_SENSOR_DEVICE_ROTATION is not set +# end of Inclinometer sensors + +# +# Triggers - standalone +# +# CONFIG_IIO_INTERRUPT_TRIGGER is not set +# CONFIG_IIO_SYSFS_TRIGGER is not set +# end of Triggers - standalone + +# +# Digital potentiometers +# +# CONFIG_AD5272 is not set +# CONFIG_DS1803 is not set +# CONFIG_MAX5432 is not set +# CONFIG_MAX5481 is not set +# CONFIG_MAX5487 is not set +# CONFIG_MCP4018 is not set +# CONFIG_MCP4131 is not set +# CONFIG_MCP4531 is not set +# CONFIG_MCP41010 is not set +# CONFIG_TPL0102 is not set +# end of Digital potentiometers + +# +# Digital potentiostats +# +# CONFIG_LMP91000 is not set +# end of Digital potentiostats + +# +# Pressure sensors +# +# CONFIG_ABP060MG is not set +CONFIG_BMP280=m +CONFIG_BMP280_I2C=m +CONFIG_BMP280_SPI=m +# CONFIG_DPS310 is not set +# CONFIG_HID_SENSOR_PRESS is not set +# CONFIG_HP03 is not set +# CONFIG_MPL115_I2C is not set +# CONFIG_MPL115_SPI is not set +# CONFIG_MPL3115 is not set +# CONFIG_MS5611 is not set +# CONFIG_MS5637 is not set +# CONFIG_IIO_ST_PRESS is not set +# CONFIG_T5403 is not set +# CONFIG_HP206C is not set +# CONFIG_ZPA2326 is not set +# end of Pressure sensors + +# +# Lightning sensors +# +# 
CONFIG_AS3935 is not set +# end of Lightning sensors + +# +# Proximity and distance sensors +# +# CONFIG_ISL29501 is not set +# CONFIG_LIDAR_LITE_V2 is not set +# CONFIG_MB1232 is not set +# CONFIG_RFD77402 is not set +# CONFIG_SRF04 is not set +# CONFIG_SX9500 is not set +# CONFIG_SRF08 is not set +# CONFIG_VL53L0X_I2C is not set +# end of Proximity and distance sensors + +# +# Resolver to digital converters +# +# CONFIG_AD2S90 is not set +# CONFIG_AD2S1200 is not set +# end of Resolver to digital converters + +# +# Temperature sensors +# +# CONFIG_MAXIM_THERMOCOUPLE is not set +# CONFIG_HID_SENSOR_TEMP is not set +# CONFIG_MLX90614 is not set +# CONFIG_MLX90632 is not set +# CONFIG_TMP006 is not set +# CONFIG_TMP007 is not set +# CONFIG_TSYS01 is not set +# CONFIG_TSYS02D is not set +# CONFIG_MAX31856 is not set +# end of Temperature sensors + +# CONFIG_NTB is not set +# CONFIG_VME_BUS is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_FSL_FTM is not set +# CONFIG_PWM_PCA9685 is not set + +# +# IRQ chip support +# +# CONFIG_AL_FIC is not set +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_TI_SYSCON is not set + +# +# PHY Subsystem +# +# CONFIG_GENERIC_PHY is not set +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_CADENCE_DP is not set +# CONFIG_PHY_CADENCE_DPHY is not set +# CONFIG_PHY_CADENCE_SIERRA is not set +# CONFIG_PHY_FSL_IMX8MQ_USB is not set +# CONFIG_PHY_MIXEL_MIPI_DPHY is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_CPCAP_USB is not set +# CONFIG_PHY_MAPPHONE_MDM6600 is not set +# CONFIG_PHY_OCELOT_SERDES is not set +# CONFIG_PHY_QCOM_USB_HS is not set +# CONFIG_PHY_QCOM_USB_HSIC is not set +# CONFIG_PHY_SAMSUNG_USB2 is not set +# CONFIG_PHY_TUSB1210 is not set +# end of PHY Subsystem + +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# end of Performance monitor support + +CONFIG_RAS=y 
+ +# +# Android +# +# CONFIG_ANDROID is not set +# end of Android + +# CONFIG_LIBNVDIMM is not set +CONFIG_DAX=y +# CONFIG_DEV_DAX is not set +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +# CONFIG_FSI is not set +CONFIG_PM_OPP=y +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +CONFIG_REISERFS_FS=m +# CONFIG_REISERFS_CHECK is not set +# CONFIG_REISERFS_PROC_INFO is not set +CONFIG_REISERFS_FS_XATTR=y +CONFIG_REISERFS_FS_POSIX_ACL=y +CONFIG_REISERFS_FS_SECURITY=y +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +# CONFIG_JFS_DEBUG is not set +# CONFIG_JFS_STATISTICS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_XFS_RT=y +CONFIG_XFS_ONLINE_SCRUB=y +CONFIG_XFS_ONLINE_REPAIR=y +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +CONFIG_GFS2_FS=m +CONFIG_OCFS2_FS=m +CONFIG_OCFS2_FS_O2CB=m +CONFIG_OCFS2_FS_STATS=y +CONFIG_OCFS2_DEBUG_MASKLOG=y +# CONFIG_OCFS2_DEBUG_FS is not set +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set +CONFIG_NILFS2_FS=m +CONFIG_F2FS_FS=m +CONFIG_F2FS_STAT_FS=y +CONFIG_F2FS_FS_XATTR=y +CONFIG_F2FS_FS_POSIX_ACL=y +CONFIG_F2FS_FS_SECURITY=y +# CONFIG_F2FS_CHECK_FS is not set +# CONFIG_F2FS_IO_TRACE is not 
set +# CONFIG_F2FS_FAULT_INJECTION is not set +CONFIG_FS_DAX=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +CONFIG_FS_ENCRYPTION=y +CONFIG_FS_VERITY=y +# CONFIG_FS_VERITY_DEBUG is not set +CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=m +CONFIG_QFMT_V1=m +CONFIG_QFMT_V2=m +CONFIG_QUOTACTL=y +CONFIG_AUTOFS4_FS=m +CONFIG_AUTOFS_FS=m +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +# CONFIG_VIRTIO_FS is not set +CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +# CONFIG_OVERLAY_FS_INDEX is not set +CONFIG_OVERLAY_FS_XINO_AUTO=y +CONFIG_OVERLAY_FS_METACOPY=y + +# +# Caches +# +CONFIG_FSCACHE=m +# CONFIG_FSCACHE_STATS is not set +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_HISTOGRAM is not set +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=866 +CONFIG_FAT_DEFAULT_IOCHARSET="utf8" +CONFIG_FAT_DEFAULT_UTF8=y +CONFIG_NTFS_FS=m +# CONFIG_NTFS_DEBUG is not set +CONFIG_NTFS_RW=y +# end of DOS/FAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_CONFIGFS_FS=m +# end of Pseudo 
filesystems + +CONFIG_MISC_FILESYSTEMS=y +CONFIG_ORANGEFS_FS=m +CONFIG_ADFS_FS=m +CONFIG_ADFS_FS_RW=y +CONFIG_AFFS_FS=m +CONFIG_ECRYPT_FS=m +CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_HFS_FS=m +CONFIG_HFSPLUS_FS=m +CONFIG_BEFS_FS=m +# CONFIG_BEFS_DEBUG is not set +CONFIG_BFS_FS=m +CONFIG_EFS_FS=m +CONFIG_JFFS2_FS=m +CONFIG_JFFS2_FS_DEBUG=0 +CONFIG_JFFS2_FS_WRITEBUFFER=y +# CONFIG_JFFS2_FS_WBUF_VERIFY is not set +CONFIG_JFFS2_SUMMARY=y +CONFIG_JFFS2_FS_XATTR=y +CONFIG_JFFS2_FS_POSIX_ACL=y +CONFIG_JFFS2_FS_SECURITY=y +CONFIG_JFFS2_COMPRESSION_OPTIONS=y +CONFIG_JFFS2_ZLIB=y +CONFIG_JFFS2_LZO=y +CONFIG_JFFS2_RTIME=y +CONFIG_JFFS2_RUBIN=y +# CONFIG_JFFS2_CMODE_NONE is not set +CONFIG_JFFS2_CMODE_PRIORITY=y +# CONFIG_JFFS2_CMODE_SIZE is not set +# CONFIG_JFFS2_CMODE_FAVOURLZO is not set +CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +CONFIG_CRAMFS_MTD=y +CONFIG_SQUASHFS=m +# CONFIG_SQUASHFS_FILE_CACHE is not set +CONFIG_SQUASHFS_FILE_DIRECT=y +# CONFIG_SQUASHFS_DECOMP_SINGLE is not set +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +CONFIG_SQUASHFS_ZSTD=y +CONFIG_SQUASHFS_4K_DEVBLK_SIZE=y +CONFIG_SQUASHFS_EMBEDDED=y +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +CONFIG_VXFS_FS=m +CONFIG_MINIX_FS=m +CONFIG_OMFS_FS=m +CONFIG_HPFS_FS=m +CONFIG_QNX4FS_FS=m +CONFIG_QNX6FS_FS=m +# CONFIG_QNX6FS_DEBUG is not set +CONFIG_ROMFS_FS=m +CONFIG_ROMFS_BACKED_BY_BLOCK=y +# CONFIG_ROMFS_BACKED_BY_MTD is not set +# CONFIG_ROMFS_BACKED_BY_BOTH is not set +CONFIG_ROMFS_ON_BLOCK=y +CONFIG_PSTORE=m +CONFIG_PSTORE_DEFLATE_COMPRESS=m +CONFIG_PSTORE_LZO_COMPRESS=m +CONFIG_PSTORE_LZ4_COMPRESS=m +CONFIG_PSTORE_LZ4HC_COMPRESS=m +CONFIG_PSTORE_842_COMPRESS=y +CONFIG_PSTORE_ZSTD_COMPRESS=y +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y +# CONFIG_PSTORE_LZO_COMPRESS_DEFAULT is not set +# CONFIG_PSTORE_LZ4_COMPRESS_DEFAULT is not set +# 
CONFIG_PSTORE_LZ4HC_COMPRESS_DEFAULT is not set +# CONFIG_PSTORE_842_COMPRESS_DEFAULT is not set +# CONFIG_PSTORE_ZSTD_COMPRESS_DEFAULT is not set +CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +CONFIG_PSTORE_CONSOLE=y +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +# CONFIG_PSTORE_RAM is not set +CONFIG_SYSV_FS=m +CONFIG_UFS_FS=m +CONFIG_UFS_FS_WRITE=y +# CONFIG_UFS_DEBUG is not set +CONFIG_EROFS_FS=m +# CONFIG_EROFS_FS_DEBUG is not set +CONFIG_EROFS_FS_XATTR=y +CONFIG_EROFS_FS_POSIX_ACL=y +CONFIG_EROFS_FS_SECURITY=y +CONFIG_EROFS_FS_ZIP=y +CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT=2 +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +CONFIG_NFS_V2=m +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFSD=m +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +CONFIG_NFSD_BLOCKLAYOUT=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_FLEXFILELAYOUT=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_SUNRPC_SWAP=y +CONFIG_RPCSEC_GSS_KRB5=m +# CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES is not set +# CONFIG_SUNRPC_DEBUG is not set +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CEPH_FS_SECURITY_LABEL=y +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not 
set +CONFIG_CIFS_DFS_UPCALL=y +CONFIG_CIFS_SMB_DIRECT=y +CONFIG_CIFS_FSCACHE=y +CONFIG_CODA_FS=m +CONFIG_AFS_FS=m +# CONFIG_AFS_DEBUG is not set +CONFIG_AFS_FSCACHE=y +# CONFIG_AFS_DEBUG_CURSOR is not set +CONFIG_9P_FS=m +CONFIG_9P_FSCACHE=y +CONFIG_9P_FS_POSIX_ACL=y +CONFIG_9P_FS_SECURITY=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +# CONFIG_DLM is not set +CONFIG_UNICODE=y +# CONFIG_UNICODE_NORMALIZATION_SELFTEST is not set +# end of File systems + +# +# Security options +# + +# +# Miscellaneous hardening features +# +# CONFIG_MCST_MEMORY_SANITIZE is not set +# end of Miscellaneous hardening features + +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +# CONFIG_KEYS_REQUEST_CACHE is not set +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_BIG_KEYS=y +# 
CONFIG_TRUSTED_KEYS is not set +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITY_WRITABLE_HOOKS=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_LSM_MMAP_MIN_ADDR=65536 +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +# CONFIG_HARDENED_USERCOPY is not set +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +CONFIG_SECURITY_SMACK=y +# CONFIG_SECURITY_SMACK_BRINGUP is not set +CONFIG_SECURITY_SMACK_NETFILTER=y +CONFIG_SECURITY_SMACK_APPEND_SIGNALS=y +# CONFIG_SECURITY_TOMOYO is not set +CONFIG_SECURITY_APPARMOR=y +CONFIG_SECURITY_APPARMOR_HASH=y +CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y +# CONFIG_SECURITY_APPARMOR_DEBUG is not set +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +CONFIG_SECURITY_SAFESETID=y +# CONFIG_SECURITY_LOCKDOWN_LSM is not set +CONFIG_SECURITY_ALTHA=y +# CONFIG_SECURITY_KIOSK is not set +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_TRUSTED_KEYRING=y +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +# CONFIG_IMA_TEMPLATE is not set +CONFIG_IMA_NG_TEMPLATE=y +# CONFIG_IMA_SIG_TEMPLATE is not set +CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng" +# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set +CONFIG_IMA_DEFAULT_HASH_SHA256=y +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +CONFIG_IMA_DEFAULT_HASH="sha256" +CONFIG_IMA_WRITE_POLICY=y +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_ARCH_POLICY is not set +# CONFIG_IMA_APPRAISE_BUILD_POLICY is not set +CONFIG_IMA_APPRAISE_BOOTPARAM=y +# CONFIG_IMA_APPRAISE_MODSIG is not set +CONFIG_IMA_TRUSTED_KEYRING=y +# 
CONFIG_IMA_BLACKLIST_KEYRING is not set +# CONFIG_IMA_LOAD_X509 is not set +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +CONFIG_EVM_EXTRA_SMACK_XATTRS=y +# CONFIG_EVM_ADD_XATTRS is not set +# CONFIG_EVM_LOAD_X509 is not set +# CONFIG_DEFAULT_SECURITY_SELINUX is not set +# CONFIG_DEFAULT_SECURITY_SMACK is not set +# CONFIG_DEFAULT_SECURITY_APPARMOR is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="yama,safesetid,integrity,altha" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_INIT_STACK_NONE=y +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +# end of Memory initialization +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=m +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +CONFIG_CRYPTO_GF128MUL=y +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=m +CONFIG_CRYPTO_ECC=m +CONFIG_CRYPTO_ECDH=m +CONFIG_CRYPTO_ECRDSA=m + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_AEGIS128=m +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CFB=m +CONFIG_CRYPTO_CTR=y 
+CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=y +CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_NHPOLY1305=m +CONFIG_CRYPTO_ADIANTUM=m +CONFIG_CRYPTO_ESSIV=m + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_XXHASH=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_LIB_SHA256=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=m +CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m + +# +# Ciphers +# +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_AES_TI=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=m +CONFIG_CRYPTO_LZO=y +CONFIG_CRYPTO_842=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=m + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +# CONFIG_CRYPTO_DRBG_HASH is not set +# CONFIG_CRYPTO_DRBG_CTR is not set +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_USER_API=m +CONFIG_CRYPTO_USER_API_HASH=m +CONFIG_CRYPTO_USER_API_SKCIPHER=m 
+CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m +# CONFIG_CRYPTO_STATS is not set +CONFIG_CRYPTO_HASH_INFO=y +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set +# CONFIG_CRYPTO_DEV_CHELSIO is not set +# CONFIG_CRYPTO_DEV_CHELSIO_TLS is not set +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set +# CONFIG_CRYPTO_DEV_CCREE is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS8_PRIVATE_KEY_PARSER=m +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +# CONFIG_SIGNED_PE_FILE_VERIFICATION is not set + +# +# Certificates for signature checking +# +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +# CONFIG_SECONDARY_TRUSTED_KEYRING is not set +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_RAID6_PQ_BENCHMARK=y +CONFIG_PACKING=y +CONFIG_BITREVERSE=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_CORDIC=m +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_CRC_CCITT=m +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=m +CONFIG_CRC8=m +CONFIG_XXHASH=m +CONFIG_AUDIT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_842_COMPRESS=m +CONFIG_842_DECOMPRESS=m +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=m +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=m +CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=m +CONFIG_XZ_DEC=y 
+# CONFIG_XZ_DEC_X86 is not set +# CONFIG_XZ_DEC_POWERPC is not set +# CONFIG_XZ_DEC_IA64 is not set +# CONFIG_XZ_DEC_ARM is not set +# CONFIG_XZ_DEC_ARMTHUMB is not set +# CONFIG_XZ_DEC_SPARC is not set +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_ARCH_HAS_DMA_WRITE_COMBINE=y +CONFIG_DMA_DECLARE_COHERENT=y +CONFIG_DMA_VIRT_OPS=y +CONFIG_DMA_REMAP=y +# CONFIG_DMA_API_DEBUG is not set +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_LRU_CACHE=m +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_DIMLIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_FONT_SUPPORT=y +CONFIG_FONTS=y +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +# CONFIG_FONT_6x11 is not set +# CONFIG_FONT_7x14 is not set +# CONFIG_FONT_PEARL_8x8 is not set +# CONFIG_FONT_ACORN_8x8 is not set +# CONFIG_FONT_MINI_4x6 is not set +# CONFIG_FONT_6x10 is not set +# CONFIG_FONT_10x18 is not set +# CONFIG_FONT_SUN8x16 is not set +CONFIG_FONT_SUN12x22=y +CONFIG_FONT_TER16x32=y +CONFIG_SG_POOL=y +CONFIG_SBITMAP=y +CONFIG_PARMAN=m +CONFIG_OBJAGG=m +# CONFIG_STRING_SELFTEST is not set +# end of Library routines + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_CALLER is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_CONSOLE_LOGLEVEL_EMERGENCY=5 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_DYNAMIC_DEBUG is not set +# end of printk and dmesg options + +# +# Compile-time checks and compiler options +# +# CONFIG_DEBUG_INFO is not set +CONFIG_ENABLE_MUST_CHECK=y 
+CONFIG_FRAME_WARN=0 +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_READABLE_ASM is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_INSTALL is not set +CONFIG_OPTIMIZE_INLINING=y +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +# CONFIG_FRAME_POINTER is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_KASAN_STACK=0 +# end of Memory Debugging + +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Lockups and Hangs +# +# CONFIG_SOFTLOCKUP_DETECTOR is not set +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_WQ_WATCHDOG is not set +# end of Debug Lockups and Hangs + +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) + +CONFIG_STACKTRACE=y +CONFIG_WARN_ALL_UNSEEDED_RANDOM=y +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_HAVE_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=21 +CONFIG_RCU_TRACE=y +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_PREEMPTIRQ_EVENTS is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_HWLAT_TRACER is not set +# CONFIG_FTRACE_SYSCALLS is not set +# 
CONFIG_TRACER_SNAPSHOT is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_DYNAMIC_FTRACE=y +# CONFIG_FUNCTION_PROFILER is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_HIST_TRIGGERS is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +CONFIG_RUNTIME_TESTING_MENU=y +CONFIG_LKDTM=m +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_ASYNC_RAID6_TEST is not set +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_STRSCPY is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_BITFIELD is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_PARMAN is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# 
CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +# CONFIG_TEST_OBJAGG is not set +# CONFIG_TEST_STACKINIT is not set +# CONFIG_TEST_MEMINIT is not set +CONFIG_MEMTEST=y +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +# CONFIG_UBSAN is not set +CONFIG_UBSAN_ALIGNMENT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_E2K_DEBUG_KERNEL=y +# CONFIG_NESTED_PAGE_FAULT_INJECTION is not set +# CONFIG_DEBUG_LCC_VOLATILE_ATOMIC is not set +# CONFIG_DEBUG_IRQ is not set +# CONFIG_DEBUG_PT_REGS is not set +# CONFIG_DEBUG_KMEM_AREA is not set +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y + +# +# Elbrus architecture kernel hacking +# +CONFIG_BOOT_TRACE=y +CONFIG_BOOT_TRACE_THRESHOLD=100 +# end of Elbrus architecture kernel hacking +# end of Kernel hacking diff --git a/crypto/hmac.c b/crypto/hmac.c index 8b2a212eb0ad..649ed97998ec 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -37,6 +37,8 @@ static inline struct hmac_ctx *hmac_ctx(struct crypto_shash *tfm) crypto_tfm_ctx_alignment()); } +/* LCC does not understand VLA in structures */ +#if !defined CONFIG_MCST || !defined __LCC__ static int hmac_setkey(struct crypto_shash *parent, const u8 *inkey, unsigned int keylen) { @@ -79,6 +81,52 @@ static int hmac_setkey(struct crypto_shash *parent, crypto_shash_update(shash, opad, bs) ?: crypto_shash_export(shash, opad); } +#else /* !defined CONFIG_MCST || !defined __LCC__ || __LCC__ != 118 */ +static int hmac_setkey(struct crypto_shash *parent, + const u8 *inkey, unsigned int keylen) +{ + int bs = crypto_shash_blocksize(parent); + int ds = crypto_shash_digestsize(parent); + int ss = crypto_shash_statesize(parent); + char *ipad = crypto_shash_ctx_aligned(parent); + char *opad = ipad + ss; + struct hmac_ctx *ctx = align_ptr(opad + ss, + crypto_tfm_ctx_alignment()); + struct crypto_shash *hash = ctx->hash; + struct shash_desc *desc; + unsigned int i; + + desc = __builtin_alloca(sizeof(*desc) + crypto_shash_descsize(hash)); + + desc->tfm = hash; + + if (keylen > 
bs) { + int err; + + err = crypto_shash_digest(desc, inkey, keylen, ipad); + if (err) + return err; + + keylen = ds; + } else + memcpy(ipad, inkey, keylen); + + memset(ipad + keylen, 0, bs - keylen); + memcpy(opad, ipad, bs); + + for (i = 0; i < bs; i++) { + ipad[i] ^= 0x36; + opad[i] ^= 0x5c; + } + + return crypto_shash_init(desc) ?: + crypto_shash_update(desc, ipad, bs) ?: + crypto_shash_export(desc, ipad) ?: + crypto_shash_init(desc) ?: + crypto_shash_update(desc, opad, bs) ?: + crypto_shash_export(desc, opad); +} +#endif /* !defined CONFIG_MCST || !defined __LCC__ || __LCC__ != 118 */ static int hmac_export(struct shash_desc *pdesc, void *out) { diff --git a/drivers/Kconfig b/drivers/Kconfig index 8befa53f43be..e2cfb60eeeae 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -12,6 +12,8 @@ source "drivers/rapidio/Kconfig" source "drivers/base/Kconfig" +source "drivers/mcst/Kconfig" + source "drivers/bus/Kconfig" source "drivers/connector/Kconfig" diff --git a/drivers/Makefile b/drivers/Makefile index aaef17cc6512..aac7b25da7e8 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -149,6 +149,7 @@ obj-$(CONFIG_VLYNQ) += vlynq/ obj-$(CONFIG_GREYBUS) += greybus/ obj-$(CONFIG_STAGING) += staging/ obj-y += platform/ +obj-$(CONFIG_MCST) += mcst/ obj-$(CONFIG_MAILBOX) += mailbox/ obj-$(CONFIG_HWSPINLOCK) += hwspinlock/ diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 8beb418ce167..ba7e3e6098da 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -32,6 +32,10 @@ #include #include #include +#if defined(CONFIG_MCST) && defined(CONFIG_E2K) +#include +#include +#endif #include "ahci.h" #define DRV_NAME "ahci" @@ -616,6 +620,21 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets"); static void ahci_pci_save_initial_config(struct pci_dev *pdev, struct ahci_host_priv *hpriv) { +#if defined(CONFIG_MCST) && defined(CONFIG_E2K) + if (cpu_has_epic() && iohub_generation(pdev) == 2 && /* Bug 127617 */ + pdev->revision == 2 && + 
pdev->vendor == PCI_VENDOR_ID_MCST_TMP && + pdev->device == PCI_DEVICE_ID_MCST_SATA) { + sys_mon_1_t r; + int node = dev_to_node(&pdev->dev); + if (node < 0) + node = 0; + r.word = sic_read_node_nbsr_reg(node, PMC_SYS_MON_1); + /* disable unused ports to prevent irq lockup */ + if (r.pin_sataeth_config) /*2 & 3 phy-s are routed to ethernet*/ + hpriv->force_port_map = 3; + } +#endif if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) { dev_info(&pdev->dev, "JMB361 has only one port\n"); hpriv->force_port_map = 1; @@ -807,6 +826,28 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, return rc; } +#ifdef CONFIG_MCST +void mcst_ahci_port_reset(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + + /* + * ERR 23 - sata (ahci port reg reset) + */ + if (pdev->vendor == PCI_VENDOR_ID_MCST_TMP && + pdev->device == PCI_DEVICE_ID_MCST_SATA) { + int i; + for (i = 0; i < host->n_ports; i++) { + void __iomem *port_mmio = + __ahci_port_base(host, i); + u32 tmp = readl(port_mmio + PORT_CMD); + /* disable FIS reception */ + tmp &= ~PORT_CMD_FIS_RX; + writel(tmp, port_mmio + PORT_CMD); + } + } +} +#endif #ifdef CONFIG_PM static void ahci_pci_disable_interrupts(struct ata_host *host) @@ -840,6 +881,9 @@ static int ahci_pci_device_runtime_resume(struct device *dev) struct ata_host *host = pci_get_drvdata(pdev); int rc; +#ifdef CONFIG_MCST + mcst_ahci_port_reset(pdev); +#endif rc = ahci_reset_controller(host); if (rc) return rc; @@ -875,6 +919,9 @@ static int ahci_pci_device_resume(struct device *dev) ahci_mcp89_apple_enable(pdev); if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { +#ifdef CONFIG_MCST + mcst_ahci_port_reset(pdev); +#endif rc = ahci_reset_controller(host); if (rc) return rc; @@ -1770,6 +1817,19 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (hpriv->cap & HOST_CAP_PMP) pi.flags |= ATA_FLAG_PMP; +#ifdef CONFIG_MCST + if (pdev->vendor == PCI_VENDOR_ID_MCST_TMP 
&& + pdev->device == PCI_DEVICE_ID_MCST_SATA) { + if (iohub_generation(pdev) == 1 && + iohub_revision(pdev) < 3) { + pi.flags |= ATA_FLAG_IOHUB2_REV2; + } else if (iohub_generation(pdev) == 2 && + pdev->revision == 2) { + pi.flags |= ATA_FLAG_E2C3_REV0; + } + } +#endif + ahci_set_em_messages(hpriv, &pi); if (ahci_broken_system_poweroff(pdev)) { @@ -1855,6 +1915,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; +#ifdef CONFIG_MCST + mcst_ahci_port_reset(pdev); +#endif rc = ahci_reset_controller(host); if (rc) return rc; diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index fec2e9754aed..43eaf51e9a13 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -1580,6 +1580,9 @@ static void ahci_postreset(struct ata_link *link, unsigned int *class) struct ata_port *ap = link->ap; void __iomem *port_mmio = ahci_port_base(ap); u32 new_tmp, tmp; +#ifdef CONFIG_MCST + struct ahci_port_priv *pp = ap->private_data; +#endif ata_std_postreset(link, class); @@ -1593,6 +1596,12 @@ static void ahci_postreset(struct ata_link *link, unsigned int *class) writel(new_tmp, port_mmio + PORT_CMD); readl(port_mmio + PORT_CMD); /* flush */ } +#ifdef CONFIG_MCST + if (ap->flags & ATA_FLAG_E2C3_REV0) { /*bug#130272*/ + pp->intr_mask |= PORT_IRQ_PIOS_FIS; + writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); + } +#endif } static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl) @@ -1629,6 +1638,8 @@ static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc) return sata_pmp_qc_defer_cmd_switch(qc); } + + static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; @@ -1647,11 +1658,22 @@ static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc) ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl); if (is_atapi) { +#ifdef CONFIG_MCST + /* handle hw bug: problems with dma on packet commands: + use pio instead */ + if (qc->tf.flags & ATA_TFLAG_WRITE 
&& + ap->flags & ATA_FLAG_IOHUB2_REV2) { + u8 *fis = cmd_tbl; + fis[3] &= ~ATAPI_PKT_DMA; + } +#endif + memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32); memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len); } n_elem = 0; + if (qc->flags & ATA_QCFLAG_DMAMAP) n_elem = ahci_fill_sg(qc, cmd_tbl); @@ -1886,6 +1908,16 @@ static void ahci_handle_port_interrupt(struct ata_port *ap, ehi->action |= ATA_EH_RESET; ata_port_freeze(ap); } +#ifdef CONFIG_MCST + else if (!qc_active && ap->flags & ATA_FLAG_E2C3_REV0 && /*bug#130272*/ + ap->link.device->class == ATA_DEV_ATAPI && + pp->intr_mask & PORT_IRQ_PIOS_FIS && + readl(port_mmio + PORT_IRQ_STAT) & PORT_IRQ_PIOS_FIS) { + ata_link_dbg(&ap->link, "mask Setup FIS Interrupt\n"); + pp->intr_mask &= ~PORT_IRQ_PIOS_FIS; + writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); + } +#endif } static void ahci_port_intr(struct ata_port *ap) @@ -1989,6 +2021,45 @@ static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance) return IRQ_RETVAL(rc); } +#ifdef CONFIG_MCST +static void l_atapi_wait_for_command(void *data, async_cookie_t cookie) +{ + struct ata_port *ap = data; + void __iomem *port_mmio = ahci_port_base(ap); + struct ahci_port_priv *pp = ap->private_data; + int timeout = 15; + u64 ap_qc_active, qc_active; + do { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(msecs_to_jiffies(20)); + ap_qc_active = ap->qc_active; + if (pp->fbs_enabled) { + if (ap->qc_active) { + qc_active = readl(port_mmio + PORT_SCR_ACT); + qc_active |= readl(port_mmio + PORT_CMD_ISSUE); + } + } else { + /* pp->active_link is valid iff any command is in flight */ + if (ap_qc_active && pp->active_link->sactive) + qc_active = readl(port_mmio + PORT_SCR_ACT); + else + qc_active = readl(port_mmio + PORT_CMD_ISSUE); + } + + if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) { + qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL; + qc_active ^= qc_active & 0x01; + } + + if (ap_qc_active ^ qc_active) { + ahci_port_intr(ap); + break; + } + } while 
(timeout--); + __set_current_state(TASK_RUNNING); +} +#endif + unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; @@ -2014,6 +2085,15 @@ unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) writel(1 << qc->hw_tag, port_mmio + PORT_CMD_ISSUE); +#ifdef CONFIG_MCST /*bug#130272*/ + if (qc->tf.protocol == ATA_PROT_PIO && + ap->flags & ATA_FLAG_E2C3_REV0 && + ap->link.device->class == ATA_DEV_ATAPI && + !(pp->intr_mask & PORT_IRQ_PIOS_FIS)) { + ata_link_dbg(&ap->link, "scheduling Setup FIS Interrupt\n"); + async_schedule(l_atapi_wait_for_command, ap); + } +#endif ahci_sw_activity(qc->dev->link); return 0; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index bed433fd9c70..a215d33a2d5d 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2484,6 +2484,11 @@ int ata_dev_configure(struct ata_device *dev) return 0; } +#ifdef CONFIG_MCST + if (!(ap->flags & ATA_FLAG_IOHUB2_REV2)) + dev->horkage &= ~ATA_HORKAGE_FIX_ERROR_ON_WRITE; +#endif + if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) && dev->class == ATA_DEV_ATAPI) { ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n", @@ -4051,6 +4056,119 @@ int ata_std_prereset(struct ata_link *link, unsigned long deadline) return 0; } +#ifdef CONFIG_MCST +/* Bug 131730: force sata link speed to 6.0 Gbps */ +struct reg_wr_data { + u32 data; + u32 phy_num; + u32 reg_address; +}; +/* See: dwc_ent12mp_phy_tsmc16ffpgl_databook.pdf */ +static struct reg_wr_data preset3516_0[4] = { + {0xc7f8, 0x09, 0x1002}, + {0xc7f8, 0x09, 0x1102}, + {0xc7f8, 0x08, 0x1002}, + {0xc7f8, 0x08, 0x1102}, +}; + +static struct reg_wr_data preset3516_1[4] = { + {0x2800, 0x09, 0x1003}, + {0x2800, 0x09, 0x1103}, + {0x2800, 0x08, 0x1003}, + {0x2800, 0x08, 0x1103}, +}; + +static struct reg_wr_data preset3520_0[4] = { + {0xc7f8, 0x09, 0x1002}, + {0xc7f8, 0x09, 0x1102}, + {0xc7f8, 0x08, 0x1002}, + {0xc7f8, 0x08, 0x1102}, +}; + +static struct reg_wr_data preset3520_1[4] = { + 
{0x2A00, 0x09, 0x1003}, + {0x2A00, 0x09, 0x1103}, + {0x2A00, 0x08, 0x1003}, + {0x2A00, 0x08, 0x1103}, +}; + +static struct reg_wr_data link_retrain[4] = { + {0x008d, 0x09, 0x3005}, + {0x008d, 0x09, 0x3105}, + {0x008d, 0x08, 0x3005}, + {0x008d, 0x08, 0x3105}, +}; + +static struct reg_wr_data set_rx_reset[4] = { + {0x4785, 0x09, 0x1005}, + {0x4785, 0x09, 0x1105}, + {0x4785, 0x08, 0x1005}, + {0x4785, 0x08, 0x1105}, +}; + +static struct reg_wr_data down_rx_reset[4] = { + {0x0784, 0x09, 0x1005}, + {0x0784, 0x09, 0x1105}, + {0x0784, 0x08, 0x1005}, + {0x0784, 0x08, 0x1105}, +}; + +static struct reg_wr_data force_enable_off_0[4] = { + {0x0784, 0x09, 0x1005}, + {0x0784, 0x09, 0x1105}, + {0x0784, 0x08, 0x1005}, + {0x0784, 0x08, 0x1105}, +}; + +static struct reg_wr_data force_enable_off_1[4] = { + {0x000d, 0x09, 0x3005}, + {0x000d, 0x09, 0x3105}, + {0x000d, 0x08, 0x3005}, + {0x000d, 0x08, 0x3105}, +}; + +static int conf_write(struct pci_dev *dev, struct reg_wr_data *d) +{ + u32 v; + int t = 50; + u32 cmd = (3 << 29) | (d->phy_num << 16) | d->reg_address; + /* See: eioh_e2c3.pdf */ + pci_write_config_dword(dev, 0x70, d->data); + pci_write_config_dword(dev, 0x6c, cmd); + do { + pci_read_config_dword(dev, 0x6c, &v); + if ((v & (1 << 31)) == 0) + return 0; + udelay(20); + } while (--t); + return -ETIME; +} + +static int port_phy_setup(struct pci_dev *dev, u32 port) +{ + if (conf_write(dev, &preset3516_0[port]) || + conf_write(dev, &preset3516_1[port]) || + conf_write(dev, &preset3520_0[port]) || + conf_write(dev, &preset3520_1[port]) || + conf_write(dev, &link_retrain[port]) || + conf_write(dev, &set_rx_reset[port])) + return -EIO; + return 0; +} + +static int port_phy_down_reset(struct pci_dev *dev, u32 port) +{ + return conf_write(dev, &down_rx_reset[port]); +} + +static int force_enable_off(struct pci_dev *dev, u32 port) +{ + if (conf_write(dev, &force_enable_off_0[port]) || + conf_write(dev, &force_enable_off_1[port])) + return -EIO; + return 0; +} +#endif /*CONFIG_MCST*/ /** 
* sata_link_hardreset - reset link via SATA phy reset @@ -4082,6 +4200,12 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, { u32 scontrol; int rc; +#ifdef CONFIG_MCST /* Bug 131730 */ + bool restore_phy = false; + struct ata_host *host = link->ap->host; + struct pci_dev *pdev = to_pci_dev(host->dev->parent); + int port = link->ap->local_port_no; +#endif DPRINTK("ENTER\n"); @@ -4089,6 +4213,11 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, *online = false; if (sata_set_spd_needed(link)) { +#ifdef CONFIG_MCST + if (!is_prototype() && link->ap->flags & ATA_FLAG_E2C3_REV0 && + WARN_ON(rc = force_enable_off(pdev, port))) + goto out; +#endif /* SATA spec says nothing about how to reconfigure * spd. To be on the safe side, turn off phy during * reconfiguration. This works for at least ICH7 AHCI @@ -4104,6 +4233,14 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, sata_set_spd(link); } +#ifdef CONFIG_MCST /* the prototype has a different phy */ + else if (!is_prototype() && link->ap->flags & ATA_FLAG_E2C3_REV0) { + ata_link_info(link, "force link speed to 6.0 Gbps\n"); + if (WARN_ON(rc = port_phy_setup(pdev, port))) + goto out; + restore_phy = true; + } +#endif /* issue phy wake/reset */ if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) @@ -4113,7 +4250,10 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol))) goto out; - +#ifdef CONFIG_MCST + if (restore_phy && WARN_ON(rc = port_phy_down_reset(pdev, port))) + goto out; +#endif /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 * 10.4.2 says at least 1 ms. 
*/ @@ -4637,7 +4777,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, - +#ifdef CONFIG_MCST + { "ASUS DRW-24F1MT", NULL, ATA_HORKAGE_FIX_ERROR_ON_WRITE }, +#endif /* End Marker */ { } }; @@ -6987,6 +7129,9 @@ static int __init ata_parse_force_one(char **cur, { "rstonce", .lflags = ATA_LFLAG_RST_ONCE }, { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR }, { "disable", .horkage_on = ATA_HORKAGE_DISABLE }, +#ifdef CONFIG_MCST + { "fix-atapi-write", .horkage_on = ATA_HORKAGE_FIX_ERROR_ON_WRITE }, +#endif }; char *start = *cur, *p = *cur; char *id, *val, *endp; diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 464efedc778b..bfb91a599180 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -33,6 +33,9 @@ #include #include #include +#ifdef CONFIG_MCST +#include "../scsi/scsi_priv.h" +#endif #include #include @@ -264,7 +267,11 @@ static ssize_t ata_ncq_prio_enable_show(struct device *device, struct scsi_device *sdev = to_scsi_device(device); struct ata_port *ap; struct ata_device *dev; +#ifndef CONFIG_MCST bool ncq_prio_enable; +#else + bool uninitialized_var(ncq_prio_enable); +#endif int rc = 0; ap = ata_shost_to_port(sdev->host); @@ -4312,6 +4319,89 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap, #endif } +#ifdef CONFIG_MCST +static unsigned l_read_buffer_capacity(struct ata_device *dev, + u32 *b_len, u32 *avalible) +{ + __u32 buf[3]; + unsigned ret; + struct ata_taskfile tf; + char cdb[] = { GPCMD_READ_BUFFER_CAPACITY, 0, 0, 0, + 0, 0, 0, 0, + sizeof(buf), 0, + }; + + ata_tf_init(dev, &tf); + tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + tf.command = ATA_CMD_PACKET; + tf.protocol = ATAPI_PROT_PIO; + tf.lbam = sizeof(buf); + + ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, + buf, sizeof(buf), 0); + + *b_len = 
be32_to_cpu(buf[1]); + *avalible = be32_to_cpu(buf[2]); + + return ret; +} + +static int __l_atapi_wait_for_buffer(struct ata_device *dev, u32 len) +{ + u32 b_len, avalible; + unsigned ret; + unsigned long timeout = jiffies + msecs_to_jiffies(60000); + + do { + ret = l_read_buffer_capacity(dev, &b_len, &avalible); + if (ret) + goto out; + + if (avalible > len) + goto out; + + ret = -ETIME; + msleep(50); + } while (!time_after(jiffies, timeout)); +out: + return ret; +} + +static void _l_atapi_wait_for_buffer(void *data, async_cookie_t cookie) +{ + struct scsi_cmnd *cmd = data; + struct scsi_device *scsidev = cmd->device; + struct ata_port *ap = ata_shost_to_port(scsidev->host); + struct ata_device *dev = ata_scsi_find_dev(ap, scsidev); + + if (!dev) + goto out; + + if (in_atomic() || irqs_disabled()) { + pr_warn("l_atapi_wait_for_buffer: atomic context\n"); + goto out; + } + if (ap->ops->error_handler) + ata_eh_acquire(ap); + __l_atapi_wait_for_buffer(dev, scsi_bufflen(cmd)); + if (ap->ops->error_handler) + ata_eh_release(ap); + dev->flags &= ~ATA_DFLAG_ATAPI_CHECK_BUFFER; +out: + scsi_internal_device_unblock_nowait(cmd->device, SDEV_RUNNING); +} + +static void l_atapi_wait_for_buffer(struct ata_device *dev, + struct scsi_cmnd *cmd) +{ + /* handle hw bug: controller deadlocks if device returns error, + so let's wait for a buffer */ + + scsi_internal_device_block_nowait(cmd->device); + async_schedule(_l_atapi_wait_for_buffer, cmd); +} +#endif + static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev) { @@ -4346,7 +4436,22 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, xlat_func = ata_get_xlat_func(dev, scsi_op); } } - +#ifdef CONFIG_MCST + if (xlat_func && dev->horkage & ATA_HORKAGE_FIX_ERROR_ON_WRITE) { + if (scsi_op == WRITE_6 || + scsi_op == WRITE_10 || + scsi_op == WRITE_16) { + if (dev->flags & ATA_DFLAG_ATAPI_CHECK_BUFFER) { + l_atapi_wait_for_buffer(dev, scmd); + return SCSI_MLQUEUE_DEVICE_BUSY; + } else { + 
dev->flags |= ATA_DFLAG_ATAPI_CHECK_BUFFER; + } + } else { + dev->flags &= ATA_DFLAG_ATAPI_CHECK_BUFFER; + } + } +#endif if (xlat_func) rc = ata_scsi_translate(dev, scmd, xlat_func); else diff --git a/drivers/base/component.c b/drivers/base/component.c index b9f20ada68b0..abe38766412d 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -7,6 +7,7 @@ * subsystem, and only handles one master device, but this doesn't have to be * the case. */ +#define DEBUG #include #include #include diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 0b67d41bab8f..8402c43caba5 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -83,7 +83,7 @@ EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource); static int __platform_get_irq(struct platform_device *dev, unsigned int num) { -#ifdef CONFIG_SPARC +#if defined(CONFIG_SPARC) && !defined(CONFIG_E90S) /* sparc does not have irqs represented as IORESOURCE_IRQ resources */ if (!dev || num >= dev->archdata.num_irqs) return -ENXIO; diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index fabf87058d80..0702b482b86b 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -385,7 +385,11 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg, regcache_rbtree_set_register(map, rbnode, reg_tmp, value); } else { unsigned int base_reg, top_reg; +#ifdef __LCC__ + unsigned int new_base_reg = UINT_MAX, new_top_reg = UINT_MAX; +#else unsigned int new_base_reg, new_top_reg; +#endif unsigned int min, max; unsigned int max_dist; unsigned int dist, best_dist = UINT_MAX; diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 43c0452a8ba9..e7948eca8c10 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -2589,7 +2589,11 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, size_t val_bytes = map->format.val_bytes; size_t val_count = val_len / 
val_bytes; unsigned int v; +#ifndef CONFIG_MCST int ret, i; +#else + int uninitialized_var(ret), i; +#endif if (!map->bus) return -EINVAL; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 816eb2db7308..d4f0548c3e3b 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -591,7 +591,11 @@ static int init_vq(struct virtio_blk *vblk) vq_callback_t **callbacks; const char **names; struct virtqueue **vqs; +#ifndef CONFIG_MCST unsigned short num_vqs; +#else + unsigned short uninitialized_var(num_vqs); +#endif struct virtio_device *vdev = vblk->vdev; struct irq_affinity desc = { 0, }; @@ -671,7 +675,11 @@ static int virtblk_name_format(char *prefix, int index, char *buf, int buflen) static int virtblk_get_cache_mode(struct virtio_device *vdev) { +#ifndef CONFIG_MCST u8 writeback; +#else + u8 uninitialized_var(writeback); +#endif int err; err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE, @@ -814,9 +822,18 @@ static int virtblk_probe(struct virtio_device *vdev) struct request_queue *q; int err, index; +#ifndef CONFIG_MCST u32 v, blk_size, max_size, sg_elems, opt_io_size; u16 min_io_size; u8 physical_block_exp, alignment_offset; +#else + u32 uninitialized_var(v), uninitialized_var(blk_size), + max_size, uninitialized_var(sg_elems), + uninitialized_var(opt_io_size); + u16 uninitialized_var(min_io_size); + u8 uninitialized_var(physical_block_exp), + uninitialized_var(alignment_offset); +#endif if (!vdev->config->get) { dev_err(&vdev->dev, "%s failure: config access disabled\n", diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index df0fc997dc3e..b42ade79bea6 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -525,7 +525,7 @@ source "drivers/char/xillybus/Kconfig" config ADI tristate "SPARC Privileged ADI driver" - depends on SPARC64 + depends on SPARC64 && !E90S default m help SPARC M7 and newer processors utilize ADI (Application Data diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 
7c5ea6f9df14..8de3f6670d28 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -52,3 +52,4 @@ js-rtc-y = rtc.o obj-$(CONFIG_XILLYBUS) += xillybus/ obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o obj-$(CONFIG_ADI) += adi.o +obj-$(CONFIG_KEYBOARD_DUMPER) += kdumper.o diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 59f25286befe..d328325e73fb 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -5,6 +5,7 @@ menuconfig HW_RANDOM tristate "Hardware Random Number Generator Core support" + depends on !E2K && !E90S default m ---help--- Hardware Random Number Generator Core infrastructure. @@ -116,7 +117,7 @@ config HW_RANDOM_GEODE config HW_RANDOM_N2RNG tristate "Niagara2 Random Number Generator support" - depends on SPARC64 + depends on SPARC64 && !E90S default HW_RANDOM ---help--- This driver provides kernel-side support for the Random Number diff --git a/drivers/char/kdumper.c b/drivers/char/kdumper.c new file mode 100644 index 000000000000..546ea07fff16 --- /dev/null +++ b/drivers/char/kdumper.c @@ -0,0 +1,102 @@ +#include +#include + +/* + * Keyboard dumper on F12 key press + * + * (c) 2013 Kirill Tkhai, thay_k@mcst.ru + */ + +static int kdumper_enable = 0; + +static void kdumper_event(struct input_handle *handle, unsigned int type, + unsigned int code, int value) +{ + if (type != EV_KEY || code != KEY_F12 || value != 1) + return; + + show_state(); +} + +static int kdumper_connect(struct input_handler *handler, struct input_dev *dev, + const struct input_device_id *id) +{ + struct input_handle *handle; + int error; + + handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->dev = dev; + handle->handler = handler; + handle->name = "debug"; + + error = input_register_handle(handle); + if (error) + goto err_free_handle; + + error = input_open_device(handle); + if (error) + goto err_unregister_handle; + + return 0; + + err_unregister_handle: 
+ input_unregister_handle(handle); + err_free_handle: + kfree(handle); + return error; +} + +static void kdumper_disconnect(struct input_handle *handle) +{ + input_close_device(handle); + input_unregister_handle(handle); + kfree(handle); +} + +static const struct input_device_id kdumper_ids[] = { + { + .flags = INPUT_DEVICE_ID_MATCH_KEYBIT, + .keybit = { BIT_MASK(KEY_F12) }, + }, + + { }, /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(input, kdumper_ids); + +static struct input_handler kdumper_handler = { + .event = kdumper_event, + .connect = kdumper_connect, + .disconnect = kdumper_disconnect, + .name = "kdumper", + .id_table = kdumper_ids, +}; + + +int __init kdumper_init(void) +{ + int error; + + if (!kdumper_enable) + return -ENODEV; + + error = input_register_handler(&kdumper_handler); + + if (error) + printk(KERN_ALERT "Failed to register kdumper\n"); + + return error; +} + +late_initcall_sync(kdumper_init); + +static int __devinit kdumper_setup(char *str) +{ + kdumper_enable = 1; + return 1; +} + +__setup("kdumper", kdumper_setup); diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 6b56bff9b68c..fec2d0abc79c 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -314,13 +314,26 @@ static int uncached_access(struct file *file, phys_addr_t addr) } #endif +#ifdef CONFIG_E2K +static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot, + struct vm_area_struct *vma) +#else static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot) +#endif { #ifdef pgprot_noncached phys_addr_t offset = pfn << PAGE_SHIFT; if (uncached_access(file, offset)) +#ifdef CONFIG_E2K + /* Support for MAP_WRITECOMBINED flag */ + if (cpu_has(CPU_FEAT_WC_PCI_PREFETCH) && + vma->vm_flags & VM_WRITECOMBINED) + return pgprot_writecombine(vma_prot); + else +#endif /* CONFIG_E2K */ return pgprot_noncached(vma_prot); #endif return vma_prot; @@ -396,9 +409,16 @@ static int 
mmap_mem(struct file *file, struct vm_area_struct *vma) &vma->vm_page_prot)) return -EINVAL; +#ifdef CONFIG_E2K + vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff, + size, + vma->vm_page_prot, + vma); +#else vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff, size, vma->vm_page_prot); +#endif vma->vm_ops = &mmap_mem_ops; diff --git a/drivers/char/random.c b/drivers/char/random.c index e3cbb7987b14..8862cbd11f1f 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1212,7 +1212,23 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) } sample; long delta, delta2, delta3; +#if (defined CONFIG_E2K || defined CONFIG_E90S) && CONFIG_HZ != 1000 + /* Check that cpu_freq_hz is initialized */ + WARN_ON_ONCE((s64) cpu_freq_hz < 1000000); + /* + * Using jiffies at HZ=1000 for enthropy estimation is + * already pessimistic, and at HZ==100 it is so pessimistic + * that we barely have any bytes in /dev/random pool at all. + * Here we fake HZ==1000 so that the estimation gives us a few + * bits of enthropy. "The Linux Pseudorandom Number Generator + * Revisited" suggests that this should be OK for architectures + * with working get_cycles(). 
+ */ + sample.jiffies = div64_u64(get_cycles() * MSEC_PER_SEC, + (u64) cpu_freq_hz); +#else sample.jiffies = jiffies; +#endif sample.cycles = random_get_entropy(); sample.num = num; r = &input_pool; diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c index 3b91184b77ae..65835d554899 100644 --- a/drivers/char/rtc.c +++ b/drivers/char/rtc.c @@ -859,6 +859,11 @@ static int __init rtc_init(void) #endif #endif +#ifdef CONFIG_E2K + if (HAS_MACHINE_E2K_IOHUB) + return -ENODEV; +#endif + #ifdef CONFIG_SPARC32 for_each_node_by_name(ebus_dp, "ebus") { struct device_node *dp; diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index b453029487a1..1fa98d24d298 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -62,6 +62,14 @@ struct ports_driver_data { /* All the console devices handled by this driver */ struct list_head consoles; }; + +/* + * FIXME: Community patch 4b0a2c5ff7215206ea6135a405f17c5f6fca7d00 + * broke virtio console. + * When switching from early console to the real one, real gets vtermno 1, + * and index 1, however hvc_console.index remains 0. Early console is shut down + * and hvc_console_print stops working + */ static struct ports_driver_data pdrvdata = { .next_vtermno = 1}; static DEFINE_SPINLOCK(pdrvdata_lock); @@ -1245,7 +1253,16 @@ static int init_port_console(struct port *port) * pointers. The final argument is the output buffer size: we * can do any size, so we put PAGE_SIZE here. 
*/ +#if !defined(CONFIG_E2K) && !defined(CONFIG_MCST) + /* + * FIXME: probably it is arch-independent bug + * Community patch 4b0a2c5ff7215206ea6135a405f17c5f6fca7d00 set + * console port vtermno to 0 (at add_port()), but it is ignored here + */ port->cons.vtermno = pdrvdata.next_vtermno; +#else /* CONFIG_E2K || CONFIG_MCST */ + BUG_ON(port->cons.vtermno != 0); +#endif /* !CONFIG_E2K && !CONFIG_MCST */ port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE); if (IS_ERR(port->cons.hvc)) { diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index bff5295016ae..e0f93cfd3d01 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -101,6 +101,15 @@ config CPU_FREQ_DEFAULT_GOV_SCHEDUTIL have a look at the help section of that governor. The fallback governor will be 'performance'. +config CPU_FREQ_DEFAULT_GOV_PSTATES + bool "pstates" + depends on E2K + select CPU_FREQ_GOV_PSTATES + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'pstates' as default. Fallback + governor will be the performance governor. + endchoice config CPU_FREQ_GOV_PERFORMANCE @@ -199,6 +208,16 @@ config CPU_FREQ_GOV_SCHEDUTIL If in doubt, say N. +config CPU_FREQ_GOV_PSTATES + tristate "'pstates' cpufreq governor" + depends on CPU_FREQ && E2K + select CPU_FREQ_GOV_COMMON + help + To compile this driver as a module, choose M here: the + module will be called cpufreq_conservative. + + If in doubt, say N. + comment "CPU frequency scaling drivers" config CPUFREQ_DT @@ -247,6 +266,12 @@ config IA64_ACPI_CPUFREQ If in doubt, say N. 
endif +if E2K +config E2K_PCS_CPUFREQ + tristate "E2K CPUFreq Driver" + default y +endif + if MIPS config BMIPS_CPUFREQ tristate "BMIPS CPUfreq Driver" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 9a9f5ccd13d9..f9dc2604e650 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o # CPUfreq governors obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o +obj-$(CONFIG_CPU_FREQ_GOV_PSTATES) += cpufreq_pstates.o obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o @@ -111,3 +112,4 @@ obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o obj-$(CONFIG_UNICORE32) += unicore2-cpufreq.o +obj-$(CONFIG_E2K_PCS_CPUFREQ) += e2k-pcs-cpufreq.o diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index c4e928375c40..79381957c030 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -28,6 +28,9 @@ #include #include #include +#ifdef CONFIG_MCST +#include +#endif #include static LIST_HEAD(cpufreq_policy_list); @@ -383,6 +386,10 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy, cpufreq_stats_record_transition(policy, freqs->new); policy->cur = freqs->new; + +#ifdef CONFIG_MCST + cpu_freq_hz = policy->cur * 1000; +#endif } } diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index dced033875bf..959445824c3c 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -41,6 +41,12 @@ static unsigned int default_powersave_bias; */ static int should_io_be_busy(void) { +#ifdef CONFIG_E2K + /* TODO remove check for IS_MACHINE_E1CP when bug #130433 is fixed */ + if 
(cpu_has(CPU_FEAT_ISET_V3) && !cpu_has(CPU_HWBUG_C3) && + !IS_MACHINE_E1CP) + return 1; +#endif #if defined(CONFIG_X86) /* * For Intel, Core 2 (model 15) and later have an efficient idle. diff --git a/drivers/cpufreq/cpufreq_pstates.c b/drivers/cpufreq/cpufreq_pstates.c new file mode 100644 index 000000000000..13dc8dc7f70f --- /dev/null +++ b/drivers/cpufreq/cpufreq_pstates.c @@ -0,0 +1,504 @@ +/* drivers/cpufreq/cpufreq_pstates.c */ + +#include +#include +#include +#include +#include + +#include "cpufreq_governor.h" + +#include +#include + +#define CPU_PWR_LIMIT_MAX (5100) +#define CPU_PWR_LIMIT_MIN (2850) +#define MIN_POWER_CONSUMPTION_LIMIT CPU_PWR_LIMIT_MIN /*mWatt*/ +#define MAX_POWER_CONSUMPTION_LIMIT CPU_PWR_LIMIT_MAX /*mWatt*/ +#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) +#define CPUFREQ_PSTATES_INIT_TEMP 70 + +struct ps_cpu_state { + unsigned int temperature; + unsigned int power; + struct pstate *pstate; +}; + +struct ps_policy_dbs_info { + struct policy_dbs_info policy_dbs; + struct ps_cpu_state cur_cpu_state; + struct cpufreq_frequency_table; +}; + +static inline struct ps_policy_dbs_info + *to_dbs_info(struct policy_dbs_info *policy_dbs) +{ + return container_of(policy_dbs, struct ps_policy_dbs_info, policy_dbs); +} + +struct ps_dbs_tuners { + unsigned int power_consumption_limit; + unsigned int temperature; +}; + +/* These are straight from cpufreq_ondemand.c */ +#define DEF_FREQUENCY_UP_THRESHOLD (80) +#define DEF_SAMPLING_DOWN_FACTOR (1) + + +unsigned int init_cpu_pwr_limit = 5100; /* CPU_PWR_LIMIT_MAX */ +EXPORT_SYMBOL(init_cpu_pwr_limit); +unsigned int cpu_pwr_limit = 5100; /* CPU_PWR_LIMIT_MAX */ +EXPORT_SYMBOL(cpu_pwr_limit); +unsigned int battery_pwr = 2850; /*CPU_PWR_LIMIT_MIN */ +EXPORT_SYMBOL(battery_pwr); + +int set_cpu_pwr_limit(int new_cpu_pwr_limit) +{ + if ((new_cpu_pwr_limit < CPU_PWR_LIMIT_MIN) || + (new_cpu_pwr_limit > CPU_PWR_LIMIT_MAX)) { + pr_err("set_cpu_pwr_limit: attribue is out of range - " + "new_cpu_pwr_limit = %d\n", 
new_cpu_pwr_limit); + return -1; + } + + cpu_pwr_limit = new_cpu_pwr_limit; + + return 0; +} + +static int init_cpu_pwr_limit_setup(char *str) +{ + int new_cpu_pwr_limit; + int ints[2]; + + str = get_options(str, ARRAY_SIZE(ints), ints); + new_cpu_pwr_limit = ints[1]; + + if (new_cpu_pwr_limit < CPU_PWR_LIMIT_MIN) { + pr_err("cpu_pwr_limit_setup: " + "get_options(...) has returned a value <= CPU_PWR_LIMIT_MIN\n"); + return -1; + } + if (new_cpu_pwr_limit > CPU_PWR_LIMIT_MAX) { + pr_err("cpu_pwr_limit_setup: " + "get_options(...) has retrned a value > CPU_PWR_LIMIT_MAX"); + return -2; + } + + init_cpu_pwr_limit = new_cpu_pwr_limit; + + return set_cpu_pwr_limit(init_cpu_pwr_limit); +} + +static int battery_pwr_setup(char *str) +{ + int new_battery_pwr; + int ints[2]; + + str = get_options(str, ARRAY_SIZE(ints), ints); + new_battery_pwr = ints[1]; + + if (new_battery_pwr <= 0) { + pr_err("battery_pwr_setup: " + "get_options(...) has returned a value <= 0\n"); + return -1; + } + if (new_battery_pwr > CPU_PWR_LIMIT_MAX) { + pr_err("battery_pwr_setup: " + "get_options(...) 
has retrned a value > CPU_PWR_LIMIT_MAX"); + return -2; + } + + battery_pwr = new_battery_pwr; + + return 0; +} + +__setup("init_cpu_pwr_limit=", init_cpu_pwr_limit_setup); +__setup("battery_pwr=", battery_pwr_setup); + +struct pstate { + unsigned int frequency; + unsigned int voltage; +}; + +#define NUMBER_OF_PSTATES 8 +struct pstate available_pstates[NUMBER_OF_PSTATES] = { + {.frequency = 984000, .voltage = 999}, + {.frequency = 914000, .voltage = 999}, + {.frequency = 800000, .voltage = 999}, + {.frequency = 711000, .voltage = 999}, + {.frequency = 512000, .voltage = 999}, + {.frequency = 400000, .voltage = 999}, + {.frequency = 200000, .voltage = 999}, + {.frequency = 0, .voltage = 0} +}; +#define P0 (&available_pstates[0]) +#define P1 (&available_pstates[1]) +#define P2 (&available_pstates[2]) +#define P3 (&available_pstates[3]) +#define P4 (&available_pstates[4]) +#define P5 (&available_pstates[5]) +#define P6 (&available_pstates[6]) +#define PS_HALT (&available_pstates[7]) + + +#define NUMBER_OF_TEMPS 9 +int scaling_temperatures[NUMBER_OF_TEMPS] = { +50, 60, 70, 80, 90, 100, 110, 120, 130 +}; + +#define NUMBER_OF_PWRS 13 +int scaling_powers[NUMBER_OF_PWRS] = { +5100, 4850, 4600, 4350, 4100, 3850, 3600, 3350, 3100, 2850, 2600, 2350, 2100 +}; + +struct pstate *pstates[NUMBER_OF_PWRS][NUMBER_OF_TEMPS] = { + {P0, P0, P0, P0, P0, P0, P0, P5, PS_HALT}, + {P0, P0, P0, P0, P0, P0, P2, PS_HALT, PS_HALT}, + {P0, P0, P0, P0, P0, P0, PS_HALT, PS_HALT, PS_HALT}, + {P0, P0, P0, P0, P0, P6, PS_HALT, PS_HALT, PS_HALT}, + {P0, P0, P0, P0, P4, PS_HALT, PS_HALT, PS_HALT, PS_HALT}, + {P0, P0, P0, P3, PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT}, + {P0, P0, P1, PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT}, + {P0, P0, PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT}, + {P0, P6, PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT}, + {P6, PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT}, + {PS_HALT, PS_HALT, PS_HALT, PS_HALT, 
PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT}, + {PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT}, + {PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT, PS_HALT} +}; + +unsigned int ps_get_cpu_power(void) +{ + return cpu_pwr_limit; +} + +int ps_cpu_power_to_index(unsigned int cpu_power) +{ + int i; + for (i = 0; i < NUMBER_OF_PWRS; i++) { + if (scaling_powers[i] <= cpu_power) + return i; + } + + pr_alert("cpufreq_pstates.c: ps_cpu_power_to_index() - " + "can't find appropriate index for cpu_power = %d.\n", cpu_power); + return -EINVAL; +} + +static void update_cpu_state(struct cpufreq_policy *policy, + struct ps_cpu_state *cpu_state, + int new_temperature, int new_power) +{ + int new_frequency; + struct pstate *new_pstate; + + if ((new_temperature < 0) || (new_temperature >= NUMBER_OF_TEMPS)) { + pr_err("update_cpu_state: atribute is out of limits - " + "new_temperature = %d\n", new_temperature); + return; + } + if ((new_power < 0) || (new_power >= NUMBER_OF_PWRS)) { + pr_err("update_cpu_state: atribute is out of limits - " + "new_power = %d\n", new_power); + return; + } + + cpu_state->temperature = new_temperature; + cpu_state->power = new_power; + new_pstate = pstates[new_power][new_temperature]; + cpu_state->pstate = new_pstate; + new_frequency = new_pstate->frequency; + + __cpufreq_driver_target(policy, new_frequency , CPUFREQ_RELATION_L); +} + +int ps_get_cpu_temperature(void) +{ + int temp = spmc_get_temp_cur0(); + if (temp == SPMC_TEMP_BAD_VALUE) + pr_alert("cpufreq_pstates.c - " + "ps_get_cpu_temperature(): spmc_get_temp_cur0() " + "returned bad value.\n"); + + return temp; +} + +void ps_check_cpu(struct cpufreq_policy *policy) +{ + int temperature, new_temperature, current_temperature, + power, new_power, current_power, cpu; + + struct ps_dbs_tuners *ps_tuners; + struct policy_dbs_info *policy_dbs = policy->governor_data; + struct ps_policy_dbs_info *ps_policy_dbs; + struct dbs_data *dbs_data; + struct 
ps_cpu_state *current_cpu_state; + + ps_policy_dbs = to_dbs_info(policy_dbs); + if (!ps_policy_dbs) { + pr_alert("%s: ps_policy_dbs is NULL!\n", __func__); + return; + } + dbs_data = policy_dbs->dbs_data; + if (!dbs_data) { + pr_alert("%s: dbs_data is NULL!\n", __func__); + return; + } + ps_tuners = (struct ps_dbs_tuners *)dbs_data->tuners; + if (!ps_tuners) { + pr_alert("%s: ps_tuners is NULL!\n", __func__); + return; + } + current_cpu_state = &ps_policy_dbs->cur_cpu_state; + if (!current_cpu_state) { + pr_alert("%s: current_cpu_state is NULL!\n", __func__); + return; + } + + cpu = policy->cpu; + temperature = ps_get_cpu_temperature(); + power = ps_get_cpu_power(); /* Returns cpu_pwr_limit */ + + current_temperature = current_cpu_state->temperature; + new_temperature = current_temperature; + if (temperature < scaling_temperatures[current_temperature]) { + if (current_temperature > 0) { + while (temperature <= + scaling_temperatures[new_temperature-1]) { + new_temperature--; + if (new_temperature <= 0) { + break; + } + } + } + } else if (current_temperature < (NUMBER_OF_TEMPS-1)) { + while (temperature >= scaling_temperatures[new_temperature+1]) { + new_temperature++; + if (new_temperature >= NUMBER_OF_TEMPS-1) { + break; + } + } + } + + current_power = current_cpu_state->power; + new_power = current_power; + if (power < scaling_powers[current_power]) { + if (current_power < (NUMBER_OF_PWRS-1)) { + while (power < scaling_powers[new_power]) { + new_power++; + if (new_power >= NUMBER_OF_PWRS-1) { + break; + } + } + } + } else if (current_power > 0) { + while (power >= scaling_powers[new_power-1]) { + new_power--; + if (new_power <= 0) { + break; + } + } + } + + if ((new_temperature != current_temperature) || + (new_power != current_power)) { + ps_tuners->temperature = scaling_temperatures[new_temperature]; + update_cpu_state(policy, current_cpu_state, + new_temperature, new_power); + } +} + +static unsigned int ps_dbs_update(struct cpufreq_policy *policy) +{ + struct 
policy_dbs_info *policy_dbs = policy->governor_data; + struct dbs_data *dbs_data = policy_dbs->dbs_data; + + /* if (!ps_need_load_eval(&core_pbs_info->cpbs, + ps_tuners->sampling_rate)) { + modify_all = false; + goto max_delay; + } */ + + ps_check_cpu(policy); + + return dbs_data->sampling_rate; + +} + +/**************** sysfs ******************/ + +static ssize_t store_power_consumption_limit(struct gov_attr_set *attr_set, + const char *buf, size_t count) +{ + struct dbs_data *dbs_data = to_dbs_data(attr_set); + struct ps_dbs_tuners *ps_tuners; + int input; + int ret; + + ps_tuners = (struct ps_dbs_tuners *) dbs_data->tuners; + ret = sscanf(buf, "%u", &input); + ret = set_cpu_pwr_limit(input); + if (!ret) + ps_tuners->power_consumption_limit = input; + + return count; +} + +/* For later debug purposes */ +/* +static ssize_t store_temperature(struct gov_attr_set *attr_set, const char *buf, + size_t count) +{ + struct dbs_data *dbs_data = to_dbs_data(attr_set); + struct ps_dbs_tuners *ps_tuners; + int input; + int ret; + + ps_tuners = (struct ps_dbs_tuners *) dbs_data->tuners; + ret = sscanf(buf, "%d", &input); + ps_tuners->temperature = input; + + return count; +} +*/ + +gov_show_one_common(sampling_rate); +gov_show_one(ps, power_consumption_limit); +gov_show_one(ps, temperature); + +gov_attr_rw(sampling_rate); +gov_attr_rw(power_consumption_limit); +gov_attr_ro(temperature); + +static struct attribute *ps_attributes[] = { + &sampling_rate.attr, + &power_consumption_limit.attr, + &temperature.attr, + NULL +}; + + +/************** sysfs end ****************/ + +static struct policy_dbs_info *ps_alloc(void) +{ + struct ps_policy_dbs_info *dbs_info; + + if (!IS_MACHINE_E1CP) { + pr_alert("PSTATES governor is only supported for E1CP. " + "Please select another governor.\n"); + return NULL; + } + + dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL); + return dbs_info ? 
&dbs_info->policy_dbs : NULL; +} + +static void ps_free(struct policy_dbs_info *policy_dbs) +{ + kfree(to_dbs_info(policy_dbs)); +} + +static int ps_init(struct dbs_data *dbs_data) +{ + struct ps_dbs_tuners *tuners; + u64 idle_time; + int cpu; + + if (!IS_MACHINE_E1CP) { + pr_alert("PSTATES governor is only supported for E1CP. " + "Please select another governor.\n"); + return -EINVAL; + } + + tuners = kzalloc(sizeof(*tuners), GFP_KERNEL); + if (!tuners) { + pr_alert("%s: can't allocate memory for tuners!\n", + __func__); + return -ENOMEM; + } + + cpu = get_cpu(); + idle_time = get_cpu_idle_time_us(cpu, NULL); + put_cpu(); + + dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; + + dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; + dbs_data->ignore_nice_load = 0; + + tuners->power_consumption_limit = ps_get_cpu_power(); + tuners->temperature = CPUFREQ_PSTATES_INIT_TEMP; + + dbs_data->tuners = tuners; + + return 0; +} + +static void ps_exit(struct dbs_data *dbs_data) +{ + kfree(dbs_data->tuners); +} + +static void ps_start(struct cpufreq_policy *policy) +{ + struct ps_policy_dbs_info *dbs_info = + to_dbs_info(policy->governor_data); + + dbs_info->cur_cpu_state.pstate = P0; + dbs_info->cur_cpu_state.temperature = NUMBER_OF_TEMPS - 1; + dbs_info->cur_cpu_state.power = + ps_cpu_power_to_index(ps_get_cpu_power()); +} + +static struct dbs_governor ps_dbs_gov = { + .gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("pstates"), + .kobj_type = { .default_attrs = ps_attributes }, + .gov_dbs_update = ps_dbs_update, + .alloc = ps_alloc, + .free = ps_free, + .init = ps_init, + .exit = ps_exit, + .start = ps_start, +}; + +#define CPU_FREQ_GOV_PSTATES (&ps_dbs_gov.gov) + +static int __init cpufreq_gov_pbs_init(void) +{ + if (!IS_MACHINE_E1CP) { + pr_warn("PSTATES governor is only supported for E1CP.\n"); + return 0; + } + + return cpufreq_register_governor(CPU_FREQ_GOV_PSTATES); +} + +static void __exit cpufreq_gov_pbs_exit(void) +{ + if (!IS_MACHINE_E1CP) + return; + + 
cpufreq_unregister_governor(CPU_FREQ_GOV_PSTATES); +} + +MODULE_AUTHOR("Mikhail Ablakatov ablakatov@mcst.ru"); +MODULE_DESCRIPTION("'cpufreq_pstates' - A dynamic cpufreq governor for E1C+"); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PSTATES +struct cpufreq_governor *cpufreq_default_governor(void) +{ + return CPU_FREQ_GOV_PSTATES; +} +#endif + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PSTATES +fs_initcall(cpufreq_gov_pbs_init); +#else +module_init(cpufreq_gov_pbs_init); +#endif +module_exit(cpufreq_gov_pbs_exit); diff --git a/drivers/cpufreq/e2k-pcs-cpufreq.c b/drivers/cpufreq/e2k-pcs-cpufreq.c new file mode 100644 index 000000000000..98b91900a867 --- /dev/null +++ b/drivers/cpufreq/e2k-pcs-cpufreq.c @@ -0,0 +1,432 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define M_BFS 3 +#define N_BFS 16 +#define MAX_STATES (M_BFS*N_BFS) +#define DEFAULT_F_PLL 2000 +#define MAX_F_PLL 2000 +#define MIN_F_PLL 600 +#define F_REF 100 + +#define EFUSE_START_ADDR 0x0 +#define EFUSE_END_ADDR 0xff +#define OD_MASK 0x7ff +#define NF_MASK_LO 0x1f +#define NF_MASK_HI 0x7f +#define NR_MASK 0xfff + +#define OD_OFFSET 5 +#define NF_OFFSET_LO 16 +#define NF_OFFSET_HI 0 +#define NR_OFFSET 7 + +#define EFUSE_DATA_SIZE 21 + +#define get_od(data) (OD_MASK & (data >> OD_OFFSET)) +#define get_nr(data) (NR_MASK & (data >> NR_OFFSET)) + +#define MAX_NODE 4 +#define MAX_CORE 16 + +static int f_plls[MAX_NODE]; + +struct pcs_data { + int div_max; + int div_min; + struct cpufreq_frequency_table *table; +}; + +struct pcs_data *cpufreq_pcs_data[MAX_NODE][MAX_CORE]; + +typedef union { + struct { + u32 data:21; + u32 broadcast:1; + u32 addr:7; + u32 parity:1; + u32 disable:1; + u32 sign:1; + }; + u32 word; +} efuse_data_t; + +static inline bool check_bfs_bypass(int node) +{ + pcs_ctrl3_t ctrl; + + ctrl.word = sic_read_node_nbsr_reg(node, SIC_pcs_ctrl3); + + return (ctrl.bfs_freq == 8); +} + +static inline int get_pcs_mode(int node) +{ 
+ pcs_ctrl1_t ctrl; + + ctrl.word = sic_read_node_nbsr_reg(node, SIC_pcs_ctrl1); + + return ctrl.pcs_mode; +} + +static inline int64_t get_nf(uint64_t *data) +{ + int64_t val = 0; + val += (NF_MASK_LO & (data[0] >> NF_OFFSET_LO)); + val += data[1] << (EFUSE_DATA_SIZE - NF_OFFSET_LO); + val += data[2] << (EFUSE_DATA_SIZE * 2 - NF_OFFSET_LO); + val += + ((NF_MASK_HI << NF_OFFSET_HI) & data[3]) << (EFUSE_DATA_SIZE * 3 - + NF_OFFSET_LO); + + return val; +} + +#define GET_FREQ(div, pll) (16000*pll/(1 << div/16)/(div%16 + 16)) /* Khz */ + +static struct cpufreq_frequency_table *pcs_l_calc_freq_tables(int node, + int divFmin, int divFmax) +{ + int divF; + int divFi = 0; + + struct cpufreq_frequency_table *table = kzalloc( + (sizeof(struct cpufreq_frequency_table) * + (divFmax - divFmin + 2)), GFP_KERNEL); + + for (divF = divFmin; divF < MAX_STATES && divF <= divFmax; divF++) { + table[divFi].frequency = + GET_FREQ(divF, f_plls[node]); + table[divFi++].driver_data = divF; + } + + table[divFi].frequency = CPUFREQ_TABLE_END; + + return table; +} + +int get_idx_by_n_sys(int n_sys) +{ + return (n_sys < 20) ? n_sys - 10 : (n_sys < 32) ? 
9 + (n_sys - 20) / 2 : 14; +} + +int n_sys[] = {10, 11, 12, 13, 14, 15, 16, 17, 18, 20, 22, 24, 26, 28, 32}; +int f_base_rev0[] = {900, 1000, 1050, 1100, 1125, 1175, 1200, 1300}; +int f_base_rev1[] = {900, 1000, 1100, 1200, 1300, 1400, 1500, 1550}; + +static struct cpufreq_frequency_table *pcs_l_calc_freq_tables_e8c2(int node, + int divFmin, int divFmax) +{ + struct cpufreq_frequency_table *table; + int i, ii = 0; + int f_base = 0; + e2k_idr_t IDR; + pcs_ctrl3_t ctrl; + + if (divFmin > divFmax) { + pr_err("%s: invalid params", __func__); + return NULL; + } + + table = kzalloc((sizeof(struct cpufreq_frequency_table) * + (ARRAY_SIZE(n_sys) + 1)), + GFP_KERNEL); + + ctrl.word = sic_read_node_nbsr_reg(node, SIC_pcs_ctrl3); + + IDR = read_IDR_reg(); + + if (!IDR.IDR_rev) + f_base = f_base_rev0[ctrl.pll_mode]; + else + f_base = f_base_rev1[ctrl.pll_mode]; + + for (i = 0; i < ARRAY_SIZE(n_sys); i++) { + int freq = f_base * 16000 / n_sys[i]; + + if (n_sys[i] >= divFmin && n_sys[i] <= divFmax) { + table[ii].frequency = freq; + table[ii].driver_data = n_sys[i]; + ii++; + } + } + + table[ii].frequency = CPUFREQ_TABLE_END; + + return table; +} + +#ifdef DEBUG +static void print_pmc_freq_core_mon(freq_core_mon_t *mon) +{ + printk(KERN_DEBUG "freq_core_mon:\n" + "\tdivF_curr %d\n" + "\tdivF_target %d\n" + "\tdivF_limit_hi %d\n" + "\tdivF_limit_lo %d\n" + "\tdivF_init %d\n" + "\tbfs_bypass %d\n", + mon->divF_curr, + mon->divF_target, + mon->divF_limit_hi, + mon->divF_limit_lo, mon->divF_init, mon->bfs_bypass); +} + +static void print_pmc_freq_core_sleep(freq_core_sleep_t *sleep) +{ + printk(KERN_DEBUG "freq_core_sleep:\n" + "\tcmd %d\n" + "\tstatus %d\n" + "\tctrl_enable %d\n" + "\talter_disable %d\n" + "\tbfs_bypass %d\n" + "\tpin_en %d\n", + sleep->cmd, + sleep->status, + sleep->ctrl_enable, + sleep->alter_disable, sleep->bfs_bypass, sleep->pin_en); +} + +static void print_efuse_data(efuse_data_t *efuse_data) +{ + printk(KERN_DEBUG "efuse_data:\n" + "\tsign %d\n" + "\tdisable 
%d\n" + "\tparity %d\n" + "\taddr 0x%x\n" + "\tbroadcast %d\n" + "\tdata 0x%x\n", + efuse_data->sign, + efuse_data->disable, + efuse_data->parity, + efuse_data->addr, efuse_data->broadcast, efuse_data->data); +} +#endif + +static unsigned int pcs_l_cpufreq_get_e8c2(unsigned int cpu) +{ + int node = cpu_to_node(cpu); + int core = cpu_to_cpuid(cpu) % cpu_max_cores_num(); + struct cpufreq_frequency_table *table = + cpufreq_pcs_data[node][core]->table; + int target_idx = 0; + pcs_ctrl1_t ctrl; + + ctrl.word = sic_read_node_nbsr_reg(node, SIC_pcs_ctrl1); + + target_idx = get_idx_by_n_sys(ctrl.n) - get_idx_by_n_sys(table[0].driver_data); + + return cpufreq_pcs_data[node][core]->table[target_idx].frequency; +} + +static unsigned int pcs_l_cpufreq_get_e16c(unsigned int cpu) +{ + freq_core_mon_t mon; + int core = cpu_to_cpuid(cpu) % cpu_max_cores_num(); + int node = cpu_to_node(cpu); + struct pcs_data *pcs_data = cpufreq_pcs_data[node][core]; + + mon.word = sic_read_node_nbsr_reg(node, PMC_FREQ_CORE_N_MON(core)); + WARN_ON_ONCE(mon.divF_curr < pcs_data->div_min || mon.divF_curr > pcs_data->div_max); + + return pcs_data->table[mon.divF_curr - pcs_data->div_min].frequency; +} + +static unsigned int pcs_l_cpufreq_get(unsigned int cpu) +{ + if (IS_MACHINE_E8C2) + return pcs_l_cpufreq_get_e8c2(cpu); + + return pcs_l_cpufreq_get_e16c(cpu); +} + +static int pcs_l_cpufreq_setpolicy(struct cpufreq_policy *policy) +{ + /* TODO */ + switch (policy->policy) { + case CPUFREQ_POLICY_PERFORMANCE: + break; + case CPUFREQ_POLICY_POWERSAVE: + break; + } + + return 0; +} + +static int get_f_pll(int node) +{ + int addr = EFUSE_START_ADDR; + int f_pll = DEFAULT_F_PLL; + uint64_t data[4]; + int i = 0; + + for (addr; addr < EFUSE_END_ADDR; addr++) { + efuse_data_t efuse_data; +#ifdef DEBUG + print_efuse_data(&efuse_data); +#endif + sic_write_node_nbsr_reg(node, EFUSE_RAM_ADDR, addr); + efuse_data.word = sic_read_node_nbsr_reg(node, EFUSE_RAM_DATA); + if (efuse_data.sign && !efuse_data.disable + 
&& efuse_data.broadcast && (efuse_data.addr >= 0x45) + && (efuse_data.addr <= 0x48)) { + data[i++] = efuse_data.data; + } + } + + if (i == 4) { + int64_t nr = get_nr(data[3]); + int64_t nf = get_nf(data); + int64_t od = get_od(data[0]); + + int f_pll_calc = F_REF * nf / ((1LL << 33) * (nr + 1) * (od + 1)); + + if (f_pll_calc >= MIN_F_PLL && f_pll_calc <= MAX_F_PLL) + f_pll = f_pll_calc; + } + + return f_pll; +} + +static struct pcs_data *get_pcs_data(int node, int core) +{ + freq_core_mon_t mon; + struct pcs_data *data; + + data = kzalloc(sizeof(struct pcs_data), GFP_KERNEL); + + mon.word = sic_read_node_nbsr_reg(node, PMC_FREQ_CORE_N_MON(core)); + + data->div_max = mon.divF_limit_hi; + data->div_min = mon.divF_init; + data->table = pcs_l_calc_freq_tables(node, + mon.divF_init, mon.divF_limit_hi); + + return data; +} + +static struct pcs_data *get_pcs_data_e8c2(int node) +{ + pcs_ctrl1_t ctrl; + struct pcs_data *data; + + data = kzalloc(sizeof(struct pcs_data), GFP_KERNEL); + + ctrl.word = sic_read_node_nbsr_reg(node, SIC_pcs_ctrl1); + + data->div_min = ctrl.n_fmin; + data->div_max = ctrl.n; + data->table = pcs_l_calc_freq_tables_e8c2(node, ctrl.n, ctrl.n_fmin); + + return data; +} + +static int pcs_l_cpufreq_init(struct cpufreq_policy *policy) +{ + int node = cpu_to_node(policy->cpu); + int core = cpu_to_cpuid(policy->cpu) % cpu_max_cores_num(); + struct pcs_data *data = cpufreq_pcs_data[node][core]; + + policy->max = data->table[data->div_max].frequency; + policy->min = data->table[data->div_min].frequency; + + policy->cur = pcs_l_cpufreq_get(policy->cpu); + policy->freq_table = data->table; + policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; + + cpumask_set_cpu(policy->cpu, policy->cpus); + + return 0; +} + +static int pcs_l_cpufreq_exit(struct cpufreq_policy *policy) +{ + return 0; +} + +static struct freq_attr *pcs_l_cpufreq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static struct cpufreq_driver pcs_cpufreq_driver = { + .init = 
pcs_l_cpufreq_init, + .verify = cpufreq_generic_frequency_table_verify, + .setpolicy = pcs_l_cpufreq_setpolicy, + .exit = pcs_l_cpufreq_exit, + .get = pcs_l_cpufreq_get, + .name = "pcs_cpufreq", + .attr = pcs_l_cpufreq_attr, +}; + +static int __init pcs_cpufreq_probe(void) +{ + /* cpufreq driver is disabled on guest as it is host's + * responsibility to adjust CPU frequency. */ + bool use_cpufreq = !IS_HV_GM() && !IS_ENABLED(CONFIG_KVM_GUEST_KERNEL); + + if ((IS_MACHINE_E2C3 || IS_MACHINE_E12C || IS_MACHINE_E16C || + IS_MACHINE_E8C2) && use_cpufreq && !is_prototype()) { + + int node; + int core; + + for_each_online_node(node) { + if (IS_MACHINE_E8C2) { + struct pcs_data *data; + + if (check_bfs_bypass(node)) { + pr_err("cpufreq: CPU pins encode BFS bypass mode (bfs_freq==8)," + " that is why program frequency control is unavailable on node %d!", node); + continue; + } + + if (get_pcs_mode(node) < 4) + pr_err("cpufreq: throttling is disabled on node %d", node); + + data = get_pcs_data_e8c2(node); + + for (core = 0; core < cpu_max_cores_num(); core++) + cpufreq_pcs_data[node][core] = data; + } else { + f_plls[node] = get_f_pll(node); + + for (core = 0; core < cpu_max_cores_num(); core++) + cpufreq_pcs_data[node][core] = get_pcs_data(node, core); + } + } + + if (cpufreq_register_driver(&pcs_cpufreq_driver)) { + pr_err("ERROR: %s: %d\n", __func__, __LINE__); + } + } + + return 0; +} + +static void __exit pcs_cpufreq_remove(void) +{ + /* cpufreq driver is disabled on guest as it is host's + * responsibility to adjust CPU frequency. 
*/ + bool use_cpufreq = !IS_HV_GM() && !IS_ENABLED(CONFIG_KVM_GUEST_KERNEL); + + if ((IS_MACHINE_E2C3 || IS_MACHINE_E12C || IS_MACHINE_E16C || + IS_MACHINE_E8C2) && use_cpufreq && !is_prototype()) { + cpufreq_unregister_driver(&pcs_cpufreq_driver); + } +} + +MODULE_AUTHOR("Arseniy.A.Demidov@mcst.ru"); +MODULE_DESCRIPTION("E2K CPUFreq Driver"); +MODULE_LICENSE("GPL v2"); + +module_init(pcs_cpufreq_probe); +module_exit(pcs_cpufreq_remove); diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig index 88727b7c0d59..af66d84c69e7 100644 --- a/drivers/cpuidle/Kconfig +++ b/drivers/cpuidle/Kconfig @@ -1,4 +1,3 @@ -# SPDX-License-Identifier: GPL-2.0-only menu "CPU Idle" config CPU_IDLE @@ -20,9 +19,11 @@ config CPU_IDLE_MULTIPLE_DRIVERS config CPU_IDLE_GOV_LADDER bool "Ladder governor (for periodic timer tick)" + default m config CPU_IDLE_GOV_MENU bool "Menu governor (for tickless system)" + default m config CPU_IDLE_GOV_TEO bool "Timer events oriented (TEO) governor (for tickless systems)" @@ -47,6 +48,11 @@ config CPU_IDLE_GOV_HALTPOLL config DT_IDLE_STATES bool +menu "E90S CPU Idle Drivers" +depends on E90S +source "drivers/cpuidle/Kconfig.e90s" +endmenu + menu "ARM CPU Idle Drivers" depends on ARM || ARM64 source "drivers/cpuidle/Kconfig.arm" @@ -62,6 +68,11 @@ depends on PPC source "drivers/cpuidle/Kconfig.powerpc" endmenu +menu "E2K CPU Idle Drivers" +depends on E2K +source "drivers/cpuidle/Kconfig.e2k" +endmenu + config HALTPOLL_CPUIDLE tristate "Halt poll cpuidle driver" depends on X86 && KVM_GUEST diff --git a/drivers/cpuidle/Kconfig.e2k b/drivers/cpuidle/Kconfig.e2k new file mode 100644 index 000000000000..92651da50f00 --- /dev/null +++ b/drivers/cpuidle/Kconfig.e2k @@ -0,0 +1,10 @@ +# +# E2K CPU Idle drivers +# + +config E2K_CPUIDLE + bool "CPU Idle driver for e2k machines" + depends on CPU_IDLE && E2K && !KVM_GUEST_KERNEL + default y + help + This adds the CPU Idle driver for e2k machines diff --git a/drivers/cpuidle/Kconfig.e90s 
b/drivers/cpuidle/Kconfig.e90s new file mode 100644 index 000000000000..a8246d7343d4 --- /dev/null +++ b/drivers/cpuidle/Kconfig.e90s @@ -0,0 +1,11 @@ +# +# E2K CPU Idle drivers +# + +config E90S_CPUIDLE + tristate "CPU Idle Driver for E90S machines (r2000)" + depends on E90S && !MCST_RT + default m + help + CPU Idle driver for E90S machines (r2000). + diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile index ee70d5cc5b99..7b14fa1ae799 100644 --- a/drivers/cpuidle/Makefile +++ b/drivers/cpuidle/Makefile @@ -31,3 +31,12 @@ obj-$(CONFIG_MIPS_CPS_CPUIDLE) += cpuidle-cps.o # POWERPC drivers obj-$(CONFIG_PSERIES_CPUIDLE) += cpuidle-pseries.o obj-$(CONFIG_POWERNV_CPUIDLE) += cpuidle-powernv.o + +############################################################################### +# MCST E2K & E90S drivers + +obj-$(CONFIG_E2K_CPUIDLE) += cpuidle-e2k.o +obj-$(CONFIG_E90S_CPUIDLE) += cpuidle-e90s.o + +CFLAGS_REMOVE_cpuidle-e2k.o = $(CFLAGS_ALL_CPUS) +CFLAGS_cpuidle-e2k.o += -march=elbrus-v2 diff --git a/drivers/cpuidle/cpuidle-e2k.c b/drivers/cpuidle/cpuidle-e2k.c new file mode 100644 index 000000000000..b62cb76679fd --- /dev/null +++ b/drivers/cpuidle/cpuidle-e2k.c @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * CPU idle for E2K machines. + */ + +#include +#include +#include +#include + +#include + +static int __cpuidle C1_enter(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + machine.C1_enter(); + return index; +} + +static int __cpuidle C2_enter(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + unsigned long flags; + unsigned int node = numa_node_id(); + int core = cpu_to_cpuid(dev->cpu) % cpu_max_cores_num(); + int reg = PMC_FREQ_CORE_N_SLEEP(core); + freq_core_sleep_t C2 = { .cmd = 2 }, C0 = { .cmd = 0 }; + + /* We do not want an NMI to arrive just before + * machine.C1_enter() and force us out of C2. 
*/ + raw_all_irq_save(flags); + sic_write_node_nbsr_reg(node, reg, AW(C2)); + + machine.C1_enter(); + + sic_write_node_nbsr_reg(node, reg, AW(C0)); + raw_all_irq_restore(flags); + + return index; +} + +static int __cpuidle C3_enter(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + if (WARN_ON_ONCE(!machine.C3_enter)) + return 0; + + machine.C3_enter(); + return index; +} + + +#define E2K_CPUIDLE_C1_STATE ({ \ + struct cpuidle_state state = { \ + .name = "C1", \ + .desc = "CPU pipeline stop", \ + .exit_latency = 0, \ + .target_residency = 0, \ + .enter = &C1_enter \ + }; \ + state; \ +}) + +/* One step takes ~2.6 us */ +#define DIVF_STEPS_LENGTH_US(divF) ((divF) * 26 / 10) +#define E2K_CPUIDLE_C2_STATE(divF) ({ \ + struct cpuidle_state state = { \ + .name = "C2", \ + .desc = "CPU pipeline stop at lower freq", \ + /* Divide by 2 since CPU starts executing immediately \ + * (although at lower frequency), and enters C2 also \ + * immediately (although at higher frequency). */ \ + .exit_latency = DIVF_STEPS_LENGTH_US(divF) / 2, \ + .target_residency = 1 + DIVF_STEPS_LENGTH_US(divF) / 2, \ + .enter = &C2_enter \ + }; \ + state; \ +}) + +#define E2K_CPUIDLE_C3_STATE ({ \ + struct cpuidle_state state = { \ + .name = "C3", \ + .desc = "CPU clock off (including L1/L2)", \ + /* Since v6 C3 is entered and exited ~(7 * 2.6) us slower */ \ + .exit_latency = 30 + (cpu_has(CPU_FEAT_ISET_V6) \ + ? DIVF_STEPS_LENGTH_US(7) \ + : 0), \ + .target_residency = 100 + (cpu_has(CPU_FEAT_ISET_V6) \ + ? 
DIVF_STEPS_LENGTH_US(14) \ + : 0), \ + .enter = &C3_enter \ + }; \ + state; \ +}) + + +static struct cpuidle_driver e2k_idle_driver = { + .name = "e2k_idle", + .owner = THIS_MODULE, +}; + +static int __initdata cpu_divF[NR_CPUS]; +static void __init initialize_C2_state(void *unused) +{ + int cpu = smp_processor_id(); + int node = numa_node_id(); + int core = cpu_to_cpuid(cpu) % cpu_max_cores_num(); + freq_core_mon_t C2_mon; + /* Choose not too deep sleep, otherwise there is no + * value in choosing C2 over C3. */ + int new_divF = 0x10; + + C2_mon.word = sic_read_node_nbsr_reg(node, PMC_FREQ_CORE_N_MON(core)); + if (C2_mon.divF_limit_hi < new_divF) + new_divF = C2_mon.divF_limit_hi; + + cpu_divF[cpu] = new_divF; + + /* Set C2 state to also reduce CPU frequency */ + if (cpu == cpumask_first(cpumask_of_node(node))) + sic_write_node_nbsr_reg(node, PMC_FREQ_C2, new_divF); +} + +/* Force C3 on if it is disabled on current hardware */ +static bool force_C3; +static int __init force_C3_setup(char *__unused) +{ + pr_info("C3 idle state enabled from command line\n"); + force_C3 = 1; + + return 1; +} +__setup("force_C3", force_C3_setup); + + +/* Initialize CPU idle by registering the idle states */ +static int __init e2k_idle_init(void) +{ + /* C2/C3 states are disabled on guest as they will + * just cause a lot of unnecessary interceptions. 
*/ + bool use_deep_states = !IS_HV_GM() && !IS_ENABLED(CONFIG_KVM_GUEST_KERNEL); + + if (cpu_has(CPU_FEAT_ISET_V6)) { + int nr = 0; + + /* Enable C1 state */ + if (!idle_nomwait) { + e2k_idle_driver.states[nr] = E2K_CPUIDLE_C1_STATE; + nr += 1; + } + + /* Enable C2 state */ + if (use_deep_states && !idle_nomwait) { + int cpu, divF_min = INT_MAX, divF_max = 0; + + on_each_cpu(initialize_C2_state, NULL, 1); + for_each_online_cpu(cpu) { + divF_min = min(divF_min, cpu_divF[cpu]); + divF_max = max(divF_max, cpu_divF[cpu]); + } + pr_info("Chosen C2 state dividers range 0x%x:0x%x\n", + divF_min, divF_max); + + if (divF_min) { + e2k_idle_driver.states[nr] = E2K_CPUIDLE_C2_STATE( + (divF_min + divF_max) / 2); + nr += 1; + } else { + pr_warn("WARNING: disabling C2 state\n"); + } + } + + /* Enable C3 state */ + if (use_deep_states && (!cpu_has(CPU_HWBUG_C3) || force_C3)) { + e2k_idle_driver.states[nr] = E2K_CPUIDLE_C3_STATE; + nr += 1; + WARN_ON(nr > 1 && e2k_idle_driver.states[nr - 1].target_residency <= + e2k_idle_driver.states[nr - 2].target_residency); + } + e2k_idle_driver.state_count = nr; + } else if (cpu_has(CPU_FEAT_ISET_V3)) { + e2k_idle_driver.states[0] = E2K_CPUIDLE_C1_STATE; + e2k_idle_driver.state_count = 1; + + /* TODO bug 130433: temporarily disable C3 on e1c+ until bug is fixed, + * can be forced back on with "force_C3" in cmdline */ + if (use_deep_states && (!IS_MACHINE_E1CP || force_C3)) { + e2k_idle_driver.states[1] = E2K_CPUIDLE_C3_STATE; + e2k_idle_driver.state_count = 2; + } + } else { + e2k_idle_driver.states[0] = E2K_CPUIDLE_C1_STATE; + e2k_idle_driver.state_count = 1; + } + return cpuidle_register(&e2k_idle_driver, NULL); +} +device_initcall(e2k_idle_init); diff --git a/drivers/cpuidle/cpuidle-e90s.c b/drivers/cpuidle/cpuidle-e90s.c new file mode 100644 index 000000000000..844b14693693 --- /dev/null +++ b/drivers/cpuidle/cpuidle-e90s.c @@ -0,0 +1,164 @@ +/* + * CPU idle for r2000 machines. 
+ * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + * + * Maintainer: Andrey Kuyan + */ + + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define R2000P_MAX_STATES 4 + +/* ASI Regs: */ +#define E90S_R2000_PWRCTRL_REG_ADDR 0x38 + + +static struct platform_device *pdev; + +static int e90s_enter_idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + int state; + + local_irq_enable(); + switch (index) { + case 0: return index; + case 1: + state = 1; + break; + case 2: + state = 3; + break; + case 3: + state = 6; /* Note: for R2000 index < 3 */ + break; + default: + return -1; + } + writeq_asi(state, E90S_R2000_PWRCTRL_REG_ADDR, ASI_LSU_CONTROL); + return index; +} + +static struct cpuidle_driver e90s_idle_driver = { + .name = "e90s_idle", + .owner = THIS_MODULE, + .states[0] = { + .enter = e90s_enter_idle, + .exit_latency = 1, + .target_residency = 1, + .name = "C0", + .desc = "Idle busy loop", + }, + .states[1] = { + .enter = e90s_enter_idle, + .exit_latency = 10000, + .target_residency = 10, + .name = "C1", + .desc = "Stop decoding only", + }, + .states[2] = { + .enter = e90s_enter_idle, + .exit_latency = 40, + .target_residency = 20, + .name = "C3", + .desc = "Stop decoding and L1", + }, + .states[3] = { + .enter = e90s_enter_idle, + .exit_latency = 2000, + .target_residency = 1000, + .name = "C6", + .desc = "Reduces CPU voltage down to 0 V", + }, + .state_count = R2000P_MAX_STATES, /* will be reset for R2000 */ +}; + +/* Initialize CPU idle by registering the idle states */ +static int e90s_cpuidle_probe(struct platform_device *pdev) +{ + int rev = get_cpu_revision(); + if (rev < 0x10) + return -EINVAL; + if (rev < 0x20) /* walk around bug 123699 */ + e90s_idle_driver.state_count = 2; + return 
cpuidle_register(&e90s_idle_driver, NULL); +} + +static int e90s_cpuidle_remove(struct platform_device *pdev) +{ + int rev = get_cpu_revision(); + if (rev < 0x10) + return -EINVAL; + cpuidle_unregister(&e90s_idle_driver); + return 0; +} + +static struct platform_driver e90s_cpuidle_driver = { + .probe = e90s_cpuidle_probe, + .remove = e90s_cpuidle_remove, + .driver = { + .name = "e90s_cpuidle", + .owner = THIS_MODULE, + }, +}; + +static int __init e90s_cpuidle_init(void) +{ + int rc; + int rev = get_cpu_revision(); + if (rev < 0x10) + return -ENODEV; + pdev = platform_device_alloc("e90s_cpuidle", 0); + if (!pdev) + return -ENOMEM; + + + rc = platform_device_add(pdev); + if (rc) { + rc = -ENODEV; + goto undo_platform_dev_alloc; + } + rc = platform_driver_register(&e90s_cpuidle_driver); + if (rc) + goto undo_platform_dev_add; + return 0; + +undo_platform_dev_add: + platform_device_del(pdev); +undo_platform_dev_alloc: + platform_device_put(pdev); + return rc; +} + +static void __exit e90s_cpuidle_exit(void) +{ + platform_driver_unregister(&e90s_cpuidle_driver); + if (pdev) { + platform_device_del(pdev); + platform_device_put(pdev); + } +} + +MODULE_AUTHOR("Andrey Kuyan "); +MODULE_DESCRIPTION("E90S cpu idle driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:e90s-cpuidle"); + +module_init(e90s_cpuidle_init); +module_exit(e90s_cpuidle_exit); diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index e591f56f98c0..723dbb3706fe 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -18,6 +18,15 @@ #include "cpuidle.h" +#if CONFIG_MCST +static unsigned int sysfs_switch = 1; +static int __init cpuidle_sysfs_setup(char *str) +{ + sysfs_switch = simple_strtol(str, NULL, 10); + return 1; +} +__setup("cpuidle_sysfs_switch=", cpuidle_sysfs_setup); +#else static unsigned int sysfs_switch; static int __init cpuidle_sysfs_setup(char *unused) { @@ -25,6 +34,7 @@ static int __init cpuidle_sysfs_setup(char *unused) return 1; } 
__setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup); +#endif static ssize_t show_available_governors(struct device *dev, struct device_attribute *attr, @@ -291,6 +301,23 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \ return sprintf(buf, "\n");\ return sprintf(buf, "%s\n", state->_name);\ } +#ifdef CONFIG_MCST +#define define_store_state_ui_function(_name) \ +static ssize_t store_state_##_name(struct cpuidle_state *state, \ + struct cpuidle_state_usage *state_usage, \ + const char *buf, size_t size) \ +{ \ + unsigned int value; \ + int err; \ + if (!capable(CAP_SYS_ADMIN)) \ + return -EPERM; \ + err = kstrtouint(buf, 0, &value); \ + if (err) \ + return err; \ + state->_name = value; \ + return size; \ +} +#endif define_show_state_function(exit_latency) define_show_state_function(target_residency) @@ -306,8 +333,13 @@ define_show_state_ull_function(below) define_one_state_ro(name, show_state_name); define_one_state_ro(desc, show_state_desc); -define_one_state_ro(latency, show_state_exit_latency); -define_one_state_ro(residency, show_state_target_residency); +#ifdef CONFIG_MCST +define_store_state_ui_function(target_residency) +define_store_state_ui_function(exit_latency) +define_one_state_rw(latency, show_state_exit_latency, store_state_exit_latency); +define_one_state_rw(residency, show_state_target_residency, + store_state_target_residency); +#endif define_one_state_ro(power, show_state_power_usage); define_one_state_ro(usage, show_state_usage); define_one_state_ro(time, show_state_time); diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 417dad635526..b30d051594b2 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -74,6 +74,13 @@ config EDAC_GHES In doubt, say 'Y'. +config EDAC_E2K + tristate "E2K (e1c+, e8c*)" + depends on E2K + help + Support for error detection and correction of DRAM ECC errors on + the E2K families (e1c+, e8c, e8c2) of memory controllers. 
+ config EDAC_AMD64 tristate "AMD64 (Opteron, Athlon64)" depends on AMD_NB && EDAC_DECODE_MCE diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index d77200c9680b..b1d34331c7d7 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_EDAC_GHES) += ghes_edac.o edac_mce_amd-y := mce_amd.o obj-$(CONFIG_EDAC_DECODE_MCE) += edac_mce_amd.o +obj-$(CONFIG_EDAC_E2K) += e2k_edac.o obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o obj-$(CONFIG_EDAC_CPC925) += cpc925_edac.o obj-$(CONFIG_EDAC_I5000) += i5000_edac.o diff --git a/drivers/edac/e2k_edac.c b/drivers/edac/e2k_edac.c new file mode 100644 index 000000000000..052f1dfcf854 --- /dev/null +++ b/drivers/edac/e2k_edac.c @@ -0,0 +1,510 @@ +/* + * EDAC ECC kernel module for e2k platforms + * e8c* (P1, P9), e16c, e2c3, e12c + * + * Author: Alexey Mukhin + * 2021 (c) MCST + */ + +#include +#include +#include +#include + +#include +#include + +#include "edac_module.h" + +#define E2K_EDAC_REVISION " Ver: 0.3" +#define E2K_EDAC_DRVNAME "e2k_edac" + +#define e2k_info(fmt, arg...) \ + edac_printk(KERN_INFO, "e2k", fmt, ##arg) + +#define e2k_warn(fmt, arg...) \ + edac_printk(KERN_WARNING, "e2k", "Warning: " fmt, ##arg) + +#define e2k_err(fmt, arg...) 
\ + edac_printk(KERN_ERR, "e2k", "Error: " fmt, ##arg) + +static LIST_HEAD(e2k_edac_list); + +/*********************** pci section *************************************/ + +/* not present */ + +/*********************** cpu section *************************************/ + +/* not present */ + +/*********************** ecc section *************************************/ + +#define DIMM_ON_CHANNEL 2 + +typedef e2k_mc_ecc_struct_t ecc_struct_t; + +static int use_cfg_reg = 1; +static int total_mc_num = 0; + +struct channel_info { + int arch_size;/* ctX */ + int num_side; /* pbmX */ +}; + +struct e2k_edac_dev { + struct list_head list; + int node; + int mcN; /* internal num MC on node*/ + int id; /* total num MC on machine */ + /**********************************/ + struct channel_info dimm[DIMM_ON_CHANNEL]; + int m_type; /* DDR3/DDR4 */ + int r_type; /* rm - register/unregister*/ + int freq; /* sf */ + int w_type; /* dqw - width type */ + /**********************************/ + u16 last_ecc_ce; + u8 last_ecc_ue; + /**********************************/ + struct platform_device *pdev; + struct mem_ctl_info *mci; +}; + +struct e2k_mci_priv { + struct e2k_edac_dev *dev; +}; + +static inline u32 ecc_get_error_cnt(ecc_struct_t *ecc, int node, int nr) +{ + ecc->E2K_MC_ECC_reg = sic_get_mc_ecc(node, nr); + return ecc->E2K_MC_ECC_secnt; +} + +static inline bool ecc_enabled(void) +{ + ecc_struct_t ecc; + ecc.E2K_MC_ECC_reg = sic_get_mc_ecc(0, 0); + return ecc.E2K_MC_ECC_ee; +} + +#define ecc_supported() HAS_MACHINE_L_SIC + +/* Check for ECC Errors */ +static void e2k_ecc_check(struct mem_ctl_info *mci) +{ + int node, i; + u32 cnt, current_cnt; + struct e2k_mci_priv *priv = mci->pvt_info; + struct e2k_edac_dev *dev; + char s[32]; + ecc_struct_t ecc; + + dev = priv->dev; + + node = dev->node; + i = dev->mcN; + cnt = ecc_get_error_cnt(&ecc, node, i); + current_cnt = cnt - dev->last_ecc_ce; +/* + e2k_info("node %d mc%d secnt %d of %d ue %d reg 0x%x.\n", + node, i, + 
ecc.E2K_MC_ECC_secnt, + ecc.E2K_MC_ECC_of, + ecc.E2K_MC_ECC_ue, + ecc.E2K_MC_ECC_reg); +*/ + if (!dev->last_ecc_ue && ecc.E2K_MC_ECC_ue) { + /* e2k_err("node %d mc%d: unrecoverable error.\n", node, i); */ + dev->last_ecc_ue = 1; + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, + 1, 0, 0, 0, + 0, 0, -1, + "E2K MC", ""); + } + + /* check old errors */ + if (current_cnt == 0) { + return; + } + + dev->last_ecc_ce = cnt; + + snprintf(s, 30, ""); + if (ecc.E2K_MC_ECC_of) { + snprintf(s, 30, "(error buffer overflow)"); + } + /* + e2k_warn("node %d mc%d: %d correctable errors. %s\n", + node, i, current_cnt, s); + */ + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, + current_cnt, 0, 0, 0, + 0, 0, -1, + "E2K MC", s); +} + + +static int init_csrows(struct e2k_edac_dev *dev) +{ + struct mem_ctl_info *mci = dev->mci; + struct dimm_info *dimm; + int i; + + for (i = 0; i < DIMM_ON_CHANNEL; i++) { + /* DIMM not present */ + if (!dev->dimm[i].num_side) { + continue; + } + + dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, + mci->n_layers, 0, i, 0); + if (dev->m_type == MEM_DDR4) { + dimm->mtype = dev->r_type ? MEM_RDDR4 : MEM_DDR4; + } else { + dimm->mtype = dev->r_type ? MEM_RDDR3 : MEM_DDR3; + } + dimm->edac_mode = EDAC_SECDED; + dimm->nr_pages = dev->dimm[i].arch_size; + dimm->grain = 32; /* ??? 
*/ + dimm->dtype = dev->w_type; + snprintf(dimm->label, sizeof(dimm->label), "DIMM%u", dev->id); + } + + return 0; +} + + +static struct e2k_edac_dev *alloc_e2k_dev(int node, int mcN) +{ + struct e2k_edac_dev *e2k_edac_dev; + + e2k_edac_dev = kzalloc(sizeof(*e2k_edac_dev), GFP_KERNEL); + if (!e2k_edac_dev) + return NULL; + + e2k_edac_dev->pdev = platform_device_register_simple(E2K_EDAC_DRVNAME, + total_mc_num, + NULL, 0); + if (IS_ERR(e2k_edac_dev->pdev)) { + kfree(e2k_edac_dev); + return NULL; + } + + e2k_edac_dev->node = node; + e2k_edac_dev->mcN = mcN; + e2k_edac_dev->id = total_mc_num; + + total_mc_num++; + + list_add_tail(&e2k_edac_dev->list, &e2k_edac_list); + + return e2k_edac_dev; +} + +static void free_e2k_dev(struct e2k_edac_dev *e2k_edac_dev) +{ + list_del(&e2k_edac_dev->list); + kfree(e2k_edac_dev); +} + +static void e2k_free_all_devices(void) +{ + struct e2k_edac_dev *e2k_edac_dev, *tmp; + + list_for_each_entry_safe(e2k_edac_dev, tmp, + &e2k_edac_list, list) { + free_e2k_dev(e2k_edac_dev); + } +} + +static inline int get_chip_CFG_size(int ct) +{ + switch (ct) { + case 0: return 256*8; + case 1: return 512*4; + case 2: return 1024*4; + case 3: return 2048*4; + case 4: return 4096*4; + } + return 8192*4; +} + +static inline int get_chip_OPMB_size(int ct) +{ + switch (ct) { + case 0: return 64*8; + case 1: return 128*8; + case 2: return 256*8; + case 3: return 512*8; + case 4: return 1024*8; + } + return 2048*8; +} + +static inline int get_chip_speed(int sf) +{ + switch (sf) { + case 0: return 1600; + case 1: return 1866; + case 2: return 2133; + case 3: return 2400; + case 4: return 2666; + case 5: return 3200; + } + return 3200; +} + +static inline int get_chip_bus_width(int dqw) +{ + switch (dqw) { + case 0: return DEV_X4; + case 1: return DEV_X8; + case 2: return DEV_X16; + case 3: return DEV_X32; + } + return DEV_UNKNOWN; +} + +static inline int get_chip_memory_type(void) +{ + if (machine.native_id == MACHINE_ID_E1CP || + machine.native_id == 
MACHINE_ID_E8C) { + return MEM_DDR3; + } + + /* for next machine type -- DDR4: + * machine.native_id == MACHINE_ID_E8C2 + * machine.native_id == MACHINE_ID_E12C + * machine.native_id == MACHINE_ID_E16C + * machine.native_id == MACHINE_ID_E2C3 + */ + return MEM_DDR4; +} + +static void fill_info_about_channel(struct e2k_edac_dev *dev) +{ + if (use_cfg_reg) { + e2k_mc_cfg_struct_t r; + r.E2K_MC_CFG_reg = sic_get_mc_cfg(dev->node, dev->mcN); + + dev->dimm[0].num_side = r.fields.pbm0; + dev->dimm[0].arch_size = 0; + if (r.fields.pbm0) { + dev->dimm[0].arch_size = + get_chip_CFG_size(r.fields.ct0); + } + dev->dimm[1].num_side = r.fields.pbm1; + dev->dimm[1].arch_size = 0; + if (r.fields.pbm1) { + dev->dimm[1].arch_size = + get_chip_CFG_size(r.fields.ct1); + } + dev->m_type = get_chip_memory_type(); + dev->r_type = r.fields.rm; + dev->w_type = get_chip_bus_width(r.fields.dqw); + dev->freq = get_chip_speed(r.fields.sf); + } else { + e2k_mc_opmb_struct_t r; + r.E2K_MC_OPMB_reg = sic_get_mc_opmb(dev->node, dev->mcN); + + dev->dimm[0].num_side = r.fields.pbm0; + dev->dimm[0].arch_size = 0; + if (r.fields.pbm0) { + dev->dimm[0].arch_size = + get_chip_OPMB_size(r.fields.ct0); + } + dev->dimm[1].num_side = r.fields.pbm1; + dev->dimm[1].arch_size = 0; + if (r.fields.pbm1) { + dev->dimm[1].arch_size = + get_chip_OPMB_size(r.fields.ct1); + } + dev->m_type = get_chip_memory_type(); + dev->r_type = r.fields.rm; + dev->w_type = DEV_X4; /* no data */ + dev->freq = 0; /* no data */ + } +} + + +static int e2k_register_mci(struct e2k_edac_dev *e2k_edac_dev) +{ + struct mem_ctl_info *mci; + struct edac_mc_layer layers[2]; + struct platform_device *pdev = e2k_edac_dev->pdev; + struct e2k_mci_priv *priv; + int rc, ret = -ENXIO; + + /* allocate & init EDAC MC data structure */ + layers[0].type = EDAC_MC_LAYER_CHANNEL; + layers[0].size = 1; + layers[0].is_virt_csrow = false; + layers[1].type = EDAC_MC_LAYER_SLOT; + layers[1].size = 2; + layers[1].is_virt_csrow = true; + + mci = 
edac_mc_alloc(e2k_edac_dev->id, ARRAY_SIZE(layers), layers, + sizeof(struct e2k_mci_priv)); + if (!mci) { + ret = -ENOMEM; + goto err; + } + + e2k_edac_dev->mci = mci; + + mci->pdev = &pdev->dev; + if (e2k_edac_dev->m_type == MEM_DDR4) { + mci->mtype_cap = MEM_FLAG_DDR4; + } else { + mci->mtype_cap = MEM_FLAG_DDR3; + } + mci->edac_ctl_cap = EDAC_FLAG_SECDED; + mci->edac_cap = EDAC_FLAG_SECDED; + mci->scrub_cap = SCRUB_FLAG_HW_SRC; + mci->scrub_mode = SCRUB_HW_SRC; + mci->mod_name = "E2K ECC"; + mci->ctl_name = dev_name(&pdev->dev); + mci->dev_name = dev_name(&pdev->dev); + mci->edac_check = e2k_ecc_check; + mci->ctl_page_to_phys = NULL; + priv = mci->pvt_info; + priv->dev = e2k_edac_dev; /* save ptr to myself */ + + /* directly setup polling by default, very strange rule */ + edac_op_state = EDAC_OPSTATE_POLL; + + rc = init_csrows(e2k_edac_dev); + if (rc) { + e2k_err("failed to init csrows\n"); + ret = rc; + goto err_free; + } + + /* register with edac core */ + rc = edac_mc_add_mc(mci); + if (rc) { + e2k_err("failed to register with EDAC core\n"); + ret = rc; + goto err_free; + } + + return 0; + +err_free: + edac_mc_free(mci); +err: + return ret; +} + +static int e2k_unregister_mci(struct e2k_edac_dev *e2k_edac_dev) +{ + struct mem_ctl_info *mci = e2k_edac_dev->mci; + + mci = edac_mc_del_mc(mci->pdev); + if (mci) { + edac_mc_free(mci); + } + + platform_device_unregister(e2k_edac_dev->pdev); + + return 0; +} + +/*********************** main section ************************************/ + +static inline int cpu_supported(void) +{ + if (machine.native_id != MACHINE_ID_E1CP && + machine.native_id != MACHINE_ID_E8C && + machine.native_id != MACHINE_ID_E8C2 && + machine.native_id != MACHINE_ID_E12C && + machine.native_id != MACHINE_ID_E16C && + machine.native_id != MACHINE_ID_E2C3) + return 0; + + return 1; +} + +static int __init e2k_edac_init(void) +{ + int ret = 0, i; + const char *owner; + struct e2k_edac_dev *e2k_edac_dev; + int node; + + owner = edac_get_owner(); 
+ if (owner && + strncmp(owner, E2K_EDAC_DRVNAME, sizeof(E2K_EDAC_DRVNAME))) { + e2k_info("E2K EDAC driver " E2K_EDAC_REVISION " - busy\n"); + return -EBUSY; + } + + e2k_info("E2K EDAC driver " E2K_EDAC_REVISION "\n"); + + if (!cpu_supported()) { + e2k_info("CPU not supported\n"); + return -ENODEV; + } + + if (!ecc_supported()) { + e2k_info("ECC not supported\n"); + return -ENODEV; + } + + if (!ecc_enabled()) { + e2k_info("ECC not enabled\n"); + return -ENODEV; + } + + if (machine.native_id == MACHINE_ID_E1CP || + machine.native_id == MACHINE_ID_E8C) { + use_cfg_reg = 0; + } + + for_each_online_node(node) { + for (i = 0; i < SIC_MC_COUNT; i++) { + e2k_edac_dev = alloc_e2k_dev(node, i); + if (!e2k_edac_dev) { + e2k_err("alloc e2k err\n"); + ret = -ENOMEM; + goto err; + } + fill_info_about_channel(e2k_edac_dev); + } + } + + list_for_each_entry(e2k_edac_dev, &e2k_edac_list, list) { + ret = e2k_register_mci(e2k_edac_dev); + if (ret) { + e2k_err("register mci err\n"); + goto err; + } + } + + return 0; +err: + list_for_each_entry(e2k_edac_dev, &e2k_edac_list, list) { + e2k_unregister_mci(e2k_edac_dev); + } + + return ret; +} + +static void __exit e2k_edac_exit(void) +{ + struct e2k_edac_dev *e2k_edac_dev; + + list_for_each_entry(e2k_edac_dev, &e2k_edac_list, list) { + e2k_unregister_mci(e2k_edac_dev); + } + e2k_free_all_devices(); +} + +module_init(e2k_edac_init); +module_exit(e2k_edac_exit); + +MODULE_AUTHOR("Alexey Mukhin, MCST"); +MODULE_DESCRIPTION("edac ECC driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(E2K_EDAC_REVISION); diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index abdf448b11a3..a9dfd47abe9f 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -17,6 +17,9 @@ #include #include #include +#ifdef CONFIG_MCST +#include +#endif #include #include #include @@ -32,6 +35,10 @@ #include "gpiolib-of.h" #include "gpiolib-acpi.h" +#ifdef CONFIG_MCST +#include +#endif + #define CREATE_TRACE_POINTS #include diff --git 
a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 1341939cd39c..656cdc805330 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -207,6 +207,7 @@ config DRM_RADEON tristate "ATI Radeon" depends on DRM && PCI && MMU depends on AGP || !AGP + select FB_SYS_FOPS select FW_LOADER select DRM_KMS_HELPER select DRM_TTM @@ -254,6 +255,13 @@ config DRM_VGEM as used by Mesa's software renderer for enhanced performance. If M is selected the module will be called vgem. +config DRM_VIVANTE + tristate "Vivante GCCore" + depends on DRM + help + Choose this option if you have a Vivante graphics card. + If M is selected, the module will be called vivante. + config DRM_VKMS tristate "Virtual KMS (EXPERIMENTAL)" depends on DRM diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 82ff826b33cc..6105142084b1 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -98,7 +98,8 @@ obj-$(CONFIG_DRM_MSM) += msm/ obj-$(CONFIG_DRM_TEGRA) += tegra/ obj-$(CONFIG_DRM_STM) += stm/ obj-$(CONFIG_DRM_STI) += sti/ -obj-$(CONFIG_DRM_IMX) += imx/ +obj-$(CONFIG_DRM_VIVANTE) += vivante/ +obj-y += imx/ obj-$(CONFIG_DRM_INGENIC) += ingenic/ obj-$(CONFIG_DRM_MEDIATEK) += mediatek/ obj-$(CONFIG_DRM_MESON) += meson/ @@ -120,3 +121,4 @@ obj-$(CONFIG_DRM_LIMA) += lima/ obj-$(CONFIG_DRM_PANFROST) += panfrost/ obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/ obj-$(CONFIG_DRM_MCDE) += mcde/ + diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index fb47ddc6f7f4..b34eae0f9444 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1935,6 +1935,10 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct dma_fence *f = NULL; int r; +#ifdef CONFIG_MCST + mutex_lock(&adev->lock_reset); +#endif + while (!list_empty(&vm->freed)) { mapping = list_first_entry(&vm->freed, struct amdgpu_bo_va_mapping, list); @@ -1950,6 +1954,9 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, 
amdgpu_vm_free_mapping(adev, vm, mapping, f); if (r) { dma_fence_put(f); +#ifdef CONFIG_MCST + mutex_unlock(&adev->lock_reset); +#endif return r; } } @@ -1961,6 +1968,10 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, dma_fence_put(f); } +#ifdef CONFIG_MCST + mutex_unlock(&adev->lock_reset); +#endif + return 0; } diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 6ed6ff49efc0..087b33b0eaa9 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -58,7 +58,12 @@ static struct drm_driver driver; .driver_data = (unsigned long) info } static const struct pci_device_id pciidlist[] = { +#ifdef CONFIG_MCST /* our chip shows PCI_BASE_CLASS_MULTIMEDIA */ +#define PCI_VENDOR_ID_ASPEED 0x1a03 + { PCI_VDEVICE(ASPEED, PCI_CHIP_AST2000) }, +#else AST_VGA_DEVICE(PCI_CHIP_AST2000, NULL), +#endif AST_VGA_DEVICE(PCI_CHIP_AST2100, NULL), /* AST_VGA_DEVICE(PCI_CHIP_AST1180, NULL), - don't bind to 1180 for now */ {0, 0, 0}, diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 41bf4aaff21c..740e16066c80 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -6,6 +6,9 @@ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. 
* Copyright (C) 2010, Guennadi Liakhovetski */ +#ifdef CONFIG_MCST +#define DEBUG +#endif #include #include #include @@ -31,7 +34,7 @@ #include #include #include - +#include #include "dw-hdmi-audio.h" #include "dw-hdmi-cec.h" #include "dw-hdmi.h" @@ -140,8 +143,10 @@ struct dw_hdmi { struct platform_device *audio; struct platform_device *cec; struct device *dev; +#ifndef CONFIG_MCST struct clk *isfr_clk; struct clk *iahb_clk; +#endif struct clk *cec_clk; struct dw_hdmi_i2c *i2c; @@ -202,12 +207,12 @@ struct dw_hdmi { (HDMI_PHY_RX_SENSE0 | HDMI_PHY_RX_SENSE1 | \ HDMI_PHY_RX_SENSE2 | HDMI_PHY_RX_SENSE3) -static inline void hdmi_writeb(struct dw_hdmi *hdmi, u8 val, int offset) +static inline void __hdmi_writeb(struct dw_hdmi *hdmi, u8 val, int offset) { regmap_write(hdmi->regm, offset << hdmi->reg_shift, val); } -static inline u8 hdmi_readb(struct dw_hdmi *hdmi, int offset) +static inline u8 __hdmi_readb(struct dw_hdmi *hdmi, int offset) { unsigned int val = 0; @@ -216,16 +221,43 @@ static inline u8 hdmi_readb(struct dw_hdmi *hdmi, int offset) return val; } -static void hdmi_modb(struct dw_hdmi *hdmi, u8 data, u8 mask, unsigned reg) +static void __hdmi_modb(struct dw_hdmi *hdmi, u8 data, u8 mask, unsigned reg) { regmap_update_bits(hdmi->regm, reg << hdmi->reg_shift, mask, data); } -static void hdmi_mask_writeb(struct dw_hdmi *hdmi, u8 data, unsigned int reg, - u8 shift, u8 mask) -{ - hdmi_modb(hdmi, data << shift, mask, reg); -} +#ifdef DEBUG +#define hdmi_writeb(_hdmi, _val, _offset) do { \ + unsigned _val2 = _val; \ + DRM_DEBUG("%s: wr: %s (%x): 0x%02x\n", dev_name(hdmi->dev), \ + #_offset, _offset, _val2); \ + __hdmi_writeb(_hdmi, _val, _offset); \ +} while (0) + +#define hdmi_readb(_hdmi, _offset) \ +({ \ + u8 _val = __hdmi_readb(_hdmi, _offset); \ + DRM_DEBUG("%s: rd: %s (%x): 0x%02x\n", dev_name(hdmi->dev), \ + #_offset, _offset, _val); \ + _val; \ +}) + +#define hdmi_modb(hdmi, data, mask, _reg) do { \ + unsigned _val2 = data; \ + unsigned _m = mask; \ + 
DRM_DEBUG("%s: md: %s (%x): 0x%02x (mask %x)\n", dev_name(hdmi->dev),\ + #_reg, _reg, _val2, _m); \ + __hdmi_modb(hdmi, data, _m, _reg); \ +} while (0) + +#else +#define hdmi_writeb __hdmi_writeb +#define hdmi_readb __hdmi_readb +#define hdmi_modb __hdmi_modb +#endif + +#define hdmi_mask_writeb(__hdmi, __data, __reg, __shift, __mask) \ + hdmi_modb(__hdmi, (__data) << (__shift), __mask, __reg) static void dw_hdmi_i2c_init(struct dw_hdmi *hdmi) { @@ -601,9 +633,13 @@ static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi, n = hdmi_compute_n(sample_rate, pixel_clk); config3 = hdmi_readb(hdmi, HDMI_CONFIG3_ID); - +#ifdef CONFIG_MCST + /* CTS needs to be computed for AHB & GPAUD */ + if (config3 & (HDMI_CONFIG3_AHBAUDDMA | HDMI_CONFIG3_GPAUD)) { +#else /* Only compute CTS when using internal AHB audio */ if (config3 & HDMI_CONFIG3_AHBAUDDMA) { +#endif /* * Compute the CTS value from the N value. Note that CTS and N * can be up to 20 bits in total, so we need 64-bit math. Also @@ -627,6 +663,10 @@ static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi, hdmi->audio_n = n; hdmi->audio_cts = cts; hdmi_set_cts_n(hdmi, cts, hdmi->audio_enable ? 
n : 0); +#ifdef CONFIG_MCST /*FIXME:*/ + if (config3 & HDMI_CONFIG3_GPAUD) /* i2s: enable 2 channels */ + hdmi_writeb(hdmi, 3, HDMI_GP_CONF1); +#endif spin_unlock_irq(&hdmi->audio_lock); } @@ -1144,31 +1184,46 @@ static bool hdmi_phy_wait_i2c_done(struct dw_hdmi *hdmi, int msec) { u32 val; - while ((val = hdmi_readb(hdmi, HDMI_IH_I2CMPHY_STAT0) & 0x3) == 0) { + while ((val = __hdmi_readb(hdmi, HDMI_IH_I2CMPHY_STAT0) & 0x3) == 0) { if (msec-- == 0) return false; udelay(1000); } - hdmi_writeb(hdmi, val, HDMI_IH_I2CMPHY_STAT0); + __hdmi_writeb(hdmi, val, HDMI_IH_I2CMPHY_STAT0); return true; } -void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data, +void __dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data, unsigned char addr) { - hdmi_writeb(hdmi, 0xFF, HDMI_IH_I2CMPHY_STAT0); - hdmi_writeb(hdmi, addr, HDMI_PHY_I2CM_ADDRESS_ADDR); - hdmi_writeb(hdmi, (unsigned char)(data >> 8), + __hdmi_writeb(hdmi, 0xFF, HDMI_IH_I2CMPHY_STAT0); + __hdmi_writeb(hdmi, addr, HDMI_PHY_I2CM_ADDRESS_ADDR); + __hdmi_writeb(hdmi, (unsigned char)(data >> 8), HDMI_PHY_I2CM_DATAO_1_ADDR); - hdmi_writeb(hdmi, (unsigned char)(data >> 0), + __hdmi_writeb(hdmi, (unsigned char)(data >> 0), HDMI_PHY_I2CM_DATAO_0_ADDR); - hdmi_writeb(hdmi, HDMI_PHY_I2CM_OPERATION_ADDR_WRITE, + __hdmi_writeb(hdmi, HDMI_PHY_I2CM_OPERATION_ADDR_WRITE, HDMI_PHY_I2CM_OPERATION_ADDR); - hdmi_phy_wait_i2c_done(hdmi, 1000); + WARN_ON(!hdmi_phy_wait_i2c_done(hdmi, 1000)); } -EXPORT_SYMBOL_GPL(dw_hdmi_phy_i2c_write); +EXPORT_SYMBOL_GPL(__dw_hdmi_phy_i2c_write); +#ifdef CONFIG_MCST +unsigned short __dw_hdmi_phy_i2c_read(struct dw_hdmi *hdmi, unsigned char addr) +{ + unsigned short v; + __hdmi_writeb(hdmi, 0xFF, HDMI_IH_I2CMPHY_STAT0); + __hdmi_writeb(hdmi, addr, HDMI_PHY_I2CM_ADDRESS_ADDR); + __hdmi_writeb(hdmi, HDMI_PHY_I2CM_OPERATION_ADDR_READ, + HDMI_PHY_I2CM_OPERATION_ADDR); + WARN_ON(!hdmi_phy_wait_i2c_done(hdmi, 1000)); + v = __hdmi_readb(hdmi, HDMI_PHY_I2CM_DATAI_0_ADDR); + v |= 
__hdmi_readb(hdmi, HDMI_PHY_I2CM_DATAI_1_ADDR) << 8; + return v; +} +EXPORT_SYMBOL_GPL(__dw_hdmi_phy_i2c_read); +#endif /* Filter out invalid setups to avoid configuring SCDC and scrambling */ static bool dw_hdmi_support_scdc(struct dw_hdmi *hdmi) { @@ -1279,8 +1334,18 @@ static void dw_hdmi_phy_sel_interface_control(struct dw_hdmi *hdmi, u8 enable) void dw_hdmi_phy_reset(struct dw_hdmi *hdmi) { /* PHY reset. The reset signal is active high on Gen2 PHYs. */ +#ifdef CONFIG_MCST + if (hdmi->version == 0x200a) { /* e1c+ */ + hdmi_writeb(hdmi, 0, HDMI_MC_PHYRSTZ); + hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_PHYRSTZ, HDMI_MC_PHYRSTZ); + } else { + hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_PHYRSTZ, HDMI_MC_PHYRSTZ); + hdmi_writeb(hdmi, 0, HDMI_MC_PHYRSTZ); + } +#else hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_PHYRSTZ, HDMI_MC_PHYRSTZ); hdmi_writeb(hdmi, 0, HDMI_MC_PHYRSTZ); +#endif } EXPORT_SYMBOL_GPL(dw_hdmi_phy_reset); @@ -1314,7 +1379,6 @@ static void dw_hdmi_phy_power_off(struct dw_hdmi *hdmi) val = hdmi_readb(hdmi, HDMI_PHY_STAT0); if (!(val & HDMI_PHY_TX_PHY_LOCK)) break; - usleep_range(1000, 2000); } @@ -1323,6 +1387,10 @@ static void dw_hdmi_phy_power_off(struct dw_hdmi *hdmi) else dev_dbg(hdmi->dev, "PHY powered down in %u iterations\n", i); +#ifdef CONFIG_MCST + /* gen2 tx power off */ + /*dw_hdmi_phy_gen2_txpwron(hdmi, 0);*/ +#endif dw_hdmi_phy_gen2_pddq(hdmi, 1); } @@ -1419,6 +1487,352 @@ static int hdmi_phy_configure_dwc_hdmi_3d_tx(struct dw_hdmi *hdmi, return 0; } +#ifdef CONFIG_MCST +static const unsigned hdmi_clock[] = { + 13.5 * 1000 * 1000, + 25.175 * 1000 * 1000, + 27 * 1000 * 1000, + 50.35 * 1000 * 1000, + 54 * 1000 * 1000, + 59.4 * 1000 * 1000, + 72 * 1000 * 1000, + 74.25 * 1000 * 1000, + 100.7 * 1000 * 1000, + 108 * 1000 * 1000, + 118.8 * 1000 * 1000, + 144 * 1000 * 1000, + 148.5 * 1000 * 1000, + 216 * 1000 * 1000, + 237.6 * 1000 * 1000, + 297 * 1000 * 1000, + 340 * 1000 * 1000, +}; + +enum { + HDMI_PHY_PREP_DIV, + HDMI_PHY_MPLL_N_CNTRL, + HDMI_PHY_PLL_N_CNTRL, + 
HDMI_PHY_PIXEL_REP, + HDMI_PHY_CLR_DPTH, + HDMI_PHY_PLL_PROP_CNTRL, + HDMI_PHY_PLL_INT_CNTRL, + HDMI_PHY_PLL_GMP_CNTRL, + HDMI_PHY_MPLL_PROP_CNTRL, + HDMI_PHY_MPLL_INT_CNTRL, + HDMI_PHY_MPLL_GMP_CNTRL, + + HDMI_PHY_NR +}; + +static const u8 hdmi_phy_signals[ARRAY_SIZE(hdmi_clock)][2][4][HDMI_PHY_NR] = { +{ + { + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + }, { + /*REP2_8BPP_13p5*/ { 0x0, 0x3, 0x3, 0x0, 0x3, 0x3, 0x2, 0x0, 0x4, 0x3, 0x0 }, + /*REP2_10BPP_13p5*/ { 0x1, 0x3, 0x3, 0x4, 0x1, 0x3, 0x2, 0x0, 0x4, 0x3, 0x0 }, + /*REP2_12BPP_13p5*/ { 0x2, 0x3, 0x3, 0x4, 0x2, 0x3, 0x2, 0x0, 0x3, 0x1, 0x0 }, + /*REP2_16BPP_13p5*/ { 0x3, 0x2, 0x3, 0x1, 0x3, 0x3, 0x2, 0x0, 0x4, 0x7, 0x1 }, + }, +}, { + { + /*REP1_8BPP_25p175*/ { 0x0, 0x3, 0x3, 0x0, 0x0, 0x3, 0x2, 0x0, 0x4, 0x3, 0x0 }, + /*REP1_10BPP_25p175*/{ 0x1, 0x3, 0x3, 0x0, 0x1, 0x3, 0x2, 0x0, 0x4, 0x3, 0x0 }, + /*REP1_12BPP_25p175*/{ 0x2, 0x3, 0x3, 0x0, 0x2, 0x3, 0x2, 0x0, 0x3, 0x1, 0x0 }, + /*REP1_16BPP_25p175*/{ 0x3, 0x2, 0x2, 0x0, 0x3, 0x3, 0x2, 0x1, 0x4, 0x7, 0x1 }, + }, { + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + } +}, { + { + /*REP1_8BPP_27*/ { 0x0, 0x3, 0x3, 0x0, 0x0, 0x3, 0x2, 0x0, 0x4, 0x3, 0x0 }, + /*REP1_10BPP_27*/ { 0x1, 0x3, 0x3, 0x0, 0x1, 0x3, 0x2, 0x0, 0x4, 0x3, 0x0 }, + /*REP1_12BPP_27*/ { 0x2, 0x3, 0x3, 0x0, 0x2, 0x3, 0x2, 0x0, 0x3, 0x1, 0x0 }, + /*REP1_16BPP_27*/ { 0x3, 0x2, 0x2, 0x0, 0x3, 0x3, 0x2, 0x1, 0x4, 0x7, 0x1 }, + }, { + /*REP2_8BPP_27*/ { 0x0, 0x2, 0x2, 
0x0, 0x3, 0x3, 0x2, 0x1, 0x4, 0x7, 0x1 }, + /*REP2_10BPP_27*/ { 0x1, 0x2, 0x3, 0x1, 0x1, 0x3, 0x2, 0x0, 0x4, 0x7, 0x1 }, + /*REP2_12BPP_27*/ { 0x2, 0x2, 0x3, 0x1, 0x2, 0x3, 0x2, 0x0, 0x3, 0x6, 0x1 }, + /*REP2_16BPP_27*/ { 0x3, 0x1, 0x2, 0x1, 0x3, 0x3, 0x2, 0x1, 0x5, 0x1, 0x2 }, + } +}, { + { + /*REP1_8BPP_50p35*/ { 0x0, 0x2, 0x2, 0x0, 0x0, 0x3, 0x2, 0x1, 0x4, 0x7, 0x1 }, + /*REP1_10BPP_50p35*/ { 0x1, 0x2, 0x2, 0x0, 0x1, 0x3, 0x2, 0x1, 0x4, 0x7, 0x1 }, + /*REP1_12BPP_50p35*/ { 0x2, 0x2, 0x2, 0x0, 0x2, 0x3, 0x2, 0x1, 0x3, 0x6, 0x1 }, + /*REP1_16BPP_50p35*/ { 0x3, 0x1, 0x1, 0x0, 0x3, 0x3, 0x2, 0x2, 0x5, 0x1, 0x2 }, + }, { + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + } +}, { + { + /*REP1_8BPP_54*/ { 0x0, 0x2, 0x2, 0x0, 0x0, 0x3, 0x2, 0x1, 0x4, 0x7, 0x1 }, + /*REP1_10BPP_54*/ { 0x1, 0x2, 0x2, 0x0, 0x1, 0x3, 0x2, 0x1, 0x4, 0x7, 0x1 }, + /*REP1_12BPP_54*/ { 0x2, 0x2, 0x2, 0x0, 0x2, 0x3, 0x2, 0x1, 0x3, 0x6, 0x1 }, + /*REP1_16BPP_54*/ { 0x3, 0x1, 0x1, 0x0, 0x3, 0x3, 0x2, 0x2, 0x5, 0x1, 0x2 }, + }, { + /*REP2_8BPP_54*/ { 0x0, 0x1, 0x1, 0x0, 0x3, 0x3, 0x2, 0x2, 0x5, 0x1, 0x2 }, + /*REP2_10BPP_54*/ { 0x1, 0x1, 0x1, 0x0, 0x3, 0x3, 0x2, 0x2, 0x5, 0x1, 0x2 }, + /*REP2_12BPP_54*/ { 0x2, 0x1, 0x1, 0x0, 0x3, 0x3, 0x2, 0x2, 0x5, 0x1, 0x2 }, + /*REP2_16BPP_54*/ { 0x3, 0x1, 0x1, 0x0, 0x3, 0x3, 0x2, 0x2, 0x5, 0x1, 0x2 }, + } +}, { + { + /*REP1_8BPP_59p4*/ { 0x0, 0x2, 0x2, 0x0, 0x0, 0x3, 0x2, 0x1, 0x4, 0x7, 0x1 }, + /*REP1_10BPP_59p4*/ { 0x1, 0x2, 0x2, 0x0, 0x1, 0x3, 0x2, 0x1, 0x3, 0x6, 0x1 }, + /*REP1_12BPP_59p4*/ { 0x2, 0x2, 0x2, 0x0, 0x2, 0x3, 0x2, 0x1, 0x3, 0x6, 0x1 }, + /*REP1_16BPP_59p4*/ { 0x3, 0x1, 0x1, 0x0, 0x3, 0x3, 0x2, 0x2, 0x5, 0x1, 0x2 }, + }, { + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + } +}, { + { + /*REP1_8BPP_72*/ { 0x0, 0x2, 0x2, 0x0, 0x0, 0x3, 0x2, 0x1, 0x3, 0x6, 0x1 }, + /*REP1_10BPP_72*/ { 0x1, 0x2, 0x2, 0x0, 0x1, 0x3, 0x2, 0x1, 0x3, 0x6, 0x1 }, + /*REP1_12BPP_72*/ { 0x2, 0x1, 0x1, 0x0, 0x2, 0x3, 0x2, 0x2, 0x5, 0x1, 0x2 }, + /*REP1_16BPP_72*/ { 0x3, 0x1, 0x1, 0x0, 0x3, 0x3, 0x2, 0x2, 0x3, 0x6, 0x2 }, + }, { + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + } +}, { + { + /*REP1_8BPP_74p25*/ { 0x0, 0x2, 0x2, 0x0, 0x0, 0x3, 0x2, 0x1, 0x3, 0x6, 0x1 }, + /*REP1_10BPP_74p25*/ { 0x1, 0x1, 0x2, 0x0, 0x1, 0x3, 0x2, 0x2, 0x5, 0x7, 0x2 }, + /*REP1_12BPP_74p25*/ { 0x2, 0x1, 0x2, 0x0, 0x2, 0x3, 0x2, 0x2, 0x5, 0x1, 0x2 }, + /*REP1_16BPP_74p25*/ { 0x3, 0x1, 0x2, 0x0, 0x3, 0x3, 0x2, 0x2, 0x3, 0x6, 0x2 }, + }, { + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + } +}, { + { + /*REP1_8BPP_100p7*/ { 0x0, 0x1, 0x1, 0x0, 0x0, 0x3, 0x2, 0x2, 0x5, 0x1, 0x2 }, + /*REP1_10BPP_100p7*/ { 0x1, 0x1, 0x1, 0x0, 0x1, 0x3, 0x2, 0x2, 0x5, 0x1, 0x2 }, + /*REP1_12BPP_100p7*/ { 0x2, 0x1, 0x1, 0x0, 0x2, 0x3, 0x2, 0x2, 0x3, 0x6, 0x2 }, + /*REP1_16BPP_100p7*/ { 0x3, 0x0, 0x0, 0x0, 0x3, 0x3, 0x2, 0x3, 0x5, 0x1, 0x3 }, + }, { + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + } +}, { + { + /*REP1_8BPP_108*/ { 0x0, 0x1, 0x1, 0x0, 0x0, 0x3, 0x2, 0x2, 0x5, 0x1, 0x2 }, + /*REP1_10BPP_108*/ { 0x1, 0x1, 0x1, 0x0, 0x1, 0x3, 0x2, 0x2, 0x5, 0x1, 0x2 }, + /*REP1_12BPP_108*/ { 0x2, 0x1, 0x1, 0x0, 0x2, 0x3, 0x2, 0x2, 0x3, 0x6, 0x2 }, + /*REP1_16BPP_108*/ { 0x3, 0x0, 0x0, 0x0, 0x3, 0x3, 0x2, 0x3, 0x5, 0x1, 0x3 }, + }, { + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + } +}, { + { + /*REP1_8BPP_118p8*/ { 0x0, 0x1, 0x1, 0x0, 0x0, 0x3, 0x2, 0x2, 0x5, 0x1, 0x2 }, + /*REP1_10BPP_118p8*/ { 0x1, 0x1, 0x1, 0x0, 0x1, 0x3, 0x2, 0x2, 0x5, 0x1, 0x2 }, + /*REP1_12BPP_118p8*/ { 0x2, 0x1, 0x1, 0x0, 0x2, 0x3, 0x2, 0x2, 0x3, 0x6, 0x2 }, + /*REP1_16BPP_118p8*/ { 0x3, 0x0, 0x0, 0x0, 0x3, 0x3, 0x2, 0x3, 0x5, 0x1, 0x3 }, + }, { + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + } +}, { + { + /*REP1_8BPP_144*/ { 0x0, 0x1, 0x1, 0x0, 0x0, 0x3, 0x2, 0x2, 0x5, 0x1, 0x2 }, + /*REP1_10BPP_144*/ { 0x1, 0x1, 0x1, 0x0, 0x1, 0x3, 0x2, 0x2, 0x5, 0x1, 0x2 }, + /*REP1_12BPP_144*/ { 0x2, 0x0, 0x0, 0x0, 0x2, 0x3, 0x2, 0x3, 0x3, 0x7, 0x3 }, + /*REP1_16BPP_144*/ { 0x3, 0x0, 0x0, 0x0, 0x3, 0x3, 0x2, 0x3, 0x5, 0x1, 0x3 }, + }, { + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not 
supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + } +}, { + { + /*REP1_8BPP_148p5*/ { 0x0, 0x1, 0x1, 0x0, 0x0, 0x3, 0x2, 0x2, 0x3, 0x6, 0x2 }, + /*REP1_10BPP_148p5*/ { 0x1, 0x0, 0x0, 0x0, 0x1, 0x3, 0x2, 0x3, 0x5, 0x7, 0x3 }, + /*REP1_12BPP_148p5*/ { 0x2, 0x0, 0x0, 0x0, 0x2, 0x3, 0x2, 0x3, 0x5, 0x1, 0x3 }, + /*REP1_16BPP_148p5*/ { 0x3, 0x0, 0x0, 0x0, 0x3, 0x3, 0x2, 0x3, 0x3, 0x7, 0x3 }, + }, { + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + } +}, { + { + /*REP1_8BPP_216*/ { 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x2, 0x3, 0x3, 0x6, 0x3 }, + /*REP1_10BPP_216*/ { 0x1, 0x0, 0x0, 0x0, 0x1, 0x3, 0x2, 0x3, 0x3, 0x6, 0x3 }, + /*REP1_12BPP_216*/ { 0x2, 0x0, 0x0, 0x0, 0x2, 0x3, 0x2, 0x3, 0x5, 0x1, 0x3 }, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + }, { + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + } +}, { + { + /*REP1_8BPP_237p6*/ { 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x2, 0x3, 0x3, 0x6, 0x3 }, + /*REP1_10BPP_237p6*/ { 0x1, 0x0, 0x0, 0x0, 0x1, 0x3, 0x2, 0x3, 0x3, 0x6, 0x3 }, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + }, { + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + } +}, { + { + /*REP1_8BPP_297*/ { 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x2, 0x3, 0x3, 0x7, 0x3 }, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + }, { + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + } +}, { + { + /*REP1_8BPP_340*/ { 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x2, 0x3, 0x3, 0x7, 0x3 }, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + }, { + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + /*not supported*/ { 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + } +}, +}; + +enum { + HDMI_PHY_CDEPTH_REP_DIV = 0x6, + HDMI_PHY_CURRCTRL = 0x10, + HDMI_PHY_GMPCTRL = 0x15, + HDMI_PHY_PWRCTRL = 0x0 +}; +#define CK_SYMON (1 << 0) +#define SYMON (1 << 1) +#define TRAON (1 << 2) +#define TRBON (1 << 3) + +static int hdmi_plls_configure(struct dw_hdmi *hdmi, + unsigned char res) +{ + unsigned rep = hdmi->hdmi_data.video_mode.mpixelrepetitionoutput; + unsigned clk = hdmi->hdmi_data.video_mode.mpixelclock; + unsigned short val; + int i, r; + 
unsigned clkk = clk / 1000; + unsigned sup_tx_lvl = 0, sup_ck_lvl = 0, + cksymtxctrl = 0; + + + switch (res) { + case 8: + r = 0; + break; + case 10: + r = 1; + break; + case 12: + r = 2; + break; + case 16: + r = 3; + break; + default: + goto err; + } + for (i = 0; clk > hdmi_clock[i] && i < ARRAY_SIZE(hdmi_clock); i++) + ; + + if (i == ARRAY_SIZE(hdmi_clock)) + goto err; + + val = (u16)hdmi_phy_signals[i][rep][r][HDMI_PHY_CLR_DPTH] | + (hdmi_phy_signals[i][rep][r][HDMI_PHY_PIXEL_REP] << 2) | + (hdmi_phy_signals[i][rep][r][HDMI_PHY_PLL_N_CNTRL] << 5) | + (hdmi_phy_signals[i][rep][r][HDMI_PHY_MPLL_N_CNTRL] << 7) | + (hdmi_phy_signals[i][rep][r][HDMI_PHY_PREP_DIV] << 13); + dw_hdmi_phy_i2c_write(hdmi, val, HDMI_PHY_CDEPTH_REP_DIV); + + val = (u16)hdmi_phy_signals[i][rep][r][HDMI_PHY_PLL_INT_CNTRL] | + (hdmi_phy_signals[i][rep][r][HDMI_PHY_PLL_PROP_CNTRL] << 3) | + (hdmi_phy_signals[i][rep][r][HDMI_PHY_MPLL_INT_CNTRL] << 6) | + (hdmi_phy_signals[i][rep][r][HDMI_PHY_MPLL_PROP_CNTRL] << 9); + dw_hdmi_phy_i2c_write(hdmi, val, HDMI_PHY_CURRCTRL); + + val = (u16)hdmi_phy_signals[i][rep][r][HDMI_PHY_MPLL_GMP_CNTRL] | + (hdmi_phy_signals[i][rep][r][HDMI_PHY_PLL_GMP_CNTRL] << 2); + dw_hdmi_phy_i2c_write(hdmi, val, HDMI_PHY_GMPCTRL); + + dev_dbg(hdmi->dev, "Pixel clock %d, res %d, pixel repetition %d\n", + clk, res, rep); + + dw_hdmi_phy_i2c_write(hdmi, 0x0000, 0x13); /* PLLPHBYCTRL */ + + /* RESISTANCE TERM 66.67 Ohm Cfg */ + dw_hdmi_phy_i2c_write(hdmi, 0x0002, 0x19); /* TXTERM */ + + if (clkk <= 148500) { + sup_tx_lvl = 0x11; + } else if (clkk <= 297 * 1000) { + sup_tx_lvl = 0x0a; + cksymtxctrl |= TRBON; + } else if (clkk <= 340 * 1000) { + sup_tx_lvl = 0x8; + cksymtxctrl |= TRBON; + } + + sup_ck_lvl = sup_tx_lvl; + /* VLEVCTRL */ + dw_hdmi_phy_i2c_write(hdmi, (sup_tx_lvl << 5) | sup_ck_lvl, 0x0E); + + /* CKSYMTXCTRL */ + cksymtxctrl |= SYMON | TRAON | CK_SYMON; + dw_hdmi_phy_i2c_write(hdmi, cksymtxctrl, 0x09); + + dw_hdmi_phy_i2c_write(hdmi, 0x0007, 0x17); +#if 0 + /* 
REMOVE CLK TERM */ + dw_hdmi_phy_i2c_write(hdmi, 0x8000, 0x05); /* CKCALCTRL */ +#endif + return 0; +err: + dev_err(hdmi->dev, "Pixel clock %d, res %d, pixel repetition %d -" + " unsupported by HDMI\n", clk, res, rep); + return -EINVAL; +} +#endif /*CONFIG_MCST*/ + static int hdmi_phy_configure(struct dw_hdmi *hdmi) { const struct dw_hdmi_phy_data *phy = hdmi->phy.data; @@ -1441,17 +1855,27 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi) dw_hdmi_phy_i2c_set_addr(hdmi, HDMI_PHY_I2CM_SLAVE_ADDR_PHY_GEN2); +#ifdef CONFIG_MCST + if (hdmi->version == 0x200a) { /* e1c+ */ + if (hdmi_plls_configure(hdmi, 8)) + return -EINVAL; + goto out; + } +#endif /* Write to the PHY as configured by the platform */ if (pdata->configure_phy) ret = pdata->configure_phy(hdmi, pdata, mpixelclock); else ret = phy->configure(hdmi, pdata, mpixelclock); if (ret) { - dev_err(hdmi->dev, "PHY configuration failed (clock %lu)\n", - mpixelclock); + dev_err(hdmi->dev, "PHY configuration failed (%d, clock %lu)\n", + ret, mpixelclock); return ret; } +#ifdef CONFIG_MCST +out: +#endif /* Wait for resuming transmission of TMDS clock and data */ if (mtmdsclock > HDMI14_MAX_TMDSCLK) msleep(100); @@ -2025,11 +2449,14 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode) /* HDMI Initialization Step B.1 */ hdmi_av_composer(hdmi, mode); +#ifndef CONFIG_MCST /* too early: monitor sometimes fails to determine + correct resolution (bug 132357).*/ /* HDMI Initializateion Step B.2 */ ret = hdmi->phy.ops->init(hdmi, hdmi->phy.data, &hdmi->previous_mode); if (ret) return ret; hdmi->phy.enabled = true; +#endif /* HDMI Initialization Step B.3 */ dw_hdmi_enable_video_path(hdmi); @@ -2039,6 +2466,10 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode) /* HDMI Initialization Step E - Configure audio */ hdmi_clk_regenerator_update_pixel_clock(hdmi); +#ifdef CONFIG_MCST /*FIXME:*/ + if (hdmi->version == 0x214a) /* e2c3 */ + dw_hdmi_set_channel_count(hdmi, 2); 
+#endif hdmi_enable_audio_clk(hdmi, hdmi->audio_enable); } @@ -2056,6 +2487,13 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode) hdmi_video_packetize(hdmi); hdmi_video_csc(hdmi); hdmi_video_sample(hdmi); +#ifdef CONFIG_MCST + /* HDMI Initialization Step D.3 */ + ret = hdmi->phy.ops->init(hdmi, hdmi->phy.data, &hdmi->previous_mode); + if (ret) + return ret; + hdmi->phy.enabled = true; +#endif hdmi_tx_hdcp_config(hdmi); dw_hdmi_clear_overflow(hdmi); @@ -2209,7 +2647,8 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector) ret = drm_add_edid_modes(connector, edid); kfree(edid); } else { - dev_dbg(hdmi->dev, "failed to get edid\n"); + dev_dbg(hdmi->dev, "failed to get edid (%s:%d)\n", + hdmi->ddc->name, hdmi->ddc->nr); } return ret; @@ -2228,6 +2667,7 @@ static void dw_hdmi_connector_force(struct drm_connector *connector) } static const struct drm_connector_funcs dw_hdmi_connector_funcs = { + .dpms = drm_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = dw_hdmi_connector_detect, .destroy = drm_connector_cleanup, @@ -2374,7 +2814,7 @@ static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id) if (hdmi->i2c) ret = dw_hdmi_i2c_irq(hdmi); - intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0); + intr_stat = __hdmi_readb(hdmi, HDMI_IH_PHY_STAT0); if (intr_stat) { hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0); return IRQ_WAKE_THREAD; @@ -2516,7 +2956,7 @@ static int dw_hdmi_detect_phy(struct dw_hdmi *hdmi) phy_type = hdmi->plat_data->phy_force_vendor ? DW_HDMI_PHY_VENDOR_PHY : hdmi_readb(hdmi, HDMI_CONFIG2_ID); - +#ifndef CONFIG_MCST if (phy_type == DW_HDMI_PHY_VENDOR_PHY) { /* Vendor PHYs require support from the glue layer. */ if (!hdmi->plat_data->phy_ops || !hdmi->plat_data->phy_name) { @@ -2530,21 +2970,21 @@ static int dw_hdmi_detect_phy(struct dw_hdmi *hdmi) hdmi->phy.name = hdmi->plat_data->phy_name; return 0; } - +#endif /* Synopsys PHYs are handled internally. 
*/ for (i = 0; i < ARRAY_SIZE(dw_hdmi_phys); ++i) { if (dw_hdmi_phys[i].type == phy_type) { hdmi->phy.ops = &dw_hdmi_synopsys_phy_ops; hdmi->phy.name = dw_hdmi_phys[i].name; hdmi->phy.data = (void *)&dw_hdmi_phys[i]; - +#ifndef CONFIG_MCST if (!dw_hdmi_phys[i].configure && !hdmi->plat_data->configure_phy) { dev_err(hdmi->dev, "%s requires platform support\n", hdmi->phy.name); return -ENODEV; } - +#endif return 0; } } @@ -2570,8 +3010,8 @@ static void dw_hdmi_cec_disable(struct dw_hdmi *hdmi) } static const struct dw_hdmi_cec_ops dw_hdmi_cec_ops = { - .write = hdmi_writeb, - .read = hdmi_readb, + .write = __hdmi_writeb, + .read = __hdmi_readb, .enable = dw_hdmi_cec_enable, .disable = dw_hdmi_cec_disable, }; @@ -2605,6 +3045,29 @@ static void dw_hdmi_init_hw(struct dw_hdmi *hdmi) hdmi->phy.ops->setup_hpd(hdmi, hdmi->phy.data); } +#ifdef CONFIG_MCST +static int dev_is_type(struct device *dev, void *data) +{ + struct device **d = data; + if (dev->type == &i2c_adapter_type) { + get_device(dev); + *d = dev; + return 1; + } + return device_for_each_child(dev, d, dev_is_type); +} + +static struct device *dev_find_type(struct device *parent) +{ + struct device *d = NULL; + if (dev_is_type(parent, &d)) { + return d; + } + device_for_each_child(parent, &d, dev_is_type); + return d; +} +#endif /*CONFIG_MCST*/ + static struct dw_hdmi * __dw_hdmi_probe(struct platform_device *pdev, const struct dw_hdmi_plat_data *plat_data) @@ -2616,6 +3079,9 @@ __dw_hdmi_probe(struct platform_device *pdev, struct dw_hdmi_cec_data cec; struct dw_hdmi *hdmi; struct resource *iores = NULL; +#ifdef CONFIG_MCST + struct device *ddc_dev; +#endif /*CONFIG_MCST*/ int irq; int ret; u32 val = 1; @@ -2650,6 +3116,10 @@ __dw_hdmi_probe(struct platform_device *pdev, return ERR_PTR(-EPROBE_DEFER); } +#ifdef CONFIG_MCST + } else if ((ddc_dev = dev_find_type(hdmi->dev))) { + hdmi->ddc = to_i2c_adapter(ddc_dev); +#endif /*CONFIG_MCST*/ } else { dev_dbg(hdmi->dev, "no ddc property found\n"); } @@ -2658,6 
+3128,10 @@ __dw_hdmi_probe(struct platform_device *pdev, const struct regmap_config *reg_config; of_property_read_u32(np, "reg-io-width", &val); +#ifdef CONFIG_MCST + hdmi->audio_enable = true; + val = 4; +#endif /*CONFIG_MCST*/ switch (val) { case 4: reg_config = &hdmi_regmap_32bit_config; @@ -2688,6 +3162,7 @@ __dw_hdmi_probe(struct platform_device *pdev, hdmi->regm = plat_data->regm; } +#ifndef CONFIG_MCST hdmi->isfr_clk = devm_clk_get(hdmi->dev, "isfr"); if (IS_ERR(hdmi->isfr_clk)) { ret = PTR_ERR(hdmi->isfr_clk); @@ -2713,6 +3188,7 @@ __dw_hdmi_probe(struct platform_device *pdev, dev_err(hdmi->dev, "Cannot enable HDMI iahb clock: %d\n", ret); goto err_isfr; } +#endif /*CONFIG_MCST*/ hdmi->cec_clk = devm_clk_get(hdmi->dev, "cec"); if (PTR_ERR(hdmi->cec_clk) == -ENOENT) { @@ -2836,8 +3312,13 @@ __dw_hdmi_probe(struct platform_device *pdev, audio.hdmi = hdmi; audio.eld = hdmi->connector.eld; +#ifdef CONFIG_MCST + audio.write = __hdmi_writeb; + audio.read = __hdmi_readb; +#else audio.write = hdmi_writeb; audio.read = hdmi_readb; +#endif /*CONFIG_MCST*/ hdmi->enable_audio = dw_hdmi_i2s_audio_enable; hdmi->disable_audio = dw_hdmi_i2s_audio_disable; @@ -2864,6 +3345,7 @@ __dw_hdmi_probe(struct platform_device *pdev, return hdmi; err_iahb: +#ifndef CONFIG_MCST if (hdmi->i2c) { i2c_del_adapter(&hdmi->i2c->adap); hdmi->ddc = NULL; @@ -2874,6 +3356,7 @@ err_iahb: clk_disable_unprepare(hdmi->cec_clk); err_isfr: clk_disable_unprepare(hdmi->isfr_clk); +#endif /*CONFIG_MCST*/ err_res: i2c_put_adapter(hdmi->ddc); @@ -2890,6 +3373,9 @@ static void __dw_hdmi_remove(struct dw_hdmi *hdmi) /* Disable all interrupts */ hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0); +#ifdef CONFIG_MCST + put_device(&hdmi->ddc->dev); +#else clk_disable_unprepare(hdmi->iahb_clk); clk_disable_unprepare(hdmi->isfr_clk); if (hdmi->cec_clk) @@ -2899,6 +3385,7 @@ static void __dw_hdmi_remove(struct dw_hdmi *hdmi) i2c_del_adapter(&hdmi->i2c->adap); else i2c_put_adapter(hdmi->ddc); +#endif /*CONFIG_MCST*/ } 
/* ----------------------------------------------------------------------------- diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h index 6988f12d89d9..65921628bcc1 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h @@ -373,10 +373,15 @@ #define HDMI_GP_CONF0 0x3500 #define HDMI_GP_CONF1 0x3501 #define HDMI_GP_CONF2 0x3502 +#ifdef CONFIG_MCST +/*our ip-core has only this register */ +#define HDMI_GP_MASK 0x3506 +#else #define HDMI_GP_STAT 0x3503 #define HDMI_GP_INT 0x3504 #define HDMI_GP_MASK 0x3505 #define HDMI_GP_POL 0x3506 +#endif /* Audio DMA Registers */ #define HDMI_AHB_DMA_CONF0 0x3600 diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 5e906ea6df67..a7db1a07b4a0 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1456,13 +1456,19 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, } for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) { + int timeout = msecs_to_jiffies(100); +#ifdef CONFIG_MCST +#if HZ < 100 /* suppose it is processor prototype */ + timeout *= 100; +#endif +#endif if (!(crtc_mask & drm_crtc_mask(crtc))) continue; ret = wait_event_timeout(dev->vblank[i].queue, old_state->crtcs[i].last_vblank_count != drm_crtc_vblank_count(crtc), - msecs_to_jiffies(100)); + timeout); WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n", crtc->base.id, crtc->name); diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 8ce9d73fab4f..bef664e94c73 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -190,7 +190,7 @@ static int drm_addmap_core(struct drm_device *dev, resource_size_t offset, switch (map->type) { case _DRM_REGISTERS: case _DRM_FRAME_BUFFER: -#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__) +#if !defined(__sparc__) && 
!defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__) && !defined(__e2k__) if (map->offset + (map->size-1) < map->offset || map->offset < virt_to_phys(high_memory)) { kfree(map); diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index 2e8ce99d0baa..cd77517de3d5 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c @@ -135,6 +135,11 @@ static int drm_legacy_lock_free(struct drm_lock_data *lock_data, } spin_unlock_bh(&lock_data->spinlock); +#ifdef CONFIG_MCST + if (!lock) /* Device has been unregistered */ + return 1; +#endif + do { old = *lock; new = _DRM_LOCKING_CONTEXT(old); @@ -169,8 +174,10 @@ int drm_legacy_lock(struct drm_device *dev, void *data, struct drm_master *master = file_priv->master; int ret = 0; +#ifndef CONFIG_MCST /*Vivante galcore needs this*/ if (!drm_core_check_feature(dev, DRIVER_LEGACY)) return -EOPNOTSUPP; +#endif ++file_priv->lock_count; @@ -259,8 +266,10 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_ struct drm_lock *lock = data; struct drm_master *master = file_priv->master; +#ifndef CONFIG_MCST /*Vivante galcore needs this*/ if (!drm_core_check_feature(dev, DRIVER_LEGACY)) return -EOPNOTSUPP; +#endif if (lock->context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index 52e87e4869a5..275b5a9d3b0b 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -75,7 +75,7 @@ static pgprot_t drm_io_prot(struct drm_local_map *map, tmp = pgprot_decrypted(tmp); #if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \ - defined(__mips__) + defined(__mips__) || defined(__e2k__) if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING)) tmp = pgprot_noncached(tmp); else diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c index 8bcf0d199145..2389d5fd97b1 
100644 --- a/drivers/gpu/drm/i2c/sil164_drv.c +++ b/drivers/gpu/drm/i2c/sil164_drv.c @@ -103,7 +103,7 @@ struct sil164_priv { /* HW access functions */ static void -sil164_write(struct i2c_client *client, uint8_t addr, uint8_t val) +__sil164_write(struct i2c_client *client, uint8_t addr, uint8_t val) { uint8_t buf[] = {addr, val}; int ret; @@ -115,7 +115,7 @@ sil164_write(struct i2c_client *client, uint8_t addr, uint8_t val) } static uint8_t -sil164_read(struct i2c_client *client, uint8_t addr) +__sil164_read(struct i2c_client *client, uint8_t addr) { uint8_t val; int ret; @@ -136,6 +136,21 @@ fail: return 0; } +#define sil164_write(_client, _addr, _val) do { \ + unsigned __val2 = _val; \ + DRM_DEBUG("%x: wr: 0x%02x: 0x%02x\n", \ + (_client)->addr, _addr, __val2); \ + __sil164_write(_client, _addr, _val); \ +} while (0) + +#define sil164_read(_client, _addr) \ +({ \ + uint8_t _val = __sil164_read(_client, _addr); \ + DRM_DEBUG("%x: rd: 0x%02x: 0x%02x\n", \ + (_client)->addr, _addr, _val); \ + _val; \ +}) + static void sil164_save_state(struct i2c_client *client, uint8_t *state) { @@ -172,6 +187,14 @@ sil164_init_state(struct i2c_client *client, struct sil164_encoder_params *config, bool duallink) { + /* + * Sil 1178 Magic from datashit + */ + if (strcmp(client->name, "sil1178") == 0) { + sil164_write(client, 0x0F, 0x44); + sil164_write(client, 0x0F, 0x4C); + } + sil164_write(client, SIL164_CONTROL0, SIL164_CONTROL0_HSYNC_ON | SIL164_CONTROL0_VSYNC_ON | @@ -384,9 +407,10 @@ sil164_detect_slave(struct i2c_client *client) .addr = SIL164_I2C_ADDR_SLAVE, .len = 0, }; - const struct i2c_board_info info = { + struct i2c_board_info info = { I2C_BOARD_INFO("sil164", SIL164_I2C_ADDR_SLAVE) }; + strcpy(info.type, client->name); if (i2c_transfer(adap, &msg, 1) != 1) { sil164_dbg(adap, "No dual-link slave found."); @@ -417,6 +441,7 @@ sil164_encoder_init(struct i2c_client *client, static const struct i2c_device_id sil164_ids[] = { { "sil164", 0 }, + { "sil1178", 0 }, { } }; 
MODULE_DEVICE_TABLE(i2c, sil164_ids); diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig index 207bf7409dfb..53d78dde52d5 100644 --- a/drivers/gpu/drm/imx/Kconfig +++ b/drivers/gpu/drm/imx/Kconfig @@ -36,6 +36,5 @@ config DRM_IMX_LDB config DRM_IMX_HDMI tristate "Freescale i.MX DRM HDMI" select DRM_DW_HDMI - depends on DRM_IMX help Choose this if you want to use HDMI on i.MX6. diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c index 2e12a4a3bfa1..099ea9d20460 100644 --- a/drivers/gpu/drm/imx/dw_hdmi-imx.c +++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c @@ -4,6 +4,10 @@ * derived from imx-hdmi.c(renamed to bridge/dw_hdmi.c now) */ +#ifdef CONFIG_MCST +#define DEBUG +#endif /* CONFIG_MCST */ + #include #include #include @@ -18,6 +22,7 @@ #include #include #include +#include #include "imx-drm.h" @@ -99,6 +104,7 @@ static const struct dw_hdmi_phy_config imx_phy_config[] = { static int dw_hdmi_imx_parse_dt(struct imx_hdmi *hdmi) { +#ifndef CONFIG_MCST struct device_node *np = hdmi->dev->of_node; hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "gpr"); @@ -107,6 +113,7 @@ static int dw_hdmi_imx_parse_dt(struct imx_hdmi *hdmi) return PTR_ERR(hdmi->regmap); } +#endif /* ! CONFIG_MCST */ return 0; } @@ -116,29 +123,32 @@ static void dw_hdmi_imx_encoder_disable(struct drm_encoder *encoder) static void dw_hdmi_imx_encoder_enable(struct drm_encoder *encoder) { +#ifndef CONFIG_MCST struct imx_hdmi *hdmi = enc_to_imx_hdmi(encoder); int mux = drm_of_encoder_active_port_id(hdmi->dev->of_node, encoder); regmap_update_bits(hdmi->regmap, IOMUXC_GPR3, IMX6Q_GPR3_HDMI_MUX_CTL_MASK, mux << IMX6Q_GPR3_HDMI_MUX_CTL_SHIFT); +#endif /* ! 
CONFIG_MCST */ } static int dw_hdmi_imx_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { +#ifndef CONFIG_MCST struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state); imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB888_1X24; imx_crtc_state->di_hsync_pin = 2; imx_crtc_state->di_vsync_pin = 3; - +#endif /* ! CONFIG_MCST */ return 0; } static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = { - .enable = dw_hdmi_imx_encoder_enable, + .enable = dw_hdmi_imx_encoder_enable, .disable = dw_hdmi_imx_encoder_disable, .atomic_check = dw_hdmi_imx_atomic_check, }; @@ -173,6 +183,18 @@ imx6dl_hdmi_mode_valid(struct drm_connector *con, return MODE_OK; } +#ifdef CONFIG_MCST +static enum drm_mode_status +mga2_hdmi_mode_valid(struct drm_connector *con, + const struct drm_display_mode *mode) +{ + unsigned long max = 552.75e+06; + if (mode->clock <=max) + return MODE_OK; + return MODE_CLOCK_HIGH; +} + +#endif /* CONFIG_MCST */ static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = { .mpll_cfg = imx_mpll_cfg, .cur_ctr = imx_cur_ctr, @@ -187,6 +209,189 @@ static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = { .mode_valid = imx6dl_hdmi_mode_valid, }; +#ifdef CONFIG_MCST + +static int dw_hdmi_phy_reg_read(void *context, unsigned int reg, + unsigned int *result) +{ + struct dw_hdmi *hdmi = context; + *result = __dw_hdmi_phy_i2c_read(hdmi, reg); + return 0; +} + +static int dw_hdmi_phy_reg_write(void *context, unsigned int reg, + unsigned int value) +{ + struct dw_hdmi *hdmi = context; + __dw_hdmi_phy_i2c_write(hdmi, value, reg); + return 0; +} + +static const struct regmap_config dw_hdmi_phy_regmap_config = { + .name = "phy-regs", + .reg_bits = 8, + .val_bits = 16, + .reg_read = dw_hdmi_phy_reg_read, + .reg_write = dw_hdmi_phy_reg_write, + .max_register = 0x3b, +}; + +#define MGA25_HDMI_PHY_OPMODE_PLLCFG 0x06 /* Mode of operation and PLL dividers */ +#define 
MGA25_HDMI_PHY_PLLCURRGMPCTRL 0x10 /* PLL current and Gmp (conductance) */ +#define MGA25_HDMI_PHY_PLLDIVCTRL 0x11 /* PLL dividers */ + +struct mga25_hdmi_phy_params { + unsigned long mpixelclock; + u16 opmode_div; /* Mode of operation and PLL dividers */ + u16 curr_gmp; /* PLL current and Gmp (conductance) */ + u16 div; /* PLL dividers */ +}; + +static const struct mga25_hdmi_phy_params mga25_hdmi_phy_params[] = { + { 13.500e+06, 0x0003, 0x0280, 0x0650 }, + { 13.500e+06, 0x0002, 0x1280, 0x0650 }, + { 13.500e+06, 0x0001, 0x2280, 0x0650 }, + { 21.600e+06, 0x0001, 0x2281, 0x0632 }, + { 21.600e+06, 0x0000, 0x3281, 0x0632 }, + { 25.175e+06, 0x0003, 0x0283, 0x0628 }, + { 31.5e+06, 0x0003, 0x0283, 0x0628 }, + { 33.75e+06, 0x0003, 0x0283, 0x0628 }, + { 35.5e+06, 0x0003, 0x0283, 0x0628 }, + { 43.200e+06, 0x0000, 0x3203, 0x0619 }, + { 44.9e+06, 0x0003, 0x0285, 0x0228 }, + { 49.5e+06, 0x0002, 0x1183, 0x0614 }, + { 50.35e+06, 0x0002, 0x1183, 0x0614 }, + { 56.25e+06, 0x0002, 0x1183, 0x0614 }, + { 59.4e+06, 0x0002, 0x1183, 0x0614 }, + { 68.25e+06, 0x0002, 0x1183, 0x0614 }, + { 73.25e+06, 0x0002, 0x1142, 0x0214 }, + { 74.25e+06, 0x0002, 0x1142, 0x0214 }, + { 78.75e+06, 0x0002, 0x1142, 0x0214 }, + { 79.5e+06, 0x0002, 0x1142, 0x0214 }, + { 82.5e+06, 0x0002, 0x1142, 0x0214 }, + { 83.5e+06, 0x0002, 0x1142, 0x0214 }, + { 85.5e+06, 0x0002, 0x1142, 0x0214 }, + { 88.75e+06, 0x0002, 0x1142, 0x0214 }, + { 94.5e+06, 0x0001, 0x20c0, 0x060a }, + { 100.7e+06, 0x0001, 0x20c0, 0x060a }, + { 102.25e+06, 0x0001, 0x20c0, 0x060a }, + { 106.5e+06, 0x0001, 0x20c0, 0x060a }, + { 115.5e+06, 0x0001, 0x20c0, 0x060a }, + { 117.5e+06, 0x0001, 0x20c0, 0x060a }, + { 118.8e+06, 0x0001, 0x20c0, 0x060a }, + { 121.75e+06, 0x0001, 0x20c0, 0x060a }, + { 122.5e+06, 0x0001, 0x20c0, 0x060a }, + { 136.75e+06, 0x0001, 0x20c0, 0x060a }, + { 140.25e+06, 0x0001, 0x20c0, 0x060a }, + { 146.25e+06, 0x0001, 0x2080, 0x020a }, + { 148.25e+06, 0x0001, 0x2080, 0x020a }, + { 148.5e+06, 0x0001, 0x2080, 0x020a }, + { 157.5e+06, 
0x0001, 0x2080, 0x020a }, + { 175.5e+06, 0x0001, 0x2080, 0x020a }, + { 179.5e+06, 0x0001, 0x2080, 0x020a }, + { 182.75e+06, 0x0001, 0x2080, 0x020a }, + {185.625e+06, 0x0000, 0x3040, 0x0605 }, + { 187.25e+06, 0x0000, 0x3040, 0x0605 }, + { 193.25e+06, 0x0000, 0x3040, 0x0605 }, + { 202.5e+06, 0x0000, 0x3040, 0x0605 }, + { 204.75e+06, 0x0000, 0x3040, 0x0605 }, + { 214.75e+06, 0x0000, 0x3040, 0x0605 }, + { 218.25e+06, 0x0000, 0x3040, 0x0605 }, + { 229.5e+06, 0x0000, 0x3040, 0x0605 }, + { 237.6e+06, 0x0000, 0x3040, 0x0605 }, + { 245.25e+06, 0x0000, 0x3040, 0x0605 }, + { 245.5e+06, 0x0000, 0x3040, 0x0605 }, + { 268.25e+06, 0x0000, 0x3040, 0x0605 }, + { 268.5e+06, 0x0000, 0x3040, 0x0605 }, + { 281.25e+06, 0x0000, 0x3040, 0x0605 }, + { 333.25e+06, 0x0000, 0x3041, 0x0205 }, + { 348.5e+06, 0x0640, 0x3041, 0x0205 }, + { 356.5e+06, 0x0640, 0x3041, 0x0205 }, + { 371.25e+06, 0x0640, 0x3041, 0x0205 }, + { 380.5e+06, 0x0640, 0x3041, 0x0205 }, + { 475.2e+06, 0x0640, 0x3080, 0x0005 }, + { 505.25e+06, 0x0640, 0x3080, 0x0005 }, + { 552.75e+06, 0x0640, 0x3080, 0x0005 }, + { /* sentinel */ }, +}; + +#define MGA25_HDMI_PHY_CKSYMTXCTRL 0x09 /* Clock Symbol and Transmitter Control Register */ +#define MGA25_HDMI_PHY_VLEVCTRL_PLLMEASCTRL 0x0e /* Voltage Level Control Register, PLL Measure Control Register*/ +#define MGA25_HDMI_PHY_TXTERM 0x19 /*Transmission Termination Register*/ + +struct mga25_hdmi_phy_drvr_vltg_lvl { + unsigned long mpixelclock; + u16 cksymtxctrl; + u16 vlevctrl_pllmeasctrl; + u16 txterm; +}; + +static const struct mga25_hdmi_phy_drvr_vltg_lvl mga25_hdmi_phy_drvr_vltg_lvl[] = { + /* tx_symon ck_symon, txlvl, txterm */ + { 165e+06, (0xc << 4) | (8 << 0), (12 << 5), (4 << 0) /* 100 Omh */ }, /* HDMI 1.4 < 1.65Gbps */ + { 340e+06, (0xc << 4) | (8 << 0), (12 << 5), (4 << 0) /* 100 Omh */ }, /* HDMI 1.4 > 1.65Gbps */ + { 552.75e+06, (0xf << 4) | (5 << 0), (12 << 5), (0 << 0) /* 50 Omh */ }, /* HDMI 2.0 (Data rate greater than 3.4 Gbps)*/ + { /* sentinel */ }, +}; + +static int 
mga25_hdmi_phy_configure(struct dw_hdmi *hdmi, + const struct dw_hdmi_plat_data *pdata, + unsigned long mpixelclock) +{ + const struct mga25_hdmi_phy_params *p = mga25_hdmi_phy_params; + const struct mga25_hdmi_phy_drvr_vltg_lvl *d = + mga25_hdmi_phy_drvr_vltg_lvl; + + for (; p->mpixelclock && mpixelclock > p->mpixelclock; p++); + + if (p->mpixelclock == 0) + return -EINVAL; + + for (; d->mpixelclock && mpixelclock > d->mpixelclock; d++); + + if (d->mpixelclock == 0) + return -EINVAL; + + dw_hdmi_phy_i2c_write(hdmi, d->cksymtxctrl, + MGA25_HDMI_PHY_CKSYMTXCTRL); + dw_hdmi_phy_i2c_write(hdmi, d->vlevctrl_pllmeasctrl, + MGA25_HDMI_PHY_VLEVCTRL_PLLMEASCTRL); + dw_hdmi_phy_i2c_write(hdmi, d->txterm, + MGA25_HDMI_PHY_TXTERM); + + dw_hdmi_phy_i2c_write(hdmi, p->opmode_div, + MGA25_HDMI_PHY_OPMODE_PLLCFG); + dw_hdmi_phy_i2c_write(hdmi, p->curr_gmp, + MGA25_HDMI_PHY_PLLCURRGMPCTRL); + dw_hdmi_phy_i2c_write(hdmi, p->div, MGA25_HDMI_PHY_PLLDIVCTRL); + + return 0; +} + +static struct dw_hdmi_plat_data mga2_drv_data = { + .mpll_cfg = imx_mpll_cfg, + .cur_ctr = imx_cur_ctr, + .phy_config = imx_phy_config, + .mode_valid = mga2_hdmi_mode_valid, +}; +static const struct dw_hdmi_plat_data mga25_drv_data = { + .mode_valid = mga2_hdmi_mode_valid, + .configure_phy = mga25_hdmi_phy_configure, +}; + +static struct platform_device_id imx_hdmi_devtype[] = { + { + .name = "mga2-hdmi", + .driver_data = (unsigned long)&mga2_drv_data + }, { + .name = "mga25-hdmi", + .driver_data = (unsigned long)&mga25_drv_data + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, imx_hdmi_devtype); + +#endif /* CONFIG_MCST */ static const struct of_device_id dw_hdmi_imx_dt_ids[] = { { .compatible = "fsl,imx6q-hdmi", .data = &imx6q_hdmi_drv_data @@ -194,6 +399,12 @@ static const struct of_device_id dw_hdmi_imx_dt_ids[] = { .compatible = "fsl,imx6dl-hdmi", .data = &imx6dl_hdmi_drv_data }, +#ifdef CONFIG_MCST + { + .compatible = "mga2-hdmi", + .data = &mga2_drv_data + }, +#endif /* CONFIG_MCST */ {}, }; 
MODULE_DEVICE_TABLE(of, dw_hdmi_imx_dt_ids); @@ -203,24 +414,45 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master, { struct platform_device *pdev = to_platform_device(dev); const struct dw_hdmi_plat_data *plat_data; +#ifdef CONFIG_MCST + struct drm_crtc *crtc; + uint32_t crtc_mask = 0; +#else const struct of_device_id *match; +#endif /* ! CONFIG_MCST */ struct drm_device *drm = data; struct drm_encoder *encoder; struct imx_hdmi *hdmi; int ret; +#ifndef CONFIG_MCST if (!pdev->dev.of_node) return -ENODEV; +#endif /* ! CONFIG_MCST */ hdmi = dev_get_drvdata(dev); memset(hdmi, 0, sizeof(*hdmi)); - + +#ifndef CONFIG_MCST match = of_match_node(dw_hdmi_imx_dt_ids, pdev->dev.of_node); plat_data = match->data; +#else /* CONFIG_MCST */ + plat_data = (struct dw_hdmi_plat_data *) + platform_get_device_id(pdev)->driver_data; +#endif /* CONFIG_MCST */ hdmi->dev = &pdev->dev; encoder = &hdmi->encoder; +#ifdef CONFIG_MCST + platform_set_drvdata(pdev, hdmi); + drm_for_each_crtc(crtc, drm) + crtc_mask |= drm_crtc_mask(crtc); + + encoder->possible_crtcs = crtc_mask; +#else encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node); +#endif /* CONFIG_MCST */ + /* * If we failed to find the CRTC(s) which this encoder is * supposed to be connected to, it's because the CRTC has @@ -248,7 +480,10 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master, ret = PTR_ERR(hdmi->hdmi); drm_encoder_cleanup(encoder); } - +#ifdef CONFIG_MCST + WARN_ON(IS_ERR(devm_regmap_init(dev, NULL, hdmi->hdmi, + &dw_hdmi_phy_regmap_config))); +#endif return ret; } @@ -292,6 +527,9 @@ static struct platform_driver dw_hdmi_imx_platform_driver = { .name = "dwhdmi-imx", .of_match_table = dw_hdmi_imx_dt_ids, }, +#ifdef CONFIG_MCST + .id_table = imx_hdmi_devtype, +#endif /* CONFIG_MCST */ }; module_platform_driver(dw_hdmi_imx_platform_driver); diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 897442754fd0..c38bbe182bd9 100644 --- 
a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -6424,7 +6424,7 @@ static void cik_enable_gds_pg(struct radeon_device *rdev, bool enable) void cik_init_cp_pg_table(struct radeon_device *rdev) { - volatile u32 *dst_ptr; + u32 __iomem *dst_ptr; int me, i, max_me = 4; u32 bo_offset = 0; u32 table_offset, table_size; @@ -6475,8 +6475,8 @@ void cik_init_cp_pg_table(struct radeon_device *rdev) } for (i = 0; i < table_size; i ++) { - dst_ptr[bo_offset + i] = - cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); + writel(le32_to_cpu(fw_data[table_offset + i]), + dst_ptr + bo_offset + i); } bo_offset += table_size; } else { @@ -6498,8 +6498,8 @@ void cik_init_cp_pg_table(struct radeon_device *rdev) } for (i = 0; i < table_size; i ++) { - dst_ptr[bo_offset + i] = - cpu_to_le32(be32_to_cpu(fw_data[table_offset + i])); + writel(be32_to_cpu(fw_data[table_offset + i]), + dst_ptr + bo_offset + i); } bo_offset += table_size; } diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 1d978a3d9c82..c42882002be7 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -4153,7 +4153,7 @@ void sumo_rlc_fini(struct radeon_device *rdev) int sumo_rlc_init(struct radeon_device *rdev) { const u32 *src_ptr; - volatile u32 *dst_ptr; + u32 __iomem *dst_ptr; u32 dws, data, i, j, k, reg_num; u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index = 0; u64 reg_list_mc_addr; @@ -4204,7 +4204,7 @@ int sumo_rlc_init(struct radeon_device *rdev) if (rdev->family >= CHIP_TAHITI) { /* SI */ for (i = 0; i < rdev->rlc.reg_list_size; i++) - dst_ptr[i] = cpu_to_le32(src_ptr[i]); + writel(src_ptr[i], dst_ptr + i); } else { /* ON/LN/TN */ /* format: @@ -4218,10 +4218,10 @@ int sumo_rlc_init(struct radeon_device *rdev) if (i < dws) data |= (src_ptr[i] >> 2) << 16; j = (((i - 1) * 3) / 2); - dst_ptr[j] = cpu_to_le32(data); + writel(data, dst_ptr + j); } j = ((i * 3) / 2); - dst_ptr[j] = 
cpu_to_le32(RLC_SAVE_RESTORE_LIST_END_MARKER); + writel(RLC_SAVE_RESTORE_LIST_END_MARKER, dst_ptr + j); } radeon_bo_kunmap(rdev->rlc.save_restore_obj); radeon_bo_unreserve(rdev->rlc.save_restore_obj); @@ -4284,40 +4284,40 @@ int sumo_rlc_init(struct radeon_device *rdev) cik_get_csb_buffer(rdev, dst_ptr); } else if (rdev->family >= CHIP_TAHITI) { reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + 256; - dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr)); - dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr)); - dst_ptr[2] = cpu_to_le32(rdev->rlc.clear_state_size); + writel(upper_32_bits(reg_list_mc_addr), dst_ptr + 0); + writel(lower_32_bits(reg_list_mc_addr), dst_ptr + 1); + writel(rdev->rlc.clear_state_size, dst_ptr + 2); si_get_csb_buffer(rdev, &dst_ptr[(256/4)]); } else { reg_list_hdr_blk_index = 0; reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4); data = upper_32_bits(reg_list_mc_addr); - dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data); + writel(data, dst_ptr + reg_list_hdr_blk_index); reg_list_hdr_blk_index++; for (i = 0; cs_data[i].section != NULL; i++) { for (j = 0; cs_data[i].section[j].extent != NULL; j++) { reg_num = cs_data[i].section[j].reg_count; data = reg_list_mc_addr & 0xffffffff; - dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data); + writel(data, dst_ptr + reg_list_hdr_blk_index); reg_list_hdr_blk_index++; data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff; - dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data); + writel(data, dst_ptr + reg_list_hdr_blk_index); reg_list_hdr_blk_index++; data = 0x08000000 | (reg_num * 4); - dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data); + writel(data, dst_ptr + reg_list_hdr_blk_index); reg_list_hdr_blk_index++; for (k = 0; k < reg_num; k++) { data = cs_data[i].section[j].extent[k]; - dst_ptr[reg_list_blk_index + k] = cpu_to_le32(data); + writel(data, dst_ptr + reg_list_hdr_blk_index + k); } reg_list_mc_addr += reg_num * 4; reg_list_blk_index += reg_num; } } 
- dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(RLC_CLEAR_STATE_END_MARKER); + writel(RLC_CLEAR_STATE_END_MARKER, dst_ptr + reg_list_hdr_blk_index); } radeon_bo_kunmap(rdev->rlc.clear_state_obj); radeon_bo_unreserve(rdev->rlc.clear_state_obj); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 033bc466a862..a32d5dee6450 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2620,7 +2620,7 @@ u32 r600_gfx_get_rptr(struct radeon_device *rdev, u32 rptr; if (rdev->wb.enabled) - rptr = rdev->wb.wb[ring->rptr_offs/4]; + rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); else rptr = RREG32(R600_CP_RB_RPTR); diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c index af6c0da45f28..bb31bac9ca5b 100644 --- a/drivers/gpu/drm/radeon/r600_dma.c +++ b/drivers/gpu/drm/radeon/r600_dma.c @@ -257,6 +257,12 @@ int r600_dma_ring_test(struct radeon_device *rdev, radeon_ring_write(ring, 0xDEADBEEF); radeon_ring_unlock_commit(rdev, ring, false); +#ifdef CONFIG_E2K + /* We do not want to fetch the old value into Radeon's + * cache, so give GPU some time to write the new value. 
*/ + mdelay(1); +#endif + for (i = 0; i < rdev->usec_timeout; i++) { tmp = le32_to_cpu(rdev->wb.wb[index/4]); if (tmp == 0xDEADBEEF) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 147087a891aa..4a8fa06cc83c 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -105,6 +105,7 @@ extern int radeon_dpm; extern int radeon_aspm; extern int radeon_runtime_pm; extern int radeon_hard_reset; +extern int radeon_fbdev_accel; extern int radeon_vm_size; extern int radeon_vm_block_size; extern int radeon_deep_color; @@ -986,19 +987,19 @@ struct radeon_rlc { /* for power gating */ struct radeon_bo *save_restore_obj; uint64_t save_restore_gpu_addr; - volatile uint32_t *sr_ptr; + uint32_t __iomem *sr_ptr; const u32 *reg_list; u32 reg_list_size; /* for clear state */ struct radeon_bo *clear_state_obj; uint64_t clear_state_gpu_addr; - volatile uint32_t *cs_ptr; + uint32_t __iomem *cs_ptr; const struct cs_section_def *cs_data; u32 clear_state_size; /* for cp tables */ struct radeon_bo *cp_table_obj; uint64_t cp_table_gpu_addr; - volatile uint32_t *cp_table_ptr; + uint32_t __iomem *cp_table_ptr; u32 cp_table_size; }; @@ -2386,6 +2387,7 @@ struct radeon_device { uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; struct radeon_wb wb; struct radeon_dummy_page dummy_page; + struct radeon_dummy_page dummy_page2; bool shutdown; bool need_swiotlb; bool accel_working; diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 756a50e8aff2..d313b52769fd 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -63,7 +63,11 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev) return false; } +#ifdef CONFIG_MCST + if (size == 0 || readb(&bios[0]) != 0x55 || readb(&bios[1]) != 0xaa) { +#else if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { +#endif iounmap(bios); return false; } @@ -96,6 +100,7 @@ static bool radeon_read_bios(struct 
radeon_device *rdev) pci_unmap_rom(rdev->pdev, bios); return false; } + rdev->bios = kzalloc(size, GFP_KERNEL); if (rdev->bios == NULL) { pci_unmap_rom(rdev->pdev, bios); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 5d017f0aec66..135b8d337276 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1488,6 +1488,15 @@ int radeon_device_init(struct radeon_device *rdev, mutex_unlock(&rdev->pm.mutex); } +#ifdef CONFIG_E2K + if (cpu_has(CPU_HWBUG_BAD_RESET)) { + if (rdev->accel_working) + radeon_test_moves(rdev); + else + DRM_INFO("radeon: acceleration disabled, " + "skipping reset tests\n"); + } +#endif if ((radeon_testing & 1)) { if (rdev->accel_working) radeon_test_moves(rdev); @@ -1506,6 +1515,19 @@ int radeon_device_init(struct radeon_device *rdev, else DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n"); } + rdev->dummy_page2.page = alloc_page(GFP_DMA32 | + GFP_KERNEL | __GFP_ZERO); + if (rdev->dummy_page2.page == NULL) + return -ENOMEM; + rdev->dummy_page2.addr = pci_map_page(rdev->pdev, + rdev->dummy_page2.page, + 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page2.addr)) { + dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n"); + __free_page(rdev->dummy_page2.page); + rdev->dummy_page2.page = NULL; + return -ENOMEM; + } return 0; failed: @@ -1576,7 +1598,13 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - +#ifdef CONFIG_MCST + if (fbcon) { /* suspend the fbdev before turning off the card */ + console_lock(); + radeon_fbdev_set_suspend(rdev, 1); + console_unlock(); + } +#endif drm_kms_helper_poll_disable(dev); drm_modeset_lock_all(dev); @@ -1624,6 +1652,13 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, /* delay GPU reset to resume */ radeon_fence_driver_force_completion(rdev, i); } +#ifdef CONFIG_MCST + 
/* cancel lockup_work to prevent access to registers + * of suspended card */ + if (rdev->fence_drv[i].initialized) + cancel_delayed_work_sync(&rdev-> + fence_drv[i].lockup_work); +#endif } radeon_save_bios_scratch_regs(rdev); @@ -1647,12 +1682,13 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, pci_disable_device(dev->pdev); pci_set_power_state(dev->pdev, PCI_D3hot); } - +#ifndef CONFIG_MCST if (fbcon) { console_lock(); radeon_fbdev_set_suspend(rdev, 1); console_unlock(); } +#endif return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index c2573096d43c..abc7fb472e49 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -51,6 +51,7 @@ #include #include "radeon_drv.h" +#include "radeon_mode.h" /* * KMS wrapper. @@ -192,6 +193,7 @@ int radeon_dpm = -1; int radeon_aspm = -1; int radeon_runtime_pm = -1; int radeon_hard_reset = 0; +int radeon_fbdev_accel = 1; int radeon_vm_size = 8; int radeon_vm_block_size = -1; int radeon_deep_color = 0; @@ -269,6 +271,9 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444); MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))"); module_param_named(hard_reset, radeon_hard_reset, int, 0444); +MODULE_PARM_DESC(fbdev_accel, "Disable/Enable linux console framebuffer acceleration"); +module_param_named(fbdev_accel, radeon_fbdev_accel, int, 0444); + MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)"); module_param_named(vm_size, radeon_vm_size, int, 0444); @@ -413,7 +418,7 @@ radeon_pci_remove(struct pci_dev *pdev) static void radeon_pci_shutdown(struct pci_dev *pdev) { -#ifdef CONFIG_PPC64 +#if defined(CONFIG_PPC64) || defined(CONFIG_MCST) struct drm_device *ddev = pci_get_drvdata(pdev); #endif @@ -423,7 +428,7 @@ radeon_pci_shutdown(struct pci_dev *pdev) if (radeon_device_is_virtual()) radeon_pci_remove(pdev); -#ifdef CONFIG_PPC64 +#if defined(CONFIG_PPC64) || 
defined(CONFIG_MCST) /* Some adapters need to be suspended before a * shutdown occurs in order to prevent an error * during kexec. @@ -556,7 +561,7 @@ long radeon_drm_ioctl(struct file *filp, } ret = drm_ioctl(filp, cmd, arg); - + pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return ret; diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 2c564f4f3468..4569d6ef0228 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -46,6 +46,11 @@ struct radeon_fbdev { struct drm_fb_helper helper; /* must be first */ struct drm_framebuffer fb; struct radeon_device *rdev; + + struct radeon_bo *rbosys; + struct drm_clip_rect dirty_clip; + spinlock_t dirty_lock; + struct work_struct dirty_work; }; static int @@ -67,20 +72,142 @@ radeonfb_release(struct fb_info *info, int user) { struct radeon_fbdev *rfbdev = info->par; struct radeon_device *rdev = rfbdev->rdev; + if (radeon_fbdev_accel) + cancel_work_sync(&rfbdev->dirty_work); pm_runtime_mark_last_busy(rdev->ddev->dev); pm_runtime_put_autosuspend(rdev->ddev->dev); return 0; } +static void __radeon_dirty(struct radeon_fbdev *rfbdev, + struct drm_clip_rect *c) +{ + int r; + struct drm_framebuffer *fb = &rfbdev->fb; + struct radeon_bo *rbo = gem_to_radeon_bo(fb->obj[0]); + struct radeon_bo *rbosys = rfbdev->rbosys; + uint64_t src = radeon_bo_gpu_offset(rbosys); + uint64_t dst = radeon_bo_gpu_offset(rbo); + struct radeon_fence *fence = NULL; + int y = c->y1; + int height = c->y2 - c->y1; + int offset = y * fb->pitches[0]; + int size = (y + height) * fb->pitches[0]; + offset = round_down(offset, RADEON_GPU_PAGE_SIZE); + size = round_up(size - offset, RADEON_GPU_PAGE_SIZE); + + mutex_lock(&rfbdev->rdev->ddev->mode_config.mutex); + + fence = radeon_copy_dma(rfbdev->rdev, src + offset, dst + offset, + size / RADEON_GPU_PAGE_SIZE, rbo->tbo.base.resv); + if (IS_ERR(fence)) { + DRM_ERROR("Failed GTT->VRAM copy\n"); + r = PTR_ERR(fence); + 
goto out; + } + r = radeon_fence_wait(fence, false); + radeon_fence_unref(&fence); + if (r) { + DRM_ERROR("Failed to wait for GTT->VRAM fence\n"); + goto out; + } +out: + mutex_unlock(&rfbdev->rdev->ddev->mode_config.mutex); +} + +#define to_radeon_fbdev(x) container_of(x, struct radeon_fbdev, fb) + +static int radeon_dirty(struct drm_framebuffer *fb, + struct drm_file *file_priv, + unsigned flags, unsigned color, + struct drm_clip_rect *clips, + unsigned num_clips) +{ + __radeon_dirty(to_radeon_fbdev(fb), clips); + return 0; +} + +static void radeon_fb_helper_dirty_work(struct work_struct *work) +{ + struct radeon_fbdev *helper = container_of(work, + struct radeon_fbdev, dirty_work); + struct drm_clip_rect *clip = &helper->dirty_clip; + struct drm_clip_rect clip_copy; + unsigned long flags; + spin_lock_irqsave(&helper->dirty_lock, flags); + clip_copy = *clip; + clip->x1 = clip->y1 = ~0; + clip->x2 = clip->y2 = 0; + spin_unlock_irqrestore(&helper->dirty_lock, flags); + + /* call dirty callback only when it has been really touched */ + if (!(clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2)) + return; + radeon_dirty(&helper->fb, NULL, 0, 0, &clip_copy, 1); +} + +static void radeon_fb_helper_dirty(struct fb_info *info, u32 x, u32 y, + u32 width, u32 height) +{ + struct radeon_fbdev *rfbdev = info->par; + struct drm_clip_rect *clip = &rfbdev->dirty_clip; + unsigned long flags; + + spin_lock_irqsave(&rfbdev->dirty_lock, flags); + clip->x1 = min_t(u32, clip->x1, x); + clip->y1 = min_t(u32, clip->y1, y); + clip->x2 = max_t(u32, clip->x2, x + width); + clip->y2 = max_t(u32, clip->y2, y + height); + spin_unlock_irqrestore(&rfbdev->dirty_lock, flags); + + if (info->state == FBINFO_STATE_RUNNING) + queue_work(system_long_wq, &rfbdev->dirty_work); +} + +static void radeon_fillrect(struct fb_info *p, const struct fb_fillrect *rect) +{ + sys_fillrect(p, rect); + radeon_fb_helper_dirty(p, rect->dx, rect->dy, + rect->width, rect->height); +} + +static void 
radeon_copyarea(struct fb_info *p, const struct fb_copyarea *area) +{ + sys_copyarea(p, area); + radeon_fb_helper_dirty(p, area->dx, area->dy, + area->width, area->height); +} + +static void radeon_imageblit(struct fb_info *p, const struct fb_image *image) +{ + sys_imageblit(p, image); + radeon_fb_helper_dirty(p, image->dx, image->dy, + image->width, image->height); + +} + +static ssize_t radeon_fbwrite(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos) +{ + ssize_t ret; + ret = fb_sys_write(info, buf, count, ppos); + if (ret > 0) + radeon_fb_helper_dirty(info, 0, 0, info->var.xres, + info->var.yres); + return ret; +} + static struct fb_ops radeonfb_ops = { .owner = THIS_MODULE, DRM_FB_HELPER_DEFAULT_OPS, .fb_open = radeonfb_open, .fb_release = radeonfb_release, - .fb_fillrect = drm_fb_helper_cfb_fillrect, - .fb_copyarea = drm_fb_helper_cfb_copyarea, - .fb_imageblit = drm_fb_helper_cfb_imageblit, + .fb_fillrect = radeon_fillrect, + .fb_copyarea = radeon_copyarea, + .fb_imageblit = radeon_imageblit, + .fb_read = fb_sys_read, + .fb_write = radeon_fbwrite, }; @@ -208,6 +335,56 @@ out_unref: return ret; } + +static struct radeon_bo *radeon_mk_obj(struct radeon_device *rdev, + unsigned size, int domain, + uint64_t *gpu_addr, void **addr) +{ + struct radeon_bo *obj = NULL; + int r; + r = radeon_bo_create(rdev, size, PAGE_SIZE, true, + domain, 0, NULL, NULL, &obj); + if (r) { + DRM_ERROR("Failed to create object\n"); + goto out_lclean; + } + + r = radeon_bo_reserve(obj, false); + if (unlikely(r != 0)) { + DRM_ERROR("Failed to reserve object\n"); + goto out_lclean_unref; + } + r = radeon_bo_pin(obj, domain, gpu_addr); + if (r) { + DRM_ERROR("Failed to pin object\n"); + goto out_lclean_unres; + } + + r = radeon_bo_kmap(obj, addr); + if (r) { + DRM_ERROR("Failed to map object\n"); + goto out_lclean_unpin; + } + return obj; + +out_lclean_unpin: + radeon_bo_unpin(obj); +out_lclean_unres: + radeon_bo_unreserve(obj); +out_lclean_unref: + 
radeon_bo_unref(&obj); +out_lclean: + return NULL; +} + +static void radeon_rm_obj(struct radeon_bo *obj) +{ + radeon_bo_kunmap(obj); + radeon_bo_unpin(obj); + radeon_bo_unreserve(obj); + radeon_bo_unref(&obj); +} + static int radeonfb_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { @@ -240,6 +417,26 @@ static int radeonfb_create(struct drm_fb_helper *helper, rbo = gem_to_radeon_bo(gobj); + if (!rdev->accel_working) + radeon_fbdev_accel = 0; + if (radeon_fbdev_accel) { + void *addr; + uint64_t gpu_addr; + rfbdev->rbosys = radeon_mk_obj(rdev, gobj->size, + RADEON_GEM_DOMAIN_GTT, + &gpu_addr, &addr); + if (rfbdev->rbosys == NULL) { + ret = -ENOMEM; + goto out; + } + } else { + radeonfb_ops.fb_fillrect = cfb_fillrect; + radeonfb_ops.fb_copyarea = cfb_copyarea; + radeonfb_ops.fb_imageblit = cfb_imageblit; + radeonfb_ops.fb_read = NULL; + radeonfb_ops.fb_write = NULL; + } + /* okay we have an object now allocate the framebuffer */ info = drm_fb_helper_alloc_fbi(helper); if (IS_ERR(info)) { @@ -263,12 +460,16 @@ static int radeonfb_create(struct drm_fb_helper *helper, memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo)); + if (radeon_fbdev_accel) + info->flags |= FBINFO_READS_FAST; info->fbops = &radeonfb_ops; tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start; info->fix.smem_start = rdev->mc.aper_base + tmp; info->fix.smem_len = radeon_bo_size(rbo); info->screen_base = rbo->kptr; + if (radeon_fbdev_accel) + info->screen_base = rfbdev->rbosys->kptr; info->screen_size = radeon_bo_size(rbo); drm_fb_helper_fill_info(info, &rfbdev->helper, sizes); @@ -294,6 +495,8 @@ static int radeonfb_create(struct drm_fb_helper *helper, return 0; out: + if (rfbdev->rbosys) + radeon_rm_obj(rfbdev->rbosys); if (rbo) { } @@ -312,6 +515,12 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb drm_fb_helper_unregister_fbi(&rfbdev->helper); + if (rfbdev->rbosys) { + cancel_work_sync(&rfbdev->dirty_work); + 
radeon_rm_obj(rfbdev->rbosys); + rfbdev->rbosys = NULL; + } + if (fb->obj[0]) { radeonfb_destroy_pinned_object(fb->obj[0]); fb->obj[0] = NULL; @@ -350,7 +559,11 @@ int radeon_fbdev_init(struct radeon_device *rdev) rfbdev->rdev = rdev; rdev->mode_info.rfbdev = rfbdev; - +#ifdef CONFIG_MCST + spin_lock_init(&rfbdev->dirty_lock); + INIT_WORK(&rfbdev->dirty_work, radeon_fb_helper_dirty_work); + rfbdev->dirty_clip.x1 = rfbdev->dirty_clip.y1 = ~0; +#endif drm_fb_helper_prepare(rdev->ddev, &rfbdev->helper, &radeon_fb_helper_funcs); @@ -393,6 +606,11 @@ void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state) { if (rdev->mode_info.rfbdev) drm_fb_helper_set_suspend(&rdev->mode_info.rfbdev->helper, state); + +#ifdef CONFIG_MCST + if (rdev->mode_info.rfbdev && state && radeon_fbdev_accel) + cancel_work_sync(&rdev->mode_info.rfbdev->dirty_work); +#endif } bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 43f2f9307866..f51827a22db1 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -70,7 +70,10 @@ static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring) struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; if (likely(rdev->wb.enabled || !drv->scratch_reg)) { if (drv->cpu_addr) { - *drv->cpu_addr = cpu_to_le32(seq); + if (ring != R600_RING_TYPE_UVD_INDEX) + *drv->cpu_addr = cpu_to_le32(seq); + else + writel(seq, drv->cpu_addr); } } else { WREG32(drv->scratch_reg, seq); @@ -93,7 +96,10 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring) if (likely(rdev->wb.enabled || !drv->scratch_reg)) { if (drv->cpu_addr) { - seq = le32_to_cpu(*drv->cpu_addr); + if (ring != R600_RING_TYPE_UVD_INDEX) + seq = le32_to_cpu(*drv->cpu_addr); + else + seq = readl(drv->cpu_addr); } else { seq = lower_32_bits(atomic64_read(&drv->last_seq)); } diff --git 
a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 03d3550ecc7c..45f5a9487c07 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -30,6 +30,9 @@ #include #include #include +#ifdef CONFIG_MCST +#include +#endif #include #include @@ -127,6 +130,10 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) !pci_is_thunderbolt_attached(dev->pdev)) flags |= RADEON_IS_PX; +#ifdef CONFIG_MCST + /* lock console to avoid hw deadlock on output to vga console */ + console_lock(); +#endif /* radeon_device_init should report only fatal error * like memory allocation failure or iomapping failure, * or memory manager initialization failure, it must @@ -134,6 +141,9 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) * VRAM allocation */ r = radeon_device_init(rdev, dev, dev->pdev, flags); +#ifdef CONFIG_MCST + console_unlock(); +#endif if (r) { dev_err(&dev->pdev->dev, "Fatal error during GPU init\n"); goto out; diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index a5e1d2139e80..cf3bad8df700 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -23,6 +23,10 @@ * Authors: Michel Dänzer */ +#ifdef CONFIG_E2K +#include +#endif + #include #include "radeon_reg.h" #include "radeon.h" @@ -30,6 +34,18 @@ #define RADEON_TEST_COPY_BLIT 1 #define RADEON_TEST_COPY_DMA 0 +#ifdef CONFIG_MCST +#if BITS_PER_LONG == 32 +#define fb_readp fb_readl +#define fb_writep fb_writel +#elif BITS_PER_LONG == 64 +#define fb_readp fb_readq +#define fb_writep fb_writeq +#else +#error BITS_PER_LONG not 32 or 64 +#endif +#endif + /* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) @@ -59,6 +75,10 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) */ n = rdev->mc.gtt_size - rdev->gart_pin_size; n /= 
size; +#ifdef CONFIG_E2K + if (cpu_has(CPU_HWBUG_BAD_RESET)) + n = 2; +#endif gtt_obj = kcalloc(n, sizeof(*gtt_obj), GFP_KERNEL); if (!gtt_obj) { @@ -149,11 +169,12 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) vram_start = vram_map, vram_end = vram_map + size; vram_start < vram_end; gtt_start++, vram_start++) { - if (*vram_start != gtt_start) { + if ((void *) fb_readp(vram_start) != gtt_start) { DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, " "expected 0x%p (GTT/VRAM offset " "0x%16llx/0x%16llx)\n", - i, *vram_start, gtt_start, + i, (void *)fb_readp(vram_start), + (void *)gtt_start, (unsigned long long) (gtt_addr - rdev->mc.gtt_start + (void*)gtt_start - gtt_map), @@ -161,9 +182,16 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) (vram_addr - rdev->mc.vram_start + (void*)gtt_start - gtt_map)); radeon_bo_kunmap(vram_obj); +#ifdef CONFIG_E2K + if (cpu_has(CPU_HWBUG_BAD_RESET)) { + pr_emerg("Bad reset detected." + " Restarting machine\n"); + machine_restart(NULL); + } +#endif goto out_lclean_unpin; } - *vram_start = vram_start; + fb_writep((unsigned long) vram_start, vram_start); } radeon_bo_kunmap(vram_obj); @@ -212,6 +240,13 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) (gtt_addr - rdev->mc.gtt_start + (void*)vram_start - vram_map)); radeon_bo_kunmap(gtt_obj[i]); +#ifdef CONFIG_E2K + if (cpu_has(CPU_HWBUG_BAD_RESET)) { + pr_emerg("Bad reset detected." 
+ " Restarting machine\n"); + machine_restart(NULL); + } +#endif goto out_lclean_unpin; } } diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index a18bf70a251e..ed18293df3af 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -791,19 +791,19 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring, return r; /* stitch together an UVD create msg */ - msg[0] = cpu_to_le32(0x00000de4); - msg[1] = cpu_to_le32(0x00000000); - msg[2] = cpu_to_le32(handle); - msg[3] = cpu_to_le32(0x00000000); - msg[4] = cpu_to_le32(0x00000000); - msg[5] = cpu_to_le32(0x00000000); - msg[6] = cpu_to_le32(0x00000000); - msg[7] = cpu_to_le32(0x00000780); - msg[8] = cpu_to_le32(0x00000440); - msg[9] = cpu_to_le32(0x00000000); - msg[10] = cpu_to_le32(0x01b37000); + writel(0x00000de4, msg + 0); + writel(0x00000000, msg + 1); + writel(handle, msg + 2); + writel(0x00000000, msg + 3); + writel(0x00000000, msg + 4); + writel(0x00000000, msg + 5); + writel(0x00000000, msg + 6); + writel(0x00000780, msg + 7); + writel(0x00000440, msg + 8); + writel(0x00000000, msg + 9); + writel(0x01b37000, msg + 10); for (i = 11; i < 1024; ++i) - msg[i] = cpu_to_le32(0x0); + writel(0x0, msg + i); r = radeon_uvd_send_msg(rdev, ring, addr, fence); radeon_bo_unreserve(rdev->uvd.vcpu_bo); @@ -827,12 +827,12 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring, return r; /* stitch together an UVD destroy msg */ - msg[0] = cpu_to_le32(0x00000de4); - msg[1] = cpu_to_le32(0x00000002); - msg[2] = cpu_to_le32(handle); - msg[3] = cpu_to_le32(0x00000000); + writel(0x00000de4, msg + 0); + writel(0x00000002, msg + 1); + writel(handle, msg + 2); + writel(0x00000000, msg + 3); for (i = 4; i < 1024; ++i) - msg[i] = cpu_to_le32(0x0); + writel(0x0, msg + i); r = radeon_uvd_send_msg(rdev, ring, addr, fence); radeon_bo_unreserve(rdev->uvd.vcpu_bo); diff --git a/drivers/gpu/drm/radeon/radeon_vce.c 
b/drivers/gpu/drm/radeon/radeon_vce.c index 59db54ace428..61ad11ad3f99 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -238,13 +238,20 @@ int radeon_vce_resume(struct radeon_device *rdev) dev_err(rdev->dev, "(%d) VCE map failed\n", r); return r; } - +#ifdef CONFIG_MCST + memset_io(cpu_addr, 0, radeon_bo_size(rdev->vce.vcpu_bo)); +#else memset(cpu_addr, 0, radeon_bo_size(rdev->vce.vcpu_bo)); +#endif if (rdev->family < CHIP_BONAIRE) r = vce_v1_0_load_fw(rdev, cpu_addr); else +#ifdef CONFIG_MCST + memcpy_toio(cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size); +#else memcpy(cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size); +#endif radeon_bo_kunmap(rdev->vce.vcpu_bo); radeon_bo_unreserve(rdev->vce.vcpu_bo); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 1d8efb0eefdb..dd878638f90d 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -5712,7 +5712,7 @@ u32 si_get_csb_size(struct radeon_device *rdev) return count; } -void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer) +void si_get_csb_buffer(struct radeon_device *rdev, u32 __iomem *buffer) { u32 count = 0, i; const struct cs_section_def *sect = NULL; @@ -5723,53 +5723,70 @@ void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer) if (buffer == NULL) return; - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); - buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); + writel(PACKET3(PACKET3_PREAMBLE_CNTL, 0), buffer + count); + count++; + writel(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE, buffer + count); + count++; - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1)); - buffer[count++] = cpu_to_le32(0x80000000); - buffer[count++] = cpu_to_le32(0x80000000); + writel(PACKET3(PACKET3_CONTEXT_CONTROL, 1), buffer + count); + count++; + writel(0x80000000, buffer + count); + count++; + writel(0x80000000, buffer + count); + count++; for (sect = 
rdev->rlc.cs_data; sect->section != NULL; ++sect) { for (ext = sect->section; ext->extent != NULL; ++ext) { if (sect->id == SECT_CONTEXT) { - buffer[count++] = - cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); - buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000); - for (i = 0; i < ext->reg_count; i++) - buffer[count++] = cpu_to_le32(ext->extent[i]); + writel(PACKET3(PACKET3_SET_CONTEXT_REG, + ext->reg_count), buffer + count); + count++; + writel(ext->reg_index - 0xa000, buffer + count); + count++; + for (i = 0; i < ext->reg_count; i++, count++) + writel(ext->extent[i], buffer + count); } else { return; } } } - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1)); - buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); + writel(PACKET3(PACKET3_SET_CONTEXT_REG, 1), buffer + count); + count++; + writel(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START, buffer + count); + count++; switch (rdev->family) { case CHIP_TAHITI: case CHIP_PITCAIRN: - buffer[count++] = cpu_to_le32(0x2a00126a); + writel(0x2a00126a, buffer + count); + count++; break; case CHIP_VERDE: - buffer[count++] = cpu_to_le32(0x0000124a); + writel(0x0000124a, buffer + count); + count++; break; case CHIP_OLAND: - buffer[count++] = cpu_to_le32(0x00000082); + writel(0x00000082, buffer + count); + count++; break; case CHIP_HAINAN: - buffer[count++] = cpu_to_le32(0x00000000); + writel(0x00000000, buffer + count); + count++; break; default: - buffer[count++] = cpu_to_le32(0x00000000); + writel(0x00000000, buffer + count); + count++; break; } - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); - buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); + writel(PACKET3(PACKET3_PREAMBLE_CNTL, 0), buffer + count); + count++; + writel(PACKET3_PREAMBLE_END_CLEAR_STATE, buffer + count); + count++; - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0)); - buffer[count++] = cpu_to_le32(0); + 
writel(PACKET3(PACKET3_CLEAR_STATE, 0), buffer + count); + count++; + writel(0, buffer + count); } static void si_init_pg(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c index bd75bbcf5bf6..038efc598f04 100644 --- a/drivers/gpu/drm/radeon/vce_v1_0.c +++ b/drivers/gpu/drm/radeon/vce_v1_0.c @@ -187,6 +187,22 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data) return -EINVAL; data += (256 - 64) / 4; +#ifdef CONFIG_MCST + writel(sign->val[i].nonce[0], &data[0]); + writel(sign->val[i].nonce[1], &data[1]); + writel(sign->val[i].nonce[2], &data[2]); + writel(sign->val[i].nonce[3], &data[3]); + writel(cpu_to_le32(le32_to_cpu(sign->len) + 64), &data[4]); + + memset_io(&data[5], 0, 44); + memcpy_toio(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign)); + + data += le32_to_cpu(readl(&data[4])) / 4; + writel(sign->val[i].sigval[0], &data[0]); + writel(sign->val[i].sigval[1], &data[1]); + writel(sign->val[i].sigval[2], &data[2]); + writel(sign->val[i].sigval[3], &data[3]); +#else data[0] = sign->val[i].nonce[0]; data[1] = sign->val[i].nonce[1]; data[2] = sign->val[i].nonce[2]; @@ -201,6 +217,7 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data) data[1] = sign->val[i].sigval[1]; data[2] = sign->val[i].sigval[2]; data[3] = sign->val[i].sigval[3]; +#endif rdev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index fe81c565e7ef..dbd56ab96110 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -539,7 +539,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp) tmp = pgprot_noncached(tmp); #endif #if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \ - defined(__powerpc__) || defined(__mips__) + defined(__powerpc__) || defined(__mips__) || defined(__e2k__) if (caching_flags & TTM_PL_FLAG_WC) tmp = pgprot_writecombine(tmp); else diff --git 
a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 627f8dc91d0e..39fbc0a2f6b7 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -1119,12 +1119,17 @@ int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt, 0, num_pages * PAGE_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, tt->dma_address[i])) { +#ifdef CONFIG_MCST /* number of maps do not equal to i */ + tt->dma_address[i] = 0; + ttm_unmap_and_unpopulate_pages(dev, tt); +#else while (i--) { dma_unmap_page(dev, tt->dma_address[i], - PAGE_SIZE, DMA_BIDIRECTIONAL); + PAGE_SIZE, DMA_BIDIRECTIONAL); tt->dma_address[i] = 0; } ttm_pool_unpopulate(&tt->ttm); +#endif return -EFAULT; } diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index bc1ab6060dc6..5a94260b8f8a 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -46,7 +46,7 @@ static char *udl_vidreg_unlock(char *buf) */ static char *udl_set_blank(char *buf, int dpms_mode) { - u8 reg; + u8 reg = 0; switch (dpms_mode) { case DRM_MODE_DPMS_OFF: reg = 0x07; diff --git a/drivers/gpu/drm/vivante/Makefile b/drivers/gpu/drm/vivante/Makefile new file mode 100644 index 000000000000..d7a8624abcca --- /dev/null +++ b/drivers/gpu/drm/vivante/Makefile @@ -0,0 +1,29 @@ +############################################################################## +# +# Copyright (C) 2005 - 2015 by Vivante Corp. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the license, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not write to the Free Software +# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +# +############################################################################## + + +# +# Makefile for the drm device driver. This driver provides support for the +# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. + +ccflags-y := -Iinclude/drm +vivante-y := vivante_drv.o + +obj-$(CONFIG_DRM_VIVANTE) += vivante.o diff --git a/drivers/gpu/drm/vivante/vivante_drv.c b/drivers/gpu/drm/vivante/vivante_drv.c new file mode 100644 index 000000000000..508d923be7c8 --- /dev/null +++ b/drivers/gpu/drm/vivante/vivante_drv.c @@ -0,0 +1,148 @@ +/**************************************************************************** +* +* Copyright (C) 2005 - 2015 by Vivante Corp. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the license, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not write to the Free Software +* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+* +*****************************************************************************/ + + +#include +#include + +#include +#include "vivante_drv.h" + +#include + + +static char platformdevicename[] = "Vivante GCCore"; +static struct platform_device *pplatformdev=NULL; + + +static const struct file_operations vivante_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif + .mmap = drm_legacy_mmap, + .poll = drm_poll, + .read = drm_read, +}; + +int vivante_driver_load(struct drm_device *drm, unsigned long flags) +{ + struct platform_device *pdev = to_platform_device(drm->dev); + platform_set_drvdata(pdev, drm); + return 0; +} + +static struct drm_driver vivante_driver = { + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,38) + .driver_features = DRIVER_LEGACY, +#else + .driver_features = DRIVER_USE_MTRR | DRIVER_USE_PLATFORM_DEVICE, +#endif +// .reclaim_buffers = drm_core_reclaim_buffers, + .load = vivante_driver_load, + .fops = &vivante_fops, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + +static int shmob_drm_probe(struct platform_device *platdev) +{ + struct drm_driver *driver = &vivante_driver; + struct drm_device *dev; + int ret; + + DRM_DEBUG("\n"); + + dev = drm_dev_alloc(driver, &platdev->dev); + if (IS_ERR(dev)) + return PTR_ERR(dev); + + ret = drm_dev_register(dev, 0); + if (ret) + goto err_free; + + DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", + driver->name, driver->major, driver->minor, driver->patchlevel, + driver->date, dev->primary->index); + + return 0; + +err_free: + drm_dev_put(dev); + return ret; +} + +static int shmob_drm_remove(struct platform_device *pdev) +{ + drm_put_dev(platform_get_drvdata(pdev)); + + return 0; +} + +static struct platform_driver shmob_drm_platform_driver = { + .probe = shmob_drm_probe, + 
.remove = shmob_drm_remove, + .driver = { + .owner = THIS_MODULE, + .name = platformdevicename, + }, +}; + +static int __init vivante_init(void) +{ + int retcode; + + retcode = platform_driver_register(&shmob_drm_platform_driver); + if (retcode < 0) + goto out_ipp; + + pplatformdev=platform_device_register_simple(platformdevicename,-1,NULL,0); + if (pplatformdev==NULL) printk(KERN_ERR"Platform device is null\n"); + +out_ipp: + return retcode; + +} + +static void __exit vivante_exit(void) +{ + if (pplatformdev) { + platform_device_unregister(pplatformdev); + platform_driver_unregister(&shmob_drm_platform_driver); + pplatformdev=NULL; + } + +} + +module_init(vivante_init); +module_exit(vivante_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/vivante/vivante_drv.h b/drivers/gpu/drm/vivante/vivante_drv.h new file mode 100644 index 000000000000..ba3a960f5575 --- /dev/null +++ b/drivers/gpu/drm/vivante/vivante_drv.h @@ -0,0 +1,38 @@ +/**************************************************************************** +* +* Copyright (C) 2005 - 2015 by Vivante Corp. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the license, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not write to the Free Software +* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+* +*****************************************************************************/ + + +#ifndef __VIVANTE_DRV_H__ +#define __VIVANTE_DRV_H__ + +/* General customization: + */ + +#define DRIVER_AUTHOR "Vivante Inc." + +#define DRIVER_NAME "vivante" +#define DRIVER_DESC "Vivante GCCore" +#define DRIVER_DATE "20120216" + +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 +#define DRIVER_PATCHLEVEL 0 + +#endif diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 13a6b4afb4b3..251316f9472c 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -1161,6 +1161,46 @@ config SENSORS_LM93 This driver can also be built as a module. If so, the module will be called lm93. +config SENSORS_L_P1MON + tristate "MCST L_P1MON" + depends on I2C && EXPERIMENTAL + default m + help + If you say yes here you get support for MCST Processor-1 I2C monitor. + + This driver can also be built as a module. If so, the module will + be called l_p1mon. + +config SENSORS_LM95231 + tristate "National Semiconductor LM95231" + depends on I2C + help + If you say yes here you get support for LM95231 sensor chip. + + This driver can also be built as a module. If so, the module + will be called lm95231. + + +config SENSORS_L_P1MON + tristate "MCST L_P1MON" + depends on I2C && EXPERIMENTAL + default m + help + If you say yes here you get support for MCST Processor-1 I2C monitor. + + This driver can also be built as a module. If so, the module will + be called l_p1mon. + +config SENSORS_LM95231 + tristate "National Semiconductor LM95231" + depends on I2C + help + If you say yes here you get support for LM95231 sensor chip. + + This driver can also be built as a module. If so, the module + will be called lm95231. 
+ + config SENSORS_LM95234 tristate "National Semiconductor LM95234 and compatibles" depends on I2C diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 40c036ea45e6..9e8925d86bea 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -102,6 +102,7 @@ obj-$(CONFIG_SENSORS_LM87) += lm87.o obj-$(CONFIG_SENSORS_LM90) += lm90.o obj-$(CONFIG_SENSORS_LM92) += lm92.o obj-$(CONFIG_SENSORS_LM93) += lm93.o +obj-$(CONFIG_SENSORS_LM95231) += lm95231.o obj-$(CONFIG_SENSORS_LM95234) += lm95234.o obj-$(CONFIG_SENSORS_LM95241) += lm95241.o obj-$(CONFIG_SENSORS_LM95245) += lm95245.o @@ -114,6 +115,7 @@ obj-$(CONFIG_SENSORS_LTC4245) += ltc4245.o obj-$(CONFIG_SENSORS_LTC4260) += ltc4260.o obj-$(CONFIG_SENSORS_LTC4261) += ltc4261.o obj-$(CONFIG_SENSORS_LTQ_CPUTEMP) += ltq-cputemp.o +obj-$(CONFIG_SENSORS_L_P1MON) += l_p1mon.o obj-$(CONFIG_SENSORS_MAX1111) += max1111.o obj-$(CONFIG_SENSORS_MAX16065) += max16065.o obj-$(CONFIG_SENSORS_MAX1619) += max1619.o diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 01c2eeb02aa9..ae965f3feb2c 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c @@ -20,6 +20,8 @@ #include #include #include +#include +#include /* Indexes for the sysfs hooks */ @@ -186,6 +188,17 @@ static const struct of_device_id __maybe_unused adt7475_of_match[] = { }; MODULE_DEVICE_TABLE(of, adt7475_of_match); +#ifdef CONFIG_MCST +#define MAX_SENSORS 3 +#define MAX_PWM_DEVICES 3 + +struct adt7475_thermal_sensor { + struct adt7475_data *data; + struct thermal_zone_device *tz; + unsigned int sensor_id; +}; +#endif + struct adt7475_data { struct i2c_client *client; struct mutex lock; @@ -213,8 +226,19 @@ struct adt7475_data { u8 vid; u8 vrm; const struct attribute_group *groups[9]; +#ifdef CONFIG_MCST + struct pwm_chip chip; + struct adt7475_thermal_sensor thermal_sensor[MAX_SENSORS]; +#endif }; +#ifdef CONFIG_MCST +static inline struct adt7475_data *to_pwm(struct pwm_chip *chip) +{ + return container_of(chip, struct 
adt7475_data, chip); +} +#endif + static struct i2c_driver adt7475_driver; static struct adt7475_data *adt7475_update_device(struct device *dev); static void adt7475_read_hystersis(struct i2c_client *client); @@ -359,7 +383,7 @@ static ssize_t voltage_store(struct device *dev, mutex_lock(&data->lock); data->voltage[sattr->nr][sattr->index] = - volt2reg(sattr->index, val, data->bypass_attn); + volt2reg(sattr->index, val, data->bypass_attn); if (sattr->index < ADT7475_VOLTAGE_COUNT) { if (sattr->nr == MIN) @@ -672,7 +696,7 @@ static ssize_t point2_store(struct device *dev, struct device_attribute *attr, */ temp = reg2temp(data, data->temp[AUTOMIN][sattr->index]); val = clamp_val(val, temp + autorange_table[0], - temp + autorange_table[ARRAY_SIZE(autorange_table) - 1]); + temp + autorange_table[ARRAY_SIZE(autorange_table) - 1]); val -= temp; /* Find the nearest table entry to what the user wrote */ @@ -944,6 +968,9 @@ static ssize_t pwmctrl_store(struct device *dev, int r; long val; + if (of_get_property(dev->of_node, "#pwm-cells", NULL)) + return -EPERM; + if (kstrtol(buf, 10, &val)) return -EINVAL; @@ -1008,8 +1035,8 @@ static ssize_t pwmfreq_store(struct device *dev, } static ssize_t pwm_use_point2_pwm_at_crit_show(struct device *dev, - struct device_attribute *devattr, - char *buf) + struct device_attribute *devattr, + char *buf) { struct adt7475_data *data = adt7475_update_device(dev); @@ -1020,8 +1047,8 @@ static ssize_t pwm_use_point2_pwm_at_crit_show(struct device *dev, } static ssize_t pwm_use_point2_pwm_at_crit_store(struct device *dev, - struct device_attribute *devattr, - const char *buf, size_t count) + struct device_attribute *devattr, + const char *buf, size_t count) { struct adt7475_data *data = dev_get_drvdata(dev); struct i2c_client *client = data->client; @@ -1457,6 +1484,109 @@ static int adt7475_update_limits(struct i2c_client *client) return 0; } +#ifdef CONFIG_MCST +static int adt7475_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + 
const struct pwm_state *state) +{ + struct adt7475_data *data = to_pwm(chip); + struct i2c_client *client = data->client; + int ret = -EINVAL; + u8 val; + u8 pwm_mode; + + pwm_mode = adt7475_read(PWM_CONFIG_REG(pwm->hwpwm)); + pwm_mode = (pwm_mode >> 5) & 7; + if (pwm_mode != 7) + return -EPERM; + + if (state->period > 1) { + mutex_lock(&data->lock); + val = state->duty_cycle * 255 / (state->period - 1); + val = clamp_val(val, 0, 255); + ret = i2c_smbus_write_byte_data(client, PWM_REG(pwm->hwpwm), val); + mutex_unlock(&data->lock); + } + + return ret; +} + +static const struct pwm_ops adt7475_pwm_ops = { + .apply = adt7475_pwm_apply, + .owner = THIS_MODULE, +}; + +static void adt7475_pwm_remove(void *arg) +{ + struct adt7475_data *data = arg; + + pwmchip_remove(&data->chip); +} + +static void adt7475_init_pwm(struct adt7475_data *data) +{ + struct i2c_client *client = data->client; + int ret; + + /* Initialize chip */ + + data->chip.dev = &client->dev; + data->chip.ops = &adt7475_pwm_ops; + data->chip.base = -1; + data->chip.npwm = MAX_PWM_DEVICES; + + ret = pwmchip_add(&data->chip); + if (ret < 0) { + dev_err(&client->dev, "pwmchip_add() failed: %d\n", ret); + return; + } + + devm_add_action(&client->dev, adt7475_pwm_remove, data); +} + +static int adt7475_get_temp(void *data, int *temp) +{ + struct adt7475_thermal_sensor *thermal_sensor = data; + struct i2c_client *client = thermal_sensor->data->client; + int ret; + u16 val; + u8 ext; + + ext = adt7475_read(REG_EXTEND2); + if (ext < 0) + return ext; + ret = adt7475_read(TEMP_REG(thermal_sensor->sensor_id)); + if (ret < 0) + return ret; + val = (ret << 2) | ((ext >> ((thermal_sensor->sensor_id + 1) * 2)) & 3); + *temp = reg2temp(thermal_sensor->data, val); + + return 0; +} + +static const struct thermal_zone_of_device_ops adt7475_tz_ops = { + .get_temp = adt7475_get_temp, +}; + +static void adt7475_init_thermal(struct adt7475_data *data) +{ + struct i2c_client *client = data->client; + struct 
adt7475_thermal_sensor *thermal_sensor; + unsigned int i; + + thermal_sensor = data->thermal_sensor; + for (i = 0; i < MAX_SENSORS; i++, thermal_sensor++) { + thermal_sensor->data = data; + thermal_sensor->sensor_id = i; + thermal_sensor->tz = devm_thermal_zone_of_sensor_register(&client->dev, + i, thermal_sensor, + &adt7475_tz_ops); + if (IS_ERR(thermal_sensor->tz)) { + dev_warn(&client->dev, "unable to register thermal sensor %ld\n", + PTR_ERR(thermal_sensor->tz)); + } + } +} +#endif static int adt7475_probe(struct i2c_client *client, const struct i2c_device_id *id) @@ -1551,7 +1681,7 @@ static int adt7475_probe(struct i2c_client *client, data->bypass_attn = (0x3 << 3) | 0x3; } else { data->bypass_attn = ((data->config4 & CONFIG4_ATTN_IN10) >> 4) | - ((data->config4 & CONFIG4_ATTN_IN43) >> 3); + ((data->config4 & CONFIG4_ATTN_IN43) >> 3); } data->bypass_attn &= data->has_voltage; @@ -1630,6 +1760,11 @@ static int adt7475_probe(struct i2c_client *client, if (ret) return ret; +#ifdef CONFIG_MCST + adt7475_init_thermal(data); + if (IS_ENABLED(CONFIG_PWM)) + adt7475_init_pwm(data); +#endif return 0; } diff --git a/drivers/hwmon/l_p1mon.c b/drivers/hwmon/l_p1mon.c new file mode 100644 index 000000000000..4668eb307007 --- /dev/null +++ b/drivers/hwmon/l_p1mon.c @@ -0,0 +1,589 @@ +/* + * l_p1mon.c - Linux kernel module for + * MCST designed Processor-1 i2c-slave monitoring module. + * + * Copyright (c) 2013 Evgeny Kravtsunov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include + +#define L_P1MON_DRV_NAME "l_p1mon" +#define DRIVER_VERSION "1.0" + +/* 64 Entries available */ +#define L_P1MON_DATA_0_IN 0x00 +#define L_P1MON_DATA_1_IN 0x01 +#define L_P1MON_DATA_2_IN 0x02 +#define L_P1MON_DATA_3_IN 0x03 +#define L_P1MON_DATA_4_IN 0x04 +#define L_P1MON_DATA_5_IN 0x05 +#define L_P1MON_DATA_6_IN 0x06 +#define L_P1MON_DATA_7_IN 0x07 +#define L_P1MON_DATA_8_IN 0x08 +#define L_P1MON_DATA_9_IN 0x09 +#define L_P1MON_DATA_10_IN 0x0a +#define L_P1MON_DATA_11_IN 0x0b +#define L_P1MON_DATA_12_IN 0x0c +#define L_P1MON_DATA_13_IN 0x0d +#define L_P1MON_DATA_14_IN 0x0e +#define L_P1MON_DATA_15_IN 0x0f +#define L_P1MON_DATA_16_IN 0x10 +#define L_P1MON_DATA_17_IN 0x11 +#define L_P1MON_DATA_18_IN 0x12 +#define L_P1MON_DATA_19_IN 0x13 +#define L_P1MON_DATA_20_IN 0x14 +#define L_P1MON_DATA_21_IN 0x15 +#define L_P1MON_DATA_22_IN 0x16 +#define L_P1MON_DATA_23_IN 0x17 +#define L_P1MON_DATA_24_IN 0x18 +#define L_P1MON_DATA_25_IN 0x19 +#define L_P1MON_DATA_26_IN 0x1a +#define L_P1MON_DATA_27_IN 0x1b +#define L_P1MON_DATA_28_IN 0x1c +#define L_P1MON_DATA_29_IN 0x1d +#define L_P1MON_DATA_30_IN 0x1e +#define L_P1MON_DATA_31_IN 0x1f +#define L_P1MON_DATA_32_IN 0x20 +#define L_P1MON_DATA_33_IN 0x21 +#define L_P1MON_DATA_34_IN 0x22 +#define L_P1MON_DATA_35_IN 0x23 +#define L_P1MON_DATA_36_IN 0x24 +#define L_P1MON_DATA_37_IN 0x25 +#define L_P1MON_DATA_38_IN 0x26 +#define L_P1MON_DATA_39_IN 0x27 +#define L_P1MON_DATA_40_IN 0x28 +#define L_P1MON_DATA_41_IN 0x29 +#define L_P1MON_DATA_42_IN 0x2a +#define L_P1MON_DATA_43_IN 0x2b +#define L_P1MON_DATA_44_IN 0x2c +#define L_P1MON_DATA_45_IN 0x2d +#define L_P1MON_DATA_46_IN 0x2e +#define L_P1MON_DATA_47_IN 0x2f +#define L_P1MON_DATA_48_IN 0x30 +#define L_P1MON_DATA_49_IN 
0x31 +#define L_P1MON_DATA_50_IN 0x32 +#define L_P1MON_DATA_51_IN 0x33 +#define L_P1MON_DATA_52_IN 0x34 +#define L_P1MON_DATA_53_IN 0x35 +#define L_P1MON_DATA_54_IN 0x36 +#define L_P1MON_DATA_55_IN 0x37 +#define L_P1MON_DATA_56_IN 0x38 +#define L_P1MON_DATA_57_IN 0x39 +#define L_P1MON_DATA_58_IN 0x3a +#define L_P1MON_DATA_59_IN 0x3b +#define L_P1MON_DATA_60_IN 0x3c +#define L_P1MON_DATA_61_IN 0x3d +#define L_P1MON_DATA_62_IN 0x3e +#define L_P1MON_DATA_63_IN 0x3f + +/* addr_inc bit - OR him to entry address + * for autoincrement: + */ +#define L_P1MON_INC_VAL 0x80 + +static int l_p1mon_init_client(struct i2c_client *client); +static struct l_p1mon_data *l_p1mon_update_data(struct device *dev); + +struct l_p1mon_data { + struct i2c_client *client; + struct mutex lock; + int autoincrement; + unsigned long last_updated, rate; /* in jiffies */ + char valid; /* zero until following fields are valid */ + + /* data from entries: */ + int data_0_in, data_1_in, data_2_in, data_3_in; + int data_4_in, data_5_in, data_6_in, data_7_in; + int data_8_in, data_9_in, data_10_in, data_11_in; + int data_12_in, data_13_in, data_14_in, data_15_in; + int data_16_in, data_17_in, data_18_in, data_19_in; + int data_20_in, data_21_in, data_22_in, data_23_in; + int data_24_in, data_25_in, data_26_in, data_27_in; + int data_28_in, data_29_in, data_30_in, data_31_in; + int data_32_in, data_33_in, data_34_in, data_35_in; + int data_36_in, data_37_in, data_38_in, data_39_in; + int data_40_in, data_41_in, data_42_in, data_43_in; + int data_44_in, data_45_in, data_46_in, data_47_in; + int data_48_in, data_49_in, data_50_in, data_51_in; + int data_52_in, data_53_in, data_54_in, data_55_in; + int data_56_in, data_57_in, data_58_in, data_59_in; + int data_60_in, data_61_in, data_62_in, data_63_in; +}; + +/* Sysfs stuff */ +#define show_entry(value) \ +static ssize_t show_##value(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct l_p1mon_data *data = 
l_p1mon_update_data(dev); \ + \ + if (data->value##_in < 0) \ + snprintf(buf, PAGE_SIZE - 1, "Error\n"); \ + else \ + snprintf(buf, PAGE_SIZE - 1, "%d\n", \ + data->value##_in); \ + return strlen(buf); \ +} + +show_entry(data_0); +show_entry(data_1); +show_entry(data_2); +show_entry(data_3); +show_entry(data_4); +show_entry(data_5); +show_entry(data_6); +show_entry(data_7); +show_entry(data_8); +show_entry(data_9); +show_entry(data_10); +show_entry(data_11); +show_entry(data_12); +show_entry(data_13); +show_entry(data_14); +show_entry(data_15); +show_entry(data_16); +show_entry(data_17); +show_entry(data_18); +show_entry(data_19); +show_entry(data_20); +show_entry(data_21); +show_entry(data_22); +show_entry(data_23); +show_entry(data_24); +show_entry(data_25); +show_entry(data_26); +show_entry(data_27); +show_entry(data_28); +show_entry(data_29); +show_entry(data_30); +show_entry(data_31); +show_entry(data_32); +show_entry(data_33); +show_entry(data_34); +show_entry(data_35); +show_entry(data_36); +show_entry(data_37); +show_entry(data_38); +show_entry(data_39); +show_entry(data_40); +show_entry(data_41); +show_entry(data_42); +show_entry(data_43); +show_entry(data_44); +show_entry(data_45); +show_entry(data_46); +show_entry(data_47); +show_entry(data_48); +show_entry(data_49); +show_entry(data_50); +show_entry(data_51); +show_entry(data_52); +show_entry(data_53); +show_entry(data_54); +show_entry(data_55); +show_entry(data_56); +show_entry(data_57); +show_entry(data_58); +show_entry(data_59); +show_entry(data_60); +show_entry(data_61); +show_entry(data_62); +show_entry(data_63); + +static DEVICE_ATTR(entry_0_value, S_IRUGO, show_data_0, NULL); +static DEVICE_ATTR(entry_1_value, S_IRUGO, show_data_1, NULL); +static DEVICE_ATTR(entry_2_value, S_IRUGO, show_data_2, NULL); +static DEVICE_ATTR(entry_3_value, S_IRUGO, show_data_3, NULL); +static DEVICE_ATTR(entry_4_value, S_IRUGO, show_data_4, NULL); +static DEVICE_ATTR(entry_5_value, S_IRUGO, show_data_5, NULL); 
+static DEVICE_ATTR(entry_6_value, S_IRUGO, show_data_6, NULL); +static DEVICE_ATTR(entry_7_value, S_IRUGO, show_data_7, NULL); +static DEVICE_ATTR(entry_8_value, S_IRUGO, show_data_8, NULL); +static DEVICE_ATTR(entry_9_value, S_IRUGO, show_data_9, NULL); +static DEVICE_ATTR(entry_10_value, S_IRUGO, show_data_10, NULL); +static DEVICE_ATTR(entry_11_value, S_IRUGO, show_data_11, NULL); +static DEVICE_ATTR(entry_12_value, S_IRUGO, show_data_12, NULL); +static DEVICE_ATTR(entry_13_value, S_IRUGO, show_data_13, NULL); +static DEVICE_ATTR(entry_14_value, S_IRUGO, show_data_14, NULL); +static DEVICE_ATTR(entry_15_value, S_IRUGO, show_data_15, NULL); +static DEVICE_ATTR(entry_16_value, S_IRUGO, show_data_16, NULL); +static DEVICE_ATTR(entry_17_value, S_IRUGO, show_data_17, NULL); +static DEVICE_ATTR(entry_18_value, S_IRUGO, show_data_18, NULL); +static DEVICE_ATTR(entry_19_value, S_IRUGO, show_data_19, NULL); +static DEVICE_ATTR(entry_20_value, S_IRUGO, show_data_20, NULL); +static DEVICE_ATTR(entry_21_value, S_IRUGO, show_data_21, NULL); +static DEVICE_ATTR(entry_22_value, S_IRUGO, show_data_22, NULL); +static DEVICE_ATTR(entry_23_value, S_IRUGO, show_data_23, NULL); +static DEVICE_ATTR(entry_24_value, S_IRUGO, show_data_24, NULL); +static DEVICE_ATTR(entry_25_value, S_IRUGO, show_data_25, NULL); +static DEVICE_ATTR(entry_26_value, S_IRUGO, show_data_26, NULL); +static DEVICE_ATTR(entry_27_value, S_IRUGO, show_data_27, NULL); +static DEVICE_ATTR(entry_28_value, S_IRUGO, show_data_28, NULL); +static DEVICE_ATTR(entry_29_value, S_IRUGO, show_data_29, NULL); +static DEVICE_ATTR(entry_30_value, S_IRUGO, show_data_30, NULL); +static DEVICE_ATTR(entry_31_value, S_IRUGO, show_data_31, NULL); +static DEVICE_ATTR(entry_32_value, S_IRUGO, show_data_32, NULL); +static DEVICE_ATTR(entry_33_value, S_IRUGO, show_data_33, NULL); +static DEVICE_ATTR(entry_34_value, S_IRUGO, show_data_34, NULL); +static DEVICE_ATTR(entry_35_value, S_IRUGO, show_data_35, NULL); +static 
DEVICE_ATTR(entry_36_value, S_IRUGO, show_data_36, NULL); +static DEVICE_ATTR(entry_37_value, S_IRUGO, show_data_37, NULL); +static DEVICE_ATTR(entry_38_value, S_IRUGO, show_data_38, NULL); +static DEVICE_ATTR(entry_39_value, S_IRUGO, show_data_39, NULL); +static DEVICE_ATTR(entry_40_value, S_IRUGO, show_data_40, NULL); +static DEVICE_ATTR(entry_41_value, S_IRUGO, show_data_41, NULL); +static DEVICE_ATTR(entry_42_value, S_IRUGO, show_data_42, NULL); +static DEVICE_ATTR(entry_43_value, S_IRUGO, show_data_43, NULL); +static DEVICE_ATTR(entry_44_value, S_IRUGO, show_data_44, NULL); +static DEVICE_ATTR(entry_45_value, S_IRUGO, show_data_45, NULL); +static DEVICE_ATTR(entry_46_value, S_IRUGO, show_data_46, NULL); +static DEVICE_ATTR(entry_47_value, S_IRUGO, show_data_47, NULL); +static DEVICE_ATTR(entry_48_value, S_IRUGO, show_data_48, NULL); +static DEVICE_ATTR(entry_49_value, S_IRUGO, show_data_49, NULL); +static DEVICE_ATTR(entry_50_value, S_IRUGO, show_data_50, NULL); +static DEVICE_ATTR(entry_51_value, S_IRUGO, show_data_51, NULL); +static DEVICE_ATTR(entry_52_value, S_IRUGO, show_data_52, NULL); +static DEVICE_ATTR(entry_53_value, S_IRUGO, show_data_53, NULL); +static DEVICE_ATTR(entry_54_value, S_IRUGO, show_data_54, NULL); +static DEVICE_ATTR(entry_55_value, S_IRUGO, show_data_55, NULL); +static DEVICE_ATTR(entry_56_value, S_IRUGO, show_data_56, NULL); +static DEVICE_ATTR(entry_57_value, S_IRUGO, show_data_57, NULL); +static DEVICE_ATTR(entry_58_value, S_IRUGO, show_data_58, NULL); +static DEVICE_ATTR(entry_59_value, S_IRUGO, show_data_59, NULL); +static DEVICE_ATTR(entry_60_value, S_IRUGO, show_data_60, NULL); +static DEVICE_ATTR(entry_61_value, S_IRUGO, show_data_61, NULL); +static DEVICE_ATTR(entry_62_value, S_IRUGO, show_data_62, NULL); +static DEVICE_ATTR(entry_63_value, S_IRUGO, show_data_63, NULL); + + +static ssize_t l_p1mon_show_rate(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + 
struct l_p1mon_data *data; + + data = i2c_get_clientdata(client); + return sprintf(buf, "%ld\n", data->rate); +} + +static ssize_t l_p1mon_store_rate(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct l_p1mon_data *data; + unsigned long val; + + data = i2c_get_clientdata(client); + + if (strict_strtoul(buf, 10, &val) <= 0) + return -EINVAL; + + data->rate = val; + return count; +} + +static DEVICE_ATTR(rate, S_IWUSR | S_IRUGO, l_p1mon_show_rate, + l_p1mon_store_rate); + +static ssize_t l_p1mon_show_autoincrement(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct l_p1mon_data *data; + + data = i2c_get_clientdata(client); + return sprintf(buf, "%d\n", data->autoincrement); +} + +static ssize_t l_p1mon_store_autoincrement(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct l_p1mon_data *data; + unsigned long val; + + data = i2c_get_clientdata(client); + + if (strict_strtoul(buf, 10, &val) < 0 || val > 1) + return -EINVAL; + + data->autoincrement= val; + return count; +} + +static DEVICE_ATTR(autoincrement, S_IWUSR | S_IRUGO, + l_p1mon_show_autoincrement, l_p1mon_store_autoincrement); + +static struct attribute *l_p1mon_attributes[] = { + &dev_attr_entry_0_value.attr, + &dev_attr_entry_1_value.attr, + &dev_attr_entry_2_value.attr, + &dev_attr_entry_3_value.attr, + &dev_attr_entry_4_value.attr, + &dev_attr_entry_5_value.attr, + &dev_attr_entry_6_value.attr, + &dev_attr_entry_7_value.attr, + &dev_attr_entry_8_value.attr, + &dev_attr_entry_9_value.attr, + &dev_attr_entry_10_value.attr, + &dev_attr_entry_11_value.attr, + &dev_attr_entry_12_value.attr, + &dev_attr_entry_13_value.attr, + &dev_attr_entry_14_value.attr, + &dev_attr_entry_15_value.attr, + &dev_attr_entry_16_value.attr, + 
&dev_attr_entry_17_value.attr, + &dev_attr_entry_18_value.attr, + &dev_attr_entry_19_value.attr, + &dev_attr_entry_20_value.attr, + &dev_attr_entry_21_value.attr, + &dev_attr_entry_22_value.attr, + &dev_attr_entry_23_value.attr, + &dev_attr_entry_24_value.attr, + &dev_attr_entry_25_value.attr, + &dev_attr_entry_26_value.attr, + &dev_attr_entry_27_value.attr, + &dev_attr_entry_28_value.attr, + &dev_attr_entry_29_value.attr, + &dev_attr_entry_30_value.attr, + &dev_attr_entry_31_value.attr, + &dev_attr_entry_32_value.attr, + &dev_attr_entry_33_value.attr, + &dev_attr_entry_34_value.attr, + &dev_attr_entry_35_value.attr, + &dev_attr_entry_36_value.attr, + &dev_attr_entry_37_value.attr, + &dev_attr_entry_38_value.attr, + &dev_attr_entry_39_value.attr, + &dev_attr_entry_40_value.attr, + &dev_attr_entry_41_value.attr, + &dev_attr_entry_42_value.attr, + &dev_attr_entry_43_value.attr, + &dev_attr_entry_44_value.attr, + &dev_attr_entry_45_value.attr, + &dev_attr_entry_46_value.attr, + &dev_attr_entry_47_value.attr, + &dev_attr_entry_48_value.attr, + &dev_attr_entry_49_value.attr, + &dev_attr_entry_50_value.attr, + &dev_attr_entry_51_value.attr, + &dev_attr_entry_52_value.attr, + &dev_attr_entry_53_value.attr, + &dev_attr_entry_54_value.attr, + &dev_attr_entry_55_value.attr, + &dev_attr_entry_56_value.attr, + &dev_attr_entry_57_value.attr, + &dev_attr_entry_58_value.attr, + &dev_attr_entry_59_value.attr, + &dev_attr_entry_60_value.attr, + &dev_attr_entry_61_value.attr, + &dev_attr_entry_62_value.attr, + &dev_attr_entry_63_value.attr, + &dev_attr_rate.attr, + &dev_attr_autoincrement.attr, + NULL +}; + +static const struct attribute_group l_p1mon_group = { + .attrs = l_p1mon_attributes, +}; + +/* + * I2C layer + */ +static struct l_p1mon_data *l_p1mon_update_data(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct l_p1mon_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->lock); + + if (time_after(jiffies, data->last_updated + 
data->rate) || + !data->valid) { + dev_dbg(&client->dev, "Updating l_p1mon_data.\n"); + if (data->autoincrement) { + dev_dbg(&client->dev, "Ignore autoincrement: bad idea.\n"); + } + + data->data_0_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_0_IN); + data->data_1_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_1_IN); + data->data_2_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_2_IN); + data->data_3_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_3_IN); + data->data_4_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_4_IN); + data->data_5_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_5_IN); + data->data_6_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_6_IN); + data->data_7_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_7_IN); + data->data_8_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_8_IN); + data->data_9_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_9_IN); + data->data_10_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_10_IN); + data->data_11_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_11_IN); + data->data_12_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_12_IN); + data->data_13_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_13_IN); + data->data_14_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_14_IN); + data->data_15_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_15_IN); + data->data_16_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_16_IN); + data->data_17_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_17_IN); + data->data_18_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_18_IN); + data->data_19_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_19_IN); + data->data_20_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_20_IN); + data->data_21_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_21_IN); + data->data_22_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_22_IN); + data->data_23_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_23_IN); + data->data_24_in = 
i2c_smbus_read_word_data(client, L_P1MON_DATA_24_IN); + data->data_25_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_25_IN); + data->data_26_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_26_IN); + data->data_27_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_27_IN); + data->data_28_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_28_IN); + data->data_29_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_29_IN); + data->data_30_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_30_IN); + data->data_31_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_31_IN); + data->data_32_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_32_IN); + data->data_33_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_33_IN); + data->data_34_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_34_IN); + data->data_35_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_35_IN); + data->data_36_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_36_IN); + data->data_37_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_37_IN); + data->data_38_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_38_IN); + data->data_39_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_39_IN); + data->data_40_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_40_IN); + data->data_41_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_41_IN); + data->data_42_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_42_IN); + data->data_43_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_43_IN); + data->data_44_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_44_IN); + data->data_45_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_45_IN); + data->data_46_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_46_IN); + data->data_47_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_47_IN); + data->data_48_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_48_IN); + data->data_49_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_49_IN); + data->data_50_in = i2c_smbus_read_word_data(client, 
L_P1MON_DATA_50_IN); + data->data_51_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_51_IN); + data->data_52_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_52_IN); + data->data_53_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_53_IN); + data->data_54_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_54_IN); + data->data_55_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_55_IN); + data->data_56_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_56_IN); + data->data_57_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_57_IN); + data->data_58_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_58_IN); + data->data_59_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_59_IN); + data->data_60_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_60_IN); + data->data_61_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_61_IN); + data->data_62_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_62_IN); + data->data_63_in = i2c_smbus_read_word_data(client, L_P1MON_DATA_63_IN); + + data->last_updated = jiffies; + data->valid = 1; + } + + mutex_unlock(&data->lock); + + return data; +} + +static int l_p1mon_init_client(struct i2c_client *client) +{ + struct l_p1mon_data *data = i2c_get_clientdata(client); + + data->rate = HZ; /* 1 sec default */ + data->valid = 0; + data->autoincrement = 0; + + return 0; +} + +static int __devinit l_p1mon_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); + struct l_p1mon_data *data; + int err = 0; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) + return -EIO; + + data = kzalloc(sizeof(struct l_p1mon_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->client = client; + i2c_set_clientdata(client, data); + mutex_init(&data->lock); + + err = l_p1mon_init_client(client); + if (err) + goto exit_kfree; + + /* register sysfs hooks */ + err = sysfs_create_group(&client->dev.kobj, &l_p1mon_group); + if (err) + goto 
exit_kfree; + + dev_info(&client->dev, "l_p1mon driver version %s enabled\n", DRIVER_VERSION); + return 0; + +exit_kfree: + kfree(data); + return err; +} + +static int __devexit l_p1mon_remove(struct i2c_client *client) +{ + sysfs_remove_group(&client->dev.kobj, &l_p1mon_group); + kfree(i2c_get_clientdata(client)); + return 0; +} + +static const struct i2c_device_id l_p1mon_id[] = { + { "l_p1mon", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, l_p1mon_id); + +static struct i2c_driver l_p1mon_driver = { + .driver = { + .name = L_P1MON_DRV_NAME, + .owner = THIS_MODULE, + }, + .probe = l_p1mon_probe, + .remove = __devexit_p(l_p1mon_remove), + .id_table = l_p1mon_id, +}; + +static int __init l_p1mon_init(void) +{ + return i2c_add_driver(&l_p1mon_driver); +} + +static void __exit l_p1mon_exit(void) +{ + i2c_del_driver(&l_p1mon_driver); +} + +MODULE_AUTHOR("Evgeny Kravtsunov "); +MODULE_DESCRIPTION("Processor-1 I2C slave driver"); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRIVER_VERSION); + +module_init(l_p1mon_init); +module_exit(l_p1mon_exit); + diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c index 60a817f58db9..6220b272b2ef 100644 --- a/drivers/hwmon/lm63.c +++ b/drivers/hwmon/lm63.c @@ -36,6 +36,10 @@ #include #include #include +#ifdef CONFIG_MCST +#include +#include +#endif /* * Addresses to scan @@ -91,6 +95,9 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END }; #define LM96163_REG_REMOTE_TEMP_U_MSB 0x31 #define LM96163_REG_REMOTE_TEMP_U_LSB 0x32 #define LM96163_REG_CONFIG_ENHANCED 0x45 +#ifdef CONFIG_MCST +#define LM96163_REG_RDTF_CMP_MODE 0xBF +#endif #define LM63_MAX_CONVRATE 9 @@ -131,6 +138,16 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END }; enum chips { lm63, lm64, lm96163 }; +#ifdef CONFIG_MCST +#define MAX_SENSORS 2 + +struct lm63_thermal_sensor { + struct lm63_data *data; + struct thermal_zone_device *tz; + unsigned int sensor_id; +}; +#endif + /* * Client data (each client gets its own) 
*/ @@ -173,8 +190,19 @@ struct lm63_data { bool lut_temp_highres; bool remote_unsigned; /* true if unsigned remote upper limits */ bool trutherm; +#ifdef CONFIG_MCST + struct pwm_chip chip; + struct lm63_thermal_sensor thermal_sensor[MAX_SENSORS]; +#endif }; +#ifdef CONFIG_MCST +static inline struct lm63_data *to_pwm(struct pwm_chip *chip) +{ + return container_of(chip, struct lm63_data, chip); +} +#endif + static inline int temp8_from_reg(struct lm63_data *data, int nr) { if (data->remote_unsigned) @@ -421,6 +449,9 @@ static ssize_t pwm1_enable_store(struct device *dev, unsigned long val; int err; + if (of_get_property(dev->of_node, "#pwm-cells", NULL)) + return -EPERM; + err = kstrtoul(buf, 10, &val); if (err) return err; @@ -969,7 +1000,9 @@ static int lm63_detect(struct i2c_client *client, { struct i2c_adapter *adapter = client->adapter; u8 man_id, chip_id, reg_config1, reg_config2; +#ifndef __e2k__ u8 reg_alert_status, reg_alert_mask; +#endif int address = client->addr; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) @@ -980,15 +1013,24 @@ static int lm63_detect(struct i2c_client *client, reg_config1 = i2c_smbus_read_byte_data(client, LM63_REG_CONFIG1); reg_config2 = i2c_smbus_read_byte_data(client, LM63_REG_CONFIG2); +#ifndef __e2k__ reg_alert_status = i2c_smbus_read_byte_data(client, LM63_REG_ALERT_STATUS); reg_alert_mask = i2c_smbus_read_byte_data(client, LM63_REG_ALERT_MASK); +#endif +#ifndef __e2k__ if (man_id != 0x01 /* National Semiconductor */ || (reg_config1 & 0x18) != 0x00 || (reg_config2 & 0xF8) != 0x00 || (reg_alert_status & 0x20) != 0x00 || (reg_alert_mask & 0xA4) != 0xA4) { +#else + /* For e2k: reduce sensor detection time. Very bad strut. Emkr. 
*/ + if (man_id != 0x01 /* National Semiconductor */ + || (reg_config1 & 0x18) != 0x00 + || (reg_config2 & 0xF8) != 0x00) { +#endif dev_dbg(&adapter->dev, "Unsupported chip (man_id=0x%02X, chip_id=0x%02X)\n", man_id, chip_id); @@ -1018,6 +1060,9 @@ static void lm63_init_client(struct lm63_data *data) u8 convrate; data->config = i2c_smbus_read_byte_data(client, LM63_REG_CONFIG1); +#ifdef CONFIG_MCST + i2c_smbus_write_byte_data(client, LM63_REG_CONFIG_FAN, 0x20); +#endif data->config_fan = i2c_smbus_read_byte_data(client, LM63_REG_CONFIG_FAN); @@ -1028,6 +1073,55 @@ static void lm63_init_client(struct lm63_data *data) i2c_smbus_write_byte_data(client, LM63_REG_CONFIG1, data->config); } +#ifdef CONFIG_MCST + if (data->kind == lm96163) { + /* MCST Boot knows nothing about configuration of sensor, + * we do it ourselves: */ + i2c_smbus_write_byte_data(client, 0x4b, 0x3f); + + /* + * Configuration register (0x3): + * 1) Enable alerts; + * 2) Set mode to operational; + * 3) Enable PWM; + * 4) Enable TACH; + * 5) Unlock T_CRIT for overriding. + */ + data->config = 0x6; + i2c_smbus_write_byte_data(client, LM63_REG_CONFIG1, + data->config); + /* + * Enhanced configuration (reg 0x45): + * 0x68 + */ + i2c_smbus_write_byte_data(client, LM96163_REG_CONFIG_ENHANCED, + 0x68); + + /* + * Enable manual mode for pwm (regs 0x4a, 0x4d): + * 1) Enable writing to PWM value register; + * 2) Set dircet PWM polarity; + * 3) Set master PWM clock to 1,4 kHz; + * 4) Enable least effort TACH monitoring. + */ + i2c_smbus_write_byte_data(client, LM63_REG_CONFIG_FAN, 0x2b); + i2c_smbus_write_byte_data(client, LM63_REG_PWM_FREQ, 23); + + /* Set ALERT pin to behave as a comparator, asserting itself + * when an ALERT condition exists, de-asserting itself when + * the ALERT condition goes away.*/ + i2c_smbus_write_byte_data(client, LM96163_REG_RDTF_CMP_MODE, + 0x1); + } else { + /* Start converting if needed */ + if (data->config & 0x40) { /* standby */ + dev_dbg(dev, "Switching to oper. 
mode\n"); + data->config &= 0xA7; + i2c_smbus_write_byte_data(client, LM63_REG_CONFIG1, + data->config); + } + } +#endif /* Tachometer is always enabled on LM64 */ if (data->kind == lm64) data->config |= 0x04; @@ -1087,6 +1181,100 @@ static void lm63_init_client(struct lm63_data *data) (data->config_fan & 0x20) ? "manual" : "auto"); } +#ifdef CONFIG_MCST +static int lm63_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + struct lm63_data *data = to_pwm(chip); + struct i2c_client *client = data->client; + int ret = -EINVAL; + u8 val; + u8 pwm_mode; + + pwm_mode = i2c_smbus_read_byte_data(client, LM63_REG_CONFIG_FAN); + if (!(pwm_mode >> 5)) + return -EPERM; + + if (state->period > 1) { + mutex_lock(&data->update_lock); + val = state->duty_cycle * 255 / (state->period - 1); + val = clamp_val(val, 0, 255); + val = data->pwm_highres ? val : + (val * data->pwm1_freq * 2 + 127) / 255; + ret = i2c_smbus_write_byte_data(client, LM63_REG_PWM_VALUE, val); + mutex_unlock(&data->update_lock); + } + + return ret; +} + +static const struct pwm_ops lm63_pwm_ops = { + .apply = lm63_pwm_apply, + .owner = THIS_MODULE, +}; + +static void lm63_pwm_remove(void *arg) +{ + struct lm63_data *data = arg; + + pwmchip_remove(&data->chip); +} + +static void lm63_init_pwm(struct lm63_data *data) +{ + struct i2c_client *client = data->client; + int ret; + + /* Initialize chip */ + + data->chip.dev = &client->dev; + data->chip.ops = &lm63_pwm_ops; + data->chip.base = -1; + data->chip.npwm = 1; + + ret = pwmchip_add(&data->chip); + if (ret < 0) { + dev_warn(&client->dev, "pwmchip_add() failed: %d\n", ret); + return; + } + + devm_add_action(&client->dev, lm63_pwm_remove, data); +} + +static int lm63_get_temp(void *data, int *temp) +{ + struct lm63_thermal_sensor *thermal_sensor = data; + *temp = i2c_smbus_read_byte_data(thermal_sensor->data->client, + LM63_REG_LOCAL_TEMP + thermal_sensor->sensor_id) * 1000; + + return 0; +} + +static const struct 
thermal_zone_of_device_ops lm63_tz_ops = { + .get_temp = lm63_get_temp, +}; + +static void lm63_init_thermal(struct lm63_data *data) +{ + struct i2c_client *client = data->client; + struct lm63_thermal_sensor *thermal_sensor; + unsigned int i; + + thermal_sensor = data->thermal_sensor; + for (i = 0; i < MAX_SENSORS; i++, thermal_sensor++) { + thermal_sensor->data = data; + thermal_sensor->sensor_id = i; + thermal_sensor->tz = devm_thermal_zone_of_sensor_register(&client->dev, + i, thermal_sensor, &lm63_tz_ops); + + if (IS_ERR(thermal_sensor->tz)) { + dev_warn(&client->dev, "unable to register thermal sensor %ld\n", + PTR_ERR(thermal_sensor->tz)); + } + } +} +#endif + static int lm63_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -1125,7 +1313,14 @@ static int lm63_probe(struct i2c_client *client, hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, data, data->groups); - return PTR_ERR_OR_ZERO(hwmon_dev); + if (IS_ERR(hwmon_dev)) + return PTR_ERR(hwmon_dev); +#ifdef CONFIG_MCST + lm63_init_thermal(data); + if (IS_ENABLED(CONFIG_PWM)) + lm63_init_pwm(data); +#endif + return 0; } /* diff --git a/drivers/hwmon/lm95231.c b/drivers/hwmon/lm95231.c new file mode 100644 index 000000000000..34bd4c7d14a6 --- /dev/null +++ b/drivers/hwmon/lm95231.c @@ -0,0 +1,531 @@ +/* + * lm95231.c - Support for LM95231 temperature sensor, based on LM95241 driver. + * + * Copyright (C) 2012 Evgeny Kravtsunov + * + * The LM95245 is a sensor chip made by National Semiconductors. + * Complete datasheet can be obtained from National's website at: + * http://www.national.com/pf/LM/LM95231.html + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static const unsigned short normal_i2c[] = { + 0x19, 0x2b, I2C_CLIENT_END}; + +/* LM95231 registers */ +#define LM95231_REG_R_MAN_ID 0xFE +#define LM95231_REG_R_CHIP_ID 0xFF +#define LM95231_REG_R_STATUS 0x02 +#define LM95231_REG_RW_CONFIG 0x03 +#define LM95231_REG_RW_REM_FILTER 0x06 +#define LM95231_REG_RW_TRUTHERM 0x07 +#define LM95231_REG_W_ONE_SHOT 0x0F +#define LM95231_REG_R_LOCAL_TEMPH 0x10 +#define LM95231_REG_R_REMOTE1_TEMPH 0x11 +#define LM95231_REG_R_REMOTE2_TEMPH 0x12 +#define LM95231_REG_R_LOCAL_TEMPL 0x20 +#define LM95231_REG_R_REMOTE1_TEMPL 0x21 +#define LM95231_REG_R_REMOTE2_TEMPL 0x22 +#define LM95231_REG_RW_REMOTE_MODEL 0x30 + +/* LM95231 specific bitfields */ +#define CFG_STOP 0x40 +#define CFG_CR0076 0x00 +#define CFG_CR0182 0x10 +#define CFG_CR1000 0x20 +#define CFG_CR2700 0x30 +#define R1MS_SHIFT 0 +#define R2MS_SHIFT 2 +#define R1MS_MASK (0x01 << (R1MS_SHIFT)) +#define R2MS_MASK (0x01 << (R2MS_SHIFT)) +#define R1DF_SHIFT 1 +#define R2DF_SHIFT 2 +#define R1DF_MASK (0x01 << (R1DF_SHIFT)) +#define R2DF_MASK (0x01 << (R2DF_SHIFT)) +#define R1FE_MASK 0x01 +#define R2FE_MASK 0x04 +#define TT1_SHIFT 0 +#define TT2_SHIFT 4 +#define TT_OFF 0 +#define TT_ON 1 +#define TT_MASK 7 +#define MANUFACTURER_ID 0x01 +#define DEFAULT_REVISION 0xA1 + +/* Conversions and various macros */ +#define TEMP_FROM_REG(val_h, val_l) \ + (((val_h) & 0x80 ? 
(char)((~(val_h - 1)) * (-1)) : \ + (val_h)) * 1000 + (val_l) * 1000 / 256) + +/* Functions declaration */ +static void lm95231_init_client(struct i2c_client *client); +static struct lm95231_data *lm95231_update_device(struct device *dev); + +/* Client data (each client gets its own) */ +struct lm95231_data { + struct device *hwmon_dev; + struct mutex update_lock; + unsigned long last_updated, rate; /* in jiffies */ + char valid; /* zero until following fields are valid */ + /* registers values */ + s32 local_h, local_l; /* local */ + s32 remote1_h, remote1_l; /* remote1 */ + s32 remote2_h, remote2_l; /* remote2 */ + s32 local_h_st, local_l_st; /* status of i2c transaction + * local_h, local_l */ + s32 remote1_h_st; /* status of i2c transaction remote1_h */ + s32 remote1_l_st; /* status of i2c transaction remote1_l */ + s32 remote2_h_st; /* status of i2c transaction remote2_h */ + s32 remote2_l_st; /* status of i2c transaction remote2_l */ + u8 config, model, trutherm; +}; + +/* Sysfs stuff */ +#define show_temp(value) \ +static ssize_t show_##value(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct lm95231_data *data = lm95231_update_device(dev); \ + \ + if (data->value##_h_st < 0 || data->value##_l_st < 0) \ + snprintf(buf, PAGE_SIZE - 1, "Error\n"); \ + else \ + snprintf(buf, PAGE_SIZE - 1, "%d\n", \ + TEMP_FROM_REG(data->value##_h, data->value##_l)); \ + return strlen(buf); \ +} +show_temp(local); +show_temp(remote1); +show_temp(remote2); + +static ssize_t show_rate(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct lm95231_data *data = lm95231_update_device(dev); + + snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->rate / HZ); + return strlen(buf); +} + +static ssize_t set_rate(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct lm95231_data *data = i2c_get_clientdata(client); + int ret; + + ret = kstrtol(buf, 
10, &data->rate); + if (ret < 0) + return ret; + + data->rate = data->rate * HZ / 1000; + + return count; +} + +#define show_type(flag) \ +static ssize_t show_type##flag(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct i2c_client *client = to_i2c_client(dev); \ + struct lm95231_data *data = i2c_get_clientdata(client); \ + \ + snprintf(buf, PAGE_SIZE - 1, \ + data->model & R##flag##MS_MASK ? "1\n" : "2\n"); \ + return strlen(buf); \ +} +show_type(1); +show_type(2); + +#define show_min(flag) \ +static ssize_t show_min##flag(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct i2c_client *client = to_i2c_client(dev); \ + struct lm95231_data *data = i2c_get_clientdata(client); \ + \ + snprintf(buf, PAGE_SIZE - 1, \ + data->config & R##flag##DF_MASK ? \ + "-127000\n" : "0\n"); \ + return strlen(buf); \ +} +show_min(1); +show_min(2); + +#define show_max(flag) \ +static ssize_t show_max##flag(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct i2c_client *client = to_i2c_client(dev); \ + struct lm95231_data *data = i2c_get_clientdata(client); \ + \ + snprintf(buf, PAGE_SIZE - 1, \ + data->config & R##flag##DF_MASK ? 
\ + "127000\n" : "255000\n"); \ + return strlen(buf); \ +} +show_max(1); +show_max(2); + +#define set_type(flag) \ +static ssize_t set_type##flag(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + struct i2c_client *client = to_i2c_client(dev); \ + struct lm95231_data *data = i2c_get_clientdata(client); \ + \ + long val; \ + int ret; \ + \ + ret = kstrtol(buf, 10, &val); \ + if (ret < 0) \ + return ret; \ + \ + if ((val == 1) || (val == 2)) { \ + \ + mutex_lock(&data->update_lock); \ + \ + data->trutherm &= ~(TT_MASK << TT##flag##_SHIFT); \ + if (val == 1) { \ + data->model |= R##flag##MS_MASK; \ + data->trutherm |= (TT_ON << TT##flag##_SHIFT); \ + } \ + else { \ + data->model &= ~R##flag##MS_MASK; \ + data->trutherm |= (TT_OFF << TT##flag##_SHIFT); \ + } \ + \ + data->valid = 0; \ + \ + i2c_smbus_write_byte_data(client, \ + LM95231_REG_RW_REMOTE_MODEL, \ + data->model); \ + i2c_smbus_write_byte_data(client, \ + LM95231_REG_RW_TRUTHERM, \ + data->trutherm); \ + \ + mutex_unlock(&data->update_lock); \ + \ + } \ + return count; \ +} +set_type(1); +set_type(2); + +#define set_min(flag) \ +static ssize_t set_min##flag(struct device *dev, \ + struct device_attribute *devattr, const char *buf, \ + size_t count) \ +{ \ + struct i2c_client *client = to_i2c_client(dev); \ + struct lm95231_data *data = i2c_get_clientdata(client); \ + \ + long val; \ + int ret; \ + \ + ret = kstrtol(buf, 10, &val); \ + if (ret < 0) \ + return ret; \ + \ + mutex_lock(&data->update_lock); \ + \ + if (val < 0) \ + data->config |= R##flag##DF_MASK; \ + else \ + data->config &= ~R##flag##DF_MASK; \ + \ + data->valid = 0; \ + \ + i2c_smbus_write_byte_data(client, LM95231_REG_RW_CONFIG, \ + data->config); \ + \ + mutex_unlock(&data->update_lock); \ + \ + return count; \ +} +set_min(1); +set_min(2); + +#define set_max(flag) \ +static ssize_t set_max##flag(struct device *dev, \ + struct device_attribute *devattr, const char *buf, \ + size_t count) \ +{ \ 
+ struct i2c_client *client = to_i2c_client(dev); \ + struct lm95231_data *data = i2c_get_clientdata(client); \ + \ + long val; \ + int ret; \ + \ + ret = kstrtol(buf, 10, &val); \ + if (ret < 0) \ + return ret; \ + \ + mutex_lock(&data->update_lock); \ + \ + if (val <= 127000) \ + data->config |= R##flag##DF_MASK; \ + else \ + data->config &= ~R##flag##DF_MASK; \ + \ + data->valid = 0; \ + \ + i2c_smbus_write_byte_data(client, LM95231_REG_RW_CONFIG, \ + data->config); \ + \ + mutex_unlock(&data->update_lock); \ + \ + return count; \ +} +set_max(1); +set_max(2); + +static DEVICE_ATTR(temp1_input, S_IRUGO, show_local, NULL); +static DEVICE_ATTR(temp2_input, S_IRUGO, show_remote1, NULL); +static DEVICE_ATTR(temp3_input, S_IRUGO, show_remote2, NULL); +static DEVICE_ATTR(temp2_type, S_IWUSR | S_IRUGO, show_type1, set_type1); +static DEVICE_ATTR(temp3_type, S_IWUSR | S_IRUGO, show_type2, set_type2); +static DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_min1, set_min1); +static DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min2, set_min2); +static DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max1, set_max1); +static DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max2, set_max2); +static DEVICE_ATTR(rate, S_IWUSR | S_IRUGO, show_rate, set_rate); + +static struct attribute *lm95231_attributes[] = { + &dev_attr_temp1_input.attr, + &dev_attr_temp2_input.attr, + &dev_attr_temp3_input.attr, + &dev_attr_temp2_type.attr, + &dev_attr_temp3_type.attr, + &dev_attr_temp2_min.attr, + &dev_attr_temp3_min.attr, + &dev_attr_temp2_max.attr, + &dev_attr_temp3_max.attr, + &dev_attr_rate.attr, + NULL +}; + +static const struct attribute_group lm95231_group = { + .attrs = lm95231_attributes, +}; + +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int lm95231_detect(struct i2c_client *new_client, + struct i2c_board_info *info) +{ + struct i2c_adapter *adapter = new_client->adapter; + int address = new_client->addr; + const char *name; + + if 
(!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { + return -ENODEV; + } + + if ((i2c_smbus_read_byte_data(new_client, LM95231_REG_R_MAN_ID) + == MANUFACTURER_ID) + && (i2c_smbus_read_byte_data(new_client, LM95231_REG_R_CHIP_ID) + >= DEFAULT_REVISION)) { + name = "lm95231"; + } else { + dev_dbg(&adapter->dev, "LM95231 detection failed at 0x%02x\n", + address); + return -ENODEV; + } + + /* Fill the i2c board info */ + strlcpy(info->type, name, I2C_NAME_SIZE); + return 0; +} + +static int lm95231_probe(struct i2c_client *new_client, + const struct i2c_device_id *id) +{ + struct lm95231_data *data; + int err; + + data = kzalloc(sizeof(struct lm95231_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(new_client, data); + mutex_init(&data->update_lock); + + /* Initialize the LM95231 chip */ + lm95231_init_client(new_client); + + /* Register sysfs hooks */ + err = sysfs_create_group(&new_client->dev.kobj, &lm95231_group); + if (err) + goto exit_free; + + data->hwmon_dev = hwmon_device_register(&new_client->dev); + if (IS_ERR(data->hwmon_dev)) { + err = PTR_ERR(data->hwmon_dev); + goto exit_remove_files; + } + + return 0; + +exit_remove_files: + sysfs_remove_group(&new_client->dev.kobj, &lm95231_group); +exit_free: + kfree(data); +exit: + return err; +} + +static void lm95231_init_client(struct i2c_client *client) +{ + struct lm95231_data *data = i2c_get_clientdata(client); + + data->rate = HZ; /* 1 sec default */ + data->valid = 0; + data->config = CFG_CR0076; + data->model = 0; + data->trutherm = (TT_OFF << TT1_SHIFT) | (TT_OFF << TT2_SHIFT); + + i2c_smbus_write_byte_data(client, LM95231_REG_RW_CONFIG, + data->config); + i2c_smbus_write_byte_data(client, LM95231_REG_RW_REM_FILTER, + R1FE_MASK | R2FE_MASK); + i2c_smbus_write_byte_data(client, LM95231_REG_RW_TRUTHERM, + data->trutherm); + i2c_smbus_write_byte_data(client, LM95231_REG_RW_REMOTE_MODEL, + data->model); +} + +static int lm95231_remove(struct i2c_client 
*client) +{ + struct lm95231_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &lm95231_group); + + i2c_set_clientdata(client, NULL); + kfree(data); + return 0; +} + +static struct lm95231_data *lm95231_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct lm95231_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + data->rate) || + !data->valid) { + dev_dbg(&client->dev, "Updating lm95231 data.\n"); + data->local_h_st = i2c_smbus_read_byte_data(client, + LM95231_REG_R_LOCAL_TEMPH); + if (data->local_h_st < 0) + data->local_h = 0; + else + data->local_h = data->local_h_st; + + data->local_l_st = i2c_smbus_read_byte_data(client, + LM95231_REG_R_LOCAL_TEMPL); + if (data->local_l_st < 0) + data->local_l = 0; + else + data->local_l = data->local_l_st; + + data->remote1_h_st = i2c_smbus_read_byte_data(client, + LM95231_REG_R_REMOTE1_TEMPH); + if (data->remote1_h_st < 0) + data->remote1_h = 0; + else + data->remote1_h = data->remote1_h_st; + + data->remote1_l_st = i2c_smbus_read_byte_data(client, + LM95231_REG_R_REMOTE1_TEMPL); + if (data->remote1_l_st < 0) + data->remote1_l = 0; + else + data->remote1_l = (u8) data->remote1_l_st; + + data->remote2_h_st = i2c_smbus_read_byte_data(client, + LM95231_REG_R_REMOTE2_TEMPH); + if (data->remote2_h_st < 0) + data->remote2_h = 0; + else + data->remote2_h = data->remote2_h_st; + + data->remote2_l_st = i2c_smbus_read_byte_data(client, + LM95231_REG_R_REMOTE2_TEMPL); + if (data->remote2_l_st < 0) + data->remote2_l = 0; + else + data->remote2_l = data->remote2_l_st; + + data->last_updated = jiffies; + data->valid = 1; + } + + mutex_unlock(&data->update_lock); + + return data; +} + +/* Driver data (common to all clients) */ +static const struct i2c_device_id lm95231_id[] = { + { "lm95231", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, lm95231_id); + 
+static struct i2c_driver lm95231_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "lm95231", + }, + .probe = lm95231_probe, + .remove = lm95231_remove, + .id_table = lm95231_id, + .detect = lm95231_detect, + .address_list = normal_i2c, +}; + +static int __init sensors_lm95231_init(void) +{ + return i2c_add_driver(&lm95231_driver); +} + +static void __exit sensors_lm95231_exit(void) +{ + i2c_del_driver(&lm95231_driver); +} + +MODULE_AUTHOR("Evgeny Kravtsunov "); +MODULE_DESCRIPTION("LM95231 sensor driver"); +MODULE_LICENSE("GPL"); + +module_init(sensors_lm95231_init); +module_exit(sensors_lm95231_exit); diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig index d62d69bb7e49..21fccf407e74 100644 --- a/drivers/hwmon/pmbus/Kconfig +++ b/drivers/hwmon/pmbus/Kconfig @@ -136,6 +136,16 @@ config SENSORS_MAX16064 This driver can also be built as a module. If so, the module will be called max16064. +config SENSORS_MAX20730 + tristate "Maxim MAX20730, MAX20734, MAX20743" + default m + help + If you say yes here you get hardware monitoring support for Maxim + MAX20730, MAX20734, and MAX20743. + + This driver can also be built as a module. If so, the module will + be called max20730. 
+ config SENSORS_MAX20751 tristate "Maxim MAX20751" help diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile index 03bacfcfd660..0840e2f12557 100644 --- a/drivers/hwmon/pmbus/Makefile +++ b/drivers/hwmon/pmbus/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_SENSORS_LM25066) += lm25066.o obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o obj-$(CONFIG_SENSORS_MAX16064) += max16064.o +obj-$(CONFIG_SENSORS_MAX20730) += max20730.o obj-$(CONFIG_SENSORS_MAX20751) += max20751.o obj-$(CONFIG_SENSORS_MAX31785) += max31785.o obj-$(CONFIG_SENSORS_MAX34440) += max34440.o diff --git a/drivers/hwmon/pmbus/max20730.c b/drivers/hwmon/pmbus/max20730.c new file mode 100644 index 000000000000..e2a8b58e8b1c --- /dev/null +++ b/drivers/hwmon/pmbus/max20730.c @@ -0,0 +1,527 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for MAX20710, MAX20730, MAX20734, and MAX20743 Integrated, + * Step-Down Switching Regulators + * + * Copyright 2019 Google LLC. + * Copyright 2020 Maxim Integrated + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "pmbus.h" + +enum chips { + max20710, + max20730, + max20734, + max20743 +}; + +struct max20730_data { + enum chips id; + struct pmbus_driver_info info; + struct mutex lock; /* Used to protect against parallel writes */ + u16 mfr_devset1; +}; + +#define to_max20730_data(x) container_of(x, struct max20730_data, info) + +#define MAX20730_MFR_DEVSET1 0xd2 + +#define VOUT_CMD_MIN 307 +#define VOUT_CMD_MAX 512 +#define VREF_MIN 601600 +#define VREF_MAX 1000000 +#define VREF_STEP 3900 +#define RFB1 187 +#define RFB2 348 + +/* + * Convert discreet value to direct data format. Strictly speaking, all passed + * values are constants, so we could do that calculation manually. On the + * downside, that would make the driver more difficult to maintain, so lets + * use this approach. 
+ */ +static u16 val_to_direct(int v, enum pmbus_sensor_classes class, + const struct pmbus_driver_info *info) +{ + int R = info->R[class] - 3; /* take milli-units into account */ + int b = info->b[class] * 1000; + long d; + + d = v * info->m[class] + b; + /* + * R < 0 is true for all callers, so we don't need to bother + * about the R > 0 case. + */ + while (R < 0) { + d = DIV_ROUND_CLOSEST(d, 10); + R++; + } + return (u16)d; +} + +static long direct_to_val(u16 w, enum pmbus_sensor_classes class, + const struct pmbus_driver_info *info) +{ + int R = info->R[class] - 3; + int b = info->b[class] * 1000; + int m = info->m[class]; + long d = (s16)w; + + if (m == 0) + return 0; + + while (R < 0) { + d *= 10; + R++; + } + d = (d - b) / m; + return d; +} + +static u32 max_current[][5] = { + [max20710] = { 6200, 8000, 9700, 11600 }, + [max20730] = { 13000, 16600, 20100, 23600 }, + [max20734] = { 21000, 27000, 32000, 38000 }, + [max20743] = { 18900, 24100, 29200, 34100 }, +}; + +static int max20730_read_word_data(struct i2c_client *client, int page, int reg) +{ + const struct pmbus_driver_info *info = pmbus_get_driver_info(client); + const struct max20730_data *data = to_max20730_data(info); + int ret = 0; + u32 max_c; + + switch (reg) { + case PMBUS_OT_FAULT_LIMIT: + switch ((data->mfr_devset1 >> 11) & 0x3) { + case 0x0: + ret = val_to_direct(150000, PSC_TEMPERATURE, info); + break; + case 0x1: + ret = val_to_direct(130000, PSC_TEMPERATURE, info); + break; + default: + ret = -ENODATA; + break; + } + break; + case PMBUS_IOUT_OC_FAULT_LIMIT: + max_c = max_current[data->id][(data->mfr_devset1 >> 5) & 0x3]; + ret = val_to_direct(max_c, PSC_CURRENT_OUT, info); + break; + default: + ret = -ENODATA; + break; + } + return ret; +} + +static int max20730_write_word_data(struct i2c_client *client, int page, + int reg, u16 word) +{ + struct pmbus_driver_info *info; + struct max20730_data *data; + u16 devset1; + int ret = 0; + int idx; + + info = (struct pmbus_driver_info 
*)pmbus_get_driver_info(client); + data = to_max20730_data(info); + + mutex_lock(&data->lock); + devset1 = data->mfr_devset1; + + switch (reg) { + case PMBUS_OT_FAULT_LIMIT: + devset1 &= ~(BIT(11) | BIT(12)); + if (direct_to_val(word, PSC_TEMPERATURE, info) < 140000) + devset1 |= BIT(11); + break; + case PMBUS_IOUT_OC_FAULT_LIMIT: + devset1 &= ~(BIT(5) | BIT(6)); + + idx = find_closest(direct_to_val(word, PSC_CURRENT_OUT, info), + max_current[data->id], 4); + devset1 |= (idx << 5); + break; + case PMBUS_READ_VOUT: + devset1 = word; + default: + ret = -ENODATA; + break; + } + + if (!ret && devset1 != data->mfr_devset1) { + ret = i2c_smbus_write_word_data(client, MAX20730_MFR_DEVSET1, + devset1); + if (!ret) { + data->mfr_devset1 = devset1; + pmbus_clear_cache(client); + } + } + mutex_unlock(&data->lock); + return ret; +} + +static ssize_t vout_command_show(struct device *dev, struct device_attribute *devattr, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev->parent); + int val; + + val = i2c_smbus_read_word_data(client, PMBUS_VOUT_COMMAND); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t vout_command_store(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev->parent); + struct pmbus_driver_info *info; + u16 val; + int ret; + + info = (struct pmbus_driver_info *)pmbus_get_driver_info(client); + + ret = kstrtou16(buf, 0, &val); + + i2c_smbus_write_word_data(client, PMBUS_VOUT_COMMAND, val); + + return count; +} + +static int max20730_list_voltage_table (struct regulator_dev *rdev, + unsigned selector) +{ + int volt = 0; + + if (selector < 0 || selector > (VOUT_CMD_MAX - VOUT_CMD_MIN)) + return -EINVAL; + + volt = (VREF_MIN + (selector) / 2 * VREF_STEP) * (RFB1 + RFB2) / RFB2; + + return volt; +} + +static int max20730_reg_set_voltage_sel(struct regulator_dev *rdev, + unsigned selector) +{ + struct i2c_client *client = to_i2c_client(rdev->dev.parent); + + if 
(selector < 0 || selector > (VOUT_CMD_MAX - VOUT_CMD_MIN)) + return -EINVAL; + + i2c_smbus_write_word_data(client, PMBUS_VOUT_COMMAND, selector + VOUT_CMD_MIN); + + return 0; +} + +static int max20730_reg_get_voltage_sel(struct regulator_dev *rdev) +{ + struct i2c_client *client = to_i2c_client(rdev->dev.parent); + int val; + + val = i2c_smbus_read_word_data(client, PMBUS_VOUT_COMMAND) - VOUT_CMD_MIN; + + return val; +} + +static const struct regulator_ops max20730_regulator_ops = { + .list_voltage = max20730_list_voltage_table, + .map_voltage = regulator_map_voltage_iterate, + .set_voltage_sel = max20730_reg_set_voltage_sel, + .get_voltage_sel = max20730_reg_get_voltage_sel, +}; + +static const struct regulator_desc max20730_regulator_descriptor = { + .name = "vout", + .of_match = of_match_ptr("ldo"), + .regulators_node = of_match_ptr("regulators"), + .id = -1, + .ops = &max20730_regulator_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .uV_step = VREF_STEP, + .n_voltages = VOUT_CMD_MAX - VOUT_CMD_MIN, +}; + +static SENSOR_DEVICE_ATTR_RW(vout_command, vout_command, 0); + +static struct attribute *vout_command_attrs[] = { + &sensor_dev_attr_vout_command.dev_attr.attr, + NULL, +}; + +static const struct attribute_group vout_command_group = { + .attrs = vout_command_attrs, +}; + +static const struct attribute_group *attribute_groups[] = { + &vout_command_group, + NULL, +}; + +static const struct pmbus_driver_info max20730_info[] = { + [max20710] = { + .pages = 1, + .read_word_data = max20730_read_word_data, + .write_word_data = max20730_write_word_data, + + /* Source : Maxim AN6140 and AN6042 */ + .format[PSC_TEMPERATURE] = direct, + .m[PSC_TEMPERATURE] = 21, + .b[PSC_TEMPERATURE] = 5887, + .R[PSC_TEMPERATURE] = -1, + + .format[PSC_VOLTAGE_IN] = direct, + .m[PSC_VOLTAGE_IN] = 3609, + .b[PSC_VOLTAGE_IN] = 0, + .R[PSC_VOLTAGE_IN] = -2, + + .format[PSC_CURRENT_OUT] = direct, + .m[PSC_CURRENT_OUT] = 153, + .b[PSC_CURRENT_OUT] = 4976, + .R[PSC_CURRENT_OUT] = 
-1, + + .format[PSC_VOLTAGE_OUT] = linear, + + .func[0] = PMBUS_HAVE_VIN | + PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | + PMBUS_HAVE_STATUS_INPUT, + }, + [max20730] = { + .pages = 1, + .read_word_data = max20730_read_word_data, + .write_word_data = max20730_write_word_data, + + /* Source : Maxim AN6042 */ + .format[PSC_TEMPERATURE] = direct, + .m[PSC_TEMPERATURE] = 21, + .b[PSC_TEMPERATURE] = 5887, + .R[PSC_TEMPERATURE] = -1, + + .format[PSC_VOLTAGE_IN] = direct, + .m[PSC_VOLTAGE_IN] = 3609, + .b[PSC_VOLTAGE_IN] = 0, + .R[PSC_VOLTAGE_IN] = -2, + + /* + * Values in the datasheet are adjusted for temperature and + * for the relationship between Vin and Vout. + * Unfortunately, the data sheet suggests that Vout measurement + * may be scaled with a resistor array. This is indeed the case + * at least on the evaulation boards. As a result, any in-driver + * adjustments would either be wrong or require elaborate means + * to configure the scaling. Instead of doing that, just report + * raw values and let userspace handle adjustments. 
+ */ + .format[PSC_CURRENT_OUT] = direct, + .m[PSC_CURRENT_OUT] = 153, + .b[PSC_CURRENT_OUT] = 4976, + .R[PSC_CURRENT_OUT] = -1, + + .format[PSC_VOLTAGE_OUT] = linear, + + .func[0] = PMBUS_HAVE_VIN | + PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | + PMBUS_HAVE_STATUS_INPUT, + .groups = attribute_groups, + .num_regulators = 1, + .reg_desc = &max20730_regulator_descriptor, + }, + [max20734] = { + .pages = 1, + .read_word_data = max20730_read_word_data, + .write_word_data = max20730_write_word_data, + + /* Source : Maxim AN6209 */ + .format[PSC_TEMPERATURE] = direct, + .m[PSC_TEMPERATURE] = 21, + .b[PSC_TEMPERATURE] = 5887, + .R[PSC_TEMPERATURE] = -1, + + .format[PSC_VOLTAGE_IN] = direct, + .m[PSC_VOLTAGE_IN] = 3592, + .b[PSC_VOLTAGE_IN] = 0, + .R[PSC_VOLTAGE_IN] = -2, + + .format[PSC_CURRENT_OUT] = direct, + .m[PSC_CURRENT_OUT] = 111, + .b[PSC_CURRENT_OUT] = 3461, + .R[PSC_CURRENT_OUT] = -1, + + .format[PSC_VOLTAGE_OUT] = linear, + + .func[0] = PMBUS_HAVE_VIN | + PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | + PMBUS_HAVE_STATUS_INPUT, + }, + [max20743] = { + .pages = 1, + .read_word_data = max20730_read_word_data, + .write_word_data = max20730_write_word_data, + + /* Source : Maxim AN6042 */ + .format[PSC_TEMPERATURE] = direct, + .m[PSC_TEMPERATURE] = 21, + .b[PSC_TEMPERATURE] = 5887, + .R[PSC_TEMPERATURE] = -1, + + .format[PSC_VOLTAGE_IN] = direct, + .m[PSC_VOLTAGE_IN] = 3597, + .b[PSC_VOLTAGE_IN] = 0, + .R[PSC_VOLTAGE_IN] = -2, + + .format[PSC_CURRENT_OUT] = direct, + .m[PSC_CURRENT_OUT] = 95, + .b[PSC_CURRENT_OUT] = 5014, + .R[PSC_CURRENT_OUT] = -1, + + .format[PSC_VOLTAGE_OUT] = linear, + + .func[0] = PMBUS_HAVE_VIN | + PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | + PMBUS_HAVE_STATUS_INPUT, + }, +}; + 
+static int max20730_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + u8 buf[I2C_SMBUS_BLOCK_MAX + 1]; + struct max20730_data *data; + enum chips chip_id; + int ret; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_READ_BYTE_DATA | + I2C_FUNC_SMBUS_READ_WORD_DATA | + I2C_FUNC_SMBUS_BLOCK_DATA)) + return -ENODEV; + + ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, buf); + if (ret < 0) { + dev_err(&client->dev, "Failed to read Manufacturer ID\n"); + return ret; + } + if (ret != 4 && strncmp(buf, "VLTR", 4)) { + buf[ret] = '\0'; + dev_err(dev, "Unsupported Manufacturer ID: '%s', ret = %d \n", buf, ret) ; + return -ENODEV; + } + + /* + * The chips support reading PMBUS_MFR_MODEL. On both MAX20730 + * and MAX20734, reading it returns M20743. Presumably that is + * the reason why the command is not documented. Unfortunately, + * that means that there is no reliable means to detect the chip. + * However, we can at least detect the chip series. Compare + * the returned value against 'M20743' and bail out if there is + * a mismatch. If that doesn't work for all chips, we may have + * to remove this check. 
+ */ + ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf); + if (ret < 0) { + dev_err(dev, "Failed to read Manufacturer Model\n"); + return ret; + } + if (ret != 6 || strncmp(buf, "VT7409", 6)) { + buf[ret] = '\0'; + dev_err(dev, "Unsupported Manufacturer Model '%s'\n", buf); + return -ENODEV; + } + + ret = i2c_smbus_read_block_data(client, PMBUS_MFR_REVISION, buf); + if (ret < 0) { + dev_err(dev, "Failed to read Manufacturer Revision\n"); + return ret; + } + if (ret != 1 || buf[0] != 'A') { + buf[ret] = '\0'; + dev_err(dev, "Unsupported Manufacturer Revision '%s'\n", buf); + return -ENODEV; + } + + if (client->dev.of_node) + chip_id = (enum chips)of_device_get_match_data(dev); + else + chip_id = id->driver_data; + + pr_err("DEVICE ID %d\n", chip_id); + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + data->id = chip_id; + mutex_init(&data->lock); + memcpy(&data->info, &max20730_info[chip_id], sizeof(data->info)); + + ret = i2c_smbus_read_word_data(client, MAX20730_MFR_DEVSET1); + if (ret < 0) + return ret; + data->mfr_devset1 = ret; + + return pmbus_do_probe(client, id, &data->info); +} + +static const struct i2c_device_id max20730_id[] = { + { "max20710", max20710 }, + { "max20730", max20730 }, + { "max20734", max20734 }, + { "max20743", max20743 }, + { }, +}; + +MODULE_DEVICE_TABLE(i2c, max20730_id); + +static const struct of_device_id max20730_of_match[] = { + { .compatible = "maxim,max20710", .data = (void *)max20710 }, + { .compatible = "maxim,max20730", .data = (void *)max20730 }, + { .compatible = "maxim,max20734", .data = (void *)max20734 }, + { .compatible = "maxim,max20743", .data = (void *)max20743 }, + { }, +}; + +MODULE_DEVICE_TABLE(of, max20730_of_match); + +static struct i2c_driver max20730_driver = { + .driver = { + .name = "max20730", + .of_match_table = max20730_of_match, + }, + .probe = max20730_probe, + .remove = pmbus_do_remove, + .id_table = max20730_id, +}; + 
+module_i2c_driver(max20730_driver); + +MODULE_AUTHOR("Guenter Roeck "); +MODULE_DESCRIPTION("PMBus driver for Maxim MAX20710 / MAX20730 / MAX20734 / MAX20743"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 8470097907bc..9b78ebc28784 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -750,19 +750,25 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data, if (sensor->class == PSC_VOLTAGE_OUT) { /* LINEAR16 does not support negative voltages */ - if (val < 0) + if (val < 0 && (sensor->reg != PMBUS_VOUT_TRIM)) return 0; - /* - * For a static exponents, we don't have a choice - * but to adjust the value to it. - */ - if (data->exponent[sensor->page] < 0) - val <<= -data->exponent[sensor->page]; - else - val >>= data->exponent[sensor->page]; - val = DIV_ROUND_CLOSEST(val, 1000); - return val & 0xffff; + + if (sensor->reg == PMBUS_VOUT_TRIM) { + return val & 0xffff; + } else { + + /* + * For a static exponents, we don't have a choice + * but to adjust the value to it. 
+ */ + if (data->exponent[sensor->page] < 0) + val <<= -data->exponent[sensor->page]; + else + val >>= data->exponent[sensor->page]; + val = DIV_ROUND_CLOSEST(val, 1000); + return val & 0xffff; + } } if (val < 0) { @@ -1399,6 +1405,20 @@ static const struct pmbus_limit_attr vout_limit_attrs[] = { .alarm = "crit_alarm", .sbit = PB_VOLTAGE_OV_FAULT, }, { +#ifdef CONFIG_MCST + .reg = PMBUS_VOUT_MARGIN_HIGH, + .attr = "margin_high", + }, { + .reg = PMBUS_VOUT_MARGIN_LOW, + .attr = "margin_low", + }, { + .reg = PMBUS_VOUT_TRIM, + .attr = "trim", + }, { + .reg = PMBUS_VOUT_COMMAND, + .attr = "command", + }, { +#endif .reg = PMBUS_VIRT_READ_VOUT_AVG, .update = true, .attr = "average", diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig index c6040aa839ac..a8bc6a9496b8 100644 --- a/drivers/i2c/muxes/Kconfig +++ b/drivers/i2c/muxes/Kconfig @@ -31,6 +31,17 @@ config I2C_MUX_GPIO This driver can also be built as a module. If so, the module will be called i2c-mux-gpio. +config I2C_MUX_LTC4306 + tristate "LTC LTC4306/5 I2C multiplexer" + select GPIOLIB + select REGMAP_I2C + help + If you say yes here you get support for the Analog Devices + LTC4306 or LTC4305 I2C mux/switch devices. + + This driver can also be built as a module. If so, the module + will be called i2c-mux-ltc4306. 
+ config I2C_MUX_GPMUX tristate "General Purpose I2C multiplexer" select MULTIPLEXER diff --git a/drivers/i2c/muxes/i2c-mux-ltc4306.c b/drivers/i2c/muxes/i2c-mux-ltc4306.c index 704f1e50f6f4..59b0c999c84d 100644 --- a/drivers/i2c/muxes/i2c-mux-ltc4306.c +++ b/drivers/i2c/muxes/i2c-mux-ltc4306.c @@ -20,6 +20,10 @@ #include #include +#ifdef CONFIG_MCST +#include +#endif + #define LTC4305_MAX_NCHANS 2 #define LTC4306_MAX_NCHANS 4 diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index 1c227ea8ecd3..f141a0392c52 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig @@ -506,6 +506,14 @@ config BLK_DEV_PIIX This allows the kernel to change PIO, DMA and UDMA speeds and to configure the chip to optimum performance. +config BLK_DEV_ELBRUS + tristate "Elbrus chipsets support" + select BLK_DEV_IDEDMA_PCI + help + This driver adds explicit support for ELBRUS (E3S/E90/E90S) chips + This allows the kernel to change PIO, DMA and UDMA speeds + and to configure the chip to optimum performance. 
+ config BLK_DEV_IT8172 tristate "IT8172 IDE support" select BLK_DEV_IDEDMA_PCI diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile index cac02db4098d..2d8a75fe7059 100644 --- a/drivers/ide/Makefile +++ b/drivers/ide/Makefile @@ -52,6 +52,7 @@ obj-$(CONFIG_BLK_DEV_IT8172) += it8172.o obj-$(CONFIG_BLK_DEV_IT8213) += it8213.o obj-$(CONFIG_BLK_DEV_IT821X) += it821x.o obj-$(CONFIG_BLK_DEV_JMICRON) += jmicron.o +obj-$(CONFIG_BLK_DEV_ELBRUS) += l_ide.o obj-$(CONFIG_BLK_DEV_NS87415) += ns87415.o obj-$(CONFIG_BLK_DEV_OPTI621) += opti621.o obj-$(CONFIG_BLK_DEV_PDC202XX_OLD) += pdc202xx_old.o diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c index 6f344654ef22..910490347d91 100644 --- a/drivers/ide/ide-dma.c +++ b/drivers/ide/ide-dma.c @@ -140,7 +140,16 @@ static int ide_dma_map_sg(ide_drive_t *drive, struct ide_cmd *cmd) cmd->sg_dma_direction = DMA_TO_DEVICE; else cmd->sg_dma_direction = DMA_FROM_DEVICE; - +#ifdef CONFIG_MCST + if (dev_is_pci(hwif->dev)) { /*see comment in l_ide.c */ + struct pci_dev *pdev = to_pci_dev(hwif->dev); + if ((pdev->vendor == PCI_VENDOR_ID_MCST_TMP) && + (pdev->device == PCI_DEVICE_ID_MCST_IDE_SDHCI) && + (iohub_revision(pdev) <= 3)) { + cmd->sg_dma_direction = DMA_BIDIRECTIONAL; + } + } +#endif i = dma_map_sg(hwif->dev, sg, cmd->sg_nents, cmd->sg_dma_direction); if (i) { cmd->orig_sg_nents = cmd->sg_nents; diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index b32a013d827a..709c4da3b55b 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -301,6 +301,9 @@ static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq) return ide_do_reset(drive); default: BUG(); +#ifdef __LCC__ + return ide_stopped; +#endif } } diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index e867129466b0..0c14d83fc1c4 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -601,6 +601,9 @@ static int ide_port_wait_ready(ide_hwif_t *hwif) const struct ide_tp_ops *tp_ops = hwif->tp_ops; ide_drive_t 
*drive; int i, rc; +#ifdef CONFIG_E2K + int ports_num = 0; +#endif printk(KERN_DEBUG "Probing IDE interface %s...\n", hwif->name); @@ -625,13 +628,28 @@ static int ide_port_wait_ready(ide_hwif_t *hwif) tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS); mdelay(2); rc = ide_wait_not_busy(hwif, 35000); +#ifdef CONFIG_E2K + /* e2k lms simulator workaround: if only master + * or slave disk image is specified for simulator, + * then access to other port (slave or master) + * return error. + * + * Remove when the bug is fixed */ + if (rc == 0) + ++ports_num; + else if (ports_num) + rc = 0; +#else if (rc) goto out; +#endif } else printk(KERN_DEBUG "%s: ide_wait_not_busy() skipped\n", drive->name); } +#ifndef CONFIG_E2K out: +#endif /* Exit function with master reselected (let's be sane) */ if (i) tp_ops->dev_select(hwif->devices[0]); diff --git a/drivers/ide/l_ide.c b/drivers/ide/l_ide.c new file mode 100644 index 000000000000..d292caa61483 --- /dev/null +++ b/drivers/ide/l_ide.c @@ -0,0 +1,641 @@ +/* + * Elbrus IDE driver specific + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#undef DEBUG_IDE_MODE +#undef DebugIDE +#define DEBUG_IDE_MODE 0 /* PCI init */ +#define DebugIDE if (DEBUG_IDE_MODE) printk +#define DebugBUSINFO if (DEBUG_IDE_MODE) Debug_BUS_INFO +#define DebugRESINFO if (DEBUG_IDE_MODE) Debug_RES_INFO +#define DebugALLRESINFO if (DEBUG_IDE_MODE) Debug_ALL_RES_INFO +#define DebugDEVINFO if (DEBUG_IDE_MODE) Debug_DEV_INFO + + +#define DRV_NAME "ELBRUS-IDE" + +/* + * IDE Configuration registers + */ +#define L_IDE_CONTROL 0x40 /* IDE control register 0x40-0x41 */ + +/* IDE Control Register structure */ +#define UDMA_MASTER_PRIMARY_IDEC 0x0001 /* Ultra DMA mode for master */ + /* of primary channel */ +#define UDMA_SLAVE_PRIMARY_IDEC 0x0002 /* Ultra DMA mode for slave */ + /* of primary channel */ +#define UDMA_MASTER_SECONDARY_IDEC 0x0004 /* Ultra DMA mode for master */ + /* of secjndary 
channel */ +#define UDMA_SLAVE_SECONDARY_IDEC 0x0008 /* Ultra DMA mode for slave */ + /* of secjndary channel */ +#define PREF_ENABLE_PRIMARY_IDEC 0x0010 /* Post and prefetch enable */ + /* for primary channel */ +#define PREF_ENABLE_SECONDARY_IDEC 0x0020 /* Post and prefetch enable */ + /* for secondary channel */ +#define RESET_INTERFACE_IDEC 0x0040 /* reset IDE interface */ +#define MASK_INTB_IDEC 0x0080 /* intb interrupt masked */ +#define MASK_INT_PRIMARY_IDEC 0x0100 /* interrupt masked for */ + /* primary channel */ +#define MASK_INT_SECONDARY_IDEC 0x0200 /* interrupt masked for */ + /* primary channel */ + +#define GET_ULTRA_DMA_ENABLE_MODE_IDEC(reg_value, secondary, slave) \ +({ \ + unsigned int mode; \ + if (secondary) { \ + if (slave) \ + mode = (reg_value) & UDMA_SLAVE_SECONDARY_IDEC; \ + else \ + mode = (reg_value) & UDMA_MASTER_SECONDARY_IDEC;\ + } else { \ + if (slave) \ + mode = (reg_value) & UDMA_SLAVE_PRIMARY_IDEC; \ + else \ + mode = (reg_value) & UDMA_MASTER_PRIMARY_IDEC; \ + } \ + mode = (mode != 0); \ +}) +#define SET_ULTRA_DMA_ENABLE_MODE_IDEC(reg_value, secondary, slave) \ +({ \ + if (secondary) { \ + if (slave) \ + (reg_value) |= UDMA_SLAVE_SECONDARY_IDEC; \ + else \ + (reg_value) |= UDMA_MASTER_SECONDARY_IDEC;\ + } else { \ + if (slave) \ + (reg_value) |= UDMA_SLAVE_PRIMARY_IDEC; \ + else \ + (reg_value) |= UDMA_MASTER_PRIMARY_IDEC; \ + } \ +}) +#define SET_MULTIWORD_DMA_ENABLE_MODE_IDEC(reg_value, secondary, slave) \ +({ \ + if (secondary) { \ + if (slave) \ + (reg_value) &= ~UDMA_SLAVE_SECONDARY_IDEC; \ + else \ + (reg_value) &= ~UDMA_MASTER_SECONDARY_IDEC;\ + } else { \ + if (slave) \ + (reg_value) &= ~UDMA_SLAVE_PRIMARY_IDEC; \ + else \ + (reg_value) &= ~UDMA_MASTER_PRIMARY_IDEC; \ + } \ +}) + +/* Class Code register */ +#define NATIVE_MODE_PRIMARY_CLASSC 0x01 /* primary channel in native */ + /* mode */ +#define NATIVE_MODE_SECONDARY_CLASSC 0x04 /* secondary channel in */ + /* native mode */ +#define NATIVE_MODE_CLASSC 
(NATIVE_MODE_PRIMARY_CLASSC | \ + NATIVE_MODE_SECONDARY_CLASSC) + +/* IDE PIO register port access timing register */ +#define L_IDE_PIO_RPA 0x44 /* IDE register 0x44 */ +/* register structure */ +#define MODE_0_IDE_PIO_RPA 0x00 /* Mode 0 to access */ +#define MODE_1_IDE_PIO_RPA 0x01 /* Mode 1 to access */ +#define MODE_2_IDE_PIO_RPA 0x02 /* Mode 2 to access */ +#define MODE_3_IDE_PIO_RPA 0x03 /* Mode 3 to access */ +#define MODE_4_IDE_PIO_RPA 0x04 /* Mode 4 to access */ + +/* IDE PIO data port access timing register */ +#define L_IDE_PIO_DPA 0x48 /* IDE register 0x48-0x4b */ +/* register structure */ +#define L_IDE_PIO_DPA0_P 0x48 /* primary channel master */ +#define L_IDE_PIO_DPA1_P 0x49 /* primary channel slave */ +#define L_IDE_PIO_DPA0_S 0x4a /* secondary channel master */ +#define L_IDE_PIO_DPA1_S 0x4b /* secondary channel slave */ + +#define MODE_0_IDE_PIO_DPA 0x00 /* Mode 0 to access */ +#define MODE_1_IDE_PIO_DPA 0x01 /* Mode 1 to access */ +#define MODE_2_IDE_PIO_DPA 0x02 /* Mode 2 to access */ +#define MODE_3_IDE_PIO_DPA 0x03 /* Mode 3 to access */ +#define MODE_4_IDE_PIO_DPA 0x04 /* Mode 4 to access */ +#define MAX_MODE_IDE_PIO_DPA MODE_4_IDE_PIO_DPA + +/* IDE DMA IDE access timing register */ +#define L_IDE_DMA_DA 0x4c /* IDE register 0x4c-0x4f */ +/* register structure */ +#define L_IDE_DMA_DA0_P 0x4c /* primary channel master */ +#define L_IDE_DMA_DA1_P 0x4d /* primary channel slave */ +#define L_IDE_DMA_DA0_S 0x4e /* secondary channel master */ +#define L_IDE_DMA_DA1_S 0x4f /* secondary channel slave */ + +#define MULTIWORD_DMA_MODE_0_IDE 0x00 /* Multiword Mode 0 to access */ +#define MULTIWORD_DMA_MODE_1_IDE 0x01 /* Multiword Mode 1 to access */ +#define MULTIWORD_DMA_MODE_2_IDE 0x02 /* Multiword Mode 2 to access */ +#define ULTRA_DMA_MODE_0_IDE 0x00 /* Ultra DMA Mode 0 to access */ +#define ULTRA_DMA_MODE_1_IDE 0x01 /* Ultra DMA Mode 1 to access */ +#define ULTRA_DMA_MODE_2_IDE 0x02 /* Ultra DMA Mode 2 to access */ +#define ULTRA_DMA_MODE_3_IDE 
0x03 /* Ultra DMA Mode 3 to access */ +#define ULTRA_DMA_MODE_4_IDE 0x04 /* Ultra DMA Mode 4 to access */ +#define ULTRA_DMA_MODE_5_IDE 0x05 /* Ultra DMA Mode 5 to access */ + +#define GET_L_IDE_MODE(reg_value, secondary, slave) \ +({ \ + unsigned int mode; \ + if (secondary) { \ + if (slave) \ + mode = ((reg_value) >> 24) & 0xff; \ + else \ + mode = ((reg_value) >> 16) & 0xff; \ + } else { \ + if (slave) \ + mode = ((reg_value) >> 8) & 0xff; \ + else \ + mode = ((reg_value) >> 0) & 0xff; \ + } \ + mode; \ +}) +#define SET_L_IDE_MODE(reg_value, mode, secondary, slave) \ +({ \ + if (secondary) { \ + if (slave) { \ + (reg_value) &= ~((0xff) << 24); \ + (reg_value) |= ((mode) << 24); \ + } else { \ + (reg_value) &= ~((0xff) << 16); \ + (reg_value) |= ((mode) << 16); \ + } \ + } else { \ + if (slave) { \ + (reg_value) &= ~((0xff) << 8); \ + (reg_value) |= ((mode) << 8); \ + } else { \ + (reg_value) &= ~((0xff) << 0); \ + (reg_value) |= ((mode) << 0); \ + } \ + } \ +}) + +/* IDE iterrupt number */ +#define NATIVE_MODE_IDE_IRQ 11 /* IRQ # for native mode */ +#define LEGACY_MODE_IDE_IRQ 14 /* IRQ # for legacy mode */ + +/* Number of word size registers in PCI config space */ +#define L_IDE_CONFIG_REGS_NUM 32 + +static struct pci_device_id l_pci_tbl[] = { + { + .vendor = PCI_VENDOR_ID_ELBRUS, + .device = PCI_DEVICE_ID_MCST_IDE, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .vendor = PCI_VENDOR_ID_MCST_TMP, + .device = PCI_DEVICE_ID_MCST_IDE_SDHCI, + .subvendor = 0, + .subdevice = 0, + }, + { + 0, + }, +}; + +static void init_hwif_l_ide(ide_hwif_t *hwif); +static int init_chipset_l_ide(struct pci_dev *dev); +static void l_init_iops(ide_hwif_t *hwif); + +static int l_dma_mode = L_DEAULT_IDE_DMA_MODE; + +static int __init l_setup_ide_dma(const char *str, + const struct kernel_param *kp) +{ + const char *s = strstrip((char *) str); + + if (strcmp(s, "mw2") == 0 || strcmp(s, "mword2") == 0) { + l_dma_mode = ATA_MWDMA2; /* multi-word 2 */ + } else if 
(strcmp(s, "udma2") == 0 || strcmp(s, "udma33") == 0) { + l_dma_mode = ATA_UDMA2; /* ultra DMA 33 */ + } else if (strcmp(s, "udma4") == 0 || strcmp(s, "udma66") == 0) { + l_dma_mode = ATA_UDMA4; /* ultra DMA 66 */ + } else if (strcmp(s, "udma5") == 0) { + l_dma_mode = ATA_UDMA5; /* ultra DMA 100 */ + } + return 1; +} +static const struct kernel_param_ops idedma_ops = { + .set = l_setup_ide_dma +}; +module_param_cb(idedma, &idedma_ops, NULL, 0200); +MODULE_PARM_DESC(idedma, "Allowed values: mw2 == mword2, udma2 == udma33, udma4 == udma66, udma5\n"); + +static u8 l_udma_filter(ide_drive_t *drive) +{ + return (l_dma_mode); +} + +/** + * l_set_pio_mode - set host controller for PIO mode + * @drive: drive to tune + * @pio: desired PIO mode + * + * Set the interface PIO mode based upon the settings done by AMI BIOS + * (might be useful if drive is not registered in CMOS for any reason). + */ +static void l_set_pio_mode (ide_hwif_t *hwif, ide_drive_t *drive) +{ + struct pci_dev *dev = to_pci_dev(hwif->dev); + const u8 pio = drive->pio_mode - XFER_PIO_0; + + DebugIDE("%s: l_set_pio_mode() PIO mode is 0x%x\n", + pci_name(dev), pio); +} + +/** + * l_set_dma_mode - tune a Elbrus IDE interface + * @drive: IDE drive to tune + * @speed: speed to configure + * + * Set a IDE interface channel to the desired speeds. This involves + * requires the right timing data into the IDE configuration space + * then setting the drive parameters appropriately + */ + +static void l_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) +{ + struct pci_dev *dev = to_pci_dev(hwif->dev); + int channel = hwif->channel; + int slave = drive->dn & 1; + u16 ide_cntr; + u16 new_cntr; + u32 ide_timing; + u8 timing; + const u8 speed = drive->dma_mode; + + DebugIDE("%s: l_set_dma_mode() config drive %s.%s speed to 0x%x\n", + pci_name(dev), (channel) ? "secondary" : "primary", + (!slave) ? 
"master" : "slave", + speed); + pci_read_config_word(dev, L_IDE_CONTROL, &ide_cntr); + new_cntr = ide_cntr; + pci_read_config_dword(dev, L_IDE_DMA_DA, &ide_timing); + DebugIDE("%s: IDE Control Register 0x%04x, DMA mode 0x%08x\n", + pci_name(dev), ide_cntr, ide_timing); + switch(speed) { + case XFER_UDMA_5: + case XFER_UDMA_4: + case XFER_UDMA_3: + case XFER_UDMA_2: + case XFER_UDMA_1: + case XFER_UDMA_0: + SET_ULTRA_DMA_ENABLE_MODE_IDEC(new_cntr, + channel, slave); + break; + case XFER_MW_DMA_2: + case XFER_MW_DMA_1: + case XFER_SW_DMA_2: + case XFER_MW_DMA_0: + case XFER_SW_DMA_1: + case XFER_SW_DMA_0: + SET_MULTIWORD_DMA_ENABLE_MODE_IDEC(new_cntr, + channel, slave); + break; + default: + panic("l_tune_chipset() Bad IDE %s speed 0x%x\n", + pci_name(dev), speed); + } + switch(speed) { + case XFER_UDMA_5: + timing = ULTRA_DMA_MODE_5_IDE; + break; + case XFER_UDMA_4: + timing = ULTRA_DMA_MODE_4_IDE; + break; + case XFER_UDMA_3: + timing = ULTRA_DMA_MODE_3_IDE; + break; + case XFER_UDMA_2: + timing = ULTRA_DMA_MODE_2_IDE; + break; + case XFER_UDMA_1: + timing = ULTRA_DMA_MODE_1_IDE; + break; + case XFER_UDMA_0: + timing = ULTRA_DMA_MODE_0_IDE; + break; + case XFER_MW_DMA_2: + case XFER_SW_DMA_2: + timing = MULTIWORD_DMA_MODE_2_IDE; + break; + case XFER_MW_DMA_1: + case XFER_SW_DMA_1: + timing = MULTIWORD_DMA_MODE_1_IDE; + break; + case XFER_MW_DMA_0: + case XFER_SW_DMA_0: + timing = MULTIWORD_DMA_MODE_0_IDE; + break; + break; + default: + panic("l_tune_chipset() Bad IDE %s speed 0x%x\n", + pci_name(dev), speed); + } + if (ide_cntr != new_cntr) { + pci_write_config_word(dev, L_IDE_CONTROL, new_cntr); + DebugIDE("%s: set %s DMA enable mode for %s %s in IDE " + "Control Register 0x%04x\n", + pci_name(dev), + (GET_ULTRA_DMA_ENABLE_MODE_IDEC(new_cntr, + channel, slave)) ? "ULTRA" : "MULTIWORD", + (channel) ? "secondary" : "primary", + (slave) ? 
"slave" : "master", + new_cntr); + } + if (GET_L_IDE_MODE(ide_timing, channel, slave) != timing) { +#ifdef CONFIG_E90 + SET_L_IDE_MODE(ide_timing, 0xee, channel, slave); +#else + SET_L_IDE_MODE(ide_timing, timing, channel, slave); +#endif + pci_write_config_dword(dev, L_IDE_DMA_DA, ide_timing); + DebugIDE("%s: set %s DMA (%d) mode for %s %s in IDE " + "DMA Timing Register 0x%08x\n", + pci_name(dev), + (GET_ULTRA_DMA_ENABLE_MODE_IDEC(new_cntr, + channel, slave)) ? "ULTRA" : "MULTIWORD", + timing, + (channel) ? "secondary" : "primary", + (slave) ? "slave" : "master", + ide_timing); + } +} + +static u8 l_cable_detect(ide_hwif_t *hwif) +{ + /* + * Elbrus IDE controller support ULTRA DMA-5 mode, but + * has not hardware support to detect cable type + * So driver supposes that a 80 wire cable was detected + * To limitate DMA mode use command line option + * idedma= (udma2, udma3 ... udma5) + */ + + return (ATA_CBL_PATA80); +} + +static void l_ide_reset(struct pci_dev *dev) +{ + u16 ide_cntr; + /* Resetting controller */ + pr_warning("%s: resetting IDE controller\n", pci_name(dev)); + pci_read_config_word(dev, L_IDE_CONTROL, &ide_cntr); + ide_cntr |= RESET_INTERFACE_IDEC; + pci_write_config_word(dev, L_IDE_CONTROL, ide_cntr); + udelay(25); + pci_read_config_word(dev, L_IDE_CONTROL, &ide_cntr); + ide_cntr &= ~RESET_INTERFACE_IDEC; + pci_write_config_word(dev, L_IDE_CONTROL, ide_cntr); +} + +#ifndef CONFIG_E90 +static void l_dma_clear(ide_drive_t *drive) +{ + ide_hwif_t *hwif = drive->hwif; + l_ide_reset(to_pci_dev(hwif->dev)); +} + +static void l_ide_tf_dump(const char *s, struct ide_cmd *cmd) +{ + pr_warning("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x " + "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n", + s, cmd->tf.feature, cmd->tf.nsect, + cmd->tf.lbal, cmd->tf.lbam, cmd->tf.lbah, + cmd->tf.device, cmd->tf.command); + pr_warning("%s: hob: nsect 0x%02x lbal 0x%02x lbam 0x%02x lbah 0x%02x\n", + s, cmd->hob.nsect, cmd->hob.lbal, cmd->hob.lbam, cmd->hob.lbah); +} + 
+static void l_dma_dump_table(ide_drive_t *drive) +{ + ide_hwif_t *hwif = drive->hwif; + __le32 *t = (__le32 *)hwif->dmatable_cpu; + pr_warning("DMA table:\n"); + do { + pr_warning("\t%04x\t%04x\n", + le32_to_cpu(t[0]), le32_to_cpu(t[1])); + if (le32_to_cpu(t[1]) & 0x80000000) + break; + t += 2; + } while (1); +} + +static int l_ide_timer_expiry(ide_drive_t *drive) +{ + ide_hwif_t *hwif = drive->hwif; + struct ide_cmd *cmd = &hwif->cmd; + u8 dma_stat = hwif->dma_ops->dma_sff_read_status(hwif); + + pr_warning("%s: DMA status (0x%02x) {%s %s %s}\n", + drive->name, dma_stat, + dma_stat & ATA_DMA_ERR ? "Error" : "", + dma_stat & ATA_DMA_ACTIVE ? "Active" : "", + dma_stat & ATA_DMA_INTR ? "Interrupt" : "" + ); + l_dma_dump_table(drive); + l_ide_tf_dump(hwif->cur_dev->name, cmd); + + ide_dump_status(drive, "DMA timeout", + hwif->tp_ops->read_status(hwif)); + if ((dma_stat & 0x18) == 0x18) /* BUSY Stupid Early Timer !! */ + return WAIT_CMD; + + hwif->expiry = NULL; /* one free ride for now */ + + if (dma_stat & ATA_DMA_ERR) { /* ERROR */ + l_dma_clear(drive); + return -1; + } + + if (dma_stat & ATA_DMA_ACTIVE) /* DMAing */ + return WAIT_CMD; + + if (dma_stat & ATA_DMA_INTR) /* Got an Interrupt */ + return WAIT_CMD; + + return 0; /* Status is unknown -- reset the bus */ +} + +static const struct ide_dma_ops l_ide_dma_ops = { + .dma_host_set = ide_dma_host_set, + .dma_setup = ide_dma_setup, + .dma_start = ide_dma_start, + .dma_end = ide_dma_end, + .dma_test_irq = ide_dma_test_irq, + .dma_lost_irq = ide_dma_lost_irq, + .dma_timer_expiry = l_ide_timer_expiry, + .dma_sff_read_status = ide_dma_sff_read_status, + .dma_clear = l_dma_clear, +}; +#endif /*CONFIG_E90*/ + +static const struct ide_port_ops l_port_ops = { + .set_pio_mode = l_set_pio_mode, + .set_dma_mode = l_set_dma_mode, + .udma_filter = l_udma_filter, + .cable_detect = l_cable_detect, +}; + +static struct ide_port_info l_pci_info = { + .name = "ELBRUS", + .init_hwif = &init_hwif_l_ide, + .init_chipset = 
&init_chipset_l_ide, + .init_iops = l_init_iops, + .host_flags = IDE_HFLAG_NO_AUTODMA, + .port_ops = &l_port_ops, +#ifdef CONFIG_E90 + .dma_ops = &e90_dma_ops, +#else + .dma_ops = &l_ide_dma_ops, +#endif + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, +}; + +static int init_chipset_l_ide(struct pci_dev *dev) +{ + u16 ide_cntr; + + DebugIDE("%s: init_chipset_l_ide() started\n", pci_name(dev)); + pci_read_config_word(dev, L_IDE_CONTROL, &ide_cntr); + DebugIDE("%s: IDE Control Register 0x%04x, IRQ %d\n", + pci_name(dev), ide_cntr, dev->irq); + if (ide_cntr & (MASK_INTB_IDEC | MASK_INT_PRIMARY_IDEC | + MASK_INT_SECONDARY_IDEC)) { + printk(KERN_INFO "%s: Unmask interrupts\n", pci_name(dev)); + ide_cntr &= ~(MASK_INTB_IDEC | MASK_INT_PRIMARY_IDEC | + MASK_INT_SECONDARY_IDEC); + pci_write_config_word(dev, L_IDE_CONTROL, ide_cntr); + } +#ifndef CONFIG_E90 + if (dev->class & (NATIVE_MODE_CLASSC)) { + if (dev->irq <= 0) { + DebugIDE("%s: IDE in native mode set IRQ to %d\n", + pci_name(dev), NATIVE_MODE_IDE_IRQ); + dev->irq = NATIVE_MODE_IDE_IRQ; + } + DebugIDE("%s: IDE in native mode set IRQ to %d\n", + pci_name(dev), dev->irq); + + l_pci_info.host_flags &= ~IDE_HFLAG_ISA_PORTS; + } else { + if (dev->irq <= 0) { + DebugIDE("%s: IDE in legacy mode set IRQ to %d\n", + pci_name(dev), LEGACY_MODE_IDE_IRQ); + dev->irq = LEGACY_MODE_IDE_IRQ; + } + l_pci_info.host_flags |= IDE_HFLAG_ISA_PORTS; + } +#endif + return (0); +} + +static void l_enable_io_ports(struct pci_dev *dev, bool enable) +{ + u16 old_cmd, cmd; + + pci_read_config_word(dev, PCI_COMMAND, &old_cmd); + if (enable) + cmd = old_cmd | PCI_COMMAND_IO; + else + cmd = old_cmd & ~PCI_COMMAND_IO; + if (cmd != old_cmd) { + DebugIDE("l_enable_io_ports() IO space: %s \n", + enable ? 
"enabling" : "disabling"); + pci_write_config_word(dev, PCI_COMMAND, cmd); + } +} + +/** + * init_hwif_l_ide - fill in the hwif for the ELBRUS + * @hwif: IDE interface + * + * Set up the ide_hwif_t for the ELBRUS interface according to the + * capabilities of the hardware. + */ + +static void init_hwif_l_ide(ide_hwif_t *hwif) +{ + l_enable_io_ports(to_pci_dev(hwif->dev), 1); + if (!hwif->dma_base) { + DebugIDE("%s: init_hwif_l_ide() DMA base does not set\n", + pci_name(to_pci_dev(hwif->dev))); + return; + } + DebugIDE("%s: init_hwif_l_ide() DMA base set to 0x%lx\n", + pci_name(to_pci_dev(hwif->dev)), hwif->dma_base); +} + +/** + * l_probe - called when a Elbrus IDE is found + * @dev: the Elbrus device + * @id: the matching pci id + * + * Called when the PCI registration layer (or the IDE initialization) + * finds a device matching our IDE device tables. + */ + +static int l_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + struct ide_port_info *d = &l_pci_info; + if (id->driver_data != 0) { + pr_alert("%s: l_probe() invalid IDE id %ld should be only 0\n", + pci_name(dev), id->driver_data); + return (-1); + } + + if(L_FORCE_NATIVE_MODE) { + pci_write_config_dword(dev, PCI_CLASS_REVISION, + NATIVE_MODE_CLASSC << 8); + dev->class |= NATIVE_MODE_CLASSC; + } + + return ide_pci_init_one(dev, d, NULL); +} + +MODULE_DEVICE_TABLE(pci, l_pci_tbl); + +static struct pci_driver driver = { + .name = DRV_NAME, + .id_table = l_pci_tbl, + .probe = l_probe, + .suspend = ide_pci_suspend, + .resume = ide_pci_resume, +}; + +static int __init l_ide_init(void) +{ + struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_IDE_SDHCI, NULL); + + if (pdev && pci_resource_flags(pdev, 5) & IORESOURCE_MEM) + /*device is configured as sdhost */ + return 0; + + return ide_pci_register_driver(&driver); +} + +module_init(l_ide_init); + +MODULE_AUTHOR("Salavat Gilazov, Alexey Sitnikov"); +MODULE_DESCRIPTION("PCI driver module for Elbrus IDE"); 
+MODULE_LICENSE("GPL"); diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c index 4b8b5a87398c..b77f4affd5c7 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c @@ -10,6 +10,11 @@ #include #include #include +#ifdef CONFIG_MCST +#include +#include +#include +#endif /* CONFIG_MCST */ #include "inv_mpu_iio.h" static const struct regmap_config inv_mpu_regmap_config = { @@ -111,6 +116,23 @@ static int inv_mpu_probe(struct i2c_client *client, return PTR_ERR(regmap); } +#ifdef CONFIG_MCST + if (!client->irq) { + struct gpio_desc *gpiod_int; + + gpiod_int = devm_gpiod_get_optional(&client->dev, + "irq", GPIOD_IN); + if (!IS_ERR(gpiod_int)) { + client->irq = gpiod_to_irq(gpiod_int); + } + } + + if (!client->irq) { + dev_err(&client->dev, "No IRQ !!!\n"); + return -EPROBE_DEFER; + } +#endif /* CONFIG_MCST */ + result = inv_mpu_core_probe(regmap, client->irq, name, NULL, chip_type); if (result < 0) @@ -203,6 +225,10 @@ static const struct of_device_id inv_of_match[] = { .compatible = "invensense,mpu9255", .data = (void *)INV_MPU9255 }, + { + .compatible = "invensense,mpu9255", + .data = (void *)INV_MPU9255 + }, { .compatible = "invensense,icm20608", .data = (void *)INV_ICM20608 diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c index dd55e70b6f77..bf15bb538d0a 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c @@ -131,6 +131,10 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev, int irq_type) int ret; struct inv_mpu6050_state *st = iio_priv(indio_dev); +#ifdef CONFIG_MCST + dev_info(&indio_dev->dev, + "trigger_alloc: %s-dev%d", indio_dev->name, indio_dev->id); +#endif /* CONFIG_MCST */ st->trig = devm_iio_trigger_alloc(&indio_dev->dev, "%s-dev%d", indio_dev->name, @@ -138,6 +142,10 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev, int irq_type) if 
(!st->trig) return -ENOMEM; +#ifdef CONFIG_MCST + dev_info(&indio_dev->dev, + "request_irq: %d, type %d\n", st->irq, irq_type); +#endif /* CONFIG_MCST */ ret = devm_request_irq(&indio_dev->dev, st->irq, &iio_trigger_generic_data_rdy_poll, irq_type, diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c index 13495b43dbc1..d220acccfaf5 100644 --- a/drivers/infiniband/core/iwpm_util.c +++ b/drivers/infiniband/core/iwpm_util.c @@ -688,7 +688,11 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid) int i = 0, nlmsg_bytes = 0; unsigned long flags; const char *err_str = ""; +#if defined CONFIG_MCST && defined __LCC__ + int ret = 0; +#else int ret; +#endif skb = dev_alloc_skb(NLMSG_GOODSIZE); if (!skb) { diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 9025086a8932..6e064b9ae3cd 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -5696,6 +5696,9 @@ static int mlx5_ib_counter_dealloc(struct rdma_counter *counter) return mlx5_core_dealloc_q_counter(dev->mdev, counter->id); } +#if !defined(CONFIG_MCST) || !defined(__LCC__) || \ + defined(CONFIG_MLX5_CORE_IPOIB) +/* bug #121767 */ static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num, enum rdma_netdev_t type, struct rdma_netdev_alloc_params *params) @@ -5705,6 +5708,7 @@ static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num, return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params); } +#endif static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev) { @@ -6306,9 +6310,13 @@ static const struct ib_device_ops mlx5_ib_dev_flow_ipsec_ops = { .modify_flow_action_esp = mlx5_ib_modify_flow_action_esp, }; +#if !defined(CONFIG_MCST) || !defined(__LCC__) || \ + defined(CONFIG_MLX5_CORE_IPOIB) +/* bug #121767 */ static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = { .rdma_netdev_get_params = mlx5_ib_rn_get_params, }; +#endif static const struct ib_device_ops 
mlx5_ib_dev_sriov_ops = { .get_vf_config = mlx5_ib_get_vf_config, @@ -6374,10 +6382,14 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev) (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) | (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW); +#if !defined(CONFIG_MCST) || !defined(__LCC__) || \ + defined(CONFIG_MLX5_CORE_IPOIB) + /* bug #121767 */ if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) && IS_ENABLED(CONFIG_MLX5_CORE_IPOIB)) ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ipoib_enhanced_ops); +#endif if (mlx5_core_is_pf(mdev)) ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops); diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index 7e3eae54c192..39f7a2f07b66 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c @@ -377,7 +377,7 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data, dev_dbg(&serio->dev, "Received %02x flags %02x\n", data, flags); -#if !defined(__i386__) && !defined (__x86_64__) +#if !defined(__i386__) && !defined (__x86_64__) && !defined(__e2k__) if ((flags & (SERIO_FRAME | SERIO_PARITY)) && (~flags & SERIO_TIMEOUT) && !atkbd->resend && atkbd->write) { dev_warn(&serio->dev, "Frame/parity error: %02x\n", flags); serio_write(serio, ATKBD_CMD_RESEND); diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 7d9ae394e597..443df8af7e70 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -177,7 +177,7 @@ config INPUT_PMIC8XXX_PWRKEY config INPUT_SPARCSPKR tristate "SPARC Speaker support" - depends on PCI && SPARC64 + depends on PCI && SPARC64 && OF help Say Y here if you want the standard Speaker on Sparc PCI systems to be used for bells and whistles. @@ -400,6 +400,20 @@ config INPUT_KEYSPAN_REMOTE To compile this driver as a module, choose M here: the module will be called keyspan_remote. +config INPUT_LTC2954 + tristate "LTC2954 Pushbutton Controller with Interrupt" + depends on INPUT_POLLDEV + depends on GPIOLIB + help + Say Y here if you want to use a LTC2954 button for power + management. By default button will poll on gpio pin, without + raising interrupts. If "modprobe ltc2954 use_irq=1"- gpio pin + raises interrupt (do not use this mode when CONFIG_L_WDT is set, + due watchdog and gpio share the same irq line) + + To compile this driver as a module, choose M here: the module will + be called ltc2954. + config INPUT_KXTJ9 tristate "Kionix KXTJ9 tri-axis digital accelerometer" depends on I2C diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index 8fd187f314bd..4f86e03e1182 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -42,6 +42,7 @@ obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o +obj-$(CONFIG_INPUT_LTC2954) += ltc2954.o obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o obj-$(CONFIG_INPUT_MAX77650_ONKEY) += max77650-onkey.o diff --git a/drivers/input/misc/ltc2954.c b/drivers/input/misc/ltc2954.c new file mode 100644 index 000000000000..9652523e8f97 --- /dev/null +++ b/drivers/input/misc/ltc2954.c @@ -0,0 +1,367 @@ +/* + * Driver for LTC2954 Pushbutton On/Off Controller. 
Supports both mP interrupt + * and polls the state of GPIO. + * + * Copyright 2012 Evgeny Kravtsunov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ID_VALUE_STUB 0x0001 +#define ID_VERSION_STUB 0x0100 +#define GPIO_LTC2954_VENDOR_ID ID_VALUE_STUB +#define GPIO_LTC2954_PRODUCT_ID ID_VALUE_STUB +#define GPIO_LTC2954_VERSION_ID ID_VERSION_STUB + +#define LTC2954_BTN_RATE 500 /* msec */ + +static bool use_irq = 1; +module_param(use_irq, bool, 0); +MODULE_PARM_DESC(use_irq, "Detects either to use poll or request irq"); + +/* Polling is a temporary solution when we not abel to use irq. + * We use the only poll_dev and the only button. + */ +struct ltc2954_poll_drvdata { + struct input_polled_dev *poll_dev; + struct gpio_keys_button *button; +}; + +static bool ltc2954_poll_button_pressed(struct gpio_keys_button *button) +{ + int val; + + val = gpio_get_value(button->gpio); + + if ((button->active_low && val == 0) || (!button->active_low && val)) + return 1; + + return 0; +} + +static void ltc2954_poll(struct input_polled_dev *poll_dev) +{ + struct ltc2954_poll_drvdata *ddata = poll_dev->private; + struct input_dev *input = poll_dev->input; + int state; + + state = ltc2954_poll_button_pressed(ddata->button); + + input_event(input, ddata->button->type, ddata->button->code, !!state); + input_sync(input); +} + +/* Structures for the case when use_irq=1 */ +struct ltc2954_button_data { + struct gpio_keys_button *button; + struct input_dev *input; + struct work_struct work; +}; + +struct ltc2954_button_drvdata { + struct input_dev *input; + struct ltc2954_button_data data[0]; +}; + +static void ltc2954_button_report_event(struct 
ltc2954_button_data *bdata) +{ + struct gpio_keys_button *button = bdata->button; + struct input_dev *input = bdata->input; + int state = (button->active_low ? 0 : 1); + + /* We play with values as it does not matter for is + * is button pressed or released - any button press + * must be caught by event handlers */ + if (state) + button->active_low = 1; + else + button->active_low = 0; + + input_event(input, button->type, button->code, !!state); + input_sync(input); +} + +static void ltc2954_button_work_func(struct work_struct *work) +{ + struct ltc2954_button_data *bdata = + container_of(work, struct ltc2954_button_data, work); + + ltc2954_button_report_event(bdata); +} + +static irqreturn_t ltc2954_button_irq_handler(int irq, void *dev_id) +{ + struct ltc2954_button_data *bdata = dev_id; + struct gpio_keys_button *button = bdata->button; + + BUG_ON(irq != gpio_to_irq(button->gpio)); + + schedule_work(&bdata->work); + + return IRQ_HANDLED; +} + +static int ltc2954_setup(struct device *dev, + struct ltc2954_button_data *bdata, + struct gpio_keys_button *button) +{ + char *desc = "ltc2954"; + int irq, error; + + if (use_irq) + INIT_WORK(&bdata->work, ltc2954_button_work_func); + + error = gpio_request(button->gpio, desc); + if (error < 0) { + dev_err(dev, "ltc2954 failed to request GPIO %d, error %d\n", + button->gpio, error); + goto out_err; + } + + error = gpio_direction_input(button->gpio); + if (error < 0) { + dev_err(dev, "ltc2954 failed to configure" + " direction for GPIO %d, error %d\n", + button->gpio, error); + goto cleanup; + } + + if (use_irq) { + irq = gpio_to_irq(button->gpio); + if (irq < 0) { + error = irq; + dev_err(dev, "ltc2954: unable to get irq number " + "for GPIO %d, error %d\n", button->gpio, error); + goto cleanup; + } + + error = request_irq(irq, ltc2954_button_irq_handler, + IRQF_TRIGGER_FALLING, + desc, bdata); + if (error) { + dev_err(dev, "ltc2954: unable to claim irq %d; error %d\n", + irq, error); + goto cleanup; + } + } + + return 0; 
+ +cleanup: + gpio_free(button->gpio); +out_err: + return error; +} + +static int ltc2954_probe(struct platform_device *pdev) +{ + struct gpio_keys_platform_data *pdata = pdev->dev.platform_data; + struct ltc2954_poll_drvdata *ddata_poll = NULL; + struct ltc2954_button_drvdata *ddata_irq = NULL; + struct device *dev = &pdev->dev; + struct input_polled_dev *poll_dev; + struct input_dev *input; + int i = 0, error; + + if (use_irq) { + /* button on irq */ + ddata_irq = kzalloc(sizeof(struct ltc2954_button_drvdata) + + pdata->nbuttons*sizeof(struct ltc2954_button_data), + GFP_KERNEL); + input = input_allocate_device(); + if (!ddata_irq || !input) { + dev_err(dev, "failed to allocate state\n"); + error = -ENOMEM; + goto out_err_free; + } + platform_set_drvdata(pdev, ddata_irq); + + input->name = pdev->name; + input->phys = "ltc2954"; + input->dev.parent = &pdev->dev; + input->id.bustype = BUS_HOST; + input->id.vendor = GPIO_LTC2954_VENDOR_ID; + input->id.product = GPIO_LTC2954_PRODUCT_ID; + input->id.version = GPIO_LTC2954_VERSION_ID; + + __set_bit(EV_KEY, input->evbit); + __set_bit(KEY_SLEEP, input->keybit); + + ddata_irq->input = input; + + for (i = 0; i < pdata->nbuttons; i++) { + struct gpio_keys_button *button = &pdata->buttons[i]; + struct ltc2954_button_data *bdata = &ddata_irq->data[i]; + + bdata->input = input; + bdata->button = button; + error = ltc2954_setup(dev, bdata, button); + if (error) + goto cleanup; + input_set_capability(input, button->type, button->code); + } + + error = input_register_device(input); + if (error) { + dev_err(dev, "Unable to register input device, " + "error: %d\n", error); + goto cleanup; + } + } else { + /* poll the button */ + if (pdata->nbuttons > 1) { + printk(KERN_ERR "Polling ltc2954 driver" + " supports the only button!\n"); + return -ENXIO; + } + + ddata_poll = kzalloc(sizeof(struct ltc2954_poll_drvdata), + GFP_KERNEL); + if (!ddata_poll) + return -ENOMEM; + + poll_dev = input_allocate_polled_device(); + if (!poll_dev) { + 
error = -ENOMEM; + goto out_err_free; + } + + poll_dev->poll = ltc2954_poll; + poll_dev->poll_interval = LTC2954_BTN_RATE; + poll_dev->input->name = pdev->name; + poll_dev->input->phys = "ltc2954"; + poll_dev->input->dev.parent = &pdev->dev; + poll_dev->input->id.bustype = BUS_HOST; + poll_dev->input->id.vendor = GPIO_LTC2954_VENDOR_ID; + poll_dev->input->id.product = GPIO_LTC2954_PRODUCT_ID; + poll_dev->input->id.version = GPIO_LTC2954_VERSION_ID; + + ddata_poll->poll_dev = poll_dev; + ddata_poll->button = &pdata->buttons[0]; + + poll_dev->private = ddata_poll; + + error = ltc2954_setup(dev, NULL, ddata_poll->button); + if (error) + goto out_err_free; + + platform_set_drvdata(pdev, ddata_poll); + input_set_capability(poll_dev->input, ddata_poll->button->type, + ddata_poll->button->code); + + __set_bit(ddata_poll->button->type, poll_dev->input->evbit); + __set_bit(ddata_poll->button->code, poll_dev->input->keybit); + + error = input_register_polled_device(poll_dev); + if (error) { + dev_err(dev, "Unable to register poll device, " + "error: %d\n", error); + goto cleanup; + } + } + return 0; + + cleanup: + if (use_irq) { + while (--i >= 0) { + free_irq(gpio_to_irq(pdata->buttons[i].gpio), + &ddata_irq->data[i]); + cancel_work_sync(&ddata_irq->data[i].work); + gpio_free(pdata->buttons[i].gpio); + } + platform_set_drvdata(pdev, NULL); + } else { + gpio_free(pdata->buttons[0].gpio); + platform_set_drvdata(pdev, NULL); + input_free_polled_device(poll_dev); + } + + out_err_free: + if (use_irq) { + input_free_device(input); + kfree(ddata_irq); + } else { + kfree(ddata_poll); + } + + return error; +} + +static int ltc2954_remove(struct platform_device *pdev) +{ + struct gpio_keys_platform_data *pdata = pdev->dev.platform_data; + int i; + + if (use_irq) { + struct ltc2954_button_drvdata *ddata_irq = + platform_get_drvdata(pdev); + struct input_dev *input = ddata_irq->input; + + for (i = 0; i < pdata->nbuttons; i++) { + int irq = gpio_to_irq(pdata->buttons[i].gpio); + 
free_irq(irq, &ddata_irq->data[i]); + cancel_work_sync(&ddata_irq->data[i].work); + gpio_free(pdata->buttons[i].gpio); + } + input_unregister_device(input); + + } else { + struct ltc2954_poll_drvdata *ddata_poll = + platform_get_drvdata(pdev); + struct input_polled_dev *poll_dev = ddata_poll->poll_dev; + + gpio_free(pdata->buttons[0].gpio); + input_unregister_polled_device(poll_dev); + input_free_polled_device(poll_dev); + dev_set_drvdata(&pdev->dev, NULL); + } + + return 0; +} + +static struct platform_driver ltc2954_device_driver = { + .probe = ltc2954_probe, + .remove = ltc2954_remove, + .driver = { + .name = "ltc2954", + .owner = THIS_MODULE, + } +}; + +static int __init ltc2954_init(void) +{ + return platform_driver_register(<c2954_device_driver); +} + +static void __exit ltc2954_exit(void) +{ + platform_driver_unregister(<c2954_device_driver); +} + +module_init(ltc2954_init); +module_exit(ltc2954_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Evgeny Kravtsunov "); +MODULE_DESCRIPTION("Driver for LTC2954 bound to GPIO"); diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c index 505c562a5daa..6e34b1fa6cc9 100644 --- a/drivers/input/mousedev.c +++ b/drivers/input/mousedev.c @@ -55,6 +55,15 @@ struct mousedev_hw_data { unsigned long buttons; }; +#ifdef CONFIG_MCST +#define NO_TS 0 /* not a touchscreen device */ +#define EGALAX_TS 1 /* eGalax touchscreen device */ +#define ZYTRONIC_TS 2 /* Zytronic touchscreen device */ +#define ELO_TS 3 /* Elo touchscreen device */ +#define XIROKU_TS 4 /* Xiroku touchscreen device */ +#define TOUCH_3M_TS 5 /* 3M touchscreen device */ + +#endif /* CONFIG_MCST */ struct mousedev { int open; struct input_handle handle; @@ -77,6 +86,10 @@ struct mousedev { int (*open_device)(struct mousedev *mousedev); void (*close_device)(struct mousedev *mousedev); +#ifdef CONFIG_MCST + int touchscreen; /* touchscreen device */ + +#endif /* CONFIG_MCST */ }; enum mousedev_emul { @@ -172,6 +185,13 @@ static void 
mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev, switch (code) { case ABS_X: +#ifdef CONFIG_MCST + if (mousedev->touchscreen) { + mousedev->packet.abs_event = 1; + mousedev->packet.x = value; + break; + }; +#endif /* CONFIG_MCST */ min = input_abs_get_min(dev, ABS_X); max = input_abs_get_max(dev, ABS_X); @@ -186,6 +206,13 @@ static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev, break; case ABS_Y: +#ifdef CONFIG_MCST + if (mousedev->touchscreen) { + mousedev->packet.abs_event = 1; + mousedev->packet.y = value; + break; + }; +#endif /* CONFIG_MCST */ min = input_abs_get_min(dev, ABS_Y); max = input_abs_get_max(dev, ABS_Y); @@ -288,11 +315,20 @@ static void mousedev_notify_readers(struct mousedev *mousedev, client->pos_y = packet->y; } +#ifdef CONFIG_MCST + if (mousedev->touchscreen == NO_TS) { +#endif /* CONFIG_MCST */ client->pos_x += packet->dx; client->pos_x = clamp_val(client->pos_x, 0, xres); client->pos_y += packet->dy; client->pos_y = clamp_val(client->pos_y, 0, yres); +#ifdef CONFIG_MCST + } else { + client->pos_x = packet->x; + client->pos_y = packet->y; + }; +#endif /* CONFIG_MCST */ p->dx += packet->dx; p->dy += packet->dy; @@ -355,12 +391,27 @@ static void mousedev_event(struct input_handle *handle, if (test_bit(BTN_TRIGGER, handle->dev->keybit)) return; +#ifndef CONFIG_MCST if (test_bit(BTN_TOOL_FINGER, handle->dev->keybit)) +#else /* CONFIG_MCST */ + if (test_bit(BTN_TOOL_FINGER, handle->dev->keybit) && + (mousedev->touchscreen == NO_TS)) { +#endif /* CONFIG_MCST */ mousedev_touchpad_event(handle->dev, mousedev, code, value); +#ifndef CONFIG_MCST else mousedev_abs_event(handle->dev, mousedev, code, value); +#else /* CONFIG_MCST */ + } else if (mousedev->touchscreen) { + mousedev_abs_event(handle->dev, mousedev, + ABS_X + (code & 1), value); + } else { + mousedev_abs_event(handle->dev, mousedev, code, + value); + }; +#endif /* CONFIG_MCST */ break; case EV_REL: @@ -391,7 +442,12 @@ static void 
mousedev_event(struct input_handle *handle, } mousedev_notify_readers(mousedev, &mousedev->packet); +#ifndef CONFIG_MCST mousedev_notify_readers(mousedev_mix, &mousedev->packet); +#else /* CONFIG_MCST */ + if (mousedev->touchscreen == NO_TS) + mousedev_notify_readers(mousedev_mix, &mousedev->packet); +#endif /* CONFIG_MCST */ mousedev->packet.dx = mousedev->packet.dy = mousedev->packet.dz = 0; @@ -607,6 +663,19 @@ static void mousedev_packet(struct mousedev_client *client, u8 *ps2_data) case MOUSEDEV_EMUL_PS2: default: +#ifdef CONFIG_MCST + if (client->mousedev->touchscreen) { + /* for touchscreens */ + ps2_data[0] = 0x80 | p->buttons; + ps2_data[1] = (client->pos_x >> 8); + ps2_data[2] = (client->pos_x & 0xFF); + ps2_data[3] = (client->pos_y >> 8); + ps2_data[4] = (client->pos_y & 0xFF); + p->dz = 0; + client->bufsiz = 5; + break; + }; +#endif /* CONFIG_MCST */ p->dz = 0; ps2_data[0] |= ((p->buttons & 0x10) >> 3) | @@ -867,6 +936,9 @@ static struct mousedev *mousedev_create(struct input_dev *dev, mixdev ? 
SINGLE_DEPTH_NESTING : 0); init_waitqueue_head(&mousedev->wait); +#ifdef CONFIG_MCST + mousedev->touchscreen = NO_TS; +#endif /* CONFIG_MCST */ if (mixdev) { dev_set_name(&mousedev->dev, "mice"); @@ -877,6 +949,27 @@ static struct mousedev *mousedev_create(struct input_dev *dev, /* Normalize device number if it falls into legacy range */ if (dev_no < MOUSEDEV_MINOR_BASE + MOUSEDEV_MINORS) dev_no -= MOUSEDEV_MINOR_BASE; +#ifdef CONFIG_MCST + + if ((dev != 0) && (memcmp(dev->name, "eGalax", 6) == 0)) { + mousedev->touchscreen = EGALAX_TS; + pr_info("eGalax: Touchscreen connected to " + "/dev/input/mouse%d\n", dev_no); + } else if ((dev != 0) && (memcmp(dev->name, "Xiroku", 6) == 0)) { + mousedev->touchscreen = XIROKU_TS; + pr_info("Xiroku: Touchscreen connected to " + "/dev/input/mouse%d\n", dev_no); + } else if ((dev != 0) && (memcmp(dev->name, "Zytronic", 8) == 0)) { + mousedev->touchscreen = ZYTRONIC_TS; + pr_info("Zytronic: Touchscreen connected to " + "/dev/input/mouse%d\n", dev_no); + } else if ((dev != 0) && (memcmp(dev->name, "3M 3M", 5) == 0)) { + mousedev->touchscreen = TOUCH_3M_TS; + pr_info("3M: Touchscreen connected to " + "/dev/input/mouse%d\n", dev_no); + } + +#endif /* CONFIG_MCST */ dev_set_name(&mousedev->dev, "mouse%d", dev_no); mousedev->open_device = mousedev_open_device; diff --git a/drivers/input/serio/i8042-io.h b/drivers/input/serio/i8042-io.h index da0bf85321de..8646696d12c9 100644 --- a/drivers/input/serio/i8042-io.h +++ b/drivers/input/serio/i8042-io.h @@ -28,6 +28,14 @@ extern int of_i8042_kbd_irq; extern int of_i8042_aux_irq; # define I8042_KBD_IRQ of_i8042_kbd_irq # define I8042_AUX_IRQ of_i8042_aux_irq +#elif defined(CONFIG_MCST) +#define I8042_KBD_IRQ i8042_kbd_irq +#define I8042_AUX_IRQ i8042_aux_irq +static int i8042_kbd_irq = 1; +static int i8042_aux_irq = 12; + +#include + #else # define I8042_KBD_IRQ 1 # define I8042_AUX_IRQ 12 @@ -38,6 +46,62 @@ extern int of_i8042_aux_irq; * Register numbers. 
*/ +#ifdef CONFIG_MCST +#define I8042_COMMAND_REG (i8042_command_reg) +#define I8042_STATUS_REG (i8042_command_reg) +#define I8042_DATA_REG (i8042_data_reg) + +static void __iomem *i8042_command_reg = (void __iomem *)0x64; +static void __iomem *i8042_data_reg = (void __iomem *)0x60; + +static inline int i8042_read_data_io(void) +{ + return inb((unsigned long)I8042_DATA_REG); +} + +static inline int i8042_read_status_io(void) +{ + return inb((unsigned long)I8042_STATUS_REG); +} + +static inline void i8042_write_data_io(int val) +{ + outb(val, (unsigned long)I8042_DATA_REG); +} + +static inline void i8042_write_command_io(int val) +{ + outb(val, (unsigned long)I8042_COMMAND_REG); +} +static inline int i8042_read_data_pci(void) +{ + return readb(I8042_DATA_REG); +} + +static inline int i8042_read_status_pci(void) +{ + return readb(I8042_STATUS_REG); +} + +static inline void i8042_write_data_pci(int val) +{ + writeb(val, I8042_DATA_REG); +} + +static inline void i8042_write_command_pci(int val) +{ + writeb(val, I8042_COMMAND_REG); +} + +static int (*i8042_read_data)(void) = i8042_read_data_io; +static int (*i8042_read_status)(void) = i8042_read_status_io; +static void (*i8042_write_data)(int val) = i8042_write_data_io; +static void (*i8042_write_command)(int val) = i8042_write_command_io; + +static bool __initdata i8042_nopci = 0; +module_param_named(nopci, i8042_nopci, bool, 0); +MODULE_PARM_DESC(nokbd, "Do not probe MCST pci controller."); +#else #define I8042_COMMAND_REG 0x64 #define I8042_STATUS_REG 0x64 #define I8042_DATA_REG 0x60 @@ -61,6 +125,7 @@ static inline void i8042_write_command(int val) { outb(val, I8042_COMMAND_REG); } +#endif static inline int i8042_platform_init(void) { @@ -72,18 +137,36 @@ static inline int i8042_platform_init(void) if (check_legacy_ioport(I8042_DATA_REG)) return -ENODEV; #endif -#if !defined(__sh__) && !defined(__alpha__) +#if !defined(__sh__) && !defined(__alpha__) && !defined(__e2k__) if (!request_region(I8042_DATA_REG, 16, 
"i8042")) return -EBUSY; #endif i8042_reset = I8042_RESET_ALWAYS; +#ifdef CONFIG_MCST + /* r7683: aporia-2: add pci ps/2 controller support. dima@mcst.ru */ + { + struct pci_dev *pdev = + pci_get_device(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_PS2, NULL); + if (!i8042_nopci && pdev) { + void __iomem *r = pci_iomap(pdev, 0, 0); + i8042_command_reg = r + 2; + i8042_data_reg = r; + i8042_kbd_irq = i8042_aux_irq = pdev->irq; + i8042_read_data = i8042_read_data_pci; + i8042_read_status = i8042_read_status_pci; + i8042_write_data = i8042_write_data_pci; + i8042_write_command = i8042_write_command_pci; + } + } +#endif return 0; } static inline void i8042_platform_exit(void) { -#if !defined(__sh__) && !defined(__alpha__) +#if !defined(__sh__) && !defined(__alpha__) && !defined(__e2k__) release_region(I8042_DATA_REG, 16); #endif } diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 202e43a6ffae..3cf415ac1780 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -1261,7 +1261,9 @@ static int __init i8042_platform_init(void) int retval; #ifdef CONFIG_X86 +#ifndef CONFIG_MCST u8 a20_on = 0xdf; +#endif /* Just return if platform does not have i8042 controller */ if (x86_platform.legacy.i8042 == X86_LEGACY_I8042_PLATFORM_ABSENT) return -ENODEV; @@ -1319,8 +1321,13 @@ static int __init i8042_platform_init(void) * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to * resume from S3. So we do it here and hope that nothing breaks. */ +#ifndef CONFIG_MCST + /* Optimistical hope. It breaks. 
PS/2 KBD does not work + * in at least one MCST machine + */ i8042_command(&a20_on, 0x10d1); i8042_command(NULL, 0x00ff); /* Null command for SMM firmware */ +#endif #endif /* CONFIG_X86 */ return retval; diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 6ff6b5710dd4..89fe571ecade 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -240,6 +240,15 @@ static int i8042_wait_read(void) udelay(50); i++; } +#ifdef CONFIG_E2K + /* + * Workaround against ps/2 mouse detection failure on 2.6.14 + * This happens on E2K motherboard only + * Looks like it is necessary to execute a short delay to relax + * hardware after i8042 register status read. + */ + udelay(500); +#endif return -(i == I8042_CTL_TIMEOUT); } @@ -251,6 +260,15 @@ static int i8042_wait_write(void) udelay(50); i++; } +#ifdef CONFIG_E2K + /* + * Workaround against ps/2 mouse detection failure on 2.6.14 + * This happens on E2K motherboard only + * Looks like it is necessary to execute a short delay to relax + * hardware after i8042 register status read. + */ + udelay(500); +#endif return -(i == I8042_CTL_TIMEOUT); } diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index bfb945fc33a1..78796bb439b6 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -204,7 +204,7 @@ static const struct dmi_system_id inverted_x_screen[] = { * @buf: raw write data buffer. * @len: length of the buffer to write */ -static int goodix_i2c_read(struct i2c_client *client, +static int __goodix_i2c_read(struct i2c_client *client, u16 reg, u8 *buf, int len) { struct i2c_msg msgs[2]; @@ -233,7 +233,7 @@ static int goodix_i2c_read(struct i2c_client *client, * @buf: raw data buffer to write. 
* @len: length of the buffer to write */ -static int goodix_i2c_write(struct i2c_client *client, u16 reg, const u8 *buf, +static int __goodix_i2c_write(struct i2c_client *client, u16 reg, const u8 *buf, unsigned len) { u8 *addr_buf; @@ -258,6 +258,24 @@ static int goodix_i2c_write(struct i2c_client *client, u16 reg, const u8 *buf, return ret < 0 ? ret : (ret != 1 ? -EIO : 0); } +#define goodix_i2c_read(c,r,b,l) \ +({ \ + unsigned __val = __goodix_i2c_read(c,r,b,l); \ + int __l = l; \ + dev_dbg(&c->dev, "R%d: %x: %s\t%s:%d\n", \ + __l, __val, # r, __func__, __LINE__); \ + __val; \ +}) + +#define goodix_i2c_write(c,r,b,l) \ +({ \ + unsigned __val = __goodix_i2c_write(c,r,b,l); \ + int __l = l; \ + dev_dbg(&c->dev, "W%d: %x: %s\t%s:%d\n", \ + __l, __val, # r, __func__, __LINE__); \ + __val; \ +}) + static int goodix_i2c_write_u8(struct i2c_client *client, u16 reg, u8 value) { return goodix_i2c_write(client, reg, &value, sizeof(value)); @@ -335,7 +353,7 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data) * The Goodix panel will send spurious interrupts after a * 'finger up' event, which will always cause a timeout. 
*/ - return 0; + return -ENOMSG; } static void goodix_ts_report_touch_8b(struct goodix_ts_data *ts, u8 *coor_data) @@ -931,7 +949,10 @@ static int goodix_ts_probe(struct i2c_client *client, return error; } } - +#ifdef CONFIG_MCST + if (ts->gpiod_int) + client->irq = gpiod_to_irq(ts->gpiod_int); +#endif error = goodix_i2c_test(client); if (error) { dev_err(&client->dev, "I2C communication failure: %d\n", error); diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index 544a8f40b81f..a91f5035d355 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ b/drivers/input/touchscreen/usbtouchscreen.c @@ -51,6 +51,16 @@ static bool hwcalib_xy; module_param(hwcalib_xy, bool, 0644); MODULE_PARM_DESC(hwcalib_xy, "If set hw-calibrated X/Y are used if available"); +#ifdef CONFIG_MCST +static int xres = CONFIG_INPUT_MOUSEDEV_SCREEN_X; +module_param(xres, uint, 0644); +MODULE_PARM_DESC(xres, "Horizontal screen resolution"); + +static int yres = CONFIG_INPUT_MOUSEDEV_SCREEN_Y; +module_param(yres, uint, 0644); +MODULE_PARM_DESC(yres, "Vertical screen resolution"); + +#endif /* CONFIG_MCST */ /* device specifc data/functions */ struct usbtouch_usb; struct usbtouch_device_info { @@ -128,14 +138,29 @@ enum { DEVTYPE_NEXIO, DEVTYPE_ELO, DEVTYPE_ETOUCH, +#ifdef CONFIG_MCST + DEVTYPE_ELO_VMC, + DEVTYPE_XIROKU, +#endif /* CONFIG_MCST */ }; +#ifdef CONFIG_MCST +#define USB_DEVICE_HID_CLASS(vend, prod) \ + .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS \ + | USB_DEVICE_ID_MATCH_INT_PROTOCOL \ + | USB_DEVICE_ID_MATCH_DEVICE, \ + .idVendor = (vend), \ + .idProduct = (prod), \ + .bInterfaceClass = USB_INTERFACE_CLASS_HID, \ + .bInterfaceProtocol = USB_INTERFACE_PROTOCOL_MOUSE +#else /* CONFIG_MCST */ #define USB_DEVICE_HID_CLASS(vend, prod) \ .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS \ | USB_DEVICE_ID_MATCH_DEVICE, \ .idVendor = (vend), \ .idProduct = (prod), \ .bInterfaceClass = USB_INTERFACE_CLASS_HID +#endif /* CONFIG_MCST */ static 
const struct usb_device_id usbtouch_devices[] = { #ifdef CONFIG_TOUCHSCREEN_USB_EGALAX @@ -151,6 +176,9 @@ static const struct usb_device_id usbtouch_devices[] = { {USB_DEVICE(0x0eef, 0x0002), .driver_info = DEVTYPE_EGALAX}, {USB_DEVICE(0x1234, 0x0001), .driver_info = DEVTYPE_EGALAX}, {USB_DEVICE(0x1234, 0x0002), .driver_info = DEVTYPE_EGALAX}, +#ifdef CONFIG_MCST + {USB_DEVICE(0x2505, 0x0220), .driver_info = DEVTYPE_XIROKU}, +#endif /* CONFIG_MCST */ #endif #ifdef CONFIG_TOUCHSCREEN_USB_PANJIT @@ -231,6 +259,9 @@ static const struct usb_device_id usbtouch_devices[] = { #ifdef CONFIG_TOUCHSCREEN_USB_ELO {USB_DEVICE(0x04e7, 0x0020), .driver_info = DEVTYPE_ELO}, +#ifdef CONFIG_MCST + {USB_DEVICE(0x04e7, 0x0050), .driver_info = DEVTYPE_ELO_VMC}, +#endif /* CONFIG_MCST */ #endif #ifdef CONFIG_TOUCHSCREEN_USB_EASYTOUCH @@ -290,6 +321,7 @@ static int e2i_read_data(struct usbtouch_usb *dev, unsigned char *pkt) #define EGALAX_PKT_TYPE_REPT 0x80 #define EGALAX_PKT_TYPE_DIAG 0x0A +#ifndef CONFIG_MCST static int egalax_init(struct usbtouch_usb *usbtouch) { int ret, i; @@ -329,13 +361,28 @@ static int egalax_init(struct usbtouch_usb *usbtouch) return ret; } +#endif /* ! 
CONFIG_MCST */ static int egalax_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { +#ifdef CONFIG_MCST + struct usbtouch_device_info *type = dev->type; + +#endif /* CONFIG_MCST */ if ((pkt[0] & EGALAX_PKT_TYPE_MASK) != EGALAX_PKT_TYPE_REPT) return 0; +#ifndef CONFIG_MCST dev->x = ((pkt[3] & 0x0F) << 7) | (pkt[4] & 0x7F); dev->y = ((pkt[1] & 0x0F) << 7) | (pkt[2] & 0x7F); +#else /* CONFIG_MCST */ + dev->x = ((pkt[2] & 0x0F) << 8) | pkt[1]; + dev->y = ((pkt[4] & 0x0F) << 8) | pkt[3]; + dev->touch = pkt[0] & 0x01; + dev->x = (dev->x * xres) / (type->max_xc - type->min_xc); + dev->y = yres - (dev->y * yres) / (type->max_yc - type->min_yc); + if (dev->y < 0) + dev->y = 0; +#endif /* CONFIG_MCST */ dev->touch = pkt[0] & 0x01; return 1; @@ -356,6 +403,35 @@ static int egalax_get_pkt_len(unsigned char *buf, int len) return 0; } +#ifdef CONFIG_MCST + +static int xiroku_read_data(struct usbtouch_usb *dev, unsigned char *pkt) +{ + struct usbtouch_device_info *type = dev->type; + int x = 0, y = 0; + + if (pkt[1] != 0) { + x = ((pkt[3] & 0x7F) << 8) | pkt[2]; + y = ((pkt[5] & 0x7F) << 8) | pkt[4]; + if ((x == 0) || (y == 0)) { + return 0; + }; + dev->touch = 1; + dev->x = (x * xres) / (type->max_xc - type->min_xc); + dev->y = (y * yres) / (type->max_yc - type->min_yc); + } else { + dev->touch = 0; + }; + return 1; +} + +static int xiroku_get_pkt_len(unsigned char *buf, int len) +{ + if (buf[0] == 1) + return 6; + return 0; +} +#endif /* CONFIG_MCST */ #endif /***************************************************************************** @@ -430,16 +506,52 @@ static int panjit_read_data(struct usbtouch_usb *dev, unsigned char *pkt) #define MTOUCHUSB_REQ_CTRLLR_ID_LEN 16 +#ifdef CONFIG_MCST +static int last_x = 0, last_y = 0; + +#endif /* CONFIG_MCST */ static int mtouch_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { +#ifdef CONFIG_MCST + unsigned int x, y; + struct usbtouch_device_info *type = dev->type; + + dev->touch = (pkt[2] & 0x40) ? 
1 : 0; + if (dev->touch == 0) { + dev->x = last_x; + dev->y = last_y; + return 1; + }; +#endif /* CONFIG_MCST */ if (hwcalib_xy) { dev->x = (pkt[4] << 8) | pkt[3]; dev->y = 0xffff - ((pkt[6] << 8) | pkt[5]); } else { +#ifndef CONFIG_MCST dev->x = (pkt[8] << 8) | pkt[7]; dev->y = (pkt[10] << 8) | pkt[9]; +#else /* CONFIG_MCST */ + x = (pkt[8] << 8) | pkt[7]; + y = (pkt[10] << 8) | pkt[9]; + if (x > type->max_xc) + x = type->max_xc; + x -= type->min_xc; + if (x < 0) + x = 0; + if (y > type->max_yc) + y = type->max_yc; + y -= type->min_yc; + if (y < 0) + y = 0; + dev->x = (x * xres) / (type->max_xc - type->min_xc); + dev->y = (y * yres) / (type->max_yc - type->min_yc); + last_x = dev->x; + last_y = dev->y; +#endif /* CONFIG_MCST */ } +#ifndef CONFIG_MCST dev->touch = (pkt[2] & 0x40) ? 1 : 0; +#endif /* ! CONFIG_MCST */ return 1; } @@ -1133,6 +1245,37 @@ static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt) #ifdef CONFIG_TOUCHSCREEN_USB_ELO +#ifdef CONFIG_MCST +#ifndef MULTI_PACKET +#define MULTI_PACKET +#endif + +#define ELO_PKT_TYPE_HEADER 0x54 + +static int elo_vmc_read_data(struct usbtouch_usb *dev, unsigned char *pkt) +{ + struct usbtouch_device_info *type = dev->type; + int maxx, minx, maxy, miny; + + if (pkt[0] != ELO_PKT_TYPE_HEADER) + return 0; + + dev->x = ((pkt[3] & 0x0F) << 8) | pkt[2]; + dev->y = ((pkt[5] & 0x0F) << 8) | pkt[4]; + dev->touch = pkt[1] & 0x01; + maxx = type->max_xc; + minx = type->min_xc; + maxy = type->max_yc; + miny = type->min_yc; + dev->x = ((dev->x - minx)* xres) / (maxx - minx); + dev->y = yres - ((dev->y - miny) * yres) / (maxy - miny); + if (dev->y < 0) + dev->y = 0; + return 1; +} + +#endif /* CONFIG_MCST */ + static int elo_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { dev->x = (pkt[3] << 8) | pkt[2]; @@ -1142,6 +1285,15 @@ static int elo_read_data(struct usbtouch_usb *dev, unsigned char *pkt) return 1; } +#ifdef CONFIG_MCST + +static int elo_get_pkt_len(unsigned char *buf, int len) +{ + if 
(buf[0] != ELO_PKT_TYPE_HEADER) + return 0; + return 8; +} +#endif /* CONFIG_MCST */ #endif @@ -1151,6 +1303,10 @@ static int elo_read_data(struct usbtouch_usb *dev, unsigned char *pkt) #ifdef MULTI_PACKET static void usbtouch_process_multi(struct usbtouch_usb *usbtouch, unsigned char *pkt, int len); +#ifdef CONFIG_MCST +static void usbtouch_process_multi_xiroku(struct usbtouch_usb *usbtouch, + unsigned char *pkt, int len); +#endif /* CONFIG_MCST */ #endif static struct usbtouch_device_info usbtouch_dev_info[] = { @@ -1164,19 +1320,39 @@ static struct usbtouch_device_info usbtouch_dev_info[] = { .rept_size = 8, .read_data = elo_read_data, }, -#endif +#endif /* ! CONFIG_MCST */ #ifdef CONFIG_TOUCHSCREEN_USB_EGALAX [DEVTYPE_EGALAX] = { +#ifndef CONFIG_MCST .min_xc = 0x0, .max_xc = 0x07ff, .min_yc = 0x0, .max_yc = 0x07ff, +#else /* CONFIG_MCST */ + .min_xc = 0, + .max_xc = 4096, + .min_yc = 0, + .max_yc = 4096, +#endif /* CONFIG_MCST */ .rept_size = 16, .process_pkt = usbtouch_process_multi, .get_pkt_len = egalax_get_pkt_len, .read_data = egalax_read_data, +#ifndef CONFIG_MCST .init = egalax_init, +#else /* CONFIG_MCST */ + }, + [DEVTYPE_XIROKU] = { + .min_xc = 0, + .max_xc = 0x7FFF, + .min_yc = 0, + .max_yc = 0x7FFF, + .rept_size = 16, + .process_pkt = usbtouch_process_multi_xiroku, + .get_pkt_len = xiroku_get_pkt_len, + .read_data = xiroku_read_data, +#endif /* CONFIG_MCST */ }, #endif @@ -1193,10 +1369,17 @@ static struct usbtouch_device_info usbtouch_dev_info[] = { #ifdef CONFIG_TOUCHSCREEN_USB_3M [DEVTYPE_3M] = { +#ifndef CONFIG_MCST .min_xc = 0x0, .max_xc = 0x4000, .min_yc = 0x0, .max_yc = 0x4000, +#else /* CONFIG_MCST */ + .min_xc = 3200, /* 3000 */ + .max_xc = 13640, + .min_yc = 3500, + .max_yc = 13100, +#endif /* CONFIG_MCST */ .rept_size = 11, .read_data = mtouch_read_data, .alloc = mtouch_alloc, @@ -1376,9 +1559,22 @@ static struct usbtouch_device_info usbtouch_dev_info[] = { .read_data = etouch_read_data, }, #endif +#ifdef CONFIG_MCST +#ifdef 
CONFIG_TOUCHSCREEN_USB_ELO + [DEVTYPE_ELO_VMC] = { + .min_xc = 262, + .max_xc = 3821, + .min_yc = 335, + .max_yc = 3793, + .rept_size = 16, + .process_pkt = usbtouch_process_multi, + .get_pkt_len = elo_get_pkt_len, + .read_data = elo_vmc_read_data, + }, +#endif +#endif /* CONFIG_MCST */ }; - /***************************************************************************** * Generic Part */ @@ -1481,6 +1677,17 @@ out_flush_buf: usbtouch->buf_len = 0; return; } +#ifdef CONFIG_MCST + +static void usbtouch_process_multi_xiroku(struct usbtouch_usb *usbtouch, + unsigned char *pkt, int len) +{ + int pkt_len; + + pkt_len = usbtouch->type->get_pkt_len(pkt, 0); + usbtouch_process_pkt(usbtouch, pkt, pkt_len); +} +#endif /* CONFIG_MCST */ #endif diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 9c3e630c6c4c..d3fcaabd0198 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -23,6 +23,13 @@ #include #include #include +#ifdef CONFIG_E2K +#include +#include +#endif +#ifdef CONFIG_MCST +#include +#endif struct iommu_dma_msi_page { struct list_head list; @@ -651,11 +658,111 @@ static int __iommu_dma_mmap(struct page **pages, size_t size, return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT); } +#ifdef CONFIG_E2K +#define IO_PAGE_SHIFT 12 +#define IO_PAGE_SIZE (1UL << IO_PAGE_SHIFT) +#define IO_PAGE_MASK (~(IO_PAGE_SIZE-1)) +#define IO_PAGE_ALIGN(addr) ALIGN(addr, IO_PAGE_SIZE) + +static bool l_dom_iova_hi(unsigned long iova) +{ + return iova & (~0UL << 32) ? 
true : false; +} + +static unsigned l_dom_page_indx(struct iommu_domain *d, unsigned long iova) +{ + if (!l_dom_iova_hi(iova)) + return iova / IO_PAGE_SIZE; + + return (iova - d->map_base) / IO_PAGE_SIZE; +} + +static phys_addr_t l_dom_lookup_buffer(struct iommu_domain *d, + unsigned long iova) +{ + void *p; + unsigned long flags; + unsigned i = l_dom_page_indx(d, iova); + if (!l_dom_iova_hi(iova)) + return d->orig_phys_lo[i]; + + read_lock_irqsave(&d->lock_hi, flags); + p = idr_find(&d->idr_hi, i); + read_unlock_irqrestore(&d->lock_hi, flags); + + return (phys_addr_t)p; +} + +static void __l_sync_single(struct iommu_domain *d, + dma_addr_t iova, size_t sz, + enum dma_data_direction dir, + enum dma_sync_target target) +{ + phys_addr_t orig_phys, phys; + unsigned offset = offset_in_page(iova); + + orig_phys = l_dom_lookup_buffer(d, iova); + if (!orig_phys) + return; + + phys = iommu_iova_to_phys(d, iova); + + switch (target) { + case SYNC_FOR_CPU: + if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) { + void *to = __va(orig_phys) + offset; + void *from = __va(phys) + offset; + memcpy(to, from, sz); + } else { + BUG_ON(dir != DMA_TO_DEVICE); + } + break; + case SYNC_FOR_DEVICE: + if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) { + void *from = __va(orig_phys) + offset; + void *to = __va(phys) + offset; + memcpy(to, from, sz); + } else { + BUG_ON(dir != DMA_FROM_DEVICE); + } + break; + default: + BUG(); + } +} + +#define offset_in_iopage(p) (((unsigned long)p) % IO_PAGE_SIZE) + +static void l_sync_single(struct iommu_domain *d, + dma_addr_t iova, size_t sz, + enum dma_data_direction dir, + enum dma_sync_target target) +{ + if (!l_iommu_supported()) + return; + do { + unsigned this_step = min((IO_PAGE_SIZE - + offset_in_iopage(iova)), sz); + + __l_sync_single(d, iova, this_step, dir, target); + + sz -= this_step; + iova += this_step; + } while (sz > 0); +} +#endif /*CONFIG_E2K*/ + static void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t 
dma_handle, size_t size, enum dma_data_direction dir) { phys_addr_t phys; - +#ifdef CONFIG_E2K + if (l_iommu_has_numa_bug()) { + struct iommu_domain *d = iommu_get_dma_domain(dev); + l_sync_single(d, dma_handle, size, dir, SYNC_FOR_CPU); + return; + } +#endif if (dev_is_dma_coherent(dev)) return; @@ -668,6 +775,14 @@ static void iommu_dma_sync_single_for_device(struct device *dev, { phys_addr_t phys; +#ifdef CONFIG_E2K + if (l_iommu_has_numa_bug()) { + struct iommu_domain *d = iommu_get_dma_domain(dev); + l_sync_single(d, dma_handle, size, dir, SYNC_FOR_DEVICE); + return; + } +#endif + if (dev_is_dma_coherent(dev)) return; @@ -681,6 +796,17 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev, { struct scatterlist *sg; int i; +#ifdef CONFIG_E2K + if (l_iommu_has_numa_bug()) { + for_each_sg(sgl, sg, nelems, i) { + if (sg_dma_len(sg) == 0) + break; + iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg), + sg_dma_len(sg), dir); + } + return; + } +#endif if (dev_is_dma_coherent(dev)) return; @@ -695,7 +821,18 @@ static void iommu_dma_sync_sg_for_device(struct device *dev, { struct scatterlist *sg; int i; - +#ifdef CONFIG_E2K + if (l_iommu_has_numa_bug()) { + for_each_sg(sgl, sg, nelems, i) { + if (sg_dma_len(sg) == 0) + break; + iommu_dma_sync_single_for_device(dev, + sg_dma_address(sg), + sg_dma_len(sg), dir); + } + return; + } +#endif if (dev_is_dma_coherent(dev)) return; @@ -713,6 +850,13 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, dma_addr_t dma_handle; dma_handle =__iommu_dma_map(dev, phys, size, prot); + +#ifdef CONFIG_E2K + if (l_iommu_has_numa_bug() && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && + dma_handle != DMA_MAPPING_ERROR) + iommu_dma_sync_single_for_device(dev, dma_handle, + size, dir); +#endif if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && dma_handle != DMA_MAPPING_ERROR) arch_sync_dma_for_device(dev, phys, size, dir); @@ -821,10 +965,12 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist 
*sg, size_t iova_len = 0; unsigned long mask = dma_get_seg_boundary(dev); int i; - +#ifdef CONFIG_E2K + int ret; +#else if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) iommu_dma_sync_sg_for_device(dev, sg, nents, dir); - +#endif /* * Work out how much IOVA space we need, and align the segments to * IOVA granules for the IOMMU driver to handle. With some clever @@ -875,6 +1021,12 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len) goto out_free_iova; +#ifdef CONFIG_E2K + ret = __finalise_sg(dev, sg, nents, iova); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + iommu_dma_sync_sg_for_device(dev, sg, nents, dir); + return ret; +#endif return __finalise_sg(dev, sg, nents, iova); out_free_iova: @@ -921,12 +1073,24 @@ static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, __iommu_dma_unmap(dev, handle, size); } +#ifdef CONFIG_MCST +static void __iommu_dma_free(struct device *dev, size_t size, + void *cpu_addr, unsigned long attrs) +#else static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr) +#endif { size_t alloc_size = PAGE_ALIGN(size); int count = alloc_size >> PAGE_SHIFT; struct page *page = NULL, **pages = NULL; +#ifdef CONFIG_MCST + if (attrs & DMA_ATTR_NON_CONSISTENT) { + l_free_uncached(dev, alloc_size, cpu_addr); + return; + } +#endif + /* Non-coherent atomic allocation? 
Easy */ if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && dma_free_from_pool(cpu_addr, alloc_size)) @@ -956,7 +1120,11 @@ static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle, unsigned long attrs) { __iommu_dma_unmap(dev, handle, size); +#ifdef CONFIG_MCST + __iommu_dma_free(dev, size, cpu_addr, attrs); +#else __iommu_dma_free(dev, size, cpu_addr); +#endif } static void *iommu_dma_alloc_pages(struct device *dev, size_t size, @@ -967,8 +1135,17 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size, int node = dev_to_node(dev); struct page *page = NULL; void *cpu_addr; - +#ifdef CONFIG_MCST + if (attrs & DMA_ATTR_NON_CONSISTENT) { + phys_addr_t pa; + cpu_addr = l_alloc_uncached(dev, alloc_size, &pa, gfp); + if (!cpu_addr) + return NULL; + page = phys_to_page(pa); + } else +#endif page = dma_alloc_contiguous(dev, alloc_size, gfp); + if (!page) page = alloc_pages_node(node, gfp, get_order(alloc_size)); if (!page) @@ -985,6 +1162,9 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size, if (!coherent) arch_dma_prep_coherent(page, size); } else { +#ifdef CONFIG_MCST + if (!(attrs & DMA_ATTR_NON_CONSISTENT)) +#endif cpu_addr = page_address(page); } @@ -1004,6 +1184,11 @@ static void *iommu_dma_alloc(struct device *dev, size_t size, struct page *page = NULL; void *cpu_addr; +#ifdef CONFIG_E2K + if (l_iommu_has_numa_bug()) /* force the allocation from */ + gfp |= __GFP_THISNODE; /* the device node */ +#endif + gfp |= __GFP_ZERO; if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) && @@ -1020,7 +1205,11 @@ static void *iommu_dma_alloc(struct device *dev, size_t size, *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot); if (*handle == DMA_MAPPING_ERROR) { +#ifdef CONFIG_MCST + __iommu_dma_free(dev, size, cpu_addr, attrs); +#else __iommu_dma_free(dev, size, cpu_addr); +#endif return NULL; } diff --git a/drivers/mcst/BigEv2/Makefile b/drivers/mcst/BigEv2/Makefile new file mode 100644 
index 000000000000..8568b6cef868 --- /dev/null +++ b/drivers/mcst/BigEv2/Makefile @@ -0,0 +1 @@ +obj-y += pcie_driver/ diff --git a/drivers/mcst/BigEv2/pcie_driver/Makefile b/drivers/mcst/BigEv2/pcie_driver/Makefile new file mode 100644 index 000000000000..69e673f9bce4 --- /dev/null +++ b/drivers/mcst/BigEv2/pcie_driver/Makefile @@ -0,0 +1,3 @@ +# Copyright 2013 Google Inc. All Rights Reserved. + +obj-$(CONFIG_BIGE) += bige.o diff --git a/drivers/mcst/BigEv2/pcie_driver/bige.c b/drivers/mcst/BigEv2/pcie_driver/bige.c new file mode 100644 index 000000000000..e628e5de22a5 --- /dev/null +++ b/drivers/mcst/BigEv2/pcie_driver/bige.c @@ -0,0 +1,1091 @@ +/* Copyright 2012 Google Inc. All Rights Reserved. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +//#include "../ewl/ewl_shared.h" +#include "bige.h" +#include "bige_ioctl.h" + +#define HXDEC_MAX_CORES 1 + +#define GOOGLE_DEC_REGS 184 + +#define GOOGLE_DEC_FIRST_REG 0 +#define GOOGLE_DEC_LAST_REG GOOGLE_DEC_REGS-1 + +#define DEC_IO_SIZE (GOOGLE_DEC_REGS * 4) /* bytes */ + +static const int DecHwId[] = {0x6732}; +unsigned long base_port = -1; + +static u32 multicorebase[HXDEC_MAX_CORES] = {-1}; + +static int irq = -1; +static int elements = 0; + +/* module_param(name, type, perm) */ +module_param(base_port, ulong, 0); +module_param(irq, int, 0); +module_param_array(multicorebase, uint, &elements, 0); + +static int bige_major = 0; /* dynamic allocation */ + +/* here's all the must remember stuff */ +typedef struct +{ + char *buffer; + unsigned int iosize; + volatile u8 *hwregs[HXDEC_MAX_CORES]; + int irq; + int cores; + struct fasync_struct *async_queue_dec; +} bige_t; + +static bige_t bige_data; /* dynamic allocation? */ + +static int ReserveIO(void); +static void ReleaseIO(void); + +/* PCIe resources */ +/* TODO(mheikkinen) Implement multicore support. 
*/ + +static struct pci_dev *gDev = NULL; /* PCI device structure. */ + +static u32 gBaseHdwr; /* PCI base register address (Hardware address) */ +static u32 gBaseLen; /* Base register address Length */ +static void *gBaseVirt = NULL; /* Base register virtual address */ +static u32 gHantroRegBase = 0; /* Base register for Hantro IP */ +static void* gHantroRegVirt = NULL; /* Virtual register for Hantro IP */ + +static int PcieInit(void); +static void ResetAsic(bige_t * dev); + +#ifdef BIGE_DEBUG +static void dump_regs(bige_t *dev); +#endif + +/* Enable/disable interrupt for userland */ +static int bige_irqcontrol(struct uio_info *info, s32 irq_on); + +/* IRQ handler */ +static irqreturn_t bige_isr(int irq, struct uio_info *dev_info); + +//static u32 dec_regs[HXDEC_MAX_CORES][DEC_IO_SIZE/4]; +static struct semaphore dec_core_sem; + +//static int dec_irq = 0; + +//static atomic_t irq_rx = ATOMIC_INIT(0); +//static atomic_t irq_tx = ATOMIC_INIT(0); + +static struct file* dec_owner[HXDEC_MAX_CORES]; + +//static DEFINE_SPINLOCK(owner_lock); + +static DECLARE_WAIT_QUEUE_HEAD(dec_wait_queue); +static DECLARE_WAIT_QUEUE_HEAD(hw_queue); + +#define DWL_CLIENT_TYPE_PP 4U +#define DWL_CLIENT_TYPE_VP9_DEC 11U +#define DWL_CLIENT_TYPE_HEVC_DEC 12U + +//static u32 cfg[HXDEC_MAX_CORES]; + +static void ReadCoreConfig(bige_t *dev) +{ +// int c; +// u32 reg, tmp; +// +// memset(cfg, 0, sizeof(cfg)); +// +// for(c = 0; c < dev->cores; c++) +// { +// /* Decoder configuration */ +// reg = ioread32(dev->hwregs[c] + BIGE_SYNTH_CFG_2 * 4); +// +// tmp = (reg >> DWL_HEVC_E) & 0x3U; +// if(tmp) printk(KERN_INFO "bige: core[%d] has HEVC\n", c); +// cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_HEVC_DEC : 0; +// +// tmp = (reg >> DWL_VP9_E) & 0x03U; +// if(tmp) printk(KERN_INFO "bige: core[%d] has VP9\n", c); +// cfg[c] |= tmp ? 
1 << DWL_CLIENT_TYPE_VP9_DEC : 0; +// +// /* Post-processor configuration */ +// reg = ioread32(dev->hwregs[c] + BIGEPP_SYNTH_CFG * 4); +// +// tmp = (reg >> DWL_PP_E) & 0x01U; +// if(tmp) printk(KERN_INFO "bige: core[%d] has PP\n", c); +// cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_PP : 0; +// } +} + +//static int CoreHasFormat(const void* cfg, int core, u32 format) +//{ +// return (cfg[core] & (1 << format)) ? 1 : 0; +//} +// +//int GetDecCore(long core, bige_t *dev, struct file* filp) +//{ +// int success = 0; +// unsigned long flags; +// +// PDEBUG("GetDecCore\n"); +// spin_lock_irqsave(&owner_lock, flags); +// if(dec_owner[core] == NULL ) +// { +// dec_owner[core] = filp; +// success = 1; +// } +// +// spin_unlock_irqrestore(&owner_lock, flags); +// PDEBUG("spin_lock_irqstore GetDecCore\n"); +// +// return success; +//} +// +//int GetDecCoreAny(long *core, bige_t *dev, struct file* filp, +// unsigned long format) +//{ +// int success = 0; +// long c; +// +// *core = -1; +// +// for(c = 0; c < dev->cores; c++) +// { +// /* a free core that has format */ +// if(CoreHasFormat(cfg, c, format) && GetDecCore(c, dev, filp)) +// { +// success = 1; +// *core = c; +// break; +// } +// } +// +// PDEBUG("GetCoreAny Success\n"); +// return success; +//} +// +//long ReserveDecoder(bige_t *dev, struct file* filp, unsigned long format) +//{ +// long core = -1; +// +// PDEBUG("Reserve core\n"); +// /* reserve a core */ +// if (down_interruptible(&dec_core_sem)) +// return -ERESTARTSYS; +// +// /* lock a core that has specific format*/ +// if(wait_event_interruptible(hw_queue, +// GetDecCoreAny(&core, dev, filp, format) != 0 )) +// return -ERESTARTSYS; +// +// PDEBUG("Reserve core, reserved\n"); +// return core; +//} + +void ReleaseDecoder(bige_t *dev, long core) +{ +// u32 status; +// unsigned long flags; +// u32 counter = 0; +// +// +// status = ioread32(dev->hwregs[core] + BIGE_IRQ_STAT_DEC_OFF); +// +// /* make sure HW is disabled */ +// if(status & BIGE_DEC_E) +// { +// 
printk(KERN_INFO "bige: DEC[%li] still enabled -> reset\n", core); +// +// while(status & BIGE_DEC_E) +// { +// if(!(counter & 0x7FF)) +// PDEBUG("bige: Killed, wait for HW finish\n", core); +// status = ioread32(dev->hwregs[core] + BIGE_IRQ_STAT_DEC_OFF); +// if(++counter > 500000){ +// +// printk(KERN_INFO "bige: Killed, timeout\n", core); +// break; +// } +// } +// +// iowrite32(0, dev->hwregs[core] + BIGE_IRQ_STAT_DEC_OFF); +// +// } +// +// spin_lock_irqsave(&owner_lock, flags); +// +// dec_owner[core] = NULL; +// +// spin_unlock_irqrestore(&owner_lock, flags); +// +// up(&dec_core_sem); +// +// wake_up_interruptible_all(&hw_queue); +} + +//long DecFlushRegs(bige_t *dev, struct core_desc *core) +//{ +// long ret = 0, i; +// +// u32 id = core->id; +// +// ret = copy_from_user(dec_regs[id], core->regs, GOOGLE_DEC_REGS*4); +// if (ret) +// { +// PDEBUG("copy_from_user failed, returned %li\n", ret); +// return -EFAULT; +// } +// +// /* write all regs but the status reg[1] to hardware */ +// for(i = 2; i <= GOOGLE_DEC_LAST_REG; i++) +// iowrite32(dec_regs[id][i], dev->hwregs[id] + i*4); +// +// /* write the status register, which may start the decoder */ +// iowrite32(dec_regs[id][1], dev->hwregs[id] + 4); +// +// PDEBUG("flushed registers on core %d %x\n", id, dec_regs[id][1]); +// +// return 0; +//} +// +//long DecRefreshRegs(bige_t *dev, struct core_desc *core) +//{ +// long ret, i; +// u32 id = core->id; +// +// /* user has to know exactly what they are asking for */ +// if(core->size != (GOOGLE_DEC_REGS * 4)) { +// PDEBUG("DecRefreshRegs failed, wrong size %d\n", core->size); +// return -EFAULT; +// } +// /* read all registers from hardware */ +// for(i = 0; i <= GOOGLE_DEC_LAST_REG; i++) +// dec_regs[id][i] = ioread32(dev->hwregs[id] + i*4); +// +// /* put registers to user space*/ +// ret = copy_to_user(core->regs, dec_regs[id], GOOGLE_DEC_REGS*4); +// if (ret) +// { +// PDEBUG("copy_to_user failed, returned %li\n", ret); +// return -EFAULT; +// } +// +// 
return 0; +//} +// +//static int CheckDecIrq(bige_t *dev, int id) +//{ +// unsigned long flags; +// int rdy = 0; +// +// const u32 irq_mask = (1 << id); +// +// spin_lock_irqsave(&owner_lock, flags); +// +// if(dec_irq & irq_mask) +// { +// /* reset the wait condition(s) */ +// dec_irq &= ~irq_mask; +// rdy = 1; +// } +// +// spin_unlock_irqrestore(&owner_lock, flags); +// +// return rdy; +//} +// +//long WaitDecReadyAndRefreshRegs(bige_t *dev, struct core_desc *core) +//{ +// u32 id = core->id; +// +// PDEBUG("wait_event_interruptible DEC[%d]\n", id); +// +// if(wait_event_interruptible(dec_wait_queue, CheckDecIrq(dev, id))) +// { +// PDEBUG("DEC[%d] wait_event_interruptible interrupted\n", id); +// return -ERESTARTSYS; +// } +// +// atomic_inc(&irq_tx); +// +// /* refresh registers */ +// return DecRefreshRegs(dev, core); +//} +// +//long PPFlushRegs(bige_t *dev, struct core_desc *core) +//{ +// long ret = 0; +// u32 id = core->id; +// u32 i; +// +// ret = copy_from_user(dec_regs[id] + GOOGLE_DEC_REGS, core->regs, +// GOOGLE_PP_REGS*4); +// if (ret) +// { +// PDEBUG("copy_from_user failed, returned %li\n", ret); +// return -EFAULT; +// } +// +// /* write all regs but the status reg[1] to hardware */ +// for(i = GOOGLE_PP_FIRST_REG + 1; i <= GOOGLE_PP_LAST_REG; i++) +// iowrite32(dec_regs[id][i], dev->hwregs[id] + i*4); +// +// /* write the stat reg, which may start the PP */ +// iowrite32(dec_regs[id][GOOGLE_PP_FIRST_REG], +// dev->hwregs[id] + GOOGLE_PP_FIRST_REG * 4); +// +// return 0; +//} +// +//long PPRefreshRegs(bige_t *dev, struct core_desc *core) +//{ +// long i, ret; +// u32 id = core->id; +// +// /* user has to know exactly what they are asking for */ +// if(core->size != (GOOGLE_PP_REGS * 4)) +// return -EFAULT; +// +// /* read all registers from hardware */ +// for(i = GOOGLE_PP_FIRST_REG; i <= GOOGLE_PP_LAST_REG; i++) +// dec_regs[id][i] = ioread32(dev->hwregs[id] + i*4); +// +// /* put registers to user space*/ +// ret = copy_to_user(core->regs, 
dec_regs[id] + GOOGLE_PP_FIRST_REG, +// GOOGLE_PP_REGS * 4); +// if (ret) +// { +// PDEBUG("copy_to_user failed, returned %li\n", ret); +// return -EFAULT; +// } +// +// return 0; +//} + +/*------------------------------------------------------------------------------ + Function name : bige_ioctl + Description : communication method to/from the user space + + Return type : long +------------------------------------------------------------------------------*/ + +static long bige_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int err = 0; +// long tmp; + +#ifdef HW_PERFORMANCE + struct timeval *end_time_arg; +#endif + + PDEBUG("ioctl cmd 0x%08x\n", cmd); + + // extract the type and number bitfields, and don't decode + // wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok() + if (_IOC_TYPE(cmd) != BIGE_IOC_MAGIC) + return -ENOTTY; + if (_IOC_NR(cmd) > BIGE_IOC_MAXNR) + return -ENOTTY; + + // the direction is a bitmask, and VERIFY_WRITE catches R/W + // transfers. 
`Type' is user-oriented, while + // access_ok is kernel-oriented, so the concept of "read" and + // "write" is reversed + if (_IOC_DIR(cmd) & _IOC_READ) + err = !access_ok((void *) arg, _IOC_SIZE(cmd)); + else if (_IOC_DIR(cmd) & _IOC_WRITE) + err = !access_ok((void *) arg, _IOC_SIZE(cmd)); + + if (err) + return -EFAULT; + +// switch (cmd) +// { +// case BIGE_IOC_CLI: +// disable_irq(bige_data.irq); +// break; +// case BIGE_IOC_STI: +// enable_irq(bige_data.irq); +// break; +// case BIGE_IOCGHWOFFSET: +// __put_user(multicorebase[0], (unsigned long *) arg); +// break; +// case BIGE_IOCGHWIOSIZE: +// __put_user(bige_data.iosize, (unsigned int *) arg); +// break; +// case BIGE_IOC_MC_OFFSETS: +// { +// tmp = copy_to_user((size_t*) arg, multicorebase, sizeof(multicorebase)); +// if (err) +// { +// PDEBUG("copy_to_user failed, returned %li\n", tmp); +// return -EFAULT; +// } +// break; +// } +// case BIGE_IOC_MC_CORES: +// __put_user(bige_data.cores, (unsigned int *) arg); +// break; +// case BIGE_IOCS_DEC_PUSH_REG: +// { +// struct core_desc core; +// +// /* get registers from user space*/ +// tmp = copy_from_user(&core, (void*)arg, sizeof(struct core_desc)); +// if (tmp) +// { +// PDEBUG("copy_from_user failed, returned %li\n", tmp); +// return -EFAULT; +// } +// +// DecFlushRegs(&bige_data, &core); +// break; +// } +// case BIGE_IOCS_PP_PUSH_REG: +// { +// struct core_desc core; +// +// /* get registers from user space*/ +// tmp = copy_from_user(&core, (void*)arg, sizeof(struct core_desc)); +// if (tmp) +// { +// PDEBUG("copy_from_user failed, returned %li\n", tmp); +// return -EFAULT; +// } +// +// PPFlushRegs(&bige_data, &core); +// break; +// } +// case BIGE_IOCS_DEC_PULL_REG: +// { +// struct core_desc core; +// +// PDEBUG("start BIGE_IOCS_DEC_PULL_REG\n"); +// +// /* get registers from user space*/ +// tmp = copy_from_user(&core, (void*)arg, sizeof(struct core_desc)); +// if (tmp) +// { +// PDEBUG("copy_from_user failed, returned %li\n", tmp); +// return 
-EFAULT; +// } +// +// return DecRefreshRegs(&bige_data, &core); +// } +// case BIGE_IOCS_PP_PULL_REG: +// { +// struct core_desc core; +// +// /* get registers from user space*/ +// tmp = copy_from_user(&core, (void*)arg, sizeof(struct core_desc)); +// if (tmp) +// { +// PDEBUG("copy_from_user failed, returned %li\n", tmp); +// return -EFAULT; +// } +// +// return PPRefreshRegs(&bige_data, &core); +// } +// case BIGE_IOCH_DEC_RESERVE: +// { +// PDEBUG("Reserve DEC core, format = %li\n", arg); +// return ReserveDecoder(&bige_data, filp, arg); +// } +// case BIGE_IOCT_DEC_RELEASE: +// { +// +// PDEBUG("Release DEC, core = %li\n", arg); +// if(arg >= bige_data.cores || dec_owner[arg] != filp) +// { +// PDEBUG("bogus DEC release, core = %li\n", arg); +// return -EFAULT; +// } +// +// PDEBUG("Release DEC, core = %li\n", arg); +// +// ReleaseDecoder(&bige_data, arg); +// +// break; +// } +// case BIGE_IOCQ_PP_RESERVE: +// return ReservePostProcessor(&bige_data, filp); +// case BIGE_IOCT_PP_RELEASE: +// { +// if(arg != 0 || pp_owner[arg] != filp) +// { +// PDEBUG("bogus PP release %li\n", arg); +// return -EFAULT; +// } +// +// ReleasePostProcessor(&bige_data, arg); +// +// break; +// } +// case BIGE_IOCX_DEC_WAIT: +// { +// struct core_desc core; +// +// PDEBUG("BIGE_IOCX_DEC_WAIT\n", tmp); +// /* get registers from user space */ +// tmp = copy_from_user(&core, (void*)arg, sizeof(struct core_desc)); +// if (tmp) +// { +// PDEBUG("copy_from_user failed, returned %li\n", tmp); +// return -EFAULT; +// } +// +// return WaitDecReadyAndRefreshRegs(&bige_data, &core); +// } +// case BIGE_IOCX_PP_WAIT: +// { +// struct core_desc core; +// +// /* get registers from user space */ +// tmp = copy_from_user(&core, (void*)arg, sizeof(struct core_desc)); +// if (tmp) +// { +// PDEBUG("copy_from_user failed, returned %li\n", tmp); +// return -EFAULT; +// } +// +// return WaitPPReadyAndRefreshRegs(&bige_data, &core); +// } +// case BIGE_IOX_ASIC_ID: +// { +// u32 id; +// __get_user(id, 
(size_t*)arg); +// +// if(id >= bige_data.cores) +// { +// return -EFAULT; +// } +// id = ioread32(bige_data.hwregs[id]); +// __put_user(id, (size_t*) arg); +// } +// case BIGE_DEBUG_STATUS: +// { +// printk(KERN_INFO "bige: dec_irq = 0x%08x \n", dec_irq); +// +// printk(KERN_INFO "bige: IRQs received/sent2user = %d / %d \n", +// atomic_read(&irq_rx), atomic_read(&irq_tx)); +// +// for (tmp = 0; tmp < bige_data.cores; tmp++) +// { +// printk(KERN_INFO "bige: dec_core[%li] %s\n", +// tmp, dec_owner[tmp] == NULL ? "FREE" : "RESERVED"); +// printk(KERN_INFO "bige: pp_core[%li] %s\n", +// tmp, pp_owner[tmp] == NULL ? "FREE" : "RESERVED"); +// } +// } +// default: +// return -ENOTTY; +// } + + return 0; +} + +/*------------------------------------------------------------------------------ + Function name : bige_open + Description : open method + + Return type : int +------------------------------------------------------------------------------*/ + +static int bige_open(struct inode *inode, struct file *filp) +{ + PDEBUG("dev opened\n"); + return 0; +} + +/*------------------------------------------------------------------------------ + Function name : bige_release + Description : Release driver + + Return type : int +------------------------------------------------------------------------------*/ + +static int bige_release(struct inode *inode, struct file *filp) +{ + int n; + bige_t *dev = &bige_data; + + PDEBUG("closing ...\n"); + + for (n = 0; n < dev->cores; n++) { + if (dec_owner[n] == filp) { + PDEBUG("releasing dec core %i lock\n", n); + ReleaseDecoder(dev, n); + } + } + return 0; +} + +/* VFS methods */ +static struct file_operations bige_fops = +{ + .owner = THIS_MODULE, + .open = bige_open, + .release = bige_release, + .unlocked_ioctl = bige_ioctl, + .fasync = NULL +}; + +/*------------------------------------------------------------------------------ + Function name : bige_init + Description : Initialize the driver + + Return type : int 
+------------------------------------------------------------------------------*/ + +int __init bige_init(void) +{ + int result, i; + /* u32 buf[4]; */ + + printk(KERN_INFO "bige: Initializing\n"); + + result = PcieInit(); + if (result) + goto err; + + multicorebase[0] = gHantroRegBase; + elements = 1; + + // Find the IRQ + if (irq > 0) + irq = gDev->irq; + + bige_data.iosize = DEC_IO_SIZE; + bige_data.irq = irq; + + for(i=0; i< HXDEC_MAX_CORES; i++) + { + bige_data.hwregs[i] = 0; + /* If user gave less core bases that we have by default, + * invalidate default bases + */ + if(elements && i>=elements) + { + multicorebase[i] = -1; + } + } + + bige_data.async_queue_dec = NULL; + + result = register_chrdev(bige_major, "bige", &bige_fops); + if (result < 0) { + printk(KERN_ERR "bige: unable to get major %d\n", bige_major); + goto err; + } else if (result != 0) { + bige_major = result; + } + + result = ReserveIO(); + if (result < 0) { + goto err; + } + + memset(dec_owner, 0, sizeof(dec_owner)); + + sema_init(&dec_core_sem, bige_data.cores); +#if 0 + // TODO(trevorbunker): Do some sanity reads and writes. These can be removed + // when driver works. 
+ memset_io(gHantroRegVirt, 0, 256*4); + + memset(buf, 0, 4*4); + memcpy_fromio(buf, gHantroRegVirt, 4*4); + printk(KERN_INFO "Before: buf[0] = 0x%X\n", buf[0]); + printk(KERN_INFO "Before: buf[1] = 0x%X\n", buf[1]); + printk(KERN_INFO "Before: buf[2] = 0x%X\n", buf[2]); + printk(KERN_INFO "Before: buf[3] = 0x%X\n", buf[3]); + + buf[0] = 0xDEADBEEF; + buf[1] = 0x12345678; + buf[2] = 0xBEE3FEED; + buf[3] = 0x98765432; + memcpy_toio(gHantroRegVirt, buf, 4*4); + + memset(buf, 0, 4*4); + memcpy_fromio(buf, gHantroRegVirt, 4*4); + + printk(KERN_INFO "After: buf[0] = 0x%X\n", buf[0]); + printk(KERN_INFO "After: buf[1] = 0x%X\n", buf[1]); + printk(KERN_INFO "After: buf[2] = 0x%X\n", buf[2]); + printk(KERN_INFO "After: buf[3] = 0x%X\n", buf[3]); +#endif + // read configuration fo all cores + ReadCoreConfig(&bige_data); + + /* reset hardware */ + ResetAsic(&bige_data); + + if (irq <= 0) { + printk(KERN_INFO "bige: IRQ not in use!\n"); + } + printk(KERN_INFO "bige: module inserted. Major = %d\n", bige_major); + + return 0; + +err: + ReleaseIO(); + unregister_chrdev(bige_major, "bige"); + printk(KERN_INFO "bige: module not inserted\n"); + return result; +} + +/*------------------------------------------------------------------------------ + Function name : bige_cleanup + Description : clean up + + Return type : int +------------------------------------------------------------------------------*/ + +void __exit bige_cleanup(void) +{ + bige_t *dev = &bige_data; + + /* reset hardware */ + ResetAsic(dev); + + // Release all of the PCI regions and mappings + ReleaseIO(); + + unregister_chrdev(bige_major, "bige"); + + printk(KERN_INFO "bige: module removed\n"); + return; +} + +/*------------------------------------------------------------------------------ + Function name : PcieInit + Description : Initialize PCI Hw access + + Return type : int + ------------------------------------------------------------------------------*/ + +static int PcieInit(void) +{ + int rc = 0; + struct 
uio_info *info;
+
+	// Look for a device on the PCIe bus that matches the vendor and device ID
+	gDev = pci_get_device(BIGE_PCI_VENDOR_ID, BIGE_PCI_DEVICE_ID, gDev);
+	if (gDev == NULL) {
+		gDev = pci_get_device(PCI_VENDOR_ID_MCST_TMP, 0x803b, gDev);
+	}
+	if (gDev == NULL) {
+		printk(KERN_ERR "bige: pci_get_device() failed.\n");
+		return -1;
+	}
+
+	// Allocate space for the uio_info struct
+	info = kzalloc(sizeof(struct uio_info), GFP_KERNEL);
+	if (!info) {
+		printk(KERN_ERR "bige: kzalloc() failed for uio_info struct.\n");
+		return -ENOMEM;
+	}
+
+	// Enable the PCIe device
+	rc = pci_enable_device(gDev);
+	if (rc) {
+		printk(KERN_ERR "bige: pci_enable_device() failed.\n");
+		return -1;
+	}
+
+	// Check that BAR 0 exists
+	if (!(pci_resource_flags(gDev, BIGE_CONTROL_BAR) & IORESOURCE_MEM)) {
+		printk(KERN_ERR "bige: BAR %d is configured incorrectly or missing.\n",
+		       BIGE_CONTROL_BAR);
+		return -1;
+	}
+
+	// Request ownership of PCI device
+	rc = pci_request_regions(gDev, "bige");
+	if (rc) {
+		printk(KERN_ERR "bige: pci_request_regions() failed.\n");
+		return -1;
+	}
+
+	// Get base address of BAR 0
+	gBaseHdwr = pci_resource_start(gDev, BIGE_CONTROL_BAR);
+	gBaseLen = pci_resource_len (gDev, BIGE_CONTROL_BAR);
+	if (gBaseHdwr < 0) {
+		printk(KERN_ERR "bige: invalid base address of BAR %d.\n",
+		       BIGE_CONTROL_BAR);
+		return (-1);
+	}
+	printk(KERN_INFO "bige: BAR %d is located at 0x%X and is %d bytes\n",
+	       BIGE_CONTROL_BAR, (unsigned int)gBaseHdwr, (unsigned int)gBaseLen);
+
+	// Remap the I/O register block so that it can be safely accessed.
+ gBaseVirt = pci_ioremap_bar(gDev, BIGE_CONTROL_BAR); + if (!gBaseVirt) { + printk(KERN_ERR "bige: pci_ioremap_bar() failed.\n"); + return -1; + } + pci_set_master(gDev); + +#ifdef CONFIG_E90S + rc = pci_alloc_irq_vectors(gDev, 1, 1, PCI_IRQ_MSIX); + if (rc < 0) { + printk(KERN_ERR "bige: unable to allocate MSIX irq vector.\n"); + return rc; + } + irq = pci_irq_vector(gDev, 0); +#else /* E2K */ + // Try to setup the interrupt + if (pci_enable_msi(gDev)) { + printk(KERN_ERR "bige: pci_enable_msi() failed.\n"); +// return -1; + } + irq = gDev->irq; +#endif + printk(KERN_INFO "bige: IRQ = %d\n", gDev->irq); + + gHantroRegBase = gBaseHdwr + BIGE_REG_OFFSET; + gHantroRegVirt = (unsigned int *)gBaseVirt + BIGE_REG_OFFSET/4; + //((unsigned int*)gBaseVirt)[HLINA_ADDR_TRANSL_REG] = HLINA_TRANSL_BASE; + //printk("bige: Address translation base for %x\n", + // (((unsigned int*)gBaseVirt)[HLINA_ADDR_TRANSL_REG])); + // + + // Create uio_info type + info->name = "bige"; + info->version = "0.0.2"; + info->mem[0].addr = gBaseHdwr; + info->mem[0].internal_addr = gBaseVirt; + info->mem[0].size = gBaseLen; + info->mem[0].memtype = UIO_MEM_PHYS; + info->mem[0].name = "BigEv2 regs"; + info->irq = irq; + info->irq_flags = IRQF_SHARED; + info->handler = bige_isr; + info->irqcontrol = bige_irqcontrol; + + /* Try to register the UIO device */ + rc = uio_register_device(&gDev->dev, info); + if (rc) { + printk(KERN_ERR "bige: uio_register_device() failed (%d).\n", rc); + return -1; + } + + // Store the uio_info struct + pci_set_drvdata(gDev, info); + + return 0; +} + +///*------------------------------------------------------------------------------ +// Function name : CheckHwId +// Return type : int +//------------------------------------------------------------------------------*/ +//static int CheckHwId(bige_t * dev) +//{ +// long int hwid; +// int i; +// size_t numHw = sizeof(DecHwId) / sizeof(*DecHwId); +// +// int found = 0; +// +// for (i = 0; i < dev->cores; i++) +// { +// if 
(dev->hwregs[i] != NULL ) +// { +// hwid = readl(dev->hwregs[i]); +// printk(KERN_INFO "bige: Core %d HW ID=0x%08lx\n", i, hwid); +// hwid = (hwid >> 16) & 0xFFFF; /* product version only */ +// +// while (numHw--) +// { +// if (hwid == DecHwId[numHw]) +// { +// printk(KERN_INFO "bige: Supported HW found at 0x%08x\n", +// multicorebase[i]); +// found++; +// break; +// } +// } +// if (!found) +// { +// printk(KERN_INFO "bige: Unknown HW found at 0x%08x\n", +// multicorebase[i]); +// return 0; +// } +// found = 0; +// numHw = sizeof(DecHwId) / sizeof(*DecHwId); +// } +// } +// +// return 1; +//} + +static int ReserveIO(void) +{ +// bige_data.hwregs[0] = (volatile u8 *) gHantroRegVirt; +// +// if (bige_data.hwregs[0] == NULL ) +// { +// printk(KERN_INFO "bige: failed to ioremap HW regs\n"); +// ReleaseIO(); +// return -EBUSY; +// } +// +// bige_data.cores = 1; +// /* check for correct HW */ +// if (!CheckHwId(&bige_data)) +// { +// ReleaseIO(); +// return -EBUSY; +// } + + return 0; +} +/*------------------------------------------------------------------------------ + Function name : releaseIO + Description : release + + Return type : void +------------------------------------------------------------------------------*/ + +static void ReleaseIO(void) +{ + PDEBUG("Release IO\n"); + if (gBaseVirt != NULL) + pci_iounmap(gDev, gBaseVirt); + if (gDev != NULL) { + struct uio_info *info = pci_get_drvdata(gDev); + uio_unregister_device(info); + kfree(info); + + pci_release_regions(gDev); + pci_disable_device(gDev); + } +} + +/* Enable/disable interrupt for userland */ +static int bige_irqcontrol(struct uio_info *info, s32 irq_on) +{ + /* TODO: real need this?.. 
*/ + return 0; +} + +/*------------------------------------------------------------------------------ + Function name : bige_isr + Description : interrupt handler + + Return type : irqreturn_t +------------------------------------------------------------------------------*/ +static irqreturn_t bige_isr(int irq, struct uio_info *dev_info) +{ + unsigned int handled = IRQ_NONE; + void __iomem *hwregs = dev_info->mem[0].internal_addr; + u32 irq_status_enc; + + /* interrupt status register read */ + irq_status_enc = ioread32(hwregs + BIGE_IRQ_STAT_ENC_OFF); + if (irq_status_enc & (BIGE_IRQ_MASK + | BIGE_IRQ_AXI_READ_DATA_OVERFLOW_MASK + | BIGE_IRQ_AXI_WRITE_DATA_UNDERFLOW_MASK + | BIGE_IRQ_STREAM_BUF_OVERFLOW + | BIGE_IRQ_IDCT_OVERFLOW)) { + /* clear encoder IRQ and disable IRQ */ + irq_status_enc &= ~(BIGE_IRQ_MASK | BIGE_IRQ_EN_MASK); + iowrite32(irq_status_enc, hwregs + BIGE_IRQ_STAT_ENC_OFF); + handled = IRQ_HANDLED; + } + return IRQ_RETVAL(handled); +} + +/*------------------------------------------------------------------------------ + Function name : ResetAsic + Description : reset asic + + Return type : +------------------------------------------------------------------------------*/ +void ResetAsic(bige_t * dev) +{ +// int i, j; +// u32 status; +// +// for (j = 0; j < dev->cores; j++) +// { +// status = ioread32(dev->hwregs[j] + BIGE_IRQ_STAT_DEC_OFF); +// +// if( status & BIGE_DEC_E) +// { +// /* abort with IRQ disabled */ +// status = BIGE_DEC_ABORT | BIGE_DEC_IRQ_DISABLE; +// iowrite32(status, dev->hwregs[j] + BIGE_IRQ_STAT_DEC_OFF); +// } +// +// /* reset PP */ +// iowrite32(0, dev->hwregs[j] + BIGE_IRQ_STAT_PP_OFF); +// +// for (i = 4; i < dev->iosize; i += 4) +// { +// iowrite32(0, dev->hwregs[j] + i); +// } +// } +} +// +///*------------------------------------------------------------------------------ +// Function name : dump_regs +// Description : Dump registers +// +// Return type : 
+//------------------------------------------------------------------------------*/ +#ifdef BIGE_DEBUG +void dump_regs(bige_t *dev) +{ +// int i,c; +// +// PDEBUG("Reg Dump Start\n"); +// for(c = 0; c < dev->cores; c++) +// { +// for(i = 0; i < dev->iosize; i += 4*4) +// { +// PDEBUG("\toffset %04X: %08X %08X %08X %08X\n", i, +// ioread32(dev->hwregs[c] + i), +// ioread32(dev->hwregs[c] + i + 4), +// ioread32(dev->hwregs[c] + i + 16), +// ioread32(dev->hwregs[c] + i + 24)); +// } +// } +// PDEBUG("Reg Dump End\n"); +} +#endif + +module_init(bige_init); +module_exit(bige_cleanup); + +/* module description */ +//MODULE_LICENSE("Proprietary"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Google"); +MODULE_DESCRIPTION("Driver module for VP9 encoder (BigE)"); + diff --git a/drivers/mcst/BigEv2/pcie_driver/bige.h b/drivers/mcst/BigEv2/pcie_driver/bige.h new file mode 100644 index 000000000000..182500221f9c --- /dev/null +++ b/drivers/mcst/BigEv2/pcie_driver/bige.h @@ -0,0 +1,57 @@ +/* Copyright 2012 Google Inc. All Rights Reserved. */ + +#ifndef _BIGE_H_ +#define _BIGE_H_ + +#include +#include + +//#define BIGE_DEBUG + +#undef PDEBUG +#ifdef BIGE_DEBUG +# ifdef __KERNEL__ +# define PDEBUG(fmt, args...) printk( KERN_INFO "bige: " fmt, ## args) +# else +# define PDEBUG(fmt, args...) fprintf(stderr, fmt, ## args) +# endif +#else +# define PDEBUG(fmt, args...) +#endif + +/* TODO(mheikkinen) These are the Xilinx defaults. */ +/* Base address got control register */ +#ifdef CONFIG_E90S /* MCST R2000+ */ +#define BIGE_CONTROL_BAR 2 +#define BIGE_PCI_VENDOR_ID PCI_VENDOR_ID_MCST_TMP +#define BIGE_PCI_DEVICE_ID PCI_DEVICE_ID_MCST_VP9_BIGEV2_R2000P +#else /* MCST Elbrus-e2c3 */ +#define BIGE_CONTROL_BAR 0 +#define BIGE_PCI_VENDOR_ID PCI_VENDOR_ID_MCST_TMP +#define BIGE_PCI_DEVICE_ID PCI_DEVICE_ID_MCST_VP9_BIGEV2 +#endif + +/* PCIe BigEv2 driver offset in control register */ +#define BIGE_REG_OFFSET 0x0 +/* Address translation from CPU bus address to PCI bus address. 
*/ +/* TODO(mheikkinen) Now set separately in memalloc and kernel driver, + * should this be set in a single place. */ +#define HLINA_TRANSL_BASE 0x0 +/* Base address of PCI base address translation */ +#define HLINA_ADDR_TRANSL_REG 0x20c/4 + + +/* Interrupt register of BigE (swreg1) */ +#define BIGE_IRQ_STAT_ENC_OFF 0x4 +#define BIGE_IRQ_EN_MASK (1<<9) +#define BIGE_IRQ_MASK (1<<4) /* masked by BIGE_IRQ_EN_MASK */ +#define BIGE_IRQ_FRAME_READY (1<<3) +#define BIGE_IRQ_BUS_ERROR (1<<2) +#define BIGE_IRQ_TIMEOUT (1<<1) +/* fatal cases */ +#define BIGE_IRQ_AXI_WRITE_DATA_UNDERFLOW_MASK (1<<8) +#define BIGE_IRQ_AXI_READ_DATA_OVERFLOW_MASK (1<<7) +#define BIGE_IRQ_STREAM_BUF_OVERFLOW (1<<6) +#define BIGE_IRQ_IDCT_OVERFLOW (1<<5) + +#endif /* !_BIGE_H_ */ diff --git a/drivers/mcst/BigEv2/pcie_driver/bige_ioctl.h b/drivers/mcst/BigEv2/pcie_driver/bige_ioctl.h new file mode 100644 index 000000000000..5658d2f4510c --- /dev/null +++ b/drivers/mcst/BigEv2/pcie_driver/bige_ioctl.h @@ -0,0 +1,58 @@ +/* Copyright 2012 Google Inc. All Rights Reserved. 
*/ + +#ifndef _BIGE_IOCTL_H_ +#define _BIGE_IOCTL_H_ + +#include +#include + +struct core_desc +{ + __u32 id; /* id of the core */ + void* regs; /* pointer to user registers */ + __u32 size; /* size of register space */ +}; + +/* Use 'k' as magic number */ +#define BIGE_IOC_MAGIC 'k' + +/* + * S means "Set" through a ptr, + * T means "Tell" directly with the argument value + * G means "Get": reply by setting through a pointer + * Q means "Query": response is on the return value + * X means "eXchange": G and S atomically + * H means "sHift": T and Q atomically + */ + +#define BIGE_PP_INSTANCE _IO(BIGE_IOC_MAGIC, 1) +#define BIGE_HW_PERFORMANCE _IO(BIGE_IOC_MAGIC, 2) +#define BIGE_IOCGHWOFFSET _IOR(BIGE_IOC_MAGIC, 3, unsigned long *) +#define BIGE_IOCGHWIOSIZE _IOR(BIGE_IOC_MAGIC, 4, unsigned int *) + +#define BIGE_IOC_CLI _IO(BIGE_IOC_MAGIC, 5) +#define BIGE_IOC_STI _IO(BIGE_IOC_MAGIC, 6) +#define BIGE_IOC_MC_OFFSETS _IOR(BIGE_IOC_MAGIC, 7, unsigned long *) +#define BIGE_IOC_MC_CORES _IOR(BIGE_IOC_MAGIC, 8, unsigned int *) + +#define BIGE_IOCS_DEC_PUSH_REG _IOW(BIGE_IOC_MAGIC, 9, struct core_desc *) +#define BIGE_IOCS_PP_PUSH_REG _IOW(BIGE_IOC_MAGIC, 10, struct core_desc *) + +#define BIGE_IOCH_DEC_RESERVE _IO(BIGE_IOC_MAGIC, 11) +#define BIGE_IOCT_DEC_RELEASE _IO(BIGE_IOC_MAGIC, 12) +#define BIGE_IOCQ_PP_RESERVE _IO(BIGE_IOC_MAGIC, 13) +#define BIGE_IOCT_PP_RELEASE _IO(BIGE_IOC_MAGIC, 14) + +#define BIGE_IOCX_DEC_WAIT _IOWR(BIGE_IOC_MAGIC, 15, struct core_desc *) +#define BIGE_IOCX_PP_WAIT _IOWR(BIGE_IOC_MAGIC, 16, struct core_desc *) + +#define BIGE_IOCS_DEC_PULL_REG _IOWR(BIGE_IOC_MAGIC, 17, struct core_desc *) +#define BIGE_IOCS_PP_PULL_REG _IOWR(BIGE_IOC_MAGIC, 18, struct core_desc *) + +#define BIGE_IOX_ASIC_ID _IOWR(BIGE_IOC_MAGIC, 20, __void* ) + +#define BIGE_DEBUG_STATUS _IO(BIGE_IOC_MAGIC, 29) + +#define BIGE_IOC_MAXNR 29 + +#endif /* !_BIGE_IOCTL_H_ */ diff --git a/drivers/mcst/Kconfig b/drivers/mcst/Kconfig new file mode 100644 index 
000000000000..72a939e45e44 --- /dev/null +++ b/drivers/mcst/Kconfig @@ -0,0 +1,198 @@ +menu "Elbrus MCST Device Drivers" + +#config DDI +# tristate "Drivers Device Interface (Solaris) support " +# default m + +config DRM_MGA2 + tristate "MGA2 video adapter" + depends on PCI && DRM && (E2K || E90S) + default m + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + select VIDEOMODE_HELPERS + select DRM_KMS_HELPER + select DRM_KMS_FB_HELPER + select DRM_IMX_HDMI + select DRM_DW_HDMI_CEC + select DRM_I2C_SIL164 + select DRM_PANEL_LVDS + select BACKLIGHT_PWM + +config MGA2_PWM + tristate "MGA2 pulse-width modulation (PWM) driver" + depends on PWM + default m + +config MGA2_GPIO + tristate "MGA2 GPIO driver" + depends on GPIOLIB + default m + +config MSPS + tristate "MSPS driver" + depends on E2K && NUMA + default m + help + Driver for MSPS - needed for connect external ADC with our VK. + +config ELDSP + tristate "Elbrus DSP driver" + depends on E2K && CPU_ES2 + default m + help + Driver for Elbrus DSP (eldsp), older name "DSP Cube". + +config MPV + tristate "MPV driver" + depends on MCST && PCI + +config MGPM + tristate "MGPM PCI driver" + depends on PCI + +config MMRM + tristate "MMRM PCI driver" + depends on PCI + help + Driver for MMR (Manchester code) on PCI". + +config MOKM + tristate "MOKM PCI driver" + depends on PCI + + +config RDMA + tristate "RDMA driver" + depends on E2K + default m + +config RDMA_SIC + tristate "RDMA_SIC driver" + depends on (E2K || E90S) && NUMA + default m + +config RDMA_M + tristate "RDMA_M driver" + depends on (E2K || E90S) && NUMA + default m + +config MOKX + tristate "MOKX driver" + depends on E2K && NUMA + default m + +config WD + bool "WD driver" + depends on E2K + help + System work(watchdog) control manager. + /dev/watchdog major 10 minor 130 + +config DMP_ASSIST + tristate "DMP_ASSIST driver" + depends on E2K && RECOVERY + ---help--- + Pseudo-driver for online support dump-analyzer activity. 
+	  Needs ....dump_analyze_cmd= ... in cmdline.
+
+config LPTOUTS
+	tristate "GPIO via LPT"
+	depends on PARPORT_MCST
+	default m
+	---help---
+	  You should say Y here if you have a BCVM-INEUM module.
+	  To compile this driver as a module, choose M here: the
+	  module will be called lptouts.
+
+config M2MLC
+	tristate "M2MLC driver"
+	depends on PCI
+	default m
+	---help---
+	  Driver for Node Interconnect Controller.
+
+config APKPWR
+	tristate "APKPWR driver"
+	depends on I2C
+	default m
+	---help---
+	  Driver for MUP-4C power supply module (used in APK-NT4C and APK-PC4C).
+
+config MEM2ALLOC
+	tristate "DMA memory allocator"
+	default m
+	---help---
+	  DMA memory allocator.
+
+config HANTRODEC
+	tristate "Google hantrodec VP9 driver"
+	default m
+	---help---
+	  Google VP9 driver.
+
+config BIGE
+	tristate "Google bige VP9 encoder driver"
+	default m
+	select UIO
+	---help---
+	  Google VP9 encoder driver.
+
+config E8CPCS
+	tristate "E8C Power Control System (PCS) hwmon driver"
+	depends on E2K && (E2K_E8C || E2K_E8C2 || E2K_MINVER <= 5)
+	default m
+
+config PCSM
+	tristate "Power Control System (PCS) for e8c* and e2c3 systems"
+	depends on E2K
+	default m
+	---help---
+	  Needed for power management system (based on e1c+ or other)
+
+config IMGTEC
+	tristate "Imagination codec driver"
+	depends on E2K
+	default m
+	select BUS_MASTERING
+	select VXD_FPGA
+	---help---
+	  Imagination codec driver.
+ +config BUS_MASTERING + def_bool y + depends on IMGTEC + +config VXD_FPGA + def_bool y + depends on IMGTEC + +source "drivers/mcst/gpu-imgtec/Kconfig" + +source "drivers/mcst/gpu-viv/Kconfig" + +source "drivers/mcst/smi/Kconfig" + +config SMI_PWM + tristate "Silicon Motion pulse-width modulation (PWM) driver" + depends on PWM + default m + +config SMI_GPIO + tristate "Silicon Motion GPIO driver" + depends on GPIOLIB + select GPIOLIB_IRQCHIP + default m + +config SENSORS_EMC2305 + tristate "SMSC EMC2305" + depends on I2C + help + If you say yes here you get support for the SMSC EMC2305/EMC2303 + fan controller chips. + + This driver can also be built as a module. If so, the module + will be called emc2305. + +endmenu diff --git a/drivers/mcst/Makefile b/drivers/mcst/Makefile new file mode 100644 index 000000000000..31aeb216b2d3 --- /dev/null +++ b/drivers/mcst/Makefile @@ -0,0 +1,40 @@ +# +# Makefile for the Linux kernel device drivers. +# + +obj-$(CONFIG_E90_FASTBOOT) += prom/ +obj-$(CONFIG_E90S) += prom/ +obj-$(CONFIG_E2K) += prom/ +obj-y += mvp/ +obj-y += mpv/ +obj-y += mgpm/ +obj-y += mmrm/ +obj-y += rdma/ +obj-y += rdma_sic/ +obj-y += rdma_m/ +obj-y += mokx/ +obj-y += mpk/ +obj-y += mokm/ +obj-$(CONFIG_DMP_ASSIST) += dmp_assist/ +obj-$(CONFIG_I2C_SPD) += i2c_spd/ +obj-$(CONFIG_MSPS) += msps/ +obj-$(CONFIG_ELDSP) += eldsp/ +obj-$(CONFIG_MCST_GPU_IMGTEC) += gpu-imgtec/ +obj-$(CONFIG_MCST_GPU_VIV) += gpu-viv/ +obj-$(CONFIG_LPTOUTS) += lptouts/ +obj-$(CONFIG_DRM_MGA2) += mga2/ +obj-$(CONFIG_MGA2_PWM) += mga2-pwm/ +obj-$(CONFIG_MGA2_GPIO) += mga2-gpio/ +obj-y += m2mlc/ +obj-y += apkpwr/ +obj-y += hantrodec/ +obj-y += BigEv2/ +obj-$(CONFIG_E2K) += pcs/ +obj-$(CONFIG_E2K) += pcsm/ +obj-$(CONFIG_IMGTEC) += video-imgtec/ +obj-$(CONFIG_DRM_SMI) += smi/ +obj-$(CONFIG_SMI_PWM) += smi-pwm/ +obj-$(CONFIG_SMI_GPIO) += smi-gpio/ +obj-$(CONFIG_MEM2ALLOC) += mem2alloc/ +obj-$(CONFIG_SENSORS_EMC2305) += emc/ + diff --git a/drivers/mcst/apkpwr/Makefile 
b/drivers/mcst/apkpwr/Makefile new file mode 100644 index 000000000000..1e6b463e9afd --- /dev/null +++ b/drivers/mcst/apkpwr/Makefile @@ -0,0 +1,6 @@
+#
+## Makefile for the Linux kernel device drivers.
+#
+
+obj-$(CONFIG_APKPWR) += apkpwr.o
+
diff --git a/drivers/mcst/apkpwr/apkpwr.c b/drivers/mcst/apkpwr/apkpwr.c new file mode 100644 index 000000000000..25606ebe393e --- /dev/null +++ b/drivers/mcst/apkpwr/apkpwr.c @@ -0,0 +1,1457 @@
+/*
+ * apkpwr.c - APKPWR driver
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#ifdef CONFIG_DEBUG_FS
+#include
+#endif /* CONFIG_DEBUG_FS */
+
+
+#define DRIVER_NAME "apkpwr"
+#define DRIVER_VERSION "1.0"
+
+#define APKPWR_POWER_SUPPLY /* Enable POWER_SUPPLY subsystem */
+#define APKPWR_GET_DATA_FROM_BATTERY
+
+
+/*---------------------------------------------------------------------------*/
+
+/* APKPWR registers */
+#define APKPWR_REG16_RO_ID 0x00 /* #00,01 */
+#define APKPWR_REG16_RW_FLAGS 0x02 /* #02,03 */
+#define APKPWR_REG16_RO_BATFLAGS 0x04 /* #04,05 */
+#define APKPWR_REG16_RO_ADC_VBAT_I 0x06 /* #06,07 */
+#define APKPWR_REG16_RO_ADC_THERM_I 0x08 /* #08,09 */
+#define APKPWR_REG16_RO_ADC_VBAT_E 0x0A /* #0A,0B */
+#define APKPWR_REG16_RO_ADC_THERM_E 0x0C /* #0C,0D */
+#define APKPWR_REG16_RO_ADC_V12 0x0E /* #0E,0F */
+#define APKPWR_REG16_RO_ADC_VSYS 0x10 /* #10,11 */
+#define APKPWR_REG16_RO_ADC_5VSB 0x12 /* #12,13 */
+ /* #14,15 */
+#define APKPWR_REG16_RO_LEDS 0x16 /* #16,17 */
+/* registers of the accumulators */
+#define APKPWR_REG16_RO_UPDATECNT 0x18 /* #18,19 */
+/* registers of internal accumulator */
+#define APKPWR_REG16_BST_TEMP_I 0x1A /* Temperature - 0x08 */
+#define APKPWR_REG16_BST_VOLT_I 0x1C /* Voltage - 0x09 */
+#define APKPWR_REG16_BST_CURR_I 0x1E /* Current - 0x0A */
+#define APKPWR_REG16_BST_STOFCH_I 0x20 /* RelatStateOfCharge - 0x0D */
+#define APKPWR_REG16_BST_REMCAP_I 0x22 /* RemainingCapacity - 0x0F */
+#define APKPWR_REG16_BST_FCHCAP_I 0x24 /* FullChargeCapacity - 0x10 */ +#define APKPWR_REG16_BST_CHCURR_I 0x26 /* ChargingCurrent - 0x14 */ +#define APKPWR_REG16_BST_CHVOLT_I 0x28 /* ChargingVoltage - 0x15 */ +#define APKPWR_REG16_BST_STAT_I 0x2A /* BatteryStatus - 0x16 */ +/* registers of external accumulator */ +#define APKPWR_REG16_BST_TEMP_E 0x2C /* Temperature - 0x08 */ +#define APKPWR_REG16_BST_VOLT_E 0x2E /* Voltage - 0x09 */ +#define APKPWR_REG16_BST_CURR_E 0x30 /* Current - 0x0A */ +#define APKPWR_REG16_BST_STOFCH_E 0x32 /* RelatStateOfCharge - 0x0D */ +#define APKPWR_REG16_BST_REMCAP_E 0x34 /* RemainingCapacity - 0x0F */ +#define APKPWR_REG16_BST_FCHCAP_E 0x36 /* FullChargeCapacity - 0x10 */ +#define APKPWR_REG16_BST_CHCURR_E 0x38 /* ChargingCurrent - 0x14 */ +#define APKPWR_REG16_BST_CHVOLT_E 0x3A /* ChargingVoltage - 0x15 */ +#define APKPWR_REG16_BST_STAT_E 0x3C /* BatteryStatus - 0x16 */ + +#define APKPWR_REG16_END APKPWR_REG16_BST_STAT_E /* APKPWR_REG16_RO_LEDS */ +#define APKPWR_REG16_CNT ((APKPWR_REG16_END / 2) + 1) + + +/** APKPWR specific bitfields */ + +/* APKPWR_REG16_RO_ID */ +#define MANUFACTURER_ID 0x1502 + + +/* APKPWR_REG16_RW_FLAGS: FLAG - RO, CMD - RW */ +#define FLAG_ACP 0x0001 /* AC adapter ON */ +#define CMD_BATSEL 0x0002 /* 0 - select internal battery + 1 - select external battery */ +#define FLAG_FAULT_ICL 0x0004 /* Icharge decreased (Iin > 5A) */ + + +/* APKPWR_REG16_RO_BATFLAGS */ +/* - internal battery - */ +#define FLAG_ACC_STAT_I 0x0001 /* overtemp2, low bat voltage2 */ +#define FLAG_CHARGE_COMPLET_I 0x0002 /* Charge complete (Icharge < 2A-10%) */ +#define FLAG_FAULT_SHUTDN_I 0x0004 /* Charging circuit off (overtemp, + low bat voltage) */ +#define FLAG_FAULT_LOW_VOLT_I 0x0008 /* Low voltage on battery (<16,2V) */ +#define FLAG_FAULT_OVERTEMP_I 0x0010 /* Battery overtemp (>45`C or <0`C) */ +#define FLAG_FAULT_OVERTEMP2_I 0x0020 /* Battery overtemp (>55`C or <-30`C) */ +#define FLAG_FAULT_LOW_VOLT2_I 0x0040 /* Low voltage on 
battery (<19,3V) */ +#define FLAG_ACC_PRESENSE_I 0x0080 /* internal battery present */ +/* - external battery - */ +#define FLAG_ACC_STAT_E 0x0100 /* overtemp2, low bat voltage2 */ +#define FLAG_CHARGE_COMPLET_E 0x0200 /* Charge complete (Icharge < 2A-10%) */ +#define FLAG_FAULT_SHUTDN_E 0x0400 /* Charging circuit off (overtemp, + low bat voltage) */ +#define FLAG_FAULT_LOW_VOLT_E 0x0800 /* Low voltage on battery (<16,2V) */ +#define FLAG_FAULT_OVERTEMP_E 0x1000 /* Battery overtemp (>45`C or <0`C) */ +#define FLAG_FAULT_OVERTEMP2_E 0x2000 /* Battery overtemp (>55`C or <-30`C) */ +#define FLAG_FAULT_LOW_VOLT2_E 0x4000 /* Low voltage on battery (<19,3V) */ +#define FLAG_ACC_PRESENSE_E 0x8000 /* internal battery present */ + + +/* APKPWR_REG16_RO_ADC_VBAT_I - battery voltage (from ADC) */ +/* APKPWR_REG16_RO_ADC_VBAT_E - battery voltage (from ADC) */ +#define ADC_VBAT_MAX 0x03FF /* 0..1023, =1024 - invalid */ +#define VBATmV_FROM_ADC(val) ((((val & ADC_VBAT_MAX) * 1000000) / 346883) * 10) +#define CHARGE_MIN 16200 /* TODO: min ? */ +#define CHARGE_MAX 25200 /* TODO: max ? 
*/ + + +/* APKPWR_REG16_RO_ADC_THERM_I - battery temp (from ADC) */ +/* APKPWR_REG16_RO_ADC_THERM_E - battery temp (from ADC) */ +#define ADC_THERM_MAX 0x03FF /* 0..1023, =1024 - invalid */ +/* TODO: */ +#define TBAToC_FROM_ADC(val) (77126 - (((val & ADC_THERM_MAX) \ + * 1000000) / 10107)) +/* +#define BAT_TEMP_ALERT_MAX 61000 +*/ + + +/* APKPWR_REG16_RO_ADC_V12 - output 12V (from ADC) */ +#define ADC_V12OUT_MAX 0x03FF /* 0..1023, =1024 - invalid */ +#define V12mV_FROM_ADC(val) ((((val & ADC_V12OUT_MAX) * 1000000) / 780487) * 10) + + +/* APKPWR_REG16_RO_ADC_VSYS - output (27V) system power (from ADC) */ +#define ADC_VSYS_MAX 0x03FF /* 0..1023, =1024 - invalid */ +#define VSYSmV_FROM_ADC(val) ((((val & ADC_VSYS_MAX) * 1000000) / 346883) * 10) + + +/* APKPWR_REG16_RO_ADC_5VSB - output 5V (from ADC) */ +#define ADC_V5SB_MAX 0x03FF /* 0..1023, =1024 - invalid */ +#define V5SBmV_FROM_ADC(val) \ + ((((val & ADC_V5SB_MAX) * 1000000) / 18778636) * 100) + + +/* APKPWR_REG16_RO_LEDS */ +#define FLAG_LED_OFF_I 0x0004 +#define FLAG_LED_FAULT_I 0x0008 /* red flash short */ +#define FLAG_LED_RED_I 0x0010 /* red light */ +#define FLAG_LED_RED_PULSE_I 0x0020 /* red flash */ +#define FLAG_LED_GREEN_PULSE_I 0x0040 /* green flash */ +#define FLAG_LED_GREEN_I 0x0080 /* green light */ +#define FLAG_LED_OFF_E 0x0400 +#define FLAG_LED_FAULT_E 0x0800 /* red flash short */ +#define FLAG_LED_RED_E 0x1000 /* red light */ +#define FLAG_LED_RED_PULSE_E 0x2000 /* red flash */ +#define FLAG_LED_GREEN_PULSE_E 0x4000 /* green flash */ +#define FLAG_LED_GREEN_E 0x8000 /* green light */ + + +/* registers of the accumulators index */ +#define BST_REG08_TEMP 0 /* Temperature */ +#define BST_REG09_VOLT 1 /* Voltage */ +#define BST_REG0A_CURR 2 /* Current */ +#define BST_REG0D_STOFCH 3 /* RelatStateOfCharge */ +#define BST_REG0F_REMCAP 4 /* RemainingCapacity */ +#define BST_REG10_FCHCAP 5 /* FullChargeCapacity */ +#define BST_REG14_CHCURR_E 6 /* ChargingCurrent */ +#define BST_REG15_CHVOLT_E 7 /* 
ChargingVoltage */ +#define BST_REG16_STAT_E 8 /* BatteryStatus */ +#define APKPWR_BSTREGS_CNT (BST_REG16_STAT_E + 1) + + +/* APKPWR_REG16_BST_TEMP_* */ +#define TCFROMmK(val) (((val * 10) - 27315) / 10) /* `C * 10 */ + + +/*---------------------------------------------------------------------------*/ + +#ifdef APKPWR_POWER_SUPPLY + +/* Battery data + * + * power supply monitor class: + * voltages in uV + * currents in uA + * charges in uAh + * energies in uWh + * time in seconds + * temperatures in tenths of degree Celsius + */ +struct apkpwr_bat_data { + struct power_supply *battery; + struct power_supply_desc battery_desc; + int status; /* power_supply status */ + int health; /* power_supply health */ + int present; /* power_supply present */ + int online; /* power_supply online */ + int level; /* power_supply capacity_level */ + int voltage; /* power_supply voltage_now */ + int capacity; /* power_supply capacity */ + int battemp; /* power_supply temp */ +}; + +#endif /* APKPWR_POWER_SUPPLY */ + +/* Client data from accumulators */ +struct apkpwr_bat_regs { + s32 reg[APKPWR_BSTREGS_CNT]; + s32 reg_st[APKPWR_BSTREGS_CNT]; +}; + +/* Client data (each client gets its own) */ +struct apkpwr_data { + struct device *hwmon_dev; + struct i2c_client *client; + struct mutex update_lock; + + unsigned long last_updated, rate; /* in jiffies */ + char valid; /* zero until following fields are valid */ + /* registers values */ + s32 flags, flags_st; + s32 batflags, batflags_st; + s32 vbat_i, vbat_i_st; /* hwmon in1 */ + s32 therm_i, therm_i_st; /* hwmon temp1 */ + s32 vbat_e, vbat_e_st; /* hwmon in2 */ + s32 therm_e, therm_e_st; /* hwmon temp2 */ + s32 v12, v12_st; /* hwmon in3 */ + s32 vsys, vsys_st; /* hwmon in4 */ + s32 v5sb, v5sb_st; /* hwmon in5 */ + s32 ledflags, ledflags_st; + s32 updatecnt, updatecnt_st; + struct apkpwr_bat_regs bat_regs[2]; + +#ifdef APKPWR_POWER_SUPPLY + /* Power supply */ + struct apkpwr_bat_data bat_data[2]; +#endif /* APKPWR_POWER_SUPPLY */ + 
+#ifdef CONFIG_DEBUG_FS + struct dentry *apkpwr_dbg; +#endif /* CONFIG_DEBUG_FS */ +}; + + +/*---------------------------------------------------------------------------*/ + +/* BUG on linux-l-rt-2.6.33.1/arch/l/kernel/i2c-spi/i2c.c + * line 328: value |= I2C_TRANS_SIZE(1); + */ +#ifndef USE_READ_WORD + +static s32 apkpwr_read_word(struct i2c_client *client, u8 command) +{ + s32 val; + u16 values; + + /* set address */ + val = i2c_smbus_write_byte(client, command); + if (val < 0) + return val; + + /* read 16 bit data */ + val = i2c_smbus_read_i2c_block_data(client, command, 2, (u8 *)&values); + if (val < 0) + return val; + else + return le16_to_cpu(values); +} /* apkpwr_read_word */ + +#else + +static s32 apkpwr_read_word(struct i2c_client *client, u8 command) +{ + return i2c_smbus_read_word_data(client, command); +} /* apkpwr_read_word */ + +#endif + +#if 0 +static s32 apkpwr_read_dump(struct i2c_client *client, u8 command, u8 length, + u8 *values) +{ + s32 val; + + /* set address */ + val = i2c_smbus_write_byte(client, command); + if (val < 0) + return val; + + /* read data */ + val = i2c_smbus_read_i2c_block_data(client, command, length, values); + if (val < 0) + return val; + else + return 0; +} /* apkpwr_read_dump */ +#endif /* 0 */ + +#if 0 +static s32 apkpwr_write_word(struct i2c_client *client, u8 command, u16 data) +{ + s32 val; + u16 value = cpu_to_le16(data); + +#ifdef DEBUG + dev_info(&client->adapter->dev, + "write_word: command=0x%02X data=0x%04X\n", + command, data); +#endif /* DEBUG */ + + /* write 16 bit data */ + val = i2c_smbus_write_block_data(client, command, 2, (u8 *)&value); + if (val < 0) + return val; + else + return 0; +} /* apkpwr_write_word */ +#endif /* 0 */ + +#if 0 +static s32 apkpwr_write_dword(struct i2c_client *client, u8 command, u32 data) +{ + s32 val; + u32 value = cpu_to_le32(data); + +#ifdef DEBUG + dev_info(&client->adapter->dev, + "write_dword: command=0x%02X data=0x%08X\n", + command, data); +#endif /* DEBUG */ + + /* write 
32 bit data */ + val = i2c_smbus_write_block_data(client, command, 4, (u8 *)&value); + if (val < 0) + return val; + else + return 0; +} /* apkpwr_write_dword */ +#endif /* 0 */ + +#if 0 +static s32 apkpwr_write_dump(struct i2c_client *client, u8 command, u8 length, + u8 *values) +{ + s32 val; + +#ifdef DEBUG + dev_info(&client->adapter->dev, + "write_dump: command=0x%02X length=%u values: " \ + "0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X\n", + command, length, + *(values + 0), *(values + 1), *(values + 2), *(values + 3), + *(values + 4), *(values + 5), *(values + 6), *(values + 7)); +#endif /* DEBUG */ + + /* write data */ + val = i2c_smbus_write_block_data(client, command, length, values); + if (val < 0) + return val; + else + return 0; +} /* apkpwr_write_dump */ +#endif /* 0 */ + + +#ifdef DEBUG + +static void dump_all_regs(struct i2c_client *client) +{ + int i; + s32 val; + struct i2c_adapter *adapter = client->adapter; + struct apkpwr_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + for (i = APKPWR_REG16_RO_ID; i <= APKPWR_REG16_END; i += 2) { + val = apkpwr_read_word(client, i); + if (val < 0) { + dev_err(&adapter->dev, + "apkpwr: reg16[%02d] - read_error (%d)\n", + i, val); + } else { + dev_dbg(&adapter->dev, + "apkpwr: reg16[%02d] = 0x%04x (%u)\n", + i, (u16)val, (u16)val); + } + } + mutex_unlock(&data->update_lock); +} /* dump_all_regs */ + +#endif /* DEBUG */ + + +#define APKPWR_UPDATE_FLAGS(REG, VAL, VAL_ST) \ +do { \ + data->VAL_ST = apkpwr_read_word(client, (REG)); \ + if (data->VAL_ST < 0) \ + data->VAL = 0; \ + else \ + data->VAL = data->VAL_ST; \ +} while (0) + +#define APKPWR_UPDATE_VAL(REG, VAL, VAL_ST, MAX) \ +do { \ + data->VAL_ST = apkpwr_read_word(client, (REG)); \ + if (data->VAL_ST < 0) \ + data->VAL = (MAX) + 1; \ + else \ + data->VAL = data->VAL_ST; \ +} while (0) + + +static void apkpwr_update_data(struct apkpwr_data *data) +{ + int i; + struct i2c_client *client = data->client; + + if (!client) + 
return; + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + data->rate) || + !data->valid) { + /* read all regs */ + + APKPWR_UPDATE_FLAGS(APKPWR_REG16_RW_FLAGS, + flags, flags_st); + APKPWR_UPDATE_FLAGS(APKPWR_REG16_RO_BATFLAGS, + batflags, batflags_st); + APKPWR_UPDATE_FLAGS(APKPWR_REG16_RO_LEDS, + ledflags, ledflags_st); + APKPWR_UPDATE_FLAGS(APKPWR_REG16_RO_UPDATECNT, + updatecnt, updatecnt_st); + + APKPWR_UPDATE_VAL(APKPWR_REG16_RO_ADC_VBAT_I, + vbat_i, vbat_i_st, ADC_VBAT_MAX); + APKPWR_UPDATE_VAL(APKPWR_REG16_RO_ADC_THERM_I, + therm_i, therm_i_st, ADC_THERM_MAX); + APKPWR_UPDATE_VAL(APKPWR_REG16_RO_ADC_VBAT_E, + vbat_e, vbat_e_st, ADC_VBAT_MAX); + APKPWR_UPDATE_VAL(APKPWR_REG16_RO_ADC_THERM_E, + therm_e, therm_e_st, ADC_THERM_MAX); + APKPWR_UPDATE_VAL(APKPWR_REG16_RO_ADC_V12, + v12, v12_st, ADC_V12OUT_MAX); + APKPWR_UPDATE_VAL(APKPWR_REG16_RO_ADC_VSYS, + vsys, vsys_st, ADC_VSYS_MAX); + APKPWR_UPDATE_VAL(APKPWR_REG16_RO_ADC_5VSB, + v5sb, v5sb_st, ADC_V5SB_MAX); + + for (i = 0; i < APKPWR_BSTREGS_CNT; i++) { + APKPWR_UPDATE_FLAGS(APKPWR_REG16_BST_TEMP_I + (i << 1), + bat_regs[0].reg[i], + bat_regs[0].reg_st[i]); + } + for (i = 0; i < APKPWR_BSTREGS_CNT; i++) { + APKPWR_UPDATE_FLAGS(APKPWR_REG16_BST_TEMP_E + (i << 1), + bat_regs[1].reg[i], + bat_regs[1].reg_st[i]); + } + +#ifdef APKPWR_POWER_SUPPLY + + /* State Of Charge - TODO */ + if ((data->batflags_st < 0) || (data->flags_st < 0)) + data->bat_data[0].status = + POWER_SUPPLY_STATUS_UNKNOWN; + else if (data->batflags & FLAG_FAULT_SHUTDN_I) + data->bat_data[0].status = + POWER_SUPPLY_STATUS_NOT_CHARGING; + else if (data->batflags & FLAG_CHARGE_COMPLET_I) + data->bat_data[0].status = + POWER_SUPPLY_STATUS_FULL; + else if (data->flags & FLAG_ACP) + data->bat_data[0].status = + POWER_SUPPLY_STATUS_CHARGING; + else + data->bat_data[0].status = + POWER_SUPPLY_STATUS_DISCHARGING; + + /* HEALTH - TODO */ + if ((data->batflags_st < 0) || (data->flags_st < 0) || + (data->ledflags_st < 
0)) + data->bat_data[0].health = + POWER_SUPPLY_HEALTH_UNKNOWN; + else if (data->batflags & FLAG_FAULT_OVERTEMP_I) + data->bat_data[0].health = + POWER_SUPPLY_HEALTH_OVERHEAT; + else if (data->flags & FLAG_FAULT_LOW_VOLT2_I) + data->bat_data[0].health = + POWER_SUPPLY_HEALTH_OVERVOLTAGE; + else if (data->batflags & FLAG_FAULT_LOW_VOLT_I) + data->bat_data[0].health = + POWER_SUPPLY_HEALTH_DEAD; + else if (data->ledflags & FLAG_LED_FAULT_I) + data->bat_data[0].health = + POWER_SUPPLY_HEALTH_DEAD; + else if (data->batflags & FLAG_FAULT_SHUTDN_I) + data->bat_data[0].health = + POWER_SUPPLY_HEALTH_UNSPEC_FAILURE; + else + data->bat_data[0].health = + POWER_SUPPLY_HEALTH_GOOD; + + /* present - Ok */ +#ifdef APKPWR_GET_DATA_FROM_BATTERY + if (data->updatecnt_st < 0) { + data->bat_data[0].present = 0; + data->bat_data[1].present = 0; + } else { + if (data->updatecnt & 0x00FF) + data->bat_data[0].present = 1; + else + data->bat_data[0].present = 0; + if (data->updatecnt & 0xFF00) + data->bat_data[1].present = 1; + else + data->bat_data[1].present = 0; + } +#else + if (data->batflags_st < 0) { + data->bat_data[0].present = 0; + } else { + if (data->batflags & FLAG_ACC_PRESENSE_I) { + data->bat_data[0].present = 1; + } else { + data->bat_data[0].present = 0; + } + } +#endif /* APKPWR_GET_DATA_FROM_BATTERY */ + + /* AC Connect - Ok */ + if (data->flags_st < 0) { + data->bat_data[0].online = 0; + data->bat_data[1].online = 0; + } else { + data->bat_data[0].online = + (data->flags & FLAG_ACP) ? 1 : 0; + data->bat_data[1].online = + (data->flags & FLAG_ACP) ? 
1 : 0; + } + + /* CAPACITY_LEVEL - TODO */ + if ((data->batflags_st < 0) || (data->ledflags_st < 0)) + data->bat_data[0].level = + POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; + else if (data->batflags & FLAG_CHARGE_COMPLET_I) + data->bat_data[0].level = + POWER_SUPPLY_CAPACITY_LEVEL_FULL; + else if (data->ledflags & FLAG_LED_GREEN_I) + data->bat_data[0].level = + POWER_SUPPLY_CAPACITY_LEVEL_HIGH; + else if (data->ledflags & FLAG_LED_GREEN_PULSE_I) + data->bat_data[0].level = + POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; + else if (data->ledflags & FLAG_LED_RED_PULSE_I) + data->bat_data[0].level = + POWER_SUPPLY_CAPACITY_LEVEL_LOW; + else if (data->ledflags & FLAG_LED_RED_I) + data->bat_data[0].level = + POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; + else if (data->ledflags & FLAG_LED_FAULT_I) + data->bat_data[0].level = + POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; + else + data->bat_data[0].level = + POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; + + /* VOLTAGE_NOW - Ok */ +#ifdef APKPWR_GET_DATA_FROM_BATTERY + if (data->bat_regs[0].reg_st[BST_REG09_VOLT] < 0) { + data->bat_data[0].voltage = 0; + } else { + data->bat_data[0].voltage = \ + data->bat_regs[0].reg[BST_REG09_VOLT] * 1000; + } + if (data->bat_regs[1].reg_st[BST_REG09_VOLT] < 0) { + data->bat_data[1].voltage = 0; + } else { + data->bat_data[1].voltage = \ + data->bat_regs[1].reg[BST_REG09_VOLT] * 1000; + } +#else + if (data->vbat_i_st < 0) + data->bat_data[0].voltage = 0; + else + data->bat_data[0].voltage = + VBATmV_FROM_ADC(data->vbat_i) * 1000; +#endif /* APKPWR_GET_DATA_FROM_BATTERY */ + + /* CAPACITY in percents - Ok */ +#ifdef APKPWR_GET_DATA_FROM_BATTERY + if (data->bat_regs[0].reg_st[BST_REG0D_STOFCH] < 0) { + data->bat_data[0].capacity = 0; + } else { + data->bat_data[0].capacity = \ + data->bat_regs[0].reg[BST_REG0D_STOFCH]; + } + if (data->bat_regs[1].reg_st[BST_REG0D_STOFCH] < 0) { + data->bat_data[1].capacity = 0; + } else { + data->bat_data[1].capacity = \ + data->bat_regs[1].reg[BST_REG0D_STOFCH]; + } +#else + if 
(data->vbat_i_st < 0) { + data->bat_data[0].capacity = 0; + } else { + int val; + val = VBATmV_FROM_ADC(data->vbat_i); + val = 100 - ((100 * (CHARGE_MAX - val)) \ + / (CHARGE_MAX - CHARGE_MIN)); + if (val < 0) + val = 0; + if (val > 100) + val = 100; + data->bat_data[0].capacity = val; + } +#endif /* APKPWR_GET_DATA_FROM_BATTERY */ + + /* TEMP - Ok */ +#ifdef APKPWR_GET_DATA_FROM_BATTERY + if (data->bat_regs[0].reg_st[BST_REG08_TEMP] < 0) { + data->bat_data[0].battemp = 0; + } else { + data->bat_data[0].battemp = \ + TCFROMmK(data->bat_regs[0].reg[BST_REG08_TEMP]); + } + if (data->bat_regs[1].reg_st[BST_REG08_TEMP] < 0) { + data->bat_data[1].battemp = 0; + } else { + data->bat_data[1].battemp = \ + TCFROMmK(data->bat_regs[1].reg[BST_REG08_TEMP]); + } +#else + if (data->therm_i_st < 0) { + data->bat_data[0].battemp = 0; + } else { + int val; + val = TBAToC_FROM_ADC(data->therm_i); + data->bat_data[0].battemp = (val / 100); + } +#endif /* APKPWR_GET_DATA_FROM_BATTERY */ + +#endif /* APKPWR_POWER_SUPPLY */ + + + /* update status */ + data->last_updated = jiffies; + data->valid = 1; + } + + mutex_unlock(&data->update_lock); +} /* apkpwr_update_data */ + + +/*---------------------------------------------------------------------------*/ +/* Debugfs part */ +/* Usage: mount -t debugfs none /sys/kernel/debug */ +/*---------------------------------------------------------------------------*/ + +#ifdef CONFIG_DEBUG_FS + +const char *apkpwr_dbg_regs_name[APKPWR_REG16_CNT] = { + "ID", + "RW_FLAGS", + "BATFLAGS", + "ADC_VBAT_I", + "ADC_THERM_I", + "ADC_VBAT_E", + "ADC_THERM_E", + "ADC_V12", + "ADC_VSYS", + "ADC_5VSB", + "", + "LEDS", + "UPDATECNT", + "BST_TEMP_I(08)", + "BST_VOLT_I(09)", + "BST_CURR_I(0A)", + "BST_STOFCH_I(0D)", + "BST_REMCAP_I(0F)", + "BST_FCHCAP_I(10)", + "BST_CHCURR_I(14)", + "BST_CHVOLT_I(15)", + "BST_STAT_I(16)", + "BST_TEMP_E(08)", + "BST_VOLT_E(09)", + "BST_CURR_E(0A)", + "BST_STOFCH_E(0D)", + "BST_REMCAP_E(0F)", + "BST_FCHCAP_E(10)", + 
"BST_CHCURR_E(14)", + "BST_CHVOLT_E(15)", + "BST_STAT_E(16)", +}; + +static char apkpwr_dbg_regs_buf[PAGE_SIZE] = ""; + +/** + * apkpwr_dbg_regs_read - read for regs datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t apkpwr_dbg_regs_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + int i; + s32 val; + char *buf = apkpwr_dbg_regs_buf; + int offs = 0; + struct apkpwr_data *data = filp->private_data; + struct i2c_client *client = data->client; + struct i2c_adapter *adapter = client->adapter; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + mutex_lock(&data->update_lock); + for (i = 0; i < APKPWR_REG16_CNT; i++) { + val = apkpwr_read_word(client, i << 1); + if (val < 0) { + dev_err(&adapter->dev, + "apkpwr: reg16[%02d] - read_error (%d)\n", + i, val); + } else { + offs += scnprintf(buf + offs, PAGE_SIZE - 1 - offs, + "reg16[%02d] = 0x%04x (%5u) - %s\n", + i, (u16)val, (u16)val, + apkpwr_dbg_regs_name[i]); + } + } + mutex_unlock(&data->update_lock); + + if (count < strlen(buf)) { + return -ENOSPC; + } + + return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); +} /* apkpwr_dbg_regs_read */ + +static const struct file_operations apkpwr_dbg_regs_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = apkpwr_dbg_regs_read, + /*.write = apkpwr_dbg_regs_write,*/ +}; + +/* /sys/kernel/debug/apkpwr */ +static struct dentry *apkpwr_dbg_root; + +/** + * apkpwr_dbgfs_init - setup the debugfs directory + **/ +void apkpwr_dbgfs_init(struct i2c_client *client) +{ + struct dentry *pfile; + const char *name = client->name; + struct apkpwr_data *data = i2c_get_clientdata(client); + + data->apkpwr_dbg = debugfs_create_dir(name, apkpwr_dbg_root); + if (data->apkpwr_dbg) { + /* regs */ + pfile = debugfs_create_file("regs", 0600, + data->apkpwr_dbg, data, + &apkpwr_dbg_regs_fops); 
+ if (!pfile) { + dev_err(&client->dev, + "debugfs regs for %s failed\n", name); + } + } else { + dev_err(&client->dev, "debugfs entry for %s failed\n", name); + } +} /* apkpwr_dbgfs_init */ + +/** + * apkpwr_dbgfs_exit - clear out debugfs entries + **/ +void apkpwr_dbgfs_exit(struct i2c_client *client) +{ + struct apkpwr_data *data = i2c_get_clientdata(client); + + if (data->apkpwr_dbg) + debugfs_remove_recursive(data->apkpwr_dbg); + data->apkpwr_dbg = NULL; +} /* apkpwr_dbgfs_exit */ + +#endif /* CONFIG_DEBUG_FS */ + + +/*---------------------------------------------------------------------------*/ +/* HWMON */ +/*---------------------------------------------------------------------------*/ + +static struct apkpwr_data *apkpwr_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct apkpwr_data *data = i2c_get_clientdata(client); + + apkpwr_update_data(data); + + return data; +} /* apkpwr_update_device */ + + +#define APKPWR_FUNC_SHOWLABEL(NAME, LABEL) \ +static ssize_t NAME(struct device *dev, struct device_attribute *attr, \ + char *buf) \ +{ \ + return snprintf(buf, PAGE_SIZE - 1, "%s\n", LABEL); \ +} + +#define APKPWR_FUNC_SHOWRAW(NAME, VAR) \ +static ssize_t NAME(struct device *dev, struct device_attribute *attr, \ + char *buf) \ +{ \ + struct apkpwr_data *data = apkpwr_update_device(dev); \ + return snprintf(buf, PAGE_SIZE - 1, "0x%04X\n", data->VAR); \ +} + + +/* APKPWR_REG16_RW_FLAGS */ +static ssize_t show_flags(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct apkpwr_data *data = apkpwr_update_device(dev); + + /* attr specific */ + if (data->flags_st < 0) { + snprintf(buf, PAGE_SIZE - 1, "error\n"); + } else { + snprintf(buf, PAGE_SIZE - 1, + "%sACP\n" + "%sBATSEL\n" + "%sFAULT_ICL\n", + (data->flags & FLAG_ACP) ? "+" : "-", + (data->flags & CMD_BATSEL) ? "+" : "-", + (data->flags & FLAG_FAULT_ICL) ? 
"+" : "-"); + } + + return strlen(buf); +} /* show_flags */ + +/* APKPWR_REG16_RO_BATFLAGS */ +static ssize_t show_batflags(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct apkpwr_data *data = apkpwr_update_device(dev); + + /* attr specific */ + if (data->batflags_st < 0) { + snprintf(buf, PAGE_SIZE - 1, "error\n"); + } else { + snprintf(buf, PAGE_SIZE - 1, + "%sI_ACC_STAT\n" + "%sI_CHARGE_COMPLET\n" + "%sI_FAULT_SHUTDN\n" + "%sI_FAULT_LOW_VOLT\n" + "%sI_FAULT_OVERTEMP\n" + "%sI_FAULT_OVERTEMP2\n" + "%sI_FAULT_LOW_VOLT2\n" + "%sI_ACC_PRESENSE\n" + "%sE_ACC_STAT\n" + "%sE_CHARGE_COMPLET\n" + "%sE_FAULT_SHUTDN\n" + "%sE_FAULT_LOW_VOLT\n" + "%sE_FAULT_OVERTEMP\n" + "%sE_FAULT_OVERTEMP2\n" + "%sE_FAULT_LOW_VOLT2\n" + "%sE_ACC_PRESENSE\n", + (data->batflags & FLAG_ACC_STAT_I) ? "+" : "-", + (data->batflags & FLAG_CHARGE_COMPLET_I) ? "+" : "-", + (data->batflags & FLAG_FAULT_SHUTDN_I) ? "+" : "-", + (data->batflags & FLAG_FAULT_LOW_VOLT_I) ? "+" : "-", + (data->batflags & FLAG_FAULT_OVERTEMP_I) ? "+" : "-", + (data->batflags & FLAG_FAULT_OVERTEMP2_I) ? "+" : "-", + (data->batflags & FLAG_FAULT_LOW_VOLT2_I) ? "+" : "-", + (data->batflags & FLAG_ACC_PRESENSE_I) ? "+" : "-", + (data->batflags & FLAG_ACC_STAT_E) ? "+" : "-", + (data->batflags & FLAG_CHARGE_COMPLET_E) ? "+" : "-", + (data->batflags & FLAG_FAULT_SHUTDN_E) ? "+" : "-", + (data->batflags & FLAG_FAULT_LOW_VOLT_E) ? "+" : "-", + (data->batflags & FLAG_FAULT_OVERTEMP_E) ? "+" : "-", + (data->batflags & FLAG_FAULT_OVERTEMP2_E) ? "+" : "-", + (data->batflags & FLAG_FAULT_LOW_VOLT2_E) ? "+" : "-", + (data->batflags & FLAG_ACC_PRESENSE_E) ? 
"+" : "-"); + } + + return strlen(buf); +} /* show_batflags */ + +/* APKPWR_REG16_RO_LEDS */ +static ssize_t show_ledflags(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct apkpwr_data *data = apkpwr_update_device(dev); + + /* attr specific */ + if (data->ledflags_st < 0) { + snprintf(buf, PAGE_SIZE - 1, "error\n"); + } else { + snprintf(buf, PAGE_SIZE - 1, + "%sI_LED_FAULT\n" + "%sI_LED_RED\n" + "%sI_LED_RED_PULSE\n" + "%sI_LED_GREEN_PULSE\n" + "%sI_LED_GREEN\n" + "%sE_LED_FAULT\n" + "%sE_LED_RED\n" + "%sE_LED_RED_PULSE\n" + "%sE_LED_GREEN_PULSE\n" + "%sE_LED_GREEN\n", + (data->ledflags & FLAG_LED_FAULT_I) ? "+" : "-", + (data->ledflags & FLAG_LED_RED_I) ? "+" : "-", + (data->ledflags & FLAG_LED_RED_PULSE_I) ? "+" : "-", + (data->ledflags & FLAG_LED_GREEN_PULSE_I) ? "+" : "-", + (data->ledflags & FLAG_LED_GREEN_I) ? "+" : "-", + (data->ledflags & FLAG_LED_FAULT_E) ? "+" : "-", + (data->ledflags & FLAG_LED_RED_E) ? "+" : "-", + (data->ledflags & FLAG_LED_RED_PULSE_E) ? "+" : "-", + (data->ledflags & FLAG_LED_GREEN_PULSE_E) ? "+" : "-", + (data->ledflags & FLAG_LED_GREEN_E) ? 
"+" : "-"); + } + + return strlen(buf); +} /* show_ledflags */ + +/* APKPWR_REG16_RO_ADC_VBAT_I */ +static ssize_t show_vbat_i(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct apkpwr_data *data = apkpwr_update_device(dev); + + /* attr specific */ + if (data->vbat_i_st < 0) { + snprintf(buf, PAGE_SIZE - 1, "error\n"); + } else { + if (data->vbat_i >= ADC_VBAT_MAX) { + snprintf(buf, PAGE_SIZE - 1, "Unknown\n"); + } else { + snprintf(buf, PAGE_SIZE - 1, "%d\n", + VBATmV_FROM_ADC(data->vbat_i)); + } + } + + return strlen(buf); +} /* show_vbat_i */ + +/* APKPWR_REG16_RO_ADC_THERM_I */ +static ssize_t show_therm_i(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct apkpwr_data *data = apkpwr_update_device(dev); + + /* attr specific */ + if (data->therm_i_st < 0) { + snprintf(buf, PAGE_SIZE - 1, "error\n"); + } else { + if (data->therm_i >= ADC_THERM_MAX) { + snprintf(buf, PAGE_SIZE - 1, "Unknown\n"); + } else { + snprintf(buf, PAGE_SIZE - 1, "%d\n", + TBAToC_FROM_ADC(data->therm_i)); + } + } + + return strlen(buf); +} /* show_therm_i */ + +/* APKPWR_REG16_RO_ADC_VBAT_E */ +static ssize_t show_vbat_e(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct apkpwr_data *data = apkpwr_update_device(dev); + + /* attr specific */ + if (data->vbat_e_st < 0) { + snprintf(buf, PAGE_SIZE - 1, "error\n"); + } else { + if (data->vbat_e >= ADC_VBAT_MAX) { + snprintf(buf, PAGE_SIZE - 1, "Unknown\n"); + } else { + snprintf(buf, PAGE_SIZE - 1, "%d\n", + VBATmV_FROM_ADC(data->vbat_e)); + } + } + + return strlen(buf); +} /* show_vbat_e */ + +/* APKPWR_REG16_RO_ADC_THERM_E */ +static ssize_t show_therm_e(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct apkpwr_data *data = apkpwr_update_device(dev); + + /* attr specific */ + if (data->therm_e_st < 0) { + snprintf(buf, PAGE_SIZE - 1, "error\n"); + } else { + if (data->therm_e >= ADC_THERM_MAX) { + snprintf(buf, PAGE_SIZE - 1, "Unknown\n"); + } 
else { + snprintf(buf, PAGE_SIZE - 1, "%d\n", + TBAToC_FROM_ADC(data->therm_e)); + } + } + + return strlen(buf); +} /* show_therm_e */ + +/* APKPWR_REG16_RO_ADC_V12 */ +static ssize_t show_v12(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct apkpwr_data *data = apkpwr_update_device(dev); + + /* attr specific */ + if (data->v12_st < 0) { + snprintf(buf, PAGE_SIZE - 1, "error\n"); + } else { + if (data->v12 >= ADC_V12OUT_MAX) { + snprintf(buf, PAGE_SIZE - 1, "Unknown\n"); + } else { + snprintf(buf, PAGE_SIZE - 1, "%d\n", + V12mV_FROM_ADC(data->v12)); + } + } + + return strlen(buf); +} /* show_v12 */ + +/* APKPWR_REG16_RO_ADC_VSYS */ +static ssize_t show_vsys(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct apkpwr_data *data = apkpwr_update_device(dev); + + /* attr specific */ + if (data->vsys_st < 0) { + snprintf(buf, PAGE_SIZE - 1, "error\n"); + } else { + if (data->vsys >= ADC_VSYS_MAX) { + snprintf(buf, PAGE_SIZE - 1, "Unknown\n"); + } else { + snprintf(buf, PAGE_SIZE - 1, "%d\n", + VSYSmV_FROM_ADC(data->vsys)); + } + } + + return strlen(buf); +} /* show_vsys */ + +/* APKPWR_REG16_RO_ADC_5VSB */ +static ssize_t show_v5sb(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct apkpwr_data *data = apkpwr_update_device(dev); + + /* attr specific */ + if (data->v5sb_st < 0) { + snprintf(buf, PAGE_SIZE - 1, "error\n"); + } else { + if (data->v5sb >= ADC_V5SB_MAX) { + snprintf(buf, PAGE_SIZE - 1, "Unknown\n"); + } else { + snprintf(buf, PAGE_SIZE - 1, "%d\n", + V5SBmV_FROM_ADC(data->v5sb)); + } + } + + return strlen(buf); +} /* show_v5sb */ + + +APKPWR_FUNC_SHOWRAW(show_flags_raw, flags) +APKPWR_FUNC_SHOWRAW(show_batflags_raw, batflags) +APKPWR_FUNC_SHOWRAW(show_ledflags_raw, ledflags) +APKPWR_FUNC_SHOWRAW(show_vbat_i_raw, vbat_i) +APKPWR_FUNC_SHOWRAW(show_therm_i_raw, therm_i) +APKPWR_FUNC_SHOWRAW(show_vbat_e_raw, vbat_e) +APKPWR_FUNC_SHOWRAW(show_therm_e_raw, therm_e) 
+APKPWR_FUNC_SHOWRAW(show_v12_raw, v12) +APKPWR_FUNC_SHOWRAW(show_vsys_raw, vsys) +APKPWR_FUNC_SHOWRAW(show_v5sb_raw, v5sb) + +APKPWR_FUNC_SHOWLABEL(show_vbat_i_l, "Internal Battery Voltage") +APKPWR_FUNC_SHOWLABEL(show_therm_i_l, "Internal Battery Temperature") +APKPWR_FUNC_SHOWLABEL(show_vbat_e_l, "External Battery Voltage") +APKPWR_FUNC_SHOWLABEL(show_therm_e_l, "External Battery Temperature") +APKPWR_FUNC_SHOWLABEL(show_v12_l, "+12 Voltage") +APKPWR_FUNC_SHOWLABEL(show_vsys_l, "System Voltage") +APKPWR_FUNC_SHOWLABEL(show_v5sb_l, "+5 SB Voltage") + + +static DEVICE_ATTR(flags, S_IRUGO, show_flags, NULL); +static DEVICE_ATTR(flags_raw, S_IRUGO, show_flags_raw, NULL); +static DEVICE_ATTR(batflags, S_IRUGO, show_batflags, NULL); +static DEVICE_ATTR(batflags_raw, S_IRUGO, show_batflags_raw, NULL); +static DEVICE_ATTR(ledflags, S_IRUGO, show_ledflags, NULL); +static DEVICE_ATTR(ledflags_raw, S_IRUGO, show_ledflags_raw, NULL); +static DEVICE_ATTR(in1_input, S_IRUGO, show_vbat_i, NULL); +static DEVICE_ATTR(in1_raw, S_IRUGO, show_vbat_i_raw, NULL); +static DEVICE_ATTR(in1_label, S_IRUGO, show_vbat_i_l, NULL); +static DEVICE_ATTR(temp1_input, S_IRUGO, show_therm_i, NULL); +static DEVICE_ATTR(temp1_raw, S_IRUGO, show_therm_i_raw, NULL); +static DEVICE_ATTR(temp1_label, S_IRUGO, show_therm_i_l, NULL); +static DEVICE_ATTR(in2_input, S_IRUGO, show_vbat_e, NULL); +static DEVICE_ATTR(in2_raw, S_IRUGO, show_vbat_e_raw, NULL); +static DEVICE_ATTR(in2_label, S_IRUGO, show_vbat_e_l, NULL); +static DEVICE_ATTR(temp2_input, S_IRUGO, show_therm_e, NULL); +static DEVICE_ATTR(temp2_raw, S_IRUGO, show_therm_e_raw, NULL); +static DEVICE_ATTR(temp2_label, S_IRUGO, show_therm_e_l, NULL); +static DEVICE_ATTR(in3_input, S_IRUGO, show_v12, NULL); +static DEVICE_ATTR(in3_raw, S_IRUGO, show_v12_raw, NULL); +static DEVICE_ATTR(in3_label, S_IRUGO, show_v12_l, NULL); +static DEVICE_ATTR(in4_input, S_IRUGO, show_vsys, NULL); +static DEVICE_ATTR(in4_raw, S_IRUGO, show_vsys_raw, NULL); +static 
DEVICE_ATTR(in4_label, S_IRUGO, show_vsys_l, NULL); +static DEVICE_ATTR(in5_input, S_IRUGO, show_v5sb, NULL); +static DEVICE_ATTR(in5_raw, S_IRUGO, show_v5sb_raw, NULL); +static DEVICE_ATTR(in5_label, S_IRUGO, show_v5sb_l, NULL); + +static struct attribute *apkpwr_attributes[] = { + &dev_attr_flags.attr, + &dev_attr_flags_raw.attr, + &dev_attr_batflags.attr, + &dev_attr_batflags_raw.attr, + &dev_attr_ledflags.attr, + &dev_attr_ledflags_raw.attr, + &dev_attr_in1_input.attr, + &dev_attr_in1_raw.attr, + &dev_attr_in1_label.attr, + &dev_attr_temp1_input.attr, + &dev_attr_temp1_raw.attr, + &dev_attr_temp1_label.attr, + &dev_attr_in2_input.attr, + &dev_attr_in2_raw.attr, + &dev_attr_in2_label.attr, + &dev_attr_temp2_input.attr, + &dev_attr_temp2_raw.attr, + &dev_attr_temp2_label.attr, + &dev_attr_in3_input.attr, + &dev_attr_in3_raw.attr, + &dev_attr_in3_label.attr, + &dev_attr_in4_input.attr, + &dev_attr_in4_raw.attr, + &dev_attr_in4_label.attr, + &dev_attr_in5_input.attr, + &dev_attr_in5_raw.attr, + &dev_attr_in5_label.attr, + NULL +}; + +static const struct attribute_group apkpwr_group = { + .attrs = apkpwr_attributes, +}; + + +/*---------------------------------------------------------------------------*/ +/* POWER_SUPPLY */ +/*---------------------------------------------------------------------------*/ + +#ifdef APKPWR_POWER_SUPPLY + +static int apkpwr_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + /*struct apkpwr_data *data = container_of(psy, struct apkpwr_data, + bat_data[0].battery);*/ + struct apkpwr_data *data = power_supply_get_drvdata(psy); + + apkpwr_update_data(data); + + switch (psp) { + case POWER_SUPPLY_PROP_STATUS: + val->intval = data->bat_data[0].status; + break; + case POWER_SUPPLY_PROP_HEALTH: + val->intval = data->bat_data[0].health; + break; + case POWER_SUPPLY_PROP_TECHNOLOGY: + val->intval = POWER_SUPPLY_TECHNOLOGY_LION; + break; + case POWER_SUPPLY_PROP_PRESENT: + val->intval = 
data->bat_data[0].present; + break; + case POWER_SUPPLY_PROP_ONLINE: + val->intval = data->bat_data[0].online; + break; + case POWER_SUPPLY_PROP_CAPACITY_LEVEL: + val->intval = data->bat_data[0].level; + break; + + case POWER_SUPPLY_PROP_VOLTAGE_MAX: + val->intval = CHARGE_MAX * 1000; + break; + case POWER_SUPPLY_PROP_VOLTAGE_MIN: + val->intval = CHARGE_MIN * 1000; + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + val->intval = data->bat_data[0].voltage; + break; + case POWER_SUPPLY_PROP_CAPACITY: + val->intval = data->bat_data[0].capacity; + break; + case POWER_SUPPLY_PROP_TEMP: + val->intval = data->bat_data[0].battemp; + break; + + default: + return -EINVAL; + } + return 0; +} /* apkpwr_get_property */ + + +static enum power_supply_property apkpwr_battery_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_CAPACITY_LEVEL, + + POWER_SUPPLY_PROP_VOLTAGE_MAX, + POWER_SUPPLY_PROP_VOLTAGE_MIN, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CAPACITY, /* in percents! 
*/ + POWER_SUPPLY_PROP_TEMP, +}; + +#endif + + +/*---------------------------------------------------------------------------*/ +/* I2C driver part */ +/*---------------------------------------------------------------------------*/ + +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int apkpwr_detect(struct i2c_client *new_client, + struct i2c_board_info *info) +{ + struct i2c_adapter *adapter = new_client->adapter; + int address = new_client->addr; + const char *name = DRIVER_NAME; + s32 manid; + +#ifdef DEBUG + dev_info(&adapter->dev, "apkpwr_probe\n"); +#endif /* DEBUG */ + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -ENODEV; + + manid = apkpwr_read_word(new_client, APKPWR_REG16_RO_ID); + if (manid != MANUFACTURER_ID) { + dev_err(&adapter->dev, + "apkpwr detection failed: " + "bad manufacturer id 0x%04x at 0x%02x\n", + (u16)manid, address); + return -ENODEV; + } + dev_info(&adapter->dev, "apkpwr detected at 0x%02x\n", address); + + /* Fill the i2c board info */ + strlcpy(info->type, name, I2C_NAME_SIZE); + return 0; +} /* apkpwr_detect */ + + +static int apkpwr_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct apkpwr_data *data; + struct power_supply_config psy_cfg = {}; + int err; + +#ifdef DEBUG + dev_info(&client->adapter->dev, "apkpwr_probe\n"); +#endif /* DEBUG */ + + data = kzalloc(sizeof(struct apkpwr_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + data->client = client; + mutex_init(&data->update_lock); + + data->rate = HZ; /* 1 sec default */ + data->valid = 0; + + /* Register sysfs hooks */ + err = sysfs_create_group(&client->dev.kobj, &apkpwr_group); + if (err) + goto exit_free; + + /* hwmon */ + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + err = PTR_ERR(data->hwmon_dev); + goto exit_remove_files; + } + dev_info(&client->dev, "hwmon device registered\n"); + + +#ifdef 
APKPWR_POWER_SUPPLY + /* power_supply */ + psy_cfg.drv_data = data; + + data->bat_data[0].battery_desc.name = kasprintf(GFP_KERNEL, + "%s-%d", + client->name, + 0/*num*/); + if (!data->bat_data[0].battery_desc.name) { + err = -ENOMEM; + goto exit_remove_files; + } + data->bat_data[0].battery_desc.type = + POWER_SUPPLY_TYPE_BATTERY; + data->bat_data[0].battery_desc.get_property = + apkpwr_get_property; + data->bat_data[0].battery_desc.properties = + apkpwr_battery_props; + data->bat_data[0].battery_desc.num_properties = + ARRAY_SIZE(apkpwr_battery_props); + + data->bat_data[0].battery = power_supply_register(&client->dev, + &data->bat_data[0].battery_desc, + &psy_cfg); + if (IS_ERR(data->bat_data[0].battery)) { + dev_err(&client->dev, "failed: power supply register\n"); + goto hwmon_unregister; + } + dev_info(&client->dev, "power_supply device registered\n"); +#endif /* APKPWR_POWER_SUPPLY */ + +#ifdef CONFIG_DEBUG_FS + apkpwr_dbgfs_init(client); +#endif /* CONFIG_DEBUG_FS */ + +#ifdef DEBUG + dump_all_regs(client); +#endif /* DEBUG */ + + return 0; + +#ifdef APKPWR_POWER_SUPPLY +hwmon_unregister: +#endif /* APKPWR_POWER_SUPPLY */ + hwmon_device_unregister(data->hwmon_dev); +exit_remove_files: + sysfs_remove_group(&client->dev.kobj, &apkpwr_group); +exit_free: + kfree(data); +exit: + return err; +} /* apkpwr_probe */ + + +static int apkpwr_remove(struct i2c_client *client) +{ + struct apkpwr_data *data = i2c_get_clientdata(client); + +#ifdef CONFIG_DEBUG_FS + apkpwr_dbgfs_exit(client); +#endif /* CONFIG_DEBUG_FS */ + +#ifdef APKPWR_POWER_SUPPLY + power_supply_unregister(data->bat_data[0].battery); +#endif /* APKPWR_POWER_SUPPLY */ + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &apkpwr_group); + + i2c_set_clientdata(client, NULL); + mutex_destroy(&data->update_lock); + kfree(data); + + return 0; +} /* apkpwr_remove */ + + +/*---------------------------------------------------------------------------*/ +/* Module part */ 
+/*---------------------------------------------------------------------------*/ + +static const unsigned short normal_i2c[] = {0x5A, I2C_CLIENT_END}; + +#ifdef CONFIG_OF +static const struct of_device_id apkpwr_of_match[] = { + { .compatible = "mcst,apkpwr" }, + { } +}; + +MODULE_DEVICE_TABLE(of, apkpwr_of_match); +#endif /* CONFIG_OF */ + +/* Driver data (common to all clients) */ +static const struct i2c_device_id apkpwr_id[] = { + {DRIVER_NAME, 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, apkpwr_id); + +static struct i2c_driver apkpwr_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = DRIVER_NAME, +#ifdef CONFIG_OF + .of_match_table = of_match_ptr(apkpwr_of_match), +#endif /* CONFIG_OF */ + .owner = THIS_MODULE, + }, + .probe = apkpwr_probe, + .remove = apkpwr_remove, + .id_table = apkpwr_id, + .detect = apkpwr_detect, + .address_list = normal_i2c, +}; + +static int __init apkpwr_init(void) +{ + int err; +#ifdef DEBUG + printk(KERN_INFO "apkpwr_init\n"); +#endif /* DEBUG */ + + err = i2c_add_driver(&apkpwr_driver); +#ifdef CONFIG_DEBUG_FS + apkpwr_dbg_root = debugfs_create_dir(DRIVER_NAME, NULL); +#endif /* CONFIG_DEBUG_FS */ + return err; +} + +static void __exit apkpwr_exit(void) +{ +#ifdef CONFIG_DEBUG_FS + debugfs_remove_recursive(apkpwr_dbg_root); +#endif /* CONFIG_DEBUG_FS */ + i2c_del_driver(&apkpwr_driver); +} + + +MODULE_AUTHOR("Andrey.V.Kalita@mcst.ru"); +MODULE_DESCRIPTION("apkpwr driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRIVER_VERSION); + +module_init(apkpwr_init); +module_exit(apkpwr_exit); diff --git a/drivers/mcst/ddi/Makefile b/drivers/mcst/ddi/Makefile new file mode 100644 index 000000000000..4bf6b4f86feb --- /dev/null +++ b/drivers/mcst/ddi/Makefile @@ -0,0 +1,15 @@ +# Device drivers Interface + +# for sbus and pci-to-sbus module + +obj-$(CONFIG_DDI) += ddidrv.o + +ifdef CONFIG_DDI +ddidrv-objs := ddi.o \ + ddi_cv.o \ + ddi_arch.o +endif + + + + diff --git a/drivers/mcst/ddi/ddi.c b/drivers/mcst/ddi/ddi.c new file mode 100644 
index 000000000000..6effb4f4fb80 --- /dev/null +++ b/drivers/mcst/ddi/ddi.c @@ -0,0 +1,494 @@ +/* + * DDI for Linux. (SVS) + * + * Supported by Alexey V. Sitnikov, alexmipt@mcst.ru, MCST + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if IS_ENABLED(CONFIG_PCI2SBUS) +#include +#endif + + +EXPORT_SYMBOL(ddi_copyout); +EXPORT_SYMBOL(ddi_copyin); + +EXPORT_SYMBOL(ddi_init_soft); +EXPORT_SYMBOL(drv_getparm); + +EXPORT_SYMBOL(ddi_getl); +EXPORT_SYMBOL(ddi_putl); +EXPORT_SYMBOL(ddi_write_long); +EXPORT_SYMBOL(ddi_read_long); +EXPORT_SYMBOL(ddi_dma_mem_alloc); +EXPORT_SYMBOL(ddi_dma_mem_free); +EXPORT_SYMBOL(ddi_dma_mem_map); +EXPORT_SYMBOL(ddi_dma_mem_unmap); + +EXPORT_SYMBOL(drv_usectohz); +EXPORT_SYMBOL(ddi_gethrtime); + +EXPORT_SYMBOL(ddi_remap_page); +EXPORT_SYMBOL(ddi_dma_sync); + +EXPORT_SYMBOL(ddi_cv_wait); +EXPORT_SYMBOL(ddi_cv_timedwait); +EXPORT_SYMBOL(ddi_cv_spin_wait); +EXPORT_SYMBOL(ddi_cv_spin_timedwait); +EXPORT_SYMBOL(ddi_cv_broadcast); + +EXPORT_SYMBOL(ddi_poll_wait); + +EXPORT_SYMBOL(ddi_fls); +EXPORT_SYMBOL(ddi_malloc); + +#define __FFS 0 + +#define DBGDDI_MODE 0 +#define DBGDDIDETAIL_MODE 0 +#define dbgddi if (DBGDDI_MODE) printk +#define dbgddidetail if (DBGDDIDETAIL_MODE) printk + +int curr_drv_nr = 0; +dev_info_t ddi_dev_info[MCST_MAX_DRV]; + +/* + * All MCST driver's names (as "MCST," and dirs as "drv_name") + * It is mcst drv rooles (see ddi_create_minor()) + */ + +char *ddi_drivers[MCST_MAX_DRV]; +char *ddi_drv_dir[MCST_MAX_DRV]; + +unsigned short ddi_vendors[MCST_MAX_DRV]; +unsigned short ddi_devices[MCST_MAX_DRV]; + +int +ddi_max_drv_nr(void) +{ + int i = 0; + + for (;;) { + if (ddi_drivers[i] == NULL) return i - 1; + i++; + } + return 0; +} + +int +ddi_max_drv_dir_nr(void) +{ + int i = 0; + + for (;;) { + if (ddi_drv_dir[i] == NULL) return i - 1; + i++; + } + return 0; +} + +int +ddi_get_drv_nr(char *prom_name, int inst) +{ + int i = 0; + int j = 0; // added + + 
dbgddi("ddi_get_drv_nr: start\n"); + for (;;) { + if (ddi_drivers[i] == NULL) return 0; + if (strcmp(ddi_drivers[i], prom_name) == 0) { + if (j == inst) { + dbgddi("ddi_get_drv_nr: ret %d for %s\n", i, prom_name); + return i; + } + j++; + } + i++; + } + dbgddi("ddi_get_drv_nr: ret 0 for %s\n", prom_name); + return 0; +} + +/* Find the first bit set in I. */ +#if __FFS +int +__ffs (int i) +{ + static const unsigned char table[] = + { + 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, + 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8 + }; + unsigned int a; + unsigned int x = i & -i; + + a = x <= 0xffff ? (x <= 0xff ? 0 : 8) : (x <= 0xffffff ? 
16 : 24); + + return table[x >> a] + a; +} +#endif /* __FFS */ + +int +ddi_fls(register long mask) +{ +#if __FFS + extern int ffs(long); +#endif /* __FFS */ + while (mask) { + register long nx; + + if ((nx = (mask & (mask - 1))) == 0) + break; + mask = nx; + } + return (__ffs(mask)); +} + +void * +ddi_malloc(size_t sz) +{ + void *p; + dbgddi("ddi_malloc: start\n"); + p = kmalloc(sz, GFP_KERNEL); + if (p) { + memset((char *)p, 0, sz); + } + return p; +} + +int +ddi_init_soft(dev_info_t *dip, size_t size) +{ + + dbgddi("ddi_init_soft: start\n"); + if (dip == NULL) { + printk("ddi_init_soft: dip == NULL\n"); + return -EFAULT; + } + dip->soft_state = ddi_malloc(size); + if (dip->soft_state == NULL) { + printk("ddi_init_soft: dip->soft_state == NULL\n"); + return (-EFAULT); + } + dip->soft_state_sz = size; + + dbgddi("ddi_init_soft: before memset operation\n"); + memset(dip->soft_state, 0, dip->soft_state_sz); + dbgddi("ddi_init_soft: finish\n"); + return 0; +} +int +ddi_dma_mem_map(struct device *dev, size_t len, dma_addr_t *dev_memory, size_t *real_size, + unsigned long dma_memory) +{ + size_t size; + dma_addr_t mem; + + dbgddi("*** ddi_dma_mem_map: start ***\n"); + size = *real_size; + mem = ddi_dev_map_mem(dev, size, dma_memory); + if (!mem) return -1; + *dev_memory = mem; + dbgddi("*** ddi_dma_mem_map: finish ***\n"); + return 0; +} + +int +ddi_dma_sync(struct device *dev, dma_addr_t addr, size_t size, int direction) +{ + return(_ddi_dma_sync(dev, addr, size, direction)); +} + +/* dev_memory - Memory from the device side */ +/* dma_memory - Memory from the processor side */ +int +ddi_dma_mem_alloc(struct device *dev, size_t len, dma_addr_t *dev_memory, size_t *real_size, + unsigned long *dma_memory) +{ + dma_addr_t mem; + + dbgddi("*** ddi_dma_mem_alloc: start, len = %ld ***\n", (u_long)len); + mem = ddi_dev_alloc_mem(dev, len, dma_memory); + if (!mem) return -1; + *dev_memory = mem; + dbgddi("*** ddi_dma_mem_alloc: finish ***\n"); + return 0; +} +/* dev_memory - 
Memory from the device side */ +/* dma_memory - Memory from the processor side */ + +void +ddi_dma_mem_free(struct device *dev, size_t size, dma_addr_t dev_memory, unsigned long dma_memory) +{ + dbgddi("ddi_dma_mem_free: start\n"); + ddi_dev_free_mem(dev, size, dma_memory, dev_memory); + dbgddi("ddi_dma_mem_free: finish\n"); +} + +/* dev_memory - Memory from the device side */ +/* dma_memory - Memory from the processor side */ + +void +ddi_dma_mem_unmap(struct device *dev, size_t size, dma_addr_t dev_memory, unsigned long dma_memory) +{ + int order; + caddr_t mem; + struct page *map, *mapend; + + dbgddi("ddi_dma_mem_unmap: start\n"); + mem = (caddr_t)dma_memory; + order = ddi_get_order(size); + mapend = virt_to_page(mem + (PAGE_SIZE << order) - 1); + for (map = virt_to_page(mem); map <= mapend; map++) { + ClearPageReserved(map); + } + ddi_dev_unmap_mem(dev, size, dma_memory, dev_memory); + dbgddi("ddi_dma_mem_unmap: finish\n"); +} + +int +ddi_copyin(void *from, void *to, size_t size) +{ + size_t rval; + + dbgddi("*** ddi_copyin: start, size = %lx ***\n", (u_long)size); + rval = copy_from_user(to, from, size); + dbgddi("ddi_copyin: rval = copy_from_user = %lx\n", (u_long)rval); + dbgddi("*** ddi_copyin: finish ***\n"); + return rval; +} +int +ddi_copyout(void *from, void *to, size_t size) +{ + size_t rval; + rval = copy_to_user(to, from, size); + return rval; +} + +int +ddi_remap_page(void *va, size_t sz, struct vm_area_struct *vma) +{ + unsigned long pha; + unsigned long vm_end; + unsigned long vm_start; + unsigned long vm_pgoff; + size_t size; + + dbgddi("**** ddi_remap_page: START ****\n"); + if (!sz) return -EINVAL; + pha = virt_to_phys(va); + size = (long )PAGE_ALIGN((pha & ~PAGE_MASK) + sz); +// if ((vma->vm_pgoff << PAGE_SHIFT) > size) return -ENXIO; + pha += (vma->vm_pgoff << PAGE_SHIFT); + vm_end = vma->vm_end; + vm_start = vma->vm_start; + vm_pgoff = vma->vm_pgoff; + + if ((vm_end - vm_start) < size) + size = vm_end - vm_start; + + vma->vm_flags |= 
(VM_READ | VM_WRITE); + +#if defined(CONFIG_E90) && !defined(STRICT_MM_TYPECHECKS) + dbgddidetail("ddi_remap_page: vm_start = 0x%lx, pha = 0x%lx, \n" + " size = %x, vma->vm_page_prot = %lx\n", + (unsigned long)vm_start, (unsigned long)pha, + (int)size, vma->vm_page_prot); +#else + dbgddidetail("ddi_remap_page: vm_start = 0x%lx, pha = 0x%lx, \n" + " size = %x, vma->vm_page_prot = %lx\n", + (unsigned long)vm_start, (unsigned long)pha, + (int)size, vma->vm_page_prot.pgprot); +#endif +#ifdef CONFIG_E2K + if (vma->vm_flags & VM_IO) + vma->vm_page_prot = + __pgprot(_PAGE_SET_MEM_TYPE( + pgprot_val(vma->vm_page_prot), + EXT_CONFIG_MT)); +#endif /* CONFIG_E2K */ + + if (remap_pfn_range(vma, vma->vm_start, (pha >> PAGE_SHIFT), size, + vma->vm_page_prot)){ + pr_info("ddi_remap_page: remap_pfn_range failed\n"); + return -EAGAIN; + } + + dbgddi("**** ddi_remap_page: FINISH ****\n"); + return 0; +} +int +drv_getparm(unsigned long parm, unsigned long *valuep) +{ + dbgddi("drv_getparm: start\n"); + switch (parm) { + case LBOLT: + *valuep = (unsigned long)jiffies; + break; + default: + printk("drv_get_parm: Unknown parm %ld\n", parm); + return (-1); + } + dbgddi("drv_getparm: finish\n"); + return 0; + } +void ddi_settime(struct timespec *tick, long mksec) +{ + time_t bt,ht; + + dbgddi("ddi_settime: start\n"); + bt = mksec % 1000000; + ht = mksec / 1000000; + tick->tv_sec = ht; + tick->tv_nsec = bt * 1000; +} +/* Convert mksec to HZ */ +clock_t +drv_usectohz(register clock_t mksec) +{ + clock_t clock; + struct timespec rqtp; + + dbgddi("drv_usectohz: start, mksec = 0x%lx\n", mksec); + rqtp.tv_nsec = ((mksec % 1000000L) * 1000L); + rqtp.tv_sec = mksec / 1000000L; + dbgddi("drv_usectohz: rqtp.tv_nsec = 0x%lx, rqtp.tv_sec = 0x%lx\n", + rqtp.tv_nsec, rqtp.tv_sec); + clock = timespec_to_jiffies(&rqtp); + return (clock); +} + +/* Returns nanoseconds */ +hrtime_t +ddi_gethrtime(void) +{ + struct timespec64 ts; + hrtime_t val; + ktime_get_real_ts64(&ts); + val = ts.tv_sec * 1000000000LL + 
ts.tv_nsec; + return (val); +} + +extern int _ddi_read_long(int dev_type, ulong_t *p); +int +ddi_getl(int t, ulong_t *p) +{ + return (_ddi_read_long(t,p)); +} + +extern void _ddi_write_long(int dev_type, ulong_t *p, ulong_t b); +void +ddi_putl(int t, ulong_t *p, ulong_t b) +{ + return (_ddi_write_long(t, p, b)); +} +int +ddi_read_long(int t, ulong_t *p, ulong_t b) +{ + return (_ddi_read_long(t, p)); +} +void +ddi_write_long(int t, ulong_t *p, ulong_t b) +{ + return (_ddi_write_long(t, p, b)); +} + +void +ddi_poll_wait(struct file * filp, + wait_queue_head_t *wait_address, + poll_table *p) +{ + poll_wait(filp, wait_address, p); +} + +extern int wake_up_state(struct task_struct *p, unsigned int state); +static void __raw_wake_up_common(raw_wait_queue_head_t *q) +{ + struct list_head *tmp, *next; + raw_wait_queue_t *curr; + + list_for_each_safe(tmp, next, &q->task_list) { + curr = list_entry(tmp, raw_wait_queue_t, task_list); + wake_up_state(curr->task, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE); + } +} + +/** + * __wake_up - wake up threads blocked on a waitqueue. 
+ * @q: the waitqueue + * @mode: which threads + */ +void __raw_wake_up(raw_wait_queue_head_t *q) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&q->lock, flags); + __raw_wake_up_common(q); + raw_spin_unlock_irqrestore(&q->lock, flags); +} + +EXPORT_SYMBOL(__raw_wake_up); + +static inline void __raw_add_wait_queue(raw_wait_queue_head_t *head, raw_wait_queue_t *new) +{ + list_add(&new->task_list, &head->task_list); +} + +void raw_add_wait_queue(raw_wait_queue_head_t *q, raw_wait_queue_t *wait) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&q->lock, flags); + __raw_add_wait_queue(q, wait); + raw_spin_unlock_irqrestore(&q->lock, flags); +} +EXPORT_SYMBOL(raw_add_wait_queue); + +static inline void __raw_remove_wait_queue(raw_wait_queue_head_t *head, raw_wait_queue_t *old) +{ + list_del(&old->task_list); +} +void raw_remove_wait_queue(raw_wait_queue_head_t *q, raw_wait_queue_t *wait) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&q->lock, flags); + __raw_remove_wait_queue(q, wait); + raw_spin_unlock_irqrestore(&q->lock, flags); +} +EXPORT_SYMBOL(raw_remove_wait_queue); + + +static int __init pci_ddi_init(void) +{ + return 0; +} + +static void __exit pci_ddi_exit(void) +{ +} + +MODULE_DESCRIPTION( "Device driver interface for PCI" ); +MODULE_AUTHOR ( "Alexey Sitnikov" ); +MODULE_LICENSE ( "GPL" ); + +module_init( pci_ddi_init ); +module_exit( pci_ddi_exit ); + diff --git a/drivers/mcst/ddi/ddi_arch.c b/drivers/mcst/ddi/ddi_arch.c new file mode 100644 index 000000000000..b9b313ec5d09 --- /dev/null +++ b/drivers/mcst/ddi/ddi_arch.c @@ -0,0 +1,157 @@ +/* + * Arch depended part of ddi_support + * + * Supported by Alexey V. 
Sitnikov, alexmipt@mcst.ru, MCST + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DBG_MODE 0 +#define dbgddi if (DBG_MODE) printk + +typedef struct sbus_dev sbus_dev_t; +typedef struct pci_dev pci_dev_t; + +struct pci_dev_info pci_dev_info[MCST_MAX_DRV] = { + MCST_DEVICE_DRIVERS +}; + +unsigned int +_ddi_read_long(int t, ulong_t *p) +{ + dbgddi("ddi_read_long: start\n"); + if (t == DDI_SBUS_SPARC) { +#if defined(CONFIG_SBUS) + return (sbus_readl((const volatile void __iomem *)p)); +#elif IS_ENABLED(CONFIG_PCI2SBUS) + return (my_sbus_readl((long)p)); +#else + printk("_ddi_read_long: Unconfigured dev_type = %d\n", t); + return 0; +#endif /* CONFIG_SBUS */ + + } else if (t == DDI_PCI_SPARC) { +#ifdef CONFIG_PCI + return (readl((const volatile void __iomem *)p)); +#else + printk("_ddi_read_long: Unconfigured dev_type = %d\n", t); + return 0; +#endif /* CONFIG_PCI */ + } else { + printk("_ddi_read_long: Unknown dev_type = %d\n", t); + return 0; + } +} + +unsigned int +_ddi_write_long(int t, ulong_t *p, ulong_t b) +{ + int v; + dbgddi("ddi_write_long: start, addr = 0x%lx, val = 0x%lx\n", (unsigned long)p, b); + v = b & 0xFFFFFFFF; + if (t == DDI_SBUS_SPARC) { +#if defined(CONFIG_SBUS) + sbus_writel(b, (volatile void __iomem *)p); +#elif IS_ENABLED(CONFIG_PCI2SBUS) + my_sbus_writel(b, (long)p); +#else + printk("_ddi_write_long: Unconfigured dev_type = %d\n", t); + return 0; +#endif /* CONFIG_SBUS */ + } else if (t == DDI_PCI_SPARC) { +#ifdef CONFIG_PCI + writel(b, (volatile void __iomem *)p); +#else + printk("_ddi_write_long: Unconfigured dev_type = %d\n", t); + return 0; +#endif /* CONFIG_PCI */ + }else { + printk("_ddi_write_long: Unknown dev_type = %d\n", t); + return 0; + } + return 1; +} + +extern int curr_drv_nr; + +extern char *ddi_drivers[]; +extern char *ddi_drv_dir[]; +extern unsigned short ddi_vendors[]; +extern unsigned short ddi_devices[]; + +/* dma_memory - Memory from the processor side */ 
+dma_addr_t +ddi_dev_map_mem(struct device *dev, size_t size, unsigned long dma_memory) +{ +// dma_addr_t mem; + dbgddi("** ddi_dev_map_mem: start **\n"); +// mem = sbus_map_single(dev, (void *)dma_memory, size, SBUS_DMA_FROMDEVICE); +// mem = pci_map_single(dev, (void *)dma_memory, size, PCI_DMA_FROMDEVICE); + dbgddi("** ddi_dev_map_mem: finish **\n"); +// return mem; + return 0; +} + +int +_ddi_dma_sync(struct device *dev, dma_addr_t addr, size_t size, int direction) +{ + dbgddi("** ddi_dma_sync: start **\n"); + dma_sync_single_for_cpu(dev, addr, size, direction); + dbgddi("** ddi_dma_sync: finish **\n"); + return 0; +} + +dma_addr_t +ddi_dev_alloc_mem(struct device *dev, size_t size, unsigned long *va) +{ + dma_addr_t mem; + + dbgddi("** ddi_dev_alloc_mem: start **\n"); + *va = (unsigned long)dma_alloc_coherent(dev, size, &mem, GFP_DMA); + dbgddi("** ddi_dev_alloc_mem: finish **\n"); + return mem; +} + +void +ddi_dev_free_mem(struct device *dev, size_t size, unsigned long va, dma_addr_t dma_addr) +{ + dbgddi("** ddi_dev_free_mem: start **\n"); + dbgddi("** ddi_dev_free_mem: dma_addr = 0x%lx, va = 0x%lx **\n", + (unsigned long)dma_addr, (unsigned long)va); + + dma_free_coherent(dev, size, (void *)va, dma_addr); + dbgddi("** ddi_dev_free_mem: finish **\n"); + return; +} + +/* dev_memory - Memory from the device side */ +/* dma_memory - Memory from the processor side */ +void +ddi_dev_unmap_mem(struct device *dev, size_t size, unsigned long dma_memory, dma_addr_t dev_memory) +{ + dbgddi("** ddi_dev_unamp_mem: start **\n"); +// sbus_unmap_single(dev, dev_memory, size, SBUS_DMA_FROMDEVICE); +// pci_unmap_single(dev, dev_memory, size, PCI_DMA_FROMDEVICE); + dbgddi("** ddi_dev_unamp_mem: dev_memory = 0x%lx, dma_memory = 0x%lx **\n", + (unsigned long)dev_memory, (unsigned long)dma_memory); + + dbgddi("** ddi_dev_unmap_mem: finish **\n"); + return; +} + +int +ddi_get_order(size_t sz) +{ + dbgddi("ddi_get_order: start\n"); + return(get_order(sz)); +} + diff --git 
a/drivers/mcst/ddi/ddi_cv.c b/drivers/mcst/ddi/ddi_cv.c new file mode 100644 index 000000000000..b3c15e4cc510 --- /dev/null +++ b/drivers/mcst/ddi/ddi_cv.c @@ -0,0 +1,175 @@ +/* + * + * ddi_cv, Supported by Alexey V. Sitnikov, alexmipt@mcst.ru MCST + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#define DEBUG 0 +#define dbgprn if (DEBUG) printk + +/***********************************!!!!!!!! FIXME !!!!!!!!!*************************************/ +/* IN SOLARIS */ +/* +* cv_wait - returns void if reached condition +* +* cv_wait_sig - returns 0 if signaled or > 0 if reached condition +* +* cv_timedwait - returns -1 if timeouted or > 0 if reached condition +* +* cv_timedwait_sig - returns 0 if signaled, -1 if timeouted or > 0 if reached condition +*/ + +/* IN LINUX */ +/* +* cv_wait - returns -1 if signaled, 0 if reached condition +* +* cv_timedwait - returns -1 if timeouted or signaled (signal_pending checking needed), +* 0 if reached condition +*/ + +/* + * cond_wait and cond_broadcast, with using mutex + */ + +int +ddi_cv_wait(kcondvar_t *cvp, kmutex_t *semp) +{ + struct task_struct *tsk = current; + int rval = 0; + DECLARE_RAW_WAIT_QUEUE(wait); + + dbgprn("cond_wait: start\n"); + tsk->state = TASK_INTERRUPTIBLE; + raw_add_wait_queue(cvp, &wait); + if(!in_interrupt()) + mutex_exit(semp); + schedule(); + raw_remove_wait_queue(cvp, &wait); + tsk->state = TASK_RUNNING; + if (signal_pending(current)) { + rval = -1; + } + if(!in_interrupt()) + mutex_enter(semp); + return rval; + +} + +int +ddi_cv_broadcast(kcondvar_t *cvp) +{ + dbgprn("cond_broadcast: start\n"); + raw_wake_up(cvp); + return 0; +} + +int +ddi_cv_timedwait(kcondvar_t *cvp, kmutex_t *semp, long tim) +{ + long expire; + int rval = 0; + struct task_struct *tsk = current; + DECLARE_RAW_WAIT_QUEUE(wait); + + dbgprn("cond_timedwait: start\n"); + expire = tim - jiffies; + dbgprn("cond_timedwait: before schedule_timeout, expire = 0x%lx\n", expire); + tsk->state = 
TASK_INTERRUPTIBLE; + raw_add_wait_queue(cvp, &wait); + if(!in_interrupt()) + mutex_exit(semp); + if (expire > 0) + expire = schedule_timeout(expire); + else + expire = 0; + dbgprn("cond_timedwait: after schedule_timeout, expire = 0x%lx\n", expire); + raw_remove_wait_queue(cvp, &wait); + tsk->state = TASK_RUNNING; + if(!in_interrupt()) + mutex_enter(semp); + if (expire) { + if (signal_pending(current)) { + rval = -1; + } + } else { + rval = -1; + } + return rval; + +} + +/* + * cond_wait and cond_broadcast, with using spinlock, analog Solaris + */ + +int +ddi_cv_spin_wait(kcondvar_t *cvp, raw_spinlock_t *lock) +{ + struct task_struct *tsk = current; + int rval = 0; + int spin_locking_done = 0; + DECLARE_RAW_WAIT_QUEUE(wait); + + dbgprn("cond_wait: start\n"); + tsk->state = TASK_INTERRUPTIBLE; + raw_add_wait_queue(cvp, &wait); + spin_locking_done = raw_spin_is_locked(lock); + if (spin_locking_done) + spin_mutex_exit(lock); + dbgprn("cond_wait: in_interrupt = %ld\n", in_interrupt()); + schedule(); + raw_remove_wait_queue(cvp, &wait); + + tsk->state = TASK_RUNNING; + if (signal_pending(current)) { + rval = -1; + } + if (spin_locking_done) + spin_mutex_enter(lock); + return rval; + +} + +int +ddi_cv_spin_timedwait(kcondvar_t *cvp, raw_spinlock_t *lock, long tim) +{ + unsigned long expire; + int rval = 0; + int spin_locking_done = 0; + struct task_struct *tsk = current; + DECLARE_RAW_WAIT_QUEUE(wait); + + dbgprn("cond_timedwait: start\n"); + expire = tim - jiffies; + dbgprn("cond_timedwait: before schedule_timeout, expire = 0x%lx\n", expire); + tsk->state = TASK_INTERRUPTIBLE; + raw_add_wait_queue(cvp, &wait); + spin_locking_done = raw_spin_is_locked(lock); + if(spin_locking_done) + spin_mutex_exit(lock); + + dbgprn("cond_timedwait: in_interrupt = %ld\n", in_interrupt()); + expire = schedule_timeout(expire); + dbgprn("cond_timedwait: after schedule_timeout, expire = 0x%lx\n", expire); + raw_remove_wait_queue(cvp, &wait); + tsk->state = TASK_RUNNING; + 
if(spin_locking_done) + spin_mutex_enter(lock); + if (expire) { + if (signal_pending(current)) { + rval = -2; + } + } else { + rval = -1; + } + return rval; + +} diff --git a/drivers/mcst/dmp_assist/Makefile b/drivers/mcst/dmp_assist/Makefile new file mode 100644 index 000000000000..7c3ec5bed066 --- /dev/null +++ b/drivers/mcst/dmp_assist/Makefile @@ -0,0 +1,2 @@ + +obj-y += dmp_assist.o diff --git a/drivers/mcst/dmp_assist/dmp_assist.c b/drivers/mcst/dmp_assist/dmp_assist.c new file mode 100644 index 000000000000..60e8c79d46ef --- /dev/null +++ b/drivers/mcst/dmp_assist/dmp_assist.c @@ -0,0 +1,234 @@ +/* + * dmp_assist.c - MCST dump-analyzer assistant Driver + * Copyright (C) 2011 Mikhail Kharitonov + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "dmp_assist.h" + +/* enable debug output? */ +#define DMP_DEBUG 0 + +#if DMP_DEBUG +#define DPRINT(fmt, args...) printk(fmt, ##args); +#else +#define DPRINT(fmt, args...) 
+#endif + +static char *dev_name = "dmp_assist"; +static int Major; +#if 0 +static int Minor; +static dev_t dev; +#endif + +static int dmp_open (struct inode *inode, struct file *file); +static int dmp_close(struct inode *inode, struct file *file); +static long dmp_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +static int dmp_mmap(struct file *file, struct vm_area_struct * vma); +static long dmp_unlink(const char * pathname); + +static struct file_operations dmp_fops = { + .owner = THIS_MODULE, + .open = dmp_open, /* open */ + .release = dmp_close, /* close */ + .unlocked_ioctl = dmp_ioctl, /* ioctl */ + .mmap = dmp_mmap, /* mmap */ +}; + +static int dmp_open (struct inode *inode, struct file *file) +{ + DPRINT("dmp_assist driver open().\n"); + return 0; +} + +static int dmp_close (struct inode *inode, struct file *file) +{ + DPRINT("dmp_assist driver close().\n"); + return 0; +} + +#ifdef CONFIG_RECOVERY +extern e2k_addr_t cntp_kernel_base; +#endif +static long dmp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + DPRINT("dmp_ioctl() cmd:0x%x.\n", cmd); + switch (cmd) { + case IOCTL_DMP_ASSIST_kernel_base: + DPRINT("dmp_ioctl() IOCTL_DMP_ASSIST_kernel_base cmd*\n", cmd); +#ifdef CONFIG_RECOVERY + return cntp_kernel_base; +#else + printk(KERN_ERR "dmp_ioctl() Err: No CONFIG_RECOVERY\n"); + return -EINVAL; +#endif + default: + printk(KERN_INFO "dmp_ioctl(): unknown ioctl command:0x%lx," + "IOCTL_DMP_ASSIST_kernel_base:0x%lx\n", + cmd, IOCTL_DMP_ASSIST_kernel_base); + return -EINVAL; + } + return 0; +} + +static int dmp_mmap(struct file * file, struct vm_area_struct * vma) +{ + size_t size = vma->vm_end - vma->vm_start; + + DPRINT("dmp_mmap() vma:%p start:0x%lx end:0x%lx" + " size:0x%lx pgoff:0x%lx prot:0x%lx\n", + vma, vma->vm_start, vma->vm_end, size, + vma->vm_pgoff, vma->vm_page_prot); + + /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */ + if (remap_pfn_range(vma, + vma->vm_start, + vma->vm_pgoff, + size, + 
vma->vm_page_prot)) { + return -EAGAIN; + } + + return 0; +} + + +static long dmp_mknod(const char *filename, int mode, unsigned dev) +{ + int error = 0; + struct dentry *file_dentry; + struct nameidata nd; + + DPRINT("---dmp_mknod(): start for %s\n", filename); + error = path_lookup(filename, LOOKUP_PARENT, &nd); + DPRINT("---dmp_mknod() ret path_lookup %d\n", error); + if (error) + goto out; + file_dentry = lookup_create(&nd, 0); /* Returns with + * nd->path.dentry->d_inode->i_mutex + * locked. + */ + error = PTR_ERR(file_dentry); + DPRINT("---dmp_mknod() PTR_ERR(file_dentry) 0x%lx\n", error); + if (!IS_POSIXACL(nd.path.dentry->d_inode)) + mode &= (~current_umask()); + if (IS_ERR(file_dentry)) { + goto out_unlock; + } + error = vfs_mknod(nd.path.dentry->d_inode, file_dentry, + mode, new_decode_dev(dev)); + DPRINT("---dmp_mknod() ret vfs_mknod() %d\n", error); + dput(file_dentry); +out_unlock: + mutex_unlock(&nd.path.dentry->d_inode->i_mutex); + path_put(&nd.path); +out: + if (error == -EEXIST) { + DPRINT("---dmp_mknod() error == EEXIST remove entry\n"); + dmp_unlink(dev_path); + return (dmp_mknod(filename, mode, dev)); + } + return error; +} + +long dmp_unlink(const char * pathname) +{ + int error = 0; + struct dentry *file_dentry; + struct nameidata nd; + struct inode *inode = NULL; + + DPRINT("---dmp_unlink() enter\n"); + + error = path_lookup(pathname, LOOKUP_PARENT, &nd); + DPRINT("---dmp_unlink() ret path_lookup %d\n", error); + if (error) + return error; + DPRINT("---dmp_unlink() after path_lookup parent_name: %s\n", + nd.path.dentry->d_name.name); + error = -EISDIR; + if (nd.last_type != LAST_NORM) + return error; + mutex_lock(&nd.path.dentry->d_inode->i_mutex); + file_dentry = lookup_one_len(nd.last.name, nd.path.dentry, + strlen(nd.last.name)); + error = PTR_ERR(file_dentry); + if (!IS_ERR(file_dentry)) { + if (nd.last.name[nd.last.len]) { + DPRINT("---dmp_unlink() nd.last.name[nd.last.len] %d\n", + nd.last.name[nd.last.len]); + error = 
!file_dentry->d_inode ? -ENOENT : + S_ISDIR(file_dentry->d_inode->i_mode) ? + -EISDIR : -ENOTDIR; + } else { + inode = file_dentry->d_inode; + if (inode) + atomic_inc(&inode->i_count); + DPRINT("---dmp_unlink() name:%s\n", + file_dentry->d_name.name); + error = vfs_unlink(nd.path.dentry->d_inode, file_dentry); + DPRINT("---dmp_unlink() ret vfs_unlink %d\n", error); + } + dput(file_dentry); + } else { + printk("---dmp_unlink() ret lookup_one_len %d\n", error); + } + mutex_unlock(&nd.path.dentry->d_inode->i_mutex); + if (inode) + iput(inode); /* truncate the inode here */ + path_put(&nd.path); + return error; +} + +static int __init dmp_init(void) +{ +#if 0 + int rval = 0; + mode_t mode = 0; +#endif + Major = register_chrdev(MAJOR_NUM, dev_name, &dmp_fops); + if (Major < 0) { + printk("dmp_init(): Register failed %d\n", Major); + return Major; + } +#if 0 + Minor = 0; + dev = (Major << 8) | Minor; + DPRINT("dmp_init(): dev:0x%lx\n", dev); + mode = (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH); + rval = dmp_mknod(dev_path, (mode | S_IFCHR) , dev); + if (rval) { + printk("dmp_init(): dmp_mknod() failed %d\n", rval); + return rval; + } +#endif + printk("dmp_assist driver installed. 
Major:0x%lx(%d)\n", MAJOR_NUM, + MAJOR_NUM); + return 0; +} + +static void __exit dmp_exit(void) +{ + dmp_unlink(dev_path); + unregister_chrdev(Major, dev_name); + DPRINT("dmp_assist driver exited.\n"); +} + +module_init(dmp_init); +module_exit(dmp_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Mikhail Kharitonov "); +MODULE_DESCRIPTION("Dump-analyzer assistant Driver"); diff --git a/drivers/mcst/dmp_assist/dmp_assist.h b/drivers/mcst/dmp_assist/dmp_assist.h new file mode 100644 index 000000000000..7e5c5a921789 --- /dev/null +++ b/drivers/mcst/dmp_assist/dmp_assist.h @@ -0,0 +1,20 @@ +/* + * drivers/mcst/dmp_assist/dmp_assist.h + */ + +#ifndef DMP_ASSIST_H +#define DMP_ASSIST_H + +#include +#include +#include + +static char *dev_path = "/dev/dmp_assist"; + +#define MAJOR_NUM 242 +#define DMP_ASSIST_NUM 212 + +#define IOCTL_DMP_ASSIST_kernel_base _IO(DMP_ASSIST_NUM, 1) + +#endif /* DMP_ASSIST_H */ + diff --git a/drivers/mcst/eldsp/Makefile b/drivers/mcst/eldsp/Makefile new file mode 100644 index 000000000000..3aea3d85eb88 --- /dev/null +++ b/drivers/mcst/eldsp/Makefile @@ -0,0 +1,2 @@ + +obj-$(CONFIG_ELDSP) += eldsp.o diff --git a/drivers/mcst/eldsp/eldsp.c b/drivers/mcst/eldsp/eldsp.c new file mode 100644 index 000000000000..14a614b02b8f --- /dev/null +++ b/drivers/mcst/eldsp/eldsp.c @@ -0,0 +1,3110 @@ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_ELDSP_MODULE + +#include +#include +#include +#include +#include +#include + +#endif + + +#define __DMA_ON__ +#define __DMA_INTERRUPTS_ON__ +#define __CATCH_INTERRUPT_ON__ +#define __ALL_ONLINE_NODE__ +#define __USE_PROC__ +#define __DSP_RUN_HACK_FOR_MEMORY__ +#define __CHAIN_MODE_ON__off /* temporary off */ + +#ifndef __DMA_ON__ +#warning DMA are - OFF ! ! ! 
+#endif + +#ifndef __DMA_INTERRUPTS_ON__ +#warning DMA interrupts are - OFF ! ! ! +#endif + +#ifndef __CATCH_INTERRUPT_ON__ +#warning catch interrupt are - OFF ! ! ! +#endif + +#ifndef __ALL_ONLINE_NODE__ +#warning All nodes (expect zero) are - OFF ! ! ! +#endif + +#ifndef __USE_PROC__ +#warning Use proc fs are - OFF ! ! ! +#endif + + +/* /proc/sys/debug/eldsp_debug trigger */ +int dsp_debug = 0; + +#include "eldsp.h" + +#define MCST_INCLUDE_IOCTL +#ifdef MCST_INCLUDE_IOCTL +#include +#endif + +#define DSP_VERSION "0.10.1.3" + +/* for /dev, /sys/class, /proc nodes and files */ +#define DSP_NAME "eldsp" + +static const char dsp_dev_name[] = "MCST,eldsp"; +static int dsp_numbers_devs = 0; +int node_numbers; +int dsp_minors[MAX_DSP * MAX_NODE] = {0}; /* all devices */ +int on_nodes[MAX_NODE] = {0}; /* all nodes */ +dsp_dev_t *dsp_devices[MAX_DSP * MAX_NODE]; /* all devices */ +dsp_node_t dsp_node[MAX_NODE]; /* dma_channels + dsp_devices */ +int major = ELDSP_MAJOR; +static struct class *dsp_class; + +SPINLOCK_T interrupt_lock; +SPINLOCK_T global_lock; +SPINLOCK_T dma_lock; + + +void dsp_dma_processing(dsp_dev_t *dev); +void dsp_interrupt_processing(dsp_dev_t *dev); +#ifdef __CHAIN_MODE_ON__ +void delete_dma_chain(dsp_dev_t *dev); +int add_link_to_dma_chain(dsp_dev_t *dev, int pages); +#endif /*__CHAIN_MODE_ON__*/ + +/********************/ + +#include +typedef long hrtime_t; + +hrtime_t gethrtime(void) { + struct timespec64 ts; + hrtime_t retval; + + ktime_get_real_ts64(&ts); + + retval = (hrtime_t)(ts.tv_sec * 1000000000L + ts.tv_nsec); + + return retval; +} +/******************/ + +#if defined(CONFIG_SYSCTL) +#include + +static ctl_table dsp_table[] = { + { + .procname = "eldsp_debug", + .data = &dsp_debug, + .maxlen = sizeof(dsp_debug), + .mode = 0666, + .proc_handler = proc_dointvec, + }, + { } +}; + +static ctl_table dsp_root_table[] = { + { + .procname = "debug", + .maxlen = 0, + .mode = 0555, + .child = dsp_table, + }, + { } +}; + +static struct ctl_table_header 
*dsp_sysctl_header = NULL; + +static void __init dsp_sysctl_register(void) +{ + dsp_sysctl_header = register_sysctl_table(dsp_root_table); +} + +static void dsp_sysctl_unregister(void) +{ + unregister_sysctl_table(dsp_sysctl_header); +} + +#else /* CONFIG_SYSCTL */ + +static void __init dsp_sysctl_register(void) +{ +} + +static void dsp_sysctl_unregister(void) +{ +} +#endif + +#ifdef __USE_PROC__ +#ifdef CONFIG_PROC_FS + +extern struct proc_dir_entry *ldsp_entry; +extern const struct file_operations *ldsp_proc_fops_pointer; +static struct proc_dir_entry *dsp_proc_entry; + +static int eldsp_seq_show(struct seq_file *s, void *v) +{ + int i = *((int *)v); + int k; + if (dsp_node[i].present) { + seq_printf(s, " node: %d\n", i); + for (k = 0; k < MAX_DSP; k++) { + unsigned long offset_b; + seq_printf(s, " number: %d, minor: %d, state: on\n", + dsp_node[i].dsp[k]->number, + dsp_node[i].dsp[k]->minor); + seq_printf(s, "\tmknod /dev/%s%d c %d %d\n", + DSP_NAME, + dsp_node[i].dsp[k]->id, + major, + dsp_node[i].dsp[k]->minor); + seq_printf(s, "\topened: %s\n", + dsp_node[i].dsp[k]->opened ? 
+ "yes" : + "no"); + offset_b = (unsigned long)(nNODE_PHYS_ADR(i) + + (0x400000 * k)); +#if DEBUG_MODE + seq_printf(s, + "\t xyram 0x%lx <- 0x%lx" + "\n\t pram 0x%lx <- 0x%lx" + "\n\t regs 0x%lx <- 0x%lx\n", + BASE[i].xyram[k], offset_b, + BASE[i].pram[k], offset_b + 0x40000, + BASE[i].regs[k], offset_b + 0x80000 + ); +#endif + } + } else { + /* node online, but DSP are off */ + if (dsp_node[i].online) + seq_printf(s, " state: off\n"); + } + + return 0; +} + +static void *eldsp_seq_start(struct seq_file *s, loff_t *pos) +{ + seq_printf(s, "- ELDSP device info - number: %d, online: %d.\n", + num_online_nodes() * 4, + dsp_numbers_devs); + seq_printf(s, " Module loaded: version - %s.\n", DSP_VERSION); + seq_printf(s, " Major number: %d\n", major); + seq_printf(s, " Status for each DSP on each node:\n"); + + if (dsp_debug) + seq_printf(s, " Debug print mode: ON.\n"); + +#if ERROR_MODE == 0 + seq_printf(s, " Module was compiled without ERROR print !\n"); +#endif +#if DEBUG_MODE + seq_printf(s, " Module was compiled with DEBUG print.\n"); +#endif +#if DEBUG_DETAIL_MODE + seq_printf(s, " Module was compiled with DETAIL DEBUG print.\n"); +#endif +#ifndef __ALL_ONLINE_NODE__ + seq_printf(s, "All nodes (except zero-node) " + "are off manually in driver.\n"); +#endif + + if (*pos == MAX_NODE) { + return 0; + } + + return (void *)pos; +} + +static void *eldsp_seq_next(struct seq_file *s, void *v, loff_t *pos) +{ + /* not sure that increase correct ! 
*/ + *pos = (int)*pos + 1; + if (*pos == MAX_NODE) + return 0; + + return (void *)pos; +} + +static void eldsp_seq_stop(struct seq_file *s, void *v) +{ +} + +static const struct seq_operations eldsp_seq_ops = { + .start = eldsp_seq_start, + .next = eldsp_seq_next, + .stop = eldsp_seq_stop, + .show = eldsp_seq_show +}; + +static int eldsp_proc_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &eldsp_seq_ops); +} + +static const struct file_operations *save_eldsp_proc_ops = NULL; +static const struct file_operations eldsp_proc_ops = { + .owner = THIS_MODULE, + .open = eldsp_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + +#endif /* CONFIG_PROC_FS */ + +#endif /* __USE_PROC__ */ + +#ifndef VM_RESERVED +# define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP) +#endif + +static int dsp_mmap(struct file *filp, struct vm_area_struct *vma) +{ + + dsp_dev_t *dev; + unsigned long mem_start; + unsigned long off; + + dev = (dsp_dev_t *)filp->private_data; + + /* WARNING: not check memory area size and limit !!! */ + + off = vma->vm_pgoff << PAGE_SHIFT; + + vma->vm_flags |= (VM_READ | VM_WRITE | VM_RESERVED); + if (off >= ADD_DMA_CHAIN) { + off = ADD_DMA_CHAIN; + } else if (off == DSP_DMA_MMAP) { + ; + } else { + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + } + + switch (off) { + /* remap DMA buffer to user */ + case DSP_DMA_MMAP: + { + vma->vm_pgoff = 0; + vma->vm_flags |= VM_IO | VM_RESERVED; + + DETAIL_PRINT("mmap[%d:%d:%d]:\tDSP_DMA_MMAP: vm_off 0x%lx, " + "off 0x%lx 0x%lx, size 0x%x, dev %d\n", + dev->node, dev->number, dev->minor, + vma->vm_pgoff << PAGE_SHIFT, + dev->dma.phys_mem >> PAGE_SHIFT, + dev->dma.page_adr >> PAGE_SHIFT, + DMA_EXCHANGE_SIZE, + dev->number); + + /* + if ((vma->vm_end - vma->vm_start) > DMA_EXCHANGE_SIZE) + { + ERROR_PRINT("mmap:\terror mmap DMA memory to user, " + "size is too long\n"); + ERROR_PRINT("mmap:\t size can't be more than: %d. 
" + "Current size: %ld\n", + DMA_EXCHANGE_SIZE, (vma->vm_end - vma->vm_start)); + return -EFAULT; + } + */ + + if (remap_pfn_range(vma, + vma->vm_start, + dev->dma.phys_mem >> PAGE_SHIFT, + DMA_EXCHANGE_SIZE, + vma->vm_page_prot) < 0) + { + ERROR_PRINT("mmap:\terror mmap DMA memory to user\n"); + return -EAGAIN; + } + } + break; + + /* mmap DSP local registers area */ + case REG_LOCAL_MMAP: + { + vma->vm_pgoff = 0; + + mem_start = PHYS_NODE(REGS_OFFSET + 0x0); + + DETAIL_PRINT("mmap[%d:%d:%d]:\tREG_LOCAL_MMAP: " + "vm_off 0x%lx, off 0x%lx, size 0x%x, dev %d\n", + dev->node, dev->number, dev->minor, + vma->vm_pgoff << PAGE_SHIFT, + mem_start, + REG_LOCAL_SIZE, + dev->number); + + if (io_remap_pfn_range(vma, + vma->vm_start, /* virtual for user */ + mem_start >> PAGE_SHIFT, + REG_LOCAL_SIZE, + vma->vm_page_prot) < 0) + { + ERROR_PRINT("mmap:\tmmap local registers to user\n"); + return -EAGAIN; + } + } + break; + + /* mmap DSP global registers area */ + case REG_GLOBAL_MMAP: + { + vma->vm_pgoff = 0; + + mem_start = nPHYS_NODE(dev->node, 0, REGS_OFFSET + 0x1000); + + DETAIL_PRINT("mmap[%d:%d:%d]:\tREG_GLOBAL_MMAP: " + "vm_off 0x%lx, off 0x%lx, size 0x%x, dev %d\n", + dev->node, dev->number, dev->minor, + vma->vm_pgoff << PAGE_SHIFT, + mem_start, + REG_GLOBAL_SIZE, + dev->number); + + if (io_remap_pfn_range(vma, + vma->vm_start, /*virtual for user*/ + mem_start >> PAGE_SHIFT, + REG_GLOBAL_SIZE, + vma->vm_page_prot) < 0) + { + ERROR_PRINT("mmap:\tmmap global registers to user\n"); + return -EAGAIN; + } + } + break; + + /* allocate memory for DMA and mmap this memory to user */ + case ADD_DMA_CHAIN: + { +#ifdef __CHAIN_MODE_ON__ + int res = 0; + int found = 0; + vma->vm_pgoff = 0; + vma->vm_flags |= VM_IO | VM_RESERVED; + + res = add_link_to_dma_chain(dev, dev->link_size); + if (res > 0) { + struct chain_list *tmp; + list_for_each_entry(tmp, &dev->dma_chain, list) { + if (tmp->link.lnumber == res) { + DETAIL_PRINT("found chain: %d\n", + tmp->link.lnumber); + found = 1; + 
break; + } + } + + if (found) { + DETAIL_PRINT("mmap[%d:%d:%d]:\t" + "ADD_DMA_CHAIN: vm_off 0x%lx, " + "[vm_start: 0x%lx]" + "off 0x%lx 0x%lx, sz %d, dev %d\n", + dev->node, dev->number, dev->minor, + vma->vm_start, + vma->vm_pgoff << PAGE_SHIFT, + tmp->link.phys_mem >> PAGE_SHIFT, + tmp->link.page_adr >> PAGE_SHIFT, + tmp->link.size, + dev->number); + + if (remap_pfn_range(vma, + vma->vm_start, + tmp->link.phys_mem >> PAGE_SHIFT, + tmp->link.size, + vma->vm_page_prot) < 0) + { + ERROR_PRINT("mmap:\terror mmap DMA memory to user\n"); + return -EAGAIN; + } + } + } else { + DBG_PRINT("add link FAILED !\n"); + return -ENOMEM; + } +#else + WARNING_PRINT("Chain mode are - OFF !\n"); + return -ENOMEM; +#endif + } + break; + + /* mmap DSP code and data area */ + default: + { + mem_start = PHYS_NODE(off); + + DETAIL_PRINT("mmap[%d:%d:%d]:\tRAM: " + "vm_off 0x%lx, off 0x%lx, size 0x%lx, dev %d\n", + dev->node, dev->number, dev->minor, + vma->vm_pgoff << PAGE_SHIFT, + mem_start, + (vma->vm_end - vma->vm_start), + dev->number); + + if (io_remap_pfn_range(vma, + vma->vm_start, /*virtual for user*/ + mem_start >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, + vma->vm_page_prot) < 0) + { + ERROR_PRINT("mmap:\terror mmap memory to user\n"); + return -EAGAIN; + } + } + break; + } + + return 0; +} + + +static unsigned int dsp_poll(struct file *filp, struct poll_table_struct *wait) +{ + dsp_dev_t *dev; + /* poll disable - wake_up() not work on random stack */ + /* unsigned int mask = 0; */ + + dev = (dsp_dev_t *)filp->private_data; + WARNING_PRINT("poll[%d]: Poll not implemented.\n", dev->minor); + return -EINVAL; + + /* poll disable - wake_up() not work on random stack */ + /* + DETAIL_PRINT("poll:\tdsp number: %d:%d:%d\n", + dev->node, dev->number, dev->minor); + + poll_wait(filp, &dev->wait_queue, wait); + + if (dev->mem_error) + mask |= POLLERR; + else if (dev->run == 0) + mask |= POLLIN; + + return mask; + */ +} + + +static int dsp_open(struct inode *inode, struct file *filp) +{ 
+ dsp_dev_t *dev; +#ifdef __THIS_SECTION_ARE_OFF__ + unsigned long flags; + unsigned int dspmask = 0, cpumask = 0, dmamask = 0; + unsigned int dspmasko = 0, cpumasko = 0;//dbg +#endif + int minor = MINOR(inode->i_rdev), i, not_found = 1; + + for (i = 0; i < dsp_numbers_devs; i++) { + if (minor == dsp_minors[i]) { + not_found = 0; + break; + } + } + + if (not_found) { + ERROR_PRINT("open:\tdevice with minor number: " + "%d - not exist.\n", minor); + return -ENODEV; + } + + dev = (dsp_dev_t *)filp->private_data; + if (!dev) { + dev = dsp_devices[minor]; + if (dev->opened) { + WARNING_PRINT("open:\tre-open device: %d:%d:%d\n", + dev->node, dev->number, dev->minor); + return -EBUSY; + } else { + dev->opened = 1; + } + dev->reason = 0; + dev->dcsr_i = 0; + dev->sp_i = 0; + + +#ifdef __THIS_SECTION_ARE_OFF__ +#ifdef __CATCH_INTERRUPT_ON__ + DETAIL_PRINT("open:\ton interupts\n"); + SLOCK_IRQSAVE(&global_lock, flags); + dspmasko = dspmask = (GET_CLUSTER_REG(MASKR_DSP)); + dspmask |= (0x3f << (8 * dev->number)); + dspmask |= 0x40; /* INT_MEM_ERR - on */ + SET_CLUSTER_REG(MASKR_DSP, dspmask); + + /* on interrupts only at cpu0 */ + cpumasko = cpumask = GET_APIC_REG(IC_MR0); +# ifdef __DMA_INTERRUPTS_ON__ + dmamask = 0xff; +# else + dmamask = 0x0; +# endif /*__DMA_INTERRUPTS_ON__*/ + cpumask |= dmamask; + cpumask |= mask_intr[dev->number]; + SET_APIC_REG(IC_MR0, cpumask); + SUNLOCK_IRQREST(&global_lock, flags); + DETAIL_PRINT("old intr mask: dsp: 0x%08x cpu: 0x%04x" + "\tnew:dsp: 0x%08x cpu: 0x%04x\n", + dspmasko, cpumasko, dspmask, cpumask); +#endif /*__CATCH_INTERRUPT_ON__*/ +#endif /*__THIS_SECTION_ARE_OFF__*/ + filp->private_data = dev; + } + + DBG_PRINT("open: done\n"); + + return 0; +} + + +static int dsp_release(struct inode *inode, struct file *filp) +{ + dsp_dev_t *dev; + int minor = MINOR(inode->i_rdev); + unsigned int i, not_found = 1; +#ifdef __THIS_SECTION_ARE_OFF__ + unsigned long flags; + unsigned int dspmask = 0, cpumask = 0; + unsigned int dspmasko = 0, 
cpumasko = 0;//dbg +#endif /*__THIS_SECTION_ARE_OFF__*/ + + for (i = 0; i < dsp_numbers_devs; i++) { + if (minor == dsp_minors[i]) { + not_found = 0; + break; + } + } + + if (not_found) { + ERROR_PRINT("open:\tminor numbers more than exists\n"); + return -ENODEV; + } + + dev = (dsp_dev_t *)filp->private_data; + +#ifdef __THIS_SECTION_ARE_OFF__ +#ifdef __CATCH_INTERRUPT_ON__ + DETAIL_PRINT("open:\toff interupts\n"); + SLOCK_IRQSAVE(&global_lock, flags); + dspmasko = dspmask = (GET_CLUSTER_REG(MASKR_DSP)); + dspmask &= ~(0x3f << (8 * dev->number)); + if ((dspmask & 0xffffffbf) == 0) + dspmask = 0x0; /* all and INT_MEM_ERR - off */ + SET_CLUSTER_REG(MASKR_DSP, dspmask); + + /* off interrupts on both cpu */ + cpumasko = cpumask = GET_APIC_REG(IC_MR0); + cpumask &= ~(mask_intr[dev->number]); + if ((cpumask & 0xff00) == 0) /* nothing */ + cpumask = 0; /* off all, include dma */ + SET_APIC_REG(IC_MR0, cpumask); + SUNLOCK_IRQREST(&global_lock, flags); + DETAIL_PRINT("old intr mask: dsp: 0x%08x cpu: 0x%04x" + "\tnew:dsp: 0x%08x cpu: 0x%04x\n", + dspmasko, cpumasko, dspmask, cpumask); +#endif /*__CATCH_INTERRUPT_ON__*/ +#endif /*__THIS_SECTION_ARE_OFF__*/ + dev->opened = 0; + DBG_PRINT("closed\n"); + + return 0; +} + + +static ssize_t dsp_write(struct file *f, const char *b, size_t c, loff_t *f_pos) +{ + dsp_dev_t *dev; + dev = (dsp_dev_t *)f->private_data; + WARNING_PRINT("write[%d]: Write not implemented.\n", dev->minor); + return -EINVAL; +} + + +static ssize_t dsp_read(struct file *filp, char *b, size_t c, loff_t *f_pos) +{ + dsp_dev_t *dev; + dev = (dsp_dev_t *)filp->private_data; + WARNING_PRINT("read[%d]: Read not implemented.\n", dev->minor); + return -EINVAL; +} + + +int dsp_run(dsp_dev_t *dev, unsigned int adr) +{ + + dev->run = 1; + dev->reason = 0; + dev->state = 0; + dev->mem_error = 0; + + SET_DSP_REG(PC, adr);/* set start adress */ + SETBIT(DCSR, 14); /* start */ + + return 0; +} + + +int dsp_stop(dsp_dev_t *dev) +{ + + dev->run = 0; + dev->reason = 
(GET_DSP_REG(DCSR) & 0x1f); /* reason or nothing ??? */ + dev->state = GET_DSP_REG(SR) & 0xff; + + CLRBIT(DCSR, 14); /* stop */ + + return 0; +} + + +int dsp_reset(dsp_dev_t *dev) +{ + dsp_stop(dev); + + dev->state = 0; + dev->reason = 0; + dev->dcsr_i = 0; + dev->sp_i = 0; + + /* It is unclear what registers need to clean. */ + SET_DSP_REG(SR, 0x0); + SET_DSP_REG(DCSR, 0x0); + SET_DSP_REG(CNTR, 0x0); + + return 0; +} + + +/* + * clear local XYRAM and PRAM + * memory area for current DSP + */ +void dsp_clear_memory(dsp_dev_t *dev) +{ + memset(XYRAM, 0, XYRAM_SIZE); + memset(PRAM, 0, PRAM_SIZE); +} + + +/** + * internal function for alloc dma memory + * old-style + * \param s - size + */ +int alloc_dma(unsigned long *page, dma_addr_t **virt, dma_addr_t *phys, int s) +{ + int order = 0; + struct page *map, *mapend; + + order = get_order(s); + (*page) = __get_free_pages(GFP_KERNEL | GFP_DMA, order); + if ((*page) == 0) { + return -ENOMEM; + } + + mapend = virt_to_page((*page) + (PAGE_SIZE << order) - 1); + for (map = virt_to_page((*page)); map <= mapend; map++) + SetPageReserved(map); + + (*virt) = (dma_addr_t *)(*page); + memset((*virt), 0, s); + (*phys) = virt_to_phys((char *)((*virt))); + + return PAGE_SIZE << order; +} + + +/* free DMA buffers */ +void free_dma(unsigned long padr, int size) +{ + struct page *map, *mapend; + int order = get_order(size); + mapend = virt_to_page(padr + (PAGE_SIZE << order) - 1); + for (map = virt_to_page(padr); map <= mapend; map++) { + ClearPageReserved(map); + } + free_pages(padr, order); +} + + +#ifdef __DMA_ON__ +int lock_channel(int node, int dsp_number, int channel) +{ + unsigned long flags; + + DETAIL_PRINT("lock channel: %d %d %d\n", node, dsp_number, channel); + + SLOCK_IRQSAVE(&dma_lock, flags); + + if (dsp_node[node].dma_channel_lock[channel] != -1) { + SUNLOCK_IRQREST(&dma_lock, flags); + return -1; + } + + dsp_node[node].dma_channel_lock[channel] = dsp_number; + + SUNLOCK_IRQREST(&dma_lock, flags); + + DETAIL_PRINT("lock 
channel done\n"); + + return 0; +} + + +int unlock_channel(int node, int dsp_number, int channel) +{ + unsigned long flags; + + DETAIL_PRINT("unlock channel: %d %d %d\n", node, dsp_number, channel); + + SLOCK_IRQSAVE(&dma_lock, flags); + + if (dsp_node[node].dma_channel_lock[channel] != dsp_number) { + SUNLOCK_IRQREST(&dma_lock, flags); + ERROR_PRINT("DMA: ELDSP[%d:%d] unlock wrong channel: " + "lock from DSP[%d]\n", + node, dsp_number, + dsp_node[node].dma_channel_lock[channel]); + return -1; + } + + dsp_node[node].dma_channel_lock[channel] = -1; + + SUNLOCK_IRQREST(&dma_lock, flags); + + DETAIL_PRINT("unlock channel done\n"); + + return 0; +} + + +int check_channel(int node, int channel) +{ + unsigned long flags; + int dsp_number; + + DETAIL_PRINT("check channel\n"); + + SLOCK_IRQSAVE(&dma_lock, flags); + dsp_number = dsp_node[node].dma_channel_lock[channel]; + SUNLOCK_IRQREST(&dma_lock, flags); + + DETAIL_PRINT("check channel done: %d %d %d\n", + node, dsp_number, channel); + + return dsp_number; +} + + +/* dma exchange - write and read */ +int dma_exchange(dsp_dev_t *dev, dsp_dma_setup_t *set, int dir) +{ + union csr_register csr, reg_csr, old_csr; + union ior_register ior0, ior1; + int while_count = 0; + hrtime_t all_s, all_e, s, e; + unsigned long flags; + + DETAIL_PRINT("dma:\tDSP_EXCHANGE_DATA: " + "0x%x 0x%x 0x%x 0x%x (%d) mem: 0x%lx 0x%lx 0x%lx\n", + set->words, + (unsigned int)set->size, + set->run, + set->channel, + set->mode, + dev->dma.phys_mem + sizeof(dsp_dma_setup_t), + set->offset_mem0, + set->offset_mem1); + + if (lock_channel(dev->node, dev->number, set->channel)) { + ERROR_PRINT("dma: DMA lock channel error, " + "channel %d already busy\n", + set->channel); + return -EFAULT; + } + + all_s = gethrtime(); + + /* set up DMA transaction */ + csr.r = reg_csr.r = old_csr.r = 0L; + ior0.r = ior1.r = 0L; + csr.b.wn = set->words; + csr.b.wcx = set->size; /* word = 64 bit */ + + /* direction and offset memory: + * 0: CPU -> DSP (TO_DSP) + * 1: CPU <- DSP 
(FROM_DSP) + * 2: DSP -> DSP (DSP_DSP) + * + * ir0 - always using for CPU memory phys area (except DSP->DSP) + * ir1 - always using for DSP memory phys area + */ + if (dir < DSP_DSP) { + ior0.b.ir = dev->dma.phys_mem; + csr.b.dir = dir; + ior0.b.sel = 1; /* where memory addres 0 - DSP, 1 - CPU */ + } else { + ior0.b.ir = set->offset_mem0; + } + + ior0.b.or = 1; /* ir = ir + (or * 8) */ + ior1.b.ir = set->offset_mem1; /* set DSP_trg adress */ + //ior1.b.sel = 0; /* where memory addres 0 - DSP, 1 - CPU */ + ior1.b.or = 1; /* ir = ir + (or * 8) */ + + DBG_PRINT("dma:\tDSP_EXCHANGE_DATA: 0x%lx 0x%lx 0x%lx\n", + csr.r, + (unsigned long)ior0.r, + (unsigned long)ior1.r); + + s = gethrtime(); + SLOCK_IRQSAVE(&dev->spinlock, flags); + + dev->dma.run = 1; + dev->dma.channel = set->channel; + + SET_DMA_REG(IOR0, set->channel, ior0.r); + SET_DMA_REG(IOR1, set->channel, ior1.r); + SET_DMA_REG(CSR, set->channel, csr.r); + SET_DMA_REG(DMA_RUN, set->channel, 1); + +#ifdef __DMA_INTERRUPTS_ON__ + /* waiting ending DMA work */ + while (dev->dma.done == 0) { + while_count++; + + raw_wqueue_t wait_el = {.task = current}; + current->state = TASK_INTERRUPTIBLE; + list_add(&wait_el.task_list, &dev->dma.wait_task_list); + SUNLOCK_IRQREST(&dev->spinlock, flags); + schedule(); + SLOCK_IRQSAVE(&dev->spinlock, flags); + list_del(&wait_el.task_list); + current->state = TASK_RUNNING; + if (signal_pending(current)) { + SUNLOCK_IRQREST(&dev->spinlock, flags); + unlock_channel(dev->node, dev->number, set->channel); + return -ERESTARTSYS; + } + } +#else + SUNLOCK_IRQREST(&dev->spinlock, flags); + /* for debug without interrupts */ + while (!reg_csr.b.done) { + old_csr.r = reg_csr.r; + reg_csr.r = GET_DMA_REG(CSR, set->channel); + while_count++; + if (while_count > 9999999) + break; + } + SLOCK_IRQSAVE(&dev->spinlock, flags); +#endif + + dev->dma.done = 0; + dev->dma.run = 0; + dev->dma.channel = -1; + + SUNLOCK_IRQREST(&dev->spinlock, flags); + e = gethrtime(); + + unlock_channel(dev->node, 
dev->number, set->channel); + + all_e = gethrtime(); + + DETAIL_PRINT("exchange: event count %d\n", while_count); + DETAIL_PRINT("dma:\tDSP DMA done: " + "time: DMA - %lu, DMA plus system - %lu\n", + e - s, + all_e - all_s); + + return 0; +} + + +#ifdef __CHAIN_MODE_ON__ +///\todo: needed add check for chain exist!!! +void setup_target_link(dsp_dev_t *dev, setup_link_t *link) +{ + chain_link_t regs; + unsigned long long phys = 0; + unsigned long phys_base = ((unsigned long)(dev->link_regs.phys_mem)); + unsigned long *ptr = 0; + unsigned long *ptr_base = ((unsigned long *)(dev->link_regs.virt_mem)); + struct chain_list *tmp; + + DBG_PRINT("chain: setup target link: %d\n", link->link); + DETAIL_PRINT("(%d %d) intr: %d mode: %s 0x%lx 0x%lx size: %d\n", + link->dma_pause, link->dsp_run, + link->intr, link->mode ? "DSP->CPU" : "CPU->DSP", + link->offset_mem0, link->offset_mem1, + link->size); + + phys = phys_base + (sizeof(chain_link_t) * (link->link - 1)); + ptr = ptr_base + (4 * (link->link - 1)); + + DETAIL_PRINT("regs phys: 0x%lx off: 0x%lx\n", phys_base, phys); + DETAIL_PRINT("regs ptr: 0x%lx off: 0x%lx\n", ptr_base, ptr); + + regs.ir0.r = regs.ir1.r = regs.cp.r = regs.csr.r = 0L; + /* IR0 */ + list_for_each_entry(tmp, &dev->dma_chain, list) { + if (tmp->link.lnumber == link->link) { + link->offset_mem0 = tmp->link.phys_mem; + break; + } + } + regs.ir0.b.ir = link->offset_mem0; + regs.ir0.b.sel = 1; /* always CPU */ + regs.ir0.b.or = 1; /* ir = ir + (or * 8) */ + /* IR1 */ + regs.ir1.b.ir = link->offset_mem1;//PHYS_NODE(link->offset_mem1); + regs.ir1.b.sel = 0; /* always DSP */ + regs.ir1.b.or = 1; /* ir = ir + (or * 8) */ + /* CP */ + regs.cp.b.adr = phys + sizeof(chain_link_t); + regs.cp.b.sel = 1; /* regs always in CPU */ + //regs.cp.b.run = 1; //not sure - needed or not + /* CSR */ + /* one word = 64 bit */ + regs.csr.b.wn = 0xf; /* words - by default 16 words at once send */ + regs.csr.b.wcx = (link->size)/(128); /* 128 = (64bits/8bits)*16words */ + 
regs.csr.b.dir = link->mode; /* 0 = IOR0->IOR1, 1 = IOR0<-IOR1 */ + regs.csr.b.im = link->intr; + /* regs.csr.b.start_dsp = link->dsp_run; */ + regs.csr.b.chen = link->terminate ? 0 : 1; + + DBG_PRINT("dma:\tSETUP_LINK_REG: 0x%lx 0x%lx 0x%lx 0x%x\n", + regs.ir0.r, + regs.ir1.r, + regs.cp.r, + regs.csr.r); + + ptr[0] = regs.ir0.r; + ptr[1] = regs.ir1.r; + ptr[2] = regs.cp.r; + ptr[3] = regs.csr.r; +} + + +int check_chain_regs(dsp_dev_t *dev) +{ + unsigned long *ptr = NULL; + unsigned long *ptr_base = ((unsigned long *)(dev->link_regs.virt_mem)); + int i = 0; + + DETAIL_PRINT("dma:\tCHECK_LINK_REG\n"); + for (i = 0; i < dev->chain_present; i++) { + ptr = ptr_base + (i * 4); + DBG_PRINT("[%03d]: 0x%lx 0x%lx 0x%lx 0x%x\n", + i, + ptr[0], ptr[1], + ptr[2], ptr[3]); + } + + return 0; +} + + +/* + +: 0x101001c180000 0x1000000000000 0x3fff001c +: 0x101001c180000 0x1000000040000 0x0fff001c +! 0x101001c154000 0x10001c0000000 0x3001c0cb020 0x0080303c + +*/ + + + +/* dma exchange - write and read in chain mode */ +int dma_chain_exchange(dsp_dev_t *dev) +{ +#ifdef __ALL_IN_REG__ + unsigned long *ptr_base = ((unsigned long *)(dev->link_regs.virt_mem)); + union ior_register ir0, ir1; + union csr_register csr; +#endif + union cp_register cp; + unsigned long flags; + + int while_count = 0; + hrtime_t all_s, all_e, s, e; + + if (lock_channel(dev->node, dev->number, dev->chain_channel)) { + ERROR_PRINT("DMA: lock channel error, channel %d - busy\n", + dev->chain_channel); + return -EFAULT; + } + + all_s = gethrtime(); + + DETAIL_PRINT("self:\tCP - 0x%lx 0x%lx\n", + dev->link_regs.virt_mem, + dev->link_regs.phys_mem); + +#ifdef __ALL_IN_REG__ + csr.r = ir0.r = ir1.r = cp.r = 0L; + + ir0.r = ptr_base[0]; + ir1.r = ptr_base[1]; + cp.r = ptr_base[2]; + csr.r = ptr_base[3]; + + DETAIL_PRINT("REGS: 0x%lx 0x%lx 0x%lx 0x%lx\n", + ir0.r, ir1.r, + cp.r, csr.r); +#else + cp.b.adr = dev->link_regs.phys_mem; + cp.b.sel = 1; /* regs always in CPU */ + cp.b.run = 1; + DETAIL_PRINT("CP: 0x%lx\n", 
cp.r); +#endif /*__ALL_IN_REG__*/ + + /* run exchange */ + s = gethrtime(); + SLOCK_IRQSAVE(&dev->spinlock, flags); + + dev->dma.run = 1; + dev->dma.channel = dev->chain_channel; + dev->dma.chain = 1; + +#ifdef __ALL_IN_REG__ + SET_DMA_REG(IOR0, dev->chain_channel, ir0.r); + SET_DMA_REG(IOR1, dev->chain_channel, ir1.r); + SET_DMA_REG(CP, dev->chain_channel, cp.r); + SET_DMA_REG(CSR, dev->chain_channel, csr.r); +#else + SET_DMA_REG(CP, dev->chain_channel, cp.r); +#endif /*__ALL_IN_REG__*/ + +#ifdef __DMA_INTERRUPTS_ON__ + /* waiting ending DMA work */ + while (dev->dma.done == 0) { + while_count++; + + raw_wqueue_t wait_el = {.task = current}; + current->state = TASK_INTERRUPTIBLE; + list_add(&wait_el.task_list, &dev->dma.wait_task_list); + SUNLOCK_IRQREST(&dev->spinlock, flags); + DETAIL_PRINT("while:\t %d\n", while_count); + schedule(); + DETAIL_PRINT("while:\t %d\n", while_count); + SLOCK_IRQSAVE(&dev->spinlock, flags); + list_del(&wait_el.task_list); + current->state = TASK_RUNNING; + if (signal_pending(current)) { + SUNLOCK_IRQREST(&dev->spinlock, flags); + unlock_channel(dev->node, dev->number, dev->chain_channel); + dev->dma.end = 0;//repeat not needed + dev->dma.done = 0; + dev->dma.run = 0; + dev->dma.chain = 0; + dev->dma.channel = -1; + return -ERESTARTSYS; + } + } +#else + SUNLOCK_IRQREST(&dev->spinlock, flags); + /* for debug without interrupts */ + while (!reg_csr.b.done) { + old_csr.r = reg_csr.r; + reg_csr.r = GET_DMA_REG(CSR, dev->chain_channel); + while_count++; + if (while_count > 9999999) + break; + } +#endif + + dev->dma.end = 0;//repeat not needed + dev->dma.done = 0; + dev->dma.run = 0; + dev->dma.chain = 0; // off chain mode + dev->dma.channel = -1; + + SUNLOCK_IRQREST(&dev->spinlock, flags); + e = gethrtime(); + + unlock_channel(dev->node, dev->number, dev->chain_channel); + + all_e = gethrtime(); + + DETAIL_PRINT("dma selfinit: event count %d\n", while_count); + DETAIL_PRINT("dma:\tDSP DMA done: time: DMA - %lu, " + "DMA plus system - 
%lu\n", + e - s, + all_e - all_s); + + return 0; +} + + +void delete_dma_chain(dsp_dev_t *dev) +{ + struct list_head *entry, *tent; + struct chain_list *tmp; + + DETAIL_PRINT("before delete chain\n"); + + if (dev->chain_present <= 0) { + DBG_PRINT("chain not exists\n"); + return; + } + + free_dma(dev->link_regs.page_adr, dev->link_regs.size); + + list_for_each_safe(entry, tent, &dev->dma_chain) { + tmp = list_entry(entry, struct chain_list, list); + DETAIL_PRINT("delete chain: %d\n", tmp->link.lnumber); + free_dma(tmp->link.page_adr, tmp->link.size); + list_del(entry); + kfree(tmp); + } + DETAIL_PRINT("after delete chain\n"); + dev->chain_present = 0; + return; +} + + +int add_link_to_dma_chain(dsp_dev_t *dev, int pages) +{ + chain_list_t *tchain = NULL; + + //dbg: later move create regs pool in other place + if (dev->chain_present == 0) { + dma_state_t *l = &dev->link_regs; + DETAIL_PRINT("kmalloc for chain_regs\n"); + l->size = PAGE_SIZE; + l->real_size = alloc_dma(&l->page_adr, + &l->virt_mem, + &l->phys_mem, + l->size); + DETAIL_PRINT("after create poll for links regs: %d %d\n", + l->real_size, + l->size); + if (l->real_size <= 0) { + ERROR_PRINT("DMA: DSP[%d]: error allocate buffer\n", + dev->number); + return -ENOMEM; + } + } + + DETAIL_PRINT("kmalloc for tchain\n"); + tchain = kmalloc(sizeof(chain_list_t), GFP_KERNEL); + if (tchain == NULL) { + ERROR_PRINT("chain:\tDSP: %d. 
" + "Can't allocate memory for chain_list_t.\n", + dev->number); + free_dma(dev->link_regs.page_adr, dev->link_regs.size); + return -ENOMEM; + } + + DETAIL_PRINT("before create link to chain\n"); + /* create and setup new DMA buffers */ + tchain->link.size = PAGE_SIZE * pages; + tchain->link.real_size = alloc_dma(&tchain->link.page_adr, + &tchain->link.virt_mem, + &tchain->link.phys_mem, + tchain->link.size); + DETAIL_PRINT("after create link to chain: %d %d\n", + tchain->link.real_size, + tchain->link.size); + if (tchain->link.real_size <= 0) { + ERROR_PRINT("init: DSP[%d]: error allocate DMA buffer\n", + dev->number); + free_dma(dev->link_regs.page_adr, dev->link_regs.size); + kfree(tchain); + return -ENOMEM; + } + + dev->chain_present++; + tchain->link.lnumber = dev->chain_present; + + DETAIL_PRINT("chain DMA allocate[%d:%d:%2d]: 0x%lx 0x%lx 0x%lx %d %d\n", + dev->node, dev->number, dev->minor, + (unsigned long)tchain->link.virt_mem, + tchain->link.page_adr, + tchain->link.phys_mem, + tchain->link.size, + tchain->link.real_size); + + list_add_tail(&tchain->list, &dev->dma_chain); + + return tchain->link.lnumber; +} +#endif /*__CHAIN_MODE_ON__*/ +#endif /*__DMA_ON__*/ + + +void get_status(dsp_dev_t *dev, dsp_status_t *tstatus) +{ + tstatus->number = dev->minor; + tstatus->run = GETBIT(DCSR, 14); + tstatus->wait = GETBIT(DCSR, 4); + tstatus->reason = dev->reason; + tstatus->state = (GET_DSP_REG(SR) & 0xff); + tstatus->mail = GET_DSP_REG(EFR); /* needed ??? */ +} + + +static long dsp_ioctl(struct file *filp, + unsigned int cmd, unsigned long arg) +{ + int err = 0; + dsp_dev_t *dev = (dsp_dev_t *)filp->private_data; + int tmp = 0; + int retval = 0; + unsigned long flags; + + DETAIL_PRINT("ioctl:\tdev[%d, %d]: node: %d, number: %d, minor: %d. 
" + "(cpu: node: %d, id: %d)\n", + major, (dev->minor & 0x0f), + dev->node, dev->number, dev->minor, + numa_node_id(), raw_smp_processor_id()); + + MLOCK(&dev->ioctl_mutex); + +#ifdef MCST_INCLUDE_IOCTL + if (cmd == MCST_SELFTEST_MAGIC) { + selftest_t st; + selftest_nonbus_t *st_nbus = &st.info.nonbus; + + DETAIL_PRINT("ioctl:\tSELFTEST\n"); + + st.bus_type = BUS_NONE; + st.error = 0; /* temporary unused */ + + st_nbus->major = major; + st_nbus->minor = dev->minor; + + strncpy(st_nbus->name, dsp_dev_name, 255); + DBG_PRINT("%s: [%d][%d][%s].\n", + __func__, + st_nbus->major, st_nbus->minor, st_nbus->name); + + if (copy_to_user((selftest_t __user *)arg, + &st, + sizeof(selftest_t))) { + ERROR_PRINT("%s: MCST_SELFTEST_MAGIC: " + "copy_to_user() failed\n", + __func__); + retval = -EFAULT; + } + goto ioctl_end; + } +#endif + + if (_IOC_TYPE(cmd) != DSP_IOC_MAGIC) {retval = -ENOTTY; goto ioctl_end;} + if (_IOC_NR(cmd) > DSP_IOC_MAXNR) {retval = -ENOTTY; goto ioctl_end;} + if (_IOC_DIR(cmd) & _IOC_READ) + err = !access_ok((void __user *)arg, _IOC_SIZE(cmd)); + else if (_IOC_DIR(cmd) & _IOC_WRITE) + err = !access_ok((void __user *)arg, _IOC_SIZE(cmd)); + if (err) {retval = -EFAULT; goto ioctl_end;} + + + switch(cmd) { + + case DSP_GET_STATUS: + { + dsp_status_t tstatus; + DETAIL_PRINT("ioctl:\tDSP_GET_STATUS\n"); + + SLOCK_IRQSAVE(&dev->spinlock, flags); + get_status(dev, &tstatus); + SUNLOCK_IRQREST(&dev->spinlock, flags); + + if (copy_to_user((dsp_status_t __user *)arg, + &tstatus, + sizeof(dsp_status_t))) + { + ERROR_PRINT("ioctl: DSP_GET_STATUS\n"); + retval = -EFAULT; + break; + } + } + break; + + case DSP_GET_FULL_STATUS: + { + dsp_fstatus_t fstatus; + DETAIL_PRINT("ioctl:\tDSP_GET_FULL_STATUS\n"); + + SLOCK_IRQSAVE(&dev->spinlock, flags); + fstatus.dcsr = GET_DSP_REG(DCSR); + fstatus.dcsr_i = dev->dcsr_i; + fstatus.irqr = GET_DSP_REG(IRQR); + fstatus.imaskr = GET_DSP_REG(IMASKR); + SUNLOCK_IRQREST(&dev->spinlock, flags); + + fstatus.sp_i = dev->sp_i; + fstatus.sr = 
GET_DSP_REG(SR); + fstatus.idr = GET_DSP_REG(IDR); + fstatus.efr = GET_DSP_REG(EFR); + fstatus.tmr = GET_DSP_REG(TMR); + fstatus.arbr = GET_DSP_REG(ARBR); + fstatus.pc = GET_DSP_REG(PC); + fstatus.ss = GET_DSP_REG(SS); + fstatus.la = GET_DSP_REG(LA); + fstatus.csl = GET_DSP_REG(CSL); + fstatus.lc = GET_DSP_REG(LC); + fstatus.csh = GET_DSP_REG(CSH); + fstatus.sp = GET_DSP_REG(SP); + fstatus.sar = GET_DSP_REG(SAR); + fstatus.cntr = GET_DSP_REG(CNTR); + + if (copy_to_user((dsp_fstatus_t __user *)arg, + &fstatus, + sizeof(dsp_fstatus_t))) + { + ERROR_PRINT("ioctl: DSP_GET_FULL_STATUS\n"); + retval = -EFAULT; + break; + } + } + break; + + /* use chain DMA exchange */ + case DSP_RUN_CHAIN: + { + DETAIL_PRINT("ioctl:\tDSP_RUN_CHAIN\n"); +#ifdef __CHAIN_MODE_ON__ +#ifdef __DMA_ON__ + + retval = check_chain_regs(dev); + retval = dma_chain_exchange(dev); +#endif /*__DMA_ON__*/ +#else + WARNING_PRINT("Chain mode are - OFF !\n"); + retval = -EFAULT; +#endif /*__CHAIN_MODE_ON__*/ + } + break; + + + /* create chain DMA exchange */ + case DSP_SETUP_CHAIN: + { + DETAIL_PRINT("ioctl:\tDSP_SETUP_CHAIN\n"); +#ifdef __CHAIN_MODE_ON__ +#ifdef __DMA_ON__ + setup_chain_t chain; + if (copy_from_user(&chain, + (void __user *)arg, + sizeof(setup_chain_t))) + { + ERROR_PRINT("ioctl: DMA copy from user\n"); + retval = -EFAULT; + break; + } else { + DETAIL_PRINT("chain: send at %d channel\n", + chain.channel); + if (chain.channel < 0 || chain.channel > MAX_DMA) { + dev->chain_channel = dev->number * 2 + 1; + WARNING_PRINT("DMA channel wrong: %d - " + "setup channel: %d\n", + chain.channel, + dev->chain_channel); + } else { + dev->chain_channel = chain.channel; + DETAIL_PRINT("DMA channel for chain: %d\n", + dev->chain_channel); + } + + if (chain.size_in_pages > 0) { + dev->link_size = chain.size_in_pages; + DETAIL_PRINT("size one link of chain " + "setup in %d pages\n", + dev->link_size); + } else { + WARNING_PRINT("size link of chain can not be " + "less then one page: %d. 
size = %d\n", + chain.size_in_pages, dev->link_size); + } + } +#endif /*__DMA_ON__*/ +#else + WARNING_PRINT("Chain mode are - OFF !\n"); + retval = -EFAULT; +#endif /*__CHAIN_MODE_ON__*/ + } + break; + + + /* delete chain DMA */ + case DSP_DELETE_CHAIN: + { + DETAIL_PRINT("ioctl:\tDSP_DELETE_CHAIN\n"); +#ifdef __CHAIN_MODE_ON__ +#ifdef __DMA_ON__ + delete_dma_chain(dev); +#endif /*__DMA_ON__*/ +#else + WARNING_PRINT("Chain mode are - OFF !\n"); + retval = -EFAULT; +#endif /*__CHAIN_MODE_ON__*/ + } + break; + + + /* create chain DMA exchange */ + case DSP_SETUP_LINK: + { + DETAIL_PRINT("ioctl:\tDSP_SETUP_LINK\n"); +#ifdef __CHAIN_MODE_ON__ +#ifdef __DMA_ON__ + setup_link_t link; + if (copy_from_user(&link, + (void __user *)arg, + sizeof(setup_link_t))) + { + ERROR_PRINT("ioctl: DMA copy from user\n"); + retval = -EFAULT; + break; + } else { + setup_target_link(dev, &link); + } +#endif /*__DMA_ON__*/ +#else + WARNING_PRINT("Chain mode are - OFF !\n"); + retval = -EFAULT; +#endif /*__CHAIN_MODE_ON__*/ + } + break; + + + /* DBG: test chain DMA exchange */ + case DSP_TEST_CHAIN: + { + DETAIL_PRINT("ioctl:\tDSP_TEST_CHAIN\n"); +#ifdef __CHAIN_MODE_ON__ +#ifdef __DMA_ON__ + if (dev->chain_present > 0) { + struct chain_list *tmp; + list_for_each_entry(tmp, &dev->dma_chain, list) { + DETAIL_PRINT("chain: %d [0x%08x] [0x%08x]\n", + tmp->link.lnumber, + tmp->link.virt_mem[0], + tmp->link.virt_mem[9]); + } + } else { + DBG_PRINT("chain not exists\n"); + } +#endif /*__DMA_ON__*/ +#else + WARNING_PRINT("Chain mode are - OFF !\n"); + retval = -EFAULT; +#endif /*__CHAIN_MODE_ON__*/ + } + break; + + + /* DMA exchange between DSP */ + case DSP_TO_DSP_WRITE: + { + DETAIL_PRINT("ioctl:\tDSP_TO_DSP_WRITE\n"); +#ifdef __DMA_ON__ + dsp_dma_setup_t setup; + if (copy_from_user(&setup, + (void __user *)arg, + sizeof(dsp_dma_setup_t))) + { + ERROR_PRINT("ioctl: DMA copy from user\n"); + retval = -EFAULT; + break; + } + + /* offset must be DSP memory */ + setup.offset_mem0 = 
setup.offset_mem0; + setup.offset_mem1 = setup.offset_mem1; + + retval = dma_exchange(dev, &setup, DSP_DSP); +#endif /*__DMA_ON__*/ + } + break; + + /* data must be writing to DMA-area from user */ + case DSP_DMA_WRITE: + { + DETAIL_PRINT("ioctl:\tDSP_DMA_WRITE\n"); +#ifdef __DMA_ON__ + dsp_dma_setup_t setup; + if (copy_from_user(&setup, + (void __user *)arg, + sizeof(dsp_dma_setup_t))) + { + ERROR_PRINT("ioctl: DMA copy from user\n"); + retval = -EFAULT; + break; + } + + /* offset must be DSP memory */ + setup.offset_mem0 = setup.offset_mem0; /* now not used */ + setup.offset_mem1 = setup.offset_mem1; /* offset in DSP memory*/ + + dma_exchange(dev, &setup, TO_DSP); + + /* + if (copy_to_user((void __user *)arg, + &setup, + sizeof(dsp_dma_setup_t))) + { + ERROR_PRINT("ioctl: DMA copy to user\n"); + retval = -EFAULT; + break; + } + */ +#endif /*__DMA_ON__*/ + } + break; + + + /* data from DMA-area must be reading at user */ + case DSP_DMA_READ: + { + DETAIL_PRINT("ioctl:\tDSP_DMA_READ\n"); +#ifdef __DMA_ON__ + dsp_dma_setup_t setup; + if (copy_from_user(&setup, + (void __user *)arg, + sizeof(dsp_dma_setup_t))) + { + ERROR_PRINT("ioctl: DMA copy from user\n"); + retval = -EFAULT; + break; + } + + /* offset must be DSP memory */ + setup.offset_mem0 = setup.offset_mem0; /* now not used */ + setup.offset_mem1 = setup.offset_mem1; /* offsetin DSP memory */ + + dma_exchange(dev, &setup, FROM_DSP); + + /* + if (copy_to_user((void __user *)arg, + &setup, + sizeof(dsp_dma_setup_t))) + { + ERROR_PRINT("ioctl: DMA copy to user\n"); + retval = -EFAULT; + break; + } + */ +#endif /*__DMA_ON__*/ + } + break; + + case DSP_RUN: + DETAIL_PRINT("ioctl:\tDSP_RUN:\t %d %d %d\n", + dev->node, dev->number, dev->minor); + retval = __get_user(tmp, (unsigned int __user *)arg); + SLOCK_IRQSAVE(&dev->spinlock, flags); + dsp_run(dev, tmp); + SUNLOCK_IRQREST(&dev->spinlock, flags); + retval = 0; + break; + + case DSP_RUN_ALL: + { + dsp_run_all_setup_t setup; + int i; + dsp_dev_t *tmp_dev; + + 
DETAIL_PRINT("ioctl:\tDSP_RUN_ALL\n"); + + if (copy_from_user(&setup, + (void __user *)arg, + sizeof(dsp_run_all_setup_t))) + { + ERROR_PRINT("ioctl: DSP_RUN_ALL copy from user\n"); + retval = -EFAULT; + break; + } + + SLOCK_IRQSAVE(&dev->spinlock, flags); + for (i = 0; i < dsp_numbers_devs; i++) { + if ((setup.adr[i] != -1) && + (dsp_devices[dsp_minors[i]]->run != 1)) { + tmp_dev = dsp_devices[dsp_minors[i]]; + + tmp_dev->run = 1; + tmp_dev->mem_error = 0; + + DETAIL_PRINT("ioctl: RUN_ALL %d: adr - 0x%x\n", + i, setup.adr[i]); + /* may be bug - check it */ + nSET_DSP_REG(PC, tmp_dev->node, + tmp_dev->number, setup.adr[i]); + } + } + + for (i = 0; i < node_numbers; i++) + SETBIT_node(CSR_DSP, on_nodes[i], 0); + + SUNLOCK_IRQREST(&dev->spinlock, flags); + + retval = 0; + } + break; + + case DSP_WAIT: + { + hrtime_t s, e; + int err = 0; + + DETAIL_PRINT("ioctl:\tDSP_WAIT: 0x%x\n", DSP_WAIT); + + s = gethrtime(); + SLOCK_IRQSAVE(&dev->spinlock, flags); + while (dev->run == 1) { + raw_wqueue_t wait_el = {.task = current}; + current->state = TASK_INTERRUPTIBLE; + list_add(&wait_el.task_list, &dev->wait_task_list); + SUNLOCK_IRQREST(&dev->spinlock, flags); + schedule(); + SLOCK_IRQSAVE(&dev->spinlock, flags); + list_del(&wait_el.task_list); + current->state = TASK_RUNNING; + if (signal_pending(current)) { + dsp_status_t tstatus; + get_status(dev, &tstatus); + SUNLOCK_IRQREST(&dev->spinlock, flags); + retval = -ERESTARTSYS; + DETAIL_PRINT("ioctl:\tDSP_WAIT end " + "on signal pending\n"); + DETAIL_PRINT("ioctl:\tstate: run: %d wait: " + "0x%x rs: 0x%x st 0x%x\n", + tstatus.run, + tstatus.wait, + tstatus.reason, + tstatus.state); + goto ioctl_end; + } + if (dev->run == 0) + break; + /* + SUNLOCK_IRQREST(&dev->spinlock, flags); + if (wait_event_interruptible(dev->wait_queue, + (dev->run == 0))) + { + retval = -ERESTARTSYS; + goto ioctl_end; + } + SLOCK_IRQSAVE(&dev->spinlock, flags); + */ + } + err = dev->mem_error; + SUNLOCK_IRQREST(&dev->spinlock, flags); + e = 
gethrtime(); + + //DETAIL_PRINT("ioctl:\tDSP_WAIT end.\n"); + if (err) { + DETAIL_PRINT("ioctl:\tDSP_WAIT end on error, " + "wait time: %lu\n", e - s); + retval = -EIO; + break; + } + DETAIL_PRINT("ioctl:\tDSP_WAIT end, wait time: %lu\n", e - s); + } + + retval = 0; + break; + + case DSP_WAIT_ACTIVE: + { + int run = dev->run; + int t_count = 0; + hrtime_t s, e; + DETAIL_PRINT("ioctl:\tDSP_WAIT_ACTIVE: %d\n", run); + + retval = __get_user(tmp, (unsigned int __user *)arg); + + DBG_PRINT("ioctl:\tDSP_WAIT_ACTIVE: time_wait: %d\n", tmp); + + s = gethrtime(); + while(1) { + e = gethrtime(); + if (((e - s)/1000000) >= tmp) + break; + } + s = gethrtime(); + while (run) { + run = GETBIT(DCSR, 14); + t_count++; + if (t_count > 9999999) + break; + } + e = gethrtime(); + + SLOCK_IRQSAVE(&dev->spinlock, flags); + dev->run = 0; + SUNLOCK_IRQREST(&dev->spinlock, flags); + + DETAIL_PRINT("ioctl:\tDSP_WAIT_ACTIVE end: %d %d, time: %lu\n", + run, t_count, e - s); + } + + retval = 0; + break; + + case DSP_STOP: + DETAIL_PRINT("ioctl:\tDSP_STOP\n"); + SLOCK_IRQSAVE(&dev->spinlock, flags); //trylock ? + dsp_stop(dev); + SUNLOCK_IRQREST(&dev->spinlock, flags); + retval = 0; + break; + + case DSP_RESET: + DETAIL_PRINT("ioctl:\tDSP_RESET\n"); + SLOCK_IRQSAVE(&dev->spinlock, flags); + retval = dsp_reset(dev); + if (retval) + retval = -EBUSY; + else + retval = 0; + SUNLOCK_IRQREST(&dev->spinlock, flags); + dsp_clear_memory(dev); + break; + + case DSP_SET_TIMER: + { + DETAIL_PRINT("ioctl:\tDSP_SET_TIMER\n"); + + retval = __get_user(tmp, (unsigned int __user *)arg); + + SLOCK_IRQSAVE(&dev->spinlock, flags); + SET_DSP_REG(TMR, tmp); + SUNLOCK_IRQREST(&dev->spinlock, flags); + + DETAIL_PRINT("ioctl:\tDSP_SET_TIMER: %u\n", tmp); + } + break; + + case DSP_SET_MAIL_MODE: + { + retval = __get_user(tmp, (unsigned int __user *)arg); + + DETAIL_PRINT("ioctl:\tDSP_SET_MAIL_MODE: %s\n", + tmp ? 
"SYNC" : "NORMAL"); + SLOCK_IRQSAVE(&dev->spinlock, flags); + if (tmp) { + SETBIT_node(CSR_DSP, dev->node, 1); + } else { + CLRBIT_node(CSR_DSP, dev->node, 1); + } + SUNLOCK_IRQREST(&dev->spinlock, flags); + } + break; + + case DSP_SEND_MAIL: + { + dsp_mail_box_t mail; + + if (copy_from_user(&mail, + (struct dsp_mail_box __user *)arg, + sizeof(struct dsp_mail_box))) + { + ERROR_PRINT("ioctl: MAIL copy from user\n"); + retval = -EFAULT; + break; + } + + DETAIL_PRINT("ioctl:\tDSP_SEND_MAIL: %u 0x%lx\n", + mail.box, + mail.value); + + if (mail.box > 63) { + retval = -EFAULT; + break; + } + + SLOCK_IRQSAVE(&dev->spinlock, flags); + writeq(mail.value, XBUF(mail.box)); /* send */ + SUNLOCK_IRQREST(&dev->spinlock, flags); + } + break; + + case DSP_GET_MAIL: + { + dsp_mail_box_t mail; + DETAIL_PRINT("ioctl:\tDSP_GET_MAIL\n"); + if (copy_from_user(&mail, + (dsp_mail_box_t __user *)arg, + sizeof(dsp_mail_box_t))) + { + ERROR_PRINT("ioctl: MAIL copy from user\n"); + retval = -EFAULT; + break; + } + + if (mail.box > 63) { + retval = -EFAULT; + break; + } + + SLOCK_IRQSAVE(&dev->spinlock, flags); + mail.value = readq(XBUF(mail.box)); /* get */ + SUNLOCK_IRQREST(&dev->spinlock, flags); + + DETAIL_PRINT("ioctl:\tDSP_GET_MAIL: %u 0x%lx\n", + mail.box, + mail.value); + + if (copy_to_user((dsp_mail_box_t __user *)arg, + &mail, + sizeof(dsp_mail_box_t))) + { + ERROR_PRINT("ioctl: MAIL copy to user\n"); + retval = -EFAULT; + break; + } + } + break; + + case DSP_SET_APIC_MASK: + { + dsp_apic_mask_t mask; + + DETAIL_PRINT("ioctl:\tIOCTL SET_APIC_MASK\n"); + DETAIL_PRINT("ioctl:\tget masks: " + "node: %d: cpu0: 0x%x, cpu1: 0x%x\n", + dev->node, + GET_APIC_REG(IC_MR0), + GET_APIC_REG(IC_MR1)); + + if (copy_from_user(&mask, + (dsp_apic_mask_t __user *)arg, + sizeof(dsp_apic_mask_t))) + { + ERROR_PRINT("ioctl: SET_APIC_MASK copy from user\n"); + retval = -EFAULT; + break; + } else { + DBG_PRINT("ioctl:\tget from user masks: " + "cpu0: 0x%x, cpu1: 0x%x\n", + mask.cpu0, mask.cpu1); + } + + 
SLOCK_IRQSAVE(&global_lock, flags); + SET_APIC_REG(IC_MR0, mask.cpu0); + /* + * LCC compilator bug workaround: second register set is + * compiled as speculative read/wrtite ti IO space, so + * 'nop' separator command temporary is added + */ + E2K_CMD_SEPARATOR; + SET_APIC_REG(IC_MR1, mask.cpu1); + SUNLOCK_IRQREST(&global_lock, flags); + + DETAIL_PRINT("ioctl:\tset masks: cpu0: 0x%x, cpu1: 0x%x\n", + GET_APIC_REG(IC_MR0), + GET_APIC_REG(IC_MR1)); + } + break; + + case DSP_GET_APIC_MASK: + { + dsp_apic_mask_t mask; + + DETAIL_PRINT("ioctl:\tIOCTL GET_APIC_MASK\n"); + + SLOCK_IRQSAVE(&dev->spinlock, flags); + mask.cpu0 = GET_APIC_REG(IC_MR0); + mask.cpu1 = GET_APIC_REG(IC_MR1); + SUNLOCK_IRQREST(&dev->spinlock, flags); + + DETAIL_PRINT("ioctl:\tget masks: " + "node: %d: cpu0: 0x%x, cpu1: 0x%x\n", + dev->node, + mask.cpu0, + mask.cpu1); + + if (copy_to_user((dsp_apic_mask_t __user *)arg, + &mask, + sizeof(dsp_apic_mask_t))) + { + ERROR_PRINT("ioctl: GET_APIC_MASK copy from user\n"); + retval = -EFAULT; + break; + } + } + break; + + + case DSP_SET_INTR_MASK: + { + unsigned int tdsp; + + DETAIL_PRINT("ioctl:\tIOCTL SET_INTR_MASK\n"); + DETAIL_PRINT("ioctl:\tget masks: node: %d, intr: 0x%x\n", + dev->node, + GET_CLUSTER_REG(MASKR_DSP)); + + __get_user(tdsp, (unsigned int __user *)arg); + SLOCK_IRQSAVE(&global_lock, flags); + SET_CLUSTER_REG(MASKR_DSP, tdsp); + SUNLOCK_IRQREST(&global_lock, flags); + + DETAIL_PRINT("ioctl:\tget masks: node: %d, intr: 0x%x\n", + dev->node, + GET_CLUSTER_REG(MASKR_DSP)); + retval = 0; + } + break; + + + case DSP_GET_INTR_MASK: + { + unsigned int tdsp; + + DETAIL_PRINT("ioctl:\tIOCTL GET_INTR_MASK\n"); + tdsp = GET_CLUSTER_REG(MASKR_DSP); + DETAIL_PRINT("ioctl:\tget masks: node: %d, intr: 0x%x\n", + dev->node, + tdsp); + + __put_user(tdsp, (unsigned int __user *)arg); + retval = 0; + } + break; + + + /*next cases must be DELETE for release !!!*/ + case DSP_SETIRQ_IOCTL: + DETAIL_PRINT("ioctl:\tSET IRQ\n"); + __get_user(tmp, (unsigned int 
__user *)arg); + + SLOCK_IRQSAVE(&dev->spinlock, flags); + SET_DSP_REG(CNTR, tmp); + SUNLOCK_IRQREST(&dev->spinlock, flags); + + retval = 0; + break; + + + case DSP_TEST_MEMORY: + { + __get_user(tmp, (unsigned int __user *)arg); + + SLOCK_IRQSAVE(&dev->spinlock, flags); + if (tmp >= 0 && tmp <= 0xc47ff8) { + if (tmp >= 0xc40000) + retval = (int)readl(nPRAM(dev->node, 3) + + (tmp - 0xc40000)); + else if (tmp >= 0xc00000) + retval = (int)readl(nXYRAM(dev->node, 3) + + (tmp - 0xc00000)); + else if (tmp >= 0x840000) + retval = (int)readl(nPRAM(dev->node, 2) + + (tmp - 0x840000)); + else if (tmp >= 0x800000) + retval = (int)readl(nXYRAM(dev->node, 2) + + (tmp - 0x800000)); + else if (tmp >= 0x440000) + retval = (int)readl(nPRAM(dev->node, 1) + + (tmp - 0x440000)); + else if (tmp >= 0x400000) + retval = (int)readl(nXYRAM(dev->node, 1) + + (tmp - 0x400000)); + else if (tmp >= 0x040000) + retval = (int)readl(nPRAM(dev->node, 0) + + (tmp - 0x040000)); + else if (tmp >= 0x000000) + retval = (int)readl(nXYRAM(dev->node, 0) + + (tmp - 0x000000)); + } else { + retval = (int)0x12345; + } + + SUNLOCK_IRQREST(&dev->spinlock, flags); + DETAIL_PRINT("ioctl:\tdsp memory check:\t: " + "offset: 0x%x, [0x%x]\n", + tmp, (int)(retval)); + } + break; + + default: + ERROR_PRINT("ioctl:\tUnknown command: 0x%x\n", cmd); + retval = -EINVAL; + } + +ioctl_end: + MUNLOCK(&dev->ioctl_mutex); + + DETAIL_PRINT("ioctl:\tend\n"); + + return retval; +} + +static const struct file_operations dsp_fops = { + .owner = THIS_MODULE, + .open = dsp_open, + .release = dsp_release, + .read = dsp_read, /*not implemented*/ + .write = dsp_write, /*not implemented*/ + .unlocked_ioctl = dsp_ioctl, + .poll = dsp_poll, + .mmap = dsp_mmap, +}; + + +#ifdef __CATCH_INTERRUPT_ON__ +/* collect mask */ +static inline int interrupt_analyze(int interrupt) +{ + int i, mask = 0; + + for (i = 0; i < MAX_DSP; i++) { + if (interrupt & mask_intr[i]) + mask |= (1 << i); + } + + return mask; +} + + +/* + * interrupts to DSP and from 
DSP haven't priority + * all interrupts execute by sequence ? + */ +void dsp_interrupt_handler(struct pt_regs *regs) +{ + int i; + int receiver = 0; /* bitmask */ + int DMA_receiver = 0; /* bitmask */ + static unsigned long long icount = 0; + +/* defines for short strings */ +#define TN dsp_node[node.number] + +#if DEBUG_MODE + static int count_error = 0; +#endif + interrupt_t node; + int mem_error = 0; + unsigned long flags; + + SLOCK_IRQSAVE(&interrupt_lock, flags); + node.number = numa_node_id(); + SUNLOCK_IRQREST(&interrupt_lock, flags); + + node.r[0] = nGET_APIC_REG(IC_IR0, node.number); + node.r[1] = nGET_APIC_REG(IC_IR1, node.number); + node.generic = node.r[0] | node.r[1]; + + icount++; + +#ifndef __ALL_ONLINE_NODE__ + if (node.number > 0) { + WARNING_PRINT("intr:\t\tnode: %d: impossible, " + "this node are OFF for interrupts\n", + node.number); + } +#endif + + /* impossible situation */ + if (node.generic == 0) { +#if DEBUG_MODE + count_error++; + if (count_error > 1000) { + WARNING_PRINT("intr:\t\tnode: %d: impossible, " + "IC_IR0 & IC_IR1 == 0\n", node.number); + count_error = 0; + } +#endif + + /* + * there must be clear interrupt in APIC: + * ack_APIC_irq(); + * but now it called in e2k.c + */ + return; + } + + DBG_PRINT("intr:\t[%08lld] handler: 0x%x |= (0x%x | 0x%x). 
cpu: " \ + "node: %d, id: %d\n", + icount, node.generic, node.r[0], node.r[1], + node.number, raw_smp_processor_id()); + + /* check for DMA */ + if (node.generic & 0x00ff) { + int i = 0; + unsigned long flags; + DETAIL_PRINT("intr:\tDMA interrupt, generic: 0x%x\n", + node.generic); + for (i = 0; i < MAX_DMA; i++) + if ((node.generic >> i) & 1) { + DETAIL_PRINT("intr:\tchannel: %d\n", i); + SLOCK_IRQSAVE(&interrupt_lock, flags); + /*for DMA_receiver: 0,1 - DSP0; 2,3 - DSP1 ...*/ + DMA_receiver |= processing_DMA(node.number, i); + SUNLOCK_IRQREST(&interrupt_lock, flags); + DETAIL_PRINT("intr:\tDMA receiver: 0x%x " + "(chn: %d)\n", + DMA_receiver, i); + } + node.generic &= 0xff00;/* needed clear ??? */ + } + + + /* + * get mask for DSP, who set interrupt + * receiver for node.number + */ + receiver = interrupt_analyze(node.generic); + + DBG_PRINT("intr:\t[%08lld] receiver: 0x%x, QSTR: 0x%x, DMA: 0x%x\n", + icount, receiver, + nGET_CLUSTER_REG(QSTR_DSP, node.number), + DMA_receiver); + + if (receiver == 0) + goto done; + + /* added interupts to device mask */ + SLOCK_IRQSAVE(&interrupt_lock, flags); + for (i = 0; i < MAX_DSP; i++) { + if (receiver & (1 << i)) { + TN.dsp[i]->interrupts |= (node.generic & mask_intr[i]); + TN.dsp[i]->tmp_all_intr |= node.generic; + } + } + SUNLOCK_IRQREST(&interrupt_lock, flags); + + if (node.generic & 0x0f00) { + int i = 0; + unsigned int i_tmp = (node.generic >> 8) & 0xf; + DBG_PRINT("intr:\t[%08lld] STOP interrupt\n", icount); + for (i = 0; i < MAX_DSP; i++) + if ((i_tmp >> i) & 1) { + DETAIL_PRINT("intr:\tstop device: %d:%d:%d\n", + node.number, i, + (node.number * MAX_NODE) + i); + SLOCK_IRQSAVE(&interrupt_lock, flags); + TN.dsp[i]->state = + nGET_DSP_REG(SR, node.number, i) & 0xff; + TN.dsp[i]->run = 0; + /* stop */ + nSET_DSP_REG(DCSR, node.number, i, 0x0); + SUNLOCK_IRQREST(&interrupt_lock, flags); + } + } + + if (node.generic & 0xf000) { + int i = 0; + unsigned int i_tmp = (node.generic >> 12); + DBG_PRINT("intr:\t[%08lld] DSP 
interrupt\n", icount); + for (i = 0; i < MAX_DSP; i++) + if ((i_tmp >> i) & 1) { + DETAIL_PRINT("intr:\tdevice: %d\n", + (node.number * MAX_NODE) + i); + SLOCK_IRQSAVE(&interrupt_lock, flags); + processing_other_reason(TN.dsp[i]); + SUNLOCK_IRQREST(&interrupt_lock, flags); + } + mem_error = TN.dsp[0]->mem_error; + } + +done: + + for (i = 0; i < MAX_DSP; i++) { + if (DMA_receiver & (1 << i)) { + dsp_dma_processing(TN.dsp[i]); + } + + if (receiver & (1 << i)) { + dsp_interrupt_processing(TN.dsp[i]); + } + } + + /* + * there must be clear interrupt in APIC: + * ack_APIC_irq(); + * but now it called in e2k.c + */ + + return; +} + + +static inline void processing_other_reason(dsp_dev_t *dev) +{ + unsigned int reason = 0; + int not_clear = 1; /* flag for mark reason */ + static unsigned int count_error = 0; + /*unsigned long flags;*/ + + //SLOCK_IRQSAVE(&interrupt_lock, flags); + reason = (GET_CLUSTER_REG(QSTR_DSP) >> (8 * dev->number)) & 0xff; + dev->reason = reason; + dev->mem_error = 0; + dev->dcsr_i = GET_DSP_REG(DCSR); + dev->sp_i = GET_DSP_REG(SP); + + switch(reason) { + case (1 << 0): /* PI - write in IDR anything */ + SET_DSP_REG(IDR, 0x1); /* clear == write in IDR anything */ + not_clear = 0; + break; + case (1 << 1): /* SE - clear SP */ + SET_DSP_REG(SP, 0x0); /* for clear needed write zero in SP */ + SET_DSP_REG(DCSR, 0x0); /* and may be DCSR */ + not_clear = 0; + dev->run = 0; + break; + case (1 << 2): /* BREAK - clear DCSR */ + SET_DSP_REG(DCSR, 0x0); + not_clear = 0; + dev->run = 0; + break; + case (1 << 3): /* STOP - clear DCSR */ + SET_DSP_REG(DCSR, 0x0); + not_clear = 0; + dev->run = 0; + break; + case (1 << 7): /* dbDCSR - clear dbDCSR */ + SET_DSP_REG(dbDCSR, 0x0); + not_clear = 0; + dev->run = 0; + break; + } + //SUNLOCK_IRQREST(&interrupt_lock, flags); + + DBG_PRINT("other reason: QSTR_DSP (with mask): " + "[0x%d], DCSR: [0x%x], SP: [0x%x]\n", + reason, dev->dcsr_i, dev->sp_i); + + /* this switch only for dbg print */ + switch(reason) { + case (1 << 
0): /* PI - write in IDR anything */ + DBG_PRINT("reason:\tPI\n"); + break; + case (1 << 1): /* SE - clear SP */ + DBG_PRINT("reason:\tSE\n"); + break; + case (1 << 2): /* BREAK - clear DCSR */ + DBG_PRINT("reason:\tBREAK\n"); + break; + case (1 << 3): /* STOP - clear DCSR */ + DBG_PRINT("reason:\tSTOP\n"); + break; + case (1 << 7): /* dbDCSR - clear dbDCSR */ + DBG_PRINT("reason:\tdBREAK\n"); + break; + } + + /* all DSPs wait or parity error or dbDCSR in DSP[0] at each cluster */ + if (not_clear && (dev->number == 0)) { + DBG_PRINT("[%d:%d:%02d]:\tintr - reason: 0x%08x; " + "REGS: QSTR: 0x%08x, CSR_DSP: 0x%08x, " + "IC_IR0: 0x%08x, IC_IR1: 0x%08x\n", + dev->node, dev->number, dev->minor, + reason, + GET_CLUSTER_REG(QSTR_DSP), + GET_CLUSTER_REG(CSR_DSP), + nGET_APIC_REG(IC_IR0, dev->node), + nGET_APIC_REG(IC_IR1, dev->node)); + +#define RIGHT_RULE_DSP_INTERRUPT_CATCH +#ifdef RIGHT_RULE_DSP_INTERRUPT_CATCH + if (reason & (1 << 4)) { /* WAIT = clear CSR_DSP */ + int i; + int dcsr_t = 0; + /* clear all DCSR, bit 4, in current node */ + //SLOCK_IRQSAVE(&interrupt_lock, flags); + for (i = 0; i < 4; i++) { + /* + WARNING_PRINT("%d, DCSR: 0x%08x\n", + i, GET_DSP_REG(DCSR)); + */ + dcsr_t |= (nGETBIT(DCSR, dev->node, i, 4)) << i; + nCLRBIT(DCSR, dev->node, i, 4); /* clear WT */ + } + SET_CLUSTER_REG(CSR_DSP, 0x0); + //SUNLOCK_IRQREST(&interrupt_lock, flags); + not_clear = 0; + WARNING_PRINT("reason:\t" + "all DSP wait XBUF exchange: 0x%x\n", + dcsr_t); + } else if (reason & (1 << 5)) { /* parity error */ + //SLOCK_IRQSAVE(&interrupt_lock, flags); + dev->mem_error = GET_CLUSTER_REG(MEM_ERR_CSR); + dev->run = 0; + SET_CLUSTER_REG(MEM_ERR_CSR, 0x4); + //SUNLOCK_IRQREST(&interrupt_lock, flags); + not_clear = 0; + ERROR_PRINT("intr:\t[%d:%d:%02d]:MEM PARITY ERROR: " + "ctrl 0x%x, memerr %d, dspX 0x%x - 0x%x\n", + dev->node, dev->number, dev->minor, + dev->mem_error & 0x3, + dev->mem_error & 0x4, + (dev->mem_error >> 4) & 0xf, + dev->mem_error); + } +#else /* debug rule */ + /* 
+ WAIT - clear CSR_DSP and PARITY_ERROR - set 0x4 in MEM_ERR_CSR + */ + if (reason & 0x30) { //01110000 - check 2-bits for any happens + int i; + int dcsr_t = 0; + /* clear all DCSR, bit 4, in current node */ + //SLOCK_IRQSAVE(&interrupt_lock, flags); + for (i = 0; i < 4; i++) { + /* + WARNING_PRINT("%d, DCSR: 0x%08x\n", + i, GET_DSP_REG(DCSR)); + */ + dcsr_t |= (nGETBIT(DCSR, dev->node, i, 4)) << i; + nCLRBIT(DCSR, dev->node, i, 4); /* clear WT */ + } + SET_CLUSTER_REG(CSR_DSP, 0x0); + dev->mem_error = GET_CLUSTER_REG(MEM_ERR_CSR); + dev->run = 0; + SET_CLUSTER_REG(MEM_ERR_CSR, 0x4); + //SUNLOCK_IRQREST(&interrupt_lock, flags); + not_clear = 0; + ERROR_PRINT("intr:\tWAIT: " + "0x%x or PARITY ERROR: MEM_ERR_CSR 0x%x\n", + dcsr_t, + dev->mem_error); + } +#endif + } + + if (not_clear) { + count_error++; + if (count_error > 1000 || count_error == 1) { + if (count_error > 1000) count_error = 0; + ERROR_PRINT("intr:\t" + "impossible - interrupt not cleared !\n"); + ERROR_PRINT("[%d:%d:%02d]:\treason: 0x%x; " + "QSTR: 0x%08x, CSR: 0x%08x, " + "IC_IR0: 0x%08x, IC_IR1: 0x%08x\n", + dev->node, dev->number, dev->minor, + reason, + GET_CLUSTER_REG(QSTR_DSP), + GET_CLUSTER_REG(CSR_DSP), + nGET_APIC_REG(IC_IR0, dev->node), + nGET_APIC_REG(IC_IR1, dev->node)); + } + } + + return; +} + + +static inline int processing_DMA(unsigned int node, unsigned int channel) +{ + + int number = -1; + union csr_register csr; + static unsigned int count_error = 0; + /*unsigned long flags;*/ + + number = check_channel(node, channel); + + //SLOCK_IRQSAVE(&interrupt_lock, flags); + /* clear interrupt */ + csr.r = nGET_DMA_REG(CSR, node, channel); + + if (number == -1) { + count_error++; + //SUNLOCK_IRQREST(&interrupt_lock, flags); + if (count_error > 1000 || count_error == 1) { + if (count_error > 1000) count_error = 0; + ERROR_PRINT("DMA: get interrupt " + "for unused channel: %d:%d, err: %u\n", + node, channel, count_error); + } + return number; //what needed return for this error-situation ? 
+ } + + /* if csr.b.done != 0 then we get csr.b.end (one block data exchange) */ + dsp_node[node].dsp[number]->dma.end = csr.b.end; + dsp_node[node].dsp[number]->dma.done = csr.b.done; + dsp_node[node].dsp[number]->dma.run = 0; + //SUNLOCK_IRQREST(&interrupt_lock, flags); + + DBG_PRINT("proceesing_DMA: get interrupt: node: %d, chn: %d, dsp: %d\n", + node, channel, number); + + return (1 << number); +} + + +inline void wakeup_each_dsp(struct list_head *list) +{ + struct list_head *tmp, *next; + raw_wqueue_t *waiter_item; + list_for_each_safe(tmp, next, list) { + waiter_item = list_entry(tmp, raw_wqueue_t, task_list); + wake_up_process(waiter_item->task); + } +} + + +void dsp_dma_processing(dsp_dev_t *dev) +{ + static unsigned long dma_intr = 0; + + dma_intr++; + + DETAIL_PRINT("dma :\t [%d:%d:%d] mask: 0x%x, all: 0x%x, intr: %lu\n", + dev->node, dev->number, dev->minor, + dev->interrupts, dev->tmp_all_intr, dma_intr); + + if (dev->dma.done == 0) { + dev->dma.end++; + DBG_PRINT("DMA: end for block exchange: %ld %d\n", + dma_intr, dev->dma.end); + } + + if (dev->dma.done) { + DBG_PRINT("DMA: wake up dsp[%d:%d:%d]\n", + dev->node, dev->number, dev->minor); + dev->dma.end = 0; + wakeup_each_dsp(&dev->dma.wait_task_list); + } + + return; +} + + +void dsp_interrupt_processing(dsp_dev_t *dev) +{ + dsp_dev_t *dsp_dev; + int i; + static unsigned long count_intr = 0; + + count_intr++; + + DETAIL_PRINT("intr:\t [%d:%d:%d] " + "mask: 0x%x, all: 0x%x, err %d, intr: %lu\n", + dev->node, dev->number, dev->minor, + dev->interrupts, dev->tmp_all_intr, + dev->mem_error, count_intr); + + if (dev->run == 0) { + DBG_PRINT("DSP: wake up dsp[%d:%d:%d]\n", + dev->node, dev->number, dev->minor); + wakeup_each_dsp(&dev->wait_task_list); + } + if (dev->mem_error) { + for (i = 0; i < MAX_DSP; i ++) { + if (dev->number == i) + continue; + dsp_dev = dsp_node[dev->node].dsp[i]; + if (dsp_dev->run) { + dsp_dev->run = 0; + dsp_dev->mem_error = 1; + DBG_PRINT("DSP: " + "wake up on error 
dsp[%d:%d:%d]\n", + dsp_dev->node, dsp_dev->number, + dsp_dev->minor); + wakeup_each_dsp(&dsp_dev->wait_task_list); + } + } + } + return; +} +#endif /*__CATCH_INTERRUPT_ON__*/ + + +void free_memory_from_dsp_allocate(void) +{ + int i; + + if (dsp_numbers_devs) { + /* free all DMA buffers */ + for (i = 0; i < dsp_numbers_devs; i++) { +#ifdef __CHAIN_MODE_ON__ + delete_dma_chain(dsp_devices[dsp_minors[i]]); +#endif /*__CHAIN_MODE_ON__*/ + free_dma(dsp_devices[dsp_minors[i]]->dma.page_adr, + DMA_EXCHANGE_SIZE); + } + + for (i = 0; i < dsp_numbers_devs; i++) { + int node = dsp_devices[dsp_minors[i]]->node; + /* clear mask interrupts */ + if (dsp_devices[dsp_minors[i]]->number == 0) { + nSET_CLUSTER_REG(MASKR_DSP, node, 0x0); + } + kfree(dsp_devices[dsp_minors[i]]); + } + + /* unmap phys memory */ +#ifdef __ALL_ONLINE_NODE__ + for (i = 0; i < MAX_NODE; i++) + +#endif + { + if (dsp_node[i].present) { + int m; + /* clear masks all interupts */ + nSET_APIC_REG(IC_MR0, i, 0x0); /*CPU0*/ + /* + * LCC compilator bug workaround: second + * register set is compiled as speculative + * read/wrtite ti IO space, so 'nop' separator + * command temporary is added + */ + E2K_CMD_SEPARATOR; + + nSET_APIC_REG(IC_MR1, i, 0x0); /*CPU1*/ + + /* unmap phys memory */ + for (m = 0; m < MAX_DSP; m++) { + iounmap(BASE[i].xyram[m]); + iounmap(BASE[i].pram[m]); + iounmap(BASE[i].regs[m]); + } + + iounmap(BASE[i].xbuf); + } + } + DETAIL_PRINT("End clear memory and mask's.\n"); + } +} + + +int create_dsp_device(int node, int number, dsp_dev_t *dev, int *all_dev_number) +{ + memset(dev, 0, sizeof(dsp_dev_t)); + + SINIT(&dev->spinlock); + MINIT(&dev->ioctl_mutex); + + /* setup queue */ +#ifdef __CATCH_INTERRUPT_ON__ + INIT_LIST_HEAD(&dev->wait_task_list); + INIT_LIST_HEAD(&dev->dma.wait_task_list); + +#ifdef __CHAIN_MODE_ON__ + /* chain */ + INIT_LIST_HEAD(&dev->dma_chain); + dev->chain_present = 0; /* set chain empty */ + dev->link_size = 4; +#endif /*__CHAIN_MODE_ON__*/ +#endif 
/*__CATCH_INTERRUPT_ON__*/ + + dev->node = node; + dev->number = number; + dev->id = (*all_dev_number); + dev->minor = node*MAX_NODE + number; + dev->dma.channel = -1; + + (*all_dev_number)++; /* increase: dsp_numbers_devs++ */ + + /* create and setup DMA buffer */ + dev->dma.real_size = alloc_dma(&dev->dma.page_adr, + &dev->dma.virt_mem, + &dev->dma.phys_mem, + DMA_EXCHANGE_SIZE); + if (dev->dma.real_size <= 0) { + ERROR_PRINT("init: DSP[%d]: error allocate DMA buffer\n", + dev->number); + return -ENOMEM; + } + + DETAIL_PRINT("DMA allocate[%d:%d:%2d]: 0x%lx 0x%lx 0x%lx %d %d\n", + dev->node, dev->number, dev->minor, + (unsigned long)dev->dma.virt_mem, + dev->dma.page_adr, + dev->dma.phys_mem, + (unsigned int)DMA_EXCHANGE_SIZE, + dev->dma.real_size); + + return 0; +} + + +void hardcore_clear_all_memory(void) { + +/* on interrupts only at cpu0 each nodes */ + uint32_t maskr_dsp[128]; + uint32_t ic_mr0[128]; + uint32_t ic_mr1[128]; + int i = 0; + + /* save target regs for all possible nodes */ +#ifdef __ALL_ONLINE_NODE__ + for (i = 0; i < MAX_NODE; i++) +#endif + { + if (dsp_node[i].present) { + maskr_dsp[i] = nGET_CLUSTER_REG(MASKR_DSP, i); + ic_mr0[i] = nGET_CLUSTER_REG(IC_MR0, i); + ic_mr1[i] = nGET_CLUSTER_REG(IC_MR1, i); + } + } + + /* clear target regs for all possible nodes */ +#ifdef __ALL_ONLINE_NODE__ + for (i = 0; i < MAX_NODE; i++) +#endif + { + if (dsp_node[i].present) { + nSET_CLUSTER_REG(MASKR_DSP, i, 0x0); + nSET_CLUSTER_REG(IC_MR0, i, 0x0); + nSET_CLUSTER_REG(IC_MR1, i, 0x0); + } + } + + /* clear mem-interrupts for all possible nodes */ +#ifdef __ALL_ONLINE_NODE__ + for (i = 0; i < MAX_NODE; i++) +#endif + { + if (dsp_node[i].present) { + nSET_CLUSTER_REG(MEM_ERR_CSR, i, 0x4); + nSET_CLUSTER_REG(CSR_DSP, i, 0x0); + } + } + + /* clear XYRAM and PRAM */ +#ifdef __ALL_ONLINE_NODE__ + for (i = 0; i < MAX_NODE; i++) +#endif + { + if (dsp_node[i].present) { + int m; + for (m = 0; m < MAX_DSP; m++) { + memset(nXYRAM(i, m), 0, XYRAM_SIZE); + memset(nPRAM(i, 
m), 0, PRAM_SIZE); + } + } + } + + /* clear mem-interrupts for all possible nodes */ +#ifdef __ALL_ONLINE_NODE__ + for (i = 0; i < MAX_NODE; i++) +#endif + { + if (dsp_node[i].present) { + nSET_CLUSTER_REG(MEM_ERR_CSR, i, 0x4); + nSET_CLUSTER_REG(CSR_DSP, i, 0x0); + } + } + + /* restore target regs for all possible nodes */ +#ifdef __ALL_ONLINE_NODE__ + for (i = 0; i < MAX_NODE; i++) +#endif + { + if (dsp_node[i].present) { + nSET_CLUSTER_REG(MASKR_DSP, i, maskr_dsp[i]); + nSET_CLUSTER_REG(IC_MR0, i, ic_mr0[i]); + nSET_CLUSTER_REG(IC_MR1, i, ic_mr1[i]); + } + } +} + + +/* #define FOR_DBG 0 */ +static int __init dsp_init (void) +{ + dsp_dev_t *dev; + int result, nod_i = 0, j, dsp_i, node_count = 0; + int dsp_on = 0; /* counter for online DSP-clusters */ + char name[128]; + int i, ret = 0, meminit = 0; + + DBG_PRINT ("Hello world from DSP driver. cpu: node: %d, id: %d\n", + numa_node_id(), raw_smp_processor_id()); + + dsp_sysctl_register(); + +#ifdef FOR_DBG + if (FOR_DBG) { /* for dbg */ + int nid = 0; + unsigned long long phys_base_tmp; + for_each_online_node(nid) { + phys_base_tmp = THE_NODE_NBSR_PHYS_BASE(nid); + ERROR_PRINT("sys: 0x%llx -> %p\n", + phys_base_tmp, + nodes_nbsr_base[nid]); + + ERROR_PRINT("node: " + "%d [0x%0lx:0x%0lx - 0x%0lx] - {0x%0lx}\n", + nid, THE_NODE_NBSR_PHYS_BASE(nid), + THE_NODE_NBSR_PHYS_BASE(nid) + 0xb0000000L, + THE_NODE_COPSR_PHYS_BASE(nid), +#ifdef _MANUAL_CONTROL_AREA_SIZE_ + BASE_PHYS_ADR + (nid * DSP_MEM_SIZE)); +#else + 0x01c0000000UL + (nid * 0x1000000UL)); +#endif + } + } +#endif + +#ifndef __DMA_INTERRUPTS_ON__ + WARNING_PRINT("init: NO interrupts DMA mode\n"); +#endif /*__DMA_INTERRUPTS_ON__*/ + + if (!IS_MACHINE_ES2) { + ERROR_PRINT("init:\tCan't find DSP lapic\n"); + ret = -ENODEV; + goto dsp_init_end; + } else { + node_numbers = num_online_nodes(); + DBG_PRINT ("CPU's numbers: %d, node: %d\n", + node_numbers*2, node_numbers); + } + + for (nod_i = 0; nod_i < MAX_NODE; nod_i++) { + dsp_node[nod_i].online = 0; + 
dsp_node[nod_i].present = 0; + } + + nod_i = 0; +#ifdef __ALL_ONLINE_NODE__ + for_each_online_node(nod_i) +#endif /*__ALL_ONLINE_NODE__*/ + { + unsigned long offset_b; + + /* for each online node check on/off dsp */ + e2k_pwr_mgr_t pwr; + pwr.word = nGET_APIC_REG(IC_PWR, nod_i); + + if (!pwr.fields.ic_clk) { + dsp_node[nod_i].online = 0; + DBG_PRINT("DSP for node: %d - OFF\n", nod_i); +#ifdef __ALL_ONLINE_NODE__ + continue; +#endif + } else { + meminit = 1; /* memory clear flag at error */ + on_nodes[node_count] = nod_i; + node_count++; + + DBG_PRINT("DSP for node: %d - ON\n", nod_i); + dsp_on++; + + for (dsp_i = 0; dsp_i < MAX_DSP; dsp_i++) { + + offset_b = (unsigned long)(nNODE_PHYS_ADR(nod_i) + + (0x400000 * + dsp_i)); + + BASE[nod_i].xyram[dsp_i] = ioremap(offset_b + + XYRAM_OFFSET, + XYRAM_SIZE); + BASE[nod_i].pram[dsp_i] = ioremap(offset_b + + PRAM_OFFSET, + PRAM_SIZE); + BASE[nod_i].regs[dsp_i] = ioremap(offset_b + + REGS_OFFSET, + dsp_i ? + (0x1000) : + (0x3000)); + + DETAIL_PRINT("%d xyram 0x%lx <- 0x%lx," + "\n\t\t\t pram 0x%lx <- 0x%lx," + "\n\t\t\t regs 0x%lx <- 0x%lx\n", + dsp_i, + BASE[nod_i].xyram[dsp_i], + offset_b, + BASE[nod_i].pram[dsp_i], + offset_b + 0x40000, + BASE[nod_i].regs[dsp_i], + offset_b + 0x80000 + ); + } + + BASE[nod_i].xbuf = (char *)ioremap(nNODE_PHYS_ADR(nod_i) + 0x3fff00, PAGE_SIZE); + + for (dsp_i = 0; dsp_i < MAX_DSP; dsp_i++) { + dev = kmalloc(sizeof(dsp_dev_t), GFP_KERNEL); + if (dev == NULL) { + ERROR_PRINT("init:\tDSP: %d. 
" + "Can't allocate memory " + "for dsp_dev_t.\n", + dsp_i + nod_i); + ret = -ENOMEM; + goto dsp_init_end; + } + + if (create_dsp_device(nod_i, dsp_i, + dev, &dsp_numbers_devs)) { + ret = -ENOMEM; + goto dsp_init_end; + } + + dsp_devices[nod_i*MAX_NODE + dsp_i] = dev; + dsp_node[nod_i].dsp[dsp_i] = dev; + dsp_minors[dsp_numbers_devs-1] = dev->minor; + } /* end creating four DSP in i-node */ + + for (j = 0; j < MAX_DMA; j++) + dsp_node[nod_i].dma_channel_lock[j] = -1; + + dsp_node[nod_i].present = 1; + dsp_node[nod_i].online = 1; + + /* off all interrupts for all DSP-node */ + /* off interupts in DSP-mask */ + nSET_CLUSTER_REG(MASKR_DSP, nod_i, 0x0); + /* off interupts in CPU-mask */ + nSET_APIC_REG(IC_MR0, nod_i, 0x0); + /* + * LCC compilator bug workaround: second register set is + * compiled as speculative read/wrtite ti IO space, so + * 'nop' separator command temporary is added + */ + E2K_CMD_SEPARATOR; + + nSET_APIC_REG(IC_MR1, nod_i, 0x0); + + } /* end for check on/off DSP */ + } /* end "for_each_online_node" */ + + +/* temporary hack */ +#ifdef __ALL_ONLINE_NODE__ + for (nod_i = 0; nod_i < MAX_NODE; nod_i++) +#endif + { + if (dsp_node[nod_i].present) { + /* off interupts in CPU-mask */ + nSET_APIC_REG(IC_MR0, nod_i, 0x0); + /* + * LCC compilator bug workaround: second register set is + * compiled as speculative read/wrtite ti IO space, so + * 'nop' separator command temporary is added + */ + E2K_CMD_SEPARATOR; + nSET_APIC_REG(IC_MR1, nod_i, 0x0); + } + } + + if (!dsp_on) { + WARNING_PRINT("Not found DSP-clusters online.\n"); + ret = -ENODEV; + goto dsp_init_end; + } + + if (node_numbers != dsp_on) { + WARNING_PRINT("Some DSP-clusters are off.\n"); + } + + SINIT(&dma_lock); + SINIT(&interrupt_lock); + SINIT(&global_lock); + + /* for clear memory interrupts before work */ + { + unsigned long flags = 0; + DETAIL_PRINT("hack:\tmemory clear\n"); + SLOCK_IRQSAVE(&interrupt_lock, flags); + hardcore_clear_all_memory(); + SUNLOCK_IRQREST(&interrupt_lock, flags); + } 
+ +#ifdef __CATCH_INTERRUPT_ON__ + /* Save our interupt to global kernel pointer */ + eldsp_interrupt_p = dsp_interrupt_handler; +#endif + + if (dsp_numbers_devs > 0) { + result = register_chrdev(major, dsp_dev_name, &dsp_fops); + if (result < 0) { + ERROR_PRINT("init:\tCannot get major %d\n", major); + ret = -ENODEV; + goto dsp_init_end; + } + if (major == 0) + major = result; + DETAIL_PRINT("init:\tmajor number is %d.\n", major); + } else { + ERROR_PRINT("init:\tregister_chrdev did not execute.\n"); + ret = -ENODEV; + goto dsp_init_end; + } + + +#ifdef __USE_PROC__ +#ifdef CONFIG_PROC_FS + + if (!ldsp_entry) { + dsp_proc_entry = proc_create("dspinfo", S_IRUGO, + NULL, &eldsp_proc_ops); + if (!dsp_proc_entry) + ERROR_PRINT("init: can't create /proc/dspinfo\n"); + } else { + save_eldsp_proc_ops = ldsp_proc_fops_pointer; + ldsp_proc_fops_pointer = &eldsp_proc_ops; + } +#endif /* CONFIG_PROC_FS */ +#endif /* __USE_PROC__ */ + +#ifdef FOR_DBG + if (FOR_DBG) { /* for dbg */ + int nid = 0; + for (nid = 0; nid < MAX_NODE; nid++) { + if (dsp_node[nid].present) { + ERROR_PRINT("IR0: 0x%x\n", + nGET_APIC_REG(IC_IR0, nid)); + ERROR_PRINT("IR1: 0x%x\n", + nGET_APIC_REG(IC_IR1, nid)); + ERROR_PRINT("IDR: 0x%x\n", + nGET_DSP_REG(IDR, nid, 0)); + } + } + } +#endif + + +#ifdef __DSP_RUN_HACK_FOR_MEMORY__ + for (nod_i = 0; nod_i < MAX_NODE; nod_i++) { + if (dsp_node[nod_i].present) { + for (dsp_i = 0; dsp_i < MAX_DSP; dsp_i++) { + /* set run bit */ + nSETBIT(DCSR, nod_i, dsp_i, 14); + /* clear run bit */ + nCLRBIT(DCSR, nod_i, dsp_i, 14); + } + } + } +#endif + +/* now interrupts ON - only by open() device and only for target DSP */ +#ifdef __CATCH_INTERRUPT_ON__ + + /* on interrupts only at cpu0 each nodes */ + DETAIL_PRINT("init:\tbefore interupts\n"); +#ifdef __ALL_ONLINE_NODE__ + for (nod_i = 0; nod_i < MAX_NODE; nod_i++) +#endif + { + if (dsp_node[nod_i].present) { + /* DSP[0-3] */ + nSET_CLUSTER_REG(MASKR_DSP, nod_i, 0xffffffff); + /* 0xffffffdf - of bit 5 - off wait 
interrupt: DBG ! */ + +# ifdef __DMA_INTERRUPTS_ON__ + nSET_APIC_REG(IC_MR0, nod_i, 0xffff); /*CPU0*/ +# else + nSET_APIC_REG(IC_MR0, nod_i, 0xff00); /*CPU0 DMA OFF*/ +# endif /*__DMA_INTERRUPTS_ON__*/ + DETAIL_PRINT("init:\tinterupts for node - %d\n", nod_i); + } + } + +#endif /*__CATCH_INTERRUPT_ON__*/ + + +#ifdef FOR_DBG + if (FOR_DBG) { /* for dbg */ + for (i = 0; i < dsp_numbers_devs; i++) { + ERROR_PRINT("dsp: %d -> %d\n", i, dsp_minors[i]); + } + + for (i = 0; i < node_numbers; i++) { + ERROR_PRINT("node: %d -> %d\n", i, on_nodes[i]); + } + } +#endif + /* we register class in sysfs... */ + dsp_class = class_create(THIS_MODULE, DSP_NAME); + if (IS_ERR(dsp_class)) { + ERROR_PRINT("Error creating class: /sys/class/" DSP_NAME ".\n"); + } + + /* ...and create devices in /sys/class/eldsp */ + for (i = 0; i < dsp_numbers_devs; ++i) { + if (!IS_ERR(dsp_class)) { + sprintf(name, "%s%d", DSP_NAME, i); + /* + pr_info("make node /sys/class/%s/%s\n", + DSP_NAME, name); + */ + if (device_create(dsp_class, NULL, + MKDEV(major, dsp_minors[i]), NULL, name) == NULL) + ERROR_PRINT("create a node %d failed\n", i); + } + } + +dsp_init_end: + if (ret) { + if (meminit) + free_memory_from_dsp_allocate(); + dsp_sysctl_unregister(); + } + + return ret; + +} + + +static void __exit dsp_cleanup(void) +{ + int i; + +#ifdef __USE_PROC__ +#ifdef CONFIG_PROC_FS + if (!ldsp_entry) { + proc_remove(dsp_proc_entry); + } else { + ldsp_proc_fops_pointer = save_eldsp_proc_ops; + } +#endif /* CONFIG_PROC_FS */ +#endif /* __USE_PROC__ */ + + /* we need to remove the device...*/ + for (i = 0; i < dsp_numbers_devs; ++i) { + device_destroy(dsp_class, MKDEV(major, dsp_minors[i])); + } + + /* ...and class */ + class_destroy(dsp_class); + + free_memory_from_dsp_allocate(); + unregister_chrdev(major, dsp_dev_name); + dsp_sysctl_unregister(); + DBG_PRINT("exit:\tcleanup device driver\n"); + + return; +} + + +module_init(dsp_init); +module_exit(dsp_cleanup); + +MODULE_AUTHOR ("Alexey Mukhin"); 
+MODULE_LICENSE ("GPL"); +MODULE_DESCRIPTION("driver for Elbrus Digital Signal Processors v. " DSP_VERSION); diff --git a/drivers/mcst/eldsp/eldsp.h b/drivers/mcst/eldsp/eldsp.h new file mode 100644 index 000000000000..91298eda5946 --- /dev/null +++ b/drivers/mcst/eldsp/eldsp.h @@ -0,0 +1,527 @@ +#ifndef _MCST_DSP_DRV_H_ +#define _MCST_DSP_DRV_H_ + + +//#define DSP_MAJOR 55 +#define MAX_NODE 4 +#define MAX_DSP 4 +#define MAX_DMA 8 + +/* for dma_exchange */ +#define TO_DSP 0 +#define FROM_DSP 1 +#define DSP_DSP 20 + +#include "linux/mcst/dsp_io.h" + +/* + * Macros to help debugging + */ + +#define ERROR_MODE 1 +#define DEBUG_MODE 0 +#define DEBUG_DETAIL_MODE 0 + +#if DEBUG_MODE +#warning * * * --- DEBUG_MODE (before commit you must off it) --- * * * +#endif + +/* Definitions */ + +/* + * for area sizes: see iset.F-2 (map memory) + * 16 Mb - all DSP-cluster + */ +#define _MANUAL_CONTROL_AREA_SIZE_ +#ifdef _MANUAL_CONTROL_AREA_SIZE_ + +#define BASE_PHYS_ADR (0x01c0000000UL) +#define DSP_MEM_SIZE (0x1000000UL) +#define NODE_PHYS_ADR (BASE_PHYS_ADR + (dev->node * DSP_MEM_SIZE)) +#define nNODE_PHYS_ADR(n) (BASE_PHYS_ADR + (n * DSP_MEM_SIZE)) + +#else + +/* + * kernel definitions slowly because of the large number of checks + * i use their own definitions, for speed + */ +#define BASE_PHYS_ADR (THE_NODE_COPSR_PHYS_BASE(0)) +#define NODE_PHYS_ADR (THE_NODE_COPSR_PHYS_BASE(dev->node)) +#define nNODE_PHYS_ADR(n) (THE_NODE_COPSR_PHYS_BASE(n)) + +#endif + +/*APIC*/ +#define IC_IR0 SIC_ic_ir0 +#define IC_IR1 SIC_ic_ir1 +#define IC_MR0 SIC_ic_mr0 +#define IC_MR1 SIC_ic_mr1 +#define IC_PWR SIC_pwr_mgr + +#define GET_APIC_REG(r) sic_read_node_nbsr_reg(dev->node, r) +#define SET_APIC_REG(r, v) sic_write_node_nbsr_reg(dev->node, r, v) +#define nGET_APIC_REG(r, n) sic_read_node_nbsr_reg(n, r) +#define nSET_APIC_REG(r, n, v) sic_write_node_nbsr_reg(n, r, v) + + +#define BASE mem_mmap + +#define PHYS_BASE(o) (((BASE_PHYS_ADR + \ + (dev->node * DSP_MEM_SIZE)) + \ + (dev->number * 
0x400000)) + (o)) +#define nPHYS_BASE(n, d, o) (((BASE_PHYS_ADR + \ + (n * DSP_MEM_SIZE)) + \ + ((d) * 0x400000)) + (o)) + +#define PHYS_NODE(o) ((NODE_PHYS_ADR + \ + (dev->number * 0x400000)) + (o)) +#define nPHYS_NODE(n, d, o) ((nNODE_PHYS_ADR(n) + \ + ((d) * 0x400000)) + (o)) + +/* DSP registers */ +/* data */ +#define XYRAM (BASE[dev->node].xyram[dev->number]) +#define nXYRAM(n, d) (BASE[n].xyram[d]) +#define XYRAM_SIZE (0x1fff8) + + +/* program data */ +#define PRAM (BASE[dev->node].pram[dev->number]) +#define nPRAM(n, d) (BASE[n].pram[d]) +#define PRAM_SIZE (0x7ff8) + + +/* state and control regs */ + +/* mnemonic for macros variables + * n -> number node + * m -> start phys. memory + * d -> device number in current DSP cluster -> for 0 to 3 + * r -> register offset + * v -> varible for write to register + */ + +#define REGS(r) (BASE[dev->node].regs[dev->number] + (r)) +#define nREGS(n, d, r) (BASE[n].regs[d] + (r)) + + +#define XYRAM_OFFSET (0x00000) +#define PRAM_OFFSET (0x40000) +#define REGS_OFFSET (0x80000) + + +/*32 bit*/ +#define GET_DSP_REG(r) readl(nREGS(dev->node, dev->number, r)) +#define SET_DSP_REG(r, v) writel(v, nREGS(dev->node, dev->number, r)) + +#define nGET_DSP_REG(r, n ,d) readl(nREGS(n, d, r)) +#define nSET_DSP_REG(r, n, d, v) writel(v, nREGS(n, d, r)) + + +/*32 bit*/ +#define GET_CLUSTER_REG(r) readl(nREGS(dev->node, 0, r)) +#define SET_CLUSTER_REG(r, v) writel(v, nREGS(dev->node, 0, r)) + +#define nGET_CLUSTER_REG(r, n) readl(nREGS(n, 0, r)) +#define nSET_CLUSTER_REG(r, n, v) writel(v, nREGS(n, 0, r)) + + +/*64 bit*/ +#define GET_DMA_REG(r, c) readq(nREGS(dev->node, \ + 0, \ + (r + (c * 0x100)))) +#define SET_DMA_REG(r, c, v) writeq(v, \ + nREGS(dev->node, \ + 0, \ + (r + (c * 0x100)))) + +#define nGET_DMA_REG(r, n, c) readq(nREGS(n, 0, (r + (c * 0x100)))) +#define nSET_DMA_REG(r, n, c, v) writeq(v, nREGS(n, 0, (r + (c * 0x100)))) + + + +/* + * for use GET_DSP_REG(r) and SET_DSP_REG(r, v) + */ + +#define DCSR (0x200) /* 16: R/W */ 
+#define SR (0x208) /* 16: R/W */ +#define IDR (0x210) /* 16: R/W - write -> clear PI */ +#define EFR (0x218) /* 32: R - 218 !*/ +#define DSTART (0x218) /* 32: W - 218 !*/ +#define IRQR (0x220) /* 32: R/W */ +#define IMASKR (0x228) /* 32: R/W */ +#define TMR (0x230) /* 32: R/W */ +#define ARBR (0x238) /* 16: R/W */ +#define PC (0x240) /* 16: R/W programm counter */ +#define SS (0x248) /* 16: R/W */ +#define LA (0x250) /* 16: R/W */ +#define CSL (0x258) /* 16: R/W */ +#define LC (0x260) /* 16: R/W */ +#define CSH (0x268) /* 16: R/W */ +#define SP (0x270) /* 16: R/W */ +#define SAR (0x278) /* 16: R/W */ +#define CNTR (0x280) /* 16: R/W */ +#define IVAR (0x1f8) /* 16: R/W */ + +/* debug registers */ +#define dbDCSR (0x500) /* 16: R/W */ +#define CNT_RUN (0x500) /* 32: R */ + + +/* + * for use GET_CLUSTER_REG(r) and SET_CLUSTER_REG(r, v) + */ + +#define MASKR_DSP (0x1000) /* 32: R/W interrupt's mask */ +#define QSTR_DSP (0x1008) /* 32: R requests */ +#define CSR_DSP (0x1010) /* 32: R/W control and state */ +#define TOTAL_CLK_CNTR (0x1018) /* 32: DSP clock counter */ +#define MEM_ERR_CSR (0x1020) /* 32: R/W parity error control */ + + +/* + * DMA channels + * for use GET_DMA_REG(r, c) and SET_DMA_REG(r, c, v) + */ + +#define CSR (0x2000) +#define CP (0x2008) +#define IOR0 (0x2010) +#define IOR1 (0x2018) +#define DMA_RUN (0x2020) + + +/* mailbox */ +#ifdef __DEVELOPMENT_DSP_H_SECTOIN__ +#define XBUF_X00 (BASE[dev->node] + 0x3fff00) /* X0 ... */ +#define XBUF_X31 (BASE[dev->node] + 0x3fff80) /* ... X31 adr */ +#define nXBUF_X00(n) (BASE[n] + 0x3fff00) /* X0 ... */ +#define nXBUF_X31(n) (BASE[n] + 0x3fff80) /* ... X31 adr */ +#endif + + +#define XBUF_X00 (BASE[dev->node].xbuf) /* X0 ... */ +#define nXBUF_X00(n) (BASE[n].xbuf) /* X0 ... 
*/ + + +#define XBUF(m) (XBUF_X00 + (0x8 * (m))) +#define nXBUF(n, m) (nXBUF_X00(n) + (0x8 * (m))) + + + +/* Defenitions for mutex and spinlock */ + +#define MUTEX_T struct mutex // struct semaphore +#define SPINLOCK_T raw_spinlock_t // spinlock_t + +#define MINIT mutex_init // init_MUTEX +#define SINIT raw_spin_lock_init // spin_lock_init + +#define MLOCK mutex_lock // down +#define MUNLOCK mutex_unlock // up +#define SLOCK raw_spin_lock // spin_lock +#define SUNLOCK raw_spin_unlock // spin_unlock +#define SLOCK_IRQ raw_spin_lock_irq // spin_lock_irq +#define SUNLOCK_IRQ raw_spin_unlock_irq // spin_unlock_irq +#define SLOCK_IRQSAVE raw_spin_lock_irqsave // spin_lock_irqsave +#define SUNLOCK_IRQREST raw_spin_unlock_irqrestore // _unlock_irqrestore + + + + +/* use QSTR_DSP */ +const int mask_intr[4] = {0x1100, 0x2200, 0x4400, 0x8800}; + + +/* Global structure for memmory DSP remaping */ +typedef struct dsp_mem_mmap { + void __iomem *xyram[4]; + void __iomem *pram[4]; + void __iomem *regs[4]; + void __iomem *xbuf; +} dsp_mem_mmap_t; + +dsp_mem_mmap_t mem_mmap[MAX_NODE]; + + +/* + * structures and union + */ + +/** + * stolen from mpv.h + * needed for wait/wake_up + */ +typedef struct __raw_wqueue { + struct task_struct *task; + struct list_head task_list; +} raw_wqueue_t; + +typedef struct dma_state { + int lnumber; /* only for chain */ + int run; + int channel; + int end; + int done; + int chain; /*dbg: flag for chain mode */ + int size; + int real_size; + unsigned long page_adr; + dma_addr_t *virt_mem; + dma_addr_t phys_mem; + struct list_head wait_task_list; /* sleep and waiting interrupt*/ +} dma_state_t; + +/** + * list for chain mode + * store info about dma memory + */ +typedef struct chain_list { + struct dma_state link; /* our data */ + struct list_head list; +} chain_list_t; + + +typedef struct dsp_dev { + int opened; + dev_t dev; + int node; /* number DSP cluster */ + int number; /* local number from 0 to 3 */ + int minor; /* global number from 0 to 15 */ 
+ int id; /* for dev numbers */ + dma_state_t dma; /**/ + + struct list_head dma_chain; /* for chain */ + int chain_present; /* count links in chain */ + int link_size; /* link size in pages */ + int chain_channel; /* chain channel */ + dma_state_t link_regs; /* 1*PAGE_SIZE - chain_link_t */ + + int run; /* run or stop */ + unsigned int reason; /* get reason from interrupt */ + unsigned int dcsr_i; /* get DSCR from interrupts */ + unsigned int sp_i; /* get SP from interrupts */ + int state; /* SR */ + int mem_error; /* interrupted on memory */ + /* parity error */ + int tmp_all_intr; /* interrupts without filter */ + int interrupts; /* all types for current */ + SPINLOCK_T spinlock; /* common for work with regs */ + MUTEX_T ioctl_mutex; /* common for work with ioctl */ + struct list_head wait_task_list; /* sleep and waiting interrupt*/ +} dsp_dev_t; + + +/** + * store info about all detected modes and dsp on machine + */ +typedef struct dsp_node { + int dma_channel_lock[MAX_DMA]; + dsp_dev_t *dsp[MAX_DSP]; + int present; /* setup if node present */ + int online; /* setup if node online */ +} dsp_node_t; + + +/* for processing interrupts */ +typedef struct interrupt_node { + int r[2]; + unsigned int generic; + int number; +} interrupt_t; + + +/* CSR register */ +typedef struct csr_reg { + unsigned run: 1; /* run DMA */ + unsigned dir: 1; /* 0 = IOR0->IOR1, 1 = IOR0<-IOR1 */ + unsigned wn: 4; /* length data transfer, + 0 = 1, + f = 16: + word = 64 bit */ + unsigned unused_1: 1; + unsigned start_dsp: 1; /* run DSP after work DMA */ + unsigned mode: 1; /* 0 - line, 1 - reverse order */ + unsigned d2: 1; /* 0 - 1d mode, 1 -2d mode */ + unsigned mask: 1; /* for DMAR */ + unsigned unused_2: 1; + unsigned chen: 1; /* =1 - chain mode on */ + unsigned im: 1; /* =1 - setup end transfer flag on*/ + unsigned end: 1; /* flag end transfer data block + (see wn), hardware */ + unsigned done: 1; /* flag end exchange, hardware */ + unsigned wcx: 16; /* length in word for line mode */ 
+ unsigned oy: 16; /* offset adress in 32 bit + for 2d mode */ + unsigned wcy: 16; /* numbers line for 2d mode */ +} csr_reg_t; + +union csr_register { + csr_reg_t b; + unsigned long r; +}; + +/* IOR register */ +typedef struct ior_reg { + unsigned long ir: 40; /* memory adress */ + unsigned long sel: 1; /* 0 -DSP, 1 - CPU */ + unsigned long unused: 7; + unsigned long or: 16; /* offset (index) + for change adress */ +} ior_reg_t; + +union ior_register { + ior_reg_t b; + unsigned long r; +}; + +/* CP register */ +typedef struct cp_reg { + unsigned long adr: 40; /* adress for next block */ + unsigned long sel: 1; /* 0 -DSP, 1 - CPU */ + unsigned long run: 1; /* run chain */ + unsigned long unused: 22; +} cp_reg_t; + +union cp_register { + cp_reg_t b; + unsigned long r; +}; + + +/* for DMA chain exchange */ +typedef struct chain_link { + union ior_register ir0; + union ior_register ir1; + union cp_register cp; + union csr_register csr; +} chain_link_t; + + +/* + * Macros-function + */ + +#define GETBIT(r, b) ((readl(nREGS(dev->node, \ + dev->number, \ + r)) >> (b)) & 1) +#define nGETBIT(r, n, d, b) ((readl(nREGS(n, d, r)) >> (b)) & 1) + +#define SETBIT(r, b) { \ + unsigned long _treg; \ + _treg = readl(REGS(r)); \ + _treg |= (1 << b); \ + writel(_treg, REGS(r)); } + +#define nSETBIT(r, n, d, b) { \ + unsigned long _treg; \ + _treg = readl(nREGS(n, d, r)); \ + _treg |= (1 << b); \ + writel(_treg, nREGS(n, d, r)); } + + +#define CLRBIT(r, b) { \ + unsigned long _treg; \ + _treg = readl(REGS(r)); \ + _treg &= ~(1 << b); \ + writel(_treg, REGS(r)); } + +#define nCLRBIT(r, n, d, b) { \ + unsigned long _treg; \ + _treg = readl(nREGS(n, d, r)); \ + _treg &= ~(1 << b); \ + writel(_treg, nREGS(n, d, r)); } + + +#define GETBIT_node(r, n, b) ((readl(nREGS(n, 0, r)) >> (b)) & 1) + +#define SETBIT_node(r, n, b) { \ + unsigned long _treg; \ + _treg = readl(nREGS(n, 0, r)); \ + _treg |= (1 << b); \ + writel(_treg, nREGS(n, 0, r)); } + +#define CLRBIT_node(r, n, b) { \ + unsigned 
long _treg; \ + _treg = readl(nREGS(n, 0, r)); \ + _treg &= ~(1 << b); \ + writel(_treg, nREGS(n, 0, r)); } + + +/* + * Functions defenition + */ + +/* additional function */ +int dsp_run(dsp_dev_t *dev, unsigned int adr); +int dsp_stop(dsp_dev_t *dev); +int dsp_reset(dsp_dev_t *dev); + +/* for DMA */ +int lock_channel(int node, int dsp_number, int channel); +int unlock_channel(int node, int dsp_number, int channel); +int check_channel(int node, int channel); +int dma_exchange(dsp_dev_t *dev, + dsp_dma_setup_t *set, + int dir); + +/* for interrupt */ +void dsp_interrupt_handler(struct pt_regs *regs); +static inline int processing_DMA(unsigned int node, unsigned int channel); +static inline void processing_other_reason(dsp_dev_t *dev); +static inline int interrupt_analyze(int interrupt); + +/* for init and free */ +void free_memory_from_dsp_allocate(void); +int create_dsp_device(int node, + int number, + dsp_dev_t *dev, + int *all_devices_number); + +/* extern functions */ +extern void (*eldsp_interrupt_p)(struct pt_regs *regs); + + +#ifdef ERROR_MODE +# ifdef __KERNEL__ +# define ERROR_PRINT(fmt, args...) printk(KERN_ERR "eldsp:\t\terror:\t" \ + fmt, ## args) +# define WARNING_PRINT(fmt, args...) printk(KERN_WARNING "eldsp:\t\twarning:" \ + fmt, ## args) +# else +# define ERROR_PRINT(fmt, args...) fprintf(stderr, fmt, ## args) +# define WARNING_PRINT(fmt, args...) fprintf(stderr, fmt, ## args) +# endif +#else +# define ERROR_PRINT(fmt, args...) +# define WARNING_PRINT(fmt, args...) +#endif + +/* by default DBG_PRINT on/off by trigger dsp_debug */ +# define DBG_PRINT(fmt, args...) \ + if (dsp_debug) printk(KERN_INFO "eldsp:\t\t\t" fmt, ## args) +/* by default DETAIL_PRINT are off */ +# define DETAIL_PRINT(fmt, args...) + +#if DEBUG_MODE +# ifdef __KERNEL__ +# undef DBG_PRINT +# define DBG_PRINT(fmt, args...) printk(KERN_NOTICE "eldsp:\t\t\t" \ + fmt, ## args) +# if DEBUG_DETAIL_MODE +# undef DETAIL_PRINT +# define DETAIL_PRINT(fmt, args...) 
printk(KERN_NOTICE "eldsp detail:\t\t" \ + fmt, ## args) +# endif +# else +# define DBG_PRINT(fmt, args...) fprintf(stderr, fmt, ## args) +# ifdef DEBUG_DETAIL_MODE +# define DETAIL_PRINT(fmt, args...) fprintf(stderr, fmt, ## args) +# endif +# endif +#endif + + +#endif /* !(_MCST_DSP_DRV_H_) */ diff --git a/drivers/mcst/emc/Makefile b/drivers/mcst/emc/Makefile new file mode 100644 index 000000000000..20c70358b00f --- /dev/null +++ b/drivers/mcst/emc/Makefile @@ -0,0 +1,5 @@ +# +## Makefile for the Linux kernel device drivers. +# + +obj-$(CONFIG_SENSORS_EMC2305) += emc2305.o diff --git a/drivers/mcst/emc/emc2305.c b/drivers/mcst/emc/emc2305.c new file mode 100644 index 000000000000..7b228d9aa14b --- /dev/null +++ b/drivers/mcst/emc/emc2305.c @@ -0,0 +1,957 @@ +/* + * emc2305.c - hwmon driver for SMSC EMC2305 fan controller + * (C) Copyright 2013 + * Reinhard Pfau, Guntermann & Drunck GmbH + * + * Based on emc2103 driver by SMSC. + * + * Datasheet available at: + * http://www.smsc.com/Downloads/SMSC/Downloads_Public/Data_Sheets/2305.pdf + * + * Also supports the EMC2303 fan controller which has the same functionality + * and register layout as EMC2305, but supports only up to 3 fans instead of 5. + * + * Also supports EMC2302 (up to 2 fans) and EMC2301 (1 fan) fan controller. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* + * TODO / IDEAS: + * - expose more of the configuration and features + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_MCST +#include +#include +#endif + +/* + * Addresses scanned. + * Listed in the same order as they appear in the EMC2305, EMC2303 data sheets. + * + * Note: these are the I2C adresses which are possible for EMC2305 and EMC2303 + * chips. + * The EMC2302 supports only 0x2e (EMC2302-1) and 0x2f (EMC2302-2). + * The EMC2301 supports only 0x2f. + */ +static const unsigned short i2c_adresses[] = { + 0x2E, + 0x2F, + 0x2C, + 0x2D, + 0x4C, + 0x4D, + I2C_CLIENT_END +}; + +/* + * global registers + */ +enum { + REG_CONFIGURATION = 0x20, + REG_FAN_STATUS = 0x24, + REG_FAN_STALL_STATUS = 0x25, + REG_FAN_SPIN_STATUS = 0x26, + REG_DRIVE_FAIL_STATUS = 0x27, + REG_FAN_INTERRUPT_ENABLE = 0x29, + REG_PWM_POLARITY_CONFIG = 0x2a, + REG_PWM_OUTPUT_CONFIG = 0x2b, + REG_PWM_BASE_FREQ_1 = 0x2c, + REG_PWM_BASE_FREQ_2 = 0x2d, + REG_SOFTWARE_LOCK = 0xef, + REG_PRODUCT_FEATURES = 0xfc, + REG_PRODUCT_ID = 0xfd, + REG_MANUFACTURER_ID = 0xfe, + REG_REVISION = 0xff +}; + +/* + * fan specific registers + */ +enum { + REG_FAN_SETTING = 0x30, + REG_PWM_DIVIDE = 0x31, + REG_FAN_CONFIGURATION_1 = 0x32, + REG_FAN_CONFIGURATION_2 = 0x33, + REG_GAIN = 0x35, + REG_FAN_SPIN_UP_CONFIG = 0x36, + REG_FAN_MAX_STEP = 0x37, + REG_FAN_MINIMUM_DRIVE = 0x38, + REG_FAN_VALID_TACH_COUNT = 0x39, + REG_FAN_DRIVE_FAIL_BAND_LOW = 0x3a, + REG_FAN_DRIVE_FAIL_BAND_HIGH = 0x3b, + REG_TACH_TARGET_LOW = 0x3c, + REG_TACH_TARGET_HIGH = 0x3d, + REG_TACH_READ_HIGH = 0x3e, + REG_TACH_READ_LOW = 0x3f, +}; + +#define SEL_FAN(fan, reg) (reg + fan * 0x10) + +/* + * Factor by equations [2] and [3] from data sheet; valid for fans where the + 
* number of edges equals (poles * 2 + 1). + */ +#define FAN_RPM_FACTOR 3932160 + + +struct emc2305_fan_data { + bool enabled; + bool valid; + unsigned long last_updated; + bool rpm_control; + u8 multiplier; + u8 poles; + u16 target; + u16 tach; + u16 rpm_factor; + u8 pwm; +}; + +struct emc2305_data { + struct device *hwmon_dev; + struct mutex update_lock; + int fans; + struct emc2305_fan_data fan[5]; +#ifdef CONFIG_MCST + struct i2c_client *client; + struct pwm_chip chip; +#endif +}; + +#ifdef CONFIG_MCST +#define MAX_PWM_DEVICES 5 + +static inline struct emc2305_data *to_pwm(struct pwm_chip *chip) +{ + return container_of(chip, struct emc2305_data, chip); +} +#endif + +static int read_u8_from_i2c(struct i2c_client *client, u8 i2c_reg, u8 *output) +{ + int status = i2c_smbus_read_byte_data(client, i2c_reg); + if (status < 0) { + dev_warn(&client->dev, "reg 0x%02x, err %d\n", + i2c_reg, status); + } else { + *output = status; + } + return status; +} + +static void read_fan_from_i2c(struct i2c_client *client, u16 *output, + u8 hi_addr, u8 lo_addr) +{ + u8 high_byte, lo_byte; + + if (read_u8_from_i2c(client, hi_addr, &high_byte) < 0) + return; + + if (read_u8_from_i2c(client, lo_addr, &lo_byte) < 0) + return; + + *output = ((u16)high_byte << 5) | (lo_byte >> 3); +} + +static void write_fan_target_to_i2c(struct i2c_client *client, int fan, + u16 new_target) +{ + const u8 lo_reg = SEL_FAN(fan, REG_TACH_TARGET_LOW); + const u8 hi_reg = SEL_FAN(fan, REG_TACH_TARGET_HIGH); + u8 high_byte = (new_target & 0x1fe0) >> 5; + u8 low_byte = (new_target & 0x001f) << 3; + i2c_smbus_write_byte_data(client, lo_reg, low_byte); + i2c_smbus_write_byte_data(client, hi_reg, high_byte); +} + +static void read_fan_config_from_i2c(struct i2c_client *client, int fan) + +{ + struct emc2305_data *data = i2c_get_clientdata(client); + u8 conf1; + + if (read_u8_from_i2c(client, SEL_FAN(fan, REG_FAN_CONFIGURATION_1), + &conf1) < 0) + return; + + data->fan[fan].rpm_control = (conf1 & 0x80) != 0; + 
data->fan[fan].multiplier = 1 << ((conf1 & 0x60) >> 5); + data->fan[fan].poles = ((conf1 & 0x18) >> 3) + 1; +} + +static void read_fan_setting(struct i2c_client *client, int fan) +{ + struct emc2305_data *data = i2c_get_clientdata(client); + u8 setting; + + if (read_u8_from_i2c(client, SEL_FAN(fan, REG_FAN_SETTING), + &setting) < 0) + return; + + data->fan[fan].pwm = setting; +} + +static void read_fan_data(struct i2c_client *client, int fan_idx) +{ + struct emc2305_data *data = i2c_get_clientdata(client); + + read_fan_from_i2c(client, &data->fan[fan_idx].target, + SEL_FAN(fan_idx, REG_TACH_TARGET_HIGH), + SEL_FAN(fan_idx, REG_TACH_TARGET_LOW)); + read_fan_from_i2c(client, &data->fan[fan_idx].tach, + SEL_FAN(fan_idx, REG_TACH_READ_HIGH), + SEL_FAN(fan_idx, REG_TACH_READ_LOW)); +} + +static struct emc2305_fan_data * +emc2305_update_fan(struct i2c_client *client, int fan_idx) +{ + struct emc2305_data *data = i2c_get_clientdata(client); + struct emc2305_fan_data *fan_data = &data->fan[fan_idx]; + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, fan_data->last_updated + HZ + HZ / 2) + || !fan_data->valid) { + read_fan_config_from_i2c(client, fan_idx); + read_fan_data(client, fan_idx); + read_fan_setting(client, fan_idx); + fan_data->valid = true; + fan_data->last_updated = jiffies; + } + + mutex_unlock(&data->update_lock); + return fan_data; +} + +static struct emc2305_fan_data * +emc2305_update_device_fan(struct device *dev, struct device_attribute *da) +{ + struct i2c_client *client = to_i2c_client(dev); + int fan_idx = to_sensor_dev_attr(da)->index; + + return emc2305_update_fan(client, fan_idx); +} + +/* + * set/ config functions + */ + +/* + * Note: we also update the fan target here, because its value is + * determined in part by the fan clock divider. This follows the principle + * of least surprise; the user doesn't expect the fan target to change just + * because the divider changed. 
+ */ +static int +emc2305_set_fan_div(struct i2c_client *client, int fan_idx, long new_div) +{ + struct emc2305_data *data = i2c_get_clientdata(client); + struct emc2305_fan_data *fan = emc2305_update_fan(client, fan_idx); + const u8 reg_conf1 = SEL_FAN(fan_idx, REG_FAN_CONFIGURATION_1); + int new_range_bits, old_div = 8 / fan->multiplier; + int status = 0; + + if (new_div == old_div) /* No change */ + return 0; + + switch (new_div) { + case 1: + new_range_bits = 3; + break; + case 2: + new_range_bits = 2; + break; + case 4: + new_range_bits = 1; + break; + case 8: + new_range_bits = 0; + break; + default: + return -EINVAL; + } + + mutex_lock(&data->update_lock); + + status = i2c_smbus_read_byte_data(client, reg_conf1); + if (status < 0) { + dev_dbg(&client->dev, "reg 0x%02x, err %d\n", + reg_conf1, status); + status = -EIO; + goto exit_unlock; + } + status &= 0x9F; + status |= (new_range_bits << 5); + status = i2c_smbus_write_byte_data(client, reg_conf1, status); + if (status < 0) { + status = -EIO; + goto exit_invalidate; + } + + fan->multiplier = 8 / new_div; + + /* update fan target if high byte is not disabled */ + if ((fan->target & 0x1fe0) != 0x1fe0) { + u16 new_target = (fan->target * old_div) / new_div; + fan->target = min_t(u16, new_target, 0x1fff); + write_fan_target_to_i2c(client, fan_idx, fan->target); + } + +exit_invalidate: + /* invalidate fan data to force re-read from hardware */ + fan->valid = false; +exit_unlock: + mutex_unlock(&data->update_lock); + return status; +} + +static int +emc2305_set_fan_target(struct i2c_client *client, int fan_idx, long rpm_target) +{ + struct emc2305_data *data = i2c_get_clientdata(client); + struct emc2305_fan_data *fan = emc2305_update_fan(client, fan_idx); + + /* + * Datasheet states 16000 as maximum RPM target + * (table 2.2 and section 4.3) + */ + if ((rpm_target < 0) || (rpm_target > 16000)) + return -EINVAL; + + mutex_lock(&data->update_lock); + + if (rpm_target == 0) + fan->target = 0x1fff; + else + 
fan->target = clamp_val( + (FAN_RPM_FACTOR * fan->multiplier) / rpm_target, + 0, 0x1fff); + + write_fan_target_to_i2c(client, fan_idx, fan->target); + + mutex_unlock(&data->update_lock); + return 0; +} + +static int +emc2305_set_pwm_enable(struct i2c_client *client, int fan_idx, long enable) +{ + struct emc2305_data *data = i2c_get_clientdata(client); + struct emc2305_fan_data *fan = emc2305_update_fan(client, fan_idx); + const u8 reg_fan_conf1 = SEL_FAN(fan_idx, REG_FAN_CONFIGURATION_1); + int status = 0; + u8 conf_reg; + + mutex_lock(&data->update_lock); + switch (enable) { + case 0: + fan->rpm_control = false; + break; + case 3: + fan->rpm_control = true; + break; + default: + status = -EINVAL; + goto exit_unlock; + } + + status = read_u8_from_i2c(client, reg_fan_conf1, &conf_reg); + if (status < 0) { + status = -EIO; + goto exit_unlock; + } + + if (fan->rpm_control) + conf_reg |= 0x80; + else + conf_reg &= ~0x80; + + status = i2c_smbus_write_byte_data(client, reg_fan_conf1, conf_reg); + if (status < 0) + status = -EIO; + +exit_unlock: + mutex_unlock(&data->update_lock); + return status; +} + +static int +emc2305_set_pwm(struct i2c_client *client, int fan_idx, long pwm) +{ + struct emc2305_data *data = i2c_get_clientdata(client); + struct emc2305_fan_data *fan = emc2305_update_fan(client, fan_idx); + const u8 reg_fan_setting = SEL_FAN(fan_idx, REG_FAN_SETTING); + int status = 0; + + /* + * Datasheet states 255 as maximum PWM + * (section 5.7) + */ + if ((pwm < 0) || (pwm > 255)) + return -EINVAL; + + fan->pwm = pwm; + + mutex_lock(&data->update_lock); + + status = i2c_smbus_write_byte_data(client, reg_fan_setting, fan->pwm); + + mutex_unlock(&data->update_lock); + return status; +} +/* + * sysfs callback functions + * + * Note: + * Naming of the funcs is modelled after the naming scheme described in + * Documentation/hwmon/sysfs-interface: + * + * For a sysfs file _ the functions are named like this: + * the show function: show__ + * the store function: set__ + 
* For read only (RO) attributes of course only the show func is required. + * + * This convention allows us to define the sysfs attributes by using macros. + */ + +static ssize_t +show_fan_input(struct device *dev, struct device_attribute *da, char *buf) +{ + struct emc2305_fan_data *fan = emc2305_update_device_fan(dev, da); + int rpm = 0; + if (fan->tach != 0) + rpm = (FAN_RPM_FACTOR * fan->multiplier) / fan->tach; + return sprintf(buf, "%d\n", rpm); +} + +static ssize_t +show_fan_fault(struct device *dev, struct device_attribute *da, char *buf) +{ + struct emc2305_fan_data *fan = emc2305_update_device_fan(dev, da); + bool fault = ((fan->tach & 0x1fe0) == 0x1fe0); + return sprintf(buf, "%d\n", fault ? 1 : 0); +} + +static ssize_t +show_fan_div(struct device *dev, struct device_attribute *da, char *buf) +{ + struct emc2305_fan_data *fan = emc2305_update_device_fan(dev, da); + int fan_div = 8 / fan->multiplier; + return sprintf(buf, "%d\n", fan_div); +} + +static ssize_t +set_fan_div(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + int fan_idx = to_sensor_dev_attr(da)->index; + long new_div; + int status; + + status = kstrtol(buf, 10, &new_div); + if (status < 0) + return -EINVAL; + + status = emc2305_set_fan_div(client, fan_idx, new_div); + if (status < 0) + return status; + + return count; +} + +static ssize_t +show_fan_target(struct device *dev, struct device_attribute *da, char *buf) +{ + struct emc2305_fan_data *fan = emc2305_update_device_fan(dev, da); + int rpm = 0; + + /* high byte of 0xff indicates disabled so return 0 */ + if ((fan->target != 0) && ((fan->target & 0x1fe0) != 0x1fe0)) + rpm = (FAN_RPM_FACTOR * fan->multiplier) + / fan->target; + + return sprintf(buf, "%d\n", rpm); +} + +static ssize_t set_fan_target(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + int fan_idx = 
to_sensor_dev_attr(da)->index; + long rpm_target; + int status; + + status = kstrtol(buf, 10, &rpm_target); + if (status < 0) + return -EINVAL; + + status = emc2305_set_fan_target(client, fan_idx, rpm_target); + if (status < 0) + return status; + + return count; +} + +static ssize_t +show_pwm_enable(struct device *dev, struct device_attribute *da, char *buf) +{ + struct emc2305_fan_data *fan = emc2305_update_device_fan(dev, da); + return sprintf(buf, "%d\n", fan->rpm_control ? 3 : 0); +} + +static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + int fan_idx = to_sensor_dev_attr(da)->index; + long new_value; + int status; + + status = kstrtol(buf, 10, &new_value); + if (status < 0) + return -EINVAL; + status = emc2305_set_pwm_enable(client, fan_idx, new_value); + return count; +} + +static ssize_t show_pwm(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct emc2305_fan_data *fan = emc2305_update_device_fan(dev, da); + return sprintf(buf, "%d\n", fan->pwm); +} + +static ssize_t set_pwm(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + int fan_idx = to_sensor_dev_attr(da)->index; + unsigned long val; + int ret; + int status; + + ret = kstrtoul(buf, 10, &val); + if (ret) + return ret; + if (val > 255) + return -EINVAL; + + status = emc2305_set_pwm(client, fan_idx, val); + return count; +} + +/* define a read only attribute */ +#define EMC2305_ATTR_RO(_type, _item, _num) \ + SENSOR_ATTR(_type ## _num ## _ ## _item, S_IRUGO, \ + show_## _type ## _ ## _item, NULL, _num - 1) + +/* define a read/write attribute */ +#define EMC2305_ATTR_RW(_type, _item, _num) \ + SENSOR_ATTR(_type ## _num ## _ ## _item, S_IRUGO | S_IWUSR, \ + show_## _type ##_ ## _item, \ + set_## _type ## _ ## _item, _num - 1) + +/* + * TODO: Ugly hack, but temporary as this whole logic 
needs + * to be rewritten as per standard HWMON sysfs registration + */ + +/* define a read/write attribute */ +#define EMC2305_ATTR_RW2(_type, _num) \ + SENSOR_ATTR(_type ## _num, S_IRUGO | S_IWUSR, \ + show_## _type, set_## _type, _num - 1) + +/* defines the attributes for a single fan */ +#define EMC2305_DEFINE_FAN_ATTRS(_num) \ + static const \ + struct sensor_device_attribute emc2305_attr_fan ## _num[] = { \ + EMC2305_ATTR_RO(fan, input, _num), \ + EMC2305_ATTR_RO(fan, fault, _num), \ + EMC2305_ATTR_RW(fan, div, _num), \ + EMC2305_ATTR_RW(fan, target, _num), \ + EMC2305_ATTR_RW(pwm, enable, _num), \ + EMC2305_ATTR_RW2(pwm, _num) \ + } + +#define EMC2305_NUM_FAN_ATTRS ARRAY_SIZE(emc2305_attr_fan1) + +/* common attributes for EMC2303 and EMC2305 */ +static const struct sensor_device_attribute emc2305_attr_common[] = { +}; + +/* fan attributes for the single fans */ +EMC2305_DEFINE_FAN_ATTRS(1); +EMC2305_DEFINE_FAN_ATTRS(2); +EMC2305_DEFINE_FAN_ATTRS(3); +EMC2305_DEFINE_FAN_ATTRS(4); +EMC2305_DEFINE_FAN_ATTRS(5); +EMC2305_DEFINE_FAN_ATTRS(6); + +/* fan attributes */ +static const struct sensor_device_attribute *emc2305_fan_attrs[] = { + emc2305_attr_fan1, + emc2305_attr_fan2, + emc2305_attr_fan3, + emc2305_attr_fan4, + emc2305_attr_fan5, +}; + +/* + * driver interface + */ + +static int emc2305_remove(struct i2c_client *client) +{ + struct emc2305_data *data = i2c_get_clientdata(client); + int fan_idx, i; + + hwmon_device_unregister(data->hwmon_dev); + + for (fan_idx = 0; fan_idx < data->fans; ++fan_idx) + for (i = 0; i < EMC2305_NUM_FAN_ATTRS; ++i) + device_remove_file( + &client->dev, + &emc2305_fan_attrs[fan_idx][i].dev_attr); + + for (i = 0; i < ARRAY_SIZE(emc2305_attr_common); ++i) + device_remove_file(&client->dev, + &emc2305_attr_common[i].dev_attr); + + kfree(data); + return 0; +} + + +#ifdef CONFIG_OF +/* + * device tree support + */ + +struct of_fan_attribute { + const char *name; + int (*set)(struct i2c_client*, int, long); +}; + +struct 
of_fan_attribute of_fan_attributes[] = { + {"fan-div", emc2305_set_fan_div}, + {"fan-target", emc2305_set_fan_target}, + {"pwm-enable", emc2305_set_pwm_enable}, + {NULL, NULL} +}; + +static int emc2305_config_of(struct i2c_client *client) +{ + struct emc2305_data *data = i2c_get_clientdata(client); + struct device_node *node; + unsigned int fan_idx; + + if (!client->dev.of_node) + return -EINVAL; + if (!of_get_next_child(client->dev.of_node, NULL)) + return 0; + + for (fan_idx = 0; fan_idx < data->fans; ++fan_idx) + data->fan[fan_idx].enabled = false; + + for_each_child_of_node(client->dev.of_node, node) { + const __be32 *property; + int len; + struct of_fan_attribute *attr; + + property = of_get_property(node, "reg", &len); + if (!property || len != sizeof(int)) { + dev_err(&client->dev, "invalid reg on %s\n", + node->full_name); + continue; + } + + fan_idx = be32_to_cpup(property); + if (fan_idx >= data->fans) { + dev_err(&client->dev, + "invalid fan index %d on %s\n", + fan_idx, node->full_name); + continue; + } + + data->fan[fan_idx].enabled = true; + + for (attr = of_fan_attributes; attr->name; ++attr) { + int status = 0; + long value; + property = of_get_property(node, attr->name, &len); + if (!property) + continue; + if (len != sizeof(int)) { + dev_err(&client->dev, "invalid %s on %s\n", + attr->name, node->full_name); + continue; + } + value = be32_to_cpup(property); + status = attr->set(client, fan_idx, value); + if (status == -EINVAL) { + dev_err(&client->dev, + "invalid value for %s on %s\n", + attr->name, node->full_name); + } + } + } + + return 0; +} + +#endif + +static void emc2305_get_config(struct i2c_client *client) +{ + int i; + struct emc2305_data *data = i2c_get_clientdata(client); + + for (i = 0; i < data->fans; ++i) { + data->fan[i].enabled = true; + emc2305_update_fan(client, i); + } + +#ifdef CONFIG_OF + emc2305_config_of(client); +#endif +} + +#ifdef CONFIG_MCST +static int emc2305_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + 
const struct pwm_state *state) +{ + struct emc2305_data *data = to_pwm(chip); + struct i2c_client *client = data->client; + const u8 reg_fan_conf1 = SEL_FAN(pwm->hwpwm, REG_FAN_CONFIGURATION_1); + int ret = -EINVAL; + u8 val; + u8 pwm_mode; + + read_u8_from_i2c(client, reg_fan_conf1, &pwm_mode); + if (pwm_mode >> 7) + return -EPERM; + + if (state->period > 1) { + mutex_lock(&data->update_lock); + val = state->duty_cycle * 255 / (state->period - 1); + val = clamp_val(val, 0, 255); + i2c_smbus_write_byte_data(client, + SEL_FAN(pwm->hwpwm, REG_FAN_SETTING), val); + mutex_unlock(&data->update_lock); + } + + return ret; +} + +static const struct pwm_ops emc2305_pwm_ops = { + .apply = emc2305_pwm_apply, + .owner = THIS_MODULE, +}; + +static void emc2305_pwm_remove(void *arg) +{ + struct emc2305_data *data = arg; + + pwmchip_remove(&data->chip); +} + +static void emc2305_init_pwm(struct emc2305_data *data) +{ + struct i2c_client *client = data->client; + int ret; + + /* Initialize chip */ + + data->chip.dev = &client->dev; + data->chip.ops = &emc2305_pwm_ops; + data->chip.base = -1; + data->chip.npwm = MAX_PWM_DEVICES; + + ret = pwmchip_add(&data->chip); + if (ret < 0) { + dev_err(&client->dev, "pwmchip_add() failed: %d\n", ret); + return; + } + + devm_add_action(&client->dev, emc2305_pwm_remove, data); +} +#endif + +static int +emc2305_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + struct emc2305_data *data; + int status; + int i; + int fan_idx; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -EIO; + + data = kzalloc(sizeof(struct emc2305_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + i2c_set_clientdata(client, data); + mutex_init(&data->update_lock); + + status = i2c_smbus_read_byte_data(client, REG_PRODUCT_ID); + switch (status) { + case 0x34: /* EMC2305 */ + data->fans = 5; + break; + case 0x35: /* EMC2303 */ + data->fans = 3; + break; + case 0x36: /* EMC2302 */ + data->fans = 2; + break; + case 
0x37: /* EMC2301 */ + data->fans = 1; + break; + default: + if (status >= 0) + status = -EINVAL; + goto exit_free; + } + + emc2305_get_config(client); + + for (i = 0; i < ARRAY_SIZE(emc2305_attr_common); ++i) { + status = device_create_file(&client->dev, + &emc2305_attr_common[i].dev_attr); + if (status) + goto exit_remove; + } + for (fan_idx = 0; fan_idx < data->fans; ++fan_idx) + for (i = 0; i < EMC2305_NUM_FAN_ATTRS; ++i) { + if (!data->fan[fan_idx].enabled) + continue; + status = device_create_file( + &client->dev, + &emc2305_fan_attrs[fan_idx][i].dev_attr); + if (status) + goto exit_remove_fans; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove_fans; + } + + dev_info(&client->dev, "%s: sensor '%s'\n", + dev_name(data->hwmon_dev), client->name); + +#ifdef CONFIG_MCST + data->client = client; + if (IS_ENABLED(CONFIG_PWM)) + emc2305_init_pwm(data); +#endif + return 0; + +exit_remove_fans: + for (fan_idx = 0; fan_idx < data->fans; ++fan_idx) + for (i = 0; i < EMC2305_NUM_FAN_ATTRS; ++i) + device_remove_file( + &client->dev, + &emc2305_fan_attrs[fan_idx][i].dev_attr); + +exit_remove: + for (i = 0; i < ARRAY_SIZE(emc2305_attr_common); ++i) + device_remove_file(&client->dev, + &emc2305_attr_common[i].dev_attr); +exit_free: + kfree(data); + return status; +} + +static const struct i2c_device_id emc2305_id[] = { + { "emc2305", 0 }, + { "emc2303", 0 }, + { "emc2302", 0 }, + { "emc2301", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, emc2305_id); + +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int +emc2305_detect(struct i2c_client *new_client, struct i2c_board_info *info) +{ + struct i2c_adapter *adapter = new_client->adapter; + int manufacturer, product; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -ENODEV; + + manufacturer = + i2c_smbus_read_byte_data(new_client, REG_MANUFACTURER_ID); + if (manufacturer != 0x5D) + 
return -ENODEV; + + product = i2c_smbus_read_byte_data(new_client, REG_PRODUCT_ID); + + switch (product) { + case 0x34: + strlcpy(info->type, "emc2305", I2C_NAME_SIZE); + break; + case 0x35: + strlcpy(info->type, "emc2303", I2C_NAME_SIZE); + break; + case 0x36: + strlcpy(info->type, "emc2302", I2C_NAME_SIZE); + break; + case 0x37: + strlcpy(info->type, "emc2301", I2C_NAME_SIZE); + break; + default: + return -ENODEV; + } + + return 0; +} + +static struct i2c_driver emc2305_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "emc2305", + }, + .probe = emc2305_probe, + .remove = emc2305_remove, + .id_table = emc2305_id, + .detect = emc2305_detect, + .address_list = i2c_adresses, +}; + +module_i2c_driver(emc2305_driver); + +MODULE_AUTHOR("Reinhard Pfau "); +MODULE_DESCRIPTION("SMSC EMC2305 hwmon driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mcst/gpu-imgtec/GPL-COPYING b/drivers/mcst/gpu-imgtec/GPL-COPYING new file mode 100644 index 000000000000..d159169d1050 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/GPL-COPYING @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. 
+ + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. 
You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. 
+ +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. 
+You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. 
diff --git a/drivers/mcst/gpu-imgtec/INSTALL b/drivers/mcst/gpu-imgtec/INSTALL new file mode 100644 index 000000000000..ffe15ff02ae7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/INSTALL @@ -0,0 +1,58 @@ +Rogue Embedded Systems DDK for the Linux kernel. +Copyright (C) Imagination Technologies Ltd. All rights reserved. +====================================================================== + +This file covers how to build and install the Imagination Technologies +Rogue DDK for the Linux kernel. For full details, see the relevant platform +guide. + + +Build System Environment Variables +------------------------------------------- + +The Rogue DDK Build scripts depend on a number of environment variables +being set-up before compilation or installation of DDK software can +commence: + +$DISCIMAGE +The DDK Build scripts install files to the location specified by the +DISCIMAGE environment variable. To do so, they need to know where the +target system image resides: +$ export DISCIMAGE=/path/to/filesystem +If you are building on the target system, you can set this to '/'. + +$KERNELDIR +When building the Rogue DDK kernel module, the build needs access +to the headers of the Linux kernel. +If you are building on the target machine, you can set this as follows: +$ export KERNELDIR=/usr/src/linux-headers-`uname -r` + +$CROSS_COMPILE +If you intend on targeting a platform that is different from your build +machine (e.g., if you are compiling on an x86 but targeting ARM) you need +to set the CROSS_COMPILE variable so that the build system uses the correct +compiler. For example: +$ export CROSS_COMPILE=arm-linux-gnueabihf- + + +Build and Install Instructions +------------------------------------------- + +The Rogue DDK configures different target builds within directories under +build/linux/. + +The most interesting build targets are: + + build Makes everything + clobber Removes all binaries for all builds as well. + install Runs the install script generated by the build. 
+ +The following variables may be set on the command line to influence a build. + + BUILD The type of build being performed. + Alternatives are release, timing or debug. + +To build, change to the appropriate target directory, for example: +$ cd rogue_km/build/linux/ +$ make BUILD=debug +$ sudo -E make install BUILD=debug diff --git a/drivers/mcst/gpu-imgtec/Kconfig b/drivers/mcst/gpu-imgtec/Kconfig new file mode 100644 index 000000000000..4d09d2a59b99 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/Kconfig @@ -0,0 +1,55 @@ +menu "MCST support of Imagination's GPU (Rogue_DDK_Linux_WS_REL_1.13@5824814)" + depends on DRM && E2K + +config MCST_GPU_IMGTEC + tristate "MCST support of Imagination's GPU for E2C3 SoC (Rogue_DDK_Linux_WS_REL_1.13@5824814)" + select DRM_VM + default m + ---help--- + Say Y to get the GPU driver support. + +config MCST_GPU_IMGTEC_GPUVIRT + bool "Enable GPU virtualization support" + depends on MCST_GPU_IMGTEC && VIRTUALIZATION + default n + ---help--- + Enable GPU virtualization support. + +config MCST_GPU_IMGTEC_GPUVIRT_GUESTDRV + bool "Enable guest driver build." + depends on MCST_GPU_IMGTEC_GPUVIRT + default n + ---help--- + Enable guest driver build. + +config MCST_GPU_IMGTEC_GPUVIRT_NUM_OSID + int "Number of firmware supported OSIDs" + depends on MCST_GPU_IMGTEC_GPUVIRT + default 2 + ---help--- + Number of firmware supported OSIDs + +config MCST_GPU_IMGTEC_PDUMP + bool "Enable PDUMP support in E2C3 GPU graphics driver" + depends on MCST_GPU_IMGTEC + default n + help + Choose this option to enable PDUMP on E2C3 GPU (for debugging only). + +config MCST_GPU_IMGTEC_DEBUG + bool "Build debug version of E2C3 GPU graphics driver" + depends on MCST_GPU_IMGTEC + default n + help + Choose this option for debugging only (also rebuilds userland code and firmware with debug). 
+ +config MCST_GPU_IMGTEC_CONTIGUOUS_FW + bool "Allocate contiguous physical memory for GPU FW heap" + depends on MCST_GPU_IMGTEC + default y + help + For virtualization builds, FW heaps (for host and all guests) should normally be allocated + as any other DMA buffer, using dma_alloc_coherent. This currently does not work on e2c3 + prototype, so we enable this workaround to allocate FW heaps contiguously in physical memory. + +endmenu diff --git a/drivers/mcst/gpu-imgtec/MIT-COPYING b/drivers/mcst/gpu-imgtec/MIT-COPYING new file mode 100644 index 000000000000..0cbd14e06cbc --- /dev/null +++ b/drivers/mcst/gpu-imgtec/MIT-COPYING @@ -0,0 +1,41 @@ + +This software is Copyright (C) Imagination Technologies Ltd. + +You may use, distribute and copy this software under the terms of the MIT +license displayed below. + +----------------------------------------------------------------------------- + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, this Software may be used under the terms of the GNU General +Public License Version 2 ("GPL") in which case the provisions of GPL are +applicable instead of those above. 
+ +If you wish to allow use of your version of this Software only under the terms +of GPL, and not to allow others to use your version of this file under the +terms of the MIT license, indicate your decision by deleting from each file +the provisions above and replace them with the notice and other provisions +required by GPL as set out in the file called "GPL-COPYING" included in this +distribution. If you do not delete the provisions above, a recipient may use +your version of this file under the terms of either the MIT license or GPL. + +----------------------------------------------------------------------------- + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +----------------------------------------------------------------------------- diff --git a/drivers/mcst/gpu-imgtec/Makefile b/drivers/mcst/gpu-imgtec/Makefile new file mode 100644 index 000000000000..0d5cd1ceaca7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/Makefile @@ -0,0 +1,60 @@ +########################################################################### ### +#@Title Root makefile. Builds everything else. +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +TOP := $(srctree)/drivers/mcst/gpu-imgtec +OUT = $(TOP) + +#Disable "-Werror" +W := 1 + +include $(TOP)/config_kernel.mk + +# for PDUMP +ccflags-$(CONFIG_MCST_GPU_IMGTEC_PDUMP) += -DPDUMP + +bridge_base := $(TOP)/generated/$(PVR_ARCH) + +ccflags-y += -D__linux__ -include $(OUT)/config_kernel.h + +include $(TOP)/services/server/env/linux/Kbuild.mk + +obj-$(CONFIG_MCST_GPU_IMGTEC) += $(PVRSRV_MODNAME).o +obj-$(CONFIG_MCST_GPU_IMGTEC) += kernel/drivers/staging/imgtec/e2c3_gpu/e2c3_gpu_drv.o diff --git a/drivers/mcst/gpu-imgtec/README b/drivers/mcst/gpu-imgtec/README new file mode 100644 index 000000000000..48f57c9f6049 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/README @@ -0,0 +1,32 @@ +Rogue Embedded Systems DDK for the Linux kernel. +Copyright (C) Imagination Technologies Ltd. All rights reserved. +====================================================================== + +About +------------------------------------------- + +This is the Imagination Technologies Rogue DDK for the Linux kernel. + + +License +------------------------------------------- + +You may use, distribute and copy this software under the terms of the MIT +license. Details of this license can be found in the file "MIT-COPYING". + +Alternatively, you may use, distribute and copy this software under the terms +of the GNU General Public License version 2. The full GNU General Public +License version 2 can be found in the file "GPL-COPYING". + + +Build and Install Instructions +------------------------------------------- + +For details see the "INSTALL" file and the platform guide. + + +Contact information +------------------------------------------- + +Imagination Technologies Ltd. 
+Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK diff --git a/drivers/mcst/gpu-imgtec/build/linux/bits.mk b/drivers/mcst/gpu-imgtec/build/linux/bits.mk new file mode 100644 index 000000000000..1da249704ac1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/bits.mk @@ -0,0 +1,126 @@ +########################################################################### ### +#@Title Useful special targets which don't build anything +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. 
+# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +ifneq ($(filter dumpvar-%,$(MAKECMDGOALS)),) +dumpvar-%: ; +$(foreach _var_to_dump,$(patsubst dumpvar-%,%,$(filter dumpvar-%,$(MAKECMDGOALS))),$(info $(if $(filter undefined,$(origin $(_var_to_dump))),# $$($(_var_to_dump)) is not set,$(_var_to_dump) := $($(_var_to_dump))))) +endif + +ifneq ($(filter whereis-%,$(MAKECMDGOALS)),) +whereis-%: ; +$(foreach _module_to_find,$(patsubst whereis-%,%,$(filter whereis-%,$(MAKECMDGOALS))),$(info $(if $(INTERNAL_MAKEFILE_FOR_MODULE_$(_module_to_find)),$(INTERNAL_MAKEFILE_FOR_MODULE_$(_module_to_find)),# No module $(_module_to_find)))) +endif + +ifneq ($(filter whatis-%,$(MAKECMDGOALS)),) +whatis-$(HOST_OUT)/%: ; +whatis-$(TARGET_PRIMARY_OUT)/%: ; +whatis-$(TARGET_NEUTRAL_OUT)/%: ; +$(foreach _file_to_find,$(patsubst whatis-%,%,$(filter whatis-%,$(MAKECMDGOALS))),$(info $(strip $(foreach _m,$(ALL_MODULES),$(if $(filter $(_file_to_find),$(INTERNAL_TARGETS_FOR_$(_m))),$(_file_to_find) is in $(_m) which is defined in $(INTERNAL_MAKEFILE_FOR_MODULE_$(_m)),))))) +endif + +.PHONY: ls-modules +ls-modules: + @: $(foreach _m,$(ALL_MODULES),$(info $($(_m)_type) $(_m) $(patsubst $(TOP)/%,%,$(INTERNAL_MAKEFILE_FOR_MODULE_$(_m))))) + +.PHONY: ls-types +ls-types: + @: $(info $(sort $(patsubst host_%,%,$(foreach 
_m,$(ALL_MODULES),$($(_m)_type))))) + +ifeq ($(strip $(MAKECMDGOALS)),visualise) +FORMAT ?= xlib +GRAPHVIZ ?= neato +visualise: $(OUT)/MAKE_RULES.dot + $(GRAPHVIZ) -T$(FORMAT) -o $(OUT)/MAKE_RULES.$(FORMAT) $< +$(OUT)/MAKE_RULES.dot: $(OUT)/MAKE_RULES + perl $(MAKE_TOP)/tools/depgraph.pl -t $(TOP) -g $(firstword $(GRAPHVIZ)) $(OUT)/MAKE_RULES >$(OUT)/MAKE_RULES.dot +$(OUT)/MAKE_RULES: $(ALL_MAKEFILES) + -$(MAKE) -C $(TOP) -f $(MAKE_TOP)/toplevel.mk TOP=$(TOP) OUT=$(OUT) ls-modules -qp >$(OUT)/MAKE_RULES 2>&1 +else +visualise: + @: $(error visualise specified along with other goals. This is not supported) +endif + +.PHONY: help confighelp +help: + @echo 'Build targets' + @echo ' make, make build Build all components of the build' + @echo ' make components Build only the user-mode components' + @echo ' make firmware Build only firmware binaries' + @echo ' make kbuild Build only the kernel-mode components' + @echo " make docs Build the build's supporting documentation" + @echo ' make MODULE Build the module MODULE and all of its dependencies' + @echo ' make binary_.../target/libsomething.so' + @echo ' Build a particular file (including intermediates)' + @echo 'Variables' + @echo ' make V=1 ... Print the commands that are executed' + @echo ' make W=1 ... Enable extra compiler warnings' + @echo ' make D=opt ... Set build system debug option (D=help for a list)' + @echo ' make OUT=dir ... Place output+intermediates in specified directory' + @echo ' make CHECK=cmd ... Check source with static analyser or other tool' + @echo ' EXCLUDED_APIS=... List of APIs to remove from the build' + @echo ' make SOMEOPTION=1 ... 
Set configuration options (see "make confighelp")' + @echo ' Defaults are set by $(PVR_BUILD_DIR)/Makefile' + @echo 'Clean targets' + @echo ' make clean Remove output files for the current build' + @echo ' make clobber As "make clean", but remove build config too' + @echo ' make clean-MODULE Clean (or clobber) only files for MODULE' + @echo '' + @echo 'Special targets' + @echo ' make whereis-MODULE Show the path to the Linux.mk defining MODULE' + @echo ' make whatis-FILE Show which module builds an output FILE' + @echo ' make ls-modules List all modules defined by makefiles' + +# This rule runs in the configuration stage, in config/help.mk. Make a dummy +# target here to suppress "no rule to make target 'confighelp' messages. +confighelp: ; + +ifneq ($(filter help,$(D)),) +empty := +space := $(empty) $(empty) +$(info Debug options) +$(info $(space)D=modules dump module info) +$(info $(space)D=config dump all config options + type and origin) +$(info $(space)D=freeze-config prevent config changes) +$(info $(space)D=config-changes dump diffs when config changes) +$(info $(space)D=nobuild stop before running the main build) +$(info Options can be combined: make D=freeze-config,config-changes) +$(error D=help given) +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/buildvars.mk b/drivers/mcst/gpu-imgtec/build/linux/buildvars.mk new file mode 100644 index 000000000000..a47b0552a89b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/buildvars.mk @@ -0,0 +1,391 @@ +########################################################################### ### +#@Title Define global variables +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@Description This file is read once at the start of the build, after reading +# in config.mk. It should define the non-MODULE_* variables used +# in commands, like ALL_CFLAGS +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +# NOTE: You must *not* use the cc-option et al macros in COMMON_FLAGS, +# COMMON_CFLAGS or COMMON_USER_FLAGS. These flags are shared between +# host and target, which might use compilers with different capabilities. + +# ANOTHER NOTE: All flags here must be architecture-independent (i.e. no +# -march or toolchain include paths) + +# These flags are used for kernel, User C and User C++ +# +COMMON_FLAGS := -W -Wall + +# Enable 64-bit file & memory handling on 32-bit systems. +# +# This only affects glibc and possibly other Linux libc implementations; it +# does not apply to Android where _FILE_OFFSET_BITS is not completely +# implemented in bionic, and _LARGEFILE{,64}_SOURCE do not apply. +# +# This makes no difference on 64-bit systems, and allows for file and +# memory addresses >2GB to be handled on 32-bit systems. +# +# _LARGEFILE_SOURCE adds a couple functions (fseeko & ftello) +# _LARGEFILE64_SOURCE adds *64 variants of 32-bit file operations +# _FILE_OFFSET_BITS=64 makes the 64-bit variants the default +# +ifneq ($(SUPPORT_ANDROID_PLATFORM),1) +COMMON_FLAGS += \ + -D_LARGEFILE_SOURCE \ + -D_LARGEFILE64_SOURCE \ + -D_FILE_OFFSET_BITS=64 +endif + +ifeq (, $(shell which indent)) + INDENT_TOOL_NOT_FOUND := 1 +else + INDENT_GENERATED_HEADERS := 1 +endif + +# Some GCC warnings are C only, so we must mask them from C++ +# +COMMON_CFLAGS := $(COMMON_FLAGS) \ + -Wno-format-zero-length \ + -Wmissing-prototypes -Wstrict-prototypes + +# User C and User C++ optimization control. Does not affect kernel. 
+# +ifeq ($(BUILD),debug) +COMMON_USER_FLAGS := -O0 +else +OPTIM ?= -O2 +ifneq ($(PVRSRV_NEED_PVR_ASSERT),1) +COMMON_USER_FLAGS := -DNDEBUG +endif +ifeq ($(USE_LTO),1) +COMMON_USER_FLAGS += $(OPTIM) -flto +else +COMMON_USER_FLAGS += $(OPTIM) +endif +endif + +# GCOV support for user-mode coverage statistics +# +ifeq ($(GCOV_BUILD),on) +COMMON_USER_FLAGS += -fprofile-arcs -ftest-coverage +endif + +# Driver has not yet been audited for aliasing issues +# +COMMON_USER_FLAGS += -fno-strict-aliasing + +# We always enable debugging. Either the release binaries are stripped +# and the symbols put in the symbolpackage, or we're building debug. +# +COMMON_USER_FLAGS += -g + +# User C and User C++ warning flags +# +COMMON_USER_FLAGS += \ + -Wpointer-arith -Wunused-parameter \ + -Wmissing-format-attribute + +# Additional warnings, and optional warnings. +# +TESTED_TARGET_USER_FLAGS := \ + $(call cc-option,-Wno-error=implicit-fallthrough) \ + $(call cc-option,-Wno-missing-field-initializers) \ + $(call cc-option,-Wno-error=assume) \ + $(call cc-option,-fdiagnostics-show-option) \ + $(call cc-option,-Wno-self-assign) \ + $(call cc-option,-Wno-parentheses-equality) +TESTED_HOST_USER_FLAGS := \ + $(call host-cc-option,-Wno-error=implicit-fallthrough) \ + $(call host-cc-option,-Wno-missing-field-initializers) \ + $(call host-cc-option,-fdiagnostics-show-option) \ + $(call host-cc-option,-Wno-self-assign) \ + $(call host-cc-option,-Wno-parentheses-equality) + +# These flags are clang-specific. +# -Wno-unused-command-line-argument works around a buggy interaction +# with ccache, see https://bugzilla.samba.org/show_bug.cgi?id=8118 +# -fcolor-diagnostics force-enables colored error messages which +# get disabled when ccache is piped through ccache. 
+# +TESTED_TARGET_USER_FLAGS += \ + $(call cc-option,-Qunused-arguments) \ + $(call cc-option,-Wlogical-op) \ + $(if $(shell test -t 2 && echo true),$(call cc-option,-fcolor-diagnostics)) +TESTED_HOST_USER_FLAGS += \ + $(call host-cc-option,-Qunused-arguments) \ + $(call host-cc-option,-Wlogical-op) \ + $(if $(shell test -t 2 && echo true),$(call host-cc-option,-fcolor-diagnostics)) + +ifeq ($(W),1) +TESTED_TARGET_USER_FLAGS += \ + $(call cc-option,-Wbad-function-cast) \ + $(call cc-option,-Wcast-qual) \ + $(call cc-option,-Wcast-align) \ + $(call cc-option,-Wconversion) \ + $(call cc-option,-Wdisabled-optimization) \ + $(call cc-option,-Wmissing-declarations) \ + $(call cc-option,-Wmissing-include-dirs) \ + $(call cc-option,-Wnested-externs) \ + $(call cc-option,-Wold-style-definition) \ + $(call cc-option,-Woverlength-strings) \ + $(call cc-option,-Wpacked) \ + $(call cc-option,-Wpacked-bitfield-compat) \ + $(call cc-option,-Wpadded) \ + $(call cc-option,-Wredundant-decls) \ + $(call cc-option,-Wshadow) \ + $(call cc-option,-Wswitch-default) \ + $(call cc-option,-Wvla) \ + $(call cc-option,-Wwrite-strings) +TESTED_HOST_USER_FLAGS += \ + $(call host-cc-option,-Wbad-function-cast) \ + $(call host-cc-option,-Wcast-qual) \ + $(call host-cc-option,-Wcast-align) \ + $(call host-cc-option,-Wconversion) \ + $(call host-cc-option,-Wdisabled-optimization) \ + $(call host-cc-option,-Wmissing-declarations) \ + $(call host-cc-option,-Wmissing-include-dirs) \ + $(call host-cc-option,-Wnested-externs) \ + $(call host-cc-option,-Wold-style-definition) \ + $(call host-cc-option,-Woverlength-strings) \ + $(call host-cc-option,-Wpacked) \ + $(call host-cc-option,-Wpacked-bitfield-compat) \ + $(call host-cc-option,-Wpadded) \ + $(call host-cc-option,-Wredundant-decls) \ + $(call host-cc-option,-Wshadow) \ + $(call host-cc-option,-Wswitch-default) \ + $(call host-cc-option,-Wvla) \ + $(call host-cc-option,-Wwrite-strings) +endif + +TESTED_TARGET_USER_FLAGS += \ + $(call 
cc-optional-warning,-Wunused-but-set-variable) \ + $(call cc-optional-warning,-Wtypedef-redefinition) +TESTED_HOST_USER_FLAGS += \ + $(call host-cc-optional-warning,-Wunused-but-set-variable) \ + $(call host-cc-optional-warning,-Wtypedef-redefinition) + +KBUILD_FLAGS := \ + -Wno-unused-parameter -Wno-sign-compare + +TESTED_KBUILD_FLAGS := \ + $(call kernel-cc-option,-Wmissing-include-dirs) \ + $(call kernel-cc-option,-Wno-type-limits) \ + $(call kernel-cc-option,-Wno-pointer-arith) \ + $(call kernel-cc-option,-Wno-pointer-sign) \ + $(call kernel-cc-option,-Wno-aggregate-return) \ + $(call kernel-cc-option,-Wno-unused-but-set-variable) \ + $(call kernel-cc-option,-Wno-ignored-qualifiers) \ + $(call kernel-cc-option,-Wno-error=implicit-fallthrough) \ + $(call kernel-cc-optional-warning,-Wbad-function-cast) \ + $(call kernel-cc-optional-warning,-Wcast-qual) \ + $(call kernel-cc-optional-warning,-Wcast-align) \ + $(call kernel-cc-optional-warning,-Wconversion) \ + $(call kernel-cc-optional-warning,-Wdisabled-optimization) \ + $(call kernel-cc-optional-warning,-Wlogical-op) \ + $(call kernel-cc-optional-warning,-Wmissing-declarations) \ + $(call kernel-cc-optional-warning,-Wmissing-include-dirs) \ + $(call kernel-cc-optional-warning,-Wnested-externs) \ + $(call kernel-cc-optional-warning,-Wno-missing-field-initializers) \ + $(call kernel-cc-optional-warning,-Wold-style-definition) \ + $(call kernel-cc-optional-warning,-Woverlength-strings) \ + $(call kernel-cc-optional-warning,-Wpacked) \ + $(call kernel-cc-optional-warning,-Wpacked-bitfield-compat) \ + $(call kernel-cc-optional-warning,-Wpadded) \ + $(call kernel-cc-optional-warning,-Wredundant-decls) \ + $(call kernel-cc-optional-warning,-Wshadow) \ + $(call kernel-cc-optional-warning,-Wswitch-default) \ + $(call kernel-cc-optional-warning,-Wwrite-strings) + +# Force no-pie, for compilers that enable pie by default +TESTED_KBUILD_FLAGS := \ + $(call kernel-cc-option,-fno-pie) \ + $(call kernel-cc-option,-no-pie) \ + 
 $(TESTED_KBUILD_FLAGS) + +# When building against experimentally patched kernels with LLVM support, +# we need to suppress warnings about bugs we haven't fixed yet. This is +# temporary and will go away in the future. +ifeq ($(kernel-cc-is-clang),true) +TESTED_KBUILD_FLAGS := \ + $(call kernel-cc-option,-Wno-address-of-packed-member) \ + $(call kernel-cc-option,-Wno-unneeded-internal-declaration) \ + $(call kernel-cc-option,-Wno-unused-function) \ + $(call kernel-cc-optional-warning,-Wno-typedef-redefinition) \ + $(call kernel-cc-optional-warning,-Wno-sometimes-uninitialized) \ + $(TESTED_KBUILD_FLAGS) +endif + +# User C only +# +ALL_CFLAGS := \ + -std=gnu99 \ + $(COMMON_USER_FLAGS) $(COMMON_CFLAGS) $(TESTED_TARGET_USER_FLAGS) \ + $(SYS_CFLAGS) +ALL_HOST_CFLAGS := \ + -std=gnu99 \ + $(COMMON_USER_FLAGS) $(COMMON_CFLAGS) $(TESTED_HOST_USER_FLAGS) + +# User C++ only +# +ALL_CXXFLAGS := \ + -std=gnu++11 \ + -fno-rtti -fno-exceptions \ + $(COMMON_USER_FLAGS) $(COMMON_FLAGS) $(TESTED_TARGET_USER_FLAGS) \ + $(SYS_CXXFLAGS) +ALL_HOST_CXXFLAGS := \ + -std=gnu++11 \ + -fno-rtti -fno-exceptions \ + $(COMMON_USER_FLAGS) $(COMMON_FLAGS) $(TESTED_HOST_USER_FLAGS) + +ifeq ($(PERFDATA),1) +ALL_CFLAGS += -funwind-tables +endif + +# Workaround for clang producing wrong code when -O0 is used. 
+# Applies only for clang < 3.8 +# +ifeq ($(cc-is-clang),true) +__clang_bindir := $(dir $(shell which clang)) +__clang_version := $(shell clang --version | grep -P -o '(?<=clang version )([0-9][^ ]+)') +__clang_major := $(shell echo $(__clang_version) | cut -f1 -d'.') +__clang_minor := $(shell echo $(__clang_version) | cut -f2 -d'.') +ifneq ($(filter -O0,$(ALL_CFLAGS)),) +__clang_lt_3.8 := \ + $(shell ((test $(__clang_major) -lt 3) || \ + ((test $(__clang_major) -eq 3) && (test $(__clang_minor) -lt 8))) && echo 1 || echo 0) +ifeq ($(__clang_lt_3.8),1) +ALL_CFLAGS := $(patsubst -O0,-O1,$(ALL_CFLAGS)) +ALL_CXXFLAGS := $(patsubst -O0,-O1,$(ALL_CXXFLAGS)) +endif +endif +endif + +# Add GCOV_DIR just for target +# +ifeq ($(GCOV_BUILD),on) +ifneq ($(GCOV_DIR),) +ALL_CFLAGS += -fprofile-dir=$(GCOV_DIR) +ALL_CXXFLAGS += -fprofile-dir=$(GCOV_DIR) +endif +endif + +# Kernel C only +# +ALL_KBUILD_CFLAGS := $(COMMON_CFLAGS) $(KBUILD_FLAGS) $(TESTED_KBUILD_FLAGS) + +# User C and C++ +# +# NOTE: ALL_HOST_LDFLAGS should probably be using -rpath-link too, and if we +# ever need to support building host shared libraries, it's required. +# +# We can't use it right now because we want to support non-GNU-compatible +# linkers like the Darwin 'ld' which doesn't support -rpath-link. +# +# For the same reason (Darwin 'ld') don't bother checking for text +# relocations in host binaries. +# +ALL_HOST_LDFLAGS := +ALL_LDFLAGS := -Wl,--warn-shared-textrel + +ifneq ($(USE_GOLD_LINKER),) +ALL_HOST_LDFLAGS += -fuse-ld=gold +ALL_LDFLAGS +=-fuse-ld=gold +endif + +ifeq ($(GCOV_BUILD),on) +ALL_LDFLAGS += -fprofile-arcs +ALL_HOST_LDFLAGS += -fprofile-arcs +endif + +ALL_LDFLAGS += $(SYS_LDFLAGS) + +# Optional security hardening features. +# Roughly matches Android's default security build options. 
+ifneq ($(FORTIFY),) + ALL_CFLAGS += -fstack-protector -Wa,--noexecstack + ALL_CXXFLAGS += -fstack-protector -Wa,--noexecstack + ALL_LDFLAGS += -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now + + # Vanilla versions of glibc >= 2.16 print a warning if _FORTIFY_SOURCE is + # defined but compiler optimisations are disabled. + ifneq ($(BUILD),debug) + ifneq ($(filter-out -O -O0,$(OPTIM)),) + ALL_CFLAGS += -D_FORTIFY_SOURCE=2 + ALL_CXXFLAGS += -D_FORTIFY_SOURCE=2 + endif + endif +endif + +# Sanitiser support +ifneq ($(USE_SANITISER),) + ifeq ($(USE_SANITISER),1) + # Default sanitisers + override USE_SANITISER := address,undefined + endif + $(info Including the following sanitisers: $(USE_SANITISER)) + ALL_CFLAGS += -fsanitize=$(USE_SANITISER) + ALL_CXXFLAGS += -fsanitize=$(USE_SANITISER) + ALL_LDFLAGS += -fsanitize=$(USE_SANITISER) + ALL_HOST_CFLAGS += -fsanitize=$(USE_SANITISER) + ALL_HOST_CXXFLAGS += -fsanitize=$(USE_SANITISER) + ALL_HOST_LDFLAGS += -fsanitize=$(USE_SANITISER) + ifeq ($(cc-is-clang),false) + ALL_HOST_LDFLAGS += -static-libasan + endif +endif + +# This variable contains a list of all modules built by kbuild +ALL_KBUILD_MODULES := + +# This variable contains a list of all modules which contain C++ source files +ALL_CXX_MODULES := + +ifneq ($(TOOLCHAIN),) +$(warning **********************************************) +$(warning The TOOLCHAIN option has been removed, but) +$(warning you have it set (via $(origin TOOLCHAIN))) +$(warning **********************************************) +endif + +# We need the glibc version to generate the cache names for LLVM and XOrg components. 
+ifeq ($(CROSS_COMPILE),) +LIBC_VERSION_PROBE := $(shell ldd $(shell which true) | awk '/libc.so/{print $$3'} ) +LIBC_VERSION := $(shell $(LIBC_VERSION_PROBE)| tr -d '(),' | head -1) +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/commands.mk b/drivers/mcst/gpu-imgtec/build/linux/commands.mk new file mode 100644 index 000000000000..af9fa0faada1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/commands.mk @@ -0,0 +1,555 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. 
If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +# from-one-* recipes make a thing from one source file, so they use $<. Others +# use $(MODULE_something) instead of $^ + +# We expect that MODULE_*FLAGS contains all the flags we need, including the +# flags for all modules (like $(ALL_CFLAGS) and $(ALL_HOST_CFLAGS)), and +# excluding flags for include search dirs or for linking libraries. 
The +# exceptions are ALL_EXE_LDFLAGS and ALL_LIB_LDFLAGS, since they depend on the +# type of thing being linked, so they appear in the commands below + +define host-o-from-one-c +$(if $(V),,@echo " HOST_CC " $(call relative-to-top,$<)) +$(MODULE_CC) -MD -MP -MF $(patsubst %.o,%.d,$@) -c $(MODULE_CFLAGS) \ + $(MODULE_INCLUDE_FLAGS) -include $(CONFIG_H) $< -o $@ +endef + +define target-o-from-one-c +$(if $(V),,@echo " CC " $(call relative-to-top,$<)) +$(MODULE_CC) -MD -MP -MF $(patsubst %.o,%.d,$@) -c $(MODULE_CFLAGS) \ + $(MODULE_INCLUDE_FLAGS) -include $(CONFIG_H) $< -o $@ +endef + +define host-o-from-one-cxx +$(if $(V),,@echo " HOST_CXX" $(call relative-to-top,$<)) +$(MODULE_CXX) -MD -MP -MF $(patsubst %.o,%.d,$@) -c $(MODULE_CXXFLAGS) \ + $(MODULE_INCLUDE_FLAGS) -include $(CONFIG_H) $< -o $@ +endef + +define target-o-from-one-cxx +$(if $(V),,@echo " CXX " $(call relative-to-top,$<)) +$(MODULE_CXX) -MD -MP -MF $(patsubst %.o,%.d,$@) -c $(MODULE_CXXFLAGS) \ + $(MODULE_INCLUDE_FLAGS) -include $(CONFIG_H) $< -o $@ +endef + +define host-executable-from-o +$(if $(V),,@echo " HOST_LD " $(call relative-to-top,$@)) +$(MODULE_CC) $(MODULE_LDFLAGS) \ + -o $@ $(sort $(MODULE_ALL_OBJECTS)) $(MODULE_LIBRARY_DIR_FLAGS) \ + $(MODULE_LIBRARY_FLAGS) +endef + +define host-executable-cxx-from-o +$(if $(V),,@echo " HOST_LD " $(call relative-to-top,$@)) +$(MODULE_CXX) $(MODULE_LDFLAGS) \ + -o $@ $(sort $(MODULE_ALL_OBJECTS)) $(MODULE_LIBRARY_DIR_FLAGS) \ + $(MODULE_LIBRARY_FLAGS) +endef + +define target-executable-from-o +$(if $(V),,@echo " LD " $(call relative-to-top,$@)) +$(MODULE_CC) \ + $(MODULE_TARGET_VARIANT_TYPE) $(MODULE_LDFLAGS) -o $@ \ + $(MODULE_EXE_CRTBEGIN) $(MODULE_ALL_OBJECTS) $(MODULE_EXE_CRTEND) \ + $(MODULE_LIBRARY_DIR_FLAGS) $(MODULE_LIBRARY_FLAGS) \ + $(MODULE_EXE_LDFLAGS) +endef + +define target-executable-cxx-from-o +$(if $(V),,@echo " LD " $(call relative-to-top,$@)) +$(MODULE_CXX) \ + $(MODULE_TARGET_VARIANT_TYPE) $(MODULE_LDFLAGS) -o $@ \ + 
$(MODULE_EXE_CRTBEGIN) $(MODULE_ALL_OBJECTS) $(MODULE_EXE_CRTEND) \ + $(MODULE_LIBRARY_DIR_FLAGS) $(MODULE_LIBRARY_FLAGS) \ + $(MODULE_EXE_LDFLAGS) +endef + +define target-shared-library-from-o +$(if $(V),,@echo " LD " $(call relative-to-top,$@)) +$(MODULE_CC) -shared -Wl,-Bsymbolic \ + $(MODULE_TARGET_VARIANT_TYPE) $(MODULE_LDFLAGS) -o $@ \ + $(MODULE_LIB_CRTBEGIN) $(MODULE_ALL_OBJECTS) $(MODULE_LIB_CRTEND) \ + $(MODULE_LIBRARY_DIR_FLAGS) $(MODULE_LIBRARY_FLAGS) \ + $(MODULE_LIB_LDFLAGS) +endef + +# Helper to convert a binary file into a C header. Can optionally +# null-terminate the binary before conversion. +# +# (1): Character array identifier +# (2): If non-empty, treat as a string and null terminate. +# The character array will also be 'signed'. + +define target-generate-header-from-binary +$(if $(V),,@echo " OD " $(call relative-to-top,$@)) +$(ECHO) "static const $(if $(2),,unsigned )char $(1)[] = {" >$@ +$(OD) $< -A n -t x1 -v | tr -d '\n' | \ + sed -r -e 's@^ @0x@' $(if $(2),-e 's@$$@ 00@',) -e 's@ @, 0x@g' \ + -e 's@(([^[:blank:]]+[[:blank:]]+){8})@\1\n@g' >> $@ +$(ECHO) "};" >> $@ +endef + +# Helper to convert an image file into a C header. The size of the +# image should be specified (but it is not checked). 
+# +# (1): Structure identifier +# (2): Width in pixels +# (3): Height in pixels + +define target-generate-image-header-from-binary +$(if $(V),,@echo " OD " $(call relative-to-top,$@)) +$(ECHO) "static const struct $(1)\ + {\n\tunsigned int width;\n\tunsigned int height;\n\tunsigned int byteLength;\n\tunsigned char data[$(shell stat -c %s $<)];\n}\ + $(1) = {\n\t$(2), $(3), sizeof($(1)), {" >$@ +$(OD) $< -A n -t x1 -v | tr -d '\n' | \ + sed -r -e 's@^ @0x@' -e 's@ @, 0x@g' \ + -e 's/(([^[:blank:]]+[[:blank:]]+){8})/\1\n/g' >>$@ +$(ECHO) "}\n};" >>$@ +endef + +define host-shared-library-from-o +$(if $(V),,@echo " HOST_LD " $(call relative-to-top,$@)) +$(MODULE_CC) -shared -Wl,-Bsymbolic \ + $(MODULE_LDFLAGS) -o $@ \ + $(sort $(MODULE_ALL_OBJECTS)) \ + $(MODULE_LIBRARY_DIR_FLAGS) $(MODULE_LIBRARY_FLAGS) +endef + +# If there were any C++ source files in a shared library, we use one of +# these recipes, which run the C++ compiler to link the final library +define target-shared-library-cxx-from-o +$(if $(V),,@echo " LD " $(call relative-to-top,$@)) +$(MODULE_CXX) -shared -Wl,-Bsymbolic \ + $(MODULE_TARGET_VARIANT_TYPE) $(MODULE_LDFLAGS) -o $@ \ + $(MODULE_LIB_CRTBEGIN) $(MODULE_ALL_OBJECTS) $(MODULE_LIB_CRTEND) \ + $(MODULE_LIBRARY_DIR_FLAGS) $(MODULE_LIBRARY_FLAGS) \ + $(MODULE_LIB_LDFLAGS) +endef + +define host-shared-library-cxx-from-o +$(if $(V),,@echo " HOST_LD " $(call relative-to-top,$@)) +$(MODULE_CXX) -shared -Wl,-Bsymbolic \ + $(MODULE_LDFLAGS) -o $@ \ + $(sort $(MODULE_ALL_OBJECTS)) \ + $(MODULE_LIBRARY_DIR_FLAGS) $(MODULE_LIBRARY_FLAGS) +endef + +define host-copy-debug-information +$(MODULE_OBJCOPY) --only-keep-debug $@ $(basename $@).dbg +endef + +define host-strip-debug-information +$(MODULE_STRIP) --strip-unneeded $@ +endef + +define host-add-debuglink +$(if $(V),,@echo " DBGLINK " $(call relative-to-top,$(basename $@).dbg)) +$(MODULE_OBJCOPY) --add-gnu-debuglink=$(basename $@).dbg $@ +endef + +define target-copy-debug-information +$(MODULE_OBJCOPY) 
--only-keep-debug $@ $(basename $@).dbg +endef + +define target-strip-debug-information +$(MODULE_STRIP) --strip-unneeded $@ +endef + +define target-add-debuglink +$(if $(V),,@echo " DBGLINK " $(call relative-to-top,$(basename $@).dbg)) +$(MODULE_OBJCOPY) --add-gnu-debuglink=$(basename $@).dbg $@ +endef + +define target-compress-debug-information +$(MODULE_OBJCOPY) --compress-debug-sections $@ $@.compressed_debug +$(MV) $@.compressed_debug $@ +endef + +define host-static-library-from-o +$(if $(V),,@echo " HOST_AR " $(call relative-to-top,$@)) +$(RM) $@ +$(MODULE_AR) crD $@ $(sort $(MODULE_ALL_OBJECTS)) +endef + +define target-static-library-from-o +$(if $(V),,@echo " AR " $(call relative-to-top,$@)) +$(RM) $@ +$(MODULE_AR) crD $@ $(sort $(MODULE_ALL_OBJECTS)) +endef + +define tab-c-from-y +$(if $(V),,@echo " BISON " $(call relative-to-top,$<)) +$(BISON) $(MODULE_BISON_FLAGS) -o $@ -d $< +endef + +define l-c-from-l +$(if $(V),,@echo " FLEX " $(call relative-to-top,$<)) +$(FLEX) $(MODULE_FLEX_FLAGS) -o$@ $< +endef + +define l-cc-from-l +$(if $(V),,@echo " FLEXXX " $(call relative-to-top,$<)) +$(FLEXXX) $(MODULE_FLEXXX_FLAGS) -o$@ $< +endef + +define clean-dirs +$(if $(V),,@echo " RM " $(call relative-to-top,$(MODULE_DIRS_TO_REMOVE))) +$(RM) -rf $(MODULE_DIRS_TO_REMOVE) +endef + +define make-directory +$(MKDIR) -p $@ +endef + +ifeq ($(DISABLE_CHECK_EXPORTS),) +define check-exports +endef +else +define check-exports +endef +endif + +# Check a source file with the program specified in $(CHECK). +# If $(CHECK) is empty, don't do anything. +ifeq ($(CHECK),) +check-src := +else +# If CHECK is a relative path to something in the DDK then replace it with +# an absolute path. This is necessary for the kbuild target, which uses the +# Linux kernel build system, so that it can find the program specified in +# $(CHECK). 
+ifneq ($(wildcard $(TOP)/$(CHECK)),) + override CHECK := $(TOP)/$(CHECK) +endif + +define check-src-1 +$(if $(V),,@echo " CHECK " $(call relative-to-top,$<)) +$(if $(IGNORE_CHECK_ERRORS),-,)$(CHECK) $(MODULE_INCLUDE_FLAGS) \ + $(if $(CHECK_NO_CONFIG_H),,-include $(CONFIG_H)) \ + $(filter -D%,$(MODULE_CFLAGS)) \ + $(CHECKFLAGS) $< +endef +# If CHECK_ONLY is set, only check files matching a Make pattern. +# e.g. CHECK_ONLY=opengles1/%.c +define check-src +$(if $(and $(if $(CHECK_ONLY),$(filter $(CHECK_ONLY),$<),true), \ + $(if $(CHECK_EXCLUDE),$(filter-out $(CHECK_EXCLUDE),$<),true)),$(check-src-1),@:) +endef +endif + +# Programs used in recipes + +AR ?= ar +AR_SECONDARY ?= $(AR) +BISON ?= bison +CC ?= gcc +CC_SECONDARY ?= $(CC) +CROSS_COMPILE_SECONDARY ?= $(CROSS_COMPILE) +CXX ?= g++ +CXX_SECONDARY ?= $(CXX) +GLSLC ?= glslc +HOST_AR ?= ar +HOST_AS ?= as +HOST_CC ?= gcc +HOST_CXX ?= g++ +HOST_LD ?= ld +HOST_NM ?= nm +HOST_OBJCOPY ?= objcopy +HOST_OBJDUMP ?= objdump +HOST_RANLIB ?= ranlib +HOST_READELF ?= readelf +HOST_STRIP ?= strip +INDENT ?= indent +JAR ?= jar +JAVA ?= java +JAVAC ?= javac +M4 ?= m4 +NM ?= nm +NM_SECONDARY ?= $(NM) +OBJCOPY ?= objcopy +OBJCOPY_SECONDARY ?= $(OBJCOPY) +PKG_CONFIG ?= pkg-config +PYTHON2 ?= python2 +RANLIB ?= ranlib +RANLIB_SECONDARY ?= $(RANLIB) +STRIP ?= strip +STRIP_SECONDARY ?= $(STRIP) +ZIP ?= zip + +ifneq ($(shell which python3),) +PYTHON ?= python3 +else +PYTHON ?= python2 + +$(warning ******************************************************) +$(warning WARNING: Python 3 not found so falling back to Python) +$(warning 2, which is deprecated. 
See here for Python 2 end of) +$(warning life information:) +$(warning https://www.python.org/dev/peps/pep-0373/#id4) +$(warning ******************************************************) +endif + +ifneq ($(SUPPORT_BUILD_LWS),) +WAYLAND_SCANNER := `$(PKG_CONFIG) --variable=wayland_scanner wayland-scanner` +else +WAYLAND_SCANNER ?= wayland-scanner +endif + +# Define CHMOD and CC_CHECK first so we can use cc-is-clang +# +override CHMOD := $(if $(V),,@)chmod +override CC_CHECK := $(if $(V),,@)$(MAKE_TOP)/tools/cc-check.sh + +ifeq ($(USE_CCACHE),1) + CCACHE ?= ccache + ifeq ($(cc-is-clang),true) + # Compiling with ccache and clang together can cause false errors + # without this environment variable. + export CCACHE_CPP2=1 + endif +endif +ifeq ($(USE_DISTCC),1) + DISTCC ?= distcc +endif + +# Toolchain triples for cross environments +# +CROSS_TRIPLE := $(patsubst %-,%,$(notdir $(CROSS_COMPILE))) +CROSS_TRIPLE_SECONDARY := $(patsubst %-,%,$(notdir $(CROSS_COMPILE_SECONDARY))) + +# If clang is detected, the compiler name is invariant but CROSS_COMPILE +# is reflected in the use of -target. For GCC this is always encoded into +# the binary. If CROSS_COMPILE is not set we can skip this. +# +# If we're doing a build with multiple target architectures, we might need +# two separate compilers to build binaries for each architecture. In this +# case, CROSS_COMPILE and CROSS_COMPILE_SECONDARY are the cross compiler +# prefix for the two compilers - $(CC) and $(CC_SECONDARY). +# +# Set the secondary compiler first before we overwrite $(CC). 
+# + +ifneq ($(CROSS_COMPILE_SECONDARY),) + ifeq ($(cc-is-clang),true) + __clang_target := $(CROSS_TRIPLE_SECONDARY) + ifeq ($(__clang_target),mips64el-linux-android) + __clang_target := mipsel-linux-android + endif + __gcc_bindir := $(dir $(shell which $(CROSS_COMPILE_SECONDARY)gcc)) + ifeq ($(wildcard $(__gcc_bindir)),) + __gcc_bindir := $(dir $(CROSS_COMPILE_SECONDARY)gcc) + endif + override CC_SECONDARY := \ + $(CC_SECONDARY) \ + -target $(__clang_target) \ + -B$(__gcc_bindir) \ + -B$(__gcc_bindir)/../$(CROSS_TRIPLE_SECONDARY)/bin \ + --gcc-toolchain=$(__gcc_bindir)/.. + override CXX_SECONDARY := \ + $(CXX_SECONDARY) \ + -target $(__clang_target) \ + -B$(__gcc_bindir) \ + -B$(__gcc_bindir)/../$(CROSS_TRIPLE_SECONDARY)/bin \ + --gcc-toolchain=$(__gcc_bindir)/.. + else + ifeq ($(origin CC_SECONDARY),file) + override CC_SECONDARY := $(CROSS_COMPILE_SECONDARY)$(CC_SECONDARY) + endif + ifeq ($(origin CXX_SECONDARY),file) + override CXX_SECONDARY := $(CROSS_COMPILE_SECONDARY)$(CXX_SECONDARY) + endif + endif + ifeq ($(origin AR_SECONDARY),file) + override AR_SECONDARY := $(CROSS_COMPILE_SECONDARY)$(AR_SECONDARY) + endif + ifeq ($(origin NM_SECONDARY),file) + override NM_SECONDARY := $(CROSS_COMPILE_SECONDARY)$(NM_SECONDARY) + endif + ifeq ($(origin OBJCOPY_SECONDARY),file) + override OBJCOPY_SECONDARY := $(CROSS_COMPILE_SECONDARY)$(OBJCOPY_SECONDARY) + endif + ifeq ($(origin RANLIB_SECONDARY),file) + override RANLIB_SECONDARY := $(CROSS_COMPILE_SECONDARY)$(RANLIB_SECONDARY) + endif + ifeq ($(origin STRIP_SECONDARY),file) + override STRIP_SECONDARY := $(CROSS_COMPILE_SECONDARY)$(STRIP_SECONDARY) + endif +endif + +# Vanilla versions of glibc >= 2.16 print a warning if _FORTIFY_SOURCE is +# defined but compiler optimisations are disabled. In this case, make sure it's +# not being defined as part of CC/CXX, as is the case for at least Yocto Poky +# 3.0. 
+ifeq ($(filter $(OPTIM),-O -O0),$(OPTIM)) + override CC_SECONDARY := $(filter-out -D_FORTIFY_SOURCE%,$(CC_SECONDARY)) + override CXX_SECONDARY := $(filter-out -D_FORTIFY_SOURCE%,$(CXX_SECONDARY)) +else ifeq ($(BUILD),debug) + override CC_SECONDARY := $(filter-out -D_FORTIFY_SOURCE%,$(CC_SECONDARY)) + override CXX_SECONDARY := $(filter-out -D_FORTIFY_SOURCE%,$(CXX_SECONDARY)) +endif + +# Apply compiler wrappers and V=1 handling +override AR_SECONDARY := $(if $(V),,@)$(AR_SECONDARY) +override CC_SECONDARY := $(if $(V),,@)$(strip $(CCACHE)$(DISTCC) $(CC_SECONDARY)) +override CXX_SECONDARY := $(if $(V),,@)$(strip $(CCACHE)$(DISTCC) $(CXX_SECONDARY)) +override NM_SECONDARY := $(if $(V),,@)$(NM_SECONDARY) +override OBJCOPY_SECONDARY := $(if $(V),,@)$(OBJCOPY_SECONDARY) +override RANLIB_SECONDARY := $(if $(V),,@)$(RANLIB_SECONDARY) + +ifneq ($(CROSS_COMPILE),) + ifeq ($(cc-is-clang),true) + __gcc_bindir := $(dir $(shell which $(CROSS_COMPILE)gcc)) + ifeq ($(wildcard $(__gcc_bindir)),) + __gcc_bindir := $(dir $(CROSS_COMPILE)gcc) + endif + override CC := \ + $(CC) \ + -target $(CROSS_TRIPLE) \ + -B$(__gcc_bindir) \ + -B$(__gcc_bindir)/../$(CROSS_TRIPLE)/bin \ + --gcc-toolchain=$(__gcc_bindir)/.. + override CXX := \ + $(CXX) \ + -target $(CROSS_TRIPLE) \ + -B$(__gcc_bindir) \ + -B$(__gcc_bindir)/../$(CROSS_TRIPLE)/bin \ + --gcc-toolchain=$(__gcc_bindir)/.. 
+ else + ifeq ($(origin CC),file) + override CC := $(CROSS_COMPILE)$(CC) + endif + ifeq ($(origin CXX),file) + override CXX := $(CROSS_COMPILE)$(CXX) + endif + endif + ifeq ($(origin AR),file) + override AR := $(CROSS_COMPILE)$(AR) + endif + ifeq ($(origin NM),file) + override NM := $(CROSS_COMPILE)$(NM) + endif + ifeq ($(origin OBJCOPY),file) + override OBJCOPY := $(CROSS_COMPILE)$(OBJCOPY) + endif + ifeq ($(origin RANLIB),file) + override RANLIB := $(CROSS_COMPILE)$(RANLIB) + endif + ifeq ($(origin STRIP),file) + override STRIP := $(CROSS_COMPILE)$(STRIP) + endif +else + $(if $(CROSS_COMPILE_SECONDARY),$(warning CROSS_COMPILE_SECONDARY is set but CROSS_COMPILE is empty)) +endif + +# Vanilla versions of glibc >= 2.16 print a warning if _FORTIFY_SOURCE is +# defined but compiler optimisations are disabled. In this case, make sure it's +# not being defined as part of CC/CXX, as is the case for at least Yocto Poky +# 3.0. +ifeq ($(filter $(OPTIM),-O -O0),$(OPTIM)) + override CC := $(filter-out -D_FORTIFY_SOURCE%,$(CC)) + override CXX := $(filter-out -D_FORTIFY_SOURCE%,$(CXX)) +else ifeq ($(BUILD),debug) + override CC := $(filter-out -D_FORTIFY_SOURCE%,$(CC)) + override CXX := $(filter-out -D_FORTIFY_SOURCE%,$(CXX)) +endif + +# Apply tool wrappers and V=1 handling. +# +# This list should be kept in alphabetical order. 
+# +override AR := $(if $(V),,@)$(AR) +override BISON := $(if $(V),,@)$(BISON) +override BZIP2 := $(if $(V),,@)bzip2 -9 +override CAT := $(if $(V),,@)cat +override CC := $(if $(V),,@)$(strip $(CCACHE)$(DISTCC) $(CC)) +override CHECK := $(if $(CHECK),$(if $(V),,@)$(CHECK),) +override CP := $(if $(V),,@)cp +override CXX := $(if $(V),,@)$(strip $(CCACHE)$(DISTCC) $(CXX)) +override ECHO := $(if $(V),,@)$(shell which echo) -e +override FLEX := $(if $(V),,@)flex +override FLEXXX := $(if $(V),,@)flex++ +override FWINFO := $(if $(V),,@)$(HOST_OUT)/fwinfo +override GLSLC := $(if $(V),,@)$(GLSLC) +override GREP := $(if $(V),,@)grep +override HOST_AR := $(if $(V),,@)$(HOST_AR) +override HOST_AS := $(if $(V),,@)$(HOST_AS) +override HOST_CC := $(if $(V),,@)$(strip $(CCACHE) $(HOST_CC)) +override HOST_CXX := $(if $(V),,@)$(strip $(CCACHE) $(HOST_CXX)) +override HOST_LD := $(if $(V),,@)$(HOST_LD) +override HOST_NM := $(if $(V),,@)$(HOST_NM) +override HOST_OBJCOPY := $(if $(V),,@)$(HOST_OBJCOPY) +override HOST_OBJDUMP := $(if $(V),,@)$(HOST_OBJDUMP) +override HOST_RANLIB := $(if $(V),,@)$(HOST_RANLIB) +override HOST_READELF := $(if $(V),,@)$(HOST_READELF) +override HOST_STRIP := $(if $(V),,@)$(HOST_STRIP) +override INSTALL := $(if $(V),,@)install +override JAR := $(if $(V),,@)$(JAR) +override JAVA := $(if $(V),,@)$(JAVA) +override JAVAC := $(if $(V),,@)$(JAVAC) +override LN := $(if $(V),,@)ln -f -s +override M4 := $(if $(V),,@)$(M4) +override MKDIR := $(if $(V),,@)mkdir +override MV := $(if $(V),,@)mv +override NM := $(if $(V),,@)$(NM) +override OBJCOPY := $(if $(V),,@)$(OBJCOPY) +override OD := $(if $(V),,@)od +override PERL := $(if $(V),,@)perl +override PSC := $(if $(V),,@)$(HOST_OUT)/psc_standalone +override PYTHON := $(if $(V),,@)$(PYTHON) +override PYTHON2 := $(if $(V),,@)$(PYTHON2) +override RANLIB := $(if $(V),,@)$(RANLIB) +override RM := $(if $(V),,@)rm -f +override SED := $(if $(V),,@)sed +override SIGNFW := $(if $(V),,@)$(HOST_OUT)/signfw +override STRIP := $(if 
$(V),,@)$(STRIP) +override STRIP_SECONDARY := $(if $(V),,@)$(STRIP_SECONDARY) +override TAR := $(if $(V),,@)tar +override TEST := $(if $(V),,@)test +override TOUCH := $(if $(V),,@)touch +override UNIFLEXC := $(if $(V),,@)$(HOST_OUT)/usc +override USCASM := $(if $(V),,@)$(HOST_OUT)/uscasm +override WAYLAND_SCANNER := $(if $(V),,@)$(WAYLAND_SCANNER) +override ZIP := $(if $(V),,@)$(ZIP) + +ifeq ($(SUPPORT_NEUTRINO_PLATFORM),1) +include $(MAKE_TOP)/common/neutrino/commands_neutrino.mk +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/common/3rdparty.mk b/drivers/mcst/gpu-imgtec/build/linux/common/3rdparty.mk new file mode 100644 index 000000000000..501abc6675f0 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/common/3rdparty.mk @@ -0,0 +1,119 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +########################################################################### ### +# Display class drivers +########################################################################### ### + +ifeq ($(DISPLAY_CONTROLLER),dc_example) +$(eval $(call TunableKernelConfigC,DC_EXAMPLE_WIDTH,)) +$(eval $(call TunableKernelConfigC,DC_EXAMPLE_HEIGHT,)) +$(eval $(call TunableKernelConfigC,DC_EXAMPLE_DPI,)) +$(eval $(call TunableKernelConfigC,DC_EXAMPLE_BIT_DEPTH,)) +$(eval $(call TunableKernelConfigC,DC_EXAMPLE_FBC_FORMAT,)) +$(eval $(call TunableKernelConfigC,DC_EXAMPLE_MEMORY_LAYOUT,)) +endif + +ifeq ($(DISPLAY_CONTROLLER),dc_fbdev) +$(eval $(call TunableKernelConfigC,DC_FBDEV_REFRESH,)) + +$(eval $(call TunableKernelConfigC,DC_FBDEV_FORCE_XRGB8888,,\ +Force the dc_fbdev display driver to use XRGB8888. This is necessary_\ +when the underlying Linux framebuffer driver does not ignore alpha_\ +meaning an alpha value of 0 results in nothing being displayed._\ +)) + +$(eval $(call TunableBothConfigC,DC_FBDEV_NUM_PREFERRED_BUFFERS,,\ +The maximum number of display buffers the dc_fbdev display driver_\ +supports. The underlying Linux framebuffer driver must be capable_\ +of allocating sufficient memory for the number of buffers chosen._\ +)) + +$(eval $(call TunableKernelConfigC,DC_FBDEV_USE_SCREEN_BASE,,\ +Use the framebuffer virtual address found in the framebuffer info_\ +screen base instead of the physical address found in the framebuffer_\ +fix screen info._\ +)) + +$(eval $(call TunableKernelConfigC,DC_FBDEV_FORCE_CONTEXT_CLEAN,,\ +Before each framebuffer flip force a context clean by writing out_\ +to memory the contents of the framebuffer display buffers. Certain_\ +Linux framebuffer drivers like udldrmfb for displaylink USB-to-VGA_\ +adapters make use of cached buffers which require userspace support_\ +to write out to memory only those regions that are dirty. 
So in the_\ +absence of such a userspace support in certain window systems like_\ +NULLWS we force clean the entire display buffers before each flip._\ +)) +endif + +ifeq ($(DISPLAY_CONTROLLER),dc_pdp) +$(eval $(call TunableKernelConfigC,DCPDP_WIDTH,)) +$(eval $(call TunableKernelConfigC,DCPDP_HEIGHT,)) +$(eval $(call TunableKernelConfigC,DCPDP_DPI,)) +$(eval $(call TunableKernelConfigC,DCPDP_DYNAMIC_GTF_TIMING,1)) +endif + +ifeq ($(DISPLAY_CONTROLLER),dc_pdp2) +$(eval $(call TunableKernelConfigC,DCPDP_WIDTH,)) +$(eval $(call TunableKernelConfigC,DCPDP_HEIGHT,)) +endif + +ifeq ($(DISPLAY_CONTROLLER),adf_pdp) +$(eval $(call TunableKernelConfigC,ADF_PDP_WIDTH,)) +$(eval $(call TunableKernelConfigC,ADF_PDP_HEIGHT,)) +endif + +ifeq ($(DISPLAY_CONTROLLER),drm_pdp) + ifeq ($(SUPPORT_KMS),1) + ifneq ($(call kernel-version-at-least,4,3),true) + $(eval $(call TunableKernelConfigC,SUPPORT_DRM_FBDEV_EMULATION,,\ +Enables legacy framebuffer device support in those DRM/KMS drivers \ +that support it when using kernel 4.2 and below. When using later \ +kernels this support must be enabled in the kernel via the \ +CONFIG_DRM_FBDEV_EMULATION option.)) + else ifeq ($(call kernel-version-at-least,4,3),true) + ifneq ($(filter command line environment,$(origin SUPPORT_DRM_FBDEV_EMULATION)),) + $(warning CONFIG_DRM_FBDEV_EMULATION must be set as part of the Linux kernel build) + $(warning SUPPORT_DRM_FBDEV_EMULATION will be ignored) + endif + override undefine SUPPORT_DRM_FBDEV_EMULATION + endif + endif +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/common/chromiumos_kernel.mk b/drivers/mcst/gpu-imgtec/build/linux/common/chromiumos_kernel.mk new file mode 100644 index 000000000000..795dc0d01f7d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/common/chromiumos_kernel.mk @@ -0,0 +1,56 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". 
+# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +# This file intends to keep in sync DDK build options with new features +# backported or added on upstream Chromium OS kernels. + +# remotes/cros/chromeos-3.18, 315d1eac +# CHROMIUM: img-rogue: add support for dma-fence and sync_file + +ifeq ($(call kernel-version-at-least,3,18),true) + ifeq ($(CHROMIUMOS_KERNEL),1) + ifneq ($(wildcard $(KERNELDIR)/include/linux/dma-fence.h),) + CHROMIUMOS_KERNEL_HAS_DMA_FENCE := 1 + $(eval $(call KernelConfigC,CHROMIUMOS_KERNEL_HAS_DMA_FENCE,,\ +ChromiumOS kernel contains the dma-fence API instead of the fence API\ +)) + endif + endif +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/common/lws.mk b/drivers/mcst/gpu-imgtec/build/linux/common/lws.mk new file mode 100644 index 000000000000..845af9d30824 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/common/lws.mk @@ -0,0 +1,54 @@ +########################################################################### ### +#@File +#@Title Linux window system config options +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@Description Linux build system LWS config options. +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### +ifeq ($(SUPPORT_KMS),1) + + # Common mandatory config options + $(eval $(call KernelConfigMake,SUPPORT_BUFFER_SYNC,1)) + $(eval $(call BothConfigC,SUPPORT_BUFFER_SYNC,1)) + + # Common tunable config options + + # Xorg specific config options + ifeq ($(WINDOW_SYSTEM),xorg) + endif +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/common/testchip.mk b/drivers/mcst/gpu-imgtec/build/linux/common/testchip.mk new file mode 100644 index 000000000000..760c2a66492b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/common/testchip.mk @@ -0,0 +1,126 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +XE_BVNC = $(shell echo $(RGX_BVNC) | grep '^22.*\|^24.*\|^29.*') + +ifeq ($(RGX_BVNC),$(XE_BVNC)) + $(eval $(call KernelConfigC,TC_APOLLO_TCF5,)) + SUPPORT_FPGA_DUT_CLK_INFO ?= 1 +endif + +ifeq ($(PVR_ARCH),volcanic) + SUPPORT_FPGA_DUT_CLK_INFO ?= 1 +endif + +$(eval $(call TunableKernelConfigC,SUPPORT_FPGA_DUT_CLK_INFO,)) + +ifeq ($(RGX_BVNC),1.82.4.5) + $(eval $(call KernelConfigC,TC_APOLLO_ES2,)) +else ifeq ($(RGX_BVNC),4.31.4.55) + $(eval $(call KernelConfigC,TC_APOLLO_BONNIE,)) +else ifeq ($(RGX_BVNC),22.46.54.330) + $(eval $(call KernelConfigC,TC_APOLLO_TCF5_22_46_54_330,)) +else ifeq ($(RGX_BVNC),22.49.21.16) + $(eval $(call KernelConfigC,TC_APOLLO_TCF5_22_49_21_16,)) +else ifeq ($(RGX_BVNC),22.60.22.29) + $(eval $(call KernelConfigC,TC_APOLLO_TCF5_22_60_22_29,)) +else ifeq ($(RGX_BVNC),22.67.54.30) + $(eval $(call KernelConfigC,TC_APOLLO_TCF5_22_67_54_30,)) +else ifeq ($(RGX_BVNC),22.75.22.25) + $(eval $(call KernelConfigC,TC_APOLLO_TCF5_22_75_22_25,)) +else ifeq ($(RGX_BVNC),22.86.104.218) + $(eval $(call KernelConfigC,TC_APOLLO_TCF5_22_86_104_218,)) +else ifeq ($(RGX_BVNC),22.88.104.318) + $(eval $(call KernelConfigC,TC_APOLLO_TCF5_22_88_104_318,)) +else ifeq ($(RGX_BVNC),22.89.204.18) + $(eval $(call KernelConfigC,TC_APOLLO_TCF5_22_89_204_18,)) +else ifeq ($(RGX_BVNC),22.98.54.230) + $(eval $(call KernelConfigC,TC_APOLLO_TCF5_22_98_54_230,)) +else ifeq ($(RGX_BVNC),22.102.54.38) + $(eval $(call KernelConfigC,TC_APOLLO_TCF5_22_102_54_38,)) +else ifeq ($(RGX_BVNC),33.8.22.1) + $(eval $(call KernelConfigC,TC_APOLLO_TCF5_33_8_22_1,)) +else ifeq ($(RGX_BVNC),29.12.52.208) + $(eval $(call KernelConfigC,TC_ORION,)) +else ifeq ($(RGX_BVNC),$(XE_BVNC)) + $(warning WARNING $(RGX_BVNC) is currently not supported on a Linux TCF5 system) + $(eval $(call KernelConfigC,TC_APOLLO_TCF5_BVNC_NOT_SUPPORTED,)) +endif + +ifneq ($(SUPPORT_ANDROID_PLATFORM),) + ifneq ($(PVR_SYSTEM),rgx_nohw) + 
override TC_MEMORY_CONFIG := TC_MEMORY_LOCAL + endif +endif + +ifneq ($(PVR_SYSTEM),rgx_nohw) + ifeq ($(TC_MEMORY_CONFIG),) + $(error TC_MEMORY_CONFIG must be defined) + endif +endif + +$(eval $(call TunableBothConfigC,TC_MEMORY_CONFIG,$(TC_MEMORY_CONFIG),\ +Selects the memory configuration to be used. The choices are:_\ +* TC_MEMORY_LOCAL (Rogue and the display controller use local card memory)_\ +* TC_MEMORY_HOST (Rogue and the display controller use system memory)_\ +* TC_MEMORY_HYBRID (Rogue can use both system and local memory and the display controller uses local card memory))) + +ifeq ($(TC_FAKE_INTERRUPTS),1) +$(eval $(call KernelConfigC,TC_FAKE_INTERRUPTS,)) +endif + +ifeq ($(TC_MEMORY_CONFIG),TC_MEMORY_LOCAL) + ifeq ($(TC_DISPLAY_MEM_SIZE),) + $(error TC_DISPLAY_MEM_SIZE must be set in $(PVR_BUILD_DIR)/Makefile) + endif + $(eval $(call KernelConfigC,TC_DISPLAY_MEM_SIZE,$(TC_DISPLAY_MEM_SIZE))) + $(eval $(call BothConfigC,LMA,)) + $(eval $(call KernelConfigMake,LMA,1)) +else ifeq ($(TC_MEMORY_CONFIG),TC_MEMORY_HYBRID) + ifeq ($(TC_DISPLAY_MEM_SIZE),) + $(error TC_DISPLAY_MEM_SIZE must be set in $(PVR_BUILD_DIR)/Makefile) + endif + $(eval $(call KernelConfigC,TC_DISPLAY_MEM_SIZE,$(TC_DISPLAY_MEM_SIZE))) +endif + +$(eval $(call TunableKernelConfigC,SUPPORT_APOLLO_FPGA,)) + +$(eval $(call TunableKernelConfigC,SUPPORT_FAKE_SECURE_ION_HEAP,)) +$(eval $(call TunableKernelConfigC,TC_SECURE_MEM_SIZE,128)) diff --git a/drivers/mcst/gpu-imgtec/build/linux/common/window_systems/lws-generic.mk b/drivers/mcst/gpu-imgtec/build/linux/common/window_systems/lws-generic.mk new file mode 100644 index 000000000000..eb0f7f9c01fd --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/common/window_systems/lws-generic.mk @@ -0,0 +1,45 @@ +########################################################################### ### +#@File +#@Title Build Mesa support libraries only +#@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". 
+# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +ifeq ($(SUPPORT_COMPUTE_ONLY),) + COMPONENTS += pvr_dri_support +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/common/window_systems/nulldrmws.mk b/drivers/mcst/gpu-imgtec/build/linux/common/window_systems/nulldrmws.mk new file mode 100644 index 000000000000..1f35cb637229 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/common/window_systems/nulldrmws.mk @@ -0,0 +1,65 @@ +########################################################################### ### +#@File nulldrmws.mk +#@Title Specify Null DRM WS components +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@Description For Null DRM WS driver builds, pull in Null DRM WS related +# components. +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +ifeq ($(SUPPORT_COMPUTE_ONLY),) + LWS_COMPONENTS += lws + + ifeq ($(MESA_EGL),) + COMPONENTS += null_drm_ws + + ifeq ($(PVR_REMOTE),1) + COMPONENTS += null_remote null_none + else ifeq ($(PVR_NONE),1) + COMPONENTS += null_none + endif + + ifneq ($(GBM_BACKEND),) + COMPONENTS += gbm + endif + else + -include ../common/window_systems/lws-generic.mk + + SUPPORT_BUILD_LWS ?= 1 + endif +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/common/window_systems/surfaceless.mk b/drivers/mcst/gpu-imgtec/build/linux/common/window_systems/surfaceless.mk new file mode 100644 index 000000000000..a2fc8e9159e6 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/common/window_systems/surfaceless.mk @@ -0,0 +1,50 @@ +########################################################################### ### +#@File surfaceless.mk +#@Title Specify Surfaceless WS components +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@Description For Surfaceless WS driver builds, pull in Surfaceless DRM WS +# related components. +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +ifeq ($(SUPPORT_COMPUTE_ONLY),) + include ../common/window_systems/lws-generic.mk + + LWS_COMPONENTS += lws + SUPPORT_BUILD_LWS ?= 1 +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/common/window_systems/wayland.mk b/drivers/mcst/gpu-imgtec/build/linux/common/window_systems/wayland.mk new file mode 100644 index 000000000000..39525038c178 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/common/window_systems/wayland.mk @@ -0,0 +1,49 @@ +########################################################################### ### +#@File wayland.mk +#@Title Specify Wayland components +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@Description For Wayland driver builds, pull in Wayland related components. +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +ifeq ($(SUPPORT_COMPUTE_ONLY),) + include ../common/window_systems/lws-generic.mk + + LWS_COMPONENTS += lws + SUPPORT_BUILD_LWS ?= 1 +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/common/window_systems/xorg.mk b/drivers/mcst/gpu-imgtec/build/linux/common/window_systems/xorg.mk new file mode 100644 index 000000000000..60cad6302613 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/common/window_systems/xorg.mk @@ -0,0 +1,51 @@ +########################################################################### ### +#@File xorg.mk +#@Title Specify X.Org components +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@Description For X.Org driver builds, pull in X.Org related components. 
+#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". 
+# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +ifeq ($(SUPPORT_COMPUTE_ONLY),) + include ../common/window_systems/lws-generic.mk + + LWS_COMPONENTS += lws + COMPONENTS += pvr_conf + + SUPPORT_BUILD_LWS ?= 1 +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/config/compiler.mk b/drivers/mcst/gpu-imgtec/build/linux/config/compiler.mk new file mode 100644 index 000000000000..145658eca8ae --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/config/compiler.mk @@ -0,0 +1,299 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +# Check for valid values of $(MULTIARCH). +ifeq ($(strip $(MULTIARCH)),0) +$(error MULTIARCH must be empty to disable multiarch) +endif + +define calculate-compiler-preferred-target + ifeq ($(2),qcc) + $(1)_compiler_preferred_target := qcc + else + # Remove the 'unknown' substring from triple string to behave the same as before clang 8. 
+ $(1)_compiler_preferred_target := $$(subst --,-,$$(subst unknown,,$$(shell $(2) -dumpmachine))) + ifeq ($$($(1)_compiler_preferred_target),) + $$(warning No output from '$(2) -dumpmachine') + $$(warning Check that the compiler is in your PATH and CROSS_COMPILE is) + $$(warning set correctly.) + $$(error Unable to run compiler '$(2)') + endif + ifneq ($$(filter %-w64-mingw32,$$($(1)_compiler_preferred_target)),) + # Use the compiler target name. + else + ifneq ($$(filter x86_64-%,$$($(1)_compiler_preferred_target)),) + $(1)_compiler_preferred_target := x86_64-linux-gnu + endif + ifneq ($$(filter i386-% i486-% i586-% i686-%,$$($(1)_compiler_preferred_target)),) + $(1)_compiler_preferred_target := i386-linux-gnu + endif + ifneq ($$(filter aarch64-poky-linux,$$($(1)_compiler_preferred_target)),) + $(1)_compiler_preferred_target := aarch64-linux-gnu + endif + ifneq ($$(filter armv7a-cros-linux-gnueabi armv7l-tizen-linux-gnueabi,$$($(1)_compiler_preferred_target)),) + $(1)_compiler_preferred_target := arm-linux-gnueabi + endif + ifneq ($$(filter arm-linux-android,$$($(1)_compiler_preferred_target)),) + $(1)_compiler_preferred_target := arm-linux-androideabi + endif + ifneq ($$(filter riscv64-buildroot-linux-gnu riscv64-poky-linux,$$($(1)_compiler_preferred_target)),) + $(1)_compiler_preferred_target := riscv64-linux-gnu + endif + ifneq ($$(filter clang%,$(2)),) + ifeq ($(1),target) + ifeq (arm-linux-gnueabihf,$$(CROSS_TRIPLE)) + $(1)_compiler_preferred_target := arm-linux-gnueabihf + endif + ifeq (arm-linux-gnueabi,$$(CROSS_TRIPLE)) + $(1)_compiler_preferred_target := arm-linux-gnueabi + endif + endif + endif + endif + endif +endef + +define cross-compiler-name + ifeq ($$(_CLANG),true) + ifneq ($(strip $(2)),) + ifeq ($(1):$(CROSS_TRIPLE),_cc_secondary:mips64el-linux-android) + $(1) := $(3) -target mipsel-linux-android -Qunused-arguments + else + $(1) := $(3) -target $$(patsubst %-,%,$$(notdir $(2))) -Qunused-arguments + endif + else + $(1) := $(3) -Qunused-arguments + 
endif + else + ifeq ($$(origin CC),file) + $(1) := $(2)$(3) + else + $(1) := $(3) + endif + endif +endef + +# Work out the host compiler architecture +$(eval $(call calculate-compiler-preferred-target,host,$(HOST_CC))) + +ifeq ($(host_compiler_preferred_target),x86_64-linux-gnu) + HOST_PRIMARY_ARCH := host_x86_64 + HOST_32BIT_ARCH := host_i386 + HOST_FORCE_32BIT := -m32 +else +ifeq ($(host_compiler_preferred_target),i386-linux-gnu) + HOST_PRIMARY_ARCH := host_i386 + HOST_32BIT_ARCH := host_i386 +else +ifeq ($(host_compiler_preferred_target),arm-linux-gnueabi) + HOST_PRIMARY_ARCH := host_armel + HOST_32BIT_ARCH := host_armel +else +ifeq ($(host_compiler_preferred_target),arm-linux-gnueabihf) + HOST_PRIMARY_ARCH := host_armhf + HOST_32BIT_ARCH := host_armhf +else +ifeq ($(host_compiler_preferred_target),aarch64-linux-gnu) + HOST_PRIMARY_ARCH := host_aarch64 + HOST_32BIT_ARCH := host_armhf +else + $(error Unknown host compiler target architecture $(host_compiler_preferred_target)) +endif +endif +endif +endif +endif + +# We set HOST_ALL_ARCH this way, as the host architectures may be overridden +# on the command line. +ifeq ($(HOST_PRIMARY_ARCH),$(HOST_32BIT_ARCH)) + HOST_ALL_ARCH := $(HOST_PRIMARY_ARCH) +else + HOST_ALL_ARCH := $(HOST_PRIMARY_ARCH) $(HOST_32BIT_ARCH) +endif + +# Workaround our lack of support for non-Linux HOST_CCs +ifneq ($(HOST_CC_IS_LINUX),1) + $(warning $$(HOST_CC) is non-Linux. Trying to work around.) 
+ override HOST_CC := $(HOST_CC) -D__linux__ + $(eval $(call BothConfigMake,HOST_CC,$(HOST_CC))) +endif + +$(eval $(call BothConfigMake,HOST_PRIMARY_ARCH,$(HOST_PRIMARY_ARCH))) +$(eval $(call BothConfigMake,HOST_32BIT_ARCH,$(HOST_32BIT_ARCH))) +$(eval $(call BothConfigMake,HOST_FORCE_32BIT,$(HOST_FORCE_32BIT))) +$(eval $(call BothConfigMake,HOST_ALL_ARCH,$(HOST_ALL_ARCH))) + +TARGET_ALL_ARCH := +TARGET_PRIMARY_ARCH := +TARGET_SECONDARY_ARCH := + +# Work out the target compiler cross triple, and include the corresponding +# compilers/*.mk file, which sets TARGET_PRIMARY_ARCH and +# TARGET_SECONDARY_ARCH for that compiler. +# +compilers := ../config/compilers +define include-compiler-file + ifeq ($(strip $(1)),) + $$(error empty arg passed to include-compiler-file) + endif + ifeq ($$(wildcard $$(compilers)/$(1).mk),) + $$(warning ******************************************************) + $$(warning Compiler target '$(1)' not recognised) + $$(warning (missing $$(compilers)/$(1).mk file)) + $$(warning ******************************************************) + $$(error Compiler '$(1)' not recognised) + endif + include $$(compilers)/$(1).mk +endef + +# Check the kernel cross compiler to work out which architecture it targets. +# We can then tell if CROSS_COMPILE targets a different architecture. +ifneq ($(origin KERNEL_CROSS_COMPILE),undefined) + # First, calculate the value of KERNEL_CROSS_COMPILE as it would be seen by + # the main build, so we can check it here in the config stage. + $(call one-word-only,KERNEL_CROSS_COMPILE) + _kernel_cross_compile := $(if $(filter undef,$(KERNEL_CROSS_COMPILE)),,$(KERNEL_CROSS_COMPILE)) + # We can take shortcuts with KERNEL_CROSS_COMPILE, as we don't want to + # respect CC and we don't support clang in that part currently. + _kernel_cross_compile := $(_kernel_cross_compile)gcc + # Then check the compiler. 
+ $(eval $(call calculate-compiler-preferred-target,target,$(_kernel_cross_compile))) + $(eval $(call include-compiler-file,$(target_compiler_preferred_target))) + _kernel_primary_arch := $(TARGET_PRIMARY_ARCH) +else + # We can take shortcuts with KERNEL_CROSS_COMPILE, as we don't want to + # respect CC and we don't support clang in that part currently. + _kernel_cross_compile := $(CROSS_COMPILE)gcc + # KERNEL_CROSS_COMPILE will be the same as CROSS_COMPILE, so we don't need + # to do the compatibility check. + _kernel_primary_arch := +endif + +$(eval $(call cross-compiler-name,_cc,$(CROSS_COMPILE),$(CC))) +$(eval $(call cross-compiler-name,_cc_secondary,$(if $(CROSS_COMPILE_SECONDARY),$(CROSS_COMPILE_SECONDARY),$(CROSS_COMPILE)),$(CC_SECONDARY))) +$(eval $(call calculate-compiler-preferred-target,target,$(_cc))) +$(eval $(call include-compiler-file,$(target_compiler_preferred_target))) + +ifneq ($(SUPPORT_ANDROID_PLATFORM),1) +ifeq ($(MULTIARCH),1) + ifneq ($(MAKECMDGOALS),kbuild) + ifneq ($(COMPONENTS),) + $(eval $(call calculate-compiler-preferred-target,target_secondary,$(_cc_secondary))) + ifneq ($(target_compiler_preferred_target),$(target_secondary_compiler_preferred_target)) + $(eval $(call include-compiler-file,$(target_secondary_compiler_preferred_target))) + + ifeq ($(TARGET_SECONDARY_ARCH),) + $(error $(CROSS_COMPILE_SECONDARY) not supported for MULTIARCH builds) + endif + endif + endif + endif +endif +endif + +define remap-arch +$(if $(INTERNAL_ARCH_REMAP_$(1)),$(INTERNAL_ARCH_REMAP_$(1)),$(1)) +endef + +# Remap 'essentially compatible' architectures so the KM vs UM check +# isn't too strict. These mixtures are widely supported. 
+INTERNAL_ARCH_REMAP_target_armhf := target_armv7-a +INTERNAL_ARCH_REMAP_target_armel := target_armv7-a +INTERNAL_ARCH_REMAP_target_mips32r2el := target_mips32el +INTERNAL_ARCH_REMAP_target_mips32r6el := target_mips32el + +# Sanity check: if KERNEL_CROSS_COMPILE was set, it has to target the same +# architecture as CROSS_COMPILE. +ifneq ($(_kernel_primary_arch),) + ifneq ($(call remap-arch,$(TARGET_PRIMARY_ARCH)),$(call remap-arch,$(_kernel_primary_arch))) + $(warning ********************************************************) + $(warning Error: Kernel and user-mode cross compilers build for) + $(warning different targets) + $(warning $(space)$(space)CROSS_COMPILE=$(CROSS_COMPILE)) + $(warning $(space)$(space)$(space)builds for $(TARGET_PRIMARY_ARCH)) + $(warning $(space)$(space)KERNEL_CROSS_COMPILE=$(KERNEL_CROSS_COMPILE)) + $(warning $(space)$(space)$(space)builds for $(_kernel_primary_arch)) + $(warning ********************************************************) + $(error Mismatching kernel and user-mode cross compilers) + endif +endif + +ifneq ($(MULTIARCH),32only) +TARGET_ALL_ARCH += $(TARGET_PRIMARY_ARCH) +endif +ifneq ($(MULTIARCH),64only) +TARGET_ALL_ARCH += $(TARGET_SECONDARY_ARCH) +endif + +$(eval $(call BothConfigMake,TARGET_PRIMARY_ARCH,$(TARGET_PRIMARY_ARCH))) +$(eval $(call BothConfigMake,TARGET_SECONDARY_ARCH,$(TARGET_SECONDARY_ARCH))) +$(eval $(call BothConfigMake,TARGET_ALL_ARCH,$(TARGET_ALL_ARCH))) +$(eval $(call BothConfigMake,TARGET_FORCE_32BIT,$(TARGET_FORCE_32BIT))) + +$(info ******* Multiarch build: $(if $(MULTIARCH),yes,no)) +$(info ******* Primary arch: $(if $(TARGET_PRIMARY_ARCH),$(TARGET_PRIMARY_ARCH),none)) +$(info ******* Secondary arch: $(if $(TARGET_SECONDARY_ARCH),$(TARGET_SECONDARY_ARCH),none)) +$(info ******* PVR arch: $(PVR_ARCH)) +$(info ******* Host OS: $(HOST_OS)) +$(info ******* Target OS: $(TARGET_OS)) + +ifeq ($(SUPPORT_NEUTRINO_PLATFORM),) + # Find the paths to libgcc for the primary and secondary architectures. 
+ LIBGCC := $(shell $(_cc) -print-libgcc-file-name) + LIBGCC_SECONDARY := $(shell $(_cc_secondary) $(TARGET_FORCE_32BIT) -print-libgcc-file-name) + + # Android clang toolchain drivers cannot find libgcc.a for various triples. + # We will use a fixed path to the last supported version (4.9.x) of GCC. + # + ifeq ($(SUPPORT_ARC_PLATFORM),) + ifeq ($(_CLANG),true) + LIBGCC_PREBUILT_PATH := $(ANDROID_ROOT)/prebuilts/gcc/linux-x86 + ifeq ($(filter-out arm%,$(ARCH)),) + LIBGCC := $(LIBGCC_PREBUILT_PATH)/aarch64/aarch64-linux-android-4.9/lib/gcc/aarch64-linux-android/4.9.x/libgcc.a + LIBGCC_SECONDARY := $(LIBGCC_PREBUILT_PATH)/arm/arm-linux-androideabi-4.9/lib/gcc/arm-linux-androideabi/4.9.x/libgcc.a + else + LIBGCC := $(LIBGCC_PREBUILT_PATH)/x86/x86_64-linux-android-4.9/lib/gcc/x86_64-linux-android/4.9.x/libgcc.a + LIBGCC_SECONDARY := $(LIBGCC_PREBUILT_PATH)/x86/x86_64-linux-android-4.9/lib/gcc/x86_64-linux-android/4.9.x/32/libgcc.a + endif + endif + endif +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/config/compilers/aarch64-linux-gnu.mk b/drivers/mcst/gpu-imgtec/build/linux/config/compilers/aarch64-linux-gnu.mk new file mode 100644 index 000000000000..6658e615e441 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/config/compilers/aarch64-linux-gnu.mk @@ -0,0 +1,11 @@ +# Ubuntu default aarch64 compiler. 
+TARGET_PRIMARY_ARCH := target_aarch64 +ifeq ($(MULTIARCH),1) + ifneq ($(MAKECMDGOALS),kbuild) + ifneq ($(COMPONENTS),) + ifeq ($(CROSS_COMPILE_SECONDARY),) + $(error CROSS_COMPILE_SECONDARY must be set for multiarch ARM builds) + endif + endif + endif +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/config/compilers/arm-linux-gnueabi.mk b/drivers/mcst/gpu-imgtec/build/linux/config/compilers/arm-linux-gnueabi.mk new file mode 100644 index 000000000000..0890cb554e8c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/config/compilers/arm-linux-gnueabi.mk @@ -0,0 +1,6 @@ +# 32-bit ARM soft float compiler +ifeq ($(MULTIARCH),1) + TARGET_SECONDARY_ARCH := target_armel +else + TARGET_PRIMARY_ARCH := target_armel +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/config/compilers/arm-linux-gnueabihf.mk b/drivers/mcst/gpu-imgtec/build/linux/config/compilers/arm-linux-gnueabihf.mk new file mode 100644 index 000000000000..2abaa9f4fc3b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/config/compilers/arm-linux-gnueabihf.mk @@ -0,0 +1,6 @@ +# 32-bit ARM hard float compiler +ifeq ($(MULTIARCH),1) + TARGET_SECONDARY_ARCH := target_armhf +else + TARGET_PRIMARY_ARCH := target_armhf +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/config/compilers/e2k-linux-gnu.mk b/drivers/mcst/gpu-imgtec/build/linux/config/compilers/e2k-linux-gnu.mk new file mode 100644 index 000000000000..6cd3570ac283 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/config/compilers/e2k-linux-gnu.mk @@ -0,0 +1,2 @@ +# 64-bit e2k compiler +TARGET_PRIMARY_ARCH := target_e2k diff --git a/drivers/mcst/gpu-imgtec/build/linux/config/compilers/i386-linux-gnu.mk b/drivers/mcst/gpu-imgtec/build/linux/config/compilers/i386-linux-gnu.mk new file mode 100644 index 000000000000..15d094c68fa1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/config/compilers/i386-linux-gnu.mk @@ -0,0 +1,11 @@ +# 32-bit x86 compiler +ifeq ($(MULTIARCH),1) + # Ignore MULTIARCH setting if this is a 
32-bit build + ifeq ($(ARCH),i386) + TARGET_PRIMARY_ARCH := target_i686 + else + TARGET_SECONDARY_ARCH := target_i686 + endif +else + TARGET_PRIMARY_ARCH := target_i686 +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/config/compilers/mips64el-buildroot-linux-gnu.mk b/drivers/mcst/gpu-imgtec/build/linux/config/compilers/mips64el-buildroot-linux-gnu.mk new file mode 100644 index 000000000000..3d07db0bfeaa --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/config/compilers/mips64el-buildroot-linux-gnu.mk @@ -0,0 +1,5 @@ +# 64-bit MIPS R6 little-endian compiler +TARGET_PRIMARY_ARCH := target_mips64r6el +ifeq ($(MULTIARCH),1) +TARGET_SECONDARY_ARCH := target_mips32r6el +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/config/compilers/mipsel-buildroot-linux-gnu.mk b/drivers/mcst/gpu-imgtec/build/linux/config/compilers/mipsel-buildroot-linux-gnu.mk new file mode 100644 index 000000000000..9ead69e4c168 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/config/compilers/mipsel-buildroot-linux-gnu.mk @@ -0,0 +1,2 @@ +# 32-bit MIPS compiler +TARGET_PRIMARY_ARCH := target_mips32r6el diff --git a/drivers/mcst/gpu-imgtec/build/linux/config/compilers/riscv64-linux-gnu.mk b/drivers/mcst/gpu-imgtec/build/linux/config/compilers/riscv64-linux-gnu.mk new file mode 100644 index 000000000000..b78a1078f6d0 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/config/compilers/riscv64-linux-gnu.mk @@ -0,0 +1,2 @@ +#64-bit RISC-V compiler +TARGET_PRIMARY_ARCH := target_riscv64 diff --git a/drivers/mcst/gpu-imgtec/build/linux/config/compilers/x86_64-linux-gnu.mk b/drivers/mcst/gpu-imgtec/build/linux/config/compilers/x86_64-linux-gnu.mk new file mode 100644 index 000000000000..cf3c4b419a80 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/config/compilers/x86_64-linux-gnu.mk @@ -0,0 +1,34 @@ +# 64-bit x86 compiler +ifneq ($(KERNELDIR),) + ifneq ($(ARCH),i386) + ifeq ($(shell grep -q "CONFIG_X86_32=y" $(KERNELDIR)/.config && echo 1 || echo 0),1) + $(warning 
******************************************************) + $(warning Your kernel appears to be configured for 32-bit x86,) + $(warning but CROSS_COMPILE (or KERNEL_CROSS_COMPILE) points) + $(warning to a 64-bit compiler.) + $(warning If you want a 32-bit build, either set CROSS_COMPILE) + $(warning to point to a 32-bit compiler, or build with ARCH=i386) + $(warning to force 32-bit mode with your existing compiler.) + $(warning ******************************************************) + $(error Invalid CROSS_COMPILE / kernel architecture combination) + endif # CONFIG_X86_32 + endif # ARCH=i386 +endif # KERNELDIR + +ifeq ($(ARCH),i386) + # This is actually a 32-bit build using a native 64-bit compiler + INCLUDE_I386-LINUX-GNU := true +else + TARGET_PRIMARY_ARCH := target_x86_64 + ifeq ($(MULTIARCH),1) + ifeq ($(CROSS_COMPILE_SECONDARY),) + # The secondary architecture is being built with a native 64-bit compiler + INCLUDE_I386-LINUX-GNU := true + endif + endif +endif + +ifeq ($(INCLUDE_I386-LINUX-GNU),true) + TARGET_FORCE_32BIT := -m32 + include $(compilers)/i386-linux-gnu.mk +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/config/core.mk b/drivers/mcst/gpu-imgtec/build/linux/config/core.mk new file mode 100644 index 000000000000..dd5fb1710f56 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/config/core.mk @@ -0,0 +1,1977 @@ +########################################################################### ### +#@File +#@Title Root build configuration. +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +ifneq ($(INTERNAL_CLOBBER_ONLY),true) + ifeq ($(RGX_BVNC),) + ifeq ($(NO_HARDWARE),1) + $(error Error: Must specify RGX_BVNC when building for NO_HARDWARE) + else ifeq ($(PVR_ARCH),) + $(error Error: Must specify PVR_ARCH when doing a kernel mode build without RGX_BVNC set) + endif + endif +endif + +# If building for volcanic architecture use the core_volcanic.mk instead +ifeq ($(PVR_ARCH),volcanic) + include ../config/core_volcanic.mk +else + +# Configuration wrapper for new build system. This file deals with +# configuration of the build. Add to this file anything that deals +# with switching driver options on/off and altering the defines or +# objects the build uses. +# +# At the end of this file is an exhaustive list of all variables +# that are passed between the platform/config stage and the generic +# build. PLEASE refrain from adding more variables than necessary +# to this stage -- almost all options can go through config.h. +# + +# Sanity check: Make sure preconfig has been included +ifeq ($(TOP),) +$(error TOP not defined: Was preconfig.mk included in root makefile?) +endif + +################################# MACROS #################################### + +ALL_TUNABLE_OPTIONS := + +# This records the config option's help text and default value. Note that +# the help text can't contain a literal comma. Use $(comma) instead. 
+define RegisterOptionHelp +ALL_TUNABLE_OPTIONS += $(1) +ifeq ($(INTERNAL_DESCRIPTION_FOR_$(1)),) +INTERNAL_DESCRIPTION_FOR_$(1) := $(3) +endif +INTERNAL_CONFIG_DEFAULT_FOR_$(1) := $(2) +$(if $(4),\ + $(error Too many arguments in config option '$(1)' (stray comma in help text?))) +endef + + +# Write out a GNU make option for both user & kernel +# +define BothConfigMake +$$(eval $$(call KernelConfigMake,$(1),$(2))) +$$(eval $$(call UserConfigMake,$(1),$(2))) +endef + +# Conditionally write out a GNU make option for both user & kernel +# +define TunableBothConfigMake +$$(eval $$(call _TunableKernelConfigMake,$(1),$(2))) +$$(eval $$(call _TunableUserConfigMake,$(1),$(2))) +$(call RegisterOptionHelp,$(1),$(2),$(3),$(4)) +endef + +# Write out an option for both user & kernel +# +define BothConfigC +$$(eval $$(call KernelConfigC,$(1),$(2))) +$$(eval $$(call UserConfigC,$(1),$(2))) +endef + +# Conditionally write out an option for both user & kernel +# +define TunableBothConfigC +$$(eval $$(call _TunableKernelConfigC,$(1),$(2))) +$$(eval $$(call _TunableUserConfigC,$(1),$(2))) +$(call RegisterOptionHelp,$(1),$(2),$(3),$(4)) +endef + +# Use this to mark config options which have to exist, but aren't +# user-tunable. Warn if an attempt is made to change it. +# +define NonTunableOption +$(if $(filter command line environment,$(origin $(1))),\ + $(error Changing '$(1)' is not supported)) +endef + +############################### END MACROS ################################## + +# Check we have a new enough version of GNU make. +# +need := 3.81 +ifeq ($(filter $(need),$(firstword $(sort $(MAKE_VERSION) $(need)))),) +$(error A version of GNU make >= $(need) is required - this is version $(MAKE_VERSION)) +endif + +include ../defs.mk + +# Infer PVR_BUILD_DIR from the directory configuration is launched from. +# Check anyway that such a directory exists. 
+# +PVR_BUILD_DIR := $(notdir $(abspath .)) +$(call directory-must-exist,$(TOP)/build/linux/$(PVR_BUILD_DIR)) + +# Output directory for configuration, object code, +# final programs/libraries, and install/rc scripts. +# +BUILD ?= release +ifneq ($(filter $(WINDOW_SYSTEM),xorg wayland nullws nulldrmws screen surfaceless lws-generic),) +OUT ?= $(TOP)/binary_$(PVR_BUILD_DIR)_$(WINDOW_SYSTEM)_$(BUILD) +else +OUT ?= $(TOP)/binary_$(PVR_BUILD_DIR)_$(BUILD) +endif + +# Use abspath, which doesn't require the path to already exist, to remove '.' +# and '..' path components. This allows paths to be manipulated without things +# ending up in the wrong place. +override OUT := $(abspath $(if $(filter /%,$(OUT)),$(OUT),$(TOP)/$(OUT))) + +CONFIG_MK := $(OUT)/config.mk +CONFIG_H := $(OUT)/config.h +CONFIG_KERNEL_MK := $(OUT)/config_kernel.mk +CONFIG_KERNEL_H := $(OUT)/config_kernel.h + +# Convert commas to spaces in $(D). This is so you can say "make +# D=config-changes,freeze-config" and have $(filter config-changes,$(D)) +# still work. +override D := $(subst $(comma),$(space),$(D)) + +# Create the OUT directory +# +$(shell mkdir -p $(OUT)) + +# For a clobber-only build, we shouldn't regenerate any config files +ifneq ($(INTERNAL_CLOBBER_ONLY),true) + +# Core handling +# + +-include ../config/user-defs.mk +-include ../config/kernel-defs.mk + +# Disabling the online OpenCL compiler breaks the OpenCL spec. +# Use this option carefully (i.e. for embedded usage only). +OCL_ONLINE_COMPILATION ?= 1 + +# Some platforms don't have blob cache support, or the blob cache isn't usable +# for some reason. Make it possible to disable the OpenCL driver's use of it. +OCL_USE_KERNEL_BLOB_CACHE ?= 1 + +# Allow OpenCL to disable image sharing with EGL on platforms that don't support it. 
+OCL_USE_EGL_SHARING ?= 1 +OCL_USE_GRALLOC_IMAGE_SHARING ?= 0 + +# Rather than requiring the user to have to define two variables (one quoted, +# one not), make PVRSRV_MODNAME a non-tunable and give it an overridable +# default here. +# +PVRSRV_MODNAME := pvrsrvkm +PVRSYNC_MODNAME := pvr_sync + +# Normally builds don't touch these, but we use them to influence the +# components list. Make sure these are defined early enough to make this +# possible. +# + +# Skip defining these UM/FW macros for kbuilds which do not define RGX_BVNC +ifneq ($(RGX_BNC_CONFIG_KM_HEADER),) +# Only the Firmware needs this make macro. +SUPPORT_META_DMA :=\ + $(shell grep -qw RGX_FEATURE_META_DMA $(RGX_BNC_CONFIG_KM) && echo 1) + +# Only the Firmware needs this make macro. +SUPPORT_META_COREMEM :=\ + $(shell grep -qe 'RGX_FEATURE_META_COREMEM_SIZE ([123456789][1234567890]*U*)' $(RGX_BNC_CONFIG_KM) && echo 1) + +# Client drivers, firmware and libsrv_um need this make macro. +SUPPORT_COMPUTE := \ + $(shell grep -qw RGX_FEATURE_COMPUTE $(RGX_BNC_CONFIG_KM) && echo 1) + +# Macro used by client driver makefiles only. +OPENCL_CDM_FORMAT_2 ?= \ + $(shell grep -qw "RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (2U*)" $(RGX_BNC_CONFIG_KM) && echo 1) + +# Only the Firmware needs this make macro. +SUPPORT_MIPS_FIRMWARE :=\ + $(shell grep -qw RGX_FEATURE_MIPS $(RGX_BNC_CONFIG_KM) && echo 1) + +# Only the Firmware needs this make macro. +SUPPORT_RISCV_FIRMWARE :=\ + $(shell grep -qw RGX_FEATURE_RISCV_FW_PROCESSOR $(RGX_BNC_CONFIG_KM) && echo 1) + +# Firmware and libsrv_um need this make macro. +SUPPORT_TLA :=\ + $(shell grep -qw RGX_FEATURE_TLA $(RGX_BNC_CONFIG_KM) && echo 1) + +# Firmware and libsrv_um need this make macro. +SUPPORT_FASTRENDER_DM :=\ + $(shell grep -qw RGX_FEATURE_FASTRENDER_DM $(RGX_BNC_CONFIG_KM) && echo 1) + +# Firmware and libsrv_um need this make macro. 
+SUPPORT_SIGNAL_FILTER := \ + $(shell grep -qw RGX_FEATURE_SIGNAL_SNOOPING $(RGX_BNC_CONFIG_KM) && echo 1) + +# Compute only used by Firmware and client drivers +SUPPORT_COMPUTE_ONLY := \ + $(shell grep -qw RGX_FEATURE_COMPUTE_ONLY $(RGX_BNC_CONFIG_KM) && echo 1) + +# Macro used by client driver makefiles only. +ifneq ($(wildcard $(RGX_BNC_CONFIG_H)),) + SUPPORT_ES32 :=\ + $(shell grep -qw RGX_FEATURE_ASTC $(RGX_BNC_CONFIG_H) && grep -qw RGX_FEATURE_GS_RTA_SUPPORT $(RGX_BNC_CONFIG_KM) && echo 1) +endif +endif + +PVRSRV_VZ_NUM_OSID ?= 1 + +ifeq ($(SUPPORT_COMPUTE_ONLY),1) + # A compute only core has neither TLA or 3D + PVRSRV_SUPPORT_LEGACY_TQ_UM ?= 0 + PVRSRV_SUPPORT_LEGACY_TQ_FW ?= 0 +else + ifneq ($(SUPPORT_FASTRENDER_DM),1) + # Without a TDM, the TLA and/or 3D TQ is required + PVRSRV_SUPPORT_LEGACY_TQ_UM ?= 1 + PVRSRV_SUPPORT_LEGACY_TQ_FW ?= 1 + endif +endif + +# Default place for binaries and shared libraries +BIN_DESTDIR ?= /usr/local/bin +INCLUDE_DESTDIR ?= /usr/include +SHARE_DESTDIR ?= /usr/local/share +SHLIB_DESTDIR ?= /usr/lib +FW_DESTDIR ?= /lib/firmware + +# Build's selected list of components. +# - components.mk is a per-build file that specifies the components that are +# to be built +-include components.mk + +# Set up the host and target compiler. 
+include ../config/compiler.mk + +# pvr-gdb needs extra components +# +ifeq ($(SUPPORT_DEBUGGER),1) + ifneq ($(filter opencl,$(COMPONENTS)),) + COMPONENTS += libpvrdebugger pvrgtrace gdb_ocl_test gdb_unit_test + endif + ifneq ($(filter opengles3,$(COMPONENTS)),) + COMPONENTS += libpvrdebugger pvrgtrace + endif + ifneq ($(filter vulkan,$(COMPONENTS)),) + COMPONENTS += libpvrdebugger pvrgtrace + endif + ifeq ($(SUPPORT_ANDROID_PLATFORM),1) + COMPONENTS += libpvrdebugipc + endif +endif + +$(eval $(call BothConfigMake,PVR_ARCH,$(PVR_ARCH))) + +ifneq ($(SUPPORT_BUILD_LWS),) + ifneq ($(SYSROOT),) + $(warning ******************************************************) + $(warning WARNING: You have specified a SYSROOT, or are using a) + $(warning buildroot compiler, and enabled SUPPORT_BUILD_LWS. We) + $(warning will ignore the sysroot and will build all required) + $(warning LWS components. Unset SUPPORT_BUILD_LWS if this is not) + $(warning what you want.) + $(warning ******************************************************) + endif + + ifneq ($(origin SUPPORT_BUILD_LWS),file) + $(warning ******************************************************) + $(warning WARNING: Enabling SUPPORT_BUILD_LWS is deprecated.) + ifneq ($(filter surfaceless wayland xorg,$(WINDOW_SYSTEM)),) + $(warning You should not need to set this explicitly.) + else + $(warning You should be setting SYSROOT instead, which is) + $(warning documented in the Rogue DDK Linux and Rogue DDK) + $(warning Linux WS Platform Guides.) 
+ endif + $(warning ******************************************************) + endif + + override SYSROOT := +endif + + +ifneq ($(strip $(LWS_PREFIX)),) +endif + +# The name of the file that contains the set of tarballs that should be +# built to support a given linux distribution +LWS_DIST ?= tarballs-ubuntu-next + +ifeq ($(SUPPORT_BUILD_LWS),1) + COMPONENTS += ${LWS_COMPONENTS} +endif + +# RenderScript Replay needs extra components +ifeq ($(RSREPLAY),1) +ifneq ($(COMPONENTS),) +COMPONENTS += rscompiler renderscript rsreplay replay_rsdriver +endif +endif + +$(if $(filter config,$(D)),$(info Build configuration:)) + +################################# CONFIG #################################### + +-include ../config/core-internal.mk + + +$(eval $(call TunableKernelConfigMake,SUPPORT_LINUX_WRAP_EXTMEM_PAGE_TABLE_WALK,)) +$(eval $(call TunableKernelConfigC,SUPPORT_LINUX_WRAP_EXTMEM_PAGE_TABLE_WALK,,\ +This allows the kernel wrap memory handler to determine the pages_\ +associated with a given virtual address by performing a walk-through of the corresponding_\ +page tables. 
This method is only used with virtual address regions that belong to a device_\ +or with virtual memory regions that have VM_IO set._\ +This setting is for Linux platforms only._\ +)) + +# Firmware toolchain versions +$(eval $(call BothConfigMake,METAG_VERSION_NEEDED,2.8.1.0.3)) +$(eval $(call BothConfigMake,MIPS_VERSION_NEEDED,2014.07-1)) + +ifneq ($(SUPPORT_NEUTRINO_PLATFORM), 1) + +# If KERNELDIR is set, write it out to the config.mk, with +# KERNEL_COMPONENTS and KERNEL_ID +# +ifneq ($(strip $(KERNELDIR)),) +PVRSRV_MODULE_BASEDIR ?= /lib/modules/$(KERNEL_ID)/extra/ +$(eval $(call BothConfigMake,KERNELDIR,$(KERNELDIR))) +$(eval $(call BothConfigMake,KERNEL_ID,$(KERNEL_ID))) +$(eval $(call BothConfigMake,PVRSRV_MODULE_BASEDIR,$(PVRSRV_MODULE_BASEDIR))) +$(eval $(call BothConfigMake,KERNEL_COMPONENTS,$(KERNEL_COMPONENTS))) +$(eval $(call TunableKernelConfigMake,EXTRA_PVRSRVKM_COMPONENTS,,\ +List of components that should be built in to pvrsrvkm.ko$(comma) rather than_\ +forming separate kernel modules._\ +)) + +# If KERNEL_CROSS_COMPILE is set to "undef", this is magically +# equivalent to being unset. If it is unset, we use CROSS_COMPILE +# (which might also be unset). If it is set, use it directly. +ifneq ($(KERNEL_CROSS_COMPILE),undef) +KERNEL_CROSS_COMPILE ?= $(CROSS_COMPILE) +$(eval $(call TunableBothConfigMake,KERNEL_CROSS_COMPILE,)) +endif + +# Alternatively, allow the CC used for kbuild to be overridden +# exactly, bypassing any KERNEL_CROSS_COMPILE configuration. +# LD, NM and OBJCOPY could be overridden by set of CC tools. +$(eval $(call TunableBothConfigMake,KERNEL_CC,)) +$(eval $(call TunableBothConfigMake,KERNEL_LD,)) +$(eval $(call TunableBothConfigMake,KERNEL_NM,)) +$(eval $(call TunableBothConfigMake,KERNEL_OBJCOPY,)) + +# Check the KERNELDIR has a kernel built and also check that it is +# not 64-bit, which we do not support. 
+KERNEL_AUTOCONF := \ + $(strip $(wildcard $(KERNELDIR)/include/linux/autoconf.h) \ + $(wildcard $(KERNELDIR)/include/generated/autoconf.h)) +ifeq ($(KERNEL_AUTOCONF),) +$(warning autoconf.h not found in $$(KERNELDIR)/include/linux \ +or $$(KERNELDIR)/include/generated. Check your $$(KERNELDIR) variable \ +and kernel configuration.) +endif +else +$(if $(KERNEL_COMPONENTS),$(warning KERNELDIR is not set. Kernel components cannot be built)) +endif + +# Platforms can make use of the ChromiumOS upstream kernels. Make the build +# system aware of which features are available within those kernels. +-include ../common/chromiumos_kernel.mk + +# Enable Client CCB grow +PVRSRV_ENABLE_CCCB_GROW ?= 1 +$(eval $(call TunableBothConfigC,PVRSRV_ENABLE_CCCB_GROW,,\ +This controls the feature that allows the Services client CCBs to grow_\ +when they become full._\ +)) + +endif # !Neutrino + + +ifneq ($(_window_systems),) +# If there's only one window system then don't output this option as part +# of `make confighelp` +ifeq ($(words $(_window_systems)),1) +$(eval $(call BothConfigMake,WINDOW_SYSTEM,$(WINDOW_SYSTEM))) +else +$(eval $(call TunableBothConfigMake,WINDOW_SYSTEM,,\ +Window system to use ($(_window_systems))._\ +)) +endif +endif + +# Ideally configured by platform Makefiles, as necessary +# +SHADER_DESTDIR := $(SHARE_DESTDIR)/pvr/shaders/ + +ifeq ($(RGX_FW_SIGNED),1) +ifeq ($(RGX_FW_PK8),) +$(error RGX_FW_PK8 must be set for RGX_FW_SIGNED=1.) 
+endif # !RGX_FW_PK8 +$(eval $(call TunableBothConfigC,RGX_FW_PKCS1_PSS_PADDING,)) +else # RGX_FW_SIGNED +endif # RGX_FW_SIGNED + +ifeq ($(RGX_FW_SIGNED),1) +$(eval $(call KernelConfigC,RGX_FW_FILENAME,"\"rgx.fw.signed\"")) +$(eval $(call KernelConfigC,RGX_SH_FILENAME,"\"rgx.sh.signed\"")) +ifneq ($(RGX_FW_X509),) +$(eval $(call KernelConfigC,RGX_FW_SIGNED,1)) +endif # RGX_FW_X509 +else # RGX_FW_SIGNED +$(eval $(call KernelConfigC,RGX_FW_FILENAME,"\"rgx.fw\"")) +$(eval $(call KernelConfigC,RGX_SH_FILENAME,"\"rgx.sh\"")) +endif # RGX_FW_SIGNED + +ifneq ($(SUPPORT_ANDROID_PLATFORM),1) + ifeq ($(wildcard ${TOP}/build/linux/tools/prepare-llvm.sh),) + # No facility for using LLVM in this package. + else ifeq ($(LLVM_BUILD_DIR),) + $(warning LLVM_BUILD_DIR is not set. Components that use it (e.g. OpenCL, Vulkan) cannot be built) + else + override LLVM_BUILD_DIR := $(abspath $(LLVM_BUILD_DIR)) + LLVM_MESSAGE=$(shell ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} ${TOP}/build/linux/tools/prepare-llvm.sh -c $(LLVM_BUILD_DIR)) + + ifneq ($(filter Error:,$(firstword $(LLVM_MESSAGE))),) + $(info *** prepare-llvm.sh: $(LLVM_MESSAGE)) + $(error *** LLVM_BUILD_DIR $(LLVM_BUILD_DIR) is not suitable) + endif + + ifneq ($(filter Warning:,$(firstword $(LLVM_MESSAGE))),) + $(info *** prepare-llvm.sh: $(LLVM_MESSAGE)) + endif + + # Because we need to handle MULTIARCH builds, we can't work out the + # architecture to use in the paths until compile-time. So leave + # _LLVM_ARCH_ as a placeholder that will be replaced in the + # moduledef. 
+ LLVM_INCLUDE_PATH := $(LLVM_BUILD_DIR)/llvm.src/llvm/include \ + $(LLVM_BUILD_DIR)/llvm._LLVM_ARCH_/include + + CLANG_INCLUDE_PATH := $(LLVM_INCLUDE_PATH) \ + $(LLVM_BUILD_DIR)/llvm.src/clang/include \ + $(LLVM_BUILD_DIR)/llvm._LLVM_ARCH_/tools/clang/include + + SPV_INCLUDE_PATH := $(LLVM_INCLUDE_PATH) \ + $(LLVM_BUILD_DIR)/llvm.src/spv-translator/include \ + $(LLVM_BUILD_DIR)/llvm._LLVM_ARCH_/tools/SPV_TRANSLATOR/include + + LLVM_LIB_PATH := $(LLVM_BUILD_DIR)/llvm._LLVM_ARCH_/lib/ + + LLVM_INCLUDE_PATH_HOST := $(LLVM_BUILD_DIR)/llvm.src/llvm/include/ \ + $(LLVM_BUILD_DIR)/llvm._LLVM_ARCH_/include/ + + CLANG_INCLUDE_PATH_HOST := $(LLVM_INCLUDE_PATH_HOST) \ + $(LLVM_BUILD_DIR)/llvm.src/clang/include \ + $(LLVM_BUILD_DIR)/llvm._LLVM_ARCH_/tools/clang/include + + SPV_INCLUDE_PATH_HOST := $(LLVM_INCLUDE_PATH_HOST) \ + $(LLVM_BUILD_DIR)/llvm.src/spv-translator/include \ + $(LLVM_BUILD_DIR)/llvm._LLVM_ARCH_/tools/SPV_TRANSLATOR/include + + LLVM_LIB_PATH_HOST := $(LLVM_BUILD_DIR)/llvm._LLVM_ARCH_/lib/ + endif + else + LLVM_MESSAGE=$(shell ANDROID_SDK_ROOT=$(ANDROID_SDK_ROOT) ${TOP}/build/linux/tools/prepare-llvm-android.sh -a verify -t "$(JNI_CPU_ABI) $(JNI_CPU_ABI_2ND)") + + ifneq ($(filter Warning:,$(firstword $(LLVM_MESSAGE))),) + $(info *** prepare-llvm-android.sh: $(LLVM_MESSAGE)) + $(info *** Please update your VNDK with the newer LLVM version. ) + endif +endif + + + + + + +# The SPV library is in the same folder as the LLVM ones as it is built +# as part of LLVM + +ifneq ($(SUPPORT_ANDROID_PLATFORM),1) + ifeq ($(wildcard ${TOP}/build/linux/tools/prepare-nnvm.sh),) + # No facility for using NNVM in this package. 
+ else ifeq ($(NNVM_BUILD_DIR),) + # IMGDNN is not built by default so do nothing if NNVM_BUILD_DIR is not set + else + override NNVM_BUILD_DIR := $(abspath $(NNVM_BUILD_DIR)) + NNVM_MESSAGE=$(shell ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} ${TOP}/build/linux/tools/prepare-nnvm.sh -c $(NNVM_BUILD_DIR)) + + ifneq ($(filter Error:,$(firstword $(NNVM_MESSAGE))),) + $(info *** prepare-nnvm.sh: $(NNVM_MESSAGE)) + $(error *** NNVM_BUILD_DIR $(NNVM_BUILD_DIR) is not suitable) + endif + + ifneq ($(filter Warning:,$(firstword $(NNVM_MESSAGE))),) + $(info *** prepare-nnvm.sh: $(NNVM_MESSAGE)) + endif + + # Because we need to handle MULTIARCH builds, we can't work out the + # architecture to use in the paths until compile-time. So leave + # _NNVM_ARCH_ as a placeholder that will be replaced in the + # moduledef. + NNVM_INCLUDE_PATH := $(NNVM_BUILD_DIR)/nnvm.src/include + NNVM_LIB_PATH := $(NNVM_BUILD_DIR)/nnvm._NNVM_ARCH_/lib/ + endif +endif + + + + + + +$(if $(USE_CCACHE),$(if $(USE_DISTCC),$(error\ +Enabling both USE_CCACHE and USE_DISTCC at the same time is not supported))) + + +# Invariant options for Linux +# +$(eval $(call BothConfigC,LINUX,)) + +$(eval $(call BothConfigC,PVR_BUILD_DIR,"\"$(PVR_BUILD_DIR)\"")) +$(eval $(call BothConfigC,PVR_BUILD_TYPE,"\"$(BUILD)\"")) +$(eval $(call BothConfigC,PVRSRV_MODNAME,"\"$(PVRSRV_MODNAME)\"")) +$(eval $(call BothConfigMake,PVRSRV_MODNAME,$(PVRSRV_MODNAME))) +$(eval $(call BothConfigC,PVRSYNC_MODNAME,"\"$(PVRSYNC_MODNAME)\"")) +$(eval $(call BothConfigMake,PVRSYNC_MODNAME,$(PVRSYNC_MODNAME))) +$(eval $(call BothConfigMake,PVR_BUILD_DIR,$(PVR_BUILD_DIR))) +$(eval $(call BothConfigMake,PVR_BUILD_TYPE,$(BUILD))) + +SUPPORT_RGX ?= 1 +ifeq ($(SUPPORT_RGX),1) +$(eval $(call BothConfigC,SUPPORT_RGX,1)) +$(eval $(call BothConfigMake,SUPPORT_RGX,1)) +endif + +# Some of the definitions in stdint.h aren't exposed by default in C++ mode, +# unless these macros are defined. 
To make sure we get these definitions +# regardless of which files include stdint.h, define them here. + +# FIXME: We can't use GCC __thread reliably with clang. +ifeq ($(SUPPORT_ANDROID_PLATFORM),1) +else +endif + +ifneq ($(DISPLAY_CONTROLLER),) +$(eval $(call BothConfigC,DISPLAY_CONTROLLER,$(DISPLAY_CONTROLLER))) +$(eval $(call BothConfigMake,DISPLAY_CONTROLLER,$(DISPLAY_CONTROLLER))) +endif + + +$(eval $(call BothConfigMake,PVR_SYSTEM,$(PVR_SYSTEM))) +$(eval $(call KernelConfigMake,PVR_LOADER,$(PVR_LOADER))) + +ifeq ($(MESA_EGL),1) +else +endif + + +# Build-type dependent options +# +$(eval $(call BothConfigMake,BUILD,$(BUILD))) + +ifeq ($(SUPPORT_VALIDATION),1) +# Enable Periodic Hardware Reset functionality for testing +override PVR_ENABLE_PHR := 1 +endif + +# Prevent excluding regconfig bridge when the build level macro defined, +# regconfig functions are used in pvrdebug. +# +ifeq ($(SUPPORT_USER_REGISTER_CONFIGURATION),1) +ifeq ($(EXCLUDE_RGXREGCONFIG_BRIDGE),1) +override EXCLUDE_RGXREGCONFIG_BRIDGE := 0 +endif +endif + +ifeq ($(SUPPORT_ANDROID_PLATFORM),1) +# Always use DEBUGLINK. These days we are using external components which +# have large amounts of C++ debug information and it is impractical to carry +# statically linked components to the target filesystem without stripping. 
+DEBUGLINK ?= 1 +endif + +ifeq ($(BUILD),debug) + +$(eval $(call TunableBothConfigMake,PVR_SERVICES_DEBUG,,\ +Enable additional services debug options._\ +This needs to be enabled for both the UM and KM builds_\ +so that compatibility between them is achieved.\ +)) + +ifeq ($(PVR_SERVICES_DEBUG),1) +PVRSRV_ENABLE_GPU_MEMORY_INFO ?= 1 +PVRSRV_DEBUG_HANDLE_LOCK ?= 1 +PVRSRV_APPHINT_ENABLEFWPOISONONFREE ?= IMG_TRUE +PVRSRV_TIMER_CORRELATION_HISTORY ?= 1 +endif + +# enable sync prim poisoning in debug builds +PVRSRV_ENABLE_SYNC_POISONING ?= 1 + +# Client CCB usage statistics enabled by default in debug builds +PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE ?= 1 +# bridge debug and statistics enabled by default in debug builds +DEBUG_BRIDGE_KM ?= 1 +$(eval $(call BothConfigC,DEBUG,)) +$(eval $(call TunableBothConfigMake,DEBUGLINK,)) +PERFDATA ?= 1 +TRACK_FW_BOOT ?= 1 + +else ifeq ($(BUILD),release) +$(eval $(call BothConfigC,RELEASE,)) +$(eval $(call TunableBothConfigMake,DEBUGLINK,1)) + + +else ifeq ($(BUILD),timing) +$(eval $(call BothConfigC,TIMING,)) +$(eval $(call TunableBothConfigMake,DEBUGLINK,1)) +else +$(error BUILD= must be either debug, release or timing) + +endif # $BUILD + +# Memtest, currently implemented for Linux only +$(eval $(call TunableKernelConfigC,SUPPORT_PHYSMEM_TEST,1)) +$(eval $(call TunableKernelConfigMake,SUPPORT_PHYSMEM_TEST,1)) + + +$(eval $(call TunableKernelConfigMake,KERNEL_DEBUGLINK,,\ +Enable this to store DDK kernel module debugging symbols in separate$(comma) per_\ +module$(comma) .dbg files. These files will not be installed on the target system$(comma)_\ +but can be used by tools$(comma) e.g. gdb$(comma) for offline debugging. 
This may be_\ +desirable when the target system has limited storage space and the kernel_\ +has been configured with CONFIG_DEBUG_INFO=y$(comma) which can have a significant_\ +impact on kernel module size.)) + +$(eval $(call TunableBothConfigMake,COMPRESS_DEBUG_SECTIONS,,\ +Enable compression on debug sections (.zdebug)_\ +May have tool compatibility issues.)) + + +ifneq ($(SUPPORT_ALT_REGBASE),) +$(eval $(call KernelConfigC,SUPPORT_ALT_REGBASE,,\ +Some systems alter the GPU addresses before they reach the bus, e.g. by appending_\ +a MSB bit. In such cases, the GPU regbank in the system address space might alias_\ +from the GPU perspective with memory addresses used by the GPU. For example, if_\ +the GPU regbank is located in the lower 1GB and the 32bit GPU accesses the range_\ +1GB-5GB due to the address alteration. In such cases, the MIPS wrapper must use_\ +an alternate register bank address to avoid aliasing with device memory. The address_\ +used must be an address that is not mapped in the GPU as memory. 
The memory backing_\ +those addresses is never touched since the access is resolved inside the GPU.\ +\)) +endif + +# User-configurable options +# +ifneq ($(RGX_BVNC),) +$(eval $(call TunableKernelConfigC,RGX_BVNC_CORE_KM_HEADER,)) +endif +ifneq ($(RGX_BVNC),) +$(eval $(call TunableKernelConfigC,RGX_BNC_CONFIG_KM_HEADER,)) +endif + +$(eval $(call TunableBothConfigC,PVRSRV_DEBUG_HANDLE_LOCK,,\ +Enable checking that the handle lock is held when a handle reference_\ +count is modified)) + +$(eval $(call TunableBothConfigC,SUPPORT_DBGDRV_EVENT_OBJECTS,1)) +$(eval $(call TunableBothConfigC,PVR_DBG_BREAK_ASSERT_FAIL,,\ +Enable this to treat PVR_DBG_BREAK as PVR_ASSERT(0)._\ +Otherwise it is ignored._\ +)) +$(eval $(call TunableBothConfigC,PDUMP,,\ +Enable parameter dumping in the driver._\ +This adds code to record the parameters being sent to the hardware for_\ +later analysis._\ +)) +$(eval $(call TunableBothConfigC,NO_HARDWARE,,\ +Disable hardware interactions (e.g. register writes) that the driver would_\ +normally perform. 
A driver built with this option can$(apos)t drive hardware$(comma)_\ +but with PDUMP enabled$(comma) it can capture parameters to be played back later._\ +)) +$(eval $(call TunableBothConfigC,PDUMP_DEBUG_OUTFILES,,\ +Add debug information to the pdump script (out2.txt) as it is generated._\ +This includes line numbers$(comma) process names and also enables checksumming_\ +of the binary data dumped to out2.prm which can be verified offline._\ +)) +$(eval $(call TunableKernelConfigC,PVRSRV_NEED_PVR_DPF,1,\ +Enables PVR_DPF messages in the kernel mode driver._\ +)) +$(eval $(call TunableBothConfigC,PVRSRV_NEED_PVR_ASSERT,,\ +Enable this to turn on PVR_ASSERT in release builds._\ +)) +$(eval $(call TunableBothConfigC,PVRSRV_NEED_PVR_TRACE,,\ +Enable this to turn on PVR_TRACE in release builds._\ +)) +$(eval $(call TunableBothConfigC,PVRSRV_NEED_PVR_STACKTRACE_NATIVE,1,\ +Enable this to turn on stack trace functionality requiring only native_\ +operating system features._\ +)) +$(eval $(call TunableBothConfigC,PVRSRV_NEED_PVR_STACKTRACE,,\ +Enable this to turn on stack trace functionality requiring either native_\ +operating system features$(comma) or additional dependencies such as_\ +libunwind._\ +)) +ifeq ($(PVRSRV_NEED_PVR_STACKTRACE),1) +ifeq ($(SUPPORT_ANDROID_PLATFORM),1) +$(warning Since Android O it's not allowed to link to libunwind.) 
+endif +endif +$(eval $(call TunableBothConfigC,REFCOUNT_DEBUG,)) +$(eval $(call TunableBothConfigC,DC_DEBUG,,\ +Enable debug tracing in the DC (display class) server code)) +$(eval $(call TunableBothConfigC,SCP_DEBUG,,\ +Enable debug tracing in the SCP (software command processor)_\ +which is used by the DC.)) +$(eval $(call TunableBothConfigC,SUPPORT_INSECURE_EXPORT,)) +$(eval $(call TunableBothConfigC,SUPPORT_SECURE_EXPORT,,\ +Enable support for secure device memory and sync export._\ +This replaces export handles with file descriptors$(comma) which can be passed_\ +between processes to share memory._\ +)) +$(eval $(call TunableBothConfigC,SUPPORT_DISPLAY_CLASS,,\ +Enable DC (display class) support. Disable if not using a DC display driver.)) +$(eval $(call TunableBothConfigC,PVRSRV_DEBUG_CCB_MAX,)) + +$(eval $(call TunableBothConfigMake,SUPPORT_TRUSTED_DEVICE,)) +$(eval $(call TunableBothConfigC,SUPPORT_TRUSTED_DEVICE,,\ +Enable a build mode targeting an REE._\ +)) + +ifeq ($(SUPPORT_TRUSTED_DEVICE),1) +ifeq ($(NO_HARDWARE),1) +SUPPORT_SECURITY_VALIDATION ?= 1 +endif +endif + +$(eval $(call TunableBothConfigC,SUPPORT_SECURITY_VALIDATION,,\ +Enable DRM security validation mode._\ +)) +$(eval $(call TunableBothConfigMake,SUPPORT_SECURITY_VALIDATION,)) + +$(eval $(call TunableBothConfigC,PM_INTERACTIVE_MODE,,\ +Enable PM interactive mode._\ +)) + +$(eval $(call TunableBothConfigC,SECURE_FW_CODE_OSID,,\ +Emit specified OSID when the FW fetches code from memory._\ +In MIPS this will only work for statically mapped FW code._\ +)) + +ifeq ($(SUPPORT_TRUSTED_DEVICE),1) +override SUPPORT_MIPS_CONTIGUOUS_FW_MEMORY := 1 +endif + + +$(eval $(call TunableBothConfigC,SUPPORT_RGXTQ_BRIDGE,1,\ +Enable RGXTQ bridge which is always present on Rogue cores_\ +)) + + + +$(eval $(call TunableBothConfigC,SUPPORT_MIPS_CONTIGUOUS_FW_MEMORY,,\ +Use a single big allocation for the FW code and another one_\ +for the FW private data._\ +)) + +$(eval $(call 
TunableBothConfigC,SUPPORT_MIPS_UNIFIED_FW_MEMORY,,\
+Map both MIPS FW code and data in a single MIPS TLB entry. Code and data_\
+need to be adjacent in both virtual and physical memory space._\
+))
+
+$(eval $(call TunableBothConfigC,METRICS_USE_ARM_COUNTERS,,\
+Enable usage of hardware performance counters for metrics on ARM platforms._\
+))
+
+
+ifeq ($(SHADER_DEBUG_TOOL),1)
+ ifneq ($(GTRACE_TOOL),1)
+ override GTRACE_TOOL = 1
+ $(warning SHADER_DEBUG_TOOL requires GTRACE_TOOL, so GTRACE_TOOL is being enabled.)
+ endif
+endif
+ifeq ($(GTRACE_TOOL),1)
+ ifeq ($(SUPPORT_ANDROID_PLATFORM),1)
+ endif
+ override PVR_ANNOTATION_MAX_LEN ?= 128
+endif
+
+
+IMG_1_11_OPTS?=0xffffffff
+
+IMG_1_12_OPTS?=0xffffffff
+
+$(eval $(call TunableBothConfigC,ENABLE_EMULATED_LARGE_TEXTURES,1,\
+Enable emulating support for 8k textures on hardware with screen size 4k._\
+))
+
+
+
+# poison values for the Services
+$(eval $(call TunableBothConfigC,PVRSRV_POISON_ON_ALLOC_VALUE,0xd9,\
+Poison on alloc value))
+$(eval $(call TunableBothConfigC,PVRSRV_POISON_ON_FREE_VALUE,0x63,\
+Poison on free value))
+
+$(eval $(call TunableBothConfigC,SUPPORT_MIPS_64K_PAGE_SIZE,,\
+Enable this to change the MIPS FW page size to 64K._\
+))
+
+$(eval $(call TunableBothConfigC,SUPPORT_MULTICORE,,\
+Set to the number of secondary cores contributing to a workload distribution in a multicore configuration._\
+))
+
+#
+# GPU virtualization support
+#
+VMM_TYPE ?= stub
+RGX_FW_HEAP_SHIFT ?= 25
+
+ifeq ($(PVRSRV_VZ_NUM_OSID),0)
+ override PVRSRV_VZ_NUM_OSID := 1
+endif
+
+# Make RGX_NUM_OS_SUPPORTED visible to both UM & KM makefiles
+$(eval $(call BothConfigMake,RGX_NUM_OS_SUPPORTED,$(PVRSRV_VZ_NUM_OSID),))
+# Reveal RGX_NUM_OS_SUPPORTED only to KM code, allowing the firmware makefiles,
+# which are part of the UM, to selectively control this symbol so the same DDK
+# build can create both native and vz-supported firmware binaries
+$(eval $(call KernelConfigC,RGX_NUM_OS_SUPPORTED,$(PVRSRV_VZ_NUM_OSID),\
+The number of firmware supported OSIDs [1 native build : 2+ vz build]))
+
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_DRIVERMODE,0x7FFFFFFF,\
+The driver execution mode. This can be either an override or non-override 32-bit value. An override_\
+value has the MSB bit set and a non-override value has this bit cleared. Excluding this MSB bit_\
+and treating the remaining 31-bit value as a signed integer the mode values are_\
+[-1 native mode : 0 host mode : +1 guest mode]))
+$(eval $(call KernelConfigMake,VMM_TYPE,$(VMM_TYPE),\
+The virtual machine manager type, defaults to stub implementation))
+$(eval $(call TunableBothConfigC,RGX_FW_HEAP_SHIFT,$(RGX_FW_HEAP_SHIFT),\
+Firmware physical heap log2 size per OSID (minimum 4MiB, default 32MiB).))
+
+$(eval $(call TunableBothConfigC,SUPPORT_AUTOVZ,,\
+Enable support for AutoVz mode_))
+
+ifeq ($(SUPPORT_AUTOVZ),1)
+ override RGX_VZ_STATIC_CARVEOUT_FW_HEAPS := 1
+
+ $(eval $(call TunableBothConfigC,SUPPORT_AUTOVZ_HW_REGS,1,\
+ Use dedicated hardware registers for tracking OS states otherwise rely on shared memory._))
+
+ $(eval $(call TunableBothConfigC,PVR_AUTOVZ_WDG_PERIOD_MS,3000,\
+ Time period in milliseconds between the firmware autovz watchdog checks._))
+endif
+
+$(eval $(call TunableBothConfigC,PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR,,\
+The carveout memory used for allocating the firmware MMU pagetables starts by default after_\
+the firmware heap. If a different memory range is reserved for this purpose set its base address._))
+
+$(eval $(call TunableBothConfigC,RGX_VZ_STATIC_CARVEOUT_FW_HEAPS,,\
+Firmware heaps of Guest VMs are allocated from pre-determined carveout memory.))
+
+$(eval $(call TunableBothConfigMake,PVR_ENABLE_PHR,,))
+$(eval $(call TunableBothConfigC,PVR_ENABLE_PHR,,\
+Enable the Periodic Hardware Reset functionality (PHR)._))
+
+# GPUVIRT_VALIDATION default region values used _in the emulator_. 
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_OSIDREGION0MIN,$\
+\"0x00000000 0x04000000 0x10000000 0x18000000 0x20000000 0x28000000 0x30000000 0x38000000\",\
+Array of comma/space separated strings that define the start addresses for all 8 OSids on Region 0.))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_OSIDREGION0MAX,$\
+\"0x3FFFFFFF 0x0FFFFFFF 0x17FFFFFF 0x1FFFFFFF 0x27FFFFFF 0x2FFFFFFF 0x37FFFFFF 0x3FFFFFFF\",\
+Array of comma/space separated strings that define the end addresses for all 8 OSids on Region 0.))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_OSIDREGION1MIN,$\
+\"0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000\",\
+Array of comma/space separated strings that define the start addresses for all 8 OSids on Region 1.))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_OSIDREGION1MAX,$\
+\"0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF\",\
+Array of comma/space separated strings that define the end addresses for all 8 OSids on Region 1.))
+
+$(eval $(call TunableBothConfigC,RGX_FW_IRQ_OS_COUNTERS,))
+$(eval $(call TunableBothConfigC,RGX_IRQ_HYPERV_HANDLER,))
+
+$(eval $(call TunableBothConfigC,FIX_DUSTS_POW_ON_INIT,,\
+Enable WA for power controllers that power up dusts by default._\
+The Firmware powers down the dusts after booting._\
+))
+$(eval $(call TunableKernelConfigMake,SUPPORT_LINUX_DVFS,))
+$(eval $(call TunableKernelConfigC,SUPPORT_LINUX_DVFS,,\
+Enables PVR DVFS implementation to actively change frequency / voltage depending_\
+on current GPU load. Currently only supported on Linux._\
+))
+
+#
+# GPU power monitoring configuration
+#
+
+$(eval $(call TunableBothConfigMake,SUPPORT_POWMON_COMPONENT,1))
+$(eval $(call TunableBothConfigC,SUPPORT_POWMON_COMPONENT,1,\
+Includes power-monitoring component in firmware build._\
+))
+
+ifeq ($(SUPPORT_POWMON_COMPONENT),0)
+# Force disable power-sampling if powmon component not being compiled in
+ override SUPPORT_POWER_SAMPLING_VIA_DEBUGFS := 0
+endif
+
+ifneq ($(SUPPORT_POWMON_COMPONENT),0)
+# Following tunables are only used when power-monitoring present
+$(eval $(call TunableBothConfigC,PVR_POWER_ACTOR,,\
+Enables PVR power actor implementation for registration with a kernel configured_\
+with IPA. Enables power counter measurement timer in the FW which is periodically_\
+read by the host DVFS from the POWER_ESTIMATE register in order to operate within_\
+a governor set power envelope.))
+$(eval $(call TunableBothConfigC,PVR_POWER_ACTOR_MEASUREMENT_PERIOD_MS,10U,\
+Period of time between regular power measurements. Default 10ms))
+$(eval $(call BothConfigC,PVR_POWER_MONITOR_HWPERF,,\
+Enables the generation of hwperf power monitoring packets._\
+This incurs an additional performance cost.))
+$(eval $(call TunableBothConfigC,PVR_POWER_MONITOR_DYNAMIC_ENERGY,,\
+Configures the power monitoring module to calculate dynamic energy_\
+instead of the default total power.))
+$(eval $(call TunableBothConfigC,PVR_POWER_ACTOR_SCALING,,\
+Scaling factor for the dynamic power coefficients.))
+$(eval $(call TunableBothConfigC,SUPPORT_POWER_SAMPLING_VIA_DEBUGFS,,\
+Enable sampling of power counter registers and expose the values via DebugFS power_data file.))
+ ifneq ($(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS),)
+ $(error SUPPORT_POWER_VALIDATION_VIA_DEBUGFS is currently unsupported on Rogue.) 
+ endif
+endif
+
+
+ifneq ($(PVR_POWER_ACTOR),)
+
+
+else
+
+
+endif
+
+$(eval $(call TunableKernelConfigC,DEBUG_HANDLEALLOC_INFO_KM,))
+$(eval $(call TunableKernelConfigC,PVRSRV_RESET_ON_HWTIMEOUT,))
+$(eval $(call TunableKernelConfigC,PVR_LDM_PLATFORM_PRE_REGISTERED,))
+$(eval $(call TunableKernelConfigC,PVR_LDM_DRIVER_REGISTRATION_NAME,"\"$(PVRSRV_MODNAME)\""))
+$(eval $(call TunableBothConfigC,PVRSRV_ENABLE_SYNC_POISONING,,\
+Poison Sync Prim values on free.))
+$(eval $(call TunableBothConfigC,PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN,256))
+$(eval $(call TunableKernelConfigC,SYNC_DEBUG,))
+$(eval $(call TunableKernelConfigC,SUPPORT_DUMP_CLIENT_CCB_COMMANDS,))
+$(eval $(call TunableKernelConfigC,SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT,))
+
+# Disable DDK features when PDUMP is defined
+#
+ifeq ($(PDUMP),1)
+# For PDUMP we deactivate the pending protection because it leads to
+# changing script files because of the additional invalidation kick.
+override SUPPORT_MMU_PENDING_FAULT_PROTECTION := 0
+# Increase the default max annotation length when PDump is enabled
+override PVR_ANNOTATION_MAX_LEN ?= 96
+endif
+
+$(eval $(call TunableKernelConfigC,SUPPORT_MMU_PENDING_FAULT_PROTECTION,1,\
+Activates use of the cleanup worker thread to defer the freeing of page_\
+tables. The cleanup work item will issue an MMU cache invalidation kick_\
+and wait with page table freeing until the cache invalidation has been_\
+confirmed by the FW via a sync._\
+))
+
+$(eval $(call TunableKernelConfigC,SUPPORT_DC_COMPLETE_TIMEOUT_DEBUG,))
+
+$(eval $(call TunableBothConfigC,SUPPORT_PVR_VALGRIND,))
+
+$(eval $(call TunableBothConfigC,ION_DEFAULT_HEAP_NAME,\"system\",\
+In ion enabled DDKs$(comma) this setting should be the default heap name defined in the kernel_\
+where gralloc buffers are allocated by default._\
+This setting is for kernels (>=4.12) which support ION heap query_\
+))
+$(eval $(call TunableBothConfigC,ION_DEFAULT_HEAP_ID_MASK,(1 << ION_HEAP_TYPE_SYSTEM),\
+In ion enabled DDKs$(comma) this setting should be (1 << ION_HEAP_TYPE_xxx)_\
+where ION_HEAP_TYPE_xxx is default heap id defined in the kernel._\
+This setting is for kernels (<4.12) which do not support ION heap query_\
+))
+$(eval $(call TunableBothConfigC,ION_FALLBACK_HEAP_NAME,,\
+In ion enabled DDKs$(comma) use this setting to define fallback heap._\
+This setting is optional (depends if kernel has defined a fallback heap)_\
+))
+$(eval $(call TunableBothConfigC,ION_FALLBACK_HEAP_ID_MASK,,\
+In ion enabled DDKs$(comma) use this setting to define fallback heap id mask._\
+This setting is optional (depends if kernel has defined a fallback heap)_\
+))
+
+EGL_EXTENSION_KHR_GL_COLORSPACE ?= 1
+
+
+ifneq ($(MESA_EGL),1)
+ # The EGL EXT_image_gl_colorspace spec says:
+ # "Can be supported on EGL 1.4 provided that EGL_KHR_gl_colorspace is
+ # implemented, as well as either EGL_KHR_image or EGL_KHR_image_base."
+ #
+ # Note: IMG EGL supports EGL_KHR_image and EGL_KHR_image_base unconditionally.
+ ifeq ($(EGL_EXTENSION_KHR_GL_COLORSPACE),1)
+ endif
+
+
+ # Check for GBM_BACKENDs that aren't understood by the GBM Linux.mk. 
+ $(eval $(call ValidateValues,GBM_BACKEND,$(_supported_gbm_backends)))
+
+endif
+
+
+$(eval $(call TunableBothConfigC,PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY,,\
+Enable this to force the use of PVRSRVMemSet/Copy in the client driver _\
+instead of the built-in libc functions. These implementations are device _\
+memory safe and are used by default on AARCH64 platform._\
+))
+
+$(eval $(call TunableBothConfigC,PVRSRV_BRIDGE_LOGGING,,\
+If enabled$(comma) provides a debugfs entry which logs the number of calls_\
+made to each bridge function._\
+))
+
+$(eval $(call TunableKernelConfigC,PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP,,\
+If enabled it will make kernel threads to sleep indefinitely until signalled._\
+))
+
+# If we are building against a ChromeOS kernel, set this.
+$(eval $(call TunableKernelConfigC,CHROMIUMOS_KERNEL,))
+$(eval $(call TunableKernelConfigMake,CHROMIUMOS_KERNEL,))
+
+
+
+
+ifneq ($(SUPPORT_ANDROID_PLATFORM),1)
+endif
+
+
+DEFERRED_WORKER_THREAD ?= 1
+
+$(eval $(call TunableKernelConfigC,CACHEFLUSH_NO_KMRBF_USING_UMVA,))
+$(eval $(call TunableBothConfigC,CACHEFLUSH_ISA_TYPE,,\
+Specify CPU d-cache maintenance ISA type (i.e. CACHEFLUSH_ISA_TYPE_[X86,ARM64,GENERIC])._\
+))
+
+# Overrides the size of the Vulkan host-visible heap with the supplied size in MB
+
+
+
+
+
+
+# Support version 3 of the loader. Versions 0/1/2 should not be used.
+# Default needs to be set separately otherwise 0 gets replaced with the default
+SUPPORT_LUNARG_LOADER_VERSION ?= 3
+
+ifneq ($(SUPPORT_ANDROID_PLATFORM),1)
+ifeq ($(WINDOW_SYSTEM),lws-generic)
+# Check for SUPPORT_VK_PLATFORMS that aren't understood by the Vulkan Linux.mk.
+# Platform names match Mesa where possible. 
+_supported_vk_platforms := null tizen wayland x11
+$(eval $(call ValidateValues,SUPPORT_VK_PLATFORMS,$(_supported_vk_platforms)))
+
+else
+endif
+
+ifneq ($(SUPPORT_VK_PLATFORMS),)
+endif
+endif
+
+ifeq ($(PVR_BLOB_CACHE_DEBUG),1)
+$(eval $(call BothConfigC,BLOB_CACHE_DEBUG,))
+endif
+
+$(eval $(call TunableBothConfigC,PVR_BLOB_CACHE_SIZE_MEGABYTES,20,\
+Set the Linux blob cache size in number of megabytes._\
+))
+
+$(eval $(call TunableBothConfigMake,PDUMP,))
+$(eval $(call TunableBothConfigMake,SUPPORT_INSECURE_EXPORT,))
+$(eval $(call TunableBothConfigMake,SUPPORT_SECURE_EXPORT,))
+$(eval $(call TunableBothConfigMake,SUPPORT_DISPLAY_CLASS,))
+$(eval $(call TunableBothConfigMake,CLDNN,,\
+Build CLDNN graph libraries._\
+))
+$(eval $(call TunableBothConfigC,SUPPORT_EXTRA_METASP_DEBUG,,\
+Enable extra debug information using the META Slave Port._\
+Checks the validity of the Firmware code and dumps sync values_\
+using the GPU memory subsystem via the META Slave Port._\
+))
+$(eval $(call TunableBothConfigC,TRACK_FW_BOOT,,Enable FW boot tracking.))
+# Required to pass the build flag to the META FW makefile
+
+$(eval $(call TunableBothConfigMake,OPTIM,,\
+Specify the optimisation flags passed to the compiler. Normally this_\
+is autoconfigured based on the build type._\
+))
+
+
+
+
+
+
+
+
+# Enables the pre-compiled header use. 
+
+# Switch among glsl frontend compiler variants
+
+RGX_TIMECORR_CLOCK ?= mono
+$(eval $(call TunableKernelConfigMake,RGX_TIMECORR_CLOCK,mono,\
+Specifies which clock source will be used for time correlation tables and_\
+HWPerf))
+ifeq ($(RGX_TIMECORR_CLOCK),mono)
+PVRSRV_APPHINT_TIMECORRCLOCK=0
+else ifeq ($(RGX_TIMECORR_CLOCK),mono_raw)
+PVRSRV_APPHINT_TIMECORRCLOCK=1
+else ifeq ($(RGX_TIMECORR_CLOCK),sched)
+PVRSRV_APPHINT_TIMECORRCLOCK=2
+else
+$(error Wrong value ($(RGX_TIMECORR_CLOCK)) for RGX_TIMECORR_CLOCK)
+endif
+
+# HWR is enabled by default
+HWR_DEFAULT_ENABLED ?= 1
+$(eval $(call TunableBothConfigC,HWR_DEFAULT_ENABLED,))
+
+# Build-only AppHint configuration values
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT,APPHNT_BLDVAR_DBGDUMPLIMIT,\
+Limit for the number of HWR debug dumps produced))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG,IMG_FALSE,\
+Enable trusted device ACE config))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE,0x4000,\
+Alternate general heap page size (i.e. 4K,16K [default],64K,256K,1M,2M)))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE,786432,\
+Buffer size in bytes for client HWPerf streams))
+
+# PDUMP AppHint defaults
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLESIGNATURECHECKS,APPHNT_BLDVAR_ENABLESIGNATURECHECKS,\
+Enable signature checks))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE,RGXFW_SIG_BUFFER_SIZE_MIN,\
+Buffer size in bytes for storing signature check data))
+
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING,IMG_FALSE,\
+Enable full sync tracking))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG,APPHNT_BLDVAR_ENABLEPAGEFAULTDEBUG,\
+Enable page fault debug))
+
+# Validation AppHint defaults
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_VALIDATEIRQ,0,\
+Used to validate the interrupt integration. 
\
+Enables extra code in the FW to assert all interrupt lines \
+at boot and polls on the host side. The code is only built when \
+generating pdumps for nohw targets.))
+
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_DISABLECLOCKGATING,0,\
+Disable GPU clock gating))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_DISABLEDMOVERLAP,0,\
+Disable GPU data master overlapping))
+
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLECDMKILLINGRANDMODE,0,\
+Enable random killing of the compute data master))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLERANDOMCONTEXTSWITCH,0,\
+Enable random context switching of all DMs for debug and testing purposes))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLESOFTRESETCNTEXTSWITCH,0,\
+Enable soft GPU resets on context switching))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH,RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL,\
+Enable firmware context switching))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_VDMCONTEXTSWITCHMODE,RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX,\
+Enable VDM context switching mode))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLERDPOWERISLAND,RGX_RD_POWER_ISLAND_DEFAULT,\
+Enable RD power island))
+
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_FIRMWAREPERF,FW_PERF_CONF_NONE,\
+Force the initial Firmware Performance Configuration to the specified value))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE,RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN,\
+Firmware context switch profile))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFDISABLECUSTOMCOUNTERFILTER,0,\
+Force the initial HW Performance Custom Counter Filter value))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB,2048,\
+Buffer size in KB of the hardware performance GPU buffer))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB,2048,\
+Buffer size in KB of the hardware performance host buffer))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS,50,\
+Timeout in milliseconds 
of the hardware performance host thread))
+
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_JONESDISABLEMASK,0,\
+Disable Jones))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_NEWFILTERINGMODE,1,\
+Enable new TPU filtering mode))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_TRUNCATEMODE,0,\
+Truncate mode))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_EMUMAXFREQ,0,\
+Set the maximum frequency for the emulator))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_GPIOVALIDATIONMODE,0,\
+Set the validation type for GPIO 1 for the standard protocol validation\
+2 for the address protocol validation))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_RGXBVNC,\"\",\
+Array of comma separated strings that define BVNC info of the devices.\
+(module parameter usage is RGXBVNC=x.x.x.x,y.y.y.y etc)))
+
+# Build-only AppHint configuration values
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_CLEANUPTHREADPRIORITY,5,\
+Set the priority of the cleanup thread (0 - default, 1 - highest, 5 - lowest)))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY,0,\
+Set the priority of the watchdog thread (0 - default, 1 - highest, 5 - lowest)))
+
+# Debugfs AppHint configuration values
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ASSERTONHWRTRIGGER,IMG_FALSE,\
+Enable firmware assert when an HWR event is triggered))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ASSERTOUTOFMEMORY,IMG_FALSE,\
+Enable firmware assert when the TA raises out-of-memory))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_CHECKMLIST,APPHNT_BLDVAR_DEBUG,\
+Enable firmware MLIST consistency checker))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_DISABLEFEDLOGGING,IMG_FALSE,\
+Disable fatal error detection debug dumps))
+
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLEAPM,RGX_ACTIVEPM_DEFAULT,\
+Force the initial driver APM configuration to the specified value))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLEHTBLOGGROUP,0,\
+Enable host trace log groups))
+$(eval $(call 
AppHintFlagsConfigC,PVRSRV_APPHINT_ENABLELOGGROUP,RGXFWIF_LOG_TYPE_NONE,\
+RGXFWIF_LOG_TYPE_GROUP_,BIF CLEANUP CSW DEBUG DMA HWP HWR MAIN MTS PM POW RPM RTD SPM,\
+Enable firmware trace log groups))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_FIRMWARELOGTYPE,0,\
+Specify output mechanism for firmware log data. 0 = TRACE and 1 = TBI))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_FWTRACEBUFSIZEINDWORDS,$\
+RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS,Buffer size in DWords for FW trace log data))
+
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE,0,\
+Override system layer FBCDC version settings \
+(0) No override \
+(1) Force v3 \
+(2) Force v3.1))
+
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HTBOPERATIONMODE,HTB_OPMODE_DROPOLDEST,\
+Configure host trace buffer behaviour))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HTBUFFERSIZE,64,\
+Buffer size in Kbytes for Host Trace log data))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLEFTRACEGPU,IMG_FALSE,\
+Enables generation of GPU FTrace events))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFFWFILTER,0,\
+Mask used to select GPU events to log for performance))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFHOSTFILTER,0,\
+Mask used to select host events to log for performance))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFCLIENTFILTER_SERVICES,0,\
+Mask used to select client Services events to log for performance))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFCLIENTFILTER_EGL,0,\
+Mask used to select client EGL events to log for performance))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGLES,0,\
+Mask used to select client OpenGLES events to log for performance))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENCL,0,\
+Mask used to select client OpenCL events to log for performance))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFCLIENTFILTER_VULKAN,0,\
+Mask used to select client Vulkan events to log for performance))
+$(eval $(call 
AppHintConfigC,PVRSRV_APPHINT_TIMECORRCLOCK,0,\
+Select HWPerf clock))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLEFWPOISONONFREE,IMG_FALSE,\
+Enables poisoning of firmware allocations when freed))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_FWPOISONONFREEVALUE,0xBD,\
+Poison value when PVRSRV_APPHINT_ENABLEFWPOISONONFREE is enabled))
+
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ZEROFREELIST,IMG_FALSE,\
+Zero freelist memory during freelist reconstruction as part of HWR))
+
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_GPUUNITSPOWERCHANGE,IMG_FALSE,\
+Setting this to '1' enables a test mode to dynamically change the_\
+DUSTs powered before each kick to the FW))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_DISABLEPDUMPPANIC,IMG_FALSE,\
+Disable PDUMP panic))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_CACHEOPCONFIG,0,\
+CPU d-cache maintenance framework flush type configuration))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_CACHEOPUMKMHRESHOLDSIZE,0,\
+CPU d-cache maintenance framework UM/KM threshold configuration))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC,IMG_FALSE,\
+Ignore BVNC reported by HW and use the value specified at build time))
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_PHYSMEMTESTPASSES,$\
+APPHNT_PHYSMEMTEST_ENABLE,Set number of passes of driver start time MemTest))
+
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_TESTSLRINTERVAL,0,\
+Interval which determines after how many 3D kicks the driver will_\
+insert an additional unsignalled sync checkpoint to the 3D fence_\
+allowing testing of SLR))
+
+# GLSL compiler options
+ifeq ($(BUILD),debug)
+DUMP_LOGFILES ?= 1
+endif
+# end of GLSL compiler options
+
+
+$(eval $(call TunableBothConfigC,SUPPORT_AXI_ACE_TEST,,\
+Enable this to add extra FW code for the AXI ACE unittest._\
+))
+
+
+TQ_CAPTURE_PARAMS ?= 1
+
+TQ_DISABLE_SPARSE ?= 0
+
+$(eval $(call TunableBothConfigC,RGXFW_DEBUG_LOG_GROUP,,\
+Enable the usage of DEBUG log group in the Firmware logs._\
+))
+
+$(eval 
$(call TunableBothConfigC,SUPPORT_SOC_TIMER,,\
+Enable the use of the SoC timer. When enabled the SoC system layer must implement the pfnSoCTimerRead_\
+callback that when invoked returns the current value of the SoC timer._\
+))
+$(eval $(call TunableBothConfigC,SOC_TIMER_FREQ,20,\
+The SoC timer frequency. This defaults to 20 MHz if not specified_\
+))
+
+#
+# Tile Lifetime tracking support.
+#
+ifeq ($(SUPPORT_COMPUTE_ONLY),)
+ifeq ($(SUPPORT_VALIDATION),1)
+SUPPORT_TLT_PERF ?= 1
+endif
+endif
+
+
+#
+# Strip Rendering support.
+#
+$(eval $(call TunableBothConfigMake,SUPPORT_STRIP_RENDERING,,\
+Enabling this feature provides the ability for the firmware to drive the_\
+display controller via GPIO and support Strip Rendering._\
+))
+$(eval $(call TunableBothConfigC,SUPPORT_STRIP_RENDERING,))
+
+$(eval $(call TunableBothConfigMake,SUPPORT_DEDICATED_FW_MEMORY,,\
+Allocate FW code and private data from dedicated FW memory._\
+))
+$(eval $(call TunableBothConfigC,SUPPORT_DEDICATED_FW_MEMORY,))
+
+#
+# Ensure top-level PDVFS build defines are set correctly
+#
+ifeq ($(SUPPORT_PDVFS),1)
+SUPPORT_WORKLOAD_ESTIMATION ?= 1
+#else
+#ifeq ($(SUPPORT_WORKLOAD_ESTIMATION),1)
+#SUPPORT_PDVFS ?= 1
+#endif
+endif
+
+#
+# Ensure PDVFS comms. 
protocol is compatible with RGX GPIO mode
+#
+ifneq ($(PDVFS_COM),)
+ifneq ($(PDVFS_COM),PDVFS_COM_HOST)
+SUPPORT_PDVFS ?= 1
+SUPPORT_WORKLOAD_ESTIMATION ?= 1
+DISABLE_GPU_FREQUENCY_CALIBRATION ?= 1
+ifeq ($(PDVFS_COM),PDVFS_COM_AP)
+ifneq ($(PVR_GPIO_MODE),)
+ifneq ($(PVR_GPIO_MODE),PVR_GPIO_MODE_GENERAL)
+# GPIO cannot be used for power monitoring with PDVFS_COM_AP
+$(error PDVFS_COM_AP is compatible with PVR_GPIO_MODE_GENERAL only)
+endif
+endif
+endif
+endif
+endif
+
+ifeq ($(SUPPORT_PDVFS),1)
+$(eval $(call BothConfigMake,SUPPORT_PDVFS,1,\
+Enabling this feature enables proactive dvfs in the firmware._\
+))
+$(eval $(call BothConfigC,SUPPORT_PDVFS,1))
+
+$(eval $(call BothConfigC,SUPPORT_PDVFS_IDLE,$(SUPPORT_PDVFS_IDLE),\
+This enables idle management in PDVFS._\
+))
+endif
+
+ifeq ($(SUPPORT_WORKLOAD_ESTIMATION),1)
+$(eval $(call BothConfigMake,SUPPORT_WORKLOAD_ESTIMATION,1,\
+Enabling this feature enables workload intensity estimation from a workloads_\
+characteristics and assigning a deadline to it._\
+))
+$(eval $(call BothConfigC,SUPPORT_WORKLOAD_ESTIMATION,1))
+endif
+
+#
+# These specify how PDVFS OPP values are sent by the firmware
+#
+$(eval $(call BothConfigMake,PDVFS_COM_HOST,1,\
+Enables host shared-memory protocol._\
+))
+$(eval $(call BothConfigC,PDVFS_COM_HOST,1))
+
+$(eval $(call BothConfigMake,PDVFS_COM_AP,2,\
+Enables GPIO address protocol._\
+))
+$(eval $(call BothConfigC,PDVFS_COM_AP,2))
+
+$(eval $(call BothConfigMake,PDVFS_COM_PMC,3,\
+Enables GPIO power management controller protocol._\
+))
+$(eval $(call BothConfigC,PDVFS_COM_PMC,3))
+
+$(eval $(call BothConfigMake,PDVFS_COM_IMG_CLKDIV,4,\
+Enables GPIO clock divider control protocol._\
+))
+$(eval $(call BothConfigC,PDVFS_COM_IMG_CLKDIV,4))
+
+ifeq ($(SUPPORT_STRIP_RENDERING),1)
+PDVFS_COM ?= PDVFS_COM_AP
+else
+PDVFS_COM ?= PDVFS_COM_HOST
+endif
+
+$(eval $(call BothConfigMake,PDVFS_COM,$(PDVFS_COM)))
+$(eval $(call BothConfigC,PDVFS_COM,$(PDVFS_COM)))
+
+#
+# These specify how RGX 
GPIO port is used by the firmware.
+#
+$(eval $(call BothConfigMake,PVR_GPIO_MODE_GENERAL,1,\
+Enable basic send and receive using GPIO._\
+))
+$(eval $(call BothConfigC,PVR_GPIO_MODE_GENERAL,1))
+
+$(eval $(call BothConfigMake,PVR_GPIO_MODE_POWMON_PIN,2,\
+Enables PMC power monitoring using GPIO._\
+))
+$(eval $(call BothConfigC,PVR_GPIO_MODE_POWMON_PIN,2))
+
+PVR_GPIO_MODE ?= PVR_GPIO_MODE_GENERAL
+$(eval $(call BothConfigMake,PVR_GPIO_MODE,$(PVR_GPIO_MODE)))
+$(eval $(call BothConfigC,PVR_GPIO_MODE,$(PVR_GPIO_MODE)))
+
+# If NDK_ROOT is set, SUPPORT_WORKLOAD_ESTIMATION can't be, because the
+# ANDROID_WSEGL module uses APIs (binder, gui) which are not in the NDK.
+ifeq ($(SUPPORT_WORKLOAD_ESTIMATION),1)
+ ifneq ($(PVR_ANDROID_HAS_COMPOSITION_TIMINGS),1)
+ ifneq ($(NDK_ROOT),)
+ $(error SUPPORT_WORKLOAD_ESTIMATION and NDK_ROOT are incompatible features)
+ endif
+ ifeq ($(EGL_WSEGL_DIRECTLY_LINKED),1)
+ $(error EGL_WSEGL_DIRECTLY_LINKED and SUPPORT_WORKLOAD_ESTIMATION are not supported at the same time)
+ endif
+ endif
+endif
+
+$(eval $(call TunableKernelConfigMake,PVR_HANDLE_BACKEND,idr,\
+Specifies the back-end that should be used$(comma) by the Services kernel handle_\
+interface$(comma) to allocate handles. The available backends are:_\
+* generic (OS agnostic)_\
+* idr (Uses the Linux IDR interface)_\
+))
+
+
+$(eval $(call TunableBothConfigC,PVRSRV_ENABLE_PROCESS_STATS,1,\
+Enable the collection of Process Statistics in the kernel Server module._\
+Feature on by default. Driver_stats summary presented in DebugFS on Linux._\
+))
+
+$(eval $(call TunableBothConfigC,PVRSRV_DEBUG_LINUX_MEMORY_STATS,,\
+Present Process Statistics memory stats in a more detailed manner to_\
+assist with debugging and finding memory leaks (under Linux only)._\
+))
+
+$(eval $(call TunableBothConfigC,PVRSRV_ENABLE_PERPID_STATS,,\
+Enable the presentation of process statistics in the kernel Server module._\
+Feature off by default. 
\
+))
+
+$(eval $(call TunableBothConfigC,SUPPORT_SHARED_SLC,,\
+When the SLC is shared the SLC reset is performed by the System layer when_\
+calling RGXInitSLC and not the GPU driver. Define this for system layer_\
+SLC handling. \
+))
+
+# SUPPORT_DMABUF_BRIDGE is set to include the dmabuf.brg in bridge generation
+# by default for all Linux based builds.
+$(eval $(call TunableBothConfigMake,SUPPORT_DMABUF_BRIDGE,1))
+
+# SUPPORT_USC_BREAKPOINT is set to include the rgxbreakpoint.brg in bridge generation
+# and to enable USC breakpoint in FW. Enabled by default for all Linux based builds.
+#
+SUPPORT_USC_BREAKPOINT ?= 1
+$(eval $(call TunableBothConfigMake,SUPPORT_USC_BREAKPOINT,))
+$(eval $(call TunableBothConfigC,SUPPORT_USC_BREAKPOINT,,Enable the USC breakpoint support))
+
+# EXCLUDE_CMM_BRIDGE is set to exclude the cmm.brg bridge in
+# the Kernel. This is disabled by default for release builds.
+#
+$(eval $(call TunableBothConfigMake,EXCLUDE_CMM_BRIDGE,))
+$(eval $(call TunableBothConfigC,EXCLUDE_CMM_BRIDGE,,Disables the cmm bridge))
+
+# EXCLUDE_HTBUFFER_BRIDGE is set to exclude the htbuffer.brg bridge in
+# the Kernel. This is disabled by default for release builds.
+#
+$(eval $(call TunableBothConfigMake,EXCLUDE_HTBUFFER_BRIDGE,))
+$(eval $(call TunableBothConfigC,EXCLUDE_HTBUFFER_BRIDGE,,Disables the htbuffer bridge))
+
+# EXCLUDE_RGXREGCONFIG_BRIDGE is set to exclude the rgxregconfig.brg bridge in
+# the Kernel. This is disabled by default for release builds.
+#
+$(eval $(call TunableBothConfigMake,EXCLUDE_RGXREGCONFIG_BRIDGE,))
+$(eval $(call TunableBothConfigC,EXCLUDE_RGXREGCONFIG_BRIDGE,,Disables the RGX regconfig bridge))
+
+# PVRSRV_ENABLE_GPU_MEMORY_INFO is set to enable RI annotation of devmem allocations
+# This is enabled by default for debug builds.
+#
+$(eval $(call TunableBothConfigMake,PVRSRV_ENABLE_GPU_MEMORY_INFO,))
+$(eval $(call TunableBothConfigC,PVRSRV_ENABLE_GPU_MEMORY_INFO,,\
+Enable Resource Information (RI) debug. 
This logs details of_\
+resource allocations with annotation to help indicate their use._\
+))
+
+# PVRSRV_DEBUG_CSW_STATE is set to provide context switch task information
+# to FSIM. This is disabled by default for debug and release builds.
+
+ifeq ($(PDUMP),1)
+# Force enable TBI interface in PDUMP mode
+ override SUPPORT_TBI_INTERFACE :=1
+endif
+
+ifeq ($(VIRTUAL_PLATFORM),1)
+# Force enable TBI interface for VIRTUAL_PLATFORM
+ override SUPPORT_TBI_INTERFACE :=1
+endif
+
+$(eval $(call TunableBothConfigC,SUPPORT_TBI_INTERFACE,,\
+Enable TBI interface support for firmware._\
+))
+
+$(eval $(call TunableBothConfigC,SUPPORT_FIRMWARE_GCOV,,\
+Enable gcov support for firmware._\
+))
+
+
+ifeq ($(PVRSRV_ENABLE_GPU_MEMORY_INFO),1)
+# Increase the default annotation max length to 96 if RI_DEBUG is enabled
+override PVR_ANNOTATION_MAX_LEN ?= 96
+endif
+
+# Default annotation max length to 63 if no other debug options are specified
+$(eval $(call TunableBothConfigC,PVR_ANNOTATION_MAX_LEN,63,\
+Defines the max length for PMR$(comma) MemDesc$(comma) Device_\
+Memory History and RI debug annotations stored in memory.\
+))
+
+$(eval $(call TunableKernelConfigC,DEBUG_BRIDGE_KM,,\
+Enable Services bridge debugging and bridge statistics output_\
+))
+
+# if PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE has been specified then ensure
+# PVRSRV_ENABLE_CCCB_UTILISATION_INFO is enabled
+ifeq ($(PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE),1)
+override PVRSRV_ENABLE_CCCB_UTILISATION_INFO := 1
+endif
+
+$(eval $(call TunableKernelConfigC,PVRSRV_ENABLE_CCCB_UTILISATION_INFO,1,\
+Calculate high watermarks of all the client CCBs and print a warning if the_\
+watermarks touched a certain threshold value (90% by default) of the cCCB allocation size._\
+))
+
+$(eval $(call TunableKernelConfigC,PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD,90,\
+Default threshold (as a percentage) for the PVRSRV_ENABLE_CCCB_UTILISATION_INFO feature._\
+))
+
+$(eval $(call 
TunableKernelConfigC,PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE,,\
+Provides more information to PVRSRV_ENABLE_CCCB_UTILISATION_INFO messages._\
+))
+
+$(eval $(call TunableBothConfigC,PVRSRV_ENABLE_MEMORY_STATS,,\
+Enable Memory allocations to be recorded and published via Process Statistics._\
+))
+
+$(eval $(call TunableKernelConfigC,PVRSRV_ENABLE_MEMTRACK_STATS_FILE,,\
+Enable the memtrack_stats debugfs file when not on an Android platform._\
+))
+
+$(eval $(call TunableBothConfigC,PVRSRV_STRICT_COMPAT_CHECK,,\
+Enable strict mode of checking all the build options between um & km._\
+The driver may fail to load if there is any mismatch in the options._\
+))
+
+$(eval $(call TunableBothConfigC,PVR_LINUX_PHYSMEM_MAX_POOL_PAGES,10240,\
+Defines how many pages the page cache should hold.))
+
+$(eval $(call TunableBothConfigC,PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES,20480,\
+We double check if we would exceed this limit if we are below MAX_POOL_PAGES_\
+and want to add an allocation to the pool._\
+This prevents big allocations being given back to the OS just because they_\
+exceed the MAX_POOL_PAGES limit even though the pool is currently empty._\
+))
+
+$(eval $(call TunableBothConfigC,PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES,,\
+All device memory allocated from the OS via the Rogue driver will be zeroed_\
+when this is defined. This may not be necessary in closed platforms where_\
+undefined data from previous use in device memory is acceptable._\
+This feature may change the performance signature of the drivers memory_\
+allocations on some platforms and kernels._\
+))
+
+PVR_LINUX_PHYSMEM_SUPPRESS_DMA_AC ?= 0
+$(eval $(call TunableKernelConfigC,PVR_LINUX_PHYSMEM_SUPPRESS_DMA_AC,PVR_LINUX_PHYSMEM_SUPPRESS_DMA_AC,\
+Higher order page requests on Linux use dma_alloc_coherent but on some systems_\
+it could return pages from high memory and map those to the vmalloc space._\
+Since graphics demand a lot of memory the system could quickly exhaust the_\
+vmalloc space. 
Setting this define will suppress the use of dma_alloc_coherent_\ +and fall back to use alloc_pages and not map them to vmalloc space unless_\ +requested explicitly by the driver._\ +)) + +$(eval $(call TunableKernelConfigC,PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY,,\ +GPU buffers are allocated from the highmem region by default._\ +Only affects 32bit systems and devices with DMA_BIT_MASK equal to 32._\ +)) + +$(eval $(call TunableKernelConfigC,PVR_PMR_TRANSLATE_UMA_ADDRESSES,,\ +Requests for physical addresses from the PMR will translate the addresses_\ +retrieved from the PMR-factory from CpuPAddrToDevPAddr. This can be used_\ +for systems where the GPU has a different view onto the system memory_\ +compared to the CPU._\ +)) + +$(eval $(call TunableBothConfigC,PVR_MMAP_USE_VM_INSERT,,\ +If enabled Linux will always use vm_insert_page for CPU mappings._\ +vm_insert_page was found to be slower than remap_pfn_range on ARM kernels_\ +but guarantees full memory accounting for the process that mapped the memory._\ +The slowdown in vm_insert_page is caused by a dcache flush_\ +that is only implemented for ARM and a few other architectures._\ +This tunable can be enabled to debug memory issues. On x86 platforms_\ +we always use vm_insert_page independent of this tunable._\ +)) + +$(eval $(call TunableBothConfigC,PVR_DIRTY_BYTES_FLUSH_THRESHOLD,524288,\ +When allocating uncached or write-combine memory we need to invalidate the_\ +CPU cache before we can use the acquired pages; also when using cached memory_\ +we need to clean/flush the CPU cache before we transfer ownership of the_\ +memory to the device. 
This threshold defines at which number of pages expressed_\ +in bytes we want to do a full cache flush instead of invalidating pages one by one._\ +Default value is 524288 bytes or 128 pages; ideal value depends on SoC cache size._\ +)) + +$(eval $(call TunableBothConfigC,PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD,256,\ +Allocate OS pages in 2^(order) chunks if more than this threshold were requested_\ +)) + +PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER ?= 2 +$(eval $(call TunableBothConfigC,PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM,$(PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER),\ +Allocate OS pages in 2^(order) chunks to help reduce duration of large allocations_\ +)) + +$(eval $(call TunableBothConfigC,PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD,16384,\ +Choose the threshold at which allocation size the driver uses vmalloc instead of_\ +kmalloc. On highly fragmented systems large kmallocs can fail because it requests_\ +physically contiguous pages. All allocations bigger than this define use vmalloc._\ +)) + +$(eval $(call TunableBothConfigMake,SUPPORT_WRAP_EXTMEM,)) +$(eval $(call TunableBothConfigC,SUPPORT_WRAP_EXTMEM,,\ +This enables support for the Services API function PVRSRVWrapExtMem()_\ +which takes a CPU virtual address with size and imports the physical memory_\ +behind the CPU virtual addresses into Services for use with the GPU. It_\ +returns a memory descriptor that can be used with the usual services_\ +interfaces. On Linux the preferred method to import memory into the driver_\ +is to use the DMABuf API._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_WRAP_EXTMEM_WRITE_ATTRIB_ENABLE,,\ +Setting this option enables the write attribute for all the device mappings acquired_\ +through the PVRSRVWrapExtMem interface. 
Otherwise the option is disabled by default._\ +)) + +ifeq ($(PDUMP),1) +$(eval $(call TunableKernelConfigC,PDUMP_PARAM_INIT_STREAM_SIZE,0x200000,\ +Default size of pdump param init buffer is 2MB)) +$(eval $(call TunableKernelConfigC,PDUMP_PARAM_MAIN_STREAM_SIZE,0x1000000,\ +Default size of PDump param main buffer is 16 MB)) +$(eval $(call TunableKernelConfigC,PDUMP_PARAM_DEINIT_STREAM_SIZE,0x10000,\ +Default size of PDump param deinit buffer is 64KB)) +# Default size of PDump param block buffer is 0KB as it is currently not in use +$(eval $(call TunableKernelConfigC,PDUMP_PARAM_BLOCK_STREAM_SIZE,0x0,\ +Default size of PDump param block buffer is 0KB)) +$(eval $(call TunableKernelConfigC,PDUMP_SCRIPT_INIT_STREAM_SIZE,0x80000,\ +Default size of PDump script init buffer is 512KB)) +$(eval $(call TunableKernelConfigC,PDUMP_SCRIPT_MAIN_STREAM_SIZE,0x800000,\ +Default size of PDump script main buffer is 8MB)) +$(eval $(call TunableKernelConfigC,PDUMP_SCRIPT_DEINIT_STREAM_SIZE,0x10000,\ +Default size of PDump script deinit buffer is 64KB)) +$(eval $(call TunableKernelConfigC,PDUMP_SCRIPT_BLOCK_STREAM_SIZE,0x800000,\ +Default size of PDump script block buffer is 8MB)) +$(eval $(call TunableKernelConfigC,PDUMP_SPLIT_64BIT_REGISTER_ACCESS,,\ + Split 64 bit RGX register accesses into two 32 bit)) +endif + +# Fence Sync build tunables +# Default values dependent on WINDOW_SYSTEM and found in window_system.mk +# +$(eval $(call TunableBothConfigMake,SUPPORT_NATIVE_FENCE_SYNC,$(SUPPORT_NATIVE_FENCE_SYNC))) +$(eval $(call TunableBothConfigC,SUPPORT_NATIVE_FENCE_SYNC,,\ +Use the Linux native fence sync back-end with timelines and fences)) + +$(eval $(call TunableBothConfigMake,SUPPORT_FALLBACK_FENCE_SYNC,)) +$(eval $(call TunableBothConfigC,SUPPORT_FALLBACK_FENCE_SYNC,,\ +Use Services OS agnostic fallback fence sync back-end with timelines and fences)) + +$(eval $(call TunableBothConfigC,PVRSRV_STALLED_CCB_ACTION,1,\ +This determines behaviour of DDK on detecting that a cCCB_\ 
+has stalled (failed to progress for a number of seconds when GPU is idle):_\ + "" = Output warning message to kernel log only_\ + "1" = Output warning message and additionally try to unblock cCCB by_\ + erroring sync checkpoints on which it is fenced (the value of any_\ + sync prims in the fenced will remain unmodified)_\ +)) + +# Fallback and native sync implementations are mutually exclusive because they +# both offer an implementation for the same interface +ifeq ($(SUPPORT_FALLBACK_FENCE_SYNC),1) +ifeq ($(SUPPORT_NATIVE_FENCE_SYNC),1) +$(error Choose either SUPPORT_NATIVE_FENCE_SYNC=1 or SUPPORT_FALLBACK_FENCE_SYNC=1 but not both) +endif +endif + +ifeq ($(SUPPORT_NATIVE_FENCE_SYNC),1) +PVR_USE_LEGACY_SYNC_H ?= 1 + +endif + +$(eval $(call TunableBothConfigC,PVRSRV_SYNC_CHECKPOINT_CCB,,\ +Enabling this feature enables use of the sync checkpoint CCB._\ +)) + +ifeq ($(SUPPORT_NATIVE_FENCE_SYNC),1) +ifneq ($(KERNEL_VERSION),) +ifeq ($(CHROMIUMOS_KERNEL),1) +ifeq ($(CHROMIUMOS_KERNEL_HAS_DMA_FENCE),1) +KERNEL_COMPATIBLE_WITH_OLD_ANS := 0 +else +KERNEL_COMPATIBLE_WITH_OLD_ANS := 1 +endif +else +KERNEL_COMPATIBLE_WITH_OLD_ANS := $(shell ( [ $(KERNEL_VERSION) -lt 4 ] || \ +[ $(KERNEL_VERSION) -eq 4 -a $(KERNEL_PATCHLEVEL) -lt 6 ] ) && echo 1 || echo 0) +endif +ifneq ($(KERNEL_COMPATIBLE_WITH_OLD_ANS),1) +# DMA fence objects are only supported when using checkpoints +override SUPPORT_DMA_FENCE := 1 +endif +KERNEL_COMPATIBLE_WITH_OLD_ANS := +endif +endif + +# This value is needed by ta/3d kick for early command size calculation. 
+ifeq ($(SUPPORT_NATIVE_FENCE_SYNC),1) +ifeq ($(SUPPORT_DMA_FENCE),) +$(eval $(call KernelConfigC,UPDATE_FENCE_CHECKPOINT_COUNT,2)) +else +$(eval $(call KernelConfigC,UPDATE_FENCE_CHECKPOINT_COUNT,1)) +endif +else +$(eval $(call KernelConfigC,UPDATE_FENCE_CHECKPOINT_COUNT,1)) +endif + +$(eval $(call TunableKernelConfigMake,SUPPORT_DMA_FENCE,)) + +$(eval $(call BothConfigC,PVR_DRM_NAME,"\"pvr\"")) + + + +$(eval $(call TunableKernelConfigC,PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS,,\ +If enabled$(comma) all kernel mappings will use vmap/vunmap._\ +vmap/vunmap is slower than vm_map_ram/vm_unmap_ram and can_\ +even have bad peaks taking up to 100x longer than vm_map_ram._\ +The disadvantage of vm_map_ram is that it can lead to vmalloc space_\ +fragmentation that can lead to vmalloc space exhaustion on 32 bit Linux systems._\ +This flag only affects 64 bit Linux builds$(comma) on 32 bit we always default_\ +to use vmap because of the described fragmentation problem._\ +)) + +$(eval $(call TunableBothConfigC,DEVICE_MEMSETCPY_ALIGN_IN_BYTES,16,\ +Sets pointer alignment (in bytes) for PVRSRVDeviceMemSet/Copy._\ +This value should reflect memory bus width e.g. if the bus is 64 bits_\ +wide this value should be set to 8 bytes (though it's not a hard requirement)._\ +)) + + +$(eval $(call TunableKernelConfigC,PVRSRV_DEBUG_LISR_EXECUTION,,\ +Collect information about the last execution of the LISR in order to_\ +debug interrupt handling timeouts._\ +)) + +$(eval $(call TunableKernelConfigC,PVRSRV_TIMER_CORRELATION_HISTORY,,\ +Collect information about timer correlation data over time._\ +)) + +$(eval $(call TunableKernelConfigC,DISABLE_GPU_FREQUENCY_CALIBRATION,,\ +Disable software estimation of the GPU frequency done on the Host and used_\ +for timer correlation._\ +)) + +$(eval $(call TunableKernelConfigC,RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS,0,\ +Period (in ms) for which any Sync Lockup Recovery (SLR) behaviour should be_\ +suppressed following driver load. 
This can help to avoid any attempted SLR_\ +during the boot process._\ +)) + +# Set default CCB sizes +# Key for log2 CCB sizes: +# 13=8K 14=16K 15=32K 16=64K 17=128K +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D,14,\ +Define the log2 size of the TQ3D client CCB._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D,14,\ +Define the log2 size of the TQ2D client CCB._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM,13,\ +Define the log2 size of the CDM client CCB._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA,15,\ +Define the log2 size of the TA client CCB._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D,16,\ +Define the log2 size of the 3D client CCB._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC,13,\ +Define the log2 size of the KickSync client CCB._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TDM,14,\ +Define the log2 size of the TDM client CCB._\ +)) + +# Max sizes (used in CCB grow feature) +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ3D,17,\ +Define the log2 max size of the TQ3D client CCB._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ2D,17,\ +Define the log2 max size of the TQ2D client CCB._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_CDM,15,\ +Define the log2 max size of the CDM client CCB._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TA,16,\ +Define the log2 max size of the TA client CCB._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_3D,17,\ +Define the log2 max size of the 3D client CCB._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_KICKSYNC,13,\ +Define the log2 max size of the KickSync client CCB._\ +)) + +$(eval $(call 
TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TDM,17,\ +Define the log2 max size of the TDM client CCB._\ +)) + +endif # INTERNAL_CLOBBER_ONLY + +export INTERNAL_CLOBBER_ONLY +export TOP +export OUT +export PVR_ARCH + +MAKE_ETC := -Rr --no-print-directory -C $(TOP) \ + TOP=$(TOP) OUT=$(OUT) HWDEFS_DIR=$(HWDEFS_DIR) \ + -f build/linux/toplevel.mk + +# This must match the default value of MAKECMDGOALS below, and the default +# goal in toplevel.mk +.DEFAULT_GOAL := build + +ifeq ($(MAKECMDGOALS),) +MAKECMDGOALS := build +else +# We can't pass autogen to toplevel.mk +MAKECMDGOALS := $(filter-out autogen,$(MAKECMDGOALS)) +endif + +.PHONY: autogen +autogen: +ifeq ($(INTERNAL_CLOBBER_ONLY),) + @$(MAKE) -s --no-print-directory -C $(TOP) \ + -f build/linux/prepare_tree.mk +else + @: +endif + +include ../config/help.mk + +# This deletes built-in suffix rules. Otherwise the submake isn't run when +# saying e.g. "make thingy.a" +.SUFFIXES: + +# Because we have a match-anything rule below, we'll run the main build when +# we're actually trying to remake various makefiles after they're read in. 
+# These rules try to prevent that +%.mk: ; +Makefile%: ; +Makefile: ; + +tags: + cd $(TOP) ; \ + ctags \ + --recurse=yes \ + --exclude=binary_* \ + --exclude=caches \ + --exclude=docs \ + --exclude=external \ + --languages=C,C++ + +.PHONY: build kbuild install +build kbuild install: MAKEOVERRIDES := +build kbuild install: autogen + @$(if $(MAKECMDGOALS),$(MAKE) $(MAKE_ETC) $(MAKECMDGOALS) $(eval MAKECMDGOALS :=),:) + +%: MAKEOVERRIDES := +%: autogen + @$(if $(MAKECMDGOALS),$(MAKE) $(MAKE_ETC) $(MAKECMDGOALS) $(eval MAKECMDGOALS :=),:) + +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/config/core_volcanic.mk b/drivers/mcst/gpu-imgtec/build/linux/config/core_volcanic.mk new file mode 100644 index 000000000000..5e52585058f8 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/config/core_volcanic.mk @@ -0,0 +1,1892 @@ +########################################################################### ### +#@File +#@Title Root build configuration. +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +# Configuration wrapper for new build system. This file deals with +# configuration of the build. Add to this file anything that deals +# with switching driver options on/off and altering the defines or +# objects the build uses. +# +# At the end of this file is an exhaustive list of all variables +# that are passed between the platform/config stage and the generic +# build. PLEASE refrain from adding more variables than necessary +# to this stage -- almost all options can go through config.h. +# + +# Sanity check: Make sure preconfig has been included +ifeq ($(TOP),) +$(error TOP not defined: Was preconfig.mk included in root makefile?) 
+endif + +################################# MACROS #################################### + +ALL_TUNABLE_OPTIONS := + +# This records the config option's help text and default value. Note that +# the help text can't contain a literal comma. Use $(comma) instead. +define RegisterOptionHelp +ALL_TUNABLE_OPTIONS += $(1) +ifeq ($(INTERNAL_DESCRIPTION_FOR_$(1)),) +INTERNAL_DESCRIPTION_FOR_$(1) := $(3) +endif +INTERNAL_CONFIG_DEFAULT_FOR_$(1) := $(2) +$(if $(4),\ + $(error Too many arguments in config option '$(1)' (stray comma in help text?))) +endef + +# Write out a GNU make option for both user & kernel +# +define BothConfigMake +$$(eval $$(call KernelConfigMake,$(1),$(2))) +$$(eval $$(call UserConfigMake,$(1),$(2))) +endef + +# Conditionally write out a GNU make option for both user & kernel +# +define TunableBothConfigMake +$$(eval $$(call _TunableKernelConfigMake,$(1),$(2))) +$$(eval $$(call _TunableUserConfigMake,$(1),$(2))) +$(call RegisterOptionHelp,$(1),$(2),$(3),$(4)) +endef + +# Write out an option for both user & kernel +# +define BothConfigC +$$(eval $$(call KernelConfigC,$(1),$(2))) +$$(eval $$(call UserConfigC,$(1),$(2))) +endef + +# Conditionally write out an option for both user & kernel +# +define TunableBothConfigC +$$(eval $$(call _TunableKernelConfigC,$(1),$(2))) +$$(eval $$(call _TunableUserConfigC,$(1),$(2))) +$(call RegisterOptionHelp,$(1),$(2),$(3),$(4)) +endef + +# Use this to mark config options which have to exist, but aren't +# user-tunable. Warn if an attempt is made to change it. +# +define NonTunableOption +$(if $(filter command line environment,$(origin $(1))),\ + $(error Changing '$(1)' is not supported)) +endef + +############################### END MACROS ################################## + +# Check we have a new enough version of GNU make. 
+# +need := 3.81 +ifeq ($(filter $(need),$(firstword $(sort $(MAKE_VERSION) $(need)))),) +$(error A version of GNU make >= $(need) is required - this is version $(MAKE_VERSION)) +endif + +include ../defs.mk + +# Infer PVR_BUILD_DIR from the directory configuration is launched from. +# Check anyway that such a directory exists. +# +PVR_BUILD_DIR := $(notdir $(abspath .)) +$(call directory-must-exist,$(TOP)/build/linux/$(PVR_BUILD_DIR)) + +# Output directory for configuration, object code, +# final programs/libraries, and install/rc scripts. +# +BUILD ?= release +ifneq ($(filter $(WINDOW_SYSTEM),xorg wayland nullws nulldrmws screen surfaceless lws-generic),) +OUT ?= $(TOP)/binary_$(PVR_BUILD_DIR)_$(WINDOW_SYSTEM)_$(BUILD) +else +OUT ?= $(TOP)/binary_$(PVR_BUILD_DIR)_$(BUILD) +endif + +# Use abspath, which doesn't require the path to already exist, to remove '.' +# and '..' path components. This allows paths to be manipulated without things +# ending up in the wrong place. +override OUT := $(abspath $(if $(filter /%,$(OUT)),$(OUT),$(TOP)/$(OUT))) + +CONFIG_MK := $(OUT)/config.mk +CONFIG_H := $(OUT)/config.h +CONFIG_KERNEL_MK := $(OUT)/config_kernel.mk +CONFIG_KERNEL_H := $(OUT)/config_kernel.h + +# Convert commas to spaces in $(D). This is so you can say "make +# D=config-changes,freeze-config" and have $(filter config-changes,$(D)) +# still work. +override D := $(subst $(comma),$(space),$(D)) + +# Create the OUT directory +# +$(shell mkdir -p $(OUT)) + +# For a clobber-only build, we shouldn't regenerate any config files +ifneq ($(INTERNAL_CLOBBER_ONLY),true) + +# Core handling +# + +-include ../config/user-defs.mk +-include ../config/kernel-defs.mk + +# Disabling the online OpenCL compiler breaks the OpenCL spec. +# Use this option carefully (i.e. for embedded usage only). +OCL_ONLINE_COMPILATION ?= 1 + +# Some platforms don't have blob cache support, or the blob cache isn't usable +# for some reason. Make it possible to disable the OpenCL driver's use of it. 
+OCL_USE_KERNEL_BLOB_CACHE ?= 1 + +# Allow OpenCL to disable image sharing with EGL on platforms that don't support it. +OCL_USE_EGL_SHARING ?= 1 +OCL_USE_GRALLOC_IMAGE_SHARING ?= 0 + +# Rather than requiring the user to have to define two variables (one quoted, +# one not), make PVRSRV_MODNAME a non-tunable and give it an overridable +# default here. +# +PVRSRV_MODNAME := pvrsrvkm +PVRSYNC_MODNAME := pvr_sync + +ifneq ($(PVR_SUPPORT_HMMU_VALIDATION),) + $(error PVR_SUPPORT_HMMU_VALIDATION does not exist in Volcanic DDK anymore) +endif + +$(eval $(call TunableBothConfigC,PVR_BUILD_HMMU,,\ +Enable HMMU kernel module._\ +)) + +ifeq ($(PVR_BUILD_HMMU), 1) + PVRHMMU_MODNAME := pvrhmmu +endif + +# Normally builds don't touch these, but we use them to influence the +# components list. Make sure these are defined early enough to make this +# possible. +# + +# Skip defining these UM/FW macros for kbuilds which do not define RGX_BVNC +ifneq ($(RGX_BNC_CONFIG_KM_HEADER),) +# Only the Firmware needs this make macro. +SUPPORT_META_DMA :=\ + $(shell grep -qw RGX_FEATURE_META_DMA $(RGX_BNC_CONFIG_KM) && echo 1) + +SUPPORT_META_COREMEM :=\ + $(shell grep -qe 'RGX_FEATURE_META_COREMEM_SIZE ([123456789][1234567890]*U*)' $(RGX_BNC_CONFIG_KM) && echo 1) + +# Only the Firmware needs this make macro. +SUPPORT_RISCV_FIRMWARE :=\ + $(shell grep -qw RGX_FEATURE_RISCV_FW_PROCESSOR $(RGX_BNC_CONFIG_KM) && echo 1) + +SUPPORT_COMPUTE := 1 + +# Firmware and libsrv_um need this make macro. +SUPPORT_FASTRENDER_DM := 1 + +# Firmware and libsrv_um need this make macro. +SUPPORT_SIGNAL_FILTER := \ + $(shell grep -qw RGX_FEATURE_SIGNAL_SNOOPING $(RGX_BNC_CONFIG_KM) && echo 1) + +# Macro used by client driver makefiles only. 
+ifneq ($(wildcard $(RGX_BNC_CONFIG_H)),) + + SUPPORT_ES32 :=\ + $(shell echo 1) +endif +endif + +# Make sure we choose correct compiler variant before evaluating components +LIB_GLSL_VARIANT ?= llvm + +# Default place for binaries and shared libraries +BIN_DESTDIR ?= /usr/local/bin +INCLUDE_DESTDIR ?= /usr/include +SHARE_DESTDIR ?= /usr/local/share +SHLIB_DESTDIR ?= /usr/lib +FW_DESTDIR ?= /lib/firmware + +# Build's selected list of components. +# - components.mk is a per-build file that specifies the components that are +# to be built +-include components.mk + +# Set up the host and target compiler. +include ../config/compiler.mk + +# PVRGDB needs extra components +# +ifeq ($(PVRGDB),1) +ifneq ($(COMPONENTS),) +COMPONENTS += pvrdebugger pvrdebugipc pvrdebugipc_header +ifneq ($(filter opencl,$(COMPONENTS)),) +COMPONENTS += gdb_ocl_test +endif +endif +endif + +$(eval $(call BothConfigMake,PVR_ARCH,$(PVR_ARCH))) + +ifneq ($(SUPPORT_BUILD_LWS),) + ifneq ($(SYSROOT),) + $(warning ******************************************************) + $(warning WARNING: You have specified a SYSROOT, or are using a) + $(warning buildroot compiler, and enabled SUPPORT_BUILD_LWS. We) + $(warning will ignore the sysroot and will build all required) + $(warning LWS components. Unset SUPPORT_BUILD_LWS if this is not) + $(warning what you want.) + $(warning ******************************************************) + endif + + ifneq ($(origin SUPPORT_BUILD_LWS),file) + $(warning ******************************************************) + $(warning WARNING: Enabling SUPPORT_BUILD_LWS is deprecated.) + ifneq ($(filter surfaceless wayland xorg,$(WINDOW_SYSTEM)),) + $(warning You should not need to set this explicitly.) + else + $(warning You should be setting SYSROOT instead, which is) + $(warning documented in the Rogue DDK Linux and Rogue DDK) + $(warning Linux WS Platform Guides.) 
+ endif + $(warning ******************************************************) + endif + + override SYSROOT := +endif + + +ifneq ($(strip $(LWS_PREFIX)),) +endif + +# The name of the file that contains the set of tarballs that should be +# built to support a given linux distribution +LWS_DIST ?= tarballs-ubuntu-next + +ifeq ($(SUPPORT_BUILD_LWS),1) + COMPONENTS += ${LWS_COMPONENTS} +endif + +$(if $(filter config,$(D)),$(info Build configuration:)) + +################################# CONFIG #################################### + +-include ../config/core-internal.mk + +# Firmware toolchain versions +$(eval $(call BothConfigMake,METAG_VERSION_NEEDED,2.8.1.0.3)) + +ifneq ($(SUPPORT_NEUTRINO_PLATFORM), 1) + +# If KERNELDIR is set, write it out to the config.mk, with +# KERNEL_COMPONENTS and KERNEL_ID +# +ifneq ($(strip $(KERNELDIR)),) +PVRSRV_MODULE_BASEDIR ?= /lib/modules/$(KERNEL_ID)/extra/ +$(eval $(call BothConfigMake,KERNELDIR,$(KERNELDIR))) +$(eval $(call BothConfigMake,KERNEL_ID,$(KERNEL_ID))) +$(eval $(call BothConfigMake,PVRSRV_MODULE_BASEDIR,$(PVRSRV_MODULE_BASEDIR))) +$(eval $(call BothConfigMake,KERNEL_COMPONENTS,$(KERNEL_COMPONENTS))) +$(eval $(call TunableKernelConfigMake,EXTRA_PVRSRVKM_COMPONENTS,,\ +List of components that should be built in to pvrsrvkm.ko$(comma) rather than_\ +forming separate kernel modules._\ +)) + +# If KERNEL_CROSS_COMPILE is set to "undef", this is magically +# equivalent to being unset. If it is unset, we use CROSS_COMPILE +# (which might also be unset). If it is set, use it directly. +ifneq ($(KERNEL_CROSS_COMPILE),undef) +KERNEL_CROSS_COMPILE ?= $(CROSS_COMPILE) +$(eval $(call TunableBothConfigMake,KERNEL_CROSS_COMPILE,)) +endif + +# Alternatively, allow the CC used for kbuild to be overridden +# exactly, bypassing any KERNEL_CROSS_COMPILE configuration. +# LD, NM and OBJCOPY could be overridden by a set of CC tools. 
+$(eval $(call TunableBothConfigMake,KERNEL_CC,)) +$(eval $(call TunableBothConfigMake,KERNEL_LD,)) +$(eval $(call TunableBothConfigMake,KERNEL_NM,)) +$(eval $(call TunableBothConfigMake,KERNEL_OBJCOPY,)) + +# Check the KERNELDIR has a kernel built and also check that it is +# not 64-bit, which we do not support. +KERNEL_AUTOCONF := \ + $(strip $(wildcard $(KERNELDIR)/include/linux/autoconf.h) \ + $(wildcard $(KERNELDIR)/include/generated/autoconf.h)) +ifeq ($(KERNEL_AUTOCONF),) +$(warning autoconf.h not found in $$(KERNELDIR)/include/linux \ +or $$(KERNELDIR)/include/generated. Check your $$(KERNELDIR) variable \ +and kernel configuration.) +endif +else +$(if $(KERNEL_COMPONENTS),$(warning KERNELDIR is not set. Kernel components cannot be built)) +endif + +# Enable Client CCB grow +PVRSRV_ENABLE_CCCB_GROW ?= 1 +$(eval $(call TunableBothConfigC,PVRSRV_ENABLE_CCCB_GROW,,\ +This controls the feature that allows the Services client CCBs to grow_\ +when they become full._\ +)) + +endif # !Neutrino + + +ifneq ($(_window_systems),) +# If there's only one window system then don't output this option as part +# of `make confighelp` +ifeq ($(words $(_window_systems)),1) +$(eval $(call BothConfigMake,WINDOW_SYSTEM,$(WINDOW_SYSTEM))) +else +$(eval $(call TunableBothConfigMake,WINDOW_SYSTEM,,\ +Window system to use ($(_window_systems))._\ +)) +endif +endif + +# Ideally configured by platform Makefiles, as necessary +# +SHADER_DESTDIR := $(SHARE_DESTDIR)/pvr/shaders/ + +ifeq ($(RGX_FW_SIGNED),1) +ifeq ($(RGX_FW_PK8),) +$(error RGX_FW_PK8 must be set for RGX_FW_SIGNED=1.) 
+endif # !RGX_FW_PK8 +$(eval $(call TunableBothConfigC,RGX_FW_PKCS1_PSS_PADDING,)) +else # RGX_FW_SIGNED +endif # RGX_FW_SIGNED + +ifeq ($(RGX_FW_SIGNED),1) +$(eval $(call KernelConfigC,RGX_FW_FILENAME,"\"rgx.fw.signed\"")) +$(eval $(call KernelConfigC,RGX_SH_FILENAME,"\"rgx.sh.signed\"")) +ifneq ($(RGX_FW_X509),) +$(eval $(call KernelConfigC,RGX_FW_SIGNED,1)) +endif # RGX_FW_X509 +else # RGX_FW_SIGNED +$(eval $(call KernelConfigC,RGX_FW_FILENAME,"\"rgx.fw\"")) +$(eval $(call KernelConfigC,RGX_SH_FILENAME,"\"rgx.sh\"")) +endif # RGX_FW_SIGNED + +ifneq ($(SUPPORT_ANDROID_PLATFORM),1) + ifeq ($(wildcard ${TOP}/build/linux/tools/prepare-llvm.sh),) + # No facility for using LLVM in this package. + else ifeq ($(LLVM_BUILD_DIR),) + $(warning LLVM_BUILD_DIR is not set. Components that use it (e.g., OpenCL, Vulkan, OpenGLES3) cannot be built) + else + override LLVM_BUILD_DIR := $(abspath $(LLVM_BUILD_DIR)) + LLVM_MESSAGE=$(shell ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} ${TOP}/build/linux/tools/prepare-llvm.sh -c $(LLVM_BUILD_DIR)) + + ifneq ($(filter Error:,$(firstword $(LLVM_MESSAGE))),) + $(info *** prepare-llvm.sh: $(LLVM_MESSAGE)) + $(error *** LLVM_BUILD_DIR $(LLVM_BUILD_DIR) is not suitable) + endif + + ifneq ($(filter Warning:,$(firstword $(LLVM_MESSAGE))),) + $(info *** prepare-llvm.sh: $(LLVM_MESSAGE)) + endif + + # Because we need to handle MULTIARCH builds, we can't work out the + # architecture to use in the paths until compile-time. So leave + # _LLVM_ARCH_ as a placeholder that will be replaced in the + # moduledef. 
+ LLVM_INCLUDE_PATH := $(LLVM_BUILD_DIR)/llvm.src/llvm/include \ + $(LLVM_BUILD_DIR)/llvm._LLVM_ARCH_/include + + CLANG_INCLUDE_PATH := $(LLVM_INCLUDE_PATH) \ + $(LLVM_BUILD_DIR)/llvm.src/clang/include \ + $(LLVM_BUILD_DIR)/llvm._LLVM_ARCH_/tools/clang/include + + SPV_INCLUDE_PATH := $(LLVM_INCLUDE_PATH) \ + $(LLVM_BUILD_DIR)/llvm.src/spv-translator/include \ + $(LLVM_BUILD_DIR)/llvm._LLVM_ARCH_/tools/SPV_TRANSLATOR/include + + LLVM_LIB_PATH := $(LLVM_BUILD_DIR)/llvm._LLVM_ARCH_/lib/ + + LLVM_INCLUDE_PATH_HOST := $(LLVM_BUILD_DIR)/llvm.src/llvm/include/ \ + $(LLVM_BUILD_DIR)/llvm._LLVM_ARCH_/include/ + + CLANG_INCLUDE_PATH_HOST := $(LLVM_INCLUDE_PATH_HOST) \ + $(LLVM_BUILD_DIR)/llvm.src/clang/include \ + $(LLVM_BUILD_DIR)/llvm._LLVM_ARCH_/tools/clang/include + + SPV_INCLUDE_PATH_HOST := $(LLVM_INCLUDE_PATH_HOST) \ + $(LLVM_BUILD_DIR)/llvm.src/spv-translator/include \ + $(LLVM_BUILD_DIR)/llvm._LLVM_ARCH_/tools/SPV_TRANSLATOR/include + + LLVM_LIB_PATH_HOST := $(LLVM_BUILD_DIR)/llvm._LLVM_ARCH_/lib/ + endif + else + LLVM_MESSAGE=$(shell ANDROID_SDK_ROOT=$(ANDROID_SDK_ROOT) ${TOP}/build/linux/tools/prepare-llvm-android.sh -a verify -t "$(JNI_CPU_ABI) $(JNI_CPU_ABI_2ND)") + + ifneq ($(filter Warning:,$(firstword $(LLVM_MESSAGE))),) + $(info *** prepare-llvm-android.sh: $(LLVM_MESSAGE)) + $(info *** Please update your VNDK with the newer LLVM version. ) + endif +endif + + + + + + +# The SPV library is in the same folder as the LLVM ones as it is built +# as part of LLVM + +ifneq ($(SUPPORT_ANDROID_PLATFORM),1) + ifeq ($(wildcard ${TOP}/build/linux/tools/prepare-nnvm.sh),) + # No facility for using NNVM in this package. 
+ else ifeq ($(NNVM_BUILD_DIR),) + # IMGDNN is not built by default so do nothing if NNVM_BUILD_DIR is not set + else + override NNVM_BUILD_DIR := $(abspath $(NNVM_BUILD_DIR)) + NNVM_MESSAGE=$(shell ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} ${TOP}/build/linux/tools/prepare-nnvm.sh -c $(NNVM_BUILD_DIR)) + + ifneq ($(filter Error:,$(firstword $(NNVM_MESSAGE))),) + $(info *** prepare-nnvm.sh: $(NNVM_MESSAGE)) + $(error *** NNVM_BUILD_DIR $(NNVM_BUILD_DIR) is not suitable) + endif + + ifneq ($(filter Warning:,$(firstword $(NNVM_MESSAGE))),) + $(info *** prepare-nnvm.sh: $(NNVM_MESSAGE)) + endif + + # Because we need to handle MULTIARCH builds, we can't work out the + # architecture to use in the paths until compile-time. So leave + # _NNVM_ARCH_ as a placeholder that will be replaced in the + # moduledef. + NNVM_INCLUDE_PATH := $(NNVM_BUILD_DIR)/nnvm.src/include + NNVM_LIB_PATH := $(NNVM_BUILD_DIR)/nnvm._NNVM_ARCH_/lib/ + endif +endif + + + + + + +$(if $(USE_CCACHE),$(if $(USE_DISTCC),$(error\ +Enabling both USE_CCACHE and USE_DISTCC at the same time is not supported))) + + +# Invariant options for Linux +# +$(eval $(call BothConfigC,LINUX,)) + +$(eval $(call BothConfigC,PVR_BUILD_DIR,"\"$(PVR_BUILD_DIR)\"")) +$(eval $(call BothConfigC,PVR_BUILD_TYPE,"\"$(BUILD)\"")) +$(eval $(call BothConfigC,PVRSRV_MODNAME,"\"$(PVRSRV_MODNAME)\"")) +$(eval $(call BothConfigMake,PVRSRV_MODNAME,$(PVRSRV_MODNAME))) +$(eval $(call BothConfigC,PVRHMMU_MODNAME,"\"$(PVRHMMU_MODNAME)\"")) +$(eval $(call BothConfigMake,PVRHMMU_MODNAME,$(PVRHMMU_MODNAME))) +$(eval $(call BothConfigC,PVRSYNC_MODNAME,"\"$(PVRSYNC_MODNAME)\"")) +$(eval $(call BothConfigMake,PVRSYNC_MODNAME,$(PVRSYNC_MODNAME))) +$(eval $(call BothConfigMake,PVR_BUILD_DIR,$(PVR_BUILD_DIR))) +$(eval $(call BothConfigMake,PVR_BUILD_TYPE,$(BUILD))) + +SUPPORT_RGX ?= 1 +ifeq ($(SUPPORT_RGX),1) +$(eval $(call BothConfigC,SUPPORT_RGX,1)) +$(eval $(call BothConfigMake,SUPPORT_RGX,1)) +endif + +# Some of the definitions in stdint.h 
aren't exposed by default in C++ mode, +# unless these macros are defined. To make sure we get these definitions +# regardless of which files include stdint.h, define them here. + +# FIXME: We can't use GCC __thread reliably with clang. +ifeq ($(SUPPORT_ANDROID_PLATFORM),1) +else +endif + +ifneq ($(DISPLAY_CONTROLLER),) +$(eval $(call BothConfigC,DISPLAY_CONTROLLER,$(DISPLAY_CONTROLLER))) +$(eval $(call BothConfigMake,DISPLAY_CONTROLLER,$(DISPLAY_CONTROLLER))) +endif + + +$(eval $(call BothConfigMake,PVR_SYSTEM,$(PVR_SYSTEM))) +$(eval $(call KernelConfigMake,PVR_LOADER,$(PVR_LOADER))) + +ifeq ($(MESA_EGL),1) +else +endif + + +# Build-type dependent options +# +$(eval $(call BothConfigMake,BUILD,$(BUILD))) + +ifeq ($(SUPPORT_VALIDATION),1) +SUPPORT_TRP :=\ + $(shell grep -qw RGX_FEATURE_TILE_REGION_PROTECTION $(RGX_BNC_CONFIG_H) && echo 1) +endif + +ifeq ($(SUPPORT_VALIDATION),1) +# Enable Periodic Hardware Reset functionality for testing +override PVR_ENABLE_PHR := 1 +endif + +ifeq ($(SUPPORT_VALIDATION),1) +# Enable FBCDC descriptor signature bit for testing +SUPPORT_FBCDC_SIGNATURE_CHECK :=\ + $(shell grep -qw RGX_FEATURE_FBCDC_SIGNATURE $(RGX_BNC_CONFIG_H) && echo 1) +endif + +# Prevent excluding regconfig bridge when the build level macro defined, +# regconfig functions are used in pvrdebug. +# +ifeq ($(SUPPORT_USER_REGISTER_CONFIGURATION),1) +ifeq ($(EXCLUDE_RGXREGCONFIG_BRIDGE),1) +override EXCLUDE_RGXREGCONFIG_BRIDGE := 0 +endif +endif + +ifeq ($(SUPPORT_ANDROID_PLATFORM),1) +# Always use DEBUGLINK. These days we are using external components which +# have large amounts of C++ debug information and it is impractical to carry +# statically linked components to the target filesystem without stripping. 
+DEBUGLINK ?= 1 +endif + +ifeq ($(BUILD),debug) + +$(eval $(call TunableBothConfigMake,PVR_SERVICES_DEBUG,,\ +Enable additional services debug options._\ +This needs to be enabled for both the UM and KM builds_\ +so that compatibility between them is achieved.\ +)) + +ifeq ($(PVR_SERVICES_DEBUG),1) +PVRSRV_ENABLE_GPU_MEMORY_INFO ?= 1 +PVRSRV_DEBUG_HANDLE_LOCK ?= 1 +PVRSRV_APPHINT_ENABLEFWPOISONONFREE ?= IMG_TRUE +PVRSRV_TIMER_CORRELATION_HISTORY ?= 1 +endif + +# enable sync prim poisoning in debug builds +PVRSRV_ENABLE_SYNC_POISONING ?= 1 + +# Client CCB usage statistics enabled by default in debug builds +PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE ?= 1 +# bridge debug and statistics enabled by default in debug builds +DEBUG_BRIDGE_KM ?= 1 +$(eval $(call BothConfigC,DEBUG,)) +$(eval $(call TunableBothConfigMake,DEBUGLINK,)) +PERFDATA ?= 1 + +else ifeq ($(BUILD),release) +$(eval $(call BothConfigC,RELEASE,)) +$(eval $(call TunableBothConfigMake,DEBUGLINK,1)) + + +else ifeq ($(BUILD),timing) +$(eval $(call BothConfigC,TIMING,)) +$(eval $(call TunableBothConfigMake,DEBUGLINK,1)) +else +$(error BUILD= must be either debug, release or timing) + +endif # $BUILD + +# Memtest, currently implemented for Linux only +$(eval $(call TunableKernelConfigC,SUPPORT_PHYSMEM_TEST,1)) +$(eval $(call TunableKernelConfigMake,SUPPORT_PHYSMEM_TEST,1)) + + +$(eval $(call TunableKernelConfigMake,KERNEL_DEBUGLINK,,\ +Enable this to store DDK kernel module debugging symbols in separate$(comma) per_\ +module$(comma) .dbg files. These files will not be installed on the target system$(comma)_\ +but can be used by tools$(comma) e.g. gdb$(comma) for offline debugging. 
This may be_\ +desirable when the target system has limited storage space and the kernel_\ +has been configured with CONFIG_DEBUG_INFO=y$(comma) which can have a significant_\ +impact on kernel module size.)) + +$(eval $(call TunableBothConfigMake,COMPRESS_DEBUG_SECTIONS,,\ +Enable compression on debug sections (.zdebug)_\ +May have tool compatibility issues.)) + + + +# User-configurable options +# +$(eval $(call TunableBothConfigMake,RGX_BNC,)) +ifneq ($(RGX_BVNC),) +$(eval $(call TunableKernelConfigC,RGX_BVNC_CORE_KM_HEADER,)) +endif +ifneq ($(RGX_BVNC),) +$(eval $(call TunableKernelConfigC,RGX_BNC_CONFIG_KM_HEADER,)) +endif + +$(eval $(call TunableBothConfigC,PVRSRV_DEBUG_HANDLE_LOCK,,\ +Enable checking that the handle lock is held when a handle reference_\ +count is modified)) + +$(eval $(call TunableBothConfigC,SUPPORT_DBGDRV_EVENT_OBJECTS,1)) +$(eval $(call TunableBothConfigC,PVR_DBG_BREAK_ASSERT_FAIL,,\ +Enable this to treat PVR_DBG_BREAK as PVR_ASSERT(0)._\ +Otherwise it is ignored._\ +)) +$(eval $(call TunableBothConfigC,PDUMP,,\ +Enable parameter dumping in the driver._\ +This adds code to record the parameters being sent to the hardware for_\ +later analysis._\ +)) +$(eval $(call TunableBothConfigC,NO_HARDWARE,,\ +Disable hardware interactions (e.g. register writes) that the driver would_\ +normally perform. 
A driver built with this option can$(apos)t drive hardware$(comma)_\ +but with PDUMP enabled$(comma) it can capture parameters to be played back later._\ +)) +$(eval $(call TunableBothConfigC,PDUMP_DEBUG_OUTFILES,,\ +Add debug information to the pdump script (out2.txt) as it is generated._\ +This includes line numbers$(comma) process names and also enables checksumming_\ +of the binary data dumped to out2.prm which can be verified offline._\ +)) +$(eval $(call TunableKernelConfigC,PVRSRV_NEED_PVR_DPF,1,\ +Enables PVR_DPF messages in the kernel mode driver._\ +)) +$(eval $(call TunableBothConfigC,PVRSRV_NEED_PVR_ASSERT,,\ +Enable this to turn on PVR_ASSERT in release builds._\ +)) +$(eval $(call TunableBothConfigC,PVRSRV_NEED_PVR_TRACE,,\ +Enable this to turn on PVR_TRACE in release builds._\ +)) +$(eval $(call TunableBothConfigC,PVRSRV_NEED_PVR_STACKTRACE_NATIVE,1,\ +Enable this to turn on stack trace functionality requiring only native_\ +operating system features._\ +)) +$(eval $(call TunableBothConfigC,PVRSRV_NEED_PVR_STACKTRACE,,\ +Enable this to turn on stack trace functionality requiring either native_\ +operating system features$(comma) or additional dependencies such as_\ +libunwind._\ +)) +ifeq ($(PVRSRV_NEED_PVR_STACKTRACE),1) +ifeq ($(SUPPORT_ANDROID_PLATFORM),1) +$(warning Since Android O it's not allowed to link to libunwind.) 
+endif +endif +$(eval $(call TunableBothConfigC,REFCOUNT_DEBUG,)) +$(eval $(call TunableBothConfigC,DC_DEBUG,,\ +Enable debug tracing in the DC (display class) server code)) +$(eval $(call TunableBothConfigC,SCP_DEBUG,,\ +Enable debug tracing in the SCP (software command processor)_\ +which is used by the DC.)) +$(eval $(call TunableBothConfigC,SUPPORT_INSECURE_EXPORT,)) +$(eval $(call TunableBothConfigC,SUPPORT_SECURE_EXPORT,,\ +Enable support for secure device memory and sync export._\ +This replaces export handles with file descriptors$(comma) which can be passed_\ +between processes to share memory._\ +)) +$(eval $(call TunableBothConfigC,SUPPORT_DISPLAY_CLASS,,\ +Enable DC (display class) support. Disable if not using a DC display driver.)) +$(eval $(call TunableBothConfigC,PVRSRV_DEBUG_CCB_MAX,)) + +$(eval $(call TunableBothConfigMake,SUPPORT_TRUSTED_DEVICE,)) +$(eval $(call TunableBothConfigC,SUPPORT_TRUSTED_DEVICE,,\ +Enable a build mode targeting an REE._\ +)) + +ifeq ($(SUPPORT_TRUSTED_DEVICE),1) +ifeq ($(NO_HARDWARE),1) +SUPPORT_SECURITY_VALIDATION ?= 1 +endif +endif + +$(eval $(call TunableBothConfigC,SUPPORT_SECURITY_VALIDATION,,\ +Enable DRM security validation mode._\ +)) +$(eval $(call TunableBothConfigMake,SUPPORT_SECURITY_VALIDATION,)) + +$(eval $(call TunableBothConfigC,PM_INTERACTIVE_MODE,,\ +Enable PM interactive mode._\ +)) + +$(eval $(call TunableBothConfigC,METRICS_USE_ARM_COUNTERS,,\ +Enable usage of hardware performance counters for metrics on ARM platforms._\ +)) + + +ifeq ($(GTRACE_TOOL),1) + ifeq ($(SUPPORT_ANDROID_PLATFORM),1) + endif + override PVR_ANNOTATION_MAX_LEN ?= 128 +endif + + +IMG_1_11_OPTS?=0xffffffff + +IMG_1_12_OPTS?=0xffffffff + + + +# poison values for the Services +$(eval $(call TunableBothConfigC,PVRSRV_POISON_ON_ALLOC_VALUE,0xd9,\ +Poison on alloc value)) +$(eval $(call TunableBothConfigC,PVRSRV_POISON_ON_FREE_VALUE,0x63,\ +Poison on free value)) + +# +# GPU virtualization support +# +VMM_TYPE ?= stub +ifeq 
($(VMM_TYPE),stub) +PVRSRV_VZ_BYPASS_HMMU ?= 1 +endif + +RGX_FW_HEAP_SHIFT ?= 25 +PVRSRV_VZ_NUM_OSID ?= 1 + +ifeq ($(PVRSRV_VZ_NUM_OSID),0) + override PVRSRV_VZ_NUM_OSID := 1 +endif + +# Make RGX_NUM_OS_SUPPORTED visible to both UM & KM makefiles +$(eval $(call BothConfigMake,RGX_NUM_OS_SUPPORTED,$(PVRSRV_VZ_NUM_OSID),)) +# Reveal RGX_NUM_OS_SUPPORTED only to KM code, allowing the firmware makefiles, +# which are part of the UM, to selectively control this symbol so the same DDK +# build can create both native and vz-supported firmware binaries +$(eval $(call KernelConfigC,RGX_NUM_OS_SUPPORTED,$(PVRSRV_VZ_NUM_OSID),\ +The number of firmware supported OSIDs [1 native build : 2+ vz build])) + +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_DRIVERMODE,0x7FFFFFFF,\ +The driver execution mode. This can be either an override or non-override 32-bit value. An override_\ +value has the MSB bit set and a non-override value has this bit cleared. Excluding this MSB bit_\ +and treating the remaining 31-bit value as a signed integer the mode values are_\ +[-1 native mode : 0 host mode : +1 guest mode])) +$(eval $(call KernelConfigMake,VMM_TYPE,$(VMM_TYPE),\ +The virtual machine manager type, defaults to stub implementation)) +$(eval $(call TunableBothConfigC,RGX_FW_HEAP_SHIFT,$(RGX_FW_HEAP_SHIFT),\ +Firmware physical heap log2 size per OSID (minimum 4MiB, default 32MiB).)) + +$(eval $(call TunableBothConfigC,SUPPORT_AUTOVZ,,\ +Enable support for AutoVz mode_)) + +ifeq ($(SUPPORT_AUTOVZ),1) + override PVRSRV_VZ_BYPASS_HMMU := 1 + override RGX_VZ_STATIC_CARVEOUT_FW_HEAPS := 1 + + $(eval $(call TunableBothConfigC,SUPPORT_AUTOVZ_HW_REGS,1\ + Use dedicated hardware registers for tracking OS states otherwise rely on shared memory._)) + + $(eval $(call TunableBothConfigC,PVR_AUTOVZ_WDG_PERIOD_MS,3000,\ + Time period in milliseconds between the firmware autovz watchdog checks._)) +endif + +$(eval $(call TunableBothConfigC,PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR,,\ +The carveout 
memory used for allocating the firmware MMU pagetables it start by default after_\ +the firmware heap. If a different memory range is reserved for this purpose set its base address._)) + +$(eval $(call TunableBothConfigC,RGX_VZ_STATIC_CARVEOUT_FW_HEAPS,,\ +Firmware heaps of Guest VMs are allocated from pre-determined carveout memory.)) + +$(eval $(call TunableKernelConfigC,PVRSRV_VZ_BYPASS_HMMU,$(PVRSRV_VZ_BYPASS_HMMU))) + +$(eval $(call TunableBothConfigMake,PVR_ENABLE_PHR,,)) +$(eval $(call TunableBothConfigC,PVR_ENABLE_PHR,,\ +Enable the Periodic Hardware Reset functionality (PHR))) + +# GPUVIRT_VALIDATION default region values used _in the emulator_. +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_OSIDREGION0MIN,$\ +\"0x00000000 0x04000000 0x10000000 0x18000000 0x20000000 0x28000000 0x30000000 0x38000000\",\ +Array of comma/space separated strings that define the start addresses for all 8 OSids on Region 0.)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_OSIDREGION0MAX,$\ +\"0x3FFFFFFF 0x0FFFFFFF 0x17FFFFFF 0x1FFFFFFF 0x27FFFFFF 0x2FFFFFFF 0x37FFFFFF 0x3FFFFFFF\",\ +Array of comma/space separated strings that define the end addresses for all 8 OSids on Region 0.)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_OSIDREGION1MIN,$\ +\"0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000\",\ +Array of comma/space separated strings that define the start addresses for all 8 OSids on Region 1.)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_OSIDREGION1MAX,$\ +\"0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF\",\ +Array of comma/space separated strings that define the end addresses for all 8 OSids on Region 1.)) + +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_CDMARBITRATIONOVERRIDE,0x00000000,\ +The CDM workgroup arbitration mode - used by the CDM to assign workgroups to a specific cluster)) + +# Check first for a system layer coefficient file... +# Then for a B.NC coefficient file... 
+# If neither are present then use default coefficients. +ifneq (,$(wildcard $(TOP)/kernel/system/$(PVR_SYSTEM)/rgxpowmoncoeffssys.h)) +else ifneq (,$(wildcard $(HWDEFS_DIR)/$(RGX_BNC)/rgxpowmoncoeffshwdefs.h)) +else +endif + +$(eval $(call TunableKernelConfigMake,SUPPORT_LINUX_DVFS,)) +$(eval $(call TunableKernelConfigC,SUPPORT_LINUX_DVFS,,\ +Enables PVR DVFS implementation to actively change frequency / voltage depending_\ +on current GPU load. Currently only supported on Linux._\ +)) + +# +# Tile Lifetime tracking support. +# +ifeq ($(SUPPORT_VALIDATION),1) +SUPPORT_TLT_PERF ?= 1 +endif + + +# +# GPU power monitoring configuration +# + +$(eval $(call TunableBothConfigMake,SUPPORT_POWMON_COMPONENT,1)) +$(eval $(call TunableBothConfigC,SUPPORT_POWMON_COMPONENT,1,\ +Includes power-monitoring component in firmware build._\ +)) + +ifneq ($(SUPPORT_POWMON_COMPONENT),0) +# Following tunables are only used when power-monitoring present +$(eval $(call TunableBothConfigC,PVR_POWER_ACTOR,,\ +Enables PVR power actor implementation for registration with a kernel configured_\ +with IPA. Enables power counter measurement timer in the FW which is periodically_\ +read by the host DVFS in order to operate within a governor set power envelope.)) +$(eval $(call TunableBothConfigC,PVR_POWER_ACTOR_SCALING,,\ +Scaling factor for the dynamic power coefficients.)) +$(eval $(call TunableBothConfigC,SUPPORT_POWER_VALIDATION_VIA_DEBUGFS,1,\ +Enables the validation feature to read power counters from DebugFS power_mon file.)) + ifneq ($(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS),) + $(error SUPPORT_POWER_SAMPLING_VIA_DEBUGFS is currently unsupported on Volcanic.) 
+ endif +endif + +$(eval $(call TunableKernelConfigC,DEBUG_HANDLEALLOC_INFO_KM,)) +$(eval $(call TunableKernelConfigC,PVRSRV_RESET_ON_HWTIMEOUT,)) +$(eval $(call TunableKernelConfigC,PVR_LDM_PLATFORM_PRE_REGISTERED,)) +$(eval $(call TunableKernelConfigC,PVR_LDM_DRIVER_REGISTRATION_NAME,"\"$(PVRSRV_MODNAME)\"")) +$(eval $(call TunableBothConfigC,PVRSRV_ENABLE_SYNC_POISONING,,\ +Poison Sync Prim values on free.)) +$(eval $(call TunableBothConfigC,PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN,256)) +$(eval $(call TunableKernelConfigC,SYNC_DEBUG,)) +$(eval $(call TunableKernelConfigC,SUPPORT_DUMP_CLIENT_CCB_COMMANDS,)) +$(eval $(call TunableKernelConfigC,SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT,)) + +# Disable DDK features when PDUMP is defined +# +ifeq ($(PDUMP),1) +# For PDUMP we deactivate the pending protection because it leads to +# changing script files because of the additional invalidation kick. +override SUPPORT_MMU_PENDING_FAULT_PROTECTION := 0 +# Increase the default max annotation length when PDump is enabled +override PVR_ANNOTATION_MAX_LEN ?= 96 +endif + +$(eval $(call TunableKernelConfigC,SUPPORT_MMU_PENDING_FAULT_PROTECTION,1,\ +Activates use of the cleanup worker thread to defer the freeing of page_\ +tables. 
The cleanup work item will issue an MMU cache invalidation kick_\ +and wait with page table freeing until the cache invalidation has been_\ +confirmed by the FW via a sync._\ +)) + +$(eval $(call TunableKernelConfigC,SUPPORT_DC_COMPLETE_TIMEOUT_DEBUG,)) + + +$(eval $(call TunableBothConfigC,SUPPORT_PVR_VALGRIND,)) + +$(eval $(call TunableBothConfigC,ION_DEFAULT_HEAP_NAME,\"system\",\ +In ion enabled DDKs$(comma) this setting should be the default heap name defined in the kernel_\ +where gralloc buffers are allocated by default._\ +This setting is for kernels (>=4.12) which support ION heap query_\ +)) +$(eval $(call TunableBothConfigC,ION_DEFAULT_HEAP_ID_MASK,(1 << ION_HEAP_TYPE_SYSTEM),\ +In ion enabled DDKs$(comma) this setting should be (1 << ION_HEAP_TYPE_xxx)_\ +where ION_HEAP_TYPE_xxx is default heap id defined in the kernel._\ +This setting is for kernels (<4.12) which do not support ION heap query_\ +)) +$(eval $(call TunableBothConfigC,ION_FALLBACK_HEAP_NAME,,\ +In ion enabled DDKs$(comma) use this setting to define fallback heap._\ +This setting is optional (depends if kernel has defined a fallback heap)_\ +)) +$(eval $(call TunableBothConfigC,ION_FALLBACK_HEAP_ID_MASK,,\ +In ion enabled DDKs$(comma) use this setting to define fallback heap id mask._\ +This setting is optional (depends if kernel has defined a fallback heap)_\ +)) + +EGL_EXTENSION_KHR_GL_COLORSPACE ?= 1 + + +ifneq ($(MESA_EGL),1) + # The EGL EXT_image_gl_colorspace spec says: + # "Can be supported on EGL 1.4 provided that EGL_KHR_gl_colorspace is + # implemented, as well as either EGL_KHR_image or EGL_KHR_image_base." + # + # Note: IMG EGL supports EGL_KHR_image and EGL_KHR_image_base unconditionally. + ifeq ($(EGL_EXTENSION_KHR_GL_COLORSPACE),1) + endif + + + # Check for GBM_BACKENDs that aren't understood by the GBM Linux.mk. 
+ $(eval $(call ValidateValues,GBM_BACKEND,$(_supported_gbm_backends))) + +endif + + +$(eval $(call TunableBothConfigC,PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY,,\ +Enable this to force the use of PVRSRVMemSet/Copy in the client driver _\ +instead of the built-in libc functions. These implementations are device _\ +memory safe and are used by default on AARCH64 platform._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_BRIDGE_LOGGING,,\ +If enabled$(comma) provides a debugfs entry which logs the number of calls_\ +made to each bridge function._\ +)) + +$(eval $(call TunableKernelConfigC,PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP,,\ +If enabled it will make kernel threads to sleep indefinitely until signalled._\ +)) + +# If we are building against a ChromeOS kernel, set this. +$(eval $(call TunableKernelConfigC,CHROMIUMOS_KERNEL,)) +$(eval $(call TunableKernelConfigMake,CHROMIUMOS_KERNEL,)) + + + + +ifneq ($(SUPPORT_ANDROID_PLATFORM),1) +endif + + +DEFERRED_WORKER_THREAD ?= 1 + + +$(eval $(call TunableBothConfigC,CACHEFLUSH_ISA_TYPE,,\ +Specify CPU d-cache maintenance ISA type (i.e. CACHEFLUSH_ISA_TYPE_[X86,ARM64,GENERIC])._\ +)) + + +# Support version 3 of the loader. Versions 0/1/2 should not be used. +# Default needs to be set separately otherwise 0 gets replaced with the default +SUPPORT_LUNARG_LOADER_VERSION ?= 3 + +ifneq ($(SUPPORT_ANDROID_PLATFORM),1) +ifeq ($(WINDOW_SYSTEM),lws-generic) +# Check for SUPPORT_VK_PLATFORMS that aren't understood by the Vulkan Linux.mk. +# Platform names match Mesa where possible. 
+_supported_vk_platforms := null tizen wayland x11 +$(eval $(call ValidateValues,SUPPORT_VK_PLATFORMS,$(_supported_vk_platforms))) + +else +endif + +ifneq ($(SUPPORT_VK_PLATFORMS),) +endif +endif + +ifeq ($(PVR_BLOB_CACHE_DEBUG),1) +$(eval $(call BothConfigC,BLOB_CACHE_DEBUG,)) +endif + +$(eval $(call TunableBothConfigC,PVR_BLOB_CACHE_SIZE_MEGABYTES,20,\ +Set the Linux blob cache size in number of megabytes._\ +)) + +$(eval $(call TunableBothConfigMake,PDUMP,)) +$(eval $(call TunableBothConfigMake,SUPPORT_INSECURE_EXPORT,)) +$(eval $(call TunableBothConfigMake,SUPPORT_SECURE_EXPORT,)) +$(eval $(call TunableBothConfigMake,SUPPORT_DISPLAY_CLASS,)) +$(eval $(call TunableBothConfigMake,CLDNN,,\ +Build CLDNN graph libraries._\ +)) +$(eval $(call TunableBothConfigC,SUPPORT_EXTRA_METASP_DEBUG,,\ +Enable extra debug information using the META Slave Port._\ +Checks the validity of the Firmware code and dumps sync values_\ +using the GPU memory subsystem via the META Slave Port._\ +)) +$(eval $(call TunableBothConfigC,TRACK_FW_BOOT,1,Enable FW boot tracking.)) +# Required to pass the build flag to the META FW makefile + + +$(eval $(call TunableBothConfigMake,OPTIM,,\ +Specify the optimisation flags passed to the compiler. Normally this_\ +is autoconfigured based on the build type._\ +)) + + + + + + + + +# Enables the pre-compiled header use. 
+ + +# Switch among glsl frontend compiler variants +ifeq ($(LIB_GLSL_VARIANT),llvm) +endif + +RGX_TIMECORR_CLOCK ?= mono +$(eval $(call TunableKernelConfigMake,RGX_TIMECORR_CLOCK,mono,\ +Specifies which clock source will be used for time correlation tables and_\ +HWPerf)) +ifeq ($(RGX_TIMECORR_CLOCK),mono) +PVRSRV_APPHINT_TIMECORRCLOCK=0 +else ifeq ($(RGX_TIMECORR_CLOCK),mono_raw) +PVRSRV_APPHINT_TIMECORRCLOCK=1 +else ifeq ($(RGX_TIMECORR_CLOCK),sched) +PVRSRV_APPHINT_TIMECORRCLOCK=2 +else +$(error Wrong value ($(RGX_TIMECORR_CLOCK)) for RGX_TIMECORR_CLOCK) +endif + +# HWR is enabled by default +HWR_DEFAULT_ENABLED ?= 1 +$(eval $(call TunableBothConfigC,HWR_DEFAULT_ENABLED,)) + +# Build-only AppHint configuration values +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT,APPHNT_BLDVAR_DBGDUMPLIMIT,\ +Limit for the number of HWR debug dumps produced)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG,IMG_FALSE,\ +Enable trusted device ACE config)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE,0x4000,\ +Alternate general heap page size (i.e. 
4K,16K [default],64K,256K,1M,2M))) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE,786432,\ +Buffer size in bytes for client HWPerf streams)) + +# PDUMP AppHint defaults +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLESIGNATURECHECKS,APPHNT_BLDVAR_ENABLESIGNATURECHECKS,\ +Buffer size in bytes for storing signature check data)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE,RGXFW_SIG_BUFFER_SIZE_MIN,\ +Buffer size in bytes for storing signature check data)) + +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING,IMG_FALSE,\ +Enable full sync tracking)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG,APPHNT_BLDVAR_ENABLEPAGEFAULTDEBUG,\ +Enable page fault debug)) + +# Validation AppHint defaults +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_VALIDATEIRQ,0,\ +Used to validate the interrupt integration. \ +Enables extra code in the FW to assert all interrupt lines \ +at boot and polls on the host side. The code is only built when \ +generating pdumps for nohw targets.)) + +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_DISABLECLOCKGATING,0,\ +Disable GPU clock gating)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_DISABLEDMOVERLAP,0,\ +Disable GPU data master overlapping)) + +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_FABRICCOHERENCYOVERRIDE,2,\ +Override GPU fabric (i.e. AXI) coherency (i.e. 0:none, 1:ace-lite, 2:full-ace))) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLEDMKILLINGRANDMODE,0,\ +Enable random killing of data masters. 
Use ENABLEFWCONTEXTSWITCH to \ +select the active DMs for killing)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH,RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL,\ +Enable firmware context switching)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLERDPOWERISLAND,RGX_RD_POWER_ISLAND_DEFAULT,\ +Enable RD power island)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWVALENABLESPUPOWERMASKCHANGE,0,\ +Enable firmware handling of SPU power state mask change requests sent by host driver)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWVALAVAILABLESPUMASK,0xFFFFFFFF,\ +Non Fused SPU mask)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_KILLINGCTL,0,\ +GEOM/3D Killing control)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLERANDOMCONTEXTSWITCH,0,\ +Enable random context switching of all DMs for debug and testing purposes)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLESOFTRESETCNTEXTSWITCH,0,\ +Enable soft GPU resets on context switching)) + +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_FIRMWAREPERF,FW_PERF_CONF_NONE,\ +Force the initial Firmware Performance Configuration to the specified value)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE,RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN,\ +Firmware context switch profile)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFDISABLECOUNTERFILTER,0,\ +Force the initial HW Performance Counter Filter value)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB,2048,\ +Buffer size in KB of the hardware performance GPU buffer)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB,2048,\ +Buffer size in KB of the hardware performance host buffer)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS,50,\ +Timeout in milliseconds of the hardware performance host thread)) + +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_JONESDISABLEMASK,0,\ +Disable Jones)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_NEWFILTERINGMODE,IMG_FALSE,\ +Enable new TPU filtering mode)) 
+$(eval $(call AppHintConfigC,PVRSRV_APPHINT_TRUNCATEMODE,0,\ +Truncate mode)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_EMUMAXFREQ,0,\ +Set the maximum frequency for the emulator)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_GPIOVALIDATIONMODE,0,\ +Set the validation type for GPIO 1 for the standard protocol validation\ +2 for the address protocol validation)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_RGXBVNC,\"\",\ +Array of comma separated strings that define BVNC info of the devices.\ +(module parameter usage is RGXBVNC=x.x.x.x,y.y.y.y etc))) + +$(eval $(call TunableBothConfigC,SUPPORT_SW_TRP,,\ +Enable Software Tile Region Protection._\ +)) +ifeq ($(SUPPORT_SW_TRP),1) +override SUPPORT_SHADOW_FREELISTS := 1 +endif + +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ISPSCHEDULINGLATENCYMODE,1,\ +Scheduling latency mode for ISP renders; higher is better (lower latency). \ +(0) Wait for workload completion. \ +(1) IPP Tile granularity. \ +(2) ISP Tile granularity.)) + +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_VALIDATESOCUSCTIMERS,0,\ +Used to validate SOC and USC timer integration. \ +Enables extra code in the kernel driver and FW to check that these \ +timers increase after each TA and 3D kick. 
The code is built both \ +when generating Pdumps for NOHW targets and as a driver live test.)) + +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_SHGEOMPIPEMASK_OVERRIDE,0,\ +Override value for mask of geometry pipes to use in SH kicks.\ +Lowest bits should be set first; valid values are 0x1 0x3 0x7 and so on)) + +# Build-only AppHint configuration values +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_CLEANUPTHREADPRIORITY,5,\ +Set the priority of the cleanup thread (0 - default, 1 - highest, 5 - lowest))) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY,0,\ +Set the priority of the watchdog thread (0 - default, 1 - highest, 5 - lowest))) + +# Debugfs AppHint configuration values +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ASSERTONHWRTRIGGER,IMG_FALSE,\ +Enable firmware assert when an HWR event is triggered)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ASSERTOUTOFMEMORY,IMG_FALSE,\ +Enable firmware assert when the TA raises out-of-memory)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_CHECKMLIST,APPHNT_BLDVAR_DEBUG,\ +Enable firmware MLIST consistency checker)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_DISABLEFEDLOGGING,IMG_FALSE,\ +Disable fatal error detection debug dumps)) + +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLEAPM,RGX_ACTIVEPM_DEFAULT,\ +Force the initial driver APM configuration to the specified value)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLEHTBLOGGROUP,0,\ +Enable host trace log groups)) +$(eval $(call AppHintFlagsConfigC,PVRSRV_APPHINT_ENABLELOGGROUP,RGXFWIF_LOG_TYPE_NONE,\ +RGXFWIF_LOG_TYPE_GROUP_,BIF CLEANUP CSW DEBUG DMA HWP HWR MAIN MTS PM POW RPM RTD SPM,\ +Enable firmware trace log groups)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_FIRMWARELOGTYPE,0,\ +Specify output mechanism for firmware log data. 
0 = TRACE and 1 = TBI)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_FWTRACEBUFSIZEINDWORDS,$\ +RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS,Buffer size in DWords for FW trace log data)) + +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE,0,\ +Override system layer FBCDC version settings \ +(0) No override \ +(1) Force v3 \ +(2) Force v3.1)) + +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HTBOPERATIONMODE,HTB_OPMODE_DROPOLDEST,\ +Configure host trace buffer behaviour)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HTBUFFERSIZE,64,\ +Buffer size in Kbytes for Host Trace log data)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLEFTRACEGPU,IMG_FALSE,\ +Enables generation of GPU FTrace events)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFFWFILTER,0,\ +Mask used to select GPU events to log for performance)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFHOSTFILTER,0,\ +Mask used to select host events to log for performance)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFCLIENTFILTER_SERVICES,0,\ +Mask used to select client Services events to log for performance)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFCLIENTFILTER_EGL,0,\ +Mask used to select client EGL events to log for performance)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGLES,0,\ +Mask used to select client OpenGLES events to log for performance)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENCL,0,\ +Mask used to select client OpenCL events to log for performance)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_HWPERFCLIENTFILTER_VULKAN,0,\ +Mask used to select client Vulkan events to log for performance)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_TIMECORRCLOCK,0,\ +Select HWPerf clock)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ENABLEFWPOISONONFREE,IMG_FALSE,\ +Enables poisoning of firmware allocations when freed)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_FWPOISONONFREEVALUE,0xBD,\ +Poison value when 
PVRSRV_APPHINT_ENABLEFWPOISONONFREE is enabled)) + +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_ZEROFREELIST,IMG_FALSE,\ +Zero freelist memory during freelist reconstruction as part of HWR)) + +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_GPUUNITSPOWERCHANGE,IMG_FALSE,\ +Setting this to '1' enables a test mode to dynamically change the_\ +SPU powered mask before each kick to the FW)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_DISABLEPDUMPPANIC,IMG_FALSE,\ +Disable PDUMP panic)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_CACHEOPCONFIG,0,\ +CPU d-cache maintenance framework flush type configuration)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_CACHEOPUMKMHRESHOLDSIZE,0,\ +CPU d-cache maintenance framework UM/KM threshold configuration)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC,IMG_FALSE,\ +Ignore BVNC reported by HW and use the value specified at build time)) +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_PHYSMEMTESTPASSES,$\ +APPHNT_PHYSMEMTEST_ENABLE,Set number of passes of driver start time MemTest)) + +$(eval $(call AppHintConfigC,PVRSRV_APPHINT_TESTSLRINTERVAL,0,\ +Interval which determines after how many 3D kicks the driver will_\ +insert an additional unsignalled sync checkpoint to the 3D fence_\ +allowing testing of SLR)) + +# GLSL compiler options +ifeq ($(BUILD),debug) +DUMP_LOGFILES ?= 1 +endif +# end of GLSL compiler options + + +$(eval $(call TunableBothConfigC,SUPPORT_AXI_ACE_TEST,,\ +Enable this to add extra FW code for the AXI ACE unittest._\ +)) + + +TQ_CAPTURE_PARAMS ?= 1 + +TQ_DISABLE_SPARSE ?= 0 + +$(eval $(call TunableBothConfigC,RGXFW_DEBUG_LOG_GROUP,,\ +Enable the usage of DEBUG log group in the Firmware logs._\ +)) + +$(eval $(call TunableBothConfigC,SUPPORT_SOC_TIMER,1,\ +Enable the use of the SoC timer. 
When enabled the SoC system layer must implement the pfnSoCTimerRead_\ +callback that when invoked returns the current value of the SoC timer._\ +)) +$(eval $(call TunableBothConfigC,SOC_TIMER_FREQ,20,\ +The SoC timer frequency. This default to 20 MHz if not specified_\ +)) + +# +# Strip Rendering support. +# +$(eval $(call TunableBothConfigMake,SUPPORT_STRIP_RENDERING,,\ +Enabling this feature provides the ability for the firmware to drive the_\ +display controller via GPIO and support Strip Rendering._\ +)) +$(eval $(call TunableBothConfigC,SUPPORT_STRIP_RENDERING,)) + +$(eval $(call TunableBothConfigMake,SUPPORT_DEDICATED_FW_MEMORY,,\ +Allocate FW code and private data from dedicated FW memory._\ +)) +$(eval $(call TunableBothConfigC,SUPPORT_DEDICATED_FW_MEMORY,)) + +# +# Serial DM killing support. +# + +# +# Ensure top-level PDVFS build defines are set correctly +# +ifeq ($(SUPPORT_PDVFS),1) +SUPPORT_WORKLOAD_ESTIMATION ?= 1 +#else +#ifeq ($(SUPPORT_WORKLOAD_ESTIMATION),1) +#SUPPORT_PDVFS ?= 1 +#endif +endif + +# +# Ensure PDVFS comms. 
protocol is compatible with RGX GPIO mode +# +ifneq ($(PDVFS_COM),) +ifneq ($(PDVFS_COM),PDVFS_COM_HOST) +SUPPORT_PDVFS ?= 1 +SUPPORT_WORKLOAD_ESTIMATION ?= 1 +DISABLE_GPU_FREQUENCY_CALIBRATION ?= 1 +ifeq ($(PDVFS_COM),PDVFS_COM_AP) +ifneq ($(PVR_GPIO_MODE),) +ifneq ($(PVR_GPIO_MODE),PVR_GPIO_MODE_GENERAL) +# GPIO cannot be used for power monitoring with PDVFS_COM_AP +$(error PDVFS_COM_AP is compatible with PVR_GPIO_MODE_GENERAL only) +endif +endif +endif +endif +endif + +ifeq ($(SUPPORT_PDVFS),1) +$(eval $(call BothConfigMake,SUPPORT_PDVFS,1,\ +Enabling this feature enables proactive dvfs in the firmware._\ +)) +$(eval $(call BothConfigC,SUPPORT_PDVFS,1)) + +$(eval $(call BothConfigC,SUPPORT_PDVFS_IDLE,$(SUPPORT_PDVFS_IDLE),\ +This enables idle management in PDVFS._\ +)) +endif + +ifeq ($(SUPPORT_WORKLOAD_ESTIMATION),1) +$(eval $(call BothConfigMake,SUPPORT_WORKLOAD_ESTIMATION,1,\ +Enabling this feature enables workload intensity estimation from a workloads_\ +characteristics and assigning a deadline to it._\ +)) +$(eval $(call BothConfigC,SUPPORT_WORKLOAD_ESTIMATION,1)) +endif + +# +# These specify how PDVFS OPP values are sent by the firmware +# +$(eval $(call BothConfigMake,PDVFS_COM_HOST,1,\ +Enables host shared-memory protocol._\ +)) +$(eval $(call BothConfigC,PDVFS_COM_HOST,1)) + +$(eval $(call BothConfigMake,PDVFS_COM_AP,2,\ +Enables GPIO address protocol._\ +)) +$(eval $(call BothConfigC,PDVFS_COM_AP,2)) + +$(eval $(call BothConfigMake,PDVFS_COM_PMC,3,\ +Enables GPIO power management controller protocol._\ +)) +$(eval $(call BothConfigC,PDVFS_COM_PMC,3)) + +$(eval $(call BothConfigMake,PDVFS_COM_IMG_CLKDIV,4,\ +Enables GPIO clock divider control protocol._\ +)) +$(eval $(call BothConfigC,PDVFS_COM_IMG_CLKDIV,4)) + +ifeq ($(SUPPORT_STRIP_RENDERING),1) +PDVFS_COM ?= PDVFS_COM_AP +else +PDVFS_COM ?= PDVFS_COM_HOST +endif + +$(eval $(call BothConfigMake,PDVFS_COM,$(PDVFS_COM))) +$(eval $(call BothConfigC,PDVFS_COM,$(PDVFS_COM))) + +# +# These specify how RGX 
GPIO port is used by the firmware. +# +$(eval $(call BothConfigMake,PVR_GPIO_MODE_GENERAL,1,\ +Enable basic send and receive using GPIO._\ +)) +$(eval $(call BothConfigC,PVR_GPIO_MODE_GENERAL,1)) + +$(eval $(call BothConfigMake,PVR_GPIO_MODE_POWMON_PIN,2,\ +Enables PMC power monitoring using GPIO._\ +)) +$(eval $(call BothConfigC,PVR_GPIO_MODE_POWMON_PIN,2)) + +PVR_GPIO_MODE ?= PVR_GPIO_MODE_GENERAL +$(eval $(call BothConfigMake,PVR_GPIO_MODE,$(PVR_GPIO_MODE))) +$(eval $(call BothConfigC,PVR_GPIO_MODE,$(PVR_GPIO_MODE))) + +# If NDK_ROOT is set, SUPPORT_WORKLOAD_ESTIMATION can't be, because the +# ANDROID_WSEGL module uses APIs (binder, gui) which are not in the NDK. +ifeq ($(SUPPORT_WORKLOAD_ESTIMATION),1) + ifneq ($(PVR_ANDROID_HAS_COMPOSITION_TIMINGS),1) + ifneq ($(NDK_ROOT),) + $(error SUPPORT_WORKLOAD_ESTIMATION and NDK_ROOT are incompatible features) + endif + ifeq ($(EGL_WSEGL_DIRECTLY_LINKED),1) + $(error EGL_WSEGL_DIRECTLY_LINKED and SUPPORT_WORKLOAD_ESTIMATION are not supported at the same time) + endif + endif +endif + +$(eval $(call TunableKernelConfigMake,PVR_HANDLE_BACKEND,idr,\ +Specifies the back-end that should be used$(comma) by the Services kernel handle_\ +interface$(comma) to allocate handles. The available backends are:_\ +* generic (OS agnostic)_\ +* idr (Uses the Linux IDR interface)_\ +)) + + +$(eval $(call TunableBothConfigC,PVRSRV_ENABLE_PROCESS_STATS,1,\ +Enable the collection of Process Statistics in the kernel Server module._\ +Feature on by default. Driver_stats summary presented in DebugFS on Linux._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_DEBUG_LINUX_MEMORY_STATS,,\ +Present Process Statistics memory stats in a more detailed manner to_\ +assist with debugging and finding memory leaks (under Linux only)._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_ENABLE_PERPID_STATS,,\ +Enable the presentation of process statistics in the kernel Server module._\ +Feature off by default. 
\ +)) + +# SUPPORT_DMABUF_BRIDGE is set to include the dmabuf.brg in bridge generation +# by default for all Linux based builds. +$(eval $(call TunableBothConfigMake,SUPPORT_DMABUF_BRIDGE,1)) + +# SUPPORT_USC_BREAKPOINT is set to include the rgxbreakpoint.brg in bridge generation +# and to enable USC breakpoint in FW. Disabled by default for all Linux based builds. +# +#SUPPORT_USC_BREAKPOINT ?= 1 +$(eval $(call TunableBothConfigMake,SUPPORT_USC_BREAKPOINT,)) +$(eval $(call TunableBothConfigC,SUPPORT_USC_BREAKPOINT,,Enable the USC breakpoint support)) + +# EXCLUDE_CMM_BRIDGE is set to exclude the cmm.brg bridge in +# the Kernel This is disabled by default for release builds. +# +$(eval $(call TunableBothConfigMake,EXCLUDE_CMM_BRIDGE,)) +$(eval $(call TunableBothConfigC,EXCLUDE_CMM_BRIDGE,,Disables the cmm bridge)) + +# EXCLUDE_HTBUFFER_BRIDGE is set to exclude the htbuffer.brg bridge in +# the Kernel This is disabled by default for release builds. +# +$(eval $(call TunableBothConfigMake,EXCLUDE_HTBUFFER_BRIDGE,)) +$(eval $(call TunableBothConfigC,EXCLUDE_HTBUFFER_BRIDGE,,Disables the htbuffer bridge)) + +# EXCLUDE_RGXREGCONFIG_BRIDGE is set to exclude the rgxregconfig.brg bridge in +# the Kernel This is disabled by default for release builds. +# +$(eval $(call TunableBothConfigMake,EXCLUDE_RGXREGCONFIG_BRIDGE,)) +$(eval $(call TunableBothConfigC,EXCLUDE_RGXREGCONFIG_BRIDGE,,Disables the RGX regconfig bridge)) + +# PVRSRV_ENABLE_GPU_MEMORY_INFO is set to enable RI annotation of devmem allocations +# This is enabled by default for debug builds. +# +$(eval $(call TunableBothConfigMake,PVRSRV_ENABLE_GPU_MEMORY_INFO,)) +$(eval $(call TunableBothConfigC,PVRSRV_ENABLE_GPU_MEMORY_INFO,,\ +Enable Resource Information (RI) debug. This logs details of_\ +resource allocations with annotation to help indicate their use._\ +)) + +# PVRSRV_DEBUG_CSW_STATE is set to provide context switch task information +# to FSIM. This is disabled by default for debug and release builds. 
+ +ifeq ($(PDUMP),1) +# Force enable TBI interface in PDUMP mode + override SUPPORT_TBI_INTERFACE :=1 +endif + +ifeq ($(VIRTUAL_PLATFORM),1) +# Force enable TBI interface for VIRTUAL_PLATFORM + override SUPPORT_TBI_INTERFACE :=1 +endif + +$(eval $(call TunableBothConfigC,SUPPORT_TBI_INTERFACE,,\ +Enable TBI interface support for firmware._\ +)) + +$(eval $(call TunableBothConfigC,SUPPORT_FIRMWARE_GCOV,,\ +Enable gcov support for firmware._\ +)) + + +ifeq ($(SUPPORT_TRP),1) +$(eval $(call BothConfigMake,SUPPORT_TRP,1)) +$(eval $(call BothConfigC,SUPPORT_TRP,1)) +endif + +ifeq ($(SUPPORT_FBCDC_SIGNATURE_CHECK),1) +$(eval $(call BothConfigC,SUPPORT_FBCDC_SIGNATURE_CHECK,1)) +endif + +ifeq ($(PVRSRV_ENABLE_GPU_MEMORY_INFO),1) +# Increase the default annotation max length to 96 if RI_DEBUG is enabled +override PVR_ANNOTATION_MAX_LEN ?= 96 +endif + +# Default annotation max length to 63 if no other debug options are specified +$(eval $(call TunableBothConfigC,PVR_ANNOTATION_MAX_LEN,63,\ +Defines the max length for PMR$(comma) MemDesc$(comma) Device_\ +Memory History and RI debug annotations stored in memory.\ +)) + +$(eval $(call TunableKernelConfigC,DEBUG_BRIDGE_KM,,\ +Enable Services bridge debugging and bridge statistics output_\ +)) + +# if PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE has been specified then ensure +# PVRSRV_ENABLE_CCCB_UTILISATION_INFO is enabled +ifeq ($(PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE),1) +override PVRSRV_ENABLE_CCCB_UTILISATION_INFO := 1 +endif + +$(eval $(call TunableKernelConfigC,PVRSRV_ENABLE_CCCB_UTILISATION_INFO,1,\ +Calculate high watermarks of all the client CCBs and print a warning if the_\ +watermarks touched a certain threshold value (90% by default) of the cCCB allocation size._\ +)) + +$(eval $(call TunableKernelConfigC,PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD,90,\ +Default threshold (as a percentage) for the PVRSRV_ENABLE_CCCB_UTILISATION_INFO feature._\ +)) + +$(eval $(call 
TunableKernelConfigC,PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE,,\ +Provides more information to PVRSRV_ENABLE_CCCB_UTILISATION_INFO messages._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_ENABLE_MEMORY_STATS,,\ +Enable Memory allocations to be recorded and published via Process Statistics._\ +)) + +$(eval $(call TunableKernelConfigC,PVRSRV_ENABLE_MEMTRACK_STATS_FILE,,\ +Enable the memtrack_stats debugfs file when not on an Android platform._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_STRICT_COMPAT_CHECK,,\ +Enable strict mode of checking all the build options between um & km._\ +The driver may fail to load if there is any mismatch in the options._\ +)) + +$(eval $(call TunableBothConfigC,PVR_LINUX_PHYSMEM_MAX_POOL_PAGES,10240,\ +Defines how many pages the page cache should hold.)) + +$(eval $(call TunableBothConfigC,PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES,20480,\ +We double check if we would exceed this limit if we are below MAX_POOL_PAGES_\ +and want to add an allocation to the pool._\ +This prevents big allocations being given back to the OS just because they_\ +exceed the MAX_POOL_PAGES limit even though the pool is currently empty._\ +)) + +$(eval $(call TunableBothConfigC,PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES,,\ +All device memory allocated from the OS via the Rogue driver will be zeroed_\ +when this is defined. This may not be necessary in closed platforms where_\ +undefined data from previous use in device memory is acceptable._\ +This feature may change the performance signature of the drivers memory_\ +allocations on some platforms and kernels._\ +)) + +PVR_LINUX_PHYSMEM_SUPPRESS_DMA_AC ?= 0 +$(eval $(call TunableKernelConfigC,PVR_LINUX_PHYSMEM_SUPPRESS_DMA_AC,PVR_LINUX_PHYSMEM_SUPPRESS_DMA_AC,\ +Higher order page requests on Linux use dma_alloc_coherent but on some systems_\ +it could return pages from high memory and map those to the vmalloc space._\ +Since graphics demand a lot of memory the system could quickly exhaust the_\ +vmalloc space. 
Setting this define will suppress the use of dma_alloc_coherent_\ +and fall back to use alloc_pages and not map them to vmalloc space unless_\ +requested explicitly by the driver._\ +)) + +$(eval $(call TunableKernelConfigC,PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY,,\ +GPU buffers are allocated from the highmem region by default._\ +Only affects 32bit systems and devices with DMA_BIT_MASK equal to 32._\ +)) + +$(eval $(call TunableKernelConfigC,PVR_PMR_TRANSLATE_UMA_ADDRESSES,,\ +Requests for physical addresses from the PMR will translate the addresses_\ +retrieved from the PMR-factory from CpuPAddrToDevPAddr. This can be used_\ +for systems where the GPU has a different view onto the system memory_\ +compared to the CPU._\ +)) + +$(eval $(call TunableBothConfigC,PVR_MMAP_USE_VM_INSERT,,\ +If enabled Linux will always use vm_insert_page for CPU mappings._\ +vm_insert_page was found to be slower than remap_pfn_range on ARM kernels_\ +but guarantees full memory accounting for the process that mapped the memory._\ +The slowdown in vm_insert_page is caused by a dcache flush_\ +that is only implemented for ARM and a few other architectures._\ +This tunable can be enabled to debug memory issues. On x86 platforms_\ +we always use vm_insert_page independent of this tunable._\ +)) + +$(eval $(call TunableBothConfigC,PVR_DIRTY_BYTES_FLUSH_THRESHOLD,524288,\ +When allocating uncached or write-combine memory we need to invalidate the_\ +CPU cache before we can use the acquired pages; also when using cached memory_\ +we need to clean/flush the CPU cache before we transfer ownership of the_\ +memory to the device. 
This threshold defines at which number of pages expressed_\ +in bytes we want to do a full cache flush instead of invalidating pages one by one._\ +Default value is 524288 bytes or 128 pages; ideal value depends on SoC cache size._\ +)) + +$(eval $(call TunableBothConfigC,PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD,256,\ +Allocate OS pages in 2^(order) chunks if more than this threshold were requested_\ +)) + +PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER ?= 2 +$(eval $(call TunableBothConfigC,PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM,$(PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER),\ +Allocate OS pages in 2^(order) chunks to help reduce duration of large allocations_\ +)) + +$(eval $(call TunableBothConfigC,PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD,16384,\ +Choose the threshold at which allocation size the driver uses vmalloc instead of_\ +kmalloc. On highly fragmented systems large kmallocs can fail because it requests_\ +physically contiguous pages. All allocations bigger than this define use vmalloc._\ +)) + +$(eval $(call TunableBothConfigMake,SUPPORT_WRAP_EXTMEM,)) +$(eval $(call TunableBothConfigC,SUPPORT_WRAP_EXTMEM,,\ +This enables support for the Services API function PVRSRVWrapExtMem()_\ +which takes a CPU virtual address with size and imports the physical memory_\ +behind the CPU virtual addresses into Services for use with the GPU. It_\ +returns a memory descriptor that can be used with the usual services_\ +interfaces. On Linux the preferred method to import memory into the driver_\ +is to use the DMABuf API._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_WRAP_EXTMEM_WRITE_ATTRIB_ENABLE,,\ +Setting this option enables the write attribute for all the device mappings acquired_\ +through the PVRSRVWrapExtMem interface. 
Otherwise the option is disabled by default._\ +)) + +ifeq ($(PDUMP),1) +$(eval $(call TunableKernelConfigC,PDUMP_PARAM_INIT_STREAM_SIZE,0x800000,\ +Default size of pdump param init buffer is 8MB)) +$(eval $(call TunableKernelConfigC,PDUMP_PARAM_MAIN_STREAM_SIZE,0x1000000,\ +Default size of PDump param main buffer is 16 MB)) +$(eval $(call TunableKernelConfigC,PDUMP_PARAM_DEINIT_STREAM_SIZE,0x10000,\ +Default size of PDump param deinit buffer is 64KB)) +# Default size of PDump param block buffer is 0KB as it is currently not in use +$(eval $(call TunableKernelConfigC,PDUMP_PARAM_BLOCK_STREAM_SIZE,0x0,\ +Default size of PDump param block buffer is 0KB)) +$(eval $(call TunableKernelConfigC,PDUMP_SCRIPT_INIT_STREAM_SIZE,0x800000,\ +Default size of PDump script init buffer is 8MB)) +$(eval $(call TunableKernelConfigC,PDUMP_SCRIPT_MAIN_STREAM_SIZE,0x800000,\ +Default size of PDump script main buffer is 8MB)) +$(eval $(call TunableKernelConfigC,PDUMP_SCRIPT_DEINIT_STREAM_SIZE,0x10000,\ +Default size of PDump script deinit buffer is 64KB)) +$(eval $(call TunableKernelConfigC,PDUMP_SCRIPT_BLOCK_STREAM_SIZE,0x800000,\ +Default size of PDump script block buffer is 8MB)) +$(eval $(call TunableKernelConfigC,PDUMP_SPLIT_64BIT_REGISTER_ACCESS,,\ + Split 64 bit RGX register accesses into two 32 bit)) +endif + + +# Fence Sync build tunables +# Default values dependent on WINDOW_SYSTEM and found in window_system.mk +# +$(eval $(call TunableBothConfigMake,SUPPORT_NATIVE_FENCE_SYNC,$(SUPPORT_NATIVE_FENCE_SYNC))) +$(eval $(call TunableBothConfigC,SUPPORT_NATIVE_FENCE_SYNC,,\ +Use the Linux native fence sync back-end with timelines and fences)) + +$(eval $(call TunableBothConfigMake,SUPPORT_FALLBACK_FENCE_SYNC,)) +$(eval $(call TunableBothConfigC,SUPPORT_FALLBACK_FENCE_SYNC,,\ +Use Services OS agnostic fallback fence sync back-end with timelines and fences)) + +$(eval $(call TunableBothConfigC,PVRSRV_STALLED_CCB_ACTION,1,\ +This determines behaviour of DDK on detecting that a cCCB_\ 
+has stalled (failed to progress for a number of seconds when GPU is idle):_\ + "" = Output warning message to kernel log only_\ + "1" = Output warning message and additionally try to unblock cCCB by_\ + erroring sync checkpoints on which it is fenced (the value of any_\ + sync prims in the fenced will remain unmodified)_\ +)) + +# Fallback and native sync implementations are mutually exclusive because they +# both offer an implementation for the same interface +ifeq ($(SUPPORT_FALLBACK_FENCE_SYNC),1) +ifeq ($(SUPPORT_NATIVE_FENCE_SYNC),1) +$(error Choose either SUPPORT_NATIVE_FENCE_SYNC=1 or SUPPORT_FALLBACK_FENCE_SYNC=1 but not both) +endif +endif + +ifeq ($(SUPPORT_NATIVE_FENCE_SYNC),1) +PVR_USE_LEGACY_SYNC_H ?= 1 + +endif + +$(eval $(call TunableBothConfigC,PVRSRV_SYNC_CHECKPOINT_CCB,,\ +Enabling this feature enables use of the sync checkpoint CCB._\ +)) + +ifeq ($(SUPPORT_NATIVE_FENCE_SYNC),1) +ifneq ($(KERNEL_VERSION),) +ifeq ($(CHROMIUMOS_KERNEL),1) +KERNEL_COMPATIBLE_WITH_OLD_ANS := $(shell ( [ $(KERNEL_VERSION) -lt 4 ] || \ +[ $(KERNEL_VERSION) -eq 4 -a $(KERNEL_PATCHLEVEL) -lt 4 ] ) && echo 1 || echo 0) +else +KERNEL_COMPATIBLE_WITH_OLD_ANS := $(shell ( [ $(KERNEL_VERSION) -lt 4 ] || \ +[ $(KERNEL_VERSION) -eq 4 -a $(KERNEL_PATCHLEVEL) -lt 6 ] ) && echo 1 || echo 0) +endif +ifneq ($(KERNEL_COMPATIBLE_WITH_OLD_ANS),1) +# DMA fence objects are only supported when using checkpoints +override SUPPORT_DMA_FENCE := 1 +endif +KERNEL_COMPATIBLE_WITH_OLD_ANS := +endif +endif + +# This value is needed by ta/3d kick for early command size calculation. 
+ifeq ($(SUPPORT_NATIVE_FENCE_SYNC),1) +ifeq ($(SUPPORT_DMA_FENCE),) +$(eval $(call KernelConfigC,UPDATE_FENCE_CHECKPOINT_COUNT,2)) +else +$(eval $(call KernelConfigC,UPDATE_FENCE_CHECKPOINT_COUNT,1)) +endif +else +$(eval $(call KernelConfigC,UPDATE_FENCE_CHECKPOINT_COUNT,1)) +endif + +$(eval $(call TunableKernelConfigMake,SUPPORT_DMA_FENCE,)) + +$(eval $(call BothConfigC,PVR_DRM_NAME,"\"pvr\"")) + + + +$(eval $(call TunableKernelConfigC,PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS,,\ +If enabled$(comma) all kernel mappings will use vmap/vunmap._\ +vmap/vunmap is slower than vm_map_ram/vm_unmap_ram and can_\ +even have bad peaks taking up to 100x longer than vm_map_ram._\ +The disadvantage of vm_map_ram is that it can lead to vmalloc space_\ +fragmentation that can lead to vmalloc space exhaustion on 32 bit Linux systems._\ +This flag only affects 64 bit Linux builds$(comma) on 32 bit we always default_\ +to use vmap because of the described fragmentation problem._\ +)) + +$(eval $(call TunableBothConfigC,DEVICE_MEMSETCPY_ALIGN_IN_BYTES,16,\ +Sets pointer alignment (in bytes) needed by PVRSRVDeviceMemSet/Copy_\ +for arm64 arch._\ +This value should reflect memory bus width e.g. 
if the bus is 64 bits_\ +wide this value should be set to 8 bytes (though it's not a hard requirement)._\ +)) + + +$(eval $(call TunableKernelConfigC,PVRSRV_DEBUG_LISR_EXECUTION,,\ +Collect information about the last execution of the LISR in order to_\ +debug interrupt handling timeouts._\ +)) + +$(eval $(call TunableKernelConfigC,PVRSRV_TIMER_CORRELATION_HISTORY,,\ +Collect information about timer correlation data over time._\ +)) + +$(eval $(call TunableKernelConfigC,DISABLE_GPU_FREQUENCY_CALIBRATION,,\ +Disable software estimation of the GPU frequency done on the Host and used_\ +for timer correlation._\ +)) + +$(eval $(call TunableKernelConfigC,RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS,0,\ +Period (in ms) for which any Sync Lockup Recovery (SLR) behaviour should be_\ +suppressed following driver load. This can help to avoid any attempted SLR_\ +during the boot process._\ +)) + +# Set default CCB sizes +# Key for log2 CCB sizes: +# 13=8K 14=16K 15=32K 16=64K 17=128K +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D,14,\ +Define the log2 size of the TQ3D client CCB._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D,14,\ +Define the log2 size of the TQ2D client CCB._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM,13,\ +Define the log2 size of the CDM client CCB._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA,15,\ +Define the log2 size of the TA client CCB._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D,16,\ +Define the log2 size of the 3D client CCB._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC,13,\ +Define the log2 size of the KickSync client CCB._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TDM,14,\ +Define the log2 size of the TDM client CCB._\ +)) + +# Max sizes (used in CCB grow feature) +$(eval $(call 
TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ3D,17,\ +Define the log2 max size of the TQ3D client CCB._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ2D,17,\ +Define the log2 max size of the TQ2D client CCB._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_CDM,15,\ +Define the log2 max size of the CDM client CCB._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TA,16,\ +Define the log2 max size of the TA client CCB._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_3D,17,\ +Define the log2 max size of the 3D client CCB._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_KICKSYNC,13,\ +Define the log2 max size of the KickSync client CCB._\ +)) + +$(eval $(call TunableBothConfigC,PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TDM,17,\ +Define the log2 max size of the TDM client CCB._\ +)) + +endif # INTERNAL_CLOBBER_ONLY + +export INTERNAL_CLOBBER_ONLY +export TOP +export OUT +export PVR_ARCH + +MAKE_ETC := -Rr --no-print-directory -C $(TOP) \ + TOP=$(TOP) OUT=$(OUT) HWDEFS_DIR=$(HWDEFS_DIR) \ + -f build/linux/toplevel.mk + +# This must match the default value of MAKECMDGOALS below, and the default +# goal in toplevel.mk +.DEFAULT_GOAL := build + +ifeq ($(MAKECMDGOALS),) +MAKECMDGOALS := build +else +# We can't pass autogen to toplevel.mk +MAKECMDGOALS := $(filter-out autogen,$(MAKECMDGOALS)) +endif + +.PHONY: autogen +autogen: +ifeq ($(INTERNAL_CLOBBER_ONLY),) + @$(MAKE) -s --no-print-directory -C $(TOP) \ + -f build/linux/prepare_tree.mk +else + @: +endif + +include ../config/help.mk + +# This deletes built-in suffix rules. Otherwise the submake isn't run when +# saying e.g. "make thingy.a" +.SUFFIXES: + +# Because we have a match-anything rule below, we'll run the main build when +# we're actually trying to remake various makefiles after they're read in. 
+# These rules try to prevent that +%.mk: ; +Makefile%: ; +Makefile: ; + +tags: + cd $(TOP) ; \ + ctags \ + --recurse=yes \ + --exclude=binary_* \ + --exclude=caches \ + --exclude=docs \ + --exclude=external \ + --languages=C,C++ + +.PHONY: build kbuild install +build kbuild install: MAKEOVERRIDES := +build kbuild install: autogen + @$(if $(MAKECMDGOALS),$(MAKE) $(MAKE_ETC) $(MAKECMDGOALS) $(eval MAKECMDGOALS :=),:) + +%: MAKEOVERRIDES := +%: autogen + @$(if $(MAKECMDGOALS),$(MAKE) $(MAKE_ETC) $(MAKECMDGOALS) $(eval MAKECMDGOALS :=),:) diff --git a/drivers/mcst/gpu-imgtec/build/linux/config/default_window_system.mk b/drivers/mcst/gpu-imgtec/build/linux/config/default_window_system.mk new file mode 100644 index 000000000000..f49f57353c00 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/config/default_window_system.mk @@ -0,0 +1,43 @@ +########################################################################### ### +#@File +#@Title Set the default window system to Wayland +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +WINDOW_SYSTEM ?= wayland diff --git a/drivers/mcst/gpu-imgtec/build/linux/config/help.mk b/drivers/mcst/gpu-imgtec/build/linux/config/help.mk new file mode 100644 index 000000000000..999a62a9fc07 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/config/help.mk @@ -0,0 +1,74 @@ +########################################################################### ### +#@File +#@Title Targets for printing config option help +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +define newline + + +endef +empty := + +define abbrev-option-value +$(if $(word 6,$(1)),$(wordlist 1,5,$(1))...,$(1)) +endef + +define print-option-help +# Print the option name and value +$(info $(1) ($(if $($(1)),$(call abbrev-option-value,$($(1))),), default $(if $(INTERNAL_CONFIG_DEFAULT_FOR_$(1)),$(call abbrev-option-value,$(INTERNAL_CONFIG_DEFAULT_FOR_$(1))),))$(if $(INTERNAL_DESCRIPTION_FOR_$(1)),:,)) +# Ensure the config help text ends with a newline +$(and $(INTERNAL_DESCRIPTION_FOR_$(1)),$(if $(filter %_,$(word $(words $(INTERNAL_DESCRIPTION_FOR_$(1))),$(INTERNAL_DESCRIPTION_FOR_$(1)))),,$(eval INTERNAL_DESCRIPTION_FOR_$(1) := $(INTERNAL_DESCRIPTION_FOR_$(1))_ ))) +# Print the config help text +$(info $(empty) $(subst _ ,$(newline) ,$(INTERNAL_DESCRIPTION_FOR_$(1)))) +endef + +.PHONY: confighelp allconfighelp +# Show only the config options that have help text +confighelp: + @: $(foreach _o,$(sort $(ALL_TUNABLE_OPTIONS)),$(if $(INTERNAL_DESCRIPTION_FOR_$(_o)),$(call print-option-help,$(_o)),)) +# Show all the config options +allconfighelp: + @: $(foreach _o,$(sort $(ALL_TUNABLE_OPTIONS)),$(call print-option-help,$(_o))) + + +ifneq ($(filter confighelp-%,$(MAKECMDGOALS)),) +confighelp-%: + @: $(if $(filter $*,$(ALL_TUNABLE_OPTIONS)),$(call print-option-help,$*),$(info $* is not a tunable config option)) +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/config/kernel-defs.mk b/drivers/mcst/gpu-imgtec/build/linux/config/kernel-defs.mk new file mode 100644 index 000000000000..cfdfea0465a4 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/config/kernel-defs.mk @@ -0,0 +1,143 @@ +############################################################################ ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ############################################################################ + +# The purpose of this file is to keep KernelConfig macros isolated from the +# BothConfig and UserConfig macros. This allows this file to be removed from +# the user mode package and, thus, avoids the kernel config files being +# generated when building from this package. +# + +# Write out a kernel GNU make option. +# +define KernelConfigMake +$$(shell echo "override $(1) $(if $(2),:= $(strip $(2)),:=)" >>$(CONFIG_KERNEL_MK).new) +$(if $(filter config,$(D)),$(info KernelConfigMake $(1) := $(2) # $(if $($(1)),$(origin $(1)),default))) +endef + +# Conditionally write out a kernel GNU make option +# +define _TunableKernelConfigMake +ifneq ($$($(1)),) +ifneq ($$($(1)),0) +$$(eval $$(call KernelConfigMake,$(1),$$($(1)))) +else +unexport $(1) +endif +else +ifneq ($(2),) +$$(eval $$(call KernelConfigMake,$(1),$(2))) +else +unexport $(1) +endif +endif +endef + +define TunableKernelConfigMake +$$(eval $$(call _TunableKernelConfigMake,$(1),$(2))) +$(call RegisterOptionHelp,$(1),$(2),$(3),$(4)) +endef + +# Write out a kernel-only option +# +define KernelConfigC +$$(shell echo "#define $(if $(2),$(1) $(2),$(1))" >>$(CONFIG_KERNEL_H).new) +$(if $(filter config,$(D)),$(info KernelConfigC #define $(1) $(2) /* $(if $($(1)),$(origin $(1)),default) */),) +endef + +# Write out kernel-only AppHint defaults as specified +# +define AppHintConfigC +ifneq ($$($(1)),) +$$(eval $$(call KernelConfigC,$(1),$$($(1)))) +else +$$(eval $$(call KernelConfigC,$(1),$(2))) +endif +$(call RegisterOptionHelp,$(1),$(2),$(3),$(4)) +endef + +# Write out kernel-only AppHint +# Converts user supplied value to a set of ORed flags or defaults as specified +# +define AppHintFlagsConfigC +ifneq ($$($(1)),) +$$(eval $$(call ValidateValues,$(1),$(4))) +_$(1)_FLAGS := $$(subst $$(comma),$$(space),$$($(1))) +_$(1)_FLAGS := $$(strip $$(foreach group,$$(_$(1)_FLAGS),$(3)$$(group))) +_$(1)_FLAGS := $$(subst $$(space), | 
,$$(_$(1)_FLAGS)) +$$(eval $$(call KernelConfigC,$(1),$$(_$(1)_FLAGS))) +else +$$(eval $$(call KernelConfigC,$(1),$(2))) +endif +$(call RegisterOptionHelp,$(1),$(2),$(5)) +endef + +# Conditionally write out a kernel-only option +# +define _TunableKernelConfigC +ifneq ($$($(1)),) +ifneq ($$($(1)),0) +ifeq ($$($(1)),1) +$$(eval $$(call KernelConfigC,$(1),)) +else +$$(eval $$(call KernelConfigC,$(1),$$($(1)))) +endif +endif +else +ifneq ($(2),) +ifeq ($(2),1) +$$(eval $$(call KernelConfigC,$(1),)) +else +$$(eval $$(call KernelConfigC,$(1),$(2))) +endif +endif +endif +endef + +define TunableKernelConfigC +$$(eval $$(call _TunableKernelConfigC,$(1),$(2))) +$(call RegisterOptionHelp,$(1),$(2),$(3),$(4)) +endef + +# delete any previous intermediary files +$(shell \ + for file in $(CONFIG_KERNEL_H).new $(CONFIG_KERNEL_MK).new ; do \ + rm -f $$file; \ + done) diff --git a/drivers/mcst/gpu-imgtec/build/linux/config/kernel_version.mk b/drivers/mcst/gpu-imgtec/build/linux/config/kernel_version.mk new file mode 100644 index 000000000000..8a2fd5480b19 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/config/kernel_version.mk @@ -0,0 +1,103 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +$(if $(KERNELDIR),,$(error KERNELDIR must be set to obtain a version)) + +override KERNEL_MAKEFILE := \ + $(shell realpath $(KERNELDIR)/source/Makefile 2> /dev/null || echo $(KERNELDIR)/Makefile) + +override KERNEL_VERSION := \ + $(shell grep "^VERSION = " $(KERNEL_MAKEFILE) | cut -f3 -d' ') +override KERNEL_PATCHLEVEL := \ + $(shell grep "^PATCHLEVEL = " $(KERNEL_MAKEFILE) | cut -f3 -d' ') +override KERNEL_SUBLEVEL := \ + $(shell grep "^SUBLEVEL = " $(KERNEL_MAKEFILE) | cut -f3 -d' ') +override KERNEL_EXTRAVERSION := \ + $(shell grep "^EXTRAVERSION = " $(KERNEL_MAKEFILE) | cut -f3 -d' ') + +# Break the kernel version up into a space separated list +kernel_version_as_list := $(KERNEL_VERSION) \ + $(KERNEL_PATCHLEVEL) \ + $(KERNEL_SUBLEVEL) \ + $(patsubst .%,%,$(KERNEL_EXTRAVERSION)) + +# The base ID doesn't have to be accurate; we only use it for +# feature checks which will not care about extraversion bits +# +override KERNEL_BASE_ID := \ + $(KERNEL_VERSION).$(KERNEL_PATCHLEVEL).$(KERNEL_SUBLEVEL) + +# Try to get the kernel ID from the kernel.release file. +# +KERNEL_ID ?= \ + $(shell cat $(KERNELDIR)/include/config/kernel.release 2>/dev/null) + +# If the kernel ID isn't set yet, try to set it from the UTS_RELEASE +# macro. +# +ifeq ($(strip $(KERNEL_ID)),) +KERNEL_ID := \ + $(shell grep -h '\#define UTS_RELEASE' \ + $(KERNELDIR)/include/linux/* | cut -f3 -d' ' | sed s/\"//g) +endif + +ifeq ($(strip $(KERNEL_ID)),) +KERNEL_ID := \ + $(KERNEL_VERSION).$(KERNEL_PATCHLEVEL).$(KERNEL_SUBLEVEL)$(KERNEL_EXTRAVERSION) +endif + +# Return 1 if the kernel version is at least the value passed to the +# function, else return nothing. 
+# Examples +# $(call kernel-version-at-least,2,6,35) +# $(call kernel-version-at-least,2,6,35,7) +# +define kernel-version-at-least +$(shell set -- $(kernel_version_as_list) 0 0 0 0; \ + Y=true; \ + for D in $1 $2 $3 $4; \ + do \ + [ $$1 ] || break; \ + [ $$1 -eq $$D ] && { shift; continue; };\ + [ $$1 -lt $$D ] && Y=; \ + break; \ + done; \ + echo $$Y) +endef diff --git a/drivers/mcst/gpu-imgtec/build/linux/config/preconfig.mk b/drivers/mcst/gpu-imgtec/build/linux/config/preconfig.mk new file mode 100644 index 000000000000..ba5b534ba0be --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/config/preconfig.mk @@ -0,0 +1,387 @@ +########################################################################### ### +#@File +#@Title Set up configuration required by build-directory Makefiles +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +# NOTE: Don't put anything in this file that isn't strictly required +# by the build-directory Makefiles. It should go in core.mk otherwise. + +TOP ?= $(abspath ../../..) + +# Some miscellaneous things to make comma substitutions easier. +apos := '#' +comma := , +empty := +space := $(empty) $(empty) +define newline + + +endef + +ifneq ($(words $(TOP)),1) +$(warning This source tree is located in a path which contains whitespace,) +$(warning which is not supported.) 
+$(warning ) +$(warning $(space)The root is: $(TOP)) +$(warning ) +$(error Whitespace found in $$(TOP)) +endif + +$(call directory-must-exist,$(TOP)) + +define ValidateValues +_supported_values := $(2) +_values := $$(subst $$(comma),$$(space),$$($(1))) +_unrecognised_values := $$(strip $$(filter-out $$(_supported_values),$$(_values))) +ifneq ($$(_unrecognised_values),) +$$(warning *** Unrecognised value(s): $$(_unrecognised_values)) +$$(warning *** $(1) was set via: $(origin $(1))) +$$(error Supported values are: $$(_supported_values)) +endif +endef + +ifeq ($(SUPPORT_NEUTRINO_PLATFORM),1) +include ../common/neutrino/preconfig_neutrino.mk +_CC := $(CC) +else + +CC_CHECK := ../tools/cc-check.sh +CHMOD := chmod + +PVR_BUILD_DIR := $(notdir $(abspath .)) +ifneq ($(PVR_BUILD_DIR),$(patsubst %_android,%,$(PVR_BUILD_DIR))) # Android build + include ../common/android/platform_version.mk + ifneq ($(USE_CLANG),0) + prefer_clang := true + else + $(info WARNING: USE_CLANG=0 is deprecated for Android builds) + endif +else + ifeq ($(USE_CLANG),1) + prefer_clang := true + endif +endif + +CROSS_TRIPLE := $(patsubst %-,%,$(notdir $(CROSS_COMPILE))) + +define clangify + ifneq ($$(strip $$(CROSS_TRIPLE)),) + _$(1) := $$($(1)) -target $$(patsubst %-,%,$$(CROSS_TRIPLE)) -Qunused-arguments + else + _$(1) := $$($(1)) -Qunused-arguments + endif +endef + +# GNU Make has builtin values for CC/CXX which we don't want to trust. This +# is because $(CROSS_COMPILE)$(CC) doesn't always expand to a cross compiler +# toolchain binary name (e.g. most toolchains have 'gcc' but not 'cc'). 
+ +CLANG ?= clang +ifeq ($(origin CC),default) + ifeq ($(prefer_clang),true) + export CC := $(CLANG) + _CLANG := true + $(eval $(call clangify,CC)) + else + CC := gcc + _CC := $(CROSS_COMPILE)gcc + endif +else + _CLANG := $(shell $(CC_CHECK) --clang --cc "$(CC)") + ifeq ($(_CLANG),true) + $(eval $(call clangify,CC)) + else + _CC := $(CC) + endif +endif + +CLANG_CXX ?= clang++ +ifeq ($(origin CXX),default) + ifeq ($(prefer_clang),true) + export CXX := $(CLANG_CXX) + else + CXX := g++ + endif +endif + +CC_SECONDARY ?= $(CC) +CXX_SECONDARY ?= $(CXX) +ifeq ($(prefer_clang),true) + export CC_SECONDARY + export CXX_SECONDARY +endif + +ifeq ($(prefer_clang),true) + ifeq ($(HOST_CC),) + export HOST_CC := /usr/bin/clang + endif + ifeq ($(HOST_CXX),) + export HOST_CXX := /usr/bin/clang++ + endif +else + HOST_CC ?= gcc +endif + +# Work out if we are targeting ARM before we start tweaking _CC. +TARGETING_AARCH64 := $(shell \ + $(_CC) -dM -E - /dev/null 2>&1 && echo 1) + +TARGETING_MIPS := $(shell \ + $(_CC) -dM -E - /dev/null 2>&1 && echo 1) + +HOST_CC_IS_LINUX := $(shell \ + $(HOST_CC) -dM -E - /dev/null 2>&1 && echo 1) + +ifneq ($(strip $(KERNELDIR)),) +include ../config/kernel_version.mk +endif + +# The user didn't set CROSS_COMPILE. There's probably nothing wrong +# with that, but we'll let them know anyway. +# +ifeq ($(origin CROSS_COMPILE), undefined) +$(warning CROSS_COMPILE is not set. Target components will be built with the host compiler) +endif + +endif # Neutrino + +define calculate-os + ifeq ($(2),qcc) + $(1)_OS := neutrino + else + compiler_dumpmachine := $$(subst --,-,$$(shell $(2) -dumpmachine)) + ifeq ($$(compiler_dumpmachine),) + $$(warning No output from '$(2) -dumpmachine') + $$(warning Check that the compiler is in your PATH and CROSS_COMPILE is) + $$(warning set correctly.) 
+ $$(error Unable to run compiler '$(2)') + endif + + triplet_word_list := $$(subst -, ,$$(compiler_dumpmachine)) + triplet_word_list_length := $$(words $$(triplet_word_list)) + ifeq ($$(triplet_word_list_length),4) + triplet_vendor := $$(word 2,$$(triplet_word_list)) + triplet_os_list := $$(wordlist 3,$$(triplet_word_list_length),$$(triplet_word_list)) + else ifeq ($$(triplet_word_list_length),3) + triplet_vendor := unknown + triplet_os_list := $$(wordlist 2,$$(triplet_word_list_length),$$(triplet_word_list)) + else + $$(error Unsupported compiler: $(2)) + endif + + triplet_os := $$(subst $$(space),-,$$(triplet_os_list)) + ifeq ($$(triplet_os),linux-android) + $(1)_OS := android + else ifeq ($$(triplet_os),poky-linux) + $(1)_OS := poky + else ifeq ($$(triplet_os),w64-mingw32) + $(1)_OS := windows + else ifneq ($$(findstring linux-gnu,$$(triplet_os)),) + ifneq ($$(filter buildroot cros tizen,$$(triplet_vendor)),) + $(1)_OS := $$(triplet_vendor) + else ifneq ($$(filter none pc unknown,$$(triplet_vendor)),) + $(1)_OS := linux + else + $$(warning Unsupported compiler vendor: $$(triplet_vendor)) + $$(warning Assuming $(1) is a standard Linux distro) + $(1)_OS := linux + endif + else + $$(warning Could not determine $(1)_OS so assuming Linux) + $(1)_OS := linux + endif + endif +endef + +$(eval $(call calculate-os,HOST,$(HOST_CC))) + +ifeq ($(PVR_BUILD_DIR),integrity) + TARGET_OS := integrity +else ifneq ($(PVR_BUILD_DIR),$(patsubst %_arc,%,$(PVR_BUILD_DIR))) + TARGET_OS := arc +else + $(eval $(call calculate-os,TARGET,$(_CC))) +endif + +define is-host-os +$(if $(HOST_OS),$(if $(filter $(1),$(HOST_OS)),true),$(error HOST_OS not set)) +endef + +define is-not-host-os +$(if $(HOST_OS),$(if $(filter-out $(1),$(HOST_OS)),true),$(error HOST_OS not set)) +endef + +define is-target-os +$(if $(TARGET_OS),$(if $(filter $(1),$(TARGET_OS)),true),$(error TARGET_OS not set)) +endef + +define is-not-target-os +$(if $(TARGET_OS),$(if $(filter-out $(1),$(TARGET_OS)),true),$(error 
TARGET_OS not set)) +endef + +ifeq ($(call is-target-os,buildroot),true) + SYSROOT ?= $(shell $(_CC) -print-sysroot) +else ifeq ($(call is-target-os,poky),true) + _OPTIM := $(lastword $(filter -O%,$(CFLAGS))) + ifneq ($(_OPTIM),) + OPTIM ?= $(_OPTIM) + + # Filter out any -O flags in case a platform Makefile makes use of these + # variables + override CFLAGS := $(filter-out -O%,$(CFLAGS)) + override CXXFLAGS := $(filter-out -O%,$(CXXFLAGS)) + endif +endif + +# The user is trying to set one of the old SUPPORT_ options on the +# command line or in the environment. This isn't supported any more +# and will often break the build. The user is generally only trying +# to remove a component from the list of targets to build, so we'll +# point them at the new way of doing this. +define sanity-check-support-option-origin +ifeq ($$(filter undefined file,$$(origin $(1))),) +$$(warning *** Setting $(1) via $$(origin $(1)) is deprecated) +$$(error If you are trying to disable a component, use e.g. EXCLUDED_APIS="opengles1 opengl") +endif +endef +$(foreach _o,SYS_CFLAGS SYS_CXXFLAGS SYS_INCLUDES SYS_EXE_LDFLAGS SYS_LIB_LDFLAGS,$(eval $(call sanity-check-support-option-origin,$(_o)))) + +# Check for words in EXCLUDED_APIS that aren't understood by the +# common/apis/*.mk files. 
This should be kept in sync with all the tests on +# EXCLUDED_APIS in those files +_excludable_apis := camerahal cldnn imgdnn nnhal composerhal hwperftools memtrackhal opencl opengl opengles1 opengles3 rogue2d scripts sensorhal servicestools thermalhal unittests vulkan +_excluded_apis := $(subst $(comma),$(space),$(EXCLUDED_APIS)) + +_unrecognised := $(strip $(filter-out $(_excludable_apis),$(_excluded_apis))) +ifneq ($(_unrecognised),) +$(warning *** Ignoring unrecognised entries in EXCLUDED_APIS: $(_unrecognised)) +$(warning *** EXCLUDED_APIS was set via $(origin EXCLUDED_APIS) to: $(EXCLUDED_APIS)) +$(warning *** Excludable APIs are: $(_excludable_apis)) +endif + +override EXCLUDED_APIS := $(filter $(_excludable_apis), $(_excluded_apis)) + +# Some targets don't need information about any modules. If we only specify +# these targets on the make command line, set INTERNAL_CLOBBER_ONLY to +# indicate that toplevel.mk shouldn't read any makefiles +CLOBBER_ONLY_TARGETS := clean clobber help install +INTERNAL_CLOBBER_ONLY := +ifneq ($(strip $(MAKECMDGOALS)),) +INTERNAL_CLOBBER_ONLY := \ +$(if \ + $(strip $(foreach _cmdgoal,$(MAKECMDGOALS),\ + $(if $(filter $(_cmdgoal),$(CLOBBER_ONLY_TARGETS)),,x))),,true) +endif + +# No need for BVNC information for clobber-only build +ifneq ($(INTERNAL_CLOBBER_ONLY),true) + +# If RGX_BVNC is not defined a valid PVR_ARCH has to be specified +ifeq ($(RGX_BVNC),) + ifneq ($(PVR_ARCH),) + _supported_pvr_archs := rogue volcanic + $(eval $(call ValidateValues,PVR_ARCH,$(_supported_pvr_archs))) + override HWDEFS_DIR := $(TOP)/hwdefs/$(PVR_ARCH) + endif +else + ifneq ($(PVR_ARCH),) + $(warning PVR_ARCH ($(PVR_ARCH)) is specified when RGX_BVNC ($(RGX_BVNC)) is also specified - ignoring PVR_ARCH) + endif +# Extract the BNC config name +RGX_BNC_SPLIT := $(subst .,$(space) ,$(RGX_BVNC)) +RGX_BNC := $(word 1,$(RGX_BNC_SPLIT)).V.$(word 3,$(RGX_BNC_SPLIT)).$(word 4,$(RGX_BNC_SPLIT)) + +HWDEFS_DIR_ROGUE := $(TOP)/hwdefs/rogue +HWDEFS_DIR_VOLCANIC := 
$(TOP)/hwdefs/volcanic + +ALL_KM_BVNCS_ROGUE := \ + $(patsubst rgxcore_km_%.h,%,\ + $(notdir $(shell ls -v $(HWDEFS_DIR_ROGUE)/km/cores/rgxcore_km_*.h 2> /dev/null))) +ALL_KM_BVNCS_VOLCANIC := \ + $(patsubst rgxcore_km_%.h,%,\ + $(notdir $(shell ls -v $(HWDEFS_DIR_VOLCANIC)/km/cores/rgxcore_km_*.h 2> /dev/null))) +ALL_KM_BVNCS := $(ALL_KM_BVNCS_ROGUE) $(ALL_KM_BVNCS_VOLCANIC) + +ifneq ($(filter $(RGX_BVNC),$(ALL_KM_BVNCS_ROGUE)),) + override PVR_ARCH := rogue + override HWDEFS_DIR := $(HWDEFS_DIR_ROGUE) +else ifneq ($(filter $(RGX_BVNC),$(ALL_KM_BVNCS_VOLCANIC)),) + override PVR_ARCH := volcanic + override HWDEFS_DIR := $(HWDEFS_DIR_VOLCANIC) +else + $(error Error: Invalid Kernel core RGX_BVNC=$(RGX_BVNC). \ + $(if $(ALL_KM_BVNCS_ROGUE),$(newline)$(newline)Valid Rogue Kernel core BVNCs:$(newline) \ + $(subst $(space),$(newline)$(space),$(ALL_KM_BVNCS_ROGUE))) \ + $(if $(ALL_KM_BVNCS_VOLCANIC),$(newline)$(newline)Valid Volcanic Kernel core BVNCs:$(newline) \ + $(subst $(space),$(newline)$(space),$(ALL_KM_BVNCS_VOLCANIC)))) +endif + +# Check if BVNC core file exist +RGX_BVNC_CORE_KM := $(HWDEFS_DIR)/km/cores/rgxcore_km_$(RGX_BVNC).h +RGX_BVNC_CORE_KM_HEADER := \"cores/rgxcore_km_$(RGX_BVNC).h\" +# "rgxcore_km_$(RGX_BVNC).h" +ifeq ($(wildcard $(RGX_BVNC_CORE_KM)),) +$(error The file $(RGX_BVNC_CORE_KM) does not exist. \ + Valid BVNCs: $(ALL_KM_BVNCS)) +endif + +# Check BNC config version +ALL_KM_BNCS := \ + $(patsubst rgxconfig_km_%.h,%,\ + $(notdir $(shell ls -v $(HWDEFS_DIR)/km/configs/rgxconfig_km_*.h))) +ifeq ($(filter $(RGX_BNC),$(ALL_KM_BNCS)),) +$(error Error: Invalid Kernel config RGX_BNC=$(RGX_BNC). 
\ + Valid Kernel config BNCs: $(subst $(space),$(comma)$(space),$(ALL_KM_BNCS))) +endif + +# Check if BNC config file exists +RGX_BNC_CONFIG_KM := $(HWDEFS_DIR)/km/configs/rgxconfig_km_$(RGX_BNC).h +RGX_BNC_CONFIG_KM_HEADER := \"configs/rgxconfig_km_$(RGX_BNC).h\" +#"rgxconfig_km_$(RGX_BNC).h" +ifeq ($(wildcard $(RGX_BNC_CONFIG_KM)),) +$(error The file $(RGX_BNC_CONFIG_KM) does not exist. \ + Valid BNCs: $(ALL_KM_BNCS)) +endif +endif + +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/config/window_system.mk b/drivers/mcst/gpu-imgtec/build/linux/config/window_system.mk new file mode 100644 index 000000000000..e9f70d5ba711 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/config/window_system.mk @@ -0,0 +1,191 @@ +########################################################################### ### +#@File +#@Title Select a window system +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +# Set the default window system. +# If you want to override the default, create a default_window_system.mk file +# that sets WINDOW_SYSTEM appropriately. 
(There is a suitable example in +# ../config/default_window_system_xorg.mk) +-include ../config/default_window_system.mk + +ifeq ($(SUPPORT_NEUTRINO_PLATFORM),) +WINDOW_SYSTEM ?= ews +_supported_window_systems := ews lws-generic nulldrmws nullws surfaceless wayland xorg +else +WINDOW_SYSTEM ?= nullws +_supported_window_systems := nullws screen +endif + +_window_system_mk_path := ../common/window_systems +_window_systems := \ + $(sort $(patsubst $(_window_system_mk_path)/%.mk,%,$(wildcard $(_window_system_mk_path)/*.mk))) +_window_systems := $(filter $(_supported_window_systems),$(_window_systems)) + +_unrecognised_window_system := $(strip $(filter-out $(_window_systems),$(WINDOW_SYSTEM))) +ifneq ($(_unrecognised_window_system),) +$(warning *** Unrecognised WINDOW_SYSTEM: $(_unrecognised_window_system)) +$(warning *** WINDOW_SYSTEM was set via: $(origin WINDOW_SYSTEM)) +$(error Supported Window Systems are: $(_window_systems)) +endif + +# Use this to mark config options that are user-tunable for certain window +# systems but not others. An error is raised if an attempt is made to change it.
+# +# $(1): config option +# $(2): window system(s) for which the config option is user-tunable +# +define WindowSystemTunableOption +$(if $(filter $(2),$(WINDOW_SYSTEM)),,\ + $(if $(filter command line environment,$(origin $(1))),\ + $(error Changing '$(1)' for '$(WINDOW_SYSTEM)' is not supported))) +endef + +$(call WindowSystemTunableOption,EGL_EXTENSION_ANDROID_NATIVE_FENCE_SYNC,) +$(call WindowSystemTunableOption,GBM_BACKEND,$(if $(MESA_EGL),,nulldrmws)) +$(call WindowSystemTunableOption,MESA_EGL,nulldrmws) +$(call WindowSystemTunableOption,OPK_DEFAULT,) +$(call WindowSystemTunableOption,OPK_FALLBACK,) +$(call WindowSystemTunableOption,SUPPORT_ACTIVE_FLUSH,\ + ews lws-generic nullws nulldrmws wayland screen) +$(call WindowSystemTunableOption,SUPPORT_DISPLAY_CLASS,ews nullws screen) +$(call WindowSystemTunableOption,SUPPORT_FALLBACK_FENCE_SYNC,) +$(call WindowSystemTunableOption,SUPPORT_INSECURE_EXPORT,ews) +$(call WindowSystemTunableOption,SUPPORT_KMS,ews) +$(call WindowSystemTunableOption,SUPPORT_NATIVE_FENCE_SYNC,\ + $(if $(SUPPORT_KMS),,ews) nullws) +$(call WindowSystemTunableOption,SUPPORT_SECURE_EXPORT,nullws ews) +$(call WindowSystemTunableOption,SUPPORT_VK_PLATFORMS,lws-generic) +$(call WindowSystemTunableOption,PVRSRV_WRAP_EXTMEM_WRITE_ATTRIB_ENABLE,\ + $(if $(SUPPORT_KMS),,ews) nullws) + +ifeq ($(WINDOW_SYSTEM),xorg) + MESA_EGL := 1 + SUPPORT_VK_PLATFORMS := x11 + SUPPORT_DISPLAY_CLASS := 0 + SUPPORT_NATIVE_FENCE_SYNC := 1 + SUPPORT_KMS := 1 + override PVRSRV_WRAP_EXTMEM_WRITE_ATTRIB_ENABLE := 0 + SUPPORT_ACTIVE_FLUSH := 1 +else ifeq ($(WINDOW_SYSTEM),wayland) + MESA_EGL := 1 + SUPPORT_VK_PLATFORMS := wayland + SUPPORT_DISPLAY_CLASS := 0 + SUPPORT_NATIVE_FENCE_SYNC := 1 + SUPPORT_KMS := 1 + override PVRSRV_WRAP_EXTMEM_WRITE_ATTRIB_ENABLE := 0 +else ifeq ($(WINDOW_SYSTEM),surfaceless) + MESA_EGL := 1 + SUPPORT_ACTIVE_FLUSH := 1 + SUPPORT_DISPLAY_CLASS := 0 + SUPPORT_NATIVE_FENCE_SYNC := 1 + SUPPORT_KMS := 1 + override 
PVRSRV_WRAP_EXTMEM_WRITE_ATTRIB_ENABLE := 0 +else ifeq ($(WINDOW_SYSTEM),lws-generic) + MESA_EGL := 1 + SUPPORT_DISPLAY_CLASS := 0 + SUPPORT_NATIVE_FENCE_SYNC := 1 + SUPPORT_KMS := 1 + override PVRSRV_WRAP_EXTMEM_WRITE_ATTRIB_ENABLE := 0 +else ifeq ($(WINDOW_SYSTEM),ews) # Linux builds only + SUPPORT_VK_PLATFORMS := null + SUPPORT_SECURE_EXPORT ?= 1 + SUPPORT_DISPLAY_CLASS ?= 1 + OPK_DEFAULT := libpvrEWS_WSEGL.so + ifeq ($(SUPPORT_DISPLAY_CLASS),1) + PVRSRV_WRAP_EXTMEM_WRITE_ATTRIB_ENABLE ?= 1 + OPK_FALLBACK := libpvrNULL_WSEGL.so + PVR_HANDLE_BACKEND ?= generic + ifeq ($(SUPPORT_NATIVE_FENCE_SYNC),) # Set default if no override + SUPPORT_FALLBACK_FENCE_SYNC := 1 + endif + else ifeq ($(SUPPORT_KMS),1) + override PVRSRV_WRAP_EXTMEM_WRITE_ATTRIB_ENABLE := 0 + OPK_FALLBACK := libpvrNULLDRM_WSEGL.so + SUPPORT_NATIVE_FENCE_SYNC := 1 + else + $(error either SUPPORT_DISPLAY_CLASS or SUPPORT_KMS must be enabled) + endif + ifeq ($(SUPPORT_NATIVE_FENCE_SYNC)$(SUPPORT_FALLBACK_FENCE_SYNC),1) + EGL_EXTENSION_ANDROID_NATIVE_FENCE_SYNC := 1 + endif +else ifeq ($(WINDOW_SYSTEM),nullws) # Linux and Neutrino builds + PVRSRV_WRAP_EXTMEM_WRITE_ATTRIB_ENABLE ?= 1 + OPK_DEFAULT := libpvrNULL_WSEGL.so + OPK_FALLBACK := libpvrNULL_WSEGL.so + PVR_HANDLE_BACKEND ?= generic + SUPPORT_VK_PLATFORMS := null + SUPPORT_DISPLAY_CLASS ?= 1 + ifeq ($(SUPPORT_NATIVE_FENCE_SYNC),) # Set default if no override + SUPPORT_FALLBACK_FENCE_SYNC := 1 + endif +else ifeq ($(WINDOW_SYSTEM),nulldrmws) + ifeq ($(MESA_EGL),) + OPK_DEFAULT := libpvrNULLDRM_WSEGL.so + OPK_FALLBACK := libpvrNULLDRM_WSEGL.so + _supported_gbm_backends := dbm + GBM_BACKEND ?= dbm + endif + SUPPORT_VK_PLATFORMS := null + SUPPORT_DISPLAY_CLASS := 0 + SUPPORT_NATIVE_FENCE_SYNC := 1 + SUPPORT_KMS := 1 + override PVRSRV_WRAP_EXTMEM_WRITE_ATTRIB_ENABLE := 0 +else ifeq ($(WINDOW_SYSTEM),screen) # Neutrino builds + OPK_DEFAULT := libpvrSCREEN_WSEGL.so + OPK_FALLBACK := libpvrSCREEN_WSEGL.so + PVR_HANDLE_BACKEND := generic + 
SUPPORT_VK_PLATFORMS := null + SUPPORT_DISPLAY_CLASS ?= 1 + SUPPORT_FALLBACK_FENCE_SYNC := 1 + PVRSRV_WRAP_EXTMEM_WRITE_ATTRIB_ENABLE ?= 1 +endif + +ifeq ($(MESA_EGL),1) + EGL_BASENAME_SUFFIX := _PVR_MESA + SUPPORT_OPENGLES1_V1_ONLY := 1 + ifeq ($(SUPPORT_NATIVE_FENCE_SYNC),1) + EGL_EXTENSION_ANDROID_NATIVE_FENCE_SYNC := 1 + endif +endif + +ifeq ($(SUPPORT_KMS),1) + SUPPORT_DRM_FBDEV_EMULATION ?= 1 +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/defs.mk b/drivers/mcst/gpu-imgtec/build/linux/defs.mk new file mode 100644 index 000000000000..24ab3ad95623 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/defs.mk @@ -0,0 +1,304 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +define must-be-defined +$(if $(filter undefined,$(origin $(1))),$(error In makefile $(THIS_MAKEFILE): $$($(1)) must be defined),) +endef + +define must-be-nonempty +$(if $(strip $($(1))),,$(error In makefile $(THIS_MAKEFILE): $$($(1)) must contain a value)) +endef + +define directory-must-exist +$(if $(wildcard $(abspath $(1)/)),,$(error Directory $(1) must exist)) +endef + +define one-word-only +$(if $(filter-out $(firstword $($(1))),$($(1))),$(error In makefile $(THIS_MAKEFILE): $$($(1)) must contain only one word),) +endef + +define module-library +$(patsubst lib%.so,%,$(if $($(1)_target),$($(1)_target),$(1).so)) +endef + +# This is done to allow module type makefiles to use $(THIS_MAKEFILE) +define register-module +INTERNAL_MAKEFILE_FOR_MODULE_$(1) := $(THIS_MAKEFILE) +endef + +define process-module-arch +MODULE_ARCH := $$(strip $(2)) +include $$(MAKE_TOP)/moduledefs_common.mk +include $$(MAKE_TOP)/moduledefs/$$(MODULE_ARCH).mk +include $$(MAKE_TOP)/moduledefs_libs.mk +ifneq ($$(wildcard $$(MAKE_TOP)/modules/$$(strip $$($$(THIS_MODULE)_type)).mk),) +include $$(MAKE_TOP)/modules/$$(strip $$($$(THIS_MODULE)_type)).mk +.SECONDARY: $$(MODULE_INTERMEDIATES_DIR) +$$(MODULE_INTERMEDIATES_DIR): + $$(make-directory) +MODULE_CLEAN_TARGETS += $$(MODULE_INTERMEDIATES_DIR) +INTERNAL_TARGETS_FOR_$(1) += $$(MODULE_TARGETS) +INTERNAL_CLEAN_TARGETS_FOR_$(1) += $$(MODULE_CLEAN_TARGETS) +INTERNAL_CLOBBER_TARGETS_FOR_$(1) += $$(MODULE_CLEAN_TARGETS) $$(MODULE_CLOBBER_TARGETS) $$(MODULE_TARGETS) +else +ALL_BAD_MODULES := $$(ALL_BAD_MODULES) $$(THIS_MODULE) +endif +endef + +# This list should be kept in alphabetical order. 
+# +target_neutral_types := \ + aidl \ + apk \ + binary_header \ + bison_parser \ + bitcode_library \ + bridge \ + copy_files \ + custom \ + dex \ + flex_lexer \ + flexxx_lexer \ + gen_dispatch \ + hidl \ + java_archive \ + module_group \ + opencl_signature_header \ + pds_header \ + preprocessed_file \ + rgxmetafw \ + rgxmipsfw \ + rgxriscvfw \ + rs_bitcode \ + rs_object \ + rscsha1_header \ + spv_header \ + test_image \ + usc_header \ + usc_uniflex_header \ + vk_layer \ + wayland_protocol_files + +doc_types := \ + doc \ + doxygen + +define calculate-arch-list +# Work out the target platforms for this module +MODULE_ARCH_LIST := $(2) +ifeq ($$(MODULE_ARCH_LIST),) +ifneq ($$(filter $(1),$(doc_types)),) +MODULE_ARCH_LIST := doc +else +ifneq ($$(filter $(1),$(target_neutral_types)),) +MODULE_ARCH_LIST := target_neutral +else +ifneq ($$(filter $(1),kernel_module),) +MODULE_ARCH_LIST := $(TARGET_PRIMARY_ARCH) +else +MODULE_ARCH_LIST := $(TARGET_ALL_ARCH) +endif +endif +endif +endif +endef + +define process-module +THIS_MODULE := $(1) +THIS_MAKEFILE := $(INTERNAL_MAKEFILE_FOR_MODULE_$(1)) +INTERNAL_TARGETS_FOR_$(1) := +INTERNAL_CLEAN_TARGETS_FOR_$(1) := +INTERNAL_CLOBBER_TARGETS_FOR_$(1) := +include $$(MAKE_TOP)/this_makefile.mk +$$(call must-be-nonempty,THIS_MAKEFILE) +$$(call must-be-nonempty,$(1)_type) +$$(eval $$(call calculate-arch-list,$$($(1)_type),$$($(1)_arch))) +INTERNAL_ARCH_LIST_FOR_$(1) := $$(MODULE_ARCH_LIST) +$$(foreach _m,$$(MODULE_ARCH_LIST),$$(eval $$(call process-module-arch,$(1),$$(_m)))) +endef + +# This can be used by module_type.mk files to indicate that they can't be +# built as host_module_type +define target-build-only +$(if $(filter true,$(MODULE_HOST_BUILD)),$(error In makefile $(THIS_MAKEFILE): Module $(THIS_MODULE) attempted to build a host $(1), which is not supported)) +endef + +define relative-to-top +$(patsubst $(TOP)/%,%,$(1)) +endef + +define cc-check +$(shell \ + CC_CHECK=$(patsubst @%,%,$(CC_CHECK)) && \ + if [ ! 
-x $$CC_CHECK ]; then $(patsubst @%,%,$(CHMOD)) +x $$CC_CHECK ; fi && \ + $$CC_CHECK --cc "$(1)" --out "$(2)" $(3)) +endef + +define cc-is-clang +$(call cc-check,$(patsubst @%,%,$(CC)),$(OUT),--clang) +endef + +define cc-option +$(call cc-check,$(patsubst @%,%,$(CC)),$(OUT),$(1)) +endef + +define cxx-option +$(call cc-check,$(patsubst @%,%,$(CXX)),$(OUT),$(1)) +endef + +define host-cc-is-clang +$(call cc-check,$(patsubst @%,%,$(HOST_CC)),$(OUT),--clang) +endef + +define host-cc-option +$(call cc-check,$(patsubst @%,%,$(HOST_CC)),$(OUT),$(1)) +endef + +define host-cxx-option +$(call cc-check,$(patsubst @%,%,$(HOST_CXX)),$(OUT),$(1)) +endef + +define kernel-cc-is-clang +$(call cc-check,$(if $(KERNEL_CC),$(KERNEL_CC),$(KERNEL_CROSS_COMPILE)gcc),$(OUT),--clang) +endef + +define kernel-cc-option +$(call cc-check,$(if $(KERNEL_CC),$(KERNEL_CC),$(KERNEL_CROSS_COMPILE)gcc),$(OUT),$(1)) +endef + +# Turn a particular warning on, or explicitly turn it off, depending on +# the value of W. The "-W" or "-Wno-" part of the warning need not be +# specified. +define cc-optional-warning +$(call cc-option,-W$(if $(W),,no-)$(patsubst -W%,%,$(patsubst -Wno-%,%,$(1)))) +endef + +define host-cc-optional-warning +$(call host-cc-option,-W$(if $(W),,no-)$(patsubst -W%,%,$(patsubst -Wno-%,%,$(1)))) +endef + +define kernel-cc-optional-warning +$(call kernel-cc-option,-W$(if $(W),,no-)$(patsubst -W%,%,$(patsubst -Wno-%,%,$(1)))) +endef + +define module-info-line +$(if $(filter modules,$(D)),$(info [$(THIS_MODULE)] <$(MODULE_ARCH)> $(1)),) +endef + +# $(call if-exists,A,B) => A if A is a file which exists, otherwise B +define if-exists +$(if $(wildcard $(1)),$(1),$(2)) +endef + +# +# Joins a given list of strings together with the given separator. 
+# +# (1): the list of strings to join +# (2): the separator to use for joining +# +NOOP= +SPACE=$(NOOP) $(NOOP) +define list-join +$(subst $(SPACE),$(2),$(strip $(1))) +endef + +# +# Check if a given path is absolute +# +# $(1): path to check +# $(2): return when true +# $(3): return when false +# +define if-abs-path +$(if $(filter /%,$(1)),$(2),$(3)) +endef + +# +# Add a prefix to every path in a list, when the path isn't absolute. +# +# $(1): prefix to add +# $(2): list of paths +# +define addprefix-ifnot-abs +$(foreach _path,$(2),$(call if-abs-path,$(_path),$(_path),$(1)$(_path))) +endef + +# +# Return the directory part of a path normalized (without trailing slashes) +# +# $(1): Path to normalize +# +define normalized-dir +$(if $(filter /,$(dir $(1))),$(dir $(1)),$(if $(findstring /,$(dir $(1))),$(patsubst %/,%,$(dir $(1))),$(dir $(1)))) +endef + +# Definitions to handle the various suffixes allowed for C++ files. +INTERNAL_CXX_SUFFIXES := .cc .cxx .cpp .c++ +define filter-cxx-files +$(filter $(addprefix %,$(INTERNAL_CXX_SUFFIXES)),$(1)) +endef + +define filter-out-cxx-files +$(filter-out $(addprefix %,$(INTERNAL_CXX_SUFFIXES)),$(1)) +endef + +define objects-from-cxx-files +$(foreach _suffix,$(INTERNAL_CXX_SUFFIXES),$(patsubst %$(_suffix),%.o,$(filter %$(_suffix),$(1)))) +endef + +define unsupported-module-var +$(if $(strip $($(THIS_MODULE)_$(1))),$(error In makefile $(THIS_MAKEFILE): Setting '$(THIS_MODULE)_$(1)' has no effect, because $(THIS_MODULE) has type $($(THIS_MODULE)_type))) +endef + +define hidl_headers +$(addprefix $(1)/$($(1)_intf_path)/,\ + $(foreach _i,$($(1)_intf_class),BnHw$(_i).h BpHw$(_i).h Bs$(_i).h IHw$(_i).h I$(_i).h) \ + $(foreach _i,$($(1)_intf_type),hw$(_i).h $(_i).h)) +endef + +define hidl_sources +$(addprefix $(GENERATED_CODE_OUT)/$(1)/$($(1)_intf_path)/,\ + $(foreach _i,$($(1)_intf_class),$(_i)All.cpp)) +endef + +define aidl_headers +$(addprefix $(1)/$($(1)_type)/$($(1)_intf_path)/, \ + $(foreach _i,$($(1)_intf_class),Bn$(_i).h 
Bp$(_i).h $(_i).h)) +endef diff --git a/drivers/mcst/gpu-imgtec/build/linux/e2c3_gpu/Makefile b/drivers/mcst/gpu-imgtec/build/linux/e2c3_gpu/Makefile new file mode 100644 index 000000000000..35933b9738ac --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/e2c3_gpu/Makefile @@ -0,0 +1,94 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@Copyright Copyright (c) MCST +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. 
+# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +RGX_BVNC ?= 4.46.6.62 + +include ../config/preconfig.mk +include ../config/window_system.mk + +ifneq ($(SUPPORT_KMS),1) +$(error This platform only supports window systems that use the standard Linux driver model.) +endif + +PVR_SYSTEM := e2c3_gpu +NO_HARDWARE := 0 + +PVR_LDM_PLATFORM_PRE_REGISTERED := 1 + +# Enables PVR DVFS implementation to actively change frequency / voltage depending +# on current GPU load. Currently only supported on Linux. +SUPPORT_LINUX_DVFS ?= 0 + +KERNEL_COMPONENTS := srvkm e2c3_gpu + +ifeq ($(PVR_REMVIEW),1) + DISPLAY_CONTROLLER := drm_nulldisp + KERNEL_COMPONENTS += $(DISPLAY_CONTROLLER) + PVR_DRM_MODESET_DRIVER_NAME := nulldisp +else + PVR_DRM_MODESET_DRIVER_NAME := mga2 + PVR_DRM_MODESET_MODULE_NAME := mga2 +endif + +# Enable WA for power controllers that power up dusts by default. +# The Firmware powers down the dusts after booting +FIX_DUSTS_POW_ON_INIT := 1 + +# Always print fatal and error logs, especially in kernel mode. 
+PVRSRV_NEED_PVR_DPF := 1 + +RGXFW_POWER_EVENT_DELAY_TICKS := 10 + +# Should be last +include ../config/core.mk +include ../common/lws.mk + +$(eval $(call TunableBothConfigMake,KERNEL_DRIVER_DIR,,\ +The directory inside the Linux kernel tree (relative to KERNELDIR) where_\ +the PVR Services kernel driver files will live. If set$(comma) the PVR_LOADER and_\ +PVR_SYSTEM related files found in this directory will be used instead of_\ +the local copies._\ +)) + + +# Bug 120942:e2k:iommu: set PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM to 0: _AllocOSPage_CMA() can not handle more +# Look: r23427 | dima@mcst.ru +PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER := 0 diff --git a/drivers/mcst/gpu-imgtec/build/linux/fpga_linux/Makefile b/drivers/mcst/gpu-imgtec/build/linux/fpga_linux/Makefile new file mode 100644 index 000000000000..2147ea353617 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/fpga_linux/Makefile @@ -0,0 +1,44 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +# Inherit build config from tc_linux +include ../tc_linux/Makefile + +$(eval $(call TunableBothConfigC,FPGA,1)) diff --git a/drivers/mcst/gpu-imgtec/build/linux/kbuild/Makefile.template b/drivers/mcst/gpu-imgtec/build/linux/kbuild/Makefile.template new file mode 100644 index 000000000000..62af7bad824e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/kbuild/Makefile.template @@ -0,0 +1,90 @@ +########################################################################### ### +#@Title Root kernel makefile +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +# This top-level kbuild makefile builds all the Linux kernel modules in the +# DDK. To run kbuild, this makefile is copied to $(TARGET_PRIMARY_OUT)/kbuild/Makefile +# and make is invoked in $(TARGET_PRIMARY_OUT)/kbuild. + +# This makefile doesn't define any kbuild special variables apart from +# ccflags-y and obj-m. The variables for objects are picked up by including +# the kbuild makefile fragments named in $(INTERNAL_KBUILD_MAKEFILES). The +# list of objects that these fragments make is collected in +# $(INTERNAL_KBUILD_OBJECTS) and $(INTERNAL_EXTRA_KBUILD_OBJECTS). These +# variables are set according to the build's $(KERNEL_COMPONENTS) and +# $(EXTRA_PVRSRVKM_COMPONENTS). To add a new kernel module to the build, edit +# these variables in the per-build Makefile. + +include $(OUT)/config_kernel.mk + +.SECONDARY: + +define symlink-source-file +@if [ ! -e $(dir $@) ]; then mkdir -p $(dir $@); fi +@if [ ! 
-h $@ ]; then ln -sf $< $@; fi +endef + +bridge_base := $(BRIDGE_SOURCE_ROOT) + +$(OUT)/$(TARGET_PRIMARY_ARCH)/kbuild/%.c: $(TOP)/%.c + $(symlink-source-file) + +$(OUT)/$(TARGET_PRIMARY_ARCH)/kbuild/generated/$(PVR_ARCH)/%.c: $(bridge_base)/%.c + $(symlink-source-file) + +$(OUT)/$(TARGET_PRIMARY_ARCH)/kbuild/external/%.c: $(abspath $(srctree))/%.c + $(symlink-source-file) + +ccflags-y += -D__linux__ -include $(OUT)/config_kernel.h \ + -include kernel_config_compatibility.h \ + -I$(OUT)/include -I$(TOP)/kernel/drivers/staging/imgtec + +include $(INTERNAL_KBUILD_MAKEFILES) + +ifneq ($(KERNEL_DRIVER_DIR),) + ccflags-y += \ + -I$(abspath $(srctree))/$(KERNEL_DRIVER_DIR)/$(PVR_SYSTEM) \ + -I$(abspath $(srctree))/$(KERNEL_DRIVER_DIR) +endif + +$(if $($(PVRSRVKM_NAME)-y),,$(warning $(PVRSRVKM_NAME)-y was empty, which could mean that srvkm is missing from $$(KERNEL_COMPONENTS))) +$(PVRSRVKM_NAME)-y += $(foreach _m,$(INTERNAL_EXTRA_KBUILD_OBJECTS:.o=),$($(_m)-y)) + +obj-m += $(INTERNAL_KBUILD_OBJECTS) diff --git a/drivers/mcst/gpu-imgtec/build/linux/kbuild/kbuild.mk b/drivers/mcst/gpu-imgtec/build/linux/kbuild/kbuild.mk new file mode 100644 index 000000000000..c1114e993c24 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/kbuild/kbuild.mk @@ -0,0 +1,116 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +$(TARGET_PRIMARY_OUT)/kbuild/Makefile: $(MAKE_TOP)/kbuild/Makefile.template + @[ ! -e $(dir $@) ] && mkdir -p $(dir $@) || true + $(CP) -f $< $@ + +# We need to make INTERNAL_KBUILD_MAKEFILES absolute because the files will be +# read while chdir'd into $(KERNELDIR) +INTERNAL_KBUILD_MAKEFILES := $(abspath $(foreach _m,$(KERNEL_COMPONENTS) $(EXTRA_PVRSRVKM_COMPONENTS),$(if $(INTERNAL_KBUILD_MAKEFILE_FOR_$(_m)),$(INTERNAL_KBUILD_MAKEFILE_FOR_$(_m)),$(error Unknown kbuild module "$(_m)")))) +INTERNAL_KBUILD_OBJECTS := $(foreach _m,$(KERNEL_COMPONENTS),$(if $(INTERNAL_KBUILD_OBJECTS_FOR_$(_m)),$(INTERNAL_KBUILD_OBJECTS_FOR_$(_m)),$(error BUG: Unknown kbuild module "$(_m)" should have been caught earlier))) +INTERNAL_EXTRA_KBUILD_OBJECTS := $(foreach _m,$(EXTRA_PVRSRVKM_COMPONENTS),$(if $(INTERNAL_KBUILD_OBJECTS_FOR_$(_m)),$(INTERNAL_KBUILD_OBJECTS_FOR_$(_m)),$(error BUG: Unknown kbuild module "$(_m)" should have been caught earlier))) +.PHONY: kbuild kbuild_clean kbuild_check + +kbuild_check: + @: $(if $(strip $(KERNELDIR)),,$(error KERNELDIR must be set)) + @: $(call directory-must-exist,$(KERNELDIR)) + @: $(foreach _m,$(ALL_KBUILD_MODULES),$(if $(wildcard $(abspath $(INTERNAL_KBUILD_MAKEFILE_FOR_$(_m)))),,$(error In makefile $(INTERNAL_MAKEFILE_FOR_MODULE_$(_m)): Module $(_m) requires kbuild makefile $(INTERNAL_KBUILD_MAKEFILE_FOR_$(_m)), which is missing))) + @: $(if $(filter-out command line override,$(origin build)),,$(error Overriding $$(build) (with "make build=...") will break kbuild)) + +# Services server headers are generated as part of running the bridge +# generator, which might be included in KM code. So as well as depending on +# the kbuild Makefile, we need to make kbuild also depend on each bridge +# module (including direct bridges), so that 'make kbuild' in a clean tree +# works. 
+kbuild: kbuild_check $(TARGET_PRIMARY_OUT)/kbuild/Makefile bridges + $(if $(V),,@)$(MAKE) -Rr --no-print-directory -C $(KERNELDIR) \ + M=$(abspath $(TARGET_PRIMARY_OUT)/kbuild) \ + INTERNAL_KBUILD_MAKEFILES="$(INTERNAL_KBUILD_MAKEFILES)" \ + INTERNAL_KBUILD_OBJECTS="$(INTERNAL_KBUILD_OBJECTS)" \ + INTERNAL_EXTRA_KBUILD_OBJECTS="$(INTERNAL_EXTRA_KBUILD_OBJECTS)" \ + BRIDGE_SOURCE_ROOT=$(abspath $(BRIDGE_SOURCE_ROOT)) \ + TARGET_PRIMARY_ARCH=$(TARGET_PRIMARY_ARCH) \ + PVR_ARCH=$(PVR_ARCH) \ + CLANG_TRIPLE=$(if $(filter %-androideabi,$(CROSS_TRIPLE)),$(patsubst \ + %-androideabi,%-gnueabi,$(CROSS_TRIPLE)),$(patsubst \ + %-android,%-gnu,$(CROSS_TRIPLE)))- \ + CROSS_COMPILE="$(CCACHE) $(KERNEL_CROSS_COMPILE)" \ + EXTRA_CFLAGS="$(ALL_KBUILD_CFLAGS)" \ + CC=$(if $(KERNEL_CC),$(KERNEL_CC),$(KERNEL_CROSS_COMPILE)gcc) \ + LD=$(if $(KERNEL_LD),$(KERNEL_LD),$(KERNEL_CROSS_COMPILE)ld) \ + NM=$(if $(KERNEL_NM),$(KERNEL_NM),$(KERNEL_CROSS_COMPILE)nm) \ + OBJCOPY=$(if $(KERNEL_OBJCOPY),$(KERNEL_OBJCOPY),$(KERNEL_CROSS_COMPILE)objcopy) \ + CHECK="$(patsubst @%,%,$(CHECK))" $(if $(CHECK),C=1,) \ + V=$(V) W=$(W) TOP=$(TOP) + @for kernel_module in $(addprefix $(TARGET_PRIMARY_OUT)/kbuild/,$(INTERNAL_KBUILD_OBJECTS:.o=.ko)); do \ + cp $$kernel_module $(TARGET_PRIMARY_OUT); \ + done +ifeq ($(KERNEL_DEBUGLINK),1) + @for kernel_module in $(addprefix $(TARGET_PRIMARY_OUT)/,$(INTERNAL_KBUILD_OBJECTS:.o=.ko)); do \ + $(CROSS_COMPILE)objcopy --only-keep-debug $$kernel_module $(basename $$kernel_module).dbg; \ + $(CROSS_COMPILE)strip --strip-debug $$kernel_module; \ + $(if $(V),,echo " DBGLINK " $(call relative-to-top,$(basename $$kernel_module).dbg)); \ + $(CROSS_COMPILE)objcopy --add-gnu-debuglink=$(basename $$kernel_module).dbg $$kernel_module; \ + done +endif + + +kbuild_clean: kbuild_check $(TARGET_PRIMARY_OUT)/kbuild/Makefile + $(if $(V),,@)$(MAKE) -Rr --no-print-directory -C $(KERNELDIR) \ + M=$(abspath $(TARGET_PRIMARY_OUT)/kbuild) \ + 
INTERNAL_KBUILD_MAKEFILES="$(INTERNAL_KBUILD_MAKEFILES)" \ + INTERNAL_KBUILD_OBJECTS="$(INTERNAL_KBUILD_OBJECTS)" \ + INTERNAL_EXTRA_KBUILD_OBJECTS="$(INTERNAL_EXTRA_KBUILD_OBJECTS)" \ + BRIDGE_SOURCE_ROOT=$(abspath $(BRIDGE_SOURCE_ROOT)) \ + TARGET_PRIMARY_ARCH=$(TARGET_PRIMARY_ARCH) \ + CLANG_TRIPLE=$(if $(filter %-androideabi,$(CROSS_TRIPLE)),$(patsubst \ + %-androideabi,%-gnueabi,$(CROSS_TRIPLE)),$(patsubst \ + %-android,%-gnu,$(CROSS_TRIPLE)))- \ + CROSS_COMPILE="$(CCACHE) $(KERNEL_CROSS_COMPILE)" \ + EXTRA_CFLAGS="$(ALL_KBUILD_CFLAGS)" \ + CC=$(if $(KERNEL_CC),$(KERNEL_CC),$(KERNEL_CROSS_COMPILE)gcc) \ + LD=$(if $(KERNEL_LD),$(KERNEL_LD),$(KERNEL_CROSS_COMPILE)ld) \ + NM=$(if $(KERNEL_NM),$(KERNEL_NM),$(KERNEL_CROSS_COMPILE)nm) \ + OBJCOPY=$(if $(KERNEL_OBJCOPY),$(KERNEL_OBJCOPY),$(KERNEL_CROSS_COMPILE)objcopy) \ + V=$(V) W=$(W) TOP=$(TOP) clean + +kbuild_install: installkm +kbuild: install_script_km diff --git a/drivers/mcst/gpu-imgtec/build/linux/moduledefs/host_i386.mk b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/host_i386.mk new file mode 100644 index 000000000000..48f15356def2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/host_i386.mk @@ -0,0 +1,81 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +MODULE_HOST_BUILD := true + +MODULE_AR := $(HOST_AR) +MODULE_CC := $(HOST_CC) $(HOST_FORCE_32BIT) +MODULE_CXX := $(HOST_CXX) $(HOST_FORCE_32BIT) +MODULE_NM := $(HOST_NM) +MODULE_OBJCOPY := $(HOST_OBJCOPY) +MODULE_RANLIB := $(HOST_RANLIB) +MODULE_STRIP := $(HOST_STRIP) + +MODULE_CFLAGS := $(ALL_HOST_CFLAGS) $($(THIS_MODULE)_cflags) $(HOST_FORCE_32BIT) +MODULE_CXXFLAGS := $(ALL_HOST_CXXFLAGS) $($(THIS_MODULE)_cxxflags) $(HOST_FORCE_32BIT) +MODULE_LDFLAGS := $(ALL_HOST_LDFLAGS) -L$(MODULE_OUT) $($(THIS_MODULE)_ldflags) $(HOST_FORCE_32BIT) + +ifneq ($(BUILD),debug) +ifeq ($(USE_LTO),1) +MODULE_LDFLAGS := \ + $(sort $(filter-out -W% -D% -isystem /%,$(ALL_HOST_CFLAGS) $(ALL_HOST_CXXFLAGS))) \ + $(MODULE_LDFLAGS) +endif +endif + +ifeq ($(SUPPORT_NEUTRINO_PLATFORM),1) +include $(MAKE_TOP)/common/neutrino/extra_host.mk +else +endif + +MESON_CROSS_CPU_HOST ?= i686 + +MESON_CROSS_SYSTEM := linux +MESON_CROSS_CPU_FAMILY := x86 +MESON_CROSS_CPU := $(MESON_CROSS_CPU_HOST) +MESON_CROSS_ENDIAN := little +MESON_CROSS_CROSS_COMPILE := +MESON_CROSS_CC := $(patsubst @%,%,$(HOST_CC)) +MESON_CROSS_C_ARGS := $(HOST_FORCE_32BIT) +MESON_CROSS_C_LINK_ARGS := $(HOST_FORCE_32BIT) +MESON_CROSS_CXX := $(patsubst @%,%,$(HOST_CXX)) +MESON_CROSS_CXX_ARGS := $(HOST_FORCE_32BIT) +MESON_CROSS_CXX_LINK_ARGS := $(HOST_FORCE_32BIT) diff --git a/drivers/mcst/gpu-imgtec/build/linux/moduledefs/host_x86_64.mk b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/host_x86_64.mk new file mode 100644 index 000000000000..d101d970a849 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/host_x86_64.mk @@ -0,0 +1,87 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +MODULE_HOST_BUILD := true + +MODULE_AR := $(HOST_AR) +MODULE_CC := $(HOST_CC) +MODULE_CXX := $(HOST_CXX) +MODULE_NM := $(HOST_NM) +MODULE_OBJCOPY := $(HOST_OBJCOPY) +MODULE_RANLIB := $(HOST_RANLIB) +MODULE_STRIP := $(HOST_STRIP) + +MODULE_CFLAGS := $(ALL_HOST_CFLAGS) $($(THIS_MODULE)_cflags) +MODULE_CXXFLAGS := $(ALL_HOST_CXXFLAGS) $($(THIS_MODULE)_cxxflags) +MODULE_LDFLAGS := $(ALL_HOST_LDFLAGS) -L$(MODULE_OUT) $($(THIS_MODULE)_ldflags) + +ifeq ($(USE_LLD),1) + MODULE_LDFLAGS += -fuse-ld=lld +endif + +ifneq ($(BUILD),debug) +ifeq ($(USE_LTO),1) +MODULE_LDFLAGS := \ + $(sort $(filter-out -W% -D% -isystem /%,$(ALL_HOST_CFLAGS) $(ALL_HOST_CXXFLAGS))) \ + $(MODULE_LDFLAGS) +endif +endif + +ifeq ($(SUPPORT_NEUTRINO_PLATFORM),1) +include $(MAKE_TOP)/common/neutrino/extra_host.mk +else +endif + +ifeq ($(SUPPORT_INTEGRITY_PLATFORM),1) +include $(MAKE_TOP)/common/integrity/extra_host.mk +endif + +MESON_CROSS_SYSTEM := linux +MESON_CROSS_CPU_FAMILY := x86_64 +MESON_CROSS_CPU := x86_64 +MESON_CROSS_ENDIAN := little +MESON_CROSS_CROSS_COMPILE := +MESON_CROSS_CC := $(patsubst @%,%,$(HOST_CC)) +MESON_CROSS_C_ARGS := +MESON_CROSS_C_LINK_ARGS := +MESON_CROSS_CXX := $(patsubst @%,%,$(HOST_CXX)) +MESON_CROSS_CXX_ARGS := +MESON_CROSS_CXX_LINK_ARGS := diff --git a/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_aarch64.mk b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_aarch64.mk new file mode 100644 index 000000000000..9beae96c5e40 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_aarch64.mk @@ -0,0 +1,263 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +MODULE_AR := $(AR) +MODULE_CC := $(CC) +MODULE_CXX := $(CXX) +MODULE_NM := $(NM) +MODULE_OBJCOPY := $(OBJCOPY) +MODULE_RANLIB := $(RANLIB) +MODULE_STRIP := $(STRIP) + +MODULE_CFLAGS := $(ALL_CFLAGS) $($(THIS_MODULE)_cflags) +MODULE_CXXFLAGS := $(ALL_CXXFLAGS) $($(THIS_MODULE)_cxxflags) +MODULE_LDFLAGS := $($(THIS_MODULE)_ldflags) -L$(MODULE_OUT) -Xlinker -rpath-link=$(MODULE_OUT) $(ALL_LDFLAGS) + +# Since this is a target module, add system-specific include flags. +MODULE_INCLUDE_FLAGS := \ + $(SYS_INCLUDES_RESIDUAL) \ + $(addprefix -isystem ,$(filter-out $(patsubst -I%,%,$(filter -I%,$(MODULE_INCLUDE_FLAGS))),$(SYS_INCLUDES_ISYSTEM))) \ + $(MODULE_INCLUDE_FLAGS) + +ifneq ($(SUPPORT_ANDROID_PLATFORM),) + +MODULE_EXE_LDFLAGS := \ + -Bdynamic -nostdlib -Wl,-dynamic-linker,/system/bin/linker64 + +MODULE_LIBGCC := -Wl,--version-script,$(MAKE_TOP)/common/libgcc.lds $(LIBGCC) + +ifeq ($(NDK_ROOT),) + +include $(MAKE_TOP)/common/android/moduledefs_defs.mk + +# Android prebuilts for AARCH64 default to bfd linker. x86 and ARM use gold linker. +# Use gold linker for AARCH64. BFD linker gives unexpected results.
+ +ifeq ($(USE_LLD),1) + MODULE_LDFLAGS += -fuse-ld=lld +else + MODULE_LDFLAGS += -fuse-ld=gold +endif + +_obj := $(TARGET_ROOT)/product/$(TARGET_DEVICE)/obj +_lib := lib64 + +SYSTEM_LIBRARY_LIBC := $(strip $(call path-to-system-library,$(_lib),c)) +SYSTEM_LIBRARY_LIBM := $(strip $(call path-to-system-library,$(_lib),m)) +SYSTEM_LIBRARY_LIBDL := $(strip $(call path-to-system-library,$(_lib),dl)) + +MODULE_EXE_LDFLAGS += $(SYSTEM_LIBRARY_LIBC) + +# APK unittests +ifneq (,$(findstring $(THIS_MODULE),$(PVR_UNITTESTS_APK))) +MODULE_SYSTEM_LIBRARY_DIR_FLAGS := \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64 \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64 + +MODULE_LIBRARY_FLAGS_SUBST := \ + c++_static:$(ANDROID_DDK_DEPS)/out/local/arm64-v8a/libc++_static.a$$(space)$(ANDROID_DDK_DEPS)/out/local/arm64-v8a/libc++abi.a \ + RScpp:$(_obj)/STATIC_LIBRARIES/libRScpp_static_intermediates/libRScpp_static.a + +MODULE_EXE_LDFLAGS := $(MODULE_EXE_LDFLAGS) $(LIBGCC) -Wl,--as-needed $(SYSTEM_LIBRARY_LIBDL) + +else + +_vndk := $(strip $(call path-to-vndk,$(_lib))) +_vndk-sp := $(strip $(call path-to-vndk-sp,$(_lib))) +_apex-vndk := $(strip $(call path-to-apex-vndk,$(_lib))) + +MODULE_SYSTEM_LIBRARY_DIR_FLAGS := \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64/$(_vndk) \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64/$(_vndk) \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64/$(_vndk-sp) \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64/$(_vndk-sp) \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/$(_apex-vndk)/lib64 \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/$(_apex-vndk)/lib64 + +# LL-NDK libraries +ifneq ($(PVR_ANDROID_LLNDK_LIBRARIES),) +MODULE_LIBRARY_FLAGS_SUBST := \ + $(foreach _llndk,$(PVR_ANDROID_LLNDK_LIBRARIES), \ + $(_llndk):$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64/lib$(_llndk).so) +endif + +# CLDNN needs libneuralnetworks_common.a 
+MODULE_LIBRARY_FLAGS_SUBST += \ + neuralnetworks_common:$(_obj)/STATIC_LIBRARIES/libneuralnetworks_common_intermediates/libneuralnetworks_common.a \ + BlobCache:$(_obj)/STATIC_LIBRARIES/libBlobCache_intermediates/libBlobCache.a \ + nnCache:$(_obj)/STATIC_LIBRARIES/lib_nnCache_intermediates/lib_nnCache.a \ + perfetto_client_experimental:$(_obj)/STATIC_LIBRARIES/libperfetto_client_experimental_intermediates/libperfetto_client_experimental.a \ + protobuf-cpp-lite:$(_obj)/STATIC_LIBRARIES/libprotobuf-cpp-lite_intermediates/libprotobuf-cpp-lite.a \ + perfetto_trace_protos:$(_obj)/STATIC_LIBRARIES/perfetto_trace_protos_intermediates/perfetto_trace_protos.a + +# Gralloc and hwcomposer depend on libdrm +MODULE_LIBRARY_FLAGS_SUBST += \ + drm:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/vendor/lib64/libdrm.so + +# Unittests dependent on libRScpp_static.a +ifneq (,$(findstring $(THIS_MODULE),$(PVR_UNITTESTS_DEP_LIBRSCPP))) +MODULE_LIBRARY_FLAGS_SUBST += \ + RScpp:$(_obj)/STATIC_LIBRARIES/libRScpp_static_intermediates/libRScpp_static.a +endif +endif # PVR_UNITTESTS_APK + +# Always link to specific system libraries. 
+MODULE_LIBRARY_FLAGS_SUBST += \ + c:$(SYSTEM_LIBRARY_LIBC) \ + m:$(SYSTEM_LIBRARY_LIBM) \ + dl:$(SYSTEM_LIBRARY_LIBDL) + +MODULE_INCLUDE_FLAGS := \ + -isystem $(ANDROID_ROOT)/bionic/libc/arch-arm64/include \ + -isystem $(ANDROID_ROOT)/bionic/libc/kernel/uapi/asm-arm64 \ + -isystem $(ANDROID_ROOT)/bionic/libm/include/arm64 \ + $(MODULE_INCLUDE_FLAGS) + +MODULE_ARCH_TAG := arm64-v8a + +_arch := arm64 +_obj := $(strip $(call path-to-libc-rt,$(_obj),$(_arch))) +_lib := lib + +else # NDK_ROOT + +MODULE_EXE_LDFLAGS += -lc + +MODULE_INCLUDE_FLAGS := \ + -isystem $(NDK_SYSROOT)/usr/include/$(CROSS_TRIPLE) \ + $(MODULE_INCLUDE_FLAGS) + +MODULE_LIBRARY_FLAGS_SUBST := \ + art:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64/libart.so \ + RScpp:$(NDK_ROOT)/toolchains/renderscript/prebuilt/$(HOST_OS)-$(HOST_ARCH)/platform/arm64/libRScpp_static.a \ + neuralnetworks_common:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/obj/STATIC_LIBRARIES/libneuralnetworks_common_intermediates/libneuralnetworks_common.a \ + BlobCache:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/obj/STATIC_LIBRARIES/libBlobCache_intermediates/libBlobCache.a \ + nnCache:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/obj/STATIC_LIBRARIES/lib_nnCache_intermediates/lib_nnCache.a + +# Unittests dependent on libc++_static +ifneq (,$(findstring $(THIS_MODULE),$(PVR_UNITTESTS_APK))) +MODULE_LIBRARY_FLAGS_SUBST := \ + c++_static:$(NDK_ROOT)/out/local/arm64-v8a/libc++_static.a$$(space)$(NDK_ROOT)/out/local/arm64-v8a/libc++abi.a \ + $(MODULE_LIBRARY_FLAGS_SUBST) +endif + +ifeq ($(wildcard $(NDK_ROOT)/out/local/arm64-v8a/libc++.so),) +MODULE_LIBRARY_FLAGS_SUBST := \ + c++:$(NDK_ROOT)/sources/cxx-stl/llvm-libc++/libs/arm64-v8a/libc++_static.a$$(space)$(NDK_ROOT)/sources/cxx-stl/llvm-libc++/libs/arm64-v8a/libc++abi.a \ + $(MODULE_LIBRARY_FLAGS_SUBST) +else +MODULE_LIBRARY_FLAGS_SUBST := \ + c++:$(NDK_ROOT)/out/local/arm64-v8a/libc++.so \ + $(MODULE_LIBRARY_FLAGS_SUBST) +MODULE_SYSTEM_LIBRARY_DIR_FLAGS += \ + -Xlinker
-rpath-link=$(NDK_ROOT)/out/local/arm64-v8a +endif + +ifeq ($(filter-out $(NDK_ROOT)/%,$(NDK_SYSROOT)),) + +MODULE_SYSTEM_LIBRARY_DIR_FLAGS += \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64 \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64 + +# Substitutions performed on MODULE_LIBRARY_FLAGS (NDK workarounds) +MODULE_LIBRARY_FLAGS_SUBST := \ + nativewindow:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64/libnativewindow.so \ + sync:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64/libsync.so \ + $(MODULE_LIBRARY_FLAGS_SUBST) + +endif # !VNDK + +_obj := $(NDK_PLATFORMS_ROOT)/$(TARGET_PLATFORM)/arch-arm64/usr +_lib := lib + +MODULE_SYSTEM_LIBRARY_DIR_FLAGS := \ + -L$(_obj)/$(_lib) \ + -Xlinker -rpath-link=$(_obj)/$(_lib) \ + $(MODULE_SYSTEM_LIBRARY_DIR_FLAGS) + +# Workaround; the VNDK platforms root lacks the crt files +_obj := $(NDK_ROOT)/platforms/$(TARGET_PLATFORM)/arch-arm64/usr +_lib := lib +ifeq ($(wildcard $(_obj)),) +_obj := $(NDK_ROOT)/platforms/android-$(API_LEVEL)/arch-arm64/usr +endif + +MODULE_EXE_LDFLAGS := $(MODULE_EXE_LDFLAGS) $(LIBGCC) -Wl,--as-needed -ldl + +MODULE_ARCH_TAG := arm64-v8a + +endif # NDK_ROOT + +MODULE_LIB_LDFLAGS := $(MODULE_EXE_LDFLAGS) + +MODULE_EXE_CRTBEGIN := $(_obj)/$(_lib)/crtbegin_dynamic.o +MODULE_EXE_CRTEND := $(_obj)/$(_lib)/crtend_android.o + +MODULE_LIB_CRTBEGIN := $(_obj)/$(_lib)/crtbegin_so.o +MODULE_LIB_CRTEND := $(_obj)/$(_lib)/crtend_so.o + +MODULE_LDFLAGS += $(MODULE_SYSTEM_LIBRARY_DIR_FLAGS) + +endif # SUPPORT_ANDROID_PLATFORM + +ifneq ($(BUILD),debug) +ifeq ($(USE_LTO),1) +MODULE_LDFLAGS := \ + $(sort $(filter-out -W% -D% -isystem /%,$(ALL_CFLAGS) $(ALL_CXXFLAGS))) \ + $(MODULE_LDFLAGS) +endif +endif + +MODULE_ARCH_BITNESS := 64 + +MESON_CROSS_CPU_PRIMARY ?= armv8-a + +MESON_CROSS_SYSTEM := linux +MESON_CROSS_CPU_FAMILY := aarch64 +MESON_CROSS_CPU := $(MESON_CROSS_CPU_PRIMARY) +MESON_CROSS_ENDIAN := little +MESON_CROSS_CROSS_COMPILE := $(CROSS_COMPILE) +MESON_CROSS_CC 
:= $(patsubst @%,%,$(CC)) +MESON_CROSS_C_ARGS := $(SYS_CFLAGS) +MESON_CROSS_C_LINK_ARGS := $(SYS_LDFLAGS) +MESON_CROSS_CXX := $(patsubst @%,%,$(CXX)) +MESON_CROSS_CXX_ARGS := $(SYS_CXXFLAGS) +MESON_CROSS_CXX_LINK_ARGS := $(SYS_LDFLAGS) diff --git a/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_armel.mk b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_armel.mk new file mode 100644 index 000000000000..5c3d9448cab1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_armel.mk @@ -0,0 +1,86 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. 
If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +MODULE_AR := $(AR_SECONDARY) +MODULE_CC := $(CC_SECONDARY) +MODULE_CXX := $(CXX_SECONDARY) +MODULE_NM := $(NM_SECONDARY) +MODULE_OBJCOPY := $(OBJCOPY_SECONDARY) +MODULE_RANLIB := $(RANLIB_SECONDARY) +MODULE_STRIP := $(STRIP_SECONDARY) + +MODULE_CFLAGS := $(ALL_CFLAGS) $($(THIS_MODULE)_cflags) +MODULE_CXXFLAGS := $(ALL_CXXFLAGS) $($(THIS_MODULE)_cxxflags) +MODULE_LDFLAGS := $($(THIS_MODULE)_ldflags) -L$(MODULE_OUT) -Xlinker -rpath-link=$(MODULE_OUT) $(ALL_LDFLAGS) + +# Since this is a target module, add system-specific include flags. 
+MODULE_INCLUDE_FLAGS := \ + $(SYS_INCLUDES_RESIDUAL) \ + $(addprefix -isystem ,$(filter-out $(patsubst -I%,%,$(filter -I%,$(MODULE_INCLUDE_FLAGS))),$(SYS_INCLUDES_ISYSTEM))) \ + $(MODULE_INCLUDE_FLAGS) + +ifneq ($(SUPPORT_ANDROID_PLATFORM),) +$(error Android builds on this architecture are not supported) +endif + +ifneq ($(BUILD),debug) +ifeq ($(USE_LTO),1) +MODULE_LDFLAGS := \ + $(sort $(filter-out -W% -D% -isystem /%,$(ALL_CFLAGS) $(ALL_CXXFLAGS))) \ + $(MODULE_LDFLAGS) +endif +endif + +MODULE_ARCH_BITNESS := 32 + +MESON_CROSS_CPU_SECONDARY ?= armv7-a + +MESON_CROSS_SYSTEM := linux +MESON_CROSS_CPU_FAMILY := arm +MESON_CROSS_CPU := $(MESON_CROSS_CPU_SECONDARY) +MESON_CROSS_ENDIAN := little +MESON_CROSS_CROSS_COMPILE := $(CROSS_COMPILE_SECONDARY) +MESON_CROSS_CC := $(patsubst @%,%,$(CC_SECONDARY)) +MESON_CROSS_C_ARGS := $(SYS_CFLAGS) +MESON_CROSS_C_LINK_ARGS := $(SYS_LDFLAGS) +MESON_CROSS_CXX := $(patsubst @%,%,$(CXX_SECONDARY)) +MESON_CROSS_CXX_ARGS := $(SYS_CXXFLAGS) +MESON_CROSS_CXX_LINK_ARGS := $(SYS_LDFLAGS) diff --git a/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_armhf.mk b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_armhf.mk new file mode 100644 index 000000000000..5c3d9448cab1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_armhf.mk @@ -0,0 +1,86 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +MODULE_AR := $(AR_SECONDARY) +MODULE_CC := $(CC_SECONDARY) +MODULE_CXX := $(CXX_SECONDARY) +MODULE_NM := $(NM_SECONDARY) +MODULE_OBJCOPY := $(OBJCOPY_SECONDARY) +MODULE_RANLIB := $(RANLIB_SECONDARY) +MODULE_STRIP := $(STRIP_SECONDARY) + +MODULE_CFLAGS := $(ALL_CFLAGS) $($(THIS_MODULE)_cflags) +MODULE_CXXFLAGS := $(ALL_CXXFLAGS) $($(THIS_MODULE)_cxxflags) +MODULE_LDFLAGS := $($(THIS_MODULE)_ldflags) -L$(MODULE_OUT) -Xlinker -rpath-link=$(MODULE_OUT) $(ALL_LDFLAGS) + +# Since this is a target module, add system-specific include flags. +MODULE_INCLUDE_FLAGS := \ + $(SYS_INCLUDES_RESIDUAL) \ + $(addprefix -isystem ,$(filter-out $(patsubst -I%,%,$(filter -I%,$(MODULE_INCLUDE_FLAGS))),$(SYS_INCLUDES_ISYSTEM))) \ + $(MODULE_INCLUDE_FLAGS) + +ifneq ($(SUPPORT_ANDROID_PLATFORM),) +$(error Android builds on this architecture are not supported) +endif + +ifneq ($(BUILD),debug) +ifeq ($(USE_LTO),1) +MODULE_LDFLAGS := \ + $(sort $(filter-out -W% -D% -isystem /%,$(ALL_CFLAGS) $(ALL_CXXFLAGS))) \ + $(MODULE_LDFLAGS) +endif +endif + +MODULE_ARCH_BITNESS := 32 + +MESON_CROSS_CPU_SECONDARY ?= armv7-a + +MESON_CROSS_SYSTEM := linux +MESON_CROSS_CPU_FAMILY := arm +MESON_CROSS_CPU := $(MESON_CROSS_CPU_SECONDARY) +MESON_CROSS_ENDIAN := little +MESON_CROSS_CROSS_COMPILE := $(CROSS_COMPILE_SECONDARY) +MESON_CROSS_CC := $(patsubst @%,%,$(CC_SECONDARY)) +MESON_CROSS_C_ARGS := $(SYS_CFLAGS) +MESON_CROSS_C_LINK_ARGS := $(SYS_LDFLAGS) +MESON_CROSS_CXX := $(patsubst @%,%,$(CXX_SECONDARY)) +MESON_CROSS_CXX_ARGS := $(SYS_CXXFLAGS) +MESON_CROSS_CXX_LINK_ARGS := $(SYS_LDFLAGS) diff --git a/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_e2k.mk b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_e2k.mk new file mode 100644 index 000000000000..98a99dc29884 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_e2k.mk @@ -0,0 +1,24 @@ 
+########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Strictly Confidential. +### ########################################################################### + +MODULE_CC := $(CC) +MODULE_CXX := $(CXX) + +MODULE_CFLAGS := $(ALL_CFLAGS) $($(THIS_MODULE)_cflags) +MODULE_CXXFLAGS := $(ALL_CXXFLAGS) $($(THIS_MODULE)_cxxflags) +MODULE_LDFLAGS := $($(THIS_MODULE)_ldflags) -L$(MODULE_OUT) -Xlinker -rpath-link=$(MODULE_OUT) $(ALL_LDFLAGS) + +# Since this is a target module, add system-specific include flags. +MODULE_INCLUDE_FLAGS := \ + $(SYS_INCLUDES_RESIDUAL) \ + $(addprefix -isystem ,$(filter-out $(patsubst -I%,%,$(filter -I%,$(MODULE_INCLUDE_FLAGS))),$(SYS_INCLUDES_ISYSTEM))) \ + $(MODULE_INCLUDE_FLAGS) + +# On Linux, we currently don't need to specify any flags to find the system +# libraries. +MODULE_SYSTEM_LIBRARY_DIR_FLAGS := + +MODULE_ARCH_BITNESS := 64 diff --git a/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_i686.mk b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_i686.mk new file mode 100644 index 000000000000..74c31047e0a7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_i686.mk @@ -0,0 +1,273 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +MODULE_AR := $(AR_SECONDARY) +MODULE_CC := $(CC_SECONDARY) $(TARGET_FORCE_32BIT) -march=i686 +MODULE_CXX := $(CXX_SECONDARY) $(TARGET_FORCE_32BIT) -march=i686 +MODULE_NM := $(NM_SECONDARY) +MODULE_OBJCOPY := $(OBJCOPY_SECONDARY) +MODULE_RANLIB := $(RANLIB_SECONDARY) +MODULE_STRIP := $(STRIP_SECONDARY) + +MODULE_CFLAGS := $(ALL_CFLAGS) $($(THIS_MODULE)_cflags) $(TARGET_FORCE_32BIT) -march=i686 -mstackrealign +MODULE_CXXFLAGS := $(ALL_CXXFLAGS) $($(THIS_MODULE)_cxxflags) $(TARGET_FORCE_32BIT) -march=i686 -mstackrealign +MODULE_LDFLAGS := $($(THIS_MODULE)_ldflags) -L$(MODULE_OUT) -Xlinker -rpath-link=$(MODULE_OUT) $(TARGET_FORCE_32BIT) $(ALL_LDFLAGS) + +# Since this is a target module, add system-specific include flags. +MODULE_INCLUDE_FLAGS := \ + $(SYS_INCLUDES_RESIDUAL) \ + $(addprefix -isystem ,$(filter-out $(patsubst -I%,%,$(filter -I%,$(MODULE_INCLUDE_FLAGS))),$(SYS_INCLUDES_ISYSTEM))) \ + $(MODULE_INCLUDE_FLAGS) + +ifneq ($(SUPPORT_ANDROID_PLATFORM),) + +MODULE_EXE_LDFLAGS := \ + -Bdynamic -nostdlib -Wl,-dynamic-linker,/system/bin/linker + +MODULE_LIBGCC := -Wl,--version-script,$(MAKE_TOP)/common/libgcc.lds $(LIBGCC_SECONDARY) + +ifeq ($(NDK_ROOT),) + +include $(MAKE_TOP)/common/android/moduledefs_defs.mk + +_obj := $(TARGET_ROOT)/product/$(TARGET_DEVICE)/obj$(if $(MULTIARCH),_x86,) +_lib := lib + +SYSTEM_LIBRARY_LIBC := $(strip $(call path-to-system-library,$(_lib),c)) +SYSTEM_LIBRARY_LIBM := $(strip $(call path-to-system-library,$(_lib),m)) +SYSTEM_LIBRARY_LIBDL := $(strip $(call path-to-system-library,$(_lib),dl)) + +ifeq ($(USE_LLD),1) + MODULE_LDFLAGS += -fuse-ld=lld +endif + +MODULE_EXE_LDFLAGS += $(SYSTEM_LIBRARY_LIBC) + +# APK unittests +ifneq (,$(findstring $(THIS_MODULE),$(PVR_UNITTESTS_APK))) +MODULE_SYSTEM_LIBRARY_DIR_FLAGS := \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib + 
+MODULE_LIBRARY_FLAGS_SUBST += \ + c++_static:$(ANDROID_DDK_DEPS)/out/local/x86/libc++_static.a$$(space)$(ANDROID_DDK_DEPS)/out/local/x86/libc++abi.a \ + RScpp:$(_obj)/STATIC_LIBRARIES/libRScpp_static_intermediates/libRScpp_static.a + +MODULE_EXE_LDFLAGS := $(MODULE_EXE_LDFLAGS) $(LIBGCC_SECONDARY) -Wl,--as-needed $(SYSTEM_LIBRARY_LIBDL) + +else + +_vndk := $(strip $(call path-to-vndk,$(_lib))) +_vndk-sp := $(strip $(call path-to-vndk-sp,$(_lib))) +_apex-vndk := $(strip $(call path-to-apex-vndk,$(_lib))) + +MODULE_SYSTEM_LIBRARY_DIR_FLAGS := \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib/$(_vndk) \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib/$(_vndk) \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib/$(_vndk-sp) \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib/$(_vndk-sp) \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/$(_apex-vndk)/lib \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/$(_apex-vndk)/lib + +# LL-NDK libraries +ifneq ($(PVR_ANDROID_LLNDK_LIBRARIES),) +MODULE_LIBRARY_FLAGS_SUBST := \ + $(foreach _llndk,$(PVR_ANDROID_LLNDK_LIBRARIES), \ + $(_llndk):$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib/lib$(_llndk).so) +endif + +# CLDNN needs libneuralnetworks_common.a +MODULE_LIBRARY_FLAGS_SUBST += \ + neuralnetworks_common:$(_obj)/STATIC_LIBRARIES/libneuralnetworks_common_intermediates/libneuralnetworks_common.a \ + BlobCache:$(_obj)/STATIC_LIBRARIES/libBlobCache_intermediates/libBlobCache.a \ + nnCache:$(_obj)/STATIC_LIBRARIES/lib_nnCache_intermediates/lib_nnCache.a \ + perfetto_client_experimental:$(_obj)/STATIC_LIBRARIES/libperfetto_client_experimental_intermediates/libperfetto_client_experimental.a \ + protobuf-cpp-lite:$(_obj)/STATIC_LIBRARIES/libprotobuf-cpp-lite_intermediates/libprotobuf-cpp-lite.a \ + perfetto_trace_protos:$(_obj)/STATIC_LIBRARIES/perfetto_trace_protos_intermediates/perfetto_trace_protos.a \ + 
clang_rt:$(__clang_bindir)../lib64/clang/$(__clang_version)/lib/linux/libclang_rt.builtins-i686-android.a + +# Gralloc and hwcomposer depend on libdrm +MODULE_LIBRARY_FLAGS_SUBST += \ + drm:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/vendor/lib/libdrm.so + +# Unittests dependent on libRScpp_static.a +ifneq (,$(findstring $(THIS_MODULE),$(PVR_UNITTESTS_DEP_LIBRSCPP))) +MODULE_LIBRARY_FLAGS_SUBST += \ + RScpp:$(_obj)/STATIC_LIBRARIES/libRScpp_static_intermediates/libRScpp_static.a +endif +endif # PVR_UNITTESTS_APK + +# Always link to specific system libraries. +MODULE_LIBRARY_FLAGS_SUBST += \ + c:$(SYSTEM_LIBRARY_LIBC) \ + m:$(SYSTEM_LIBRARY_LIBM) \ + dl:$(SYSTEM_LIBRARY_LIBDL) + +MODULE_INCLUDE_FLAGS := \ + -isystem $(ANDROID_ROOT)/bionic/libc/arch-x86/include \ + -isystem $(ANDROID_ROOT)/bionic/libc/kernel/uapi/asm-x86 \ + -isystem $(ANDROID_ROOT)/bionic/libm/include/i387 \ + $(MODULE_INCLUDE_FLAGS) + +MODULE_ARCH_TAG := x86 + +_arch := x86 +_obj := $(strip $(call path-to-libc-rt,$(_obj),$(_arch))) + +else # NDK_ROOT + +MODULE_EXE_LDFLAGS += -lc + +MODULE_INCLUDE_FLAGS := \ + -isystem $(NDK_SYSROOT)/usr/include/$(patsubst x86_64-%,i686-%,$(CROSS_TRIPLE)) \ + $(MODULE_INCLUDE_FLAGS) + +MODULE_LIBRARY_FLAGS_SUBST := \ + art:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib/libart.so \ + RScpp:$(NDK_ROOT)/toolchains/renderscript/prebuilt/$(HOST_OS)-$(HOST_ARCH)/platform/x86/libRScpp_static.a \ + neuralnetworks_common:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/obj$(if $(MULTIARCH),_x86,)/STATIC_LIBRARIES/libneuralnetworks_common_intermediates/libneuralnetworks_common.a \ + BlobCache:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/obj/STATIC_LIBRARIES/libBlobCache_intermediates/libBlobCache.a \ + nnCache:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/obj/STATIC_LIBRARIES/lib_nnCache_intermediates/lib_nnCache.a \ + clang_rt:$(NDK_ROOT)/toolchains/llvm/prebuilt/linux-x86_64/lib64/clang/$(__clang_version)/lib/linux/libclang_rt.builtins-i686-android.a + +# Unittests dependent on libc++_static
+ifneq (,$(findstring $(THIS_MODULE),$(PVR_UNITTESTS_APK))) +MODULE_LIBRARY_FLAGS_SUBST := \ + c++_static:$(NDK_ROOT)/out/local/x86/libc++_static.a$$(space)$(NDK_ROOT)/out/local/x86/libc++abi.a \ + $(MODULE_LIBRARY_FLAGS_SUBST) +endif + +ifeq ($(wildcard $(NDK_ROOT)/out/local/x86/libc++.so),) +MODULE_LIBRARY_FLAGS_SUBST := \ + c++:$(NDK_ROOT)/sources/cxx-stl/llvm-libc++/libs/x86/libc++_static.a$$(space)$(NDK_ROOT)/sources/cxx-stl/llvm-libc++/libs/x86/libc++abi.a \ + $(MODULE_LIBRARY_FLAGS_SUBST) +else +MODULE_LIBRARY_FLAGS_SUBST := \ + c++:$(NDK_ROOT)/out/local/x86/libc++.so \ + $(MODULE_LIBRARY_FLAGS_SUBST) +MODULE_SYSTEM_LIBRARY_DIR_FLAGS += \ + -Xlinker -rpath-link=$(NDK_ROOT)/out/local/x86 +endif + +ifeq ($(filter-out $(NDK_ROOT)/%,$(NDK_SYSROOT)),) + +MODULE_SYSTEM_LIBRARY_DIR_FLAGS += \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib + +# Substitutions performed on MODULE_LIBRARY_FLAGS (NDK workarounds) +MODULE_LIBRARY_FLAGS_SUBST := \ + nativewindow:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib/libnativewindow.so \ + sync:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib/libsync.so \ + $(MODULE_LIBRARY_FLAGS_SUBST) + +endif # !VNDK + +_obj := $(NDK_PLATFORMS_ROOT)/$(TARGET_PLATFORM)/arch-x86/usr + +MODULE_SYSTEM_LIBRARY_DIR_FLAGS := \ + -L$(_obj)/lib \ + -Xlinker -rpath-link=$(_obj)/lib \ + $(MODULE_SYSTEM_LIBRARY_DIR_FLAGS) + +# Workaround; the VNDK platforms root lacks the crt files +_obj := $(NDK_ROOT)/platforms/$(TARGET_PLATFORM)/arch-x86/usr +ifeq ($(wildcard $(_obj)),) +_obj := $(NDK_ROOT)/platforms/android-$(API_LEVEL)/arch-x86/usr +endif + +MODULE_EXE_LDFLAGS := $(MODULE_EXE_LDFLAGS) $(LIBGCC_SECONDARY) -Wl,--as-needed -ldl + +MODULE_ARCH_TAG := x86 + +endif # NDK_ROOT + +MODULE_LIB_LDFLAGS := $(MODULE_EXE_LDFLAGS) + +MODULE_LDFLAGS += $(MODULE_SYSTEM_LIBRARY_DIR_FLAGS) + +MODULE_EXE_CRTBEGIN := $(_obj)/lib/crtbegin_dynamic.o +MODULE_EXE_CRTEND := 
$(_obj)/lib/crtend_android.o + +MODULE_LIB_CRTBEGIN := $(_obj)/lib/crtbegin_so.o +MODULE_LIB_CRTEND := $(_obj)/lib/crtend_so.o + +endif # SUPPORT_ANDROID_PLATFORM + +# When building 32 bit binaries on a 64 bit system with a native compiler it's +# necessary to install the (gcc|g++)-multilib packages. However, Ubuntu doesn't +# allow the (gcc|g++)-multilib and gcc-(arm|aarch64)-* packages to be installed +# at the same time. This is due to the multilib packages creating a symlink from +# /usr/include/asm to /usr/include/x86_64-linux-gnu/asm, which is invalid for +# anything other than x86. Work around this by removing the need to install the +# multilib packages. +# +ifeq ($(CROSS_COMPILE),) + ifneq ($(SUPPORT_BUILD_LWS),) + MODULE_INCLUDE_FLAGS += \ + -isystem /usr/include/x86_64-linux-gnu + else ifeq ($(SYSROOT),/) + MODULE_INCLUDE_FLAGS += \ + -isystem /usr/include/x86_64-linux-gnu + endif +endif + +ifneq ($(BUILD),debug) +ifeq ($(USE_LTO),1) +MODULE_LDFLAGS := \ + $(sort $(filter-out -W% -D% -isystem /%,$(ALL_CFLAGS) $(ALL_CXXFLAGS))) \ + $(MODULE_LDFLAGS) +endif +endif + +MODULE_ARCH_BITNESS := 32 + +MESON_CROSS_SYSTEM := linux +MESON_CROSS_CPU_FAMILY := x86 +MESON_CROSS_CPU := i686 +MESON_CROSS_ENDIAN := little +MESON_CROSS_CROSS_COMPILE := $(CROSS_COMPILE_SECONDARY) +MESON_CROSS_CC := $(patsubst @%,%,$(CC_SECONDARY)) +MESON_CROSS_C_ARGS := $(TARGET_FORCE_32BIT) -march=i686 $(SYS_CFLAGS) $(MODULE_INCLUDE_FLAGS) +MESON_CROSS_C_LINK_ARGS := $(TARGET_FORCE_32BIT) $(SYS_LDFLAGS) +MESON_CROSS_CXX := $(patsubst @%,%,$(CXX_SECONDARY)) +MESON_CROSS_CXX_ARGS := $(TARGET_FORCE_32BIT) -march=i686 $(SYS_CXXFLAGS) $(MODULE_INCLUDE_FLAGS) +MESON_CROSS_CXX_LINK_ARGS := $(TARGET_FORCE_32BIT) $(SYS_LDFLAGS) diff --git a/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_mips32r6el.mk b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_mips32r6el.mk new file mode 100644 index 000000000000..de789d46ebdc --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_mips32r6el.mk @@ -0,0 +1,200 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". 
+# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +# Little endian, mips32r6, regular ABI, with synci instruction, 64-bit fp +# registers. Disable odd-spreg for r2 abi compatibility. +MIPS_ABI_FLAGS := -EL -march=mips32r6 -mabi=32 -msynci -mfp64 -mno-odd-spreg +ifeq ($(cc-is-clang),true) +MIPS_ABI_FLAGS := $(filter-out -msynci,$(MIPS_ABI_FLAGS)) +endif + +MODULE_AR := $(AR_SECONDARY) +MODULE_CC := $(CC_SECONDARY) $(MIPS_ABI_FLAGS) +MODULE_CXX := $(CXX_SECONDARY) $(MIPS_ABI_FLAGS) +MODULE_NM := $(NM_SECONDARY) +MODULE_OBJCOPY := $(OBJCOPY_SECONDARY) +MODULE_RANLIB := $(RANLIB_SECONDARY) +MODULE_STRIP := $(STRIP_SECONDARY) + +MODULE_CFLAGS := $(ALL_CFLAGS) $($(THIS_MODULE)_cflags) $(MIPS_ABI_FLAGS) +MODULE_CXXFLAGS := $(ALL_CXXFLAGS) $($(THIS_MODULE)_cxxflags) $(MIPS_ABI_FLAGS) +MODULE_LDFLAGS := $($(THIS_MODULE)_ldflags) -L$(MODULE_OUT) -Xlinker -rpath-link=$(MODULE_OUT) $(ALL_LDFLAGS) $(MIPS_ABI_FLAGS) + +# Since this is a target module, add system-specific include flags. 
+MODULE_INCLUDE_FLAGS := \ + $(SYS_INCLUDES_RESIDUAL) \ + $(addprefix -isystem ,$(filter-out $(patsubst -I%,%,$(filter -I%,$(MODULE_INCLUDE_FLAGS))),$(SYS_INCLUDES_ISYSTEM))) \ + $(MODULE_INCLUDE_FLAGS) + +ifneq ($(SUPPORT_ANDROID_PLATFORM),) + +MODULE_EXE_LDFLAGS := \ + -Bdynamic -nostdlib -Wl,-dynamic-linker,/system/bin/linker -lc + +override LIBGCC_SECONDARY := $(shell $(patsubst @%,%,$(MODULE_CC)) -print-libgcc-file-name) +ifeq ($(cc-is-clang),true) + ifeq ($(wildcard $(LIBGCC_SECONDARY)),) + override LIBGCC_SECONDARY := \ + $(shell $(CROSS_COMPILE_SECONDARY)gcc $(MIPS_ABI_FLAGS) -print-libgcc-file-name) + ifeq ($(wildcard $(LIBGCC_SECONDARY)),) + $(error Secondary clang -print-libgcc-file-name workaround failed) + endif + endif +endif + +MODULE_LIBGCC := -Wl,--version-script,$(MAKE_TOP)/common/libgcc.lds $(LIBGCC_SECONDARY) + +_obj := $(TARGET_ROOT)/product/$(TARGET_DEVICE)/obj_mips + +# Linker flags used to find system libraries. +MODULE_SYSTEM_LIBRARY_DIR_FLAGS += \ + -L$(_obj)/lib \ + -Xlinker -rpath-link=$(_obj)/lib \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib +ifneq ($(wildcard $(TARGET_ROOT)/product/$(TARGET_DEVICE)/vendor),) +MODULE_SYSTEM_LIBRARY_DIR_FLAGS += \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/vendor/lib \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/vendor/lib +else +MODULE_SYSTEM_LIBRARY_DIR_FLAGS += \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/vendor/lib \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/vendor/lib +endif + +ifeq ($(NDK_ROOT),) + +MODULE_INCLUDE_FLAGS := \ + -isystem $(ANDROID_ROOT)/bionic/libc/arch-mips/include \ + -isystem $(ANDROID_ROOT)/bionic/libc/kernel/uapi/asm-mips \ + -isystem $(ANDROID_ROOT)/bionic/libm/include/mips \ + $(MODULE_INCLUDE_FLAGS) + +MODULE_ARCH_TAG := $(_obj) + +else # NDK_ROOT + +MODULE_INCLUDE_FLAGS := \ + -isystem $(NDK_SYSROOT)/usr/include/$(patsubst 
mips64el-%,mipsel-%,$(CROSS_TRIPLE)) \ + $(MODULE_INCLUDE_FLAGS) + +# FIXME: Won't actually work, no 32r6 ABI support +MODULE_LIBRARY_FLAGS_SUBST := \ + art:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib/libart.so \ + RScpp:$(NDK_ROOT)/toolchains/renderscript/prebuilt/$(HOST_OS)-$(HOST_ARCH)/platform/mips/libRScpp_static.a + +ifeq ($(wildcard $(NDK_ROOT)/out/local/mips/libc++.so),) +MODULE_LIBRARY_FLAGS_SUBST := \ + c++:$(NDK_ROOT)/sources/cxx-stl/llvm-libc++/libs/mips/libc++_static.a$$(space)$(NDK_ROOT)/sources/cxx-stl/llvm-libc++/libs/mips/libc++abi.a \ + $(MODULE_LIBRARY_FLAGS_SUBST) +else +MODULE_LIBRARY_FLAGS_SUBST := \ + c++:$(NDK_ROOT)/out/local/mips/libc++.so \ + $(MODULE_LIBRARY_FLAGS_SUBST) +MODULE_SYSTEM_LIBRARY_DIR_FLAGS += \ + -Xlinker -rpath-link=$(NDK_ROOT)/out/local/mips +endif + +ifeq ($(filter-out $(NDK_ROOT)/%,$(NDK_SYSROOT)),) + +# Substitutions performed on MODULE_LIBRARY_FLAGS (NDK workarounds) +MODULE_LIBRARY_FLAGS_SUBST := \ + nativewindow:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib/libnativewindow.so \ + sync:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib/libsync.so \ + $(MODULE_LIBRARY_FLAGS_SUBST) + +endif # !VNDK + +_obj := $(NDK_PLATFORMS_ROOT)/$(TARGET_PLATFORM)/arch-mips/usr + +MODULE_SYSTEM_LIBRARY_DIR_FLAGS := \ + -L$(_obj)/lib \ + -Xlinker -rpath-link=$(_obj)/lib \ + $(MODULE_SYSTEM_LIBRARY_DIR_FLAGS) + +# Workaround; the VNDK platforms root lacks the crt files +_obj := $(NDK_ROOT)/platforms/$(TARGET_PLATFORM)/arch-mips/usr + +MODULE_EXE_LDFLAGS := $(MODULE_EXE_LDFLAGS) $(LIBGCC_SECONDARY) -Wl,--as-needed -ldl + +MODULE_ARCH_TAG := mips + +endif # NDK_ROOT + +MODULE_LIB_LDFLAGS := $(MODULE_EXE_LDFLAGS) + +MODULE_LDFLAGS += $(MODULE_SYSTEM_LIBRARY_DIR_FLAGS) + +MODULE_EXE_CRTBEGIN := $(_obj)/lib/crtbegin_dynamic.o +MODULE_EXE_CRTEND := $(_obj)/lib/crtend_android.o + +MODULE_LIB_CRTBEGIN := $(_obj)/lib/crtbegin_so.o +MODULE_LIB_CRTEND := $(_obj)/lib/crtend_so.o + +else # SUPPORT_ANDROID_PLATFORM + +MODULE_ARCH_TAG := 
mipsel + +endif # SUPPORT_ANDROID_PLATFORM + +ifneq ($(BUILD),debug) +ifeq ($(USE_LTO),1) +MODULE_LDFLAGS := \ + $(sort $(filter-out -W% -D% -isystem /%,$(ALL_CFLAGS) $(ALL_CXXFLAGS))) \ + $(MODULE_LDFLAGS) +endif +endif + +MODULE_ARCH_BITNESS := 32 + +MESON_CROSS_CPU_SECONDARY ?= mips + +MESON_CROSS_SYSTEM := linux +MESON_CROSS_CPU_FAMILY := mips +MESON_CROSS_CPU := $(MESON_CROSS_CPU_SECONDARY) +MESON_CROSS_ENDIAN := little +MESON_CROSS_CROSS_COMPILE := $(CROSS_COMPILE_SECONDARY) +MESON_CROSS_CC := $(patsubst @%,%,$(CC_SECONDARY)) +MESON_CROSS_C_ARGS := $(MIPS_ABI_FLAGS) $(SYS_CFLAGS) +MESON_CROSS_C_LINK_ARGS := -EL $(SYS_LDFLAGS) +MESON_CROSS_CXX := $(patsubst @%,%,$(CXX_SECONDARY)) +MESON_CROSS_CXX_ARGS := $(MIPS_ABI_FLAGS) $(SYS_CXXFLAGS) +MESON_CROSS_CXX_LINK_ARGS := -EL $(SYS_LDFLAGS) diff --git a/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_mips64r6el.mk b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_mips64r6el.mk new file mode 100644 index 000000000000..cad0b9a112cc --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_mips64r6el.mk @@ -0,0 +1,217 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +# Little endian, mips64r6, regular ABI, with synci instruction +MIPS_ABI_FLAGS := -EL -march=mips64r6 -mabi=64 -msynci + +ifeq ($(cc-is-clang),true) +MIPS_ABI_FLAGS := $(filter-out -msynci,$(MIPS_ABI_FLAGS)) +MODULE_CC := $(CC) $(MIPS_ABI_FLAGS) -fintegrated-as +MODULE_CXX := $(CXX) $(MIPS_ABI_FLAGS) -fintegrated-as +else +MODULE_CC := $(CC) $(MIPS_ABI_FLAGS) +MODULE_CXX := $(CXX) $(MIPS_ABI_FLAGS) +endif + +MODULE_AR := $(AR) +MODULE_NM := $(NM) +MODULE_OBJCOPY := $(OBJCOPY) +MODULE_RANLIB := $(RANLIB) +MODULE_STRIP := $(STRIP) + +MODULE_CFLAGS := $(ALL_CFLAGS) $($(THIS_MODULE)_cflags) $(MIPS_ABI_FLAGS) +MODULE_CXXFLAGS := $(ALL_CXXFLAGS) $($(THIS_MODULE)_cxxflags) $(MIPS_ABI_FLAGS) +MODULE_LDFLAGS := $($(THIS_MODULE)_ldflags) -L$(MODULE_OUT) -Xlinker -rpath-link=$(MODULE_OUT) $(ALL_LDFLAGS) $(MIPS_ABI_FLAGS) + +# Since this is a target module, add system-specific include flags. +MODULE_INCLUDE_FLAGS := \ + $(SYS_INCLUDES_RESIDUAL) \ + $(addprefix -isystem ,$(filter-out $(patsubst -I%,%,$(filter -I%,$(MODULE_INCLUDE_FLAGS))),$(SYS_INCLUDES_ISYSTEM))) \ + $(MODULE_INCLUDE_FLAGS) + +ifneq ($(SUPPORT_ANDROID_PLATFORM),) + +MODULE_EXE_LDFLAGS := \ + -Bdynamic -nostdlib -Wl,-dynamic-linker,/system/bin/linker64 -lc + +override LIBGCC := $(shell $(patsubst @%,%,$(MODULE_CC)) -print-libgcc-file-name) +ifeq ($(cc-is-clang),true) + ifeq ($(wildcard $(LIBGCC)),) + override LIBGCC := \ + $(shell $(CROSS_COMPILE)gcc $(MIPS_ABI_FLAGS) -print-libgcc-file-name) + ifeq ($(wildcard $(LIBGCC)),) + $(error Primary clang -print-libgcc-file-name workaround failed) + endif + endif +endif + +MODULE_LIBGCC := -Wl,--version-script,$(MAKE_TOP)/common/libgcc.lds $(LIBGCC) + +ifeq ($(NDK_ROOT),) + +_obj := $(TARGET_ROOT)/product/$(TARGET_DEVICE)/obj +_lib := lib + +# Linker flags used to find system libraries. 
+MODULE_SYSTEM_LIBRARY_DIR_FLAGS += \ + -L$(_obj)/$(_lib) \ + -Xlinker -rpath-link=$(_obj)/$(_lib) \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64 \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64 +ifneq ($(wildcard $(TARGET_ROOT)/product/$(TARGET_DEVICE)/vendor),) +MODULE_SYSTEM_LIBRARY_DIR_FLAGS += \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/vendor/lib64 \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/vendor/lib64 +else +MODULE_SYSTEM_LIBRARY_DIR_FLAGS += \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/vendor/lib64 \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/vendor/lib64 +endif + +MODULE_INCLUDE_FLAGS := \ + -isystem $(ANDROID_ROOT)/bionic/libc/arch-mips64/include \ + -isystem $(ANDROID_ROOT)/bionic/libc/kernel/uapi/asm-mips \ + -isystem $(ANDROID_ROOT)/bionic/libm/include/mips \ + $(MODULE_INCLUDE_FLAGS) + +MODULE_ARCH_TAG := $(_obj) + +else # NDK_ROOT + +MODULE_INCLUDE_FLAGS := \ + -isystem $(NDK_SYSROOT)/usr/include/$(CROSS_TRIPLE) \ + $(MODULE_INCLUDE_FLAGS) + +# Take RScpp from TARGET_ROOT, as the NDK lacks 64r6 support +MODULE_LIBRARY_FLAGS_SUBST := \ + art:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64/libart.so \ + RScpp:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/obj/STATIC_LIBRARIES/libRScpp_static_intermediates/libRScpp_static.a + +ifeq ($(wildcard $(NDK_ROOT)/out/local/mips64/libc++.so),) +MODULE_LIBRARY_FLAGS_SUBST := \ + c++:$(NDK_ROOT)/sources/cxx-stl/llvm-libc++/libs/mips64/libc++_static.a$$(space)$(NDK_ROOT)/sources/cxx-stl/llvm-libc++/libs/mips64/libc++abi.a \ + $(MODULE_LIBRARY_FLAGS_SUBST) +else +MODULE_LIBRARY_FLAGS_SUBST := \ + c++:$(NDK_ROOT)/out/local/mips64/libc++.so \ + $(MODULE_LIBRARY_FLAGS_SUBST) +MODULE_SYSTEM_LIBRARY_DIR_FLAGS += \ + -Xlinker -rpath-link=$(NDK_ROOT)/out/local/mips64 +endif + +ifeq ($(filter-out $(NDK_ROOT)/%,$(NDK_SYSROOT)),) + +MODULE_SYSTEM_LIBRARY_DIR_FLAGS += \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64 \ + 
-Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64 + +# Substitutions performed on MODULE_LIBRARY_FLAGS (NDK workarounds) +MODULE_LIBRARY_FLAGS_SUBST := \ + nativewindow:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64/libnativewindow.so \ + sync:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64/libsync.so \ + $(MODULE_LIBRARY_FLAGS_SUBST) + +endif # !VNDK + +_obj := $(NDK_PLATFORMS_ROOT)/$(TARGET_PLATFORM)/arch-mips64/usr +_lib := lib64 + +MODULE_SYSTEM_LIBRARY_DIR_FLAGS := \ + -L$(_obj)/$(_lib) \ + -Xlinker -rpath-link=$(_obj)/$(_lib) \ + $(MODULE_SYSTEM_LIBRARY_DIR_FLAGS) + +# Workaround; the VNDK platforms root lacks the crt files +_obj := $(NDK_ROOT)/platforms/$(TARGET_PLATFORM)/arch-mips64/usr +_lib := lib64 + +MODULE_EXE_LDFLAGS := $(MODULE_EXE_LDFLAGS) $(LIBGCC) -Wl,--as-needed -ldl + +MODULE_ARCH_TAG := mips64 + +endif # NDK_ROOT + +MODULE_LIB_LDFLAGS := $(MODULE_EXE_LDFLAGS) + +MODULE_LDFLAGS += $(MODULE_SYSTEM_LIBRARY_DIR_FLAGS) + +MODULE_EXE_CRTBEGIN := $(_obj)/$(_lib)/crtbegin_dynamic.o +MODULE_EXE_CRTEND := $(_obj)/$(_lib)/crtend_android.o + +MODULE_LIB_CRTBEGIN := $(_obj)/$(_lib)/crtbegin_so.o +MODULE_LIB_CRTEND := $(_obj)/$(_lib)/crtend_so.o + +else # SUPPORT_ANDROID_PLATFORM + +# this is probably wrong... 
+MODULE_ARCH_TAG := mips64el + +endif # SUPPORT_ANDROID_PLATFORM + +ifneq ($(BUILD),debug) +ifeq ($(USE_LTO),1) +MODULE_LDFLAGS := \ + $(sort $(filter-out -W% -D% -isystem /%,$(ALL_CFLAGS) $(ALL_CXXFLAGS))) \ + $(MODULE_LDFLAGS) +endif +endif + +MODULE_ARCH_BITNESS := 64 + +MESON_CROSS_CPU ?= mips64 + +MESON_CROSS_SYSTEM := linux +MESON_CROSS_CPU_FAMILY := mips64 +MESON_CROSS_ENDIAN := little +MESON_CROSS_CROSS_COMPILE := $(CROSS_COMPILE) +MESON_CROSS_CC := $(patsubst @%,%,$(CC)) +MESON_CROSS_CXX := $(patsubst @%,%,$(CXX)) + +ifeq ($(cc-is-clang),true) +MESON_CROSS_C_ARGS := $(MIPS_ABI_FLAGS) -fintegrated-as $(SYS_CFLAGS) +MESON_CROSS_CXX_ARGS := $(MIPS_ABI_FLAGS) -fintegrated-as $(SYS_CXXFLAGS) +else +MESON_CROSS_C_ARGS := $(MIPS_ABI_FLAGS) $(SYS_CFLAGS) +MESON_CROSS_CXX_ARGS := $(MIPS_ABI_FLAGS) $(SYS_CXXFLAGS) +endif + +MESON_CROSS_C_LINK_ARGS := -EL $(SYS_LDFLAGS) +MESON_CROSS_CXX_LINK_ARGS := -EL $(SYS_LDFLAGS) diff --git a/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_neutral.mk b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_neutral.mk new file mode 100644 index 000000000000..52cb6f73f1e7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_neutral.mk @@ -0,0 +1,44 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +MODULE_BISON_FLAGS := $(ALL_BISON_FLAGS) $($(THIS_MODULE)_bisonflags) +MODULE_FLEX_FLAGS := $(ALL_FLEX_FLAGS) $($(THIS_MODULE)_flexflags) +MODULE_FLEXXX_FLAGS := $(ALL_FLEXXX_FLAGS) $($(THIS_MODULE)_flexxxflags) diff --git a/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_riscv64.mk b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_riscv64.mk new file mode 100644 index 000000000000..8d6ebf542fec --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_riscv64.mk @@ -0,0 +1,82 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +MODULE_AR := $(AR) +MODULE_CC := $(CC) +MODULE_CXX := $(CXX) +MODULE_NM := $(NM) +MODULE_OBJCOPY := $(OBJCOPY) +MODULE_RANLIB := $(RANLIB) +MODULE_STRIP := $(STRIP) + +MODULE_CFLAGS := $(ALL_CFLAGS) $($(THIS_MODULE)_cflags) +MODULE_CXXFLAGS := $(ALL_CXXFLAGS) $($(THIS_MODULE)_cxxflags) +MODULE_LDFLAGS := $($(THIS_MODULE)_ldflags) -L$(MODULE_OUT) -Xlinker -rpath-link=$(MODULE_OUT) $(ALL_LDFLAGS) + +# Since this is a target module, add system-specific include flags. 
+MODULE_INCLUDE_FLAGS := \ + $(SYS_INCLUDES_RESIDUAL) \ + $(addprefix -isystem ,$(filter-out $(patsubst -I%,%,$(filter -I%,$(MODULE_INCLUDE_FLAGS))),$(SYS_INCLUDES_ISYSTEM))) \ + $(MODULE_INCLUDE_FLAGS) + +ifneq ($(BUILD),debug) +ifeq ($(USE_LTO),1) +MODULE_LDFLAGS := \ + $(sort $(filter-out -W% -D% -isystem /%,$(ALL_CFLAGS) $(ALL_CXXFLAGS))) \ + $(MODULE_LDFLAGS) +endif +endif + +MODULE_ARCH_BITNESS := 64 + +MESON_CROSS_CPU_PRIMARY ?= riscv64 + +MESON_CROSS_SYSTEM := linux +MESON_CROSS_CPU_FAMILY := riscv64 +MESON_CROSS_CPU := $(MESON_CROSS_CPU_PRIMARY) +MESON_CROSS_ENDIAN := little +MESON_CROSS_CROSS_COMPILE := $(CROSS_COMPILE) +MESON_CROSS_CC := $(patsubst @%,%,$(CC)) +MESON_CROSS_C_ARGS := $(SYS_CFLAGS) +MESON_CROSS_C_LINK_ARGS := $(SYS_LDFLAGS) +MESON_CROSS_CXX := $(patsubst @%,%,$(CXX)) +MESON_CROSS_CXX_ARGS := $(SYS_CXXFLAGS) +MESON_CROSS_CXX_LINK_ARGS := $(SYS_LDFLAGS) diff --git a/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_x86_64.mk b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_x86_64.mk new file mode 100644 index 000000000000..248c3a01883d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/moduledefs/target_x86_64.mk @@ -0,0 +1,267 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +MODULE_AR := $(AR) +MODULE_CC := $(CC) -march=x86-64 +MODULE_CXX := $(CXX) -march=x86-64 +MODULE_NM := $(NM) +MODULE_OBJCOPY := $(OBJCOPY) +MODULE_RANLIB := $(RANLIB) +MODULE_STRIP := $(STRIP) + +MODULE_CFLAGS := $(ALL_CFLAGS) $($(THIS_MODULE)_cflags) -march=x86-64 -mstackrealign +MODULE_CXXFLAGS := $(ALL_CXXFLAGS) $($(THIS_MODULE)_cxxflags) -march=x86-64 -mstackrealign +MODULE_LDFLAGS := $($(THIS_MODULE)_ldflags) -L$(MODULE_OUT) -Xlinker -rpath-link=$(MODULE_OUT) $(ALL_LDFLAGS) + +# Since this is a target module, add system-specific include flags. +MODULE_INCLUDE_FLAGS := \ + $(SYS_INCLUDES_RESIDUAL) \ + $(addprefix -isystem ,$(filter-out $(patsubst -I%,%,$(filter -I%,$(MODULE_INCLUDE_FLAGS))),$(SYS_INCLUDES_ISYSTEM))) \ + $(MODULE_INCLUDE_FLAGS) + +ifneq ($(SUPPORT_ANDROID_PLATFORM),) + +MODULE_EXE_LDFLAGS := \ + -Bdynamic -nostdlib -Wl,-dynamic-linker,/system/bin/linker64 + +MODULE_LIBGCC := -Wl,--version-script,$(MAKE_TOP)/common/libgcc.lds $(LIBGCC) + +ifeq ($(NDK_ROOT),) + +include $(MAKE_TOP)/common/android/moduledefs_defs.mk + +_obj := $(TARGET_ROOT)/product/$(TARGET_DEVICE)/obj +_lib := lib64 + +SYSTEM_LIBRARY_LIBC := $(strip $(call path-to-system-library,$(_lib),c)) +SYSTEM_LIBRARY_LIBM := $(strip $(call path-to-system-library,$(_lib),m)) +SYSTEM_LIBRARY_LIBDL := $(strip $(call path-to-system-library,$(_lib),dl)) + +ifeq ($(USE_LLD),1) + MODULE_LDFLAGS += -fuse-ld=lld +endif + +MODULE_EXE_LDFLAGS += $(SYSTEM_LIBRARY_LIBC) + +# APK unittests +ifneq (,$(findstring $(THIS_MODULE),$(PVR_UNITTESTS_APK))) +MODULE_SYSTEM_LIBRARY_DIR_FLAGS := \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64 \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64 + +MODULE_LIBRARY_FLAGS_SUBST += \ + c++_static:$(ANDROID_DDK_DEPS)/out/local/x86_64/libc++_static.a$$(space)$(ANDROID_DDK_DEPS)/out/local/x86_64/libc++abi.a \ + 
RScpp:$(_obj)/STATIC_LIBRARIES/libRScpp_static_intermediates/libRScpp_static.a + +MODULE_EXE_LDFLAGS := $(MODULE_EXE_LDFLAGS) $(LIBGCC) -Wl,--as-needed $(SYSTEM_LIBRARY_LIBDL) + +else + +_vndk := $(strip $(call path-to-vndk,$(_lib))) +_vndk-sp := $(strip $(call path-to-vndk-sp,$(_lib))) +_apex-vndk := $(strip $(call path-to-apex-vndk,$(_lib))) + +MODULE_SYSTEM_LIBRARY_DIR_FLAGS := \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64/$(_vndk) \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64/$(_vndk) \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64/$(_vndk-sp) \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64/$(_vndk-sp) \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/$(_apex-vndk)/lib64 \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/$(_apex-vndk)/lib64 + +# LL-NDK libraries +ifneq ($(PVR_ANDROID_LLNDK_LIBRARIES),) +MODULE_LIBRARY_FLAGS_SUBST := \ + $(foreach _llndk,$(PVR_ANDROID_LLNDK_LIBRARIES), \ + $(_llndk):$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64/lib$(_llndk).so) +endif + +# CLDNN needs libneuralnetworks_common.a +MODULE_LIBRARY_FLAGS_SUBST += \ + neuralnetworks_common:$(_obj)/STATIC_LIBRARIES/libneuralnetworks_common_intermediates/libneuralnetworks_common.a \ + BlobCache:$(_obj)/STATIC_LIBRARIES/libBlobCache_intermediates/libBlobCache.a \ + nnCache:$(_obj)/STATIC_LIBRARIES/lib_nnCache_intermediates/lib_nnCache.a \ + perfetto_client_experimental:$(_obj)/STATIC_LIBRARIES/libperfetto_client_experimental_intermediates/libperfetto_client_experimental.a \ + protobuf-cpp-lite:$(_obj)/STATIC_LIBRARIES/libprotobuf-cpp-lite_intermediates/libprotobuf-cpp-lite.a \ + perfetto_trace_protos:$(_obj)/STATIC_LIBRARIES/perfetto_trace_protos_intermediates/perfetto_trace_protos.a \ + clang_rt:$(__clang_bindir)../lib64/clang/$(__clang_version)/lib/linux/libclang_rt.builtins-x86_64-android.a + +# Gralloc and hwcomposer depend on libdrm +MODULE_LIBRARY_FLAGS_SUBST += \ + 
drm:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/vendor/lib64/libdrm.so + +# Unittests dependent on libRScpp_static.a +ifneq (,$(findstring $(THIS_MODULE),$(PVR_UNITTESTS_DEP_LIBRSCPP))) +MODULE_LIBRARY_FLAGS_SUBST += \ + RScpp:$(_obj)/STATIC_LIBRARIES/libRScpp_static_intermediates/libRScpp_static.a +endif +endif # PVR_UNITTESTS_APK + +# Always link to specific system libraries. +MODULE_LIBRARY_FLAGS_SUBST += \ + c:$(SYSTEM_LIBRARY_LIBC) \ + m:$(SYSTEM_LIBRARY_LIBM) \ + dl:$(SYSTEM_LIBRARY_LIBDL) + +MODULE_INCLUDE_FLAGS := \ + -isystem $(ANDROID_ROOT)/bionic/libc/arch-x86_64/include \ + -isystem $(ANDROID_ROOT)/bionic/libc/kernel/uapi/asm-x86 \ + -isystem $(ANDROID_ROOT)/bionic/libm/include/amd64 \ + $(MODULE_INCLUDE_FLAGS) + +MODULE_ARCH_TAG := x86_64 + +_arch := x86_64 +_obj := $(strip $(call path-to-libc-rt,$(_obj),$(_arch))) +ifneq ($(findstring prebuilts,$(_obj)),) +_lib := lib64 +else +_lib := lib +endif + +else # NDK_ROOT + +MODULE_EXE_LDFLAGS += -lc + +MODULE_INCLUDE_FLAGS := \ + -isystem $(NDK_SYSROOT)/usr/include/$(CROSS_TRIPLE) \ + $(MODULE_INCLUDE_FLAGS) + +MODULE_LIBRARY_FLAGS_SUBST := \ + art:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64/libart.so \ + RScpp:$(NDK_ROOT)/toolchains/renderscript/prebuilt/$(HOST_OS)-$(HOST_ARCH)/platform/x86_64/libRScpp_static.a \ + neuralnetworks_common:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/obj/STATIC_LIBRARIES/libneuralnetworks_common_intermediates/libneuralnetworks_common.a \ + BlobCache:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/obj/STATIC_LIBRARIES/libBlobCache_intermediates/libBlobCache.a \ + nnCache:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/obj/STATIC_LIBRARIES/lib_nnCache_intermediates/lib_nnCache.a \ + clang_rt:$(NDK_ROOT)/toolchains/llvm/prebuilt/linux-x86_64/lib64/clang/$(__clang_version)/lib/linux/libclang_rt.builtins-x86_64-android.a + +# Unittests dependent on libc++_static +ifneq (,$(findstring $(THIS_MODULE),$(PVR_UNITTESTS_APK))) +MODULE_LIBRARY_FLAGS_SUBST := \ + 
c++_static:$(NDK_ROOT)/out/local/x86_64/libc++_static.a$$(space)$(NDK_ROOT)/out/local/x86_64/libc++abi.a \ + $(MODULE_LIBRARY_FLAGS_SUBST) +endif + +ifeq ($(wildcard $(NDK_ROOT)/out/local/x86_64/libc++.so),) +MODULE_LIBRARY_FLAGS_SUBST := \ + c++:$(NDK_ROOT)/sources/cxx-stl/llvm-libc++/libs/x86_64/libc++_static.a$$(space)$(NDK_ROOT)/sources/cxx-stl/llvm-libc++/libs/x86_64/libc++abi.a \ + $(MODULE_LIBRARY_FLAGS_SUBST) +else +MODULE_LIBRARY_FLAGS_SUBST := \ + c++:$(NDK_ROOT)/out/local/x86_64/libc++.so \ + $(MODULE_LIBRARY_FLAGS_SUBST) +MODULE_SYSTEM_LIBRARY_DIR_FLAGS += \ + -Xlinker -rpath-link=$(NDK_ROOT)/out/local/x86_64 +endif + +ifeq ($(filter-out $(NDK_ROOT)/%,$(NDK_SYSROOT)),) + +MODULE_SYSTEM_LIBRARY_DIR_FLAGS += \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64 \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64 + +# Substitutions performed on MODULE_LIBRARY_FLAGS (NDK workarounds) +MODULE_LIBRARY_FLAGS_SUBST := \ + nativewindow:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64/libnativewindow.so \ + sync:$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib64/libsync.so \ + $(MODULE_LIBRARY_FLAGS_SUBST) + +endif # !VNDK + +_obj := $(NDK_PLATFORMS_ROOT)/$(TARGET_PLATFORM)/arch-x86_64/usr +_lib := lib64 + +MODULE_SYSTEM_LIBRARY_DIR_FLAGS := \ + -L$(_obj)/$(_lib) \ + -Xlinker -rpath-link=$(_obj)/$(_lib) \ + $(MODULE_SYSTEM_LIBRARY_DIR_FLAGS) + +# Workaround; the VNDK platforms root lacks the crt files +_obj := $(NDK_ROOT)/platforms/$(TARGET_PLATFORM)/arch-x86_64/usr +_lib := lib64 +ifeq ($(wildcard $(_obj)),) +_obj := $(NDK_ROOT)/platforms/android-$(API_LEVEL)/arch-x86_64/usr +endif + +MODULE_EXE_LDFLAGS := $(MODULE_EXE_LDFLAGS) $(LIBGCC) -Wl,--as-needed -ldl + +MODULE_ARCH_TAG := x86_64 + +endif # NDK_ROOT + +MODULE_LIB_LDFLAGS := $(MODULE_EXE_LDFLAGS) + +MODULE_LDFLAGS += $(MODULE_SYSTEM_LIBRARY_DIR_FLAGS) + +MODULE_EXE_CRTBEGIN := $(_obj)/$(_lib)/crtbegin_dynamic.o +MODULE_EXE_CRTEND := $(_obj)/$(_lib)/crtend_android.o + 
+MODULE_LIB_CRTBEGIN := $(_obj)/$(_lib)/crtbegin_so.o +MODULE_LIB_CRTEND := $(_obj)/$(_lib)/crtend_so.o + +endif # SUPPORT_ANDROID_PLATFORM + +ifneq ($(BUILD),debug) +ifeq ($(USE_LTO),1) +MODULE_LDFLAGS := \ + $(sort $(filter-out -W% -D% -isystem /%,$(ALL_CFLAGS) $(ALL_CXXFLAGS))) \ + $(MODULE_LDFLAGS) +endif +endif + +MODULE_ARCH_BITNESS := 64 +MODULE_ARCH_TYPE := x86 + +ifeq ($(SUPPORT_INTEGRITY_PLATFORM),1) +include $(MAKE_TOP)/common/integrity/modify_moduledefs.mk +endif + +MESON_CROSS_SYSTEM := linux +MESON_CROSS_CPU_FAMILY := x86_64 +MESON_CROSS_CPU := x86_64 +MESON_CROSS_ENDIAN := little +MESON_CROSS_CROSS_COMPILE := $(CROSS_COMPILE) +MESON_CROSS_CC := $(patsubst @%,%,$(CC)) +MESON_CROSS_C_ARGS := -march=x86-64 $(SYS_CFLAGS) +MESON_CROSS_C_LINK_ARGS := $(SYS_LDFLAGS) +MESON_CROSS_CXX := $(patsubst @%,%,$(CXX)) +MESON_CROSS_CXX_ARGS := -march=x86-64 $(SYS_CXXFLAGS) +MESON_CROSS_CXX_LINK_ARGS := $(SYS_LDFLAGS) diff --git a/drivers/mcst/gpu-imgtec/build/linux/moduledefs_common.mk b/drivers/mcst/gpu-imgtec/build/linux/moduledefs_common.mk new file mode 100644 index 000000000000..86dbd9304db9 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/moduledefs_common.mk @@ -0,0 +1,152 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +MODULE_OUT := $(RELATIVE_OUT)/$(MODULE_ARCH) +MODULE_INTERMEDIATES_DIR := $(MODULE_OUT)/intermediates/$(THIS_MODULE) + +MODULE_TARGETS := +MODULE_HOST_BUILD := +MODULE_CLEAN_TARGETS := +MODULE_CLOBBER_TARGETS := + +MODULE_CFLAGS := +MODULE_CXXFLAGS := +MODULE_LDFLAGS := +MODULE_BISON_FLAGS := +MODULE_FLEX_FLAGS := +MODULE_FLEXXX_FLAGS := + +MODULE_ARCH_TAG := $(patsubst i%86,i686,$(subst host_,,$(subst target_,,$(MODULE_ARCH)))) +MODULE_ARCH_BITNESS := + +# Only allow cflags that do not affect code generation. This is to ensure +# proper binary compatibility when LTO (Link-Time Optimization) is enabled. +# We make exceptions for the below flags which will all fail linkage in +# non-LTO mode if incorrectly specified. +# +# NOTE: Only used by static_library and objects right now. Other module +# types should not be affected by complex code generation flags w/ LTO. +# Set MODULE_CHECK_CFLAGS in the module makefile to enable this check. 
+MODULE_CHECK_CFLAGS := +MODULE_ALLOWED_CFLAGS := -W% -D% -std=% -frtti -fPIC -fPIE -pie -m32 -fvisibility=hidden -fexceptions + +# -L flags for library search dirs: these are relative to $(TOP), unless +# they're absolute paths +MODULE_LIBRARY_DIR_FLAGS := $(foreach _path,$($(THIS_MODULE)_libpaths),$(if $(filter /%,$(_path)),-L$(call relative-to-top,$(_path)),-L$(_path))) +# -L options to find system libraries (may be arch-specific) +MODULE_SYSTEM_LIBRARY_DIR_FLAGS := +# -I flags for header search dirs (same rules as for -L) +MODULE_INCLUDE_FLAGS := $(foreach _path,$($(THIS_MODULE)_includes),$(if $(filter /%,$(_path)),-I$(call relative-to-top,$(_path)),-I$(_path))) +# Pattern substitution in form old:new to be done to link command line +MODULE_LIBRARY_FLAGS_SUBST := + +# If the build provides some external khronos include flags, and the module +# hasn't explicitly opted out of path substitution, prepend the system path +# to the DDK khronos header include path. This causes the platform headers +# to override the DDK versions. This is the default behaviour for Android. +ifneq ($(filter-out host_%,$(MODULE_ARCH)),) +ifneq ($(SYS_KHRONOS_INCLUDES),) +ifneq ($($(THIS_MODULE)_force_internal_khronos_headers),1) +MODULE_INCLUDE_FLAGS := $(patsubst -Iinclude/khronos,$(SYS_KHRONOS_INCLUDES) -isystem include/khronos,$(MODULE_INCLUDE_FLAGS)) +endif +endif +endif + +# These define the rules for finding source files. +# +# - If a name begins with a slash, we strip $(TOP) off the front if it +# begins with $(TOP). This is so that we don't get really long error +# messages from the compiler if the source tree is in a deeply nested +# directory, but we still do get absolute paths if you say "make +# OUT=/tmp/somewhere" +# +# - Otherwise, if a name contains a slash and begins with $(OUT), we leave +# it as it is. 
This is so you can say "module_src := +# $(TARGET_INTERMEDIATES)/something/generated.c" +# +# - Otherwise, we assume it's a path referring to somewhere under the +# directory containing Linux.mk, and add $(THIS_DIR) to it +# +_SOURCES_WITHOUT_SLASH := \ + $(strip $(foreach _s,$($(THIS_MODULE)_src),$(if $(findstring /,$(_s)),,$(_s)))) +_SOURCES_WITH_SLASH := \ + $(strip $(foreach _s,$($(THIS_MODULE)_src),$(if $(findstring /,$(_s)),$(_s),))) +MODULE_SOURCES := $(addprefix $(THIS_DIR)/,$(_SOURCES_WITHOUT_SLASH)) +MODULE_SOURCES += $(call relative-to-top,$(abspath $(filter /%,$(_SOURCES_WITH_SLASH)))) + +_RELATIVE_SOURCES_WITH_SLASH := \ + $(filter-out /%,$(_SOURCES_WITH_SLASH)) +_OUTDIR_RELATIVE_SOURCES_WITH_SLASH := \ + $(filter $(RELATIVE_OUT)/%,$(_RELATIVE_SOURCES_WITH_SLASH)) +_THISDIR_RELATIVE_SOURCES_WITH_SLASH := \ + $(filter-out $(RELATIVE_OUT)/%,$(_RELATIVE_SOURCES_WITH_SLASH)) +MODULE_SOURCES += $(call relative-to-top,$(abspath $(_OUTDIR_RELATIVE_SOURCES_WITH_SLASH))) +MODULE_SOURCES += $(call relative-to-top,$(abspath $(addprefix $(THIS_DIR)/,$(_THISDIR_RELATIVE_SOURCES_WITH_SLASH)))) + +# Add generated sources +MODULE_SOURCES += $(call relative-to-top,$(abspath $(addprefix $(MODULE_OUT)/,$($(THIS_MODULE)_src_relative)))) + +# We want to do this only for pure Android, in which case only +# SUPPORT_ANDROID_PLATFORM will be set to 1. 
+ifeq ($(SUPPORT_ANDROID_PLATFORM)$(SUPPORT_ARC_PLATFORM),1) + define set-flags-from-package + ifeq ($(1),libdrm) + ifeq ($(PVR_ANDROID_OLD_LIBDRM_HEADER_PATH),1) + $(THIS_MODULE)_includes += \ + $(TARGET_ROOT)/product/$(TARGET_DEVICE)/obj/include \ + $(TARGET_ROOT)/product/$(TARGET_DEVICE)/obj/include/libdrm + endif + else ifeq ($(1),sync) + # Nothing to add in this case + else + $$(warning Unknown package for '$(THIS_MODULE)': $(1)) + $$(error Missing mapping between package and compiler flags) + endif + endef + + $(foreach _package,$($(THIS_MODULE)_packages),\ + $(eval $(call set-flags-from-package,$(_package)))) +else ifeq ($(SUPPORT_NEUTRINO_PLATFORM),) + # pkg-config integration + # We don't support arbitrary CFLAGS yet (just includes) + $(foreach _package,$($(THIS_MODULE)_packages),\ + $(eval MODULE_INCLUDE_FLAGS += `$(PKG_CONFIG) --cflags-only-I $(_package)`)\ + $(eval MODULE_LIBRARY_DIR_FLAGS += `$(PKG_CONFIG) --libs-only-L $(_package)`)) +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/moduledefs_libs.mk b/drivers/mcst/gpu-imgtec/build/linux/moduledefs_libs.mk new file mode 100644 index 000000000000..ed1ab10d9b62 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/moduledefs_libs.mk @@ -0,0 +1,149 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +# This can't go in moduledefs_common.mk because it depends on +# MODULE_HOST_BUILD. + +# MODULE_LIBRARY_FLAGS contains the flags to link each library. The rules +# are: +# +# module_staticlibs := mylib +# module_libs := mylib +# Use -lmylib +# +# module_extlibs := mylib +# Use $(libmylib_ldflags) if that variable is defined (empty counts as +# defined). Otherwise use -lmylib +# +# module_libs := :mylib +# Use -l:mylib.so +# +# module_extlibs := :mylib.a +# Use -l:mylib.a, but process *before* libgcc.a is linked. All other +# extlibs are treated as dynamic and linked *after* libgcc.a. + +MODULE_LIBRARY_FLAGS := + +# We want to do this only for pure Android, in which case only +# SUPPORT_ANDROID_PLATFORM will be set to 1. +ifeq ($(SUPPORT_ANDROID_PLATFORM)$(SUPPORT_ARC_PLATFORM),1) + ifneq ($(filter $($(THIS_MODULE)_type),shared_library executable),) + define set-extlibs-from-package + ifeq ($(1),libdrm) + $(THIS_MODULE)_extlibs += drm + else ifeq ($(1),sync) + $(THIS_MODULE)_extlibs += sync + else + $$(warning Unknown package for '$(THIS_MODULE)': $(1)) + $$(error Missing mapping between package and external library) + endif + endef + + $(foreach _package,$($(THIS_MODULE)_packages),\ + $(eval $(call set-extlibs-from-package,$(_package)))) + endif +endif + +MODULE_LIBRARY_FLAGS += \ + $(addprefix -l, $($(THIS_MODULE)_staticlibs)) \ + $(addprefix -l, $(filter :%.a, $($(THIS_MODULE)_extlibs))) \ + $(if $(MODULE_HOST_BUILD),,$(MODULE_LIBGCC)) \ + $(addprefix -l, $(filter-out :%, $($(THIS_MODULE)_libs))) \ + $(addprefix -l, $(addsuffix .so, $(filter :%,$($(THIS_MODULE)_libs)))) \ + $(foreach _lib,$(filter-out :%.a, $($(THIS_MODULE)_extlibs)),$(if $(or $(MODULE_HOST_BUILD),$(filter undefined,$(origin lib$(_lib)_ldflags))),-l$(_lib),$(lib$(_lib)_ldflags))) + +ifneq ($(MODULE_LIBRARY_FLAGS_SUBST),) +$(foreach _s,$(MODULE_LIBRARY_FLAGS_SUBST),$(eval \ + MODULE_LIBRARY_FLAGS := $(patsubst \ + 
-l$(word 1,$(subst :,$(space),$(_s))),\ + $(word 2,$(subst :,$(space),$(_s))),\ + $(MODULE_LIBRARY_FLAGS)))) +endif + +ifneq ($(SUPPORT_NEUTRINO_PLATFORM),1) + # We don't want to do this for pure Android, in which case only + # SUPPORT_ANDROID_PLATFORM will be set to 1. + ifneq ($(SUPPORT_ANDROID_PLATFORM)$(SUPPORT_ARC_PLATFORM),1) + $(foreach _package,$($(THIS_MODULE)_packages),\ + $(eval MODULE_LIBRARY_FLAGS += `$(PKG_CONFIG) --libs-only-l $(_package)`)) + endif +endif + +ifneq ($(SYSROOT),) + ifneq ($(SYSROOT),/) + ifeq (${MODULE_ARCH_TAG},armhf) + MULTIARCH_DIR := arm-linux-gnueabihf + else ifeq (${MODULE_ARCH_TAG},i686) + MULTIARCH_DIR := i386-linux-gnu + else + MULTIARCH_DIR := ${MODULE_ARCH_TAG}-linux-gnu + endif + + # Restrict pkg-config to looking only in the SYSROOT + # + # Sort paths based on priority. Local paths should always appear first to + # ensure that user built packages override the system versions. Driver paths + # should appear last to ensure shim libraries (if present) get priority. + PKG_CONFIG_LIBDIR := ${SYSROOT}/usr/local/lib/${MULTIARCH_DIR}/pkgconfig + PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):${SYSROOT}/usr/local/lib/pkgconfig + PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):${SYSROOT}/usr/local/share/pkgconfig + PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):${SYSROOT}/usr/lib/${MULTIARCH_DIR}/pkgconfig + PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):${SYSROOT}/usr/lib64/pkgconfig + PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):${SYSROOT}/usr/lib/pkgconfig + PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):${SYSROOT}/usr/share/pkgconfig + PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):${SYSROOT}/usr/lib64/driver/pkgconfig + PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):${SYSROOT}/usr/lib/driver/pkgconfig + + # SYSROOT doesn't always do the right thing. 
So explicitly add necessary + # paths to the link path + MODULE_LDFLAGS += -Xlinker -rpath-link=${SYSROOT}/usr/local/lib/${MULTIARCH_DIR} + MODULE_LDFLAGS += -Xlinker -rpath-link=${SYSROOT}/lib/${MULTIARCH_DIR} + MODULE_LDFLAGS += -Xlinker -rpath-link=${SYSROOT}/usr/lib/ + MODULE_LDFLAGS += -Xlinker -rpath-link=${SYSROOT}/usr/lib/${MULTIARCH_DIR} + endif +endif + +ifneq ($(MODULE_ARCH_TAG),) + MODULE_LIBRARY_DIR_FLAGS := $(subst _LLVM_ARCH_,$(MODULE_ARCH_TAG),$(MODULE_LIBRARY_DIR_FLAGS)) + MODULE_INCLUDE_FLAGS := $(subst _LLVM_ARCH_,$(MODULE_ARCH_TAG),$(MODULE_INCLUDE_FLAGS)) + + MODULE_LIBRARY_DIR_FLAGS := $(subst _NNVM_ARCH_,$(MODULE_ARCH_TAG),$(MODULE_LIBRARY_DIR_FLAGS)) + MODULE_INCLUDE_FLAGS := $(subst _NNVM_ARCH_,$(MODULE_ARCH_TAG),$(MODULE_INCLUDE_FLAGS)) +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/modules.mk b/drivers/mcst/gpu-imgtec/build/linux/modules.mk new file mode 100644 index 000000000000..18424842fae3 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/modules.mk @@ -0,0 +1,48 @@ +########################################################################### ### +#@Title Module processing +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +# Bits for processing $(modules) after reading in each Linux.mk + +#$(info ---- $(modules) ----) + +$(foreach _m,$(modules),$(if $(filter $(_m),$(ALL_MODULES)),$(error In makefile $(THIS_MAKEFILE): Duplicate module $(_m) (first seen in $(INTERNAL_MAKEFILE_FOR_MODULE_$(_m))) listed in $$(modules)),$(eval $(call register-module,$(_m))))) + +ALL_MODULES += $(modules) diff --git a/drivers/mcst/gpu-imgtec/build/linux/modules/kernel_module.mk b/drivers/mcst/gpu-imgtec/build/linux/modules/kernel_module.mk new file mode 100644 index 000000000000..022232ff9e7b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/modules/kernel_module.mk @@ -0,0 +1,92 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +# Rules for making kernel modules with kbuild. 
This makefile doesn't define +# any rules that build the modules, it only copies the kbuild Makefile into +# the right place and then invokes kbuild to do the actual build + +$(call target-build-only,kernel module) + +MODULE_KBUILD_DIR := $(MODULE_OUT)/kbuild + +# $(THIS_MODULE)_makefile names the kbuild makefile fragment used to build +# this module's objects +$(call must-be-nonempty,$(THIS_MODULE)_makefile) +MODULE_KBUILD_MAKEFILE := $($(THIS_MODULE)_makefile) + +# $(THIS_MODULE)_target specifies the name of the kernel module +$(call must-be-nonempty,$(THIS_MODULE)_target) +MODULE_TARGETS := $($(THIS_MODULE)_target) +MODULE_KBUILD_OBJECTS := $($(THIS_MODULE)_target:.ko=.o) + +$(call module-info-line,kernel module: $(MODULE_TARGETS)) + +# Unusually, we define $(THIS_MODULE)_install_path if the user didn't, as we +# can't use MODULE_INSTALL_PATH in the scripts.mk logic. +ifeq ($($(THIS_MODULE)_install_path),) +$(THIS_MODULE)_install_path := \ + $${MOD_DESTDIR}/$(patsubst $(MODULE_OUT)/%,%,$(MODULE_TARGETS)) +endif + +MODULE_INSTALL_PATH := $($(THIS_MODULE)_install_path) + +# Here we could maybe include $(MODULE_KBUILD_MAKEFILE) and look at +# $(MODULE_KBUILD_OBJECTS)-y to see which source files might be built + +.PHONY: $(THIS_MODULE) +$(THIS_MODULE): MODULE_KBUILD_MAKEFILE := $(MODULE_KBUILD_MAKEFILE) +$(THIS_MODULE): MODULE_KBUILD_OBJECTS := $(MODULE_KBUILD_OBJECTS) +$(THIS_MODULE): + @echo "kbuild module '$@'" + @echo " MODULE_KBUILD_MAKEFILE := $(MODULE_KBUILD_MAKEFILE)" + @echo " MODULE_KBUILD_OBJECTS := $(MODULE_KBUILD_OBJECTS)" + @echo ' Being built:' $(if $(filter $@,$(KERNEL_COMPONENTS)),"yes (separate module)",$(if $(filter $@,$(EXTRA_PVRSRVKM_COMPONENTS)),"yes (into pvrsrvkm)","no")) + @echo "Module $@ is a kbuild module. 
Run 'make kbuild' to make it" + @false + +$(MODULE_INTERMEDIATES_DIR)/.install: MODULE_TYPE := $($(THIS_MODULE)_type) +$(MODULE_INTERMEDIATES_DIR)/.install: MODULE_INSTALL_PATH := $(MODULE_INSTALL_PATH) +$(MODULE_INTERMEDIATES_DIR)/.install: MODULE_TARGETS := $(patsubst $(MODULE_OUT)/%,%,$(MODULE_TARGETS)) +$(MODULE_INTERMEDIATES_DIR)/.install: $(THIS_MAKEFILE) | $(MODULE_INTERMEDIATES_DIR) + @echo 'install_file $(MODULE_TARGETS) $(MODULE_INSTALL_PATH) "$(MODULE_TYPE)" 0644 0:0' >$@ + +ALL_KBUILD_MODULES += $(THIS_MODULE) +INTERNAL_KBUILD_MAKEFILE_FOR_$(THIS_MODULE) := $(MODULE_KBUILD_MAKEFILE) +INTERNAL_KBUILD_OBJECTS_FOR_$(THIS_MODULE) := $(MODULE_KBUILD_OBJECTS) diff --git a/drivers/mcst/gpu-imgtec/build/linux/mt8173_linux/Makefile b/drivers/mcst/gpu-imgtec/build/linux/mt8173_linux/Makefile new file mode 100644 index 000000000000..d94332616214 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/mt8173_linux/Makefile @@ -0,0 +1,83 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +RGX_BVNC ?= 4.40.2.51 + +include ../config/preconfig.mk +include ../config/window_system.mk + +ifneq ($(SUPPORT_KMS),1) +$(error This platform only supports window systems that use the standard Linux driver model.) 
+endif + +PVR_SYSTEM := mt8173 +NO_HARDWARE := 0 + +PVR_LDM_PLATFORM_PRE_REGISTERED := 1 +SUPPORT_LINUX_DVFS ?= 1 + +KERNEL_COMPONENTS := srvkm + +ifeq ($(PVR_REMVIEW),1) + DISPLAY_CONTROLLER := drm_nulldisp + KERNEL_COMPONENTS += $(DISPLAY_CONTROLLER) + PVR_DRM_MODESET_DRIVER_NAME := nulldisp +else + PVR_DRM_MODESET_DRIVER_NAME := mediatek + PVR_DRM_MODESET_MODULE_NAME := dumb +endif + +FIX_DUSTS_POW_ON_INIT := 1 + +# Always print fatal and error logs, especially in kernel mode. +PVRSRV_NEED_PVR_DPF := 1 + +RGXFW_POWER_EVENT_DELAY_TICKS := 10 + +# Should be last +include ../config/core.mk +include ../common/lws.mk + +$(eval $(call TunableBothConfigMake,KERNEL_DRIVER_DIR,,\ +The directory inside the Linux kernel tree (relative to KERNELDIR) where_\ +the PVR Services kernel driver files will live. If set$(comma) the PVR_LOADER and_\ +PVR_SYSTEM related files found in this directory will be used instead of_\ +the local copies._\ +)) diff --git a/drivers/mcst/gpu-imgtec/build/linux/nohw_linux/Makefile b/drivers/mcst/gpu-imgtec/build/linux/nohw_linux/Makefile new file mode 100644 index 000000000000..197e56efc36c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/nohw_linux/Makefile @@ -0,0 +1,71 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +include ../config/preconfig.mk +include ../config/window_system.mk + +PVR_SYSTEM := rgx_nohw +NO_HARDWARE := 1 +PDUMP ?= 1 +DISABLE_GPU_FREQUENCY_CALIBRATION ?= 1 + +ifeq ($(SUPPORT_KMS),1) + PVR_DRM_MODESET_DRIVER_NAME := nulldisp + DISPLAY_CONTROLLER := drm_nulldisp +else + ifeq ($(SUPPORT_DISPLAY_CLASS),1) + DISPLAY_CONTROLLER := dc_example + endif +endif + +KERNEL_COMPONENTS := srvkm $(DISPLAY_CONTROLLER) + +ifeq ($(PVR_ARCH),volcanic) + ifeq ($(PVR_BUILD_HMMU),1) + KERNEL_COMPONENTS += pvrhmmu + endif +endif + +HWR_DEFAULT_ENABLED := 0 + +# Should be last +include ../config/core.mk +-include ../common/lws.mk +include ../common/3rdparty.mk diff --git a/drivers/mcst/gpu-imgtec/build/linux/packaging.mk b/drivers/mcst/gpu-imgtec/build/linux/packaging.mk new file mode 100644 index 000000000000..ab2fc074ebe6 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/packaging.mk @@ -0,0 +1,130 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +.PHONY: rpm_specs +rpm_specs: ddk_rpm_spec llvm_rpm_spec mesa_rpm_spec + + +# DDK spec file +# +.PHONY: ddk_rpm_spec +ddk_rpm_spec: $(RELATIVE_OUT)/packaging/pvr-rogue-ddk.spec +$(RELATIVE_OUT)/packaging/pvr-rogue-ddk.spec: METAG_VERSION := $(METAG_VERSION_NEEDED) +$(RELATIVE_OUT)/packaging/pvr-rogue-ddk.spec: MIPS_VERSION := $(MIPS_VERSION_NEEDED) +$(RELATIVE_OUT)/packaging/pvr-rogue-ddk.spec: $(MAKE_TOP)/packaging/pvr-rogue-ddk.spec +$(RELATIVE_OUT)/packaging/pvr-rogue-ddk.spec: $(PVRVERSION_H) $(CONFIG_MK) +$(RELATIVE_OUT)/packaging/pvr-rogue-ddk.spec: | $(RELATIVE_OUT)/packaging + $(if $(V),,@echo " GEN " $(call relative-to-top,$@)) + $(SED) \ + -e 's,@DDK_VERSION@,$(PVRVERSION_MAJ).$(PVRVERSION_MIN).$(PVRVERSION_BUILD),g' \ + -e 's,@METAG_VERSION@,$(METAG_VERSION),g' \ + -e 's,@MIPS_VERSION@,$(MIPS_VERSION),g' \ + $< > $@ + +$(RELATIVE_OUT)/packaging: + @mkdir -p $@ + + +# LLVM spec file +# +# Generate llvm-img rpm spec file and copy patches referenced in the spec file +# to the same location. +# +LLVM_PATCH_DIR := $(TOP)/compiler/llvmufgen/rogue/patches +LLVM_PATCHES := $(sort $(notdir $(wildcard $(LLVM_PATCH_DIR)/*))) +LLVM_OUT_DIR := $(RELATIVE_OUT)/packaging/llvm-img + +.PHONY: llvm_rpm_spec +llvm_rpm_spec: $(LLVM_OUT_DIR)/llvm-img.spec +$(LLVM_OUT_DIR)/llvm-img.spec: LLVM_PATCH_DIR := $(LLVM_PATCH_DIR) +$(LLVM_OUT_DIR)/llvm-img.spec: $(MAKE_TOP)/packaging/llvm-img.spec +$(LLVM_OUT_DIR)/llvm-img.spec: $(addprefix $(LLVM_PATCH_DIR)/, $(LLVM_PATCHES)) +$(LLVM_OUT_DIR)/llvm-img.spec: | $(LLVM_OUT_DIR) + $(if $(V),,@echo " GEN " $(call relative-to-top,$@)) + $(CP) $< $@ + $(CP) $(wildcard $(LLVM_PATCH_DIR)/*) $(dir $@) + +$(LLVM_OUT_DIR): + @mkdir -p $@ + + +# Mesa spec file +# +# Generate mesa-img rpm spec file. This involves generating 'patch' lines +# based upon the patches found in the Mesa patch directory. 
This is done +# to protect against Mesa patches being added and removed (something that +# happens fairly often). All referenced patches get copied to the location +# of the generated spec file. +# +MESA_PATCH_DIR := $(LWS_GIT_PATCH_DIR)/mesa/mesa-20.0.1 +MESA_PATCHES := $(sort $(notdir $(wildcard $(MESA_PATCH_DIR)/*))) +MESA_OUT_DIR := $(RELATIVE_OUT)/packaging/mesa-img + +.PHONY: mesa_rpm_spec +mesa_rpm_spec: $(MESA_OUT_DIR)/mesa-img.spec +$(MESA_OUT_DIR)/mesa-img.spec: MESA_PATCH_DIR := $(MESA_PATCH_DIR) +$(MESA_OUT_DIR)/mesa-img.spec: MESA_PATCHES := $(MESA_PATCHES) +$(MESA_OUT_DIR)/mesa-img.spec: SUBST_PATCHES_TXT := $(MESA_OUT_DIR)/subst_patches.txt +$(MESA_OUT_DIR)/mesa-img.spec: SUBST_APPLY_PATCHES_TXT := $(MESA_OUT_DIR)/subst_apply_patches.txt +$(MESA_OUT_DIR)/mesa-img.spec: $(MAKE_TOP)/packaging/mesa-img.spec +$(MESA_OUT_DIR)/mesa-img.spec: $(addprefix $(MESA_PATCH_DIR)/, $(MESA_PATCHES)) +$(MESA_OUT_DIR)/mesa-img.spec: | $(MESA_OUT_DIR) + $(if $(V),,@echo " GEN " $(call relative-to-top,$@)) + $(if $(V),,@)patch_nums=$$(seq -s ' ' 0 $$(expr $$(echo $(MESA_PATCHES) | wc -w) - 1)); \ + echo "# Gbp-Ignore-Patches: $${patch_nums}" > $(SUBST_PATCHES_TXT) + $(if $(V),,@)i=0; for patch in $(MESA_PATCHES); do \ + echo "Patch$${i}: $${patch}" >> $(SUBST_PATCHES_TXT); \ + i=$$(expr $${i} + 1); \ + done + $(if $(V),,@)i=0; for patch in $(MESA_PATCHES); do \ + echo "%patch$${i} -p1" >> $(SUBST_APPLY_PATCHES_TXT); \ + i=$$(expr $${i} + 1); \ + done + $(SED) \ + -e '/@PATCHES@/ {' -e 'r $(SUBST_PATCHES_TXT)' -e 'd' -e '}' \ + -e '/@APPLY_PATCHES@/ {' -e 'r $(SUBST_APPLY_PATCHES_TXT)' -e 'd' -e '}' \ + $< > $@ + $(RM) $(SUBST_PATCHES_TXT) + $(RM) $(SUBST_APPLY_PATCHES_TXT) + $(RM) $(MESA_OUT_DIR)/*.patch + $(CP) $(wildcard $(MESA_PATCH_DIR)/*) $(dir $@) + +$(MESA_OUT_DIR): + @mkdir -p $@ diff --git a/drivers/mcst/gpu-imgtec/build/linux/plato/Makefile b/drivers/mcst/gpu-imgtec/build/linux/plato/Makefile new file mode 100644 index 000000000000..62ceeb0c252f --- /dev/null 
+++ b/drivers/mcst/gpu-imgtec/build/linux/plato/Makefile @@ -0,0 +1,184 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". 
+# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +RGX_BVNC ?= 6.34.4.35 + +include ../config/preconfig.mk +include ../config/window_system.mk + +NO_HARDWARE := 0 +KERNEL_COMPONENTS := srvkm + +ifeq ($(PVR_REMVIEW),1) + SUPPORT_PLATO_DISPLAY := 0 +else + SUPPORT_PLATO_DISPLAY ?= 1 +endif + +PLATO_DUAL_CHANNEL_DDR ?= 1 +PDP_DEBUG ?= 0 +HDMI_DEBUG ?= 0 +HDMI_EXPERIMENTAL_CLOCKS ?= 0 +HDMI_PDUMP ?= 0 +EMULATOR ?= 0 +HDMI_FORCE_MODE ?= 0 +PVRSRV_FORCE_UNLOAD_IF_BAD_STATE := 1 +PLATO_PDP_MAX_RELOADS ?= 15 +SUPPORT_PLATO_DMA ?= 0 + +PLATO_DDR_BDLR_TRAINING ?= 1 + +PVR_PMR_TRANSLATE_UMA_ADDRESSES := 1 + +ifeq ($(VIRTUAL_PLATFORM),1) + PLATO_MEMORY_SIZE_GB ?= 2 +else + PLATO_MEMORY_SIZE_GB ?= 8 +endif +PLATO_DC_MEM_SIZE_MB ?= 36 + +ifneq ($(shell expr 32 % $(PLATO_MEMORY_SIZE_GB)),0) + $(error PLATO_MEMORY_SIZE_GB has to be an integer divisor of 32) +endif + +ifeq ($(SUPPORT_KMS),1) + PVR_SYSTEM := rgx_linux_plato + PVR_LDM_PLATFORM_PRE_REGISTERED := 1 + + ifeq ($(SUPPORT_PLATO_DISPLAY),1) + PLATO_MEMORY_CONFIG ?= PLATO_MEMORY_HYBRID + DISPLAY_CONTROLLER ?= drm_pdp + HDMI_CONTROLLER ?= drm_pdp2_hdmi + PVR_DRM_MODESET_DRIVER_NAME ?= pdp + ENABLE_PLATO_HDMI ?= 1 + else + PLATO_MEMORY_CONFIG := PLATO_MEMORY_HYBRID + DISPLAY_CONTROLLER := drm_nulldisp + PVR_DRM_MODESET_DRIVER_NAME ?= nulldisp + endif + KERNEL_COMPONENTS += plato +else + PVR_SYSTEM := plato + PVR_LOADER := pvr_pci_drv + + 
PLATO_MEMORY_CONFIG ?= PLATO_MEMORY_HYBRID + + ifeq ($(SUPPORT_DISPLAY_CLASS),1) + ifeq ($(SUPPORT_PLATO_DISPLAY),1) + DISPLAY_CONTROLLER ?= dc_pdp2 + HDMI_CONTROLLER ?= plato_hdmi + ENABLE_PLATO_HDMI ?= 1 + ifeq ($(EMULATOR), 1) + HDMI_PDUMP := 1 + endif + else + DISPLAY_CONTROLLER ?= dc_example + endif + else + SUPPORT_PLATO_DISPLAY ?= 0 + endif +endif + +ifeq ($(PDUMP),1) + PLATO_DISPLAY_PDUMP ?= 0 + PLATO_SYSTEM_PDUMP ?= 0 +endif + +ifeq ($(SUPPORT_GIGACLUSTER),1) + GC_SINGLE_DEVICE_MODE ?= 0 + GC_PERF_OVERLAY ?= 0 + GC_ENABLE_LIBAV ?= 0 + GC_STREAM_X11 ?= 0 + GC_STREAM_GL_PLATO_DEDICATED ?= 1 +endif + +KERNEL_COMPONENTS += $(DISPLAY_CONTROLLER) $(HDMI_CONTROLLER) + +# Should be last +include ../config/core.mk +-include ../common/lws.mk +include ../common/3rdparty.mk + +ifeq ($(PLATO_MEMORY_CONFIG),PLATO_MEMORY_LOCAL) + $(eval $(call BothConfigC,LMA,)) + $(eval $(call KernelConfigMake,LMA,)) +endif + +ifeq ($(SUPPORT_GIGACLUSTER), 1) +$(eval $(call TunableUserConfigC,GC_STREAM_GL_PLATO_DEDICATED,1)) +$(eval $(call TunableUserConfigC,GC_STREAM_X11,1)) +$(eval $(call TunableUserConfigMake,GC_STREAM_X11,1)) +$(eval $(call TunableUserConfigC,GC_ENABLE_LIBAV,1)) +$(eval $(call TunableUserConfigC,GC_PERF_OVERLAY,1)) +endif + +$(eval $(call TunableKernelConfigC,PLATO_MEMORY_SIZE_GIGABYTES, $(PLATO_MEMORY_SIZE_GB)ULL)) +$(eval $(call TunableKernelConfigC,PLATO_MEMORY_CONFIG,$(PLATO_MEMORY_CONFIG))) +$(eval $(call TunableKernelConfigC,PLATO_DDR_BDLR_TRAINING,$(PLATO_DDR_BDLR_TRAINING))) +$(eval $(call TunableKernelConfigC,SUPPORT_PLATO_DISPLAY,$(SUPPORT_PLATO_DISPLAY))) + +$(eval $(call TunableKernelConfigC,SUPPORT_PLATO_DMA,$(SUPPORT_PLATO_DMA))) +$(eval $(call KernelConfigMake,SUPPORT_PLATO_DMA,$(SUPPORT_PLATO_DMA))) + +$(eval $(call TunableKernelConfigC,PLATO_PDP_RELOADS_MAX,$(PLATO_PDP_MAX_RELOADS))) + +$(eval $(call TunableKernelConfigC,VIRTUAL_PLATFORM,)) +$(eval $(call TunableKernelConfigC,EMULATOR,)) +$(eval $(call TunableKernelConfigC,PDP_ARTIFICIAL_KICK,)) 
+$(eval $(call TunableKernelConfigC,PLATO_DUAL_CHANNEL_DDR,)) +$(eval $(call TunableKernelConfigC,HDMI_FORCE_MODE,$(HDMI_FORCE_MODE))) + +ifneq ($(HDMI_CONTROLLER),) +$(eval $(call BothConfigC,HDMI_CONTROLLER,$(HDMI_CONTROLLER))) +$(eval $(call BothConfigMake,HDMI_CONTROLLER,$(HDMI_CONTROLLER))) +endif +$(eval $(call TunableKernelConfigC,ENABLE_PLATO_HDMI,)) + +$(eval $(call TunableKernelConfigC,PDP_DEBUG,)) +$(eval $(call TunableBothConfigC,HDMI_DEBUG,)) +$(eval $(call TunableBothConfigC,HDMI_EXPERIMENTAL_CLOCKS,)) +$(eval $(call TunableBothConfigC,HDMI_PDUMP,)) +ifeq ($(PDUMP), 1) +$(eval $(call TunableKernelConfigC,PLATO_DISPLAY_PDUMP,)) +$(eval $(call TunableKernelConfigC,PLATO_SYSTEM_PDUMP,)) +endif + +$(eval $(call TunableKernelConfigC,PLATO_DC_MEM_SIZE_MB,)) +$(eval $(call TunableUserConfigC,SUPPORT_VK_PLATO_DUAL_HEAP,1)) diff --git a/drivers/mcst/gpu-imgtec/build/linux/prepare_tree.mk b/drivers/mcst/gpu-imgtec/build/linux/prepare_tree.mk new file mode 100644 index 000000000000..a2c185417697 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/prepare_tree.mk @@ -0,0 +1,56 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +.PHONY: prepare_tree + +prepare_tree: + +INTERNAL_INCLUDED_PREPARE_HEADERS := +-include build/linux/prepare_headers.mk +ifneq ($(INTERNAL_INCLUDED_PREPARE_HEADERS),true) +missing_headers := $(strip $(shell test ! -e include/pvrversion.h && echo true)) +ifdef missing_headers +$(info ) +$(info ** include/pvrversion.h is missing, and cannot be rebuilt.) +$(info ** Cannot continue.) 
+$(info ) +$(error Missing headers) +endif +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/pvrversion.mk b/drivers/mcst/gpu-imgtec/build/linux/pvrversion.mk new file mode 100644 index 000000000000..3e17da6e9972 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/pvrversion.mk @@ -0,0 +1,57 @@ +########################################################################### ### +#@Title Extract info from pvrversion.h +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. 
+# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +# Version information +PVRVERSION_H := $(call if-exists,include/pvrversion.h,$(OUT)/include/pvrversion.h) + +ifneq ($(wildcard $(PVRVERSION_H)),) + +# scripts.mk uses these to set the install script's version suffix +PVRVERSION_MAJ := $(shell perl -ne '/\sPVRVERSION_MAJ\s+(\d+)/ and print $$1' $(PVRVERSION_H)) +PVRVERSION_MIN := $(shell perl -ne '/\sPVRVERSION_MIN\s+(\d+)/ and print $$1' $(PVRVERSION_H)) +PVRVERSION_FAMILY := $(shell perl -ne '/\sPVRVERSION_FAMILY\s+"(\S+)"/ and print $$1' $(PVRVERSION_H)) +PVRVERSION_BRANCHNAME := $(shell perl -ne '/\sPVRVERSION_BRANCHNAME\s+"(\S+)"/ and print $$1' $(PVRVERSION_H)) +PVRVERSION_BUILD := $(shell perl -ne '/\sPVRVERSION_BUILD\s+(\d+)/ and print $$1' $(PVRVERSION_H)) + +PVRVERSION_NUM := $(PVRVERSION_MAJ).$(PVRVERSION_MIN).$(PVRVERSION_BUILD) +PVRVERSION := "$(PVRVERSION_FAMILY)_$(PVRVERSION_BRANCHNAME)\@$(PVRVERSION_BUILD)" + +endif diff --git a/drivers/mcst/gpu-imgtec/build/linux/scripts.mk b/drivers/mcst/gpu-imgtec/build/linux/scripts.mk new file mode 100644 index 000000000000..5e28e67fa23c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/scripts.mk @@ -0,0 +1,326 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination 
Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". 
+# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +ifeq ($(SUPPORT_ANDROID_PLATFORM),) + +define if-kernel-component + ifneq ($$(filter $(1),$$(KERNEL_COMPONENTS)),) + M4DEFS_K += $(2) + endif +endef + +# common.m4 lives here +# +M4FLAGS := -I$(MAKE_TOP)/scripts + +# These defs are required for the init script +M4DEFS_K := \ + -DPVRVERSION="$(PVRVERSION)" \ + -DPVR_BUILD_DIR=$(PVR_BUILD_DIR) \ + -DPVRSRV_MODNAME=$(PVRSRV_MODNAME) \ + -DPVR_SYSTEM=$(PVR_SYSTEM) \ + -DPVRTC_MODNAME=tc \ + -DSUPPORT_NATIVE_FENCE_SYNC=$(SUPPORT_NATIVE_FENCE_SYNC) \ + -DPVRSYNC_MODNAME=$(PVRSYNC_MODNAME) + +# passing the BVNC value via init script is required +# only in the case of a Guest OS running on a VZ setup +ifneq ($(PVRSRV_VZ_NUM_OSID),) + ifneq ($(PVRSRV_VZ_NUM_OSID), 0) + ifneq ($(PVRSRV_VZ_NUM_OSID), 1) + M4DEFS_K += -DRGX_BVNC=$(RGX_BVNC) + endif + endif +endif + +ifneq ($(DISPLAY_CONTROLLER),) + $(eval $(call if-kernel-component,$(DISPLAY_CONTROLLER),\ + -DDISPLAY_CONTROLLER=$(DISPLAY_CONTROLLER))) +endif + +ifneq ($(HDMI_CONTROLLER),) + $(eval $(call if-kernel-component,$(HDMI_CONTROLLER),\ + -DHDMI_CONTROLLER=$(HDMI_CONTROLLER))) +endif + +M4DEFS := \ + -DPDUMP_CLIENT_NAME=$(PDUMP_CLIENT_NAME) + +ifeq ($(WINDOW_SYSTEM),xorg) + M4DEFS += -DSUPPORT_XORG=1 + + M4DEFS += -DPVR_XORG_DESTDIR=$(LWS_PREFIX)/bin + M4DEFS += -DPVR_CONF_DESTDIR=$(XORG_CONFDIR) + +else ifeq ($(WINDOW_SYSTEM),wayland) + M4DEFS 
+= -DPVR_WESTON_DESTDIR=$(LWS_PREFIX)/bin + M4DEFS += -DSUPPORT_WAYLAND=1 +endif + +init_script_install_path := $${RC_DESTDIR} + +$(TARGET_NEUTRAL_OUT)/rc.pvr: $(PVRVERSION_H) $(CONFIG_MK) \ + $(MAKE_TOP)/scripts/rc.pvr.m4 $(MAKE_TOP)/scripts/common.m4 \ + $(MAKE_TOP)/$(PVR_BUILD_DIR)/rc.pvr.m4 \ + | $(TARGET_NEUTRAL_OUT) + $(if $(V),,@echo " GEN " $(call relative-to-top,$@)) + $(M4) $(M4FLAGS) $(M4DEFS) $(M4DEFS_K) $(MAKE_TOP)/scripts/rc.pvr.m4 \ + $(MAKE_TOP)/$(PVR_BUILD_DIR)/rc.pvr.m4 > $@ + $(CHMOD) +x $@ + +.PHONY: init_script +init_script: $(TARGET_NEUTRAL_OUT)/rc.pvr + +$(GENERATED_CODE_OUT)/init_script: + $(make-directory) + +$(GENERATED_CODE_OUT)/init_script/.install: init_script_install_path := $(init_script_install_path) +$(GENERATED_CODE_OUT)/init_script/.install: | $(GENERATED_CODE_OUT)/init_script + @echo 'install_file rc.pvr $(init_script_install_path)/rc.pvr "boot script" 0755 0:0' >$@ + +# Generate udev rules file +udev_rules_install_path := $${UDEV_DESTDIR} + +$(TARGET_NEUTRAL_OUT)/udev.pvr: $(CONFIG_MK) \ + $(MAKE_TOP)/scripts/udev.pvr.m4 \ + | $(TARGET_NEUTRAL_OUT) + $(if $(V),,@echo " GEN " $(call relative-to-top,$@)) + $(M4) $(M4FLAGS) $(M4DEFS) $(M4DEFS_K) $(MAKE_TOP)/scripts/udev.pvr.m4 > $@ + $(CHMOD) +x $@ + +.PHONY: udev_rules +udev_rules: $(TARGET_NEUTRAL_OUT)/udev.pvr + +$(GENERATED_CODE_OUT)/udev_rules: + $(make-directory) + +$(GENERATED_CODE_OUT)/udev_rules/.install: udev_rules_install_path := $(udev_rules_install_path) +$(GENERATED_CODE_OUT)/udev_rules/.install: | $(GENERATED_CODE_OUT)/udev_rules + @echo 'install_file udev.pvr $(udev_rules_install_path)/99-pvr.rules "udev rules" 0644 0:0' >$@ + +endif # ifeq ($(SUPPORT_ANDROID_PLATFORM),) + +# This code mimics the way Make processes our implicit/explicit goal list. +# It tries to build up a list of components that were actually built, from +# whence an install script is generated. 
+# +ifneq ($(MAKECMDGOALS),) +BUILT_UM := $(MAKECMDGOALS) +ifneq ($(filter build services_all components uninstall,$(MAKECMDGOALS)),) +BUILT_UM += $(COMPONENTS) +endif +BUILT_UM := $(sort $(filter $(ALL_MODULES) init_script udev_rules,$(BUILT_UM))) +else +BUILT_UM := $(sort $(COMPONENTS)) +endif + +ifneq ($(MAKECMDGOALS),) +BUILT_FW := $(MAKECMDGOALS) +ifneq ($(filter build services_all firmware uninstall,$(MAKECMDGOALS)),) +BUILT_FW += $(FW_COMPONENTS) +endif +BUILT_FW := $(sort $(filter $(ALL_MODULES),$(BUILT_FW))) +else +BUILT_FW := $(sort $(FW_COMPONENTS)) +endif + +ifneq ($(MAKECMDGOALS),) +BUILT_KM := $(MAKECMDGOALS) +ifneq ($(filter build services_all kbuild uninstall,$(MAKECMDGOALS)),) +BUILT_KM += $(KERNEL_COMPONENTS) +endif +BUILT_KM := $(sort $(filter $(ALL_MODULES),$(BUILT_KM))) +else +BUILT_KM := $(sort $(KERNEL_COMPONENTS)) +endif + +INSTALL_UM_MODULES := \ + $(strip $(foreach _m,$(BUILT_UM),\ + $(if $(filter $(doc_types) module_group,$($(_m)_type)),,\ + $(if $(filter host_%,$($(_m)_arch)),,\ + $(if $($(_m)_install_path),$(_m),\ + $(warning WARNING: UM $(_m)_install_path not defined)))))) + +INSTALL_UM_MODULES := \ + $(sort $(INSTALL_UM_MODULES) \ + $(strip $(foreach _m,$(BUILT_UM),\ + $(if $(filter module_group,$($(_m)_type)),\ + $($(_m)_install_dependencies))))) + +# Build up a list of installable shared libraries. The shared_library module +# type is specially guaranteed to define $(_m)_target, even if the Linux.mk +# itself didn't. The list is formatted with : pairs e.g. +# "moduleA:libmoduleA.so moduleB:libcustom.so" for later processing. +ALL_SHARED_INSTALLABLE := \ + $(sort $(foreach _a,$(ALL_MODULES),\ + $(if $(filter shared_library,$($(_a)_type)),$(_a):$($(_a)_target),))) + +# Handle implicit install dependencies. Executables and shared libraries may +# be linked against other shared libraries. 
Avoid requiring the user to +# specify the program's binary dependencies explicitly with $(m)_install_extra +INSTALL_UM_MODULES := \ + $(sort $(INSTALL_UM_MODULES) \ + $(foreach _a,$(ALL_SHARED_INSTALLABLE),\ + $(foreach _m,$(INSTALL_UM_MODULES),\ + $(foreach _l,$($(_m)_libs),\ + $(if $(filter lib$(_l).so,$(word 2,$(subst :, ,$(_a)))),\ + $(word 1,$(subst :, ,$(_a)))))))) + +# Add explicit dependencies that must be installed +INSTALL_UM_MODULES := \ + $(sort $(INSTALL_UM_MODULES) \ + $(foreach _m,$(INSTALL_UM_MODULES),\ + $($(_m)_install_dependencies))) + +define calculate-um-fragments +# Work out which modules are required for this arch. +INSTALL_UM_MODULES_$(1) := \ + $$(strip $$(foreach _m,$(INSTALL_UM_MODULES),\ + $$(if $$(filter $(1),$$(INTERNAL_ARCH_LIST_FOR_$$(_m))),$$(_m)))) + +INSTALL_UM_FRAGMENTS_$(1) := $$(foreach _m,$$(INSTALL_UM_MODULES_$(1)),$(RELATIVE_OUT)/$(1)/intermediates/$$(_m)/.install) + +.PHONY: install_um_$(1)_debug +install_um_$(1)_debug: $$(INSTALL_UM_FRAGMENTS_$(1)) + $(CAT) $$^ +endef + +$(foreach _t,$(TARGET_ALL_ARCH) target_neutral,$(eval $(call calculate-um-fragments,$(_t)))) + +INSTALL_FW_FRAGMENTS := \ + $(strip $(foreach _m,$(BUILT_FW),\ + $(if $(filter-out custom,$($(_m)_type)),,\ + $(if $($(_m)_install_path),\ + $(RELATIVE_OUT)/target_neutral/intermediates/$(_m)/.install,)))) + +.PHONY: install_fw_debug +install_fw_debug: $(INSTALL_FW_FRAGMENTS) + $(CAT) $^ + +ifneq ($(filter init_script, $(INSTALL_UM_MODULES)),) + INSTALL_UM_FRAGMENTS_target_neutral += $(GENERATED_CODE_OUT)/init_script/.install +endif + +ifneq ($(filter udev_rules, $(INSTALL_UM_MODULES)),) + INSTALL_UM_FRAGMENTS_target_neutral += $(GENERATED_CODE_OUT)/udev_rules/.install +endif + +INSTALL_KM_FRAGMENTS := \ + $(strip $(foreach _m,$(BUILT_KM),\ + $(if $(filter-out kernel_module,$($(_m)_type)),,\ + $(if $($(_m)_install_path),\ + $(TARGET_PRIMARY_OUT)/intermediates/$(_m)/.install,\ + $(warning WARNING: KM $(_m)_install_path not defined))))) + +.PHONY: 
install_km_debug +install_km_debug: $(INSTALL_KM_FRAGMENTS) + $(CAT) $^ + +ifneq ($(INSTALL_KM_FRAGMENTS),) +$(TARGET_PRIMARY_OUT)/install_km.sh: $(INSTALL_KM_FRAGMENTS) $(CONFIG_KERNEL_MK) | $(TARGET_PRIMARY_OUT) + $(if $(V),,@echo " GEN " $(call relative-to-top,$@)) + $(ECHO) KERNELVERSION=$(KERNEL_ID) > $@ + $(ECHO) MOD_DESTDIR=$(patsubst %/,%,$(PVRSRV_MODULE_BASEDIR)) >> $@ +ifeq ($(SUPPORT_ANDROID_PLATFORM),) + $(ECHO) check_module_directory /lib/modules/$(KERNEL_ID) >> $@ +endif + $(CAT) $(INSTALL_KM_FRAGMENTS) >> $@ +install_script_km: $(TARGET_PRIMARY_OUT)/install_km.sh +endif + +# Build UM arch scripts +define create-install-um-script +ifneq ($$(INSTALL_UM_FRAGMENTS_$(1)),) +$(RELATIVE_OUT)/$(1)/install_um.sh: $$(INSTALL_UM_FRAGMENTS_$(1)) $(CONFIG_MK) | $(RELATIVE_OUT)/$(1) + $(if $(V),,@echo " GEN " $$(call relative-to-top,$$@)) + $(CAT) $$(INSTALL_UM_FRAGMENTS_$(1)) > $$@ +install_script: $(RELATIVE_OUT)/$(1)/install_um.sh +endif +endef + +$(foreach _t,$(TARGET_ALL_ARCH) target_neutral,$(eval $(call create-install-um-script,$(_t)))) + +# Build FW neutral script +ifneq ($(INSTALL_FW_FRAGMENTS),) +$(RELATIVE_OUT)/target_neutral/install_fw.sh: $(INSTALL_FW_FRAGMENTS) $(CONFIG_MK) | $(RELATIVE_OUT)/target_neutral + $(if $(V),,@echo " GEN " $(call relative-to-top,$@)) + $(CAT) $(INSTALL_FW_FRAGMENTS) > $@ +install_script_fw: $(RELATIVE_OUT)/target_neutral/install_fw.sh +endif + +# Build the top-level install script that drives the install. +ifneq ($(SUPPORT_ANDROID_PLATFORM),) +ifneq ($(SUPPORT_ARC_PLATFORM),) +install_sh_template := $(MAKE_TOP)/scripts/install.sh.tpl +else +install_sh_template := $(MAKE_TOP)/common/android/install.sh.tpl +endif +else +install_sh_template := $(MAKE_TOP)/scripts/install.sh.tpl +endif + +$(RELATIVE_OUT)/install.sh: $(PVRVERSION_H) | $(RELATIVE_OUT) +# In customer packages only one of config.mk or config_kernel.mk will exist. 
+# We can depend on either one, as long as we rebuild the install script when +# the config options it uses change. +$(RELATIVE_OUT)/install.sh: $(call if-exists,$(CONFIG_MK),$(CONFIG_KERNEL_MK)) +$(RELATIVE_OUT)/install.sh: $(install_sh_template) + $(if $(V),,@echo " GEN " $(call relative-to-top,$@)) + $(ECHO) 's/\[PVRVERSION\]/$(subst /,\/,$(PVRVERSION))/g;' > $(RELATIVE_OUT)/install.sh.sed + $(ECHO) 's/\[PVRBUILD\]/$(BUILD)/g;' >> $(RELATIVE_OUT)/install.sh.sed + $(ECHO) 's/\[PRIMARY_ARCH\]/$(TARGET_PRIMARY_ARCH)/g;' >> $(RELATIVE_OUT)/install.sh.sed + $(ECHO) 's/\[ARCHITECTURES\]/$(TARGET_ALL_ARCH) target_neutral/g;' >> $(RELATIVE_OUT)/install.sh.sed + $(ECHO) 's/\[APP_DESTDIR\]/$(subst /,\/,$(APP_DESTDIR))/g;' >> $(RELATIVE_OUT)/install.sh.sed + $(ECHO) 's/\[BIN_DESTDIR\]/$(subst /,\/,$(BIN_DESTDIR))/g;' >> $(RELATIVE_OUT)/install.sh.sed + $(ECHO) 's/\[SHARE_DESTDIR\]/$(subst /,\/,$(SHARE_DESTDIR))/g;' >> $(RELATIVE_OUT)/install.sh.sed + $(ECHO) 's/\[FW_DESTDIR\]/$(subst /,\/,$(FW_DESTDIR))/g;' >> $(RELATIVE_OUT)/install.sh.sed + $(ECHO) 's/\[SHADER_DESTDIR\]/$(subst /,\/,$(SHADER_DESTDIR))/g;' >> $(RELATIVE_OUT)/install.sh.sed + $(ECHO) 's/\[SHLIB_DESTDIR\]/$(subst /,\/,$(SHLIB_DESTDIR))/g;' >> $(RELATIVE_OUT)/install.sh.sed + $(ECHO) 's/\[INCLUDE_DESTDIR\]/$(subst /,\/,$(INCLUDE_DESTDIR))/g;' >> $(RELATIVE_OUT)/install.sh.sed + $(ECHO) 's/\[TEST_DESTDIR\]/$(subst /,\/,$(TEST_DESTDIR))/g;' >> $(RELATIVE_OUT)/install.sh.sed + @sed -f $(RELATIVE_OUT)/install.sh.sed $< > $@ + $(CHMOD) +x $@ + $(RM) $(RELATIVE_OUT)/install.sh.sed +install_script: $(RELATIVE_OUT)/install.sh +install_script_fw: $(RELATIVE_OUT)/install.sh +install_script_km: $(RELATIVE_OUT)/install.sh + +firmware_install: installfw +components_install: installcomponents diff --git a/drivers/mcst/gpu-imgtec/build/linux/scripts/install.sh.tpl b/drivers/mcst/gpu-imgtec/build/linux/scripts/install.sh.tpl new file mode 100644 index 000000000000..64562f392b0d --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/build/linux/scripts/install.sh.tpl @@ -0,0 +1,795 @@ +#!/bin/bash +############################################################################ ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License MIT +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +#### ########################################################################### +# Help on how to invoke +# +function usage { + echo "usage: $0 [options...]" + echo "" + echo "Options: -v Verbose mode." + echo " -n Dry-run mode." + echo " -u Uninstall-only mode." + echo " --root Use as the root of the install file system." + echo " (Overrides the DISCIMAGE environment variable.)" + echo " -p Pack mode: Don't install anything. Just copy files" + echo " required for installation to ." 
+ echo " (Sets/overrides the PACKAGEDIR environment variable.)" + echo " --nolog Don't produce any logfiles." + echo " --fw firmware binaries." + echo " --km kernel modules only." + echo " --um user mode." + exit 1 +} + +WD="$(pwd)" +SCRIPT_ROOT="$(dirname "$0")" +cd "$SCRIPT_ROOT" + +INSTALL_UM_SH_PRESENT= + +INSTALL_PREFIX="i" +INSTALL_PREFIX_CAP="I" + +# Parse arguments +while [ "$1" ]; do + case "$1" in + -v|--verbose) + VERBOSE=v + ;; + -r|--root) + DISCIMAGE="$2" + shift; + ;; + -u|--uninstall) + UNINSTALL_ONLY=y + INSTALL_PREFIX="uni" + INSTALL_PREFIX_CAP="Uni" + ;; + -n) + DOIT=echo + ;; + -p|--package) + PACKAGEDIR="$2" + if [ "${PACKAGEDIR:0:1}" != '/' ]; then + PACKAGEDIR="$WD/$PACKAGEDIR" + fi + shift; + ;; + --nolog) + DISABLE_LOGGING=1 + ;; + --fw) + INCLUDE_INDIVIDUAL_MODULE=y + INCLUDE_FW=y + ;; + --km) + INCLUDE_INDIVIDUAL_MODULE=y + INCLUDE_KM=y + ;; + --um) + INCLUDE_INDIVIDUAL_MODULE=y + INCLUDE_COMPONENTS=y + ;; + -h | --help | *) + usage + exit 0 + ;; + esac + shift +done + +PVRVERSION=[PVRVERSION] +PVRBUILD=[PVRBUILD] +PRIMARY_ARCH="[PRIMARY_ARCH]" +ARCHITECTURES=([ARCHITECTURES]) +SHLIB_DESTDIR_DEFAULT=[SHLIB_DESTDIR] + +BIN_DESTDIR_DEFAULT=[BIN_DESTDIR] +SHARE_DESTDIR_DEFAULT=[SHARE_DESTDIR] +SHADER_DESTDIR_DEFAULT=[SHADER_DESTDIR] +FW_DESTDIR_DEFAULT=[FW_DESTDIR] +INCLUDE_DESTDIR_DEFAULT=[INCLUDE_DESTDIR] + +RC_DESTDIR=/etc/init.d +UDEV_DESTDIR=/etc/udev/rules.d + +LOG_DIR=/etc + +if [ ${#ARCHITECTURES[@]} -le 2 ]; then + INSTALLING_SINGLELIB=true +else + INSTALLING_SINGLELIB=false +fi + +# Exit with an error messages. +# $1=blurb +# +function bail() { + if [ ! -z "$1" ]; then + echo "$1" >&2 + fi + + echo "" >&2 + echo "${INSTALL_PREFIX_CAP}nstallation failed" >&2 + exit 1 +} + +# Copy the files that we are going to install into $PACKAGEDIR +function copy_files_locally() { + # Create versions of the installation functions that just copy files to a useful place. 
+ function check_module_directory() { true; } + function uninstall() { true; } + function link_library() { true; } + function do_link() { true; } + function symlink_library_if_not_present() { true; } + + # basic installation function + # $1=fromfile, $4=chmod-flags + # plus other stuff that we aren't interested in. + function install_file() { + if [ -f "$1" ]; then + $DOIT cp "$1" "$PACKAGEDIR/$THIS_ARCH" + $DOIT chmod "$4" "$PACKAGEDIR/$THIS_ARCH/$1" + fi + } + + # Tree-based installation function + # $1=fromdir $2=destdir + # plus other stuff that we aren't interested in. + function install_tree() { + mkdir -p "$(dirname "$PACKAGEDIR/$THIS_ARCH/$2")" + if [ -d "$1" ]; then + cp -Rf "$1" "$PACKAGEDIR/$THIS_ARCH/$2" + fi + } + + echo "Copying files to $PACKAGEDIR." + + if [ -d "$PACKAGEDIR" ]; then + rm -Rf "$PACKAGEDIR" + fi + mkdir -p "$PACKAGEDIR" + + if [ "$INCLUDE_INDIVIDUAL_MODULE" != "y" ] || [ "$INCLUDE_COMPONENTS" == "y" ]; then + for THIS_ARCH in "${ARCHITECTURES[@]}"; do + if [ ! 
-d "$THIS_ARCH" ]; then + continue + fi + + mkdir -p "$PACKAGEDIR/$THIS_ARCH" + pushd "$THIS_ARCH" > /dev/null + if [ -f install_um.sh ]; then + source install_um.sh + install_file install_um.sh x x 0644 + fi + popd > /dev/null + done + fi + + THIS_ARCH="target_neutral" + if [ "$INCLUDE_INDIVIDUAL_MODULE" != "y" ] || [ "$INCLUDE_FW" == "y" ]; then + if [ -d "$THIS_ARCH" ]; then + mkdir -p "$PACKAGEDIR/$THIS_ARCH" + pushd "$THIS_ARCH" > /dev/null + if [ -f install_fw.sh ]; then + source install_fw.sh + install_file install_fw.sh x x 0644 + install_file rgxfw_debug.zip x x 0644 + fi + popd > /dev/null + fi + fi + + THIS_ARCH="$PRIMARY_ARCH" + if [ "$INCLUDE_INDIVIDUAL_MODULE" != "y" ] || [ "$INCLUDE_KM" == "y" ]; then + if [ -d "$THIS_ARCH" ]; then + mkdir -p "$PACKAGEDIR/$THIS_ARCH" + pushd "$THIS_ARCH" > /dev/null + if [ -f install_km.sh ]; then + source install_km.sh + install_file install_km.sh x x 0644 + fi + popd > /dev/null + fi + fi + + unset THIS_ARCH + install_file install.sh x x 0755 +} + +# Install the files on the remote machine using SSH +# We do this by: +# - Copying the required files to a place on the local disk +# - rsync these files to the remote machine +# - run the install via SSH on the remote machine +function install_via_ssh() { + # Default to port 22 (SSH) if not otherwise specified + if [ -z "$INSTALL_TARGET_PORT" ]; then + INSTALL_TARGET_PORT=22 + fi + + # Execute something on the target machine via SSH + # $1 The command to execute + function remote_execute() { + ssh -p "$INSTALL_TARGET_PORT" -q -o "BatchMode=yes" root@"$INSTALL_TARGET" "$@" + } + + if ! remote_execute "test 1"; then + echo "Can't access $INSTALL_TARGET via ssh (on port $INSTALL_TARGET_PORT)." + echo "Have you installed your public key into root@$INSTALL_TARGET:~/.ssh/authorized_keys?" 
+ echo "If root has a password on the target system, you can do so by executing:" + echo " ssh-copy-id root@$INSTALL_TARGET" + bail + fi + + # Create a directory to contain all the files we are going to install. + PACKAGEDIR_PREFIX="$(mktemp -d)" || bail "Couldn't create local temporary directory" + PACKAGEDIR="$PACKAGEDIR_PREFIX"/Rogue_DDK_Install_Root + PACKAGEDIR_REMOTE=/tmp/Rogue_DDK_Install_Root + copy_files_locally + + echo "RSyncing $PACKAGEDIR to $INSTALL_TARGET:$INSTALL_TARGET_PORT." + $DOIT rsync -crlpt -e "ssh -p \"$INSTALL_TARGET_PORT\"" --delete "$PACKAGEDIR"/ root@"$INSTALL_TARGET":"$PACKAGEDIR_REMOTE" || bail "Couldn't rsync $PACKAGEDIR to root@$INSTALL_TARGET" + echo "Running ${INSTALL_PREFIX}nstall remotely." + + REMOTE_COMMAND="bash $PACKAGEDIR_REMOTE/install.sh -r /" + + if [ "$UNINSTALL_ONLY" == "y" ]; then + REMOTE_COMMAND="$REMOTE_COMMAND -u" + fi + + remote_execute "$REMOTE_COMMAND" || bail "Couldn't execute install remotely." + rm -Rf "$PACKAGEDIR_PREFIX" +} + +# Copy all the required files into their appropriate places on the local machine. +function install_locally { + # Define functions required for local installs + + # Check that the appropriate kernel module directory is there + # $1 the module directory we are looking for + # + function check_module_directory { + MODULEDIR="$1" + if [ ! -d "${DISCIMAGE}${MODULEDIR}" ]; then + echo + echo "Can't find ${MODULEDIR} in the target file system." + echo + echo "If you are using a custom kernel, you probably need to install the kernel" + echo "modules." + echo "You can do so by executing the following:" + echo " \$ cd \$KERNELDIR" + echo " \$ make [ INSTALL_MOD_PATH=\$DISCIMAGE ] modules_install" + echo "(You need to set INSTALL_MOD_PATH if your build machine is not your target" + echo "machine.)" + echo + echo "If you are not using a custom kernel, ensure you KERNELDIR identifies the" + echo "correct kernel headers. 
E.g., if you are building on your target machine:" + echo " \$ export KERNELDIR=/usr/src/linux-headers-\`uname -r\`" + echo " \$ make [ ... ] kbuild" + bail + fi + + if [ -d "${DISCIMAGE}${MODULEDIR}/kernel/drivers/gpu/drm/img-rogue" ]; then + echo + echo "It looks like ${MODULEDIR} in the target file system contains prebuilt versions" + echo "of rogue drivers. You'll need to remove these before installing locally-built" + echo "versions. To do so, run the following on the target system:" + echo " \$ sudo rm -Rf ${MODULEDIR}/kernel/drivers/gpu/drm/img-rogue" + echo "then reboot." + bail + fi + } + + function setup_libdir_for_arch { + local libdir="$1" + if $INSTALLING_SINGLELIB; then + if [ -d "${DISCIMAGE}$libdir" ]; then + SHLIB_DESTDIR="$libdir" + else + SHLIB_DESTDIR="${SHLIB_DESTDIR_DEFAULT}" + fi + else + if [ ! -d "${DISCIMAGE}$libdir" ]; then + bail "Library directory $libdir for architecture $arch does not exist." + fi + SHLIB_DESTDIR="$libdir" + fi + } + + function setup_bindir_for_arch { + if [ "$arch" = "${PRIMARY_ARCH}" ]; then + BIN_DESTDIR="${BIN_DESTDIR_DEFAULT}" + else + BIN_DESTDIR="$1" + fi + } + + function setup_dirs { + case "$1" in + 'target_x86_64') + setup_libdir_for_arch "${SHLIB_DESTDIR_DEFAULT}/x86_64-linux-gnu" + setup_bindir_for_arch "${BIN_DESTDIR_DEFAULT}64" + ;; + 'target_i686') + setup_libdir_for_arch "${SHLIB_DESTDIR_DEFAULT}/i386-linux-gnu" + setup_bindir_for_arch "${BIN_DESTDIR_DEFAULT}32" + ;; + 'target_armel' | 'target_armv7-a') + setup_libdir_for_arch "${SHLIB_DESTDIR_DEFAULT}/arm-linux-gnueabi" + setup_bindir_for_arch "${BIN_DESTDIR_DEFAULT}32" + echo "$SHLIB_DESTDIR" + ;; + 'target_armhf') + setup_libdir_for_arch "${SHLIB_DESTDIR_DEFAULT}/arm-linux-gnueabihf" + setup_bindir_for_arch "${BIN_DESTDIR_DEFAULT}32" + echo "$SHLIB_DESTDIR" + ;; + 'target_aarch64') + setup_libdir_for_arch "${SHLIB_DESTDIR_DEFAULT}/aarch64-linux-gnu" + setup_bindir_for_arch "${BIN_DESTDIR_DEFAULT}64" + echo "$SHLIB_DESTDIR" + ;; + target_mips32*) + 
setup_libdir_for_arch "${SHLIB_DESTDIR_DEFAULT}/mips-linux-gnu" + setup_bindir_for_arch "${BIN_DESTDIR_DEFAULT}32" + echo "$SHLIB_DESTDIR" + ;; + 'target_mips64r6el') + setup_libdir_for_arch "${SHLIB_DESTDIR_DEFAULT}/mips64-linux-gnu" + setup_bindir_for_arch "${BIN_DESTDIR_DEFAULT}64" + echo "$SHLIB_DESTDIR" + ;; + 'target_riscv64') + setup_libdir_for_arch "${SHLIB_DESTDIR_DEFAULT}/riscv64-linux-gnu" + setup_bindir_for_arch "${BIN_DESTDIR_DEFAULT}64" + echo "$SHLIB_DESTDIR" + ;; + 'target_neutral' | '') + unset SHLIB_DESTDIR + unset BIN_DESTDIR + unset EGL_DESTDIR + INCLUDE_DESTDIR="${INCLUDE_DESTDIR_DEFAULT}" + SHARE_DESTDIR="${SHARE_DESTDIR_DEFAULT}" + SHADER_DESTDIR="${SHADER_DESTDIR_DEFAULT}" + DATA_DESTDIR="${BIN_DESTDIR_DEFAULT}" + FW_DESTDIR="${FW_DESTDIR_DEFAULT}" + return + ;; + *) + bail "Unknown architecture $1" + ;; + esac + + EGL_DESTDIR="${SHLIB_DESTDIR}" + } + + # basic installation function + # $1=fromfile, $2=destfilename, $3=blurb, $4=chmod-flags, $5=chown-flags + # + function install_file { + if [ -z "$DDK_INSTALL_LOG" ]; then + bail "INTERNAL ERROR: Invoking install without setting logfile name" + fi + DESTFILE="${DISCIMAGE}$2" + DESTDIR="$(dirname "$DESTFILE")" + + if [ ! -e "$1" ]; then + [ -n "$VERBOSE" ] && echo "skipping file $1 -> $2" + return + fi + + # Destination directory - make sure it's there and writable + # + if [ -d "${DESTDIR}" ]; then + if [ ! -w "${DESTDIR}" ]; then + bail "${DESTDIR} is not writable." + fi + else + $DOIT mkdir -p "${DESTDIR}" || bail "Couldn't mkdir -p ${DESTDIR}" + [ -n "$VERBOSE" ] && echo "Created directory $(dirname "$2")" + fi + + # Delete the original so that permissions don't persist. 
+ # + $DOIT rm -f "$DESTFILE" + + $DOIT cp -f "$1" "$DESTFILE" || bail "Couldn't copy $1 to $DESTFILE" + $DOIT chmod "$4" "${DESTFILE}" + $DOIT chown "$5" "${DESTFILE}" + + echo "$3 $(basename "$1") -> $2" + $DOIT echo "file $2" >> "$DDK_INSTALL_LOG" + } + + + function do_link { + local DESTDIR="$1" + local FILENAME="$2" + local LINKNAME="$3" + pushd "${DISCIMAGE}/$DESTDIR" > /dev/null + # Delete the original so that permissions don't persist. + $DOIT ln -sf "$FILENAME" "$LINKNAME" || bail "Couldn't link $FILENAME to $LINKNAME" + $DOIT echo "link $DESTDIR/$LINKNAME" >> "$DDK_INSTALL_LOG" + [ -n "$VERBOSE" ] && echo " linked $LINKNAME -> $FILENAME" + popd > /dev/null + } + + # Create the relevant links for the given library + # ldconfig will do this too. + function link_library { + if [ -z "$DDK_INSTALL_LOG" ]; then + bail "INTERNAL ERROR: Invoking install without setting logfile name" + fi + + local TARGETFILE="$(basename "$1")" + local DESTDIR="$(dirname "$1")" + + if [ ! -e "${DISCIMAGE}/${DESTDIR}/${TARGETFILE}" ]; then + [ -n "$VERBOSE" ] && echo "Can't link ${DISCIMAGE}${DESTDIR}/${TARGETFILE} as it doesn't exist." + return + fi + + local SONAME="$(objdump -p "${DISCIMAGE}/${DESTDIR}/$TARGETFILE" | grep SONAME | awk '{print $2}')" + + if [ -n "$SONAME" ]; then + do_link "$DESTDIR" "$TARGETFILE" "$SONAME" + fi + + local BASENAME="$(expr match "$TARGETFILE" '\(.\+\.so\)')" + + if [ "$BASENAME" != "$TARGETFILE" ]; then + do_link "$DESTDIR" "$TARGETFILE" "$BASENAME" + fi + } + + function symlink_library_if_not_present { + local DESTDIR="$1" + local LIBNAME="$2" + local DESTFILE="$3" + + # Only make a symlink if the file doesn't exist + if [ ! 
-e "${DISCIMAGE}/${DESTDIR}/${DESTFILE}" ]; then + do_link "$DESTDIR" "$LIBNAME" "$DESTFILE" + echo "symlink ${LIBNAME} -> ${DESTFILE}" + fi + } + + + # Tree-based installation function + # $1 = fromdir $2=destdir $3=blurb + # + function install_tree { + if [ -z "$DDK_INSTALL_LOG" ]; then + bail "INTERNAL ERROR: Invoking install without setting logfile name" + fi + + # Make the destination directory if it's not there + # + if [ ! -d "${DISCIMAGE}$2" ]; then + $DOIT mkdir -p "${DISCIMAGE}$2" || bail "Couldn't mkdir -p ${DISCIMAGE}$2" + fi + if [ -n "$DOIT" ]; then + printf "### tar -C %q -cf - . | tar -C %q -xm%q -\n" "$1" "${DISCIMAGE}$2" "${VERBOSE}f" + else + tar -C "$1" -cf - . | tar -C "${DISCIMAGE}$2" -xm"${VERBOSE}"f - + fi + if [ $? -eq 0 ]; then + echo "Installed $3 in ${DISCIMAGE}$2" + find "$1" -type f -printf "%P\n" | while read -r INSTALL_FILE; do + $DOIT echo "file $2/$INSTALL_FILE" >> "$DDK_INSTALL_LOG" + done + find "$1" -type l -printf "%P\n" | while read -r INSTALL_LINK; do + $DOIT echo "link $2/$INSTALL_LINK" >> "$DDK_INSTALL_LOG" + done + else + echo "Failed copying $3 from $1 to ${DISCIMAGE}$2" + fi + } + + mkdir -p "$DISCIMAGE/$LOG_DIR" + + # Install UM components + if [ "$INCLUDE_INDIVIDUAL_MODULE" != "y" ] || [ "$INCLUDE_COMPONENTS" == "y" ]; then + for arch in "${ARCHITECTURES[@]}"; do + if [ ! 
-d "$arch" ]; then + continue + fi + + pushd "$arch" > /dev/null + if [ -f install_um.sh ]; then + setup_dirs "$arch" + DDK_INSTALL_LOG="$UMLOG" + echo "Installing user components for architecture $arch" + if [ -z "$FIRST_TIME" ] ; then + $DOIT echo "version $PVRVERSION" > "$DDK_INSTALL_LOG" + fi + FIRST_TIME=1 + source install_um.sh + echo + setup_dirs + fi + popd > /dev/null + done + fi + + # Install FW components + THIS_ARCH="target_neutral" + if [ "$INCLUDE_INDIVIDUAL_MODULE" != "y" ] || [ "$INCLUDE_FW" == "y" ]; then + if [ -d "$THIS_ARCH" ]; then + pushd "$THIS_ARCH" > /dev/null + if [ -f install_fw.sh ]; then + setup_dirs "$THIS_ARCH" + DDK_INSTALL_LOG="$FWLOG" + echo "Installing firmware components for architecture $THIS_ARCH" + $DOIT echo "version $PVRVERSION" > "$DDK_INSTALL_LOG" + source install_fw.sh + echo + setup_dirs + fi + popd > /dev/null + fi + fi + + # Install KM components + if [ "$INCLUDE_INDIVIDUAL_MODULE" != "y" ] || [ "$INCLUDE_KM" == "y" ]; then + if [ -d "$PRIMARY_ARCH" ]; then + pushd "$PRIMARY_ARCH" > /dev/null + if [ -f install_km.sh ]; then + DDK_INSTALL_LOG="$KMLOG" + echo "Installing kernel components for architecture $PRIMARY_ARCH" + $DOIT echo "version $PVRVERSION" > "$DDK_INSTALL_LOG" + source install_km.sh + echo + fi + popd > /dev/null + fi + fi + + if [ -f $UMLOG ] || [ -f $FWLOG ]; then + # Create an OLDUMLOG so old versions of the driver can uninstall UM + FW. + $DOIT echo "version $PVRVERSION" > $OLDUMLOG + if [ -f $UMLOG ]; then + # skip the first line which is DDK version information + tail -n +2 $UMLOG >> $OLDUMLOG + echo "file $UMLOG" >> $OLDUMLOG + fi + if [ -f $FWLOG ]; then + tail -n +2 $FWLOG >> $OLDUMLOG + echo "file $FWLOG" >> $OLDUMLOG + fi + fi +} + +# Read the appropriate install log and delete anything therein. +function uninstall_locally { + # Function to uninstall something. + function do_uninstall { + LOG="$1" + + if [ ! -f "$LOG" ]; then + echo "Nothing to un-install." 
+ return; + fi + + BAD=false + VERSION="" + LOG_UNRECOGNISED_TYPE=false + while read -r type data; do + case "$type" in + version) + echo "Uninstalling existing version $data" + VERSION="$data" + ;; + link|file|icdconf) + if [ -z "$VERSION" ]; then + BAD=true; + echo "No version record at head of $LOG" + elif ! $DOIT rm -f "${DISCIMAGE}${data}"; then + BAD=true; + else + [ -n "$VERBOSE" ] && echo "Deleted $type $data" + fi + ;; + *) + if ! $LOG_UNRECOGNISED_TYPE ; then + # Only report the first unrecognised type + LOG_UNRECOGNISED_TYPE=true + echo "Found unrecognised type '$type' in $LOG" + fi + ;; + esac + done < "$1"; + + if ! $BAD ; then + echo "Uninstallation completed." + $DOIT rm -f "$LOG" + else + echo "Uninstallation failed!!!" + fi + } + + + if [ -z "$OLDUMLOG" ] || [ -z "$KMLOG" ] || [ -z "$UMLOG" ] || [ -z "$FWLOG" ]; then + bail "INTERNAL ERROR: Invoking uninstall without setting logfile name" + fi + + # Check if last install was using legacy UM log (combined FW and UM components) + DO_LEGACY_UM_UNINSTALL= + if [ -f $OLDUMLOG ] && [ ! -f $UMLOG ] && [ ! -f $FWLOG ]; then + if [ "$INCLUDE_INDIVIDUAL_MODULE" != "y" ]; then + DO_LEGACY_UM_UNINSTALL=1 + elif [ "$INCLUDE_FW" == "y" ] && [ "$INCLUDE_COMPONENTS" == "y" ]; then + DO_LEGACY_UM_UNINSTALL=1 + elif [ "$INCLUDE_FW" != "$INCLUDE_COMPONENTS" ]; then + if [ "$INCLUDE_FW" == "y" ]; then + echo "Previous driver installation doesn't support ${INSTALL_PREFIX}nstalling" + echo "firmware components separately to user components." + bail + else + echo "Previous driver installation doesn't support ${INSTALL_PREFIX}nstalling" + echo "user components separately to firmware components." + bail + fi + fi + fi + + # Uninstall KM components if we are doing a KM install. 
+ if [ "$INCLUDE_INDIVIDUAL_MODULE" != "y" ] || [ "$INCLUDE_KM" == "y" ]; then + if [ -f "${PRIMARY_ARCH}"/install_km.sh ] && [ -f "$KMLOG" ]; then + echo "Uninstalling kernel components" + do_uninstall "$KMLOG" + echo + fi + fi + + if [ -n "$DO_LEGACY_UM_UNINSTALL" ]; then + # Uninstall FW and UM components if we are doing a FW+UM install. + if [ -n "$INSTALL_UM_SH_PRESENT" ]; then + echo "Uninstalling all (firmware + user mode) components from legacy log." + do_uninstall "$OLDUMLOG" + echo + fi + else + # Uninstall FW binaries if we are doing a FW install. + if [ "$INCLUDE_INDIVIDUAL_MODULE" != "y" ] || [ "$INCLUDE_FW" == "y" ]; then + if [ -f target_neutral/install_fw.sh ] && [ -f "$FWLOG" ]; then + echo "Uninstalling firmware components" + if [ -f "$UMLOG" ]; then + # Update legacy UM log + cp "$UMLOG" "$OLDUMLOG" + echo "file $UMLOG" >> $OLDUMLOG + elif [ -f $OLDUMLOG ]; then + rm "$OLDUMLOG" + fi + do_uninstall "$FWLOG" + echo + fi + fi + + # Uninstall UM components if we are doing a UM install. + if [ "$INCLUDE_INDIVIDUAL_MODULE" != "y" ] || [ "$INCLUDE_COMPONENTS" == "y" ]; then + if [ -n "$INSTALL_UM_SH_PRESENT" ] && [ -f "$UMLOG" ]; then + echo "Uninstalling user components" + if [ -f "$FWLOG" ]; then + # Update legacy UM log + cp "$FWLOG" "$OLDUMLOG" + echo "file $FWLOG" >> $OLDUMLOG + elif [ -f $OLDUMLOG ]; then + rm "$OLDUMLOG" + fi + do_uninstall "$UMLOG" + echo + fi + fi + fi +} + +for i in "${ARCHITECTURES[@]}"; do + if [ -f "$i"/install_um.sh ]; then + INSTALL_UM_SH_PRESENT=1 + fi +done + +if [ "$INCLUDE_COMPONENTS" == "y" ] && [ ! -n "$INSTALL_UM_SH_PRESENT" ]; then + bail "Cannot ${INSTALL_PREFIX}nstall user components only (install_um.sh missing)." +fi + +if [ "$INCLUDE_FW" == "y" ] && [ ! -f target_neutral/install_fw.sh ]; then + bail "Cannot ${INSTALL_PREFIX}nstall firmware components only (install_fw.sh missing)." +fi + +if [ "$INCLUDE_KM" == "y" ] && [ ! 
-f "${PRIMARY_ARCH}"/install_km.sh ]; then + bail "Cannot ${INSTALL_PREFIX}nstall kernel components only (install_km.sh missing)." +fi + +if [ ! -z "$PACKAGEDIR" ]; then + copy_files_locally "$PACKAGEDIR" + echo "Copy complete!" + +elif [ ! -z "$INSTALL_TARGET" ]; then + echo "${INSTALL_PREFIX_CAP}nstalling using SSH/rsync on target $INSTALL_TARGET" + echo + + install_via_ssh + +elif [ ! -z "$DISCIMAGE" ]; then + + if [ ! -d "$DISCIMAGE" ]; then + bail "$0: $DISCIMAGE does not exist." + fi + + echo + echo "File system root is $DISCIMAGE" + echo + + if [ -z "${DISABLE_LOGGING}" ]; then + KMLOG="$DISCIMAGE/$LOG_DIR/powervr_ddk_install_km.log" + OLDUMLOG="$DISCIMAGE/$LOG_DIR/powervr_ddk_install_um.log" + UMLOG="$DISCIMAGE/$LOG_DIR/powervr_ddk_install_components.log" + FWLOG="$DISCIMAGE/$LOG_DIR/powervr_ddk_install_fw.log" + + # Can't do uninstall unless we are doing logging + uninstall_locally + else + OLDUMLOG=/dev/null + KMLOG=/dev/null + UMLOG=/dev/null + FWLOG=/dev/null + fi + + if [ "$UNINSTALL_ONLY" != "y" ]; then + if [ "$DISCIMAGE" == "/" ]; then + echo "Installing PowerVR '$PVRVERSION ($PVRBUILD)' locally" + else + echo "Installing PowerVR '$PVRVERSION ($PVRBUILD)' on $DISCIMAGE" + fi + echo + + install_locally + fi + + if [ "$DISCIMAGE" == "/" ]; then + # If we've installed kernel modules, then KERNELVERSION will have been set + # by install_km.sh + if [ -n "$KERNELVERSION" ] && [ "$(uname -r)" == "$KERNELVERSION" ]; then + echo "Running depmod" + depmod + fi + echo "Running ldconfig" + ldconfig + # Ensure installed files get written to disk + sync + echo "${INSTALL_PREFIX_CAP}nstallation complete!" + else + # Ensure installed files get written to disk + sync + echo "To complete ${INSTALL_PREFIX}nstall, please run the following on the target system:" + echo "$ depmod" + echo "$ ldconfig" + fi + +else + bail "INSTALL_TARGET or DISCIMAGE must be set for ${INSTALL_PREFIX}nstallation to be possible." 
+fi diff --git a/drivers/mcst/gpu-imgtec/build/linux/tc_linux/Makefile b/drivers/mcst/gpu-imgtec/build/linux/tc_linux/Makefile new file mode 100644 index 000000000000..794803592614 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/tc_linux/Makefile @@ -0,0 +1,109 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". 
+# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +RGX_BVNC ?= 1.82.4.5 + +include ../config/preconfig.mk +include ../config/window_system.mk + +NO_HARDWARE := 0 +KERNEL_COMPONENTS := srvkm + +ifeq ($(SUPPORT_KMS),1) + PVR_SYSTEM := rgx_linux_tc + TC_DISPLAY_MEM_SIZE ?= 383 + ifeq ($(PVR_REMVIEW),1) + DISPLAY_CONTROLLER := "drm_nulldisp" + PVR_DRM_MODESET_DRIVER_NAME := "nulldisp" + else + DISPLAY_CONTROLLER ?= drm_pdp + PVR_DRM_MODESET_DRIVER_NAME ?= pdp + endif +else + ifeq ($(PVR_ARCH),volcanic) + PVR_SYSTEM := rgx_linux_tc + TC_DISPLAY_MEM_SIZE ?= 383 + DISPLAY_CONTROLLER ?= dc_example + else + PVR_SYSTEM ?= rgx_tc + + ifeq ($(SUPPORT_DISPLAY_CLASS),1) + TC_DISPLAY_MEM_SIZE ?= 32 + ifeq ($(TC_MEMORY_CONFIG),TC_MEMORY_HOST) + DISPLAY_CONTROLLER ?= dc_example + else + DISPLAY_CONTROLLER ?= dc_pdp + endif + endif + endif +endif + +ifeq ($(PVR_ARCH),volcanic) + PVRSRV_APPHINT_FABRICCOHERENCYOVERRIDE := 0 + + ifeq ($(PVR_BUILD_HMMU),1) + KERNEL_COMPONENTS += pvrhmmu + endif +endif + +ifeq ($(PVR_SYSTEM),rgx_linux_tc) + TC_MEMORY_CONFIG ?= TC_MEMORY_LOCAL + PVR_LDM_PLATFORM_PRE_REGISTERED := 1 + KERNEL_COMPONENTS += tc +else ifeq ($(PVR_SYSTEM),rgx_tc) + TC_MEMORY_CONFIG ?= TC_MEMORY_HYBRID + PVR_LOADER := pvr_pci_drv +endif + +ifeq ($(DISPLAY_CONTROLLER),) + # No display driver support so set to 0 + # + TC_DISPLAY_MEM_SIZE := 0 +else + KERNEL_COMPONENTS += $(DISPLAY_CONTROLLER) +endif + +# 
Should be last +include ../config/core.mk +-include ../common/lws.mk +include ../common/3rdparty.mk +include ../common/testchip.mk + +$(eval $(call TunableKernelConfigC,VIRTUAL_PLATFORM,)) diff --git a/drivers/mcst/gpu-imgtec/build/linux/this_makefile.mk b/drivers/mcst/gpu-imgtec/build/linux/this_makefile.mk new file mode 100644 index 000000000000..17abc71877f6 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/this_makefile.mk @@ -0,0 +1,68 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. 
If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +# Find out the path of the Linux.mk makefile currently being processed, and +# set paths used by the build rules + +# This magic is used so we can use this_makefile.mk twice: first when reading +# in each Linux.mk, and then again when generating rules. 
There we set +# $(THIS_MAKEFILE), and $(REMAINING_MAKEFILES) should be empty +ifneq ($(strip $(REMAINING_MAKEFILES)),) + +# Absolute path to the Linux.mk being processed +THIS_MAKEFILE := $(firstword $(REMAINING_MAKEFILES)) + +# The list of makefiles left to process +REMAINING_MAKEFILES := $(wordlist 2,$(words $(REMAINING_MAKEFILES)),$(REMAINING_MAKEFILES)) + +else + +# When generating rules, we should have read in every Linux.mk +$(if $(INTERNAL_INCLUDED_ALL_MAKEFILES),,$(error No makefiles left in $$(REMAINING_MAKEFILES), but $$(INTERNAL_INCLUDED_ALL_MAKEFILES) is not set)) + +endif + +# Path to the directory containing Linux.mk +THIS_DIR := $(patsubst %/,%,$(dir $(THIS_MAKEFILE))) +ifeq ($(strip $(THIS_DIR)),) +$(error Empty $$(THIS_DIR) for makefile "$(THIS_MAKEFILE)") +endif + +modules := diff --git a/drivers/mcst/gpu-imgtec/build/linux/tools/cc-check.sh b/drivers/mcst/gpu-imgtec/build/linux/tools/cc-check.sh new file mode 100644 index 000000000000..cdc037ac070f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/tools/cc-check.sh @@ -0,0 +1,120 @@ +#!/bin/sh +########################################################################### ### +#@File +#@Title Test the nature of the C compiler. +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +LANG=C +export LANG + +usage() { + echo "usage: $0 [--64] [--clang] --cc CC [--out OUT] [cflag]" + exit 1 +} + +check_clang() { + $CC -Wp,-dM -E - /dev/null 2>&1 + if [ "$?" = "0" ]; then + # Clang must be passed a program with a main() that returns 0. + # It will produce an error if main() is improperly specified. + IS_CLANG=1 + TEST_PROGRAM="int main(void){return 0;}" + else + # If we're not clang, assume we're GCC. 
GCC needs to be passed + # a program with a faulty return in main() so that another + # warning (unrelated to the flag being tested) is emitted. + # This will cause GCC to warn about the unsupported warning flag. + IS_CLANG=0 + TEST_PROGRAM="int main(void){return;}" + fi +} + +do_cc() { + echo "$TEST_PROGRAM" 2> /dev/null | $CC -W -Wall $3 -xc -c - -o $1 >$2 2>&1 +} + +while [ 1 ]; do + if [ "$1" = "--64" ]; then + [ -z $CLANG ] && BIT_CHECK=1 + elif [ "$1" = "--clang" ]; then + [ -z $BIT_CHECK ] && CLANG=1 + elif [ "$1" = "--cc" ]; then + [ "x$2" = "x" ] && usage + CC="$2" && shift + elif [ "$1" = "--out" ]; then + [ "x$2" = "x" ] && usage + OUT="$2" && shift + elif [ "${1#--}" != "$1" ]; then + usage + else + break + fi + shift +done + +[ "x$CC" = "x" ] && usage +[ "x$CLANG" = "x" -a "x$OUT" = "x" ] && usage +ccof=$OUT/cc-sanity-check +log=${ccof}.log + +check_clang + +if [ "x$BIT_CHECK" = "x1" ]; then + do_cc $ccof $log "" + file $ccof | grep 64-bit >/dev/null 2>&1 + [ "$?" = "0" ] && echo true || echo false +elif [ "x$CLANG" = "x1" ]; then + [ "x$IS_CLANG" = "x1" ] && echo true || echo false +else + [ "x$1" = "x" ] && usage + do_cc $ccof $log $1 + if [ "$?" = "0" ]; then + # compile passed, but was the warning unrecognized? + if [ "x$IS_CLANG" = "x1" ]; then + grep "^warning: unknown warning option '$1'" $log >/dev/null 2>&1 + else + grep -E "(^cc1: warning: unrecognized command line option .$1.|^cc1: warning: command line option \"$1\" is valid for C\+\+/ObjC\+\+ but not for C|gcc: unrecognized option '$1')" $log >/dev/null 2>&1 + fi + [ "$?" 
= "1" ] && echo $1 + fi +fi + +rm -f $ccof $log +exit 0 diff --git a/drivers/mcst/gpu-imgtec/build/linux/toplevel.mk b/drivers/mcst/gpu-imgtec/build/linux/toplevel.mk new file mode 100644 index 000000000000..28b543aab152 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/build/linux/toplevel.mk @@ -0,0 +1,442 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. 
+# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +# Define the default goal. This masks a previous definition of the default +# goal in config/core.mk, which must match this one +.PHONY: build +build: components firmware + +MAKE_TOP := build/linux +THIS_MAKEFILE := (top-level makefiles) + +include $(MAKE_TOP)/defs.mk + +ifeq ($(OUT),) +$(error Must specify output directory with OUT=) +endif + +ifeq ($(TOP),) +$(error Must specify root of source tree with TOP=) +endif +$(call directory-must-exist,$(TOP)) + +# RELATIVE_OUT is relative only if it's under $(TOP) +RELATIVE_OUT := $(patsubst $(TOP)/%,%,$(OUT)) +CONFIG_MK := $(RELATIVE_OUT)/config.mk +CONFIG_H := $(RELATIVE_OUT)/config.h +CONFIG_KERNEL_MK := $(RELATIVE_OUT)/config_kernel.mk +CONFIG_KERNEL_H := $(RELATIVE_OUT)/config_kernel.h + +# Convert commas to spaces in $(D). This is so you can say "make +# D=config-changes,freeze-config" and have $(filter config-changes,$(D)) +# still work. +comma := , +empty := +space := $(empty) $(empty) +override D := $(subst $(comma),$(space),$(D)) + +ifneq ($(INTERNAL_CLOBBER_ONLY),true) +# Create the out directory +# +$(shell mkdir -p $(OUT)) + +# If these generated files differ from any pre-existing ones, +# replace them, causing affected parts of the driver to rebuild. 
+# +_want_config_diff := $(filter config-changes,$(D)) +_freeze_config := $(strip $(filter freeze-config,$(D))) +_updated_config_files := $(shell \ + $(if $(_want_config_diff),rm -f $(OUT)/config.diff;,) \ + for file in $(CONFIG_MK) $(CONFIG_H) \ + $(CONFIG_KERNEL_MK) $(CONFIG_KERNEL_H); do \ + diff -U 0 $$file $$file.new \ + >>$(if $(_want_config_diff),$(OUT)/config.diff,/dev/null) 2>/dev/null \ + && rm -f $$file.new \ + || echo $$file; \ + done) + +ifneq ($(_want_config_diff),) +# We send the diff to stderr so it isn't captured by $(shell) +$(shell [ -s $(OUT)/config.diff ] && echo >&2 "Configuration changed in $(RELATIVE_OUT):" && cat >&2 $(OUT)/config.diff) +endif + +ifneq ($(_freeze_config),) +$(if $(_updated_config_files),$(error Configuration change in $(RELATIVE_OUT) prevented by D=freeze-config),) +endif + +# Update the config, if changed +$(foreach _f,$(_updated_config_files), \ + $(shell mv -f $(_f).new $(_f) >/dev/null 2>/dev/null)) + +endif # INTERNAL_CLOBBER_ONLY + +MAKEFLAGS := -Rr --no-print-directory + +ifneq ($(INTERNAL_CLOBBER_ONLY),true) + +# This is so you can say "find $(TOP) -name Linux.mk > /tmp/something; export +# ALL_MAKEFILES=/tmp/something; make" and avoid having to run find. This is +# handy if your source tree is mounted over NFS or something +override ALL_MAKEFILES := $(call relative-to-top,$(if $(strip $(ALL_MAKEFILES)),$(shell cat $(ALL_MAKEFILES)),$(shell find $(TOP) -type f -name Linux.mk -print -o -type d -name '.*' -prune))) +ifeq ($(strip $(ALL_MAKEFILES)),) +$(info ** Unable to find any Linux.mk files under $$(TOP). This could mean that) +$(info ** there are no makefiles, or that ALL_MAKEFILES is set in the environment) +$(info ** and points to a nonexistent or empty file.) 
+$(error No makefiles) +endif + +else # clobber-only +ALL_MAKEFILES := +endif + +unexport ALL_MAKEFILES + +REMAINING_MAKEFILES := $(ALL_MAKEFILES) +ALL_MODULES := +INTERNAL_INCLUDED_ALL_MAKEFILES := + +ALL_LDFLAGS := + +# Please do not change the format of the following lines +-include $(CONFIG_MK) +-include $(CONFIG_KERNEL_MK) +# OK to change now + +define is-host-os +$(if $(HOST_OS),$(if $(filter $(1),$(HOST_OS)),true),$(error HOST_OS not set)) +endef + +define is-not-host-os +$(if $(HOST_OS),$(if $(filter-out $(1),$(HOST_OS)),true),$(error HOST_OS not set)) +endef + +define is-target-os +$(if $(TARGET_OS),$(if $(filter $(1),$(TARGET_OS)),true),$(error TARGET_OS not set)) +endef + +define is-not-target-os +$(if $(TARGET_OS),$(if $(filter-out $(1),$(TARGET_OS)),true),$(error TARGET_OS not set)) +endef + +# If we haven't set host/target archs, set some sensible defaults now. +# This allows things like prune.sh to work +ifeq ($(HOST_PRIMARY_ARCH),) +ifneq ($(FORCE_ARCH),) +HOST_PRIMARY_ARCH := host_x86_64 +HOST_32BIT_ARCH := host_i386 + +# We set HOST_ALL_ARCH this way, as HOST_32BIT_ARCH may be overridden on the +# command line. +ifeq ($(HOST_PRIMARY_ARCH),$(HOST_32BIT_ARCH)) +HOST_ALL_ARCH := $(HOST_PRIMARY_ARCH) +else +HOST_ALL_ARCH := $(HOST_PRIMARY_ARCH) $(HOST_32BIT_ARCH) +endif + +_ALL_ARCHS := \ + $(filter-out %target_neutral.mk,$(wildcard $(MAKE_TOP)/moduledefs/target_*.mk)) +TARGET_PRIMARY_ARCH := \ + $(patsubst $(MAKE_TOP)/moduledefs/%.mk,%,$(word 1, $(_ALL_ARCHS))) + +TARGET_ALL_ARCH := $(TARGET_PRIMARY_ARCH) +endif +endif + +# Output directory for configuration, object code, +# final programs/libraries, and install/rc scripts. 
+HOST_OUT := $(RELATIVE_OUT)/$(HOST_PRIMARY_ARCH) +HOST_32BIT_OUT := $(RELATIVE_OUT)/$(HOST_32BIT_ARCH) +TARGET_OUT := $(RELATIVE_OUT)/$(TARGET_PRIMARY_ARCH) +TARGET_PRIMARY_OUT := $(RELATIVE_OUT)/$(TARGET_PRIMARY_ARCH) +TARGET_NEUTRAL_OUT := $(RELATIVE_OUT)/target_neutral +BRIDGE_SOURCE_ROOT := $(call if-exists,$(TOP)/generated/$(PVR_ARCH),$(TARGET_NEUTRAL_OUT)/intermediates) +GENERATED_CODE_OUT := $(TARGET_NEUTRAL_OUT)/intermediates +DOCS_OUT := $(RELATIVE_OUT)/doc + +# +# neutrino/subst_makefiles.mk must be included after Output directories have been defined, +# because it overrides BRIDGE_SOURCE_ROOT of bridges to be built. If we include this makefile +# earlier, the value of BRIDGE_SOURCE_ROOT set in neutrino/subst_makefiles.mk will be overwritten. +ifeq ($(SUPPORT_NEUTRINO_PLATFORM),1) +include $(MAKE_TOP)/common/neutrino/subst_makefiles.mk +# neutrino/subst_makefiles.mk overrides ALL_MAKEFILES. +# Set REMAINING_MAKEFILES to the new value of ALL_MAKEFILES +REMAINING_MAKEFILES := $(ALL_MAKEFILES) +endif + +# Mark subdirectories of $(OUT) as secondary, and provide rules to create +# them. +OUT_SUBDIRS := $(addprefix $(RELATIVE_OUT)/,$(TARGET_ALL_ARCH)) \ + $(TARGET_NEUTRAL_OUT) $(DOCS_OUT) $(if $(HOST_PRIMARY_ARCH),$(sort $(HOST_OUT) $(HOST_32BIT_OUT))) +.SECONDARY: $(OUT_SUBDIRS) +$(OUT_SUBDIRS): + $(make-directory) + +ifneq ($(INTERNAL_CLOBBER_ONLY),true) +-include $(MAKE_TOP)/pvrversion.mk +-include $(MAKE_TOP)/llvm.mk +-include $(MAKE_TOP)/common/bridges.mk +endif + +ifeq ($(INTERNAL_CLOBBER_ONLY)$(SUPPORT_ANDROID_PLATFORM)$(SUPPORT_NEUTRINO_PLATFORM)$(SUPPORT_INTEGRITY_PLATFORM),) + # doing a Linux build. We need to worry about sysroots. 
+ + # The directories containing linux window system related components + LWS_DIR := $(TOP)/lws + LWS_TARBALL_DIR ?= $(TOP)/external/lws + LWS_PATCH_DIR := $(LWS_DIR)/dist/patches + LWS_GIT_PATCH_DIR := $(LWS_DIR)/patches + + ifneq ($(SUPPORT_BUILD_LWS),) + -include $(MAKE_TOP)/lwsconf.mk + + else ifneq ($(SYSROOT),) + LWS_PREFIX ?= /usr + XORG_CONFDIR ?= /etc/X11 + + override SYS_CFLAGS += --sysroot=${SYSROOT} + override SYS_CXXFLAGS += --sysroot=${SYSROOT} + override SYS_LDFLAGS += --sysroot=${SYSROOT} + + PKG_CONFIG_SYSROOT_DIR := ${SYSROOT} + + ifneq ($(SYSROOT),/) + # Override PKG_CONFIG_PATH to prevent additional host paths from being + # searched + PKG_CONFIG_PATH := + endif + + else ifneq ($(CROSS_COMPILE),) + $(info WARNING: You are not specifying how to find dependent libraries, e.g., by specifying SYSROOT.) + $(info The build may fail.) + endif +endif + +# This has to come after including lwsconf.mk since it relies on LWS_PREFIX +# to construct the path to wayland-scanner +include $(MAKE_TOP)/commands.mk + +BUILD_CACHES=$(sort $(LWS_BUILD_CACHES)) +ifneq ($(BUILD_CACHES),) +.SECONDARY: $(BUILD_CACHES) +$(BUILD_CACHES): + $(make-directory) +endif + +# We don't need to include this if we're just doing a clean or a clobber +# +ifneq ($(INTERNAL_CLOBBER_ONLY),true) +include $(MAKE_TOP)/buildvars.mk +endif + +HOST_INTERMEDIATES := $(HOST_OUT)/intermediates +TARGET_INTERMEDIATES := $(TARGET_OUT)/intermediates + +ifneq ($(KERNEL_COMPONENTS),) +build: kbuild +endif + +# The 'bridges' target is used to make all the modules that are of the bridge +# type. One of its main uses is to pregenerate the bridge files for inclusion +# into the DDK source packages. Since there is no need and no facility to +# generate the bridge files when the pregenerated files exist, make the target +# a no-op in this case. 
+.PHONY: bridges +ifeq ($(BRIDGE_SOURCE_ROOT),$(TOP)/generated/$(PVR_ARCH)) +bridges: +else +bridges: $(BRIDGES) +endif + +# Include each Linux.mk, then include modules.mk to save some information +# about each module +include $(foreach _Linux.mk,$(ALL_MAKEFILES),$(MAKE_TOP)/this_makefile.mk $(_Linux.mk) $(MAKE_TOP)/modules.mk) + +ifeq ($(strip $(REMAINING_MAKEFILES)),) +INTERNAL_INCLUDED_ALL_MAKEFILES := true +else +$(error Impossible: $(words $(REMAINING_MAKEFILES)) makefiles were mysteriously ignored when reading $$(ALL_MAKEFILES)) +endif + +# For the special "all_modules" target, filter out any kernel_module modules +# These are explicitly non-buildable in isolation. +ALL_NON_KERNEL_MODULES := \ + $(foreach _m,$(ALL_MODULES),\ + $(if $(filter-out kernel_module,$($(_m)_type)),$(_m))) + +# Compute the isystem paths passed in via SYS_INCLUDES. We'll use this in +# the module target_xxx makefiles to filter duplicate -isystem and -I flags, +# to ensure the module can always override the include precedence. (Also +# calculate any 'residual' non-include flags, as we need to put them back.) +SYS_INCLUDES_ISYSTEM := \ + $(subst -isystem,,$(filter -isystem%,$(subst -isystem ,-isystem,$(SYS_INCLUDES)))) +SYS_INCLUDES_RESIDUAL := \ + $(strip $(filter-out -isystem%,$(subst -isystem ,-isystem,$(SYS_INCLUDES)))) + +# At this point, all Linux.mks have been included. Now generate rules to build +# each module: for each module in $(ALL_MODULES), set per-makefile variables +ALL_BAD_MODULES := +$(foreach _m,$(ALL_MODULES),$(eval $(call process-module,$(_m)))) + +# Filter out any modules that were missing their module 'type' makefile. These +# modules will break the packaging process and can't be built anyway. 
+ALL_MODULES := $(filter-out $(ALL_BAD_MODULES),$(ALL_MODULES)) + +.PHONY: kbuild install +kbuild install: + +ifneq ($(INTERNAL_CLOBBER_ONLY),true) + ifeq ($(SUPPORT_ANDROID_PLATFORM)$(SUPPORT_NEUTRINO_PLATFORM)$(SUPPORT_INTEGRITY_PLATFORM),) + -include $(MAKE_TOP)/packaging.mk + endif + -include $(MAKE_TOP)/scripts.mk + -include $(MAKE_TOP)/kbuild/kbuild.mk +endif +# We won't depend on 'build' here so that people can build subsets of +# components and still have the install script attempt to install the +# subset. +install: + @if [ ! -d "$(DISCIMAGE)" -a -z "$(INSTALL_TARGET)" ]; then \ + echo; \ + echo "** DISCIMAGE was not set or does not point to a valid directory."; \ + echo "** Either use INSTALL_TARGET or set DISCIMAGE."; \ + echo "** Cannot continue with install."; \ + echo; \ + exit 1; \ + fi + @if [ ! -f $(RELATIVE_OUT)/install.sh ]; then \ + echo; \ + echo "** install.sh not found in $(RELATIVE_OUT)."; \ + echo "** Cannot continue with install."; \ + echo; \ + exit 1; \ + fi + @cd $(RELATIVE_OUT) && ./install.sh + +installfw: install_script_fw + @if [ ! -f $(RELATIVE_OUT)/install.sh ]; then \ + echo; \ + echo "** install.sh not found in $(RELATIVE_OUT)."; \ + echo "** Cannot continue with install."; \ + echo; \ + exit 1; \ + fi + @cd $(RELATIVE_OUT) && ./install.sh --fw + +installcomponents: install_script + @if [ ! -f $(RELATIVE_OUT)/install.sh ]; then \ + echo; \ + echo "** install.sh not found in $(RELATIVE_OUT)."; \ + echo "** Cannot continue with install."; \ + echo; \ + exit 1; \ + fi + @cd $(RELATIVE_OUT) && ./install.sh --um + +installkm: install_script_km + @if [ ! -d "$(DISCIMAGE)" -a -z "$(INSTALL_TARGET)" ]; then \ + echo; \ + echo "** DISCIMAGE was not set or does not point to a valid directory."; \ + echo "** Either use INSTALL_TARGET or set DISCIMAGE."; \ + echo "** Cannot continue with install."; \ + echo; \ + exit 1; \ + fi + @if [ ! 
-f $(RELATIVE_OUT)/install.sh ]; then \ + echo; \ + echo "** install.sh not found in $(RELATIVE_OUT)."; \ + echo "** Cannot continue with install."; \ + echo; \ + exit 1; \ + fi + @cd $(RELATIVE_OUT) && ./install.sh --km + +.PHONY: uninstall +uninstall: install_script install_script_fw install_script_km + @if [ ! -d "$(DISCIMAGE)" -a -z "$(INSTALL_TARGET)" ]; then \ + echo; \ + echo "** DISCIMAGE was not set or does not point to a valid directory."; \ + echo "** Either use INSTALL_TARGET or set DISCIMAGE."; \ + echo "** Cannot continue with uninstall."; \ + echo; \ + exit 1; \ + fi + @if [ ! -f $(RELATIVE_OUT)/install.sh ]; then \ + echo; \ + echo "** install.sh not found in $(RELATIVE_OUT)."; \ + echo "** Cannot continue with uninstall."; \ + echo; \ + exit 1; \ + fi + @cd $(RELATIVE_OUT) && ./install.sh -u + +# You can say 'make all_modules' to attempt to make everything, or 'make +# components' to only make the things which are listed (in the per-build +# makefiles) as components of the build. +.PHONY: all_modules all_docs components firmware +all_modules: $(ALL_NON_KERNEL_MODULES) kbuild +all_docs: ; +components: $(COMPONENTS) +firmware: $(FW_COMPONENTS) + +# Cleaning +.PHONY: clean clobber +clean: MODULE_DIRS_TO_REMOVE := $(OUT_SUBDIRS) +clean: + $(clean-dirs) +clobber: MODULE_DIRS_TO_REMOVE := $(OUT) +clobber: + $(clean-dirs) + +# Saying 'make clean-MODULE' removes the intermediates for MODULE. +# clobber-MODULE deletes the output files as well +clean-%: + $(if $(V),,@echo " RM " $(call relative-to-top,$(INTERNAL_CLEAN_TARGETS_FOR_$*))) + $(RM) -rf $(INTERNAL_CLEAN_TARGETS_FOR_$*) +clobber-%: + $(if $(V),,@echo " RM " $(call relative-to-top,$(INTERNAL_CLOBBER_TARGETS_FOR_$*))) + $(RM) -rf $(INTERNAL_CLOBBER_TARGETS_FOR_$*) + +include $(MAKE_TOP)/bits.mk + +# D=nobuild stops the build before any recipes are run. This line should +# come at the end of this makefile. 
+$(if $(filter nobuild,$(D)),$(error D=nobuild given),) diff --git a/drivers/mcst/gpu-imgtec/config_kernel.h b/drivers/mcst/gpu-imgtec/config_kernel.h new file mode 100644 index 000000000000..a5cd1a30fc50 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/config_kernel.h @@ -0,0 +1,149 @@ +#define PVRSRV_ENABLE_CCCB_GROW +#define RGX_FW_FILENAME "rgx.fw" +#define RGX_SH_FILENAME "rgx.sh" +#define LINUX +#define PVR_BUILD_DIR "e2c3_gpu" +#define PVR_BUILD_TYPE "release" +#define PVRSRV_MODNAME "pvrsrvkm" +#define PVRSYNC_MODNAME "pvr_sync" +#define SUPPORT_RGX 1 +#define RELEASE +#define SUPPORT_PHYSMEM_TEST +#define RGX_BVNC_CORE_KM_HEADER "cores/rgxcore_km_4.46.6.62.h" +#define RGX_BNC_CONFIG_KM_HEADER "configs/rgxconfig_km_4.V.6.62.h" +#define SUPPORT_DBGDRV_EVENT_OBJECTS +#define PVRSRV_NEED_PVR_DPF +#define PVRSRV_NEED_PVR_STACKTRACE_NATIVE +#define SUPPORT_RGXTQ_BRIDGE +#define ENABLE_EMULATED_LARGE_TEXTURES +#define PVRSRV_POISON_ON_ALLOC_VALUE 0xd9 +#define PVRSRV_POISON_ON_FREE_VALUE 0x63 +#define RGX_NUM_OS_SUPPORTED 1 +#define PVRSRV_APPHINT_DRIVERMODE 0x7FFFFFFF +#define RGX_FW_HEAP_SHIFT 25 +#define PVRSRV_APPHINT_OSIDREGION0MIN "0x00000000 0x04000000 0x10000000 0x18000000 0x20000000 0x28000000 0x30000000 0x38000000" +#define PVRSRV_APPHINT_OSIDREGION0MAX "0x3FFFFFFF 0x0FFFFFFF 0x17FFFFFF 0x1FFFFFFF 0x27FFFFFF 0x2FFFFFFF 0x37FFFFFF 0x3FFFFFFF" +#define PVRSRV_APPHINT_OSIDREGION1MIN "0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000" +#define PVRSRV_APPHINT_OSIDREGION1MAX "0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF" +#define FIX_DUSTS_POW_ON_INIT +#define SUPPORT_POWMON_COMPONENT +#define PVR_POWER_ACTOR_MEASUREMENT_PERIOD_MS 10U +#define PVR_POWER_MONITOR_HWPERF +#define PVR_LDM_PLATFORM_PRE_REGISTERED +#define PVR_LDM_DRIVER_REGISTRATION_NAME "pvrsrvkm" +#define PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN 256 +#define SUPPORT_MMU_PENDING_FAULT_PROTECTION +#define 
ION_DEFAULT_HEAP_NAME "system" +#define ION_DEFAULT_HEAP_ID_MASK (1 << ION_HEAP_TYPE_SYSTEM) +#define PVR_BLOB_CACHE_SIZE_MEGABYTES 20 +#define SUPPORT_EXTRA_METASP_DEBUG +#define HWR_DEFAULT_ENABLED +#define PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT APPHNT_BLDVAR_DBGDUMPLIMIT +#define PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG IMG_FALSE +#define PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE 0x4000 +#define PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE 786432 +#define PVRSRV_APPHINT_ENABLESIGNATURECHECKS APPHNT_BLDVAR_ENABLESIGNATURECHECKS +#define PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE RGXFW_SIG_BUFFER_SIZE_MIN +#define PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING IMG_FALSE +#define PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG APPHNT_BLDVAR_ENABLEPAGEFAULTDEBUG +#define PVRSRV_APPHINT_VALIDATEIRQ 0 +#define PVRSRV_APPHINT_DISABLECLOCKGATING 0 +#define PVRSRV_APPHINT_DISABLEDMOVERLAP 0 +#define PVRSRV_APPHINT_ENABLECDMKILLINGRANDMODE 0 +#define PVRSRV_APPHINT_ENABLERANDOMCONTEXTSWITCH 0 +#define PVRSRV_APPHINT_ENABLESOFTRESETCNTEXTSWITCH 0 +#define PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL +#define PVRSRV_APPHINT_VDMCONTEXTSWITCHMODE RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX +#define PVRSRV_APPHINT_ENABLERDPOWERISLAND RGX_RD_POWER_ISLAND_DEFAULT +#define PVRSRV_APPHINT_FIRMWAREPERF FW_PERF_CONF_NONE +#define PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN +#define PVRSRV_APPHINT_HWPERFDISABLECUSTOMCOUNTERFILTER 0 +#define PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB 2048 +#define PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB 2048 +#define PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS 50 +#define PVRSRV_APPHINT_JONESDISABLEMASK 0 +#define PVRSRV_APPHINT_NEWFILTERINGMODE 1 +#define PVRSRV_APPHINT_TRUNCATEMODE 0 +#define PVRSRV_APPHINT_EMUMAXFREQ 0 +#define PVRSRV_APPHINT_GPIOVALIDATIONMODE 0 +#define PVRSRV_APPHINT_RGXBVNC "" +#define PVRSRV_APPHINT_CLEANUPTHREADPRIORITY 5 +#define PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY 0 +#define PVRSRV_APPHINT_ASSERTONHWRTRIGGER IMG_FALSE 
+#define PVRSRV_APPHINT_ASSERTOUTOFMEMORY IMG_FALSE +#define PVRSRV_APPHINT_CHECKMLIST APPHNT_BLDVAR_DEBUG +#define PVRSRV_APPHINT_DISABLEFEDLOGGING IMG_FALSE +#define PVRSRV_APPHINT_ENABLEAPM RGX_ACTIVEPM_DEFAULT +#define PVRSRV_APPHINT_ENABLEHTBLOGGROUP 0 +#define PVRSRV_APPHINT_ENABLELOGGROUP RGXFWIF_LOG_TYPE_NONE +#define PVRSRV_APPHINT_FIRMWARELOGTYPE 0 +#define PVRSRV_APPHINT_FWTRACEBUFSIZEINDWORDS RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS +#define PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE 0 +#define PVRSRV_APPHINT_HTBOPERATIONMODE HTB_OPMODE_DROPOLDEST +#define PVRSRV_APPHINT_HTBUFFERSIZE 64 +#define PVRSRV_APPHINT_ENABLEFTRACEGPU IMG_FALSE +#define PVRSRV_APPHINT_HWPERFFWFILTER 0 +#define PVRSRV_APPHINT_HWPERFHOSTFILTER 0 +#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_SERVICES 0 +#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_EGL 0 +#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGLES 0 +#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENCL 0 +#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_VULKAN 0 +#define PVRSRV_APPHINT_TIMECORRCLOCK 0 +#define PVRSRV_APPHINT_ENABLEFWPOISONONFREE IMG_FALSE +#define PVRSRV_APPHINT_FWPOISONONFREEVALUE 0xBD +#define PVRSRV_APPHINT_ZEROFREELIST IMG_FALSE +#define PVRSRV_APPHINT_GPUUNITSPOWERCHANGE IMG_FALSE +#define PVRSRV_APPHINT_DISABLEPDUMPPANIC IMG_FALSE +#define PVRSRV_APPHINT_CACHEOPCONFIG 0 +#define PVRSRV_APPHINT_CACHEOPUMKMHRESHOLDSIZE 0 +#define PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC IMG_FALSE +#define PVRSRV_APPHINT_PHYSMEMTESTPASSES APPHNT_PHYSMEMTEST_ENABLE +#define PVRSRV_APPHINT_TESTSLRINTERVAL 0 +#define SOC_TIMER_FREQ 20 +#define PDVFS_COM_HOST 1 +#define PDVFS_COM_AP 2 +#define PDVFS_COM_PMC 3 +#define PDVFS_COM_IMG_CLKDIV 4 +#define PDVFS_COM PDVFS_COM_HOST +#define PVR_GPIO_MODE_GENERAL 1 +#define PVR_GPIO_MODE_POWMON_PIN 2 +#define PVR_GPIO_MODE PVR_GPIO_MODE_GENERAL +#define PVRSRV_ENABLE_PROCESS_STATS +#define SUPPORT_USC_BREAKPOINT +#define PVR_ANNOTATION_MAX_LEN 63 +#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO +#define 
PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD 90 +#ifndef CONFIG_MCST /*Can't use the pool: it mixes dma & ordinary pages */ +#define PVR_LINUX_PHYSMEM_MAX_POOL_PAGES 10240 +#define PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES 20480 +#endif +#define PVR_DIRTY_BYTES_FLUSH_THRESHOLD 524288 +#define PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD 256 +#ifdef CONFIG_MCST /*_AllocOSPage_CMA() can not handle more */ +#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 0 +#else +#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 2 +#endif +#define PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD 16384 +#define SUPPORT_NATIVE_FENCE_SYNC +#define PVRSRV_STALLED_CCB_ACTION +#define UPDATE_FENCE_CHECKPOINT_COUNT 1 +#define PVR_DRM_NAME "pvr" +#define DEVICE_MEMSETCPY_ALIGN_IN_BYTES 16 +#define RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS 0 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D 14 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D 14 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM 13 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA 15 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D 16 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC 13 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TDM 14 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ3D 17 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ2D 17 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_CDM 15 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TA 16 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_3D 17 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_KICKSYNC 13 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TDM 17 +#define SUPPORT_BUFFER_SYNC 1 diff --git a/drivers/mcst/gpu-imgtec/config_kernel.mk b/drivers/mcst/gpu-imgtec/config_kernel.mk new file mode 100644 index 000000000000..4d1b4835dda9 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/config_kernel.mk @@ -0,0 +1,35 @@ +override TARGET_PRIMARY_ARCH := target_e2k +override TARGET_SECONDARY_ARCH := +override TARGET_ALL_ARCH := target_e2k +override TARGET_FORCE_32BIT := +override PVR_ARCH := rogue +override KERNEL_COMPONENTS := srvkm e2c3_gpu 
+override WINDOW_SYSTEM := nulldrmws +override PVRSRV_MODNAME := pvrsrvkm +override PVRSYNC_MODNAME := pvr_sync +override PVR_BUILD_DIR := e2c3_gpu +override PVR_BUILD_TYPE := release +override SUPPORT_RGX := 1 +override PVR_SYSTEM := e2c3_gpu +override PVR_LOADER := +override BUILD := release +override DEBUGLINK := 1 +override SUPPORT_PHYSMEM_TEST := 1 +override RGX_NUM_OS_SUPPORTED := 1 +override VMM_TYPE := stub +override SUPPORT_POWMON_COMPONENT := 1 +override RGX_TIMECORR_CLOCK := mono +override PDVFS_COM_HOST := 1 +override PDVFS_COM_AP := 2 +override PDVFS_COM_PMC := 3 +override PDVFS_COM_IMG_CLKDIV := 4 +override PDVFS_COM := PDVFS_COM_HOST +override PVR_GPIO_MODE_GENERAL := 1 +override PVR_GPIO_MODE_POWMON_PIN := 2 +override PVR_GPIO_MODE := PVR_GPIO_MODE_GENERAL +override PVR_HANDLE_BACKEND := idr +override SUPPORT_DMABUF_BRIDGE := 1 +override SUPPORT_USC_BREAKPOINT := 1 +override SUPPORT_NATIVE_FENCE_SYNC := 1 +override SUPPORT_DMA_FENCE := 1 +override SUPPORT_BUFFER_SYNC := 1 diff --git a/drivers/mcst/gpu-imgtec/copy-to-kernel-tc/apollo.mk b/drivers/mcst/gpu-imgtec/copy-to-kernel-tc/apollo.mk new file mode 100644 index 000000000000..6d34673e40be --- /dev/null +++ b/drivers/mcst/gpu-imgtec/copy-to-kernel-tc/apollo.mk @@ -0,0 +1,4 @@ +apollo-y += \ + tc_apollo.o \ + tc_drv.o \ + tc_odin.o diff --git a/drivers/mcst/gpu-imgtec/copy-to-kernel-tc/config_kernel.h b/drivers/mcst/gpu-imgtec/copy-to-kernel-tc/config_kernel.h new file mode 100644 index 000000000000..208480397b57 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/copy-to-kernel-tc/config_kernel.h @@ -0,0 +1,160 @@ +#define DEVICE_MEMSETCPY_ALIGN_IN_BYTES 16 +#define DISPLAY_CONTROLLER drm_pdp +#define ENABLE_EMULATED_LARGE_TEXTURES +#define GPUVIRT_VALIDATION_NUM_OS 8 +#define GPUVIRT_VALIDATION_NUM_REGIONS 2 +#define HWR_DEFAULT_ENABLED +#define ION_DEFAULT_HEAP_ID_MASK (1 << ION_HEAP_TYPE_SYSTEM) +#define ION_DEFAULT_HEAP_NAME "system" +#define LINUX +#define LMA +#define PDVFS_COM PDVFS_COM_HOST 
+#define PDVFS_COM_AP 2 +#define PDVFS_COM_HOST 1 +#define PDVFS_COM_IMG_CLKDIV 4 +#define PDVFS_COM_PMC 3 +#define PVRSRV_APPHINT_ASSERTONHWRTRIGGER IMG_FALSE +#define PVRSRV_APPHINT_ASSERTOUTOFMEMORY IMG_FALSE +#define PVRSRV_APPHINT_CACHEOPCONFIG 0 +#define PVRSRV_APPHINT_CACHEOPUMKMHRESHOLDSIZE 0 +#define PVRSRV_APPHINT_CHECKMLIST APPHNT_BLDVAR_DEBUG +#define PVRSRV_APPHINT_CLEANUPTHREADPRIORITY 5 +#define PVRSRV_APPHINT_DISABLECLOCKGATING 0 +#define PVRSRV_APPHINT_DISABLEDMOVERLAP 0 +#define PVRSRV_APPHINT_DISABLEFEDLOGGING IMG_FALSE +#define PVRSRV_APPHINT_DISABLEPDUMPPANIC IMG_FALSE +#define PVRSRV_APPHINT_DRIVERMODE 0x7FFFFFFF +#define PVRSRV_APPHINT_EMUMAXFREQ 0 +#define PVRSRV_APPHINT_ENABLEAPM RGX_ACTIVEPM_DEFAULT +#define PVRSRV_APPHINT_ENABLECDMKILLINGRANDMODE 0 +#define PVRSRV_APPHINT_ENABLEFTRACEGPU IMG_FALSE +#define PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING IMG_FALSE +#define PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL +#define PVRSRV_APPHINT_ENABLEFWPOISONONFREE IMG_FALSE +#define PVRSRV_APPHINT_ENABLEHTBLOGGROUP 0 +#define PVRSRV_APPHINT_ENABLELOGGROUP RGXFWIF_LOG_TYPE_NONE +#define PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG APPHNT_BLDVAR_ENABLEPAGEFAULTDEBUG +#define PVRSRV_APPHINT_ENABLERANDOMCONTEXTSWITCH 0 +#define PVRSRV_APPHINT_ENABLERDPOWERISLAND RGX_RD_POWER_ISLAND_DEFAULT +#define PVRSRV_APPHINT_ENABLESIGNATURECHECKS APPHNT_BLDVAR_ENABLESIGNATURECHECKS +#define PVRSRV_APPHINT_ENABLESOFTRESETCNTEXTSWITCH 0 +#define PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG IMG_FALSE +#define PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE 0 +#define PVRSRV_APPHINT_FIRMWARELOGTYPE 0 +#define PVRSRV_APPHINT_FIRMWAREPERF FW_PERF_CONF_NONE +#define PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN +#define PVRSRV_APPHINT_FWPOISONONFREEVALUE 0xBD +#define PVRSRV_APPHINT_FWTRACEBUFSIZEINDWORDS RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS +#define PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE 0x4000 +#define PVRSRV_APPHINT_GPIOVALIDATIONMODE 0 
+#define PVRSRV_APPHINT_GPUUNITSPOWERCHANGE IMG_FALSE +#define PVRSRV_APPHINT_HTBOPERATIONMODE HTB_OPMODE_DROPOLDEST +#define PVRSRV_APPHINT_HTBUFFERSIZE 64 +#define PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE 786432 +#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_EGL 0 +#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENCL 0 +#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGLES 0 +#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_SERVICES 0 +#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_VULKAN 0 +#define PVRSRV_APPHINT_HWPERFDISABLECUSTOMCOUNTERFILTER 0 +#define PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB 2048 +#define PVRSRV_APPHINT_HWPERFFWFILTER 0 +#define PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB 2048 +#define PVRSRV_APPHINT_HWPERFHOSTFILTER 0 +#define PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS 50 +#define PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT APPHNT_BLDVAR_DBGDUMPLIMIT +#define PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC IMG_FALSE +#define PVRSRV_APPHINT_JONESDISABLEMASK 0 +#define PVRSRV_APPHINT_NEWFILTERINGMODE 1 +#define PVRSRV_APPHINT_OSIDREGION0MAX "0x3FFFFFFF 0x0FFFFFFF 0x17FFFFFF 0x1FFFFFFF 0x27FFFFFF 0x2FFFFFFF 0x37FFFFFF 0x3FFFFFFF" +#define PVRSRV_APPHINT_OSIDREGION0MIN "0x00000000 0x04000000 0x10000000 0x18000000 0x20000000 0x28000000 0x30000000 0x38000000" +#define PVRSRV_APPHINT_OSIDREGION1MAX "0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF" +#define PVRSRV_APPHINT_OSIDREGION1MIN "0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000" +#define PVRSRV_APPHINT_PHYSMEMTESTPASSES APPHNT_PHYSMEMTEST_ENABLE +#define PVRSRV_APPHINT_RGXBVNC "" +#define PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE RGXFW_SIG_BUFFER_SIZE_MIN +#define PVRSRV_APPHINT_TESTSLRINTERVAL 0 +#define PVRSRV_APPHINT_TIMECORRCLOCK 0 +#define PVRSRV_APPHINT_TRUNCATEMODE 0 +#define PVRSRV_APPHINT_VALIDATEIRQ 0 +#define PVRSRV_APPHINT_VDMCONTEXTSWITCHMODE RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX +#define PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY 0 +#define 
PVRSRV_APPHINT_ZEROFREELIST IMG_FALSE +#define PVRSRV_ENABLE_CCCB_GROW +#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO +#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD 90 +#define PVRSRV_ENABLE_PROCESS_STATS +#define PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN 256 +#define PVRSRV_MODNAME "pvrsrvkm" +#define PVRSRV_NEED_PVR_DPF +#define PVRSRV_NEED_PVR_STACKTRACE_NATIVE +#define PVRSRV_POISON_ON_ALLOC_VALUE 0xd9 +#define PVRSRV_POISON_ON_FREE_VALUE 0x63 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_3D 17 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_CDM 15 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_KICKSYNC 13 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TA 16 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TDM 17 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ2D 17 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ3D 17 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D 16 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM 13 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC 13 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA 15 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TDM 14 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D 14 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D 14 +#define PVRSRV_STALLED_CCB_ACTION +#define PVRSYNC_MODNAME "pvr_sync" +#define PVR_BLOB_CACHE_SIZE_MEGABYTES 20 +#define PVR_BUILD_DIR "tc_linux" +#define PVR_DIRTY_BYTES_FLUSH_THRESHOLD 524288 +#define PVR_DRM_NAME "pvr" +#define PVR_GPIO_MODE PVR_GPIO_MODE_GENERAL +#define PVR_GPIO_MODE_GENERAL 1 +#define PVR_GPIO_MODE_POWMON_PIN 2 +#define PVR_LDM_DRIVER_REGISTRATION_NAME "pvrsrvkm" +#define PVR_LDM_PLATFORM_PRE_REGISTERED +#define PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD 256 +#define PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD 16384 +#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 2 +#define PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES 20480 +#define PVR_LINUX_PHYSMEM_MAX_POOL_PAGES 10240 +#define PVR_POWER_ACTOR_MEASUREMENT_PERIOD_MS 10U +#define PVR_POWER_MONITOR_HWPERF +#define RGX_BNC_CONFIG_KM_HEADER 
"configs/rgxconfig_km_1.V.4.5.h" +#define RGX_BVNC_CORE_KM_HEADER "cores/rgxcore_km_1.82.4.5.h" +#define RGX_FW_FILENAME "rgx.fw" +#define RGX_FW_HEAP_SHIFT 25 +#define RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS 0 +#define RGX_NUM_OS_SUPPORTED 1 +#define RGX_SH_FILENAME "rgx.sh" +#define SOC_TIMER_FREQ 20 +#define SUPPORT_BUFFER_SYNC 1 +#define SUPPORT_DBGDRV_EVENT_OBJECTS +#define SUPPORT_MMU_PENDING_FAULT_PROTECTION +#define SUPPORT_NATIVE_FENCE_SYNC +#define SUPPORT_PHYSMEM_TEST +#define SUPPORT_POWMON_COMPONENT +#define SUPPORT_RGX 1 +#define SUPPORT_RGXTQ_BRIDGE +#define SUPPORT_USC_BREAKPOINT +#define TC_APOLLO_ES2 +#define TC_DISPLAY_MEM_SIZE 383 +#define TC_MEMORY_CONFIG TC_MEMORY_LOCAL +#define TC_SECURE_MEM_SIZE 128 +#define UPDATE_FENCE_CHECKPOINT_COUNT 1 +#ifdef CONFIG_DRM_POWERVR_ROGUE_DEBUG +#define DEBUG +#define DEBUG_BRIDGE_KM +#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE +#define PVRSRV_ENABLE_GPU_MEMORY_INFO +#define PVRSRV_ENABLE_SYNC_POISONING +#define PVR_ANNOTATION_MAX_LEN 96 +#define PVR_BUILD_TYPE "debug" +#define TRACK_FW_BOOT +#else +#define PVR_ANNOTATION_MAX_LEN 63 +#define PVR_BUILD_TYPE "release" +#define RELEASE +#endif diff --git a/drivers/mcst/gpu-imgtec/copy-to-kernel-tc/config_kernel.mk b/drivers/mcst/gpu-imgtec/copy-to-kernel-tc/config_kernel.mk new file mode 100644 index 000000000000..d8a0513130b4 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/copy-to-kernel-tc/config_kernel.mk @@ -0,0 +1,39 @@ +override DISPLAY_CONTROLLER := drm_pdp +override HOST_ALL_ARCH := host_x86_64 host_i386 +override LMA := 1 +override METAG_VERSION_NEEDED := 2.8.1.0.3 +override MIPS_VERSION_NEEDED := 2014.07-1 +override PDVFS_COM := PDVFS_COM_HOST +override PDVFS_COM_AP := 2 +override PDVFS_COM_HOST := 1 +override PDVFS_COM_IMG_CLKDIV := 4 +override PDVFS_COM_PMC := 3 +override PVRSRV_MODNAME := pvrsrvkm +override PVRSYNC_MODNAME := pvr_sync +override PVR_ARCH := rogue +override PVR_BUILD_DIR := tc_linux +override PVR_GPIO_MODE := PVR_GPIO_MODE_GENERAL 
+override PVR_GPIO_MODE_GENERAL := 1 +override PVR_GPIO_MODE_POWMON_PIN := 2 +override PVR_HANDLE_BACKEND := idr +override PVR_SYSTEM := rgx_linux_tc +override RGX_NUM_OS_SUPPORTED := 1 +override RGX_TIMECORR_CLOCK := mono +override SUPPORT_BUFFER_SYNC := 1 +override SUPPORT_DMABUF_BRIDGE := 1 +override SUPPORT_DMA_FENCE := 1 +override SUPPORT_NATIVE_FENCE_SYNC := 1 +override SUPPORT_PHYSMEM_TEST := 1 +override SUPPORT_POWMON_COMPONENT := 1 +override SUPPORT_RGX := 1 +override SUPPORT_USC_BREAKPOINT := 1 +override VMM_TYPE := stub +override WINDOW_SYSTEM := wayland +ifeq ($(CONFIG_DRM_POWERVR_ROGUE_DEBUG),y) +override BUILD := debug +override PVRSRV_ENABLE_GPU_MEMORY_INFO := 1 +override PVR_BUILD_TYPE := debug +else +override BUILD := release +override PVR_BUILD_TYPE := release +endif diff --git a/drivers/mcst/gpu-imgtec/copy-to-kernel-tc/copy_items.sh b/drivers/mcst/gpu-imgtec/copy-to-kernel-tc/copy_items.sh new file mode 100644 index 000000000000..492317e80a84 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/copy-to-kernel-tc/copy_items.sh @@ -0,0 +1,443 @@ +copyfile generated/rogue/cache_bridge/client_cache_direct_bridge.c drivers/gpu/drm/img-rogue/client_cache_direct_bridge.c +copyfile generated/rogue/cache_bridge/server_cache_bridge.c drivers/gpu/drm/img-rogue/server_cache_bridge.c +copyfile generated/rogue/cmm_bridge/server_cmm_bridge.c drivers/gpu/drm/img-rogue/server_cmm_bridge.c +copyfile generated/rogue/devicememhistory_bridge/client_devicememhistory_direct_bridge.c drivers/gpu/drm/img-rogue/client_devicememhistory_direct_bridge.c +copyfile generated/rogue/devicememhistory_bridge/server_devicememhistory_bridge.c drivers/gpu/drm/img-rogue/server_devicememhistory_bridge.c +copyfile generated/rogue/dmabuf_bridge/server_dmabuf_bridge.c drivers/gpu/drm/img-rogue/server_dmabuf_bridge.c +copyfile generated/rogue/htbuffer_bridge/client_htbuffer_direct_bridge.c drivers/gpu/drm/img-rogue/client_htbuffer_direct_bridge.c +copyfile 
generated/rogue/htbuffer_bridge/server_htbuffer_bridge.c drivers/gpu/drm/img-rogue/server_htbuffer_bridge.c +copyfile generated/rogue/mm_bridge/client_mm_direct_bridge.c drivers/gpu/drm/img-rogue/client_mm_direct_bridge.c +copyfile generated/rogue/mm_bridge/server_mm_bridge.c drivers/gpu/drm/img-rogue/server_mm_bridge.c +copyfile generated/rogue/pvrtl_bridge/client_pvrtl_direct_bridge.c drivers/gpu/drm/img-rogue/client_pvrtl_direct_bridge.c +copyfile generated/rogue/pvrtl_bridge/server_pvrtl_bridge.c drivers/gpu/drm/img-rogue/server_pvrtl_bridge.c +copyfile generated/rogue/rgxbreakpoint_bridge/server_rgxbreakpoint_bridge.c drivers/gpu/drm/img-rogue/server_rgxbreakpoint_bridge.c +copyfile generated/rogue/rgxcmp_bridge/server_rgxcmp_bridge.c drivers/gpu/drm/img-rogue/server_rgxcmp_bridge.c +copyfile generated/rogue/rgxfwdbg_bridge/server_rgxfwdbg_bridge.c drivers/gpu/drm/img-rogue/server_rgxfwdbg_bridge.c +copyfile generated/rogue/rgxhwperf_bridge/server_rgxhwperf_bridge.c drivers/gpu/drm/img-rogue/server_rgxhwperf_bridge.c +copyfile generated/rogue/rgxkicksync_bridge/server_rgxkicksync_bridge.c drivers/gpu/drm/img-rogue/server_rgxkicksync_bridge.c +copyfile generated/rogue/rgxregconfig_bridge/server_rgxregconfig_bridge.c drivers/gpu/drm/img-rogue/server_rgxregconfig_bridge.c +copyfile generated/rogue/rgxsignals_bridge/server_rgxsignals_bridge.c drivers/gpu/drm/img-rogue/server_rgxsignals_bridge.c +copyfile generated/rogue/rgxta3d_bridge/server_rgxta3d_bridge.c drivers/gpu/drm/img-rogue/server_rgxta3d_bridge.c +copyfile generated/rogue/rgxtq2_bridge/server_rgxtq2_bridge.c drivers/gpu/drm/img-rogue/server_rgxtq2_bridge.c +copyfile generated/rogue/rgxtq_bridge/server_rgxtq_bridge.c drivers/gpu/drm/img-rogue/server_rgxtq_bridge.c +copyfile generated/rogue/ri_bridge/client_ri_direct_bridge.c drivers/gpu/drm/img-rogue/client_ri_direct_bridge.c +copyfile generated/rogue/ri_bridge/server_ri_bridge.c drivers/gpu/drm/img-rogue/server_ri_bridge.c +copyfile 
generated/rogue/srvcore_bridge/server_srvcore_bridge.c drivers/gpu/drm/img-rogue/server_srvcore_bridge.c +copyfile generated/rogue/sync_bridge/client_sync_direct_bridge.c drivers/gpu/drm/img-rogue/client_sync_direct_bridge.c +copyfile generated/rogue/sync_bridge/server_sync_bridge.c drivers/gpu/drm/img-rogue/server_sync_bridge.c +copyfile generated/rogue/synctracking_bridge/client_synctracking_direct_bridge.c drivers/gpu/drm/img-rogue/client_synctracking_direct_bridge.c +copyfile generated/rogue/synctracking_bridge/server_synctracking_bridge.c drivers/gpu/drm/img-rogue/server_synctracking_bridge.c +copyfile kernel/drivers/staging/imgtec/pvr_buffer_sync.c drivers/gpu/drm/img-rogue/pvr_buffer_sync.c +copyfile kernel/drivers/staging/imgtec/pvr_counting_timeline.c drivers/gpu/drm/img-rogue/pvr_counting_timeline.c +copyfile kernel/drivers/staging/imgtec/pvr_drm.c drivers/gpu/drm/img-rogue/pvr_drm.c +copyfile kernel/drivers/staging/imgtec/pvr_fence.c drivers/gpu/drm/img-rogue/pvr_fence.c +copyfile kernel/drivers/staging/imgtec/pvr_platform_drv.c drivers/gpu/drm/img-rogue/pvr_platform_drv.c +copyfile kernel/drivers/staging/imgtec/pvr_sw_fence.c drivers/gpu/drm/img-rogue/pvr_sw_fence.c +copyfile kernel/drivers/staging/imgtec/pvr_sync_file.c drivers/gpu/drm/img-rogue/pvr_sync_file.c +copyfile kernel/drivers/staging/imgtec/tc/drm_pdp_crtc.c drivers/gpu/drm/img-rogue/apollo/drm_pdp_crtc.c +copyfile kernel/drivers/staging/imgtec/tc/drm_pdp_debugfs.c drivers/gpu/drm/img-rogue/apollo/drm_pdp_debugfs.c +copyfile kernel/drivers/staging/imgtec/tc/drm_pdp_drv.c drivers/gpu/drm/img-rogue/apollo/drm_pdp_drv.c +copyfile kernel/drivers/staging/imgtec/tc/drm_pdp_dvi.c drivers/gpu/drm/img-rogue/apollo/drm_pdp_dvi.c +copyfile kernel/drivers/staging/imgtec/tc/drm_pdp_fb.c drivers/gpu/drm/img-rogue/apollo/drm_pdp_fb.c +copyfile kernel/drivers/staging/imgtec/tc/drm_pdp_gem.c drivers/gpu/drm/img-rogue/apollo/drm_pdp_gem.c +copyfile kernel/drivers/staging/imgtec/tc/drm_pdp_modeset.c 
drivers/gpu/drm/img-rogue/apollo/drm_pdp_modeset.c +copyfile kernel/drivers/staging/imgtec/tc/drm_pdp_plane.c drivers/gpu/drm/img-rogue/apollo/drm_pdp_plane.c +copyfile kernel/drivers/staging/imgtec/tc/drm_pdp_tmds.c drivers/gpu/drm/img-rogue/apollo/drm_pdp_tmds.c +copyfile kernel/drivers/staging/imgtec/tc/pdp_apollo.c drivers/gpu/drm/img-rogue/apollo/pdp_apollo.c +copyfile kernel/drivers/staging/imgtec/tc/pdp_odin.c drivers/gpu/drm/img-rogue/apollo/pdp_odin.c +copyfile kernel/drivers/staging/imgtec/tc/pdp_plato.c drivers/gpu/drm/img-rogue/apollo/pdp_plato.c +copyfile kernel/drivers/staging/imgtec/tc/tc_apollo.c drivers/gpu/drm/img-rogue/apollo/tc_apollo.c +copyfile kernel/drivers/staging/imgtec/tc/tc_drv.c drivers/gpu/drm/img-rogue/apollo/tc_drv.c +copyfile kernel/drivers/staging/imgtec/tc/tc_odin.c drivers/gpu/drm/img-rogue/apollo/tc_odin.c +copyfile services/server/common/cache_km.c drivers/gpu/drm/img-rogue/cache_km.c +copyfile services/server/common/connection_server.c drivers/gpu/drm/img-rogue/connection_server.c +copyfile services/server/common/devicemem_heapcfg.c drivers/gpu/drm/img-rogue/devicemem_heapcfg.c +copyfile services/server/common/devicemem_history_server.c drivers/gpu/drm/img-rogue/devicemem_history_server.c +copyfile services/server/common/devicemem_server.c drivers/gpu/drm/img-rogue/devicemem_server.c +copyfile services/server/common/di_server.c drivers/gpu/drm/img-rogue/di_server.c +copyfile services/server/common/handle.c drivers/gpu/drm/img-rogue/handle.c +copyfile services/server/common/htb_debug.c drivers/gpu/drm/img-rogue/htb_debug.c +copyfile services/server/common/htbserver.c drivers/gpu/drm/img-rogue/htbserver.c +copyfile services/server/common/info_page_km.c drivers/gpu/drm/img-rogue/info_page_km.c +copyfile services/server/common/lists.c drivers/gpu/drm/img-rogue/lists.c +copyfile services/server/common/mmu_common.c drivers/gpu/drm/img-rogue/mmu_common.c +copyfile services/server/common/physheap.c drivers/gpu/drm/img-rogue/physheap.c 
+copyfile services/server/common/physmem.c drivers/gpu/drm/img-rogue/physmem.c +copyfile services/server/common/physmem_hostmem.c drivers/gpu/drm/img-rogue/physmem_hostmem.c +copyfile services/server/common/physmem_lma.c drivers/gpu/drm/img-rogue/physmem_lma.c +copyfile services/server/common/pmr.c drivers/gpu/drm/img-rogue/pmr.c +copyfile services/server/common/power.c drivers/gpu/drm/img-rogue/power.c +copyfile services/server/common/process_stats.c drivers/gpu/drm/img-rogue/process_stats.c +copyfile services/server/common/pvr_notifier.c drivers/gpu/drm/img-rogue/pvr_notifier.c +copyfile services/server/common/pvrsrv.c drivers/gpu/drm/img-rogue/pvrsrv.c +copyfile services/server/common/pvrsrv_bridge_init.c drivers/gpu/drm/img-rogue/pvrsrv_bridge_init.c +copyfile services/server/common/pvrsrv_pool.c drivers/gpu/drm/img-rogue/pvrsrv_pool.c +copyfile services/server/common/ri_server.c drivers/gpu/drm/img-rogue/ri_server.c +copyfile services/server/common/srvcore.c drivers/gpu/drm/img-rogue/srvcore.c +copyfile services/server/common/sync_checkpoint.c drivers/gpu/drm/img-rogue/sync_checkpoint.c +copyfile services/server/common/sync_server.c drivers/gpu/drm/img-rogue/sync_server.c +copyfile services/server/common/tlintern.c drivers/gpu/drm/img-rogue/tlintern.c +copyfile services/server/common/tlserver.c drivers/gpu/drm/img-rogue/tlserver.c +copyfile services/server/common/tlstream.c drivers/gpu/drm/img-rogue/tlstream.c +copyfile services/server/common/vmm_pvz_client.c drivers/gpu/drm/img-rogue/vmm_pvz_client.c +copyfile services/server/common/vmm_pvz_server.c drivers/gpu/drm/img-rogue/vmm_pvz_server.c +copyfile services/server/common/vz_vmm_pvz.c drivers/gpu/drm/img-rogue/vz_vmm_pvz.c +copyfile services/server/common/vz_vmm_vm.c drivers/gpu/drm/img-rogue/vz_vmm_vm.c +copyfile services/server/devices/rgxfwdbg.c drivers/gpu/drm/img-rogue/rgxfwdbg.c +copyfile services/server/devices/rgxfwtrace_strings.c drivers/gpu/drm/img-rogue/rgxfwtrace_strings.c +copyfile 
services/server/devices/rgxshader.c drivers/gpu/drm/img-rogue/rgxshader.c +copyfile services/server/devices/rgxtimecorr.c drivers/gpu/drm/img-rogue/rgxtimecorr.c +copyfile services/server/devices/rogue/rgxbreakpoint.c drivers/gpu/drm/img-rogue/rgxbreakpoint.c +copyfile services/server/devices/rogue/rgxbvnc.c drivers/gpu/drm/img-rogue/rgxbvnc.c +copyfile services/server/devices/rogue/rgxccb.c drivers/gpu/drm/img-rogue/rgxccb.c +copyfile services/server/devices/rogue/rgxcompute.c drivers/gpu/drm/img-rogue/rgxcompute.c +copyfile services/server/devices/rogue/rgxdebug.c drivers/gpu/drm/img-rogue/rgxdebug.c +copyfile services/server/devices/rogue/rgxfwimageutils.c drivers/gpu/drm/img-rogue/rgxfwimageutils.c +copyfile services/server/devices/rogue/rgxfwutils.c drivers/gpu/drm/img-rogue/rgxfwutils.c +copyfile services/server/devices/rogue/rgxhwperf.c drivers/gpu/drm/img-rogue/rgxhwperf.c +copyfile services/server/devices/rogue/rgxinit.c drivers/gpu/drm/img-rogue/rgxinit.c +copyfile services/server/devices/rogue/rgxkicksync.c drivers/gpu/drm/img-rogue/rgxkicksync.c +copyfile services/server/devices/rogue/rgxlayer_impl.c drivers/gpu/drm/img-rogue/rgxlayer_impl.c +copyfile services/server/devices/rogue/rgxmem.c drivers/gpu/drm/img-rogue/rgxmem.c +copyfile services/server/devices/rogue/rgxmipsmmuinit.c drivers/gpu/drm/img-rogue/rgxmipsmmuinit.c +copyfile services/server/devices/rogue/rgxmmuinit.c drivers/gpu/drm/img-rogue/rgxmmuinit.c +copyfile services/server/devices/rogue/rgxmulticore.c drivers/gpu/drm/img-rogue/rgxmulticore.c +copyfile services/server/devices/rogue/rgxpower.c drivers/gpu/drm/img-rogue/rgxpower.c +copyfile services/server/devices/rogue/rgxregconfig.c drivers/gpu/drm/img-rogue/rgxregconfig.c +copyfile services/server/devices/rogue/rgxsignals.c drivers/gpu/drm/img-rogue/rgxsignals.c +copyfile services/server/devices/rogue/rgxsrvinit.c drivers/gpu/drm/img-rogue/rgxsrvinit.c +copyfile services/server/devices/rogue/rgxstartstop.c 
drivers/gpu/drm/img-rogue/rgxstartstop.c +copyfile services/server/devices/rogue/rgxsyncutils.c drivers/gpu/drm/img-rogue/rgxsyncutils.c +copyfile services/server/devices/rogue/rgxta3d.c drivers/gpu/drm/img-rogue/rgxta3d.c +copyfile services/server/devices/rogue/rgxtdmtransfer.c drivers/gpu/drm/img-rogue/rgxtdmtransfer.c +copyfile services/server/devices/rogue/rgxtransfer.c drivers/gpu/drm/img-rogue/rgxtransfer.c +copyfile services/server/devices/rogue/rgxutils.c drivers/gpu/drm/img-rogue/rgxutils.c +copyfile services/server/env/linux/allocmem.c drivers/gpu/drm/img-rogue/allocmem.c +copyfile services/server/env/linux/event.c drivers/gpu/drm/img-rogue/event.c +copyfile services/server/env/linux/fwload.c drivers/gpu/drm/img-rogue/fwload.c +copyfile services/server/env/linux/handle_idr.c drivers/gpu/drm/img-rogue/handle_idr.c +copyfile services/server/env/linux/km_apphint.c drivers/gpu/drm/img-rogue/km_apphint.c +copyfile services/server/env/linux/module_common.c drivers/gpu/drm/img-rogue/module_common.c +copyfile services/server/env/linux/osconnection_server.c drivers/gpu/drm/img-rogue/osconnection_server.c +copyfile services/server/env/linux/osfunc.c drivers/gpu/drm/img-rogue/osfunc.c +copyfile services/server/env/linux/osmmap_stub.c drivers/gpu/drm/img-rogue/osmmap_stub.c +copyfile services/server/env/linux/physmem_dmabuf.c drivers/gpu/drm/img-rogue/physmem_dmabuf.c +copyfile services/server/env/linux/physmem_osmem_linux.c drivers/gpu/drm/img-rogue/physmem_osmem_linux.c +copyfile services/server/env/linux/physmem_test.c drivers/gpu/drm/img-rogue/physmem_test.c +copyfile services/server/env/linux/pmr_os.c drivers/gpu/drm/img-rogue/pmr_os.c +copyfile services/server/env/linux/pvr_bridge_k.c drivers/gpu/drm/img-rogue/pvr_bridge_k.c +copyfile services/server/env/linux/pvr_debug.c drivers/gpu/drm/img-rogue/pvr_debug.c +copyfile services/server/env/linux/pvr_debugfs.c drivers/gpu/drm/img-rogue/pvr_debugfs.c +copyfile services/server/env/linux/pvr_gputrace.c 
drivers/gpu/drm/img-rogue/pvr_gputrace.c +copyfile services/shared/common/devicemem.c drivers/gpu/drm/img-rogue/devicemem.c +copyfile services/shared/common/devicemem_utils.c drivers/gpu/drm/img-rogue/devicemem_utils.c +copyfile services/shared/common/hash.c drivers/gpu/drm/img-rogue/hash.c +copyfile services/shared/common/htbuffer.c drivers/gpu/drm/img-rogue/htbuffer.c +copyfile services/shared/common/mem_utils.c drivers/gpu/drm/img-rogue/mem_utils.c +copyfile services/shared/common/pvrsrv_error.c drivers/gpu/drm/img-rogue/pvrsrv_error.c +copyfile services/shared/common/ra.c drivers/gpu/drm/img-rogue/ra.c +copyfile services/shared/common/sync.c drivers/gpu/drm/img-rogue/sync.c +copyfile services/shared/common/tlclient.c drivers/gpu/drm/img-rogue/tlclient.c +copyfile services/shared/common/uniq_key_splay_tree.c drivers/gpu/drm/img-rogue/uniq_key_splay_tree.c +copyfile services/shared/devices/rogue/rgx_hwperf_table.c drivers/gpu/drm/img-rogue/rgx_hwperf_table.c +copyfile services/system/rogue/common/env/linux/dma_support.c drivers/gpu/drm/img-rogue/dma_support.c +copyfile services/system/rogue/common/env/linux/pci_support.c drivers/gpu/drm/img-rogue/pci_support.c +copyfile services/system/rogue/common/vmm_type_stub.c drivers/gpu/drm/img-rogue/vmm_type_stub.c +copyfile services/system/rogue/rgx_linux_tc/sysconfig.c drivers/gpu/drm/img-rogue/apollo/sysconfig.c +copyfile services/server/env/linux/osfunc_arm.c drivers/gpu/drm/img-rogue/osfunc_arm.c +copyfile services/server/env/linux/osfunc_arm64.c drivers/gpu/drm/img-rogue/osfunc_arm64.c +copyfile services/server/env/linux/trace_events.c drivers/gpu/drm/img-rogue/trace_events.c +copyfile services/server/env/linux/osfunc_x86.c drivers/gpu/drm/img-rogue/osfunc_x86.c +copyfile services/server/env/linux/osfunc_mips.c drivers/gpu/drm/img-rogue/osfunc_mips.c +copyfile generated/rogue/cache_bridge/client_cache_bridge.h drivers/gpu/drm/img-rogue/client_cache_bridge.h +copyfile generated/rogue/cache_bridge/common_cache_bridge.h 
drivers/gpu/drm/img-rogue/common_cache_bridge.h +copyfile generated/rogue/cmm_bridge/common_cmm_bridge.h drivers/gpu/drm/img-rogue/common_cmm_bridge.h +copyfile generated/rogue/devicememhistory_bridge/client_devicememhistory_bridge.h drivers/gpu/drm/img-rogue/client_devicememhistory_bridge.h +copyfile generated/rogue/devicememhistory_bridge/common_devicememhistory_bridge.h drivers/gpu/drm/img-rogue/common_devicememhistory_bridge.h +copyfile generated/rogue/dmabuf_bridge/common_dmabuf_bridge.h drivers/gpu/drm/img-rogue/common_dmabuf_bridge.h +copyfile generated/rogue/htbuffer_bridge/client_htbuffer_bridge.h drivers/gpu/drm/img-rogue/client_htbuffer_bridge.h +copyfile generated/rogue/htbuffer_bridge/common_htbuffer_bridge.h drivers/gpu/drm/img-rogue/common_htbuffer_bridge.h +copyfile generated/rogue/mm_bridge/client_mm_bridge.h drivers/gpu/drm/img-rogue/client_mm_bridge.h +copyfile generated/rogue/mm_bridge/common_mm_bridge.h drivers/gpu/drm/img-rogue/common_mm_bridge.h +copyfile generated/rogue/pvrtl_bridge/client_pvrtl_bridge.h drivers/gpu/drm/img-rogue/client_pvrtl_bridge.h +copyfile generated/rogue/pvrtl_bridge/common_pvrtl_bridge.h drivers/gpu/drm/img-rogue/common_pvrtl_bridge.h +copyfile generated/rogue/rgxbreakpoint_bridge/common_rgxbreakpoint_bridge.h drivers/gpu/drm/img-rogue/common_rgxbreakpoint_bridge.h +copyfile generated/rogue/rgxcmp_bridge/common_rgxcmp_bridge.h drivers/gpu/drm/img-rogue/common_rgxcmp_bridge.h +copyfile generated/rogue/rgxfwdbg_bridge/common_rgxfwdbg_bridge.h drivers/gpu/drm/img-rogue/common_rgxfwdbg_bridge.h +copyfile generated/rogue/rgxhwperf_bridge/common_rgxhwperf_bridge.h drivers/gpu/drm/img-rogue/common_rgxhwperf_bridge.h +copyfile generated/rogue/rgxkicksync_bridge/common_rgxkicksync_bridge.h drivers/gpu/drm/img-rogue/common_rgxkicksync_bridge.h +copyfile generated/rogue/rgxregconfig_bridge/common_rgxregconfig_bridge.h drivers/gpu/drm/img-rogue/common_rgxregconfig_bridge.h +copyfile 
generated/rogue/rgxsignals_bridge/common_rgxsignals_bridge.h drivers/gpu/drm/img-rogue/common_rgxsignals_bridge.h +copyfile generated/rogue/rgxta3d_bridge/common_rgxta3d_bridge.h drivers/gpu/drm/img-rogue/common_rgxta3d_bridge.h +copyfile generated/rogue/rgxtq2_bridge/common_rgxtq2_bridge.h drivers/gpu/drm/img-rogue/common_rgxtq2_bridge.h +copyfile generated/rogue/rgxtq_bridge/common_rgxtq_bridge.h drivers/gpu/drm/img-rogue/common_rgxtq_bridge.h +copyfile generated/rogue/ri_bridge/client_ri_bridge.h drivers/gpu/drm/img-rogue/client_ri_bridge.h +copyfile generated/rogue/ri_bridge/common_ri_bridge.h drivers/gpu/drm/img-rogue/common_ri_bridge.h +copyfile generated/rogue/srvcore_bridge/common_srvcore_bridge.h drivers/gpu/drm/img-rogue/common_srvcore_bridge.h +copyfile generated/rogue/sync_bridge/client_sync_bridge.h drivers/gpu/drm/img-rogue/client_sync_bridge.h +copyfile generated/rogue/sync_bridge/common_sync_bridge.h drivers/gpu/drm/img-rogue/common_sync_bridge.h +copyfile generated/rogue/synctracking_bridge/client_synctracking_bridge.h drivers/gpu/drm/img-rogue/client_synctracking_bridge.h +copyfile generated/rogue/synctracking_bridge/common_synctracking_bridge.h drivers/gpu/drm/img-rogue/common_synctracking_bridge.h +copyfile hwdefs/rogue/km/configs/rgxconfig_km_1.V.4.5.h drivers/gpu/drm/img-rogue/configs/rgxconfig_km_1.V.4.5.h +copyfile hwdefs/rogue/km/cores/rgxcore_km_1.82.4.5.h drivers/gpu/drm/img-rogue/cores/rgxcore_km_1.82.4.5.h +copyfile hwdefs/rogue/km/rgx_bvnc_defs_km.h drivers/gpu/drm/img-rogue/km/rgx_bvnc_defs_km.h +copyfile hwdefs/rogue/km/rgx_bvnc_table_km.h drivers/gpu/drm/img-rogue/km/rgx_bvnc_table_km.h +copyfile hwdefs/rogue/km/rgx_cr_defs_km.h drivers/gpu/drm/img-rogue/km/rgx_cr_defs_km.h +copyfile hwdefs/rogue/km/rgxdefs_km.h drivers/gpu/drm/img-rogue/km/rgxdefs_km.h +copyfile hwdefs/rogue/km/rgxmhdefs_km.h drivers/gpu/drm/img-rogue/km/rgxmhdefs_km.h +copyfile hwdefs/rogue/km/rgxmmudefs_km.h drivers/gpu/drm/img-rogue/km/rgxmmudefs_km.h +copyfile 
hwdefs/rogue/km/tmp_rgx_cr_defs_riscv_km.h drivers/gpu/drm/img-rogue/km/tmp_rgx_cr_defs_riscv_km.h +copyfile include/cache_ops.h drivers/gpu/drm/img-rogue/cache_ops.h +copyfile include/devicemem_typedefs.h drivers/gpu/drm/img-rogue/devicemem_typedefs.h +copyfile include/dllist.h drivers/gpu/drm/img-rogue/dllist.h +copyfile include/drm/pdp_drm.h drivers/gpu/drm/img-rogue/pdp_drm.h +copyfile include/drm/pvr_drm.h drivers/gpu/drm/img-rogue/pvr_drm.h +copyfile include/drm/pvr_drm_core.h drivers/gpu/drm/img-rogue/pvr_drm_core.h +copyfile include/img_3dtypes.h drivers/gpu/drm/img-rogue/img_3dtypes.h +copyfile include/img_defs.h drivers/gpu/drm/img-rogue/img_defs.h +copyfile include/img_elf.h drivers/gpu/drm/img-rogue/img_elf.h +copyfile include/img_types.h drivers/gpu/drm/img-rogue/img_types.h +copyfile include/kernel_types.h drivers/gpu/drm/img-rogue/kernel_types.h +copyfile include/linux_sw_sync.h drivers/gpu/drm/img-rogue/linux_sw_sync.h +copyfile include/lock_types.h drivers/gpu/drm/img-rogue/lock_types.h +copyfile include/log2.h drivers/gpu/drm/img-rogue/log2.h +copyfile include/osfunc_common.h drivers/gpu/drm/img-rogue/osfunc_common.h +copyfile include/pdumpdefs.h drivers/gpu/drm/img-rogue/pdumpdefs.h +copyfile include/pdumpdesc.h drivers/gpu/drm/img-rogue/pdumpdesc.h +copyfile include/public/powervr/buffer_attribs.h drivers/gpu/drm/img-rogue/powervr/buffer_attribs.h +copyfile include/public/powervr/mem_types.h drivers/gpu/drm/img-rogue/powervr/mem_types.h +copyfile include/public/powervr/pvrsrv_sync_ext.h drivers/gpu/drm/img-rogue/powervr/pvrsrv_sync_ext.h +copyfile include/pvr_buffer_sync_shared.h drivers/gpu/drm/img-rogue/pvr_buffer_sync_shared.h +copyfile include/pvr_debug.h drivers/gpu/drm/img-rogue/pvr_debug.h +copyfile include/pvr_fd_sync_kernel.h drivers/gpu/drm/img-rogue/pvr_fd_sync_kernel.h +copyfile include/pvr_intrinsics.h drivers/gpu/drm/img-rogue/pvr_intrinsics.h +copyfile include/pvrmodule.h drivers/gpu/drm/img-rogue/pvrmodule.h +copyfile 
include/pvrsrv_device_types.h drivers/gpu/drm/img-rogue/pvrsrv_device_types.h +copyfile include/pvrsrv_devvar.h drivers/gpu/drm/img-rogue/pvrsrv_devvar.h +copyfile include/pvrsrv_error.h drivers/gpu/drm/img-rogue/pvrsrv_error.h +copyfile include/pvrsrv_errors.h drivers/gpu/drm/img-rogue/pvrsrv_errors.h +copyfile include/pvrsrv_memallocflags.h drivers/gpu/drm/img-rogue/pvrsrv_memallocflags.h +copyfile include/pvrsrv_sync_km.h drivers/gpu/drm/img-rogue/pvrsrv_sync_km.h +copyfile include/pvrsrv_tlcommon.h drivers/gpu/drm/img-rogue/pvrsrv_tlcommon.h +copyfile include/pvrsrv_tlstreams.h drivers/gpu/drm/img-rogue/pvrsrv_tlstreams.h +copyfile include/pvrversion.h drivers/gpu/drm/img-rogue/pvrversion.h +copyfile include/rgx_heap_firmware.h drivers/gpu/drm/img-rogue/rgx_heap_firmware.h +copyfile include/rgx_memallocflags.h drivers/gpu/drm/img-rogue/rgx_memallocflags.h +copyfile include/rgx_meta.h drivers/gpu/drm/img-rogue/rgx_meta.h +copyfile include/rgx_mips.h drivers/gpu/drm/img-rogue/rgx_mips.h +copyfile include/rgx_riscv.h drivers/gpu/drm/img-rogue/rgx_riscv.h +copyfile include/ri_typedefs.h drivers/gpu/drm/img-rogue/ri_typedefs.h +copyfile include/rogue/rgx_common.h drivers/gpu/drm/img-rogue/rgx_common.h +copyfile include/rogue/rgx_fwif_alignchecks.h drivers/gpu/drm/img-rogue/rgx_fwif_alignchecks.h +copyfile include/rogue/rgx_fwif_shared.h drivers/gpu/drm/img-rogue/rgx_fwif_shared.h +copyfile include/rogue/rgx_heaps.h drivers/gpu/drm/img-rogue/rgx_heaps.h +copyfile include/rogue/rgx_hwperf.h drivers/gpu/drm/img-rogue/rgx_hwperf.h +copyfile include/rogue/rgx_options.h drivers/gpu/drm/img-rogue/rgx_options.h +copyfile include/rogue/system/rgx_tc/apollo_clocks.h drivers/gpu/drm/img-rogue/apollo/apollo_clocks.h +copyfile include/services_km.h drivers/gpu/drm/img-rogue/services_km.h +copyfile include/servicesext.h drivers/gpu/drm/img-rogue/servicesext.h +copyfile include/sync_checkpoint_external.h drivers/gpu/drm/img-rogue/sync_checkpoint_external.h +copyfile 
include/sync_prim_internal.h drivers/gpu/drm/img-rogue/sync_prim_internal.h +copyfile include/system/rgx_tc/apollo_regs.h drivers/gpu/drm/img-rogue/apollo/apollo_regs.h +copyfile include/system/rgx_tc/bonnie_tcf.h drivers/gpu/drm/img-rogue/apollo/bonnie_tcf.h +copyfile include/system/rgx_tc/odin_defs.h drivers/gpu/drm/img-rogue/apollo/odin_defs.h +copyfile include/system/rgx_tc/odin_pdp_regs.h drivers/gpu/drm/img-rogue/apollo/odin_pdp_regs.h +copyfile include/system/rgx_tc/odin_regs.h drivers/gpu/drm/img-rogue/apollo/odin_regs.h +copyfile include/system/rgx_tc/orion_defs.h drivers/gpu/drm/img-rogue/apollo/orion_defs.h +copyfile include/system/rgx_tc/orion_regs.h drivers/gpu/drm/img-rogue/apollo/orion_regs.h +copyfile include/system/rgx_tc/pdp_regs.h drivers/gpu/drm/img-rogue/apollo/pdp_regs.h +copyfile include/system/rgx_tc/tcf_clk_ctrl.h drivers/gpu/drm/img-rogue/apollo/tcf_clk_ctrl.h +copyfile include/system/rgx_tc/tcf_pll.h drivers/gpu/drm/img-rogue/apollo/tcf_pll.h +copyfile include/system/rgx_tc/tcf_rgbpdp_regs.h drivers/gpu/drm/img-rogue/apollo/tcf_rgbpdp_regs.h +copyfile kernel/drivers/staging/imgtec/kernel_compatibility.h drivers/gpu/drm/img-rogue/kernel_compatibility.h +copyfile kernel/drivers/staging/imgtec/kernel_config_compatibility.h drivers/gpu/drm/img-rogue/kernel_config_compatibility.h +copyfile kernel/drivers/staging/imgtec/kernel_nospec.h drivers/gpu/drm/img-rogue/kernel_nospec.h +copyfile kernel/drivers/staging/imgtec/plato/pdp2_mmu_regs.h drivers/gpu/drm/img-rogue/pdp2_mmu_regs.h +copyfile kernel/drivers/staging/imgtec/plato/pdp2_regs.h drivers/gpu/drm/img-rogue/pdp2_regs.h +copyfile kernel/drivers/staging/imgtec/plato/plato_drv.h drivers/gpu/drm/img-rogue/plato_drv.h +copyfile kernel/drivers/staging/imgtec/pvr_buffer_sync.h drivers/gpu/drm/img-rogue/pvr_buffer_sync.h +copyfile kernel/drivers/staging/imgtec/pvr_counting_timeline.h drivers/gpu/drm/img-rogue/pvr_counting_timeline.h +copyfile kernel/drivers/staging/imgtec/pvr_dma_resv.h 
drivers/gpu/drm/img-rogue/pvr_dma_resv.h +copyfile kernel/drivers/staging/imgtec/pvr_drv.h drivers/gpu/drm/img-rogue/pvr_drv.h +copyfile kernel/drivers/staging/imgtec/pvr_fence.h drivers/gpu/drm/img-rogue/pvr_fence.h +copyfile kernel/drivers/staging/imgtec/pvr_fence_trace.h drivers/gpu/drm/img-rogue/pvr_fence_trace.h +copyfile kernel/drivers/staging/imgtec/pvr_linux_fence.h drivers/gpu/drm/img-rogue/pvr_linux_fence.h +copyfile kernel/drivers/staging/imgtec/pvr_sw_fence.h drivers/gpu/drm/img-rogue/pvr_sw_fence.h +copyfile kernel/drivers/staging/imgtec/pvr_sync.h drivers/gpu/drm/img-rogue/pvr_sync.h +copyfile kernel/drivers/staging/imgtec/services_kernel_client.h drivers/gpu/drm/img-rogue/services_kernel_client.h +copyfile kernel/drivers/staging/imgtec/tc/drm_pdp_drv.h drivers/gpu/drm/img-rogue/apollo/drm_pdp_drv.h +copyfile kernel/drivers/staging/imgtec/tc/drm_pdp_gem.h drivers/gpu/drm/img-rogue/apollo/drm_pdp_gem.h +copyfile kernel/drivers/staging/imgtec/tc/pdp_apollo.h drivers/gpu/drm/img-rogue/apollo/pdp_apollo.h +copyfile kernel/drivers/staging/imgtec/tc/pdp_common.h drivers/gpu/drm/img-rogue/apollo/pdp_common.h +copyfile kernel/drivers/staging/imgtec/tc/pdp_odin.h drivers/gpu/drm/img-rogue/apollo/pdp_odin.h +copyfile kernel/drivers/staging/imgtec/tc/pdp_plato.h drivers/gpu/drm/img-rogue/apollo/pdp_plato.h +copyfile kernel/drivers/staging/imgtec/tc/tc_apollo.h drivers/gpu/drm/img-rogue/apollo/tc_apollo.h +copyfile kernel/drivers/staging/imgtec/tc/tc_drv.h drivers/gpu/drm/img-rogue/apollo/tc_drv.h +copyfile kernel/drivers/staging/imgtec/tc/tc_drv_internal.h drivers/gpu/drm/img-rogue/apollo/tc_drv_internal.h +copyfile kernel/drivers/staging/imgtec/tc/tc_ion.h drivers/gpu/drm/img-rogue/apollo/tc_ion.h +copyfile kernel/drivers/staging/imgtec/tc/tc_odin.h drivers/gpu/drm/img-rogue/apollo/tc_odin.h +copyfile kernel/drivers/staging/imgtec/tc/tc_odin_common_regs.h drivers/gpu/drm/img-rogue/apollo/tc_odin_common_regs.h +copyfile services/include/htbuffer_sf.h 
drivers/gpu/drm/img-rogue/htbuffer_sf.h +copyfile services/include/htbuffer_types.h drivers/gpu/drm/img-rogue/htbuffer_types.h +copyfile services/include/info_page_client.h drivers/gpu/drm/img-rogue/info_page_client.h +copyfile services/include/info_page_defs.h drivers/gpu/drm/img-rogue/info_page_defs.h +copyfile services/include/km_apphint_defs_common.h drivers/gpu/drm/img-rogue/km_apphint_defs_common.h +copyfile services/include/os_cpu_cache.h drivers/gpu/drm/img-rogue/os_cpu_cache.h +copyfile services/include/pdump.h drivers/gpu/drm/img-rogue/pdump.h +copyfile services/include/physheap.h drivers/gpu/drm/img-rogue/physheap.h +copyfile services/include/pvr_bridge.h drivers/gpu/drm/img-rogue/pvr_bridge.h +copyfile services/include/pvr_ricommon.h drivers/gpu/drm/img-rogue/pvr_ricommon.h +copyfile services/include/rgx_bridge.h drivers/gpu/drm/img-rogue/rgx_bridge.h +copyfile services/include/rgx_compat_bvnc.h drivers/gpu/drm/img-rogue/rgx_compat_bvnc.h +copyfile services/include/rgx_fw_info.h drivers/gpu/drm/img-rogue/rgx_fw_info.h +copyfile services/include/rgx_fwif_sf.h drivers/gpu/drm/img-rogue/rgx_fwif_sf.h +copyfile services/include/rgx_pdump_panics.h drivers/gpu/drm/img-rogue/rgx_pdump_panics.h +copyfile services/include/rgx_tq_shared.h drivers/gpu/drm/img-rogue/rgx_tq_shared.h +copyfile services/include/rgxfw_log_helper.h drivers/gpu/drm/img-rogue/rgxfw_log_helper.h +copyfile services/include/rgxtransfer_shader.h drivers/gpu/drm/img-rogue/rgxtransfer_shader.h +copyfile services/include/rogue/km_apphint_defs.h drivers/gpu/drm/img-rogue/km_apphint_defs.h +copyfile services/include/rogue/rgx_fwif_hwperf.h drivers/gpu/drm/img-rogue/rgx_fwif_hwperf.h +copyfile services/include/rogue/rgx_fwif_km.h drivers/gpu/drm/img-rogue/rgx_fwif_km.h +copyfile services/include/rogue/rgx_fwif_resetframework.h drivers/gpu/drm/img-rogue/rgx_fwif_resetframework.h +copyfile services/include/rogue/rgxapi_km.h drivers/gpu/drm/img-rogue/rgxapi_km.h +copyfile 
services/include/rogue/rgxheapconfig.h drivers/gpu/drm/img-rogue/rgxheapconfig.h +copyfile services/include/sync_checkpoint_internal.h drivers/gpu/drm/img-rogue/sync_checkpoint_internal.h +copyfile services/include/sync_checkpoint_internal_fw.h drivers/gpu/drm/img-rogue/sync_checkpoint_internal_fw.h +copyfile services/server/common/htb_debug.h drivers/gpu/drm/img-rogue/htb_debug.h +copyfile services/server/devices/rgxbreakpoint.h drivers/gpu/drm/img-rogue/rgxbreakpoint.h +copyfile services/server/devices/rgxbvnc.h drivers/gpu/drm/img-rogue/rgxbvnc.h +copyfile services/server/devices/rgxfwdbg.h drivers/gpu/drm/img-rogue/rgxfwdbg.h +copyfile services/server/devices/rgxkicksync.h drivers/gpu/drm/img-rogue/rgxkicksync.h +copyfile services/server/devices/rgxmulticore.h drivers/gpu/drm/img-rogue/rgxmulticore.h +copyfile services/server/devices/rgxregconfig.h drivers/gpu/drm/img-rogue/rgxregconfig.h +copyfile services/server/devices/rgxshader.h drivers/gpu/drm/img-rogue/rgxshader.h +copyfile services/server/devices/rgxsignals.h drivers/gpu/drm/img-rogue/rgxsignals.h +copyfile services/server/devices/rgxstartstop.h drivers/gpu/drm/img-rogue/rgxstartstop.h +copyfile services/server/devices/rgxsyncutils.h drivers/gpu/drm/img-rogue/rgxsyncutils.h +copyfile services/server/devices/rgxtimecorr.h drivers/gpu/drm/img-rogue/rgxtimecorr.h +copyfile services/server/devices/rgxutils.h drivers/gpu/drm/img-rogue/rgxutils.h +copyfile services/server/devices/rogue/rgxccb.h drivers/gpu/drm/img-rogue/rgxccb.h +copyfile services/server/devices/rogue/rgxcompute.h drivers/gpu/drm/img-rogue/rgxcompute.h +copyfile services/server/devices/rogue/rgxdebug.h drivers/gpu/drm/img-rogue/rgxdebug.h +copyfile services/server/devices/rogue/rgxdevice.h drivers/gpu/drm/img-rogue/rgxdevice.h +copyfile services/server/devices/rogue/rgxfwimageutils.h drivers/gpu/drm/img-rogue/rgxfwimageutils.h +copyfile services/server/devices/rogue/rgxfwutils.h drivers/gpu/drm/img-rogue/rgxfwutils.h +copyfile 
services/server/devices/rogue/rgxhwperf.h drivers/gpu/drm/img-rogue/rgxhwperf.h +copyfile services/server/devices/rogue/rgxinit.h drivers/gpu/drm/img-rogue/rgxinit.h +copyfile services/server/devices/rogue/rgxlayer.h drivers/gpu/drm/img-rogue/rgxlayer.h +copyfile services/server/devices/rogue/rgxlayer_impl.h drivers/gpu/drm/img-rogue/rgxlayer_impl.h +copyfile services/server/devices/rogue/rgxmem.h drivers/gpu/drm/img-rogue/rgxmem.h +copyfile services/server/devices/rogue/rgxmipsmmuinit.h drivers/gpu/drm/img-rogue/rgxmipsmmuinit.h +copyfile services/server/devices/rogue/rgxmmuinit.h drivers/gpu/drm/img-rogue/rgxmmuinit.h +copyfile services/server/devices/rogue/rgxpower.h drivers/gpu/drm/img-rogue/rgxpower.h +copyfile services/server/devices/rogue/rgxta3d.h drivers/gpu/drm/img-rogue/rgxta3d.h +copyfile services/server/devices/rogue/rgxtdmtransfer.h drivers/gpu/drm/img-rogue/rgxtdmtransfer.h +copyfile services/server/devices/rogue/rgxtransfer.h drivers/gpu/drm/img-rogue/rgxtransfer.h +copyfile services/server/env/linux/env_connection.h drivers/gpu/drm/img-rogue/env_connection.h +copyfile services/server/env/linux/event.h drivers/gpu/drm/img-rogue/event.h +copyfile services/server/env/linux/km_apphint.h drivers/gpu/drm/img-rogue/km_apphint.h +copyfile services/server/env/linux/linkage.h drivers/gpu/drm/img-rogue/linkage.h +copyfile services/server/env/linux/module_common.h drivers/gpu/drm/img-rogue/module_common.h +copyfile services/server/env/linux/physmem_osmem_linux.h drivers/gpu/drm/img-rogue/physmem_osmem_linux.h +copyfile services/server/env/linux/physmem_test.h drivers/gpu/drm/img-rogue/physmem_test.h +copyfile services/server/env/linux/private_data.h drivers/gpu/drm/img-rogue/private_data.h +copyfile services/server/env/linux/pvr_bridge_k.h drivers/gpu/drm/img-rogue/pvr_bridge_k.h +copyfile services/server/env/linux/pvr_debugfs.h drivers/gpu/drm/img-rogue/pvr_debugfs.h +copyfile services/server/env/linux/pvr_ion_stats.h drivers/gpu/drm/img-rogue/pvr_ion_stats.h 
+copyfile services/server/env/linux/pvr_procfs.h drivers/gpu/drm/img-rogue/pvr_procfs.h +copyfile services/server/env/linux/pvr_uaccess.h drivers/gpu/drm/img-rogue/pvr_uaccess.h +copyfile services/server/env/linux/rogue_trace_events.h drivers/gpu/drm/img-rogue/rogue_trace_events.h +copyfile services/server/env/linux/trace_events.h drivers/gpu/drm/img-rogue/trace_events.h +copyfile services/server/include/cache_km.h drivers/gpu/drm/img-rogue/cache_km.h +copyfile services/server/include/connection_server.h drivers/gpu/drm/img-rogue/connection_server.h +copyfile services/server/include/device.h drivers/gpu/drm/img-rogue/device.h +copyfile services/server/include/devicemem_heapcfg.h drivers/gpu/drm/img-rogue/devicemem_heapcfg.h +copyfile services/server/include/devicemem_history_server.h drivers/gpu/drm/img-rogue/devicemem_history_server.h +copyfile services/server/include/devicemem_server.h drivers/gpu/drm/img-rogue/devicemem_server.h +copyfile services/server/include/devicemem_server_utils.h drivers/gpu/drm/img-rogue/devicemem_server_utils.h +copyfile services/server/include/di_common.h drivers/gpu/drm/img-rogue/di_common.h +copyfile services/server/include/di_server.h drivers/gpu/drm/img-rogue/di_server.h +copyfile services/server/include/fwload.h drivers/gpu/drm/img-rogue/fwload.h +copyfile services/server/include/fwtrace_string.h drivers/gpu/drm/img-rogue/fwtrace_string.h +copyfile services/server/include/handle.h drivers/gpu/drm/img-rogue/handle.h +copyfile services/server/include/handle_impl.h drivers/gpu/drm/img-rogue/handle_impl.h +copyfile services/server/include/handle_types.h drivers/gpu/drm/img-rogue/handle_types.h +copyfile services/server/include/htbserver.h drivers/gpu/drm/img-rogue/htbserver.h +copyfile services/server/include/info_page.h drivers/gpu/drm/img-rogue/info_page.h +copyfile services/server/include/lists.h drivers/gpu/drm/img-rogue/lists.h +copyfile services/server/include/mmu_common.h drivers/gpu/drm/img-rogue/mmu_common.h +copyfile 
services/server/include/opaque_types.h drivers/gpu/drm/img-rogue/opaque_types.h +copyfile services/server/include/os_srvinit_param.h drivers/gpu/drm/img-rogue/os_srvinit_param.h +copyfile services/server/include/osconnection_server.h drivers/gpu/drm/img-rogue/osconnection_server.h +copyfile services/server/include/osdi_impl.h drivers/gpu/drm/img-rogue/osdi_impl.h +copyfile services/server/include/osfunc.h drivers/gpu/drm/img-rogue/osfunc.h +copyfile services/server/include/oskm_apphint.h drivers/gpu/drm/img-rogue/oskm_apphint.h +copyfile services/server/include/ospvr_gputrace.h drivers/gpu/drm/img-rogue/ospvr_gputrace.h +copyfile services/server/include/pdump_km.h drivers/gpu/drm/img-rogue/pdump_km.h +copyfile services/server/include/pdump_mmu.h drivers/gpu/drm/img-rogue/pdump_mmu.h +copyfile services/server/include/pdump_physmem.h drivers/gpu/drm/img-rogue/pdump_physmem.h +copyfile services/server/include/pdump_symbolicaddr.h drivers/gpu/drm/img-rogue/pdump_symbolicaddr.h +copyfile services/server/include/physmem.h drivers/gpu/drm/img-rogue/physmem.h +copyfile services/server/include/physmem_dmabuf.h drivers/gpu/drm/img-rogue/physmem_dmabuf.h +copyfile services/server/include/physmem_hostmem.h drivers/gpu/drm/img-rogue/physmem_hostmem.h +copyfile services/server/include/physmem_lma.h drivers/gpu/drm/img-rogue/physmem_lma.h +copyfile services/server/include/physmem_osmem.h drivers/gpu/drm/img-rogue/physmem_osmem.h +copyfile services/server/include/pmr.h drivers/gpu/drm/img-rogue/pmr.h +copyfile services/server/include/pmr_impl.h drivers/gpu/drm/img-rogue/pmr_impl.h +copyfile services/server/include/pmr_os.h drivers/gpu/drm/img-rogue/pmr_os.h +copyfile services/server/include/power.h drivers/gpu/drm/img-rogue/power.h +copyfile services/server/include/process_stats.h drivers/gpu/drm/img-rogue/process_stats.h +copyfile services/server/include/pvr_notifier.h drivers/gpu/drm/img-rogue/pvr_notifier.h +copyfile services/server/include/pvrsrv.h 
drivers/gpu/drm/img-rogue/pvrsrv.h +copyfile services/server/include/pvrsrv_apphint.h drivers/gpu/drm/img-rogue/pvrsrv_apphint.h +copyfile services/server/include/pvrsrv_bridge_init.h drivers/gpu/drm/img-rogue/pvrsrv_bridge_init.h +copyfile services/server/include/pvrsrv_cleanup.h drivers/gpu/drm/img-rogue/pvrsrv_cleanup.h +copyfile services/server/include/pvrsrv_device.h drivers/gpu/drm/img-rogue/pvrsrv_device.h +copyfile services/server/include/pvrsrv_pool.h drivers/gpu/drm/img-rogue/pvrsrv_pool.h +copyfile services/server/include/pvrsrv_sync_server.h drivers/gpu/drm/img-rogue/pvrsrv_sync_server.h +copyfile services/server/include/ri_server.h drivers/gpu/drm/img-rogue/ri_server.h +copyfile services/server/include/sofunc_pvr.h drivers/gpu/drm/img-rogue/sofunc_pvr.h +copyfile services/server/include/sofunc_rgx.h drivers/gpu/drm/img-rogue/sofunc_rgx.h +copyfile services/server/include/srvcore.h drivers/gpu/drm/img-rogue/srvcore.h +copyfile services/server/include/srvinit.h drivers/gpu/drm/img-rogue/srvinit.h +copyfile services/server/include/srvkm.h drivers/gpu/drm/img-rogue/srvkm.h +copyfile services/server/include/sync_checkpoint.h drivers/gpu/drm/img-rogue/sync_checkpoint.h +copyfile services/server/include/sync_checkpoint_init.h drivers/gpu/drm/img-rogue/sync_checkpoint_init.h +copyfile services/server/include/sync_fallback_server.h drivers/gpu/drm/img-rogue/sync_fallback_server.h +copyfile services/server/include/sync_server.h drivers/gpu/drm/img-rogue/sync_server.h +copyfile services/server/include/tlintern.h drivers/gpu/drm/img-rogue/tlintern.h +copyfile services/server/include/tlserver.h drivers/gpu/drm/img-rogue/tlserver.h +copyfile services/server/include/tlstream.h drivers/gpu/drm/img-rogue/tlstream.h +copyfile services/server/include/vmm_impl.h drivers/gpu/drm/img-rogue/vmm_impl.h +copyfile services/server/include/vmm_pvz_client.h drivers/gpu/drm/img-rogue/vmm_pvz_client.h +copyfile services/server/include/vmm_pvz_common.h 
drivers/gpu/drm/img-rogue/vmm_pvz_common.h +copyfile services/server/include/vmm_pvz_server.h drivers/gpu/drm/img-rogue/vmm_pvz_server.h +copyfile services/server/include/vz_vm.h drivers/gpu/drm/img-rogue/vz_vm.h +copyfile services/server/include/vz_vmm_pvz.h drivers/gpu/drm/img-rogue/vz_vmm_pvz.h +copyfile services/shared/common/uniq_key_splay_tree.h drivers/gpu/drm/img-rogue/uniq_key_splay_tree.h +copyfile services/shared/devices/rogue/rgx_hwperf_table.h drivers/gpu/drm/img-rogue/rgx_hwperf_table.h +copyfile services/shared/include/allocmem.h drivers/gpu/drm/img-rogue/allocmem.h +copyfile services/shared/include/device_connection.h drivers/gpu/drm/img-rogue/device_connection.h +copyfile services/shared/include/devicemem.h drivers/gpu/drm/img-rogue/devicemem.h +copyfile services/shared/include/devicemem_pdump.h drivers/gpu/drm/img-rogue/devicemem_pdump.h +copyfile services/shared/include/devicemem_utils.h drivers/gpu/drm/img-rogue/devicemem_utils.h +copyfile services/shared/include/hash.h drivers/gpu/drm/img-rogue/hash.h +copyfile services/shared/include/htbuffer.h drivers/gpu/drm/img-rogue/htbuffer.h +copyfile services/shared/include/htbuffer_init.h drivers/gpu/drm/img-rogue/htbuffer_init.h +copyfile services/shared/include/lock.h drivers/gpu/drm/img-rogue/lock.h +copyfile services/shared/include/osmmap.h drivers/gpu/drm/img-rogue/osmmap.h +copyfile services/shared/include/proc_stats.h drivers/gpu/drm/img-rogue/proc_stats.h +copyfile services/shared/include/ra.h drivers/gpu/drm/img-rogue/ra.h +copyfile services/shared/include/sync.h drivers/gpu/drm/img-rogue/sync.h +copyfile services/shared/include/sync_internal.h drivers/gpu/drm/img-rogue/sync_internal.h +copyfile services/shared/include/tlclient.h drivers/gpu/drm/img-rogue/tlclient.h +copyfile services/system/rogue/include/dma_support.h drivers/gpu/drm/img-rogue/dma_support.h +copyfile services/system/rogue/include/pci_support.h drivers/gpu/drm/img-rogue/pci_support.h +copyfile 
services/system/rogue/include/syscommon.h drivers/gpu/drm/img-rogue/syscommon.h +copyfile services/system/rogue/include/sysvalidation.h drivers/gpu/drm/img-rogue/sysvalidation.h +copyfile services/system/rogue/rgx_linux_tc/sysinfo.h drivers/gpu/drm/img-rogue/apollo/sysinfo.h +copyfile copy-to-kernel-tc/drm_pdp.mk drivers/gpu/drm/img-rogue/pdp/drm_pdp.mk +copyfile copy-to-kernel-tc/apollo.mk drivers/gpu/drm/img-rogue/apollo/apollo.mk +copyfile copy-to-kernel-tc/pvrsrvkm.mk drivers/gpu/drm/img-rogue/pvrsrvkm.mk +copyfile copy-to-kernel-tc/config_kernel.mk drivers/gpu/drm/img-rogue/config_kernel.mk +copyfile copy-to-kernel-tc/config_kernel.h drivers/gpu/drm/img-rogue/config_kernel.h diff --git a/drivers/mcst/gpu-imgtec/copy-to-kernel-tc/drm_pdp.mk b/drivers/mcst/gpu-imgtec/copy-to-kernel-tc/drm_pdp.mk new file mode 100644 index 000000000000..f4ac53c4a7d2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/copy-to-kernel-tc/drm_pdp.mk @@ -0,0 +1,13 @@ +drm_pdp-y += \ + ../apollo/drm_pdp_crtc.o \ + ../apollo/drm_pdp_debugfs.o \ + ../apollo/drm_pdp_drv.o \ + ../apollo/drm_pdp_dvi.o \ + ../apollo/drm_pdp_fb.o \ + ../apollo/drm_pdp_gem.o \ + ../apollo/drm_pdp_modeset.o \ + ../apollo/drm_pdp_plane.o \ + ../apollo/drm_pdp_tmds.o \ + ../apollo/pdp_apollo.o \ + ../apollo/pdp_odin.o \ + ../apollo/pdp_plato.o diff --git a/drivers/mcst/gpu-imgtec/copy-to-kernel-tc/pvrsrvkm.mk b/drivers/mcst/gpu-imgtec/copy-to-kernel-tc/pvrsrvkm.mk new file mode 100644 index 000000000000..f6a9adb90392 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/copy-to-kernel-tc/pvrsrvkm.mk @@ -0,0 +1,138 @@ +pvrsrvkm-y += \ + client_cache_direct_bridge.o \ + server_cache_bridge.o \ + server_cmm_bridge.o \ + client_devicememhistory_direct_bridge.o \ + server_devicememhistory_bridge.o \ + server_dmabuf_bridge.o \ + client_htbuffer_direct_bridge.o \ + server_htbuffer_bridge.o \ + client_mm_direct_bridge.o \ + server_mm_bridge.o \ + client_pvrtl_direct_bridge.o \ + server_pvrtl_bridge.o \ + server_rgxbreakpoint_bridge.o \ + 
server_rgxcmp_bridge.o \ + server_rgxfwdbg_bridge.o \ + server_rgxhwperf_bridge.o \ + server_rgxkicksync_bridge.o \ + server_rgxregconfig_bridge.o \ + server_rgxsignals_bridge.o \ + server_rgxta3d_bridge.o \ + server_rgxtq2_bridge.o \ + server_rgxtq_bridge.o \ + server_srvcore_bridge.o \ + client_sync_direct_bridge.o \ + server_sync_bridge.o \ + client_synctracking_direct_bridge.o \ + server_synctracking_bridge.o \ + pvr_buffer_sync.o \ + pvr_counting_timeline.o \ + pvr_drm.o \ + pvr_fence.o \ + pvr_platform_drv.o \ + pvr_sw_fence.o \ + pvr_sync_file.o \ + cache_km.o \ + connection_server.o \ + devicemem_heapcfg.o \ + devicemem_history_server.o \ + devicemem_server.o \ + di_server.o \ + handle.o \ + htb_debug.o \ + htbserver.o \ + info_page_km.o \ + lists.o \ + mmu_common.o \ + physheap.o \ + physmem.o \ + physmem_hostmem.o \ + physmem_lma.o \ + pmr.o \ + power.o \ + process_stats.o \ + pvr_notifier.o \ + pvrsrv.o \ + pvrsrv_bridge_init.o \ + pvrsrv_pool.o \ + srvcore.o \ + sync_checkpoint.o \ + sync_server.o \ + tlintern.o \ + tlserver.o \ + tlstream.o \ + vmm_pvz_client.o \ + vmm_pvz_server.o \ + vz_vmm_pvz.o \ + vz_vmm_vm.o \ + rgxfwdbg.o \ + rgxfwtrace_strings.o \ + rgxshader.o \ + rgxtimecorr.o \ + rgxbreakpoint.o \ + rgxbvnc.o \ + rgxccb.o \ + rgxcompute.o \ + rgxdebug.o \ + rgxfwimageutils.o \ + rgxfwutils.o \ + rgxhwperf.o \ + rgxinit.o \ + rgxkicksync.o \ + rgxlayer_impl.o \ + rgxmem.o \ + rgxmipsmmuinit.o \ + rgxmmuinit.o \ + rgxmulticore.o \ + rgxpower.o \ + rgxregconfig.o \ + rgxsignals.o \ + rgxsrvinit.o \ + rgxstartstop.o \ + rgxsyncutils.o \ + rgxta3d.o \ + rgxtdmtransfer.o \ + rgxtransfer.o \ + rgxutils.o \ + allocmem.o \ + event.o \ + fwload.o \ + handle_idr.o \ + km_apphint.o \ + module_common.o \ + osconnection_server.o \ + osfunc.o \ + osmmap_stub.o \ + physmem_dmabuf.o \ + physmem_osmem_linux.o \ + physmem_test.o \ + pmr_os.o \ + pvr_bridge_k.o \ + pvr_debug.o \ + pvr_debugfs.o \ + pvr_gputrace.o \ + devicemem.o \ + devicemem_utils.o \ + hash.o 
\ + htbuffer.o \ + mem_utils.o \ + pvrsrv_error.o \ + ra.o \ + sync.o \ + tlclient.o \ + uniq_key_splay_tree.o \ + rgx_hwperf_table.o \ + dma_support.o \ + pci_support.o \ + vmm_type_stub.o \ + apollo/sysconfig.o +pvrsrvkm-$(CONFIG_DRM_POWERVR_ROGUE_DEBUG) += \ + client_ri_direct_bridge.o \ + server_ri_bridge.o \ + ri_server.o +pvrsrvkm-$(CONFIG_ARM) += osfunc_arm.o +pvrsrvkm-$(CONFIG_ARM64) += osfunc_arm64.o +pvrsrvkm-$(CONFIG_EVENT_TRACING) += trace_events.o +pvrsrvkm-$(CONFIG_MIPS) += osfunc_mips.o +pvrsrvkm-$(CONFIG_X86) += osfunc_x86.o diff --git a/drivers/mcst/gpu-imgtec/copy-to-kernel.sh b/drivers/mcst/gpu-imgtec/copy-to-kernel.sh new file mode 100644 index 000000000000..3ef1163465a7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/copy-to-kernel.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +function usage() +{ + echo "$0 " + echo " Copy source files and configuration into a kernel tree." + echo " The configuration and list of files to copy is found in ." + echo " The target kernel tree is ." + echo " Before running this script, we recommend that you clean out the old" + echo " destination directories in ." +} + +if [ "$#" -lt 2 ]; then + echo "Not enough arguments" + usage + exit 1 +fi + +CONFIG=$1 +DEST=$2 + +if [ ! -f "$CONFIG/copy_items.sh" ]; then + echo "$CONFIG does not look like a config directory. copy_items.sh is missing." + usage + exit 1 +fi + +if [ ! -f "$DEST/Kconfig" ] ; then + echo "$DEST does not look like a kernel directory." 
+ usage + exit 1 +fi + +function copyfile() +{ + src=$1 + dest="$DEST/$2" + + mkdir -p `dirname $dest` + echo copy $src to $dest + cp $src $dest + chmod u+w $dest +} + +source "$CONFIG/copy_items.sh" diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/cache_bridge/client_cache_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/cache_bridge/client_cache_bridge.h new file mode 100644 index 000000000000..e9c405198814 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/cache_bridge/client_cache_bridge.h @@ -0,0 +1,101 @@ +/******************************************************************************* +@File +@Title Client bridge header for cache +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for cache +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef CLIENT_CACHE_BRIDGE_H +#define CLIENT_CACHE_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_cache_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpQueue(IMG_HANDLE hBridge, + IMG_UINT32 + ui32NumCacheOps, + IMG_HANDLE * phPMR, + IMG_UINT64 * + pui64Address, + IMG_DEVMEM_OFFSET_T * + puiOffset, + IMG_DEVMEM_SIZE_T * + puiSize, + PVRSRV_CACHE_OP * + piuCacheOp, + IMG_UINT32 + ui32OpTimeline, + IMG_UINT32 + ui32CurrentFenceSeqNum, + IMG_UINT32 * + pui32NextFenceSeqNum); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpExec(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_UINT64 ui64Address, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_DEVMEM_SIZE_T + uiSize, + PVRSRV_CACHE_OP + iuCacheOp); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpLog(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_UINT64 ui64Address, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_DEVMEM_SIZE_T + uiSize, + IMG_INT64 + i64QueuedTimeUs, + IMG_INT64 + i64ExecuteTimeUs, + IMG_INT32 i32NumRBF, + PVRSRV_CACHE_OP + iuCacheOp); + +#endif /* CLIENT_CACHE_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/cache_bridge/client_cache_direct_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/cache_bridge/client_cache_direct_bridge.c new file mode 100644 index 000000000000..d16b30f7918b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/cache_bridge/client_cache_direct_bridge.c @@ -0,0 +1,143 @@ +/******************************************************************************* +@File +@Title Direct client bridge for cache +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for cache + which is used in calls from Server context. 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include "client_cache_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "cache_ops.h" + +#include "cache_km.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpQueue(IMG_HANDLE hBridge, + IMG_UINT32 + ui32NumCacheOps, + IMG_HANDLE * phPMR, + IMG_UINT64 * + pui64Address, + IMG_DEVMEM_OFFSET_T * + puiOffset, + IMG_DEVMEM_SIZE_T * + puiSize, + PVRSRV_CACHE_OP * + piuCacheOp, + IMG_UINT32 + ui32OpTimeline, + IMG_UINT32 + ui32CurrentFenceSeqNum, + IMG_UINT32 * + pui32NextFenceSeqNum) +{ + PVRSRV_ERROR eError; + PMR **psPMRInt; + + psPMRInt = (PMR **) phPMR; + + eError = + CacheOpQueue(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + ui32NumCacheOps, + psPMRInt, + pui64Address, + puiOffset, + puiSize, + piuCacheOp, + ui32OpTimeline, + ui32CurrentFenceSeqNum, pui32NextFenceSeqNum); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpExec(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_UINT64 ui64Address, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_DEVMEM_SIZE_T + uiSize, + PVRSRV_CACHE_OP + iuCacheOp) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + CacheOpValExec(psPMRInt, ui64Address, uiOffset, uiSize, iuCacheOp); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpLog(IMG_HANDLE 
hBridge, + IMG_HANDLE hPMR, + IMG_UINT64 ui64Address, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_DEVMEM_SIZE_T + uiSize, + IMG_INT64 + i64QueuedTimeUs, + IMG_INT64 + i64ExecuteTimeUs, + IMG_INT32 i32NumRBF, + PVRSRV_CACHE_OP + iuCacheOp) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + CacheOpLog(psPMRInt, + ui64Address, + uiOffset, + uiSize, + i64QueuedTimeUs, i64ExecuteTimeUs, i32NumRBF, iuCacheOp); + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/cache_bridge/common_cache_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/cache_bridge/common_cache_bridge.h new file mode 100644 index 000000000000..8e608f177daf --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/cache_bridge/common_cache_bridge.h @@ -0,0 +1,128 @@ +/******************************************************************************* +@File +@Title Common bridge header for cache +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for cache +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_CACHE_BRIDGE_H +#define COMMON_CACHE_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "cache_ops.h" + +#define PVRSRV_BRIDGE_CACHE_CMD_FIRST 0 +#define PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE PVRSRV_BRIDGE_CACHE_CMD_FIRST+0 +#define PVRSRV_BRIDGE_CACHE_CACHEOPEXEC PVRSRV_BRIDGE_CACHE_CMD_FIRST+1 +#define PVRSRV_BRIDGE_CACHE_CACHEOPLOG PVRSRV_BRIDGE_CACHE_CMD_FIRST+2 +#define PVRSRV_BRIDGE_CACHE_CMD_LAST (PVRSRV_BRIDGE_CACHE_CMD_FIRST+2) + +/******************************************* + CacheOpQueue + *******************************************/ + +/* Bridge in structure for CacheOpQueue */ +typedef struct PVRSRV_BRIDGE_IN_CACHEOPQUEUE_TAG +{ + IMG_UINT32 ui32NumCacheOps; + IMG_HANDLE *phPMR; + IMG_UINT64 *pui64Address; + IMG_DEVMEM_OFFSET_T *puiOffset; + IMG_DEVMEM_SIZE_T *puiSize; + PVRSRV_CACHE_OP *piuCacheOp; + IMG_UINT32 ui32OpTimeline; + IMG_UINT32 ui32CurrentFenceSeqNum; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_CACHEOPQUEUE; + +/* Bridge out structure for CacheOpQueue */ +typedef struct PVRSRV_BRIDGE_OUT_CACHEOPQUEUE_TAG +{ + IMG_UINT32 ui32NextFenceSeqNum; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_CACHEOPQUEUE; + +/******************************************* + CacheOpExec + *******************************************/ + +/* Bridge in structure for CacheOpExec */ +typedef struct PVRSRV_BRIDGE_IN_CACHEOPEXEC_TAG +{ + IMG_HANDLE hPMR; + IMG_UINT64 ui64Address; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_DEVMEM_SIZE_T uiSize; + PVRSRV_CACHE_OP iuCacheOp; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_CACHEOPEXEC; + +/* Bridge out structure for CacheOpExec */ +typedef struct PVRSRV_BRIDGE_OUT_CACHEOPEXEC_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_CACHEOPEXEC; + +/******************************************* + CacheOpLog + *******************************************/ + +/* Bridge in 
structure for CacheOpLog */ +typedef struct PVRSRV_BRIDGE_IN_CACHEOPLOG_TAG +{ + IMG_HANDLE hPMR; + IMG_UINT64 ui64Address; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_DEVMEM_SIZE_T uiSize; + IMG_INT64 i64QueuedTimeUs; + IMG_INT64 i64ExecuteTimeUs; + IMG_INT32 i32NumRBF; + PVRSRV_CACHE_OP iuCacheOp; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_CACHEOPLOG; + +/* Bridge out structure for CacheOpLog */ +typedef struct PVRSRV_BRIDGE_OUT_CACHEOPLOG_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_CACHEOPLOG; + +#endif /* COMMON_CACHE_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/cache_bridge/server_cache_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/cache_bridge/server_cache_bridge.c new file mode 100644 index 000000000000..8f862aebbb1e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/cache_bridge/server_cache_bridge.c @@ -0,0 +1,503 @@ +/******************************************************************************* +@File +@Title Server bridge for cache +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for cache +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "cache_km.h" + +#include "common_cache_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeCacheOpQueue(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psCacheOpQueueIN_UI8, + IMG_UINT8 * psCacheOpQueueOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_CACHEOPQUEUE *psCacheOpQueueIN = + (PVRSRV_BRIDGE_IN_CACHEOPQUEUE *) + IMG_OFFSET_ADDR(psCacheOpQueueIN_UI8, 0); + PVRSRV_BRIDGE_OUT_CACHEOPQUEUE *psCacheOpQueueOUT = + (PVRSRV_BRIDGE_OUT_CACHEOPQUEUE *) + IMG_OFFSET_ADDR(psCacheOpQueueOUT_UI8, 0); + + PMR **psPMRInt = NULL; + IMG_HANDLE *hPMRInt2 = NULL; + IMG_UINT64 *ui64AddressInt = NULL; + IMG_DEVMEM_OFFSET_T *uiOffsetInt = NULL; + IMG_DEVMEM_SIZE_T *uiSizeInt = NULL; + PVRSRV_CACHE_OP *iuCacheOpInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *)) + + (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE)) + + (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64)) + + (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T)) + + (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T)) + + (psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP)) + 0; + + if (unlikely(psCacheOpQueueIN->ui32NumCacheOps > CACHE_BATCH_MAX)) + { + psCacheOpQueueOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto CacheOpQueue_exit; + } + + if (ui32BufferSize != 0) + { +#if 
!defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psCacheOpQueueIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psCacheOpQueueIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psCacheOpQueueOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto CacheOpQueue_exit; + } + } + } + + if (psCacheOpQueueIN->ui32NumCacheOps != 0) + { + psPMRInt = + (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *); + hPMRInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hPMRInt2, + (const void __user *)psCacheOpQueueIN->phPMR, + psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE)) != + PVRSRV_OK) + { + psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto CacheOpQueue_exit; + } + } + if (psCacheOpQueueIN->ui32NumCacheOps != 0) + { + ui64AddressInt = + (IMG_UINT64 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64); + } + + /* Copy the data over */ + if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64) > 0) + { + if (OSCopyFromUser + (NULL, ui64AddressInt, + (const void __user *)psCacheOpQueueIN->pui64Address, + psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64)) != + 
PVRSRV_OK) + { + psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto CacheOpQueue_exit; + } + } + if (psCacheOpQueueIN->ui32NumCacheOps != 0) + { + uiOffsetInt = + (IMG_DEVMEM_OFFSET_T *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psCacheOpQueueIN->ui32NumCacheOps * + sizeof(IMG_DEVMEM_OFFSET_T); + } + + /* Copy the data over */ + if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T) > 0) + { + if (OSCopyFromUser + (NULL, uiOffsetInt, + (const void __user *)psCacheOpQueueIN->puiOffset, + psCacheOpQueueIN->ui32NumCacheOps * + sizeof(IMG_DEVMEM_OFFSET_T)) != PVRSRV_OK) + { + psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto CacheOpQueue_exit; + } + } + if (psCacheOpQueueIN->ui32NumCacheOps != 0) + { + uiSizeInt = + (IMG_DEVMEM_SIZE_T *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psCacheOpQueueIN->ui32NumCacheOps * + sizeof(IMG_DEVMEM_SIZE_T); + } + + /* Copy the data over */ + if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T) > 0) + { + if (OSCopyFromUser + (NULL, uiSizeInt, + (const void __user *)psCacheOpQueueIN->puiSize, + psCacheOpQueueIN->ui32NumCacheOps * + sizeof(IMG_DEVMEM_SIZE_T)) != PVRSRV_OK) + { + psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto CacheOpQueue_exit; + } + } + if (psCacheOpQueueIN->ui32NumCacheOps != 0) + { + iuCacheOpInt = + (PVRSRV_CACHE_OP *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP); + } + + /* Copy the data over */ + if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP) > 0) + { + if (OSCopyFromUser + (NULL, iuCacheOpInt, + (const void __user *)psCacheOpQueueIN->piuCacheOp, + psCacheOpQueueIN->ui32NumCacheOps * + sizeof(PVRSRV_CACHE_OP)) != PVRSRV_OK) + { + psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto CacheOpQueue_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + { + IMG_UINT32 i; + + for (i = 0; i < psCacheOpQueueIN->ui32NumCacheOps; i++) + { + /* Look up the address from the handle */ + psCacheOpQueueOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection-> + psHandleBase, + (void **)&psPMRInt[i], + hPMRInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psCacheOpQueueOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto CacheOpQueue_exit; + } + } + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psCacheOpQueueOUT->eError = + CacheOpQueue(psConnection, OSGetDevNode(psConnection), + psCacheOpQueueIN->ui32NumCacheOps, + psPMRInt, + ui64AddressInt, + uiOffsetInt, + uiSizeInt, + iuCacheOpInt, + psCacheOpQueueIN->ui32OpTimeline, + psCacheOpQueueIN->ui32CurrentFenceSeqNum, + &psCacheOpQueueOUT->ui32NextFenceSeqNum); + +CacheOpQueue_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + if (hPMRInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psCacheOpQueueIN->ui32NumCacheOps; i++) + { + + /* Unreference the previously looked up handle */ + if (hPMRInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + hPMRInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + } + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeCacheOpExec(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psCacheOpExecIN_UI8, + IMG_UINT8 * psCacheOpExecOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_CACHEOPEXEC *psCacheOpExecIN = + (PVRSRV_BRIDGE_IN_CACHEOPEXEC *) + IMG_OFFSET_ADDR(psCacheOpExecIN_UI8, 0); + PVRSRV_BRIDGE_OUT_CACHEOPEXEC *psCacheOpExecOUT = + (PVRSRV_BRIDGE_OUT_CACHEOPEXEC *) + IMG_OFFSET_ADDR(psCacheOpExecOUT_UI8, 0); + + IMG_HANDLE hPMR = psCacheOpExecIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psCacheOpExecOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psCacheOpExecOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto CacheOpExec_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psCacheOpExecOUT->eError = + CacheOpValExec(psPMRInt, + psCacheOpExecIN->ui64Address, + psCacheOpExecIN->uiOffset, + psCacheOpExecIN->uiSize, psCacheOpExecIN->iuCacheOp); + +CacheOpExec_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeCacheOpLog(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psCacheOpLogIN_UI8, + IMG_UINT8 * psCacheOpLogOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_CACHEOPLOG *psCacheOpLogIN = + (PVRSRV_BRIDGE_IN_CACHEOPLOG *) IMG_OFFSET_ADDR(psCacheOpLogIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_CACHEOPLOG *psCacheOpLogOUT = + (PVRSRV_BRIDGE_OUT_CACHEOPLOG *) + IMG_OFFSET_ADDR(psCacheOpLogOUT_UI8, 0); + + IMG_HANDLE hPMR = psCacheOpLogIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psCacheOpLogOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psCacheOpLogOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto CacheOpLog_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psCacheOpLogOUT->eError = + CacheOpLog(psPMRInt, + psCacheOpLogIN->ui64Address, + psCacheOpLogIN->uiOffset, + psCacheOpLogIN->uiSize, + psCacheOpLogIN->i64QueuedTimeUs, + psCacheOpLogIN->i64ExecuteTimeUs, + psCacheOpLogIN->i32NumRBF, psCacheOpLogIN->iuCacheOp); + +CacheOpLog_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitCACHEBridge(void); +PVRSRV_ERROR DeinitCACHEBridge(void); + +/* + * Register all CACHE functions with services + */ +PVRSRV_ERROR InitCACHEBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, + PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE, + PVRSRVBridgeCacheOpQueue, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, + PVRSRV_BRIDGE_CACHE_CACHEOPEXEC, + PVRSRVBridgeCacheOpExec, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, + PVRSRV_BRIDGE_CACHE_CACHEOPLOG, + PVRSRVBridgeCacheOpLog, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all cache functions with services + */ +PVRSRV_ERROR DeinitCACHEBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, + PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, + PVRSRV_BRIDGE_CACHE_CACHEOPEXEC); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, + PVRSRV_BRIDGE_CACHE_CACHEOPLOG); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/cmm_bridge/common_cmm_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/cmm_bridge/common_cmm_bridge.h new file mode 100644 index 000000000000..52bdef547897 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/cmm_bridge/common_cmm_bridge.h @@ -0,0 +1,113 @@ +/******************************************************************************* +@File +@Title Common bridge header for cmm +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for cmm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_CMM_BRIDGE_H +#define COMMON_CMM_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "devicemem_typedefs.h" + +#define PVRSRV_BRIDGE_CMM_CMD_FIRST 0 +#define PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX PVRSRV_BRIDGE_CMM_CMD_FIRST+0 +#define PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX PVRSRV_BRIDGE_CMM_CMD_FIRST+1 +#define PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX PVRSRV_BRIDGE_CMM_CMD_FIRST+2 +#define PVRSRV_BRIDGE_CMM_CMD_LAST (PVRSRV_BRIDGE_CMM_CMD_FIRST+2) + +/******************************************* + DevmemIntExportCtx + *******************************************/ + +/* Bridge in structure for DevmemIntExportCtx */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX_TAG +{ + IMG_HANDLE hContext; + IMG_HANDLE hPMR; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX; + +/* Bridge out structure for DevmemIntExportCtx */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX_TAG +{ + IMG_HANDLE hContextExport; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX; + +/******************************************* + DevmemIntUnexportCtx + *******************************************/ + +/* Bridge in structure for DevmemIntUnexportCtx */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX_TAG +{ + IMG_HANDLE hContextExport; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX; + +/* Bridge out structure for DevmemIntUnexportCtx */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX; + +/******************************************* + DevmemIntAcquireRemoteCtx + *******************************************/ + +/* Bridge in structure for DevmemIntAcquireRemoteCtx */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX_TAG +{ + IMG_HANDLE hPMR; +} __attribute__ ((packed)) 
PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX; + +/* Bridge out structure for DevmemIntAcquireRemoteCtx */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX_TAG +{ + IMG_HANDLE hContext; + IMG_HANDLE hPrivData; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX; + +#endif /* COMMON_CMM_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/cmm_bridge/server_cmm_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/cmm_bridge/server_cmm_bridge.c new file mode 100644 index 000000000000..0e5a081ea135 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/cmm_bridge/server_cmm_bridge.c @@ -0,0 +1,445 @@ +/******************************************************************************* +@File +@Title Server bridge for cmm +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for cmm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "pmr.h" +#include "devicemem_server.h" + +#include "common_cmm_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +#if !defined(EXCLUDE_CMM_BRIDGE) + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _DevmemIntExportCtxpsContextExportIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = DevmemIntUnexportCtx((DEVMEMINT_CTX_EXPORT *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeDevmemIntExportCtx(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntExportCtxIN_UI8, + IMG_UINT8 * psDevmemIntExportCtxOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX *psDevmemIntExportCtxIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX *) + IMG_OFFSET_ADDR(psDevmemIntExportCtxIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX *psDevmemIntExportCtxOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX *) + IMG_OFFSET_ADDR(psDevmemIntExportCtxOUT_UI8, 0); + + IMG_HANDLE hContext = psDevmemIntExportCtxIN->hContext; + DEVMEMINT_CTX *psContextInt = NULL; + IMG_HANDLE hPMR = psDevmemIntExportCtxIN->hPMR; + PMR *psPMRInt = NULL; + DEVMEMINT_CTX_EXPORT *psContextExportInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntExportCtxOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psContextInt, + hContext, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + IMG_TRUE); + if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntExportCtx_exit; + } + + /* Look up the address from the handle */ + psDevmemIntExportCtxOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntExportCtx_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntExportCtxOUT->eError = + DevmemIntExportCtx(psContextInt, psPMRInt, &psContextExportInt); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK)) + { + goto DevmemIntExportCtx_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psDevmemIntExportCtxOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntExportCtxOUT->hContextExport, + (void *)psContextExportInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + (PFN_HANDLE_RELEASE) & + _DevmemIntExportCtxpsContextExportIntRelease); + if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntExportCtx_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntExportCtx_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hContext, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psDevmemIntExportCtxOUT->eError != PVRSRV_OK) + { + if (psContextExportInt) + { + LockHandle(KERNEL_HANDLE_BASE); + DevmemIntUnexportCtx(psContextExportInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntUnexportCtx(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntUnexportCtxIN_UI8, + IMG_UINT8 * psDevmemIntUnexportCtxOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX *psDevmemIntUnexportCtxIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX *) + IMG_OFFSET_ADDR(psDevmemIntUnexportCtxIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX *psDevmemIntUnexportCtxOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX *) + IMG_OFFSET_ADDR(psDevmemIntUnexportCtxOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psDevmemIntUnexportCtxOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psDevmemIntUnexportCtxIN-> + hContextExport, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT); + if (unlikely + ((psDevmemIntUnexportCtxOUT->eError != PVRSRV_OK) + && (psDevmemIntUnexportCtxOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psDevmemIntUnexportCtxOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntUnexportCtx_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntUnexportCtx_exit: + + return 0; +} + +static PVRSRV_ERROR _DevmemIntAcquireRemoteCtxpsContextIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = DevmemIntCtxDestroy((DEVMEMINT_CTX *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeDevmemIntAcquireRemoteCtx(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psDevmemIntAcquireRemoteCtxIN_UI8, + IMG_UINT8 * + psDevmemIntAcquireRemoteCtxOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX + *psDevmemIntAcquireRemoteCtxIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX *) + IMG_OFFSET_ADDR(psDevmemIntAcquireRemoteCtxIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX + *psDevmemIntAcquireRemoteCtxOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX *) + IMG_OFFSET_ADDR(psDevmemIntAcquireRemoteCtxOUT_UI8, 0); + + IMG_HANDLE hPMR = psDevmemIntAcquireRemoteCtxIN->hPMR; + PMR *psPMRInt = NULL; + DEVMEMINT_CTX *psContextInt = NULL; + IMG_HANDLE hPrivDataInt = NULL; + + psDevmemIntAcquireRemoteCtxOUT->hContext = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntAcquireRemoteCtxOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntAcquireRemoteCtx_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntAcquireRemoteCtxOUT->eError = + DevmemIntAcquireRemoteCtx(psPMRInt, &psContextInt, &hPrivDataInt); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)) + { + goto DevmemIntAcquireRemoteCtx_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psDevmemIntAcquireRemoteCtxOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntAcquireRemoteCtxOUT->hContext, + (void *)psContextInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + (PFN_HANDLE_RELEASE) & + _DevmemIntAcquireRemoteCtxpsContextIntRelease); + if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntAcquireRemoteCtx_exit; + } + + psDevmemIntAcquireRemoteCtxOUT->eError = + PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntAcquireRemoteCtxOUT-> + hPrivData, (void *)hPrivDataInt, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psDevmemIntAcquireRemoteCtxOUT-> + hContext); + if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntAcquireRemoteCtx_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntAcquireRemoteCtx_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK) + { + if (psDevmemIntAcquireRemoteCtxOUT->hContext) + { + PVRSRV_ERROR eError; + + /* Lock over handle creation cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + eError = + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + (IMG_HANDLE) + psDevmemIntAcquireRemoteCtxOUT-> + hContext, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + if (unlikely + ((eError != PVRSRV_OK) + && (eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) + || (eError == PVRSRV_ERROR_RETRY)); + + /* Avoid freeing/destroying/releasing the resource a second time below */ + psContextInt = NULL; + /* Release now we have cleaned up creation handles. */ + UnlockHandle(psConnection->psHandleBase); + + } + + if (psContextInt) + { + DevmemIntCtxDestroy(psContextInt); + } + } + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +#endif /* EXCLUDE_CMM_BRIDGE */ + +#if !defined(EXCLUDE_CMM_BRIDGE) +PVRSRV_ERROR InitCMMBridge(void); +PVRSRV_ERROR DeinitCMMBridge(void); + +/* + * Register all CMM functions with services + */ +PVRSRV_ERROR InitCMMBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, + PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX, + PVRSRVBridgeDevmemIntExportCtx, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, + PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX, + PVRSRVBridgeDevmemIntUnexportCtx, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, + PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX, + PVRSRVBridgeDevmemIntAcquireRemoteCtx, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all cmm functions with services + */ +PVRSRV_ERROR DeinitCMMBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, + PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, + PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, + 
PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX); + + return PVRSRV_OK; +} +#else /* EXCLUDE_CMM_BRIDGE */ +/* This bridge is conditional on EXCLUDE_CMM_BRIDGE - when defined, + * do not populate the dispatch table with its functions + */ +#define InitCMMBridge() \ + PVRSRV_OK + +#define DeinitCMMBridge() \ + PVRSRV_OK + +#endif /* EXCLUDE_CMM_BRIDGE */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/devicememhistory_bridge/client_devicememhistory_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/devicememhistory_bridge/client_devicememhistory_bridge.h new file mode 100644 index 000000000000..4fc6ec1855a2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/devicememhistory_bridge/client_devicememhistory_bridge.h @@ -0,0 +1,132 @@ +/******************************************************************************* +@File +@Title Client bridge header for devicememhistory +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for devicememhistory +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef CLIENT_DEVICEMEMHISTORY_BRIDGE_H +#define CLIENT_DEVICEMEMHISTORY_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_devicememhistory_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryMap(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMR, + IMG_DEVMEM_SIZE_T + uiOffset, + IMG_DEV_VIRTADDR + sDevVAddr, + IMG_DEVMEM_SIZE_T + uiSize, + const IMG_CHAR + * puiText, + IMG_UINT32 + ui32Log2PageSize, + IMG_UINT32 + ui32AllocationIndex, + IMG_UINT32 * + pui32AllocationIndexOut); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryUnmap(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMR, + IMG_DEVMEM_SIZE_T + uiOffset, + IMG_DEV_VIRTADDR + sDevVAddr, + IMG_DEVMEM_SIZE_T + uiSize, + const + IMG_CHAR * + puiText, + IMG_UINT32 + ui32Log2PageSize, + IMG_UINT32 + ui32AllocationIndex, + IMG_UINT32 * + pui32AllocationIndexOut); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevicememHistoryMapVRange(IMG_HANDLE hBridge, + IMG_DEV_VIRTADDR sBaseDevVAddr, + IMG_UINT32 ui32ui32StartPage, + IMG_UINT32 ui32NumPages, + IMG_DEVMEM_SIZE_T uiAllocSize, + const IMG_CHAR * puiText, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 * pui32AllocationIndexOut); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevicememHistoryUnmapVRange(IMG_HANDLE hBridge, + IMG_DEV_VIRTADDR sBaseDevVAddr, + IMG_UINT32 ui32ui32StartPage, + IMG_UINT32 ui32NumPages, + IMG_DEVMEM_SIZE_T uiAllocSize, + const IMG_CHAR * puiText, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 * pui32AllocationIndexOut); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevicememHistorySparseChange(IMG_HANDLE hBridge, IMG_HANDLE hPMR, + IMG_DEVMEM_SIZE_T uiOffset, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + const 
IMG_CHAR * puiText, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 * pui32AllocPageIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 * pui32FreePageIndices, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 * pui32AllocationIndexOut); + +#endif /* CLIENT_DEVICEMEMHISTORY_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/devicememhistory_bridge/client_devicememhistory_direct_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/devicememhistory_bridge/client_devicememhistory_direct_bridge.c new file mode 100644 index 000000000000..33336f46f61d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/devicememhistory_bridge/client_devicememhistory_direct_bridge.c @@ -0,0 +1,221 @@ +/******************************************************************************* +@File +@Title Direct client bridge for devicememhistory +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for devicememhistory + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include "client_devicememhistory_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "img_types.h" +#include "img_defs.h" +#include "devicemem_typedefs.h" + +#include "devicemem_history_server.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryMap(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMR, + IMG_DEVMEM_SIZE_T + uiOffset, + IMG_DEV_VIRTADDR + sDevVAddr, + IMG_DEVMEM_SIZE_T + uiSize, + const IMG_CHAR + * puiText, + IMG_UINT32 + ui32Log2PageSize, + IMG_UINT32 + ui32AllocationIndex, + IMG_UINT32 * + pui32AllocationIndexOut) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + DevicememHistoryMapKM(psPMRInt, + uiOffset, + sDevVAddr, + uiSize, + puiText, + ui32Log2PageSize, + ui32AllocationIndex, pui32AllocationIndexOut); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryUnmap(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMR, + IMG_DEVMEM_SIZE_T + uiOffset, + IMG_DEV_VIRTADDR + sDevVAddr, + IMG_DEVMEM_SIZE_T + uiSize, + const + IMG_CHAR * + puiText, + IMG_UINT32 + ui32Log2PageSize, + IMG_UINT32 + ui32AllocationIndex, + IMG_UINT32 * + pui32AllocationIndexOut) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + DevicememHistoryUnmapKM(psPMRInt, + uiOffset, + sDevVAddr, + uiSize, + puiText, + ui32Log2PageSize, + ui32AllocationIndex, + pui32AllocationIndexOut); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevicememHistoryMapVRange(IMG_HANDLE hBridge, + IMG_DEV_VIRTADDR sBaseDevVAddr, + IMG_UINT32 ui32ui32StartPage, + IMG_UINT32 ui32NumPages, + IMG_DEVMEM_SIZE_T uiAllocSize, + const IMG_CHAR * puiText, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 * pui32AllocationIndexOut) +{ + PVRSRV_ERROR 
eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = + DevicememHistoryMapVRangeKM(sBaseDevVAddr, + ui32ui32StartPage, + ui32NumPages, + uiAllocSize, + puiText, + ui32Log2PageSize, + ui32AllocationIndex, + pui32AllocationIndexOut); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevicememHistoryUnmapVRange(IMG_HANDLE hBridge, + IMG_DEV_VIRTADDR sBaseDevVAddr, + IMG_UINT32 ui32ui32StartPage, + IMG_UINT32 ui32NumPages, + IMG_DEVMEM_SIZE_T uiAllocSize, + const IMG_CHAR * puiText, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 * pui32AllocationIndexOut) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = + DevicememHistoryUnmapVRangeKM(sBaseDevVAddr, + ui32ui32StartPage, + ui32NumPages, + uiAllocSize, + puiText, + ui32Log2PageSize, + ui32AllocationIndex, + pui32AllocationIndexOut); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevicememHistorySparseChange(IMG_HANDLE hBridge, IMG_HANDLE hPMR, + IMG_DEVMEM_SIZE_T uiOffset, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR * puiText, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 * pui32AllocPageIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 * pui32FreePageIndices, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 * pui32AllocationIndexOut) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + DevicememHistorySparseChangeKM(psPMRInt, + uiOffset, + sDevVAddr, + uiSize, + puiText, + ui32Log2PageSize, + ui32AllocPageCount, + pui32AllocPageIndices, + ui32FreePageCount, + pui32FreePageIndices, + ui32AllocationIndex, + pui32AllocationIndexOut); + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/devicememhistory_bridge/common_devicememhistory_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/devicememhistory_bridge/common_devicememhistory_bridge.h new file mode 
100644 index 000000000000..da7af1bb74d7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/devicememhistory_bridge/common_devicememhistory_bridge.h @@ -0,0 +1,184 @@ +/******************************************************************************* +@File +@Title Common bridge header for devicememhistory +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for devicememhistory +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_DEVICEMEMHISTORY_BRIDGE_H +#define COMMON_DEVICEMEMHISTORY_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "img_types.h" +#include "img_defs.h" +#include "devicemem_typedefs.h" + +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST 0 +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+0 +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+1 +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+2 +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+3 +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+4 +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_LAST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+4) + +/******************************************* + DevicememHistoryMap + *******************************************/ + +/* Bridge in structure for DevicememHistoryMap */ +typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP_TAG +{ + IMG_HANDLE hPMR; + IMG_DEVMEM_SIZE_T uiOffset; + IMG_DEV_VIRTADDR sDevVAddr; + IMG_DEVMEM_SIZE_T uiSize; + const IMG_CHAR *puiText; + 
IMG_UINT32 ui32Log2PageSize; + IMG_UINT32 ui32AllocationIndex; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP; + +/* Bridge out structure for DevicememHistoryMap */ +typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP_TAG +{ + IMG_UINT32 ui32AllocationIndexOut; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP; + +/******************************************* + DevicememHistoryUnmap + *******************************************/ + +/* Bridge in structure for DevicememHistoryUnmap */ +typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP_TAG +{ + IMG_HANDLE hPMR; + IMG_DEVMEM_SIZE_T uiOffset; + IMG_DEV_VIRTADDR sDevVAddr; + IMG_DEVMEM_SIZE_T uiSize; + const IMG_CHAR *puiText; + IMG_UINT32 ui32Log2PageSize; + IMG_UINT32 ui32AllocationIndex; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP; + +/* Bridge out structure for DevicememHistoryUnmap */ +typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP_TAG +{ + IMG_UINT32 ui32AllocationIndexOut; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP; + +/******************************************* + DevicememHistoryMapVRange + *******************************************/ + +/* Bridge in structure for DevicememHistoryMapVRange */ +typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE_TAG +{ + IMG_DEV_VIRTADDR sBaseDevVAddr; + IMG_UINT32 ui32ui32StartPage; + IMG_UINT32 ui32NumPages; + IMG_DEVMEM_SIZE_T uiAllocSize; + const IMG_CHAR *puiText; + IMG_UINT32 ui32Log2PageSize; + IMG_UINT32 ui32AllocationIndex; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE; + +/* Bridge out structure for DevicememHistoryMapVRange */ +typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE_TAG +{ + IMG_UINT32 ui32AllocationIndexOut; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE; + +/******************************************* + DevicememHistoryUnmapVRange + 
*******************************************/ + +/* Bridge in structure for DevicememHistoryUnmapVRange */ +typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE_TAG +{ + IMG_DEV_VIRTADDR sBaseDevVAddr; + IMG_UINT32 ui32ui32StartPage; + IMG_UINT32 ui32NumPages; + IMG_DEVMEM_SIZE_T uiAllocSize; + const IMG_CHAR *puiText; + IMG_UINT32 ui32Log2PageSize; + IMG_UINT32 ui32AllocationIndex; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE; + +/* Bridge out structure for DevicememHistoryUnmapVRange */ +typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE_TAG +{ + IMG_UINT32 ui32AllocationIndexOut; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE; + +/******************************************* + DevicememHistorySparseChange + *******************************************/ + +/* Bridge in structure for DevicememHistorySparseChange */ +typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE_TAG +{ + IMG_HANDLE hPMR; + IMG_DEVMEM_SIZE_T uiOffset; + IMG_DEV_VIRTADDR sDevVAddr; + IMG_DEVMEM_SIZE_T uiSize; + const IMG_CHAR *puiText; + IMG_UINT32 ui32Log2PageSize; + IMG_UINT32 ui32AllocPageCount; + IMG_UINT32 *pui32AllocPageIndices; + IMG_UINT32 ui32FreePageCount; + IMG_UINT32 *pui32FreePageIndices; + IMG_UINT32 ui32AllocationIndex; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE; + +/* Bridge out structure for DevicememHistorySparseChange */ +typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE_TAG +{ + IMG_UINT32 ui32AllocationIndexOut; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE; + +#endif /* COMMON_DEVICEMEMHISTORY_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/devicememhistory_bridge/server_devicememhistory_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/devicememhistory_bridge/server_devicememhistory_bridge.c new file mode 100644 index 000000000000..98d4a7c388e4 --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/generated/rogue/devicememhistory_bridge/server_devicememhistory_bridge.c @@ -0,0 +1,885 @@ +/******************************************************************************* +@File +@Title Server bridge for devicememhistory +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for devicememhistory +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "devicemem_history_server.h" + +#include "common_devicememhistory_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +#include "lock.h" + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeDevicememHistoryMap(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevicememHistoryMapIN_UI8, + IMG_UINT8 * psDevicememHistoryMapOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP *psDevicememHistoryMapIN = + (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP *) + IMG_OFFSET_ADDR(psDevicememHistoryMapIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP *psDevicememHistoryMapOUT = + (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP *) + IMG_OFFSET_ADDR(psDevicememHistoryMapOUT_UI8, 0); + + IMG_HANDLE hPMR = psDevicememHistoryMapIN->hPMR; + PMR *psPMRInt = NULL; + IMG_CHAR *uiTextInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; + + if 
(ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psDevicememHistoryMapIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psDevicememHistoryMapIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psDevicememHistoryMapOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto DevicememHistoryMap_exit; + } + } + } + + { + uiTextInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiTextInt, + (const void __user *)psDevicememHistoryMapIN->puiText, + DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psDevicememHistoryMapOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto DevicememHistoryMap_exit; + } + ((IMG_CHAR *) + uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevicememHistoryMapOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psDevicememHistoryMapOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevicememHistoryMap_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psDevicememHistoryMapOUT->eError = + DevicememHistoryMapKM(psPMRInt, + psDevicememHistoryMapIN->uiOffset, + psDevicememHistoryMapIN->sDevVAddr, + psDevicememHistoryMapIN->uiSize, + uiTextInt, + psDevicememHistoryMapIN->ui32Log2PageSize, + psDevicememHistoryMapIN->ui32AllocationIndex, + &psDevicememHistoryMapOUT-> + ui32AllocationIndexOut); + +DevicememHistoryMap_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevicememHistoryUnmap(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevicememHistoryUnmapIN_UI8, + IMG_UINT8 * psDevicememHistoryUnmapOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP *psDevicememHistoryUnmapIN = + (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP *) + IMG_OFFSET_ADDR(psDevicememHistoryUnmapIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP *psDevicememHistoryUnmapOUT = + (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP *) + IMG_OFFSET_ADDR(psDevicememHistoryUnmapOUT_UI8, 0); + + IMG_HANDLE hPMR = psDevicememHistoryUnmapIN->hPMR; + PMR *psPMRInt = NULL; + IMG_CHAR *uiTextInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; + + if (ui32BufferSize != 0) 
+ { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psDevicememHistoryUnmapIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psDevicememHistoryUnmapIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psDevicememHistoryUnmapOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto DevicememHistoryUnmap_exit; + } + } + } + + { + uiTextInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiTextInt, + (const void __user *)psDevicememHistoryUnmapIN->puiText, + DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psDevicememHistoryUnmapOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto DevicememHistoryUnmap_exit; + } + ((IMG_CHAR *) + uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevicememHistoryUnmapOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psDevicememHistoryUnmapOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevicememHistoryUnmap_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psDevicememHistoryUnmapOUT->eError = + DevicememHistoryUnmapKM(psPMRInt, + psDevicememHistoryUnmapIN->uiOffset, + psDevicememHistoryUnmapIN->sDevVAddr, + psDevicememHistoryUnmapIN->uiSize, + uiTextInt, + psDevicememHistoryUnmapIN->ui32Log2PageSize, + psDevicememHistoryUnmapIN-> + ui32AllocationIndex, + &psDevicememHistoryUnmapOUT-> + ui32AllocationIndexOut); + +DevicememHistoryUnmap_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevicememHistoryMapVRange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psDevicememHistoryMapVRangeIN_UI8, + IMG_UINT8 * + psDevicememHistoryMapVRangeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE + *psDevicememHistoryMapVRangeIN = + (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE *) + IMG_OFFSET_ADDR(psDevicememHistoryMapVRangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE + *psDevicememHistoryMapVRangeOUT = + (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE *) + IMG_OFFSET_ADDR(psDevicememHistoryMapVRangeOUT_UI8, 0); + + IMG_CHAR *uiTextInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; + + 
PVR_UNREFERENCED_PARAMETER(psConnection); + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psDevicememHistoryMapVRangeIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psDevicememHistoryMapVRangeIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psDevicememHistoryMapVRangeOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto DevicememHistoryMapVRange_exit; + } + } + } + + { + uiTextInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiTextInt, + (const void __user *)psDevicememHistoryMapVRangeIN-> + puiText, + DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psDevicememHistoryMapVRangeOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto DevicememHistoryMapVRange_exit; + } + ((IMG_CHAR *) + uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + + psDevicememHistoryMapVRangeOUT->eError = + DevicememHistoryMapVRangeKM(psDevicememHistoryMapVRangeIN-> + sBaseDevVAddr, + psDevicememHistoryMapVRangeIN-> + ui32ui32StartPage, + psDevicememHistoryMapVRangeIN-> + ui32NumPages, + psDevicememHistoryMapVRangeIN-> + uiAllocSize, uiTextInt, + psDevicememHistoryMapVRangeIN-> + ui32Log2PageSize, + psDevicememHistoryMapVRangeIN-> + ui32AllocationIndex, + &psDevicememHistoryMapVRangeOUT-> + 
ui32AllocationIndexOut); + +DevicememHistoryMapVRange_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevicememHistoryUnmapVRange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psDevicememHistoryUnmapVRangeIN_UI8, + IMG_UINT8 * + psDevicememHistoryUnmapVRangeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE + *psDevicememHistoryUnmapVRangeIN = + (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE *) + IMG_OFFSET_ADDR(psDevicememHistoryUnmapVRangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE + *psDevicememHistoryUnmapVRangeOUT = + (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE *) + IMG_OFFSET_ADDR(psDevicememHistoryUnmapVRangeOUT_UI8, 0); + + IMG_CHAR *uiTextInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psDevicememHistoryUnmapVRangeIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *) + psDevicememHistoryUnmapVRangeIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psDevicememHistoryUnmapVRangeOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto DevicememHistoryUnmapVRange_exit; + } + } + } + + { + uiTextInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiTextInt, + (const void __user *)psDevicememHistoryUnmapVRangeIN-> + puiText, + DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psDevicememHistoryUnmapVRangeOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto DevicememHistoryUnmapVRange_exit; + } + ((IMG_CHAR *) + uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + + psDevicememHistoryUnmapVRangeOUT->eError = + DevicememHistoryUnmapVRangeKM(psDevicememHistoryUnmapVRangeIN-> + sBaseDevVAddr, + psDevicememHistoryUnmapVRangeIN-> + ui32ui32StartPage, + psDevicememHistoryUnmapVRangeIN-> + ui32NumPages, + psDevicememHistoryUnmapVRangeIN-> + uiAllocSize, uiTextInt, + psDevicememHistoryUnmapVRangeIN-> + ui32Log2PageSize, + psDevicememHistoryUnmapVRangeIN-> + ui32AllocationIndex, + &psDevicememHistoryUnmapVRangeOUT-> + ui32AllocationIndexOut); + +DevicememHistoryUnmapVRange_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT 
+PVRSRVBridgeDevicememHistorySparseChange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psDevicememHistorySparseChangeIN_UI8, + IMG_UINT8 * + psDevicememHistorySparseChangeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE + *psDevicememHistorySparseChangeIN = + (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE *) + IMG_OFFSET_ADDR(psDevicememHistorySparseChangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE + *psDevicememHistorySparseChangeOUT = + (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE *) + IMG_OFFSET_ADDR(psDevicememHistorySparseChangeOUT_UI8, 0); + + IMG_HANDLE hPMR = psDevicememHistorySparseChangeIN->hPMR; + PMR *psPMRInt = NULL; + IMG_CHAR *uiTextInt = NULL; + IMG_UINT32 *ui32AllocPageIndicesInt = NULL; + IMG_UINT32 *ui32FreePageIndicesInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + + (psDevicememHistorySparseChangeIN->ui32AllocPageCount * + sizeof(IMG_UINT32)) + + (psDevicememHistorySparseChangeIN->ui32FreePageCount * + sizeof(IMG_UINT32)) + 0; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psDevicememHistorySparseChangeIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *) + psDevicememHistorySparseChangeIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psDevicememHistorySparseChangeOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto DevicememHistorySparseChange_exit; + } + } + } + + { + uiTextInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiTextInt, + (const void __user *)psDevicememHistorySparseChangeIN-> + puiText, + DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psDevicememHistorySparseChangeOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto DevicememHistorySparseChange_exit; + } + ((IMG_CHAR *) + uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + if (psDevicememHistorySparseChangeIN->ui32AllocPageCount != 0) + { + ui32AllocPageIndicesInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psDevicememHistorySparseChangeIN->ui32AllocPageCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psDevicememHistorySparseChangeIN->ui32AllocPageCount * + sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32AllocPageIndicesInt, + (const void __user *)psDevicememHistorySparseChangeIN-> + pui32AllocPageIndices, + psDevicememHistorySparseChangeIN->ui32AllocPageCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psDevicememHistorySparseChangeOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto DevicememHistorySparseChange_exit; + } + } + if (psDevicememHistorySparseChangeIN->ui32FreePageCount != 0) + { + 
ui32FreePageIndicesInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psDevicememHistorySparseChangeIN->ui32FreePageCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psDevicememHistorySparseChangeIN->ui32FreePageCount * + sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32FreePageIndicesInt, + (const void __user *)psDevicememHistorySparseChangeIN-> + pui32FreePageIndices, + psDevicememHistorySparseChangeIN->ui32FreePageCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psDevicememHistorySparseChangeOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto DevicememHistorySparseChange_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevicememHistorySparseChangeOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psDevicememHistorySparseChangeOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevicememHistorySparseChange_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevicememHistorySparseChangeOUT->eError = + DevicememHistorySparseChangeKM(psPMRInt, + psDevicememHistorySparseChangeIN-> + uiOffset, + psDevicememHistorySparseChangeIN-> + sDevVAddr, + psDevicememHistorySparseChangeIN-> + uiSize, uiTextInt, + psDevicememHistorySparseChangeIN-> + ui32Log2PageSize, + psDevicememHistorySparseChangeIN-> + ui32AllocPageCount, + ui32AllocPageIndicesInt, + psDevicememHistorySparseChangeIN-> + ui32FreePageCount, + ui32FreePageIndicesInt, + psDevicememHistorySparseChangeIN-> + ui32AllocationIndex, + &psDevicememHistorySparseChangeOUT-> + ui32AllocationIndexOut); + +DevicememHistorySparseChange_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +static POS_LOCK pDEVICEMEMHISTORYBridgeLock; + +PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void); +PVRSRV_ERROR DeinitDEVICEMEMHISTORYBridge(void); + +/* + * Register all DEVICEMEMHISTORY functions with services + */ +PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void) +{ + PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&pDEVICEMEMHISTORYBridgeLock), + "OSLockCreate"); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP, + PVRSRVBridgeDevicememHistoryMap, + pDEVICEMEMHISTORYBridgeLock); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP, + PVRSRVBridgeDevicememHistoryUnmap, + pDEVICEMEMHISTORYBridgeLock); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE, + PVRSRVBridgeDevicememHistoryMapVRange, + pDEVICEMEMHISTORYBridgeLock); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE, + PVRSRVBridgeDevicememHistoryUnmapVRange, + pDEVICEMEMHISTORYBridgeLock); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE, + 
PVRSRVBridgeDevicememHistorySparseChange, + pDEVICEMEMHISTORYBridgeLock); + + return PVRSRV_OK; +} + +/* + * Unregister all devicememhistory functions with services + */ +PVRSRV_ERROR DeinitDEVICEMEMHISTORYBridge(void) +{ + PVR_LOG_RETURN_IF_ERROR(OSLockDestroy(pDEVICEMEMHISTORYBridgeLock), + "OSLockDestroy"); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/dmabuf_bridge/common_dmabuf_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/dmabuf_bridge/common_dmabuf_bridge.h new file mode 100644 index 000000000000..9c7e8254bde8 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/dmabuf_bridge/common_dmabuf_bridge.h @@ -0,0 +1,126 @@ +/******************************************************************************* +@File +@Title Common bridge header for dmabuf +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for dmabuf +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_DMABUF_BRIDGE_H +#define COMMON_DMABUF_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "pvrsrv_memallocflags.h" + +#define PVRSRV_BRIDGE_DMABUF_CMD_FIRST 0 +#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+0 +#define PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+1 +#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+2 +#define PVRSRV_BRIDGE_DMABUF_CMD_LAST (PVRSRV_BRIDGE_DMABUF_CMD_FIRST+2) + +/******************************************* + PhysmemImportDmaBuf + *******************************************/ + +/* Bridge in structure for PhysmemImportDmaBuf */ +typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF_TAG +{ + IMG_INT ifd; + PVRSRV_MEMALLOCFLAGS_T uiFlags; + IMG_UINT32 ui32NameSize; + const IMG_CHAR *puiName; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF; + +/* Bridge out structure for PhysmemImportDmaBuf */ +typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF_TAG +{ + IMG_HANDLE hPMRPtr; + IMG_DEVMEM_SIZE_T uiSize; + IMG_DEVMEM_ALIGN_T sAlign; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF; + +/******************************************* + PhysmemExportDmaBuf + *******************************************/ + +/* Bridge in structure for PhysmemExportDmaBuf */ +typedef struct PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF_TAG +{ + IMG_HANDLE hPMR; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF; + +/* Bridge out structure for PhysmemExportDmaBuf */ +typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF_TAG +{ + IMG_INT iFd; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF; + +/******************************************* + PhysmemImportSparseDmaBuf + *******************************************/ + +/* Bridge in structure for 
PhysmemImportSparseDmaBuf */ +typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF_TAG +{ + IMG_INT ifd; + PVRSRV_MEMALLOCFLAGS_T uiFlags; + IMG_DEVMEM_SIZE_T uiChunkSize; + IMG_UINT32 ui32NumPhysChunks; + IMG_UINT32 ui32NumVirtChunks; + IMG_UINT32 *pui32MappingTable; + IMG_UINT32 ui32NameSize; + const IMG_CHAR *puiName; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF; + +/* Bridge out structure for PhysmemImportSparseDmaBuf */ +typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF_TAG +{ + IMG_HANDLE hPMRPtr; + IMG_DEVMEM_SIZE_T uiSize; + IMG_DEVMEM_ALIGN_T sAlign; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF; + +#endif /* COMMON_DMABUF_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/dmabuf_bridge/server_dmabuf_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/dmabuf_bridge/server_dmabuf_bridge.c new file mode 100644 index 000000000000..ea6e460ba056 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/dmabuf_bridge/server_dmabuf_bridge.c @@ -0,0 +1,545 @@ +/******************************************************************************* +@File +@Title Server bridge for dmabuf +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for dmabuf +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "physmem_dmabuf.h" +#include "pmr.h" + +#include "common_dmabuf_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _PhysmemImportDmaBufpsPMRPtrIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnrefPMR((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePhysmemImportDmaBuf(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPhysmemImportDmaBufIN_UI8, + IMG_UINT8 * psPhysmemImportDmaBufOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF *psPhysmemImportDmaBufIN = + (PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF *) + IMG_OFFSET_ADDR(psPhysmemImportDmaBufIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF *psPhysmemImportDmaBufOUT = + (PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF *) + IMG_OFFSET_ADDR(psPhysmemImportDmaBufOUT_UI8, 0); + + IMG_CHAR *uiNameInt = NULL; + PMR *psPMRPtrInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) + 0; + + if (unlikely + (psPhysmemImportDmaBufIN->ui32NameSize > DEVMEM_ANNOTATION_MAX_LEN)) + { + psPhysmemImportDmaBufOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysmemImportDmaBuf_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psPhysmemImportDmaBufIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psPhysmemImportDmaBufIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psPhysmemImportDmaBufOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto PhysmemImportDmaBuf_exit; + } + } + } + + if (psPhysmemImportDmaBufIN->ui32NameSize != 0) + { + uiNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiNameInt, + (const void __user *)psPhysmemImportDmaBufIN->puiName, + psPhysmemImportDmaBufIN->ui32NameSize * + sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psPhysmemImportDmaBufOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysmemImportDmaBuf_exit; + } + ((IMG_CHAR *) + uiNameInt)[(psPhysmemImportDmaBufIN->ui32NameSize * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + + psPhysmemImportDmaBufOUT->eError = + PhysmemImportDmaBuf(psConnection, OSGetDevNode(psConnection), + psPhysmemImportDmaBufIN->ifd, + psPhysmemImportDmaBufIN->uiFlags, + psPhysmemImportDmaBufIN->ui32NameSize, + uiNameInt, + &psPMRPtrInt, + &psPhysmemImportDmaBufOUT->uiSize, + &psPhysmemImportDmaBufOUT->sAlign); + /* Exit early if bridged call fails */ + if (unlikely(psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)) + { + goto PhysmemImportDmaBuf_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psPhysmemImportDmaBufOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psPhysmemImportDmaBufOUT->hPMRPtr, + (void *)psPMRPtrInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PhysmemImportDmaBufpsPMRPtrIntRelease); + if (unlikely(psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PhysmemImportDmaBuf_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +PhysmemImportDmaBuf_exit: + + if (psPhysmemImportDmaBufOUT->eError != PVRSRV_OK) + { + if (psPMRPtrInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnrefPMR(psPMRPtrInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgePhysmemExportDmaBuf(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPhysmemExportDmaBufIN_UI8, + IMG_UINT8 * psPhysmemExportDmaBufOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF *psPhysmemExportDmaBufIN = + (PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF *) + IMG_OFFSET_ADDR(psPhysmemExportDmaBufIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF *psPhysmemExportDmaBufOUT = + (PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF *) + IMG_OFFSET_ADDR(psPhysmemExportDmaBufOUT_UI8, 0); + + IMG_HANDLE hPMR = psPhysmemExportDmaBufIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPhysmemExportDmaBufOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psPhysmemExportDmaBufOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PhysmemExportDmaBuf_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPhysmemExportDmaBufOUT->eError = + PhysmemExportDmaBuf(psConnection, OSGetDevNode(psConnection), + psPMRInt, &psPhysmemExportDmaBufOUT->iFd); + +PhysmemExportDmaBuf_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static PVRSRV_ERROR _PhysmemImportSparseDmaBufpsPMRPtrIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnrefPMR((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePhysmemImportSparseDmaBuf(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psPhysmemImportSparseDmaBufIN_UI8, + IMG_UINT8 * + psPhysmemImportSparseDmaBufOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF + *psPhysmemImportSparseDmaBufIN = + (PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF *) + IMG_OFFSET_ADDR(psPhysmemImportSparseDmaBufIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF + *psPhysmemImportSparseDmaBufOUT = + (PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF *) + IMG_OFFSET_ADDR(psPhysmemImportSparseDmaBufOUT_UI8, 0); + + IMG_UINT32 *ui32MappingTableInt = NULL; + IMG_CHAR *uiNameInt = NULL; + PMR *psPMRPtrInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer 
= NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * + sizeof(IMG_UINT32)) + + (psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) + + 0; + + if (unlikely + (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks > + PMR_MAX_SUPPORTED_PAGE_COUNT)) + { + psPhysmemImportSparseDmaBufOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysmemImportSparseDmaBuf_exit; + } + + if (unlikely + (psPhysmemImportSparseDmaBufIN->ui32NameSize > + DEVMEM_ANNOTATION_MAX_LEN)) + { + psPhysmemImportSparseDmaBufOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysmemImportSparseDmaBuf_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psPhysmemImportSparseDmaBufIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psPhysmemImportSparseDmaBufIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psPhysmemImportSparseDmaBufOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto PhysmemImportSparseDmaBuf_exit; + } + } + } + + if (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks != 0) + { + ui32MappingTableInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * + sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32MappingTableInt, + (const void __user *)psPhysmemImportSparseDmaBufIN-> + pui32MappingTable, + psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psPhysmemImportSparseDmaBufOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysmemImportSparseDmaBuf_exit; + } + } + if (psPhysmemImportSparseDmaBufIN->ui32NameSize != 0) + { + uiNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psPhysmemImportSparseDmaBufIN->ui32NameSize * + sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiNameInt, + (const void __user *)psPhysmemImportSparseDmaBufIN-> + puiName, + psPhysmemImportSparseDmaBufIN->ui32NameSize * + sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psPhysmemImportSparseDmaBufOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysmemImportSparseDmaBuf_exit; + } + ((IMG_CHAR *) + uiNameInt)[(psPhysmemImportSparseDmaBufIN->ui32NameSize * + sizeof(IMG_CHAR)) - 1] = 
'\0'; + } + + psPhysmemImportSparseDmaBufOUT->eError = + PhysmemImportSparseDmaBuf(psConnection, OSGetDevNode(psConnection), + psPhysmemImportSparseDmaBufIN->ifd, + psPhysmemImportSparseDmaBufIN->uiFlags, + psPhysmemImportSparseDmaBufIN-> + uiChunkSize, + psPhysmemImportSparseDmaBufIN-> + ui32NumPhysChunks, + psPhysmemImportSparseDmaBufIN-> + ui32NumVirtChunks, ui32MappingTableInt, + psPhysmemImportSparseDmaBufIN-> + ui32NameSize, uiNameInt, &psPMRPtrInt, + &psPhysmemImportSparseDmaBufOUT->uiSize, + &psPhysmemImportSparseDmaBufOUT->sAlign); + /* Exit early if bridged call fails */ + if (unlikely(psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK)) + { + goto PhysmemImportSparseDmaBuf_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psPhysmemImportSparseDmaBufOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psPhysmemImportSparseDmaBufOUT->hPMRPtr, + (void *)psPMRPtrInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PhysmemImportSparseDmaBufpsPMRPtrIntRelease); + if (unlikely(psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PhysmemImportSparseDmaBuf_exit; + } + + /* Release now we have created handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +PhysmemImportSparseDmaBuf_exit: + + if (psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK) + { + if (psPMRPtrInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnrefPMR(psPMRPtrInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitDMABUFBridge(void); +PVRSRV_ERROR DeinitDMABUFBridge(void); + +/* + * Register all DMABUF functions with services + */ +PVRSRV_ERROR InitDMABUFBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, + PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF, + PVRSRVBridgePhysmemImportDmaBuf, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, + PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF, + PVRSRVBridgePhysmemExportDmaBuf, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, + PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF, + PVRSRVBridgePhysmemImportSparseDmaBuf, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all dmabuf functions with services + */ +PVRSRV_ERROR DeinitDMABUFBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, + PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, + PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, + PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/htbuffer_bridge/client_htbuffer_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/htbuffer_bridge/client_htbuffer_bridge.h new file mode 100644 index 000000000000..c32b7591bb14 --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/generated/rogue/htbuffer_bridge/client_htbuffer_bridge.h @@ -0,0 +1,75 @@ +/******************************************************************************* +@File +@Title Client bridge header for htbuffer +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for htbuffer +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef CLIENT_HTBUFFER_BRIDGE_H +#define CLIENT_HTBUFFER_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_htbuffer_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBControl(IMG_HANDLE hBridge, + IMG_UINT32 + ui32NumGroups, + IMG_UINT32 * + pui32GroupEnable, + IMG_UINT32 ui32LogLevel, + IMG_UINT32 + ui32EnablePID, + IMG_UINT32 ui32LogMode, + IMG_UINT32 ui32OpMode); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBLog(IMG_HANDLE hBridge, + IMG_UINT32 ui32PID, + IMG_UINT64 ui64TimeStamp, + IMG_UINT32 ui32SF, + IMG_UINT32 ui32NumArgs, + IMG_UINT32 * pui32Args); + +#endif /* CLIENT_HTBUFFER_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/htbuffer_bridge/client_htbuffer_direct_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/htbuffer_bridge/client_htbuffer_direct_bridge.c new file mode 100644 index 000000000000..699da1f02468 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/htbuffer_bridge/client_htbuffer_direct_bridge.c @@ -0,0 +1,91 @@ +/******************************************************************************* +@File +@Title Direct client bridge for htbuffer +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Implements the client side of the bridge for htbuffer + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include "client_htbuffer_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "devicemem_typedefs.h" +#include "htbuffer_types.h" + +#include "htbserver.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBControl(IMG_HANDLE hBridge, + IMG_UINT32 + ui32NumGroups, + IMG_UINT32 * + pui32GroupEnable, + IMG_UINT32 ui32LogLevel, + IMG_UINT32 + ui32EnablePID, + IMG_UINT32 ui32LogMode, + IMG_UINT32 ui32OpMode) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = + HTBControlKM(ui32NumGroups, + pui32GroupEnable, + ui32LogLevel, ui32EnablePID, ui32LogMode, ui32OpMode); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBLog(IMG_HANDLE hBridge, + IMG_UINT32 ui32PID, + IMG_UINT64 ui64TimeStamp, + IMG_UINT32 ui32SF, + IMG_UINT32 ui32NumArgs, + IMG_UINT32 * pui32Args) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = + HTBLogKM(ui32PID, ui64TimeStamp, ui32SF, ui32NumArgs, pui32Args); + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/htbuffer_bridge/common_htbuffer_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/htbuffer_bridge/common_htbuffer_bridge.h new file mode 100644 index 000000000000..a936da34e0a1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/htbuffer_bridge/common_htbuffer_bridge.h @@ -0,0 +1,102 
@@ +/******************************************************************************* +@File +@Title Common bridge header for htbuffer +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for htbuffer +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_HTBUFFER_BRIDGE_H +#define COMMON_HTBUFFER_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "devicemem_typedefs.h" +#include "htbuffer_types.h" + +#define PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST 0 +#define PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+0 +#define PVRSRV_BRIDGE_HTBUFFER_HTBLOG PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+1 +#define PVRSRV_BRIDGE_HTBUFFER_CMD_LAST (PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+1) + +/******************************************* + HTBControl + *******************************************/ + +/* Bridge in structure for HTBControl */ +typedef struct PVRSRV_BRIDGE_IN_HTBCONTROL_TAG +{ + IMG_UINT32 ui32NumGroups; + IMG_UINT32 *pui32GroupEnable; + IMG_UINT32 ui32LogLevel; + IMG_UINT32 ui32EnablePID; + IMG_UINT32 ui32LogMode; + IMG_UINT32 ui32OpMode; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_HTBCONTROL; + +/* Bridge out structure for HTBControl */ +typedef struct PVRSRV_BRIDGE_OUT_HTBCONTROL_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_HTBCONTROL; + +/******************************************* + HTBLog + *******************************************/ + +/* Bridge in structure for HTBLog */ +typedef struct PVRSRV_BRIDGE_IN_HTBLOG_TAG +{ + IMG_UINT32 ui32PID; + IMG_UINT64 ui64TimeStamp; + IMG_UINT32 ui32SF; + IMG_UINT32 ui32NumArgs; + IMG_UINT32 
*pui32Args; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_HTBLOG; + +/* Bridge out structure for HTBLog */ +typedef struct PVRSRV_BRIDGE_OUT_HTBLOG_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_HTBLOG; + +#endif /* COMMON_HTBUFFER_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/htbuffer_bridge/server_htbuffer_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/htbuffer_bridge/server_htbuffer_bridge.c new file mode 100644 index 000000000000..451f11e8c696 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/htbuffer_bridge/server_htbuffer_bridge.c @@ -0,0 +1,346 @@ +/******************************************************************************* +@File +@Title Server bridge for htbuffer +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for htbuffer +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "htbserver.h" + +#include "common_htbuffer_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +#include "lock.h" + +#if !defined(EXCLUDE_HTBUFFER_BRIDGE) + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeHTBControl(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psHTBControlIN_UI8, + IMG_UINT8 * psHTBControlOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_HTBCONTROL *psHTBControlIN = + (PVRSRV_BRIDGE_IN_HTBCONTROL *) IMG_OFFSET_ADDR(psHTBControlIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_HTBCONTROL *psHTBControlOUT = + (PVRSRV_BRIDGE_OUT_HTBCONTROL *) + IMG_OFFSET_ADDR(psHTBControlOUT_UI8, 0); + + IMG_UINT32 *ui32GroupEnableInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32)) + 0; + + if (unlikely(psHTBControlIN->ui32NumGroups > HTB_FLAG_NUM_EL)) + { + psHTBControlOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto HTBControl_exit; + } + + PVR_UNREFERENCED_PARAMETER(psConnection); + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psHTBControlIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psHTBControlIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psHTBControlOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto HTBControl_exit; + } + } + } + + if (psHTBControlIN->ui32NumGroups != 0) + { + ui32GroupEnableInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32GroupEnableInt, + (const void __user *)psHTBControlIN->pui32GroupEnable, + psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32)) != + PVRSRV_OK) + { + psHTBControlOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto HTBControl_exit; + } + } + + psHTBControlOUT->eError = + HTBControlKM(psHTBControlIN->ui32NumGroups, + ui32GroupEnableInt, + psHTBControlIN->ui32LogLevel, + psHTBControlIN->ui32EnablePID, + psHTBControlIN->ui32LogMode, + psHTBControlIN->ui32OpMode); + +HTBControl_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeHTBLog(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psHTBLogIN_UI8, + IMG_UINT8 * psHTBLogOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_HTBLOG *psHTBLogIN = + (PVRSRV_BRIDGE_IN_HTBLOG *) IMG_OFFSET_ADDR(psHTBLogIN_UI8, 0); + PVRSRV_BRIDGE_OUT_HTBLOG *psHTBLogOUT = + (PVRSRV_BRIDGE_OUT_HTBLOG *) IMG_OFFSET_ADDR(psHTBLogOUT_UI8, 0); + + IMG_UINT32 
*ui32ArgsInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) + 0; + + if (unlikely(psHTBLogIN->ui32NumArgs > HTB_LOG_MAX_PARAMS)) + { + psHTBLogOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto HTBLog_exit; + } + + PVR_UNREFERENCED_PARAMETER(psConnection); + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psHTBLogIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psHTBLogIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psHTBLogOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto HTBLog_exit; + } + } + } + + if (psHTBLogIN->ui32NumArgs != 0) + { + ui32ArgsInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ArgsInt, + (const void __user *)psHTBLogIN->pui32Args, + psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psHTBLogOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto HTBLog_exit; + } + } + + psHTBLogOUT->eError = + HTBLogKM(psHTBLogIN->ui32PID, + psHTBLogIN->ui64TimeStamp, + psHTBLogIN->ui32SF, psHTBLogIN->ui32NumArgs, ui32ArgsInt); + +HTBLog_exit: + + /* Allocated space should be equal to the last 
updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +static POS_LOCK pHTBUFFERBridgeLock; + +#endif /* EXCLUDE_HTBUFFER_BRIDGE */ + +#if !defined(EXCLUDE_HTBUFFER_BRIDGE) +PVRSRV_ERROR InitHTBUFFERBridge(void); +PVRSRV_ERROR DeinitHTBUFFERBridge(void); + +/* + * Register all HTBUFFER functions with services + */ +PVRSRV_ERROR InitHTBUFFERBridge(void) +{ + PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&pHTBUFFERBridgeLock), + "OSLockCreate"); + + SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, + PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL, + PVRSRVBridgeHTBControl, pHTBUFFERBridgeLock); + + SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, + PVRSRV_BRIDGE_HTBUFFER_HTBLOG, PVRSRVBridgeHTBLog, + pHTBUFFERBridgeLock); + + return PVRSRV_OK; +} + +/* + * Unregister all htbuffer functions with services + */ +PVRSRV_ERROR DeinitHTBUFFERBridge(void) +{ + PVR_LOG_RETURN_IF_ERROR(OSLockDestroy(pHTBUFFERBridgeLock), + "OSLockDestroy"); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, + PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, + PVRSRV_BRIDGE_HTBUFFER_HTBLOG); + + return PVRSRV_OK; +} +#else /* EXCLUDE_HTBUFFER_BRIDGE */ +/* This bridge is conditional on EXCLUDE_HTBUFFER_BRIDGE - when defined, + * do not populate the dispatch table with its functions + */ +#define InitHTBUFFERBridge() \ + PVRSRV_OK + +#define DeinitHTBUFFERBridge() \ + PVRSRV_OK + +#endif /* EXCLUDE_HTBUFFER_BRIDGE */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/mm_bridge/client_mm_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/mm_bridge/client_mm_bridge.h new file mode 100644 index 000000000000..b3315bbf45f4 --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/generated/rogue/mm_bridge/client_mm_bridge.h @@ -0,0 +1,377 @@ +/******************************************************************************* +@File +@Title Client bridge header for mm +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for mm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef CLIENT_MM_BRIDGE_H +#define CLIENT_MM_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_mm_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRExportPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_HANDLE * + phPMRExport, + IMG_UINT64 * + pui64Size, + IMG_UINT32 * + pui32Log2Contig, + IMG_UINT64 * + pui64Password); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnexportPMR(IMG_HANDLE hBridge, + IMG_HANDLE + hPMRExport); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRGetUID(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_UINT64 * pui64UID); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRMakeLocalImportHandle(IMG_HANDLE + hBridge, + IMG_HANDLE + hBuffer, + IMG_HANDLE + * + phExtMem); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgePMRUnmakeLocalImportHandle(IMG_HANDLE hBridge, IMG_HANDLE hExtMem); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRImportPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMRExport, + IMG_UINT64 + ui64uiPassword, + IMG_UINT64 ui64uiSize, + IMG_UINT32 + ui32uiLog2Contig, + IMG_HANDLE * phPMR); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRLocalImportPMR(IMG_HANDLE + hBridge, + IMG_HANDLE + hExtHandle, + IMG_HANDLE * + phPMR, + IMG_DEVMEM_SIZE_T + * puiSize, + 
IMG_DEVMEM_ALIGN_T + * psAlign); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMR); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefUnlockPMR(IMG_HANDLE + hBridge, + IMG_HANDLE hPMR); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemNewRamBackedPMR(IMG_HANDLE + hBridge, + IMG_DEVMEM_SIZE_T + uiSize, + IMG_DEVMEM_SIZE_T + uiChunkSize, + IMG_UINT32 + ui32NumPhysChunks, + IMG_UINT32 + ui32NumVirtChunks, + IMG_UINT32 * + pui32MappingTable, + IMG_UINT32 + ui32Log2PageSize, + PVRSRV_MEMALLOCFLAGS_T + uiFlags, + IMG_UINT32 + ui32AnnotationLength, + const + IMG_CHAR * + puiAnnotation, + IMG_PID + ui32PID, + IMG_HANDLE * + phPMRPtr, + IMG_UINT32 + ui32PDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge, IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 * pui32MappingTable, + IMG_UINT32 ui32Log2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 ui32AnnotationLength, + const IMG_CHAR * puiAnnotation, + IMG_PID ui32PID, IMG_HANDLE * phPMRPtr, + IMG_UINT32 ui32PDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPin(IMG_HANDLE hBridge, + IMG_HANDLE hPMR); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpin(IMG_HANDLE hBridge, + IMG_HANDLE hPMR); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPinValidate(IMG_HANDLE + hBridge, + IMG_HANDLE + hMapping, + IMG_HANDLE + hPMR); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpinInvalidate(IMG_HANDLE + hBridge, + IMG_HANDLE + hMapping, + IMG_HANDLE + hPMR); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxCreate(IMG_HANDLE + hBridge, + IMG_BOOL + bbKernelMemoryCtx, + IMG_HANDLE * + phDevMemServerContext, + IMG_HANDLE * + phPrivData, + IMG_UINT32 * + pui32CPUCacheLineSize); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxDestroy(IMG_HANDLE + hBridge, + 
IMG_HANDLE + hDevmemServerContext); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapCreate(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_DEV_VIRTADDR + sHeapBaseAddr, + IMG_DEVMEM_SIZE_T + uiHeapLength, + IMG_UINT32 + ui32Log2DataPageSize, + IMG_HANDLE * + phDevmemHeapPtr); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapDestroy(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemHeap); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPMR(IMG_HANDLE hBridge, + IMG_HANDLE + hDevmemServerHeap, + IMG_HANDLE + hReservation, + IMG_HANDLE hPMR, + PVRSRV_MEMALLOCFLAGS_T + uiMapFlags, + IMG_HANDLE * + phMapping); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPMR(IMG_HANDLE + hBridge, + IMG_HANDLE + hMapping); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntReserveRange(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemServerHeap, + IMG_DEV_VIRTADDR + sAddress, + IMG_DEVMEM_SIZE_T + uiLength, + IMG_HANDLE * + phReservation); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnreserveRange(IMG_HANDLE + hBridge, + IMG_HANDLE + hReservation); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeChangeSparseMem(IMG_HANDLE hBridge, + IMG_HANDLE + hSrvDevMemHeap, + IMG_HANDLE hPMR, + IMG_UINT32 + ui32AllocPageCount, + IMG_UINT32 * + pui32AllocPageIndices, + IMG_UINT32 + ui32FreePageCount, + IMG_UINT32 * + pui32FreePageIndices, + IMG_UINT32 + ui32SparseFlags, + PVRSRV_MEMALLOCFLAGS_T + uiFlags, + IMG_DEV_VIRTADDR + sDevVAddr, + IMG_UINT64 + ui64CPUVAddr); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPages(IMG_HANDLE + hBridge, + IMG_HANDLE + hReservation, + IMG_HANDLE hPMR, + IMG_UINT32 + ui32PageCount, + IMG_UINT32 + ui32PhysicalPgOffset, + PVRSRV_MEMALLOCFLAGS_T + uiFlags, + IMG_DEV_VIRTADDR + sDevVAddr); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPages(IMG_HANDLE + hBridge, + IMG_HANDLE + hReservation, + IMG_DEV_VIRTADDR + sDevVAddr, + IMG_UINT32 + ui32PageCount); + +IMG_INTERNAL 
PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIsVDevAddrValid(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_DEV_VIRTADDR + sAddress); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemFlushDevSLCRange(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_DEV_VIRTADDR + sAddress, + IMG_DEVMEM_SIZE_T + uiSize, + IMG_BOOL + bInvalidate); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevmemInvalidateFBSCTable(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx, + IMG_UINT64 ui64FBSCEntries); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigCount(IMG_HANDLE + hBridge, + IMG_UINT32 * + pui32NumHeapConfigs); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapCount(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32HeapConfigIndex, + IMG_UINT32 * + pui32NumHeaps); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigName(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32HeapConfigIndex, + IMG_UINT32 + ui32HeapConfigNameBufSz, + IMG_CHAR * + puiHeapConfigName); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapDetails(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32HeapConfigIndex, + IMG_UINT32 + ui32HeapIndex, + IMG_UINT32 + ui32HeapNameBufSz, + IMG_CHAR * + puiHeapNameOut, + IMG_DEV_VIRTADDR + * + psDevVAddrBase, + IMG_DEVMEM_SIZE_T + * puiHeapLength, + IMG_DEVMEM_SIZE_T + * + puiReservedRegionLength, + IMG_UINT32 * + pui32Log2DataPageSizeOut, + IMG_UINT32 * + pui32Log2ImportAlignmentOut); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx, + IMG_UINT32 ui32PID, IMG_BOOL bRegister); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeGetMaxDevMemSize(IMG_HANDLE + hBridge, + IMG_DEVMEM_SIZE_T + * puiLMASize, + IMG_DEVMEM_SIZE_T + * puiUMASize); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemGetFaultAddress(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_DEV_VIRTADDR + * + psFaultAddress); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVUpdateOOMStats(IMG_HANDLE + hBridge, 
+ IMG_UINT32 + ui32ui32StatType, + IMG_PID + ui32pid); + +#endif /* CLIENT_MM_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/mm_bridge/client_mm_direct_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/mm_bridge/client_mm_direct_bridge.c new file mode 100644 index 000000000000..32affebeaa7a --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/mm_bridge/client_mm_direct_bridge.c @@ -0,0 +1,887 @@ +/******************************************************************************* +@File +@Title Direct client bridge for mm +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for mm + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include "client_mm_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "pvrsrv_memallocflags.h" +#include "devicemem_typedefs.h" + +#include "devicemem.h" +#include "devicemem_server.h" +#include "pmr.h" +#include "devicemem_heapcfg.h" +#include "physmem.h" +#include "devicemem_utils.h" +#include "process_stats.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRExportPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_HANDLE * + phPMRExport, + IMG_UINT64 * + pui64Size, + IMG_UINT32 * + pui32Log2Contig, + IMG_UINT64 * + pui64Password) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PMR_EXPORT *psPMRExportInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + PMRExportPMR(psPMRInt, + &psPMRExportInt, + pui64Size, pui32Log2Contig, pui64Password); + + *phPMRExport = psPMRExportInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnexportPMR(IMG_HANDLE hBridge, + IMG_HANDLE + hPMRExport) +{ + PVRSRV_ERROR eError; + PMR_EXPORT *psPMRExportInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRExportInt = (PMR_EXPORT *) hPMRExport; 
+ + eError = PMRUnexportPMR(psPMRExportInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRGetUID(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_UINT64 * pui64UID) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = PMRGetUID(psPMRInt, pui64UID); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRMakeLocalImportHandle(IMG_HANDLE + hBridge, + IMG_HANDLE + hBuffer, + IMG_HANDLE + * + phExtMem) +{ + PVRSRV_ERROR eError; + PMR *psBufferInt; + PMR *psExtMemInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psBufferInt = (PMR *) hBuffer; + + eError = PMRMakeLocalImportHandle(psBufferInt, &psExtMemInt); + + *phExtMem = psExtMemInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgePMRUnmakeLocalImportHandle(IMG_HANDLE hBridge, IMG_HANDLE hExtMem) +{ + PVRSRV_ERROR eError; + PMR *psExtMemInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psExtMemInt = (PMR *) hExtMem; + + eError = PMRUnmakeLocalImportHandle(psExtMemInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRImportPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMRExport, + IMG_UINT64 + ui64uiPassword, + IMG_UINT64 ui64uiSize, + IMG_UINT32 + ui32uiLog2Contig, + IMG_HANDLE * phPMR) +{ + PVRSRV_ERROR eError; + PMR_EXPORT *psPMRExportInt; + PMR *psPMRInt = NULL; + + psPMRExportInt = (PMR_EXPORT *) hPMRExport; + + eError = + PhysmemImportPMR(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + psPMRExportInt, + ui64uiPassword, + ui64uiSize, ui32uiLog2Contig, &psPMRInt); + + *phPMR = psPMRInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRLocalImportPMR(IMG_HANDLE + hBridge, + IMG_HANDLE + hExtHandle, + IMG_HANDLE * + phPMR, + IMG_DEVMEM_SIZE_T + * puiSize, + IMG_DEVMEM_ALIGN_T + * psAlign) +{ + PVRSRV_ERROR eError; + PMR *psExtHandleInt; + PMR *psPMRInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psExtHandleInt = (PMR *) hExtHandle; + + 
eError = PMRLocalImportPMR(psExtHandleInt, &psPMRInt, puiSize, psAlign); + + *phPMR = psPMRInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMR) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = PMRUnrefPMR(psPMRInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefUnlockPMR(IMG_HANDLE + hBridge, + IMG_HANDLE hPMR) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = PMRUnrefUnlockPMR(psPMRInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemNewRamBackedPMR(IMG_HANDLE + hBridge, + IMG_DEVMEM_SIZE_T + uiSize, + IMG_DEVMEM_SIZE_T + uiChunkSize, + IMG_UINT32 + ui32NumPhysChunks, + IMG_UINT32 + ui32NumVirtChunks, + IMG_UINT32 * + pui32MappingTable, + IMG_UINT32 + ui32Log2PageSize, + PVRSRV_MEMALLOCFLAGS_T + uiFlags, + IMG_UINT32 + ui32AnnotationLength, + const + IMG_CHAR * + puiAnnotation, + IMG_PID + ui32PID, + IMG_HANDLE * + phPMRPtr, + IMG_UINT32 + ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + PMR *psPMRPtrInt = NULL; + + eError = + PhysmemNewRamBackedPMR(NULL, + (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + uiSize, uiChunkSize, ui32NumPhysChunks, + ui32NumVirtChunks, pui32MappingTable, + ui32Log2PageSize, uiFlags, + ui32AnnotationLength, puiAnnotation, ui32PID, + &psPMRPtrInt, ui32PDumpFlags); + + *phPMRPtr = psPMRPtrInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge, IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 * pui32MappingTable, + IMG_UINT32 ui32Log2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 ui32AnnotationLength, + const IMG_CHAR * puiAnnotation, + IMG_PID ui32PID, IMG_HANDLE * phPMRPtr, + IMG_UINT32 ui32PDumpFlags) +{ + 
PVRSRV_ERROR eError; + PMR *psPMRPtrInt = NULL; + + eError = + PhysmemNewRamBackedLockedPMR(NULL, + (PVRSRV_DEVICE_NODE *) ((void *) + hBridge), + uiSize, uiChunkSize, ui32NumPhysChunks, + ui32NumVirtChunks, pui32MappingTable, + ui32Log2PageSize, uiFlags, + ui32AnnotationLength, puiAnnotation, + ui32PID, &psPMRPtrInt, ui32PDumpFlags); + + *phPMRPtr = psPMRPtrInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPin(IMG_HANDLE hBridge, + IMG_HANDLE hPMR) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = DevmemIntPin(psPMRInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpin(IMG_HANDLE hBridge, + IMG_HANDLE hPMR) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = DevmemIntUnpin(psPMRInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPinValidate(IMG_HANDLE + hBridge, + IMG_HANDLE + hMapping, + IMG_HANDLE + hPMR) +{ + PVRSRV_ERROR eError; + DEVMEMINT_MAPPING *psMappingInt; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psMappingInt = (DEVMEMINT_MAPPING *) hMapping; + psPMRInt = (PMR *) hPMR; + + eError = DevmemIntPinValidate(psMappingInt, psPMRInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpinInvalidate(IMG_HANDLE + hBridge, + IMG_HANDLE + hMapping, + IMG_HANDLE + hPMR) +{ + PVRSRV_ERROR eError; + DEVMEMINT_MAPPING *psMappingInt; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psMappingInt = (DEVMEMINT_MAPPING *) hMapping; + psPMRInt = (PMR *) hPMR; + + eError = DevmemIntUnpinInvalidate(psMappingInt, psPMRInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxCreate(IMG_HANDLE + hBridge, + IMG_BOOL + bbKernelMemoryCtx, + IMG_HANDLE * + phDevMemServerContext, + IMG_HANDLE * + phPrivData, + IMG_UINT32 * + pui32CPUCacheLineSize) +{ + 
PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevMemServerContextInt = NULL; + IMG_HANDLE hPrivDataInt = NULL; + + eError = + DevmemIntCtxCreate(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + bbKernelMemoryCtx, + &psDevMemServerContextInt, + &hPrivDataInt, pui32CPUCacheLineSize); + + *phDevMemServerContext = psDevMemServerContextInt; + *phPrivData = hPrivDataInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxDestroy(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemServerContext) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemServerContextInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemServerContextInt = (DEVMEMINT_CTX *) hDevmemServerContext; + + eError = DevmemIntCtxDestroy(psDevmemServerContextInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapCreate(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_DEV_VIRTADDR + sHeapBaseAddr, + IMG_DEVMEM_SIZE_T + uiHeapLength, + IMG_UINT32 + ui32Log2DataPageSize, + IMG_HANDLE * + phDevmemHeapPtr) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtxInt; + DEVMEMINT_HEAP *psDevmemHeapPtrInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = + DevmemIntHeapCreate(psDevmemCtxInt, + sHeapBaseAddr, + uiHeapLength, + ui32Log2DataPageSize, &psDevmemHeapPtrInt); + + *phDevmemHeapPtr = psDevmemHeapPtrInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapDestroy(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemHeap) +{ + PVRSRV_ERROR eError; + DEVMEMINT_HEAP *psDevmemHeapInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemHeapInt = (DEVMEMINT_HEAP *) hDevmemHeap; + + eError = DevmemIntHeapDestroy(psDevmemHeapInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPMR(IMG_HANDLE hBridge, + IMG_HANDLE + hDevmemServerHeap, + IMG_HANDLE + hReservation, + IMG_HANDLE hPMR, + PVRSRV_MEMALLOCFLAGS_T + uiMapFlags, + IMG_HANDLE * + phMapping) 
+{ + PVRSRV_ERROR eError; + DEVMEMINT_HEAP *psDevmemServerHeapInt; + DEVMEMINT_RESERVATION *psReservationInt; + PMR *psPMRInt; + DEVMEMINT_MAPPING *psMappingInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap; + psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; + psPMRInt = (PMR *) hPMR; + + eError = + DevmemIntMapPMR(psDevmemServerHeapInt, + psReservationInt, + psPMRInt, uiMapFlags, &psMappingInt); + + *phMapping = psMappingInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPMR(IMG_HANDLE + hBridge, + IMG_HANDLE + hMapping) +{ + PVRSRV_ERROR eError; + DEVMEMINT_MAPPING *psMappingInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psMappingInt = (DEVMEMINT_MAPPING *) hMapping; + + eError = DevmemIntUnmapPMR(psMappingInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntReserveRange(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemServerHeap, + IMG_DEV_VIRTADDR + sAddress, + IMG_DEVMEM_SIZE_T + uiLength, + IMG_HANDLE * + phReservation) +{ + PVRSRV_ERROR eError; + DEVMEMINT_HEAP *psDevmemServerHeapInt; + DEVMEMINT_RESERVATION *psReservationInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap; + + eError = + DevmemIntReserveRange(psDevmemServerHeapInt, + sAddress, uiLength, &psReservationInt); + + *phReservation = psReservationInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnreserveRange(IMG_HANDLE + hBridge, + IMG_HANDLE + hReservation) +{ + PVRSRV_ERROR eError; + DEVMEMINT_RESERVATION *psReservationInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; + + eError = DevmemIntUnreserveRange(psReservationInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeChangeSparseMem(IMG_HANDLE hBridge, + IMG_HANDLE + hSrvDevMemHeap, + IMG_HANDLE hPMR, + IMG_UINT32 + 
ui32AllocPageCount, + IMG_UINT32 * + pui32AllocPageIndices, + IMG_UINT32 + ui32FreePageCount, + IMG_UINT32 * + pui32FreePageIndices, + IMG_UINT32 + ui32SparseFlags, + PVRSRV_MEMALLOCFLAGS_T + uiFlags, + IMG_DEV_VIRTADDR + sDevVAddr, + IMG_UINT64 + ui64CPUVAddr) +{ + PVRSRV_ERROR eError; + DEVMEMINT_HEAP *psSrvDevMemHeapInt; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSrvDevMemHeapInt = (DEVMEMINT_HEAP *) hSrvDevMemHeap; + psPMRInt = (PMR *) hPMR; + + eError = + DevmemIntChangeSparse(psSrvDevMemHeapInt, + psPMRInt, + ui32AllocPageCount, + pui32AllocPageIndices, + ui32FreePageCount, + pui32FreePageIndices, + ui32SparseFlags, + uiFlags, sDevVAddr, ui64CPUVAddr); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPages(IMG_HANDLE + hBridge, + IMG_HANDLE + hReservation, + IMG_HANDLE hPMR, + IMG_UINT32 + ui32PageCount, + IMG_UINT32 + ui32PhysicalPgOffset, + PVRSRV_MEMALLOCFLAGS_T + uiFlags, + IMG_DEV_VIRTADDR + sDevVAddr) +{ + PVRSRV_ERROR eError; + DEVMEMINT_RESERVATION *psReservationInt; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; + psPMRInt = (PMR *) hPMR; + + eError = + DevmemIntMapPages(psReservationInt, + psPMRInt, + ui32PageCount, + ui32PhysicalPgOffset, uiFlags, sDevVAddr); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPages(IMG_HANDLE + hBridge, + IMG_HANDLE + hReservation, + IMG_DEV_VIRTADDR + sDevVAddr, + IMG_UINT32 + ui32PageCount) +{ + PVRSRV_ERROR eError; + DEVMEMINT_RESERVATION *psReservationInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; + + eError = + DevmemIntUnmapPages(psReservationInt, sDevVAddr, ui32PageCount); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIsVDevAddrValid(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_DEV_VIRTADDR + sAddress) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX 
*psDevmemCtxInt; + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = + DevmemIntIsVDevAddrValid(NULL, + (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + psDevmemCtxInt, sAddress); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemFlushDevSLCRange(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_DEV_VIRTADDR + sAddress, + IMG_DEVMEM_SIZE_T + uiSize, + IMG_BOOL + bInvalidate) +{ +#if defined(RGX_SRV_SLC_RANGEBASED_CFI_SUPPORTED) + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtxInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = + DevmemIntFlushDevSLCRange(psDevmemCtxInt, + sAddress, uiSize, bInvalidate); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(hDevmemCtx); + PVR_UNREFERENCED_PARAMETER(sAddress); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(bInvalidate); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevmemInvalidateFBSCTable(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx, + IMG_UINT64 ui64FBSCEntries) +{ +#if defined(RGX_FEATURE_FBCDC) + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtxInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = DevmemIntInvalidateFBSCTable(psDevmemCtxInt, ui64FBSCEntries); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(hDevmemCtx); + PVR_UNREFERENCED_PARAMETER(ui64FBSCEntries); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigCount(IMG_HANDLE + hBridge, + IMG_UINT32 * + pui32NumHeapConfigs) +{ + PVRSRV_ERROR eError; + + eError = + HeapCfgHeapConfigCount(NULL, + (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + pui32NumHeapConfigs); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapCount(IMG_HANDLE + hBridge, + IMG_UINT32 + 
ui32HeapConfigIndex, + IMG_UINT32 * + pui32NumHeaps) +{ + PVRSRV_ERROR eError; + + eError = + HeapCfgHeapCount(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + ui32HeapConfigIndex, pui32NumHeaps); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigName(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32HeapConfigIndex, + IMG_UINT32 + ui32HeapConfigNameBufSz, + IMG_CHAR * + puiHeapConfigName) +{ + PVRSRV_ERROR eError; + + eError = + HeapCfgHeapConfigName(NULL, + (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + ui32HeapConfigIndex, ui32HeapConfigNameBufSz, + puiHeapConfigName); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapDetails(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32HeapConfigIndex, + IMG_UINT32 + ui32HeapIndex, + IMG_UINT32 + ui32HeapNameBufSz, + IMG_CHAR * + puiHeapNameOut, + IMG_DEV_VIRTADDR + * + psDevVAddrBase, + IMG_DEVMEM_SIZE_T + * puiHeapLength, + IMG_DEVMEM_SIZE_T + * + puiReservedRegionLength, + IMG_UINT32 * + pui32Log2DataPageSizeOut, + IMG_UINT32 * + pui32Log2ImportAlignmentOut) +{ + PVRSRV_ERROR eError; + + eError = + HeapCfgHeapDetails(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + ui32HeapConfigIndex, + ui32HeapIndex, + ui32HeapNameBufSz, + puiHeapNameOut, + psDevVAddrBase, + puiHeapLength, + puiReservedRegionLength, + pui32Log2DataPageSizeOut, + pui32Log2ImportAlignmentOut); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx, + IMG_UINT32 ui32PID, IMG_BOOL bRegister) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtxInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = + DevmemIntRegisterPFNotifyKM(psDevmemCtxInt, ui32PID, bRegister); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeGetMaxDevMemSize(IMG_HANDLE + hBridge, + IMG_DEVMEM_SIZE_T + * puiLMASize, + IMG_DEVMEM_SIZE_T + * puiUMASize) +{ + PVRSRV_ERROR eError; + + 
eError = + PVRSRVGetMaxDevMemSizeKM(NULL, + (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + puiLMASize, puiUMASize); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemGetFaultAddress(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_DEV_VIRTADDR + * + psFaultAddress) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtxInt; + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = + DevmemIntGetFaultAddress(NULL, + (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + psDevmemCtxInt, psFaultAddress); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVUpdateOOMStats(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32ui32StatType, + IMG_PID + ui32pid) +{ +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = PVRSRVServerUpdateOOMStats(ui32ui32StatType, ui32pid); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(ui32ui32StatType); + PVR_UNREFERENCED_PARAMETER(ui32pid); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/mm_bridge/common_mm_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/mm_bridge/common_mm_bridge.h new file mode 100644 index 000000000000..f622e3aaf507 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/mm_bridge/common_mm_bridge.h @@ -0,0 +1,782 @@ +/******************************************************************************* +@File +@Title Common bridge header for mm +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for mm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_MM_BRIDGE_H +#define COMMON_MM_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "pvrsrv_memallocflags.h" +#include "devicemem_typedefs.h" + +#define PVRSRV_BRIDGE_MM_CMD_FIRST 0 +#define PVRSRV_BRIDGE_MM_PMREXPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+0 +#define PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+1 +#define PVRSRV_BRIDGE_MM_PMRGETUID PVRSRV_BRIDGE_MM_CMD_FIRST+2 +#define PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE PVRSRV_BRIDGE_MM_CMD_FIRST+3 +#define PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE PVRSRV_BRIDGE_MM_CMD_FIRST+4 +#define PVRSRV_BRIDGE_MM_PMRIMPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+5 +#define PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+6 +#define PVRSRV_BRIDGE_MM_PMRUNREFPMR PVRSRV_BRIDGE_MM_CMD_FIRST+7 +#define PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR PVRSRV_BRIDGE_MM_CMD_FIRST+8 +#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR PVRSRV_BRIDGE_MM_CMD_FIRST+9 +#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR PVRSRV_BRIDGE_MM_CMD_FIRST+10 +#define PVRSRV_BRIDGE_MM_DEVMEMINTPIN PVRSRV_BRIDGE_MM_CMD_FIRST+11 +#define PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN PVRSRV_BRIDGE_MM_CMD_FIRST+12 +#define PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE PVRSRV_BRIDGE_MM_CMD_FIRST+13 +#define PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE PVRSRV_BRIDGE_MM_CMD_FIRST+14 +#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+15 +#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+16 +#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+17 +#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+18 +#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+19 +#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+20 +#define PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+21 +#define PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE 
PVRSRV_BRIDGE_MM_CMD_FIRST+22 +#define PVRSRV_BRIDGE_MM_CHANGESPARSEMEM PVRSRV_BRIDGE_MM_CMD_FIRST+23 +#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+24 +#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+25 +#define PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID PVRSRV_BRIDGE_MM_CMD_FIRST+26 +#define PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE PVRSRV_BRIDGE_MM_CMD_FIRST+27 +#define PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE PVRSRV_BRIDGE_MM_CMD_FIRST+28 +#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+29 +#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+30 +#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME PVRSRV_BRIDGE_MM_CMD_FIRST+31 +#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS PVRSRV_BRIDGE_MM_CMD_FIRST+32 +#define PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM PVRSRV_BRIDGE_MM_CMD_FIRST+33 +#define PVRSRV_BRIDGE_MM_GETMAXDEVMEMSIZE PVRSRV_BRIDGE_MM_CMD_FIRST+34 +#define PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS PVRSRV_BRIDGE_MM_CMD_FIRST+35 +#define PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS PVRSRV_BRIDGE_MM_CMD_FIRST+36 +#define PVRSRV_BRIDGE_MM_CMD_LAST (PVRSRV_BRIDGE_MM_CMD_FIRST+36) + +/******************************************* + PMRExportPMR + *******************************************/ + +/* Bridge in structure for PMRExportPMR */ +typedef struct PVRSRV_BRIDGE_IN_PMREXPORTPMR_TAG +{ + IMG_HANDLE hPMR; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMREXPORTPMR; + +/* Bridge out structure for PMRExportPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PMREXPORTPMR_TAG +{ + IMG_HANDLE hPMRExport; + IMG_UINT64 ui64Size; + IMG_UINT32 ui32Log2Contig; + IMG_UINT64 ui64Password; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMREXPORTPMR; + +/******************************************* + PMRUnexportPMR + *******************************************/ + +/* Bridge in structure for PMRUnexportPMR */ +typedef struct PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR_TAG +{ + IMG_HANDLE hPMRExport; +} 
__attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR; + +/* Bridge out structure for PMRUnexportPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR; + +/******************************************* + PMRGetUID + *******************************************/ + +/* Bridge in structure for PMRGetUID */ +typedef struct PVRSRV_BRIDGE_IN_PMRGETUID_TAG +{ + IMG_HANDLE hPMR; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRGETUID; + +/* Bridge out structure for PMRGetUID */ +typedef struct PVRSRV_BRIDGE_OUT_PMRGETUID_TAG +{ + IMG_UINT64 ui64UID; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRGETUID; + +/******************************************* + PMRMakeLocalImportHandle + *******************************************/ + +/* Bridge in structure for PMRMakeLocalImportHandle */ +typedef struct PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE_TAG +{ + IMG_HANDLE hBuffer; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE; + +/* Bridge out structure for PMRMakeLocalImportHandle */ +typedef struct PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE_TAG +{ + IMG_HANDLE hExtMem; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE; + +/******************************************* + PMRUnmakeLocalImportHandle + *******************************************/ + +/* Bridge in structure for PMRUnmakeLocalImportHandle */ +typedef struct PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE_TAG +{ + IMG_HANDLE hExtMem; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE; + +/* Bridge out structure for PMRUnmakeLocalImportHandle */ +typedef struct PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE; + +/******************************************* + PMRImportPMR + *******************************************/ + +/* Bridge in 
structure for PMRImportPMR */ +typedef struct PVRSRV_BRIDGE_IN_PMRIMPORTPMR_TAG +{ + IMG_HANDLE hPMRExport; + IMG_UINT64 ui64uiPassword; + IMG_UINT64 ui64uiSize; + IMG_UINT32 ui32uiLog2Contig; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRIMPORTPMR; + +/* Bridge out structure for PMRImportPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PMRIMPORTPMR_TAG +{ + IMG_HANDLE hPMR; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRIMPORTPMR; + +/******************************************* + PMRLocalImportPMR + *******************************************/ + +/* Bridge in structure for PMRLocalImportPMR */ +typedef struct PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR_TAG +{ + IMG_HANDLE hExtHandle; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR; + +/* Bridge out structure for PMRLocalImportPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR_TAG +{ + IMG_HANDLE hPMR; + IMG_DEVMEM_SIZE_T uiSize; + IMG_DEVMEM_ALIGN_T sAlign; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR; + +/******************************************* + PMRUnrefPMR + *******************************************/ + +/* Bridge in structure for PMRUnrefPMR */ +typedef struct PVRSRV_BRIDGE_IN_PMRUNREFPMR_TAG +{ + IMG_HANDLE hPMR; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRUNREFPMR; + +/* Bridge out structure for PMRUnrefPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFPMR_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRUNREFPMR; + +/******************************************* + PMRUnrefUnlockPMR + *******************************************/ + +/* Bridge in structure for PMRUnrefUnlockPMR */ +typedef struct PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR_TAG +{ + IMG_HANDLE hPMR; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR; + +/* Bridge out structure for PMRUnrefUnlockPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) 
PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR; + +/******************************************* + PhysmemNewRamBackedPMR + *******************************************/ + +/* Bridge in structure for PhysmemNewRamBackedPMR */ +typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR_TAG +{ + IMG_DEVMEM_SIZE_T uiSize; + IMG_DEVMEM_SIZE_T uiChunkSize; + IMG_UINT32 ui32NumPhysChunks; + IMG_UINT32 ui32NumVirtChunks; + IMG_UINT32 *pui32MappingTable; + IMG_UINT32 ui32Log2PageSize; + PVRSRV_MEMALLOCFLAGS_T uiFlags; + IMG_UINT32 ui32AnnotationLength; + const IMG_CHAR *puiAnnotation; + IMG_PID ui32PID; + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR; + +/* Bridge out structure for PhysmemNewRamBackedPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR_TAG +{ + IMG_HANDLE hPMRPtr; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR; + +/******************************************* + PhysmemNewRamBackedLockedPMR + *******************************************/ + +/* Bridge in structure for PhysmemNewRamBackedLockedPMR */ +typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG +{ + IMG_DEVMEM_SIZE_T uiSize; + IMG_DEVMEM_SIZE_T uiChunkSize; + IMG_UINT32 ui32NumPhysChunks; + IMG_UINT32 ui32NumVirtChunks; + IMG_UINT32 *pui32MappingTable; + IMG_UINT32 ui32Log2PageSize; + PVRSRV_MEMALLOCFLAGS_T uiFlags; + IMG_UINT32 ui32AnnotationLength; + const IMG_CHAR *puiAnnotation; + IMG_PID ui32PID; + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR; + +/* Bridge out structure for PhysmemNewRamBackedLockedPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG +{ + IMG_HANDLE hPMRPtr; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR; + +/******************************************* + DevmemIntPin + *******************************************/ + +/* Bridge in structure for DevmemIntPin 
*/ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPIN_TAG +{ + IMG_HANDLE hPMR; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTPIN; + +/* Bridge out structure for DevmemIntPin */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPIN_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTPIN; + +/******************************************* + DevmemIntUnpin + *******************************************/ + +/* Bridge in structure for DevmemIntUnpin */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN_TAG +{ + IMG_HANDLE hPMR; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN; + +/* Bridge out structure for DevmemIntUnpin */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN; + +/******************************************* + DevmemIntPinValidate + *******************************************/ + +/* Bridge in structure for DevmemIntPinValidate */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE_TAG +{ + IMG_HANDLE hMapping; + IMG_HANDLE hPMR; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE; + +/* Bridge out structure for DevmemIntPinValidate */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE; + +/******************************************* + DevmemIntUnpinInvalidate + *******************************************/ + +/* Bridge in structure for DevmemIntUnpinInvalidate */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE_TAG +{ + IMG_HANDLE hMapping; + IMG_HANDLE hPMR; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE; + +/* Bridge out structure for DevmemIntUnpinInvalidate */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE; + +/******************************************* + DevmemIntCtxCreate 
+ *******************************************/ + +/* Bridge in structure for DevmemIntCtxCreate */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE_TAG +{ + IMG_BOOL bbKernelMemoryCtx; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE; + +/* Bridge out structure for DevmemIntCtxCreate */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE_TAG +{ + IMG_HANDLE hDevMemServerContext; + IMG_HANDLE hPrivData; + IMG_UINT32 ui32CPUCacheLineSize; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE; + +/******************************************* + DevmemIntCtxDestroy + *******************************************/ + +/* Bridge in structure for DevmemIntCtxDestroy */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY_TAG +{ + IMG_HANDLE hDevmemServerContext; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY; + +/* Bridge out structure for DevmemIntCtxDestroy */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY; + +/******************************************* + DevmemIntHeapCreate + *******************************************/ + +/* Bridge in structure for DevmemIntHeapCreate */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE_TAG +{ + IMG_HANDLE hDevmemCtx; + IMG_DEV_VIRTADDR sHeapBaseAddr; + IMG_DEVMEM_SIZE_T uiHeapLength; + IMG_UINT32 ui32Log2DataPageSize; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE; + +/* Bridge out structure for DevmemIntHeapCreate */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE_TAG +{ + IMG_HANDLE hDevmemHeapPtr; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE; + +/******************************************* + DevmemIntHeapDestroy + *******************************************/ + +/* Bridge in structure for DevmemIntHeapDestroy */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY_TAG +{ + IMG_HANDLE 
hDevmemHeap; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY; + +/* Bridge out structure for DevmemIntHeapDestroy */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY; + +/******************************************* + DevmemIntMapPMR + *******************************************/ + +/* Bridge in structure for DevmemIntMapPMR */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR_TAG +{ + IMG_HANDLE hDevmemServerHeap; + IMG_HANDLE hReservation; + IMG_HANDLE hPMR; + PVRSRV_MEMALLOCFLAGS_T uiMapFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR; + +/* Bridge out structure for DevmemIntMapPMR */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR_TAG +{ + IMG_HANDLE hMapping; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR; + +/******************************************* + DevmemIntUnmapPMR + *******************************************/ + +/* Bridge in structure for DevmemIntUnmapPMR */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR_TAG +{ + IMG_HANDLE hMapping; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR; + +/* Bridge out structure for DevmemIntUnmapPMR */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR; + +/******************************************* + DevmemIntReserveRange + *******************************************/ + +/* Bridge in structure for DevmemIntReserveRange */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE_TAG +{ + IMG_HANDLE hDevmemServerHeap; + IMG_DEV_VIRTADDR sAddress; + IMG_DEVMEM_SIZE_T uiLength; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE; + +/* Bridge out structure for DevmemIntReserveRange */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE_TAG +{ + IMG_HANDLE hReservation; + PVRSRV_ERROR eError; +} __attribute__ 
((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE; + +/******************************************* + DevmemIntUnreserveRange + *******************************************/ + +/* Bridge in structure for DevmemIntUnreserveRange */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE_TAG +{ + IMG_HANDLE hReservation; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE; + +/* Bridge out structure for DevmemIntUnreserveRange */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE; + +/******************************************* + ChangeSparseMem + *******************************************/ + +/* Bridge in structure for ChangeSparseMem */ +typedef struct PVRSRV_BRIDGE_IN_CHANGESPARSEMEM_TAG +{ + IMG_HANDLE hSrvDevMemHeap; + IMG_HANDLE hPMR; + IMG_UINT32 ui32AllocPageCount; + IMG_UINT32 *pui32AllocPageIndices; + IMG_UINT32 ui32FreePageCount; + IMG_UINT32 *pui32FreePageIndices; + IMG_UINT32 ui32SparseFlags; + PVRSRV_MEMALLOCFLAGS_T uiFlags; + IMG_DEV_VIRTADDR sDevVAddr; + IMG_UINT64 ui64CPUVAddr; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_CHANGESPARSEMEM; + +/* Bridge out structure for ChangeSparseMem */ +typedef struct PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM; + +/******************************************* + DevmemIntMapPages + *******************************************/ + +/* Bridge in structure for DevmemIntMapPages */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES_TAG +{ + IMG_HANDLE hReservation; + IMG_HANDLE hPMR; + IMG_UINT32 ui32PageCount; + IMG_UINT32 ui32PhysicalPgOffset; + PVRSRV_MEMALLOCFLAGS_T uiFlags; + IMG_DEV_VIRTADDR sDevVAddr; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES; + +/* Bridge out structure for DevmemIntMapPages */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ 
((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES; + +/******************************************* + DevmemIntUnmapPages + *******************************************/ + +/* Bridge in structure for DevmemIntUnmapPages */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES_TAG +{ + IMG_HANDLE hReservation; + IMG_DEV_VIRTADDR sDevVAddr; + IMG_UINT32 ui32PageCount; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES; + +/* Bridge out structure for DevmemIntUnmapPages */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES; + +/******************************************* + DevmemIsVDevAddrValid + *******************************************/ + +/* Bridge in structure for DevmemIsVDevAddrValid */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID_TAG +{ + IMG_HANDLE hDevmemCtx; + IMG_DEV_VIRTADDR sAddress; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID; + +/* Bridge out structure for DevmemIsVDevAddrValid */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID; + +/******************************************* + DevmemFlushDevSLCRange + *******************************************/ + +/* Bridge in structure for DevmemFlushDevSLCRange */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE_TAG +{ + IMG_HANDLE hDevmemCtx; + IMG_DEV_VIRTADDR sAddress; + IMG_DEVMEM_SIZE_T uiSize; + IMG_BOOL bInvalidate; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE; + +/* Bridge out structure for DevmemFlushDevSLCRange */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE; + +/******************************************* + DevmemInvalidateFBSCTable + *******************************************/ + +/* Bridge in structure for 
DevmemInvalidateFBSCTable */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE_TAG +{ + IMG_HANDLE hDevmemCtx; + IMG_UINT64 ui64FBSCEntries; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE; + +/* Bridge out structure for DevmemInvalidateFBSCTable */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE; + +/******************************************* + HeapCfgHeapConfigCount + *******************************************/ + +/* Bridge in structure for HeapCfgHeapConfigCount */ +typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT; + +/* Bridge out structure for HeapCfgHeapConfigCount */ +typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT_TAG +{ + IMG_UINT32 ui32NumHeapConfigs; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT; + +/******************************************* + HeapCfgHeapCount + *******************************************/ + +/* Bridge in structure for HeapCfgHeapCount */ +typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT_TAG +{ + IMG_UINT32 ui32HeapConfigIndex; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT; + +/* Bridge out structure for HeapCfgHeapCount */ +typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT_TAG +{ + IMG_UINT32 ui32NumHeaps; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT; + +/******************************************* + HeapCfgHeapConfigName + *******************************************/ + +/* Bridge in structure for HeapCfgHeapConfigName */ +typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME_TAG +{ + IMG_UINT32 ui32HeapConfigIndex; + IMG_UINT32 ui32HeapConfigNameBufSz; + /* Output pointer puiHeapConfigName is also an implied input */ + IMG_CHAR *puiHeapConfigName; +} 
__attribute__ ((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME; + +/* Bridge out structure for HeapCfgHeapConfigName */ +typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME_TAG +{ + IMG_CHAR *puiHeapConfigName; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME; + +/******************************************* + HeapCfgHeapDetails + *******************************************/ + +/* Bridge in structure for HeapCfgHeapDetails */ +typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS_TAG +{ + IMG_UINT32 ui32HeapConfigIndex; + IMG_UINT32 ui32HeapIndex; + IMG_UINT32 ui32HeapNameBufSz; + /* Output pointer puiHeapNameOut is also an implied input */ + IMG_CHAR *puiHeapNameOut; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS; + +/* Bridge out structure for HeapCfgHeapDetails */ +typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS_TAG +{ + IMG_CHAR *puiHeapNameOut; + IMG_DEV_VIRTADDR sDevVAddrBase; + IMG_DEVMEM_SIZE_T uiHeapLength; + IMG_DEVMEM_SIZE_T uiReservedRegionLength; + IMG_UINT32 ui32Log2DataPageSizeOut; + IMG_UINT32 ui32Log2ImportAlignmentOut; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS; + +/******************************************* + DevmemIntRegisterPFNotifyKM + *******************************************/ + +/* Bridge in structure for DevmemIntRegisterPFNotifyKM */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM_TAG +{ + IMG_HANDLE hDevmemCtx; + IMG_UINT32 ui32PID; + IMG_BOOL bRegister; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM; + +/* Bridge out structure for DevmemIntRegisterPFNotifyKM */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM; + +/******************************************* + GetMaxDevMemSize + *******************************************/ + +/* Bridge in structure for GetMaxDevMemSize */ 
+typedef struct PVRSRV_BRIDGE_IN_GETMAXDEVMEMSIZE_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_GETMAXDEVMEMSIZE; + +/* Bridge out structure for GetMaxDevMemSize */ +typedef struct PVRSRV_BRIDGE_OUT_GETMAXDEVMEMSIZE_TAG +{ + IMG_DEVMEM_SIZE_T uiLMASize; + IMG_DEVMEM_SIZE_T uiUMASize; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_GETMAXDEVMEMSIZE; + +/******************************************* + DevmemGetFaultAddress + *******************************************/ + +/* Bridge in structure for DevmemGetFaultAddress */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS_TAG +{ + IMG_HANDLE hDevmemCtx; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS; + +/* Bridge out structure for DevmemGetFaultAddress */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS_TAG +{ + IMG_DEV_VIRTADDR sFaultAddress; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS; + +/******************************************* + PVRSRVUpdateOOMStats + *******************************************/ + +/* Bridge in structure for PVRSRVUpdateOOMStats */ +typedef struct PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS_TAG +{ + IMG_UINT32 ui32ui32StatType; + IMG_PID ui32pid; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS; + +/* Bridge out structure for PVRSRVUpdateOOMStats */ +typedef struct PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS; + +#endif /* COMMON_MM_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/mm_bridge/server_mm_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/mm_bridge/server_mm_bridge.c new file mode 100644 index 000000000000..c8b6bb5c2d03 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/mm_bridge/server_mm_bridge.c @@ -0,0 +1,3502 @@ +/******************************************************************************* +@File +@Title 
Server bridge for mm +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for mm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "devicemem.h" +#include "devicemem_server.h" +#include "pmr.h" +#include "devicemem_heapcfg.h" +#include "physmem.h" +#include "devicemem_utils.h" +#include "process_stats.h" + +#include "common_mm_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +static PVRSRV_ERROR ReleasePMRExport(void *pvData) +{ + PVR_UNREFERENCED_PARAMETER(pvData); + + return PVRSRV_OK; +} + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _PMRExportPMRpsPMRExportIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnexportPMR((PMR_EXPORT *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePMRExportPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRExportPMRIN_UI8, + IMG_UINT8 * psPMRExportPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMREXPORTPMR *psPMRExportPMRIN = + (PVRSRV_BRIDGE_IN_PMREXPORTPMR *) + IMG_OFFSET_ADDR(psPMRExportPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMREXPORTPMR *psPMRExportPMROUT = + (PVRSRV_BRIDGE_OUT_PMREXPORTPMR *) + IMG_OFFSET_ADDR(psPMRExportPMROUT_UI8, 0); + + IMG_HANDLE hPMR = 
psPMRExportPMRIN->hPMR; + PMR *psPMRInt = NULL; + PMR_EXPORT *psPMRExportInt = NULL; + IMG_HANDLE hPMRExportInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRExportPMROUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRExportPMR_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPMRExportPMROUT->eError = + PMRExportPMR(psPMRInt, + &psPMRExportInt, + &psPMRExportPMROUT->ui64Size, + &psPMRExportPMROUT->ui32Log2Contig, + &psPMRExportPMROUT->ui64Password); + /* Exit early if bridged call fails */ + if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK)) + { + goto PMRExportPMR_exit; + } + + /* + * For cases where we need a cross process handle we actually allocate two. + * + * The first one is a connection specific handle and it gets given the real + * release function. This handle does *NOT* get returned to the caller. It's + * purpose is to release any leaked resources when we either have a bad or + * abnormally terminated client. If we didn't do this then the resource + * wouldn't be freed until driver unload. If the resource is freed normally, + * this handle can be looked up via the cross process handle and then + * released accordingly. + * + * The second one is a cross process handle and it gets given a noop release + * function. This handle does get returned to the caller. + */ + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + + psPMRExportPMROUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase-> + psHandleBase, &hPMRExportInt, + (void *)psPMRExportInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PMRExportPMRpsPMRExportIntRelease); + if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + goto PMRExportPMR_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + + /* Lock over handle creation. */ + LockHandle(KERNEL_HANDLE_BASE); + psPMRExportPMROUT->eError = + PVRSRVAllocHandleUnlocked(KERNEL_HANDLE_BASE, + &psPMRExportPMROUT->hPMRExport, + (void *)psPMRExportInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & ReleasePMRExport); + if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(KERNEL_HANDLE_BASE); + goto PMRExportPMR_exit; + } + /* Release now we have created handles. */ + UnlockHandle(KERNEL_HANDLE_BASE); + +PMRExportPMR_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psPMRExportPMROUT->eError != PVRSRV_OK) + { + if (psPMRExportPMROUT->hPMRExport) + { + PVRSRV_ERROR eError; + + /* Lock over handle creation cleanup. 
*/ + LockHandle(KERNEL_HANDLE_BASE); + + eError = PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE, + (IMG_HANDLE) + psPMRExportPMROUT-> + hPMRExport, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); + if (unlikely + ((eError != PVRSRV_OK) + && (eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) + || (eError == PVRSRV_ERROR_RETRY)); + + /* Release now we have cleaned up creation handles. */ + UnlockHandle(KERNEL_HANDLE_BASE); + + } + + if (hPMRExportInt) + { + PVRSRV_ERROR eError; + /* Lock over handle creation cleanup. */ + LockHandle(psConnection->psProcessHandleBase-> + psHandleBase); + + eError = + PVRSRVReleaseHandleUnlocked(psConnection-> + psProcessHandleBase-> + psHandleBase, + hPMRExportInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); + if ((eError != PVRSRV_OK) + && (eError != PVRSRV_ERROR_RETRY)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) + || (eError == PVRSRV_ERROR_RETRY)); + + /* Avoid freeing/destroying/releasing the resource a second time below */ + psPMRExportInt = NULL; + /* Release now we have cleaned up creation handles. 
*/ + UnlockHandle(psConnection->psProcessHandleBase-> + psHandleBase); + } + + if (psPMRExportInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnexportPMR(psPMRExportInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRUnexportPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRUnexportPMRIN_UI8, + IMG_UINT8 * psPMRUnexportPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR *psPMRUnexportPMRIN = + (PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR *) + IMG_OFFSET_ADDR(psPMRUnexportPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR *psPMRUnexportPMROUT = + (PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR *) + IMG_OFFSET_ADDR(psPMRUnexportPMROUT_UI8, 0); + + PMR_EXPORT *psPMRExportInt = NULL; + IMG_HANDLE hPMRExportInt = NULL; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + /* Lock over handle destruction. */ + LockHandle(KERNEL_HANDLE_BASE); + psPMRUnexportPMROUT->eError = + PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE, + (void **)&psPMRExportInt, + (IMG_HANDLE) psPMRUnexportPMRIN-> + hPMRExport, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, + IMG_FALSE); + if (unlikely(psPMRUnexportPMROUT->eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psPMRUnexportPMROUT->eError))); + } + PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK); + + /* Release now we have destroyed handles. */ + UnlockHandle(KERNEL_HANDLE_BASE); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + /* + * Find the connection specific handle that represents the same data + * as the cross process handle as releasing it will actually call the + * data's real release function (see the function where the cross + * process handle is allocated for more details). 
+ */ + psPMRUnexportPMROUT->eError = + PVRSRVFindHandleUnlocked(psConnection->psProcessHandleBase-> + psHandleBase, &hPMRExportInt, + psPMRExportInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); + if (unlikely(psPMRUnexportPMROUT->eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psPMRUnexportPMROUT->eError))); + } + PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK); + + psPMRUnexportPMROUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psProcessHandleBase-> + psHandleBase, hPMRExportInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); + if (unlikely + ((psPMRUnexportPMROUT->eError != PVRSRV_OK) + && (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psPMRUnexportPMROUT->eError))); + } + PVR_ASSERT((psPMRUnexportPMROUT->eError == PVRSRV_OK) || + (psPMRUnexportPMROUT->eError == PVRSRV_ERROR_RETRY)); + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + + /* Lock over handle destruction. */ + LockHandle(KERNEL_HANDLE_BASE); + + psPMRUnexportPMROUT->eError = + PVRSRVReleaseHandleStagedUnlock(KERNEL_HANDLE_BASE, + (IMG_HANDLE) psPMRUnexportPMRIN-> + hPMRExport, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); + if (unlikely + ((psPMRUnexportPMROUT->eError != PVRSRV_OK) + && (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psPMRUnexportPMROUT->eError))); + UnlockHandle(KERNEL_HANDLE_BASE); + goto PMRUnexportPMR_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(KERNEL_HANDLE_BASE); + +PMRUnexportPMR_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRGetUID(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRGetUIDIN_UI8, + IMG_UINT8 * psPMRGetUIDOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRGETUID *psPMRGetUIDIN = + (PVRSRV_BRIDGE_IN_PMRGETUID *) IMG_OFFSET_ADDR(psPMRGetUIDIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_PMRGETUID *psPMRGetUIDOUT = + (PVRSRV_BRIDGE_OUT_PMRGETUID *) IMG_OFFSET_ADDR(psPMRGetUIDOUT_UI8, + 0); + + IMG_HANDLE hPMR = psPMRGetUIDIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRGetUIDOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psPMRGetUIDOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRGetUID_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPMRGetUIDOUT->eError = PMRGetUID(psPMRInt, &psPMRGetUIDOUT->ui64UID); + +PMRGetUID_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static PVRSRV_ERROR _PMRMakeLocalImportHandlepsExtMemIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnmakeLocalImportHandle((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePMRMakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psPMRMakeLocalImportHandleIN_UI8, + IMG_UINT8 * + psPMRMakeLocalImportHandleOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE *psPMRMakeLocalImportHandleIN + = + (PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE *) + IMG_OFFSET_ADDR(psPMRMakeLocalImportHandleIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE + *psPMRMakeLocalImportHandleOUT = + (PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE *) + IMG_OFFSET_ADDR(psPMRMakeLocalImportHandleOUT_UI8, 0); + + IMG_HANDLE hBuffer = psPMRMakeLocalImportHandleIN->hBuffer; + PMR *psBufferInt = NULL; + PMR *psExtMemInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRMakeLocalImportHandleOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psBufferInt, + hBuffer, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, + IMG_TRUE); + if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRMakeLocalImportHandle_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPMRMakeLocalImportHandleOUT->eError = + PMRMakeLocalImportHandle(psBufferInt, &psExtMemInt); + /* Exit early if bridged call fails */ + if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)) + { + goto PMRMakeLocalImportHandle_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + + psPMRMakeLocalImportHandleOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase-> + psHandleBase, + &psPMRMakeLocalImportHandleOUT->hExtMem, + (void *)psExtMemInt, + PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PMRMakeLocalImportHandlepsExtMemIntRelease); + if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + goto PMRMakeLocalImportHandle_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + +PMRMakeLocalImportHandle_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psBufferInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hBuffer, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK) + { + if (psExtMemInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnmakeLocalImportHandle(psExtMemInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRUnmakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psPMRUnmakeLocalImportHandleIN_UI8, + IMG_UINT8 * + psPMRUnmakeLocalImportHandleOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE + *psPMRUnmakeLocalImportHandleIN = + (PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE *) + IMG_OFFSET_ADDR(psPMRUnmakeLocalImportHandleIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE + *psPMRUnmakeLocalImportHandleOUT = + (PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE *) + IMG_OFFSET_ADDR(psPMRUnmakeLocalImportHandleOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + + psPMRUnmakeLocalImportHandleOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psProcessHandleBase-> + psHandleBase, + (IMG_HANDLE) + psPMRUnmakeLocalImportHandleIN-> + hExtMem, + PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT); + if (unlikely + ((psPMRUnmakeLocalImportHandleOUT->eError != PVRSRV_OK) + && (psPMRUnmakeLocalImportHandleOUT->eError != + PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psPMRUnmakeLocalImportHandleOUT-> + eError))); + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + goto PMRUnmakeLocalImportHandle_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + +PMRUnmakeLocalImportHandle_exit: + + return 0; +} + +static PVRSRV_ERROR _PMRImportPMRpsPMRIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnrefPMR((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePMRImportPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRImportPMRIN_UI8, + IMG_UINT8 * psPMRImportPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRIMPORTPMR *psPMRImportPMRIN = + (PVRSRV_BRIDGE_IN_PMRIMPORTPMR *) + IMG_OFFSET_ADDR(psPMRImportPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRIMPORTPMR *psPMRImportPMROUT = + (PVRSRV_BRIDGE_OUT_PMRIMPORTPMR *) + IMG_OFFSET_ADDR(psPMRImportPMROUT_UI8, 0); + + IMG_HANDLE hPMRExport = psPMRImportPMRIN->hPMRExport; + PMR_EXPORT *psPMRExportInt = NULL; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(KERNEL_HANDLE_BASE); + + /* Look up the address from the handle */ + psPMRImportPMROUT->eError = + PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE, + (void **)&psPMRExportInt, + hPMRExport, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, + IMG_TRUE); + if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(KERNEL_HANDLE_BASE); + goto PMRImportPMR_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(KERNEL_HANDLE_BASE); + + psPMRImportPMROUT->eError = + PhysmemImportPMR(psConnection, OSGetDevNode(psConnection), + psPMRExportInt, + psPMRImportPMRIN->ui64uiPassword, + psPMRImportPMRIN->ui64uiSize, + psPMRImportPMRIN->ui32uiLog2Contig, &psPMRInt); + /* Exit early if bridged call fails */ + if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK)) + { + goto PMRImportPMR_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psPMRImportPMROUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psPMRImportPMROUT->hPMR, + (void *)psPMRInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PMRImportPMRpsPMRIntRelease); + if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRImportPMR_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +PMRImportPMR_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(KERNEL_HANDLE_BASE); + + /* Unreference the previously looked up handle */ + if (psPMRExportInt) + { + PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE, + hPMRExport, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(KERNEL_HANDLE_BASE); + + if (psPMRImportPMROUT->eError != PVRSRV_OK) + { + if (psPMRInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnrefPMR(psPMRInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + return 0; +} + +static PVRSRV_ERROR _PMRLocalImportPMRpsPMRIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnrefPMR((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePMRLocalImportPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRLocalImportPMRIN_UI8, + IMG_UINT8 * psPMRLocalImportPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR *psPMRLocalImportPMRIN = + (PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR *) + IMG_OFFSET_ADDR(psPMRLocalImportPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR *psPMRLocalImportPMROUT = + (PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR *) + IMG_OFFSET_ADDR(psPMRLocalImportPMROUT_UI8, 0); + + IMG_HANDLE hExtHandle = psPMRLocalImportPMRIN->hExtHandle; + PMR *psExtHandleInt = NULL; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + + /* Look up the address from the handle */ + psPMRLocalImportPMROUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase-> + psHandleBase, (void **)&psExtHandleInt, + hExtHandle, + PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT, + IMG_TRUE); + if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + goto PMRLocalImportPMR_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + + psPMRLocalImportPMROUT->eError = + PMRLocalImportPMR(psExtHandleInt, + &psPMRInt, + &psPMRLocalImportPMROUT->uiSize, + &psPMRLocalImportPMROUT->sAlign); + /* Exit early if bridged call fails */ + if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK)) + { + goto PMRLocalImportPMR_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psPMRLocalImportPMROUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psPMRLocalImportPMROUT->hPMR, + (void *)psPMRInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PMRLocalImportPMRpsPMRIntRelease); + if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRLocalImportPMR_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +PMRLocalImportPMR_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psExtHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase-> + psHandleBase, hExtHandle, + PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + + if (psPMRLocalImportPMROUT->eError != PVRSRV_OK) + { + if (psPMRInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnrefPMR(psPMRInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRUnrefPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRUnrefPMRIN_UI8, + IMG_UINT8 * psPMRUnrefPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRUNREFPMR *psPMRUnrefPMRIN = + (PVRSRV_BRIDGE_IN_PMRUNREFPMR *) + IMG_OFFSET_ADDR(psPMRUnrefPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRUNREFPMR *psPMRUnrefPMROUT = + (PVRSRV_BRIDGE_OUT_PMRUNREFPMR *) + IMG_OFFSET_ADDR(psPMRUnrefPMROUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psPMRUnrefPMROUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psPMRUnrefPMRIN->hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + if (unlikely((psPMRUnrefPMROUT->eError != PVRSRV_OK) && + (psPMRUnrefPMROUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psPMRUnrefPMROUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto PMRUnrefPMR_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +PMRUnrefPMR_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRUnrefUnlockPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRUnrefUnlockPMRIN_UI8, + IMG_UINT8 * psPMRUnrefUnlockPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMRIN = + (PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR *) + IMG_OFFSET_ADDR(psPMRUnrefUnlockPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMROUT = + (PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR *) + IMG_OFFSET_ADDR(psPMRUnrefUnlockPMROUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psPMRUnrefUnlockPMROUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psPMRUnrefUnlockPMRIN-> + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + if (unlikely + ((psPMRUnrefUnlockPMROUT->eError != PVRSRV_OK) + && (psPMRUnrefUnlockPMROUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psPMRUnrefUnlockPMROUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto PMRUnrefUnlockPMR_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +PMRUnrefUnlockPMR_exit: + + return 0; +} + +static PVRSRV_ERROR _PhysmemNewRamBackedPMRpsPMRPtrIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnrefPMR((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePhysmemNewRamBackedPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPhysmemNewRamBackedPMRIN_UI8, + IMG_UINT8 * psPhysmemNewRamBackedPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR *psPhysmemNewRamBackedPMRIN = + (PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR *) + IMG_OFFSET_ADDR(psPhysmemNewRamBackedPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR *psPhysmemNewRamBackedPMROUT = + (PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR *) + IMG_OFFSET_ADDR(psPhysmemNewRamBackedPMROUT_UI8, 0); + + IMG_UINT32 *ui32MappingTableInt = NULL; + IMG_CHAR *uiAnnotationInt = NULL; + PMR *psPMRPtrInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * + sizeof(IMG_UINT32)) + + (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * + sizeof(IMG_CHAR)) + 0; + + if (unlikely + (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks > + PMR_MAX_SUPPORTED_PAGE_COUNT)) + { + psPhysmemNewRamBackedPMROUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysmemNewRamBackedPMR_exit; + } + + if (unlikely + (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength > + DEVMEM_ANNOTATION_MAX_LEN)) + { + psPhysmemNewRamBackedPMROUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysmemNewRamBackedPMR_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psPhysmemNewRamBackedPMRIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psPhysmemNewRamBackedPMRIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psPhysmemNewRamBackedPMROUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto PhysmemNewRamBackedPMR_exit; + } + } + } + + if (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks != 0) + { + ui32MappingTableInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32) > + 0) + { + if (OSCopyFromUser + (NULL, ui32MappingTableInt, + (const void __user *)psPhysmemNewRamBackedPMRIN-> + pui32MappingTable, + psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psPhysmemNewRamBackedPMROUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysmemNewRamBackedPMR_exit; + } + } + if (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength != 0) + { + uiAnnotationInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * + sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * + sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiAnnotationInt, + (const void __user *)psPhysmemNewRamBackedPMRIN-> + puiAnnotation, + psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * + sizeof(IMG_CHAR)) != PVRSRV_OK) + { + 
psPhysmemNewRamBackedPMROUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysmemNewRamBackedPMR_exit; + } + ((IMG_CHAR *) + uiAnnotationInt)[(psPhysmemNewRamBackedPMRIN-> + ui32AnnotationLength * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + + psPhysmemNewRamBackedPMROUT->eError = + PhysmemNewRamBackedPMR(psConnection, OSGetDevNode(psConnection), + psPhysmemNewRamBackedPMRIN->uiSize, + psPhysmemNewRamBackedPMRIN->uiChunkSize, + psPhysmemNewRamBackedPMRIN-> + ui32NumPhysChunks, + psPhysmemNewRamBackedPMRIN-> + ui32NumVirtChunks, ui32MappingTableInt, + psPhysmemNewRamBackedPMRIN->ui32Log2PageSize, + psPhysmemNewRamBackedPMRIN->uiFlags, + psPhysmemNewRamBackedPMRIN-> + ui32AnnotationLength, uiAnnotationInt, + psPhysmemNewRamBackedPMRIN->ui32PID, + &psPMRPtrInt, + psPhysmemNewRamBackedPMRIN->ui32PDumpFlags); + /* Exit early if bridged call fails */ + if (unlikely(psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)) + { + goto PhysmemNewRamBackedPMR_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psPhysmemNewRamBackedPMROUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psPhysmemNewRamBackedPMROUT->hPMRPtr, + (void *)psPMRPtrInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PhysmemNewRamBackedPMRpsPMRPtrIntRelease); + if (unlikely(psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PhysmemNewRamBackedPMR_exit; + } + + /* Release now we have created handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +PhysmemNewRamBackedPMR_exit: + + if (psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK) + { + if (psPMRPtrInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnrefPMR(psPMRPtrInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static PVRSRV_ERROR _PhysmemNewRamBackedLockedPMRpsPMRPtrIntRelease(void + *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnrefUnlockPMR((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePhysmemNewRamBackedLockedPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psPhysmemNewRamBackedLockedPMRIN_UI8, + IMG_UINT8 * + psPhysmemNewRamBackedLockedPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR + *psPhysmemNewRamBackedLockedPMRIN = + (PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR *) + IMG_OFFSET_ADDR(psPhysmemNewRamBackedLockedPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR + *psPhysmemNewRamBackedLockedPMROUT = + (PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR *) + IMG_OFFSET_ADDR(psPhysmemNewRamBackedLockedPMROUT_UI8, 0); + + IMG_UINT32 *ui32MappingTableInt = NULL; + IMG_CHAR *uiAnnotationInt = NULL; + PMR *psPMRPtrInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * + sizeof(IMG_UINT32)) + + (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * + sizeof(IMG_CHAR)) + 0; + + if (unlikely + (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks > + PMR_MAX_SUPPORTED_PAGE_COUNT)) + { + psPhysmemNewRamBackedLockedPMROUT->eError = + 
PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysmemNewRamBackedLockedPMR_exit; + } + + if (unlikely + (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength > + DEVMEM_ANNOTATION_MAX_LEN)) + { + psPhysmemNewRamBackedLockedPMROUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysmemNewRamBackedLockedPMR_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psPhysmemNewRamBackedLockedPMRIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *) + psPhysmemNewRamBackedLockedPMRIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psPhysmemNewRamBackedLockedPMROUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto PhysmemNewRamBackedLockedPMR_exit; + } + } + } + + if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks != 0) + { + ui32MappingTableInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * + sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32MappingTableInt, + (const void __user *)psPhysmemNewRamBackedLockedPMRIN-> + pui32MappingTable, + psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psPhysmemNewRamBackedLockedPMROUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysmemNewRamBackedLockedPMR_exit; + } + } + if 
(psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength != 0) + { + uiAnnotationInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * + sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * + sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiAnnotationInt, + (const void __user *)psPhysmemNewRamBackedLockedPMRIN-> + puiAnnotation, + psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * + sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psPhysmemNewRamBackedLockedPMROUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysmemNewRamBackedLockedPMR_exit; + } + ((IMG_CHAR *) + uiAnnotationInt)[(psPhysmemNewRamBackedLockedPMRIN-> + ui32AnnotationLength * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + + psPhysmemNewRamBackedLockedPMROUT->eError = + PhysmemNewRamBackedLockedPMR(psConnection, + OSGetDevNode(psConnection), + psPhysmemNewRamBackedLockedPMRIN-> + uiSize, + psPhysmemNewRamBackedLockedPMRIN-> + uiChunkSize, + psPhysmemNewRamBackedLockedPMRIN-> + ui32NumPhysChunks, + psPhysmemNewRamBackedLockedPMRIN-> + ui32NumVirtChunks, ui32MappingTableInt, + psPhysmemNewRamBackedLockedPMRIN-> + ui32Log2PageSize, + psPhysmemNewRamBackedLockedPMRIN-> + uiFlags, + psPhysmemNewRamBackedLockedPMRIN-> + ui32AnnotationLength, uiAnnotationInt, + psPhysmemNewRamBackedLockedPMRIN-> + ui32PID, &psPMRPtrInt, + psPhysmemNewRamBackedLockedPMRIN-> + ui32PDumpFlags); + /* Exit early if bridged call fails */ + if (unlikely(psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)) + { + goto PhysmemNewRamBackedLockedPMR_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psPhysmemNewRamBackedLockedPMROUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psPhysmemNewRamBackedLockedPMROUT-> + hPMRPtr, (void *)psPMRPtrInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PhysmemNewRamBackedLockedPMRpsPMRPtrIntRelease); + if (unlikely(psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PhysmemNewRamBackedLockedPMR_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +PhysmemNewRamBackedLockedPMR_exit: + + if (psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK) + { + if (psPMRPtrInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnrefUnlockPMR(psPMRPtrInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntPin(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntPinIN_UI8, + IMG_UINT8 * psDevmemIntPinOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTPIN *psDevmemIntPinIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTPIN *) + IMG_OFFSET_ADDR(psDevmemIntPinIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTPIN *psDevmemIntPinOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTPIN *) + IMG_OFFSET_ADDR(psDevmemIntPinOUT_UI8, 0); + + IMG_HANDLE hPMR = psDevmemIntPinIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntPinOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psDevmemIntPinOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntPin_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntPinOUT->eError = DevmemIntPin(psPMRInt); + +DevmemIntPin_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntUnpin(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntUnpinIN_UI8, + IMG_UINT8 * psDevmemIntUnpinOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN *psDevmemIntUnpinIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN *) + IMG_OFFSET_ADDR(psDevmemIntUnpinIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN *psDevmemIntUnpinOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN *) + IMG_OFFSET_ADDR(psDevmemIntUnpinOUT_UI8, 0); + + IMG_HANDLE hPMR = psDevmemIntUnpinIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntUnpinOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psDevmemIntUnpinOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntUnpin_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntUnpinOUT->eError = DevmemIntUnpin(psPMRInt); + +DevmemIntUnpin_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntPinValidate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntPinValidateIN_UI8, + IMG_UINT8 * psDevmemIntPinValidateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE *psDevmemIntPinValidateIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE *) + IMG_OFFSET_ADDR(psDevmemIntPinValidateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE *psDevmemIntPinValidateOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE *) + IMG_OFFSET_ADDR(psDevmemIntPinValidateOUT_UI8, 0); + + IMG_HANDLE hMapping = psDevmemIntPinValidateIN->hMapping; + DEVMEMINT_MAPPING *psMappingInt = NULL; + IMG_HANDLE hPMR = psDevmemIntPinValidateIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntPinValidateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psMappingInt, + hMapping, + PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, + IMG_TRUE); + if (unlikely(psDevmemIntPinValidateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntPinValidate_exit; + } + + /* Look up the address from the handle */ + psDevmemIntPinValidateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psDevmemIntPinValidateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntPinValidate_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntPinValidateOUT->eError = + DevmemIntPinValidate(psMappingInt, psPMRInt); + +DevmemIntPinValidate_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psMappingInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hMapping, + PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING); + } + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntUnpinInvalidate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psDevmemIntUnpinInvalidateIN_UI8, + IMG_UINT8 * + psDevmemIntUnpinInvalidateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE *psDevmemIntUnpinInvalidateIN + = + (PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE *) + IMG_OFFSET_ADDR(psDevmemIntUnpinInvalidateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE + *psDevmemIntUnpinInvalidateOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE *) + IMG_OFFSET_ADDR(psDevmemIntUnpinInvalidateOUT_UI8, 0); + + IMG_HANDLE hMapping = psDevmemIntUnpinInvalidateIN->hMapping; + DEVMEMINT_MAPPING *psMappingInt = NULL; + IMG_HANDLE hPMR = psDevmemIntUnpinInvalidateIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntUnpinInvalidateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psMappingInt, + hMapping, + PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, + IMG_TRUE); + if (unlikely(psDevmemIntUnpinInvalidateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntUnpinInvalidate_exit; + } + + /* Look up the address from the handle */ + psDevmemIntUnpinInvalidateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psDevmemIntUnpinInvalidateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntUnpinInvalidate_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntUnpinInvalidateOUT->eError = + DevmemIntUnpinInvalidate(psMappingInt, psPMRInt); + +DevmemIntUnpinInvalidate_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psMappingInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hMapping, + PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING); + } + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static PVRSRV_ERROR _DevmemIntCtxCreatepsDevMemServerContextIntRelease(void + *pvData) +{ + PVRSRV_ERROR eError; + eError = DevmemIntCtxDestroy((DEVMEMINT_CTX *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeDevmemIntCtxCreate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntCtxCreateIN_UI8, + IMG_UINT8 * psDevmemIntCtxCreateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *) + IMG_OFFSET_ADDR(psDevmemIntCtxCreateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *) + IMG_OFFSET_ADDR(psDevmemIntCtxCreateOUT_UI8, 0); + + DEVMEMINT_CTX *psDevMemServerContextInt = NULL; + IMG_HANDLE hPrivDataInt = NULL; + + psDevmemIntCtxCreateOUT->hDevMemServerContext = NULL; + + psDevmemIntCtxCreateOUT->eError = + DevmemIntCtxCreate(psConnection, OSGetDevNode(psConnection), + psDevmemIntCtxCreateIN->bbKernelMemoryCtx, + &psDevMemServerContextInt, + &hPrivDataInt, + &psDevmemIntCtxCreateOUT->ui32CPUCacheLineSize); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) + { + goto DevmemIntCtxCreate_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psDevmemIntCtxCreateOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntCtxCreateOUT-> + hDevMemServerContext, + (void *)psDevMemServerContextInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _DevmemIntCtxCreatepsDevMemServerContextIntRelease); + if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntCtxCreate_exit; + } + + psDevmemIntCtxCreateOUT->eError = + PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntCtxCreateOUT->hPrivData, + (void *)hPrivDataInt, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psDevmemIntCtxCreateOUT-> + hDevMemServerContext); + if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntCtxCreate_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntCtxCreate_exit: + + if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK) + { + if (psDevmemIntCtxCreateOUT->hDevMemServerContext) + { + PVRSRV_ERROR eError; + + /* Lock over handle creation cleanup. */ + LockHandle(psConnection->psHandleBase); + + eError = + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + (IMG_HANDLE) + psDevmemIntCtxCreateOUT-> + hDevMemServerContext, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + if (unlikely + ((eError != PVRSRV_OK) + && (eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) + || (eError == PVRSRV_ERROR_RETRY)); + + /* Avoid freeing/destroying/releasing the resource a second time below */ + psDevMemServerContextInt = NULL; + /* Release now we have cleaned up creation handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + } + + if (psDevMemServerContextInt) + { + DevmemIntCtxDestroy(psDevMemServerContextInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntCtxDestroy(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntCtxDestroyIN_UI8, + IMG_UINT8 * psDevmemIntCtxDestroyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *) + IMG_OFFSET_ADDR(psDevmemIntCtxDestroyIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *) + IMG_OFFSET_ADDR(psDevmemIntCtxDestroyOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psDevmemIntCtxDestroyOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psDevmemIntCtxDestroyIN-> + hDevmemServerContext, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + if (unlikely + ((psDevmemIntCtxDestroyOUT->eError != PVRSRV_OK) + && (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psDevmemIntCtxDestroyOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntCtxDestroy_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntCtxDestroy_exit: + + return 0; +} + +static PVRSRV_ERROR _DevmemIntHeapCreatepsDevmemHeapPtrIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = DevmemIntHeapDestroy((DEVMEMINT_HEAP *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeDevmemIntHeapCreate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntHeapCreateIN_UI8, + IMG_UINT8 * psDevmemIntHeapCreateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *) + IMG_OFFSET_ADDR(psDevmemIntHeapCreateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *) + IMG_OFFSET_ADDR(psDevmemIntHeapCreateOUT_UI8, 0); + + IMG_HANDLE hDevmemCtx = psDevmemIntHeapCreateIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + DEVMEMINT_HEAP *psDevmemHeapPtrInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntHeapCreateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemCtxInt, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + IMG_TRUE); + if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntHeapCreate_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntHeapCreateOUT->eError = + DevmemIntHeapCreate(psDevmemCtxInt, + psDevmemIntHeapCreateIN->sHeapBaseAddr, + psDevmemIntHeapCreateIN->uiHeapLength, + psDevmemIntHeapCreateIN->ui32Log2DataPageSize, + &psDevmemHeapPtrInt); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) + { + goto DevmemIntHeapCreate_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psDevmemIntHeapCreateOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntHeapCreateOUT->hDevmemHeapPtr, + (void *)psDevmemHeapPtrInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _DevmemIntHeapCreatepsDevmemHeapPtrIntRelease); + if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntHeapCreate_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntHeapCreate_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemCtxInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psDevmemIntHeapCreateOUT->eError != PVRSRV_OK) + { + if (psDevmemHeapPtrInt) + { + DevmemIntHeapDestroy(psDevmemHeapPtrInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntHeapDestroy(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntHeapDestroyIN_UI8, + IMG_UINT8 * psDevmemIntHeapDestroyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *) + IMG_OFFSET_ADDR(psDevmemIntHeapDestroyIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *) + IMG_OFFSET_ADDR(psDevmemIntHeapDestroyOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psDevmemIntHeapDestroyOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psDevmemIntHeapDestroyIN-> + hDevmemHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); + if (unlikely + ((psDevmemIntHeapDestroyOUT->eError != PVRSRV_OK) + && (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psDevmemIntHeapDestroyOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntHeapDestroy_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntHeapDestroy_exit: + + return 0; +} + +static PVRSRV_ERROR _DevmemIntMapPMRpsMappingIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = DevmemIntUnmapPMR((DEVMEMINT_MAPPING *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeDevmemIntMapPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntMapPMRIN_UI8, + IMG_UINT8 * psDevmemIntMapPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *psDevmemIntMapPMRIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *) + IMG_OFFSET_ADDR(psDevmemIntMapPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *psDevmemIntMapPMROUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *) + IMG_OFFSET_ADDR(psDevmemIntMapPMROUT_UI8, 0); + + IMG_HANDLE hDevmemServerHeap = psDevmemIntMapPMRIN->hDevmemServerHeap; + DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL; + IMG_HANDLE hReservation = psDevmemIntMapPMRIN->hReservation; + DEVMEMINT_RESERVATION *psReservationInt = NULL; + IMG_HANDLE hPMR = psDevmemIntMapPMRIN->hPMR; + PMR *psPMRInt = NULL; + DEVMEMINT_MAPPING *psMappingInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntMapPMROUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemServerHeapInt, + hDevmemServerHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, + IMG_TRUE); + if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntMapPMR_exit; + } + + /* Look up the address from the handle */ + psDevmemIntMapPMROUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, + IMG_TRUE); + if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntMapPMR_exit; + } + + /* Look up the address from the handle */ + psDevmemIntMapPMROUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntMapPMR_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntMapPMROUT->eError = + DevmemIntMapPMR(psDevmemServerHeapInt, + psReservationInt, + psPMRInt, + psDevmemIntMapPMRIN->uiMapFlags, &psMappingInt); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) + { + goto DevmemIntMapPMR_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psDevmemIntMapPMROUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntMapPMROUT->hMapping, + (void *)psMappingInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _DevmemIntMapPMRpsMappingIntRelease); + if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntMapPMR_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntMapPMR_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemServerHeapInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemServerHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); + } + + /* Unreference the previously looked up handle */ + if (psReservationInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); + } + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + if (psDevmemIntMapPMROUT->eError != PVRSRV_OK) + { + if (psMappingInt) + { + DevmemIntUnmapPMR(psMappingInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntUnmapPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntUnmapPMRIN_UI8, + IMG_UINT8 * psDevmemIntUnmapPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMRIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *) + IMG_OFFSET_ADDR(psDevmemIntUnmapPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMROUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *) + IMG_OFFSET_ADDR(psDevmemIntUnmapPMROUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psDevmemIntUnmapPMROUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psDevmemIntUnmapPMRIN-> + hMapping, + PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING); + if (unlikely + ((psDevmemIntUnmapPMROUT->eError != PVRSRV_OK) + && (psDevmemIntUnmapPMROUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psDevmemIntUnmapPMROUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntUnmapPMR_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntUnmapPMR_exit: + + return 0; +} + +static PVRSRV_ERROR _DevmemIntReserveRangepsReservationIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = DevmemIntUnreserveRange((DEVMEMINT_RESERVATION *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeDevmemIntReserveRange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntReserveRangeIN_UI8, + IMG_UINT8 * psDevmemIntReserveRangeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemIntReserveRangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemIntReserveRangeOUT_UI8, 0); + + IMG_HANDLE hDevmemServerHeap = + psDevmemIntReserveRangeIN->hDevmemServerHeap; + DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL; + DEVMEMINT_RESERVATION *psReservationInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntReserveRangeOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemServerHeapInt, + hDevmemServerHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, + IMG_TRUE); + if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntReserveRange_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntReserveRangeOUT->eError = + DevmemIntReserveRange(psDevmemServerHeapInt, + psDevmemIntReserveRangeIN->sAddress, + psDevmemIntReserveRangeIN->uiLength, + &psReservationInt); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) + { + goto DevmemIntReserveRange_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psDevmemIntReserveRangeOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntReserveRangeOUT->hReservation, + (void *)psReservationInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _DevmemIntReserveRangepsReservationIntRelease); + if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntReserveRange_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntReserveRange_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemServerHeapInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemServerHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psDevmemIntReserveRangeOUT->eError != PVRSRV_OK) + { + if (psReservationInt) + { + DevmemIntUnreserveRange(psReservationInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntUnreserveRange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntUnreserveRangeIN_UI8, + IMG_UINT8 * + psDevmemIntUnreserveRangeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeOUT + = + (PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psDevmemIntUnreserveRangeOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psDevmemIntUnreserveRangeIN-> + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); + if (unlikely + ((psDevmemIntUnreserveRangeOUT->eError != PVRSRV_OK) + && (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psDevmemIntUnreserveRangeOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntUnreserveRange_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntUnreserveRange_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeChangeSparseMem(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psChangeSparseMemIN_UI8, + IMG_UINT8 * psChangeSparseMemOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *psChangeSparseMemIN = + (PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *) + IMG_OFFSET_ADDR(psChangeSparseMemIN_UI8, 0); + PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *psChangeSparseMemOUT = + (PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *) + IMG_OFFSET_ADDR(psChangeSparseMemOUT_UI8, 0); + + IMG_HANDLE hSrvDevMemHeap = psChangeSparseMemIN->hSrvDevMemHeap; + DEVMEMINT_HEAP *psSrvDevMemHeapInt = NULL; + IMG_HANDLE hPMR = psChangeSparseMemIN->hPMR; + PMR *psPMRInt = NULL; + IMG_UINT32 *ui32AllocPageIndicesInt = NULL; + IMG_UINT32 *ui32FreePageIndicesInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) + + (psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) + 0; + + if (unlikely + (psChangeSparseMemIN->ui32AllocPageCount > + PMR_MAX_SUPPORTED_PAGE_COUNT)) + { + psChangeSparseMemOUT->eError = + 
PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto ChangeSparseMem_exit; + } + + if (unlikely + (psChangeSparseMemIN->ui32FreePageCount > + PMR_MAX_SUPPORTED_PAGE_COUNT)) + { + psChangeSparseMemOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto ChangeSparseMem_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psChangeSparseMemIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psChangeSparseMemIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psChangeSparseMemOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto ChangeSparseMem_exit; + } + } + } + + if (psChangeSparseMemIN->ui32AllocPageCount != 0) + { + ui32AllocPageIndicesInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psChangeSparseMemIN->ui32AllocPageCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32AllocPageIndicesInt, + (const void __user *)psChangeSparseMemIN-> + pui32AllocPageIndices, + psChangeSparseMemIN->ui32AllocPageCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psChangeSparseMemOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto ChangeSparseMem_exit; + } + } + if (psChangeSparseMemIN->ui32FreePageCount != 0) + { + ui32FreePageIndicesInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psChangeSparseMemIN->ui32FreePageCount * 
sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32FreePageIndicesInt, + (const void __user *)psChangeSparseMemIN-> + pui32FreePageIndices, + psChangeSparseMemIN->ui32FreePageCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psChangeSparseMemOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto ChangeSparseMem_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psChangeSparseMemOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSrvDevMemHeapInt, + hSrvDevMemHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, + IMG_TRUE); + if (unlikely(psChangeSparseMemOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto ChangeSparseMem_exit; + } + + /* Look up the address from the handle */ + psChangeSparseMemOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psChangeSparseMemOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto ChangeSparseMem_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psChangeSparseMemOUT->eError = + DevmemIntChangeSparse(psSrvDevMemHeapInt, + psPMRInt, + psChangeSparseMemIN->ui32AllocPageCount, + ui32AllocPageIndicesInt, + psChangeSparseMemIN->ui32FreePageCount, + ui32FreePageIndicesInt, + psChangeSparseMemIN->ui32SparseFlags, + psChangeSparseMemIN->uiFlags, + psChangeSparseMemIN->sDevVAddr, + psChangeSparseMemIN->ui64CPUVAddr); + +ChangeSparseMem_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSrvDevMemHeapInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSrvDevMemHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); + } + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntMapPages(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntMapPagesIN_UI8, + IMG_UINT8 * psDevmemIntMapPagesOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES *psDevmemIntMapPagesIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES *) + IMG_OFFSET_ADDR(psDevmemIntMapPagesIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES *psDevmemIntMapPagesOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES *) + IMG_OFFSET_ADDR(psDevmemIntMapPagesOUT_UI8, 0); + + IMG_HANDLE hReservation = psDevmemIntMapPagesIN->hReservation; + DEVMEMINT_RESERVATION *psReservationInt = NULL; + IMG_HANDLE hPMR = psDevmemIntMapPagesIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntMapPagesOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, + IMG_TRUE); + if (unlikely(psDevmemIntMapPagesOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntMapPages_exit; + } + + /* Look up the address from the handle */ + psDevmemIntMapPagesOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psDevmemIntMapPagesOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntMapPages_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntMapPagesOUT->eError = + DevmemIntMapPages(psReservationInt, + psPMRInt, + psDevmemIntMapPagesIN->ui32PageCount, + psDevmemIntMapPagesIN->ui32PhysicalPgOffset, + psDevmemIntMapPagesIN->uiFlags, + psDevmemIntMapPagesIN->sDevVAddr); + +DevmemIntMapPages_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psReservationInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); + } + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntUnmapPages(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntUnmapPagesIN_UI8, + IMG_UINT8 * psDevmemIntUnmapPagesOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES *psDevmemIntUnmapPagesIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES *) + IMG_OFFSET_ADDR(psDevmemIntUnmapPagesIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES *psDevmemIntUnmapPagesOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES *) + IMG_OFFSET_ADDR(psDevmemIntUnmapPagesOUT_UI8, 0); + + IMG_HANDLE hReservation = psDevmemIntUnmapPagesIN->hReservation; + DEVMEMINT_RESERVATION *psReservationInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntUnmapPagesOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, + IMG_TRUE); + if (unlikely(psDevmemIntUnmapPagesOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntUnmapPages_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntUnmapPagesOUT->eError = + DevmemIntUnmapPages(psReservationInt, + psDevmemIntUnmapPagesIN->sDevVAddr, + psDevmemIntUnmapPagesIN->ui32PageCount); + +DevmemIntUnmapPages_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psReservationInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIsVDevAddrValid(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIsVDevAddrValidIN_UI8, + IMG_UINT8 * psDevmemIsVDevAddrValidOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID *psDevmemIsVDevAddrValidIN = + (PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID *) + IMG_OFFSET_ADDR(psDevmemIsVDevAddrValidIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID *psDevmemIsVDevAddrValidOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID *) + IMG_OFFSET_ADDR(psDevmemIsVDevAddrValidOUT_UI8, 0); + + IMG_HANDLE hDevmemCtx = psDevmemIsVDevAddrValidIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIsVDevAddrValidOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemCtxInt, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + IMG_TRUE); + if (unlikely(psDevmemIsVDevAddrValidOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIsVDevAddrValid_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIsVDevAddrValidOUT->eError = + DevmemIntIsVDevAddrValid(psConnection, OSGetDevNode(psConnection), + psDevmemCtxInt, + psDevmemIsVDevAddrValidIN->sAddress); + +DevmemIsVDevAddrValid_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemCtxInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#if defined(RGX_SRV_SLC_RANGEBASED_CFI_SUPPORTED) + +static IMG_INT +PVRSRVBridgeDevmemFlushDevSLCRange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemFlushDevSLCRangeIN_UI8, + IMG_UINT8 * psDevmemFlushDevSLCRangeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE *psDevmemFlushDevSLCRangeIN = + (PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE *) + IMG_OFFSET_ADDR(psDevmemFlushDevSLCRangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE *psDevmemFlushDevSLCRangeOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE *) + IMG_OFFSET_ADDR(psDevmemFlushDevSLCRangeOUT_UI8, 0); + + IMG_HANDLE hDevmemCtx = psDevmemFlushDevSLCRangeIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemFlushDevSLCRangeOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemCtxInt, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + IMG_TRUE); + if (unlikely(psDevmemFlushDevSLCRangeOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemFlushDevSLCRange_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemFlushDevSLCRangeOUT->eError = + DevmemIntFlushDevSLCRange(psDevmemCtxInt, + psDevmemFlushDevSLCRangeIN->sAddress, + psDevmemFlushDevSLCRangeIN->uiSize, + psDevmemFlushDevSLCRangeIN->bInvalidate); + +DevmemFlushDevSLCRange_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemCtxInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#else +#define PVRSRVBridgeDevmemFlushDevSLCRange NULL +#endif + +#if defined(RGX_FEATURE_FBCDC) + +static IMG_INT +PVRSRVBridgeDevmemInvalidateFBSCTable(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psDevmemInvalidateFBSCTableIN_UI8, + IMG_UINT8 * + psDevmemInvalidateFBSCTableOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE + *psDevmemInvalidateFBSCTableIN = + (PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE *) + IMG_OFFSET_ADDR(psDevmemInvalidateFBSCTableIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE + *psDevmemInvalidateFBSCTableOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE *) + IMG_OFFSET_ADDR(psDevmemInvalidateFBSCTableOUT_UI8, 0); + + IMG_HANDLE hDevmemCtx = psDevmemInvalidateFBSCTableIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemInvalidateFBSCTableOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemCtxInt, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + IMG_TRUE); + if (unlikely(psDevmemInvalidateFBSCTableOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemInvalidateFBSCTable_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemInvalidateFBSCTableOUT->eError = + DevmemIntInvalidateFBSCTable(psDevmemCtxInt, + psDevmemInvalidateFBSCTableIN-> + ui64FBSCEntries); + +DevmemInvalidateFBSCTable_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemCtxInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#else +#define PVRSRVBridgeDevmemInvalidateFBSCTable NULL +#endif + +static IMG_INT +PVRSRVBridgeHeapCfgHeapConfigCount(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psHeapCfgHeapConfigCountIN_UI8, + IMG_UINT8 * psHeapCfgHeapConfigCountOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT *psHeapCfgHeapConfigCountIN = + (PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT *) + IMG_OFFSET_ADDR(psHeapCfgHeapConfigCountIN_UI8, 0); + PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT *psHeapCfgHeapConfigCountOUT = + (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT *) + IMG_OFFSET_ADDR(psHeapCfgHeapConfigCountOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psHeapCfgHeapConfigCountIN); + + psHeapCfgHeapConfigCountOUT->eError = + HeapCfgHeapConfigCount(psConnection, OSGetDevNode(psConnection), + &psHeapCfgHeapConfigCountOUT-> + ui32NumHeapConfigs); + + return 0; +} + +static IMG_INT +PVRSRVBridgeHeapCfgHeapCount(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psHeapCfgHeapCountIN_UI8, + IMG_UINT8 * psHeapCfgHeapCountOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT *psHeapCfgHeapCountIN = + (PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT *) + IMG_OFFSET_ADDR(psHeapCfgHeapCountIN_UI8, 0); + PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT *psHeapCfgHeapCountOUT = + (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT *) + IMG_OFFSET_ADDR(psHeapCfgHeapCountOUT_UI8, 0); + + psHeapCfgHeapCountOUT->eError = + HeapCfgHeapCount(psConnection, OSGetDevNode(psConnection), + psHeapCfgHeapCountIN->ui32HeapConfigIndex, + &psHeapCfgHeapCountOUT->ui32NumHeaps); + + return 0; +} + +static IMG_INT +PVRSRVBridgeHeapCfgHeapConfigName(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psHeapCfgHeapConfigNameIN_UI8, + IMG_UINT8 * psHeapCfgHeapConfigNameOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME *psHeapCfgHeapConfigNameIN = + (PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME *) + 
IMG_OFFSET_ADDR(psHeapCfgHeapConfigNameIN_UI8, 0); + PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME *psHeapCfgHeapConfigNameOUT = + (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME *) + IMG_OFFSET_ADDR(psHeapCfgHeapConfigNameOUT_UI8, 0); + + IMG_CHAR *puiHeapConfigNameInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * + sizeof(IMG_CHAR)) + 0; + + if (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz > + DEVMEM_HEAPNAME_MAXLENGTH) + { + psHeapCfgHeapConfigNameOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto HeapCfgHeapConfigName_exit; + } + + psHeapCfgHeapConfigNameOUT->puiHeapConfigName = + psHeapCfgHeapConfigNameIN->puiHeapConfigName; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psHeapCfgHeapConfigNameIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psHeapCfgHeapConfigNameIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psHeapCfgHeapConfigNameOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto HeapCfgHeapConfigName_exit; + } + } + } + + if (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz != 0) + { + puiHeapConfigNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * + sizeof(IMG_CHAR); + } + + psHeapCfgHeapConfigNameOUT->eError = + HeapCfgHeapConfigName(psConnection, OSGetDevNode(psConnection), + psHeapCfgHeapConfigNameIN-> + ui32HeapConfigIndex, + psHeapCfgHeapConfigNameIN-> + ui32HeapConfigNameBufSz, + puiHeapConfigNameInt); + + /* If dest ptr is non-null and we have data to copy */ + if ((puiHeapConfigNameInt) && + ((psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * + sizeof(IMG_CHAR)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, + (void __user *)psHeapCfgHeapConfigNameOUT-> + puiHeapConfigName, puiHeapConfigNameInt, + (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * + sizeof(IMG_CHAR))) != PVRSRV_OK)) + { + psHeapCfgHeapConfigNameOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto HeapCfgHeapConfigName_exit; + } + } + +HeapCfgHeapConfigName_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeHeapCfgHeapDetails(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psHeapCfgHeapDetailsIN_UI8, + IMG_UINT8 * 
psHeapCfgHeapDetailsOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS *psHeapCfgHeapDetailsIN = + (PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS *) + IMG_OFFSET_ADDR(psHeapCfgHeapDetailsIN_UI8, 0); + PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS *psHeapCfgHeapDetailsOUT = + (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS *) + IMG_OFFSET_ADDR(psHeapCfgHeapDetailsOUT_UI8, 0); + + IMG_CHAR *puiHeapNameOutInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) + 0; + + if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz > + DEVMEM_HEAPNAME_MAXLENGTH) + { + psHeapCfgHeapDetailsOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto HeapCfgHeapDetails_exit; + } + + psHeapCfgHeapDetailsOUT->puiHeapNameOut = + psHeapCfgHeapDetailsIN->puiHeapNameOut; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psHeapCfgHeapDetailsIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psHeapCfgHeapDetailsIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psHeapCfgHeapDetailsOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto HeapCfgHeapDetails_exit; + } + } + } + + if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz != 0) + { + puiHeapNameOutInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * + sizeof(IMG_CHAR); + } + + psHeapCfgHeapDetailsOUT->eError = + HeapCfgHeapDetails(psConnection, OSGetDevNode(psConnection), + psHeapCfgHeapDetailsIN->ui32HeapConfigIndex, + psHeapCfgHeapDetailsIN->ui32HeapIndex, + psHeapCfgHeapDetailsIN->ui32HeapNameBufSz, + puiHeapNameOutInt, + &psHeapCfgHeapDetailsOUT->sDevVAddrBase, + &psHeapCfgHeapDetailsOUT->uiHeapLength, + &psHeapCfgHeapDetailsOUT->uiReservedRegionLength, + &psHeapCfgHeapDetailsOUT-> + ui32Log2DataPageSizeOut, + &psHeapCfgHeapDetailsOUT-> + ui32Log2ImportAlignmentOut); + + /* If dest ptr is non-null and we have data to copy */ + if ((puiHeapNameOutInt) && + ((psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) > + 0)) + { + if (unlikely + (OSCopyToUser + (NULL, + (void __user *)psHeapCfgHeapDetailsOUT->puiHeapNameOut, + puiHeapNameOutInt, + (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * + sizeof(IMG_CHAR))) != PVRSRV_OK)) + { + psHeapCfgHeapDetailsOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto HeapCfgHeapDetails_exit; + } + } + +HeapCfgHeapDetails_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + 
OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntRegisterPFNotifyKM(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psDevmemIntRegisterPFNotifyKMIN_UI8, + IMG_UINT8 * + psDevmemIntRegisterPFNotifyKMOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM + *psDevmemIntRegisterPFNotifyKMIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM *) + IMG_OFFSET_ADDR(psDevmemIntRegisterPFNotifyKMIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM + *psDevmemIntRegisterPFNotifyKMOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *) + IMG_OFFSET_ADDR(psDevmemIntRegisterPFNotifyKMOUT_UI8, 0); + + IMG_HANDLE hDevmemCtx = psDevmemIntRegisterPFNotifyKMIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntRegisterPFNotifyKMOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemCtxInt, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + IMG_TRUE); + if (unlikely(psDevmemIntRegisterPFNotifyKMOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntRegisterPFNotifyKM_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntRegisterPFNotifyKMOUT->eError = + DevmemIntRegisterPFNotifyKM(psDevmemCtxInt, + psDevmemIntRegisterPFNotifyKMIN-> + ui32PID, + psDevmemIntRegisterPFNotifyKMIN-> + bRegister); + +DevmemIntRegisterPFNotifyKM_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemCtxInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeGetMaxDevMemSize(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psGetMaxDevMemSizeIN_UI8, + IMG_UINT8 * psGetMaxDevMemSizeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_GETMAXDEVMEMSIZE *psGetMaxDevMemSizeIN = + (PVRSRV_BRIDGE_IN_GETMAXDEVMEMSIZE *) + IMG_OFFSET_ADDR(psGetMaxDevMemSizeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_GETMAXDEVMEMSIZE *psGetMaxDevMemSizeOUT = + (PVRSRV_BRIDGE_OUT_GETMAXDEVMEMSIZE *) + IMG_OFFSET_ADDR(psGetMaxDevMemSizeOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psGetMaxDevMemSizeIN); + + psGetMaxDevMemSizeOUT->eError = + PVRSRVGetMaxDevMemSizeKM(psConnection, OSGetDevNode(psConnection), + &psGetMaxDevMemSizeOUT->uiLMASize, + &psGetMaxDevMemSizeOUT->uiUMASize); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemGetFaultAddress(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemGetFaultAddressIN_UI8, + IMG_UINT8 * psDevmemGetFaultAddressOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressIN = + (PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *) + IMG_OFFSET_ADDR(psDevmemGetFaultAddressIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *) + IMG_OFFSET_ADDR(psDevmemGetFaultAddressOUT_UI8, 0); + + IMG_HANDLE hDevmemCtx = psDevmemGetFaultAddressIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemGetFaultAddressOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemCtxInt, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + IMG_TRUE); + if (unlikely(psDevmemGetFaultAddressOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemGetFaultAddress_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psDevmemGetFaultAddressOUT->eError = + DevmemIntGetFaultAddress(psConnection, OSGetDevNode(psConnection), + psDevmemCtxInt, + &psDevmemGetFaultAddressOUT-> + sFaultAddress); + +DevmemGetFaultAddress_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemCtxInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + +static IMG_INT +PVRSRVBridgePVRSRVUpdateOOMStats(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPVRSRVUpdateOOMStatsIN_UI8, + IMG_UINT8 * psPVRSRVUpdateOOMStatsOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS *psPVRSRVUpdateOOMStatsIN = + (PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS *) + IMG_OFFSET_ADDR(psPVRSRVUpdateOOMStatsIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS *psPVRSRVUpdateOOMStatsOUT = + (PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS *) + IMG_OFFSET_ADDR(psPVRSRVUpdateOOMStatsOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psConnection); + + psPVRSRVUpdateOOMStatsOUT->eError = + PVRSRVServerUpdateOOMStats(psPVRSRVUpdateOOMStatsIN-> + ui32ui32StatType, + psPVRSRVUpdateOOMStatsIN->ui32pid); + + return 0; +} + +#else +#define PVRSRVBridgePVRSRVUpdateOOMStats NULL +#endif + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitMMBridge(void); +PVRSRV_ERROR DeinitMMBridge(void); + +/* + * Register all MM functions with services + */ +PVRSRV_ERROR InitMMBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMREXPORTPMR, + PVRSRVBridgePMRExportPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR, + 
PVRSRVBridgePMRUnexportPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRGETUID, + PVRSRVBridgePMRGetUID, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE, + PVRSRVBridgePMRMakeLocalImportHandle, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE, + PVRSRVBridgePMRUnmakeLocalImportHandle, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRIMPORTPMR, + PVRSRVBridgePMRImportPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR, + PVRSRVBridgePMRLocalImportPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR, + PVRSRVBridgePMRUnrefPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR, + PVRSRVBridgePMRUnrefUnlockPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR, + PVRSRVBridgePhysmemNewRamBackedPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR, + PVRSRVBridgePhysmemNewRamBackedLockedPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPIN, + PVRSRVBridgeDevmemIntPin, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN, + PVRSRVBridgeDevmemIntUnpin, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE, + PVRSRVBridgeDevmemIntPinValidate, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE, + PVRSRVBridgeDevmemIntUnpinInvalidate, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE, + PVRSRVBridgeDevmemIntCtxCreate, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY, + PVRSRVBridgeDevmemIntCtxDestroy, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE, + 
PVRSRVBridgeDevmemIntHeapCreate, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY, + PVRSRVBridgeDevmemIntHeapDestroy, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR, + PVRSRVBridgeDevmemIntMapPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR, + PVRSRVBridgeDevmemIntUnmapPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE, + PVRSRVBridgeDevmemIntReserveRange, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE, + PVRSRVBridgeDevmemIntUnreserveRange, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_CHANGESPARSEMEM, + PVRSRVBridgeChangeSparseMem, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES, + PVRSRVBridgeDevmemIntMapPages, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES, + PVRSRVBridgeDevmemIntUnmapPages, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID, + PVRSRVBridgeDevmemIsVDevAddrValid, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE, + PVRSRVBridgeDevmemFlushDevSLCRange, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE, + PVRSRVBridgeDevmemInvalidateFBSCTable, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT, + PVRSRVBridgeHeapCfgHeapConfigCount, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT, + PVRSRVBridgeHeapCfgHeapCount, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME, + PVRSRVBridgeHeapCfgHeapConfigName, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS, + PVRSRVBridgeHeapCfgHeapDetails, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + 
PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM, + PVRSRVBridgeDevmemIntRegisterPFNotifyKM, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_GETMAXDEVMEMSIZE, + PVRSRVBridgeGetMaxDevMemSize, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS, + PVRSRVBridgeDevmemGetFaultAddress, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS, + PVRSRVBridgePVRSRVUpdateOOMStats, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all mm functions with services + */ +PVRSRV_ERROR DeinitMMBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PMREXPORTPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRGETUID); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PMRIMPORTPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTPIN); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE); + + 
UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_CHANGESPARSEMEM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_GETMAXDEVMEMSIZE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/pdump_bridge/client_pdump_bridge.h 
b/drivers/mcst/gpu-imgtec/generated/rogue/pdump_bridge/client_pdump_bridge.h new file mode 100644 index 000000000000..9c360f418d2d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/pdump_bridge/client_pdump_bridge.h @@ -0,0 +1,153 @@ +/******************************************************************************* +@File +@Title Client bridge header for pdump +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for pdump +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef CLIENT_PDUMP_BRIDGE_H +#define CLIENT_PDUMP_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_pdump_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemPDumpBitmap(IMG_HANDLE + hBridge, + IMG_CHAR * + puiFileName, + IMG_UINT32 + ui32FileOffset, + IMG_UINT32 + ui32Width, + IMG_UINT32 + ui32Height, + IMG_UINT32 + ui32StrideInBytes, + IMG_DEV_VIRTADDR + sDevBaseAddr, + IMG_HANDLE + hDevmemCtx, + IMG_UINT32 + ui32Size, + PDUMP_PIXEL_FORMAT + ePixelFormat, + IMG_UINT32 + ui32AddrMode, + IMG_UINT32 + ui32PDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpImageDescriptor(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_UINT32 + ui32StringSize, + const IMG_CHAR + * puiFileName, + IMG_DEV_VIRTADDR + sDataDevAddr, + IMG_UINT32 + ui32DataSize, + IMG_UINT32 + ui32LogicalWidth, + IMG_UINT32 + ui32LogicalHeight, + IMG_UINT32 + ui32PhysicalWidth, + IMG_UINT32 + ui32PhysicalHeight, + PDUMP_PIXEL_FORMAT + ePixelFormat, + IMG_MEMLAYOUT + eMemLayout, + IMG_FB_COMPRESSION + eFBCompression, + const + IMG_UINT32 * + pui32FBCClearColour, + PDUMP_FBC_SWIZZLE + eeFBCSwizzle, + IMG_DEV_VIRTADDR + sHeaderDevAddr, + IMG_UINT32 + ui32HeaderSize, + IMG_UINT32 + ui32PDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR 
IMG_CALLCONV BridgePVRSRVPDumpComment(IMG_HANDLE + hBridge, + IMG_CHAR * + puiComment, + IMG_UINT32 + ui32Flags); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpSetFrame(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32Frame); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpDataDescriptor(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_UINT32 + ui32StringSize, + const IMG_CHAR + * puiFileName, + IMG_DEV_VIRTADDR + sDataDevAddr, + IMG_UINT32 + ui32DataSize, + IMG_UINT32 + ui32HeaderType, + IMG_UINT32 + ui32ElementType, + IMG_UINT32 + ui32ElementCount, + IMG_UINT32 + ui32PDumpFlags); + +#endif /* CLIENT_PDUMP_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/pdump_bridge/client_pdump_direct_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/pdump_bridge/client_pdump_direct_bridge.c new file mode 100644 index 000000000000..9b54a7d23138 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/pdump_bridge/client_pdump_direct_bridge.c @@ -0,0 +1,228 @@ +/******************************************************************************* +@File +@Title Direct client bridge for pdump +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for pdump + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include "client_pdump_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "devicemem_typedefs.h" +#include "pdumpdefs.h" +#include + +#include "devicemem_server.h" +#include "pdump_km.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemPDumpBitmap(IMG_HANDLE + hBridge, + IMG_CHAR * + puiFileName, + IMG_UINT32 + ui32FileOffset, + IMG_UINT32 + ui32Width, + IMG_UINT32 + ui32Height, + IMG_UINT32 + ui32StrideInBytes, + IMG_DEV_VIRTADDR + sDevBaseAddr, + IMG_HANDLE + hDevmemCtx, + IMG_UINT32 + ui32Size, + PDUMP_PIXEL_FORMAT + ePixelFormat, + IMG_UINT32 + ui32AddrMode, + IMG_UINT32 + ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtxInt; + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = + DevmemIntPDumpBitmap(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + puiFileName, + ui32FileOffset, + ui32Width, + ui32Height, + ui32StrideInBytes, + sDevBaseAddr, + psDevmemCtxInt, + ui32Size, + ePixelFormat, ui32AddrMode, ui32PDumpFlags); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpImageDescriptor(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_UINT32 + ui32StringSize, + const IMG_CHAR + * puiFileName, + IMG_DEV_VIRTADDR + sDataDevAddr, + IMG_UINT32 + ui32DataSize, + IMG_UINT32 + ui32LogicalWidth, + IMG_UINT32 + ui32LogicalHeight, + IMG_UINT32 + ui32PhysicalWidth, + IMG_UINT32 + ui32PhysicalHeight, + PDUMP_PIXEL_FORMAT + ePixelFormat, + IMG_MEMLAYOUT + eMemLayout, + IMG_FB_COMPRESSION + eFBCompression, + const + IMG_UINT32 * + pui32FBCClearColour, + PDUMP_FBC_SWIZZLE + eeFBCSwizzle, + IMG_DEV_VIRTADDR + sHeaderDevAddr, + IMG_UINT32 + ui32HeaderSize, + IMG_UINT32 + ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtxInt; + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = + DevmemIntPDumpImageDescriptor(NULL, + (PVRSRV_DEVICE_NODE *) ((void *) + 
hBridge), + psDevmemCtxInt, ui32StringSize, + puiFileName, sDataDevAddr, + ui32DataSize, ui32LogicalWidth, + ui32LogicalHeight, ui32PhysicalWidth, + ui32PhysicalHeight, ePixelFormat, + eMemLayout, eFBCompression, + pui32FBCClearColour, eeFBCSwizzle, + sHeaderDevAddr, ui32HeaderSize, + ui32PDumpFlags); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpComment(IMG_HANDLE + hBridge, + IMG_CHAR * + puiComment, + IMG_UINT32 + ui32Flags) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = PDumpCommentKM(puiComment, ui32Flags); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpSetFrame(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32Frame) +{ + PVRSRV_ERROR eError; + + eError = + PDumpSetFrameKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + ui32Frame); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpDataDescriptor(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_UINT32 + ui32StringSize, + const IMG_CHAR + * puiFileName, + IMG_DEV_VIRTADDR + sDataDevAddr, + IMG_UINT32 + ui32DataSize, + IMG_UINT32 + ui32HeaderType, + IMG_UINT32 + ui32ElementType, + IMG_UINT32 + ui32ElementCount, + IMG_UINT32 + ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtxInt; + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = + DevmemIntPDumpDataDescriptor(NULL, + (PVRSRV_DEVICE_NODE *) ((void *) + hBridge), + psDevmemCtxInt, ui32StringSize, + puiFileName, sDataDevAddr, + ui32DataSize, ui32HeaderType, + ui32ElementType, ui32ElementCount, + ui32PDumpFlags); + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/pdump_bridge/common_pdump_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/pdump_bridge/common_pdump_bridge.h new file mode 100644 index 000000000000..4e35fc24c1c1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/pdump_bridge/common_pdump_bridge.h @@ -0,0 +1,180 @@ 
+/******************************************************************************* +@File +@Title Common bridge header for pdump +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for pdump +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_PDUMP_BRIDGE_H +#define COMMON_PDUMP_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "devicemem_typedefs.h" +#include "pdumpdefs.h" +#include + +#define PVRSRV_BRIDGE_PDUMP_CMD_FIRST 0 +#define PVRSRV_BRIDGE_PDUMP_DEVMEMPDUMPBITMAP PVRSRV_BRIDGE_PDUMP_CMD_FIRST+0 +#define PVRSRV_BRIDGE_PDUMP_PDUMPIMAGEDESCRIPTOR PVRSRV_BRIDGE_PDUMP_CMD_FIRST+1 +#define PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPCOMMENT PVRSRV_BRIDGE_PDUMP_CMD_FIRST+2 +#define PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPSETFRAME PVRSRV_BRIDGE_PDUMP_CMD_FIRST+3 +#define PVRSRV_BRIDGE_PDUMP_PDUMPDATADESCRIPTOR PVRSRV_BRIDGE_PDUMP_CMD_FIRST+4 +#define PVRSRV_BRIDGE_PDUMP_CMD_LAST (PVRSRV_BRIDGE_PDUMP_CMD_FIRST+4) + +/******************************************* + DevmemPDumpBitmap + *******************************************/ + +/* Bridge in structure for DevmemPDumpBitmap */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMPDUMPBITMAP_TAG +{ + IMG_CHAR *puiFileName; + IMG_UINT32 ui32FileOffset; + IMG_UINT32 ui32Width; + IMG_UINT32 ui32Height; + IMG_UINT32 ui32StrideInBytes; + IMG_DEV_VIRTADDR sDevBaseAddr; + IMG_HANDLE hDevmemCtx; + IMG_UINT32 ui32Size; + PDUMP_PIXEL_FORMAT ePixelFormat; + IMG_UINT32 ui32AddrMode; + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMPDUMPBITMAP; + +/* Bridge out structure for DevmemPDumpBitmap */ +typedef struct 
PVRSRV_BRIDGE_OUT_DEVMEMPDUMPBITMAP_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMPDUMPBITMAP; + +/******************************************* + PDumpImageDescriptor + *******************************************/ + +/* Bridge in structure for PDumpImageDescriptor */ +typedef struct PVRSRV_BRIDGE_IN_PDUMPIMAGEDESCRIPTOR_TAG +{ + IMG_HANDLE hDevmemCtx; + IMG_UINT32 ui32StringSize; + const IMG_CHAR *puiFileName; + IMG_DEV_VIRTADDR sDataDevAddr; + IMG_UINT32 ui32DataSize; + IMG_UINT32 ui32LogicalWidth; + IMG_UINT32 ui32LogicalHeight; + IMG_UINT32 ui32PhysicalWidth; + IMG_UINT32 ui32PhysicalHeight; + PDUMP_PIXEL_FORMAT ePixelFormat; + IMG_MEMLAYOUT eMemLayout; + IMG_FB_COMPRESSION eFBCompression; + const IMG_UINT32 *pui32FBCClearColour; + PDUMP_FBC_SWIZZLE eeFBCSwizzle; + IMG_DEV_VIRTADDR sHeaderDevAddr; + IMG_UINT32 ui32HeaderSize; + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PDUMPIMAGEDESCRIPTOR; + +/* Bridge out structure for PDumpImageDescriptor */ +typedef struct PVRSRV_BRIDGE_OUT_PDUMPIMAGEDESCRIPTOR_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PDUMPIMAGEDESCRIPTOR; + +/******************************************* + PVRSRVPDumpComment + *******************************************/ + +/* Bridge in structure for PVRSRVPDumpComment */ +typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT_TAG +{ + IMG_CHAR *puiComment; + IMG_UINT32 ui32Flags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT; + +/* Bridge out structure for PVRSRVPDumpComment */ +typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT; + +/******************************************* + PVRSRVPDumpSetFrame + *******************************************/ + +/* Bridge in structure for PVRSRVPDumpSetFrame */ +typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME_TAG +{ + IMG_UINT32 ui32Frame; +} __attribute__ 
((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME; + +/* Bridge out structure for PVRSRVPDumpSetFrame */ +typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME; + +/******************************************* + PDumpDataDescriptor + *******************************************/ + +/* Bridge in structure for PDumpDataDescriptor */ +typedef struct PVRSRV_BRIDGE_IN_PDUMPDATADESCRIPTOR_TAG +{ + IMG_HANDLE hDevmemCtx; + IMG_UINT32 ui32StringSize; + const IMG_CHAR *puiFileName; + IMG_DEV_VIRTADDR sDataDevAddr; + IMG_UINT32 ui32DataSize; + IMG_UINT32 ui32HeaderType; + IMG_UINT32 ui32ElementType; + IMG_UINT32 ui32ElementCount; + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PDUMPDATADESCRIPTOR; + +/* Bridge out structure for PDumpDataDescriptor */ +typedef struct PVRSRV_BRIDGE_OUT_PDUMPDATADESCRIPTOR_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PDUMPDATADESCRIPTOR; + +#endif /* COMMON_PDUMP_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/pdump_bridge/server_pdump_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/pdump_bridge/server_pdump_bridge.c new file mode 100644 index 000000000000..8433df3a9ad2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/pdump_bridge/server_pdump_bridge.c @@ -0,0 +1,758 @@ +/******************************************************************************* +@File +@Title Server bridge for pdump +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for pdump +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_server.h"
+#include "pdump_km.h"
+
+#include "common_pdump_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeDevmemPDumpBitmap(IMG_UINT32 ui32DispatchTableEntry,
+			      IMG_UINT8 * psDevmemPDumpBitmapIN_UI8,
+			      IMG_UINT8 * psDevmemPDumpBitmapOUT_UI8,
+			      CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_DEVMEMPDUMPBITMAP *psDevmemPDumpBitmapIN =
+	    (PVRSRV_BRIDGE_IN_DEVMEMPDUMPBITMAP *)
+	    IMG_OFFSET_ADDR(psDevmemPDumpBitmapIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_DEVMEMPDUMPBITMAP *psDevmemPDumpBitmapOUT =
+	    (PVRSRV_BRIDGE_OUT_DEVMEMPDUMPBITMAP *)
+	    IMG_OFFSET_ADDR(psDevmemPDumpBitmapOUT_UI8, 0);
+
+	IMG_CHAR *uiFileNameInt = NULL;
+	IMG_HANDLE hDevmemCtx = psDevmemPDumpBitmapIN->hDevmemCtx;
+	DEVMEMINT_CTX *psDevmemCtxInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (PVRSRV_PDUMP_MAX_FILENAME_SIZE * sizeof(IMG_CHAR)) + 0;
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psDevmemPDumpBitmapIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psDevmemPDumpBitmapIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psDevmemPDumpBitmapOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto DevmemPDumpBitmap_exit; + } + } + } + + { + uiFileNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + PVRSRV_PDUMP_MAX_FILENAME_SIZE * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_PDUMP_MAX_FILENAME_SIZE * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiFileNameInt, + (const void __user *)psDevmemPDumpBitmapIN->puiFileName, + PVRSRV_PDUMP_MAX_FILENAME_SIZE * sizeof(IMG_CHAR)) != + PVRSRV_OK) + { + psDevmemPDumpBitmapOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto DevmemPDumpBitmap_exit; + } + ((IMG_CHAR *) + uiFileNameInt)[(PVRSRV_PDUMP_MAX_FILENAME_SIZE * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemPDumpBitmapOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemCtxInt, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + IMG_TRUE); + if (unlikely(psDevmemPDumpBitmapOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemPDumpBitmap_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psDevmemPDumpBitmapOUT->eError = + DevmemIntPDumpBitmap(psConnection, OSGetDevNode(psConnection), + uiFileNameInt, + psDevmemPDumpBitmapIN->ui32FileOffset, + psDevmemPDumpBitmapIN->ui32Width, + psDevmemPDumpBitmapIN->ui32Height, + psDevmemPDumpBitmapIN->ui32StrideInBytes, + psDevmemPDumpBitmapIN->sDevBaseAddr, + psDevmemCtxInt, + psDevmemPDumpBitmapIN->ui32Size, + psDevmemPDumpBitmapIN->ePixelFormat, + psDevmemPDumpBitmapIN->ui32AddrMode, + psDevmemPDumpBitmapIN->ui32PDumpFlags); + +DevmemPDumpBitmap_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemCtxInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgePDumpImageDescriptor(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPDumpImageDescriptorIN_UI8, + IMG_UINT8 * psPDumpImageDescriptorOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PDUMPIMAGEDESCRIPTOR *psPDumpImageDescriptorIN = + (PVRSRV_BRIDGE_IN_PDUMPIMAGEDESCRIPTOR *) + IMG_OFFSET_ADDR(psPDumpImageDescriptorIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PDUMPIMAGEDESCRIPTOR *psPDumpImageDescriptorOUT = + (PVRSRV_BRIDGE_OUT_PDUMPIMAGEDESCRIPTOR *) + IMG_OFFSET_ADDR(psPDumpImageDescriptorOUT_UI8, 0); + + IMG_HANDLE hDevmemCtx = psPDumpImageDescriptorIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + IMG_CHAR *uiFileNameInt = NULL; + IMG_UINT32 *ui32FBCClearColourInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + 
IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psPDumpImageDescriptorIN->ui32StringSize * sizeof(IMG_CHAR)) + + (4 * sizeof(IMG_UINT32)) + 0; + + if (unlikely + (psPDumpImageDescriptorIN->ui32StringSize > + PVRSRV_PDUMP_MAX_FILENAME_SIZE)) + { + psPDumpImageDescriptorOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PDumpImageDescriptor_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psPDumpImageDescriptorIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psPDumpImageDescriptorIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psPDumpImageDescriptorOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto PDumpImageDescriptor_exit; + } + } + } + + if (psPDumpImageDescriptorIN->ui32StringSize != 0) + { + uiFileNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psPDumpImageDescriptorIN->ui32StringSize * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psPDumpImageDescriptorIN->ui32StringSize * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiFileNameInt, + (const void __user *)psPDumpImageDescriptorIN->puiFileName, + psPDumpImageDescriptorIN->ui32StringSize * + sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psPDumpImageDescriptorOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PDumpImageDescriptor_exit; + } + ((IMG_CHAR *) + 
uiFileNameInt)[(psPDumpImageDescriptorIN->ui32StringSize * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + + { + ui32FBCClearColourInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += 4 * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (4 * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32FBCClearColourInt, + (const void __user *)psPDumpImageDescriptorIN-> + pui32FBCClearColour, 4 * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psPDumpImageDescriptorOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PDumpImageDescriptor_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPDumpImageDescriptorOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemCtxInt, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + IMG_TRUE); + if (unlikely(psPDumpImageDescriptorOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PDumpImageDescriptor_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psPDumpImageDescriptorOUT->eError = + DevmemIntPDumpImageDescriptor(psConnection, + OSGetDevNode(psConnection), + psDevmemCtxInt, + psPDumpImageDescriptorIN-> + ui32StringSize, uiFileNameInt, + psPDumpImageDescriptorIN-> + sDataDevAddr, + psPDumpImageDescriptorIN-> + ui32DataSize, + psPDumpImageDescriptorIN-> + ui32LogicalWidth, + psPDumpImageDescriptorIN-> + ui32LogicalHeight, + psPDumpImageDescriptorIN-> + ui32PhysicalWidth, + psPDumpImageDescriptorIN-> + ui32PhysicalHeight, + psPDumpImageDescriptorIN-> + ePixelFormat, + psPDumpImageDescriptorIN->eMemLayout, + psPDumpImageDescriptorIN-> + eFBCompression, ui32FBCClearColourInt, + psPDumpImageDescriptorIN-> + eeFBCSwizzle, + psPDumpImageDescriptorIN-> + sHeaderDevAddr, + psPDumpImageDescriptorIN-> + ui32HeaderSize, + psPDumpImageDescriptorIN-> + ui32PDumpFlags); + +PDumpImageDescriptor_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemCtxInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgePVRSRVPDumpComment(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPVRSRVPDumpCommentIN_UI8, + IMG_UINT8 * psPVRSRVPDumpCommentOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT *psPVRSRVPDumpCommentIN = + (PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT *) + IMG_OFFSET_ADDR(psPVRSRVPDumpCommentIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT *psPVRSRVPDumpCommentOUT = + (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT *) + IMG_OFFSET_ADDR(psPVRSRVPDumpCommentOUT_UI8, 0); + + IMG_CHAR *uiCommentInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (PVRSRV_PDUMP_MAX_COMMENT_SIZE * sizeof(IMG_CHAR)) + 0; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psPVRSRVPDumpCommentIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psPVRSRVPDumpCommentIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psPVRSRVPDumpCommentOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto PVRSRVPDumpComment_exit; + } + } + } + + { + uiCommentInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + PVRSRV_PDUMP_MAX_COMMENT_SIZE * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_PDUMP_MAX_COMMENT_SIZE * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiCommentInt, + (const void __user *)psPVRSRVPDumpCommentIN->puiComment, + PVRSRV_PDUMP_MAX_COMMENT_SIZE * sizeof(IMG_CHAR)) != + PVRSRV_OK) + { + psPVRSRVPDumpCommentOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PVRSRVPDumpComment_exit; + } + ((IMG_CHAR *) + uiCommentInt)[(PVRSRV_PDUMP_MAX_COMMENT_SIZE * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + + psPVRSRVPDumpCommentOUT->eError = + PDumpCommentKM(uiCommentInt, psPVRSRVPDumpCommentIN->ui32Flags); + +PVRSRVPDumpComment_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgePVRSRVPDumpSetFrame(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPVRSRVPDumpSetFrameIN_UI8, + IMG_UINT8 * psPVRSRVPDumpSetFrameOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME *psPVRSRVPDumpSetFrameIN = + (PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME *) + IMG_OFFSET_ADDR(psPVRSRVPDumpSetFrameIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME *psPVRSRVPDumpSetFrameOUT = + 
(PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME *) + IMG_OFFSET_ADDR(psPVRSRVPDumpSetFrameOUT_UI8, 0); + + psPVRSRVPDumpSetFrameOUT->eError = + PDumpSetFrameKM(psConnection, OSGetDevNode(psConnection), + psPVRSRVPDumpSetFrameIN->ui32Frame); + + return 0; +} + +static IMG_INT +PVRSRVBridgePDumpDataDescriptor(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPDumpDataDescriptorIN_UI8, + IMG_UINT8 * psPDumpDataDescriptorOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PDUMPDATADESCRIPTOR *psPDumpDataDescriptorIN = + (PVRSRV_BRIDGE_IN_PDUMPDATADESCRIPTOR *) + IMG_OFFSET_ADDR(psPDumpDataDescriptorIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PDUMPDATADESCRIPTOR *psPDumpDataDescriptorOUT = + (PVRSRV_BRIDGE_OUT_PDUMPDATADESCRIPTOR *) + IMG_OFFSET_ADDR(psPDumpDataDescriptorOUT_UI8, 0); + + IMG_HANDLE hDevmemCtx = psPDumpDataDescriptorIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + IMG_CHAR *uiFileNameInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psPDumpDataDescriptorIN->ui32StringSize * sizeof(IMG_CHAR)) + 0; + + if (unlikely + (psPDumpDataDescriptorIN->ui32StringSize > + PVRSRV_PDUMP_MAX_FILENAME_SIZE)) + { + psPDumpDataDescriptorOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PDumpDataDescriptor_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psPDumpDataDescriptorIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psPDumpDataDescriptorIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psPDumpDataDescriptorOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto PDumpDataDescriptor_exit; + } + } + } + + if (psPDumpDataDescriptorIN->ui32StringSize != 0) + { + uiFileNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psPDumpDataDescriptorIN->ui32StringSize * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psPDumpDataDescriptorIN->ui32StringSize * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiFileNameInt, + (const void __user *)psPDumpDataDescriptorIN->puiFileName, + psPDumpDataDescriptorIN->ui32StringSize * + sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psPDumpDataDescriptorOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PDumpDataDescriptor_exit; + } + ((IMG_CHAR *) + uiFileNameInt)[(psPDumpDataDescriptorIN->ui32StringSize * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPDumpDataDescriptorOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemCtxInt, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + IMG_TRUE); + if (unlikely(psPDumpDataDescriptorOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PDumpDataDescriptor_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psPDumpDataDescriptorOUT->eError = + DevmemIntPDumpDataDescriptor(psConnection, + OSGetDevNode(psConnection), + psDevmemCtxInt, + psPDumpDataDescriptorIN-> + ui32StringSize, uiFileNameInt, + psPDumpDataDescriptorIN->sDataDevAddr, + psPDumpDataDescriptorIN->ui32DataSize, + psPDumpDataDescriptorIN-> + ui32HeaderType, + psPDumpDataDescriptorIN-> + ui32ElementType, + psPDumpDataDescriptorIN-> + ui32ElementCount, + psPDumpDataDescriptorIN-> + ui32PDumpFlags); + +PDumpDataDescriptor_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemCtxInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitPDUMPBridge(void); +PVRSRV_ERROR DeinitPDUMPBridge(void); + +/* + * Register all PDUMP functions with services + */ +PVRSRV_ERROR InitPDUMPBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, + PVRSRV_BRIDGE_PDUMP_DEVMEMPDUMPBITMAP, + PVRSRVBridgeDevmemPDumpBitmap, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, + PVRSRV_BRIDGE_PDUMP_PDUMPIMAGEDESCRIPTOR, + PVRSRVBridgePDumpImageDescriptor, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, + PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPCOMMENT, + PVRSRVBridgePVRSRVPDumpComment, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, + PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPSETFRAME, + 
PVRSRVBridgePVRSRVPDumpSetFrame, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, + PVRSRV_BRIDGE_PDUMP_PDUMPDATADESCRIPTOR, + PVRSRVBridgePDumpDataDescriptor, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all pdump functions with services + */ +PVRSRV_ERROR DeinitPDUMPBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, + PVRSRV_BRIDGE_PDUMP_DEVMEMPDUMPBITMAP); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, + PVRSRV_BRIDGE_PDUMP_PDUMPIMAGEDESCRIPTOR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, + PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPCOMMENT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, + PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPSETFRAME); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, + PVRSRV_BRIDGE_PDUMP_PDUMPDATADESCRIPTOR); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/pdumpctrl_bridge/client_pdumpctrl_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/pdumpctrl_bridge/client_pdumpctrl_bridge.h new file mode 100644 index 000000000000..0dedace31089 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/pdumpctrl_bridge/client_pdumpctrl_bridge.h @@ -0,0 +1,82 @@ +/******************************************************************************* +@File +@Title Client bridge header for pdumpctrl +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for pdumpctrl +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef CLIENT_PDUMPCTRL_BRIDGE_H +#define CLIENT_PDUMPCTRL_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_pdumpctrl_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpGetState(IMG_HANDLE + hBridge, + IMG_UINT64 * + pui64State); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpGetFrame(IMG_HANDLE + hBridge, + IMG_UINT32 * + pui32Frame); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgePVRSRVPDumpSetDefaultCaptureParams(IMG_HANDLE hBridge, + IMG_UINT32 ui32Mode, + IMG_UINT32 ui32Start, + IMG_UINT32 ui32End, + IMG_UINT32 ui32Interval, + IMG_UINT32 ui32MaxParamFileSize); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgePVRSRVPDumpIsLastCaptureFrame(IMG_HANDLE hBridge, + IMG_BOOL * pbpbIsLastCaptureFrame); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgePVRSRVPDumpForceCaptureStop(IMG_HANDLE hBridge); + +#endif /* CLIENT_PDUMPCTRL_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.c new file mode 100644 index 000000000000..49b4800da7ee --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.c @@ -0,0 +1,120 @@ +/******************************************************************************* +@File +@Title Direct client bridge for pdumpctrl +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for pdumpctrl + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include "client_pdumpctrl_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ + +#include "pdump_km.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpGetState(IMG_HANDLE + hBridge, + IMG_UINT64 * + pui64State) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = PDumpGetStateKM(pui64State); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpGetFrame(IMG_HANDLE + hBridge, + IMG_UINT32 * + pui32Frame) +{ + PVRSRV_ERROR eError; + + eError = + PDumpGetFrameKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + pui32Frame); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgePVRSRVPDumpSetDefaultCaptureParams(IMG_HANDLE hBridge, + IMG_UINT32 ui32Mode, + IMG_UINT32 ui32Start, + IMG_UINT32 ui32End, + IMG_UINT32 ui32Interval, + IMG_UINT32 ui32MaxParamFileSize) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = + PDumpSetDefaultCaptureParamsKM(ui32Mode, + ui32Start, + ui32End, + ui32Interval, ui32MaxParamFileSize); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgePVRSRVPDumpIsLastCaptureFrame(IMG_HANDLE hBridge, + IMG_BOOL * pbpbIsLastCaptureFrame) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = PDumpIsLastCaptureFrameKM(pbpbIsLastCaptureFrame); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgePVRSRVPDumpForceCaptureStop(IMG_HANDLE hBridge) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = PDumpForceCaptureStopKM(); + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/pdumpctrl_bridge/common_pdumpctrl_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/pdumpctrl_bridge/common_pdumpctrl_bridge.h new file mode 100644 index 000000000000..1f3ff6d970c2 --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/generated/rogue/pdumpctrl_bridge/common_pdumpctrl_bridge.h @@ -0,0 +1,148 @@ +/******************************************************************************* +@File +@Title Common bridge header for pdumpctrl +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for pdumpctrl +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_PDUMPCTRL_BRIDGE_H +#define COMMON_PDUMPCTRL_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#define PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST 0 +#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETSTATE PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+0 +#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+1 +#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+2 +#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISLASTCAPTUREFRAME PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+3 +#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPFORCECAPTURESTOP PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+4 +#define PVRSRV_BRIDGE_PDUMPCTRL_CMD_LAST (PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+4) + +/******************************************* + PVRSRVPDumpGetState + *******************************************/ + +/* Bridge in structure for PVRSRVPDumpGetState */ +typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETSTATE_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETSTATE; + +/* Bridge out structure for PVRSRVPDumpGetState */ +typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETSTATE_TAG +{ + IMG_UINT64 ui64State; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETSTATE; + +/******************************************* + PVRSRVPDumpGetFrame + 
*******************************************/ + +/* Bridge in structure for PVRSRVPDumpGetFrame */ +typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME; + +/* Bridge out structure for PVRSRVPDumpGetFrame */ +typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME_TAG +{ + IMG_UINT32 ui32Frame; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME; + +/******************************************* + PVRSRVPDumpSetDefaultCaptureParams + *******************************************/ + +/* Bridge in structure for PVRSRVPDumpSetDefaultCaptureParams */ +typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS_TAG +{ + IMG_UINT32 ui32Mode; + IMG_UINT32 ui32Start; + IMG_UINT32 ui32End; + IMG_UINT32 ui32Interval; + IMG_UINT32 ui32MaxParamFileSize; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS; + +/* Bridge out structure for PVRSRVPDumpSetDefaultCaptureParams */ +typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS; + +/******************************************* + PVRSRVPDumpIsLastCaptureFrame + *******************************************/ + +/* Bridge in structure for PVRSRVPDumpIsLastCaptureFrame */ +typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME; + +/* Bridge out structure for PVRSRVPDumpIsLastCaptureFrame */ +typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME_TAG +{ + IMG_BOOL bpbIsLastCaptureFrame; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME; + +/******************************************* + PVRSRVPDumpForceCaptureStop + 
*******************************************/ + +/* Bridge in structure for PVRSRVPDumpForceCaptureStop */ +typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPFORCECAPTURESTOP_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPFORCECAPTURESTOP; + +/* Bridge out structure for PVRSRVPDumpForceCaptureStop */ +typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPFORCECAPTURESTOP_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPFORCECAPTURESTOP; + +#endif /* COMMON_PDUMPCTRL_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/pdumpctrl_bridge/server_pdumpctrl_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/pdumpctrl_bridge/server_pdumpctrl_bridge.c new file mode 100644 index 000000000000..4aa153d5d9c9 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/pdumpctrl_bridge/server_pdumpctrl_bridge.c @@ -0,0 +1,266 @@ +/******************************************************************************* +@File +@Title Server bridge for pdumpctrl +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for pdumpctrl +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "pdump_km.h" + +#include "common_pdumpctrl_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +#include "lock.h" + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgePVRSRVPDumpGetState(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPVRSRVPDumpGetStateIN_UI8, + IMG_UINT8 * psPVRSRVPDumpGetStateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETSTATE *psPVRSRVPDumpGetStateIN = + (PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETSTATE *) + IMG_OFFSET_ADDR(psPVRSRVPDumpGetStateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETSTATE *psPVRSRVPDumpGetStateOUT = + (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETSTATE *) + IMG_OFFSET_ADDR(psPVRSRVPDumpGetStateOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpGetStateIN); + + psPVRSRVPDumpGetStateOUT->eError = + PDumpGetStateKM(&psPVRSRVPDumpGetStateOUT->ui64State); + + return 0; +} + +static IMG_INT +PVRSRVBridgePVRSRVPDumpGetFrame(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPVRSRVPDumpGetFrameIN_UI8, + IMG_UINT8 * psPVRSRVPDumpGetFrameOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME *psPVRSRVPDumpGetFrameIN = + (PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME *) + IMG_OFFSET_ADDR(psPVRSRVPDumpGetFrameIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME *psPVRSRVPDumpGetFrameOUT = + (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME *) + IMG_OFFSET_ADDR(psPVRSRVPDumpGetFrameOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpGetFrameIN); + + psPVRSRVPDumpGetFrameOUT->eError = + PDumpGetFrameKM(psConnection, OSGetDevNode(psConnection), + 
&psPVRSRVPDumpGetFrameOUT->ui32Frame); + + return 0; +} + +static IMG_INT +PVRSRVBridgePVRSRVPDumpSetDefaultCaptureParams(IMG_UINT32 + ui32DispatchTableEntry, + IMG_UINT8 * + psPVRSRVPDumpSetDefaultCaptureParamsIN_UI8, + IMG_UINT8 * + psPVRSRVPDumpSetDefaultCaptureParamsOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS + *psPVRSRVPDumpSetDefaultCaptureParamsIN = + (PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS *) + IMG_OFFSET_ADDR(psPVRSRVPDumpSetDefaultCaptureParamsIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS + *psPVRSRVPDumpSetDefaultCaptureParamsOUT = + (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS *) + IMG_OFFSET_ADDR(psPVRSRVPDumpSetDefaultCaptureParamsOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psConnection); + + psPVRSRVPDumpSetDefaultCaptureParamsOUT->eError = + PDumpSetDefaultCaptureParamsKM + (psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Mode, + psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Start, + psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32End, + psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Interval, + psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32MaxParamFileSize); + + return 0; +} + +static IMG_INT +PVRSRVBridgePVRSRVPDumpIsLastCaptureFrame(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psPVRSRVPDumpIsLastCaptureFrameIN_UI8, + IMG_UINT8 * + psPVRSRVPDumpIsLastCaptureFrameOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME + *psPVRSRVPDumpIsLastCaptureFrameIN = + (PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME *) + IMG_OFFSET_ADDR(psPVRSRVPDumpIsLastCaptureFrameIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME + *psPVRSRVPDumpIsLastCaptureFrameOUT = + (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME *) + IMG_OFFSET_ADDR(psPVRSRVPDumpIsLastCaptureFrameOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpIsLastCaptureFrameIN); + + 
psPVRSRVPDumpIsLastCaptureFrameOUT->eError = + PDumpIsLastCaptureFrameKM(&psPVRSRVPDumpIsLastCaptureFrameOUT-> + bpbIsLastCaptureFrame); + + return 0; +} + +static IMG_INT +PVRSRVBridgePVRSRVPDumpForceCaptureStop(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psPVRSRVPDumpForceCaptureStopIN_UI8, + IMG_UINT8 * + psPVRSRVPDumpForceCaptureStopOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PVRSRVPDUMPFORCECAPTURESTOP + *psPVRSRVPDumpForceCaptureStopIN = + (PVRSRV_BRIDGE_IN_PVRSRVPDUMPFORCECAPTURESTOP *) + IMG_OFFSET_ADDR(psPVRSRVPDumpForceCaptureStopIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PVRSRVPDUMPFORCECAPTURESTOP + *psPVRSRVPDumpForceCaptureStopOUT = + (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPFORCECAPTURESTOP *) + IMG_OFFSET_ADDR(psPVRSRVPDumpForceCaptureStopOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpForceCaptureStopIN); + + psPVRSRVPDumpForceCaptureStopOUT->eError = PDumpForceCaptureStopKM(); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +static POS_LOCK pPDUMPCTRLBridgeLock; + +PVRSRV_ERROR InitPDUMPCTRLBridge(void); +PVRSRV_ERROR DeinitPDUMPCTRLBridge(void); + +/* + * Register all PDUMPCTRL functions with services + */ +PVRSRV_ERROR InitPDUMPCTRLBridge(void) +{ + PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&pPDUMPCTRLBridgeLock), + "OSLockCreate"); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, + PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETSTATE, + PVRSRVBridgePVRSRVPDumpGetState, + pPDUMPCTRLBridgeLock); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, + PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME, + PVRSRVBridgePVRSRVPDumpGetFrame, + pPDUMPCTRLBridgeLock); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, + PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS, + PVRSRVBridgePVRSRVPDumpSetDefaultCaptureParams, + pPDUMPCTRLBridgeLock); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, + 
PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISLASTCAPTUREFRAME, + PVRSRVBridgePVRSRVPDumpIsLastCaptureFrame, + pPDUMPCTRLBridgeLock); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, + PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPFORCECAPTURESTOP, + PVRSRVBridgePVRSRVPDumpForceCaptureStop, + pPDUMPCTRLBridgeLock); + + return PVRSRV_OK; +} + +/* + * Unregister all pdumpctrl functions with services + */ +PVRSRV_ERROR DeinitPDUMPCTRLBridge(void) +{ + PVR_LOG_RETURN_IF_ERROR(OSLockDestroy(pPDUMPCTRLBridgeLock), + "OSLockDestroy"); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, + PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETSTATE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, + PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, + PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, + PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISLASTCAPTUREFRAME); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, + PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPFORCECAPTURESTOP); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/pdumpmm_bridge/client_pdumpmm_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/pdumpmm_bridge/client_pdumpmm_bridge.h new file mode 100644 index 000000000000..04c2874fd5a5 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/pdumpmm_bridge/client_pdumpmm_bridge.h @@ -0,0 +1,169 @@ +/******************************************************************************* +@File +@Title Client bridge header for pdumpmm +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for pdumpmm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef CLIENT_PDUMPMM_BRIDGE_H +#define CLIENT_PDUMPMM_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_pdumpmm_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMem(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_DEVMEM_SIZE_T + uiSize, + IMG_UINT32 + ui32PDumpFlags, + IMG_BOOL bbZero); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue32(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_UINT32 + ui32Value, + IMG_UINT32 + ui32PDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue64(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_UINT64 + ui64Value, + IMG_UINT32 + ui32PDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSaveToFile(IMG_HANDLE + hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_DEVMEM_SIZE_T + uiSize, + IMG_UINT32 + ui32ArraySize, + const IMG_CHAR * + puiFileName, + IMG_UINT32 + ui32uiFileOffset); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSymbolicAddr(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_UINT32 + ui32MemspaceNameLen, + IMG_CHAR * + puiMemspaceName, + IMG_UINT32 + ui32SymbolicAddrLen, + IMG_CHAR * + puiSymbolicAddr, + IMG_DEVMEM_OFFSET_T + * + puiNewOffset, + IMG_DEVMEM_OFFSET_T + * + puiNextSymName); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpPol32(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR + eOperator, + IMG_UINT32 + ui32PDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpCheck32(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_OFFSET_T + 
uiOffset, + IMG_UINT32 + ui32Value, + IMG_UINT32 + ui32Mask, + PDUMP_POLL_OPERATOR + eOperator, + IMG_UINT32 + ui32PDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpCBP(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_OFFSET_T + uiReadOffset, + IMG_DEVMEM_OFFSET_T + uiWriteOffset, + IMG_DEVMEM_SIZE_T + uiPacketSize, + IMG_DEVMEM_SIZE_T + uiBufferSize); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevmemIntPDumpSaveToFileVirtual(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemServerContext, + IMG_DEV_VIRTADDR sAddress, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 ui32ArraySize, + const IMG_CHAR * puiFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32PDumpFlags); + +#endif /* CLIENT_PDUMPMM_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/pdumpmm_bridge/client_pdumpmm_direct_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/pdumpmm_bridge/client_pdumpmm_direct_bridge.c new file mode 100644 index 000000000000..887e90c5b1bc --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/pdumpmm_bridge/client_pdumpmm_direct_bridge.c @@ -0,0 +1,300 @@ +/******************************************************************************* +@File +@Title Direct client bridge for pdumpmm +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for pdumpmm + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include "client_pdumpmm_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "pdump.h" +#include "pdumpdefs.h" +#include "pvrsrv_memallocflags.h" +#include "devicemem_typedefs.h" + +#include "devicemem_server.h" +#include "pmr.h" +#include "physmem.h" +#include "pdump_physmem.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMem(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_DEVMEM_SIZE_T + uiSize, + IMG_UINT32 + ui32PDumpFlags, + IMG_BOOL bbZero) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + PMRPDumpLoadMem(psPMRInt, uiOffset, uiSize, ui32PDumpFlags, bbZero); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue32(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_UINT32 + ui32Value, + IMG_UINT32 + ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + PMRPDumpLoadMemValue32(psPMRInt, + uiOffset, ui32Value, ui32PDumpFlags); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue64(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_UINT64 + ui64Value, + IMG_UINT32 + ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + PMRPDumpLoadMemValue64(psPMRInt, + uiOffset, ui64Value, ui32PDumpFlags); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSaveToFile(IMG_HANDLE + hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_DEVMEM_SIZE_T + uiSize, + IMG_UINT32 + ui32ArraySize, + const IMG_CHAR * + puiFileName, + IMG_UINT32 + ui32uiFileOffset) +{ + PVRSRV_ERROR eError; + 
PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + PMRPDumpSaveToFile(psPMRInt, + uiOffset, + uiSize, + ui32ArraySize, puiFileName, ui32uiFileOffset); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSymbolicAddr(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_UINT32 + ui32MemspaceNameLen, + IMG_CHAR * + puiMemspaceName, + IMG_UINT32 + ui32SymbolicAddrLen, + IMG_CHAR * + puiSymbolicAddr, + IMG_DEVMEM_OFFSET_T + * + puiNewOffset, + IMG_DEVMEM_OFFSET_T + * + puiNextSymName) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + PMR_PDumpSymbolicAddr(psPMRInt, + uiOffset, + ui32MemspaceNameLen, + puiMemspaceName, + ui32SymbolicAddrLen, + puiSymbolicAddr, + puiNewOffset, puiNextSymName); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpPol32(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR + eOperator, + IMG_UINT32 + ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + PMRPDumpPol32(psPMRInt, + uiOffset, + ui32Value, ui32Mask, eOperator, ui32PDumpFlags); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpCheck32(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_UINT32 + ui32Value, + IMG_UINT32 + ui32Mask, + PDUMP_POLL_OPERATOR + eOperator, + IMG_UINT32 + ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + PMRPDumpCheck32(psPMRInt, + uiOffset, + ui32Value, ui32Mask, eOperator, ui32PDumpFlags); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpCBP(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_OFFSET_T + 
uiReadOffset, + IMG_DEVMEM_OFFSET_T + uiWriteOffset, + IMG_DEVMEM_SIZE_T + uiPacketSize, + IMG_DEVMEM_SIZE_T + uiBufferSize) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + PMRPDumpCBP(psPMRInt, + uiReadOffset, + uiWriteOffset, uiPacketSize, uiBufferSize); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevmemIntPDumpSaveToFileVirtual(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemServerContext, + IMG_DEV_VIRTADDR sAddress, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 ui32ArraySize, + const IMG_CHAR * puiFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemServerContextInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemServerContextInt = (DEVMEMINT_CTX *) hDevmemServerContext; + + eError = + DevmemIntPDumpSaveToFileVirtual(psDevmemServerContextInt, + sAddress, + uiSize, + ui32ArraySize, + puiFileName, + ui32FileOffset, ui32PDumpFlags); + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/pdumpmm_bridge/common_pdumpmm_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/pdumpmm_bridge/common_pdumpmm_bridge.h new file mode 100644 index 000000000000..10c634556e72 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/pdumpmm_bridge/common_pdumpmm_bridge.h @@ -0,0 +1,260 @@ +/******************************************************************************* +@File +@Title Common bridge header for pdumpmm +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for pdumpmm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_PDUMPMM_BRIDGE_H +#define COMMON_PDUMPMM_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "pdump.h" +#include "pdumpdefs.h" +#include "pvrsrv_memallocflags.h" +#include "devicemem_typedefs.h" + +#define PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST 0 +#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEM PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+0 +#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32 PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+1 +#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE64 PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+2 +#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSAVETOFILE PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+3 +#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSYMBOLICADDR PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+4 +#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPPOL32 PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+5 +#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCHECK32 PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+6 +#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCBP PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+7 +#define PVRSRV_BRIDGE_PDUMPMM_DEVMEMINTPDUMPSAVETOFILEVIRTUAL PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+8 +#define PVRSRV_BRIDGE_PDUMPMM_CMD_LAST (PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+8) + +/******************************************* + PMRPDumpLoadMem + *******************************************/ + +/* Bridge in structure for PMRPDumpLoadMem */ +typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM_TAG +{ + IMG_HANDLE hPMR; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_DEVMEM_SIZE_T uiSize; + IMG_UINT32 ui32PDumpFlags; + IMG_BOOL bbZero; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM; + +/* Bridge out structure for PMRPDumpLoadMem */ +typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM; + +/******************************************* + PMRPDumpLoadMemValue32 + *******************************************/ + +/* Bridge in structure for PMRPDumpLoadMemValue32 */ +typedef 
struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32_TAG +{ + IMG_HANDLE hPMR; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_UINT32 ui32Value; + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32; + +/* Bridge out structure for PMRPDumpLoadMemValue32 */ +typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32; + +/******************************************* + PMRPDumpLoadMemValue64 + *******************************************/ + +/* Bridge in structure for PMRPDumpLoadMemValue64 */ +typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64_TAG +{ + IMG_HANDLE hPMR; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_UINT64 ui64Value; + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64; + +/* Bridge out structure for PMRPDumpLoadMemValue64 */ +typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64; + +/******************************************* + PMRPDumpSaveToFile + *******************************************/ + +/* Bridge in structure for PMRPDumpSaveToFile */ +typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE_TAG +{ + IMG_HANDLE hPMR; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_DEVMEM_SIZE_T uiSize; + IMG_UINT32 ui32ArraySize; + const IMG_CHAR *puiFileName; + IMG_UINT32 ui32uiFileOffset; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE; + +/* Bridge out structure for PMRPDumpSaveToFile */ +typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE; + +/******************************************* + PMRPDumpSymbolicAddr + *******************************************/ + +/* Bridge in structure for PMRPDumpSymbolicAddr */ +typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR_TAG +{ + IMG_HANDLE hPMR; + 
IMG_DEVMEM_OFFSET_T uiOffset; + IMG_UINT32 ui32MemspaceNameLen; + IMG_UINT32 ui32SymbolicAddrLen; + /* Output pointer puiMemspaceName is also an implied input */ + IMG_CHAR *puiMemspaceName; + /* Output pointer puiSymbolicAddr is also an implied input */ + IMG_CHAR *puiSymbolicAddr; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR; + +/* Bridge out structure for PMRPDumpSymbolicAddr */ +typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR_TAG +{ + IMG_CHAR *puiMemspaceName; + IMG_CHAR *puiSymbolicAddr; + IMG_DEVMEM_OFFSET_T uiNewOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR; + +/******************************************* + PMRPDumpPol32 + *******************************************/ + +/* Bridge in structure for PMRPDumpPol32 */ +typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPPOL32_TAG +{ + IMG_HANDLE hPMR; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_UINT32 ui32Value; + IMG_UINT32 ui32Mask; + PDUMP_POLL_OPERATOR eOperator; + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPPOL32; + +/* Bridge out structure for PMRPDumpPol32 */ +typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32; + +/******************************************* + PMRPDumpCheck32 + *******************************************/ + +/* Bridge in structure for PMRPDumpCheck32 */ +typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPCHECK32_TAG +{ + IMG_HANDLE hPMR; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_UINT32 ui32Value; + IMG_UINT32 ui32Mask; + PDUMP_POLL_OPERATOR eOperator; + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPCHECK32; + +/* Bridge out structure for PMRPDumpCheck32 */ +typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPCHECK32_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPCHECK32; + +/******************************************* + 
PMRPDumpCBP + *******************************************/ + +/* Bridge in structure for PMRPDumpCBP */ +typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPCBP_TAG +{ + IMG_HANDLE hPMR; + IMG_DEVMEM_OFFSET_T uiReadOffset; + IMG_DEVMEM_OFFSET_T uiWriteOffset; + IMG_DEVMEM_SIZE_T uiPacketSize; + IMG_DEVMEM_SIZE_T uiBufferSize; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPCBP; + +/* Bridge out structure for PMRPDumpCBP */ +typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPCBP_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPCBP; + +/******************************************* + DevmemIntPDumpSaveToFileVirtual + *******************************************/ + +/* Bridge in structure for DevmemIntPDumpSaveToFileVirtual */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL_TAG +{ + IMG_HANDLE hDevmemServerContext; + IMG_DEV_VIRTADDR sAddress; + IMG_DEVMEM_SIZE_T uiSize; + IMG_UINT32 ui32ArraySize; + const IMG_CHAR *puiFileName; + IMG_UINT32 ui32FileOffset; + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL; + +/* Bridge out structure for DevmemIntPDumpSaveToFileVirtual */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL; + +#endif /* COMMON_PDUMPMM_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/pdumpmm_bridge/server_pdumpmm_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/pdumpmm_bridge/server_pdumpmm_bridge.c new file mode 100644 index 000000000000..0409e85f91c4 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/pdumpmm_bridge/server_pdumpmm_bridge.c @@ -0,0 +1,1029 @@ +/******************************************************************************* +@File +@Title Server bridge for pdumpmm +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Implements the server side of the bridge for pdumpmm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "devicemem_server.h" +#include "pmr.h" +#include "physmem.h" +#include "pdump_physmem.h" + +#include "common_pdumpmm_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgePMRPDumpLoadMem(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRPDumpLoadMemIN_UI8, + IMG_UINT8 * psPMRPDumpLoadMemOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM *psPMRPDumpLoadMemIN = + (PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM *) + IMG_OFFSET_ADDR(psPMRPDumpLoadMemIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM *psPMRPDumpLoadMemOUT = + (PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM *) + IMG_OFFSET_ADDR(psPMRPDumpLoadMemOUT_UI8, 0); + + IMG_HANDLE hPMR = psPMRPDumpLoadMemIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRPDumpLoadMemOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psPMRPDumpLoadMemOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRPDumpLoadMem_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPMRPDumpLoadMemOUT->eError = + PMRPDumpLoadMem(psPMRInt, + psPMRPDumpLoadMemIN->uiOffset, + psPMRPDumpLoadMemIN->uiSize, + psPMRPDumpLoadMemIN->ui32PDumpFlags, + psPMRPDumpLoadMemIN->bbZero); + +PMRPDumpLoadMem_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRPDumpLoadMemValue32(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRPDumpLoadMemValue32IN_UI8, + IMG_UINT8 * psPMRPDumpLoadMemValue32OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32 *psPMRPDumpLoadMemValue32IN = + (PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32 *) + IMG_OFFSET_ADDR(psPMRPDumpLoadMemValue32IN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32 *psPMRPDumpLoadMemValue32OUT = + (PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32 *) + IMG_OFFSET_ADDR(psPMRPDumpLoadMemValue32OUT_UI8, 0); + + IMG_HANDLE hPMR = psPMRPDumpLoadMemValue32IN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRPDumpLoadMemValue32OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psPMRPDumpLoadMemValue32OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRPDumpLoadMemValue32_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPMRPDumpLoadMemValue32OUT->eError = + PMRPDumpLoadMemValue32(psPMRInt, + psPMRPDumpLoadMemValue32IN->uiOffset, + psPMRPDumpLoadMemValue32IN->ui32Value, + psPMRPDumpLoadMemValue32IN->ui32PDumpFlags); + +PMRPDumpLoadMemValue32_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRPDumpLoadMemValue64(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRPDumpLoadMemValue64IN_UI8, + IMG_UINT8 * psPMRPDumpLoadMemValue64OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64 *psPMRPDumpLoadMemValue64IN = + (PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64 *) + IMG_OFFSET_ADDR(psPMRPDumpLoadMemValue64IN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64 *psPMRPDumpLoadMemValue64OUT = + (PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64 *) + IMG_OFFSET_ADDR(psPMRPDumpLoadMemValue64OUT_UI8, 0); + + IMG_HANDLE hPMR = psPMRPDumpLoadMemValue64IN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRPDumpLoadMemValue64OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psPMRPDumpLoadMemValue64OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRPDumpLoadMemValue64_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPMRPDumpLoadMemValue64OUT->eError = + PMRPDumpLoadMemValue64(psPMRInt, + psPMRPDumpLoadMemValue64IN->uiOffset, + psPMRPDumpLoadMemValue64IN->ui64Value, + psPMRPDumpLoadMemValue64IN->ui32PDumpFlags); + +PMRPDumpLoadMemValue64_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRPDumpSaveToFile(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRPDumpSaveToFileIN_UI8, + IMG_UINT8 * psPMRPDumpSaveToFileOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE *psPMRPDumpSaveToFileIN = + (PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE *) + IMG_OFFSET_ADDR(psPMRPDumpSaveToFileIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE *psPMRPDumpSaveToFileOUT = + (PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE *) + IMG_OFFSET_ADDR(psPMRPDumpSaveToFileOUT_UI8, 0); + + IMG_HANDLE hPMR = psPMRPDumpSaveToFileIN->hPMR; + PMR *psPMRInt = NULL; + IMG_CHAR *uiFileNameInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR)) + 0; + + if (unlikely + (psPMRPDumpSaveToFileIN->ui32ArraySize > + PVRSRV_PDUMP_MAX_FILENAME_SIZE)) + { + psPMRPDumpSaveToFileOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PMRPDumpSaveToFile_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psPMRPDumpSaveToFileIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psPMRPDumpSaveToFileIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psPMRPDumpSaveToFileOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto PMRPDumpSaveToFile_exit; + } + } + } + + if (psPMRPDumpSaveToFileIN->ui32ArraySize != 0) + { + uiFileNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiFileNameInt, + (const void __user *)psPMRPDumpSaveToFileIN->puiFileName, + psPMRPDumpSaveToFileIN->ui32ArraySize * + sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psPMRPDumpSaveToFileOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PMRPDumpSaveToFile_exit; + } + ((IMG_CHAR *) + uiFileNameInt)[(psPMRPDumpSaveToFileIN->ui32ArraySize * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRPDumpSaveToFileOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psPMRPDumpSaveToFileOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRPDumpSaveToFile_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psPMRPDumpSaveToFileOUT->eError = + PMRPDumpSaveToFile(psPMRInt, + psPMRPDumpSaveToFileIN->uiOffset, + psPMRPDumpSaveToFileIN->uiSize, + psPMRPDumpSaveToFileIN->ui32ArraySize, + uiFileNameInt, + psPMRPDumpSaveToFileIN->ui32uiFileOffset); + +PMRPDumpSaveToFile_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRPDumpSymbolicAddr(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRPDumpSymbolicAddrIN_UI8, + IMG_UINT8 * psPMRPDumpSymbolicAddrOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR *psPMRPDumpSymbolicAddrIN = + (PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR *) + IMG_OFFSET_ADDR(psPMRPDumpSymbolicAddrIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR *psPMRPDumpSymbolicAddrOUT = + (PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR *) + IMG_OFFSET_ADDR(psPMRPDumpSymbolicAddrOUT_UI8, 0); + + IMG_HANDLE hPMR = psPMRPDumpSymbolicAddrIN->hPMR; + PMR *psPMRInt = NULL; + IMG_CHAR *puiMemspaceNameInt = NULL; + IMG_CHAR *puiSymbolicAddrInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR)) + + (psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR)) + + 0; + + 
if (psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen > + PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH) + { + psPMRPDumpSymbolicAddrOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PMRPDumpSymbolicAddr_exit; + } + + if (psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen > + PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH) + { + psPMRPDumpSymbolicAddrOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PMRPDumpSymbolicAddr_exit; + } + + psPMRPDumpSymbolicAddrOUT->puiMemspaceName = + psPMRPDumpSymbolicAddrIN->puiMemspaceName; + psPMRPDumpSymbolicAddrOUT->puiSymbolicAddr = + psPMRPDumpSymbolicAddrIN->puiSymbolicAddr; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psPMRPDumpSymbolicAddrIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psPMRPDumpSymbolicAddrIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psPMRPDumpSymbolicAddrOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto PMRPDumpSymbolicAddr_exit; + } + } + } + + if (psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen != 0) + { + puiMemspaceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * + sizeof(IMG_CHAR); + } + + if (psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen != 0) + { + puiSymbolicAddrInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * + sizeof(IMG_CHAR); + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRPDumpSymbolicAddrOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psPMRPDumpSymbolicAddrOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRPDumpSymbolicAddr_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPMRPDumpSymbolicAddrOUT->eError = + PMR_PDumpSymbolicAddr(psPMRInt, + psPMRPDumpSymbolicAddrIN->uiOffset, + psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen, + puiMemspaceNameInt, + psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen, + puiSymbolicAddrInt, + &psPMRPDumpSymbolicAddrOUT->uiNewOffset, + &psPMRPDumpSymbolicAddrOUT->uiNextSymName); + + /* If dest ptr is non-null and we have data to copy */ + if ((puiMemspaceNameInt) && + ((psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * + sizeof(IMG_CHAR)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, + (void __user *)psPMRPDumpSymbolicAddrOUT->puiMemspaceName, + puiMemspaceNameInt, + (psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * + sizeof(IMG_CHAR))) != PVRSRV_OK)) + { + psPMRPDumpSymbolicAddrOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PMRPDumpSymbolicAddr_exit; + } + } + + /* If dest ptr is non-null and we have data to copy */ + if ((puiSymbolicAddrInt) && + ((psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * + sizeof(IMG_CHAR)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, + (void __user *)psPMRPDumpSymbolicAddrOUT->puiSymbolicAddr, + puiSymbolicAddrInt, + (psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * + sizeof(IMG_CHAR))) != PVRSRV_OK)) + { + psPMRPDumpSymbolicAddrOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PMRPDumpSymbolicAddr_exit; + } + } + +PMRPDumpSymbolicAddr_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRPDumpPol32(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRPDumpPol32IN_UI8, + IMG_UINT8 * psPMRPDumpPol32OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRPDUMPPOL32 *psPMRPDumpPol32IN = + (PVRSRV_BRIDGE_IN_PMRPDUMPPOL32 *) + IMG_OFFSET_ADDR(psPMRPDumpPol32IN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32 *psPMRPDumpPol32OUT = + (PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32 *) + IMG_OFFSET_ADDR(psPMRPDumpPol32OUT_UI8, 0); + + IMG_HANDLE hPMR = psPMRPDumpPol32IN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRPDumpPol32OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psPMRPDumpPol32OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRPDumpPol32_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPMRPDumpPol32OUT->eError = + PMRPDumpPol32(psPMRInt, + psPMRPDumpPol32IN->uiOffset, + psPMRPDumpPol32IN->ui32Value, + psPMRPDumpPol32IN->ui32Mask, + psPMRPDumpPol32IN->eOperator, + psPMRPDumpPol32IN->ui32PDumpFlags); + +PMRPDumpPol32_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRPDumpCheck32(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRPDumpCheck32IN_UI8, + IMG_UINT8 * psPMRPDumpCheck32OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRPDUMPCHECK32 *psPMRPDumpCheck32IN = + (PVRSRV_BRIDGE_IN_PMRPDUMPCHECK32 *) + IMG_OFFSET_ADDR(psPMRPDumpCheck32IN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRPDUMPCHECK32 *psPMRPDumpCheck32OUT = + (PVRSRV_BRIDGE_OUT_PMRPDUMPCHECK32 *) + IMG_OFFSET_ADDR(psPMRPDumpCheck32OUT_UI8, 0); + + IMG_HANDLE hPMR = psPMRPDumpCheck32IN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRPDumpCheck32OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psPMRPDumpCheck32OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRPDumpCheck32_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPMRPDumpCheck32OUT->eError = + PMRPDumpCheck32(psPMRInt, + psPMRPDumpCheck32IN->uiOffset, + psPMRPDumpCheck32IN->ui32Value, + psPMRPDumpCheck32IN->ui32Mask, + psPMRPDumpCheck32IN->eOperator, + psPMRPDumpCheck32IN->ui32PDumpFlags); + +PMRPDumpCheck32_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRPDumpCBP(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRPDumpCBPIN_UI8, + IMG_UINT8 * psPMRPDumpCBPOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRPDUMPCBP *psPMRPDumpCBPIN = + (PVRSRV_BRIDGE_IN_PMRPDUMPCBP *) + IMG_OFFSET_ADDR(psPMRPDumpCBPIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRPDUMPCBP *psPMRPDumpCBPOUT = + (PVRSRV_BRIDGE_OUT_PMRPDUMPCBP *) + IMG_OFFSET_ADDR(psPMRPDumpCBPOUT_UI8, 0); + + IMG_HANDLE hPMR = psPMRPDumpCBPIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRPDumpCBPOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psPMRPDumpCBPOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRPDumpCBP_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPMRPDumpCBPOUT->eError = + PMRPDumpCBP(psPMRInt, + psPMRPDumpCBPIN->uiReadOffset, + psPMRPDumpCBPIN->uiWriteOffset, + psPMRPDumpCBPIN->uiPacketSize, + psPMRPDumpCBPIN->uiBufferSize); + +PMRPDumpCBP_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psDevmemIntPDumpSaveToFileVirtualIN_UI8, + IMG_UINT8 * + psDevmemIntPDumpSaveToFileVirtualOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL + *psDevmemIntPDumpSaveToFileVirtualIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL *) + IMG_OFFSET_ADDR(psDevmemIntPDumpSaveToFileVirtualIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL + *psDevmemIntPDumpSaveToFileVirtualOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL *) + IMG_OFFSET_ADDR(psDevmemIntPDumpSaveToFileVirtualOUT_UI8, 0); + + IMG_HANDLE hDevmemServerContext = + psDevmemIntPDumpSaveToFileVirtualIN->hDevmemServerContext; + DEVMEMINT_CTX *psDevmemServerContextInt = NULL; + IMG_CHAR *uiFileNameInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * + sizeof(IMG_CHAR)) + 0; + + if (unlikely + (psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize > + PVRSRV_PDUMP_MAX_FILENAME_SIZE)) + { + psDevmemIntPDumpSaveToFileVirtualOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto DevmemIntPDumpSaveToFileVirtual_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psDevmemIntPDumpSaveToFileVirtualIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *) + psDevmemIntPDumpSaveToFileVirtualIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psDevmemIntPDumpSaveToFileVirtualOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto DevmemIntPDumpSaveToFileVirtual_exit; + } + } + } + + if (psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize != 0) + { + uiFileNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * + sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * + sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiFileNameInt, + (const void __user *)psDevmemIntPDumpSaveToFileVirtualIN-> + puiFileName, + psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * + sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psDevmemIntPDumpSaveToFileVirtualOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto DevmemIntPDumpSaveToFileVirtual_exit; + } + ((IMG_CHAR *) + uiFileNameInt)[(psDevmemIntPDumpSaveToFileVirtualIN-> + ui32ArraySize * sizeof(IMG_CHAR)) - 1] = '\0'; + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntPDumpSaveToFileVirtualOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemServerContextInt, + hDevmemServerContext, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + IMG_TRUE); + if (unlikely(psDevmemIntPDumpSaveToFileVirtualOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntPDumpSaveToFileVirtual_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntPDumpSaveToFileVirtualOUT->eError = + DevmemIntPDumpSaveToFileVirtual(psDevmemServerContextInt, + psDevmemIntPDumpSaveToFileVirtualIN-> + sAddress, + psDevmemIntPDumpSaveToFileVirtualIN-> + uiSize, + psDevmemIntPDumpSaveToFileVirtualIN-> + ui32ArraySize, uiFileNameInt, + psDevmemIntPDumpSaveToFileVirtualIN-> + ui32FileOffset, + psDevmemIntPDumpSaveToFileVirtualIN-> + ui32PDumpFlags); + +DevmemIntPDumpSaveToFileVirtual_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemServerContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemServerContext, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitPDUMPMMBridge(void); +PVRSRV_ERROR DeinitPDUMPMMBridge(void); + +/* + * Register all PDUMPMM functions with services + */ +PVRSRV_ERROR InitPDUMPMMBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEM, + PVRSRVBridgePMRPDumpLoadMem, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32, + PVRSRVBridgePMRPDumpLoadMemValue32, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE64, + PVRSRVBridgePMRPDumpLoadMemValue64, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSAVETOFILE, + 
PVRSRVBridgePMRPDumpSaveToFile, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSYMBOLICADDR, + PVRSRVBridgePMRPDumpSymbolicAddr, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPPOL32, + PVRSRVBridgePMRPDumpPol32, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCHECK32, + PVRSRVBridgePMRPDumpCheck32, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCBP, + PVRSRVBridgePMRPDumpCBP, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_DEVMEMINTPDUMPSAVETOFILEVIRTUAL, + PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual, + NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all pdumpmm functions with services + */ +PVRSRV_ERROR DeinitPDUMPMMBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE64); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSAVETOFILE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSYMBOLICADDR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPPOL32); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCHECK32); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCBP); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_DEVMEMINTPDUMPSAVETOFILEVIRTUAL); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/pvrtl_bridge/client_pvrtl_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/pvrtl_bridge/client_pvrtl_bridge.h new file mode 100644 index 000000000000..c3cc129384af --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/generated/rogue/pvrtl_bridge/client_pvrtl_bridge.h @@ -0,0 +1,113 @@ +/******************************************************************************* +@File +@Title Client bridge header for pvrtl +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for pvrtl +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef CLIENT_PVRTL_BRIDGE_H +#define CLIENT_PVRTL_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_pvrtl_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLOpenStream(IMG_HANDLE hBridge, + const IMG_CHAR * + puiName, + IMG_UINT32 ui32Mode, + IMG_HANDLE * phSD, + IMG_HANDLE * phTLPMR); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCloseStream(IMG_HANDLE hBridge, + IMG_HANDLE hSD); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLAcquireData(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 * + pui32ReadOffset, + IMG_UINT32 * + pui32ReadLen); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReleaseData(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 + ui32ReadOffset, + IMG_UINT32 + ui32ReadLen); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLDiscoverStreams(IMG_HANDLE + hBridge, + const IMG_CHAR * + puiNamePattern, + IMG_UINT32 + ui32Size, + IMG_CHAR * + puiStreams, + IMG_UINT32 * + pui32NumFound); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReserveStream(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 * + pui32BufferOffset, + IMG_UINT32 + ui32Size, + IMG_UINT32 + ui32SizeMin, + IMG_UINT32 * + pui32Available); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCommitStream(IMG_HANDLE 
hBridge, + IMG_HANDLE hSD, + IMG_UINT32 + ui32ReqSize); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLWriteData(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 ui32Size, + IMG_BYTE * psData); + +#endif /* CLIENT_PVRTL_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/pvrtl_bridge/client_pvrtl_direct_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/pvrtl_bridge/client_pvrtl_direct_bridge.c new file mode 100644 index 000000000000..481ed7945956 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/pvrtl_bridge/client_pvrtl_direct_bridge.c @@ -0,0 +1,200 @@ +/******************************************************************************* +@File +@Title Direct client bridge for pvrtl +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for pvrtl + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include "client_pvrtl_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "devicemem_typedefs.h" +#include "pvrsrv_tlcommon.h" + +#include "tlserver.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLOpenStream(IMG_HANDLE hBridge, + const IMG_CHAR * + puiName, + IMG_UINT32 ui32Mode, + IMG_HANDLE * phSD, + IMG_HANDLE * phTLPMR) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC *psSDInt = NULL; + PMR *psTLPMRInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = TLServerOpenStreamKM(puiName, ui32Mode, &psSDInt, &psTLPMRInt); + + *phSD = psSDInt; + *phTLPMR = psTLPMRInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCloseStream(IMG_HANDLE hBridge, + IMG_HANDLE hSD) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC *psSDInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSDInt = (TL_STREAM_DESC *) hSD; + + eError = TLServerCloseStreamKM(psSDInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLAcquireData(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 * + pui32ReadOffset, + IMG_UINT32 * + pui32ReadLen) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC *psSDInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSDInt = (TL_STREAM_DESC *) hSD; + + eError = TLServerAcquireDataKM(psSDInt, pui32ReadOffset, pui32ReadLen); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReleaseData(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 + ui32ReadOffset, + IMG_UINT32 + ui32ReadLen) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC *psSDInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSDInt = (TL_STREAM_DESC *) hSD; + + eError = TLServerReleaseDataKM(psSDInt, ui32ReadOffset, ui32ReadLen); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLDiscoverStreams(IMG_HANDLE + hBridge, + const IMG_CHAR * + puiNamePattern, + IMG_UINT32 + ui32Size, + IMG_CHAR * + puiStreams, + IMG_UINT32 
* + pui32NumFound) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = + TLServerDiscoverStreamsKM(puiNamePattern, + ui32Size, puiStreams, pui32NumFound); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReserveStream(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 * + pui32BufferOffset, + IMG_UINT32 + ui32Size, + IMG_UINT32 + ui32SizeMin, + IMG_UINT32 * + pui32Available) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC *psSDInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSDInt = (TL_STREAM_DESC *) hSD; + + eError = + TLServerReserveStreamKM(psSDInt, + pui32BufferOffset, + ui32Size, ui32SizeMin, pui32Available); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCommitStream(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 + ui32ReqSize) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC *psSDInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSDInt = (TL_STREAM_DESC *) hSD; + + eError = TLServerCommitStreamKM(psSDInt, ui32ReqSize); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLWriteData(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 ui32Size, + IMG_BYTE * psData) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC *psSDInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSDInt = (TL_STREAM_DESC *) hSD; + + eError = TLServerWriteDataKM(psSDInt, ui32Size, psData); + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/pvrtl_bridge/common_pvrtl_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/pvrtl_bridge/common_pvrtl_bridge.h new file mode 100644 index 000000000000..2047536c112d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/pvrtl_bridge/common_pvrtl_bridge.h @@ -0,0 +1,214 @@ +/******************************************************************************* +@File +@Title Common bridge header for pvrtl +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for pvrtl +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_PVRTL_BRIDGE_H +#define COMMON_PVRTL_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "devicemem_typedefs.h" +#include "pvrsrv_tlcommon.h" + +#define PVRSRV_BRIDGE_PVRTL_CMD_FIRST 0 +#define PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+0 +#define PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+1 +#define PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+2 +#define PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+3 +#define PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS PVRSRV_BRIDGE_PVRTL_CMD_FIRST+4 +#define PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+5 +#define PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+6 +#define PVRSRV_BRIDGE_PVRTL_TLWRITEDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+7 +#define PVRSRV_BRIDGE_PVRTL_CMD_LAST (PVRSRV_BRIDGE_PVRTL_CMD_FIRST+7) + +/******************************************* + TLOpenStream + *******************************************/ + +/* Bridge in structure for TLOpenStream */ +typedef struct PVRSRV_BRIDGE_IN_TLOPENSTREAM_TAG +{ + const IMG_CHAR *puiName; + IMG_UINT32 ui32Mode; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLOPENSTREAM; + +/* Bridge out structure for TLOpenStream */ +typedef struct PVRSRV_BRIDGE_OUT_TLOPENSTREAM_TAG +{ + IMG_HANDLE hSD; + IMG_HANDLE hTLPMR; + 
PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLOPENSTREAM; + +/******************************************* + TLCloseStream + *******************************************/ + +/* Bridge in structure for TLCloseStream */ +typedef struct PVRSRV_BRIDGE_IN_TLCLOSESTREAM_TAG +{ + IMG_HANDLE hSD; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLCLOSESTREAM; + +/* Bridge out structure for TLCloseStream */ +typedef struct PVRSRV_BRIDGE_OUT_TLCLOSESTREAM_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLCLOSESTREAM; + +/******************************************* + TLAcquireData + *******************************************/ + +/* Bridge in structure for TLAcquireData */ +typedef struct PVRSRV_BRIDGE_IN_TLACQUIREDATA_TAG +{ + IMG_HANDLE hSD; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLACQUIREDATA; + +/* Bridge out structure for TLAcquireData */ +typedef struct PVRSRV_BRIDGE_OUT_TLACQUIREDATA_TAG +{ + IMG_UINT32 ui32ReadOffset; + IMG_UINT32 ui32ReadLen; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLACQUIREDATA; + +/******************************************* + TLReleaseData + *******************************************/ + +/* Bridge in structure for TLReleaseData */ +typedef struct PVRSRV_BRIDGE_IN_TLRELEASEDATA_TAG +{ + IMG_HANDLE hSD; + IMG_UINT32 ui32ReadOffset; + IMG_UINT32 ui32ReadLen; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLRELEASEDATA; + +/* Bridge out structure for TLReleaseData */ +typedef struct PVRSRV_BRIDGE_OUT_TLRELEASEDATA_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLRELEASEDATA; + +/******************************************* + TLDiscoverStreams + *******************************************/ + +/* Bridge in structure for TLDiscoverStreams */ +typedef struct PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS_TAG +{ + const IMG_CHAR *puiNamePattern; + IMG_UINT32 ui32Size; + /* Output pointer puiStreams is also an implied input */ + IMG_CHAR *puiStreams; +} 
__attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS; + +/* Bridge out structure for TLDiscoverStreams */ +typedef struct PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS_TAG +{ + IMG_CHAR *puiStreams; + IMG_UINT32 ui32NumFound; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS; + +/******************************************* + TLReserveStream + *******************************************/ + +/* Bridge in structure for TLReserveStream */ +typedef struct PVRSRV_BRIDGE_IN_TLRESERVESTREAM_TAG +{ + IMG_HANDLE hSD; + IMG_UINT32 ui32Size; + IMG_UINT32 ui32SizeMin; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLRESERVESTREAM; + +/* Bridge out structure for TLReserveStream */ +typedef struct PVRSRV_BRIDGE_OUT_TLRESERVESTREAM_TAG +{ + IMG_UINT32 ui32BufferOffset; + IMG_UINT32 ui32Available; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLRESERVESTREAM; + +/******************************************* + TLCommitStream + *******************************************/ + +/* Bridge in structure for TLCommitStream */ +typedef struct PVRSRV_BRIDGE_IN_TLCOMMITSTREAM_TAG +{ + IMG_HANDLE hSD; + IMG_UINT32 ui32ReqSize; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLCOMMITSTREAM; + +/* Bridge out structure for TLCommitStream */ +typedef struct PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM; + +/******************************************* + TLWriteData + *******************************************/ + +/* Bridge in structure for TLWriteData */ +typedef struct PVRSRV_BRIDGE_IN_TLWRITEDATA_TAG +{ + IMG_HANDLE hSD; + IMG_UINT32 ui32Size; + IMG_BYTE *psData; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLWRITEDATA; + +/* Bridge out structure for TLWriteData */ +typedef struct PVRSRV_BRIDGE_OUT_TLWRITEDATA_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLWRITEDATA; + +#endif /* COMMON_PVRTL_BRIDGE_H */ diff --git 
a/drivers/mcst/gpu-imgtec/generated/rogue/pvrtl_bridge/server_pvrtl_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/pvrtl_bridge/server_pvrtl_bridge.c new file mode 100644 index 000000000000..7a601bea2cf2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/pvrtl_bridge/server_pvrtl_bridge.c @@ -0,0 +1,880 @@ +/******************************************************************************* +@File +@Title Server bridge for pvrtl +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for pvrtl +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "tlserver.h" + +#include "common_pvrtl_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _TLOpenStreampsSDIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = TLServerCloseStreamKM((TL_STREAM_DESC *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeTLOpenStream(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLOpenStreamIN_UI8, + IMG_UINT8 * psTLOpenStreamOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLOPENSTREAM *psTLOpenStreamIN = + (PVRSRV_BRIDGE_IN_TLOPENSTREAM *) + IMG_OFFSET_ADDR(psTLOpenStreamIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLOPENSTREAM *psTLOpenStreamOUT = + (PVRSRV_BRIDGE_OUT_TLOPENSTREAM *) + IMG_OFFSET_ADDR(psTLOpenStreamOUT_UI8, 0); + + IMG_CHAR *uiNameInt = NULL; + TL_STREAM_DESC *psSDInt = NULL; + PMR *psTLPMRInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = 
IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) + 0; + + psTLOpenStreamOUT->hSD = NULL; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psTLOpenStreamIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psTLOpenStreamIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psTLOpenStreamOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto TLOpenStream_exit; + } + } + } + + { + uiNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiNameInt, + (const void __user *)psTLOpenStreamIN->puiName, + PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) != + PVRSRV_OK) + { + psTLOpenStreamOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto TLOpenStream_exit; + } + ((IMG_CHAR *) + uiNameInt)[(PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + + psTLOpenStreamOUT->eError = + TLServerOpenStreamKM(uiNameInt, + psTLOpenStreamIN->ui32Mode, + &psSDInt, &psTLPMRInt); + /* Exit early if bridged call fails */ + if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK)) + { + goto TLOpenStream_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psTLOpenStreamOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psTLOpenStreamOUT->hSD, (void *)psSDInt, + PVRSRV_HANDLE_TYPE_PVR_TL_SD, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _TLOpenStreampsSDIntRelease); + if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto TLOpenStream_exit; + } + + psTLOpenStreamOUT->eError = + PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, + &psTLOpenStreamOUT->hTLPMR, + (void *)psTLPMRInt, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psTLOpenStreamOUT->hSD); + if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto TLOpenStream_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +TLOpenStream_exit: + + if (psTLOpenStreamOUT->eError != PVRSRV_OK) + { + if (psTLOpenStreamOUT->hSD) + { + PVRSRV_ERROR eError; + + /* Lock over handle creation cleanup. */ + LockHandle(psConnection->psHandleBase); + + eError = + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + (IMG_HANDLE) + psTLOpenStreamOUT->hSD, + PVRSRV_HANDLE_TYPE_PVR_TL_SD); + if (unlikely + ((eError != PVRSRV_OK) + && (eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) + || (eError == PVRSRV_ERROR_RETRY)); + + /* Avoid freeing/destroying/releasing the resource a second time below */ + psSDInt = NULL; + /* Release now we have cleaned up creation handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + } + + if (psSDInt) + { + TLServerCloseStreamKM(psSDInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeTLCloseStream(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLCloseStreamIN_UI8, + IMG_UINT8 * psTLCloseStreamOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLCLOSESTREAM *psTLCloseStreamIN = + (PVRSRV_BRIDGE_IN_TLCLOSESTREAM *) + IMG_OFFSET_ADDR(psTLCloseStreamIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLCLOSESTREAM *psTLCloseStreamOUT = + (PVRSRV_BRIDGE_OUT_TLCLOSESTREAM *) + IMG_OFFSET_ADDR(psTLCloseStreamOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psTLCloseStreamOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psTLCloseStreamIN->hSD, + PVRSRV_HANDLE_TYPE_PVR_TL_SD); + if (unlikely((psTLCloseStreamOUT->eError != PVRSRV_OK) && + (psTLCloseStreamOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psTLCloseStreamOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto TLCloseStream_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +TLCloseStream_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeTLAcquireData(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLAcquireDataIN_UI8, + IMG_UINT8 * psTLAcquireDataOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLACQUIREDATA *psTLAcquireDataIN = + (PVRSRV_BRIDGE_IN_TLACQUIREDATA *) + IMG_OFFSET_ADDR(psTLAcquireDataIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLACQUIREDATA *psTLAcquireDataOUT = + (PVRSRV_BRIDGE_OUT_TLACQUIREDATA *) + IMG_OFFSET_ADDR(psTLAcquireDataOUT_UI8, 0); + + IMG_HANDLE hSD = psTLAcquireDataIN->hSD; + TL_STREAM_DESC *psSDInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psTLAcquireDataOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSDInt, + hSD, + PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); + if (unlikely(psTLAcquireDataOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto TLAcquireData_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psTLAcquireDataOUT->eError = + TLServerAcquireDataKM(psSDInt, + &psTLAcquireDataOUT->ui32ReadOffset, + &psTLAcquireDataOUT->ui32ReadLen); + +TLAcquireData_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSDInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeTLReleaseData(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLReleaseDataIN_UI8, + IMG_UINT8 * psTLReleaseDataOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLRELEASEDATA *psTLReleaseDataIN = + (PVRSRV_BRIDGE_IN_TLRELEASEDATA *) + IMG_OFFSET_ADDR(psTLReleaseDataIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLRELEASEDATA *psTLReleaseDataOUT = + (PVRSRV_BRIDGE_OUT_TLRELEASEDATA *) + IMG_OFFSET_ADDR(psTLReleaseDataOUT_UI8, 0); + + IMG_HANDLE hSD = psTLReleaseDataIN->hSD; + TL_STREAM_DESC *psSDInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psTLReleaseDataOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSDInt, + hSD, + PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); + if (unlikely(psTLReleaseDataOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto TLReleaseData_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psTLReleaseDataOUT->eError = + TLServerReleaseDataKM(psSDInt, + psTLReleaseDataIN->ui32ReadOffset, + psTLReleaseDataIN->ui32ReadLen); + +TLReleaseData_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSDInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeTLDiscoverStreams(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLDiscoverStreamsIN_UI8, + IMG_UINT8 * psTLDiscoverStreamsOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS *psTLDiscoverStreamsIN = + (PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS *) + IMG_OFFSET_ADDR(psTLDiscoverStreamsIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS *psTLDiscoverStreamsOUT = + (PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS *) + IMG_OFFSET_ADDR(psTLDiscoverStreamsOUT_UI8, 0); + + IMG_CHAR *uiNamePatternInt = NULL; + IMG_CHAR *puiStreamsInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) + + (psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR)) + 0; + + if (psTLDiscoverStreamsIN->ui32Size > + PVRSRVTL_MAX_DISCOVERABLE_STREAMS_BUFFER) + { + psTLDiscoverStreamsOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto TLDiscoverStreams_exit; + } + + PVR_UNREFERENCED_PARAMETER(psConnection); + + psTLDiscoverStreamsOUT->puiStreams = psTLDiscoverStreamsIN->puiStreams; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psTLDiscoverStreamsIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psTLDiscoverStreamsIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psTLDiscoverStreamsOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto TLDiscoverStreams_exit; + } + } + } + + { + uiNamePatternInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiNamePatternInt, + (const void __user *)psTLDiscoverStreamsIN->puiNamePattern, + PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) != + PVRSRV_OK) + { + psTLDiscoverStreamsOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto TLDiscoverStreams_exit; + } + ((IMG_CHAR *) + uiNamePatternInt)[(PRVSRVTL_MAX_STREAM_NAME_SIZE * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + if (psTLDiscoverStreamsIN->ui32Size != 0) + { + puiStreamsInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR); + } + + psTLDiscoverStreamsOUT->eError = + TLServerDiscoverStreamsKM(uiNamePatternInt, + psTLDiscoverStreamsIN->ui32Size, + puiStreamsInt, + &psTLDiscoverStreamsOUT->ui32NumFound); + + /* If dest ptr is non-null and we have data to copy */ + if ((puiStreamsInt) && + ((psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, (void __user *)psTLDiscoverStreamsOUT->puiStreams, + puiStreamsInt, + (psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR))) != + PVRSRV_OK)) + { + psTLDiscoverStreamsOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto TLDiscoverStreams_exit; + } + } + 
+TLDiscoverStreams_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeTLReserveStream(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLReserveStreamIN_UI8, + IMG_UINT8 * psTLReserveStreamOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLRESERVESTREAM *psTLReserveStreamIN = + (PVRSRV_BRIDGE_IN_TLRESERVESTREAM *) + IMG_OFFSET_ADDR(psTLReserveStreamIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLRESERVESTREAM *psTLReserveStreamOUT = + (PVRSRV_BRIDGE_OUT_TLRESERVESTREAM *) + IMG_OFFSET_ADDR(psTLReserveStreamOUT_UI8, 0); + + IMG_HANDLE hSD = psTLReserveStreamIN->hSD; + TL_STREAM_DESC *psSDInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psTLReserveStreamOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSDInt, + hSD, + PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); + if (unlikely(psTLReserveStreamOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto TLReserveStream_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psTLReserveStreamOUT->eError = + TLServerReserveStreamKM(psSDInt, + &psTLReserveStreamOUT->ui32BufferOffset, + psTLReserveStreamIN->ui32Size, + psTLReserveStreamIN->ui32SizeMin, + &psTLReserveStreamOUT->ui32Available); + +TLReserveStream_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSDInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeTLCommitStream(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLCommitStreamIN_UI8, + IMG_UINT8 * psTLCommitStreamOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLCOMMITSTREAM *psTLCommitStreamIN = + (PVRSRV_BRIDGE_IN_TLCOMMITSTREAM *) + IMG_OFFSET_ADDR(psTLCommitStreamIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM *psTLCommitStreamOUT = + (PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM *) + IMG_OFFSET_ADDR(psTLCommitStreamOUT_UI8, 0); + + IMG_HANDLE hSD = psTLCommitStreamIN->hSD; + TL_STREAM_DESC *psSDInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psTLCommitStreamOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSDInt, + hSD, + PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); + if (unlikely(psTLCommitStreamOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto TLCommitStream_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psTLCommitStreamOUT->eError = + TLServerCommitStreamKM(psSDInt, psTLCommitStreamIN->ui32ReqSize); + +TLCommitStream_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSDInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeTLWriteData(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLWriteDataIN_UI8, + IMG_UINT8 * psTLWriteDataOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLWRITEDATA *psTLWriteDataIN = + (PVRSRV_BRIDGE_IN_TLWRITEDATA *) + IMG_OFFSET_ADDR(psTLWriteDataIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLWRITEDATA *psTLWriteDataOUT = + (PVRSRV_BRIDGE_OUT_TLWRITEDATA *) + IMG_OFFSET_ADDR(psTLWriteDataOUT_UI8, 0); + + IMG_HANDLE hSD = psTLWriteDataIN->hSD; + TL_STREAM_DESC *psSDInt = NULL; + IMG_BYTE *psDataInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) + 0; + + if (unlikely(psTLWriteDataIN->ui32Size > PVRSRVTL_MAX_PACKET_SIZE)) + { + psTLWriteDataOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto TLWriteData_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psTLWriteDataIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psTLWriteDataIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psTLWriteDataOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto TLWriteData_exit; + } + } + } + + if (psTLWriteDataIN->ui32Size != 0) + { + psDataInt = + (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, psDataInt, + (const void __user *)psTLWriteDataIN->psData, + psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) != PVRSRV_OK) + { + psTLWriteDataOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto TLWriteData_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psTLWriteDataOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSDInt, + hSD, + PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); + if (unlikely(psTLWriteDataOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto TLWriteData_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psTLWriteDataOUT->eError = + TLServerWriteDataKM(psSDInt, psTLWriteDataIN->ui32Size, psDataInt); + +TLWriteData_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSDInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitPVRTLBridge(void); +PVRSRV_ERROR DeinitPVRTLBridge(void); + +/* + * Register all PVRTL functions with services + */ +PVRSRV_ERROR InitPVRTLBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM, + PVRSRVBridgeTLOpenStream, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM, + PVRSRVBridgeTLCloseStream, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA, + PVRSRVBridgeTLAcquireData, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA, + PVRSRVBridgeTLReleaseData, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS, + PVRSRVBridgeTLDiscoverStreams, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM, + PVRSRVBridgeTLReserveStream, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM, + PVRSRVBridgeTLCommitStream, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLWRITEDATA, + PVRSRVBridgeTLWriteData, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all pvrtl functions with services + */ +PVRSRV_ERROR DeinitPVRTLBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA); + + 
UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLWRITEDATA); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/rgxbreakpoint_bridge/common_rgxbreakpoint_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/rgxbreakpoint_bridge/common_rgxbreakpoint_bridge.h new file mode 100644 index 000000000000..880f3542d776 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxbreakpoint_bridge/common_rgxbreakpoint_bridge.h @@ -0,0 +1,148 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxbreakpoint +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxbreakpoint +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_RGXBREAKPOINT_BRIDGE_H +#define COMMON_RGXBREAKPOINT_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" + +#define PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_LAST (PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+4) + +/******************************************* + RGXSetBreakpoint + *******************************************/ + +/* Bridge in structure for RGXSetBreakpoint */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT_TAG +{ + IMG_HANDLE hPrivData; + IMG_UINT32 eFWDataMaster; + IMG_UINT32 ui32BreakpointAddr; + IMG_UINT32 ui32HandlerAddr; + IMG_UINT32 ui32DM; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT; + +/* Bridge out structure for RGXSetBreakpoint */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT; + +/******************************************* + RGXClearBreakpoint + *******************************************/ + +/* Bridge in structure for RGXClearBreakpoint */ +typedef struct PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT_TAG +{ + IMG_HANDLE hPrivData; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT; + +/* Bridge out structure for RGXClearBreakpoint */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) 
PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT; + +/******************************************* + RGXEnableBreakpoint + *******************************************/ + +/* Bridge in structure for RGXEnableBreakpoint */ +typedef struct PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT_TAG +{ + IMG_HANDLE hPrivData; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT; + +/* Bridge out structure for RGXEnableBreakpoint */ +typedef struct PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT; + +/******************************************* + RGXDisableBreakpoint + *******************************************/ + +/* Bridge in structure for RGXDisableBreakpoint */ +typedef struct PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT_TAG +{ + IMG_HANDLE hPrivData; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT; + +/* Bridge out structure for RGXDisableBreakpoint */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT; + +/******************************************* + RGXOverallocateBPRegisters + *******************************************/ + +/* Bridge in structure for RGXOverallocateBPRegisters */ +typedef struct PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS_TAG +{ + IMG_UINT32 ui32TempRegs; + IMG_UINT32 ui32SharedRegs; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS; + +/* Bridge out structure for RGXOverallocateBPRegisters */ +typedef struct PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS; + +#endif /* COMMON_RGXBREAKPOINT_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/rgxbreakpoint_bridge/server_rgxbreakpoint_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/rgxbreakpoint_bridge/server_rgxbreakpoint_bridge.c new file mode 100644 index 000000000000..89b9b658f656 
--- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxbreakpoint_bridge/server_rgxbreakpoint_bridge.c @@ -0,0 +1,400 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxbreakpoint +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxbreakpoint +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxbreakpoint.h" + +#include "common_rgxbreakpoint_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +#if !defined(EXCLUDE_RGXBREAKPOINT_BRIDGE) + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeRGXSetBreakpoint(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSetBreakpointIN_UI8, + IMG_UINT8 * psRGXSetBreakpointOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT *psRGXSetBreakpointIN = + (PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT *) + IMG_OFFSET_ADDR(psRGXSetBreakpointIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT *psRGXSetBreakpointOUT = + (PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT *) + IMG_OFFSET_ADDR(psRGXSetBreakpointOUT_UI8, 0); + + IMG_HANDLE hPrivData = psRGXSetBreakpointIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetBreakpointOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + IMG_TRUE); + if (unlikely(psRGXSetBreakpointOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetBreakpoint_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetBreakpointOUT->eError = + PVRSRVRGXSetBreakpointKM(psConnection, OSGetDevNode(psConnection), + hPrivDataInt, + psRGXSetBreakpointIN->eFWDataMaster, + psRGXSetBreakpointIN->ui32BreakpointAddr, + psRGXSetBreakpointIN->ui32HandlerAddr, + psRGXSetBreakpointIN->ui32DM); + +RGXSetBreakpoint_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXClearBreakpoint(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXClearBreakpointIN_UI8, + IMG_UINT8 * psRGXClearBreakpointOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT *psRGXClearBreakpointIN = + (PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT *) + IMG_OFFSET_ADDR(psRGXClearBreakpointIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT *psRGXClearBreakpointOUT = + (PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT *) + IMG_OFFSET_ADDR(psRGXClearBreakpointOUT_UI8, 0); + + IMG_HANDLE hPrivData = psRGXClearBreakpointIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXClearBreakpointOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + IMG_TRUE); + if (unlikely(psRGXClearBreakpointOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXClearBreakpoint_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXClearBreakpointOUT->eError = + PVRSRVRGXClearBreakpointKM(psConnection, OSGetDevNode(psConnection), + hPrivDataInt); + +RGXClearBreakpoint_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXEnableBreakpoint(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXEnableBreakpointIN_UI8, + IMG_UINT8 * psRGXEnableBreakpointOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT *psRGXEnableBreakpointIN = + (PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT *) + IMG_OFFSET_ADDR(psRGXEnableBreakpointIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT *psRGXEnableBreakpointOUT = + (PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT *) + IMG_OFFSET_ADDR(psRGXEnableBreakpointOUT_UI8, 0); + + IMG_HANDLE hPrivData = psRGXEnableBreakpointIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXEnableBreakpointOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + IMG_TRUE); + if (unlikely(psRGXEnableBreakpointOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXEnableBreakpoint_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXEnableBreakpointOUT->eError = + PVRSRVRGXEnableBreakpointKM(psConnection, + OSGetDevNode(psConnection), + hPrivDataInt); + +RGXEnableBreakpoint_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDisableBreakpoint(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDisableBreakpointIN_UI8, + IMG_UINT8 * psRGXDisableBreakpointOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT *psRGXDisableBreakpointIN = + (PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT *) + IMG_OFFSET_ADDR(psRGXDisableBreakpointIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT *psRGXDisableBreakpointOUT = + (PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT *) + IMG_OFFSET_ADDR(psRGXDisableBreakpointOUT_UI8, 0); + + IMG_HANDLE hPrivData = psRGXDisableBreakpointIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXDisableBreakpointOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + IMG_TRUE); + if (unlikely(psRGXDisableBreakpointOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXDisableBreakpoint_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXDisableBreakpointOUT->eError = + PVRSRVRGXDisableBreakpointKM(psConnection, + OSGetDevNode(psConnection), + hPrivDataInt); + +RGXDisableBreakpoint_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXOverallocateBPRegisters(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXOverallocateBPRegistersIN_UI8, + IMG_UINT8 * + psRGXOverallocateBPRegistersOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS + *psRGXOverallocateBPRegistersIN = + (PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS *) + IMG_OFFSET_ADDR(psRGXOverallocateBPRegistersIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS + *psRGXOverallocateBPRegistersOUT = + (PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS *) + IMG_OFFSET_ADDR(psRGXOverallocateBPRegistersOUT_UI8, 0); + + psRGXOverallocateBPRegistersOUT->eError = + PVRSRVRGXOverallocateBPRegistersKM(psConnection, + OSGetDevNode(psConnection), + psRGXOverallocateBPRegistersIN-> + ui32TempRegs, + psRGXOverallocateBPRegistersIN-> + ui32SharedRegs); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +#endif /* EXCLUDE_RGXBREAKPOINT_BRIDGE */ + +#if !defined(EXCLUDE_RGXBREAKPOINT_BRIDGE) +PVRSRV_ERROR InitRGXBREAKPOINTBridge(void); +PVRSRV_ERROR DeinitRGXBREAKPOINTBridge(void); + +/* + * Register all RGXBREAKPOINT functions with services + */ +PVRSRV_ERROR InitRGXBREAKPOINTBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, + PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT, + PVRSRVBridgeRGXSetBreakpoint, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, + PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT, + PVRSRVBridgeRGXClearBreakpoint, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, + PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT, + PVRSRVBridgeRGXEnableBreakpoint, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, + PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT, + PVRSRVBridgeRGXDisableBreakpoint, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, + 
PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS, + PVRSRVBridgeRGXOverallocateBPRegisters, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxbreakpoint functions with services + */ +PVRSRV_ERROR DeinitRGXBREAKPOINTBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, + PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, + PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, + PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, + PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, + PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS); + + return PVRSRV_OK; +} +#else /* EXCLUDE_RGXBREAKPOINT_BRIDGE */ +/* This bridge is conditional on EXCLUDE_RGXBREAKPOINT_BRIDGE - when defined, + * do not populate the dispatch table with its functions + */ +#define InitRGXBREAKPOINTBridge() \ + PVRSRV_OK + +#define DeinitRGXBREAKPOINTBridge() \ + PVRSRV_OK + +#endif /* EXCLUDE_RGXBREAKPOINT_BRIDGE */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/rgxcmp_bridge/common_rgxcmp_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/rgxcmp_bridge/common_rgxcmp_bridge.h new file mode 100644 index 000000000000..2f4ebb0f2561 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxcmp_bridge/common_rgxcmp_bridge.h @@ -0,0 +1,228 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxcmp +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxcmp +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_RGXCMP_BRIDGE_H +#define COMMON_RGXCMP_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" +#include "pvrsrv_sync_km.h" + +#define PVRSRV_BRIDGE_RGXCMP_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXCMP_RGXGETLASTCOMPUTECONTEXTRESETREASON PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+5 +#define PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2 PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+6 +#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+7 +#define PVRSRV_BRIDGE_RGXCMP_CMD_LAST (PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+7) + +/******************************************* + RGXCreateComputeContext + *******************************************/ + +/* Bridge in structure for RGXCreateComputeContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT_TAG +{ + IMG_UINT32 ui32Priority; + IMG_UINT32 ui32FrameworkCmdize; + IMG_BYTE *psFrameworkCmd; + IMG_HANDLE hPrivData; + IMG_UINT32 ui32StaticComputeContextStateSize; + IMG_BYTE *psStaticComputeContextState; + IMG_UINT32 ui32PackedCCBSizeU88; + IMG_UINT32 ui32ContextFlags; + IMG_UINT64 ui64RobustnessAddress; + IMG_UINT32 ui32MaxDeadlineMS; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT; + +/* Bridge out structure for RGXCreateComputeContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT_TAG +{ + IMG_HANDLE hComputeContext; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT; + 
+/******************************************* + RGXDestroyComputeContext + *******************************************/ + +/* Bridge in structure for RGXDestroyComputeContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT_TAG +{ + IMG_HANDLE hComputeContext; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT; + +/* Bridge out structure for RGXDestroyComputeContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT; + +/******************************************* + RGXFlushComputeData + *******************************************/ + +/* Bridge in structure for RGXFlushComputeData */ +typedef struct PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA_TAG +{ + IMG_HANDLE hComputeContext; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA; + +/* Bridge out structure for RGXFlushComputeData */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA; + +/******************************************* + RGXSetComputeContextPriority + *******************************************/ + +/* Bridge in structure for RGXSetComputeContextPriority */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY_TAG +{ + IMG_HANDLE hComputeContext; + IMG_UINT32 ui32Priority; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY; + +/* Bridge out structure for RGXSetComputeContextPriority */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY; + +/******************************************* + RGXGetLastComputeContextResetReason + *******************************************/ + +/* Bridge in structure for RGXGetLastComputeContextResetReason */ +typedef struct PVRSRV_BRIDGE_IN_RGXGETLASTCOMPUTECONTEXTRESETREASON_TAG +{ + 
IMG_HANDLE hComputeContext; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXGETLASTCOMPUTECONTEXTRESETREASON; + +/* Bridge out structure for RGXGetLastComputeContextResetReason */ +typedef struct PVRSRV_BRIDGE_OUT_RGXGETLASTCOMPUTECONTEXTRESETREASON_TAG +{ + IMG_UINT32 ui32LastResetReason; + IMG_UINT32 ui32LastResetJobRef; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) + PVRSRV_BRIDGE_OUT_RGXGETLASTCOMPUTECONTEXTRESETREASON; + +/******************************************* + RGXNotifyComputeWriteOffsetUpdate + *******************************************/ + +/* Bridge in structure for RGXNotifyComputeWriteOffsetUpdate */ +typedef struct PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG +{ + IMG_HANDLE hComputeContext; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE; + +/* Bridge out structure for RGXNotifyComputeWriteOffsetUpdate */ +typedef struct PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE; + +/******************************************* + RGXKickCDM2 + *******************************************/ + +/* Bridge in structure for RGXKickCDM2 */ +typedef struct PVRSRV_BRIDGE_IN_RGXKICKCDM2_TAG +{ + IMG_HANDLE hComputeContext; + IMG_UINT32 ui32ClientCacheOpSeqNum; + IMG_UINT32 ui32ClientUpdateCount; + IMG_HANDLE *phClientUpdateUFOSyncPrimBlock; + IMG_UINT32 *pui32ClientUpdateOffset; + IMG_UINT32 *pui32ClientUpdateValue; + PVRSRV_FENCE hCheckFenceFd; + PVRSRV_TIMELINE hUpdateTimeline; + IMG_CHAR *puiUpdateFenceName; + IMG_UINT32 ui32CmdSize; + IMG_BYTE *psDMCmd; + IMG_UINT32 ui32PDumpFlags; + IMG_UINT32 ui32ExtJobRef; + IMG_UINT32 ui32NumOfWorkgroups; + IMG_UINT32 ui32NumOfWorkitems; + IMG_UINT64 ui64DeadlineInus; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXKICKCDM2; + +/* Bridge out structure for RGXKickCDM2 */ +typedef struct PVRSRV_BRIDGE_OUT_RGXKICKCDM2_TAG +{ + PVRSRV_FENCE hUpdateFence; + PVRSRV_ERROR 
eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXKICKCDM2; + +/******************************************* + RGXSetComputeContextProperty + *******************************************/ + +/* Bridge in structure for RGXSetComputeContextProperty */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY_TAG +{ + IMG_HANDLE hComputeContext; + IMG_UINT32 ui32Property; + IMG_UINT64 ui64Input; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY; + +/* Bridge out structure for RGXSetComputeContextProperty */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY_TAG +{ + IMG_UINT64 ui64Output; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY; + +#endif /* COMMON_RGXCMP_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/rgxcmp_bridge/server_rgxcmp_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/rgxcmp_bridge/server_rgxcmp_bridge.c new file mode 100644 index 000000000000..a65ce0ba5496 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxcmp_bridge/server_rgxcmp_bridge.c @@ -0,0 +1,1201 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxcmp +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxcmp +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxcompute.h" + +#include "common_rgxcmp_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +#include "rgx_bvnc_defs_km.h" + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _RGXCreateComputeContextpsComputeContextIntRelease(void + *pvData) +{ + PVRSRV_ERROR eError; + eError = + PVRSRVRGXDestroyComputeContextKM((RGX_SERVER_COMPUTE_CONTEXT *) + pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXCreateComputeContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateComputeContextIN_UI8, + IMG_UINT8 * + psRGXCreateComputeContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextIN = + (PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateComputeContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextOUT + = + (PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateComputeContextOUT_UI8, 0); + + IMG_BYTE *psFrameworkCmdInt = NULL; + IMG_HANDLE hPrivData = psRGXCreateComputeContextIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + IMG_BYTE *psStaticComputeContextStateInt = NULL; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXCreateComputeContextIN->ui32FrameworkCmdize * + sizeof(IMG_BYTE)) + + (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize * + sizeof(IMG_BYTE)) + 0; + + if (unlikely + 
(psRGXCreateComputeContextIN->ui32FrameworkCmdize > + RGXFWIF_RF_CMD_SIZE)) + { + psRGXCreateComputeContextOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXCreateComputeContext_exit; + } + + if (unlikely + (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize > + RGXFWIF_STATIC_COMPUTECONTEXT_SIZE)) + { + psRGXCreateComputeContextOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXCreateComputeContext_exit; + } + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_COMPUTE_BIT_MASK)) + { + psRGXCreateComputeContextOUT->eError = + PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXCreateComputeContext_exit; + } + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXCreateComputeContextIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXCreateComputeContextIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXCreateComputeContextOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXCreateComputeContext_exit; + } + } + } + + if (psRGXCreateComputeContextIN->ui32FrameworkCmdize != 0) + { + psFrameworkCmdInt = + (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXCreateComputeContextIN->ui32FrameworkCmdize * + sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXCreateComputeContextIN->ui32FrameworkCmdize * + sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, psFrameworkCmdInt, + (const void __user *)psRGXCreateComputeContextIN-> + psFrameworkCmd, + psRGXCreateComputeContextIN->ui32FrameworkCmdize * + sizeof(IMG_BYTE)) != PVRSRV_OK) + { + psRGXCreateComputeContextOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateComputeContext_exit; + } + } + if (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize != 0) + { + psStaticComputeContextStateInt = + (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXCreateComputeContextIN-> + ui32StaticComputeContextStateSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize * + sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, psStaticComputeContextStateInt, + (const void __user *)psRGXCreateComputeContextIN-> + psStaticComputeContextState, + psRGXCreateComputeContextIN-> + ui32StaticComputeContextStateSize * sizeof(IMG_BYTE)) != + PVRSRV_OK) + { + psRGXCreateComputeContextOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateComputeContext_exit; 
+ } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXCreateComputeContextOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + IMG_TRUE); + if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateComputeContext_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXCreateComputeContextOUT->eError = + PVRSRVRGXCreateComputeContextKM(psConnection, + OSGetDevNode(psConnection), + psRGXCreateComputeContextIN-> + ui32Priority, + psRGXCreateComputeContextIN-> + ui32FrameworkCmdize, + psFrameworkCmdInt, hPrivDataInt, + psRGXCreateComputeContextIN-> + ui32StaticComputeContextStateSize, + psStaticComputeContextStateInt, + psRGXCreateComputeContextIN-> + ui32PackedCCBSizeU88, + psRGXCreateComputeContextIN-> + ui32ContextFlags, + psRGXCreateComputeContextIN-> + ui64RobustnessAddress, + psRGXCreateComputeContextIN-> + ui32MaxDeadlineMS, + &psComputeContextInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)) + { + goto RGXCreateComputeContext_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRGXCreateComputeContextOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateComputeContextOUT-> + hComputeContext, + (void *)psComputeContextInt, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateComputeContextpsComputeContextIntRelease); + if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateComputeContext_exit; + } + + /* Release now we have created handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXCreateComputeContext_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXCreateComputeContextOUT->eError != PVRSRV_OK) + { + if (psComputeContextInt) + { + PVRSRVRGXDestroyComputeContextKM(psComputeContextInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDestroyComputeContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXDestroyComputeContextIN_UI8, + IMG_UINT8 * + psRGXDestroyComputeContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT *psRGXDestroyComputeContextIN + = + (PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyComputeContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT + *psRGXDestroyComputeContextOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyComputeContextOUT_UI8, 0); + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_COMPUTE_BIT_MASK)) + { + psRGXDestroyComputeContextOUT->eError = + PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXDestroyComputeContext_exit; + } + } + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXDestroyComputeContextOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psRGXDestroyComputeContextIN-> + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + if (unlikely + ((psRGXDestroyComputeContextOUT->eError != PVRSRV_OK) + && (psRGXDestroyComputeContextOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psRGXDestroyComputeContextOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyComputeContext_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXDestroyComputeContext_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFlushComputeData(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXFlushComputeDataIN_UI8, + IMG_UINT8 * psRGXFlushComputeDataOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA *psRGXFlushComputeDataIN = + (PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA *) + IMG_OFFSET_ADDR(psRGXFlushComputeDataIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA *psRGXFlushComputeDataOUT = + (PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA *) + IMG_OFFSET_ADDR(psRGXFlushComputeDataOUT_UI8, 0); + + IMG_HANDLE hComputeContext = psRGXFlushComputeDataIN->hComputeContext; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_COMPUTE_BIT_MASK)) + { + psRGXFlushComputeDataOUT->eError = + PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXFlushComputeData_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXFlushComputeDataOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psComputeContextInt, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXFlushComputeDataOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXFlushComputeData_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXFlushComputeDataOUT->eError = + PVRSRVRGXFlushComputeDataKM(psComputeContextInt); + +RGXFlushComputeData_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psComputeContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSetComputeContextPriority(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXSetComputeContextPriorityIN_UI8, + IMG_UINT8 * + psRGXSetComputeContextPriorityOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY + *psRGXSetComputeContextPriorityIN = + (PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXSetComputeContextPriorityIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY + *psRGXSetComputeContextPriorityOUT = + (PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXSetComputeContextPriorityOUT_UI8, 0); + + IMG_HANDLE hComputeContext = + psRGXSetComputeContextPriorityIN->hComputeContext; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_COMPUTE_BIT_MASK)) + { + psRGXSetComputeContextPriorityOUT->eError = + PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXSetComputeContextPriority_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetComputeContextPriorityOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psComputeContextInt, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXSetComputeContextPriorityOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetComputeContextPriority_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetComputeContextPriorityOUT->eError = + PVRSRVRGXSetComputeContextPriorityKM(psConnection, + OSGetDevNode(psConnection), + psComputeContextInt, + psRGXSetComputeContextPriorityIN-> + ui32Priority); + +RGXSetComputeContextPriority_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psComputeContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXGetLastComputeContextResetReason(IMG_UINT32 + ui32DispatchTableEntry, + IMG_UINT8 * + psRGXGetLastComputeContextResetReasonIN_UI8, + IMG_UINT8 * + psRGXGetLastComputeContextResetReasonOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXGETLASTCOMPUTECONTEXTRESETREASON + *psRGXGetLastComputeContextResetReasonIN = + (PVRSRV_BRIDGE_IN_RGXGETLASTCOMPUTECONTEXTRESETREASON *) + IMG_OFFSET_ADDR(psRGXGetLastComputeContextResetReasonIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXGETLASTCOMPUTECONTEXTRESETREASON + *psRGXGetLastComputeContextResetReasonOUT = + (PVRSRV_BRIDGE_OUT_RGXGETLASTCOMPUTECONTEXTRESETREASON *) + IMG_OFFSET_ADDR(psRGXGetLastComputeContextResetReasonOUT_UI8, 0); + + IMG_HANDLE hComputeContext = + psRGXGetLastComputeContextResetReasonIN->hComputeContext; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_COMPUTE_BIT_MASK)) + { + psRGXGetLastComputeContextResetReasonOUT->eError = + PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXGetLastComputeContextResetReason_exit; + } + } + + /* 
Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXGetLastComputeContextResetReasonOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psComputeContextInt, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, + IMG_TRUE); + if (unlikely + (psRGXGetLastComputeContextResetReasonOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXGetLastComputeContextResetReason_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXGetLastComputeContextResetReasonOUT->eError = + PVRSRVRGXGetLastComputeContextResetReasonKM(psComputeContextInt, + &psRGXGetLastComputeContextResetReasonOUT-> + ui32LastResetReason, + &psRGXGetLastComputeContextResetReasonOUT-> + ui32LastResetJobRef); + +RGXGetLastComputeContextResetReason_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psComputeContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXNotifyComputeWriteOffsetUpdateIN_UI8, + IMG_UINT8 * + psRGXNotifyComputeWriteOffsetUpdateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE + *psRGXNotifyComputeWriteOffsetUpdateIN = + (PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *) + IMG_OFFSET_ADDR(psRGXNotifyComputeWriteOffsetUpdateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE + *psRGXNotifyComputeWriteOffsetUpdateOUT = + (PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *) + IMG_OFFSET_ADDR(psRGXNotifyComputeWriteOffsetUpdateOUT_UI8, 0); + + IMG_HANDLE hComputeContext = + psRGXNotifyComputeWriteOffsetUpdateIN->hComputeContext; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_COMPUTE_BIT_MASK)) + { + psRGXNotifyComputeWriteOffsetUpdateOUT->eError = + PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXNotifyComputeWriteOffsetUpdate_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXNotifyComputeWriteOffsetUpdateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psComputeContextInt, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, + IMG_TRUE); + if (unlikely + (psRGXNotifyComputeWriteOffsetUpdateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXNotifyComputeWriteOffsetUpdate_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXNotifyComputeWriteOffsetUpdateOUT->eError = + PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(psComputeContextInt); + +RGXNotifyComputeWriteOffsetUpdate_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psComputeContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXKickCDM2IN_UI8, + IMG_UINT8 * psRGXKickCDM2OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXKICKCDM2 *psRGXKickCDM2IN = + (PVRSRV_BRIDGE_IN_RGXKICKCDM2 *) + IMG_OFFSET_ADDR(psRGXKickCDM2IN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXKICKCDM2 *psRGXKickCDM2OUT = + (PVRSRV_BRIDGE_OUT_RGXKICKCDM2 *) + IMG_OFFSET_ADDR(psRGXKickCDM2OUT_UI8, 0); + + IMG_HANDLE hComputeContext = psRGXKickCDM2IN->hComputeContext; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + SYNC_PRIMITIVE_BLOCK **psClientUpdateUFOSyncPrimBlockInt = NULL; + IMG_HANDLE *hClientUpdateUFOSyncPrimBlockInt2 = NULL; + IMG_UINT32 *ui32ClientUpdateOffsetInt = NULL; + IMG_UINT32 *ui32ClientUpdateValueInt = NULL; + IMG_CHAR *uiUpdateFenceNameInt = NULL; + IMG_BYTE *psDMCmdInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXKickCDM2IN->ui32ClientUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)) + + (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) + + (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + 
(psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE)) + 0; + + if (unlikely(psRGXKickCDM2IN->ui32ClientUpdateCount > PVRSRV_MAX_SYNCS)) + { + psRGXKickCDM2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickCDM2_exit; + } + + if (unlikely + (psRGXKickCDM2IN->ui32CmdSize > + RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXKickCDM2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickCDM2_exit; + } + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_COMPUTE_BIT_MASK)) + { + psRGXKickCDM2OUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXKickCDM2_exit; + } + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXKickCDM2IN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXKickCDM2IN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXKickCDM2OUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXKickCDM2_exit; + } + } + } + + if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0) + { + psClientUpdateUFOSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickCDM2IN->ui32ClientUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *); + hClientUpdateUFOSyncPrimBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hClientUpdateUFOSyncPrimBlockInt2, + (const void __user *)psRGXKickCDM2IN-> + phClientUpdateUFOSyncPrimBlock, + psRGXKickCDM2IN->ui32ClientUpdateCount * + sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickCDM2_exit; + } + } + if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0) + { + ui32ClientUpdateOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientUpdateOffsetInt, + (const void __user *)psRGXKickCDM2IN-> + pui32ClientUpdateOffset, + psRGXKickCDM2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto 
RGXKickCDM2_exit; + } + } + if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0) + { + ui32ClientUpdateValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientUpdateValueInt, + (const void __user *)psRGXKickCDM2IN-> + pui32ClientUpdateValue, + psRGXKickCDM2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickCDM2_exit; + } + } + + { + uiUpdateFenceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceNameInt, + (const void __user *)psRGXKickCDM2IN->puiUpdateFenceName, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickCDM2_exit; + } + ((IMG_CHAR *) + uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + if (psRGXKickCDM2IN->ui32CmdSize != 0) + { + psDMCmdInt = + (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, psDMCmdInt, + (const void __user *)psRGXKickCDM2IN->psDMCmd, + psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE)) != + PVRSRV_OK) + { + psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickCDM2_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXKickCDM2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psComputeContextInt, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickCDM2_exit; + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickCDM2IN->ui32ClientUpdateCount; i++) + { + /* Look up the address from the handle */ + psRGXKickCDM2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection-> + psHandleBase, + (void **) + &psClientUpdateUFOSyncPrimBlockInt + [i], + hClientUpdateUFOSyncPrimBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickCDM2_exit; + } + } + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXKickCDM2OUT->eError = + PVRSRVRGXKickCDMKM(psComputeContextInt, + psRGXKickCDM2IN->ui32ClientCacheOpSeqNum, + psRGXKickCDM2IN->ui32ClientUpdateCount, + psClientUpdateUFOSyncPrimBlockInt, + ui32ClientUpdateOffsetInt, + ui32ClientUpdateValueInt, + psRGXKickCDM2IN->hCheckFenceFd, + psRGXKickCDM2IN->hUpdateTimeline, + &psRGXKickCDM2OUT->hUpdateFence, + uiUpdateFenceNameInt, + psRGXKickCDM2IN->ui32CmdSize, + psDMCmdInt, + psRGXKickCDM2IN->ui32PDumpFlags, + psRGXKickCDM2IN->ui32ExtJobRef, + psRGXKickCDM2IN->ui32NumOfWorkgroups, + psRGXKickCDM2IN->ui32NumOfWorkitems, + psRGXKickCDM2IN->ui64DeadlineInus); + +RGXKickCDM2_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psComputeContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + } + + if (hClientUpdateUFOSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickCDM2IN->ui32ClientUpdateCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hClientUpdateUFOSyncPrimBlockInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + hClientUpdateUFOSyncPrimBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSetComputeContextProperty(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXSetComputeContextPropertyIN_UI8, + IMG_UINT8 * + psRGXSetComputeContextPropertyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY + *psRGXSetComputeContextPropertyIN = + (PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetComputeContextPropertyIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY + *psRGXSetComputeContextPropertyOUT = + (PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetComputeContextPropertyOUT_UI8, 0); + + IMG_HANDLE hComputeContext = + psRGXSetComputeContextPropertyIN->hComputeContext; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) 
&& + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_COMPUTE_BIT_MASK)) + { + psRGXSetComputeContextPropertyOUT->eError = + PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXSetComputeContextProperty_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetComputeContextPropertyOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psComputeContextInt, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXSetComputeContextPropertyOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetComputeContextProperty_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetComputeContextPropertyOUT->eError = + PVRSRVRGXSetComputeContextPropertyKM(psComputeContextInt, + psRGXSetComputeContextPropertyIN-> + ui32Property, + psRGXSetComputeContextPropertyIN-> + ui64Input, + &psRGXSetComputeContextPropertyOUT-> + ui64Output); + +RGXSetComputeContextProperty_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psComputeContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRGXCMPBridge(void); +PVRSRV_ERROR DeinitRGXCMPBridge(void); + +/* + * Register all RGXCMP functions with services + */ +PVRSRV_ERROR InitRGXCMPBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT, + PVRSRVBridgeRGXCreateComputeContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT, + PVRSRVBridgeRGXDestroyComputeContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA, + PVRSRVBridgeRGXFlushComputeData, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY, + PVRSRVBridgeRGXSetComputeContextPriority, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXGETLASTCOMPUTECONTEXTRESETREASON, + PVRSRVBridgeRGXGetLastComputeContextResetReason, + NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE, + PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate, + NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2, + PVRSRVBridgeRGXKickCDM2, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY, + PVRSRVBridgeRGXSetComputeContextProperty, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxcmp functions with services + */ +PVRSRV_ERROR DeinitRGXCMPBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA); + + 
UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXGETLASTCOMPUTECONTEXTRESETREASON); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/rgxfwdbg_bridge/common_rgxfwdbg_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/rgxfwdbg_bridge/common_rgxfwdbg_bridge.h new file mode 100644 index 000000000000..96341f37a34f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxfwdbg_bridge/common_rgxfwdbg_bridge.h @@ -0,0 +1,182 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxfwdbg +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxfwdbg +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_RGXFWDBG_BRIDGE_H +#define COMMON_RGXFWDBG_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "devicemem_typedefs.h" +#include "rgx_bridge.h" +#include "pvrsrv_memallocflags.h" + +#define PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+5 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+6 +#define PVRSRV_BRIDGE_RGXFWDBG_CMD_LAST (PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+6) + +/******************************************* + RGXFWDebugSetFWLog + *******************************************/ + +/* Bridge in structure for RGXFWDebugSetFWLog */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG_TAG +{ + IMG_UINT32 ui32RGXFWLogType; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG; + +/* Bridge out structure for RGXFWDebugSetFWLog */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG; + +/******************************************* + RGXFWDebugDumpFreelistPageList + *******************************************/ + +/* Bridge in structure for RGXFWDebugDumpFreelistPageList */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST; + +/* Bridge out 
structure for RGXFWDebugDumpFreelistPageList */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST; + +/******************************************* + RGXFWDebugSetHCSDeadline + *******************************************/ + +/* Bridge in structure for RGXFWDebugSetHCSDeadline */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE_TAG +{ + IMG_UINT32 ui32RGXHCSDeadline; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE; + +/* Bridge out structure for RGXFWDebugSetHCSDeadline */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE; + +/******************************************* + RGXFWDebugSetOSidPriority + *******************************************/ + +/* Bridge in structure for RGXFWDebugSetOSidPriority */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY_TAG +{ + IMG_UINT32 ui32OSid; + IMG_UINT32 ui32Priority; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY; + +/* Bridge out structure for RGXFWDebugSetOSidPriority */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY; + +/******************************************* + RGXFWDebugSetOSNewOnlineState + *******************************************/ + +/* Bridge in structure for RGXFWDebugSetOSNewOnlineState */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE_TAG +{ + IMG_UINT32 ui32OSid; + IMG_UINT32 ui32OSNewState; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE; + +/* Bridge out structure for RGXFWDebugSetOSNewOnlineState */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) 
PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE; + +/******************************************* + RGXFWDebugPHRConfigure + *******************************************/ + +/* Bridge in structure for RGXFWDebugPHRConfigure */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE_TAG +{ + IMG_UINT32 ui32ui32PHRMode; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE; + +/* Bridge out structure for RGXFWDebugPHRConfigure */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE; + +/******************************************* + RGXCurrentTime + *******************************************/ + +/* Bridge in structure for RGXCurrentTime */ +typedef struct PVRSRV_BRIDGE_IN_RGXCURRENTTIME_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCURRENTTIME; + +/* Bridge out structure for RGXCurrentTime */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCURRENTTIME_TAG +{ + IMG_UINT64 ui64Time; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCURRENTTIME; + +#endif /* COMMON_RGXFWDBG_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/rgxfwdbg_bridge/server_rgxfwdbg_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/rgxfwdbg_bridge/server_rgxfwdbg_bridge.c new file mode 100644 index 000000000000..49cfe0c35dc3 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxfwdbg_bridge/server_rgxfwdbg_bridge.c @@ -0,0 +1,316 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxfwdbg +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxfwdbg +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "devicemem_server.h" +#include "rgxfwdbg.h" +#include "pmr.h" +#include "rgxtimecorr.h" + +#include "common_rgxfwdbg_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeRGXFWDebugSetFWLog(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXFWDebugSetFWLogIN_UI8, + IMG_UINT8 * psRGXFWDebugSetFWLogOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG *psRGXFWDebugSetFWLogIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG *) + IMG_OFFSET_ADDR(psRGXFWDebugSetFWLogIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG *psRGXFWDebugSetFWLogOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG *) + IMG_OFFSET_ADDR(psRGXFWDebugSetFWLogOUT_UI8, 0); + + psRGXFWDebugSetFWLogOUT->eError = + PVRSRVRGXFWDebugSetFWLogKM(psConnection, OSGetDevNode(psConnection), + psRGXFWDebugSetFWLogIN-> + ui32RGXFWLogType); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugDumpFreelistPageList(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXFWDebugDumpFreelistPageListIN_UI8, + IMG_UINT8 * + psRGXFWDebugDumpFreelistPageListOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST + *psRGXFWDebugDumpFreelistPageListIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST *) + IMG_OFFSET_ADDR(psRGXFWDebugDumpFreelistPageListIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST + *psRGXFWDebugDumpFreelistPageListOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST *) + IMG_OFFSET_ADDR(psRGXFWDebugDumpFreelistPageListOUT_UI8, 0); + + 
PVR_UNREFERENCED_PARAMETER(psRGXFWDebugDumpFreelistPageListIN); + + psRGXFWDebugDumpFreelistPageListOUT->eError = + PVRSRVRGXFWDebugDumpFreelistPageListKM(psConnection, + OSGetDevNode(psConnection)); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugSetHCSDeadline(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXFWDebugSetHCSDeadlineIN_UI8, + IMG_UINT8 * + psRGXFWDebugSetHCSDeadlineOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE *psRGXFWDebugSetHCSDeadlineIN + = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE *) + IMG_OFFSET_ADDR(psRGXFWDebugSetHCSDeadlineIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE + *psRGXFWDebugSetHCSDeadlineOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE *) + IMG_OFFSET_ADDR(psRGXFWDebugSetHCSDeadlineOUT_UI8, 0); + + psRGXFWDebugSetHCSDeadlineOUT->eError = + PVRSRVRGXFWDebugSetHCSDeadlineKM(psConnection, + OSGetDevNode(psConnection), + psRGXFWDebugSetHCSDeadlineIN-> + ui32RGXHCSDeadline); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugSetOSidPriority(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXFWDebugSetOSidPriorityIN_UI8, + IMG_UINT8 * + psRGXFWDebugSetOSidPriorityOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY + *psRGXFWDebugSetOSidPriorityIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY *) + IMG_OFFSET_ADDR(psRGXFWDebugSetOSidPriorityIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY + *psRGXFWDebugSetOSidPriorityOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY *) + IMG_OFFSET_ADDR(psRGXFWDebugSetOSidPriorityOUT_UI8, 0); + + psRGXFWDebugSetOSidPriorityOUT->eError = + PVRSRVRGXFWDebugSetOSidPriorityKM(psConnection, + OSGetDevNode(psConnection), + psRGXFWDebugSetOSidPriorityIN-> + ui32OSid, + psRGXFWDebugSetOSidPriorityIN-> + ui32Priority); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugSetOSNewOnlineState(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + 
psRGXFWDebugSetOSNewOnlineStateIN_UI8, + IMG_UINT8 * + psRGXFWDebugSetOSNewOnlineStateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE + *psRGXFWDebugSetOSNewOnlineStateIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE *) + IMG_OFFSET_ADDR(psRGXFWDebugSetOSNewOnlineStateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE + *psRGXFWDebugSetOSNewOnlineStateOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE *) + IMG_OFFSET_ADDR(psRGXFWDebugSetOSNewOnlineStateOUT_UI8, 0); + + psRGXFWDebugSetOSNewOnlineStateOUT->eError = + PVRSRVRGXFWDebugSetOSNewOnlineStateKM(psConnection, + OSGetDevNode(psConnection), + psRGXFWDebugSetOSNewOnlineStateIN-> + ui32OSid, + psRGXFWDebugSetOSNewOnlineStateIN-> + ui32OSNewState); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugPHRConfigure(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXFWDebugPHRConfigureIN_UI8, + IMG_UINT8 * psRGXFWDebugPHRConfigureOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE *psRGXFWDebugPHRConfigureIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE *) + IMG_OFFSET_ADDR(psRGXFWDebugPHRConfigureIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE *psRGXFWDebugPHRConfigureOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE *) + IMG_OFFSET_ADDR(psRGXFWDebugPHRConfigureOUT_UI8, 0); + + psRGXFWDebugPHRConfigureOUT->eError = + PVRSRVRGXFWDebugPHRConfigureKM(psConnection, + OSGetDevNode(psConnection), + psRGXFWDebugPHRConfigureIN-> + ui32ui32PHRMode); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXCurrentTime(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCurrentTimeIN_UI8, + IMG_UINT8 * psRGXCurrentTimeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCURRENTTIME *psRGXCurrentTimeIN = + (PVRSRV_BRIDGE_IN_RGXCURRENTTIME *) + IMG_OFFSET_ADDR(psRGXCurrentTimeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCURRENTTIME *psRGXCurrentTimeOUT = + (PVRSRV_BRIDGE_OUT_RGXCURRENTTIME *) + 
IMG_OFFSET_ADDR(psRGXCurrentTimeOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psRGXCurrentTimeIN); + + psRGXCurrentTimeOUT->eError = + PVRSRVRGXCurrentTime(psConnection, OSGetDevNode(psConnection), + &psRGXCurrentTimeOUT->ui64Time); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRGXFWDBGBridge(void); +PVRSRV_ERROR DeinitRGXFWDBGBridge(void); + +/* + * Register all RGXFWDBG functions with services + */ +PVRSRV_ERROR InitRGXFWDBGBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG, + PVRSRVBridgeRGXFWDebugSetFWLog, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST, + PVRSRVBridgeRGXFWDebugDumpFreelistPageList, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE, + PVRSRVBridgeRGXFWDebugSetHCSDeadline, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY, + PVRSRVBridgeRGXFWDebugSetOSidPriority, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE, + PVRSRVBridgeRGXFWDebugSetOSNewOnlineState, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE, + PVRSRVBridgeRGXFWDebugPHRConfigure, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME, + PVRSRVBridgeRGXCurrentTime, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxfwdbg functions with services + */ +PVRSRV_ERROR DeinitRGXFWDBGBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + 
PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/rgxhwperf_bridge/common_rgxhwperf_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/rgxhwperf_bridge/common_rgxhwperf_bridge.h new file mode 100644 index 000000000000..7c23eefb4764 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxhwperf_bridge/common_rgxhwperf_bridge.h @@ -0,0 +1,152 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxhwperf +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxhwperf +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_RGXHWPERF_BRIDGE_H +#define COMMON_RGXHWPERF_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" +#include "rgx_hwperf.h" + +#define PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGENABLEHWPERFCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERFCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST (PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+4) + +/******************************************* + RGXCtrlHWPerf + *******************************************/ + +/* Bridge in structure for RGXCtrlHWPerf */ +typedef struct PVRSRV_BRIDGE_IN_RGXCTRLHWPERF_TAG +{ + IMG_UINT32 ui32StreamId; + IMG_BOOL bToggle; + IMG_UINT64 ui64Mask; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCTRLHWPERF; + +/* Bridge out structure for RGXCtrlHWPerf */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF; + +/******************************************* + RGXConfigEnableHWPerfCounters + *******************************************/ + +/* Bridge in structure for RGXConfigEnableHWPerfCounters */ +typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGENABLEHWPERFCOUNTERS_TAG +{ + IMG_UINT32 ui32ArrayLen; + RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigs; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCONFIGENABLEHWPERFCOUNTERS; + +/* Bridge out structure for RGXConfigEnableHWPerfCounters */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGENABLEHWPERFCOUNTERS_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) 
PVRSRV_BRIDGE_OUT_RGXCONFIGENABLEHWPERFCOUNTERS; + +/******************************************* + RGXCtrlHWPerfCounters + *******************************************/ + +/* Bridge in structure for RGXCtrlHWPerfCounters */ +typedef struct PVRSRV_BRIDGE_IN_RGXCTRLHWPERFCOUNTERS_TAG +{ + IMG_BOOL bEnable; + IMG_UINT32 ui32ArrayLen; + IMG_UINT16 *pui16BlockIDs; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCTRLHWPERFCOUNTERS; + +/* Bridge out structure for RGXCtrlHWPerfCounters */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCTRLHWPERFCOUNTERS_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCTRLHWPERFCOUNTERS; + +/******************************************* + RGXConfigCustomCounters + *******************************************/ + +/* Bridge in structure for RGXConfigCustomCounters */ +typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS_TAG +{ + IMG_UINT16 ui16CustomBlockID; + IMG_UINT16 ui16NumCustomCounters; + IMG_UINT32 *pui32CustomCounterIDs; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS; + +/* Bridge out structure for RGXConfigCustomCounters */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS; + +/******************************************* + RGXGetHWPerfBvncFeatureFlags + *******************************************/ + +/* Bridge in structure for RGXGetHWPerfBvncFeatureFlags */ +typedef struct PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS; + +/* Bridge out structure for RGXGetHWPerfBvncFeatureFlags */ +typedef struct PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS_TAG +{ + RGX_HWPERF_BVNC sBVNC; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS; + +#endif /* COMMON_RGXHWPERF_BRIDGE_H */ diff --git 
a/drivers/mcst/gpu-imgtec/generated/rogue/rgxhwperf_bridge/server_rgxhwperf_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/rgxhwperf_bridge/server_rgxhwperf_bridge.c new file mode 100644 index 000000000000..a501a6ab23be --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxhwperf_bridge/server_rgxhwperf_bridge.c @@ -0,0 +1,538 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxhwperf +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxhwperf +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxhwperf.h" +#include "rgx_fwif_km.h" + +#include "common_rgxhwperf_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeRGXCtrlHWPerf(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCtrlHWPerfIN_UI8, + IMG_UINT8 * psRGXCtrlHWPerfOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCTRLHWPERF *psRGXCtrlHWPerfIN = + (PVRSRV_BRIDGE_IN_RGXCTRLHWPERF *) + IMG_OFFSET_ADDR(psRGXCtrlHWPerfIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF *psRGXCtrlHWPerfOUT = + (PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF *) + IMG_OFFSET_ADDR(psRGXCtrlHWPerfOUT_UI8, 0); + + psRGXCtrlHWPerfOUT->eError = + PVRSRVRGXCtrlHWPerfKM(psConnection, OSGetDevNode(psConnection), + psRGXCtrlHWPerfIN->ui32StreamId, + psRGXCtrlHWPerfIN->bToggle, + psRGXCtrlHWPerfIN->ui64Mask); + + return 0; 
+} + +static IMG_INT +PVRSRVBridgeRGXConfigEnableHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXConfigEnableHWPerfCountersIN_UI8, + IMG_UINT8 * + psRGXConfigEnableHWPerfCountersOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCONFIGENABLEHWPERFCOUNTERS + *psRGXConfigEnableHWPerfCountersIN = + (PVRSRV_BRIDGE_IN_RGXCONFIGENABLEHWPERFCOUNTERS *) + IMG_OFFSET_ADDR(psRGXConfigEnableHWPerfCountersIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCONFIGENABLEHWPERFCOUNTERS + *psRGXConfigEnableHWPerfCountersOUT = + (PVRSRV_BRIDGE_OUT_RGXCONFIGENABLEHWPERFCOUNTERS *) + IMG_OFFSET_ADDR(psRGXConfigEnableHWPerfCountersOUT_UI8, 0); + + RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigsInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen * + sizeof(RGX_HWPERF_CONFIG_CNTBLK)) + 0; + + if (unlikely + (psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen > + RGXFWIF_HWPERF_CTRL_BLKS_MAX)) + { + psRGXConfigEnableHWPerfCountersOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXConfigEnableHWPerfCounters_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXConfigEnableHWPerfCountersIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *) + psRGXConfigEnableHWPerfCountersIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXConfigEnableHWPerfCountersOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXConfigEnableHWPerfCounters_exit; + } + } + } + + if (psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen != 0) + { + psBlockConfigsInt = + (RGX_HWPERF_CONFIG_CNTBLK *) + IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen * + sizeof(RGX_HWPERF_CONFIG_CNTBLK); + } + + /* Copy the data over */ + if (psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen * + sizeof(RGX_HWPERF_CONFIG_CNTBLK) > 0) + { + if (OSCopyFromUser + (NULL, psBlockConfigsInt, + (const void __user *)psRGXConfigEnableHWPerfCountersIN-> + psBlockConfigs, + psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen * + sizeof(RGX_HWPERF_CONFIG_CNTBLK)) != PVRSRV_OK) + { + psRGXConfigEnableHWPerfCountersOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXConfigEnableHWPerfCounters_exit; + } + } + + psRGXConfigEnableHWPerfCountersOUT->eError = + PVRSRVRGXConfigEnableHWPerfCountersKM(psConnection, + OSGetDevNode(psConnection), + psRGXConfigEnableHWPerfCountersIN-> + ui32ArrayLen, + psBlockConfigsInt); + +RGXConfigEnableHWPerfCounters_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXCtrlHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCtrlHWPerfCountersIN_UI8, + IMG_UINT8 * 
psRGXCtrlHWPerfCountersOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCTRLHWPERFCOUNTERS *psRGXCtrlHWPerfCountersIN = + (PVRSRV_BRIDGE_IN_RGXCTRLHWPERFCOUNTERS *) + IMG_OFFSET_ADDR(psRGXCtrlHWPerfCountersIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCTRLHWPERFCOUNTERS *psRGXCtrlHWPerfCountersOUT = + (PVRSRV_BRIDGE_OUT_RGXCTRLHWPERFCOUNTERS *) + IMG_OFFSET_ADDR(psRGXCtrlHWPerfCountersOUT_UI8, 0); + + IMG_UINT16 *ui16BlockIDsInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXCtrlHWPerfCountersIN->ui32ArrayLen * sizeof(IMG_UINT16)) + 0; + + if (unlikely + (psRGXCtrlHWPerfCountersIN->ui32ArrayLen > + RGXFWIF_HWPERF_CTRL_BLKS_MAX)) + { + psRGXCtrlHWPerfCountersOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXCtrlHWPerfCounters_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXCtrlHWPerfCountersIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXCtrlHWPerfCountersIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXCtrlHWPerfCountersOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXCtrlHWPerfCounters_exit; + } + } + } + + if (psRGXCtrlHWPerfCountersIN->ui32ArrayLen != 0) + { + ui16BlockIDsInt = + (IMG_UINT16 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXCtrlHWPerfCountersIN->ui32ArrayLen * + sizeof(IMG_UINT16); + } + + /* Copy the data over */ + if (psRGXCtrlHWPerfCountersIN->ui32ArrayLen * sizeof(IMG_UINT16) > 0) + { + if (OSCopyFromUser + (NULL, ui16BlockIDsInt, + (const void __user *)psRGXCtrlHWPerfCountersIN-> + pui16BlockIDs, + psRGXCtrlHWPerfCountersIN->ui32ArrayLen * + sizeof(IMG_UINT16)) != PVRSRV_OK) + { + psRGXCtrlHWPerfCountersOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCtrlHWPerfCounters_exit; + } + } + + psRGXCtrlHWPerfCountersOUT->eError = + PVRSRVRGXCtrlHWPerfCountersKM(psConnection, + OSGetDevNode(psConnection), + psRGXCtrlHWPerfCountersIN->bEnable, + psRGXCtrlHWPerfCountersIN-> + ui32ArrayLen, ui16BlockIDsInt); + +RGXCtrlHWPerfCounters_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXConfigCustomCounters(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXConfigCustomCountersIN_UI8, + IMG_UINT8 * + psRGXConfigCustomCountersOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS *psRGXConfigCustomCountersIN = + 
(PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS *) + IMG_OFFSET_ADDR(psRGXConfigCustomCountersIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS *psRGXConfigCustomCountersOUT + = + (PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS *) + IMG_OFFSET_ADDR(psRGXConfigCustomCountersOUT_UI8, 0); + + IMG_UINT32 *ui32CustomCounterIDsInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXConfigCustomCountersIN->ui16NumCustomCounters * + sizeof(IMG_UINT32)) + 0; + + if (unlikely + (psRGXConfigCustomCountersIN->ui16NumCustomCounters > + RGX_HWPERF_MAX_CUSTOM_CNTRS)) + { + psRGXConfigCustomCountersOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXConfigCustomCounters_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXConfigCustomCountersIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXConfigCustomCountersIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXConfigCustomCountersOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXConfigCustomCounters_exit; + } + } + } + + if (psRGXConfigCustomCountersIN->ui16NumCustomCounters != 0) + { + ui32CustomCounterIDsInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXConfigCustomCountersIN->ui16NumCustomCounters * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXConfigCustomCountersIN->ui16NumCustomCounters * + sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32CustomCounterIDsInt, + (const void __user *)psRGXConfigCustomCountersIN-> + pui32CustomCounterIDs, + psRGXConfigCustomCountersIN->ui16NumCustomCounters * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXConfigCustomCountersOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXConfigCustomCounters_exit; + } + } + + psRGXConfigCustomCountersOUT->eError = + PVRSRVRGXConfigCustomCountersKM(psConnection, + OSGetDevNode(psConnection), + psRGXConfigCustomCountersIN-> + ui16CustomBlockID, + psRGXConfigCustomCountersIN-> + ui16NumCustomCounters, + ui32CustomCounterIDsInt); + +RGXConfigCustomCounters_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXGetHWPerfBvncFeatureFlags(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXGetHWPerfBvncFeatureFlagsIN_UI8, + IMG_UINT8 * + 
psRGXGetHWPerfBvncFeatureFlagsOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS + *psRGXGetHWPerfBvncFeatureFlagsIN = + (PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS *) + IMG_OFFSET_ADDR(psRGXGetHWPerfBvncFeatureFlagsIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS + *psRGXGetHWPerfBvncFeatureFlagsOUT = + (PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS *) + IMG_OFFSET_ADDR(psRGXGetHWPerfBvncFeatureFlagsOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psRGXGetHWPerfBvncFeatureFlagsIN); + + psRGXGetHWPerfBvncFeatureFlagsOUT->eError = + PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(psConnection, + OSGetDevNode(psConnection), + &psRGXGetHWPerfBvncFeatureFlagsOUT-> + sBVNC); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRGXHWPERFBridge(void); +PVRSRV_ERROR DeinitRGXHWPERFBridge(void); + +/* + * Register all RGXHWPERF functions with services + */ +PVRSRV_ERROR InitRGXHWPERFBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF, + PVRSRVBridgeRGXCtrlHWPerf, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGENABLEHWPERFCOUNTERS, + PVRSRVBridgeRGXConfigEnableHWPerfCounters, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERFCOUNTERS, + PVRSRVBridgeRGXCtrlHWPerfCounters, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS, + PVRSRVBridgeRGXConfigCustomCounters, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS, + PVRSRVBridgeRGXGetHWPerfBvncFeatureFlags, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxhwperf functions with services + */ +PVRSRV_ERROR DeinitRGXHWPERFBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + 
PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGENABLEHWPERFCOUNTERS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERFCOUNTERS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/rgxkicksync_bridge/common_rgxkicksync_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/rgxkicksync_bridge/common_rgxkicksync_bridge.h new file mode 100644 index 000000000000..58fceebb3a01 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxkicksync_bridge/common_rgxkicksync_bridge.h @@ -0,0 +1,143 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxkicksync +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxkicksync +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_RGXKICKSYNC_BRIDGE_H +#define COMMON_RGXKICKSYNC_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" +#include "pvrsrv_sync_km.h" + +#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2 PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_LAST (PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+3) + +/******************************************* + RGXCreateKickSyncContext + *******************************************/ + +/* Bridge in structure for RGXCreateKickSyncContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT_TAG +{ + IMG_HANDLE hPrivData; + IMG_UINT32 ui32PackedCCBSizeU88; + IMG_UINT32 ui32ContextFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT; + +/* Bridge out structure for RGXCreateKickSyncContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT_TAG +{ + IMG_HANDLE hKickSyncContext; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT; + +/******************************************* + RGXDestroyKickSyncContext + *******************************************/ + +/* Bridge in structure for RGXDestroyKickSyncContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT_TAG +{ + IMG_HANDLE hKickSyncContext; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT; + +/* Bridge out structure for RGXDestroyKickSyncContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) 
PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT; + +/******************************************* + RGXKickSync2 + *******************************************/ + +/* Bridge in structure for RGXKickSync2 */ +typedef struct PVRSRV_BRIDGE_IN_RGXKICKSYNC2_TAG +{ + IMG_HANDLE hKickSyncContext; + IMG_UINT32 ui32ClientCacheOpSeqNum; + IMG_UINT32 ui32ClientUpdateCount; + IMG_HANDLE *phUpdateUFODevVarBlock; + IMG_UINT32 *pui32UpdateDevVarOffset; + IMG_UINT32 *pui32UpdateValue; + PVRSRV_FENCE hCheckFenceFD; + PVRSRV_TIMELINE hTimelineFenceFD; + IMG_CHAR *puiUpdateFenceName; + IMG_UINT32 ui32ExtJobRef; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXKICKSYNC2; + +/* Bridge out structure for RGXKickSync2 */ +typedef struct PVRSRV_BRIDGE_OUT_RGXKICKSYNC2_TAG +{ + PVRSRV_FENCE hUpdateFenceFD; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXKICKSYNC2; + +/******************************************* + RGXSetKickSyncContextProperty + *******************************************/ + +/* Bridge in structure for RGXSetKickSyncContextProperty */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY_TAG +{ + IMG_HANDLE hKickSyncContext; + IMG_UINT32 ui32Property; + IMG_UINT64 ui64Input; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY; + +/* Bridge out structure for RGXSetKickSyncContextProperty */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY_TAG +{ + IMG_UINT64 ui64Output; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY; + +#endif /* COMMON_RGXKICKSYNC_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/rgxkicksync_bridge/server_rgxkicksync_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/rgxkicksync_bridge/server_rgxkicksync_bridge.c new file mode 100644 index 000000000000..b750d0e05c80 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxkicksync_bridge/server_rgxkicksync_bridge.c @@ -0,0 +1,626 @@ 
+/******************************************************************************* +@File +@Title Server bridge for rgxkicksync +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxkicksync +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxkicksync.h" + +#include "common_rgxkicksync_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _RGXCreateKickSyncContextpsKickSyncContextIntRelease(void + *pvData) +{ + PVRSRV_ERROR eError; + eError = + PVRSRVRGXDestroyKickSyncContextKM((RGX_SERVER_KICKSYNC_CONTEXT *) + pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXCreateKickSyncContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXCreateKickSyncContextIN_UI8, + IMG_UINT8 * + psRGXCreateKickSyncContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT *psRGXCreateKickSyncContextIN + = + (PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateKickSyncContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT + *psRGXCreateKickSyncContextOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateKickSyncContextOUT_UI8, 0); + + IMG_HANDLE hPrivData = psRGXCreateKickSyncContextIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + 
RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXCreateKickSyncContextOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + IMG_TRUE); + if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateKickSyncContext_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXCreateKickSyncContextOUT->eError = + PVRSRVRGXCreateKickSyncContextKM(psConnection, + OSGetDevNode(psConnection), + hPrivDataInt, + psRGXCreateKickSyncContextIN-> + ui32PackedCCBSizeU88, + psRGXCreateKickSyncContextIN-> + ui32ContextFlags, + &psKickSyncContextInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)) + { + goto RGXCreateKickSyncContext_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRGXCreateKickSyncContextOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateKickSyncContextOUT-> + hKickSyncContext, + (void *)psKickSyncContextInt, + PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateKickSyncContextpsKickSyncContextIntRelease); + if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateKickSyncContext_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXCreateKickSyncContext_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK) + { + if (psKickSyncContextInt) + { + PVRSRVRGXDestroyKickSyncContextKM(psKickSyncContextInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDestroyKickSyncContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXDestroyKickSyncContextIN_UI8, + IMG_UINT8 * + psRGXDestroyKickSyncContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT + *psRGXDestroyKickSyncContextIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyKickSyncContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT + *psRGXDestroyKickSyncContextOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyKickSyncContextOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psRGXDestroyKickSyncContextOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psRGXDestroyKickSyncContextIN-> + hKickSyncContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT); + if (unlikely + ((psRGXDestroyKickSyncContextOUT->eError != PVRSRV_OK) + && (psRGXDestroyKickSyncContextOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psRGXDestroyKickSyncContextOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyKickSyncContext_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXDestroyKickSyncContext_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXKickSync2(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXKickSync2IN_UI8, + IMG_UINT8 * psRGXKickSync2OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXKICKSYNC2 *psRGXKickSync2IN = + (PVRSRV_BRIDGE_IN_RGXKICKSYNC2 *) + IMG_OFFSET_ADDR(psRGXKickSync2IN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXKICKSYNC2 *psRGXKickSync2OUT = + (PVRSRV_BRIDGE_OUT_RGXKICKSYNC2 *) + IMG_OFFSET_ADDR(psRGXKickSync2OUT_UI8, 0); + + IMG_HANDLE hKickSyncContext = psRGXKickSync2IN->hKickSyncContext; + RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL; + SYNC_PRIMITIVE_BLOCK **psUpdateUFODevVarBlockInt = NULL; + IMG_HANDLE *hUpdateUFODevVarBlockInt2 = NULL; + IMG_UINT32 *ui32UpdateDevVarOffsetInt = NULL; + IMG_UINT32 *ui32UpdateValueInt = NULL; + IMG_CHAR *uiUpdateFenceNameInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXKickSync2IN->ui32ClientUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)) + + (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) + + (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + 0; + + if (unlikely + (psRGXKickSync2IN->ui32ClientUpdateCount > PVRSRV_MAX_DEV_VARS)) + { + psRGXKickSync2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickSync2_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXKickSync2IN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXKickSync2IN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXKickSync2OUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXKickSync2_exit; + } + } + } + + if (psRGXKickSync2IN->ui32ClientUpdateCount != 0) + { + psUpdateUFODevVarBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickSync2IN->ui32ClientUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *); + hUpdateUFODevVarBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickSync2IN->ui32ClientUpdateCount * + sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hUpdateUFODevVarBlockInt2, + (const void __user *)psRGXKickSync2IN-> + phUpdateUFODevVarBlock, + psRGXKickSync2IN->ui32ClientUpdateCount * + sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickSync2_exit; + } + } + if (psRGXKickSync2IN->ui32ClientUpdateCount != 0) + { + ui32UpdateDevVarOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickSync2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32UpdateDevVarOffsetInt, + (const void __user *)psRGXKickSync2IN-> + pui32UpdateDevVarOffset, + psRGXKickSync2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickSync2_exit; + } 
+ } + if (psRGXKickSync2IN->ui32ClientUpdateCount != 0) + { + ui32UpdateValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickSync2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32UpdateValueInt, + (const void __user *)psRGXKickSync2IN->pui32UpdateValue, + psRGXKickSync2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickSync2_exit; + } + } + + { + uiUpdateFenceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceNameInt, + (const void __user *)psRGXKickSync2IN->puiUpdateFenceName, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickSync2_exit; + } + ((IMG_CHAR *) + uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXKickSync2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psKickSyncContextInt, + hKickSyncContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXKickSync2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickSync2_exit; + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickSync2IN->ui32ClientUpdateCount; i++) + { + /* Look up the address from the handle */ + psRGXKickSync2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection-> + psHandleBase, + (void **) + &psUpdateUFODevVarBlockInt + [i], + hUpdateUFODevVarBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXKickSync2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickSync2_exit; + } + } + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXKickSync2OUT->eError = + PVRSRVRGXKickSyncKM(psKickSyncContextInt, + psRGXKickSync2IN->ui32ClientCacheOpSeqNum, + psRGXKickSync2IN->ui32ClientUpdateCount, + psUpdateUFODevVarBlockInt, + ui32UpdateDevVarOffsetInt, + ui32UpdateValueInt, + psRGXKickSync2IN->hCheckFenceFD, + psRGXKickSync2IN->hTimelineFenceFD, + &psRGXKickSync2OUT->hUpdateFenceFD, + uiUpdateFenceNameInt, + psRGXKickSync2IN->ui32ExtJobRef); + +RGXKickSync2_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psKickSyncContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hKickSyncContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT); + } + + if (hUpdateUFODevVarBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickSync2IN->ui32ClientUpdateCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hUpdateUFODevVarBlockInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + hUpdateUFODevVarBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSetKickSyncContextProperty(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXSetKickSyncContextPropertyIN_UI8, + IMG_UINT8 * + psRGXSetKickSyncContextPropertyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY + *psRGXSetKickSyncContextPropertyIN = + (PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetKickSyncContextPropertyIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY + *psRGXSetKickSyncContextPropertyOUT = + (PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetKickSyncContextPropertyOUT_UI8, 0); + + IMG_HANDLE hKickSyncContext = + psRGXSetKickSyncContextPropertyIN->hKickSyncContext; + RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetKickSyncContextPropertyOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psKickSyncContextInt, + hKickSyncContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXSetKickSyncContextPropertyOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetKickSyncContextProperty_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetKickSyncContextPropertyOUT->eError = + PVRSRVRGXSetKickSyncContextPropertyKM(psKickSyncContextInt, + psRGXSetKickSyncContextPropertyIN-> + ui32Property, + psRGXSetKickSyncContextPropertyIN-> + ui64Input, + &psRGXSetKickSyncContextPropertyOUT-> + ui64Output); + +RGXSetKickSyncContextProperty_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psKickSyncContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hKickSyncContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRGXKICKSYNCBridge(void); +PVRSRV_ERROR DeinitRGXKICKSYNCBridge(void); + +/* + * Register all RGXKICKSYNC functions with services + */ +PVRSRV_ERROR InitRGXKICKSYNCBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, + PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT, + PVRSRVBridgeRGXCreateKickSyncContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, + PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT, + PVRSRVBridgeRGXDestroyKickSyncContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, + PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2, + PVRSRVBridgeRGXKickSync2, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, + PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY, + PVRSRVBridgeRGXSetKickSyncContextProperty, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxkicksync functions with services + */ +PVRSRV_ERROR DeinitRGXKICKSYNCBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, + PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, + PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, + PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, + PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/rgxpdump_bridge/client_rgxpdump_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/rgxpdump_bridge/client_rgxpdump_bridge.h new file mode 100644 index 000000000000..61236370ed4c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxpdump_bridge/client_rgxpdump_bridge.h @@ -0,0 +1,72 @@ 
+/******************************************************************************* +@File +@Title Client bridge header for rgxpdump +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for rgxpdump +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef CLIENT_RGXPDUMP_BRIDGE_H +#define CLIENT_RGXPDUMP_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_rgxpdump_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpTraceBuffer(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32PDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpSignatureBuffer(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32PDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpCRCSignatureCheck(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32PDumpFlags); + +#endif /* CLIENT_RGXPDUMP_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/rgxpdump_bridge/client_rgxpdump_direct_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/rgxpdump_bridge/client_rgxpdump_direct_bridge.c new file mode 100644 index 000000000000..395e423dcd63 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxpdump_bridge/client_rgxpdump_direct_bridge.c @@ -0,0 +1,99 @@ +/******************************************************************************* +@File +@Title Direct client bridge for rgxpdump +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for rgxpdump + which is used in calls from Server context. 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_rgxpdump_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "rgx_bridge.h"
+
+#include "rgxpdump.h"
+
+/*
+ * Direct (in-process) client entry for PDumpTraceBuffer.  In the direct
+ * bridge path hBridge carries the PVRSRV_DEVICE_NODE pointer itself (see
+ * the cast below); there is no CONNECTION_DATA, hence the NULL first
+ * argument to the KM call.  Returns the PVRSRV_ERROR from the KM layer.
+ */
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpTraceBuffer(IMG_HANDLE
+							      hBridge,
+							      IMG_UINT32
+							      ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+
+	eError =
+	    PVRSRVPDumpTraceBufferKM(NULL,
+				     (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+				     ui32PDumpFlags);
+
+	return eError;
+}
+
+/*
+ * Direct client entry for PDumpSignatureBuffer; same pattern as
+ * BridgePDumpTraceBuffer above (hBridge is the device node).
+ */
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpSignatureBuffer(IMG_HANDLE
+								  hBridge,
+								  IMG_UINT32
+								  ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+
+	eError =
+	    PVRSRVPDumpSignatureBufferKM(NULL,
+					 (PVRSRV_DEVICE_NODE *) ((void *)
+								 hBridge),
+					 ui32PDumpFlags);
+
+	return eError;
+}
+
+/*
+ * Direct client entry for PDumpCRCSignatureCheck; same pattern as
+ * BridgePDumpTraceBuffer above (hBridge is the device node).
+ */
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpCRCSignatureCheck(IMG_HANDLE
+								    hBridge,
+								    IMG_UINT32
+								    ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+
+	eError =
+	    PVRSRVPDumpCRCSignatureCheckKM(NULL,
+					   (PVRSRV_DEVICE_NODE *) ((void *)
+								   hBridge),
+					   ui32PDumpFlags);
+
+	return eError;
+}
diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/rgxpdump_bridge/common_rgxpdump_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/rgxpdump_bridge/common_rgxpdump_bridge.h
new file mode 100644
index 000000000000..0f67827d67c2
--- /dev/null
+++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxpdump_bridge/common_rgxpdump_bridge.h
@@ -0,0 +1,109 @@
+/******************************************************************************* +@File +@Title Common bridge header for rgxpdump +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxpdump +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_RGXPDUMP_BRIDGE_H +#define COMMON_RGXPDUMP_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" + +#define PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPTRACEBUFFER PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPSIGNATUREBUFFER PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPCRCSIGNATURECHECK PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXPDUMP_CMD_LAST (PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+2) + +/******************************************* + PDumpTraceBuffer + *******************************************/ + +/* Bridge in structure for PDumpTraceBuffer */ +typedef struct PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER_TAG +{ + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER; + +/* Bridge out structure for PDumpTraceBuffer */ +typedef struct PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER; + +/******************************************* + PDumpSignatureBuffer + *******************************************/ + +/* Bridge in structure for PDumpSignatureBuffer */ +typedef struct PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER_TAG +{ + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) 
PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER; + +/* Bridge out structure for PDumpSignatureBuffer */ +typedef struct PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER; + +/******************************************* + PDumpCRCSignatureCheck + *******************************************/ + +/* Bridge in structure for PDumpCRCSignatureCheck */ +typedef struct PVRSRV_BRIDGE_IN_PDUMPCRCSIGNATURECHECK_TAG +{ + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PDUMPCRCSIGNATURECHECK; + +/* Bridge out structure for PDumpCRCSignatureCheck */ +typedef struct PVRSRV_BRIDGE_OUT_PDUMPCRCSIGNATURECHECK_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PDUMPCRCSIGNATURECHECK; + +#endif /* COMMON_RGXPDUMP_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/rgxpdump_bridge/server_rgxpdump_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/rgxpdump_bridge/server_rgxpdump_bridge.c new file mode 100644 index 000000000000..7a61f007bf6e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxpdump_bridge/server_rgxpdump_bridge.c @@ -0,0 +1,176 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxpdump +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxpdump +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/
+
+/* NOTE(review): header name lost in patch extraction — presumably
+ * <linux/uaccess.h> as in the sibling generated bridge sources; confirm
+ * against the original DDK tree. */
+#include
+
+#include "img_defs.h"
+
+#include "rgxpdump.h"
+
+#include "common_rgxpdump_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+/* NOTE(review): second bare #include — header name also lost in extraction
+ * (presumably <linux/slab.h>); confirm. */
+#include
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+/*
+ * Bridge entry for PDumpTraceBuffer: reinterprets the raw IN/OUT byte
+ * buffers as the typed bridge structures and forwards to
+ * PVRSRVPDumpTraceBufferKM for this connection's device node.  Always
+ * returns 0; the PVRSRV_ERROR result is reported via
+ * psPDumpTraceBufferOUT->eError.  ui32DispatchTableEntry is unused here.
+ */
+static IMG_INT
+PVRSRVBridgePDumpTraceBuffer(IMG_UINT32 ui32DispatchTableEntry,
+			     IMG_UINT8 * psPDumpTraceBufferIN_UI8,
+			     IMG_UINT8 * psPDumpTraceBufferOUT_UI8,
+			     CONNECTION_DATA * psConnection)
+{
+	/* Typed views onto the raw bridge parameter buffers. */
+	PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER *psPDumpTraceBufferIN =
+	    (PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER *)
+	    IMG_OFFSET_ADDR(psPDumpTraceBufferIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER *psPDumpTraceBufferOUT =
+	    (PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER *)
+	    IMG_OFFSET_ADDR(psPDumpTraceBufferOUT_UI8, 0);
+
+	psPDumpTraceBufferOUT->eError =
+	    PVRSRVPDumpTraceBufferKM(psConnection, OSGetDevNode(psConnection),
+				     psPDumpTraceBufferIN->ui32PDumpFlags);
+
+	return 0;
+}
+
+/*
+ * Bridge entry for PDumpSignatureBuffer: same pattern as
+ * PVRSRVBridgePDumpTraceBuffer above, forwarding to
+ * PVRSRVPDumpSignatureBufferKM.
+ */
+static IMG_INT
+PVRSRVBridgePDumpSignatureBuffer(IMG_UINT32 ui32DispatchTableEntry,
+				 IMG_UINT8 * psPDumpSignatureBufferIN_UI8,
+				 IMG_UINT8 * psPDumpSignatureBufferOUT_UI8,
+				 CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER *psPDumpSignatureBufferIN =
+	    (PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER *)
+	    IMG_OFFSET_ADDR(psPDumpSignatureBufferIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER *psPDumpSignatureBufferOUT =
+	    (PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER *)
+	    IMG_OFFSET_ADDR(psPDumpSignatureBufferOUT_UI8, 0);
+
+	psPDumpSignatureBufferOUT->eError =
+	    PVRSRVPDumpSignatureBufferKM(psConnection,
+					 OSGetDevNode(psConnection),
+					 psPDumpSignatureBufferIN->
+					 ui32PDumpFlags);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePDumpCRCSignatureCheck(IMG_UINT32
				   ui32DispatchTableEntry,
+				   IMG_UINT8 * psPDumpCRCSignatureCheckIN_UI8,
+				   IMG_UINT8 * psPDumpCRCSignatureCheckOUT_UI8,
+				   CONNECTION_DATA * psConnection)
+{
+	/* Typed views onto the raw bridge parameter buffers. */
+	PVRSRV_BRIDGE_IN_PDUMPCRCSIGNATURECHECK *psPDumpCRCSignatureCheckIN =
+	    (PVRSRV_BRIDGE_IN_PDUMPCRCSIGNATURECHECK *)
+	    IMG_OFFSET_ADDR(psPDumpCRCSignatureCheckIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_PDUMPCRCSIGNATURECHECK *psPDumpCRCSignatureCheckOUT =
+	    (PVRSRV_BRIDGE_OUT_PDUMPCRCSIGNATURECHECK *)
+	    IMG_OFFSET_ADDR(psPDumpCRCSignatureCheckOUT_UI8, 0);
+
+	/* Forward to the KM implementation; the PVRSRV_ERROR result travels
+	 * back in the OUT structure — the IMG_INT return is always 0. */
+	psPDumpCRCSignatureCheckOUT->eError =
+	    PVRSRVPDumpCRCSignatureCheckKM(psConnection,
+					   OSGetDevNode(psConnection),
+					   psPDumpCRCSignatureCheckIN->
+					   ui32PDumpFlags);
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitRGXPDUMPBridge(void);
+PVRSRV_ERROR DeinitRGXPDUMPBridge(void);
+
+/*
+ * Register all RGXPDUMP functions with services
+ */
+PVRSRV_ERROR InitRGXPDUMPBridge(void)
+{
+
+	/* Each entry maps a (bridge group, function id) pair to its
+	 * server-side handler. */
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP,
+			      PVRSRV_BRIDGE_RGXPDUMP_PDUMPTRACEBUFFER,
+			      PVRSRVBridgePDumpTraceBuffer, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP,
+			      PVRSRV_BRIDGE_RGXPDUMP_PDUMPSIGNATUREBUFFER,
+			      PVRSRVBridgePDumpSignatureBuffer, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP,
+			      PVRSRV_BRIDGE_RGXPDUMP_PDUMPCRCSIGNATURECHECK,
+			      PVRSRVBridgePDumpCRCSignatureCheck, NULL);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxpdump functions with services
+ */
+PVRSRV_ERROR DeinitRGXPDUMPBridge(void)
+{
+
+	/* Mirror of InitRGXPDUMPBridge: remove the three entries. */
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP,
+				PVRSRV_BRIDGE_RGXPDUMP_PDUMPTRACEBUFFER);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP,
+				PVRSRV_BRIDGE_RGXPDUMP_PDUMPSIGNATUREBUFFER);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP,
+				PVRSRV_BRIDGE_RGXPDUMP_PDUMPCRCSIGNATURECHECK);
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/rgxregconfig_bridge/common_rgxregconfig_bridge.h
b/drivers/mcst/gpu-imgtec/generated/rogue/rgxregconfig_bridge/common_rgxregconfig_bridge.h new file mode 100644 index 000000000000..c1cab95393e7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxregconfig_bridge/common_rgxregconfig_bridge.h @@ -0,0 +1,145 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxregconfig +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxregconfig +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_RGXREGCONFIG_BRIDGE_H +#define COMMON_RGXREGCONFIG_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" + +#define PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXREGCONFIG_CMD_LAST (PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+4) + +/******************************************* + RGXSetRegConfigType + *******************************************/ + +/* Bridge in structure for RGXSetRegConfigType */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE_TAG +{ + IMG_UINT8 ui8RegPowerIsland; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE; + +/* Bridge out structure for RGXSetRegConfigType 
*/ +typedef struct PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE; + +/******************************************* + RGXAddRegconfig + *******************************************/ + +/* Bridge in structure for RGXAddRegconfig */ +typedef struct PVRSRV_BRIDGE_IN_RGXADDREGCONFIG_TAG +{ + IMG_UINT32 ui32RegAddr; + IMG_UINT64 ui64RegValue; + IMG_UINT64 ui64RegMask; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXADDREGCONFIG; + +/* Bridge out structure for RGXAddRegconfig */ +typedef struct PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG; + +/******************************************* + RGXClearRegConfig + *******************************************/ + +/* Bridge in structure for RGXClearRegConfig */ +typedef struct PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG; + +/* Bridge out structure for RGXClearRegConfig */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG; + +/******************************************* + RGXEnableRegConfig + *******************************************/ + +/* Bridge in structure for RGXEnableRegConfig */ +typedef struct PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG; + +/* Bridge out structure for RGXEnableRegConfig */ +typedef struct PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG; + +/******************************************* + RGXDisableRegConfig + *******************************************/ + +/* Bridge in structure for RGXDisableRegConfig */ +typedef struct PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG_TAG +{ 
+ IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG; + +/* Bridge out structure for RGXDisableRegConfig */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG; + +#endif /* COMMON_RGXREGCONFIG_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/rgxregconfig_bridge/server_rgxregconfig_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/rgxregconfig_bridge/server_rgxregconfig_bridge.c new file mode 100644 index 000000000000..546faf135f2a --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxregconfig_bridge/server_rgxregconfig_bridge.c @@ -0,0 +1,251 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxregconfig +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxregconfig +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxregconfig.h" + +#include "common_rgxregconfig_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeRGXSetRegConfigType(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSetRegConfigTypeIN_UI8, + IMG_UINT8 * psRGXSetRegConfigTypeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE *psRGXSetRegConfigTypeIN = + (PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE *) + IMG_OFFSET_ADDR(psRGXSetRegConfigTypeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE *psRGXSetRegConfigTypeOUT = + (PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE *) + IMG_OFFSET_ADDR(psRGXSetRegConfigTypeOUT_UI8, 0); + + psRGXSetRegConfigTypeOUT->eError = + PVRSRVRGXSetRegConfigTypeKM(psConnection, + OSGetDevNode(psConnection), + psRGXSetRegConfigTypeIN-> + ui8RegPowerIsland); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXAddRegconfig(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXAddRegconfigIN_UI8, + IMG_UINT8 * psRGXAddRegconfigOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXADDREGCONFIG *psRGXAddRegconfigIN = + (PVRSRV_BRIDGE_IN_RGXADDREGCONFIG *) + IMG_OFFSET_ADDR(psRGXAddRegconfigIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG *psRGXAddRegconfigOUT = + (PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG *) + IMG_OFFSET_ADDR(psRGXAddRegconfigOUT_UI8, 0); + + psRGXAddRegconfigOUT->eError = + PVRSRVRGXAddRegConfigKM(psConnection, OSGetDevNode(psConnection), + psRGXAddRegconfigIN->ui32RegAddr, + psRGXAddRegconfigIN->ui64RegValue, + psRGXAddRegconfigIN->ui64RegMask); + + 
return 0; +} + +static IMG_INT +PVRSRVBridgeRGXClearRegConfig(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXClearRegConfigIN_UI8, + IMG_UINT8 * psRGXClearRegConfigOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG *psRGXClearRegConfigIN = + (PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG *) + IMG_OFFSET_ADDR(psRGXClearRegConfigIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG *psRGXClearRegConfigOUT = + (PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG *) + IMG_OFFSET_ADDR(psRGXClearRegConfigOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psRGXClearRegConfigIN); + + psRGXClearRegConfigOUT->eError = + PVRSRVRGXClearRegConfigKM(psConnection, OSGetDevNode(psConnection)); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXEnableRegConfig(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXEnableRegConfigIN_UI8, + IMG_UINT8 * psRGXEnableRegConfigOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG *psRGXEnableRegConfigIN = + (PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG *) + IMG_OFFSET_ADDR(psRGXEnableRegConfigIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG *psRGXEnableRegConfigOUT = + (PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG *) + IMG_OFFSET_ADDR(psRGXEnableRegConfigOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psRGXEnableRegConfigIN); + + psRGXEnableRegConfigOUT->eError = + PVRSRVRGXEnableRegConfigKM(psConnection, + OSGetDevNode(psConnection)); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDisableRegConfig(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDisableRegConfigIN_UI8, + IMG_UINT8 * psRGXDisableRegConfigOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG *psRGXDisableRegConfigIN = + (PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG *) + IMG_OFFSET_ADDR(psRGXDisableRegConfigIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG *psRGXDisableRegConfigOUT = + (PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG *) + IMG_OFFSET_ADDR(psRGXDisableRegConfigOUT_UI8, 0); + + 
PVR_UNREFERENCED_PARAMETER(psRGXDisableRegConfigIN); + + psRGXDisableRegConfigOUT->eError = + PVRSRVRGXDisableRegConfigKM(psConnection, + OSGetDevNode(psConnection)); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +#endif /* EXCLUDE_RGXREGCONFIG_BRIDGE */ + +#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) +PVRSRV_ERROR InitRGXREGCONFIGBridge(void); +PVRSRV_ERROR DeinitRGXREGCONFIGBridge(void); + +/* + * Register all RGXREGCONFIG functions with services + */ +PVRSRV_ERROR InitRGXREGCONFIGBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE, + PVRSRVBridgeRGXSetRegConfigType, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG, + PVRSRVBridgeRGXAddRegconfig, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG, + PVRSRVBridgeRGXClearRegConfig, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG, + PVRSRVBridgeRGXEnableRegConfig, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG, + PVRSRVBridgeRGXDisableRegConfig, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxregconfig functions with services + */ +PVRSRV_ERROR DeinitRGXREGCONFIGBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG); + + return 
PVRSRV_OK; +} +#else /* EXCLUDE_RGXREGCONFIG_BRIDGE */ +/* This bridge is conditional on EXCLUDE_RGXREGCONFIG_BRIDGE - when defined, + * do not populate the dispatch table with its functions + */ +#define InitRGXREGCONFIGBridge() \ + PVRSRV_OK + +#define DeinitRGXREGCONFIGBridge() \ + PVRSRV_OK + +#endif /* EXCLUDE_RGXREGCONFIG_BRIDGE */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/rgxsignals_bridge/common_rgxsignals_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/rgxsignals_bridge/common_rgxsignals_bridge.h new file mode 100644 index 000000000000..de523c2b2a89 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxsignals_bridge/common_rgxsignals_bridge.h @@ -0,0 +1,76 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxsignals +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxsignals +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_RGXSIGNALS_BRIDGE_H +#define COMMON_RGXSIGNALS_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" + +#define PVRSRV_BRIDGE_RGXSIGNALS_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE PVRSRV_BRIDGE_RGXSIGNALS_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXSIGNALS_CMD_LAST (PVRSRV_BRIDGE_RGXSIGNALS_CMD_FIRST+0) + +/******************************************* + RGXNotifySignalUpdate + *******************************************/ + +/* Bridge in structure for RGXNotifySignalUpdate */ +typedef struct PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE_TAG +{ + IMG_HANDLE hPrivData; + IMG_DEV_VIRTADDR sDevSignalAddress; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE; + +/* Bridge out structure for RGXNotifySignalUpdate */ +typedef struct PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE; + +#endif /* COMMON_RGXSIGNALS_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/rgxsignals_bridge/server_rgxsignals_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/rgxsignals_bridge/server_rgxsignals_bridge.c new file mode 100644 index 000000000000..f4aefc886884 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxsignals_bridge/server_rgxsignals_bridge.c @@ -0,0 +1,174 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxsignals +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxsignals +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxsignals.h" + +#include "common_rgxsignals_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +#include "rgx_bvnc_defs_km.h" + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeRGXNotifySignalUpdate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXNotifySignalUpdateIN_UI8, + IMG_UINT8 * psRGXNotifySignalUpdateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE *psRGXNotifySignalUpdateIN = + (PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE *) + IMG_OFFSET_ADDR(psRGXNotifySignalUpdateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE *psRGXNotifySignalUpdateOUT = + (PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE *) + IMG_OFFSET_ADDR(psRGXNotifySignalUpdateOUT_UI8, 0); + + IMG_HANDLE hPrivData = psRGXNotifySignalUpdateIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK)) + { + psRGXNotifySignalUpdateOUT->eError = + PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXNotifySignalUpdate_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXNotifySignalUpdateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + IMG_TRUE); + if (unlikely(psRGXNotifySignalUpdateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXNotifySignalUpdate_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXNotifySignalUpdateOUT->eError = + PVRSRVRGXNotifySignalUpdateKM(psConnection, + OSGetDevNode(psConnection), + hPrivDataInt, + psRGXNotifySignalUpdateIN-> + sDevSignalAddress); + +RGXNotifySignalUpdate_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRGXSIGNALSBridge(void); +PVRSRV_ERROR DeinitRGXSIGNALSBridge(void); + +/* + * Register all RGXSIGNALS functions with services + */ +PVRSRV_ERROR InitRGXSIGNALSBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXSIGNALS, + PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE, + PVRSRVBridgeRGXNotifySignalUpdate, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxsignals functions with services + */ +PVRSRV_ERROR DeinitRGXSIGNALSBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXSIGNALS, + PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/rgxta3d_bridge/common_rgxta3d_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/rgxta3d_bridge/common_rgxta3d_bridge.h new file mode 100644 index 000000000000..5d73e22c0ed0 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxta3d_bridge/common_rgxta3d_bridge.h @@ -0,0 +1,425 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxta3d +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxta3d +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_RGXTA3D_BRIDGE_H +#define COMMON_RGXTA3D_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" +#include "rgx_fwif_shared.h" +#include "devicemem_typedefs.h" +#include "pvrsrv_sync_km.h" + +#define PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+5 +#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+6 +#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+7 +#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+8 +#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+9 +#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+10 +#define PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+11 +#define PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+12 +#define PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2 PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+13 +#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+14 +#define PVRSRV_BRIDGE_RGXTA3D_CMD_LAST (PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+14) + +/******************************************* + RGXCreateHWRTDataSet + *******************************************/ + +/* Bridge in structure for RGXCreateHWRTDataSet */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET_TAG 
+{ + IMG_DEV_VIRTADDR ssVHeapTableDevVAddr; + IMG_DEV_VIRTADDR sPMMlistDevVAddr0; + IMG_DEV_VIRTADDR sPMMlistDevVAddr1; + IMG_HANDLE *phapsFreeLists; + IMG_UINT32 ui32PPPScreen; + IMG_UINT64 ui64MultiSampleCtl; + IMG_UINT64 ui64FlippedMultiSampleCtl; + IMG_UINT32 ui32TPCStride; + IMG_DEV_VIRTADDR sTailPtrsDevVAddr; + IMG_UINT32 ui32TPCSize; + IMG_UINT32 ui32TEScreen; + IMG_UINT32 ui32TEAA; + IMG_UINT32 ui32TEMTILE1; + IMG_UINT32 ui32TEMTILE2; + IMG_UINT32 ui32MTileStride; + IMG_UINT32 ui32ui32ISPMergeLowerX; + IMG_UINT32 ui32ui32ISPMergeLowerY; + IMG_UINT32 ui32ui32ISPMergeUpperX; + IMG_UINT32 ui32ui32ISPMergeUpperY; + IMG_UINT32 ui32ui32ISPMergeScaleX; + IMG_UINT32 ui32ui32ISPMergeScaleY; + IMG_DEV_VIRTADDR ssMacrotileArrayDevVAddr0; + IMG_DEV_VIRTADDR ssMacrotileArrayDevVAddr1; + IMG_DEV_VIRTADDR ssRgnHeaderDevVAddr0; + IMG_DEV_VIRTADDR ssRgnHeaderDevVAddr1; + IMG_DEV_VIRTADDR ssRTCDevVAddr; + IMG_UINT64 ui64uiRgnHeaderSize; + IMG_UINT32 ui32ui32ISPMtileSize; + IMG_UINT16 ui16MaxRTs; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET; + +/* Bridge out structure for RGXCreateHWRTDataSet */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET_TAG +{ + IMG_HANDLE hKmHwRTDataSet0; + IMG_HANDLE hKmHwRTDataSet1; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET; + +/******************************************* + RGXDestroyHWRTDataSet + *******************************************/ + +/* Bridge in structure for RGXDestroyHWRTDataSet */ +typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET_TAG +{ + IMG_HANDLE hKmHwRTDataSet; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET; + +/* Bridge out structure for RGXDestroyHWRTDataSet */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET; + +/******************************************* + RGXCreateZSBuffer + *******************************************/ + 
+/* Bridge in structure for RGXCreateZSBuffer */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER_TAG +{ + IMG_HANDLE hReservation; + IMG_HANDLE hPMR; + PVRSRV_MEMALLOCFLAGS_T uiMapFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER; + +/* Bridge out structure for RGXCreateZSBuffer */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER_TAG +{ + IMG_HANDLE hsZSBufferKM; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER; + +/******************************************* + RGXDestroyZSBuffer + *******************************************/ + +/* Bridge in structure for RGXDestroyZSBuffer */ +typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER_TAG +{ + IMG_HANDLE hsZSBufferMemDesc; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER; + +/* Bridge out structure for RGXDestroyZSBuffer */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER; + +/******************************************* + RGXPopulateZSBuffer + *******************************************/ + +/* Bridge in structure for RGXPopulateZSBuffer */ +typedef struct PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER_TAG +{ + IMG_HANDLE hsZSBufferKM; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER; + +/* Bridge out structure for RGXPopulateZSBuffer */ +typedef struct PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER_TAG +{ + IMG_HANDLE hsPopulation; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER; + +/******************************************* + RGXUnpopulateZSBuffer + *******************************************/ + +/* Bridge in structure for RGXUnpopulateZSBuffer */ +typedef struct PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER_TAG +{ + IMG_HANDLE hsPopulation; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER; + +/* Bridge out structure for RGXUnpopulateZSBuffer */ +typedef struct 
PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER; + +/******************************************* + RGXCreateFreeList + *******************************************/ + +/* Bridge in structure for RGXCreateFreeList */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATEFREELIST_TAG +{ + IMG_HANDLE hMemCtxPrivData; + IMG_UINT32 ui32MaxFLPages; + IMG_UINT32 ui32InitFLPages; + IMG_UINT32 ui32GrowFLPages; + IMG_UINT32 ui32GrowParamThreshold; + IMG_HANDLE hsGlobalFreeList; + IMG_BOOL bbFreeListCheck; + IMG_DEV_VIRTADDR spsFreeListDevVAddr; + IMG_HANDLE hsFreeListPMR; + IMG_DEVMEM_OFFSET_T uiPMROffset; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCREATEFREELIST; + +/* Bridge out structure for RGXCreateFreeList */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST_TAG +{ + IMG_HANDLE hCleanupCookie; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST; + +/******************************************* + RGXDestroyFreeList + *******************************************/ + +/* Bridge in structure for RGXDestroyFreeList */ +typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST_TAG +{ + IMG_HANDLE hCleanupCookie; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST; + +/* Bridge out structure for RGXDestroyFreeList */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST; + +/******************************************* + RGXCreateRenderContext + *******************************************/ + +/* Bridge in structure for RGXCreateRenderContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT_TAG +{ + IMG_UINT32 ui32Priority; + IMG_DEV_VIRTADDR sVDMCallStackAddr; + IMG_UINT32 ui32FrameworkCmdSize; + IMG_BYTE *psFrameworkCmd; + IMG_HANDLE hPrivData; + IMG_UINT32 ui32StaticRenderContextStateSize; + IMG_BYTE *psStaticRenderContextState; + IMG_UINT32 
ui32PackedCCBSizeU8888; + IMG_UINT32 ui32ContextFlags; + IMG_UINT64 ui64RobustnessAddress; + IMG_UINT32 ui32MaxTADeadlineMS; + IMG_UINT32 ui32Max3DDeadlineMS; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT; + +/* Bridge out structure for RGXCreateRenderContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT_TAG +{ + IMG_HANDLE hRenderContext; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT; + +/******************************************* + RGXDestroyRenderContext + *******************************************/ + +/* Bridge in structure for RGXDestroyRenderContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT_TAG +{ + IMG_HANDLE hCleanupCookie; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT; + +/* Bridge out structure for RGXDestroyRenderContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT; + +/******************************************* + RGXSetRenderContextPriority + *******************************************/ + +/* Bridge in structure for RGXSetRenderContextPriority */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY_TAG +{ + IMG_HANDLE hRenderContext; + IMG_UINT32 ui32Priority; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY; + +/* Bridge out structure for RGXSetRenderContextPriority */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY; + +/******************************************* + RGXGetLastRenderContextResetReason + *******************************************/ + +/* Bridge in structure for RGXGetLastRenderContextResetReason */ +typedef struct PVRSRV_BRIDGE_IN_RGXGETLASTRENDERCONTEXTRESETREASON_TAG +{ + IMG_HANDLE hRenderContext; +} __attribute__ ((packed)) 
PVRSRV_BRIDGE_IN_RGXGETLASTRENDERCONTEXTRESETREASON; + +/* Bridge out structure for RGXGetLastRenderContextResetReason */ +typedef struct PVRSRV_BRIDGE_OUT_RGXGETLASTRENDERCONTEXTRESETREASON_TAG +{ + IMG_UINT32 ui32LastResetReason; + IMG_UINT32 ui32LastResetJobRef; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXGETLASTRENDERCONTEXTRESETREASON; + +/******************************************* + RGXRenderContextStalled + *******************************************/ + +/* Bridge in structure for RGXRenderContextStalled */ +typedef struct PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED_TAG +{ + IMG_HANDLE hRenderContext; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED; + +/* Bridge out structure for RGXRenderContextStalled */ +typedef struct PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED; + +/******************************************* + RGXKickTA3D2 + *******************************************/ + +/* Bridge in structure for RGXKickTA3D2 */ +typedef struct PVRSRV_BRIDGE_IN_RGXKICKTA3D2_TAG +{ + IMG_HANDLE hRenderContext; + IMG_UINT32 ui32ClientCacheOpSeqNum; + IMG_UINT32 ui32ClientTAFenceCount; + IMG_HANDLE *phClientTAFenceSyncPrimBlock; + IMG_UINT32 *pui32ClientTAFenceSyncOffset; + IMG_UINT32 *pui32ClientTAFenceValue; + IMG_UINT32 ui32ClientTAUpdateCount; + IMG_HANDLE *phClientTAUpdateSyncPrimBlock; + IMG_UINT32 *pui32ClientTAUpdateSyncOffset; + IMG_UINT32 *pui32ClientTAUpdateValue; + IMG_UINT32 ui32Client3DUpdateCount; + IMG_HANDLE *phClient3DUpdateSyncPrimBlock; + IMG_UINT32 *pui32Client3DUpdateSyncOffset; + IMG_UINT32 *pui32Client3DUpdateValue; + IMG_HANDLE hPRFenceUFOSyncPrimBlock; + IMG_UINT32 ui32FRFenceUFOSyncOffset; + IMG_UINT32 ui32FRFenceValue; + PVRSRV_FENCE hCheckFence; + PVRSRV_TIMELINE hUpdateTimeline; + IMG_CHAR *puiUpdateFenceName; + PVRSRV_FENCE hCheckFence3D; + PVRSRV_TIMELINE hUpdateTimeline3D; + IMG_CHAR 
*puiUpdateFenceName3D; + IMG_UINT32 ui32TACmdSize; + IMG_BYTE *psTACmd; + IMG_UINT32 ui323DPRCmdSize; + IMG_BYTE *ps3DPRCmd; + IMG_UINT32 ui323DCmdSize; + IMG_BYTE *ps3DCmd; + IMG_UINT32 ui32ExtJobRef; + IMG_BOOL bbKickTA; + IMG_BOOL bbKickPR; + IMG_BOOL bbKick3D; + IMG_BOOL bbAbort; + IMG_UINT32 ui32PDumpFlags; + IMG_HANDLE hKMHWRTDataSet; + IMG_HANDLE hZSBuffer; + IMG_HANDLE hMSAAScratchBuffer; + IMG_UINT32 ui32SyncPMRCount; + IMG_UINT32 *pui32SyncPMRFlags; + IMG_HANDLE *phSyncPMRs; + IMG_UINT32 ui32RenderTargetSize; + IMG_UINT32 ui32NumberOfDrawCalls; + IMG_UINT32 ui32NumberOfIndices; + IMG_UINT32 ui32NumberOfMRTs; + IMG_UINT64 ui64Deadline; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXKICKTA3D2; + +/* Bridge out structure for RGXKickTA3D2 */ +typedef struct PVRSRV_BRIDGE_OUT_RGXKICKTA3D2_TAG +{ + PVRSRV_FENCE hUpdateFence; + PVRSRV_FENCE hUpdateFence3D; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXKICKTA3D2; + +/******************************************* + RGXSetRenderContextProperty + *******************************************/ + +/* Bridge in structure for RGXSetRenderContextProperty */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY_TAG +{ + IMG_HANDLE hRenderContext; + IMG_UINT32 ui32Property; + IMG_UINT64 ui64Input; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY; + +/* Bridge out structure for RGXSetRenderContextProperty */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY_TAG +{ + IMG_UINT64 ui64Output; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY; + +#endif /* COMMON_RGXTA3D_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/rgxta3d_bridge/server_rgxta3d_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/rgxta3d_bridge/server_rgxta3d_bridge.c new file mode 100644 index 000000000000..33bbb15fad25 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxta3d_bridge/server_rgxta3d_bridge.c @@ -0,0 +1,2516 
@@ +/******************************************************************************* +@File +@Title Server bridge for rgxta3d +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxta3d +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxta3d.h" + +#include "common_rgxta3d_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _RGXCreateHWRTDataSetpsKmHwRTDataSet0IntRelease(void + *pvData) +{ + PVRSRV_ERROR eError; + eError = RGXDestroyHWRTDataSet((RGX_KM_HW_RT_DATASET *) pvData); + return eError; +} + +static PVRSRV_ERROR _RGXCreateHWRTDataSetpsKmHwRTDataSet1IntRelease(void + *pvData) +{ + PVRSRV_ERROR eError; + eError = RGXDestroyHWRTDataSet((RGX_KM_HW_RT_DATASET *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXCreateHWRTDataSet(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateHWRTDataSetIN_UI8, + IMG_UINT8 * psRGXCreateHWRTDataSetOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET *psRGXCreateHWRTDataSetIN = + (PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET *) + IMG_OFFSET_ADDR(psRGXCreateHWRTDataSetIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET *psRGXCreateHWRTDataSetOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET *) + 
IMG_OFFSET_ADDR(psRGXCreateHWRTDataSetOUT_UI8, 0); + + RGX_FREELIST **psapsFreeListsInt = NULL; + IMG_HANDLE *hapsFreeListsInt2 = NULL; + RGX_KM_HW_RT_DATASET *psKmHwRTDataSet0Int = NULL; + RGX_KM_HW_RT_DATASET *psKmHwRTDataSet1Int = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (RGXFW_MAX_FREELISTS * sizeof(RGX_FREELIST *)) + + (RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE)) + 0; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXCreateHWRTDataSetIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXCreateHWRTDataSetIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXCreateHWRTDataSetOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXCreateHWRTDataSet_exit; + } + } + } + + { + psapsFreeListsInt = + (RGX_FREELIST **) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += RGXFW_MAX_FREELISTS * sizeof(RGX_FREELIST *); + hapsFreeListsInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hapsFreeListsInt2, + (const void __user *)psRGXCreateHWRTDataSetIN-> + phapsFreeLists, + RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + 
psRGXCreateHWRTDataSetOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateHWRTDataSet_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + { + IMG_UINT32 i; + + for (i = 0; i < RGXFW_MAX_FREELISTS; i++) + { + /* Look up the address from the handle */ + psRGXCreateHWRTDataSetOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection-> + psHandleBase, + (void **) + &psapsFreeListsInt[i], + hapsFreeListsInt2[i], + PVRSRV_HANDLE_TYPE_RGX_FREELIST, + IMG_TRUE); + if (unlikely + (psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateHWRTDataSet_exit; + } + } + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXCreateHWRTDataSetOUT->eError = + RGXCreateHWRTDataSet(psConnection, OSGetDevNode(psConnection), + psRGXCreateHWRTDataSetIN->ssVHeapTableDevVAddr, + psRGXCreateHWRTDataSetIN->sPMMlistDevVAddr0, + psRGXCreateHWRTDataSetIN->sPMMlistDevVAddr1, + psapsFreeListsInt, + psRGXCreateHWRTDataSetIN->ui32PPPScreen, + psRGXCreateHWRTDataSetIN->ui64MultiSampleCtl, + psRGXCreateHWRTDataSetIN-> + ui64FlippedMultiSampleCtl, + psRGXCreateHWRTDataSetIN->ui32TPCStride, + psRGXCreateHWRTDataSetIN->sTailPtrsDevVAddr, + psRGXCreateHWRTDataSetIN->ui32TPCSize, + psRGXCreateHWRTDataSetIN->ui32TEScreen, + psRGXCreateHWRTDataSetIN->ui32TEAA, + psRGXCreateHWRTDataSetIN->ui32TEMTILE1, + psRGXCreateHWRTDataSetIN->ui32TEMTILE2, + psRGXCreateHWRTDataSetIN->ui32MTileStride, + psRGXCreateHWRTDataSetIN-> + ui32ui32ISPMergeLowerX, + psRGXCreateHWRTDataSetIN-> + ui32ui32ISPMergeLowerY, + psRGXCreateHWRTDataSetIN-> + ui32ui32ISPMergeUpperX, + psRGXCreateHWRTDataSetIN-> + ui32ui32ISPMergeUpperY, + psRGXCreateHWRTDataSetIN-> + ui32ui32ISPMergeScaleX, + psRGXCreateHWRTDataSetIN-> + ui32ui32ISPMergeScaleY, + psRGXCreateHWRTDataSetIN-> + ssMacrotileArrayDevVAddr0, + psRGXCreateHWRTDataSetIN-> + ssMacrotileArrayDevVAddr1, + 
psRGXCreateHWRTDataSetIN->ssRgnHeaderDevVAddr0, + psRGXCreateHWRTDataSetIN->ssRgnHeaderDevVAddr1, + psRGXCreateHWRTDataSetIN->ssRTCDevVAddr, + psRGXCreateHWRTDataSetIN->ui64uiRgnHeaderSize, + psRGXCreateHWRTDataSetIN->ui32ui32ISPMtileSize, + psRGXCreateHWRTDataSetIN->ui16MaxRTs, + &psKmHwRTDataSet0Int, &psKmHwRTDataSet1Int); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) + { + goto RGXCreateHWRTDataSet_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRGXCreateHWRTDataSetOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateHWRTDataSetOUT-> + hKmHwRTDataSet0, + (void *)psKmHwRTDataSet0Int, + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateHWRTDataSetpsKmHwRTDataSet0IntRelease); + if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateHWRTDataSet_exit; + } + + psRGXCreateHWRTDataSetOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateHWRTDataSetOUT-> + hKmHwRTDataSet1, + (void *)psKmHwRTDataSet1Int, + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateHWRTDataSetpsKmHwRTDataSet1IntRelease); + if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateHWRTDataSet_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXCreateHWRTDataSet_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + if (hapsFreeListsInt2) + { + IMG_UINT32 i; + + for (i = 0; i < RGXFW_MAX_FREELISTS; i++) + { + + /* Unreference the previously looked up handle */ + if (hapsFreeListsInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + hapsFreeListsInt2 + [i], + PVRSRV_HANDLE_TYPE_RGX_FREELIST); + } + } + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK) + { + if (psKmHwRTDataSet0Int) + { + RGXDestroyHWRTDataSet(psKmHwRTDataSet0Int); + } + if (psKmHwRTDataSet1Int) + { + RGXDestroyHWRTDataSet(psKmHwRTDataSet1Int); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDestroyHWRTDataSet(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyHWRTDataSetIN_UI8, + IMG_UINT8 * psRGXDestroyHWRTDataSetOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET *psRGXDestroyHWRTDataSetIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET *) + IMG_OFFSET_ADDR(psRGXDestroyHWRTDataSetIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET *psRGXDestroyHWRTDataSetOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET *) + IMG_OFFSET_ADDR(psRGXDestroyHWRTDataSetOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXDestroyHWRTDataSetOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psRGXDestroyHWRTDataSetIN-> + hKmHwRTDataSet, + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET); + if (unlikely + ((psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_OK) + && (psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psRGXDestroyHWRTDataSetOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyHWRTDataSet_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXDestroyHWRTDataSet_exit: + + return 0; +} + +static PVRSRV_ERROR _RGXCreateZSBufferpssZSBufferKMIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RGXDestroyZSBufferKM((RGX_ZSBUFFER_DATA *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXCreateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateZSBufferIN_UI8, + IMG_UINT8 * psRGXCreateZSBufferOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *psRGXCreateZSBufferIN = + (PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *) + IMG_OFFSET_ADDR(psRGXCreateZSBufferIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *psRGXCreateZSBufferOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *) + IMG_OFFSET_ADDR(psRGXCreateZSBufferOUT_UI8, 0); + + IMG_HANDLE hReservation = psRGXCreateZSBufferIN->hReservation; + DEVMEMINT_RESERVATION *psReservationInt = NULL; + IMG_HANDLE hPMR = psRGXCreateZSBufferIN->hPMR; + PMR *psPMRInt = NULL; + RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXCreateZSBufferOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, + IMG_TRUE); + if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateZSBuffer_exit; + } + + /* Look up the address from the handle */ + psRGXCreateZSBufferOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateZSBuffer_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXCreateZSBufferOUT->eError = + RGXCreateZSBufferKM(psConnection, OSGetDevNode(psConnection), + psReservationInt, + psPMRInt, + psRGXCreateZSBufferIN->uiMapFlags, + &pssZSBufferKMInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) + { + goto RGXCreateZSBuffer_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRGXCreateZSBufferOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateZSBufferOUT->hsZSBufferKM, + (void *)pssZSBufferKMInt, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateZSBufferpssZSBufferKMIntRelease); + if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateZSBuffer_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXCreateZSBuffer_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psReservationInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); + } + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXCreateZSBufferOUT->eError != PVRSRV_OK) + { + if (pssZSBufferKMInt) + { + RGXDestroyZSBufferKM(pssZSBufferKMInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDestroyZSBuffer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyZSBufferIN_UI8, + IMG_UINT8 * psRGXDestroyZSBufferOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *) + IMG_OFFSET_ADDR(psRGXDestroyZSBufferIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *) + IMG_OFFSET_ADDR(psRGXDestroyZSBufferOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psRGXDestroyZSBufferOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psRGXDestroyZSBufferIN-> + hsZSBufferMemDesc, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); + if (unlikely + ((psRGXDestroyZSBufferOUT->eError != PVRSRV_OK) + && (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psRGXDestroyZSBufferOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyZSBuffer_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXDestroyZSBuffer_exit: + + return 0; +} + +static PVRSRV_ERROR _RGXPopulateZSBufferpssPopulationIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RGXUnpopulateZSBufferKM((RGX_POPULATION *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXPopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXPopulateZSBufferIN_UI8, + IMG_UINT8 * psRGXPopulateZSBufferOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferIN = + (PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *) + IMG_OFFSET_ADDR(psRGXPopulateZSBufferIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferOUT = + (PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *) + IMG_OFFSET_ADDR(psRGXPopulateZSBufferOUT_UI8, 0); + + IMG_HANDLE hsZSBufferKM = psRGXPopulateZSBufferIN->hsZSBufferKM; + RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL; + RGX_POPULATION *pssPopulationInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXPopulateZSBufferOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&pssZSBufferKMInt, + hsZSBufferKM, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, + IMG_TRUE); + if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXPopulateZSBuffer_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXPopulateZSBufferOUT->eError = + RGXPopulateZSBufferKM(pssZSBufferKMInt, &pssPopulationInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) + { + goto RGXPopulateZSBuffer_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXPopulateZSBufferOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXPopulateZSBufferOUT->hsPopulation, + (void *)pssPopulationInt, + PVRSRV_HANDLE_TYPE_RGX_POPULATION, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXPopulateZSBufferpssPopulationIntRelease); + if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXPopulateZSBuffer_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXPopulateZSBuffer_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (pssZSBufferKMInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hsZSBufferKM, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXPopulateZSBufferOUT->eError != PVRSRV_OK) + { + if (pssPopulationInt) + { + RGXUnpopulateZSBufferKM(pssPopulationInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXUnpopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXUnpopulateZSBufferIN_UI8, + IMG_UINT8 * psRGXUnpopulateZSBufferOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferIN = + (PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *) + IMG_OFFSET_ADDR(psRGXUnpopulateZSBufferIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferOUT = + (PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *) + IMG_OFFSET_ADDR(psRGXUnpopulateZSBufferOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXUnpopulateZSBufferOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psRGXUnpopulateZSBufferIN-> + hsPopulation, + PVRSRV_HANDLE_TYPE_RGX_POPULATION); + if (unlikely + ((psRGXUnpopulateZSBufferOUT->eError != PVRSRV_OK) + && (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psRGXUnpopulateZSBufferOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXUnpopulateZSBuffer_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXUnpopulateZSBuffer_exit: + + return 0; +} + +static PVRSRV_ERROR _RGXCreateFreeListpsCleanupCookieIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RGXDestroyFreeList((RGX_FREELIST *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXCreateFreeList(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateFreeListIN_UI8, + IMG_UINT8 * psRGXCreateFreeListOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *psRGXCreateFreeListIN = + (PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *) + IMG_OFFSET_ADDR(psRGXCreateFreeListIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *psRGXCreateFreeListOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *) + IMG_OFFSET_ADDR(psRGXCreateFreeListOUT_UI8, 0); + + IMG_HANDLE hMemCtxPrivData = psRGXCreateFreeListIN->hMemCtxPrivData; + IMG_HANDLE hMemCtxPrivDataInt = NULL; + IMG_HANDLE hsGlobalFreeList = psRGXCreateFreeListIN->hsGlobalFreeList; + RGX_FREELIST *pssGlobalFreeListInt = NULL; + IMG_HANDLE hsFreeListPMR = psRGXCreateFreeListIN->hsFreeListPMR; + PMR *pssFreeListPMRInt = NULL; + RGX_FREELIST *psCleanupCookieInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXCreateFreeListOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hMemCtxPrivDataInt, + hMemCtxPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + IMG_TRUE); + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateFreeList_exit; + } + + if (psRGXCreateFreeListIN->hsGlobalFreeList) + { + /* Look up the address from the handle */ + psRGXCreateFreeListOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&pssGlobalFreeListInt, + hsGlobalFreeList, + PVRSRV_HANDLE_TYPE_RGX_FREELIST, + IMG_TRUE); + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateFreeList_exit; + } + } + + /* Look up the address from the handle */ + psRGXCreateFreeListOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&pssFreeListPMRInt, + hsFreeListPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateFreeList_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXCreateFreeListOUT->eError = + RGXCreateFreeList(psConnection, OSGetDevNode(psConnection), + hMemCtxPrivDataInt, + psRGXCreateFreeListIN->ui32MaxFLPages, + psRGXCreateFreeListIN->ui32InitFLPages, + psRGXCreateFreeListIN->ui32GrowFLPages, + psRGXCreateFreeListIN->ui32GrowParamThreshold, + pssGlobalFreeListInt, + psRGXCreateFreeListIN->bbFreeListCheck, + psRGXCreateFreeListIN->spsFreeListDevVAddr, + pssFreeListPMRInt, + psRGXCreateFreeListIN->uiPMROffset, + &psCleanupCookieInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + { + goto RGXCreateFreeList_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXCreateFreeListOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateFreeListOUT->hCleanupCookie, + (void *)psCleanupCookieInt, + PVRSRV_HANDLE_TYPE_RGX_FREELIST, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateFreeListpsCleanupCookieIntRelease); + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateFreeList_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXCreateFreeList_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hMemCtxPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hMemCtxPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + + if (psRGXCreateFreeListIN->hsGlobalFreeList) + { + + /* Unreference the previously looked up handle */ + if (pssGlobalFreeListInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hsGlobalFreeList, + PVRSRV_HANDLE_TYPE_RGX_FREELIST); + } + } + + /* Unreference the previously looked up handle */ + if (pssFreeListPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hsFreeListPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXCreateFreeListOUT->eError != PVRSRV_OK) + { + if (psCleanupCookieInt) + { + RGXDestroyFreeList(psCleanupCookieInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDestroyFreeList(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyFreeListIN_UI8, + IMG_UINT8 * psRGXDestroyFreeListOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *psRGXDestroyFreeListIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *) + IMG_OFFSET_ADDR(psRGXDestroyFreeListIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *psRGXDestroyFreeListOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *) + IMG_OFFSET_ADDR(psRGXDestroyFreeListOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psRGXDestroyFreeListOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psRGXDestroyFreeListIN-> + hCleanupCookie, + PVRSRV_HANDLE_TYPE_RGX_FREELIST); + if (unlikely + ((psRGXDestroyFreeListOUT->eError != PVRSRV_OK) + && (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psRGXDestroyFreeListOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyFreeList_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXDestroyFreeList_exit: + + return 0; +} + +static PVRSRV_ERROR _RGXCreateRenderContextpsRenderContextIntRelease(void + *pvData) +{ + PVRSRV_ERROR eError; + eError = + PVRSRVRGXDestroyRenderContextKM((RGX_SERVER_RENDER_CONTEXT *) + pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXCreateRenderContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateRenderContextIN_UI8, + IMG_UINT8 * psRGXCreateRenderContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextIN = + (PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateRenderContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateRenderContextOUT_UI8, 0); + + IMG_BYTE *psFrameworkCmdInt = NULL; + IMG_HANDLE hPrivData = psRGXCreateRenderContextIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + IMG_BYTE *psStaticRenderContextStateInt = NULL; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXCreateRenderContextIN->ui32FrameworkCmdSize * + sizeof(IMG_BYTE)) + + (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * + sizeof(IMG_BYTE)) + 0; + + if (unlikely + (psRGXCreateRenderContextIN->ui32FrameworkCmdSize > + RGXFWIF_RF_CMD_SIZE)) + { + psRGXCreateRenderContextOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXCreateRenderContext_exit; + } + + if (unlikely + (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize > + RGXFWIF_STATIC_RENDERCONTEXT_SIZE)) + { + psRGXCreateRenderContextOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXCreateRenderContext_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) 
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXCreateRenderContextIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXCreateRenderContextIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXCreateRenderContextOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXCreateRenderContext_exit; + } + } + } + + if (psRGXCreateRenderContextIN->ui32FrameworkCmdSize != 0) + { + psFrameworkCmdInt = + (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXCreateRenderContextIN->ui32FrameworkCmdSize * + sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXCreateRenderContextIN->ui32FrameworkCmdSize * + sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, psFrameworkCmdInt, + (const void __user *)psRGXCreateRenderContextIN-> + psFrameworkCmd, + psRGXCreateRenderContextIN->ui32FrameworkCmdSize * + sizeof(IMG_BYTE)) != PVRSRV_OK) + { + psRGXCreateRenderContextOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateRenderContext_exit; + } + } + if (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize != 0) + { + psStaticRenderContextStateInt = + (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXCreateRenderContextIN-> + ui32StaticRenderContextStateSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * + sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, psStaticRenderContextStateInt, + (const void __user 
*)psRGXCreateRenderContextIN-> + psStaticRenderContextState, + psRGXCreateRenderContextIN-> + ui32StaticRenderContextStateSize * sizeof(IMG_BYTE)) != + PVRSRV_OK) + { + psRGXCreateRenderContextOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateRenderContext_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXCreateRenderContextOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + IMG_TRUE); + if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateRenderContext_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXCreateRenderContextOUT->eError = + PVRSRVRGXCreateRenderContextKM(psConnection, + OSGetDevNode(psConnection), + psRGXCreateRenderContextIN-> + ui32Priority, + psRGXCreateRenderContextIN-> + sVDMCallStackAddr, + psRGXCreateRenderContextIN-> + ui32FrameworkCmdSize, + psFrameworkCmdInt, hPrivDataInt, + psRGXCreateRenderContextIN-> + ui32StaticRenderContextStateSize, + psStaticRenderContextStateInt, + psRGXCreateRenderContextIN-> + ui32PackedCCBSizeU8888, + psRGXCreateRenderContextIN-> + ui32ContextFlags, + psRGXCreateRenderContextIN-> + ui64RobustnessAddress, + psRGXCreateRenderContextIN-> + ui32MaxTADeadlineMS, + psRGXCreateRenderContextIN-> + ui32Max3DDeadlineMS, + &psRenderContextInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) + { + goto RGXCreateRenderContext_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXCreateRenderContextOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateRenderContextOUT-> + hRenderContext, + (void *)psRenderContextInt, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateRenderContextpsRenderContextIntRelease); + if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateRenderContext_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXCreateRenderContext_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXCreateRenderContextOUT->eError != PVRSRV_OK) + { + if (psRenderContextInt) + { + PVRSRVRGXDestroyRenderContextKM(psRenderContextInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDestroyRenderContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyRenderContextIN_UI8, + IMG_UINT8 * + psRGXDestroyRenderContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyRenderContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextOUT + = + (PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT 
*) + IMG_OFFSET_ADDR(psRGXDestroyRenderContextOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psRGXDestroyRenderContextOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psRGXDestroyRenderContextIN-> + hCleanupCookie, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + if (unlikely + ((psRGXDestroyRenderContextOUT->eError != PVRSRV_OK) + && (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psRGXDestroyRenderContextOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyRenderContext_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXDestroyRenderContext_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSetRenderContextPriority(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXSetRenderContextPriorityIN_UI8, + IMG_UINT8 * + psRGXSetRenderContextPriorityOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY + *psRGXSetRenderContextPriorityIN = + (PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXSetRenderContextPriorityIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY + *psRGXSetRenderContextPriorityOUT = + (PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXSetRenderContextPriorityOUT_UI8, 0); + + IMG_HANDLE hRenderContext = + psRGXSetRenderContextPriorityIN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetRenderContextPriorityOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRenderContextInt, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXSetRenderContextPriorityOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetRenderContextPriority_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetRenderContextPriorityOUT->eError = + PVRSRVRGXSetRenderContextPriorityKM(psConnection, + OSGetDevNode(psConnection), + psRenderContextInt, + psRGXSetRenderContextPriorityIN-> + ui32Priority); + +RGXSetRenderContextPriority_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psRenderContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXGetLastRenderContextResetReason(IMG_UINT32 + ui32DispatchTableEntry, + IMG_UINT8 * + psRGXGetLastRenderContextResetReasonIN_UI8, + IMG_UINT8 * + psRGXGetLastRenderContextResetReasonOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXGETLASTRENDERCONTEXTRESETREASON + *psRGXGetLastRenderContextResetReasonIN = + (PVRSRV_BRIDGE_IN_RGXGETLASTRENDERCONTEXTRESETREASON *) + IMG_OFFSET_ADDR(psRGXGetLastRenderContextResetReasonIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXGETLASTRENDERCONTEXTRESETREASON + *psRGXGetLastRenderContextResetReasonOUT = + (PVRSRV_BRIDGE_OUT_RGXGETLASTRENDERCONTEXTRESETREASON *) + IMG_OFFSET_ADDR(psRGXGetLastRenderContextResetReasonOUT_UI8, 0); + + IMG_HANDLE hRenderContext = + psRGXGetLastRenderContextResetReasonIN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXGetLastRenderContextResetReasonOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRenderContextInt, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, + IMG_TRUE); + if (unlikely + (psRGXGetLastRenderContextResetReasonOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXGetLastRenderContextResetReason_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXGetLastRenderContextResetReasonOUT->eError = + PVRSRVRGXGetLastRenderContextResetReasonKM(psRenderContextInt, + &psRGXGetLastRenderContextResetReasonOUT-> + ui32LastResetReason, + &psRGXGetLastRenderContextResetReasonOUT-> + ui32LastResetJobRef); + +RGXGetLastRenderContextResetReason_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psRenderContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXRenderContextStalled(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXRenderContextStalledIN_UI8, + IMG_UINT8 * + psRGXRenderContextStalledOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledIN = + (PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *) + IMG_OFFSET_ADDR(psRGXRenderContextStalledIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledOUT + = + (PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *) + IMG_OFFSET_ADDR(psRGXRenderContextStalledOUT_UI8, 0); + + IMG_HANDLE hRenderContext = psRGXRenderContextStalledIN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXRenderContextStalledOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRenderContextInt, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXRenderContextStalledOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXRenderContextStalled_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXRenderContextStalledOUT->eError = + RGXRenderContextStalledKM(psRenderContextInt); + +RGXRenderContextStalled_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psRenderContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXKickTA3D2(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXKickTA3D2IN_UI8, + IMG_UINT8 * psRGXKickTA3D2OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXKICKTA3D2 *psRGXKickTA3D2IN = + (PVRSRV_BRIDGE_IN_RGXKICKTA3D2 *) + IMG_OFFSET_ADDR(psRGXKickTA3D2IN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXKICKTA3D2 *psRGXKickTA3D2OUT = + (PVRSRV_BRIDGE_OUT_RGXKICKTA3D2 *) + IMG_OFFSET_ADDR(psRGXKickTA3D2OUT_UI8, 0); + + IMG_HANDLE hRenderContext = psRGXKickTA3D2IN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + SYNC_PRIMITIVE_BLOCK **psClientTAFenceSyncPrimBlockInt = NULL; + IMG_HANDLE *hClientTAFenceSyncPrimBlockInt2 = NULL; + IMG_UINT32 *ui32ClientTAFenceSyncOffsetInt = NULL; + IMG_UINT32 *ui32ClientTAFenceValueInt = NULL; + SYNC_PRIMITIVE_BLOCK **psClientTAUpdateSyncPrimBlockInt = NULL; + IMG_HANDLE *hClientTAUpdateSyncPrimBlockInt2 = NULL; + IMG_UINT32 *ui32ClientTAUpdateSyncOffsetInt = NULL; + IMG_UINT32 *ui32ClientTAUpdateValueInt = NULL; + SYNC_PRIMITIVE_BLOCK **psClient3DUpdateSyncPrimBlockInt = NULL; + IMG_HANDLE *hClient3DUpdateSyncPrimBlockInt2 = NULL; + IMG_UINT32 *ui32Client3DUpdateSyncOffsetInt = NULL; + IMG_UINT32 *ui32Client3DUpdateValueInt = NULL; + IMG_HANDLE hPRFenceUFOSyncPrimBlock = + psRGXKickTA3D2IN->hPRFenceUFOSyncPrimBlock; + SYNC_PRIMITIVE_BLOCK *psPRFenceUFOSyncPrimBlockInt = NULL; + IMG_CHAR *uiUpdateFenceNameInt = NULL; + IMG_CHAR *uiUpdateFenceName3DInt = NULL; + IMG_BYTE *psTACmdInt = NULL; + IMG_BYTE *ps3DPRCmdInt = NULL; + IMG_BYTE *ps3DCmdInt = NULL; + IMG_HANDLE hKMHWRTDataSet = 
psRGXKickTA3D2IN->hKMHWRTDataSet; + RGX_KM_HW_RT_DATASET *psKMHWRTDataSetInt = NULL; + IMG_HANDLE hZSBuffer = psRGXKickTA3D2IN->hZSBuffer; + RGX_ZSBUFFER_DATA *psZSBufferInt = NULL; + IMG_HANDLE hMSAAScratchBuffer = psRGXKickTA3D2IN->hMSAAScratchBuffer; + RGX_ZSBUFFER_DATA *psMSAAScratchBufferInt = NULL; + IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; + PMR **psSyncPMRsInt = NULL; + IMG_HANDLE *hSyncPMRsInt2 = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXKickTA3D2IN->ui32ClientTAFenceCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)) + + (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) + + (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) + + (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) + + (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)) + + (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) + + (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) + + (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) + + (psRGXKickTA3D2IN->ui32Client3DUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)) + + (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) + + (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) + + (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) + + (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + (psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) + + (psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) + + (psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) + + (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + + (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *)) + + (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; + + if (unlikely + (psRGXKickTA3D2IN->ui32ClientTAFenceCount > PVRSRV_MAX_SYNCS)) + 
{ + psRGXKickTA3D2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely + (psRGXKickTA3D2IN->ui32ClientTAUpdateCount > PVRSRV_MAX_SYNCS)) + { + psRGXKickTA3D2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely + (psRGXKickTA3D2IN->ui32Client3DUpdateCount > PVRSRV_MAX_SYNCS)) + { + psRGXKickTA3D2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely + (psRGXKickTA3D2IN->ui32TACmdSize > + RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXKickTA3D2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely + (psRGXKickTA3D2IN->ui323DPRCmdSize > + RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXKickTA3D2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely + (psRGXKickTA3D2IN->ui323DCmdSize > + RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXKickTA3D2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely(psRGXKickTA3D2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) + { + psRGXKickTA3D2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXKickTA3D2IN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXKickTA3D2IN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXKickTA3D2OUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXKickTA3D2_exit; + } + } + } + + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) + { + psClientTAFenceSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32ClientTAFenceCount * + sizeof(SYNC_PRIMITIVE_BLOCK *); + hClientTAFenceSyncPrimBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32ClientTAFenceCount * + sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hClientTAFenceSyncPrimBlockInt2, + (const void __user *)psRGXKickTA3D2IN-> + phClientTAFenceSyncPrimBlock, + psRGXKickTA3D2IN->ui32ClientTAFenceCount * + sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) + { + ui32ClientTAFenceSyncOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32ClientTAFenceCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientTAFenceSyncOffsetInt, + (const void __user *)psRGXKickTA3D2IN-> + pui32ClientTAFenceSyncOffset, + psRGXKickTA3D2IN->ui32ClientTAFenceCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = 
PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) + { + ui32ClientTAFenceValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32ClientTAFenceCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientTAFenceValueInt, + (const void __user *)psRGXKickTA3D2IN-> + pui32ClientTAFenceValue, + psRGXKickTA3D2IN->ui32ClientTAFenceCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) + { + psClientTAUpdateSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *); + hClientTAUpdateSyncPrimBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * + sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hClientTAUpdateSyncPrimBlockInt2, + (const void __user *)psRGXKickTA3D2IN-> + phClientTAUpdateSyncPrimBlock, + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * + sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) + { + ui32ClientTAUpdateSyncOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0) + { + if 
(OSCopyFromUser + (NULL, ui32ClientTAUpdateSyncOffsetInt, + (const void __user *)psRGXKickTA3D2IN-> + pui32ClientTAUpdateSyncOffset, + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) + { + ui32ClientTAUpdateValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientTAUpdateValueInt, + (const void __user *)psRGXKickTA3D2IN-> + pui32ClientTAUpdateValue, + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) + { + psClient3DUpdateSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32Client3DUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *); + hClient3DUpdateSyncPrimBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32Client3DUpdateCount * + sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hClient3DUpdateSyncPrimBlockInt2, + (const void __user *)psRGXKickTA3D2IN-> + phClient3DUpdateSyncPrimBlock, + psRGXKickTA3D2IN->ui32Client3DUpdateCount * + sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) + { + ui32Client3DUpdateSyncOffsetInt = + (IMG_UINT32 *) 
IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32Client3DUpdateCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32Client3DUpdateSyncOffsetInt, + (const void __user *)psRGXKickTA3D2IN-> + pui32Client3DUpdateSyncOffset, + psRGXKickTA3D2IN->ui32Client3DUpdateCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) + { + ui32Client3DUpdateValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32Client3DUpdateCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32Client3DUpdateValueInt, + (const void __user *)psRGXKickTA3D2IN-> + pui32Client3DUpdateValue, + psRGXKickTA3D2IN->ui32Client3DUpdateCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + + { + uiUpdateFenceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceNameInt, + (const void __user *)psRGXKickTA3D2IN->puiUpdateFenceName, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + ((IMG_CHAR *) + uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + + { + uiUpdateFenceName3DInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += 
PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceName3DInt, + (const void __user *)psRGXKickTA3D2IN-> + puiUpdateFenceName3D, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + ((IMG_CHAR *) + uiUpdateFenceName3DInt)[(PVRSRV_SYNC_NAME_LENGTH * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + if (psRGXKickTA3D2IN->ui32TACmdSize != 0) + { + psTACmdInt = + (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, psTACmdInt, + (const void __user *)psRGXKickTA3D2IN->psTACmd, + psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) != + PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui323DPRCmdSize != 0) + { + ps3DPRCmdInt = + (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, ps3DPRCmdInt, + (const void __user *)psRGXKickTA3D2IN->ps3DPRCmd, + psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) != + PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui323DCmdSize != 0) + { + ps3DCmdInt = + (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, 
ps3DCmdInt, + (const void __user *)psRGXKickTA3D2IN->ps3DCmd, + psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) != + PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0) + { + ui32SyncPMRFlagsInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32SyncPMRFlagsInt, + (const void __user *)psRGXKickTA3D2IN->pui32SyncPMRFlags, + psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != + PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0) + { + psSyncPMRsInt = + (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *); + hSyncPMRsInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hSyncPMRsInt2, + (const void __user *)psRGXKickTA3D2IN->phSyncPMRs, + psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != + PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRenderContextInt, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection-> + psHandleBase, + (void **) + &psClientTAFenceSyncPrimBlockInt + [i], + hClientTAFenceSyncPrimBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection-> + psHandleBase, + (void **) + &psClientTAUpdateSyncPrimBlockInt + [i], + hClientTAUpdateSyncPrimBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection-> + psHandleBase, + (void **) + &psClient3DUpdateSyncPrimBlockInt + [i], + hClient3DUpdateSyncPrimBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + } + + /* 
Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPRFenceUFOSyncPrimBlockInt, + hPRFenceUFOSyncPrimBlock, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + + if (psRGXKickTA3D2IN->hKMHWRTDataSet) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psKMHWRTDataSetInt, + hKMHWRTDataSet, + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, + IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + + if (psRGXKickTA3D2IN->hZSBuffer) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psZSBufferInt, + hZSBuffer, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, + IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + + if (psRGXKickTA3D2IN->hMSAAScratchBuffer) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psMSAAScratchBufferInt, + hMSAAScratchBuffer, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, + IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection-> + psHandleBase, + (void **) + &psSyncPMRsInt[i], + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if 
(unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXKickTA3D2OUT->eError = + PVRSRVRGXKickTA3DKM(psRenderContextInt, + psRGXKickTA3D2IN->ui32ClientCacheOpSeqNum, + psRGXKickTA3D2IN->ui32ClientTAFenceCount, + psClientTAFenceSyncPrimBlockInt, + ui32ClientTAFenceSyncOffsetInt, + ui32ClientTAFenceValueInt, + psRGXKickTA3D2IN->ui32ClientTAUpdateCount, + psClientTAUpdateSyncPrimBlockInt, + ui32ClientTAUpdateSyncOffsetInt, + ui32ClientTAUpdateValueInt, + psRGXKickTA3D2IN->ui32Client3DUpdateCount, + psClient3DUpdateSyncPrimBlockInt, + ui32Client3DUpdateSyncOffsetInt, + ui32Client3DUpdateValueInt, + psPRFenceUFOSyncPrimBlockInt, + psRGXKickTA3D2IN->ui32FRFenceUFOSyncOffset, + psRGXKickTA3D2IN->ui32FRFenceValue, + psRGXKickTA3D2IN->hCheckFence, + psRGXKickTA3D2IN->hUpdateTimeline, + &psRGXKickTA3D2OUT->hUpdateFence, + uiUpdateFenceNameInt, + psRGXKickTA3D2IN->hCheckFence3D, + psRGXKickTA3D2IN->hUpdateTimeline3D, + &psRGXKickTA3D2OUT->hUpdateFence3D, + uiUpdateFenceName3DInt, + psRGXKickTA3D2IN->ui32TACmdSize, + psTACmdInt, + psRGXKickTA3D2IN->ui323DPRCmdSize, + ps3DPRCmdInt, + psRGXKickTA3D2IN->ui323DCmdSize, + ps3DCmdInt, + psRGXKickTA3D2IN->ui32ExtJobRef, + psRGXKickTA3D2IN->bbKickTA, + psRGXKickTA3D2IN->bbKickPR, + psRGXKickTA3D2IN->bbKick3D, + psRGXKickTA3D2IN->bbAbort, + psRGXKickTA3D2IN->ui32PDumpFlags, + psKMHWRTDataSetInt, + psZSBufferInt, + psMSAAScratchBufferInt, + psRGXKickTA3D2IN->ui32SyncPMRCount, + ui32SyncPMRFlagsInt, + psSyncPMRsInt, + psRGXKickTA3D2IN->ui32RenderTargetSize, + psRGXKickTA3D2IN->ui32NumberOfDrawCalls, + psRGXKickTA3D2IN->ui32NumberOfIndices, + psRGXKickTA3D2IN->ui32NumberOfMRTs, + psRGXKickTA3D2IN->ui64Deadline); + +RGXKickTA3D2_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psRenderContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + } + + if (hClientTAFenceSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hClientTAFenceSyncPrimBlockInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + hClientTAFenceSyncPrimBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + + if (hClientTAUpdateSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hClientTAUpdateSyncPrimBlockInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + hClientTAUpdateSyncPrimBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + + if (hClient3DUpdateSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hClient3DUpdateSyncPrimBlockInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + hClient3DUpdateSyncPrimBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + + /* Unreference the previously looked up handle */ + if (psPRFenceUFOSyncPrimBlockInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPRFenceUFOSyncPrimBlock, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + + if (psRGXKickTA3D2IN->hKMHWRTDataSet) + { + + /* Unreference the previously looked up handle */ + if (psKMHWRTDataSetInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hKMHWRTDataSet, + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET); + } + } + + if (psRGXKickTA3D2IN->hZSBuffer) + { + + /* Unreference the previously 
looked up handle */ + if (psZSBufferInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hZSBuffer, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); + } + } + + if (psRGXKickTA3D2IN->hMSAAScratchBuffer) + { + + /* Unreference the previously looked up handle */ + if (psMSAAScratchBufferInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hMSAAScratchBuffer, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); + } + } + + if (hSyncPMRsInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hSyncPMRsInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + } + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSetRenderContextProperty(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXSetRenderContextPropertyIN_UI8, + IMG_UINT8 * + psRGXSetRenderContextPropertyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY + *psRGXSetRenderContextPropertyIN = + (PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetRenderContextPropertyIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY + *psRGXSetRenderContextPropertyOUT = + (PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetRenderContextPropertyOUT_UI8, 0); + + IMG_HANDLE hRenderContext = + psRGXSetRenderContextPropertyIN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetRenderContextPropertyOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRenderContextInt, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXSetRenderContextPropertyOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetRenderContextProperty_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetRenderContextPropertyOUT->eError = + PVRSRVRGXSetRenderContextPropertyKM(psRenderContextInt, + psRGXSetRenderContextPropertyIN-> + ui32Property, + psRGXSetRenderContextPropertyIN-> + ui64Input, + &psRGXSetRenderContextPropertyOUT-> + ui64Output); + +RGXSetRenderContextProperty_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psRenderContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRGXTA3DBridge(void); +PVRSRV_ERROR DeinitRGXTA3DBridge(void); + +/* + * Register all RGXTA3D functions with services + */ +PVRSRV_ERROR InitRGXTA3DBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET, + PVRSRVBridgeRGXCreateHWRTDataSet, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET, + PVRSRVBridgeRGXDestroyHWRTDataSet, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER, + PVRSRVBridgeRGXCreateZSBuffer, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER, + PVRSRVBridgeRGXDestroyZSBuffer, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER, + PVRSRVBridgeRGXPopulateZSBuffer, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER, + PVRSRVBridgeRGXUnpopulateZSBuffer, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST, + PVRSRVBridgeRGXCreateFreeList, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST, + PVRSRVBridgeRGXDestroyFreeList, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT, + PVRSRVBridgeRGXCreateRenderContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT, + PVRSRVBridgeRGXDestroyRenderContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY, + PVRSRVBridgeRGXSetRenderContextPriority, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + 
PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON, + PVRSRVBridgeRGXGetLastRenderContextResetReason, + NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED, + PVRSRVBridgeRGXRenderContextStalled, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2, + PVRSRVBridgeRGXKickTA3D2, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY, + PVRSRVBridgeRGXSetRenderContextProperty, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxta3d functions with services + */ +PVRSRV_ERROR DeinitRGXTA3DBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED); + + 
UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/rgxtq2_bridge/common_rgxtq2_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/rgxtq2_bridge/common_rgxtq2_bridge.h new file mode 100644 index 000000000000..7fcaa9d71568 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxtq2_bridge/common_rgxtq2_bridge.h @@ -0,0 +1,227 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxtq2 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxtq2 +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_RGXTQ2_BRIDGE_H +#define COMMON_RGXTQ2_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" +#include "pvrsrv_sync_km.h" + +#define PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2 PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+5 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+6 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+7 +#define PVRSRV_BRIDGE_RGXTQ2_CMD_LAST (PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+7) + +/******************************************* + RGXTDMCreateTransferContext + *******************************************/ + +/* Bridge in structure for RGXTDMCreateTransferContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT_TAG +{ + IMG_UINT32 ui32Priority; + IMG_UINT32 ui32FrameworkCmdize; + IMG_BYTE *psFrameworkCmd; + IMG_HANDLE hPrivData; + IMG_UINT32 ui32PackedCCBSizeU88; + IMG_UINT32 ui32ContextFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT; + +/* Bridge out structure for RGXTDMCreateTransferContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT_TAG +{ + IMG_HANDLE hTransferContext; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT; + +/******************************************* + RGXTDMDestroyTransferContext + 
*******************************************/ + +/* Bridge in structure for RGXTDMDestroyTransferContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT_TAG +{ + IMG_HANDLE hTransferContext; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT; + +/* Bridge out structure for RGXTDMDestroyTransferContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT; + +/******************************************* + RGXTDMSetTransferContextPriority + *******************************************/ + +/* Bridge in structure for RGXTDMSetTransferContextPriority */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY_TAG +{ + IMG_HANDLE hTransferContext; + IMG_UINT32 ui32Priority; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY; + +/* Bridge out structure for RGXTDMSetTransferContextPriority */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY; + +/******************************************* + RGXTDMNotifyWriteOffsetUpdate + *******************************************/ + +/* Bridge in structure for RGXTDMNotifyWriteOffsetUpdate */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE_TAG +{ + IMG_HANDLE hTransferContext; + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE; + +/* Bridge out structure for RGXTDMNotifyWriteOffsetUpdate */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE; + +/******************************************* + RGXTDMSubmitTransfer2 + *******************************************/ + +/* Bridge in structure for RGXTDMSubmitTransfer2 */ +typedef struct 
PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2_TAG +{ + IMG_HANDLE hTransferContext; + IMG_UINT32 ui32PDumpFlags; + IMG_UINT32 ui32ClientCacheOpSeqNum; + IMG_UINT32 ui32ClientUpdateCount; + IMG_HANDLE *phUpdateUFOSyncPrimBlock; + IMG_UINT32 *pui32UpdateSyncOffset; + IMG_UINT32 *pui32UpdateValue; + PVRSRV_FENCE hCheckFenceFD; + PVRSRV_TIMELINE hUpdateTimeline; + IMG_CHAR *puiUpdateFenceName; + IMG_UINT32 ui32CommandSize; + IMG_UINT8 *pui8FWCommand; + IMG_UINT32 ui32ExternalJobReference; + IMG_UINT32 ui32SyncPMRCount; + IMG_UINT32 *pui32SyncPMRFlags; + IMG_HANDLE *phSyncPMRs; + IMG_UINT32 ui32Characteristic1; + IMG_UINT32 ui32Characteristic2; + IMG_UINT64 ui64DeadlineInus; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2; + +/* Bridge out structure for RGXTDMSubmitTransfer2 */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2_TAG +{ + PVRSRV_FENCE hUpdateFence; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2; + +/******************************************* + RGXTDMGetSharedMemory + *******************************************/ + +/* Bridge in structure for RGXTDMGetSharedMemory */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY; + +/* Bridge out structure for RGXTDMGetSharedMemory */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY_TAG +{ + IMG_HANDLE hCLIPMRMem; + IMG_HANDLE hUSCPMRMem; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY; + +/******************************************* + RGXTDMReleaseSharedMemory + *******************************************/ + +/* Bridge in structure for RGXTDMReleaseSharedMemory */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY_TAG +{ + IMG_HANDLE hPMRMem; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY; + +/* Bridge out structure for RGXTDMReleaseSharedMemory */ +typedef 
struct PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY; + +/******************************************* + RGXTDMSetTransferContextProperty + *******************************************/ + +/* Bridge in structure for RGXTDMSetTransferContextProperty */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY_TAG +{ + IMG_HANDLE hTransferContext; + IMG_UINT32 ui32Property; + IMG_UINT64 ui64Input; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY; + +/* Bridge out structure for RGXTDMSetTransferContextProperty */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY_TAG +{ + IMG_UINT64 ui64Output; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY; + +#endif /* COMMON_RGXTQ2_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/rgxtq2_bridge/server_rgxtq2_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/rgxtq2_bridge/server_rgxtq2_bridge.c new file mode 100644 index 000000000000..025eea8d67a1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxtq2_bridge/server_rgxtq2_bridge.c @@ -0,0 +1,1333 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxtq2 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxtq2 +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxtdmtransfer.h" + +#include "common_rgxtq2_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +#include "rgx_bvnc_defs_km.h" + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _RGXTDMCreateTransferContextpsTransferContextIntRelease(void + *pvData) +{ + PVRSRV_ERROR eError; + eError = + PVRSRVRGXTDMDestroyTransferContextKM((RGX_SERVER_TQ_TDM_CONTEXT *) + pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXTDMCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXTDMCreateTransferContextIN_UI8, + IMG_UINT8 * + psRGXTDMCreateTransferContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT + *psRGXTDMCreateTransferContextIN = + (PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT *) + IMG_OFFSET_ADDR(psRGXTDMCreateTransferContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT + *psRGXTDMCreateTransferContextOUT = + (PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT *) + IMG_OFFSET_ADDR(psRGXTDMCreateTransferContextOUT_UI8, 0); + + IMG_BYTE *psFrameworkCmdInt = NULL; + IMG_HANDLE hPrivData = psRGXTDMCreateTransferContextIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize * + sizeof(IMG_BYTE)) + 0; + + if (unlikely + (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize > + RGXFWIF_RF_CMD_SIZE)) + { + 
psRGXTDMCreateTransferContextOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXTDMCreateTransferContext_exit; + } + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) + { + psRGXTDMCreateTransferContextOUT->eError = + PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXTDMCreateTransferContext_exit; + } + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXTDMCreateTransferContextIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *) + psRGXTDMCreateTransferContextIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXTDMCreateTransferContextOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXTDMCreateTransferContext_exit; + } + } + } + + if (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize != 0) + { + psFrameworkCmdInt = + (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize * + sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize * + sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, psFrameworkCmdInt, + (const void __user *)psRGXTDMCreateTransferContextIN-> + psFrameworkCmd, + psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize * + sizeof(IMG_BYTE)) 
!= PVRSRV_OK) + { + psRGXTDMCreateTransferContextOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMCreateTransferContext_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXTDMCreateTransferContextOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + IMG_TRUE); + if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMCreateTransferContext_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXTDMCreateTransferContextOUT->eError = + PVRSRVRGXTDMCreateTransferContextKM(psConnection, + OSGetDevNode(psConnection), + psRGXTDMCreateTransferContextIN-> + ui32Priority, + psRGXTDMCreateTransferContextIN-> + ui32FrameworkCmdize, + psFrameworkCmdInt, hPrivDataInt, + psRGXTDMCreateTransferContextIN-> + ui32PackedCCBSizeU88, + psRGXTDMCreateTransferContextIN-> + ui32ContextFlags, + &psTransferContextInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)) + { + goto RGXTDMCreateTransferContext_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRGXTDMCreateTransferContextOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXTDMCreateTransferContextOUT-> + hTransferContext, + (void *)psTransferContextInt, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXTDMCreateTransferContextpsTransferContextIntRelease); + if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMCreateTransferContext_exit; + } + + /* Release now we have created handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXTDMCreateTransferContext_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK) + { + if (psTransferContextInt) + { + PVRSRVRGXTDMDestroyTransferContextKM + (psTransferContextInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXTDMDestroyTransferContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXTDMDestroyTransferContextIN_UI8, + IMG_UINT8 * + psRGXTDMDestroyTransferContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT + *psRGXTDMDestroyTransferContextIN = + (PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT *) + IMG_OFFSET_ADDR(psRGXTDMDestroyTransferContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT + *psRGXTDMDestroyTransferContextOUT = + (PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT *) + IMG_OFFSET_ADDR(psRGXTDMDestroyTransferContextOUT_UI8, 0); + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) + { + psRGXTDMDestroyTransferContextOUT->eError = + PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXTDMDestroyTransferContext_exit; + } + } + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXTDMDestroyTransferContextOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psRGXTDMDestroyTransferContextIN-> + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); + if (unlikely + ((psRGXTDMDestroyTransferContextOUT->eError != PVRSRV_OK) + && (psRGXTDMDestroyTransferContextOUT->eError != + PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString + (psRGXTDMDestroyTransferContextOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMDestroyTransferContext_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXTDMDestroyTransferContext_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXTDMSetTransferContextPriority(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXTDMSetTransferContextPriorityIN_UI8, + IMG_UINT8 * + psRGXTDMSetTransferContextPriorityOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY + *psRGXTDMSetTransferContextPriorityIN = + (PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPriorityIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY + *psRGXTDMSetTransferContextPriorityOUT = + (PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPriorityOUT_UI8, 0); + + IMG_HANDLE hTransferContext = + psRGXTDMSetTransferContextPriorityIN->hTransferContext; + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) + { + psRGXTDMSetTransferContextPriorityOUT->eError = + PVRSRV_ERROR_NOT_SUPPORTED; + + goto 
RGXTDMSetTransferContextPriority_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXTDMSetTransferContextPriorityOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psTransferContextInt, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, + IMG_TRUE); + if (unlikely + (psRGXTDMSetTransferContextPriorityOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMSetTransferContextPriority_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXTDMSetTransferContextPriorityOUT->eError = + PVRSRVRGXTDMSetTransferContextPriorityKM(psConnection, + OSGetDevNode(psConnection), + psTransferContextInt, + psRGXTDMSetTransferContextPriorityIN-> + ui32Priority); + +RGXTDMSetTransferContextPriority_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psTransferContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXTDMNotifyWriteOffsetUpdateIN_UI8, + IMG_UINT8 * + psRGXTDMNotifyWriteOffsetUpdateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE + *psRGXTDMNotifyWriteOffsetUpdateIN = + (PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE *) + IMG_OFFSET_ADDR(psRGXTDMNotifyWriteOffsetUpdateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE + *psRGXTDMNotifyWriteOffsetUpdateOUT = + (PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE *) + IMG_OFFSET_ADDR(psRGXTDMNotifyWriteOffsetUpdateOUT_UI8, 0); + + IMG_HANDLE hTransferContext = + psRGXTDMNotifyWriteOffsetUpdateIN->hTransferContext; + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) + { + psRGXTDMNotifyWriteOffsetUpdateOUT->eError = + PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXTDMNotifyWriteOffsetUpdate_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXTDMNotifyWriteOffsetUpdateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psTransferContextInt, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXTDMNotifyWriteOffsetUpdateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMNotifyWriteOffsetUpdate_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXTDMNotifyWriteOffsetUpdateOUT->eError = + PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(psTransferContextInt, + psRGXTDMNotifyWriteOffsetUpdateIN-> + ui32PDumpFlags); + +RGXTDMNotifyWriteOffsetUpdate_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psTransferContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXTDMSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXTDMSubmitTransfer2IN_UI8, + IMG_UINT8 * psRGXTDMSubmitTransfer2OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2 *psRGXTDMSubmitTransfer2IN = + (PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2 *) + IMG_OFFSET_ADDR(psRGXTDMSubmitTransfer2IN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2 *psRGXTDMSubmitTransfer2OUT = + (PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2 *) + IMG_OFFSET_ADDR(psRGXTDMSubmitTransfer2OUT_UI8, 0); + + IMG_HANDLE hTransferContext = + psRGXTDMSubmitTransfer2IN->hTransferContext; + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; + SYNC_PRIMITIVE_BLOCK **psUpdateUFOSyncPrimBlockInt = NULL; + IMG_HANDLE *hUpdateUFOSyncPrimBlockInt2 = NULL; + IMG_UINT32 *ui32UpdateSyncOffsetInt = NULL; + IMG_UINT32 *ui32UpdateValueInt = NULL; + IMG_CHAR *uiUpdateFenceNameInt = NULL; + IMG_UINT8 *ui8FWCommandInt = NULL; + IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; + PMR **psSyncPMRsInt = NULL; + IMG_HANDLE *hSyncPMRsInt2 = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + 
sizeof(SYNC_PRIMITIVE_BLOCK *)) + + (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_HANDLE)) + + (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32)) + + (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32)) + + (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + (psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8)) + + (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + + (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *)) + + (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + + 0; + + if (unlikely + (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount > + PVRSRV_MAX_SYNCS)) + { + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXTDMSubmitTransfer2_exit; + } + + if (unlikely + (psRGXTDMSubmitTransfer2IN->ui32CommandSize > + RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXTDMSubmitTransfer2_exit; + } + + if (unlikely + (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) + { + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXTDMSubmitTransfer2_exit; + } + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) + { + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXTDMSubmitTransfer2_exit; + } + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXTDMSubmitTransfer2IN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXTDMSubmitTransfer2IN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXTDMSubmitTransfer2_exit; + } + } + } + + if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0) + { + psUpdateUFOSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *); + hUpdateUFOSyncPrimBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hUpdateUFOSyncPrimBlockInt2, + (const void __user *)psRGXTDMSubmitTransfer2IN-> + phUpdateUFOSyncPrimBlock, + psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer2_exit; + } + } + if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0) + { + ui32UpdateSyncOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32UpdateSyncOffsetInt, + (const void __user *)psRGXTDMSubmitTransfer2IN-> + pui32UpdateSyncOffset, + 
psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer2_exit; + } + } + if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0) + { + ui32UpdateValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32UpdateValueInt, + (const void __user *)psRGXTDMSubmitTransfer2IN-> + pui32UpdateValue, + psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer2_exit; + } + } + + { + uiUpdateFenceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceNameInt, + (const void __user *)psRGXTDMSubmitTransfer2IN-> + puiUpdateFenceName, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer2_exit; + } + ((IMG_CHAR *) + uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + if (psRGXTDMSubmitTransfer2IN->ui32CommandSize != 0) + { + ui8FWCommandInt = + (IMG_UINT8 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer2IN->ui32CommandSize * + sizeof(IMG_UINT8); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8) > 0) + { + if (OSCopyFromUser + (NULL, ui8FWCommandInt, + (const void __user 
*)psRGXTDMSubmitTransfer2IN-> + pui8FWCommand, + psRGXTDMSubmitTransfer2IN->ui32CommandSize * + sizeof(IMG_UINT8)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer2_exit; + } + } + if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount != 0) + { + ui32SyncPMRFlagsInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > + 0) + { + if (OSCopyFromUser + (NULL, ui32SyncPMRFlagsInt, + (const void __user *)psRGXTDMSubmitTransfer2IN-> + pui32SyncPMRFlags, + psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer2_exit; + } + } + if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount != 0) + { + psSyncPMRsInt = + (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *); + hSyncPMRsInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * + sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > + 0) + { + if (OSCopyFromUser + (NULL, hSyncPMRsInt2, + (const void __user *)psRGXTDMSubmitTransfer2IN->phSyncPMRs, + psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * + sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer2_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psTransferContextInt, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMSubmitTransfer2_exit; + } + + { + IMG_UINT32 i; + + for (i = 0; + i < psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount; i++) + { + /* Look up the address from the handle */ + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection-> + psHandleBase, + (void **) + &psUpdateUFOSyncPrimBlockInt + [i], + hUpdateUFOSyncPrimBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely + (psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMSubmitTransfer2_exit; + } + } + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount; + i++) + { + /* Look up the address from the handle */ + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection-> + psHandleBase, + (void **) + &psSyncPMRsInt[i], + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely + (psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMSubmitTransfer2_exit; + } + } + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRVRGXTDMSubmitTransferKM(psTransferContextInt, + psRGXTDMSubmitTransfer2IN-> + ui32PDumpFlags, + psRGXTDMSubmitTransfer2IN-> + ui32ClientCacheOpSeqNum, + psRGXTDMSubmitTransfer2IN-> + ui32ClientUpdateCount, + psUpdateUFOSyncPrimBlockInt, + ui32UpdateSyncOffsetInt, + ui32UpdateValueInt, + psRGXTDMSubmitTransfer2IN-> + hCheckFenceFD, + psRGXTDMSubmitTransfer2IN-> + hUpdateTimeline, + &psRGXTDMSubmitTransfer2OUT-> + hUpdateFence, uiUpdateFenceNameInt, + psRGXTDMSubmitTransfer2IN-> + ui32CommandSize, ui8FWCommandInt, + psRGXTDMSubmitTransfer2IN-> + ui32ExternalJobReference, + psRGXTDMSubmitTransfer2IN-> + ui32SyncPMRCount, ui32SyncPMRFlagsInt, + psSyncPMRsInt, + psRGXTDMSubmitTransfer2IN-> + ui32Characteristic1, + psRGXTDMSubmitTransfer2IN-> + ui32Characteristic2, + psRGXTDMSubmitTransfer2IN-> + ui64DeadlineInus); + +RGXTDMSubmitTransfer2_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psTransferContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); + } + + if (hUpdateUFOSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; + i < psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hUpdateUFOSyncPrimBlockInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + hUpdateUFOSyncPrimBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + + if (hSyncPMRsInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount; + i++) + { + + /* Unreference the previously looked up handle */ + if (hSyncPMRsInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + } + } + /* Release now we have cleaned 
up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static PVRSRV_ERROR _RGXTDMGetSharedMemorypsCLIPMRMemIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVRGXTDMReleaseSharedMemoryKM((PMR *) pvData); + return eError; +} + +static PVRSRV_ERROR _RGXTDMGetSharedMemorypsUSCPMRMemIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVRGXTDMReleaseSharedMemoryKM((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXTDMGetSharedMemory(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXTDMGetSharedMemoryIN_UI8, + IMG_UINT8 * psRGXTDMGetSharedMemoryOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY *psRGXTDMGetSharedMemoryIN = + (PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY *) + IMG_OFFSET_ADDR(psRGXTDMGetSharedMemoryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY *psRGXTDMGetSharedMemoryOUT = + (PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY *) + IMG_OFFSET_ADDR(psRGXTDMGetSharedMemoryOUT_UI8, 0); + + PMR *psCLIPMRMemInt = NULL; + PMR *psUSCPMRMemInt = NULL; + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) + { + psRGXTDMGetSharedMemoryOUT->eError = + PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXTDMGetSharedMemory_exit; + } + } + + PVR_UNREFERENCED_PARAMETER(psRGXTDMGetSharedMemoryIN); + + psRGXTDMGetSharedMemoryOUT->eError = + PVRSRVRGXTDMGetSharedMemoryKM(psConnection, + OSGetDevNode(psConnection), + &psCLIPMRMemInt, &psUSCPMRMemInt); + /* Exit early if bridged call fails */ + if 
(unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK)) + { + goto RGXTDMGetSharedMemory_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRGXTDMGetSharedMemoryOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXTDMGetSharedMemoryOUT->hCLIPMRMem, + (void *)psCLIPMRMemInt, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXTDMGetSharedMemorypsCLIPMRMemIntRelease); + if (unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMGetSharedMemory_exit; + } + + psRGXTDMGetSharedMemoryOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXTDMGetSharedMemoryOUT->hUSCPMRMem, + (void *)psUSCPMRMemInt, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXTDMGetSharedMemorypsUSCPMRMemIntRelease); + if (unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMGetSharedMemory_exit; + } + + /* Release now we have created handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXTDMGetSharedMemory_exit: + + if (psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK) + { + if (psCLIPMRMemInt) + { + PVRSRVRGXTDMReleaseSharedMemoryKM(psCLIPMRMemInt); + } + if (psUSCPMRMemInt) + { + PVRSRVRGXTDMReleaseSharedMemoryKM(psUSCPMRMemInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXTDMReleaseSharedMemory(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXTDMReleaseSharedMemoryIN_UI8, + IMG_UINT8 * + psRGXTDMReleaseSharedMemoryOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY + *psRGXTDMReleaseSharedMemoryIN = + (PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY *) + IMG_OFFSET_ADDR(psRGXTDMReleaseSharedMemoryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY + *psRGXTDMReleaseSharedMemoryOUT = + (PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY *) + IMG_OFFSET_ADDR(psRGXTDMReleaseSharedMemoryOUT_UI8, 0); + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) + { + psRGXTDMReleaseSharedMemoryOUT->eError = + PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXTDMReleaseSharedMemory_exit; + } + } + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXTDMReleaseSharedMemoryOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psRGXTDMReleaseSharedMemoryIN-> + hPMRMem, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE); + if (unlikely + ((psRGXTDMReleaseSharedMemoryOUT->eError != PVRSRV_OK) + && (psRGXTDMReleaseSharedMemoryOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psRGXTDMReleaseSharedMemoryOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMReleaseSharedMemory_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXTDMReleaseSharedMemory_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXTDMSetTransferContextProperty(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXTDMSetTransferContextPropertyIN_UI8, + IMG_UINT8 * + psRGXTDMSetTransferContextPropertyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY + *psRGXTDMSetTransferContextPropertyIN = + (PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPropertyIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY + *psRGXTDMSetTransferContextPropertyOUT = + (PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPropertyOUT_UI8, 0); + + IMG_HANDLE hTransferContext = + psRGXTDMSetTransferContextPropertyIN->hTransferContext; + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) + { + psRGXTDMSetTransferContextPropertyOUT->eError = + PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXTDMSetTransferContextProperty_exit; + } + } 
+ + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXTDMSetTransferContextPropertyOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psTransferContextInt, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, + IMG_TRUE); + if (unlikely + (psRGXTDMSetTransferContextPropertyOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMSetTransferContextProperty_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXTDMSetTransferContextPropertyOUT->eError = + PVRSRVRGXTDMSetTransferContextPropertyKM(psTransferContextInt, + psRGXTDMSetTransferContextPropertyIN-> + ui32Property, + psRGXTDMSetTransferContextPropertyIN-> + ui64Input, + &psRGXTDMSetTransferContextPropertyOUT-> + ui64Output); + +RGXTDMSetTransferContextProperty_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psTransferContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRGXTQ2Bridge(void); +PVRSRV_ERROR DeinitRGXTQ2Bridge(void); + +/* + * Register all RGXTQ2 functions with services + */ +PVRSRV_ERROR InitRGXTQ2Bridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT, + PVRSRVBridgeRGXTDMCreateTransferContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT, + PVRSRVBridgeRGXTDMDestroyTransferContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY, + PVRSRVBridgeRGXTDMSetTransferContextPriority, + NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE, + PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2, + PVRSRVBridgeRGXTDMSubmitTransfer2, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY, + PVRSRVBridgeRGXTDMGetSharedMemory, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY, + PVRSRVBridgeRGXTDMReleaseSharedMemory, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY, + PVRSRVBridgeRGXTDMSetTransferContextProperty, + NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxtq2 functions with services + */ +PVRSRV_ERROR DeinitRGXTQ2Bridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + 
PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/rgxtq_bridge/common_rgxtq_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/rgxtq_bridge/common_rgxtq_bridge.h new file mode 100644 index 000000000000..bc4f49480b62 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxtq_bridge/common_rgxtq_bridge.h @@ -0,0 +1,175 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxtq +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxtq +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_RGXTQ_BRIDGE_H +#define COMMON_RGXTQ_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" +#include "pvrsrv_sync_km.h" + +#define PVRSRV_BRIDGE_RGXTQ_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2 PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXTQ_CMD_LAST (PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+4) + +/******************************************* + RGXCreateTransferContext + *******************************************/ + +/* Bridge in structure for RGXCreateTransferContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT_TAG +{ + IMG_UINT32 ui32Priority; + IMG_UINT32 ui32FrameworkCmdize; + IMG_BYTE *psFrameworkCmd; + IMG_HANDLE hPrivData; + IMG_UINT32 ui32PackedCCBSizeU8888; + IMG_UINT32 ui32ContextFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT; + +/* Bridge out structure for RGXCreateTransferContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT_TAG +{ + IMG_HANDLE hTransferContext; + IMG_HANDLE hCLIPMRMem; + IMG_HANDLE hUSCPMRMem; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT; + +/******************************************* + RGXDestroyTransferContext + *******************************************/ + +/* Bridge in structure for RGXDestroyTransferContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT_TAG +{ + IMG_HANDLE hTransferContext; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT; + +/* Bridge out structure for 
RGXDestroyTransferContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT; + +/******************************************* + RGXSetTransferContextPriority + *******************************************/ + +/* Bridge in structure for RGXSetTransferContextPriority */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY_TAG +{ + IMG_HANDLE hTransferContext; + IMG_UINT32 ui32Priority; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY; + +/* Bridge out structure for RGXSetTransferContextPriority */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY; + +/******************************************* + RGXSubmitTransfer2 + *******************************************/ + +/* Bridge in structure for RGXSubmitTransfer2 */ +typedef struct PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2_TAG +{ + IMG_HANDLE hTransferContext; + IMG_UINT32 ui32ClientCacheOpSeqNum; + IMG_UINT32 ui32PrepareCount; + IMG_UINT32 *pui32ClientUpdateCount; + IMG_HANDLE **phUpdateUFOSyncPrimBlock; + IMG_UINT32 **pui32UpdateSyncOffset; + IMG_UINT32 **pui32UpdateValue; + PVRSRV_FENCE hCheckFenceFD; + PVRSRV_TIMELINE h2DUpdateTimeline; + PVRSRV_TIMELINE h3DUpdateTimeline; + IMG_CHAR *puiUpdateFenceName; + IMG_UINT32 *pui32CommandSize; + IMG_UINT8 **pui8FWCommand; + IMG_UINT32 *pui32TQPrepareFlags; + IMG_UINT32 ui32ExtJobRef; + IMG_UINT32 ui32SyncPMRCount; + IMG_UINT32 *pui32SyncPMRFlags; + IMG_HANDLE *phSyncPMRs; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2; + +/* Bridge out structure for RGXSubmitTransfer2 */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2_TAG +{ + PVRSRV_FENCE h2DUpdateFence; + PVRSRV_FENCE h3DUpdateFence; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2; + 
+/******************************************* + RGXSetTransferContextProperty + *******************************************/ + +/* Bridge in structure for RGXSetTransferContextProperty */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPROPERTY_TAG +{ + IMG_HANDLE hTransferContext; + IMG_UINT32 ui32Property; + IMG_UINT64 ui64Input; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPROPERTY; + +/* Bridge out structure for RGXSetTransferContextProperty */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPROPERTY_TAG +{ + IMG_UINT64 ui64Output; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPROPERTY; + +#endif /* COMMON_RGXTQ_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/rgxtq_bridge/server_rgxtq_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/rgxtq_bridge/server_rgxtq_bridge.c new file mode 100644 index 000000000000..94cf31c8ec1d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/rgxtq_bridge/server_rgxtq_bridge.c @@ -0,0 +1,1339 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxtq +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxtq +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxtransfer.h" +#include "rgx_tq_shared.h" + +#include "common_rgxtq_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +#if defined(SUPPORT_RGXTQ_BRIDGE) + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _RGXCreateTransferContextpsTransferContextIntRelease(void + *pvData) +{ + PVRSRV_ERROR eError; + eError = + PVRSRVRGXDestroyTransferContextKM((RGX_SERVER_TQ_CONTEXT *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXCreateTransferContextIN_UI8, + IMG_UINT8 * + psRGXCreateTransferContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT *psRGXCreateTransferContextIN + = + (PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateTransferContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT + *psRGXCreateTransferContextOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateTransferContextOUT_UI8, 0); + + IMG_BYTE *psFrameworkCmdInt = NULL; + IMG_HANDLE hPrivData = psRGXCreateTransferContextIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL; + PMR *psCLIPMRMemInt = NULL; + PMR *psUSCPMRMemInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXCreateTransferContextIN->ui32FrameworkCmdize * + sizeof(IMG_BYTE)) + 0; + + if (unlikely + (psRGXCreateTransferContextIN->ui32FrameworkCmdize > + 
RGXFWIF_RF_CMD_SIZE)) + { + psRGXCreateTransferContextOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXCreateTransferContext_exit; + } + + psRGXCreateTransferContextOUT->hTransferContext = NULL; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXCreateTransferContextIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXCreateTransferContextIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXCreateTransferContextOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXCreateTransferContext_exit; + } + } + } + + if (psRGXCreateTransferContextIN->ui32FrameworkCmdize != 0) + { + psFrameworkCmdInt = + (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXCreateTransferContextIN->ui32FrameworkCmdize * + sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXCreateTransferContextIN->ui32FrameworkCmdize * + sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, psFrameworkCmdInt, + (const void __user *)psRGXCreateTransferContextIN-> + psFrameworkCmd, + psRGXCreateTransferContextIN->ui32FrameworkCmdize * + sizeof(IMG_BYTE)) != PVRSRV_OK) + { + psRGXCreateTransferContextOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateTransferContext_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXCreateTransferContextOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + IMG_TRUE); + if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateTransferContext_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXCreateTransferContextOUT->eError = + PVRSRVRGXCreateTransferContextKM(psConnection, + OSGetDevNode(psConnection), + psRGXCreateTransferContextIN-> + ui32Priority, + psRGXCreateTransferContextIN-> + ui32FrameworkCmdize, + psFrameworkCmdInt, hPrivDataInt, + psRGXCreateTransferContextIN-> + ui32PackedCCBSizeU8888, + psRGXCreateTransferContextIN-> + ui32ContextFlags, + &psTransferContextInt, + &psCLIPMRMemInt, &psUSCPMRMemInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)) + { + goto RGXCreateTransferContext_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXCreateTransferContextOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateTransferContextOUT-> + hTransferContext, + (void *)psTransferContextInt, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateTransferContextpsTransferContextIntRelease); + if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateTransferContext_exit; + } + + psRGXCreateTransferContextOUT->eError = + PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateTransferContextOUT-> + hCLIPMRMem, (void *)psCLIPMRMemInt, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psRGXCreateTransferContextOUT-> + hTransferContext); + if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateTransferContext_exit; + } + + psRGXCreateTransferContextOUT->eError = + PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateTransferContextOUT-> + hUSCPMRMem, (void *)psUSCPMRMemInt, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psRGXCreateTransferContextOUT-> + hTransferContext); + if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateTransferContext_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXCreateTransferContext_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXCreateTransferContextOUT->eError != PVRSRV_OK) + { + if (psRGXCreateTransferContextOUT->hTransferContext) + { + PVRSRV_ERROR eError; + + /* Lock over handle creation cleanup. */ + LockHandle(psConnection->psHandleBase); + + eError = + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + (IMG_HANDLE) + psRGXCreateTransferContextOUT-> + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT); + if (unlikely + ((eError != PVRSRV_OK) + && (eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) + || (eError == PVRSRV_ERROR_RETRY)); + + /* Avoid freeing/destroying/releasing the resource a second time below */ + psTransferContextInt = NULL; + /* Release now we have cleaned up creation handles. */ + UnlockHandle(psConnection->psHandleBase); + + } + + if (psTransferContextInt) + { + PVRSRVRGXDestroyTransferContextKM(psTransferContextInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDestroyTransferContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXDestroyTransferContextIN_UI8, + IMG_UINT8 * + psRGXDestroyTransferContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT + *psRGXDestroyTransferContextIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyTransferContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT + *psRGXDestroyTransferContextOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT *) + 
IMG_OFFSET_ADDR(psRGXDestroyTransferContextOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psRGXDestroyTransferContextOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psRGXDestroyTransferContextIN-> + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT); + if (unlikely + ((psRGXDestroyTransferContextOUT->eError != PVRSRV_OK) + && (psRGXDestroyTransferContextOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psRGXDestroyTransferContextOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyTransferContext_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXDestroyTransferContext_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSetTransferContextPriority(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXSetTransferContextPriorityIN_UI8, + IMG_UINT8 * + psRGXSetTransferContextPriorityOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY + *psRGXSetTransferContextPriorityIN = + (PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXSetTransferContextPriorityIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY + *psRGXSetTransferContextPriorityOUT = + (PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXSetTransferContextPriorityOUT_UI8, 0); + + IMG_HANDLE hTransferContext = + psRGXSetTransferContextPriorityIN->hTransferContext; + RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetTransferContextPriorityOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psTransferContextInt, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXSetTransferContextPriorityOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetTransferContextPriority_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetTransferContextPriorityOUT->eError = + PVRSRVRGXSetTransferContextPriorityKM(psConnection, + OSGetDevNode(psConnection), + psTransferContextInt, + psRGXSetTransferContextPriorityIN-> + ui32Priority); + +RGXSetTransferContextPriority_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psTransferContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSubmitTransfer2IN_UI8, + IMG_UINT8 * psRGXSubmitTransfer2OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2 *psRGXSubmitTransfer2IN = + (PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2 *) + IMG_OFFSET_ADDR(psRGXSubmitTransfer2IN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2 *psRGXSubmitTransfer2OUT = + (PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2 *) + IMG_OFFSET_ADDR(psRGXSubmitTransfer2OUT_UI8, 0); + + IMG_HANDLE hTransferContext = psRGXSubmitTransfer2IN->hTransferContext; + RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL; + IMG_UINT32 *ui32ClientUpdateCountInt = NULL; + SYNC_PRIMITIVE_BLOCK ***psUpdateUFOSyncPrimBlockInt = NULL; + IMG_HANDLE **hUpdateUFOSyncPrimBlockInt2 = NULL; + IMG_UINT32 **ui32UpdateSyncOffsetInt = NULL; + IMG_UINT32 **ui32UpdateValueInt = NULL; + IMG_CHAR *uiUpdateFenceNameInt = NULL; + IMG_UINT32 *ui32CommandSizeInt = NULL; + IMG_UINT8 **ui8FWCommandInt = NULL; + IMG_UINT32 *ui32TQPrepareFlagsInt = NULL; + IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; + PMR **psSyncPMRsInt = NULL; + IMG_HANDLE *hSyncPMRsInt2 = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; + IMG_BYTE *pArrayArgsBuffer2 = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) + + (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) + + (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) + + (psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + + (psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *)) + + (psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; + IMG_UINT32 ui32BufferSize2 = 0; + IMG_UINT32 ui32NextOffset2 = 0; + + if 
(psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + + ui32BufferSize += + psRGXSubmitTransfer2IN->ui32PrepareCount * + sizeof(SYNC_PRIMITIVE_BLOCK **); + ui32BufferSize += + psRGXSubmitTransfer2IN->ui32PrepareCount * + sizeof(IMG_HANDLE **); + ui32BufferSize += + psRGXSubmitTransfer2IN->ui32PrepareCount * + sizeof(IMG_UINT32 *); + ui32BufferSize += + psRGXSubmitTransfer2IN->ui32PrepareCount * + sizeof(IMG_UINT32 *); + ui32BufferSize += + psRGXSubmitTransfer2IN->ui32PrepareCount * + sizeof(IMG_UINT8 *); + } + + if (unlikely + (psRGXSubmitTransfer2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXSubmitTransfer2_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXSubmitTransfer2IN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXSubmitTransfer2IN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXSubmitTransfer2_exit; + } + } + } + + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + ui32ClientUpdateCountInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXSubmitTransfer2IN->ui32PrepareCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientUpdateCountInt, + (const void __user *)psRGXSubmitTransfer2IN-> + pui32ClientUpdateCount, + psRGXSubmitTransfer2IN->ui32PrepareCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + } + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + /* Assigning psUpdateUFOSyncPrimBlockInt to the right offset in the pool buffer for first dimension */ + psUpdateUFOSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK ***) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXSubmitTransfer2IN->ui32PrepareCount * + sizeof(SYNC_PRIMITIVE_BLOCK **); + /* Assigning hUpdateUFOSyncPrimBlockInt2 to the right offset in the pool buffer for first dimension */ + hUpdateUFOSyncPrimBlockInt2 = + (IMG_HANDLE **) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXSubmitTransfer2IN->ui32PrepareCount * + sizeof(IMG_HANDLE); + } + + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + /* Assigning ui32UpdateSyncOffsetInt to the right offset in the pool buffer for first 
dimension */ + ui32UpdateSyncOffsetInt = + (IMG_UINT32 **) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXSubmitTransfer2IN->ui32PrepareCount * + sizeof(IMG_UINT32 *); + } + + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + /* Assigning ui32UpdateValueInt to the right offset in the pool buffer for first dimension */ + ui32UpdateValueInt = + (IMG_UINT32 **) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXSubmitTransfer2IN->ui32PrepareCount * + sizeof(IMG_UINT32 *); + } + + { + uiUpdateFenceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceNameInt, + (const void __user *)psRGXSubmitTransfer2IN-> + puiUpdateFenceName, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + ((IMG_CHAR *) + uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + ui32CommandSizeInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXSubmitTransfer2IN->ui32PrepareCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32CommandSizeInt, + (const void __user *)psRGXSubmitTransfer2IN-> + pui32CommandSize, + psRGXSubmitTransfer2IN->ui32PrepareCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + } + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + /* Assigning ui8FWCommandInt to the right offset in the pool buffer for first 
dimension */ + ui8FWCommandInt = + (IMG_UINT8 **) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXSubmitTransfer2IN->ui32PrepareCount * + sizeof(IMG_UINT8 *); + } + + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + ui32TQPrepareFlagsInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXSubmitTransfer2IN->ui32PrepareCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32TQPrepareFlagsInt, + (const void __user *)psRGXSubmitTransfer2IN-> + pui32TQPrepareFlags, + psRGXSubmitTransfer2IN->ui32PrepareCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + } + if (psRGXSubmitTransfer2IN->ui32SyncPMRCount != 0) + { + ui32SyncPMRFlagsInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXSubmitTransfer2IN->ui32SyncPMRCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32SyncPMRFlagsInt, + (const void __user *)psRGXSubmitTransfer2IN-> + pui32SyncPMRFlags, + psRGXSubmitTransfer2IN->ui32SyncPMRCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + } + if (psRGXSubmitTransfer2IN->ui32SyncPMRCount != 0) + { + psSyncPMRsInt = + (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *); + hSyncPMRsInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXSubmitTransfer2IN->ui32SyncPMRCount * + sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXSubmitTransfer2IN->ui32SyncPMRCount 
* sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hSyncPMRsInt2, + (const void __user *)psRGXSubmitTransfer2IN->phSyncPMRs, + psRGXSubmitTransfer2IN->ui32SyncPMRCount * + sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + } + + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + IMG_UINT32 i; + for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) + { + ui32BufferSize2 += + ui32ClientUpdateCountInt[i] * + sizeof(SYNC_PRIMITIVE_BLOCK *); + ui32BufferSize2 += + ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE *); + ui32BufferSize2 += + ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32); + ui32BufferSize2 += + ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32); + ui32BufferSize2 += + ui32CommandSizeInt[i] * sizeof(IMG_UINT8); + } + } + + if (ui32BufferSize2 != 0) + { + pArrayArgsBuffer2 = OSAllocMemNoStats(ui32BufferSize2); + + if (!pArrayArgsBuffer2) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXSubmitTransfer2_exit; + } + } + + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + IMG_UINT32 i; + for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) + { + if (ui32ClientUpdateCountInt[i] > PVRSRV_MAX_SYNCS) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXSubmitTransfer2_exit; + } + + /* Assigning each psUpdateUFOSyncPrimBlockInt to the right offset in the pool buffer (this is the second dimension) */ + psUpdateUFOSyncPrimBlockInt[i] = + (SYNC_PRIMITIVE_BLOCK **) + IMG_OFFSET_ADDR(pArrayArgsBuffer2, ui32NextOffset2); + ui32NextOffset2 += + ui32ClientUpdateCountInt[i] * + sizeof(SYNC_PRIMITIVE_BLOCK *); + /* Assigning each hUpdateUFOSyncPrimBlockInt2 to the right offset in the pool buffer (this is the second dimension) */ + hUpdateUFOSyncPrimBlockInt2[i] = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, + ui32NextOffset2); + ui32NextOffset2 += + 
ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE); + } + } + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + IMG_UINT32 i; + for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) + { + /* Assigning each ui32UpdateSyncOffsetInt to the right offset in the pool buffer (this is the second dimension) */ + ui32UpdateSyncOffsetInt[i] = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, + ui32NextOffset2); + ui32NextOffset2 += + ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32); + } + } + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + IMG_UINT32 i; + for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) + { + /* Assigning each ui32UpdateValueInt to the right offset in the pool buffer (this is the second dimension) */ + ui32UpdateValueInt[i] = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, + ui32NextOffset2); + ui32NextOffset2 += + ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32); + } + } + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + IMG_UINT32 i; + for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) + { + if (ui32CommandSizeInt[i] > + RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXSubmitTransfer2_exit; + } + + /* Assigning each ui8FWCommandInt to the right offset in the pool buffer (this is the second dimension) */ + ui8FWCommandInt[i] = + (IMG_UINT8 *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, + ui32NextOffset2); + ui32NextOffset2 += + ui32CommandSizeInt[i] * sizeof(IMG_UINT8); + } + } + + { + IMG_UINT32 i; + IMG_HANDLE **psPtr; + + /* Loop over all the pointers in the array copying the data into the kernel */ + for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) + { + /* Copy the pointer over from the client side */ + if (OSCopyFromUser + (NULL, &psPtr, + (const void __user *)&psRGXSubmitTransfer2IN-> + phUpdateUFOSyncPrimBlock[i], + sizeof(IMG_HANDLE **)) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = + 
PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + + /* Copy the data over */ + if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE)) > + 0) + { + if (OSCopyFromUser + (NULL, (hUpdateUFOSyncPrimBlockInt2[i]), + (const void __user *)psPtr, + (ui32ClientUpdateCountInt[i] * + sizeof(IMG_HANDLE))) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + } + } + } + + { + IMG_UINT32 i; + IMG_UINT32 **psPtr; + + /* Loop over all the pointers in the array copying the data into the kernel */ + for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) + { + /* Copy the pointer over from the client side */ + if (OSCopyFromUser + (NULL, &psPtr, + (const void __user *)&psRGXSubmitTransfer2IN-> + pui32UpdateSyncOffset[i], + sizeof(IMG_UINT32 **)) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + + /* Copy the data over */ + if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)) > + 0) + { + if (OSCopyFromUser + (NULL, (ui32UpdateSyncOffsetInt[i]), + (const void __user *)psPtr, + (ui32ClientUpdateCountInt[i] * + sizeof(IMG_UINT32))) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + } + } + } + + { + IMG_UINT32 i; + IMG_UINT32 **psPtr; + + /* Loop over all the pointers in the array copying the data into the kernel */ + for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) + { + /* Copy the pointer over from the client side */ + if (OSCopyFromUser + (NULL, &psPtr, + (const void __user *)&psRGXSubmitTransfer2IN-> + pui32UpdateValue[i], + sizeof(IMG_UINT32 **)) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + + /* Copy the data over */ + if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)) > + 0) + { + if (OSCopyFromUser + (NULL, (ui32UpdateValueInt[i]), + 
(const void __user *)psPtr, + (ui32ClientUpdateCountInt[i] * + sizeof(IMG_UINT32))) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + } + } + } + + { + IMG_UINT32 i; + IMG_UINT8 **psPtr; + + /* Loop over all the pointers in the array copying the data into the kernel */ + for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) + { + /* Copy the pointer over from the client side */ + if (OSCopyFromUser + (NULL, &psPtr, + (const void __user *)&psRGXSubmitTransfer2IN-> + pui8FWCommand[i], + sizeof(IMG_UINT8 **)) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + + /* Copy the data over */ + if ((ui32CommandSizeInt[i] * sizeof(IMG_UINT8)) > 0) + { + if (OSCopyFromUser + (NULL, (ui8FWCommandInt[i]), + (const void __user *)psPtr, + (ui32CommandSizeInt[i] * + sizeof(IMG_UINT8))) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + } + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSubmitTransfer2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psTransferContextInt, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXSubmitTransfer2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSubmitTransfer2_exit; + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) + { + IMG_UINT32 j; + for (j = 0; j < ui32ClientUpdateCountInt[i]; j++) + { + /* Look up the address from the handle */ + psRGXSubmitTransfer2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection-> + psHandleBase, + (void **) + &psUpdateUFOSyncPrimBlockInt + [i][j], + hUpdateUFOSyncPrimBlockInt2 + [i][j], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely + (psRGXSubmitTransfer2OUT->eError != + PVRSRV_OK)) + { + UnlockHandle(psConnection-> + psHandleBase); + goto RGXSubmitTransfer2_exit; + } + } + } + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXSubmitTransfer2IN->ui32SyncPMRCount; i++) + { + /* Look up the address from the handle */ + psRGXSubmitTransfer2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection-> + psHandleBase, + (void **) + &psSyncPMRsInt[i], + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely + (psRGXSubmitTransfer2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSubmitTransfer2_exit; + } + } + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXSubmitTransfer2OUT->eError = + PVRSRVRGXSubmitTransferKM(psTransferContextInt, + psRGXSubmitTransfer2IN-> + ui32ClientCacheOpSeqNum, + psRGXSubmitTransfer2IN->ui32PrepareCount, + ui32ClientUpdateCountInt, + psUpdateUFOSyncPrimBlockInt, + ui32UpdateSyncOffsetInt, + ui32UpdateValueInt, + psRGXSubmitTransfer2IN->hCheckFenceFD, + psRGXSubmitTransfer2IN->h2DUpdateTimeline, + &psRGXSubmitTransfer2OUT->h2DUpdateFence, + psRGXSubmitTransfer2IN->h3DUpdateTimeline, + &psRGXSubmitTransfer2OUT->h3DUpdateFence, + uiUpdateFenceNameInt, ui32CommandSizeInt, + ui8FWCommandInt, ui32TQPrepareFlagsInt, + psRGXSubmitTransfer2IN->ui32ExtJobRef, + psRGXSubmitTransfer2IN->ui32SyncPMRCount, + ui32SyncPMRFlagsInt, psSyncPMRsInt); + +RGXSubmitTransfer2_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psTransferContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT); + } + + if (hUpdateUFOSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) + { + IMG_UINT32 j; + for (j = 0; j < ui32ClientUpdateCountInt[i]; j++) + { + + /* Unreference the previously looked up handle */ + if (hUpdateUFOSyncPrimBlockInt2[i][j]) + { + PVRSRVReleaseHandleUnlocked + (psConnection->psHandleBase, + hUpdateUFOSyncPrimBlockInt2[i][j], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + } + + if (hSyncPMRsInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXSubmitTransfer2IN->ui32SyncPMRCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hSyncPMRsInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + } + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize2 == ui32NextOffset2); + + if (pArrayArgsBuffer2) + OSFreeMemNoStats(pArrayArgsBuffer2); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSetTransferContextProperty(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXSetTransferContextPropertyIN_UI8, + IMG_UINT8 * + psRGXSetTransferContextPropertyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPROPERTY + *psRGXSetTransferContextPropertyIN = + (PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetTransferContextPropertyIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPROPERTY + *psRGXSetTransferContextPropertyOUT = + (PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetTransferContextPropertyOUT_UI8, 0); + + IMG_HANDLE hTransferContext = + psRGXSetTransferContextPropertyIN->hTransferContext; + RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetTransferContextPropertyOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psTransferContextInt, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXSetTransferContextPropertyOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetTransferContextProperty_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetTransferContextPropertyOUT->eError = + PVRSRVRGXSetTransferContextPropertyKM(psTransferContextInt, + psRGXSetTransferContextPropertyIN-> + ui32Property, + psRGXSetTransferContextPropertyIN-> + ui64Input, + &psRGXSetTransferContextPropertyOUT-> + ui64Output); + +RGXSetTransferContextProperty_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psTransferContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +#endif /* SUPPORT_RGXTQ_BRIDGE */ + +#if defined(SUPPORT_RGXTQ_BRIDGE) +PVRSRV_ERROR InitRGXTQBridge(void); +PVRSRV_ERROR DeinitRGXTQBridge(void); + +/* + * Register all RGXTQ functions with services + */ +PVRSRV_ERROR InitRGXTQBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, + PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT, + PVRSRVBridgeRGXCreateTransferContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, + PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT, + PVRSRVBridgeRGXDestroyTransferContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, + PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY, + PVRSRVBridgeRGXSetTransferContextPriority, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, + PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2, + PVRSRVBridgeRGXSubmitTransfer2, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, + PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY, + PVRSRVBridgeRGXSetTransferContextProperty, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxtq functions with services + */ +PVRSRV_ERROR DeinitRGXTQBridge(void) +{ + + 
UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, + PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, + PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, + PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, + PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, + PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY); + + return PVRSRV_OK; +} +#else /* SUPPORT_RGXTQ_BRIDGE */ +/* This bridge is conditional on SUPPORT_RGXTQ_BRIDGE - when not defined, + * do not populate the dispatch table with its functions + */ +#define InitRGXTQBridge() \ + PVRSRV_OK + +#define DeinitRGXTQBridge() \ + PVRSRV_OK + +#endif /* SUPPORT_RGXTQ_BRIDGE */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/ri_bridge/client_ri_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/ri_bridge/client_ri_bridge.h new file mode 100644 index 000000000000..6778fce58df7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/ri_bridge/client_ri_bridge.h @@ -0,0 +1,120 @@ +/******************************************************************************* +@File +@Title Client bridge header for ri +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for ri +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef CLIENT_RI_BRIDGE_H +#define CLIENT_RI_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_ri_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWritePMREntry(IMG_HANDLE hBridge, + IMG_HANDLE + hPMRHandle); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteMEMDESCEntry(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMRHandle, + IMG_UINT32 + ui32TextBSize, + const IMG_CHAR + * puiTextB, + IMG_UINT64 + ui64Offset, + IMG_UINT64 + ui64Size, + IMG_BOOL + bIsImport, + IMG_BOOL + bIsSuballoc, + IMG_HANDLE * + phRIHandle); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteProcListEntry(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32TextBSize, + const IMG_CHAR + * puiTextB, + IMG_UINT64 + ui64Size, + IMG_UINT64 + ui64DevVAddr, + IMG_HANDLE * + phRIHandle); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIUpdateMEMDESCAddr(IMG_HANDLE + hBridge, + IMG_HANDLE + hRIHandle, + IMG_DEV_VIRTADDR + sAddr); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDeleteMEMDESCEntry(IMG_HANDLE + hBridge, + IMG_HANDLE + hRIHandle); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpList(IMG_HANDLE hBridge, + IMG_HANDLE hPMRHandle); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpAll(IMG_HANDLE hBridge); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpProcess(IMG_HANDLE hBridge, + IMG_PID ui32Pid); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWritePMREntryWithOwner(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMRHandle, + IMG_PID + ui32Owner); + +#endif /* CLIENT_RI_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/ri_bridge/client_ri_direct_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/ri_bridge/client_ri_direct_bridge.c new file mode 100644 index 000000000000..d7c8dfb0769a --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/generated/rogue/ri_bridge/client_ri_direct_bridge.c @@ -0,0 +1,217 @@ +/******************************************************************************* +@File +@Title Direct client bridge for ri +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for ri + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include "client_ri_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "ri_typedefs.h" + +#include "ri_server.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWritePMREntry(IMG_HANDLE hBridge, + IMG_HANDLE + hPMRHandle) +{ + PVRSRV_ERROR eError; + PMR *psPMRHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRHandleInt = (PMR *) hPMRHandle; + + eError = RIWritePMREntryKM(psPMRHandleInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteMEMDESCEntry(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMRHandle, + IMG_UINT32 + ui32TextBSize, + const IMG_CHAR + * puiTextB, + IMG_UINT64 + ui64Offset, + IMG_UINT64 + ui64Size, + IMG_BOOL + bIsImport, + IMG_BOOL + bIsSuballoc, + IMG_HANDLE * + phRIHandle) +{ + PVRSRV_ERROR eError; + PMR *psPMRHandleInt; + RI_HANDLE psRIHandleInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRHandleInt = (PMR *) hPMRHandle; + + eError = + RIWriteMEMDESCEntryKM(psPMRHandleInt, + ui32TextBSize, + puiTextB, + ui64Offset, + ui64Size, + bIsImport, bIsSuballoc, &psRIHandleInt); + + *phRIHandle = psRIHandleInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteProcListEntry(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32TextBSize, + const IMG_CHAR + * puiTextB, + IMG_UINT64 + ui64Size, + IMG_UINT64 + ui64DevVAddr, + IMG_HANDLE * + 
phRIHandle) +{ + PVRSRV_ERROR eError; + RI_HANDLE psRIHandleInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = + RIWriteProcListEntryKM(ui32TextBSize, + puiTextB, + ui64Size, ui64DevVAddr, &psRIHandleInt); + + *phRIHandle = psRIHandleInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIUpdateMEMDESCAddr(IMG_HANDLE + hBridge, + IMG_HANDLE + hRIHandle, + IMG_DEV_VIRTADDR + sAddr) +{ + PVRSRV_ERROR eError; + RI_HANDLE psRIHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psRIHandleInt = (RI_HANDLE) hRIHandle; + + eError = RIUpdateMEMDESCAddrKM(psRIHandleInt, sAddr); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDeleteMEMDESCEntry(IMG_HANDLE + hBridge, + IMG_HANDLE + hRIHandle) +{ + PVRSRV_ERROR eError; + RI_HANDLE psRIHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psRIHandleInt = (RI_HANDLE) hRIHandle; + + eError = RIDeleteMEMDESCEntryKM(psRIHandleInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpList(IMG_HANDLE hBridge, + IMG_HANDLE hPMRHandle) +{ + PVRSRV_ERROR eError; + PMR *psPMRHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRHandleInt = (PMR *) hPMRHandle; + + eError = RIDumpListKM(psPMRHandleInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpAll(IMG_HANDLE hBridge) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = RIDumpAllKM(); + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpProcess(IMG_HANDLE hBridge, + IMG_PID ui32Pid) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = RIDumpProcessKM(ui32Pid); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWritePMREntryWithOwner(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMRHandle, + IMG_PID + ui32Owner) +{ + PVRSRV_ERROR eError; + PMR *psPMRHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRHandleInt = (PMR *) hPMRHandle; + + eError = 
RIWritePMREntryWithOwnerKM(psPMRHandleInt, ui32Owner); + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/ri_bridge/common_ri_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/ri_bridge/common_ri_bridge.h new file mode 100644 index 000000000000..e562671fff34 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/ri_bridge/common_ri_bridge.h @@ -0,0 +1,224 @@ +/******************************************************************************* +@File +@Title Common bridge header for ri +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for ri +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_RI_BRIDGE_H +#define COMMON_RI_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "ri_typedefs.h" + +#define PVRSRV_BRIDGE_RI_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR PVRSRV_BRIDGE_RI_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RI_RIDUMPLIST PVRSRV_BRIDGE_RI_CMD_FIRST+5 +#define PVRSRV_BRIDGE_RI_RIDUMPALL PVRSRV_BRIDGE_RI_CMD_FIRST+6 +#define PVRSRV_BRIDGE_RI_RIDUMPPROCESS PVRSRV_BRIDGE_RI_CMD_FIRST+7 +#define PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER PVRSRV_BRIDGE_RI_CMD_FIRST+8 +#define PVRSRV_BRIDGE_RI_CMD_LAST (PVRSRV_BRIDGE_RI_CMD_FIRST+8) + +/******************************************* + RIWritePMREntry + *******************************************/ + +/* Bridge in structure for RIWritePMREntry */ +typedef struct PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY_TAG +{ + IMG_HANDLE 
hPMRHandle; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY; + +/* Bridge out structure for RIWritePMREntry */ +typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY; + +/******************************************* + RIWriteMEMDESCEntry + *******************************************/ + +/* Bridge in structure for RIWriteMEMDESCEntry */ +typedef struct PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY_TAG +{ + IMG_HANDLE hPMRHandle; + IMG_UINT32 ui32TextBSize; + const IMG_CHAR *puiTextB; + IMG_UINT64 ui64Offset; + IMG_UINT64 ui64Size; + IMG_BOOL bIsImport; + IMG_BOOL bIsSuballoc; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY; + +/* Bridge out structure for RIWriteMEMDESCEntry */ +typedef struct PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY_TAG +{ + IMG_HANDLE hRIHandle; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY; + +/******************************************* + RIWriteProcListEntry + *******************************************/ + +/* Bridge in structure for RIWriteProcListEntry */ +typedef struct PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY_TAG +{ + IMG_UINT32 ui32TextBSize; + const IMG_CHAR *puiTextB; + IMG_UINT64 ui64Size; + IMG_UINT64 ui64DevVAddr; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY; + +/* Bridge out structure for RIWriteProcListEntry */ +typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY_TAG +{ + IMG_HANDLE hRIHandle; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY; + +/******************************************* + RIUpdateMEMDESCAddr + *******************************************/ + +/* Bridge in structure for RIUpdateMEMDESCAddr */ +typedef struct PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR_TAG +{ + IMG_HANDLE hRIHandle; + IMG_DEV_VIRTADDR sAddr; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR; + +/* Bridge out structure for 
RIUpdateMEMDESCAddr */ +typedef struct PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR; + +/******************************************* + RIDeleteMEMDESCEntry + *******************************************/ + +/* Bridge in structure for RIDeleteMEMDESCEntry */ +typedef struct PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY_TAG +{ + IMG_HANDLE hRIHandle; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY; + +/* Bridge out structure for RIDeleteMEMDESCEntry */ +typedef struct PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY; + +/******************************************* + RIDumpList + *******************************************/ + +/* Bridge in structure for RIDumpList */ +typedef struct PVRSRV_BRIDGE_IN_RIDUMPLIST_TAG +{ + IMG_HANDLE hPMRHandle; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIDUMPLIST; + +/* Bridge out structure for RIDumpList */ +typedef struct PVRSRV_BRIDGE_OUT_RIDUMPLIST_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIDUMPLIST; + +/******************************************* + RIDumpAll + *******************************************/ + +/* Bridge in structure for RIDumpAll */ +typedef struct PVRSRV_BRIDGE_IN_RIDUMPALL_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIDUMPALL; + +/* Bridge out structure for RIDumpAll */ +typedef struct PVRSRV_BRIDGE_OUT_RIDUMPALL_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIDUMPALL; + +/******************************************* + RIDumpProcess + *******************************************/ + +/* Bridge in structure for RIDumpProcess */ +typedef struct PVRSRV_BRIDGE_IN_RIDUMPPROCESS_TAG +{ + IMG_PID ui32Pid; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIDUMPPROCESS; + +/* Bridge out structure for RIDumpProcess */ +typedef struct 
PVRSRV_BRIDGE_OUT_RIDUMPPROCESS_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIDUMPPROCESS; + +/******************************************* + RIWritePMREntryWithOwner + *******************************************/ + +/* Bridge in structure for RIWritePMREntryWithOwner */ +typedef struct PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER_TAG +{ + IMG_HANDLE hPMRHandle; + IMG_PID ui32Owner; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER; + +/* Bridge out structure for RIWritePMREntryWithOwner */ +typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER; + +#endif /* COMMON_RI_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/ri_bridge/server_ri_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/ri_bridge/server_ri_bridge.c new file mode 100644 index 000000000000..c82ff7f3c0ff --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/ri_bridge/server_ri_bridge.c @@ -0,0 +1,808 @@ +/******************************************************************************* +@File +@Title Server bridge for ri +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for ri +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "ri_server.h" + +#include "common_ri_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeRIWritePMREntry(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRIWritePMREntryIN_UI8, + IMG_UINT8 * psRIWritePMREntryOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY *psRIWritePMREntryIN = + (PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY *) + IMG_OFFSET_ADDR(psRIWritePMREntryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY *psRIWritePMREntryOUT = + (PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY *) + IMG_OFFSET_ADDR(psRIWritePMREntryOUT_UI8, 0); + + IMG_HANDLE hPMRHandle = psRIWritePMREntryIN->hPMRHandle; + PMR *psPMRHandleInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRIWritePMREntryOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRHandleInt, + hPMRHandle, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psRIWritePMREntryOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RIWritePMREntry_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRIWritePMREntryOUT->eError = RIWritePMREntryKM(psPMRHandleInt); + +RIWritePMREntry_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMRHandle, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static PVRSRV_ERROR _RIWriteMEMDESCEntrypsRIHandleIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RIDeleteMEMDESCEntryKM((RI_HANDLE) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRIWriteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRIWriteMEMDESCEntryIN_UI8, + IMG_UINT8 * psRIWriteMEMDESCEntryOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY *psRIWriteMEMDESCEntryIN = + (PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY *) + IMG_OFFSET_ADDR(psRIWriteMEMDESCEntryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY *psRIWriteMEMDESCEntryOUT = + (PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY *) + IMG_OFFSET_ADDR(psRIWriteMEMDESCEntryOUT_UI8, 0); + + IMG_HANDLE hPMRHandle = psRIWriteMEMDESCEntryIN->hPMRHandle; + PMR *psPMRHandleInt = NULL; + IMG_CHAR *uiTextBInt = NULL; + RI_HANDLE psRIHandleInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) + 0; + + if (unlikely + (psRIWriteMEMDESCEntryIN->ui32TextBSize > + DEVMEM_ANNOTATION_MAX_LEN)) + { + psRIWriteMEMDESCEntryOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RIWriteMEMDESCEntry_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRIWriteMEMDESCEntryIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRIWriteMEMDESCEntryIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRIWriteMEMDESCEntryOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RIWriteMEMDESCEntry_exit; + } + } + } + + if (psRIWriteMEMDESCEntryIN->ui32TextBSize != 0) + { + uiTextBInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiTextBInt, + (const void __user *)psRIWriteMEMDESCEntryIN->puiTextB, + psRIWriteMEMDESCEntryIN->ui32TextBSize * + sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRIWriteMEMDESCEntryOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RIWriteMEMDESCEntry_exit; + } + ((IMG_CHAR *) + uiTextBInt)[(psRIWriteMEMDESCEntryIN->ui32TextBSize * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRIWriteMEMDESCEntryOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRHandleInt, + hPMRHandle, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RIWriteMEMDESCEntry_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRIWriteMEMDESCEntryOUT->eError = + RIWriteMEMDESCEntryKM(psPMRHandleInt, + psRIWriteMEMDESCEntryIN->ui32TextBSize, + uiTextBInt, + psRIWriteMEMDESCEntryIN->ui64Offset, + psRIWriteMEMDESCEntryIN->ui64Size, + psRIWriteMEMDESCEntryIN->bIsImport, + psRIWriteMEMDESCEntryIN->bIsSuballoc, + &psRIHandleInt); + /* Exit early if bridged call fails */ + if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)) + { + goto RIWriteMEMDESCEntry_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRIWriteMEMDESCEntryOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRIWriteMEMDESCEntryOUT->hRIHandle, + (void *)psRIHandleInt, + PVRSRV_HANDLE_TYPE_RI_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RIWriteMEMDESCEntrypsRIHandleIntRelease); + if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RIWriteMEMDESCEntry_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RIWriteMEMDESCEntry_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMRHandle, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + if (psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK) + { + if (psRIHandleInt) + { + RIDeleteMEMDESCEntryKM(psRIHandleInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static PVRSRV_ERROR _RIWriteProcListEntrypsRIHandleIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RIDeleteMEMDESCEntryKM((RI_HANDLE) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRIWriteProcListEntry(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRIWriteProcListEntryIN_UI8, + IMG_UINT8 * psRIWriteProcListEntryOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY *psRIWriteProcListEntryIN = + (PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY *) + IMG_OFFSET_ADDR(psRIWriteProcListEntryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY *psRIWriteProcListEntryOUT = + (PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY *) + IMG_OFFSET_ADDR(psRIWriteProcListEntryOUT_UI8, 0); + + IMG_CHAR *uiTextBInt = NULL; + RI_HANDLE psRIHandleInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) + 0; + + if (unlikely + (psRIWriteProcListEntryIN->ui32TextBSize > + DEVMEM_ANNOTATION_MAX_LEN)) + { + psRIWriteProcListEntryOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RIWriteProcListEntry_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRIWriteProcListEntryIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRIWriteProcListEntryIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRIWriteProcListEntryOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RIWriteProcListEntry_exit; + } + } + } + + if (psRIWriteProcListEntryIN->ui32TextBSize != 0) + { + uiTextBInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiTextBInt, + (const void __user *)psRIWriteProcListEntryIN->puiTextB, + psRIWriteProcListEntryIN->ui32TextBSize * + sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRIWriteProcListEntryOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RIWriteProcListEntry_exit; + } + ((IMG_CHAR *) + uiTextBInt)[(psRIWriteProcListEntryIN->ui32TextBSize * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + + psRIWriteProcListEntryOUT->eError = + RIWriteProcListEntryKM(psRIWriteProcListEntryIN->ui32TextBSize, + uiTextBInt, + psRIWriteProcListEntryIN->ui64Size, + psRIWriteProcListEntryIN->ui64DevVAddr, + &psRIHandleInt); + /* Exit early if bridged call fails */ + if (unlikely(psRIWriteProcListEntryOUT->eError != PVRSRV_OK)) + { + goto RIWriteProcListEntry_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psRIWriteProcListEntryOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRIWriteProcListEntryOUT->hRIHandle, + (void *)psRIHandleInt, + PVRSRV_HANDLE_TYPE_RI_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RIWriteProcListEntrypsRIHandleIntRelease); + if (unlikely(psRIWriteProcListEntryOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RIWriteProcListEntry_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RIWriteProcListEntry_exit: + + if (psRIWriteProcListEntryOUT->eError != PVRSRV_OK) + { + if (psRIHandleInt) + { + RIDeleteMEMDESCEntryKM(psRIHandleInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRIUpdateMEMDESCAddr(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRIUpdateMEMDESCAddrIN_UI8, + IMG_UINT8 * psRIUpdateMEMDESCAddrOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR *psRIUpdateMEMDESCAddrIN = + (PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR *) + IMG_OFFSET_ADDR(psRIUpdateMEMDESCAddrIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR *psRIUpdateMEMDESCAddrOUT = + (PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR *) + IMG_OFFSET_ADDR(psRIUpdateMEMDESCAddrOUT_UI8, 0); + + IMG_HANDLE hRIHandle = psRIUpdateMEMDESCAddrIN->hRIHandle; + RI_HANDLE psRIHandleInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRIUpdateMEMDESCAddrOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRIHandleInt, + hRIHandle, + PVRSRV_HANDLE_TYPE_RI_HANDLE, IMG_TRUE); + if (unlikely(psRIUpdateMEMDESCAddrOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RIUpdateMEMDESCAddr_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRIUpdateMEMDESCAddrOUT->eError = + RIUpdateMEMDESCAddrKM(psRIHandleInt, + psRIUpdateMEMDESCAddrIN->sAddr); + +RIUpdateMEMDESCAddr_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psRIHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRIHandle, + PVRSRV_HANDLE_TYPE_RI_HANDLE); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRIDeleteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRIDeleteMEMDESCEntryIN_UI8, + IMG_UINT8 * psRIDeleteMEMDESCEntryOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY *psRIDeleteMEMDESCEntryIN = + (PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY *) + IMG_OFFSET_ADDR(psRIDeleteMEMDESCEntryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY *psRIDeleteMEMDESCEntryOUT = + (PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY *) + IMG_OFFSET_ADDR(psRIDeleteMEMDESCEntryOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psRIDeleteMEMDESCEntryOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psRIDeleteMEMDESCEntryIN->hRIHandle, + PVRSRV_HANDLE_TYPE_RI_HANDLE); + if (unlikely + ((psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_OK) + && (psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psRIDeleteMEMDESCEntryOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto RIDeleteMEMDESCEntry_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +RIDeleteMEMDESCEntry_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeRIDumpList(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRIDumpListIN_UI8, + IMG_UINT8 * psRIDumpListOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIDUMPLIST *psRIDumpListIN = + (PVRSRV_BRIDGE_IN_RIDUMPLIST *) IMG_OFFSET_ADDR(psRIDumpListIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_RIDUMPLIST *psRIDumpListOUT = + (PVRSRV_BRIDGE_OUT_RIDUMPLIST *) + IMG_OFFSET_ADDR(psRIDumpListOUT_UI8, 0); + + IMG_HANDLE hPMRHandle = psRIDumpListIN->hPMRHandle; + PMR *psPMRHandleInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRIDumpListOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRHandleInt, + hPMRHandle, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psRIDumpListOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RIDumpList_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRIDumpListOUT->eError = RIDumpListKM(psPMRHandleInt); + +RIDumpList_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMRHandle, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRIDumpAll(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRIDumpAllIN_UI8, + IMG_UINT8 * psRIDumpAllOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIDUMPALL *psRIDumpAllIN = + (PVRSRV_BRIDGE_IN_RIDUMPALL *) IMG_OFFSET_ADDR(psRIDumpAllIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_RIDUMPALL *psRIDumpAllOUT = + (PVRSRV_BRIDGE_OUT_RIDUMPALL *) IMG_OFFSET_ADDR(psRIDumpAllOUT_UI8, + 0); + + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psRIDumpAllIN); + + psRIDumpAllOUT->eError = RIDumpAllKM(); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRIDumpProcess(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRIDumpProcessIN_UI8, + IMG_UINT8 * psRIDumpProcessOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIDUMPPROCESS *psRIDumpProcessIN = + (PVRSRV_BRIDGE_IN_RIDUMPPROCESS *) + IMG_OFFSET_ADDR(psRIDumpProcessIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RIDUMPPROCESS *psRIDumpProcessOUT = + (PVRSRV_BRIDGE_OUT_RIDUMPPROCESS *) + IMG_OFFSET_ADDR(psRIDumpProcessOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psConnection); + + psRIDumpProcessOUT->eError = + RIDumpProcessKM(psRIDumpProcessIN->ui32Pid); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRIWritePMREntryWithOwner(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRIWritePMREntryWithOwnerIN_UI8, + IMG_UINT8 * + psRIWritePMREntryWithOwnerOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER *psRIWritePMREntryWithOwnerIN + = + (PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER *) + IMG_OFFSET_ADDR(psRIWritePMREntryWithOwnerIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER + 
*psRIWritePMREntryWithOwnerOUT = + (PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER *) + IMG_OFFSET_ADDR(psRIWritePMREntryWithOwnerOUT_UI8, 0); + + IMG_HANDLE hPMRHandle = psRIWritePMREntryWithOwnerIN->hPMRHandle; + PMR *psPMRHandleInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRIWritePMREntryWithOwnerOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRHandleInt, + hPMRHandle, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psRIWritePMREntryWithOwnerOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RIWritePMREntryWithOwner_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRIWritePMREntryWithOwnerOUT->eError = + RIWritePMREntryWithOwnerKM(psPMRHandleInt, + psRIWritePMREntryWithOwnerIN->ui32Owner); + +RIWritePMREntryWithOwner_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMRHandle, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRIBridge(void); +PVRSRV_ERROR DeinitRIBridge(void); + +/* + * Register all RI functions with services + */ +PVRSRV_ERROR InitRIBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY, + PVRSRVBridgeRIWritePMREntry, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY, + PVRSRVBridgeRIWriteMEMDESCEntry, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY, + PVRSRVBridgeRIWriteProcListEntry, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR, + PVRSRVBridgeRIUpdateMEMDESCAddr, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY, + PVRSRVBridgeRIDeleteMEMDESCEntry, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPLIST, + PVRSRVBridgeRIDumpList, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPALL, + PVRSRVBridgeRIDumpAll, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPPROCESS, + PVRSRVBridgeRIDumpProcess, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER, + PVRSRVBridgeRIWritePMREntryWithOwner, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all ri functions with services + */ +PVRSRV_ERROR DeinitRIBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY); + + 
UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPLIST); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPALL); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIDUMPPROCESS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/srvcore_bridge/common_srvcore_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/srvcore_bridge/common_srvcore_bridge.h new file mode 100644 index 000000000000..d383279416e4 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/srvcore_bridge/common_srvcore_bridge.h @@ -0,0 +1,370 @@ +/******************************************************************************* +@File +@Title Common bridge header for srvcore +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for srvcore +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_SRVCORE_BRIDGE_H +#define COMMON_SRVCORE_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "pvrsrv_device_types.h" +#include "cache_ops.h" + +#define PVRSRV_BRIDGE_SRVCORE_CMD_FIRST 0 +#define PVRSRV_BRIDGE_SRVCORE_CONNECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+0 +#define PVRSRV_BRIDGE_SRVCORE_DISCONNECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+1 +#define PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+2 +#define PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+3 +#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+4 +#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+5 +#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+6 +#define PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+7 +#define PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+8 +#define PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+9 +#define PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+10 +#define PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+11 +#define PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+12 +#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+13 +#define PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+14 +#define PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+15 +#define PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+16 +#define PVRSRV_BRIDGE_SRVCORE_CMD_LAST (PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+16) + +/******************************************* + Connect + *******************************************/ + +/* Bridge in structure for Connect */ +typedef struct PVRSRV_BRIDGE_IN_CONNECT_TAG +{ + IMG_UINT32 
ui32Flags; + IMG_UINT32 ui32ClientBuildOptions; + IMG_UINT32 ui32ClientDDKVersion; + IMG_UINT32 ui32ClientDDKBuild; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_CONNECT; + +/* Bridge out structure for Connect */ +typedef struct PVRSRV_BRIDGE_OUT_CONNECT_TAG +{ + IMG_UINT8 ui8KernelArch; + IMG_UINT32 ui32CapabilityFlags; + IMG_UINT64 ui64PackedBvnc; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_CONNECT; + +/******************************************* + Disconnect + *******************************************/ + +/* Bridge in structure for Disconnect */ +typedef struct PVRSRV_BRIDGE_IN_DISCONNECT_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DISCONNECT; + +/* Bridge out structure for Disconnect */ +typedef struct PVRSRV_BRIDGE_OUT_DISCONNECT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DISCONNECT; + +/******************************************* + AcquireGlobalEventObject + *******************************************/ + +/* Bridge in structure for AcquireGlobalEventObject */ +typedef struct PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT; + +/* Bridge out structure for AcquireGlobalEventObject */ +typedef struct PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT_TAG +{ + IMG_HANDLE hGlobalEventObject; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT; + +/******************************************* + ReleaseGlobalEventObject + *******************************************/ + +/* Bridge in structure for ReleaseGlobalEventObject */ +typedef struct PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT_TAG +{ + IMG_HANDLE hGlobalEventObject; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT; + +/* Bridge out structure for ReleaseGlobalEventObject */ +typedef struct PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT_TAG +{ + 
PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT; + +/******************************************* + EventObjectOpen + *******************************************/ + +/* Bridge in structure for EventObjectOpen */ +typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN_TAG +{ + IMG_HANDLE hEventObject; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN; + +/* Bridge out structure for EventObjectOpen */ +typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN_TAG +{ + IMG_HANDLE hOSEvent; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN; + +/******************************************* + EventObjectWait + *******************************************/ + +/* Bridge in structure for EventObjectWait */ +typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT_TAG +{ + IMG_HANDLE hOSEventKM; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT; + +/* Bridge out structure for EventObjectWait */ +typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT; + +/******************************************* + EventObjectClose + *******************************************/ + +/* Bridge in structure for EventObjectClose */ +typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE_TAG +{ + IMG_HANDLE hOSEventKM; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE; + +/* Bridge out structure for EventObjectClose */ +typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE; + +/******************************************* + DumpDebugInfo + *******************************************/ + +/* Bridge in structure for DumpDebugInfo */ +typedef struct PVRSRV_BRIDGE_IN_DUMPDEBUGINFO_TAG +{ + IMG_UINT32 ui32VerbLevel; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DUMPDEBUGINFO; + +/* Bridge out structure for DumpDebugInfo */ +typedef struct 
PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO; + +/******************************************* + GetDevClockSpeed + *******************************************/ + +/* Bridge in structure for GetDevClockSpeed */ +typedef struct PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED; + +/* Bridge out structure for GetDevClockSpeed */ +typedef struct PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED_TAG +{ + IMG_UINT32 ui32ClockSpeed; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED; + +/******************************************* + HWOpTimeout + *******************************************/ + +/* Bridge in structure for HWOpTimeout */ +typedef struct PVRSRV_BRIDGE_IN_HWOPTIMEOUT_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_HWOPTIMEOUT; + +/* Bridge out structure for HWOpTimeout */ +typedef struct PVRSRV_BRIDGE_OUT_HWOPTIMEOUT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_HWOPTIMEOUT; + +/******************************************* + AlignmentCheck + *******************************************/ + +/* Bridge in structure for AlignmentCheck */ +typedef struct PVRSRV_BRIDGE_IN_ALIGNMENTCHECK_TAG +{ + IMG_UINT32 ui32AlignChecksSize; + IMG_UINT32 *pui32AlignChecks; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_ALIGNMENTCHECK; + +/* Bridge out structure for AlignmentCheck */ +typedef struct PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK; + +/******************************************* + GetDeviceStatus + *******************************************/ + +/* Bridge in structure for GetDeviceStatus */ +typedef struct PVRSRV_BRIDGE_IN_GETDEVICESTATUS_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) 
PVRSRV_BRIDGE_IN_GETDEVICESTATUS; + +/* Bridge out structure for GetDeviceStatus */ +typedef struct PVRSRV_BRIDGE_OUT_GETDEVICESTATUS_TAG +{ + IMG_UINT32 ui32DeviceSatus; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_GETDEVICESTATUS; + +/******************************************* + GetMultiCoreInfo + *******************************************/ + +/* Bridge in structure for GetMultiCoreInfo */ +typedef struct PVRSRV_BRIDGE_IN_GETMULTICOREINFO_TAG +{ + IMG_UINT32 ui32CapsSize; + /* Output pointer pui64Caps is also an implied input */ + IMG_UINT64 *pui64Caps; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_GETMULTICOREINFO; + +/* Bridge out structure for GetMultiCoreInfo */ +typedef struct PVRSRV_BRIDGE_OUT_GETMULTICOREINFO_TAG +{ + IMG_UINT32 ui32NumCores; + IMG_UINT64 *pui64Caps; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_GETMULTICOREINFO; + +/******************************************* + EventObjectWaitTimeout + *******************************************/ + +/* Bridge in structure for EventObjectWaitTimeout */ +typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT_TAG +{ + IMG_HANDLE hOSEventKM; + IMG_UINT64 ui64uiTimeoutus; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT; + +/* Bridge out structure for EventObjectWaitTimeout */ +typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT; + +/******************************************* + FindProcessMemStats + *******************************************/ + +/* Bridge in structure for FindProcessMemStats */ +typedef struct PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS_TAG +{ + IMG_UINT32 ui32PID; + IMG_UINT32 ui32ArrSize; + IMG_BOOL bbAllProcessStats; + /* Output pointer pui32MemStatsArray is also an implied input */ + IMG_UINT32 *pui32MemStatsArray; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS; + +/* Bridge out structure for 
FindProcessMemStats */ +typedef struct PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS_TAG +{ + IMG_UINT32 *pui32MemStatsArray; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS; + +/******************************************* + AcquireInfoPage + *******************************************/ + +/* Bridge in structure for AcquireInfoPage */ +typedef struct PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE; + +/* Bridge out structure for AcquireInfoPage */ +typedef struct PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE_TAG +{ + IMG_HANDLE hPMR; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE; + +/******************************************* + ReleaseInfoPage + *******************************************/ + +/* Bridge in structure for ReleaseInfoPage */ +typedef struct PVRSRV_BRIDGE_IN_RELEASEINFOPAGE_TAG +{ + IMG_HANDLE hPMR; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RELEASEINFOPAGE; + +/* Bridge out structure for ReleaseInfoPage */ +typedef struct PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE; + +#endif /* COMMON_SRVCORE_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/srvcore_bridge/server_srvcore_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/srvcore_bridge/server_srvcore_bridge.c new file mode 100644 index 000000000000..e84f8a7a8c06 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/srvcore_bridge/server_srvcore_bridge.c @@ -0,0 +1,1164 @@ +/******************************************************************************* +@File +@Title Server bridge for srvcore +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for srvcore +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*******************************************************************************/

/* NOTE(review): the two bare "#include" lines in this file lost their
 * angle-bracket header names during patch extraction — restore the original
 * <...> targets before building. */
#include

#include "img_defs.h"

#include "srvcore.h"
#include "info_page.h"
#include "proc_stats.h"
#include "rgx_fwif_alignchecks.h"

#include "common_srvcore_bridge.h"

#include "allocmem.h"
#include "pvr_debug.h"
#include "connection_server.h"
#include "pvr_bridge.h"
#if defined(SUPPORT_RGX)
#include "rgx_bridge.h"
#endif
#include "srvcore.h"	/* NOTE(review): duplicate of the "srvcore.h" include above */
#include "handle.h"

#include

/* ***************************************************************************
 * Server-side bridge entry points
 */

/*
 * Bridge handler for Connect: casts the raw IN/OUT parameter blocks to their
 * typed forms and forwards the client's flags, build options and DDK
 * version/build numbers to PVRSRVConnectKM, which fills the OUT block with
 * the kernel architecture, capability flags and packed BVNC.
 * Always returns 0; the per-call status is reported via psConnectOUT->eError.
 */
static IMG_INT
PVRSRVBridgeConnect(IMG_UINT32 ui32DispatchTableEntry,
		    IMG_UINT8 * psConnectIN_UI8,
		    IMG_UINT8 * psConnectOUT_UI8,
		    CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_CONNECT *psConnectIN =
	    (PVRSRV_BRIDGE_IN_CONNECT *) IMG_OFFSET_ADDR(psConnectIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_CONNECT *psConnectOUT =
	    (PVRSRV_BRIDGE_OUT_CONNECT *) IMG_OFFSET_ADDR(psConnectOUT_UI8, 0);

	psConnectOUT->eError =
	    PVRSRVConnectKM(psConnection, OSGetDevNode(psConnection),
			    psConnectIN->ui32Flags,
			    psConnectIN->ui32ClientBuildOptions,
			    psConnectIN->ui32ClientDDKVersion,
			    psConnectIN->ui32ClientDDKBuild,
			    &psConnectOUT->ui8KernelArch,
			    &psConnectOUT->ui32CapabilityFlags,
			    &psConnectOUT->ui64PackedBvnc);

	return 0;
}

/*
 * Bridge handler for Disconnect: the IN block and the connection are unused
 * (the IN struct is an empty placeholder); simply calls PVRSRVDisconnectKM
 * and reports its status via the OUT block.
 */
static IMG_INT
PVRSRVBridgeDisconnect(IMG_UINT32 ui32DispatchTableEntry,
		       IMG_UINT8 * psDisconnectIN_UI8,
		       IMG_UINT8 * psDisconnectOUT_UI8,
		       CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_DISCONNECT *psDisconnectIN =
	    (PVRSRV_BRIDGE_IN_DISCONNECT *) IMG_OFFSET_ADDR(psDisconnectIN_UI8,
							    0);
	PVRSRV_BRIDGE_OUT_DISCONNECT *psDisconnectOUT =
	    (PVRSRV_BRIDGE_OUT_DISCONNECT *)
	    IMG_OFFSET_ADDR(psDisconnectOUT_UI8, 0);

	PVR_UNREFERENCED_PARAMETER(psConnection);
	PVR_UNREFERENCED_PARAMETER(psDisconnectIN);

	psDisconnectOUT->eError = PVRSRVDisconnectKM();

	return 0;
}

/*
 * Handle-release callback installed by PVRSRVBridgeAcquireGlobalEventObject:
 * forwards the stored handle to PVRSRVReleaseGlobalEventObjectKM when the
 * bridge handle is destroyed.  (Definition continues on the next line.)
 */
static PVRSRV_ERROR
_AcquireGlobalEventObjecthGlobalEventObjectIntRelease(void
						      *pvData)
{
	PVRSRV_ERROR eError;
	eError = PVRSRVReleaseGlobalEventObjectKM((IMG_HANDLE) pvData);
	return eError;
}

/*
 * Bridge handler for AcquireGlobalEventObject: acquires the global event
 * object via PVRSRVAcquireGlobalEventObjectKM and wraps it in a
 * SHARED_EVENT_OBJECT handle for the calling connection, registering the
 * release callback above so the acquisition is undone when the handle dies.
 * On any failure after acquisition, the acquired object is released in the
 * exit path (see the continuation of this function on the next line).
 */
static IMG_INT
PVRSRVBridgeAcquireGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry,
				     IMG_UINT8 *
				     psAcquireGlobalEventObjectIN_UI8,
				     IMG_UINT8 *
				     psAcquireGlobalEventObjectOUT_UI8,
				     CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT *psAcquireGlobalEventObjectIN
	    =
	    (PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT *)
	    IMG_OFFSET_ADDR(psAcquireGlobalEventObjectIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT
	    *psAcquireGlobalEventObjectOUT =
	    (PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT *)
	    IMG_OFFSET_ADDR(psAcquireGlobalEventObjectOUT_UI8, 0);

	IMG_HANDLE hGlobalEventObjectInt = NULL;

	/* IN block is an empty placeholder struct */
	PVR_UNREFERENCED_PARAMETER(psAcquireGlobalEventObjectIN);

	psAcquireGlobalEventObjectOUT->eError =
	    PVRSRVAcquireGlobalEventObjectKM(&hGlobalEventObjectInt);
	/* Exit early if bridged call fails */
	if (unlikely(psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK))
	{
		goto AcquireGlobalEventObject_exit;
	}

	/* Lock over handle creation. */
	LockHandle(psConnection->psHandleBase);

	psAcquireGlobalEventObjectOUT->eError =
	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
				      &psAcquireGlobalEventObjectOUT->
				      hGlobalEventObject,
				      (void *)hGlobalEventObjectInt,
				      PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
				      (PFN_HANDLE_RELEASE) &
				      _AcquireGlobalEventObjecthGlobalEventObjectIntRelease);
	if (unlikely(psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK))
	{
		UnlockHandle(psConnection->psHandleBase);
		goto AcquireGlobalEventObject_exit;
	}

	/* Release now we have created handles.
	 */
	UnlockHandle(psConnection->psHandleBase);

AcquireGlobalEventObject_exit:

	/* Error path: if the object was acquired but handle creation failed,
	 * undo the acquisition so nothing leaks. */
	if (psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK)
	{
		if (hGlobalEventObjectInt)
		{
			PVRSRVReleaseGlobalEventObjectKM(hGlobalEventObjectInt);
		}
	}

	return 0;
}

/*
 * Bridge handler for ReleaseGlobalEventObject: destroys the caller's
 * SHARED_EVENT_OBJECT handle (staged destruction under the handle-base
 * lock); the handle's release callback performs the actual KM release.
 * PVRSRV_ERROR_RETRY is tolerated without logging.
 */
static IMG_INT
PVRSRVBridgeReleaseGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry,
				     IMG_UINT8 *
				     psReleaseGlobalEventObjectIN_UI8,
				     IMG_UINT8 *
				     psReleaseGlobalEventObjectOUT_UI8,
				     CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT *psReleaseGlobalEventObjectIN
	    =
	    (PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT *)
	    IMG_OFFSET_ADDR(psReleaseGlobalEventObjectIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT
	    *psReleaseGlobalEventObjectOUT =
	    (PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT *)
	    IMG_OFFSET_ADDR(psReleaseGlobalEventObjectOUT_UI8, 0);

	/* Lock over handle destruction. */
	LockHandle(psConnection->psHandleBase);

	psReleaseGlobalEventObjectOUT->eError =
	    PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase,
					    (IMG_HANDLE)
					    psReleaseGlobalEventObjectIN->
					    hGlobalEventObject,
					    PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
	if (unlikely
	    ((psReleaseGlobalEventObjectOUT->eError != PVRSRV_OK)
	     && (psReleaseGlobalEventObjectOUT->eError != PVRSRV_ERROR_RETRY)))
	{
		PVR_DPF((PVR_DBG_ERROR,
			 "%s: %s",
			 __func__,
			 PVRSRVGetErrorString(psReleaseGlobalEventObjectOUT->
					      eError)));
		UnlockHandle(psConnection->psHandleBase);
		goto ReleaseGlobalEventObject_exit;
	}

	/* Release now we have destroyed handles.
	 */
	UnlockHandle(psConnection->psHandleBase);

ReleaseGlobalEventObject_exit:

	return 0;
}

/*
 * Handle-release callback for the per-connection OS event created by
 * PVRSRVBridgeEventObjectOpen: closes the OS event when its handle dies.
 */
static PVRSRV_ERROR _EventObjectOpenhOSEventIntRelease(void *pvData)
{
	PVRSRV_ERROR eError;
	eError = OSEventObjectClose((IMG_HANDLE) pvData);
	return eError;
}

/*
 * Bridge handler for EventObjectOpen: looks up the shared event object from
 * the caller-supplied handle (taking a reference), opens a per-connection OS
 * event on it, and returns a new EVENT_OBJECT_CONNECT handle wrapping that
 * OS event.  The lookup reference is dropped in the exit path, and the OS
 * event is closed if handle creation fails (see continuation on next line).
 */
static IMG_INT
PVRSRVBridgeEventObjectOpen(IMG_UINT32 ui32DispatchTableEntry,
			    IMG_UINT8 * psEventObjectOpenIN_UI8,
			    IMG_UINT8 * psEventObjectOpenOUT_UI8,
			    CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN *psEventObjectOpenIN =
	    (PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN *)
	    IMG_OFFSET_ADDR(psEventObjectOpenIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN *psEventObjectOpenOUT =
	    (PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN *)
	    IMG_OFFSET_ADDR(psEventObjectOpenOUT_UI8, 0);

	IMG_HANDLE hEventObject = psEventObjectOpenIN->hEventObject;
	IMG_HANDLE hEventObjectInt = NULL;
	IMG_HANDLE hOSEventInt = NULL;

	/* Lock over handle lookup. */
	LockHandle(psConnection->psHandleBase);

	/* Look up the address from the handle */
	psEventObjectOpenOUT->eError =
	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
				       (void **)&hEventObjectInt,
				       hEventObject,
				       PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
				       IMG_TRUE);
	if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK))
	{
		UnlockHandle(psConnection->psHandleBase);
		goto EventObjectOpen_exit;
	}
	/* Release now we have looked up handles. */
	UnlockHandle(psConnection->psHandleBase);

	psEventObjectOpenOUT->eError =
	    OSEventObjectOpen(hEventObjectInt, &hOSEventInt);
	/* Exit early if bridged call fails */
	if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK))
	{
		goto EventObjectOpen_exit;
	}

	/* Lock over handle creation.
	 */
	LockHandle(psConnection->psHandleBase);

	psEventObjectOpenOUT->eError =
	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
				      &psEventObjectOpenOUT->hOSEvent,
				      (void *)hOSEventInt,
				      PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
				      (PFN_HANDLE_RELEASE) &
				      _EventObjectOpenhOSEventIntRelease);
	if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK))
	{
		UnlockHandle(psConnection->psHandleBase);
		goto EventObjectOpen_exit;
	}

	/* Release now we have created handles. */
	UnlockHandle(psConnection->psHandleBase);

EventObjectOpen_exit:

	/* Lock over handle lookup cleanup. */
	LockHandle(psConnection->psHandleBase);

	/* Unreference the previously looked up handle */
	if (hEventObjectInt)
	{
		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
					    hEventObject,
					    PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
	}
	/* Release now we have cleaned up look up handles. */
	UnlockHandle(psConnection->psHandleBase);

	/* Error path: close the OS event if it was opened but the handle
	 * wrapping it could not be created. */
	if (psEventObjectOpenOUT->eError != PVRSRV_OK)
	{
		if (hOSEventInt)
		{
			OSEventObjectClose(hOSEventInt);
		}
	}

	return 0;
}

/*
 * Bridge handler for EventObjectWait: looks up the caller's
 * EVENT_OBJECT_CONNECT handle (taking a reference) and blocks in
 * OSEventObjectWait; the reference is dropped in the exit path
 * (see continuation on the next line).
 */
static IMG_INT
PVRSRVBridgeEventObjectWait(IMG_UINT32 ui32DispatchTableEntry,
			    IMG_UINT8 * psEventObjectWaitIN_UI8,
			    IMG_UINT8 * psEventObjectWaitOUT_UI8,
			    CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT *psEventObjectWaitIN =
	    (PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT *)
	    IMG_OFFSET_ADDR(psEventObjectWaitIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT *psEventObjectWaitOUT =
	    (PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT *)
	    IMG_OFFSET_ADDR(psEventObjectWaitOUT_UI8, 0);

	IMG_HANDLE hOSEventKM = psEventObjectWaitIN->hOSEventKM;
	IMG_HANDLE hOSEventKMInt = NULL;

	/* Lock over handle lookup.
	 */
	LockHandle(psConnection->psHandleBase);

	/* Look up the address from the handle */
	psEventObjectWaitOUT->eError =
	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
				       (void **)&hOSEventKMInt,
				       hOSEventKM,
				       PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
				       IMG_TRUE);
	if (unlikely(psEventObjectWaitOUT->eError != PVRSRV_OK))
	{
		UnlockHandle(psConnection->psHandleBase);
		goto EventObjectWait_exit;
	}
	/* Release now we have looked up handles. */
	UnlockHandle(psConnection->psHandleBase);

	/* NOTE(review): this wait blocks with the lookup reference held but
	 * the handle-base lock released — presumably OSEventObjectWait may
	 * sleep; confirm against the OS layer implementation. */
	psEventObjectWaitOUT->eError = OSEventObjectWait(hOSEventKMInt);

EventObjectWait_exit:

	/* Lock over handle lookup cleanup. */
	LockHandle(psConnection->psHandleBase);

	/* Unreference the previously looked up handle */
	if (hOSEventKMInt)
	{
		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
					    hOSEventKM,
					    PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
	}
	/* Release now we have cleaned up look up handles. */
	UnlockHandle(psConnection->psHandleBase);

	return 0;
}

/*
 * Bridge handler for EventObjectClose: destroys the caller's
 * EVENT_OBJECT_CONNECT handle (staged destruction under the handle-base
 * lock); the handle's release callback closes the underlying OS event.
 * PVRSRV_ERROR_RETRY is tolerated without logging
 * (see continuation on the next line).
 */
static IMG_INT
PVRSRVBridgeEventObjectClose(IMG_UINT32 ui32DispatchTableEntry,
			     IMG_UINT8 * psEventObjectCloseIN_UI8,
			     IMG_UINT8 * psEventObjectCloseOUT_UI8,
			     CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE *psEventObjectCloseIN =
	    (PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE *)
	    IMG_OFFSET_ADDR(psEventObjectCloseIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE *psEventObjectCloseOUT =
	    (PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE *)
	    IMG_OFFSET_ADDR(psEventObjectCloseOUT_UI8, 0);

	/* Lock over handle destruction.
	 */
	LockHandle(psConnection->psHandleBase);

	psEventObjectCloseOUT->eError =
	    PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase,
					    (IMG_HANDLE) psEventObjectCloseIN->
					    hOSEventKM,
					    PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
	if (unlikely
	    ((psEventObjectCloseOUT->eError != PVRSRV_OK)
	     && (psEventObjectCloseOUT->eError != PVRSRV_ERROR_RETRY)))
	{
		PVR_DPF((PVR_DBG_ERROR,
			 "%s: %s",
			 __func__,
			 PVRSRVGetErrorString(psEventObjectCloseOUT->eError)));
		UnlockHandle(psConnection->psHandleBase);
		goto EventObjectClose_exit;
	}

	/* Release now we have destroyed handles. */
	UnlockHandle(psConnection->psHandleBase);

EventObjectClose_exit:

	return 0;
}

/*
 * Bridge handler for DumpDebugInfo: forwards the caller's verbosity level to
 * PVRSRVDumpDebugInfoKM for this connection's device node.
 */
static IMG_INT
PVRSRVBridgeDumpDebugInfo(IMG_UINT32 ui32DispatchTableEntry,
			  IMG_UINT8 * psDumpDebugInfoIN_UI8,
			  IMG_UINT8 * psDumpDebugInfoOUT_UI8,
			  CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_DUMPDEBUGINFO *psDumpDebugInfoIN =
	    (PVRSRV_BRIDGE_IN_DUMPDEBUGINFO *)
	    IMG_OFFSET_ADDR(psDumpDebugInfoIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO *psDumpDebugInfoOUT =
	    (PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO *)
	    IMG_OFFSET_ADDR(psDumpDebugInfoOUT_UI8, 0);

	psDumpDebugInfoOUT->eError =
	    PVRSRVDumpDebugInfoKM(psConnection, OSGetDevNode(psConnection),
				  psDumpDebugInfoIN->ui32VerbLevel);

	return 0;
}

/*
 * Bridge handler for GetDevClockSpeed: IN block is an empty placeholder;
 * queries the device clock speed into the OUT block
 * (call continues on the next line).
 */
static IMG_INT
PVRSRVBridgeGetDevClockSpeed(IMG_UINT32 ui32DispatchTableEntry,
			     IMG_UINT8 * psGetDevClockSpeedIN_UI8,
			     IMG_UINT8 * psGetDevClockSpeedOUT_UI8,
			     CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED *psGetDevClockSpeedIN =
	    (PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED *)
	    IMG_OFFSET_ADDR(psGetDevClockSpeedIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED *psGetDevClockSpeedOUT =
	    (PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED *)
	    IMG_OFFSET_ADDR(psGetDevClockSpeedOUT_UI8, 0);

	PVR_UNREFERENCED_PARAMETER(psGetDevClockSpeedIN);

	psGetDevClockSpeedOUT->eError =
	    PVRSRVGetDevClockSpeedKM(psConnection, OSGetDevNode(psConnection),
				     &psGetDevClockSpeedOUT->ui32ClockSpeed);

	return 0;
}

/*
 * Bridge handler for HWOpTimeout: IN block is an empty placeholder; simply
 * calls PVRSRVHWOpTimeoutKM for this connection's device node.
 */
static IMG_INT
PVRSRVBridgeHWOpTimeout(IMG_UINT32 ui32DispatchTableEntry,
			IMG_UINT8 * psHWOpTimeoutIN_UI8,
			IMG_UINT8 * psHWOpTimeoutOUT_UI8,
			CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_HWOPTIMEOUT *psHWOpTimeoutIN =
	    (PVRSRV_BRIDGE_IN_HWOPTIMEOUT *)
	    IMG_OFFSET_ADDR(psHWOpTimeoutIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_HWOPTIMEOUT *psHWOpTimeoutOUT =
	    (PVRSRV_BRIDGE_OUT_HWOPTIMEOUT *)
	    IMG_OFFSET_ADDR(psHWOpTimeoutOUT_UI8, 0);

	PVR_UNREFERENCED_PARAMETER(psHWOpTimeoutIN);

	psHWOpTimeoutOUT->eError =
	    PVRSRVHWOpTimeoutKM(psConnection, OSGetDevNode(psConnection));

	return 0;
}

/*
 * Bridge handler for AlignmentCheck: copies a user-supplied array of
 * alignment-check words into kernel space and passes it to
 * PVRSRVAlignmentCheckKM.  The array length is validated against
 * RGXFW_ALIGN_CHECKS_UM_MAX before any allocation, so the untrusted
 * ui32AlignChecksSize cannot drive an oversized buffer.  Where possible the
 * unused tail of the fixed-size bridge input buffer is reused instead of
 * allocating (non-INTEGRITY builds); otherwise a temporary buffer is
 * allocated and freed in the exit path (continues on the next line).
 */
static IMG_INT
PVRSRVBridgeAlignmentCheck(IMG_UINT32 ui32DispatchTableEntry,
			   IMG_UINT8 * psAlignmentCheckIN_UI8,
			   IMG_UINT8 * psAlignmentCheckOUT_UI8,
			   CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_ALIGNMENTCHECK *psAlignmentCheckIN =
	    (PVRSRV_BRIDGE_IN_ALIGNMENTCHECK *)
	    IMG_OFFSET_ADDR(psAlignmentCheckIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK *psAlignmentCheckOUT =
	    (PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK *)
	    IMG_OFFSET_ADDR(psAlignmentCheckOUT_UI8, 0);

	IMG_UINT32 *ui32AlignChecksInt = NULL;

	IMG_UINT32 ui32NextOffset = 0;
	IMG_BYTE *pArrayArgsBuffer = NULL;
#if !defined(INTEGRITY_OS)
	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
#endif

	IMG_UINT32 ui32BufferSize =
	    (psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32)) + 0;

	/* Bound the untrusted array size before sizing any buffer */
	if (unlikely
	    (psAlignmentCheckIN->ui32AlignChecksSize >
	     RGXFW_ALIGN_CHECKS_UM_MAX))
	{
		psAlignmentCheckOUT->eError =
		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
		goto AlignmentCheck_exit;
	}

	if (ui32BufferSize != 0)
	{
#if !defined(INTEGRITY_OS)
		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
		IMG_UINT32 ui32InBufferOffset =
		    PVR_ALIGN(sizeof(*psAlignmentCheckIN),
			      sizeof(unsigned long));
		IMG_UINT32 ui32InBufferExcessSize =
		    ui32InBufferOffset >=
		    PVRSRV_MAX_BRIDGE_IN_SIZE ?
		    0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;

		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
		if (bHaveEnoughSpace)
		{
			/* Reuse the tail of the bridge input buffer — no
			 * allocation needed, nothing to free on exit. */
			IMG_BYTE *pInputBuffer =
			    (IMG_BYTE *) (void *)psAlignmentCheckIN;

			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
		}
		else
#endif
		{
			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);

			if (!pArrayArgsBuffer)
			{
				psAlignmentCheckOUT->eError =
				    PVRSRV_ERROR_OUT_OF_MEMORY;
				goto AlignmentCheck_exit;
			}
		}
	}

	if (psAlignmentCheckIN->ui32AlignChecksSize != 0)
	{
		ui32AlignChecksInt =
		    (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer,
						   ui32NextOffset);
		ui32NextOffset +=
		    psAlignmentCheckIN->ui32AlignChecksSize *
		    sizeof(IMG_UINT32);
	}

	/* Copy the data over */
	if (psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32) > 0)
	{
		if (OSCopyFromUser
		    (NULL, ui32AlignChecksInt,
		     (const void __user *)psAlignmentCheckIN->pui32AlignChecks,
		     psAlignmentCheckIN->ui32AlignChecksSize *
		     sizeof(IMG_UINT32)) != PVRSRV_OK)
		{
			psAlignmentCheckOUT->eError =
			    PVRSRV_ERROR_INVALID_PARAMS;

			goto AlignmentCheck_exit;
		}
	}

	psAlignmentCheckOUT->eError =
	    PVRSRVAlignmentCheckKM(psConnection, OSGetDevNode(psConnection),
				   psAlignmentCheckIN->ui32AlignChecksSize,
				   ui32AlignChecksInt);

AlignmentCheck_exit:

	/* Allocated space should be equal to the last updated offset */
	PVR_ASSERT(ui32BufferSize == ui32NextOffset);

	/* Free only if a separate buffer was actually allocated */
#if defined(INTEGRITY_OS)
	if (pArrayArgsBuffer)
#else
	if (!bHaveEnoughSpace && pArrayArgsBuffer)
#endif
		OSFreeMemNoStats(pArrayArgsBuffer);

	return 0;
}

/*
 * Bridge handler for GetDeviceStatus: IN block is an empty placeholder;
 * queries the device status word into the OUT block.  (Note: the generated
 * OUT field is spelled "ui32DeviceSatus" — interface name, do not change.)
 * Definition continues on the next line.
 */
static IMG_INT
PVRSRVBridgeGetDeviceStatus(IMG_UINT32 ui32DispatchTableEntry,
			    IMG_UINT8 * psGetDeviceStatusIN_UI8,
			    IMG_UINT8 * psGetDeviceStatusOUT_UI8,
			    CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_GETDEVICESTATUS *psGetDeviceStatusIN =
	    (PVRSRV_BRIDGE_IN_GETDEVICESTATUS *)
	    IMG_OFFSET_ADDR(psGetDeviceStatusIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_GETDEVICESTATUS
*psGetDeviceStatusOUT = + (PVRSRV_BRIDGE_OUT_GETDEVICESTATUS *) + IMG_OFFSET_ADDR(psGetDeviceStatusOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psGetDeviceStatusIN); + + psGetDeviceStatusOUT->eError = + PVRSRVGetDeviceStatusKM(psConnection, OSGetDevNode(psConnection), + &psGetDeviceStatusOUT->ui32DeviceSatus); + + return 0; +} + +static IMG_INT +PVRSRVBridgeGetMultiCoreInfo(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psGetMultiCoreInfoIN_UI8, + IMG_UINT8 * psGetMultiCoreInfoOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_GETMULTICOREINFO *psGetMultiCoreInfoIN = + (PVRSRV_BRIDGE_IN_GETMULTICOREINFO *) + IMG_OFFSET_ADDR(psGetMultiCoreInfoIN_UI8, 0); + PVRSRV_BRIDGE_OUT_GETMULTICOREINFO *psGetMultiCoreInfoOUT = + (PVRSRV_BRIDGE_OUT_GETMULTICOREINFO *) + IMG_OFFSET_ADDR(psGetMultiCoreInfoOUT_UI8, 0); + + IMG_UINT64 *pui64CapsInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64)) + 0; + + if (psGetMultiCoreInfoIN->ui32CapsSize > 8) + { + psGetMultiCoreInfoOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto GetMultiCoreInfo_exit; + } + + psGetMultiCoreInfoOUT->pui64Caps = psGetMultiCoreInfoIN->pui64Caps; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psGetMultiCoreInfoIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psGetMultiCoreInfoIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psGetMultiCoreInfoOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto GetMultiCoreInfo_exit; + } + } + } + + if (psGetMultiCoreInfoIN->ui32CapsSize != 0) + { + pui64CapsInt = + (IMG_UINT64 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64); + } + + psGetMultiCoreInfoOUT->eError = + PVRSRVGetMultiCoreInfoKM(psConnection, OSGetDevNode(psConnection), + psGetMultiCoreInfoIN->ui32CapsSize, + &psGetMultiCoreInfoOUT->ui32NumCores, + pui64CapsInt); + + /* If dest ptr is non-null and we have data to copy */ + if ((pui64CapsInt) && + ((psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, (void __user *)psGetMultiCoreInfoOUT->pui64Caps, + pui64CapsInt, + (psGetMultiCoreInfoIN->ui32CapsSize * + sizeof(IMG_UINT64))) != PVRSRV_OK)) + { + psGetMultiCoreInfoOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto GetMultiCoreInfo_exit; + } + } + +GetMultiCoreInfo_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeEventObjectWaitTimeout(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psEventObjectWaitTimeoutIN_UI8, + IMG_UINT8 * psEventObjectWaitTimeoutOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT *psEventObjectWaitTimeoutIN = + 
(PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT *) + IMG_OFFSET_ADDR(psEventObjectWaitTimeoutIN_UI8, 0); + PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT *psEventObjectWaitTimeoutOUT = + (PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT *) + IMG_OFFSET_ADDR(psEventObjectWaitTimeoutOUT_UI8, 0); + + IMG_HANDLE hOSEventKM = psEventObjectWaitTimeoutIN->hOSEventKM; + IMG_HANDLE hOSEventKMInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psEventObjectWaitTimeoutOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hOSEventKMInt, + hOSEventKM, + PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, + IMG_TRUE); + if (unlikely(psEventObjectWaitTimeoutOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto EventObjectWaitTimeout_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psEventObjectWaitTimeoutOUT->eError = + OSEventObjectWaitTimeout(hOSEventKMInt, + psEventObjectWaitTimeoutIN-> + ui64uiTimeoutus); + +EventObjectWaitTimeout_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hOSEventKMInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hOSEventKM, + PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psFindProcessMemStatsIN_UI8, + IMG_UINT8 * psFindProcessMemStatsOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS *psFindProcessMemStatsIN = + (PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS *) + IMG_OFFSET_ADDR(psFindProcessMemStatsIN_UI8, 0); + PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS *psFindProcessMemStatsOUT = + (PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS *) + IMG_OFFSET_ADDR(psFindProcessMemStatsOUT_UI8, 0); + + IMG_UINT32 *pui32MemStatsArrayInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32)) + 0; + + if (psFindProcessMemStatsIN->ui32ArrSize > + PVRSRV_PROCESS_STAT_TYPE_COUNT) + { + psFindProcessMemStatsOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto FindProcessMemStats_exit; + } + + PVR_UNREFERENCED_PARAMETER(psConnection); + + psFindProcessMemStatsOUT->pui32MemStatsArray = + psFindProcessMemStatsIN->pui32MemStatsArray; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psFindProcessMemStatsIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psFindProcessMemStatsIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psFindProcessMemStatsOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto FindProcessMemStats_exit; + } + } + } + + if (psFindProcessMemStatsIN->ui32ArrSize != 0) + { + pui32MemStatsArrayInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32); + } + + psFindProcessMemStatsOUT->eError = + PVRSRVFindProcessMemStatsKM(psFindProcessMemStatsIN->ui32PID, + psFindProcessMemStatsIN->ui32ArrSize, + psFindProcessMemStatsIN-> + bbAllProcessStats, + pui32MemStatsArrayInt); + + /* If dest ptr is non-null and we have data to copy */ + if ((pui32MemStatsArrayInt) && + ((psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, + (void __user *)psFindProcessMemStatsOUT-> + pui32MemStatsArray, pui32MemStatsArrayInt, + (psFindProcessMemStatsIN->ui32ArrSize * + sizeof(IMG_UINT32))) != PVRSRV_OK)) + { + psFindProcessMemStatsOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto FindProcessMemStats_exit; + } + } + +FindProcessMemStats_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static PVRSRV_ERROR _AcquireInfoPagepsPMRIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVReleaseInfoPageKM((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeAcquireInfoPage(IMG_UINT32 
ui32DispatchTableEntry, + IMG_UINT8 * psAcquireInfoPageIN_UI8, + IMG_UINT8 * psAcquireInfoPageOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE *psAcquireInfoPageIN = + (PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE *) + IMG_OFFSET_ADDR(psAcquireInfoPageIN_UI8, 0); + PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE *psAcquireInfoPageOUT = + (PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE *) + IMG_OFFSET_ADDR(psAcquireInfoPageOUT_UI8, 0); + + PMR *psPMRInt = NULL; + + PVR_UNREFERENCED_PARAMETER(psAcquireInfoPageIN); + + psAcquireInfoPageOUT->eError = PVRSRVAcquireInfoPageKM(&psPMRInt); + /* Exit early if bridged call fails */ + if (unlikely(psAcquireInfoPageOUT->eError != PVRSRV_OK)) + { + goto AcquireInfoPage_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + + psAcquireInfoPageOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase-> + psHandleBase, &psAcquireInfoPageOUT->hPMR, + (void *)psPMRInt, + PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _AcquireInfoPagepsPMRIntRelease); + if (unlikely(psAcquireInfoPageOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + goto AcquireInfoPage_exit; + } + + /* Release now we have created handles. 
*/ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + +AcquireInfoPage_exit: + + if (psAcquireInfoPageOUT->eError != PVRSRV_OK) + { + if (psPMRInt) + { + PVRSRVReleaseInfoPageKM(psPMRInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeReleaseInfoPage(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psReleaseInfoPageIN_UI8, + IMG_UINT8 * psReleaseInfoPageOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RELEASEINFOPAGE *psReleaseInfoPageIN = + (PVRSRV_BRIDGE_IN_RELEASEINFOPAGE *) + IMG_OFFSET_ADDR(psReleaseInfoPageIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE *psReleaseInfoPageOUT = + (PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE *) + IMG_OFFSET_ADDR(psReleaseInfoPageOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + + psReleaseInfoPageOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psProcessHandleBase-> + psHandleBase, + (IMG_HANDLE) psReleaseInfoPageIN-> + hPMR, + PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT); + if (unlikely + ((psReleaseInfoPageOUT->eError != PVRSRV_OK) + && (psReleaseInfoPageOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psReleaseInfoPageOUT->eError))); + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + goto ReleaseInfoPage_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + +ReleaseInfoPage_exit: + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitSRVCOREBridge(void); +PVRSRV_ERROR DeinitSRVCOREBridge(void); + +/* + * Register all SRVCORE functions with services + */ +PVRSRV_ERROR InitSRVCOREBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_CONNECT, + PVRSRVBridgeConnect, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_DISCONNECT, + PVRSRVBridgeDisconnect, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT, + PVRSRVBridgeAcquireGlobalEventObject, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT, + PVRSRVBridgeReleaseGlobalEventObject, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN, + PVRSRVBridgeEventObjectOpen, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT, + PVRSRVBridgeEventObjectWait, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE, + PVRSRVBridgeEventObjectClose, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO, + PVRSRVBridgeDumpDebugInfo, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED, + PVRSRVBridgeGetDevClockSpeed, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT, + PVRSRVBridgeHWOpTimeout, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK, + PVRSRVBridgeAlignmentCheck, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS, + PVRSRVBridgeGetDeviceStatus, NULL); + + 
SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO, + PVRSRVBridgeGetMultiCoreInfo, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT, + PVRSRVBridgeEventObjectWaitTimeout, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS, + PVRSRVBridgeFindProcessMemStats, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE, + PVRSRVBridgeAcquireInfoPage, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE, + PVRSRVBridgeReleaseInfoPage, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all srvcore functions with services + */ +PVRSRV_ERROR DeinitSRVCOREBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_CONNECT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_DISCONNECT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO); 
+ + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/sync_bridge/client_sync_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/sync_bridge/client_sync_bridge.h new file mode 100644 index 000000000000..0c3bf82923ba --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/sync_bridge/client_sync_bridge.h @@ -0,0 +1,142 @@ +/******************************************************************************* +@File +@Title Client bridge header for sync +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for sync +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef CLIENT_SYNC_BRIDGE_H +#define CLIENT_SYNC_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_sync_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeAllocSyncPrimitiveBlock(IMG_HANDLE + hBridge, + IMG_HANDLE + * + phSyncHandle, + IMG_UINT32 + * + pui32SyncPrimVAddr, + IMG_UINT32 + * + pui32SyncPrimBlockSize, + IMG_HANDLE + * + phhSyncPMR); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeFreeSyncPrimitiveBlock(IMG_HANDLE + hBridge, + IMG_HANDLE + hSyncHandle); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimSet(IMG_HANDLE hBridge, + IMG_HANDLE hSyncHandle, + IMG_UINT32 ui32Index, + IMG_UINT32 ui32Value); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDump(IMG_HANDLE hBridge, + IMG_HANDLE + hSyncHandle, + IMG_UINT32 + ui32Offset); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpValue(IMG_HANDLE + hBridge, + IMG_HANDLE + hSyncHandle, + IMG_UINT32 + ui32Offset, + IMG_UINT32 + ui32Value); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpPol(IMG_HANDLE + hBridge, + IMG_HANDLE + hSyncHandle, + IMG_UINT32 + ui32Offset, + IMG_UINT32 + ui32Value, + IMG_UINT32 + ui32Mask, + PDUMP_POLL_OPERATOR + eOperator, + PDUMP_FLAGS_T + uiPDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpCBP(IMG_HANDLE + hBridge, + IMG_HANDLE + hSyncHandle, + IMG_UINT32 + ui32Offset, + IMG_DEVMEM_OFFSET_T + uiWriteOffset, + IMG_DEVMEM_SIZE_T + uiPacketSize, + IMG_DEVMEM_SIZE_T + uiBufferSize); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncAllocEvent(IMG_HANDLE hBridge, + IMG_BOOL + bServerSync, + IMG_UINT32 + ui32FWAddr, + IMG_UINT32 + ui32ClassNameSize, + const IMG_CHAR * + puiClassName); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncFreeEvent(IMG_HANDLE hBridge, + IMG_UINT32 + ui32FWAddr); + 
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeSyncCheckpointSignalledPDumpPol(IMG_HANDLE hBridge, PVRSRV_FENCE hFence); + +#endif /* CLIENT_SYNC_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/sync_bridge/client_sync_direct_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/sync_bridge/client_sync_direct_bridge.c new file mode 100644 index 000000000000..36154b402948 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/sync_bridge/client_sync_direct_bridge.c @@ -0,0 +1,314 @@ +/******************************************************************************* +@File +@Title Direct client bridge for sync +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for sync + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include "client_sync_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "pdump.h" +#include "pdumpdefs.h" +#include "devicemem_typedefs.h" +#include "pvrsrv_sync_km.h" +#include + +#include "sync.h" +#include "sync_server.h" +#include "pdump.h" +#include "pvrsrv_sync_km.h" +#include "sync_fallback_server.h" +#include "sync_checkpoint.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeAllocSyncPrimitiveBlock(IMG_HANDLE + hBridge, + IMG_HANDLE + * + phSyncHandle, + IMG_UINT32 + * + pui32SyncPrimVAddr, + IMG_UINT32 + * + pui32SyncPrimBlockSize, + IMG_HANDLE + * + phhSyncPMR) +{ + PVRSRV_ERROR eError; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; + PMR *pshSyncPMRInt = NULL; + + eError = + PVRSRVAllocSyncPrimitiveBlockKM(NULL, + (PVRSRV_DEVICE_NODE *) ((void *) + hBridge), + &psSyncHandleInt, + pui32SyncPrimVAddr, + pui32SyncPrimBlockSize, + &pshSyncPMRInt); + + *phSyncHandle = psSyncHandleInt; + *phhSyncPMR = pshSyncPMRInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeFreeSyncPrimitiveBlock(IMG_HANDLE + hBridge, + IMG_HANDLE + hSyncHandle) +{ + PVRSRV_ERROR eError; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; + + eError = PVRSRVFreeSyncPrimitiveBlockKM(psSyncHandleInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimSet(IMG_HANDLE hBridge, + IMG_HANDLE hSyncHandle, + IMG_UINT32 ui32Index, + IMG_UINT32 ui32Value) +{ + PVRSRV_ERROR eError; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; + + eError = PVRSRVSyncPrimSetKM(psSyncHandleInt, ui32Index, ui32Value); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDump(IMG_HANDLE hBridge, + IMG_HANDLE + hSyncHandle, + 
IMG_UINT32 + ui32Offset) +{ +#if defined(PDUMP) + PVRSRV_ERROR eError; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; + + eError = PVRSRVSyncPrimPDumpKM(psSyncHandleInt, ui32Offset); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(hSyncHandle); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpValue(IMG_HANDLE + hBridge, + IMG_HANDLE + hSyncHandle, + IMG_UINT32 + ui32Offset, + IMG_UINT32 + ui32Value) +{ +#if defined(PDUMP) + PVRSRV_ERROR eError; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; + + eError = + PVRSRVSyncPrimPDumpValueKM(psSyncHandleInt, ui32Offset, ui32Value); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(hSyncHandle); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + PVR_UNREFERENCED_PARAMETER(ui32Value); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpPol(IMG_HANDLE + hBridge, + IMG_HANDLE + hSyncHandle, + IMG_UINT32 + ui32Offset, + IMG_UINT32 + ui32Value, + IMG_UINT32 + ui32Mask, + PDUMP_POLL_OPERATOR + eOperator, + PDUMP_FLAGS_T + uiPDumpFlags) +{ +#if defined(PDUMP) + PVRSRV_ERROR eError; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; + + eError = + PVRSRVSyncPrimPDumpPolKM(psSyncHandleInt, + ui32Offset, + ui32Value, + ui32Mask, eOperator, uiPDumpFlags); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(hSyncHandle); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + PVR_UNREFERENCED_PARAMETER(ui32Value); + PVR_UNREFERENCED_PARAMETER(ui32Mask); + PVR_UNREFERENCED_PARAMETER(eOperator); 
+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpCBP(IMG_HANDLE + hBridge, + IMG_HANDLE + hSyncHandle, + IMG_UINT32 + ui32Offset, + IMG_DEVMEM_OFFSET_T + uiWriteOffset, + IMG_DEVMEM_SIZE_T + uiPacketSize, + IMG_DEVMEM_SIZE_T + uiBufferSize) +{ +#if defined(PDUMP) + PVRSRV_ERROR eError; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; + + eError = + PVRSRVSyncPrimPDumpCBPKM(psSyncHandleInt, + ui32Offset, + uiWriteOffset, uiPacketSize, uiBufferSize); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(hSyncHandle); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + PVR_UNREFERENCED_PARAMETER(uiWriteOffset); + PVR_UNREFERENCED_PARAMETER(uiPacketSize); + PVR_UNREFERENCED_PARAMETER(uiBufferSize); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncAllocEvent(IMG_HANDLE hBridge, + IMG_BOOL + bServerSync, + IMG_UINT32 + ui32FWAddr, + IMG_UINT32 + ui32ClassNameSize, + const IMG_CHAR * + puiClassName) +{ + PVRSRV_ERROR eError; + + eError = + PVRSRVSyncAllocEventKM(NULL, + (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + bServerSync, ui32FWAddr, ui32ClassNameSize, + puiClassName); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncFreeEvent(IMG_HANDLE hBridge, + IMG_UINT32 + ui32FWAddr) +{ + PVRSRV_ERROR eError; + + eError = + PVRSRVSyncFreeEventKM(NULL, + (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + ui32FWAddr); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeSyncCheckpointSignalledPDumpPol(IMG_HANDLE hBridge, PVRSRV_FENCE hFence) +{ +#if defined(PDUMP) + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = PVRSRVSyncCheckpointSignalledPDumpPolKM(hFence); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + 
PVR_UNREFERENCED_PARAMETER(hFence); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/sync_bridge/common_sync_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/sync_bridge/common_sync_bridge.h new file mode 100644 index 000000000000..799a0ba1bce7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/sync_bridge/common_sync_bridge.h @@ -0,0 +1,253 @@ +/******************************************************************************* +@File +@Title Common bridge header for sync +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for sync +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_SYNC_BRIDGE_H +#define COMMON_SYNC_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "pdump.h" +#include "pdumpdefs.h" +#include "devicemem_typedefs.h" +#include "pvrsrv_sync_km.h" +#include + +#define PVRSRV_BRIDGE_SYNC_CMD_FIRST 0 +#define PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK PVRSRV_BRIDGE_SYNC_CMD_FIRST+0 +#define PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK PVRSRV_BRIDGE_SYNC_CMD_FIRST+1 +#define PVRSRV_BRIDGE_SYNC_SYNCPRIMSET PVRSRV_BRIDGE_SYNC_CMD_FIRST+2 +#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP PVRSRV_BRIDGE_SYNC_CMD_FIRST+3 +#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE PVRSRV_BRIDGE_SYNC_CMD_FIRST+4 +#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL PVRSRV_BRIDGE_SYNC_CMD_FIRST+5 +#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP PVRSRV_BRIDGE_SYNC_CMD_FIRST+6 +#define PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT PVRSRV_BRIDGE_SYNC_CMD_FIRST+7 +#define PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT PVRSRV_BRIDGE_SYNC_CMD_FIRST+8 +#define PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL PVRSRV_BRIDGE_SYNC_CMD_FIRST+9 +#define PVRSRV_BRIDGE_SYNC_CMD_LAST (PVRSRV_BRIDGE_SYNC_CMD_FIRST+9) + 
+/******************************************* + AllocSyncPrimitiveBlock + *******************************************/ + +/* Bridge in structure for AllocSyncPrimitiveBlock */ +typedef struct PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK; + +/* Bridge out structure for AllocSyncPrimitiveBlock */ +typedef struct PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK_TAG +{ + IMG_HANDLE hSyncHandle; + IMG_UINT32 ui32SyncPrimVAddr; + IMG_UINT32 ui32SyncPrimBlockSize; + IMG_HANDLE hhSyncPMR; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK; + +/******************************************* + FreeSyncPrimitiveBlock + *******************************************/ + +/* Bridge in structure for FreeSyncPrimitiveBlock */ +typedef struct PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK_TAG +{ + IMG_HANDLE hSyncHandle; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK; + +/* Bridge out structure for FreeSyncPrimitiveBlock */ +typedef struct PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK; + +/******************************************* + SyncPrimSet + *******************************************/ + +/* Bridge in structure for SyncPrimSet */ +typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMSET_TAG +{ + IMG_HANDLE hSyncHandle; + IMG_UINT32 ui32Index; + IMG_UINT32 ui32Value; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMSET; + +/* Bridge out structure for SyncPrimSet */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMSET_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMSET; + +/******************************************* + SyncPrimPDump + *******************************************/ + +/* Bridge in structure for SyncPrimPDump */ +typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP_TAG +{ + IMG_HANDLE hSyncHandle; + 
IMG_UINT32 ui32Offset; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP; + +/* Bridge out structure for SyncPrimPDump */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP; + +/******************************************* + SyncPrimPDumpValue + *******************************************/ + +/* Bridge in structure for SyncPrimPDumpValue */ +typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE_TAG +{ + IMG_HANDLE hSyncHandle; + IMG_UINT32 ui32Offset; + IMG_UINT32 ui32Value; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE; + +/* Bridge out structure for SyncPrimPDumpValue */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE; + +/******************************************* + SyncPrimPDumpPol + *******************************************/ + +/* Bridge in structure for SyncPrimPDumpPol */ +typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL_TAG +{ + IMG_HANDLE hSyncHandle; + IMG_UINT32 ui32Offset; + IMG_UINT32 ui32Value; + IMG_UINT32 ui32Mask; + PDUMP_POLL_OPERATOR eOperator; + PDUMP_FLAGS_T uiPDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL; + +/* Bridge out structure for SyncPrimPDumpPol */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL; + +/******************************************* + SyncPrimPDumpCBP + *******************************************/ + +/* Bridge in structure for SyncPrimPDumpCBP */ +typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP_TAG +{ + IMG_HANDLE hSyncHandle; + IMG_UINT32 ui32Offset; + IMG_DEVMEM_OFFSET_T uiWriteOffset; + IMG_DEVMEM_SIZE_T uiPacketSize; + IMG_DEVMEM_SIZE_T uiBufferSize; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP; + +/* Bridge out structure for SyncPrimPDumpCBP */ +typedef struct 
PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP; + +/******************************************* + SyncAllocEvent + *******************************************/ + +/* Bridge in structure for SyncAllocEvent */ +typedef struct PVRSRV_BRIDGE_IN_SYNCALLOCEVENT_TAG +{ + IMG_BOOL bServerSync; + IMG_UINT32 ui32FWAddr; + IMG_UINT32 ui32ClassNameSize; + const IMG_CHAR *puiClassName; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCALLOCEVENT; + +/* Bridge out structure for SyncAllocEvent */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT; + +/******************************************* + SyncFreeEvent + *******************************************/ + +/* Bridge in structure for SyncFreeEvent */ +typedef struct PVRSRV_BRIDGE_IN_SYNCFREEEVENT_TAG +{ + IMG_UINT32 ui32FWAddr; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCFREEEVENT; + +/* Bridge out structure for SyncFreeEvent */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCFREEEVENT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCFREEEVENT; + +/******************************************* + SyncCheckpointSignalledPDumpPol + *******************************************/ + +/* Bridge in structure for SyncCheckpointSignalledPDumpPol */ +typedef struct PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL_TAG +{ + PVRSRV_FENCE hFence; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL; + +/* Bridge out structure for SyncCheckpointSignalledPDumpPol */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL; + +#endif /* COMMON_SYNC_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/sync_bridge/server_sync_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/sync_bridge/server_sync_bridge.c 
new file mode 100644 index 000000000000..0db048d874be --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/sync_bridge/server_sync_bridge.c @@ -0,0 +1,818 @@ +/******************************************************************************* +@File +@Title Server bridge for sync +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for sync +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "sync.h" +#include "sync_server.h" +#include "pdump.h" +#include "pvrsrv_sync_km.h" +#include "sync_fallback_server.h" +#include "sync_checkpoint.h" + +#include "common_sync_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _AllocSyncPrimitiveBlockpsSyncHandleIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = + PVRSRVFreeSyncPrimitiveBlockKM((SYNC_PRIMITIVE_BLOCK *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeAllocSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psAllocSyncPrimitiveBlockIN_UI8, + IMG_UINT8 * + psAllocSyncPrimitiveBlockOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK *psAllocSyncPrimitiveBlockIN = + (PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK *) + IMG_OFFSET_ADDR(psAllocSyncPrimitiveBlockIN_UI8, 0); + PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK *psAllocSyncPrimitiveBlockOUT + = + (PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK *) + IMG_OFFSET_ADDR(psAllocSyncPrimitiveBlockOUT_UI8, 0); + + 
SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; + PMR *pshSyncPMRInt = NULL; + + PVR_UNREFERENCED_PARAMETER(psAllocSyncPrimitiveBlockIN); + + psAllocSyncPrimitiveBlockOUT->hSyncHandle = NULL; + + psAllocSyncPrimitiveBlockOUT->eError = + PVRSRVAllocSyncPrimitiveBlockKM(psConnection, + OSGetDevNode(psConnection), + &psSyncHandleInt, + &psAllocSyncPrimitiveBlockOUT-> + ui32SyncPrimVAddr, + &psAllocSyncPrimitiveBlockOUT-> + ui32SyncPrimBlockSize, + &pshSyncPMRInt); + /* Exit early if bridged call fails */ + if (unlikely(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)) + { + goto AllocSyncPrimitiveBlock_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psAllocSyncPrimitiveBlockOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psAllocSyncPrimitiveBlockOUT-> + hSyncHandle, (void *)psSyncHandleInt, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _AllocSyncPrimitiveBlockpsSyncHandleIntRelease); + if (unlikely(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto AllocSyncPrimitiveBlock_exit; + } + + psAllocSyncPrimitiveBlockOUT->eError = + PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, + &psAllocSyncPrimitiveBlockOUT-> + hhSyncPMR, (void *)pshSyncPMRInt, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psAllocSyncPrimitiveBlockOUT-> + hSyncHandle); + if (unlikely(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto AllocSyncPrimitiveBlock_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +AllocSyncPrimitiveBlock_exit: + + if (psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK) + { + if (psAllocSyncPrimitiveBlockOUT->hSyncHandle) + { + PVRSRV_ERROR eError; + + /* Lock over handle creation cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + eError = + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + (IMG_HANDLE) + psAllocSyncPrimitiveBlockOUT-> + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + if (unlikely + ((eError != PVRSRV_OK) + && (eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) + || (eError == PVRSRV_ERROR_RETRY)); + + /* Avoid freeing/destroying/releasing the resource a second time below */ + psSyncHandleInt = NULL; + /* Release now we have cleaned up creation handles. */ + UnlockHandle(psConnection->psHandleBase); + + } + + if (psSyncHandleInt) + { + PVRSRVFreeSyncPrimitiveBlockKM(psSyncHandleInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeFreeSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psFreeSyncPrimitiveBlockIN_UI8, + IMG_UINT8 * psFreeSyncPrimitiveBlockOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK *psFreeSyncPrimitiveBlockIN = + (PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK *) + IMG_OFFSET_ADDR(psFreeSyncPrimitiveBlockIN_UI8, 0); + PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK *psFreeSyncPrimitiveBlockOUT = + (PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK *) + IMG_OFFSET_ADDR(psFreeSyncPrimitiveBlockOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psFreeSyncPrimitiveBlockOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psFreeSyncPrimitiveBlockIN-> + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + if (unlikely + ((psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_OK) + && (psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psFreeSyncPrimitiveBlockOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto FreeSyncPrimitiveBlock_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +FreeSyncPrimitiveBlock_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeSyncPrimSet(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncPrimSetIN_UI8, + IMG_UINT8 * psSyncPrimSetOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCPRIMSET *psSyncPrimSetIN = + (PVRSRV_BRIDGE_IN_SYNCPRIMSET *) + IMG_OFFSET_ADDR(psSyncPrimSetIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCPRIMSET *psSyncPrimSetOUT = + (PVRSRV_BRIDGE_OUT_SYNCPRIMSET *) + IMG_OFFSET_ADDR(psSyncPrimSetOUT_UI8, 0); + + IMG_HANDLE hSyncHandle = psSyncPrimSetIN->hSyncHandle; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psSyncPrimSetOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncHandleInt, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psSyncPrimSetOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto SyncPrimSet_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psSyncPrimSetOUT->eError = + PVRSRVSyncPrimSetKM(psSyncHandleInt, + psSyncPrimSetIN->ui32Index, + psSyncPrimSetIN->ui32Value); + +SyncPrimSet_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSyncHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#if defined(PDUMP) + +static IMG_INT +PVRSRVBridgeSyncPrimPDump(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncPrimPDumpIN_UI8, + IMG_UINT8 * psSyncPrimPDumpOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP *psSyncPrimPDumpIN = + (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP *) + IMG_OFFSET_ADDR(psSyncPrimPDumpIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP *psSyncPrimPDumpOUT = + (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP *) + IMG_OFFSET_ADDR(psSyncPrimPDumpOUT_UI8, 0); + + IMG_HANDLE hSyncHandle = psSyncPrimPDumpIN->hSyncHandle; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psSyncPrimPDumpOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncHandleInt, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psSyncPrimPDumpOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto SyncPrimPDump_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psSyncPrimPDumpOUT->eError = + PVRSRVSyncPrimPDumpKM(psSyncHandleInt, + psSyncPrimPDumpIN->ui32Offset); + +SyncPrimPDump_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSyncHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#else +#define PVRSRVBridgeSyncPrimPDump NULL +#endif + +#if defined(PDUMP) + +static IMG_INT +PVRSRVBridgeSyncPrimPDumpValue(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncPrimPDumpValueIN_UI8, + IMG_UINT8 * psSyncPrimPDumpValueOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE *psSyncPrimPDumpValueIN = + (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE *) + IMG_OFFSET_ADDR(psSyncPrimPDumpValueIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE *psSyncPrimPDumpValueOUT = + (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE *) + IMG_OFFSET_ADDR(psSyncPrimPDumpValueOUT_UI8, 0); + + IMG_HANDLE hSyncHandle = psSyncPrimPDumpValueIN->hSyncHandle; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psSyncPrimPDumpValueOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncHandleInt, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psSyncPrimPDumpValueOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto SyncPrimPDumpValue_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psSyncPrimPDumpValueOUT->eError = + PVRSRVSyncPrimPDumpValueKM(psSyncHandleInt, + psSyncPrimPDumpValueIN->ui32Offset, + psSyncPrimPDumpValueIN->ui32Value); + +SyncPrimPDumpValue_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSyncHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#else +#define PVRSRVBridgeSyncPrimPDumpValue NULL +#endif + +#if defined(PDUMP) + +static IMG_INT +PVRSRVBridgeSyncPrimPDumpPol(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncPrimPDumpPolIN_UI8, + IMG_UINT8 * psSyncPrimPDumpPolOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL *psSyncPrimPDumpPolIN = + (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL *) + IMG_OFFSET_ADDR(psSyncPrimPDumpPolIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL *psSyncPrimPDumpPolOUT = + (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL *) + IMG_OFFSET_ADDR(psSyncPrimPDumpPolOUT_UI8, 0); + + IMG_HANDLE hSyncHandle = psSyncPrimPDumpPolIN->hSyncHandle; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psSyncPrimPDumpPolOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncHandleInt, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psSyncPrimPDumpPolOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto SyncPrimPDumpPol_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psSyncPrimPDumpPolOUT->eError = + PVRSRVSyncPrimPDumpPolKM(psSyncHandleInt, + psSyncPrimPDumpPolIN->ui32Offset, + psSyncPrimPDumpPolIN->ui32Value, + psSyncPrimPDumpPolIN->ui32Mask, + psSyncPrimPDumpPolIN->eOperator, + psSyncPrimPDumpPolIN->uiPDumpFlags); + +SyncPrimPDumpPol_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSyncHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#else +#define PVRSRVBridgeSyncPrimPDumpPol NULL +#endif + +#if defined(PDUMP) + +static IMG_INT +PVRSRVBridgeSyncPrimPDumpCBP(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncPrimPDumpCBPIN_UI8, + IMG_UINT8 * psSyncPrimPDumpCBPOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP *psSyncPrimPDumpCBPIN = + (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP *) + IMG_OFFSET_ADDR(psSyncPrimPDumpCBPIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP *psSyncPrimPDumpCBPOUT = + (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP *) + IMG_OFFSET_ADDR(psSyncPrimPDumpCBPOUT_UI8, 0); + + IMG_HANDLE hSyncHandle = psSyncPrimPDumpCBPIN->hSyncHandle; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psSyncPrimPDumpCBPOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncHandleInt, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psSyncPrimPDumpCBPOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto SyncPrimPDumpCBP_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psSyncPrimPDumpCBPOUT->eError = + PVRSRVSyncPrimPDumpCBPKM(psSyncHandleInt, + psSyncPrimPDumpCBPIN->ui32Offset, + psSyncPrimPDumpCBPIN->uiWriteOffset, + psSyncPrimPDumpCBPIN->uiPacketSize, + psSyncPrimPDumpCBPIN->uiBufferSize); + +SyncPrimPDumpCBP_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSyncHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#else +#define PVRSRVBridgeSyncPrimPDumpCBP NULL +#endif + +static IMG_INT +PVRSRVBridgeSyncAllocEvent(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncAllocEventIN_UI8, + IMG_UINT8 * psSyncAllocEventOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCALLOCEVENT *psSyncAllocEventIN = + (PVRSRV_BRIDGE_IN_SYNCALLOCEVENT *) + IMG_OFFSET_ADDR(psSyncAllocEventIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT *psSyncAllocEventOUT = + (PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT *) + IMG_OFFSET_ADDR(psSyncAllocEventOUT_UI8, 0); + + IMG_CHAR *uiClassNameInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) + 0; + + if (unlikely + (psSyncAllocEventIN->ui32ClassNameSize > PVRSRV_SYNC_NAME_LENGTH)) + { + psSyncAllocEventOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto SyncAllocEvent_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psSyncAllocEventIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psSyncAllocEventIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psSyncAllocEventOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto SyncAllocEvent_exit; + } + } + } + + if (psSyncAllocEventIN->ui32ClassNameSize != 0) + { + uiClassNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiClassNameInt, + (const void __user *)psSyncAllocEventIN->puiClassName, + psSyncAllocEventIN->ui32ClassNameSize * + sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psSyncAllocEventOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto SyncAllocEvent_exit; + } + ((IMG_CHAR *) + uiClassNameInt)[(psSyncAllocEventIN->ui32ClassNameSize * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + + psSyncAllocEventOUT->eError = + PVRSRVSyncAllocEventKM(psConnection, OSGetDevNode(psConnection), + psSyncAllocEventIN->bServerSync, + psSyncAllocEventIN->ui32FWAddr, + psSyncAllocEventIN->ui32ClassNameSize, + uiClassNameInt); + +SyncAllocEvent_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeSyncFreeEvent(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncFreeEventIN_UI8, + IMG_UINT8 * psSyncFreeEventOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCFREEEVENT *psSyncFreeEventIN = + 
(PVRSRV_BRIDGE_IN_SYNCFREEEVENT *) + IMG_OFFSET_ADDR(psSyncFreeEventIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCFREEEVENT *psSyncFreeEventOUT = + (PVRSRV_BRIDGE_OUT_SYNCFREEEVENT *) + IMG_OFFSET_ADDR(psSyncFreeEventOUT_UI8, 0); + + psSyncFreeEventOUT->eError = + PVRSRVSyncFreeEventKM(psConnection, OSGetDevNode(psConnection), + psSyncFreeEventIN->ui32FWAddr); + + return 0; +} + +#if defined(PDUMP) + +static IMG_INT +PVRSRVBridgeSyncCheckpointSignalledPDumpPol(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psSyncCheckpointSignalledPDumpPolIN_UI8, + IMG_UINT8 * + psSyncCheckpointSignalledPDumpPolOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL + *psSyncCheckpointSignalledPDumpPolIN = + (PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *) + IMG_OFFSET_ADDR(psSyncCheckpointSignalledPDumpPolIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL + *psSyncCheckpointSignalledPDumpPolOUT = + (PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *) + IMG_OFFSET_ADDR(psSyncCheckpointSignalledPDumpPolOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psConnection); + + psSyncCheckpointSignalledPDumpPolOUT->eError = + PVRSRVSyncCheckpointSignalledPDumpPolKM + (psSyncCheckpointSignalledPDumpPolIN->hFence); + + return 0; +} + +#else +#define PVRSRVBridgeSyncCheckpointSignalledPDumpPol NULL +#endif + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitSYNCBridge(void); +PVRSRV_ERROR DeinitSYNCBridge(void); + +/* + * Register all SYNC functions with services + */ +PVRSRV_ERROR InitSYNCBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK, + PVRSRVBridgeAllocSyncPrimitiveBlock, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK, + PVRSRVBridgeFreeSyncPrimitiveBlock, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCPRIMSET, + 
PVRSRVBridgeSyncPrimSet, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP, + PVRSRVBridgeSyncPrimPDump, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE, + PVRSRVBridgeSyncPrimPDumpValue, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL, + PVRSRVBridgeSyncPrimPDumpPol, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP, + PVRSRVBridgeSyncPrimPDumpCBP, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT, + PVRSRVBridgeSyncAllocEvent, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT, + PVRSRVBridgeSyncFreeEvent, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL, + PVRSRVBridgeSyncCheckpointSignalledPDumpPol, + NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all sync functions with services + */ +PVRSRV_ERROR DeinitSYNCBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCPRIMSET); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL); + + return PVRSRV_OK; +} diff --git 
a/drivers/mcst/gpu-imgtec/generated/rogue/synctracking_bridge/client_synctracking_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/synctracking_bridge/client_synctracking_bridge.h new file mode 100644 index 000000000000..d9316e7d4cb1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/synctracking_bridge/client_synctracking_bridge.h @@ -0,0 +1,78 @@ +/******************************************************************************* +@File +@Title Client bridge header for synctracking +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for synctracking +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef CLIENT_SYNCTRACKING_BRIDGE_H +#define CLIENT_SYNCTRACKING_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_synctracking_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordRemoveByHandle(IMG_HANDLE + hBridge, + IMG_HANDLE + hhRecord); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordAdd(IMG_HANDLE hBridge, + IMG_HANDLE * + phhRecord, + IMG_HANDLE + hhServerSyncPrimBlock, + IMG_UINT32 + ui32ui32FwBlockAddr, + IMG_UINT32 + ui32ui32SyncOffset, + IMG_BOOL + bbServerSync, + IMG_UINT32 + ui32ClassNameSize, + const IMG_CHAR * + puiClassName); + +#endif /* CLIENT_SYNCTRACKING_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/synctracking_bridge/client_synctracking_direct_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/synctracking_bridge/client_synctracking_direct_bridge.c new file mode 100644 index 000000000000..1a1efdbb853e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/synctracking_bridge/client_synctracking_direct_bridge.c @@ -0,0 
+1,103 @@ +/******************************************************************************* +@File +@Title Direct client bridge for synctracking +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for synctracking + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include "client_synctracking_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ + +#include "sync.h" +#include "sync_server.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordRemoveByHandle(IMG_HANDLE + hBridge, + IMG_HANDLE + hhRecord) +{ + PVRSRV_ERROR eError; + SYNC_RECORD_HANDLE pshRecordInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + pshRecordInt = (SYNC_RECORD_HANDLE) hhRecord; + + eError = PVRSRVSyncRecordRemoveByHandleKM(pshRecordInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordAdd(IMG_HANDLE hBridge, + IMG_HANDLE * + phhRecord, + IMG_HANDLE + hhServerSyncPrimBlock, + IMG_UINT32 + ui32ui32FwBlockAddr, + IMG_UINT32 + ui32ui32SyncOffset, + IMG_BOOL + bbServerSync, + IMG_UINT32 + ui32ClassNameSize, + const IMG_CHAR * + puiClassName) +{ + PVRSRV_ERROR eError; + SYNC_RECORD_HANDLE pshRecordInt = NULL; + SYNC_PRIMITIVE_BLOCK *pshServerSyncPrimBlockInt; + + pshServerSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK *) hhServerSyncPrimBlock; + + eError = + PVRSRVSyncRecordAddKM(NULL, + (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + &pshRecordInt, pshServerSyncPrimBlockInt, + ui32ui32FwBlockAddr, ui32ui32SyncOffset, + bbServerSync, ui32ClassNameSize, + puiClassName); + + *phhRecord = pshRecordInt; + return eError; +} diff --git 
a/drivers/mcst/gpu-imgtec/generated/rogue/synctracking_bridge/common_synctracking_bridge.h b/drivers/mcst/gpu-imgtec/generated/rogue/synctracking_bridge/common_synctracking_bridge.h new file mode 100644 index 000000000000..340f73663cbd --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/synctracking_bridge/common_synctracking_bridge.h @@ -0,0 +1,96 @@ +/******************************************************************************* +@File +@Title Common bridge header for synctracking +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for synctracking +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_SYNCTRACKING_BRIDGE_H +#define COMMON_SYNCTRACKING_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#define PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST 0 +#define PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+0 +#define PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+1 +#define PVRSRV_BRIDGE_SYNCTRACKING_CMD_LAST (PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+1) + +/******************************************* + SyncRecordRemoveByHandle + *******************************************/ + +/* Bridge in structure for SyncRecordRemoveByHandle */ +typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE_TAG +{ + IMG_HANDLE hhRecord; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE; + +/* Bridge out structure for SyncRecordRemoveByHandle */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE; + +/******************************************* + SyncRecordAdd + *******************************************/ + +/* Bridge 
in structure for SyncRecordAdd */ +typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDADD_TAG +{ + IMG_HANDLE hhServerSyncPrimBlock; + IMG_UINT32 ui32ui32FwBlockAddr; + IMG_UINT32 ui32ui32SyncOffset; + IMG_BOOL bbServerSync; + IMG_UINT32 ui32ClassNameSize; + const IMG_CHAR *puiClassName; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCRECORDADD; + +/* Bridge out structure for SyncRecordAdd */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDADD_TAG +{ + IMG_HANDLE hhRecord; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCRECORDADD; + +#endif /* COMMON_SYNCTRACKING_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/rogue/synctracking_bridge/server_synctracking_bridge.c b/drivers/mcst/gpu-imgtec/generated/rogue/synctracking_bridge/server_synctracking_bridge.c new file mode 100644 index 000000000000..c1591099bda7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/rogue/synctracking_bridge/server_synctracking_bridge.c @@ -0,0 +1,347 @@ +/******************************************************************************* +@File +@Title Server bridge for synctracking +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for synctracking +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "sync.h" +#include "sync_server.h" + +#include "common_synctracking_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeSyncRecordRemoveByHandle(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psSyncRecordRemoveByHandleIN_UI8, + IMG_UINT8 * + psSyncRecordRemoveByHandleOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE *psSyncRecordRemoveByHandleIN + = + (PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE *) + IMG_OFFSET_ADDR(psSyncRecordRemoveByHandleIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE + *psSyncRecordRemoveByHandleOUT = + (PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE *) + IMG_OFFSET_ADDR(psSyncRecordRemoveByHandleOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psSyncRecordRemoveByHandleOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psSyncRecordRemoveByHandleIN-> + hhRecord, + PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE); + if (unlikely + ((psSyncRecordRemoveByHandleOUT->eError != PVRSRV_OK) + && (psSyncRecordRemoveByHandleOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psSyncRecordRemoveByHandleOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto SyncRecordRemoveByHandle_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +SyncRecordRemoveByHandle_exit: + + return 0; +} + +static PVRSRV_ERROR _SyncRecordAddpshRecordIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVSyncRecordRemoveByHandleKM((SYNC_RECORD_HANDLE) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeSyncRecordAdd(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncRecordAddIN_UI8, + IMG_UINT8 * psSyncRecordAddOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCRECORDADD *psSyncRecordAddIN = + (PVRSRV_BRIDGE_IN_SYNCRECORDADD *) + IMG_OFFSET_ADDR(psSyncRecordAddIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCRECORDADD *psSyncRecordAddOUT = + (PVRSRV_BRIDGE_OUT_SYNCRECORDADD *) + IMG_OFFSET_ADDR(psSyncRecordAddOUT_UI8, 0); + + SYNC_RECORD_HANDLE pshRecordInt = NULL; + IMG_HANDLE hhServerSyncPrimBlock = + psSyncRecordAddIN->hhServerSyncPrimBlock; + SYNC_PRIMITIVE_BLOCK *pshServerSyncPrimBlockInt = NULL; + IMG_CHAR *uiClassNameInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) + 0; + + if (unlikely + (psSyncRecordAddIN->ui32ClassNameSize > PVRSRV_SYNC_NAME_LENGTH)) + { + psSyncRecordAddOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto SyncRecordAdd_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psSyncRecordAddIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psSyncRecordAddIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psSyncRecordAddOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto SyncRecordAdd_exit; + } + } + } + + if (psSyncRecordAddIN->ui32ClassNameSize != 0) + { + uiClassNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiClassNameInt, + (const void __user *)psSyncRecordAddIN->puiClassName, + psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) != + PVRSRV_OK) + { + psSyncRecordAddOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto SyncRecordAdd_exit; + } + ((IMG_CHAR *) + uiClassNameInt)[(psSyncRecordAddIN->ui32ClassNameSize * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psSyncRecordAddOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&pshServerSyncPrimBlockInt, + hhServerSyncPrimBlock, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto SyncRecordAdd_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psSyncRecordAddOUT->eError = + PVRSRVSyncRecordAddKM(psConnection, OSGetDevNode(psConnection), + &pshRecordInt, + pshServerSyncPrimBlockInt, + psSyncRecordAddIN->ui32ui32FwBlockAddr, + psSyncRecordAddIN->ui32ui32SyncOffset, + psSyncRecordAddIN->bbServerSync, + psSyncRecordAddIN->ui32ClassNameSize, + uiClassNameInt); + /* Exit early if bridged call fails */ + if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK)) + { + goto SyncRecordAdd_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psSyncRecordAddOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psSyncRecordAddOUT->hhRecord, + (void *)pshRecordInt, + PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + (PFN_HANDLE_RELEASE) & + _SyncRecordAddpshRecordIntRelease); + if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto SyncRecordAdd_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +SyncRecordAdd_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (pshServerSyncPrimBlockInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hhServerSyncPrimBlock, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + if (psSyncRecordAddOUT->eError != PVRSRV_OK) + { + if (pshRecordInt) + { + PVRSRVSyncRecordRemoveByHandleKM(pshRecordInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitSYNCTRACKINGBridge(void); +PVRSRV_ERROR DeinitSYNCTRACKINGBridge(void); + +/* + * Register all SYNCTRACKING functions with services + */ +PVRSRV_ERROR InitSYNCTRACKINGBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, + PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE, + PVRSRVBridgeSyncRecordRemoveByHandle, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, + PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD, + PVRSRVBridgeSyncRecordAdd, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all synctracking functions with services + */ +PVRSRV_ERROR DeinitSYNCTRACKINGBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, + PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, + PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/cache_bridge/client_cache_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/cache_bridge/client_cache_bridge.h new file mode 100644 index 000000000000..e9c405198814 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/cache_bridge/client_cache_bridge.h @@ -0,0 +1,101 @@ +/******************************************************************************* +@File +@Title Client bridge header for cache +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Exports the client bridge functions for cache +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef CLIENT_CACHE_BRIDGE_H +#define CLIENT_CACHE_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_cache_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpQueue(IMG_HANDLE hBridge, + IMG_UINT32 + ui32NumCacheOps, + IMG_HANDLE * phPMR, + IMG_UINT64 * + pui64Address, + IMG_DEVMEM_OFFSET_T * + puiOffset, + IMG_DEVMEM_SIZE_T * + puiSize, + PVRSRV_CACHE_OP * + piuCacheOp, + IMG_UINT32 + ui32OpTimeline, + IMG_UINT32 + ui32CurrentFenceSeqNum, + IMG_UINT32 * + pui32NextFenceSeqNum); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpExec(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_UINT64 ui64Address, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_DEVMEM_SIZE_T + uiSize, + PVRSRV_CACHE_OP + iuCacheOp); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpLog(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_UINT64 ui64Address, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_DEVMEM_SIZE_T + uiSize, + IMG_INT64 + i64QueuedTimeUs, + IMG_INT64 + i64ExecuteTimeUs, + IMG_INT32 i32NumRBF, + PVRSRV_CACHE_OP + iuCacheOp); + +#endif /* CLIENT_CACHE_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/cache_bridge/client_cache_direct_bridge.c 
b/drivers/mcst/gpu-imgtec/generated/volcanic/cache_bridge/client_cache_direct_bridge.c new file mode 100644 index 000000000000..d16b30f7918b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/cache_bridge/client_cache_direct_bridge.c @@ -0,0 +1,143 @@ +/******************************************************************************* +@File +@Title Direct client bridge for cache +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for cache + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include "client_cache_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "cache_ops.h" + +#include "cache_km.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpQueue(IMG_HANDLE hBridge, + IMG_UINT32 + ui32NumCacheOps, + IMG_HANDLE * phPMR, + IMG_UINT64 * + pui64Address, + IMG_DEVMEM_OFFSET_T * + puiOffset, + IMG_DEVMEM_SIZE_T * + puiSize, + PVRSRV_CACHE_OP * + piuCacheOp, + IMG_UINT32 + ui32OpTimeline, + IMG_UINT32 + ui32CurrentFenceSeqNum, + IMG_UINT32 * + pui32NextFenceSeqNum) +{ + PVRSRV_ERROR eError; + PMR **psPMRInt; + + psPMRInt = (PMR **) phPMR; + + eError = + CacheOpQueue(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + ui32NumCacheOps, + psPMRInt, + pui64Address, + puiOffset, + puiSize, + piuCacheOp, + ui32OpTimeline, + ui32CurrentFenceSeqNum, pui32NextFenceSeqNum); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpExec(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_UINT64 ui64Address, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_DEVMEM_SIZE_T + uiSize, + PVRSRV_CACHE_OP + iuCacheOp) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + CacheOpValExec(psPMRInt, ui64Address, uiOffset, uiSize, iuCacheOp); + + 
return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpLog(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_UINT64 ui64Address, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_DEVMEM_SIZE_T + uiSize, + IMG_INT64 + i64QueuedTimeUs, + IMG_INT64 + i64ExecuteTimeUs, + IMG_INT32 i32NumRBF, + PVRSRV_CACHE_OP + iuCacheOp) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + CacheOpLog(psPMRInt, + ui64Address, + uiOffset, + uiSize, + i64QueuedTimeUs, i64ExecuteTimeUs, i32NumRBF, iuCacheOp); + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/cache_bridge/common_cache_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/cache_bridge/common_cache_bridge.h new file mode 100644 index 000000000000..8e608f177daf --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/cache_bridge/common_cache_bridge.h @@ -0,0 +1,128 @@ +/******************************************************************************* +@File +@Title Common bridge header for cache +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for cache +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_CACHE_BRIDGE_H +#define COMMON_CACHE_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "cache_ops.h" + +#define PVRSRV_BRIDGE_CACHE_CMD_FIRST 0 +#define PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE PVRSRV_BRIDGE_CACHE_CMD_FIRST+0 +#define PVRSRV_BRIDGE_CACHE_CACHEOPEXEC PVRSRV_BRIDGE_CACHE_CMD_FIRST+1 +#define PVRSRV_BRIDGE_CACHE_CACHEOPLOG PVRSRV_BRIDGE_CACHE_CMD_FIRST+2 +#define PVRSRV_BRIDGE_CACHE_CMD_LAST (PVRSRV_BRIDGE_CACHE_CMD_FIRST+2) + +/******************************************* + CacheOpQueue + *******************************************/ + +/* Bridge in structure for CacheOpQueue */ +typedef struct PVRSRV_BRIDGE_IN_CACHEOPQUEUE_TAG +{ + IMG_UINT32 ui32NumCacheOps; + IMG_HANDLE *phPMR; + IMG_UINT64 *pui64Address; + IMG_DEVMEM_OFFSET_T *puiOffset; + IMG_DEVMEM_SIZE_T *puiSize; + PVRSRV_CACHE_OP *piuCacheOp; + IMG_UINT32 ui32OpTimeline; + IMG_UINT32 ui32CurrentFenceSeqNum; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_CACHEOPQUEUE; + +/* Bridge out structure for CacheOpQueue */ +typedef struct PVRSRV_BRIDGE_OUT_CACHEOPQUEUE_TAG +{ + IMG_UINT32 ui32NextFenceSeqNum; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_CACHEOPQUEUE; + +/******************************************* + CacheOpExec + *******************************************/ + +/* Bridge in structure for CacheOpExec */ +typedef struct PVRSRV_BRIDGE_IN_CACHEOPEXEC_TAG +{ + IMG_HANDLE hPMR; + IMG_UINT64 ui64Address; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_DEVMEM_SIZE_T uiSize; + PVRSRV_CACHE_OP iuCacheOp; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_CACHEOPEXEC; + +/* Bridge out structure for CacheOpExec */ +typedef struct PVRSRV_BRIDGE_OUT_CACHEOPEXEC_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_CACHEOPEXEC; + +/******************************************* + CacheOpLog + *******************************************/ + +/* Bridge in 
structure for CacheOpLog */ +typedef struct PVRSRV_BRIDGE_IN_CACHEOPLOG_TAG +{ + IMG_HANDLE hPMR; + IMG_UINT64 ui64Address; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_DEVMEM_SIZE_T uiSize; + IMG_INT64 i64QueuedTimeUs; + IMG_INT64 i64ExecuteTimeUs; + IMG_INT32 i32NumRBF; + PVRSRV_CACHE_OP iuCacheOp; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_CACHEOPLOG; + +/* Bridge out structure for CacheOpLog */ +typedef struct PVRSRV_BRIDGE_OUT_CACHEOPLOG_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_CACHEOPLOG; + +#endif /* COMMON_CACHE_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/cache_bridge/server_cache_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/cache_bridge/server_cache_bridge.c new file mode 100644 index 000000000000..8f862aebbb1e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/cache_bridge/server_cache_bridge.c @@ -0,0 +1,503 @@ +/******************************************************************************* +@File +@Title Server bridge for cache +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for cache +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "cache_km.h" + +#include "common_cache_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeCacheOpQueue(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psCacheOpQueueIN_UI8, + IMG_UINT8 * psCacheOpQueueOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_CACHEOPQUEUE *psCacheOpQueueIN = + (PVRSRV_BRIDGE_IN_CACHEOPQUEUE *) + IMG_OFFSET_ADDR(psCacheOpQueueIN_UI8, 0); + PVRSRV_BRIDGE_OUT_CACHEOPQUEUE *psCacheOpQueueOUT = + (PVRSRV_BRIDGE_OUT_CACHEOPQUEUE *) + IMG_OFFSET_ADDR(psCacheOpQueueOUT_UI8, 0); + + PMR **psPMRInt = NULL; + IMG_HANDLE *hPMRInt2 = NULL; + IMG_UINT64 *ui64AddressInt = NULL; + IMG_DEVMEM_OFFSET_T *uiOffsetInt = NULL; + IMG_DEVMEM_SIZE_T *uiSizeInt = NULL; + PVRSRV_CACHE_OP *iuCacheOpInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *)) + + (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE)) + + (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64)) + + (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T)) + + (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T)) + + (psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP)) + 0; + + if (unlikely(psCacheOpQueueIN->ui32NumCacheOps > CACHE_BATCH_MAX)) + { + psCacheOpQueueOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto CacheOpQueue_exit; + } + + if (ui32BufferSize != 0) + { +#if 
!defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psCacheOpQueueIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psCacheOpQueueIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psCacheOpQueueOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto CacheOpQueue_exit; + } + } + } + + if (psCacheOpQueueIN->ui32NumCacheOps != 0) + { + psPMRInt = + (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *); + hPMRInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hPMRInt2, + (const void __user *)psCacheOpQueueIN->phPMR, + psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE)) != + PVRSRV_OK) + { + psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto CacheOpQueue_exit; + } + } + if (psCacheOpQueueIN->ui32NumCacheOps != 0) + { + ui64AddressInt = + (IMG_UINT64 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64); + } + + /* Copy the data over */ + if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64) > 0) + { + if (OSCopyFromUser + (NULL, ui64AddressInt, + (const void __user *)psCacheOpQueueIN->pui64Address, + psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64)) != + 
PVRSRV_OK) + { + psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto CacheOpQueue_exit; + } + } + if (psCacheOpQueueIN->ui32NumCacheOps != 0) + { + uiOffsetInt = + (IMG_DEVMEM_OFFSET_T *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psCacheOpQueueIN->ui32NumCacheOps * + sizeof(IMG_DEVMEM_OFFSET_T); + } + + /* Copy the data over */ + if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T) > 0) + { + if (OSCopyFromUser + (NULL, uiOffsetInt, + (const void __user *)psCacheOpQueueIN->puiOffset, + psCacheOpQueueIN->ui32NumCacheOps * + sizeof(IMG_DEVMEM_OFFSET_T)) != PVRSRV_OK) + { + psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto CacheOpQueue_exit; + } + } + if (psCacheOpQueueIN->ui32NumCacheOps != 0) + { + uiSizeInt = + (IMG_DEVMEM_SIZE_T *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psCacheOpQueueIN->ui32NumCacheOps * + sizeof(IMG_DEVMEM_SIZE_T); + } + + /* Copy the data over */ + if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T) > 0) + { + if (OSCopyFromUser + (NULL, uiSizeInt, + (const void __user *)psCacheOpQueueIN->puiSize, + psCacheOpQueueIN->ui32NumCacheOps * + sizeof(IMG_DEVMEM_SIZE_T)) != PVRSRV_OK) + { + psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto CacheOpQueue_exit; + } + } + if (psCacheOpQueueIN->ui32NumCacheOps != 0) + { + iuCacheOpInt = + (PVRSRV_CACHE_OP *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP); + } + + /* Copy the data over */ + if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP) > 0) + { + if (OSCopyFromUser + (NULL, iuCacheOpInt, + (const void __user *)psCacheOpQueueIN->piuCacheOp, + psCacheOpQueueIN->ui32NumCacheOps * + sizeof(PVRSRV_CACHE_OP)) != PVRSRV_OK) + { + psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto CacheOpQueue_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + { + IMG_UINT32 i; + + for (i = 0; i < psCacheOpQueueIN->ui32NumCacheOps; i++) + { + /* Look up the address from the handle */ + psCacheOpQueueOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection-> + psHandleBase, + (void **)&psPMRInt[i], + hPMRInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psCacheOpQueueOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto CacheOpQueue_exit; + } + } + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psCacheOpQueueOUT->eError = + CacheOpQueue(psConnection, OSGetDevNode(psConnection), + psCacheOpQueueIN->ui32NumCacheOps, + psPMRInt, + ui64AddressInt, + uiOffsetInt, + uiSizeInt, + iuCacheOpInt, + psCacheOpQueueIN->ui32OpTimeline, + psCacheOpQueueIN->ui32CurrentFenceSeqNum, + &psCacheOpQueueOUT->ui32NextFenceSeqNum); + +CacheOpQueue_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + if (hPMRInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psCacheOpQueueIN->ui32NumCacheOps; i++) + { + + /* Unreference the previously looked up handle */ + if (hPMRInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + hPMRInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + } + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeCacheOpExec(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psCacheOpExecIN_UI8, + IMG_UINT8 * psCacheOpExecOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_CACHEOPEXEC *psCacheOpExecIN = + (PVRSRV_BRIDGE_IN_CACHEOPEXEC *) + IMG_OFFSET_ADDR(psCacheOpExecIN_UI8, 0); + PVRSRV_BRIDGE_OUT_CACHEOPEXEC *psCacheOpExecOUT = + (PVRSRV_BRIDGE_OUT_CACHEOPEXEC *) + IMG_OFFSET_ADDR(psCacheOpExecOUT_UI8, 0); + + IMG_HANDLE hPMR = psCacheOpExecIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psCacheOpExecOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psCacheOpExecOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto CacheOpExec_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psCacheOpExecOUT->eError = + CacheOpValExec(psPMRInt, + psCacheOpExecIN->ui64Address, + psCacheOpExecIN->uiOffset, + psCacheOpExecIN->uiSize, psCacheOpExecIN->iuCacheOp); + +CacheOpExec_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeCacheOpLog(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psCacheOpLogIN_UI8, + IMG_UINT8 * psCacheOpLogOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_CACHEOPLOG *psCacheOpLogIN = + (PVRSRV_BRIDGE_IN_CACHEOPLOG *) IMG_OFFSET_ADDR(psCacheOpLogIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_CACHEOPLOG *psCacheOpLogOUT = + (PVRSRV_BRIDGE_OUT_CACHEOPLOG *) + IMG_OFFSET_ADDR(psCacheOpLogOUT_UI8, 0); + + IMG_HANDLE hPMR = psCacheOpLogIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psCacheOpLogOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psCacheOpLogOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto CacheOpLog_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psCacheOpLogOUT->eError = + CacheOpLog(psPMRInt, + psCacheOpLogIN->ui64Address, + psCacheOpLogIN->uiOffset, + psCacheOpLogIN->uiSize, + psCacheOpLogIN->i64QueuedTimeUs, + psCacheOpLogIN->i64ExecuteTimeUs, + psCacheOpLogIN->i32NumRBF, psCacheOpLogIN->iuCacheOp); + +CacheOpLog_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitCACHEBridge(void); +PVRSRV_ERROR DeinitCACHEBridge(void); + +/* + * Register all CACHE functions with services + */ +PVRSRV_ERROR InitCACHEBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, + PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE, + PVRSRVBridgeCacheOpQueue, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, + PVRSRV_BRIDGE_CACHE_CACHEOPEXEC, + PVRSRVBridgeCacheOpExec, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, + PVRSRV_BRIDGE_CACHE_CACHEOPLOG, + PVRSRVBridgeCacheOpLog, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all cache functions with services + */ +PVRSRV_ERROR DeinitCACHEBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, + PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, + PVRSRV_BRIDGE_CACHE_CACHEOPEXEC); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, + PVRSRV_BRIDGE_CACHE_CACHEOPLOG); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/cmm_bridge/common_cmm_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/cmm_bridge/common_cmm_bridge.h new file mode 100644 index 000000000000..52bdef547897 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/cmm_bridge/common_cmm_bridge.h @@ -0,0 +1,113 @@ +/******************************************************************************* +@File +@Title Common bridge header for cmm +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for cmm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_CMM_BRIDGE_H +#define COMMON_CMM_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "devicemem_typedefs.h" + +#define PVRSRV_BRIDGE_CMM_CMD_FIRST 0 +#define PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX PVRSRV_BRIDGE_CMM_CMD_FIRST+0 +#define PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX PVRSRV_BRIDGE_CMM_CMD_FIRST+1 +#define PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX PVRSRV_BRIDGE_CMM_CMD_FIRST+2 +#define PVRSRV_BRIDGE_CMM_CMD_LAST (PVRSRV_BRIDGE_CMM_CMD_FIRST+2) + +/******************************************* + DevmemIntExportCtx + *******************************************/ + +/* Bridge in structure for DevmemIntExportCtx */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX_TAG +{ + IMG_HANDLE hContext; + IMG_HANDLE hPMR; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX; + +/* Bridge out structure for DevmemIntExportCtx */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX_TAG +{ + IMG_HANDLE hContextExport; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX; + +/******************************************* + DevmemIntUnexportCtx + *******************************************/ + +/* Bridge in structure for DevmemIntUnexportCtx */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX_TAG +{ + IMG_HANDLE hContextExport; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX; + +/* Bridge out structure for DevmemIntUnexportCtx */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX; + +/******************************************* + DevmemIntAcquireRemoteCtx + *******************************************/ + +/* Bridge in structure for DevmemIntAcquireRemoteCtx */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX_TAG +{ + IMG_HANDLE hPMR; +} __attribute__ ((packed)) 
PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX; + +/* Bridge out structure for DevmemIntAcquireRemoteCtx */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX_TAG +{ + IMG_HANDLE hContext; + IMG_HANDLE hPrivData; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX; + +#endif /* COMMON_CMM_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/cmm_bridge/server_cmm_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/cmm_bridge/server_cmm_bridge.c new file mode 100644 index 000000000000..0e5a081ea135 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/cmm_bridge/server_cmm_bridge.c @@ -0,0 +1,445 @@ +/******************************************************************************* +@File +@Title Server bridge for cmm +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for cmm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "pmr.h" +#include "devicemem_server.h" + +#include "common_cmm_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +#if !defined(EXCLUDE_CMM_BRIDGE) + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _DevmemIntExportCtxpsContextExportIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = DevmemIntUnexportCtx((DEVMEMINT_CTX_EXPORT *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeDevmemIntExportCtx(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntExportCtxIN_UI8, + IMG_UINT8 * psDevmemIntExportCtxOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX *psDevmemIntExportCtxIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX *) + IMG_OFFSET_ADDR(psDevmemIntExportCtxIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX *psDevmemIntExportCtxOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX *) + IMG_OFFSET_ADDR(psDevmemIntExportCtxOUT_UI8, 0); + + IMG_HANDLE hContext = psDevmemIntExportCtxIN->hContext; + DEVMEMINT_CTX *psContextInt = NULL; + IMG_HANDLE hPMR = psDevmemIntExportCtxIN->hPMR; + PMR *psPMRInt = NULL; + DEVMEMINT_CTX_EXPORT *psContextExportInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntExportCtxOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psContextInt, + hContext, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + IMG_TRUE); + if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntExportCtx_exit; + } + + /* Look up the address from the handle */ + psDevmemIntExportCtxOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntExportCtx_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntExportCtxOUT->eError = + DevmemIntExportCtx(psContextInt, psPMRInt, &psContextExportInt); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK)) + { + goto DevmemIntExportCtx_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psDevmemIntExportCtxOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntExportCtxOUT->hContextExport, + (void *)psContextExportInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + (PFN_HANDLE_RELEASE) & + _DevmemIntExportCtxpsContextExportIntRelease); + if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntExportCtx_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntExportCtx_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hContext, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psDevmemIntExportCtxOUT->eError != PVRSRV_OK) + { + if (psContextExportInt) + { + LockHandle(KERNEL_HANDLE_BASE); + DevmemIntUnexportCtx(psContextExportInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntUnexportCtx(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntUnexportCtxIN_UI8, + IMG_UINT8 * psDevmemIntUnexportCtxOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX *psDevmemIntUnexportCtxIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX *) + IMG_OFFSET_ADDR(psDevmemIntUnexportCtxIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX *psDevmemIntUnexportCtxOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX *) + IMG_OFFSET_ADDR(psDevmemIntUnexportCtxOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psDevmemIntUnexportCtxOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psDevmemIntUnexportCtxIN-> + hContextExport, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT); + if (unlikely + ((psDevmemIntUnexportCtxOUT->eError != PVRSRV_OK) + && (psDevmemIntUnexportCtxOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psDevmemIntUnexportCtxOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntUnexportCtx_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntUnexportCtx_exit: + + return 0; +} + +static PVRSRV_ERROR _DevmemIntAcquireRemoteCtxpsContextIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = DevmemIntCtxDestroy((DEVMEMINT_CTX *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeDevmemIntAcquireRemoteCtx(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psDevmemIntAcquireRemoteCtxIN_UI8, + IMG_UINT8 * + psDevmemIntAcquireRemoteCtxOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX + *psDevmemIntAcquireRemoteCtxIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX *) + IMG_OFFSET_ADDR(psDevmemIntAcquireRemoteCtxIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX + *psDevmemIntAcquireRemoteCtxOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX *) + IMG_OFFSET_ADDR(psDevmemIntAcquireRemoteCtxOUT_UI8, 0); + + IMG_HANDLE hPMR = psDevmemIntAcquireRemoteCtxIN->hPMR; + PMR *psPMRInt = NULL; + DEVMEMINT_CTX *psContextInt = NULL; + IMG_HANDLE hPrivDataInt = NULL; + + psDevmemIntAcquireRemoteCtxOUT->hContext = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntAcquireRemoteCtxOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntAcquireRemoteCtx_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntAcquireRemoteCtxOUT->eError = + DevmemIntAcquireRemoteCtx(psPMRInt, &psContextInt, &hPrivDataInt); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)) + { + goto DevmemIntAcquireRemoteCtx_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psDevmemIntAcquireRemoteCtxOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntAcquireRemoteCtxOUT->hContext, + (void *)psContextInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + (PFN_HANDLE_RELEASE) & + _DevmemIntAcquireRemoteCtxpsContextIntRelease); + if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntAcquireRemoteCtx_exit; + } + + psDevmemIntAcquireRemoteCtxOUT->eError = + PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntAcquireRemoteCtxOUT-> + hPrivData, (void *)hPrivDataInt, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psDevmemIntAcquireRemoteCtxOUT-> + hContext); + if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntAcquireRemoteCtx_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntAcquireRemoteCtx_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK) + { + if (psDevmemIntAcquireRemoteCtxOUT->hContext) + { + PVRSRV_ERROR eError; + + /* Lock over handle creation cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + eError = + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + (IMG_HANDLE) + psDevmemIntAcquireRemoteCtxOUT-> + hContext, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + if (unlikely + ((eError != PVRSRV_OK) + && (eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) + || (eError == PVRSRV_ERROR_RETRY)); + + /* Avoid freeing/destroying/releasing the resource a second time below */ + psContextInt = NULL; + /* Release now we have cleaned up creation handles. */ + UnlockHandle(psConnection->psHandleBase); + + } + + if (psContextInt) + { + DevmemIntCtxDestroy(psContextInt); + } + } + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +#endif /* EXCLUDE_CMM_BRIDGE */ + +#if !defined(EXCLUDE_CMM_BRIDGE) +PVRSRV_ERROR InitCMMBridge(void); +PVRSRV_ERROR DeinitCMMBridge(void); + +/* + * Register all CMM functions with services + */ +PVRSRV_ERROR InitCMMBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, + PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX, + PVRSRVBridgeDevmemIntExportCtx, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, + PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX, + PVRSRVBridgeDevmemIntUnexportCtx, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, + PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX, + PVRSRVBridgeDevmemIntAcquireRemoteCtx, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all cmm functions with services + */ +PVRSRV_ERROR DeinitCMMBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, + PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, + PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, + 
PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX); + + return PVRSRV_OK; +} +#else /* EXCLUDE_CMM_BRIDGE */ +/* This bridge is conditional on EXCLUDE_CMM_BRIDGE - when defined, + * do not populate the dispatch table with its functions + */ +#define InitCMMBridge() \ + PVRSRV_OK + +#define DeinitCMMBridge() \ + PVRSRV_OK + +#endif /* EXCLUDE_CMM_BRIDGE */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/devicememhistory_bridge/client_devicememhistory_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/devicememhistory_bridge/client_devicememhistory_bridge.h new file mode 100644 index 000000000000..4fc6ec1855a2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/devicememhistory_bridge/client_devicememhistory_bridge.h @@ -0,0 +1,132 @@ +/******************************************************************************* +@File +@Title Client bridge header for devicememhistory +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for devicememhistory +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef CLIENT_DEVICEMEMHISTORY_BRIDGE_H +#define CLIENT_DEVICEMEMHISTORY_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_devicememhistory_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryMap(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMR, + IMG_DEVMEM_SIZE_T + uiOffset, + IMG_DEV_VIRTADDR + sDevVAddr, + IMG_DEVMEM_SIZE_T + uiSize, + const IMG_CHAR + * puiText, + IMG_UINT32 + ui32Log2PageSize, + IMG_UINT32 + ui32AllocationIndex, + IMG_UINT32 * + pui32AllocationIndexOut); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryUnmap(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMR, + IMG_DEVMEM_SIZE_T + uiOffset, + IMG_DEV_VIRTADDR + sDevVAddr, + IMG_DEVMEM_SIZE_T + uiSize, + const + IMG_CHAR * + puiText, + IMG_UINT32 + ui32Log2PageSize, + IMG_UINT32 + ui32AllocationIndex, + IMG_UINT32 * + pui32AllocationIndexOut); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevicememHistoryMapVRange(IMG_HANDLE hBridge, + IMG_DEV_VIRTADDR sBaseDevVAddr, + IMG_UINT32 ui32ui32StartPage, + IMG_UINT32 ui32NumPages, + IMG_DEVMEM_SIZE_T uiAllocSize, + const IMG_CHAR * puiText, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 * pui32AllocationIndexOut); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevicememHistoryUnmapVRange(IMG_HANDLE hBridge, + IMG_DEV_VIRTADDR sBaseDevVAddr, + IMG_UINT32 ui32ui32StartPage, + IMG_UINT32 ui32NumPages, + IMG_DEVMEM_SIZE_T uiAllocSize, + const IMG_CHAR * puiText, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 * pui32AllocationIndexOut); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevicememHistorySparseChange(IMG_HANDLE hBridge, IMG_HANDLE hPMR, + IMG_DEVMEM_SIZE_T uiOffset, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + const 
IMG_CHAR * puiText, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 * pui32AllocPageIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 * pui32FreePageIndices, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 * pui32AllocationIndexOut); + +#endif /* CLIENT_DEVICEMEMHISTORY_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/devicememhistory_bridge/client_devicememhistory_direct_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/devicememhistory_bridge/client_devicememhistory_direct_bridge.c new file mode 100644 index 000000000000..33336f46f61d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/devicememhistory_bridge/client_devicememhistory_direct_bridge.c @@ -0,0 +1,221 @@ +/******************************************************************************* +@File +@Title Direct client bridge for devicememhistory +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for devicememhistory + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include "client_devicememhistory_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "img_types.h" +#include "img_defs.h" +#include "devicemem_typedefs.h" + +#include "devicemem_history_server.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryMap(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMR, + IMG_DEVMEM_SIZE_T + uiOffset, + IMG_DEV_VIRTADDR + sDevVAddr, + IMG_DEVMEM_SIZE_T + uiSize, + const IMG_CHAR + * puiText, + IMG_UINT32 + ui32Log2PageSize, + IMG_UINT32 + ui32AllocationIndex, + IMG_UINT32 * + pui32AllocationIndexOut) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + DevicememHistoryMapKM(psPMRInt, + uiOffset, + sDevVAddr, + uiSize, + puiText, + ui32Log2PageSize, + ui32AllocationIndex, pui32AllocationIndexOut); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryUnmap(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMR, + IMG_DEVMEM_SIZE_T + uiOffset, + IMG_DEV_VIRTADDR + sDevVAddr, + IMG_DEVMEM_SIZE_T + uiSize, + const + IMG_CHAR * + puiText, + IMG_UINT32 + ui32Log2PageSize, + IMG_UINT32 + ui32AllocationIndex, + IMG_UINT32 * + pui32AllocationIndexOut) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + DevicememHistoryUnmapKM(psPMRInt, + uiOffset, + sDevVAddr, + uiSize, + puiText, + ui32Log2PageSize, + ui32AllocationIndex, + pui32AllocationIndexOut); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevicememHistoryMapVRange(IMG_HANDLE hBridge, + IMG_DEV_VIRTADDR sBaseDevVAddr, + IMG_UINT32 ui32ui32StartPage, + IMG_UINT32 ui32NumPages, + IMG_DEVMEM_SIZE_T uiAllocSize, + const IMG_CHAR * puiText, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 * pui32AllocationIndexOut) +{ + PVRSRV_ERROR 
eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = + DevicememHistoryMapVRangeKM(sBaseDevVAddr, + ui32ui32StartPage, + ui32NumPages, + uiAllocSize, + puiText, + ui32Log2PageSize, + ui32AllocationIndex, + pui32AllocationIndexOut); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevicememHistoryUnmapVRange(IMG_HANDLE hBridge, + IMG_DEV_VIRTADDR sBaseDevVAddr, + IMG_UINT32 ui32ui32StartPage, + IMG_UINT32 ui32NumPages, + IMG_DEVMEM_SIZE_T uiAllocSize, + const IMG_CHAR * puiText, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 * pui32AllocationIndexOut) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = + DevicememHistoryUnmapVRangeKM(sBaseDevVAddr, + ui32ui32StartPage, + ui32NumPages, + uiAllocSize, + puiText, + ui32Log2PageSize, + ui32AllocationIndex, + pui32AllocationIndexOut); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevicememHistorySparseChange(IMG_HANDLE hBridge, IMG_HANDLE hPMR, + IMG_DEVMEM_SIZE_T uiOffset, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR * puiText, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 * pui32AllocPageIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 * pui32FreePageIndices, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 * pui32AllocationIndexOut) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + DevicememHistorySparseChangeKM(psPMRInt, + uiOffset, + sDevVAddr, + uiSize, + puiText, + ui32Log2PageSize, + ui32AllocPageCount, + pui32AllocPageIndices, + ui32FreePageCount, + pui32FreePageIndices, + ui32AllocationIndex, + pui32AllocationIndexOut); + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/devicememhistory_bridge/common_devicememhistory_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/devicememhistory_bridge/common_devicememhistory_bridge.h new file 
mode 100644 index 000000000000..da7af1bb74d7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/devicememhistory_bridge/common_devicememhistory_bridge.h @@ -0,0 +1,184 @@ +/******************************************************************************* +@File +@Title Common bridge header for devicememhistory +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for devicememhistory +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_DEVICEMEMHISTORY_BRIDGE_H +#define COMMON_DEVICEMEMHISTORY_BRIDGE_H + +#include <powervr/mem_types.h> + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "img_types.h" +#include "img_defs.h" +#include "devicemem_typedefs.h" + +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST 0 +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+0 +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+1 +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+2 +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+3 +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+4 +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_LAST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+4) + +/******************************************* + DevicememHistoryMap + *******************************************/ + +/* Bridge in structure for DevicememHistoryMap */ +typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP_TAG +{ + IMG_HANDLE hPMR; + IMG_DEVMEM_SIZE_T uiOffset; + IMG_DEV_VIRTADDR sDevVAddr; + IMG_DEVMEM_SIZE_T uiSize; + const IMG_CHAR *puiText; + 
IMG_UINT32 ui32Log2PageSize; + IMG_UINT32 ui32AllocationIndex; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP; + +/* Bridge out structure for DevicememHistoryMap */ +typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP_TAG +{ + IMG_UINT32 ui32AllocationIndexOut; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP; + +/******************************************* + DevicememHistoryUnmap + *******************************************/ + +/* Bridge in structure for DevicememHistoryUnmap */ +typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP_TAG +{ + IMG_HANDLE hPMR; + IMG_DEVMEM_SIZE_T uiOffset; + IMG_DEV_VIRTADDR sDevVAddr; + IMG_DEVMEM_SIZE_T uiSize; + const IMG_CHAR *puiText; + IMG_UINT32 ui32Log2PageSize; + IMG_UINT32 ui32AllocationIndex; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP; + +/* Bridge out structure for DevicememHistoryUnmap */ +typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP_TAG +{ + IMG_UINT32 ui32AllocationIndexOut; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP; + +/******************************************* + DevicememHistoryMapVRange + *******************************************/ + +/* Bridge in structure for DevicememHistoryMapVRange */ +typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE_TAG +{ + IMG_DEV_VIRTADDR sBaseDevVAddr; + IMG_UINT32 ui32ui32StartPage; + IMG_UINT32 ui32NumPages; + IMG_DEVMEM_SIZE_T uiAllocSize; + const IMG_CHAR *puiText; + IMG_UINT32 ui32Log2PageSize; + IMG_UINT32 ui32AllocationIndex; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE; + +/* Bridge out structure for DevicememHistoryMapVRange */ +typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE_TAG +{ + IMG_UINT32 ui32AllocationIndexOut; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE; + +/******************************************* + DevicememHistoryUnmapVRange + 
*******************************************/ + +/* Bridge in structure for DevicememHistoryUnmapVRange */ +typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE_TAG +{ + IMG_DEV_VIRTADDR sBaseDevVAddr; + IMG_UINT32 ui32ui32StartPage; + IMG_UINT32 ui32NumPages; + IMG_DEVMEM_SIZE_T uiAllocSize; + const IMG_CHAR *puiText; + IMG_UINT32 ui32Log2PageSize; + IMG_UINT32 ui32AllocationIndex; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE; + +/* Bridge out structure for DevicememHistoryUnmapVRange */ +typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE_TAG +{ + IMG_UINT32 ui32AllocationIndexOut; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE; + +/******************************************* + DevicememHistorySparseChange + *******************************************/ + +/* Bridge in structure for DevicememHistorySparseChange */ +typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE_TAG +{ + IMG_HANDLE hPMR; + IMG_DEVMEM_SIZE_T uiOffset; + IMG_DEV_VIRTADDR sDevVAddr; + IMG_DEVMEM_SIZE_T uiSize; + const IMG_CHAR *puiText; + IMG_UINT32 ui32Log2PageSize; + IMG_UINT32 ui32AllocPageCount; + IMG_UINT32 *pui32AllocPageIndices; + IMG_UINT32 ui32FreePageCount; + IMG_UINT32 *pui32FreePageIndices; + IMG_UINT32 ui32AllocationIndex; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE; + +/* Bridge out structure for DevicememHistorySparseChange */ +typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE_TAG +{ + IMG_UINT32 ui32AllocationIndexOut; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE; + +#endif /* COMMON_DEVICEMEMHISTORY_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/devicememhistory_bridge/server_devicememhistory_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/devicememhistory_bridge/server_devicememhistory_bridge.c new file mode 100644 index 000000000000..98d4a7c388e4 --- 
/dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/devicememhistory_bridge/server_devicememhistory_bridge.c @@ -0,0 +1,885 @@ +/******************************************************************************* +@File +@Title Server bridge for devicememhistory +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for devicememhistory +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include <linux/uaccess.h> + +#include "img_defs.h" + +#include "devicemem_history_server.h" + +#include "common_devicememhistory_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include <linux/slab.h> + +#include "lock.h" + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeDevicememHistoryMap(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevicememHistoryMapIN_UI8, + IMG_UINT8 * psDevicememHistoryMapOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP *psDevicememHistoryMapIN = + (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP *) + IMG_OFFSET_ADDR(psDevicememHistoryMapIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP *psDevicememHistoryMapOUT = + (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP *) + IMG_OFFSET_ADDR(psDevicememHistoryMapOUT_UI8, 0); + + IMG_HANDLE hPMR = psDevicememHistoryMapIN->hPMR; + PMR *psPMRInt = NULL; + IMG_CHAR *uiTextInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; + + if 
(ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psDevicememHistoryMapIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psDevicememHistoryMapIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psDevicememHistoryMapOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto DevicememHistoryMap_exit; + } + } + } + + { + uiTextInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiTextInt, + (const void __user *)psDevicememHistoryMapIN->puiText, + DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psDevicememHistoryMapOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto DevicememHistoryMap_exit; + } + ((IMG_CHAR *) + uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevicememHistoryMapOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psDevicememHistoryMapOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevicememHistoryMap_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psDevicememHistoryMapOUT->eError = + DevicememHistoryMapKM(psPMRInt, + psDevicememHistoryMapIN->uiOffset, + psDevicememHistoryMapIN->sDevVAddr, + psDevicememHistoryMapIN->uiSize, + uiTextInt, + psDevicememHistoryMapIN->ui32Log2PageSize, + psDevicememHistoryMapIN->ui32AllocationIndex, + &psDevicememHistoryMapOUT-> + ui32AllocationIndexOut); + +DevicememHistoryMap_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevicememHistoryUnmap(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevicememHistoryUnmapIN_UI8, + IMG_UINT8 * psDevicememHistoryUnmapOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP *psDevicememHistoryUnmapIN = + (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP *) + IMG_OFFSET_ADDR(psDevicememHistoryUnmapIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP *psDevicememHistoryUnmapOUT = + (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP *) + IMG_OFFSET_ADDR(psDevicememHistoryUnmapOUT_UI8, 0); + + IMG_HANDLE hPMR = psDevicememHistoryUnmapIN->hPMR; + PMR *psPMRInt = NULL; + IMG_CHAR *uiTextInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; + + if (ui32BufferSize != 0) 
+ { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psDevicememHistoryUnmapIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psDevicememHistoryUnmapIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psDevicememHistoryUnmapOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto DevicememHistoryUnmap_exit; + } + } + } + + { + uiTextInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiTextInt, + (const void __user *)psDevicememHistoryUnmapIN->puiText, + DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psDevicememHistoryUnmapOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto DevicememHistoryUnmap_exit; + } + ((IMG_CHAR *) + uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevicememHistoryUnmapOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psDevicememHistoryUnmapOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevicememHistoryUnmap_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psDevicememHistoryUnmapOUT->eError = + DevicememHistoryUnmapKM(psPMRInt, + psDevicememHistoryUnmapIN->uiOffset, + psDevicememHistoryUnmapIN->sDevVAddr, + psDevicememHistoryUnmapIN->uiSize, + uiTextInt, + psDevicememHistoryUnmapIN->ui32Log2PageSize, + psDevicememHistoryUnmapIN-> + ui32AllocationIndex, + &psDevicememHistoryUnmapOUT-> + ui32AllocationIndexOut); + +DevicememHistoryUnmap_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevicememHistoryMapVRange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psDevicememHistoryMapVRangeIN_UI8, + IMG_UINT8 * + psDevicememHistoryMapVRangeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE + *psDevicememHistoryMapVRangeIN = + (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE *) + IMG_OFFSET_ADDR(psDevicememHistoryMapVRangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE + *psDevicememHistoryMapVRangeOUT = + (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE *) + IMG_OFFSET_ADDR(psDevicememHistoryMapVRangeOUT_UI8, 0); + + IMG_CHAR *uiTextInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; + + 
PVR_UNREFERENCED_PARAMETER(psConnection); + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psDevicememHistoryMapVRangeIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psDevicememHistoryMapVRangeIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psDevicememHistoryMapVRangeOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto DevicememHistoryMapVRange_exit; + } + } + } + + { + uiTextInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiTextInt, + (const void __user *)psDevicememHistoryMapVRangeIN-> + puiText, + DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psDevicememHistoryMapVRangeOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto DevicememHistoryMapVRange_exit; + } + ((IMG_CHAR *) + uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + + psDevicememHistoryMapVRangeOUT->eError = + DevicememHistoryMapVRangeKM(psDevicememHistoryMapVRangeIN-> + sBaseDevVAddr, + psDevicememHistoryMapVRangeIN-> + ui32ui32StartPage, + psDevicememHistoryMapVRangeIN-> + ui32NumPages, + psDevicememHistoryMapVRangeIN-> + uiAllocSize, uiTextInt, + psDevicememHistoryMapVRangeIN-> + ui32Log2PageSize, + psDevicememHistoryMapVRangeIN-> + ui32AllocationIndex, + &psDevicememHistoryMapVRangeOUT-> + 
ui32AllocationIndexOut); + +DevicememHistoryMapVRange_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevicememHistoryUnmapVRange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psDevicememHistoryUnmapVRangeIN_UI8, + IMG_UINT8 * + psDevicememHistoryUnmapVRangeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE + *psDevicememHistoryUnmapVRangeIN = + (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE *) + IMG_OFFSET_ADDR(psDevicememHistoryUnmapVRangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE + *psDevicememHistoryUnmapVRangeOUT = + (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE *) + IMG_OFFSET_ADDR(psDevicememHistoryUnmapVRangeOUT_UI8, 0); + + IMG_CHAR *uiTextInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psDevicememHistoryUnmapVRangeIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *) + psDevicememHistoryUnmapVRangeIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psDevicememHistoryUnmapVRangeOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto DevicememHistoryUnmapVRange_exit; + } + } + } + + { + uiTextInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiTextInt, + (const void __user *)psDevicememHistoryUnmapVRangeIN-> + puiText, + DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psDevicememHistoryUnmapVRangeOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto DevicememHistoryUnmapVRange_exit; + } + ((IMG_CHAR *) + uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + + psDevicememHistoryUnmapVRangeOUT->eError = + DevicememHistoryUnmapVRangeKM(psDevicememHistoryUnmapVRangeIN-> + sBaseDevVAddr, + psDevicememHistoryUnmapVRangeIN-> + ui32ui32StartPage, + psDevicememHistoryUnmapVRangeIN-> + ui32NumPages, + psDevicememHistoryUnmapVRangeIN-> + uiAllocSize, uiTextInt, + psDevicememHistoryUnmapVRangeIN-> + ui32Log2PageSize, + psDevicememHistoryUnmapVRangeIN-> + ui32AllocationIndex, + &psDevicememHistoryUnmapVRangeOUT-> + ui32AllocationIndexOut); + +DevicememHistoryUnmapVRange_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT 
+PVRSRVBridgeDevicememHistorySparseChange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psDevicememHistorySparseChangeIN_UI8, + IMG_UINT8 * + psDevicememHistorySparseChangeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE + *psDevicememHistorySparseChangeIN = + (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE *) + IMG_OFFSET_ADDR(psDevicememHistorySparseChangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE + *psDevicememHistorySparseChangeOUT = + (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE *) + IMG_OFFSET_ADDR(psDevicememHistorySparseChangeOUT_UI8, 0); + + IMG_HANDLE hPMR = psDevicememHistorySparseChangeIN->hPMR; + PMR *psPMRInt = NULL; + IMG_CHAR *uiTextInt = NULL; + IMG_UINT32 *ui32AllocPageIndicesInt = NULL; + IMG_UINT32 *ui32FreePageIndicesInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + + (psDevicememHistorySparseChangeIN->ui32AllocPageCount * + sizeof(IMG_UINT32)) + + (psDevicememHistorySparseChangeIN->ui32FreePageCount * + sizeof(IMG_UINT32)) + 0; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psDevicememHistorySparseChangeIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *) + psDevicememHistorySparseChangeIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psDevicememHistorySparseChangeOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto DevicememHistorySparseChange_exit; + } + } + } + + { + uiTextInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiTextInt, + (const void __user *)psDevicememHistorySparseChangeIN-> + puiText, + DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psDevicememHistorySparseChangeOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto DevicememHistorySparseChange_exit; + } + ((IMG_CHAR *) + uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + if (psDevicememHistorySparseChangeIN->ui32AllocPageCount != 0) + { + ui32AllocPageIndicesInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psDevicememHistorySparseChangeIN->ui32AllocPageCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psDevicememHistorySparseChangeIN->ui32AllocPageCount * + sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32AllocPageIndicesInt, + (const void __user *)psDevicememHistorySparseChangeIN-> + pui32AllocPageIndices, + psDevicememHistorySparseChangeIN->ui32AllocPageCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psDevicememHistorySparseChangeOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto DevicememHistorySparseChange_exit; + } + } + if (psDevicememHistorySparseChangeIN->ui32FreePageCount != 0) + { + 
ui32FreePageIndicesInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psDevicememHistorySparseChangeIN->ui32FreePageCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psDevicememHistorySparseChangeIN->ui32FreePageCount * + sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32FreePageIndicesInt, + (const void __user *)psDevicememHistorySparseChangeIN-> + pui32FreePageIndices, + psDevicememHistorySparseChangeIN->ui32FreePageCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psDevicememHistorySparseChangeOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto DevicememHistorySparseChange_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevicememHistorySparseChangeOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psDevicememHistorySparseChangeOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevicememHistorySparseChange_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevicememHistorySparseChangeOUT->eError = + DevicememHistorySparseChangeKM(psPMRInt, + psDevicememHistorySparseChangeIN-> + uiOffset, + psDevicememHistorySparseChangeIN-> + sDevVAddr, + psDevicememHistorySparseChangeIN-> + uiSize, uiTextInt, + psDevicememHistorySparseChangeIN-> + ui32Log2PageSize, + psDevicememHistorySparseChangeIN-> + ui32AllocPageCount, + ui32AllocPageIndicesInt, + psDevicememHistorySparseChangeIN-> + ui32FreePageCount, + ui32FreePageIndicesInt, + psDevicememHistorySparseChangeIN-> + ui32AllocationIndex, + &psDevicememHistorySparseChangeOUT-> + ui32AllocationIndexOut); + +DevicememHistorySparseChange_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +static POS_LOCK pDEVICEMEMHISTORYBridgeLock; + +PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void); +PVRSRV_ERROR DeinitDEVICEMEMHISTORYBridge(void); + +/* + * Register all DEVICEMEMHISTORY functions with services + */ +PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void) +{ + PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&pDEVICEMEMHISTORYBridgeLock), + "OSLockCreate"); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP, + PVRSRVBridgeDevicememHistoryMap, + pDEVICEMEMHISTORYBridgeLock); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP, + PVRSRVBridgeDevicememHistoryUnmap, + pDEVICEMEMHISTORYBridgeLock); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE, + PVRSRVBridgeDevicememHistoryMapVRange, + pDEVICEMEMHISTORYBridgeLock); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE, + PVRSRVBridgeDevicememHistoryUnmapVRange, + pDEVICEMEMHISTORYBridgeLock); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE, + 
PVRSRVBridgeDevicememHistorySparseChange, + pDEVICEMEMHISTORYBridgeLock); + + return PVRSRV_OK; +} + +/* + * Unregister all devicememhistory functions with services + */ +PVRSRV_ERROR DeinitDEVICEMEMHISTORYBridge(void) +{ + PVR_LOG_RETURN_IF_ERROR(OSLockDestroy(pDEVICEMEMHISTORYBridgeLock), + "OSLockDestroy"); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/dmabuf_bridge/common_dmabuf_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/dmabuf_bridge/common_dmabuf_bridge.h new file mode 100644 index 000000000000..9c7e8254bde8 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/dmabuf_bridge/common_dmabuf_bridge.h @@ -0,0 +1,126 @@ +/******************************************************************************* +@File +@Title Common bridge header for dmabuf +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for dmabuf +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_DMABUF_BRIDGE_H +#define COMMON_DMABUF_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "pvrsrv_memallocflags.h" + +#define PVRSRV_BRIDGE_DMABUF_CMD_FIRST 0 +#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+0 +#define PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+1 +#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+2 +#define PVRSRV_BRIDGE_DMABUF_CMD_LAST (PVRSRV_BRIDGE_DMABUF_CMD_FIRST+2) + +/******************************************* + PhysmemImportDmaBuf + *******************************************/ + +/* Bridge in structure for PhysmemImportDmaBuf */ +typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF_TAG +{ + IMG_INT ifd; + PVRSRV_MEMALLOCFLAGS_T uiFlags; + IMG_UINT32 ui32NameSize; + const IMG_CHAR *puiName; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF; + +/* Bridge out structure for PhysmemImportDmaBuf */ +typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF_TAG +{ + IMG_HANDLE hPMRPtr; + IMG_DEVMEM_SIZE_T uiSize; + IMG_DEVMEM_ALIGN_T sAlign; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF; + +/******************************************* + PhysmemExportDmaBuf + *******************************************/ + +/* Bridge in structure for PhysmemExportDmaBuf */ +typedef struct PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF_TAG +{ + IMG_HANDLE hPMR; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF; + +/* Bridge out structure for PhysmemExportDmaBuf */ +typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF_TAG +{ + IMG_INT iFd; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF; + +/******************************************* + PhysmemImportSparseDmaBuf + *******************************************/ + +/* Bridge in structure for 
PhysmemImportSparseDmaBuf */ +typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF_TAG +{ + IMG_INT ifd; + PVRSRV_MEMALLOCFLAGS_T uiFlags; + IMG_DEVMEM_SIZE_T uiChunkSize; + IMG_UINT32 ui32NumPhysChunks; + IMG_UINT32 ui32NumVirtChunks; + IMG_UINT32 *pui32MappingTable; + IMG_UINT32 ui32NameSize; + const IMG_CHAR *puiName; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF; + +/* Bridge out structure for PhysmemImportSparseDmaBuf */ +typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF_TAG +{ + IMG_HANDLE hPMRPtr; + IMG_DEVMEM_SIZE_T uiSize; + IMG_DEVMEM_ALIGN_T sAlign; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF; + +#endif /* COMMON_DMABUF_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/dmabuf_bridge/server_dmabuf_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/dmabuf_bridge/server_dmabuf_bridge.c new file mode 100644 index 000000000000..ea6e460ba056 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/dmabuf_bridge/server_dmabuf_bridge.c @@ -0,0 +1,545 @@ +/******************************************************************************* +@File +@Title Server bridge for dmabuf +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for dmabuf +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "physmem_dmabuf.h" +#include "pmr.h" + +#include "common_dmabuf_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _PhysmemImportDmaBufpsPMRPtrIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnrefPMR((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePhysmemImportDmaBuf(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPhysmemImportDmaBufIN_UI8, + IMG_UINT8 * psPhysmemImportDmaBufOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF *psPhysmemImportDmaBufIN = + (PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF *) + IMG_OFFSET_ADDR(psPhysmemImportDmaBufIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF *psPhysmemImportDmaBufOUT = + (PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF *) + IMG_OFFSET_ADDR(psPhysmemImportDmaBufOUT_UI8, 0); + + IMG_CHAR *uiNameInt = NULL; + PMR *psPMRPtrInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) + 0; + + if (unlikely + (psPhysmemImportDmaBufIN->ui32NameSize > DEVMEM_ANNOTATION_MAX_LEN)) + { + psPhysmemImportDmaBufOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysmemImportDmaBuf_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psPhysmemImportDmaBufIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psPhysmemImportDmaBufIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psPhysmemImportDmaBufOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto PhysmemImportDmaBuf_exit; + } + } + } + + if (psPhysmemImportDmaBufIN->ui32NameSize != 0) + { + uiNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiNameInt, + (const void __user *)psPhysmemImportDmaBufIN->puiName, + psPhysmemImportDmaBufIN->ui32NameSize * + sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psPhysmemImportDmaBufOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysmemImportDmaBuf_exit; + } + ((IMG_CHAR *) + uiNameInt)[(psPhysmemImportDmaBufIN->ui32NameSize * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + + psPhysmemImportDmaBufOUT->eError = + PhysmemImportDmaBuf(psConnection, OSGetDevNode(psConnection), + psPhysmemImportDmaBufIN->ifd, + psPhysmemImportDmaBufIN->uiFlags, + psPhysmemImportDmaBufIN->ui32NameSize, + uiNameInt, + &psPMRPtrInt, + &psPhysmemImportDmaBufOUT->uiSize, + &psPhysmemImportDmaBufOUT->sAlign); + /* Exit early if bridged call fails */ + if (unlikely(psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)) + { + goto PhysmemImportDmaBuf_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psPhysmemImportDmaBufOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psPhysmemImportDmaBufOUT->hPMRPtr, + (void *)psPMRPtrInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PhysmemImportDmaBufpsPMRPtrIntRelease); + if (unlikely(psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PhysmemImportDmaBuf_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +PhysmemImportDmaBuf_exit: + + if (psPhysmemImportDmaBufOUT->eError != PVRSRV_OK) + { + if (psPMRPtrInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnrefPMR(psPMRPtrInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgePhysmemExportDmaBuf(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPhysmemExportDmaBufIN_UI8, + IMG_UINT8 * psPhysmemExportDmaBufOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF *psPhysmemExportDmaBufIN = + (PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF *) + IMG_OFFSET_ADDR(psPhysmemExportDmaBufIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF *psPhysmemExportDmaBufOUT = + (PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF *) + IMG_OFFSET_ADDR(psPhysmemExportDmaBufOUT_UI8, 0); + + IMG_HANDLE hPMR = psPhysmemExportDmaBufIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPhysmemExportDmaBufOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psPhysmemExportDmaBufOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PhysmemExportDmaBuf_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPhysmemExportDmaBufOUT->eError = + PhysmemExportDmaBuf(psConnection, OSGetDevNode(psConnection), + psPMRInt, &psPhysmemExportDmaBufOUT->iFd); + +PhysmemExportDmaBuf_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static PVRSRV_ERROR _PhysmemImportSparseDmaBufpsPMRPtrIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnrefPMR((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePhysmemImportSparseDmaBuf(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psPhysmemImportSparseDmaBufIN_UI8, + IMG_UINT8 * + psPhysmemImportSparseDmaBufOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF + *psPhysmemImportSparseDmaBufIN = + (PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF *) + IMG_OFFSET_ADDR(psPhysmemImportSparseDmaBufIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF + *psPhysmemImportSparseDmaBufOUT = + (PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF *) + IMG_OFFSET_ADDR(psPhysmemImportSparseDmaBufOUT_UI8, 0); + + IMG_UINT32 *ui32MappingTableInt = NULL; + IMG_CHAR *uiNameInt = NULL; + PMR *psPMRPtrInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer 
= NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * + sizeof(IMG_UINT32)) + + (psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) + + 0; + + if (unlikely + (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks > + PMR_MAX_SUPPORTED_PAGE_COUNT)) + { + psPhysmemImportSparseDmaBufOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysmemImportSparseDmaBuf_exit; + } + + if (unlikely + (psPhysmemImportSparseDmaBufIN->ui32NameSize > + DEVMEM_ANNOTATION_MAX_LEN)) + { + psPhysmemImportSparseDmaBufOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysmemImportSparseDmaBuf_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psPhysmemImportSparseDmaBufIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psPhysmemImportSparseDmaBufIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psPhysmemImportSparseDmaBufOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto PhysmemImportSparseDmaBuf_exit; + } + } + } + + if (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks != 0) + { + ui32MappingTableInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * + sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32MappingTableInt, + (const void __user *)psPhysmemImportSparseDmaBufIN-> + pui32MappingTable, + psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psPhysmemImportSparseDmaBufOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysmemImportSparseDmaBuf_exit; + } + } + if (psPhysmemImportSparseDmaBufIN->ui32NameSize != 0) + { + uiNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psPhysmemImportSparseDmaBufIN->ui32NameSize * + sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiNameInt, + (const void __user *)psPhysmemImportSparseDmaBufIN-> + puiName, + psPhysmemImportSparseDmaBufIN->ui32NameSize * + sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psPhysmemImportSparseDmaBufOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysmemImportSparseDmaBuf_exit; + } + ((IMG_CHAR *) + uiNameInt)[(psPhysmemImportSparseDmaBufIN->ui32NameSize * + sizeof(IMG_CHAR)) - 1] = 
'\0'; + } + + psPhysmemImportSparseDmaBufOUT->eError = + PhysmemImportSparseDmaBuf(psConnection, OSGetDevNode(psConnection), + psPhysmemImportSparseDmaBufIN->ifd, + psPhysmemImportSparseDmaBufIN->uiFlags, + psPhysmemImportSparseDmaBufIN-> + uiChunkSize, + psPhysmemImportSparseDmaBufIN-> + ui32NumPhysChunks, + psPhysmemImportSparseDmaBufIN-> + ui32NumVirtChunks, ui32MappingTableInt, + psPhysmemImportSparseDmaBufIN-> + ui32NameSize, uiNameInt, &psPMRPtrInt, + &psPhysmemImportSparseDmaBufOUT->uiSize, + &psPhysmemImportSparseDmaBufOUT->sAlign); + /* Exit early if bridged call fails */ + if (unlikely(psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK)) + { + goto PhysmemImportSparseDmaBuf_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psPhysmemImportSparseDmaBufOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psPhysmemImportSparseDmaBufOUT->hPMRPtr, + (void *)psPMRPtrInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PhysmemImportSparseDmaBufpsPMRPtrIntRelease); + if (unlikely(psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PhysmemImportSparseDmaBuf_exit; + } + + /* Release now we have created handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +PhysmemImportSparseDmaBuf_exit: + + if (psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK) + { + if (psPMRPtrInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnrefPMR(psPMRPtrInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitDMABUFBridge(void); +PVRSRV_ERROR DeinitDMABUFBridge(void); + +/* + * Register all DMABUF functions with services + */ +PVRSRV_ERROR InitDMABUFBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, + PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF, + PVRSRVBridgePhysmemImportDmaBuf, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, + PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF, + PVRSRVBridgePhysmemExportDmaBuf, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, + PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF, + PVRSRVBridgePhysmemImportSparseDmaBuf, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all dmabuf functions with services + */ +PVRSRV_ERROR DeinitDMABUFBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, + PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, + PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, + PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/htbuffer_bridge/client_htbuffer_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/htbuffer_bridge/client_htbuffer_bridge.h new file mode 100644 index 000000000000..c32b7591bb14 --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/generated/volcanic/htbuffer_bridge/client_htbuffer_bridge.h @@ -0,0 +1,75 @@ +/******************************************************************************* +@File +@Title Client bridge header for htbuffer +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for htbuffer +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef CLIENT_HTBUFFER_BRIDGE_H +#define CLIENT_HTBUFFER_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_htbuffer_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBControl(IMG_HANDLE hBridge, + IMG_UINT32 + ui32NumGroups, + IMG_UINT32 * + pui32GroupEnable, + IMG_UINT32 ui32LogLevel, + IMG_UINT32 + ui32EnablePID, + IMG_UINT32 ui32LogMode, + IMG_UINT32 ui32OpMode); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBLog(IMG_HANDLE hBridge, + IMG_UINT32 ui32PID, + IMG_UINT64 ui64TimeStamp, + IMG_UINT32 ui32SF, + IMG_UINT32 ui32NumArgs, + IMG_UINT32 * pui32Args); + +#endif /* CLIENT_HTBUFFER_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/htbuffer_bridge/client_htbuffer_direct_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/htbuffer_bridge/client_htbuffer_direct_bridge.c new file mode 100644 index 000000000000..699da1f02468 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/htbuffer_bridge/client_htbuffer_direct_bridge.c @@ -0,0 +1,91 @@ +/******************************************************************************* +@File +@Title Direct client bridge for htbuffer +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Implements the client side of the bridge for htbuffer + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include "client_htbuffer_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "devicemem_typedefs.h" +#include "htbuffer_types.h" + +#include "htbserver.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBControl(IMG_HANDLE hBridge, + IMG_UINT32 + ui32NumGroups, + IMG_UINT32 * + pui32GroupEnable, + IMG_UINT32 ui32LogLevel, + IMG_UINT32 + ui32EnablePID, + IMG_UINT32 ui32LogMode, + IMG_UINT32 ui32OpMode) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = + HTBControlKM(ui32NumGroups, + pui32GroupEnable, + ui32LogLevel, ui32EnablePID, ui32LogMode, ui32OpMode); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBLog(IMG_HANDLE hBridge, + IMG_UINT32 ui32PID, + IMG_UINT64 ui64TimeStamp, + IMG_UINT32 ui32SF, + IMG_UINT32 ui32NumArgs, + IMG_UINT32 * pui32Args) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = + HTBLogKM(ui32PID, ui64TimeStamp, ui32SF, ui32NumArgs, pui32Args); + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/htbuffer_bridge/common_htbuffer_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/htbuffer_bridge/common_htbuffer_bridge.h new file mode 100644 index 000000000000..a936da34e0a1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/htbuffer_bridge/common_htbuffer_bridge.h @@ -0,0 
+1,102 @@ +/******************************************************************************* +@File +@Title Common bridge header for htbuffer +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for htbuffer +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_HTBUFFER_BRIDGE_H +#define COMMON_HTBUFFER_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "devicemem_typedefs.h" +#include "htbuffer_types.h" + +#define PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST 0 +#define PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+0 +#define PVRSRV_BRIDGE_HTBUFFER_HTBLOG PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+1 +#define PVRSRV_BRIDGE_HTBUFFER_CMD_LAST (PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+1) + +/******************************************* + HTBControl + *******************************************/ + +/* Bridge in structure for HTBControl */ +typedef struct PVRSRV_BRIDGE_IN_HTBCONTROL_TAG +{ + IMG_UINT32 ui32NumGroups; + IMG_UINT32 *pui32GroupEnable; + IMG_UINT32 ui32LogLevel; + IMG_UINT32 ui32EnablePID; + IMG_UINT32 ui32LogMode; + IMG_UINT32 ui32OpMode; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_HTBCONTROL; + +/* Bridge out structure for HTBControl */ +typedef struct PVRSRV_BRIDGE_OUT_HTBCONTROL_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_HTBCONTROL; + +/******************************************* + HTBLog + *******************************************/ + +/* Bridge in structure for HTBLog */ +typedef struct PVRSRV_BRIDGE_IN_HTBLOG_TAG +{ + IMG_UINT32 ui32PID; + IMG_UINT64 ui64TimeStamp; + IMG_UINT32 ui32SF; + IMG_UINT32 ui32NumArgs; + IMG_UINT32 
*pui32Args; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_HTBLOG; + +/* Bridge out structure for HTBLog */ +typedef struct PVRSRV_BRIDGE_OUT_HTBLOG_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_HTBLOG; + +#endif /* COMMON_HTBUFFER_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/htbuffer_bridge/server_htbuffer_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/htbuffer_bridge/server_htbuffer_bridge.c new file mode 100644 index 000000000000..451f11e8c696 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/htbuffer_bridge/server_htbuffer_bridge.c @@ -0,0 +1,346 @@ +/******************************************************************************* +@File +@Title Server bridge for htbuffer +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for htbuffer +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "htbserver.h" + +#include "common_htbuffer_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +#include "lock.h" + +#if !defined(EXCLUDE_HTBUFFER_BRIDGE) + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeHTBControl(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psHTBControlIN_UI8, + IMG_UINT8 * psHTBControlOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_HTBCONTROL *psHTBControlIN = + (PVRSRV_BRIDGE_IN_HTBCONTROL *) IMG_OFFSET_ADDR(psHTBControlIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_HTBCONTROL *psHTBControlOUT = + (PVRSRV_BRIDGE_OUT_HTBCONTROL *) + IMG_OFFSET_ADDR(psHTBControlOUT_UI8, 0); + + IMG_UINT32 *ui32GroupEnableInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32)) + 0; + + if (unlikely(psHTBControlIN->ui32NumGroups > HTB_FLAG_NUM_EL)) + { + psHTBControlOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto HTBControl_exit; + } + + PVR_UNREFERENCED_PARAMETER(psConnection); + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psHTBControlIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psHTBControlIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psHTBControlOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto HTBControl_exit; + } + } + } + + if (psHTBControlIN->ui32NumGroups != 0) + { + ui32GroupEnableInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32GroupEnableInt, + (const void __user *)psHTBControlIN->pui32GroupEnable, + psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32)) != + PVRSRV_OK) + { + psHTBControlOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto HTBControl_exit; + } + } + + psHTBControlOUT->eError = + HTBControlKM(psHTBControlIN->ui32NumGroups, + ui32GroupEnableInt, + psHTBControlIN->ui32LogLevel, + psHTBControlIN->ui32EnablePID, + psHTBControlIN->ui32LogMode, + psHTBControlIN->ui32OpMode); + +HTBControl_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeHTBLog(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psHTBLogIN_UI8, + IMG_UINT8 * psHTBLogOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_HTBLOG *psHTBLogIN = + (PVRSRV_BRIDGE_IN_HTBLOG *) IMG_OFFSET_ADDR(psHTBLogIN_UI8, 0); + PVRSRV_BRIDGE_OUT_HTBLOG *psHTBLogOUT = + (PVRSRV_BRIDGE_OUT_HTBLOG *) IMG_OFFSET_ADDR(psHTBLogOUT_UI8, 0); + + IMG_UINT32 
*ui32ArgsInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) + 0; + + if (unlikely(psHTBLogIN->ui32NumArgs > HTB_LOG_MAX_PARAMS)) + { + psHTBLogOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto HTBLog_exit; + } + + PVR_UNREFERENCED_PARAMETER(psConnection); + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psHTBLogIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psHTBLogIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psHTBLogOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto HTBLog_exit; + } + } + } + + if (psHTBLogIN->ui32NumArgs != 0) + { + ui32ArgsInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ArgsInt, + (const void __user *)psHTBLogIN->pui32Args, + psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psHTBLogOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto HTBLog_exit; + } + } + + psHTBLogOUT->eError = + HTBLogKM(psHTBLogIN->ui32PID, + psHTBLogIN->ui64TimeStamp, + psHTBLogIN->ui32SF, psHTBLogIN->ui32NumArgs, ui32ArgsInt); + +HTBLog_exit: + + /* Allocated space should be equal to the last 
updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +static POS_LOCK pHTBUFFERBridgeLock; + +#endif /* EXCLUDE_HTBUFFER_BRIDGE */ + +#if !defined(EXCLUDE_HTBUFFER_BRIDGE) +PVRSRV_ERROR InitHTBUFFERBridge(void); +PVRSRV_ERROR DeinitHTBUFFERBridge(void); + +/* + * Register all HTBUFFER functions with services + */ +PVRSRV_ERROR InitHTBUFFERBridge(void) +{ + PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&pHTBUFFERBridgeLock), + "OSLockCreate"); + + SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, + PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL, + PVRSRVBridgeHTBControl, pHTBUFFERBridgeLock); + + SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, + PVRSRV_BRIDGE_HTBUFFER_HTBLOG, PVRSRVBridgeHTBLog, + pHTBUFFERBridgeLock); + + return PVRSRV_OK; +} + +/* + * Unregister all htbuffer functions with services + */ +PVRSRV_ERROR DeinitHTBUFFERBridge(void) +{ + PVR_LOG_RETURN_IF_ERROR(OSLockDestroy(pHTBUFFERBridgeLock), + "OSLockDestroy"); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, + PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, + PVRSRV_BRIDGE_HTBUFFER_HTBLOG); + + return PVRSRV_OK; +} +#else /* EXCLUDE_HTBUFFER_BRIDGE */ +/* This bridge is conditional on EXCLUDE_HTBUFFER_BRIDGE - when defined, + * do not populate the dispatch table with its functions + */ +#define InitHTBUFFERBridge() \ + PVRSRV_OK + +#define DeinitHTBUFFERBridge() \ + PVRSRV_OK + +#endif /* EXCLUDE_HTBUFFER_BRIDGE */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/mm_bridge/client_mm_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/mm_bridge/client_mm_bridge.h new file mode 100644 index 000000000000..b3315bbf45f4 --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/generated/volcanic/mm_bridge/client_mm_bridge.h @@ -0,0 +1,377 @@ +/******************************************************************************* +@File +@Title Client bridge header for mm +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for mm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef CLIENT_MM_BRIDGE_H +#define CLIENT_MM_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_mm_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRExportPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_HANDLE * + phPMRExport, + IMG_UINT64 * + pui64Size, + IMG_UINT32 * + pui32Log2Contig, + IMG_UINT64 * + pui64Password); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnexportPMR(IMG_HANDLE hBridge, + IMG_HANDLE + hPMRExport); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRGetUID(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_UINT64 * pui64UID); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRMakeLocalImportHandle(IMG_HANDLE + hBridge, + IMG_HANDLE + hBuffer, + IMG_HANDLE + * + phExtMem); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgePMRUnmakeLocalImportHandle(IMG_HANDLE hBridge, IMG_HANDLE hExtMem); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRImportPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMRExport, + IMG_UINT64 + ui64uiPassword, + IMG_UINT64 ui64uiSize, + IMG_UINT32 + ui32uiLog2Contig, + IMG_HANDLE * phPMR); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRLocalImportPMR(IMG_HANDLE + hBridge, + IMG_HANDLE + hExtHandle, + IMG_HANDLE * + phPMR, + IMG_DEVMEM_SIZE_T + * puiSize, + 
IMG_DEVMEM_ALIGN_T + * psAlign); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMR); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefUnlockPMR(IMG_HANDLE + hBridge, + IMG_HANDLE hPMR); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemNewRamBackedPMR(IMG_HANDLE + hBridge, + IMG_DEVMEM_SIZE_T + uiSize, + IMG_DEVMEM_SIZE_T + uiChunkSize, + IMG_UINT32 + ui32NumPhysChunks, + IMG_UINT32 + ui32NumVirtChunks, + IMG_UINT32 * + pui32MappingTable, + IMG_UINT32 + ui32Log2PageSize, + PVRSRV_MEMALLOCFLAGS_T + uiFlags, + IMG_UINT32 + ui32AnnotationLength, + const + IMG_CHAR * + puiAnnotation, + IMG_PID + ui32PID, + IMG_HANDLE * + phPMRPtr, + IMG_UINT32 + ui32PDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge, IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 * pui32MappingTable, + IMG_UINT32 ui32Log2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 ui32AnnotationLength, + const IMG_CHAR * puiAnnotation, + IMG_PID ui32PID, IMG_HANDLE * phPMRPtr, + IMG_UINT32 ui32PDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPin(IMG_HANDLE hBridge, + IMG_HANDLE hPMR); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpin(IMG_HANDLE hBridge, + IMG_HANDLE hPMR); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPinValidate(IMG_HANDLE + hBridge, + IMG_HANDLE + hMapping, + IMG_HANDLE + hPMR); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpinInvalidate(IMG_HANDLE + hBridge, + IMG_HANDLE + hMapping, + IMG_HANDLE + hPMR); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxCreate(IMG_HANDLE + hBridge, + IMG_BOOL + bbKernelMemoryCtx, + IMG_HANDLE * + phDevMemServerContext, + IMG_HANDLE * + phPrivData, + IMG_UINT32 * + pui32CPUCacheLineSize); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxDestroy(IMG_HANDLE + hBridge, + 
IMG_HANDLE + hDevmemServerContext); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapCreate(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_DEV_VIRTADDR + sHeapBaseAddr, + IMG_DEVMEM_SIZE_T + uiHeapLength, + IMG_UINT32 + ui32Log2DataPageSize, + IMG_HANDLE * + phDevmemHeapPtr); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapDestroy(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemHeap); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPMR(IMG_HANDLE hBridge, + IMG_HANDLE + hDevmemServerHeap, + IMG_HANDLE + hReservation, + IMG_HANDLE hPMR, + PVRSRV_MEMALLOCFLAGS_T + uiMapFlags, + IMG_HANDLE * + phMapping); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPMR(IMG_HANDLE + hBridge, + IMG_HANDLE + hMapping); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntReserveRange(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemServerHeap, + IMG_DEV_VIRTADDR + sAddress, + IMG_DEVMEM_SIZE_T + uiLength, + IMG_HANDLE * + phReservation); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnreserveRange(IMG_HANDLE + hBridge, + IMG_HANDLE + hReservation); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeChangeSparseMem(IMG_HANDLE hBridge, + IMG_HANDLE + hSrvDevMemHeap, + IMG_HANDLE hPMR, + IMG_UINT32 + ui32AllocPageCount, + IMG_UINT32 * + pui32AllocPageIndices, + IMG_UINT32 + ui32FreePageCount, + IMG_UINT32 * + pui32FreePageIndices, + IMG_UINT32 + ui32SparseFlags, + PVRSRV_MEMALLOCFLAGS_T + uiFlags, + IMG_DEV_VIRTADDR + sDevVAddr, + IMG_UINT64 + ui64CPUVAddr); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPages(IMG_HANDLE + hBridge, + IMG_HANDLE + hReservation, + IMG_HANDLE hPMR, + IMG_UINT32 + ui32PageCount, + IMG_UINT32 + ui32PhysicalPgOffset, + PVRSRV_MEMALLOCFLAGS_T + uiFlags, + IMG_DEV_VIRTADDR + sDevVAddr); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPages(IMG_HANDLE + hBridge, + IMG_HANDLE + hReservation, + IMG_DEV_VIRTADDR + sDevVAddr, + IMG_UINT32 + ui32PageCount); + +IMG_INTERNAL 
PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIsVDevAddrValid(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_DEV_VIRTADDR + sAddress); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemFlushDevSLCRange(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_DEV_VIRTADDR + sAddress, + IMG_DEVMEM_SIZE_T + uiSize, + IMG_BOOL + bInvalidate); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevmemInvalidateFBSCTable(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx, + IMG_UINT64 ui64FBSCEntries); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigCount(IMG_HANDLE + hBridge, + IMG_UINT32 * + pui32NumHeapConfigs); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapCount(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32HeapConfigIndex, + IMG_UINT32 * + pui32NumHeaps); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigName(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32HeapConfigIndex, + IMG_UINT32 + ui32HeapConfigNameBufSz, + IMG_CHAR * + puiHeapConfigName); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapDetails(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32HeapConfigIndex, + IMG_UINT32 + ui32HeapIndex, + IMG_UINT32 + ui32HeapNameBufSz, + IMG_CHAR * + puiHeapNameOut, + IMG_DEV_VIRTADDR + * + psDevVAddrBase, + IMG_DEVMEM_SIZE_T + * puiHeapLength, + IMG_DEVMEM_SIZE_T + * + puiReservedRegionLength, + IMG_UINT32 * + pui32Log2DataPageSizeOut, + IMG_UINT32 * + pui32Log2ImportAlignmentOut); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx, + IMG_UINT32 ui32PID, IMG_BOOL bRegister); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeGetMaxDevMemSize(IMG_HANDLE + hBridge, + IMG_DEVMEM_SIZE_T + * puiLMASize, + IMG_DEVMEM_SIZE_T + * puiUMASize); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemGetFaultAddress(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_DEV_VIRTADDR + * + psFaultAddress); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVUpdateOOMStats(IMG_HANDLE + hBridge, 
+ IMG_UINT32 + ui32ui32StatType, + IMG_PID + ui32pid); + +#endif /* CLIENT_MM_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/mm_bridge/client_mm_direct_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/mm_bridge/client_mm_direct_bridge.c new file mode 100644 index 000000000000..32affebeaa7a --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/mm_bridge/client_mm_direct_bridge.c @@ -0,0 +1,887 @@ +/******************************************************************************* +@File +@Title Direct client bridge for mm +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for mm + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include "client_mm_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "pvrsrv_memallocflags.h" +#include "devicemem_typedefs.h" + +#include "devicemem.h" +#include "devicemem_server.h" +#include "pmr.h" +#include "devicemem_heapcfg.h" +#include "physmem.h" +#include "devicemem_utils.h" +#include "process_stats.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRExportPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_HANDLE * + phPMRExport, + IMG_UINT64 * + pui64Size, + IMG_UINT32 * + pui32Log2Contig, + IMG_UINT64 * + pui64Password) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PMR_EXPORT *psPMRExportInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + PMRExportPMR(psPMRInt, + &psPMRExportInt, + pui64Size, pui32Log2Contig, pui64Password); + + *phPMRExport = psPMRExportInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnexportPMR(IMG_HANDLE hBridge, + IMG_HANDLE + hPMRExport) +{ + PVRSRV_ERROR eError; + PMR_EXPORT *psPMRExportInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRExportInt = (PMR_EXPORT *) hPMRExport; + + eError = PMRUnexportPMR(psPMRExportInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRGetUID(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_UINT64 * pui64UID) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = PMRGetUID(psPMRInt, pui64UID); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRMakeLocalImportHandle(IMG_HANDLE + hBridge, + IMG_HANDLE + hBuffer, + IMG_HANDLE + * + phExtMem) +{ + PVRSRV_ERROR eError; + PMR *psBufferInt; + PMR *psExtMemInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psBufferInt = (PMR *) hBuffer; + + eError = PMRMakeLocalImportHandle(psBufferInt, &psExtMemInt); + + *phExtMem = psExtMemInt; + return 
eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgePMRUnmakeLocalImportHandle(IMG_HANDLE hBridge, IMG_HANDLE hExtMem) +{ + PVRSRV_ERROR eError; + PMR *psExtMemInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psExtMemInt = (PMR *) hExtMem; + + eError = PMRUnmakeLocalImportHandle(psExtMemInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRImportPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMRExport, + IMG_UINT64 + ui64uiPassword, + IMG_UINT64 ui64uiSize, + IMG_UINT32 + ui32uiLog2Contig, + IMG_HANDLE * phPMR) +{ + PVRSRV_ERROR eError; + PMR_EXPORT *psPMRExportInt; + PMR *psPMRInt = NULL; + + psPMRExportInt = (PMR_EXPORT *) hPMRExport; + + eError = + PhysmemImportPMR(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + psPMRExportInt, + ui64uiPassword, + ui64uiSize, ui32uiLog2Contig, &psPMRInt); + + *phPMR = psPMRInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRLocalImportPMR(IMG_HANDLE + hBridge, + IMG_HANDLE + hExtHandle, + IMG_HANDLE * + phPMR, + IMG_DEVMEM_SIZE_T + * puiSize, + IMG_DEVMEM_ALIGN_T + * psAlign) +{ + PVRSRV_ERROR eError; + PMR *psExtHandleInt; + PMR *psPMRInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psExtHandleInt = (PMR *) hExtHandle; + + eError = PMRLocalImportPMR(psExtHandleInt, &psPMRInt, puiSize, psAlign); + + *phPMR = psPMRInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMR) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = PMRUnrefPMR(psPMRInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefUnlockPMR(IMG_HANDLE + hBridge, + IMG_HANDLE hPMR) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = PMRUnrefUnlockPMR(psPMRInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemNewRamBackedPMR(IMG_HANDLE + hBridge, + 
IMG_DEVMEM_SIZE_T + uiSize, + IMG_DEVMEM_SIZE_T + uiChunkSize, + IMG_UINT32 + ui32NumPhysChunks, + IMG_UINT32 + ui32NumVirtChunks, + IMG_UINT32 * + pui32MappingTable, + IMG_UINT32 + ui32Log2PageSize, + PVRSRV_MEMALLOCFLAGS_T + uiFlags, + IMG_UINT32 + ui32AnnotationLength, + const + IMG_CHAR * + puiAnnotation, + IMG_PID + ui32PID, + IMG_HANDLE * + phPMRPtr, + IMG_UINT32 + ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + PMR *psPMRPtrInt = NULL; + + eError = + PhysmemNewRamBackedPMR(NULL, + (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + uiSize, uiChunkSize, ui32NumPhysChunks, + ui32NumVirtChunks, pui32MappingTable, + ui32Log2PageSize, uiFlags, + ui32AnnotationLength, puiAnnotation, ui32PID, + &psPMRPtrInt, ui32PDumpFlags); + + *phPMRPtr = psPMRPtrInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge, IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 * pui32MappingTable, + IMG_UINT32 ui32Log2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 ui32AnnotationLength, + const IMG_CHAR * puiAnnotation, + IMG_PID ui32PID, IMG_HANDLE * phPMRPtr, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + PMR *psPMRPtrInt = NULL; + + eError = + PhysmemNewRamBackedLockedPMR(NULL, + (PVRSRV_DEVICE_NODE *) ((void *) + hBridge), + uiSize, uiChunkSize, ui32NumPhysChunks, + ui32NumVirtChunks, pui32MappingTable, + ui32Log2PageSize, uiFlags, + ui32AnnotationLength, puiAnnotation, + ui32PID, &psPMRPtrInt, ui32PDumpFlags); + + *phPMRPtr = psPMRPtrInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPin(IMG_HANDLE hBridge, + IMG_HANDLE hPMR) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = DevmemIntPin(psPMRInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpin(IMG_HANDLE hBridge, + IMG_HANDLE hPMR) +{ + 
PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = DevmemIntUnpin(psPMRInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPinValidate(IMG_HANDLE + hBridge, + IMG_HANDLE + hMapping, + IMG_HANDLE + hPMR) +{ + PVRSRV_ERROR eError; + DEVMEMINT_MAPPING *psMappingInt; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psMappingInt = (DEVMEMINT_MAPPING *) hMapping; + psPMRInt = (PMR *) hPMR; + + eError = DevmemIntPinValidate(psMappingInt, psPMRInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpinInvalidate(IMG_HANDLE + hBridge, + IMG_HANDLE + hMapping, + IMG_HANDLE + hPMR) +{ + PVRSRV_ERROR eError; + DEVMEMINT_MAPPING *psMappingInt; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psMappingInt = (DEVMEMINT_MAPPING *) hMapping; + psPMRInt = (PMR *) hPMR; + + eError = DevmemIntUnpinInvalidate(psMappingInt, psPMRInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxCreate(IMG_HANDLE + hBridge, + IMG_BOOL + bbKernelMemoryCtx, + IMG_HANDLE * + phDevMemServerContext, + IMG_HANDLE * + phPrivData, + IMG_UINT32 * + pui32CPUCacheLineSize) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevMemServerContextInt = NULL; + IMG_HANDLE hPrivDataInt = NULL; + + eError = + DevmemIntCtxCreate(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + bbKernelMemoryCtx, + &psDevMemServerContextInt, + &hPrivDataInt, pui32CPUCacheLineSize); + + *phDevMemServerContext = psDevMemServerContextInt; + *phPrivData = hPrivDataInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxDestroy(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemServerContext) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemServerContextInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemServerContextInt = (DEVMEMINT_CTX *) hDevmemServerContext; + + eError = DevmemIntCtxDestroy(psDevmemServerContextInt); + + return 
eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapCreate(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_DEV_VIRTADDR + sHeapBaseAddr, + IMG_DEVMEM_SIZE_T + uiHeapLength, + IMG_UINT32 + ui32Log2DataPageSize, + IMG_HANDLE * + phDevmemHeapPtr) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtxInt; + DEVMEMINT_HEAP *psDevmemHeapPtrInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = + DevmemIntHeapCreate(psDevmemCtxInt, + sHeapBaseAddr, + uiHeapLength, + ui32Log2DataPageSize, &psDevmemHeapPtrInt); + + *phDevmemHeapPtr = psDevmemHeapPtrInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapDestroy(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemHeap) +{ + PVRSRV_ERROR eError; + DEVMEMINT_HEAP *psDevmemHeapInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemHeapInt = (DEVMEMINT_HEAP *) hDevmemHeap; + + eError = DevmemIntHeapDestroy(psDevmemHeapInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPMR(IMG_HANDLE hBridge, + IMG_HANDLE + hDevmemServerHeap, + IMG_HANDLE + hReservation, + IMG_HANDLE hPMR, + PVRSRV_MEMALLOCFLAGS_T + uiMapFlags, + IMG_HANDLE * + phMapping) +{ + PVRSRV_ERROR eError; + DEVMEMINT_HEAP *psDevmemServerHeapInt; + DEVMEMINT_RESERVATION *psReservationInt; + PMR *psPMRInt; + DEVMEMINT_MAPPING *psMappingInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap; + psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; + psPMRInt = (PMR *) hPMR; + + eError = + DevmemIntMapPMR(psDevmemServerHeapInt, + psReservationInt, + psPMRInt, uiMapFlags, &psMappingInt); + + *phMapping = psMappingInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPMR(IMG_HANDLE + hBridge, + IMG_HANDLE + hMapping) +{ + PVRSRV_ERROR eError; + DEVMEMINT_MAPPING *psMappingInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psMappingInt = 
(DEVMEMINT_MAPPING *) hMapping; + + eError = DevmemIntUnmapPMR(psMappingInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntReserveRange(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemServerHeap, + IMG_DEV_VIRTADDR + sAddress, + IMG_DEVMEM_SIZE_T + uiLength, + IMG_HANDLE * + phReservation) +{ + PVRSRV_ERROR eError; + DEVMEMINT_HEAP *psDevmemServerHeapInt; + DEVMEMINT_RESERVATION *psReservationInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap; + + eError = + DevmemIntReserveRange(psDevmemServerHeapInt, + sAddress, uiLength, &psReservationInt); + + *phReservation = psReservationInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnreserveRange(IMG_HANDLE + hBridge, + IMG_HANDLE + hReservation) +{ + PVRSRV_ERROR eError; + DEVMEMINT_RESERVATION *psReservationInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; + + eError = DevmemIntUnreserveRange(psReservationInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeChangeSparseMem(IMG_HANDLE hBridge, + IMG_HANDLE + hSrvDevMemHeap, + IMG_HANDLE hPMR, + IMG_UINT32 + ui32AllocPageCount, + IMG_UINT32 * + pui32AllocPageIndices, + IMG_UINT32 + ui32FreePageCount, + IMG_UINT32 * + pui32FreePageIndices, + IMG_UINT32 + ui32SparseFlags, + PVRSRV_MEMALLOCFLAGS_T + uiFlags, + IMG_DEV_VIRTADDR + sDevVAddr, + IMG_UINT64 + ui64CPUVAddr) +{ + PVRSRV_ERROR eError; + DEVMEMINT_HEAP *psSrvDevMemHeapInt; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSrvDevMemHeapInt = (DEVMEMINT_HEAP *) hSrvDevMemHeap; + psPMRInt = (PMR *) hPMR; + + eError = + DevmemIntChangeSparse(psSrvDevMemHeapInt, + psPMRInt, + ui32AllocPageCount, + pui32AllocPageIndices, + ui32FreePageCount, + pui32FreePageIndices, + ui32SparseFlags, + uiFlags, sDevVAddr, ui64CPUVAddr); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV 
BridgeDevmemIntMapPages(IMG_HANDLE + hBridge, + IMG_HANDLE + hReservation, + IMG_HANDLE hPMR, + IMG_UINT32 + ui32PageCount, + IMG_UINT32 + ui32PhysicalPgOffset, + PVRSRV_MEMALLOCFLAGS_T + uiFlags, + IMG_DEV_VIRTADDR + sDevVAddr) +{ + PVRSRV_ERROR eError; + DEVMEMINT_RESERVATION *psReservationInt; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; + psPMRInt = (PMR *) hPMR; + + eError = + DevmemIntMapPages(psReservationInt, + psPMRInt, + ui32PageCount, + ui32PhysicalPgOffset, uiFlags, sDevVAddr); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPages(IMG_HANDLE + hBridge, + IMG_HANDLE + hReservation, + IMG_DEV_VIRTADDR + sDevVAddr, + IMG_UINT32 + ui32PageCount) +{ + PVRSRV_ERROR eError; + DEVMEMINT_RESERVATION *psReservationInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; + + eError = + DevmemIntUnmapPages(psReservationInt, sDevVAddr, ui32PageCount); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIsVDevAddrValid(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_DEV_VIRTADDR + sAddress) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtxInt; + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = + DevmemIntIsVDevAddrValid(NULL, + (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + psDevmemCtxInt, sAddress); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemFlushDevSLCRange(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_DEV_VIRTADDR + sAddress, + IMG_DEVMEM_SIZE_T + uiSize, + IMG_BOOL + bInvalidate) +{ +#if defined(RGX_SRV_SLC_RANGEBASED_CFI_SUPPORTED) + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtxInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = + DevmemIntFlushDevSLCRange(psDevmemCtxInt, + sAddress, uiSize, bInvalidate); + + return eError; +#else + 
PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(hDevmemCtx); + PVR_UNREFERENCED_PARAMETER(sAddress); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(bInvalidate); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevmemInvalidateFBSCTable(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx, + IMG_UINT64 ui64FBSCEntries) +{ +#if defined(RGX_FEATURE_FBCDC) + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtxInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = DevmemIntInvalidateFBSCTable(psDevmemCtxInt, ui64FBSCEntries); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(hDevmemCtx); + PVR_UNREFERENCED_PARAMETER(ui64FBSCEntries); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigCount(IMG_HANDLE + hBridge, + IMG_UINT32 * + pui32NumHeapConfigs) +{ + PVRSRV_ERROR eError; + + eError = + HeapCfgHeapConfigCount(NULL, + (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + pui32NumHeapConfigs); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapCount(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32HeapConfigIndex, + IMG_UINT32 * + pui32NumHeaps) +{ + PVRSRV_ERROR eError; + + eError = + HeapCfgHeapCount(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + ui32HeapConfigIndex, pui32NumHeaps); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigName(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32HeapConfigIndex, + IMG_UINT32 + ui32HeapConfigNameBufSz, + IMG_CHAR * + puiHeapConfigName) +{ + PVRSRV_ERROR eError; + + eError = + HeapCfgHeapConfigName(NULL, + (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + ui32HeapConfigIndex, ui32HeapConfigNameBufSz, + puiHeapConfigName); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapDetails(IMG_HANDLE + hBridge, + IMG_UINT32 + 
ui32HeapConfigIndex, + IMG_UINT32 + ui32HeapIndex, + IMG_UINT32 + ui32HeapNameBufSz, + IMG_CHAR * + puiHeapNameOut, + IMG_DEV_VIRTADDR + * + psDevVAddrBase, + IMG_DEVMEM_SIZE_T + * puiHeapLength, + IMG_DEVMEM_SIZE_T + * + puiReservedRegionLength, + IMG_UINT32 * + pui32Log2DataPageSizeOut, + IMG_UINT32 * + pui32Log2ImportAlignmentOut) +{ + PVRSRV_ERROR eError; + + eError = + HeapCfgHeapDetails(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + ui32HeapConfigIndex, + ui32HeapIndex, + ui32HeapNameBufSz, + puiHeapNameOut, + psDevVAddrBase, + puiHeapLength, + puiReservedRegionLength, + pui32Log2DataPageSizeOut, + pui32Log2ImportAlignmentOut); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx, + IMG_UINT32 ui32PID, IMG_BOOL bRegister) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtxInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = + DevmemIntRegisterPFNotifyKM(psDevmemCtxInt, ui32PID, bRegister); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeGetMaxDevMemSize(IMG_HANDLE + hBridge, + IMG_DEVMEM_SIZE_T + * puiLMASize, + IMG_DEVMEM_SIZE_T + * puiUMASize) +{ + PVRSRV_ERROR eError; + + eError = + PVRSRVGetMaxDevMemSizeKM(NULL, + (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + puiLMASize, puiUMASize); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemGetFaultAddress(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_DEV_VIRTADDR + * + psFaultAddress) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtxInt; + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = + DevmemIntGetFaultAddress(NULL, + (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + psDevmemCtxInt, psFaultAddress); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVUpdateOOMStats(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32ui32StatType, + IMG_PID + ui32pid) +{ +#if 
defined(PVRSRV_ENABLE_PROCESS_STATS) + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = PVRSRVServerUpdateOOMStats(ui32ui32StatType, ui32pid); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(ui32ui32StatType); + PVR_UNREFERENCED_PARAMETER(ui32pid); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/mm_bridge/common_mm_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/mm_bridge/common_mm_bridge.h new file mode 100644 index 000000000000..f622e3aaf507 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/mm_bridge/common_mm_bridge.h @@ -0,0 +1,782 @@ +/******************************************************************************* +@File +@Title Common bridge header for mm +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for mm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_MM_BRIDGE_H +#define COMMON_MM_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "pvrsrv_memallocflags.h" +#include "devicemem_typedefs.h" + +#define PVRSRV_BRIDGE_MM_CMD_FIRST 0 +#define PVRSRV_BRIDGE_MM_PMREXPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+0 +#define PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+1 +#define PVRSRV_BRIDGE_MM_PMRGETUID PVRSRV_BRIDGE_MM_CMD_FIRST+2 +#define PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE PVRSRV_BRIDGE_MM_CMD_FIRST+3 +#define PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE PVRSRV_BRIDGE_MM_CMD_FIRST+4 +#define PVRSRV_BRIDGE_MM_PMRIMPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+5 +#define PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+6 +#define PVRSRV_BRIDGE_MM_PMRUNREFPMR PVRSRV_BRIDGE_MM_CMD_FIRST+7 +#define PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR PVRSRV_BRIDGE_MM_CMD_FIRST+8 +#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR PVRSRV_BRIDGE_MM_CMD_FIRST+9 +#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR PVRSRV_BRIDGE_MM_CMD_FIRST+10 +#define PVRSRV_BRIDGE_MM_DEVMEMINTPIN PVRSRV_BRIDGE_MM_CMD_FIRST+11 +#define PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN PVRSRV_BRIDGE_MM_CMD_FIRST+12 +#define PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE PVRSRV_BRIDGE_MM_CMD_FIRST+13 +#define PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE PVRSRV_BRIDGE_MM_CMD_FIRST+14 +#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+15 +#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+16 +#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+17 +#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+18 +#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+19 +#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+20 +#define PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+21 +#define PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE 
PVRSRV_BRIDGE_MM_CMD_FIRST+22 +#define PVRSRV_BRIDGE_MM_CHANGESPARSEMEM PVRSRV_BRIDGE_MM_CMD_FIRST+23 +#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+24 +#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+25 +#define PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID PVRSRV_BRIDGE_MM_CMD_FIRST+26 +#define PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE PVRSRV_BRIDGE_MM_CMD_FIRST+27 +#define PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE PVRSRV_BRIDGE_MM_CMD_FIRST+28 +#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+29 +#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+30 +#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME PVRSRV_BRIDGE_MM_CMD_FIRST+31 +#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS PVRSRV_BRIDGE_MM_CMD_FIRST+32 +#define PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM PVRSRV_BRIDGE_MM_CMD_FIRST+33 +#define PVRSRV_BRIDGE_MM_GETMAXDEVMEMSIZE PVRSRV_BRIDGE_MM_CMD_FIRST+34 +#define PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS PVRSRV_BRIDGE_MM_CMD_FIRST+35 +#define PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS PVRSRV_BRIDGE_MM_CMD_FIRST+36 +#define PVRSRV_BRIDGE_MM_CMD_LAST (PVRSRV_BRIDGE_MM_CMD_FIRST+36) + +/******************************************* + PMRExportPMR + *******************************************/ + +/* Bridge in structure for PMRExportPMR */ +typedef struct PVRSRV_BRIDGE_IN_PMREXPORTPMR_TAG +{ + IMG_HANDLE hPMR; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMREXPORTPMR; + +/* Bridge out structure for PMRExportPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PMREXPORTPMR_TAG +{ + IMG_HANDLE hPMRExport; + IMG_UINT64 ui64Size; + IMG_UINT32 ui32Log2Contig; + IMG_UINT64 ui64Password; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMREXPORTPMR; + +/******************************************* + PMRUnexportPMR + *******************************************/ + +/* Bridge in structure for PMRUnexportPMR */ +typedef struct PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR_TAG +{ + IMG_HANDLE hPMRExport; +} 
__attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR; + +/* Bridge out structure for PMRUnexportPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR; + +/******************************************* + PMRGetUID + *******************************************/ + +/* Bridge in structure for PMRGetUID */ +typedef struct PVRSRV_BRIDGE_IN_PMRGETUID_TAG +{ + IMG_HANDLE hPMR; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRGETUID; + +/* Bridge out structure for PMRGetUID */ +typedef struct PVRSRV_BRIDGE_OUT_PMRGETUID_TAG +{ + IMG_UINT64 ui64UID; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRGETUID; + +/******************************************* + PMRMakeLocalImportHandle + *******************************************/ + +/* Bridge in structure for PMRMakeLocalImportHandle */ +typedef struct PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE_TAG +{ + IMG_HANDLE hBuffer; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE; + +/* Bridge out structure for PMRMakeLocalImportHandle */ +typedef struct PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE_TAG +{ + IMG_HANDLE hExtMem; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE; + +/******************************************* + PMRUnmakeLocalImportHandle + *******************************************/ + +/* Bridge in structure for PMRUnmakeLocalImportHandle */ +typedef struct PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE_TAG +{ + IMG_HANDLE hExtMem; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE; + +/* Bridge out structure for PMRUnmakeLocalImportHandle */ +typedef struct PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE; + +/******************************************* + PMRImportPMR + *******************************************/ + +/* Bridge in 
structure for PMRImportPMR */ +typedef struct PVRSRV_BRIDGE_IN_PMRIMPORTPMR_TAG +{ + IMG_HANDLE hPMRExport; + IMG_UINT64 ui64uiPassword; + IMG_UINT64 ui64uiSize; + IMG_UINT32 ui32uiLog2Contig; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRIMPORTPMR; + +/* Bridge out structure for PMRImportPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PMRIMPORTPMR_TAG +{ + IMG_HANDLE hPMR; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRIMPORTPMR; + +/******************************************* + PMRLocalImportPMR + *******************************************/ + +/* Bridge in structure for PMRLocalImportPMR */ +typedef struct PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR_TAG +{ + IMG_HANDLE hExtHandle; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR; + +/* Bridge out structure for PMRLocalImportPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR_TAG +{ + IMG_HANDLE hPMR; + IMG_DEVMEM_SIZE_T uiSize; + IMG_DEVMEM_ALIGN_T sAlign; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR; + +/******************************************* + PMRUnrefPMR + *******************************************/ + +/* Bridge in structure for PMRUnrefPMR */ +typedef struct PVRSRV_BRIDGE_IN_PMRUNREFPMR_TAG +{ + IMG_HANDLE hPMR; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRUNREFPMR; + +/* Bridge out structure for PMRUnrefPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFPMR_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRUNREFPMR; + +/******************************************* + PMRUnrefUnlockPMR + *******************************************/ + +/* Bridge in structure for PMRUnrefUnlockPMR */ +typedef struct PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR_TAG +{ + IMG_HANDLE hPMR; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR; + +/* Bridge out structure for PMRUnrefUnlockPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) 
PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR; + +/******************************************* + PhysmemNewRamBackedPMR + *******************************************/ + +/* Bridge in structure for PhysmemNewRamBackedPMR */ +typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR_TAG +{ + IMG_DEVMEM_SIZE_T uiSize; + IMG_DEVMEM_SIZE_T uiChunkSize; + IMG_UINT32 ui32NumPhysChunks; + IMG_UINT32 ui32NumVirtChunks; + IMG_UINT32 *pui32MappingTable; + IMG_UINT32 ui32Log2PageSize; + PVRSRV_MEMALLOCFLAGS_T uiFlags; + IMG_UINT32 ui32AnnotationLength; + const IMG_CHAR *puiAnnotation; + IMG_PID ui32PID; + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR; + +/* Bridge out structure for PhysmemNewRamBackedPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR_TAG +{ + IMG_HANDLE hPMRPtr; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR; + +/******************************************* + PhysmemNewRamBackedLockedPMR + *******************************************/ + +/* Bridge in structure for PhysmemNewRamBackedLockedPMR */ +typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG +{ + IMG_DEVMEM_SIZE_T uiSize; + IMG_DEVMEM_SIZE_T uiChunkSize; + IMG_UINT32 ui32NumPhysChunks; + IMG_UINT32 ui32NumVirtChunks; + IMG_UINT32 *pui32MappingTable; + IMG_UINT32 ui32Log2PageSize; + PVRSRV_MEMALLOCFLAGS_T uiFlags; + IMG_UINT32 ui32AnnotationLength; + const IMG_CHAR *puiAnnotation; + IMG_PID ui32PID; + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR; + +/* Bridge out structure for PhysmemNewRamBackedLockedPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG +{ + IMG_HANDLE hPMRPtr; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR; + +/******************************************* + DevmemIntPin + *******************************************/ + +/* Bridge in structure for DevmemIntPin 
*/ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPIN_TAG +{ + IMG_HANDLE hPMR; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTPIN; + +/* Bridge out structure for DevmemIntPin */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPIN_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTPIN; + +/******************************************* + DevmemIntUnpin + *******************************************/ + +/* Bridge in structure for DevmemIntUnpin */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN_TAG +{ + IMG_HANDLE hPMR; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN; + +/* Bridge out structure for DevmemIntUnpin */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN; + +/******************************************* + DevmemIntPinValidate + *******************************************/ + +/* Bridge in structure for DevmemIntPinValidate */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE_TAG +{ + IMG_HANDLE hMapping; + IMG_HANDLE hPMR; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE; + +/* Bridge out structure for DevmemIntPinValidate */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE; + +/******************************************* + DevmemIntUnpinInvalidate + *******************************************/ + +/* Bridge in structure for DevmemIntUnpinInvalidate */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE_TAG +{ + IMG_HANDLE hMapping; + IMG_HANDLE hPMR; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE; + +/* Bridge out structure for DevmemIntUnpinInvalidate */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE; + +/******************************************* + DevmemIntCtxCreate 
+ *******************************************/ + +/* Bridge in structure for DevmemIntCtxCreate */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE_TAG +{ + IMG_BOOL bbKernelMemoryCtx; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE; + +/* Bridge out structure for DevmemIntCtxCreate */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE_TAG +{ + IMG_HANDLE hDevMemServerContext; + IMG_HANDLE hPrivData; + IMG_UINT32 ui32CPUCacheLineSize; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE; + +/******************************************* + DevmemIntCtxDestroy + *******************************************/ + +/* Bridge in structure for DevmemIntCtxDestroy */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY_TAG +{ + IMG_HANDLE hDevmemServerContext; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY; + +/* Bridge out structure for DevmemIntCtxDestroy */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY; + +/******************************************* + DevmemIntHeapCreate + *******************************************/ + +/* Bridge in structure for DevmemIntHeapCreate */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE_TAG +{ + IMG_HANDLE hDevmemCtx; + IMG_DEV_VIRTADDR sHeapBaseAddr; + IMG_DEVMEM_SIZE_T uiHeapLength; + IMG_UINT32 ui32Log2DataPageSize; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE; + +/* Bridge out structure for DevmemIntHeapCreate */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE_TAG +{ + IMG_HANDLE hDevmemHeapPtr; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE; + +/******************************************* + DevmemIntHeapDestroy + *******************************************/ + +/* Bridge in structure for DevmemIntHeapDestroy */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY_TAG +{ + IMG_HANDLE 
hDevmemHeap; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY; + +/* Bridge out structure for DevmemIntHeapDestroy */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY; + +/******************************************* + DevmemIntMapPMR + *******************************************/ + +/* Bridge in structure for DevmemIntMapPMR */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR_TAG +{ + IMG_HANDLE hDevmemServerHeap; + IMG_HANDLE hReservation; + IMG_HANDLE hPMR; + PVRSRV_MEMALLOCFLAGS_T uiMapFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR; + +/* Bridge out structure for DevmemIntMapPMR */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR_TAG +{ + IMG_HANDLE hMapping; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR; + +/******************************************* + DevmemIntUnmapPMR + *******************************************/ + +/* Bridge in structure for DevmemIntUnmapPMR */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR_TAG +{ + IMG_HANDLE hMapping; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR; + +/* Bridge out structure for DevmemIntUnmapPMR */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR; + +/******************************************* + DevmemIntReserveRange + *******************************************/ + +/* Bridge in structure for DevmemIntReserveRange */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE_TAG +{ + IMG_HANDLE hDevmemServerHeap; + IMG_DEV_VIRTADDR sAddress; + IMG_DEVMEM_SIZE_T uiLength; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE; + +/* Bridge out structure for DevmemIntReserveRange */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE_TAG +{ + IMG_HANDLE hReservation; + PVRSRV_ERROR eError; +} __attribute__ 
((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE; + +/******************************************* + DevmemIntUnreserveRange + *******************************************/ + +/* Bridge in structure for DevmemIntUnreserveRange */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE_TAG +{ + IMG_HANDLE hReservation; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE; + +/* Bridge out structure for DevmemIntUnreserveRange */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE; + +/******************************************* + ChangeSparseMem + *******************************************/ + +/* Bridge in structure for ChangeSparseMem */ +typedef struct PVRSRV_BRIDGE_IN_CHANGESPARSEMEM_TAG +{ + IMG_HANDLE hSrvDevMemHeap; + IMG_HANDLE hPMR; + IMG_UINT32 ui32AllocPageCount; + IMG_UINT32 *pui32AllocPageIndices; + IMG_UINT32 ui32FreePageCount; + IMG_UINT32 *pui32FreePageIndices; + IMG_UINT32 ui32SparseFlags; + PVRSRV_MEMALLOCFLAGS_T uiFlags; + IMG_DEV_VIRTADDR sDevVAddr; + IMG_UINT64 ui64CPUVAddr; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_CHANGESPARSEMEM; + +/* Bridge out structure for ChangeSparseMem */ +typedef struct PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM; + +/******************************************* + DevmemIntMapPages + *******************************************/ + +/* Bridge in structure for DevmemIntMapPages */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES_TAG +{ + IMG_HANDLE hReservation; + IMG_HANDLE hPMR; + IMG_UINT32 ui32PageCount; + IMG_UINT32 ui32PhysicalPgOffset; + PVRSRV_MEMALLOCFLAGS_T uiFlags; + IMG_DEV_VIRTADDR sDevVAddr; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES; + +/* Bridge out structure for DevmemIntMapPages */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ 
((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES; + +/******************************************* + DevmemIntUnmapPages + *******************************************/ + +/* Bridge in structure for DevmemIntUnmapPages */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES_TAG +{ + IMG_HANDLE hReservation; + IMG_DEV_VIRTADDR sDevVAddr; + IMG_UINT32 ui32PageCount; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES; + +/* Bridge out structure for DevmemIntUnmapPages */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES; + +/******************************************* + DevmemIsVDevAddrValid + *******************************************/ + +/* Bridge in structure for DevmemIsVDevAddrValid */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID_TAG +{ + IMG_HANDLE hDevmemCtx; + IMG_DEV_VIRTADDR sAddress; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID; + +/* Bridge out structure for DevmemIsVDevAddrValid */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID; + +/******************************************* + DevmemFlushDevSLCRange + *******************************************/ + +/* Bridge in structure for DevmemFlushDevSLCRange */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE_TAG +{ + IMG_HANDLE hDevmemCtx; + IMG_DEV_VIRTADDR sAddress; + IMG_DEVMEM_SIZE_T uiSize; + IMG_BOOL bInvalidate; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE; + +/* Bridge out structure for DevmemFlushDevSLCRange */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE; + +/******************************************* + DevmemInvalidateFBSCTable + *******************************************/ + +/* Bridge in structure for 
DevmemInvalidateFBSCTable */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE_TAG +{ + IMG_HANDLE hDevmemCtx; + IMG_UINT64 ui64FBSCEntries; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE; + +/* Bridge out structure for DevmemInvalidateFBSCTable */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE; + +/******************************************* + HeapCfgHeapConfigCount + *******************************************/ + +/* Bridge in structure for HeapCfgHeapConfigCount */ +typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT; + +/* Bridge out structure for HeapCfgHeapConfigCount */ +typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT_TAG +{ + IMG_UINT32 ui32NumHeapConfigs; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT; + +/******************************************* + HeapCfgHeapCount + *******************************************/ + +/* Bridge in structure for HeapCfgHeapCount */ +typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT_TAG +{ + IMG_UINT32 ui32HeapConfigIndex; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT; + +/* Bridge out structure for HeapCfgHeapCount */ +typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT_TAG +{ + IMG_UINT32 ui32NumHeaps; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT; + +/******************************************* + HeapCfgHeapConfigName + *******************************************/ + +/* Bridge in structure for HeapCfgHeapConfigName */ +typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME_TAG +{ + IMG_UINT32 ui32HeapConfigIndex; + IMG_UINT32 ui32HeapConfigNameBufSz; + /* Output pointer puiHeapConfigName is also an implied input */ + IMG_CHAR *puiHeapConfigName; +} 
__attribute__ ((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME; + +/* Bridge out structure for HeapCfgHeapConfigName */ +typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME_TAG +{ + IMG_CHAR *puiHeapConfigName; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME; + +/******************************************* + HeapCfgHeapDetails + *******************************************/ + +/* Bridge in structure for HeapCfgHeapDetails */ +typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS_TAG +{ + IMG_UINT32 ui32HeapConfigIndex; + IMG_UINT32 ui32HeapIndex; + IMG_UINT32 ui32HeapNameBufSz; + /* Output pointer puiHeapNameOut is also an implied input */ + IMG_CHAR *puiHeapNameOut; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS; + +/* Bridge out structure for HeapCfgHeapDetails */ +typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS_TAG +{ + IMG_CHAR *puiHeapNameOut; + IMG_DEV_VIRTADDR sDevVAddrBase; + IMG_DEVMEM_SIZE_T uiHeapLength; + IMG_DEVMEM_SIZE_T uiReservedRegionLength; + IMG_UINT32 ui32Log2DataPageSizeOut; + IMG_UINT32 ui32Log2ImportAlignmentOut; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS; + +/******************************************* + DevmemIntRegisterPFNotifyKM + *******************************************/ + +/* Bridge in structure for DevmemIntRegisterPFNotifyKM */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM_TAG +{ + IMG_HANDLE hDevmemCtx; + IMG_UINT32 ui32PID; + IMG_BOOL bRegister; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM; + +/* Bridge out structure for DevmemIntRegisterPFNotifyKM */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM; + +/******************************************* + GetMaxDevMemSize + *******************************************/ + +/* Bridge in structure for GetMaxDevMemSize */ 
+typedef struct PVRSRV_BRIDGE_IN_GETMAXDEVMEMSIZE_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_GETMAXDEVMEMSIZE; + +/* Bridge out structure for GetMaxDevMemSize */ +typedef struct PVRSRV_BRIDGE_OUT_GETMAXDEVMEMSIZE_TAG +{ + IMG_DEVMEM_SIZE_T uiLMASize; + IMG_DEVMEM_SIZE_T uiUMASize; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_GETMAXDEVMEMSIZE; + +/******************************************* + DevmemGetFaultAddress + *******************************************/ + +/* Bridge in structure for DevmemGetFaultAddress */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS_TAG +{ + IMG_HANDLE hDevmemCtx; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS; + +/* Bridge out structure for DevmemGetFaultAddress */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS_TAG +{ + IMG_DEV_VIRTADDR sFaultAddress; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS; + +/******************************************* + PVRSRVUpdateOOMStats + *******************************************/ + +/* Bridge in structure for PVRSRVUpdateOOMStats */ +typedef struct PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS_TAG +{ + IMG_UINT32 ui32ui32StatType; + IMG_PID ui32pid; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS; + +/* Bridge out structure for PVRSRVUpdateOOMStats */ +typedef struct PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS; + +#endif /* COMMON_MM_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/mm_bridge/server_mm_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/mm_bridge/server_mm_bridge.c new file mode 100644 index 000000000000..c8b6bb5c2d03 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/mm_bridge/server_mm_bridge.c @@ -0,0 +1,3502 @@ +/******************************************************************************* +@File 
+@Title Server bridge for mm +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for mm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "devicemem.h" +#include "devicemem_server.h" +#include "pmr.h" +#include "devicemem_heapcfg.h" +#include "physmem.h" +#include "devicemem_utils.h" +#include "process_stats.h" + +#include "common_mm_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +static PVRSRV_ERROR ReleasePMRExport(void *pvData) +{ + PVR_UNREFERENCED_PARAMETER(pvData); + + return PVRSRV_OK; +} + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _PMRExportPMRpsPMRExportIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnexportPMR((PMR_EXPORT *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePMRExportPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRExportPMRIN_UI8, + IMG_UINT8 * psPMRExportPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMREXPORTPMR *psPMRExportPMRIN = + (PVRSRV_BRIDGE_IN_PMREXPORTPMR *) + IMG_OFFSET_ADDR(psPMRExportPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMREXPORTPMR *psPMRExportPMROUT = + (PVRSRV_BRIDGE_OUT_PMREXPORTPMR *) + IMG_OFFSET_ADDR(psPMRExportPMROUT_UI8, 0); + + IMG_HANDLE hPMR = 
psPMRExportPMRIN->hPMR; + PMR *psPMRInt = NULL; + PMR_EXPORT *psPMRExportInt = NULL; + IMG_HANDLE hPMRExportInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRExportPMROUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRExportPMR_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPMRExportPMROUT->eError = + PMRExportPMR(psPMRInt, + &psPMRExportInt, + &psPMRExportPMROUT->ui64Size, + &psPMRExportPMROUT->ui32Log2Contig, + &psPMRExportPMROUT->ui64Password); + /* Exit early if bridged call fails */ + if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK)) + { + goto PMRExportPMR_exit; + } + + /* + * For cases where we need a cross process handle we actually allocate two. + * + * The first one is a connection specific handle and it gets given the real + * release function. This handle does *NOT* get returned to the caller. It's + * purpose is to release any leaked resources when we either have a bad or + * abnormally terminated client. If we didn't do this then the resource + * wouldn't be freed until driver unload. If the resource is freed normally, + * this handle can be looked up via the cross process handle and then + * released accordingly. + * + * The second one is a cross process handle and it gets given a noop release + * function. This handle does get returned to the caller. + */ + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + + psPMRExportPMROUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase-> + psHandleBase, &hPMRExportInt, + (void *)psPMRExportInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PMRExportPMRpsPMRExportIntRelease); + if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + goto PMRExportPMR_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + + /* Lock over handle creation. */ + LockHandle(KERNEL_HANDLE_BASE); + psPMRExportPMROUT->eError = + PVRSRVAllocHandleUnlocked(KERNEL_HANDLE_BASE, + &psPMRExportPMROUT->hPMRExport, + (void *)psPMRExportInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & ReleasePMRExport); + if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(KERNEL_HANDLE_BASE); + goto PMRExportPMR_exit; + } + /* Release now we have created handles. */ + UnlockHandle(KERNEL_HANDLE_BASE); + +PMRExportPMR_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psPMRExportPMROUT->eError != PVRSRV_OK) + { + if (psPMRExportPMROUT->hPMRExport) + { + PVRSRV_ERROR eError; + + /* Lock over handle creation cleanup. 
*/ + LockHandle(KERNEL_HANDLE_BASE); + + eError = PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE, + (IMG_HANDLE) + psPMRExportPMROUT-> + hPMRExport, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); + if (unlikely + ((eError != PVRSRV_OK) + && (eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) + || (eError == PVRSRV_ERROR_RETRY)); + + /* Release now we have cleaned up creation handles. */ + UnlockHandle(KERNEL_HANDLE_BASE); + + } + + if (hPMRExportInt) + { + PVRSRV_ERROR eError; + /* Lock over handle creation cleanup. */ + LockHandle(psConnection->psProcessHandleBase-> + psHandleBase); + + eError = + PVRSRVReleaseHandleUnlocked(psConnection-> + psProcessHandleBase-> + psHandleBase, + hPMRExportInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); + if ((eError != PVRSRV_OK) + && (eError != PVRSRV_ERROR_RETRY)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) + || (eError == PVRSRV_ERROR_RETRY)); + + /* Avoid freeing/destroying/releasing the resource a second time below */ + psPMRExportInt = NULL; + /* Release now we have cleaned up creation handles. 
*/ + UnlockHandle(psConnection->psProcessHandleBase-> + psHandleBase); + } + + if (psPMRExportInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnexportPMR(psPMRExportInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRUnexportPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRUnexportPMRIN_UI8, + IMG_UINT8 * psPMRUnexportPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR *psPMRUnexportPMRIN = + (PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR *) + IMG_OFFSET_ADDR(psPMRUnexportPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR *psPMRUnexportPMROUT = + (PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR *) + IMG_OFFSET_ADDR(psPMRUnexportPMROUT_UI8, 0); + + PMR_EXPORT *psPMRExportInt = NULL; + IMG_HANDLE hPMRExportInt = NULL; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + /* Lock over handle destruction. */ + LockHandle(KERNEL_HANDLE_BASE); + psPMRUnexportPMROUT->eError = + PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE, + (void **)&psPMRExportInt, + (IMG_HANDLE) psPMRUnexportPMRIN-> + hPMRExport, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, + IMG_FALSE); + if (unlikely(psPMRUnexportPMROUT->eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psPMRUnexportPMROUT->eError))); + } + PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK); + + /* Release now we have destroyed handles. */ + UnlockHandle(KERNEL_HANDLE_BASE); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + /* + * Find the connection specific handle that represents the same data + * as the cross process handle as releasing it will actually call the + * data's real release function (see the function where the cross + * process handle is allocated for more details). 
+ */ + psPMRUnexportPMROUT->eError = + PVRSRVFindHandleUnlocked(psConnection->psProcessHandleBase-> + psHandleBase, &hPMRExportInt, + psPMRExportInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); + if (unlikely(psPMRUnexportPMROUT->eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psPMRUnexportPMROUT->eError))); + } + PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK); + + psPMRUnexportPMROUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psProcessHandleBase-> + psHandleBase, hPMRExportInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); + if (unlikely + ((psPMRUnexportPMROUT->eError != PVRSRV_OK) + && (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psPMRUnexportPMROUT->eError))); + } + PVR_ASSERT((psPMRUnexportPMROUT->eError == PVRSRV_OK) || + (psPMRUnexportPMROUT->eError == PVRSRV_ERROR_RETRY)); + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + + /* Lock over handle destruction. */ + LockHandle(KERNEL_HANDLE_BASE); + + psPMRUnexportPMROUT->eError = + PVRSRVReleaseHandleStagedUnlock(KERNEL_HANDLE_BASE, + (IMG_HANDLE) psPMRUnexportPMRIN-> + hPMRExport, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); + if (unlikely + ((psPMRUnexportPMROUT->eError != PVRSRV_OK) + && (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psPMRUnexportPMROUT->eError))); + UnlockHandle(KERNEL_HANDLE_BASE); + goto PMRUnexportPMR_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(KERNEL_HANDLE_BASE); + +PMRUnexportPMR_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRGetUID(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRGetUIDIN_UI8, + IMG_UINT8 * psPMRGetUIDOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRGETUID *psPMRGetUIDIN = + (PVRSRV_BRIDGE_IN_PMRGETUID *) IMG_OFFSET_ADDR(psPMRGetUIDIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_PMRGETUID *psPMRGetUIDOUT = + (PVRSRV_BRIDGE_OUT_PMRGETUID *) IMG_OFFSET_ADDR(psPMRGetUIDOUT_UI8, + 0); + + IMG_HANDLE hPMR = psPMRGetUIDIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRGetUIDOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psPMRGetUIDOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRGetUID_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPMRGetUIDOUT->eError = PMRGetUID(psPMRInt, &psPMRGetUIDOUT->ui64UID); + +PMRGetUID_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static PVRSRV_ERROR _PMRMakeLocalImportHandlepsExtMemIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnmakeLocalImportHandle((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePMRMakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psPMRMakeLocalImportHandleIN_UI8, + IMG_UINT8 * + psPMRMakeLocalImportHandleOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE *psPMRMakeLocalImportHandleIN + = + (PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE *) + IMG_OFFSET_ADDR(psPMRMakeLocalImportHandleIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE + *psPMRMakeLocalImportHandleOUT = + (PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE *) + IMG_OFFSET_ADDR(psPMRMakeLocalImportHandleOUT_UI8, 0); + + IMG_HANDLE hBuffer = psPMRMakeLocalImportHandleIN->hBuffer; + PMR *psBufferInt = NULL; + PMR *psExtMemInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRMakeLocalImportHandleOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psBufferInt, + hBuffer, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, + IMG_TRUE); + if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRMakeLocalImportHandle_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPMRMakeLocalImportHandleOUT->eError = + PMRMakeLocalImportHandle(psBufferInt, &psExtMemInt); + /* Exit early if bridged call fails */ + if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)) + { + goto PMRMakeLocalImportHandle_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + + psPMRMakeLocalImportHandleOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase-> + psHandleBase, + &psPMRMakeLocalImportHandleOUT->hExtMem, + (void *)psExtMemInt, + PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PMRMakeLocalImportHandlepsExtMemIntRelease); + if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + goto PMRMakeLocalImportHandle_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + +PMRMakeLocalImportHandle_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psBufferInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hBuffer, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK) + { + if (psExtMemInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnmakeLocalImportHandle(psExtMemInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRUnmakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psPMRUnmakeLocalImportHandleIN_UI8, + IMG_UINT8 * + psPMRUnmakeLocalImportHandleOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE + *psPMRUnmakeLocalImportHandleIN = + (PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE *) + IMG_OFFSET_ADDR(psPMRUnmakeLocalImportHandleIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE + *psPMRUnmakeLocalImportHandleOUT = + (PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE *) + IMG_OFFSET_ADDR(psPMRUnmakeLocalImportHandleOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + + psPMRUnmakeLocalImportHandleOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psProcessHandleBase-> + psHandleBase, + (IMG_HANDLE) + psPMRUnmakeLocalImportHandleIN-> + hExtMem, + PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT); + if (unlikely + ((psPMRUnmakeLocalImportHandleOUT->eError != PVRSRV_OK) + && (psPMRUnmakeLocalImportHandleOUT->eError != + PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psPMRUnmakeLocalImportHandleOUT-> + eError))); + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + goto PMRUnmakeLocalImportHandle_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + +PMRUnmakeLocalImportHandle_exit: + + return 0; +} + +static PVRSRV_ERROR _PMRImportPMRpsPMRIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnrefPMR((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePMRImportPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRImportPMRIN_UI8, + IMG_UINT8 * psPMRImportPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRIMPORTPMR *psPMRImportPMRIN = + (PVRSRV_BRIDGE_IN_PMRIMPORTPMR *) + IMG_OFFSET_ADDR(psPMRImportPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRIMPORTPMR *psPMRImportPMROUT = + (PVRSRV_BRIDGE_OUT_PMRIMPORTPMR *) + IMG_OFFSET_ADDR(psPMRImportPMROUT_UI8, 0); + + IMG_HANDLE hPMRExport = psPMRImportPMRIN->hPMRExport; + PMR_EXPORT *psPMRExportInt = NULL; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(KERNEL_HANDLE_BASE); + + /* Look up the address from the handle */ + psPMRImportPMROUT->eError = + PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE, + (void **)&psPMRExportInt, + hPMRExport, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, + IMG_TRUE); + if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(KERNEL_HANDLE_BASE); + goto PMRImportPMR_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(KERNEL_HANDLE_BASE); + + psPMRImportPMROUT->eError = + PhysmemImportPMR(psConnection, OSGetDevNode(psConnection), + psPMRExportInt, + psPMRImportPMRIN->ui64uiPassword, + psPMRImportPMRIN->ui64uiSize, + psPMRImportPMRIN->ui32uiLog2Contig, &psPMRInt); + /* Exit early if bridged call fails */ + if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK)) + { + goto PMRImportPMR_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psPMRImportPMROUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psPMRImportPMROUT->hPMR, + (void *)psPMRInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PMRImportPMRpsPMRIntRelease); + if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRImportPMR_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +PMRImportPMR_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(KERNEL_HANDLE_BASE); + + /* Unreference the previously looked up handle */ + if (psPMRExportInt) + { + PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE, + hPMRExport, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(KERNEL_HANDLE_BASE); + + if (psPMRImportPMROUT->eError != PVRSRV_OK) + { + if (psPMRInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnrefPMR(psPMRInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + return 0; +} + +static PVRSRV_ERROR _PMRLocalImportPMRpsPMRIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnrefPMR((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePMRLocalImportPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRLocalImportPMRIN_UI8, + IMG_UINT8 * psPMRLocalImportPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR *psPMRLocalImportPMRIN = + (PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR *) + IMG_OFFSET_ADDR(psPMRLocalImportPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR *psPMRLocalImportPMROUT = + (PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR *) + IMG_OFFSET_ADDR(psPMRLocalImportPMROUT_UI8, 0); + + IMG_HANDLE hExtHandle = psPMRLocalImportPMRIN->hExtHandle; + PMR *psExtHandleInt = NULL; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + + /* Look up the address from the handle */ + psPMRLocalImportPMROUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase-> + psHandleBase, (void **)&psExtHandleInt, + hExtHandle, + PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT, + IMG_TRUE); + if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + goto PMRLocalImportPMR_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + + psPMRLocalImportPMROUT->eError = + PMRLocalImportPMR(psExtHandleInt, + &psPMRInt, + &psPMRLocalImportPMROUT->uiSize, + &psPMRLocalImportPMROUT->sAlign); + /* Exit early if bridged call fails */ + if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK)) + { + goto PMRLocalImportPMR_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psPMRLocalImportPMROUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psPMRLocalImportPMROUT->hPMR, + (void *)psPMRInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PMRLocalImportPMRpsPMRIntRelease); + if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRLocalImportPMR_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +PMRLocalImportPMR_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psExtHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase-> + psHandleBase, hExtHandle, + PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + + if (psPMRLocalImportPMROUT->eError != PVRSRV_OK) + { + if (psPMRInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnrefPMR(psPMRInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRUnrefPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRUnrefPMRIN_UI8, + IMG_UINT8 * psPMRUnrefPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRUNREFPMR *psPMRUnrefPMRIN = + (PVRSRV_BRIDGE_IN_PMRUNREFPMR *) + IMG_OFFSET_ADDR(psPMRUnrefPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRUNREFPMR *psPMRUnrefPMROUT = + (PVRSRV_BRIDGE_OUT_PMRUNREFPMR *) + IMG_OFFSET_ADDR(psPMRUnrefPMROUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psPMRUnrefPMROUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psPMRUnrefPMRIN->hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + if (unlikely((psPMRUnrefPMROUT->eError != PVRSRV_OK) && + (psPMRUnrefPMROUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psPMRUnrefPMROUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto PMRUnrefPMR_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +PMRUnrefPMR_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRUnrefUnlockPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRUnrefUnlockPMRIN_UI8, + IMG_UINT8 * psPMRUnrefUnlockPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMRIN = + (PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR *) + IMG_OFFSET_ADDR(psPMRUnrefUnlockPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMROUT = + (PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR *) + IMG_OFFSET_ADDR(psPMRUnrefUnlockPMROUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psPMRUnrefUnlockPMROUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psPMRUnrefUnlockPMRIN-> + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + if (unlikely + ((psPMRUnrefUnlockPMROUT->eError != PVRSRV_OK) + && (psPMRUnrefUnlockPMROUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psPMRUnrefUnlockPMROUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto PMRUnrefUnlockPMR_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +PMRUnrefUnlockPMR_exit: + + return 0; +} + +static PVRSRV_ERROR _PhysmemNewRamBackedPMRpsPMRPtrIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnrefPMR((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePhysmemNewRamBackedPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPhysmemNewRamBackedPMRIN_UI8, + IMG_UINT8 * psPhysmemNewRamBackedPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR *psPhysmemNewRamBackedPMRIN = + (PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR *) + IMG_OFFSET_ADDR(psPhysmemNewRamBackedPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR *psPhysmemNewRamBackedPMROUT = + (PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR *) + IMG_OFFSET_ADDR(psPhysmemNewRamBackedPMROUT_UI8, 0); + + IMG_UINT32 *ui32MappingTableInt = NULL; + IMG_CHAR *uiAnnotationInt = NULL; + PMR *psPMRPtrInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * + sizeof(IMG_UINT32)) + + (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * + sizeof(IMG_CHAR)) + 0; + + if (unlikely + (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks > + PMR_MAX_SUPPORTED_PAGE_COUNT)) + { + psPhysmemNewRamBackedPMROUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysmemNewRamBackedPMR_exit; + } + + if (unlikely + (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength > + DEVMEM_ANNOTATION_MAX_LEN)) + { + psPhysmemNewRamBackedPMROUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysmemNewRamBackedPMR_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psPhysmemNewRamBackedPMRIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psPhysmemNewRamBackedPMRIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psPhysmemNewRamBackedPMROUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto PhysmemNewRamBackedPMR_exit; + } + } + } + + if (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks != 0) + { + ui32MappingTableInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32) > + 0) + { + if (OSCopyFromUser + (NULL, ui32MappingTableInt, + (const void __user *)psPhysmemNewRamBackedPMRIN-> + pui32MappingTable, + psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psPhysmemNewRamBackedPMROUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysmemNewRamBackedPMR_exit; + } + } + if (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength != 0) + { + uiAnnotationInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * + sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * + sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiAnnotationInt, + (const void __user *)psPhysmemNewRamBackedPMRIN-> + puiAnnotation, + psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * + sizeof(IMG_CHAR)) != PVRSRV_OK) + { + 
psPhysmemNewRamBackedPMROUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysmemNewRamBackedPMR_exit; + } + ((IMG_CHAR *) + uiAnnotationInt)[(psPhysmemNewRamBackedPMRIN-> + ui32AnnotationLength * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + + psPhysmemNewRamBackedPMROUT->eError = + PhysmemNewRamBackedPMR(psConnection, OSGetDevNode(psConnection), + psPhysmemNewRamBackedPMRIN->uiSize, + psPhysmemNewRamBackedPMRIN->uiChunkSize, + psPhysmemNewRamBackedPMRIN-> + ui32NumPhysChunks, + psPhysmemNewRamBackedPMRIN-> + ui32NumVirtChunks, ui32MappingTableInt, + psPhysmemNewRamBackedPMRIN->ui32Log2PageSize, + psPhysmemNewRamBackedPMRIN->uiFlags, + psPhysmemNewRamBackedPMRIN-> + ui32AnnotationLength, uiAnnotationInt, + psPhysmemNewRamBackedPMRIN->ui32PID, + &psPMRPtrInt, + psPhysmemNewRamBackedPMRIN->ui32PDumpFlags); + /* Exit early if bridged call fails */ + if (unlikely(psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)) + { + goto PhysmemNewRamBackedPMR_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psPhysmemNewRamBackedPMROUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psPhysmemNewRamBackedPMROUT->hPMRPtr, + (void *)psPMRPtrInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PhysmemNewRamBackedPMRpsPMRPtrIntRelease); + if (unlikely(psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PhysmemNewRamBackedPMR_exit; + } + + /* Release now we have created handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +PhysmemNewRamBackedPMR_exit: + + if (psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK) + { + if (psPMRPtrInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnrefPMR(psPMRPtrInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static PVRSRV_ERROR _PhysmemNewRamBackedLockedPMRpsPMRPtrIntRelease(void + *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnrefUnlockPMR((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePhysmemNewRamBackedLockedPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psPhysmemNewRamBackedLockedPMRIN_UI8, + IMG_UINT8 * + psPhysmemNewRamBackedLockedPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR + *psPhysmemNewRamBackedLockedPMRIN = + (PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR *) + IMG_OFFSET_ADDR(psPhysmemNewRamBackedLockedPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR + *psPhysmemNewRamBackedLockedPMROUT = + (PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR *) + IMG_OFFSET_ADDR(psPhysmemNewRamBackedLockedPMROUT_UI8, 0); + + IMG_UINT32 *ui32MappingTableInt = NULL; + IMG_CHAR *uiAnnotationInt = NULL; + PMR *psPMRPtrInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * + sizeof(IMG_UINT32)) + + (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * + sizeof(IMG_CHAR)) + 0; + + if (unlikely + (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks > + PMR_MAX_SUPPORTED_PAGE_COUNT)) + { + psPhysmemNewRamBackedLockedPMROUT->eError = + 
PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysmemNewRamBackedLockedPMR_exit; + } + + if (unlikely + (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength > + DEVMEM_ANNOTATION_MAX_LEN)) + { + psPhysmemNewRamBackedLockedPMROUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysmemNewRamBackedLockedPMR_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psPhysmemNewRamBackedLockedPMRIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *) + psPhysmemNewRamBackedLockedPMRIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psPhysmemNewRamBackedLockedPMROUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto PhysmemNewRamBackedLockedPMR_exit; + } + } + } + + if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks != 0) + { + ui32MappingTableInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * + sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32MappingTableInt, + (const void __user *)psPhysmemNewRamBackedLockedPMRIN-> + pui32MappingTable, + psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psPhysmemNewRamBackedLockedPMROUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysmemNewRamBackedLockedPMR_exit; + } + } + if 
(psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength != 0) + { + uiAnnotationInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * + sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * + sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiAnnotationInt, + (const void __user *)psPhysmemNewRamBackedLockedPMRIN-> + puiAnnotation, + psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * + sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psPhysmemNewRamBackedLockedPMROUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysmemNewRamBackedLockedPMR_exit; + } + ((IMG_CHAR *) + uiAnnotationInt)[(psPhysmemNewRamBackedLockedPMRIN-> + ui32AnnotationLength * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + + psPhysmemNewRamBackedLockedPMROUT->eError = + PhysmemNewRamBackedLockedPMR(psConnection, + OSGetDevNode(psConnection), + psPhysmemNewRamBackedLockedPMRIN-> + uiSize, + psPhysmemNewRamBackedLockedPMRIN-> + uiChunkSize, + psPhysmemNewRamBackedLockedPMRIN-> + ui32NumPhysChunks, + psPhysmemNewRamBackedLockedPMRIN-> + ui32NumVirtChunks, ui32MappingTableInt, + psPhysmemNewRamBackedLockedPMRIN-> + ui32Log2PageSize, + psPhysmemNewRamBackedLockedPMRIN-> + uiFlags, + psPhysmemNewRamBackedLockedPMRIN-> + ui32AnnotationLength, uiAnnotationInt, + psPhysmemNewRamBackedLockedPMRIN-> + ui32PID, &psPMRPtrInt, + psPhysmemNewRamBackedLockedPMRIN-> + ui32PDumpFlags); + /* Exit early if bridged call fails */ + if (unlikely(psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)) + { + goto PhysmemNewRamBackedLockedPMR_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psPhysmemNewRamBackedLockedPMROUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psPhysmemNewRamBackedLockedPMROUT-> + hPMRPtr, (void *)psPMRPtrInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PhysmemNewRamBackedLockedPMRpsPMRPtrIntRelease); + if (unlikely(psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PhysmemNewRamBackedLockedPMR_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +PhysmemNewRamBackedLockedPMR_exit: + + if (psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK) + { + if (psPMRPtrInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnrefUnlockPMR(psPMRPtrInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntPin(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntPinIN_UI8, + IMG_UINT8 * psDevmemIntPinOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTPIN *psDevmemIntPinIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTPIN *) + IMG_OFFSET_ADDR(psDevmemIntPinIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTPIN *psDevmemIntPinOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTPIN *) + IMG_OFFSET_ADDR(psDevmemIntPinOUT_UI8, 0); + + IMG_HANDLE hPMR = psDevmemIntPinIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntPinOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psDevmemIntPinOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntPin_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntPinOUT->eError = DevmemIntPin(psPMRInt); + +DevmemIntPin_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntUnpin(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntUnpinIN_UI8, + IMG_UINT8 * psDevmemIntUnpinOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN *psDevmemIntUnpinIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN *) + IMG_OFFSET_ADDR(psDevmemIntUnpinIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN *psDevmemIntUnpinOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN *) + IMG_OFFSET_ADDR(psDevmemIntUnpinOUT_UI8, 0); + + IMG_HANDLE hPMR = psDevmemIntUnpinIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntUnpinOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psDevmemIntUnpinOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntUnpin_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntUnpinOUT->eError = DevmemIntUnpin(psPMRInt); + +DevmemIntUnpin_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntPinValidate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntPinValidateIN_UI8, + IMG_UINT8 * psDevmemIntPinValidateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE *psDevmemIntPinValidateIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE *) + IMG_OFFSET_ADDR(psDevmemIntPinValidateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE *psDevmemIntPinValidateOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE *) + IMG_OFFSET_ADDR(psDevmemIntPinValidateOUT_UI8, 0); + + IMG_HANDLE hMapping = psDevmemIntPinValidateIN->hMapping; + DEVMEMINT_MAPPING *psMappingInt = NULL; + IMG_HANDLE hPMR = psDevmemIntPinValidateIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntPinValidateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psMappingInt, + hMapping, + PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, + IMG_TRUE); + if (unlikely(psDevmemIntPinValidateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntPinValidate_exit; + } + + /* Look up the address from the handle */ + psDevmemIntPinValidateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psDevmemIntPinValidateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntPinValidate_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntPinValidateOUT->eError = + DevmemIntPinValidate(psMappingInt, psPMRInt); + +DevmemIntPinValidate_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psMappingInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hMapping, + PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING); + } + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntUnpinInvalidate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psDevmemIntUnpinInvalidateIN_UI8, + IMG_UINT8 * + psDevmemIntUnpinInvalidateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE *psDevmemIntUnpinInvalidateIN + = + (PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE *) + IMG_OFFSET_ADDR(psDevmemIntUnpinInvalidateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE + *psDevmemIntUnpinInvalidateOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE *) + IMG_OFFSET_ADDR(psDevmemIntUnpinInvalidateOUT_UI8, 0); + + IMG_HANDLE hMapping = psDevmemIntUnpinInvalidateIN->hMapping; + DEVMEMINT_MAPPING *psMappingInt = NULL; + IMG_HANDLE hPMR = psDevmemIntUnpinInvalidateIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntUnpinInvalidateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psMappingInt, + hMapping, + PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, + IMG_TRUE); + if (unlikely(psDevmemIntUnpinInvalidateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntUnpinInvalidate_exit; + } + + /* Look up the address from the handle */ + psDevmemIntUnpinInvalidateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psDevmemIntUnpinInvalidateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntUnpinInvalidate_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntUnpinInvalidateOUT->eError = + DevmemIntUnpinInvalidate(psMappingInt, psPMRInt); + +DevmemIntUnpinInvalidate_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psMappingInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hMapping, + PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING); + } + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static PVRSRV_ERROR _DevmemIntCtxCreatepsDevMemServerContextIntRelease(void + *pvData) +{ + PVRSRV_ERROR eError; + eError = DevmemIntCtxDestroy((DEVMEMINT_CTX *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeDevmemIntCtxCreate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntCtxCreateIN_UI8, + IMG_UINT8 * psDevmemIntCtxCreateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *) + IMG_OFFSET_ADDR(psDevmemIntCtxCreateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *) + IMG_OFFSET_ADDR(psDevmemIntCtxCreateOUT_UI8, 0); + + DEVMEMINT_CTX *psDevMemServerContextInt = NULL; + IMG_HANDLE hPrivDataInt = NULL; + + psDevmemIntCtxCreateOUT->hDevMemServerContext = NULL; + + psDevmemIntCtxCreateOUT->eError = + DevmemIntCtxCreate(psConnection, OSGetDevNode(psConnection), + psDevmemIntCtxCreateIN->bbKernelMemoryCtx, + &psDevMemServerContextInt, + &hPrivDataInt, + &psDevmemIntCtxCreateOUT->ui32CPUCacheLineSize); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) + { + goto DevmemIntCtxCreate_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psDevmemIntCtxCreateOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntCtxCreateOUT-> + hDevMemServerContext, + (void *)psDevMemServerContextInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _DevmemIntCtxCreatepsDevMemServerContextIntRelease); + if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntCtxCreate_exit; + } + + psDevmemIntCtxCreateOUT->eError = + PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntCtxCreateOUT->hPrivData, + (void *)hPrivDataInt, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psDevmemIntCtxCreateOUT-> + hDevMemServerContext); + if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntCtxCreate_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntCtxCreate_exit: + + if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK) + { + if (psDevmemIntCtxCreateOUT->hDevMemServerContext) + { + PVRSRV_ERROR eError; + + /* Lock over handle creation cleanup. */ + LockHandle(psConnection->psHandleBase); + + eError = + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + (IMG_HANDLE) + psDevmemIntCtxCreateOUT-> + hDevMemServerContext, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + if (unlikely + ((eError != PVRSRV_OK) + && (eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) + || (eError == PVRSRV_ERROR_RETRY)); + + /* Avoid freeing/destroying/releasing the resource a second time below */ + psDevMemServerContextInt = NULL; + /* Release now we have cleaned up creation handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + } + + if (psDevMemServerContextInt) + { + DevmemIntCtxDestroy(psDevMemServerContextInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntCtxDestroy(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntCtxDestroyIN_UI8, + IMG_UINT8 * psDevmemIntCtxDestroyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *) + IMG_OFFSET_ADDR(psDevmemIntCtxDestroyIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *) + IMG_OFFSET_ADDR(psDevmemIntCtxDestroyOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psDevmemIntCtxDestroyOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psDevmemIntCtxDestroyIN-> + hDevmemServerContext, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + if (unlikely + ((psDevmemIntCtxDestroyOUT->eError != PVRSRV_OK) + && (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psDevmemIntCtxDestroyOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntCtxDestroy_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntCtxDestroy_exit: + + return 0; +} + +static PVRSRV_ERROR _DevmemIntHeapCreatepsDevmemHeapPtrIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = DevmemIntHeapDestroy((DEVMEMINT_HEAP *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeDevmemIntHeapCreate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntHeapCreateIN_UI8, + IMG_UINT8 * psDevmemIntHeapCreateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *) + IMG_OFFSET_ADDR(psDevmemIntHeapCreateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *) + IMG_OFFSET_ADDR(psDevmemIntHeapCreateOUT_UI8, 0); + + IMG_HANDLE hDevmemCtx = psDevmemIntHeapCreateIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + DEVMEMINT_HEAP *psDevmemHeapPtrInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntHeapCreateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemCtxInt, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + IMG_TRUE); + if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntHeapCreate_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntHeapCreateOUT->eError = + DevmemIntHeapCreate(psDevmemCtxInt, + psDevmemIntHeapCreateIN->sHeapBaseAddr, + psDevmemIntHeapCreateIN->uiHeapLength, + psDevmemIntHeapCreateIN->ui32Log2DataPageSize, + &psDevmemHeapPtrInt); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) + { + goto DevmemIntHeapCreate_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psDevmemIntHeapCreateOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntHeapCreateOUT->hDevmemHeapPtr, + (void *)psDevmemHeapPtrInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _DevmemIntHeapCreatepsDevmemHeapPtrIntRelease); + if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntHeapCreate_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntHeapCreate_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemCtxInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psDevmemIntHeapCreateOUT->eError != PVRSRV_OK) + { + if (psDevmemHeapPtrInt) + { + DevmemIntHeapDestroy(psDevmemHeapPtrInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntHeapDestroy(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntHeapDestroyIN_UI8, + IMG_UINT8 * psDevmemIntHeapDestroyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *) + IMG_OFFSET_ADDR(psDevmemIntHeapDestroyIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *) + IMG_OFFSET_ADDR(psDevmemIntHeapDestroyOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psDevmemIntHeapDestroyOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psDevmemIntHeapDestroyIN-> + hDevmemHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); + if (unlikely + ((psDevmemIntHeapDestroyOUT->eError != PVRSRV_OK) + && (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psDevmemIntHeapDestroyOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntHeapDestroy_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntHeapDestroy_exit: + + return 0; +} + +static PVRSRV_ERROR _DevmemIntMapPMRpsMappingIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = DevmemIntUnmapPMR((DEVMEMINT_MAPPING *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeDevmemIntMapPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntMapPMRIN_UI8, + IMG_UINT8 * psDevmemIntMapPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *psDevmemIntMapPMRIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *) + IMG_OFFSET_ADDR(psDevmemIntMapPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *psDevmemIntMapPMROUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *) + IMG_OFFSET_ADDR(psDevmemIntMapPMROUT_UI8, 0); + + IMG_HANDLE hDevmemServerHeap = psDevmemIntMapPMRIN->hDevmemServerHeap; + DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL; + IMG_HANDLE hReservation = psDevmemIntMapPMRIN->hReservation; + DEVMEMINT_RESERVATION *psReservationInt = NULL; + IMG_HANDLE hPMR = psDevmemIntMapPMRIN->hPMR; + PMR *psPMRInt = NULL; + DEVMEMINT_MAPPING *psMappingInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntMapPMROUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemServerHeapInt, + hDevmemServerHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, + IMG_TRUE); + if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntMapPMR_exit; + } + + /* Look up the address from the handle */ + psDevmemIntMapPMROUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, + IMG_TRUE); + if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntMapPMR_exit; + } + + /* Look up the address from the handle */ + psDevmemIntMapPMROUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntMapPMR_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntMapPMROUT->eError = + DevmemIntMapPMR(psDevmemServerHeapInt, + psReservationInt, + psPMRInt, + psDevmemIntMapPMRIN->uiMapFlags, &psMappingInt); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) + { + goto DevmemIntMapPMR_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psDevmemIntMapPMROUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntMapPMROUT->hMapping, + (void *)psMappingInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _DevmemIntMapPMRpsMappingIntRelease); + if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntMapPMR_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntMapPMR_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemServerHeapInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemServerHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); + } + + /* Unreference the previously looked up handle */ + if (psReservationInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); + } + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + if (psDevmemIntMapPMROUT->eError != PVRSRV_OK) + { + if (psMappingInt) + { + DevmemIntUnmapPMR(psMappingInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntUnmapPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntUnmapPMRIN_UI8, + IMG_UINT8 * psDevmemIntUnmapPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMRIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *) + IMG_OFFSET_ADDR(psDevmemIntUnmapPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMROUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *) + IMG_OFFSET_ADDR(psDevmemIntUnmapPMROUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psDevmemIntUnmapPMROUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psDevmemIntUnmapPMRIN-> + hMapping, + PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING); + if (unlikely + ((psDevmemIntUnmapPMROUT->eError != PVRSRV_OK) + && (psDevmemIntUnmapPMROUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psDevmemIntUnmapPMROUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntUnmapPMR_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntUnmapPMR_exit: + + return 0; +} + +static PVRSRV_ERROR _DevmemIntReserveRangepsReservationIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = DevmemIntUnreserveRange((DEVMEMINT_RESERVATION *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeDevmemIntReserveRange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntReserveRangeIN_UI8, + IMG_UINT8 * psDevmemIntReserveRangeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemIntReserveRangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemIntReserveRangeOUT_UI8, 0); + + IMG_HANDLE hDevmemServerHeap = + psDevmemIntReserveRangeIN->hDevmemServerHeap; + DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL; + DEVMEMINT_RESERVATION *psReservationInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntReserveRangeOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemServerHeapInt, + hDevmemServerHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, + IMG_TRUE); + if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntReserveRange_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntReserveRangeOUT->eError = + DevmemIntReserveRange(psDevmemServerHeapInt, + psDevmemIntReserveRangeIN->sAddress, + psDevmemIntReserveRangeIN->uiLength, + &psReservationInt); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) + { + goto DevmemIntReserveRange_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psDevmemIntReserveRangeOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntReserveRangeOUT->hReservation, + (void *)psReservationInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _DevmemIntReserveRangepsReservationIntRelease); + if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntReserveRange_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntReserveRange_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemServerHeapInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemServerHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psDevmemIntReserveRangeOUT->eError != PVRSRV_OK) + { + if (psReservationInt) + { + DevmemIntUnreserveRange(psReservationInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntUnreserveRange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntUnreserveRangeIN_UI8, + IMG_UINT8 * + psDevmemIntUnreserveRangeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeOUT + = + (PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psDevmemIntUnreserveRangeOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psDevmemIntUnreserveRangeIN-> + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); + if (unlikely + ((psDevmemIntUnreserveRangeOUT->eError != PVRSRV_OK) + && (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psDevmemIntUnreserveRangeOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntUnreserveRange_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntUnreserveRange_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeChangeSparseMem(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psChangeSparseMemIN_UI8, + IMG_UINT8 * psChangeSparseMemOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *psChangeSparseMemIN = + (PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *) + IMG_OFFSET_ADDR(psChangeSparseMemIN_UI8, 0); + PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *psChangeSparseMemOUT = + (PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *) + IMG_OFFSET_ADDR(psChangeSparseMemOUT_UI8, 0); + + IMG_HANDLE hSrvDevMemHeap = psChangeSparseMemIN->hSrvDevMemHeap; + DEVMEMINT_HEAP *psSrvDevMemHeapInt = NULL; + IMG_HANDLE hPMR = psChangeSparseMemIN->hPMR; + PMR *psPMRInt = NULL; + IMG_UINT32 *ui32AllocPageIndicesInt = NULL; + IMG_UINT32 *ui32FreePageIndicesInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) + + (psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) + 0; + + if (unlikely + (psChangeSparseMemIN->ui32AllocPageCount > + PMR_MAX_SUPPORTED_PAGE_COUNT)) + { + psChangeSparseMemOUT->eError = + 
PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto ChangeSparseMem_exit; + } + + if (unlikely + (psChangeSparseMemIN->ui32FreePageCount > + PMR_MAX_SUPPORTED_PAGE_COUNT)) + { + psChangeSparseMemOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto ChangeSparseMem_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psChangeSparseMemIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psChangeSparseMemIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psChangeSparseMemOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto ChangeSparseMem_exit; + } + } + } + + if (psChangeSparseMemIN->ui32AllocPageCount != 0) + { + ui32AllocPageIndicesInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psChangeSparseMemIN->ui32AllocPageCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32AllocPageIndicesInt, + (const void __user *)psChangeSparseMemIN-> + pui32AllocPageIndices, + psChangeSparseMemIN->ui32AllocPageCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psChangeSparseMemOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto ChangeSparseMem_exit; + } + } + if (psChangeSparseMemIN->ui32FreePageCount != 0) + { + ui32FreePageIndicesInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psChangeSparseMemIN->ui32FreePageCount * 
sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32FreePageIndicesInt, + (const void __user *)psChangeSparseMemIN-> + pui32FreePageIndices, + psChangeSparseMemIN->ui32FreePageCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psChangeSparseMemOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto ChangeSparseMem_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psChangeSparseMemOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSrvDevMemHeapInt, + hSrvDevMemHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, + IMG_TRUE); + if (unlikely(psChangeSparseMemOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto ChangeSparseMem_exit; + } + + /* Look up the address from the handle */ + psChangeSparseMemOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psChangeSparseMemOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto ChangeSparseMem_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psChangeSparseMemOUT->eError = + DevmemIntChangeSparse(psSrvDevMemHeapInt, + psPMRInt, + psChangeSparseMemIN->ui32AllocPageCount, + ui32AllocPageIndicesInt, + psChangeSparseMemIN->ui32FreePageCount, + ui32FreePageIndicesInt, + psChangeSparseMemIN->ui32SparseFlags, + psChangeSparseMemIN->uiFlags, + psChangeSparseMemIN->sDevVAddr, + psChangeSparseMemIN->ui64CPUVAddr); + +ChangeSparseMem_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSrvDevMemHeapInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSrvDevMemHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); + } + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntMapPages(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntMapPagesIN_UI8, + IMG_UINT8 * psDevmemIntMapPagesOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES *psDevmemIntMapPagesIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES *) + IMG_OFFSET_ADDR(psDevmemIntMapPagesIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES *psDevmemIntMapPagesOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES *) + IMG_OFFSET_ADDR(psDevmemIntMapPagesOUT_UI8, 0); + + IMG_HANDLE hReservation = psDevmemIntMapPagesIN->hReservation; + DEVMEMINT_RESERVATION *psReservationInt = NULL; + IMG_HANDLE hPMR = psDevmemIntMapPagesIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntMapPagesOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, + IMG_TRUE); + if (unlikely(psDevmemIntMapPagesOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntMapPages_exit; + } + + /* Look up the address from the handle */ + psDevmemIntMapPagesOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psDevmemIntMapPagesOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntMapPages_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntMapPagesOUT->eError = + DevmemIntMapPages(psReservationInt, + psPMRInt, + psDevmemIntMapPagesIN->ui32PageCount, + psDevmemIntMapPagesIN->ui32PhysicalPgOffset, + psDevmemIntMapPagesIN->uiFlags, + psDevmemIntMapPagesIN->sDevVAddr); + +DevmemIntMapPages_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psReservationInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); + } + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntUnmapPages(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntUnmapPagesIN_UI8, + IMG_UINT8 * psDevmemIntUnmapPagesOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES *psDevmemIntUnmapPagesIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES *) + IMG_OFFSET_ADDR(psDevmemIntUnmapPagesIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES *psDevmemIntUnmapPagesOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES *) + IMG_OFFSET_ADDR(psDevmemIntUnmapPagesOUT_UI8, 0); + + IMG_HANDLE hReservation = psDevmemIntUnmapPagesIN->hReservation; + DEVMEMINT_RESERVATION *psReservationInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntUnmapPagesOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, + IMG_TRUE); + if (unlikely(psDevmemIntUnmapPagesOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntUnmapPages_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntUnmapPagesOUT->eError = + DevmemIntUnmapPages(psReservationInt, + psDevmemIntUnmapPagesIN->sDevVAddr, + psDevmemIntUnmapPagesIN->ui32PageCount); + +DevmemIntUnmapPages_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psReservationInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIsVDevAddrValid(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIsVDevAddrValidIN_UI8, + IMG_UINT8 * psDevmemIsVDevAddrValidOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID *psDevmemIsVDevAddrValidIN = + (PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID *) + IMG_OFFSET_ADDR(psDevmemIsVDevAddrValidIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID *psDevmemIsVDevAddrValidOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID *) + IMG_OFFSET_ADDR(psDevmemIsVDevAddrValidOUT_UI8, 0); + + IMG_HANDLE hDevmemCtx = psDevmemIsVDevAddrValidIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIsVDevAddrValidOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemCtxInt, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + IMG_TRUE); + if (unlikely(psDevmemIsVDevAddrValidOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIsVDevAddrValid_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIsVDevAddrValidOUT->eError = + DevmemIntIsVDevAddrValid(psConnection, OSGetDevNode(psConnection), + psDevmemCtxInt, + psDevmemIsVDevAddrValidIN->sAddress); + +DevmemIsVDevAddrValid_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemCtxInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#if defined(RGX_SRV_SLC_RANGEBASED_CFI_SUPPORTED) + +static IMG_INT +PVRSRVBridgeDevmemFlushDevSLCRange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemFlushDevSLCRangeIN_UI8, + IMG_UINT8 * psDevmemFlushDevSLCRangeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE *psDevmemFlushDevSLCRangeIN = + (PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE *) + IMG_OFFSET_ADDR(psDevmemFlushDevSLCRangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE *psDevmemFlushDevSLCRangeOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE *) + IMG_OFFSET_ADDR(psDevmemFlushDevSLCRangeOUT_UI8, 0); + + IMG_HANDLE hDevmemCtx = psDevmemFlushDevSLCRangeIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemFlushDevSLCRangeOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemCtxInt, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + IMG_TRUE); + if (unlikely(psDevmemFlushDevSLCRangeOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemFlushDevSLCRange_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemFlushDevSLCRangeOUT->eError = + DevmemIntFlushDevSLCRange(psDevmemCtxInt, + psDevmemFlushDevSLCRangeIN->sAddress, + psDevmemFlushDevSLCRangeIN->uiSize, + psDevmemFlushDevSLCRangeIN->bInvalidate); + +DevmemFlushDevSLCRange_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemCtxInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#else +#define PVRSRVBridgeDevmemFlushDevSLCRange NULL +#endif + +#if defined(RGX_FEATURE_FBCDC) + +static IMG_INT +PVRSRVBridgeDevmemInvalidateFBSCTable(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psDevmemInvalidateFBSCTableIN_UI8, + IMG_UINT8 * + psDevmemInvalidateFBSCTableOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE + *psDevmemInvalidateFBSCTableIN = + (PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE *) + IMG_OFFSET_ADDR(psDevmemInvalidateFBSCTableIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE + *psDevmemInvalidateFBSCTableOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE *) + IMG_OFFSET_ADDR(psDevmemInvalidateFBSCTableOUT_UI8, 0); + + IMG_HANDLE hDevmemCtx = psDevmemInvalidateFBSCTableIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemInvalidateFBSCTableOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemCtxInt, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + IMG_TRUE); + if (unlikely(psDevmemInvalidateFBSCTableOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemInvalidateFBSCTable_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemInvalidateFBSCTableOUT->eError = + DevmemIntInvalidateFBSCTable(psDevmemCtxInt, + psDevmemInvalidateFBSCTableIN-> + ui64FBSCEntries); + +DevmemInvalidateFBSCTable_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemCtxInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#else +#define PVRSRVBridgeDevmemInvalidateFBSCTable NULL +#endif + +static IMG_INT +PVRSRVBridgeHeapCfgHeapConfigCount(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psHeapCfgHeapConfigCountIN_UI8, + IMG_UINT8 * psHeapCfgHeapConfigCountOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT *psHeapCfgHeapConfigCountIN = + (PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT *) + IMG_OFFSET_ADDR(psHeapCfgHeapConfigCountIN_UI8, 0); + PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT *psHeapCfgHeapConfigCountOUT = + (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT *) + IMG_OFFSET_ADDR(psHeapCfgHeapConfigCountOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psHeapCfgHeapConfigCountIN); + + psHeapCfgHeapConfigCountOUT->eError = + HeapCfgHeapConfigCount(psConnection, OSGetDevNode(psConnection), + &psHeapCfgHeapConfigCountOUT-> + ui32NumHeapConfigs); + + return 0; +} + +static IMG_INT +PVRSRVBridgeHeapCfgHeapCount(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psHeapCfgHeapCountIN_UI8, + IMG_UINT8 * psHeapCfgHeapCountOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT *psHeapCfgHeapCountIN = + (PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT *) + IMG_OFFSET_ADDR(psHeapCfgHeapCountIN_UI8, 0); + PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT *psHeapCfgHeapCountOUT = + (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT *) + IMG_OFFSET_ADDR(psHeapCfgHeapCountOUT_UI8, 0); + + psHeapCfgHeapCountOUT->eError = + HeapCfgHeapCount(psConnection, OSGetDevNode(psConnection), + psHeapCfgHeapCountIN->ui32HeapConfigIndex, + &psHeapCfgHeapCountOUT->ui32NumHeaps); + + return 0; +} + +static IMG_INT +PVRSRVBridgeHeapCfgHeapConfigName(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psHeapCfgHeapConfigNameIN_UI8, + IMG_UINT8 * psHeapCfgHeapConfigNameOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME *psHeapCfgHeapConfigNameIN = + (PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME *) + 
IMG_OFFSET_ADDR(psHeapCfgHeapConfigNameIN_UI8, 0); + PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME *psHeapCfgHeapConfigNameOUT = + (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME *) + IMG_OFFSET_ADDR(psHeapCfgHeapConfigNameOUT_UI8, 0); + + IMG_CHAR *puiHeapConfigNameInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * + sizeof(IMG_CHAR)) + 0; + + if (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz > + DEVMEM_HEAPNAME_MAXLENGTH) + { + psHeapCfgHeapConfigNameOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto HeapCfgHeapConfigName_exit; + } + + psHeapCfgHeapConfigNameOUT->puiHeapConfigName = + psHeapCfgHeapConfigNameIN->puiHeapConfigName; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psHeapCfgHeapConfigNameIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psHeapCfgHeapConfigNameIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psHeapCfgHeapConfigNameOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto HeapCfgHeapConfigName_exit; + } + } + } + + if (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz != 0) + { + puiHeapConfigNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * + sizeof(IMG_CHAR); + } + + psHeapCfgHeapConfigNameOUT->eError = + HeapCfgHeapConfigName(psConnection, OSGetDevNode(psConnection), + psHeapCfgHeapConfigNameIN-> + ui32HeapConfigIndex, + psHeapCfgHeapConfigNameIN-> + ui32HeapConfigNameBufSz, + puiHeapConfigNameInt); + + /* If dest ptr is non-null and we have data to copy */ + if ((puiHeapConfigNameInt) && + ((psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * + sizeof(IMG_CHAR)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, + (void __user *)psHeapCfgHeapConfigNameOUT-> + puiHeapConfigName, puiHeapConfigNameInt, + (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * + sizeof(IMG_CHAR))) != PVRSRV_OK)) + { + psHeapCfgHeapConfigNameOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto HeapCfgHeapConfigName_exit; + } + } + +HeapCfgHeapConfigName_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeHeapCfgHeapDetails(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psHeapCfgHeapDetailsIN_UI8, + IMG_UINT8 * 
psHeapCfgHeapDetailsOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS *psHeapCfgHeapDetailsIN = + (PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS *) + IMG_OFFSET_ADDR(psHeapCfgHeapDetailsIN_UI8, 0); + PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS *psHeapCfgHeapDetailsOUT = + (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS *) + IMG_OFFSET_ADDR(psHeapCfgHeapDetailsOUT_UI8, 0); + + IMG_CHAR *puiHeapNameOutInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) + 0; + + if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz > + DEVMEM_HEAPNAME_MAXLENGTH) + { + psHeapCfgHeapDetailsOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto HeapCfgHeapDetails_exit; + } + + psHeapCfgHeapDetailsOUT->puiHeapNameOut = + psHeapCfgHeapDetailsIN->puiHeapNameOut; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psHeapCfgHeapDetailsIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psHeapCfgHeapDetailsIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psHeapCfgHeapDetailsOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto HeapCfgHeapDetails_exit; + } + } + } + + if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz != 0) + { + puiHeapNameOutInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * + sizeof(IMG_CHAR); + } + + psHeapCfgHeapDetailsOUT->eError = + HeapCfgHeapDetails(psConnection, OSGetDevNode(psConnection), + psHeapCfgHeapDetailsIN->ui32HeapConfigIndex, + psHeapCfgHeapDetailsIN->ui32HeapIndex, + psHeapCfgHeapDetailsIN->ui32HeapNameBufSz, + puiHeapNameOutInt, + &psHeapCfgHeapDetailsOUT->sDevVAddrBase, + &psHeapCfgHeapDetailsOUT->uiHeapLength, + &psHeapCfgHeapDetailsOUT->uiReservedRegionLength, + &psHeapCfgHeapDetailsOUT-> + ui32Log2DataPageSizeOut, + &psHeapCfgHeapDetailsOUT-> + ui32Log2ImportAlignmentOut); + + /* If dest ptr is non-null and we have data to copy */ + if ((puiHeapNameOutInt) && + ((psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) > + 0)) + { + if (unlikely + (OSCopyToUser + (NULL, + (void __user *)psHeapCfgHeapDetailsOUT->puiHeapNameOut, + puiHeapNameOutInt, + (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * + sizeof(IMG_CHAR))) != PVRSRV_OK)) + { + psHeapCfgHeapDetailsOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto HeapCfgHeapDetails_exit; + } + } + +HeapCfgHeapDetails_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + 
OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntRegisterPFNotifyKM(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psDevmemIntRegisterPFNotifyKMIN_UI8, + IMG_UINT8 * + psDevmemIntRegisterPFNotifyKMOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM + *psDevmemIntRegisterPFNotifyKMIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM *) + IMG_OFFSET_ADDR(psDevmemIntRegisterPFNotifyKMIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM + *psDevmemIntRegisterPFNotifyKMOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *) + IMG_OFFSET_ADDR(psDevmemIntRegisterPFNotifyKMOUT_UI8, 0); + + IMG_HANDLE hDevmemCtx = psDevmemIntRegisterPFNotifyKMIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntRegisterPFNotifyKMOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemCtxInt, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + IMG_TRUE); + if (unlikely(psDevmemIntRegisterPFNotifyKMOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntRegisterPFNotifyKM_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntRegisterPFNotifyKMOUT->eError = + DevmemIntRegisterPFNotifyKM(psDevmemCtxInt, + psDevmemIntRegisterPFNotifyKMIN-> + ui32PID, + psDevmemIntRegisterPFNotifyKMIN-> + bRegister); + +DevmemIntRegisterPFNotifyKM_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemCtxInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeGetMaxDevMemSize(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psGetMaxDevMemSizeIN_UI8, + IMG_UINT8 * psGetMaxDevMemSizeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_GETMAXDEVMEMSIZE *psGetMaxDevMemSizeIN = + (PVRSRV_BRIDGE_IN_GETMAXDEVMEMSIZE *) + IMG_OFFSET_ADDR(psGetMaxDevMemSizeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_GETMAXDEVMEMSIZE *psGetMaxDevMemSizeOUT = + (PVRSRV_BRIDGE_OUT_GETMAXDEVMEMSIZE *) + IMG_OFFSET_ADDR(psGetMaxDevMemSizeOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psGetMaxDevMemSizeIN); + + psGetMaxDevMemSizeOUT->eError = + PVRSRVGetMaxDevMemSizeKM(psConnection, OSGetDevNode(psConnection), + &psGetMaxDevMemSizeOUT->uiLMASize, + &psGetMaxDevMemSizeOUT->uiUMASize); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemGetFaultAddress(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemGetFaultAddressIN_UI8, + IMG_UINT8 * psDevmemGetFaultAddressOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressIN = + (PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *) + IMG_OFFSET_ADDR(psDevmemGetFaultAddressIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *) + IMG_OFFSET_ADDR(psDevmemGetFaultAddressOUT_UI8, 0); + + IMG_HANDLE hDevmemCtx = psDevmemGetFaultAddressIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemGetFaultAddressOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemCtxInt, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + IMG_TRUE); + if (unlikely(psDevmemGetFaultAddressOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemGetFaultAddress_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psDevmemGetFaultAddressOUT->eError = + DevmemIntGetFaultAddress(psConnection, OSGetDevNode(psConnection), + psDevmemCtxInt, + &psDevmemGetFaultAddressOUT-> + sFaultAddress); + +DevmemGetFaultAddress_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemCtxInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemCtx, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + +static IMG_INT +PVRSRVBridgePVRSRVUpdateOOMStats(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPVRSRVUpdateOOMStatsIN_UI8, + IMG_UINT8 * psPVRSRVUpdateOOMStatsOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS *psPVRSRVUpdateOOMStatsIN = + (PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS *) + IMG_OFFSET_ADDR(psPVRSRVUpdateOOMStatsIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS *psPVRSRVUpdateOOMStatsOUT = + (PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS *) + IMG_OFFSET_ADDR(psPVRSRVUpdateOOMStatsOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psConnection); + + psPVRSRVUpdateOOMStatsOUT->eError = + PVRSRVServerUpdateOOMStats(psPVRSRVUpdateOOMStatsIN-> + ui32ui32StatType, + psPVRSRVUpdateOOMStatsIN->ui32pid); + + return 0; +} + +#else +#define PVRSRVBridgePVRSRVUpdateOOMStats NULL +#endif + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitMMBridge(void); +PVRSRV_ERROR DeinitMMBridge(void); + +/* + * Register all MM functions with services + */ +PVRSRV_ERROR InitMMBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMREXPORTPMR, + PVRSRVBridgePMRExportPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR, + 
PVRSRVBridgePMRUnexportPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRGETUID, + PVRSRVBridgePMRGetUID, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE, + PVRSRVBridgePMRMakeLocalImportHandle, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE, + PVRSRVBridgePMRUnmakeLocalImportHandle, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRIMPORTPMR, + PVRSRVBridgePMRImportPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR, + PVRSRVBridgePMRLocalImportPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR, + PVRSRVBridgePMRUnrefPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR, + PVRSRVBridgePMRUnrefUnlockPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR, + PVRSRVBridgePhysmemNewRamBackedPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR, + PVRSRVBridgePhysmemNewRamBackedLockedPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPIN, + PVRSRVBridgeDevmemIntPin, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN, + PVRSRVBridgeDevmemIntUnpin, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE, + PVRSRVBridgeDevmemIntPinValidate, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE, + PVRSRVBridgeDevmemIntUnpinInvalidate, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE, + PVRSRVBridgeDevmemIntCtxCreate, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY, + PVRSRVBridgeDevmemIntCtxDestroy, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE, + 
PVRSRVBridgeDevmemIntHeapCreate, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY, + PVRSRVBridgeDevmemIntHeapDestroy, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR, + PVRSRVBridgeDevmemIntMapPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR, + PVRSRVBridgeDevmemIntUnmapPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE, + PVRSRVBridgeDevmemIntReserveRange, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE, + PVRSRVBridgeDevmemIntUnreserveRange, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_CHANGESPARSEMEM, + PVRSRVBridgeChangeSparseMem, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES, + PVRSRVBridgeDevmemIntMapPages, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES, + PVRSRVBridgeDevmemIntUnmapPages, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID, + PVRSRVBridgeDevmemIsVDevAddrValid, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE, + PVRSRVBridgeDevmemFlushDevSLCRange, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE, + PVRSRVBridgeDevmemInvalidateFBSCTable, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT, + PVRSRVBridgeHeapCfgHeapConfigCount, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT, + PVRSRVBridgeHeapCfgHeapCount, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME, + PVRSRVBridgeHeapCfgHeapConfigName, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS, + PVRSRVBridgeHeapCfgHeapDetails, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + 
PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM, + PVRSRVBridgeDevmemIntRegisterPFNotifyKM, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_GETMAXDEVMEMSIZE, + PVRSRVBridgeGetMaxDevMemSize, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS, + PVRSRVBridgeDevmemGetFaultAddress, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS, + PVRSRVBridgePVRSRVUpdateOOMStats, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all mm functions with services + */ +PVRSRV_ERROR DeinitMMBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PMREXPORTPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRGETUID); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PMRIMPORTPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTPIN); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE); + + 
UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_CHANGESPARSEMEM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_GETMAXDEVMEMSIZE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/pdump_bridge/client_pdump_bridge.h 
b/drivers/mcst/gpu-imgtec/generated/volcanic/pdump_bridge/client_pdump_bridge.h new file mode 100644 index 000000000000..9c360f418d2d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/pdump_bridge/client_pdump_bridge.h @@ -0,0 +1,153 @@ +/******************************************************************************* +@File +@Title Client bridge header for pdump +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for pdump +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef CLIENT_PDUMP_BRIDGE_H +#define CLIENT_PDUMP_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_pdump_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemPDumpBitmap(IMG_HANDLE + hBridge, + IMG_CHAR * + puiFileName, + IMG_UINT32 + ui32FileOffset, + IMG_UINT32 + ui32Width, + IMG_UINT32 + ui32Height, + IMG_UINT32 + ui32StrideInBytes, + IMG_DEV_VIRTADDR + sDevBaseAddr, + IMG_HANDLE + hDevmemCtx, + IMG_UINT32 + ui32Size, + PDUMP_PIXEL_FORMAT + ePixelFormat, + IMG_UINT32 + ui32AddrMode, + IMG_UINT32 + ui32PDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpImageDescriptor(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_UINT32 + ui32StringSize, + const IMG_CHAR + * puiFileName, + IMG_DEV_VIRTADDR + sDataDevAddr, + IMG_UINT32 + ui32DataSize, + IMG_UINT32 + ui32LogicalWidth, + IMG_UINT32 + ui32LogicalHeight, + IMG_UINT32 + ui32PhysicalWidth, + IMG_UINT32 + ui32PhysicalHeight, + PDUMP_PIXEL_FORMAT + ePixelFormat, + IMG_MEMLAYOUT + eMemLayout, + IMG_FB_COMPRESSION + eFBCompression, + const + IMG_UINT32 * + pui32FBCClearColour, + PDUMP_FBC_SWIZZLE + eeFBCSwizzle, + IMG_DEV_VIRTADDR + sHeaderDevAddr, + 
IMG_UINT32 + ui32HeaderSize, + IMG_UINT32 + ui32PDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpComment(IMG_HANDLE + hBridge, + IMG_CHAR * + puiComment, + IMG_UINT32 + ui32Flags); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpSetFrame(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32Frame); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpDataDescriptor(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_UINT32 + ui32StringSize, + const IMG_CHAR + * puiFileName, + IMG_DEV_VIRTADDR + sDataDevAddr, + IMG_UINT32 + ui32DataSize, + IMG_UINT32 + ui32HeaderType, + IMG_UINT32 + ui32ElementType, + IMG_UINT32 + ui32ElementCount, + IMG_UINT32 + ui32PDumpFlags); + +#endif /* CLIENT_PDUMP_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/pdump_bridge/client_pdump_direct_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/pdump_bridge/client_pdump_direct_bridge.c new file mode 100644 index 000000000000..9b54a7d23138 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/pdump_bridge/client_pdump_direct_bridge.c @@ -0,0 +1,228 @@ +/******************************************************************************* +@File +@Title Direct client bridge for pdump +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for pdump + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include "client_pdump_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "devicemem_typedefs.h" +#include "pdumpdefs.h" +#include + +#include "devicemem_server.h" +#include "pdump_km.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemPDumpBitmap(IMG_HANDLE + hBridge, + IMG_CHAR * + puiFileName, + IMG_UINT32 + ui32FileOffset, + IMG_UINT32 + ui32Width, + IMG_UINT32 + ui32Height, + IMG_UINT32 + ui32StrideInBytes, + IMG_DEV_VIRTADDR + sDevBaseAddr, + IMG_HANDLE + hDevmemCtx, + IMG_UINT32 + ui32Size, + PDUMP_PIXEL_FORMAT + ePixelFormat, + IMG_UINT32 + ui32AddrMode, + IMG_UINT32 + ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtxInt; + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = + DevmemIntPDumpBitmap(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + puiFileName, + ui32FileOffset, + ui32Width, + ui32Height, + ui32StrideInBytes, + sDevBaseAddr, + psDevmemCtxInt, + ui32Size, + ePixelFormat, ui32AddrMode, ui32PDumpFlags); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpImageDescriptor(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_UINT32 + ui32StringSize, + const IMG_CHAR + * puiFileName, + IMG_DEV_VIRTADDR + sDataDevAddr, + IMG_UINT32 + ui32DataSize, + IMG_UINT32 + ui32LogicalWidth, + IMG_UINT32 + ui32LogicalHeight, + IMG_UINT32 + ui32PhysicalWidth, + IMG_UINT32 + ui32PhysicalHeight, + PDUMP_PIXEL_FORMAT + ePixelFormat, + IMG_MEMLAYOUT + eMemLayout, + IMG_FB_COMPRESSION + eFBCompression, + const + IMG_UINT32 * + pui32FBCClearColour, + PDUMP_FBC_SWIZZLE + eeFBCSwizzle, + IMG_DEV_VIRTADDR + sHeaderDevAddr, + IMG_UINT32 + ui32HeaderSize, + IMG_UINT32 + ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtxInt; + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = + DevmemIntPDumpImageDescriptor(NULL, + (PVRSRV_DEVICE_NODE *) ((void *) + 
hBridge), + psDevmemCtxInt, ui32StringSize, + puiFileName, sDataDevAddr, + ui32DataSize, ui32LogicalWidth, + ui32LogicalHeight, ui32PhysicalWidth, + ui32PhysicalHeight, ePixelFormat, + eMemLayout, eFBCompression, + pui32FBCClearColour, eeFBCSwizzle, + sHeaderDevAddr, ui32HeaderSize, + ui32PDumpFlags); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpComment(IMG_HANDLE + hBridge, + IMG_CHAR * + puiComment, + IMG_UINT32 + ui32Flags) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = PDumpCommentKM(puiComment, ui32Flags); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpSetFrame(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32Frame) +{ + PVRSRV_ERROR eError; + + eError = + PDumpSetFrameKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + ui32Frame); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpDataDescriptor(IMG_HANDLE + hBridge, + IMG_HANDLE + hDevmemCtx, + IMG_UINT32 + ui32StringSize, + const IMG_CHAR + * puiFileName, + IMG_DEV_VIRTADDR + sDataDevAddr, + IMG_UINT32 + ui32DataSize, + IMG_UINT32 + ui32HeaderType, + IMG_UINT32 + ui32ElementType, + IMG_UINT32 + ui32ElementCount, + IMG_UINT32 + ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtxInt; + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = + DevmemIntPDumpDataDescriptor(NULL, + (PVRSRV_DEVICE_NODE *) ((void *) + hBridge), + psDevmemCtxInt, ui32StringSize, + puiFileName, sDataDevAddr, + ui32DataSize, ui32HeaderType, + ui32ElementType, ui32ElementCount, + ui32PDumpFlags); + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/pdump_bridge/common_pdump_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/pdump_bridge/common_pdump_bridge.h new file mode 100644 index 000000000000..4e35fc24c1c1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/pdump_bridge/common_pdump_bridge.h @@ -0,0 +1,180 @@ 
+/******************************************************************************* +@File +@Title Common bridge header for pdump +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for pdump +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_PDUMP_BRIDGE_H +#define COMMON_PDUMP_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "devicemem_typedefs.h" +#include "pdumpdefs.h" +#include + +#define PVRSRV_BRIDGE_PDUMP_CMD_FIRST 0 +#define PVRSRV_BRIDGE_PDUMP_DEVMEMPDUMPBITMAP PVRSRV_BRIDGE_PDUMP_CMD_FIRST+0 +#define PVRSRV_BRIDGE_PDUMP_PDUMPIMAGEDESCRIPTOR PVRSRV_BRIDGE_PDUMP_CMD_FIRST+1 +#define PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPCOMMENT PVRSRV_BRIDGE_PDUMP_CMD_FIRST+2 +#define PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPSETFRAME PVRSRV_BRIDGE_PDUMP_CMD_FIRST+3 +#define PVRSRV_BRIDGE_PDUMP_PDUMPDATADESCRIPTOR PVRSRV_BRIDGE_PDUMP_CMD_FIRST+4 +#define PVRSRV_BRIDGE_PDUMP_CMD_LAST (PVRSRV_BRIDGE_PDUMP_CMD_FIRST+4) + +/******************************************* + DevmemPDumpBitmap + *******************************************/ + +/* Bridge in structure for DevmemPDumpBitmap */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMPDUMPBITMAP_TAG +{ + IMG_CHAR *puiFileName; + IMG_UINT32 ui32FileOffset; + IMG_UINT32 ui32Width; + IMG_UINT32 ui32Height; + IMG_UINT32 ui32StrideInBytes; + IMG_DEV_VIRTADDR sDevBaseAddr; + IMG_HANDLE hDevmemCtx; + IMG_UINT32 ui32Size; + PDUMP_PIXEL_FORMAT ePixelFormat; + IMG_UINT32 ui32AddrMode; + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMPDUMPBITMAP; + +/* Bridge out structure for DevmemPDumpBitmap */ +typedef struct 
PVRSRV_BRIDGE_OUT_DEVMEMPDUMPBITMAP_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMPDUMPBITMAP; + +/******************************************* + PDumpImageDescriptor + *******************************************/ + +/* Bridge in structure for PDumpImageDescriptor */ +typedef struct PVRSRV_BRIDGE_IN_PDUMPIMAGEDESCRIPTOR_TAG +{ + IMG_HANDLE hDevmemCtx; + IMG_UINT32 ui32StringSize; + const IMG_CHAR *puiFileName; + IMG_DEV_VIRTADDR sDataDevAddr; + IMG_UINT32 ui32DataSize; + IMG_UINT32 ui32LogicalWidth; + IMG_UINT32 ui32LogicalHeight; + IMG_UINT32 ui32PhysicalWidth; + IMG_UINT32 ui32PhysicalHeight; + PDUMP_PIXEL_FORMAT ePixelFormat; + IMG_MEMLAYOUT eMemLayout; + IMG_FB_COMPRESSION eFBCompression; + const IMG_UINT32 *pui32FBCClearColour; + PDUMP_FBC_SWIZZLE eeFBCSwizzle; + IMG_DEV_VIRTADDR sHeaderDevAddr; + IMG_UINT32 ui32HeaderSize; + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PDUMPIMAGEDESCRIPTOR; + +/* Bridge out structure for PDumpImageDescriptor */ +typedef struct PVRSRV_BRIDGE_OUT_PDUMPIMAGEDESCRIPTOR_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PDUMPIMAGEDESCRIPTOR; + +/******************************************* + PVRSRVPDumpComment + *******************************************/ + +/* Bridge in structure for PVRSRVPDumpComment */ +typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT_TAG +{ + IMG_CHAR *puiComment; + IMG_UINT32 ui32Flags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT; + +/* Bridge out structure for PVRSRVPDumpComment */ +typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT; + +/******************************************* + PVRSRVPDumpSetFrame + *******************************************/ + +/* Bridge in structure for PVRSRVPDumpSetFrame */ +typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME_TAG +{ + IMG_UINT32 ui32Frame; +} __attribute__ 
((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME; + +/* Bridge out structure for PVRSRVPDumpSetFrame */ +typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME; + +/******************************************* + PDumpDataDescriptor + *******************************************/ + +/* Bridge in structure for PDumpDataDescriptor */ +typedef struct PVRSRV_BRIDGE_IN_PDUMPDATADESCRIPTOR_TAG +{ + IMG_HANDLE hDevmemCtx; + IMG_UINT32 ui32StringSize; + const IMG_CHAR *puiFileName; + IMG_DEV_VIRTADDR sDataDevAddr; + IMG_UINT32 ui32DataSize; + IMG_UINT32 ui32HeaderType; + IMG_UINT32 ui32ElementType; + IMG_UINT32 ui32ElementCount; + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PDUMPDATADESCRIPTOR; + +/* Bridge out structure for PDumpDataDescriptor */ +typedef struct PVRSRV_BRIDGE_OUT_PDUMPDATADESCRIPTOR_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PDUMPDATADESCRIPTOR; + +#endif /* COMMON_PDUMP_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/pdump_bridge/server_pdump_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/pdump_bridge/server_pdump_bridge.c new file mode 100644 index 000000000000..8433df3a9ad2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/pdump_bridge/server_pdump_bridge.c @@ -0,0 +1,758 @@ +/******************************************************************************* +@File +@Title Server bridge for pdump +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for pdump +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*******************************************************************************/

#include	/* NOTE(review): include target lost in extraction (angle-bracket
		 * header stripped by the text pipeline) -- confirm against the
		 * bridge generator output */

#include "img_defs.h"

#include "devicemem_server.h"
#include "pdump_km.h"

#include "common_pdump_bridge.h"

#include "allocmem.h"
#include "pvr_debug.h"
#include "connection_server.h"
#include "pvr_bridge.h"
#if defined(SUPPORT_RGX)
#include "rgx_bridge.h"
#endif
#include "srvcore.h"
#include "handle.h"

#include	/* NOTE(review): include target lost in extraction -- confirm */

/* ***************************************************************************
 * Server-side bridge entry points
 */

/*
 * PVRSRVBridgeDevmemPDumpBitmap
 *
 * Unpacks the DevmemPDumpBitmap bridge message: stages the fixed-size
 * file-name string from user space, looks up the devmem-context handle,
 * and forwards the request to DevmemIntPDumpBitmap().
 *
 * Always returns 0 as the dispatch result; the per-operation status is
 * reported to the caller through psDevmemPDumpBitmapOUT->eError.
 */
static IMG_INT
PVRSRVBridgeDevmemPDumpBitmap(IMG_UINT32 ui32DispatchTableEntry,
			      IMG_UINT8 * psDevmemPDumpBitmapIN_UI8,
			      IMG_UINT8 * psDevmemPDumpBitmapOUT_UI8,
			      CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_DEVMEMPDUMPBITMAP *psDevmemPDumpBitmapIN =
	    (PVRSRV_BRIDGE_IN_DEVMEMPDUMPBITMAP *)
	    IMG_OFFSET_ADDR(psDevmemPDumpBitmapIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_DEVMEMPDUMPBITMAP *psDevmemPDumpBitmapOUT =
	    (PVRSRV_BRIDGE_OUT_DEVMEMPDUMPBITMAP *)
	    IMG_OFFSET_ADDR(psDevmemPDumpBitmapOUT_UI8, 0);

	IMG_CHAR *uiFileNameInt = NULL;
	IMG_HANDLE hDevmemCtx = psDevmemPDumpBitmapIN->hDevmemCtx;
	DEVMEMINT_CTX *psDevmemCtxInt = NULL;

	IMG_UINT32 ui32NextOffset = 0;
	IMG_BYTE *pArrayArgsBuffer = NULL;
#if !defined(INTEGRITY_OS)
	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
#endif

	/* Total staging space needed for the variable-length payload
	 * (here: just the file-name array). */
	IMG_UINT32 ui32BufferSize =
	    (PVRSRV_PDUMP_MAX_FILENAME_SIZE * sizeof(IMG_CHAR)) + 0;

	if (ui32BufferSize != 0)
	{
#if !defined(INTEGRITY_OS)
		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
		IMG_UINT32 ui32InBufferOffset =
		    PVR_ALIGN(sizeof(*psDevmemPDumpBitmapIN),
			      sizeof(unsigned long));
		IMG_UINT32 ui32InBufferExcessSize =
		    ui32InBufferOffset >=
		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
		    ui32InBufferOffset;

		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
		if (bHaveEnoughSpace)
		{
			/* Reuse the slack after the IN structure instead of
			 * allocating -- freed (or not) accordingly at exit. */
			IMG_BYTE *pInputBuffer =
			    (IMG_BYTE *) (void *)psDevmemPDumpBitmapIN;

			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
		}
		else
#endif
		{
			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);

			if (!pArrayArgsBuffer)
			{
				psDevmemPDumpBitmapOUT->eError =
				    PVRSRV_ERROR_OUT_OF_MEMORY;
				goto DevmemPDumpBitmap_exit;
			}
		}
	}

	{
		uiFileNameInt =
		    (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer,
						 ui32NextOffset);
		ui32NextOffset +=
		    PVRSRV_PDUMP_MAX_FILENAME_SIZE * sizeof(IMG_CHAR);
	}

	/* Copy the data over */
	if (PVRSRV_PDUMP_MAX_FILENAME_SIZE * sizeof(IMG_CHAR) > 0)
	{
		if (OSCopyFromUser
		    (NULL, uiFileNameInt,
		     (const void __user *)psDevmemPDumpBitmapIN->puiFileName,
		     PVRSRV_PDUMP_MAX_FILENAME_SIZE * sizeof(IMG_CHAR)) !=
		    PVRSRV_OK)
		{
			psDevmemPDumpBitmapOUT->eError =
			    PVRSRV_ERROR_INVALID_PARAMS;

			goto DevmemPDumpBitmap_exit;
		}
		/* Force NUL termination: the user copy is not trusted to
		 * terminate the string. */
		((IMG_CHAR *)
		 uiFileNameInt)[(PVRSRV_PDUMP_MAX_FILENAME_SIZE *
				 sizeof(IMG_CHAR)) - 1] = '\0';
	}

	/* Lock over handle lookup. */
	LockHandle(psConnection->psHandleBase);

	/* Look up the address from the handle */
	psDevmemPDumpBitmapOUT->eError =
	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
				       (void **)&psDevmemCtxInt,
				       hDevmemCtx,
				       PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
				       IMG_TRUE);
	if (unlikely(psDevmemPDumpBitmapOUT->eError != PVRSRV_OK))
	{
		UnlockHandle(psConnection->psHandleBase);
		goto DevmemPDumpBitmap_exit;
	}
	/* Release now we have looked up handles. */
	UnlockHandle(psConnection->psHandleBase);

	psDevmemPDumpBitmapOUT->eError =
	    DevmemIntPDumpBitmap(psConnection, OSGetDevNode(psConnection),
				 uiFileNameInt,
				 psDevmemPDumpBitmapIN->ui32FileOffset,
				 psDevmemPDumpBitmapIN->ui32Width,
				 psDevmemPDumpBitmapIN->ui32Height,
				 psDevmemPDumpBitmapIN->ui32StrideInBytes,
				 psDevmemPDumpBitmapIN->sDevBaseAddr,
				 psDevmemCtxInt,
				 psDevmemPDumpBitmapIN->ui32Size,
				 psDevmemPDumpBitmapIN->ePixelFormat,
				 psDevmemPDumpBitmapIN->ui32AddrMode,
				 psDevmemPDumpBitmapIN->ui32PDumpFlags);

DevmemPDumpBitmap_exit:

	/* Lock over handle lookup cleanup. */
	LockHandle(psConnection->psHandleBase);

	/* Unreference the previously looked up handle */
	if (psDevmemCtxInt)
	{
		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
					    hDevmemCtx,
					    PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
	}
	/* Release now we have cleaned up look up handles. */
	UnlockHandle(psConnection->psHandleBase);

	/* Allocated space should be equal to the last updated offset */
	PVR_ASSERT(ui32BufferSize == ui32NextOffset);

#if defined(INTEGRITY_OS)
	if (pArrayArgsBuffer)
#else
	if (!bHaveEnoughSpace && pArrayArgsBuffer)
#endif
		OSFreeMemNoStats(pArrayArgsBuffer);

	return 0;
}

/*
 * PVRSRVBridgePDumpImageDescriptor
 *
 * Unpacks the PDumpImageDescriptor bridge message: validates the bounded
 * file-name length, stages the name plus the 4-word FBC clear colour,
 * looks up the devmem-context handle and forwards everything to
 * DevmemIntPDumpImageDescriptor().  Status returns in OUT->eError.
 */
static IMG_INT
PVRSRVBridgePDumpImageDescriptor(IMG_UINT32 ui32DispatchTableEntry,
				 IMG_UINT8 * psPDumpImageDescriptorIN_UI8,
				 IMG_UINT8 * psPDumpImageDescriptorOUT_UI8,
				 CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_PDUMPIMAGEDESCRIPTOR *psPDumpImageDescriptorIN =
	    (PVRSRV_BRIDGE_IN_PDUMPIMAGEDESCRIPTOR *)
	    IMG_OFFSET_ADDR(psPDumpImageDescriptorIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_PDUMPIMAGEDESCRIPTOR *psPDumpImageDescriptorOUT =
	    (PVRSRV_BRIDGE_OUT_PDUMPIMAGEDESCRIPTOR *)
	    IMG_OFFSET_ADDR(psPDumpImageDescriptorOUT_UI8, 0);

	IMG_HANDLE hDevmemCtx = psPDumpImageDescriptorIN->hDevmemCtx;
	DEVMEMINT_CTX *psDevmemCtxInt = NULL;
	IMG_CHAR *uiFileNameInt = NULL;
	IMG_UINT32 *ui32FBCClearColourInt = NULL;

	IMG_UINT32 ui32NextOffset = 0;
	IMG_BYTE *pArrayArgsBuffer = NULL;
#if !defined(INTEGRITY_OS)
	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
#endif

	/* Staging space: caller-sized file name + 4 clear-colour words. */
	IMG_UINT32 ui32BufferSize =
	    (psPDumpImageDescriptorIN->ui32StringSize * sizeof(IMG_CHAR)) +
	    (4 * sizeof(IMG_UINT32)) + 0;

	/* Reject over-long user-supplied string sizes before allocating. */
	if (unlikely
	    (psPDumpImageDescriptorIN->ui32StringSize >
	     PVRSRV_PDUMP_MAX_FILENAME_SIZE))
	{
		psPDumpImageDescriptorOUT->eError =
		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
		goto PDumpImageDescriptor_exit;
	}

	if (ui32BufferSize != 0)
	{
#if !defined(INTEGRITY_OS)
		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
		IMG_UINT32 ui32InBufferOffset =
		    PVR_ALIGN(sizeof(*psPDumpImageDescriptorIN),
			      sizeof(unsigned long));
		IMG_UINT32 ui32InBufferExcessSize =
		    ui32InBufferOffset >=
		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
		    ui32InBufferOffset;

		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
		if (bHaveEnoughSpace)
		{
			IMG_BYTE *pInputBuffer =
			    (IMG_BYTE *) (void *)psPDumpImageDescriptorIN;

			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
		}
		else
#endif
		{
			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);

			if (!pArrayArgsBuffer)
			{
				psPDumpImageDescriptorOUT->eError =
				    PVRSRV_ERROR_OUT_OF_MEMORY;
				goto PDumpImageDescriptor_exit;
			}
		}
	}

	if (psPDumpImageDescriptorIN->ui32StringSize != 0)
	{
		uiFileNameInt =
		    (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer,
						 ui32NextOffset);
		ui32NextOffset +=
		    psPDumpImageDescriptorIN->ui32StringSize * sizeof(IMG_CHAR);
	}

	/* Copy the data over */
	if (psPDumpImageDescriptorIN->ui32StringSize * sizeof(IMG_CHAR) > 0)
	{
		if (OSCopyFromUser
		    (NULL, uiFileNameInt,
		     (const void __user *)psPDumpImageDescriptorIN->puiFileName,
		     psPDumpImageDescriptorIN->ui32StringSize *
		     sizeof(IMG_CHAR)) != PVRSRV_OK)
		{
			psPDumpImageDescriptorOUT->eError =
			    PVRSRV_ERROR_INVALID_PARAMS;

			goto PDumpImageDescriptor_exit;
		}
		/* Force NUL termination of the user-supplied name. */
		((IMG_CHAR *)
		 uiFileNameInt)[(psPDumpImageDescriptorIN->ui32StringSize *
				 sizeof(IMG_CHAR)) - 1] = '\0';
	}

	{
		ui32FBCClearColourInt =
		    (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer,
						   ui32NextOffset);
		ui32NextOffset += 4 * sizeof(IMG_UINT32);
	}

	/* Copy the data over */
	if (4 * sizeof(IMG_UINT32) > 0)
	{
		if (OSCopyFromUser
		    (NULL, ui32FBCClearColourInt,
		     (const void __user *)psPDumpImageDescriptorIN->
		     pui32FBCClearColour, 4 * sizeof(IMG_UINT32)) != PVRSRV_OK)
		{
			psPDumpImageDescriptorOUT->eError =
			    PVRSRV_ERROR_INVALID_PARAMS;

			goto PDumpImageDescriptor_exit;
		}
	}

	/* Lock over handle lookup. */
	LockHandle(psConnection->psHandleBase);

	/* Look up the address from the handle */
	psPDumpImageDescriptorOUT->eError =
	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
				       (void **)&psDevmemCtxInt,
				       hDevmemCtx,
				       PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
				       IMG_TRUE);
	if (unlikely(psPDumpImageDescriptorOUT->eError != PVRSRV_OK))
	{
		UnlockHandle(psConnection->psHandleBase);
		goto PDumpImageDescriptor_exit;
	}
	/* Release now we have looked up handles. */
	UnlockHandle(psConnection->psHandleBase);

	psPDumpImageDescriptorOUT->eError =
	    DevmemIntPDumpImageDescriptor(psConnection,
					  OSGetDevNode(psConnection),
					  psDevmemCtxInt,
					  psPDumpImageDescriptorIN->
					  ui32StringSize, uiFileNameInt,
					  psPDumpImageDescriptorIN->
					  sDataDevAddr,
					  psPDumpImageDescriptorIN->
					  ui32DataSize,
					  psPDumpImageDescriptorIN->
					  ui32LogicalWidth,
					  psPDumpImageDescriptorIN->
					  ui32LogicalHeight,
					  psPDumpImageDescriptorIN->
					  ui32PhysicalWidth,
					  psPDumpImageDescriptorIN->
					  ui32PhysicalHeight,
					  psPDumpImageDescriptorIN->
					  ePixelFormat,
					  psPDumpImageDescriptorIN->eMemLayout,
					  psPDumpImageDescriptorIN->
					  eFBCompression, ui32FBCClearColourInt,
					  psPDumpImageDescriptorIN->
					  eeFBCSwizzle,
					  psPDumpImageDescriptorIN->
					  sHeaderDevAddr,
					  psPDumpImageDescriptorIN->
					  ui32HeaderSize,
					  psPDumpImageDescriptorIN->
					  ui32PDumpFlags);

PDumpImageDescriptor_exit:

	/* Lock over handle lookup cleanup. */
	LockHandle(psConnection->psHandleBase);

	/* Unreference the previously looked up handle */
	if (psDevmemCtxInt)
	{
		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
					    hDevmemCtx,
					    PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
	}
	/* Release now we have cleaned up look up handles. */
	UnlockHandle(psConnection->psHandleBase);

	/* Allocated space should be equal to the last updated offset */
	PVR_ASSERT(ui32BufferSize == ui32NextOffset);

#if defined(INTEGRITY_OS)
	if (pArrayArgsBuffer)
#else
	if (!bHaveEnoughSpace && pArrayArgsBuffer)
#endif
		OSFreeMemNoStats(pArrayArgsBuffer);

	return 0;
}

/*
 * PVRSRVBridgePVRSRVPDumpComment
 *
 * Stages a fixed-size comment string from user space (always
 * PVRSRV_PDUMP_MAX_COMMENT_SIZE chars, force-terminated) and passes it to
 * PDumpCommentKM().  No handles are involved, so there is no lookup or
 * cleanup phase.  Status returns in OUT->eError.
 */
static IMG_INT
PVRSRVBridgePVRSRVPDumpComment(IMG_UINT32 ui32DispatchTableEntry,
			       IMG_UINT8 * psPVRSRVPDumpCommentIN_UI8,
			       IMG_UINT8 * psPVRSRVPDumpCommentOUT_UI8,
			       CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT *psPVRSRVPDumpCommentIN =
	    (PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT *)
	    IMG_OFFSET_ADDR(psPVRSRVPDumpCommentIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT *psPVRSRVPDumpCommentOUT =
	    (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT *)
	    IMG_OFFSET_ADDR(psPVRSRVPDumpCommentOUT_UI8, 0);

	IMG_CHAR *uiCommentInt = NULL;

	IMG_UINT32 ui32NextOffset = 0;
	IMG_BYTE *pArrayArgsBuffer = NULL;
#if !defined(INTEGRITY_OS)
	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
#endif

	IMG_UINT32 ui32BufferSize =
	    (PVRSRV_PDUMP_MAX_COMMENT_SIZE * sizeof(IMG_CHAR)) + 0;

	PVR_UNREFERENCED_PARAMETER(psConnection);

	if (ui32BufferSize != 0)
	{
#if !defined(INTEGRITY_OS)
		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
		IMG_UINT32 ui32InBufferOffset =
		    PVR_ALIGN(sizeof(*psPVRSRVPDumpCommentIN),
			      sizeof(unsigned long));
		IMG_UINT32 ui32InBufferExcessSize =
		    ui32InBufferOffset >=
		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
		    ui32InBufferOffset;

		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
		if (bHaveEnoughSpace)
		{
			IMG_BYTE *pInputBuffer =
			    (IMG_BYTE *) (void *)psPVRSRVPDumpCommentIN;

			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
		}
		else
#endif
		{
			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);

			if (!pArrayArgsBuffer)
			{
				psPVRSRVPDumpCommentOUT->eError =
				    PVRSRV_ERROR_OUT_OF_MEMORY;
				goto PVRSRVPDumpComment_exit;
			}
		}
	}

	{
		uiCommentInt =
		    (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer,
						 ui32NextOffset);
		ui32NextOffset +=
		    PVRSRV_PDUMP_MAX_COMMENT_SIZE * sizeof(IMG_CHAR);
	}

	/* Copy the data over */
	if (PVRSRV_PDUMP_MAX_COMMENT_SIZE * sizeof(IMG_CHAR) > 0)
	{
		if (OSCopyFromUser
		    (NULL, uiCommentInt,
		     (const void __user *)psPVRSRVPDumpCommentIN->puiComment,
		     PVRSRV_PDUMP_MAX_COMMENT_SIZE * sizeof(IMG_CHAR)) !=
		    PVRSRV_OK)
		{
			psPVRSRVPDumpCommentOUT->eError =
			    PVRSRV_ERROR_INVALID_PARAMS;

			goto PVRSRVPDumpComment_exit;
		}
		/* Force NUL termination of the user-supplied comment. */
		((IMG_CHAR *)
		 uiCommentInt)[(PVRSRV_PDUMP_MAX_COMMENT_SIZE *
				sizeof(IMG_CHAR)) - 1] = '\0';
	}

	psPVRSRVPDumpCommentOUT->eError =
	    PDumpCommentKM(uiCommentInt, psPVRSRVPDumpCommentIN->ui32Flags);

PVRSRVPDumpComment_exit:

	/* Allocated space should be equal to the last updated offset */
	PVR_ASSERT(ui32BufferSize == ui32NextOffset);

#if defined(INTEGRITY_OS)
	if (pArrayArgsBuffer)
#else
	if (!bHaveEnoughSpace && pArrayArgsBuffer)
#endif
		OSFreeMemNoStats(pArrayArgsBuffer);

	return 0;
}

/*
 * PVRSRVBridgePVRSRVPDumpSetFrame
 *
 * Fixed-size message with no payload to stage: passes the frame number
 * straight to PDumpSetFrameKM().  Status returns in OUT->eError.
 */
static IMG_INT
PVRSRVBridgePVRSRVPDumpSetFrame(IMG_UINT32 ui32DispatchTableEntry,
				IMG_UINT8 * psPVRSRVPDumpSetFrameIN_UI8,
				IMG_UINT8 * psPVRSRVPDumpSetFrameOUT_UI8,
				CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME *psPVRSRVPDumpSetFrameIN =
	    (PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME *)
	    IMG_OFFSET_ADDR(psPVRSRVPDumpSetFrameIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME *psPVRSRVPDumpSetFrameOUT =
	    (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME *)
	    IMG_OFFSET_ADDR(psPVRSRVPDumpSetFrameOUT_UI8, 0);

	psPVRSRVPDumpSetFrameOUT->eError =
	    PDumpSetFrameKM(psConnection, OSGetDevNode(psConnection),
			    psPVRSRVPDumpSetFrameIN->ui32Frame);

	return 0;
}

/*
 * PVRSRVBridgePDumpDataDescriptor
 *
 * Same shape as PVRSRVBridgePDumpImageDescriptor, minus the FBC clear
 * colour: validates and stages the file name, looks up the devmem-context
 * handle and forwards to DevmemIntPDumpDataDescriptor().
 */
static IMG_INT
PVRSRVBridgePDumpDataDescriptor(IMG_UINT32 ui32DispatchTableEntry,
				IMG_UINT8 * psPDumpDataDescriptorIN_UI8,
				IMG_UINT8 * psPDumpDataDescriptorOUT_UI8,
				CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_PDUMPDATADESCRIPTOR *psPDumpDataDescriptorIN =
	    (PVRSRV_BRIDGE_IN_PDUMPDATADESCRIPTOR *)
	    IMG_OFFSET_ADDR(psPDumpDataDescriptorIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_PDUMPDATADESCRIPTOR *psPDumpDataDescriptorOUT =
	    (PVRSRV_BRIDGE_OUT_PDUMPDATADESCRIPTOR *)
	    IMG_OFFSET_ADDR(psPDumpDataDescriptorOUT_UI8, 0);

	IMG_HANDLE hDevmemCtx = psPDumpDataDescriptorIN->hDevmemCtx;
	DEVMEMINT_CTX *psDevmemCtxInt = NULL;
	IMG_CHAR *uiFileNameInt = NULL;

	IMG_UINT32 ui32NextOffset = 0;
	IMG_BYTE *pArrayArgsBuffer = NULL;
#if !defined(INTEGRITY_OS)
	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
#endif

	IMG_UINT32 ui32BufferSize =
	    (psPDumpDataDescriptorIN->ui32StringSize * sizeof(IMG_CHAR)) + 0;

	/* Reject over-long user-supplied string sizes before allocating. */
	if (unlikely
	    (psPDumpDataDescriptorIN->ui32StringSize >
	     PVRSRV_PDUMP_MAX_FILENAME_SIZE))
	{
		psPDumpDataDescriptorOUT->eError =
		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
		goto PDumpDataDescriptor_exit;
	}

	if (ui32BufferSize != 0)
	{
#if !defined(INTEGRITY_OS)
		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
		IMG_UINT32 ui32InBufferOffset =
		    PVR_ALIGN(sizeof(*psPDumpDataDescriptorIN),
			      sizeof(unsigned long));
		IMG_UINT32 ui32InBufferExcessSize =
		    ui32InBufferOffset >=
		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
		    ui32InBufferOffset;

		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
		if (bHaveEnoughSpace)
		{
			IMG_BYTE *pInputBuffer =
			    (IMG_BYTE *) (void *)psPDumpDataDescriptorIN;

			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
		}
		else
#endif
		{
			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);

			if (!pArrayArgsBuffer)
			{
				psPDumpDataDescriptorOUT->eError =
				    PVRSRV_ERROR_OUT_OF_MEMORY;
				goto PDumpDataDescriptor_exit;
			}
		}
	}

	if (psPDumpDataDescriptorIN->ui32StringSize != 0)
	{
		uiFileNameInt =
		    (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer,
						 ui32NextOffset);
		ui32NextOffset +=
		    psPDumpDataDescriptorIN->ui32StringSize * sizeof(IMG_CHAR);
	}

	/* Copy the data over */
	if (psPDumpDataDescriptorIN->ui32StringSize * sizeof(IMG_CHAR) > 0)
	{
		if (OSCopyFromUser
		    (NULL, uiFileNameInt,
		     (const void __user *)psPDumpDataDescriptorIN->puiFileName,
		     psPDumpDataDescriptorIN->ui32StringSize *
		     sizeof(IMG_CHAR)) != PVRSRV_OK)
		{
			psPDumpDataDescriptorOUT->eError =
			    PVRSRV_ERROR_INVALID_PARAMS;

			goto PDumpDataDescriptor_exit;
		}
		/* Force NUL termination of the user-supplied name. */
		((IMG_CHAR *)
		 uiFileNameInt)[(psPDumpDataDescriptorIN->ui32StringSize *
				 sizeof(IMG_CHAR)) - 1] = '\0';
	}

	/* Lock over handle lookup. */
	LockHandle(psConnection->psHandleBase);

	/* Look up the address from the handle */
	psPDumpDataDescriptorOUT->eError =
	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
				       (void **)&psDevmemCtxInt,
				       hDevmemCtx,
				       PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
				       IMG_TRUE);
	if (unlikely(psPDumpDataDescriptorOUT->eError != PVRSRV_OK))
	{
		UnlockHandle(psConnection->psHandleBase);
		goto PDumpDataDescriptor_exit;
	}
	/* Release now we have looked up handles. */
	UnlockHandle(psConnection->psHandleBase);

	psPDumpDataDescriptorOUT->eError =
	    DevmemIntPDumpDataDescriptor(psConnection,
					 OSGetDevNode(psConnection),
					 psDevmemCtxInt,
					 psPDumpDataDescriptorIN->
					 ui32StringSize, uiFileNameInt,
					 psPDumpDataDescriptorIN->sDataDevAddr,
					 psPDumpDataDescriptorIN->ui32DataSize,
					 psPDumpDataDescriptorIN->
					 ui32HeaderType,
					 psPDumpDataDescriptorIN->
					 ui32ElementType,
					 psPDumpDataDescriptorIN->
					 ui32ElementCount,
					 psPDumpDataDescriptorIN->
					 ui32PDumpFlags);

PDumpDataDescriptor_exit:

	/* Lock over handle lookup cleanup. */
	LockHandle(psConnection->psHandleBase);

	/* Unreference the previously looked up handle */
	if (psDevmemCtxInt)
	{
		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
					    hDevmemCtx,
					    PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
	}
	/* Release now we have cleaned up look up handles. */
	UnlockHandle(psConnection->psHandleBase);

	/* Allocated space should be equal to the last updated offset */
	PVR_ASSERT(ui32BufferSize == ui32NextOffset);

#if defined(INTEGRITY_OS)
	if (pArrayArgsBuffer)
#else
	if (!bHaveEnoughSpace && pArrayArgsBuffer)
#endif
		OSFreeMemNoStats(pArrayArgsBuffer);

	return 0;
}

/* ***************************************************************************
 * Server bridge dispatch related glue
 */

PVRSRV_ERROR InitPDUMPBridge(void);
PVRSRV_ERROR DeinitPDUMPBridge(void);

/*
 * Register all PDUMP functions with services
 */
PVRSRV_ERROR InitPDUMPBridge(void)
{

	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP,
			      PVRSRV_BRIDGE_PDUMP_DEVMEMPDUMPBITMAP,
			      PVRSRVBridgeDevmemPDumpBitmap, NULL);

	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP,
			      PVRSRV_BRIDGE_PDUMP_PDUMPIMAGEDESCRIPTOR,
			      PVRSRVBridgePDumpImageDescriptor, NULL);

	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP,
			      PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPCOMMENT,
			      PVRSRVBridgePVRSRVPDumpComment, NULL);

	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP,
			      PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPSETFRAME,
			      PVRSRVBridgePVRSRVPDumpSetFrame, NULL);

	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP,
			      PVRSRV_BRIDGE_PDUMP_PDUMPDATADESCRIPTOR,
			      PVRSRVBridgePDumpDataDescriptor, NULL);

	return PVRSRV_OK;
}

/*
 * Unregister all pdump functions with services
 */
PVRSRV_ERROR DeinitPDUMPBridge(void)
{

	UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP,
				PVRSRV_BRIDGE_PDUMP_DEVMEMPDUMPBITMAP);

	UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP,
				PVRSRV_BRIDGE_PDUMP_PDUMPIMAGEDESCRIPTOR);

	UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP,
				PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPCOMMENT);

	UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP,
				PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPSETFRAME);

	UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP,
				PVRSRV_BRIDGE_PDUMP_PDUMPDATADESCRIPTOR);

	return PVRSRV_OK;
}
diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpctrl_bridge/client_pdumpctrl_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpctrl_bridge/client_pdumpctrl_bridge.h
new file mode 100644
index 000000000000..0dedace31089
--- /dev/null
+++ b/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpctrl_bridge/client_pdumpctrl_bridge.h
@@ -0,0 +1,82 @@
+/*******************************************************************************
+@File
+@Title Client bridge header for pdumpctrl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for pdumpctrl
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef CLIENT_PDUMPCTRL_BRIDGE_H +#define CLIENT_PDUMPCTRL_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_pdumpctrl_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpGetState(IMG_HANDLE + hBridge, + IMG_UINT64 * + pui64State); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpGetFrame(IMG_HANDLE + hBridge, + IMG_UINT32 * + pui32Frame); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgePVRSRVPDumpSetDefaultCaptureParams(IMG_HANDLE hBridge, + IMG_UINT32 ui32Mode, + IMG_UINT32 ui32Start, + IMG_UINT32 ui32End, + IMG_UINT32 ui32Interval, + IMG_UINT32 ui32MaxParamFileSize); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgePVRSRVPDumpIsLastCaptureFrame(IMG_HANDLE hBridge, + IMG_BOOL * pbpbIsLastCaptureFrame); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgePVRSRVPDumpForceCaptureStop(IMG_HANDLE hBridge); + +#endif /* CLIENT_PDUMPCTRL_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.c new file mode 100644 index 000000000000..49b4800da7ee --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.c @@ -0,0 +1,120 @@ +/******************************************************************************* +@File +@Title Direct client bridge for pdumpctrl +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for pdumpctrl + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include "client_pdumpctrl_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ + +#include "pdump_km.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpGetState(IMG_HANDLE + hBridge, + IMG_UINT64 * + pui64State) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = PDumpGetStateKM(pui64State); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpGetFrame(IMG_HANDLE + hBridge, + IMG_UINT32 * + pui32Frame) +{ + PVRSRV_ERROR eError; + + eError = + PDumpGetFrameKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + pui32Frame); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgePVRSRVPDumpSetDefaultCaptureParams(IMG_HANDLE hBridge, + IMG_UINT32 ui32Mode, + IMG_UINT32 ui32Start, + IMG_UINT32 ui32End, + IMG_UINT32 ui32Interval, + IMG_UINT32 ui32MaxParamFileSize) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = + PDumpSetDefaultCaptureParamsKM(ui32Mode, + ui32Start, + ui32End, + ui32Interval, ui32MaxParamFileSize); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgePVRSRVPDumpIsLastCaptureFrame(IMG_HANDLE hBridge, + IMG_BOOL * pbpbIsLastCaptureFrame) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = PDumpIsLastCaptureFrameKM(pbpbIsLastCaptureFrame); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgePVRSRVPDumpForceCaptureStop(IMG_HANDLE hBridge) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = PDumpForceCaptureStopKM(); + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpctrl_bridge/common_pdumpctrl_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpctrl_bridge/common_pdumpctrl_bridge.h new file mode 100644 index 000000000000..1f3ff6d970c2 --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpctrl_bridge/common_pdumpctrl_bridge.h @@ -0,0 +1,148 @@ +/******************************************************************************* +@File +@Title Common bridge header for pdumpctrl +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for pdumpctrl +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*******************************************************************************/

#ifndef COMMON_PDUMPCTRL_BRIDGE_H
#define COMMON_PDUMPCTRL_BRIDGE_H

#include	/* NOTE(review): include target lost in extraction -- confirm
		 * against the generator output */

#include "img_types.h"
#include "pvrsrv_error.h"

/* Bridge command numbers for the PDUMPCTRL group.  These are wire-protocol
 * constants shared by client and server; do not renumber by hand -- the
 * file is generated. */
#define PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST 0
#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETSTATE PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+0
#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+1
#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+2
#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISLASTCAPTUREFRAME PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+3
#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPFORCECAPTURESTOP PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+4
#define PVRSRV_BRIDGE_PDUMPCTRL_CMD_LAST (PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+4)

/*******************************************
            PVRSRVPDumpGetState
 *******************************************/

/* Bridge in structure for PVRSRVPDumpGetState (no inputs; the placeholder
 * keeps the packed struct non-empty). */
typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETSTATE_TAG
{
	IMG_UINT32 ui32EmptyStructPlaceholder;
} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETSTATE;

/* Bridge out structure for PVRSRVPDumpGetState */
typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETSTATE_TAG
{
	IMG_UINT64 ui64State;
	PVRSRV_ERROR eError;
} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETSTATE;

/*******************************************
            PVRSRVPDumpGetFrame
 *******************************************/

/* Bridge in structure for PVRSRVPDumpGetFrame (no inputs). */
typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME_TAG
{
	IMG_UINT32 ui32EmptyStructPlaceholder;
} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME;

/* Bridge out structure for PVRSRVPDumpGetFrame */
typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME_TAG
{
	IMG_UINT32 ui32Frame;
	PVRSRV_ERROR eError;
} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME;

/*******************************************
            PVRSRVPDumpSetDefaultCaptureParams
 *******************************************/

/* Bridge in structure for PVRSRVPDumpSetDefaultCaptureParams */
typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS_TAG
{
	IMG_UINT32 ui32Mode;
	IMG_UINT32 ui32Start;
	IMG_UINT32 ui32End;
	IMG_UINT32 ui32Interval;
	IMG_UINT32 ui32MaxParamFileSize;
} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS;

/* Bridge out structure for PVRSRVPDumpSetDefaultCaptureParams */
typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS_TAG
{
	PVRSRV_ERROR eError;
} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS;

/*******************************************
            PVRSRVPDumpIsLastCaptureFrame
 *******************************************/

/* Bridge in structure for PVRSRVPDumpIsLastCaptureFrame (no inputs). */
typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME_TAG
{
	IMG_UINT32 ui32EmptyStructPlaceholder;
} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME;

/* Bridge out structure for PVRSRVPDumpIsLastCaptureFrame */
typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME_TAG
{
	IMG_BOOL bpbIsLastCaptureFrame;
	PVRSRV_ERROR eError;
} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME;

/*******************************************
            PVRSRVPDumpForceCaptureStop
 *******************************************/

/* Bridge in structure for PVRSRVPDumpForceCaptureStop (no inputs). */
typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPFORCECAPTURESTOP_TAG
{
	IMG_UINT32 ui32EmptyStructPlaceholder;
} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPFORCECAPTURESTOP;

/* Bridge out structure for PVRSRVPDumpForceCaptureStop */
typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPFORCECAPTURESTOP_TAG
{
	PVRSRV_ERROR eError;
} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPFORCECAPTURESTOP;

#endif /* COMMON_PDUMPCTRL_BRIDGE_H */
diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpctrl_bridge/server_pdumpctrl_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpctrl_bridge/server_pdumpctrl_bridge.c
new file mode 100644
index 000000000000..4aa153d5d9c9
--- /dev/null
+++ b/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpctrl_bridge/server_pdumpctrl_bridge.c
@@ -0,0 +1,266 @@
+/*******************************************************************************
+@File
+@Title Server bridge for pdumpctrl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for pdumpctrl
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "pdump_km.h" + +#include "common_pdumpctrl_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +#include "lock.h" + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgePVRSRVPDumpGetState(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPVRSRVPDumpGetStateIN_UI8, + IMG_UINT8 * psPVRSRVPDumpGetStateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETSTATE *psPVRSRVPDumpGetStateIN = + (PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETSTATE *) + IMG_OFFSET_ADDR(psPVRSRVPDumpGetStateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETSTATE *psPVRSRVPDumpGetStateOUT = + (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETSTATE *) + IMG_OFFSET_ADDR(psPVRSRVPDumpGetStateOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpGetStateIN); + + psPVRSRVPDumpGetStateOUT->eError = + PDumpGetStateKM(&psPVRSRVPDumpGetStateOUT->ui64State); + + return 0; +} + +static IMG_INT +PVRSRVBridgePVRSRVPDumpGetFrame(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPVRSRVPDumpGetFrameIN_UI8, + IMG_UINT8 * psPVRSRVPDumpGetFrameOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME *psPVRSRVPDumpGetFrameIN = + (PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME *) + IMG_OFFSET_ADDR(psPVRSRVPDumpGetFrameIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME *psPVRSRVPDumpGetFrameOUT = + (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME *) + IMG_OFFSET_ADDR(psPVRSRVPDumpGetFrameOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpGetFrameIN); + + psPVRSRVPDumpGetFrameOUT->eError = + PDumpGetFrameKM(psConnection, OSGetDevNode(psConnection), + 
&psPVRSRVPDumpGetFrameOUT->ui32Frame); + + return 0; +} + +static IMG_INT +PVRSRVBridgePVRSRVPDumpSetDefaultCaptureParams(IMG_UINT32 + ui32DispatchTableEntry, + IMG_UINT8 * + psPVRSRVPDumpSetDefaultCaptureParamsIN_UI8, + IMG_UINT8 * + psPVRSRVPDumpSetDefaultCaptureParamsOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS + *psPVRSRVPDumpSetDefaultCaptureParamsIN = + (PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS *) + IMG_OFFSET_ADDR(psPVRSRVPDumpSetDefaultCaptureParamsIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS + *psPVRSRVPDumpSetDefaultCaptureParamsOUT = + (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS *) + IMG_OFFSET_ADDR(psPVRSRVPDumpSetDefaultCaptureParamsOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psConnection); + + psPVRSRVPDumpSetDefaultCaptureParamsOUT->eError = + PDumpSetDefaultCaptureParamsKM + (psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Mode, + psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Start, + psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32End, + psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Interval, + psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32MaxParamFileSize); + + return 0; +} + +static IMG_INT +PVRSRVBridgePVRSRVPDumpIsLastCaptureFrame(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psPVRSRVPDumpIsLastCaptureFrameIN_UI8, + IMG_UINT8 * + psPVRSRVPDumpIsLastCaptureFrameOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME + *psPVRSRVPDumpIsLastCaptureFrameIN = + (PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME *) + IMG_OFFSET_ADDR(psPVRSRVPDumpIsLastCaptureFrameIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME + *psPVRSRVPDumpIsLastCaptureFrameOUT = + (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME *) + IMG_OFFSET_ADDR(psPVRSRVPDumpIsLastCaptureFrameOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpIsLastCaptureFrameIN); + + 
psPVRSRVPDumpIsLastCaptureFrameOUT->eError = + PDumpIsLastCaptureFrameKM(&psPVRSRVPDumpIsLastCaptureFrameOUT-> + bpbIsLastCaptureFrame); + + return 0; +} + +static IMG_INT +PVRSRVBridgePVRSRVPDumpForceCaptureStop(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psPVRSRVPDumpForceCaptureStopIN_UI8, + IMG_UINT8 * + psPVRSRVPDumpForceCaptureStopOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PVRSRVPDUMPFORCECAPTURESTOP + *psPVRSRVPDumpForceCaptureStopIN = + (PVRSRV_BRIDGE_IN_PVRSRVPDUMPFORCECAPTURESTOP *) + IMG_OFFSET_ADDR(psPVRSRVPDumpForceCaptureStopIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PVRSRVPDUMPFORCECAPTURESTOP + *psPVRSRVPDumpForceCaptureStopOUT = + (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPFORCECAPTURESTOP *) + IMG_OFFSET_ADDR(psPVRSRVPDumpForceCaptureStopOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpForceCaptureStopIN); + + psPVRSRVPDumpForceCaptureStopOUT->eError = PDumpForceCaptureStopKM(); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +static POS_LOCK pPDUMPCTRLBridgeLock; + +PVRSRV_ERROR InitPDUMPCTRLBridge(void); +PVRSRV_ERROR DeinitPDUMPCTRLBridge(void); + +/* + * Register all PDUMPCTRL functions with services + */ +PVRSRV_ERROR InitPDUMPCTRLBridge(void) +{ + PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&pPDUMPCTRLBridgeLock), + "OSLockCreate"); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, + PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETSTATE, + PVRSRVBridgePVRSRVPDumpGetState, + pPDUMPCTRLBridgeLock); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, + PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME, + PVRSRVBridgePVRSRVPDumpGetFrame, + pPDUMPCTRLBridgeLock); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, + PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS, + PVRSRVBridgePVRSRVPDumpSetDefaultCaptureParams, + pPDUMPCTRLBridgeLock); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, + 
PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISLASTCAPTUREFRAME, + PVRSRVBridgePVRSRVPDumpIsLastCaptureFrame, + pPDUMPCTRLBridgeLock); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, + PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPFORCECAPTURESTOP, + PVRSRVBridgePVRSRVPDumpForceCaptureStop, + pPDUMPCTRLBridgeLock); + + return PVRSRV_OK; +} + +/* + * Unregister all pdumpctrl functions with services + */ +PVRSRV_ERROR DeinitPDUMPCTRLBridge(void) +{ + PVR_LOG_RETURN_IF_ERROR(OSLockDestroy(pPDUMPCTRLBridgeLock), + "OSLockDestroy"); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, + PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETSTATE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, + PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, + PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, + PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISLASTCAPTUREFRAME); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, + PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPFORCECAPTURESTOP); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpmm_bridge/client_pdumpmm_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpmm_bridge/client_pdumpmm_bridge.h new file mode 100644 index 000000000000..04c2874fd5a5 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpmm_bridge/client_pdumpmm_bridge.h @@ -0,0 +1,169 @@ +/******************************************************************************* +@File +@Title Client bridge header for pdumpmm +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for pdumpmm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef CLIENT_PDUMPMM_BRIDGE_H +#define CLIENT_PDUMPMM_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_pdumpmm_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMem(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_DEVMEM_SIZE_T + uiSize, + IMG_UINT32 + ui32PDumpFlags, + IMG_BOOL bbZero); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue32(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_UINT32 + ui32Value, + IMG_UINT32 + ui32PDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue64(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_UINT64 + ui64Value, + IMG_UINT32 + ui32PDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSaveToFile(IMG_HANDLE + hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_DEVMEM_SIZE_T + uiSize, + IMG_UINT32 + ui32ArraySize, + const IMG_CHAR * + puiFileName, + IMG_UINT32 + ui32uiFileOffset); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSymbolicAddr(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_UINT32 + ui32MemspaceNameLen, + IMG_CHAR * + puiMemspaceName, + IMG_UINT32 + ui32SymbolicAddrLen, + IMG_CHAR * + puiSymbolicAddr, + IMG_DEVMEM_OFFSET_T + * + puiNewOffset, + IMG_DEVMEM_OFFSET_T + * + puiNextSymName); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpPol32(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR + eOperator, + IMG_UINT32 + ui32PDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpCheck32(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_OFFSET_T + 
uiOffset, + IMG_UINT32 + ui32Value, + IMG_UINT32 + ui32Mask, + PDUMP_POLL_OPERATOR + eOperator, + IMG_UINT32 + ui32PDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpCBP(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_OFFSET_T + uiReadOffset, + IMG_DEVMEM_OFFSET_T + uiWriteOffset, + IMG_DEVMEM_SIZE_T + uiPacketSize, + IMG_DEVMEM_SIZE_T + uiBufferSize); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevmemIntPDumpSaveToFileVirtual(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemServerContext, + IMG_DEV_VIRTADDR sAddress, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 ui32ArraySize, + const IMG_CHAR * puiFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32PDumpFlags); + +#endif /* CLIENT_PDUMPMM_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpmm_bridge/client_pdumpmm_direct_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpmm_bridge/client_pdumpmm_direct_bridge.c new file mode 100644 index 000000000000..887e90c5b1bc --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpmm_bridge/client_pdumpmm_direct_bridge.c @@ -0,0 +1,300 @@ +/******************************************************************************* +@File +@Title Direct client bridge for pdumpmm +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for pdumpmm + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include "client_pdumpmm_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "pdump.h" +#include "pdumpdefs.h" +#include "pvrsrv_memallocflags.h" +#include "devicemem_typedefs.h" + +#include "devicemem_server.h" +#include "pmr.h" +#include "physmem.h" +#include "pdump_physmem.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMem(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_DEVMEM_SIZE_T + uiSize, + IMG_UINT32 + ui32PDumpFlags, + IMG_BOOL bbZero) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + PMRPDumpLoadMem(psPMRInt, uiOffset, uiSize, ui32PDumpFlags, bbZero); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue32(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_UINT32 + ui32Value, + IMG_UINT32 + ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + PMRPDumpLoadMemValue32(psPMRInt, + uiOffset, ui32Value, ui32PDumpFlags); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue64(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_UINT64 + ui64Value, + IMG_UINT32 + ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + PMRPDumpLoadMemValue64(psPMRInt, + uiOffset, ui64Value, ui32PDumpFlags); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSaveToFile(IMG_HANDLE + hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_DEVMEM_SIZE_T + uiSize, + IMG_UINT32 + ui32ArraySize, + const IMG_CHAR * + puiFileName, + IMG_UINT32 + ui32uiFileOffset) +{ + PVRSRV_ERROR eError; + 
PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + PMRPDumpSaveToFile(psPMRInt, + uiOffset, + uiSize, + ui32ArraySize, puiFileName, ui32uiFileOffset); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSymbolicAddr(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_UINT32 + ui32MemspaceNameLen, + IMG_CHAR * + puiMemspaceName, + IMG_UINT32 + ui32SymbolicAddrLen, + IMG_CHAR * + puiSymbolicAddr, + IMG_DEVMEM_OFFSET_T + * + puiNewOffset, + IMG_DEVMEM_OFFSET_T + * + puiNextSymName) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + PMR_PDumpSymbolicAddr(psPMRInt, + uiOffset, + ui32MemspaceNameLen, + puiMemspaceName, + ui32SymbolicAddrLen, + puiSymbolicAddr, + puiNewOffset, puiNextSymName); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpPol32(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR + eOperator, + IMG_UINT32 + ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + PMRPDumpPol32(psPMRInt, + uiOffset, + ui32Value, ui32Mask, eOperator, ui32PDumpFlags); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpCheck32(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_OFFSET_T + uiOffset, + IMG_UINT32 + ui32Value, + IMG_UINT32 + ui32Mask, + PDUMP_POLL_OPERATOR + eOperator, + IMG_UINT32 + ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + PMRPDumpCheck32(psPMRInt, + uiOffset, + ui32Value, ui32Mask, eOperator, ui32PDumpFlags); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpCBP(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_OFFSET_T + 
uiReadOffset, + IMG_DEVMEM_OFFSET_T + uiWriteOffset, + IMG_DEVMEM_SIZE_T + uiPacketSize, + IMG_DEVMEM_SIZE_T + uiBufferSize) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + PMRPDumpCBP(psPMRInt, + uiReadOffset, + uiWriteOffset, uiPacketSize, uiBufferSize); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeDevmemIntPDumpSaveToFileVirtual(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemServerContext, + IMG_DEV_VIRTADDR sAddress, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 ui32ArraySize, + const IMG_CHAR * puiFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemServerContextInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemServerContextInt = (DEVMEMINT_CTX *) hDevmemServerContext; + + eError = + DevmemIntPDumpSaveToFileVirtual(psDevmemServerContextInt, + sAddress, + uiSize, + ui32ArraySize, + puiFileName, + ui32FileOffset, ui32PDumpFlags); + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpmm_bridge/common_pdumpmm_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpmm_bridge/common_pdumpmm_bridge.h new file mode 100644 index 000000000000..10c634556e72 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpmm_bridge/common_pdumpmm_bridge.h @@ -0,0 +1,260 @@ +/******************************************************************************* +@File +@Title Common bridge header for pdumpmm +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for pdumpmm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_PDUMPMM_BRIDGE_H +#define COMMON_PDUMPMM_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "pdump.h" +#include "pdumpdefs.h" +#include "pvrsrv_memallocflags.h" +#include "devicemem_typedefs.h" + +#define PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST 0 +#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEM PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+0 +#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32 PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+1 +#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE64 PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+2 +#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSAVETOFILE PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+3 +#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSYMBOLICADDR PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+4 +#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPPOL32 PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+5 +#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCHECK32 PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+6 +#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCBP PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+7 +#define PVRSRV_BRIDGE_PDUMPMM_DEVMEMINTPDUMPSAVETOFILEVIRTUAL PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+8 +#define PVRSRV_BRIDGE_PDUMPMM_CMD_LAST (PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+8) + +/******************************************* + PMRPDumpLoadMem + *******************************************/ + +/* Bridge in structure for PMRPDumpLoadMem */ +typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM_TAG +{ + IMG_HANDLE hPMR; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_DEVMEM_SIZE_T uiSize; + IMG_UINT32 ui32PDumpFlags; + IMG_BOOL bbZero; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM; + +/* Bridge out structure for PMRPDumpLoadMem */ +typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM; + +/******************************************* + PMRPDumpLoadMemValue32 + *******************************************/ + +/* Bridge in structure for PMRPDumpLoadMemValue32 */ +typedef 
struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32_TAG +{ + IMG_HANDLE hPMR; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_UINT32 ui32Value; + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32; + +/* Bridge out structure for PMRPDumpLoadMemValue32 */ +typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32; + +/******************************************* + PMRPDumpLoadMemValue64 + *******************************************/ + +/* Bridge in structure for PMRPDumpLoadMemValue64 */ +typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64_TAG +{ + IMG_HANDLE hPMR; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_UINT64 ui64Value; + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64; + +/* Bridge out structure for PMRPDumpLoadMemValue64 */ +typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64; + +/******************************************* + PMRPDumpSaveToFile + *******************************************/ + +/* Bridge in structure for PMRPDumpSaveToFile */ +typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE_TAG +{ + IMG_HANDLE hPMR; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_DEVMEM_SIZE_T uiSize; + IMG_UINT32 ui32ArraySize; + const IMG_CHAR *puiFileName; + IMG_UINT32 ui32uiFileOffset; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE; + +/* Bridge out structure for PMRPDumpSaveToFile */ +typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE; + +/******************************************* + PMRPDumpSymbolicAddr + *******************************************/ + +/* Bridge in structure for PMRPDumpSymbolicAddr */ +typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR_TAG +{ + IMG_HANDLE hPMR; + 
IMG_DEVMEM_OFFSET_T uiOffset; + IMG_UINT32 ui32MemspaceNameLen; + IMG_UINT32 ui32SymbolicAddrLen; + /* Output pointer puiMemspaceName is also an implied input */ + IMG_CHAR *puiMemspaceName; + /* Output pointer puiSymbolicAddr is also an implied input */ + IMG_CHAR *puiSymbolicAddr; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR; + +/* Bridge out structure for PMRPDumpSymbolicAddr */ +typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR_TAG +{ + IMG_CHAR *puiMemspaceName; + IMG_CHAR *puiSymbolicAddr; + IMG_DEVMEM_OFFSET_T uiNewOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR; + +/******************************************* + PMRPDumpPol32 + *******************************************/ + +/* Bridge in structure for PMRPDumpPol32 */ +typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPPOL32_TAG +{ + IMG_HANDLE hPMR; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_UINT32 ui32Value; + IMG_UINT32 ui32Mask; + PDUMP_POLL_OPERATOR eOperator; + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPPOL32; + +/* Bridge out structure for PMRPDumpPol32 */ +typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32; + +/******************************************* + PMRPDumpCheck32 + *******************************************/ + +/* Bridge in structure for PMRPDumpCheck32 */ +typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPCHECK32_TAG +{ + IMG_HANDLE hPMR; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_UINT32 ui32Value; + IMG_UINT32 ui32Mask; + PDUMP_POLL_OPERATOR eOperator; + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPCHECK32; + +/* Bridge out structure for PMRPDumpCheck32 */ +typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPCHECK32_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPCHECK32; + +/******************************************* + 
PMRPDumpCBP + *******************************************/ + +/* Bridge in structure for PMRPDumpCBP */ +typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPCBP_TAG +{ + IMG_HANDLE hPMR; + IMG_DEVMEM_OFFSET_T uiReadOffset; + IMG_DEVMEM_OFFSET_T uiWriteOffset; + IMG_DEVMEM_SIZE_T uiPacketSize; + IMG_DEVMEM_SIZE_T uiBufferSize; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPCBP; + +/* Bridge out structure for PMRPDumpCBP */ +typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPCBP_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPCBP; + +/******************************************* + DevmemIntPDumpSaveToFileVirtual + *******************************************/ + +/* Bridge in structure for DevmemIntPDumpSaveToFileVirtual */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL_TAG +{ + IMG_HANDLE hDevmemServerContext; + IMG_DEV_VIRTADDR sAddress; + IMG_DEVMEM_SIZE_T uiSize; + IMG_UINT32 ui32ArraySize; + const IMG_CHAR *puiFileName; + IMG_UINT32 ui32FileOffset; + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL; + +/* Bridge out structure for DevmemIntPDumpSaveToFileVirtual */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL; + +#endif /* COMMON_PDUMPMM_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpmm_bridge/server_pdumpmm_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpmm_bridge/server_pdumpmm_bridge.c new file mode 100644 index 000000000000..0409e85f91c4 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/pdumpmm_bridge/server_pdumpmm_bridge.c @@ -0,0 +1,1029 @@ +/******************************************************************************* +@File +@Title Server bridge for pdumpmm +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Implements the server side of the bridge for pdumpmm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "devicemem_server.h" +#include "pmr.h" +#include "physmem.h" +#include "pdump_physmem.h" + +#include "common_pdumpmm_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgePMRPDumpLoadMem(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRPDumpLoadMemIN_UI8, + IMG_UINT8 * psPMRPDumpLoadMemOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM *psPMRPDumpLoadMemIN = + (PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM *) + IMG_OFFSET_ADDR(psPMRPDumpLoadMemIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM *psPMRPDumpLoadMemOUT = + (PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM *) + IMG_OFFSET_ADDR(psPMRPDumpLoadMemOUT_UI8, 0); + + IMG_HANDLE hPMR = psPMRPDumpLoadMemIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRPDumpLoadMemOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psPMRPDumpLoadMemOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRPDumpLoadMem_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPMRPDumpLoadMemOUT->eError = + PMRPDumpLoadMem(psPMRInt, + psPMRPDumpLoadMemIN->uiOffset, + psPMRPDumpLoadMemIN->uiSize, + psPMRPDumpLoadMemIN->ui32PDumpFlags, + psPMRPDumpLoadMemIN->bbZero); + +PMRPDumpLoadMem_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRPDumpLoadMemValue32(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRPDumpLoadMemValue32IN_UI8, + IMG_UINT8 * psPMRPDumpLoadMemValue32OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32 *psPMRPDumpLoadMemValue32IN = + (PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32 *) + IMG_OFFSET_ADDR(psPMRPDumpLoadMemValue32IN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32 *psPMRPDumpLoadMemValue32OUT = + (PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32 *) + IMG_OFFSET_ADDR(psPMRPDumpLoadMemValue32OUT_UI8, 0); + + IMG_HANDLE hPMR = psPMRPDumpLoadMemValue32IN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRPDumpLoadMemValue32OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psPMRPDumpLoadMemValue32OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRPDumpLoadMemValue32_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPMRPDumpLoadMemValue32OUT->eError = + PMRPDumpLoadMemValue32(psPMRInt, + psPMRPDumpLoadMemValue32IN->uiOffset, + psPMRPDumpLoadMemValue32IN->ui32Value, + psPMRPDumpLoadMemValue32IN->ui32PDumpFlags); + +PMRPDumpLoadMemValue32_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRPDumpLoadMemValue64(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRPDumpLoadMemValue64IN_UI8, + IMG_UINT8 * psPMRPDumpLoadMemValue64OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64 *psPMRPDumpLoadMemValue64IN = + (PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64 *) + IMG_OFFSET_ADDR(psPMRPDumpLoadMemValue64IN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64 *psPMRPDumpLoadMemValue64OUT = + (PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64 *) + IMG_OFFSET_ADDR(psPMRPDumpLoadMemValue64OUT_UI8, 0); + + IMG_HANDLE hPMR = psPMRPDumpLoadMemValue64IN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRPDumpLoadMemValue64OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psPMRPDumpLoadMemValue64OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRPDumpLoadMemValue64_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPMRPDumpLoadMemValue64OUT->eError = + PMRPDumpLoadMemValue64(psPMRInt, + psPMRPDumpLoadMemValue64IN->uiOffset, + psPMRPDumpLoadMemValue64IN->ui64Value, + psPMRPDumpLoadMemValue64IN->ui32PDumpFlags); + +PMRPDumpLoadMemValue64_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRPDumpSaveToFile(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRPDumpSaveToFileIN_UI8, + IMG_UINT8 * psPMRPDumpSaveToFileOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE *psPMRPDumpSaveToFileIN = + (PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE *) + IMG_OFFSET_ADDR(psPMRPDumpSaveToFileIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE *psPMRPDumpSaveToFileOUT = + (PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE *) + IMG_OFFSET_ADDR(psPMRPDumpSaveToFileOUT_UI8, 0); + + IMG_HANDLE hPMR = psPMRPDumpSaveToFileIN->hPMR; + PMR *psPMRInt = NULL; + IMG_CHAR *uiFileNameInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR)) + 0; + + if (unlikely + (psPMRPDumpSaveToFileIN->ui32ArraySize > + PVRSRV_PDUMP_MAX_FILENAME_SIZE)) + { + psPMRPDumpSaveToFileOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PMRPDumpSaveToFile_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psPMRPDumpSaveToFileIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psPMRPDumpSaveToFileIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psPMRPDumpSaveToFileOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto PMRPDumpSaveToFile_exit; + } + } + } + + if (psPMRPDumpSaveToFileIN->ui32ArraySize != 0) + { + uiFileNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiFileNameInt, + (const void __user *)psPMRPDumpSaveToFileIN->puiFileName, + psPMRPDumpSaveToFileIN->ui32ArraySize * + sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psPMRPDumpSaveToFileOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PMRPDumpSaveToFile_exit; + } + ((IMG_CHAR *) + uiFileNameInt)[(psPMRPDumpSaveToFileIN->ui32ArraySize * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRPDumpSaveToFileOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psPMRPDumpSaveToFileOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRPDumpSaveToFile_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psPMRPDumpSaveToFileOUT->eError = + PMRPDumpSaveToFile(psPMRInt, + psPMRPDumpSaveToFileIN->uiOffset, + psPMRPDumpSaveToFileIN->uiSize, + psPMRPDumpSaveToFileIN->ui32ArraySize, + uiFileNameInt, + psPMRPDumpSaveToFileIN->ui32uiFileOffset); + +PMRPDumpSaveToFile_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRPDumpSymbolicAddr(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRPDumpSymbolicAddrIN_UI8, + IMG_UINT8 * psPMRPDumpSymbolicAddrOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR *psPMRPDumpSymbolicAddrIN = + (PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR *) + IMG_OFFSET_ADDR(psPMRPDumpSymbolicAddrIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR *psPMRPDumpSymbolicAddrOUT = + (PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR *) + IMG_OFFSET_ADDR(psPMRPDumpSymbolicAddrOUT_UI8, 0); + + IMG_HANDLE hPMR = psPMRPDumpSymbolicAddrIN->hPMR; + PMR *psPMRInt = NULL; + IMG_CHAR *puiMemspaceNameInt = NULL; + IMG_CHAR *puiSymbolicAddrInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR)) + + (psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR)) + + 0; + + 
if (psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen > + PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH) + { + psPMRPDumpSymbolicAddrOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PMRPDumpSymbolicAddr_exit; + } + + if (psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen > + PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH) + { + psPMRPDumpSymbolicAddrOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PMRPDumpSymbolicAddr_exit; + } + + psPMRPDumpSymbolicAddrOUT->puiMemspaceName = + psPMRPDumpSymbolicAddrIN->puiMemspaceName; + psPMRPDumpSymbolicAddrOUT->puiSymbolicAddr = + psPMRPDumpSymbolicAddrIN->puiSymbolicAddr; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psPMRPDumpSymbolicAddrIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psPMRPDumpSymbolicAddrIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psPMRPDumpSymbolicAddrOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto PMRPDumpSymbolicAddr_exit; + } + } + } + + if (psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen != 0) + { + puiMemspaceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * + sizeof(IMG_CHAR); + } + + if (psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen != 0) + { + puiSymbolicAddrInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * + sizeof(IMG_CHAR); + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRPDumpSymbolicAddrOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psPMRPDumpSymbolicAddrOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRPDumpSymbolicAddr_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPMRPDumpSymbolicAddrOUT->eError = + PMR_PDumpSymbolicAddr(psPMRInt, + psPMRPDumpSymbolicAddrIN->uiOffset, + psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen, + puiMemspaceNameInt, + psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen, + puiSymbolicAddrInt, + &psPMRPDumpSymbolicAddrOUT->uiNewOffset, + &psPMRPDumpSymbolicAddrOUT->uiNextSymName); + + /* If dest ptr is non-null and we have data to copy */ + if ((puiMemspaceNameInt) && + ((psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * + sizeof(IMG_CHAR)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, + (void __user *)psPMRPDumpSymbolicAddrOUT->puiMemspaceName, + puiMemspaceNameInt, + (psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * + sizeof(IMG_CHAR))) != PVRSRV_OK)) + { + psPMRPDumpSymbolicAddrOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PMRPDumpSymbolicAddr_exit; + } + } + + /* If dest ptr is non-null and we have data to copy */ + if ((puiSymbolicAddrInt) && + ((psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * + sizeof(IMG_CHAR)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, + (void __user *)psPMRPDumpSymbolicAddrOUT->puiSymbolicAddr, + puiSymbolicAddrInt, + (psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * + sizeof(IMG_CHAR))) != PVRSRV_OK)) + { + psPMRPDumpSymbolicAddrOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto PMRPDumpSymbolicAddr_exit; + } + } + +PMRPDumpSymbolicAddr_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRPDumpPol32(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRPDumpPol32IN_UI8, + IMG_UINT8 * psPMRPDumpPol32OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRPDUMPPOL32 *psPMRPDumpPol32IN = + (PVRSRV_BRIDGE_IN_PMRPDUMPPOL32 *) + IMG_OFFSET_ADDR(psPMRPDumpPol32IN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32 *psPMRPDumpPol32OUT = + (PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32 *) + IMG_OFFSET_ADDR(psPMRPDumpPol32OUT_UI8, 0); + + IMG_HANDLE hPMR = psPMRPDumpPol32IN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRPDumpPol32OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psPMRPDumpPol32OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRPDumpPol32_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPMRPDumpPol32OUT->eError = + PMRPDumpPol32(psPMRInt, + psPMRPDumpPol32IN->uiOffset, + psPMRPDumpPol32IN->ui32Value, + psPMRPDumpPol32IN->ui32Mask, + psPMRPDumpPol32IN->eOperator, + psPMRPDumpPol32IN->ui32PDumpFlags); + +PMRPDumpPol32_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRPDumpCheck32(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRPDumpCheck32IN_UI8, + IMG_UINT8 * psPMRPDumpCheck32OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRPDUMPCHECK32 *psPMRPDumpCheck32IN = + (PVRSRV_BRIDGE_IN_PMRPDUMPCHECK32 *) + IMG_OFFSET_ADDR(psPMRPDumpCheck32IN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRPDUMPCHECK32 *psPMRPDumpCheck32OUT = + (PVRSRV_BRIDGE_OUT_PMRPDUMPCHECK32 *) + IMG_OFFSET_ADDR(psPMRPDumpCheck32OUT_UI8, 0); + + IMG_HANDLE hPMR = psPMRPDumpCheck32IN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRPDumpCheck32OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psPMRPDumpCheck32OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRPDumpCheck32_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPMRPDumpCheck32OUT->eError = + PMRPDumpCheck32(psPMRInt, + psPMRPDumpCheck32IN->uiOffset, + psPMRPDumpCheck32IN->ui32Value, + psPMRPDumpCheck32IN->ui32Mask, + psPMRPDumpCheck32IN->eOperator, + psPMRPDumpCheck32IN->ui32PDumpFlags); + +PMRPDumpCheck32_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRPDumpCBP(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRPDumpCBPIN_UI8, + IMG_UINT8 * psPMRPDumpCBPOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRPDUMPCBP *psPMRPDumpCBPIN = + (PVRSRV_BRIDGE_IN_PMRPDUMPCBP *) + IMG_OFFSET_ADDR(psPMRPDumpCBPIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRPDUMPCBP *psPMRPDumpCBPOUT = + (PVRSRV_BRIDGE_OUT_PMRPDUMPCBP *) + IMG_OFFSET_ADDR(psPMRPDumpCBPOUT_UI8, 0); + + IMG_HANDLE hPMR = psPMRPDumpCBPIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRPDumpCBPOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psPMRPDumpCBPOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRPDumpCBP_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPMRPDumpCBPOUT->eError = + PMRPDumpCBP(psPMRInt, + psPMRPDumpCBPIN->uiReadOffset, + psPMRPDumpCBPIN->uiWriteOffset, + psPMRPDumpCBPIN->uiPacketSize, + psPMRPDumpCBPIN->uiBufferSize); + +PMRPDumpCBP_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psDevmemIntPDumpSaveToFileVirtualIN_UI8, + IMG_UINT8 * + psDevmemIntPDumpSaveToFileVirtualOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL + *psDevmemIntPDumpSaveToFileVirtualIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL *) + IMG_OFFSET_ADDR(psDevmemIntPDumpSaveToFileVirtualIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL + *psDevmemIntPDumpSaveToFileVirtualOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL *) + IMG_OFFSET_ADDR(psDevmemIntPDumpSaveToFileVirtualOUT_UI8, 0); + + IMG_HANDLE hDevmemServerContext = + psDevmemIntPDumpSaveToFileVirtualIN->hDevmemServerContext; + DEVMEMINT_CTX *psDevmemServerContextInt = NULL; + IMG_CHAR *uiFileNameInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * + sizeof(IMG_CHAR)) + 0; + + if (unlikely + (psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize > + PVRSRV_PDUMP_MAX_FILENAME_SIZE)) + { + psDevmemIntPDumpSaveToFileVirtualOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto DevmemIntPDumpSaveToFileVirtual_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psDevmemIntPDumpSaveToFileVirtualIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *) + psDevmemIntPDumpSaveToFileVirtualIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psDevmemIntPDumpSaveToFileVirtualOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto DevmemIntPDumpSaveToFileVirtual_exit; + } + } + } + + if (psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize != 0) + { + uiFileNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * + sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * + sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiFileNameInt, + (const void __user *)psDevmemIntPDumpSaveToFileVirtualIN-> + puiFileName, + psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * + sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psDevmemIntPDumpSaveToFileVirtualOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto DevmemIntPDumpSaveToFileVirtual_exit; + } + ((IMG_CHAR *) + uiFileNameInt)[(psDevmemIntPDumpSaveToFileVirtualIN-> + ui32ArraySize * sizeof(IMG_CHAR)) - 1] = '\0'; + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntPDumpSaveToFileVirtualOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemServerContextInt, + hDevmemServerContext, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + IMG_TRUE); + if (unlikely(psDevmemIntPDumpSaveToFileVirtualOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntPDumpSaveToFileVirtual_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntPDumpSaveToFileVirtualOUT->eError = + DevmemIntPDumpSaveToFileVirtual(psDevmemServerContextInt, + psDevmemIntPDumpSaveToFileVirtualIN-> + sAddress, + psDevmemIntPDumpSaveToFileVirtualIN-> + uiSize, + psDevmemIntPDumpSaveToFileVirtualIN-> + ui32ArraySize, uiFileNameInt, + psDevmemIntPDumpSaveToFileVirtualIN-> + ui32FileOffset, + psDevmemIntPDumpSaveToFileVirtualIN-> + ui32PDumpFlags); + +DevmemIntPDumpSaveToFileVirtual_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemServerContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemServerContext, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitPDUMPMMBridge(void); +PVRSRV_ERROR DeinitPDUMPMMBridge(void); + +/* + * Register all PDUMPMM functions with services + */ +PVRSRV_ERROR InitPDUMPMMBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEM, + PVRSRVBridgePMRPDumpLoadMem, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32, + PVRSRVBridgePMRPDumpLoadMemValue32, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE64, + PVRSRVBridgePMRPDumpLoadMemValue64, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSAVETOFILE, + 
PVRSRVBridgePMRPDumpSaveToFile, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSYMBOLICADDR, + PVRSRVBridgePMRPDumpSymbolicAddr, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPPOL32, + PVRSRVBridgePMRPDumpPol32, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCHECK32, + PVRSRVBridgePMRPDumpCheck32, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCBP, + PVRSRVBridgePMRPDumpCBP, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_DEVMEMINTPDUMPSAVETOFILEVIRTUAL, + PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual, + NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all pdumpmm functions with services + */ +PVRSRV_ERROR DeinitPDUMPMMBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE64); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSAVETOFILE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSYMBOLICADDR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPPOL32); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCHECK32); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCBP); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, + PVRSRV_BRIDGE_PDUMPMM_DEVMEMINTPDUMPSAVETOFILEVIRTUAL); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/pvrtl_bridge/client_pvrtl_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/pvrtl_bridge/client_pvrtl_bridge.h new file mode 100644 index 000000000000..c3cc129384af --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/generated/volcanic/pvrtl_bridge/client_pvrtl_bridge.h @@ -0,0 +1,113 @@ +/******************************************************************************* +@File +@Title Client bridge header for pvrtl +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for pvrtl +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef CLIENT_PVRTL_BRIDGE_H +#define CLIENT_PVRTL_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_pvrtl_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLOpenStream(IMG_HANDLE hBridge, + const IMG_CHAR * + puiName, + IMG_UINT32 ui32Mode, + IMG_HANDLE * phSD, + IMG_HANDLE * phTLPMR); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCloseStream(IMG_HANDLE hBridge, + IMG_HANDLE hSD); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLAcquireData(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 * + pui32ReadOffset, + IMG_UINT32 * + pui32ReadLen); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReleaseData(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 + ui32ReadOffset, + IMG_UINT32 + ui32ReadLen); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLDiscoverStreams(IMG_HANDLE + hBridge, + const IMG_CHAR * + puiNamePattern, + IMG_UINT32 + ui32Size, + IMG_CHAR * + puiStreams, + IMG_UINT32 * + pui32NumFound); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReserveStream(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 * + pui32BufferOffset, + IMG_UINT32 + ui32Size, + IMG_UINT32 + ui32SizeMin, + IMG_UINT32 * + pui32Available); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCommitStream(IMG_HANDLE 
hBridge, + IMG_HANDLE hSD, + IMG_UINT32 + ui32ReqSize); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLWriteData(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 ui32Size, + IMG_BYTE * psData); + +#endif /* CLIENT_PVRTL_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/pvrtl_bridge/client_pvrtl_direct_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/pvrtl_bridge/client_pvrtl_direct_bridge.c new file mode 100644 index 000000000000..481ed7945956 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/pvrtl_bridge/client_pvrtl_direct_bridge.c @@ -0,0 +1,200 @@ +/******************************************************************************* +@File +@Title Direct client bridge for pvrtl +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for pvrtl + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include "client_pvrtl_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "devicemem_typedefs.h" +#include "pvrsrv_tlcommon.h" + +#include "tlserver.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLOpenStream(IMG_HANDLE hBridge, + const IMG_CHAR * + puiName, + IMG_UINT32 ui32Mode, + IMG_HANDLE * phSD, + IMG_HANDLE * phTLPMR) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC *psSDInt = NULL; + PMR *psTLPMRInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = TLServerOpenStreamKM(puiName, ui32Mode, &psSDInt, &psTLPMRInt); + + *phSD = psSDInt; + *phTLPMR = psTLPMRInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCloseStream(IMG_HANDLE hBridge, + IMG_HANDLE hSD) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC *psSDInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSDInt = (TL_STREAM_DESC *) hSD; + + eError = TLServerCloseStreamKM(psSDInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLAcquireData(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 * + pui32ReadOffset, + IMG_UINT32 * + pui32ReadLen) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC *psSDInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSDInt = (TL_STREAM_DESC *) hSD; + + eError = TLServerAcquireDataKM(psSDInt, pui32ReadOffset, pui32ReadLen); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReleaseData(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 + ui32ReadOffset, + IMG_UINT32 + ui32ReadLen) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC *psSDInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSDInt = (TL_STREAM_DESC *) hSD; + + eError = TLServerReleaseDataKM(psSDInt, ui32ReadOffset, ui32ReadLen); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLDiscoverStreams(IMG_HANDLE + hBridge, + const IMG_CHAR * + puiNamePattern, + IMG_UINT32 + ui32Size, + IMG_CHAR * + puiStreams, + IMG_UINT32 
* + pui32NumFound) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = + TLServerDiscoverStreamsKM(puiNamePattern, + ui32Size, puiStreams, pui32NumFound); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReserveStream(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 * + pui32BufferOffset, + IMG_UINT32 + ui32Size, + IMG_UINT32 + ui32SizeMin, + IMG_UINT32 * + pui32Available) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC *psSDInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSDInt = (TL_STREAM_DESC *) hSD; + + eError = + TLServerReserveStreamKM(psSDInt, + pui32BufferOffset, + ui32Size, ui32SizeMin, pui32Available); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCommitStream(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 + ui32ReqSize) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC *psSDInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSDInt = (TL_STREAM_DESC *) hSD; + + eError = TLServerCommitStreamKM(psSDInt, ui32ReqSize); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLWriteData(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 ui32Size, + IMG_BYTE * psData) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC *psSDInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSDInt = (TL_STREAM_DESC *) hSD; + + eError = TLServerWriteDataKM(psSDInt, ui32Size, psData); + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/pvrtl_bridge/common_pvrtl_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/pvrtl_bridge/common_pvrtl_bridge.h new file mode 100644 index 000000000000..2047536c112d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/pvrtl_bridge/common_pvrtl_bridge.h @@ -0,0 +1,214 @@ +/******************************************************************************* +@File +@Title Common bridge header for pvrtl +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for pvrtl +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_PVRTL_BRIDGE_H +#define COMMON_PVRTL_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "devicemem_typedefs.h" +#include "pvrsrv_tlcommon.h" + +#define PVRSRV_BRIDGE_PVRTL_CMD_FIRST 0 +#define PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+0 +#define PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+1 +#define PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+2 +#define PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+3 +#define PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS PVRSRV_BRIDGE_PVRTL_CMD_FIRST+4 +#define PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+5 +#define PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+6 +#define PVRSRV_BRIDGE_PVRTL_TLWRITEDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+7 +#define PVRSRV_BRIDGE_PVRTL_CMD_LAST (PVRSRV_BRIDGE_PVRTL_CMD_FIRST+7) + +/******************************************* + TLOpenStream + *******************************************/ + +/* Bridge in structure for TLOpenStream */ +typedef struct PVRSRV_BRIDGE_IN_TLOPENSTREAM_TAG +{ + const IMG_CHAR *puiName; + IMG_UINT32 ui32Mode; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLOPENSTREAM; + +/* Bridge out structure for TLOpenStream */ +typedef struct PVRSRV_BRIDGE_OUT_TLOPENSTREAM_TAG +{ + IMG_HANDLE hSD; + IMG_HANDLE hTLPMR; + 
PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLOPENSTREAM; + +/******************************************* + TLCloseStream + *******************************************/ + +/* Bridge in structure for TLCloseStream */ +typedef struct PVRSRV_BRIDGE_IN_TLCLOSESTREAM_TAG +{ + IMG_HANDLE hSD; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLCLOSESTREAM; + +/* Bridge out structure for TLCloseStream */ +typedef struct PVRSRV_BRIDGE_OUT_TLCLOSESTREAM_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLCLOSESTREAM; + +/******************************************* + TLAcquireData + *******************************************/ + +/* Bridge in structure for TLAcquireData */ +typedef struct PVRSRV_BRIDGE_IN_TLACQUIREDATA_TAG +{ + IMG_HANDLE hSD; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLACQUIREDATA; + +/* Bridge out structure for TLAcquireData */ +typedef struct PVRSRV_BRIDGE_OUT_TLACQUIREDATA_TAG +{ + IMG_UINT32 ui32ReadOffset; + IMG_UINT32 ui32ReadLen; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLACQUIREDATA; + +/******************************************* + TLReleaseData + *******************************************/ + +/* Bridge in structure for TLReleaseData */ +typedef struct PVRSRV_BRIDGE_IN_TLRELEASEDATA_TAG +{ + IMG_HANDLE hSD; + IMG_UINT32 ui32ReadOffset; + IMG_UINT32 ui32ReadLen; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLRELEASEDATA; + +/* Bridge out structure for TLReleaseData */ +typedef struct PVRSRV_BRIDGE_OUT_TLRELEASEDATA_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLRELEASEDATA; + +/******************************************* + TLDiscoverStreams + *******************************************/ + +/* Bridge in structure for TLDiscoverStreams */ +typedef struct PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS_TAG +{ + const IMG_CHAR *puiNamePattern; + IMG_UINT32 ui32Size; + /* Output pointer puiStreams is also an implied input */ + IMG_CHAR *puiStreams; +} 
__attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS; + +/* Bridge out structure for TLDiscoverStreams */ +typedef struct PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS_TAG +{ + IMG_CHAR *puiStreams; + IMG_UINT32 ui32NumFound; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS; + +/******************************************* + TLReserveStream + *******************************************/ + +/* Bridge in structure for TLReserveStream */ +typedef struct PVRSRV_BRIDGE_IN_TLRESERVESTREAM_TAG +{ + IMG_HANDLE hSD; + IMG_UINT32 ui32Size; + IMG_UINT32 ui32SizeMin; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLRESERVESTREAM; + +/* Bridge out structure for TLReserveStream */ +typedef struct PVRSRV_BRIDGE_OUT_TLRESERVESTREAM_TAG +{ + IMG_UINT32 ui32BufferOffset; + IMG_UINT32 ui32Available; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLRESERVESTREAM; + +/******************************************* + TLCommitStream + *******************************************/ + +/* Bridge in structure for TLCommitStream */ +typedef struct PVRSRV_BRIDGE_IN_TLCOMMITSTREAM_TAG +{ + IMG_HANDLE hSD; + IMG_UINT32 ui32ReqSize; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLCOMMITSTREAM; + +/* Bridge out structure for TLCommitStream */ +typedef struct PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM; + +/******************************************* + TLWriteData + *******************************************/ + +/* Bridge in structure for TLWriteData */ +typedef struct PVRSRV_BRIDGE_IN_TLWRITEDATA_TAG +{ + IMG_HANDLE hSD; + IMG_UINT32 ui32Size; + IMG_BYTE *psData; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLWRITEDATA; + +/* Bridge out structure for TLWriteData */ +typedef struct PVRSRV_BRIDGE_OUT_TLWRITEDATA_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLWRITEDATA; + +#endif /* COMMON_PVRTL_BRIDGE_H */ diff --git 
a/drivers/mcst/gpu-imgtec/generated/volcanic/pvrtl_bridge/server_pvrtl_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/pvrtl_bridge/server_pvrtl_bridge.c new file mode 100644 index 000000000000..7a601bea2cf2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/pvrtl_bridge/server_pvrtl_bridge.c @@ -0,0 +1,880 @@ +/******************************************************************************* +@File +@Title Server bridge for pvrtl +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for pvrtl +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "tlserver.h" + +#include "common_pvrtl_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _TLOpenStreampsSDIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = TLServerCloseStreamKM((TL_STREAM_DESC *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeTLOpenStream(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLOpenStreamIN_UI8, + IMG_UINT8 * psTLOpenStreamOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLOPENSTREAM *psTLOpenStreamIN = + (PVRSRV_BRIDGE_IN_TLOPENSTREAM *) + IMG_OFFSET_ADDR(psTLOpenStreamIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLOPENSTREAM *psTLOpenStreamOUT = + (PVRSRV_BRIDGE_OUT_TLOPENSTREAM *) + IMG_OFFSET_ADDR(psTLOpenStreamOUT_UI8, 0); + + IMG_CHAR *uiNameInt = NULL; + TL_STREAM_DESC *psSDInt = NULL; + PMR *psTLPMRInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = 
IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) + 0; + + psTLOpenStreamOUT->hSD = NULL; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psTLOpenStreamIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psTLOpenStreamIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psTLOpenStreamOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto TLOpenStream_exit; + } + } + } + + { + uiNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiNameInt, + (const void __user *)psTLOpenStreamIN->puiName, + PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) != + PVRSRV_OK) + { + psTLOpenStreamOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto TLOpenStream_exit; + } + ((IMG_CHAR *) + uiNameInt)[(PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + + psTLOpenStreamOUT->eError = + TLServerOpenStreamKM(uiNameInt, + psTLOpenStreamIN->ui32Mode, + &psSDInt, &psTLPMRInt); + /* Exit early if bridged call fails */ + if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK)) + { + goto TLOpenStream_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psTLOpenStreamOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psTLOpenStreamOUT->hSD, (void *)psSDInt, + PVRSRV_HANDLE_TYPE_PVR_TL_SD, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _TLOpenStreampsSDIntRelease); + if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto TLOpenStream_exit; + } + + psTLOpenStreamOUT->eError = + PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, + &psTLOpenStreamOUT->hTLPMR, + (void *)psTLPMRInt, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psTLOpenStreamOUT->hSD); + if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto TLOpenStream_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +TLOpenStream_exit: + + if (psTLOpenStreamOUT->eError != PVRSRV_OK) + { + if (psTLOpenStreamOUT->hSD) + { + PVRSRV_ERROR eError; + + /* Lock over handle creation cleanup. */ + LockHandle(psConnection->psHandleBase); + + eError = + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + (IMG_HANDLE) + psTLOpenStreamOUT->hSD, + PVRSRV_HANDLE_TYPE_PVR_TL_SD); + if (unlikely + ((eError != PVRSRV_OK) + && (eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) + || (eError == PVRSRV_ERROR_RETRY)); + + /* Avoid freeing/destroying/releasing the resource a second time below */ + psSDInt = NULL; + /* Release now we have cleaned up creation handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + } + + if (psSDInt) + { + TLServerCloseStreamKM(psSDInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeTLCloseStream(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLCloseStreamIN_UI8, + IMG_UINT8 * psTLCloseStreamOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLCLOSESTREAM *psTLCloseStreamIN = + (PVRSRV_BRIDGE_IN_TLCLOSESTREAM *) + IMG_OFFSET_ADDR(psTLCloseStreamIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLCLOSESTREAM *psTLCloseStreamOUT = + (PVRSRV_BRIDGE_OUT_TLCLOSESTREAM *) + IMG_OFFSET_ADDR(psTLCloseStreamOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psTLCloseStreamOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psTLCloseStreamIN->hSD, + PVRSRV_HANDLE_TYPE_PVR_TL_SD); + if (unlikely((psTLCloseStreamOUT->eError != PVRSRV_OK) && + (psTLCloseStreamOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psTLCloseStreamOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto TLCloseStream_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +TLCloseStream_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeTLAcquireData(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLAcquireDataIN_UI8, + IMG_UINT8 * psTLAcquireDataOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLACQUIREDATA *psTLAcquireDataIN = + (PVRSRV_BRIDGE_IN_TLACQUIREDATA *) + IMG_OFFSET_ADDR(psTLAcquireDataIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLACQUIREDATA *psTLAcquireDataOUT = + (PVRSRV_BRIDGE_OUT_TLACQUIREDATA *) + IMG_OFFSET_ADDR(psTLAcquireDataOUT_UI8, 0); + + IMG_HANDLE hSD = psTLAcquireDataIN->hSD; + TL_STREAM_DESC *psSDInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psTLAcquireDataOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSDInt, + hSD, + PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); + if (unlikely(psTLAcquireDataOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto TLAcquireData_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psTLAcquireDataOUT->eError = + TLServerAcquireDataKM(psSDInt, + &psTLAcquireDataOUT->ui32ReadOffset, + &psTLAcquireDataOUT->ui32ReadLen); + +TLAcquireData_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSDInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeTLReleaseData(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLReleaseDataIN_UI8, + IMG_UINT8 * psTLReleaseDataOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLRELEASEDATA *psTLReleaseDataIN = + (PVRSRV_BRIDGE_IN_TLRELEASEDATA *) + IMG_OFFSET_ADDR(psTLReleaseDataIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLRELEASEDATA *psTLReleaseDataOUT = + (PVRSRV_BRIDGE_OUT_TLRELEASEDATA *) + IMG_OFFSET_ADDR(psTLReleaseDataOUT_UI8, 0); + + IMG_HANDLE hSD = psTLReleaseDataIN->hSD; + TL_STREAM_DESC *psSDInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psTLReleaseDataOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSDInt, + hSD, + PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); + if (unlikely(psTLReleaseDataOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto TLReleaseData_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psTLReleaseDataOUT->eError = + TLServerReleaseDataKM(psSDInt, + psTLReleaseDataIN->ui32ReadOffset, + psTLReleaseDataIN->ui32ReadLen); + +TLReleaseData_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSDInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeTLDiscoverStreams(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLDiscoverStreamsIN_UI8, + IMG_UINT8 * psTLDiscoverStreamsOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS *psTLDiscoverStreamsIN = + (PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS *) + IMG_OFFSET_ADDR(psTLDiscoverStreamsIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS *psTLDiscoverStreamsOUT = + (PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS *) + IMG_OFFSET_ADDR(psTLDiscoverStreamsOUT_UI8, 0); + + IMG_CHAR *uiNamePatternInt = NULL; + IMG_CHAR *puiStreamsInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) + + (psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR)) + 0; + + if (psTLDiscoverStreamsIN->ui32Size > + PVRSRVTL_MAX_DISCOVERABLE_STREAMS_BUFFER) + { + psTLDiscoverStreamsOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto TLDiscoverStreams_exit; + } + + PVR_UNREFERENCED_PARAMETER(psConnection); + + psTLDiscoverStreamsOUT->puiStreams = psTLDiscoverStreamsIN->puiStreams; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psTLDiscoverStreamsIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psTLDiscoverStreamsIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psTLDiscoverStreamsOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto TLDiscoverStreams_exit; + } + } + } + + { + uiNamePatternInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiNamePatternInt, + (const void __user *)psTLDiscoverStreamsIN->puiNamePattern, + PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) != + PVRSRV_OK) + { + psTLDiscoverStreamsOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto TLDiscoverStreams_exit; + } + ((IMG_CHAR *) + uiNamePatternInt)[(PRVSRVTL_MAX_STREAM_NAME_SIZE * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + if (psTLDiscoverStreamsIN->ui32Size != 0) + { + puiStreamsInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR); + } + + psTLDiscoverStreamsOUT->eError = + TLServerDiscoverStreamsKM(uiNamePatternInt, + psTLDiscoverStreamsIN->ui32Size, + puiStreamsInt, + &psTLDiscoverStreamsOUT->ui32NumFound); + + /* If dest ptr is non-null and we have data to copy */ + if ((puiStreamsInt) && + ((psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, (void __user *)psTLDiscoverStreamsOUT->puiStreams, + puiStreamsInt, + (psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR))) != + PVRSRV_OK)) + { + psTLDiscoverStreamsOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto TLDiscoverStreams_exit; + } + } + 
+TLDiscoverStreams_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeTLReserveStream(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLReserveStreamIN_UI8, + IMG_UINT8 * psTLReserveStreamOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLRESERVESTREAM *psTLReserveStreamIN = + (PVRSRV_BRIDGE_IN_TLRESERVESTREAM *) + IMG_OFFSET_ADDR(psTLReserveStreamIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLRESERVESTREAM *psTLReserveStreamOUT = + (PVRSRV_BRIDGE_OUT_TLRESERVESTREAM *) + IMG_OFFSET_ADDR(psTLReserveStreamOUT_UI8, 0); + + IMG_HANDLE hSD = psTLReserveStreamIN->hSD; + TL_STREAM_DESC *psSDInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psTLReserveStreamOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSDInt, + hSD, + PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); + if (unlikely(psTLReserveStreamOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto TLReserveStream_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psTLReserveStreamOUT->eError = + TLServerReserveStreamKM(psSDInt, + &psTLReserveStreamOUT->ui32BufferOffset, + psTLReserveStreamIN->ui32Size, + psTLReserveStreamIN->ui32SizeMin, + &psTLReserveStreamOUT->ui32Available); + +TLReserveStream_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSDInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeTLCommitStream(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLCommitStreamIN_UI8, + IMG_UINT8 * psTLCommitStreamOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLCOMMITSTREAM *psTLCommitStreamIN = + (PVRSRV_BRIDGE_IN_TLCOMMITSTREAM *) + IMG_OFFSET_ADDR(psTLCommitStreamIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM *psTLCommitStreamOUT = + (PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM *) + IMG_OFFSET_ADDR(psTLCommitStreamOUT_UI8, 0); + + IMG_HANDLE hSD = psTLCommitStreamIN->hSD; + TL_STREAM_DESC *psSDInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psTLCommitStreamOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSDInt, + hSD, + PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); + if (unlikely(psTLCommitStreamOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto TLCommitStream_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psTLCommitStreamOUT->eError = + TLServerCommitStreamKM(psSDInt, psTLCommitStreamIN->ui32ReqSize); + +TLCommitStream_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSDInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeTLWriteData(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLWriteDataIN_UI8, + IMG_UINT8 * psTLWriteDataOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLWRITEDATA *psTLWriteDataIN = + (PVRSRV_BRIDGE_IN_TLWRITEDATA *) + IMG_OFFSET_ADDR(psTLWriteDataIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLWRITEDATA *psTLWriteDataOUT = + (PVRSRV_BRIDGE_OUT_TLWRITEDATA *) + IMG_OFFSET_ADDR(psTLWriteDataOUT_UI8, 0); + + IMG_HANDLE hSD = psTLWriteDataIN->hSD; + TL_STREAM_DESC *psSDInt = NULL; + IMG_BYTE *psDataInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) + 0; + + if (unlikely(psTLWriteDataIN->ui32Size > PVRSRVTL_MAX_PACKET_SIZE)) + { + psTLWriteDataOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto TLWriteData_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psTLWriteDataIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psTLWriteDataIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psTLWriteDataOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto TLWriteData_exit; + } + } + } + + if (psTLWriteDataIN->ui32Size != 0) + { + psDataInt = + (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, psDataInt, + (const void __user *)psTLWriteDataIN->psData, + psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) != PVRSRV_OK) + { + psTLWriteDataOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto TLWriteData_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psTLWriteDataOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSDInt, + hSD, + PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); + if (unlikely(psTLWriteDataOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto TLWriteData_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psTLWriteDataOUT->eError = + TLServerWriteDataKM(psSDInt, psTLWriteDataIN->ui32Size, psDataInt); + +TLWriteData_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSDInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitPVRTLBridge(void); +PVRSRV_ERROR DeinitPVRTLBridge(void); + +/* + * Register all PVRTL functions with services + */ +PVRSRV_ERROR InitPVRTLBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM, + PVRSRVBridgeTLOpenStream, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM, + PVRSRVBridgeTLCloseStream, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA, + PVRSRVBridgeTLAcquireData, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA, + PVRSRVBridgeTLReleaseData, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS, + PVRSRVBridgeTLDiscoverStreams, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM, + PVRSRVBridgeTLReserveStream, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM, + PVRSRVBridgeTLCommitStream, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLWRITEDATA, + PVRSRVBridgeTLWriteData, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all pvrtl functions with services + */ +PVRSRV_ERROR DeinitPVRTLBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA); + + 
UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, + PVRSRV_BRIDGE_PVRTL_TLWRITEDATA); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/rgxcmp_bridge/common_rgxcmp_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxcmp_bridge/common_rgxcmp_bridge.h new file mode 100644 index 000000000000..bbc74470f01c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxcmp_bridge/common_rgxcmp_bridge.h @@ -0,0 +1,226 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxcmp +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxcmp +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_RGXCMP_BRIDGE_H +#define COMMON_RGXCMP_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" +#include "pvrsrv_sync_km.h" + +#define PVRSRV_BRIDGE_RGXCMP_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXCMP_RGXGETLASTCOMPUTECONTEXTRESETREASON PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+5 +#define PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2 PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+6 +#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+7 +#define PVRSRV_BRIDGE_RGXCMP_CMD_LAST (PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+7) + +/******************************************* + RGXCreateComputeContext + *******************************************/ + +/* Bridge in structure for RGXCreateComputeContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT_TAG +{ + IMG_UINT32 ui32Priority; + IMG_HANDLE hPrivData; + IMG_UINT32 ui32StaticComputeContextStateSize; + IMG_BYTE *psStaticComputeContextState; + IMG_UINT32 ui32PackedCCBSizeU88; + IMG_UINT32 ui32ContextFlags; + IMG_UINT64 ui64RobustnessAddress; + IMG_UINT32 ui32MaxDeadlineMS; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT; + +/* Bridge out structure for RGXCreateComputeContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT_TAG +{ + IMG_HANDLE hComputeContext; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT; + +/******************************************* + 
RGXDestroyComputeContext + *******************************************/ + +/* Bridge in structure for RGXDestroyComputeContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT_TAG +{ + IMG_HANDLE hComputeContext; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT; + +/* Bridge out structure for RGXDestroyComputeContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT; + +/******************************************* + RGXFlushComputeData + *******************************************/ + +/* Bridge in structure for RGXFlushComputeData */ +typedef struct PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA_TAG +{ + IMG_HANDLE hComputeContext; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA; + +/* Bridge out structure for RGXFlushComputeData */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA; + +/******************************************* + RGXSetComputeContextPriority + *******************************************/ + +/* Bridge in structure for RGXSetComputeContextPriority */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY_TAG +{ + IMG_HANDLE hComputeContext; + IMG_UINT32 ui32Priority; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY; + +/* Bridge out structure for RGXSetComputeContextPriority */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY; + +/******************************************* + RGXGetLastComputeContextResetReason + *******************************************/ + +/* Bridge in structure for RGXGetLastComputeContextResetReason */ +typedef struct PVRSRV_BRIDGE_IN_RGXGETLASTCOMPUTECONTEXTRESETREASON_TAG +{ + IMG_HANDLE hComputeContext; +} __attribute__ 
((packed)) PVRSRV_BRIDGE_IN_RGXGETLASTCOMPUTECONTEXTRESETREASON; + +/* Bridge out structure for RGXGetLastComputeContextResetReason */ +typedef struct PVRSRV_BRIDGE_OUT_RGXGETLASTCOMPUTECONTEXTRESETREASON_TAG +{ + IMG_UINT32 ui32LastResetReason; + IMG_UINT32 ui32LastResetJobRef; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) + PVRSRV_BRIDGE_OUT_RGXGETLASTCOMPUTECONTEXTRESETREASON; + +/******************************************* + RGXNotifyComputeWriteOffsetUpdate + *******************************************/ + +/* Bridge in structure for RGXNotifyComputeWriteOffsetUpdate */ +typedef struct PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG +{ + IMG_HANDLE hComputeContext; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE; + +/* Bridge out structure for RGXNotifyComputeWriteOffsetUpdate */ +typedef struct PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE; + +/******************************************* + RGXKickCDM2 + *******************************************/ + +/* Bridge in structure for RGXKickCDM2 */ +typedef struct PVRSRV_BRIDGE_IN_RGXKICKCDM2_TAG +{ + IMG_HANDLE hComputeContext; + IMG_UINT32 ui32ClientCacheOpSeqNum; + IMG_UINT32 ui32ClientUpdateCount; + IMG_HANDLE *phClientUpdateUFOSyncPrimBlock; + IMG_UINT32 *pui32ClientUpdateOffset; + IMG_UINT32 *pui32ClientUpdateValue; + PVRSRV_FENCE hCheckFenceFd; + PVRSRV_TIMELINE hUpdateTimeline; + IMG_CHAR *puiUpdateFenceName; + IMG_UINT32 ui32CmdSize; + IMG_BYTE *psDMCmd; + IMG_UINT32 ui32PDumpFlags; + IMG_UINT32 ui32ExtJobRef; + IMG_UINT32 ui32NumOfWorkgroups; + IMG_UINT32 ui32NumOfWorkitems; + IMG_UINT64 ui64DeadlineInus; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXKICKCDM2; + +/* Bridge out structure for RGXKickCDM2 */ +typedef struct PVRSRV_BRIDGE_OUT_RGXKICKCDM2_TAG +{ + PVRSRV_FENCE hUpdateFence; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) 
PVRSRV_BRIDGE_OUT_RGXKICKCDM2; + +/******************************************* + RGXSetComputeContextProperty + *******************************************/ + +/* Bridge in structure for RGXSetComputeContextProperty */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY_TAG +{ + IMG_HANDLE hComputeContext; + IMG_UINT32 ui32Property; + IMG_UINT64 ui64Input; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY; + +/* Bridge out structure for RGXSetComputeContextProperty */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY_TAG +{ + IMG_UINT64 ui64Output; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY; + +#endif /* COMMON_RGXCMP_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/rgxcmp_bridge/server_rgxcmp_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxcmp_bridge/server_rgxcmp_bridge.c new file mode 100644 index 000000000000..bfb75576dcde --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxcmp_bridge/server_rgxcmp_bridge.c @@ -0,0 +1,1038 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxcmp +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxcmp +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxcompute.h" + +#include "common_rgxcmp_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _RGXCreateComputeContextpsComputeContextIntRelease(void + *pvData) +{ + PVRSRV_ERROR eError; + eError = + PVRSRVRGXDestroyComputeContextKM((RGX_SERVER_COMPUTE_CONTEXT *) + pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXCreateComputeContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateComputeContextIN_UI8, + IMG_UINT8 * + psRGXCreateComputeContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextIN = + (PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateComputeContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextOUT + = + (PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateComputeContextOUT_UI8, 0); + + IMG_HANDLE hPrivData = psRGXCreateComputeContextIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + IMG_BYTE *psStaticComputeContextStateInt = NULL; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize * + sizeof(IMG_BYTE)) + 0; + + if (unlikely + (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize > + RGXFWIF_STATIC_COMPUTECONTEXT_SIZE)) + { + psRGXCreateComputeContextOUT->eError = + 
PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXCreateComputeContext_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXCreateComputeContextIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXCreateComputeContextIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXCreateComputeContextOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXCreateComputeContext_exit; + } + } + } + + if (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize != 0) + { + psStaticComputeContextStateInt = + (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXCreateComputeContextIN-> + ui32StaticComputeContextStateSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize * + sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, psStaticComputeContextStateInt, + (const void __user *)psRGXCreateComputeContextIN-> + psStaticComputeContextState, + psRGXCreateComputeContextIN-> + ui32StaticComputeContextStateSize * sizeof(IMG_BYTE)) != + PVRSRV_OK) + { + psRGXCreateComputeContextOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateComputeContext_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXCreateComputeContextOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + IMG_TRUE); + if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateComputeContext_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXCreateComputeContextOUT->eError = + PVRSRVRGXCreateComputeContextKM(psConnection, + OSGetDevNode(psConnection), + psRGXCreateComputeContextIN-> + ui32Priority, hPrivDataInt, + psRGXCreateComputeContextIN-> + ui32StaticComputeContextStateSize, + psStaticComputeContextStateInt, + psRGXCreateComputeContextIN-> + ui32PackedCCBSizeU88, + psRGXCreateComputeContextIN-> + ui32ContextFlags, + psRGXCreateComputeContextIN-> + ui64RobustnessAddress, + psRGXCreateComputeContextIN-> + ui32MaxDeadlineMS, + &psComputeContextInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)) + { + goto RGXCreateComputeContext_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRGXCreateComputeContextOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateComputeContextOUT-> + hComputeContext, + (void *)psComputeContextInt, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateComputeContextpsComputeContextIntRelease); + if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateComputeContext_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXCreateComputeContext_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXCreateComputeContextOUT->eError != PVRSRV_OK) + { + if (psComputeContextInt) + { + PVRSRVRGXDestroyComputeContextKM(psComputeContextInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDestroyComputeContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXDestroyComputeContextIN_UI8, + IMG_UINT8 * + psRGXDestroyComputeContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT *psRGXDestroyComputeContextIN + = + (PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyComputeContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT + *psRGXDestroyComputeContextOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyComputeContextOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXDestroyComputeContextOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psRGXDestroyComputeContextIN-> + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + if (unlikely + ((psRGXDestroyComputeContextOUT->eError != PVRSRV_OK) + && (psRGXDestroyComputeContextOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psRGXDestroyComputeContextOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyComputeContext_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXDestroyComputeContext_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFlushComputeData(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXFlushComputeDataIN_UI8, + IMG_UINT8 * psRGXFlushComputeDataOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA *psRGXFlushComputeDataIN = + (PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA *) + IMG_OFFSET_ADDR(psRGXFlushComputeDataIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA *psRGXFlushComputeDataOUT = + (PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA *) + IMG_OFFSET_ADDR(psRGXFlushComputeDataOUT_UI8, 0); + + IMG_HANDLE hComputeContext = psRGXFlushComputeDataIN->hComputeContext; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXFlushComputeDataOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psComputeContextInt, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXFlushComputeDataOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXFlushComputeData_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXFlushComputeDataOUT->eError = + PVRSRVRGXFlushComputeDataKM(psComputeContextInt); + +RGXFlushComputeData_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psComputeContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSetComputeContextPriority(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXSetComputeContextPriorityIN_UI8, + IMG_UINT8 * + psRGXSetComputeContextPriorityOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY + *psRGXSetComputeContextPriorityIN = + (PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXSetComputeContextPriorityIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY + *psRGXSetComputeContextPriorityOUT = + (PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXSetComputeContextPriorityOUT_UI8, 0); + + IMG_HANDLE hComputeContext = + psRGXSetComputeContextPriorityIN->hComputeContext; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetComputeContextPriorityOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psComputeContextInt, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXSetComputeContextPriorityOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetComputeContextPriority_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetComputeContextPriorityOUT->eError = + PVRSRVRGXSetComputeContextPriorityKM(psConnection, + OSGetDevNode(psConnection), + psComputeContextInt, + psRGXSetComputeContextPriorityIN-> + ui32Priority); + +RGXSetComputeContextPriority_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psComputeContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXGetLastComputeContextResetReason(IMG_UINT32 + ui32DispatchTableEntry, + IMG_UINT8 * + psRGXGetLastComputeContextResetReasonIN_UI8, + IMG_UINT8 * + psRGXGetLastComputeContextResetReasonOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXGETLASTCOMPUTECONTEXTRESETREASON + *psRGXGetLastComputeContextResetReasonIN = + (PVRSRV_BRIDGE_IN_RGXGETLASTCOMPUTECONTEXTRESETREASON *) + IMG_OFFSET_ADDR(psRGXGetLastComputeContextResetReasonIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXGETLASTCOMPUTECONTEXTRESETREASON + *psRGXGetLastComputeContextResetReasonOUT = + (PVRSRV_BRIDGE_OUT_RGXGETLASTCOMPUTECONTEXTRESETREASON *) + IMG_OFFSET_ADDR(psRGXGetLastComputeContextResetReasonOUT_UI8, 0); + + IMG_HANDLE hComputeContext = + psRGXGetLastComputeContextResetReasonIN->hComputeContext; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXGetLastComputeContextResetReasonOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psComputeContextInt, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, + IMG_TRUE); + if (unlikely + (psRGXGetLastComputeContextResetReasonOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXGetLastComputeContextResetReason_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXGetLastComputeContextResetReasonOUT->eError = + PVRSRVRGXGetLastComputeContextResetReasonKM(psComputeContextInt, + &psRGXGetLastComputeContextResetReasonOUT-> + ui32LastResetReason, + &psRGXGetLastComputeContextResetReasonOUT-> + ui32LastResetJobRef); + +RGXGetLastComputeContextResetReason_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psComputeContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXNotifyComputeWriteOffsetUpdateIN_UI8, + IMG_UINT8 * + psRGXNotifyComputeWriteOffsetUpdateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE + *psRGXNotifyComputeWriteOffsetUpdateIN = + (PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *) + IMG_OFFSET_ADDR(psRGXNotifyComputeWriteOffsetUpdateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE + *psRGXNotifyComputeWriteOffsetUpdateOUT = + (PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *) + IMG_OFFSET_ADDR(psRGXNotifyComputeWriteOffsetUpdateOUT_UI8, 0); + + IMG_HANDLE hComputeContext = + psRGXNotifyComputeWriteOffsetUpdateIN->hComputeContext; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXNotifyComputeWriteOffsetUpdateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psComputeContextInt, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, + IMG_TRUE); + if (unlikely + (psRGXNotifyComputeWriteOffsetUpdateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXNotifyComputeWriteOffsetUpdate_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXNotifyComputeWriteOffsetUpdateOUT->eError = + PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(psComputeContextInt); + +RGXNotifyComputeWriteOffsetUpdate_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psComputeContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXKickCDM2IN_UI8, + IMG_UINT8 * psRGXKickCDM2OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXKICKCDM2 *psRGXKickCDM2IN = + (PVRSRV_BRIDGE_IN_RGXKICKCDM2 *) + IMG_OFFSET_ADDR(psRGXKickCDM2IN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXKICKCDM2 *psRGXKickCDM2OUT = + (PVRSRV_BRIDGE_OUT_RGXKICKCDM2 *) + IMG_OFFSET_ADDR(psRGXKickCDM2OUT_UI8, 0); + + IMG_HANDLE hComputeContext = psRGXKickCDM2IN->hComputeContext; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + SYNC_PRIMITIVE_BLOCK **psClientUpdateUFOSyncPrimBlockInt = NULL; + IMG_HANDLE *hClientUpdateUFOSyncPrimBlockInt2 = NULL; + IMG_UINT32 *ui32ClientUpdateOffsetInt = NULL; + IMG_UINT32 *ui32ClientUpdateValueInt = NULL; + IMG_CHAR *uiUpdateFenceNameInt = NULL; + IMG_BYTE *psDMCmdInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXKickCDM2IN->ui32ClientUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)) + + (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) + + (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + (psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE)) + 0; + + if (unlikely(psRGXKickCDM2IN->ui32ClientUpdateCount > PVRSRV_MAX_SYNCS)) + { + psRGXKickCDM2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickCDM2_exit; + } + + if 
(unlikely + (psRGXKickCDM2IN->ui32CmdSize > + RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXKickCDM2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickCDM2_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXKickCDM2IN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXKickCDM2IN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXKickCDM2OUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXKickCDM2_exit; + } + } + } + + if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0) + { + psClientUpdateUFOSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickCDM2IN->ui32ClientUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *); + hClientUpdateUFOSyncPrimBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hClientUpdateUFOSyncPrimBlockInt2, + (const void __user *)psRGXKickCDM2IN-> + phClientUpdateUFOSyncPrimBlock, + psRGXKickCDM2IN->ui32ClientUpdateCount * + sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickCDM2_exit; + } + } + if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0) + { + ui32ClientUpdateOffsetInt = + (IMG_UINT32 *) 
IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientUpdateOffsetInt, + (const void __user *)psRGXKickCDM2IN-> + pui32ClientUpdateOffset, + psRGXKickCDM2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickCDM2_exit; + } + } + if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0) + { + ui32ClientUpdateValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientUpdateValueInt, + (const void __user *)psRGXKickCDM2IN-> + pui32ClientUpdateValue, + psRGXKickCDM2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickCDM2_exit; + } + } + + { + uiUpdateFenceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceNameInt, + (const void __user *)psRGXKickCDM2IN->puiUpdateFenceName, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickCDM2_exit; + } + ((IMG_CHAR *) + uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + if (psRGXKickCDM2IN->ui32CmdSize != 0) + { + psDMCmdInt = + (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickCDM2IN->ui32CmdSize * 
sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, psDMCmdInt, + (const void __user *)psRGXKickCDM2IN->psDMCmd, + psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE)) != + PVRSRV_OK) + { + psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickCDM2_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXKickCDM2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psComputeContextInt, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickCDM2_exit; + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickCDM2IN->ui32ClientUpdateCount; i++) + { + /* Look up the address from the handle */ + psRGXKickCDM2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection-> + psHandleBase, + (void **) + &psClientUpdateUFOSyncPrimBlockInt + [i], + hClientUpdateUFOSyncPrimBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickCDM2_exit; + } + } + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXKickCDM2OUT->eError = + PVRSRVRGXKickCDMKM(psComputeContextInt, + psRGXKickCDM2IN->ui32ClientCacheOpSeqNum, + psRGXKickCDM2IN->ui32ClientUpdateCount, + psClientUpdateUFOSyncPrimBlockInt, + ui32ClientUpdateOffsetInt, + ui32ClientUpdateValueInt, + psRGXKickCDM2IN->hCheckFenceFd, + psRGXKickCDM2IN->hUpdateTimeline, + &psRGXKickCDM2OUT->hUpdateFence, + uiUpdateFenceNameInt, + psRGXKickCDM2IN->ui32CmdSize, + psDMCmdInt, + psRGXKickCDM2IN->ui32PDumpFlags, + psRGXKickCDM2IN->ui32ExtJobRef, + psRGXKickCDM2IN->ui32NumOfWorkgroups, + psRGXKickCDM2IN->ui32NumOfWorkitems, + psRGXKickCDM2IN->ui64DeadlineInus); + +RGXKickCDM2_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psComputeContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + } + + if (hClientUpdateUFOSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickCDM2IN->ui32ClientUpdateCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hClientUpdateUFOSyncPrimBlockInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + hClientUpdateUFOSyncPrimBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSetComputeContextProperty(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXSetComputeContextPropertyIN_UI8, + IMG_UINT8 * + psRGXSetComputeContextPropertyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY + *psRGXSetComputeContextPropertyIN = + (PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetComputeContextPropertyIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY + *psRGXSetComputeContextPropertyOUT = + (PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetComputeContextPropertyOUT_UI8, 0); + + IMG_HANDLE hComputeContext = + psRGXSetComputeContextPropertyIN->hComputeContext; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetComputeContextPropertyOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psComputeContextInt, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXSetComputeContextPropertyOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetComputeContextProperty_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetComputeContextPropertyOUT->eError = + PVRSRVRGXSetComputeContextPropertyKM(psComputeContextInt, + psRGXSetComputeContextPropertyIN-> + ui32Property, + psRGXSetComputeContextPropertyIN-> + ui64Input, + &psRGXSetComputeContextPropertyOUT-> + ui64Output); + +RGXSetComputeContextProperty_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psComputeContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRGXCMPBridge(void); +PVRSRV_ERROR DeinitRGXCMPBridge(void); + +/* + * Register all RGXCMP functions with services + */ +PVRSRV_ERROR InitRGXCMPBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT, + PVRSRVBridgeRGXCreateComputeContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT, + PVRSRVBridgeRGXDestroyComputeContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA, + PVRSRVBridgeRGXFlushComputeData, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY, + PVRSRVBridgeRGXSetComputeContextPriority, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXGETLASTCOMPUTECONTEXTRESETREASON, + PVRSRVBridgeRGXGetLastComputeContextResetReason, + NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE, + PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate, + NULL); + + 
SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2, + PVRSRVBridgeRGXKickCDM2, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY, + PVRSRVBridgeRGXSetComputeContextProperty, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxcmp functions with services + */ +PVRSRV_ERROR DeinitRGXCMPBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXGETLASTCOMPUTECONTEXTRESETREASON); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/rgxfwdbg_bridge/common_rgxfwdbg_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxfwdbg_bridge/common_rgxfwdbg_bridge.h new file mode 100644 index 000000000000..96341f37a34f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxfwdbg_bridge/common_rgxfwdbg_bridge.h @@ -0,0 +1,182 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxfwdbg +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxfwdbg +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_RGXFWDBG_BRIDGE_H +#define COMMON_RGXFWDBG_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "devicemem_typedefs.h" +#include "rgx_bridge.h" +#include "pvrsrv_memallocflags.h" + +#define PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+5 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+6 +#define PVRSRV_BRIDGE_RGXFWDBG_CMD_LAST (PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+6) + +/******************************************* + RGXFWDebugSetFWLog + *******************************************/ + +/* Bridge in structure for RGXFWDebugSetFWLog */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG_TAG +{ + IMG_UINT32 ui32RGXFWLogType; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG; + +/* Bridge out structure for RGXFWDebugSetFWLog */ +typedef 
struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG; + +/******************************************* + RGXFWDebugDumpFreelistPageList + *******************************************/ + +/* Bridge in structure for RGXFWDebugDumpFreelistPageList */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST; + +/* Bridge out structure for RGXFWDebugDumpFreelistPageList */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST; + +/******************************************* + RGXFWDebugSetHCSDeadline + *******************************************/ + +/* Bridge in structure for RGXFWDebugSetHCSDeadline */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE_TAG +{ + IMG_UINT32 ui32RGXHCSDeadline; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE; + +/* Bridge out structure for RGXFWDebugSetHCSDeadline */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE; + +/******************************************* + RGXFWDebugSetOSidPriority + *******************************************/ + +/* Bridge in structure for RGXFWDebugSetOSidPriority */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY_TAG +{ + IMG_UINT32 ui32OSid; + IMG_UINT32 ui32Priority; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY; + +/* Bridge out structure for RGXFWDebugSetOSidPriority */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY; + +/******************************************* + 
RGXFWDebugSetOSNewOnlineState + *******************************************/ + +/* Bridge in structure for RGXFWDebugSetOSNewOnlineState */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE_TAG +{ + IMG_UINT32 ui32OSid; + IMG_UINT32 ui32OSNewState; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE; + +/* Bridge out structure for RGXFWDebugSetOSNewOnlineState */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE; + +/******************************************* + RGXFWDebugPHRConfigure + *******************************************/ + +/* Bridge in structure for RGXFWDebugPHRConfigure */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE_TAG +{ + IMG_UINT32 ui32ui32PHRMode; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE; + +/* Bridge out structure for RGXFWDebugPHRConfigure */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE; + +/******************************************* + RGXCurrentTime + *******************************************/ + +/* Bridge in structure for RGXCurrentTime */ +typedef struct PVRSRV_BRIDGE_IN_RGXCURRENTTIME_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCURRENTTIME; + +/* Bridge out structure for RGXCurrentTime */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCURRENTTIME_TAG +{ + IMG_UINT64 ui64Time; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCURRENTTIME; + +#endif /* COMMON_RGXFWDBG_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/rgxfwdbg_bridge/server_rgxfwdbg_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxfwdbg_bridge/server_rgxfwdbg_bridge.c new file mode 100644 index 000000000000..49cfe0c35dc3 --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxfwdbg_bridge/server_rgxfwdbg_bridge.c @@ -0,0 +1,316 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxfwdbg +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxfwdbg +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "devicemem_server.h" +#include "rgxfwdbg.h" +#include "pmr.h" +#include "rgxtimecorr.h" + +#include "common_rgxfwdbg_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeRGXFWDebugSetFWLog(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXFWDebugSetFWLogIN_UI8, + IMG_UINT8 * psRGXFWDebugSetFWLogOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG *psRGXFWDebugSetFWLogIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG *) + IMG_OFFSET_ADDR(psRGXFWDebugSetFWLogIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG *psRGXFWDebugSetFWLogOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG *) + IMG_OFFSET_ADDR(psRGXFWDebugSetFWLogOUT_UI8, 0); + + psRGXFWDebugSetFWLogOUT->eError = + PVRSRVRGXFWDebugSetFWLogKM(psConnection, OSGetDevNode(psConnection), + psRGXFWDebugSetFWLogIN-> + ui32RGXFWLogType); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugDumpFreelistPageList(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXFWDebugDumpFreelistPageListIN_UI8, + IMG_UINT8 
* + psRGXFWDebugDumpFreelistPageListOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST + *psRGXFWDebugDumpFreelistPageListIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST *) + IMG_OFFSET_ADDR(psRGXFWDebugDumpFreelistPageListIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST + *psRGXFWDebugDumpFreelistPageListOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST *) + IMG_OFFSET_ADDR(psRGXFWDebugDumpFreelistPageListOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psRGXFWDebugDumpFreelistPageListIN); + + psRGXFWDebugDumpFreelistPageListOUT->eError = + PVRSRVRGXFWDebugDumpFreelistPageListKM(psConnection, + OSGetDevNode(psConnection)); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugSetHCSDeadline(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXFWDebugSetHCSDeadlineIN_UI8, + IMG_UINT8 * + psRGXFWDebugSetHCSDeadlineOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE *psRGXFWDebugSetHCSDeadlineIN + = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE *) + IMG_OFFSET_ADDR(psRGXFWDebugSetHCSDeadlineIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE + *psRGXFWDebugSetHCSDeadlineOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE *) + IMG_OFFSET_ADDR(psRGXFWDebugSetHCSDeadlineOUT_UI8, 0); + + psRGXFWDebugSetHCSDeadlineOUT->eError = + PVRSRVRGXFWDebugSetHCSDeadlineKM(psConnection, + OSGetDevNode(psConnection), + psRGXFWDebugSetHCSDeadlineIN-> + ui32RGXHCSDeadline); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugSetOSidPriority(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXFWDebugSetOSidPriorityIN_UI8, + IMG_UINT8 * + psRGXFWDebugSetOSidPriorityOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY + *psRGXFWDebugSetOSidPriorityIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY *) + IMG_OFFSET_ADDR(psRGXFWDebugSetOSidPriorityIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY + 
*psRGXFWDebugSetOSidPriorityOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY *) + IMG_OFFSET_ADDR(psRGXFWDebugSetOSidPriorityOUT_UI8, 0); + + psRGXFWDebugSetOSidPriorityOUT->eError = + PVRSRVRGXFWDebugSetOSidPriorityKM(psConnection, + OSGetDevNode(psConnection), + psRGXFWDebugSetOSidPriorityIN-> + ui32OSid, + psRGXFWDebugSetOSidPriorityIN-> + ui32Priority); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugSetOSNewOnlineState(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXFWDebugSetOSNewOnlineStateIN_UI8, + IMG_UINT8 * + psRGXFWDebugSetOSNewOnlineStateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE + *psRGXFWDebugSetOSNewOnlineStateIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE *) + IMG_OFFSET_ADDR(psRGXFWDebugSetOSNewOnlineStateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE + *psRGXFWDebugSetOSNewOnlineStateOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE *) + IMG_OFFSET_ADDR(psRGXFWDebugSetOSNewOnlineStateOUT_UI8, 0); + + psRGXFWDebugSetOSNewOnlineStateOUT->eError = + PVRSRVRGXFWDebugSetOSNewOnlineStateKM(psConnection, + OSGetDevNode(psConnection), + psRGXFWDebugSetOSNewOnlineStateIN-> + ui32OSid, + psRGXFWDebugSetOSNewOnlineStateIN-> + ui32OSNewState); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugPHRConfigure(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXFWDebugPHRConfigureIN_UI8, + IMG_UINT8 * psRGXFWDebugPHRConfigureOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE *psRGXFWDebugPHRConfigureIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE *) + IMG_OFFSET_ADDR(psRGXFWDebugPHRConfigureIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE *psRGXFWDebugPHRConfigureOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE *) + IMG_OFFSET_ADDR(psRGXFWDebugPHRConfigureOUT_UI8, 0); + + psRGXFWDebugPHRConfigureOUT->eError = + PVRSRVRGXFWDebugPHRConfigureKM(psConnection, + OSGetDevNode(psConnection), + 
psRGXFWDebugPHRConfigureIN-> + ui32ui32PHRMode); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXCurrentTime(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCurrentTimeIN_UI8, + IMG_UINT8 * psRGXCurrentTimeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCURRENTTIME *psRGXCurrentTimeIN = + (PVRSRV_BRIDGE_IN_RGXCURRENTTIME *) + IMG_OFFSET_ADDR(psRGXCurrentTimeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCURRENTTIME *psRGXCurrentTimeOUT = + (PVRSRV_BRIDGE_OUT_RGXCURRENTTIME *) + IMG_OFFSET_ADDR(psRGXCurrentTimeOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psRGXCurrentTimeIN); + + psRGXCurrentTimeOUT->eError = + PVRSRVRGXCurrentTime(psConnection, OSGetDevNode(psConnection), + &psRGXCurrentTimeOUT->ui64Time); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRGXFWDBGBridge(void); +PVRSRV_ERROR DeinitRGXFWDBGBridge(void); + +/* + * Register all RGXFWDBG functions with services + */ +PVRSRV_ERROR InitRGXFWDBGBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG, + PVRSRVBridgeRGXFWDebugSetFWLog, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST, + PVRSRVBridgeRGXFWDebugDumpFreelistPageList, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE, + PVRSRVBridgeRGXFWDebugSetHCSDeadline, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY, + PVRSRVBridgeRGXFWDebugSetOSidPriority, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE, + PVRSRVBridgeRGXFWDebugSetOSNewOnlineState, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE, + PVRSRVBridgeRGXFWDebugPHRConfigure, NULL); + + 
SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME, + PVRSRVBridgeRGXCurrentTime, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxfwdbg functions with services + */ +PVRSRV_ERROR DeinitRGXFWDBGBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/rgxhwperf_bridge/common_rgxhwperf_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxhwperf_bridge/common_rgxhwperf_bridge.h new file mode 100644 index 000000000000..a122bf009f81 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxhwperf_bridge/common_rgxhwperf_bridge.h @@ -0,0 +1,134 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxhwperf +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxhwperf +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_RGXHWPERF_BRIDGE_H +#define COMMON_RGXHWPERF_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" +#include "rgx_hwperf.h" + +#define PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST (PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+3) + +/******************************************* + RGXCtrlHWPerf + *******************************************/ + +/* Bridge in structure for RGXCtrlHWPerf */ +typedef struct PVRSRV_BRIDGE_IN_RGXCTRLHWPERF_TAG +{ + IMG_UINT32 ui32StreamId; + IMG_BOOL bToggle; + IMG_UINT64 ui64Mask; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCTRLHWPERF; + +/* Bridge out structure for RGXCtrlHWPerf */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF; + +/******************************************* + RGXConfigureHWPerfBlocks + *******************************************/ + +/* Bridge in structure for RGXConfigureHWPerfBlocks */ +typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS_TAG +{ + IMG_UINT32 ui32CtrlWord; + IMG_UINT16 ui16ArrayLen; + RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigs; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS; + +/* Bridge out structure for RGXConfigureHWPerfBlocks */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS; + +/******************************************* + 
RGXGetHWPerfBvncFeatureFlags + *******************************************/ + +/* Bridge in structure for RGXGetHWPerfBvncFeatureFlags */ +typedef struct PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS; + +/* Bridge out structure for RGXGetHWPerfBvncFeatureFlags */ +typedef struct PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS_TAG +{ + RGX_HWPERF_BVNC sBVNC; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS; + +/******************************************* + RGXControlHWPerfBlocks + *******************************************/ + +/* Bridge in structure for RGXControlHWPerfBlocks */ +typedef struct PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS_TAG +{ + IMG_BOOL bEnable; + IMG_UINT16 ui16ArrayLen; + IMG_UINT16 *pui16BlockIDs; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS; + +/* Bridge out structure for RGXControlHWPerfBlocks */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS; + +#endif /* COMMON_RGXHWPERF_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/rgxhwperf_bridge/server_rgxhwperf_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxhwperf_bridge/server_rgxhwperf_bridge.c new file mode 100644 index 000000000000..4c977c524334 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxhwperf_bridge/server_rgxhwperf_bridge.c @@ -0,0 +1,409 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxhwperf +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxhwperf +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxhwperf.h" +#include "rgx_fwif_km.h" + +#include "common_rgxhwperf_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeRGXCtrlHWPerf(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCtrlHWPerfIN_UI8, + IMG_UINT8 * psRGXCtrlHWPerfOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCTRLHWPERF *psRGXCtrlHWPerfIN = + (PVRSRV_BRIDGE_IN_RGXCTRLHWPERF *) + IMG_OFFSET_ADDR(psRGXCtrlHWPerfIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF *psRGXCtrlHWPerfOUT = + (PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF *) + IMG_OFFSET_ADDR(psRGXCtrlHWPerfOUT_UI8, 0); + + psRGXCtrlHWPerfOUT->eError = + PVRSRVRGXCtrlHWPerfKM(psConnection, OSGetDevNode(psConnection), + psRGXCtrlHWPerfIN->ui32StreamId, + psRGXCtrlHWPerfIN->bToggle, + psRGXCtrlHWPerfIN->ui64Mask); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXConfigureHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXConfigureHWPerfBlocksIN_UI8, + IMG_UINT8 * + psRGXConfigureHWPerfBlocksOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS *psRGXConfigureHWPerfBlocksIN + = + (PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS *) + IMG_OFFSET_ADDR(psRGXConfigureHWPerfBlocksIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS + *psRGXConfigureHWPerfBlocksOUT = + (PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS *) + IMG_OFFSET_ADDR(psRGXConfigureHWPerfBlocksOUT_UI8, 0); + + RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigsInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL 
bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXConfigureHWPerfBlocksIN->ui16ArrayLen * + sizeof(RGX_HWPERF_CONFIG_CNTBLK)) + 0; + + if (unlikely + (psRGXConfigureHWPerfBlocksIN->ui16ArrayLen > + RGXFWIF_HWPERF_CTRL_BLKS_MAX)) + { + psRGXConfigureHWPerfBlocksOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXConfigureHWPerfBlocks_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXConfigureHWPerfBlocksIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXConfigureHWPerfBlocksIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXConfigureHWPerfBlocksOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXConfigureHWPerfBlocks_exit; + } + } + } + + if (psRGXConfigureHWPerfBlocksIN->ui16ArrayLen != 0) + { + psBlockConfigsInt = + (RGX_HWPERF_CONFIG_CNTBLK *) + IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXConfigureHWPerfBlocksIN->ui16ArrayLen * + sizeof(RGX_HWPERF_CONFIG_CNTBLK); + } + + /* Copy the data over */ + if (psRGXConfigureHWPerfBlocksIN->ui16ArrayLen * + sizeof(RGX_HWPERF_CONFIG_CNTBLK) > 0) + { + if (OSCopyFromUser + (NULL, psBlockConfigsInt, + (const void __user *)psRGXConfigureHWPerfBlocksIN-> + psBlockConfigs, + psRGXConfigureHWPerfBlocksIN->ui16ArrayLen * + sizeof(RGX_HWPERF_CONFIG_CNTBLK)) != PVRSRV_OK) + { + psRGXConfigureHWPerfBlocksOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXConfigureHWPerfBlocks_exit; + } 
+ } + + psRGXConfigureHWPerfBlocksOUT->eError = + PVRSRVRGXConfigureHWPerfBlocksKM(psConnection, + OSGetDevNode(psConnection), + psRGXConfigureHWPerfBlocksIN-> + ui32CtrlWord, + psRGXConfigureHWPerfBlocksIN-> + ui16ArrayLen, psBlockConfigsInt); + +RGXConfigureHWPerfBlocks_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXGetHWPerfBvncFeatureFlags(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXGetHWPerfBvncFeatureFlagsIN_UI8, + IMG_UINT8 * + psRGXGetHWPerfBvncFeatureFlagsOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS + *psRGXGetHWPerfBvncFeatureFlagsIN = + (PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS *) + IMG_OFFSET_ADDR(psRGXGetHWPerfBvncFeatureFlagsIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS + *psRGXGetHWPerfBvncFeatureFlagsOUT = + (PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS *) + IMG_OFFSET_ADDR(psRGXGetHWPerfBvncFeatureFlagsOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psRGXGetHWPerfBvncFeatureFlagsIN); + + psRGXGetHWPerfBvncFeatureFlagsOUT->eError = + PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(psConnection, + OSGetDevNode(psConnection), + &psRGXGetHWPerfBvncFeatureFlagsOUT-> + sBVNC); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXControlHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXControlHWPerfBlocksIN_UI8, + IMG_UINT8 * psRGXControlHWPerfBlocksOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS *psRGXControlHWPerfBlocksIN = + (PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS *) + IMG_OFFSET_ADDR(psRGXControlHWPerfBlocksIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS *psRGXControlHWPerfBlocksOUT = + (PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS *) + 
IMG_OFFSET_ADDR(psRGXControlHWPerfBlocksOUT_UI8, 0); + + IMG_UINT16 *ui16BlockIDsInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXControlHWPerfBlocksIN->ui16ArrayLen * sizeof(IMG_UINT16)) + 0; + + if (unlikely + (psRGXControlHWPerfBlocksIN->ui16ArrayLen > + RGXFWIF_HWPERF_CTRL_BLKS_MAX)) + { + psRGXControlHWPerfBlocksOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXControlHWPerfBlocks_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXControlHWPerfBlocksIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXControlHWPerfBlocksIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXControlHWPerfBlocksOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXControlHWPerfBlocks_exit; + } + } + } + + if (psRGXControlHWPerfBlocksIN->ui16ArrayLen != 0) + { + ui16BlockIDsInt = + (IMG_UINT16 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXControlHWPerfBlocksIN->ui16ArrayLen * + sizeof(IMG_UINT16); + } + + /* Copy the data over */ + if (psRGXControlHWPerfBlocksIN->ui16ArrayLen * sizeof(IMG_UINT16) > 0) + { + if (OSCopyFromUser + (NULL, ui16BlockIDsInt, + (const void __user *)psRGXControlHWPerfBlocksIN-> + pui16BlockIDs, + psRGXControlHWPerfBlocksIN->ui16ArrayLen * + sizeof(IMG_UINT16)) != PVRSRV_OK) + { + 
psRGXControlHWPerfBlocksOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXControlHWPerfBlocks_exit; + } + } + + psRGXControlHWPerfBlocksOUT->eError = + PVRSRVRGXControlHWPerfBlocksKM(psConnection, + OSGetDevNode(psConnection), + psRGXControlHWPerfBlocksIN->bEnable, + psRGXControlHWPerfBlocksIN-> + ui16ArrayLen, ui16BlockIDsInt); + +RGXControlHWPerfBlocks_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRGXHWPERFBridge(void); +PVRSRV_ERROR DeinitRGXHWPERFBridge(void); + +/* + * Register all RGXHWPERF functions with services + */ +PVRSRV_ERROR InitRGXHWPERFBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF, + PVRSRVBridgeRGXCtrlHWPerf, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS, + PVRSRVBridgeRGXConfigureHWPerfBlocks, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS, + PVRSRVBridgeRGXGetHWPerfBvncFeatureFlags, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS, + PVRSRVBridgeRGXControlHWPerfBlocks, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxhwperf functions with services + */ +PVRSRV_ERROR DeinitRGXHWPERFBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS); 
+ + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/rgxkicksync_bridge/common_rgxkicksync_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxkicksync_bridge/common_rgxkicksync_bridge.h new file mode 100644 index 000000000000..58fceebb3a01 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxkicksync_bridge/common_rgxkicksync_bridge.h @@ -0,0 +1,143 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxkicksync +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxkicksync +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_RGXKICKSYNC_BRIDGE_H +#define COMMON_RGXKICKSYNC_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" +#include "pvrsrv_sync_km.h" + +#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2 PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_LAST (PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+3) + +/******************************************* + RGXCreateKickSyncContext + *******************************************/ + +/* Bridge in structure for RGXCreateKickSyncContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT_TAG +{ + IMG_HANDLE hPrivData; + IMG_UINT32 ui32PackedCCBSizeU88; + IMG_UINT32 ui32ContextFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT; + +/* Bridge out structure for RGXCreateKickSyncContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT_TAG +{ + IMG_HANDLE hKickSyncContext; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT; + +/******************************************* + RGXDestroyKickSyncContext + *******************************************/ + +/* Bridge in structure for RGXDestroyKickSyncContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT_TAG +{ + IMG_HANDLE hKickSyncContext; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT; + +/* Bridge out structure for RGXDestroyKickSyncContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) 
PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT; + +/******************************************* + RGXKickSync2 + *******************************************/ + +/* Bridge in structure for RGXKickSync2 */ +typedef struct PVRSRV_BRIDGE_IN_RGXKICKSYNC2_TAG +{ + IMG_HANDLE hKickSyncContext; + IMG_UINT32 ui32ClientCacheOpSeqNum; + IMG_UINT32 ui32ClientUpdateCount; + IMG_HANDLE *phUpdateUFODevVarBlock; + IMG_UINT32 *pui32UpdateDevVarOffset; + IMG_UINT32 *pui32UpdateValue; + PVRSRV_FENCE hCheckFenceFD; + PVRSRV_TIMELINE hTimelineFenceFD; + IMG_CHAR *puiUpdateFenceName; + IMG_UINT32 ui32ExtJobRef; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXKICKSYNC2; + +/* Bridge out structure for RGXKickSync2 */ +typedef struct PVRSRV_BRIDGE_OUT_RGXKICKSYNC2_TAG +{ + PVRSRV_FENCE hUpdateFenceFD; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXKICKSYNC2; + +/******************************************* + RGXSetKickSyncContextProperty + *******************************************/ + +/* Bridge in structure for RGXSetKickSyncContextProperty */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY_TAG +{ + IMG_HANDLE hKickSyncContext; + IMG_UINT32 ui32Property; + IMG_UINT64 ui64Input; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY; + +/* Bridge out structure for RGXSetKickSyncContextProperty */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY_TAG +{ + IMG_UINT64 ui64Output; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY; + +#endif /* COMMON_RGXKICKSYNC_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/rgxkicksync_bridge/server_rgxkicksync_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxkicksync_bridge/server_rgxkicksync_bridge.c new file mode 100644 index 000000000000..b750d0e05c80 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxkicksync_bridge/server_rgxkicksync_bridge.c @@ -0,0 +1,626 @@ 
+/******************************************************************************* +@File +@Title Server bridge for rgxkicksync +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxkicksync +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxkicksync.h" + +#include "common_rgxkicksync_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _RGXCreateKickSyncContextpsKickSyncContextIntRelease(void + *pvData) +{ + PVRSRV_ERROR eError; + eError = + PVRSRVRGXDestroyKickSyncContextKM((RGX_SERVER_KICKSYNC_CONTEXT *) + pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXCreateKickSyncContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXCreateKickSyncContextIN_UI8, + IMG_UINT8 * + psRGXCreateKickSyncContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT *psRGXCreateKickSyncContextIN + = + (PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateKickSyncContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT + *psRGXCreateKickSyncContextOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateKickSyncContextOUT_UI8, 0); + + IMG_HANDLE hPrivData = psRGXCreateKickSyncContextIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + 
RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXCreateKickSyncContextOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + IMG_TRUE); + if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateKickSyncContext_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXCreateKickSyncContextOUT->eError = + PVRSRVRGXCreateKickSyncContextKM(psConnection, + OSGetDevNode(psConnection), + hPrivDataInt, + psRGXCreateKickSyncContextIN-> + ui32PackedCCBSizeU88, + psRGXCreateKickSyncContextIN-> + ui32ContextFlags, + &psKickSyncContextInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)) + { + goto RGXCreateKickSyncContext_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRGXCreateKickSyncContextOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateKickSyncContextOUT-> + hKickSyncContext, + (void *)psKickSyncContextInt, + PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateKickSyncContextpsKickSyncContextIntRelease); + if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateKickSyncContext_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXCreateKickSyncContext_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK) + { + if (psKickSyncContextInt) + { + PVRSRVRGXDestroyKickSyncContextKM(psKickSyncContextInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDestroyKickSyncContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXDestroyKickSyncContextIN_UI8, + IMG_UINT8 * + psRGXDestroyKickSyncContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT + *psRGXDestroyKickSyncContextIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyKickSyncContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT + *psRGXDestroyKickSyncContextOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyKickSyncContextOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psRGXDestroyKickSyncContextOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psRGXDestroyKickSyncContextIN-> + hKickSyncContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT); + if (unlikely + ((psRGXDestroyKickSyncContextOUT->eError != PVRSRV_OK) + && (psRGXDestroyKickSyncContextOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psRGXDestroyKickSyncContextOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyKickSyncContext_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXDestroyKickSyncContext_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXKickSync2(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXKickSync2IN_UI8, + IMG_UINT8 * psRGXKickSync2OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXKICKSYNC2 *psRGXKickSync2IN = + (PVRSRV_BRIDGE_IN_RGXKICKSYNC2 *) + IMG_OFFSET_ADDR(psRGXKickSync2IN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXKICKSYNC2 *psRGXKickSync2OUT = + (PVRSRV_BRIDGE_OUT_RGXKICKSYNC2 *) + IMG_OFFSET_ADDR(psRGXKickSync2OUT_UI8, 0); + + IMG_HANDLE hKickSyncContext = psRGXKickSync2IN->hKickSyncContext; + RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL; + SYNC_PRIMITIVE_BLOCK **psUpdateUFODevVarBlockInt = NULL; + IMG_HANDLE *hUpdateUFODevVarBlockInt2 = NULL; + IMG_UINT32 *ui32UpdateDevVarOffsetInt = NULL; + IMG_UINT32 *ui32UpdateValueInt = NULL; + IMG_CHAR *uiUpdateFenceNameInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXKickSync2IN->ui32ClientUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)) + + (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) + + (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + 0; + + if (unlikely + (psRGXKickSync2IN->ui32ClientUpdateCount > PVRSRV_MAX_DEV_VARS)) + { + psRGXKickSync2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickSync2_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXKickSync2IN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXKickSync2IN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXKickSync2OUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXKickSync2_exit; + } + } + } + + if (psRGXKickSync2IN->ui32ClientUpdateCount != 0) + { + psUpdateUFODevVarBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickSync2IN->ui32ClientUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *); + hUpdateUFODevVarBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickSync2IN->ui32ClientUpdateCount * + sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hUpdateUFODevVarBlockInt2, + (const void __user *)psRGXKickSync2IN-> + phUpdateUFODevVarBlock, + psRGXKickSync2IN->ui32ClientUpdateCount * + sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickSync2_exit; + } + } + if (psRGXKickSync2IN->ui32ClientUpdateCount != 0) + { + ui32UpdateDevVarOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickSync2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32UpdateDevVarOffsetInt, + (const void __user *)psRGXKickSync2IN-> + pui32UpdateDevVarOffset, + psRGXKickSync2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickSync2_exit; + } 
+ } + if (psRGXKickSync2IN->ui32ClientUpdateCount != 0) + { + ui32UpdateValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickSync2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32UpdateValueInt, + (const void __user *)psRGXKickSync2IN->pui32UpdateValue, + psRGXKickSync2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickSync2_exit; + } + } + + { + uiUpdateFenceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceNameInt, + (const void __user *)psRGXKickSync2IN->puiUpdateFenceName, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickSync2_exit; + } + ((IMG_CHAR *) + uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXKickSync2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psKickSyncContextInt, + hKickSyncContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXKickSync2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickSync2_exit; + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickSync2IN->ui32ClientUpdateCount; i++) + { + /* Look up the address from the handle */ + psRGXKickSync2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection-> + psHandleBase, + (void **) + &psUpdateUFODevVarBlockInt + [i], + hUpdateUFODevVarBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXKickSync2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickSync2_exit; + } + } + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXKickSync2OUT->eError = + PVRSRVRGXKickSyncKM(psKickSyncContextInt, + psRGXKickSync2IN->ui32ClientCacheOpSeqNum, + psRGXKickSync2IN->ui32ClientUpdateCount, + psUpdateUFODevVarBlockInt, + ui32UpdateDevVarOffsetInt, + ui32UpdateValueInt, + psRGXKickSync2IN->hCheckFenceFD, + psRGXKickSync2IN->hTimelineFenceFD, + &psRGXKickSync2OUT->hUpdateFenceFD, + uiUpdateFenceNameInt, + psRGXKickSync2IN->ui32ExtJobRef); + +RGXKickSync2_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psKickSyncContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hKickSyncContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT); + } + + if (hUpdateUFODevVarBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickSync2IN->ui32ClientUpdateCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hUpdateUFODevVarBlockInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + hUpdateUFODevVarBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSetKickSyncContextProperty(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXSetKickSyncContextPropertyIN_UI8, + IMG_UINT8 * + psRGXSetKickSyncContextPropertyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY + *psRGXSetKickSyncContextPropertyIN = + (PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetKickSyncContextPropertyIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY + *psRGXSetKickSyncContextPropertyOUT = + (PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetKickSyncContextPropertyOUT_UI8, 0); + + IMG_HANDLE hKickSyncContext = + psRGXSetKickSyncContextPropertyIN->hKickSyncContext; + RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetKickSyncContextPropertyOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psKickSyncContextInt, + hKickSyncContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXSetKickSyncContextPropertyOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetKickSyncContextProperty_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetKickSyncContextPropertyOUT->eError = + PVRSRVRGXSetKickSyncContextPropertyKM(psKickSyncContextInt, + psRGXSetKickSyncContextPropertyIN-> + ui32Property, + psRGXSetKickSyncContextPropertyIN-> + ui64Input, + &psRGXSetKickSyncContextPropertyOUT-> + ui64Output); + +RGXSetKickSyncContextProperty_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psKickSyncContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hKickSyncContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRGXKICKSYNCBridge(void); +PVRSRV_ERROR DeinitRGXKICKSYNCBridge(void); + +/* + * Register all RGXKICKSYNC functions with services + */ +PVRSRV_ERROR InitRGXKICKSYNCBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, + PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT, + PVRSRVBridgeRGXCreateKickSyncContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, + PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT, + PVRSRVBridgeRGXDestroyKickSyncContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, + PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2, + PVRSRVBridgeRGXKickSync2, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, + PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY, + PVRSRVBridgeRGXSetKickSyncContextProperty, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxkicksync functions with services + */ +PVRSRV_ERROR DeinitRGXKICKSYNCBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, + PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, + PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, + PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, + PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/rgxpdump_bridge/client_rgxpdump_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxpdump_bridge/client_rgxpdump_bridge.h new file mode 100644 index 000000000000..61236370ed4c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxpdump_bridge/client_rgxpdump_bridge.h @@ -0,0 +1,72 @@ 
+/******************************************************************************* +@File +@Title Client bridge header for rgxpdump +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for rgxpdump +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef CLIENT_RGXPDUMP_BRIDGE_H +#define CLIENT_RGXPDUMP_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_rgxpdump_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpTraceBuffer(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32PDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpSignatureBuffer(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32PDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpCRCSignatureCheck(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32PDumpFlags); + +#endif /* CLIENT_RGXPDUMP_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/rgxpdump_bridge/client_rgxpdump_direct_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxpdump_bridge/client_rgxpdump_direct_bridge.c new file mode 100644 index 000000000000..395e423dcd63 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxpdump_bridge/client_rgxpdump_direct_bridge.c @@ -0,0 +1,99 @@ +/******************************************************************************* +@File +@Title Direct client bridge for rgxpdump +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for rgxpdump + which is used in calls from Server context. 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include "client_rgxpdump_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "rgx_bridge.h" + +#include "rgxpdump.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpTraceBuffer(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + + eError = + PVRSRVPDumpTraceBufferKM(NULL, + (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + ui32PDumpFlags); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpSignatureBuffer(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + + eError = + PVRSRVPDumpSignatureBufferKM(NULL, + (PVRSRV_DEVICE_NODE *) ((void *) + hBridge), + ui32PDumpFlags); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpCRCSignatureCheck(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + + eError = + PVRSRVPDumpCRCSignatureCheckKM(NULL, + (PVRSRV_DEVICE_NODE *) ((void *) + hBridge), + ui32PDumpFlags); + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/rgxpdump_bridge/common_rgxpdump_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxpdump_bridge/common_rgxpdump_bridge.h new file mode 100644 index 000000000000..0f67827d67c2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxpdump_bridge/common_rgxpdump_bridge.h @@ -0,0 +1,109 @@ 
+/******************************************************************************* +@File +@Title Common bridge header for rgxpdump +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxpdump +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_RGXPDUMP_BRIDGE_H +#define COMMON_RGXPDUMP_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" + +#define PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPTRACEBUFFER PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPSIGNATUREBUFFER PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPCRCSIGNATURECHECK PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXPDUMP_CMD_LAST (PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+2) + +/******************************************* + PDumpTraceBuffer + *******************************************/ + +/* Bridge in structure for PDumpTraceBuffer */ +typedef struct PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER_TAG +{ + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER; + +/* Bridge out structure for PDumpTraceBuffer */ +typedef struct PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER; + +/******************************************* + PDumpSignatureBuffer + *******************************************/ + +/* Bridge in structure for PDumpSignatureBuffer */ +typedef struct PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER_TAG +{ + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) 
PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER; + +/* Bridge out structure for PDumpSignatureBuffer */ +typedef struct PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER; + +/******************************************* + PDumpCRCSignatureCheck + *******************************************/ + +/* Bridge in structure for PDumpCRCSignatureCheck */ +typedef struct PVRSRV_BRIDGE_IN_PDUMPCRCSIGNATURECHECK_TAG +{ + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PDUMPCRCSIGNATURECHECK; + +/* Bridge out structure for PDumpCRCSignatureCheck */ +typedef struct PVRSRV_BRIDGE_OUT_PDUMPCRCSIGNATURECHECK_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PDUMPCRCSIGNATURECHECK; + +#endif /* COMMON_RGXPDUMP_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/rgxpdump_bridge/server_rgxpdump_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxpdump_bridge/server_rgxpdump_bridge.c new file mode 100644 index 000000000000..7a61f007bf6e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxpdump_bridge/server_rgxpdump_bridge.c @@ -0,0 +1,176 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxpdump +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxpdump +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxpdump.h" + +#include "common_rgxpdump_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgePDumpTraceBuffer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPDumpTraceBufferIN_UI8, + IMG_UINT8 * psPDumpTraceBufferOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER *psPDumpTraceBufferIN = + (PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER *) + IMG_OFFSET_ADDR(psPDumpTraceBufferIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER *psPDumpTraceBufferOUT = + (PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER *) + IMG_OFFSET_ADDR(psPDumpTraceBufferOUT_UI8, 0); + + psPDumpTraceBufferOUT->eError = + PVRSRVPDumpTraceBufferKM(psConnection, OSGetDevNode(psConnection), + psPDumpTraceBufferIN->ui32PDumpFlags); + + return 0; +} + +static IMG_INT +PVRSRVBridgePDumpSignatureBuffer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPDumpSignatureBufferIN_UI8, + IMG_UINT8 * psPDumpSignatureBufferOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER *psPDumpSignatureBufferIN = + (PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER *) + IMG_OFFSET_ADDR(psPDumpSignatureBufferIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER *psPDumpSignatureBufferOUT = + (PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER *) + IMG_OFFSET_ADDR(psPDumpSignatureBufferOUT_UI8, 0); + + psPDumpSignatureBufferOUT->eError = + PVRSRVPDumpSignatureBufferKM(psConnection, + OSGetDevNode(psConnection), + psPDumpSignatureBufferIN-> + ui32PDumpFlags); + + return 0; +} + +static IMG_INT +PVRSRVBridgePDumpCRCSignatureCheck(IMG_UINT32 
ui32DispatchTableEntry, + IMG_UINT8 * psPDumpCRCSignatureCheckIN_UI8, + IMG_UINT8 * psPDumpCRCSignatureCheckOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PDUMPCRCSIGNATURECHECK *psPDumpCRCSignatureCheckIN = + (PVRSRV_BRIDGE_IN_PDUMPCRCSIGNATURECHECK *) + IMG_OFFSET_ADDR(psPDumpCRCSignatureCheckIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PDUMPCRCSIGNATURECHECK *psPDumpCRCSignatureCheckOUT = + (PVRSRV_BRIDGE_OUT_PDUMPCRCSIGNATURECHECK *) + IMG_OFFSET_ADDR(psPDumpCRCSignatureCheckOUT_UI8, 0); + + psPDumpCRCSignatureCheckOUT->eError = + PVRSRVPDumpCRCSignatureCheckKM(psConnection, + OSGetDevNode(psConnection), + psPDumpCRCSignatureCheckIN-> + ui32PDumpFlags); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRGXPDUMPBridge(void); +PVRSRV_ERROR DeinitRGXPDUMPBridge(void); + +/* + * Register all RGXPDUMP functions with services + */ +PVRSRV_ERROR InitRGXPDUMPBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, + PVRSRV_BRIDGE_RGXPDUMP_PDUMPTRACEBUFFER, + PVRSRVBridgePDumpTraceBuffer, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, + PVRSRV_BRIDGE_RGXPDUMP_PDUMPSIGNATUREBUFFER, + PVRSRVBridgePDumpSignatureBuffer, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, + PVRSRV_BRIDGE_RGXPDUMP_PDUMPCRCSIGNATURECHECK, + PVRSRVBridgePDumpCRCSignatureCheck, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxpdump functions with services + */ +PVRSRV_ERROR DeinitRGXPDUMPBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, + PVRSRV_BRIDGE_RGXPDUMP_PDUMPTRACEBUFFER); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, + PVRSRV_BRIDGE_RGXPDUMP_PDUMPSIGNATUREBUFFER); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, + PVRSRV_BRIDGE_RGXPDUMP_PDUMPCRCSIGNATURECHECK); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/rgxregconfig_bridge/common_rgxregconfig_bridge.h 
b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxregconfig_bridge/common_rgxregconfig_bridge.h new file mode 100644 index 000000000000..c1cab95393e7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxregconfig_bridge/common_rgxregconfig_bridge.h @@ -0,0 +1,145 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxregconfig +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxregconfig +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_RGXREGCONFIG_BRIDGE_H +#define COMMON_RGXREGCONFIG_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" + +#define PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXREGCONFIG_CMD_LAST (PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+4) + +/******************************************* + RGXSetRegConfigType + *******************************************/ + +/* Bridge in structure for RGXSetRegConfigType */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE_TAG +{ + IMG_UINT8 ui8RegPowerIsland; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE; + +/* Bridge out structure for RGXSetRegConfigType 
*/ +typedef struct PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE; + +/******************************************* + RGXAddRegconfig + *******************************************/ + +/* Bridge in structure for RGXAddRegconfig */ +typedef struct PVRSRV_BRIDGE_IN_RGXADDREGCONFIG_TAG +{ + IMG_UINT32 ui32RegAddr; + IMG_UINT64 ui64RegValue; + IMG_UINT64 ui64RegMask; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXADDREGCONFIG; + +/* Bridge out structure for RGXAddRegconfig */ +typedef struct PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG; + +/******************************************* + RGXClearRegConfig + *******************************************/ + +/* Bridge in structure for RGXClearRegConfig */ +typedef struct PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG; + +/* Bridge out structure for RGXClearRegConfig */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG; + +/******************************************* + RGXEnableRegConfig + *******************************************/ + +/* Bridge in structure for RGXEnableRegConfig */ +typedef struct PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG; + +/* Bridge out structure for RGXEnableRegConfig */ +typedef struct PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG; + +/******************************************* + RGXDisableRegConfig + *******************************************/ + +/* Bridge in structure for RGXDisableRegConfig */ +typedef struct PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG_TAG +{ 
+ IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG; + +/* Bridge out structure for RGXDisableRegConfig */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG; + +#endif /* COMMON_RGXREGCONFIG_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/rgxregconfig_bridge/server_rgxregconfig_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxregconfig_bridge/server_rgxregconfig_bridge.c new file mode 100644 index 000000000000..546faf135f2a --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxregconfig_bridge/server_rgxregconfig_bridge.c @@ -0,0 +1,251 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxregconfig +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxregconfig +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxregconfig.h" + +#include "common_rgxregconfig_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeRGXSetRegConfigType(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSetRegConfigTypeIN_UI8, + IMG_UINT8 * psRGXSetRegConfigTypeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE *psRGXSetRegConfigTypeIN = + (PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE *) + IMG_OFFSET_ADDR(psRGXSetRegConfigTypeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE *psRGXSetRegConfigTypeOUT = + (PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE *) + IMG_OFFSET_ADDR(psRGXSetRegConfigTypeOUT_UI8, 0); + + psRGXSetRegConfigTypeOUT->eError = + PVRSRVRGXSetRegConfigTypeKM(psConnection, + OSGetDevNode(psConnection), + psRGXSetRegConfigTypeIN-> + ui8RegPowerIsland); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXAddRegconfig(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXAddRegconfigIN_UI8, + IMG_UINT8 * psRGXAddRegconfigOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXADDREGCONFIG *psRGXAddRegconfigIN = + (PVRSRV_BRIDGE_IN_RGXADDREGCONFIG *) + IMG_OFFSET_ADDR(psRGXAddRegconfigIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG *psRGXAddRegconfigOUT = + (PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG *) + IMG_OFFSET_ADDR(psRGXAddRegconfigOUT_UI8, 0); + + psRGXAddRegconfigOUT->eError = + PVRSRVRGXAddRegConfigKM(psConnection, OSGetDevNode(psConnection), + psRGXAddRegconfigIN->ui32RegAddr, + psRGXAddRegconfigIN->ui64RegValue, + psRGXAddRegconfigIN->ui64RegMask); + + 
return 0; +} + +static IMG_INT +PVRSRVBridgeRGXClearRegConfig(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXClearRegConfigIN_UI8, + IMG_UINT8 * psRGXClearRegConfigOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG *psRGXClearRegConfigIN = + (PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG *) + IMG_OFFSET_ADDR(psRGXClearRegConfigIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG *psRGXClearRegConfigOUT = + (PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG *) + IMG_OFFSET_ADDR(psRGXClearRegConfigOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psRGXClearRegConfigIN); + + psRGXClearRegConfigOUT->eError = + PVRSRVRGXClearRegConfigKM(psConnection, OSGetDevNode(psConnection)); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXEnableRegConfig(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXEnableRegConfigIN_UI8, + IMG_UINT8 * psRGXEnableRegConfigOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG *psRGXEnableRegConfigIN = + (PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG *) + IMG_OFFSET_ADDR(psRGXEnableRegConfigIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG *psRGXEnableRegConfigOUT = + (PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG *) + IMG_OFFSET_ADDR(psRGXEnableRegConfigOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psRGXEnableRegConfigIN); + + psRGXEnableRegConfigOUT->eError = + PVRSRVRGXEnableRegConfigKM(psConnection, + OSGetDevNode(psConnection)); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDisableRegConfig(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDisableRegConfigIN_UI8, + IMG_UINT8 * psRGXDisableRegConfigOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG *psRGXDisableRegConfigIN = + (PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG *) + IMG_OFFSET_ADDR(psRGXDisableRegConfigIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG *psRGXDisableRegConfigOUT = + (PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG *) + IMG_OFFSET_ADDR(psRGXDisableRegConfigOUT_UI8, 0); + + 
PVR_UNREFERENCED_PARAMETER(psRGXDisableRegConfigIN); + + psRGXDisableRegConfigOUT->eError = + PVRSRVRGXDisableRegConfigKM(psConnection, + OSGetDevNode(psConnection)); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +#endif /* EXCLUDE_RGXREGCONFIG_BRIDGE */ + +#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) +PVRSRV_ERROR InitRGXREGCONFIGBridge(void); +PVRSRV_ERROR DeinitRGXREGCONFIGBridge(void); + +/* + * Register all RGXREGCONFIG functions with services + */ +PVRSRV_ERROR InitRGXREGCONFIGBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE, + PVRSRVBridgeRGXSetRegConfigType, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG, + PVRSRVBridgeRGXAddRegconfig, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG, + PVRSRVBridgeRGXClearRegConfig, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG, + PVRSRVBridgeRGXEnableRegConfig, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG, + PVRSRVBridgeRGXDisableRegConfig, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxregconfig functions with services + */ +PVRSRV_ERROR DeinitRGXREGCONFIGBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG); + + return 
PVRSRV_OK; +} +#else /* EXCLUDE_RGXREGCONFIG_BRIDGE */ +/* This bridge is conditional on EXCLUDE_RGXREGCONFIG_BRIDGE - when defined, + * do not populate the dispatch table with its functions + */ +#define InitRGXREGCONFIGBridge() \ + PVRSRV_OK + +#define DeinitRGXREGCONFIGBridge() \ + PVRSRV_OK + +#endif /* EXCLUDE_RGXREGCONFIG_BRIDGE */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/rgxsignals_bridge/common_rgxsignals_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxsignals_bridge/common_rgxsignals_bridge.h new file mode 100644 index 000000000000..de523c2b2a89 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxsignals_bridge/common_rgxsignals_bridge.h @@ -0,0 +1,76 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxsignals +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxsignals +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_RGXSIGNALS_BRIDGE_H +#define COMMON_RGXSIGNALS_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" + +#define PVRSRV_BRIDGE_RGXSIGNALS_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE PVRSRV_BRIDGE_RGXSIGNALS_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXSIGNALS_CMD_LAST (PVRSRV_BRIDGE_RGXSIGNALS_CMD_FIRST+0) + +/******************************************* + RGXNotifySignalUpdate + *******************************************/ + +/* Bridge in structure for RGXNotifySignalUpdate */ +typedef struct PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE_TAG +{ + IMG_HANDLE hPrivData; + IMG_DEV_VIRTADDR sDevSignalAddress; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE; + +/* Bridge out structure for RGXNotifySignalUpdate */ +typedef struct PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE; + +#endif /* COMMON_RGXSIGNALS_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/rgxsignals_bridge/server_rgxsignals_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxsignals_bridge/server_rgxsignals_bridge.c new file mode 100644 index 000000000000..f4aefc886884 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxsignals_bridge/server_rgxsignals_bridge.c @@ -0,0 +1,174 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxsignals +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxsignals +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxsignals.h" + +#include "common_rgxsignals_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +#include "rgx_bvnc_defs_km.h" + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeRGXNotifySignalUpdate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXNotifySignalUpdateIN_UI8, + IMG_UINT8 * psRGXNotifySignalUpdateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE *psRGXNotifySignalUpdateIN = + (PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE *) + IMG_OFFSET_ADDR(psRGXNotifySignalUpdateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE *psRGXNotifySignalUpdateOUT = + (PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE *) + IMG_OFFSET_ADDR(psRGXNotifySignalUpdateOUT_UI8, 0); + + IMG_HANDLE hPrivData = psRGXNotifySignalUpdateIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK)) + { + psRGXNotifySignalUpdateOUT->eError = + PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXNotifySignalUpdate_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXNotifySignalUpdateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + IMG_TRUE); + if (unlikely(psRGXNotifySignalUpdateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXNotifySignalUpdate_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXNotifySignalUpdateOUT->eError = + PVRSRVRGXNotifySignalUpdateKM(psConnection, + OSGetDevNode(psConnection), + hPrivDataInt, + psRGXNotifySignalUpdateIN-> + sDevSignalAddress); + +RGXNotifySignalUpdate_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRGXSIGNALSBridge(void); +PVRSRV_ERROR DeinitRGXSIGNALSBridge(void); + +/* + * Register all RGXSIGNALS functions with services + */ +PVRSRV_ERROR InitRGXSIGNALSBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXSIGNALS, + PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE, + PVRSRVBridgeRGXNotifySignalUpdate, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxsignals functions with services + */ +PVRSRV_ERROR DeinitRGXSIGNALSBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXSIGNALS, + PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/rgxta3d_bridge/common_rgxta3d_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxta3d_bridge/common_rgxta3d_bridge.h new file mode 100644 index 000000000000..4a30a608c9db --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxta3d_bridge/common_rgxta3d_bridge.h @@ -0,0 +1,416 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxta3d +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxta3d +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_RGXTA3D_BRIDGE_H +#define COMMON_RGXTA3D_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" +#include "rgx_fwif_shared.h" +#include "devicemem_typedefs.h" +#include "pvrsrv_sync_km.h" + +#define PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+5 +#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+6 +#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+7 +#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+8 +#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+9 +#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+10 +#define PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+11 +#define PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+12 +#define PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2 PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+13 +#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+14 +#define PVRSRV_BRIDGE_RGXTA3D_CMD_LAST (PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+14) + +/******************************************* + RGXCreateHWRTDataSet + *******************************************/ + +/* Bridge in structure for RGXCreateHWRTDataSet */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET_TAG 
+{ + IMG_DEV_VIRTADDR sVHeapTableDevVAddr; + IMG_DEV_VIRTADDR sPMDataAddr; + IMG_DEV_VIRTADDR sPMSecureDataAddr; + IMG_HANDLE *phapsFreeLists; + IMG_UINT32 ui32PPPScreen; + IMG_UINT64 ui64PPPMultiSampleCtl; + IMG_UINT32 ui32TPCStride; + IMG_DEV_VIRTADDR sTailPtrsDevVAddr; + IMG_UINT32 ui32TPCSize; + IMG_UINT32 ui32TEScreen; + IMG_UINT32 ui32TEAA; + IMG_UINT32 ui32TEMTILE1; + IMG_UINT32 ui32TEMTILE2; + IMG_UINT32 ui32RgnStride; + IMG_UINT32 ui32ISPMergeLowerX; + IMG_UINT32 ui32ISPMergeLowerY; + IMG_UINT32 ui32ISPMergeUpperX; + IMG_UINT32 ui32ISPMergeUpperY; + IMG_UINT32 ui32ISPMergeScaleX; + IMG_UINT32 ui32ISPMergeScaleY; + IMG_UINT16 ui16MaxRTs; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET; + +/* Bridge out structure for RGXCreateHWRTDataSet */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET_TAG +{ + IMG_HANDLE hKmHwRTDataSet; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET; + +/******************************************* + RGXDestroyHWRTDataSet + *******************************************/ + +/* Bridge in structure for RGXDestroyHWRTDataSet */ +typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET_TAG +{ + IMG_HANDLE hKmHwRTDataSet; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET; + +/* Bridge out structure for RGXDestroyHWRTDataSet */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET; + +/******************************************* + RGXCreateZSBuffer + *******************************************/ + +/* Bridge in structure for RGXCreateZSBuffer */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER_TAG +{ + IMG_HANDLE hReservation; + IMG_HANDLE hPMR; + PVRSRV_MEMALLOCFLAGS_T uiMapFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER; + +/* Bridge out structure for RGXCreateZSBuffer */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER_TAG +{ + IMG_HANDLE 
hsZSBufferKM; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER; + +/******************************************* + RGXDestroyZSBuffer + *******************************************/ + +/* Bridge in structure for RGXDestroyZSBuffer */ +typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER_TAG +{ + IMG_HANDLE hsZSBufferMemDesc; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER; + +/* Bridge out structure for RGXDestroyZSBuffer */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER; + +/******************************************* + RGXPopulateZSBuffer + *******************************************/ + +/* Bridge in structure for RGXPopulateZSBuffer */ +typedef struct PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER_TAG +{ + IMG_HANDLE hsZSBufferKM; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER; + +/* Bridge out structure for RGXPopulateZSBuffer */ +typedef struct PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER_TAG +{ + IMG_HANDLE hsPopulation; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER; + +/******************************************* + RGXUnpopulateZSBuffer + *******************************************/ + +/* Bridge in structure for RGXUnpopulateZSBuffer */ +typedef struct PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER_TAG +{ + IMG_HANDLE hsPopulation; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER; + +/* Bridge out structure for RGXUnpopulateZSBuffer */ +typedef struct PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER; + +/******************************************* + RGXCreateFreeList + *******************************************/ + +/* Bridge in structure for RGXCreateFreeList */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATEFREELIST_TAG +{ + IMG_HANDLE hMemCtxPrivData; + IMG_UINT32 
ui32MaxFLPages; + IMG_UINT32 ui32InitFLPages; + IMG_UINT32 ui32GrowFLPages; + IMG_UINT32 ui32GrowParamThreshold; + IMG_HANDLE hsGlobalFreeList; + IMG_BOOL bbFreeListCheck; + IMG_DEV_VIRTADDR spsFreeListBaseDevVAddr; + IMG_DEV_VIRTADDR spsFreeListStateDevVAddr; + IMG_HANDLE hsFreeListPMR; + IMG_DEVMEM_OFFSET_T uiPMROffset; + IMG_HANDLE hsFreeListStatePMR; + IMG_DEVMEM_OFFSET_T uiPMRStateOffset; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCREATEFREELIST; + +/* Bridge out structure for RGXCreateFreeList */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST_TAG +{ + IMG_HANDLE hCleanupCookie; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST; + +/******************************************* + RGXDestroyFreeList + *******************************************/ + +/* Bridge in structure for RGXDestroyFreeList */ +typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST_TAG +{ + IMG_HANDLE hCleanupCookie; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST; + +/* Bridge out structure for RGXDestroyFreeList */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST; + +/******************************************* + RGXCreateRenderContext + *******************************************/ + +/* Bridge in structure for RGXCreateRenderContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT_TAG +{ + IMG_UINT32 ui32Priority; + IMG_HANDLE hPrivData; + IMG_UINT32 ui32StaticRenderContextStateSize; + IMG_BYTE *psStaticRenderContextState; + IMG_UINT32 ui32PackedCCBSizeU8888; + IMG_UINT32 ui32ContextFlags; + IMG_UINT64 ui64RobustnessAddress; + IMG_UINT32 ui32MaxTADeadlineMS; + IMG_UINT32 ui32Max3DDeadlineMS; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT; + +/* Bridge out structure for RGXCreateRenderContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT_TAG +{ + IMG_HANDLE hRenderContext; + PVRSRV_ERROR 
eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT; + +/******************************************* + RGXDestroyRenderContext + *******************************************/ + +/* Bridge in structure for RGXDestroyRenderContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT_TAG +{ + IMG_HANDLE hCleanupCookie; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT; + +/* Bridge out structure for RGXDestroyRenderContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT; + +/******************************************* + RGXSetRenderContextPriority + *******************************************/ + +/* Bridge in structure for RGXSetRenderContextPriority */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY_TAG +{ + IMG_HANDLE hRenderContext; + IMG_UINT32 ui32Priority; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY; + +/* Bridge out structure for RGXSetRenderContextPriority */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY; + +/******************************************* + RGXGetLastRenderContextResetReason + *******************************************/ + +/* Bridge in structure for RGXGetLastRenderContextResetReason */ +typedef struct PVRSRV_BRIDGE_IN_RGXGETLASTRENDERCONTEXTRESETREASON_TAG +{ + IMG_HANDLE hRenderContext; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXGETLASTRENDERCONTEXTRESETREASON; + +/* Bridge out structure for RGXGetLastRenderContextResetReason */ +typedef struct PVRSRV_BRIDGE_OUT_RGXGETLASTRENDERCONTEXTRESETREASON_TAG +{ + IMG_UINT32 ui32LastResetReason; + IMG_UINT32 ui32LastResetJobRef; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXGETLASTRENDERCONTEXTRESETREASON; + +/******************************************* 
+ RGXRenderContextStalled + *******************************************/ + +/* Bridge in structure for RGXRenderContextStalled */ +typedef struct PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED_TAG +{ + IMG_HANDLE hRenderContext; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED; + +/* Bridge out structure for RGXRenderContextStalled */ +typedef struct PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED; + +/******************************************* + RGXKickTA3D2 + *******************************************/ + +/* Bridge in structure for RGXKickTA3D2 */ +typedef struct PVRSRV_BRIDGE_IN_RGXKICKTA3D2_TAG +{ + IMG_HANDLE hRenderContext; + IMG_UINT32 ui32ClientCacheOpSeqNum; + IMG_UINT32 ui32ClientTAFenceCount; + IMG_HANDLE *phClientTAFenceSyncPrimBlock; + IMG_UINT32 *pui32ClientTAFenceSyncOffset; + IMG_UINT32 *pui32ClientTAFenceValue; + IMG_UINT32 ui32ClientTAUpdateCount; + IMG_HANDLE *phClientTAUpdateSyncPrimBlock; + IMG_UINT32 *pui32ClientTAUpdateSyncOffset; + IMG_UINT32 *pui32ClientTAUpdateValue; + IMG_UINT32 ui32Client3DUpdateCount; + IMG_HANDLE *phClient3DUpdateSyncPrimBlock; + IMG_UINT32 *pui32Client3DUpdateSyncOffset; + IMG_UINT32 *pui32Client3DUpdateValue; + IMG_HANDLE hPRFenceUFOSyncPrimBlock; + IMG_UINT32 ui32FRFenceUFOSyncOffset; + IMG_UINT32 ui32FRFenceValue; + PVRSRV_FENCE hCheckFence; + PVRSRV_TIMELINE hUpdateTimeline; + IMG_CHAR *puiUpdateFenceName; + PVRSRV_FENCE hCheckFence3D; + PVRSRV_TIMELINE hUpdateTimeline3D; + IMG_CHAR *puiUpdateFenceName3D; + IMG_UINT32 ui32TACmdSize; + IMG_BYTE *psTACmd; + IMG_UINT32 ui323DPRCmdSize; + IMG_BYTE *ps3DPRCmd; + IMG_UINT32 ui323DCmdSize; + IMG_BYTE *ps3DCmd; + IMG_UINT32 ui32ExtJobRef; + IMG_BOOL bbKickTA; + IMG_BOOL bbKickPR; + IMG_BOOL bbKick3D; + IMG_BOOL bbAbort; + IMG_UINT32 ui32PDumpFlags; + IMG_HANDLE hKMHWRTDataSet; + IMG_HANDLE hZSBuffer; + IMG_HANDLE hMSAAScratchBuffer; + IMG_UINT32 ui32SyncPMRCount; 
+ IMG_UINT32 *pui32SyncPMRFlags; + IMG_HANDLE *phSyncPMRs; + IMG_UINT32 ui32RenderTargetSize; + IMG_UINT32 ui32NumberOfDrawCalls; + IMG_UINT32 ui32NumberOfIndices; + IMG_UINT32 ui32NumberOfMRTs; + IMG_UINT64 ui64Deadline; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXKICKTA3D2; + +/* Bridge out structure for RGXKickTA3D2 */ +typedef struct PVRSRV_BRIDGE_OUT_RGXKICKTA3D2_TAG +{ + PVRSRV_FENCE hUpdateFence; + PVRSRV_FENCE hUpdateFence3D; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXKICKTA3D2; + +/******************************************* + RGXSetRenderContextProperty + *******************************************/ + +/* Bridge in structure for RGXSetRenderContextProperty */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY_TAG +{ + IMG_HANDLE hRenderContext; + IMG_UINT32 ui32Property; + IMG_UINT64 ui64Input; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY; + +/* Bridge out structure for RGXSetRenderContextProperty */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY_TAG +{ + IMG_UINT64 ui64Output; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY; + +#endif /* COMMON_RGXTA3D_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/rgxta3d_bridge/server_rgxta3d_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxta3d_bridge/server_rgxta3d_bridge.c new file mode 100644 index 000000000000..f63d6df9361a --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxta3d_bridge/server_rgxta3d_bridge.c @@ -0,0 +1,2454 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxta3d +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxta3d +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxta3d.h" + +#include "common_rgxta3d_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _RGXCreateHWRTDataSetpsKmHwRTDataSetIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RGXDestroyHWRTDataSet((RGX_KM_HW_RT_DATASET *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXCreateHWRTDataSet(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateHWRTDataSetIN_UI8, + IMG_UINT8 * psRGXCreateHWRTDataSetOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET *psRGXCreateHWRTDataSetIN = + (PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET *) + IMG_OFFSET_ADDR(psRGXCreateHWRTDataSetIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET *psRGXCreateHWRTDataSetOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET *) + IMG_OFFSET_ADDR(psRGXCreateHWRTDataSetOUT_UI8, 0); + + RGX_FREELIST **psapsFreeListsInt = NULL; + IMG_HANDLE *hapsFreeListsInt2 = NULL; + RGX_KM_HW_RT_DATASET *psKmHwRTDataSetInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (RGXFW_MAX_FREELISTS * sizeof(RGX_FREELIST *)) + + (RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE)) + 0; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXCreateHWRTDataSetIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXCreateHWRTDataSetIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXCreateHWRTDataSetOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXCreateHWRTDataSet_exit; + } + } + } + + { + psapsFreeListsInt = + (RGX_FREELIST **) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += RGXFW_MAX_FREELISTS * sizeof(RGX_FREELIST *); + hapsFreeListsInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hapsFreeListsInt2, + (const void __user *)psRGXCreateHWRTDataSetIN-> + phapsFreeLists, + RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXCreateHWRTDataSetOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateHWRTDataSet_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + { + IMG_UINT32 i; + + for (i = 0; i < RGXFW_MAX_FREELISTS; i++) + { + /* Look up the address from the handle */ + psRGXCreateHWRTDataSetOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection-> + psHandleBase, + (void **) + &psapsFreeListsInt[i], + hapsFreeListsInt2[i], + PVRSRV_HANDLE_TYPE_RGX_FREELIST, + IMG_TRUE); + if (unlikely + (psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateHWRTDataSet_exit; + } + } + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXCreateHWRTDataSetOUT->eError = + RGXCreateHWRTDataSet(psConnection, OSGetDevNode(psConnection), + psRGXCreateHWRTDataSetIN->sVHeapTableDevVAddr, + psRGXCreateHWRTDataSetIN->sPMDataAddr, + psRGXCreateHWRTDataSetIN->sPMSecureDataAddr, + psapsFreeListsInt, + psRGXCreateHWRTDataSetIN->ui32PPPScreen, + psRGXCreateHWRTDataSetIN-> + ui64PPPMultiSampleCtl, + psRGXCreateHWRTDataSetIN->ui32TPCStride, + psRGXCreateHWRTDataSetIN->sTailPtrsDevVAddr, + psRGXCreateHWRTDataSetIN->ui32TPCSize, + psRGXCreateHWRTDataSetIN->ui32TEScreen, + psRGXCreateHWRTDataSetIN->ui32TEAA, + psRGXCreateHWRTDataSetIN->ui32TEMTILE1, + psRGXCreateHWRTDataSetIN->ui32TEMTILE2, + psRGXCreateHWRTDataSetIN->ui32RgnStride, + psRGXCreateHWRTDataSetIN->ui32ISPMergeLowerX, + psRGXCreateHWRTDataSetIN->ui32ISPMergeLowerY, + psRGXCreateHWRTDataSetIN->ui32ISPMergeUpperX, + psRGXCreateHWRTDataSetIN->ui32ISPMergeUpperY, + psRGXCreateHWRTDataSetIN->ui32ISPMergeScaleX, + psRGXCreateHWRTDataSetIN->ui32ISPMergeScaleY, + psRGXCreateHWRTDataSetIN->ui16MaxRTs, + &psKmHwRTDataSetInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) + { + goto RGXCreateHWRTDataSet_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXCreateHWRTDataSetOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateHWRTDataSetOUT-> + hKmHwRTDataSet, + (void *)psKmHwRTDataSetInt, + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateHWRTDataSetpsKmHwRTDataSetIntRelease); + if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateHWRTDataSet_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXCreateHWRTDataSet_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + if (hapsFreeListsInt2) + { + IMG_UINT32 i; + + for (i = 0; i < RGXFW_MAX_FREELISTS; i++) + { + + /* Unreference the previously looked up handle */ + if (hapsFreeListsInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + hapsFreeListsInt2 + [i], + PVRSRV_HANDLE_TYPE_RGX_FREELIST); + } + } + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK) + { + if (psKmHwRTDataSetInt) + { + RGXDestroyHWRTDataSet(psKmHwRTDataSetInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDestroyHWRTDataSet(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyHWRTDataSetIN_UI8, + IMG_UINT8 * psRGXDestroyHWRTDataSetOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET *psRGXDestroyHWRTDataSetIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET *) + IMG_OFFSET_ADDR(psRGXDestroyHWRTDataSetIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET *psRGXDestroyHWRTDataSetOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET *) + IMG_OFFSET_ADDR(psRGXDestroyHWRTDataSetOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psRGXDestroyHWRTDataSetOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psRGXDestroyHWRTDataSetIN-> + hKmHwRTDataSet, + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET); + if (unlikely + ((psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_OK) + && (psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psRGXDestroyHWRTDataSetOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyHWRTDataSet_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXDestroyHWRTDataSet_exit: + + return 0; +} + +static PVRSRV_ERROR _RGXCreateZSBufferpssZSBufferKMIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RGXDestroyZSBufferKM((RGX_ZSBUFFER_DATA *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXCreateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateZSBufferIN_UI8, + IMG_UINT8 * psRGXCreateZSBufferOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *psRGXCreateZSBufferIN = + (PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *) + IMG_OFFSET_ADDR(psRGXCreateZSBufferIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *psRGXCreateZSBufferOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *) + IMG_OFFSET_ADDR(psRGXCreateZSBufferOUT_UI8, 0); + + IMG_HANDLE hReservation = psRGXCreateZSBufferIN->hReservation; + DEVMEMINT_RESERVATION *psReservationInt = NULL; + IMG_HANDLE hPMR = psRGXCreateZSBufferIN->hPMR; + PMR *psPMRInt = NULL; + RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXCreateZSBufferOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, + IMG_TRUE); + if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateZSBuffer_exit; + } + + /* Look up the address from the handle */ + psRGXCreateZSBufferOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateZSBuffer_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXCreateZSBufferOUT->eError = + RGXCreateZSBufferKM(psConnection, OSGetDevNode(psConnection), + psReservationInt, + psPMRInt, + psRGXCreateZSBufferIN->uiMapFlags, + &pssZSBufferKMInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) + { + goto RGXCreateZSBuffer_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRGXCreateZSBufferOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateZSBufferOUT->hsZSBufferKM, + (void *)pssZSBufferKMInt, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateZSBufferpssZSBufferKMIntRelease); + if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateZSBuffer_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXCreateZSBuffer_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psReservationInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); + } + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXCreateZSBufferOUT->eError != PVRSRV_OK) + { + if (pssZSBufferKMInt) + { + RGXDestroyZSBufferKM(pssZSBufferKMInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDestroyZSBuffer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyZSBufferIN_UI8, + IMG_UINT8 * psRGXDestroyZSBufferOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *) + IMG_OFFSET_ADDR(psRGXDestroyZSBufferIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *) + IMG_OFFSET_ADDR(psRGXDestroyZSBufferOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psRGXDestroyZSBufferOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psRGXDestroyZSBufferIN-> + hsZSBufferMemDesc, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); + if (unlikely + ((psRGXDestroyZSBufferOUT->eError != PVRSRV_OK) + && (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psRGXDestroyZSBufferOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyZSBuffer_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXDestroyZSBuffer_exit: + + return 0; +} + +static PVRSRV_ERROR _RGXPopulateZSBufferpssPopulationIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RGXUnpopulateZSBufferKM((RGX_POPULATION *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXPopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXPopulateZSBufferIN_UI8, + IMG_UINT8 * psRGXPopulateZSBufferOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferIN = + (PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *) + IMG_OFFSET_ADDR(psRGXPopulateZSBufferIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferOUT = + (PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *) + IMG_OFFSET_ADDR(psRGXPopulateZSBufferOUT_UI8, 0); + + IMG_HANDLE hsZSBufferKM = psRGXPopulateZSBufferIN->hsZSBufferKM; + RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL; + RGX_POPULATION *pssPopulationInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXPopulateZSBufferOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&pssZSBufferKMInt, + hsZSBufferKM, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, + IMG_TRUE); + if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXPopulateZSBuffer_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXPopulateZSBufferOUT->eError = + RGXPopulateZSBufferKM(pssZSBufferKMInt, &pssPopulationInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) + { + goto RGXPopulateZSBuffer_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXPopulateZSBufferOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXPopulateZSBufferOUT->hsPopulation, + (void *)pssPopulationInt, + PVRSRV_HANDLE_TYPE_RGX_POPULATION, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXPopulateZSBufferpssPopulationIntRelease); + if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXPopulateZSBuffer_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXPopulateZSBuffer_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (pssZSBufferKMInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hsZSBufferKM, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXPopulateZSBufferOUT->eError != PVRSRV_OK) + { + if (pssPopulationInt) + { + RGXUnpopulateZSBufferKM(pssPopulationInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXUnpopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXUnpopulateZSBufferIN_UI8, + IMG_UINT8 * psRGXUnpopulateZSBufferOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferIN = + (PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *) + IMG_OFFSET_ADDR(psRGXUnpopulateZSBufferIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferOUT = + (PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *) + IMG_OFFSET_ADDR(psRGXUnpopulateZSBufferOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXUnpopulateZSBufferOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psRGXUnpopulateZSBufferIN-> + hsPopulation, + PVRSRV_HANDLE_TYPE_RGX_POPULATION); + if (unlikely + ((psRGXUnpopulateZSBufferOUT->eError != PVRSRV_OK) + && (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psRGXUnpopulateZSBufferOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXUnpopulateZSBuffer_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXUnpopulateZSBuffer_exit: + + return 0; +} + +static PVRSRV_ERROR _RGXCreateFreeListpsCleanupCookieIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RGXDestroyFreeList((RGX_FREELIST *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXCreateFreeList(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateFreeListIN_UI8, + IMG_UINT8 * psRGXCreateFreeListOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *psRGXCreateFreeListIN = + (PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *) + IMG_OFFSET_ADDR(psRGXCreateFreeListIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *psRGXCreateFreeListOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *) + IMG_OFFSET_ADDR(psRGXCreateFreeListOUT_UI8, 0); + + IMG_HANDLE hMemCtxPrivData = psRGXCreateFreeListIN->hMemCtxPrivData; + IMG_HANDLE hMemCtxPrivDataInt = NULL; + IMG_HANDLE hsGlobalFreeList = psRGXCreateFreeListIN->hsGlobalFreeList; + RGX_FREELIST *pssGlobalFreeListInt = NULL; + IMG_HANDLE hsFreeListPMR = psRGXCreateFreeListIN->hsFreeListPMR; + PMR *pssFreeListPMRInt = NULL; + IMG_HANDLE hsFreeListStatePMR = + psRGXCreateFreeListIN->hsFreeListStatePMR; + PMR *pssFreeListStatePMRInt = NULL; + RGX_FREELIST *psCleanupCookieInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXCreateFreeListOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hMemCtxPrivDataInt, + hMemCtxPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + IMG_TRUE); + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateFreeList_exit; + } + + if (psRGXCreateFreeListIN->hsGlobalFreeList) + { + /* Look up the address from the handle */ + psRGXCreateFreeListOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&pssGlobalFreeListInt, + hsGlobalFreeList, + PVRSRV_HANDLE_TYPE_RGX_FREELIST, + IMG_TRUE); + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateFreeList_exit; + } + } + + /* Look up the address from the handle */ + psRGXCreateFreeListOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&pssFreeListPMRInt, + hsFreeListPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateFreeList_exit; + } + + /* Look up the address from the handle */ + psRGXCreateFreeListOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&pssFreeListStatePMRInt, + hsFreeListStatePMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateFreeList_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXCreateFreeListOUT->eError = + RGXCreateFreeList(psConnection, OSGetDevNode(psConnection), + hMemCtxPrivDataInt, + psRGXCreateFreeListIN->ui32MaxFLPages, + psRGXCreateFreeListIN->ui32InitFLPages, + psRGXCreateFreeListIN->ui32GrowFLPages, + psRGXCreateFreeListIN->ui32GrowParamThreshold, + pssGlobalFreeListInt, + psRGXCreateFreeListIN->bbFreeListCheck, + psRGXCreateFreeListIN->spsFreeListBaseDevVAddr, + psRGXCreateFreeListIN->spsFreeListStateDevVAddr, + pssFreeListPMRInt, + psRGXCreateFreeListIN->uiPMROffset, + pssFreeListStatePMRInt, + psRGXCreateFreeListIN->uiPMRStateOffset, + &psCleanupCookieInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + { + goto RGXCreateFreeList_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRGXCreateFreeListOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateFreeListOUT->hCleanupCookie, + (void *)psCleanupCookieInt, + PVRSRV_HANDLE_TYPE_RGX_FREELIST, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateFreeListpsCleanupCookieIntRelease); + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateFreeList_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXCreateFreeList_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hMemCtxPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hMemCtxPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + + if (psRGXCreateFreeListIN->hsGlobalFreeList) + { + + /* Unreference the previously looked up handle */ + if (pssGlobalFreeListInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hsGlobalFreeList, + PVRSRV_HANDLE_TYPE_RGX_FREELIST); + } + } + + /* Unreference the previously looked up handle */ + if (pssFreeListPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hsFreeListPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + + /* Unreference the previously looked up handle */ + if (pssFreeListStatePMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hsFreeListStatePMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXCreateFreeListOUT->eError != PVRSRV_OK) + { + if (psCleanupCookieInt) + { + RGXDestroyFreeList(psCleanupCookieInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDestroyFreeList(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyFreeListIN_UI8, + IMG_UINT8 * psRGXDestroyFreeListOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *psRGXDestroyFreeListIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *) + IMG_OFFSET_ADDR(psRGXDestroyFreeListIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *psRGXDestroyFreeListOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *) + IMG_OFFSET_ADDR(psRGXDestroyFreeListOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXDestroyFreeListOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psRGXDestroyFreeListIN-> + hCleanupCookie, + PVRSRV_HANDLE_TYPE_RGX_FREELIST); + if (unlikely + ((psRGXDestroyFreeListOUT->eError != PVRSRV_OK) + && (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psRGXDestroyFreeListOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyFreeList_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXDestroyFreeList_exit: + + return 0; +} + +static PVRSRV_ERROR _RGXCreateRenderContextpsRenderContextIntRelease(void + *pvData) +{ + PVRSRV_ERROR eError; + eError = + PVRSRVRGXDestroyRenderContextKM((RGX_SERVER_RENDER_CONTEXT *) + pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXCreateRenderContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateRenderContextIN_UI8, + IMG_UINT8 * psRGXCreateRenderContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextIN = + (PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateRenderContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateRenderContextOUT_UI8, 0); + + IMG_HANDLE hPrivData = psRGXCreateRenderContextIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + IMG_BYTE *psStaticRenderContextStateInt = NULL; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * + sizeof(IMG_BYTE)) + 0; + + if (unlikely + 
(psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize > + RGXFWIF_STATIC_RENDERCONTEXT_SIZE)) + { + psRGXCreateRenderContextOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXCreateRenderContext_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXCreateRenderContextIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXCreateRenderContextIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXCreateRenderContextOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXCreateRenderContext_exit; + } + } + } + + if (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize != 0) + { + psStaticRenderContextStateInt = + (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXCreateRenderContextIN-> + ui32StaticRenderContextStateSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * + sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, psStaticRenderContextStateInt, + (const void __user *)psRGXCreateRenderContextIN-> + psStaticRenderContextState, + psRGXCreateRenderContextIN-> + ui32StaticRenderContextStateSize * sizeof(IMG_BYTE)) != + PVRSRV_OK) + { + psRGXCreateRenderContextOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateRenderContext_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXCreateRenderContextOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + IMG_TRUE); + if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateRenderContext_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXCreateRenderContextOUT->eError = + PVRSRVRGXCreateRenderContextKM(psConnection, + OSGetDevNode(psConnection), + psRGXCreateRenderContextIN-> + ui32Priority, hPrivDataInt, + psRGXCreateRenderContextIN-> + ui32StaticRenderContextStateSize, + psStaticRenderContextStateInt, + psRGXCreateRenderContextIN-> + ui32PackedCCBSizeU8888, + psRGXCreateRenderContextIN-> + ui32ContextFlags, + psRGXCreateRenderContextIN-> + ui64RobustnessAddress, + psRGXCreateRenderContextIN-> + ui32MaxTADeadlineMS, + psRGXCreateRenderContextIN-> + ui32Max3DDeadlineMS, + &psRenderContextInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) + { + goto RGXCreateRenderContext_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRGXCreateRenderContextOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateRenderContextOUT-> + hRenderContext, + (void *)psRenderContextInt, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateRenderContextpsRenderContextIntRelease); + if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateRenderContext_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXCreateRenderContext_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXCreateRenderContextOUT->eError != PVRSRV_OK) + { + if (psRenderContextInt) + { + PVRSRVRGXDestroyRenderContextKM(psRenderContextInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDestroyRenderContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyRenderContextIN_UI8, + IMG_UINT8 * + psRGXDestroyRenderContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyRenderContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextOUT + = + (PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyRenderContextOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXDestroyRenderContextOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psRGXDestroyRenderContextIN-> + hCleanupCookie, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + if (unlikely + ((psRGXDestroyRenderContextOUT->eError != PVRSRV_OK) + && (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psRGXDestroyRenderContextOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyRenderContext_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXDestroyRenderContext_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSetRenderContextPriority(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXSetRenderContextPriorityIN_UI8, + IMG_UINT8 * + psRGXSetRenderContextPriorityOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY + *psRGXSetRenderContextPriorityIN = + (PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXSetRenderContextPriorityIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY + *psRGXSetRenderContextPriorityOUT = + (PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXSetRenderContextPriorityOUT_UI8, 0); + + IMG_HANDLE hRenderContext = + psRGXSetRenderContextPriorityIN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetRenderContextPriorityOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRenderContextInt, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXSetRenderContextPriorityOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetRenderContextPriority_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetRenderContextPriorityOUT->eError = + PVRSRVRGXSetRenderContextPriorityKM(psConnection, + OSGetDevNode(psConnection), + psRenderContextInt, + psRGXSetRenderContextPriorityIN-> + ui32Priority); + +RGXSetRenderContextPriority_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psRenderContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXGetLastRenderContextResetReason(IMG_UINT32 + ui32DispatchTableEntry, + IMG_UINT8 * + psRGXGetLastRenderContextResetReasonIN_UI8, + IMG_UINT8 * + psRGXGetLastRenderContextResetReasonOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXGETLASTRENDERCONTEXTRESETREASON + *psRGXGetLastRenderContextResetReasonIN = + (PVRSRV_BRIDGE_IN_RGXGETLASTRENDERCONTEXTRESETREASON *) + IMG_OFFSET_ADDR(psRGXGetLastRenderContextResetReasonIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXGETLASTRENDERCONTEXTRESETREASON + *psRGXGetLastRenderContextResetReasonOUT = + (PVRSRV_BRIDGE_OUT_RGXGETLASTRENDERCONTEXTRESETREASON *) + IMG_OFFSET_ADDR(psRGXGetLastRenderContextResetReasonOUT_UI8, 0); + + IMG_HANDLE hRenderContext = + psRGXGetLastRenderContextResetReasonIN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXGetLastRenderContextResetReasonOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRenderContextInt, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, + IMG_TRUE); + if (unlikely + (psRGXGetLastRenderContextResetReasonOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXGetLastRenderContextResetReason_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXGetLastRenderContextResetReasonOUT->eError = + PVRSRVRGXGetLastRenderContextResetReasonKM(psRenderContextInt, + &psRGXGetLastRenderContextResetReasonOUT-> + ui32LastResetReason, + &psRGXGetLastRenderContextResetReasonOUT-> + ui32LastResetJobRef); + +RGXGetLastRenderContextResetReason_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psRenderContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXRenderContextStalled(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXRenderContextStalledIN_UI8, + IMG_UINT8 * + psRGXRenderContextStalledOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledIN = + (PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *) + IMG_OFFSET_ADDR(psRGXRenderContextStalledIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledOUT + = + (PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *) + IMG_OFFSET_ADDR(psRGXRenderContextStalledOUT_UI8, 0); + + IMG_HANDLE hRenderContext = psRGXRenderContextStalledIN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXRenderContextStalledOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRenderContextInt, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXRenderContextStalledOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXRenderContextStalled_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXRenderContextStalledOUT->eError = + RGXRenderContextStalledKM(psRenderContextInt); + +RGXRenderContextStalled_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psRenderContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXKickTA3D2(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXKickTA3D2IN_UI8, + IMG_UINT8 * psRGXKickTA3D2OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXKICKTA3D2 *psRGXKickTA3D2IN = + (PVRSRV_BRIDGE_IN_RGXKICKTA3D2 *) + IMG_OFFSET_ADDR(psRGXKickTA3D2IN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXKICKTA3D2 *psRGXKickTA3D2OUT = + (PVRSRV_BRIDGE_OUT_RGXKICKTA3D2 *) + IMG_OFFSET_ADDR(psRGXKickTA3D2OUT_UI8, 0); + + IMG_HANDLE hRenderContext = psRGXKickTA3D2IN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + SYNC_PRIMITIVE_BLOCK **psClientTAFenceSyncPrimBlockInt = NULL; + IMG_HANDLE *hClientTAFenceSyncPrimBlockInt2 = NULL; + IMG_UINT32 *ui32ClientTAFenceSyncOffsetInt = NULL; + IMG_UINT32 *ui32ClientTAFenceValueInt = NULL; + SYNC_PRIMITIVE_BLOCK **psClientTAUpdateSyncPrimBlockInt = NULL; + IMG_HANDLE *hClientTAUpdateSyncPrimBlockInt2 = NULL; + IMG_UINT32 *ui32ClientTAUpdateSyncOffsetInt = NULL; + IMG_UINT32 *ui32ClientTAUpdateValueInt = NULL; + SYNC_PRIMITIVE_BLOCK **psClient3DUpdateSyncPrimBlockInt = NULL; + IMG_HANDLE *hClient3DUpdateSyncPrimBlockInt2 = NULL; + IMG_UINT32 *ui32Client3DUpdateSyncOffsetInt = NULL; + IMG_UINT32 *ui32Client3DUpdateValueInt = NULL; + IMG_HANDLE hPRFenceUFOSyncPrimBlock = + psRGXKickTA3D2IN->hPRFenceUFOSyncPrimBlock; + SYNC_PRIMITIVE_BLOCK *psPRFenceUFOSyncPrimBlockInt = NULL; + IMG_CHAR *uiUpdateFenceNameInt = NULL; + IMG_CHAR *uiUpdateFenceName3DInt = NULL; + IMG_BYTE *psTACmdInt = NULL; + IMG_BYTE *ps3DPRCmdInt = NULL; + IMG_BYTE *ps3DCmdInt = NULL; + IMG_HANDLE hKMHWRTDataSet = 
psRGXKickTA3D2IN->hKMHWRTDataSet; + RGX_KM_HW_RT_DATASET *psKMHWRTDataSetInt = NULL; + IMG_HANDLE hZSBuffer = psRGXKickTA3D2IN->hZSBuffer; + RGX_ZSBUFFER_DATA *psZSBufferInt = NULL; + IMG_HANDLE hMSAAScratchBuffer = psRGXKickTA3D2IN->hMSAAScratchBuffer; + RGX_ZSBUFFER_DATA *psMSAAScratchBufferInt = NULL; + IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; + PMR **psSyncPMRsInt = NULL; + IMG_HANDLE *hSyncPMRsInt2 = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXKickTA3D2IN->ui32ClientTAFenceCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)) + + (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) + + (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) + + (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) + + (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)) + + (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) + + (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) + + (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) + + (psRGXKickTA3D2IN->ui32Client3DUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)) + + (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) + + (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) + + (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) + + (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + (psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) + + (psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) + + (psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) + + (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + + (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *)) + + (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; + + if (unlikely + (psRGXKickTA3D2IN->ui32ClientTAFenceCount > PVRSRV_MAX_SYNCS)) + 
{ + psRGXKickTA3D2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely + (psRGXKickTA3D2IN->ui32ClientTAUpdateCount > PVRSRV_MAX_SYNCS)) + { + psRGXKickTA3D2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely + (psRGXKickTA3D2IN->ui32Client3DUpdateCount > PVRSRV_MAX_SYNCS)) + { + psRGXKickTA3D2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely + (psRGXKickTA3D2IN->ui32TACmdSize > + RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXKickTA3D2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely + (psRGXKickTA3D2IN->ui323DPRCmdSize > + RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXKickTA3D2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely + (psRGXKickTA3D2IN->ui323DCmdSize > + RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXKickTA3D2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely(psRGXKickTA3D2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) + { + psRGXKickTA3D2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXKickTA3D2IN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXKickTA3D2IN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXKickTA3D2OUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXKickTA3D2_exit; + } + } + } + + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) + { + psClientTAFenceSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32ClientTAFenceCount * + sizeof(SYNC_PRIMITIVE_BLOCK *); + hClientTAFenceSyncPrimBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32ClientTAFenceCount * + sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hClientTAFenceSyncPrimBlockInt2, + (const void __user *)psRGXKickTA3D2IN-> + phClientTAFenceSyncPrimBlock, + psRGXKickTA3D2IN->ui32ClientTAFenceCount * + sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) + { + ui32ClientTAFenceSyncOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32ClientTAFenceCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientTAFenceSyncOffsetInt, + (const void __user *)psRGXKickTA3D2IN-> + pui32ClientTAFenceSyncOffset, + psRGXKickTA3D2IN->ui32ClientTAFenceCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = 
PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) + { + ui32ClientTAFenceValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32ClientTAFenceCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientTAFenceValueInt, + (const void __user *)psRGXKickTA3D2IN-> + pui32ClientTAFenceValue, + psRGXKickTA3D2IN->ui32ClientTAFenceCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) + { + psClientTAUpdateSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *); + hClientTAUpdateSyncPrimBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * + sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hClientTAUpdateSyncPrimBlockInt2, + (const void __user *)psRGXKickTA3D2IN-> + phClientTAUpdateSyncPrimBlock, + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * + sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) + { + ui32ClientTAUpdateSyncOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0) + { + if 
(OSCopyFromUser + (NULL, ui32ClientTAUpdateSyncOffsetInt, + (const void __user *)psRGXKickTA3D2IN-> + pui32ClientTAUpdateSyncOffset, + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) + { + ui32ClientTAUpdateValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientTAUpdateValueInt, + (const void __user *)psRGXKickTA3D2IN-> + pui32ClientTAUpdateValue, + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) + { + psClient3DUpdateSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32Client3DUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *); + hClient3DUpdateSyncPrimBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32Client3DUpdateCount * + sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hClient3DUpdateSyncPrimBlockInt2, + (const void __user *)psRGXKickTA3D2IN-> + phClient3DUpdateSyncPrimBlock, + psRGXKickTA3D2IN->ui32Client3DUpdateCount * + sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) + { + ui32Client3DUpdateSyncOffsetInt = + (IMG_UINT32 *) 
IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32Client3DUpdateCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32Client3DUpdateSyncOffsetInt, + (const void __user *)psRGXKickTA3D2IN-> + pui32Client3DUpdateSyncOffset, + psRGXKickTA3D2IN->ui32Client3DUpdateCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) + { + ui32Client3DUpdateValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32Client3DUpdateCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32Client3DUpdateValueInt, + (const void __user *)psRGXKickTA3D2IN-> + pui32Client3DUpdateValue, + psRGXKickTA3D2IN->ui32Client3DUpdateCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + + { + uiUpdateFenceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceNameInt, + (const void __user *)psRGXKickTA3D2IN->puiUpdateFenceName, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + ((IMG_CHAR *) + uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + + { + uiUpdateFenceName3DInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += 
PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceName3DInt, + (const void __user *)psRGXKickTA3D2IN-> + puiUpdateFenceName3D, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + ((IMG_CHAR *) + uiUpdateFenceName3DInt)[(PVRSRV_SYNC_NAME_LENGTH * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + if (psRGXKickTA3D2IN->ui32TACmdSize != 0) + { + psTACmdInt = + (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, psTACmdInt, + (const void __user *)psRGXKickTA3D2IN->psTACmd, + psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) != + PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui323DPRCmdSize != 0) + { + ps3DPRCmdInt = + (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, ps3DPRCmdInt, + (const void __user *)psRGXKickTA3D2IN->ps3DPRCmd, + psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) != + PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui323DCmdSize != 0) + { + ps3DCmdInt = + (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, 
ps3DCmdInt, + (const void __user *)psRGXKickTA3D2IN->ps3DCmd, + psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) != + PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0) + { + ui32SyncPMRFlagsInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32SyncPMRFlagsInt, + (const void __user *)psRGXKickTA3D2IN->pui32SyncPMRFlags, + psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != + PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0) + { + psSyncPMRsInt = + (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *); + hSyncPMRsInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hSyncPMRsInt2, + (const void __user *)psRGXKickTA3D2IN->phSyncPMRs, + psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != + PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRenderContextInt, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection-> + psHandleBase, + (void **) + &psClientTAFenceSyncPrimBlockInt + [i], + hClientTAFenceSyncPrimBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection-> + psHandleBase, + (void **) + &psClientTAUpdateSyncPrimBlockInt + [i], + hClientTAUpdateSyncPrimBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection-> + psHandleBase, + (void **) + &psClient3DUpdateSyncPrimBlockInt + [i], + hClient3DUpdateSyncPrimBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + } + + /* 
Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPRFenceUFOSyncPrimBlockInt, + hPRFenceUFOSyncPrimBlock, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + + if (psRGXKickTA3D2IN->hKMHWRTDataSet) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psKMHWRTDataSetInt, + hKMHWRTDataSet, + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, + IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + + if (psRGXKickTA3D2IN->hZSBuffer) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psZSBufferInt, + hZSBuffer, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, + IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + + if (psRGXKickTA3D2IN->hMSAAScratchBuffer) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psMSAAScratchBufferInt, + hMSAAScratchBuffer, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, + IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection-> + psHandleBase, + (void **) + &psSyncPMRsInt[i], + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if 
(unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXKickTA3D2OUT->eError = + PVRSRVRGXKickTA3DKM(psRenderContextInt, + psRGXKickTA3D2IN->ui32ClientCacheOpSeqNum, + psRGXKickTA3D2IN->ui32ClientTAFenceCount, + psClientTAFenceSyncPrimBlockInt, + ui32ClientTAFenceSyncOffsetInt, + ui32ClientTAFenceValueInt, + psRGXKickTA3D2IN->ui32ClientTAUpdateCount, + psClientTAUpdateSyncPrimBlockInt, + ui32ClientTAUpdateSyncOffsetInt, + ui32ClientTAUpdateValueInt, + psRGXKickTA3D2IN->ui32Client3DUpdateCount, + psClient3DUpdateSyncPrimBlockInt, + ui32Client3DUpdateSyncOffsetInt, + ui32Client3DUpdateValueInt, + psPRFenceUFOSyncPrimBlockInt, + psRGXKickTA3D2IN->ui32FRFenceUFOSyncOffset, + psRGXKickTA3D2IN->ui32FRFenceValue, + psRGXKickTA3D2IN->hCheckFence, + psRGXKickTA3D2IN->hUpdateTimeline, + &psRGXKickTA3D2OUT->hUpdateFence, + uiUpdateFenceNameInt, + psRGXKickTA3D2IN->hCheckFence3D, + psRGXKickTA3D2IN->hUpdateTimeline3D, + &psRGXKickTA3D2OUT->hUpdateFence3D, + uiUpdateFenceName3DInt, + psRGXKickTA3D2IN->ui32TACmdSize, + psTACmdInt, + psRGXKickTA3D2IN->ui323DPRCmdSize, + ps3DPRCmdInt, + psRGXKickTA3D2IN->ui323DCmdSize, + ps3DCmdInt, + psRGXKickTA3D2IN->ui32ExtJobRef, + psRGXKickTA3D2IN->bbKickTA, + psRGXKickTA3D2IN->bbKickPR, + psRGXKickTA3D2IN->bbKick3D, + psRGXKickTA3D2IN->bbAbort, + psRGXKickTA3D2IN->ui32PDumpFlags, + psKMHWRTDataSetInt, + psZSBufferInt, + psMSAAScratchBufferInt, + psRGXKickTA3D2IN->ui32SyncPMRCount, + ui32SyncPMRFlagsInt, + psSyncPMRsInt, + psRGXKickTA3D2IN->ui32RenderTargetSize, + psRGXKickTA3D2IN->ui32NumberOfDrawCalls, + psRGXKickTA3D2IN->ui32NumberOfIndices, + psRGXKickTA3D2IN->ui32NumberOfMRTs, + psRGXKickTA3D2IN->ui64Deadline); + +RGXKickTA3D2_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psRenderContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + } + + if (hClientTAFenceSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hClientTAFenceSyncPrimBlockInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + hClientTAFenceSyncPrimBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + + if (hClientTAUpdateSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hClientTAUpdateSyncPrimBlockInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + hClientTAUpdateSyncPrimBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + + if (hClient3DUpdateSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hClient3DUpdateSyncPrimBlockInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + hClient3DUpdateSyncPrimBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + + /* Unreference the previously looked up handle */ + if (psPRFenceUFOSyncPrimBlockInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPRFenceUFOSyncPrimBlock, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + + if (psRGXKickTA3D2IN->hKMHWRTDataSet) + { + + /* Unreference the previously looked up handle */ + if (psKMHWRTDataSetInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hKMHWRTDataSet, + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET); + } + } + + if (psRGXKickTA3D2IN->hZSBuffer) + { + + /* Unreference the previously 
looked up handle */ + if (psZSBufferInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hZSBuffer, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); + } + } + + if (psRGXKickTA3D2IN->hMSAAScratchBuffer) + { + + /* Unreference the previously looked up handle */ + if (psMSAAScratchBufferInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hMSAAScratchBuffer, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); + } + } + + if (hSyncPMRsInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hSyncPMRsInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + } + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSetRenderContextProperty(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXSetRenderContextPropertyIN_UI8, + IMG_UINT8 * + psRGXSetRenderContextPropertyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY + *psRGXSetRenderContextPropertyIN = + (PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetRenderContextPropertyIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY + *psRGXSetRenderContextPropertyOUT = + (PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetRenderContextPropertyOUT_UI8, 0); + + IMG_HANDLE hRenderContext = + psRGXSetRenderContextPropertyIN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetRenderContextPropertyOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRenderContextInt, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXSetRenderContextPropertyOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetRenderContextProperty_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetRenderContextPropertyOUT->eError = + PVRSRVRGXSetRenderContextPropertyKM(psRenderContextInt, + psRGXSetRenderContextPropertyIN-> + ui32Property, + psRGXSetRenderContextPropertyIN-> + ui64Input, + &psRGXSetRenderContextPropertyOUT-> + ui64Output); + +RGXSetRenderContextProperty_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psRenderContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRGXTA3DBridge(void); +PVRSRV_ERROR DeinitRGXTA3DBridge(void); + +/* + * Register all RGXTA3D functions with services + */ +PVRSRV_ERROR InitRGXTA3DBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET, + PVRSRVBridgeRGXCreateHWRTDataSet, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET, + PVRSRVBridgeRGXDestroyHWRTDataSet, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER, + PVRSRVBridgeRGXCreateZSBuffer, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER, + PVRSRVBridgeRGXDestroyZSBuffer, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER, + PVRSRVBridgeRGXPopulateZSBuffer, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER, + PVRSRVBridgeRGXUnpopulateZSBuffer, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST, + PVRSRVBridgeRGXCreateFreeList, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST, + PVRSRVBridgeRGXDestroyFreeList, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT, + PVRSRVBridgeRGXCreateRenderContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT, + PVRSRVBridgeRGXDestroyRenderContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY, + PVRSRVBridgeRGXSetRenderContextPriority, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + 
PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON, + PVRSRVBridgeRGXGetLastRenderContextResetReason, + NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED, + PVRSRVBridgeRGXRenderContextStalled, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2, + PVRSRVBridgeRGXKickTA3D2, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY, + PVRSRVBridgeRGXSetRenderContextProperty, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxta3d functions with services + */ +PVRSRV_ERROR DeinitRGXTA3DBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED); + + 
UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/rgxtq2_bridge/common_rgxtq2_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxtq2_bridge/common_rgxtq2_bridge.h new file mode 100644 index 000000000000..18fd7ff74d50 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxtq2_bridge/common_rgxtq2_bridge.h @@ -0,0 +1,225 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxtq2 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxtq2 +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_RGXTQ2_BRIDGE_H +#define COMMON_RGXTQ2_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" +#include "pvrsrv_sync_km.h" + +#define PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2 PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+5 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+6 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+7 +#define PVRSRV_BRIDGE_RGXTQ2_CMD_LAST (PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+7) + +/******************************************* + RGXTDMCreateTransferContext + *******************************************/ + +/* Bridge in structure for RGXTDMCreateTransferContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT_TAG +{ + IMG_UINT32 ui32Priority; + IMG_HANDLE hPrivData; + IMG_UINT32 ui32PackedCCBSizeU88; + IMG_UINT32 ui32ContextFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT; + +/* Bridge out structure for RGXTDMCreateTransferContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT_TAG +{ + IMG_HANDLE hTransferContext; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT; + +/******************************************* + RGXTDMDestroyTransferContext + *******************************************/ + +/* Bridge in structure for 
RGXTDMDestroyTransferContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT_TAG +{ + IMG_HANDLE hTransferContext; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT; + +/* Bridge out structure for RGXTDMDestroyTransferContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT; + +/******************************************* + RGXTDMSetTransferContextPriority + *******************************************/ + +/* Bridge in structure for RGXTDMSetTransferContextPriority */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY_TAG +{ + IMG_HANDLE hTransferContext; + IMG_UINT32 ui32Priority; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY; + +/* Bridge out structure for RGXTDMSetTransferContextPriority */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY; + +/******************************************* + RGXTDMNotifyWriteOffsetUpdate + *******************************************/ + +/* Bridge in structure for RGXTDMNotifyWriteOffsetUpdate */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE_TAG +{ + IMG_HANDLE hTransferContext; + IMG_UINT32 ui32PDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE; + +/* Bridge out structure for RGXTDMNotifyWriteOffsetUpdate */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE; + +/******************************************* + RGXTDMSubmitTransfer2 + *******************************************/ + +/* Bridge in structure for RGXTDMSubmitTransfer2 */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2_TAG +{ + IMG_HANDLE hTransferContext; + IMG_UINT32 
ui32PDumpFlags; + IMG_UINT32 ui32ClientCacheOpSeqNum; + IMG_UINT32 ui32ClientUpdateCount; + IMG_HANDLE *phUpdateUFOSyncPrimBlock; + IMG_UINT32 *pui32UpdateSyncOffset; + IMG_UINT32 *pui32UpdateValue; + PVRSRV_FENCE hCheckFenceFD; + PVRSRV_TIMELINE hUpdateTimeline; + IMG_CHAR *puiUpdateFenceName; + IMG_UINT32 ui32CommandSize; + IMG_UINT8 *pui8FWCommand; + IMG_UINT32 ui32ExternalJobReference; + IMG_UINT32 ui32SyncPMRCount; + IMG_UINT32 *pui32SyncPMRFlags; + IMG_HANDLE *phSyncPMRs; + IMG_UINT32 ui32Characteristic1; + IMG_UINT32 ui32Characteristic2; + IMG_UINT64 ui64DeadlineInus; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2; + +/* Bridge out structure for RGXTDMSubmitTransfer2 */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2_TAG +{ + PVRSRV_FENCE hUpdateFence; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2; + +/******************************************* + RGXTDMGetSharedMemory + *******************************************/ + +/* Bridge in structure for RGXTDMGetSharedMemory */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY; + +/* Bridge out structure for RGXTDMGetSharedMemory */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY_TAG +{ + IMG_HANDLE hCLIPMRMem; + IMG_HANDLE hUSCPMRMem; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY; + +/******************************************* + RGXTDMReleaseSharedMemory + *******************************************/ + +/* Bridge in structure for RGXTDMReleaseSharedMemory */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY_TAG +{ + IMG_HANDLE hPMRMem; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY; + +/* Bridge out structure for RGXTDMReleaseSharedMemory */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY_TAG +{ + PVRSRV_ERROR eError; +} 
__attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY; + +/******************************************* + RGXTDMSetTransferContextProperty + *******************************************/ + +/* Bridge in structure for RGXTDMSetTransferContextProperty */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY_TAG +{ + IMG_HANDLE hTransferContext; + IMG_UINT32 ui32Property; + IMG_UINT64 ui64Input; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY; + +/* Bridge out structure for RGXTDMSetTransferContextProperty */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY_TAG +{ + IMG_UINT64 ui64Output; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY; + +#endif /* COMMON_RGXTQ2_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/rgxtq2_bridge/server_rgxtq2_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxtq2_bridge/server_rgxtq2_bridge.c new file mode 100644 index 000000000000..8c200e44084e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/rgxtq2_bridge/server_rgxtq2_bridge.c @@ -0,0 +1,1115 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxtq2 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxtq2 +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxtdmtransfer.h" + +#include "common_rgxtq2_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _RGXTDMCreateTransferContextpsTransferContextIntRelease(void + *pvData) +{ + PVRSRV_ERROR eError; + eError = + PVRSRVRGXTDMDestroyTransferContextKM((RGX_SERVER_TQ_TDM_CONTEXT *) + pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXTDMCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXTDMCreateTransferContextIN_UI8, + IMG_UINT8 * + psRGXTDMCreateTransferContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT + *psRGXTDMCreateTransferContextIN = + (PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT *) + IMG_OFFSET_ADDR(psRGXTDMCreateTransferContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT + *psRGXTDMCreateTransferContextOUT = + (PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT *) + IMG_OFFSET_ADDR(psRGXTDMCreateTransferContextOUT_UI8, 0); + + IMG_HANDLE hPrivData = psRGXTDMCreateTransferContextIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXTDMCreateTransferContextOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + IMG_TRUE); + if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMCreateTransferContext_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXTDMCreateTransferContextOUT->eError = + PVRSRVRGXTDMCreateTransferContextKM(psConnection, + OSGetDevNode(psConnection), + psRGXTDMCreateTransferContextIN-> + ui32Priority, hPrivDataInt, + psRGXTDMCreateTransferContextIN-> + ui32PackedCCBSizeU88, + psRGXTDMCreateTransferContextIN-> + ui32ContextFlags, + &psTransferContextInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)) + { + goto RGXTDMCreateTransferContext_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRGXTDMCreateTransferContextOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXTDMCreateTransferContextOUT-> + hTransferContext, + (void *)psTransferContextInt, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXTDMCreateTransferContextpsTransferContextIntRelease); + if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMCreateTransferContext_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXTDMCreateTransferContext_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK) + { + if (psTransferContextInt) + { + PVRSRVRGXTDMDestroyTransferContextKM + (psTransferContextInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXTDMDestroyTransferContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXTDMDestroyTransferContextIN_UI8, + IMG_UINT8 * + psRGXTDMDestroyTransferContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT + *psRGXTDMDestroyTransferContextIN = + (PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT *) + IMG_OFFSET_ADDR(psRGXTDMDestroyTransferContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT + *psRGXTDMDestroyTransferContextOUT = + (PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT *) + IMG_OFFSET_ADDR(psRGXTDMDestroyTransferContextOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psRGXTDMDestroyTransferContextOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psRGXTDMDestroyTransferContextIN-> + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); + if (unlikely + ((psRGXTDMDestroyTransferContextOUT->eError != PVRSRV_OK) + && (psRGXTDMDestroyTransferContextOUT->eError != + PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString + (psRGXTDMDestroyTransferContextOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMDestroyTransferContext_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXTDMDestroyTransferContext_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXTDMSetTransferContextPriority(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXTDMSetTransferContextPriorityIN_UI8, + IMG_UINT8 * + psRGXTDMSetTransferContextPriorityOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY + *psRGXTDMSetTransferContextPriorityIN = + (PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPriorityIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY + *psRGXTDMSetTransferContextPriorityOUT = + (PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPriorityOUT_UI8, 0); + + IMG_HANDLE hTransferContext = + psRGXTDMSetTransferContextPriorityIN->hTransferContext; + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXTDMSetTransferContextPriorityOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psTransferContextInt, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, + IMG_TRUE); + if (unlikely + (psRGXTDMSetTransferContextPriorityOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMSetTransferContextPriority_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXTDMSetTransferContextPriorityOUT->eError = + PVRSRVRGXTDMSetTransferContextPriorityKM(psConnection, + OSGetDevNode(psConnection), + psTransferContextInt, + psRGXTDMSetTransferContextPriorityIN-> + ui32Priority); + +RGXTDMSetTransferContextPriority_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psTransferContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXTDMNotifyWriteOffsetUpdateIN_UI8, + IMG_UINT8 * + psRGXTDMNotifyWriteOffsetUpdateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE + *psRGXTDMNotifyWriteOffsetUpdateIN = + (PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE *) + IMG_OFFSET_ADDR(psRGXTDMNotifyWriteOffsetUpdateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE + *psRGXTDMNotifyWriteOffsetUpdateOUT = + (PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE *) + IMG_OFFSET_ADDR(psRGXTDMNotifyWriteOffsetUpdateOUT_UI8, 0); + + IMG_HANDLE hTransferContext = + psRGXTDMNotifyWriteOffsetUpdateIN->hTransferContext; + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXTDMNotifyWriteOffsetUpdateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psTransferContextInt, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXTDMNotifyWriteOffsetUpdateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMNotifyWriteOffsetUpdate_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXTDMNotifyWriteOffsetUpdateOUT->eError = + PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(psTransferContextInt, + psRGXTDMNotifyWriteOffsetUpdateIN-> + ui32PDumpFlags); + +RGXTDMNotifyWriteOffsetUpdate_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psTransferContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXTDMSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXTDMSubmitTransfer2IN_UI8, + IMG_UINT8 * psRGXTDMSubmitTransfer2OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2 *psRGXTDMSubmitTransfer2IN = + (PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2 *) + IMG_OFFSET_ADDR(psRGXTDMSubmitTransfer2IN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2 *psRGXTDMSubmitTransfer2OUT = + (PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2 *) + IMG_OFFSET_ADDR(psRGXTDMSubmitTransfer2OUT_UI8, 0); + + IMG_HANDLE hTransferContext = + psRGXTDMSubmitTransfer2IN->hTransferContext; + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; + SYNC_PRIMITIVE_BLOCK **psUpdateUFOSyncPrimBlockInt = NULL; + IMG_HANDLE *hUpdateUFOSyncPrimBlockInt2 = NULL; + IMG_UINT32 *ui32UpdateSyncOffsetInt = NULL; + IMG_UINT32 *ui32UpdateValueInt = NULL; + IMG_CHAR *uiUpdateFenceNameInt = NULL; + IMG_UINT8 *ui8FWCommandInt = NULL; + IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; + PMR **psSyncPMRsInt = NULL; + IMG_HANDLE *hSyncPMRsInt2 = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + 
sizeof(SYNC_PRIMITIVE_BLOCK *)) + + (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_HANDLE)) + + (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32)) + + (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32)) + + (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + (psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8)) + + (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + + (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *)) + + (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + + 0; + + if (unlikely + (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount > + PVRSRV_MAX_SYNCS)) + { + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXTDMSubmitTransfer2_exit; + } + + if (unlikely + (psRGXTDMSubmitTransfer2IN->ui32CommandSize > + RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXTDMSubmitTransfer2_exit; + } + + if (unlikely + (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) + { + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXTDMSubmitTransfer2_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXTDMSubmitTransfer2IN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXTDMSubmitTransfer2IN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXTDMSubmitTransfer2_exit; + } + } + } + + if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0) + { + psUpdateUFOSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *); + hUpdateUFOSyncPrimBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hUpdateUFOSyncPrimBlockInt2, + (const void __user *)psRGXTDMSubmitTransfer2IN-> + phUpdateUFOSyncPrimBlock, + psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer2_exit; + } + } + if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0) + { + ui32UpdateSyncOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32UpdateSyncOffsetInt, + (const void __user *)psRGXTDMSubmitTransfer2IN-> + pui32UpdateSyncOffset, + 
psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer2_exit; + } + } + if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0) + { + ui32UpdateValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32UpdateValueInt, + (const void __user *)psRGXTDMSubmitTransfer2IN-> + pui32UpdateValue, + psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer2_exit; + } + } + + { + uiUpdateFenceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceNameInt, + (const void __user *)psRGXTDMSubmitTransfer2IN-> + puiUpdateFenceName, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer2_exit; + } + ((IMG_CHAR *) + uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + if (psRGXTDMSubmitTransfer2IN->ui32CommandSize != 0) + { + ui8FWCommandInt = + (IMG_UINT8 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer2IN->ui32CommandSize * + sizeof(IMG_UINT8); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8) > 0) + { + if (OSCopyFromUser + (NULL, ui8FWCommandInt, + (const void __user 
*)psRGXTDMSubmitTransfer2IN-> + pui8FWCommand, + psRGXTDMSubmitTransfer2IN->ui32CommandSize * + sizeof(IMG_UINT8)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer2_exit; + } + } + if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount != 0) + { + ui32SyncPMRFlagsInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > + 0) + { + if (OSCopyFromUser + (NULL, ui32SyncPMRFlagsInt, + (const void __user *)psRGXTDMSubmitTransfer2IN-> + pui32SyncPMRFlags, + psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer2_exit; + } + } + if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount != 0) + { + psSyncPMRsInt = + (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *); + hSyncPMRsInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * + sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > + 0) + { + if (OSCopyFromUser + (NULL, hSyncPMRsInt2, + (const void __user *)psRGXTDMSubmitTransfer2IN->phSyncPMRs, + psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * + sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer2_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psTransferContextInt, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, + IMG_TRUE); + if (unlikely(psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMSubmitTransfer2_exit; + } + + { + IMG_UINT32 i; + + for (i = 0; + i < psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount; i++) + { + /* Look up the address from the handle */ + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection-> + psHandleBase, + (void **) + &psUpdateUFOSyncPrimBlockInt + [i], + hUpdateUFOSyncPrimBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely + (psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMSubmitTransfer2_exit; + } + } + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount; + i++) + { + /* Look up the address from the handle */ + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection-> + psHandleBase, + (void **) + &psSyncPMRsInt[i], + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely + (psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMSubmitTransfer2_exit; + } + } + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRVRGXTDMSubmitTransferKM(psTransferContextInt, + psRGXTDMSubmitTransfer2IN-> + ui32PDumpFlags, + psRGXTDMSubmitTransfer2IN-> + ui32ClientCacheOpSeqNum, + psRGXTDMSubmitTransfer2IN-> + ui32ClientUpdateCount, + psUpdateUFOSyncPrimBlockInt, + ui32UpdateSyncOffsetInt, + ui32UpdateValueInt, + psRGXTDMSubmitTransfer2IN-> + hCheckFenceFD, + psRGXTDMSubmitTransfer2IN-> + hUpdateTimeline, + &psRGXTDMSubmitTransfer2OUT-> + hUpdateFence, uiUpdateFenceNameInt, + psRGXTDMSubmitTransfer2IN-> + ui32CommandSize, ui8FWCommandInt, + psRGXTDMSubmitTransfer2IN-> + ui32ExternalJobReference, + psRGXTDMSubmitTransfer2IN-> + ui32SyncPMRCount, ui32SyncPMRFlagsInt, + psSyncPMRsInt, + psRGXTDMSubmitTransfer2IN-> + ui32Characteristic1, + psRGXTDMSubmitTransfer2IN-> + ui32Characteristic2, + psRGXTDMSubmitTransfer2IN-> + ui64DeadlineInus); + +RGXTDMSubmitTransfer2_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psTransferContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); + } + + if (hUpdateUFOSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; + i < psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hUpdateUFOSyncPrimBlockInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + hUpdateUFOSyncPrimBlockInt2 + [i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + + if (hSyncPMRsInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount; + i++) + { + + /* Unreference the previously looked up handle */ + if (hSyncPMRsInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + } + } + /* Release now we have cleaned 
up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static PVRSRV_ERROR _RGXTDMGetSharedMemorypsCLIPMRMemIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVRGXTDMReleaseSharedMemoryKM((PMR *) pvData); + return eError; +} + +static PVRSRV_ERROR _RGXTDMGetSharedMemorypsUSCPMRMemIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVRGXTDMReleaseSharedMemoryKM((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXTDMGetSharedMemory(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXTDMGetSharedMemoryIN_UI8, + IMG_UINT8 * psRGXTDMGetSharedMemoryOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY *psRGXTDMGetSharedMemoryIN = + (PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY *) + IMG_OFFSET_ADDR(psRGXTDMGetSharedMemoryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY *psRGXTDMGetSharedMemoryOUT = + (PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY *) + IMG_OFFSET_ADDR(psRGXTDMGetSharedMemoryOUT_UI8, 0); + + PMR *psCLIPMRMemInt = NULL; + PMR *psUSCPMRMemInt = NULL; + + PVR_UNREFERENCED_PARAMETER(psRGXTDMGetSharedMemoryIN); + + psRGXTDMGetSharedMemoryOUT->eError = + PVRSRVRGXTDMGetSharedMemoryKM(psConnection, + OSGetDevNode(psConnection), + &psCLIPMRMemInt, &psUSCPMRMemInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK)) + { + goto RGXTDMGetSharedMemory_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXTDMGetSharedMemoryOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXTDMGetSharedMemoryOUT->hCLIPMRMem, + (void *)psCLIPMRMemInt, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXTDMGetSharedMemorypsCLIPMRMemIntRelease); + if (unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMGetSharedMemory_exit; + } + + psRGXTDMGetSharedMemoryOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXTDMGetSharedMemoryOUT->hUSCPMRMem, + (void *)psUSCPMRMemInt, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXTDMGetSharedMemorypsUSCPMRMemIntRelease); + if (unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMGetSharedMemory_exit; + } + + /* Release now we have created handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXTDMGetSharedMemory_exit: + + if (psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK) + { + if (psCLIPMRMemInt) + { + PVRSRVRGXTDMReleaseSharedMemoryKM(psCLIPMRMemInt); + } + if (psUSCPMRMemInt) + { + PVRSRVRGXTDMReleaseSharedMemoryKM(psUSCPMRMemInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXTDMReleaseSharedMemory(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXTDMReleaseSharedMemoryIN_UI8, + IMG_UINT8 * + psRGXTDMReleaseSharedMemoryOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY + *psRGXTDMReleaseSharedMemoryIN = + (PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY *) + IMG_OFFSET_ADDR(psRGXTDMReleaseSharedMemoryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY + *psRGXTDMReleaseSharedMemoryOUT = + (PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY *) + IMG_OFFSET_ADDR(psRGXTDMReleaseSharedMemoryOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psRGXTDMReleaseSharedMemoryOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psRGXTDMReleaseSharedMemoryIN-> + hPMRMem, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE); + if (unlikely + ((psRGXTDMReleaseSharedMemoryOUT->eError != PVRSRV_OK) + && (psRGXTDMReleaseSharedMemoryOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psRGXTDMReleaseSharedMemoryOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMReleaseSharedMemory_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXTDMReleaseSharedMemory_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXTDMSetTransferContextProperty(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXTDMSetTransferContextPropertyIN_UI8, + IMG_UINT8 * + psRGXTDMSetTransferContextPropertyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY + *psRGXTDMSetTransferContextPropertyIN = + (PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPropertyIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY + *psRGXTDMSetTransferContextPropertyOUT = + (PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPropertyOUT_UI8, 0); + + IMG_HANDLE hTransferContext = + psRGXTDMSetTransferContextPropertyIN->hTransferContext; + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXTDMSetTransferContextPropertyOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psTransferContextInt, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, + IMG_TRUE); + if (unlikely + (psRGXTDMSetTransferContextPropertyOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMSetTransferContextProperty_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXTDMSetTransferContextPropertyOUT->eError = + PVRSRVRGXTDMSetTransferContextPropertyKM(psTransferContextInt, + psRGXTDMSetTransferContextPropertyIN-> + ui32Property, + psRGXTDMSetTransferContextPropertyIN-> + ui64Input, + &psRGXTDMSetTransferContextPropertyOUT-> + ui64Output); + +RGXTDMSetTransferContextProperty_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psTransferContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRGXTQ2Bridge(void); +PVRSRV_ERROR DeinitRGXTQ2Bridge(void); + +/* + * Register all RGXTQ2 functions with services + */ +PVRSRV_ERROR InitRGXTQ2Bridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT, + PVRSRVBridgeRGXTDMCreateTransferContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT, + PVRSRVBridgeRGXTDMDestroyTransferContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY, + PVRSRVBridgeRGXTDMSetTransferContextPriority, + NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE, + PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2, + PVRSRVBridgeRGXTDMSubmitTransfer2, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY, + PVRSRVBridgeRGXTDMGetSharedMemory, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY, + PVRSRVBridgeRGXTDMReleaseSharedMemory, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY, + PVRSRVBridgeRGXTDMSetTransferContextProperty, + NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxtq2 functions with services + */ +PVRSRV_ERROR 
DeinitRGXTQ2Bridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/ri_bridge/client_ri_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/ri_bridge/client_ri_bridge.h new file mode 100644 index 000000000000..6778fce58df7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/ri_bridge/client_ri_bridge.h @@ -0,0 +1,120 @@ +/******************************************************************************* +@File +@Title Client bridge header for ri +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for ri +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef CLIENT_RI_BRIDGE_H +#define CLIENT_RI_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_ri_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWritePMREntry(IMG_HANDLE hBridge, + IMG_HANDLE + hPMRHandle); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteMEMDESCEntry(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMRHandle, + IMG_UINT32 + ui32TextBSize, + const IMG_CHAR + * puiTextB, + IMG_UINT64 + ui64Offset, + IMG_UINT64 + ui64Size, + IMG_BOOL + bIsImport, + IMG_BOOL + bIsSuballoc, + IMG_HANDLE * + phRIHandle); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteProcListEntry(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32TextBSize, + const IMG_CHAR + * puiTextB, + IMG_UINT64 + ui64Size, + IMG_UINT64 + ui64DevVAddr, + IMG_HANDLE * + phRIHandle); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIUpdateMEMDESCAddr(IMG_HANDLE + hBridge, + IMG_HANDLE + hRIHandle, + IMG_DEV_VIRTADDR + sAddr); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDeleteMEMDESCEntry(IMG_HANDLE + hBridge, + IMG_HANDLE + hRIHandle); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpList(IMG_HANDLE hBridge, + IMG_HANDLE hPMRHandle); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpAll(IMG_HANDLE hBridge); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpProcess(IMG_HANDLE hBridge, + IMG_PID ui32Pid); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWritePMREntryWithOwner(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMRHandle, + IMG_PID + ui32Owner); + +#endif /* CLIENT_RI_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/ri_bridge/client_ri_direct_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/ri_bridge/client_ri_direct_bridge.c new file mode 100644 index 000000000000..d7c8dfb0769a --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/generated/volcanic/ri_bridge/client_ri_direct_bridge.c @@ -0,0 +1,217 @@ +/******************************************************************************* +@File +@Title Direct client bridge for ri +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for ri + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include "client_ri_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "ri_typedefs.h" + +#include "ri_server.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWritePMREntry(IMG_HANDLE hBridge, + IMG_HANDLE + hPMRHandle) +{ + PVRSRV_ERROR eError; + PMR *psPMRHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRHandleInt = (PMR *) hPMRHandle; + + eError = RIWritePMREntryKM(psPMRHandleInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteMEMDESCEntry(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMRHandle, + IMG_UINT32 + ui32TextBSize, + const IMG_CHAR + * puiTextB, + IMG_UINT64 + ui64Offset, + IMG_UINT64 + ui64Size, + IMG_BOOL + bIsImport, + IMG_BOOL + bIsSuballoc, + IMG_HANDLE * + phRIHandle) +{ + PVRSRV_ERROR eError; + PMR *psPMRHandleInt; + RI_HANDLE psRIHandleInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRHandleInt = (PMR *) hPMRHandle; + + eError = + RIWriteMEMDESCEntryKM(psPMRHandleInt, + ui32TextBSize, + puiTextB, + ui64Offset, + ui64Size, + bIsImport, bIsSuballoc, &psRIHandleInt); + + *phRIHandle = psRIHandleInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteProcListEntry(IMG_HANDLE + hBridge, + IMG_UINT32 + ui32TextBSize, + const IMG_CHAR + * puiTextB, + IMG_UINT64 + ui64Size, + IMG_UINT64 + ui64DevVAddr, + IMG_HANDLE * + 
phRIHandle) +{ + PVRSRV_ERROR eError; + RI_HANDLE psRIHandleInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = + RIWriteProcListEntryKM(ui32TextBSize, + puiTextB, + ui64Size, ui64DevVAddr, &psRIHandleInt); + + *phRIHandle = psRIHandleInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIUpdateMEMDESCAddr(IMG_HANDLE + hBridge, + IMG_HANDLE + hRIHandle, + IMG_DEV_VIRTADDR + sAddr) +{ + PVRSRV_ERROR eError; + RI_HANDLE psRIHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psRIHandleInt = (RI_HANDLE) hRIHandle; + + eError = RIUpdateMEMDESCAddrKM(psRIHandleInt, sAddr); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDeleteMEMDESCEntry(IMG_HANDLE + hBridge, + IMG_HANDLE + hRIHandle) +{ + PVRSRV_ERROR eError; + RI_HANDLE psRIHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psRIHandleInt = (RI_HANDLE) hRIHandle; + + eError = RIDeleteMEMDESCEntryKM(psRIHandleInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpList(IMG_HANDLE hBridge, + IMG_HANDLE hPMRHandle) +{ + PVRSRV_ERROR eError; + PMR *psPMRHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRHandleInt = (PMR *) hPMRHandle; + + eError = RIDumpListKM(psPMRHandleInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpAll(IMG_HANDLE hBridge) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = RIDumpAllKM(); + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpProcess(IMG_HANDLE hBridge, + IMG_PID ui32Pid) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = RIDumpProcessKM(ui32Pid); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWritePMREntryWithOwner(IMG_HANDLE + hBridge, + IMG_HANDLE + hPMRHandle, + IMG_PID + ui32Owner) +{ + PVRSRV_ERROR eError; + PMR *psPMRHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRHandleInt = (PMR *) hPMRHandle; + + eError = 
RIWritePMREntryWithOwnerKM(psPMRHandleInt, ui32Owner); + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/ri_bridge/common_ri_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/ri_bridge/common_ri_bridge.h new file mode 100644 index 000000000000..e562671fff34 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/ri_bridge/common_ri_bridge.h @@ -0,0 +1,224 @@ +/******************************************************************************* +@File +@Title Common bridge header for ri +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for ri +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_RI_BRIDGE_H +#define COMMON_RI_BRIDGE_H + +#include <powervr/mem_types.h> + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "ri_typedefs.h" + +#define PVRSRV_BRIDGE_RI_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR PVRSRV_BRIDGE_RI_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RI_RIDUMPLIST PVRSRV_BRIDGE_RI_CMD_FIRST+5 +#define PVRSRV_BRIDGE_RI_RIDUMPALL PVRSRV_BRIDGE_RI_CMD_FIRST+6 +#define PVRSRV_BRIDGE_RI_RIDUMPPROCESS PVRSRV_BRIDGE_RI_CMD_FIRST+7 +#define PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER PVRSRV_BRIDGE_RI_CMD_FIRST+8 +#define PVRSRV_BRIDGE_RI_CMD_LAST (PVRSRV_BRIDGE_RI_CMD_FIRST+8) + +/******************************************* + RIWritePMREntry + *******************************************/ + +/* Bridge in structure for RIWritePMREntry */ +typedef struct PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY_TAG +{ + IMG_HANDLE
hPMRHandle; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY; + +/* Bridge out structure for RIWritePMREntry */ +typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY; + +/******************************************* + RIWriteMEMDESCEntry + *******************************************/ + +/* Bridge in structure for RIWriteMEMDESCEntry */ +typedef struct PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY_TAG +{ + IMG_HANDLE hPMRHandle; + IMG_UINT32 ui32TextBSize; + const IMG_CHAR *puiTextB; + IMG_UINT64 ui64Offset; + IMG_UINT64 ui64Size; + IMG_BOOL bIsImport; + IMG_BOOL bIsSuballoc; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY; + +/* Bridge out structure for RIWriteMEMDESCEntry */ +typedef struct PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY_TAG +{ + IMG_HANDLE hRIHandle; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY; + +/******************************************* + RIWriteProcListEntry + *******************************************/ + +/* Bridge in structure for RIWriteProcListEntry */ +typedef struct PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY_TAG +{ + IMG_UINT32 ui32TextBSize; + const IMG_CHAR *puiTextB; + IMG_UINT64 ui64Size; + IMG_UINT64 ui64DevVAddr; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY; + +/* Bridge out structure for RIWriteProcListEntry */ +typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY_TAG +{ + IMG_HANDLE hRIHandle; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY; + +/******************************************* + RIUpdateMEMDESCAddr + *******************************************/ + +/* Bridge in structure for RIUpdateMEMDESCAddr */ +typedef struct PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR_TAG +{ + IMG_HANDLE hRIHandle; + IMG_DEV_VIRTADDR sAddr; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR; + +/* Bridge out structure for 
RIUpdateMEMDESCAddr */ +typedef struct PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR; + +/******************************************* + RIDeleteMEMDESCEntry + *******************************************/ + +/* Bridge in structure for RIDeleteMEMDESCEntry */ +typedef struct PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY_TAG +{ + IMG_HANDLE hRIHandle; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY; + +/* Bridge out structure for RIDeleteMEMDESCEntry */ +typedef struct PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY; + +/******************************************* + RIDumpList + *******************************************/ + +/* Bridge in structure for RIDumpList */ +typedef struct PVRSRV_BRIDGE_IN_RIDUMPLIST_TAG +{ + IMG_HANDLE hPMRHandle; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIDUMPLIST; + +/* Bridge out structure for RIDumpList */ +typedef struct PVRSRV_BRIDGE_OUT_RIDUMPLIST_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIDUMPLIST; + +/******************************************* + RIDumpAll + *******************************************/ + +/* Bridge in structure for RIDumpAll */ +typedef struct PVRSRV_BRIDGE_IN_RIDUMPALL_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIDUMPALL; + +/* Bridge out structure for RIDumpAll */ +typedef struct PVRSRV_BRIDGE_OUT_RIDUMPALL_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIDUMPALL; + +/******************************************* + RIDumpProcess + *******************************************/ + +/* Bridge in structure for RIDumpProcess */ +typedef struct PVRSRV_BRIDGE_IN_RIDUMPPROCESS_TAG +{ + IMG_PID ui32Pid; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIDUMPPROCESS; + +/* Bridge out structure for RIDumpProcess */ +typedef struct 
PVRSRV_BRIDGE_OUT_RIDUMPPROCESS_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIDUMPPROCESS; + +/******************************************* + RIWritePMREntryWithOwner + *******************************************/ + +/* Bridge in structure for RIWritePMREntryWithOwner */ +typedef struct PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER_TAG +{ + IMG_HANDLE hPMRHandle; + IMG_PID ui32Owner; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER; + +/* Bridge out structure for RIWritePMREntryWithOwner */ +typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER; + +#endif /* COMMON_RI_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/ri_bridge/server_ri_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/ri_bridge/server_ri_bridge.c new file mode 100644 index 000000000000..c82ff7f3c0ff --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/ri_bridge/server_ri_bridge.c @@ -0,0 +1,808 @@ +/******************************************************************************* +@File +@Title Server bridge for ri +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for ri +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include <linux/uaccess.h> + +#include "img_defs.h" + +#include "ri_server.h" + +#include "common_ri_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include <linux/slab.h> + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeRIWritePMREntry(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRIWritePMREntryIN_UI8, + IMG_UINT8 * psRIWritePMREntryOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY *psRIWritePMREntryIN = + (PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY *) + IMG_OFFSET_ADDR(psRIWritePMREntryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY *psRIWritePMREntryOUT = + (PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY *) + IMG_OFFSET_ADDR(psRIWritePMREntryOUT_UI8, 0); + + IMG_HANDLE hPMRHandle = psRIWritePMREntryIN->hPMRHandle; + PMR *psPMRHandleInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRIWritePMREntryOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRHandleInt, + hPMRHandle, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psRIWritePMREntryOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RIWritePMREntry_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRIWritePMREntryOUT->eError = RIWritePMREntryKM(psPMRHandleInt); + +RIWritePMREntry_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMRHandle, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static PVRSRV_ERROR _RIWriteMEMDESCEntrypsRIHandleIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RIDeleteMEMDESCEntryKM((RI_HANDLE) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRIWriteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRIWriteMEMDESCEntryIN_UI8, + IMG_UINT8 * psRIWriteMEMDESCEntryOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY *psRIWriteMEMDESCEntryIN = + (PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY *) + IMG_OFFSET_ADDR(psRIWriteMEMDESCEntryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY *psRIWriteMEMDESCEntryOUT = + (PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY *) + IMG_OFFSET_ADDR(psRIWriteMEMDESCEntryOUT_UI8, 0); + + IMG_HANDLE hPMRHandle = psRIWriteMEMDESCEntryIN->hPMRHandle; + PMR *psPMRHandleInt = NULL; + IMG_CHAR *uiTextBInt = NULL; + RI_HANDLE psRIHandleInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) + 0; + + if (unlikely + (psRIWriteMEMDESCEntryIN->ui32TextBSize > + DEVMEM_ANNOTATION_MAX_LEN)) + { + psRIWriteMEMDESCEntryOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RIWriteMEMDESCEntry_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRIWriteMEMDESCEntryIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRIWriteMEMDESCEntryIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRIWriteMEMDESCEntryOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RIWriteMEMDESCEntry_exit; + } + } + } + + if (psRIWriteMEMDESCEntryIN->ui32TextBSize != 0) + { + uiTextBInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiTextBInt, + (const void __user *)psRIWriteMEMDESCEntryIN->puiTextB, + psRIWriteMEMDESCEntryIN->ui32TextBSize * + sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRIWriteMEMDESCEntryOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RIWriteMEMDESCEntry_exit; + } + ((IMG_CHAR *) + uiTextBInt)[(psRIWriteMEMDESCEntryIN->ui32TextBSize * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRIWriteMEMDESCEntryOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRHandleInt, + hPMRHandle, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RIWriteMEMDESCEntry_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRIWriteMEMDESCEntryOUT->eError = + RIWriteMEMDESCEntryKM(psPMRHandleInt, + psRIWriteMEMDESCEntryIN->ui32TextBSize, + uiTextBInt, + psRIWriteMEMDESCEntryIN->ui64Offset, + psRIWriteMEMDESCEntryIN->ui64Size, + psRIWriteMEMDESCEntryIN->bIsImport, + psRIWriteMEMDESCEntryIN->bIsSuballoc, + &psRIHandleInt); + /* Exit early if bridged call fails */ + if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)) + { + goto RIWriteMEMDESCEntry_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRIWriteMEMDESCEntryOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRIWriteMEMDESCEntryOUT->hRIHandle, + (void *)psRIHandleInt, + PVRSRV_HANDLE_TYPE_RI_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RIWriteMEMDESCEntrypsRIHandleIntRelease); + if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RIWriteMEMDESCEntry_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RIWriteMEMDESCEntry_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMRHandle, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + if (psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK) + { + if (psRIHandleInt) + { + RIDeleteMEMDESCEntryKM(psRIHandleInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static PVRSRV_ERROR _RIWriteProcListEntrypsRIHandleIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RIDeleteMEMDESCEntryKM((RI_HANDLE) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRIWriteProcListEntry(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRIWriteProcListEntryIN_UI8, + IMG_UINT8 * psRIWriteProcListEntryOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY *psRIWriteProcListEntryIN = + (PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY *) + IMG_OFFSET_ADDR(psRIWriteProcListEntryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY *psRIWriteProcListEntryOUT = + (PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY *) + IMG_OFFSET_ADDR(psRIWriteProcListEntryOUT_UI8, 0); + + IMG_CHAR *uiTextBInt = NULL; + RI_HANDLE psRIHandleInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) + 0; + + if (unlikely + (psRIWriteProcListEntryIN->ui32TextBSize > + DEVMEM_ANNOTATION_MAX_LEN)) + { + psRIWriteProcListEntryOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RIWriteProcListEntry_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRIWriteProcListEntryIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRIWriteProcListEntryIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRIWriteProcListEntryOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RIWriteProcListEntry_exit; + } + } + } + + if (psRIWriteProcListEntryIN->ui32TextBSize != 0) + { + uiTextBInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiTextBInt, + (const void __user *)psRIWriteProcListEntryIN->puiTextB, + psRIWriteProcListEntryIN->ui32TextBSize * + sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRIWriteProcListEntryOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RIWriteProcListEntry_exit; + } + ((IMG_CHAR *) + uiTextBInt)[(psRIWriteProcListEntryIN->ui32TextBSize * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + + psRIWriteProcListEntryOUT->eError = + RIWriteProcListEntryKM(psRIWriteProcListEntryIN->ui32TextBSize, + uiTextBInt, + psRIWriteProcListEntryIN->ui64Size, + psRIWriteProcListEntryIN->ui64DevVAddr, + &psRIHandleInt); + /* Exit early if bridged call fails */ + if (unlikely(psRIWriteProcListEntryOUT->eError != PVRSRV_OK)) + { + goto RIWriteProcListEntry_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psRIWriteProcListEntryOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRIWriteProcListEntryOUT->hRIHandle, + (void *)psRIHandleInt, + PVRSRV_HANDLE_TYPE_RI_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RIWriteProcListEntrypsRIHandleIntRelease); + if (unlikely(psRIWriteProcListEntryOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RIWriteProcListEntry_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RIWriteProcListEntry_exit: + + if (psRIWriteProcListEntryOUT->eError != PVRSRV_OK) + { + if (psRIHandleInt) + { + RIDeleteMEMDESCEntryKM(psRIHandleInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRIUpdateMEMDESCAddr(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRIUpdateMEMDESCAddrIN_UI8, + IMG_UINT8 * psRIUpdateMEMDESCAddrOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR *psRIUpdateMEMDESCAddrIN = + (PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR *) + IMG_OFFSET_ADDR(psRIUpdateMEMDESCAddrIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR *psRIUpdateMEMDESCAddrOUT = + (PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR *) + IMG_OFFSET_ADDR(psRIUpdateMEMDESCAddrOUT_UI8, 0); + + IMG_HANDLE hRIHandle = psRIUpdateMEMDESCAddrIN->hRIHandle; + RI_HANDLE psRIHandleInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRIUpdateMEMDESCAddrOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRIHandleInt, + hRIHandle, + PVRSRV_HANDLE_TYPE_RI_HANDLE, IMG_TRUE); + if (unlikely(psRIUpdateMEMDESCAddrOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RIUpdateMEMDESCAddr_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRIUpdateMEMDESCAddrOUT->eError = + RIUpdateMEMDESCAddrKM(psRIHandleInt, + psRIUpdateMEMDESCAddrIN->sAddr); + +RIUpdateMEMDESCAddr_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psRIHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRIHandle, + PVRSRV_HANDLE_TYPE_RI_HANDLE); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRIDeleteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRIDeleteMEMDESCEntryIN_UI8, + IMG_UINT8 * psRIDeleteMEMDESCEntryOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY *psRIDeleteMEMDESCEntryIN = + (PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY *) + IMG_OFFSET_ADDR(psRIDeleteMEMDESCEntryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY *psRIDeleteMEMDESCEntryOUT = + (PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY *) + IMG_OFFSET_ADDR(psRIDeleteMEMDESCEntryOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psRIDeleteMEMDESCEntryOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psRIDeleteMEMDESCEntryIN->hRIHandle, + PVRSRV_HANDLE_TYPE_RI_HANDLE); + if (unlikely + ((psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_OK) + && (psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psRIDeleteMEMDESCEntryOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto RIDeleteMEMDESCEntry_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +RIDeleteMEMDESCEntry_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeRIDumpList(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRIDumpListIN_UI8, + IMG_UINT8 * psRIDumpListOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIDUMPLIST *psRIDumpListIN = + (PVRSRV_BRIDGE_IN_RIDUMPLIST *) IMG_OFFSET_ADDR(psRIDumpListIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_RIDUMPLIST *psRIDumpListOUT = + (PVRSRV_BRIDGE_OUT_RIDUMPLIST *) + IMG_OFFSET_ADDR(psRIDumpListOUT_UI8, 0); + + IMG_HANDLE hPMRHandle = psRIDumpListIN->hPMRHandle; + PMR *psPMRHandleInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRIDumpListOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRHandleInt, + hPMRHandle, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psRIDumpListOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RIDumpList_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRIDumpListOUT->eError = RIDumpListKM(psPMRHandleInt); + +RIDumpList_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMRHandle, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRIDumpAll(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRIDumpAllIN_UI8, + IMG_UINT8 * psRIDumpAllOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIDUMPALL *psRIDumpAllIN = + (PVRSRV_BRIDGE_IN_RIDUMPALL *) IMG_OFFSET_ADDR(psRIDumpAllIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_RIDUMPALL *psRIDumpAllOUT = + (PVRSRV_BRIDGE_OUT_RIDUMPALL *) IMG_OFFSET_ADDR(psRIDumpAllOUT_UI8, + 0); + + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psRIDumpAllIN); + + psRIDumpAllOUT->eError = RIDumpAllKM(); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRIDumpProcess(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRIDumpProcessIN_UI8, + IMG_UINT8 * psRIDumpProcessOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIDUMPPROCESS *psRIDumpProcessIN = + (PVRSRV_BRIDGE_IN_RIDUMPPROCESS *) + IMG_OFFSET_ADDR(psRIDumpProcessIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RIDUMPPROCESS *psRIDumpProcessOUT = + (PVRSRV_BRIDGE_OUT_RIDUMPPROCESS *) + IMG_OFFSET_ADDR(psRIDumpProcessOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psConnection); + + psRIDumpProcessOUT->eError = + RIDumpProcessKM(psRIDumpProcessIN->ui32Pid); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRIWritePMREntryWithOwner(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRIWritePMREntryWithOwnerIN_UI8, + IMG_UINT8 * + psRIWritePMREntryWithOwnerOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER *psRIWritePMREntryWithOwnerIN + = + (PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER *) + IMG_OFFSET_ADDR(psRIWritePMREntryWithOwnerIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER + 
*psRIWritePMREntryWithOwnerOUT = + (PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER *) + IMG_OFFSET_ADDR(psRIWritePMREntryWithOwnerOUT_UI8, 0); + + IMG_HANDLE hPMRHandle = psRIWritePMREntryWithOwnerIN->hPMRHandle; + PMR *psPMRHandleInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRIWritePMREntryWithOwnerOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRHandleInt, + hPMRHandle, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (unlikely(psRIWritePMREntryWithOwnerOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RIWritePMREntryWithOwner_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRIWritePMREntryWithOwnerOUT->eError = + RIWritePMREntryWithOwnerKM(psPMRHandleInt, + psRIWritePMREntryWithOwnerIN->ui32Owner); + +RIWritePMREntryWithOwner_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMRHandle, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRIBridge(void); +PVRSRV_ERROR DeinitRIBridge(void); + +/* + * Register all RI functions with services + */ +PVRSRV_ERROR InitRIBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY, + PVRSRVBridgeRIWritePMREntry, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY, + PVRSRVBridgeRIWriteMEMDESCEntry, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY, + PVRSRVBridgeRIWriteProcListEntry, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR, + PVRSRVBridgeRIUpdateMEMDESCAddr, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY, + PVRSRVBridgeRIDeleteMEMDESCEntry, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPLIST, + PVRSRVBridgeRIDumpList, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPALL, + PVRSRVBridgeRIDumpAll, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPPROCESS, + PVRSRVBridgeRIDumpProcess, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER, + PVRSRVBridgeRIWritePMREntryWithOwner, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all ri functions with services + */ +PVRSRV_ERROR DeinitRIBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY); + + 
UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPLIST); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPALL); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIDUMPPROCESS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, + PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/srvcore_bridge/common_srvcore_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/srvcore_bridge/common_srvcore_bridge.h new file mode 100644 index 000000000000..d383279416e4 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/srvcore_bridge/common_srvcore_bridge.h @@ -0,0 +1,370 @@ +/******************************************************************************* +@File +@Title Common bridge header for srvcore +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for srvcore +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_SRVCORE_BRIDGE_H +#define COMMON_SRVCORE_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "pvrsrv_device_types.h" +#include "cache_ops.h" + +#define PVRSRV_BRIDGE_SRVCORE_CMD_FIRST 0 +#define PVRSRV_BRIDGE_SRVCORE_CONNECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+0 +#define PVRSRV_BRIDGE_SRVCORE_DISCONNECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+1 +#define PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+2 +#define PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+3 +#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+4 +#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+5 +#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+6 +#define PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+7 +#define PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+8 +#define PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+9 +#define PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+10 +#define PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+11 +#define PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+12 +#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+13 +#define PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+14 +#define PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+15 +#define PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+16 +#define PVRSRV_BRIDGE_SRVCORE_CMD_LAST (PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+16) + +/******************************************* + Connect + *******************************************/ + +/* Bridge in structure for Connect */ +typedef struct PVRSRV_BRIDGE_IN_CONNECT_TAG +{ + IMG_UINT32 
ui32Flags; + IMG_UINT32 ui32ClientBuildOptions; + IMG_UINT32 ui32ClientDDKVersion; + IMG_UINT32 ui32ClientDDKBuild; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_CONNECT; + +/* Bridge out structure for Connect */ +typedef struct PVRSRV_BRIDGE_OUT_CONNECT_TAG +{ + IMG_UINT8 ui8KernelArch; + IMG_UINT32 ui32CapabilityFlags; + IMG_UINT64 ui64PackedBvnc; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_CONNECT; + +/******************************************* + Disconnect + *******************************************/ + +/* Bridge in structure for Disconnect */ +typedef struct PVRSRV_BRIDGE_IN_DISCONNECT_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DISCONNECT; + +/* Bridge out structure for Disconnect */ +typedef struct PVRSRV_BRIDGE_OUT_DISCONNECT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DISCONNECT; + +/******************************************* + AcquireGlobalEventObject + *******************************************/ + +/* Bridge in structure for AcquireGlobalEventObject */ +typedef struct PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT; + +/* Bridge out structure for AcquireGlobalEventObject */ +typedef struct PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT_TAG +{ + IMG_HANDLE hGlobalEventObject; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT; + +/******************************************* + ReleaseGlobalEventObject + *******************************************/ + +/* Bridge in structure for ReleaseGlobalEventObject */ +typedef struct PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT_TAG +{ + IMG_HANDLE hGlobalEventObject; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT; + +/* Bridge out structure for ReleaseGlobalEventObject */ +typedef struct PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT_TAG +{ + 
PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT; + +/******************************************* + EventObjectOpen + *******************************************/ + +/* Bridge in structure for EventObjectOpen */ +typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN_TAG +{ + IMG_HANDLE hEventObject; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN; + +/* Bridge out structure for EventObjectOpen */ +typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN_TAG +{ + IMG_HANDLE hOSEvent; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN; + +/******************************************* + EventObjectWait + *******************************************/ + +/* Bridge in structure for EventObjectWait */ +typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT_TAG +{ + IMG_HANDLE hOSEventKM; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT; + +/* Bridge out structure for EventObjectWait */ +typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT; + +/******************************************* + EventObjectClose + *******************************************/ + +/* Bridge in structure for EventObjectClose */ +typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE_TAG +{ + IMG_HANDLE hOSEventKM; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE; + +/* Bridge out structure for EventObjectClose */ +typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE; + +/******************************************* + DumpDebugInfo + *******************************************/ + +/* Bridge in structure for DumpDebugInfo */ +typedef struct PVRSRV_BRIDGE_IN_DUMPDEBUGINFO_TAG +{ + IMG_UINT32 ui32VerbLevel; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DUMPDEBUGINFO; + +/* Bridge out structure for DumpDebugInfo */ +typedef struct 
PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO; + +/******************************************* + GetDevClockSpeed + *******************************************/ + +/* Bridge in structure for GetDevClockSpeed */ +typedef struct PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED; + +/* Bridge out structure for GetDevClockSpeed */ +typedef struct PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED_TAG +{ + IMG_UINT32 ui32ClockSpeed; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED; + +/******************************************* + HWOpTimeout + *******************************************/ + +/* Bridge in structure for HWOpTimeout */ +typedef struct PVRSRV_BRIDGE_IN_HWOPTIMEOUT_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_HWOPTIMEOUT; + +/* Bridge out structure for HWOpTimeout */ +typedef struct PVRSRV_BRIDGE_OUT_HWOPTIMEOUT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_HWOPTIMEOUT; + +/******************************************* + AlignmentCheck + *******************************************/ + +/* Bridge in structure for AlignmentCheck */ +typedef struct PVRSRV_BRIDGE_IN_ALIGNMENTCHECK_TAG +{ + IMG_UINT32 ui32AlignChecksSize; + IMG_UINT32 *pui32AlignChecks; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_ALIGNMENTCHECK; + +/* Bridge out structure for AlignmentCheck */ +typedef struct PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK; + +/******************************************* + GetDeviceStatus + *******************************************/ + +/* Bridge in structure for GetDeviceStatus */ +typedef struct PVRSRV_BRIDGE_IN_GETDEVICESTATUS_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) 
PVRSRV_BRIDGE_IN_GETDEVICESTATUS; + +/* Bridge out structure for GetDeviceStatus */ +typedef struct PVRSRV_BRIDGE_OUT_GETDEVICESTATUS_TAG +{ + IMG_UINT32 ui32DeviceSatus; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_GETDEVICESTATUS; + +/******************************************* + GetMultiCoreInfo + *******************************************/ + +/* Bridge in structure for GetMultiCoreInfo */ +typedef struct PVRSRV_BRIDGE_IN_GETMULTICOREINFO_TAG +{ + IMG_UINT32 ui32CapsSize; + /* Output pointer pui64Caps is also an implied input */ + IMG_UINT64 *pui64Caps; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_GETMULTICOREINFO; + +/* Bridge out structure for GetMultiCoreInfo */ +typedef struct PVRSRV_BRIDGE_OUT_GETMULTICOREINFO_TAG +{ + IMG_UINT32 ui32NumCores; + IMG_UINT64 *pui64Caps; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_GETMULTICOREINFO; + +/******************************************* + EventObjectWaitTimeout + *******************************************/ + +/* Bridge in structure for EventObjectWaitTimeout */ +typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT_TAG +{ + IMG_HANDLE hOSEventKM; + IMG_UINT64 ui64uiTimeoutus; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT; + +/* Bridge out structure for EventObjectWaitTimeout */ +typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT; + +/******************************************* + FindProcessMemStats + *******************************************/ + +/* Bridge in structure for FindProcessMemStats */ +typedef struct PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS_TAG +{ + IMG_UINT32 ui32PID; + IMG_UINT32 ui32ArrSize; + IMG_BOOL bbAllProcessStats; + /* Output pointer pui32MemStatsArray is also an implied input */ + IMG_UINT32 *pui32MemStatsArray; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS; + +/* Bridge out structure for 
FindProcessMemStats */ +typedef struct PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS_TAG +{ + IMG_UINT32 *pui32MemStatsArray; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS; + +/******************************************* + AcquireInfoPage + *******************************************/ + +/* Bridge in structure for AcquireInfoPage */ +typedef struct PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE; + +/* Bridge out structure for AcquireInfoPage */ +typedef struct PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE_TAG +{ + IMG_HANDLE hPMR; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE; + +/******************************************* + ReleaseInfoPage + *******************************************/ + +/* Bridge in structure for ReleaseInfoPage */ +typedef struct PVRSRV_BRIDGE_IN_RELEASEINFOPAGE_TAG +{ + IMG_HANDLE hPMR; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RELEASEINFOPAGE; + +/* Bridge out structure for ReleaseInfoPage */ +typedef struct PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE; + +#endif /* COMMON_SRVCORE_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/srvcore_bridge/server_srvcore_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/srvcore_bridge/server_srvcore_bridge.c new file mode 100644 index 000000000000..e84f8a7a8c06 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/srvcore_bridge/server_srvcore_bridge.c @@ -0,0 +1,1164 @@ +/******************************************************************************* +@File +@Title Server bridge for srvcore +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for srvcore +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "srvcore.h" +#include "info_page.h" +#include "proc_stats.h" +#include "rgx_fwif_alignchecks.h" + +#include "common_srvcore_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeConnect(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psConnectIN_UI8, + IMG_UINT8 * psConnectOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_CONNECT *psConnectIN = + (PVRSRV_BRIDGE_IN_CONNECT *) IMG_OFFSET_ADDR(psConnectIN_UI8, 0); + PVRSRV_BRIDGE_OUT_CONNECT *psConnectOUT = + (PVRSRV_BRIDGE_OUT_CONNECT *) IMG_OFFSET_ADDR(psConnectOUT_UI8, 0); + + psConnectOUT->eError = + PVRSRVConnectKM(psConnection, OSGetDevNode(psConnection), + psConnectIN->ui32Flags, + psConnectIN->ui32ClientBuildOptions, + psConnectIN->ui32ClientDDKVersion, + psConnectIN->ui32ClientDDKBuild, + &psConnectOUT->ui8KernelArch, + &psConnectOUT->ui32CapabilityFlags, + &psConnectOUT->ui64PackedBvnc); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDisconnect(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDisconnectIN_UI8, + IMG_UINT8 * psDisconnectOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DISCONNECT *psDisconnectIN = + (PVRSRV_BRIDGE_IN_DISCONNECT *) IMG_OFFSET_ADDR(psDisconnectIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_DISCONNECT *psDisconnectOUT = + (PVRSRV_BRIDGE_OUT_DISCONNECT *) + IMG_OFFSET_ADDR(psDisconnectOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDisconnectIN); + + psDisconnectOUT->eError = PVRSRVDisconnectKM(); + + return 0; +} + +static PVRSRV_ERROR 
_AcquireGlobalEventObjecthGlobalEventObjectIntRelease(void + *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVReleaseGlobalEventObjectKM((IMG_HANDLE) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeAcquireGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psAcquireGlobalEventObjectIN_UI8, + IMG_UINT8 * + psAcquireGlobalEventObjectOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT *psAcquireGlobalEventObjectIN + = + (PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT *) + IMG_OFFSET_ADDR(psAcquireGlobalEventObjectIN_UI8, 0); + PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT + *psAcquireGlobalEventObjectOUT = + (PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT *) + IMG_OFFSET_ADDR(psAcquireGlobalEventObjectOUT_UI8, 0); + + IMG_HANDLE hGlobalEventObjectInt = NULL; + + PVR_UNREFERENCED_PARAMETER(psAcquireGlobalEventObjectIN); + + psAcquireGlobalEventObjectOUT->eError = + PVRSRVAcquireGlobalEventObjectKM(&hGlobalEventObjectInt); + /* Exit early if bridged call fails */ + if (unlikely(psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK)) + { + goto AcquireGlobalEventObject_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psAcquireGlobalEventObjectOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psAcquireGlobalEventObjectOUT-> + hGlobalEventObject, + (void *)hGlobalEventObjectInt, + PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _AcquireGlobalEventObjecthGlobalEventObjectIntRelease); + if (unlikely(psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto AcquireGlobalEventObject_exit; + } + + /* Release now we have created handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +AcquireGlobalEventObject_exit: + + if (psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK) + { + if (hGlobalEventObjectInt) + { + PVRSRVReleaseGlobalEventObjectKM(hGlobalEventObjectInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeReleaseGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psReleaseGlobalEventObjectIN_UI8, + IMG_UINT8 * + psReleaseGlobalEventObjectOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT *psReleaseGlobalEventObjectIN + = + (PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT *) + IMG_OFFSET_ADDR(psReleaseGlobalEventObjectIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT + *psReleaseGlobalEventObjectOUT = + (PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT *) + IMG_OFFSET_ADDR(psReleaseGlobalEventObjectOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psReleaseGlobalEventObjectOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psReleaseGlobalEventObjectIN-> + hGlobalEventObject, + PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT); + if (unlikely + ((psReleaseGlobalEventObjectOUT->eError != PVRSRV_OK) + && (psReleaseGlobalEventObjectOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psReleaseGlobalEventObjectOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto ReleaseGlobalEventObject_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +ReleaseGlobalEventObject_exit: + + return 0; +} + +static PVRSRV_ERROR _EventObjectOpenhOSEventIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = OSEventObjectClose((IMG_HANDLE) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeEventObjectOpen(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psEventObjectOpenIN_UI8, + IMG_UINT8 * psEventObjectOpenOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN *psEventObjectOpenIN = + (PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN *) + IMG_OFFSET_ADDR(psEventObjectOpenIN_UI8, 0); + PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN *psEventObjectOpenOUT = + (PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN *) + IMG_OFFSET_ADDR(psEventObjectOpenOUT_UI8, 0); + + IMG_HANDLE hEventObject = psEventObjectOpenIN->hEventObject; + IMG_HANDLE hEventObjectInt = NULL; + IMG_HANDLE hOSEventInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psEventObjectOpenOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hEventObjectInt, + hEventObject, + PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT, + IMG_TRUE); + if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto EventObjectOpen_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psEventObjectOpenOUT->eError = + OSEventObjectOpen(hEventObjectInt, &hOSEventInt); + /* Exit early if bridged call fails */ + if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK)) + { + goto EventObjectOpen_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psEventObjectOpenOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psEventObjectOpenOUT->hOSEvent, + (void *)hOSEventInt, + PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _EventObjectOpenhOSEventIntRelease); + if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto EventObjectOpen_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +EventObjectOpen_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hEventObjectInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hEventObject, + PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psEventObjectOpenOUT->eError != PVRSRV_OK) + { + if (hOSEventInt) + { + OSEventObjectClose(hOSEventInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeEventObjectWait(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psEventObjectWaitIN_UI8, + IMG_UINT8 * psEventObjectWaitOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT *psEventObjectWaitIN = + (PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT *) + IMG_OFFSET_ADDR(psEventObjectWaitIN_UI8, 0); + PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT *psEventObjectWaitOUT = + (PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT *) + IMG_OFFSET_ADDR(psEventObjectWaitOUT_UI8, 0); + + IMG_HANDLE hOSEventKM = psEventObjectWaitIN->hOSEventKM; + IMG_HANDLE hOSEventKMInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psEventObjectWaitOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hOSEventKMInt, + hOSEventKM, + PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, + IMG_TRUE); + if (unlikely(psEventObjectWaitOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto EventObjectWait_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psEventObjectWaitOUT->eError = OSEventObjectWait(hOSEventKMInt); + +EventObjectWait_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hOSEventKMInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hOSEventKM, + PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeEventObjectClose(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psEventObjectCloseIN_UI8, + IMG_UINT8 * psEventObjectCloseOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE *psEventObjectCloseIN = + (PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE *) + IMG_OFFSET_ADDR(psEventObjectCloseIN_UI8, 0); + PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE *psEventObjectCloseOUT = + (PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE *) + IMG_OFFSET_ADDR(psEventObjectCloseOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psEventObjectCloseOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psEventObjectCloseIN-> + hOSEventKM, + PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT); + if (unlikely + ((psEventObjectCloseOUT->eError != PVRSRV_OK) + && (psEventObjectCloseOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psEventObjectCloseOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto EventObjectClose_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +EventObjectClose_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeDumpDebugInfo(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDumpDebugInfoIN_UI8, + IMG_UINT8 * psDumpDebugInfoOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DUMPDEBUGINFO *psDumpDebugInfoIN = + (PVRSRV_BRIDGE_IN_DUMPDEBUGINFO *) + IMG_OFFSET_ADDR(psDumpDebugInfoIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO *psDumpDebugInfoOUT = + (PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO *) + IMG_OFFSET_ADDR(psDumpDebugInfoOUT_UI8, 0); + + psDumpDebugInfoOUT->eError = + PVRSRVDumpDebugInfoKM(psConnection, OSGetDevNode(psConnection), + psDumpDebugInfoIN->ui32VerbLevel); + + return 0; +} + +static IMG_INT +PVRSRVBridgeGetDevClockSpeed(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psGetDevClockSpeedIN_UI8, + IMG_UINT8 * psGetDevClockSpeedOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED *psGetDevClockSpeedIN = + (PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED *) + IMG_OFFSET_ADDR(psGetDevClockSpeedIN_UI8, 0); + PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED *psGetDevClockSpeedOUT = + (PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED *) + IMG_OFFSET_ADDR(psGetDevClockSpeedOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psGetDevClockSpeedIN); + + psGetDevClockSpeedOUT->eError = + PVRSRVGetDevClockSpeedKM(psConnection, OSGetDevNode(psConnection), + 
&psGetDevClockSpeedOUT->ui32ClockSpeed); + + return 0; +} + +static IMG_INT +PVRSRVBridgeHWOpTimeout(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psHWOpTimeoutIN_UI8, + IMG_UINT8 * psHWOpTimeoutOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_HWOPTIMEOUT *psHWOpTimeoutIN = + (PVRSRV_BRIDGE_IN_HWOPTIMEOUT *) + IMG_OFFSET_ADDR(psHWOpTimeoutIN_UI8, 0); + PVRSRV_BRIDGE_OUT_HWOPTIMEOUT *psHWOpTimeoutOUT = + (PVRSRV_BRIDGE_OUT_HWOPTIMEOUT *) + IMG_OFFSET_ADDR(psHWOpTimeoutOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psHWOpTimeoutIN); + + psHWOpTimeoutOUT->eError = + PVRSRVHWOpTimeoutKM(psConnection, OSGetDevNode(psConnection)); + + return 0; +} + +static IMG_INT +PVRSRVBridgeAlignmentCheck(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psAlignmentCheckIN_UI8, + IMG_UINT8 * psAlignmentCheckOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_ALIGNMENTCHECK *psAlignmentCheckIN = + (PVRSRV_BRIDGE_IN_ALIGNMENTCHECK *) + IMG_OFFSET_ADDR(psAlignmentCheckIN_UI8, 0); + PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK *psAlignmentCheckOUT = + (PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK *) + IMG_OFFSET_ADDR(psAlignmentCheckOUT_UI8, 0); + + IMG_UINT32 *ui32AlignChecksInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32)) + 0; + + if (unlikely + (psAlignmentCheckIN->ui32AlignChecksSize > + RGXFW_ALIGN_CHECKS_UM_MAX)) + { + psAlignmentCheckOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto AlignmentCheck_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psAlignmentCheckIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psAlignmentCheckIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psAlignmentCheckOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto AlignmentCheck_exit; + } + } + } + + if (psAlignmentCheckIN->ui32AlignChecksSize != 0) + { + ui32AlignChecksInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psAlignmentCheckIN->ui32AlignChecksSize * + sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32AlignChecksInt, + (const void __user *)psAlignmentCheckIN->pui32AlignChecks, + psAlignmentCheckIN->ui32AlignChecksSize * + sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psAlignmentCheckOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto AlignmentCheck_exit; + } + } + + psAlignmentCheckOUT->eError = + PVRSRVAlignmentCheckKM(psConnection, OSGetDevNode(psConnection), + psAlignmentCheckIN->ui32AlignChecksSize, + ui32AlignChecksInt); + +AlignmentCheck_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeGetDeviceStatus(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psGetDeviceStatusIN_UI8, + IMG_UINT8 * psGetDeviceStatusOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_GETDEVICESTATUS *psGetDeviceStatusIN = + (PVRSRV_BRIDGE_IN_GETDEVICESTATUS *) + IMG_OFFSET_ADDR(psGetDeviceStatusIN_UI8, 0); + PVRSRV_BRIDGE_OUT_GETDEVICESTATUS 
*psGetDeviceStatusOUT = + (PVRSRV_BRIDGE_OUT_GETDEVICESTATUS *) + IMG_OFFSET_ADDR(psGetDeviceStatusOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psGetDeviceStatusIN); + + psGetDeviceStatusOUT->eError = + PVRSRVGetDeviceStatusKM(psConnection, OSGetDevNode(psConnection), + &psGetDeviceStatusOUT->ui32DeviceSatus); + + return 0; +} + +static IMG_INT +PVRSRVBridgeGetMultiCoreInfo(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psGetMultiCoreInfoIN_UI8, + IMG_UINT8 * psGetMultiCoreInfoOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_GETMULTICOREINFO *psGetMultiCoreInfoIN = + (PVRSRV_BRIDGE_IN_GETMULTICOREINFO *) + IMG_OFFSET_ADDR(psGetMultiCoreInfoIN_UI8, 0); + PVRSRV_BRIDGE_OUT_GETMULTICOREINFO *psGetMultiCoreInfoOUT = + (PVRSRV_BRIDGE_OUT_GETMULTICOREINFO *) + IMG_OFFSET_ADDR(psGetMultiCoreInfoOUT_UI8, 0); + + IMG_UINT64 *pui64CapsInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64)) + 0; + + if (psGetMultiCoreInfoIN->ui32CapsSize > 8) + { + psGetMultiCoreInfoOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto GetMultiCoreInfo_exit; + } + + psGetMultiCoreInfoOUT->pui64Caps = psGetMultiCoreInfoIN->pui64Caps; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psGetMultiCoreInfoIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psGetMultiCoreInfoIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psGetMultiCoreInfoOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto GetMultiCoreInfo_exit; + } + } + } + + if (psGetMultiCoreInfoIN->ui32CapsSize != 0) + { + pui64CapsInt = + (IMG_UINT64 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64); + } + + psGetMultiCoreInfoOUT->eError = + PVRSRVGetMultiCoreInfoKM(psConnection, OSGetDevNode(psConnection), + psGetMultiCoreInfoIN->ui32CapsSize, + &psGetMultiCoreInfoOUT->ui32NumCores, + pui64CapsInt); + + /* If dest ptr is non-null and we have data to copy */ + if ((pui64CapsInt) && + ((psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, (void __user *)psGetMultiCoreInfoOUT->pui64Caps, + pui64CapsInt, + (psGetMultiCoreInfoIN->ui32CapsSize * + sizeof(IMG_UINT64))) != PVRSRV_OK)) + { + psGetMultiCoreInfoOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto GetMultiCoreInfo_exit; + } + } + +GetMultiCoreInfo_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeEventObjectWaitTimeout(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psEventObjectWaitTimeoutIN_UI8, + IMG_UINT8 * psEventObjectWaitTimeoutOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT *psEventObjectWaitTimeoutIN = + 
(PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT *) + IMG_OFFSET_ADDR(psEventObjectWaitTimeoutIN_UI8, 0); + PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT *psEventObjectWaitTimeoutOUT = + (PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT *) + IMG_OFFSET_ADDR(psEventObjectWaitTimeoutOUT_UI8, 0); + + IMG_HANDLE hOSEventKM = psEventObjectWaitTimeoutIN->hOSEventKM; + IMG_HANDLE hOSEventKMInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psEventObjectWaitTimeoutOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hOSEventKMInt, + hOSEventKM, + PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, + IMG_TRUE); + if (unlikely(psEventObjectWaitTimeoutOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto EventObjectWaitTimeout_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psEventObjectWaitTimeoutOUT->eError = + OSEventObjectWaitTimeout(hOSEventKMInt, + psEventObjectWaitTimeoutIN-> + ui64uiTimeoutus); + +EventObjectWaitTimeout_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hOSEventKMInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hOSEventKM, + PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psFindProcessMemStatsIN_UI8, + IMG_UINT8 * psFindProcessMemStatsOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS *psFindProcessMemStatsIN = + (PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS *) + IMG_OFFSET_ADDR(psFindProcessMemStatsIN_UI8, 0); + PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS *psFindProcessMemStatsOUT = + (PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS *) + IMG_OFFSET_ADDR(psFindProcessMemStatsOUT_UI8, 0); + + IMG_UINT32 *pui32MemStatsArrayInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32)) + 0; + + if (psFindProcessMemStatsIN->ui32ArrSize > + PVRSRV_PROCESS_STAT_TYPE_COUNT) + { + psFindProcessMemStatsOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto FindProcessMemStats_exit; + } + + PVR_UNREFERENCED_PARAMETER(psConnection); + + psFindProcessMemStatsOUT->pui32MemStatsArray = + psFindProcessMemStatsIN->pui32MemStatsArray; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psFindProcessMemStatsIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psFindProcessMemStatsIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psFindProcessMemStatsOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto FindProcessMemStats_exit; + } + } + } + + if (psFindProcessMemStatsIN->ui32ArrSize != 0) + { + pui32MemStatsArrayInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32); + } + + psFindProcessMemStatsOUT->eError = + PVRSRVFindProcessMemStatsKM(psFindProcessMemStatsIN->ui32PID, + psFindProcessMemStatsIN->ui32ArrSize, + psFindProcessMemStatsIN-> + bbAllProcessStats, + pui32MemStatsArrayInt); + + /* If dest ptr is non-null and we have data to copy */ + if ((pui32MemStatsArrayInt) && + ((psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, + (void __user *)psFindProcessMemStatsOUT-> + pui32MemStatsArray, pui32MemStatsArrayInt, + (psFindProcessMemStatsIN->ui32ArrSize * + sizeof(IMG_UINT32))) != PVRSRV_OK)) + { + psFindProcessMemStatsOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto FindProcessMemStats_exit; + } + } + +FindProcessMemStats_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static PVRSRV_ERROR _AcquireInfoPagepsPMRIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVReleaseInfoPageKM((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeAcquireInfoPage(IMG_UINT32 
ui32DispatchTableEntry, + IMG_UINT8 * psAcquireInfoPageIN_UI8, + IMG_UINT8 * psAcquireInfoPageOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE *psAcquireInfoPageIN = + (PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE *) + IMG_OFFSET_ADDR(psAcquireInfoPageIN_UI8, 0); + PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE *psAcquireInfoPageOUT = + (PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE *) + IMG_OFFSET_ADDR(psAcquireInfoPageOUT_UI8, 0); + + PMR *psPMRInt = NULL; + + PVR_UNREFERENCED_PARAMETER(psAcquireInfoPageIN); + + psAcquireInfoPageOUT->eError = PVRSRVAcquireInfoPageKM(&psPMRInt); + /* Exit early if bridged call fails */ + if (unlikely(psAcquireInfoPageOUT->eError != PVRSRV_OK)) + { + goto AcquireInfoPage_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + + psAcquireInfoPageOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase-> + psHandleBase, &psAcquireInfoPageOUT->hPMR, + (void *)psPMRInt, + PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _AcquireInfoPagepsPMRIntRelease); + if (unlikely(psAcquireInfoPageOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + goto AcquireInfoPage_exit; + } + + /* Release now we have created handles. 
*/ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + +AcquireInfoPage_exit: + + if (psAcquireInfoPageOUT->eError != PVRSRV_OK) + { + if (psPMRInt) + { + PVRSRVReleaseInfoPageKM(psPMRInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeReleaseInfoPage(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psReleaseInfoPageIN_UI8, + IMG_UINT8 * psReleaseInfoPageOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RELEASEINFOPAGE *psReleaseInfoPageIN = + (PVRSRV_BRIDGE_IN_RELEASEINFOPAGE *) + IMG_OFFSET_ADDR(psReleaseInfoPageIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE *psReleaseInfoPageOUT = + (PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE *) + IMG_OFFSET_ADDR(psReleaseInfoPageOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + + psReleaseInfoPageOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psProcessHandleBase-> + psHandleBase, + (IMG_HANDLE) psReleaseInfoPageIN-> + hPMR, + PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT); + if (unlikely + ((psReleaseInfoPageOUT->eError != PVRSRV_OK) + && (psReleaseInfoPageOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psReleaseInfoPageOUT->eError))); + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + goto ReleaseInfoPage_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + +ReleaseInfoPage_exit: + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitSRVCOREBridge(void); +PVRSRV_ERROR DeinitSRVCOREBridge(void); + +/* + * Register all SRVCORE functions with services + */ +PVRSRV_ERROR InitSRVCOREBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_CONNECT, + PVRSRVBridgeConnect, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_DISCONNECT, + PVRSRVBridgeDisconnect, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT, + PVRSRVBridgeAcquireGlobalEventObject, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT, + PVRSRVBridgeReleaseGlobalEventObject, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN, + PVRSRVBridgeEventObjectOpen, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT, + PVRSRVBridgeEventObjectWait, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE, + PVRSRVBridgeEventObjectClose, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO, + PVRSRVBridgeDumpDebugInfo, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED, + PVRSRVBridgeGetDevClockSpeed, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT, + PVRSRVBridgeHWOpTimeout, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK, + PVRSRVBridgeAlignmentCheck, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS, + PVRSRVBridgeGetDeviceStatus, NULL); + + 
SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO, + PVRSRVBridgeGetMultiCoreInfo, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT, + PVRSRVBridgeEventObjectWaitTimeout, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS, + PVRSRVBridgeFindProcessMemStats, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE, + PVRSRVBridgeAcquireInfoPage, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE, + PVRSRVBridgeReleaseInfoPage, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all srvcore functions with services + */ +PVRSRV_ERROR DeinitSRVCOREBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_CONNECT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_DISCONNECT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO); 
+ + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/sync_bridge/client_sync_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/sync_bridge/client_sync_bridge.h new file mode 100644 index 000000000000..0c3bf82923ba --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/sync_bridge/client_sync_bridge.h @@ -0,0 +1,142 @@ +/******************************************************************************* +@File +@Title Client bridge header for sync +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for sync +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef CLIENT_SYNC_BRIDGE_H +#define CLIENT_SYNC_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_sync_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeAllocSyncPrimitiveBlock(IMG_HANDLE + hBridge, + IMG_HANDLE + * + phSyncHandle, + IMG_UINT32 + * + pui32SyncPrimVAddr, + IMG_UINT32 + * + pui32SyncPrimBlockSize, + IMG_HANDLE + * + phhSyncPMR); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeFreeSyncPrimitiveBlock(IMG_HANDLE + hBridge, + IMG_HANDLE + hSyncHandle); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimSet(IMG_HANDLE hBridge, + IMG_HANDLE hSyncHandle, + IMG_UINT32 ui32Index, + IMG_UINT32 ui32Value); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDump(IMG_HANDLE hBridge, + IMG_HANDLE + hSyncHandle, + IMG_UINT32 + ui32Offset); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpValue(IMG_HANDLE + hBridge, + IMG_HANDLE + hSyncHandle, + IMG_UINT32 + ui32Offset, + IMG_UINT32 + ui32Value); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpPol(IMG_HANDLE + hBridge, + IMG_HANDLE + hSyncHandle, + IMG_UINT32 + ui32Offset, + IMG_UINT32 + ui32Value, + IMG_UINT32 + ui32Mask, + PDUMP_POLL_OPERATOR + eOperator, + PDUMP_FLAGS_T + uiPDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpCBP(IMG_HANDLE + hBridge, + IMG_HANDLE + hSyncHandle, + IMG_UINT32 + ui32Offset, + IMG_DEVMEM_OFFSET_T + uiWriteOffset, + IMG_DEVMEM_SIZE_T + uiPacketSize, + IMG_DEVMEM_SIZE_T + uiBufferSize); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncAllocEvent(IMG_HANDLE hBridge, + IMG_BOOL + bServerSync, + IMG_UINT32 + ui32FWAddr, + IMG_UINT32 + ui32ClassNameSize, + const IMG_CHAR * + puiClassName); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncFreeEvent(IMG_HANDLE hBridge, + IMG_UINT32 + ui32FWAddr); + 
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeSyncCheckpointSignalledPDumpPol(IMG_HANDLE hBridge, PVRSRV_FENCE hFence); + +#endif /* CLIENT_SYNC_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/sync_bridge/client_sync_direct_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/sync_bridge/client_sync_direct_bridge.c new file mode 100644 index 000000000000..36154b402948 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/sync_bridge/client_sync_direct_bridge.c @@ -0,0 +1,314 @@ +/******************************************************************************* +@File +@Title Direct client bridge for sync +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for sync + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include "client_sync_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "pdump.h" +#include "pdumpdefs.h" +#include "devicemem_typedefs.h" +#include "pvrsrv_sync_km.h" +#include + +#include "sync.h" +#include "sync_server.h" +#include "pdump.h" +#include "pvrsrv_sync_km.h" +#include "sync_fallback_server.h" +#include "sync_checkpoint.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeAllocSyncPrimitiveBlock(IMG_HANDLE + hBridge, + IMG_HANDLE + * + phSyncHandle, + IMG_UINT32 + * + pui32SyncPrimVAddr, + IMG_UINT32 + * + pui32SyncPrimBlockSize, + IMG_HANDLE + * + phhSyncPMR) +{ + PVRSRV_ERROR eError; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; + PMR *pshSyncPMRInt = NULL; + + eError = + PVRSRVAllocSyncPrimitiveBlockKM(NULL, + (PVRSRV_DEVICE_NODE *) ((void *) + hBridge), + &psSyncHandleInt, + pui32SyncPrimVAddr, + pui32SyncPrimBlockSize, + &pshSyncPMRInt); + + *phSyncHandle = psSyncHandleInt; + *phhSyncPMR = pshSyncPMRInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeFreeSyncPrimitiveBlock(IMG_HANDLE + hBridge, + IMG_HANDLE + hSyncHandle) +{ + PVRSRV_ERROR eError; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; + + eError = PVRSRVFreeSyncPrimitiveBlockKM(psSyncHandleInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimSet(IMG_HANDLE hBridge, + IMG_HANDLE hSyncHandle, + IMG_UINT32 ui32Index, + IMG_UINT32 ui32Value) +{ + PVRSRV_ERROR eError; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; + + eError = PVRSRVSyncPrimSetKM(psSyncHandleInt, ui32Index, ui32Value); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDump(IMG_HANDLE hBridge, + IMG_HANDLE + hSyncHandle, + 
IMG_UINT32 + ui32Offset) +{ +#if defined(PDUMP) + PVRSRV_ERROR eError; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; + + eError = PVRSRVSyncPrimPDumpKM(psSyncHandleInt, ui32Offset); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(hSyncHandle); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpValue(IMG_HANDLE + hBridge, + IMG_HANDLE + hSyncHandle, + IMG_UINT32 + ui32Offset, + IMG_UINT32 + ui32Value) +{ +#if defined(PDUMP) + PVRSRV_ERROR eError; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; + + eError = + PVRSRVSyncPrimPDumpValueKM(psSyncHandleInt, ui32Offset, ui32Value); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(hSyncHandle); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + PVR_UNREFERENCED_PARAMETER(ui32Value); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpPol(IMG_HANDLE + hBridge, + IMG_HANDLE + hSyncHandle, + IMG_UINT32 + ui32Offset, + IMG_UINT32 + ui32Value, + IMG_UINT32 + ui32Mask, + PDUMP_POLL_OPERATOR + eOperator, + PDUMP_FLAGS_T + uiPDumpFlags) +{ +#if defined(PDUMP) + PVRSRV_ERROR eError; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; + + eError = + PVRSRVSyncPrimPDumpPolKM(psSyncHandleInt, + ui32Offset, + ui32Value, + ui32Mask, eOperator, uiPDumpFlags); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(hSyncHandle); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + PVR_UNREFERENCED_PARAMETER(ui32Value); + PVR_UNREFERENCED_PARAMETER(ui32Mask); + PVR_UNREFERENCED_PARAMETER(eOperator); 
+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpCBP(IMG_HANDLE + hBridge, + IMG_HANDLE + hSyncHandle, + IMG_UINT32 + ui32Offset, + IMG_DEVMEM_OFFSET_T + uiWriteOffset, + IMG_DEVMEM_SIZE_T + uiPacketSize, + IMG_DEVMEM_SIZE_T + uiBufferSize) +{ +#if defined(PDUMP) + PVRSRV_ERROR eError; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; + + eError = + PVRSRVSyncPrimPDumpCBPKM(psSyncHandleInt, + ui32Offset, + uiWriteOffset, uiPacketSize, uiBufferSize); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(hSyncHandle); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + PVR_UNREFERENCED_PARAMETER(uiWriteOffset); + PVR_UNREFERENCED_PARAMETER(uiPacketSize); + PVR_UNREFERENCED_PARAMETER(uiBufferSize); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncAllocEvent(IMG_HANDLE hBridge, + IMG_BOOL + bServerSync, + IMG_UINT32 + ui32FWAddr, + IMG_UINT32 + ui32ClassNameSize, + const IMG_CHAR * + puiClassName) +{ + PVRSRV_ERROR eError; + + eError = + PVRSRVSyncAllocEventKM(NULL, + (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + bServerSync, ui32FWAddr, ui32ClassNameSize, + puiClassName); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncFreeEvent(IMG_HANDLE hBridge, + IMG_UINT32 + ui32FWAddr) +{ + PVRSRV_ERROR eError; + + eError = + PVRSRVSyncFreeEventKM(NULL, + (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + ui32FWAddr); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV +BridgeSyncCheckpointSignalledPDumpPol(IMG_HANDLE hBridge, PVRSRV_FENCE hFence) +{ +#if defined(PDUMP) + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = PVRSRVSyncCheckpointSignalledPDumpPolKM(hFence); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + 
PVR_UNREFERENCED_PARAMETER(hFence); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/sync_bridge/common_sync_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/sync_bridge/common_sync_bridge.h new file mode 100644 index 000000000000..799a0ba1bce7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/sync_bridge/common_sync_bridge.h @@ -0,0 +1,253 @@ +/******************************************************************************* +@File +@Title Common bridge header for sync +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for sync +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_SYNC_BRIDGE_H +#define COMMON_SYNC_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "pdump.h" +#include "pdumpdefs.h" +#include "devicemem_typedefs.h" +#include "pvrsrv_sync_km.h" +#include + +#define PVRSRV_BRIDGE_SYNC_CMD_FIRST 0 +#define PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK PVRSRV_BRIDGE_SYNC_CMD_FIRST+0 +#define PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK PVRSRV_BRIDGE_SYNC_CMD_FIRST+1 +#define PVRSRV_BRIDGE_SYNC_SYNCPRIMSET PVRSRV_BRIDGE_SYNC_CMD_FIRST+2 +#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP PVRSRV_BRIDGE_SYNC_CMD_FIRST+3 +#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE PVRSRV_BRIDGE_SYNC_CMD_FIRST+4 +#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL PVRSRV_BRIDGE_SYNC_CMD_FIRST+5 +#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP PVRSRV_BRIDGE_SYNC_CMD_FIRST+6 +#define PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT PVRSRV_BRIDGE_SYNC_CMD_FIRST+7 +#define PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT PVRSRV_BRIDGE_SYNC_CMD_FIRST+8 +#define PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL PVRSRV_BRIDGE_SYNC_CMD_FIRST+9 +#define PVRSRV_BRIDGE_SYNC_CMD_LAST (PVRSRV_BRIDGE_SYNC_CMD_FIRST+9) + +/******************************************* + AllocSyncPrimitiveBlock + *******************************************/ + +/* Bridge in structure for AllocSyncPrimitiveBlock */ +typedef struct PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK; + +/* Bridge out structure for AllocSyncPrimitiveBlock */ +typedef struct PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK_TAG +{ + IMG_HANDLE hSyncHandle; + IMG_UINT32 ui32SyncPrimVAddr; + IMG_UINT32 ui32SyncPrimBlockSize; + IMG_HANDLE hhSyncPMR; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK; + +/******************************************* + FreeSyncPrimitiveBlock + 
*******************************************/ + +/* Bridge in structure for FreeSyncPrimitiveBlock */ +typedef struct PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK_TAG +{ + IMG_HANDLE hSyncHandle; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK; + +/* Bridge out structure for FreeSyncPrimitiveBlock */ +typedef struct PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK; + +/******************************************* + SyncPrimSet + *******************************************/ + +/* Bridge in structure for SyncPrimSet */ +typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMSET_TAG +{ + IMG_HANDLE hSyncHandle; + IMG_UINT32 ui32Index; + IMG_UINT32 ui32Value; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMSET; + +/* Bridge out structure for SyncPrimSet */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMSET_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMSET; + +/******************************************* + SyncPrimPDump + *******************************************/ + +/* Bridge in structure for SyncPrimPDump */ +typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP_TAG +{ + IMG_HANDLE hSyncHandle; + IMG_UINT32 ui32Offset; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP; + +/* Bridge out structure for SyncPrimPDump */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP; + +/******************************************* + SyncPrimPDumpValue + *******************************************/ + +/* Bridge in structure for SyncPrimPDumpValue */ +typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE_TAG +{ + IMG_HANDLE hSyncHandle; + IMG_UINT32 ui32Offset; + IMG_UINT32 ui32Value; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE; + +/* Bridge out structure for SyncPrimPDumpValue */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE_TAG +{ + PVRSRV_ERROR 
eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE; + +/******************************************* + SyncPrimPDumpPol + *******************************************/ + +/* Bridge in structure for SyncPrimPDumpPol */ +typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL_TAG +{ + IMG_HANDLE hSyncHandle; + IMG_UINT32 ui32Offset; + IMG_UINT32 ui32Value; + IMG_UINT32 ui32Mask; + PDUMP_POLL_OPERATOR eOperator; + PDUMP_FLAGS_T uiPDumpFlags; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL; + +/* Bridge out structure for SyncPrimPDumpPol */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL; + +/******************************************* + SyncPrimPDumpCBP + *******************************************/ + +/* Bridge in structure for SyncPrimPDumpCBP */ +typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP_TAG +{ + IMG_HANDLE hSyncHandle; + IMG_UINT32 ui32Offset; + IMG_DEVMEM_OFFSET_T uiWriteOffset; + IMG_DEVMEM_SIZE_T uiPacketSize; + IMG_DEVMEM_SIZE_T uiBufferSize; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP; + +/* Bridge out structure for SyncPrimPDumpCBP */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP; + +/******************************************* + SyncAllocEvent + *******************************************/ + +/* Bridge in structure for SyncAllocEvent */ +typedef struct PVRSRV_BRIDGE_IN_SYNCALLOCEVENT_TAG +{ + IMG_BOOL bServerSync; + IMG_UINT32 ui32FWAddr; + IMG_UINT32 ui32ClassNameSize; + const IMG_CHAR *puiClassName; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCALLOCEVENT; + +/* Bridge out structure for SyncAllocEvent */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT; + +/******************************************* + SyncFreeEvent + 
*******************************************/ + +/* Bridge in structure for SyncFreeEvent */ +typedef struct PVRSRV_BRIDGE_IN_SYNCFREEEVENT_TAG +{ + IMG_UINT32 ui32FWAddr; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCFREEEVENT; + +/* Bridge out structure for SyncFreeEvent */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCFREEEVENT_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCFREEEVENT; + +/******************************************* + SyncCheckpointSignalledPDumpPol + *******************************************/ + +/* Bridge in structure for SyncCheckpointSignalledPDumpPol */ +typedef struct PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL_TAG +{ + PVRSRV_FENCE hFence; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL; + +/* Bridge out structure for SyncCheckpointSignalledPDumpPol */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL; + +#endif /* COMMON_SYNC_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/sync_bridge/server_sync_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/sync_bridge/server_sync_bridge.c new file mode 100644 index 000000000000..0db048d874be --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/sync_bridge/server_sync_bridge.c @@ -0,0 +1,818 @@ +/******************************************************************************* +@File +@Title Server bridge for sync +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for sync +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "sync.h" +#include "sync_server.h" +#include "pdump.h" +#include "pvrsrv_sync_km.h" +#include "sync_fallback_server.h" +#include "sync_checkpoint.h" + +#include "common_sync_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _AllocSyncPrimitiveBlockpsSyncHandleIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = + PVRSRVFreeSyncPrimitiveBlockKM((SYNC_PRIMITIVE_BLOCK *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeAllocSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psAllocSyncPrimitiveBlockIN_UI8, + IMG_UINT8 * + psAllocSyncPrimitiveBlockOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK *psAllocSyncPrimitiveBlockIN = + (PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK *) + IMG_OFFSET_ADDR(psAllocSyncPrimitiveBlockIN_UI8, 0); + PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK *psAllocSyncPrimitiveBlockOUT + = + (PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK *) + IMG_OFFSET_ADDR(psAllocSyncPrimitiveBlockOUT_UI8, 0); + + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; + PMR *pshSyncPMRInt = NULL; + + PVR_UNREFERENCED_PARAMETER(psAllocSyncPrimitiveBlockIN); + + psAllocSyncPrimitiveBlockOUT->hSyncHandle = NULL; + + psAllocSyncPrimitiveBlockOUT->eError = + PVRSRVAllocSyncPrimitiveBlockKM(psConnection, + OSGetDevNode(psConnection), + &psSyncHandleInt, + &psAllocSyncPrimitiveBlockOUT-> + ui32SyncPrimVAddr, + &psAllocSyncPrimitiveBlockOUT-> + ui32SyncPrimBlockSize, + &pshSyncPMRInt); + /* Exit early if bridged call fails */ + if 
(unlikely(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)) + { + goto AllocSyncPrimitiveBlock_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psAllocSyncPrimitiveBlockOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psAllocSyncPrimitiveBlockOUT-> + hSyncHandle, (void *)psSyncHandleInt, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _AllocSyncPrimitiveBlockpsSyncHandleIntRelease); + if (unlikely(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto AllocSyncPrimitiveBlock_exit; + } + + psAllocSyncPrimitiveBlockOUT->eError = + PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, + &psAllocSyncPrimitiveBlockOUT-> + hhSyncPMR, (void *)pshSyncPMRInt, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psAllocSyncPrimitiveBlockOUT-> + hSyncHandle); + if (unlikely(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto AllocSyncPrimitiveBlock_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +AllocSyncPrimitiveBlock_exit: + + if (psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK) + { + if (psAllocSyncPrimitiveBlockOUT->hSyncHandle) + { + PVRSRV_ERROR eError; + + /* Lock over handle creation cleanup. */ + LockHandle(psConnection->psHandleBase); + + eError = + PVRSRVReleaseHandleUnlocked(psConnection-> + psHandleBase, + (IMG_HANDLE) + psAllocSyncPrimitiveBlockOUT-> + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + if (unlikely + ((eError != PVRSRV_OK) + && (eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... 
*/ + PVR_ASSERT((eError == PVRSRV_OK) + || (eError == PVRSRV_ERROR_RETRY)); + + /* Avoid freeing/destroying/releasing the resource a second time below */ + psSyncHandleInt = NULL; + /* Release now we have cleaned up creation handles. */ + UnlockHandle(psConnection->psHandleBase); + + } + + if (psSyncHandleInt) + { + PVRSRVFreeSyncPrimitiveBlockKM(psSyncHandleInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeFreeSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psFreeSyncPrimitiveBlockIN_UI8, + IMG_UINT8 * psFreeSyncPrimitiveBlockOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK *psFreeSyncPrimitiveBlockIN = + (PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK *) + IMG_OFFSET_ADDR(psFreeSyncPrimitiveBlockIN_UI8, 0); + PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK *psFreeSyncPrimitiveBlockOUT = + (PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK *) + IMG_OFFSET_ADDR(psFreeSyncPrimitiveBlockOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psFreeSyncPrimitiveBlockOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psFreeSyncPrimitiveBlockIN-> + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + if (unlikely + ((psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_OK) + && (psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psFreeSyncPrimitiveBlockOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto FreeSyncPrimitiveBlock_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +FreeSyncPrimitiveBlock_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeSyncPrimSet(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncPrimSetIN_UI8, + IMG_UINT8 * psSyncPrimSetOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCPRIMSET *psSyncPrimSetIN = + (PVRSRV_BRIDGE_IN_SYNCPRIMSET *) + IMG_OFFSET_ADDR(psSyncPrimSetIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCPRIMSET *psSyncPrimSetOUT = + (PVRSRV_BRIDGE_OUT_SYNCPRIMSET *) + IMG_OFFSET_ADDR(psSyncPrimSetOUT_UI8, 0); + + IMG_HANDLE hSyncHandle = psSyncPrimSetIN->hSyncHandle; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psSyncPrimSetOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncHandleInt, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psSyncPrimSetOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto SyncPrimSet_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psSyncPrimSetOUT->eError = + PVRSRVSyncPrimSetKM(psSyncHandleInt, + psSyncPrimSetIN->ui32Index, + psSyncPrimSetIN->ui32Value); + +SyncPrimSet_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSyncHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#if defined(PDUMP) + +static IMG_INT +PVRSRVBridgeSyncPrimPDump(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncPrimPDumpIN_UI8, + IMG_UINT8 * psSyncPrimPDumpOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP *psSyncPrimPDumpIN = + (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP *) + IMG_OFFSET_ADDR(psSyncPrimPDumpIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP *psSyncPrimPDumpOUT = + (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP *) + IMG_OFFSET_ADDR(psSyncPrimPDumpOUT_UI8, 0); + + IMG_HANDLE hSyncHandle = psSyncPrimPDumpIN->hSyncHandle; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psSyncPrimPDumpOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncHandleInt, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psSyncPrimPDumpOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto SyncPrimPDump_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psSyncPrimPDumpOUT->eError = + PVRSRVSyncPrimPDumpKM(psSyncHandleInt, + psSyncPrimPDumpIN->ui32Offset); + +SyncPrimPDump_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSyncHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#else +#define PVRSRVBridgeSyncPrimPDump NULL +#endif + +#if defined(PDUMP) + +static IMG_INT +PVRSRVBridgeSyncPrimPDumpValue(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncPrimPDumpValueIN_UI8, + IMG_UINT8 * psSyncPrimPDumpValueOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE *psSyncPrimPDumpValueIN = + (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE *) + IMG_OFFSET_ADDR(psSyncPrimPDumpValueIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE *psSyncPrimPDumpValueOUT = + (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE *) + IMG_OFFSET_ADDR(psSyncPrimPDumpValueOUT_UI8, 0); + + IMG_HANDLE hSyncHandle = psSyncPrimPDumpValueIN->hSyncHandle; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psSyncPrimPDumpValueOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncHandleInt, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psSyncPrimPDumpValueOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto SyncPrimPDumpValue_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psSyncPrimPDumpValueOUT->eError = + PVRSRVSyncPrimPDumpValueKM(psSyncHandleInt, + psSyncPrimPDumpValueIN->ui32Offset, + psSyncPrimPDumpValueIN->ui32Value); + +SyncPrimPDumpValue_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSyncHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#else +#define PVRSRVBridgeSyncPrimPDumpValue NULL +#endif + +#if defined(PDUMP) + +static IMG_INT +PVRSRVBridgeSyncPrimPDumpPol(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncPrimPDumpPolIN_UI8, + IMG_UINT8 * psSyncPrimPDumpPolOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL *psSyncPrimPDumpPolIN = + (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL *) + IMG_OFFSET_ADDR(psSyncPrimPDumpPolIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL *psSyncPrimPDumpPolOUT = + (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL *) + IMG_OFFSET_ADDR(psSyncPrimPDumpPolOUT_UI8, 0); + + IMG_HANDLE hSyncHandle = psSyncPrimPDumpPolIN->hSyncHandle; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psSyncPrimPDumpPolOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncHandleInt, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psSyncPrimPDumpPolOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto SyncPrimPDumpPol_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psSyncPrimPDumpPolOUT->eError = + PVRSRVSyncPrimPDumpPolKM(psSyncHandleInt, + psSyncPrimPDumpPolIN->ui32Offset, + psSyncPrimPDumpPolIN->ui32Value, + psSyncPrimPDumpPolIN->ui32Mask, + psSyncPrimPDumpPolIN->eOperator, + psSyncPrimPDumpPolIN->uiPDumpFlags); + +SyncPrimPDumpPol_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSyncHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#else +#define PVRSRVBridgeSyncPrimPDumpPol NULL +#endif + +#if defined(PDUMP) + +static IMG_INT +PVRSRVBridgeSyncPrimPDumpCBP(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncPrimPDumpCBPIN_UI8, + IMG_UINT8 * psSyncPrimPDumpCBPOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP *psSyncPrimPDumpCBPIN = + (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP *) + IMG_OFFSET_ADDR(psSyncPrimPDumpCBPIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP *psSyncPrimPDumpCBPOUT = + (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP *) + IMG_OFFSET_ADDR(psSyncPrimPDumpCBPOUT_UI8, 0); + + IMG_HANDLE hSyncHandle = psSyncPrimPDumpCBPIN->hSyncHandle; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psSyncPrimPDumpCBPOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncHandleInt, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psSyncPrimPDumpCBPOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto SyncPrimPDumpCBP_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psSyncPrimPDumpCBPOUT->eError = + PVRSRVSyncPrimPDumpCBPKM(psSyncHandleInt, + psSyncPrimPDumpCBPIN->ui32Offset, + psSyncPrimPDumpCBPIN->uiWriteOffset, + psSyncPrimPDumpCBPIN->uiPacketSize, + psSyncPrimPDumpCBPIN->uiBufferSize); + +SyncPrimPDumpCBP_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSyncHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#else +#define PVRSRVBridgeSyncPrimPDumpCBP NULL +#endif + +static IMG_INT +PVRSRVBridgeSyncAllocEvent(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncAllocEventIN_UI8, + IMG_UINT8 * psSyncAllocEventOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCALLOCEVENT *psSyncAllocEventIN = + (PVRSRV_BRIDGE_IN_SYNCALLOCEVENT *) + IMG_OFFSET_ADDR(psSyncAllocEventIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT *psSyncAllocEventOUT = + (PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT *) + IMG_OFFSET_ADDR(psSyncAllocEventOUT_UI8, 0); + + IMG_CHAR *uiClassNameInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) + 0; + + if (unlikely + (psSyncAllocEventIN->ui32ClassNameSize > PVRSRV_SYNC_NAME_LENGTH)) + { + psSyncAllocEventOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto SyncAllocEvent_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psSyncAllocEventIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psSyncAllocEventIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psSyncAllocEventOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto SyncAllocEvent_exit; + } + } + } + + if (psSyncAllocEventIN->ui32ClassNameSize != 0) + { + uiClassNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiClassNameInt, + (const void __user *)psSyncAllocEventIN->puiClassName, + psSyncAllocEventIN->ui32ClassNameSize * + sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psSyncAllocEventOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto SyncAllocEvent_exit; + } + ((IMG_CHAR *) + uiClassNameInt)[(psSyncAllocEventIN->ui32ClassNameSize * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + + psSyncAllocEventOUT->eError = + PVRSRVSyncAllocEventKM(psConnection, OSGetDevNode(psConnection), + psSyncAllocEventIN->bServerSync, + psSyncAllocEventIN->ui32FWAddr, + psSyncAllocEventIN->ui32ClassNameSize, + uiClassNameInt); + +SyncAllocEvent_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeSyncFreeEvent(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncFreeEventIN_UI8, + IMG_UINT8 * psSyncFreeEventOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCFREEEVENT *psSyncFreeEventIN = + 
(PVRSRV_BRIDGE_IN_SYNCFREEEVENT *) + IMG_OFFSET_ADDR(psSyncFreeEventIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCFREEEVENT *psSyncFreeEventOUT = + (PVRSRV_BRIDGE_OUT_SYNCFREEEVENT *) + IMG_OFFSET_ADDR(psSyncFreeEventOUT_UI8, 0); + + psSyncFreeEventOUT->eError = + PVRSRVSyncFreeEventKM(psConnection, OSGetDevNode(psConnection), + psSyncFreeEventIN->ui32FWAddr); + + return 0; +} + +#if defined(PDUMP) + +static IMG_INT +PVRSRVBridgeSyncCheckpointSignalledPDumpPol(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psSyncCheckpointSignalledPDumpPolIN_UI8, + IMG_UINT8 * + psSyncCheckpointSignalledPDumpPolOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL + *psSyncCheckpointSignalledPDumpPolIN = + (PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *) + IMG_OFFSET_ADDR(psSyncCheckpointSignalledPDumpPolIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL + *psSyncCheckpointSignalledPDumpPolOUT = + (PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *) + IMG_OFFSET_ADDR(psSyncCheckpointSignalledPDumpPolOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psConnection); + + psSyncCheckpointSignalledPDumpPolOUT->eError = + PVRSRVSyncCheckpointSignalledPDumpPolKM + (psSyncCheckpointSignalledPDumpPolIN->hFence); + + return 0; +} + +#else +#define PVRSRVBridgeSyncCheckpointSignalledPDumpPol NULL +#endif + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitSYNCBridge(void); +PVRSRV_ERROR DeinitSYNCBridge(void); + +/* + * Register all SYNC functions with services + */ +PVRSRV_ERROR InitSYNCBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK, + PVRSRVBridgeAllocSyncPrimitiveBlock, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK, + PVRSRVBridgeFreeSyncPrimitiveBlock, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCPRIMSET, + 
PVRSRVBridgeSyncPrimSet, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP, + PVRSRVBridgeSyncPrimPDump, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE, + PVRSRVBridgeSyncPrimPDumpValue, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL, + PVRSRVBridgeSyncPrimPDumpPol, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP, + PVRSRVBridgeSyncPrimPDumpCBP, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT, + PVRSRVBridgeSyncAllocEvent, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT, + PVRSRVBridgeSyncFreeEvent, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL, + PVRSRVBridgeSyncCheckpointSignalledPDumpPol, + NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all sync functions with services + */ +PVRSRV_ERROR DeinitSYNCBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCPRIMSET); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL); + + return PVRSRV_OK; +} diff --git 
a/drivers/mcst/gpu-imgtec/generated/volcanic/synctracking_bridge/client_synctracking_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/synctracking_bridge/client_synctracking_bridge.h new file mode 100644 index 000000000000..d9316e7d4cb1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/synctracking_bridge/client_synctracking_bridge.h @@ -0,0 +1,78 @@ +/******************************************************************************* +@File +@Title Client bridge header for synctracking +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for synctracking +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef CLIENT_SYNCTRACKING_BRIDGE_H +#define CLIENT_SYNCTRACKING_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_synctracking_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordRemoveByHandle(IMG_HANDLE + hBridge, + IMG_HANDLE + hhRecord); + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordAdd(IMG_HANDLE hBridge, + IMG_HANDLE * + phhRecord, + IMG_HANDLE + hhServerSyncPrimBlock, + IMG_UINT32 + ui32ui32FwBlockAddr, + IMG_UINT32 + ui32ui32SyncOffset, + IMG_BOOL + bbServerSync, + IMG_UINT32 + ui32ClassNameSize, + const IMG_CHAR * + puiClassName); + +#endif /* CLIENT_SYNCTRACKING_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/synctracking_bridge/client_synctracking_direct_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/synctracking_bridge/client_synctracking_direct_bridge.c new file mode 100644 index 000000000000..1a1efdbb853e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/synctracking_bridge/client_synctracking_direct_bridge.c 
@@ -0,0 +1,103 @@ +/******************************************************************************* +@File +@Title Direct client bridge for synctracking +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for synctracking + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include "client_synctracking_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ + +#include "sync.h" +#include "sync_server.h" + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordRemoveByHandle(IMG_HANDLE + hBridge, + IMG_HANDLE + hhRecord) +{ + PVRSRV_ERROR eError; + SYNC_RECORD_HANDLE pshRecordInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + pshRecordInt = (SYNC_RECORD_HANDLE) hhRecord; + + eError = PVRSRVSyncRecordRemoveByHandleKM(pshRecordInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordAdd(IMG_HANDLE hBridge, + IMG_HANDLE * + phhRecord, + IMG_HANDLE + hhServerSyncPrimBlock, + IMG_UINT32 + ui32ui32FwBlockAddr, + IMG_UINT32 + ui32ui32SyncOffset, + IMG_BOOL + bbServerSync, + IMG_UINT32 + ui32ClassNameSize, + const IMG_CHAR * + puiClassName) +{ + PVRSRV_ERROR eError; + SYNC_RECORD_HANDLE pshRecordInt = NULL; + SYNC_PRIMITIVE_BLOCK *pshServerSyncPrimBlockInt; + + pshServerSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK *) hhServerSyncPrimBlock; + + eError = + PVRSRVSyncRecordAddKM(NULL, + (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + &pshRecordInt, pshServerSyncPrimBlockInt, + ui32ui32FwBlockAddr, ui32ui32SyncOffset, + bbServerSync, ui32ClassNameSize, + puiClassName); + + *phhRecord = pshRecordInt; + return eError; +} diff --git 
a/drivers/mcst/gpu-imgtec/generated/volcanic/synctracking_bridge/common_synctracking_bridge.h b/drivers/mcst/gpu-imgtec/generated/volcanic/synctracking_bridge/common_synctracking_bridge.h new file mode 100644 index 000000000000..340f73663cbd --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/synctracking_bridge/common_synctracking_bridge.h @@ -0,0 +1,96 @@ +/******************************************************************************* +@File +@Title Common bridge header for synctracking +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for synctracking +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_SYNCTRACKING_BRIDGE_H +#define COMMON_SYNCTRACKING_BRIDGE_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +#define PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST 0 +#define PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+0 +#define PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+1 +#define PVRSRV_BRIDGE_SYNCTRACKING_CMD_LAST (PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+1) + +/******************************************* + SyncRecordRemoveByHandle + *******************************************/ + +/* Bridge in structure for SyncRecordRemoveByHandle */ +typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE_TAG +{ + IMG_HANDLE hhRecord; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE; + +/* Bridge out structure for SyncRecordRemoveByHandle */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE_TAG +{ + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE; + +/******************************************* + SyncRecordAdd + *******************************************/ + +/* Bridge 
in structure for SyncRecordAdd */ +typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDADD_TAG +{ + IMG_HANDLE hhServerSyncPrimBlock; + IMG_UINT32 ui32ui32FwBlockAddr; + IMG_UINT32 ui32ui32SyncOffset; + IMG_BOOL bbServerSync; + IMG_UINT32 ui32ClassNameSize; + const IMG_CHAR *puiClassName; +} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCRECORDADD; + +/* Bridge out structure for SyncRecordAdd */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDADD_TAG +{ + IMG_HANDLE hhRecord; + PVRSRV_ERROR eError; +} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCRECORDADD; + +#endif /* COMMON_SYNCTRACKING_BRIDGE_H */ diff --git a/drivers/mcst/gpu-imgtec/generated/volcanic/synctracking_bridge/server_synctracking_bridge.c b/drivers/mcst/gpu-imgtec/generated/volcanic/synctracking_bridge/server_synctracking_bridge.c new file mode 100644 index 000000000000..c1591099bda7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/generated/volcanic/synctracking_bridge/server_synctracking_bridge.c @@ -0,0 +1,347 @@ +/******************************************************************************* +@File +@Title Server bridge for synctracking +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for synctracking +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "sync.h" +#include "sync_server.h" + +#include "common_synctracking_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeSyncRecordRemoveByHandle(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psSyncRecordRemoveByHandleIN_UI8, + IMG_UINT8 * + psSyncRecordRemoveByHandleOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE *psSyncRecordRemoveByHandleIN + = + (PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE *) + IMG_OFFSET_ADDR(psSyncRecordRemoveByHandleIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE + *psSyncRecordRemoveByHandleOUT = + (PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE *) + IMG_OFFSET_ADDR(psSyncRecordRemoveByHandleOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psSyncRecordRemoveByHandleOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) + psSyncRecordRemoveByHandleIN-> + hhRecord, + PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE); + if (unlikely + ((psSyncRecordRemoveByHandleOUT->eError != PVRSRV_OK) + && (psSyncRecordRemoveByHandleOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psSyncRecordRemoveByHandleOUT-> + eError))); + UnlockHandle(psConnection->psHandleBase); + goto SyncRecordRemoveByHandle_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +SyncRecordRemoveByHandle_exit: + + return 0; +} + +static PVRSRV_ERROR _SyncRecordAddpshRecordIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVSyncRecordRemoveByHandleKM((SYNC_RECORD_HANDLE) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeSyncRecordAdd(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncRecordAddIN_UI8, + IMG_UINT8 * psSyncRecordAddOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCRECORDADD *psSyncRecordAddIN = + (PVRSRV_BRIDGE_IN_SYNCRECORDADD *) + IMG_OFFSET_ADDR(psSyncRecordAddIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCRECORDADD *psSyncRecordAddOUT = + (PVRSRV_BRIDGE_OUT_SYNCRECORDADD *) + IMG_OFFSET_ADDR(psSyncRecordAddOUT_UI8, 0); + + SYNC_RECORD_HANDLE pshRecordInt = NULL; + IMG_HANDLE hhServerSyncPrimBlock = + psSyncRecordAddIN->hhServerSyncPrimBlock; + SYNC_PRIMITIVE_BLOCK *pshServerSyncPrimBlockInt = NULL; + IMG_CHAR *uiClassNameInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) + 0; + + if (unlikely + (psSyncRecordAddIN->ui32ClassNameSize > PVRSRV_SYNC_NAME_LENGTH)) + { + psSyncRecordAddOUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto SyncRecordAdd_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psSyncRecordAddIN), + sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - + ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psSyncRecordAddIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psSyncRecordAddOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto SyncRecordAdd_exit; + } + } + } + + if (psSyncRecordAddIN->ui32ClassNameSize != 0) + { + uiClassNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiClassNameInt, + (const void __user *)psSyncRecordAddIN->puiClassName, + psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) != + PVRSRV_OK) + { + psSyncRecordAddOUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto SyncRecordAdd_exit; + } + ((IMG_CHAR *) + uiClassNameInt)[(psSyncRecordAddIN->ui32ClassNameSize * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psSyncRecordAddOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&pshServerSyncPrimBlockInt, + hhServerSyncPrimBlock, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto SyncRecordAdd_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psSyncRecordAddOUT->eError = + PVRSRVSyncRecordAddKM(psConnection, OSGetDevNode(psConnection), + &pshRecordInt, + pshServerSyncPrimBlockInt, + psSyncRecordAddIN->ui32ui32FwBlockAddr, + psSyncRecordAddIN->ui32ui32SyncOffset, + psSyncRecordAddIN->bbServerSync, + psSyncRecordAddIN->ui32ClassNameSize, + uiClassNameInt); + /* Exit early if bridged call fails */ + if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK)) + { + goto SyncRecordAdd_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psSyncRecordAddOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psSyncRecordAddOUT->hhRecord, + (void *)pshRecordInt, + PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + (PFN_HANDLE_RELEASE) & + _SyncRecordAddpshRecordIntRelease); + if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto SyncRecordAdd_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +SyncRecordAdd_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (pshServerSyncPrimBlockInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hhServerSyncPrimBlock, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + if (psSyncRecordAddOUT->eError != PVRSRV_OK) + { + if (pshRecordInt) + { + PVRSRVSyncRecordRemoveByHandleKM(pshRecordInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitSYNCTRACKINGBridge(void); +PVRSRV_ERROR DeinitSYNCTRACKINGBridge(void); + +/* + * Register all SYNCTRACKING functions with services + */ +PVRSRV_ERROR InitSYNCTRACKINGBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, + PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE, + PVRSRVBridgeSyncRecordRemoveByHandle, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, + PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD, + PVRSRVBridgeSyncRecordAdd, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all synctracking functions with services + */ +PVRSRV_ERROR DeinitSYNCTRACKINGBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, + PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, + PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_1.V.2.30.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_1.V.2.30.h new file mode 100644 index 000000000000..1b6f8ed7de87 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_1.V.2.30.h @@ -0,0 +1,75 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 1.V.2.30 +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_1_V_2_30_H +#define RGXCONFIG_KM_1_V_2_30_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 1 +#define RGX_BNC_KM_N 2 +#define RGX_BNC_KM_C 30 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_NUM_CLUSTERS (2U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128U * 1024U) +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_TLA +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3U) +#define RGX_FEATURE_META (MTP218) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) +#define RGX_FEATURE_FBCDC_ALGORITHM (1U) +#define RGX_FEATURE_META_COREMEM_SIZE (0U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_NUM_RASTER_PIPES (1U) +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_1_V_2_30_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_1.V.4.19.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_1.V.4.19.h new file mode 100644 
index 000000000000..576f85448e41 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_1.V.4.19.h @@ -0,0 +1,75 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 1.V.4.19 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_1_V_4_19_H +#define RGXCONFIG_KM_1_V_4_19_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 1 +#define RGX_BNC_KM_N 4 +#define RGX_BNC_KM_C 19 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_NUM_CLUSTERS (4U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128U * 1024U) +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_TLA +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3U) +#define RGX_FEATURE_META (MTP218) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) +#define RGX_FEATURE_FBCDC_ALGORITHM (1U) +#define RGX_FEATURE_META_COREMEM_SIZE (0U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_NUM_RASTER_PIPES (1U) +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_1_V_4_19_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_1.V.4.5.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_1.V.4.5.h new file mode 100644 index 
000000000000..e8a70c96e97f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_1.V.4.5.h @@ -0,0 +1,74 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 1.V.4.5 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_1_V_4_5_H +#define RGXCONFIG_KM_1_V_4_5_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 1 +#define RGX_BNC_KM_N 4 +#define RGX_BNC_KM_C 5 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_NUM_CLUSTERS (4U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128U * 1024U) +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_TLA +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3U) +#define RGX_FEATURE_META (MTP218) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) +#define RGX_FEATURE_FBCDC_ALGORITHM (1U) +#define RGX_FEATURE_META_COREMEM_SIZE (0U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_NUM_RASTER_PIPES (1U) +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_1_V_4_5_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_15.V.1.64.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_15.V.1.64.h new file mode 100644 index 000000000000..3f8dcbe372c1 --- 
/dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_15.V.1.64.h @@ -0,0 +1,78 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 15.V.1.64 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_15_V_1_64_H +#define RGXCONFIG_KM_15_V_1_64_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 15 +#define RGX_BNC_KM_N 1 +#define RGX_BNC_KM_C 64 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_DYNAMIC_DUST_POWER +#define RGX_FEATURE_NUM_CLUSTERS (1U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (2U) +#define RGX_FEATURE_META (LTP217) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_SLC_BANKS (1U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) +#define RGX_FEATURE_FBCDC_ALGORITHM (2U) +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (64U * 1024U) +#define RGX_FEATURE_META_COREMEM_SIZE (0U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_NUM_RASTER_PIPES (1U) +#define RGX_FEATURE_ROGUEXE +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_15_V_1_64_H */ diff --git 
a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.104.18.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.104.18.h new file mode 100644 index 000000000000..e15e80c45d8c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.104.18.h @@ -0,0 +1,81 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 22.V.104.18 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_22_V_104_18_H +#define RGXCONFIG_KM_22_V_104_18_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 22 +#define RGX_BNC_KM_N 104 +#define RGX_BNC_KM_C 18 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_PHYS_BUS_WIDTH (36U) +#define RGX_FEATURE_NUM_CLUSTERS (1U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (7U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_SLC_BANKS (1U) +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (64U * 1024U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_SINGLE_BIF +#define RGX_FEATURE_PBE2_IN_XE +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT +#define RGX_FEATURE_MIPS +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_NUM_RASTER_PIPES (1U) +#define RGX_FEATURE_ROGUEXE +#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (1U) 
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1 +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_22_V_104_18_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.104.218.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.104.218.h new file mode 100644 index 000000000000..41bfaeaea39f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.104.218.h @@ -0,0 +1,83 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 22.V.104.218 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_22_V_104_218_H +#define RGXCONFIG_KM_22_V_104_218_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 22 +#define RGX_BNC_KM_N 104 +#define RGX_BNC_KM_C 218 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_PHYS_BUS_WIDTH (36U) +#define RGX_FEATURE_NUM_CLUSTERS (1U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (7U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_SLC_BANKS (1U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) +#define RGX_FEATURE_FBCDC_ALGORITHM (3U) +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (64U * 1024U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_SINGLE_BIF +#define RGX_FEATURE_PBE2_IN_XE +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT 
+#define RGX_FEATURE_MIPS +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_NUM_RASTER_PIPES (1U) +#define RGX_FEATURE_ROGUEXE +#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (1U) +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1 +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_22_V_104_218_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.208.318.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.208.318.h new file mode 100644 index 000000000000..f879c34cfbe2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.208.318.h @@ -0,0 +1,83 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 22.V.208.318 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_22_V_208_318_H +#define RGXCONFIG_KM_22_V_208_318_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 22 +#define RGX_BNC_KM_N 208 +#define RGX_BNC_KM_C 318 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_NUM_CLUSTERS (2U) +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_PHYS_BUS_WIDTH (36U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (12U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_SLC_BANKS (2U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) +#define RGX_FEATURE_FBCDC_ALGORITHM (3U) +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (64U * 1024U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_SINGLE_BIF +#define RGX_FEATURE_PBE2_IN_XE +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT +#define RGX_FEATURE_MIPS +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_NUM_RASTER_PIPES (2U) +#define RGX_FEATURE_ROGUEXE +#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (1U) +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1 +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_22_V_208_318_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.21.16.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.21.16.h new file mode 100644 index 000000000000..de8d19c50e51 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.21.16.h @@ -0,0 +1,81 @@ 
+/*************************************************************************/ /*! +@Title RGX Config BVNC 22.V.21.16 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_22_V_21_16_H +#define RGXCONFIG_KM_22_V_21_16_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 22 +#define RGX_BNC_KM_N 21 +#define RGX_BNC_KM_C 16 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_PHYS_BUS_WIDTH (32U) +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_NUM_CLUSTERS (1U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (2U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_SLC_BANKS (1U) +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_SINGLE_BIF +#define RGX_FEATURE_PBE2_IN_XE +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT +#define RGX_FEATURE_MIPS +#define RGX_FEATURE_SLCSIZE8 +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_NUM_RASTER_PIPES (1U) +#define RGX_FEATURE_ROGUEXE +#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (1U) +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1 +#define RGX_FEATURE_BIF_TILING +#define 
RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_22_V_21_16_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.22.25.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.22.25.h new file mode 100644 index 000000000000..09a45b46f50b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.22.25.h @@ -0,0 +1,81 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 22.V.22.25 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_22_V_22_25_H +#define RGXCONFIG_KM_22_V_22_25_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 22 +#define RGX_BNC_KM_N 22 +#define RGX_BNC_KM_C 25 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_PHYS_BUS_WIDTH (32U) +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_NUM_CLUSTERS (1U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (16U * 1024U) +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (2U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_SLC_BANKS (1U) +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_SINGLE_BIF +#define RGX_FEATURE_PBE2_IN_XE +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT +#define RGX_FEATURE_MIPS +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_NUM_RASTER_PIPES (1U) +#define RGX_FEATURE_ROGUEXE +#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (1U) 
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1 +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_22_V_22_25_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.25.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.25.h new file mode 100644 index 000000000000..a2be5323c2d8 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.25.h @@ -0,0 +1,81 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 22.V.54.25 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_22_V_54_25_H +#define RGXCONFIG_KM_22_V_54_25_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 22 +#define RGX_BNC_KM_N 54 +#define RGX_BNC_KM_C 25 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_PHYS_BUS_WIDTH (32U) +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_NUM_CLUSTERS (1U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_SLC_BANKS (1U) +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (64U * 1024U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_SINGLE_BIF +#define RGX_FEATURE_PBE2_IN_XE +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT +#define RGX_FEATURE_MIPS +#define RGX_FEATURE_PBVNC_COREID_REG +#define 
RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_NUM_RASTER_PIPES (1U) +#define RGX_FEATURE_ROGUEXE +#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (1U) +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1 +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_22_V_54_25_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.30.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.30.h new file mode 100644 index 000000000000..634c97a5d028 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.30.h @@ -0,0 +1,81 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 22.V.54.30 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_22_V_54_30_H +#define RGXCONFIG_KM_22_V_54_30_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 22 +#define RGX_BNC_KM_N 54 +#define RGX_BNC_KM_C 30 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_PHYS_BUS_WIDTH (32U) +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_NUM_CLUSTERS (1U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_SLC_BANKS (1U) +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (64U * 1024U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_SINGLE_BIF +#define RGX_FEATURE_PBE2_IN_XE +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT +#define RGX_FEATURE_MIPS +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_NUM_RASTER_PIPES (1U) +#define RGX_FEATURE_ROGUEXE +#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (1U) +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1 +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_22_V_54_30_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.330.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.330.h new file mode 100644 index 000000000000..b3d31241438b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.330.h @@ -0,0 +1,83 @@ +/*************************************************************************/ /*! 
+@Title RGX Config BVNC 22.V.54.330 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_22_V_54_330_H +#define RGXCONFIG_KM_22_V_54_330_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 22 +#define RGX_BNC_KM_N 54 +#define RGX_BNC_KM_C 330 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_PHYS_BUS_WIDTH (32U) +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_NUM_CLUSTERS (1U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_SLC_BANKS (1U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) +#define RGX_FEATURE_FBCDC_ALGORITHM (3U) +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (64U * 1024U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_SINGLE_BIF +#define RGX_FEATURE_PBE2_IN_XE +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT +#define RGX_FEATURE_MIPS +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_NUM_RASTER_PIPES (1U) +#define RGX_FEATURE_ROGUEXE +#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (1U) 
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1 +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_22_V_54_330_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.38.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.38.h new file mode 100644 index 000000000000..22c4510ff28b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.38.h @@ -0,0 +1,81 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 22.V.54.38 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_22_V_54_38_H +#define RGXCONFIG_KM_22_V_54_38_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 22 +#define RGX_BNC_KM_N 54 +#define RGX_BNC_KM_C 38 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_PHYS_BUS_WIDTH (36U) +#define RGX_FEATURE_NUM_CLUSTERS (1U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_SLC_BANKS (1U) +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (64U * 1024U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_SINGLE_BIF +#define RGX_FEATURE_PBE2_IN_XE +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT +#define RGX_FEATURE_MIPS +#define RGX_FEATURE_PBVNC_COREID_REG +#define 
RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_NUM_RASTER_PIPES (1U) +#define RGX_FEATURE_ROGUEXE +#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (1U) +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1 +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_22_V_54_38_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_24.V.104.504.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_24.V.104.504.h new file mode 100644 index 000000000000..e694a8e78712 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_24.V.104.504.h @@ -0,0 +1,86 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 24.V.104.504 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_24_V_104_504_H +#define RGXCONFIG_KM_24_V_104_504_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 24 +#define RGX_BNC_KM_N 104 +#define RGX_BNC_KM_C 504 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_IRQ_PER_OS +#define RGX_FEATURE_PHYS_BUS_WIDTH (36U) +#define RGX_FEATURE_NUM_CLUSTERS (1U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (6U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_SLC_BANKS (1U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) +#define RGX_FEATURE_FBCDC_ALGORITHM (3U) +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (64U * 1024U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_PBE2_IN_XE +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT +#define RGX_FEATURE_MIPS +#define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_NUM_RASTER_PIPES (1U) +#define RGX_FEATURE_ROGUEXE +#define RGX_FEATURE_XE_MEMORY_HIERARCHY +#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2 +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_24_V_104_504_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_24.V.208.504.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_24.V.208.504.h new file mode 100644 index 000000000000..f06fd3ef3f33 --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_24.V.208.504.h @@ -0,0 +1,86 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 24.V.208.504 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_24_V_208_504_H +#define RGXCONFIG_KM_24_V_208_504_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 24 +#define RGX_BNC_KM_N 208 +#define RGX_BNC_KM_C 504 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_NUM_CLUSTERS (2U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128U * 1024U) +#define RGX_FEATURE_IRQ_PER_OS +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_PHYS_BUS_WIDTH (36U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (12U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_SLC_BANKS (2U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) +#define RGX_FEATURE_FBCDC_ALGORITHM (3U) +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_PBE2_IN_XE +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT +#define RGX_FEATURE_MIPS +#define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_NUM_RASTER_PIPES (2U) +#define RGX_FEATURE_ROGUEXE +#define 
RGX_FEATURE_XE_MEMORY_HIERARCHY +#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2 +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_24_V_208_504_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_24.V.208.505.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_24.V.208.505.h new file mode 100644 index 000000000000..3aff5f2caae1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_24.V.208.505.h @@ -0,0 +1,86 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 24.V.208.505 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_24_V_208_505_H +#define RGXCONFIG_KM_24_V_208_505_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 24 +#define RGX_BNC_KM_N 208 +#define RGX_BNC_KM_C 505 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_NUM_CLUSTERS (2U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128U * 1024U) +#define RGX_FEATURE_IRQ_PER_OS +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_PHYS_BUS_WIDTH (36U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (12U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_SLC_BANKS (2U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) +#define RGX_FEATURE_FBCDC_ALGORITHM (3U) +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_PBE2_IN_XE +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT +#define RGX_FEATURE_MIPS +#define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_NUM_RASTER_PIPES (2U) +#define RGX_FEATURE_ROGUEXE +#define RGX_FEATURE_XE_MEMORY_HIERARCHY +#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2 +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_24_V_208_505_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_24.V.54.204.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_24.V.54.204.h new file mode 100644 index 000000000000..eb92f96ea63f --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_24.V.54.204.h @@ -0,0 +1,86 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 24.V.54.204 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_24_V_54_204_H +#define RGXCONFIG_KM_24_V_54_204_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 24 +#define RGX_BNC_KM_N 54 +#define RGX_BNC_KM_C 204 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_IRQ_PER_OS +#define RGX_FEATURE_PHYS_BUS_WIDTH (36U) +#define RGX_FEATURE_NUM_CLUSTERS (1U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_SLC_BANKS (1U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) +#define RGX_FEATURE_FBCDC_ALGORITHM (3U) +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (64U * 1024U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_PBE2_IN_XE +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT +#define RGX_FEATURE_MIPS +#define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_NUM_RASTER_PIPES (1U) +#define RGX_FEATURE_ROGUEXE +#define 
RGX_FEATURE_XE_MEMORY_HIERARCHY +#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2 +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_24_V_54_204_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_29.V.108.208.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_29.V.108.208.h new file mode 100644 index 000000000000..5a28473591ae --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_29.V.108.208.h @@ -0,0 +1,89 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 29.V.108.208 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_29_V_108_208_H +#define RGXCONFIG_KM_29_V_108_208_H + +/* Automatically generated file (30/03/2020 09:01:03): Do not edit manually */ + +#define RGX_BNC_KM_B 29 +#define RGX_BNC_KM_N 108 +#define RGX_BNC_KM_C 208 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_NUM_CLUSTERS (2U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128U * 1024U) +#define RGX_FEATURE_IRQ_PER_OS +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_PHYS_BUS_WIDTH (36U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_SLC_BANKS (2U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) +#define RGX_FEATURE_FBCDC_ALGORITHM (4U) +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_PBE2_IN_XE +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT +#define RGX_FEATURE_MIPS +#define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_NUM_RASTER_PIPES (2U) +#define RGX_FEATURE_ROGUEXE +#define RGX_FEATURE_XE_MEMORY_HIERARCHY +#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2 +#define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_29_V_108_208_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_29.V.52.202.h 
b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_29.V.52.202.h new file mode 100644 index 000000000000..8511695d0e3f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_29.V.52.202.h @@ -0,0 +1,89 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 29.V.52.202 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_29_V_52_202_H +#define RGXCONFIG_KM_29_V_52_202_H + +/* Automatically generated file (30/03/2020 09:01:03): Do not edit manually */ + +#define RGX_BNC_KM_B 29 +#define RGX_BNC_KM_N 52 +#define RGX_BNC_KM_C 202 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_IRQ_PER_OS +#define RGX_FEATURE_PHYS_BUS_WIDTH (36U) +#define RGX_FEATURE_NUM_CLUSTERS (1U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (16U * 1024U) +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (2U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_SLC_BANKS (1U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) +#define RGX_FEATURE_FBCDC_ALGORITHM (4U) +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_PBE2_IN_XE +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT +#define RGX_FEATURE_MIPS +#define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_PBVNC_COREID_REG +#define 
RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_NUM_RASTER_PIPES (1U) +#define RGX_FEATURE_ROGUEXE +#define RGX_FEATURE_XE_MEMORY_HIERARCHY +#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2 +#define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_29_V_52_202_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_29.V.54.208.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_29.V.54.208.h new file mode 100644 index 000000000000..2bc5b5b0b763 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_29.V.54.208.h @@ -0,0 +1,89 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 29.V.54.208 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_29_V_54_208_H +#define RGXCONFIG_KM_29_V_54_208_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 29 +#define RGX_BNC_KM_N 54 +#define RGX_BNC_KM_C 208 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_IRQ_PER_OS +#define RGX_FEATURE_PHYS_BUS_WIDTH (36U) +#define RGX_FEATURE_NUM_CLUSTERS (1U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_SLC_BANKS (1U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) +#define RGX_FEATURE_FBCDC_ALGORITHM (4U) +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (64U * 1024U) +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_PBE2_IN_XE +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT +#define RGX_FEATURE_MIPS +#define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_NUM_RASTER_PIPES (1U) +#define RGX_FEATURE_ROGUEXE +#define RGX_FEATURE_XE_MEMORY_HIERARCHY +#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2 +#define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_29_V_54_208_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_4.V.2.51.h 
b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_4.V.2.51.h new file mode 100644 index 000000000000..f0605ae15e55 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_4.V.2.51.h @@ -0,0 +1,82 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 4.V.2.51 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_4_V_2_51_H +#define RGXCONFIG_KM_4_V_2_51_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 4 +#define RGX_BNC_KM_N 2 +#define RGX_BNC_KM_C 51 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_NUM_CLUSTERS (2U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128U * 1024U) +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_CLUSTER_GROUPING +#define RGX_FEATURE_DYNAMIC_DUST_POWER +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_TLA +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8U) +#define RGX_FEATURE_META (LTP218) +#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL +#define RGX_FEATURE_PERF_COUNTER_BATCH +#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE +#define RGX_FEATURE_FBCDC_ARCHITECTURE (2U) +#define RGX_FEATURE_FBCDC_ALGORITHM (2U) +#define RGX_FEATURE_META_COREMEM_SIZE (32U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define 
RGX_FEATURE_NUM_RASTER_PIPES (1U) +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_4_V_2_51_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_4.V.2.58.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_4.V.2.58.h new file mode 100644 index 000000000000..8070da0ea3c3 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_4.V.2.58.h @@ -0,0 +1,84 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 4.V.2.58 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_4_V_2_58_H +#define RGXCONFIG_KM_4_V_2_58_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 4 +#define RGX_BNC_KM_N 2 +#define RGX_BNC_KM_C 58 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_NUM_CLUSTERS (2U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128U * 1024U) +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_CLUSTER_GROUPING +#define RGX_FEATURE_DYNAMIC_DUST_POWER +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_TLA +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8U) +#define RGX_FEATURE_META (LTP218) +#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL +#define RGX_FEATURE_PERF_COUNTER_BATCH +#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define 
RGX_FEATURE_COMPUTE_MORTON_CAPABLE +#define RGX_FEATURE_FBCDC_ARCHITECTURE (2U) +#define RGX_FEATURE_FBCDC_ALGORITHM (2U) +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_META_COREMEM_SIZE (32U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_NUM_RASTER_PIPES (1U) +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_4_V_2_58_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_4.V.4.55.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_4.V.4.55.h new file mode 100644 index 000000000000..b35377ed030a --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_4.V.4.55.h @@ -0,0 +1,81 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 4.V.4.55 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_4_V_4_55_H +#define RGXCONFIG_KM_4_V_4_55_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 4 +#define RGX_BNC_KM_N 4 +#define RGX_BNC_KM_C 55 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_NUM_CLUSTERS (4U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128U * 1024U) +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_CLUSTER_GROUPING +#define RGX_FEATURE_DYNAMIC_DUST_POWER +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_TLA +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8U) +#define RGX_FEATURE_META (LTP218) +#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL +#define RGX_FEATURE_PERF_COUNTER_BATCH +#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE +#define RGX_FEATURE_FBCDC_ARCHITECTURE (2U) +#define RGX_FEATURE_FBCDC_ALGORITHM (2U) +#define RGX_FEATURE_META_COREMEM_SIZE (32U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_NUM_RASTER_PIPES (1U) +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_4_V_4_55_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_4.V.6.62.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_4.V.6.62.h new file mode 100644 index 000000000000..f655f5fda7a3 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_4.V.6.62.h @@ -0,0 +1,85 @@ +/*************************************************************************/ /*! 
+@Title RGX Config BVNC 4.V.6.62 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_4_V_6_62_H +#define RGXCONFIG_KM_4_V_6_62_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 4 +#define RGX_BNC_KM_N 6 +#define RGX_BNC_KM_C 62 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128U * 1024U) +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_NUM_CLUSTERS (6U) +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_CLUSTER_GROUPING +#define RGX_FEATURE_DYNAMIC_DUST_POWER +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_TLA +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8U) +#define RGX_FEATURE_META (LTP218) +#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL +#define RGX_FEATURE_PERF_COUNTER_BATCH +#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_SLC_BANKS (4U) +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE +#define RGX_FEATURE_FBCDC_ARCHITECTURE (2U) +#define RGX_FEATURE_FBCDC_ALGORITHM (2U) +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_NUM_OSIDS (8U) +#define 
RGX_FEATURE_META_COREMEM_SIZE (32U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_NUM_RASTER_PIPES (2U) +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_4_V_6_62_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_5.V.1.46.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_5.V.1.46.h new file mode 100644 index 000000000000..c53f9520f33e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_5.V.1.46.h @@ -0,0 +1,76 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 5.V.1.46 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_5_V_1_46_H +#define RGXCONFIG_KM_5_V_1_46_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 5 +#define RGX_BNC_KM_N 1 +#define RGX_BNC_KM_C 46 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_DYNAMIC_DUST_POWER +#define RGX_FEATURE_NUM_CLUSTERS (1U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (16U * 1024U) +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (1U) +#define RGX_FEATURE_META (LTP217) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_SLC_BANKS (1U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) +#define RGX_FEATURE_FBCDC_ALGORITHM (2U) +#define RGX_FEATURE_META_COREMEM_SIZE (0U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_NUM_RASTER_PIPES (1U) +#define RGX_FEATURE_ROGUEXE +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_5_V_1_46_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_6.V.4.35.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_6.V.4.35.h new file mode 100644 index 000000000000..bf8054ea494f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/configs/rgxconfig_km_6.V.4.35.h @@ -0,0 +1,82 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 6.V.4.35 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_6_V_4_35_H +#define RGXCONFIG_KM_6_V_4_35_H + +/* Automatically generated file (30/03/2020 09:01:02): Do not edit manually */ + +#define RGX_BNC_KM_B 6 +#define RGX_BNC_KM_N 4 +#define RGX_BNC_KM_C 35 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_NUM_CLUSTERS (4U) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128U * 1024U) +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_CLUSTER_GROUPING +#define RGX_FEATURE_DYNAMIC_DUST_POWER +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_RAY_TRACING_DEPRECATED +#define RGX_FEATURE_TLA +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8U) +#define RGX_FEATURE_META (LTP218) +#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL +#define RGX_FEATURE_PERF_COUNTER_BATCH +#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE +#define RGX_FEATURE_FBCDC_ARCHITECTURE (2U) +#define RGX_FEATURE_FBCDC_ALGORITHM (2U) +#define RGX_FEATURE_META_COREMEM_SIZE (32U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_BIF_TILING +#define RGX_FEATURE_LAYOUT_MARS (0U) + + +#endif /* RGXCONFIG_6_V_4_35_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_1.39.4.19.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_1.39.4.19.h new file mode 100644 index 000000000000..98201e19583e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_1.39.4.19.h @@ -0,0 +1,71 @@ +/*************************************************************************/ /*! 
+@Title RGX Core BVNC 1.39.4.19 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_1_39_4_19_H +#define RGXCORE_KM_1_39_4_19_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @2784771 */ + +/****************************************************************************** + * BVNC = 1.39.4.19 + *****************************************************************************/ +#define RGX_BVNC_KM_B 1 +#define RGX_BVNC_KM_V 39 +#define RGX_BVNC_KM_N 4 +#define RGX_BVNC_KM_C 19 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_38344 +#define FIX_HW_BRN_42321 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ + + + +#endif /* RGXCORE_KM_1_39_4_19_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_1.75.2.30.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_1.75.2.30.h new file mode 100644 index 000000000000..8cf25cdbd3e5 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_1.75.2.30.h @@ -0,0 +1,70 @@ +/*************************************************************************/ /*! 
+@Title RGX Core BVNC 1.75.2.30 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_1_75_2_30_H +#define RGXCORE_KM_1_75_2_30_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @2309075 */ + +/****************************************************************************** + * BVNC = 1.75.2.30 + *****************************************************************************/ +#define RGX_BVNC_KM_B 1 +#define RGX_BVNC_KM_V 75 +#define RGX_BVNC_KM_N 2 +#define RGX_BVNC_KM_C 30 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_42321 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ + + + +#endif /* RGXCORE_KM_1_75_2_30_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_1.82.4.5.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_1.82.4.5.h new file mode 100644 index 000000000000..bb0600be3b52 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_1.82.4.5.h @@ -0,0 +1,69 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 1.82.4.5 +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_1_82_4_5_H +#define RGXCORE_KM_1_82_4_5_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @2503111 */ + +/****************************************************************************** + * BVNC = 1.82.4.5 + *****************************************************************************/ +#define RGX_BVNC_KM_B 1 +#define RGX_BVNC_KM_V 82 +#define RGX_BVNC_KM_N 4 +#define RGX_BVNC_KM_C 5 + +/****************************************************************************** + * Errata + *****************************************************************************/ + + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ + + + +#endif /* RGXCORE_KM_1_82_4_5_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_15.5.1.64.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_15.5.1.64.h new file mode 100644 index 000000000000..da4094c2f514 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_15.5.1.64.h @@ -0,0 +1,72 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 15.5.1.64 +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_15_5_1_64_H +#define RGXCORE_KM_15_5_1_64_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @3846532 */ + +/****************************************************************************** + * BVNC = 15.5.1.64 + *****************************************************************************/ +#define RGX_BVNC_KM_B 15 +#define RGX_BVNC_KM_V 5 +#define RGX_BVNC_KM_N 1 +#define RGX_BVNC_KM_C 64 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_43276 +#define FIX_HW_BRN_44871 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42606 + + + +#endif /* RGXCORE_KM_15_5_1_64_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.102.54.38.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.102.54.38.h new file mode 100644 index 000000000000..981040ee62de --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.102.54.38.h @@ -0,0 +1,75 @@ +/*************************************************************************/ /*! 
+@Title RGX Core BVNC 22.102.54.38 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_22_102_54_38_H +#define RGXCORE_KM_22_102_54_38_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @4831550 */ + +/****************************************************************************** + * BVNC = 22.102.54.38 + *****************************************************************************/ +#define RGX_BVNC_KM_B 22 +#define RGX_BVNC_KM_V 102 +#define RGX_BVNC_KM_N 54 +#define RGX_BVNC_KM_C 38 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_43276 +#define FIX_HW_BRN_44871 +#define FIX_HW_BRN_63553 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42606 +#define HW_ERN_57596 +#define HW_ERN_61389 + + + +#endif /* RGXCORE_KM_22_102_54_38_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.104.208.318.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.104.208.318.h new file mode 100644 index 000000000000..ebcef57e8a79 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.104.208.318.h @@ -0,0 +1,76 @@ 
+/*************************************************************************/ /*! +@Title RGX Core BVNC 22.104.208.318 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_22_104_208_318_H +#define RGXCORE_KM_22_104_208_318_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @5124208 */ + +/****************************************************************************** + * BVNC = 22.104.208.318 + *****************************************************************************/ +#define RGX_BVNC_KM_B 22 +#define RGX_BVNC_KM_V 104 +#define RGX_BVNC_KM_N 208 +#define RGX_BVNC_KM_C 318 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_43276 +#define FIX_HW_BRN_44871 +#define FIX_HW_BRN_63553 +#define FIX_HW_BRN_65101 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42290 +#define HW_ERN_42606 +#define HW_ERN_61389 + + + +#endif /* RGXCORE_KM_22_104_208_318_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.30.54.25.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.30.54.25.h new file mode 100644 index 000000000000..2522dbb0261d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.30.54.25.h @@ -0,0 +1,77 @@ 
+/*************************************************************************/ /*! +@Title RGX Core BVNC 22.30.54.25 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_22_30_54_25_H +#define RGXCORE_KM_22_30_54_25_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @4086500 */ + +/****************************************************************************** + * BVNC = 22.30.54.25 + *****************************************************************************/ +#define RGX_BVNC_KM_B 22 +#define RGX_BVNC_KM_V 30 +#define RGX_BVNC_KM_N 54 +#define RGX_BVNC_KM_C 25 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_43276 +#define FIX_HW_BRN_44871 +#define FIX_HW_BRN_60084 +#define FIX_HW_BRN_61450 +#define FIX_HW_BRN_63553 +#define FIX_HW_BRN_65273 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42606 +#define HW_ERN_57596 + + + +#endif /* RGXCORE_KM_22_30_54_25_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.40.54.30.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.40.54.30.h new file mode 100644 index 000000000000..7a4b290ca353 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.40.54.30.h @@ -0,0 
+1,77 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 22.40.54.30 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_22_40_54_30_H +#define RGXCORE_KM_22_40_54_30_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @4094817 */ + +/****************************************************************************** + * BVNC = 22.40.54.30 + *****************************************************************************/ +#define RGX_BVNC_KM_B 22 +#define RGX_BVNC_KM_V 40 +#define RGX_BVNC_KM_N 54 +#define RGX_BVNC_KM_C 30 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_43276 +#define FIX_HW_BRN_44871 +#define FIX_HW_BRN_60084 +#define FIX_HW_BRN_61450 +#define FIX_HW_BRN_63553 +#define FIX_HW_BRN_65273 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42606 +#define HW_ERN_57596 + + + +#endif /* RGXCORE_KM_22_40_54_30_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.44.22.25.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.44.22.25.h new file mode 100644 index 000000000000..3cd8be6c66d2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.44.22.25.h @@ -0,0 
+1,77 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 22.44.22.25 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_22_44_22_25_H +#define RGXCORE_KM_22_44_22_25_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @4137146 */ + +/****************************************************************************** + * BVNC = 22.44.22.25 + *****************************************************************************/ +#define RGX_BVNC_KM_B 22 +#define RGX_BVNC_KM_V 44 +#define RGX_BVNC_KM_N 22 +#define RGX_BVNC_KM_C 25 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_43276 +#define FIX_HW_BRN_44871 +#define FIX_HW_BRN_60084 +#define FIX_HW_BRN_61450 +#define FIX_HW_BRN_63553 +#define FIX_HW_BRN_65273 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42606 +#define HW_ERN_57596 + + + +#endif /* RGXCORE_KM_22_44_22_25_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.46.54.330.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.46.54.330.h new file mode 100644 index 000000000000..896bc1429d2f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.46.54.330.h @@ -0,0 
+1,78 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 22.46.54.330 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_22_46_54_330_H +#define RGXCORE_KM_22_46_54_330_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @4136505 */ + +/****************************************************************************** + * BVNC = 22.46.54.330 + *****************************************************************************/ +#define RGX_BVNC_KM_B 22 +#define RGX_BVNC_KM_V 46 +#define RGX_BVNC_KM_N 54 +#define RGX_BVNC_KM_C 330 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_43276 +#define FIX_HW_BRN_44871 +#define FIX_HW_BRN_60084 +#define FIX_HW_BRN_61450 +#define FIX_HW_BRN_63553 +#define FIX_HW_BRN_65101 +#define FIX_HW_BRN_65273 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42290 +#define HW_ERN_42606 + + + +#endif /* RGXCORE_KM_22_46_54_330_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.49.21.16.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.49.21.16.h new file mode 100644 index 000000000000..df14515d8160 --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.49.21.16.h @@ -0,0 +1,77 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 22.49.21.16 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_22_49_21_16_H +#define RGXCORE_KM_22_49_21_16_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @4158766 */ + +/****************************************************************************** + * BVNC = 22.49.21.16 + *****************************************************************************/ +#define RGX_BVNC_KM_B 22 +#define RGX_BVNC_KM_V 49 +#define RGX_BVNC_KM_N 21 +#define RGX_BVNC_KM_C 16 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_43276 +#define FIX_HW_BRN_44871 +#define FIX_HW_BRN_60084 +#define FIX_HW_BRN_61450 +#define FIX_HW_BRN_63553 +#define FIX_HW_BRN_65273 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42606 +#define HW_ERN_57596 + + + +#endif /* RGXCORE_KM_22_49_21_16_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.67.54.30.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.67.54.30.h new file mode 100644 index 000000000000..f7a7ac2a7b85 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.67.54.30.h @@ -0,0 
+1,77 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 22.67.54.30 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_22_67_54_30_H +#define RGXCORE_KM_22_67_54_30_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @4339986 */ + +/****************************************************************************** + * BVNC = 22.67.54.30 + *****************************************************************************/ +#define RGX_BVNC_KM_B 22 +#define RGX_BVNC_KM_V 67 +#define RGX_BVNC_KM_N 54 +#define RGX_BVNC_KM_C 30 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_43276 +#define FIX_HW_BRN_44871 +#define FIX_HW_BRN_60084 +#define FIX_HW_BRN_63553 +#define FIX_HW_BRN_65273 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42606 +#define HW_ERN_57596 +#define HW_ERN_61389 + + + +#endif /* RGXCORE_KM_22_67_54_30_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.68.54.30.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.68.54.30.h new file mode 100644 index 000000000000..eddfe4469f84 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.68.54.30.h @@ -0,0 +1,76 
@@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 22.68.54.30 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_22_68_54_30_H +#define RGXCORE_KM_22_68_54_30_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @4339984 */ + +/****************************************************************************** + * BVNC = 22.68.54.30 + *****************************************************************************/ +#define RGX_BVNC_KM_B 22 +#define RGX_BVNC_KM_V 68 +#define RGX_BVNC_KM_N 54 +#define RGX_BVNC_KM_C 30 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_43276 +#define FIX_HW_BRN_44871 +#define FIX_HW_BRN_63553 +#define FIX_HW_BRN_65273 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42606 +#define HW_ERN_57596 +#define HW_ERN_61389 + + + +#endif /* RGXCORE_KM_22_68_54_30_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.86.104.218.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.86.104.218.h new file mode 100644 index 000000000000..b713678d8fe8 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.86.104.218.h @@ -0,0 +1,75 @@ 
+/*************************************************************************/ /*! +@Title RGX Core BVNC 22.86.104.218 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_22_86_104_218_H +#define RGXCORE_KM_22_86_104_218_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @4665024 */ + +/****************************************************************************** + * BVNC = 22.86.104.218 + *****************************************************************************/ +#define RGX_BVNC_KM_B 22 +#define RGX_BVNC_KM_V 86 +#define RGX_BVNC_KM_N 104 +#define RGX_BVNC_KM_C 218 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_43276 +#define FIX_HW_BRN_44871 +#define FIX_HW_BRN_63553 +#define FIX_HW_BRN_65101 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42606 +#define HW_ERN_61389 + + + +#endif /* RGXCORE_KM_22_86_104_218_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.87.104.18.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.87.104.18.h new file mode 100644 index 000000000000..0f61e24082cf --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_22.87.104.18.h @@ -0,0 +1,75 @@ 
+/*************************************************************************/ /*! +@Title RGX Core BVNC 22.87.104.18 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_22_87_104_18_H +#define RGXCORE_KM_22_87_104_18_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @4658768 */ + +/****************************************************************************** + * BVNC = 22.87.104.18 + *****************************************************************************/ +#define RGX_BVNC_KM_B 22 +#define RGX_BVNC_KM_V 87 +#define RGX_BVNC_KM_N 104 +#define RGX_BVNC_KM_C 18 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_43276 +#define FIX_HW_BRN_44871 +#define FIX_HW_BRN_63553 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42606 +#define HW_ERN_57596 +#define HW_ERN_61389 + + + +#endif /* RGXCORE_KM_22_87_104_18_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_24.50.208.504.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_24.50.208.504.h new file mode 100644 index 000000000000..8eb8d7136a0b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_24.50.208.504.h @@ -0,0 +1,76 @@ 
+/*************************************************************************/ /*! +@Title RGX Core BVNC 24.50.208.504 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_24_50_208_504_H +#define RGXCORE_KM_24_50_208_504_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @5086680 */ + +/****************************************************************************** + * BVNC = 24.50.208.504 + *****************************************************************************/ +#define RGX_BVNC_KM_B 24 +#define RGX_BVNC_KM_V 50 +#define RGX_BVNC_KM_N 208 +#define RGX_BVNC_KM_C 504 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_43276 +#define FIX_HW_BRN_44871 +#define FIX_HW_BRN_63553 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42290 +#define HW_ERN_42606 +#define HW_ERN_57596 +#define HW_ERN_66622 + + + +#endif /* RGXCORE_KM_24_50_208_504_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_24.56.208.505.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_24.56.208.505.h new file mode 100644 index 000000000000..55f63fe817cb --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_24.56.208.505.h @@ -0,0 +1,76 @@ 
+/*************************************************************************/ /*! +@Title RGX Core BVNC 24.56.208.505 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_24_56_208_505_H +#define RGXCORE_KM_24_56_208_505_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @5203837 */ + +/****************************************************************************** + * BVNC = 24.56.208.505 + *****************************************************************************/ +#define RGX_BVNC_KM_B 24 +#define RGX_BVNC_KM_V 56 +#define RGX_BVNC_KM_N 208 +#define RGX_BVNC_KM_C 505 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_43276 +#define FIX_HW_BRN_44871 +#define FIX_HW_BRN_63553 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42290 +#define HW_ERN_42606 +#define HW_ERN_57596 +#define HW_ERN_66622 + + + +#endif /* RGXCORE_KM_24_56_208_505_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_24.66.54.204.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_24.66.54.204.h new file mode 100644 index 000000000000..f8240012fb59 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_24.66.54.204.h @@ -0,0 +1,76 @@ 
+/*************************************************************************/ /*! +@Title RGX Core BVNC 24.66.54.204 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_24_66_54_204_H +#define RGXCORE_KM_24_66_54_204_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @5200207 */ + +/****************************************************************************** + * BVNC = 24.66.54.204 + *****************************************************************************/ +#define RGX_BVNC_KM_B 24 +#define RGX_BVNC_KM_V 66 +#define RGX_BVNC_KM_N 54 +#define RGX_BVNC_KM_C 204 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_43276 +#define FIX_HW_BRN_44871 +#define FIX_HW_BRN_63553 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42290 +#define HW_ERN_42606 +#define HW_ERN_57596 +#define HW_ERN_66622 + + + +#endif /* RGXCORE_KM_24_66_54_204_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_24.67.104.504.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_24.67.104.504.h new file mode 100644 index 000000000000..bef674ea42de --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_24.67.104.504.h @@ -0,0 +1,76 @@ 
+/*************************************************************************/ /*! +@Title RGX Core BVNC 24.67.104.504 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_24_67_104_504_H +#define RGXCORE_KM_24_67_104_504_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @5203838 */ + +/****************************************************************************** + * BVNC = 24.67.104.504 + *****************************************************************************/ +#define RGX_BVNC_KM_B 24 +#define RGX_BVNC_KM_V 67 +#define RGX_BVNC_KM_N 104 +#define RGX_BVNC_KM_C 504 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_43276 +#define FIX_HW_BRN_44871 +#define FIX_HW_BRN_63553 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42290 +#define HW_ERN_42606 +#define HW_ERN_57596 +#define HW_ERN_66622 + + + +#endif /* RGXCORE_KM_24_67_104_504_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_29.13.54.208.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_29.13.54.208.h new file mode 100644 index 000000000000..175d4d88f650 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_29.13.54.208.h @@ -0,0 +1,78 @@ 
+/*************************************************************************/ /*! +@Title RGX Core BVNC 29.13.54.208 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_29_13_54_208_H +#define RGXCORE_KM_29_13_54_208_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @5336349 */ + +/****************************************************************************** + * BVNC = 29.13.54.208 + *****************************************************************************/ +#define RGX_BVNC_KM_B 29 +#define RGX_BVNC_KM_V 13 +#define RGX_BVNC_KM_N 54 +#define RGX_BVNC_KM_C 208 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_43276 +#define FIX_HW_BRN_44871 +#define FIX_HW_BRN_63553 +#define FIX_HW_BRN_68186 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42290 +#define HW_ERN_42606 +#define HW_ERN_47025 +#define HW_ERN_57596 +#define HW_ERN_66622 + + + +#endif /* RGXCORE_KM_29_13_54_208_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_29.14.108.208.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_29.14.108.208.h new file mode 100644 index 000000000000..d48f10934dde --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_29.14.108.208.h @@ -0,0 +1,78 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 29.14.108.208 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_29_14_108_208_H +#define RGXCORE_KM_29_14_108_208_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @5361799 */ + +/****************************************************************************** + * BVNC = 29.14.108.208 + *****************************************************************************/ +#define RGX_BVNC_KM_B 29 +#define RGX_BVNC_KM_V 14 +#define RGX_BVNC_KM_N 108 +#define RGX_BVNC_KM_C 208 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_43276 +#define FIX_HW_BRN_44871 +#define FIX_HW_BRN_63553 +#define FIX_HW_BRN_68186 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42290 +#define HW_ERN_42606 +#define HW_ERN_47025 +#define HW_ERN_57596 +#define HW_ERN_66622 + + + +#endif /* RGXCORE_KM_29_14_108_208_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_29.19.52.202.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_29.19.52.202.h new file mode 100644 index 000000000000..c7df2511c470 --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_29.19.52.202.h @@ -0,0 +1,78 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 29.19.52.202 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_29_19_52_202_H +#define RGXCORE_KM_29_19_52_202_H + +/* Automatically generated file (11/02/2020 09:01:23): Do not edit manually */ +/* CS: @5372505 */ + +/****************************************************************************** + * BVNC = 29.19.52.202 + *****************************************************************************/ +#define RGX_BVNC_KM_B 29 +#define RGX_BVNC_KM_V 19 +#define RGX_BVNC_KM_N 52 +#define RGX_BVNC_KM_C 202 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_43276 +#define FIX_HW_BRN_44871 +#define FIX_HW_BRN_63553 +#define FIX_HW_BRN_68186 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42290 +#define HW_ERN_42606 +#define HW_ERN_47025 +#define HW_ERN_57596 +#define HW_ERN_66622 + + + +#endif /* RGXCORE_KM_29_19_52_202_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.29.2.51.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.29.2.51.h new file mode 100644 index 000000000000..b35a656fbfe9 --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.29.2.51.h @@ -0,0 +1,73 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 4.29.2.51 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_4_29_2_51_H +#define RGXCORE_KM_4_29_2_51_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @2944502 */ + +/****************************************************************************** + * BVNC = 4.29.2.51 + *****************************************************************************/ +#define RGX_BVNC_KM_B 4 +#define RGX_BVNC_KM_V 29 +#define RGX_BVNC_KM_N 2 +#define RGX_BVNC_KM_C 51 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_50767 +#define FIX_HW_BRN_63142 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42290 +#define HW_ERN_42606 + + + +#endif /* RGXCORE_KM_4_29_2_51_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.31.4.55.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.31.4.55.h new file mode 100644 index 000000000000..9bbe54561616 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.31.4.55.h @@ -0,0 +1,73 @@ +/*************************************************************************/ /*! 
+@Title RGX Core BVNC 4.31.4.55 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_4_31_4_55_H +#define RGXCORE_KM_4_31_4_55_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @2919104 */ + +/****************************************************************************** + * BVNC = 4.31.4.55 + *****************************************************************************/ +#define RGX_BVNC_KM_B 4 +#define RGX_BVNC_KM_V 31 +#define RGX_BVNC_KM_N 4 +#define RGX_BVNC_KM_C 55 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_50767 +#define FIX_HW_BRN_63142 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42290 +#define HW_ERN_42606 + + + +#endif /* RGXCORE_KM_4_31_4_55_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.40.2.51.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.40.2.51.h new file mode 100644 index 000000000000..cf66295d4bf2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.40.2.51.h @@ -0,0 +1,73 @@ +/*************************************************************************/ /*! 
+@Title RGX Core BVNC 4.40.2.51 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_4_40_2_51_H +#define RGXCORE_KM_4_40_2_51_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @3254374 */ + +/****************************************************************************** + * BVNC = 4.40.2.51 + *****************************************************************************/ +#define RGX_BVNC_KM_B 4 +#define RGX_BVNC_KM_V 40 +#define RGX_BVNC_KM_N 2 +#define RGX_BVNC_KM_C 51 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_50767 +#define FIX_HW_BRN_63142 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42290 +#define HW_ERN_42606 + + + +#endif /* RGXCORE_KM_4_40_2_51_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.43.6.62.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.43.6.62.h new file mode 100644 index 000000000000..c3f416c5a0b8 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.43.6.62.h @@ -0,0 +1,73 @@ +/*************************************************************************/ /*! 
+@Title RGX Core BVNC 4.43.6.62 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_4_43_6_62_H +#define RGXCORE_KM_4_43_6_62_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @3253129 */ + +/****************************************************************************** + * BVNC = 4.43.6.62 + *****************************************************************************/ +#define RGX_BVNC_KM_B 4 +#define RGX_BVNC_KM_V 43 +#define RGX_BVNC_KM_N 6 +#define RGX_BVNC_KM_C 62 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_50767 +#define FIX_HW_BRN_63142 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42290 +#define HW_ERN_42606 + + + +#endif /* RGXCORE_KM_4_43_6_62_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.45.2.58.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.45.2.58.h new file mode 100644 index 000000000000..bad637e6a1c1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.45.2.58.h @@ -0,0 +1,72 @@ +/*************************************************************************/ /*! 
+@Title RGX Core BVNC 4.45.2.58 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_4_45_2_58_H +#define RGXCORE_KM_4_45_2_58_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @3547765 */ + +/****************************************************************************** + * BVNC = 4.45.2.58 + *****************************************************************************/ +#define RGX_BVNC_KM_B 4 +#define RGX_BVNC_KM_V 45 +#define RGX_BVNC_KM_N 2 +#define RGX_BVNC_KM_C 58 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_63142 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42290 +#define HW_ERN_42606 + + + +#endif /* RGXCORE_KM_4_45_2_58_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.46.6.62.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.46.6.62.h new file mode 100644 index 000000000000..c9c83e3910eb --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_4.46.6.62.h @@ -0,0 +1,73 @@ +/*************************************************************************/ /*! 
+@Title RGX Core BVNC 4.46.6.62 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_4_46_6_62_H +#define RGXCORE_KM_4_46_6_62_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @4015666 */ + +/****************************************************************************** + * BVNC = 4.46.6.62 + *****************************************************************************/ +#define RGX_BVNC_KM_B 4 +#define RGX_BVNC_KM_V 46 +#define RGX_BVNC_KM_N 6 +#define RGX_BVNC_KM_C 62 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_50767 +#define FIX_HW_BRN_63142 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42290 +#define HW_ERN_42606 + + + +#endif /* RGXCORE_KM_4_46_6_62_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_5.11.1.46.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_5.11.1.46.h new file mode 100644 index 000000000000..1ce42d496c8c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_5.11.1.46.h @@ -0,0 +1,73 @@ +/*************************************************************************/ /*! 
+@Title RGX Core BVNC 5.11.1.46 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_5_11_1_46_H +#define RGXCORE_KM_5_11_1_46_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @3485232 */ + +/****************************************************************************** + * BVNC = 5.11.1.46 + *****************************************************************************/ +#define RGX_BVNC_KM_B 5 +#define RGX_BVNC_KM_V 11 +#define RGX_BVNC_KM_N 1 +#define RGX_BVNC_KM_C 46 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_42321 +#define FIX_HW_BRN_43276 +#define FIX_HW_BRN_44871 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42606 + + + +#endif /* RGXCORE_KM_5_11_1_46_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_5.9.1.46.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_5.9.1.46.h new file mode 100644 index 000000000000..5d2934fe7209 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_5.9.1.46.h @@ -0,0 +1,72 @@ +/*************************************************************************/ /*! 
+@Title RGX Core BVNC 5.9.1.46 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_5_9_1_46_H +#define RGXCORE_KM_5_9_1_46_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @2967148 */ + +/****************************************************************************** + * BVNC = 5.9.1.46 + *****************************************************************************/ +#define RGX_BVNC_KM_B 5 +#define RGX_BVNC_KM_V 9 +#define RGX_BVNC_KM_N 1 +#define RGX_BVNC_KM_C 46 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_38344 +#define FIX_HW_BRN_43276 +#define FIX_HW_BRN_44871 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ + + + +#endif /* RGXCORE_KM_5_9_1_46_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_6.34.4.35.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_6.34.4.35.h new file mode 100644 index 000000000000..f609fad3f620 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/cores/rgxcore_km_6.34.4.35.h @@ -0,0 +1,72 @@ +/*************************************************************************/ /*! 
+@Title RGX Core BVNC 6.34.4.35 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_6_34_4_35_H +#define RGXCORE_KM_6_34_4_35_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @3533654 */ + +/****************************************************************************** + * BVNC = 6.34.4.35 + *****************************************************************************/ +#define RGX_BVNC_KM_B 6 +#define RGX_BVNC_KM_V 34 +#define RGX_BVNC_KM_N 4 +#define RGX_BVNC_KM_C 35 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_63142 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42290 +#define HW_ERN_42606 + + + +#endif /* RGXCORE_KM_6_34_4_35_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgx_bvnc_defs_km.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgx_bvnc_defs_km.h new file mode 100644 index 000000000000..989a146fa2bc --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgx_bvnc_defs_km.h @@ -0,0 +1,319 @@ +/*************************************************************************/ /*! 
+@Title Hardware definition file rgx_bvnc_defs_km.h +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/****************************************************************************** + * Auto generated file by rgxbvnc_tablegen.py * + * This file should not be edited manually * + *****************************************************************************/ + +#ifndef RGX_BVNC_DEFS_KM_H +#define RGX_BVNC_DEFS_KM_H + +#include "img_types.h" +#include "img_defs.h" + +#if defined(RGX_BVNC_DEFS_UM_H) +#error "This file should not be included in conjunction with rgx_bvnc_defs_um.h" +#endif + +#define BVNC_FIELD_WIDTH (16U) + + +/****************************************************************************** + * Mask and bit-position macros for features without values + *****************************************************************************/ + +#define RGX_FEATURE_AXI_ACELITE_POS (0U) +#define RGX_FEATURE_AXI_ACELITE_BIT_MASK (IMG_UINT64_C(0x0000000000000001)) + +#define RGX_FEATURE_BIF_TILING_POS (1U) +#define RGX_FEATURE_BIF_TILING_BIT_MASK (IMG_UINT64_C(0x0000000000000002)) + +#define RGX_FEATURE_CLUSTER_GROUPING_POS (2U) +#define RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK (IMG_UINT64_C(0x0000000000000004)) + +#define RGX_FEATURE_COMPUTE_POS (3U) +#define RGX_FEATURE_COMPUTE_BIT_MASK (IMG_UINT64_C(0x0000000000000008)) + +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE_POS (4U) +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE_BIT_MASK (IMG_UINT64_C(0x0000000000000010)) + 
+#define RGX_FEATURE_COMPUTE_ONLY_POS (5U) +#define RGX_FEATURE_COMPUTE_ONLY_BIT_MASK (IMG_UINT64_C(0x0000000000000020)) + +#define RGX_FEATURE_COMPUTE_OVERLAP_POS (6U) +#define RGX_FEATURE_COMPUTE_OVERLAP_BIT_MASK (IMG_UINT64_C(0x0000000000000040)) + +#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_POS (7U) +#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_BIT_MASK (IMG_UINT64_C(0x0000000000000080)) + +#define RGX_FEATURE_COREID_PER_OS_POS (8U) +#define RGX_FEATURE_COREID_PER_OS_BIT_MASK (IMG_UINT64_C(0x0000000000000100)) + +#define RGX_FEATURE_DUST_POWER_ISLAND_S7_POS (9U) +#define RGX_FEATURE_DUST_POWER_ISLAND_S7_BIT_MASK (IMG_UINT64_C(0x0000000000000200)) + +#define RGX_FEATURE_DYNAMIC_DUST_POWER_POS (10U) +#define RGX_FEATURE_DYNAMIC_DUST_POWER_BIT_MASK (IMG_UINT64_C(0x0000000000000400)) + +#define RGX_FEATURE_FASTRENDER_DM_POS (11U) +#define RGX_FEATURE_FASTRENDER_DM_BIT_MASK (IMG_UINT64_C(0x0000000000000800)) + +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT_POS (12U) +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT_BIT_MASK (IMG_UINT64_C(0x0000000000001000)) + +#define RGX_FEATURE_GPU_VIRTUALISATION_POS (13U) +#define RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK (IMG_UINT64_C(0x0000000000002000)) + +#define RGX_FEATURE_GS_RTA_SUPPORT_POS (14U) +#define RGX_FEATURE_GS_RTA_SUPPORT_BIT_MASK (IMG_UINT64_C(0x0000000000004000)) + +#define RGX_FEATURE_IRQ_PER_OS_POS (15U) +#define RGX_FEATURE_IRQ_PER_OS_BIT_MASK (IMG_UINT64_C(0x0000000000008000)) + +#define RGX_FEATURE_META_DMA_POS (16U) +#define RGX_FEATURE_META_DMA_BIT_MASK (IMG_UINT64_C(0x0000000000010000)) + +#define RGX_FEATURE_MIPS_POS (17U) +#define RGX_FEATURE_MIPS_BIT_MASK (IMG_UINT64_C(0x0000000000020000)) + +#define RGX_FEATURE_PBE2_IN_XE_POS (18U) +#define RGX_FEATURE_PBE2_IN_XE_BIT_MASK (IMG_UINT64_C(0x0000000000040000)) + +#define RGX_FEATURE_PBVNC_COREID_REG_POS (19U) +#define RGX_FEATURE_PBVNC_COREID_REG_BIT_MASK (IMG_UINT64_C(0x0000000000080000)) + +#define RGX_FEATURE_PDS_PER_DUST_POS (20U) +#define 
RGX_FEATURE_PDS_PER_DUST_BIT_MASK (IMG_UINT64_C(0x0000000000100000)) + +#define RGX_FEATURE_PDS_TEMPSIZE8_POS (21U) +#define RGX_FEATURE_PDS_TEMPSIZE8_BIT_MASK (IMG_UINT64_C(0x0000000000200000)) + +#define RGX_FEATURE_PERFBUS_POS (22U) +#define RGX_FEATURE_PERFBUS_BIT_MASK (IMG_UINT64_C(0x0000000000400000)) + +#define RGX_FEATURE_PERF_COUNTER_BATCH_POS (23U) +#define RGX_FEATURE_PERF_COUNTER_BATCH_BIT_MASK (IMG_UINT64_C(0x0000000000800000)) + +#define RGX_FEATURE_RAY_TRACING_DEPRECATED_POS (24U) +#define RGX_FEATURE_RAY_TRACING_DEPRECATED_BIT_MASK (IMG_UINT64_C(0x0000000001000000)) + +#define RGX_FEATURE_RISCV_FW_PROCESSOR_POS (25U) +#define RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK (IMG_UINT64_C(0x0000000002000000)) + +#define RGX_FEATURE_ROGUEXE_POS (26U) +#define RGX_FEATURE_ROGUEXE_BIT_MASK (IMG_UINT64_C(0x0000000004000000)) + +#define RGX_FEATURE_S7_CACHE_HIERARCHY_POS (27U) +#define RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK (IMG_UINT64_C(0x0000000008000000)) + +#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE_POS (28U) +#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000010000000)) + +#define RGX_FEATURE_SCALABLE_VDM_GPP_POS (29U) +#define RGX_FEATURE_SCALABLE_VDM_GPP_BIT_MASK (IMG_UINT64_C(0x0000000020000000)) + +#define RGX_FEATURE_SIGNAL_SNOOPING_POS (30U) +#define RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK (IMG_UINT64_C(0x0000000040000000)) + +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_POS (31U) +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_BIT_MASK (IMG_UINT64_C(0x0000000080000000)) + +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1_POS (32U) +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1_BIT_MASK (IMG_UINT64_C(0x0000000100000000)) + +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2_POS (33U) +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2_BIT_MASK (IMG_UINT64_C(0x0000000200000000)) + +#define RGX_FEATURE_SINGLE_BIF_POS (34U) +#define RGX_FEATURE_SINGLE_BIF_BIT_MASK 
(IMG_UINT64_C(0x0000000400000000)) + +#define RGX_FEATURE_SLCSIZE8_POS (35U) +#define RGX_FEATURE_SLCSIZE8_BIT_MASK (IMG_UINT64_C(0x0000000800000000)) + +#define RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128_POS (36U) +#define RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128_BIT_MASK (IMG_UINT64_C(0x0000001000000000)) + +#define RGX_FEATURE_SLC_VIVT_POS (37U) +#define RGX_FEATURE_SLC_VIVT_BIT_MASK (IMG_UINT64_C(0x0000002000000000)) + +#define RGX_FEATURE_SYS_BUS_SECURE_RESET_POS (38U) +#define RGX_FEATURE_SYS_BUS_SECURE_RESET_BIT_MASK (IMG_UINT64_C(0x0000004000000000)) + +#define RGX_FEATURE_TDM_PDS_CHECKSUM_POS (39U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM_BIT_MASK (IMG_UINT64_C(0x0000008000000000)) + +#define RGX_FEATURE_TESSELLATION_POS (40U) +#define RGX_FEATURE_TESSELLATION_BIT_MASK (IMG_UINT64_C(0x0000010000000000)) + +#define RGX_FEATURE_TLA_POS (41U) +#define RGX_FEATURE_TLA_BIT_MASK (IMG_UINT64_C(0x0000020000000000)) + +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_POS (42U) +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_BIT_MASK (IMG_UINT64_C(0x0000040000000000)) + +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_POS (43U) +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_BIT_MASK (IMG_UINT64_C(0x0000080000000000)) + +#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_POS (44U) +#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_BIT_MASK (IMG_UINT64_C(0x0000100000000000)) + +#define RGX_FEATURE_VDM_DRAWINDIRECT_POS (45U) +#define RGX_FEATURE_VDM_DRAWINDIRECT_BIT_MASK (IMG_UINT64_C(0x0000200000000000)) + +#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_POS (46U) +#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_BIT_MASK (IMG_UINT64_C(0x0000400000000000)) + +#define RGX_FEATURE_XE_MEMORY_HIERARCHY_POS (47U) +#define RGX_FEATURE_XE_MEMORY_HIERARCHY_BIT_MASK (IMG_UINT64_C(0x0000800000000000)) + +#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE_POS (48U) +#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0001000000000000)) + + 
+/****************************************************************************** + * Features with values indexes + *****************************************************************************/ + +typedef enum _RGX_FEATURE_WITH_VALUE_INDEX_ { + RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_IDX, + RGX_FEATURE_FBCDC_ALGORITHM_IDX, + RGX_FEATURE_FBCDC_ARCHITECTURE_IDX, + RGX_FEATURE_LAYOUT_MARS_IDX, + RGX_FEATURE_META_IDX, + RGX_FEATURE_META_COREMEM_BANKS_IDX, + RGX_FEATURE_META_COREMEM_SIZE_IDX, + RGX_FEATURE_META_DMA_CHANNEL_COUNT_IDX, + RGX_FEATURE_NUM_CLUSTERS_IDX, + RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX, + RGX_FEATURE_NUM_OSIDS_IDX, + RGX_FEATURE_NUM_RASTER_PIPES_IDX, + RGX_FEATURE_PHYS_BUS_WIDTH_IDX, + RGX_FEATURE_SCALABLE_TE_ARCH_IDX, + RGX_FEATURE_SCALABLE_VCE_IDX, + RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_IDX, + RGX_FEATURE_SLC_BANKS_IDX, + RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_IDX, + RGX_FEATURE_SLC_SIZE_IN_BYTES_IDX, + RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_IDX, + RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_IDX, + RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_IDX, + RGX_FEATURE_XPU_MAX_SLAVES_IDX, + RGX_FEATURE_XPU_REGISTER_BROADCAST_IDX, + RGX_FEATURE_WITH_VALUES_MAX_IDX, +} RGX_FEATURE_WITH_VALUE_INDEX; + + +/****************************************************************************** + * Mask and bit-position macros for ERNs and BRNs + *****************************************************************************/ + +#define FIX_HW_BRN_36492_POS (0U) +#define FIX_HW_BRN_36492_BIT_MASK (IMG_UINT64_C(0x0000000000000001)) + +#define FIX_HW_BRN_38344_POS (1U) +#define FIX_HW_BRN_38344_BIT_MASK (IMG_UINT64_C(0x0000000000000002)) + +#define HW_ERN_42290_POS (2U) +#define HW_ERN_42290_BIT_MASK (IMG_UINT64_C(0x0000000000000004)) + +#define FIX_HW_BRN_42321_POS (3U) +#define FIX_HW_BRN_42321_BIT_MASK (IMG_UINT64_C(0x0000000000000008)) + +#define HW_ERN_42606_POS (4U) +#define HW_ERN_42606_BIT_MASK (IMG_UINT64_C(0x0000000000000010)) + +#define FIX_HW_BRN_43276_POS (5U) +#define 
FIX_HW_BRN_43276_BIT_MASK (IMG_UINT64_C(0x0000000000000020)) + +#define FIX_HW_BRN_44871_POS (6U) +#define FIX_HW_BRN_44871_BIT_MASK (IMG_UINT64_C(0x0000000000000040)) + +#define HW_ERN_46066_POS (7U) +#define HW_ERN_46066_BIT_MASK (IMG_UINT64_C(0x0000000000000080)) + +#define HW_ERN_47025_POS (8U) +#define HW_ERN_47025_BIT_MASK (IMG_UINT64_C(0x0000000000000100)) + +#define HW_ERN_50539_POS (9U) +#define HW_ERN_50539_BIT_MASK (IMG_UINT64_C(0x0000000000000200)) + +#define FIX_HW_BRN_50767_POS (10U) +#define FIX_HW_BRN_50767_BIT_MASK (IMG_UINT64_C(0x0000000000000400)) + +#define HW_ERN_57596_POS (11U) +#define HW_ERN_57596_BIT_MASK (IMG_UINT64_C(0x0000000000000800)) + +#define FIX_HW_BRN_60084_POS (12U) +#define FIX_HW_BRN_60084_BIT_MASK (IMG_UINT64_C(0x0000000000001000)) + +#define HW_ERN_61389_POS (13U) +#define HW_ERN_61389_BIT_MASK (IMG_UINT64_C(0x0000000000002000)) + +#define FIX_HW_BRN_61450_POS (14U) +#define FIX_HW_BRN_61450_BIT_MASK (IMG_UINT64_C(0x0000000000004000)) + +#define FIX_HW_BRN_63142_POS (15U) +#define FIX_HW_BRN_63142_BIT_MASK (IMG_UINT64_C(0x0000000000008000)) + +#define FIX_HW_BRN_63553_POS (16U) +#define FIX_HW_BRN_63553_BIT_MASK (IMG_UINT64_C(0x0000000000010000)) + +#define FIX_HW_BRN_65273_POS (17U) +#define FIX_HW_BRN_65273_BIT_MASK (IMG_UINT64_C(0x0000000000020000)) + +#define HW_ERN_66622_POS (18U) +#define HW_ERN_66622_BIT_MASK (IMG_UINT64_C(0x0000000000040000)) + +#define FIX_HW_BRN_68186_POS (19U) +#define FIX_HW_BRN_68186_BIT_MASK (IMG_UINT64_C(0x0000000000080000)) + +/* Added manually */ +#define FIX_HW_BRN_65101_POS (20U) +#define FIX_HW_BRN_65101_BIT_MASK (IMG_UINT64_C(0x0000000000100000)) + +/* Macro used for padding the unavailable values for features with values */ +#define RGX_FEATURE_VALUE_INVALID (0xFFFFFFFEU) + +/* Macro used for marking a feature with value as disabled for a specific bvnc */ +#define RGX_FEATURE_VALUE_DISABLED (0xFFFFFFFFU) + +#endif /* RGX_BVNC_DEFS_KM_H */ diff --git 
a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgx_bvnc_table_km.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgx_bvnc_table_km.h new file mode 100644 index 000000000000..bc11326d97b9 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgx_bvnc_table_km.h @@ -0,0 +1,439 @@ +/*************************************************************************/ /*! +@Title Hardware definition file rgx_bvnc_table_km.h +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/****************************************************************************** + * Auto generated file by rgxbvnc_tablegen.py * + * This file should not be edited manually * + *****************************************************************************/ + +#ifndef RGX_BVNC_TABLE_KM_H +#define RGX_BVNC_TABLE_KM_H + +#include "img_types.h" +#include "img_defs.h" +#include "rgxdefs_km.h" + +#ifndef _RGXBVNC_C_ +#error "This file should only be included from rgxbvnc.c" +#endif + +#if defined(RGX_BVNC_TABLE_UM_H) +#error "This file should not be included in conjunction with rgx_bvnc_table_um.h" +#endif + + +/****************************************************************************** + * Defines and arrays for each feature with values used + * for handling the corresponding values + *****************************************************************************/ + +#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values[RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, }; + +#define RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX (6) +static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_ALGORITHM_values[RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, 50, }; + +#define 
RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX (5) +static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_ARCHITECTURE_values[RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 7, }; + +#define RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_LAYOUT_MARS_values[RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, }; + +#define RGX_FEATURE_META_MAX_VALUE_IDX (5) +static const IMG_UINT16 aui16_RGX_FEATURE_META_values[RGX_FEATURE_META_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, LTP217, LTP218, MTP218, MTP219, }; + +#define RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_META_COREMEM_BANKS_values[RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 8, }; + +#define RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX (4) +static const IMG_UINT16 aui16_RGX_FEATURE_META_COREMEM_SIZE_values[RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 32, 256, }; + +#define RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values[RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 4, }; + +#define RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX (5) +static const IMG_UINT16 aui16_RGX_FEATURE_NUM_CLUSTERS_values[RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, 6, }; + +#define RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX (9) +static const IMG_UINT16 aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values[RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, 6, 7, 8, 12, }; + +#define RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_NUM_OSIDS_values[RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, 8, }; + +#define 
RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX (4) +static const IMG_UINT16 aui16_RGX_FEATURE_NUM_RASTER_PIPES_values[RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, 2, }; + +#define RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX (4) +static const IMG_UINT16 aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values[RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 32, 36, 40, }; + +#define RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values[RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, }; + +#define RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_VCE_values[RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, }; + +#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_values[RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, }; + +#define RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX (4) +static const IMG_UINT16 aui16_RGX_FEATURE_SLC_BANKS_values[RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, }; + +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values[RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 512, }; + +#define RGX_FEATURE_SLC_SIZE_IN_BYTES_MAX_VALUE_IDX (5) +static const IMG_UINT16 aui16_RGX_FEATURE_SLC_SIZE_IN_BYTES_values[RGX_FEATURE_SLC_SIZE_IN_BYTES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 16, 64, 128, }; + +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values[RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX] = 
{(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 512, }; + +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_values[RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 40, }; + +#define RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_values[RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 19, }; + +#define RGX_FEATURE_XPU_MAX_SLAVES_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_XPU_MAX_SLAVES_values[RGX_FEATURE_XPU_MAX_SLAVES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, 3, }; + +#define RGX_FEATURE_XPU_REGISTER_BROADCAST_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_XPU_REGISTER_BROADCAST_values[RGX_FEATURE_XPU_REGISTER_BROADCAST_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, }; + + +/****************************************************************************** + * Table contains pointers to each feature value array for features that have + * values. 
+ * Indexed using enum RGX_FEATURE_WITH_VALUE_INDEX from rgx_bvnc_defs_km.h + *****************************************************************************/ + +static const IMG_UINT16 * const gaFeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX] = { + aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values, + aui16_RGX_FEATURE_FBCDC_ALGORITHM_values, + aui16_RGX_FEATURE_FBCDC_ARCHITECTURE_values, + aui16_RGX_FEATURE_LAYOUT_MARS_values, + aui16_RGX_FEATURE_META_values, + aui16_RGX_FEATURE_META_COREMEM_BANKS_values, + aui16_RGX_FEATURE_META_COREMEM_SIZE_values, + aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values, + aui16_RGX_FEATURE_NUM_CLUSTERS_values, + aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values, + aui16_RGX_FEATURE_NUM_OSIDS_values, + aui16_RGX_FEATURE_NUM_RASTER_PIPES_values, + aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values, + aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values, + aui16_RGX_FEATURE_SCALABLE_VCE_values, + aui16_RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_values, + aui16_RGX_FEATURE_SLC_BANKS_values, + aui16_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values, + aui16_RGX_FEATURE_SLC_SIZE_IN_BYTES_values, + aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values, + aui16_RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_values, + aui16_RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_values, + aui16_RGX_FEATURE_XPU_MAX_SLAVES_values, + aui16_RGX_FEATURE_XPU_REGISTER_BROADCAST_values, +}; + + +/****************************************************************************** + * Array containing the lengths of the arrays containing the values. 
+ * Used for indexing the aui16__values defined upwards + *****************************************************************************/ + + +static const IMG_UINT16 gaFeaturesValuesMaxIndexes[] = { + RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX, + RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX, + RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX, + RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX, + RGX_FEATURE_META_MAX_VALUE_IDX, + RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX, + RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX, + RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX, + RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX, + RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX, + RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX, + RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX, + RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX, + RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX, + RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX, + RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX, + RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX, + RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX, + RGX_FEATURE_SLC_SIZE_IN_BYTES_MAX_VALUE_IDX, + RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX, + RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX, + RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_MAX_VALUE_IDX, + RGX_FEATURE_XPU_MAX_SLAVES_MAX_VALUE_IDX, + RGX_FEATURE_XPU_REGISTER_BROADCAST_MAX_VALUE_IDX, +}; + + +/****************************************************************************** + * Bit-positions for features with values + *****************************************************************************/ + +static const IMG_UINT16 aui16FeaturesWithValuesBitPositions[] = { + (0U), /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_POS */ + (2U), /* RGX_FEATURE_FBCDC_ALGORITHM_POS */ + (5U), /* RGX_FEATURE_FBCDC_ARCHITECTURE_POS */ + (8U), /* RGX_FEATURE_LAYOUT_MARS_POS */ + (10U), /* RGX_FEATURE_META_POS */ + (13U), /* RGX_FEATURE_META_COREMEM_BANKS_POS */ + (15U), /* RGX_FEATURE_META_COREMEM_SIZE_POS */ + (18U), /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_POS */ 
+ (20U), /* RGX_FEATURE_NUM_CLUSTERS_POS */ + (23U), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_POS */ + (27U), /* RGX_FEATURE_NUM_OSIDS_POS */ + (29U), /* RGX_FEATURE_NUM_RASTER_PIPES_POS */ + (32U), /* RGX_FEATURE_PHYS_BUS_WIDTH_POS */ + (35U), /* RGX_FEATURE_SCALABLE_TE_ARCH_POS */ + (37U), /* RGX_FEATURE_SCALABLE_VCE_POS */ + (39U), /* RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_POS */ + (41U), /* RGX_FEATURE_SLC_BANKS_POS */ + (44U), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_POS */ + (46U), /* RGX_FEATURE_SLC_SIZE_IN_BYTES_POS */ + (49U), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_POS */ + (51U), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_POS */ + (53U), /* RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_POS */ + (55U), /* RGX_FEATURE_XPU_MAX_SLAVES_POS */ + (57U), /* RGX_FEATURE_XPU_REGISTER_BROADCAST_POS */ +}; + + +/****************************************************************************** + * Bit-masks for features with values + *****************************************************************************/ + +static const IMG_UINT64 aui64FeaturesWithValuesBitMasks[] = { + (IMG_UINT64_C(0x0000000000000003)), /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_BIT_MASK */ + (IMG_UINT64_C(0x000000000000001C)), /* RGX_FEATURE_FBCDC_ALGORITHM_BIT_MASK */ + (IMG_UINT64_C(0x00000000000000E0)), /* RGX_FEATURE_FBCDC_ARCHITECTURE_BIT_MASK */ + (IMG_UINT64_C(0x0000000000000300)), /* RGX_FEATURE_LAYOUT_MARS_BIT_MASK */ + (IMG_UINT64_C(0x0000000000001C00)), /* RGX_FEATURE_META_BIT_MASK */ + (IMG_UINT64_C(0x0000000000006000)), /* RGX_FEATURE_META_COREMEM_BANKS_BIT_MASK */ + (IMG_UINT64_C(0x0000000000038000)), /* RGX_FEATURE_META_COREMEM_SIZE_BIT_MASK */ + (IMG_UINT64_C(0x00000000000C0000)), /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_BIT_MASK */ + (IMG_UINT64_C(0x0000000000700000)), /* RGX_FEATURE_NUM_CLUSTERS_BIT_MASK */ + (IMG_UINT64_C(0x0000000007800000)), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_BIT_MASK */ + (IMG_UINT64_C(0x0000000018000000)), /* RGX_FEATURE_NUM_OSIDS_BIT_MASK */ + 
(IMG_UINT64_C(0x00000000E0000000)), /* RGX_FEATURE_NUM_RASTER_PIPES_BIT_MASK */ + (IMG_UINT64_C(0x0000000700000000)), /* RGX_FEATURE_PHYS_BUS_WIDTH_BIT_MASK */ + (IMG_UINT64_C(0x0000001800000000)), /* RGX_FEATURE_SCALABLE_TE_ARCH_BIT_MASK */ + (IMG_UINT64_C(0x0000006000000000)), /* RGX_FEATURE_SCALABLE_VCE_BIT_MASK */ + (IMG_UINT64_C(0x0000018000000000)), /* RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_BIT_MASK */ + (IMG_UINT64_C(0x00000E0000000000)), /* RGX_FEATURE_SLC_BANKS_BIT_MASK */ + (IMG_UINT64_C(0x0000300000000000)), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_BIT_MASK */ + (IMG_UINT64_C(0x0001C00000000000)), /* RGX_FEATURE_SLC_SIZE_IN_BYTES_BIT_MASK */ + (IMG_UINT64_C(0x0006000000000000)), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_BIT_MASK */ + (IMG_UINT64_C(0x0018000000000000)), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_BIT_MASK */ + (IMG_UINT64_C(0x0060000000000000)), /* RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_BIT_MASK */ + (IMG_UINT64_C(0x0180000000000000)), /* RGX_FEATURE_XPU_MAX_SLAVES_BIT_MASK */ + (IMG_UINT64_C(0x0600000000000000)), /* RGX_FEATURE_XPU_REGISTER_BROADCAST_BIT_MASK */ +}; + + +/****************************************************************************** + * Table mapping bitmasks for features and features with values + *****************************************************************************/ + + +static const IMG_UINT64 gaFeatures[][3]= +{ + { IMG_UINT64_C(0x000100000002001e), IMG_UINT64_C(0x000002000040404b), IMG_UINT64_C(0x0009100341a08d24) }, /* 1.0.2.30 */ + { IMG_UINT64_C(0x0001000000040005), IMG_UINT64_C(0x000002000040404a), IMG_UINT64_C(0x0009100341b08d24) }, /* 1.0.4.5 */ + { IMG_UINT64_C(0x0001000000040013), IMG_UINT64_C(0x000002000040404b), IMG_UINT64_C(0x0009100341b08d24) }, /* 1.0.4.19 */ + { IMG_UINT64_C(0x0004000000020033), IMG_UINT64_C(0x0001160000c0445f), IMG_UINT64_C(0x0009100343a10948) }, /* 4.0.2.51 */ + { IMG_UINT64_C(0x000400000002003a), IMG_UINT64_C(0x0001160000c0645f), IMG_UINT64_C(0x0009100353a10948) }, /* 4.0.2.58 
*/ + { IMG_UINT64_C(0x0004000000040037), IMG_UINT64_C(0x0001160000c0445e), IMG_UINT64_C(0x0009100343b10948) }, /* 4.0.4.55 */ + { IMG_UINT64_C(0x000400000006003e), IMG_UINT64_C(0x0001160000c0645f), IMG_UINT64_C(0x0009160373c10948) }, /* 4.0.6.62 */ + { IMG_UINT64_C(0x000500000001002e), IMG_UINT64_C(0x000000000440440b), IMG_UINT64_C(0x0008920340908528) }, /* 5.0.1.46 */ + { IMG_UINT64_C(0x0006000000040023), IMG_UINT64_C(0x0001160001c0445f), IMG_UINT64_C(0x0009100303b10948) }, /* 6.0.4.35 */ + { IMG_UINT64_C(0x000f000000010040), IMG_UINT64_C(0x000000000440640b), IMG_UINT64_C(0x0008d20351108528) }, /* 15.0.1.64 */ + { IMG_UINT64_C(0x0016000000150010), IMG_UINT64_C(0x0000004d844e600b), IMG_UINT64_C(0x0008128151100100) }, /* 22.0.21.16 */ + { IMG_UINT64_C(0x0016000000160019), IMG_UINT64_C(0x00000045844e600b), IMG_UINT64_C(0x0008928151100100) }, /* 22.0.22.25 */ + { IMG_UINT64_C(0x0016000000360019), IMG_UINT64_C(0x00000045844e600b), IMG_UINT64_C(0x0008d28151900100) }, /* 22.0.54.25 */ + { IMG_UINT64_C(0x001600000036001e), IMG_UINT64_C(0x00000045844e600b), IMG_UINT64_C(0x0008d28152100100) }, /* 22.0.54.30 */ + { IMG_UINT64_C(0x0016000000360026), IMG_UINT64_C(0x00000045844e600b), IMG_UINT64_C(0x0008d28252100100) }, /* 22.0.54.38 */ + { IMG_UINT64_C(0x001600000036014a), IMG_UINT64_C(0x00000045844e600b), IMG_UINT64_C(0x0008d2815210012c) }, /* 22.0.54.330 */ + { IMG_UINT64_C(0x0016000000680012), IMG_UINT64_C(0x00000045844e600b), IMG_UINT64_C(0x0008d28253100100) }, /* 22.0.104.18 */ + { IMG_UINT64_C(0x00160000006800da), IMG_UINT64_C(0x00000045844e600b), IMG_UINT64_C(0x0008d2825310012c) }, /* 22.0.104.218 */ + { IMG_UINT64_C(0x0016000000d0013e), IMG_UINT64_C(0x00000045844e600b), IMG_UINT64_C(0x0008d4827420012c) }, /* 22.0.208.318 */ + { IMG_UINT64_C(0x00180000003600cc), IMG_UINT64_C(0x000080c2844ee80b), IMG_UINT64_C(0x0008d3025210012c) }, /* 24.0.54.204 */ + { IMG_UINT64_C(0x00180000003600cc), IMG_UINT64_C(0x000080c2844e680b), IMG_UINT64_C(0x0008d3025210012c) }, /* 24.0.54.204 
*/ + { IMG_UINT64_C(0x00180000006801f8), IMG_UINT64_C(0x000080c2844ee80b), IMG_UINT64_C(0x0008d3025290012c) }, /* 24.0.104.504 */ + { IMG_UINT64_C(0x00180000006801f8), IMG_UINT64_C(0x000080c2844e680b), IMG_UINT64_C(0x0008d3025290012c) }, /* 24.0.104.504 */ + { IMG_UINT64_C(0x0018000000d001f8), IMG_UINT64_C(0x000080c2844ee80b), IMG_UINT64_C(0x000915027420012c) }, /* 24.0.208.504 */ + { IMG_UINT64_C(0x0018000000d001f8), IMG_UINT64_C(0x000080c2844e680b), IMG_UINT64_C(0x000915027420012c) }, /* 24.0.208.504 */ + { IMG_UINT64_C(0x0018000000d001f9), IMG_UINT64_C(0x000080c2844ee80b), IMG_UINT64_C(0x000915027420012c) }, /* 24.0.208.505 */ + { IMG_UINT64_C(0x0018000000d001f9), IMG_UINT64_C(0x000080c2844e680b), IMG_UINT64_C(0x000915027420012c) }, /* 24.0.208.505 */ + { IMG_UINT64_C(0x001d0000003400ca), IMG_UINT64_C(0x00008cc2844ee90b), IMG_UINT64_C(0x0008930251100130) }, /* 29.0.52.202 */ + { IMG_UINT64_C(0x001d0000003400ca), IMG_UINT64_C(0x00008cc2844e690b), IMG_UINT64_C(0x0008930251100130) }, /* 29.0.52.202 */ + { IMG_UINT64_C(0x001d0000003600d0), IMG_UINT64_C(0x00008cc2844ee90b), IMG_UINT64_C(0x0008d30252100130) }, /* 29.0.54.208 */ + { IMG_UINT64_C(0x001d0000003600d0), IMG_UINT64_C(0x00008cc2844e690b), IMG_UINT64_C(0x0008d30252100130) }, /* 29.0.54.208 */ + { IMG_UINT64_C(0x001d0000006c00d0), IMG_UINT64_C(0x00008cc2844ee90b), IMG_UINT64_C(0x0009150273a00130) }, /* 29.0.108.208 */ + { IMG_UINT64_C(0x001d0000006c00d0), IMG_UINT64_C(0x00008cc2844e690b), IMG_UINT64_C(0x0009150273a00130) }, /* 29.0.108.208 */ +}; + +/****************************************************************************** + * Table mapping bitmasks for ERNs/BRNs + *****************************************************************************/ + + +static const IMG_UINT64 gaErnsBrns[][2]= +{ + { IMG_UINT64_C(0x0001002700040013), IMG_UINT64_C(0x000000000000000a) }, /* 1.39.4.19 */ + { IMG_UINT64_C(0x0001004b0002001e), IMG_UINT64_C(0x0000000000000008) }, /* 1.75.2.30 */ + { IMG_UINT64_C(0x0001005200040005), 
IMG_UINT64_C(0x0000000000000000) }, /* 1.82.4.5 */ + { IMG_UINT64_C(0x0004001d00020033), IMG_UINT64_C(0x0000000000008414) }, /* 4.29.2.51 */ + { IMG_UINT64_C(0x0004001f00040037), IMG_UINT64_C(0x0000000000008414) }, /* 4.31.4.55 */ + { IMG_UINT64_C(0x0004002800020033), IMG_UINT64_C(0x0000000000008414) }, /* 4.40.2.51 */ + { IMG_UINT64_C(0x0004002b0006003e), IMG_UINT64_C(0x0000000000008414) }, /* 4.43.6.62 */ + { IMG_UINT64_C(0x0004002d0002003a), IMG_UINT64_C(0x0000000000008014) }, /* 4.45.2.58 */ + { IMG_UINT64_C(0x0004002e0006003e), IMG_UINT64_C(0x0000000000008414) }, /* 4.46.6.62 */ + { IMG_UINT64_C(0x000500090001002e), IMG_UINT64_C(0x0000000000000062) }, /* 5.9.1.46 */ + { IMG_UINT64_C(0x0005000b0001002e), IMG_UINT64_C(0x0000000000000078) }, /* 5.11.1.46 */ + { IMG_UINT64_C(0x0006002200040023), IMG_UINT64_C(0x0000000000008014) }, /* 6.34.4.35 */ + { IMG_UINT64_C(0x000f000500010040), IMG_UINT64_C(0x0000000000000070) }, /* 15.5.1.64 */ + { IMG_UINT64_C(0x0016001e00360019), IMG_UINT64_C(0x0000000000035870) }, /* 22.30.54.25 */ + { IMG_UINT64_C(0x001600280036001e), IMG_UINT64_C(0x0000000000035870) }, /* 22.40.54.30 */ + { IMG_UINT64_C(0x0016002c00160019), IMG_UINT64_C(0x0000000000035870) }, /* 22.44.22.25 */ + { IMG_UINT64_C(0x0016002e0036014a), IMG_UINT64_C(0x0000000000135074) }, /* 22.46.54.330 */ + { IMG_UINT64_C(0x0016003100150010), IMG_UINT64_C(0x0000000000035870) }, /* 22.49.21.16 */ + { IMG_UINT64_C(0x001600430036001e), IMG_UINT64_C(0x0000000000033870) }, /* 22.67.54.30 */ + { IMG_UINT64_C(0x001600440036001e), IMG_UINT64_C(0x0000000000032870) }, /* 22.68.54.30 */ + { IMG_UINT64_C(0x00160056006800da), IMG_UINT64_C(0x0000000000112070) }, /* 22.86.104.218 */ + { IMG_UINT64_C(0x0016005700680012), IMG_UINT64_C(0x0000000000012870) }, /* 22.87.104.18 */ + { IMG_UINT64_C(0x0016006600360026), IMG_UINT64_C(0x0000000000012870) }, /* 22.102.54.38 */ + { IMG_UINT64_C(0x0016006800d0013e), IMG_UINT64_C(0x0000000000112074) }, /* 22.104.208.318 */ + { 
IMG_UINT64_C(0x0018003200d001f8), IMG_UINT64_C(0x0000000000050874) }, /* 24.50.208.504 */ + { IMG_UINT64_C(0x0018003800d001f9), IMG_UINT64_C(0x0000000000050874) }, /* 24.56.208.505 */ + { IMG_UINT64_C(0x00180042003600cc), IMG_UINT64_C(0x0000000000050874) }, /* 24.66.54.204 */ + { IMG_UINT64_C(0x00180043006801f8), IMG_UINT64_C(0x0000000000050874) }, /* 24.67.104.504 */ + { IMG_UINT64_C(0x001d000d003600d0), IMG_UINT64_C(0x00000000000d0974) }, /* 29.13.54.208 */ + { IMG_UINT64_C(0x001d000e006c00d0), IMG_UINT64_C(0x00000000000d0974) }, /* 29.14.108.208 */ + { IMG_UINT64_C(0x001d0013003400ca), IMG_UINT64_C(0x00000000000d0974) }, /* 29.19.52.202 */ +}; + +#if defined(DEBUG) + +#define FEATURE_NO_VALUES_NAMES_MAX_IDX (49) + +static const IMG_CHAR * const gaszFeaturesNoValuesNames[FEATURE_NO_VALUES_NAMES_MAX_IDX] = +{ + "AXI_ACELITE", + "BIF_TILING", + "CLUSTER_GROUPING", + "COMPUTE", + "COMPUTE_MORTON_CAPABLE", + "COMPUTE_ONLY", + "COMPUTE_OVERLAP", + "COMPUTE_OVERLAP_WITH_BARRIERS", + "COREID_PER_OS", + "DUST_POWER_ISLAND_S7", + "DYNAMIC_DUST_POWER", + "FASTRENDER_DM", + "GPU_MULTICORE_SUPPORT", + "GPU_VIRTUALISATION", + "GS_RTA_SUPPORT", + "IRQ_PER_OS", + "META_DMA", + "MIPS", + "PBE2_IN_XE", + "PBVNC_COREID_REG", + "PDS_PER_DUST", + "PDS_TEMPSIZE8", + "PERFBUS", + "PERF_COUNTER_BATCH", + "RAY_TRACING_DEPRECATED", + "RISCV_FW_PROCESSOR", + "ROGUEXE", + "S7_CACHE_HIERARCHY", + "S7_TOP_INFRASTRUCTURE", + "SCALABLE_VDM_GPP", + "SIGNAL_SNOOPING", + "SIMPLE_INTERNAL_PARAMETER_FORMAT", + "SIMPLE_INTERNAL_PARAMETER_FORMAT_V1", + "SIMPLE_INTERNAL_PARAMETER_FORMAT_V2", + "SINGLE_BIF", + "SLCSIZE8", + "SLC_HYBRID_CACHELINE_64_128", + "SLC_VIVT", + "SYS_BUS_SECURE_RESET", + "TDM_PDS_CHECKSUM", + "TESSELLATION", + "TLA", + "TPU_CEM_DATAMASTER_GLOBAL_REGISTERS", + "TPU_DM_GLOBAL_REGISTERS", + "TPU_FILTERING_MODE_CONTROL", + "VDM_DRAWINDIRECT", + "VDM_OBJECT_LEVEL_LLS", + "XE_MEMORY_HIERARCHY", + "XT_TOP_INFRASTRUCTURE", +}; + +#define ERNSBRNS_IDS_MAX_IDX (20) + +static const 
IMG_UINT32 gaui64ErnsBrnsIDs[ERNSBRNS_IDS_MAX_IDX] = +{ + 36492, + 38344, + 42290, + 42321, + 42606, + 43276, + 44871, + 46066, + 47025, + 50539, + 50767, + 57596, + 60084, + 61389, + 61450, + 63142, + 63553, + 65273, + 66622, + 68186, +}; + +#endif /* defined(DEBUG) */ +#endif /* RGX_BVNC_TABLE_KM_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgx_cr_defs_km.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgx_cr_defs_km.h new file mode 100644 index 000000000000..5787c9354d1f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgx_cr_defs_km.h @@ -0,0 +1,6515 @@ +/*************************************************************************/ /*! +@Title Hardware definition file rgx_cr_defs_km.h +@Brief The file contains auto-generated hardware definitions without + BVNC-specific compile time conditionals. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +/* **** Autogenerated C -- do not edit **** */ + +/* + */ + + +#ifndef RGX_CR_DEFS_KM_H +#define RGX_CR_DEFS_KM_H + +#if !defined(IMG_EXPLICIT_INCLUDE_HWDEFS) +#error This file may only be included if explicitly defined +#endif + +#include "img_types.h" +#include "img_defs.h" + + +#define RGX_CR_DEFS_KM_REVISION 1 + +/* + Register RGX_CR_RASTERISATION_INDIRECT +*/ +#define RGX_CR_RASTERISATION_INDIRECT (0x8238U) +#define RGX_CR_RASTERISATION_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_RASTERISATION_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_RASTERISATION_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U) + + +/* + Register RGX_CR_PBE_INDIRECT +*/ +#define RGX_CR_PBE_INDIRECT (0x83E0U) +#define RGX_CR_PBE_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_PBE_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_PBE_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U) + + +/* + Register RGX_CR_PBE_PERF_INDIRECT +*/ +#define RGX_CR_PBE_PERF_INDIRECT (0x83D8U) +#define RGX_CR_PBE_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_PBE_PERF_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_PBE_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U) + + +/* + Register RGX_CR_TPU_PERF_INDIRECT +*/ +#define RGX_CR_TPU_PERF_INDIRECT (0x83F0U) +#define RGX_CR_TPU_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000007)) +#define RGX_CR_TPU_PERF_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_TPU_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF8U) + + +/* + Register RGX_CR_RASTERISATION_PERF_INDIRECT +*/ +#define RGX_CR_RASTERISATION_PERF_INDIRECT (0x8318U) +#define RGX_CR_RASTERISATION_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U) + + +/* + Register RGX_CR_TPU_MCU_L0_PERF_INDIRECT +*/ +#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT (0x8028U) +#define 
RGX_CR_TPU_MCU_L0_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000007)) +#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF8U) + + +/* + Register RGX_CR_USC_PERF_INDIRECT +*/ +#define RGX_CR_USC_PERF_INDIRECT (0x8030U) +#define RGX_CR_USC_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_USC_PERF_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_USC_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U) + + +/* + Register RGX_CR_BLACKPEARL_INDIRECT +*/ +#define RGX_CR_BLACKPEARL_INDIRECT (0x8388U) +#define RGX_CR_BLACKPEARL_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_BLACKPEARL_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_BLACKPEARL_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_BLACKPEARL_PERF_INDIRECT +*/ +#define RGX_CR_BLACKPEARL_PERF_INDIRECT (0x83F8U) +#define RGX_CR_BLACKPEARL_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_TEXAS3_PERF_INDIRECT +*/ +#define RGX_CR_TEXAS3_PERF_INDIRECT (0x83D0U) +#define RGX_CR_TEXAS3_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000007)) +#define RGX_CR_TEXAS3_PERF_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_TEXAS3_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF8U) + + +/* + Register RGX_CR_TEXAS_PERF_INDIRECT +*/ +#define RGX_CR_TEXAS_PERF_INDIRECT (0x8288U) +#define RGX_CR_TEXAS_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_TEXAS_PERF_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_TEXAS_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_BX_TU_PERF_INDIRECT +*/ +#define RGX_CR_BX_TU_PERF_INDIRECT (0xC900U) +#define RGX_CR_BX_TU_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_BX_TU_PERF_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_BX_TU_PERF_INDIRECT_ADDRESS_CLRMSK 
(0xFFFFFFFCU) + + +/* + Register RGX_CR_CLK_CTRL +*/ +#define RGX_CR_CLK_CTRL (0x0000U) +#define RGX_CR_CLK_CTRL__PBE2_XE__MASKFULL (IMG_UINT64_C(0xFFFFFF003F3FFFFF)) +#define RGX_CR_CLK_CTRL__S7_TOP__MASKFULL (IMG_UINT64_C(0xCFCF03000F3F3F0F)) +#define RGX_CR_CLK_CTRL_MASKFULL (IMG_UINT64_C(0xFFFFFF003F3FFFFF)) +#define RGX_CR_CLK_CTRL_BIF_TEXAS_SHIFT (62U) +#define RGX_CR_CLK_CTRL_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_BIF_TEXAS_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_BIF_TEXAS_ON (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_CLK_CTRL_BIF_TEXAS_AUTO (IMG_UINT64_C(0x8000000000000000)) +#define RGX_CR_CLK_CTRL_IPP_SHIFT (60U) +#define RGX_CR_CLK_CTRL_IPP_CLRMSK (IMG_UINT64_C(0xCFFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_IPP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_IPP_ON (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_CLK_CTRL_IPP_AUTO (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_CLK_CTRL_FBC_SHIFT (58U) +#define RGX_CR_CLK_CTRL_FBC_CLRMSK (IMG_UINT64_C(0xF3FFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_FBC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_FBC_ON (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_CLK_CTRL_FBC_AUTO (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_CLK_CTRL_FBDC_SHIFT (56U) +#define RGX_CR_CLK_CTRL_FBDC_CLRMSK (IMG_UINT64_C(0xFCFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_FBDC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_FBDC_ON (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_CLK_CTRL_FBDC_AUTO (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_CLK_CTRL_FB_TLCACHE_SHIFT (54U) +#define RGX_CR_CLK_CTRL_FB_TLCACHE_CLRMSK (IMG_UINT64_C(0xFF3FFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_FB_TLCACHE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_FB_TLCACHE_ON (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_CLK_CTRL_FB_TLCACHE_AUTO (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_CLK_CTRL_USCS_SHIFT 
(52U) +#define RGX_CR_CLK_CTRL_USCS_CLRMSK (IMG_UINT64_C(0xFFCFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_USCS_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_USCS_ON (IMG_UINT64_C(0x0010000000000000)) +#define RGX_CR_CLK_CTRL_USCS_AUTO (IMG_UINT64_C(0x0020000000000000)) +#define RGX_CR_CLK_CTRL_PBE_SHIFT (50U) +#define RGX_CR_CLK_CTRL_PBE_CLRMSK (IMG_UINT64_C(0xFFF3FFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_PBE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_PBE_ON (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_CLK_CTRL_PBE_AUTO (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_CLK_CTRL_MCU_L1_SHIFT (48U) +#define RGX_CR_CLK_CTRL_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFCFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_MCU_L1_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_MCU_L1_ON (IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_CLK_CTRL_MCU_L1_AUTO (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_CLK_CTRL_CDM_SHIFT (46U) +#define RGX_CR_CLK_CTRL_CDM_CLRMSK (IMG_UINT64_C(0xFFFF3FFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_CDM_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_CDM_ON (IMG_UINT64_C(0x0000400000000000)) +#define RGX_CR_CLK_CTRL_CDM_AUTO (IMG_UINT64_C(0x0000800000000000)) +#define RGX_CR_CLK_CTRL_SIDEKICK_SHIFT (44U) +#define RGX_CR_CLK_CTRL_SIDEKICK_CLRMSK (IMG_UINT64_C(0xFFFFCFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_SIDEKICK_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_SIDEKICK_ON (IMG_UINT64_C(0x0000100000000000)) +#define RGX_CR_CLK_CTRL_SIDEKICK_AUTO (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_SHIFT (42U) +#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_CLRMSK (IMG_UINT64_C(0xFFFFF3FFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_ON (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_AUTO (IMG_UINT64_C(0x0000080000000000)) +#define RGX_CR_CLK_CTRL_BIF_SHIFT (40U) +#define 
RGX_CR_CLK_CTRL_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_BIF_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_BIF_ON (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_CLK_CTRL_BIF_AUTO (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_SHIFT (28U) +#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFCFFFFFFF)) +#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_ON (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_AUTO (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_CLK_CTRL_MCU_L0_SHIFT (26U) +#define RGX_CR_CLK_CTRL_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF3FFFFFF)) +#define RGX_CR_CLK_CTRL_MCU_L0_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_MCU_L0_ON (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_CLK_CTRL_MCU_L0_AUTO (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_CLK_CTRL_TPU_SHIFT (24U) +#define RGX_CR_CLK_CTRL_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF)) +#define RGX_CR_CLK_CTRL_TPU_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_TPU_ON (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_CLK_CTRL_TPU_AUTO (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_CLK_CTRL_USC_SHIFT (20U) +#define RGX_CR_CLK_CTRL_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF)) +#define RGX_CR_CLK_CTRL_USC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_USC_ON (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_CLK_CTRL_USC_AUTO (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_CLK_CTRL_TLA_SHIFT (18U) +#define RGX_CR_CLK_CTRL_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF)) +#define RGX_CR_CLK_CTRL_TLA_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_TLA_ON (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_CLK_CTRL_TLA_AUTO (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_CLK_CTRL_SLC_SHIFT (16U) +#define RGX_CR_CLK_CTRL_SLC_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) +#define RGX_CR_CLK_CTRL_SLC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_SLC_ON (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_CLK_CTRL_SLC_AUTO (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_CLK_CTRL_UVS_SHIFT (14U) +#define RGX_CR_CLK_CTRL_UVS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF3FFF)) +#define RGX_CR_CLK_CTRL_UVS_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_UVS_ON (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_CLK_CTRL_UVS_AUTO (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_CLK_CTRL_PDS_SHIFT (12U) +#define RGX_CR_CLK_CTRL_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF)) +#define RGX_CR_CLK_CTRL_PDS_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_PDS_ON (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_CLK_CTRL_PDS_AUTO (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_CLK_CTRL_VDM_SHIFT (10U) +#define RGX_CR_CLK_CTRL_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF)) +#define RGX_CR_CLK_CTRL_VDM_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_VDM_ON (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_CLK_CTRL_VDM_AUTO (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_CLK_CTRL_PM_SHIFT (8U) +#define RGX_CR_CLK_CTRL_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) +#define RGX_CR_CLK_CTRL_PM_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_PM_ON (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_CTRL_PM_AUTO (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_CTRL_GPP_SHIFT (6U) +#define RGX_CR_CLK_CTRL_GPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF3F)) +#define RGX_CR_CLK_CTRL_GPP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_GPP_ON (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_CLK_CTRL_GPP_AUTO (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_CLK_CTRL_TE_SHIFT (4U) +#define RGX_CR_CLK_CTRL_TE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) +#define RGX_CR_CLK_CTRL_TE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define 
RGX_CR_CLK_CTRL_TE_ON (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_CLK_CTRL_TE_AUTO (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_CLK_CTRL_TSP_SHIFT (2U) +#define RGX_CR_CLK_CTRL_TSP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF3)) +#define RGX_CR_CLK_CTRL_TSP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_TSP_ON (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_CLK_CTRL_TSP_AUTO (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_CLK_CTRL_ISP_SHIFT (0U) +#define RGX_CR_CLK_CTRL_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_CLK_CTRL_ISP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_ISP_ON (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_CLK_CTRL_ISP_AUTO (IMG_UINT64_C(0x0000000000000002)) + + +/* + Register RGX_CR_CLK_STATUS +*/ +#define RGX_CR_CLK_STATUS (0x0008U) +#define RGX_CR_CLK_STATUS__PBE2_XE__MASKFULL (IMG_UINT64_C(0x00000001FFF077FF)) +#define RGX_CR_CLK_STATUS__S7_TOP__MASKFULL (IMG_UINT64_C(0x00000001B3101773)) +#define RGX_CR_CLK_STATUS_MASKFULL (IMG_UINT64_C(0x00000001FFF077FF)) +#define RGX_CR_CLK_STATUS_MCU_FBTC_SHIFT (32U) +#define RGX_CR_CLK_STATUS_MCU_FBTC_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_CLK_STATUS_MCU_FBTC_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_MCU_FBTC_RUNNING (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_CLK_STATUS_BIF_TEXAS_SHIFT (31U) +#define RGX_CR_CLK_STATUS_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_CLK_STATUS_BIF_TEXAS_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_BIF_TEXAS_RUNNING (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_CLK_STATUS_IPP_SHIFT (30U) +#define RGX_CR_CLK_STATUS_IPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_CLK_STATUS_IPP_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_IPP_RUNNING (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_CLK_STATUS_FBC_SHIFT (29U) +#define RGX_CR_CLK_STATUS_FBC_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_CLK_STATUS_FBC_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_FBC_RUNNING (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_CLK_STATUS_FBDC_SHIFT (28U) +#define RGX_CR_CLK_STATUS_FBDC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_CLK_STATUS_FBDC_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_FBDC_RUNNING (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_CLK_STATUS_FB_TLCACHE_SHIFT (27U) +#define RGX_CR_CLK_STATUS_FB_TLCACHE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_CLK_STATUS_FB_TLCACHE_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_FB_TLCACHE_RUNNING (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_CLK_STATUS_USCS_SHIFT (26U) +#define RGX_CR_CLK_STATUS_USCS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_CLK_STATUS_USCS_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_USCS_RUNNING (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_CLK_STATUS_PBE_SHIFT (25U) +#define RGX_CR_CLK_STATUS_PBE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_CLK_STATUS_PBE_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_PBE_RUNNING (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_CLK_STATUS_MCU_L1_SHIFT (24U) +#define RGX_CR_CLK_STATUS_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_CLK_STATUS_MCU_L1_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_MCU_L1_RUNNING (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_CLK_STATUS_CDM_SHIFT (23U) +#define RGX_CR_CLK_STATUS_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_CLK_STATUS_CDM_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_CDM_RUNNING (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_CLK_STATUS_SIDEKICK_SHIFT (22U) +#define RGX_CR_CLK_STATUS_SIDEKICK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_CLK_STATUS_SIDEKICK_GATED 
(IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_SIDEKICK_RUNNING (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_SHIFT (21U) +#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_RUNNING (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_CLK_STATUS_BIF_SHIFT (20U) +#define RGX_CR_CLK_STATUS_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_CLK_STATUS_BIF_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_BIF_RUNNING (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_SHIFT (14U) +#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_RUNNING (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_CLK_STATUS_MCU_L0_SHIFT (13U) +#define RGX_CR_CLK_STATUS_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_CLK_STATUS_MCU_L0_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_MCU_L0_RUNNING (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_CLK_STATUS_TPU_SHIFT (12U) +#define RGX_CR_CLK_STATUS_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_CLK_STATUS_TPU_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_TPU_RUNNING (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_CLK_STATUS_USC_SHIFT (10U) +#define RGX_CR_CLK_STATUS_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_CLK_STATUS_USC_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_USC_RUNNING (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_CLK_STATUS_TLA_SHIFT (9U) +#define RGX_CR_CLK_STATUS_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_CLK_STATUS_TLA_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_TLA_RUNNING 
(IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_STATUS_SLC_SHIFT (8U) +#define RGX_CR_CLK_STATUS_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_CLK_STATUS_SLC_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_SLC_RUNNING (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_STATUS_UVS_SHIFT (7U) +#define RGX_CR_CLK_STATUS_UVS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_CLK_STATUS_UVS_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_UVS_RUNNING (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_CLK_STATUS_PDS_SHIFT (6U) +#define RGX_CR_CLK_STATUS_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_CLK_STATUS_PDS_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_PDS_RUNNING (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_CLK_STATUS_VDM_SHIFT (5U) +#define RGX_CR_CLK_STATUS_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_CLK_STATUS_VDM_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_VDM_RUNNING (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_CLK_STATUS_PM_SHIFT (4U) +#define RGX_CR_CLK_STATUS_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_CLK_STATUS_PM_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_PM_RUNNING (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_CLK_STATUS_GPP_SHIFT (3U) +#define RGX_CR_CLK_STATUS_GPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_CLK_STATUS_GPP_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_GPP_RUNNING (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_CLK_STATUS_TE_SHIFT (2U) +#define RGX_CR_CLK_STATUS_TE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_CLK_STATUS_TE_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_TE_RUNNING (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_CLK_STATUS_TSP_SHIFT (1U) +#define RGX_CR_CLK_STATUS_TSP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define 
RGX_CR_CLK_STATUS_TSP_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_TSP_RUNNING (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_CLK_STATUS_ISP_SHIFT (0U) +#define RGX_CR_CLK_STATUS_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_CLK_STATUS_ISP_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_ISP_RUNNING (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_CORE_ID +*/ +#define RGX_CR_CORE_ID__PBVNC (0x0020U) +#define RGX_CR_CORE_ID__PBVNC__MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT (48U) +#define RGX_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) +#define RGX_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT (32U) +#define RGX_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF)) +#define RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT (16U) +#define RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF)) +#define RGX_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT (0U) +#define RGX_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register RGX_CR_CORE_ID +*/ +#define RGX_CR_CORE_ID (0x0018U) +#define RGX_CR_CORE_ID_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_CORE_ID_ID_SHIFT (16U) +#define RGX_CR_CORE_ID_ID_CLRMSK (0x0000FFFFU) +#define RGX_CR_CORE_ID_CONFIG_SHIFT (0U) +#define RGX_CR_CORE_ID_CONFIG_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_CORE_REVISION +*/ +#define RGX_CR_CORE_REVISION (0x0020U) +#define RGX_CR_CORE_REVISION_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_CORE_REVISION_DESIGNER_SHIFT (24U) +#define RGX_CR_CORE_REVISION_DESIGNER_CLRMSK (0x00FFFFFFU) +#define RGX_CR_CORE_REVISION_MAJOR_SHIFT (16U) +#define RGX_CR_CORE_REVISION_MAJOR_CLRMSK (0xFF00FFFFU) +#define RGX_CR_CORE_REVISION_MINOR_SHIFT (8U) +#define RGX_CR_CORE_REVISION_MINOR_CLRMSK (0xFFFF00FFU) +#define RGX_CR_CORE_REVISION_MAINTENANCE_SHIFT (0U) +#define 
RGX_CR_CORE_REVISION_MAINTENANCE_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_DESIGNER_REV_FIELD1 +*/ +#define RGX_CR_DESIGNER_REV_FIELD1 (0x0028U) +#define RGX_CR_DESIGNER_REV_FIELD1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT (0U) +#define RGX_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_DESIGNER_REV_FIELD2 +*/ +#define RGX_CR_DESIGNER_REV_FIELD2 (0x0030U) +#define RGX_CR_DESIGNER_REV_FIELD2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT (0U) +#define RGX_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_CHANGESET_NUMBER +*/ +#define RGX_CR_CHANGESET_NUMBER (0x0040U) +#define RGX_CR_CHANGESET_NUMBER_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_SHIFT (0U) +#define RGX_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_CLK_XTPLUS_CTRL +*/ +#define RGX_CR_CLK_XTPLUS_CTRL (0x0080U) +#define RGX_CR_CLK_XTPLUS_CTRL_MASKFULL (IMG_UINT64_C(0x0000003FFFFF0000)) +#define RGX_CR_CLK_XTPLUS_CTRL_TDM_SHIFT (36U) +#define RGX_CR_CLK_XTPLUS_CTRL_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFCFFFFFFFFF)) +#define RGX_CR_CLK_XTPLUS_CTRL_TDM_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_TDM_ON (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_TDM_AUTO (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_SHIFT (34U) +#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_CLRMSK (IMG_UINT64_C(0xFFFFFFF3FFFFFFFF)) +#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_ON (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_AUTO (IMG_UINT64_C(0x0000000800000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_IPF_SHIFT (32U) +#define RGX_CR_CLK_XTPLUS_CTRL_IPF_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFCFFFFFFFF)) +#define RGX_CR_CLK_XTPLUS_CTRL_IPF_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_IPF_ON (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_IPF_AUTO (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_SHIFT (30U) +#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF3FFFFFFF)) +#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_ON (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_AUTO (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_SHIFT (28U) +#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFCFFFFFFF)) +#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_ON (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_AUTO (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_SHIFT (26U) +#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF3FFFFFF)) +#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_ON (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_AUTO (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_SHIFT (24U) +#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF)) +#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_ON (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_AUTO (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_SHIFT (22U) +#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF3FFFFF)) +#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_ON 
(IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_AUTO (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_SHIFT (20U) +#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF)) +#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_ON (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_AUTO (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_SHIFT (18U) +#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF)) +#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_ON (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_AUTO (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_SHIFT (16U) +#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) +#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_ON (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_AUTO (IMG_UINT64_C(0x0000000000020000)) + + +/* + Register RGX_CR_CLK_XTPLUS_STATUS +*/ +#define RGX_CR_CLK_XTPLUS_STATUS (0x0088U) +#define RGX_CR_CLK_XTPLUS_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000007FF)) +#define RGX_CR_CLK_XTPLUS_STATUS_TDM_SHIFT (10U) +#define RGX_CR_CLK_XTPLUS_STATUS_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_CLK_XTPLUS_STATUS_TDM_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_STATUS_TDM_RUNNING (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_CLK_XTPLUS_STATUS_IPF_SHIFT (9U) +#define RGX_CR_CLK_XTPLUS_STATUS_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_CLK_XTPLUS_STATUS_IPF_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_STATUS_IPF_RUNNING 
(IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_SHIFT (8U) +#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_RUNNING (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_SHIFT (7U) +#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_RUNNING (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_SHIFT (6U) +#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_RUNNING (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_SHIFT (5U) +#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_RUNNING (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_SHIFT (4U) +#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_RUNNING (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_SHIFT (3U) +#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_RUNNING (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_SHIFT (2U) +#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) 
+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_RUNNING (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_SHIFT (1U) +#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_RUNNING (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_SHIFT (0U) +#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_RUNNING (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_SOFT_RESET +*/ +#define RGX_CR_SOFT_RESET (0x0100U) +#define RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL (IMG_UINT64_C(0xFFEFFFFFFFFFFC3D)) +#define RGX_CR_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x00E7FFFFFFFFFC3D)) +#define RGX_CR_SOFT_RESET_PHANTOM3_CORE_SHIFT (63U) +#define RGX_CR_SOFT_RESET_PHANTOM3_CORE_CLRMSK (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_PHANTOM3_CORE_EN (IMG_UINT64_C(0x8000000000000000)) +#define RGX_CR_SOFT_RESET_PHANTOM2_CORE_SHIFT (62U) +#define RGX_CR_SOFT_RESET_PHANTOM2_CORE_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_PHANTOM2_CORE_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_SOFT_RESET_BERNADO2_CORE_SHIFT (61U) +#define RGX_CR_SOFT_RESET_BERNADO2_CORE_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_BERNADO2_CORE_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_SOFT_RESET_JONES_CORE_SHIFT (60U) +#define RGX_CR_SOFT_RESET_JONES_CORE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_JONES_CORE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_SOFT_RESET_TILING_CORE_SHIFT (59U) +#define RGX_CR_SOFT_RESET_TILING_CORE_CLRMSK 
(IMG_UINT64_C(0xF7FFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_TILING_CORE_EN (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_SOFT_RESET_TE3_SHIFT (58U) +#define RGX_CR_SOFT_RESET_TE3_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_TE3_EN (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_SOFT_RESET_VCE_SHIFT (57U) +#define RGX_CR_SOFT_RESET_VCE_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_VCE_EN (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_SOFT_RESET_VBS_SHIFT (56U) +#define RGX_CR_SOFT_RESET_VBS_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_VBS_EN (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_SOFT_RESET_DPX1_CORE_SHIFT (55U) +#define RGX_CR_SOFT_RESET_DPX1_CORE_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_DPX1_CORE_EN (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_SOFT_RESET_DPX0_CORE_SHIFT (54U) +#define RGX_CR_SOFT_RESET_DPX0_CORE_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_DPX0_CORE_EN (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_SOFT_RESET_FBA_SHIFT (53U) +#define RGX_CR_SOFT_RESET_FBA_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_FBA_EN (IMG_UINT64_C(0x0020000000000000)) +#define RGX_CR_SOFT_RESET_FB_CDC_SHIFT (51U) +#define RGX_CR_SOFT_RESET_FB_CDC_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_FB_CDC_EN (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_SOFT_RESET_SH_SHIFT (50U) +#define RGX_CR_SOFT_RESET_SH_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_SH_EN (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_SOFT_RESET_VRDM_SHIFT (49U) +#define RGX_CR_SOFT_RESET_VRDM_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_VRDM_EN (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_SOFT_RESET_MCU_FBTC_SHIFT (48U) +#define RGX_CR_SOFT_RESET_MCU_FBTC_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_MCU_FBTC_EN 
(IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_SHIFT (47U) +#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_EN (IMG_UINT64_C(0x0000800000000000)) +#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_SHIFT (46U) +#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_EN (IMG_UINT64_C(0x0000400000000000)) +#define RGX_CR_SOFT_RESET_BERNADO1_CORE_SHIFT (45U) +#define RGX_CR_SOFT_RESET_BERNADO1_CORE_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_BERNADO1_CORE_EN (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_SOFT_RESET_BERNADO0_CORE_SHIFT (44U) +#define RGX_CR_SOFT_RESET_BERNADO0_CORE_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_BERNADO0_CORE_EN (IMG_UINT64_C(0x0000100000000000)) +#define RGX_CR_SOFT_RESET_IPP_SHIFT (43U) +#define RGX_CR_SOFT_RESET_IPP_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_IPP_EN (IMG_UINT64_C(0x0000080000000000)) +#define RGX_CR_SOFT_RESET_BIF_TEXAS_SHIFT (42U) +#define RGX_CR_SOFT_RESET_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_BIF_TEXAS_EN (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_SOFT_RESET_TORNADO_CORE_SHIFT (41U) +#define RGX_CR_SOFT_RESET_TORNADO_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_TORNADO_CORE_EN (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_SOFT_RESET_DUST_H_CORE_SHIFT (40U) +#define RGX_CR_SOFT_RESET_DUST_H_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_DUST_H_CORE_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_SOFT_RESET_DUST_G_CORE_SHIFT (39U) +#define RGX_CR_SOFT_RESET_DUST_G_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) +#define RGX_CR_SOFT_RESET_DUST_G_CORE_EN (IMG_UINT64_C(0x0000008000000000)) +#define RGX_CR_SOFT_RESET_DUST_F_CORE_SHIFT (38U) +#define 
RGX_CR_SOFT_RESET_DUST_F_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_DUST_F_CORE_EN (IMG_UINT64_C(0x0000004000000000)) +#define RGX_CR_SOFT_RESET_DUST_E_CORE_SHIFT (37U) +#define RGX_CR_SOFT_RESET_DUST_E_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_DUST_E_CORE_EN (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_SOFT_RESET_DUST_D_CORE_SHIFT (36U) +#define RGX_CR_SOFT_RESET_DUST_D_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_DUST_D_CORE_EN (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_SOFT_RESET_DUST_C_CORE_SHIFT (35U) +#define RGX_CR_SOFT_RESET_DUST_C_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) +#define RGX_CR_SOFT_RESET_DUST_C_CORE_EN (IMG_UINT64_C(0x0000000800000000)) +#define RGX_CR_SOFT_RESET_MMU_SHIFT (34U) +#define RGX_CR_SOFT_RESET_MMU_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) +#define RGX_CR_SOFT_RESET_MMU_EN (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_SOFT_RESET_BIF1_SHIFT (33U) +#define RGX_CR_SOFT_RESET_BIF1_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) +#define RGX_CR_SOFT_RESET_BIF1_EN (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_SOFT_RESET_GARTEN_SHIFT (32U) +#define RGX_CR_SOFT_RESET_GARTEN_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_SOFT_RESET_GARTEN_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_SOFT_RESET_CPU_SHIFT (32U) +#define RGX_CR_SOFT_RESET_CPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_SOFT_RESET_CPU_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_SOFT_RESET_RASCAL_CORE_SHIFT (31U) +#define RGX_CR_SOFT_RESET_RASCAL_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_SOFT_RESET_RASCAL_CORE_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_SOFT_RESET_DUST_B_CORE_SHIFT (30U) +#define RGX_CR_SOFT_RESET_DUST_B_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_SOFT_RESET_DUST_B_CORE_EN (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_SOFT_RESET_DUST_A_CORE_SHIFT (29U) 
+#define RGX_CR_SOFT_RESET_DUST_A_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_SOFT_RESET_DUST_A_CORE_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_SOFT_RESET_FB_TLCACHE_SHIFT (28U) +#define RGX_CR_SOFT_RESET_FB_TLCACHE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_SOFT_RESET_FB_TLCACHE_EN (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_SOFT_RESET_SLC_SHIFT (27U) +#define RGX_CR_SOFT_RESET_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_SOFT_RESET_SLC_EN (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_SOFT_RESET_TLA_SHIFT (26U) +#define RGX_CR_SOFT_RESET_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_SOFT_RESET_TLA_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_SOFT_RESET_UVS_SHIFT (25U) +#define RGX_CR_SOFT_RESET_UVS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_SOFT_RESET_UVS_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_SOFT_RESET_TE_SHIFT (24U) +#define RGX_CR_SOFT_RESET_TE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_SOFT_RESET_TE_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_SOFT_RESET_GPP_SHIFT (23U) +#define RGX_CR_SOFT_RESET_GPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_SOFT_RESET_GPP_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_SOFT_RESET_FBDC_SHIFT (22U) +#define RGX_CR_SOFT_RESET_FBDC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_SOFT_RESET_FBDC_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_SOFT_RESET_FBC_SHIFT (21U) +#define RGX_CR_SOFT_RESET_FBC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_SOFT_RESET_FBC_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_SOFT_RESET_PM_SHIFT (20U) +#define RGX_CR_SOFT_RESET_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_SOFT_RESET_PM_EN (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_SOFT_RESET_PBE_SHIFT (19U) +#define RGX_CR_SOFT_RESET_PBE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_SOFT_RESET_PBE_EN 
(IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_SOFT_RESET_USC_SHARED_SHIFT (18U) +#define RGX_CR_SOFT_RESET_USC_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_SOFT_RESET_USC_SHARED_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_SOFT_RESET_MCU_L1_SHIFT (17U) +#define RGX_CR_SOFT_RESET_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_SOFT_RESET_MCU_L1_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_SOFT_RESET_BIF_SHIFT (16U) +#define RGX_CR_SOFT_RESET_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_SOFT_RESET_BIF_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_SOFT_RESET_CDM_SHIFT (15U) +#define RGX_CR_SOFT_RESET_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_SOFT_RESET_CDM_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_SOFT_RESET_VDM_SHIFT (14U) +#define RGX_CR_SOFT_RESET_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_SOFT_RESET_VDM_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_SOFT_RESET_TESS_SHIFT (13U) +#define RGX_CR_SOFT_RESET_TESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_SOFT_RESET_TESS_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_SOFT_RESET_PDS_SHIFT (12U) +#define RGX_CR_SOFT_RESET_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_SOFT_RESET_PDS_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_SOFT_RESET_ISP_SHIFT (11U) +#define RGX_CR_SOFT_RESET_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_SOFT_RESET_ISP_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_SOFT_RESET_TSP_SHIFT (10U) +#define RGX_CR_SOFT_RESET_TSP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_SOFT_RESET_TSP_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_SOFT_RESET_SYSARB_SHIFT (5U) +#define RGX_CR_SOFT_RESET_SYSARB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_SOFT_RESET_SYSARB_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_SHIFT (4U) +#define 
RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_SOFT_RESET_MCU_L0_SHIFT (3U) +#define RGX_CR_SOFT_RESET_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_SOFT_RESET_MCU_L0_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_SOFT_RESET_TPU_SHIFT (2U) +#define RGX_CR_SOFT_RESET_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_SOFT_RESET_TPU_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_SOFT_RESET_USC_SHIFT (0U) +#define RGX_CR_SOFT_RESET_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_SOFT_RESET_USC_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_SOFT_RESET2 +*/ +#define RGX_CR_SOFT_RESET2 (0x0108U) +#define RGX_CR_SOFT_RESET2_MASKFULL (IMG_UINT64_C(0x00000000001FFFFF)) +#define RGX_CR_SOFT_RESET2_SPFILTER_SHIFT (12U) +#define RGX_CR_SOFT_RESET2_SPFILTER_CLRMSK (0xFFE00FFFU) +#define RGX_CR_SOFT_RESET2_TDM_SHIFT (11U) +#define RGX_CR_SOFT_RESET2_TDM_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_SOFT_RESET2_TDM_EN (0x00000800U) +#define RGX_CR_SOFT_RESET2_ASTC_SHIFT (10U) +#define RGX_CR_SOFT_RESET2_ASTC_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_SOFT_RESET2_ASTC_EN (0x00000400U) +#define RGX_CR_SOFT_RESET2_BLACKPEARL_SHIFT (9U) +#define RGX_CR_SOFT_RESET2_BLACKPEARL_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_SOFT_RESET2_BLACKPEARL_EN (0x00000200U) +#define RGX_CR_SOFT_RESET2_USCPS_SHIFT (8U) +#define RGX_CR_SOFT_RESET2_USCPS_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_SOFT_RESET2_USCPS_EN (0x00000100U) +#define RGX_CR_SOFT_RESET2_IPF_SHIFT (7U) +#define RGX_CR_SOFT_RESET2_IPF_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_SOFT_RESET2_IPF_EN (0x00000080U) +#define RGX_CR_SOFT_RESET2_GEOMETRY_SHIFT (6U) +#define RGX_CR_SOFT_RESET2_GEOMETRY_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_SOFT_RESET2_GEOMETRY_EN (0x00000040U) +#define RGX_CR_SOFT_RESET2_USC_SHARED_SHIFT (5U) +#define RGX_CR_SOFT_RESET2_USC_SHARED_CLRMSK (0xFFFFFFDFU) 
+#define RGX_CR_SOFT_RESET2_USC_SHARED_EN (0x00000020U) +#define RGX_CR_SOFT_RESET2_PDS_SHARED_SHIFT (4U) +#define RGX_CR_SOFT_RESET2_PDS_SHARED_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_SOFT_RESET2_PDS_SHARED_EN (0x00000010U) +#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_SHIFT (3U) +#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_EN (0x00000008U) +#define RGX_CR_SOFT_RESET2_PIXEL_SHIFT (2U) +#define RGX_CR_SOFT_RESET2_PIXEL_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SOFT_RESET2_PIXEL_EN (0x00000004U) +#define RGX_CR_SOFT_RESET2_CDM_SHIFT (1U) +#define RGX_CR_SOFT_RESET2_CDM_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SOFT_RESET2_CDM_EN (0x00000002U) +#define RGX_CR_SOFT_RESET2_VERTEX_SHIFT (0U) +#define RGX_CR_SOFT_RESET2_VERTEX_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SOFT_RESET2_VERTEX_EN (0x00000001U) + + +/* + Register RGX_CR_EVENT_STATUS +*/ +#define RGX_CR_EVENT_STATUS (0x0130U) +#define RGX_CR_EVENT_STATUS__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000E00DFFFF)) +#define RGX_CR_EVENT_STATUS__SIGNALS__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) +#define RGX_CR_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_SHIFT (31U) +#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_EN (0x80000000U) +#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_SHIFT (30U) +#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_EN (0x40000000U) +#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_SHIFT (29U) +#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U) +#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_SHIFT (28U) +#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_EN (0x10000000U) +#define 
RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_SHIFT (27U) +#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_EN (0x08000000U) +#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_SHIFT (26U) +#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_EN (0x04000000U) +#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_SHIFT (25U) +#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_EN (0x02000000U) +#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_SHIFT (24U) +#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_EN (0x01000000U) +#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_SHIFT (23U) +#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_CLRMSK (0xFF7FFFFFU) +#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_EN (0x00800000U) +#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_SHIFT (22U) +#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_EN (0x00400000U) +#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_SHIFT (21U) +#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_EN (0x00200000U) +#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_SHIFT (20U) +#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_EN (0x00100000U) +#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_SHIFT (19U) +#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_EN (0x00080000U) +#define RGX_CR_EVENT_STATUS_SLAVE_REQ_SHIFT (19U) +#define RGX_CR_EVENT_STATUS_SLAVE_REQ_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_EVENT_STATUS_SLAVE_REQ_EN (0x00080000U) +#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_SHIFT (18U) +#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_CLRMSK (0xFFFBFFFFU) +#define 
RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_EN (0x00040000U) +#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) +#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) +#define RGX_CR_EVENT_STATUS_SHG_FINISHED_SHIFT (17U) +#define RGX_CR_EVENT_STATUS_SHG_FINISHED_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_EVENT_STATUS_SHG_FINISHED_EN (0x00020000U) +#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_SHIFT (17U) +#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_EN (0x00020000U) +#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_SHIFT (16U) +#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_EN (0x00010000U) +#define RGX_CR_EVENT_STATUS_USC_TRIGGER_SHIFT (15U) +#define RGX_CR_EVENT_STATUS_USC_TRIGGER_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_EVENT_STATUS_USC_TRIGGER_EN (0x00008000U) +#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_SHIFT (14U) +#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_EN (0x00004000U) +#define RGX_CR_EVENT_STATUS_GPIO_ACK_SHIFT (13U) +#define RGX_CR_EVENT_STATUS_GPIO_ACK_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_EVENT_STATUS_GPIO_ACK_EN (0x00002000U) +#define RGX_CR_EVENT_STATUS_GPIO_REQ_SHIFT (12U) +#define RGX_CR_EVENT_STATUS_GPIO_REQ_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_EVENT_STATUS_GPIO_REQ_EN (0x00001000U) +#define RGX_CR_EVENT_STATUS_POWER_ABORT_SHIFT (11U) +#define RGX_CR_EVENT_STATUS_POWER_ABORT_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_EVENT_STATUS_POWER_ABORT_EN (0x00000800U) +#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_SHIFT (10U) +#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN (0x00000400U) +#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_SHIFT (9U) +#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_CLRMSK 
(0xFFFFFDFFU) +#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN (0x00000200U) +#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_SHIFT (8U) +#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_EN (0x00000100U) +#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_SHIFT (7U) +#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_EN (0x00000080U) +#define RGX_CR_EVENT_STATUS_TA_TERMINATE_SHIFT (6U) +#define RGX_CR_EVENT_STATUS_TA_TERMINATE_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_EVENT_STATUS_TA_TERMINATE_EN (0x00000040U) +#define RGX_CR_EVENT_STATUS_TA_FINISHED_SHIFT (5U) +#define RGX_CR_EVENT_STATUS_TA_FINISHED_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_EVENT_STATUS_TA_FINISHED_EN (0x00000020U) +#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_SHIFT (4U) +#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_EN (0x00000010U) +#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT (3U) +#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_EN (0x00000008U) +#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_SHIFT (2U) +#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_EN (0x00000004U) +#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_SHIFT (1U) +#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_EN (0x00000002U) +#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_SHIFT (0U) +#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_EN (0x00000001U) + + +/* + Register RGX_CR_TIMER +*/ +#define RGX_CR_TIMER (0x0160U) +#define RGX_CR_TIMER_MASKFULL (IMG_UINT64_C(0x8000FFFFFFFFFFFF)) +#define RGX_CR_TIMER_BIT31_SHIFT (63U) +#define RGX_CR_TIMER_BIT31_CLRMSK (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) +#define RGX_CR_TIMER_BIT31_EN 
(IMG_UINT64_C(0x8000000000000000)) +#define RGX_CR_TIMER_VALUE_SHIFT (0U) +#define RGX_CR_TIMER_VALUE_CLRMSK (IMG_UINT64_C(0xFFFF000000000000)) + + +/* + Register RGX_CR_TLA_STATUS +*/ +#define RGX_CR_TLA_STATUS (0x0178U) +#define RGX_CR_TLA_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_TLA_STATUS_BLIT_COUNT_SHIFT (39U) +#define RGX_CR_TLA_STATUS_BLIT_COUNT_CLRMSK (IMG_UINT64_C(0x0000007FFFFFFFFF)) +#define RGX_CR_TLA_STATUS_REQUEST_SHIFT (7U) +#define RGX_CR_TLA_STATUS_REQUEST_CLRMSK (IMG_UINT64_C(0xFFFFFF800000007F)) +#define RGX_CR_TLA_STATUS_FIFO_FULLNESS_SHIFT (1U) +#define RGX_CR_TLA_STATUS_FIFO_FULLNESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF81)) +#define RGX_CR_TLA_STATUS_BUSY_SHIFT (0U) +#define RGX_CR_TLA_STATUS_BUSY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_TLA_STATUS_BUSY_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_PM_PARTIAL_RENDER_ENABLE +*/ +#define RGX_CR_PM_PARTIAL_RENDER_ENABLE (0x0338U) +#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_SHIFT (0U) +#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_EN (0x00000001U) + + +/* + Register RGX_CR_SIDEKICK_IDLE +*/ +#define RGX_CR_SIDEKICK_IDLE (0x03C8U) +#define RGX_CR_SIDEKICK_IDLE_MASKFULL (IMG_UINT64_C(0x000000000000007F)) +#define RGX_CR_SIDEKICK_IDLE_FB_CDC_SHIFT (6U) +#define RGX_CR_SIDEKICK_IDLE_FB_CDC_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_SIDEKICK_IDLE_FB_CDC_EN (0x00000040U) +#define RGX_CR_SIDEKICK_IDLE_MMU_SHIFT (5U) +#define RGX_CR_SIDEKICK_IDLE_MMU_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_SIDEKICK_IDLE_MMU_EN (0x00000020U) +#define RGX_CR_SIDEKICK_IDLE_BIF128_SHIFT (4U) +#define RGX_CR_SIDEKICK_IDLE_BIF128_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_SIDEKICK_IDLE_BIF128_EN (0x00000010U) +#define RGX_CR_SIDEKICK_IDLE_TLA_SHIFT (3U) +#define RGX_CR_SIDEKICK_IDLE_TLA_CLRMSK (0xFFFFFFF7U) +#define 
RGX_CR_SIDEKICK_IDLE_TLA_EN (0x00000008U) +#define RGX_CR_SIDEKICK_IDLE_GARTEN_SHIFT (2U) +#define RGX_CR_SIDEKICK_IDLE_GARTEN_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SIDEKICK_IDLE_GARTEN_EN (0x00000004U) +#define RGX_CR_SIDEKICK_IDLE_HOSTIF_SHIFT (1U) +#define RGX_CR_SIDEKICK_IDLE_HOSTIF_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SIDEKICK_IDLE_HOSTIF_EN (0x00000002U) +#define RGX_CR_SIDEKICK_IDLE_SOCIF_SHIFT (0U) +#define RGX_CR_SIDEKICK_IDLE_SOCIF_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SIDEKICK_IDLE_SOCIF_EN (0x00000001U) + + +/* + Register RGX_CR_MARS_IDLE +*/ +#define RGX_CR_MARS_IDLE (0x08F8U) +#define RGX_CR_MARS_IDLE_MASKFULL (IMG_UINT64_C(0x0000000000000007)) +#define RGX_CR_MARS_IDLE_MH_SYSARB0_SHIFT (2U) +#define RGX_CR_MARS_IDLE_MH_SYSARB0_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_MARS_IDLE_MH_SYSARB0_EN (0x00000004U) +#define RGX_CR_MARS_IDLE_CPU_SHIFT (1U) +#define RGX_CR_MARS_IDLE_CPU_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_MARS_IDLE_CPU_EN (0x00000002U) +#define RGX_CR_MARS_IDLE_SOCIF_SHIFT (0U) +#define RGX_CR_MARS_IDLE_SOCIF_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MARS_IDLE_SOCIF_EN (0x00000001U) + + +/* + Register RGX_CR_VDM_CONTEXT_STORE_STATUS +*/ +#define RGX_CR_VDM_CONTEXT_STORE_STATUS (0x0430U) +#define RGX_CR_VDM_CONTEXT_STORE_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000000F3)) +#define RGX_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_SHIFT (4U) +#define RGX_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_CLRMSK (0xFFFFFF0FU) +#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U) +#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN (0x00000002U) +#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT (0U) +#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_EN (0x00000001U) + + +/* + Register RGX_CR_VDM_CONTEXT_STORE_TASK0 +*/ +#define RGX_CR_VDM_CONTEXT_STORE_TASK0 (0x0438U) +#define RGX_CR_VDM_CONTEXT_STORE_TASK0_MASKFULL 
(IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_SHIFT (32U) +#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_SHIFT (0U) +#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) + + +/* + Register RGX_CR_VDM_CONTEXT_STORE_TASK1 +*/ +#define RGX_CR_VDM_CONTEXT_STORE_TASK1 (0x0440U) +#define RGX_CR_VDM_CONTEXT_STORE_TASK1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_SHIFT (0U) +#define RGX_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_VDM_CONTEXT_STORE_TASK2 +*/ +#define RGX_CR_VDM_CONTEXT_STORE_TASK2 (0x0448U) +#define RGX_CR_VDM_CONTEXT_STORE_TASK2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_SHIFT (32U) +#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_SHIFT (0U) +#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) + + +/* + Register RGX_CR_VDM_CONTEXT_RESUME_TASK0 +*/ +#define RGX_CR_VDM_CONTEXT_RESUME_TASK0 (0x0450U) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_SHIFT (32U) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_SHIFT (0U) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) + + +/* + Register RGX_CR_VDM_CONTEXT_RESUME_TASK1 +*/ +#define RGX_CR_VDM_CONTEXT_RESUME_TASK1 (0x0458U) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_SHIFT (0U) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_CLRMSK 
(0x00000000U) + + +/* + Register RGX_CR_VDM_CONTEXT_RESUME_TASK2 +*/ +#define RGX_CR_VDM_CONTEXT_RESUME_TASK2 (0x0460U) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_SHIFT (32U) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_SHIFT (0U) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) + + +/* + Register RGX_CR_CDM_CONTEXT_STORE_STATUS +*/ +#define RGX_CR_CDM_CONTEXT_STORE_STATUS (0x04A0U) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN (0x00000002U) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT (0U) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_EN (0x00000001U) + + +/* + Register RGX_CR_CDM_CONTEXT_PDS0 +*/ +#define RGX_CR_CDM_CONTEXT_PDS0 (0x04A8U) +#define RGX_CR_CDM_CONTEXT_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) +#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT (36U) +#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSIZE (16U) +#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_SHIFT (4U) +#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSIZE (16U) + + +/* + Register RGX_CR_CDM_CONTEXT_PDS1 +*/ +#define RGX_CR_CDM_CONTEXT_PDS1 (0x04B0U) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF)) +#define 
RGX_CR_CDM_CONTEXT_PDS1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0x40000000U) +#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_SHIFT (29U) +#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_EN (0x20000000U) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0x20000000U) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_SHIFT (28U) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_EN (0x10000000U) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_SHIFT (28U) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_EN (0x10000000U) +#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_SHIFT (27U) +#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_EN (0x08000000U) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0xF03FFFFFU) +#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_SHIFT (21U) +#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_CLRMSK (0xF81FFFFFU) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0x00200000U) +#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_SHIFT (20U) +#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_EN (0x00100000U) +#define 
RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0xFFE00FFFU) +#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_SHIFT (11U) +#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_CLRMSK (0xFFF007FFU) +#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_SHIFT (7U) +#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_CLRMSK (0xFFFFF87FU) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0xFFFFF07FU) +#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_SHIFT (1U) +#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_CLRMSK (0xFFFFFF81U) +#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_SHIFT (0U) +#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_EN (0x00000001U) + + +/* + Register RGX_CR_CDM_TERMINATE_PDS +*/ +#define RGX_CR_CDM_TERMINATE_PDS (0x04B8U) +#define RGX_CR_CDM_TERMINATE_PDS_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) +#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_SHIFT (36U) +#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSIZE (16U) +#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_SHIFT (4U) +#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSIZE (16U) + + +/* + Register RGX_CR_CDM_TERMINATE_PDS1 +*/ +#define RGX_CR_CDM_TERMINATE_PDS1 (0x04C0U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0x40000000U) +#define 
RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_SHIFT (29U) +#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_EN (0x20000000U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0x20000000U) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_SHIFT (28U) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_EN (0x10000000U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_SHIFT (28U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_EN (0x10000000U) +#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_SHIFT (27U) +#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_EN (0x08000000U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0xF03FFFFFU) +#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_SHIFT (21U) +#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_CLRMSK (0xF81FFFFFU) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0x00200000U) +#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_SHIFT (20U) +#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_EN (0x00100000U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0xFFE00FFFU) +#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_SHIFT (11U) +#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_CLRMSK (0xFFF007FFU) +#define 
RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_SHIFT (7U) +#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_CLRMSK (0xFFFFF87FU) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0xFFFFF07FU) +#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_SHIFT (1U) +#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_CLRMSK (0xFFFFFF81U) +#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_SHIFT (0U) +#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_EN (0x00000001U) + + +/* + Register RGX_CR_CDM_CONTEXT_LOAD_PDS0 +*/ +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0 (0x04D8U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_SHIFT (36U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSIZE (16U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_SHIFT (4U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSIZE (16U) + + +/* + Register RGX_CR_CDM_CONTEXT_LOAD_PDS1 +*/ +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1 (0x04E0U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0x40000000U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_SHIFT (29U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_EN 
(0x20000000U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0x20000000U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_SHIFT (28U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_EN (0x10000000U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_SHIFT (28U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_EN (0x10000000U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_SHIFT (27U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_EN (0x08000000U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0xF03FFFFFU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_SHIFT (21U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_CLRMSK (0xF81FFFFFU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0x00200000U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_SHIFT (20U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_EN (0x00100000U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0xFFE00FFFU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_SHIFT (11U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_CLRMSK (0xFFF007FFU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_SHIFT (7U) +#define 
RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_CLRMSK (0xFFFFF87FU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0xFFFFF07FU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_SHIFT (1U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_CLRMSK (0xFFFFFF81U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_SHIFT (0U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_EN (0x00000001U) + + +/* + Register RGX_CR_MIPS_WRAPPER_CONFIG +*/ +#define RGX_CR_MIPS_WRAPPER_CONFIG (0x0810U) +#define RGX_CR_MIPS_WRAPPER_CONFIG_MASKFULL (IMG_UINT64_C(0x000001030F01FFFF)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_SHIFT (40U) +#define RGX_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_SHIFT (33U) +#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_EN (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_SHIFT (32U) +#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_OS_ID_SHIFT (25U) +#define RGX_CR_MIPS_WRAPPER_CONFIG_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF1FFFFFF)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_SHIFT (24U) +#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_SHIFT (16U) +#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MIPS32 
(IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_SHIFT (0U) +#define RGX_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register RGX_CR_MIPS_ADDR_REMAP1_CONFIG1 +*/ +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1 (0x0818U) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001)) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_MIPS_ADDR_REMAP1_CONFIG2 +*/ +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2 (0x0820U) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF)) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT (6U) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F)) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_SHIFT (5U) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0)) + + +/* + Register RGX_CR_MIPS_ADDR_REMAP2_CONFIG1 +*/ +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1 (0x0828U) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001)) +#define 
RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_MIPS_ADDR_REMAP2_CONFIG2 +*/ +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2 (0x0830U) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF)) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_SHIFT (6U) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F)) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_SHIFT (5U) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0)) + + +/* + Register RGX_CR_MIPS_ADDR_REMAP3_CONFIG1 +*/ +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1 (0x0838U) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001)) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_MIPS_ADDR_REMAP3_CONFIG2 +*/ +#define 
RGX_CR_MIPS_ADDR_REMAP3_CONFIG2 (0x0840U) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF)) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_SHIFT (6U) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F)) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_SHIFT (5U) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0)) + + +/* + Register RGX_CR_MIPS_ADDR_REMAP4_CONFIG1 +*/ +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1 (0x0848U) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001)) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_MIPS_ADDR_REMAP4_CONFIG2 +*/ +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2 (0x0850U) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF)) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_SHIFT (6U) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F)) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_SHIFT (5U) +#define 
RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0)) + + +/* + Register RGX_CR_MIPS_ADDR_REMAP5_CONFIG1 +*/ +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1 (0x0858U) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001)) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_MIPS_ADDR_REMAP5_CONFIG2 +*/ +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2 (0x0860U) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF)) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_SHIFT (6U) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F)) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_SHIFT (5U) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0)) + + +/* + Register RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS +*/ +#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS (0x0868U) +#define 
RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_MASKFULL (IMG_UINT64_C(0x00000001FFFFFFFF)) +#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_SHIFT (32U) +#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) + + +/* + Register RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR +*/ +#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR (0x0870U) +#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_EN (0x00000001U) + + +/* + Register RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG +*/ +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG (0x0878U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MASKFULL (IMG_UINT64_C(0xFFFFFFF7FFFFFFBF)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_SHIFT (36U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_SHIFT (32U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFF8FFFFFFFF)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_SHIFT (11U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_SHIFT (7U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFFF87F)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4KB (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16KB (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64KB (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256KB (IMG_UINT64_C(0x0000000000000180)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_1MB (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4MB (IMG_UINT64_C(0x0000000000000280)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16MB (IMG_UINT64_C(0x0000000000000300)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64MB (IMG_UINT64_C(0x0000000000000380)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256MB (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_SHIFT (1U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC1)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_MIPS_ADDR_REMAP_RANGE_READ +*/ +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ (0x0880U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_SHIFT (1U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_CLRMSK (0xFFFFFFC1U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_EN (0x00000001U) + + +/* + Register RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA +*/ +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA (0x0888U) +#define 
RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MASKFULL (IMG_UINT64_C(0xFFFFFFF7FFFFFF81)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_SHIFT (36U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_SHIFT (32U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFF8FFFFFFFF)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_SHIFT (11U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_SHIFT (7U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF87F)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_MIPS_WRAPPER_IRQ_ENABLE +*/ +#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE (0x08A0U) +#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_SHIFT (0U) +#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_EN (0x00000001U) + + +/* + Register RGX_CR_MIPS_WRAPPER_IRQ_STATUS +*/ +#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS (0x08A8U) +#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_SHIFT (0U) +#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN (0x00000001U) + + +/* + Register 
RGX_CR_MIPS_WRAPPER_IRQ_CLEAR +*/ +#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR (0x08B0U) +#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_SHIFT (0U) +#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN (0x00000001U) + + +/* + Register RGX_CR_MIPS_WRAPPER_NMI_ENABLE +*/ +#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE (0x08B8U) +#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_SHIFT (0U) +#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN (0x00000001U) + + +/* + Register RGX_CR_MIPS_WRAPPER_NMI_EVENT +*/ +#define RGX_CR_MIPS_WRAPPER_NMI_EVENT (0x08C0U) +#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_SHIFT (0U) +#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN (0x00000001U) + + +/* + Register RGX_CR_MIPS_DEBUG_CONFIG +*/ +#define RGX_CR_MIPS_DEBUG_CONFIG (0x08C8U) +#define RGX_CR_MIPS_DEBUG_CONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_SHIFT (0U) +#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_EN (0x00000001U) + + +/* + Register RGX_CR_MIPS_EXCEPTION_STATUS +*/ +#define RGX_CR_MIPS_EXCEPTION_STATUS (0x08D0U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_SHIFT (5U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_EN (0x00000020U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_SHIFT (4U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_CLRMSK (0xFFFFFFEFU) +#define 
RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN (0x00000010U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_SHIFT (3U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_EN (0x00000008U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_SHIFT (2U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_EN (0x00000004U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_SHIFT (1U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_EN (0x00000002U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_SHIFT (0U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN (0x00000001U) + + +/* + Register RGX_CR_XPU_BROADCAST +*/ +#define RGX_CR_XPU_BROADCAST (0x0890U) +#define RGX_CR_XPU_BROADCAST_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_XPU_BROADCAST_MASK_SHIFT (0U) +#define RGX_CR_XPU_BROADCAST_MASK_CLRMSK (0xFFFFFE00U) + + +/* + Register RGX_CR_META_SP_MSLVDATAX +*/ +#define RGX_CR_META_SP_MSLVDATAX (0x0A00U) +#define RGX_CR_META_SP_MSLVDATAX_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_SHIFT (0U) +#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_META_SP_MSLVDATAT +*/ +#define RGX_CR_META_SP_MSLVDATAT (0x0A08U) +#define RGX_CR_META_SP_MSLVDATAT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_SHIFT (0U) +#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_META_SP_MSLVCTRL0 +*/ +#define RGX_CR_META_SP_MSLVCTRL0 (0x0A10U) +#define RGX_CR_META_SP_MSLVCTRL0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVCTRL0_ADDR_SHIFT (2U) +#define RGX_CR_META_SP_MSLVCTRL0_ADDR_CLRMSK (0x00000003U) +#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_SHIFT (1U) 
+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN (0x00000002U) +#define RGX_CR_META_SP_MSLVCTRL0_RD_SHIFT (0U) +#define RGX_CR_META_SP_MSLVCTRL0_RD_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVCTRL0_RD_EN (0x00000001U) + + +/* + Register RGX_CR_META_SP_MSLVCTRL1 +*/ +#define RGX_CR_META_SP_MSLVCTRL1 (0x0A18U) +#define RGX_CR_META_SP_MSLVCTRL1_MASKFULL (IMG_UINT64_C(0x00000000F7F4003F)) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_SHIFT (30U) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_CLRMSK (0x3FFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_SHIFT (29U) +#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_EN (0x20000000U) +#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_SHIFT (28U) +#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_EN (0x10000000U) +#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_SHIFT (26U) +#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN (0x04000000U) +#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_SHIFT (25U) +#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_EN (0x02000000U) +#define RGX_CR_META_SP_MSLVCTRL1_READY_SHIFT (24U) +#define RGX_CR_META_SP_MSLVCTRL1_READY_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_READY_EN (0x01000000U) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_SHIFT (21U) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_CLRMSK (0xFF1FFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_SHIFT (20U) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_EN (0x00100000U) +#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_SHIFT (18U) +#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_EN 
(0x00040000U) +#define RGX_CR_META_SP_MSLVCTRL1_THREAD_SHIFT (4U) +#define RGX_CR_META_SP_MSLVCTRL1_THREAD_CLRMSK (0xFFFFFFCFU) +#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_SHIFT (2U) +#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_SHIFT (0U) +#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_META_SP_MSLVHANDSHKE +*/ +#define RGX_CR_META_SP_MSLVHANDSHKE (0x0A50U) +#define RGX_CR_META_SP_MSLVHANDSHKE_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_SHIFT (2U) +#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_SHIFT (0U) +#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_META_SP_MSLVT0KICK +*/ +#define RGX_CR_META_SP_MSLVT0KICK (0x0A80U) +#define RGX_CR_META_SP_MSLVT0KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT0KICKI +*/ +#define RGX_CR_META_SP_MSLVT0KICKI (0x0A88U) +#define RGX_CR_META_SP_MSLVT0KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT1KICK +*/ +#define RGX_CR_META_SP_MSLVT1KICK (0x0A90U) +#define RGX_CR_META_SP_MSLVT1KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT1KICKI +*/ +#define RGX_CR_META_SP_MSLVT1KICKI (0x0A98U) +#define RGX_CR_META_SP_MSLVT1KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_CLRMSK (0xFFFF0000U) + + +/* 
+ Register RGX_CR_META_SP_MSLVT2KICK +*/ +#define RGX_CR_META_SP_MSLVT2KICK (0x0AA0U) +#define RGX_CR_META_SP_MSLVT2KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT2KICKI +*/ +#define RGX_CR_META_SP_MSLVT2KICKI (0x0AA8U) +#define RGX_CR_META_SP_MSLVT2KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT3KICK +*/ +#define RGX_CR_META_SP_MSLVT3KICK (0x0AB0U) +#define RGX_CR_META_SP_MSLVT3KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT3KICKI +*/ +#define RGX_CR_META_SP_MSLVT3KICKI (0x0AB8U) +#define RGX_CR_META_SP_MSLVT3KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVRST +*/ +#define RGX_CR_META_SP_MSLVRST (0x0AC0U) +#define RGX_CR_META_SP_MSLVRST_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_SP_MSLVRST_SOFTRESET_SHIFT (0U) +#define RGX_CR_META_SP_MSLVRST_SOFTRESET_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVRST_SOFTRESET_EN (0x00000001U) + + +/* + Register RGX_CR_META_SP_MSLVIRQSTATUS +*/ +#define RGX_CR_META_SP_MSLVIRQSTATUS (0x0AC8U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_MASKFULL (IMG_UINT64_C(0x000000000000000C)) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_SHIFT (3U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_EN (0x00000008U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_SHIFT (2U) +#define 
RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN (0x00000004U) + + +/* + Register RGX_CR_META_SP_MSLVIRQENABLE +*/ +#define RGX_CR_META_SP_MSLVIRQENABLE (0x0AD0U) +#define RGX_CR_META_SP_MSLVIRQENABLE_MASKFULL (IMG_UINT64_C(0x000000000000000C)) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_SHIFT (3U) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_EN (0x00000008U) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_SHIFT (2U) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_EN (0x00000004U) + + +/* + Register RGX_CR_META_SP_MSLVIRQLEVEL +*/ +#define RGX_CR_META_SP_MSLVIRQLEVEL (0x0AD8U) +#define RGX_CR_META_SP_MSLVIRQLEVEL_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_SHIFT (0U) +#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_EN (0x00000001U) + + +/* + Register RGX_CR_MTS_SCHEDULE +*/ +#define RGX_CR_MTS_SCHEDULE (0x0B00U) +#define RGX_CR_MTS_SCHEDULE_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE_TASK_SHIFT (4U) 
+#define RGX_CR_MTS_SCHEDULE_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_SCHEDULE1 +*/ +#define RGX_CR_MTS_SCHEDULE1 (0x10B00U) +#define RGX_CR_MTS_SCHEDULE1_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE1_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE1_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE1_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE1_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE1_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE1_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE1_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE1_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE1_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE1_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE1_DM_CLRMSK (0xFFFFFFF0U) +#define 
RGX_CR_MTS_SCHEDULE1_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_SCHEDULE2 +*/ +#define RGX_CR_MTS_SCHEDULE2 (0x20B00U) +#define RGX_CR_MTS_SCHEDULE2_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE2_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE2_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE2_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE2_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE2_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE2_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE2_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE2_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE2_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE2_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE2_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM4 (0x00000004U) +#define 
RGX_CR_MTS_SCHEDULE2_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_SCHEDULE3 +*/ +#define RGX_CR_MTS_SCHEDULE3 (0x30B00U) +#define RGX_CR_MTS_SCHEDULE3_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE3_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE3_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE3_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE3_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE3_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE3_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE3_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE3_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE3_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE3_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE3_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_SCHEDULE4 +*/ +#define RGX_CR_MTS_SCHEDULE4 
(0x40B00U) +#define RGX_CR_MTS_SCHEDULE4_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE4_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE4_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE4_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE4_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE4_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE4_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE4_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE4_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE4_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE4_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE4_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_SCHEDULE5 +*/ +#define RGX_CR_MTS_SCHEDULE5 (0x50B00U) +#define RGX_CR_MTS_SCHEDULE5_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE5_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE5_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE5_HOST_BG_TIMER (0x00000000U) +#define 
RGX_CR_MTS_SCHEDULE5_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE5_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE5_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE5_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE5_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE5_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE5_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE5_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE5_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_SCHEDULE6 +*/ +#define RGX_CR_MTS_SCHEDULE6 (0x60B00U) +#define RGX_CR_MTS_SCHEDULE6_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE6_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE6_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE6_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT1 (0x00000040U) +#define 
RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE6_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE6_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE6_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE6_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE6_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE6_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE6_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE6_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_SCHEDULE7 +*/ +#define RGX_CR_MTS_SCHEDULE7 (0x70B00U) +#define RGX_CR_MTS_SCHEDULE7_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE7_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE7_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE7_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE7_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE7_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE7_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE7_CONTEXT_BGCTX (0x00000000U) 
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE7_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE7_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE7_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE7_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE7_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE7_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC +*/ +#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC (0x0B30U) +#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U) +#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC +*/ +#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC (0x0B38U) +#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U) +#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC +*/ +#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC (0x0B40U) +#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U) +#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC +*/ +#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC (0x0B48U) +#define 
RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U) +#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_MTS_GARTEN_WRAPPER_CONFIG +*/ +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG (0x0B50U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__MASKFULL (IMG_UINT64_C(0x000FF0FFFFFFF701)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_MASKFULL (IMG_UINT64_C(0x0000FFFFFFFFF001)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT (44U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK (IMG_UINT64_C(0xFFFF0FFFFFFFFFFF)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_SHIFT (44U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_CLRMSK (IMG_UINT64_C(0xFFF00FFFFFFFFFFF)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT (40U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_SHIFT (12U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PERSISTENCE_SHIFT (9U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PERSISTENCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF9FF)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_SHIFT (8U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_SHIFT (0U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_MTS (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_MTS_INTCTX +*/ 
+#define RGX_CR_MTS_INTCTX (0x0B98U) +#define RGX_CR_MTS_INTCTX_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_SHIFT (22U) +#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_CLRMSK (0xC03FFFFFU) +#define RGX_CR_MTS_INTCTX_DM_PTR_SHIFT (18U) +#define RGX_CR_MTS_INTCTX_DM_PTR_CLRMSK (0xFFC3FFFFU) +#define RGX_CR_MTS_INTCTX_THREAD_ACTIVE_SHIFT (16U) +#define RGX_CR_MTS_INTCTX_THREAD_ACTIVE_CLRMSK (0xFFFCFFFFU) +#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_SHIFT (8U) +#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_CLRMSK (0xFFFF00FFU) +#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_SHIFT (0U) +#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_MTS_BGCTX +*/ +#define RGX_CR_MTS_BGCTX (0x0BA0U) +#define RGX_CR_MTS_BGCTX_MASKFULL (IMG_UINT64_C(0x0000000000003FFF)) +#define RGX_CR_MTS_BGCTX_DM_PTR_SHIFT (10U) +#define RGX_CR_MTS_BGCTX_DM_PTR_CLRMSK (0xFFFFC3FFU) +#define RGX_CR_MTS_BGCTX_THREAD_ACTIVE_SHIFT (8U) +#define RGX_CR_MTS_BGCTX_THREAD_ACTIVE_CLRMSK (0xFFFFFCFFU) +#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_SHIFT (0U) +#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE +*/ +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE (0x0BA8U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_SHIFT (56U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_CLRMSK (IMG_UINT64_C(0x00FFFFFFFFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_SHIFT (48U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_CLRMSK (IMG_UINT64_C(0xFF00FFFFFFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_SHIFT (40U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_CLRMSK (IMG_UINT64_C(0xFFFF00FFFFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_SHIFT (32U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_CLRMSK (IMG_UINT64_C(0xFFFFFF00FFFFFFFF)) +#define 
RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_SHIFT (24U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_SHIFT (16U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_SHIFT (8U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_SHIFT (0U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_MTS_GPU_INT_STATUS +*/ +#define RGX_CR_MTS_GPU_INT_STATUS (0x0BB0U) +#define RGX_CR_MTS_GPU_INT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_SHIFT (0U) +#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_IRQ_OS0_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS0_EVENT_STATUS (0x0BD8U) +#define RGX_CR_IRQ_OS0_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN (0x00000001U) + + +/* + Register RGX_CR_IRQ_OS0_EVENT_CLEAR +*/ +#define RGX_CR_IRQ_OS0_EVENT_CLEAR (0x0BE8U) +#define RGX_CR_IRQ_OS0_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN (0x00000001U) + + +/* + Register RGX_CR_IRQ_OS1_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS1_EVENT_STATUS (0x10BD8U) +#define RGX_CR_IRQ_OS1_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_EN (0x00000001U) + + +/* + Register RGX_CR_IRQ_OS1_EVENT_CLEAR +*/ 
+#define RGX_CR_IRQ_OS1_EVENT_CLEAR (0x10BE8U) +#define RGX_CR_IRQ_OS1_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_EN (0x00000001U) + + +/* + Register RGX_CR_IRQ_OS2_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS2_EVENT_STATUS (0x20BD8U) +#define RGX_CR_IRQ_OS2_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_EN (0x00000001U) + + +/* + Register RGX_CR_IRQ_OS2_EVENT_CLEAR +*/ +#define RGX_CR_IRQ_OS2_EVENT_CLEAR (0x20BE8U) +#define RGX_CR_IRQ_OS2_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_EN (0x00000001U) + + +/* + Register RGX_CR_IRQ_OS3_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS3_EVENT_STATUS (0x30BD8U) +#define RGX_CR_IRQ_OS3_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_EN (0x00000001U) + + +/* + Register RGX_CR_IRQ_OS3_EVENT_CLEAR +*/ +#define RGX_CR_IRQ_OS3_EVENT_CLEAR (0x30BE8U) +#define RGX_CR_IRQ_OS3_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_EN (0x00000001U) + + +/* + Register RGX_CR_IRQ_OS4_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS4_EVENT_STATUS (0x40BD8U) +#define RGX_CR_IRQ_OS4_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_SHIFT (0U) +#define 
RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_EN (0x00000001U) + + +/* + Register RGX_CR_IRQ_OS4_EVENT_CLEAR +*/ +#define RGX_CR_IRQ_OS4_EVENT_CLEAR (0x40BE8U) +#define RGX_CR_IRQ_OS4_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_EN (0x00000001U) + + +/* + Register RGX_CR_IRQ_OS5_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS5_EVENT_STATUS (0x50BD8U) +#define RGX_CR_IRQ_OS5_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_EN (0x00000001U) + + +/* + Register RGX_CR_IRQ_OS5_EVENT_CLEAR +*/ +#define RGX_CR_IRQ_OS5_EVENT_CLEAR (0x50BE8U) +#define RGX_CR_IRQ_OS5_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_EN (0x00000001U) + + +/* + Register RGX_CR_IRQ_OS6_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS6_EVENT_STATUS (0x60BD8U) +#define RGX_CR_IRQ_OS6_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_EN (0x00000001U) + + +/* + Register RGX_CR_IRQ_OS6_EVENT_CLEAR +*/ +#define RGX_CR_IRQ_OS6_EVENT_CLEAR (0x60BE8U) +#define RGX_CR_IRQ_OS6_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_EN (0x00000001U) + + +/* + Register RGX_CR_IRQ_OS7_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS7_EVENT_STATUS 
(0x70BD8U) +#define RGX_CR_IRQ_OS7_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_EN (0x00000001U) + + +/* + Register RGX_CR_IRQ_OS7_EVENT_CLEAR +*/ +#define RGX_CR_IRQ_OS7_EVENT_CLEAR (0x70BE8U) +#define RGX_CR_IRQ_OS7_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_EN (0x00000001U) + + +/* + Register RGX_CR_META_BOOT +*/ +#define RGX_CR_META_BOOT (0x0BF8U) +#define RGX_CR_META_BOOT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_BOOT_MODE_SHIFT (0U) +#define RGX_CR_META_BOOT_MODE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_BOOT_MODE_EN (0x00000001U) + + +/* + Register RGX_CR_GARTEN_SLC +*/ +#define RGX_CR_GARTEN_SLC (0x0BB8U) +#define RGX_CR_GARTEN_SLC_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_SHIFT (0U) +#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_EN (0x00000001U) + + +/* + Register RGX_CR_PPP +*/ +#define RGX_CR_PPP (0x0CD0U) +#define RGX_CR_PPP_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PPP_CHECKSUM_SHIFT (0U) +#define RGX_CR_PPP_CHECKSUM_CLRMSK (0x00000000U) + + +#define RGX_CR_ISP_RENDER_DIR_TYPE_MASK (0x00000003U) +/* +Top-left to bottom-right */ +#define RGX_CR_ISP_RENDER_DIR_TYPE_TL2BR (0x00000000U) +/* +Top-right to bottom-left */ +#define RGX_CR_ISP_RENDER_DIR_TYPE_TR2BL (0x00000001U) +/* +Bottom-left to top-right */ +#define RGX_CR_ISP_RENDER_DIR_TYPE_BL2TR (0x00000002U) +/* +Bottom-right to top-left */ +#define RGX_CR_ISP_RENDER_DIR_TYPE_BR2TL (0x00000003U) + + +#define RGX_CR_ISP_RENDER_MODE_TYPE_MASK (0x00000003U) +/* +Normal render */ +#define RGX_CR_ISP_RENDER_MODE_TYPE_NORM 
(0x00000000U) +/* +Fast 2D render */ +#define RGX_CR_ISP_RENDER_MODE_TYPE_FAST_2D (0x00000002U) +/* +Fast scale render */ +#define RGX_CR_ISP_RENDER_MODE_TYPE_FAST_SCALE (0x00000003U) + + +/* + Register RGX_CR_ISP_RENDER +*/ +#define RGX_CR_ISP_RENDER (0x0F08U) +#define RGX_CR_ISP_RENDER_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_ISP_RENDER_DISABLE_EOMT_SHIFT (5U) +#define RGX_CR_ISP_RENDER_DISABLE_EOMT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_ISP_RENDER_DISABLE_EOMT_EN (0x00000020U) +#define RGX_CR_ISP_RENDER_RESUME_SHIFT (4U) +#define RGX_CR_ISP_RENDER_RESUME_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_ISP_RENDER_RESUME_EN (0x00000010U) +#define RGX_CR_ISP_RENDER_DIR_SHIFT (2U) +#define RGX_CR_ISP_RENDER_DIR_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_ISP_RENDER_DIR_TL2BR (0x00000000U) +#define RGX_CR_ISP_RENDER_DIR_TR2BL (0x00000004U) +#define RGX_CR_ISP_RENDER_DIR_BL2TR (0x00000008U) +#define RGX_CR_ISP_RENDER_DIR_BR2TL (0x0000000CU) +#define RGX_CR_ISP_RENDER_MODE_SHIFT (0U) +#define RGX_CR_ISP_RENDER_MODE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_ISP_RENDER_MODE_NORM (0x00000000U) +#define RGX_CR_ISP_RENDER_MODE_FAST_2D (0x00000002U) +#define RGX_CR_ISP_RENDER_MODE_FAST_SCALE (0x00000003U) + + +/* + Register RGX_CR_ISP_CTL +*/ +#define RGX_CR_ISP_CTL (0x0F38U) +#define RGX_CR_ISP_CTL_MASKFULL (IMG_UINT64_C(0x00000000FFFFF3FF)) +#define RGX_CR_ISP_CTL_SKIP_INIT_HDRS_SHIFT (31U) +#define RGX_CR_ISP_CTL_SKIP_INIT_HDRS_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_ISP_CTL_SKIP_INIT_HDRS_EN (0x80000000U) +#define RGX_CR_ISP_CTL_LINE_STYLE_SHIFT (30U) +#define RGX_CR_ISP_CTL_LINE_STYLE_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_ISP_CTL_LINE_STYLE_EN (0x40000000U) +#define RGX_CR_ISP_CTL_LINE_STYLE_PIX_SHIFT (29U) +#define RGX_CR_ISP_CTL_LINE_STYLE_PIX_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_ISP_CTL_LINE_STYLE_PIX_EN (0x20000000U) +#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_SHIFT (28U) +#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_EN 
(0x10000000U) +#define RGX_CR_ISP_CTL_PAIR_TILES_SHIFT (27U) +#define RGX_CR_ISP_CTL_PAIR_TILES_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_ISP_CTL_PAIR_TILES_EN (0x08000000U) +#define RGX_CR_ISP_CTL_CREQ_BUF_EN_SHIFT (26U) +#define RGX_CR_ISP_CTL_CREQ_BUF_EN_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_ISP_CTL_CREQ_BUF_EN_EN (0x04000000U) +#define RGX_CR_ISP_CTL_TILE_AGE_EN_SHIFT (25U) +#define RGX_CR_ISP_CTL_TILE_AGE_EN_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_ISP_CTL_TILE_AGE_EN_EN (0x02000000U) +#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_SHIFT (23U) +#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_CLRMSK (0xFE7FFFFFU) +#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX9 (0x00000000U) +#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX10 (0x00800000U) +#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_OGL (0x01000000U) +#define RGX_CR_ISP_CTL_NUM_TILES_PER_USC_SHIFT (21U) +#define RGX_CR_ISP_CTL_NUM_TILES_PER_USC_CLRMSK (0xFF9FFFFFU) +#define RGX_CR_ISP_CTL_DBIAS_IS_INT_SHIFT (20U) +#define RGX_CR_ISP_CTL_DBIAS_IS_INT_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_ISP_CTL_DBIAS_IS_INT_EN (0x00100000U) +#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_SHIFT (19U) +#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_EN (0x00080000U) +#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_SHIFT (18U) +#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_EN (0x00040000U) +#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_SHIFT (17U) +#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_EN (0x00020000U) +#define RGX_CR_ISP_CTL_SAMPLE_POS_SHIFT (16U) +#define RGX_CR_ISP_CTL_SAMPLE_POS_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_ISP_CTL_SAMPLE_POS_EN (0x00010000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_SHIFT (12U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_CLRMSK (0xFFFF0FFFU) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_ONE (0x00000000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWO 
(0x00001000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_THREE (0x00002000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOUR (0x00003000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIVE (0x00004000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIX (0x00005000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SEVEN (0x00006000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_EIGHT (0x00007000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_NINE (0x00008000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TEN (0x00009000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_ELEVEN (0x0000A000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWELVE (0x0000B000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_THIRTEEN (0x0000C000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOURTEEN (0x0000D000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIFTEEN (0x0000E000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIXTEEN (0x0000F000U) +#define RGX_CR_ISP_CTL_VALID_ID_SHIFT (4U) +#define RGX_CR_ISP_CTL_VALID_ID_CLRMSK (0xFFFFFC0FU) +#define RGX_CR_ISP_CTL_UPASS_START_SHIFT (0U) +#define RGX_CR_ISP_CTL_UPASS_START_CLRMSK (0xFFFFFFF0U) + + +/* + Register RGX_CR_ISP_STATUS +*/ +#define RGX_CR_ISP_STATUS (0x1038U) +#define RGX_CR_ISP_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000007)) +#define RGX_CR_ISP_STATUS_SPLIT_MAX_SHIFT (2U) +#define RGX_CR_ISP_STATUS_SPLIT_MAX_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_ISP_STATUS_SPLIT_MAX_EN (0x00000004U) +#define RGX_CR_ISP_STATUS_ACTIVE_SHIFT (1U) +#define RGX_CR_ISP_STATUS_ACTIVE_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_ISP_STATUS_ACTIVE_EN (0x00000002U) +#define RGX_CR_ISP_STATUS_EOR_SHIFT (0U) +#define RGX_CR_ISP_STATUS_EOR_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_ISP_STATUS_EOR_EN (0x00000001U) + + +/* + Register group: RGX_CR_ISP_XTP_RESUME, with 64 repeats +*/ +#define RGX_CR_ISP_XTP_RESUME_REPEATCOUNT (64U) +/* + Register RGX_CR_ISP_XTP_RESUME0 +*/ +#define RGX_CR_ISP_XTP_RESUME0 (0x3A00U) +#define RGX_CR_ISP_XTP_RESUME0_MASKFULL (IMG_UINT64_C(0x00000000003FF3FF)) +#define RGX_CR_ISP_XTP_RESUME0_TILE_X_SHIFT 
(12U) +#define RGX_CR_ISP_XTP_RESUME0_TILE_X_CLRMSK (0xFFC00FFFU) +#define RGX_CR_ISP_XTP_RESUME0_TILE_Y_SHIFT (0U) +#define RGX_CR_ISP_XTP_RESUME0_TILE_Y_CLRMSK (0xFFFFFC00U) + + +/* + Register group: RGX_CR_ISP_XTP_STORE, with 32 repeats +*/ +#define RGX_CR_ISP_XTP_STORE_REPEATCOUNT (32U) +/* + Register RGX_CR_ISP_XTP_STORE0 +*/ +#define RGX_CR_ISP_XTP_STORE0 (0x3C00U) +#define RGX_CR_ISP_XTP_STORE0_MASKFULL (IMG_UINT64_C(0x000000007F3FF3FF)) +#define RGX_CR_ISP_XTP_STORE0_ACTIVE_SHIFT (30U) +#define RGX_CR_ISP_XTP_STORE0_ACTIVE_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_ISP_XTP_STORE0_ACTIVE_EN (0x40000000U) +#define RGX_CR_ISP_XTP_STORE0_EOR_SHIFT (29U) +#define RGX_CR_ISP_XTP_STORE0_EOR_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_ISP_XTP_STORE0_EOR_EN (0x20000000U) +#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_SHIFT (28U) +#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_EN (0x10000000U) +#define RGX_CR_ISP_XTP_STORE0_MT_SHIFT (24U) +#define RGX_CR_ISP_XTP_STORE0_MT_CLRMSK (0xF0FFFFFFU) +#define RGX_CR_ISP_XTP_STORE0_TILE_X_SHIFT (12U) +#define RGX_CR_ISP_XTP_STORE0_TILE_X_CLRMSK (0xFFC00FFFU) +#define RGX_CR_ISP_XTP_STORE0_TILE_Y_SHIFT (0U) +#define RGX_CR_ISP_XTP_STORE0_TILE_Y_CLRMSK (0xFFFFFC00U) + + +/* + Register group: RGX_CR_BIF_CAT_BASE, with 8 repeats +*/ +#define RGX_CR_BIF_CAT_BASE_REPEATCOUNT (8U) +/* + Register RGX_CR_BIF_CAT_BASE0 +*/ +#define RGX_CR_BIF_CAT_BASE0 (0x1200U) +#define RGX_CR_BIF_CAT_BASE0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT (12U) +#define RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_BIF_CAT_BASE1 +*/ +#define RGX_CR_BIF_CAT_BASE1 (0x1208U) +#define RGX_CR_BIF_CAT_BASE1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_BIF_CAT_BASE1_ADDR_SHIFT (12U) +#define 
RGX_CR_BIF_CAT_BASE1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_CAT_BASE1_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_BIF_CAT_BASE1_ADDR_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_BIF_CAT_BASE2 +*/ +#define RGX_CR_BIF_CAT_BASE2 (0x1210U) +#define RGX_CR_BIF_CAT_BASE2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_BIF_CAT_BASE2_ADDR_SHIFT (12U) +#define RGX_CR_BIF_CAT_BASE2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_CAT_BASE2_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_BIF_CAT_BASE2_ADDR_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_BIF_CAT_BASE3 +*/ +#define RGX_CR_BIF_CAT_BASE3 (0x1218U) +#define RGX_CR_BIF_CAT_BASE3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_BIF_CAT_BASE3_ADDR_SHIFT (12U) +#define RGX_CR_BIF_CAT_BASE3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_CAT_BASE3_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_BIF_CAT_BASE3_ADDR_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_BIF_CAT_BASE4 +*/ +#define RGX_CR_BIF_CAT_BASE4 (0x1220U) +#define RGX_CR_BIF_CAT_BASE4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_BIF_CAT_BASE4_ADDR_SHIFT (12U) +#define RGX_CR_BIF_CAT_BASE4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_CAT_BASE4_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_BIF_CAT_BASE4_ADDR_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_BIF_CAT_BASE5 +*/ +#define RGX_CR_BIF_CAT_BASE5 (0x1228U) +#define RGX_CR_BIF_CAT_BASE5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_BIF_CAT_BASE5_ADDR_SHIFT (12U) +#define RGX_CR_BIF_CAT_BASE5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_CAT_BASE5_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_BIF_CAT_BASE5_ADDR_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_BIF_CAT_BASE6 +*/ +#define RGX_CR_BIF_CAT_BASE6 (0x1230U) +#define RGX_CR_BIF_CAT_BASE6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_BIF_CAT_BASE6_ADDR_SHIFT (12U) +#define RGX_CR_BIF_CAT_BASE6_ADDR_CLRMSK 
(IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_CAT_BASE6_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_BIF_CAT_BASE6_ADDR_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_BIF_CAT_BASE7 +*/ +#define RGX_CR_BIF_CAT_BASE7 (0x1238U) +#define RGX_CR_BIF_CAT_BASE7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_BIF_CAT_BASE7_ADDR_SHIFT (12U) +#define RGX_CR_BIF_CAT_BASE7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_CAT_BASE7_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_BIF_CAT_BASE7_ADDR_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_BIF_CAT_BASE_INDEX +*/ +#define RGX_CR_BIF_CAT_BASE_INDEX (0x1240U) +#define RGX_CR_BIF_CAT_BASE_INDEX_MASKFULL (IMG_UINT64_C(0x00070707073F0707)) +#define RGX_CR_BIF_CAT_BASE_INDEX_RVTX_SHIFT (48U) +#define RGX_CR_BIF_CAT_BASE_INDEX_RVTX_CLRMSK (IMG_UINT64_C(0xFFF8FFFFFFFFFFFF)) +#define RGX_CR_BIF_CAT_BASE_INDEX_RAY_SHIFT (40U) +#define RGX_CR_BIF_CAT_BASE_INDEX_RAY_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) +#define RGX_CR_BIF_CAT_BASE_INDEX_HOST_SHIFT (32U) +#define RGX_CR_BIF_CAT_BASE_INDEX_HOST_CLRMSK (IMG_UINT64_C(0xFFFFFFF8FFFFFFFF)) +#define RGX_CR_BIF_CAT_BASE_INDEX_TLA_SHIFT (24U) +#define RGX_CR_BIF_CAT_BASE_INDEX_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF8FFFFFF)) +#define RGX_CR_BIF_CAT_BASE_INDEX_TDM_SHIFT (19U) +#define RGX_CR_BIF_CAT_BASE_INDEX_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC7FFFF)) +#define RGX_CR_BIF_CAT_BASE_INDEX_CDM_SHIFT (16U) +#define RGX_CR_BIF_CAT_BASE_INDEX_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF8FFFF)) +#define RGX_CR_BIF_CAT_BASE_INDEX_PIXEL_SHIFT (8U) +#define RGX_CR_BIF_CAT_BASE_INDEX_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF8FF)) +#define RGX_CR_BIF_CAT_BASE_INDEX_TA_SHIFT (0U) +#define RGX_CR_BIF_CAT_BASE_INDEX_TA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF8)) + + +/* + Register RGX_CR_BIF_PM_CAT_BASE_VCE0 +*/ +#define RGX_CR_BIF_PM_CAT_BASE_VCE0 (0x1248U) +#define RGX_CR_BIF_PM_CAT_BASE_VCE0_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_SHIFT (40U) 
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE0_ADDR_SHIFT (12U) +#define RGX_CR_BIF_PM_CAT_BASE_VCE0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_SHIFT (1U) +#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_SHIFT (0U) +#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_BIF_PM_CAT_BASE_TE0 +*/ +#define RGX_CR_BIF_PM_CAT_BASE_TE0 (0x1250U) +#define RGX_CR_BIF_PM_CAT_BASE_TE0_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) +#define RGX_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_SHIFT (40U) +#define RGX_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) +#define RGX_CR_BIF_PM_CAT_BASE_TE0_ADDR_SHIFT (12U) +#define RGX_CR_BIF_PM_CAT_BASE_TE0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_SHIFT (1U) +#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_SHIFT (0U) +#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_BIF_PM_CAT_BASE_ALIST0 +*/ +#define RGX_CR_BIF_PM_CAT_BASE_ALIST0 (0x1260U) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_SHIFT (40U) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_SHIFT (12U) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_CLRMSK 
(IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_SHIFT (1U) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_SHIFT (0U) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_BIF_PM_CAT_BASE_VCE1 +*/ +#define RGX_CR_BIF_PM_CAT_BASE_VCE1 (0x1268U) +#define RGX_CR_BIF_PM_CAT_BASE_VCE1_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_SHIFT (40U) +#define RGX_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE1_ADDR_SHIFT (12U) +#define RGX_CR_BIF_PM_CAT_BASE_VCE1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_SHIFT (1U) +#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_SHIFT (0U) +#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_BIF_PM_CAT_BASE_TE1 +*/ +#define RGX_CR_BIF_PM_CAT_BASE_TE1 (0x1270U) +#define RGX_CR_BIF_PM_CAT_BASE_TE1_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) +#define RGX_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_SHIFT (40U) +#define RGX_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) +#define RGX_CR_BIF_PM_CAT_BASE_TE1_ADDR_SHIFT (12U) +#define RGX_CR_BIF_PM_CAT_BASE_TE1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_SHIFT (1U) +#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define 
RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_SHIFT (0U) +#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_BIF_PM_CAT_BASE_ALIST1 +*/ +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1 (0x1280U) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_SHIFT (40U) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_SHIFT (12U) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_SHIFT (1U) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_SHIFT (0U) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_BIF_MMU_ENTRY_STATUS +*/ +#define RGX_CR_BIF_MMU_ENTRY_STATUS (0x1288U) +#define RGX_CR_BIF_MMU_ENTRY_STATUS_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF0F3)) +#define RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_SHIFT (12U) +#define RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT (4U) +#define RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF0F)) +#define RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT (0U) +#define RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_BIF_MMU_ENTRY +*/ +#define RGX_CR_BIF_MMU_ENTRY (0x1290U) +#define RGX_CR_BIF_MMU_ENTRY_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define 
RGX_CR_BIF_MMU_ENTRY_ENABLE_SHIFT (1U) +#define RGX_CR_BIF_MMU_ENTRY_ENABLE_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_BIF_MMU_ENTRY_ENABLE_EN (0x00000002U) +#define RGX_CR_BIF_MMU_ENTRY_PENDING_SHIFT (0U) +#define RGX_CR_BIF_MMU_ENTRY_PENDING_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_BIF_MMU_ENTRY_PENDING_EN (0x00000001U) + + +/* + Register RGX_CR_BIF_CTRL_INVAL +*/ +#define RGX_CR_BIF_CTRL_INVAL (0x12A0U) +#define RGX_CR_BIF_CTRL_INVAL_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_BIF_CTRL_INVAL_TLB1_SHIFT (3U) +#define RGX_CR_BIF_CTRL_INVAL_TLB1_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_BIF_CTRL_INVAL_TLB1_EN (0x00000008U) +#define RGX_CR_BIF_CTRL_INVAL_PC_SHIFT (2U) +#define RGX_CR_BIF_CTRL_INVAL_PC_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_BIF_CTRL_INVAL_PC_EN (0x00000004U) +#define RGX_CR_BIF_CTRL_INVAL_PD_SHIFT (1U) +#define RGX_CR_BIF_CTRL_INVAL_PD_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_BIF_CTRL_INVAL_PD_EN (0x00000002U) +#define RGX_CR_BIF_CTRL_INVAL_PT_SHIFT (0U) +#define RGX_CR_BIF_CTRL_INVAL_PT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_BIF_CTRL_INVAL_PT_EN (0x00000001U) + + +/* + Register RGX_CR_BIF_CTRL +*/ +#define RGX_CR_BIF_CTRL (0x12A8U) +#define RGX_CR_BIF_CTRL__XE_MEM__MASKFULL (IMG_UINT64_C(0x000000000000013F)) +#define RGX_CR_BIF_CTRL_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF4_SHIFT (8U) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF4_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF4_EN (0x00000100U) +#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_SHIFT (7U) +#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_EN (0x00000080U) +#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_SHIFT (6U) +#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_EN (0x00000040U) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_SHIFT (5U) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_CLRMSK (0xFFFFFFDFU) +#define 
RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_EN (0x00000020U) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_SHIFT (4U) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_EN (0x00000010U) +#define RGX_CR_BIF_CTRL_PAUSE_BIF1_SHIFT (3U) +#define RGX_CR_BIF_CTRL_PAUSE_BIF1_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_BIF_CTRL_PAUSE_BIF1_EN (0x00000008U) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_SHIFT (2U) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_EN (0x00000004U) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_SHIFT (1U) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_EN (0x00000002U) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_SHIFT (0U) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_EN (0x00000001U) + + +/* + Register RGX_CR_BIF_FAULT_BANK0_MMU_STATUS +*/ +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS (0x12B0U) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775)) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT (12U) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT (8U) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT (5U) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT (4U) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN (0x00000010U) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT (0U) +#define 
RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN (0x00000001U) + + +/* + Register RGX_CR_BIF_FAULT_BANK0_REQ_STATUS +*/ +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS (0x12B8U) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__MASKFULL (IMG_UINT64_C(0x001FFFFFFFFFFFF0)) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x0007FFFFFFFFFFF0)) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_SHIFT (52U) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_EN (IMG_UINT64_C(0x0010000000000000)) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT (50U) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_SHIFT (46U) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_CLRMSK (IMG_UINT64_C(0xFFF03FFFFFFFFFFF)) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT (44U) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFC0FFFFFFFFFFF)) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT (40U) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_SHIFT (40U) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFC0FFFFFFFFFF)) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT (4U) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE (16U) + + +/* + Register RGX_CR_BIF_FAULT_BANK1_MMU_STATUS +*/ +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS (0x12C0U) +#define 
RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775)) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_SHIFT (12U) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_SHIFT (8U) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_SHIFT (5U) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_SHIFT (4U) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_EN (0x00000010U) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_SHIFT (0U) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_EN (0x00000001U) + + +/* + Register RGX_CR_BIF_FAULT_BANK1_REQ_STATUS +*/ +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS (0x12C8U) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x0007FFFFFFFFFFF0)) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_SHIFT (50U) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_SHIFT (44U) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFC0FFFFFFFFFFF)) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_SHIFT (40U) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_SHIFT (4U) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_CLRMSK 
(IMG_UINT64_C(0xFFFFFF000000000F)) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSIZE (16U) + + +/* + Register RGX_CR_BIF_MMU_STATUS +*/ +#define RGX_CR_BIF_MMU_STATUS (0x12D0U) +#define RGX_CR_BIF_MMU_STATUS__XE_MEM__MASKFULL (IMG_UINT64_C(0x000000001FFFFFF7)) +#define RGX_CR_BIF_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000001FFFFFF7)) +#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_SHIFT (28U) +#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_EN (0x10000000U) +#define RGX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT (20U) +#define RGX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK (0xF00FFFFFU) +#define RGX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT (12U) +#define RGX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK (0xFFF00FFFU) +#define RGX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT (4U) +#define RGX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK (0xFFFFF00FU) +#define RGX_CR_BIF_MMU_STATUS_STALLED_SHIFT (2U) +#define RGX_CR_BIF_MMU_STATUS_STALLED_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_BIF_MMU_STATUS_STALLED_EN (0x00000004U) +#define RGX_CR_BIF_MMU_STATUS_PAUSED_SHIFT (1U) +#define RGX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_BIF_MMU_STATUS_PAUSED_EN (0x00000002U) +#define RGX_CR_BIF_MMU_STATUS_BUSY_SHIFT (0U) +#define RGX_CR_BIF_MMU_STATUS_BUSY_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_BIF_MMU_STATUS_BUSY_EN (0x00000001U) + + +/* + Register group: RGX_CR_BIF_TILING_CFG, with 8 repeats +*/ +#define RGX_CR_BIF_TILING_CFG_REPEATCOUNT (8U) +/* + Register RGX_CR_BIF_TILING_CFG0 +*/ +#define RGX_CR_BIF_TILING_CFG0 (0x12D8U) +#define RGX_CR_BIF_TILING_CFG0_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_BIF_TILING_CFG0_XSTRIDE_SHIFT (61U) +#define RGX_CR_BIF_TILING_CFG0_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG0_ENABLE_SHIFT (60U) +#define RGX_CR_BIF_TILING_CFG0_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG0_ENABLE_EN 
(IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_BIF_TILING_CFG1 +*/ +#define RGX_CR_BIF_TILING_CFG1 (0x12E0U) +#define RGX_CR_BIF_TILING_CFG1_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_BIF_TILING_CFG1_XSTRIDE_SHIFT (61U) +#define RGX_CR_BIF_TILING_CFG1_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG1_ENABLE_SHIFT (60U) +#define RGX_CR_BIF_TILING_CFG1_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG1_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_BIF_TILING_CFG2 +*/ +#define RGX_CR_BIF_TILING_CFG2 (0x12E8U) +#define RGX_CR_BIF_TILING_CFG2_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_BIF_TILING_CFG2_XSTRIDE_SHIFT (61U) +#define RGX_CR_BIF_TILING_CFG2_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG2_ENABLE_SHIFT (60U) +#define RGX_CR_BIF_TILING_CFG2_ENABLE_CLRMSK 
(IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG2_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_BIF_TILING_CFG3 +*/ +#define RGX_CR_BIF_TILING_CFG3 (0x12F0U) +#define RGX_CR_BIF_TILING_CFG3_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_BIF_TILING_CFG3_XSTRIDE_SHIFT (61U) +#define RGX_CR_BIF_TILING_CFG3_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG3_ENABLE_SHIFT (60U) +#define RGX_CR_BIF_TILING_CFG3_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG3_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_BIF_TILING_CFG4 +*/ +#define RGX_CR_BIF_TILING_CFG4 (0x12F8U) +#define RGX_CR_BIF_TILING_CFG4_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_BIF_TILING_CFG4_XSTRIDE_SHIFT (61U) +#define RGX_CR_BIF_TILING_CFG4_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define 
RGX_CR_BIF_TILING_CFG4_ENABLE_SHIFT (60U) +#define RGX_CR_BIF_TILING_CFG4_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG4_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_BIF_TILING_CFG5 +*/ +#define RGX_CR_BIF_TILING_CFG5 (0x1300U) +#define RGX_CR_BIF_TILING_CFG5_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_BIF_TILING_CFG5_XSTRIDE_SHIFT (61U) +#define RGX_CR_BIF_TILING_CFG5_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG5_ENABLE_SHIFT (60U) +#define RGX_CR_BIF_TILING_CFG5_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG5_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_BIF_TILING_CFG6 +*/ +#define RGX_CR_BIF_TILING_CFG6 (0x1308U) +#define RGX_CR_BIF_TILING_CFG6_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_BIF_TILING_CFG6_XSTRIDE_SHIFT (61U) +#define 
RGX_CR_BIF_TILING_CFG6_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG6_ENABLE_SHIFT (60U) +#define RGX_CR_BIF_TILING_CFG6_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG6_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_BIF_TILING_CFG7 +*/ +#define RGX_CR_BIF_TILING_CFG7 (0x1310U) +#define RGX_CR_BIF_TILING_CFG7_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_BIF_TILING_CFG7_XSTRIDE_SHIFT (61U) +#define RGX_CR_BIF_TILING_CFG7_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG7_ENABLE_SHIFT (60U) +#define RGX_CR_BIF_TILING_CFG7_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG7_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_BIF_READS_EXT_STATUS +*/ +#define RGX_CR_BIF_READS_EXT_STATUS (0x1320U) +#define RGX_CR_BIF_READS_EXT_STATUS_MASKFULL 
(IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_BIF_READS_EXT_STATUS_MMU_SHIFT (16U) +#define RGX_CR_BIF_READS_EXT_STATUS_MMU_CLRMSK (0xF000FFFFU) +#define RGX_CR_BIF_READS_EXT_STATUS_BANK1_SHIFT (0U) +#define RGX_CR_BIF_READS_EXT_STATUS_BANK1_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_BIF_READS_INT_STATUS +*/ +#define RGX_CR_BIF_READS_INT_STATUS (0x1328U) +#define RGX_CR_BIF_READS_INT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000007FFFFFF)) +#define RGX_CR_BIF_READS_INT_STATUS_MMU_SHIFT (16U) +#define RGX_CR_BIF_READS_INT_STATUS_MMU_CLRMSK (0xF800FFFFU) +#define RGX_CR_BIF_READS_INT_STATUS_BANK1_SHIFT (0U) +#define RGX_CR_BIF_READS_INT_STATUS_BANK1_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_BIFPM_READS_INT_STATUS +*/ +#define RGX_CR_BIFPM_READS_INT_STATUS (0x1330U) +#define RGX_CR_BIFPM_READS_INT_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_BIFPM_READS_INT_STATUS_BANK0_SHIFT (0U) +#define RGX_CR_BIFPM_READS_INT_STATUS_BANK0_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_BIFPM_READS_EXT_STATUS +*/ +#define RGX_CR_BIFPM_READS_EXT_STATUS (0x1338U) +#define RGX_CR_BIFPM_READS_EXT_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_BIFPM_READS_EXT_STATUS_BANK0_SHIFT (0U) +#define RGX_CR_BIFPM_READS_EXT_STATUS_BANK0_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_BIFPM_STATUS_MMU +*/ +#define RGX_CR_BIFPM_STATUS_MMU (0x1350U) +#define RGX_CR_BIFPM_STATUS_MMU_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_BIFPM_STATUS_MMU_REQUESTS_SHIFT (0U) +#define RGX_CR_BIFPM_STATUS_MMU_REQUESTS_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_BIF_STATUS_MMU +*/ +#define RGX_CR_BIF_STATUS_MMU (0x1358U) +#define RGX_CR_BIF_STATUS_MMU_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_BIF_STATUS_MMU_REQUESTS_SHIFT (0U) +#define RGX_CR_BIF_STATUS_MMU_REQUESTS_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_BIF_FAULT_READ +*/ +#define RGX_CR_BIF_FAULT_READ (0x13E0U) +#define RGX_CR_BIF_FAULT_READ_MASKFULL 
(IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_BIF_FAULT_READ_ADDRESS_SHIFT (4U) +#define RGX_CR_BIF_FAULT_READ_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) +#define RGX_CR_BIF_FAULT_READ_ADDRESS_ALIGNSHIFT (4U) +#define RGX_CR_BIF_FAULT_READ_ADDRESS_ALIGNSIZE (16U) + + +/* + Register RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS +*/ +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS (0x1430U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775)) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT (12U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT (8U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT (5U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT (4U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN (0x00000010U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT (0U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN (0x00000001U) + + +/* + Register RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS +*/ +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS (0x1438U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x0007FFFFFFFFFFF0)) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT (50U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) 
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT (44U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFC0FFFFFFFFFFF)) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT (40U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT (4U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE (16U) + + +/* + Register RGX_CR_MCU_FENCE +*/ +#define RGX_CR_MCU_FENCE (0x1740U) +#define RGX_CR_MCU_FENCE_MASKFULL (IMG_UINT64_C(0x000007FFFFFFFFE0)) +#define RGX_CR_MCU_FENCE_DM_SHIFT (40U) +#define RGX_CR_MCU_FENCE_DM_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) +#define RGX_CR_MCU_FENCE_DM_VERTEX (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_MCU_FENCE_DM_PIXEL (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_MCU_FENCE_DM_COMPUTE (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_MCU_FENCE_DM_RAY_VERTEX (IMG_UINT64_C(0x0000030000000000)) +#define RGX_CR_MCU_FENCE_DM_RAY (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_MCU_FENCE_DM_FASTRENDER (IMG_UINT64_C(0x0000050000000000)) +#define RGX_CR_MCU_FENCE_ADDR_SHIFT (5U) +#define RGX_CR_MCU_FENCE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) +#define RGX_CR_MCU_FENCE_ADDR_ALIGNSHIFT (5U) +#define RGX_CR_MCU_FENCE_ADDR_ALIGNSIZE (32U) + + +/* + Register group: RGX_CR_SCRATCH, with 16 repeats +*/ +#define RGX_CR_SCRATCH_REPEATCOUNT (16U) +/* + Register RGX_CR_SCRATCH0 +*/ +#define RGX_CR_SCRATCH0 (0x1A00U) +#define RGX_CR_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register 
RGX_CR_SCRATCH1 +*/ +#define RGX_CR_SCRATCH1 (0x1A08U) +#define RGX_CR_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SCRATCH2 +*/ +#define RGX_CR_SCRATCH2 (0x1A10U) +#define RGX_CR_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH2_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SCRATCH3 +*/ +#define RGX_CR_SCRATCH3 (0x1A18U) +#define RGX_CR_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH3_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SCRATCH4 +*/ +#define RGX_CR_SCRATCH4 (0x1A20U) +#define RGX_CR_SCRATCH4_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH4_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH4_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SCRATCH5 +*/ +#define RGX_CR_SCRATCH5 (0x1A28U) +#define RGX_CR_SCRATCH5_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH5_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH5_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SCRATCH6 +*/ +#define RGX_CR_SCRATCH6 (0x1A30U) +#define RGX_CR_SCRATCH6_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH6_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH6_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SCRATCH7 +*/ +#define RGX_CR_SCRATCH7 (0x1A38U) +#define RGX_CR_SCRATCH7_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH7_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH7_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SCRATCH8 +*/ +#define RGX_CR_SCRATCH8 (0x1A40U) +#define RGX_CR_SCRATCH8_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH8_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH8_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SCRATCH9 +*/ +#define RGX_CR_SCRATCH9 (0x1A48U) +#define RGX_CR_SCRATCH9_MASKFULL 
(IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH9_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH9_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SCRATCH10 +*/ +#define RGX_CR_SCRATCH10 (0x1A50U) +#define RGX_CR_SCRATCH10_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH10_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH10_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SCRATCH11 +*/ +#define RGX_CR_SCRATCH11 (0x1A58U) +#define RGX_CR_SCRATCH11_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH11_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH11_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SCRATCH12 +*/ +#define RGX_CR_SCRATCH12 (0x1A60U) +#define RGX_CR_SCRATCH12_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH12_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH12_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SCRATCH13 +*/ +#define RGX_CR_SCRATCH13 (0x1A68U) +#define RGX_CR_SCRATCH13_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH13_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH13_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SCRATCH14 +*/ +#define RGX_CR_SCRATCH14 (0x1A70U) +#define RGX_CR_SCRATCH14_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH14_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH14_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SCRATCH15 +*/ +#define RGX_CR_SCRATCH15 (0x1A78U) +#define RGX_CR_SCRATCH15_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH15_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH15_DATA_CLRMSK (0x00000000U) + + +/* + Register group: RGX_CR_OS0_SCRATCH, with 2 repeats +*/ +#define RGX_CR_OS0_SCRATCH_REPEATCOUNT (2U) +/* + Register RGX_CR_OS0_SCRATCH0 +*/ +#define RGX_CR_OS0_SCRATCH0 (0x1A80U) +#define RGX_CR_OS0_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS0_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS0_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS0_SCRATCH1 +*/ +#define RGX_CR_OS0_SCRATCH1 
(0x1A88U) +#define RGX_CR_OS0_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS0_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS0_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS0_SCRATCH2 +*/ +#define RGX_CR_OS0_SCRATCH2 (0x1A90U) +#define RGX_CR_OS0_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS0_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS0_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_OS0_SCRATCH3 +*/ +#define RGX_CR_OS0_SCRATCH3 (0x1A98U) +#define RGX_CR_OS0_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS0_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS0_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) + + +/* + Register group: RGX_CR_OS1_SCRATCH, with 2 repeats +*/ +#define RGX_CR_OS1_SCRATCH_REPEATCOUNT (2U) +/* + Register RGX_CR_OS1_SCRATCH0 +*/ +#define RGX_CR_OS1_SCRATCH0 (0x11A80U) +#define RGX_CR_OS1_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS1_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS1_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS1_SCRATCH1 +*/ +#define RGX_CR_OS1_SCRATCH1 (0x11A88U) +#define RGX_CR_OS1_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS1_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS1_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS1_SCRATCH2 +*/ +#define RGX_CR_OS1_SCRATCH2 (0x11A90U) +#define RGX_CR_OS1_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS1_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS1_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_OS1_SCRATCH3 +*/ +#define RGX_CR_OS1_SCRATCH3 (0x11A98U) +#define RGX_CR_OS1_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS1_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS1_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) + + +/* + Register group: RGX_CR_OS2_SCRATCH, with 2 repeats +*/ +#define RGX_CR_OS2_SCRATCH_REPEATCOUNT (2U) +/* + Register RGX_CR_OS2_SCRATCH0 +*/ +#define 
RGX_CR_OS2_SCRATCH0 (0x21A80U) +#define RGX_CR_OS2_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS2_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS2_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS2_SCRATCH1 +*/ +#define RGX_CR_OS2_SCRATCH1 (0x21A88U) +#define RGX_CR_OS2_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS2_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS2_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS2_SCRATCH2 +*/ +#define RGX_CR_OS2_SCRATCH2 (0x21A90U) +#define RGX_CR_OS2_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS2_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS2_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_OS2_SCRATCH3 +*/ +#define RGX_CR_OS2_SCRATCH3 (0x21A98U) +#define RGX_CR_OS2_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS2_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS2_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) + + +/* + Register group: RGX_CR_OS3_SCRATCH, with 2 repeats +*/ +#define RGX_CR_OS3_SCRATCH_REPEATCOUNT (2U) +/* + Register RGX_CR_OS3_SCRATCH0 +*/ +#define RGX_CR_OS3_SCRATCH0 (0x31A80U) +#define RGX_CR_OS3_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS3_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS3_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS3_SCRATCH1 +*/ +#define RGX_CR_OS3_SCRATCH1 (0x31A88U) +#define RGX_CR_OS3_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS3_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS3_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS3_SCRATCH2 +*/ +#define RGX_CR_OS3_SCRATCH2 (0x31A90U) +#define RGX_CR_OS3_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS3_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS3_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_OS3_SCRATCH3 +*/ +#define RGX_CR_OS3_SCRATCH3 (0x31A98U) +#define RGX_CR_OS3_SCRATCH3_MASKFULL 
(IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS3_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS3_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) + + +/* + Register group: RGX_CR_OS4_SCRATCH, with 2 repeats +*/ +#define RGX_CR_OS4_SCRATCH_REPEATCOUNT (2U) +/* + Register RGX_CR_OS4_SCRATCH0 +*/ +#define RGX_CR_OS4_SCRATCH0 (0x41A80U) +#define RGX_CR_OS4_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS4_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS4_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS4_SCRATCH1 +*/ +#define RGX_CR_OS4_SCRATCH1 (0x41A88U) +#define RGX_CR_OS4_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS4_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS4_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS4_SCRATCH2 +*/ +#define RGX_CR_OS4_SCRATCH2 (0x41A90U) +#define RGX_CR_OS4_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS4_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS4_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_OS4_SCRATCH3 +*/ +#define RGX_CR_OS4_SCRATCH3 (0x41A98U) +#define RGX_CR_OS4_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS4_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS4_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) + + +/* + Register group: RGX_CR_OS5_SCRATCH, with 2 repeats +*/ +#define RGX_CR_OS5_SCRATCH_REPEATCOUNT (2U) +/* + Register RGX_CR_OS5_SCRATCH0 +*/ +#define RGX_CR_OS5_SCRATCH0 (0x51A80U) +#define RGX_CR_OS5_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS5_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS5_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS5_SCRATCH1 +*/ +#define RGX_CR_OS5_SCRATCH1 (0x51A88U) +#define RGX_CR_OS5_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS5_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS5_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS5_SCRATCH2 +*/ +#define RGX_CR_OS5_SCRATCH2 (0x51A90U) +#define 
RGX_CR_OS5_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS5_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS5_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_OS5_SCRATCH3 +*/ +#define RGX_CR_OS5_SCRATCH3 (0x51A98U) +#define RGX_CR_OS5_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS5_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS5_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) + + +/* + Register group: RGX_CR_OS6_SCRATCH, with 2 repeats +*/ +#define RGX_CR_OS6_SCRATCH_REPEATCOUNT (2U) +/* + Register RGX_CR_OS6_SCRATCH0 +*/ +#define RGX_CR_OS6_SCRATCH0 (0x61A80U) +#define RGX_CR_OS6_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS6_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS6_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS6_SCRATCH1 +*/ +#define RGX_CR_OS6_SCRATCH1 (0x61A88U) +#define RGX_CR_OS6_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS6_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS6_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS6_SCRATCH2 +*/ +#define RGX_CR_OS6_SCRATCH2 (0x61A90U) +#define RGX_CR_OS6_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS6_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS6_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_OS6_SCRATCH3 +*/ +#define RGX_CR_OS6_SCRATCH3 (0x61A98U) +#define RGX_CR_OS6_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS6_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS6_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) + + +/* + Register group: RGX_CR_OS7_SCRATCH, with 2 repeats +*/ +#define RGX_CR_OS7_SCRATCH_REPEATCOUNT (2U) +/* + Register RGX_CR_OS7_SCRATCH0 +*/ +#define RGX_CR_OS7_SCRATCH0 (0x71A80U) +#define RGX_CR_OS7_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS7_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS7_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS7_SCRATCH1 +*/ +#define RGX_CR_OS7_SCRATCH1 (0x71A88U) 
+#define RGX_CR_OS7_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS7_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS7_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS7_SCRATCH2 +*/ +#define RGX_CR_OS7_SCRATCH2 (0x71A90U) +#define RGX_CR_OS7_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS7_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS7_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_OS7_SCRATCH3 +*/ +#define RGX_CR_OS7_SCRATCH3 (0x71A98U) +#define RGX_CR_OS7_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS7_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS7_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_SPFILTER_SIGNAL_DESCR +*/ +#define RGX_CR_SPFILTER_SIGNAL_DESCR (0x2700U) +#define RGX_CR_SPFILTER_SIGNAL_DESCR_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_SHIFT (0U) +#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_CLRMSK (0xFFFF0000U) +#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSHIFT (4U) +#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSIZE (16U) + + +/* + Register RGX_CR_SPFILTER_SIGNAL_DESCR_MIN +*/ +#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN (0x2708U) +#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_SHIFT (4U) +#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) +#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSIZE (16U) + + +/* + Register RGX_CR_SLC_CTRL_MISC +*/ +#define RGX_CR_SLC_CTRL_MISC (0x3800U) +#define RGX_CR_SLC_CTRL_MISC_MASKFULL (IMG_UINT64_C(0xFFFFFFFF01FF010F)) +#define RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_SHIFT (32U) +#define RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SLC_CTRL_MISC_WBMASKED_OVERRIDE_SHIFT (24U) +#define RGX_CR_SLC_CTRL_MISC_WBMASKED_OVERRIDE_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_SLC_CTRL_MISC_WBMASKED_OVERRIDE_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SHIFT (16U) +#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) +#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_64_BYTE (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_128_BYTE (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH1 (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH2 (IMG_UINT64_C(0x0000000000110000)) +#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1 (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH2_SCRAMBLE (IMG_UINT64_C(0x0000000000210000)) +#define RGX_CR_SLC_CTRL_MISC_PAUSE_SHIFT (8U) +#define RGX_CR_SLC_CTRL_MISC_PAUSE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_SLC_CTRL_MISC_PAUSE_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_SHIFT (3U) +#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_SHIFT (2U) +#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_SHIFT (1U) +#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_SHIFT (0U) +#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN 
(IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_SLC_CTRL_FLUSH_INVAL +*/ +#define RGX_CR_SLC_CTRL_FLUSH_INVAL (0x3818U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_MASKFULL (IMG_UINT64_C(0x0000000080000FFF)) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_SHIFT (31U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_EN (0x80000000U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_SHIFT (11U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_EN (0x00000800U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_SHIFT (10U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_EN (0x00000400U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_SHIFT (9U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_EN (0x00000200U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_SHIFT (8U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_EN (0x00000100U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_SHIFT (7U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_EN (0x00000080U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_SHIFT (6U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_EN (0x00000040U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_SHIFT (5U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_EN (0x00000020U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_SHIFT (4U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_EN (0x00000010U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_SHIFT (3U) +#define 
RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_EN (0x00000008U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_SHIFT (2U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_EN (0x00000004U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_SHIFT (1U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_EN (0x00000002U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_SHIFT (0U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN (0x00000001U) + + +/* + Register RGX_CR_SLC_STATUS0 +*/ +#define RGX_CR_SLC_STATUS0 (0x3820U) +#define RGX_CR_SLC_STATUS0_MASKFULL (IMG_UINT64_C(0x0000000000000007)) +#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_SHIFT (2U) +#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_EN (0x00000004U) +#define RGX_CR_SLC_STATUS0_INVAL_PENDING_SHIFT (1U) +#define RGX_CR_SLC_STATUS0_INVAL_PENDING_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SLC_STATUS0_INVAL_PENDING_EN (0x00000002U) +#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_SHIFT (0U) +#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_EN (0x00000001U) + + +/* + Register RGX_CR_SLC_CTRL_BYPASS +*/ +#define RGX_CR_SLC_CTRL_BYPASS (0x3828U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFF7FFF)) +#define RGX_CR_SLC_CTRL_BYPASS_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_SHIFT (59U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_EN (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_SHIFT (58U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_CLRMSK 
(IMG_UINT64_C(0xFBFFFFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_EN (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_SHIFT (57U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_EN (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_SHIFT (56U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_EN (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_SHIFT (55U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_EN (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_SHIFT (54U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_EN (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_SHIFT (53U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_EN (IMG_UINT64_C(0x0020000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_SHIFT (52U) +#define RGX_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_EN (IMG_UINT64_C(0x0010000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_SHIFT (51U) +#define RGX_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_EN (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBC_SHIFT (50U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBC_CLRMSK 
(IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBC_EN (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_SHIFT (49U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_EN (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_SHIFT (48U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_EN (IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_SHIFT (47U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_EN (IMG_UINT64_C(0x0000800000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_SHIFT (46U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_EN (IMG_UINT64_C(0x0000400000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_SHIFT (45U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_EN (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_PBE_SHIFT (44U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_PBE_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_PBE_EN (IMG_UINT64_C(0x0000100000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_ISP_SHIFT (43U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_ISP_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_ISP_EN (IMG_UINT64_C(0x0000080000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_PM_SHIFT (42U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_PM_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_PM_EN (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TDM_SHIFT (41U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) 
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TDM_EN (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_CDM_SHIFT (40U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_CDM_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_SHIFT (39U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_EN (IMG_UINT64_C(0x0000008000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_SHIFT (38U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_EN (IMG_UINT64_C(0x0000004000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_SHIFT (37U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_EN (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_VDM_SHIFT (36U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_VDM_EN (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_SHIFT (35U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_EN (IMG_UINT64_C(0x0000000800000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_SHIFT (34U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_EN (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_SHIFT (33U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_EN (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_SHIFT (32U) +#define 
RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_SHIFT (31U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_SHIFT (30U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_EN (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_TE_SHIFT (29U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_TE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_TE_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_VCE_SHIFT (28U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_VCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_VCE_EN (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_SHIFT (27U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_EN (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_SHIFT (26U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_SHIFT (25U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_SHIFT (24U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_SHIFT (23U) +#define 
RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_SHIFT (22U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_SHIFT (21U) +#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_SHIFT (20U) +#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_EN (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_SHIFT (19U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_SHIFT (18U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_SHIFT (17U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_SHIFT (16U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_SHIFT (15U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_SHIFT (14U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_SHIFT (13U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_SHIFT (12U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_SHIFT (11U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_SHIFT (10U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_SHIFT (9U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_SHIFT (8U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_SHIFT (7U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_SHIFT (6U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_SHIFT (5U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define 
RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_SHIFT (4U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_SHIFT (3U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_SHIFT (2U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_SHIFT (1U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_SLC_CTRL_BYPASS_ALL_SHIFT (0U) +#define RGX_CR_SLC_CTRL_BYPASS_ALL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_SLC_CTRL_BYPASS_ALL_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_SLC_STATUS1 +*/ +#define RGX_CR_SLC_STATUS1 (0x3870U) +#define RGX_CR_SLC_STATUS1_MASKFULL (IMG_UINT64_C(0x800003FF03FFFFFF)) +#define RGX_CR_SLC_STATUS1_PAUSED_SHIFT (63U) +#define RGX_CR_SLC_STATUS1_PAUSED_CLRMSK (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) +#define RGX_CR_SLC_STATUS1_PAUSED_EN (IMG_UINT64_C(0x8000000000000000)) +#define RGX_CR_SLC_STATUS1_READS1_SHIFT (32U) +#define RGX_CR_SLC_STATUS1_READS1_CLRMSK (IMG_UINT64_C(0xFFFFFC00FFFFFFFF)) +#define RGX_CR_SLC_STATUS1_READS0_SHIFT (16U) +#define RGX_CR_SLC_STATUS1_READS0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFC00FFFF)) +#define RGX_CR_SLC_STATUS1_READS1_EXT_SHIFT (8U) +#define RGX_CR_SLC_STATUS1_READS1_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) +#define RGX_CR_SLC_STATUS1_READS0_EXT_SHIFT (0U) +#define RGX_CR_SLC_STATUS1_READS0_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register 
RGX_CR_SLC_IDLE +*/ +#define RGX_CR_SLC_IDLE (0x3898U) +#define RGX_CR_SLC_IDLE__XE_MEM__MASKFULL (IMG_UINT64_C(0x00000000000003FF)) +#define RGX_CR_SLC_IDLE_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_SLC_IDLE_MH_SYSARB1_SHIFT (9U) +#define RGX_CR_SLC_IDLE_MH_SYSARB1_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_SLC_IDLE_MH_SYSARB1_EN (0x00000200U) +#define RGX_CR_SLC_IDLE_MH_SYSARB0_SHIFT (8U) +#define RGX_CR_SLC_IDLE_MH_SYSARB0_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_SLC_IDLE_MH_SYSARB0_EN (0x00000100U) +#define RGX_CR_SLC_IDLE_IMGBV4_SHIFT (7U) +#define RGX_CR_SLC_IDLE_IMGBV4_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_SLC_IDLE_IMGBV4_EN (0x00000080U) +#define RGX_CR_SLC_IDLE_CACHE_BANKS_SHIFT (6U) +#define RGX_CR_SLC_IDLE_CACHE_BANKS_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_SLC_IDLE_CACHE_BANKS_EN (0x00000040U) +#define RGX_CR_SLC_IDLE_RBOFIFO_SHIFT (5U) +#define RGX_CR_SLC_IDLE_RBOFIFO_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_SLC_IDLE_RBOFIFO_EN (0x00000020U) +#define RGX_CR_SLC_IDLE_FRC_CONV_SHIFT (4U) +#define RGX_CR_SLC_IDLE_FRC_CONV_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_SLC_IDLE_FRC_CONV_EN (0x00000010U) +#define RGX_CR_SLC_IDLE_VXE_CONV_SHIFT (3U) +#define RGX_CR_SLC_IDLE_VXE_CONV_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_SLC_IDLE_VXE_CONV_EN (0x00000008U) +#define RGX_CR_SLC_IDLE_VXD_CONV_SHIFT (2U) +#define RGX_CR_SLC_IDLE_VXD_CONV_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SLC_IDLE_VXD_CONV_EN (0x00000004U) +#define RGX_CR_SLC_IDLE_BIF1_CONV_SHIFT (1U) +#define RGX_CR_SLC_IDLE_BIF1_CONV_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SLC_IDLE_BIF1_CONV_EN (0x00000002U) +#define RGX_CR_SLC_IDLE_CBAR_SHIFT (0U) +#define RGX_CR_SLC_IDLE_CBAR_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SLC_IDLE_CBAR_EN (0x00000001U) + + +/* + Register RGX_CR_SLC_STATUS2 +*/ +#define RGX_CR_SLC_STATUS2 (0x3908U) +#define RGX_CR_SLC_STATUS2_MASKFULL (IMG_UINT64_C(0x000003FF03FFFFFF)) +#define RGX_CR_SLC_STATUS2_READS3_SHIFT (32U) +#define RGX_CR_SLC_STATUS2_READS3_CLRMSK (IMG_UINT64_C(0xFFFFFC00FFFFFFFF)) +#define 
RGX_CR_SLC_STATUS2_READS2_SHIFT (16U) +#define RGX_CR_SLC_STATUS2_READS2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFC00FFFF)) +#define RGX_CR_SLC_STATUS2_READS3_EXT_SHIFT (8U) +#define RGX_CR_SLC_STATUS2_READS3_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) +#define RGX_CR_SLC_STATUS2_READS2_EXT_SHIFT (0U) +#define RGX_CR_SLC_STATUS2_READS2_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_SLC_CTRL_MISC2 +*/ +#define RGX_CR_SLC_CTRL_MISC2 (0x3930U) +#define RGX_CR_SLC_CTRL_MISC2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_SHIFT (0U) +#define RGX_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SLC_CROSSBAR_LOAD_BALANCE +*/ +#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE (0x3938U) +#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_SHIFT (0U) +#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_EN (0x00000001U) + + +/* + Register RGX_CR_USC_UVS0_CHECKSUM +*/ +#define RGX_CR_USC_UVS0_CHECKSUM (0x5000U) +#define RGX_CR_USC_UVS0_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_USC_UVS0_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_USC_UVS0_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_USC_UVS1_CHECKSUM +*/ +#define RGX_CR_USC_UVS1_CHECKSUM (0x5008U) +#define RGX_CR_USC_UVS1_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_USC_UVS1_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_USC_UVS1_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_USC_UVS2_CHECKSUM +*/ +#define RGX_CR_USC_UVS2_CHECKSUM (0x5010U) +#define RGX_CR_USC_UVS2_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_USC_UVS2_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_USC_UVS2_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_USC_UVS3_CHECKSUM +*/ +#define RGX_CR_USC_UVS3_CHECKSUM (0x5018U) +#define 
RGX_CR_USC_UVS3_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_USC_UVS3_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_USC_UVS3_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PPP_SIGNATURE +*/ +#define RGX_CR_PPP_SIGNATURE (0x5020U) +#define RGX_CR_PPP_SIGNATURE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PPP_SIGNATURE_VALUE_SHIFT (0U) +#define RGX_CR_PPP_SIGNATURE_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_TE_SIGNATURE +*/ +#define RGX_CR_TE_SIGNATURE (0x5028U) +#define RGX_CR_TE_SIGNATURE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TE_SIGNATURE_VALUE_SHIFT (0U) +#define RGX_CR_TE_SIGNATURE_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_TE_CHECKSUM +*/ +#define RGX_CR_TE_CHECKSUM (0x5110U) +#define RGX_CR_TE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TE_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_TE_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_USC_UVB_CHECKSUM +*/ +#define RGX_CR_USC_UVB_CHECKSUM (0x5118U) +#define RGX_CR_USC_UVB_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_USC_UVB_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_USC_UVB_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_VCE_CHECKSUM +*/ +#define RGX_CR_VCE_CHECKSUM (0x5030U) +#define RGX_CR_VCE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_VCE_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_VCE_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_ISP_PDS_CHECKSUM +*/ +#define RGX_CR_ISP_PDS_CHECKSUM (0x5038U) +#define RGX_CR_ISP_PDS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_ISP_TPF_CHECKSUM +*/ +#define RGX_CR_ISP_TPF_CHECKSUM (0x5040U) +#define RGX_CR_ISP_TPF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_SHIFT (0U) +#define 
RGX_CR_ISP_TPF_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_TFPU_PLANE0_CHECKSUM +*/ +#define RGX_CR_TFPU_PLANE0_CHECKSUM (0x5048U) +#define RGX_CR_TFPU_PLANE0_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TFPU_PLANE0_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_TFPU_PLANE0_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_TFPU_PLANE1_CHECKSUM +*/ +#define RGX_CR_TFPU_PLANE1_CHECKSUM (0x5050U) +#define RGX_CR_TFPU_PLANE1_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TFPU_PLANE1_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_TFPU_PLANE1_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PBE_CHECKSUM +*/ +#define RGX_CR_PBE_CHECKSUM (0x5058U) +#define RGX_CR_PBE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PBE_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_PBE_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PDS_DOUTM_STM_SIGNATURE +*/ +#define RGX_CR_PDS_DOUTM_STM_SIGNATURE (0x5060U) +#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_SHIFT (0U) +#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_IFPU_ISP_CHECKSUM +*/ +#define RGX_CR_IFPU_ISP_CHECKSUM (0x5068U) +#define RGX_CR_IFPU_ISP_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_USC_UVS4_CHECKSUM +*/ +#define RGX_CR_USC_UVS4_CHECKSUM (0x5100U) +#define RGX_CR_USC_UVS4_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_USC_UVS4_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_USC_UVS4_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_USC_UVS5_CHECKSUM +*/ +#define RGX_CR_USC_UVS5_CHECKSUM (0x5108U) +#define RGX_CR_USC_UVS5_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_USC_UVS5_CHECKSUM_VALUE_SHIFT (0U) 
+#define RGX_CR_USC_UVS5_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PPP_CLIP_CHECKSUM +*/ +#define RGX_CR_PPP_CLIP_CHECKSUM (0x5120U) +#define RGX_CR_PPP_CLIP_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_TA_PHASE +*/ +#define RGX_CR_PERF_TA_PHASE (0x6008U) +#define RGX_CR_PERF_TA_PHASE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_TA_PHASE_COUNT_SHIFT (0U) +#define RGX_CR_PERF_TA_PHASE_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_3D_PHASE +*/ +#define RGX_CR_PERF_3D_PHASE (0x6010U) +#define RGX_CR_PERF_3D_PHASE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_3D_PHASE_COUNT_SHIFT (0U) +#define RGX_CR_PERF_3D_PHASE_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_COMPUTE_PHASE +*/ +#define RGX_CR_PERF_COMPUTE_PHASE (0x6018U) +#define RGX_CR_PERF_COMPUTE_PHASE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_COMPUTE_PHASE_COUNT_SHIFT (0U) +#define RGX_CR_PERF_COMPUTE_PHASE_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_TA_CYCLE +*/ +#define RGX_CR_PERF_TA_CYCLE (0x6020U) +#define RGX_CR_PERF_TA_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_TA_CYCLE_COUNT_SHIFT (0U) +#define RGX_CR_PERF_TA_CYCLE_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_3D_CYCLE +*/ +#define RGX_CR_PERF_3D_CYCLE (0x6028U) +#define RGX_CR_PERF_3D_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_3D_CYCLE_COUNT_SHIFT (0U) +#define RGX_CR_PERF_3D_CYCLE_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_COMPUTE_CYCLE +*/ +#define RGX_CR_PERF_COMPUTE_CYCLE (0x6030U) +#define RGX_CR_PERF_COMPUTE_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_COMPUTE_CYCLE_COUNT_SHIFT (0U) +#define RGX_CR_PERF_COMPUTE_CYCLE_COUNT_CLRMSK (0x00000000U) + + +/* + Register 
RGX_CR_PERF_TA_OR_3D_CYCLE +*/ +#define RGX_CR_PERF_TA_OR_3D_CYCLE (0x6038U) +#define RGX_CR_PERF_TA_OR_3D_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_TA_OR_3D_CYCLE_COUNT_SHIFT (0U) +#define RGX_CR_PERF_TA_OR_3D_CYCLE_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_INITIAL_TA_CYCLE +*/ +#define RGX_CR_PERF_INITIAL_TA_CYCLE (0x6040U) +#define RGX_CR_PERF_INITIAL_TA_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_INITIAL_TA_CYCLE_COUNT_SHIFT (0U) +#define RGX_CR_PERF_INITIAL_TA_CYCLE_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC0_READ_STALL +*/ +#define RGX_CR_PERF_SLC0_READ_STALL (0x60B8U) +#define RGX_CR_PERF_SLC0_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC0_WRITE_STALL +*/ +#define RGX_CR_PERF_SLC0_WRITE_STALL (0x60C0U) +#define RGX_CR_PERF_SLC0_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC1_READ_STALL +*/ +#define RGX_CR_PERF_SLC1_READ_STALL (0x60E0U) +#define RGX_CR_PERF_SLC1_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC1_WRITE_STALL +*/ +#define RGX_CR_PERF_SLC1_WRITE_STALL (0x60E8U) +#define RGX_CR_PERF_SLC1_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC2_READ_STALL +*/ +#define RGX_CR_PERF_SLC2_READ_STALL (0x6158U) +#define RGX_CR_PERF_SLC2_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define 
RGX_CR_PERF_SLC2_READ_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC2_WRITE_STALL +*/ +#define RGX_CR_PERF_SLC2_WRITE_STALL (0x6160U) +#define RGX_CR_PERF_SLC2_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC3_READ_STALL +*/ +#define RGX_CR_PERF_SLC3_READ_STALL (0x6180U) +#define RGX_CR_PERF_SLC3_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC3_WRITE_STALL +*/ +#define RGX_CR_PERF_SLC3_WRITE_STALL (0x6188U) +#define RGX_CR_PERF_SLC3_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_3D_SPINUP +*/ +#define RGX_CR_PERF_3D_SPINUP (0x6220U) +#define RGX_CR_PERF_3D_SPINUP_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_3D_SPINUP_CYCLES_SHIFT (0U) +#define RGX_CR_PERF_3D_SPINUP_CYCLES_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_AXI_ACE_LITE_CONFIGURATION +*/ +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION (0x38C0U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_MASKFULL (IMG_UINT64_C(0x00003FFFFFFFFFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_SHIFT (45U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_EN (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT (37U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_CLRMSK (IMG_UINT64_C(0xFFFFE01FFFFFFFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT 
(36U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_EN (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_SHIFT (35U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_EN (IMG_UINT64_C(0x0000000800000000)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_SHIFT (34U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_EN (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT (30U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFC3FFFFFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT (26U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC3FFFFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT (22U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFC3FFFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_SHIFT (20U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_SHIFT (18U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT (16U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT (14U) +#define 
RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF3FFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT (12U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT (10U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT (8U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_SHIFT (4U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF0F)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_SHIFT (0U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF0)) + + +/* + Register RGX_CR_POWER_ESTIMATE_RESULT +*/ +#define RGX_CR_POWER_ESTIMATE_RESULT (0x6328U) +#define RGX_CR_POWER_ESTIMATE_RESULT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_POWER_ESTIMATE_RESULT_VALUE_SHIFT (0U) +#define RGX_CR_POWER_ESTIMATE_RESULT_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_TA_PERF +*/ +#define RGX_CR_TA_PERF (0x7600U) +#define RGX_CR_TA_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_TA_PERF_CLR_3_SHIFT (4U) +#define RGX_CR_TA_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_TA_PERF_CLR_3_EN (0x00000010U) +#define RGX_CR_TA_PERF_CLR_2_SHIFT (3U) +#define RGX_CR_TA_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_TA_PERF_CLR_2_EN (0x00000008U) +#define RGX_CR_TA_PERF_CLR_1_SHIFT (2U) +#define RGX_CR_TA_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_TA_PERF_CLR_1_EN (0x00000004U) +#define RGX_CR_TA_PERF_CLR_0_SHIFT (1U) +#define RGX_CR_TA_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_TA_PERF_CLR_0_EN (0x00000002U) 
+#define RGX_CR_TA_PERF_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_TA_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_TA_PERF_CTRL_ENABLE_EN (0x00000001U) + + +/* + Register RGX_CR_TA_PERF_SELECT0 +*/ +#define RGX_CR_TA_PERF_SELECT0 (0x7608U) +#define RGX_CR_TA_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define RGX_CR_TA_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define RGX_CR_TA_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_TA_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define RGX_CR_TA_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_TA_PERF_SELECT0_MODE_SHIFT (21U) +#define RGX_CR_TA_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_TA_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define RGX_CR_TA_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register RGX_CR_TA_PERF_SELECT1 +*/ +#define RGX_CR_TA_PERF_SELECT1 (0x7610U) +#define RGX_CR_TA_PERF_SELECT1_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define RGX_CR_TA_PERF_SELECT1_BATCH_MAX_SHIFT (48U) +#define RGX_CR_TA_PERF_SELECT1_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_TA_PERF_SELECT1_BATCH_MIN_SHIFT (32U) +#define RGX_CR_TA_PERF_SELECT1_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_TA_PERF_SELECT1_MODE_SHIFT (21U) +#define RGX_CR_TA_PERF_SELECT1_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_TA_PERF_SELECT1_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_TA_PERF_SELECT1_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_TA_PERF_SELECT1_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_TA_PERF_SELECT1_BIT_SELECT_SHIFT (0U) +#define RGX_CR_TA_PERF_SELECT1_BIT_SELECT_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register RGX_CR_TA_PERF_SELECT2 +*/ +#define RGX_CR_TA_PERF_SELECT2 (0x7618U) +#define RGX_CR_TA_PERF_SELECT2_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define RGX_CR_TA_PERF_SELECT2_BATCH_MAX_SHIFT (48U) +#define RGX_CR_TA_PERF_SELECT2_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_TA_PERF_SELECT2_BATCH_MIN_SHIFT (32U) +#define RGX_CR_TA_PERF_SELECT2_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_TA_PERF_SELECT2_MODE_SHIFT (21U) +#define RGX_CR_TA_PERF_SELECT2_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_TA_PERF_SELECT2_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_TA_PERF_SELECT2_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_TA_PERF_SELECT2_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_TA_PERF_SELECT2_BIT_SELECT_SHIFT (0U) +#define RGX_CR_TA_PERF_SELECT2_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register RGX_CR_TA_PERF_SELECT3 +*/ +#define RGX_CR_TA_PERF_SELECT3 (0x7620U) +#define RGX_CR_TA_PERF_SELECT3_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define RGX_CR_TA_PERF_SELECT3_BATCH_MAX_SHIFT (48U) +#define RGX_CR_TA_PERF_SELECT3_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_TA_PERF_SELECT3_BATCH_MIN_SHIFT (32U) +#define RGX_CR_TA_PERF_SELECT3_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_TA_PERF_SELECT3_MODE_SHIFT (21U) +#define RGX_CR_TA_PERF_SELECT3_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_TA_PERF_SELECT3_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_TA_PERF_SELECT3_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_TA_PERF_SELECT3_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_TA_PERF_SELECT3_BIT_SELECT_SHIFT (0U) +#define RGX_CR_TA_PERF_SELECT3_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register RGX_CR_TA_PERF_SELECTED_BITS +*/ +#define RGX_CR_TA_PERF_SELECTED_BITS (0x7648U) +#define 
RGX_CR_TA_PERF_SELECTED_BITS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_TA_PERF_SELECTED_BITS_REG3_SHIFT (48U) +#define RGX_CR_TA_PERF_SELECTED_BITS_REG3_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) +#define RGX_CR_TA_PERF_SELECTED_BITS_REG2_SHIFT (32U) +#define RGX_CR_TA_PERF_SELECTED_BITS_REG2_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF)) +#define RGX_CR_TA_PERF_SELECTED_BITS_REG1_SHIFT (16U) +#define RGX_CR_TA_PERF_SELECTED_BITS_REG1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF)) +#define RGX_CR_TA_PERF_SELECTED_BITS_REG0_SHIFT (0U) +#define RGX_CR_TA_PERF_SELECTED_BITS_REG0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register RGX_CR_TA_PERF_COUNTER_0 +*/ +#define RGX_CR_TA_PERF_COUNTER_0 (0x7650U) +#define RGX_CR_TA_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TA_PERF_COUNTER_0_REG_SHIFT (0U) +#define RGX_CR_TA_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_TA_PERF_COUNTER_1 +*/ +#define RGX_CR_TA_PERF_COUNTER_1 (0x7658U) +#define RGX_CR_TA_PERF_COUNTER_1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TA_PERF_COUNTER_1_REG_SHIFT (0U) +#define RGX_CR_TA_PERF_COUNTER_1_REG_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_TA_PERF_COUNTER_2 +*/ +#define RGX_CR_TA_PERF_COUNTER_2 (0x7660U) +#define RGX_CR_TA_PERF_COUNTER_2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TA_PERF_COUNTER_2_REG_SHIFT (0U) +#define RGX_CR_TA_PERF_COUNTER_2_REG_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_TA_PERF_COUNTER_3 +*/ +#define RGX_CR_TA_PERF_COUNTER_3 (0x7668U) +#define RGX_CR_TA_PERF_COUNTER_3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TA_PERF_COUNTER_3_REG_SHIFT (0U) +#define RGX_CR_TA_PERF_COUNTER_3_REG_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_RASTERISATION_PERF +*/ +#define RGX_CR_RASTERISATION_PERF (0x7700U) +#define RGX_CR_RASTERISATION_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_RASTERISATION_PERF_CLR_3_SHIFT (4U) +#define 
RGX_CR_RASTERISATION_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_RASTERISATION_PERF_CLR_3_EN (0x00000010U) +#define RGX_CR_RASTERISATION_PERF_CLR_2_SHIFT (3U) +#define RGX_CR_RASTERISATION_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_RASTERISATION_PERF_CLR_2_EN (0x00000008U) +#define RGX_CR_RASTERISATION_PERF_CLR_1_SHIFT (2U) +#define RGX_CR_RASTERISATION_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_RASTERISATION_PERF_CLR_1_EN (0x00000004U) +#define RGX_CR_RASTERISATION_PERF_CLR_0_SHIFT (1U) +#define RGX_CR_RASTERISATION_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_RASTERISATION_PERF_CLR_0_EN (0x00000002U) +#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_EN (0x00000001U) + + +/* + Register RGX_CR_RASTERISATION_PERF_SELECT0 +*/ +#define RGX_CR_RASTERISATION_PERF_SELECT0 (0x7708U) +#define RGX_CR_RASTERISATION_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_SHIFT (21U) +#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define RGX_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register RGX_CR_RASTERISATION_PERF_COUNTER_0 +*/ +#define 
RGX_CR_RASTERISATION_PERF_COUNTER_0 (0x7750U) +#define RGX_CR_RASTERISATION_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_RASTERISATION_PERF_COUNTER_0_REG_SHIFT (0U) +#define RGX_CR_RASTERISATION_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_HUB_BIFPMCACHE_PERF +*/ +#define RGX_CR_HUB_BIFPMCACHE_PERF (0x7800U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_SHIFT (4U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_EN (0x00000010U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_SHIFT (3U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_EN (0x00000008U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_SHIFT (2U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_EN (0x00000004U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_SHIFT (1U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_EN (0x00000002U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_EN (0x00000001U) + + +/* + Register RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0 +*/ +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0 (0x7808U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_SHIFT (21U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0 +*/ +#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0 (0x7850U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_SHIFT (0U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_TPU_MCU_L0_PERF +*/ +#define RGX_CR_TPU_MCU_L0_PERF (0x7900U) +#define RGX_CR_TPU_MCU_L0_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_SHIFT (4U) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_EN (0x00000010U) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_SHIFT (3U) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_EN (0x00000008U) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_SHIFT (2U) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_EN (0x00000004U) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_SHIFT (1U) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_EN (0x00000002U) +#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_EN (0x00000001U) + + +/* + Register RGX_CR_TPU_MCU_L0_PERF_SELECT0 +*/ +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0 (0x7908U) +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define 
RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_SHIFT (21U) +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register RGX_CR_TPU_MCU_L0_PERF_COUNTER_0 +*/ +#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0 (0x7950U) +#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_SHIFT (0U) +#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_USC_PERF +*/ +#define RGX_CR_USC_PERF (0x8100U) +#define RGX_CR_USC_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_USC_PERF_CLR_3_SHIFT (4U) +#define RGX_CR_USC_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_USC_PERF_CLR_3_EN (0x00000010U) +#define RGX_CR_USC_PERF_CLR_2_SHIFT (3U) +#define RGX_CR_USC_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_USC_PERF_CLR_2_EN (0x00000008U) +#define RGX_CR_USC_PERF_CLR_1_SHIFT (2U) +#define RGX_CR_USC_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_USC_PERF_CLR_1_EN (0x00000004U) +#define RGX_CR_USC_PERF_CLR_0_SHIFT (1U) +#define RGX_CR_USC_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_USC_PERF_CLR_0_EN (0x00000002U) +#define RGX_CR_USC_PERF_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_USC_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define 
RGX_CR_USC_PERF_CTRL_ENABLE_EN (0x00000001U) + + +/* + Register RGX_CR_USC_PERF_SELECT0 +*/ +#define RGX_CR_USC_PERF_SELECT0 (0x8108U) +#define RGX_CR_USC_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define RGX_CR_USC_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define RGX_CR_USC_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_USC_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define RGX_CR_USC_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_USC_PERF_SELECT0_MODE_SHIFT (21U) +#define RGX_CR_USC_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_USC_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_USC_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_USC_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_USC_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define RGX_CR_USC_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register RGX_CR_USC_PERF_COUNTER_0 +*/ +#define RGX_CR_USC_PERF_COUNTER_0 (0x8150U) +#define RGX_CR_USC_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_USC_PERF_COUNTER_0_REG_SHIFT (0U) +#define RGX_CR_USC_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_JONES_IDLE +*/ +#define RGX_CR_JONES_IDLE (0x8328U) +#define RGX_CR_JONES_IDLE_MASKFULL (IMG_UINT64_C(0x0000000000007FFF)) +#define RGX_CR_JONES_IDLE_TDM_SHIFT (14U) +#define RGX_CR_JONES_IDLE_TDM_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_JONES_IDLE_TDM_EN (0x00004000U) +#define RGX_CR_JONES_IDLE_FB_CDC_TLA_SHIFT (13U) +#define RGX_CR_JONES_IDLE_FB_CDC_TLA_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_JONES_IDLE_FB_CDC_TLA_EN (0x00002000U) +#define RGX_CR_JONES_IDLE_FB_CDC_SHIFT (12U) +#define RGX_CR_JONES_IDLE_FB_CDC_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_JONES_IDLE_FB_CDC_EN (0x00001000U) +#define RGX_CR_JONES_IDLE_MMU_SHIFT (11U) +#define RGX_CR_JONES_IDLE_MMU_CLRMSK (0xFFFFF7FFU) +#define 
RGX_CR_JONES_IDLE_MMU_EN (0x00000800U) +#define RGX_CR_JONES_IDLE_TLA_SHIFT (10U) +#define RGX_CR_JONES_IDLE_TLA_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_JONES_IDLE_TLA_EN (0x00000400U) +#define RGX_CR_JONES_IDLE_GARTEN_SHIFT (9U) +#define RGX_CR_JONES_IDLE_GARTEN_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_JONES_IDLE_GARTEN_EN (0x00000200U) +#define RGX_CR_JONES_IDLE_HOSTIF_SHIFT (8U) +#define RGX_CR_JONES_IDLE_HOSTIF_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_JONES_IDLE_HOSTIF_EN (0x00000100U) +#define RGX_CR_JONES_IDLE_SOCIF_SHIFT (7U) +#define RGX_CR_JONES_IDLE_SOCIF_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_JONES_IDLE_SOCIF_EN (0x00000080U) +#define RGX_CR_JONES_IDLE_TILING_SHIFT (6U) +#define RGX_CR_JONES_IDLE_TILING_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_JONES_IDLE_TILING_EN (0x00000040U) +#define RGX_CR_JONES_IDLE_IPP_SHIFT (5U) +#define RGX_CR_JONES_IDLE_IPP_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_JONES_IDLE_IPP_EN (0x00000020U) +#define RGX_CR_JONES_IDLE_USCS_SHIFT (4U) +#define RGX_CR_JONES_IDLE_USCS_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_JONES_IDLE_USCS_EN (0x00000010U) +#define RGX_CR_JONES_IDLE_PM_SHIFT (3U) +#define RGX_CR_JONES_IDLE_PM_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_JONES_IDLE_PM_EN (0x00000008U) +#define RGX_CR_JONES_IDLE_CDM_SHIFT (2U) +#define RGX_CR_JONES_IDLE_CDM_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_JONES_IDLE_CDM_EN (0x00000004U) +#define RGX_CR_JONES_IDLE_VDM_SHIFT (1U) +#define RGX_CR_JONES_IDLE_VDM_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_JONES_IDLE_VDM_EN (0x00000002U) +#define RGX_CR_JONES_IDLE_BIF_SHIFT (0U) +#define RGX_CR_JONES_IDLE_BIF_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_JONES_IDLE_BIF_EN (0x00000001U) + + +/* + Register RGX_CR_TORNADO_PERF +*/ +#define RGX_CR_TORNADO_PERF (0x8228U) +#define RGX_CR_TORNADO_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_TORNADO_PERF_CLR_3_SHIFT (4U) +#define RGX_CR_TORNADO_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_TORNADO_PERF_CLR_3_EN (0x00000010U) +#define RGX_CR_TORNADO_PERF_CLR_2_SHIFT (3U) +#define 
RGX_CR_TORNADO_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_TORNADO_PERF_CLR_2_EN (0x00000008U) +#define RGX_CR_TORNADO_PERF_CLR_1_SHIFT (2U) +#define RGX_CR_TORNADO_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_TORNADO_PERF_CLR_1_EN (0x00000004U) +#define RGX_CR_TORNADO_PERF_CLR_0_SHIFT (1U) +#define RGX_CR_TORNADO_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_TORNADO_PERF_CLR_0_EN (0x00000002U) +#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_EN (0x00000001U) + + +/* + Register RGX_CR_TORNADO_PERF_SELECT0 +*/ +#define RGX_CR_TORNADO_PERF_SELECT0 (0x8230U) +#define RGX_CR_TORNADO_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_TORNADO_PERF_SELECT0_MODE_SHIFT (21U) +#define RGX_CR_TORNADO_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_TORNADO_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_TORNADO_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define RGX_CR_TORNADO_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register RGX_CR_TORNADO_PERF_COUNTER_0 +*/ +#define RGX_CR_TORNADO_PERF_COUNTER_0 (0x8268U) +#define RGX_CR_TORNADO_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TORNADO_PERF_COUNTER_0_REG_SHIFT (0U) +#define RGX_CR_TORNADO_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_TEXAS_PERF +*/ +#define RGX_CR_TEXAS_PERF (0x8290U) +#define RGX_CR_TEXAS_PERF_MASKFULL 
(IMG_UINT64_C(0x000000000000007F)) +#define RGX_CR_TEXAS_PERF_CLR_5_SHIFT (6U) +#define RGX_CR_TEXAS_PERF_CLR_5_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_TEXAS_PERF_CLR_5_EN (0x00000040U) +#define RGX_CR_TEXAS_PERF_CLR_4_SHIFT (5U) +#define RGX_CR_TEXAS_PERF_CLR_4_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_TEXAS_PERF_CLR_4_EN (0x00000020U) +#define RGX_CR_TEXAS_PERF_CLR_3_SHIFT (4U) +#define RGX_CR_TEXAS_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_TEXAS_PERF_CLR_3_EN (0x00000010U) +#define RGX_CR_TEXAS_PERF_CLR_2_SHIFT (3U) +#define RGX_CR_TEXAS_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_TEXAS_PERF_CLR_2_EN (0x00000008U) +#define RGX_CR_TEXAS_PERF_CLR_1_SHIFT (2U) +#define RGX_CR_TEXAS_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_TEXAS_PERF_CLR_1_EN (0x00000004U) +#define RGX_CR_TEXAS_PERF_CLR_0_SHIFT (1U) +#define RGX_CR_TEXAS_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_TEXAS_PERF_CLR_0_EN (0x00000002U) +#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_EN (0x00000001U) + + +/* + Register RGX_CR_TEXAS_PERF_SELECT0 +*/ +#define RGX_CR_TEXAS_PERF_SELECT0 (0x8298U) +#define RGX_CR_TEXAS_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF803FFFFF)) +#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_TEXAS_PERF_SELECT0_MODE_SHIFT (31U) +#define RGX_CR_TEXAS_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_TEXAS_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC0FFFF)) +#define RGX_CR_TEXAS_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define 
RGX_CR_TEXAS_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register RGX_CR_TEXAS_PERF_COUNTER_0 +*/ +#define RGX_CR_TEXAS_PERF_COUNTER_0 (0x82D8U) +#define RGX_CR_TEXAS_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TEXAS_PERF_COUNTER_0_REG_SHIFT (0U) +#define RGX_CR_TEXAS_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_JONES_PERF +*/ +#define RGX_CR_JONES_PERF (0x8330U) +#define RGX_CR_JONES_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_JONES_PERF_CLR_3_SHIFT (4U) +#define RGX_CR_JONES_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_JONES_PERF_CLR_3_EN (0x00000010U) +#define RGX_CR_JONES_PERF_CLR_2_SHIFT (3U) +#define RGX_CR_JONES_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_JONES_PERF_CLR_2_EN (0x00000008U) +#define RGX_CR_JONES_PERF_CLR_1_SHIFT (2U) +#define RGX_CR_JONES_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_JONES_PERF_CLR_1_EN (0x00000004U) +#define RGX_CR_JONES_PERF_CLR_0_SHIFT (1U) +#define RGX_CR_JONES_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_JONES_PERF_CLR_0_EN (0x00000002U) +#define RGX_CR_JONES_PERF_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_JONES_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_JONES_PERF_CTRL_ENABLE_EN (0x00000001U) + + +/* + Register RGX_CR_JONES_PERF_SELECT0 +*/ +#define RGX_CR_JONES_PERF_SELECT0 (0x8338U) +#define RGX_CR_JONES_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define RGX_CR_JONES_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define RGX_CR_JONES_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_JONES_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define RGX_CR_JONES_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_JONES_PERF_SELECT0_MODE_SHIFT (21U) +#define RGX_CR_JONES_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_JONES_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define 
RGX_CR_JONES_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_JONES_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_JONES_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define RGX_CR_JONES_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register RGX_CR_JONES_PERF_COUNTER_0 +*/ +#define RGX_CR_JONES_PERF_COUNTER_0 (0x8368U) +#define RGX_CR_JONES_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_JONES_PERF_COUNTER_0_REG_SHIFT (0U) +#define RGX_CR_JONES_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_BLACKPEARL_PERF +*/ +#define RGX_CR_BLACKPEARL_PERF (0x8400U) +#define RGX_CR_BLACKPEARL_PERF_MASKFULL (IMG_UINT64_C(0x000000000000007F)) +#define RGX_CR_BLACKPEARL_PERF_CLR_5_SHIFT (6U) +#define RGX_CR_BLACKPEARL_PERF_CLR_5_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_BLACKPEARL_PERF_CLR_5_EN (0x00000040U) +#define RGX_CR_BLACKPEARL_PERF_CLR_4_SHIFT (5U) +#define RGX_CR_BLACKPEARL_PERF_CLR_4_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_BLACKPEARL_PERF_CLR_4_EN (0x00000020U) +#define RGX_CR_BLACKPEARL_PERF_CLR_3_SHIFT (4U) +#define RGX_CR_BLACKPEARL_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_BLACKPEARL_PERF_CLR_3_EN (0x00000010U) +#define RGX_CR_BLACKPEARL_PERF_CLR_2_SHIFT (3U) +#define RGX_CR_BLACKPEARL_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_BLACKPEARL_PERF_CLR_2_EN (0x00000008U) +#define RGX_CR_BLACKPEARL_PERF_CLR_1_SHIFT (2U) +#define RGX_CR_BLACKPEARL_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_BLACKPEARL_PERF_CLR_1_EN (0x00000004U) +#define RGX_CR_BLACKPEARL_PERF_CLR_0_SHIFT (1U) +#define RGX_CR_BLACKPEARL_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_BLACKPEARL_PERF_CLR_0_EN (0x00000002U) +#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_EN (0x00000001U) + + +/* + Register RGX_CR_BLACKPEARL_PERF_SELECT0 +*/ +#define RGX_CR_BLACKPEARL_PERF_SELECT0 
(0x8408U) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF803FFFFF)) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_SHIFT (31U) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC0FFFF)) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register RGX_CR_BLACKPEARL_PERF_COUNTER_0 +*/ +#define RGX_CR_BLACKPEARL_PERF_COUNTER_0 (0x8448U) +#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_REG_SHIFT (0U) +#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PBE_PERF +*/ +#define RGX_CR_PBE_PERF (0x8478U) +#define RGX_CR_PBE_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_PBE_PERF_CLR_3_SHIFT (4U) +#define RGX_CR_PBE_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_PBE_PERF_CLR_3_EN (0x00000010U) +#define RGX_CR_PBE_PERF_CLR_2_SHIFT (3U) +#define RGX_CR_PBE_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_PBE_PERF_CLR_2_EN (0x00000008U) +#define RGX_CR_PBE_PERF_CLR_1_SHIFT (2U) +#define RGX_CR_PBE_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_PBE_PERF_CLR_1_EN (0x00000004U) +#define RGX_CR_PBE_PERF_CLR_0_SHIFT (1U) +#define RGX_CR_PBE_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_PBE_PERF_CLR_0_EN (0x00000002U) +#define 
RGX_CR_PBE_PERF_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_PBE_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_PBE_PERF_CTRL_ENABLE_EN (0x00000001U) + + +/* + Register RGX_CR_PBE_PERF_SELECT0 +*/ +#define RGX_CR_PBE_PERF_SELECT0 (0x8480U) +#define RGX_CR_PBE_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define RGX_CR_PBE_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define RGX_CR_PBE_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_PBE_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define RGX_CR_PBE_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_PBE_PERF_SELECT0_MODE_SHIFT (21U) +#define RGX_CR_PBE_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_PBE_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_PBE_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_PBE_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_PBE_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define RGX_CR_PBE_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register RGX_CR_PBE_PERF_COUNTER_0 +*/ +#define RGX_CR_PBE_PERF_COUNTER_0 (0x84B0U) +#define RGX_CR_PBE_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PBE_PERF_COUNTER_0_REG_SHIFT (0U) +#define RGX_CR_PBE_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OCP_REVINFO +*/ +#define RGX_CR_OCP_REVINFO (0x9000U) +#define RGX_CR_OCP_REVINFO_MASKFULL (IMG_UINT64_C(0x00000007FFFFFFFF)) +#define RGX_CR_OCP_REVINFO_HWINFO_SYSBUS_SHIFT (33U) +#define RGX_CR_OCP_REVINFO_HWINFO_SYSBUS_CLRMSK (IMG_UINT64_C(0xFFFFFFF9FFFFFFFF)) +#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_SHIFT (32U) +#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_OCP_REVINFO_REVISION_SHIFT (0U) +#define RGX_CR_OCP_REVINFO_REVISION_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFF00000000)) + + +/* + Register RGX_CR_OCP_SYSCONFIG +*/ +#define RGX_CR_OCP_SYSCONFIG (0x9010U) +#define RGX_CR_OCP_SYSCONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000FFF)) +#define RGX_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_SHIFT (10U) +#define RGX_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_CLRMSK (0xFFFFF3FFU) +#define RGX_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_SHIFT (8U) +#define RGX_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_CLRMSK (0xFFFFFCFFU) +#define RGX_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_SHIFT (6U) +#define RGX_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_SHIFT (4U) +#define RGX_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_CLRMSK (0xFFFFFFCFU) +#define RGX_CR_OCP_SYSCONFIG_STANDBY_MODE_SHIFT (2U) +#define RGX_CR_OCP_SYSCONFIG_STANDBY_MODE_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_OCP_SYSCONFIG_IDLE_MODE_SHIFT (0U) +#define RGX_CR_OCP_SYSCONFIG_IDLE_MODE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_OCP_IRQSTATUS_RAW_0 +*/ +#define RGX_CR_OCP_IRQSTATUS_RAW_0 (0x9020U) +#define RGX_CR_OCP_IRQSTATUS_RAW_0_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_SHIFT (0U) +#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_EN (0x00000001U) + + +/* + Register RGX_CR_OCP_IRQSTATUS_RAW_1 +*/ +#define RGX_CR_OCP_IRQSTATUS_RAW_1 (0x9028U) +#define RGX_CR_OCP_IRQSTATUS_RAW_1_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_SHIFT (0U) +#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_EN (0x00000001U) + + +/* + Register RGX_CR_OCP_IRQSTATUS_RAW_2 +*/ +#define RGX_CR_OCP_IRQSTATUS_RAW_2 (0x9030U) +#define RGX_CR_OCP_IRQSTATUS_RAW_2_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_SHIFT (0U) +#define 
RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_EN (0x00000001U) + + +/* + Register RGX_CR_OCP_IRQSTATUS_0 +*/ +#define RGX_CR_OCP_IRQSTATUS_0 (0x9038U) +#define RGX_CR_OCP_IRQSTATUS_0_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_SHIFT (0U) +#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_EN (0x00000001U) + + +/* + Register RGX_CR_OCP_IRQSTATUS_1 +*/ +#define RGX_CR_OCP_IRQSTATUS_1 (0x9040U) +#define RGX_CR_OCP_IRQSTATUS_1_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_SHIFT (0U) +#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_EN (0x00000001U) + + +/* + Register RGX_CR_OCP_IRQSTATUS_2 +*/ +#define RGX_CR_OCP_IRQSTATUS_2 (0x9048U) +#define RGX_CR_OCP_IRQSTATUS_2_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_SHIFT (0U) +#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN (0x00000001U) + + +/* + Register RGX_CR_OCP_IRQENABLE_SET_0 +*/ +#define RGX_CR_OCP_IRQENABLE_SET_0 (0x9050U) +#define RGX_CR_OCP_IRQENABLE_SET_0_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_SHIFT (0U) +#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_EN (0x00000001U) + + +/* + Register RGX_CR_OCP_IRQENABLE_SET_1 +*/ +#define RGX_CR_OCP_IRQENABLE_SET_1 (0x9058U) +#define RGX_CR_OCP_IRQENABLE_SET_1_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_SHIFT (0U) +#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_CLRMSK (0xFFFFFFFEU) +#define 
RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_EN (0x00000001U) + + +/* + Register RGX_CR_OCP_IRQENABLE_SET_2 +*/ +#define RGX_CR_OCP_IRQENABLE_SET_2 (0x9060U) +#define RGX_CR_OCP_IRQENABLE_SET_2_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_SHIFT (0U) +#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_EN (0x00000001U) + + +/* + Register RGX_CR_OCP_IRQENABLE_CLR_0 +*/ +#define RGX_CR_OCP_IRQENABLE_CLR_0 (0x9068U) +#define RGX_CR_OCP_IRQENABLE_CLR_0_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_SHIFT (0U) +#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_EN (0x00000001U) + + +/* + Register RGX_CR_OCP_IRQENABLE_CLR_1 +*/ +#define RGX_CR_OCP_IRQENABLE_CLR_1 (0x9070U) +#define RGX_CR_OCP_IRQENABLE_CLR_1_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_SHIFT (0U) +#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_EN (0x00000001U) + + +/* + Register RGX_CR_OCP_IRQENABLE_CLR_2 +*/ +#define RGX_CR_OCP_IRQENABLE_CLR_2 (0x9078U) +#define RGX_CR_OCP_IRQENABLE_CLR_2_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_SHIFT (0U) +#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_EN (0x00000001U) + + +/* + Register RGX_CR_OCP_IRQ_EVENT +*/ +#define RGX_CR_OCP_IRQ_EVENT (0x9080U) +#define RGX_CR_OCP_IRQ_EVENT_MASKFULL (IMG_UINT64_C(0x00000000000FFFFF)) +#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_SHIFT (19U) +#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define 
RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_SHIFT (18U) +#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_SHIFT (17U) +#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_SHIFT (16U) +#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_SHIFT (15U) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_SHIFT (14U) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_SHIFT (13U) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_SHIFT (12U) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000001000)) +#define 
RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_SHIFT (11U) +#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_SHIFT (10U) +#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_SHIFT (9U) +#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_SHIFT (8U) +#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_SHIFT (7U) +#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_SHIFT (6U) +#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_SHIFT (5U) +#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_SHIFT (4U) +#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define 
RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_SHIFT (3U) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_SHIFT (2U) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_SHIFT (1U) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_SHIFT (0U) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_OCP_DEBUG_CONFIG +*/ +#define RGX_CR_OCP_DEBUG_CONFIG (0x9088U) +#define RGX_CR_OCP_DEBUG_CONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_DEBUG_CONFIG_REG_SHIFT (0U) +#define RGX_CR_OCP_DEBUG_CONFIG_REG_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_DEBUG_CONFIG_REG_EN (0x00000001U) + + +/* + Register RGX_CR_OCP_DEBUG_STATUS +*/ +#define RGX_CR_OCP_DEBUG_STATUS (0x9090U) +#define RGX_CR_OCP_DEBUG_STATUS_MASKFULL (IMG_UINT64_C(0x001F1F77FFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_SHIFT (51U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_CLRMSK (IMG_UINT64_C(0xFFE7FFFFFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_SHIFT (50U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_EN 
(IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_SHIFT (48U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFCFFFFFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_SHIFT (43U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFE7FFFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_SHIFT (42U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_EN (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_SHIFT (40U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_SHIFT (38U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_EN (IMG_UINT64_C(0x0000004000000000)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_SHIFT (37U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_EN (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_SHIFT (36U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_EN (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_SHIFT (34U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_EN (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_SHIFT (33U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_EN (IMG_UINT64_C(0x0000000200000000)) +#define 
RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_SHIFT (32U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_SHIFT (31U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_SHIFT (30U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_EN (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_SHIFT (29U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_SHIFT (27U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFE7FFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_SHIFT (26U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_SHIFT (24U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_SHIFT (23U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_SHIFT (22U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_SHIFT (21U) 
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_SHIFT (19U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE7FFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_SHIFT (18U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_SHIFT (16U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_SHIFT (15U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_SHIFT (14U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_SHIFT (13U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_SHIFT (11U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFE7FF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_SHIFT (10U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_SHIFT (8U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_SHIFT 
(7U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_SHIFT (6U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_SHIFT (5U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_SHIFT (3U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE7)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_SHIFT (2U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_SHIFT (0U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_SHIFT (6U) +#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_EN (0x00000040U) +#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_SHIFT (5U) +#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_EN (0x00000020U) +#define RGX_CR_BIF_TRUST_DM_TYPE_META_SHIFT (4U) +#define RGX_CR_BIF_TRUST_DM_TYPE_META_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_BIF_TRUST_DM_TYPE_META_EN (0x00000010U) +#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_SHIFT (3U) +#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_EN (0x00000008U) +#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_SHIFT (2U) +#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_CLRMSK (0xFFFFFFFBU) +#define 
RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_EN (0x00000004U) +#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_SHIFT (1U) +#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_EN (0x00000002U) +#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_SHIFT (0U) +#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_EN (0x00000001U) + + +#define RGX_CR_BIF_TRUST_DM_MASK (0x0000007FU) + + +/* + Register RGX_CR_BIF_TRUST +*/ +#define RGX_CR_BIF_TRUST (0xA000U) +#define RGX_CR_BIF_TRUST_MASKFULL (IMG_UINT64_C(0x00000000001FFFFF)) +#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_SHIFT (20U) +#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_EN (0x00100000U) +#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_SHIFT (19U) +#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_EN (0x00080000U) +#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_SHIFT (18U) +#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_EN (0x00040000U) +#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_SHIFT (17U) +#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_EN (0x00020000U) +#define RGX_CR_BIF_TRUST_ENABLE_SHIFT (16U) +#define RGX_CR_BIF_TRUST_ENABLE_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_BIF_TRUST_ENABLE_EN (0x00010000U) +#define RGX_CR_BIF_TRUST_DM_TRUSTED_SHIFT (9U) +#define RGX_CR_BIF_TRUST_DM_TRUSTED_CLRMSK (0xFFFF01FFU) +#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_SHIFT (8U) +#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_EN (0x00000100U) +#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_SHIFT (7U) +#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_CLRMSK (0xFFFFFF7FU) +#define 
RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_EN (0x00000080U) +#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_SHIFT (6U) +#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_EN (0x00000040U) +#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_SHIFT (5U) +#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_EN (0x00000020U) +#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_SHIFT (4U) +#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_EN (0x00000010U) +#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_SHIFT (3U) +#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_EN (0x00000008U) +#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_SHIFT (2U) +#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_EN (0x00000004U) +#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_SHIFT (1U) +#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_EN (0x00000002U) +#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_SHIFT (0U) +#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_EN (0x00000001U) + + +/* + Register RGX_CR_SYS_BUS_SECURE +*/ +#define RGX_CR_SYS_BUS_SECURE (0xA100U) +#define RGX_CR_SYS_BUS_SECURE__SECR__MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_SYS_BUS_SECURE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_SYS_BUS_SECURE_ENABLE_SHIFT (0U) +#define RGX_CR_SYS_BUS_SECURE_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SYS_BUS_SECURE_ENABLE_EN (0x00000001U) + + +/* + Register RGX_CR_FBA_FC0_CHECKSUM +*/ +#define RGX_CR_FBA_FC0_CHECKSUM (0xD170U) +#define RGX_CR_FBA_FC0_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define 
RGX_CR_FBA_FC0_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_FBA_FC0_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_FBA_FC1_CHECKSUM +*/ +#define RGX_CR_FBA_FC1_CHECKSUM (0xD178U) +#define RGX_CR_FBA_FC1_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FBA_FC1_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_FBA_FC1_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_FBA_FC2_CHECKSUM +*/ +#define RGX_CR_FBA_FC2_CHECKSUM (0xD180U) +#define RGX_CR_FBA_FC2_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FBA_FC2_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_FBA_FC2_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_FBA_FC3_CHECKSUM +*/ +#define RGX_CR_FBA_FC3_CHECKSUM (0xD188U) +#define RGX_CR_FBA_FC3_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FBA_FC3_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_FBA_FC3_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_CLK_CTRL2 +*/ +#define RGX_CR_CLK_CTRL2 (0xD200U) +#define RGX_CR_CLK_CTRL2_MASKFULL (IMG_UINT64_C(0x0000000000000F33)) +#define RGX_CR_CLK_CTRL2_MCU_FBTC_SHIFT (10U) +#define RGX_CR_CLK_CTRL2_MCU_FBTC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF)) +#define RGX_CR_CLK_CTRL2_MCU_FBTC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL2_MCU_FBTC_ON (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_CLK_CTRL2_MCU_FBTC_AUTO (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_CLK_CTRL2_VRDM_SHIFT (8U) +#define RGX_CR_CLK_CTRL2_VRDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) +#define RGX_CR_CLK_CTRL2_VRDM_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL2_VRDM_ON (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_CTRL2_VRDM_AUTO (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_CTRL2_SH_SHIFT (4U) +#define RGX_CR_CLK_CTRL2_SH_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) +#define RGX_CR_CLK_CTRL2_SH_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL2_SH_ON (IMG_UINT64_C(0x0000000000000010)) 
+#define RGX_CR_CLK_CTRL2_SH_AUTO (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_CLK_CTRL2_FBA_SHIFT (0U) +#define RGX_CR_CLK_CTRL2_FBA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_CLK_CTRL2_FBA_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL2_FBA_ON (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_CLK_CTRL2_FBA_AUTO (IMG_UINT64_C(0x0000000000000002)) + + +/* + Register RGX_CR_CLK_STATUS2 +*/ +#define RGX_CR_CLK_STATUS2 (0xD208U) +#define RGX_CR_CLK_STATUS2_MASKFULL (IMG_UINT64_C(0x0000000000000015)) +#define RGX_CR_CLK_STATUS2_VRDM_SHIFT (4U) +#define RGX_CR_CLK_STATUS2_VRDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_CLK_STATUS2_VRDM_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS2_VRDM_RUNNING (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_CLK_STATUS2_SH_SHIFT (2U) +#define RGX_CR_CLK_STATUS2_SH_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_CLK_STATUS2_SH_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS2_SH_RUNNING (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_CLK_STATUS2_FBA_SHIFT (0U) +#define RGX_CR_CLK_STATUS2_FBA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_CLK_STATUS2_FBA_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS2_FBA_RUNNING (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_RPM_SHF_FPL +*/ +#define RGX_CR_RPM_SHF_FPL (0xD520U) +#define RGX_CR_RPM_SHF_FPL_MASKFULL (IMG_UINT64_C(0x3FFFFFFFFFFFFFFC)) +#define RGX_CR_RPM_SHF_FPL_SIZE_SHIFT (40U) +#define RGX_CR_RPM_SHF_FPL_SIZE_CLRMSK (IMG_UINT64_C(0xC00000FFFFFFFFFF)) +#define RGX_CR_RPM_SHF_FPL_BASE_SHIFT (2U) +#define RGX_CR_RPM_SHF_FPL_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000003)) +#define RGX_CR_RPM_SHF_FPL_BASE_ALIGNSHIFT (2U) +#define RGX_CR_RPM_SHF_FPL_BASE_ALIGNSIZE (4U) + + +/* + Register RGX_CR_RPM_SHF_FPL_READ +*/ +#define RGX_CR_RPM_SHF_FPL_READ (0xD528U) +#define RGX_CR_RPM_SHF_FPL_READ_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF)) +#define 
RGX_CR_RPM_SHF_FPL_READ_TOGGLE_SHIFT (22U) +#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_EN (0x00400000U) +#define RGX_CR_RPM_SHF_FPL_READ_OFFSET_SHIFT (0U) +#define RGX_CR_RPM_SHF_FPL_READ_OFFSET_CLRMSK (0xFFC00000U) + + +/* + Register RGX_CR_RPM_SHF_FPL_WRITE +*/ +#define RGX_CR_RPM_SHF_FPL_WRITE (0xD530U) +#define RGX_CR_RPM_SHF_FPL_WRITE_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF)) +#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_SHIFT (22U) +#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_EN (0x00400000U) +#define RGX_CR_RPM_SHF_FPL_WRITE_OFFSET_SHIFT (0U) +#define RGX_CR_RPM_SHF_FPL_WRITE_OFFSET_CLRMSK (0xFFC00000U) + + +/* + Register RGX_CR_RPM_SHG_FPL +*/ +#define RGX_CR_RPM_SHG_FPL (0xD538U) +#define RGX_CR_RPM_SHG_FPL_MASKFULL (IMG_UINT64_C(0x3FFFFFFFFFFFFFFC)) +#define RGX_CR_RPM_SHG_FPL_SIZE_SHIFT (40U) +#define RGX_CR_RPM_SHG_FPL_SIZE_CLRMSK (IMG_UINT64_C(0xC00000FFFFFFFFFF)) +#define RGX_CR_RPM_SHG_FPL_BASE_SHIFT (2U) +#define RGX_CR_RPM_SHG_FPL_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000003)) +#define RGX_CR_RPM_SHG_FPL_BASE_ALIGNSHIFT (2U) +#define RGX_CR_RPM_SHG_FPL_BASE_ALIGNSIZE (4U) + + +/* + Register RGX_CR_RPM_SHG_FPL_READ +*/ +#define RGX_CR_RPM_SHG_FPL_READ (0xD540U) +#define RGX_CR_RPM_SHG_FPL_READ_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF)) +#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_SHIFT (22U) +#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_EN (0x00400000U) +#define RGX_CR_RPM_SHG_FPL_READ_OFFSET_SHIFT (0U) +#define RGX_CR_RPM_SHG_FPL_READ_OFFSET_CLRMSK (0xFFC00000U) + + +/* + Register RGX_CR_RPM_SHG_FPL_WRITE +*/ +#define RGX_CR_RPM_SHG_FPL_WRITE (0xD548U) +#define RGX_CR_RPM_SHG_FPL_WRITE_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF)) +#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_SHIFT (22U) +#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_EN 
(0x00400000U) +#define RGX_CR_RPM_SHG_FPL_WRITE_OFFSET_SHIFT (0U) +#define RGX_CR_RPM_SHG_FPL_WRITE_OFFSET_CLRMSK (0xFFC00000U) + + +/* + Register RGX_CR_SH_PERF +*/ +#define RGX_CR_SH_PERF (0xD5F8U) +#define RGX_CR_SH_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_SH_PERF_CLR_3_SHIFT (4U) +#define RGX_CR_SH_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_SH_PERF_CLR_3_EN (0x00000010U) +#define RGX_CR_SH_PERF_CLR_2_SHIFT (3U) +#define RGX_CR_SH_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_SH_PERF_CLR_2_EN (0x00000008U) +#define RGX_CR_SH_PERF_CLR_1_SHIFT (2U) +#define RGX_CR_SH_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SH_PERF_CLR_1_EN (0x00000004U) +#define RGX_CR_SH_PERF_CLR_0_SHIFT (1U) +#define RGX_CR_SH_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SH_PERF_CLR_0_EN (0x00000002U) +#define RGX_CR_SH_PERF_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_SH_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SH_PERF_CTRL_ENABLE_EN (0x00000001U) + + +/* + Register RGX_CR_SH_PERF_SELECT0 +*/ +#define RGX_CR_SH_PERF_SELECT0 (0xD600U) +#define RGX_CR_SH_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define RGX_CR_SH_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define RGX_CR_SH_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_SH_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define RGX_CR_SH_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_SH_PERF_SELECT0_MODE_SHIFT (21U) +#define RGX_CR_SH_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_SH_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_SH_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define RGX_CR_SH_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register RGX_CR_SH_PERF_COUNTER_0 +*/ +#define RGX_CR_SH_PERF_COUNTER_0 (0xD628U) 
+#define RGX_CR_SH_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SH_PERF_COUNTER_0_REG_SHIFT (0U) +#define RGX_CR_SH_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SHF_SHG_CHECKSUM +*/ +#define RGX_CR_SHF_SHG_CHECKSUM (0xD1C0U) +#define RGX_CR_SHF_SHG_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SHF_SHG_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_SHF_SHG_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SHF_VERTEX_BIF_CHECKSUM +*/ +#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM (0xD1C8U) +#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SHF_VARY_BIF_CHECKSUM +*/ +#define RGX_CR_SHF_VARY_BIF_CHECKSUM (0xD1D0U) +#define RGX_CR_SHF_VARY_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SHF_VARY_BIF_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_SHF_VARY_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_RPM_BIF_CHECKSUM +*/ +#define RGX_CR_RPM_BIF_CHECKSUM (0xD1D8U) +#define RGX_CR_RPM_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_RPM_BIF_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_RPM_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SHG_BIF_CHECKSUM +*/ +#define RGX_CR_SHG_BIF_CHECKSUM (0xD1E0U) +#define RGX_CR_SHG_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SHG_BIF_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_SHG_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SHG_FE_BE_CHECKSUM +*/ +#define RGX_CR_SHG_FE_BE_CHECKSUM (0xD1E8U) +#define RGX_CR_SHG_FE_BE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SHG_FE_BE_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_SHG_FE_BE_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register DPX_CR_BF_PERF +*/ +#define DPX_CR_BF_PERF (0xC458U) +#define 
DPX_CR_BF_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define DPX_CR_BF_PERF_CLR_3_SHIFT (4U) +#define DPX_CR_BF_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define DPX_CR_BF_PERF_CLR_3_EN (0x00000010U) +#define DPX_CR_BF_PERF_CLR_2_SHIFT (3U) +#define DPX_CR_BF_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define DPX_CR_BF_PERF_CLR_2_EN (0x00000008U) +#define DPX_CR_BF_PERF_CLR_1_SHIFT (2U) +#define DPX_CR_BF_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define DPX_CR_BF_PERF_CLR_1_EN (0x00000004U) +#define DPX_CR_BF_PERF_CLR_0_SHIFT (1U) +#define DPX_CR_BF_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define DPX_CR_BF_PERF_CLR_0_EN (0x00000002U) +#define DPX_CR_BF_PERF_CTRL_ENABLE_SHIFT (0U) +#define DPX_CR_BF_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define DPX_CR_BF_PERF_CTRL_ENABLE_EN (0x00000001U) + + +/* + Register DPX_CR_BF_PERF_SELECT0 +*/ +#define DPX_CR_BF_PERF_SELECT0 (0xC460U) +#define DPX_CR_BF_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define DPX_CR_BF_PERF_SELECT0_MODE_SHIFT (21U) +#define DPX_CR_BF_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define DPX_CR_BF_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register DPX_CR_BF_PERF_COUNTER_0 +*/ +#define DPX_CR_BF_PERF_COUNTER_0 (0xC488U) +#define DPX_CR_BF_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define DPX_CR_BF_PERF_COUNTER_0_REG_SHIFT (0U) +#define DPX_CR_BF_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) + + +/* + Register 
DPX_CR_BT_PERF +*/ +#define DPX_CR_BT_PERF (0xC3D0U) +#define DPX_CR_BT_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define DPX_CR_BT_PERF_CLR_3_SHIFT (4U) +#define DPX_CR_BT_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define DPX_CR_BT_PERF_CLR_3_EN (0x00000010U) +#define DPX_CR_BT_PERF_CLR_2_SHIFT (3U) +#define DPX_CR_BT_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define DPX_CR_BT_PERF_CLR_2_EN (0x00000008U) +#define DPX_CR_BT_PERF_CLR_1_SHIFT (2U) +#define DPX_CR_BT_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define DPX_CR_BT_PERF_CLR_1_EN (0x00000004U) +#define DPX_CR_BT_PERF_CLR_0_SHIFT (1U) +#define DPX_CR_BT_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define DPX_CR_BT_PERF_CLR_0_EN (0x00000002U) +#define DPX_CR_BT_PERF_CTRL_ENABLE_SHIFT (0U) +#define DPX_CR_BT_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define DPX_CR_BT_PERF_CTRL_ENABLE_EN (0x00000001U) + + +/* + Register DPX_CR_BT_PERF_SELECT0 +*/ +#define DPX_CR_BT_PERF_SELECT0 (0xC3D8U) +#define DPX_CR_BT_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define DPX_CR_BT_PERF_SELECT0_MODE_SHIFT (21U) +#define DPX_CR_BT_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define DPX_CR_BT_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register DPX_CR_BT_PERF_COUNTER_0 +*/ +#define DPX_CR_BT_PERF_COUNTER_0 (0xC420U) +#define DPX_CR_BT_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define DPX_CR_BT_PERF_COUNTER_0_REG_SHIFT (0U) +#define 
DPX_CR_BT_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) + + +/* + Register DPX_CR_RQ_USC_DEBUG +*/ +#define DPX_CR_RQ_USC_DEBUG (0xC110U) +#define DPX_CR_RQ_USC_DEBUG_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_SHIFT (0U) +#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) + + +/* + Register DPX_CR_BIF_FAULT_BANK_MMU_STATUS +*/ +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS (0xC5C8U) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775)) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_SHIFT (12U) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_SHIFT (8U) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_SHIFT (5U) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_SHIFT (4U) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_EN (0x00000010U) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_SHIFT (0U) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_EN (0x00000001U) + + +/* + Register DPX_CR_BIF_FAULT_BANK_REQ_STATUS +*/ +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS (0xC5D0U) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x03FFFFFFFFFFFFF0)) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_SHIFT (57U) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0200000000000000)) +#define 
DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_SHIFT (44U) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFE000FFFFFFFFFFF)) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_SHIFT (40U) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_SHIFT (4U) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSIZE (16U) + + +/* + Register DPX_CR_BIF_MMU_STATUS +*/ +#define DPX_CR_BIF_MMU_STATUS (0xC5D8U) +#define DPX_CR_BIF_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000FFFFFF7)) +#define DPX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT (20U) +#define DPX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK (0xF00FFFFFU) +#define DPX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT (12U) +#define DPX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK (0xFFF00FFFU) +#define DPX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT (4U) +#define DPX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK (0xFFFFF00FU) +#define DPX_CR_BIF_MMU_STATUS_STALLED_SHIFT (2U) +#define DPX_CR_BIF_MMU_STATUS_STALLED_CLRMSK (0xFFFFFFFBU) +#define DPX_CR_BIF_MMU_STATUS_STALLED_EN (0x00000004U) +#define DPX_CR_BIF_MMU_STATUS_PAUSED_SHIFT (1U) +#define DPX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK (0xFFFFFFFDU) +#define DPX_CR_BIF_MMU_STATUS_PAUSED_EN (0x00000002U) +#define DPX_CR_BIF_MMU_STATUS_BUSY_SHIFT (0U) +#define DPX_CR_BIF_MMU_STATUS_BUSY_CLRMSK (0xFFFFFFFEU) +#define DPX_CR_BIF_MMU_STATUS_BUSY_EN (0x00000001U) + + +/* + Register DPX_CR_RT_PERF +*/ +#define DPX_CR_RT_PERF (0xC700U) +#define DPX_CR_RT_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define DPX_CR_RT_PERF_CLR_3_SHIFT (4U) +#define DPX_CR_RT_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define DPX_CR_RT_PERF_CLR_3_EN (0x00000010U) +#define DPX_CR_RT_PERF_CLR_2_SHIFT (3U) +#define DPX_CR_RT_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define DPX_CR_RT_PERF_CLR_2_EN (0x00000008U) +#define 
DPX_CR_RT_PERF_CLR_1_SHIFT (2U) +#define DPX_CR_RT_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define DPX_CR_RT_PERF_CLR_1_EN (0x00000004U) +#define DPX_CR_RT_PERF_CLR_0_SHIFT (1U) +#define DPX_CR_RT_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define DPX_CR_RT_PERF_CLR_0_EN (0x00000002U) +#define DPX_CR_RT_PERF_CTRL_ENABLE_SHIFT (0U) +#define DPX_CR_RT_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define DPX_CR_RT_PERF_CTRL_ENABLE_EN (0x00000001U) + + +/* + Register DPX_CR_RT_PERF_SELECT0 +*/ +#define DPX_CR_RT_PERF_SELECT0 (0xC708U) +#define DPX_CR_RT_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define DPX_CR_RT_PERF_SELECT0_MODE_SHIFT (21U) +#define DPX_CR_RT_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define DPX_CR_RT_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register DPX_CR_RT_PERF_COUNTER_0 +*/ +#define DPX_CR_RT_PERF_COUNTER_0 (0xC730U) +#define DPX_CR_RT_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define DPX_CR_RT_PERF_COUNTER_0_REG_SHIFT (0U) +#define DPX_CR_RT_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) + + +/* + Register DPX_CR_BX_TU_PERF +*/ +#define DPX_CR_BX_TU_PERF (0xC908U) +#define DPX_CR_BX_TU_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define DPX_CR_BX_TU_PERF_CLR_3_SHIFT (4U) +#define DPX_CR_BX_TU_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define DPX_CR_BX_TU_PERF_CLR_3_EN (0x00000010U) +#define DPX_CR_BX_TU_PERF_CLR_2_SHIFT (3U) +#define 
DPX_CR_BX_TU_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define DPX_CR_BX_TU_PERF_CLR_2_EN (0x00000008U) +#define DPX_CR_BX_TU_PERF_CLR_1_SHIFT (2U) +#define DPX_CR_BX_TU_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define DPX_CR_BX_TU_PERF_CLR_1_EN (0x00000004U) +#define DPX_CR_BX_TU_PERF_CLR_0_SHIFT (1U) +#define DPX_CR_BX_TU_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define DPX_CR_BX_TU_PERF_CLR_0_EN (0x00000002U) +#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_SHIFT (0U) +#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_EN (0x00000001U) + + +/* + Register DPX_CR_BX_TU_PERF_SELECT0 +*/ +#define DPX_CR_BX_TU_PERF_SELECT0 (0xC910U) +#define DPX_CR_BX_TU_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define DPX_CR_BX_TU_PERF_SELECT0_MODE_SHIFT (21U) +#define DPX_CR_BX_TU_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define DPX_CR_BX_TU_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register DPX_CR_BX_TU_PERF_COUNTER_0 +*/ +#define DPX_CR_BX_TU_PERF_COUNTER_0 (0xC938U) +#define DPX_CR_BX_TU_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_SHIFT (0U) +#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) + + +/* + Register DPX_CR_RS_PDS_RR_CHECKSUM +*/ +#define DPX_CR_RS_PDS_RR_CHECKSUM (0xC0F0U) +#define DPX_CR_RS_PDS_RR_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define 
DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_SHIFT (0U) +#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) + + +/* + Register RGX_CR_MMU_CBASE_MAPPING_CONTEXT +*/ +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT (0xE140U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_MMU_CBASE_MAPPING +*/ +#define RGX_CR_MMU_CBASE_MAPPING (0xE148U) +#define RGX_CR_MMU_CBASE_MAPPING_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK (0xF0000000U) +#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_MMU_FAULT_STATUS +*/ +#define RGX_CR_MMU_FAULT_STATUS (0xE150U) +#define RGX_CR_MMU_FAULT_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT (28U) +#define RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT (20U) +#define RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF00FFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT (12U) +#define RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF00FFF)) +#define RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT (6U) +#define RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF03F)) +#define RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT (4U) +#define RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) +#define RGX_CR_MMU_FAULT_STATUS_RNW_SHIFT (3U) +#define RGX_CR_MMU_FAULT_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_MMU_FAULT_STATUS_RNW_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT (1U) +#define RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFFFFF9)) +#define RGX_CR_MMU_FAULT_STATUS_FAULT_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MMU_FAULT_STATUS_FAULT_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_MMU_FAULT_STATUS_META +*/ +#define RGX_CR_MMU_FAULT_STATUS_META (0xE158U) +#define RGX_CR_MMU_FAULT_STATUS_META_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT (28U) +#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT (20U) +#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF00FFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT (12U) +#define RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF00FFF)) +#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT (6U) +#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF03F)) +#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT (4U) +#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) +#define RGX_CR_MMU_FAULT_STATUS_META_RNW_SHIFT (3U) +#define RGX_CR_MMU_FAULT_STATUS_META_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_MMU_FAULT_STATUS_META_RNW_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT (1U) +#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9)) +#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_SLC3_CTRL_MISC +*/ +#define RGX_CR_SLC3_CTRL_MISC (0xE200U) +#define RGX_CR_SLC3_CTRL_MISC_MASKFULL (IMG_UINT64_C(0x0000000000000107)) +#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_SHIFT (8U) +#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_CLRMSK 
(0xFFFFFEFFU) +#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN (0x00000100U) +#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SHIFT (0U) +#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK (0xFFFFFFF8U) +#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_LINEAR (0x00000000U) +#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_IN_PAGE_HASH (0x00000001U) +#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_FIXED_PVR_HASH (0x00000002U) +#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH (0x00000003U) +#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_WEAVED_HASH (0x00000004U) + + +/* + Register RGX_CR_SLC3_SCRAMBLE +*/ +#define RGX_CR_SLC3_SCRAMBLE (0xE208U) +#define RGX_CR_SLC3_SCRAMBLE_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SLC3_SCRAMBLE_BITS_SHIFT (0U) +#define RGX_CR_SLC3_SCRAMBLE_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SLC3_SCRAMBLE2 +*/ +#define RGX_CR_SLC3_SCRAMBLE2 (0xE210U) +#define RGX_CR_SLC3_SCRAMBLE2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SLC3_SCRAMBLE2_BITS_SHIFT (0U) +#define RGX_CR_SLC3_SCRAMBLE2_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SLC3_SCRAMBLE3 +*/ +#define RGX_CR_SLC3_SCRAMBLE3 (0xE218U) +#define RGX_CR_SLC3_SCRAMBLE3_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SLC3_SCRAMBLE3_BITS_SHIFT (0U) +#define RGX_CR_SLC3_SCRAMBLE3_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SLC3_SCRAMBLE4 +*/ +#define RGX_CR_SLC3_SCRAMBLE4 (0xE260U) +#define RGX_CR_SLC3_SCRAMBLE4_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SLC3_SCRAMBLE4_BITS_SHIFT (0U) +#define RGX_CR_SLC3_SCRAMBLE4_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SLC3_STATUS +*/ +#define RGX_CR_SLC3_STATUS (0xE220U) +#define RGX_CR_SLC3_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SLC3_STATUS_WRITES1_SHIFT (48U) +#define RGX_CR_SLC3_STATUS_WRITES1_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) 
+#define RGX_CR_SLC3_STATUS_WRITES0_SHIFT (32U) +#define RGX_CR_SLC3_STATUS_WRITES0_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF)) +#define RGX_CR_SLC3_STATUS_READS1_SHIFT (16U) +#define RGX_CR_SLC3_STATUS_READS1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF)) +#define RGX_CR_SLC3_STATUS_READS0_SHIFT (0U) +#define RGX_CR_SLC3_STATUS_READS0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register RGX_CR_SLC3_IDLE +*/ +#define RGX_CR_SLC3_IDLE (0xE228U) +#define RGX_CR_SLC3_IDLE_MASKFULL (IMG_UINT64_C(0x00000000000FFFFF)) +#define RGX_CR_SLC3_IDLE_ORDERQ_DUST2_SHIFT (18U) +#define RGX_CR_SLC3_IDLE_ORDERQ_DUST2_CLRMSK (0xFFF3FFFFU) +#define RGX_CR_SLC3_IDLE_MMU_SHIFT (17U) +#define RGX_CR_SLC3_IDLE_MMU_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_SLC3_IDLE_MMU_EN (0x00020000U) +#define RGX_CR_SLC3_IDLE_RDI_SHIFT (16U) +#define RGX_CR_SLC3_IDLE_RDI_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_SLC3_IDLE_RDI_EN (0x00010000U) +#define RGX_CR_SLC3_IDLE_IMGBV4_SHIFT (12U) +#define RGX_CR_SLC3_IDLE_IMGBV4_CLRMSK (0xFFFF0FFFU) +#define RGX_CR_SLC3_IDLE_CACHE_BANKS_SHIFT (4U) +#define RGX_CR_SLC3_IDLE_CACHE_BANKS_CLRMSK (0xFFFFF00FU) +#define RGX_CR_SLC3_IDLE_ORDERQ_DUST_SHIFT (2U) +#define RGX_CR_SLC3_IDLE_ORDERQ_DUST_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_SHIFT (1U) +#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_EN (0x00000002U) +#define RGX_CR_SLC3_IDLE_XBAR_SHIFT (0U) +#define RGX_CR_SLC3_IDLE_XBAR_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SLC3_IDLE_XBAR_EN (0x00000001U) + + +/* + Register RGX_CR_SLC3_FAULT_STOP_STATUS +*/ +#define RGX_CR_SLC3_FAULT_STOP_STATUS (0xE248U) +#define RGX_CR_SLC3_FAULT_STOP_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000001FFF)) +#define RGX_CR_SLC3_FAULT_STOP_STATUS_BIF_SHIFT (0U) +#define RGX_CR_SLC3_FAULT_STOP_STATUS_BIF_CLRMSK (0xFFFFE000U) + + +/* + Register RGX_CR_VDM_CONTEXT_STORE_MODE +*/ +#define RGX_CR_VDM_CONTEXT_STORE_MODE (0xF048U) +#define RGX_CR_VDM_CONTEXT_STORE_MODE_MASKFULL 
(IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_SHIFT (0U) +#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INDEX (0x00000000U) +#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INSTANCE (0x00000001U) +#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_LIST (0x00000002U) + + +/* + Register RGX_CR_CONTEXT_MAPPING0 +*/ +#define RGX_CR_CONTEXT_MAPPING0 (0xF078U) +#define RGX_CR_CONTEXT_MAPPING0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_CONTEXT_MAPPING0_2D_SHIFT (24U) +#define RGX_CR_CONTEXT_MAPPING0_2D_CLRMSK (0x00FFFFFFU) +#define RGX_CR_CONTEXT_MAPPING0_CDM_SHIFT (16U) +#define RGX_CR_CONTEXT_MAPPING0_CDM_CLRMSK (0xFF00FFFFU) +#define RGX_CR_CONTEXT_MAPPING0_3D_SHIFT (8U) +#define RGX_CR_CONTEXT_MAPPING0_3D_CLRMSK (0xFFFF00FFU) +#define RGX_CR_CONTEXT_MAPPING0_TA_SHIFT (0U) +#define RGX_CR_CONTEXT_MAPPING0_TA_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_CONTEXT_MAPPING1 +*/ +#define RGX_CR_CONTEXT_MAPPING1 (0xF080U) +#define RGX_CR_CONTEXT_MAPPING1_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_CONTEXT_MAPPING1_HOST_SHIFT (8U) +#define RGX_CR_CONTEXT_MAPPING1_HOST_CLRMSK (0xFFFF00FFU) +#define RGX_CR_CONTEXT_MAPPING1_TLA_SHIFT (0U) +#define RGX_CR_CONTEXT_MAPPING1_TLA_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_CONTEXT_MAPPING2 +*/ +#define RGX_CR_CONTEXT_MAPPING2 (0xF088U) +#define RGX_CR_CONTEXT_MAPPING2_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF)) +#define RGX_CR_CONTEXT_MAPPING2_ALIST0_SHIFT (16U) +#define RGX_CR_CONTEXT_MAPPING2_ALIST0_CLRMSK (0xFF00FFFFU) +#define RGX_CR_CONTEXT_MAPPING2_TE0_SHIFT (8U) +#define RGX_CR_CONTEXT_MAPPING2_TE0_CLRMSK (0xFFFF00FFU) +#define RGX_CR_CONTEXT_MAPPING2_VCE0_SHIFT (0U) +#define RGX_CR_CONTEXT_MAPPING2_VCE0_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_CONTEXT_MAPPING3 +*/ +#define RGX_CR_CONTEXT_MAPPING3 (0xF090U) +#define RGX_CR_CONTEXT_MAPPING3_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF)) +#define 
RGX_CR_CONTEXT_MAPPING3_ALIST1_SHIFT (16U) +#define RGX_CR_CONTEXT_MAPPING3_ALIST1_CLRMSK (0xFF00FFFFU) +#define RGX_CR_CONTEXT_MAPPING3_TE1_SHIFT (8U) +#define RGX_CR_CONTEXT_MAPPING3_TE1_CLRMSK (0xFFFF00FFU) +#define RGX_CR_CONTEXT_MAPPING3_VCE1_SHIFT (0U) +#define RGX_CR_CONTEXT_MAPPING3_VCE1_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_BIF_JONES_OUTSTANDING_READ +*/ +#define RGX_CR_BIF_JONES_OUTSTANDING_READ (0xF098U) +#define RGX_CR_BIF_JONES_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_SHIFT (0U) +#define RGX_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ +*/ +#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ (0xF0A0U) +#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_SHIFT (0U) +#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_BIF_DUST_OUTSTANDING_READ +*/ +#define RGX_CR_BIF_DUST_OUTSTANDING_READ (0xF0A8U) +#define RGX_CR_BIF_DUST_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_SHIFT (0U) +#define RGX_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_CONTEXT_MAPPING4 +*/ +#define RGX_CR_CONTEXT_MAPPING4 (0xF210U) +#define RGX_CR_CONTEXT_MAPPING4_MASKFULL (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) +#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_SHIFT (40U) +#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_CLRMSK (IMG_UINT64_C(0xFFFF00FFFFFFFFFF)) +#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_SHIFT (32U) +#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFF00FFFFFFFF)) +#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_SHIFT (24U) +#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF)) +#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_SHIFT (16U) +#define 
RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) +#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_SHIFT (8U) +#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) +#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_SHIFT (0U) +#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_MULTICORE_GPU +*/ +#define RGX_CR_MULTICORE_GPU (0xF300U) +#define RGX_CR_MULTICORE_GPU_MASKFULL (IMG_UINT64_C(0x000000000000007F)) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_SHIFT (6U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_EN (0x00000040U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_SHIFT (5U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_EN (0x00000020U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_SHIFT (4U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_EN (0x00000010U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_SHIFT (3U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_EN (0x00000008U) +#define RGX_CR_MULTICORE_GPU_ID_SHIFT (0U) +#define RGX_CR_MULTICORE_GPU_ID_CLRMSK (0xFFFFFFF8U) + + +/* + Register RGX_CR_MULTICORE_SYSTEM +*/ +#define RGX_CR_MULTICORE_SYSTEM (0xF308U) +#define RGX_CR_MULTICORE_SYSTEM_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT (0U) +#define RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_CLRMSK (0xFFFFFFF0U) + + +#endif /* RGX_CR_DEFS_KM_H */ + +/***************************************************************************** + End of file (rgx_cr_defs_km.h) +*****************************************************************************/ + diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgxdefs_km.h 
b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgxdefs_km.h new file mode 100644 index 000000000000..c2b7bb7c6303 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgxdefs_km.h @@ -0,0 +1,311 @@ +/*************************************************************************/ /*! +@Title Rogue hw definitions (kernel mode) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXDEFS_KM_H +#define RGXDEFS_KM_H + +#if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER) +#include RGX_BVNC_CORE_KM_HEADER +#include RGX_BNC_CONFIG_KM_HEADER +#endif + +#define IMG_EXPLICIT_INCLUDE_HWDEFS +#if defined(__KERNEL__) +#include "rgx_cr_defs_km.h" +#include "tmp_rgx_cr_defs_riscv_km.h" +#endif +#undef IMG_EXPLICIT_INCLUDE_HWDEFS + +#include "rgx_heap_firmware.h" + +/* The following Macros are picked up through BVNC headers for no hardware + * operations to be compatible with old build infrastructure. 
+ */ +#if defined(NO_HARDWARE) +/****************************************************************************** + * Check for valid B.X.N.C + *****************************************************************************/ +#if !defined(RGX_BVNC_KM_B) || !defined(RGX_BVNC_KM_V) || !defined(RGX_BVNC_KM_N) || !defined(RGX_BVNC_KM_C) +#error "Need to specify BVNC (RGX_BVNC_KM_B, RGX_BVNC_KM_V, RGX_BVNC_KM_N and RGX_BVNC_C)" +#endif + +/* Check core/config compatibility */ +#if (RGX_BVNC_KM_B != RGX_BNC_KM_B) || (RGX_BVNC_KM_N != RGX_BNC_KM_N) || (RGX_BVNC_KM_C != RGX_BNC_KM_C) +#error "BVNC headers are mismatching (KM core/config)" +#endif +#endif + +/****************************************************************************** + * RGX Version name + *****************************************************************************/ +#define RGX_BVNC_KM_ST2(S) #S +#define RGX_BVNC_KM_ST(S) RGX_BVNC_KM_ST2(S) +#define RGX_BVNC_KM RGX_BVNC_KM_ST(RGX_BVNC_KM_B) "." RGX_BVNC_KM_ST(RGX_BVNC_KM_V) "." RGX_BVNC_KM_ST(RGX_BVNC_KM_N) "." 
RGX_BVNC_KM_ST(RGX_BVNC_KM_C) +#define RGX_BVNC_KM_V_ST RGX_BVNC_KM_ST(RGX_BVNC_KM_V) + +/* Maximum string size is [bb.vvvp.nnnn.cccc\0], includes null char */ +#define RGX_BVNC_STR_SIZE_MAX (2+1+4+1+4+1+4+1) +#define RGX_BVNC_STR_FMTSPEC "%u.%u.%u.%u" +#define RGX_BVNC_STRP_FMTSPEC "%u.%up.%u.%u" + + +/****************************************************************************** + * RGX Defines + *****************************************************************************/ + +#define BVNC_FIELD_MASK ((1 << BVNC_FIELD_WIDTH) - 1) +#define C_POSITION (0) +#define N_POSITION ((C_POSITION) + (BVNC_FIELD_WIDTH)) +#define V_POSITION ((N_POSITION) + (BVNC_FIELD_WIDTH)) +#define B_POSITION ((V_POSITION) + (BVNC_FIELD_WIDTH)) + +#define B_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (B_POSITION))) +#define V_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (V_POSITION))) +#define N_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (N_POSITION))) +#define C_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (C_POSITION))) + +#define GET_B(x) (((x) & (B_POSTION_MASK)) >> (B_POSITION)) +#define GET_V(x) (((x) & (V_POSTION_MASK)) >> (V_POSITION)) +#define GET_N(x) (((x) & (N_POSTION_MASK)) >> (N_POSITION)) +#define GET_C(x) (((x) & (C_POSTION_MASK)) >> (C_POSITION)) + +#define BVNC_PACK(B,V,N,C) ((((IMG_UINT64)(B))) << (B_POSITION) | \ + (((IMG_UINT64)(V))) << (V_POSITION) | \ + (((IMG_UINT64)(N))) << (N_POSITION) | \ + (((IMG_UINT64)(C))) << (C_POSITION) \ + ) + +#define RGX_CR_CORE_ID_CONFIG_N_SHIFT (8U) +#define RGX_CR_CORE_ID_CONFIG_C_SHIFT (0U) + +#define RGX_CR_CORE_ID_CONFIG_N_CLRMSK (0XFFFF00FFU) +#define RGX_CR_CORE_ID_CONFIG_C_CLRMSK (0XFFFFFF00U) + +#define RGXFW_MAX_NUM_OS (8U) +#define RGXFW_HOST_OS (0U) +#define RGXFW_GUEST_OSID_START (1U) + +#define RGXFW_THREAD_0 (0U) +#define RGXFW_THREAD_1 (1U) + +/* META cores (required for the RGX_FEATURE_META) */ +#define MTP218 (1) +#define MTP219 (2) +#define LTP218 (3) +#define LTP217 (4) + +/* META Core memory feature 
depending on META variants */ +#define RGX_META_COREMEM_32K (32*1024) +#define RGX_META_COREMEM_48K (48*1024) +#define RGX_META_COREMEM_64K (64*1024) +#define RGX_META_COREMEM_96K (96*1024) +#define RGX_META_COREMEM_128K (128*1024) +#define RGX_META_COREMEM_256K (256*1024) + +#if !defined(__KERNEL__) +#if (!defined(SUPPORT_TRUSTED_DEVICE) || defined(RGX_FEATURE_META_DMA)) && \ + (defined(RGX_FEATURE_META_COREMEM_SIZE) && RGX_FEATURE_META_COREMEM_SIZE != 0) +#define RGX_META_COREMEM_SIZE (RGX_FEATURE_META_COREMEM_SIZE*1024U) +#define RGX_META_COREMEM (1) +#define RGX_META_COREMEM_CODE (1) +#if !defined(FIX_HW_BRN_50767) && defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1) +#define RGX_META_COREMEM_DATA (1) +#endif +#else +#undef SUPPORT_META_COREMEM +#undef RGX_FEATURE_META_COREMEM_SIZE +#undef RGX_FEATURE_META_DMA +#define RGX_FEATURE_META_COREMEM_SIZE (0) +#define RGX_META_COREMEM_SIZE (0) +#endif +#endif + +#define GET_ROGUE_CACHE_LINE_SIZE(x) ((((IMG_INT32)(x)) > 0) ? ((x)/8) : (0)) + + +#define MAX_HW_TA3DCONTEXTS 2U + +#define RGX_CR_SOFT_RESET_DUST_n_CORE_EN (RGX_CR_SOFT_RESET_DUST_A_CORE_EN | \ + RGX_CR_SOFT_RESET_DUST_B_CORE_EN | \ + RGX_CR_SOFT_RESET_DUST_C_CORE_EN | \ + RGX_CR_SOFT_RESET_DUST_D_CORE_EN | \ + RGX_CR_SOFT_RESET_DUST_E_CORE_EN | \ + RGX_CR_SOFT_RESET_DUST_F_CORE_EN | \ + RGX_CR_SOFT_RESET_DUST_G_CORE_EN | \ + RGX_CR_SOFT_RESET_DUST_H_CORE_EN) + +/* SOFT_RESET Rascal and DUSTs bits */ +#define RGX_CR_SOFT_RESET_RASCALDUSTS_EN (RGX_CR_SOFT_RESET_RASCAL_CORE_EN | \ + RGX_CR_SOFT_RESET_DUST_n_CORE_EN) + + + + +/* SOFT_RESET steps as defined in the TRM */ +#define RGX_S7_SOFT_RESET_DUSTS (RGX_CR_SOFT_RESET_DUST_n_CORE_EN) + +#define RGX_S7_SOFT_RESET_JONES (RGX_CR_SOFT_RESET_PM_EN | \ + RGX_CR_SOFT_RESET_VDM_EN | \ + RGX_CR_SOFT_RESET_ISP_EN) + +#define RGX_S7_SOFT_RESET_JONES_ALL (RGX_S7_SOFT_RESET_JONES | \ + RGX_CR_SOFT_RESET_BIF_EN | \ + RGX_CR_SOFT_RESET_SLC_EN | \ + RGX_CR_SOFT_RESET_GARTEN_EN) + +#define RGX_S7_SOFT_RESET2 
(RGX_CR_SOFT_RESET2_BLACKPEARL_EN | \ + RGX_CR_SOFT_RESET2_PIXEL_EN | \ + RGX_CR_SOFT_RESET2_CDM_EN | \ + RGX_CR_SOFT_RESET2_VERTEX_EN) + + + +#define RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT (12U) +#define RGX_BIF_PM_PHYSICAL_PAGE_SIZE (1U << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT) + +#define RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT (14U) +#define RGX_BIF_PM_VIRTUAL_PAGE_SIZE (1U << RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT) + +#define RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE (16U) + +/* To get the number of required Dusts, divide the number of + * clusters by 2 and round up + */ +#define RGX_REQ_NUM_DUSTS(CLUSTERS) (((CLUSTERS) + 1U) / 2U) + +/* To get the number of required Bernado/Phantom(s), divide + * the number of clusters by 4 and round up + */ +#define RGX_REQ_NUM_PHANTOMS(CLUSTERS) (((CLUSTERS) + 3U) / 4U) +#define RGX_REQ_NUM_BERNADOS(CLUSTERS) (((CLUSTERS) + 3U) / 4U) +#define RGX_REQ_NUM_BLACKPEARLS(CLUSTERS) (((CLUSTERS) + 3U) / 4U) + +#if !defined(__KERNEL__) +# define RGX_NUM_PHANTOMS (RGX_REQ_NUM_PHANTOMS(RGX_FEATURE_NUM_CLUSTERS)) +#endif + + +/* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT is not defined for format 1 cores (so define it now). 
*/ +#if !defined(RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT) +#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1) +#endif + +/* META second thread feature depending on META variants and + * available CoreMem + */ +#if defined(RGX_FEATURE_META) && (RGX_FEATURE_META == MTP218 || RGX_FEATURE_META == MTP219) && defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && (RGX_FEATURE_META_COREMEM_SIZE == 256) +#define RGXFW_META_SUPPORT_2ND_THREAD +#endif + + +/* + * FW MMU contexts + */ +#if defined(SUPPORT_TRUSTED_DEVICE) +#define MMU_CONTEXT_MAPPING_FWPRIV (0x0) /* FW code/private data */ +#define MMU_CONTEXT_MAPPING_FWIF (0x7) /* Host/FW data */ +#else +#define MMU_CONTEXT_MAPPING_FWPRIV (0x0) +#define MMU_CONTEXT_MAPPING_FWIF (0x0) +#endif + + +/****************************************************************************** + * WA HWBRNs + *****************************************************************************/ + +#if defined(RGX_CR_JONES_IDLE_MASKFULL) +/* Workaround for HW BRN 57289 */ +#if (RGX_CR_JONES_IDLE_MASKFULL != 0x0000000000007FFF) +#error This WA must be updated if RGX_CR_JONES_IDLE is expanded!!! 
+#endif +#undef RGX_CR_JONES_IDLE_MASKFULL +#undef RGX_CR_JONES_IDLE_TDM_SHIFT +#undef RGX_CR_JONES_IDLE_TDM_CLRMSK +#undef RGX_CR_JONES_IDLE_TDM_EN +#define RGX_CR_JONES_IDLE_MASKFULL (IMG_UINT64_C(0x0000000000003FFF)) +#endif + +#if !defined(__KERNEL__) +#if !defined(RGX_FEATURE_SLC_SIZE_IN_BYTES) +#if defined(RGX_FEATURE_SLC_SIZE_IN_KILOBYTES) +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (RGX_FEATURE_SLC_SIZE_IN_KILOBYTES * 1024) +#else +#define RGX_FEATURE_SLC_SIZE_IN_BYTES (0) +#endif +#endif +#endif + +#if !defined(__KERNEL__) + +#if defined(RGX_FEATURE_ROGUEXE) +#define RGX_NUM_RASTERISATION_MODULES RGX_FEATURE_NUM_CLUSTERS +#else +#define RGX_NUM_RASTERISATION_MODULES RGX_NUM_PHANTOMS +#endif + +#endif /* defined(__KERNEL__) */ + +/* GPU CR timer tick in GPU cycles */ +#define RGX_CRTIME_TICK_IN_CYCLES (256U) + +/* + If the size of the SLC is less than this value then the TPU bypasses the SLC. + */ +#define RGX_TPU_CACHED_SLC_SIZE_THRESHOLD (128*1024) + +/* + * If the size of the SLC is bigger than this value then the TCU must not be bypassed in the SLC. + * In XE_MEMORY_HIERARCHY cores, the TCU is bypassed by default. + */ +#define RGX_TCU_CACHED_SLC_SIZE_THRESHOLD (32*1024) + +/* + * Register used by the FW to track the current boot stage (not used in MIPS) + */ +#define RGX_FW_BOOT_STAGE_REGISTER (RGX_CR_POWER_ESTIMATE_RESULT) + +/* + * Virtualisation definitions + */ +#define RGX_VIRTUALISATION_REG_SIZE_PER_OS (RGX_CR_MTS_SCHEDULE1 - RGX_CR_MTS_SCHEDULE) + +#endif /* RGXDEFS_KM_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgxmhdefs_km.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgxmhdefs_km.h new file mode 100644 index 000000000000..4b9e225d813d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgxmhdefs_km.h @@ -0,0 +1,336 @@ +/*************************************************************************/ /*! 
+@Title Hardware definition file rgxmhdefs_km.h +@Brief The file contains auto-generated hardware definitions without + BVNC-specific compile time conditionals. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* **** Autogenerated C -- do not edit **** */ + +/* + * rogue_mh.def + */ + + +#ifndef RGXMHDEFS_KM_H +#define RGXMHDEFS_KM_H + +#include "img_types.h" +#include "img_defs.h" + + +#define RGXMHDEFS_KM_REVISION 0 + +/* +Encoding of MH_TAG_SB for TDM CTL +*/ +#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_FENCE (0x00000000U) +#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_CONTEXT (0x00000001U) +#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_QUEUE (0x00000002U) + + +/* +Encoding of MH_TAG_SB for TDM DMA +*/ +#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTL_STREAM (0x00000000U) +#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTX_BUFFER (0x00000001U) +#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_QUEUE_CTL (0x00000002U) + + +/* +Encoding of MH_TAG_SB for PMD +*/ +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAFSTACK (0x00000008U) +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMLIST (0x00000009U) +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DFSTACK (0x0000000aU) +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMLIST (0x0000000bU) +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX0 (0x0000000cU) +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX1 (0x0000002dU) +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_UFSTACK (0x0000000fU) +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMMUSTACK (0x00000012U) +#define 
RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMMUSTACK (0x00000013U) +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAUFSTACK (0x00000016U) +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DUFSTACK (0x00000017U) +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DVFP (0x00000019U) +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAVFP (0x0000001aU) + + +/* +Encoding of MH_TAG_SB for PMA +*/ +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAFSTACK (0x00000000U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMLIST (0x00000001U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DFSTACK (0x00000002U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMLIST (0x00000003U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX0 (0x00000004U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX1 (0x00000025U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_MAVP (0x00000006U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_UFSTACK (0x00000007U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMMUSTACK (0x00000008U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMMUSTACK (0x00000009U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAUFSTACK (0x00000014U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DUFSTACK (0x00000015U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAVFP (0x00000018U) + + +/* +Encoding of MH_TAG_SB for TA +*/ +#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PPP (0x00000008U) +#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_VCERTC (0x00000007U) +#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_TEACRTC (0x00000006U) +#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGRTC (0x00000005U) +#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGR (0x00000004U) +#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGS (0x00000003U) +#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_TPC (0x00000002U) +#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_VCE (0x00000001U) + + +/* +Encoding of MH_TAG_SB for IPF when there are 2 IPF pipes +*/ +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CREQ00 (0x00000000U) +#define 
RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CREQ01 (0x00000001U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_PREQ00 (0x00000002U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_PREQ01 (0x00000003U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_RREQ (0x00000004U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_DBSC (0x00000005U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CPF (0x00000006U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_DELTA (0x00000007U) + + +/* +Encoding of MH_TAG_SB for IPF when there are 4 IPF pipes +*/ +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ00 (0x00000000U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ01 (0x00000001U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ02 (0x00000002U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ03 (0x00000003U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ00 (0x00000004U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ01 (0x00000005U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ02 (0x00000006U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ03 (0x00000007U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_RREQ (0x00000008U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_DBSC (0x00000009U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CPF (0x0000000aU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_DELTA (0x0000000bU) + + +/* +Encoding of MH_TAG_SB for IPF when there are 7 IPF pipes +*/ +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ00 (0x00000000U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ01 (0x00000001U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ02 (0x00000002U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ03 (0x00000003U) +#define 
RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ04 (0x00000004U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ05 (0x00000005U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ06 (0x00000006U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ00 (0x00000007U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ01 (0x00000008U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ02 (0x00000009U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ03 (0x0000000aU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ04 (0x0000000bU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ05 (0x0000000cU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ06 (0x0000000dU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_RREQ (0x0000000eU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_DBSC (0x0000000fU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CPF (0x00000010U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_DELTA (0x00000011U) + + +/* +Encoding of MH_TAG_SB for IPF when there are 14 IPF pipes +*/ +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ00 (0x00000000U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ01 (0x00000001U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ02 (0x00000002U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ03 (0x00000003U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ04 (0x00000004U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ05 (0x00000005U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ06 (0x00000006U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ07 (0x00000007U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ08 (0x00000008U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ09 
(0x00000009U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ10 (0x0000000aU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ11 (0x0000000bU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ12 (0x0000000cU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ13 (0x0000000dU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ00 (0x0000000eU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ01 (0x0000000fU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ02 (0x00000010U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ03 (0x00000011U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ04 (0x00000012U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ05 (0x00000013U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ06 (0x00000014U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ07 (0x00000015U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ08 (0x00000016U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ09 (0x00000017U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ10 (0x00000018U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ11 (0x00000019U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ12 (0x0000001aU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ13 (0x0000001bU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_RREQ (0x0000001cU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_DBSC (0x0000001dU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CPF (0x0000001eU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_DELTA (0x0000001fU) + + +/* +Encoding of MH_TAG_SB for TPF +*/ +#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_PDS_STATE (0x00000000U) +#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DEPTH_BIAS (0x00000001U) +#define 
RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_FLOOR_OFFSET_DATA (0x00000002U) +#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DELTA_DATA (0x00000003U) + + +/* +Encoding of MH_TAG_SB for ISP +*/ +#define RGX_MH_TAG_SB_ISP_ENCODING_ISP_TAG_ZLS (0x00000000U) +#define RGX_MH_TAG_SB_ISP_ENCODING_ISP_TAG_DS (0x00000001U) + + +/* +Encoding of MH_TAG_SB for VDM +*/ +#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTROL (0x00000000U) +#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STATE (0x00000001U) +#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_INDEX (0x00000002U) +#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STACK (0x00000004U) +#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTEXT (0x00000008U) + + +/* +Encoding of MH_TAG_SB for CDM +*/ +#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTROL_STREAM (0x00000000U) +#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_INDIRECT_DATA (0x00000001U) +#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_EVENT_DATA (0x00000002U) +#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTEXT_STATE (0x00000003U) + + +/* +Encoding of MH_TAG_SB for MIPS +*/ +#define RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_OPCODE_FETCH (0x00000002U) +#define RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_DATA_ACCESS (0x00000003U) + + +/* +Encoding of MH_TAG_SB for MMU +*/ +#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PT_REQUEST (0x00000000U) +#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PD_REQUEST (0x00000001U) +#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PC_REQUEST (0x00000002U) +#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PT_REQUEST (0x00000003U) +#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_REQUEST (0x00000004U) +#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_REQUEST (0x00000005U) +#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_WREQUEST (0x00000006U) +#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_WREQUEST (0x00000007U) + + +/* +Encoding of MH TAG +*/ +#define RGX_MH_TAG_ENCODING_MH_TAG_MMU_PT (0x00000000U) +#define RGX_MH_TAG_ENCODING_MH_TAG_MMU_PD (0x00000001U) +#define RGX_MH_TAG_ENCODING_MH_TAG_MMU_PC 
(0x00000002U) +#define RGX_MH_TAG_ENCODING_MH_TAG_MMU_PM (0x00000003U) +#define RGX_MH_TAG_ENCODING_MH_TAG_MIPS (0x00000004U) +#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG0 (0x00000005U) +#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG1 (0x00000006U) +#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG2 (0x00000007U) +#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG3 (0x00000008U) +#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG0 (0x00000009U) +#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG1 (0x0000000aU) +#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG2 (0x0000000bU) +#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG3 (0x0000000cU) +#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG4 (0x0000000dU) +#define RGX_MH_TAG_ENCODING_MH_TAG_PDS_0 (0x0000000eU) +#define RGX_MH_TAG_ENCODING_MH_TAG_PDS_1 (0x0000000fU) +#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCA (0x00000010U) +#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCB (0x00000011U) +#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCC (0x00000012U) +#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCD (0x00000013U) +#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCA (0x00000014U) +#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCB (0x00000015U) +#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCC (0x00000016U) +#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCD (0x00000017U) +#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDSRW (0x00000018U) +#define RGX_MH_TAG_ENCODING_MH_TAG_TCU_0 (0x00000019U) +#define RGX_MH_TAG_ENCODING_MH_TAG_TCU_1 (0x0000001aU) +#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_0 (0x0000001bU) +#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_1 (0x0000001cU) +#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_2 (0x0000001dU) +#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_3 (0x0000001eU) +#define RGX_MH_TAG_ENCODING_MH_TAG_USC (0x0000001fU) +#define RGX_MH_TAG_ENCODING_MH_TAG_ISP_ZLS (0x00000020U) +#define RGX_MH_TAG_ENCODING_MH_TAG_ISP_DS (0x00000021U) +#define RGX_MH_TAG_ENCODING_MH_TAG_TPF (0x00000022U) +#define RGX_MH_TAG_ENCODING_MH_TAG_TPF_PBCDBIAS (0x00000023U) +#define RGX_MH_TAG_ENCODING_MH_TAG_TPF_SPF 
(0x00000024U) +#define RGX_MH_TAG_ENCODING_MH_TAG_IPF_CREQ (0x00000025U) +#define RGX_MH_TAG_ENCODING_MH_TAG_IPF_OTHERS (0x00000026U) +#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG5 (0x00000027U) +#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PPP (0x00000028U) +#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPWRTC (0x00000029U) +#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TEACRTC (0x0000002aU) +#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGRTC (0x0000002bU) +#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGREGION (0x0000002cU) +#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGSTREAM (0x0000002dU) +#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPW (0x0000002eU) +#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPC (0x0000002fU) +#define RGX_MH_TAG_ENCODING_MH_TAG_PM_ALLOC (0x00000030U) +#define RGX_MH_TAG_ENCODING_MH_TAG_PM_DEALLOC (0x00000031U) +#define RGX_MH_TAG_ENCODING_MH_TAG_TDM_DMA (0x00000032U) +#define RGX_MH_TAG_ENCODING_MH_TAG_TDM_CTL (0x00000033U) +#define RGX_MH_TAG_ENCODING_MH_TAG_PBE0 (0x00000034U) +#define RGX_MH_TAG_ENCODING_MH_TAG_PBE1 (0x00000035U) +#define RGX_MH_TAG_ENCODING_MH_TAG_PBE2 (0x00000036U) +#define RGX_MH_TAG_ENCODING_MH_TAG_PBE3 (0x00000037U) + + +#endif /* RGXMHDEFS_KM_H */ + +/***************************************************************************** + End of file (rgxmhdefs_km.h) +*****************************************************************************/ + diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgxmmudefs_km.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgxmmudefs_km.h new file mode 100644 index 000000000000..1b042673e95e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/rgxmmudefs_km.h @@ -0,0 +1,350 @@ +/*************************************************************************/ /*! +@Title Hardware definition file rgxmmudefs_km.h +@Brief The file contains auto-generated hardware definitions without + BVNC-specific compile time conditionals. +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* **** Autogenerated C -- do not edit **** */ + +/* + * rogue_bif.def + */ + + +#ifndef RGXMMUDEFS_KM_H +#define RGXMMUDEFS_KM_H + +#include "img_types.h" +#include "img_defs.h" + + +#define RGXMMUDEFS_KM_REVISION 0 + +/* +Encoding of DM (note value 0x6 not used) +*/ +#define RGX_BIF_DM_ENCODING_VERTEX (0x00000000U) +#define RGX_BIF_DM_ENCODING_PIXEL (0x00000001U) +#define RGX_BIF_DM_ENCODING_COMPUTE (0x00000002U) +#define RGX_BIF_DM_ENCODING_TLA (0x00000003U) +#define RGX_BIF_DM_ENCODING_PB_VCE (0x00000004U) +#define RGX_BIF_DM_ENCODING_PB_TE (0x00000005U) +#define RGX_BIF_DM_ENCODING_META (0x00000007U) +#define RGX_BIF_DM_ENCODING_HOST (0x00000008U) +#define RGX_BIF_DM_ENCODING_PM_ALIST (0x00000009U) + + +/* +Labelling of fields within virtual address +*/ +/* +Page Catalogue entry # +*/ +#define RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT (30U) +#define RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFF003FFFFFFF)) +/* +Page Directory entry # +*/ +#define RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT (21U) +#define RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC01FFFFF)) +/* +Page Table entry # +*/ +#define RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT (12U) +#define RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE00FFF)) + + +/* +Number of entries in a PC +*/ +#define RGX_MMUCTRL_ENTRIES_PC_VALUE (0x00000400U) + + +/* +Number of entries in a PD +*/ +#define 
RGX_MMUCTRL_ENTRIES_PD_VALUE (0x00000200U) + + +/* +Number of entries in a PT +*/ +#define RGX_MMUCTRL_ENTRIES_PT_VALUE (0x00000200U) + + +/* +Size in bits of the PC entries in memory +*/ +#define RGX_MMUCTRL_ENTRY_SIZE_PC_VALUE (0x00000020U) + + +/* +Size in bits of the PD entries in memory +*/ +#define RGX_MMUCTRL_ENTRY_SIZE_PD_VALUE (0x00000040U) + + +/* +Size in bits of the PT entries in memory +*/ +#define RGX_MMUCTRL_ENTRY_SIZE_PT_VALUE (0x00000040U) + + +/* +Encoding of page size field +*/ +#define RGX_MMUCTRL_PAGE_SIZE_MASK (0x00000007U) +#define RGX_MMUCTRL_PAGE_SIZE_4KB (0x00000000U) +#define RGX_MMUCTRL_PAGE_SIZE_16KB (0x00000001U) +#define RGX_MMUCTRL_PAGE_SIZE_64KB (0x00000002U) +#define RGX_MMUCTRL_PAGE_SIZE_256KB (0x00000003U) +#define RGX_MMUCTRL_PAGE_SIZE_1MB (0x00000004U) +#define RGX_MMUCTRL_PAGE_SIZE_2MB (0x00000005U) + + +/* +Range of bits used for 4KB Physical Page +*/ +#define RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT (12U) +#define RGX_MMUCTRL_PAGE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) + + +/* +Range of bits used for 16KB Physical Page +*/ +#define RGX_MMUCTRL_PAGE_16KB_RANGE_SHIFT (14U) +#define RGX_MMUCTRL_PAGE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000003FFF)) + + +/* +Range of bits used for 64KB Physical Page +*/ +#define RGX_MMUCTRL_PAGE_64KB_RANGE_SHIFT (16U) +#define RGX_MMUCTRL_PAGE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000FFFF)) + + +/* +Range of bits used for 256KB Physical Page +*/ +#define RGX_MMUCTRL_PAGE_256KB_RANGE_SHIFT (18U) +#define RGX_MMUCTRL_PAGE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000003FFFF)) + + +/* +Range of bits used for 1MB Physical Page +*/ +#define RGX_MMUCTRL_PAGE_1MB_RANGE_SHIFT (20U) +#define RGX_MMUCTRL_PAGE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000FFFFF)) + + +/* +Range of bits used for 2MB Physical Page +*/ +#define RGX_MMUCTRL_PAGE_2MB_RANGE_SHIFT (21U) +#define RGX_MMUCTRL_PAGE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00001FFFFF)) + + +/* +Range of bits used for PT Base Address for 4KB 
Physical Page +*/ +#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_SHIFT (12U) +#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) + + +/* +Range of bits used for PT Base Address for 16KB Physical Page +*/ +#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_SHIFT (10U) +#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000003FF)) + + +/* +Range of bits used for PT Base Address for 64KB Physical Page +*/ +#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_SHIFT (8U) +#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000000FF)) + + +/* +Range of bits used for PT Base Address for 256KB Physical Page +*/ +#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_SHIFT (6U) +#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000003F)) + + +/* +Range of bits used for PT Base Address for 1MB Physical Page +*/ +#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_SHIFT (5U) +#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) + + +/* +Range of bits used for PT Base Address for 2MB Physical Page +*/ +#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_SHIFT (5U) +#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) + + +/* +Format of Page Table data +*/ +/* +PM/Meta protect bit +*/ +#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_SHIFT (62U) +#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN (IMG_UINT64_C(0x4000000000000000)) +/* +Upper part of vp page field +*/ +#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_SHIFT (40U) +#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_CLRMSK (IMG_UINT64_C(0xC00000FFFFFFFFFF)) +/* +Physical page address +*/ +#define RGX_MMUCTRL_PT_DATA_PAGE_SHIFT (12U) +#define RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +/* +Lower part of vp page field +*/ +#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_SHIFT (6U) +#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF03F)) +/* +Entry pending +*/ 
+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_SHIFT (5U) +#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0x0000000000000020)) +/* +PM Src +*/ +#define RGX_MMUCTRL_PT_DATA_PM_SRC_SHIFT (4U) +#define RGX_MMUCTRL_PT_DATA_PM_SRC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_MMUCTRL_PT_DATA_PM_SRC_EN (IMG_UINT64_C(0x0000000000000010)) +/* +SLC Bypass Ctrl +*/ +#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_SHIFT (3U) +#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN (IMG_UINT64_C(0x0000000000000008)) +/* +Cache Coherency bit +*/ +#define RGX_MMUCTRL_PT_DATA_CC_SHIFT (2U) +#define RGX_MMUCTRL_PT_DATA_CC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_MMUCTRL_PT_DATA_CC_EN (IMG_UINT64_C(0x0000000000000004)) +/* +Read only +*/ +#define RGX_MMUCTRL_PT_DATA_READ_ONLY_SHIFT (1U) +#define RGX_MMUCTRL_PT_DATA_READ_ONLY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_MMUCTRL_PT_DATA_READ_ONLY_EN (IMG_UINT64_C(0x0000000000000002)) +/* +Entry valid +*/ +#define RGX_MMUCTRL_PT_DATA_VALID_SHIFT (0U) +#define RGX_MMUCTRL_PT_DATA_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_MMUCTRL_PT_DATA_VALID_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* +Format of Page Directory data +*/ +/* +Entry pending +*/ +#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_SHIFT (40U) +#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0x0000010000000000)) +/* +Page Table base address +*/ +#define RGX_MMUCTRL_PD_DATA_PT_BASE_SHIFT (5U) +#define RGX_MMUCTRL_PD_DATA_PT_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) +/* +Page Size +*/ +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_SHIFT (1U) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF1)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB 
(IMG_UINT64_C(0x0000000000000000)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB (IMG_UINT64_C(0x0000000000000002)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB (IMG_UINT64_C(0x0000000000000004)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB (IMG_UINT64_C(0x0000000000000006)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB (IMG_UINT64_C(0x0000000000000008)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB (IMG_UINT64_C(0x000000000000000a)) +/* +Entry valid +*/ +#define RGX_MMUCTRL_PD_DATA_VALID_SHIFT (0U) +#define RGX_MMUCTRL_PD_DATA_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_MMUCTRL_PD_DATA_VALID_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* +Format of Page Catalogue data +*/ +/* +Page Catalogue base address +*/ +#define RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT (4U) +#define RGX_MMUCTRL_PC_DATA_PD_BASE_CLRMSK (0x0000000FU) +#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT (12U) +#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE (4096U) +/* +Entry pending +*/ +#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_SHIFT (1U) +#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_CLRMSK (0xFFFFFFFDU) +#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN (0x00000002U) +/* +Entry valid +*/ +#define RGX_MMUCTRL_PC_DATA_VALID_SHIFT (0U) +#define RGX_MMUCTRL_PC_DATA_VALID_CLRMSK (0xFFFFFFFEU) +#define RGX_MMUCTRL_PC_DATA_VALID_EN (0x00000001U) + + +#endif /* RGXMMUDEFS_KM_H */ + +/***************************************************************************** + End of file (rgxmmudefs_km.h) +*****************************************************************************/ + diff --git a/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/tmp_rgx_cr_defs_riscv_km.h b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/tmp_rgx_cr_defs_riscv_km.h new file mode 100644 index 000000000000..a2db4a0312cd --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/rogue/km/tmp_rgx_cr_defs_riscv_km.h @@ -0,0 +1,208 @@ +/*************************************************************************/ /*! 
+@Title Hardware definition file tmp_rgx_cr_defs_riscv_km.h +@Brief The file contains TEMPORARY hardware definitions without + BVNC-specific compile time conditionals. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef TMP_RGX_CR_DEFS_RISCV_KM_H +#define TMP_RGX_CR_DEFS_RISCV_KM_H + +#if !defined(IMG_EXPLICIT_INCLUDE_HWDEFS) +#error This file may only be included if explicitly defined +#endif + +#include "img_types.h" +#include "img_defs.h" + + +/* + Register TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG +*/ +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG (0x3000U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP1_CONFIG (0x3008U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP1_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP2_CONFIG (0x3010U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP2_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP3_CONFIG (0x3018U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP3_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP4_CONFIG (0x3020U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP4_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP5_CONFIG (0x3028U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP5_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP6_CONFIG (0x3030U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP6_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP7_CONFIG (0x3038U) 
+#define TMP_RGX_CR_FWCORE_ADDR_REMAP7_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP8_CONFIG (0x3040U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP8_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP9_CONFIG (0x3048U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP9_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP10_CONFIG (0x3050U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP10_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP11_CONFIG (0x3058U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP11_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP12_CONFIG (0x3060U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP12_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP13_CONFIG (0x3068U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP13_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP14_CONFIG (0x3070U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP14_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP15_CONFIG (0x3078U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP15_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_TRUSTED_SHIFT (62) +/* CLRMSK is ~TRUSTED_EN (bit 62 only); was 0x...FFFE, which wrongly cleared bit 0 of the DEVVADDR field */ +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_LOAD_STORE_SHIFT (61) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_LOAD_STORE_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_LOAD_STORE_EN (IMG_UINT64_C(0x2000000000000000)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_FETCH_SHIFT (60) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_FETCH_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_FETCH_EN
(IMG_UINT64_C(0x1000000000000000)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_REGION_SIZE_SHIFT (44) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_REGION_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_REGION_SIZE_ALIGN (12U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_MMU_CONTEXT_SHIFT (40) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_DEVVADDR_SHIFT (12) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) + + +/* + Register TMP_RGX_CR_FWCORE_BOOT + */ +#define TMP_RGX_CR_FWCORE_BOOT (0x3090U) +#define TMP_RGX_CR_FWCORE_BOOT_BOOT_SHIFT (0) +#define TMP_RGX_CR_FWCORE_BOOT_BOOT_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define TMP_RGX_CR_FWCORE_BOOT_BOOT_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register TMP_RGX_CR_FWCORE_RESET_ADDR + */ +#define TMP_RGX_CR_FWCORE_RESET_ADDR (0x3098U) +#define TMP_RGX_CR_FWCORE_RESET_ADDR_ADDR_SHIFT (1) +#define TMP_RGX_CR_FWCORE_RESET_ADDR_ADDR_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register TMP_RGX_CR_FWCORE_MEM_CTRL_INVAL +*/ +#define TMP_RGX_CR_FWCORE_MEM_CTRL_INVAL (0x30C0U) +#define TMP_RGX_CR_FWCORE_MEM_CTRL_INVAL_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define TMP_RGX_CR_FWCORE_MEM_CTRL_INVAL_TLB_SHIFT (3U) +#define TMP_RGX_CR_FWCORE_MEM_CTRL_INVAL_TLB_CLRMSK (0xFFFFFFF7U) +#define TMP_RGX_CR_FWCORE_MEM_CTRL_INVAL_TLB_EN (0x00000008U) +#define TMP_RGX_CR_FWCORE_MEM_CTRL_INVAL_PC_SHIFT (2U) +#define TMP_RGX_CR_FWCORE_MEM_CTRL_INVAL_PC_CLRMSK (0xFFFFFFFBU) +#define TMP_RGX_CR_FWCORE_MEM_CTRL_INVAL_PC_EN (0x00000004U) +#define TMP_RGX_CR_FWCORE_MEM_CTRL_INVAL_PD_SHIFT (1U) +#define TMP_RGX_CR_FWCORE_MEM_CTRL_INVAL_PD_CLRMSK (0xFFFFFFFDU) +#define TMP_RGX_CR_FWCORE_MEM_CTRL_INVAL_PD_EN (0x00000002U) +#define TMP_RGX_CR_FWCORE_MEM_CTRL_INVAL_PT_SHIFT (0U) +#define TMP_RGX_CR_FWCORE_MEM_CTRL_INVAL_PT_CLRMSK 
(0xFFFFFFFEU) +#define TMP_RGX_CR_FWCORE_MEM_CTRL_INVAL_PT_EN (0x00000001U) + + +/* + Register TMP_RGX_CR_FWCORE_WRAPPER_FENCE + */ +#define TMP_RGX_CR_FWCORE_WRAPPER_FENCE (0x30E8U) +#define TMP_RGX_CR_FWCORE_WRAPPER_FENCE_FENCE_SHIFT (0) +#define TMP_RGX_CR_FWCORE_WRAPPER_FENCE_FENCE_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register TMP_RGX_CR_FWCORE_MEM_CAT_BASE0 + */ +#define TMP_RGX_CR_FWCORE_MEM_CAT_BASE0 (0x30F0U) +#define TMP_RGX_CR_FWCORE_MEM_CAT_BASE0_MASKFULL (IMG_UINT64_C(0xFFFFFF0000000FFF) +#define TMP_RGX_CR_FWCORE_MEM_CAT_BASE1 (0x30F8U) +#define TMP_RGX_CR_FWCORE_MEM_CAT_BASE1_MASKFULL (IMG_UINT64_C(0xFFFFFF0000000FFF) +#define TMP_RGX_CR_FWCORE_MEM_CAT_BASE2 (0x3100U) +#define TMP_RGX_CR_FWCORE_MEM_CAT_BASE2_MASKFULL (IMG_UINT64_C(0xFFFFFF0000000FFF) +#define TMP_RGX_CR_FWCORE_MEM_CAT_BASE3 (0x3108U) +#define TMP_RGX_CR_FWCORE_MEM_CAT_BASE3_MASKFULL (IMG_UINT64_C(0xFFFFFF0000000FFF) +#define TMP_RGX_CR_FWCORE_MEM_CAT_BASE4 (0x3110U) +#define TMP_RGX_CR_FWCORE_MEM_CAT_BASE4_MASKFULL (IMG_UINT64_C(0xFFFFFF0000000FFF) +#define TMP_RGX_CR_FWCORE_MEM_CAT_BASE5 (0x3118U) +#define TMP_RGX_CR_FWCORE_MEM_CAT_BASE5_MASKFULL (IMG_UINT64_C(0xFFFFFF0000000FFF) +#define TMP_RGX_CR_FWCORE_MEM_CAT_BASE6 (0x3120U) +#define TMP_RGX_CR_FWCORE_MEM_CAT_BASE6_MASKFULL (IMG_UINT64_C(0xFFFFFF0000000FFF) +#define TMP_RGX_CR_FWCORE_MEM_CAT_BASE7 (0x3128U) +#define TMP_RGX_CR_FWCORE_MEM_CAT_BASE7_MASKFULL (IMG_UINT64_C(0xFFFFFF0000000FFF) +#define TMP_RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT (12U) +#define TMP_RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define TMP_RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT (12U) +#define TMP_RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSIZE (4096U) + + +/* + Register TMP_RGX_CR_MTIME_SET +*/ +#define TMP_RGX_CR_MTIME_SET (0x7000U) +#define TMP_RGX_CR_MTIME_SET_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) + +/* + Register TMP_RGX_CR_MTIME_CMP +*/ +#define TMP_RGX_CR_MTIME_CMP (0x7008U) +#define 
TMP_RGX_CR_MTIME_CMP_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) + +/* + Register TMP_RGX_CR_MTIME_CTRL +*/ +#define TMP_RGX_CR_MTIME_CTRL (0x7018U) +#define TMP_RGX_CR_MTIME_CTRL_MASKFULL (IMG_UINT64_C(0x0000000080000003)) +#define TMP_RGX_CR_MTIME_CTRL_SOFT_RESET_SHIFT (31) +#define TMP_RGX_CR_MTIME_CTRL_SOFT_RESET_CLRMSK (IMG_UINT64_C(0x0000000000000003)) +#define TMP_RGX_CR_MTIME_CTRL_SOFT_RESET_EN (IMG_UINT64_C(0x0000000080000000)) +#define TMP_RGX_CR_MTIME_CTRL_PAUSE_SHIFT (1) +#define TMP_RGX_CR_MTIME_CTRL_PAUSE_CLRMSK (IMG_UINT64_C(0x0000000080000001)) +#define TMP_RGX_CR_MTIME_CTRL_PAUSE_EN (IMG_UINT64_C(0x0000000000000002)) +#define TMP_RGX_CR_MTIME_CTRL_ENABLE_SHIFT (0) +#define TMP_RGX_CR_MTIME_CTRL_ENABLE_CLRMSK (IMG_UINT64_C(0x0000000080000002)) +#define TMP_RGX_CR_MTIME_CTRL_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) + +#endif /* TMP_RGX_CR_DEFS_RISCV_KM_H */ + +/***************************************************************************** + End of file (tmp_rgx_cr_defs_riscv_km.h) +*****************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/configs/rgxconfig_km_27.V.254.2.h b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/configs/rgxconfig_km_27.V.254.2.h new file mode 100644 index 000000000000..e7e5a39f854a --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/configs/rgxconfig_km_27.V.254.2.h @@ -0,0 +1,98 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 27.V.254.2 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_27_V_254_2_H +#define RGXCONFIG_KM_27_V_254_2_H + +/***** Automatically generated file. Do not edit manually ********************/ +/***** 27.V.254.2 ************************************************************/ + +#define RGX_BNC_KM_B 27 +#define RGX_BNC_KM_N 254 +#define RGX_BNC_KM_C 2 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_AXI_ACE +#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (3U) +#define RGX_FEATURE_ECC_RAMS (0U) +#define RGX_FEATURE_FBCDC (4U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_MAX_TPU_PER_SPU (1U) +#define RGX_FEATURE_META (MTP219) +#define RGX_FEATURE_META_COREMEM_BANKS (8U) +#define RGX_FEATURE_META_COREMEM_SIZE (96U) +#define RGX_FEATURE_META_DMA +#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4U) +#define RGX_FEATURE_MMU_VERSION (3U) +#define RGX_FEATURE_NUM_CLUSTERS (2U) +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4U) +#define RGX_FEATURE_NUM_ISP_PER_SPU (2U) +#define RGX_FEATURE_NUM_MEMBUS (1U) +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_NUM_SPU (1U) +#define RGX_FEATURE_PBE_CHECKSUM_2D +#define RGX_FEATURE_PBE_PER_SPU (2U) +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES +#define RGX_FEATURE_POWER_ISLAND_VERSION (1U) +#define RGX_FEATURE_RENDER_TARGET_XY_MAX (16384U) +#define RGX_FEATURE_SCALABLE_TE_ARCH (1U) +#define RGX_FEATURE_SCALABLE_VCE (1U) +#define RGX_FEATURE_SIGNAL_SNOOPING +#define RGX_FEATURE_SLC_BANKS (1U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U) +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U) +#define RGX_FEATURE_SLC_VIVT +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define 
RGX_FEATURE_TESSELLATION +#define RGX_FEATURE_TILE_SIZE_X (32U) +#define RGX_FEATURE_TILE_SIZE_Y (32U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_ZLS_CHECKSUM + + +#endif /* RGXCONFIG_KM_27_V_254_2_H */ + diff --git a/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/configs/rgxconfig_km_30.V.1632.1.h b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/configs/rgxconfig_km_30.V.1632.1.h new file mode 100644 index 000000000000..8bb0b6aa9446 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/configs/rgxconfig_km_30.V.1632.1.h @@ -0,0 +1,100 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 30.V.1632.1 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_30_V_1632_1_H +#define RGXCONFIG_KM_30_V_1632_1_H + +/***** Automatically generated file. 
Do not edit manually ********************/ +/***** 30.V.1632.1 ************************************************************/ + +#define RGX_BNC_KM_B 30 +#define RGX_BNC_KM_N 1632 +#define RGX_BNC_KM_C 1 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_AXI_ACE +#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U) +#define RGX_FEATURE_ECC_RAMS (0U) +#define RGX_FEATURE_FBCDC (4U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_MAX_TPU_PER_SPU (2U) +#define RGX_FEATURE_META (MTP219) +#define RGX_FEATURE_META_COREMEM_BANKS (8U) +#define RGX_FEATURE_META_COREMEM_SIZE (96U) +#define RGX_FEATURE_META_DMA +#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4U) +#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES +#define RGX_FEATURE_MMU_VERSION (4U) +#define RGX_FEATURE_NUM_CLUSTERS (4U) +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (16U) +#define RGX_FEATURE_NUM_ISP_PER_SPU (2U) +#define RGX_FEATURE_NUM_MEMBUS (2U) +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_NUM_SPU (2U) +#define RGX_FEATURE_PBE_CHECKSUM_2D +#define RGX_FEATURE_PBE_PER_SPU (1U) +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES +#define RGX_FEATURE_POWER_ISLAND_VERSION (1U) +#define RGX_FEATURE_RENDER_TARGET_XY_MAX (16384U) +#define RGX_FEATURE_SCALABLE_TE_ARCH (2U) +#define RGX_FEATURE_SCALABLE_VCE (2U) +#define RGX_FEATURE_SIGNAL_SNOOPING +#define RGX_FEATURE_SLC_BANKS (4U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U) +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (512U) +#define RGX_FEATURE_SLC_VIVT +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_TESSELLATION +#define RGX_FEATURE_TILE_SIZE_X (32U) +#define 
RGX_FEATURE_TILE_SIZE_Y (32U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_ZLS_CHECKSUM + + +#endif /* RGXCONFIG_KM_30_V_1632_1_H */ + diff --git a/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/configs/rgxconfig_km_30.V.408.101.h b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/configs/rgxconfig_km_30.V.408.101.h new file mode 100644 index 000000000000..3f2d2c09cd66 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/configs/rgxconfig_km_30.V.408.101.h @@ -0,0 +1,100 @@ +/*************************************************************************/ /*! +@Title RGX Config BVNC 30.V.408.101 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_30_V_408_101_H +#define RGXCONFIG_KM_30_V_408_101_H + +/***** Automatically generated file. Do not edit manually ********************/ +/***** 30.V.408.101 ************************************************************/ + +#define RGX_BNC_KM_B 30 +#define RGX_BNC_KM_N 408 +#define RGX_BNC_KM_C 101 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_AXI_ACE +#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U) +#define RGX_FEATURE_ECC_RAMS (0U) +#define RGX_FEATURE_FBCDC (4U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_MAX_TPU_PER_SPU (1U) +#define RGX_FEATURE_META (MTP219) +#define RGX_FEATURE_META_COREMEM_BANKS (8U) +#define RGX_FEATURE_META_COREMEM_SIZE (96U) +#define RGX_FEATURE_META_DMA +#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4U) +#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES +#define RGX_FEATURE_MMU_VERSION (4U) +#define RGX_FEATURE_NUM_CLUSTERS 
(1U) +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4U) +#define RGX_FEATURE_NUM_ISP_PER_SPU (1U) +#define RGX_FEATURE_NUM_MEMBUS (1U) +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_NUM_SPU (1U) +#define RGX_FEATURE_PBE_CHECKSUM_2D +#define RGX_FEATURE_PBE_PER_SPU (1U) +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES +#define RGX_FEATURE_POWER_ISLAND_VERSION (1U) +#define RGX_FEATURE_RENDER_TARGET_XY_MAX (16384U) +#define RGX_FEATURE_SCALABLE_TE_ARCH (1U) +#define RGX_FEATURE_SCALABLE_VCE (1U) +#define RGX_FEATURE_SIGNAL_SNOOPING +#define RGX_FEATURE_SLC_BANKS (1U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U) +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U) +#define RGX_FEATURE_SLC_VIVT +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_TESSELLATION +#define RGX_FEATURE_TILE_SIZE_X (32U) +#define RGX_FEATURE_TILE_SIZE_Y (32U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_ZLS_CHECKSUM + + +#endif /* RGXCONFIG_KM_30_V_408_101_H */ + diff --git a/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/cores/rgxcore_km_27.5.254.2.h b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/cores/rgxcore_km_27.5.254.2.h new file mode 100644 index 000000000000..03f6464d4ee9 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/cores/rgxcore_km_27.5.254.2.h @@ -0,0 +1,69 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 27.5.254.2 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_27_5_254_2_H +#define RGXCORE_KM_27_5_254_2_H + +/* Automatically generated file (11/02/2020 09:01:22): Do not edit manually */ +/* CS: @5262350 */ + +/****************************************************************************** + * BVNC = 27.5.254.2 + *****************************************************************************/ +#define RGX_BVNC_KM_B 27 +#define RGX_BVNC_KM_V 5 +#define RGX_BVNC_KM_N 254 +#define RGX_BVNC_KM_C 2 + +/****************************************************************************** + * Errata + *****************************************************************************/ + + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ + + + +#endif /* RGXCORE_KM_27_5_254_2_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/cores/rgxcore_km_30.3.408.101.h b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/cores/rgxcore_km_30.3.408.101.h new file mode 100644 index 000000000000..0b9c9bd62b55 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/cores/rgxcore_km_30.3.408.101.h @@ -0,0 +1,70 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 30.3.408.101 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_30_3_408_101_H +#define RGXCORE_KM_30_3_408_101_H + +/* Automatically generated file (11/02/2020 09:01:23): Do not edit manually */ +/* CS: @5621518 */ + +/****************************************************************************** + * BVNC = 30.3.408.101 + *****************************************************************************/ +#define RGX_BVNC_KM_B 30 +#define RGX_BVNC_KM_V 3 +#define RGX_BVNC_KM_N 408 +#define RGX_BVNC_KM_C 101 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_68777 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ + + + +#endif /* RGXCORE_KM_30_3_408_101_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/cores/rgxcore_km_30.4.1632.1.h b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/cores/rgxcore_km_30.4.1632.1.h new file mode 100644 index 000000000000..337562a898d8 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/cores/rgxcore_km_30.4.1632.1.h @@ -0,0 +1,70 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 30.4.1632.1 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_30_4_1632_1_H +#define RGXCORE_KM_30_4_1632_1_H + +/* Automatically generated file (11/02/2020 09:01:23): Do not edit manually */ +/* CS: @5541679 */ + +/****************************************************************************** + * BVNC = 30.4.1632.1 + *****************************************************************************/ +#define RGX_BVNC_KM_B 30 +#define RGX_BVNC_KM_V 4 +#define RGX_BVNC_KM_N 1632 +#define RGX_BVNC_KM_C 1 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_68777 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ + + + +#endif /* RGXCORE_KM_30_4_1632_1_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/cores/rgxcore_km_30.5.1632.1.h b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/cores/rgxcore_km_30.5.1632.1.h new file mode 100644 index 000000000000..1cc71e985804 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/cores/rgxcore_km_30.5.1632.1.h @@ -0,0 +1,70 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 30.5.1632.1 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_30_5_1632_1_H +#define RGXCORE_KM_30_5_1632_1_H + +/* Automatically generated file (11/02/2020 09:01:23): Do not edit manually */ +/* CS: @5621518 */ + +/****************************************************************************** + * BVNC = 30.5.1632.1 + *****************************************************************************/ +#define RGX_BVNC_KM_B 30 +#define RGX_BVNC_KM_V 5 +#define RGX_BVNC_KM_N 1632 +#define RGX_BVNC_KM_C 1 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_68777 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ + + + +#endif /* RGXCORE_KM_30_5_1632_1_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/rgx_bvnc_defs_km.h b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/rgx_bvnc_defs_km.h new file mode 100644 index 000000000000..70ebcd3eea50 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/rgx_bvnc_defs_km.h @@ -0,0 +1,192 @@ +/*************************************************************************/ /*! +@Title Hardware definition file rgx_bvnc_defs_km.h +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +/****************************************************************************** + * Auto generated file by rgxbvnc_tablegen.py * + * This file should not be edited manually * + *****************************************************************************/ + +#ifndef RGX_BVNC_DEFS_KM_H +#define RGX_BVNC_DEFS_KM_H + +#include "img_types.h" +#include "img_defs.h" + +#if defined(RGX_BVNC_DEFS_UM_H) +#error "This file should not be included in conjunction with rgx_bvnc_defs_um.h" +#endif + +#define BVNC_FIELD_WIDTH (16U) + + +/****************************************************************************** + * Mask and bit-position macros for features without values + *****************************************************************************/ + +#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE_POS (0U) +#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000000000001)) + +#define RGX_FEATURE_AXI_ACE_POS (1U) +#define RGX_FEATURE_AXI_ACE_BIT_MASK (IMG_UINT64_C(0x0000000000000002)) + +#define RGX_FEATURE_FW_DUAL_LOCKSTEP_POS (2U) +#define RGX_FEATURE_FW_DUAL_LOCKSTEP_BIT_MASK (IMG_UINT64_C(0x0000000000000004)) + +#define RGX_FEATURE_GPU_CPU_COHERENCY_POS (3U) +#define RGX_FEATURE_GPU_CPU_COHERENCY_BIT_MASK (IMG_UINT64_C(0x0000000000000008)) + +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT_POS (4U) +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT_BIT_MASK (IMG_UINT64_C(0x0000000000000010)) + +#define RGX_FEATURE_GPU_VIRTUALISATION_POS (5U) +#define RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK (IMG_UINT64_C(0x0000000000000020)) + +#define RGX_FEATURE_HYPERVISOR_MMU_POS (6U) +#define RGX_FEATURE_HYPERVISOR_MMU_BIT_MASK (IMG_UINT64_C(0x0000000000000040)) + +#define RGX_FEATURE_META_DMA_POS (7U) +#define RGX_FEATURE_META_DMA_BIT_MASK (IMG_UINT64_C(0x0000000000000080)) + +#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES_POS (8U) +#define 
RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES_BIT_MASK (IMG_UINT64_C(0x0000000000000100)) + +#define RGX_FEATURE_PBE_CHECKSUM_2D_POS (9U) +#define RGX_FEATURE_PBE_CHECKSUM_2D_BIT_MASK (IMG_UINT64_C(0x0000000000000200)) + +#define RGX_FEATURE_PERFBUS_POS (10U) +#define RGX_FEATURE_PERFBUS_BIT_MASK (IMG_UINT64_C(0x0000000000000400)) + +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES_POS (11U) +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES_BIT_MASK (IMG_UINT64_C(0x0000000000000800)) + +#define RGX_FEATURE_PM_MMUSTACK_POS (12U) +#define RGX_FEATURE_PM_MMUSTACK_BIT_MASK (IMG_UINT64_C(0x0000000000001000)) + +#define RGX_FEATURE_PM_MMU_VFP_POS (13U) +#define RGX_FEATURE_PM_MMU_VFP_BIT_MASK (IMG_UINT64_C(0x0000000000002000)) + +#define RGX_FEATURE_RISCV_FW_PROCESSOR_POS (14U) +#define RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK (IMG_UINT64_C(0x0000000000004000)) + +#define RGX_FEATURE_SIGNAL_SNOOPING_POS (15U) +#define RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK (IMG_UINT64_C(0x0000000000008000)) + +#define RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_POS (16U) +#define RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK (IMG_UINT64_C(0x0000000000010000)) + +#define RGX_FEATURE_SLC_VIVT_POS (17U) +#define RGX_FEATURE_SLC_VIVT_BIT_MASK (IMG_UINT64_C(0x0000000000020000)) + +#define RGX_FEATURE_SYS_BUS_SECURE_RESET_POS (18U) +#define RGX_FEATURE_SYS_BUS_SECURE_RESET_BIT_MASK (IMG_UINT64_C(0x0000000000040000)) + +#define RGX_FEATURE_TDM_PDS_CHECKSUM_POS (19U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM_BIT_MASK (IMG_UINT64_C(0x0000000000080000)) + +#define RGX_FEATURE_TESSELLATION_POS (20U) +#define RGX_FEATURE_TESSELLATION_BIT_MASK (IMG_UINT64_C(0x0000000000100000)) + +#define RGX_FEATURE_ZLS_CHECKSUM_POS (21U) +#define RGX_FEATURE_ZLS_CHECKSUM_BIT_MASK (IMG_UINT64_C(0x0000000000200000)) + + +#if 0 +/* Defines for RGX_FEATURE_FASTRENDER_DM_* needed to allow building of kbuild volcanic target + * without needing to specify RGX_BVNC. 
+ */ +#define RGX_FEATURE_FASTRENDER_DM_POS (0U) +#define RGX_FEATURE_FASTRENDER_DM_BIT_MASK (IMG_UINT64_C(0x0)) +#endif + +/****************************************************************************** + * Features with values indexes + *****************************************************************************/ + +typedef enum _RGX_FEATURE_WITH_VALUE_INDEX_ { + RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_IDX, + RGX_FEATURE_ECC_RAMS_IDX, + RGX_FEATURE_FBCDC_IDX, + RGX_FEATURE_FBCDC_ARCHITECTURE_IDX, + RGX_FEATURE_MAX_TPU_PER_SPU_IDX, + RGX_FEATURE_META_IDX, + RGX_FEATURE_META_COREMEM_BANKS_IDX, + RGX_FEATURE_META_COREMEM_SIZE_IDX, + RGX_FEATURE_META_DMA_CHANNEL_COUNT_IDX, + RGX_FEATURE_MMU_VERSION_IDX, + RGX_FEATURE_NUM_CLUSTERS_IDX, + RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX, + RGX_FEATURE_NUM_ISP_PER_SPU_IDX, + RGX_FEATURE_NUM_MEMBUS_IDX, + RGX_FEATURE_NUM_OSIDS_IDX, + RGX_FEATURE_NUM_SPU_IDX, + RGX_FEATURE_PBE_PER_SPU_IDX, + RGX_FEATURE_PHYS_BUS_WIDTH_IDX, + RGX_FEATURE_POWER_ISLAND_VERSION_IDX, + RGX_FEATURE_RENDER_TARGET_XY_MAX_IDX, + RGX_FEATURE_SCALABLE_TE_ARCH_IDX, + RGX_FEATURE_SCALABLE_VCE_IDX, + RGX_FEATURE_SLC_BANKS_IDX, + RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_IDX, + RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_IDX, + RGX_FEATURE_TILE_SIZE_X_IDX, + RGX_FEATURE_TILE_SIZE_Y_IDX, + RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_IDX, + RGX_FEATURE_WITH_VALUES_MAX_IDX, +} RGX_FEATURE_WITH_VALUE_INDEX; + + +/****************************************************************************** + * Mask and bit-position macros for ERNs and BRNs + *****************************************************************************/ + +#define HW_ERN_66574_POS (0U) +#define HW_ERN_66574_BIT_MASK (IMG_UINT64_C(0x0000000000000001)) + +#define FIX_HW_BRN_68777_POS (1U) +#define FIX_HW_BRN_68777_BIT_MASK (IMG_UINT64_C(0x0000000000000002)) + +/* Macro used for padding the unavailable values for features with values */ +#define RGX_FEATURE_VALUE_INVALID (0xFFFFFFFEU) + +/* Macro used for marking a feature with 
value as disabled for a specific bvnc */ +#define RGX_FEATURE_VALUE_DISABLED (0xFFFFFFFFU) + +#endif /* RGX_BVNC_DEFS_KM_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/rgx_bvnc_table_km.h b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/rgx_bvnc_table_km.h new file mode 100644 index 000000000000..3c7557e72b42 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/rgx_bvnc_table_km.h @@ -0,0 +1,365 @@ +/*************************************************************************/ /*! +@Title Hardware definition file rgx_bvnc_table_km.h +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/****************************************************************************** + * Auto generated file by rgxbvnc_tablegen.py * + * This file should not be edited manually * + *****************************************************************************/ + +#ifndef RGX_BVNC_TABLE_KM_H +#define RGX_BVNC_TABLE_KM_H + +#include "img_types.h" +#include "img_defs.h" +#include "rgxdefs_km.h" + +#ifndef _RGXBVNC_C_ +#error "This file should only be included from rgxbvnc.c" +#endif + +#if defined(RGX_BVNC_TABLE_UM_H) +#error "This file should not be included in conjunction with rgx_bvnc_table_um.h" +#endif + + +/****************************************************************************** + * Defines and arrays for each feature with values used + * for handling the corresponding values + *****************************************************************************/ + +#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_values[RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 3, 4, }; + +#define 
RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_ECC_RAMS_values[RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 2, }; + +#define RGX_FEATURE_FBCDC_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_values[RGX_FEATURE_FBCDC_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 3, 4, }; + +#define RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_ARCHITECTURE_values[RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 5, 6, }; + +#define RGX_FEATURE_MAX_TPU_PER_SPU_MAX_VALUE_IDX (4) +static const IMG_UINT16 aui16_RGX_FEATURE_MAX_TPU_PER_SPU_values[RGX_FEATURE_MAX_TPU_PER_SPU_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, }; + +#define RGX_FEATURE_META_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_META_values[RGX_FEATURE_META_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, MTP219, }; + +#define RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_META_COREMEM_BANKS_values[RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 8, }; + +#define RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_META_COREMEM_SIZE_values[RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 96, }; + +#define RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values[RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 4, }; + +#define RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_MMU_VERSION_values[RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 3, 4, }; + +#define RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX (7) +static const IMG_UINT16 
aui16_RGX_FEATURE_NUM_CLUSTERS_values[RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, 6, 8, }; + +#define RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX (8) +static const IMG_UINT16 aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values[RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, 3, 4, 8, 12, 16, 24, }; + +#define RGX_FEATURE_NUM_ISP_PER_SPU_MAX_VALUE_IDX (4) +static const IMG_UINT16 aui16_RGX_FEATURE_NUM_ISP_PER_SPU_values[RGX_FEATURE_NUM_ISP_PER_SPU_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, }; + +#define RGX_FEATURE_NUM_MEMBUS_MAX_VALUE_IDX (4) +static const IMG_UINT16 aui16_RGX_FEATURE_NUM_MEMBUS_values[RGX_FEATURE_NUM_MEMBUS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, }; + +#define RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_NUM_OSIDS_values[RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 8, }; + +#define RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX (5) +static const IMG_UINT16 aui16_RGX_FEATURE_NUM_SPU_values[RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, }; + +#define RGX_FEATURE_PBE_PER_SPU_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_PBE_PER_SPU_values[RGX_FEATURE_PBE_PER_SPU_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, }; + +#define RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values[RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 40, }; + +#define RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_POWER_ISLAND_VERSION_values[RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, }; + +#define RGX_FEATURE_RENDER_TARGET_XY_MAX_MAX_VALUE_IDX (3) +static const IMG_UINT16 
aui16_RGX_FEATURE_RENDER_TARGET_XY_MAX_values[RGX_FEATURE_RENDER_TARGET_XY_MAX_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 16384, 32768, }; + +#define RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values[RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, }; + +#define RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX (5) +static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_VCE_values[RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, }; + +#define RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX (5) +static const IMG_UINT16 aui16_RGX_FEATURE_SLC_BANKS_values[RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, 8, }; + +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values[RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1024, }; + +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX (7) +static const IMG_UINT16 aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values[RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 64, 128, 256, 512, 1024, 2048, }; + +#define RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_TILE_SIZE_X_values[RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 32, }; + +#define RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_TILE_SIZE_Y_values[RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 32, }; + +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_values[RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 40, }; + + 
+/****************************************************************************** + * Table contains pointers to each feature value array for features that have + * values. + * Indexed using enum RGX_FEATURE_WITH_VALUE_INDEX from rgx_bvnc_defs_km.h + *****************************************************************************/ + +static const IMG_UINT16 * const gaFeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX] = { + aui16_RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_values, + aui16_RGX_FEATURE_ECC_RAMS_values, + aui16_RGX_FEATURE_FBCDC_values, + aui16_RGX_FEATURE_FBCDC_ARCHITECTURE_values, + aui16_RGX_FEATURE_MAX_TPU_PER_SPU_values, + aui16_RGX_FEATURE_META_values, + aui16_RGX_FEATURE_META_COREMEM_BANKS_values, + aui16_RGX_FEATURE_META_COREMEM_SIZE_values, + aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values, + aui16_RGX_FEATURE_MMU_VERSION_values, + aui16_RGX_FEATURE_NUM_CLUSTERS_values, + aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values, + aui16_RGX_FEATURE_NUM_ISP_PER_SPU_values, + aui16_RGX_FEATURE_NUM_MEMBUS_values, + aui16_RGX_FEATURE_NUM_OSIDS_values, + aui16_RGX_FEATURE_NUM_SPU_values, + aui16_RGX_FEATURE_PBE_PER_SPU_values, + aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values, + aui16_RGX_FEATURE_POWER_ISLAND_VERSION_values, + aui16_RGX_FEATURE_RENDER_TARGET_XY_MAX_values, + aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values, + aui16_RGX_FEATURE_SCALABLE_VCE_values, + aui16_RGX_FEATURE_SLC_BANKS_values, + aui16_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values, + aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values, + aui16_RGX_FEATURE_TILE_SIZE_X_values, + aui16_RGX_FEATURE_TILE_SIZE_Y_values, + aui16_RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_values, +}; + + +/****************************************************************************** + * Array containing the lengths of the arrays containing the values. 
+ * Used for indexing the aui16__values defined upwards + *****************************************************************************/ + + +static const IMG_UINT16 gaFeaturesValuesMaxIndexes[] = { + RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_MAX_VALUE_IDX, + RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX, + RGX_FEATURE_FBCDC_MAX_VALUE_IDX, + RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX, + RGX_FEATURE_MAX_TPU_PER_SPU_MAX_VALUE_IDX, + RGX_FEATURE_META_MAX_VALUE_IDX, + RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX, + RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX, + RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX, + RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX, + RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX, + RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX, + RGX_FEATURE_NUM_ISP_PER_SPU_MAX_VALUE_IDX, + RGX_FEATURE_NUM_MEMBUS_MAX_VALUE_IDX, + RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX, + RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX, + RGX_FEATURE_PBE_PER_SPU_MAX_VALUE_IDX, + RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX, + RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX, + RGX_FEATURE_RENDER_TARGET_XY_MAX_MAX_VALUE_IDX, + RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX, + RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX, + RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX, + RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX, + RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX, + RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX, + RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX, + RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX, +}; + + +/****************************************************************************** + * Bit-positions for features with values + *****************************************************************************/ + +static const IMG_UINT16 aui16FeaturesWithValuesBitPositions[] = { + (0U), /* RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_POS */ + (2U), /* RGX_FEATURE_ECC_RAMS_POS */ + (4U), /* RGX_FEATURE_FBCDC_POS */ + (6U), /* RGX_FEATURE_FBCDC_ARCHITECTURE_POS */ + (8U), /* RGX_FEATURE_MAX_TPU_PER_SPU_POS */ + (11U), /* RGX_FEATURE_META_POS */ + (13U), /* 
RGX_FEATURE_META_COREMEM_BANKS_POS */ + (15U), /* RGX_FEATURE_META_COREMEM_SIZE_POS */ + (17U), /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_POS */ + (19U), /* RGX_FEATURE_MMU_VERSION_POS */ + (21U), /* RGX_FEATURE_NUM_CLUSTERS_POS */ + (24U), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_POS */ + (28U), /* RGX_FEATURE_NUM_ISP_PER_SPU_POS */ + (31U), /* RGX_FEATURE_NUM_MEMBUS_POS */ + (34U), /* RGX_FEATURE_NUM_OSIDS_POS */ + (36U), /* RGX_FEATURE_NUM_SPU_POS */ + (39U), /* RGX_FEATURE_PBE_PER_SPU_POS */ + (41U), /* RGX_FEATURE_PHYS_BUS_WIDTH_POS */ + (43U), /* RGX_FEATURE_POWER_ISLAND_VERSION_POS */ + (45U), /* RGX_FEATURE_RENDER_TARGET_XY_MAX_POS */ + (47U), /* RGX_FEATURE_SCALABLE_TE_ARCH_POS */ + (49U), /* RGX_FEATURE_SCALABLE_VCE_POS */ + (52U), /* RGX_FEATURE_SLC_BANKS_POS */ + (55U), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_POS */ + (57U), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_POS */ + (60U), /* RGX_FEATURE_TILE_SIZE_X_POS */ + (62U), /* RGX_FEATURE_TILE_SIZE_Y_POS */ + (64U), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_POS */ +}; + + +/****************************************************************************** + * Bit-masks for features with values + *****************************************************************************/ + +static const IMG_UINT64 aui64FeaturesWithValuesBitMasks[] = { + (IMG_UINT64_C(0x0000000000000003)), /* RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_BIT_MASK */ + (IMG_UINT64_C(0x000000000000000C)), /* RGX_FEATURE_ECC_RAMS_BIT_MASK */ + (IMG_UINT64_C(0x0000000000000030)), /* RGX_FEATURE_FBCDC_BIT_MASK */ + (IMG_UINT64_C(0x00000000000000C0)), /* RGX_FEATURE_FBCDC_ARCHITECTURE_BIT_MASK */ + (IMG_UINT64_C(0x0000000000000700)), /* RGX_FEATURE_MAX_TPU_PER_SPU_BIT_MASK */ + (IMG_UINT64_C(0x0000000000001800)), /* RGX_FEATURE_META_BIT_MASK */ + (IMG_UINT64_C(0x0000000000006000)), /* RGX_FEATURE_META_COREMEM_BANKS_BIT_MASK */ + (IMG_UINT64_C(0x0000000000018000)), /* RGX_FEATURE_META_COREMEM_SIZE_BIT_MASK */ + (IMG_UINT64_C(0x0000000000060000)), /* 
RGX_FEATURE_META_DMA_CHANNEL_COUNT_BIT_MASK */ + (IMG_UINT64_C(0x0000000000180000)), /* RGX_FEATURE_MMU_VERSION_BIT_MASK */ + (IMG_UINT64_C(0x0000000000E00000)), /* RGX_FEATURE_NUM_CLUSTERS_BIT_MASK */ + (IMG_UINT64_C(0x000000000F000000)), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_BIT_MASK */ + (IMG_UINT64_C(0x0000000070000000)), /* RGX_FEATURE_NUM_ISP_PER_SPU_BIT_MASK */ + (IMG_UINT64_C(0x0000000380000000)), /* RGX_FEATURE_NUM_MEMBUS_BIT_MASK */ + (IMG_UINT64_C(0x0000000C00000000)), /* RGX_FEATURE_NUM_OSIDS_BIT_MASK */ + (IMG_UINT64_C(0x0000007000000000)), /* RGX_FEATURE_NUM_SPU_BIT_MASK */ + (IMG_UINT64_C(0x0000018000000000)), /* RGX_FEATURE_PBE_PER_SPU_BIT_MASK */ + (IMG_UINT64_C(0x0000060000000000)), /* RGX_FEATURE_PHYS_BUS_WIDTH_BIT_MASK */ + (IMG_UINT64_C(0x0000180000000000)), /* RGX_FEATURE_POWER_ISLAND_VERSION_BIT_MASK */ + (IMG_UINT64_C(0x0000600000000000)), /* RGX_FEATURE_RENDER_TARGET_XY_MAX_BIT_MASK */ + (IMG_UINT64_C(0x0001800000000000)), /* RGX_FEATURE_SCALABLE_TE_ARCH_BIT_MASK */ + (IMG_UINT64_C(0x000E000000000000)), /* RGX_FEATURE_SCALABLE_VCE_BIT_MASK */ + (IMG_UINT64_C(0x0070000000000000)), /* RGX_FEATURE_SLC_BANKS_BIT_MASK */ + (IMG_UINT64_C(0x0180000000000000)), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_BIT_MASK */ + (IMG_UINT64_C(0x0E00000000000000)), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_BIT_MASK */ + (IMG_UINT64_C(0x3000000000000000)), /* RGX_FEATURE_TILE_SIZE_X_BIT_MASK */ + (IMG_UINT64_C(0xC000000000000000)), /* RGX_FEATURE_TILE_SIZE_Y_BIT_MASK */ + (IMG_UINT64_C(0x0000000000000003)), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_BIT_MASK */ +}; + + +/****************************************************************************** + * Table mapping bitmasks for features and features with values + *****************************************************************************/ + + +static const IMG_UINT64 gaFeatures[][4]= +{ + { IMG_UINT64_C(0x001b000000fe0002), IMG_UINT64_C(0x00000000003e8ea2), IMG_UINT64_C(0x5492ab14a34b29a5), IMG_UINT64_C(0x0000000000000001) 
}, /* 27.0.254.2 */ + { IMG_UINT64_C(0x001e000001980065), IMG_UINT64_C(0x00000000003e8fa3), IMG_UINT64_C(0x5492aa94933329a6), IMG_UINT64_C(0x0000000000000001) }, /* 30.0.408.101 */ + { IMG_UINT64_C(0x001e000006600001), IMG_UINT64_C(0x00000000003e8fa3), IMG_UINT64_C(0x58b52aa526932aa6), IMG_UINT64_C(0x0000000000000001) }, /* 30.0.1632.1 */ +}; + +/****************************************************************************** + * Table mapping bitmasks for ERNs/BRNs + *****************************************************************************/ + + +static const IMG_UINT64 gaErnsBrns[][2]= +{ + { IMG_UINT64_C(0x001b000500fe0002), IMG_UINT64_C(0x0000000000000000) }, /* 27.5.254.2 */ + { IMG_UINT64_C(0x001e000301980065), IMG_UINT64_C(0x0000000000000002) }, /* 30.3.408.101 */ + { IMG_UINT64_C(0x001e000406600001), IMG_UINT64_C(0x0000000000000002) }, /* 30.4.1632.1 */ + { IMG_UINT64_C(0x001e000506600001), IMG_UINT64_C(0x0000000000000002) }, /* 30.5.1632.1 */ +}; + +#if defined(DEBUG) + +#define FEATURE_NO_VALUES_NAMES_MAX_IDX (22) + +static const IMG_CHAR * const gaszFeaturesNoValuesNames[FEATURE_NO_VALUES_NAMES_MAX_IDX] = +{ + "ALBIORIX_TOP_INFRASTRUCTURE", + "AXI_ACE", + "FW_DUAL_LOCKSTEP", + "GPU_CPU_COHERENCY", + "GPU_MULTICORE_SUPPORT", + "GPU_VIRTUALISATION", + "HYPERVISOR_MMU", + "META_DMA", + "META_REGISTER_UNPACKED_ACCESSES", + "PBE_CHECKSUM_2D", + "PERFBUS", + "PM_BYTE_ALIGNED_BASE_ADDRESSES", + "PM_MMUSTACK", + "PM_MMU_VFP", + "RISCV_FW_PROCESSOR", + "SIGNAL_SNOOPING", + "SLC_FAULT_ACCESS_ADDR_PHYS", + "SLC_VIVT", + "SYS_BUS_SECURE_RESET", + "TDM_PDS_CHECKSUM", + "TESSELLATION", + "ZLS_CHECKSUM", +}; + +#define ERNSBRNS_IDS_MAX_IDX (2) + +static const IMG_UINT32 gaui64ErnsBrnsIDs[ERNSBRNS_IDS_MAX_IDX] = +{ + 66574, + 68777, +}; + +#endif /* defined(DEBUG) */ +#endif /* RGX_BVNC_TABLE_KM_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/rgx_cr_defs_km.h b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/rgx_cr_defs_km.h new file mode 100644 index 
000000000000..01f1cd8aa0ce --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/rgx_cr_defs_km.h @@ -0,0 +1,5036 @@ +/*************************************************************************/ /*! +@Title Hardware definition file rgx_cr_defs_km.h +@Brief The file contains auto-generated hardware definitions without + BVNC-specific compile time conditionals. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* **** Autogenerated C -- do not edit **** */ + +/* + */ + + +#ifndef RGX_CR_DEFS_KM_H +#define RGX_CR_DEFS_KM_H + +#if !defined(IMG_EXPLICIT_INCLUDE_HWDEFS) +#error This file may only be included if explicitly defined +#endif + +#include "img_types.h" +#include "img_defs.h" + + +#define RGX_CR_DEFS_KM_REVISION 106 + +/* + Register RGX_CR_USC_INDIRECT +*/ +#define RGX_CR_USC_INDIRECT (0x8000U) +#define RGX_CR_USC_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_USC_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_USC_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) + + +/* + Register RGX_CR_MERCER_INDIRECT +*/ +#define RGX_CR_MERCER_INDIRECT (0x8238U) +#define RGX_CR_MERCER_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_MERCER_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_MERCER_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) + + +/* + Register RGX_CR_PBE_INDIRECT +*/ +#define RGX_CR_PBE_INDIRECT (0x83E0U) +#define RGX_CR_PBE_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_PBE_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_PBE_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) + + +/* + Register RGX_CR_PBE_SHARED_INDIRECT +*/ +#define RGX_CR_PBE_SHARED_INDIRECT (0x8388U) +#define RGX_CR_PBE_SHARED_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_PBE_SHARED_INDIRECT_ADDRESS_SHIFT (0U) +#define 
RGX_CR_PBE_SHARED_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFE0U) + + +/* + Register RGX_CR_ISP_INDIRECT +*/ +#define RGX_CR_ISP_INDIRECT (0x83F8U) +#define RGX_CR_ISP_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_ISP_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_ISP_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) + + +/* + Register RGX_CR_TPU_INDIRECT +*/ +#define RGX_CR_TPU_INDIRECT (0x83E8U) +#define RGX_CR_TPU_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_TPU_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_TPU_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) + + +/* + Register RGX_CR_SWIFT_INDIRECT +*/ +#define RGX_CR_SWIFT_INDIRECT (0x8308U) +#define RGX_CR_SWIFT_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_SWIFT_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_SWIFT_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) + + +/* + Register RGX_CR_TEXAS_INDIRECT +*/ +#define RGX_CR_TEXAS_INDIRECT (0x8390U) +#define RGX_CR_TEXAS_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_TEXAS_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_TEXAS_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFE0U) + + +/* + Register RGX_CR_CLK_CTRL0 +*/ +#define RGX_CR_CLK_CTRL0 (0x0000U) +#define RGX_CR_CLK_CTRL0_MASKFULL (IMG_UINT64_C(0xFFCF03000F333303)) +#define RGX_CR_CLK_CTRL0_BIF_TEXAS_SHIFT (62U) +#define RGX_CR_CLK_CTRL0_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_BIF_TEXAS_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_BIF_TEXAS_ON (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_CLK_CTRL0_BIF_TEXAS_AUTO (IMG_UINT64_C(0x8000000000000000)) +#define RGX_CR_CLK_CTRL0_FBCACHE_SHIFT (60U) +#define RGX_CR_CLK_CTRL0_FBCACHE_CLRMSK (IMG_UINT64_C(0xCFFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_FBCACHE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_FBCACHE_ON (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_CLK_CTRL0_FBCACHE_AUTO (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_CLK_CTRL0_FBC_SHIFT (58U) 
+#define RGX_CR_CLK_CTRL0_FBC_CLRMSK (IMG_UINT64_C(0xF3FFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_FBC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_FBC_ON (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_CLK_CTRL0_FBC_AUTO (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_CLK_CTRL0_FBDC_SHIFT (56U) +#define RGX_CR_CLK_CTRL0_FBDC_CLRMSK (IMG_UINT64_C(0xFCFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_FBDC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_FBDC_ON (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_CLK_CTRL0_FBDC_AUTO (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_CLK_CTRL0_FBM_SHIFT (54U) +#define RGX_CR_CLK_CTRL0_FBM_CLRMSK (IMG_UINT64_C(0xFF3FFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_FBM_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_FBM_ON (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_CLK_CTRL0_FBM_AUTO (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_CLK_CTRL0_PBE_SHIFT (50U) +#define RGX_CR_CLK_CTRL0_PBE_CLRMSK (IMG_UINT64_C(0xFFF3FFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_PBE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_PBE_ON (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_CLK_CTRL0_PBE_AUTO (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_CLK_CTRL0_MCU_L1_SHIFT (48U) +#define RGX_CR_CLK_CTRL0_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFCFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_MCU_L1_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_MCU_L1_ON (IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_CLK_CTRL0_MCU_L1_AUTO (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_CLK_CTRL0_BIF_SHIFT (40U) +#define RGX_CR_CLK_CTRL0_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_BIF_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_BIF_ON (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_CLK_CTRL0_BIF_AUTO (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_CLK_CTRL0_MCU_L0_SHIFT (26U) +#define RGX_CR_CLK_CTRL0_MCU_L0_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFF3FFFFFF)) +#define RGX_CR_CLK_CTRL0_MCU_L0_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_MCU_L0_ON (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_CLK_CTRL0_MCU_L0_AUTO (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_CLK_CTRL0_TPU_SHIFT (24U) +#define RGX_CR_CLK_CTRL0_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF)) +#define RGX_CR_CLK_CTRL0_TPU_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_TPU_ON (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_CLK_CTRL0_TPU_AUTO (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_CLK_CTRL0_USC_SHIFT (20U) +#define RGX_CR_CLK_CTRL0_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF)) +#define RGX_CR_CLK_CTRL0_USC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_USC_ON (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_CLK_CTRL0_USC_AUTO (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_CLK_CTRL0_SLC_SHIFT (16U) +#define RGX_CR_CLK_CTRL0_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) +#define RGX_CR_CLK_CTRL0_SLC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_SLC_ON (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_CLK_CTRL0_SLC_AUTO (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_CLK_CTRL0_PDS_SHIFT (12U) +#define RGX_CR_CLK_CTRL0_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF)) +#define RGX_CR_CLK_CTRL0_PDS_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_PDS_ON (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_CLK_CTRL0_PDS_AUTO (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_CLK_CTRL0_PM_SHIFT (8U) +#define RGX_CR_CLK_CTRL0_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) +#define RGX_CR_CLK_CTRL0_PM_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_PM_ON (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_CTRL0_PM_AUTO (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_CTRL0_ISP_SHIFT (0U) +#define RGX_CR_CLK_CTRL0_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_CLK_CTRL0_ISP_OFF 
(IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_ISP_ON (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_CLK_CTRL0_ISP_AUTO (IMG_UINT64_C(0x0000000000000002)) + + +/* + Register RGX_CR_CORE_ID +*/ +#define RGX_CR_CORE_ID (0x0020U) +#define RGX_CR_CORE_ID_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_CORE_ID_BRANCH_ID_SHIFT (48U) +#define RGX_CR_CORE_ID_BRANCH_ID_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) +#define RGX_CR_CORE_ID_VERSION_ID_SHIFT (32U) +#define RGX_CR_CORE_ID_VERSION_ID_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF)) +#define RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_SHIFT (16U) +#define RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF)) +#define RGX_CR_CORE_ID_CONFIG_ID_SHIFT (0U) +#define RGX_CR_CORE_ID_CONFIG_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register RGX_CR_SPU_ENABLE +*/ +#define RGX_CR_SPU_ENABLE (0x0050U) +#define RGX_CR_SPU_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SPU_ENABLE_ENABLE_SHIFT (0U) +#define RGX_CR_SPU_ENABLE_ENABLE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SOC_TIMER_GRAY +*/ +#define RGX_CR_SOC_TIMER_GRAY (0x00E0U) +#define RGX_CR_SOC_TIMER_GRAY_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SOC_TIMER_GRAY_VALUE_SHIFT (0U) +#define RGX_CR_SOC_TIMER_GRAY_VALUE_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SOC_TIMER_BINARY +*/ +#define RGX_CR_SOC_TIMER_BINARY (0x00E8U) +#define RGX_CR_SOC_TIMER_BINARY_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SOC_TIMER_BINARY_VALUE_SHIFT (0U) +#define RGX_CR_SOC_TIMER_BINARY_VALUE_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_CLK_CTRL1 +*/ +#define RGX_CR_CLK_CTRL1 (0x0080U) +#define RGX_CR_CLK_CTRL1_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF)) +#define RGX_CR_CLK_CTRL1_BSC_SHIFT (62U) +#define RGX_CR_CLK_CTRL1_BSC_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_BSC_OFF (IMG_UINT64_C(0x0000000000000000)) 
+#define RGX_CR_CLK_CTRL1_BSC_ON (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_CLK_CTRL1_BSC_AUTO (IMG_UINT64_C(0x8000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_SHIFT (60U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_CLRMSK (IMG_UINT64_C(0xCFFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_ON (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_AUTO (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_SHIFT (58U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_CLRMSK (IMG_UINT64_C(0xF3FFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_ON (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_AUTO (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_SHIFT (56U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_CLRMSK (IMG_UINT64_C(0xFCFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_ON (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_AUTO (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_SHIFT (54U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_CLRMSK (IMG_UINT64_C(0xFF3FFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_ON (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_AUTO (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_CLK_CTRL1_PSB_SHIFT (52U) +#define RGX_CR_CLK_CTRL1_PSB_CLRMSK (IMG_UINT64_C(0xFFCFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_PSB_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_PSB_ON (IMG_UINT64_C(0x0010000000000000)) +#define RGX_CR_CLK_CTRL1_PSB_AUTO (IMG_UINT64_C(0x0020000000000000)) +#define 
RGX_CR_CLK_CTRL1_TPU_USC_SELECT_SHIFT (50U) +#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_CLRMSK (IMG_UINT64_C(0xFFF3FFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_ON (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_AUTO (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_CLK_CTRL1_IOWA_SHIFT (48U) +#define RGX_CR_CLK_CTRL1_IOWA_CLRMSK (IMG_UINT64_C(0xFFFCFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_IOWA_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_IOWA_ON (IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_CLK_CTRL1_IOWA_AUTO (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_CLK_CTRL1_RAC_SHIFT (46U) +#define RGX_CR_CLK_CTRL1_RAC_CLRMSK (IMG_UINT64_C(0xFFFF3FFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_RAC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_RAC_ON (IMG_UINT64_C(0x0000400000000000)) +#define RGX_CR_CLK_CTRL1_RAC_AUTO (IMG_UINT64_C(0x0000800000000000)) +#define RGX_CR_CLK_CTRL1_CDM_PIPE_SHIFT (44U) +#define RGX_CR_CLK_CTRL1_CDM_PIPE_CLRMSK (IMG_UINT64_C(0xFFFFCFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_CDM_PIPE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_CDM_PIPE_ON (IMG_UINT64_C(0x0000100000000000)) +#define RGX_CR_CLK_CTRL1_CDM_PIPE_AUTO (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_SHIFT (42U) +#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_CLRMSK (IMG_UINT64_C(0xFFFFF3FFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_ON (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_AUTO (IMG_UINT64_C(0x0000080000000000)) +#define RGX_CR_CLK_CTRL1_TCU_L1_SHIFT (40U) +#define RGX_CR_CLK_CTRL1_TCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_TCU_L1_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_TCU_L1_ON (IMG_UINT64_C(0x0000010000000000)) +#define 
RGX_CR_CLK_CTRL1_TCU_L1_AUTO (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_CLK_CTRL1_SH_SHIFT (38U) +#define RGX_CR_CLK_CTRL1_SH_CLRMSK (IMG_UINT64_C(0xFFFFFF3FFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_SH_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_SH_ON (IMG_UINT64_C(0x0000004000000000)) +#define RGX_CR_CLK_CTRL1_SH_AUTO (IMG_UINT64_C(0x0000008000000000)) +#define RGX_CR_CLK_CTRL1_TDM_SHIFT (36U) +#define RGX_CR_CLK_CTRL1_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFCFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_TDM_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_TDM_ON (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_CLK_CTRL1_TDM_AUTO (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_CLK_CTRL1_ASTC_SHIFT (34U) +#define RGX_CR_CLK_CTRL1_ASTC_CLRMSK (IMG_UINT64_C(0xFFFFFFF3FFFFFFFF)) +#define RGX_CR_CLK_CTRL1_ASTC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_ASTC_ON (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_CLK_CTRL1_ASTC_AUTO (IMG_UINT64_C(0x0000000800000000)) +#define RGX_CR_CLK_CTRL1_IPF_SHIFT (32U) +#define RGX_CR_CLK_CTRL1_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFCFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_IPF_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_IPF_ON (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_CLK_CTRL1_IPF_AUTO (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_CLK_CTRL1_COMPUTE_SHIFT (30U) +#define RGX_CR_CLK_CTRL1_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF3FFFFFFF)) +#define RGX_CR_CLK_CTRL1_COMPUTE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_COMPUTE_ON (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_CLK_CTRL1_COMPUTE_AUTO (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_CLK_CTRL1_PIXEL_SHIFT (28U) +#define RGX_CR_CLK_CTRL1_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFCFFFFFFF)) +#define RGX_CR_CLK_CTRL1_PIXEL_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_PIXEL_ON (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_CLK_CTRL1_PIXEL_AUTO 
(IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_CLK_CTRL1_VERTEX_SHIFT (26U) +#define RGX_CR_CLK_CTRL1_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF3FFFFFF)) +#define RGX_CR_CLK_CTRL1_VERTEX_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_VERTEX_ON (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_CLK_CTRL1_VERTEX_AUTO (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_CLK_CTRL1_TPF_SHIFT (24U) +#define RGX_CR_CLK_CTRL1_TPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF)) +#define RGX_CR_CLK_CTRL1_TPF_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_TPF_ON (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_CLK_CTRL1_TPF_AUTO (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_CLK_CTRL1_GEO_VERTEX_SHIFT (22U) +#define RGX_CR_CLK_CTRL1_GEO_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF3FFFFF)) +#define RGX_CR_CLK_CTRL1_GEO_VERTEX_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_GEO_VERTEX_ON (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_CLK_CTRL1_GEO_VERTEX_AUTO (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_CLK_CTRL1_GEO_SHARED_SHIFT (18U) +#define RGX_CR_CLK_CTRL1_GEO_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF)) +#define RGX_CR_CLK_CTRL1_GEO_SHARED_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_GEO_SHARED_ON (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_CLK_CTRL1_GEO_SHARED_AUTO (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_CLK_CTRL1_GEO_TESS_SHIFT (16U) +#define RGX_CR_CLK_CTRL1_GEO_TESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) +#define RGX_CR_CLK_CTRL1_GEO_TESS_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_GEO_TESS_ON (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_CLK_CTRL1_GEO_TESS_AUTO (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_SHIFT (14U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF3FFF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define 
RGX_CR_CLK_CTRL1_USC_PIPE_SMP_ON (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_AUTO (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_SHIFT (12U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_ON (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_AUTO (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_SHIFT (10U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_ON (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_AUTO (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_SHIFT (8U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_ON (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_AUTO (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_SHIFT (6U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF3F)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_ON (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_AUTO (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_SHIFT (4U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_ON (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_AUTO (IMG_UINT64_C(0x0000000000000020)) +#define 
RGX_CR_CLK_CTRL1_USC_PIPE_CPX_SHIFT (2U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_CPX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF3)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_CPX_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_CPX_ON (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_CPX_AUTO (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_SHIFT (0U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_ON (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_AUTO (IMG_UINT64_C(0x0000000000000002)) + + +/* + Register RGX_CR_SOFT_RESET +*/ +#define RGX_CR_SOFT_RESET (0x0100U) +#define RGX_CR_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x01FFFFE0000BDEFF)) +#define RGX_CR_SOFT_RESET_GEO_TESS_SHIFT (56U) +#define RGX_CR_SOFT_RESET_GEO_TESS_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_GEO_TESS_EN (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_SOFT_RESET_INT_SHIFT (55U) +#define RGX_CR_SOFT_RESET_INT_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_INT_EN (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_SOFT_RESET_FP_SHIFT (54U) +#define RGX_CR_SOFT_RESET_FP_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_FP_EN (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_SOFT_RESET_YUV_SHIFT (53U) +#define RGX_CR_SOFT_RESET_YUV_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_YUV_EN (IMG_UINT64_C(0x0020000000000000)) +#define RGX_CR_SOFT_RESET_PSB_SHIFT (52U) +#define RGX_CR_SOFT_RESET_PSB_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_PSB_EN (IMG_UINT64_C(0x0010000000000000)) +#define RGX_CR_SOFT_RESET_IOWA_SHIFT (51U) +#define RGX_CR_SOFT_RESET_IOWA_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_IOWA_EN (IMG_UINT64_C(0x0008000000000000)) 
+#define RGX_CR_SOFT_RESET_SH_SHIFT (50U) +#define RGX_CR_SOFT_RESET_SH_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_SH_EN (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_SOFT_RESET_BSC_SHIFT (49U) +#define RGX_CR_SOFT_RESET_BSC_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_BSC_EN (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_SOFT_RESET_TPU_USC_SELECT_SHIFT (48U) +#define RGX_CR_SOFT_RESET_TPU_USC_SELECT_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_TPU_USC_SELECT_EN (IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_SOFT_RESET_USC_L2ICACHE_SHIFT (47U) +#define RGX_CR_SOFT_RESET_USC_L2ICACHE_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_USC_L2ICACHE_EN (IMG_UINT64_C(0x0000800000000000)) +#define RGX_CR_SOFT_RESET_TCU_L1_SHIFT (46U) +#define RGX_CR_SOFT_RESET_TCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_TCU_L1_EN (IMG_UINT64_C(0x0000400000000000)) +#define RGX_CR_SOFT_RESET_BIF_TEXAS_SHIFT (45U) +#define RGX_CR_SOFT_RESET_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_BIF_TEXAS_EN (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_SOFT_RESET_BIF_JONES_SHIFT (44U) +#define RGX_CR_SOFT_RESET_BIF_JONES_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_BIF_JONES_EN (IMG_UINT64_C(0x0000100000000000)) +#define RGX_CR_SOFT_RESET_SLC_SHIFT (43U) +#define RGX_CR_SOFT_RESET_SLC_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_SLC_EN (IMG_UINT64_C(0x0000080000000000)) +#define RGX_CR_SOFT_RESET_FBCACHE_SHIFT (42U) +#define RGX_CR_SOFT_RESET_FBCACHE_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_FBCACHE_EN (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_SOFT_RESET_FBM_SHIFT (41U) +#define RGX_CR_SOFT_RESET_FBM_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_FBM_EN (IMG_UINT64_C(0x0000020000000000)) +#define 
RGX_CR_SOFT_RESET_FBDC_SHIFT (40U) +#define RGX_CR_SOFT_RESET_FBDC_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_FBDC_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_SOFT_RESET_FBC_SHIFT (39U) +#define RGX_CR_SOFT_RESET_FBC_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) +#define RGX_CR_SOFT_RESET_FBC_EN (IMG_UINT64_C(0x0000008000000000)) +#define RGX_CR_SOFT_RESET_PM_SHIFT (38U) +#define RGX_CR_SOFT_RESET_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_PM_EN (IMG_UINT64_C(0x0000004000000000)) +#define RGX_CR_SOFT_RESET_GARTEN_SHIFT (37U) +#define RGX_CR_SOFT_RESET_GARTEN_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_GARTEN_EN (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_SOFT_RESET_PBE_SHIFT (19U) +#define RGX_CR_SOFT_RESET_PBE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_SOFT_RESET_PBE_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_SOFT_RESET_MCU_L1_SHIFT (17U) +#define RGX_CR_SOFT_RESET_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_SOFT_RESET_MCU_L1_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_SOFT_RESET_CDM_PIPE_SHIFT (16U) +#define RGX_CR_SOFT_RESET_CDM_PIPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_SOFT_RESET_CDM_PIPE_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_SOFT_RESET_TDM_SHIFT (15U) +#define RGX_CR_SOFT_RESET_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_SOFT_RESET_TDM_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_SOFT_RESET_ASTC_SHIFT (14U) +#define RGX_CR_SOFT_RESET_ASTC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_SOFT_RESET_ASTC_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_SOFT_RESET_PDS_SHIFT (12U) +#define RGX_CR_SOFT_RESET_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_SOFT_RESET_PDS_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_SOFT_RESET_ISP_SHIFT (11U) +#define RGX_CR_SOFT_RESET_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) 
+#define RGX_CR_SOFT_RESET_ISP_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_SOFT_RESET_TPF_SHIFT (10U) +#define RGX_CR_SOFT_RESET_TPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_SOFT_RESET_TPF_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_SOFT_RESET_IPF_SHIFT (9U) +#define RGX_CR_SOFT_RESET_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_SOFT_RESET_IPF_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_SOFT_RESET_GEO_SHARED_SHIFT (7U) +#define RGX_CR_SOFT_RESET_GEO_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_SOFT_RESET_GEO_SHARED_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_SOFT_RESET_GEO_VERTEX_SHIFT (6U) +#define RGX_CR_SOFT_RESET_GEO_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_SOFT_RESET_GEO_VERTEX_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_SOFT_RESET_PIXEL_SHIFT (5U) +#define RGX_CR_SOFT_RESET_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_SOFT_RESET_PIXEL_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_SOFT_RESET_COMPUTE_SHIFT (4U) +#define RGX_CR_SOFT_RESET_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_SOFT_RESET_COMPUTE_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_SOFT_RESET_MCU_L0_SHIFT (3U) +#define RGX_CR_SOFT_RESET_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_SOFT_RESET_MCU_L0_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_SOFT_RESET_TPU_SHIFT (2U) +#define RGX_CR_SOFT_RESET_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_SOFT_RESET_TPU_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_SOFT_RESET_VERTEX_SHIFT (1U) +#define RGX_CR_SOFT_RESET_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_SOFT_RESET_VERTEX_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_SOFT_RESET_USC_SHIFT (0U) +#define RGX_CR_SOFT_RESET_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_SOFT_RESET_USC_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + 
Register RGX_CR_SOFT_RESET_SPU +*/ +#define RGX_CR_SOFT_RESET_SPU (0x0108U) +#define RGX_CR_SOFT_RESET_SPU_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SOFT_RESET_SPU_SPU31_SHIFT (31U) +#define RGX_CR_SOFT_RESET_SPU_SPU31_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU31_EN (0x80000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU30_SHIFT (30U) +#define RGX_CR_SOFT_RESET_SPU_SPU30_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU30_EN (0x40000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU29_SHIFT (29U) +#define RGX_CR_SOFT_RESET_SPU_SPU29_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU29_EN (0x20000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU28_SHIFT (28U) +#define RGX_CR_SOFT_RESET_SPU_SPU28_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU28_EN (0x10000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU27_SHIFT (27U) +#define RGX_CR_SOFT_RESET_SPU_SPU27_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU27_EN (0x08000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU26_SHIFT (26U) +#define RGX_CR_SOFT_RESET_SPU_SPU26_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU26_EN (0x04000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU25_SHIFT (25U) +#define RGX_CR_SOFT_RESET_SPU_SPU25_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU25_EN (0x02000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU24_SHIFT (24U) +#define RGX_CR_SOFT_RESET_SPU_SPU24_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU24_EN (0x01000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU23_SHIFT (23U) +#define RGX_CR_SOFT_RESET_SPU_SPU23_CLRMSK (0xFF7FFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU23_EN (0x00800000U) +#define RGX_CR_SOFT_RESET_SPU_SPU22_SHIFT (22U) +#define RGX_CR_SOFT_RESET_SPU_SPU22_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU22_EN (0x00400000U) +#define RGX_CR_SOFT_RESET_SPU_SPU21_SHIFT (21U) +#define RGX_CR_SOFT_RESET_SPU_SPU21_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU21_EN (0x00200000U) +#define RGX_CR_SOFT_RESET_SPU_SPU20_SHIFT (20U) +#define 
RGX_CR_SOFT_RESET_SPU_SPU20_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU20_EN (0x00100000U) +#define RGX_CR_SOFT_RESET_SPU_SPU19_SHIFT (19U) +#define RGX_CR_SOFT_RESET_SPU_SPU19_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU19_EN (0x00080000U) +#define RGX_CR_SOFT_RESET_SPU_SPU18_SHIFT (18U) +#define RGX_CR_SOFT_RESET_SPU_SPU18_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU18_EN (0x00040000U) +#define RGX_CR_SOFT_RESET_SPU_SPU17_SHIFT (17U) +#define RGX_CR_SOFT_RESET_SPU_SPU17_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU17_EN (0x00020000U) +#define RGX_CR_SOFT_RESET_SPU_SPU16_SHIFT (16U) +#define RGX_CR_SOFT_RESET_SPU_SPU16_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU16_EN (0x00010000U) +#define RGX_CR_SOFT_RESET_SPU_SPU15_SHIFT (15U) +#define RGX_CR_SOFT_RESET_SPU_SPU15_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU15_EN (0x00008000U) +#define RGX_CR_SOFT_RESET_SPU_SPU14_SHIFT (14U) +#define RGX_CR_SOFT_RESET_SPU_SPU14_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU14_EN (0x00004000U) +#define RGX_CR_SOFT_RESET_SPU_SPU13_SHIFT (13U) +#define RGX_CR_SOFT_RESET_SPU_SPU13_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU13_EN (0x00002000U) +#define RGX_CR_SOFT_RESET_SPU_SPU12_SHIFT (12U) +#define RGX_CR_SOFT_RESET_SPU_SPU12_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU12_EN (0x00001000U) +#define RGX_CR_SOFT_RESET_SPU_SPU11_SHIFT (11U) +#define RGX_CR_SOFT_RESET_SPU_SPU11_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_SOFT_RESET_SPU_SPU11_EN (0x00000800U) +#define RGX_CR_SOFT_RESET_SPU_SPU10_SHIFT (10U) +#define RGX_CR_SOFT_RESET_SPU_SPU10_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU10_EN (0x00000400U) +#define RGX_CR_SOFT_RESET_SPU_SPU9_SHIFT (9U) +#define RGX_CR_SOFT_RESET_SPU_SPU9_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU9_EN (0x00000200U) +#define RGX_CR_SOFT_RESET_SPU_SPU8_SHIFT (8U) +#define RGX_CR_SOFT_RESET_SPU_SPU8_CLRMSK (0xFFFFFEFFU) +#define 
RGX_CR_SOFT_RESET_SPU_SPU8_EN (0x00000100U) +#define RGX_CR_SOFT_RESET_SPU_SPU7_SHIFT (7U) +#define RGX_CR_SOFT_RESET_SPU_SPU7_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_SOFT_RESET_SPU_SPU7_EN (0x00000080U) +#define RGX_CR_SOFT_RESET_SPU_SPU6_SHIFT (6U) +#define RGX_CR_SOFT_RESET_SPU_SPU6_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_SOFT_RESET_SPU_SPU6_EN (0x00000040U) +#define RGX_CR_SOFT_RESET_SPU_SPU5_SHIFT (5U) +#define RGX_CR_SOFT_RESET_SPU_SPU5_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_SOFT_RESET_SPU_SPU5_EN (0x00000020U) +#define RGX_CR_SOFT_RESET_SPU_SPU4_SHIFT (4U) +#define RGX_CR_SOFT_RESET_SPU_SPU4_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_SOFT_RESET_SPU_SPU4_EN (0x00000010U) +#define RGX_CR_SOFT_RESET_SPU_SPU3_SHIFT (3U) +#define RGX_CR_SOFT_RESET_SPU_SPU3_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_SOFT_RESET_SPU_SPU3_EN (0x00000008U) +#define RGX_CR_SOFT_RESET_SPU_SPU2_SHIFT (2U) +#define RGX_CR_SOFT_RESET_SPU_SPU2_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SOFT_RESET_SPU_SPU2_EN (0x00000004U) +#define RGX_CR_SOFT_RESET_SPU_SPU1_SHIFT (1U) +#define RGX_CR_SOFT_RESET_SPU_SPU1_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SOFT_RESET_SPU_SPU1_EN (0x00000002U) +#define RGX_CR_SOFT_RESET_SPU_SPU0_SHIFT (0U) +#define RGX_CR_SOFT_RESET_SPU_SPU0_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SOFT_RESET_SPU_SPU0_EN (0x00000001U) + + +/* + Register RGX_CR_MULTICORE_GPU +*/ +#define RGX_CR_MULTICORE_GPU (0x0588U) +#define RGX_CR_MULTICORE_GPU_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_TDM_SHIFT (7U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_TDM_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_TDM_EN (0x00000080U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_SHIFT (6U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_EN (0x00000040U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_SHIFT (5U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_CLRMSK (0xFFFFFFDFU) +#define 
RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_EN (0x00000020U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_SHIFT (4U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_EN (0x00000010U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_SHIFT (3U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_EN (0x00000008U) +#define RGX_CR_MULTICORE_GPU_ID_SHIFT (0U) +#define RGX_CR_MULTICORE_GPU_ID_CLRMSK (0xFFFFFFF8U) + + + + +/* + Register RGX_CR_MULTICORE_SYSTEM +*/ +#define RGX_CR_MULTICORE_SYSTEM (0x0590U) +#define RGX_CR_MULTICORE_SYSTEM_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT (0U) +#define RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_CLRMSK (0xFFFFFFF0U) + + + + +/* + Register RGX_CR_EVENT_STATUS +*/ +#define RGX_CR_EVENT_STATUS (0x0130U) +#define RGX_CR_EVENT_STATUS__ALRIF_V1__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_EVENT_STATUS__ALRIF_V2__MASKFULL (IMG_UINT64_C(0x00000000FFFBFFFF)) +#define RGX_CR_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_SHIFT (31U) +#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_EN (0x80000000U) +#define RGX_CR_EVENT_STATUS_TDM_FINISHED_SHIFT (31U) +#define RGX_CR_EVENT_STATUS_TDM_FINISHED_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_EVENT_STATUS_TDM_FINISHED_EN (0x80000000U) +#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_SHIFT (30U) +#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_EN (0x40000000U) +#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_SHIFT (29U) +#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U) +#define RGX_CR_EVENT_STATUS_AMM_OUT_OF_MEMORY_SHIFT (28U) +#define 
RGX_CR_EVENT_STATUS_AMM_OUT_OF_MEMORY_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_EVENT_STATUS_AMM_OUT_OF_MEMORY_EN (0x10000000U) +#define RGX_CR_EVENT_STATUS_RTU_OUT_OF_MEMORY_SHIFT (27U) +#define RGX_CR_EVENT_STATUS_RTU_OUT_OF_MEMORY_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_EVENT_STATUS_RTU_OUT_OF_MEMORY_EN (0x08000000U) +#define RGX_CR_EVENT_STATUS_RPM_VTX_OUT_OF_MEMORY_SHIFT (26U) +#define RGX_CR_EVENT_STATUS_RPM_VTX_OUT_OF_MEMORY_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_EVENT_STATUS_RPM_VTX_OUT_OF_MEMORY_EN (0x04000000U) +#define RGX_CR_EVENT_STATUS_RPM_VAR_OUT_OF_MEMORY_SHIFT (25U) +#define RGX_CR_EVENT_STATUS_RPM_VAR_OUT_OF_MEMORY_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_EVENT_STATUS_RPM_VAR_OUT_OF_MEMORY_EN (0x02000000U) +#define RGX_CR_EVENT_STATUS_RPM_NOD_OUT_OF_MEMORY_SHIFT (24U) +#define RGX_CR_EVENT_STATUS_RPM_NOD_OUT_OF_MEMORY_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_EVENT_STATUS_RPM_NOD_OUT_OF_MEMORY_EN (0x01000000U) +#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_SHIFT (23U) +#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_CLRMSK (0xFF7FFFFFU) +#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_EN (0x00800000U) +#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_SHIFT (22U) +#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_EN (0x00400000U) +#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_SHIFT (21U) +#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_EN (0x00200000U) +#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_SHIFT (20U) +#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_EN (0x00100000U) +#define RGX_CR_EVENT_STATUS_SHG_FINISHED_SHIFT (19U) +#define RGX_CR_EVENT_STATUS_SHG_FINISHED_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_EVENT_STATUS_SHG_FINISHED_EN (0x00080000U) +#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) +#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) 
+#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) +#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_SHIFT (17U) +#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_EN (0x00020000U) +#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_SHIFT (16U) +#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_EN (0x00010000U) +#define RGX_CR_EVENT_STATUS_USC_TRIGGER_SHIFT (15U) +#define RGX_CR_EVENT_STATUS_USC_TRIGGER_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_EVENT_STATUS_USC_TRIGGER_EN (0x00008000U) +#define RGX_CR_EVENT_STATUS_FAULT_FW_SHIFT (14U) +#define RGX_CR_EVENT_STATUS_FAULT_FW_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_EVENT_STATUS_FAULT_FW_EN (0x00004000U) +#define RGX_CR_EVENT_STATUS_GPIO_ACK_SHIFT (13U) +#define RGX_CR_EVENT_STATUS_GPIO_ACK_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_EVENT_STATUS_GPIO_ACK_EN (0x00002000U) +#define RGX_CR_EVENT_STATUS_GPIO_REQ_SHIFT (12U) +#define RGX_CR_EVENT_STATUS_GPIO_REQ_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_EVENT_STATUS_GPIO_REQ_EN (0x00001000U) +#define RGX_CR_EVENT_STATUS_POWER_ABORT_SHIFT (11U) +#define RGX_CR_EVENT_STATUS_POWER_ABORT_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_EVENT_STATUS_POWER_ABORT_EN (0x00000800U) +#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_SHIFT (10U) +#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN (0x00000400U) +#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_SHIFT (9U) +#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN (0x00000200U) +#define RGX_CR_EVENT_STATUS_PM_FRAG_DONE_SHIFT (8U) +#define RGX_CR_EVENT_STATUS_PM_FRAG_DONE_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_EVENT_STATUS_PM_FRAG_DONE_EN (0x00000100U) +#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_SHIFT (7U) +#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU) +#define 
RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_EN (0x00000080U) +#define RGX_CR_EVENT_STATUS_TA_TERMINATE_SHIFT (6U) +#define RGX_CR_EVENT_STATUS_TA_TERMINATE_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_EVENT_STATUS_TA_TERMINATE_EN (0x00000040U) +#define RGX_CR_EVENT_STATUS_TA_FINISHED_SHIFT (5U) +#define RGX_CR_EVENT_STATUS_TA_FINISHED_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_EVENT_STATUS_TA_FINISHED_EN (0x00000020U) +#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_SHIFT (4U) +#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_EN (0x00000010U) +#define RGX_CR_EVENT_STATUS_IPP_END_RENDER_SENT_SHIFT (4U) +#define RGX_CR_EVENT_STATUS_IPP_END_RENDER_SENT_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_EVENT_STATUS_IPP_END_RENDER_SENT_EN (0x00000010U) +#define RGX_CR_EVENT_STATUS_ISP_END_RENDER_SHIFT (3U) +#define RGX_CR_EVENT_STATUS_ISP_END_RENDER_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_EVENT_STATUS_ISP_END_RENDER_EN (0x00000008U) +#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_SHIFT (2U) +#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_EN (0x00000004U) +#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_SHIFT (1U) +#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_EN (0x00000002U) +#define RGX_CR_EVENT_STATUS_TE_END_SHIFT (1U) +#define RGX_CR_EVENT_STATUS_TE_END_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_EVENT_STATUS_TE_END_EN (0x00000002U) +#define RGX_CR_EVENT_STATUS_FAULT_GPU_SHIFT (0U) +#define RGX_CR_EVENT_STATUS_FAULT_GPU_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_EVENT_STATUS_FAULT_GPU_EN (0x00000001U) + + +/* + Register RGX_CR_TIMER +*/ +#define RGX_CR_TIMER (0x0160U) +#define RGX_CR_TIMER_MASKFULL (IMG_UINT64_C(0x8000FFFFFFFFFFFF)) +#define RGX_CR_TIMER_BIT31_SHIFT (63U) +#define RGX_CR_TIMER_BIT31_CLRMSK (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) +#define RGX_CR_TIMER_BIT31_EN (IMG_UINT64_C(0x8000000000000000)) +#define 
RGX_CR_TIMER_VALUE_SHIFT (0U) +#define RGX_CR_TIMER_VALUE_CLRMSK (IMG_UINT64_C(0xFFFF000000000000)) + + +/* + Register RGX_CR_JONES_RAM_STATUS +*/ +#define RGX_CR_JONES_RAM_STATUS (0x1148U) +#define RGX_CR_JONES_RAM_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_JONES_RAM_STATUS_GARTEN_SHIFT (8U) +#define RGX_CR_JONES_RAM_STATUS_GARTEN_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_JONES_RAM_STATUS_GARTEN_EN (0x00000100U) +#define RGX_CR_JONES_RAM_STATUS_TDM_SHIFT (7U) +#define RGX_CR_JONES_RAM_STATUS_TDM_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_JONES_RAM_STATUS_TDM_EN (0x00000080U) +#define RGX_CR_JONES_RAM_STATUS_VERTEX_SHIFT (6U) +#define RGX_CR_JONES_RAM_STATUS_VERTEX_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_JONES_RAM_STATUS_VERTEX_EN (0x00000040U) +#define RGX_CR_JONES_RAM_STATUS_PIXEL_SHIFT (5U) +#define RGX_CR_JONES_RAM_STATUS_PIXEL_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_JONES_RAM_STATUS_PIXEL_EN (0x00000020U) +#define RGX_CR_JONES_RAM_STATUS_COMPUTE_SHIFT (4U) +#define RGX_CR_JONES_RAM_STATUS_COMPUTE_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_JONES_RAM_STATUS_COMPUTE_EN (0x00000010U) +#define RGX_CR_JONES_RAM_STATUS_FBCDC_SHIFT (3U) +#define RGX_CR_JONES_RAM_STATUS_FBCDC_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_JONES_RAM_STATUS_FBCDC_EN (0x00000008U) +#define RGX_CR_JONES_RAM_STATUS_PM_SHIFT (2U) +#define RGX_CR_JONES_RAM_STATUS_PM_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_JONES_RAM_STATUS_PM_EN (0x00000004U) +#define RGX_CR_JONES_RAM_STATUS_BIF_SHIFT (1U) +#define RGX_CR_JONES_RAM_STATUS_BIF_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_JONES_RAM_STATUS_BIF_EN (0x00000002U) +#define RGX_CR_JONES_RAM_STATUS_SLC_SHIFT (0U) +#define RGX_CR_JONES_RAM_STATUS_SLC_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_JONES_RAM_STATUS_SLC_EN (0x00000001U) + + +/* + Register RGX_CR_JONES_RAM_INIT_KICK +*/ +#define RGX_CR_JONES_RAM_INIT_KICK (0x1158U) +#define RGX_CR_JONES_RAM_INIT_KICK_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_JONES_RAM_INIT_KICK_GARTEN_SHIFT (8U) +#define 
RGX_CR_JONES_RAM_INIT_KICK_GARTEN_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_JONES_RAM_INIT_KICK_GARTEN_EN (0x00000100U) +#define RGX_CR_JONES_RAM_INIT_KICK_TDM_SHIFT (7U) +#define RGX_CR_JONES_RAM_INIT_KICK_TDM_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_JONES_RAM_INIT_KICK_TDM_EN (0x00000080U) +#define RGX_CR_JONES_RAM_INIT_KICK_VERTEX_SHIFT (6U) +#define RGX_CR_JONES_RAM_INIT_KICK_VERTEX_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_JONES_RAM_INIT_KICK_VERTEX_EN (0x00000040U) +#define RGX_CR_JONES_RAM_INIT_KICK_PIXEL_SHIFT (5U) +#define RGX_CR_JONES_RAM_INIT_KICK_PIXEL_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_JONES_RAM_INIT_KICK_PIXEL_EN (0x00000020U) +#define RGX_CR_JONES_RAM_INIT_KICK_COMPUTE_SHIFT (4U) +#define RGX_CR_JONES_RAM_INIT_KICK_COMPUTE_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_JONES_RAM_INIT_KICK_COMPUTE_EN (0x00000010U) +#define RGX_CR_JONES_RAM_INIT_KICK_FBCDC_SHIFT (3U) +#define RGX_CR_JONES_RAM_INIT_KICK_FBCDC_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_JONES_RAM_INIT_KICK_FBCDC_EN (0x00000008U) +#define RGX_CR_JONES_RAM_INIT_KICK_PM_SHIFT (2U) +#define RGX_CR_JONES_RAM_INIT_KICK_PM_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_JONES_RAM_INIT_KICK_PM_EN (0x00000004U) +#define RGX_CR_JONES_RAM_INIT_KICK_BIF_SHIFT (1U) +#define RGX_CR_JONES_RAM_INIT_KICK_BIF_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_JONES_RAM_INIT_KICK_BIF_EN (0x00000002U) +#define RGX_CR_JONES_RAM_INIT_KICK_SLC_SHIFT (0U) +#define RGX_CR_JONES_RAM_INIT_KICK_SLC_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_JONES_RAM_INIT_KICK_SLC_EN (0x00000001U) + + +/* + Register RGX_CR_PM_PARTIAL_RENDER_ENABLE +*/ +#define RGX_CR_PM_PARTIAL_RENDER_ENABLE (0x0338U) +#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_SHIFT (0U) +#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_EN (0x00000001U) + + +/* + Register RGX_CR_CDM_CONTEXT_STORE_STATUS +*/ +#define RGX_CR_CDM_CONTEXT_STORE_STATUS (0x04A0U) +#define 
RGX_CR_CDM_CONTEXT_STORE_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN (0x00000002U) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT (0U) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_EN (0x00000001U) + + +/* + Register RGX_CR_CDM_CONTEXT_PDS0 +*/ +#define RGX_CR_CDM_CONTEXT_PDS0 (0x04A8U) +#define RGX_CR_CDM_CONTEXT_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) +#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT (36U) +#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSIZE (16U) +#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_SHIFT (4U) +#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSIZE (16U) + + +/* + Register RGX_CR_CDM_CONTEXT_PDS1 +*/ +#define RGX_CR_CDM_CONTEXT_PDS1 (0x04B0U) +#define RGX_CR_CDM_CONTEXT_PDS1_MASKFULL (IMG_UINT64_C(0x00000001FFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_SHIFT (32U) +#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_SHIFT (31U) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_SHIFT (30U) +#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_EN (IMG_UINT64_C(0x0000000040000000)) 
+#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_SHIFT (23U) +#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC07FFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_ALIGNSHIFT (1U) +#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_ALIGNSIZE (2U) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_TYPE_SHIFT (22U) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_TYPE_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_SIZE_SHIFT (11U) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC007FF)) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_SIZE_ALIGNSHIFT (4U) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_SIZE_ALIGNSIZE (16U) +#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_SHIFT (6U) +#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF83F)) +#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_ALIGNSHIFT (1U) +#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_ALIGNSIZE (2U) +#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_SHIFT (0U) +#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC0)) +#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_ALIGNSHIFT (2U) +#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_ALIGNSIZE (4U) + + +/* + Register RGX_CR_CDM_CONTEXT_LOAD_PDS0 +*/ +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0 (0x04D8U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_SHIFT (36U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSIZE (16U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_SHIFT (4U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSHIFT (4U) +#define 
RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSIZE (16U) + + +/* + Register RGX_CR_CDM_CONTEXT_LOAD_PDS1 +*/ +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1 (0x04E0U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_MASKFULL (IMG_UINT64_C(0x00000001BFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_SHIFT (32U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_SHIFT (31U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_SHIFT (23U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC07FFFFF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_ALIGNSHIFT (1U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_ALIGNSIZE (2U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_TYPE_SHIFT (22U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_TYPE_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_SIZE_SHIFT (11U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC007FF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_SIZE_ALIGNSHIFT (6U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_SIZE_ALIGNSIZE (64U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_SHIFT (6U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF83F)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_ALIGNSHIFT (1U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_ALIGNSIZE (2U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_SHIFT (0U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC0)) +#define 
RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_ALIGNSHIFT (2U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_ALIGNSIZE (4U) + + +/* + Register RGX_CR_CDM_TERMINATE_PDS +*/ +#define RGX_CR_CDM_TERMINATE_PDS (0x04B8U) +#define RGX_CR_CDM_TERMINATE_PDS_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) +#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_SHIFT (36U) +#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSIZE (16U) +#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_SHIFT (4U) +#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSIZE (16U) + + +/* + Register RGX_CR_CDM_TERMINATE_PDS1 +*/ +#define RGX_CR_CDM_TERMINATE_PDS1 (0x04C0U) +#define RGX_CR_CDM_TERMINATE_PDS1_MASKFULL (IMG_UINT64_C(0x00000001BFFFFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_SHIFT (32U) +#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_SHIFT (31U) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_SHIFT (23U) +#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC07FFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_ALIGNSHIFT (1U) +#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_ALIGNSIZE (2U) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_TYPE_SHIFT (22U) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_TYPE_EN (IMG_UINT64_C(0x0000000000400000)) +#define 
RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_SIZE_SHIFT (11U) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC007FF)) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_SIZE_ALIGNSHIFT (6U) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_SIZE_ALIGNSIZE (64U) +#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_SHIFT (6U) +#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF83F)) +#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_ALIGNSHIFT (1U) +#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_ALIGNSIZE (2U) +#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_SHIFT (0U) +#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC0)) +#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_ALIGNSHIFT (2U) +#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_ALIGNSIZE (4U) + + +/* + Register group: RGX_CR_SCRATCH, with 16 repeats +*/ +#define RGX_CR_SCRATCH_REPEATCOUNT (16U) +/* + Register RGX_CR_SCRATCH0 +*/ +#define RGX_CR_SCRATCH0 (0x0800U) +#define RGX_CR_SCRATCH0_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH0_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH1 +*/ +#define RGX_CR_SCRATCH1 (0x0808U) +#define RGX_CR_SCRATCH1_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH1_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH2 +*/ +#define RGX_CR_SCRATCH2 (0x0810U) +#define RGX_CR_SCRATCH2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH2_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH3 +*/ +#define RGX_CR_SCRATCH3 (0x0818U) +#define RGX_CR_SCRATCH3_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH3_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH4 +*/ +#define RGX_CR_SCRATCH4 
(0x0820U) +#define RGX_CR_SCRATCH4_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH4_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH4_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH5 +*/ +#define RGX_CR_SCRATCH5 (0x0828U) +#define RGX_CR_SCRATCH5_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH5_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH5_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH6 +*/ +#define RGX_CR_SCRATCH6 (0x0830U) +#define RGX_CR_SCRATCH6_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH6_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH6_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH7 +*/ +#define RGX_CR_SCRATCH7 (0x0838U) +#define RGX_CR_SCRATCH7_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH7_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH7_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH8 +*/ +#define RGX_CR_SCRATCH8 (0x0840U) +#define RGX_CR_SCRATCH8_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH8_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH8_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH9 +*/ +#define RGX_CR_SCRATCH9 (0x0848U) +#define RGX_CR_SCRATCH9_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH9_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH9_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH10 +*/ +#define RGX_CR_SCRATCH10 (0x0850U) +#define RGX_CR_SCRATCH10_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH10_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH10_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH11 +*/ +#define RGX_CR_SCRATCH11 (0x0858U) +#define RGX_CR_SCRATCH11_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH11_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH11_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + 
+/* + Register RGX_CR_SCRATCH12 +*/ +#define RGX_CR_SCRATCH12 (0x0860U) +#define RGX_CR_SCRATCH12_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH12_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH12_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH13 +*/ +#define RGX_CR_SCRATCH13 (0x0868U) +#define RGX_CR_SCRATCH13_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH13_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH13_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH14 +*/ +#define RGX_CR_SCRATCH14 (0x0870U) +#define RGX_CR_SCRATCH14_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH14_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH14_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH15 +*/ +#define RGX_CR_SCRATCH15 (0x0878U) +#define RGX_CR_SCRATCH15_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH15_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH15_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register group: RGX_CR_OS0_SCRATCH, with 4 repeats +*/ +#define RGX_CR_OS0_SCRATCH_REPEATCOUNT (4U) +/* + Register RGX_CR_OS0_SCRATCH0 +*/ +#define RGX_CR_OS0_SCRATCH0 (0x0880U) +#define RGX_CR_OS0_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS0_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS0_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS0_SCRATCH1 +*/ +#define RGX_CR_OS0_SCRATCH1 (0x0888U) +#define RGX_CR_OS0_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS0_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS0_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS0_SCRATCH2 +*/ +#define RGX_CR_OS0_SCRATCH2 (0x0890U) +#define RGX_CR_OS0_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS0_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS0_SCRATCH2_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS0_SCRATCH3 +*/ +#define RGX_CR_OS0_SCRATCH3 (0x0898U) 
+#define RGX_CR_OS0_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS0_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS0_SCRATCH3_DATA_CLRMSK (0x00000000U) + + +/* + Register group: RGX_CR_OS1_SCRATCH, with 4 repeats +*/ +#define RGX_CR_OS1_SCRATCH_REPEATCOUNT (4U) +/* + Register RGX_CR_OS1_SCRATCH0 +*/ +#define RGX_CR_OS1_SCRATCH0 (0x10880U) +#define RGX_CR_OS1_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS1_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS1_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS1_SCRATCH1 +*/ +#define RGX_CR_OS1_SCRATCH1 (0x10888U) +#define RGX_CR_OS1_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS1_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS1_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS1_SCRATCH2 +*/ +#define RGX_CR_OS1_SCRATCH2 (0x10890U) +#define RGX_CR_OS1_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS1_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS1_SCRATCH2_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS1_SCRATCH3 +*/ +#define RGX_CR_OS1_SCRATCH3 (0x10898U) +#define RGX_CR_OS1_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS1_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS1_SCRATCH3_DATA_CLRMSK (0x00000000U) + + +/* + Register group: RGX_CR_OS2_SCRATCH, with 4 repeats +*/ +#define RGX_CR_OS2_SCRATCH_REPEATCOUNT (4U) +/* + Register RGX_CR_OS2_SCRATCH0 +*/ +#define RGX_CR_OS2_SCRATCH0 (0x20880U) +#define RGX_CR_OS2_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS2_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS2_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS2_SCRATCH1 +*/ +#define RGX_CR_OS2_SCRATCH1 (0x20888U) +#define RGX_CR_OS2_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS2_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS2_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS2_SCRATCH2 +*/ +#define RGX_CR_OS2_SCRATCH2 
(0x20890U) +#define RGX_CR_OS2_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS2_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS2_SCRATCH2_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS2_SCRATCH3 +*/ +#define RGX_CR_OS2_SCRATCH3 (0x20898U) +#define RGX_CR_OS2_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS2_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS2_SCRATCH3_DATA_CLRMSK (0x00000000U) + + +/* + Register group: RGX_CR_OS3_SCRATCH, with 4 repeats +*/ +#define RGX_CR_OS3_SCRATCH_REPEATCOUNT (4U) +/* + Register RGX_CR_OS3_SCRATCH0 +*/ +#define RGX_CR_OS3_SCRATCH0 (0x30880U) +#define RGX_CR_OS3_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS3_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS3_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS3_SCRATCH1 +*/ +#define RGX_CR_OS3_SCRATCH1 (0x30888U) +#define RGX_CR_OS3_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS3_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS3_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS3_SCRATCH2 +*/ +#define RGX_CR_OS3_SCRATCH2 (0x30890U) +#define RGX_CR_OS3_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS3_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS3_SCRATCH2_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS3_SCRATCH3 +*/ +#define RGX_CR_OS3_SCRATCH3 (0x30898U) +#define RGX_CR_OS3_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS3_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS3_SCRATCH3_DATA_CLRMSK (0x00000000U) + + +/* + Register group: RGX_CR_OS4_SCRATCH, with 4 repeats +*/ +#define RGX_CR_OS4_SCRATCH_REPEATCOUNT (4U) +/* + Register RGX_CR_OS4_SCRATCH0 +*/ +#define RGX_CR_OS4_SCRATCH0 (0x40880U) +#define RGX_CR_OS4_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS4_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS4_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS4_SCRATCH1 +*/ +#define 
RGX_CR_OS4_SCRATCH1 (0x40888U) +#define RGX_CR_OS4_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS4_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS4_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS4_SCRATCH2 +*/ +#define RGX_CR_OS4_SCRATCH2 (0x40890U) +#define RGX_CR_OS4_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS4_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS4_SCRATCH2_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS4_SCRATCH3 +*/ +#define RGX_CR_OS4_SCRATCH3 (0x40898U) +#define RGX_CR_OS4_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS4_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS4_SCRATCH3_DATA_CLRMSK (0x00000000U) + + +/* + Register group: RGX_CR_OS5_SCRATCH, with 4 repeats +*/ +#define RGX_CR_OS5_SCRATCH_REPEATCOUNT (4U) +/* + Register RGX_CR_OS5_SCRATCH0 +*/ +#define RGX_CR_OS5_SCRATCH0 (0x50880U) +#define RGX_CR_OS5_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS5_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS5_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS5_SCRATCH1 +*/ +#define RGX_CR_OS5_SCRATCH1 (0x50888U) +#define RGX_CR_OS5_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS5_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS5_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS5_SCRATCH2 +*/ +#define RGX_CR_OS5_SCRATCH2 (0x50890U) +#define RGX_CR_OS5_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS5_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS5_SCRATCH2_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS5_SCRATCH3 +*/ +#define RGX_CR_OS5_SCRATCH3 (0x50898U) +#define RGX_CR_OS5_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS5_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS5_SCRATCH3_DATA_CLRMSK (0x00000000U) + + +/* + Register group: RGX_CR_OS6_SCRATCH, with 4 repeats +*/ +#define RGX_CR_OS6_SCRATCH_REPEATCOUNT (4U) +/* + Register RGX_CR_OS6_SCRATCH0 +*/ 
+#define RGX_CR_OS6_SCRATCH0 (0x60880U) +#define RGX_CR_OS6_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS6_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS6_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS6_SCRATCH1 +*/ +#define RGX_CR_OS6_SCRATCH1 (0x60888U) +#define RGX_CR_OS6_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS6_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS6_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS6_SCRATCH2 +*/ +#define RGX_CR_OS6_SCRATCH2 (0x60890U) +#define RGX_CR_OS6_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS6_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS6_SCRATCH2_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS6_SCRATCH3 +*/ +#define RGX_CR_OS6_SCRATCH3 (0x60898U) +#define RGX_CR_OS6_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS6_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS6_SCRATCH3_DATA_CLRMSK (0x00000000U) + + +/* + Register group: RGX_CR_OS7_SCRATCH, with 4 repeats +*/ +#define RGX_CR_OS7_SCRATCH_REPEATCOUNT (4U) +/* + Register RGX_CR_OS7_SCRATCH0 +*/ +#define RGX_CR_OS7_SCRATCH0 (0x70880U) +#define RGX_CR_OS7_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS7_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS7_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS7_SCRATCH1 +*/ +#define RGX_CR_OS7_SCRATCH1 (0x70888U) +#define RGX_CR_OS7_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS7_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS7_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS7_SCRATCH2 +*/ +#define RGX_CR_OS7_SCRATCH2 (0x70890U) +#define RGX_CR_OS7_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS7_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS7_SCRATCH2_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS7_SCRATCH3 +*/ +#define RGX_CR_OS7_SCRATCH3 (0x70898U) +#define RGX_CR_OS7_SCRATCH3_MASKFULL 
(IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS7_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS7_SCRATCH3_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_META_SP_MSLVDATAX +*/ +#define RGX_CR_META_SP_MSLVDATAX__META_REGISTER_UNPACKED_ACCESSES (0x3000U) +#define RGX_CR_META_SP_MSLVDATAX__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVDATAX__META_REGISTER_UNPACKED_ACCESSES__MSLVDATAX_SHIFT (0U) +#define RGX_CR_META_SP_MSLVDATAX__META_REGISTER_UNPACKED_ACCESSES__MSLVDATAX_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_META_SP_MSLVDATAX +*/ +#define RGX_CR_META_SP_MSLVDATAX (0x0A00U) +#define RGX_CR_META_SP_MSLVDATAX_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_SHIFT (0U) +#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_META_SP_MSLVDATAT +*/ +#define RGX_CR_META_SP_MSLVDATAT__META_REGISTER_UNPACKED_ACCESSES (0x3040U) +#define RGX_CR_META_SP_MSLVDATAT__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVDATAT__META_REGISTER_UNPACKED_ACCESSES__MSLVDATAT_SHIFT (0U) +#define RGX_CR_META_SP_MSLVDATAT__META_REGISTER_UNPACKED_ACCESSES__MSLVDATAT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_META_SP_MSLVDATAT +*/ +#define RGX_CR_META_SP_MSLVDATAT (0x0A08U) +#define RGX_CR_META_SP_MSLVDATAT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_SHIFT (0U) +#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_META_SP_MSLVCTRL0 +*/ +#define RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES (0x3080U) +#define RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__ADDR_SHIFT (2U) +#define RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__ADDR_CLRMSK (0x00000003U) +#define 
RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__AUTOINCR_SHIFT (1U) +#define RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__AUTOINCR_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__AUTOINCR_EN (0x00000002U) +#define RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__RD_SHIFT (0U) +#define RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__RD_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__RD_EN (0x00000001U) + + +/* + Register RGX_CR_META_SP_MSLVCTRL0 +*/ +#define RGX_CR_META_SP_MSLVCTRL0 (0x0A10U) +#define RGX_CR_META_SP_MSLVCTRL0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVCTRL0_ADDR_SHIFT (2U) +#define RGX_CR_META_SP_MSLVCTRL0_ADDR_CLRMSK (0x00000003U) +#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_SHIFT (1U) +#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN (0x00000002U) +#define RGX_CR_META_SP_MSLVCTRL0_RD_SHIFT (0U) +#define RGX_CR_META_SP_MSLVCTRL0_RD_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVCTRL0_RD_EN (0x00000001U) + + +/* + Register RGX_CR_META_SP_MSLVCTRL1 +*/ +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES (0x30C0U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x00000000F7F4003F)) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__DEFERRTHREAD_SHIFT (30U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__DEFERRTHREAD_CLRMSK (0x3FFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__LOCK2_INTERLOCK_SHIFT (29U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__LOCK2_INTERLOCK_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__LOCK2_INTERLOCK_EN (0x20000000U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__ATOMIC_INTERLOCK_SHIFT (28U) +#define 
RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__ATOMIC_INTERLOCK_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__ATOMIC_INTERLOCK_EN (0x10000000U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_SHIFT (26U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN (0x04000000U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__COREMEM_IDLE_SHIFT (25U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__COREMEM_IDLE_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__COREMEM_IDLE_EN (0x02000000U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_SHIFT (24U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN (0x01000000U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__DEFERRID_SHIFT (21U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__DEFERRID_CLRMSK (0xFF1FFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__DEFERR_SHIFT (20U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__DEFERR_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__DEFERR_EN (0x00100000U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__WR_ACTIVE_SHIFT (18U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__WR_ACTIVE_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__WR_ACTIVE_EN (0x00040000U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__THREAD_SHIFT (4U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__THREAD_CLRMSK (0xFFFFFFCFU) +#define 
RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__TRANS_SIZE_SHIFT (2U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__TRANS_SIZE_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__BYTE_ROUND_SHIFT (0U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__BYTE_ROUND_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_META_SP_MSLVCTRL1 +*/ +#define RGX_CR_META_SP_MSLVCTRL1 (0x0A18U) +#define RGX_CR_META_SP_MSLVCTRL1_MASKFULL (IMG_UINT64_C(0x00000000F7F4003F)) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_SHIFT (30U) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_CLRMSK (0x3FFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_SHIFT (29U) +#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_EN (0x20000000U) +#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_SHIFT (28U) +#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_EN (0x10000000U) +#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_SHIFT (26U) +#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN (0x04000000U) +#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_SHIFT (25U) +#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_EN (0x02000000U) +#define RGX_CR_META_SP_MSLVCTRL1_READY_SHIFT (24U) +#define RGX_CR_META_SP_MSLVCTRL1_READY_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_READY_EN (0x01000000U) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_SHIFT (21U) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_CLRMSK (0xFF1FFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_SHIFT (20U) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_EN (0x00100000U) +#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_SHIFT (18U) +#define 
RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_EN (0x00040000U) +#define RGX_CR_META_SP_MSLVCTRL1_THREAD_SHIFT (4U) +#define RGX_CR_META_SP_MSLVCTRL1_THREAD_CLRMSK (0xFFFFFFCFU) +#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_SHIFT (2U) +#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_SHIFT (0U) +#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_META_SP_MSLVHANDSHKE +*/ +#define RGX_CR_META_SP_MSLVHANDSHKE__META_REGISTER_UNPACKED_ACCESSES (0x3280U) +#define RGX_CR_META_SP_MSLVHANDSHKE__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_META_SP_MSLVHANDSHKE__META_REGISTER_UNPACKED_ACCESSES__INPUT_SHIFT (2U) +#define RGX_CR_META_SP_MSLVHANDSHKE__META_REGISTER_UNPACKED_ACCESSES__INPUT_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_META_SP_MSLVHANDSHKE__META_REGISTER_UNPACKED_ACCESSES__OUTPUT_SHIFT (0U) +#define RGX_CR_META_SP_MSLVHANDSHKE__META_REGISTER_UNPACKED_ACCESSES__OUTPUT_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_META_SP_MSLVHANDSHKE +*/ +#define RGX_CR_META_SP_MSLVHANDSHKE (0x0A50U) +#define RGX_CR_META_SP_MSLVHANDSHKE_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_SHIFT (2U) +#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_SHIFT (0U) +#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_META_SP_MSLVT0KICK +*/ +#define RGX_CR_META_SP_MSLVT0KICK__META_REGISTER_UNPACKED_ACCESSES (0x3400U) +#define RGX_CR_META_SP_MSLVT0KICK__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT0KICK__META_REGISTER_UNPACKED_ACCESSES__MSLVT0KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT0KICK__META_REGISTER_UNPACKED_ACCESSES__MSLVT0KICK_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT0KICK +*/ 
+#define RGX_CR_META_SP_MSLVT0KICK (0x0A80U) +#define RGX_CR_META_SP_MSLVT0KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT0KICKI +*/ +#define RGX_CR_META_SP_MSLVT0KICKI__META_REGISTER_UNPACKED_ACCESSES (0x3440U) +#define RGX_CR_META_SP_MSLVT0KICKI__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT0KICKI__META_REGISTER_UNPACKED_ACCESSES__MSLVT0KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT0KICKI__META_REGISTER_UNPACKED_ACCESSES__MSLVT0KICKI_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT0KICKI +*/ +#define RGX_CR_META_SP_MSLVT0KICKI (0x0A88U) +#define RGX_CR_META_SP_MSLVT0KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT1KICK +*/ +#define RGX_CR_META_SP_MSLVT1KICK__META_REGISTER_UNPACKED_ACCESSES (0x3480U) +#define RGX_CR_META_SP_MSLVT1KICK__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT1KICK__META_REGISTER_UNPACKED_ACCESSES__MSLVT1KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT1KICK__META_REGISTER_UNPACKED_ACCESSES__MSLVT1KICK_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT1KICK +*/ +#define RGX_CR_META_SP_MSLVT1KICK (0x0A90U) +#define RGX_CR_META_SP_MSLVT1KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT1KICKI +*/ +#define RGX_CR_META_SP_MSLVT1KICKI__META_REGISTER_UNPACKED_ACCESSES (0x34C0U) +#define RGX_CR_META_SP_MSLVT1KICKI__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define 
RGX_CR_META_SP_MSLVT1KICKI__META_REGISTER_UNPACKED_ACCESSES__MSLVT1KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT1KICKI__META_REGISTER_UNPACKED_ACCESSES__MSLVT1KICKI_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT1KICKI +*/ +#define RGX_CR_META_SP_MSLVT1KICKI (0x0A98U) +#define RGX_CR_META_SP_MSLVT1KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT2KICK +*/ +#define RGX_CR_META_SP_MSLVT2KICK__META_REGISTER_UNPACKED_ACCESSES (0x3500U) +#define RGX_CR_META_SP_MSLVT2KICK__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT2KICK__META_REGISTER_UNPACKED_ACCESSES__MSLVT2KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT2KICK__META_REGISTER_UNPACKED_ACCESSES__MSLVT2KICK_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT2KICK +*/ +#define RGX_CR_META_SP_MSLVT2KICK (0x0AA0U) +#define RGX_CR_META_SP_MSLVT2KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT2KICKI +*/ +#define RGX_CR_META_SP_MSLVT2KICKI__META_REGISTER_UNPACKED_ACCESSES (0x3540U) +#define RGX_CR_META_SP_MSLVT2KICKI__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT2KICKI__META_REGISTER_UNPACKED_ACCESSES__MSLVT2KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT2KICKI__META_REGISTER_UNPACKED_ACCESSES__MSLVT2KICKI_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT2KICKI +*/ +#define RGX_CR_META_SP_MSLVT2KICKI (0x0AA8U) +#define RGX_CR_META_SP_MSLVT2KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_CLRMSK (0xFFFF0000U) + + +/* + Register 
RGX_CR_META_SP_MSLVT3KICK +*/ +#define RGX_CR_META_SP_MSLVT3KICK__META_REGISTER_UNPACKED_ACCESSES (0x3580U) +#define RGX_CR_META_SP_MSLVT3KICK__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT3KICK__META_REGISTER_UNPACKED_ACCESSES__MSLVT3KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT3KICK__META_REGISTER_UNPACKED_ACCESSES__MSLVT3KICK_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT3KICK +*/ +#define RGX_CR_META_SP_MSLVT3KICK (0x0AB0U) +#define RGX_CR_META_SP_MSLVT3KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT3KICKI +*/ +#define RGX_CR_META_SP_MSLVT3KICKI__META_REGISTER_UNPACKED_ACCESSES (0x35C0U) +#define RGX_CR_META_SP_MSLVT3KICKI__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT3KICKI__META_REGISTER_UNPACKED_ACCESSES__MSLVT3KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT3KICKI__META_REGISTER_UNPACKED_ACCESSES__MSLVT3KICKI_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT3KICKI +*/ +#define RGX_CR_META_SP_MSLVT3KICKI (0x0AB8U) +#define RGX_CR_META_SP_MSLVT3KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVRST +*/ +#define RGX_CR_META_SP_MSLVRST__META_REGISTER_UNPACKED_ACCESSES (0x3600U) +#define RGX_CR_META_SP_MSLVRST__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_SP_MSLVRST__META_REGISTER_UNPACKED_ACCESSES__SOFTRESET_SHIFT (0U) +#define RGX_CR_META_SP_MSLVRST__META_REGISTER_UNPACKED_ACCESSES__SOFTRESET_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVRST__META_REGISTER_UNPACKED_ACCESSES__SOFTRESET_EN (0x00000001U) + + +/* + Register 
RGX_CR_META_SP_MSLVRST +*/ +#define RGX_CR_META_SP_MSLVRST (0x0AC0U) +#define RGX_CR_META_SP_MSLVRST_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_SP_MSLVRST_SOFTRESET_SHIFT (0U) +#define RGX_CR_META_SP_MSLVRST_SOFTRESET_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVRST_SOFTRESET_EN (0x00000001U) + + +/* + Register RGX_CR_META_SP_MSLVIRQSTATUS +*/ +#define RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES (0x3640U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x000000000000000C)) +#define RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES__TRIGVECT3_SHIFT (3U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES__TRIGVECT3_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES__TRIGVECT3_EN (0x00000008U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES__TRIGVECT2_SHIFT (2U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES__TRIGVECT2_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES__TRIGVECT2_EN (0x00000004U) + + +/* + Register RGX_CR_META_SP_MSLVIRQSTATUS +*/ +#define RGX_CR_META_SP_MSLVIRQSTATUS (0x0AC8U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_MASKFULL (IMG_UINT64_C(0x000000000000000C)) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_SHIFT (3U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_EN (0x00000008U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_SHIFT (2U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN (0x00000004U) + + +/* + Register RGX_CR_META_SP_MSLVIRQENABLE +*/ +#define RGX_CR_META_SP_MSLVIRQENABLE__META_REGISTER_UNPACKED_ACCESSES (0x3680U) +#define RGX_CR_META_SP_MSLVIRQENABLE__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x000000000000000C)) +#define 
RGX_CR_META_SP_MSLVIRQENABLE__META_REGISTER_UNPACKED_ACCESSES__EVENT1_SHIFT (3U) +#define RGX_CR_META_SP_MSLVIRQENABLE__META_REGISTER_UNPACKED_ACCESSES__EVENT1_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_META_SP_MSLVIRQENABLE__META_REGISTER_UNPACKED_ACCESSES__EVENT1_EN (0x00000008U) +#define RGX_CR_META_SP_MSLVIRQENABLE__META_REGISTER_UNPACKED_ACCESSES__EVENT0_SHIFT (2U) +#define RGX_CR_META_SP_MSLVIRQENABLE__META_REGISTER_UNPACKED_ACCESSES__EVENT0_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_META_SP_MSLVIRQENABLE__META_REGISTER_UNPACKED_ACCESSES__EVENT0_EN (0x00000004U) + + +/* + Register RGX_CR_META_SP_MSLVIRQENABLE +*/ +#define RGX_CR_META_SP_MSLVIRQENABLE (0x0AD0U) +#define RGX_CR_META_SP_MSLVIRQENABLE_MASKFULL (IMG_UINT64_C(0x000000000000000C)) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_SHIFT (3U) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_EN (0x00000008U) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_SHIFT (2U) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_EN (0x00000004U) + + +/* + Register RGX_CR_META_SP_MSLVIRQLEVEL +*/ +#define RGX_CR_META_SP_MSLVIRQLEVEL__META_REGISTER_UNPACKED_ACCESSES (0x36C0U) +#define RGX_CR_META_SP_MSLVIRQLEVEL__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_SP_MSLVIRQLEVEL__META_REGISTER_UNPACKED_ACCESSES__MODE_SHIFT (0U) +#define RGX_CR_META_SP_MSLVIRQLEVEL__META_REGISTER_UNPACKED_ACCESSES__MODE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVIRQLEVEL__META_REGISTER_UNPACKED_ACCESSES__MODE_EN (0x00000001U) + + +/* + Register RGX_CR_META_SP_MSLVIRQLEVEL +*/ +#define RGX_CR_META_SP_MSLVIRQLEVEL (0x0AD8U) +#define RGX_CR_META_SP_MSLVIRQLEVEL_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_SHIFT (0U) +#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_EN (0x00000001U) + + +/* + 
Register RGX_CR_MTS_SCHEDULE +*/ +#define RGX_CR_MTS_SCHEDULE (0x0B00U) +#define RGX_CR_MTS_SCHEDULE_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_SCHEDULE1 +*/ +#define RGX_CR_MTS_SCHEDULE1 (0x10B00U) +#define RGX_CR_MTS_SCHEDULE1_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE1_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE1_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE1_HOST_BG_TIMER (0x00000000U) 
+#define RGX_CR_MTS_SCHEDULE1_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE1_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE1_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE1_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE1_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE1_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE1_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE1_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE1_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_SCHEDULE2 +*/ +#define RGX_CR_MTS_SCHEDULE2 (0x20B00U) +#define RGX_CR_MTS_SCHEDULE2_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE2_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE2_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE2_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT1 (0x00000040U) 
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE2_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE2_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE2_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE2_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE2_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE2_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE2_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE2_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_SCHEDULE3 +*/ +#define RGX_CR_MTS_SCHEDULE3 (0x30B00U) +#define RGX_CR_MTS_SCHEDULE3_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE3_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE3_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE3_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE3_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE3_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE3_CONTEXT_BGCTX 
(0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE3_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE3_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE3_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE3_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE3_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_SCHEDULE4 +*/ +#define RGX_CR_MTS_SCHEDULE4 (0x40B00U) +#define RGX_CR_MTS_SCHEDULE4_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE4_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE4_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE4_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE4_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE4_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE4_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE4_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE4_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE4_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_TASK_COUNTED 
(0x00000010U) +#define RGX_CR_MTS_SCHEDULE4_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE4_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_SCHEDULE5 +*/ +#define RGX_CR_MTS_SCHEDULE5 (0x50B00U) +#define RGX_CR_MTS_SCHEDULE5_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE5_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE5_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE5_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE5_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE5_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE5_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE5_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE5_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE5_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE5_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE5_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM2 (0x00000002U) +#define 
RGX_CR_MTS_SCHEDULE5_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_SCHEDULE6 +*/ +#define RGX_CR_MTS_SCHEDULE6 (0x60B00U) +#define RGX_CR_MTS_SCHEDULE6_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE6_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE6_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE6_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE6_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE6_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE6_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE6_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE6_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE6_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE6_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE6_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM7 (0x00000007U) +#define 
RGX_CR_MTS_SCHEDULE6_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_SCHEDULE7 +*/ +#define RGX_CR_MTS_SCHEDULE7 (0x70B00U) +#define RGX_CR_MTS_SCHEDULE7_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE7_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE7_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE7_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE7_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE7_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE7_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE7_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE7_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE7_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE7_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE7_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE7_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE7_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE7_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC +*/ +#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC (0x0B30U) +#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define 
RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U) +#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC +*/ +#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC (0x0B38U) +#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U) +#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC +*/ +#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC (0x0B40U) +#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U) +#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC +*/ +#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC (0x0B48U) +#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U) +#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_MTS_GARTEN_WRAPPER_CONFIG +*/ +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG (0x0B50U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S8_CPR__MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_SHIFT (1U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_EN (0x00000002U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_SHIFT (0U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META (0x00000000U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_MTS (0x00000001U) + + +/* + Register RGX_CR_MTS_INTCTX +*/ +#define 
RGX_CR_MTS_INTCTX (0x0B98U) +#define RGX_CR_MTS_INTCTX_MASKFULL (IMG_UINT64_C(0x000000003FC0FFFF)) +#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_SHIFT (22U) +#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_CLRMSK (0xC03FFFFFU) +#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_SHIFT (8U) +#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_CLRMSK (0xFFFF00FFU) +#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_SHIFT (0U) +#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_MTS_BGCTX +*/ +#define RGX_CR_MTS_BGCTX (0x0BA0U) +#define RGX_CR_MTS_BGCTX_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_SHIFT (0U) +#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE +*/ +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE (0x0BA8U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_SHIFT (56U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_CLRMSK (IMG_UINT64_C(0x00FFFFFFFFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_SHIFT (48U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_CLRMSK (IMG_UINT64_C(0xFF00FFFFFFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_SHIFT (40U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_CLRMSK (IMG_UINT64_C(0xFFFF00FFFFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_SHIFT (32U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_CLRMSK (IMG_UINT64_C(0xFFFFFF00FFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_SHIFT (24U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_SHIFT (16U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_SHIFT (8U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) +#define 
RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_SHIFT (0U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_MTS_GPU_INT_STATUS +*/ +#define RGX_CR_MTS_GPU_INT_STATUS (0x0BB0U) +#define RGX_CR_MTS_GPU_INT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_SHIFT (0U) +#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_IRQ_OS0_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS0_EVENT_STATUS (0x0BD0U) +#define RGX_CR_IRQ_OS0_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS0_EVENT_CLEAR +*/ +#define RGX_CR_IRQ_OS0_EVENT_CLEAR (0x0BE0U) +#define RGX_CR_IRQ_OS0_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS1_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS1_EVENT_STATUS (0x10BD0U) +#define RGX_CR_IRQ_OS1_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS1_EVENT_CLEAR +*/ +#define RGX_CR_IRQ_OS1_EVENT_CLEAR (0x10BE0U) +#define RGX_CR_IRQ_OS1_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS2_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS2_EVENT_STATUS (0x20BD0U) +#define RGX_CR_IRQ_OS2_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS2_EVENT_CLEAR +*/ +#define 
RGX_CR_IRQ_OS2_EVENT_CLEAR (0x20BE0U) +#define RGX_CR_IRQ_OS2_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS3_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS3_EVENT_STATUS (0x30BD0U) +#define RGX_CR_IRQ_OS3_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS3_EVENT_CLEAR +*/ +#define RGX_CR_IRQ_OS3_EVENT_CLEAR (0x30BE0U) +#define RGX_CR_IRQ_OS3_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS4_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS4_EVENT_STATUS (0x40BD0U) +#define RGX_CR_IRQ_OS4_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS4_EVENT_CLEAR +*/ +#define RGX_CR_IRQ_OS4_EVENT_CLEAR (0x40BE0U) +#define RGX_CR_IRQ_OS4_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS5_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS5_EVENT_STATUS (0x50BD0U) +#define RGX_CR_IRQ_OS5_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS5_EVENT_CLEAR +*/ +#define RGX_CR_IRQ_OS5_EVENT_CLEAR (0x50BE0U) +#define RGX_CR_IRQ_OS5_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define 
RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS6_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS6_EVENT_STATUS (0x60BD0U) +#define RGX_CR_IRQ_OS6_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS6_EVENT_CLEAR +*/ +#define RGX_CR_IRQ_OS6_EVENT_CLEAR (0x60BE0U) +#define RGX_CR_IRQ_OS6_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS7_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS7_EVENT_STATUS (0x70BD0U) +#define RGX_CR_IRQ_OS7_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS7_EVENT_CLEAR +*/ +#define RGX_CR_IRQ_OS7_EVENT_CLEAR (0x70BE0U) +#define RGX_CR_IRQ_OS7_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_META_BOOT +*/ +#define RGX_CR_META_BOOT (0x0BF8U) +#define RGX_CR_META_BOOT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_BOOT_MODE_SHIFT (0U) +#define RGX_CR_META_BOOT_MODE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_BOOT_MODE_EN (0x00000001U) + + +/* + Register RGX_CR_GARTEN_SLC +*/ +#define RGX_CR_GARTEN_SLC (0x0BB8U) +#define RGX_CR_GARTEN_SLC_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_SHIFT (0U) +#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_EN (0x00000001U) + + +/* + Register RGX_CR_ISP_RENDER +*/ +#define RGX_CR_ISP_RENDER (0x0F08U) +#define RGX_CR_ISP_RENDER__IPP_FAST_RENDER__MASKFULL 
(IMG_UINT64_C(0x000000000003FFFF)) +#define RGX_CR_ISP_RENDER__CS3DL_4__MASKFULL (IMG_UINT64_C(0x000000000007FF53)) +#define RGX_CR_ISP_RENDER_MASKFULL (IMG_UINT64_C(0x000000000003FFF0)) +#define RGX_CR_ISP_RENDER_TILES_PER_ISP_SHIFT (16U) +#define RGX_CR_ISP_RENDER_TILES_PER_ISP_CLRMSK (0xFFFCFFFFU) +#define RGX_CR_ISP_RENDER__CS3DL_4__TILES_PER_ISP_SHIFT (16U) +#define RGX_CR_ISP_RENDER__CS3DL_4__TILES_PER_ISP_CLRMSK (0xFFF8FFFFU) +#define RGX_CR_ISP_RENDER_TILE_LIMIT_HIGH_SHIFT (12U) +#define RGX_CR_ISP_RENDER_TILE_LIMIT_HIGH_CLRMSK (0xFFFF0FFFU) +#define RGX_CR_ISP_RENDER_TILE_LIMIT_LOW_SHIFT (8U) +#define RGX_CR_ISP_RENDER_TILE_LIMIT_LOW_CLRMSK (0xFFFFF0FFU) +#define RGX_CR_ISP_RENDER_TILE_STARVATION_SHIFT (7U) +#define RGX_CR_ISP_RENDER_TILE_STARVATION_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_ISP_RENDER_TILE_STARVATION_EN (0x00000080U) +#define RGX_CR_ISP_RENDER_PROCESS_EMPTY_TILES_SHIFT (6U) +#define RGX_CR_ISP_RENDER_PROCESS_EMPTY_TILES_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_ISP_RENDER_PROCESS_EMPTY_TILES_EN (0x00000040U) +#define RGX_CR_ISP_RENDER_RESUME_SHIFT (4U) +#define RGX_CR_ISP_RENDER_RESUME_CLRMSK (0xFFFFFFCFU) +#define RGX_CR_ISP_RENDER_RESUME_CONTEXT_NONE (0x00000000U) +#define RGX_CR_ISP_RENDER_RESUME_CONTEXT_TILE (0x00000010U) +#define RGX_CR_ISP_RENDER_RESUME_CONTEXT_PBLK (0x00000030U) +#define RGX_CR_ISP_RENDER__CS3DL_4__RESUME_SHIFT (4U) +#define RGX_CR_ISP_RENDER__CS3DL_4__RESUME_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_ISP_RENDER__CS3DL_4__RESUME_CONTEXT_NONE (0x00000000U) +#define RGX_CR_ISP_RENDER__CS3DL_4__RESUME_CONTEXT_RESUME (0x00000010U) +#define RGX_CR_ISP_RENDER_DIR_SHIFT (2U) +#define RGX_CR_ISP_RENDER_DIR_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_ISP_RENDER_DIR_TL2BR (0x00000000U) +#define RGX_CR_ISP_RENDER_DIR_TR2BL (0x00000004U) +#define RGX_CR_ISP_RENDER_DIR_BL2TR (0x00000008U) +#define RGX_CR_ISP_RENDER_DIR_BR2TL (0x0000000CU) +#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_SHIFT (1U) +#define 
RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_EN (0x00000002U) +#define RGX_CR_ISP_RENDER_MODE_SHIFT (0U) +#define RGX_CR_ISP_RENDER_MODE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_ISP_RENDER_MODE_NORM (0x00000000U) +#define RGX_CR_ISP_RENDER_MODE_FAST_2D (0x00000002U) +#define RGX_CR_ISP_RENDER_MODE_FAST_SCALE (0x00000003U) +#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_SHIFT (0U) +#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_EN (0x00000001U) + + +/* + Register RGX_CR_ISP_CTL +*/ +#define RGX_CR_ISP_CTL (0x0FB0U) +#define RGX_CR_ISP_CTL_MASKFULL (IMG_UINT64_C(0x00000000007BF8FF)) +#define RGX_CR_ISP_CTL_DBUFFER_COUNT_SHIFT (20U) +#define RGX_CR_ISP_CTL_DBUFFER_COUNT_CLRMSK (0xFF8FFFFFU) +#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_SHIFT (19U) +#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_EN (0x00080000U) +#define RGX_CR_ISP_CTL_UPFRONT_DEPTH_DISABLE_SHIFT (17U) +#define RGX_CR_ISP_CTL_UPFRONT_DEPTH_DISABLE_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_ISP_CTL_UPFRONT_DEPTH_DISABLE_EN (0x00020000U) +#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ONE_SHIFT (16U) +#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ONE_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ONE_EN (0x00010000U) +#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ZERO_SHIFT (15U) +#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ZERO_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ZERO_EN (0x00008000U) +#define RGX_CR_ISP_CTL_LINE_SAMPLE_SHIFT (14U) +#define RGX_CR_ISP_CTL_LINE_SAMPLE_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_ISP_CTL_LINE_SAMPLE_EN (0x00004000U) +#define RGX_CR_ISP_CTL_LINE_STYLE_SHIFT (13U) +#define RGX_CR_ISP_CTL_LINE_STYLE_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_ISP_CTL_LINE_STYLE_EN (0x00002000U) +#define RGX_CR_ISP_CTL_LINE_STYLE_SINGLE_PIXEL_SHIFT (12U) +#define RGX_CR_ISP_CTL_LINE_STYLE_SINGLE_PIXEL_CLRMSK (0xFFFFEFFFU) 
+#define RGX_CR_ISP_CTL_LINE_STYLE_SINGLE_PIXEL_EN (0x00001000U) +#define RGX_CR_ISP_CTL_DBIAS_IS_INT_SHIFT (11U) +#define RGX_CR_ISP_CTL_DBIAS_IS_INT_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_ISP_CTL_DBIAS_IS_INT_EN (0x00000800U) +#define RGX_CR_ISP_CTL_UPASS_START_SHIFT (0U) +#define RGX_CR_ISP_CTL_UPASS_START_CLRMSK (0xFFFFFF00U) + + +/* + Register group: RGX_CR_MEM_TILING_CFG, with 8 repeats +*/ +#define RGX_CR_MEM_TILING_CFG_REPEATCOUNT (8U) +/* + Register RGX_CR_MEM_TILING_CFG0 +*/ +#define RGX_CR_MEM_TILING_CFG0 (0x12D8U) +#define RGX_CR_MEM_TILING_CFG0_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG0_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG0_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG0_ENABLE_SHIFT (60U) +#define RGX_CR_MEM_TILING_CFG0_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG0_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MEM_TILING_CFG0_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG0_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG0_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG0_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG0_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG0_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_MEM_TILING_CFG0_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG0_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_MEM_TILING_CFG1 +*/ +#define RGX_CR_MEM_TILING_CFG1 (0x12E0U) +#define RGX_CR_MEM_TILING_CFG1_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG1_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG1_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG1_ENABLE_SHIFT (60U) +#define RGX_CR_MEM_TILING_CFG1_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG1_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define 
RGX_CR_MEM_TILING_CFG1_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG1_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG1_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG1_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG1_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG1_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_MEM_TILING_CFG1_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG1_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_MEM_TILING_CFG2 +*/ +#define RGX_CR_MEM_TILING_CFG2 (0x12E8U) +#define RGX_CR_MEM_TILING_CFG2_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG2_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG2_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG2_ENABLE_SHIFT (60U) +#define RGX_CR_MEM_TILING_CFG2_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG2_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MEM_TILING_CFG2_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG2_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG2_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG2_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG2_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG2_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_MEM_TILING_CFG2_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG2_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_MEM_TILING_CFG3 +*/ +#define RGX_CR_MEM_TILING_CFG3 (0x12F0U) +#define RGX_CR_MEM_TILING_CFG3_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG3_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG3_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG3_ENABLE_SHIFT (60U) +#define RGX_CR_MEM_TILING_CFG3_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define 
RGX_CR_MEM_TILING_CFG3_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MEM_TILING_CFG3_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG3_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG3_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG3_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG3_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG3_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_MEM_TILING_CFG3_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG3_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_MEM_TILING_CFG4 +*/ +#define RGX_CR_MEM_TILING_CFG4 (0x12F8U) +#define RGX_CR_MEM_TILING_CFG4_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG4_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG4_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG4_ENABLE_SHIFT (60U) +#define RGX_CR_MEM_TILING_CFG4_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG4_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MEM_TILING_CFG4_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG4_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG4_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG4_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG4_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG4_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_MEM_TILING_CFG4_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG4_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_MEM_TILING_CFG5 +*/ +#define RGX_CR_MEM_TILING_CFG5 (0x1300U) +#define RGX_CR_MEM_TILING_CFG5_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG5_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG5_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG5_ENABLE_SHIFT (60U) +#define 
RGX_CR_MEM_TILING_CFG5_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG5_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MEM_TILING_CFG5_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG5_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG5_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG5_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG5_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG5_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_MEM_TILING_CFG5_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG5_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_MEM_TILING_CFG6 +*/ +#define RGX_CR_MEM_TILING_CFG6 (0x1308U) +#define RGX_CR_MEM_TILING_CFG6_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG6_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG6_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG6_ENABLE_SHIFT (60U) +#define RGX_CR_MEM_TILING_CFG6_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG6_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MEM_TILING_CFG6_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG6_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG6_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG6_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG6_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG6_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_MEM_TILING_CFG6_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG6_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_MEM_TILING_CFG7 +*/ +#define RGX_CR_MEM_TILING_CFG7 (0x1310U) +#define RGX_CR_MEM_TILING_CFG7_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG7_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG7_XSTRIDE_CLRMSK 
(IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG7_ENABLE_SHIFT (60U) +#define RGX_CR_MEM_TILING_CFG7_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG7_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MEM_TILING_CFG7_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG7_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG7_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG7_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG7_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG7_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_MEM_TILING_CFG7_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG7_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_USC_TIMER +*/ +#define RGX_CR_USC_TIMER (0x46C8U) +#define RGX_CR_USC_TIMER_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_USC_TIMER_CNT_SHIFT (0U) +#define RGX_CR_USC_TIMER_CNT_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_USC_TIMER_CNT +*/ +#define RGX_CR_USC_TIMER_CNT (0x46D0U) +#define RGX_CR_USC_TIMER_CNT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_USC_TIMER_CNT_RESET_SHIFT (0U) +#define RGX_CR_USC_TIMER_CNT_RESET_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_USC_TIMER_CNT_RESET_EN (0x00000001U) + + +/* + Register RGX_CR_TE_CHECKSUM +*/ +#define RGX_CR_TE_CHECKSUM (0x5110U) +#define RGX_CR_TE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TE_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_TE_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_USC_UVB_CHECKSUM +*/ +#define RGX_CR_USC_UVB_CHECKSUM (0x5118U) +#define RGX_CR_USC_UVB_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_USC_UVB_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_USC_UVB_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_TE_TMA_CHECKSUM +*/ +#define RGX_CR_TE_TMA_CHECKSUM (0x5128U) +#define 
RGX_CR_TE_TMA_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TE_TMA_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_TE_TMA_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_CDM_PDS_CHECKSUM +*/ +#define RGX_CR_CDM_PDS_CHECKSUM (0x5130U) +#define RGX_CR_CDM_PDS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_CDM_PDS_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_CDM_PDS_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_VCE_CHECKSUM +*/ +#define RGX_CR_VCE_CHECKSUM (0x5030U) +#define RGX_CR_VCE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_VCE_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_VCE_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_ISP_PDS_CHECKSUM +*/ +#define RGX_CR_ISP_PDS_CHECKSUM (0x5038U) +#define RGX_CR_ISP_PDS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_ISP_TPF_CHECKSUM +*/ +#define RGX_CR_ISP_TPF_CHECKSUM (0x5040U) +#define RGX_CR_ISP_TPF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_TFPU_CHECKSUM +*/ +#define RGX_CR_TFPU_CHECKSUM (0x5048U) +#define RGX_CR_TFPU_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TFPU_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_TFPU_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_ZLS_CHECKSUM +*/ +#define RGX_CR_ZLS_CHECKSUM (0x5050U) +#define RGX_CR_ZLS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_ZLS_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_ZLS_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PBE_CHECKSUM_3D +*/ +#define RGX_CR_PBE_CHECKSUM_3D (0x5058U) +#define RGX_CR_PBE_CHECKSUM_3D_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PBE_CHECKSUM_3D_VALUE_SHIFT (0U) 
+#define RGX_CR_PBE_CHECKSUM_3D_VALUE_CLRMSK (0x00000000U) + + + + +/* + Register RGX_CR_PBE_CHECKSUM +*/ +#define RGX_CR_PBE_CHECKSUM (0x5058U) +#define RGX_CR_PBE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PBE_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_PBE_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PDS_DOUTM_STM_CHECKSUM +*/ +#define RGX_CR_PDS_DOUTM_STM_CHECKSUM (0x5060U) +#define RGX_CR_PDS_DOUTM_STM_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PDS_DOUTM_STM_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_PDS_DOUTM_STM_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_IFPU_ISP_CHECKSUM +*/ +#define RGX_CR_IFPU_ISP_CHECKSUM (0x5068U) +#define RGX_CR_IFPU_ISP_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PPP_CLIP_CHECKSUM +*/ +#define RGX_CR_PPP_CLIP_CHECKSUM (0x5120U) +#define RGX_CR_PPP_CLIP_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_VCE_PRIM_CHECKSUM +*/ +#define RGX_CR_VCE_PRIM_CHECKSUM (0x5140U) +#define RGX_CR_VCE_PRIM_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_VCE_PRIM_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_VCE_PRIM_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_TDM_PDS_CHECKSUM +*/ +#define RGX_CR_TDM_PDS_CHECKSUM (0x5148U) +#define RGX_CR_TDM_PDS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TDM_PDS_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_TDM_PDS_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PBE_CHECKSUM_2D +*/ +#define RGX_CR_PBE_CHECKSUM_2D (0x5158U) +#define RGX_CR_PBE_CHECKSUM_2D_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PBE_CHECKSUM_2D_VALUE_SHIFT (0U) +#define 
RGX_CR_PBE_CHECKSUM_2D_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_PHASE_GEOM +*/ +#define RGX_CR_PERF_PHASE_GEOM (0x6008U) +#define RGX_CR_PERF_PHASE_GEOM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_PHASE_GEOM_COUNT_SHIFT (0U) +#define RGX_CR_PERF_PHASE_GEOM_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_PHASE_FRAG +*/ +#define RGX_CR_PERF_PHASE_FRAG (0x6010U) +#define RGX_CR_PERF_PHASE_FRAG_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_PHASE_FRAG_COUNT_SHIFT (0U) +#define RGX_CR_PERF_PHASE_FRAG_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_PHASE_COMP +*/ +#define RGX_CR_PERF_PHASE_COMP (0x6018U) +#define RGX_CR_PERF_PHASE_COMP_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_PHASE_COMP_COUNT_SHIFT (0U) +#define RGX_CR_PERF_PHASE_COMP_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_CYCLE_GEOM_TOTAL +*/ +#define RGX_CR_PERF_CYCLE_GEOM_TOTAL (0x6020U) +#define RGX_CR_PERF_CYCLE_GEOM_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_CYCLE_GEOM_TOTAL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_CYCLE_GEOM_TOTAL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_CYCLE_FRAG_TOTAL +*/ +#define RGX_CR_PERF_CYCLE_FRAG_TOTAL (0x6028U) +#define RGX_CR_PERF_CYCLE_FRAG_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_CYCLE_FRAG_TOTAL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_CYCLE_FRAG_TOTAL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_CYCLE_COMP_TOTAL +*/ +#define RGX_CR_PERF_CYCLE_COMP_TOTAL (0x6030U) +#define RGX_CR_PERF_CYCLE_COMP_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_CYCLE_COMP_TOTAL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_CYCLE_COMP_TOTAL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL +*/ +#define RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL (0x6038U) +#define RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define 
RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_PHASE_2D +*/ +#define RGX_CR_PERF_PHASE_2D (0x6050U) +#define RGX_CR_PERF_PHASE_2D_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_PHASE_2D_COUNT_SHIFT (0U) +#define RGX_CR_PERF_PHASE_2D_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_CYCLE_2D_TOTAL +*/ +#define RGX_CR_PERF_CYCLE_2D_TOTAL (0x6058U) +#define RGX_CR_PERF_CYCLE_2D_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_CYCLE_2D_TOTAL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_CYCLE_2D_TOTAL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC0_READ_STALL +*/ +#define RGX_CR_PERF_SLC0_READ_STALL (0x60B8U) +#define RGX_CR_PERF_SLC0_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC0_WRITE_STALL +*/ +#define RGX_CR_PERF_SLC0_WRITE_STALL (0x60C0U) +#define RGX_CR_PERF_SLC0_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC1_READ_STALL +*/ +#define RGX_CR_PERF_SLC1_READ_STALL (0x60E0U) +#define RGX_CR_PERF_SLC1_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC1_WRITE_STALL +*/ +#define RGX_CR_PERF_SLC1_WRITE_STALL (0x60E8U) +#define RGX_CR_PERF_SLC1_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC2_READ_STALL +*/ +#define RGX_CR_PERF_SLC2_READ_STALL (0x6158U) +#define 
RGX_CR_PERF_SLC2_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC2_WRITE_STALL +*/ +#define RGX_CR_PERF_SLC2_WRITE_STALL (0x6160U) +#define RGX_CR_PERF_SLC2_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC3_READ_STALL +*/ +#define RGX_CR_PERF_SLC3_READ_STALL (0x6180U) +#define RGX_CR_PERF_SLC3_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC3_WRITE_STALL +*/ +#define RGX_CR_PERF_SLC3_WRITE_STALL (0x6188U) +#define RGX_CR_PERF_SLC3_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL +*/ +#define RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL (0x6408U) +#define RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_JONES_IDLE +*/ +#define RGX_CR_JONES_IDLE (0x8328U) +#define RGX_CR_JONES_IDLE_MASKFULL (IMG_UINT64_C(0x000000000001FEFF)) +#define RGX_CR_JONES_IDLE_AXI2IMG_SHIFT (16U) +#define RGX_CR_JONES_IDLE_AXI2IMG_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_JONES_IDLE_AXI2IMG_EN (0x00010000U) +#define RGX_CR_JONES_IDLE_SLC_SHIFT (15U) +#define RGX_CR_JONES_IDLE_SLC_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_JONES_IDLE_SLC_EN (0x00008000U) +#define RGX_CR_JONES_IDLE_TDM_SHIFT (14U) +#define RGX_CR_JONES_IDLE_TDM_CLRMSK (0xFFFFBFFFU) +#define 
RGX_CR_JONES_IDLE_TDM_EN (0x00004000U) +#define RGX_CR_JONES_IDLE_FB_CDC_TLA_SHIFT (13U) +#define RGX_CR_JONES_IDLE_FB_CDC_TLA_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_JONES_IDLE_FB_CDC_TLA_EN (0x00002000U) +#define RGX_CR_JONES_IDLE_FB_CDC_SHIFT (12U) +#define RGX_CR_JONES_IDLE_FB_CDC_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_JONES_IDLE_FB_CDC_EN (0x00001000U) +#define RGX_CR_JONES_IDLE_MMU_SHIFT (11U) +#define RGX_CR_JONES_IDLE_MMU_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_JONES_IDLE_MMU_EN (0x00000800U) +#define RGX_CR_JONES_IDLE_DFU_SHIFT (10U) +#define RGX_CR_JONES_IDLE_DFU_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_JONES_IDLE_DFU_EN (0x00000400U) +#define RGX_CR_JONES_IDLE_GARTEN_SHIFT (9U) +#define RGX_CR_JONES_IDLE_GARTEN_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_JONES_IDLE_GARTEN_EN (0x00000200U) +#define RGX_CR_JONES_IDLE_SOCIF_SHIFT (7U) +#define RGX_CR_JONES_IDLE_SOCIF_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_JONES_IDLE_SOCIF_EN (0x00000080U) +#define RGX_CR_JONES_IDLE_TILING_SHIFT (6U) +#define RGX_CR_JONES_IDLE_TILING_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_JONES_IDLE_TILING_EN (0x00000040U) +#define RGX_CR_JONES_IDLE_IPP_SHIFT (5U) +#define RGX_CR_JONES_IDLE_IPP_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_JONES_IDLE_IPP_EN (0x00000020U) +#define RGX_CR_JONES_IDLE_USC_GMUTEX_SHIFT (4U) +#define RGX_CR_JONES_IDLE_USC_GMUTEX_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_JONES_IDLE_USC_GMUTEX_EN (0x00000010U) +#define RGX_CR_JONES_IDLE_PM_SHIFT (3U) +#define RGX_CR_JONES_IDLE_PM_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_JONES_IDLE_PM_EN (0x00000008U) +#define RGX_CR_JONES_IDLE_CDM_SHIFT (2U) +#define RGX_CR_JONES_IDLE_CDM_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_JONES_IDLE_CDM_EN (0x00000004U) +#define RGX_CR_JONES_IDLE_DCE_SHIFT (1U) +#define RGX_CR_JONES_IDLE_DCE_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_JONES_IDLE_DCE_EN (0x00000002U) +#define RGX_CR_JONES_IDLE_BIF_SHIFT (0U) +#define RGX_CR_JONES_IDLE_BIF_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_JONES_IDLE_BIF_EN (0x00000001U) + + +/* + Register 
RGX_CR_SYS_BUS_SECURE +*/ +#define RGX_CR_SYS_BUS_SECURE (0xA100U) +#define RGX_CR_SYS_BUS_SECURE__SYS_BUS_SECURE_RESET__MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_SYS_BUS_SECURE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_SYS_BUS_SECURE_ENABLE_SHIFT (0U) +#define RGX_CR_SYS_BUS_SECURE_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SYS_BUS_SECURE_ENABLE_EN (0x00000001U) + + +/* + Register group: RGX_CR_FBA_FC, with 2 repeats +*/ +#define RGX_CR_FBA_FC_REPEATCOUNT (2U) +/* + Register RGX_CR_FBA_FC0 +*/ +#define RGX_CR_FBA_FC0 (0xD170U) +#define RGX_CR_FBA_FC0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FBA_FC0_CHECKSUM_SHIFT (0U) +#define RGX_CR_FBA_FC0_CHECKSUM_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_FBA_FC1 +*/ +#define RGX_CR_FBA_FC1 (0xD178U) +#define RGX_CR_FBA_FC1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FBA_FC1_CHECKSUM_SHIFT (0U) +#define RGX_CR_FBA_FC1_CHECKSUM_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SHF_TVB_CHECKSUM +*/ +#define RGX_CR_SHF_TVB_CHECKSUM (0xD1C0U) +#define RGX_CR_SHF_TVB_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SHF_TVB_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_SHF_TVB_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SHF_VERTEX_BIF_CHECKSUM +*/ +#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM (0xD1C8U) +#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SHF_VARY_BIF_CHECKSUM +*/ +#define RGX_CR_SHF_VARY_BIF_CHECKSUM (0xD1D0U) +#define RGX_CR_SHF_VARY_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SHF_VARY_BIF_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_SHF_VARY_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_RPM_BIF_CHECKSUM +*/ +#define RGX_CR_RPM_BIF_CHECKSUM (0xD1D8U) +#define RGX_CR_RPM_BIF_CHECKSUM_MASKFULL 
(IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_RPM_BIF_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_RPM_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SHG_BIF_CHECKSUM +*/ +#define RGX_CR_SHG_BIF_CHECKSUM (0xD1E0U) +#define RGX_CR_SHG_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SHG_BIF_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_SHG_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SHG_RTU_CHECKSUM +*/ +#define RGX_CR_SHG_RTU_CHECKSUM (0xD1F0U) +#define RGX_CR_SHG_RTU_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SHG_RTU_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_SHG_RTU_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_MMU_CBASE_MAPPING_CONTEXT +*/ +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT (0xE140U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__MMU_GT_V3__MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK (0xFFFFFF00U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__MMU_GT_V3__ID_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__MMU_GT_V3__ID_CLRMSK (0xFFFFFFE0U) + + +/* + Register RGX_CR_MMU_CBASE_MAPPING +*/ +#define RGX_CR_MMU_CBASE_MAPPING__VPU (0x1E010U) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__MASKFULL (IMG_UINT64_C(0x000000001FFFFFFF)) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__INVALID_SHIFT (28U) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__INVALID_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__INVALID_EN (0x10000000U) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__BASE_ADDR_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__BASE_ADDR_CLRMSK (0xF0000000U) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__BASE_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__BASE_ADDR_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_MMU_CBASE_MAPPING +*/ +#define RGX_CR_MMU_CBASE_MAPPING (0xE148U) 
+#define RGX_CR_MMU_CBASE_MAPPING_MASKFULL (IMG_UINT64_C(0x000000001FFFFFFF)) +#define RGX_CR_MMU_CBASE_MAPPING_INVALID_SHIFT (28U) +#define RGX_CR_MMU_CBASE_MAPPING_INVALID_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_MMU_CBASE_MAPPING_INVALID_EN (0x10000000U) +#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK (0xF0000000U) +#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_MMU_FAULT_STATUS1 +*/ +#define RGX_CR_MMU_FAULT_STATUS1 (0xE150U) +#define RGX_CR_MMU_FAULT_STATUS1_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS1_LEVEL_SHIFT (62U) +#define RGX_CR_MMU_FAULT_STATUS1_LEVEL_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS1_REQ_ID_SHIFT (56U) +#define RGX_CR_MMU_FAULT_STATUS1_REQ_ID_CLRMSK (IMG_UINT64_C(0xC0FFFFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS1_CONTEXT_SHIFT (48U) +#define RGX_CR_MMU_FAULT_STATUS1_CONTEXT_CLRMSK (IMG_UINT64_C(0xFF00FFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS1_ADDRESS_SHIFT (4U) +#define RGX_CR_MMU_FAULT_STATUS1_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFF00000000000F)) +#define RGX_CR_MMU_FAULT_STATUS1_RNW_SHIFT (3U) +#define RGX_CR_MMU_FAULT_STATUS1_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_MMU_FAULT_STATUS1_RNW_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_MMU_FAULT_STATUS1_TYPE_SHIFT (1U) +#define RGX_CR_MMU_FAULT_STATUS1_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9)) +#define RGX_CR_MMU_FAULT_STATUS1_FAULT_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS1_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MMU_FAULT_STATUS1_FAULT_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_MMU_FAULT_STATUS2 +*/ +#define RGX_CR_MMU_FAULT_STATUS2 (0xE158U) +#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__MASKFULL (IMG_UINT64_C(0x00000000003FFFFF)) +#define RGX_CR_MMU_FAULT_STATUS2_MASKFULL 
(IMG_UINT64_C(0x000000003FFF0FFF)) +#define RGX_CR_MMU_FAULT_STATUS2_WRITEBACK_SHIFT (29U) +#define RGX_CR_MMU_FAULT_STATUS2_WRITEBACK_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_MMU_FAULT_STATUS2_WRITEBACK_EN (0x20000000U) +#define RGX_CR_MMU_FAULT_STATUS2_CLEANUNIQUE_SHIFT (28U) +#define RGX_CR_MMU_FAULT_STATUS2_CLEANUNIQUE_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_MMU_FAULT_STATUS2_CLEANUNIQUE_EN (0x10000000U) +#define RGX_CR_MMU_FAULT_STATUS2_BANK_SHIFT (24U) +#define RGX_CR_MMU_FAULT_STATUS2_BANK_CLRMSK (0xF0FFFFFFU) +#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__FBM_FAULT_SHIFT (21U) +#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__FBM_FAULT_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__FBM_FAULT_EN (0x00200000U) +#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__WRITEBACK_SHIFT (20U) +#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__WRITEBACK_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__WRITEBACK_EN (0x00100000U) +#define RGX_CR_MMU_FAULT_STATUS2_TLB_ENTRY_SHIFT (16U) +#define RGX_CR_MMU_FAULT_STATUS2_TLB_ENTRY_CLRMSK (0xFF00FFFFU) +#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BIF_ID_SHIFT (12U) +#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BIF_ID_CLRMSK (0xFFF00FFFU) +#define RGX_CR_MMU_FAULT_STATUS2_UPS_FAULT_SHIFT (11U) +#define RGX_CR_MMU_FAULT_STATUS2_UPS_FAULT_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_MMU_FAULT_STATUS2_UPS_FAULT_EN (0x00000800U) +#define RGX_CR_MMU_FAULT_STATUS2_FBM_FAULT_SHIFT (10U) +#define RGX_CR_MMU_FAULT_STATUS2_FBM_FAULT_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_MMU_FAULT_STATUS2_FBM_FAULT_EN (0x00000400U) +#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BANK_SHIFT (8U) +#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BANK_CLRMSK (0xFFFFF0FFU) +#define RGX_CR_MMU_FAULT_STATUS2_BIF_ID_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS2_BIF_ID_CLRMSK (0xFFFFFC00U) +#define RGX_CR_MMU_FAULT_STATUS2_ACTIVE_ID_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS2_ACTIVE_ID_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_MMU_FAULT_STATUS_META +*/ +#define 
RGX_CR_MMU_FAULT_STATUS_META (0xE160U) +#define RGX_CR_MMU_FAULT_STATUS_META_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT (62U) +#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT (56U) +#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK (IMG_UINT64_C(0xC0FFFFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT (48U) +#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK (IMG_UINT64_C(0xFF00FFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT (4U) +#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFF00000000000F)) +#define RGX_CR_MMU_FAULT_STATUS_META_RNW_SHIFT (3U) +#define RGX_CR_MMU_FAULT_STATUS_META_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_MMU_FAULT_STATUS_META_RNW_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT (1U) +#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9)) +#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_MMU_FAULT_STATUS2_META +*/ +#define RGX_CR_MMU_FAULT_STATUS2_META (0xE198U) +#define RGX_CR_MMU_FAULT_STATUS2_META__ALBTOP__MASKFULL (IMG_UINT64_C(0x0000000000001FFF)) +#define RGX_CR_MMU_FAULT_STATUS2_META_MASKFULL (IMG_UINT64_C(0x0000000000003FFF)) +#define RGX_CR_MMU_FAULT_STATUS2_META_WRITEBACK_SHIFT (13U) +#define RGX_CR_MMU_FAULT_STATUS2_META_WRITEBACK_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_MMU_FAULT_STATUS2_META_WRITEBACK_EN (0x00002000U) +#define RGX_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_SHIFT (12U) +#define RGX_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_EN (0x00001000U) +#define 
RGX_CR_MMU_FAULT_STATUS2_META__ALBTOP__WRITEBACK_SHIFT (12U) +#define RGX_CR_MMU_FAULT_STATUS2_META__ALBTOP__WRITEBACK_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_MMU_FAULT_STATUS2_META__ALBTOP__WRITEBACK_EN (0x00001000U) +#define RGX_CR_MMU_FAULT_STATUS2_META_BANK_SHIFT (8U) +#define RGX_CR_MMU_FAULT_STATUS2_META_BANK_CLRMSK (0xFFFFF0FFU) +#define RGX_CR_MMU_FAULT_STATUS2_META_TLB_ENTRY_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS2_META_TLB_ENTRY_CLRMSK (0xFFFFFF00U) +#define RGX_CR_MMU_FAULT_STATUS2_META_ACTIVE_ID_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS2_META_ACTIVE_ID_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_MMU_FAULT_STATUS_PM +*/ +#define RGX_CR_MMU_FAULT_STATUS_PM (0xE130U) +#define RGX_CR_MMU_FAULT_STATUS_PM__PM_RECYCLE__MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_PM_MASKFULL (IMG_UINT64_C(0x0000000007FFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_PM_DM_SHIFT (24U) +#define RGX_CR_MMU_FAULT_STATUS_PM_DM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF8FFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_PM__PM_RECYCLE__DM_SHIFT (24U) +#define RGX_CR_MMU_FAULT_STATUS_PM__PM_RECYCLE__DM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0FFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_PM_RNW_SHIFT (23U) +#define RGX_CR_MMU_FAULT_STATUS_PM_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_PM_RNW_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_MMU_FAULT_STATUS_PM_ADDRESS_SHIFT (3U) +#define RGX_CR_MMU_FAULT_STATUS_PM_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF800007)) +#define RGX_CR_MMU_FAULT_STATUS_PM_LEVEL_SHIFT (1U) +#define RGX_CR_MMU_FAULT_STATUS_PM_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9)) +#define RGX_CR_MMU_FAULT_STATUS_PM_FAULT_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS_PM_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MMU_FAULT_STATUS_PM_FAULT_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_MMU_ENTRY_STATUS +*/ +#define RGX_CR_MMU_ENTRY_STATUS__VPU (0x1E028U) +#define 
RGX_CR_MMU_ENTRY_STATUS__VPU__MASKFULL (IMG_UINT64_C(0x000000FFFFFF80FF)) +#define RGX_CR_MMU_ENTRY_STATUS__VPU__ADDRESS_SHIFT (15U) +#define RGX_CR_MMU_ENTRY_STATUS__VPU__ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF0000007FFF)) +#define RGX_CR_MMU_ENTRY_STATUS__VPU__CONTEXT_ID_SHIFT (0U) +#define RGX_CR_MMU_ENTRY_STATUS__VPU__CONTEXT_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_MMU_ENTRY_STATUS +*/ +#define RGX_CR_MMU_ENTRY_STATUS (0xE178U) +#define RGX_CR_MMU_ENTRY_STATUS_MASKFULL (IMG_UINT64_C(0x000000FFFFFF80FF)) +#define RGX_CR_MMU_ENTRY_STATUS_ADDRESS_SHIFT (15U) +#define RGX_CR_MMU_ENTRY_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF0000007FFF)) +#define RGX_CR_MMU_ENTRY_STATUS_CONTEXT_ID_SHIFT (0U) +#define RGX_CR_MMU_ENTRY_STATUS_CONTEXT_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_MMU_ENTRY +*/ +#define RGX_CR_MMU_ENTRY__VPU (0x1E030U) +#define RGX_CR_MMU_ENTRY__VPU__MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_MMU_ENTRY__VPU__ENABLE_SHIFT (1U) +#define RGX_CR_MMU_ENTRY__VPU__ENABLE_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_MMU_ENTRY__VPU__ENABLE_EN (0x00000002U) +#define RGX_CR_MMU_ENTRY__VPU__PENDING_SHIFT (0U) +#define RGX_CR_MMU_ENTRY__VPU__PENDING_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MMU_ENTRY__VPU__PENDING_EN (0x00000001U) + + +/* + Register RGX_CR_MMU_ENTRY +*/ +#define RGX_CR_MMU_ENTRY (0xE180U) +#define RGX_CR_MMU_ENTRY_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_MMU_ENTRY_ENABLE_SHIFT (1U) +#define RGX_CR_MMU_ENTRY_ENABLE_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_MMU_ENTRY_ENABLE_EN (0x00000002U) +#define RGX_CR_MMU_ENTRY_PENDING_SHIFT (0U) +#define RGX_CR_MMU_ENTRY_PENDING_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MMU_ENTRY_PENDING_EN (0x00000001U) + + +/* + Register RGX_CR_MMU_PAGE_SIZE_RANGE_ONE +*/ +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE (0xE350U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF)) +#define 
RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_SHIFT (38U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFE3FFFFFFFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_SHIFT (19U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFC00007FFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSIZE (2097152U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_SHIFT (0U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF80000)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSIZE (2097152U) + + +/* + Register RGX_CR_MMU_PAGE_SIZE_RANGE_TWO +*/ +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO (0xE358U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_PAGE_SIZE_SHIFT (38U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFE3FFFFFFFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_SHIFT (19U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFC00007FFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_ALIGNSIZE (2097152U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_SHIFT (0U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF80000)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_ALIGNSIZE (2097152U) + + +/* + Register RGX_CR_MMU_PAGE_SIZE_RANGE_THREE +*/ +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE (0xE360U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_PAGE_SIZE_SHIFT (38U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFE3FFFFFFFFF)) 
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_SHIFT (19U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFC00007FFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_ALIGNSIZE (2097152U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_SHIFT (0U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF80000)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_ALIGNSIZE (2097152U) + + +/* + Register RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR +*/ +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR (0xE368U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_PAGE_SIZE_SHIFT (38U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFE3FFFFFFFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_SHIFT (19U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFC00007FFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_ALIGNSIZE (2097152U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_SHIFT (0U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF80000)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_ALIGNSIZE (2097152U) + + +/* + Register RGX_CR_SLC_STATUS1 +*/ +#define RGX_CR_SLC_STATUS1 (0xE210U) +#define RGX_CR_SLC_STATUS1_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SLC_STATUS1_XBAR_CFI_TIMEOUTS_SHIFT (48U) +#define RGX_CR_SLC_STATUS1_XBAR_CFI_TIMEOUTS_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) +#define RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_WRITES_SHIFT (36U) +#define RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0xFFFF000FFFFFFFFF)) +#define 
RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_WRITES_SHIFT (24U) +#define RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0xFFFFFFF000FFFFFF)) +#define RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_READS_SHIFT (12U) +#define RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF000FFF)) +#define RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_READS_SHIFT (0U) +#define RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF000)) + + +/* + Register RGX_CR_SLC_STATUS2 +*/ +#define RGX_CR_SLC_STATUS2 (0xE218U) +#define RGX_CR_SLC_STATUS2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SLC_STATUS2_SLC_SIZE_IN_KB_SHIFT (48U) +#define RGX_CR_SLC_STATUS2_SLC_SIZE_IN_KB_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) +#define RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_WRITES_SHIFT (36U) +#define RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0xFFFF000FFFFFFFFF)) +#define RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_WRITES_SHIFT (24U) +#define RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0xFFFFFFF000FFFFFF)) +#define RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_READS_SHIFT (12U) +#define RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF000FFF)) +#define RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_READS_SHIFT (0U) +#define RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF000)) + + +/* + Register RGX_CR_SLC_IDLE +*/ +#define RGX_CR_SLC_IDLE (0xE230U) +#define RGX_CR_SLC_IDLE__COHERENCY_AND_ALRIF_GT0__MASKFULL (IMG_UINT64_C(0x00000000000FFFFF)) +#define RGX_CR_SLC_IDLE_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_SLC_IDLE_ACE_CLBS_SHIFT (16U) +#define RGX_CR_SLC_IDLE_ACE_CLBS_CLRMSK (0xFFF0FFFFU) +#define RGX_CR_SLC_IDLE_ACE_CONVERTERS_SHIFT (12U) +#define RGX_CR_SLC_IDLE_ACE_CONVERTERS_CLRMSK (0xFFFF0FFFU) +#define RGX_CR_SLC_IDLE_CACHE_BANKS_SHIFT (4U) +#define RGX_CR_SLC_IDLE_CACHE_BANKS_CLRMSK (0xFFFFF00FU) +#define RGX_CR_SLC_IDLE_MMU_SHIFT (3U) +#define 
RGX_CR_SLC_IDLE_MMU_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_SLC_IDLE_MMU_EN (0x00000008U) +#define RGX_CR_SLC_IDLE_CCM_SHIFT (2U) +#define RGX_CR_SLC_IDLE_CCM_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SLC_IDLE_CCM_EN (0x00000004U) +#define RGX_CR_SLC_IDLE_RDI_SHIFT (1U) +#define RGX_CR_SLC_IDLE_RDI_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SLC_IDLE_RDI_EN (0x00000002U) +#define RGX_CR_SLC_IDLE_XBAR_SHIFT (0U) +#define RGX_CR_SLC_IDLE_XBAR_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SLC_IDLE_XBAR_EN (0x00000001U) + + +/* + Register RGX_CR_SLC_FAULT_STOP_STATUS +*/ +#define RGX_CR_SLC_FAULT_STOP_STATUS__VPU (0x1E240U) +#define RGX_CR_SLC_FAULT_STOP_STATUS__VPU__MASKFULL (IMG_UINT64_C(0x000000000001FFFF)) +#define RGX_CR_SLC_FAULT_STOP_STATUS__VPU__BIF_SHIFT (0U) +#define RGX_CR_SLC_FAULT_STOP_STATUS__VPU__BIF_CLRMSK (0xFFFE0000U) + + +/* + Register RGX_CR_SLC_FAULT_STOP_STATUS +*/ +#define RGX_CR_SLC_FAULT_STOP_STATUS (0xE240U) +#define RGX_CR_SLC_FAULT_STOP_STATUS_MASKFULL (IMG_UINT64_C(0x000000000001FFFF)) +#define RGX_CR_SLC_FAULT_STOP_STATUS_BIF_SHIFT (0U) +#define RGX_CR_SLC_FAULT_STOP_STATUS_BIF_CLRMSK (0xFFFE0000U) + + +/* + Register RGX_CR_SLC_STATUS_DEBUG +*/ +#define RGX_CR_SLC_STATUS_DEBUG__VPU (0x1E260U) +#define RGX_CR_SLC_STATUS_DEBUG__VPU__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SLC_STATUS_DEBUG__VPU__ERR_COH_REQ_SHIFT (16U) +#define RGX_CR_SLC_STATUS_DEBUG__VPU__ERR_COH_REQ_CLRMSK (0x0000FFFFU) +#define RGX_CR_SLC_STATUS_DEBUG__VPU__ERR_ADDR_ALIAS_SHIFT (0U) +#define RGX_CR_SLC_STATUS_DEBUG__VPU__ERR_ADDR_ALIAS_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_SLC_STATUS_DEBUG +*/ +#define RGX_CR_SLC_STATUS_DEBUG (0xE260U) +#define RGX_CR_SLC_STATUS_DEBUG_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SLC_STATUS_DEBUG_ERR_COH_REQ_SHIFT (16U) +#define RGX_CR_SLC_STATUS_DEBUG_ERR_COH_REQ_CLRMSK (0x0000FFFFU) +#define RGX_CR_SLC_STATUS_DEBUG_ERR_ADDR_ALIAS_SHIFT (0U) +#define RGX_CR_SLC_STATUS_DEBUG_ERR_ADDR_ALIAS_CLRMSK (0xFFFF0000U) + + +/* + 
Register RGX_CR_HMMU_OSID_PAGE_SIZE +*/ +#define RGX_CR_HMMU_OSID_PAGE_SIZE (0x80000U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_MASKFULL (IMG_UINT64_C(0x0000000077777777)) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_7_SHIFT (28U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_7_CLRMSK (0x8FFFFFFFU) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_6_SHIFT (24U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_6_CLRMSK (0xF8FFFFFFU) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_5_SHIFT (20U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_5_CLRMSK (0xFF8FFFFFU) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_4_SHIFT (16U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_4_CLRMSK (0xFFF8FFFFU) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_3_SHIFT (12U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_3_CLRMSK (0xFFFF8FFFU) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_2_SHIFT (8U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_2_CLRMSK (0xFFFFF8FFU) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_1_SHIFT (4U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_1_CLRMSK (0xFFFFFF8FU) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_0_SHIFT (0U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_0_CLRMSK (0xFFFFFFF8U) + + +/* + Register RGX_CR_HMMU_BYPASS +*/ +#define RGX_CR_HMMU_BYPASS (0x80008U) +#define RGX_CR_HMMU_BYPASS_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_HMMU_BYPASS_EN_SHIFT (0U) +#define RGX_CR_HMMU_BYPASS_EN_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_HMMU_INVAL +*/ +#define RGX_CR_HMMU_INVAL (0x80010U) +#define RGX_CR_HMMU_INVAL_MASKFULL (IMG_UINT64_C(0x000000000000007F)) +#define RGX_CR_HMMU_INVAL_OS_ID_SHIFT (4U) +#define RGX_CR_HMMU_INVAL_OS_ID_CLRMSK (0xFFFFFF8FU) +#define RGX_CR_HMMU_INVAL_ALL_OS_IDS_SHIFT (3U) +#define RGX_CR_HMMU_INVAL_ALL_OS_IDS_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_HMMU_INVAL_ALL_OS_IDS_EN (0x00000008U) +#define RGX_CR_HMMU_INVAL_HPC_SHIFT (2U) +#define RGX_CR_HMMU_INVAL_HPC_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_HMMU_INVAL_HPC_EN (0x00000004U) +#define RGX_CR_HMMU_INVAL_HPD_SHIFT (1U) +#define RGX_CR_HMMU_INVAL_HPD_CLRMSK 
(0xFFFFFFFDU) +#define RGX_CR_HMMU_INVAL_HPD_EN (0x00000002U) +#define RGX_CR_HMMU_INVAL_HPT_SHIFT (0U) +#define RGX_CR_HMMU_INVAL_HPT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_HMMU_INVAL_HPT_EN (0x00000001U) + + +/* + Register RGX_CR_HMMU_HPC_BASE_MAPPING0 +*/ +#define RGX_CR_HMMU_HPC_BASE_MAPPING0 (0x80018U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_MASKFULL (IMG_UINT64_C(0xFFFFFFF1FFFFFFF1)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_ADDR1_SHIFT (36U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_ADDR1_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID1_SHIFT (32U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID1_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_ADDR0_SHIFT (4U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_ADDR0_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID0_SHIFT (0U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID0_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_HMMU_HPC_BASE_MAPPING1 +*/ +#define RGX_CR_HMMU_HPC_BASE_MAPPING1 (0x80020U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_MASKFULL (IMG_UINT64_C(0xFFFFFFF1FFFFFFF1)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_ADDR3_SHIFT (36U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_ADDR3_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID3_SHIFT (32U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID3_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_ADDR2_SHIFT (4U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_ADDR2_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID2_SHIFT (0U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define 
RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID2_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_HMMU_HPC_BASE_MAPPING2 +*/ +#define RGX_CR_HMMU_HPC_BASE_MAPPING2 (0x80028U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_MASKFULL (IMG_UINT64_C(0xFFFFFFF1FFFFFFF1)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_ADDR5_SHIFT (36U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_ADDR5_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID5_SHIFT (32U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID5_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_ADDR4_SHIFT (4U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_ADDR4_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID4_SHIFT (0U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID4_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_HMMU_HPC_BASE_MAPPING3 +*/ +#define RGX_CR_HMMU_HPC_BASE_MAPPING3 (0x80030U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_MASKFULL (IMG_UINT64_C(0xFFFFFFF1FFFFFFF1)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_ADDR7_SHIFT (36U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_ADDR7_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID7_SHIFT (32U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID7_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_ADDR6_SHIFT (4U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_ADDR6_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID6_SHIFT (0U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID6_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register group: RGX_CR_HMMU_PAGE_FAULT_INFO, with 
8 repeats +*/ +#define RGX_CR_HMMU_PAGE_FAULT_INFO_REPEATCOUNT (8U) +/* + Register RGX_CR_HMMU_PAGE_FAULT_INFO0 +*/ +#define RGX_CR_HMMU_PAGE_FAULT_INFO0 (0x80038U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO0_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO0_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO0_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO0_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PAGE_FAULT_INFO1 +*/ +#define RGX_CR_HMMU_PAGE_FAULT_INFO1 (0x80040U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO1_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO1_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO1_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PAGE_FAULT_INFO2 +*/ +#define RGX_CR_HMMU_PAGE_FAULT_INFO2 (0x80048U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO2_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO2_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO2_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO2_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PAGE_FAULT_INFO3 +*/ +#define RGX_CR_HMMU_PAGE_FAULT_INFO3 (0x80050U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO3_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO3_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO3_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO3_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PAGE_FAULT_INFO4 +*/ +#define 
RGX_CR_HMMU_PAGE_FAULT_INFO4 (0x80058U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO4_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO4_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO4_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO4_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PAGE_FAULT_INFO5 +*/ +#define RGX_CR_HMMU_PAGE_FAULT_INFO5 (0x80060U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO5_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO5_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO5_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO5_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PAGE_FAULT_INFO6 +*/ +#define RGX_CR_HMMU_PAGE_FAULT_INFO6 (0x80068U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO6_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO6_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO6_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO6_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PAGE_FAULT_INFO7 +*/ +#define RGX_CR_HMMU_PAGE_FAULT_INFO7 (0x80070U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO7_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO7_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO7_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO7_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register group: RGX_CR_HMMU_PENDING_ENTRY_INFO, with 8 repeats +*/ +#define RGX_CR_HMMU_PENDING_ENTRY_INFO_REPEATCOUNT (8U) +/* + Register RGX_CR_HMMU_PENDING_ENTRY_INFO0 +*/ +#define 
RGX_CR_HMMU_PENDING_ENTRY_INFO0 (0x800C0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PENDING_ENTRY_INFO1 +*/ +#define RGX_CR_HMMU_PENDING_ENTRY_INFO1 (0x800C8U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PENDING_ENTRY_INFO2 +*/ +#define RGX_CR_HMMU_PENDING_ENTRY_INFO2 (0x800D0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PENDING_ENTRY_INFO3 +*/ +#define RGX_CR_HMMU_PENDING_ENTRY_INFO3 (0x800D8U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PENDING_ENTRY_INFO4 +*/ +#define RGX_CR_HMMU_PENDING_ENTRY_INFO4 (0x800E0U) +#define 
RGX_CR_HMMU_PENDING_ENTRY_INFO4_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PENDING_ENTRY_INFO5 +*/ +#define RGX_CR_HMMU_PENDING_ENTRY_INFO5 (0x800E8U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PENDING_ENTRY_INFO6 +*/ +#define RGX_CR_HMMU_PENDING_ENTRY_INFO6 (0x800F0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PENDING_ENTRY_INFO7 +*/ +#define RGX_CR_HMMU_PENDING_ENTRY_INFO7 (0x800F8U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO7_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO7_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO7_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO7_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_HOST_IRQ_ENABLE +*/ +#define RGX_CR_HMMU_HOST_IRQ_ENABLE (0x80100U) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_MASKFULL (IMG_UINT64_C(0x000000000000000F)) 
+#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_SHIFT (3U) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_PM_SHIFT (2U) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_PM_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PAGE_FAULT_SHIFT (1U) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PAGE_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PAGE_FAULT_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PENDING_ENTRY_SHIFT (0U) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PENDING_ENTRY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PENDING_ENTRY_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_HMMU_PENDING_ENTRY +*/ +#define RGX_CR_HMMU_PENDING_ENTRY (0x80108U) +#define RGX_CR_HMMU_PENDING_ENTRY_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_HMMU_PENDING_ENTRY_ENABLE_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_HMMU_PENDING_ENTRY_ENABLE_EN (0x00000001U) + + +/* + Register RGX_CR_HMMU_FAULT_STATUS +*/ +#define RGX_CR_HMMU_FAULT_STATUS (0x80120U) +#define RGX_CR_HMMU_FAULT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID7_SHIFT (31U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID7_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID6_SHIFT (30U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID6_EN 
(IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID5_SHIFT (29U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID5_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID4_SHIFT (28U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID4_EN (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID3_SHIFT (27U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID3_EN (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID2_SHIFT (26U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID2_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID1_SHIFT (25U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID1_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID0_SHIFT (24U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID0_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID7_SHIFT (23U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID7_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID6_SHIFT (22U) +#define 
RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID6_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID5_SHIFT (21U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID5_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID4_SHIFT (20U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID4_EN (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID3_SHIFT (19U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID3_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID2_SHIFT (18U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID2_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID1_SHIFT (17U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID1_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID0_SHIFT (16U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID0_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID7_SHIFT (15U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID7_EN (IMG_UINT64_C(0x0000000000008000)) +#define 
RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID6_SHIFT (14U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID6_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID5_SHIFT (13U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID5_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID4_SHIFT (12U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID4_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID3_SHIFT (11U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID3_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID2_SHIFT (10U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID2_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID1_SHIFT (9U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID1_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID0_SHIFT (8U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID0_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID7_SHIFT (7U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID7_EN 
(IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID6_SHIFT (6U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID6_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID5_SHIFT (5U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID5_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID4_SHIFT (4U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID4_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID3_SHIFT (3U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID3_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID2_SHIFT (2U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID2_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID1_SHIFT (1U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID1_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID0_SHIFT (0U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID0_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register group: RGX_CR_HMMU_READONLY_FAULT_INFO, with 8 repeats +*/ +#define RGX_CR_HMMU_READONLY_FAULT_INFO_REPEATCOUNT (8U) +/* + Register RGX_CR_HMMU_READONLY_FAULT_INFO0 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_INFO0 (0x80190U) +#define 
RGX_CR_HMMU_READONLY_FAULT_INFO0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO0_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_INFO1 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_INFO1 (0x80198U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO1_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_INFO2 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_INFO2 (0x801A0U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO2_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_INFO3 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_INFO3 (0x801A8U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO3_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_INFO4 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_INFO4 (0x801B0U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO4_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_INFO5 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_INFO5 (0x801B8U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO5_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_INFO6 +*/ +#define 
RGX_CR_HMMU_READONLY_FAULT_INFO6 (0x801C0U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO6_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_INFO7 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_INFO7 (0x801C8U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO7_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) + + +/* + Register group: RGX_CR_HMMU_READONLY_FAULT_PM_INFO, with 8 repeats +*/ +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO_REPEATCOUNT (8U) +/* + Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO0 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO0 (0x801D0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO0_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO0_ADDR_SHIFT (0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO1 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO1 (0x801D8U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO1_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO1_ADDR_SHIFT (0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO2 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO2 (0x801E0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO2_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO2_ADDR_SHIFT (0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO3 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO3 (0x801E8U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO3_MASKFULL 
(IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO3_ADDR_SHIFT (0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO4 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO4 (0x801F0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO4_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO4_ADDR_SHIFT (0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO5 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO5 (0x801F8U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO5_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO5_ADDR_SHIFT (0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO6 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO6 (0x80200U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO6_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO6_ADDR_SHIFT (0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO7 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO7 (0x80208U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO7_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO7_ADDR_SHIFT (0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) + + +/* + Register RGX_CR_ACE_CTRL +*/ +#define RGX_CR_ACE_CTRL__VPU (0x1E320U) +#define RGX_CR_ACE_CTRL__VPU__MASKFULL (IMG_UINT64_C(0x00000000007FCFFF)) +#define RGX_CR_ACE_CTRL__VPU__CLB_AXQOS_SHIFT (19U) +#define RGX_CR_ACE_CTRL__VPU__CLB_AXQOS_CLRMSK (0xFF87FFFFU) +#define RGX_CR_ACE_CTRL__VPU__PM_MMU_AXCACHE_SHIFT (15U) +#define RGX_CR_ACE_CTRL__VPU__PM_MMU_AXCACHE_CLRMSK 
(0xFFF87FFFU) +#define RGX_CR_ACE_CTRL__VPU__ENABLE_NONSECURE_PROT_MATCH_SHIFT (14U) +#define RGX_CR_ACE_CTRL__VPU__ENABLE_NONSECURE_PROT_MATCH_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_ACE_CTRL__VPU__ENABLE_NONSECURE_PROT_MATCH_EN (0x00004000U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_SHIFT (8U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_CLRMSK (0xFFFFF0FFU) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_DEVICE_NON_BUFFERABLE (0x00000000U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_DEVICE_BUFFERABLE (0x00000100U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_NORMAL_NC_NON_BUFFERABLE (0x00000200U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_NORMAL_NC_BUFFERABLE (0x00000300U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_WRITE_THROUGH_NO_ALLOCATE (0x00000600U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_WRITE_THROUGH_WRITE_ALLOCATE (0x00000E00U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_WRITE_BACK_NO_ALLOCATE (0x00000700U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_WRITE_BACK_WRITE_ALLOCATE (0x00000F00U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_SHIFT (4U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_CLRMSK (0xFFFFFF0FU) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_DEVICE_NON_BUFFERABLE (0x00000000U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_DEVICE_BUFFERABLE (0x00000010U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_NORMAL_NC_NON_BUFFERABLE (0x00000020U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_NORMAL_NC_BUFFERABLE (0x00000030U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_WRITE_THROUGH_NO_ALLOCATE (0x000000A0U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_WRITE_THROUGH_READ_ALLOCATE (0x000000E0U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_WRITE_BACK_NO_ALLOCATE (0x000000B0U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_WRITE_BACK_READ_ALLOCATE (0x000000F0U) +#define RGX_CR_ACE_CTRL__VPU__MMU_DOMAIN_SHIFT (2U) +#define RGX_CR_ACE_CTRL__VPU__MMU_DOMAIN_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_ACE_CTRL__VPU__COH_DOMAIN_SHIFT (1U) +#define RGX_CR_ACE_CTRL__VPU__COH_DOMAIN_CLRMSK (0xFFFFFFFDU) 
+#define RGX_CR_ACE_CTRL__VPU__COH_DOMAIN_INNER_SHAREABLE (0x00000000U) +#define RGX_CR_ACE_CTRL__VPU__COH_DOMAIN_OUTER_SHAREABLE (0x00000002U) +#define RGX_CR_ACE_CTRL__VPU__NON_COH_DOMAIN_SHIFT (0U) +#define RGX_CR_ACE_CTRL__VPU__NON_COH_DOMAIN_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_ACE_CTRL__VPU__NON_COH_DOMAIN_NON_SHAREABLE (0x00000000U) +#define RGX_CR_ACE_CTRL__VPU__NON_COH_DOMAIN_SYSTEM (0x00000001U) + + +/* + Register RGX_CR_ACE_CTRL +*/ +#define RGX_CR_ACE_CTRL (0xE320U) +#define RGX_CR_ACE_CTRL_MASKFULL (IMG_UINT64_C(0x0000000000FFCFFF)) +#define RGX_CR_ACE_CTRL_DISABLE_EMPTY_BURST_REMOVAL_SHIFT (23U) +#define RGX_CR_ACE_CTRL_DISABLE_EMPTY_BURST_REMOVAL_CLRMSK (0xFF7FFFFFU) +#define RGX_CR_ACE_CTRL_DISABLE_EMPTY_BURST_REMOVAL_EN (0x00800000U) +#define RGX_CR_ACE_CTRL_CLB_AXQOS_SHIFT (19U) +#define RGX_CR_ACE_CTRL_CLB_AXQOS_CLRMSK (0xFF87FFFFU) +#define RGX_CR_ACE_CTRL_PM_MMU_AXCACHE_SHIFT (15U) +#define RGX_CR_ACE_CTRL_PM_MMU_AXCACHE_CLRMSK (0xFFF87FFFU) +#define RGX_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_SHIFT (14U) +#define RGX_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_EN (0x00004000U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_SHIFT (8U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_CLRMSK (0xFFFFF0FFU) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_DEVICE_NON_BUFFERABLE (0x00000000U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_DEVICE_BUFFERABLE (0x00000100U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_NORMAL_NC_NON_BUFFERABLE (0x00000200U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_NORMAL_NC_BUFFERABLE (0x00000300U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_THROUGH_NO_ALLOCATE (0x00000600U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_THROUGH_WRITE_ALLOCATE (0x00000E00U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_NO_ALLOCATE (0x00000700U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_WRITE_ALLOCATE (0x00000F00U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_SHIFT (4U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_CLRMSK (0xFFFFFF0FU) 
+#define RGX_CR_ACE_CTRL_MMU_ARCACHE_DEVICE_NON_BUFFERABLE (0x00000000U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_DEVICE_BUFFERABLE (0x00000010U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_NORMAL_NC_NON_BUFFERABLE (0x00000020U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_NORMAL_NC_BUFFERABLE (0x00000030U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_THROUGH_NO_ALLOCATE (0x000000A0U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_THROUGH_READ_ALLOCATE (0x000000E0U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_NO_ALLOCATE (0x000000B0U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_READ_ALLOCATE (0x000000F0U) +#define RGX_CR_ACE_CTRL_MMU_DOMAIN_SHIFT (2U) +#define RGX_CR_ACE_CTRL_MMU_DOMAIN_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_ACE_CTRL_COH_DOMAIN_SHIFT (1U) +#define RGX_CR_ACE_CTRL_COH_DOMAIN_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_ACE_CTRL_COH_DOMAIN_INNER_SHAREABLE (0x00000000U) +#define RGX_CR_ACE_CTRL_COH_DOMAIN_OUTER_SHAREABLE (0x00000002U) +#define RGX_CR_ACE_CTRL_NON_COH_DOMAIN_SHIFT (0U) +#define RGX_CR_ACE_CTRL_NON_COH_DOMAIN_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_ACE_CTRL_NON_COH_DOMAIN_NON_SHAREABLE (0x00000000U) +#define RGX_CR_ACE_CTRL_NON_COH_DOMAIN_SYSTEM (0x00000001U) + + +/* + Register RGX_CR_SOC_AXI +*/ +#define RGX_CR_SOC_AXI (0xE338U) +#define RGX_CR_SOC_AXI_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_SHIFT (3U) +#define RGX_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_EN (0x00000008U) +#define RGX_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_SHIFT (2U) +#define RGX_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_EN (0x00000004U) +#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_SHIFT (0U) +#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_NO_COHERENCY (0x00000000U) +#define 
RGX_CR_SOC_AXI_COHERENCY_SUPPORT_ACE_LITE_COHERENCY (0x00000001U) +#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY (0x00000002U) + + +/* + Register RGX_CR_CONTEXT_MAPPING0 +*/ +#define RGX_CR_CONTEXT_MAPPING0 (0xF078U) +#define RGX_CR_CONTEXT_MAPPING0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_CR_CONTEXT_MAPPING0_RAY_SHIFT (32U) +#define RGX_CR_CONTEXT_MAPPING0_RAY_CLRMSK (IMG_UINT64_C(0xFFFFFF00FFFFFFFF)) +#define RGX_CR_CONTEXT_MAPPING0_2D_SHIFT (24U) +#define RGX_CR_CONTEXT_MAPPING0_2D_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF)) +#define RGX_CR_CONTEXT_MAPPING0_CDM_SHIFT (16U) +#define RGX_CR_CONTEXT_MAPPING0_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) +#define RGX_CR_CONTEXT_MAPPING0_3D_SHIFT (8U) +#define RGX_CR_CONTEXT_MAPPING0_3D_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) +#define RGX_CR_CONTEXT_MAPPING0_GEOM_SHIFT (0U) +#define RGX_CR_CONTEXT_MAPPING0_GEOM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_CONTEXT_MAPPING2 +*/ +#define RGX_CR_CONTEXT_MAPPING2 (0xF088U) +#define RGX_CR_CONTEXT_MAPPING2_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF)) +#define RGX_CR_CONTEXT_MAPPING2_ALIST0_SHIFT (16U) +#define RGX_CR_CONTEXT_MAPPING2_ALIST0_CLRMSK (0xFF00FFFFU) +#define RGX_CR_CONTEXT_MAPPING2_TE0_SHIFT (8U) +#define RGX_CR_CONTEXT_MAPPING2_TE0_CLRMSK (0xFFFF00FFU) +#define RGX_CR_CONTEXT_MAPPING2_VCE0_SHIFT (0U) +#define RGX_CR_CONTEXT_MAPPING2_VCE0_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_CONTEXT_MAPPING3 +*/ +#define RGX_CR_CONTEXT_MAPPING3 (0xF090U) +#define RGX_CR_CONTEXT_MAPPING3_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF)) +#define RGX_CR_CONTEXT_MAPPING3_ALIST1_SHIFT (16U) +#define RGX_CR_CONTEXT_MAPPING3_ALIST1_CLRMSK (0xFF00FFFFU) +#define RGX_CR_CONTEXT_MAPPING3_TE1_SHIFT (8U) +#define RGX_CR_CONTEXT_MAPPING3_TE1_CLRMSK (0xFFFF00FFU) +#define RGX_CR_CONTEXT_MAPPING3_VCE1_SHIFT (0U) +#define RGX_CR_CONTEXT_MAPPING3_VCE1_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_CONTEXT_MAPPING4 +*/ +#define 
RGX_CR_CONTEXT_MAPPING4 (0xF210U) +#define RGX_CR_CONTEXT_MAPPING4_MASKFULL (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) +#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_SHIFT (40U) +#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_CLRMSK (IMG_UINT64_C(0xFFFF00FFFFFFFFFF)) +#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_SHIFT (32U) +#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFF00FFFFFFFF)) +#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_SHIFT (24U) +#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF)) +#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_SHIFT (16U) +#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) +#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_SHIFT (8U) +#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) +#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_SHIFT (0U) +#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_MERCER_SOFT_RESET +*/ +#define RGX_CR_MERCER_SOFT_RESET (0x0630U) +#define RGX_CR_MERCER_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER2_SHIFT (62U) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER2_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER2_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER1_SHIFT (61U) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER1_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER1_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER0_SHIFT (60U) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER0_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER0_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER2_SHIFT (59U) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER2_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF)) 
+#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER2_EN (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER1_SHIFT (58U) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER1_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER1_EN (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER0_SHIFT (57U) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER0_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER0_EN (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER2_SHIFT (56U) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER2_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER2_EN (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER1_SHIFT (55U) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER1_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER1_EN (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER0_SHIFT (54U) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER0_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER0_EN (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER2_SHIFT (53U) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER2_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER2_EN (IMG_UINT64_C(0x0020000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER1_SHIFT (52U) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER1_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER1_EN (IMG_UINT64_C(0x0010000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER0_SHIFT (51U) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER0_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER0_EN (IMG_UINT64_C(0x0008000000000000)) +#define 
RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER2_SHIFT (50U) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER2_EN (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER1_SHIFT (49U) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER1_EN (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER0_SHIFT (48U) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER0_EN (IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER2_SHIFT (47U) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER2_EN (IMG_UINT64_C(0x0000800000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER1_SHIFT (46U) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER1_EN (IMG_UINT64_C(0x0000400000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER0_SHIFT (45U) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER0_EN (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER2_SHIFT (44U) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER2_EN (IMG_UINT64_C(0x0000100000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER1_SHIFT (43U) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER1_EN (IMG_UINT64_C(0x0000080000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER0_SHIFT (42U) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER0_CLRMSK 
(IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER0_EN (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER2_SHIFT (41U) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER2_EN (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER1_SHIFT (40U) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER1_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER0_SHIFT (39U) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER0_EN (IMG_UINT64_C(0x0000008000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER2_SHIFT (38U) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER2_EN (IMG_UINT64_C(0x0000004000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER1_SHIFT (37U) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER1_EN (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER0_SHIFT (36U) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER0_EN (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER2_SHIFT (35U) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER2_EN (IMG_UINT64_C(0x0000000800000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER1_SHIFT (34U) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER1_EN 
(IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER0_SHIFT (33U) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER0_EN (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER2_SHIFT (32U) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER2_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER1_SHIFT (31U) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER1_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER0_SHIFT (30U) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER0_EN (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER2_SHIFT (29U) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER2_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER1_SHIFT (28U) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER1_EN (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER0_SHIFT (27U) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER0_EN (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER2_SHIFT (26U) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER2_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER1_SHIFT (25U) +#define 
RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER1_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER0_SHIFT (24U) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER0_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER2_SHIFT (23U) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER2_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER1_SHIFT (22U) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER1_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER0_SHIFT (21U) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER0_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER2_SHIFT (20U) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER2_EN (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER1_SHIFT (19U) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER1_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER0_SHIFT (18U) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER0_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER2_SHIFT (17U) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER2_EN 
(IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER1_SHIFT (16U) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER1_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER0_SHIFT (15U) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER0_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER2_SHIFT (14U) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER2_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER1_SHIFT (13U) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER1_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER0_SHIFT (12U) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER0_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER2_SHIFT (11U) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER2_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER1_SHIFT (10U) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER1_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER0_SHIFT (9U) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER0_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER2_SHIFT (8U) +#define 
RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER2_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER1_SHIFT (7U) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER1_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER0_SHIFT (6U) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER0_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER2_SHIFT (5U) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER2_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER1_SHIFT (4U) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER1_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER0_SHIFT (3U) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER0_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER2_SHIFT (2U) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER2_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER1_SHIFT (1U) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER1_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER0_SHIFT (0U) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER0_EN 
(IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_TEXAS_SOFT_RESET +*/ +#define RGX_CR_TEXAS_SOFT_RESET (0x0640U) +#define RGX_CR_TEXAS_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TEXAS_SOFT_RESET_SPU31_SHIFT (31U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU31_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU31_EN (0x80000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU30_SHIFT (30U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU30_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU30_EN (0x40000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU29_SHIFT (29U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU29_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU29_EN (0x20000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU28_SHIFT (28U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU28_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU28_EN (0x10000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU27_SHIFT (27U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU27_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU27_EN (0x08000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU26_SHIFT (26U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU26_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU26_EN (0x04000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU25_SHIFT (25U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU25_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU25_EN (0x02000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU24_SHIFT (24U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU24_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU24_EN (0x01000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU23_SHIFT (23U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU23_CLRMSK (0xFF7FFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU23_EN (0x00800000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU22_SHIFT (22U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU22_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU22_EN (0x00400000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU21_SHIFT (21U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU21_CLRMSK (0xFFDFFFFFU) +#define 
RGX_CR_TEXAS_SOFT_RESET_SPU21_EN (0x00200000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU20_SHIFT (20U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU20_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU20_EN (0x00100000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU19_SHIFT (19U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU19_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU19_EN (0x00080000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU18_SHIFT (18U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU18_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU18_EN (0x00040000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU17_SHIFT (17U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU17_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU17_EN (0x00020000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU16_SHIFT (16U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU16_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU16_EN (0x00010000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU15_SHIFT (15U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU15_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU15_EN (0x00008000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU14_SHIFT (14U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU14_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU14_EN (0x00004000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU13_SHIFT (13U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU13_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU13_EN (0x00002000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU12_SHIFT (12U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU12_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU12_EN (0x00001000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU11_SHIFT (11U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU11_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU11_EN (0x00000800U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU10_SHIFT (10U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU10_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU10_EN (0x00000400U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU9_SHIFT (9U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU9_CLRMSK (0xFFFFFDFFU) 
+#define RGX_CR_TEXAS_SOFT_RESET_SPU9_EN (0x00000200U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU8_SHIFT (8U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU8_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU8_EN (0x00000100U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU7_SHIFT (7U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU7_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU7_EN (0x00000080U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU6_SHIFT (6U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU6_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU6_EN (0x00000040U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU5_SHIFT (5U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU5_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU5_EN (0x00000020U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU4_SHIFT (4U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU4_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU4_EN (0x00000010U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU3_SHIFT (3U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU3_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU3_EN (0x00000008U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU2_SHIFT (2U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU2_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU2_EN (0x00000004U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU1_SHIFT (1U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU1_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU1_EN (0x00000002U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU0_SHIFT (0U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU0_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU0_EN (0x00000001U) + + +/* + Register RGX_CR_SWIFT_SOFT_RESET +*/ +#define RGX_CR_SWIFT_SOFT_RESET (0x0650U) +#define RGX_CR_SWIFT_SOFT_RESET__ALRIF_GT0__MASKFULL (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT2_SHIFT (62U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT2_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT2_EN 
(IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT1_SHIFT (61U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT1_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT1_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT0_SHIFT (60U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT0_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT0_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT2_SHIFT (59U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT2_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT2_EN (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT1_SHIFT (58U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT1_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT1_EN (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT0_SHIFT (57U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT0_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT0_EN (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT2_SHIFT (56U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT2_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT2_EN (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT1_SHIFT (55U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT1_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT1_EN (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT0_SHIFT (54U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT0_EN (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT2_SHIFT (53U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT2_CLRMSK 
(IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT2_EN (IMG_UINT64_C(0x0020000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT1_SHIFT (52U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT1_EN (IMG_UINT64_C(0x0010000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT0_SHIFT (51U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT0_EN (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT2_SHIFT (50U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT2_EN (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT1_SHIFT (49U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT1_EN (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT0_SHIFT (48U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT0_EN (IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT2_SHIFT (47U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT2_EN (IMG_UINT64_C(0x0000800000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT1_SHIFT (46U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT1_EN (IMG_UINT64_C(0x0000400000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT0_SHIFT (45U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT0_EN (IMG_UINT64_C(0x0000200000000000)) +#define 
RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT2_SHIFT (44U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT2_EN (IMG_UINT64_C(0x0000100000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT1_SHIFT (43U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT1_EN (IMG_UINT64_C(0x0000080000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT0_SHIFT (42U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT0_EN (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT2_SHIFT (41U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT2_EN (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT1_SHIFT (40U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT1_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT0_SHIFT (39U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT0_EN (IMG_UINT64_C(0x0000008000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT2_SHIFT (38U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT2_EN (IMG_UINT64_C(0x0000004000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT1_SHIFT (37U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT1_EN (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT0_SHIFT (36U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) +#define 
RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT0_EN (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT2_SHIFT (35U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT2_EN (IMG_UINT64_C(0x0000000800000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT1_SHIFT (34U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT1_EN (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT0_SHIFT (33U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT0_EN (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT2_SHIFT (32U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT2_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU31_SHIFT (31U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU31_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU31_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT1_SHIFT (31U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT1_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU30_SHIFT (30U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU30_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU30_EN (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT0_SHIFT (30U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT0_EN (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU29_SHIFT (29U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU29_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU29_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT2_SHIFT (29U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT2_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU28_SHIFT (28U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU28_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU28_EN (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT1_SHIFT (28U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT1_EN (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU27_SHIFT (27U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU27_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU27_EN (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT0_SHIFT (27U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT0_EN (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU26_SHIFT (26U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU26_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU26_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT2_SHIFT (26U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT2_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU25_SHIFT (25U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU25_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU25_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT1_SHIFT (25U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) 
+#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT1_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU24_SHIFT (24U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU24_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU24_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT0_SHIFT (24U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT0_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU23_SHIFT (23U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU23_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU23_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT2_SHIFT (23U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT2_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU22_SHIFT (22U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU22_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU22_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT1_SHIFT (22U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT1_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU21_SHIFT (21U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU21_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU21_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT0_SHIFT (21U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT0_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SHIFT (20U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define 
RGX_CR_SWIFT_SOFT_RESET_SPU20_EN (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT2_SHIFT (20U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT2_EN (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SHIFT (19U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT1_SHIFT (19U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT1_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SHIFT (18U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT0_SHIFT (18U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT0_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SHIFT (17U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT2_SHIFT (17U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT2_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SHIFT (16U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT1_SHIFT (16U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define 
RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT1_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SHIFT (15U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT0_SHIFT (15U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT0_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SHIFT (14U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT2_SHIFT (14U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT2_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SHIFT (13U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT1_SHIFT (13U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT1_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SHIFT (12U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT0_SHIFT (12U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT0_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SHIFT (11U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_EN 
(IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT2_SHIFT (11U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT2_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SHIFT (10U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT1_SHIFT (10U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT1_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SHIFT (9U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT0_SHIFT (9U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT0_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SHIFT (8U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT2_SHIFT (8U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT2_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SHIFT (7U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT1_SHIFT (7U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT1_EN (IMG_UINT64_C(0x0000000000000080)) +#define 
RGX_CR_SWIFT_SOFT_RESET_SPU6_SHIFT (6U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT0_SHIFT (6U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT0_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SHIFT (5U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT2_SHIFT (5U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT2_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SHIFT (4U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT1_SHIFT (4U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT1_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SHIFT (3U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT0_SHIFT (3U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT0_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SHIFT (2U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT2_SHIFT (2U) +#define 
RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT2_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SHIFT (1U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT1_SHIFT (1U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT1_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SHIFT (0U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT0_SHIFT (0U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT0_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_RAC_SOFT_RESET +*/ +#define RGX_CR_RAC_SOFT_RESET (0x0660U) +#define RGX_CR_RAC_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU31_SHIFT (31U) +#define RGX_CR_RAC_SOFT_RESET_SPU31_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_RAC_SOFT_RESET_SPU31_EN (0x80000000U) +#define RGX_CR_RAC_SOFT_RESET_SPU30_SHIFT (30U) +#define RGX_CR_RAC_SOFT_RESET_SPU30_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_RAC_SOFT_RESET_SPU30_EN (0x40000000U) +#define RGX_CR_RAC_SOFT_RESET_SPU29_SHIFT (29U) +#define RGX_CR_RAC_SOFT_RESET_SPU29_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_RAC_SOFT_RESET_SPU29_EN (0x20000000U) +#define RGX_CR_RAC_SOFT_RESET_SPU28_SHIFT (28U) +#define RGX_CR_RAC_SOFT_RESET_SPU28_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_RAC_SOFT_RESET_SPU28_EN (0x10000000U) +#define RGX_CR_RAC_SOFT_RESET_SPU27_SHIFT (27U) +#define RGX_CR_RAC_SOFT_RESET_SPU27_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_RAC_SOFT_RESET_SPU27_EN (0x08000000U) +#define 
RGX_CR_RAC_SOFT_RESET_SPU26_SHIFT (26U) +#define RGX_CR_RAC_SOFT_RESET_SPU26_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_RAC_SOFT_RESET_SPU26_EN (0x04000000U) +#define RGX_CR_RAC_SOFT_RESET_SPU25_SHIFT (25U) +#define RGX_CR_RAC_SOFT_RESET_SPU25_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_RAC_SOFT_RESET_SPU25_EN (0x02000000U) +#define RGX_CR_RAC_SOFT_RESET_SPU24_SHIFT (24U) +#define RGX_CR_RAC_SOFT_RESET_SPU24_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_RAC_SOFT_RESET_SPU24_EN (0x01000000U) +#define RGX_CR_RAC_SOFT_RESET_SPU23_SHIFT (23U) +#define RGX_CR_RAC_SOFT_RESET_SPU23_CLRMSK (0xFF7FFFFFU) +#define RGX_CR_RAC_SOFT_RESET_SPU23_EN (0x00800000U) +#define RGX_CR_RAC_SOFT_RESET_SPU22_SHIFT (22U) +#define RGX_CR_RAC_SOFT_RESET_SPU22_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_RAC_SOFT_RESET_SPU22_EN (0x00400000U) +#define RGX_CR_RAC_SOFT_RESET_SPU21_SHIFT (21U) +#define RGX_CR_RAC_SOFT_RESET_SPU21_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_RAC_SOFT_RESET_SPU21_EN (0x00200000U) +#define RGX_CR_RAC_SOFT_RESET_SPU20_SHIFT (20U) +#define RGX_CR_RAC_SOFT_RESET_SPU20_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_RAC_SOFT_RESET_SPU20_EN (0x00100000U) +#define RGX_CR_RAC_SOFT_RESET_SPU19_SHIFT (19U) +#define RGX_CR_RAC_SOFT_RESET_SPU19_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_RAC_SOFT_RESET_SPU19_EN (0x00080000U) +#define RGX_CR_RAC_SOFT_RESET_SPU18_SHIFT (18U) +#define RGX_CR_RAC_SOFT_RESET_SPU18_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_RAC_SOFT_RESET_SPU18_EN (0x00040000U) +#define RGX_CR_RAC_SOFT_RESET_SPU17_SHIFT (17U) +#define RGX_CR_RAC_SOFT_RESET_SPU17_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_RAC_SOFT_RESET_SPU17_EN (0x00020000U) +#define RGX_CR_RAC_SOFT_RESET_SPU16_SHIFT (16U) +#define RGX_CR_RAC_SOFT_RESET_SPU16_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_RAC_SOFT_RESET_SPU16_EN (0x00010000U) +#define RGX_CR_RAC_SOFT_RESET_SPU15_SHIFT (15U) +#define RGX_CR_RAC_SOFT_RESET_SPU15_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_RAC_SOFT_RESET_SPU15_EN (0x00008000U) +#define RGX_CR_RAC_SOFT_RESET_SPU14_SHIFT (14U) +#define 
RGX_CR_RAC_SOFT_RESET_SPU14_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_RAC_SOFT_RESET_SPU14_EN (0x00004000U) +#define RGX_CR_RAC_SOFT_RESET_SPU13_SHIFT (13U) +#define RGX_CR_RAC_SOFT_RESET_SPU13_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_RAC_SOFT_RESET_SPU13_EN (0x00002000U) +#define RGX_CR_RAC_SOFT_RESET_SPU12_SHIFT (12U) +#define RGX_CR_RAC_SOFT_RESET_SPU12_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_RAC_SOFT_RESET_SPU12_EN (0x00001000U) +#define RGX_CR_RAC_SOFT_RESET_SPU11_SHIFT (11U) +#define RGX_CR_RAC_SOFT_RESET_SPU11_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_RAC_SOFT_RESET_SPU11_EN (0x00000800U) +#define RGX_CR_RAC_SOFT_RESET_SPU10_SHIFT (10U) +#define RGX_CR_RAC_SOFT_RESET_SPU10_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_RAC_SOFT_RESET_SPU10_EN (0x00000400U) +#define RGX_CR_RAC_SOFT_RESET_SPU9_SHIFT (9U) +#define RGX_CR_RAC_SOFT_RESET_SPU9_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_RAC_SOFT_RESET_SPU9_EN (0x00000200U) +#define RGX_CR_RAC_SOFT_RESET_SPU8_SHIFT (8U) +#define RGX_CR_RAC_SOFT_RESET_SPU8_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_RAC_SOFT_RESET_SPU8_EN (0x00000100U) +#define RGX_CR_RAC_SOFT_RESET_SPU7_SHIFT (7U) +#define RGX_CR_RAC_SOFT_RESET_SPU7_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_RAC_SOFT_RESET_SPU7_EN (0x00000080U) +#define RGX_CR_RAC_SOFT_RESET_SPU6_SHIFT (6U) +#define RGX_CR_RAC_SOFT_RESET_SPU6_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_RAC_SOFT_RESET_SPU6_EN (0x00000040U) +#define RGX_CR_RAC_SOFT_RESET_SPU5_SHIFT (5U) +#define RGX_CR_RAC_SOFT_RESET_SPU5_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_RAC_SOFT_RESET_SPU5_EN (0x00000020U) +#define RGX_CR_RAC_SOFT_RESET_SPU4_SHIFT (4U) +#define RGX_CR_RAC_SOFT_RESET_SPU4_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_RAC_SOFT_RESET_SPU4_EN (0x00000010U) +#define RGX_CR_RAC_SOFT_RESET_SPU3_SHIFT (3U) +#define RGX_CR_RAC_SOFT_RESET_SPU3_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_RAC_SOFT_RESET_SPU3_EN (0x00000008U) +#define RGX_CR_RAC_SOFT_RESET_SPU2_SHIFT (2U) +#define RGX_CR_RAC_SOFT_RESET_SPU2_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_RAC_SOFT_RESET_SPU2_EN 
(0x00000004U) +#define RGX_CR_RAC_SOFT_RESET_SPU1_SHIFT (1U) +#define RGX_CR_RAC_SOFT_RESET_SPU1_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_RAC_SOFT_RESET_SPU1_EN (0x00000002U) +#define RGX_CR_RAC_SOFT_RESET_SPU0_SHIFT (0U) +#define RGX_CR_RAC_SOFT_RESET_SPU0_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_RAC_SOFT_RESET_SPU0_EN (0x00000001U) + + +#endif /* RGX_CR_DEFS_KM_H */ + +/***************************************************************************** + End of file (rgx_cr_defs_km.h) +*****************************************************************************/ + diff --git a/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/rgxdefs_km.h b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/rgxdefs_km.h new file mode 100644 index 000000000000..48a8d0f89599 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/rgxdefs_km.h @@ -0,0 +1,339 @@ +/*************************************************************************/ /*! +@Title Rogue hw definitions (kernel mode) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXDEFS_KM_H +#define RGXDEFS_KM_H + +#if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER) +#include RGX_BVNC_CORE_KM_HEADER +#include RGX_BNC_CONFIG_KM_HEADER +#endif + +#define IMG_EXPLICIT_INCLUDE_HWDEFS +#if defined(__KERNEL__) +#include "rgx_cr_defs_km.h" +#include "tmp_rgx_cr_defs_riscv_km.h" +#endif +#undef IMG_EXPLICIT_INCLUDE_HWDEFS + +#include "rgx_heap_firmware.h" + +/* The following Macros are picked up through BVNC headers for no hardware + * operations to be compatible with old build infrastructure. 
+ */ +#if defined(NO_HARDWARE) +/****************************************************************************** + * Check for valid B.X.N.C + *****************************************************************************/ +#if !defined(RGX_BVNC_KM_B) || !defined(RGX_BVNC_KM_V) || !defined(RGX_BVNC_KM_N) || !defined(RGX_BVNC_KM_C) +#error "Need to specify BVNC (RGX_BVNC_KM_B, RGX_BVNC_KM_V, RGX_BVNC_KM_N and RGX_BVNC_C)" +#endif + +/* Check core/config compatibility */ +#if (RGX_BVNC_KM_B != RGX_BNC_KM_B) || (RGX_BVNC_KM_N != RGX_BNC_KM_N) || (RGX_BVNC_KM_C != RGX_BNC_KM_C) +#error "BVNC headers are mismatching (KM core/config)" +#endif +#endif + +/****************************************************************************** + * RGX Version name + *****************************************************************************/ +#define RGX_BVNC_KM_ST2(S) #S +#define RGX_BVNC_KM_ST(S) RGX_BVNC_KM_ST2(S) +#define RGX_BVNC_KM RGX_BVNC_KM_ST(RGX_BVNC_KM_B) "." RGX_BVNC_KM_ST(RGX_BVNC_KM_V) "." RGX_BVNC_KM_ST(RGX_BVNC_KM_N) "." 
RGX_BVNC_KM_ST(RGX_BVNC_KM_C) +#define RGX_BVNC_KM_V_ST RGX_BVNC_KM_ST(RGX_BVNC_KM_V) + +/* Maximum string size is [bb.vvvp.nnnn.cccc\0], includes null char */ +#define RGX_BVNC_STR_SIZE_MAX (2+1+4+1+4+1+4+1) +#define RGX_BVNC_STR_FMTSPEC "%u.%u.%u.%u" +#define RGX_BVNC_STRP_FMTSPEC "%u.%up.%u.%u" + + +/****************************************************************************** + * RGX Defines + *****************************************************************************/ + +#define BVNC_FIELD_MASK ((1 << BVNC_FIELD_WIDTH) - 1) +#define C_POSITION (0) +#define N_POSITION ((C_POSITION) + (BVNC_FIELD_WIDTH)) +#define V_POSITION ((N_POSITION) + (BVNC_FIELD_WIDTH)) +#define B_POSITION ((V_POSITION) + (BVNC_FIELD_WIDTH)) + +#define B_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (B_POSITION))) +#define V_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (V_POSITION))) +#define N_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (N_POSITION))) +#define C_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (C_POSITION))) + +#define GET_B(x) (((x) & (B_POSTION_MASK)) >> (B_POSITION)) +#define GET_V(x) (((x) & (V_POSTION_MASK)) >> (V_POSITION)) +#define GET_N(x) (((x) & (N_POSTION_MASK)) >> (N_POSITION)) +#define GET_C(x) (((x) & (C_POSTION_MASK)) >> (C_POSITION)) + +#define BVNC_PACK(B,V,N,C) ((((IMG_UINT64)B)) << (B_POSITION) | \ + (((IMG_UINT64)V)) << (V_POSITION) | \ + (((IMG_UINT64)N)) << (N_POSITION) | \ + (((IMG_UINT64)C)) << (C_POSITION) \ + ) + +#define RGX_CR_CORE_ID_CONFIG_N_SHIFT (8U) +#define RGX_CR_CORE_ID_CONFIG_C_SHIFT (0U) + +#define RGX_CR_CORE_ID_CONFIG_N_CLRMSK (0XFFFF00FFU) +#define RGX_CR_CORE_ID_CONFIG_C_CLRMSK (0XFFFFFF00U) + +/* The default number of OSID is 1, higher number implies VZ enabled firmware */ +#if !defined(RGXFW_NATIVE) && defined(PVRSRV_VZ_NUM_OSID) && (PVRSRV_VZ_NUM_OSID + 1U > 1U) +#define RGXFW_NUM_OS PVRSRV_VZ_NUM_OSID +#else +#define RGXFW_NUM_OS 1U +#endif + +#define RGXFW_MAX_NUM_OS (8U) +#define RGXFW_HOST_OS (0U) +#define 
RGXFW_GUEST_OSID_START (1U) + +#define RGXFW_THREAD_0 (0U) +#define RGXFW_THREAD_1 (1U) + +/* META cores (required for the RGX_FEATURE_META) */ +#define MTP218 (1) +#define MTP219 (2) +#define LTP218 (3) +#define LTP217 (4) + +/* META Core memory feature depending on META variants */ +#define RGX_META_COREMEM_32K (32*1024) +#define RGX_META_COREMEM_48K (48*1024) +#define RGX_META_COREMEM_64K (64*1024) +#define RGX_META_COREMEM_96K (96*1024) +#define RGX_META_COREMEM_128K (128*1024) +#define RGX_META_COREMEM_256K (256*1024) + +#if !defined(__KERNEL__) +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(RGX_FEATURE_META_DMA) +#undef SUPPORT_META_COREMEM +#undef RGX_FEATURE_META_COREMEM_SIZE +#define RGX_FEATURE_META_COREMEM_SIZE (0) +#define RGX_META_COREMEM_SIZE (0) +#elif defined(RGX_FEATURE_META_COREMEM_SIZE) +#define RGX_META_COREMEM_SIZE (RGX_FEATURE_META_COREMEM_SIZE*1024U) +#else +#define RGX_META_COREMEM_SIZE (0) +#endif + +#if RGX_META_COREMEM_SIZE != 0 +#define RGX_META_COREMEM +#define RGX_META_COREMEM_CODE +#define RGX_META_COREMEM_DATA +#endif +#endif + +#define GET_ROGUE_CACHE_LINE_SIZE(x) ((((IMG_INT32)x) > 0) ? 
((x)/8) : (0)) + + +#define MAX_HW_TA3DCONTEXTS 2U + +#define RGX_CR_CLK_CTRL0_ALL_ON (IMG_UINT64_C(0x5555555555555555)&RGX_CR_CLK_CTRL0_MASKFULL) +#define RGX_CR_CLK_CTRL0_ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&RGX_CR_CLK_CTRL0_MASKFULL) +#define RGX_CR_CLK_CTRL1_ALL_ON (IMG_UINT64_C(0x5555555555555555)&RGX_CR_CLK_CTRL1_MASKFULL) +#define RGX_CR_CLK_CTRL1_ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&RGX_CR_CLK_CTRL1_MASKFULL) +#define RGX_CR_CLK_CTRL2_ALL_ON (IMG_UINT64_C(0x5555555555555555)&RGX_CR_CLK_CTRL2_MASKFULL) +#define RGX_CR_CLK_CTRL2_ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&RGX_CR_CLK_CTRL2_MASKFULL) + +#define RGX_CR_MERCER0_SOFT_RESET_SPU_EN (RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER0_EN) + +#define RGX_CR_MERCER1_SOFT_RESET_SPU_EN (RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER1_EN | \ + 
RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER1_EN) + +#define RGX_CR_MERCER2_SOFT_RESET_SPU_EN (RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER2_EN) + + +/* SOFT_RESET steps as defined in the TRM */ +#define RGX_SOFT_RESET_JONES (RGX_CR_SOFT_RESET_PM_EN | \ + RGX_CR_SOFT_RESET_ISP_EN) +#define RGX_SOFT_RESET_JONES_ALL (RGX_SOFT_RESET_JONES | \ + RGX_CR_SOFT_RESET_GARTEN_EN) +#define 
RGX_SOFT_RESET_EXTRA (RGX_CR_SOFT_RESET_PIXEL_EN | \ + RGX_CR_SOFT_RESET_VERTEX_EN | \ + RGX_CR_SOFT_RESET_GEO_VERTEX_EN | \ + RGX_CR_SOFT_RESET_GEO_SHARED_EN | \ + RGX_CR_SOFT_RESET_COMPUTE_EN | \ + RGX_CR_SOFT_RESET_TDM_EN) +#define RGX_SOFT_RESET_FROM_WITHIN_CORE (RGX_CR_SOFT_RESET_MASKFULL ^ \ + (RGX_CR_SOFT_RESET_GARTEN_EN | \ + RGX_CR_SOFT_RESET_BIF_JONES_EN | \ + RGX_CR_SOFT_RESET_SLC_EN)) + + +#define RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT (12U) +#define RGX_BIF_PM_PHYSICAL_PAGE_SIZE (1U << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT) + +#define RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT (14U) +#define RGX_BIF_PM_VIRTUAL_PAGE_SIZE (1U << RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT) + +#define RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE (32U) + +/* To get the number of required Bernado/Phantom(s), divide + * the number of clusters by 4 and round up + */ +#define RGX_REQ_NUM_PHANTOMS(CLUSTERS) ((CLUSTERS + 3U) / 4U) +#define RGX_REQ_NUM_BERNADOS(CLUSTERS) ((CLUSTERS + 3U) / 4U) +#define RGX_REQ_NUM_BLACKPEARLS(CLUSTERS) ((CLUSTERS + 3U) / 4U) + +#if !defined(__KERNEL__) +# define RGX_NUM_PHANTOMS (RGX_REQ_NUM_PHANTOMS(RGX_FEATURE_NUM_CLUSTERS)) +#endif + +/* + * META second thread feature depending on META variants and + * available CoreMem + */ +#if defined(RGX_FEATURE_META) && (RGX_FEATURE_META == MTP218 || RGX_FEATURE_META == MTP219) && (RGX_FEATURE_META_COREMEM_SIZE >= 96) +#define RGXFW_META_SUPPORT_2ND_THREAD +#endif + + +/* + * FW MMU contexts + */ +#if defined(SUPPORT_TRUSTED_DEVICE) +#define MMU_CONTEXT_MAPPING_FWPRIV (0x0) /* FW code/private data */ +#define MMU_CONTEXT_MAPPING_FWIF (0x7) /* Host/FW data */ +#else +#define MMU_CONTEXT_MAPPING_FWPRIV (0x0) +#define MMU_CONTEXT_MAPPING_FWIF (0x0) +#endif + + +/* + * FBC clear color register defaults based on HW defaults + * non-YUV clear colour 0: 0x00000000 (encoded as ch3,2,1,0) + * non-YUV clear colour 1: 0x01000000 (encoded as ch3,2,1,0) + * YUV clear colour 0: 0x000 000 (encoded as UV Y) + * YUV clear colour 1: 0x000 3FF 
(encoded as UV Y) + */ +#define RGX_FBC_CC_DEFAULT (0x0100000000000000) +#define RGX_FBC_CC_YUV_DEFAULT (0x000003FF00000000) + +/* + * Virtualisation definitions + */ + +#define RGX_VIRTUALISATION_REG_SIZE_PER_OS (RGX_CR_MTS_SCHEDULE1 - RGX_CR_MTS_SCHEDULE) + + +/* GPU CR timer tick in GPU cycles */ +#define RGX_CRTIME_TICK_IN_CYCLES (256U) + +#define ROGUE_RENDERSIZE_MAXX (RGX_FEATURE_RENDER_TARGET_XY_MAX) +#define ROGUE_RENDERSIZE_MAXY (RGX_FEATURE_RENDER_TARGET_XY_MAX) + +/* + * Register used by the FW to track the current boot stage (not used in MIPS) + */ +#define RGX_FW_BOOT_STAGE_REGISTER (RGX_CR_SCRATCH14) + +/* + * Define used to determine whether or not SLC range-based flush/invalidate + * interface is supported. + */ +#define RGX_SRV_SLC_RANGEBASED_CFI_SUPPORTED 1 + +#endif /* RGXDEFS_KM_H */ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/rgxmmudefs_km.h b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/rgxmmudefs_km.h new file mode 100644 index 000000000000..23576a7e1026 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/rgxmmudefs_km.h @@ -0,0 +1,426 @@ +/*************************************************************************/ /*! +@Title Hardware definition file rgxmmudefs_km.h +@Brief The file contains auto-generated hardware definitions without + BVNC-specific compile time conditionals. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +/* **** Autogenerated C -- do not edit **** */ + +/* + * rogue_bif.def + */ + + +#ifndef RGXMMUDEFS_KM_H +#define RGXMMUDEFS_KM_H + +#include "img_types.h" +#include "img_defs.h" + + +#define RGXMMUDEFS_KM_REVISION 0 + +/* +Number of OSID mappings in each register +*/ +#define OSID_CTXT_MAPPING_REGISTERS_CONSTANTS_OSID_CTXT_MAPPING_NUM_ENTRIES_VALUE (0x00000010U) + + +/* +Separation of each OSID mapping within the register in bits +*/ +#define OSID_CTXT_MAPPING_REGISTERS_CONSTANTS_OSID_CTXT_MAPPING_PER_OS_SHIFT_VALUE (0x00000004U) + + +/* +Mask of which bits within the separation amount represent the OSID value +*/ +#define OSID_CTXT_MAPPING_REGISTERS_CONSTANTS_OSID_CTXT_MAPPING_PER_OS_MASK_VALUE (0x00000007U) + + +/* +Encoding of DM (note value 0x6 not used) +*/ +#define RGX_BIF_DM_ENCODING_VERTEX (0x00000000U) +#define RGX_BIF_DM_ENCODING_PIXEL (0x00000001U) +#define RGX_BIF_DM_ENCODING_COMPUTE (0x00000002U) +#define RGX_BIF_DM_ENCODING_TLA (0x00000003U) +#define RGX_BIF_DM_ENCODING_PB_VCE (0x00000004U) +#define RGX_BIF_DM_ENCODING_PB_TE (0x00000005U) +#define RGX_BIF_DM_ENCODING_META (0x00000007U) +#define RGX_BIF_DM_ENCODING_HOST (0x00000008U) +#define RGX_BIF_DM_ENCODING_PM_ALIST (0x00000009U) + + +/* +Labelling of fields within virtual address +*/ +/* +Page Catalogue entry # +*/ +#define RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT (30U) +#define RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFF003FFFFFFF)) +/* +Page Directory entry # +*/ +#define RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT (21U) +#define RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC01FFFFF)) +/* +Page Table entry # +*/ +#define RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT (12U) +#define RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE00FFF)) + + +/* +Number of entries in a PC +*/ +#define RGX_MMUCTRL_ENTRIES_PC_VALUE (0x00000400U) + + +/* +Number of entries in a PD +*/ +#define RGX_MMUCTRL_ENTRIES_PD_VALUE 
(0x00000200U) + + +/* +Number of entries in a PT +*/ +#define RGX_MMUCTRL_ENTRIES_PT_VALUE (0x00000200U) + + +/* +Size in bits of the PC entries in memory +*/ +#define RGX_MMUCTRL_ENTRY_SIZE_PC_VALUE (0x00000020U) + + +/* +Size in bits of the PD entries in memory +*/ +#define RGX_MMUCTRL_ENTRY_SIZE_PD_VALUE (0x00000040U) + + +/* +Size in bits of the PT entries in memory +*/ +#define RGX_MMUCTRL_ENTRY_SIZE_PT_VALUE (0x00000040U) + + +/* +Encoding of page size field +*/ +#define RGX_MMUCTRL_PAGE_SIZE_MASK (0x00000007U) +#define RGX_MMUCTRL_PAGE_SIZE_4KB (0x00000000U) +#define RGX_MMUCTRL_PAGE_SIZE_16KB (0x00000001U) +#define RGX_MMUCTRL_PAGE_SIZE_64KB (0x00000002U) +#define RGX_MMUCTRL_PAGE_SIZE_256KB (0x00000003U) +#define RGX_MMUCTRL_PAGE_SIZE_1MB (0x00000004U) +#define RGX_MMUCTRL_PAGE_SIZE_2MB (0x00000005U) + + +/* +Range of bits used for 4KB Physical Page +*/ +#define RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT (12U) +#define RGX_MMUCTRL_PAGE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) + + +/* +Range of bits used for 16KB Physical Page +*/ +#define RGX_MMUCTRL_PAGE_16KB_RANGE_SHIFT (14U) +#define RGX_MMUCTRL_PAGE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000003FFF)) + + +/* +Range of bits used for 64KB Physical Page +*/ +#define RGX_MMUCTRL_PAGE_64KB_RANGE_SHIFT (16U) +#define RGX_MMUCTRL_PAGE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000FFFF)) + + +/* +Range of bits used for 256KB Physical Page +*/ +#define RGX_MMUCTRL_PAGE_256KB_RANGE_SHIFT (18U) +#define RGX_MMUCTRL_PAGE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000003FFFF)) + + +/* +Range of bits used for 1MB Physical Page +*/ +#define RGX_MMUCTRL_PAGE_1MB_RANGE_SHIFT (20U) +#define RGX_MMUCTRL_PAGE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000FFFFF)) + + +/* +Range of bits used for 2MB Physical Page +*/ +#define RGX_MMUCTRL_PAGE_2MB_RANGE_SHIFT (21U) +#define RGX_MMUCTRL_PAGE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00001FFFFF)) + + +/* +Range of bits used for PT Base Address for 4KB Physical Page +*/ +#define 
RGX_MMUCTRL_PT_BASE_4KB_RANGE_SHIFT (12U) +#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) + + +/* +Range of bits used for PT Base Address for 16KB Physical Page +*/ +#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_SHIFT (10U) +#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000003FF)) + + +/* +Range of bits used for PT Base Address for 64KB Physical Page +*/ +#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_SHIFT (8U) +#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000000FF)) + + +/* +Range of bits used for PT Base Address for 256KB Physical Page +*/ +#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_SHIFT (6U) +#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000003F)) + + +/* +Range of bits used for PT Base Address for 1MB Physical Page +*/ +#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_SHIFT (5U) +#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) + + +/* +Range of bits used for PT Base Address for 2MB Physical Page +*/ +#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_SHIFT (5U) +#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) + + +/* +Encoding of AXCACHE field +When the 64-byte converter is present and is splitting coherent requests, the following modes must not be used for coherent pages: +- Write-through No-allocate +- Write-through Read-allocate +- Write-back No-allocate +- Write-back Read-allocate +*/ +#define RGX_MMUCTRL_AXCACHE_MASK (0x0000000FU) +/* +Device Non-bufferable */ +#define RGX_MMUCTRL_AXCACHE_DEVNONBUFF (0x00000000U) +/* +Device Bufferable */ +#define RGX_MMUCTRL_AXCACHE_DEVBUFF (0x00000001U) +/* +Normal Non-cacheable Non-bufferable */ +#define RGX_MMUCTRL_AXCACHE_NORMNONBUFF (0x00000002U) +/* +Normal Non-cacheable Bufferable */ +#define RGX_MMUCTRL_AXCACHE_NORMBUFF (0x00000003U) +/* +Write-through No-allocate*/ +#define RGX_MMUCTRL_AXCACHE_WTNOALLOC (0x00000006U) +/* +Write-back No-allocate*/ +#define 
RGX_MMUCTRL_AXCACHE_WBNOALLOC (0x00000007U) +/* +Write-through Read-Allocate */ +#define RGX_MMUCTRL_AXCACHE_WTRALLOC (0x00000008U) +/* +Write-back Read-Allocate */ +#define RGX_MMUCTRL_AXCACHE_WBRALLOC (0x00000009U) +/* +Write-through Write-Allocate */ +#define RGX_MMUCTRL_AXCACHE_WTWALLOC (0x0000000aU) +/* +Write-back Write-Allocate */ +#define RGX_MMUCTRL_AXCACHE_WBWALLOC (0x0000000bU) +/* +Write-through Read/Write-Allocate */ +#define RGX_MMUCTRL_AXCACHE_WTRWALLOC (0x0000000eU) +/* +Write-back Read/Write-Allocate */ +#define RGX_MMUCTRL_AXCACHE_WBRWALLOC (0x0000000fU) + + +/* +Format of Page Table data +*/ +/* +PM/Meta protect bit +*/ +#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_SHIFT (62U) +#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN (IMG_UINT64_C(0x4000000000000000)) +/* +AxCACHE value +*/ +#define RGX_MMUCTRL_PT_DATA_AXCACHE_SHIFT (58U) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_CLRMSK (IMG_UINT64_C(0xC3FFFFFFFFFFFFFF)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_DEVNONBUFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_DEVBUFF (IMG_UINT64_C(0x0400000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_NORMNONBUFF (IMG_UINT64_C(0x0800000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_NORMBUFF (IMG_UINT64_C(0x0c00000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_WTNOALLOC (IMG_UINT64_C(0x1800000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_WBNOALLOC (IMG_UINT64_C(0x1c00000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_WTRALLOC (IMG_UINT64_C(0x2000000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_WBRALLOC (IMG_UINT64_C(0x2400000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_WTWALLOC (IMG_UINT64_C(0x2800000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_WBWALLOC (IMG_UINT64_C(0x2c00000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_WTRWALLOC (IMG_UINT64_C(0x3800000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_WBRWALLOC 
(IMG_UINT64_C(0x3c00000000000000)) +/* +Upper part of vp page field +*/ +#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_SHIFT (40U) +#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_CLRMSK (IMG_UINT64_C(0xFC0000FFFFFFFFFF)) +/* +Physical page address +*/ +#define RGX_MMUCTRL_PT_DATA_PAGE_SHIFT (12U) +#define RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +/* +Lower part of vp page field +*/ +#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_SHIFT (6U) +#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF03F)) +/* +Entry pending +*/ +#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_SHIFT (5U) +#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0x0000000000000020)) +/* +PM Src +*/ +#define RGX_MMUCTRL_PT_DATA_PM_SRC_SHIFT (4U) +#define RGX_MMUCTRL_PT_DATA_PM_SRC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_MMUCTRL_PT_DATA_PM_SRC_EN (IMG_UINT64_C(0x0000000000000010)) +/* +Cache Coherency bit +*/ +#define RGX_MMUCTRL_PT_DATA_CC_SHIFT (2U) +#define RGX_MMUCTRL_PT_DATA_CC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_MMUCTRL_PT_DATA_CC_EN (IMG_UINT64_C(0x0000000000000004)) +/* +Read only +*/ +#define RGX_MMUCTRL_PT_DATA_READ_ONLY_SHIFT (1U) +#define RGX_MMUCTRL_PT_DATA_READ_ONLY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_MMUCTRL_PT_DATA_READ_ONLY_EN (IMG_UINT64_C(0x0000000000000002)) +/* +Entry valid +*/ +#define RGX_MMUCTRL_PT_DATA_VALID_SHIFT (0U) +#define RGX_MMUCTRL_PT_DATA_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_MMUCTRL_PT_DATA_VALID_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* +Format of Page Directory data +*/ +/* +Entry pending +*/ +#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_SHIFT (40U) +#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0x0000010000000000)) +/* +Page Table base address +*/ +#define 
RGX_MMUCTRL_PD_DATA_PT_BASE_SHIFT (5U) +#define RGX_MMUCTRL_PD_DATA_PT_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) +/* +Page Size +*/ +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_SHIFT (1U) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF1)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB (IMG_UINT64_C(0x0000000000000000)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB (IMG_UINT64_C(0x0000000000000002)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB (IMG_UINT64_C(0x0000000000000004)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB (IMG_UINT64_C(0x0000000000000006)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB (IMG_UINT64_C(0x0000000000000008)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB (IMG_UINT64_C(0x000000000000000a)) +/* +Entry valid +*/ +#define RGX_MMUCTRL_PD_DATA_VALID_SHIFT (0U) +#define RGX_MMUCTRL_PD_DATA_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_MMUCTRL_PD_DATA_VALID_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* +Format of Page Catalogue data +*/ +/* +Page Catalogue base address +*/ +#define RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT (4U) +#define RGX_MMUCTRL_PC_DATA_PD_BASE_CLRMSK (0x0000000FU) +#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT (12U) +#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE (4096U) +/* +Entry pending +*/ +#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_SHIFT (1U) +#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_CLRMSK (0xFFFFFFFDU) +#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN (0x00000002U) +/* +Entry valid +*/ +#define RGX_MMUCTRL_PC_DATA_VALID_SHIFT (0U) +#define RGX_MMUCTRL_PC_DATA_VALID_CLRMSK (0xFFFFFFFEU) +#define RGX_MMUCTRL_PC_DATA_VALID_EN (0x00000001U) + + +#endif /* RGXMMUDEFS_KM_H */ + +/***************************************************************************** + End of file (rgxmmudefs_km.h) +*****************************************************************************/ + diff --git a/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/tmp_rgx_cr_defs_riscv_km.h 
b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/tmp_rgx_cr_defs_riscv_km.h new file mode 100644 index 000000000000..cad1371d02b1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/km/tmp_rgx_cr_defs_riscv_km.h @@ -0,0 +1,164 @@ +/*************************************************************************/ /*! +@Title Hardware definition file tmp_rgx_cr_defs_riscv_km.h +@Brief The file contains TEMPORARY hardware definitions without + BVNC-specific compile time conditionals. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef TMP_RGX_CR_DEFS_RISCV_KM_H +#define TMP_RGX_CR_DEFS_RISCV_KM_H + +#if !defined(IMG_EXPLICIT_INCLUDE_HWDEFS) +#error This file may only be included if explicitly defined +#endif + +#include "img_types.h" +#include "img_defs.h" + + +/* + Register TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG +*/ +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG (0x3000U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP1_CONFIG (0x3008U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP1_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP2_CONFIG (0x3010U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP2_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP3_CONFIG (0x3018U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP3_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP4_CONFIG (0x3020U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP4_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP5_CONFIG (0x3028U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP5_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP6_CONFIG (0x3030U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP6_CONFIG_MASKFULL 
(IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP7_CONFIG (0x3038U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP7_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP8_CONFIG (0x3040U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP8_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP9_CONFIG (0x3048U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP9_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP10_CONFIG (0x3050U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP10_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP11_CONFIG (0x3058U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP11_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP12_CONFIG (0x3060U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP12_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP13_CONFIG (0x3068U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP13_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP14_CONFIG (0x3070U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP14_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP15_CONFIG (0x3078U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP15_CONFIG_MASKFULL (IMG_UINT64_C(0x003FFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_TRUSTED_SHIFT (62) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFE)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_LOAD_STORE_SHIFT (61) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_LOAD_STORE_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_LOAD_STORE_EN (IMG_UINT64_C(0x2000000000000000)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_FETCH_SHIFT (60) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_FETCH_CLRMSK 
(IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_FETCH_EN (IMG_UINT64_C(0x1000000000000000)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_REGION_SIZE_SHIFT (44) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_REGION_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_REGION_SIZE_ALIGN (12U) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_MMU_CONTEXT_SHIFT (40) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_DEVVADDR_SHIFT (12) +#define TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) + + +/* + Register TMP_RGX_CR_FWCORE_BOOT + */ +#define TMP_RGX_CR_FWCORE_BOOT (0x3090U) +#define TMP_RGX_CR_FWCORE_BOOT_BOOT_SHIFT (0) +#define TMP_RGX_CR_FWCORE_BOOT_BOOT_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define TMP_RGX_CR_FWCORE_BOOT_BOOT_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register TMP_RGX_CR_FWCORE_RESET_ADDR + */ +#define TMP_RGX_CR_FWCORE_RESET_ADDR (0x3098U) +#define TMP_RGX_CR_FWCORE_RESET_ADDR_ADDR_SHIFT (1) +#define TMP_RGX_CR_FWCORE_RESET_ADDR_ADDR_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register TMP_RGX_CR_FWCORE_WRAPPER_FENCE + */ +#define TMP_RGX_CR_FWCORE_WRAPPER_FENCE (0x30E8U) +#define TMP_RGX_CR_FWCORE_WRAPPER_FENCE_FENCE_SHIFT (0) +#define TMP_RGX_CR_FWCORE_WRAPPER_FENCE_FENCE_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register TMP_RGX_CR_MTIME_SET +*/ +#define TMP_RGX_CR_MTIME_SET (0x7000U) +#define TMP_RGX_CR_MTIME_SET_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) + +/* + Register TMP_RGX_CR_MTIME_CMP +*/ +#define TMP_RGX_CR_MTIME_CMP (0x7008U) +#define TMP_RGX_CR_MTIME_CMP_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) + +/* + Register TMP_RGX_CR_MTIME_CTRL +*/ +#define TMP_RGX_CR_MTIME_CTRL (0x7018U) +#define TMP_RGX_CR_MTIME_CTRL_MASKFULL (IMG_UINT64_C(0x0000000080000003)) +#define TMP_RGX_CR_MTIME_CTRL_SOFT_RESET_SHIFT 
(31) +#define TMP_RGX_CR_MTIME_CTRL_SOFT_RESET_CLRMSK (IMG_UINT64_C(0x0000000000000003)) +#define TMP_RGX_CR_MTIME_CTRL_SOFT_RESET_EN (IMG_UINT64_C(0x0000000080000000)) +#define TMP_RGX_CR_MTIME_CTRL_PAUSE_SHIFT (1) +#define TMP_RGX_CR_MTIME_CTRL_PAUSE_CLRMSK (IMG_UINT64_C(0x0000000080000001)) +#define TMP_RGX_CR_MTIME_CTRL_PAUSE_EN (IMG_UINT64_C(0x0000000000000002)) +#define TMP_RGX_CR_MTIME_CTRL_ENABLE_SHIFT (0) +#define TMP_RGX_CR_MTIME_CTRL_ENABLE_CLRMSK (IMG_UINT64_C(0x0000000080000002)) +#define TMP_RGX_CR_MTIME_CTRL_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) + +#endif /* TMP_RGX_CR_DEFS_RISCV_KM_H */ + +/***************************************************************************** + End of file (tmp_rgx_cr_defs_riscv_km.h) +*****************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/hwdefs/volcanic/rgxpmdefs.h b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/rgxpmdefs.h new file mode 100644 index 000000000000..2b47f8295592 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/hwdefs/volcanic/rgxpmdefs.h @@ -0,0 +1,4645 @@ +/*************************************************************************/ /*! +@Title Hardware definition file rgxpmdefs.h +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +/* **** Autogenerated C -- do not edit **** */ + +/* + * rogue_pm.def: #12 + */ + + +#ifndef RGXPMDEFS_H +#define RGXPMDEFS_H + +#include "img_types.h" +#include "img_defs.h" + + +#define RGXPMDEFS_REVISION 12 + +/* +The mini PB size on a per-RT basis +*/ +typedef struct _PM_DATA_MINI_PB { + IMG_UINT32 u32_0; +} PM_DATA_MINI_PB; + +/* + +*/ +#define PM_DATA_MINI_PB_SIZE_WOFF (0U) +#define PM_DATA_MINI_PB_SIZE_SHIFT (0U) +#define PM_DATA_MINI_PB_SIZE_CLRMSK (0xFFFFFC00U) +#define PM_DATA_MINI_PB_SET_SIZE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_MINI_PB_SIZE_CLRMSK ) | (((_x_) & (0x000003ffU)) << (PM_DATA_MINI_PB_SIZE_SHIFT)))) +#define PM_DATA_MINI_PB_GET_SIZE(_ft_) ((_ft_).u32_0 >> ((PM_DATA_MINI_PB_SIZE_SHIFT)) & 0x000003ffU) + + +/* +The minimum PB size for the WDDM driver only. It is consistent with the OPENGL/OPENGLES. However, it is breaking down as two parts: the pagable memory and non pagable memory. 
+*/ +typedef struct _PM_DATA_WDDM_MINI_PB { + IMG_UINT32 u32_0; +} PM_DATA_WDDM_MINI_PB; + +/* + +*/ +#define PM_DATA_WDDM_MINI_PB_NON_PAGABLE_SIZE_WOFF (0U) +#define PM_DATA_WDDM_MINI_PB_NON_PAGABLE_SIZE_SHIFT (10U) +#define PM_DATA_WDDM_MINI_PB_NON_PAGABLE_SIZE_CLRMSK (0xFFF003FFU) +#define PM_DATA_WDDM_MINI_PB_SET_NON_PAGABLE_SIZE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_WDDM_MINI_PB_NON_PAGABLE_SIZE_CLRMSK ) | (((_x_) & (0x000003ffU)) << (PM_DATA_WDDM_MINI_PB_NON_PAGABLE_SIZE_SHIFT)))) +#define PM_DATA_WDDM_MINI_PB_GET_NON_PAGABLE_SIZE(_ft_) ((_ft_).u32_0 >> ((PM_DATA_WDDM_MINI_PB_NON_PAGABLE_SIZE_SHIFT)) & 0x000003ffU) +/* + +*/ +#define PM_DATA_WDDM_MINI_PB_PAGABLE_SIZE_WOFF (0U) +#define PM_DATA_WDDM_MINI_PB_PAGABLE_SIZE_SHIFT (0U) +#define PM_DATA_WDDM_MINI_PB_PAGABLE_SIZE_CLRMSK (0xFFFFFC00U) +#define PM_DATA_WDDM_MINI_PB_SET_PAGABLE_SIZE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_WDDM_MINI_PB_PAGABLE_SIZE_CLRMSK ) | (((_x_) & (0x000003ffU)) << (PM_DATA_WDDM_MINI_PB_PAGABLE_SIZE_SHIFT)))) +#define PM_DATA_WDDM_MINI_PB_GET_PAGABLE_SIZE(_ft_) ((_ft_).u32_0 >> ((PM_DATA_WDDM_MINI_PB_PAGABLE_SIZE_SHIFT)) & 0x000003ffU) + + +/* +the mini number of the reserve pages when only the local free list is used */ +#define PM_DATA_PM_RESERVE_PAGES_MIN_SIZE (0x00000007U) + + +/* +the mini number of the reserve pages when unified free list is present */ +#define PM_DATA_PM_RESERVE_PAGES_MIN_UNIFIED_SIZE (0x0000000bU) + + +/* +This defines the format of entries in the FSTACK, UFSTACK and MMUSTACK +*/ +typedef struct _PM_DATA_FSTACK_ENTRY { + IMG_UINT32 u32_0; +} PM_DATA_FSTACK_ENTRY; + +/* +Reserved for future use +*/ +#define PM_DATA_FSTACK_ENTRY_RSV_WOFF (0U) +#define PM_DATA_FSTACK_ENTRY_RSV_SHIFT (28U) +#define PM_DATA_FSTACK_ENTRY_RSV_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_FSTACK_ENTRY_SET_RSV(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_FSTACK_ENTRY_RSV_CLRMSK ) | (((_x_) & (0x0000000fU)) << (PM_DATA_FSTACK_ENTRY_RSV_SHIFT)))) +#define 
PM_DATA_FSTACK_ENTRY_GET_RSV(_ft_) ((_ft_).u32_0 >> ((PM_DATA_FSTACK_ENTRY_RSV_SHIFT)) & 0x0000000fU) +/* +Address of 4 kB physical page +*/ +#define PM_DATA_FSTACK_ENTRY_PPAGE_WOFF (0U) +#define PM_DATA_FSTACK_ENTRY_PPAGE_SHIFT (0U) +#define PM_DATA_FSTACK_ENTRY_PPAGE_CLRMSK (0xF0000000U) +#define PM_DATA_FSTACK_ENTRY_SET_PPAGE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_FSTACK_ENTRY_PPAGE_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_FSTACK_ENTRY_PPAGE_SHIFT)))) +#define PM_DATA_FSTACK_ENTRY_GET_PPAGE(_ft_) ((_ft_).u32_0 >> ((PM_DATA_FSTACK_ENTRY_PPAGE_SHIFT)) & 0x0fffffffU) + + +/* +This defines the format of an ALIST (Allocation List) entry +*/ +typedef struct _PM_DATA_ALIST_ENTRY { + IMG_UINT32 u32_0; + IMG_UINT32 u32_1; +} PM_DATA_ALIST_ENTRY; + +/* +Valid bit. Indicates whether this ALIST entry is valid. +*/ +#define PM_DATA_ALIST_ENTRY_VAL_WOFF (1U) +#define PM_DATA_ALIST_ENTRY_VAL_SHIFT (31U) +#define PM_DATA_ALIST_ENTRY_VAL_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_ALIST_ENTRY_SET_VAL(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_ALIST_ENTRY_VAL_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_ALIST_ENTRY_VAL_SHIFT)))) +#define PM_DATA_ALIST_ENTRY_GET_VAL(_ft_) ((_ft_).u32_1 >> ((PM_DATA_ALIST_ENTRY_VAL_SHIFT)) & 0x00000001U) +/* +The "data master" of the virtual page. 0=VCE, 1=TE, 2,3=reserved. +*/ +#define PM_DATA_ALIST_ENTRY_DM_INDEX_WOFF (1U) +#define PM_DATA_ALIST_ENTRY_DM_INDEX_SHIFT (26U) +#define PM_DATA_ALIST_ENTRY_DM_INDEX_CLRMSK (0xF3FFFFFFU) +#define PM_DATA_ALIST_ENTRY_SET_DM_INDEX(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_ALIST_ENTRY_DM_INDEX_CLRMSK ) | (((_x_) & (0x00000003U)) << (PM_DATA_ALIST_ENTRY_DM_INDEX_SHIFT)))) +#define PM_DATA_ALIST_ENTRY_GET_DM_INDEX(_ft_) ((_ft_).u32_1 >> ((PM_DATA_ALIST_ENTRY_DM_INDEX_SHIFT)) & 0x00000003U) +/* +Render Target Array index. Up to 2 k Render Target Arrays are supported. 
+*/ +#define PM_DATA_ALIST_ENTRY_RTA_INDEX_WOFF (1U) +#define PM_DATA_ALIST_ENTRY_RTA_INDEX_SHIFT (14U) +#define PM_DATA_ALIST_ENTRY_RTA_INDEX_CLRMSK (0xFE003FFFU) +#define PM_DATA_ALIST_ENTRY_SET_RTA_INDEX(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_ALIST_ENTRY_RTA_INDEX_CLRMSK ) | (((_x_) & (0x000007ffU)) << (PM_DATA_ALIST_ENTRY_RTA_INDEX_SHIFT)))) +#define PM_DATA_ALIST_ENTRY_GET_RTA_INDEX(_ft_) ((_ft_).u32_1 >> ((PM_DATA_ALIST_ENTRY_RTA_INDEX_SHIFT)) & 0x000007ffU) +/* +The virtual page number (16 kB virtual page). +*/ +#define PM_DATA_ALIST_ENTRY_VRP_PPAGE_W0_WOFF (0U) +#define PM_DATA_ALIST_ENTRY_VRP_PPAGE_W1_WOFF (1U) +#define PM_DATA_ALIST_ENTRY_VRP_PPAGE_W0_SHIFT (16U) +#define PM_DATA_ALIST_ENTRY_VRP_PPAGE_W1_SHIFT (0U) +#define PM_DATA_ALIST_ENTRY_VRP_PPAGE_W0_CLRMSK (0x0000FFFFU) +#define PM_DATA_ALIST_ENTRY_VRP_PPAGE_W1_CLRMSK (0xFFFFFFF0U) +#define PM_DATA_ALIST_ENTRY_SET_VRP_PPAGE(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_ALIST_ENTRY_VRP_PPAGE_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000ffff))) << 16))); \ + ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_ALIST_ENTRY_VRP_PPAGE_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000f0000))) >> 16))); } +#define PM_DATA_ALIST_ENTRY_GET_VRP_PPAGE(_ft_) (((_ft_).u32_0 >> (16)) | ((IMG_UINT64)((_ft_).u32_1 & 0x0000000fU ) << (16))) +/* +The 16-bit macrotile mask. 
Indicates which macrotile(s) are using this 16 kB page +*/ +#define PM_DATA_ALIST_ENTRY_MTILE_MASK_WOFF (0U) +#define PM_DATA_ALIST_ENTRY_MTILE_MASK_SHIFT (0U) +#define PM_DATA_ALIST_ENTRY_MTILE_MASK_CLRMSK (0xFFFF0000U) +#define PM_DATA_ALIST_ENTRY_SET_MTILE_MASK(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_ALIST_ENTRY_MTILE_MASK_CLRMSK ) | (((_x_) & (0x0000ffffU)) << (PM_DATA_ALIST_ENTRY_MTILE_MASK_SHIFT)))) +#define PM_DATA_ALIST_ENTRY_GET_MTILE_MASK(_ft_) ((_ft_).u32_0 >> ((PM_DATA_ALIST_ENTRY_MTILE_MASK_SHIFT)) & 0x0000ffffU) + + +/* +This defines the format of entries in the MLIST +*/ +typedef struct _PM_DATA_MLIST_ENTRY { + IMG_UINT32 u32_0; +} PM_DATA_MLIST_ENTRY; + +/* +Original source of the MMU page: +0=Page was allocated from the FSTACK, +1=Page was allocated from the UFSTACK. +This bit is ignored when RGX_CR_PM_MMU_STACK_POLICY=1 +*/ +#define PM_DATA_MLIST_ENTRY_SRC_WOFF (0U) +#define PM_DATA_MLIST_ENTRY_SRC_SHIFT (31U) +#define PM_DATA_MLIST_ENTRY_SRC_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_MLIST_ENTRY_SET_SRC(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_MLIST_ENTRY_SRC_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_MLIST_ENTRY_SRC_SHIFT)))) +#define PM_DATA_MLIST_ENTRY_GET_SRC(_ft_) ((_ft_).u32_0 >> ((PM_DATA_MLIST_ENTRY_SRC_SHIFT)) & 0x00000001U) +/* +Address of Physical Page allocated to MMU +*/ +#define PM_DATA_MLIST_ENTRY_PPAGE_WOFF (0U) +#define PM_DATA_MLIST_ENTRY_PPAGE_SHIFT (0U) +#define PM_DATA_MLIST_ENTRY_PPAGE_CLRMSK (0xF0000000U) +#define PM_DATA_MLIST_ENTRY_SET_PPAGE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_MLIST_ENTRY_PPAGE_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_MLIST_ENTRY_PPAGE_SHIFT)))) +#define PM_DATA_MLIST_ENTRY_GET_PPAGE(_ft_) ((_ft_).u32_0 >> ((PM_DATA_MLIST_ENTRY_PPAGE_SHIFT)) & 0x0fffffffU) + + +/* +This defines the format of entries in the VFP Table +*/ +typedef struct _PM_DATA_VFP_TABLE_ENTRY { + IMG_UINT32 u32_0; + IMG_UINT32 u32_1; +} PM_DATA_VFP_TABLE_ENTRY; + +/* +Valid bit. 
0=VFP is unmapped, 1=VFP is mapped. +*/ +#define PM_DATA_VFP_TABLE_ENTRY_VALID_WOFF (1U) +#define PM_DATA_VFP_TABLE_ENTRY_VALID_SHIFT (31U) +#define PM_DATA_VFP_TABLE_ENTRY_VALID_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VFP_TABLE_ENTRY_SET_VALID(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_VFP_TABLE_ENTRY_VALID_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VFP_TABLE_ENTRY_VALID_SHIFT)))) +#define PM_DATA_VFP_TABLE_ENTRY_GET_VALID(_ft_) ((_ft_).u32_1 >> ((PM_DATA_VFP_TABLE_ENTRY_VALID_SHIFT)) & 0x00000001U) +/* +Address of MMU Page Table Entry. 8 Byte Granular. +*/ +#define PM_DATA_VFP_TABLE_ENTRY_PTE_PTR_WOFF (1U) +#define PM_DATA_VFP_TABLE_ENTRY_PTE_PTR_SHIFT (0U) +#define PM_DATA_VFP_TABLE_ENTRY_PTE_PTR_CLRMSK (0x80000000U) +#define PM_DATA_VFP_TABLE_ENTRY_SET_PTE_PTR(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_VFP_TABLE_ENTRY_PTE_PTR_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (PM_DATA_VFP_TABLE_ENTRY_PTE_PTR_SHIFT)))) +#define PM_DATA_VFP_TABLE_ENTRY_GET_PTE_PTR(_ft_) ((_ft_).u32_1 >> ((PM_DATA_VFP_TABLE_ENTRY_PTE_PTR_SHIFT)) & 0x7fffffffU) +/* +Reserved for future use. +*/ +#define PM_DATA_VFP_TABLE_ENTRY_RSV_WOFF (0U) +#define PM_DATA_VFP_TABLE_ENTRY_RSV_SHIFT (28U) +#define PM_DATA_VFP_TABLE_ENTRY_RSV_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VFP_TABLE_ENTRY_SET_RSV(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_VFP_TABLE_ENTRY_RSV_CLRMSK ) | (((_x_) & (0x0000000fU)) << (PM_DATA_VFP_TABLE_ENTRY_RSV_SHIFT)))) +#define PM_DATA_VFP_TABLE_ENTRY_GET_RSV(_ft_) ((_ft_).u32_0 >> ((PM_DATA_VFP_TABLE_ENTRY_RSV_SHIFT)) & 0x0000000fU) +/* +Address of 1 kB Physical Page. 1 TB addressable. 
+*/ +#define PM_DATA_VFP_TABLE_ENTRY_PPAGE_WOFF (0U) +#define PM_DATA_VFP_TABLE_ENTRY_PPAGE_SHIFT (0U) +#define PM_DATA_VFP_TABLE_ENTRY_PPAGE_CLRMSK (0xF0000000U) +#define PM_DATA_VFP_TABLE_ENTRY_SET_PPAGE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_VFP_TABLE_ENTRY_PPAGE_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VFP_TABLE_ENTRY_PPAGE_SHIFT)))) +#define PM_DATA_VFP_TABLE_ENTRY_GET_PPAGE(_ft_) ((_ft_).u32_0 >> ((PM_DATA_VFP_TABLE_ENTRY_PPAGE_SHIFT)) & 0x0fffffffU) + + +/* +PerPipe Segment SIZE, it has a fixed mapping as follows: +PIPE Number - Segment Size +1 16G +2 8G +4 4G +8 2G +*/ +typedef struct _PM_DATA_PERPIPE_SEGSIZE { + IMG_UINT32 u32_0; +} PM_DATA_PERPIPE_SEGSIZE; + +/* +PerSegment Size 2G +*/ +#define PM_DATA_PERPIPE_SEGSIZE_PIPE8_SEGSZIZE_WOFF (0U) +#define PM_DATA_PERPIPE_SEGSIZE_PIPE8_SEGSZIZE_SHIFT (3U) +#define PM_DATA_PERPIPE_SEGSIZE_PIPE8_SEGSZIZE_CLRMSK (0xFFFFFFF7U) +#define PM_DATA_PERPIPE_SEGSIZE_SET_PIPE8_SEGSZIZE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_PERPIPE_SEGSIZE_PIPE8_SEGSZIZE_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_PERPIPE_SEGSIZE_PIPE8_SEGSZIZE_SHIFT)))) +#define PM_DATA_PERPIPE_SEGSIZE_GET_PIPE8_SEGSZIZE(_ft_) ((_ft_).u32_0 >> ((PM_DATA_PERPIPE_SEGSIZE_PIPE8_SEGSZIZE_SHIFT)) & 0x00000001U) +/* +PerSegment Size 4G +*/ +#define PM_DATA_PERPIPE_SEGSIZE_PIPE4_SEGSZIZE_WOFF (0U) +#define PM_DATA_PERPIPE_SEGSIZE_PIPE4_SEGSZIZE_SHIFT (2U) +#define PM_DATA_PERPIPE_SEGSIZE_PIPE4_SEGSZIZE_CLRMSK (0xFFFFFFFBU) +#define PM_DATA_PERPIPE_SEGSIZE_SET_PIPE4_SEGSZIZE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_PERPIPE_SEGSIZE_PIPE4_SEGSZIZE_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_PERPIPE_SEGSIZE_PIPE4_SEGSZIZE_SHIFT)))) +#define PM_DATA_PERPIPE_SEGSIZE_GET_PIPE4_SEGSZIZE(_ft_) ((_ft_).u32_0 >> ((PM_DATA_PERPIPE_SEGSIZE_PIPE4_SEGSZIZE_SHIFT)) & 0x00000001U) +/* +PerSegment Size 8G +*/ +#define PM_DATA_PERPIPE_SEGSIZE_PIPE2_SEGSZIZE_WOFF (0U) +#define PM_DATA_PERPIPE_SEGSIZE_PIPE2_SEGSZIZE_SHIFT (1U) 
+#define PM_DATA_PERPIPE_SEGSIZE_PIPE2_SEGSZIZE_CLRMSK (0xFFFFFFFDU) +#define PM_DATA_PERPIPE_SEGSIZE_SET_PIPE2_SEGSZIZE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_PERPIPE_SEGSIZE_PIPE2_SEGSZIZE_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_PERPIPE_SEGSIZE_PIPE2_SEGSZIZE_SHIFT)))) +#define PM_DATA_PERPIPE_SEGSIZE_GET_PIPE2_SEGSZIZE(_ft_) ((_ft_).u32_0 >> ((PM_DATA_PERPIPE_SEGSIZE_PIPE2_SEGSZIZE_SHIFT)) & 0x00000001U) +/* +PerSegment Size 16G +*/ +#define PM_DATA_PERPIPE_SEGSIZE_PIPE1_SEGSZIZE_WOFF (0U) +#define PM_DATA_PERPIPE_SEGSIZE_PIPE1_SEGSZIZE_SHIFT (0U) +#define PM_DATA_PERPIPE_SEGSIZE_PIPE1_SEGSZIZE_CLRMSK (0xFFFFFFFEU) +#define PM_DATA_PERPIPE_SEGSIZE_SET_PIPE1_SEGSZIZE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_PERPIPE_SEGSIZE_PIPE1_SEGSZIZE_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_PERPIPE_SEGSIZE_PIPE1_SEGSZIZE_SHIFT)))) +#define PM_DATA_PERPIPE_SEGSIZE_GET_PIPE1_SEGSZIZE(_ft_) ((_ft_).u32_0 >> ((PM_DATA_PERPIPE_SEGSIZE_PIPE1_SEGSZIZE_SHIFT)) & 0x00000001U) + + +/* +PM Virtual Heap Buffer Offset. This buffer contains all the meta-data associated with each render target. +size is 11904/8 = 1488 Bytes. + +Natively the buffer supports up to 8-VCEs and 8-TEs scaling without changing HW. + +In case relevant PIPE N is not present, the corresponding space is just reserved. 
+*/ +typedef struct _PM_DATA_VHEAP_BUFFER { + IMG_UINT32 u32_0; + IMG_UINT32 u32_1; + IMG_UINT32 u32_2; + IMG_UINT32 u32_3; + IMG_UINT32 u32_4; + IMG_UINT32 u32_5; + IMG_UINT32 u32_6; + IMG_UINT32 u32_7; + IMG_UINT32 u32_8; + IMG_UINT32 u32_9; + IMG_UINT32 u32_10; + IMG_UINT32 u32_11; + IMG_UINT32 u32_12; + IMG_UINT32 u32_13; + IMG_UINT32 u32_14; + IMG_UINT32 u32_15; + IMG_UINT32 u32_16; + IMG_UINT32 u32_17; + IMG_UINT32 u32_18; + IMG_UINT32 u32_19; + IMG_UINT32 u32_20; + IMG_UINT32 u32_21; + IMG_UINT32 u32_22; + IMG_UINT32 u32_23; + IMG_UINT32 u32_24; + IMG_UINT32 u32_25; + IMG_UINT32 u32_26; + IMG_UINT32 u32_27; + IMG_UINT32 u32_28; + IMG_UINT32 u32_29; + IMG_UINT32 u32_30; + IMG_UINT32 u32_31; + IMG_UINT32 u32_32; + IMG_UINT32 u32_33; + IMG_UINT32 u32_34; + IMG_UINT32 u32_35; + IMG_UINT32 u32_36; + IMG_UINT32 u32_37; + IMG_UINT32 u32_38; + IMG_UINT32 u32_39; + IMG_UINT32 u32_40; + IMG_UINT32 u32_41; + IMG_UINT32 u32_42; + IMG_UINT32 u32_43; + IMG_UINT32 u32_44; + IMG_UINT32 u32_45; + IMG_UINT32 u32_46; + IMG_UINT32 u32_47; + IMG_UINT32 u32_48; + IMG_UINT32 u32_49; + IMG_UINT32 u32_50; + IMG_UINT32 u32_51; + IMG_UINT32 u32_52; + IMG_UINT32 u32_53; + IMG_UINT32 u32_54; + IMG_UINT32 u32_55; + IMG_UINT32 u32_56; + IMG_UINT32 u32_57; + IMG_UINT32 u32_58; + IMG_UINT32 u32_59; + IMG_UINT32 u32_60; + IMG_UINT32 u32_61; + IMG_UINT32 u32_62; + IMG_UINT32 u32_63; + IMG_UINT32 u32_64; + IMG_UINT32 u32_65; + IMG_UINT32 u32_66; + IMG_UINT32 u32_67; + IMG_UINT32 u32_68; + IMG_UINT32 u32_69; + IMG_UINT32 u32_70; + IMG_UINT32 u32_71; + IMG_UINT32 u32_72; + IMG_UINT32 u32_73; + IMG_UINT32 u32_74; + IMG_UINT32 u32_75; + IMG_UINT32 u32_76; + IMG_UINT32 u32_77; + IMG_UINT32 u32_78; + IMG_UINT32 u32_79; + IMG_UINT32 u32_80; + IMG_UINT32 u32_81; + IMG_UINT32 u32_82; + IMG_UINT32 u32_83; + IMG_UINT32 u32_84; + IMG_UINT32 u32_85; + IMG_UINT32 u32_86; + IMG_UINT32 u32_87; + IMG_UINT32 u32_88; + IMG_UINT32 u32_89; + IMG_UINT32 u32_90; + IMG_UINT32 u32_91; + IMG_UINT32 u32_92; + IMG_UINT32 
u32_93; + IMG_UINT32 u32_94; + IMG_UINT32 u32_95; + IMG_UINT32 u32_96; + IMG_UINT32 u32_97; + IMG_UINT32 u32_98; + IMG_UINT32 u32_99; + IMG_UINT32 u32_100; + IMG_UINT32 u32_101; + IMG_UINT32 u32_102; + IMG_UINT32 u32_103; + IMG_UINT32 u32_104; + IMG_UINT32 u32_105; + IMG_UINT32 u32_106; + IMG_UINT32 u32_107; + IMG_UINT32 u32_108; + IMG_UINT32 u32_109; + IMG_UINT32 u32_110; + IMG_UINT32 u32_111; + IMG_UINT32 u32_112; + IMG_UINT32 u32_113; + IMG_UINT32 u32_114; + IMG_UINT32 u32_115; + IMG_UINT32 u32_116; + IMG_UINT32 u32_117; + IMG_UINT32 u32_118; + IMG_UINT32 u32_119; + IMG_UINT32 u32_120; + IMG_UINT32 u32_121; + IMG_UINT32 u32_122; + IMG_UINT32 u32_123; + IMG_UINT32 u32_124; + IMG_UINT32 u32_125; + IMG_UINT32 u32_126; + IMG_UINT32 u32_127; + IMG_UINT32 u32_128; + IMG_UINT32 u32_129; + IMG_UINT32 u32_130; + IMG_UINT32 u32_131; + IMG_UINT32 u32_132; + IMG_UINT32 u32_133; + IMG_UINT32 u32_134; + IMG_UINT32 u32_135; + IMG_UINT32 u32_136; + IMG_UINT32 u32_137; + IMG_UINT32 u32_138; + IMG_UINT32 u32_139; + IMG_UINT32 u32_140; + IMG_UINT32 u32_141; + IMG_UINT32 u32_142; + IMG_UINT32 u32_143; + IMG_UINT32 u32_144; + IMG_UINT32 u32_145; + IMG_UINT32 u32_146; + IMG_UINT32 u32_147; + IMG_UINT32 u32_148; + IMG_UINT32 u32_149; + IMG_UINT32 u32_150; + IMG_UINT32 u32_151; + IMG_UINT32 u32_152; + IMG_UINT32 u32_153; + IMG_UINT32 u32_154; + IMG_UINT32 u32_155; + IMG_UINT32 u32_156; + IMG_UINT32 u32_157; + IMG_UINT32 u32_158; + IMG_UINT32 u32_159; + IMG_UINT32 u32_160; + IMG_UINT32 u32_161; + IMG_UINT32 u32_162; + IMG_UINT32 u32_163; + IMG_UINT32 u32_164; + IMG_UINT32 u32_165; + IMG_UINT32 u32_166; + IMG_UINT32 u32_167; + IMG_UINT32 u32_168; + IMG_UINT32 u32_169; + IMG_UINT32 u32_170; + IMG_UINT32 u32_171; + IMG_UINT32 u32_172; + IMG_UINT32 u32_173; + IMG_UINT32 u32_174; + IMG_UINT32 u32_175; + IMG_UINT32 u32_176; + IMG_UINT32 u32_177; + IMG_UINT32 u32_178; + IMG_UINT32 u32_179; + IMG_UINT32 u32_180; + IMG_UINT32 u32_181; + IMG_UINT32 u32_182; + IMG_UINT32 u32_183; + IMG_UINT32 
u32_184; + IMG_UINT32 u32_185; + IMG_UINT32 u32_186; + IMG_UINT32 u32_187; + IMG_UINT32 u32_188; + IMG_UINT32 u32_189; + IMG_UINT32 u32_190; + IMG_UINT32 u32_191; + IMG_UINT32 u32_192; + IMG_UINT32 u32_193; + IMG_UINT32 u32_194; + IMG_UINT32 u32_195; + IMG_UINT32 u32_196; + IMG_UINT32 u32_197; + IMG_UINT32 u32_198; + IMG_UINT32 u32_199; + IMG_UINT32 u32_200; + IMG_UINT32 u32_201; + IMG_UINT32 u32_202; + IMG_UINT32 u32_203; + IMG_UINT32 u32_204; + IMG_UINT32 u32_205; + IMG_UINT32 u32_206; + IMG_UINT32 u32_207; + IMG_UINT32 u32_208; + IMG_UINT32 u32_209; + IMG_UINT32 u32_210; + IMG_UINT32 u32_211; + IMG_UINT32 u32_212; + IMG_UINT32 u32_213; + IMG_UINT32 u32_214; + IMG_UINT32 u32_215; + IMG_UINT32 u32_216; + IMG_UINT32 u32_217; + IMG_UINT32 u32_218; + IMG_UINT32 u32_219; + IMG_UINT32 u32_220; + IMG_UINT32 u32_221; + IMG_UINT32 u32_222; + IMG_UINT32 u32_223; + IMG_UINT32 u32_224; + IMG_UINT32 u32_225; + IMG_UINT32 u32_226; + IMG_UINT32 u32_227; + IMG_UINT32 u32_228; + IMG_UINT32 u32_229; + IMG_UINT32 u32_230; + IMG_UINT32 u32_231; + IMG_UINT32 u32_232; + IMG_UINT32 u32_233; + IMG_UINT32 u32_234; + IMG_UINT32 u32_235; + IMG_UINT32 u32_236; + IMG_UINT32 u32_237; + IMG_UINT32 u32_238; + IMG_UINT32 u32_239; + IMG_UINT32 u32_240; + IMG_UINT32 u32_241; + IMG_UINT32 u32_242; + IMG_UINT32 u32_243; + IMG_UINT32 u32_244; + IMG_UINT32 u32_245; + IMG_UINT32 u32_246; + IMG_UINT32 u32_247; + IMG_UINT32 u32_248; + IMG_UINT32 u32_249; + IMG_UINT32 u32_250; + IMG_UINT32 u32_251; + IMG_UINT32 u32_252; + IMG_UINT32 u32_253; + IMG_UINT32 u32_254; + IMG_UINT32 u32_255; + IMG_UINT32 u32_256; + IMG_UINT32 u32_257; + IMG_UINT32 u32_258; + IMG_UINT32 u32_259; + IMG_UINT32 u32_260; + IMG_UINT32 u32_261; + IMG_UINT32 u32_262; + IMG_UINT32 u32_263; + IMG_UINT32 u32_264; + IMG_UINT32 u32_265; + IMG_UINT32 u32_266; + IMG_UINT32 u32_267; + IMG_UINT32 u32_268; + IMG_UINT32 u32_269; + IMG_UINT32 u32_270; + IMG_UINT32 u32_271; + IMG_UINT32 u32_272; + IMG_UINT32 u32_273; + IMG_UINT32 u32_274; + 
IMG_UINT32 u32_275; + IMG_UINT32 u32_276; + IMG_UINT32 u32_277; + IMG_UINT32 u32_278; + IMG_UINT32 u32_279; + IMG_UINT32 u32_280; + IMG_UINT32 u32_281; + IMG_UINT32 u32_282; + IMG_UINT32 u32_283; + IMG_UINT32 u32_284; + IMG_UINT32 u32_285; + IMG_UINT32 u32_286; + IMG_UINT32 u32_287; + IMG_UINT32 u32_288; + IMG_UINT32 u32_289; + IMG_UINT32 u32_290; + IMG_UINT32 u32_291; + IMG_UINT32 u32_292; + IMG_UINT32 u32_293; + IMG_UINT32 u32_294; + IMG_UINT32 u32_295; + IMG_UINT32 u32_296; + IMG_UINT32 u32_297; + IMG_UINT32 u32_298; + IMG_UINT32 u32_299; + IMG_UINT32 u32_300; + IMG_UINT32 u32_301; + IMG_UINT32 u32_302; + IMG_UINT32 u32_303; + IMG_UINT32 u32_304; + IMG_UINT32 u32_305; + IMG_UINT32 u32_306; + IMG_UINT32 u32_307; + IMG_UINT32 u32_308; + IMG_UINT32 u32_309; + IMG_UINT32 u32_310; + IMG_UINT32 u32_311; + IMG_UINT32 u32_312; + IMG_UINT32 u32_313; + IMG_UINT32 u32_314; + IMG_UINT32 u32_315; + IMG_UINT32 u32_316; + IMG_UINT32 u32_317; + IMG_UINT32 u32_318; + IMG_UINT32 u32_319; + IMG_UINT32 u32_320; + IMG_UINT32 u32_321; + IMG_UINT32 u32_322; + IMG_UINT32 u32_323; + IMG_UINT32 u32_324; + IMG_UINT32 u32_325; + IMG_UINT32 u32_326; + IMG_UINT32 u32_327; + IMG_UINT32 u32_328; + IMG_UINT32 u32_329; + IMG_UINT32 u32_330; + IMG_UINT32 u32_331; + IMG_UINT32 u32_332; + IMG_UINT32 u32_333; + IMG_UINT32 u32_334; + IMG_UINT32 u32_335; + IMG_UINT32 u32_336; + IMG_UINT32 u32_337; + IMG_UINT32 u32_338; + IMG_UINT32 u32_339; + IMG_UINT32 u32_340; + IMG_UINT32 u32_341; + IMG_UINT32 u32_342; + IMG_UINT32 u32_343; + IMG_UINT32 u32_344; + IMG_UINT32 u32_345; + IMG_UINT32 u32_346; + IMG_UINT32 u32_347; + IMG_UINT32 u32_348; + IMG_UINT32 u32_349; + IMG_UINT32 u32_350; + IMG_UINT32 u32_351; + IMG_UINT32 u32_352; + IMG_UINT32 u32_353; + IMG_UINT32 u32_354; + IMG_UINT32 u32_355; + IMG_UINT32 u32_356; + IMG_UINT32 u32_357; + IMG_UINT32 u32_358; + IMG_UINT32 u32_359; + IMG_UINT32 u32_360; + IMG_UINT32 u32_361; + IMG_UINT32 u32_362; + IMG_UINT32 u32_363; + IMG_UINT32 u32_364; + IMG_UINT32 u32_365; 
+ IMG_UINT32 u32_366; + IMG_UINT32 u32_367; + IMG_UINT32 u32_368; + IMG_UINT32 u32_369; + IMG_UINT32 u32_370; + IMG_UINT32 u32_371; +} PM_DATA_VHEAP_BUFFER; + +/* +TE7 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_UFSTACK_WOFF (371U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE7_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_371 = (((_ft_).u32_371 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE7_INFLIGHT0_UFSTACK(_ft_) ((_ft_).u32_371 >> ((PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +TE7 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W0_WOFF (370U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W1_WOFF (371U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE7_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_370 = (((_ft_).u32_370 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_371 = (((_ft_).u32_371 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE7_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_370 >> (20)) | ((IMG_UINT64)((_ft_).u32_371 & 0x0000ffffU ) << (12))) +/* +TE7 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W0_WOFF (369U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W1_WOFF (370U) +#define 
PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE7_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_369 = (((_ft_).u32_369 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_370 = (((_ft_).u32_370 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE7_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_369 >> (24)) | ((IMG_UINT64)((_ft_).u32_370 & 0x000fffffU ) << (8))) +/* +TE7 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W0_WOFF (368U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W1_WOFF (369U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE7_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_368 = (((_ft_).u32_368 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_369 = (((_ft_).u32_369 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE7_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_368 >> (28)) | ((IMG_UINT64)((_ft_).u32_369 & 0x00ffffffU ) << (4))) +/* +TE7 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE0_WOFF (368U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE0_CLRMSK 
(0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE7_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_368 = (((_ft_).u32_368 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE7_INFLIGHT0_PPAGE0(_ft_) ((_ft_).u32_368 >> ((PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +TE7 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PAGE_WOFF (364U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE7_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_364 = (((_ft_).u32_364 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE7_INFLIGHT0_PAGE(_ft_) ((_ft_).u32_364 >> ((PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +TE6 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_UFSTACK_WOFF (363U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE6_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_363 = (((_ft_).u32_363 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE6_INFLIGHT0_UFSTACK(_ft_) ((_ft_).u32_363 >> ((PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +TE6 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W0_WOFF (362U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W1_WOFF (363U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define 
PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE6_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_362 = (((_ft_).u32_362 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_363 = (((_ft_).u32_363 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE6_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_362 >> (20)) | ((IMG_UINT64)((_ft_).u32_363 & 0x0000ffffU ) << (12))) +/* +TE6 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W0_WOFF (361U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W1_WOFF (362U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE6_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_361 = (((_ft_).u32_361 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_362 = (((_ft_).u32_362 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE6_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_361 >> (24)) | ((IMG_UINT64)((_ft_).u32_362 & 0x000fffffU ) << (8))) +/* +TE6 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W0_WOFF (360U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W1_WOFF (361U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define 
PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE6_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_360 = (((_ft_).u32_360 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_361 = (((_ft_).u32_361 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE6_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_360 >> (28)) | ((IMG_UINT64)((_ft_).u32_361 & 0x00ffffffU ) << (4))) +/* +TE6 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE0_WOFF (360U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE6_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_360 = (((_ft_).u32_360 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE6_INFLIGHT0_PPAGE0(_ft_) ((_ft_).u32_360 >> ((PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +TE6 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PAGE_WOFF (356U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE6_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_356 = (((_ft_).u32_356 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE6_INFLIGHT0_PAGE(_ft_) ((_ft_).u32_356 >> ((PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +TE5 opened page0 struct: unified stack bit +*/ +#define 
PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_UFSTACK_WOFF (355U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE5_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_355 = (((_ft_).u32_355 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE5_INFLIGHT0_UFSTACK(_ft_) ((_ft_).u32_355 >> ((PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +TE5 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W0_WOFF (354U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W1_WOFF (355U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE5_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_354 = (((_ft_).u32_354 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_355 = (((_ft_).u32_355 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE5_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_354 >> (20)) | ((IMG_UINT64)((_ft_).u32_355 & 0x0000ffffU ) << (12))) +/* +TE5 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W0_WOFF (353U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W1_WOFF (354U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define 
PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE5_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_353 = (((_ft_).u32_353 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_354 = (((_ft_).u32_354 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE5_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_353 >> (24)) | ((IMG_UINT64)((_ft_).u32_354 & 0x000fffffU ) << (8))) +/* +TE5 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W0_WOFF (352U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W1_WOFF (353U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE5_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_352 = (((_ft_).u32_352 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_353 = (((_ft_).u32_353 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE5_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_352 >> (28)) | ((IMG_UINT64)((_ft_).u32_353 & 0x00ffffffU ) << (4))) +/* +TE5 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE0_WOFF (352U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE5_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_352 = (((_ft_).u32_352 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << 
(PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE5_INFLIGHT0_PPAGE0(_ft_) ((_ft_).u32_352 >> ((PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +TE5 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PAGE_WOFF (348U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE5_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_348 = (((_ft_).u32_348 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE5_INFLIGHT0_PAGE(_ft_) ((_ft_).u32_348 >> ((PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +TE4 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_UFSTACK_WOFF (347U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE4_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_347 = (((_ft_).u32_347 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE4_INFLIGHT0_UFSTACK(_ft_) ((_ft_).u32_347 >> ((PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +TE4 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W0_WOFF (346U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W1_WOFF (347U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE4_INFLIGHT0_PPAGE3(_ft_,_x_) { 
((_ft_).u32_346 = (((_ft_).u32_346 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_347 = (((_ft_).u32_347 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE4_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_346 >> (20)) | ((IMG_UINT64)((_ft_).u32_347 & 0x0000ffffU ) << (12))) +/* +TE4 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W0_WOFF (345U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W1_WOFF (346U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE4_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_345 = (((_ft_).u32_345 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_346 = (((_ft_).u32_346 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE4_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_345 >> (24)) | ((IMG_UINT64)((_ft_).u32_346 & 0x000fffffU ) << (8))) +/* +TE4 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W0_WOFF (344U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W1_WOFF (345U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE4_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_344 = (((_ft_).u32_344 & 
PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_345 = (((_ft_).u32_345 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE4_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_344 >> (28)) | ((IMG_UINT64)((_ft_).u32_345 & 0x00ffffffU ) << (4))) +/* +TE4 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE0_WOFF (344U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE4_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_344 = (((_ft_).u32_344 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE4_INFLIGHT0_PPAGE0(_ft_) ((_ft_).u32_344 >> ((PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +TE4 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PAGE_WOFF (340U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE4_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_340 = (((_ft_).u32_340 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE4_INFLIGHT0_PAGE(_ft_) ((_ft_).u32_340 >> ((PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +TE3 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_UFSTACK_WOFF (339U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE3_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_339 = 
(((_ft_).u32_339 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE3_INFLIGHT0_UFSTACK(_ft_) ((_ft_).u32_339 >> ((PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +TE3 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W0_WOFF (338U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W1_WOFF (339U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE3_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_338 = (((_ft_).u32_338 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_339 = (((_ft_).u32_339 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE3_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_338 >> (20)) | ((IMG_UINT64)((_ft_).u32_339 & 0x0000ffffU ) << (12))) +/* +TE3 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W0_WOFF (337U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W1_WOFF (338U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE3_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_337 = (((_ft_).u32_337 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_338 = (((_ft_).u32_338 
& PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE3_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_337 >> (24)) | ((IMG_UINT64)((_ft_).u32_338 & 0x000fffffU ) << (8))) +/* +TE3 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W0_WOFF (336U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W1_WOFF (337U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE3_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_336 = (((_ft_).u32_336 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_337 = (((_ft_).u32_337 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE3_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_336 >> (28)) | ((IMG_UINT64)((_ft_).u32_337 & 0x00ffffffU ) << (4))) +/* +TE3 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE0_WOFF (336U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE3_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_336 = (((_ft_).u32_336 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE3_INFLIGHT0_PPAGE0(_ft_) ((_ft_).u32_336 >> ((PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +TE3 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PAGE_WOFF (332U) +#define 
PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE3_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_332 = (((_ft_).u32_332 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE3_INFLIGHT0_PAGE(_ft_) ((_ft_).u32_332 >> ((PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +TE2 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_UFSTACK_WOFF (331U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE2_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_331 = (((_ft_).u32_331 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE2_INFLIGHT0_UFSTACK(_ft_) ((_ft_).u32_331 >> ((PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +TE2 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W0_WOFF (330U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W1_WOFF (331U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE2_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_330 = (((_ft_).u32_330 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_331 = (((_ft_).u32_331 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define 
PM_DATA_VHEAP_BUFFER_GET_TE2_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_330 >> (20)) | ((IMG_UINT64)((_ft_).u32_331 & 0x0000ffffU ) << (12))) +/* +TE2 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W0_WOFF (329U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W1_WOFF (330U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE2_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_329 = (((_ft_).u32_329 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_330 = (((_ft_).u32_330 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE2_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_329 >> (24)) | ((IMG_UINT64)((_ft_).u32_330 & 0x000fffffU ) << (8))) +/* +TE2 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W0_WOFF (328U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W1_WOFF (329U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE2_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_328 = (((_ft_).u32_328 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_329 = (((_ft_).u32_329 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define 
PM_DATA_VHEAP_BUFFER_GET_TE2_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_328 >> (28)) | ((IMG_UINT64)((_ft_).u32_329 & 0x00ffffffU ) << (4))) +/* +TE2 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE0_WOFF (328U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE2_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_328 = (((_ft_).u32_328 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE2_INFLIGHT0_PPAGE0(_ft_) ((_ft_).u32_328 >> ((PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +TE2 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PAGE_WOFF (324U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE2_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_324 = (((_ft_).u32_324 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE2_INFLIGHT0_PAGE(_ft_) ((_ft_).u32_324 >> ((PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +TE1 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_UFSTACK_WOFF (323U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE1_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_323 = (((_ft_).u32_323 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE1_INFLIGHT0_UFSTACK(_ft_) ((_ft_).u32_323 >> 
((PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +TE1 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W0_WOFF (322U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W1_WOFF (323U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE1_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_322 = (((_ft_).u32_322 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_323 = (((_ft_).u32_323 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE1_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_322 >> (20)) | ((IMG_UINT64)((_ft_).u32_323 & 0x0000ffffU ) << (12))) +/* +TE1 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W0_WOFF (321U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W1_WOFF (322U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE1_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_321 = (((_ft_).u32_321 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_322 = (((_ft_).u32_322 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE1_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_321 >> (24)) | ((IMG_UINT64)((_ft_).u32_322 & 
0x000fffffU ) << (8))) +/* +TE1 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W0_WOFF (320U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W1_WOFF (321U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE1_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_320 = (((_ft_).u32_320 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_321 = (((_ft_).u32_321 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE1_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_320 >> (28)) | ((IMG_UINT64)((_ft_).u32_321 & 0x00ffffffU ) << (4))) +/* +TE1 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE0_WOFF (320U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE1_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_320 = (((_ft_).u32_320 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE1_INFLIGHT0_PPAGE0(_ft_) ((_ft_).u32_320 >> ((PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +TE1 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PAGE_WOFF (316U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE1_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_316 = (((_ft_).u32_316 & 
PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE1_INFLIGHT0_PAGE(_ft_) ((_ft_).u32_316 >> ((PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +TE0 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_UFSTACK_WOFF (315U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE0_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_315 = (((_ft_).u32_315 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE0_INFLIGHT0_UFSTACK(_ft_) ((_ft_).u32_315 >> ((PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +TE0 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W0_WOFF (314U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W1_WOFF (315U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE0_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_314 = (((_ft_).u32_314 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_315 = (((_ft_).u32_315 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE0_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_314 >> (20)) | ((IMG_UINT64)((_ft_).u32_315 & 0x0000ffffU ) << (12))) +/* +TE0 opened page0 struct: physical page 2 +*/ +#define 
PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W0_WOFF (313U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W1_WOFF (314U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE0_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_313 = (((_ft_).u32_313 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_314 = (((_ft_).u32_314 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE0_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_313 >> (24)) | ((IMG_UINT64)((_ft_).u32_314 & 0x000fffffU ) << (8))) +/* +TE0 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W0_WOFF (312U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W1_WOFF (313U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE0_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_312 = (((_ft_).u32_312 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_313 = (((_ft_).u32_313 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE0_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_312 >> (28)) | ((IMG_UINT64)((_ft_).u32_313 & 0x00ffffffU ) << (4))) +/* +TE0 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE0_WOFF 
(312U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE0_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_312 = (((_ft_).u32_312 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE0_INFLIGHT0_PPAGE0(_ft_) ((_ft_).u32_312 >> ((PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +TE0 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PAGE_WOFF (308U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE0_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_308 = (((_ft_).u32_308 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE0_INFLIGHT0_PAGE(_ft_) ((_ft_).u32_308 >> ((PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE7 opened page3 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_UFSTACK_WOFF (307U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT3_UFSTACK(_ft_,_x_) ((_ft_).u32_307 = (((_ft_).u32_307 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT3_UFSTACK(_ft_) ((_ft_).u32_307 >> ((PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE7 opened page3 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W0_WOFF (306U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W1_WOFF (307U) +#define 
PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT3_PPAGE3(_ft_,_x_) { ((_ft_).u32_306 = (((_ft_).u32_306 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_307 = (((_ft_).u32_307 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT3_PPAGE3(_ft_) (((_ft_).u32_306 >> (20)) | ((IMG_UINT64)((_ft_).u32_307 & 0x0000ffffU ) << (12))) +/* +VCE7 opened page3 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W0_WOFF (305U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W1_WOFF (306U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT3_PPAGE2(_ft_,_x_) { ((_ft_).u32_305 = (((_ft_).u32_305 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_306 = (((_ft_).u32_306 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT3_PPAGE2(_ft_) (((_ft_).u32_305 >> (24)) | ((IMG_UINT64)((_ft_).u32_306 & 0x000fffffU ) << (8))) +/* +VCE7 opened page3 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W0_WOFF (304U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W1_WOFF (305U) +#define 
PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT3_PPAGE1(_ft_,_x_) { ((_ft_).u32_304 = (((_ft_).u32_304 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_305 = (((_ft_).u32_305 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT3_PPAGE1(_ft_) (((_ft_).u32_304 >> (28)) | ((IMG_UINT64)((_ft_).u32_305 & 0x00ffffffU ) << (4))) +/* +VCE7 opened page3 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE0_WOFF (304U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT3_PPAGE0(_ft_,_x_) ((_ft_).u32_304 = (((_ft_).u32_304 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT3_PPAGE0(_ft_) ((_ft_).u32_304 >> ((PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE7 opened page3 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PAGE_WOFF (300U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT3_PAGE(_ft_,_x_) ((_ft_).u32_300 = (((_ft_).u32_300 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT3_PAGE(_ft_) ((_ft_).u32_300 >> 
((PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE7 opened page2 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_UFSTACK_WOFF (299U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT2_UFSTACK(_ft_,_x_) ((_ft_).u32_299 = (((_ft_).u32_299 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT2_UFSTACK(_ft_) ((_ft_).u32_299 >> ((PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE7 opened page2 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W0_WOFF (298U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W1_WOFF (299U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT2_PPAGE3(_ft_,_x_) { ((_ft_).u32_298 = (((_ft_).u32_298 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_299 = (((_ft_).u32_299 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT2_PPAGE3(_ft_) (((_ft_).u32_298 >> (20)) | ((IMG_UINT64)((_ft_).u32_299 & 0x0000ffffU ) << (12))) +/* +VCE7 opened page2 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W0_WOFF (297U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W1_WOFF (298U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W0_SHIFT (24U) +#define 
PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT2_PPAGE2(_ft_,_x_) { ((_ft_).u32_297 = (((_ft_).u32_297 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_298 = (((_ft_).u32_298 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT2_PPAGE2(_ft_) (((_ft_).u32_297 >> (24)) | ((IMG_UINT64)((_ft_).u32_298 & 0x000fffffU ) << (8))) +/* +VCE7 opened page2 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W0_WOFF (296U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W1_WOFF (297U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT2_PPAGE1(_ft_,_x_) { ((_ft_).u32_296 = (((_ft_).u32_296 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_297 = (((_ft_).u32_297 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT2_PPAGE1(_ft_) (((_ft_).u32_296 >> (28)) | ((IMG_UINT64)((_ft_).u32_297 & 0x00ffffffU ) << (4))) +/* +VCE7 opened page2 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE0_WOFF (296U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE0_CLRMSK (0xF0000000U) +#define 
PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT2_PPAGE0(_ft_,_x_) ((_ft_).u32_296 = (((_ft_).u32_296 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT2_PPAGE0(_ft_) ((_ft_).u32_296 >> ((PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE7 opened page2 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PAGE_WOFF (292U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT2_PAGE(_ft_,_x_) ((_ft_).u32_292 = (((_ft_).u32_292 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT2_PAGE(_ft_) ((_ft_).u32_292 >> ((PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE7 opened page1 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_UFSTACK_WOFF (291U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_291 = (((_ft_).u32_291 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT1_UFSTACK(_ft_) ((_ft_).u32_291 >> ((PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE7 opened page1 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W0_WOFF (290U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W1_WOFF (291U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W1_SHIFT (0U) +#define 
PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_290 = (((_ft_).u32_290 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_291 = (((_ft_).u32_291 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_290 >> (20)) | ((IMG_UINT64)((_ft_).u32_291 & 0x0000ffffU ) << (12))) +/* +VCE7 opened page1 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W0_WOFF (289U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W1_WOFF (290U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_289 = (((_ft_).u32_289 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_290 = (((_ft_).u32_290 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_289 >> (24)) | ((IMG_UINT64)((_ft_).u32_290 & 0x000fffffU ) << (8))) +/* +VCE7 opened page1 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W0_WOFF (288U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W1_WOFF (289U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W1_SHIFT (0U) +#define 
PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_288 = (((_ft_).u32_288 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_289 = (((_ft_).u32_289 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_288 >> (28)) | ((IMG_UINT64)((_ft_).u32_289 & 0x00ffffffU ) << (4))) +/* +VCE7 opened page1 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE0_WOFF (288U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_288 = (((_ft_).u32_288 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT1_PPAGE0(_ft_) ((_ft_).u32_288 >> ((PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE7 opened page1 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PAGE_WOFF (284U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_284 = (((_ft_).u32_284 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT1_PAGE(_ft_) ((_ft_).u32_284 >> ((PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE7 opened page0 struct: unified stack bit +*/ +#define 
PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_UFSTACK_WOFF (283U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_283 = (((_ft_).u32_283 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT0_UFSTACK(_ft_) ((_ft_).u32_283 >> ((PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE7 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W0_WOFF (282U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W1_WOFF (283U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_282 = (((_ft_).u32_282 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_283 = (((_ft_).u32_283 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_282 >> (20)) | ((IMG_UINT64)((_ft_).u32_283 & 0x0000ffffU ) << (12))) +/* +VCE7 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W0_WOFF (281U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W1_WOFF (282U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define 
PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_281 = (((_ft_).u32_281 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_282 = (((_ft_).u32_282 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_281 >> (24)) | ((IMG_UINT64)((_ft_).u32_282 & 0x000fffffU ) << (8))) +/* +VCE7 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W0_WOFF (280U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W1_WOFF (281U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_280 = (((_ft_).u32_280 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_281 = (((_ft_).u32_281 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_280 >> (28)) | ((IMG_UINT64)((_ft_).u32_281 & 0x00ffffffU ) << (4))) +/* +VCE7 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE0_WOFF (280U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_280 = (((_ft_).u32_280 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & 
(0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT0_PPAGE0(_ft_) ((_ft_).u32_280 >> ((PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE7 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PAGE_WOFF (276U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_276 = (((_ft_).u32_276 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT0_PAGE(_ft_) ((_ft_).u32_276 >> ((PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE6 opened page1 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_UFSTACK_WOFF (259U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_259 = (((_ft_).u32_259 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT1_UFSTACK(_ft_) ((_ft_).u32_259 >> ((PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE6 opened page1 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W0_WOFF (258U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W1_WOFF (259U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define 
PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_258 = (((_ft_).u32_258 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_259 = (((_ft_).u32_259 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_258 >> (20)) | ((IMG_UINT64)((_ft_).u32_259 & 0x0000ffffU ) << (12))) +/* +VCE6 opened page1 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W0_WOFF (257U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W1_WOFF (258U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_257 = (((_ft_).u32_257 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_258 = (((_ft_).u32_258 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_257 >> (24)) | ((IMG_UINT64)((_ft_).u32_258 & 0x000fffffU ) << (8))) +/* +VCE6 opened page1 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W0_WOFF (256U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W1_WOFF (257U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U) +#define 
PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_256 = (((_ft_).u32_256 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_257 = (((_ft_).u32_257 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_256 >> (28)) | ((IMG_UINT64)((_ft_).u32_257 & 0x00ffffffU ) << (4))) +/* +VCE6 opened page1 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE0_WOFF (256U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_256 = (((_ft_).u32_256 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT1_PPAGE0(_ft_) ((_ft_).u32_256 >> ((PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE6 opened page1 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PAGE_WOFF (252U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_252 = (((_ft_).u32_252 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT1_PAGE(_ft_) ((_ft_).u32_252 >> ((PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE6 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_UFSTACK_WOFF (251U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_UFSTACK_SHIFT (16U) +#define 
PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_251 = (((_ft_).u32_251 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT0_UFSTACK(_ft_) ((_ft_).u32_251 >> ((PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE6 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W0_WOFF (250U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W1_WOFF (251U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_250 = (((_ft_).u32_250 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_251 = (((_ft_).u32_251 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_250 >> (20)) | ((IMG_UINT64)((_ft_).u32_251 & 0x0000ffffU ) << (12))) +/* +VCE6 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W0_WOFF (249U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W1_WOFF (250U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT0_PPAGE2(_ft_,_x_) { 
((_ft_).u32_249 = (((_ft_).u32_249 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_250 = (((_ft_).u32_250 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_249 >> (24)) | ((IMG_UINT64)((_ft_).u32_250 & 0x000fffffU ) << (8))) +/* +VCE6 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W0_WOFF (248U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W1_WOFF (249U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_248 = (((_ft_).u32_248 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_249 = (((_ft_).u32_249 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_248 >> (28)) | ((IMG_UINT64)((_ft_).u32_249 & 0x00ffffffU ) << (4))) +/* +VCE6 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE0_WOFF (248U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_248 = (((_ft_).u32_248 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT0_PPAGE0(_ft_) 
((_ft_).u32_248 >> ((PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE6 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PAGE_WOFF (244U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_244 = (((_ft_).u32_244 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT0_PAGE(_ft_) ((_ft_).u32_244 >> ((PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE5 opened page3 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_UFSTACK_WOFF (243U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT3_UFSTACK(_ft_,_x_) ((_ft_).u32_243 = (((_ft_).u32_243 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT3_UFSTACK(_ft_) ((_ft_).u32_243 >> ((PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE5 opened page3 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W0_WOFF (242U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W1_WOFF (243U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT3_PPAGE3(_ft_,_x_) { ((_ft_).u32_242 = (((_ft_).u32_242 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W0_CLRMSK ) | 
(((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_243 = (((_ft_).u32_243 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT3_PPAGE3(_ft_) (((_ft_).u32_242 >> (20)) | ((IMG_UINT64)((_ft_).u32_243 & 0x0000ffffU ) << (12))) +/* +VCE5 opened page3 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W0_WOFF (241U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W1_WOFF (242U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT3_PPAGE2(_ft_,_x_) { ((_ft_).u32_241 = (((_ft_).u32_241 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_242 = (((_ft_).u32_242 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT3_PPAGE2(_ft_) (((_ft_).u32_241 >> (24)) | ((IMG_UINT64)((_ft_).u32_242 & 0x000fffffU ) << (8))) +/* +VCE5 opened page3 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W0_WOFF (240U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W1_WOFF (241U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT3_PPAGE1(_ft_,_x_) { ((_ft_).u32_240 = (((_ft_).u32_240 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W0_CLRMSK ) | (((_x_) & 
(IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_241 = (((_ft_).u32_241 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT3_PPAGE1(_ft_) (((_ft_).u32_240 >> (28)) | ((IMG_UINT64)((_ft_).u32_241 & 0x00ffffffU ) << (4))) +/* +VCE5 opened page3 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE0_WOFF (240U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT3_PPAGE0(_ft_,_x_) ((_ft_).u32_240 = (((_ft_).u32_240 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT3_PPAGE0(_ft_) ((_ft_).u32_240 >> ((PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE5 opened page3 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PAGE_WOFF (236U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT3_PAGE(_ft_,_x_) ((_ft_).u32_236 = (((_ft_).u32_236 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT3_PAGE(_ft_) ((_ft_).u32_236 >> ((PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE5 opened page2 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_UFSTACK_WOFF (235U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT2_UFSTACK(_ft_,_x_) ((_ft_).u32_235 = (((_ft_).u32_235 & 
PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT2_UFSTACK(_ft_) ((_ft_).u32_235 >> ((PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE5 opened page2 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W0_WOFF (234U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W1_WOFF (235U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT2_PPAGE3(_ft_,_x_) { ((_ft_).u32_234 = (((_ft_).u32_234 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_235 = (((_ft_).u32_235 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT2_PPAGE3(_ft_) (((_ft_).u32_234 >> (20)) | ((IMG_UINT64)((_ft_).u32_235 & 0x0000ffffU ) << (12))) +/* +VCE5 opened page2 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W0_WOFF (233U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W1_WOFF (234U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT2_PPAGE2(_ft_,_x_) { ((_ft_).u32_233 = (((_ft_).u32_233 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_234 = 
(((_ft_).u32_234 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT2_PPAGE2(_ft_) (((_ft_).u32_233 >> (24)) | ((IMG_UINT64)((_ft_).u32_234 & 0x000fffffU ) << (8))) +/* +VCE5 opened page2 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W0_WOFF (232U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W1_WOFF (233U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT2_PPAGE1(_ft_,_x_) { ((_ft_).u32_232 = (((_ft_).u32_232 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_233 = (((_ft_).u32_233 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT2_PPAGE1(_ft_) (((_ft_).u32_232 >> (28)) | ((IMG_UINT64)((_ft_).u32_233 & 0x00ffffffU ) << (4))) +/* +VCE5 opened page2 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE0_WOFF (232U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT2_PPAGE0(_ft_,_x_) ((_ft_).u32_232 = (((_ft_).u32_232 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT2_PPAGE0(_ft_) ((_ft_).u32_232 >> ((PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE5 opened page2 struct: virtual page number +*/ +#define 
PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PAGE_WOFF (228U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT2_PAGE(_ft_,_x_) ((_ft_).u32_228 = (((_ft_).u32_228 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT2_PAGE(_ft_) ((_ft_).u32_228 >> ((PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE5 opened page1 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_UFSTACK_WOFF (227U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_227 = (((_ft_).u32_227 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT1_UFSTACK(_ft_) ((_ft_).u32_227 >> ((PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE5 opened page1 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W0_WOFF (226U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W1_WOFF (227U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_226 = (((_ft_).u32_226 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_227 = (((_ft_).u32_227 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W1_CLRMSK ) | 
(((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_226 >> (20)) | ((IMG_UINT64)((_ft_).u32_227 & 0x0000ffffU ) << (12))) +/* +VCE5 opened page1 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W0_WOFF (225U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W1_WOFF (226U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_225 = (((_ft_).u32_225 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_226 = (((_ft_).u32_226 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_225 >> (24)) | ((IMG_UINT64)((_ft_).u32_226 & 0x000fffffU ) << (8))) +/* +VCE5 opened page1 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W0_WOFF (224U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W1_WOFF (225U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_224 = (((_ft_).u32_224 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_225 = (((_ft_).u32_225 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & 
(IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_224 >> (28)) | ((IMG_UINT64)((_ft_).u32_225 & 0x00ffffffU ) << (4))) +/* +VCE5 opened page1 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE0_WOFF (224U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_224 = (((_ft_).u32_224 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT1_PPAGE0(_ft_) ((_ft_).u32_224 >> ((PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE5 opened page1 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PAGE_WOFF (220U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_220 = (((_ft_).u32_220 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT1_PAGE(_ft_) ((_ft_).u32_220 >> ((PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE5 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_UFSTACK_WOFF (219U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_219 = (((_ft_).u32_219 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_UFSTACK_SHIFT)))) +#define 
PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT0_UFSTACK(_ft_) ((_ft_).u32_219 >> ((PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE5 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W0_WOFF (218U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W1_WOFF (219U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_218 = (((_ft_).u32_218 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_219 = (((_ft_).u32_219 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_218 >> (20)) | ((IMG_UINT64)((_ft_).u32_219 & 0x0000ffffU ) << (12))) +/* +VCE5 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W0_WOFF (217U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W1_WOFF (218U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_217 = (((_ft_).u32_217 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_218 = (((_ft_).u32_218 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define 
PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_217 >> (24)) | ((IMG_UINT64)((_ft_).u32_218 & 0x000fffffU ) << (8))) +/* +VCE5 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W0_WOFF (216U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W1_WOFF (217U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_216 = (((_ft_).u32_216 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_217 = (((_ft_).u32_217 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_216 >> (28)) | ((IMG_UINT64)((_ft_).u32_217 & 0x00ffffffU ) << (4))) +/* +VCE5 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE0_WOFF (216U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_216 = (((_ft_).u32_216 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT0_PPAGE0(_ft_) ((_ft_).u32_216 >> ((PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE5 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PAGE_WOFF (212U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PAGE_CLRMSK 
(0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_212 = (((_ft_).u32_212 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT0_PAGE(_ft_) ((_ft_).u32_212 >> ((PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE4 opened page3 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_UFSTACK_WOFF (211U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT3_UFSTACK(_ft_,_x_) ((_ft_).u32_211 = (((_ft_).u32_211 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT3_UFSTACK(_ft_) ((_ft_).u32_211 >> ((PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE4 opened page3 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W0_WOFF (210U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W1_WOFF (211U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT3_PPAGE3(_ft_,_x_) { ((_ft_).u32_210 = (((_ft_).u32_210 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_211 = (((_ft_).u32_211 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT3_PPAGE3(_ft_) (((_ft_).u32_210 >> (20)) | 
((IMG_UINT64)((_ft_).u32_211 & 0x0000ffffU ) << (12))) +/* +VCE4 opened page3 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W0_WOFF (209U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W1_WOFF (210U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT3_PPAGE2(_ft_,_x_) { ((_ft_).u32_209 = (((_ft_).u32_209 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_210 = (((_ft_).u32_210 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT3_PPAGE2(_ft_) (((_ft_).u32_209 >> (24)) | ((IMG_UINT64)((_ft_).u32_210 & 0x000fffffU ) << (8))) +/* +VCE4 opened page3 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W0_WOFF (208U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W1_WOFF (209U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT3_PPAGE1(_ft_,_x_) { ((_ft_).u32_208 = (((_ft_).u32_208 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_209 = (((_ft_).u32_209 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT3_PPAGE1(_ft_) (((_ft_).u32_208 >> (28)) | 
((IMG_UINT64)((_ft_).u32_209 & 0x00ffffffU ) << (4))) +/* +VCE4 opened page3 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE0_WOFF (208U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT3_PPAGE0(_ft_,_x_) ((_ft_).u32_208 = (((_ft_).u32_208 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT3_PPAGE0(_ft_) ((_ft_).u32_208 >> ((PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE4 opened page3 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PAGE_WOFF (204U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT3_PAGE(_ft_,_x_) ((_ft_).u32_204 = (((_ft_).u32_204 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT3_PAGE(_ft_) ((_ft_).u32_204 >> ((PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE4 opened page2 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_UFSTACK_WOFF (203U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT2_UFSTACK(_ft_,_x_) ((_ft_).u32_203 = (((_ft_).u32_203 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT2_UFSTACK(_ft_) ((_ft_).u32_203 >> ((PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE4 opened page2 
struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W0_WOFF (202U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W1_WOFF (203U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT2_PPAGE3(_ft_,_x_) { ((_ft_).u32_202 = (((_ft_).u32_202 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_203 = (((_ft_).u32_203 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT2_PPAGE3(_ft_) (((_ft_).u32_202 >> (20)) | ((IMG_UINT64)((_ft_).u32_203 & 0x0000ffffU ) << (12))) +/* +VCE4 opened page2 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W0_WOFF (201U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W1_WOFF (202U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT2_PPAGE2(_ft_,_x_) { ((_ft_).u32_201 = (((_ft_).u32_201 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_202 = (((_ft_).u32_202 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT2_PPAGE2(_ft_) (((_ft_).u32_201 >> (24)) | ((IMG_UINT64)((_ft_).u32_202 & 0x000fffffU ) << (8))) +/* +VCE4 opened page2 struct: physical page 1 
+*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W0_WOFF (200U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W1_WOFF (201U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT2_PPAGE1(_ft_,_x_) { ((_ft_).u32_200 = (((_ft_).u32_200 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_201 = (((_ft_).u32_201 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT2_PPAGE1(_ft_) (((_ft_).u32_200 >> (28)) | ((IMG_UINT64)((_ft_).u32_201 & 0x00ffffffU ) << (4))) +/* +VCE4 opened page2 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE0_WOFF (200U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT2_PPAGE0(_ft_,_x_) ((_ft_).u32_200 = (((_ft_).u32_200 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT2_PPAGE0(_ft_) ((_ft_).u32_200 >> ((PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE4 opened page2 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PAGE_WOFF (196U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT2_PAGE(_ft_,_x_) ((_ft_).u32_196 = (((_ft_).u32_196 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PAGE_CLRMSK ) | (((_x_) & 
(0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT2_PAGE(_ft_) ((_ft_).u32_196 >> ((PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE4 opened page1 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_UFSTACK_WOFF (195U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_195 = (((_ft_).u32_195 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT1_UFSTACK(_ft_) ((_ft_).u32_195 >> ((PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE4 opened page1 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W0_WOFF (194U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W1_WOFF (195U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_194 = (((_ft_).u32_194 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_195 = (((_ft_).u32_195 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_194 >> (20)) | ((IMG_UINT64)((_ft_).u32_195 & 0x0000ffffU ) << (12))) +/* +VCE4 opened page1 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W0_WOFF (193U) +#define 
PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W1_WOFF (194U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_193 = (((_ft_).u32_193 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_194 = (((_ft_).u32_194 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_193 >> (24)) | ((IMG_UINT64)((_ft_).u32_194 & 0x000fffffU ) << (8))) +/* +VCE4 opened page1 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W0_WOFF (192U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W1_WOFF (193U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_192 = (((_ft_).u32_192 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_193 = (((_ft_).u32_193 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_192 >> (28)) | ((IMG_UINT64)((_ft_).u32_193 & 0x00ffffffU ) << (4))) +/* +VCE4 opened page1 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE0_WOFF (192U) +#define 
PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_192 = (((_ft_).u32_192 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT1_PPAGE0(_ft_) ((_ft_).u32_192 >> ((PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE4 opened page1 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PAGE_WOFF (188U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_188 = (((_ft_).u32_188 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT1_PAGE(_ft_) ((_ft_).u32_188 >> ((PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE4 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_UFSTACK_WOFF (187U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_187 = (((_ft_).u32_187 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT0_UFSTACK(_ft_) ((_ft_).u32_187 >> ((PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE4 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W0_WOFF (186U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W1_WOFF (187U) +#define 
PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_186 = (((_ft_).u32_186 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_187 = (((_ft_).u32_187 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_186 >> (20)) | ((IMG_UINT64)((_ft_).u32_187 & 0x0000ffffU ) << (12))) +/* +VCE4 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W0_WOFF (185U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W1_WOFF (186U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_185 = (((_ft_).u32_185 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_186 = (((_ft_).u32_186 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_185 >> (24)) | ((IMG_UINT64)((_ft_).u32_186 & 0x000fffffU ) << (8))) +/* +VCE4 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W0_WOFF (184U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W1_WOFF (185U) +#define 
PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_184 = (((_ft_).u32_184 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_185 = (((_ft_).u32_185 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_184 >> (28)) | ((IMG_UINT64)((_ft_).u32_185 & 0x00ffffffU ) << (4))) +/* +VCE4 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE0_WOFF (184U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_184 = (((_ft_).u32_184 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT0_PPAGE0(_ft_) ((_ft_).u32_184 >> ((PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE4 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PAGE_WOFF (180U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_180 = (((_ft_).u32_180 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT0_PAGE(_ft_) ((_ft_).u32_180 >> 
((PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE3 opened page3 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_UFSTACK_WOFF (179U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT3_UFSTACK(_ft_,_x_) ((_ft_).u32_179 = (((_ft_).u32_179 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT3_UFSTACK(_ft_) ((_ft_).u32_179 >> ((PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE3 opened page3 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W0_WOFF (178U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W1_WOFF (179U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT3_PPAGE3(_ft_,_x_) { ((_ft_).u32_178 = (((_ft_).u32_178 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_179 = (((_ft_).u32_179 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT3_PPAGE3(_ft_) (((_ft_).u32_178 >> (20)) | ((IMG_UINT64)((_ft_).u32_179 & 0x0000ffffU ) << (12))) +/* +VCE3 opened page3 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W0_WOFF (177U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W1_WOFF (178U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W0_SHIFT (24U) +#define 
PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT3_PPAGE2(_ft_,_x_) { ((_ft_).u32_177 = (((_ft_).u32_177 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_178 = (((_ft_).u32_178 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT3_PPAGE2(_ft_) (((_ft_).u32_177 >> (24)) | ((IMG_UINT64)((_ft_).u32_178 & 0x000fffffU ) << (8))) +/* +VCE3 opened page3 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W0_WOFF (176U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W1_WOFF (177U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT3_PPAGE1(_ft_,_x_) { ((_ft_).u32_176 = (((_ft_).u32_176 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_177 = (((_ft_).u32_177 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT3_PPAGE1(_ft_) (((_ft_).u32_176 >> (28)) | ((IMG_UINT64)((_ft_).u32_177 & 0x00ffffffU ) << (4))) +/* +VCE3 opened page3 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE0_WOFF (176U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE0_CLRMSK (0xF0000000U) +#define 
PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT3_PPAGE0(_ft_,_x_) ((_ft_).u32_176 = (((_ft_).u32_176 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT3_PPAGE0(_ft_) ((_ft_).u32_176 >> ((PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE3 opened page3 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PAGE_WOFF (172U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT3_PAGE(_ft_,_x_) ((_ft_).u32_172 = (((_ft_).u32_172 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT3_PAGE(_ft_) ((_ft_).u32_172 >> ((PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE3 opened page2 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_UFSTACK_WOFF (171U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT2_UFSTACK(_ft_,_x_) ((_ft_).u32_171 = (((_ft_).u32_171 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT2_UFSTACK(_ft_) ((_ft_).u32_171 >> ((PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE3 opened page2 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W0_WOFF (170U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W1_WOFF (171U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W1_SHIFT (0U) +#define 
PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT2_PPAGE3(_ft_,_x_) { ((_ft_).u32_170 = (((_ft_).u32_170 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_171 = (((_ft_).u32_171 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT2_PPAGE3(_ft_) (((_ft_).u32_170 >> (20)) | ((IMG_UINT64)((_ft_).u32_171 & 0x0000ffffU ) << (12))) +/* +VCE3 opened page2 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W0_WOFF (169U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W1_WOFF (170U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT2_PPAGE2(_ft_,_x_) { ((_ft_).u32_169 = (((_ft_).u32_169 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_170 = (((_ft_).u32_170 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT2_PPAGE2(_ft_) (((_ft_).u32_169 >> (24)) | ((IMG_UINT64)((_ft_).u32_170 & 0x000fffffU ) << (8))) +/* +VCE3 opened page2 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W0_WOFF (168U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W1_WOFF (169U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W1_SHIFT (0U) +#define 
PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT2_PPAGE1(_ft_,_x_) { ((_ft_).u32_168 = (((_ft_).u32_168 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_169 = (((_ft_).u32_169 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT2_PPAGE1(_ft_) (((_ft_).u32_168 >> (28)) | ((IMG_UINT64)((_ft_).u32_169 & 0x00ffffffU ) << (4))) +/* +VCE3 opened page2 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE0_WOFF (168U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT2_PPAGE0(_ft_,_x_) ((_ft_).u32_168 = (((_ft_).u32_168 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT2_PPAGE0(_ft_) ((_ft_).u32_168 >> ((PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE3 opened page2 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PAGE_WOFF (164U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT2_PAGE(_ft_,_x_) ((_ft_).u32_164 = (((_ft_).u32_164 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT2_PAGE(_ft_) ((_ft_).u32_164 >> ((PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE3 opened page1 struct: unified stack bit +*/ +#define 
PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_UFSTACK_WOFF (163U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_163 = (((_ft_).u32_163 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT1_UFSTACK(_ft_) ((_ft_).u32_163 >> ((PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE3 opened page1 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W0_WOFF (162U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W1_WOFF (163U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_162 = (((_ft_).u32_162 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_163 = (((_ft_).u32_163 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_162 >> (20)) | ((IMG_UINT64)((_ft_).u32_163 & 0x0000ffffU ) << (12))) +/* +VCE3 opened page1 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W0_WOFF (161U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W1_WOFF (162U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define 
PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_161 = (((_ft_).u32_161 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_162 = (((_ft_).u32_162 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_161 >> (24)) | ((IMG_UINT64)((_ft_).u32_162 & 0x000fffffU ) << (8))) +/* +VCE3 opened page1 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W0_WOFF (160U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W1_WOFF (161U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_160 = (((_ft_).u32_160 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_161 = (((_ft_).u32_161 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_160 >> (28)) | ((IMG_UINT64)((_ft_).u32_161 & 0x00ffffffU ) << (4))) +/* +VCE3 opened page1 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE0_WOFF (160U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_160 = (((_ft_).u32_160 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & 
(0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT1_PPAGE0(_ft_) ((_ft_).u32_160 >> ((PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE3 opened page1 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PAGE_WOFF (156U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_156 = (((_ft_).u32_156 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT1_PAGE(_ft_) ((_ft_).u32_156 >> ((PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE3 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_UFSTACK_WOFF (155U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_155 = (((_ft_).u32_155 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT0_UFSTACK(_ft_) ((_ft_).u32_155 >> ((PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE3 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W0_WOFF (154U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W1_WOFF (155U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define 
PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_154 = (((_ft_).u32_154 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_155 = (((_ft_).u32_155 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_154 >> (20)) | ((IMG_UINT64)((_ft_).u32_155 & 0x0000ffffU ) << (12))) +/* +VCE3 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W0_WOFF (153U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W1_WOFF (154U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_153 = (((_ft_).u32_153 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_154 = (((_ft_).u32_154 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_153 >> (24)) | ((IMG_UINT64)((_ft_).u32_154 & 0x000fffffU ) << (8))) +/* +VCE3 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W0_WOFF (152U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W1_WOFF (153U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define 
PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_152 = (((_ft_).u32_152 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_153 = (((_ft_).u32_153 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_152 >> (28)) | ((IMG_UINT64)((_ft_).u32_153 & 0x00ffffffU ) << (4))) +/* +VCE3 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE0_WOFF (152U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_152 = (((_ft_).u32_152 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT0_PPAGE0(_ft_) ((_ft_).u32_152 >> ((PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE3 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PAGE_WOFF (148U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_148 = (((_ft_).u32_148 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT0_PAGE(_ft_) ((_ft_).u32_148 >> ((PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE2 opened page3 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_UFSTACK_WOFF (147U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_UFSTACK_SHIFT (16U) +#define 
PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT3_UFSTACK(_ft_,_x_) ((_ft_).u32_147 = (((_ft_).u32_147 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT3_UFSTACK(_ft_) ((_ft_).u32_147 >> ((PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE2 opened page3 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W0_WOFF (146U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W1_WOFF (147U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT3_PPAGE3(_ft_,_x_) { ((_ft_).u32_146 = (((_ft_).u32_146 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_147 = (((_ft_).u32_147 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT3_PPAGE3(_ft_) (((_ft_).u32_146 >> (20)) | ((IMG_UINT64)((_ft_).u32_147 & 0x0000ffffU ) << (12))) +/* +VCE2 opened page3 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W0_WOFF (145U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W1_WOFF (146U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT3_PPAGE2(_ft_,_x_) { 
((_ft_).u32_145 = (((_ft_).u32_145 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_146 = (((_ft_).u32_146 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT3_PPAGE2(_ft_) (((_ft_).u32_145 >> (24)) | ((IMG_UINT64)((_ft_).u32_146 & 0x000fffffU ) << (8))) +/* +VCE2 opened page3 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W0_WOFF (144U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W1_WOFF (145U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT3_PPAGE1(_ft_,_x_) { ((_ft_).u32_144 = (((_ft_).u32_144 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_145 = (((_ft_).u32_145 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT3_PPAGE1(_ft_) (((_ft_).u32_144 >> (28)) | ((IMG_UINT64)((_ft_).u32_145 & 0x00ffffffU ) << (4))) +/* +VCE2 opened page3 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE0_WOFF (144U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT3_PPAGE0(_ft_,_x_) ((_ft_).u32_144 = (((_ft_).u32_144 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT3_PPAGE0(_ft_) 
((_ft_).u32_144 >> ((PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE2 opened page3 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PAGE_WOFF (140U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT3_PAGE(_ft_,_x_) ((_ft_).u32_140 = (((_ft_).u32_140 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT3_PAGE(_ft_) ((_ft_).u32_140 >> ((PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE2 opened page2 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_UFSTACK_WOFF (139U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT2_UFSTACK(_ft_,_x_) ((_ft_).u32_139 = (((_ft_).u32_139 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT2_UFSTACK(_ft_) ((_ft_).u32_139 >> ((PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE2 opened page2 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W0_WOFF (138U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W1_WOFF (139U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT2_PPAGE3(_ft_,_x_) { ((_ft_).u32_138 = (((_ft_).u32_138 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W0_CLRMSK ) | 
(((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_139 = (((_ft_).u32_139 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT2_PPAGE3(_ft_) (((_ft_).u32_138 >> (20)) | ((IMG_UINT64)((_ft_).u32_139 & 0x0000ffffU ) << (12))) +/* +VCE2 opened page2 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W0_WOFF (137U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W1_WOFF (138U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT2_PPAGE2(_ft_,_x_) { ((_ft_).u32_137 = (((_ft_).u32_137 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_138 = (((_ft_).u32_138 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT2_PPAGE2(_ft_) (((_ft_).u32_137 >> (24)) | ((IMG_UINT64)((_ft_).u32_138 & 0x000fffffU ) << (8))) +/* +VCE2 opened page2 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W0_WOFF (136U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W1_WOFF (137U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT2_PPAGE1(_ft_,_x_) { ((_ft_).u32_136 = (((_ft_).u32_136 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W0_CLRMSK ) | (((_x_) & 
(IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_137 = (((_ft_).u32_137 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT2_PPAGE1(_ft_) (((_ft_).u32_136 >> (28)) | ((IMG_UINT64)((_ft_).u32_137 & 0x00ffffffU ) << (4))) +/* +VCE2 opened page2 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE0_WOFF (136U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT2_PPAGE0(_ft_,_x_) ((_ft_).u32_136 = (((_ft_).u32_136 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT2_PPAGE0(_ft_) ((_ft_).u32_136 >> ((PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE2 opened page2 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PAGE_WOFF (132U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT2_PAGE(_ft_,_x_) ((_ft_).u32_132 = (((_ft_).u32_132 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT2_PAGE(_ft_) ((_ft_).u32_132 >> ((PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE2 opened page1 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_UFSTACK_WOFF (131U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_131 = (((_ft_).u32_131 & 
PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT1_UFSTACK(_ft_) ((_ft_).u32_131 >> ((PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE2 opened page1 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W0_WOFF (130U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W1_WOFF (131U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_130 = (((_ft_).u32_130 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_131 = (((_ft_).u32_131 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_130 >> (20)) | ((IMG_UINT64)((_ft_).u32_131 & 0x0000ffffU ) << (12))) +/* +VCE2 opened page1 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W0_WOFF (129U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W1_WOFF (130U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_129 = (((_ft_).u32_129 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_130 = 
(((_ft_).u32_130 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_129 >> (24)) | ((IMG_UINT64)((_ft_).u32_130 & 0x000fffffU ) << (8))) +/* +VCE2 opened page1 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W0_WOFF (128U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W1_WOFF (129U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_128 = (((_ft_).u32_128 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_129 = (((_ft_).u32_129 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_128 >> (28)) | ((IMG_UINT64)((_ft_).u32_129 & 0x00ffffffU ) << (4))) +/* +VCE2 opened page1 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE0_WOFF (128U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_128 = (((_ft_).u32_128 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT1_PPAGE0(_ft_) ((_ft_).u32_128 >> ((PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE2 opened page1 struct: virtual page number +*/ +#define 
PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PAGE_WOFF (124U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_124 = (((_ft_).u32_124 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT1_PAGE(_ft_) ((_ft_).u32_124 >> ((PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE2 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_UFSTACK_WOFF (123U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_123 = (((_ft_).u32_123 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT0_UFSTACK(_ft_) ((_ft_).u32_123 >> ((PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE2 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W0_WOFF (122U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W1_WOFF (123U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_122 = (((_ft_).u32_122 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_123 = (((_ft_).u32_123 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W1_CLRMSK ) | 
(((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_122 >> (20)) | ((IMG_UINT64)((_ft_).u32_123 & 0x0000ffffU ) << (12))) +/* +VCE2 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W0_WOFF (121U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W1_WOFF (122U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_121 = (((_ft_).u32_121 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_122 = (((_ft_).u32_122 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_121 >> (24)) | ((IMG_UINT64)((_ft_).u32_122 & 0x000fffffU ) << (8))) +/* +VCE2 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W0_WOFF (120U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W1_WOFF (121U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_120 = (((_ft_).u32_120 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_121 = (((_ft_).u32_121 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & 
(IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_120 >> (28)) | ((IMG_UINT64)((_ft_).u32_121 & 0x00ffffffU ) << (4))) +/* +VCE2 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE0_WOFF (120U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_120 = (((_ft_).u32_120 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT0_PPAGE0(_ft_) ((_ft_).u32_120 >> ((PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE2 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PAGE_WOFF (116U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_116 = (((_ft_).u32_116 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT0_PAGE(_ft_) ((_ft_).u32_116 >> ((PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE1 opened page3 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_UFSTACK_WOFF (115U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT3_UFSTACK(_ft_,_x_) ((_ft_).u32_115 = (((_ft_).u32_115 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_UFSTACK_SHIFT)))) +#define 
PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT3_UFSTACK(_ft_) ((_ft_).u32_115 >> ((PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE1 opened page3 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W0_WOFF (114U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W1_WOFF (115U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT3_PPAGE3(_ft_,_x_) { ((_ft_).u32_114 = (((_ft_).u32_114 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_115 = (((_ft_).u32_115 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT3_PPAGE3(_ft_) (((_ft_).u32_114 >> (20)) | ((IMG_UINT64)((_ft_).u32_115 & 0x0000ffffU ) << (12))) +/* +VCE1 opened page3 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W0_WOFF (113U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W1_WOFF (114U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT3_PPAGE2(_ft_,_x_) { ((_ft_).u32_113 = (((_ft_).u32_113 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_114 = (((_ft_).u32_114 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define 
PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT3_PPAGE2(_ft_) (((_ft_).u32_113 >> (24)) | ((IMG_UINT64)((_ft_).u32_114 & 0x000fffffU ) << (8))) +/* +VCE1 opened page3 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W0_WOFF (112U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W1_WOFF (113U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT3_PPAGE1(_ft_,_x_) { ((_ft_).u32_112 = (((_ft_).u32_112 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_113 = (((_ft_).u32_113 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT3_PPAGE1(_ft_) (((_ft_).u32_112 >> (28)) | ((IMG_UINT64)((_ft_).u32_113 & 0x00ffffffU ) << (4))) +/* +VCE1 opened page3 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE0_WOFF (112U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT3_PPAGE0(_ft_,_x_) ((_ft_).u32_112 = (((_ft_).u32_112 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT3_PPAGE0(_ft_) ((_ft_).u32_112 >> ((PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE1 opened page3 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PAGE_WOFF (108U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PAGE_CLRMSK 
(0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT3_PAGE(_ft_,_x_) ((_ft_).u32_108 = (((_ft_).u32_108 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT3_PAGE(_ft_) ((_ft_).u32_108 >> ((PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE1 opened page2 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_UFSTACK_WOFF (107U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT2_UFSTACK(_ft_,_x_) ((_ft_).u32_107 = (((_ft_).u32_107 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT2_UFSTACK(_ft_) ((_ft_).u32_107 >> ((PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE1 opened page2 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W0_WOFF (106U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W1_WOFF (107U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT2_PPAGE3(_ft_,_x_) { ((_ft_).u32_106 = (((_ft_).u32_106 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_107 = (((_ft_).u32_107 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT2_PPAGE3(_ft_) (((_ft_).u32_106 >> (20)) | 
((IMG_UINT64)((_ft_).u32_107 & 0x0000ffffU ) << (12))) +/* +VCE1 opened page2 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W0_WOFF (105U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W1_WOFF (106U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT2_PPAGE2(_ft_,_x_) { ((_ft_).u32_105 = (((_ft_).u32_105 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_106 = (((_ft_).u32_106 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT2_PPAGE2(_ft_) (((_ft_).u32_105 >> (24)) | ((IMG_UINT64)((_ft_).u32_106 & 0x000fffffU ) << (8))) +/* +VCE1 opened page2 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W0_WOFF (104U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W1_WOFF (105U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT2_PPAGE1(_ft_,_x_) { ((_ft_).u32_104 = (((_ft_).u32_104 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_105 = (((_ft_).u32_105 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT2_PPAGE1(_ft_) (((_ft_).u32_104 >> (28)) | 
((IMG_UINT64)((_ft_).u32_105 & 0x00ffffffU ) << (4))) +/* +VCE1 opened page2 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE0_WOFF (104U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT2_PPAGE0(_ft_,_x_) ((_ft_).u32_104 = (((_ft_).u32_104 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT2_PPAGE0(_ft_) ((_ft_).u32_104 >> ((PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE1 opened page2 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PAGE_WOFF (100U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT2_PAGE(_ft_,_x_) ((_ft_).u32_100 = (((_ft_).u32_100 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT2_PAGE(_ft_) ((_ft_).u32_100 >> ((PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE1 opened page1 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_UFSTACK_WOFF (99U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_99 = (((_ft_).u32_99 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT1_UFSTACK(_ft_) ((_ft_).u32_99 >> ((PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE1 opened page1 struct: 
physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W0_WOFF (98U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W1_WOFF (99U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_98 = (((_ft_).u32_98 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_99 = (((_ft_).u32_99 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_98 >> (20)) | ((IMG_UINT64)((_ft_).u32_99 & 0x0000ffffU ) << (12))) +/* +VCE1 opened page1 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W0_WOFF (97U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W1_WOFF (98U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_97 = (((_ft_).u32_97 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_98 = (((_ft_).u32_98 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_97 >> (24)) | ((IMG_UINT64)((_ft_).u32_98 & 0x000fffffU ) << (8))) +/* +VCE1 opened page1 struct: physical page 1 +*/ +#define 
PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W0_WOFF (96U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W1_WOFF (97U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_96 = (((_ft_).u32_96 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_97 = (((_ft_).u32_97 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_96 >> (28)) | ((IMG_UINT64)((_ft_).u32_97 & 0x00ffffffU ) << (4))) +/* +VCE1 opened page1 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE0_WOFF (96U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_96 = (((_ft_).u32_96 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT1_PPAGE0(_ft_) ((_ft_).u32_96 >> ((PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE1 opened page1 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PAGE_WOFF (92U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_92 = (((_ft_).u32_92 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << 
(PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT1_PAGE(_ft_) ((_ft_).u32_92 >> ((PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE1 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_UFSTACK_WOFF (91U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_91 = (((_ft_).u32_91 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT0_UFSTACK(_ft_) ((_ft_).u32_91 >> ((PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE1 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W0_WOFF (90U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W1_WOFF (91U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_90 = (((_ft_).u32_90 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_91 = (((_ft_).u32_91 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_90 >> (20)) | ((IMG_UINT64)((_ft_).u32_91 & 0x0000ffffU ) << (12))) +/* +VCE1 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W0_WOFF (89U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W1_WOFF 
(90U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_89 = (((_ft_).u32_89 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_90 = (((_ft_).u32_90 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_89 >> (24)) | ((IMG_UINT64)((_ft_).u32_90 & 0x000fffffU ) << (8))) +/* +VCE1 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W0_WOFF (88U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W1_WOFF (89U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_88 = (((_ft_).u32_88 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_89 = (((_ft_).u32_89 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_88 >> (28)) | ((IMG_UINT64)((_ft_).u32_89 & 0x00ffffffU ) << (4))) +/* +VCE1 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE0_WOFF (88U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE0_SHIFT (0U) +#define 
PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_88 = (((_ft_).u32_88 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT0_PPAGE0(_ft_) ((_ft_).u32_88 >> ((PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE1 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PAGE_WOFF (84U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_84 = (((_ft_).u32_84 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT0_PAGE(_ft_) ((_ft_).u32_84 >> ((PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE0 opened page3 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_UFSTACK_WOFF (83U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT3_UFSTACK(_ft_,_x_) ((_ft_).u32_83 = (((_ft_).u32_83 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT3_UFSTACK(_ft_) ((_ft_).u32_83 >> ((PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE0 opened page3 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W0_WOFF (82U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W1_WOFF (83U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W0_SHIFT (20U) +#define 
PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT3_PPAGE3(_ft_,_x_) { ((_ft_).u32_82 = (((_ft_).u32_82 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_83 = (((_ft_).u32_83 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT3_PPAGE3(_ft_) (((_ft_).u32_82 >> (20)) | ((IMG_UINT64)((_ft_).u32_83 & 0x0000ffffU ) << (12))) +/* +VCE0 opened page3 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W0_WOFF (81U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W1_WOFF (82U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT3_PPAGE2(_ft_,_x_) { ((_ft_).u32_81 = (((_ft_).u32_81 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_82 = (((_ft_).u32_82 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT3_PPAGE2(_ft_) (((_ft_).u32_81 >> (24)) | ((IMG_UINT64)((_ft_).u32_82 & 0x000fffffU ) << (8))) +/* +VCE0 opened page3 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W0_WOFF (80U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W1_WOFF (81U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W0_SHIFT (28U) +#define 
PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT3_PPAGE1(_ft_,_x_) { ((_ft_).u32_80 = (((_ft_).u32_80 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_81 = (((_ft_).u32_81 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT3_PPAGE1(_ft_) (((_ft_).u32_80 >> (28)) | ((IMG_UINT64)((_ft_).u32_81 & 0x00ffffffU ) << (4))) +/* +VCE0 opened page3 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE0_WOFF (80U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT3_PPAGE0(_ft_,_x_) ((_ft_).u32_80 = (((_ft_).u32_80 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT3_PPAGE0(_ft_) ((_ft_).u32_80 >> ((PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE0 opened page3 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PAGE_WOFF (76U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT3_PAGE(_ft_,_x_) ((_ft_).u32_76 = (((_ft_).u32_76 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT3_PAGE(_ft_) ((_ft_).u32_76 >> ((PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE0 opened 
page2 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_UFSTACK_WOFF (75U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT2_UFSTACK(_ft_,_x_) ((_ft_).u32_75 = (((_ft_).u32_75 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT2_UFSTACK(_ft_) ((_ft_).u32_75 >> ((PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE0 opened page2 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W0_WOFF (74U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W1_WOFF (75U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT2_PPAGE3(_ft_,_x_) { ((_ft_).u32_74 = (((_ft_).u32_74 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_75 = (((_ft_).u32_75 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT2_PPAGE3(_ft_) (((_ft_).u32_74 >> (20)) | ((IMG_UINT64)((_ft_).u32_75 & 0x0000ffffU ) << (12))) +/* +VCE0 opened page2 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W0_WOFF (73U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W1_WOFF (74U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W0_CLRMSK 
(0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT2_PPAGE2(_ft_,_x_) { ((_ft_).u32_73 = (((_ft_).u32_73 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_74 = (((_ft_).u32_74 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT2_PPAGE2(_ft_) (((_ft_).u32_73 >> (24)) | ((IMG_UINT64)((_ft_).u32_74 & 0x000fffffU ) << (8))) +/* +VCE0 opened page2 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W0_WOFF (72U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W1_WOFF (73U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT2_PPAGE1(_ft_,_x_) { ((_ft_).u32_72 = (((_ft_).u32_72 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_73 = (((_ft_).u32_73 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT2_PPAGE1(_ft_) (((_ft_).u32_72 >> (28)) | ((IMG_UINT64)((_ft_).u32_73 & 0x00ffffffU ) << (4))) +/* +VCE0 opened page2 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE0_WOFF (72U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT2_PPAGE0(_ft_,_x_) ((_ft_).u32_72 = (((_ft_).u32_72 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE0_CLRMSK ) | (((_x_) & 
(0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT2_PPAGE0(_ft_) ((_ft_).u32_72 >> ((PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE0 opened page2 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PAGE_WOFF (68U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT2_PAGE(_ft_,_x_) ((_ft_).u32_68 = (((_ft_).u32_68 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT2_PAGE(_ft_) ((_ft_).u32_68 >> ((PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE0 opened page1 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_UFSTACK_WOFF (67U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_67 = (((_ft_).u32_67 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT1_UFSTACK(_ft_) ((_ft_).u32_67 >> ((PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE0 opened page1 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W0_WOFF (66U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W1_WOFF (67U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define 
PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_66 = (((_ft_).u32_66 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_67 = (((_ft_).u32_67 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_66 >> (20)) | ((IMG_UINT64)((_ft_).u32_67 & 0x0000ffffU ) << (12))) +/* +VCE0 opened page1 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W0_WOFF (65U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W1_WOFF (66U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_65 = (((_ft_).u32_65 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_66 = (((_ft_).u32_66 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_65 >> (24)) | ((IMG_UINT64)((_ft_).u32_66 & 0x000fffffU ) << (8))) +/* +VCE0 opened page1 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W0_WOFF (64U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W1_WOFF (65U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U) +#define 
PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_64 = (((_ft_).u32_64 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_65 = (((_ft_).u32_65 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_64 >> (28)) | ((IMG_UINT64)((_ft_).u32_65 & 0x00ffffffU ) << (4))) +/* +VCE0 opened page1 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE0_WOFF (64U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_64 = (((_ft_).u32_64 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT1_PPAGE0(_ft_) ((_ft_).u32_64 >> ((PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE0 opened page1 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PAGE_WOFF (60U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_60 = (((_ft_).u32_60 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT1_PAGE(_ft_) ((_ft_).u32_60 >> ((PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE0 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_UFSTACK_WOFF (59U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_UFSTACK_SHIFT (16U) +#define 
PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_59 = (((_ft_).u32_59 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT0_UFSTACK(_ft_) ((_ft_).u32_59 >> ((PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE0 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W0_WOFF (58U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W1_WOFF (59U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_58 = (((_ft_).u32_58 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_59 = (((_ft_).u32_59 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_58 >> (20)) | ((IMG_UINT64)((_ft_).u32_59 & 0x0000ffffU ) << (12))) +/* +VCE0 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W0_WOFF (57U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W1_WOFF (58U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_57 = 
(((_ft_).u32_57 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_58 = (((_ft_).u32_58 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_57 >> (24)) | ((IMG_UINT64)((_ft_).u32_58 & 0x000fffffU ) << (8))) +/* +VCE0 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W0_WOFF (56U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W1_WOFF (57U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_56 = (((_ft_).u32_56 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_57 = (((_ft_).u32_57 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_56 >> (28)) | ((IMG_UINT64)((_ft_).u32_57 & 0x00ffffffU ) << (4))) +/* +VCE0 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE0_WOFF (56U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_56 = (((_ft_).u32_56 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT0_PPAGE0(_ft_) ((_ft_).u32_56 >> 
((PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE0 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PAGE_WOFF (52U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_52 = (((_ft_).u32_52 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT0_PAGE(_ft_) ((_ft_).u32_52 >> ((PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +Rsv2 area +*/ +#define PM_DATA_VHEAP_BUFFER_RESV2_W0_WOFF (42U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W1_WOFF (43U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W2_WOFF (44U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W2_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W3_WOFF (45U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W3_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W4_WOFF (46U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W4_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W5_WOFF (47U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W5_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W6_WOFF (48U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W6_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W7_WOFF (49U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W7_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W8_WOFF (50U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W8_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W9_WOFF (51U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W9_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W0_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W1_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W2_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W3_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W4_CLRMSK 
(0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W5_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W6_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W7_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W8_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W9_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W0(_ft_,_x_) ((_ft_).u32_42 = (((_ft_).u32_42 & PM_DATA_VHEAP_BUFFER_RESV2_W0_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W0_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W0(_ft_) (((_ft_).u32_42 >> (PM_DATA_VHEAP_BUFFER_RESV2_W0_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W1(_ft_,_x_) ((_ft_).u32_43 = (((_ft_).u32_43 & PM_DATA_VHEAP_BUFFER_RESV2_W1_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W1_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W1(_ft_) (((_ft_).u32_43 >> (PM_DATA_VHEAP_BUFFER_RESV2_W1_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W2(_ft_,_x_) ((_ft_).u32_44 = (((_ft_).u32_44 & PM_DATA_VHEAP_BUFFER_RESV2_W2_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W2_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W2(_ft_) (((_ft_).u32_44 >> (PM_DATA_VHEAP_BUFFER_RESV2_W2_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W3(_ft_,_x_) ((_ft_).u32_45 = (((_ft_).u32_45 & PM_DATA_VHEAP_BUFFER_RESV2_W3_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W3_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W3(_ft_) (((_ft_).u32_45 >> (PM_DATA_VHEAP_BUFFER_RESV2_W3_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W4(_ft_,_x_) ((_ft_).u32_46 = (((_ft_).u32_46 & PM_DATA_VHEAP_BUFFER_RESV2_W4_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W4_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W4(_ft_) (((_ft_).u32_46 >> (PM_DATA_VHEAP_BUFFER_RESV2_W4_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W5(_ft_,_x_) ((_ft_).u32_47 = (((_ft_).u32_47 & 
PM_DATA_VHEAP_BUFFER_RESV2_W5_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W5_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W5(_ft_) (((_ft_).u32_47 >> (PM_DATA_VHEAP_BUFFER_RESV2_W5_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W6(_ft_,_x_) ((_ft_).u32_48 = (((_ft_).u32_48 & PM_DATA_VHEAP_BUFFER_RESV2_W6_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W6_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W6(_ft_) (((_ft_).u32_48 >> (PM_DATA_VHEAP_BUFFER_RESV2_W6_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W7(_ft_,_x_) ((_ft_).u32_49 = (((_ft_).u32_49 & PM_DATA_VHEAP_BUFFER_RESV2_W7_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W7_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W7(_ft_) (((_ft_).u32_49 >> (PM_DATA_VHEAP_BUFFER_RESV2_W7_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W8(_ft_,_x_) ((_ft_).u32_50 = (((_ft_).u32_50 & PM_DATA_VHEAP_BUFFER_RESV2_W8_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W8_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W8(_ft_) (((_ft_).u32_50 >> (PM_DATA_VHEAP_BUFFER_RESV2_W8_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W9(_ft_,_x_) ((_ft_).u32_51 = (((_ft_).u32_51 & PM_DATA_VHEAP_BUFFER_RESV2_W9_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W9_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W9(_ft_) (((_ft_).u32_51 >> (PM_DATA_VHEAP_BUFFER_RESV2_W9_SHIFT)) & 0xffffffffU) +/* +Number of pages allocated to TE7 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT_CNT_WOFF (41U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT_CNT_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT_CNT_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE7_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_41 = (((_ft_).u32_41 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT_CNT_SHIFT)))) +#define 
PM_DATA_VHEAP_BUFFER_GET_TE7_INFLIGHT_CNT(_ft_) ((_ft_).u32_41 >> ((PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to TE6 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT_CNT_WOFF (41U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT_CNT_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT_CNT_CLRMSK (0xFF00FFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE6_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_41 = (((_ft_).u32_41 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE6_INFLIGHT_CNT(_ft_) ((_ft_).u32_41 >> ((PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to TE5 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT_CNT_WOFF (41U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT_CNT_SHIFT (8U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT_CNT_CLRMSK (0xFFFF00FFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE5_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_41 = (((_ft_).u32_41 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE5_INFLIGHT_CNT(_ft_) ((_ft_).u32_41 >> ((PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to TE4 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT_CNT_WOFF (41U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT_CNT_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT_CNT_CLRMSK (0xFFFFFF00U) +#define PM_DATA_VHEAP_BUFFER_SET_TE4_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_41 = (((_ft_).u32_41 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE4_INFLIGHT_CNT(_ft_) ((_ft_).u32_41 >> ((PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to TE3 but not yet 
closed +*/ +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT_CNT_WOFF (40U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT_CNT_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT_CNT_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE3_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_40 = (((_ft_).u32_40 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE3_INFLIGHT_CNT(_ft_) ((_ft_).u32_40 >> ((PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to TE2 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT_CNT_WOFF (40U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT_CNT_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT_CNT_CLRMSK (0xFF00FFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE2_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_40 = (((_ft_).u32_40 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE2_INFLIGHT_CNT(_ft_) ((_ft_).u32_40 >> ((PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to TE1 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT_CNT_WOFF (40U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT_CNT_SHIFT (8U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT_CNT_CLRMSK (0xFFFF00FFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE1_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_40 = (((_ft_).u32_40 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE1_INFLIGHT_CNT(_ft_) ((_ft_).u32_40 >> ((PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to TE0 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT_CNT_WOFF (40U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT_CNT_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT_CNT_CLRMSK 
(0xFFFFFF00U) +#define PM_DATA_VHEAP_BUFFER_SET_TE0_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_40 = (((_ft_).u32_40 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE0_INFLIGHT_CNT(_ft_) ((_ft_).u32_40 >> ((PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to VCE7 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT_CNT_WOFF (39U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT_CNT_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT_CNT_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_39 = (((_ft_).u32_39 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT_CNT(_ft_) ((_ft_).u32_39 >> ((PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to VCE6 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT_CNT_WOFF (39U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT_CNT_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT_CNT_CLRMSK (0xFF00FFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_39 = (((_ft_).u32_39 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT_CNT(_ft_) ((_ft_).u32_39 >> ((PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to VCE5 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT_CNT_WOFF (39U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT_CNT_SHIFT (8U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT_CNT_CLRMSK (0xFFFF00FFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_39 = (((_ft_).u32_39 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT_CNT_CLRMSK ) | 
(((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT_CNT(_ft_) ((_ft_).u32_39 >> ((PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to VCE4 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT_CNT_WOFF (39U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT_CNT_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT_CNT_CLRMSK (0xFFFFFF00U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_39 = (((_ft_).u32_39 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT_CNT(_ft_) ((_ft_).u32_39 >> ((PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to VCE3 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT_CNT_WOFF (38U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT_CNT_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT_CNT_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_38 = (((_ft_).u32_38 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT_CNT(_ft_) ((_ft_).u32_38 >> ((PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to VCE2 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT_CNT_WOFF (38U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT_CNT_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT_CNT_CLRMSK (0xFF00FFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_38 = (((_ft_).u32_38 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT_CNT(_ft_) ((_ft_).u32_38 >> 
((PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to VCE1 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT_CNT_WOFF (38U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT_CNT_SHIFT (8U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT_CNT_CLRMSK (0xFFFF00FFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_38 = (((_ft_).u32_38 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT_CNT(_ft_) ((_ft_).u32_38 >> ((PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to VCE0 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT_CNT_WOFF (38U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT_CNT_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT_CNT_CLRMSK (0xFFFFFF00U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_38 = (((_ft_).u32_38 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT_CNT(_ft_) ((_ft_).u32_38 >> ((PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +1=The PM ran out of memory during processing +*/ +#define PM_DATA_VHEAP_BUFFER_PM_OUTOFMEM_R_WOFF (37U) +#define PM_DATA_VHEAP_BUFFER_PM_OUTOFMEM_R_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_PM_OUTOFMEM_R_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_PM_OUTOFMEM_R(_ft_,_x_) ((_ft_).u32_37 = (((_ft_).u32_37 & PM_DATA_VHEAP_BUFFER_PM_OUTOFMEM_R_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_PM_OUTOFMEM_R_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_PM_OUTOFMEM_R(_ft_) ((_ft_).u32_37 >> ((PM_DATA_VHEAP_BUFFER_PM_OUTOFMEM_R_SHIFT)) & 0x00000001U) +/* +A copy of rgx_cr_pm_outofmem_abortall (at the point the VHEAP buffer was written) +*/ +#define 
PM_DATA_VHEAP_BUFFER_OUTOFMEM_ABORT_WOFF (37U) +#define PM_DATA_VHEAP_BUFFER_OUTOFMEM_ABORT_SHIFT (30U) +#define PM_DATA_VHEAP_BUFFER_OUTOFMEM_ABORT_CLRMSK (0xBFFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_OUTOFMEM_ABORT(_ft_,_x_) ((_ft_).u32_37 = (((_ft_).u32_37 & PM_DATA_VHEAP_BUFFER_OUTOFMEM_ABORT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_OUTOFMEM_ABORT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_OUTOFMEM_ABORT(_ft_) ((_ft_).u32_37 >> ((PM_DATA_VHEAP_BUFFER_OUTOFMEM_ABORT_SHIFT)) & 0x00000001U) +/* +When running out of memory, indicates which of the free stacks have run out of memory. +If bit 2 is set, MMUSTACK has run out of memory. +Bit 2 is reserved. +If bit 1 is set, UFSTACK has run out of memory. +If bit 0 is set, FSTACK has run out of memory. +*/ +#define PM_DATA_VHEAP_BUFFER_OUTOFMEM_SRC_WOFF (37U) +#define PM_DATA_VHEAP_BUFFER_OUTOFMEM_SRC_SHIFT (2U) +#define PM_DATA_VHEAP_BUFFER_OUTOFMEM_SRC_CLRMSK (0xFFFFFFE3U) +#define PM_DATA_VHEAP_BUFFER_SET_OUTOFMEM_SRC(_ft_,_x_) ((_ft_).u32_37 = (((_ft_).u32_37 & PM_DATA_VHEAP_BUFFER_OUTOFMEM_SRC_CLRMSK ) | (((_x_) & (0x00000007U)) << (PM_DATA_VHEAP_BUFFER_OUTOFMEM_SRC_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_OUTOFMEM_SRC(_ft_) ((_ft_).u32_37 >> ((PM_DATA_VHEAP_BUFFER_OUTOFMEM_SRC_SHIFT)) & 0x00000007U) +/* +When running out of memory, indicates the source of the request that caused the OOM event +If bit 1 is set, TE caused the OOM. +If bit 0 is set, VCE caused the OOM. 
+*/ +#define PM_DATA_VHEAP_BUFFER_REQ_SRC_WOFF (37U) +#define PM_DATA_VHEAP_BUFFER_REQ_SRC_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_REQ_SRC_CLRMSK (0xFFFFFFFCU) +#define PM_DATA_VHEAP_BUFFER_SET_REQ_SRC(_ft_,_x_) ((_ft_).u32_37 = (((_ft_).u32_37 & PM_DATA_VHEAP_BUFFER_REQ_SRC_CLRMSK ) | (((_x_) & (0x00000003U)) << (PM_DATA_VHEAP_BUFFER_REQ_SRC_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_REQ_SRC(_ft_) ((_ft_).u32_37 >> ((PM_DATA_VHEAP_BUFFER_REQ_SRC_SHIFT)) & 0x00000003U) +/* +MAX RTA index dword in TA stream +*/ +#define PM_DATA_VHEAP_BUFFER_MAX_RTA_WOFF (36U) +#define PM_DATA_VHEAP_BUFFER_MAX_RTA_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_MAX_RTA_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_SET_MAX_RTA(_ft_,_x_) ((_ft_).u32_36 = (((_ft_).u32_36 & PM_DATA_VHEAP_BUFFER_MAX_RTA_CLRMSK ) | (((_x_) & (0xffffffffU)) << (PM_DATA_VHEAP_BUFFER_MAX_RTA_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_MAX_RTA(_ft_) ((_ft_).u32_36 >> ((PM_DATA_VHEAP_BUFFER_MAX_RTA_SHIFT)) & 0xffffffffU) +/* +Rsv1 area +*/ +#define PM_DATA_VHEAP_BUFFER_RESV1_W0_WOFF (20U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W1_WOFF (21U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W2_WOFF (22U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W2_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W3_WOFF (23U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W3_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W4_WOFF (24U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W4_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W5_WOFF (25U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W5_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W6_WOFF (26U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W6_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W7_WOFF (27U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W7_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W8_WOFF (28U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W8_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W9_WOFF (29U) +#define 
PM_DATA_VHEAP_BUFFER_RESV1_W9_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W10_WOFF (30U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W10_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W11_WOFF (31U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W11_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W12_WOFF (32U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W12_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W13_WOFF (33U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W13_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W14_WOFF (34U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W14_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W15_WOFF (35U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W15_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W0_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W1_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W2_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W3_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W4_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W5_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W6_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W7_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W8_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W9_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W10_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W11_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W12_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W13_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W14_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W15_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W0(_ft_,_x_) ((_ft_).u32_20 = (((_ft_).u32_20 & PM_DATA_VHEAP_BUFFER_RESV1_W0_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W0_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W0(_ft_) (((_ft_).u32_20 >> (PM_DATA_VHEAP_BUFFER_RESV1_W0_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W1(_ft_,_x_) 
((_ft_).u32_21 = (((_ft_).u32_21 & PM_DATA_VHEAP_BUFFER_RESV1_W1_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W1_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W1(_ft_) (((_ft_).u32_21 >> (PM_DATA_VHEAP_BUFFER_RESV1_W1_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W2(_ft_,_x_) ((_ft_).u32_22 = (((_ft_).u32_22 & PM_DATA_VHEAP_BUFFER_RESV1_W2_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W2_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W2(_ft_) (((_ft_).u32_22 >> (PM_DATA_VHEAP_BUFFER_RESV1_W2_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W3(_ft_,_x_) ((_ft_).u32_23 = (((_ft_).u32_23 & PM_DATA_VHEAP_BUFFER_RESV1_W3_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W3_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W3(_ft_) (((_ft_).u32_23 >> (PM_DATA_VHEAP_BUFFER_RESV1_W3_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W4(_ft_,_x_) ((_ft_).u32_24 = (((_ft_).u32_24 & PM_DATA_VHEAP_BUFFER_RESV1_W4_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W4_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W4(_ft_) (((_ft_).u32_24 >> (PM_DATA_VHEAP_BUFFER_RESV1_W4_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W5(_ft_,_x_) ((_ft_).u32_25 = (((_ft_).u32_25 & PM_DATA_VHEAP_BUFFER_RESV1_W5_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W5_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W5(_ft_) (((_ft_).u32_25 >> (PM_DATA_VHEAP_BUFFER_RESV1_W5_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W6(_ft_,_x_) ((_ft_).u32_26 = (((_ft_).u32_26 & PM_DATA_VHEAP_BUFFER_RESV1_W6_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W6_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W6(_ft_) (((_ft_).u32_26 >> (PM_DATA_VHEAP_BUFFER_RESV1_W6_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W7(_ft_,_x_) ((_ft_).u32_27 = (((_ft_).u32_27 & PM_DATA_VHEAP_BUFFER_RESV1_W7_CLRMSK ) | (((_x_) & 
(0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W7_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W7(_ft_) (((_ft_).u32_27 >> (PM_DATA_VHEAP_BUFFER_RESV1_W7_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W8(_ft_,_x_) ((_ft_).u32_28 = (((_ft_).u32_28 & PM_DATA_VHEAP_BUFFER_RESV1_W8_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W8_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W8(_ft_) (((_ft_).u32_28 >> (PM_DATA_VHEAP_BUFFER_RESV1_W8_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W9(_ft_,_x_) ((_ft_).u32_29 = (((_ft_).u32_29 & PM_DATA_VHEAP_BUFFER_RESV1_W9_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W9_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W9(_ft_) (((_ft_).u32_29 >> (PM_DATA_VHEAP_BUFFER_RESV1_W9_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W10(_ft_,_x_) ((_ft_).u32_30 = (((_ft_).u32_30 & PM_DATA_VHEAP_BUFFER_RESV1_W10_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W10_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W10(_ft_) (((_ft_).u32_30 >> (PM_DATA_VHEAP_BUFFER_RESV1_W10_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W11(_ft_,_x_) ((_ft_).u32_31 = (((_ft_).u32_31 & PM_DATA_VHEAP_BUFFER_RESV1_W11_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W11_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W11(_ft_) (((_ft_).u32_31 >> (PM_DATA_VHEAP_BUFFER_RESV1_W11_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W12(_ft_,_x_) ((_ft_).u32_32 = (((_ft_).u32_32 & PM_DATA_VHEAP_BUFFER_RESV1_W12_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W12_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W12(_ft_) (((_ft_).u32_32 >> (PM_DATA_VHEAP_BUFFER_RESV1_W12_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W13(_ft_,_x_) ((_ft_).u32_33 = (((_ft_).u32_33 & PM_DATA_VHEAP_BUFFER_RESV1_W13_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W13_SHIFT))) +#define 
PM_DATA_VHEAP_BUFFER_GET_RESV1_W13(_ft_) (((_ft_).u32_33 >> (PM_DATA_VHEAP_BUFFER_RESV1_W13_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W14(_ft_,_x_) ((_ft_).u32_34 = (((_ft_).u32_34 & PM_DATA_VHEAP_BUFFER_RESV1_W14_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W14_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W14(_ft_) (((_ft_).u32_34 >> (PM_DATA_VHEAP_BUFFER_RESV1_W14_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W15(_ft_,_x_) ((_ft_).u32_35 = (((_ft_).u32_35 & PM_DATA_VHEAP_BUFFER_RESV1_W15_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W15_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W15(_ft_) (((_ft_).u32_35 >> (PM_DATA_VHEAP_BUFFER_RESV1_W15_SHIFT)) & 0xffffffffU) +/* +Rsv0 area +*/ +#define PM_DATA_VHEAP_BUFFER_RESV0_WOFF (19U) +#define PM_DATA_VHEAP_BUFFER_RESV0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV0_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_SET_RESV0(_ft_,_x_) ((_ft_).u32_19 = (((_ft_).u32_19 & PM_DATA_VHEAP_BUFFER_RESV0_CLRMSK ) | (((_x_) & (0xffffffffU)) << (PM_DATA_VHEAP_BUFFER_RESV0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV0(_ft_) ((_ft_).u32_19 >> ((PM_DATA_VHEAP_BUFFER_RESV0_SHIFT)) & 0xffffffffU) +/* +Init Bit Sent Flag for TE7 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE7_INIT_WOFF (18U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE7_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE7_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE7_INIT(_ft_,_x_) ((_ft_).u32_18 = (((_ft_).u32_18 & PM_DATA_VHEAP_BUFFER_VPTR_TE7_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE7_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE7_INIT(_ft_) ((_ft_).u32_18 >> ((PM_DATA_VHEAP_BUFFER_VPTR_TE7_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for TE7 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE7_WOFF (18U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE7_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE7_CLRMSK 
(0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE7(_ft_,_x_) ((_ft_).u32_18 = (((_ft_).u32_18 & PM_DATA_VHEAP_BUFFER_VPTR_TE7_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE7_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE7(_ft_) ((_ft_).u32_18 >> ((PM_DATA_VHEAP_BUFFER_VPTR_TE7_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for VCE7 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE7_INIT_WOFF (17U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE7_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE7_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE7_INIT(_ft_,_x_) ((_ft_).u32_17 = (((_ft_).u32_17 & PM_DATA_VHEAP_BUFFER_VPTR_VCE7_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE7_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE7_INIT(_ft_) ((_ft_).u32_17 >> ((PM_DATA_VHEAP_BUFFER_VPTR_VCE7_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE7 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE7_WOFF (17U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE7_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE7_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE7(_ft_,_x_) ((_ft_).u32_17 = (((_ft_).u32_17 & PM_DATA_VHEAP_BUFFER_VPTR_VCE7_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE7_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE7(_ft_) ((_ft_).u32_17 >> ((PM_DATA_VHEAP_BUFFER_VPTR_VCE7_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for TE6 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE6_INIT_WOFF (16U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE6_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE6_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE6_INIT(_ft_,_x_) ((_ft_).u32_16 = (((_ft_).u32_16 & PM_DATA_VHEAP_BUFFER_VPTR_TE6_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE6_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE6_INIT(_ft_) ((_ft_).u32_16 >> ((PM_DATA_VHEAP_BUFFER_VPTR_TE6_INIT_SHIFT)) & 0x00000001U) +/* 
+16KB aligned virtual top pointer for TE6 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE6_WOFF (16U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE6_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE6_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE6(_ft_,_x_) ((_ft_).u32_16 = (((_ft_).u32_16 & PM_DATA_VHEAP_BUFFER_VPTR_TE6_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE6_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE6(_ft_) ((_ft_).u32_16 >> ((PM_DATA_VHEAP_BUFFER_VPTR_TE6_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for VCE6 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE6_INIT_WOFF (15U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE6_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE6_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE6_INIT(_ft_,_x_) ((_ft_).u32_15 = (((_ft_).u32_15 & PM_DATA_VHEAP_BUFFER_VPTR_VCE6_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE6_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE6_INIT(_ft_) ((_ft_).u32_15 >> ((PM_DATA_VHEAP_BUFFER_VPTR_VCE6_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE6 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE6_WOFF (15U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE6_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE6_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE6(_ft_,_x_) ((_ft_).u32_15 = (((_ft_).u32_15 & PM_DATA_VHEAP_BUFFER_VPTR_VCE6_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE6_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE6(_ft_) ((_ft_).u32_15 >> ((PM_DATA_VHEAP_BUFFER_VPTR_VCE6_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for TE5 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE5_INIT_WOFF (14U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE5_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE5_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE5_INIT(_ft_,_x_) ((_ft_).u32_14 = (((_ft_).u32_14 & PM_DATA_VHEAP_BUFFER_VPTR_TE5_INIT_CLRMSK ) | (((_x_) & 
(0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE5_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE5_INIT(_ft_) ((_ft_).u32_14 >> ((PM_DATA_VHEAP_BUFFER_VPTR_TE5_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for TE5 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE5_WOFF (14U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE5_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE5_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE5(_ft_,_x_) ((_ft_).u32_14 = (((_ft_).u32_14 & PM_DATA_VHEAP_BUFFER_VPTR_TE5_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE5_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE5(_ft_) ((_ft_).u32_14 >> ((PM_DATA_VHEAP_BUFFER_VPTR_TE5_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for VCE5 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE5_INIT_WOFF (13U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE5_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE5_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE5_INIT(_ft_,_x_) ((_ft_).u32_13 = (((_ft_).u32_13 & PM_DATA_VHEAP_BUFFER_VPTR_VCE5_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE5_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE5_INIT(_ft_) ((_ft_).u32_13 >> ((PM_DATA_VHEAP_BUFFER_VPTR_VCE5_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE5 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE5_WOFF (13U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE5_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE5_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE5(_ft_,_x_) ((_ft_).u32_13 = (((_ft_).u32_13 & PM_DATA_VHEAP_BUFFER_VPTR_VCE5_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE5_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE5(_ft_) ((_ft_).u32_13 >> ((PM_DATA_VHEAP_BUFFER_VPTR_VCE5_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for TE4 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE4_INIT_WOFF (12U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE4_INIT_SHIFT (31U) +#define 
PM_DATA_VHEAP_BUFFER_VPTR_TE4_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE4_INIT(_ft_,_x_) ((_ft_).u32_12 = (((_ft_).u32_12 & PM_DATA_VHEAP_BUFFER_VPTR_TE4_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE4_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE4_INIT(_ft_) ((_ft_).u32_12 >> ((PM_DATA_VHEAP_BUFFER_VPTR_TE4_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for TE4 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE4_WOFF (12U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE4_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE4_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE4(_ft_,_x_) ((_ft_).u32_12 = (((_ft_).u32_12 & PM_DATA_VHEAP_BUFFER_VPTR_TE4_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE4_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE4(_ft_) ((_ft_).u32_12 >> ((PM_DATA_VHEAP_BUFFER_VPTR_TE4_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for VCE4 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE4_INIT_WOFF (11U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE4_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE4_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE4_INIT(_ft_,_x_) ((_ft_).u32_11 = (((_ft_).u32_11 & PM_DATA_VHEAP_BUFFER_VPTR_VCE4_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE4_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE4_INIT(_ft_) ((_ft_).u32_11 >> ((PM_DATA_VHEAP_BUFFER_VPTR_VCE4_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE4 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE4_WOFF (11U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE4_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE4_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE4(_ft_,_x_) ((_ft_).u32_11 = (((_ft_).u32_11 & PM_DATA_VHEAP_BUFFER_VPTR_VCE4_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE4_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE4(_ft_) ((_ft_).u32_11 >> 
((PM_DATA_VHEAP_BUFFER_VPTR_VCE4_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for TE3 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE3_INIT_WOFF (10U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE3_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE3_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE3_INIT(_ft_,_x_) ((_ft_).u32_10 = (((_ft_).u32_10 & PM_DATA_VHEAP_BUFFER_VPTR_TE3_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE3_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE3_INIT(_ft_) ((_ft_).u32_10 >> ((PM_DATA_VHEAP_BUFFER_VPTR_TE3_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for TE3 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE3_WOFF (10U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE3_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE3_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE3(_ft_,_x_) ((_ft_).u32_10 = (((_ft_).u32_10 & PM_DATA_VHEAP_BUFFER_VPTR_TE3_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE3_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE3(_ft_) ((_ft_).u32_10 >> ((PM_DATA_VHEAP_BUFFER_VPTR_TE3_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for VCE3 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE3_INIT_WOFF (9U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE3_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE3_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE3_INIT(_ft_,_x_) ((_ft_).u32_9 = (((_ft_).u32_9 & PM_DATA_VHEAP_BUFFER_VPTR_VCE3_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE3_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE3_INIT(_ft_) ((_ft_).u32_9 >> ((PM_DATA_VHEAP_BUFFER_VPTR_VCE3_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE3 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE3_WOFF (9U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE3_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE3_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE3(_ft_,_x_) ((_ft_).u32_9 = 
(((_ft_).u32_9 & PM_DATA_VHEAP_BUFFER_VPTR_VCE3_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE3_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE3(_ft_) ((_ft_).u32_9 >> ((PM_DATA_VHEAP_BUFFER_VPTR_VCE3_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for TE2 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE2_INIT_WOFF (8U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE2_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE2_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE2_INIT(_ft_,_x_) ((_ft_).u32_8 = (((_ft_).u32_8 & PM_DATA_VHEAP_BUFFER_VPTR_TE2_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE2_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE2_INIT(_ft_) ((_ft_).u32_8 >> ((PM_DATA_VHEAP_BUFFER_VPTR_TE2_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for TE2 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE2_WOFF (8U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE2_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE2_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE2(_ft_,_x_) ((_ft_).u32_8 = (((_ft_).u32_8 & PM_DATA_VHEAP_BUFFER_VPTR_TE2_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE2_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE2(_ft_) ((_ft_).u32_8 >> ((PM_DATA_VHEAP_BUFFER_VPTR_TE2_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for VCE2 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE2_INIT_WOFF (7U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE2_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE2_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE2_INIT(_ft_,_x_) ((_ft_).u32_7 = (((_ft_).u32_7 & PM_DATA_VHEAP_BUFFER_VPTR_VCE2_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE2_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE2_INIT(_ft_) ((_ft_).u32_7 >> ((PM_DATA_VHEAP_BUFFER_VPTR_VCE2_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE2 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE2_WOFF (7U) 
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE2_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE2_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE2(_ft_,_x_) ((_ft_).u32_7 = (((_ft_).u32_7 & PM_DATA_VHEAP_BUFFER_VPTR_VCE2_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE2_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE2(_ft_) ((_ft_).u32_7 >> ((PM_DATA_VHEAP_BUFFER_VPTR_VCE2_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for TE1 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE1_INIT_WOFF (6U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE1_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE1_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE1_INIT(_ft_,_x_) ((_ft_).u32_6 = (((_ft_).u32_6 & PM_DATA_VHEAP_BUFFER_VPTR_TE1_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE1_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE1_INIT(_ft_) ((_ft_).u32_6 >> ((PM_DATA_VHEAP_BUFFER_VPTR_TE1_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for TE1 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE1_WOFF (6U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE1(_ft_,_x_) ((_ft_).u32_6 = (((_ft_).u32_6 & PM_DATA_VHEAP_BUFFER_VPTR_TE1_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE1_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE1(_ft_) ((_ft_).u32_6 >> ((PM_DATA_VHEAP_BUFFER_VPTR_TE1_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for VCE1 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE1_INIT_WOFF (5U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE1_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE1_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE1_INIT(_ft_,_x_) ((_ft_).u32_5 = (((_ft_).u32_5 & PM_DATA_VHEAP_BUFFER_VPTR_VCE1_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE1_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE1_INIT(_ft_) 
((_ft_).u32_5 >> ((PM_DATA_VHEAP_BUFFER_VPTR_VCE1_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE1 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE1_WOFF (5U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE1(_ft_,_x_) ((_ft_).u32_5 = (((_ft_).u32_5 & PM_DATA_VHEAP_BUFFER_VPTR_VCE1_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE1_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE1(_ft_) ((_ft_).u32_5 >> ((PM_DATA_VHEAP_BUFFER_VPTR_VCE1_SHIFT)) & 0x000fffffU) +/* +4KB aligned top pointer for MMU +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_MMU_WOFF (4U) +#define PM_DATA_VHEAP_BUFFER_VPTR_MMU_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_MMU_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_MMU(_ft_,_x_) ((_ft_).u32_4 = (((_ft_).u32_4 & PM_DATA_VHEAP_BUFFER_VPTR_MMU_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_MMU_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_MMU(_ft_) ((_ft_).u32_4 >> ((PM_DATA_VHEAP_BUFFER_VPTR_MMU_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for ALIST +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_ALIST_INIT_WOFF (3U) +#define PM_DATA_VHEAP_BUFFER_VPTR_ALIST_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_ALIST_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_ALIST_INIT(_ft_,_x_) ((_ft_).u32_3 = (((_ft_).u32_3 & PM_DATA_VHEAP_BUFFER_VPTR_ALIST_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_ALIST_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_ALIST_INIT(_ft_) ((_ft_).u32_3 >> ((PM_DATA_VHEAP_BUFFER_VPTR_ALIST_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for ALIST +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_ALIST_WOFF (3U) +#define PM_DATA_VHEAP_BUFFER_VPTR_ALIST_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_ALIST_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_ALIST(_ft_,_x_) ((_ft_).u32_3 = 
(((_ft_).u32_3 & PM_DATA_VHEAP_BUFFER_VPTR_ALIST_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_ALIST_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_ALIST(_ft_) ((_ft_).u32_3 >> ((PM_DATA_VHEAP_BUFFER_VPTR_ALIST_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for TE0 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE0_INIT_WOFF (1U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE0_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE0_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE0_INIT(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_VHEAP_BUFFER_VPTR_TE0_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE0_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE0_INIT(_ft_) ((_ft_).u32_1 >> ((PM_DATA_VHEAP_BUFFER_VPTR_TE0_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for TE0 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE0_WOFF (1U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE0_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE0(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_VHEAP_BUFFER_VPTR_TE0_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE0(_ft_) ((_ft_).u32_1 >> ((PM_DATA_VHEAP_BUFFER_VPTR_TE0_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for VCE0 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE0_INIT_WOFF (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE0_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE0_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE0_INIT(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_VHEAP_BUFFER_VPTR_VCE0_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE0_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE0_INIT(_ft_) ((_ft_).u32_0 >> ((PM_DATA_VHEAP_BUFFER_VPTR_VCE0_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE0 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE0_WOFF (0U) 
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE0_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE0(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_VHEAP_BUFFER_VPTR_VCE0_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE0(_ft_) ((_ft_).u32_0 >> ((PM_DATA_VHEAP_BUFFER_VPTR_VCE0_SHIFT)) & 0x000fffffU) + + +#if defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) +/* +The PM FreeListState Buffer Layout - this will apply to 3 resources - FSTACK, UFSTACK and MMUSTACK +*/ +typedef struct _RGX_PM_FREELISTSTATE_BUFFER { + IMG_UINT32 u32_0; + IMG_UINT32 u32_1; + IMG_UINT32 u32_2; + IMG_UINT32 u32_3; + IMG_UINT32 u32_4; + IMG_UINT32 u32_5; + IMG_UINT32 u32_6; + IMG_UINT32 u32_7; +} RGX_PM_FREELISTSTATE_BUFFER; + +/* +Reserved field word 2 +*/ +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_WOFF (7U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_RSV_STUFF32_2(_ft_,_x_) ((_ft_).u32_7 = (((_ft_).u32_7 & RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_RSV_STUFF32_2(_ft_) ((_ft_).u32_7 >> ((RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_SHIFT)) & 0xffffffffU) +/* +Reserved field word 1 +*/ +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_WOFF (6U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_RSV_STUFF32_1(_ft_,_x_) ((_ft_).u32_6 = (((_ft_).u32_6 & RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_RSV_STUFF32_1(_ft_) ((_ft_).u32_6 >> 
((RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_SHIFT)) & 0xffffffffU) +/* +Reserved field word 0 +*/ +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_WOFF (5U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_RSV_STUFF32_0(_ft_,_x_) ((_ft_).u32_5 = (((_ft_).u32_5 & RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_RSV_STUFF32_0(_ft_) ((_ft_).u32_5 >> ((RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_SHIFT)) & 0xffffffffU) +/* +The number of pages consumed for the MMU Page Table. Must be initialised to zero. +*/ +#define RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_WOFF (4U) +#define RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_MMUPAGE_STATUS(_ft_,_x_) ((_ft_).u32_4 = (((_ft_).u32_4 & RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_MMUPAGE_STATUS(_ft_) ((_ft_).u32_4 >> ((RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_SHIFT)) & 0xffffffffU) +/* +The total number of pages consumed from the free stack. Must be initialised to zero. This field is unused in the MMUSTACK. 
+*/ +#define RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_WOFF (3U) +#define RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_PAGE_STATUS(_ft_,_x_) ((_ft_).u32_3 = (((_ft_).u32_3 & RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_PAGE_STATUS(_ft_) ((_ft_).u32_3 >> ((RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_SHIFT)) & 0xffffffffU) +/* +Stack pointer for the free stack - the location of the next free page relative to the BaseAddr, in number of DWORDs. Must be initialised to zero. +*/ +#define RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_WOFF (2U) +#define RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_STACK_PTR(_ft_,_x_) ((_ft_).u32_2 = (((_ft_).u32_2 & RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_STACK_PTR(_ft_) ((_ft_).u32_2 >> ((RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_SHIFT)) & 0xffffffffU) +/* +Base address of the free stack - points to the bottom of the stack. 
+*/ +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_WOFF (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_SHIFT (5U) +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_WOFF (1U) +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_SHIFT (5U) +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_CLRMSK (0x0000001FU) +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000007ffffff))) << 5))); \ + ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x07fffffff8000000))) >> 27))); } +#define RGX_PM_FREELISTSTATE_BUFFER_GET_BASE_ADDR(_ft_) (((_ft_).u32_0 >> (5)) | ((IMG_UINT64)((_ft_).u32_1 & 0xffffffffU ) << (27))) +#endif /* RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES */ + + +#if !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) +/* +The PM FreeListState Buffer Layout - this will apply to 3 resources - FSTACK, UFSTACK and MMUSTACK +*/ +typedef struct _RGX_PM_FREELISTSTATE_BUFFER { + IMG_UINT32 u32_0; + IMG_UINT32 u32_1; + IMG_UINT32 u32_2; + IMG_UINT32 u32_3; + IMG_UINT32 u32_4; + IMG_UINT32 u32_5; + IMG_UINT32 u32_6; + IMG_UINT32 u32_7; +} RGX_PM_FREELISTSTATE_BUFFER; + +/* +Reserved field word 2 +*/ +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_WOFF (7U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_RSV_STUFF32_2(_ft_,_x_) ((_ft_).u32_7 = (((_ft_).u32_7 & RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_RSV_STUFF32_2(_ft_) ((_ft_).u32_7 >> ((RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_SHIFT)) & 0xffffffffU) +/* +Reserved field word 1 +*/ +#define 
RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_WOFF (6U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_RSV_STUFF32_1(_ft_,_x_) ((_ft_).u32_6 = (((_ft_).u32_6 & RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_RSV_STUFF32_1(_ft_) ((_ft_).u32_6 >> ((RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_SHIFT)) & 0xffffffffU) +/* +Reserved field word 0 +*/ +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_WOFF (5U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_RSV_STUFF32_0(_ft_,_x_) ((_ft_).u32_5 = (((_ft_).u32_5 & RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_RSV_STUFF32_0(_ft_) ((_ft_).u32_5 >> ((RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_SHIFT)) & 0xffffffffU) +/* +The number of pages consumed for the MMU Page Table. Must be initialised to zero. +*/ +#define RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_WOFF (4U) +#define RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_MMUPAGE_STATUS(_ft_,_x_) ((_ft_).u32_4 = (((_ft_).u32_4 & RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_MMUPAGE_STATUS(_ft_) ((_ft_).u32_4 >> ((RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_SHIFT)) & 0xffffffffU) +/* +The total number of pages consumed from the free stack. Must be initialised to zero. This field is unused in the MMUSTACK. 
+*/ +#define RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_WOFF (3U) +#define RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_PAGE_STATUS(_ft_,_x_) ((_ft_).u32_3 = (((_ft_).u32_3 & RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_PAGE_STATUS(_ft_) ((_ft_).u32_3 >> ((RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_SHIFT)) & 0xffffffffU) +/* +Stack pointer for the free stack - the location of the next free page relative to the BaseAddr, in number of DWORDs. Must be initialised to zero. +*/ +#define RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_WOFF (2U) +#define RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_STACK_PTR(_ft_,_x_) ((_ft_).u32_2 = (((_ft_).u32_2 & RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_STACK_PTR(_ft_) ((_ft_).u32_2 >> ((RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_SHIFT)) & 0xffffffffU) +/* +Base address of the free stack - points to the bottom of the stack. 
+*/ +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_WOFF (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_WOFF (1U) +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \ + ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0xffffffff00000000))) >> 32))); } +#define RGX_PM_FREELISTSTATE_BUFFER_GET_BASE_ADDR(_ft_) (((_ft_).u32_0 >> (0)) | ((IMG_UINT64)((_ft_).u32_1 & 0xffffffffU ) << (32))) +#endif /* !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) */ + + +#if defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) +/* +256-bit granular, lower bits ignored. +Maximum addressable range supported by hardware is 1 TB. +*/ +#define RGX_PM_FREELISTSTATE_BASE_ADDR_ALIGNSHIFT (5U) +#define RGX_PM_FREELISTSTATE_BASE_ADDR_ALIGNSIZE (32U) +#define RGX_PM_FREELISTSTATE_BASE_ADDR_BASE_ADDR (0U) +#define RGX_PM_FREELISTSTATE_BASE_ADDR_BASE_ADDR_LOWER (0U) +#define RGX_PM_FREELISTSTATE_BASE_ADDR_BASE_ADDR_UPPER (68719476735ULL) +#endif /* RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES */ + + +#if !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) +/* +128-bit aligned. +Maximum addressable range supported by hardware is 1 TB. +The 40-bit, 16-byte-aligned address is packed into bits 35:0 of the two DWORDs. 
+*/ +#define RGX_PM_FREELISTSTATE_BASE_ADDR_ALIGNSHIFT (4U) +#define RGX_PM_FREELISTSTATE_BASE_ADDR_ALIGNSIZE (16U) +#define RGX_PM_FREELISTSTATE_BASE_ADDR_BASE_ADDR (0U) +#define RGX_PM_FREELISTSTATE_BASE_ADDR_BASE_ADDR_LOWER (0U) +#define RGX_PM_FREELISTSTATE_BASE_ADDR_BASE_ADDR_UPPER (68719476735ULL) +#endif /* !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) */ + + +/* +Maximum range supported by hardware is 23 bits. +*/ +#define RGX_PM_FREELISTSTATE_STACK_PTR_STACK_PTR (0U) +#define RGX_PM_FREELISTSTATE_STACK_PTR_STACK_PTR_LOWER (0U) +#define RGX_PM_FREELISTSTATE_STACK_PTR_STACK_PTR_UPPER (16777215U) + + +/* +Maximum range supported by hardware is 23 bits. +*/ +#define RGX_PM_FREELISTSTATE_PAGE_STATUS_PAGE_STATUS (0U) +#define RGX_PM_FREELISTSTATE_PAGE_STATUS_PAGE_STATUS_LOWER (0U) +#define RGX_PM_FREELISTSTATE_PAGE_STATUS_PAGE_STATUS_UPPER (16777215U) + + +/* +Maximum range supported by hardware is 23 bits. +*/ +#define RGX_PM_FREELISTSTATE_MMUPAGE_STATUS_MMUPAGE_STATUS (0U) +#define RGX_PM_FREELISTSTATE_MMUPAGE_STATUS_MMUPAGE_STATUS_LOWER (0U) +#define RGX_PM_FREELISTSTATE_MMUPAGE_STATUS_MMUPAGE_STATUS_UPPER (16777215U) + + +#if defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE)&&defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) +/* +The PM Render Context Buffer Layout +*/ +typedef struct _RGX_PM_RENDERSTATE_BUFFER { + IMG_UINT32 u32_0; + IMG_UINT32 u32_1; + IMG_UINT32 u32_2; + IMG_UINT32 u32_3; + IMG_UINT32 u32_4; + IMG_UINT32 u32_5; + IMG_UINT32 u32_6; + IMG_UINT32 u32_7; + IMG_UINT32 u32_8; + IMG_UINT32 u32_9; + IMG_UINT32 u32_10; + IMG_UINT32 u32_11; + IMG_UINT32 u32_12; + IMG_UINT32 u32_13; + IMG_UINT32 u32_14; + IMG_UINT32 u32_15; + IMG_UINT32 u32_16; + IMG_UINT32 u32_17; + IMG_UINT32 u32_18; + IMG_UINT32 u32_19; + IMG_UINT32 u32_20; + IMG_UINT32 u32_21; + IMG_UINT32 u32_22; + IMG_UINT32 u32_23; + IMG_UINT32 u32_24; + IMG_UINT32 u32_25; + IMG_UINT32 u32_26; + IMG_UINT32 u32_27; + IMG_UINT32 u32_28; + IMG_UINT32 u32_29; + IMG_UINT32 u32_30; + IMG_UINT32 
u32_31; + IMG_UINT32 u32_32; + IMG_UINT32 u32_33; + IMG_UINT32 u32_34; + IMG_UINT32 u32_35; + IMG_UINT32 u32_36; + IMG_UINT32 u32_37; + IMG_UINT32 u32_38; + IMG_UINT32 u32_39; + IMG_UINT32 u32_40; + IMG_UINT32 u32_41; + IMG_UINT32 u32_42; + IMG_UINT32 u32_43; + IMG_UINT32 u32_44; + IMG_UINT32 u32_45; +} RGX_PM_RENDERSTATE_BUFFER; + +/* +The base address of the Virtual-Physical Page Translation Table. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_WOFF (10U) +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_SHIFT (4U) +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_WOFF (11U) +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_SHIFT (4U) +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_CLRMSK (0x0000000FU) +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VFP_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_10 = (((_ft_).u32_10 & RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffffff))) << 4))); \ + ((_ft_).u32_11 = (((_ft_).u32_11 & RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0ffffffff0000000))) >> 28))); } +#define RGX_PM_RENDERSTATE_BUFFER_GET_VFP_BASE_ADDR(_ft_) (((_ft_).u32_10 >> (4)) | ((IMG_UINT64)((_ft_).u32_11 & 0xffffffffU ) << (28))) +/* +A 16-bit macrotile mask indicating which macrotiles have been freed by the ISP. +A '1' in a bit position indicates that the ISP has signalled that the corresponding macrotile can be freed. +Only the least-significant 16 bits are valid. +Only used in the 3D phase. +Must be initialised to zero. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_WOFF (9U) +#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MTILEFREE_STATUS(_ft_,_x_) ((_ft_).u32_9 = (((_ft_).u32_9 & RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MTILEFREE_STATUS(_ft_) ((_ft_).u32_9 >> ((RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT)) & 0xffffffffU) +/* +A 16-bit macrotile mask indicating which macrotiles have been freed by the PM. +A '1' in a bit position indicates that the corresponding macrotile has been freed, and its pages released back to the appropriate free stack. +Only the least-significant 16 bits are valid. +Only used in the 3D phase. +Must be initialised to zero. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_WOFF (8U) +#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_DEALLOC_MASK_STATUS(_ft_,_x_) ((_ft_).u32_8 = (((_ft_).u32_8 & RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_DEALLOC_MASK_STATUS(_ft_) ((_ft_).u32_8 >> ((RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT)) & 0xffffffffU) +/* +The base address of the VHEAP buffer. +Must be initialised to point to the location of the VHEAP buffer in memory. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_WOFF (6U) +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_SHIFT (4U) +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_WOFF (7U) +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_SHIFT (4U) +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_CLRMSK (0x0000000FU) +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VHEAP_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_6 = (((_ft_).u32_6 & RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffffff))) << 4))); \ + ((_ft_).u32_7 = (((_ft_).u32_7 & RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0ffffffff0000000))) >> 28))); } +#define RGX_PM_RENDERSTATE_BUFFER_GET_VHEAP_BASE_ADDR(_ft_) (((_ft_).u32_6 >> (4)) | ((IMG_UINT64)((_ft_).u32_7 & 0xffffffffU ) << (28))) +/* +Reserved bits, un-used. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_WOFF (5U) +#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_RSV_STUFF32(_ft_,_x_) ((_ft_).u32_5 = (((_ft_).u32_5 & RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_RSV_STUFF32(_ft_) ((_ft_).u32_5 >> ((RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT)) & 0xffffffffU) +/* +The number of entries on the MLIST. Must be initialised to zero, meaning no pages allocated. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_WOFF (4U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_TAIL(_ft_,_x_) ((_ft_).u32_4 = (((_ft_).u32_4 & RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MLIST_TAIL(_ft_) ((_ft_).u32_4 >> ((RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT)) & 0xffffffffU) +/* +The base address of the MLIST. +Must be initialised to point to a block of memory where the PM can write the MLIST. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_WOFF (2U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_SHIFT (5U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_WOFF (3U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_SHIFT (5U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_CLRMSK (0x0000001FU) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_2 = (((_ft_).u32_2 & RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000007ffffff))) << 5))); \ + ((_ft_).u32_3 = (((_ft_).u32_3 & RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x07fffffff8000000))) >> 27))); } +#define RGX_PM_RENDERSTATE_BUFFER_GET_MLIST_BASE_ADDR(_ft_) (((_ft_).u32_2 >> (5)) | ((IMG_UINT64)((_ft_).u32_3 & 0xffffffffU ) << (27))) +/* +The number of entries on the ALIST. Must be initialised to zero, meaning no pages allocated. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_WOFF (0U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_WOFF (1U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_ALIST_TAIL(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \ + ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0xffffffff00000000))) >> 32))); } +#define RGX_PM_RENDERSTATE_BUFFER_GET_ALIST_TAIL(_ft_) (((_ft_).u32_0 >> (0)) | ((IMG_UINT64)((_ft_).u32_1 & 0xffffffffU ) << (32))) +#endif /* RGX_FEATURE_PM_REGISTER_CONFIG_MODE&&PM_BYTE_ALIGNED_BASE_ADDRESSES */ + + +#if defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE)&&!defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) +/* +The PM Render Context Buffer Layout +*/ +typedef struct _RGX_PM_RENDERSTATE_BUFFER { + IMG_UINT32 u32_0; + IMG_UINT32 u32_1; + IMG_UINT32 u32_2; + IMG_UINT32 u32_3; + IMG_UINT32 u32_4; + IMG_UINT32 u32_5; + IMG_UINT32 u32_6; + IMG_UINT32 u32_7; + IMG_UINT32 u32_8; + IMG_UINT32 u32_9; + IMG_UINT32 u32_10; + IMG_UINT32 u32_11; +} RGX_PM_RENDERSTATE_BUFFER; + +/* +The base address of the Virtual-Physical Page Translation Table. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_WOFF (10U) +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_WOFF (11U) +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VFP_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_10 = (((_ft_).u32_10 & RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \ + ((_ft_).u32_11 = (((_ft_).u32_11 & RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0xffffffff00000000))) >> 32))); } +#define RGX_PM_RENDERSTATE_BUFFER_GET_VFP_BASE_ADDR(_ft_) (((_ft_).u32_10 >> (0)) | ((IMG_UINT64)((_ft_).u32_11 & 0xffffffffU ) << (32))) +/* +A 16-bit macrotile mask indicating which macrotiles have been freed by the ISP. +A '1' in a bit position indicates that the ISP has signalled that the corresponding macrotile can be freed. +Only the least-significant 16 bits are valid. +Only used in the 3D phase. +Must be initialised to zero. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_WOFF (9U) +#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MTILEFREE_STATUS(_ft_,_x_) ((_ft_).u32_9 = (((_ft_).u32_9 & RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MTILEFREE_STATUS(_ft_) ((_ft_).u32_9 >> ((RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT)) & 0xffffffffU) +/* +A 16-bit macrotile mask indicating which macrotiles have been freed by the PM. 
+A '1' in a bit position indicates that the corresponding macrotile has been freed, and its pages released back to the appropriate free stack. +Only the least-significant 16 bits are valid. +Only used in the 3D phase. +Must be initialised to zero. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_WOFF (8U) +#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_DEALLOC_MASK_STATUS(_ft_,_x_) ((_ft_).u32_8 = (((_ft_).u32_8 & RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_DEALLOC_MASK_STATUS(_ft_) ((_ft_).u32_8 >> ((RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT)) & 0xffffffffU) +/* +The base address of the VHEAP buffer. +Must be initialised to point to the location of the VHEAP buffer in memory. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_WOFF (6U) +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_WOFF (7U) +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VHEAP_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_6 = (((_ft_).u32_6 & RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \ + ((_ft_).u32_7 = (((_ft_).u32_7 & RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0xffffffff00000000))) >> 32))); } +#define RGX_PM_RENDERSTATE_BUFFER_GET_VHEAP_BASE_ADDR(_ft_) (((_ft_).u32_6 >> (0)) | ((IMG_UINT64)((_ft_).u32_7 & 0xffffffffU ) << (32))) +/* +Reserved bits, un-used. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_WOFF (5U) +#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_RSV_STUFF32(_ft_,_x_) ((_ft_).u32_5 = (((_ft_).u32_5 & RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_RSV_STUFF32(_ft_) ((_ft_).u32_5 >> ((RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT)) & 0xffffffffU) +/* +The number of entries on the MLIST. Must be initialised to zero, meaning no pages allocated. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_WOFF (4U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_TAIL(_ft_,_x_) ((_ft_).u32_4 = (((_ft_).u32_4 & RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MLIST_TAIL(_ft_) ((_ft_).u32_4 >> ((RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT)) & 0xffffffffU) +/* +The base address of the MLIST. +Must be initialised to point to a block of memory where the PM can write the MLIST. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_WOFF (2U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_WOFF (3U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_2 = (((_ft_).u32_2 & RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \ + ((_ft_).u32_3 = (((_ft_).u32_3 & RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0xffffffff00000000))) >> 32))); } +#define RGX_PM_RENDERSTATE_BUFFER_GET_MLIST_BASE_ADDR(_ft_) (((_ft_).u32_2 >> (0)) | ((IMG_UINT64)((_ft_).u32_3 & 0xffffffffU ) << (32))) +/* +The number of entries on the ALIST. Must be initialised to zero, meaning no pages allocated. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_WOFF (0U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_WOFF (1U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_ALIST_TAIL(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \ + ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0xffffffff00000000))) >> 32))); } +#define RGX_PM_RENDERSTATE_BUFFER_GET_ALIST_TAIL(_ft_) (((_ft_).u32_0 >> (0)) | ((IMG_UINT64)((_ft_).u32_1 & 0xffffffffU ) << (32))) +#endif /* RGX_FEATURE_PM_REGISTER_CONFIG_MODE&&!PM_BYTE_ALIGNED_BASE_ADDRESSES */ + + +#if !defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE) +/* +The PM Render Context Buffer Layout +*/ +typedef struct _RGX_PM_RENDERSTATE_BUFFER { + IMG_UINT32 u32_0; + IMG_UINT32 u32_1; + IMG_UINT32 u32_2; + IMG_UINT32 u32_3; + IMG_UINT32 u32_4; + IMG_UINT32 u32_5; + IMG_UINT32 u32_6; + IMG_UINT32 u32_7; + IMG_UINT32 u32_8; + IMG_UINT32 u32_9; + IMG_UINT32 u32_10; + IMG_UINT32 u32_11; + IMG_UINT32 u32_12; + IMG_UINT32 u32_13; + IMG_UINT32 u32_14; + IMG_UINT32 u32_15; + IMG_UINT32 u32_16; + IMG_UINT32 u32_17; + IMG_UINT32 u32_18; + IMG_UINT32 u32_19; + IMG_UINT32 u32_20; + IMG_UINT32 u32_21; + IMG_UINT32 u32_22; + IMG_UINT32 u32_23; + IMG_UINT32 u32_24; + IMG_UINT32 u32_25; + IMG_UINT32 u32_26; + IMG_UINT32 u32_27; + IMG_UINT32 u32_28; + IMG_UINT32 u32_29; + IMG_UINT32 u32_30; + IMG_UINT32 u32_31; + IMG_UINT32 u32_32; + IMG_UINT32 u32_33; + IMG_UINT32 u32_34; + IMG_UINT32 u32_35; + IMG_UINT32 u32_36; + IMG_UINT32 u32_37; + IMG_UINT32 u32_38; + IMG_UINT32 u32_39; + IMG_UINT32 u32_40; + IMG_UINT32 u32_41; + IMG_UINT32 u32_42; + IMG_UINT32 
u32_43; + IMG_UINT32 u32_44; + IMG_UINT32 u32_45; +} RGX_PM_RENDERSTATE_BUFFER; + +/* +MMU catalogue base address for VCE pipe 3 LAST_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_WOFF (45U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE(_ft_,_x_) ((_ft_).u32_45 = (((_ft_).u32_45 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE(_ft_) ((_ft_).u32_45 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for VCE pipe 3 MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_WOFF (44U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE3_MAPPED(_ft_,_x_) ((_ft_).u32_44 = (((_ft_).u32_44 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE3_MAPPED(_ft_) ((_ft_).u32_44 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base address for VCE pipe 3 INIT_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_WOFF (44U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE(_ft_,_x_) 
((_ft_).u32_44 = (((_ft_).u32_44 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE(_ft_) ((_ft_).u32_44 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for VCE pipe 2 LAST_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_WOFF (43U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE(_ft_,_x_) ((_ft_).u32_43 = (((_ft_).u32_43 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE(_ft_) ((_ft_).u32_43 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for VCE pipe 2 MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_WOFF (42U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE2_MAPPED(_ft_,_x_) ((_ft_).u32_42 = (((_ft_).u32_42 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE2_MAPPED(_ft_) ((_ft_).u32_42 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base address for VCE pipe 2 INIT_PAGE +*/ +#define 
RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_WOFF (42U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE(_ft_,_x_) ((_ft_).u32_42 = (((_ft_).u32_42 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE(_ft_) ((_ft_).u32_42 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for VCE pipe 1 LAST_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_WOFF (41U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE(_ft_,_x_) ((_ft_).u32_41 = (((_ft_).u32_41 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE(_ft_) ((_ft_).u32_41 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for VCE pipe 1 MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_WOFF (40U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE1_MAPPED(_ft_,_x_) ((_ft_).u32_40 = (((_ft_).u32_40 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << 
(RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE1_MAPPED(_ft_) ((_ft_).u32_40 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base address for VCE pipe 1 INIT_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_WOFF (40U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE(_ft_,_x_) ((_ft_).u32_40 = (((_ft_).u32_40 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE(_ft_) ((_ft_).u32_40 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for TE pipe 3 LAST_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE_WOFF (37U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE(_ft_,_x_) ((_ft_).u32_37 = (((_ft_).u32_37 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE(_ft_) ((_ft_).u32_37 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for TE pipe 3 MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_MAPPED_WOFF (36U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_MAPPED_SHIFT (31U) +#define 
RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_MAPPED_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE3_MAPPED(_ft_,_x_) ((_ft_).u32_36 = (((_ft_).u32_36 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_MAPPED_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE3_MAPPED(_ft_) ((_ft_).u32_36 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base address for TE pipe 3 INIT_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE_WOFF (36U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE(_ft_,_x_) ((_ft_).u32_36 = (((_ft_).u32_36 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE(_ft_) ((_ft_).u32_36 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for TE pipe 2 LAST_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE_WOFF (35U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE(_ft_,_x_) ((_ft_).u32_35 = (((_ft_).u32_35 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE(_ft_) ((_ft_).u32_35 >> 
((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for TE pipe 2 MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_MAPPED_WOFF (34U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_MAPPED_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_MAPPED_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE2_MAPPED(_ft_,_x_) ((_ft_).u32_34 = (((_ft_).u32_34 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_MAPPED_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE2_MAPPED(_ft_) ((_ft_).u32_34 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base address for TE pipe 2 INIT_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE_WOFF (34U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE(_ft_,_x_) ((_ft_).u32_34 = (((_ft_).u32_34 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE(_ft_) ((_ft_).u32_34 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for TE pipe 1 LAST_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE_WOFF (33U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE(_ft_,_x_) ((_ft_).u32_33 = 
(((_ft_).u32_33 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE(_ft_) ((_ft_).u32_33 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for TE pipe 1 MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_MAPPED_WOFF (32U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_MAPPED_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_MAPPED_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE1_MAPPED(_ft_,_x_) ((_ft_).u32_32 = (((_ft_).u32_32 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_MAPPED_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE1_MAPPED(_ft_) ((_ft_).u32_32 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base address for TE pipe 1 INIT_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE_WOFF (32U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE(_ft_,_x_) ((_ft_).u32_32 = (((_ft_).u32_32 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE(_ft_) ((_ft_).u32_32 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for VCE pipe 0 LAST_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_WOFF 
(30U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE(_ft_,_x_) ((_ft_).u32_30 = (((_ft_).u32_30 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE(_ft_) ((_ft_).u32_30 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for VCE pipe 0 ADDR +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_WOFF (29U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_CLRMSK (0xF0000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE0_ADDR(_ft_,_x_) ((_ft_).u32_29 = (((_ft_).u32_29 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE0_ADDR(_ft_) ((_ft_).u32_29 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_SHIFT)) & 0x0fffffffU) +/* +MMU catalogue base address for VCE pipe 0 MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_WOFF (28U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE0_MAPPED(_ft_,_x_) ((_ft_).u32_28 = (((_ft_).u32_28 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_SHIFT)))) +#define 
RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE0_MAPPED(_ft_) ((_ft_).u32_28 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base address for VCE pipe 0 INIT_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_WOFF (28U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE(_ft_,_x_) ((_ft_).u32_28 = (((_ft_).u32_28 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE(_ft_) ((_ft_).u32_28 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for TE pipe 0 LAST_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_WOFF (26U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE(_ft_,_x_) ((_ft_).u32_26 = (((_ft_).u32_26 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE(_ft_) ((_ft_).u32_26 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for TE pipe 0 ADDR +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_WOFF (25U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_CLRMSK (0xF0000000U) +#define 
RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE0_ADDR(_ft_,_x_) ((_ft_).u32_25 = (((_ft_).u32_25 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE0_ADDR(_ft_) ((_ft_).u32_25 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_SHIFT)) & 0x0fffffffU) +/* +MMU catalogue base address for TE pipe 0 MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_WOFF (24U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE0_MAPPED(_ft_,_x_) ((_ft_).u32_24 = (((_ft_).u32_24 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE0_MAPPED(_ft_) ((_ft_).u32_24 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base address for TE pipe 0 INIT_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_WOFF (24U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE(_ft_,_x_) ((_ft_).u32_24 = (((_ft_).u32_24 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE(_ft_) ((_ft_).u32_24 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for ALIST LAST_PAGE +*/ +#define 
RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_WOFF (18U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_ALIST_LAST_PAGE(_ft_,_x_) ((_ft_).u32_18 = (((_ft_).u32_18 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_ALIST_LAST_PAGE(_ft_) ((_ft_).u32_18 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for ALIST ADDR +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_WOFF (17U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_CLRMSK (0xF0000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_ALIST_ADDR(_ft_,_x_) ((_ft_).u32_17 = (((_ft_).u32_17 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_ALIST_ADDR(_ft_) ((_ft_).u32_17 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_SHIFT)) & 0x0fffffffU) +/* +MMU catalogue base address for ALIST MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_WOFF (16U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_ALIST_MAPPED(_ft_,_x_) ((_ft_).u32_16 = (((_ft_).u32_16 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_ALIST_MAPPED(_ft_) ((_ft_).u32_16 >> 
((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base address for ALIST INIT_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_WOFF (16U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_ALIST_INIT_PAGE(_ft_,_x_) ((_ft_).u32_16 = (((_ft_).u32_16 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_ALIST_INIT_PAGE(_ft_) ((_ft_).u32_16 >> ((RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for TE3 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_INIT_WOFF (15U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_INIT_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE3_INIT(_ft_,_x_) ((_ft_).u32_15 = (((_ft_).u32_15 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_INIT_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE3_INIT(_ft_) ((_ft_).u32_15 >> ((RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for TE3 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_WOFF (15U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_CLRMSK (0x80000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE3(_ft_,_x_) ((_ft_).u32_15 = (((_ft_).u32_15 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE3(_ft_) ((_ft_).u32_15 >> ((RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_SHIFT)) & 0x7fffffffU) +/* +Init Bit Sent 
Flag for VCE3 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_WOFF (14U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE3_INIT(_ft_,_x_) ((_ft_).u32_14 = (((_ft_).u32_14 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE3_INIT(_ft_) ((_ft_).u32_14 >> ((RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE3 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_WOFF (14U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_CLRMSK (0x80000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE3(_ft_,_x_) ((_ft_).u32_14 = (((_ft_).u32_14 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE3(_ft_) ((_ft_).u32_14 >> ((RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_SHIFT)) & 0x7fffffffU) +/* +Init Bit Sent Flag for TE2 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_INIT_WOFF (13U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_INIT_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE2_INIT(_ft_,_x_) ((_ft_).u32_13 = (((_ft_).u32_13 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_INIT_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE2_INIT(_ft_) ((_ft_).u32_13 >> ((RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for TE2 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_WOFF (13U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_CLRMSK (0x80000000U) +#define 
RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE2(_ft_,_x_) ((_ft_).u32_13 = (((_ft_).u32_13 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE2(_ft_) ((_ft_).u32_13 >> ((RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_SHIFT)) & 0x7fffffffU) +/* +Init Bit Sent Flag for VCE2 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_WOFF (12U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE2_INIT(_ft_,_x_) ((_ft_).u32_12 = (((_ft_).u32_12 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE2_INIT(_ft_) ((_ft_).u32_12 >> ((RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE2 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_WOFF (12U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_CLRMSK (0x80000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE2(_ft_,_x_) ((_ft_).u32_12 = (((_ft_).u32_12 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE2(_ft_) ((_ft_).u32_12 >> ((RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_SHIFT)) & 0x7fffffffU) +/* +Init Bit Sent Flag for TE1 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_INIT_WOFF (11U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_INIT_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE1_INIT(_ft_,_x_) ((_ft_).u32_11 = (((_ft_).u32_11 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_INIT_SHIFT)))) +#define 
RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE1_INIT(_ft_) ((_ft_).u32_11 >> ((RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for TE1 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_WOFF (11U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_CLRMSK (0x80000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE1(_ft_,_x_) ((_ft_).u32_11 = (((_ft_).u32_11 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE1(_ft_) ((_ft_).u32_11 >> ((RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_SHIFT)) & 0x7fffffffU) +/* +Init Bit Sent Flag for VCE1 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_WOFF (10U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE1_INIT(_ft_,_x_) ((_ft_).u32_10 = (((_ft_).u32_10 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE1_INIT(_ft_) ((_ft_).u32_10 >> ((RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE1 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_WOFF (10U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_CLRMSK (0x80000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE1(_ft_,_x_) ((_ft_).u32_10 = (((_ft_).u32_10 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE1(_ft_) ((_ft_).u32_10 >> ((RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_SHIFT)) & 0x7fffffffU) +/* +Init Bit Sent Flag for TE0 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_WOFF (9U) +#define 
RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE0_INIT(_ft_,_x_) ((_ft_).u32_9 = (((_ft_).u32_9 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE0_INIT(_ft_) ((_ft_).u32_9 >> ((RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for TE0 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_WOFF (9U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_CLRMSK (0x80000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE0(_ft_,_x_) ((_ft_).u32_9 = (((_ft_).u32_9 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE0(_ft_) ((_ft_).u32_9 >> ((RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_SHIFT)) & 0x7fffffffU) +/* +Init Bit Sent Flag for VCE0 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_WOFF (8U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE0_INIT(_ft_,_x_) ((_ft_).u32_8 = (((_ft_).u32_8 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE0_INIT(_ft_) ((_ft_).u32_8 >> ((RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE0 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_WOFF (8U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_CLRMSK (0x80000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE0(_ft_,_x_) ((_ft_).u32_8 = (((_ft_).u32_8 & 
RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE0(_ft_) ((_ft_).u32_8 >> ((RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_SHIFT)) & 0x7fffffffU) +/* +A 16-bit macrotile mask indicating which macrotiles have been freed by the ISP. +A '1' in a bit position indicates that the ISP has signalled that the corresponding macrotile can be freed. +Only the least-significant 16 bits are valid. +Only used in the 3D phase. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_WOFF (7U) +#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MTILEFREE_STATUS(_ft_,_x_) ((_ft_).u32_7 = (((_ft_).u32_7 & RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MTILEFREE_STATUS(_ft_) ((_ft_).u32_7 >> ((RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT)) & 0xffffffffU) +/* +A 16-bit macrotile mask indicating which macrotiles have been freed by the PM. +A '1' in a bit position indicates that the corresponding macrotile has been freed, and its pages released back to the appropriate free stack. +Only the least-significant 16 bits are valid. +Only used in the 3D phase. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_WOFF (6U) +#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_DEALLOC_MASK_STATUS(_ft_,_x_) ((_ft_).u32_6 = (((_ft_).u32_6 & RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_DEALLOC_MASK_STATUS(_ft_) ((_ft_).u32_6 >> ((RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT)) & 0xffffffffU) +/* +Reserved bits, un-used. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_WOFF (5U) +#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_RSV_STUFF32(_ft_,_x_) ((_ft_).u32_5 = (((_ft_).u32_5 & RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_RSV_STUFF32(_ft_) ((_ft_).u32_5 >> ((RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT)) & 0xffffffffU) +/* +The number of entries on the MLIST. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_WOFF (4U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_TAIL(_ft_,_x_) ((_ft_).u32_4 = (((_ft_).u32_4 & RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MLIST_TAIL(_ft_) ((_ft_).u32_4 >> ((RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT)) & 0xffffffffU) +/* +The base address of the MLIST. +Must be initialised to point to a block of memory where the PM can write the MLIST. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_WOFF (2U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_SHIFT (5U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_WOFF (3U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_SHIFT (5U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_CLRMSK (0x0000001FU) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_2 = (((_ft_).u32_2 & RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000007ffffff))) << 5))); \ + ((_ft_).u32_3 = (((_ft_).u32_3 & RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x07fffffff8000000))) >> 27))); } +#define RGX_PM_RENDERSTATE_BUFFER_GET_MLIST_BASE_ADDR(_ft_) (((_ft_).u32_2 >> (5)) | ((IMG_UINT64)((_ft_).u32_3 & 0xffffffffU ) << (27))) +/* +Init bit sent flag for ALIST +*/ +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_WOFF (1U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_ALIST_INIT(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_ALIST_INIT(_ft_) ((_ft_).u32_1 >> ((RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_SHIFT)) & 0x00000001U) +/* +The number of entries on the ALIST. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_WOFF (0U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_WOFF (1U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK (0x80000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_ALIST_TAIL(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \ + ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x7fffffff00000000))) >> 32))); } +#define RGX_PM_RENDERSTATE_BUFFER_GET_ALIST_TAIL(_ft_) (((_ft_).u32_0 >> (0)) | ((IMG_UINT64)((_ft_).u32_1 & 0x7fffffffU ) << (32))) +#endif /* !defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE) */ + + +/* +Maximum range supported by hardware is 33 bits. +*/ +#define RGX_PM_RENDERSTATE_ALIST_TAIL_ALIST_TAIL (0U) +#define RGX_PM_RENDERSTATE_ALIST_TAIL_ALIST_TAIL_LOWER (0U) +#define RGX_PM_RENDERSTATE_ALIST_TAIL_ALIST_TAIL_UPPER (8589934591ULL) + + +#if defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) +/* +256-bit granular, lower bits ignored. +Maximum addressable range supported by hardware is 1 TB. +*/ +#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_ALIGNSHIFT (5U) +#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_ALIGNSIZE (32U) +#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_MLIST_BASE_ADDR (0U) +#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_MLIST_BASE_ADDR_LOWER (0U) +#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_MLIST_BASE_ADDR_UPPER (68719476735ULL) +#endif /* RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES */ + + +#if !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) +/* +128-bit aligned. +Maximum addressable range supported by hardware is 1 TB. +The 40-bit, 16-byte-aligned address is packed into bits 35:0 of the two DWORDs. 
+*/ +#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_ALIGNSHIFT (4U) +#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_ALIGNSIZE (16U) +#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_MLIST_BASE_ADDR (0U) +#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_MLIST_BASE_ADDR_LOWER (0U) +#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_MLIST_BASE_ADDR_UPPER (68719476735ULL) +#endif /* !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) */ + + +/* +Maximum range supported by hardware is 33 bits. +*/ +#define RGX_PM_RENDERSTATE_MLIST_TAIL_MLIST_TAIL (0U) +#define RGX_PM_RENDERSTATE_MLIST_TAIL_MLIST_TAIL_LOWER (0U) +#define RGX_PM_RENDERSTATE_MLIST_TAIL_MLIST_TAIL_UPPER (8589934591ULL) + + +#if defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) +/* +128-bit granular, lower bits ignored. +Maximum addressable range supported by hardware is 1 TB. +*/ +#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_ALIGNSHIFT (4U) +#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_ALIGNSIZE (16U) +#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_VHEAP_BASE_ADDR (0U) +#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_VHEAP_BASE_ADDR_LOWER (0U) +#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_VHEAP_BASE_ADDR_UPPER (68719476735ULL) +#endif /* RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES */ + + +#if !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) +/* +128-bit aligned. +Maximum addressable range supported by hardware is 1 TB. +The 40-bit, 16-byte-aligned address is packed into bits 35:0 of the two DWORDs. 
+*/ +#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_ALIGNSHIFT (4U) +#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_ALIGNSIZE (16U) +#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_VHEAP_BASE_ADDR (0U) +#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_VHEAP_BASE_ADDR_LOWER (0U) +#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_VHEAP_BASE_ADDR_UPPER (68719476735ULL) +#endif /* !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) */ + + +/* +Only the 16 least-significant bits are used +*/ +#define RGX_PM_RENDERSTATE_DEALLOC_MASK_STATUS_DEALLOC_MASK_STATUS (0U) +#define RGX_PM_RENDERSTATE_DEALLOC_MASK_STATUS_DEALLOC_MASK_STATUS_LOWER (0U) +#define RGX_PM_RENDERSTATE_DEALLOC_MASK_STATUS_DEALLOC_MASK_STATUS_UPPER (65535U) + + +/* +Only the 16 least-significant bits are used +*/ +#define RGX_PM_RENDERSTATE_MTILEFREE_STATUS_MTILEFREE_STATUS (0U) +#define RGX_PM_RENDERSTATE_MTILEFREE_STATUS_MTILEFREE_STATUS_LOWER (0U) +#define RGX_PM_RENDERSTATE_MTILEFREE_STATUS_MTILEFREE_STATUS_UPPER (65535U) + + +#if defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) +/* +128-bit granular, lower bits ignored. +Maximum addressable range supported by hardware is 1 TB. +*/ +#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_ALIGNSHIFT (4U) +#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_ALIGNSIZE (16U) +#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_VFP_BASE_ADDR (0U) +#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_VFP_BASE_ADDR_LOWER (0U) +#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_VFP_BASE_ADDR_UPPER (68719476735ULL) +#endif /* RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES */ + + +#if !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) +/* +128-bit aligned. +Maximum addressable range supported by hardware is 1 TB. +The 40-bit, 16-byte-aligned address is packed into bits 35:0 of the two DWORDs. 
+*/ +#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_ALIGNSHIFT (4U) +#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_ALIGNSIZE (16U) +#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_VFP_BASE_ADDR (0U) +#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_VFP_BASE_ADDR_LOWER (0U) +#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_VFP_BASE_ADDR_UPPER (68719476735ULL) +#endif /* !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) */ + + +#endif /* RGXPMDEFS_H */ + +/***************************************************************************** + End of file (rgxpmdefs.h) +*****************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/cache_ops.h b/drivers/mcst/gpu-imgtec/include/cache_ops.h new file mode 100644 index 000000000000..c34ffadb70c2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/cache_ops.h @@ -0,0 +1,56 @@ +/*************************************************************************/ /*! +@File +@Title Services cache management header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Defines for cache management which are visible internally + and externally +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef CACHE_OPS_H +#define CACHE_OPS_H +#include "img_types.h" + +#define CACHE_BATCH_MAX (8) +typedef IMG_UINT32 PVRSRV_CACHE_OP; /*!< Type represents cache maintenance operation */ +#define PVRSRV_CACHE_OP_NONE 0x0 /*!< No operation */ +#define PVRSRV_CACHE_OP_CLEAN 0x1 /*!< Flush w/o invalidate */ +#define PVRSRV_CACHE_OP_INVALIDATE 0x2 /*!< Invalidate w/o flush */ +#define PVRSRV_CACHE_OP_FLUSH 0x3 /*!< Flush w/ invalidate */ + +#endif /* CACHE_OPS_H */ diff --git a/drivers/mcst/gpu-imgtec/include/devicemem_typedefs.h b/drivers/mcst/gpu-imgtec/include/devicemem_typedefs.h new file mode 100644 index 000000000000..91a069912182 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/devicemem_typedefs.h @@ -0,0 +1,143 @@ +/*************************************************************************/ /*! +@File +@Title Device Memory Management +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Client side part of device memory management -- this file + is forked from new_devmem_allocation.h as this one has to + reside in the top level include so that client code is able + to make use of the typedefs. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef DEVICEMEM_TYPEDEFS_H +#define DEVICEMEM_TYPEDEFS_H + +#include +#include "img_types.h" +#include "pvrsrv_memallocflags.h" + +typedef struct DEVMEM_CONTEXT_TAG DEVMEM_CONTEXT; /*!< Convenience typedef for struct DEVMEM_CONTEXT_TAG */ +typedef struct DEVMEM_HEAP_TAG DEVMEM_HEAP; /*!< Convenience typedef for struct DEVMEM_HEAP_TAG */ +typedef struct DEVMEM_MEMDESC_TAG DEVMEM_MEMDESC; /*!< Convenience typedef for struct DEVMEM_MEMDESC_TAG */ +typedef struct DEVMEM_PAGELIST_TAG DEVMEM_PAGELIST; /*!< Convenience typedef for struct DEVMEM_PAGELIST_TAG */ +typedef PVRSRV_MEMALLOCFLAGS_T DEVMEM_FLAGS_T; /*!< Convenience typedef for PVRSRV_MEMALLOCFLAGS_T */ + +typedef IMG_HANDLE DEVMEM_EXPORTHANDLE; /*!< Typedef for DeviceMem Export Handle */ +typedef IMG_UINT64 DEVMEM_EXPORTKEY; /*!< Typedef for DeviceMem Export Key */ +typedef IMG_DEVMEM_SIZE_T DEVMEM_SIZE_T; /*!< Typedef for DeviceMem SIZE_T */ +typedef IMG_DEVMEM_LOG2ALIGN_T DEVMEM_LOG2ALIGN_T; /*!< Typedef for DeviceMem LOG2 Alignment */ + +typedef struct DEVMEMX_PHYS_MEMDESC_TAG DEVMEMX_PHYSDESC; /*!< Convenience typedef for DevmemX physical */ +typedef struct DEVMEMX_VIRT_MEMDESC_TAG DEVMEMX_VIRTDESC; /*!< Convenience typedef for DevmemX virtual */ + +/*! calling code needs all the info in this struct, to be able to pass it around */ +typedef struct +{ + /*! A handle to the PMR. */ + IMG_HANDLE hPMRExportHandle; + /*! The "key" to prove we have authorisation to use this PMR */ + IMG_UINT64 uiPMRExportPassword; + /*! Size and alignment properties for this PMR. Note, these + numbers are not trusted in kernel, but we need to cache them + client-side in order to allocate from the VM arena. The kernel + will know the actual alignment and size of the PMR and thus + would prevent client code from breaching security here. 
Ditto + for physmem granularity (aka page size) if this is different + from alignment */ + IMG_DEVMEM_SIZE_T uiSize; + /*! We call this "contiguity guarantee" to be more precise than + calling it "alignment" or "page size", terms which may seem + similar but have different emphasis. The number reported here + is the minimum contiguity guarantee from the creator of the + PMR. Now, there is no requirement to allocate that coarsely + from the RA. The alignment given to the RA simply needs to be + at least as coarse as the device page size for the heap we + ultimately intend to map into. What is important is that the + device MMU data page size is not greater than the minimum + contiguity guarantee from the PMR. This value is reported to + the client in order that it can choose to make early checks and + perhaps decide which heap (in a variable page size scenario) it + would be safe to map this PMR into. For convenience, the + client may choose to use this argument as the alignment of the + virtual range he chooses to allocate, but this is _not_ + necessary and in many cases would be able to get away with a + finer alignment, should the heap into which this PMR will be + mapped support it. */ + IMG_DEVMEM_LOG2ALIGN_T uiLog2ContiguityGuarantee; +} DEVMEM_EXPORTCOOKIE; + +/* Enum that describes the operation associated with changing sparse memory */ +typedef enum Resize { + SPARSE_RESIZE_NONE = 0, + + /* This should be set to indicate the change needs allocation */ + SPARSE_RESIZE_ALLOC = 1, + + /* This should be set to indicate the change needs free */ + SPARSE_RESIZE_FREE = 2, + + SPARSE_RESIZE_BOTH = ((IMG_UINT8)SPARSE_RESIZE_ALLOC | (IMG_UINT8)SPARSE_RESIZE_FREE), + + /* This should be set to silently swap underlying physical memory + * without disturbing its device or cpu virtual maps. + * This flag is not supported in the case of PDUMP and could lead to + * PDUMP panic when used. 
+ */ + SPARSE_REMAP_MEM = 4, + + /* Should be set to get the sparse changes appear in cpu virtual map */ + SPARSE_MAP_CPU_ADDR = 8 +}SPARSE_MEM_RESIZE_FLAGS; + +/* To be used with all the sparse allocations that gets mapped to CPU Virtual + * space. The sparse allocation CPU mapping is torn down and re-mapped every + * time the sparse allocation layout changes. + */ +#define PVRSRV_UNMAP_ON_SPARSE_CHANGE 1 + +/* To use with DevmemSubAllocate() as the default factor if no over-allocation + * is desired. + */ +#define DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER (1U) + +/* Defines the max length for PMR, MemDesc, Device memory History and RI debug + * annotations stored in memory, including the null terminator. + */ +#define DEVMEM_ANNOTATION_MAX_LEN (PVR_ANNOTATION_MAX_LEN + 1) + +#endif /* #ifndef DEVICEMEM_TYPEDEFS_H */ diff --git a/drivers/mcst/gpu-imgtec/include/dllist.h b/drivers/mcst/gpu-imgtec/include/dllist.h new file mode 100644 index 000000000000..50096c2033f5 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/dllist.h @@ -0,0 +1,356 @@ +/*************************************************************************/ /*! +@File +@Title Double linked list header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Double linked list interface +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef DLLIST_H +#define DLLIST_H + +#include "img_types.h" +#include "img_defs.h" + +/*! + Pointer to a linked list node +*/ +typedef struct DLLIST_NODE_ *PDLLIST_NODE; + + +/*! + Node in a linked list +*/ +/* + * Note: the following structure's size is architecture-dependent and clients + * may need to create a mirror of the structure definition if it needs to be + * used in a structure shared between host and device. + * Consider such clients if any changes are made to this structure. 
+ */ +typedef struct DLLIST_NODE_ +{ + struct DLLIST_NODE_ *psPrevNode; + struct DLLIST_NODE_ *psNextNode; +} DLLIST_NODE; + + +/*! + Static initialiser +*/ +#define DECLARE_DLLIST(n) \ +DLLIST_NODE (n) = {&(n), &(n)} + + +/*************************************************************************/ /*! +@Function dllist_init + +@Description Initialize a new double linked list + +@Input psListHead List head Node + +*/ +/*****************************************************************************/ +static INLINE +void dllist_init(PDLLIST_NODE psListHead) +{ + psListHead->psPrevNode = psListHead; + psListHead->psNextNode = psListHead; +} + +/*************************************************************************/ /*! +@Function dllist_is_empty + +@Description Returns whether the list is empty + +@Input psListHead List head Node + +*/ +/*****************************************************************************/ +static INLINE +bool dllist_is_empty(PDLLIST_NODE psListHead) +{ + return ((psListHead->psPrevNode == psListHead) + && (psListHead->psNextNode == psListHead)); +} + +/*************************************************************************/ /*! +@Function dllist_add_to_head + +@Description Add psNewNode to head of list psListHead + +@Input psListHead Head Node +@Input psNewNode New Node + +*/ +/*****************************************************************************/ +static INLINE +void dllist_add_to_head(PDLLIST_NODE psListHead, PDLLIST_NODE psNewNode) +{ + PDLLIST_NODE psTmp; + + psTmp = psListHead->psNextNode; + + psListHead->psNextNode = psNewNode; + psNewNode->psNextNode = psTmp; + + psTmp->psPrevNode = psNewNode; + psNewNode->psPrevNode = psListHead; +} + + +/*************************************************************************/ /*! 
+@Function dllist_add_to_tail + +@Description Add psNewNode to tail of list psListHead + +@Input psListHead Head Node +@Input psNewNode New Node + +*/ +/*****************************************************************************/ +static INLINE +void dllist_add_to_tail(PDLLIST_NODE psListHead, PDLLIST_NODE psNewNode) +{ + PDLLIST_NODE psTmp; + + psTmp = psListHead->psPrevNode; + + psListHead->psPrevNode = psNewNode; + psNewNode->psPrevNode = psTmp; + + psTmp->psNextNode = psNewNode; + psNewNode->psNextNode = psListHead; +} + +/*************************************************************************/ /*! +@Function dllist_node_is_in_list + +@Description Returns true if psNode is in a list + +@Input psNode List node + +*/ +/*****************************************************************************/ +static INLINE +bool dllist_node_is_in_list(PDLLIST_NODE psNode) +{ + return (psNode->psNextNode != NULL); +} + +/*************************************************************************/ /*! +@Function dllist_get_next_node + +@Description Returns the list node after psListHead or NULL psListHead is + the only element in the list. + +@Input psListHead List node to start the operation + +*/ +/*****************************************************************************/ +static INLINE +PDLLIST_NODE dllist_get_next_node(PDLLIST_NODE psListHead) +{ + if (psListHead->psNextNode == psListHead) + { + return NULL; + } + else + { + return psListHead->psNextNode; + } +} + +/*************************************************************************/ /*! +@Function dllist_get_prev_node + +@Description Returns the list node preceding psListHead or NULL if + psListHead is the only element in the list. 
+ +@Input psListHead List node to start the operation + +*/ +/*****************************************************************************/ +static INLINE +PDLLIST_NODE dllist_get_prev_node(PDLLIST_NODE psListHead) +{ + if (psListHead->psPrevNode == psListHead) + { + return NULL; + } + else + { + return psListHead->psPrevNode; + } +} + +/*************************************************************************/ /*! +@Function dllist_remove_node + +@Description Removes psListNode from the list where it currently belongs + +@Input psListNode List node to be removed + +*/ +/*****************************************************************************/ +static INLINE +void dllist_remove_node(PDLLIST_NODE psListNode) +{ + psListNode->psNextNode->psPrevNode = psListNode->psPrevNode; + psListNode->psPrevNode->psNextNode = psListNode->psNextNode; + + /* Clear the node to show it's not in a list */ + psListNode->psPrevNode = NULL; + psListNode->psNextNode = NULL; +} + +/*************************************************************************/ /*! +@Function dllist_replace_head + +@Description Moves the list from psOldHead to psNewHead + +@Input psOldHead List node to be replaced. Will become a + head node of an empty list. +@Input psNewHead List node to be inserted. Must be an + empty list head. 
+ +*/ +/*****************************************************************************/ +static INLINE +void dllist_replace_head(PDLLIST_NODE psOldHead, PDLLIST_NODE psNewHead) +{ + if (dllist_is_empty(psOldHead)) + { + psNewHead->psNextNode = psNewHead; + psNewHead->psPrevNode = psNewHead; + } + else + { + /* Change the neighbouring nodes */ + psOldHead->psNextNode->psPrevNode = psNewHead; + psOldHead->psPrevNode->psNextNode = psNewHead; + + /* Copy the old data to the new node */ + psNewHead->psNextNode = psOldHead->psNextNode; + psNewHead->psPrevNode = psOldHead->psPrevNode; + + /* Remove links to the previous list */ + psOldHead->psNextNode = psOldHead; + psOldHead->psPrevNode = psOldHead; + } +} + +/**************************************************************************/ /*! +@Function dllist_insert_list_at_head + +@Description Inserts psInHead list into the head of the psOutHead list. + After this operation psOutHead will contain psInHead at the + head of the list and the remaining elements that were + already in psOutHead will be places after the psInList (so + at a tail of the original list). + +@Input psOutHead List node psInHead will be inserted to. +@Input psInHead List node to be inserted to psOutHead. + After this operation this becomes an empty list. +*/ /***************************************************************************/ +static INLINE +void dllist_insert_list_at_head(PDLLIST_NODE psOutHead, PDLLIST_NODE psInHead) +{ + PDLLIST_NODE psInHeadNextNode = psInHead->psNextNode; + PDLLIST_NODE psOutHeadNextNode = psOutHead->psNextNode; + + if (!dllist_is_empty(psInHead)) + { + psOutHead->psNextNode = psInHeadNextNode; + psInHeadNextNode->psPrevNode = psOutHead; + + psInHead->psPrevNode->psNextNode = psOutHeadNextNode; + psOutHeadNextNode->psPrevNode = psInHead->psPrevNode; + + dllist_init(psInHead); + } + } + +/*************************************************************************/ /*! 
+@Function dllist_foreach_node + +@Description Walk through all the nodes on the list. + Safe against removal of (node). + +@Input list_head List node to start the operation +@Input node Current list node +@Input next Node after the current one + +*/ +/*****************************************************************************/ +#define dllist_foreach_node(list_head, node, next) \ + for ((node) = (list_head)->psNextNode, (next) = (node)->psNextNode; \ + (node) != (list_head); \ + (node) = (next), (next) = (node)->psNextNode) + +#define dllist_foreach_node_backwards(list_head, node, prev) \ + for ((node) = (list_head)->psPrevNode, (prev) = (node)->psPrevNode; \ + (node) != (list_head); \ + (node) = (prev), (prev) = (node)->psPrevNode) + + +/*************************************************************************/ /*! +@Function dllist_foreach + +@Description Simplification of dllist_foreach_node. + Walk through all the nodes on the list. + Safe against removal of currently-iterated node. + + Adds utility-macro dllist_cur() to typecast the current node. 
+ +@Input list_head List node to start the operation + +*/ +/*****************************************************************************/ +#define dllist_foreach(list_head) \ + for (DLLIST_NODE *_DllNode = (list_head).psNextNode, *_DllNext = _DllNode->psNextNode; \ + _DllNode != &(list_head); \ + _DllNode = _DllNext, _DllNext = _DllNode->psNextNode) + +#define dllist_foreach_backwards(list_head) \ + for (DLLIST_NODE *_DllNode = (list_head).psPrevNode, *_DllPrev = _DllNode->psPrevNode; \ + _DllNode != &(list_head); \ + _DllNode = _DllPrev, _DllPrev = _DllNode->psPrevNode) + +#define dllist_cur(type, member) IMG_CONTAINER_OF(_DllNode, type, member) + +#endif /* DLLIST_H */ diff --git a/drivers/mcst/gpu-imgtec/include/drm/netlink.h b/drivers/mcst/gpu-imgtec/include/drm/netlink.h new file mode 100644 index 000000000000..45983e02768a --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/drm/netlink.h @@ -0,0 +1,148 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Title Nulldisp/Netlink interface definition +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + + +#ifndef __NETLINK_H__ +#define __NETLINK_H__ + +/* For multi-plane pixel formats */ +#define NLPVRDPY_MAX_NUM_PLANES 3 + +enum nlpvrdpy_cmd { + __NLPVRDPY_CMD_INVALID, + NLPVRDPY_CMD_CONNECT, + NLPVRDPY_CMD_CONNECTED, + NLPVRDPY_CMD_DISCONNECT, + NLPVRDPY_CMD_FLIP, + NLPVRDPY_CMD_FLIPPED, + NLPVRDPY_CMD_COPY, + NLPVRDPY_CMD_COPIED, + __NLPVRDPY_CMD_MAX +}; +#define NLPVRDPY_CMD_MAX (__NLPVRDPY_CMD_MAX - 1) + +enum nlpvrdpy_attr { + __NLPVRDPY_ATTR_INVALID, + NLPVRDPY_ATTR_NAME, + NLPVRDPY_ATTR_MINOR, + NLPVRDPY_ATTR_NUM_PLANES, + NLPVRDPY_ATTR_WIDTH, + NLPVRDPY_ATTR_HEIGHT, + NLPVRDPY_ATTR_PIXFMT, + NLPVRDPY_ATTR_YUV_CSC, + NLPVRDPY_ATTR_YUV_BPP, + NLPVRDPY_ATTR_PLANE0_ADDR, + NLPVRDPY_ATTR_PLANE0_SIZE, + NLPVRDPY_ATTR_PLANE0_OFFSET, + NLPVRDPY_ATTR_PLANE0_PITCH, + NLPVRDPY_ATTR_PLANE0_GEM_OBJ_NAME, + NLPVRDPY_ATTR_PLANE1_ADDR, + NLPVRDPY_ATTR_PLANE1_SIZE, + NLPVRDPY_ATTR_PLANE1_OFFSET, + NLPVRDPY_ATTR_PLANE1_PITCH, + NLPVRDPY_ATTR_PLANE1_GEM_OBJ_NAME, + NLPVRDPY_ATTR_PLANE2_ADDR, + NLPVRDPY_ATTR_PLANE2_SIZE, + NLPVRDPY_ATTR_PLANE2_OFFSET, + NLPVRDPY_ATTR_PLANE2_PITCH, + NLPVRDPY_ATTR_PLANE2_GEM_OBJ_NAME, + NLPVRDPY_ATTR_FB_MODIFIER, + NLPVRDPY_ATTR_NAMING_REQUIRED, + NLPVRDPY_ATTR_PAD, + __NLPVRDPY_ATTR_MAX +}; +#define NLPVRDPY_ATTR_MAX (__NLPVRDPY_ATTR_MAX - 1) + +static struct nla_policy __attribute__((unused)) +nlpvrdpy_policy[NLPVRDPY_ATTR_MAX + 1] = { + [NLPVRDPY_ATTR_NAME] = { .type = NLA_STRING }, + [NLPVRDPY_ATTR_MINOR] = { .type = NLA_U32 }, + [NLPVRDPY_ATTR_NUM_PLANES] = { .type = NLA_U8 }, + [NLPVRDPY_ATTR_WIDTH] = { .type = NLA_U32 }, + [NLPVRDPY_ATTR_HEIGHT] = { .type = NLA_U32 }, + [NLPVRDPY_ATTR_PIXFMT] = { .type = NLA_U32 }, + [NLPVRDPY_ATTR_YUV_CSC] = { .type = NLA_U8 }, + [NLPVRDPY_ATTR_YUV_BPP] = { .type = NLA_U8 }, + [NLPVRDPY_ATTR_PLANE0_ADDR] = { .type = NLA_U64 }, + [NLPVRDPY_ATTR_PLANE0_SIZE] = { .type = NLA_U64 }, + [NLPVRDPY_ATTR_PLANE0_OFFSET] = { 
.type = NLA_U64 }, + [NLPVRDPY_ATTR_PLANE0_PITCH] = { .type = NLA_U64 }, + [NLPVRDPY_ATTR_PLANE0_GEM_OBJ_NAME] = { .type = NLA_U32 }, + [NLPVRDPY_ATTR_PLANE1_ADDR] = { .type = NLA_U64 }, + [NLPVRDPY_ATTR_PLANE1_SIZE] = { .type = NLA_U64 }, + [NLPVRDPY_ATTR_PLANE1_OFFSET] = { .type = NLA_U64 }, + [NLPVRDPY_ATTR_PLANE1_PITCH] = { .type = NLA_U64 }, + [NLPVRDPY_ATTR_PLANE1_GEM_OBJ_NAME] = { .type = NLA_U32 }, + [NLPVRDPY_ATTR_PLANE2_ADDR] = { .type = NLA_U64 }, + [NLPVRDPY_ATTR_PLANE2_SIZE] = { .type = NLA_U64 }, + [NLPVRDPY_ATTR_PLANE2_OFFSET] = { .type = NLA_U64 }, + [NLPVRDPY_ATTR_PLANE2_PITCH] = { .type = NLA_U64 }, + [NLPVRDPY_ATTR_PLANE2_GEM_OBJ_NAME] = { .type = NLA_U32 }, + [NLPVRDPY_ATTR_FB_MODIFIER] = { .type = NLA_U64 }, + [NLPVRDPY_ATTR_NAMING_REQUIRED] = { .type = NLA_FLAG }, +}; + +#define NLPVRDPY_ATTR_PLANE(index, type) \ + ({ \ + enum nlpvrdpy_attr __retval; \ + \ + switch (index) { \ + case 0: \ + __retval = NLPVRDPY_ATTR_PLANE0_ ## type; \ + break; \ + case 1: \ + __retval = NLPVRDPY_ATTR_PLANE1_ ## type; \ + break; \ + case 2: \ + __retval = NLPVRDPY_ATTR_PLANE2_ ## type; \ + break; \ + default: \ + __retval = __NLPVRDPY_ATTR_INVALID; \ + break; \ + }; \ + \ + __retval; \ + }) + +#endif /* __NETLINK_H__ */ diff --git a/drivers/mcst/gpu-imgtec/include/drm/nulldisp_drm.h b/drivers/mcst/gpu-imgtec/include/drm/nulldisp_drm.h new file mode 100644 index 000000000000..367a12ce5c7c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/drm/nulldisp_drm.h @@ -0,0 +1,106 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Title Nulldisp DRM definitions shared between kernel and user space. +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__NULLDISP_DRM_H__) +#define __NULLDISP_DRM_H__ + +#if defined(__KERNEL__) +#include +#else +#include +#endif + +struct drm_nulldisp_gem_create { + __u64 size; /* in */ + __u32 flags; /* in */ + __u32 handle; /* out */ +}; + +struct drm_nulldisp_gem_mmap { + __u32 handle; /* in */ + __u32 pad; + __u64 offset; /* out */ +}; + +#define NULLDISP_GEM_CPU_PREP_READ (1 << 0) +#define NULLDISP_GEM_CPU_PREP_WRITE (1 << 1) +#define NULLDISP_GEM_CPU_PREP_NOWAIT (1 << 2) + +struct drm_nulldisp_gem_cpu_prep { + __u32 handle; /* in */ + __u32 flags; /* in */ +}; + +struct drm_nulldisp_gem_cpu_fini { + __u32 handle; /* in */ + __u32 pad; +}; + +/* + * DRM command numbers, relative to DRM_COMMAND_BASE. + * These defines must be prefixed with "DRM_". + */ +#define DRM_NULLDISP_GEM_CREATE 0x00 +#define DRM_NULLDISP_GEM_MMAP 0x01 +#define DRM_NULLDISP_GEM_CPU_PREP 0x02 +#define DRM_NULLDISP_GEM_CPU_FINI 0x03 + +/* These defines must be prefixed with "DRM_IOCTL_". */ +#define DRM_IOCTL_NULLDISP_GEM_CREATE \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_NULLDISP_GEM_CREATE, \ + struct drm_nulldisp_gem_create) + +#define DRM_IOCTL_NULLDISP_GEM_MMAP \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_NULLDISP_GEM_MMAP, \ + struct drm_nulldisp_gem_mmap) + +#define DRM_IOCTL_NULLDISP_GEM_CPU_PREP \ + DRM_IOW(DRM_COMMAND_BASE + DRM_NULLDISP_GEM_CPU_PREP, \ + struct drm_nulldisp_gem_cpu_prep) + +#define DRM_IOCTL_NULLDISP_GEM_CPU_FINI \ + DRM_IOW(DRM_COMMAND_BASE + DRM_NULLDISP_GEM_CPU_FINI, \ + struct drm_nulldisp_gem_cpu_fini) + +#endif /* defined(__NULLDISP_DRM_H__) */ diff --git a/drivers/mcst/gpu-imgtec/include/drm/pdp_drm.h b/drivers/mcst/gpu-imgtec/include/drm/pdp_drm.h new file mode 100644 index 000000000000..e44aa21a8e59 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/drm/pdp_drm.h @@ -0,0 +1,106 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! 
+@File +@Title PDP DRM definitions shared between kernel and user space. +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__PDP_DRM_H__) +#define __PDP_DRM_H__ + +#if defined(__KERNEL__) +#include +#else +#include +#endif + +struct drm_pdp_gem_create { + __u64 size; /* in */ + __u32 flags; /* in */ + __u32 handle; /* out */ +}; + +struct drm_pdp_gem_mmap { + __u32 handle; /* in */ + __u32 pad; + __u64 offset; /* out */ +}; + +#define PDP_GEM_CPU_PREP_READ (1 << 0) +#define PDP_GEM_CPU_PREP_WRITE (1 << 1) +#define PDP_GEM_CPU_PREP_NOWAIT (1 << 2) + +struct drm_pdp_gem_cpu_prep { + __u32 handle; /* in */ + __u32 flags; /* in */ +}; + +struct drm_pdp_gem_cpu_fini { + __u32 handle; /* in */ + __u32 pad; +}; + +/* + * DRM command numbers, relative to DRM_COMMAND_BASE. + * These defines must be prefixed with "DRM_". + */ +#define DRM_PDP_GEM_CREATE 0x00 +#define DRM_PDP_GEM_MMAP 0x01 +#define DRM_PDP_GEM_CPU_PREP 0x02 +#define DRM_PDP_GEM_CPU_FINI 0x03 + +/* These defines must be prefixed with "DRM_IOCTL_". 
*/ +#define DRM_IOCTL_PDP_GEM_CREATE \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_PDP_GEM_CREATE, \ + struct drm_pdp_gem_create) + +#define DRM_IOCTL_PDP_GEM_MMAP\ + DRM_IOWR(DRM_COMMAND_BASE + DRM_PDP_GEM_MMAP, \ + struct drm_pdp_gem_mmap) + +#define DRM_IOCTL_PDP_GEM_CPU_PREP \ + DRM_IOW(DRM_COMMAND_BASE + DRM_PDP_GEM_CPU_PREP, \ + struct drm_pdp_gem_cpu_prep) + +#define DRM_IOCTL_PDP_GEM_CPU_FINI \ + DRM_IOW(DRM_COMMAND_BASE + DRM_PDP_GEM_CPU_FINI, \ + struct drm_pdp_gem_cpu_fini) + +#endif /* defined(__PDP_DRM_H__) */ diff --git a/drivers/mcst/gpu-imgtec/include/drm/pvr_drm.h b/drivers/mcst/gpu-imgtec/include/drm/pvr_drm.h new file mode 100644 index 000000000000..6e34443e8489 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/drm/pvr_drm.h @@ -0,0 +1,84 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Title PVR DRM definitions shared between kernel and user space. +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__PVR_DRM_H__) +#define __PVR_DRM_H__ + +#include "pvr_drm_core.h" + +/* + * IMPORTANT: + * All structures below are designed to be the same size when compiled for 32 + * and/or 64 bit architectures, i.e. there should be no compiler inserted + * padding. This is achieved by sticking to the following rules: + * 1) only use fixed width types + * 2) always naturally align fields by arranging them appropriately and by using + * padding fields when necessary + * + * These rules should _always_ be followed when modifying or adding new + * structures to this file. 
+ */ + +struct drm_pvr_srvkm_cmd { + __u32 bridge_id; + __u32 bridge_func_id; + __u64 in_data_ptr; + __u64 out_data_ptr; + __u32 in_data_size; + __u32 out_data_size; +}; + +/* + * DRM command numbers, relative to DRM_COMMAND_BASE. + * These defines must be prefixed with "DRM_". + */ +#define DRM_PVR_SRVKM_CMD 0 /* Used for PVR Services ioctls */ + + +/* These defines must be prefixed with "DRM_IOCTL_". */ +#define DRM_IOCTL_PVR_SRVKM_CMD \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_SRVKM_CMD, \ + struct drm_pvr_srvkm_cmd) + +#endif /* defined(__PVR_DRM_H__) */ diff --git a/drivers/mcst/gpu-imgtec/include/drm/pvr_drm_core.h b/drivers/mcst/gpu-imgtec/include/drm/pvr_drm_core.h new file mode 100644 index 000000000000..320cb67d73ca --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/drm/pvr_drm_core.h @@ -0,0 +1,77 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/* + * @File + * @Title Linux DRM definitions shared between kernel and user space. + * @Codingstyle LinuxKernel + * @Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All rights reserved. + * @Description This header contains a subset of the Linux kernel DRM uapi + * and is designed to be used in kernel and user mode. When + * included from kernel mode, it pulls in the full version of + * drm.h. Whereas, when included from user mode, it defines a + * minimal version of drm.h (as found in libdrm). As such, the + * structures and ioctl commands must exactly match those found + * in the Linux kernel/libdrm. + * @License MIT + * + * The contents of this file are subject to the MIT license as set out below. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#if !defined(__PVR_DRM_CORE_H__) +#define __PVR_DRM_CORE_H__ + +#if defined(__KERNEL__) +#include +#else +#include +#include + +#define DRM_IOCTL_BASE 'd' +#define DRM_COMMAND_BASE 0x40 + +#define DRM_IOWR(nr, type) _IOWR(DRM_IOCTL_BASE, nr, type) + +struct drm_version { + int version_major; + int version_minor; + int version_patchlevel; + __kernel_size_t name_len; + char *name; + __kernel_size_t date_len; + char *date; + __kernel_size_t desc_len; + char *desc; +}; + +struct drm_set_version { + int drm_di_major; + int drm_di_minor; + int drm_dd_major; + int drm_dd_minor; +}; + +#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version) +#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version) +#endif + +#endif diff --git a/drivers/mcst/gpu-imgtec/include/img_3dtypes.h b/drivers/mcst/gpu-imgtec/include/img_3dtypes.h new file mode 100644 index 000000000000..916e3a1eedc4 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/img_3dtypes.h @@ -0,0 +1,248 @@ +/*************************************************************************/ /*! +@File +@Title Global 3D types for use by IMG APIs +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Defines 3D types for use by IMG APIs +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef IMG_3DTYPES_H +#define IMG_3DTYPES_H + +#include +#include "img_types.h" +#include "img_defs.h" + +/** + * Comparison functions + * This comparison function is defined as: + * A {CmpFunc} B + * A is a reference value, e.g., incoming depth etc. + * B is the sample value, e.g., value in depth buffer. 
+ */
+typedef enum _IMG_COMPFUNC_
+{
+	IMG_COMPFUNC_NEVER,         /**< The comparison never succeeds */
+	IMG_COMPFUNC_LESS,          /**< The comparison is a less-than operation */
+	IMG_COMPFUNC_EQUAL,         /**< The comparison is an equal-to operation */
+	IMG_COMPFUNC_LESS_EQUAL,    /**< The comparison is a less-than or equal-to
+	                                 operation */
+	IMG_COMPFUNC_GREATER,       /**< The comparison is a greater-than operation
+	                             */
+	IMG_COMPFUNC_NOT_EQUAL,     /**< The comparison is a not-equal-to operation
+	                             */
+	IMG_COMPFUNC_GREATER_EQUAL, /**< The comparison is a greater-than or
+	                                 equal-to operation */
+	IMG_COMPFUNC_ALWAYS,        /**< The comparison always succeeds */
+} IMG_COMPFUNC;
+
+/**
+ * Stencil op functions
+ */
+typedef enum _IMG_STENCILOP_
+{
+	IMG_STENCILOP_KEEP,     /**< Keep original value */
+	IMG_STENCILOP_ZERO,     /**< Set stencil to 0 */
+	IMG_STENCILOP_REPLACE,  /**< Replace stencil entry */
+	IMG_STENCILOP_INCR_SAT, /**< Increment stencil entry, clamping to max */
+	IMG_STENCILOP_DECR_SAT, /**< Decrement stencil entry, clamping to zero */
+	IMG_STENCILOP_INVERT,   /**< Invert bits in stencil entry */
+	IMG_STENCILOP_INCR,     /**< Increment stencil entry,
+	                             wrapping if necessary */
+	IMG_STENCILOP_DECR,     /**< Decrement stencil entry,
+	                             wrapping if necessary */
+} IMG_STENCILOP;
+
+/**
+ * Alpha blending allows colours and textures on one surface
+ * to be blended with transparency onto another surface.
+ * These definitions apply to both source and destination blending
+ * states
+ */
+typedef enum _IMG_BLEND_
+{
+	IMG_BLEND_ZERO = 0,        /**< Blend factor is (0,0,0,0) */
+	IMG_BLEND_ONE,             /**< Blend factor is (1,1,1,1) */
+	IMG_BLEND_SRC_COLOUR,      /**< Blend factor is the source colour */
+	IMG_BLEND_INV_SRC_COLOUR,  /**< Blend factor is the inverted source colour
+	                                (i.e. 1-src_col) */
+	IMG_BLEND_SRC_ALPHA,       /**< Blend factor is the source alpha */
+	IMG_BLEND_INV_SRC_ALPHA,   /**< Blend factor is the inverted source alpha
+	                                (i.e.
1-src_alpha) */ + IMG_BLEND_DEST_ALPHA, /**< Blend factor is the destination alpha */ + IMG_BLEND_INV_DEST_ALPHA, /**< Blend factor is the inverted destination + alpha */ + IMG_BLEND_DEST_COLOUR, /**< Blend factor is the destination colour */ + IMG_BLEND_INV_DEST_COLOUR, /**< Blend factor is the inverted destination + colour */ + IMG_BLEND_SRC_ALPHASAT, /**< Blend factor is the alpha saturation (the + minimum of (Src alpha, + 1 - destination alpha)) */ + IMG_BLEND_BLEND_FACTOR, /**< Blend factor is a constant */ + IMG_BLEND_INVBLEND_FACTOR, /**< Blend factor is a constant (inverted)*/ + IMG_BLEND_SRC1_COLOUR, /**< Blend factor is the colour outputted from + the pixel shader */ + IMG_BLEND_INV_SRC1_COLOUR, /**< Blend factor is the inverted colour + outputted from the pixel shader */ + IMG_BLEND_SRC1_ALPHA, /**< Blend factor is the alpha outputted from + the pixel shader */ + IMG_BLEND_INV_SRC1_ALPHA /**< Blend factor is the inverted alpha + outputted from the pixel shader */ +} IMG_BLEND; + +/** + * The arithmetic operation to perform when blending + */ +typedef enum _IMG_BLENDOP_ +{ + IMG_BLENDOP_ADD = 0, /**< Result = (Source + Destination) */ + IMG_BLENDOP_SUBTRACT, /**< Result = (Source - Destination) */ + IMG_BLENDOP_REV_SUBTRACT, /**< Result = (Destination - Source) */ + IMG_BLENDOP_MIN, /**< Result = min (Source, Destination) */ + IMG_BLENDOP_MAX /**< Result = max (Source, Destination) */ +} IMG_BLENDOP; + +/** + * Logical operation to perform when logic ops are enabled + */ +typedef enum _IMG_LOGICOP_ +{ + IMG_LOGICOP_CLEAR = 0, /**< Result = 0 */ + IMG_LOGICOP_SET, /**< Result = -1 */ + IMG_LOGICOP_COPY, /**< Result = Source */ + IMG_LOGICOP_COPY_INVERTED, /**< Result = ~Source */ + IMG_LOGICOP_NOOP, /**< Result = Destination */ + IMG_LOGICOP_INVERT, /**< Result = ~Destination */ + IMG_LOGICOP_AND, /**< Result = Source & Destination */ + IMG_LOGICOP_NAND, /**< Result = ~(Source & Destination) */ + IMG_LOGICOP_OR, /**< Result = Source | Destination */ + 
IMG_LOGICOP_NOR, /**< Result = ~(Source | Destination) */ + IMG_LOGICOP_XOR, /**< Result = Source ^ Destination */ + IMG_LOGICOP_EQUIV, /**< Result = ~(Source ^ Destination) */ + IMG_LOGICOP_AND_REVERSE, /**< Result = Source & ~Destination */ + IMG_LOGICOP_AND_INVERTED, /**< Result = ~Source & Destination */ + IMG_LOGICOP_OR_REVERSE, /**< Result = Source | ~Destination */ + IMG_LOGICOP_OR_INVERTED /**< Result = ~Source | Destination */ +} IMG_LOGICOP; + +/** + * Type of fog blending supported + */ +typedef enum _IMG_FOGMODE_ +{ + IMG_FOGMODE_NONE, /**< No fog blending - fog calculations are + * based on the value output from the vertex phase */ + IMG_FOGMODE_LINEAR, /**< Linear interpolation */ + IMG_FOGMODE_EXP, /**< Exponential */ + IMG_FOGMODE_EXP2, /**< Exponential squaring */ +} IMG_FOGMODE; + +/** + * Types of filtering + */ +typedef enum _IMG_FILTER_ +{ + IMG_FILTER_DONTCARE, /**< Any filtering mode is acceptable */ + IMG_FILTER_POINT, /**< Point filtering */ + IMG_FILTER_LINEAR, /**< Bi-linear filtering */ + IMG_FILTER_BICUBIC, /**< Bi-cubic filtering */ +} IMG_FILTER; + +/** + * Addressing modes for textures + */ +typedef enum _IMG_ADDRESSMODE_ +{ + IMG_ADDRESSMODE_REPEAT, /**< Texture repeats continuously */ + IMG_ADDRESSMODE_FLIP, /**< Texture flips on odd integer part */ + IMG_ADDRESSMODE_CLAMP, /**< Texture clamped at 0 or 1 */ + IMG_ADDRESSMODE_FLIPCLAMP, /**< Flipped once, then clamp */ + IMG_ADDRESSMODE_CLAMPBORDER, + IMG_ADDRESSMODE_OGL_CLAMP, + IMG_ADDRESSMODE_OVG_TILEFILL, + IMG_ADDRESSMODE_DONTCARE, +} IMG_ADDRESSMODE; + +/** + * Culling based on winding order of triangle. + */ +typedef enum _IMG_CULLMODE_ +{ + IMG_CULLMODE_NONE, /**< Don't cull */ + IMG_CULLMODE_FRONTFACING, /**< Front facing triangles */ + IMG_CULLMODE_BACKFACING, /**< Back facing triangles */ +} IMG_CULLMODE; + +/** + * Colour for clearing surfaces. + * The four elements of the 4 x 32 bit array will map to colour + * R,G,B,A components, in order. 
+ * For YUV colour space the order is Y,U,V. + * For Depth and Stencil formats D maps to R and S maps to G. + */ +typedef union IMG_CLEAR_COLOUR_TAG { + IMG_UINT32 aui32[4]; + IMG_INT32 ai32[4]; + IMG_FLOAT af32[4]; +} IMG_CLEAR_COLOUR; + +static_assert(sizeof(IMG_FLOAT) == sizeof(IMG_INT32), "Size of IMG_FLOAT is not 32 bits."); + +/*! ************************************************************************//** +@brief Specifies the MSAA resolve operation. +*/ /**************************************************************************/ +typedef enum _IMG_RESOLVE_OP_ +{ + IMG_RESOLVE_BLEND = 0, /*!< box filter on the samples */ + IMG_RESOLVE_MIN = 1, /*!< minimum of the samples */ + IMG_RESOLVE_MAX = 2, /*!< maximum of the samples */ + IMG_RESOLVE_SAMPLE0 = 3, /*!< choose sample 0 */ + IMG_RESOLVE_SAMPLE1 = 4, /*!< choose sample 1 */ + IMG_RESOLVE_SAMPLE2 = 5, /*!< choose sample 2 */ + IMG_RESOLVE_SAMPLE3 = 6, /*!< choose sample 3 */ + IMG_RESOLVE_SAMPLE4 = 7, /*!< choose sample 4 */ + IMG_RESOLVE_SAMPLE5 = 8, /*!< choose sample 5 */ + IMG_RESOLVE_SAMPLE6 = 9, /*!< choose sample 6 */ + IMG_RESOLVE_SAMPLE7 = 10, /*!< choose sample 7 */ +} IMG_RESOLVE_OP; + + +#endif /* IMG_3DTYPES_H */ +/****************************************************************************** + End of file (img_3dtypes.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/img_defs.h b/drivers/mcst/gpu-imgtec/include/img_defs.h new file mode 100644 index 000000000000..71e16faac63f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/img_defs.h @@ -0,0 +1,560 @@ +/*************************************************************************/ /*! +@File +@Title Common header containing type definitions for portability +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Contains variable and structure definitions. Any platform + specific types should be defined in this file. 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef IMG_DEFS_H
+#define IMG_DEFS_H
+
+#if defined(LINUX) && defined(__KERNEL__)
+#include <linux/types.h>
+#else
+#include <stddef.h>
+#endif
+#if !(defined(LINUX) && defined(__KERNEL__))
+#include <assert.h>
+#endif
+
+#include "img_types.h"
+
+#if defined(NO_INLINE_FUNCS)
+ #define INLINE
+ #define FORCE_INLINE
+#else
+#if defined(__cplusplus) || defined(INTEGRITY_OS)
+ #if !defined(INLINE)
+ #define INLINE inline
+ #endif
+ #define FORCE_INLINE static inline
+#else
+#if !defined(INLINE)
+ #define INLINE __inline
+#endif
+#if (defined(UNDER_WDDM) || defined(WINDOWS_WDF)) && defined(_X86_)
+ #define FORCE_INLINE __forceinline
+#else
+ #define FORCE_INLINE static __inline
+#endif
+#endif
+#endif
+
+/* True if the GCC version is at least the given version. False for older
+ * versions of GCC, or other compilers.
+ */
+#define GCC_VERSION_AT_LEAST(major, minor) \
+ (__GNUC__ > (major) || \
+ (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
+
+/* Use Clang's __has_extension and __has_builtin macros if available.
*/ +#if defined(__has_extension) +#define has_clang_extension(e) __has_extension(e) +#else +#define has_clang_extension(e) 0 +#endif + +#if defined(__has_builtin) +#define has_clang_builtin(e) __has_builtin(e) +#else +#define has_clang_builtin(e) 0 +#endif + +/* Use this in any file, or use attributes under GCC - see below */ +#ifndef PVR_UNREFERENCED_PARAMETER +#define PVR_UNREFERENCED_PARAMETER(param) ((void)(param)) +#endif + +/* static_assert(condition, "message to print if it fails"); + * + * Assert something at compile time. If the assertion fails, try to print + * the message, otherwise do nothing. static_assert is available if: + * + * - It's already defined as a macro (e.g. by in C11) + * - We're using MSVC which exposes static_assert unconditionally + * - We're using a C++ compiler that supports C++11 + * - We're using GCC 4.6 and up in C mode (in which case it's available as + * _Static_assert) + * + * In all other cases, fall back to an equivalent that makes an invalid + * declaration. + */ +#if !defined(static_assert) && !defined(_MSC_VER) && \ + (!defined(__cplusplus) || __cplusplus < 201103L) || defined(__KLOCWORK__) + /* static_assert isn't already available */ + #if !defined(__cplusplus) && (GCC_VERSION_AT_LEAST(4, 6) || \ + (defined(__clang__) && has_clang_extension(c_static_assert))) + #define static_assert _Static_assert + #else + #define static_assert(expr, message) \ + extern int static_assert_failed[(expr) ? 1 : -1] __attribute__((unused)) + #endif +#else +#if defined(CONFIG_L4) + /* Defined but not compatible with DDK usage, so undefine and ignore */ + #undef static_assert + #define static_assert(expr, message) +#endif +#endif + +/* + * unreachable("explanation") can be used to indicate to the compiler that + * some parts of the code can never be reached, like the default branch + * of a switch that covers all real-world possibilities, even though there + * are other ints that exist for instance. 
+ * + * The message will be printed as an assert() when debugging. + * + * Note: there is no need to add a 'return' or any error handling after + * calling unreachable(), as this call will never return. + */ +#if defined(LINUX) && defined(__KERNEL__) +/* Kernel has its own unreachable(), which is a simple infinite loop */ +#elif GCC_VERSION_AT_LEAST(4, 5) || has_clang_builtin(__builtin_unreachable) + #define unreachable(msg) \ + do { \ + assert(!(msg)); \ + __builtin_unreachable(); \ + } while (0) +#elif defined(_MSC_VER) + #define unreachable(msg) \ + do { \ + assert(!(msg)); \ + __assume(0); \ + } while (0) +#else + #define unreachable(msg) \ + do { \ + assert(!(msg)); \ + while (1); \ + } while (0) +#endif + +/* + * assume(x > 2 && x <= 7) works like an assert(), except it hints to the + * compiler what it can assume to optimise the code, like a limited range + * of parameter values. + */ +#if has_clang_builtin(__builtin_assume) + #define assume(expr) \ + do { \ + assert(expr); \ + __builtin_assume(expr); \ + } while (0) +#elif defined(_MSC_VER) + #define assume(expr) \ + do { \ + assert(expr); \ + __assume(expr); \ + } while (0) +#elif defined(LINUX) && defined(__KERNEL__) + #define assume(expr) ((void)(expr)) +#elif GCC_VERSION_AT_LEAST(4, 5) || has_clang_builtin(__builtin_unreachable) + #define assume(expr) \ + do { \ + if (unlikely(!(expr))) \ + unreachable("Assumption isn't true: " # expr); \ + } while (0) +#else + #define assume(expr) assert(expr) +#endif + +/*! Macro to calculate the n-byte aligned value from that supplied rounding up. + * n must be a power of two. + * + * Both arguments should be of a type with the same size otherwise the macro may + * cut off digits, e.g. imagine a 64 bit address in _x and a 32 bit value in _n. 
+ */ +#define PVR_ALIGN(_x, _n) (((_x)+((_n)-1U)) & ~((_n)-1U)) + +#if defined(_WIN32) + +#if defined(WINDOWS_WDF) + + /* + * For WINDOWS_WDF drivers we don't want these defines to overwrite calling conventions propagated through the build system. + * This 'empty' choice helps to resolve all the calling conv issues. + * + */ + #define IMG_CALLCONV + #define C_CALLCONV + + #define IMG_INTERNAL + #define IMG_RESTRICT __restrict + + /* + * The proper way of dll linking under MS compilers is made of two things: + * - decorate implementation with __declspec(dllexport) + * this decoration helps compiler with making the so called + * 'export library' + * - decorate forward-declaration (in a source dependent on a dll) with + * __declspec(dllimport), this decoration helps the compiler to make + * faster and smaller code in terms of calling dll-imported functions + * + * Usually these decorations are performed by having a single macro define + * making that expands to a proper __declspec() depending on the + * translation unit, dllexport inside the dll source and dllimport outside + * the dll source. Having IMG_EXPORT and IMG_IMPORT resolving to the same + * __declspec() makes no sense, but at least works. + */ + #define IMG_IMPORT __declspec(dllexport) + #define IMG_EXPORT __declspec(dllexport) + +#else + + #define IMG_CALLCONV __stdcall + #define IMG_INTERNAL + #define IMG_EXPORT __declspec(dllexport) + #define IMG_RESTRICT __restrict + #define C_CALLCONV __cdecl + + /* + * IMG_IMPORT is defined as IMG_EXPORT so that headers and implementations + * match. Some compilers require the header to be declared IMPORT, while + * the implementation is declared EXPORT. 
+ */ + #define IMG_IMPORT IMG_EXPORT + +#endif + +#if defined(UNDER_WDDM) + #ifndef _INC_STDLIB + #if defined(__mips) + /* do nothing */ + #elif defined(UNDER_MSBUILD) + /* do nothing */ + #else + _CRTIMP void __cdecl abort(void); + #endif + #endif +#endif /* UNDER_WDDM */ +#else + #if (defined(LINUX) || defined(__QNXNTO__)) && defined(__KERNEL__) + #define IMG_INTERNAL + #define IMG_EXPORT + #define IMG_CALLCONV + #elif defined(LINUX) || defined(__METAG) || defined(__mips) || defined(__QNXNTO__) || defined(__riscv) + #define IMG_CALLCONV + #define C_CALLCONV + + #if defined(__METAG) + #define IMG_INTERNAL + #else + #define IMG_INTERNAL __attribute__((visibility("hidden"))) + #endif + + #define IMG_EXPORT __attribute__((visibility("default"))) + #define IMG_RESTRICT __restrict__ + #elif defined(INTEGRITY_OS) + #define IMG_CALLCONV + #define IMG_INTERNAL + #define IMG_EXPORT + #define IMG_RESTRICT + #define C_CALLCONV + #define __cdecl + + #ifndef USE_CODE + #define IMG_ABORT() printf("IMG_ABORT was called.\n") + #endif + #else + #error("define an OS") + #endif + +#endif + +/* Use default definition if not overridden */ +#ifndef IMG_ABORT + #if defined(EXIT_ON_ABORT) + #define IMG_ABORT() exit(1) + #else + #define IMG_ABORT() abort() + #endif +#endif + +/* The best way to suppress unused parameter warnings using GCC is to use a + * variable attribute. Place the __maybe_unused between the type and name of an + * unused parameter in a function parameter list e.g. 'int __maybe_unused var'. + * This should only be used in GCC build environments, for example, in files + * that compile only on Linux. 
+ * Other files should use PVR_UNREFERENCED_PARAMETER + */ + +/* Kernel macros for compiler attributes */ +/* Note: param positions start at 1 */ +#if defined(LINUX) && defined(__KERNEL__) + #include + + #if !defined(__fallthrough) + #if defined(__GNUC__) && GCC_VERSION_AT_LEAST(7, 0) + #define __fallthrough __attribute__((__fallthrough__)) + #else + #define __fallthrough + #endif + #endif +#elif defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES) + #define __must_check __attribute__((warn_unused_result)) + #define __maybe_unused __attribute__((unused)) + #define __malloc __attribute__((malloc)) + + /* Bionic's might have defined these already */ + /* See https://android.googlesource.com/platform/bionic.git/+/master/libc/include/sys/cdefs.h */ + #if !defined(__packed) + #define __packed __attribute__((packed)) + #endif + #if !defined(__aligned) + #define __aligned(n) __attribute__((aligned(n))) + #endif + #if !defined(__noreturn) + #define __noreturn __attribute__((noreturn)) + #endif + + /* That one compiler that supports attributes but doesn't support + * the printf attribute... */ + #if defined(__GNUC__) + #define __printf(fmt, va) __attribute__((format(printf, (fmt), (va)))) + #else + #define __printf(fmt, va) + #endif /* defined(__GNUC__) */ + + #if defined(__cplusplus) && (__cplusplus >= 201703L) + #define __fallthrough [[fallthrough]] + #elif defined(__GNUC__) && GCC_VERSION_AT_LEAST(7, 0) + #define __fallthrough __attribute__((__fallthrough__)) + #else + #define __fallthrough + #endif + + #define __user + #define __force + #define __iomem +#else + /* Silently ignore those attributes */ + #define __printf(fmt, va) + #define __packed + #define __aligned(n) + #define __must_check + #define __maybe_unused + #define __malloc + + #if defined(_MSC_VER) || defined(CC_ARM) + #define __noreturn __declspec(noreturn) + #else + #define __noreturn + #endif + + /* This may already been defined, e.g. 
by SAL (Source Annotation Language) */ + #if !defined(__fallthrough) + #define __fallthrough + #endif + + #define __user + #define __force + #define __iomem +#endif + + +/* Other attributes, following the same style */ +#if defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES) + #define __const_function __attribute__((const)) +#else + #define __const_function +#endif + + +/* GCC builtins */ +#if defined(LINUX) && defined(__KERNEL__) + #include +#elif defined(__GNUC__) || defined(INTEGRITY_OS) + +/* Klocwork does not support __builtin_expect, which makes the actual condition + * expressions hidden during analysis, affecting it negatively. */ +#if !defined(__KLOCWORK__) && !defined(INTEGRITY_OS) && !defined(DEBUG) + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) +#endif + + /* Compiler memory barrier to prevent reordering */ + #define barrier() __asm__ __volatile__("": : :"memory") +#else + #define barrier() do { static_assert(0, "barrier() isn't supported by your compiler"); } while (0) +#endif + +/* That one OS that defines one but not the other... */ +#ifndef likely + #define likely(x) (x) +#endif +#ifndef unlikely + #define unlikely(x) (x) +#endif + +/* These two macros are also provided by the kernel */ +#ifndef BIT +#define BIT(b) (1UL << (b)) +#endif + +#ifndef BIT_ULL +#define BIT_ULL(b) (1ULL << (b)) +#endif + +#define BIT_SET(f, b) BITMASK_SET((f), BIT(b)) +#define BIT_UNSET(f, b) BITMASK_UNSET((f), BIT(b)) +#define BIT_TOGGLE(f, b) BITMASK_TOGGLE((f), BIT(b)) +#define BIT_ISSET(f, b) BITMASK_HAS((f), BIT(b)) + +#define BITMASK_SET(f, m) (void) ((f) |= (m)) +#define BITMASK_UNSET(f, m) (void) ((f) &= ~(m)) +#define BITMASK_TOGGLE(f, m) (void) ((f) ^= (m)) +#define BITMASK_HAS(f, m) (((f) & (m)) == (m)) /* the bits from the mask are all set */ +#define BITMASK_ANY(f, m) (((f) & (m)) != 0U) /* any bit from the mask is set */ + +#ifndef MAX +#define MAX(a ,b) (((a) > (b)) ? 
(a) : (b)) +#endif + +#ifndef MIN +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#endif + +#ifndef CLAMP +#define CLAMP(min, max, n) ((n) < (min) ? (min) : ((n) > (max) ? (max) : (n))) +#endif + +#define SWAP(X, Y) (X) ^= (Y); (Y) ^= (X); (X) ^= (Y); + + +#if defined(LINUX) && defined(__KERNEL__) + #include + #include +#endif + +/* Get a structure's address from the address of a member */ +#define IMG_CONTAINER_OF(ptr, type, member) \ + (type *) ((uintptr_t) (ptr) - offsetof(type, member)) + +/* Get a new pointer with an offset (in bytes) from a base address, useful + * when traversing byte buffers and accessing data in buffers through struct + * pointers. + * Note, this macro is not equivalent to or replacing offsetof() */ +#define IMG_OFFSET_ADDR(addr, offset_in_bytes) \ + (void*)(((IMG_UINT8*)(void*)(addr)) + (offset_in_bytes)) + +/* The number of elements in a fixed-sized array */ +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(ARR) (sizeof(ARR) / sizeof((ARR)[0])) +#endif + +/* To guarantee that __func__ can be used, define it as a macro here if it + isn't already provided by the compiler. */ +#if defined(_MSC_VER) || (defined(__cplusplus) && __cplusplus < 201103L) +#define __func__ __FUNCTION__ +#endif + +#if defined(__cplusplus) +/* C++ Specific: + * Disallow use of copy and assignment operator within a class. + * Should be placed under private. 
*/ +#define IMG_DISALLOW_COPY_AND_ASSIGN(C) \ + C(const C&); \ + void operator=(const C&) +#endif + +#if defined(SUPPORT_PVR_VALGRIND) && !defined(__METAG) && !defined(__mips) && !defined(__riscv) + #include "/usr/include/valgrind/memcheck.h" + + #define VG_MARK_INITIALIZED(pvData,ui32Size) VALGRIND_MAKE_MEM_DEFINED(pvData,ui32Size) + #define VG_MARK_NOACCESS(pvData,ui32Size) VALGRIND_MAKE_MEM_NOACCESS(pvData,ui32Size) + #define VG_MARK_ACCESS(pvData,ui32Size) VALGRIND_MAKE_MEM_UNDEFINED(pvData,ui32Size) +#else + #if defined(_MSC_VER) + # define PVR_MSC_SUPPRESS_4127 __pragma(warning(suppress:4127)) + #else + # define PVR_MSC_SUPPRESS_4127 + #endif + + #define VG_MARK_INITIALIZED(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while (0) + #define VG_MARK_NOACCESS(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while (0) + #define VG_MARK_ACCESS(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while (0) +#endif + +#define IMG_STRINGIFY_IMPL(x) # x +#define IMG_STRINGIFY(x) IMG_STRINGIFY_IMPL(x) + +#if defined(INTEGRITY_OS) + /* Definitions not present in INTEGRITY. */ + #define PATH_MAX 200 +#endif + +#if defined(__clang__) || defined(__GNUC__) + /* __SIZEOF_POINTER__ is defined already by these compilers */ +#elif defined(INTEGRITY_OS) + #if defined(__Ptr_Is_64) + #define __SIZEOF_POINTER__ 8 + #else + #define __SIZEOF_POINTER__ 4 + #endif +#elif defined(_WIN32) + #define __SIZEOF_POINTER__ sizeof(char *) +#else + #warning Unknown OS - using default method to determine whether CPU arch is 64-bit. + #define __SIZEOF_POINTER__ sizeof(char *) +#endif + +/* RDI8567: gcc/clang/llvm load/store optimisations may cause issues with + * uncached device memory allocations. Some pointers are made 'volatile' + * to prevent those optimisations being applied to writes through those + * pointers. 
+ */ +#if (GCC_VERSION_AT_LEAST(7, 0) || defined(__clang__)) && (defined(__arm64__) || defined(__aarch64__)) +#define NOLDSTOPT volatile +/* after applying 'volatile' to a pointer, we may need to cast it to 'void *' + * to keep it compatible with its existing uses. + */ +#define NOLDSTOPT_VOID (void *) + +#define NOLDSTOPT_REQUIRED 1 +#else +#define NOLDSTOPT +#define NOLDSTOPT_VOID +#endif + +#endif /* IMG_DEFS_H */ +/***************************************************************************** + End of file (img_defs.h) +*****************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/img_drm_fourcc_internal.h b/drivers/mcst/gpu-imgtec/include/img_drm_fourcc_internal.h new file mode 100644 index 000000000000..6b43617cd4dc --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/img_drm_fourcc_internal.h @@ -0,0 +1,86 @@ +/*************************************************************************/ /*! +@File +@Title Wrapper around drm_fourcc.h +@Description FourCCs and the DRM framebuffer modifiers should be added here + unless they are used by kernel code or a known user outside of + the DDK. If FourCCs or DRM framebuffer modifiers are required + outside of the DDK, they shall be moved to the corresponding + public header. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/
+
+#ifndef IMG_DRM_FOURCC_INTERNAL_H
+#define IMG_DRM_FOURCC_INTERNAL_H
+
+#include <powervr/img_drm_fourcc.h>
+
+/*
+ * Modifier names are structured using the following convention,
+ * with underscores (_) between items:
+ * - prefix: DRM_FORMAT_MOD
+ * - identifier for our driver: PVR
+ * - category: FBCDC
+ * - compression tile dimension: 8x8, 16x4, 32x2
+ * - FBDC version: V0, V1, V2, V3, V7, V8, V10, V12
+ */
+#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V0 fourcc_mod_code(PVR, 1)
+#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V0_FIX fourcc_mod_code(PVR, 2) /* Fix for HW_BRN_37464 */
+#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V1 fourcc_mod_code(PVR, 3)
+#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V2 fourcc_mod_code(PVR, 4)
+#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V3 fourcc_mod_code(PVR, 5)
+/* DRM_FORMAT_MOD_PVR_FBCDC_8x8_V7 - moved to the public header */
+#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V8 fourcc_mod_code(PVR, 18)
+#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V10 fourcc_mod_code(PVR, 21)
+#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V12 fourcc_mod_code(PVR, 15)
+#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V0 fourcc_mod_code(PVR, 7)
+#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V0_FIX fourcc_mod_code(PVR, 8) /* Fix for HW_BRN_37464 */
+#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V1 fourcc_mod_code(PVR, 9)
+#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V2 fourcc_mod_code(PVR, 10)
+#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V3 fourcc_mod_code(PVR, 11)
+/* DRM_FORMAT_MOD_PVR_FBCDC_16x4_V7 - moved to the public header */
+#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V8 fourcc_mod_code(PVR, 19)
+#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V10 fourcc_mod_code(PVR, 22)
+#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V12 fourcc_mod_code(PVR, 16)
+#define DRM_FORMAT_MOD_PVR_FBCDC_32x2_V1 fourcc_mod_code(PVR, 13)
+#define DRM_FORMAT_MOD_PVR_FBCDC_32x2_V3 fourcc_mod_code(PVR, 14)
+#define DRM_FORMAT_MOD_PVR_FBCDC_32x2_V8 fourcc_mod_code(PVR, 20)
+#define DRM_FORMAT_MOD_PVR_FBCDC_32x2_V10 fourcc_mod_code(PVR,
23) +#define DRM_FORMAT_MOD_PVR_FBCDC_32x2_V12 fourcc_mod_code(PVR, 17) + +#endif /* IMG_DRM_FOURCC_INTERNAL_H */ diff --git a/drivers/mcst/gpu-imgtec/include/img_elf.h b/drivers/mcst/gpu-imgtec/include/img_elf.h new file mode 100644 index 000000000000..8837d9592599 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/img_elf.h @@ -0,0 +1,111 @@ +/*************************************************************************/ /*! +@File img_elf.h +@Title IMG ELF file definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Platform RGX +@Description Definitions for ELF file structures used in the DDK. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(IMG_ELF_H) +#define IMG_ELF_H + +#include "img_types.h" + +/* ELF format defines */ +#define ELF_PT_LOAD (0x1U) /* Program header identifier as Load */ +#define ELF_SHT_SYMTAB (0x2U) /* Section identifier as Symbol Table */ +#define ELF_SHT_STRTAB (0x3U) /* Section identifier as String Table */ +#define MAX_STRTAB_NUM (0x8U) /* Maximum number of string table in the ELF file */ + +/* Redefined structs of ELF format */ +typedef struct +{ + IMG_UINT8 ui32Eident[16]; + IMG_UINT16 ui32Etype; + IMG_UINT16 ui32Emachine; + IMG_UINT32 ui32Eversion; + IMG_UINT32 ui32Eentry; + IMG_UINT32 ui32Ephoff; + IMG_UINT32 ui32Eshoff; + IMG_UINT32 ui32Eflags; + IMG_UINT16 ui32Eehsize; + IMG_UINT16 ui32Ephentsize; + IMG_UINT16 ui32Ephnum; + IMG_UINT16 ui32Eshentsize; + IMG_UINT16 ui32Eshnum; + IMG_UINT16 ui32Eshtrndx; +} IMG_ELF_HDR; + +typedef struct +{ + IMG_UINT32 ui32Stname; + IMG_UINT32 ui32Stvalue; + IMG_UINT32 ui32Stsize; + IMG_UINT8 ui32Stinfo; + IMG_UINT8 ui32Stother; + IMG_UINT16 ui32Stshndx; +} IMG_ELF_SYM; + +typedef struct +{ + IMG_UINT32 ui32Shname; + IMG_UINT32 ui32Shtype; + IMG_UINT32 ui32Shflags; + IMG_UINT32 ui32Shaddr; + 
IMG_UINT32 ui32Shoffset; + IMG_UINT32 ui32Shsize; + IMG_UINT32 ui32Shlink; + IMG_UINT32 ui32Shinfo; + IMG_UINT32 ui32Shaddralign; + IMG_UINT32 ui32Shentsize; +} IMG_ELF_SHDR; + +typedef struct +{ + IMG_UINT32 ui32Ptype; + IMG_UINT32 ui32Poffset; + IMG_UINT32 ui32Pvaddr; + IMG_UINT32 ui32Ppaddr; + IMG_UINT32 ui32Pfilesz; + IMG_UINT32 ui32Pmemsz; + IMG_UINT32 ui32Pflags; + IMG_UINT32 ui32Palign; +} IMG_ELF_PROGRAM_HDR; + +#endif /* IMG_ELF_H */ diff --git a/drivers/mcst/gpu-imgtec/include/img_types.h b/drivers/mcst/gpu-imgtec/include/img_types.h new file mode 100644 index 000000000000..9f8dc055a6e3 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/img_types.h @@ -0,0 +1,291 @@ +/*************************************************************************/ /*! +@File +@Title Global types for use by IMG APIs +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Defines type aliases for use by IMG APIs. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef IMG_TYPES_H +#define IMG_TYPES_H +#if defined(__cplusplus) +extern "C" { +#endif + +/* To use C99 types and definitions, there are two special cases we need to + * cater for: + * + * - Visual Studio: in VS2010 or later, some standard headers are available, + * and MSVC has its own built-in sized types. We can define the C99 types + * in terms of these. + * + * - Linux kernel code: C99 sized types are defined in , but + * some other features (like macros for constants or printf format + * strings) are missing, so we need to fill in the gaps ourselves. + * + * For other cases (userspace code under Linux, Android or Neutrino, or + * firmware code), we can include the standard headers. 
+ */
+#if defined(_MSC_VER)
+ #include <stdbool.h> /* bool */
+ #include "msvc_types.h"
+#elif defined(LINUX) && defined(__KERNEL__)
+ #include <linux/kernel.h>
+ #include <linux/types.h>
+ #include "kernel_types.h"
+#elif defined(LINUX) || defined(__METAG) || defined(__QNXNTO__) || defined(INTEGRITY_OS) || defined(__riscv)
+ #include <stddef.h> /* NULL */
+ #include <stdint.h>
+ #include <inttypes.h> /* intX_t/uintX_t, format specifiers */
+ #include <limits.h> /* INT_MIN, etc */
+ #include <stdbool.h> /* bool */
+#elif defined(__mips)
+ #include <stddef.h> /* NULL */
+ #include <inttypes.h> /* intX_t/uintX_t, format specifiers */
+ #include <stdbool.h> /* bool */
+#else
+ #error C99 support not set up for this build
+#endif
+
+/*
+ * Due to a Klocwork bug, 'true'/'false' constants are not recognized to be of
+ * boolean type. This results in large number of false-positives being reported
+ * (MISRA.ETYPE.ASSIGN.2012: "An expression value of essential type 'signed char'
+ * is assigned to an object of essential type 'bool'"). Work around this by
+ * redefining those constants with cast to bool added.
+ */
+#if defined(__KLOCWORK__) && !defined(__cplusplus)
+#undef true
+#undef false
+#define true ((bool) 1)
+#define false ((bool) 0)
+#endif
+
+typedef unsigned int IMG_UINT;
+typedef int IMG_INT;
+
+typedef uint8_t IMG_UINT8, *IMG_PUINT8;
+typedef uint8_t IMG_BYTE, *IMG_PBYTE;
+typedef int8_t IMG_INT8;
+typedef char IMG_CHAR, *IMG_PCHAR;
+
+typedef uint16_t IMG_UINT16, *IMG_PUINT16;
+typedef int16_t IMG_INT16;
+typedef uint32_t IMG_UINT32, *IMG_PUINT32;
+typedef int32_t IMG_INT32, *IMG_PINT32;
+#define IMG_UINT32_C(c) ((IMG_UINT32)UINT32_C(c))
+
+typedef uint64_t IMG_UINT64, *IMG_PUINT64;
+typedef int64_t IMG_INT64;
+#define IMG_INT64_C(c) INT64_C(c)
+#define IMG_UINT64_C(c) UINT64_C(c)
+#define IMG_UINT16_C(c) UINT16_C(c)
+#define IMG_UINT64_FMTSPEC PRIu64
+#define IMG_UINT64_FMTSPECX PRIX64
+#define IMG_UINT64_FMTSPECx PRIx64
+#define IMG_UINT64_FMTSPECo PRIo64
+#define IMG_INT64_FMTSPECd PRId64
+
+#define IMG_UINT16_MAX UINT16_MAX
+#define IMG_UINT32_MAX UINT32_MAX
+#define IMG_UINT64_MAX
UINT64_MAX + +#define IMG_INT16_MAX INT16_MAX +#define IMG_INT32_MAX INT32_MAX +#define IMG_INT64_MAX INT64_MAX + +/* Linux kernel mode does not use floating point */ +typedef float IMG_FLOAT, *IMG_PFLOAT; +typedef double IMG_DOUBLE; + +typedef union +{ + IMG_UINT32 ui32; + IMG_FLOAT f; +} IMG_UINT32_FLOAT; + +typedef int IMG_SECURE_TYPE; + +typedef enum tag_img_bool +{ + IMG_FALSE = 0, + IMG_TRUE = 1, + IMG_FORCE_ALIGN = 0x7FFFFFFF +} IMG_BOOL, *IMG_PBOOL; + +#if defined(UNDER_WDDM) || defined(WINDOWS_WDF) +typedef IMG_CHAR const* IMG_PCCHAR; +#endif + +/* Format specifiers for 'size_t' type */ +#if defined(_MSC_VER) || defined(__MINGW32__) +#define IMG_SIZE_FMTSPEC "%Iu" +#define IMG_SIZE_FMTSPECX "%Ix" +#else +#define IMG_SIZE_FMTSPEC "%zu" +#define IMG_SIZE_FMTSPECX "%zx" +#endif + +#if defined(LINUX) && defined(__KERNEL__) +/* prints the function name when used with printk */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#define IMG_PFN_FMTSPEC "%ps" +#else +#define IMG_PFN_FMTSPEC "%pf" +#endif +#else +#define IMG_PFN_FMTSPEC "%p" +#endif + +typedef void *IMG_HANDLE; + +/* Process IDs */ +typedef IMG_UINT32 IMG_PID; + +/* OS connection type */ +typedef int IMG_OS_CONNECTION; + + +/* + * Address types. + * All types used to refer to a block of memory are wrapped in structures + * to enforce some degree of type safety, i.e. a IMG_DEV_VIRTADDR cannot + * be assigned to a variable of type IMG_DEV_PHYADDR because they are not the + * same thing. + * + * There is an assumption that the system contains at most one non-cpu mmu, + * and a memory block is only mapped by the MMU once. + * + * Different devices could have offset views of the physical address space. 
+ * + */ + + +/* + * + * +------------+ +------------+ +------------+ +------------+ + * | CPU | | DEV | | DEV | | DEV | + * +------------+ +------------+ +------------+ +------------+ + * | | | | + * | void * |IMG_DEV_VIRTADDR |IMG_DEV_VIRTADDR | + * | \-------------------/ | + * | | | + * +------------+ +------------+ | + * | MMU | | MMU | | + * +------------+ +------------+ | + * | | | + * | | | + * | | | + * +--------+ +---------+ +--------+ + * | Offset | | (Offset)| | Offset | + * +--------+ +---------+ +--------+ + * | | IMG_DEV_PHYADDR | + * | | | + * | | IMG_DEV_PHYADDR | + * +---------------------------------------------------------------------+ + * | System Address bus | + * +---------------------------------------------------------------------+ + * + */ + +#define IMG_DEV_VIRTADDR_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX +#define IMG_DEVMEM_SIZE_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX +#define IMG_DEVMEM_ALIGN_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX +#define IMG_DEVMEM_OFFSET_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX + +/* cpu physical address */ +typedef struct +{ +#if defined(UNDER_WDDM) || defined(WINDOWS_WDF) + uintptr_t uiAddr; +#define IMG_CAST_TO_CPUPHYADDR_UINT(var) (uintptr_t)(var) +#elif defined(LINUX) && defined(__KERNEL__) + phys_addr_t uiAddr; +#define IMG_CAST_TO_CPUPHYADDR_UINT(var) (phys_addr_t)(var) +#else + IMG_UINT64 uiAddr; +#define IMG_CAST_TO_CPUPHYADDR_UINT(var) (IMG_UINT64)(var) +#endif +} IMG_CPU_PHYADDR; + +/* device physical address */ +typedef struct +{ + IMG_UINT64 uiAddr; +} IMG_DEV_PHYADDR; + +/* + rectangle structure +*/ +typedef struct +{ + IMG_INT32 x0; + IMG_INT32 y0; + IMG_INT32 x1; + IMG_INT32 y1; +} IMG_RECT; + +typedef struct +{ + IMG_INT16 x0; + IMG_INT16 y0; + IMG_INT16 x1; + IMG_INT16 y1; +} IMG_RECT_16; + +/* + * box structure + */ +typedef struct +{ + IMG_INT32 x0; + IMG_INT32 y0; + IMG_INT32 z0; + IMG_INT32 x1; + IMG_INT32 y1; + IMG_INT32 z1; +} IMG_BOX; + +#if defined(__cplusplus) +} +#endif + +#endif /* IMG_TYPES_H */ 
+/****************************************************************************** + End of file (img_types.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/kernel_types.h b/drivers/mcst/gpu-imgtec/include/kernel_types.h new file mode 100644 index 000000000000..c3305102fc20 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/kernel_types.h @@ -0,0 +1,137 @@ +/*************************************************************************/ /*! +@Title C99-compatible types and definitions for Linux kernel code +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/kernel.h>
+
+/* Limits of specified-width integer types */
+
+/* S8_MIN, etc were added in kernel version 3.14. The other versions are for
+ * earlier kernels. They can be removed once older kernels don't need to be
+ * supported.
+ */ +#ifdef S8_MIN + #define INT8_MIN S8_MIN +#else + #define INT8_MIN (-128) +#endif + +#ifdef S8_MAX + #define INT8_MAX S8_MAX +#else + #define INT8_MAX 127 +#endif + +#ifdef U8_MAX + #define UINT8_MAX U8_MAX +#else + #define UINT8_MAX 0xFF +#endif + +#ifdef S16_MIN + #define INT16_MIN S16_MIN +#else + #define INT16_MIN (-32768) +#endif + +#ifdef S16_MAX + #define INT16_MAX S16_MAX +#else + #define INT16_MAX 32767 +#endif + +#ifdef U16_MAX + #define UINT16_MAX U16_MAX +#else + #define UINT16_MAX 0xFFFF +#endif + +#ifdef S32_MIN + #define INT32_MIN S32_MIN +#else + #define INT32_MIN (-2147483647 - 1) +#endif + +#ifdef S32_MAX + #define INT32_MAX S32_MAX +#else + #define INT32_MAX 2147483647 +#endif + +#ifdef U32_MAX + #define UINT32_MAX U32_MAX +#else + #define UINT32_MAX 0xFFFFFFFF +#endif + +#ifdef S64_MIN + #define INT64_MIN S64_MIN +#else + #define INT64_MIN (-9223372036854775807LL) +#endif + +#ifdef S64_MAX + #define INT64_MAX S64_MAX +#else + #define INT64_MAX 9223372036854775807LL +#endif + +#ifdef U64_MAX + #define UINT64_MAX U64_MAX +#else + #define UINT64_MAX 0xFFFFFFFFFFFFFFFFULL +#endif + +/* Macros for integer constants */ +#define INT8_C S8_C +#define UINT8_C U8_C +#define INT16_C S16_C +#define UINT16_C U16_C +#define INT32_C S32_C +#define UINT32_C U32_C +#define INT64_C S64_C +#define UINT64_C U64_C + +/* Format conversion of integer types */ + +#define PRIX64 "llX" +#define PRIx64 "llx" +#define PRIu64 "llu" +#define PRId64 "lld" diff --git a/drivers/mcst/gpu-imgtec/include/linux_sw_sync.h b/drivers/mcst/gpu-imgtec/include/linux_sw_sync.h new file mode 100644 index 000000000000..fc66e98ceed4 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/linux_sw_sync.h @@ -0,0 +1,66 @@ +/*************************************************************************/ /*! +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/
+
+#ifndef _UAPI_LINUX_PVR_SW_SYNC_H
+#define _UAPI_LINUX_PVR_SW_SYNC_H
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+
+#include <linux/types.h>
+
+#include "pvrsrv_sync_km.h"
+
+struct pvr_sw_sync_create_fence_data {
+ char name[PVRSRV_SYNC_NAME_LENGTH];
+ __s32 fence;
+ __u32 pad;
+ __u64 sync_pt_idx;
+};
+
+struct pvr_sw_timeline_advance_data {
+ __u64 sync_pt_idx;
+};
+
+#define PVR_SW_SYNC_IOC_MAGIC 'W'
+#define PVR_SW_SYNC_IOC_CREATE_FENCE _IOWR(PVR_SW_SYNC_IOC_MAGIC, 0, struct pvr_sw_sync_create_fence_data)
+#define PVR_SW_SYNC_IOC_INC _IOR(PVR_SW_SYNC_IOC_MAGIC, 1, struct pvr_sw_timeline_advance_data)
+
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+#endif
diff --git a/drivers/mcst/gpu-imgtec/include/lock_types.h b/drivers/mcst/gpu-imgtec/include/lock_types.h
new file mode 100644
index 000000000000..f25e3e95b51d
--- /dev/null
+++ b/drivers/mcst/gpu-imgtec/include/lock_types.h
@@ -0,0 +1,92 @@
+/*************************************************************************/ /*!
+@File lock_types.h
+@Title Locking types
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Locking specific enums, defines and structures
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef LOCK_TYPES_H
+#define LOCK_TYPES_H
+
+/* In Linux kernel mode we are using the kernel mutex implementation directly
+ * with macros. This allows us to use the kernel lockdep feature for lock
+ * debugging. */
+#if defined(LINUX) && defined(__KERNEL__)
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+/* The mutex is defined as a pointer to be compatible with the other code. This
+ * isn't ideal and usually you wouldn't do that in kernel code.
*/ +typedef struct mutex *POS_LOCK; +typedef struct rw_semaphore *POSWR_LOCK; +typedef spinlock_t *POS_SPINLOCK; +typedef atomic_t ATOMIC_T; + +#else /* defined(LINUX) && defined(__KERNEL__) */ +#include "img_types.h" /* needed for IMG_INT */ +typedef struct _OS_LOCK_ *POS_LOCK; + +#if defined(LINUX) || defined(__QNXNTO__) || defined(INTEGRITY_OS) +typedef struct _OSWR_LOCK_ *POSWR_LOCK; +#else /* defined(LINUX) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */ +typedef struct _OSWR_LOCK_ { + IMG_UINT32 ui32Dummy; +} *POSWR_LOCK; +#endif /* defined(LINUX) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */ + +#if defined(LINUX) + typedef struct _OS_ATOMIC {IMG_INT32 counter;} ATOMIC_T; +#elif defined(__QNXNTO__) + typedef struct _OS_ATOMIC {IMG_INT32 counter;} ATOMIC_T; +#elif defined(_WIN32) + /* + * Dummy definition. WDDM doesn't use Services, but some headers + * still have to be shared. This is one such case. + */ + typedef struct _OS_ATOMIC {IMG_INT32 counter;} ATOMIC_T; +#elif defined(INTEGRITY_OS) + /* Only lower 32bits are used in OS ATOMIC APIs to have consistent behaviour across all OS */ + typedef struct _OS_ATOMIC {IMG_INT64 counter;} ATOMIC_T; +#else + #error "Please type-define an atomic lock for this environment" +#endif + +#endif /* defined(LINUX) && defined(__KERNEL__) */ + +#endif /* LOCK_TYPES_H */ diff --git a/drivers/mcst/gpu-imgtec/include/log2.h b/drivers/mcst/gpu-imgtec/include/log2.h new file mode 100644 index 000000000000..57af4950ad9c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/log2.h @@ -0,0 +1,409 @@ +/*************************************************************************/ /*! +@Title Integer log2 and related functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef LOG2_H +#define LOG2_H + +#include "img_defs.h" + +/*************************************************************************/ /*! +@Description Determine if a number is a power of two. +@Input n +@Return True if n is a power of 2, false otherwise. True if n == 0. +*/ /**************************************************************************/ +static INLINE IMG_BOOL __const_function IsPower2(uint32_t n) +{ + /* C++ needs this cast. */ + return (IMG_BOOL)((n & (n - 1U)) == 0U); +} + +/*************************************************************************/ /*! +@Description Determine if a number is a power of two. +@Input n +@Return True if n is a power of 2, false otherwise. True if n == 0. +*/ /**************************************************************************/ +static INLINE IMG_BOOL __const_function IsPower2_64(uint64_t n) +{ + /* C++ needs this cast. */ + return (IMG_BOOL)((n & (n - 1U)) == 0U); +} + +/* Code using GNU GCC intrinsics */ +#if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER)) + +/* CHAR_BIT is typically found in . For all the platforms where + * CHAR_BIT is not available, defined it here with the assumption that there + * are 8 bits in a byte */ +#ifndef CHAR_BIT +#define CHAR_BIT 8U +#endif + +/*************************************************************************/ /*! +@Description Compute floor(log2(n)) +@Input n +@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0 +*/ /**************************************************************************/ +static INLINE uint32_t __const_function FloorLog2(uint32_t n) +{ + if (unlikely(n == 0U)) + { + return 0; + } + else + { + uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n); + return uNumBits - (uint32_t)__builtin_clz(n) - 1U; + } +} + +/*************************************************************************/ /*! 
+@Description Compute floor(log2(n)) +@Input n +@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0 +*/ /**************************************************************************/ +static INLINE uint32_t __const_function FloorLog2_64(uint64_t n) +{ + if (unlikely(n == 0U)) + { + return 0; + } + else + { + uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n); + return uNumBits - (uint32_t)__builtin_clzll(n) - 1U; + } +} + +/*************************************************************************/ /*! +@Description Compute ceil(log2(n)) +@Input n +@Return log2(n) rounded up to the nearest integer. Returns 0 if n == 0 +*/ /**************************************************************************/ +static INLINE uint32_t __const_function CeilLog2(uint32_t n) +{ + if (unlikely(n == 0U || n == 1U)) + { + return 0; + } + else + { + uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n); + + n--; /* Handle powers of 2 */ + return uNumBits - (uint32_t)__builtin_clz(n); + } +} + +/*************************************************************************/ /*! +@Description Compute ceil(log2(n)) +@Input n +@Return log2(n) rounded up to the nearest integer. Returns 0 if n == 0 +*/ /**************************************************************************/ +static INLINE uint32_t __const_function CeilLog2_64(uint64_t n) +{ + if (unlikely(n == 0U || n == 1U)) + { + return 0; + } + else + { + uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n); + + n--; /* Handle powers of 2 */ + return uNumBits - (uint32_t)__builtin_clzll(n); + } +} + +/*************************************************************************/ /*! 
+@Description Compute log2(n) for exact powers of two only +@Input n Must be a power of two +@Return log2(n) +*/ /**************************************************************************/ +static INLINE uint32_t __const_function ExactLog2(uint32_t n) +{ + return (uint32_t)CHAR_BIT * (uint32_t)sizeof(n) - (uint32_t)__builtin_clz(n) - 1U; +} + +/*************************************************************************/ /*! +@Description Compute log2(n) for exact powers of two only +@Input n Must be a power of two +@Return log2(n) +*/ /**************************************************************************/ +static INLINE uint32_t __const_function ExactLog2_64(uint64_t n) +{ + return (uint32_t)CHAR_BIT * (uint32_t)sizeof(n) - (uint32_t)__builtin_clzll(n) - 1U; +} + +/*************************************************************************/ /*! +@Description Round a non-power-of-two number up to the next power of two. +@Input n +@Return n rounded up to the next power of two. If n is zero or + already a power of two, return n unmodified. +*/ /**************************************************************************/ +static INLINE uint32_t __const_function RoundUpToNextPowerOfTwo(uint32_t n) +{ + /* Cases with n greater than 2^31 needs separate handling + * as result of (1<<32) is undefined. */ + if (unlikely( n == 0U || n > (uint32_t)1 << ((uint32_t)CHAR_BIT * sizeof(n) - 1U))) + { + return 0; + } + + /* Return n if it is already a power of 2 */ + if ((IMG_BOOL)((n & (n - 1U)) == 0U)) + { + return n; + } + + return (uint32_t)1 << ((uint32_t)CHAR_BIT * sizeof(n) - (uint32_t)__builtin_clz(n)); +} + +/*************************************************************************/ /*! +@Description Round a non-power-of-two number up to the next power of two. +@Input n +@Return n rounded up to the next power of two. If n is zero or + already a power of two, return n unmodified. 
+*/ /**************************************************************************/ +static INLINE uint64_t __const_function RoundUpToNextPowerOfTwo_64(uint64_t n) +{ + /* Cases with n greater than 2^63 needs separate handling + * as result of (1<<64) is undefined. */ + if (unlikely( n == 0U || n > (uint64_t)1 << ((uint32_t)CHAR_BIT * sizeof(n) - 1U))) + { + return 0; + } + + /* Return n if it is already a power of 2 */ + if ((IMG_BOOL)((n & (n - 1U)) == 0U)) + { + return n; + } + + return (uint64_t)1 << ((uint64_t)CHAR_BIT * sizeof(n) - (uint64_t)__builtin_clzll(n)); +} + +#else /* #if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER)) */ + +/*************************************************************************/ /*! +@Description Round a non-power-of-two number up to the next power of two. +@Input n +@Return n rounded up to the next power of two. If n is zero or + already a power of two, return n unmodified. +*/ /**************************************************************************/ +static INLINE uint32_t __const_function RoundUpToNextPowerOfTwo(uint32_t n) +{ + n--; + n |= n >> 1; /* handle 2 bit numbers */ + n |= n >> 2; /* handle 4 bit numbers */ + n |= n >> 4; /* handle 8 bit numbers */ + n |= n >> 8; /* handle 16 bit numbers */ + n |= n >> 16; /* handle 32 bit numbers */ + n++; + + return n; +} + +/*************************************************************************/ /*! +@Description Round a non-power-of-two number up to the next power of two. +@Input n +@Return n rounded up to the next power of two. If n is zero or + already a power of two, return n unmodified. 
+*/ /**************************************************************************/ +static INLINE uint64_t __const_function RoundUpToNextPowerOfTwo_64(uint64_t n) +{ + n--; + n |= n >> 1; /* handle 2 bit numbers */ + n |= n >> 2; /* handle 4 bit numbers */ + n |= n >> 4; /* handle 8 bit numbers */ + n |= n >> 8; /* handle 16 bit numbers */ + n |= n >> 16; /* handle 32 bit numbers */ + n |= n >> 32; /* handle 64 bit numbers */ + n++; + + return n; +} + +/*************************************************************************/ /*! +@Description Compute floor(log2(n)) +@Input n +@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0 +*/ /**************************************************************************/ +static INLINE uint32_t __const_function FloorLog2(uint32_t n) +{ + uint32_t log2 = 0; + + while (n >>= 1) + log2++; + + return log2; +} + +/*************************************************************************/ /*! +@Description Compute floor(log2(n)) +@Input n +@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0 +*/ /**************************************************************************/ +static INLINE uint32_t __const_function FloorLog2_64(uint64_t n) +{ + uint32_t log2 = 0; + + while (n >>= 1) + log2++; + + return log2; +} + +/*************************************************************************/ /*! +@Description Compute ceil(log2(n)) +@Input n +@Return log2(n) rounded up to the nearest integer. Returns 0 if n == 0 +*/ /**************************************************************************/ +static INLINE uint32_t __const_function CeilLog2(uint32_t n) +{ + uint32_t log2 = 0; + + if (n == 0) + return 0; + + n--; /* Handle powers of 2 */ + + while (n) + { + log2++; + n >>= 1; + } + + return log2; +} + +/*************************************************************************/ /*! +@Description Compute ceil(log2(n)) +@Input n +@Return log2(n) rounded up to the nearest integer. 
Returns 0 if n == 0 +*/ /**************************************************************************/ +static INLINE uint32_t __const_function CeilLog2_64(uint64_t n) +{ + uint32_t log2 = 0; + + if (n == 0) + return 0; + + n--; /* Handle powers of 2 */ + + while (n) + { + log2++; + n >>= 1; + } + + return log2; +} + +/*************************************************************************/ /*! +@Description Compute log2(n) for exact powers of two only +@Input n Must be a power of two +@Return log2(n) +*/ /**************************************************************************/ +static INLINE uint32_t __const_function ExactLog2(uint32_t n) +{ + static const uint32_t b[] = + {0xAAAAAAAA, 0xCCCCCCCC, 0xF0F0F0F0, 0xFF00FF00, 0xFFFF0000}; + uint32_t r = (n & b[0]) != 0; + + r |= (uint32_t) ((n & b[4]) != 0) << 4; + r |= (uint32_t) ((n & b[3]) != 0) << 3; + r |= (uint32_t) ((n & b[2]) != 0) << 2; + r |= (uint32_t) ((n & b[1]) != 0) << 1; + + return r; +} + +/*************************************************************************/ /*! +@Description Compute log2(n) for exact powers of two only +@Input n Must be a power of two +@Return log2(n) +*/ /**************************************************************************/ +static INLINE uint32_t __const_function ExactLog2_64(uint64_t n) +{ + static const uint64_t b[] = + {0xAAAAAAAAAAAAAAAAULL, 0xCCCCCCCCCCCCCCCCULL, + 0xF0F0F0F0F0F0F0F0ULL, 0xFF00FF00FF00FF00ULL, + 0xFFFF0000FFFF0000ULL, 0xFFFFFFFF00000000ULL}; + uint32_t r = (n & b[0]) != 0; + + r |= (uint32_t) ((n & b[5]) != 0) << 5; + r |= (uint32_t) ((n & b[4]) != 0) << 4; + r |= (uint32_t) ((n & b[3]) != 0) << 3; + r |= (uint32_t) ((n & b[2]) != 0) << 2; + r |= (uint32_t) ((n & b[1]) != 0) << 1; + + return r; +} + +#endif /* #if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER)) */ + +/*************************************************************************/ /*! 
+@Description Compute floor(log2(size)) , where size is the max of 3 sizes + This is almost always the ONLY EVER valid use of FloorLog2. + Usually CeilLog2() should be used instead. + For a 5x5x1 texture, the 3 miplevels are: + 0: 5x5x1 + 1: 2x2x1 + 2: 1x1x1 + + For an 8x8x1 texture, the 4 miplevels are: + 0: 8x8x1 + 1: 4x4x1 + 2: 2x2x1 + 3: 1x1x1 + + +@Input sizeX, sizeY, sizeZ +@Return Count of mipmap levels for given dimensions +*/ /**************************************************************************/ +static INLINE uint32_t __const_function NumMipLevels(uint32_t sizeX, uint32_t sizeY, uint32_t sizeZ) +{ + + uint32_t maxSize = MAX(MAX(sizeX, sizeY), sizeZ); + return FloorLog2(maxSize) + 1U; +} + +#endif /* LOG2_H */ diff --git a/drivers/mcst/gpu-imgtec/include/osfunc_common.h b/drivers/mcst/gpu-imgtec/include/osfunc_common.h new file mode 100644 index 000000000000..aa4e54f08d4a --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/osfunc_common.h @@ -0,0 +1,229 @@ +/*************************************************************************/ /*! +@File +@Title OS functions header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description OS specific API definitions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __OSFUNC_COMMON_H__
+/*! @cond Doxygen_Suppress */
+#define __OSFUNC_COMMON_H__
+/*! @endcond */
+
+#if defined(__KERNEL__) && defined(LINUX)
+#include <linux/types.h>
+#else
+#include <stddef.h>
+#endif
+
+#include "img_types.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**************************************************************************/ /*!
+@Function DeviceMemSet
+@Description Set memory, whose mapping may be uncached, to a given value.
+ Safe implementation for all architectures for uncached mapping,
+ optimised for speed where supported by tool chains.
+ In such cases, OSDeviceMemSet() is defined as a call to this + function. +@Input pvDest void pointer to the memory to be set +@Input ui8Value byte containing the value to be set +@Input ui32Size the number of bytes to be set to the given value +@Return None + */ /**************************************************************************/ +void DeviceMemSet(void *pvDest, IMG_UINT8 ui8Value, size_t ui32Size); + +/**************************************************************************/ /*! +@Function DeviceMemCopy +@Description Copy values from one area of memory. Safe implementation for + all architectures for uncached mapping, of either the source + or destination, optimised for speed where supported by tool + chains. In such cases, OSDeviceMemCopy() is defined as a call + to this function. +@Input pvDst void pointer to the destination memory +@Input pvSrc void pointer to the source memory +@Input ui32Size the number of bytes to be copied +@Return None + */ /**************************************************************************/ +void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t ui32Size); + +/**************************************************************************/ /*! +@Function DeviceMemSetBytes +@Description Potentially very slow (but safe) memset fallback for non-GNU C + compilers for arm64/aarch64 +@Input pvDest void pointer to the memory to be set +@Input ui8Value byte containing the value to be set +@Input ui32Size the number of bytes to be set to the given value +@Return None + */ /**************************************************************************/ +void DeviceMemSetBytes(void *pvDest, IMG_UINT8 ui8Value, size_t ui32Size); + +/**************************************************************************/ /*! 
+@Function DeviceMemCopyBytes +@Description Potentially very slow (but safe) memcpy fallback for non-GNU C + compilers for arm64/aarch64 +@Input pvDst void pointer to the destination memory +@Input pvSrc void pointer to the source memory +@Input ui32Size the number of bytes to be copied +@Return None + */ /**************************************************************************/ +void DeviceMemCopyBytes(void *pvDst, const void *pvSrc, size_t ui32Size); + +/**************************************************************************/ /*! +@Function StringLCopy +@Description Copy at most uDataSize-1 bytes from pszSrc to pszDest. + If no null byte ('\0') is contained within the first uDataSize-1 + characters of the source string, the destination string will be + truncated. If the length of the source string is less than uDataSize + an additional NUL byte will be copied to the destination string + to ensure that the string is NUL-terminated. +@Input pszDest char pointer to the destination string +@Input pszSrc const char pointer to the source string +@Input uDataSize the maximum number of bytes to be copied +@Return Size of the source string + */ /**************************************************************************/ +size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize); + +#if defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY) +#if defined(__GNUC__) +/* Workarounds for assumptions made that memory will not be mapped uncached + * in kernel or user address spaces on arm64 platforms (or other testing). 
+ */ + +#define OSDeviceMemSet(a,b,c) DeviceMemSet((a), (b), (c)) +#define OSDeviceMemCopy(a,b,c) DeviceMemCopy((a), (b), (c)) + +#else /* defined __GNUC__ */ + +#define OSDeviceMemSet(a,b,c) DeviceMemSetBytes((a), (b), (c)) +#define OSDeviceMemCopy(a,b,c) DeviceMemCopyBytes((a), (b), (c)) + +#endif /* defined __GNUC__ */ + +#define OSCachedMemSet(a,b,c) memset((a), (b), (c)) +#define OSCachedMemCopy(a,b,c) memcpy((a), (b), (c)) + +#else /* (defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)) */ + +/* Everything else */ + +/**************************************************************************/ /*! +@Function OSDeviceMemSet +@Description Set memory, whose mapping may be uncached, to a given value. + On some architectures, additional processing may be needed + if the mapping is uncached. +@Input a void pointer to the memory to be set +@Input b byte containing the value to be set +@Input c the number of bytes to be set to the given value +@Return Pointer to the destination memory. + */ /**************************************************************************/ +#define OSDeviceMemSet(a,b,c) memset((a), (b), (c)) + +/**************************************************************************/ /*! +@Function OSDeviceMemCopy +@Description Copy values from one area of memory, to another, when one + or both mappings may be uncached. + On some architectures, additional processing may be needed + if mappings are uncached. +@Input a void pointer to the destination memory +@Input b void pointer to the source memory +@Input c the number of bytes to be copied +@Return Pointer to the destination memory. + */ /**************************************************************************/ +#define OSDeviceMemCopy(a,b,c) memcpy((a), (b), (c)) + +/**************************************************************************/ /*! +@Function OSCachedMemSet +@Description Set memory, where the mapping is known to be cached, to a + given value. 
This function exists to allow an optimal memset + to be performed when memory is known to be cached. +@Input a void pointer to the memory to be set +@Input b byte containing the value to be set +@Input c the number of bytes to be set to the given value +@Return Pointer to the destination memory. + */ /**************************************************************************/ +#define OSCachedMemSet(a,b,c) memset((a), (b), (c)) + +/**************************************************************************/ /*! +@Function OSCachedMemCopy +@Description Copy values from one area of memory, to another, when both + mappings are known to be cached. + This function exists to allow an optimal memcpy to be + performed when memory is known to be cached. +@Input a void pointer to the destination memory +@Input b void pointer to the source memory +@Input c the number of bytes to be copied +@Return Pointer to the destination memory. + */ /**************************************************************************/ +#define OSCachedMemCopy(a,b,c) memcpy((a), (b), (c)) + +#endif /* (defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)) */ + +/**************************************************************************/ /*! +@Function OSStringLCopy +@Description Copy at most uDataSize-1 bytes from pszSrc to pszDest. + If no null byte ('\0') is contained within the first uDataSize-1 + characters of the source string, the destination string will be + truncated. If the length of the source string is less than uDataSize + an additional NUL byte will be copied to the destination string + to ensure that the string is NUL-terminated. 
+@Input a char pointer to the destination string +@Input b const char pointer to the source string +@Input c the maximum number of bytes to be copied +@Return Size of the source string + */ /**************************************************************************/ +#if defined(__QNXNTO__) || (defined(LINUX) && defined(__KERNEL__) && !defined(DEBUG)) +#define OSStringLCopy(a,b,c) strlcpy((a), (b), (c)) +#else /* defined(__QNXNTO__) ... */ +#define OSStringLCopy(a,b,c) StringLCopy((a), (b), (c)) +#endif /* defined(__QNXNTO__) ... */ + +#ifdef __cplusplus +} +#endif + +#endif /* __OSFUNC_COMMON_H__ */ + +/****************************************************************************** + End of file (osfunc_common.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/pdumpdefs.h b/drivers/mcst/gpu-imgtec/include/pdumpdefs.h new file mode 100644 index 000000000000..36b47d55e2f9 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/pdumpdefs.h @@ -0,0 +1,246 @@ +/*************************************************************************/ /*! +@File +@Title PDUMP definitions header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description PDUMP definitions header +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
/*
Alternatively, the contents of this file may be used under the terms of
the GNU General Public License Version 2 ("GPL") in which case the provisions
of GPL are applicable instead of those above.

If you wish to allow use of your version of this file only under the terms of
GPL, and not to allow others to use your version of this file under the terms
of the MIT license, indicate your decision by deleting the provisions above
and replace them with the notice and other provisions required by GPL as set
out in the file called "GPL-COPYING" included in this distribution. If you do
not delete the provisions above, a recipient may use your version of this file
under the terms of either the MIT license or GPL.

This License is also included in this distribution in the file called
"MIT-COPYING".

EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ /**************************************************************************/

#ifndef PDUMPDEFS_H
#define PDUMPDEFS_H

/*! PDump pixel-format enumeration. Values are part of the PDump stream
 *  format and must never be renumbered. Value 10 (AYUV4444) is retired. */
typedef enum _PDUMP_PIXEL_FORMAT_
{
	PVRSRV_PDUMP_PIXEL_FORMAT_UNSUPPORTED  = 0,
	PVRSRV_PDUMP_PIXEL_FORMAT_RGB8         = 1,
	PVRSRV_PDUMP_PIXEL_FORMAT_RGB332       = 2,
	PVRSRV_PDUMP_PIXEL_FORMAT_KRGB555      = 3,
	PVRSRV_PDUMP_PIXEL_FORMAT_RGB565       = 4,
	PVRSRV_PDUMP_PIXEL_FORMAT_ARGB4444     = 5,
	PVRSRV_PDUMP_PIXEL_FORMAT_ARGB1555     = 6,
	PVRSRV_PDUMP_PIXEL_FORMAT_RGB888       = 7,
	PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8888     = 8,
	PVRSRV_PDUMP_PIXEL_FORMAT_YUV8         = 9,
/*	PVRSRV_PDUMP_PIXEL_FORMAT_AYUV4444     = 10, */
	PVRSRV_PDUMP_PIXEL_FORMAT_VY0UY1_8888  = 11,
	PVRSRV_PDUMP_PIXEL_FORMAT_UY0VY1_8888  = 12,
	PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888  = 13,
	PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888  = 14,
	PVRSRV_PDUMP_PIXEL_FORMAT_YUV888       = 15,
	PVRSRV_PDUMP_PIXEL_FORMAT_UYVY10101010 = 16,
	PVRSRV_PDUMP_PIXEL_FORMAT_VYAUYA8888   = 17,
	PVRSRV_PDUMP_PIXEL_FORMAT_AYUV8888     = 18,
	PVRSRV_PDUMP_PIXEL_FORMAT_AYUV2101010  = 19,
	PVRSRV_PDUMP_PIXEL_FORMAT_YUV101010    = 20,
	PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y8       = 21,
	PVRSRV_PDUMP_PIXEL_FORMAT_YUV_IMC2     = 22,
	PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12     = 23,
	PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL8      = 24,
	PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL12     = 25,
	PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV8  = 26,
	PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8  = 27,
	PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y10      = 28,
	PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV10 = 29,
	PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV10 = 30,
	PVRSRV_PDUMP_PIXEL_FORMAT_ABGR8888     = 31,
	PVRSRV_PDUMP_PIXEL_FORMAT_BGRA8888     = 32,
	PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8332     = 33,
	PVRSRV_PDUMP_PIXEL_FORMAT_RGB555       = 34,
	PVRSRV_PDUMP_PIXEL_FORMAT_F16          = 35,
	PVRSRV_PDUMP_PIXEL_FORMAT_F32          = 36,
	PVRSRV_PDUMP_PIXEL_FORMAT_L16          = 37,
	PVRSRV_PDUMP_PIXEL_FORMAT_L32          = 38,
	PVRSRV_PDUMP_PIXEL_FORMAT_RGBA8888     = 39,
	PVRSRV_PDUMP_PIXEL_FORMAT_ABGR4444     = 40,
	PVRSRV_PDUMP_PIXEL_FORMAT_RGBA4444     = 41,
	PVRSRV_PDUMP_PIXEL_FORMAT_BGRA4444     = 42,
	PVRSRV_PDUMP_PIXEL_FORMAT_ABGR1555     = 43,
	PVRSRV_PDUMP_PIXEL_FORMAT_RGBA5551     = 44,
	PVRSRV_PDUMP_PIXEL_FORMAT_BGRA5551     = 45,
	PVRSRV_PDUMP_PIXEL_FORMAT_BGR565       = 46,
	PVRSRV_PDUMP_PIXEL_FORMAT_A8           = 47,
	/* 48 intentionally unused */
	PVRSRV_PDUMP_PIXEL_FORMAT_F16F16F16F16 = 49,
	PVRSRV_PDUMP_PIXEL_FORMAT_A4           = 50,
	PVRSRV_PDUMP_PIXEL_FORMAT_ARGB2101010  = 51,
	PVRSRV_PDUMP_PIXEL_FORMAT_RSGSBS888    = 52,
	PVRSRV_PDUMP_PIXEL_FORMAT_F32F32F32F32 = 53,
	PVRSRV_PDUMP_PIXEL_FORMAT_F16F16       = 54,
	PVRSRV_PDUMP_PIXEL_FORMAT_F32F32       = 55,
	PVRSRV_PDUMP_PIXEL_FORMAT_F16F16F16    = 56,
	PVRSRV_PDUMP_PIXEL_FORMAT_F32F32F32    = 57,
	PVRSRV_PDUMP_PIXEL_FORMAT_U8           = 58,
	PVRSRV_PDUMP_PIXEL_FORMAT_U8U8         = 59,
	PVRSRV_PDUMP_PIXEL_FORMAT_U16          = 60,
	PVRSRV_PDUMP_PIXEL_FORMAT_U16U16       = 61,
	PVRSRV_PDUMP_PIXEL_FORMAT_U16U16U16U16 = 62,
	PVRSRV_PDUMP_PIXEL_FORMAT_U32          = 63,
	PVRSRV_PDUMP_PIXEL_FORMAT_U32U32       = 64,
	PVRSRV_PDUMP_PIXEL_FORMAT_U32U32U32U32 = 65,
	PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV32     = 66,

	/* Forces the enum to 32-bit storage on all compilers. */
	PVRSRV_PDUMP_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff

} PDUMP_PIXEL_FORMAT;

/*! Framebuffer-compression channel swizzles. Note the gap between 0x5 and
 *  0x8 mirrors the hardware encoding. */
typedef enum _PDUMP_FBC_SWIZZLE_
{
	PVRSRV_PDUMP_FBC_SWIZZLE_ARGB = 0x0,
	PVRSRV_PDUMP_FBC_SWIZZLE_ARBG = 0x1,
	PVRSRV_PDUMP_FBC_SWIZZLE_AGRB = 0x2,
	PVRSRV_PDUMP_FBC_SWIZZLE_AGBR = 0x3,
	PVRSRV_PDUMP_FBC_SWIZZLE_ABGR = 0x4,
	PVRSRV_PDUMP_FBC_SWIZZLE_ABRG = 0x5,
	PVRSRV_PDUMP_FBC_SWIZZLE_RGBA = 0x8,
	PVRSRV_PDUMP_FBC_SWIZZLE_RBGA = 0x9,
	PVRSRV_PDUMP_FBC_SWIZZLE_GRBA = 0xA,
	PVRSRV_PDUMP_FBC_SWIZZLE_GBRA = 0xB,
	PVRSRV_PDUMP_FBC_SWIZZLE_BGRA = 0xC,
	PVRSRV_PDUMP_FBC_SWIZZLE_BRGA = 0xD,
} PDUMP_FBC_SWIZZLE;

/* ---- PDump address-mode word: bit-field layout -------------------------- */

#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT       0
#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_MASK        0x000000FF

#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT     8
#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_NEGATIVE  (1U << PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT)

#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_SHIFT    12
#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_MASK     0x000FF000

#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT         20
#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_MASK          0x00F00000

#define PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT        24
#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_SHIFT        25

#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT       28
#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_MASK        0xF0000000

/* Memory-format field values */
#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_STRIDE           (0U  << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE1  (1U  << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE2  (2U  << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE3  (3U  << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE4  (4U  << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE5  (5U  << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE6  (6U  << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE7  (7U  << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_TWIDDLED         (9U  << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_PAGETILED        (11U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_ZTWIDDLED        (12U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)

/* FBC-mode field values */
#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_NONE               (0U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_DIRECT         (1U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_DIRECT        (2U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_32X2_DIRECT        (3U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT       (4U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT      (5U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT_4TILE (6U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT_4TILE (7U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)

#define PVRSRV_PDUMP_ADDRMODE_FBC_DECOR                  (1U << PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT)

#define PVRSRV_PDUMP_ADDRMODE_FBC_LOSSY                  (1U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_SHIFT)

/* FBC-compatibility field values */
#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_BASE             (1U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_ENHANCED         (2U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V2               (3U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_SURFACE       (4U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_RESOURCE      (5U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_1_SURFACE     (6U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_1_RESOURCE    (7U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V4               (8U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)

/*! PDump poll comparison operators. */
typedef enum _PDUMP_POLL_OPERATOR
{
	PDUMP_POLL_OPERATOR_EQUAL        = 0,
	PDUMP_POLL_OPERATOR_LESS         = 1,
	PDUMP_POLL_OPERATOR_LESSEQUAL    = 2,
	PDUMP_POLL_OPERATOR_GREATER      = 3,
	PDUMP_POLL_OPERATOR_GREATEREQUAL = 4,
	PDUMP_POLL_OPERATOR_NOTEQUAL     = 5,
} PDUMP_POLL_OPERATOR;


#define PVRSRV_PDUMP_MAX_FILENAME_SIZE 75  /*!< Max length of a pdump log file name */
#define PVRSRV_PDUMP_MAX_COMMENT_SIZE  350 /*!< Max length of a pdump comment */

/*!
 PDump MMU type
 (Maps to values listed in "PowerVR Tools.Pdump2 Script Functions.doc" Sec 2.13)
*/
typedef enum
{
	PDUMP_MMU_TYPE_4KPAGE_32BIT_STDTILE  = 1,
	PDUMP_MMU_TYPE_VARPAGE_32BIT_STDTILE = 2,
	PDUMP_MMU_TYPE_4KPAGE_36BIT_EXTTILE  = 3,
	PDUMP_MMU_TYPE_4KPAGE_32BIT_EXTTILE  = 4,
	PDUMP_MMU_TYPE_4KPAGE_36BIT_STDTILE  = 5,
	PDUMP_MMU_TYPE_VARPAGE_40BIT         = 6,
	PDUMP_MMU_TYPE_VIDEO_40BIT_STDTILE   = 7,
	PDUMP_MMU_TYPE_VIDEO_40BIT_EXTTILE   = 8,
	PDUMP_MMU_TYPE_MIPS_MICROAPTIV       = 9,
	PDUMP_MMU_TYPE_LAST
} PDUMP_MMU_TYPE;

/*!
 PDump state flags, as returned by the PVRSRVPDumpGetState bridge call.
*/
#define PDUMP_STATE_CAPTURE_FRAME (1) /*!< Set when PDump is inside the capture range */
#define PDUMP_STATE_CONNECTED     (2) /*!< Set when a PDump client app is connected */
#define PDUMP_STATE_SUSPENDED     (4) /*!< Set when PDump is suspended */

/*!
 PDump capture modes, used with PVRSRVPDumpSetDefaultCaptureParams.
*/
#define PDUMP_CAPMODE_UNSET      0x00000000UL
#define PDUMP_CAPMODE_FRAMED     0x00000001UL
#define PDUMP_CAPMODE_CONTINUOUS 0x00000002UL
#define PDUMP_CAPMODE_BLOCKED    0x00000003UL

#define PDUMP_CAPMODE_MAX        PDUMP_CAPMODE_BLOCKED

#endif /* PDUMPDEFS_H */

/*****************************************************************************
 End of file (pdumpdefs.h)
*****************************************************************************/
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(PDUMPDESC_H) +#define PDUMPDESC_H + +#include "pdumpdefs.h" + +/* + * Common fields + */ +#define HEADER_WORD0_TYPE_SHIFT (0) +#define HEADER_WORD0_TYPE_CLRMSK (0xFFFFFFFFU) + +#define HEADER_WORD1_SIZE_SHIFT (0) +#define HEADER_WORD1_SIZE_CLRMSK (0x0000FFFFU) +#define HEADER_WORD1_VERSION_SHIFT (16) +#define HEADER_WORD1_VERSION_CLRMSK (0xFFFF0000U) + +#define HEADER_WORD2_DATA_SIZE_SHIFT (0) +#define HEADER_WORD2_DATA_SIZE_CLRMSK (0xFFFFFFFFU) + + +/* + * The image type descriptor + */ + +/* + * Header type (IMGBv2) - 'IMGB' in hex + VERSION 2 + * Header size - 64 bytes + */ +#define IMAGE_HEADER_TYPE (0x42474D49) +#define IMAGE_HEADER_SIZE (64) +#define IMAGE_HEADER_VERSION (2) + +/* + * Image type-specific fields + */ +#define IMAGE_HEADER_WORD3_LOGICAL_WIDTH_SHIFT (0) +#define IMAGE_HEADER_WORD3_LOGICAL_WIDTH_CLRMSK (0xFFFFFFFFU) + +#define IMAGE_HEADER_WORD4_LOGICAL_HEIGHT_SHIFT (0) +#define IMAGE_HEADER_WORD4_LOGICAL_HEIGHT_CLRMSK (0xFFFFFFFFU) + +#define IMAGE_HEADER_WORD5_FORMAT_SHIFT (0) +#define IMAGE_HEADER_WORD5_FORMAT_CLRMSK (0xFFFFFFFFU) + +#define IMAGE_HEADER_WORD6_PHYSICAL_WIDTH_SHIFT (0) +#define IMAGE_HEADER_WORD6_PHYSICAL_WIDTH_CLRMSK (0xFFFFFFFFU) + +#define IMAGE_HEADER_WORD7_PHYSICAL_HEIGHT_SHIFT (0) +#define IMAGE_HEADER_WORD7_PHYSICAL_HEIGHT_CLRMSK (0xFFFFFFFFU) + +#define IMAGE_HEADER_WORD8_TWIDDLING_SHIFT (0) +#define IMAGE_HEADER_WORD8_TWIDDLING_CLRMSK (0x000000FFU) +#define IMAGE_HEADER_WORD8_TWIDDLING_STRIDED (0 << IMAGE_HEADER_WORD8_TWIDDLING_SHIFT) +#define IMAGE_HEADER_WORD8_TWIDDLING_NTWIDDLE (9 << IMAGE_HEADER_WORD8_TWIDDLING_SHIFT) +#define IMAGE_HEADER_WORD8_TWIDDLING_ZTWIDDLE (12 << IMAGE_HEADER_WORD8_TWIDDLING_SHIFT) + + +#define IMAGE_HEADER_WORD8_STRIDE_SHIFT (8) +#define IMAGE_HEADER_WORD8_STRIDE_CLRMSK (0x0000FF00U) +#define IMAGE_HEADER_WORD8_STRIDE_POSITIVE (0 << IMAGE_HEADER_WORD8_STRIDE_SHIFT) +#define 
IMAGE_HEADER_WORD8_STRIDE_NEGATIVE (1 << IMAGE_HEADER_WORD8_STRIDE_SHIFT) + +#define IMAGE_HEADER_WORD8_BIFTYPE_SHIFT (16) +#define IMAGE_HEADER_WORD8_BIFTYPE_CLRMSK (0x00FF0000U) +#define IMAGE_HEADER_WORD8_BIFTYPE_NONE (0 << IMAGE_HEADER_WORD8_BIFTYPE_SHIFT) + +#define IMAGE_HEADER_WORD8_FBCTYPE_SHIFT (24) +#define IMAGE_HEADER_WORD8_FBCTYPE_CLRMSK (0xFF000000U) +#define IMAGE_HEADER_WORD8_FBCTYPE_8X8 (1 << IMAGE_HEADER_WORD8_FBCTYPE_SHIFT) +#define IMAGE_HEADER_WORD8_FBCTYPE_16x4 (2 << IMAGE_HEADER_WORD8_FBCTYPE_SHIFT) +#define IMAGE_HEADER_WORD8_FBCTYPE_32x2 (3 << IMAGE_HEADER_WORD8_FBCTYPE_SHIFT) + +#define IMAGE_HEADER_WORD9_FBCDECOR_SHIFT (0) +#define IMAGE_HEADER_WORD9_FBCDECOR_CLRMSK (0x000000FFU) +#define IMAGE_HEADER_WORD9_FBCDECOR_ENABLE (1 << IMAGE_HEADER_WORD9_FBCDECOR_SHIFT) + +/* Align with fbcomp_export_c.h in pdump_tools branch */ +#define IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT (8) +#define IMAGE_HEADER_WORD9_FBCCOMPAT_CLRMSK (0x0000FF00U) +#define IMAGE_HEADER_WORD9_FBCCOMPAT_SAME_AS_GPU (0 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) +#define IMAGE_HEADER_WORD9_FBCCOMPAT_BASE (1 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) +#define IMAGE_HEADER_WORD9_FBCCOMPAT_TWIDDLED_EN (2 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) /* TWIDDLED_ENHANCED */ +#define IMAGE_HEADER_WORD9_FBCCOMPAT_V2 (3 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) +#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_0_LAYOUT1 (4 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) +#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_0_LAYOUT2 (5 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) /* V30_WITH_HEADER_REMAP */ +#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_1_LAYOUT1 (6 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) +#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_1_LAYOUT2 (7 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) /* V31_WITH_HEADER_REMAP */ +#define IMAGE_HEADER_WORD9_FBCCOMPAT_V4 (8 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) +#define IMAGE_HEADER_WORD9_FBCCOMPAT_V4_PLUS (9 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) +#define IMAGE_HEADER_WORD9_FBCCOMPAT_TFBC (10 << 
IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) + +#define IMAGE_HEADER_WORD9_LOSSY_SHIFT (16) +#define IMAGE_HEADER_WORD9_LOSSY_CLRMSK (0x00FF0000U) +#define IMAGE_HEADER_WORD9_LOSSY_ON (1 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) +#define IMAGE_HEADER_WORD9_LOSSY_OFF (0 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) + +#define IMAGE_HEADER_WORD9_SWIZZLE_SHIFT (24) +#define IMAGE_HEADER_WORD9_SWIZZLE_CLRMSK (0xFF000000U) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ARGB (0x0 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ARBG (0x1 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_AGRB (0x2 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_AGBR (0x3 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ABGR (0x4 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ABRG (0x5 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_RGBA (0x8 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_RBGA (0x9 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_GRBA (0xA << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_GBRA (0xB << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_BGRA (0xC << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_BRGA (0xD << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) + +#define IMAGE_HEADER_WORD10_FBCCLEAR_CH0_SHIFT (0) +#define IMAGE_HEADER_WORD10_FBCCLEAR_CH0_CLRMSK (0xFFFFFFFFU) + +#define IMAGE_HEADER_WORD11_FBCCLEAR_CH1_SHIFT (0) +#define IMAGE_HEADER_WORD11_FBCCLEAR_CH1_CLRMSK (0xFFFFFFFFU) + +#define IMAGE_HEADER_WORD12_FBCCLEAR_CH2_SHIFT (0) +#define IMAGE_HEADER_WORD12_FBCCLEAR_CH2_CLRMSK (0xFFFFFFFFU) + +#define IMAGE_HEADER_WORD13_FBCCLEAR_CH3_SHIFT (0) +#define IMAGE_HEADER_WORD13_FBCCLEAR_CH3_CLRMSK (0xFFFFFFFFU) + +/* IMAGE_HEADER_WORD14_RESERVED1 */ + +/* IMAGE_HEADER_WORD15_RESERVED2 */ + 
+/* + * The data type descriptor + */ + +/* + * Header type (IMGCv1) - 'IMGC' in hex + VERSION 0 + * Header size - 20 bytes (5 x 32 bit WORDS) + */ +#define DATA_HEADER_TYPE (0x43474D49) +#define DATA_HEADER_SIZE (20) +#define DATA_HEADER_VERSION (0) + +/* + * The IBIN type descriptor + */ + +/* + * Header type (IBIN) - 'IBIN' in hex + VERSION 0 + * Header size - 12 bytes (3 x 32 bit WORDS) + */ +#define IBIN_HEADER_TYPE (0x4e494249) +#define IBIN_HEADER_SIZE (12) +#define IBIN_HEADER_VERSION (0) + +/* + * Data type-specific fields + */ +#define DATA_HEADER_WORD3_ELEMENT_TYPE_SHIFT (0) +#define DATA_HEADER_WORD3_ELEMENT_TYPE_CLRMSK (0xFFFFFFFFU) + +#define DATA_HEADER_WORD4_ELEMENT_COUNT_SHIFT (0) +#define DATA_HEADER_WORD4_ELEMENT_COUNT_CLRMSK (0xFFFFFFFFU) + +#endif /* PDUMPDESC_H */ diff --git a/drivers/mcst/gpu-imgtec/include/public/powervr/buffer_attribs.h b/drivers/mcst/gpu-imgtec/include/public/powervr/buffer_attribs.h new file mode 100644 index 000000000000..9665ba2dd40d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/public/powervr/buffer_attribs.h @@ -0,0 +1,147 @@ +/*************************************************************************/ /*! +@File +@Title 3D types for use by IMG APIs +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License MIT + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
/*
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/ /**************************************************************************/
#ifndef POWERVR_BUFFER_ATTRIBS_H
#define POWERVR_BUFFER_ATTRIBS_H

/**
 * Memory layouts
 * Defines how pixels are laid out within a surface.
 */
typedef enum
{
	IMG_MEMLAYOUT_STRIDED,      /**< Resource is strided, one row at a time */
	IMG_MEMLAYOUT_TWIDDLED,     /**< Resource is 2D twiddled to match HW */
	IMG_MEMLAYOUT_3DTWIDDLED,   /**< Resource is 3D twiddled, classic style */
	IMG_MEMLAYOUT_TILED,        /**< Resource is tiled, tiling config specified elsewhere. */
	IMG_MEMLAYOUT_PAGETILED,    /**< Resource is pagetiled */
	IMG_MEMLAYOUT_INVNTWIDDLED, /**< Resource is 2D twiddled !N style */
} IMG_MEMLAYOUT;

/**
 * Rotation types
 */
typedef enum
{
	IMG_ROTATION_0DEG   = 0,
	IMG_ROTATION_90DEG  = 1,
	IMG_ROTATION_180DEG = 2,
	IMG_ROTATION_270DEG = 3,
	IMG_ROTATION_FLIP_Y = 4,

	IMG_ROTATION_BAD    = 255,
} IMG_ROTATION;

/**
 * Alpha types.
 * Colourspace tag stored in the upper half-word; IMG_COLOURSPACE_FORMAT_MASK
 * isolates it.
 */
typedef enum
{
	IMG_COLOURSPACE_FORMAT_UNKNOWN                = 0x0 << 16,
	IMG_COLOURSPACE_FORMAT_LINEAR                 = 0x1 << 16,
	IMG_COLOURSPACE_FORMAT_SRGB                   = 0x2 << 16,
	IMG_COLOURSPACE_FORMAT_SCRGB                  = 0x3 << 16,
	IMG_COLOURSPACE_FORMAT_SCRGB_LINEAR           = 0x4 << 16,
	IMG_COLOURSPACE_FORMAT_DISPLAY_P3_LINEAR      = 0x5 << 16,
	IMG_COLOURSPACE_FORMAT_DISPLAY_P3             = 0x6 << 16,
	IMG_COLOURSPACE_FORMAT_BT2020_PQ              = 0x7 << 16,
	IMG_COLOURSPACE_FORMAT_BT2020_LINEAR          = 0x8 << 16,
	IMG_COLOURSPACE_FORMAT_DISPLAY_P3_PASSTHROUGH = 0x9 << 16,
	IMG_COLOURSPACE_FORMAT_MASK                   = 0xF << 16,
} IMG_COLOURSPACE_FORMAT;

/* FIX(review): the `mode` argument was previously expanded unparenthesized
 * in all of the macros below, so an argument containing an operator of lower
 * precedence than `==` (e.g. `a | b`) would expand incorrectly. Every use is
 * now wrapped as `(mode)`; all existing call sites are unaffected. */

/** True (IMG_TRUE) iff the compression mode is one of the lossy variants. */
#define IS_FBCDC_LOSSY(mode)		(((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8)  ? IMG_TRUE : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4) ? IMG_TRUE : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2) ? IMG_TRUE : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8)  ? IMG_TRUE : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4) ? IMG_TRUE : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2) ? IMG_TRUE : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8)  ? IMG_TRUE : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4) ? IMG_TRUE : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2) ? IMG_TRUE : IMG_FALSE)

/** True (IMG_TRUE) iff the compression mode is the packed variant. */
#define IS_FBCDC_PACKED(mode)		(((mode) == IMG_FB_COMPRESSION_DIRECT_PACKED_8x8) ? IMG_TRUE : IMG_FALSE)

/** Strip lossy/packed qualifiers, leaving the base block geometry. */
#define GET_FBCDC_BLOCK_TYPE(mode)	(((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8)  ? IMG_FB_COMPRESSION_DIRECT_8x8  : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_PACKED_8x8)   ? IMG_FB_COMPRESSION_DIRECT_8x8  : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8)  ? IMG_FB_COMPRESSION_DIRECT_8x8  : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8)  ? IMG_FB_COMPRESSION_DIRECT_8x8  : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : (mode))

/** Add the packed qualifier to a compatible base mode (8x8 only). */
#define FBCDC_MODE_ADD_PACKING(mode)	(((mode) == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_PACKED_8x8 : (mode))

/** Remove the packed qualifier, restoring the base mode. */
#define FBCDC_MODE_REMOVE_PACKING(mode)	(((mode) == IMG_FB_COMPRESSION_DIRECT_PACKED_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : (mode))

/** Add the 25% lossy qualifier to a base mode. */
#define FBCDC_MODE_ADD_LOSSY25(mode)	(((mode) == IMG_FB_COMPRESSION_DIRECT_8x8)  ? IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8  : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_16x4) ? IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4 : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_32x2) ? IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2 : (mode))

/** Add the 50% lossy qualifier to a base mode. */
#define FBCDC_MODE_ADD_LOSSY50(mode)	(((mode) == IMG_FB_COMPRESSION_DIRECT_8x8)  ? IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8  : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_16x4) ? IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4 : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_32x2) ? IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2 : (mode))

/** Add the 75% lossy qualifier to a base mode. */
#define FBCDC_MODE_ADD_LOSSY75(mode)	(((mode) == IMG_FB_COMPRESSION_DIRECT_8x8)  ? IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8  : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_16x4) ? IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4 : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_32x2) ? IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2 : (mode))

/** Remove any lossy qualifier, restoring the base mode. */
#define FBCDC_MODE_REMOVE_LOSSY(mode)	(((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8)  ? IMG_FB_COMPRESSION_DIRECT_8x8  : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8)  ? IMG_FB_COMPRESSION_DIRECT_8x8  : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8)  ? IMG_FB_COMPRESSION_DIRECT_8x8  : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
					 ((mode) == IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : (mode))

/**
 * Types of framebuffer compression.
 * NOTE: the declaration order (and thus the numeric values) is ABI for
 * existing PDump streams - do not reorder or insert entries.
 */
typedef enum
{
	IMG_FB_COMPRESSION_NONE,
	IMG_FB_COMPRESSION_DIRECT_8x8,
	IMG_FB_COMPRESSION_DIRECT_16x4,
	IMG_FB_COMPRESSION_DIRECT_32x2,
	IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8,
	IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4,
	IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2,
	IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8,
	IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8,
	IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4,
	IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2,
	IMG_FB_COMPRESSION_DIRECT_PACKED_8x8,
	IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4,
	IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2,
} IMG_FB_COMPRESSION;


#endif /* POWERVR_BUFFER_ATTRIBS_H */
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef IMG_DRM_FOURCC_H +#define IMG_DRM_FOURCC_H + +#if defined(__KERNEL__) +#include +#else +/* + * Include types.h to workaround versions of libdrm older than 2.4.68 + * not including the correct headers. + */ +#include + +#include +#endif + +/* + * Don't get too inspired by this example :) + * ADF doesn't support DRM modifiers, so the memory layout had to be + * included in the fourcc name, but the proper way to specify information + * additional to pixel formats is to use DRM modifiers. + * + * See upstream drm_fourcc.h for the proper naming convention. + */ +#ifndef DRM_FORMAT_BGRA8888_DIRECT_16x4 +#define DRM_FORMAT_BGRA8888_DIRECT_16x4 fourcc_code('I', 'M', 'G', '0') +#endif + +/* + * Upstream doesn't have a floating point format yet, so let's make one + * up. 
+ * Note: The kernel's core DRM needs to know about this format, + * otherwise it won't be supported and should not be exposed by our + * kernel modules either. + * Refer to the provided kernel patch adding this format. + */ +#if !defined(__KERNEL__) +#define DRM_FORMAT_ABGR16_IMG fourcc_code('I', 'M', 'G', '1') +#endif + +/* + * Upstream does not have a packed 10 Bits Per Channel YVU format yet, + * so let`s make one up. + * Note: at the moment this format is not intended to be used with + * a framebuffer, so the kernels core DRM doesn`t need to know + * about this format. This means that the kernel doesn`t need + * to be patched. + */ +#if !defined(__KERNEL__) +#define DRM_FORMAT_YVU444_PACK10_IMG fourcc_code('I', 'M', 'G', '2') +#define DRM_FORMAT_YUV422_2PLANE_PACK10_IMG fourcc_code('I', 'M', 'G', '3') +#define DRM_FORMAT_YUV420_2PLANE_PACK10_IMG fourcc_code('I', 'M', 'G', '4') +#endif + +/* + * Value chosen in the middle of 255 pool to minimise the chance of hitting + * the same value potentially defined by other vendors in the drm_fourcc.h + */ +#define DRM_FORMAT_MOD_VENDOR_PVR 0x92 + +#ifndef DRM_FORMAT_MOD_VENDOR_NONE +#define DRM_FORMAT_MOD_VENDOR_NONE 0 +#endif + +#ifndef DRM_FORMAT_RESERVED +#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1) +#endif + +#ifndef fourcc_mod_code +#define fourcc_mod_code(vendor, val) \ + ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL)) +#endif + +#ifndef DRM_FORMAT_MOD_INVALID +#define DRM_FORMAT_MOD_INVALID fourcc_mod_code(NONE, DRM_FORMAT_RESERVED) +#endif + +#ifndef DRM_FORMAT_MOD_LINEAR +#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0) +#endif + +#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V7 fourcc_mod_code(PVR, 6) +#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V7 fourcc_mod_code(PVR, 12) + +#endif /* IMG_DRM_FOURCC_H */ diff --git a/drivers/mcst/gpu-imgtec/include/public/powervr/mem_types.h b/drivers/mcst/gpu-imgtec/include/public/powervr/mem_types.h new file mode 100644 index 
000000000000..c67897315341 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/public/powervr/mem_types.h @@ -0,0 +1,64 @@ +/*************************************************************************/ /*! +@File +@Title Public types +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License MIT + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef POWERVR_TYPES_H +#define POWERVR_TYPES_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#if defined(_MSC_VER) + #include "msvc_types.h" +#elif defined(LINUX) && defined(__KERNEL__) + #include + #include +#else + #include + #define __iomem +#endif + +typedef void *IMG_CPU_VIRTADDR; + +/* device virtual address */ +typedef struct +{ + uint64_t uiAddr; +#define IMG_CAST_TO_DEVVADDR_UINT(var) (uint64_t)(var) + +} IMG_DEV_VIRTADDR; + +typedef uint64_t IMG_DEVMEM_SIZE_T; +typedef uint64_t IMG_DEVMEM_ALIGN_T; +typedef uint64_t IMG_DEVMEM_OFFSET_T; +typedef uint32_t IMG_DEVMEM_LOG2ALIGN_T; + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/drivers/mcst/gpu-imgtec/include/public/powervr/pvrsrv_sync_ext.h b/drivers/mcst/gpu-imgtec/include/public/powervr/pvrsrv_sync_ext.h new file mode 100644 index 000000000000..2d2aaa798ec8 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/public/powervr/pvrsrv_sync_ext.h @@ -0,0 +1,72 @@ +/*************************************************************************/ /*! +@File +@Title Services external synchronisation interface header +@Description Defines synchronisation structures that are visible internally + and externally +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License MIT + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef POWERVR_SYNC_EXT_H +#define POWERVR_SYNC_EXT_H + +#if defined(__cplusplus) +extern "C" { +#endif + +/*! + * Number of sync prims still used internally in operations + */ +#define PVRSRV_MAX_SYNC_PRIMS 4 + +/*! + * Maximum number of dev var updates passed in a kick call + */ +#define PVRSRV_MAX_DEV_VARS 8 + +/*! + * Number of UFOs in operations + */ +#define PVRSRV_MAX_SYNCS (PVRSRV_MAX_SYNC_PRIMS + PVRSRV_MAX_DEV_VARS) + +/*! Implementation independent types for passing fence/timeline to Services. + */ +typedef int32_t PVRSRV_FENCE; +typedef int32_t PVRSRV_TIMELINE; + +/*! Maximum length for an annotation name string for fence sync model objects. 
+ */ +#define PVRSRV_SYNC_NAME_LENGTH 32 + +/* Macros for API callers using the fence sync model + */ +#define PVRSRV_NO_TIMELINE ((PVRSRV_TIMELINE) -1) +#define PVRSRV_NO_FENCE ((PVRSRV_FENCE) -1) +#define PVRSRV_NO_FENCE_PTR NULL +#define PVRSRV_NO_TIMELINE_PTR NULL + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/drivers/mcst/gpu-imgtec/include/pvr_buffer_sync_shared.h b/drivers/mcst/gpu-imgtec/include/pvr_buffer_sync_shared.h new file mode 100644 index 000000000000..326bc756538f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/pvr_buffer_sync_shared.h @@ -0,0 +1,57 @@ +/*************************************************************************/ /*! +@File +@Title PVR buffer sync shared +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Shared definitions between client and server +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PVR_BUFFER_SYNC_SHARED_H +#define PVR_BUFFER_SYNC_SHARED_H + +#define PVR_BUFFER_FLAG_READ (1 << 0) +#define PVR_BUFFER_FLAG_WRITE (1 << 1) +#define PVR_BUFFER_FLAG_MASK (PVR_BUFFER_FLAG_READ | \ + PVR_BUFFER_FLAG_WRITE) + +/* Maximum number of PMRs passed + * in a kick when using buffer sync + */ +#define PVRSRV_MAX_BUFFERSYNC_PMRS 32 + +#endif /* PVR_BUFFER_SYNC_SHARED_H */ diff --git a/drivers/mcst/gpu-imgtec/include/pvr_debug.h b/drivers/mcst/gpu-imgtec/include/pvr_debug.h new file mode 100644 index 000000000000..dfef41b7cfc3 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/pvr_debug.h @@ -0,0 +1,801 @@ +/*************************************************************************/ /*! 
+@File +@Title PVR Debug Declarations +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provides debug functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PVR_DEBUG_H +#define PVR_DEBUG_H + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" + +/*! @cond Doxygen_Suppress */ +#if defined(_MSC_VER) +# define MSC_SUPPRESS_4127 __pragma(warning(suppress:4127)) +#else +# define MSC_SUPPRESS_4127 +#endif +/*! @endcond */ + +#if defined(__cplusplus) +extern "C" { +#endif + +#define PVR_MAX_DEBUG_MESSAGE_LEN (512) /*!< Max length of a Debug Message */ + +/* These are privately used by pvr_debug, use the PVR_DBG_ defines instead */ +#define DBGPRIV_FATAL 0x001UL /*!< Debug-Fatal. Privately used by pvr_debug. */ +#define DBGPRIV_ERROR 0x002UL /*!< Debug-Error. Privately used by pvr_debug. */ +#define DBGPRIV_WARNING 0x004UL /*!< Debug-Warning. Privately used by pvr_debug. */ +#define DBGPRIV_MESSAGE 0x008UL /*!< Debug-Message. Privately used by pvr_debug. */ +#define DBGPRIV_VERBOSE 0x010UL /*!< Debug-Verbose. Privately used by pvr_debug. */ +#define DBGPRIV_CALLTRACE 0x020UL /*!< Debug-CallTrace. Privately used by pvr_debug. */ +#define DBGPRIV_ALLOC 0x040UL /*!< Debug-Alloc. Privately used by pvr_debug. */ +#define DBGPRIV_BUFFERED 0x080UL /*!< Debug-Buffered. Privately used by pvr_debug. */ +#define DBGPRIV_DEBUG 0x100UL /*!< Debug-AdHoc-Debug. Never submitted. Privately used by pvr_debug. */ +#define DBGPRIV_LAST 0x100UL /*!< Always set to highest mask value. 
Privately used by pvr_debug. */ + +#if !defined(PVRSRV_NEED_PVR_ASSERT) && defined(DEBUG) +#define PVRSRV_NEED_PVR_ASSERT +#endif + +#if defined(PVRSRV_NEED_PVR_ASSERT) && !defined(PVRSRV_NEED_PVR_DPF) +#define PVRSRV_NEED_PVR_DPF +#endif + +#if !defined(PVRSRV_NEED_PVR_TRACE) && (defined(DEBUG) || defined(TIMING)) +#define PVRSRV_NEED_PVR_TRACE +#endif + +#if !defined(DOXYGEN) +/*************************************************************************/ /* +PVRSRVGetErrorString +Returns a string describing the provided PVRSRV_ERROR code +NB No doxygen comments provided as this function does not require porting + for other operating systems +*/ /**************************************************************************/ +const IMG_CHAR *PVRSRVGetErrorString(PVRSRV_ERROR eError); +#define PVRSRVGETERRORSTRING PVRSRVGetErrorString +#endif + +/* PVR_ASSERT() and PVR_DBG_BREAK handling */ + +#if defined(PVRSRV_NEED_PVR_ASSERT) || defined(DOXYGEN) + +/* Unfortunately the Klocwork static analysis checker doesn't understand our + * ASSERT macros. Thus it reports lots of false positive. Defining our Assert + * macros in a special way when the code is analysed by Klocwork avoids + * them. + */ +#if defined(__KLOCWORK__) +#define PVR_ASSERT(x) do { if (!(x)) {abort();} } while (0) +#else /* ! __KLOCWORKS__ */ + +#if defined(_WIN32) +#define PVR_ASSERT(expr) do \ + { \ + MSC_SUPPRESS_4127 \ + if (unlikely(!(expr))) \ + { \ + PVRSRVDebugPrintf(DBGPRIV_FATAL, __FILE__, __LINE__,\ + "*** Debug assertion failed!"); \ + __debugbreak(); \ + } \ + MSC_SUPPRESS_4127 \ + } while (0) + +#else + +#if defined(LINUX) && defined(__KERNEL__) +#include +#include + +/* In Linux kernel mode, use WARN_ON() directly. This produces the + * correct filename and line number in the warning message. 
+ */ +#define PVR_ASSERT(EXPR) do \ + { \ + if (unlikely(!(EXPR))) \ + { \ + PVRSRVDebugPrintf(DBGPRIV_FATAL, __FILE__, __LINE__, \ + "Debug assertion failed!"); \ + WARN_ON(1); \ + } \ + } while (0) + +#else /* defined(LINUX) && defined(__KERNEL__) */ + +/*************************************************************************/ /*! +@Function PVRSRVDebugAssertFail +@Description Indicate to the user that a debug assertion has failed and + prevent the program from continuing. + Invoked from the macro PVR_ASSERT(). +@Input pszFile The name of the source file where the assertion failed +@Input ui32Line The line number of the failed assertion +@Input pszAssertion String describing the assertion +@Return NEVER! +*/ /**************************************************************************/ +IMG_EXPORT void IMG_CALLCONV __noreturn +PVRSRVDebugAssertFail(const IMG_CHAR *pszFile, + IMG_UINT32 ui32Line, + const IMG_CHAR *pszAssertion); + +#define PVR_ASSERT(EXPR) do \ + { \ + if (unlikely(!(EXPR))) \ + { \ + PVRSRVDebugAssertFail(__FILE__, __LINE__, #EXPR); \ + } \ + } while (0) + +#endif /* defined(LINUX) && defined(__KERNEL__) */ +#endif /* defined(_WIN32) */ +#endif /* defined(__KLOCWORK__) */ + +#if defined(__KLOCWORK__) + #define PVR_DBG_BREAK do { abort(); } while (0) +#else + #if defined(WIN32) + #define PVR_DBG_BREAK __debugbreak() /*!< Implementation of PVR_DBG_BREAK for (non-WinCE) Win32 */ + #else + #if defined(PVR_DBG_BREAK_ASSERT_FAIL) + /*!< Implementation of PVR_DBG_BREAK that maps onto PVRSRVDebugAssertFail */ + #if defined(_WIN32) + #define PVR_DBG_BREAK DBG_BREAK + #else + #if defined(LINUX) && defined(__KERNEL__) + #define PVR_DBG_BREAK BUG() + #else + #define PVR_DBG_BREAK PVRSRVDebugAssertFail(__FILE__, __LINE__, "PVR_DBG_BREAK") + #endif + #endif + #else + /*!< Null Implementation of PVR_DBG_BREAK (does nothing) */ + #define PVR_DBG_BREAK + #endif + #endif +#endif + + +#else /* defined(PVRSRV_NEED_PVR_ASSERT) */ + /* Unfortunately the Klocwork static 
analysis checker doesn't understand our + * ASSERT macros. Thus it reports lots of false positive. Defining our Assert + * macros in a special way when the code is analysed by Klocwork avoids + * them. + */ + #if defined(__KLOCWORK__) + #define PVR_ASSERT(EXPR) do { if (!(EXPR)) {abort();} } while (0) + #else + #define PVR_ASSERT(EXPR) (void)(EXPR) /*!< Null Implementation of PVR_ASSERT (does nothing) */ + #endif + + #define PVR_DBG_BREAK /*!< Null Implementation of PVR_DBG_BREAK (does nothing) */ + +#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */ + + +/* PVR_DPF() handling */ + +#if defined(PVRSRV_NEED_PVR_DPF) || defined(DOXYGEN) + + /* New logging mechanism */ + #define PVR_DBG_FATAL DBGPRIV_FATAL /*!< Debug level passed to PVRSRVDebugPrintf() for fatal errors. */ + #define PVR_DBG_ERROR DBGPRIV_ERROR /*!< Debug level passed to PVRSRVDebugPrintf() for non-fatal errors. */ + #define PVR_DBG_WARNING DBGPRIV_WARNING /*!< Debug level passed to PVRSRVDebugPrintf() for warnings. */ + #define PVR_DBG_MESSAGE DBGPRIV_MESSAGE /*!< Debug level passed to PVRSRVDebugPrintf() for information only. */ + #define PVR_DBG_VERBOSE DBGPRIV_VERBOSE /*!< Debug level passed to PVRSRVDebugPrintf() for very low-priority debug. */ + #define PVR_DBG_CALLTRACE DBGPRIV_CALLTRACE /*!< Debug level passed to PVRSRVDebugPrintf() for function tracing purposes. */ + #define PVR_DBG_ALLOC DBGPRIV_ALLOC /*!< Debug level passed to PVRSRVDebugPrintf() for tracking some of drivers memory operations. */ + #define PVR_DBG_BUFFERED DBGPRIV_BUFFERED /*!< Debug level passed to PVRSRVDebugPrintf() when debug should be written to the debug circular buffer. */ + #define PVR_DBG_DEBUG DBGPRIV_DEBUG /*!< Debug level passed to PVRSRVDebugPrintf() for debug messages. */ + + /* These levels are always on with PVRSRV_NEED_PVR_DPF */ + /*! @cond Doxygen_Suppress */ + #define __PVR_DPF_0x001UL(...) PVRSRVDebugPrintf(DBGPRIV_FATAL, __VA_ARGS__) + #define __PVR_DPF_0x002UL(...) 
PVRSRVDebugPrintf(DBGPRIV_ERROR, __VA_ARGS__) + #define __PVR_DPF_0x080UL(...) PVRSRVDebugPrintf(DBGPRIV_BUFFERED, __VA_ARGS__) + + /* + * The AdHoc-Debug level is only supported when enabled in the local + * build environment and may need to be used in both debug and release + * builds. An error is generated in the formal build if it is checked in. + */ +#if defined(PVR_DPF_ADHOC_DEBUG_ON) + #define __PVR_DPF_0x100UL(...) PVRSRVDebugPrintf(DBGPRIV_DEBUG, __VA_ARGS__) +#else + /* Use an undefined token here to stop compilation dead in the offending module */ + #define __PVR_DPF_0x100UL(...) __ERROR__PVR_DBG_DEBUG_is_in_use_but_has_not_been_enabled__Note_Debug_DPF_must_not_be_checked_in__Define_PVR_DPF_ADHOC_DEBUG_ON_for_testing +#endif + + /* Some are compiled out completely in release builds */ +#if defined(DEBUG) || defined(DOXYGEN) + #define __PVR_DPF_0x004UL(...) PVRSRVDebugPrintf(DBGPRIV_WARNING, __VA_ARGS__) + #define __PVR_DPF_0x008UL(...) PVRSRVDebugPrintf(DBGPRIV_MESSAGE, __VA_ARGS__) + #define __PVR_DPF_0x010UL(...) PVRSRVDebugPrintf(DBGPRIV_VERBOSE, __VA_ARGS__) + #define __PVR_DPF_0x020UL(...) PVRSRVDebugPrintf(DBGPRIV_CALLTRACE, __VA_ARGS__) + #define __PVR_DPF_0x040UL(...) PVRSRVDebugPrintf(DBGPRIV_ALLOC, __VA_ARGS__) +#else + #define __PVR_DPF_0x004UL(...) + #define __PVR_DPF_0x008UL(...) + #define __PVR_DPF_0x010UL(...) + #define __PVR_DPF_0x020UL(...) + #define __PVR_DPF_0x040UL(...) +#endif + + /* Translate the different log levels to separate macros + * so they can each be compiled out. + */ +#if defined(DEBUG) + #define __PVR_DPF(lvl, ...) __PVR_DPF_ ## lvl (__FILE__, __LINE__, __VA_ARGS__) +#else + #define __PVR_DPF(lvl, ...) __PVR_DPF_ ## lvl ("", __LINE__, __VA_ARGS__) +#endif + /*! 
@endcond */ + + /* Get rid of the double bracketing */ + #define PVR_DPF(x) __PVR_DPF x + + #define PVR_LOG_ERROR(_rc, _call) \ + PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)) + + #define PVR_LOG_IF_ERROR(_rc, _call) do \ + { if (unlikely(_rc != PVRSRV_OK)) { \ + PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ + } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_WARN_IF_ERROR(_rc, _call) do \ + { if (unlikely(_rc != PVRSRV_OK)) { \ + PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ + } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_RETURN_IF_NOMEM(_expr, _call) do \ + { if (unlikely(_expr == NULL)) { \ + PVR_DPF((PVR_DBG_ERROR, "%s failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", _call, __func__)); \ + return PVRSRV_ERROR_OUT_OF_MEMORY; } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_GOTO_IF_NOMEM(_expr, _err, _go) do \ + { if (unlikely(_expr == NULL)) { \ + PVR_DPF((PVR_DBG_ERROR, "%s failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", #_expr, __func__)); \ + _err = PVRSRV_ERROR_OUT_OF_MEMORY; \ + goto _go; } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_RETURN_IF_ERROR(_rc, _call) do \ + { if (unlikely(_rc != PVRSRV_OK)) { \ + PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ + return _rc; } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_RETURN_VOID_IF_ERROR(_rc, _call) do \ + { if (unlikely(_rc != PVRSRV_OK)) { \ + PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ + return; } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_GOTO_IF_ERROR(_rc, _call, _go) do \ + { if (unlikely(_rc != PVRSRV_OK)) { \ + PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ + goto _go; } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define 
PVR_LOG_GOTO_WITH_ERROR(_call, _err, _rc, _go) do \ + { PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ + _err = _rc; \ + goto _go; \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_IF_FALSE(_expr, _msg) do \ + { if (unlikely(!(_expr))) { \ + PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ + } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_RETURN_IF_FALSE(_expr, _msg, _rc) do \ + { if (unlikely(!(_expr))) { \ + PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ + return _rc; } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_RETURN_VOID_IF_FALSE(_expr, _msg) do \ + { if (unlikely(!(_expr))) { \ + PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ + return; } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_GOTO_IF_FALSE(_expr, _msg, _go) do \ + { if (unlikely(!(_expr))) { \ + PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ + goto _go; } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_RETURN_IF_INVALID_PARAM(_expr, _param) do \ + { if (unlikely(!(_expr))) { \ + PVR_DPF((PVR_DBG_ERROR, "%s invalid in %s()", _param, __func__)); \ + return PVRSRV_ERROR_INVALID_PARAMS; } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_GOTO_IF_INVALID_PARAM(_expr, _err, _go) do \ + { if (unlikely(!(_expr))) { \ + PVR_DPF((PVR_DBG_ERROR, "%s invalid in %s()", #_expr, __func__)); \ + _err = PVRSRV_ERROR_INVALID_PARAMS; \ + goto _go; } \ + MSC_SUPPRESS_4127\ + } while (0) + +/*************************************************************************/ /*! +@Function PVRSRVDebugPrintf +@Description Output a debug message to the user, using an OS-specific + method, to a log or console which can be read by developers + Invoked from the macro PVR_DPF(). +@Input ui32DebugLevel The debug level of the message. This can + be used to restrict the output of debug + messages based on their severity. 
+ If this is PVR_DBG_BUFFERED, the message + should be written into a debug circular + buffer instead of being output immediately + (useful when performance would otherwise + be adversely affected). + The debug circular buffer shall only be + output when PVRSRVDebugPrintfDumpCCB() is + called. +@Input pszFileName The source file containing the code that is + generating the message +@Input ui32Line The line number in the source file +@Input pszFormat The formatted message string +@Input ... Zero or more arguments for use by the + formatted string +@Return None +*/ /**************************************************************************/ +IMG_EXPORT void IMG_CALLCONV PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel, + const IMG_CHAR *pszFileName, + IMG_UINT32 ui32Line, + const IMG_CHAR *pszFormat, + ...) __printf(4, 5); + +/*************************************************************************/ /*! +@Function PVRSRVDebugPrintfDumpCCB +@Description When PVRSRVDebugPrintf() is called with the ui32DebugLevel + specified as DBGPRIV_BUFFERED, the debug shall be written to + the debug circular buffer instead of being output immediately. + (This could be used to obtain debug without incurring a + performance hit by printing it at that moment). + This function shall dump the contents of that debug circular + buffer to be output in an OS-specific method to a log or + console which can be read by developers. 
+@Return None +*/ /**************************************************************************/ +IMG_EXPORT void IMG_CALLCONV PVRSRVDebugPrintfDumpCCB(void); + +#else /* defined(PVRSRV_NEED_PVR_DPF) */ + + #define PVR_DPF(X) /*!< Null Implementation of PowerVR Debug Printf (does nothing) */ + + #define PVR_LOG_ERROR(_rc, _call) (void)(_rc) + #define PVR_LOG_IF_ERROR(_rc, _call) (void)(_rc) + #define PVR_WARN_IF_ERROR(_rc, _call) (void)(_rc) + + #define PVR_LOG_RETURN_IF_NOMEM(_expr, _call) do { if (unlikely(_expr == NULL)) { return PVRSRV_ERROR_OUT_OF_MEMORY; } MSC_SUPPRESS_4127 } while (0) + #define PVR_LOG_GOTO_IF_NOMEM(_expr, _err, _go) do { if (unlikely(_expr == NULL)) { _err = PVRSRV_ERROR_OUT_OF_MEMORY; goto _go; } MSC_SUPPRESS_4127 } while (0) + #define PVR_LOG_RETURN_IF_ERROR(_rc, _call) do { if (unlikely(_rc != PVRSRV_OK)) { return (_rc); } MSC_SUPPRESS_4127 } while (0) + #define PVR_LOG_RETURN_VOID_IF_ERROR(_rc, _call) do { if (unlikely(_rc != PVRSRV_OK)) { return; } MSC_SUPPRESS_4127 } while (0) + #define PVR_LOG_GOTO_IF_ERROR(_rc, _call, _go) do { if (unlikely(_rc != PVRSRV_OK)) { goto _go; } MSC_SUPPRESS_4127 } while (0) + #define PVR_LOG_GOTO_WITH_ERROR(_call, _err, _rc, _go) do { _err = _rc; goto _go; MSC_SUPPRESS_4127 } while (0) + + #define PVR_LOG_IF_FALSE(_expr, _msg) (void)(_expr) + #define PVR_LOG_RETURN_IF_FALSE(_expr, _msg, _rc) do { if (unlikely(!(_expr))) { return (_rc); } MSC_SUPPRESS_4127 } while (0) + #define PVR_LOG_RETURN_VOID_IF_FALSE(_expr, _msg) do { if (unlikely(!(_expr))) { return; } MSC_SUPPRESS_4127 } while (0) + #define PVR_LOG_GOTO_IF_FALSE(_expr, _msg, _go) do { if (unlikely(!(_expr))) { goto _go; } MSC_SUPPRESS_4127 } while (0) + + #define PVR_LOG_RETURN_IF_INVALID_PARAM(_expr, _param) do { if (unlikely(!(_expr))) { return PVRSRV_ERROR_INVALID_PARAMS; } MSC_SUPPRESS_4127 } while (0) + #define PVR_LOG_GOTO_IF_INVALID_PARAM(_expr, _err, _go) do { if (unlikely(!(_expr))) { _err = PVRSRV_ERROR_INVALID_PARAMS; goto _go; } 
MSC_SUPPRESS_4127 } while (0) + + #undef PVR_DPF_FUNCTION_TRACE_ON + +#endif /* defined(PVRSRV_NEED_PVR_DPF) */ + +#define PVR_DPF_FUNC__(lvl, message, ...) PVR_DPF((lvl, "%s: " message, __func__, ##__VA_ARGS__)) +#define PVR_DPF_FUNC(x) PVR_DPF_FUNC__ x + +/* Note: Use only when a log message due to the error absolutely should not + * be printed. Otherwise use PVR_LOG_RETURN_IF_ERROR macro. + */ +#define PVR_RETURN_IF_ERROR(_rc) do \ + { if (unlikely(_rc != PVRSRV_OK)) { \ + return _rc; } \ + MSC_SUPPRESS_4127 \ + } while (0) + +/* Note: Use only when a log message due to the error absolutely should not + * be printed. Otherwise use PVR_LOG_RETURN_IF_FALSE macro. + */ +#define PVR_RETURN_IF_FALSE(_expr, _rc) do \ + { if (unlikely(!(_expr))) { \ + return _rc; } \ + MSC_SUPPRESS_4127 \ + } while (0) + +/* Note: Use only when a log message due to the error absolutely should not + * be printed. Otherwise use PVR_LOG_RETURN_IF_INVALID_PARAM macro. + */ +#define PVR_RETURN_IF_INVALID_PARAM(_expr) do \ + { if (unlikely(!(_expr))) { \ + return PVRSRV_ERROR_INVALID_PARAMS; } \ + MSC_SUPPRESS_4127 \ + } while (0) + +/* Note: Use only when a log message due to the error absolutely should not + * be printed. Otherwise use PVR_LOG_RETURN_IF_NOMEM macro. + */ +#define PVR_RETURN_IF_NOMEM(_expr) do \ + { if (unlikely(!(_expr))) { \ + return PVRSRV_ERROR_OUT_OF_MEMORY; } \ + MSC_SUPPRESS_4127 \ + } while (0) + +/* Note: Use only when a log message due to the error absolutely should not + * be printed. Otherwise use PVR_LOG_GOTO_IF_NOMEM macro. + */ +#define PVR_GOTO_IF_NOMEM(_expr, _err, _go) do \ + { if (unlikely(_expr == NULL)) { \ + _err = PVRSRV_ERROR_OUT_OF_MEMORY; \ + goto _go; } \ + MSC_SUPPRESS_4127 \ + } while (0) + +/* Note: Use only when a log message due to the error absolutely should not + * be printed. Otherwise use PVR_LOG_GOTO_IF_INVALID_PARAM macro. 
+ */ +#define PVR_GOTO_IF_INVALID_PARAM(_expr, _err, _go) do \ + { if (unlikely(!(_expr))) { \ + _err = PVRSRV_ERROR_INVALID_PARAMS; \ + goto _go; } \ + MSC_SUPPRESS_4127 \ + } while (0) + +/* Note: Use only when a log message due to the error absolutely should not + * be printed. Otherwise use PVR_LOG_GOTO_IF_FALSE macro. + */ +#define PVR_GOTO_IF_FALSE(_expr, _go) do \ + { if (unlikely(!(_expr))) { \ + goto _go; } \ + MSC_SUPPRESS_4127 \ + } while (0) + +/* Note: Use only when a log message due to the error absolutely should not + * be printed. Otherwise use PVR_LOG_GOTO_IF_ERROR macro. + */ +#define PVR_GOTO_IF_ERROR(_rc, _go) do \ + { if (unlikely(_rc != PVRSRV_OK)) { \ + goto _go; } \ + MSC_SUPPRESS_4127\ + } while (0) + +/* Note: Use only when a log message due to the error absolutely should not + * be printed. Otherwise use PVR_LOG_GOTO_WITH_ERROR macro. + */ +#define PVR_GOTO_WITH_ERROR(_err, _rc, _go) do \ + { _err = _rc; goto _go; \ + MSC_SUPPRESS_4127 \ + } while (0) + +/*! @cond Doxygen_Suppress */ +#if defined(PVR_DPF_FUNCTION_TRACE_ON) + + #define PVR_DPF_ENTERED \ + PVR_DPF((PVR_DBG_CALLTRACE, "|-> %s:%d entered", __func__, __LINE__)) + + #define PVR_DPF_ENTERED1(p1) \ + PVR_DPF((PVR_DBG_CALLTRACE, "|-> %s:%d entered (0x%lx)", __func__, __LINE__, ((unsigned long)p1))) + + #define PVR_DPF_RETURN_RC(a) \ + do { int _r = (a); PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned %d", __func__, __LINE__, (_r))); return (_r); MSC_SUPPRESS_4127 } while (0) + + #define PVR_DPF_RETURN_RC1(a,p1) \ + do { int _r = (a); PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned %d (0x%lx)", __func__, __LINE__, (_r), ((unsigned long)p1))); return (_r); MSC_SUPPRESS_4127 } while (0) + + #define PVR_DPF_RETURN_VAL(a) \ + do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned with value", __func__, __LINE__)); return (a); MSC_SUPPRESS_4127 } while (0) + + #define PVR_DPF_RETURN_OK \ + do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned ok", __func__, __LINE__)); return PVRSRV_OK; 
MSC_SUPPRESS_4127 } while (0) + + #define PVR_DPF_RETURN \ + do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned", __func__, __LINE__)); return; MSC_SUPPRESS_4127 } while (0) + + #if !defined(DEBUG) + #error PVR DPF Function trace enabled in release build, rectify + #endif + +#else /* defined(PVR_DPF_FUNCTION_TRACE_ON) */ + + #define PVR_DPF_ENTERED + #define PVR_DPF_ENTERED1(p1) + #define PVR_DPF_RETURN_RC(a) return (a) + #define PVR_DPF_RETURN_RC1(a,p1) return (a) + #define PVR_DPF_RETURN_VAL(a) return (a) + #define PVR_DPF_RETURN_OK return PVRSRV_OK + #define PVR_DPF_RETURN return + +#endif /* defined(PVR_DPF_FUNCTION_TRACE_ON) */ +/*! @endcond */ + +#if defined(__KERNEL__) || defined(DOXYGEN) || defined(__QNXNTO__) +/*Use PVR_DPF() unless message is necessary in release build */ +#define PVR_LOG(X) PVRSRVReleasePrintf X + +/*************************************************************************/ /*! +@Function PVRSRVReleasePrintf +@Description Output an important message, using an OS-specific method, + to the Server log or console which will always be output in + both release and debug builds. + Invoked from the macro PVR_LOG(). Used in Services Server only. +@Input pszFormat The message format string +@Input ... Zero or more arguments for use by the format string +@Return None +*/ /**************************************************************************/ +void IMG_CALLCONV PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...) __printf(1, 2); +#endif + +/* PVR_TRACE() handling */ + +#if defined(PVRSRV_NEED_PVR_TRACE) || defined(DOXYGEN) + + #define PVR_TRACE(X) PVRSRVTrace X /*!< PowerVR Debug Trace Macro */ + /* Empty string implementation that is -O0 build friendly */ + #define PVR_TRACE_EMPTY_LINE() PVR_TRACE(("%s", "")) + +/*************************************************************************/ /*! +@Function PVRTrace +@Description Output a debug message to the user + Invoked from the macro PVR_TRACE(). 
+@Input pszFormat The message format string +@Input ... Zero or more arguments for use by the format string +*/ /**************************************************************************/ +IMG_EXPORT void IMG_CALLCONV PVRSRVTrace(const IMG_CHAR* pszFormat, ... ) + __printf(1, 2); + +#else /* defined(PVRSRV_NEED_PVR_TRACE) */ + /*! Null Implementation of PowerVR Debug Trace Macro (does nothing) */ + #define PVR_TRACE(X) + +#endif /* defined(PVRSRV_NEED_PVR_TRACE) */ + + +#if defined(PVRSRV_NEED_PVR_ASSERT) +#ifdef INLINE_IS_PRAGMA +#pragma inline(TRUNCATE_64BITS_TO_32BITS) +#endif + INLINE static IMG_UINT32 TRUNCATE_64BITS_TO_32BITS(IMG_UINT64 uiInput) + { + IMG_UINT32 uiTruncated; + + uiTruncated = (IMG_UINT32)uiInput; + PVR_ASSERT(uiInput == uiTruncated); + return uiTruncated; + } + + +#ifdef INLINE_IS_PRAGMA +#pragma inline(TRUNCATE_64BITS_TO_SIZE_T) +#endif + INLINE static size_t TRUNCATE_64BITS_TO_SIZE_T(IMG_UINT64 uiInput) + { + size_t uiTruncated; + + uiTruncated = (size_t)uiInput; + PVR_ASSERT(uiInput == uiTruncated); + return uiTruncated; + } + + +#ifdef INLINE_IS_PRAGMA +#pragma inline(TRUNCATE_SIZE_T_TO_32BITS) +#endif + INLINE static IMG_UINT32 TRUNCATE_SIZE_T_TO_32BITS(size_t uiInput) + { + IMG_UINT32 uiTruncated; + + uiTruncated = (IMG_UINT32)uiInput; + PVR_ASSERT(uiInput == uiTruncated); + return uiTruncated; + } + + +#else /* defined(PVRSRV_NEED_PVR_ASSERT) */ + #define TRUNCATE_64BITS_TO_32BITS(expr) ((IMG_UINT32)(expr)) + #define TRUNCATE_64BITS_TO_SIZE_T(expr) ((size_t)(expr)) + #define TRUNCATE_SIZE_T_TO_32BITS(expr) ((IMG_UINT32)(expr)) +#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */ + +/*! 
@cond Doxygen_Suppress */ +/* Macros used to trace calls */ +#if defined(DEBUG) + #define PVR_DBG_FILELINE , (__FILE__), (__LINE__) + #define PVR_DBG_FILELINE_PARAM , const IMG_CHAR *pszaFile, IMG_UINT32 ui32Line + #define PVR_DBG_FILELINE_ARG , pszaFile, ui32Line + #define PVR_DBG_FILELINE_FMT " %s:%u" + #define PVR_DBG_FILELINE_UNREF() do { PVR_UNREFERENCED_PARAMETER(pszaFile); \ + PVR_UNREFERENCED_PARAMETER(ui32Line); } while (0) +#else + #define PVR_DBG_FILELINE + #define PVR_DBG_FILELINE_PARAM + #define PVR_DBG_FILELINE_ARG + #define PVR_DBG_FILELINE_FMT + #define PVR_DBG_FILELINE_UNREF() +#endif +/*! @endcond */ + +#if defined(__cplusplus) +} +#endif + +/*! + @def PVR_ASSERT + @brief Aborts the program if assertion fails. + + The macro will be defined only when PVRSRV_NEED_PVR_ASSERT macro is + enabled. It's ignored otherwise. + + @def PVR_DPF + @brief PowerVR Debug Printf logging macro used throughout the driver. + + The macro allows to print logging messages to appropriate log. The + destination log is based on the component (user space / kernel space) and + operating system (Linux, Android, etc.). + + The macro also supports severity levels that allow to turn on/off messages + based on their importance. + + This macro will print messages with severity level higher that error only + if PVRSRV_NEED_PVR_DPF macro is defined. + + @def PVR_LOG_ERROR + @brief Logs error. + + @def PVR_LOG_IF_ERROR + @brief Logs error if not PVRSRV_OK. + + @def PVR_WARN_IF_ERROR + @brief Logs warning if not PVRSRV_OK. + + @def PVR_LOG_RETURN_IF_NOMEM + @brief Logs error if expression is NULL and returns PVRSRV_ERROR_OUT_OF_MEMORY. + + @def PVR_LOG_GOTO_IF_NOMEM + @brief Logs error if expression is NULL and jumps to given label. + + @def PVR_LOG_RETURN_IF_ERROR + @brief Logs error if not PVRSRV_OK and returns the error. + + @def PVR_LOG_RETURN_VOID_IF_ERROR + @brief Logs error if not PVRSRV_OK and returns (used in function that return void). 
+ + @def PVR_LOG_GOTO_IF_ERROR + @brief Logs error if not PVRSRV_OK and jumps to label. + + @def PVR_LOG_GOTO_WITH_ERROR + @brief Logs error, goes to a label and sets the error code. + + @def PVR_LOG_IF_FALSE + @brief Prints error message if expression is false. + + @def PVR_LOG_RETURN_IF_FALSE + @brief Prints error message if expression is false and returns given error. + + @def PVR_LOG_RETURN_VOID_IF_FALSE + @brief Prints error message if expression is false and returns (used in function that return void). + + @def PVR_LOG_GOTO_IF_FALSE + @brief Prints error message if expression is false and jumps to label. + + @def PVR_LOG_RETURN_IF_INVALID_PARAM + @brief Prints error message if expression is false and returns PVRSRV_ERROR_INVALID_PARAMS. + + @def PVR_LOG_GOTO_IF_INVALID_PARAM + @brief Prints error message if expression is false and jumps to label. + + @def PVR_RETURN_IF_ERROR + @brief Returns passed error code if it's different than PVRSRV_OK; + + @def PVR_RETURN_IF_FALSE + @brief Returns passed error code if expression is false. + + @def PVR_RETURN_IF_INVALID_PARAM + @brief Returns PVRSRV_ERROR_INVALID_PARAMS if expression is false. + + @def PVR_RETURN_IF_NOMEM + @brief Returns PVRSRV_ERROR_OUT_OF_MEMORY if expression is NULL. + + @def PVR_GOTO_IF_NOMEM + @brief Goes to a label if expression is NULL. + + @def PVR_GOTO_IF_INVALID_PARAM + @brief Goes to a label if expression is false. + + @def PVR_GOTO_IF_FALSE + @brief Goes to a label if expression is false. + + @def PVR_GOTO_IF_ERROR + @brief Goes to a label if the error code is different than PVRSRV_OK; + + @def PVR_GOTO_WITH_ERROR + @brief Goes to a label and sets the error code. + + @def PVR_LOG + @brief Prints message to a log unconditionally. + + This macro will print messages only if PVRSRV_NEED_PVR_LOG macro is defined. + + @def PVR_TRACE_EMPTY_LINE + @brief Prints empty line to a log (PVRSRV_NEED_PVR_TRACE must be defined). 
+ + @def TRUNCATE_64BITS_TO_32BITS + @brief Truncates 64 bit value to 32 bit value (with possible precision loss). + + @def TRUNCATE_64BITS_TO_SIZE_T + @brief Truncates 64 bit value to size_t value (with possible precision loss). + + @def TRUNCATE_SIZE_T_TO_32BITS + @brief Truncates size_t value to 32 bit value (with possible precision loss). + */ + +#endif /* PVR_DEBUG_H */ + +/****************************************************************************** + End of file (pvr_debug.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/pvr_fd_sync_kernel.h b/drivers/mcst/gpu-imgtec/include/pvr_fd_sync_kernel.h new file mode 100644 index 000000000000..9a85f19c06d2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/pvr_fd_sync_kernel.h @@ -0,0 +1,77 @@ +/*************************************************************************/ /*! +@File pvr_fd_sync_kernel.h +@Title Kernel/userspace interface definitions to use the kernel sync + driver +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +/* vi: set ts=8: */ + + +#ifndef _PVR_FD_SYNC_KERNEL_H_ +#define _PVR_FD_SYNC_KERNEL_H_ + +#include <linux/types.h> +#include <linux/ioctl.h> + +#define PVR_SYNC_MAX_QUERY_FENCE_POINTS 14 + +#define PVR_SYNC_IOC_MAGIC 'W' + +#define PVR_SYNC_IOC_RENAME \ + _IOW(PVR_SYNC_IOC_MAGIC, 4, struct pvr_sync_rename_ioctl_data) + +#define PVR_SYNC_IOC_FORCE_SW_ONLY \ + _IO(PVR_SYNC_IOC_MAGIC, 5) + +struct pvr_sync_pt_info { + /* Output */ + __u32 id; + __u32 ui32FWAddr; + __u32 ui32CurrOp; + __u32 ui32NextOp; + __u32 ui32TlTaken; +} __attribute__((packed, aligned(8))); + +struct pvr_sync_rename_ioctl_data +{ + /* Input */ + char szName[32]; +} __attribute__((packed, aligned(8))); + +#endif /* _PVR_FD_SYNC_KERNEL_H_ */ diff --git a/drivers/mcst/gpu-imgtec/include/pvr_intrinsics.h b/drivers/mcst/gpu-imgtec/include/pvr_intrinsics.h new file mode 100644 index 000000000000..2a9634ca12ae --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/pvr_intrinsics.h @@ -0,0 +1,70 @@ +/*************************************************************************/ /*! +@File +@Title Intrinsics definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _PVR_INTRINSICS_H_ +#define _PVR_INTRINSICS_H_ + +/* PVR_CTZLL: + * Count the number of trailing zeroes in a long long integer + */ + +#if defined(__GNUC__) +#if defined(__x86_64__) || defined(__e2k__) + + #define PVR_CTZLL __builtin_ctzll +#endif +#endif + +/* PVR_CLZLL: + * Count the number of leading zeroes in a long long integer + */ + +#if defined(__GNUC__) +#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__) || \ + defined(__arm__) || defined(__mips) || defined(__e2k__) + +#define PVR_CLZLL __builtin_clzll + +#endif +#endif + +#endif /* _PVR_INTRINSICS_H_ */ diff --git a/drivers/mcst/gpu-imgtec/include/pvrmodule.h b/drivers/mcst/gpu-imgtec/include/pvrmodule.h new file mode 100644 index 000000000000..267c7b687487 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/pvrmodule.h @@ -0,0 +1,48 @@ +/*************************************************************************/ /*! +@Title Module Author and License. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _PVRMODULE_H_ +#define _PVRMODULE_H_ + +MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>"); +MODULE_LICENSE("Dual MIT/GPL"); + +#endif /* _PVRMODULE_H_ */ diff --git a/drivers/mcst/gpu-imgtec/include/pvrsrv_device_types.h b/drivers/mcst/gpu-imgtec/include/pvrsrv_device_types.h new file mode 100644 index 000000000000..cafd2dfcac6c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/pvrsrv_device_types.h @@ -0,0 +1,55 @@ +/*************************************************************************/ /*! +@File +@Title PowerVR device type definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(PVRSRV_DEVICE_TYPES_H) +#define PVRSRV_DEVICE_TYPES_H + +#include "img_types.h" + +#define PVRSRV_MAX_DEVICES 16 /*!< Largest supported number of devices on the system */ + +#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__) +#define __pvrsrv_defined_struct_enum__ +#include <services_kernel_client.h> +#endif + +#endif /* PVRSRV_DEVICE_TYPES_H */ diff --git a/drivers/mcst/gpu-imgtec/include/pvrsrv_devvar.h b/drivers/mcst/gpu-imgtec/include/pvrsrv_devvar.h new file mode 100644 index 000000000000..19dc0a42089e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/pvrsrv_devvar.h @@ -0,0 +1,291 @@ +/*************************************************************************/ /*! +@File +@Title Services Device Variable interface header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Defines the client side interface for device variables +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PVRSRV_DEVVAR_H +#define PVRSRV_DEVVAR_H + +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include <powervr/pvrsrv_sync_ext.h> /* NOTE(review): angle-bracket include target was lost in extraction; best-guess restoration - verify against the upstream DDK source */ + +#if defined(__cplusplus) +extern "C" { +#endif + +#define DEVVAR_MAX_NAME_LEN 32 + +typedef struct _PVRSRV_DEVVARCTX_ *PDEVVARCTX; +typedef struct _PVRSRV_DEVVAR_ *PDEVVAR; + +typedef struct PVRSRV_DEV_VAR_UPDATE_TAG +{ + PDEVVAR psDevVar; /*!< Pointer to the dev var */ + IMG_UINT32 ui32UpdateValue; /*!< the update value */ +} PVRSRV_DEV_VAR_UPDATE; + +/*************************************************************************/ /*! 
+@Function PVRSRVDevVarContextCreate + +@Description Create a new device variable context + +@Input psDevConnection Device to create the device + variable context on + +@Output phDevVarContext Handle to the created device + variable context + +@Return PVRSRV_OK if the device variable context was successfully + created +*/ +/*****************************************************************************/ +IMG_EXPORT PVRSRV_ERROR +PVRSRVDevVarContextCreate(const PVRSRV_DEV_CONNECTION *psDevConnection, + PDEVVARCTX *phDevVarContext); + +/*************************************************************************/ /*! +@Function PVRSRVDevVarContextDestroy + +@Description Destroy a device variable context + +@Input hDevVarContext Handle to the device variable + context to destroy + +@Return None +*/ +/*****************************************************************************/ +IMG_EXPORT void +PVRSRVDevVarContextDestroy(PDEVVARCTX hDevVarContext); + +/*************************************************************************/ /*! +@Function PVRSRVDevVarAlloc + +@Description Allocate a new device variable on the specified device + variable context. The device variable's value is initialised + with the value passed in ui32InitialValue. 
+ +@Input hDevVarContext Handle to the device variable + context +@Input ui32InitialValue Value to initially assign to the + new variable +@Input pszDevVarName Name assigned to the device variable + (for debug purposes) + +@Output ppsDevVar Created device variable + +@Return PVRSRV_OK if the device variable was successfully created +*/ +/*****************************************************************************/ +IMG_EXPORT PVRSRV_ERROR +PVRSRVDevVarAllocI(PDEVVARCTX hDevVarContext, + PDEVVAR *ppsDevVar, + IMG_UINT32 ui32InitialValue, + const IMG_CHAR *pszDevVarName + PVR_DBG_FILELINE_PARAM); +#define PVRSRVDevVarAlloc(hDevVarContext, ppsDevVar, ui32InitialValue, pszDevVarName) \ + PVRSRVDevVarAllocI( (hDevVarContext), (ppsDevVar), (ui32InitialValue), (pszDevVarName) \ + PVR_DBG_FILELINE ) + +/*************************************************************************/ /*! +@Function PVRSRVDevVarFree + +@Description Free a device variable + +@Input psDevVar The device variable to free + +@Return None +*/ +/*****************************************************************************/ +IMG_EXPORT void +PVRSRVDevVarFree(PDEVVAR psDevVar); + +/*************************************************************************/ /*! +@Function PVRSRVDevVarSet + +@Description Set the device variable to a value + +@Input psDevVar The device variable to set + +@Input ui32Value Value to set it to + +@Return None +*/ +/*****************************************************************************/ +IMG_EXPORT void +PVRSRVDevVarSet(PDEVVAR psDevVar, + IMG_UINT32 ui32Value); + +/*************************************************************************/ /*! 
+@Function PVRSRVDevVarGet + +@Description Get the current value of the device variable + +@Input psDevVar The device variable to get the + value of + +@Return Value of the variable +*/ +/*****************************************************************************/ +IMG_EXPORT IMG_UINT32 +PVRSRVDevVarGet(PDEVVAR psDevVar); + +/*************************************************************************/ /*! +@Function PVRSRVDevVarGetFirmwareAddr + +@Description Returns the address of the associated firmware value for a + specified device integer (not exposed to client) + +@Input psDevVar The device variable to resolve + +@Return The firmware address of the device variable +*/ +/*****************************************************************************/ +IMG_EXPORT IMG_UINT32 +PVRSRVDevVarGetFirmwareAddr(PDEVVAR psDevVar); + +#if defined(PDUMP) +/*************************************************************************/ /*! +@Function PVRSRVDevVarPDump + +@Description PDump the current value of the device variable + +@Input psDevVar The device variable to PDump + +@Return None +*/ +/*****************************************************************************/ +IMG_EXPORT void +PVRSRVDevVarPDump(PDEVVAR psDevVar); + +/*************************************************************************/ /*! +@Function PVRSRVDevVarPDumpPol + +@Description Do a PDump poll of the device variable + +@Input psDevVar The device variable to PDump + +@Input ui32Value Value to Poll for + +@Input ui32Mask PDump mask operator + +@Input ui32PDumpFlags PDump flags + +@Return None +*/ +/*****************************************************************************/ +IMG_EXPORT void +PVRSRVDevVarPDumpPol(PDEVVAR psDevVar, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + IMG_UINT32 ui32PDumpFlags); + +/*************************************************************************/ /*! 
+@Function PVRSRVDevVarPDumpCBP + +@Description Do a PDump CB poll using the device variable + +@Input psDevVar The device variable to PDump + +@Input uiWriteOffset Current write offset of buffer + +@Input uiPacketSize Size of the packet to write into CB + +@Input uiBufferSize Size of the CB + +@Return None +*/ +/*****************************************************************************/ +IMG_EXPORT void +PVRSRVDevVarPDumpCBP(PDEVVAR psDevVar, + IMG_UINT64 uiWriteOffset, + IMG_UINT64 uiPacketSize, + IMG_UINT64 uiBufferSize); +#else /* PDUMP */ + +/* PDUMP support compiled out: the static INLINE stubs below are no-ops that + * consume their arguments, so callers can invoke the PDump helpers + * unconditionally without #if defined(PDUMP) guards at every call site. */ + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVDevVarPDump) +#endif +static INLINE void +PVRSRVDevVarPDump(PDEVVAR psDevVar) +{ + PVR_UNREFERENCED_PARAMETER(psDevVar); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVDevVarPDumpPol) +#endif +static INLINE void +PVRSRVDevVarPDumpPol(PDEVVAR psDevVar, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psDevVar); + PVR_UNREFERENCED_PARAMETER(ui32Value); + PVR_UNREFERENCED_PARAMETER(ui32Mask); + PVR_UNREFERENCED_PARAMETER(eOperator); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVDevVarPDumpCBP) +#endif +static INLINE void +PVRSRVDevVarPDumpCBP(PDEVVAR psDevVar, + IMG_UINT64 uiWriteOffset, + IMG_UINT64 uiPacketSize, + IMG_UINT64 uiBufferSize) +{ + PVR_UNREFERENCED_PARAMETER(psDevVar); + PVR_UNREFERENCED_PARAMETER(uiWriteOffset); + PVR_UNREFERENCED_PARAMETER(uiPacketSize); + PVR_UNREFERENCED_PARAMETER(uiBufferSize); +} +#endif /* PDUMP */ + +#if defined(__cplusplus) +} +#endif +#endif /* PVRSRV_DEVVAR_H */ diff --git a/drivers/mcst/gpu-imgtec/include/pvrsrv_error.h b/drivers/mcst/gpu-imgtec/include/pvrsrv_error.h new file mode 100644 index 000000000000..39d76e99cf5b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/pvrsrv_error.h @@ -0,0 +1,61 @@ +/*************************************************************************/ /*! 
+@File pvrsrv_error.h +@Title services error enumerant +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Defines error codes used by any/all services modules +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(PVRSRV_ERROR_H) +#define PVRSRV_ERROR_H + +/*! + ***************************************************************************** + * Error values + *****************************************************************************/ +typedef enum PVRSRV_ERROR +{ + PVRSRV_OK, +/* X-macro expansion: pvrsrv_errors.h holds one PVRE(name) entry per error + * code; defining PVRE(x) as "x," turns each entry into an enumerator here, + * numbered sequentially after PVRSRV_OK (0). */ +#define PVRE(x) x, +#include "pvrsrv_errors.h" +#undef PVRE + /* 0x7fffffff sentinel forces the enum to be at least 32 bits wide. */ + PVRSRV_ERROR_FORCE_I32 = 0x7fffffff + +} PVRSRV_ERROR; + +#endif /* !defined(PVRSRV_ERROR_H) */ diff --git a/drivers/mcst/gpu-imgtec/include/pvrsrv_errors.h b/drivers/mcst/gpu-imgtec/include/pvrsrv_errors.h new file mode 100644 index 000000000000..20bc04d77cab --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/pvrsrv_errors.h @@ -0,0 +1,406 @@ +/*************************************************************************/ /*! +@File pvrsrv_errors.h +@Title services error codes +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Defines error codes used by any/all services modules +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +/* Don't add include guards to this file! */ + +PVRE(PVRSRV_ERROR_OUT_OF_MEMORY) +PVRE(PVRSRV_ERROR_TOO_FEW_BUFFERS) +PVRE(PVRSRV_ERROR_INVALID_PARAMS) +PVRE(PVRSRV_ERROR_INIT_FAILURE) +PVRE(PVRSRV_ERROR_CANT_REGISTER_CALLBACK) +PVRE(PVRSRV_ERROR_INVALID_DEVICE) +PVRE(PVRSRV_ERROR_NOT_OWNER) +PVRE(PVRSRV_ERROR_BAD_MAPPING) +PVRE(PVRSRV_ERROR_TIMEOUT) +PVRE(PVRSRV_ERROR_NOT_IMPLEMENTED) +PVRE(PVRSRV_ERROR_FLIP_CHAIN_EXISTS) +PVRE(PVRSRV_ERROR_INVALID_SWAPINTERVAL) +PVRE(PVRSRV_ERROR_SCENE_INVALID) +PVRE(PVRSRV_ERROR_STREAM_ERROR) +PVRE(PVRSRV_ERROR_FAILED_DEPENDENCIES) +PVRE(PVRSRV_ERROR_CMD_NOT_PROCESSED) +PVRE(PVRSRV_ERROR_CMD_TOO_BIG) +PVRE(PVRSRV_ERROR_DEVICE_REGISTER_FAILED) +PVRE(PVRSRV_ERROR_TOOMANYBUFFERS) +PVRE(PVRSRV_ERROR_NOT_SUPPORTED) +PVRE(PVRSRV_ERROR_PROCESSING_BLOCKED) +PVRE(PVRSRV_ERROR_CANNOT_FLUSH_QUEUE) +PVRE(PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE) +PVRE(PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS) +PVRE(PVRSRV_ERROR_RETRY) +PVRE(PVRSRV_ERROR_DDK_VERSION_MISMATCH) +PVRE(PVRSRV_ERROR_DDK_BUILD_MISMATCH) +PVRE(PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH) +PVRE(PVRSRV_ERROR_BVNC_MISMATCH) +PVRE(PVRSRV_ERROR_FWPROCESSOR_MISMATCH) +PVRE(PVRSRV_ERROR_UPLOAD_TOO_BIG) +PVRE(PVRSRV_ERROR_INVALID_FLAGS) +PVRE(PVRSRV_ERROR_FAILED_TO_REGISTER_PROCESS) +PVRE(PVRSRV_ERROR_UNABLE_TO_LOAD_LIBRARY) +PVRE(PVRSRV_ERROR_UNABLE_GET_FUNC_ADDR) +PVRE(PVRSRV_ERROR_UNLOAD_LIBRARY_FAILED) +PVRE(PVRSRV_ERROR_BRIDGE_CALL_FAILED) +PVRE(PVRSRV_ERROR_IOCTL_CALL_FAILED) +PVRE(PVRSRV_ERROR_MMU_API_PROTOCOL_ERROR) +PVRE(PVRSRV_ERROR_MMU_CONFIG_IS_WRONG) +PVRE(PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND) +PVRE(PVRSRV_ERROR_MMU_FAILED_TO_ALLOCATE_PAGETABLES) +PVRE(PVRSRV_ERROR_MMU_FAILED_TO_CREATE_HEAP) +PVRE(PVRSRV_ERROR_MMU_FAILED_TO_MAP_PAGE_TABLE) +PVRE(PVRSRV_ERROR_MMU_FAILED_TO_UNMAP_PAGE_TABLE) +PVRE(PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE) +PVRE(PVRSRV_ERROR_MMU_LIVE_ALLOCATIONS_IN_HEAP) 
+PVRE(PVRSRV_ERROR_MMU_RESERVATION_NOT_INSIDE_HEAP) +PVRE(PVRSRV_ERROR_PMR_NEW_MEMORY) +PVRE(PVRSRV_ERROR_PMR_STILL_REFERENCED) +PVRE(PVRSRV_ERROR_PMR_CLIENT_NOT_TRUSTED) +PVRE(PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES) +PVRE(PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY) +PVRE(PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES) +PVRE(PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE) +PVRE(PVRSRV_ERROR_PMR_NOT_PERMITTED) +PVRE(PVRSRV_ERROR_PMR_ALREADY_OCCUPIED) +PVRE(PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR) +PVRE(PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR) +PVRE(PVRSRV_ERROR_PMR_WRONG_PMR_TYPE) +PVRE(PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS) +PVRE(PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE) +PVRE(PVRSRV_ERROR_PMR_BAD_CHUNK_SIZE) +PVRE(PVRSRV_ERROR_PMR_MAPPINGTABLE_MISMATCH) +PVRE(PVRSRV_ERROR_PMR_INVALID_CHUNK) +PVRE(PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING) +PVRE(PVRSRV_ERROR_PMR_EMPTY) +PVRE(PVRSRV_ERROR_PMR_NO_CPU_MAP_FOUND) +PVRE(PVRSRV_ERROR_PMR_CPU_PAGE_UNMAP_FAILED) +PVRE(PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED) +PVRE(PVRSRV_ERROR_PMR_PAGE_POISONING_FAILED) +PVRE(PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY) +PVRE(PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP) +PVRE(PVRSRV_ERROR_DEVICEMEM_BAD_IMPORT_SIZE) +PVRE(PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION) +PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX) +PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX) +PVRE(PVRSRV_ERROR_DEVICEMEM_MAP_FAILED) +PVRE(PVRSRV_ERROR_DEVICEMEM_NON_ZERO_USAGE_COUNT) +PVRE(PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE) +PVRE(PVRSRV_ERROR_DEVICEMEM_VA_ALLOC_FAILED) +PVRE(PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA) +PVRE(PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM) +PVRE(PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED) +PVRE(PVRSRV_ERROR_DEVICEMEM_NO_MAPPING) +PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS) +PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_LMA_HEAP) +PVRE(PVRSRV_ERROR_INVALID_MMU_TYPE) +PVRE(PVRSRV_ERROR_BUFFER_DEVICE_NOT_FOUND) +PVRE(PVRSRV_ERROR_BUFFER_DEVICE_ALREADY_PRESENT) +PVRE(PVRSRV_ERROR_PCI_DEVICE_NOT_FOUND) 
+PVRE(PVRSRV_ERROR_PCI_CALL_FAILED) +PVRE(PVRSRV_ERROR_PCI_REGION_TOO_SMALL) +PVRE(PVRSRV_ERROR_PCI_REGION_UNAVAILABLE) +PVRE(PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH) +PVRE(PVRSRV_ERROR_REGISTER_BASE_NOT_SET) +PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_USER_MEM) +PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_VP_MEMORY) +PVRE(PVRSRV_ERROR_FAILED_TO_MAP_SHARED_PBDESC) +PVRE(PVRSRV_ERROR_FAILED_TO_MAP_KERNELVIRTUAL) +PVRE(PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR) +PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY) +PVRE(PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY) +PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES) +PVRE(PVRSRV_ERROR_FAILED_TO_FREE_PAGES) +PVRE(PVRSRV_ERROR_FAILED_TO_COPY_PAGES) +PVRE(PVRSRV_ERROR_UNABLE_TO_LOCK_PAGES) +PVRE(PVRSRV_ERROR_UNABLE_TO_UNLOCK_PAGES) +PVRE(PVRSRV_ERROR_STILL_MAPPED) +PVRE(PVRSRV_ERROR_MAPPING_NOT_FOUND) +PVRE(PVRSRV_ERROR_PHYS_ADDRESS_EXCEEDS_32BIT) +PVRE(PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE) +PVRE(PVRSRV_ERROR_INVALID_SEGMENT_BLOCK) +PVRE(PVRSRV_ERROR_INVALID_GFXDEVDEVDATA) +PVRE(PVRSRV_ERROR_INVALID_DEVINFO) +PVRE(PVRSRV_ERROR_INVALID_MEMINFO) +PVRE(PVRSRV_ERROR_INVALID_MISCINFO) +PVRE(PVRSRV_ERROR_UNKNOWN_IOCTL) +PVRE(PVRSRV_ERROR_INVALID_CONTEXT) +PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT) +PVRE(PVRSRV_ERROR_INVALID_HEAP) +PVRE(PVRSRV_ERROR_INVALID_KERNELINFO) +PVRE(PVRSRV_ERROR_UNKNOWN_POWER_STATE) +PVRE(PVRSRV_ERROR_INVALID_HANDLE_TYPE) +PVRE(PVRSRV_ERROR_INVALID_WRAP_TYPE) +PVRE(PVRSRV_ERROR_INVALID_PHYS_ADDR) +PVRE(PVRSRV_ERROR_INVALID_CPU_ADDR) +PVRE(PVRSRV_ERROR_INVALID_HEAPINFO) +PVRE(PVRSRV_ERROR_INVALID_PERPROC) +PVRE(PVRSRV_ERROR_FAILED_TO_RETRIEVE_HEAPINFO) +PVRE(PVRSRV_ERROR_INVALID_MAP_REQUEST) +PVRE(PVRSRV_ERROR_INVALID_UNMAP_REQUEST) +PVRE(PVRSRV_ERROR_UNABLE_TO_FIND_MAPPING_HEAP) +PVRE(PVRSRV_ERROR_MAPPING_STILL_IN_USE) +PVRE(PVRSRV_ERROR_EXCEEDED_HW_LIMITS) +PVRE(PVRSRV_ERROR_NO_STAGING_BUFFER_ALLOCATED) +PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_PERPROC_AREA) +PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT) +PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT) 
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_EVENT) +PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT) +PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD) +PVRE(PVRSRV_ERROR_UNABLE_TO_CLOSE_THREAD) +PVRE(PVRSRV_ERROR_THREAD_READ_ERROR) +PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER) +PVRE(PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR) +PVRE(PVRSRV_ERROR_UNABLE_TO_UNINSTALL_ISR) +PVRE(PVRSRV_ERROR_ISR_ALREADY_INSTALLED) +PVRE(PVRSRV_ERROR_ISR_NOT_INSTALLED) +PVRE(PVRSRV_ERROR_UNABLE_TO_INITIALISE_INTERRUPT) +PVRE(PVRSRV_ERROR_UNABLE_TO_RETRIEVE_INFO) +PVRE(PVRSRV_ERROR_UNABLE_TO_DO_BACKWARDS_BLIT) +PVRE(PVRSRV_ERROR_UNABLE_TO_CLOSE_SERVICES) +PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT) +PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE) +PVRE(PVRSRV_ERROR_INVALID_CCB_COMMAND) +PVRE(PVRSRV_ERROR_KERNEL_CCB_FULL) +PVRE(PVRSRV_ERROR_FLIP_FAILED) +PVRE(PVRSRV_ERROR_UNBLANK_DISPLAY_FAILED) +PVRE(PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE) +PVRE(PVRSRV_ERROR_TIMEOUT_WAITING_FOR_CLIENT_CCB) +PVRE(PVRSRV_ERROR_CREATE_RENDER_CONTEXT_FAILED) +PVRE(PVRSRV_ERROR_UNKNOWN_PRIMARY_FRAG) +PVRE(PVRSRV_ERROR_UNEXPECTED_SECONDARY_FRAG) +PVRE(PVRSRV_ERROR_UNEXPECTED_PRIMARY_FRAG) +PVRE(PVRSRV_ERROR_UNABLE_TO_INSERT_FENCE_ID) +PVRE(PVRSRV_ERROR_BLIT_SETUP_FAILED) +PVRE(PVRSRV_ERROR_SUBMIT_NEEDED) +PVRE(PVRSRV_ERROR_PDUMP_NOT_AVAILABLE) +PVRE(PVRSRV_ERROR_PDUMP_BUFFER_FULL) +PVRE(PVRSRV_ERROR_PDUMP_BUF_OVERFLOW) +PVRE(PVRSRV_ERROR_PDUMP_NOT_ACTIVE) +PVRE(PVRSRV_ERROR_INCOMPLETE_LINE_OVERLAPS_PAGES) +PVRE(PVRSRV_ERROR_MUTEX_DESTROY_FAILED) +PVRE(PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR) +PVRE(PVRSRV_ERROR_INSUFFICIENT_SPACE_FOR_COMMAND) +PVRE(PVRSRV_ERROR_PROCESS_NOT_INITIALISED) +PVRE(PVRSRV_ERROR_PROCESS_NOT_FOUND) +PVRE(PVRSRV_ERROR_SRV_CONNECT_FAILED) +PVRE(PVRSRV_ERROR_SRV_DISCONNECT_FAILED) +PVRE(PVRSRV_ERROR_DEINT_PHASE_FAILED) +PVRE(PVRSRV_ERROR_INIT2_PHASE_FAILED) +PVRE(PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE) +PVRE(PVRSRV_ERROR_NO_DC_DEVICES_FOUND) +PVRE(PVRSRV_ERROR_DC_DEVICE_INACCESSIBLE) 
+PVRE(PVRSRV_ERROR_DC_INVALID_MAXDEPTH) +PVRE(PVRSRV_ERROR_UNABLE_TO_OPEN_DC_DEVICE) +PVRE(PVRSRV_ERROR_UNABLE_TO_UNREGISTER_DEVICE) +PVRE(PVRSRV_ERROR_NO_DEVICEDATA_FOUND) +PVRE(PVRSRV_ERROR_NO_DEVICENODE_FOUND) +PVRE(PVRSRV_ERROR_NO_CLIENTNODE_FOUND) +PVRE(PVRSRV_ERROR_FAILED_TO_PROCESS_QUEUE) +PVRE(PVRSRV_ERROR_UNABLE_TO_INIT_TASK) +PVRE(PVRSRV_ERROR_UNABLE_TO_SCHEDULE_TASK) +PVRE(PVRSRV_ERROR_UNABLE_TO_KILL_TASK) +PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_TIMER) +PVRE(PVRSRV_ERROR_UNABLE_TO_DISABLE_TIMER) +PVRE(PVRSRV_ERROR_UNABLE_TO_REMOVE_TIMER) +PVRE(PVRSRV_ERROR_UNKNOWN_PIXEL_FORMAT) +PVRE(PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE) +PVRE(PVRSRV_ERROR_HANDLE_NOT_ALLOCATED) +PVRE(PVRSRV_ERROR_HANDLE_TYPE_MISMATCH) +PVRE(PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE) +PVRE(PVRSRV_ERROR_HANDLE_NOT_SHAREABLE) +PVRE(PVRSRV_ERROR_HANDLE_NOT_FOUND) +PVRE(PVRSRV_ERROR_INVALID_SUBHANDLE) +PVRE(PVRSRV_ERROR_HANDLE_BATCH_IN_USE) +PVRE(PVRSRV_ERROR_HANDLE_BATCH_COMMIT_FAILURE) +PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE) +PVRE(PVRSRV_ERROR_UNABLE_TO_RETRIEVE_HASH_VALUE) +PVRE(PVRSRV_ERROR_UNABLE_TO_REMOVE_HASH_VALUE) +PVRE(PVRSRV_ERROR_UNABLE_TO_INSERT_HASH_VALUE) +PVRE(PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED) +PVRE(PVRSRV_ERROR_UNSUPPORTED_BACKING_STORE) +PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_BM_HEAP) +PVRE(PVRSRV_ERROR_UNKNOWN_INIT_SERVER_STATE) +PVRE(PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVAILABLE) +PVRE(PVRSRV_ERROR_INVALID_DEVICEID) +PVRE(PVRSRV_ERROR_DEVICEID_NOT_FOUND) +PVRE(PVRSRV_ERROR_MEMORY_TEST_FAILED) +PVRE(PVRSRV_ERROR_CPUPADDR_TEST_FAILED) +PVRE(PVRSRV_ERROR_COPY_TEST_FAILED) +PVRE(PVRSRV_ERROR_SEMAPHORE_NOT_INITIALISED) +PVRE(PVRSRV_ERROR_UNABLE_TO_RELEASE_CLOCK) +PVRE(PVRSRV_ERROR_CLOCK_REQUEST_FAILED) +PVRE(PVRSRV_ERROR_DISABLE_CLOCK_FAILURE) +PVRE(PVRSRV_ERROR_UNABLE_TO_SET_CLOCK_RATE) +PVRE(PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE) +PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK) +PVRE(PVRSRV_ERROR_UNABLE_TO_GET_CLOCK) +PVRE(PVRSRV_ERROR_UNABLE_TO_GET_PARENT_CLOCK) 
+PVRE(PVRSRV_ERROR_UNABLE_TO_GET_SYSTEM_CLOCK) +PVRE(PVRSRV_ERROR_UNKNOWN_SGL_ERROR) +PVRE(PVRSRV_ERROR_SYSTEM_POWER_CHANGE_FAILURE) +PVRE(PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE) +PVRE(PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED) +PVRE(PVRSRV_ERROR_BAD_SYNC_STATE) +PVRE(PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE) +PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_MMUCONTEXT_ID) +PVRE(PVRSRV_ERROR_PARAMETER_BUFFER_INVALID_ALIGNMENT) +PVRE(PVRSRV_ERROR_UNABLE_TO_ACQUIRE_CONNECTION) +PVRE(PVRSRV_ERROR_UNABLE_TO_RELEASE_CONNECTION) +PVRE(PVRSRV_ERROR_PHYSHEAP_ID_IN_USE) +PVRE(PVRSRV_ERROR_PHYSHEAP_ID_INVALID) +PVRE(PVRSRV_ERROR_HP_REQUEST_TOO_LONG) +PVRE(PVRSRV_ERROR_INVALID_SYNC_PRIM) +PVRE(PVRSRV_ERROR_INVALID_SYNC_PRIM_OP) +PVRE(PVRSRV_ERROR_INVALID_SYNC_CONTEXT) +PVRE(PVRSRV_ERROR_BP_NOT_SET) +PVRE(PVRSRV_ERROR_BP_ALREADY_SET) +PVRE(PVRSRV_ERROR_FEATURE_DISABLED) +PVRE(PVRSRV_ERROR_REG_CONFIG_ENABLED) +PVRE(PVRSRV_ERROR_REG_CONFIG_FULL) +PVRE(PVRSRV_ERROR_REG_CONFIG_INVALID_TYPE) +PVRE(PVRSRV_ERROR_MEMORY_ACCESS) +PVRE(PVRSRV_ERROR_NO_SYSTEM_BUFFER) +PVRE(PVRSRV_ERROR_DC_INVALID_CONFIG) +PVRE(PVRSRV_ERROR_DC_INVALID_CROP_RECT) +PVRE(PVRSRV_ERROR_DC_INVALID_DISPLAY_RECT) +PVRE(PVRSRV_ERROR_DC_INVALID_BUFFER_DIMS) +PVRE(PVRSRV_ERROR_DC_INVALID_TRANSFORM) +PVRE(PVRSRV_ERROR_DC_INVALID_SCALE) +PVRE(PVRSRV_ERROR_DC_INVALID_CUSTOM) +PVRE(PVRSRV_ERROR_DC_TOO_MANY_PIPES) +PVRE(PVRSRV_ERROR_DC_INVALID_PLANE_ALPHA) +PVRE(PVRSRV_ERROR_NOT_READY) +PVRE(PVRSRV_ERROR_RESOURCE_UNAVAILABLE) +PVRE(PVRSRV_ERROR_UNSUPPORTED_PIXEL_FORMAT) +PVRE(PVRSRV_ERROR_UNSUPPORTED_MEMORY_LAYOUT) +PVRE(PVRSRV_ERROR_UNSUPPORTED_FB_COMPRESSION_MODE) +PVRE(PVRSRV_ERROR_UNSUPPORTED_DIMS) +PVRE(PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE) +PVRE(PVRSRV_ERROR_UNABLE_TO_ADD_TIMER) +PVRE(PVRSRV_ERROR_NOT_FOUND) +PVRE(PVRSRV_ERROR_ALREADY_OPEN) +PVRE(PVRSRV_ERROR_STREAM_MISUSE) +PVRE(PVRSRV_ERROR_STREAM_FULL) +PVRE(PVRSRV_ERROR_STREAM_READLIMIT_REACHED) +PVRE(PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE) +PVRE(PVRSRV_ERROR_PHYSMEM_NOT_ALLOCATED) 
+PVRE(PVRSRV_ERROR_PBSIZE_ALREADY_MAX) +PVRE(PVRSRV_ERROR_PBSIZE_ALREADY_MIN) +PVRE(PVRSRV_ERROR_INVALID_PB_CONFIG) +PVRE(PVRSRV_ERROR_META_THREAD0_NOT_ENABLED) +PVRE(PVRSRV_ERROR_NOT_AUTHENTICATED) +PVRE(PVRSRV_ERROR_REQUEST_TDFWMEM_PAGES_FAIL) +PVRE(PVRSRV_ERROR_INIT_TDFWMEM_PAGES_FAIL) +PVRE(PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL) +PVRE(PVRSRV_ERROR_INIT_TDSECUREBUF_PAGES_FAIL) +PVRE(PVRSRV_ERROR_MUTEX_ALREADY_CREATED) +PVRE(PVRSRV_ERROR_DBGTABLE_ALREADY_REGISTERED) +PVRE(PVRSRV_ERROR_ALREADY_EXISTS) +PVRE(PVRSRV_ERROR_UNABLE_TO_SEND_PULSE) +PVRE(PVRSRV_ERROR_TASK_FAILED) +PVRE(PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED) +PVRE(PVRSRV_ERROR_INVALID_GPU_ADDR) +PVRE(PVRSRV_ERROR_INVALID_OFFSET) +PVRE(PVRSRV_ERROR_CCCB_STALLED) +PVRE(PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE) +PVRE(PVRSRV_ERROR_NOT_ENABLED) +PVRE(PVRSRV_ERROR_SYSTEM_LOCAL_MEMORY_INIT_FAIL) +PVRE(PVRSRV_ERROR_FW_IMAGE_MISMATCH) +PVRE(PVRSRV_ERROR_PDUMP_NOT_ALLOWED) +PVRE(PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL) +PVRE(PVRSRV_ERROR_RPM_PBSIZE_ALREADY_MAX) +PVRE(PVRSRV_ERROR_NONZERO_REFCOUNT) +PVRE(PVRSRV_ERROR_SETAFFINITY_FAILED) +PVRE(PVRSRV_ERROR_UNABLE_TO_COMPILE_PDS) +PVRE(PVRSRV_ERROR_INTERNAL_ERROR) +PVRE(PVRSRV_ERROR_BRIDGE_EFAULT) +PVRE(PVRSRV_ERROR_BRIDGE_EINVAL) +PVRE(PVRSRV_ERROR_BRIDGE_ENOMEM) +PVRE(PVRSRV_ERROR_BRIDGE_ERANGE) +PVRE(PVRSRV_ERROR_BRIDGE_EPERM) +PVRE(PVRSRV_ERROR_BRIDGE_ENOTTY) +PVRE(PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED) +PVRE(PVRSRV_ERROR_PROBE_DEFER) +PVRE(PVRSRV_ERROR_INVALID_ALIGNMENT) +PVRE(PVRSRV_ERROR_CLOSE_FAILED) +PVRE(PVRSRV_ERROR_NOT_INITIALISED) +PVRE(PVRSRV_ERROR_CONVERSION_FAILED) +PVRE(PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL) +PVRE(PVRSRV_ERROR_RA_REQUEST_VIRT_ADDR_FAIL) +PVRE(PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED) +PVRE(PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED) +PVRE(PVRSRV_ERROR_OBJECT_STILL_REFERENCED) +PVRE(PVRSRV_ERROR_BVNC_UNSUPPORTED) +PVRE(PVRSRV_ERROR_INVALID_BVNC_PARAMS) +PVRE(PVRSRV_ERROR_ALIGNMENT_ARRAY_NOT_AVAILABLE) 
+PVRE(PVRSRV_ERROR_DEVICEMEM_ADDITIONAL_HEAPS_IN_CONTEXT) +PVRE(PVRSRV_ERROR_PID_ALREADY_REGISTERED) +PVRE(PVRSRV_ERROR_PID_NOT_REGISTERED) +PVRE(PVRSRV_ERROR_SIGNAL_FAILED) +PVRE(PVRSRV_ERROR_INVALID_NOTIF_STREAM) +PVRE(PVRSRV_ERROR_INVALID_SPU_MASK) +PVRE(PVRSRV_ERROR_FREELIST_RECONSTRUCTION_FAILED) +PVRE(PVRSRV_ERROR_INVALID_PVZ_CONFIG) +PVRE(PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED) +PVRE(PVRSRV_ERROR_NOT_SW_TIMELINE) +PVRE(PVRSRV_ERROR_SW_TIMELINE_AT_LATEST_POINT) +PVRE(PVRSRV_ERROR_INVALID_PVZ_OSID) +PVRE(PVRSRV_ERROR_PVZ_OSID_IS_OFFLINE) +PVRE(PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG) +PVRE(PVRSRV_ERROR_INTERRUPTED) +PVRE(PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) +PVRE(PVRSRV_ERROR_PDUMP_INVALID_BLOCKLEN) +PVRE(PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF) +PVRE(PVRSRV_ERROR_MULTIPLE_SECURITY_PDUMPS) +PVRE(PVRSRV_ERROR_BAD_PARAM_SIZE) +PVRE(PVRSRV_ERROR_INVALID_REQUEST) +PVRE(PVRSRV_ERROR_FAILED_TO_ACQUIRE_PAGES) +PVRE(PVRSRV_ERROR_TEST_FAILED) +PVRE(PVRSRV_ERROR_SYNC_PRIM_OP_NOT_SUPPORTED) +PVRE(PVRSRV_ERROR_FAILED_TO_GET_VIRT_ADDR) +PVRE(PVRSRV_ERROR_UNABLE_TO_FREE_RESOURCE) +PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_SEMAPHORE) +PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_SEMAPHORE) +PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_SEMAPHORE) +PVRE(PVRSRV_ERROR_TOO_MANY_SYNCS) +PVRE(PVRSRV_ERROR_ION_NO_CLIENT) +PVRE(PVRSRV_ERROR_ION_FAILED_TO_ALLOC) diff --git a/drivers/mcst/gpu-imgtec/include/pvrsrv_memallocflags.h b/drivers/mcst/gpu-imgtec/include/pvrsrv_memallocflags.h new file mode 100644 index 000000000000..d5fb9a67e280 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/pvrsrv_memallocflags.h @@ -0,0 +1,947 @@ +/*************************************************************************/ /*! +@File +@Title Device Memory Management +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description This file defines flags used on memory allocations and mappings + These flags are relevant throughout the memory management + software stack and are specified by users of services and + understood by all levels of the memory management in both + client and server. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PVRSRV_MEMALLOCFLAGS_H +#define PVRSRV_MEMALLOCFLAGS_H + +#include "img_types.h" +#include "rgx_memallocflags.h" + +/*! + Type for specifying memory allocation flags. + */ +#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_SECURITY_VALIDATION) +typedef IMG_UINT64 PVRSRV_MEMALLOCFLAGS_T; +#define PVRSRV_MEMALLOCFLAGS_FMTSPEC IMG_UINT64_FMTSPECx +#else +typedef IMG_UINT32 PVRSRV_MEMALLOCFLAGS_T; +#define PVRSRV_MEMALLOCFLAGS_FMTSPEC "x" +#endif + + +/* + * --- MAPPING FLAGS 0..14 (15-bits) --- + * | 0-3 | 4-7 | 8-10 | 11-13 | 14 | + * | GPU-RW | CPU-RW | GPU-Caching | CPU-Caching | KM-Mappable | + * + * --- MISC FLAGS 15..23 (9-bits) --- + * | 15 | 16 | 17 | 18 | 19 | 20 | 21-22 | 23-25 | + * | Defer | CPU-Local | SVM | Sparse-Dummy-Page | CPU-Cache-Clean | Sparse-Zero-Page | Fw-Alloc-Type | OS-id | + * + * --- DEV CONTROL FLAGS 26..27 (2-bits) --- + * | 26-27 | + * | Device-Flags | + * + * --- MISC FLAGS 28..31 (4-bits) --- + * | 28 | 29 | 30 | 31 | + * | No-Cache-Align | Poison-On-Free | P.-On-Alloc | Zero-On-Alloc | + * + * --- VALIDATION FLAGS --- + * | 32 | 33 | 34 | + * | Secure-FW-code | Secure-FW-data | Secure-buffer | + * + */ + +/* + * ********************************************************** + * * * + * * MAPPING FLAGS * + * * * + * ********************************************************** + */ + +/*! 
+ * This flag affects the device MMU protection flags, and specifies + * that the memory may be read by the GPU. + * + * Typically all device memory allocations would specify this flag. + * + * At the moment, memory allocations without this flag are not supported + * + * This flag will live with the PMR, thus subsequent mappings would + * honour this flag. + * + * This is a dual purpose flag. It specifies that memory is permitted + * to be read by the GPU, and also requests that the allocation is + * mapped into the GPU as a readable mapping + * + * To be clear: + * - When used as an argument on PMR creation; it specifies + * that GPU readable mappings will be _permitted_ + * - When used as an argument to a "map" function: it specifies + * that a GPU readable mapping is _desired_ + * - When used as an argument to "AllocDeviceMem": it specifies + * that the PMR will be created with permission to be mapped + * with a GPU readable mapping, _and_ that this PMR will be + * mapped with a GPU readable mapping. + * This distinction becomes important when (a) we export allocations; + * and (b) when we separate the creation of the PMR from the mapping. + */ +#define PVRSRV_MEMALLOCFLAG_GPU_READABLE (1U<<0) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_READABLE flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_GPU_READABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_READABLE) != 0) + +/*! + * This flag affects the device MMU protection flags, and specifies + * that the memory may be written by the GPU + * + * Using this flag on an allocation signifies that the allocation is + * intended to be written by the GPU. + * + * Omitting this flag causes a read-only mapping. + * + * This flag will live with the PMR, thus subsequent mappings would + * honour this flag. + * + * This is a dual purpose flag. 
It specifies that memory is permitted + * to be written by the GPU, and also requests that the allocation is + * mapped into the GPU as a writable mapping (see note above about + * permission vs. mapping mode, and why this flag causes permissions + * to be inferred from mapping mode on first allocation) + * + * N.B. This flag has no relevance to the CPU's MMU mapping, if any, + * and would therefore not enforce read-only mapping on CPU. + */ +#define PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE (1U<<1) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_GPU_WRITEABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE) != 0) + +/*! + The flag indicates whether an allocation can be mapped as GPU readable in another GPU memory context. + */ +#define PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED (1U<<2) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_GPU_READ_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED) != 0) + +/*! + The flag indicates whether an allocation can be mapped as GPU writable in another GPU memory context. + */ +#define PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED (1U<<3) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_GPU_WRITE_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED) != 0) + +/*! + The flag indicates that an allocation is mapped as readable to the CPU. + */ +#define PVRSRV_MEMALLOCFLAG_CPU_READABLE (1U<<4) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_READABLE flag is set. 
+ @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_CPU_READABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_READABLE) != 0) + +/*! + The flag indicates that an allocation is mapped as writable to the CPU. + */ +#define PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE (1U<<5) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_CPU_WRITEABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE) != 0) + +/*! + The flag indicates whether an allocation can be mapped as CPU readable in another CPU memory context. + */ +#define PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED (1U<<6) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_CPU_READ_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED) != 0) + +/*! + The flag indicates whether an allocation can be mapped as CPU writable in another CPU memory context. + */ +#define PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED (1U<<7) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_CPU_WRITE_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED) != 0) + + +/* + * ********************************************************** + * * * + * * CACHE CONTROL FLAGS * + * * * + * ********************************************************** + */ + +/* + GPU domain + ========== + + The following defines are used to control the GPU cache bit field. + The defines are mutually exclusive. 
+ + A helper macro, PVRSRV_GPU_CACHE_MODE, is provided to obtain just the GPU + cache bit field from the flags. This should be used whenever the GPU cache + mode needs to be determined. +*/ + +/*! + GPU domain. Flag indicating uncached memory. This means that any writes to memory + allocated with this flag are written straight to memory and thus are + coherent for any device in the system. +*/ +#define PVRSRV_MEMALLOCFLAG_GPU_UNCACHED (1U<<8) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_UNCACHED mode is set. + @Input uiFlags Allocation flags. + @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_GPU_UNCACHED(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_UNCACHED) + +/*! + GPU domain. Use write combiner (if supported) to combine sequential writes + together to reduce memory access by doing burst writes. +*/ +#define PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE (0U<<8) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE mode is set. + @Input uiFlags Allocation flags. + @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_GPU_WRITE_COMBINE(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE) + +/*! + GPU domain. This flag affects the GPU MMU protection flags. + The allocation will be cached. + Services will try to set the coherent bit in the GPU MMU tables so the + GPU cache is snooping the CPU cache. If coherency is not supported the + caller is responsible to ensure the caches are up to date. +*/ +#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT (2U<<8) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT mode is set. + @Input uiFlags Allocation flags. + @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT) + +/*! + GPU domain. 
Request cached memory, but not coherent (i.e. no cache + snooping). Services will flush the GPU internal caches after every GPU + task so no cache maintenance requests from the users are necessary. + + Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future + expansion. +*/ +#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT (3U<<8) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT mode is set. + @Input uiFlags Allocation flags. + @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT) + +/*! + GPU domain. This flag is for internal use only and is used to indicate + that the underlying allocation should be cached on the GPU after all + the snooping and coherent checks have been done +*/ +#define PVRSRV_MEMALLOCFLAG_GPU_CACHED (7U<<8) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHED mode is set. + @Input uiFlags Allocation flags. + @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_GPU_CACHED(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHED) + +/*! + GPU domain. GPU cache mode mask. +*/ +#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK (7U<<8) + +/*! + @Description A helper macro to obtain just the GPU cache bit field from the flags. + This should be used whenever the GPU cache mode needs to be determined. + @Input uiFlags Allocation flags. + @Return Value of the GPU cache bit field. + */ +#define PVRSRV_GPU_CACHE_MODE(uiFlags) ((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK) + + +/* + CPU domain + ========== + + The following defines are used to control the CPU cache bit field. + The defines are mutually exclusive. + + A helper macro, PVRSRV_CPU_CACHE_MODE, is provided to obtain just the CPU + cache bit field from the flags. This should be used whenever the CPU cache + mode needs to be determined. 
+*/ + +/*! + CPU domain. Request uncached memory. This means that any writes to memory + allocated with this flag are written straight to memory and thus are + coherent for any device in the system. +*/ +#define PVRSRV_MEMALLOCFLAG_CPU_UNCACHED (1U<<11) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_UNCACHED mode is set. + @Input uiFlags Allocation flags. + @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_CPU_UNCACHED(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_UNCACHED) + +/*! + CPU domain. Use write combiner (if supported) to combine sequential writes + together to reduce memory access by doing burst writes. +*/ +#define PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE (0U<<11) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE mode is set. + @Input uiFlags Allocation flags. + @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE) + +/*! + CPU domain. This flag affects the CPU MMU protection flags. + The allocation will be cached. + Services will try to set the coherent bit in the CPU MMU tables so the + CPU cache is snooping the GPU cache. If coherency is not supported the + caller is responsible to ensure the caches are up to date. +*/ +#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT (2U<<11) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT mode is set. + @Input uiFlags Allocation flags. + @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) + +/*! + CPU domain. Request cached memory, but not coherent (i.e. no cache + snooping). 
This means that if the allocation needs to transition from + one device to another services has to be informed so it can + flush/invalidate the appropriate caches. + + Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future + expansion. +*/ +#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT (3U<<11) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT mode is set. + @Input uiFlags Allocation flags. + @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT) + +/*! + CPU domain. This flag is for internal use only and is used to indicate + that the underlying allocation should be cached on the CPU + after all the snooping and coherent checks have been done +*/ +#define PVRSRV_MEMALLOCFLAG_CPU_CACHED (7U<<11) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHED mode is set. + @Input uiFlags Allocation flags. + @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_CPU_CACHED(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHED) + +/*! + CPU domain. CPU cache mode mask +*/ +#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK (7U<<11) + +/*! + @Description A helper macro to obtain just the CPU cache bit field from the flags. + This should be used whenever the CPU cache mode needs to be determined. + @Input uiFlags Allocation flags. + @Return Value of the CPU cache bit field. + */ +#define PVRSRV_CPU_CACHE_MODE(uiFlags) ((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK) + +/* Helper flags for usual cases */ + +/*! + * Memory will be uncached on CPU and GPU + */ +#define PVRSRV_MEMALLOCFLAG_UNCACHED (PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_UNCACHED mode is set. + @Input uiFlags Allocation flags. 
+ @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_UNCACHED(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_UNCACHED) + +/*! + * Memory will be write-combined on CPU and GPU + */ +#define PVRSRV_MEMALLOCFLAG_WRITE_COMBINE (PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE | PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_WRITE_COMBINE mode is set. + @Input uiFlags Allocation flags. + @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_WRITE_COMBINE(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_WRITE_COMBINE) + +/*! + * Memory will be cached. + * Services will try to set the correct flags in the MMU tables. + * In case there is no coherency support the caller has to ensure caches are up to date */ +#define PVRSRV_MEMALLOCFLAG_CACHE_COHERENT (PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT | PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CACHE_COHERENT mode is set. + @Input uiFlags Allocation flags. + @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_CACHE_COHERENT(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CACHE_COHERENT) + +/*! + * Memory will be cache-incoherent on CPU and GPU + */ +#define PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT (PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT mode is set. + @Input uiFlags Allocation flags. + @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_CACHE_INCOHERENT(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT) + +/*! + Cache mode mask +*/ +#define PVRSRV_CACHE_MODE(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) | PVRSRV_CPU_CACHE_MODE(uiFlags)) + + +/*! 
+ CPU MMU Flags mask -- intended for use internal to services only + */ +#define PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK (PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK) + +/*! + MMU Flags mask -- intended for use internal to services only - used for + partitioning the flags bits and determining which flags to pass down to + mmu_common.c + */ +#define PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK (PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK) + +/*! + Indicates that the PMR created due to this allocation will support + in-kernel CPU mappings. Only privileged processes may use this flag as + it may cause wastage of precious kernel virtual memory on some platforms. + */ +#define PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE (1U<<14) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE) != 0) + + + +/* + * + * ********************************************************** + * * * + * * ALLOC MEMORY FLAGS * + * * * + * ********************************************************** + * + * (Bits 15) + * + */ +#define PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC (1U<<15) +#define PVRSRV_CHECK_ON_DEMAND(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) != 0) + +/*! + Indicates that the allocation will primarily be accessed by the CPU, so + a UMA allocation (if available) is preferable. If not set, the allocation + will primarily be accessed by the GPU, so LMA allocation (if available) + is preferable. + */ +#define PVRSRV_MEMALLOCFLAG_CPU_LOCAL (1U<<16) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_LOCAL flag is set. + @Input uiFlags Allocation flags. 
+ @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_CPU_LOCAL(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_LOCAL) != 0) + +/*! + Indicates that the allocation will be accessed by the CPU and GPU using + the same virtual address, i.e. for all SVM allocs, + IMG_CPU_VIRTADDR == IMG_DEV_VIRTADDR + */ +#define PVRSRV_MEMALLOCFLAG_SVM_ALLOC (1U<<17) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_SVM_ALLOC flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_SVM_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_SVM_ALLOC) != 0) + +/*! + Indicates the particular memory that's being allocated is sparse and the + sparse regions should not be backed by dummy page +*/ +#define PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING (1U << 18) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING) == 0) + +/*! + Services is going to clean the cache for the allocated memory. + For performance reasons avoid usage if allocation is written to by the + CPU anyway before the next GPU kick. + */ +#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN (1U<<19) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN) != 0) + +/*! PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING + + Indicates the particular memory that's being allocated is sparse and the + sparse regions should be backed by zero page. 
This is different with + zero on alloc flag such that only physically unbacked pages are backed + by zero page at the time of mapping. + The zero backed page is always with read only attribute irrespective of its + original attributes. +*/ +#define PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING (1U << 20) +#define PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiFlags) (((uiFlags) & \ + PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING) == PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING) + +/*! + Determines if and how an allocation will be accessed by the firmware. + */ +#define PVRSRV_MEMALLOCFLAG_FW_ALLOC_TYPE_SHIFT (21) +#define PVRSRV_MEMALLOCFLAG_FW_ALLOC_TYPE_MASK (3U << PVRSRV_MEMALLOCFLAG_FW_ALLOC_TYPE_SHIFT) + +/*! + @Description Macro extracting the fw allocation type from a variable containing memalloc flags + @Input uiFlags Allocation flags + @Return returns the value of the FW_ALLOC_TYPE bitfield + */ +#define PVRSRV_FW_ALLOC_TYPE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_FW_ALLOC_TYPE_MASK) \ + >> PVRSRV_MEMALLOCFLAG_FW_ALLOC_TYPE_SHIFT) + +/*! This value indicates that the allocation does not require firmware access */ +#define PVRSRV_MEMALLOCFLAG_FW_ALLOC_NO_FW_ACCESS (0U << PVRSRV_MEMALLOCFLAG_FW_ALLOC_TYPE_SHIFT) + +/*! This value indicates that the allocation is from the firmware's Main heap */ +#define PVRSRV_MEMALLOCFLAG_FW_ALLOC_MAIN (1U << PVRSRV_MEMALLOCFLAG_FW_ALLOC_TYPE_SHIFT) + +/*! This value indicates that the allocation is from the firmware's Config heap */ +#define PVRSRV_MEMALLOCFLAG_FW_ALLOC_CONFIG (2U << PVRSRV_MEMALLOCFLAG_FW_ALLOC_TYPE_SHIFT) + +/*! This value indicates that the allocation is from the firmware's Raw heap */ +#define PVRSRV_MEMALLOCFLAG_FW_ALLOC_RAW (3U << PVRSRV_MEMALLOCFLAG_FW_ALLOC_TYPE_SHIFT) + +/*! + @Description Macro checking if firmware access to the allocation is possible + @Input uiFlags Allocation flags + @Return True if the allocation is firmware local (i.e. 
fw main, config or raw) + */ +#define PVRSRV_CHECK_FW_LOCAL(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_FW_ALLOC_TYPE_MASK) > \ + PVRSRV_MEMALLOCFLAG_FW_ALLOC_NO_FW_ACCESS) + +/*! + * The privileged OSID 0 driver (i.e. host/primary) can map allocations + * on behalf of an unprivileged drivers (i.e. OSIDs 1 up to 7). + * This value indicates which OSid this allocation is for. + */ +#define PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_SHIFT (23) +#define PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_MASK (7U << 23) + +/*! + @Description Macro extracting the OS id from a variable containing memalloc flags + @Input uiFlags Allocation flags + @Return returns the value of the FW_ALLOC_OSID bitfield + */ +#define PVRSRV_FW_RAW_ALLOC_OSID(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_MASK) \ + >> PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_SHIFT) + +/*! + @Description Macro converting an OS id value into a memalloc bitfield + @Input uiFlags OS id + @Return returns a shifted bitfield with the OS id value + */ +#define PVRSRV_MEMALLOCFLAG_FW_RAW_ALLOC_OSID(osid) (((osid) << PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_SHIFT) \ + & PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_MASK) \ + +/* + * + * ********************************************************** + * * * + * * MEMORY ZEROING AND POISONING FLAGS * + * * * + * ********************************************************** + * + * Zero / Poison, on alloc/free + * + * We think the following usecases are required: + * + * don't poison or zero on alloc or free + * (normal operation, also most efficient) + * poison on alloc + * (for helping to highlight bugs) + * poison on alloc and free + * (for helping to highlight bugs) + * zero on alloc + * (avoid highlighting security issues in other uses of memory) + * zero on alloc and poison on free + * (avoid highlighting security issues in other uses of memory, while + * helping to highlight a subset of bugs e.g. 
memory freed prematurely) + * + * Since there are more than 4, we can't encode this in just two bits, + * so we might as well have a separate flag for each of the three + * actions. + */ + +/*! + Ensures that the memory allocated is initialised with zeroes. + */ +#define PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC (1U<<31) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) != 0) + +/*! + Scribbles over the allocated memory with a poison value + + Not compatible with ZERO_ON_ALLOC + + Poisoning is very deliberately _not_ reflected in PDump as we want + a simulation to cry loudly if the initialised data propagates to a + result. + */ +#define PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC (1U<<30) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC) != 0) + +/*! + Causes memory to be trashed when freed, as a lazy man's security measure. + */ +#define PVRSRV_MEMALLOCFLAG_POISON_ON_FREE (1U<<29) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_POISON_ON_FREE flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_POISON_ON_FREE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_POISON_ON_FREE) != 0) + +/*! + Avoid address alignment to a CPU or GPU cache line size. + */ +#define PVRSRV_MEMALLOCFLAG_NO_CACHE_LINE_ALIGN (1U<<28) + +/*! + @Description Macro checking whether the PVRSRV_CHECK_NO_CACHE_LINE_ALIGN flag is set. + @Input uiFlags Allocation flags. 
+ @Return        True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_NO_CACHE_LINE_ALIGN(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_NO_CACHE_LINE_ALIGN) != 0)
+
+
+/*
+ *
+ *  **********************************************************
+ *  *                                                        *
+ *  *              Device specific MMU flags                 *
+ *  *                                                        *
+ *  **********************************************************
+ *
+ * (Bits 26 to 27)
+ *
+ * Some services controlled devices have device specific control bits in
+ * their page table entries, we need to allow these flags to be passed down
+ * the memory management layers so the user can control these bits.
+ * For example, RGX device has the file rgx_memallocflags.h
+ */
+
+/*!
+ * Offset of device specific MMU flags.
+ */
+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET 26
+
+/*!
+ * Mask for retrieving device specific MMU flags. NOTE(review): 0x0f000000UL spans bits 24..27, but the section header and PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET (26) describe a 2-bit field at bits 26..27, and bits 24..25 also overlap PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_MASK (7U << 23) -- confirm whether the mask should be 0x0c000000UL.
+ */
+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK 0x0f000000UL
+
+/*!
+ @Description    Helper macro for setting device specific MMU flags.
+ @Input n        Flag index.
+ @Return         Flag vector with the specified bit set.
+ */
+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(n) \
+			(((n) << PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET) & \
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK)
+
+
+#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_SECURITY_VALIDATION)
+/*
+ *
+ *  **********************************************************
+ *  *                                                        *
+ *  *               Validation only flags                    *
+ *  *                                                        *
+ *  **********************************************************
+ *
+ * (Bits 32-34)
+ *
+ */
+
+/*!
+  PVRSRV_MEMALLOCFLAG_VAL_SECURE_FW_CODE
+ */
+#define PVRSRV_MEMALLOCFLAG_VAL_SECURE_FW_CODE (1ULL<<32)
+#define PVRSRV_CHECK_SECURE_FW_CODE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_VAL_SECURE_FW_CODE) != 0)
+
+/*!
+  PVRSRV_MEMALLOCFLAG_VAL_SECURE_FW_DATA
+ */
+#define PVRSRV_MEMALLOCFLAG_VAL_SECURE_FW_DATA (1ULL<<33)
+#define PVRSRV_CHECK_SECURE_FW_DATA(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_VAL_SECURE_FW_DATA) != 0)
+
+/*!
+ PVRSRV_MEMALLOCFLAG_VAL_SECURE_BUFFER + */ +#define PVRSRV_MEMALLOCFLAG_VAL_SECURE_BUFFER (1ULL<<34) +#define PVRSRV_CHECK_SECURE_BUFFER(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_VAL_SECURE_BUFFER) != 0) + +#endif + + +/*! + * Secure buffer mask -- Flags in the mask are allowed for secure buffers + * because they are not related to CPU mappings. + */ +#define PVRSRV_MEMALLOCFLAGS_SECBUFMASK ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \ + PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \ + PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED) + + + +/*! + PMR flags mask -- for internal services use only. This is the set of flags + that will be passed down and stored with the PMR, this also includes the + MMU flags which the PMR has to pass down to mm_common.c at PMRMap time. +*/ +#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_SECURITY_VALIDATION) +#define PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \ + PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \ + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \ + PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \ + PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \ + PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ + PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | \ + PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING | \ + PVRSRV_MEMALLOCFLAG_FW_ALLOC_TYPE_MASK | \ + PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_MASK | \ + PVRSRV_MEMALLOCFLAG_CPU_LOCAL | \ + PVRSRV_MEMALLOCFLAG_VAL_SECURE_FW_CODE | \ + PVRSRV_MEMALLOCFLAG_VAL_SECURE_FW_DATA | \ + PVRSRV_MEMALLOCFLAG_VAL_SECURE_BUFFER) +#else +#define PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \ + PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN 
| \ + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \ + PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \ + PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \ + PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ + PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | \ + PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING | \ + PVRSRV_MEMALLOCFLAG_FW_ALLOC_TYPE_MASK | \ + PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_MASK | \ + PVRSRV_MEMALLOCFLAG_CPU_LOCAL) +#endif + + +/*! + RA differentiation mask + + for use internal to services + + this is the set of flags bits that are able to determine whether a pair of + allocations are permitted to live in the same page table. Allocations + whose flags differ in any of these places would be allocated from separate + RA Imports and therefore would never coexist in the same page. + Special cases are zeroing and poisoning of memory. The caller is responsible + to set the sub-allocations to the value he wants it to be. To differentiate + between zeroed and poisoned RA Imports does not make sense because the + memory might be reused. + +*/ +#define PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK (PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK \ + & \ + ~(PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC)) + +/*! + Flags that affect _allocation_ +*/ +#define PVRSRV_MEMALLOCFLAGS_PERALLOCFLAGSMASK (0xFFFFFFFFU) + +/*! 
+ Flags that affect _mapping_ +*/ +#define PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \ + PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \ + PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ + PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \ + PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING | \ + PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING) + +#if ((~(PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK) & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK) != 0) +#error PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK is not a subset of PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK +#endif + + +/*! + Flags that affect _physical allocations_ in the DevMemX API + */ +#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_PHYSICAL_MASK (PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ + PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK | \ + PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED | \ + PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \ + PVRSRV_MEMALLOCFLAG_CPU_LOCAL | \ + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_POISON_ON_FREE) + +/*! + Flags that affect _virtual allocations_ in the DevMemX API + */ +#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_VIRTUAL_MASK (PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \ + PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED | \ + PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED) + +#endif /* PVRSRV_MEMALLOCFLAGS_H */ diff --git a/drivers/mcst/gpu-imgtec/include/pvrsrv_sync_km.h b/drivers/mcst/gpu-imgtec/include/pvrsrv_sync_km.h new file mode 100644 index 000000000000..04611f9f7cee --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/pvrsrv_sync_km.h @@ -0,0 +1,65 @@ +/*************************************************************************/ /*! +@File +@Title PVR synchronisation interface +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Types for server side code +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef PVRSRV_SYNC_KM_H +#define PVRSRV_SYNC_KM_H + +#include <powervr/pvrsrv_sync_ext.h> + +#if defined(__cplusplus) +extern "C" { +#endif + +#define SYNC_FB_FILE_STRING_MAX 256 +#define SYNC_FB_MODULE_STRING_LEN_MAX (32) +#define SYNC_FB_DESC_STRING_LEN_MAX (32) + +/* By default, fence-sync module emits into HWPerf (of course, if enabled) and + * considers a process (sleepable) context */ +#define PVRSRV_FENCE_FLAG_NONE (0U) +#define PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT (1U << 0) +#define PVRSRV_FENCE_FLAG_CTX_ATOMIC (1U << 1) + +#if defined(__cplusplus) +} +#endif +#endif /* PVRSRV_SYNC_KM_H */ diff --git a/drivers/mcst/gpu-imgtec/include/pvrsrv_tlcommon.h b/drivers/mcst/gpu-imgtec/include/pvrsrv_tlcommon.h new file mode 100644 index 000000000000..9c7dfd19581d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/pvrsrv_tlcommon.h @@ -0,0 +1,261 @@ +/*************************************************************************/ /*! +@File +@Title Services Transport Layer common types and definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Transport layer common types and definitions included into + both user mode and kernel mode source. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef PVR_TLCOMMON_H +#define PVR_TLCOMMON_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "img_defs.h" + + +/*! Handle type for stream descriptor objects as created by this API */ +typedef IMG_HANDLE PVRSRVTL_SD; + +/*! Maximum stream name length including the null byte */ +#define PRVSRVTL_MAX_STREAM_NAME_SIZE 40U + +/*! Maximum number of streams expected to exist */ +#define PVRSRVTL_MAX_DISCOVERABLE_STREAMS_BUFFER (32*PRVSRVTL_MAX_STREAM_NAME_SIZE) + +/*! Packet lengths are always rounded up to a multiple of 8 bytes */ +#define PVRSRVTL_PACKET_ALIGNMENT 8U +#define PVRSRVTL_ALIGN(x) (((x)+PVRSRVTL_PACKET_ALIGNMENT-1) & ~(PVRSRVTL_PACKET_ALIGNMENT-1)) + + +/*! A packet is made up of a header structure followed by the data bytes. + * There are 3 types of packet: normal (has data), data lost and padding, + * see packet flags. Header kept small to reduce data overhead. + * + * if the ORDER of the structure members is changed, please UPDATE the + * PVRSRVTL_PACKET_FLAG_OFFSET macro. + * + * Layout of uiTypeSize member is : + * + * |<---------------------------32-bits------------------------------>| + * |<----8---->|<-----1----->|<----7--->|<------------16------------->| + * | Type | Drop-Oldest | UNUSED | Size | + * + */ +typedef struct +{ + IMG_UINT32 uiTypeSize; /*!< Type, Drop-Oldest flag & number of bytes following header */ + IMG_UINT32 uiReserved; /*!< Reserve, packets and data must be 8 byte aligned */ + + /* First bytes of TL packet data follow header ... */ +} PVRSRVTL_PACKETHDR, *PVRSRVTL_PPACKETHDR; + +/* Structure must always be a size multiple of 8 as stream buffer + * still an array of IMG_UINT32s. + */ +static_assert((sizeof(PVRSRVTL_PACKETHDR) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(PVRSRVTL_PACKETHDR) must be a multiple of 8"); + +/*! Packet header reserved word fingerprint "TLP1" */ +#define PVRSRVTL_PACKETHDR_RESERVED 0x31504C54U + +/*! 
Packet header mask used to extract the size from the uiTypeSize member. + * Do not use directly, see GET macros. + */ +#define PVRSRVTL_PACKETHDR_SIZE_MASK 0x0000FFFFU +#define PVRSRVTL_MAX_PACKET_SIZE (PVRSRVTL_PACKETHDR_SIZE_MASK & ~0xFU) + + +/*! Packet header mask used to extract the type from the uiTypeSize member. + * Do not use directly, see GET macros. + */ +#define PVRSRVTL_PACKETHDR_TYPE_MASK 0xFF000000U +#define PVRSRVTL_PACKETHDR_TYPE_OFFSET 24U + +/*! Packet header mask used to check if packets before this one were dropped + * or not. Do not use directly, see GET macros. + */ +#define PVRSRVTL_PACKETHDR_OLDEST_DROPPED_MASK 0x00800000U +#define PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET 23U + +/*! Packet type enumeration. + */ +typedef enum +{ + /*! Undefined packet */ + PVRSRVTL_PACKETTYPE_UNDEF = 0, + + /*! Normal packet type. Indicates data follows the header. + */ + PVRSRVTL_PACKETTYPE_DATA = 1, + + /*! When seen this packet type indicates that at this moment in the stream + * packet(s) were not able to be accepted due to space constraints and + * that recent data may be lost - depends on how the producer handles the + * error. Such packets have no data, data length is 0. + */ + PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED = 2, + + /*! Packets with this type set are padding packets that contain undefined + * data and must be ignored/skipped by the client. They are used when the + * circular stream buffer wraps around and there is not enough space for + * the data at the end of the buffer. Such packets have a length of 0 or + * more. + */ + PVRSRVTL_PACKETTYPE_PADDING = 3, + + /*! This packet type conveys to the stream consumer that the stream + * producer has reached the end of data for that data sequence. The + * TLDaemon has several options for processing these packets that can + * be selected on a per stream basis. + */ + PVRSRVTL_PACKETTYPE_MARKER_EOS = 4, + + /*! 
This is same as PVRSRVTL_PACKETTYPE_MARKER_EOS but additionally removes + * old data record output file before opening new/next one + */ + PVRSRVTL_PACKETTYPE_MARKER_EOS_REMOVEOLD = 5, + + /*! Packet emitted on first stream opened by writer. Packet carries a name + * of the opened stream in a form of null-terminated string. + */ + PVRSRVTL_PACKETTYPE_STREAM_OPEN_FOR_WRITE = 6, + + /*! Packet emitted on last stream closed by writer. Packet carries a name + * of the closed stream in a form of null-terminated string. + */ + PVRSRVTL_PACKETTYPE_STREAM_CLOSE_FOR_WRITE = 7, + + PVRSRVTL_PACKETTYPE_LAST +} PVRSRVTL_PACKETTYPE; + +/* The SET_PACKET_* macros rely on the order the PVRSRVTL_PACKETHDR members are declared: + * uiFlags is the upper half of a structure consisting of 2 uint16 quantities. + */ +#define PVRSRVTL_SET_PACKET_DATA(len) (len) | (PVRSRVTL_PACKETTYPE_DATA << PVRSRVTL_PACKETHDR_TYPE_OFFSET) +#define PVRSRVTL_SET_PACKET_PADDING(len) (len) | (PVRSRVTL_PACKETTYPE_PADDING << PVRSRVTL_PACKETHDR_TYPE_OFFSET) +#define PVRSRVTL_SET_PACKET_WRITE_FAILED (0) | (PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED << PVRSRVTL_PACKETHDR_TYPE_OFFSET) +#define PVRSRVTL_SET_PACKET_HDR(len, type) (len) | ((type) << PVRSRVTL_PACKETHDR_TYPE_OFFSET) + +/*! Returns the number of bytes of data in the packet. + * p may be any address type. + */ +#define GET_PACKET_DATA_LEN(p) \ + ((IMG_UINT32) ((PVRSRVTL_PPACKETHDR) (void *) (p))->uiTypeSize & PVRSRVTL_PACKETHDR_SIZE_MASK) + + +/*! Returns a IMG_BYTE* pointer to the first byte of data in the packet */ +#define GET_PACKET_DATA_PTR(p) \ + (((IMG_UINT8 *) (void *) (p)) + sizeof(PVRSRVTL_PACKETHDR)) + +/*! Turns the packet address p into a PVRSRVTL_PPACKETHDR pointer type. + */ +#define GET_PACKET_HDR(p) ((PVRSRVTL_PPACKETHDR) ((void *) (p))) + +/*! Given a PVRSRVTL_PPACKETHDR address, return the address of the next pack + * It is up to the caller to determine if the new address is within the + * packet buffer. 
+ */ +#define GET_NEXT_PACKET_ADDR(p) \ + GET_PACKET_HDR( \ + GET_PACKET_DATA_PTR(p) + \ + ( \ + (GET_PACKET_DATA_LEN(p) + (PVRSRVTL_PACKET_ALIGNMENT-1)) & \ + (~(PVRSRVTL_PACKET_ALIGNMENT-1)) \ + ) \ + ) + +/*! Get the type of the packet. p is of type PVRSRVTL_PPACKETHDR. + */ +#define GET_PACKET_TYPE(p) (((p)->uiTypeSize & PVRSRVTL_PACKETHDR_TYPE_MASK)>>PVRSRVTL_PACKETHDR_TYPE_OFFSET) + +/*! Set PACKETS_DROPPED flag in packet header as a part of uiTypeSize. + * p is of type PVRSRVTL_PPACKETHDR. + */ +#define SET_PACKETS_DROPPED(p) (((p)->uiTypeSize) | (1<<PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET)) + +/*! Check if packets were dropped before this packet. + * p is of type PVRSRVTL_PPACKETHDR. + */ +#define CHECK_PACKETS_DROPPED(p) (((p)->uiTypeSize & PVRSRVTL_PACKETHDR_OLDEST_DROPPED_MASK)>>PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET) + +/*! Flags for use with PVRSRVTLOpenStream + * 0x01 - Do not block in PVRSRVTLAcquireData() when no bytes are available + * 0x02 - When the stream does not exist wait for a bit (2s) in + * PVRSRVTLOpenStream() and then exit with a timeout error if it still + * does not exist. + * 0x04 - Open stream for write only operations. + * If flag is not used stream is opened as read-only. This flag is + * required if one wants to call reserve/commit/write function on the + * stream descriptor. Read from on the stream descriptor opened + * with this flag will fail. + * 0x08 - Disable Producer Callback. + * If this flag is set and the stream becomes empty, do not call any + * associated producer callback to generate more data from the reader + * context. + * 0x10 - Reset stream on open. + * When this flag is used the stream will drop all of the stored data. + * 0x20 - Limit read position to the write position at time the stream + * was opened. Hence this flag will freeze the content read to that + * produced before the stream was opened for reading. + * 0x40 - Ignore Open Callback. + * When this flag is set ignore any OnReaderOpenCallback setting for + * the stream. This allows access to the stream to be made without + * generating any extra packets into the stream.
+ */ + +#define PVRSRV_STREAM_FLAG_NONE (0U) +#define PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING (1U<<0) +#define PVRSRV_STREAM_FLAG_OPEN_WAIT (1U<<1) +#define PVRSRV_STREAM_FLAG_OPEN_WO (1U<<2) +#define PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK (1U<<3) +#define PVRSRV_STREAM_FLAG_RESET_ON_OPEN (1U<<4) +#define PVRSRV_STREAM_FLAG_READ_LIMIT (1U<<5) +#define PVRSRV_STREAM_FLAG_IGNORE_OPEN_CALLBACK (1U<<6) + + +#if defined(__cplusplus) +} +#endif + +#endif /* PVR_TLCOMMON_H */ +/****************************************************************************** + End of file (pvrsrv_tlcommon.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/pvrsrv_tlstreams.h b/drivers/mcst/gpu-imgtec/include/pvrsrv_tlstreams.h new file mode 100644 index 000000000000..9064075ad5c0 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/pvrsrv_tlstreams.h @@ -0,0 +1,61 @@ +/*************************************************************************/ /*! +@File +@Title Services Transport Layer stream names +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Transport layer common types and definitions included into + both user mode and kernel mode source. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef PVRSRV_TLSTREAMS_H +#define PVRSRV_TLSTREAMS_H + +#define PVRSRV_TL_CTLR_STREAM "tlctrl" + +#define PVRSRV_TL_HWPERF_RGX_FW_STREAM "hwperf_fw_" +#define PVRSRV_TL_HWPERF_HOST_SERVER_STREAM "hwperf_host_" + +/* Host HWPerf client stream names are of the form 'hwperf_client_' */ +#define PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM "hwperf_client_" +#define PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC "hwperf_client_%u_%u" + +#endif /* PVRSRV_TLSTREAMS_H */ + +/****************************************************************************** + End of file (pvrsrv_tlstreams.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/pvrversion.h b/drivers/mcst/gpu-imgtec/include/pvrversion.h new file mode 100644 index 000000000000..fedefdee5376 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/pvrversion.h @@ -0,0 +1,68 @@ +/*************************************************************************/ /*! +@File pvrversion.h +@Title PowerVR version numbers and strings. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Version numbers and strings for PowerVR components. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PVRVERSION_H +#define PVRVERSION_H + +#define PVRVERSION_MAJ 1U +#define PVRVERSION_MIN 13U + +#define PVRVERSION_FAMILY "rogueddk" +#define PVRVERSION_BRANCHNAME "1.13" +#define PVRVERSION_BUILD 5824814 +#define PVRVERSION_BSCONTROL "Rogue_DDK_Linux_WS" + +#define PVRVERSION_STRING "Rogue_DDK_Linux_WS rogueddk 1.13@5824814" +#define PVRVERSION_STRING_SHORT "1.13@5824814" + +#define COPYRIGHT_TXT "Copyright (c) Imagination Technologies Ltd. All Rights Reserved." 
+ +#define PVRVERSION_BUILD_HI 582 +#define PVRVERSION_BUILD_LO 4814 +#define PVRVERSION_STRING_NUMERIC "1.13.582.4814" + +#define PVRVERSION_PACK(MAJOR,MINOR) (((IMG_UINT32)((IMG_UINT32)(MAJOR) & 0xFFFFU) << 16U) | (((MINOR) & 0xFFFFU) << 0U)) +#define PVRVERSION_UNPACK_MAJ(VERSION) (((VERSION) >> 16U) & 0xFFFFU) +#define PVRVERSION_UNPACK_MIN(VERSION) (((VERSION) >> 0U) & 0xFFFFU) + +#endif /* PVRVERSION_H */ diff --git a/drivers/mcst/gpu-imgtec/include/rgx_heap_firmware.h b/drivers/mcst/gpu-imgtec/include/rgx_heap_firmware.h new file mode 100644 index 000000000000..bd9d636fdc68 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/rgx_heap_firmware.h @@ -0,0 +1,131 @@ +/*************************************************************************/ /*! +@File +@Title RGX FW heap definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGX_HEAP_FIRMWARE_H) +#define RGX_HEAP_FIRMWARE_H + +/* Start at 903GiB. Size of 32MB per OSID (see rgxheapconfig.h) + * NOTE: + * The firmware heaps bases and sizes are defined here to + * simplify #include dependencies, see rgxheapconfig.h + * for the full RGX virtual address space layout. + */ + +/* + * The Config heap holds initialisation data shared between the + * the driver and firmware (e.g. pointers to the KCCB and FWCCB). + * The Main Firmware heap size is adjusted accordingly but most + * of the map / unmap functions must take into consideration + * the entire range (i.e. main and config heap). 
+ */ + +#if !defined(RGX_FW_HEAP_SHIFT) +#define RGX_FW_HEAP_SHIFT (25) +#endif + +#define RGX_FIRMWARE_NUMBER_OF_FW_HEAPS (2) +#define RGX_FIRMWARE_HEAP_SHIFT RGX_FW_HEAP_SHIFT +#define RGX_FIRMWARE_RAW_HEAP_BASE (0xE1C0000000ULL) +#define RGX_FIRMWARE_RAW_HEAP_SIZE (IMG_UINT32_C(1) << RGX_FIRMWARE_HEAP_SHIFT) + +#if defined(SUPPORT_MIPS_64K_PAGE_SIZE) +#if defined(PDUMP) +/* PDUMP drivers allocate each structure from the Config heap in a different PMR. + * Ensure the heap can hold 3 PMRs of 64KB */ +#define RGX_FIRMWARE_CONFIG_HEAP_SIZE (IMG_UINT32_C(0x30000)) /* 192KB */ +#else +#define RGX_FIRMWARE_CONFIG_HEAP_SIZE (IMG_UINT32_C(0x20000)) /* 128KB */ +#endif +#else +/* regular 4KB page size system assumed */ +#define RGX_FIRMWARE_CONFIG_HEAP_SIZE (IMG_UINT32_C(0x10000)) /* 64KB */ +#endif + +#define RGX_FIRMWARE_META_MAIN_HEAP_SIZE (RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE) +/* + * MIPS FW needs space in the Main heap to map GPU memory. + * This space is taken from the MAIN heap, to avoid creating a new heap. 
+ */ +#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_NORMAL (IMG_UINT32_C(0x100000)) /* 1MB */ +#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_BRN65101 (IMG_UINT32_C(0x400000)) /* 4MB */ + +#define RGX_FIRMWARE_MIPS_MAIN_HEAP_SIZE_NORMAL (RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE - \ + RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_NORMAL) + +#define RGX_FIRMWARE_MIPS_MAIN_HEAP_SIZE_BRN65101 (RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE - \ + RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_BRN65101) + +#if !defined(__KERNEL__) +#if defined(FIX_HW_BRN_65101) +#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_BRN65101 +#define RGX_FIRMWARE_MIPS_MAIN_HEAP_SIZE RGX_FIRMWARE_MIPS_MAIN_HEAP_SIZE_BRN65101 + +#include "img_defs.h" +static_assert((RGX_FIRMWARE_RAW_HEAP_SIZE) >= IMG_UINT32_C(0x800000), "MIPS GPU map size cannot be increased due to BRN65101 with a small FW heap"); + +#else +#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_NORMAL +#define RGX_FIRMWARE_MIPS_MAIN_HEAP_SIZE RGX_FIRMWARE_MIPS_MAIN_HEAP_SIZE_NORMAL +#endif +#endif /* !defined(__KERNEL__) */ + +/* Host sub-heap order: MAIN + CONFIG */ +#define RGX_FIRMWARE_HOST_MAIN_HEAP_BASE RGX_FIRMWARE_RAW_HEAP_BASE +#define RGX_FIRMWARE_HOST_CONFIG_HEAP_BASE (RGX_FIRMWARE_HOST_MAIN_HEAP_BASE + \ + RGX_FIRMWARE_RAW_HEAP_SIZE - \ + RGX_FIRMWARE_CONFIG_HEAP_SIZE) + +/* Guest sub-heap order: CONFIG + MAIN */ +#define RGX_FIRMWARE_GUEST_CONFIG_HEAP_BASE RGX_FIRMWARE_RAW_HEAP_BASE +#define RGX_FIRMWARE_GUEST_MAIN_HEAP_BASE (RGX_FIRMWARE_GUEST_CONFIG_HEAP_BASE + \ + RGX_FIRMWARE_CONFIG_HEAP_SIZE) + +/* + * The maximum configurable size via RGX_FW_HEAP_SHIFT is 32MiB (1<<25) and + * the minimum is 4MiB (1<<22); the default firmware heap size is set to + * maximum 32MiB. 
+ */ +#if defined(RGX_FW_HEAP_SHIFT) && (RGX_FW_HEAP_SHIFT < 22 || RGX_FW_HEAP_SHIFT > 25) +#error "RGX_FW_HEAP_SHIFT is outside valid range [22, 25]" +#endif + +#endif /* RGX_HEAP_FIRMWARE_H */ diff --git a/drivers/mcst/gpu-imgtec/include/rgx_memallocflags.h b/drivers/mcst/gpu-imgtec/include/rgx_memallocflags.h new file mode 100644 index 000000000000..b45ccd4baba5 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/rgx_memallocflags.h @@ -0,0 +1,49 @@ +/**************************************************************************/ /*! +@File +@Title RGX memory allocation flags +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGX_MEMALLOCFLAGS_H +#define RGX_MEMALLOCFLAGS_H + +#define PMMETA_PROTECT (1U << 0) /* Memory that only the PM and Meta can access */ +#define FIRMWARE_CACHED (1U << 1) /* Memory that is cached in META/MIPS */ + +#endif diff --git a/drivers/mcst/gpu-imgtec/include/rgx_meta.h b/drivers/mcst/gpu-imgtec/include/rgx_meta.h new file mode 100644 index 000000000000..e88d21308fa8 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/rgx_meta.h @@ -0,0 +1,385 @@ +/*************************************************************************/ /*! +@File +@Title RGX META definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX META helper definitions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGX_META_H) +#define RGX_META_H + + +/***** The META HW register definitions in the file are updated manually *****/ + + +#include "img_defs.h" +#include "km/rgxdefs_km.h" + + +/****************************************************************************** +* META registers and MACROS +******************************************************************************/ +#define META_CR_CTRLREG_BASE(T) (0x04800000U + (0x1000U*(T))) + +#define META_CR_TXPRIVEXT (0x048000E8) +#define META_CR_TXPRIVEXT_MINIM_EN (IMG_UINT32_C(0x1) << 7) + +#define META_CR_SYSC_JTAG_THREAD (0x04830030) +#define META_CR_SYSC_JTAG_THREAD_PRIV_EN (0x00000004) + +#define META_CR_PERF_COUNT0 (0x0480FFE0) +#define META_CR_PERF_COUNT1 (0x0480FFE8) +#define META_CR_PERF_COUNT_CTRL_SHIFT (28) +#define META_CR_PERF_COUNT_CTRL_MASK (0xF0000000) +#define META_CR_PERF_COUNT_CTRL_DCACHEHITS (IMG_UINT32_C(0x8) << META_CR_PERF_COUNT_CTRL_SHIFT) +#define META_CR_PERF_COUNT_CTRL_ICACHEHITS (IMG_UINT32_C(0x9) << META_CR_PERF_COUNT_CTRL_SHIFT) +#define META_CR_PERF_COUNT_CTRL_ICACHEMISS (IMG_UINT32_C(0xA) << META_CR_PERF_COUNT_CTRL_SHIFT) +#define META_CR_PERF_COUNT_CTRL_ICORE (IMG_UINT32_C(0xD) << META_CR_PERF_COUNT_CTRL_SHIFT) +#define META_CR_PERF_COUNT_THR_SHIFT (24) +#define META_CR_PERF_COUNT_THR_MASK (0x0F000000) +#define META_CR_PERF_COUNT_THR_0 (IMG_UINT32_C(0x1) << META_CR_PERF_COUNT_THR_SHIFT) +#define META_CR_PERF_COUNT_THR_1 (IMG_UINT32_C(0x2) << META_CR_PERF_COUNT_THR_SHIFT) + +#define META_CR_TxVECINT_BHALT (0x04820500) +#define META_CR_PERF_ICORE0 (0x0480FFD0) +#define META_CR_PERF_ICORE1 (0x0480FFD8) +#define META_CR_PERF_ICORE_DCACHEMISS (0x8) + +#define META_CR_PERF_COUNT(CTRL, THR) ((META_CR_PERF_COUNT_CTRL_##CTRL << META_CR_PERF_COUNT_CTRL_SHIFT) | \ + (THR << META_CR_PERF_COUNT_THR_SHIFT)) + +#define META_CR_TXUXXRXDT_OFFSET (META_CR_CTRLREG_BASE(0U) + 0x0000FFF0U) +#define META_CR_TXUXXRXRQ_OFFSET 
(META_CR_CTRLREG_BASE(0U) + 0x0000FFF8U) + +#define META_CR_TXUXXRXRQ_DREADY_BIT (0x80000000U) /* Poll for done */ +#define META_CR_TXUXXRXRQ_RDnWR_BIT (0x00010000U) /* Set for read */ +#define META_CR_TXUXXRXRQ_TX_S (12) +#define META_CR_TXUXXRXRQ_RX_S (4) +#define META_CR_TXUXXRXRQ_UXX_S (0) + +#define META_CR_TXUIN_ID (0x0) /* Internal ctrl regs */ +#define META_CR_TXUD0_ID (0x1) /* Data unit regs */ +#define META_CR_TXUD1_ID (0x2) /* Data unit regs */ +#define META_CR_TXUA0_ID (0x3) /* Address unit regs */ +#define META_CR_TXUA1_ID (0x4) /* Address unit regs */ +#define META_CR_TXUPC_ID (0x5) /* PC registers */ + +/* Macros to calculate register access values */ +#define META_CR_CORE_REG(Thr, RegNum, Unit) (((IMG_UINT32)(Thr) << META_CR_TXUXXRXRQ_TX_S) | \ + ((IMG_UINT32)(RegNum) << META_CR_TXUXXRXRQ_RX_S) | \ + ((IMG_UINT32)(Unit) << META_CR_TXUXXRXRQ_UXX_S)) + +#define META_CR_THR0_PC META_CR_CORE_REG(0, 0, META_CR_TXUPC_ID) +#define META_CR_THR0_PCX META_CR_CORE_REG(0, 1, META_CR_TXUPC_ID) +#define META_CR_THR0_SP META_CR_CORE_REG(0, 0, META_CR_TXUA0_ID) + +#define META_CR_THR1_PC META_CR_CORE_REG(1, 0, META_CR_TXUPC_ID) +#define META_CR_THR1_PCX META_CR_CORE_REG(1, 1, META_CR_TXUPC_ID) +#define META_CR_THR1_SP META_CR_CORE_REG(1, 0, META_CR_TXUA0_ID) + +#define SP_ACCESS(Thread) META_CR_CORE_REG(Thread, 0, META_CR_TXUA0_ID) +#define PC_ACCESS(Thread) META_CR_CORE_REG(Thread, 0, META_CR_TXUPC_ID) + +#define META_CR_COREREG_ENABLE (0x0000000U) +#define META_CR_COREREG_STATUS (0x0000010U) +#define META_CR_COREREG_DEFR (0x00000A0U) +#define META_CR_COREREG_PRIVEXT (0x00000E8U) + +#define META_CR_T0ENABLE_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_ENABLE) +#define META_CR_T0STATUS_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_STATUS) +#define META_CR_T0DEFR_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_DEFR) +#define META_CR_T0PRIVEXT_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_PRIVEXT) + +#define META_CR_T1ENABLE_OFFSET 
(META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_ENABLE) +#define META_CR_T1STATUS_OFFSET (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_STATUS) +#define META_CR_T1DEFR_OFFSET (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_DEFR) +#define META_CR_T1PRIVEXT_OFFSET (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_PRIVEXT) + +#define META_CR_TXENABLE_ENABLE_BIT (0x00000001U) /* Set if running */ +#define META_CR_TXSTATUS_PRIV (0x00020000U) +#define META_CR_TXPRIVEXT_MINIM (0x00000080U) + +#define META_MEM_GLOBAL_RANGE_BIT (0x80000000U) + +#define META_CR_TXCLKCTRL (0x048000B0) +#define META_CR_TXCLKCTRL_ALL_ON (0x55111111) +#define META_CR_TXCLKCTRL_ALL_AUTO (0xAA222222) + + +/****************************************************************************** +* META LDR Format +******************************************************************************/ +/* Block header structure */ +typedef struct +{ + IMG_UINT32 ui32DevID; + IMG_UINT32 ui32SLCode; + IMG_UINT32 ui32SLData; + IMG_UINT16 ui16PLCtrl; + IMG_UINT16 ui16CRC; + +} RGX_META_LDR_BLOCK_HDR; + +/* High level data stream block structure */ +typedef struct +{ + IMG_UINT16 ui16Cmd; + IMG_UINT16 ui16Length; + IMG_UINT32 ui32Next; + IMG_UINT32 aui32CmdData[4]; + +} RGX_META_LDR_L1_DATA_BLK; + +/* High level data stream block structure */ +typedef struct +{ + IMG_UINT16 ui16Tag; + IMG_UINT16 ui16Length; + IMG_UINT32 aui32BlockData[4]; + +} RGX_META_LDR_L2_DATA_BLK; + +/* Config command structure */ +typedef struct +{ + IMG_UINT32 ui32Type; + IMG_UINT32 aui32BlockData[4]; + +} RGX_META_LDR_CFG_BLK; + +/* Block type definitions */ +#define RGX_META_LDR_COMMENT_TYPE_MASK (0x0010U) +#define RGX_META_LDR_BLK_IS_COMMENT(X) ((X & RGX_META_LDR_COMMENT_TYPE_MASK) != 0U) + +/* Command definitions + * Value Name Description + * 0 LoadMem Load memory with binary data. + * 1 LoadCore Load a set of core registers. + * 2 LoadMMReg Load a set of memory mapped registers. + * 3 StartThreads Set each thread PC and SP, then enable threads. 
+ * 4 ZeroMem Zeros a memory region. + * 5 Config Perform a configuration command. + */ +#define RGX_META_LDR_CMD_MASK (0x000FU) + +#define RGX_META_LDR_CMD_LOADMEM (0x0000U) +#define RGX_META_LDR_CMD_LOADCORE (0x0001U) +#define RGX_META_LDR_CMD_LOADMMREG (0x0002U) +#define RGX_META_LDR_CMD_START_THREADS (0x0003U) +#define RGX_META_LDR_CMD_ZEROMEM (0x0004U) +#define RGX_META_LDR_CMD_CONFIG (0x0005U) + +/* Config Command definitions + * Value Name Description + * 0 Pause Pause for x times 100 instructions + * 1 Read Read a value from register - No value return needed. + * Utilises effects of issuing reads to certain registers + * 2 Write Write to mem location + * 3 MemSet Set mem to value + * 4 MemCheck check mem for specific value. + */ +#define RGX_META_LDR_CFG_PAUSE (0x0000) +#define RGX_META_LDR_CFG_READ (0x0001) +#define RGX_META_LDR_CFG_WRITE (0x0002) +#define RGX_META_LDR_CFG_MEMSET (0x0003) +#define RGX_META_LDR_CFG_MEMCHECK (0x0004) + + +/****************************************************************************** +* RGX FW segmented MMU definitions +******************************************************************************/ +/* All threads can access the segment */ +#define RGXFW_SEGMMU_ALLTHRS (IMG_UINT32_C(0xf) << 8U) +/* Writable */ +#define RGXFW_SEGMMU_WRITEABLE (0x1U << 1U) +/* All threads can access and writable */ +#define RGXFW_SEGMMU_ALLTHRS_WRITEABLE (RGXFW_SEGMMU_ALLTHRS | RGXFW_SEGMMU_WRITEABLE) + +/* Direct map region 10 used for mapping GPU memory - max 8MB */ +#define RGXFW_SEGMMU_DMAP_GPU_ID (10U) +#define RGXFW_SEGMMU_DMAP_GPU_ADDR_START (0x07000000U) +#define RGXFW_SEGMMU_DMAP_GPU_MAX_SIZE (0x00800000U) + +/* Segment IDs */ +#define RGXFW_SEGMMU_DATA_ID (1U) +#define RGXFW_SEGMMU_BOOTLDR_ID (2U) +#define RGXFW_SEGMMU_TEXT_ID (RGXFW_SEGMMU_BOOTLDR_ID) + +/* + * SLC caching strategy in S7 and volcanic is emitted through the segment MMU. 
+ * All the segments configured through the macro RGXFW_SEGMMU_OUTADDR_TOP are + * CACHED in the SLC. + * The interface has been kept the same to simplify the code changes. + * The bifdm argument is ignored (no longer relevant) in S7 and volcanic. + */ +#define RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC(pers, slc_policy, mmu_ctx) ((((IMG_UINT64) ((pers) & 0x3)) << 52) | \ + (((IMG_UINT64) ((mmu_ctx) & 0xFF)) << 44) | \ + (((IMG_UINT64) ((slc_policy) & 0x1)) << 40)) +#define RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(mmu_ctx) RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC(0x3, 0x0, mmu_ctx) +#define RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED(mmu_ctx) RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC(0x0, 0x1, mmu_ctx) + +/* To configure the Page Catalog and BIF-DM fed into the BIF for Garten + * accesses through this segment + */ +#define RGXFW_SEGMMU_OUTADDR_TOP_SLC(pc, bifdm) (((IMG_UINT64)((IMG_UINT64)(pc) & 0xFU) << 44U) | \ + ((IMG_UINT64)((IMG_UINT64)(bifdm) & 0xFU) << 40U)) + +#define RGXFW_SEGMMU_META_BIFDM_ID (0x7U) +#if !defined(__KERNEL__) && defined(RGX_FEATURE_META) +#if defined(RGX_FEATURE_SLC_VIVT) +#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_CACHED RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED +#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_UNCACHED RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED +#define RGXFW_SEGMMU_OUTADDR_TOP_META RGXFW_SEGMMU_OUTADDR_TOP_SLC_CACHED +#else +#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_CACHED RGXFW_SEGMMU_OUTADDR_TOP_SLC +#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_UNCACHED RGXFW_SEGMMU_OUTADDR_TOP_SLC +#define RGXFW_SEGMMU_OUTADDR_TOP_META(pc) RGXFW_SEGMMU_OUTADDR_TOP_SLC(pc, RGXFW_SEGMMU_META_BIFDM_ID) +#endif +#endif + +/* META segments have 4kB minimum size */ +#define RGXFW_SEGMMU_ALIGN (0x1000U) + +/* Segmented MMU registers (n = segment id) */ +#define META_CR_MMCU_SEGMENTn_BASE(n) (0x04850000U + ((n)*0x10U)) +#define META_CR_MMCU_SEGMENTn_LIMIT(n) (0x04850004U + ((n)*0x10U)) +#define META_CR_MMCU_SEGMENTn_OUTA0(n) (0x04850008U + ((n)*0x10U)) +#define META_CR_MMCU_SEGMENTn_OUTA1(n) 
(0x0485000CU + ((n)*0x10U)) + +/* The following defines must be recalculated if the Meta MMU segments used + * to access Host-FW data are changed + * Current combinations are: + * - SLC uncached, META cached, FW base address 0x70000000 + * - SLC uncached, META uncached, FW base address 0xF0000000 + * - SLC cached, META cached, FW base address 0x10000000 + * - SLC cached, META uncached, FW base address 0x90000000 + */ +#define RGXFW_SEGMMU_DATA_BASE_ADDRESS (0x10000000U) +#define RGXFW_SEGMMU_DATA_META_CACHED (0x0U) +#define RGXFW_SEGMMU_DATA_META_UNCACHED (META_MEM_GLOBAL_RANGE_BIT) // 0x80000000 +#define RGXFW_SEGMMU_DATA_META_CACHE_MASK (META_MEM_GLOBAL_RANGE_BIT) +/* For non-VIVT SLCs the cacheability of the FW data in the SLC is selected in + * the PTEs for the FW data, not in the Meta Segment MMU, which means these + * defines have no real effect in those cases + */ +#define RGXFW_SEGMMU_DATA_VIVT_SLC_CACHED (0x0U) +#define RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED (0x60000000U) +#define RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK (0x60000000U) + + +#if defined(SECURE_FW_CODE_OSID) && defined(RGX_FEATURE_META) +#error "SECURE_FW_CODE_OSID is not supported on META cores" +#endif + + +/****************************************************************************** +* RGX FW Bootloader defaults +******************************************************************************/ +#define RGXFW_BOOTLDR_META_ADDR (0x40000000U) +#define RGXFW_BOOTLDR_DEVV_ADDR_0 (0xC0000000U) +#define RGXFW_BOOTLDR_DEVV_ADDR_1 (0x000000E1) +#define RGXFW_BOOTLDR_DEVV_ADDR ((((IMG_UINT64) RGXFW_BOOTLDR_DEVV_ADDR_1) << 32) | RGXFW_BOOTLDR_DEVV_ADDR_0) +#define RGXFW_BOOTLDR_LIMIT (0x1FFFF000) +#define RGXFW_MAX_BOOTLDR_OFFSET (0x1000) + +/* Bootloader configuration offset is in dwords (512 bytes) */ +#define RGXFW_BOOTLDR_CONF_OFFSET (0x80) + + +/****************************************************************************** +* RGX META Stack 
+******************************************************************************/ +#define RGX_META_STACK_SIZE (0x1000U) + +/****************************************************************************** + RGX META Core memory +******************************************************************************/ +/* code and data both map to the same physical memory */ +#define RGX_META_COREMEM_CODE_ADDR (0x80000000U) +#define RGX_META_COREMEM_DATA_ADDR (0x82000000U) +#define RGX_META_COREMEM_OFFSET_MASK (0x01ffffffU) + +#if defined(__KERNEL__) +#define RGX_META_IS_COREMEM_CODE(A, B) (((A) >= RGX_META_COREMEM_CODE_ADDR) && ((A) < (RGX_META_COREMEM_CODE_ADDR + (B)))) +#define RGX_META_IS_COREMEM_DATA(A, B) (((A) >= RGX_META_COREMEM_DATA_ADDR) && ((A) < (RGX_META_COREMEM_DATA_ADDR + (B)))) +#endif + +/****************************************************************************** +* 2nd thread +******************************************************************************/ +#define RGXFW_THR1_PC (0x18930000) +#define RGXFW_THR1_SP (0x78890000) + +/****************************************************************************** +* META compatibility +******************************************************************************/ + +#define META_CR_CORE_ID (0x04831000) +#define META_CR_CORE_ID_VER_SHIFT (16U) +#define META_CR_CORE_ID_VER_CLRMSK (0XFF00FFFFU) + +#if !defined(__KERNEL__) && defined(RGX_FEATURE_META) + + #if (RGX_FEATURE_META == MTP218) + #define RGX_CR_META_CORE_ID_VALUE 0x19 + #elif (RGX_FEATURE_META == MTP219) + #define RGX_CR_META_CORE_ID_VALUE 0x1E + #elif (RGX_FEATURE_META == LTP218) + #define RGX_CR_META_CORE_ID_VALUE 0x1C + #elif (RGX_FEATURE_META == LTP217) + #define RGX_CR_META_CORE_ID_VALUE 0x1F + #else + #error "Unknown META ID" + #endif +#else + + #define RGX_CR_META_MTP218_CORE_ID_VALUE 0x19 + #define RGX_CR_META_MTP219_CORE_ID_VALUE 0x1E + #define RGX_CR_META_LTP218_CORE_ID_VALUE 0x1C + #define RGX_CR_META_LTP217_CORE_ID_VALUE 0x1F + +#endif +#define 
RGXFW_PROCESSOR_META "META" + + +#endif /* RGX_META_H */ + +/****************************************************************************** + End of file (rgx_meta.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/rgx_mips.h b/drivers/mcst/gpu-imgtec/include/rgx_mips.h new file mode 100644 index 000000000000..a7cf42e7968b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/rgx_mips.h @@ -0,0 +1,376 @@ +/*************************************************************************/ /*! +@File rgx_mips.h +@Title +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Platform RGX +@Description RGX MIPS definitions, kernel/user space +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGX_MIPS_H) +#define RGX_MIPS_H + +/* + * Utility defines for memory management + */ +#define RGXMIPSFW_LOG2_PAGE_SIZE_4K (12) +#define RGXMIPSFW_PAGE_SIZE_4K (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_4K) +#define RGXMIPSFW_PAGE_MASK_4K (RGXMIPSFW_PAGE_SIZE_4K - 1) +#define RGXMIPSFW_LOG2_PAGE_SIZE_64K (16) +#define RGXMIPSFW_PAGE_SIZE_64K (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_64K) +#define RGXMIPSFW_PAGE_MASK_64K (RGXMIPSFW_PAGE_SIZE_64K - 1) +#define RGXMIPSFW_LOG2_PAGE_SIZE_256K (18) +#define RGXMIPSFW_PAGE_SIZE_256K (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_256K) +#define RGXMIPSFW_PAGE_MASK_256K (RGXMIPSFW_PAGE_SIZE_256K - 1) +#define RGXMIPSFW_LOG2_PAGE_SIZE_1MB (20) +#define RGXMIPSFW_PAGE_SIZE_1MB (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_1MB) +#define RGXMIPSFW_PAGE_MASK_1MB (RGXMIPSFW_PAGE_SIZE_1MB - 1) +#define RGXMIPSFW_LOG2_PAGE_SIZE_4MB (22) +#define RGXMIPSFW_PAGE_SIZE_4MB (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_4MB) +#define RGXMIPSFW_PAGE_MASK_4MB (RGXMIPSFW_PAGE_SIZE_4MB - 1) +#define RGXMIPSFW_LOG2_PTE_ENTRY_SIZE (2) +/* log2 page table sizes dependent on FW heap size and page size (for each OS) */ +#define 
RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K (RGX_FIRMWARE_HEAP_SHIFT - RGXMIPSFW_LOG2_PAGE_SIZE_4K + RGXMIPSFW_LOG2_PTE_ENTRY_SIZE) +#define RGXMIPSFW_LOG2_PAGETABLE_SIZE_64K (RGX_FIRMWARE_HEAP_SHIFT - RGXMIPSFW_LOG2_PAGE_SIZE_64K + RGXMIPSFW_LOG2_PTE_ENTRY_SIZE) +/* Maximum number of page table pages (both Host and MIPS pages) */ +#define RGXMIPSFW_MAX_NUM_PAGETABLE_PAGES (4) +/* Total number of TLB entries */ +#define RGXMIPSFW_NUMBER_OF_TLB_ENTRIES (16) +/* "Uncached" caching policy */ +#define RGXMIPSFW_UNCACHED_CACHE_POLICY (0X00000002) +/* "Write-back write-allocate" caching policy */ +#define RGXMIPSFW_WRITEBACK_CACHE_POLICY (0X00000003) +/* "Write-through no write-allocate" caching policy */ +#define RGXMIPSFW_WRITETHROUGH_CACHE_POLICY (0X00000001) +/* Cached policy used by MIPS in case of physical bus on 32 bit */ +#define RGXMIPSFW_CACHED_POLICY (RGXMIPSFW_WRITEBACK_CACHE_POLICY) +/* Cached policy used by MIPS in case of physical bus on more than 32 bit */ +#define RGXMIPSFW_CACHED_POLICY_ABOVE_32BIT (RGXMIPSFW_WRITETHROUGH_CACHE_POLICY) +/* Total number of Remap entries */ +#define RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES (2 * RGXMIPSFW_NUMBER_OF_TLB_ENTRIES) + + +/* + * MIPS EntryLo/PTE format + */ + +#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_SHIFT (31U) +#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_CLRMSK (0X7FFFFFFF) +#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN (0X80000000) + +#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_SHIFT (30U) +#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_CLRMSK (0XBFFFFFFF) +#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_EN (0X40000000) + +/* Page Frame Number */ +#define RGXMIPSFW_ENTRYLO_PFN_SHIFT (6) +#define RGXMIPSFW_ENTRYLO_PFN_ALIGNSHIFT (12) +/* Mask used for the MIPS Page Table in case of physical bus on 32 bit */ +#define RGXMIPSFW_ENTRYLO_PFN_MASK (0x03FFFFC0) +#define RGXMIPSFW_ENTRYLO_PFN_SIZE (20) +/* Mask used for the MIPS Page Table in case of physical bus on more than 32 bit */ +#define RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT (0x3FFFFFC0) +#define 
RGXMIPSFW_ENTRYLO_PFN_SIZE_ABOVE_32BIT (24) +#define RGXMIPSFW_ADDR_TO_ENTRYLO_PFN_RSHIFT (RGXMIPSFW_ENTRYLO_PFN_ALIGNSHIFT - \ + RGXMIPSFW_ENTRYLO_PFN_SHIFT) + +#define RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT (3U) +#define RGXMIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK (0XFFFFFFC7) + +#define RGXMIPSFW_ENTRYLO_DIRTY_SHIFT (2U) +#define RGXMIPSFW_ENTRYLO_DIRTY_CLRMSK (0XFFFFFFFB) +#define RGXMIPSFW_ENTRYLO_DIRTY_EN (0X00000004) + +#define RGXMIPSFW_ENTRYLO_VALID_SHIFT (1U) +#define RGXMIPSFW_ENTRYLO_VALID_CLRMSK (0XFFFFFFFD) +#define RGXMIPSFW_ENTRYLO_VALID_EN (0X00000002) + +#define RGXMIPSFW_ENTRYLO_GLOBAL_SHIFT (0U) +#define RGXMIPSFW_ENTRYLO_GLOBAL_CLRMSK (0XFFFFFFFE) +#define RGXMIPSFW_ENTRYLO_GLOBAL_EN (0X00000001) + +#define RGXMIPSFW_ENTRYLO_DVG (RGXMIPSFW_ENTRYLO_DIRTY_EN | \ + RGXMIPSFW_ENTRYLO_VALID_EN | \ + RGXMIPSFW_ENTRYLO_GLOBAL_EN) +#define RGXMIPSFW_ENTRYLO_UNCACHED (RGXMIPSFW_UNCACHED_CACHE_POLICY << \ + RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT) +#define RGXMIPSFW_ENTRYLO_DVG_UNCACHED (RGXMIPSFW_ENTRYLO_DVG | RGXMIPSFW_ENTRYLO_UNCACHED) + + +/* Remap Range Config Addr Out */ +/* These defines refer to the upper half of the Remap Range Config register */ +#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_MASK (0x0FFFFFF0) +#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT (4) /* wrt upper half of the register */ +#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT (12) +#define RGXMIPSFW_ADDR_TO_RR_ADDR_OUT_RSHIFT (RGXMIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT - \ + RGXMIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT) + +#if defined(SECURE_FW_CODE_OSID) && (SECURE_FW_CODE_OSID + 1 > 2) +#define MIPS_FW_CODE_OSID (SECURE_FW_CODE_OSID) +#elif defined(SECURE_FW_CODE_OSID) +#define MIPS_FW_CODE_OSID (1U) +#endif + + +/* + * Pages to trampoline problematic physical addresses: + * - RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN : 0x1FC0_0000 + * - RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN : 0x1FC0_1000 + * - RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN : 0x1FC0_2000 + * - (benign trampoline) : 0x1FC0_3000 + * that would otherwise be 
erroneously remapped by the MIPS wrapper + (see "Firmware virtual layout and remap configuration" section below) + */ + +#define RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES (2) +#define RGXMIPSFW_TRAMPOLINE_NUMPAGES (1 << RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES) +#define RGXMIPSFW_TRAMPOLINE_SIZE (RGXMIPSFW_TRAMPOLINE_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE_4K) +#define RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE (RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES + RGXMIPSFW_LOG2_PAGE_SIZE_4K) + +#define RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR (RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN) +#define RGXMIPSFW_TRAMPOLINE_OFFSET(a) (a - RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN) + +#define RGXMIPSFW_SENSITIVE_ADDR(a) (RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN == (~((1 << RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE)-1) & a)) + +/* Macros to decode C0_Cause register (NOTE: span reconstructed after extraction garbling; ExcCode lives in Cause bits 6:2) */ +#define RGXMIPSFW_C0_CAUSE_EXCCODE(CAUSE) (((CAUSE) & 0x7c) >> 2) +#define RGXMIPSFW_C0_CAUSE_EXCCODE_FWERROR 9 +/* Use only when Coprocessor Unusable exception */ +#define RGXMIPSFW_C0_CAUSE_UNUSABLE_UNIT(CAUSE) (((CAUSE) >> 28) & 0x3) +#define RGXMIPSFW_C0_CAUSE_PENDING_HWIRQ(CAUSE) (((CAUSE) & 0x3fc00) >> 10) +#define RGXMIPSFW_C0_CAUSE_FDCIPENDING (1 << 21) +#define RGXMIPSFW_C0_CAUSE_IV (1 << 23) +#define RGXMIPSFW_C0_CAUSE_IC (1 << 25) +#define RGXMIPSFW_C0_CAUSE_PCIPENDING (1 << 26) +#define RGXMIPSFW_C0_CAUSE_TIPENDING (1 << 30) +#define RGXMIPSFW_C0_CAUSE_BRANCH_DELAY (1 << 31) + +/* Macros to decode C0_Debug register */ +#define RGXMIPSFW_C0_DEBUG_EXCCODE(DEBUG) (((DEBUG) >> 10) & 0x1f) +#define RGXMIPSFW_C0_DEBUG_DSS (1 << 0) +#define RGXMIPSFW_C0_DEBUG_DBP (1 << 1) +#define RGXMIPSFW_C0_DEBUG_DDBL (1 << 2) +#define RGXMIPSFW_C0_DEBUG_DDBS (1 << 3) +#define RGXMIPSFW_C0_DEBUG_DIB (1 << 4) +#define RGXMIPSFW_C0_DEBUG_DINT (1 << 5) +#define RGXMIPSFW_C0_DEBUG_DIBIMPR (1 << 6) +#define RGXMIPSFW_C0_DEBUG_DDBLIMPR (1 << 18) +#define RGXMIPSFW_C0_DEBUG_DDBSIMPR (1 << 19) +#define RGXMIPSFW_C0_DEBUG_IEXI (1 << 20) +#define RGXMIPSFW_C0_DEBUG_DBUSEP (1 << 21) +#define RGXMIPSFW_C0_DEBUG_CACHEEP (1 << 22) +#define RGXMIPSFW_C0_DEBUG_MCHECKP (1 << 23) +#define RGXMIPSFW_C0_DEBUG_IBUSEP (1 << 24) +#define 
RGXMIPSFW_C0_DEBUG_DM (1 << 30) +#define RGXMIPSFW_C0_DEBUG_DBD (1 << 31) + +/* Macros to decode TLB entries */ +#define RGXMIPSFW_TLB_GET_MASK(PAGE_MASK) (((PAGE_MASK) >> 13) & 0XFFFFU) +#define RGXMIPSFW_TLB_GET_PAGE_SIZE(PAGE_MASK) ((((PAGE_MASK) | 0x1FFF) + 1) >> 11) /* page size in KB */ +#define RGXMIPSFW_TLB_GET_PAGE_MASK(PAGE_SIZE) ((((PAGE_SIZE) << 11) - 1) & ~0x7FF) /* page size in KB */ +#define RGXMIPSFW_TLB_GET_VPN2(ENTRY_HI) ((ENTRY_HI) >> 13) +#define RGXMIPSFW_TLB_GET_COHERENCY(ENTRY_LO) (((ENTRY_LO) >> 3) & 0x7U) +#define RGXMIPSFW_TLB_GET_PFN(ENTRY_LO) (((ENTRY_LO) >> 6) & 0XFFFFFU) +/* GET_PA uses a non-standard PFN mask for 36 bit addresses */ +#define RGXMIPSFW_TLB_GET_PA(ENTRY_LO) (((IMG_UINT64)(ENTRY_LO) & RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT) << 6) +#define RGXMIPSFW_TLB_GET_INHIBIT(ENTRY_LO) (((ENTRY_LO) >> 30) & 0x3U) +#define RGXMIPSFW_TLB_GET_DGV(ENTRY_LO) ((ENTRY_LO) & 0x7U) +#define RGXMIPSFW_TLB_GLOBAL (1U) +#define RGXMIPSFW_TLB_VALID (1U << 1) +#define RGXMIPSFW_TLB_DIRTY (1U << 2) +#define RGXMIPSFW_TLB_XI (1U << 30) +#define RGXMIPSFW_TLB_RI (1U << 31) + +#define RGXMIPSFW_REMAP_GET_REGION_SIZE(REGION_SIZE_ENCODING) (1 << ((REGION_SIZE_ENCODING + 1) << 1)) + +typedef struct { + IMG_UINT32 ui32TLBPageMask; + IMG_UINT32 ui32TLBHi; + IMG_UINT32 ui32TLBLo0; + IMG_UINT32 ui32TLBLo1; +} RGX_MIPS_TLB_ENTRY; + +typedef struct { + IMG_UINT32 ui32RemapAddrIn; /* always 4k aligned */ + IMG_UINT32 ui32RemapAddrOut; /* always 4k aligned */ + IMG_UINT32 ui32RemapRegionSize; +} RGX_MIPS_REMAP_ENTRY; + +typedef struct { + IMG_UINT32 ui32ErrorState; /* This must come first in the structure */ + IMG_UINT32 ui32ErrorEPC; + IMG_UINT32 ui32StatusRegister; + IMG_UINT32 ui32CauseRegister; + IMG_UINT32 ui32BadRegister; + IMG_UINT32 ui32EPC; + IMG_UINT32 ui32SP; + IMG_UINT32 ui32Debug; + IMG_UINT32 ui32DEPC; + IMG_UINT32 ui32BadInstr; + IMG_UINT32 ui32UnmappedAddress; + RGX_MIPS_TLB_ENTRY asTLB[RGXMIPSFW_NUMBER_OF_TLB_ENTRIES]; + RGX_MIPS_REMAP_ENTRY 
asRemap[RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES]; +} RGX_MIPS_STATE; + +#endif /* RGXMIPSFW_ASSEMBLY_CODE */ + +#endif /* RGX_MIPS_H */ diff --git a/drivers/mcst/gpu-imgtec/include/rgx_riscv.h b/drivers/mcst/gpu-imgtec/include/rgx_riscv.h new file mode 100644 index 000000000000..2e7fc6490878 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/rgx_riscv.h @@ -0,0 +1,106 @@ +/*************************************************************************/ /*! +@File rgx_riscv.h +@Title +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Platform RGX +@Description RGX RISCV definitions, kernel/user space +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGX_RISCV_H) +#define RGX_RISCV_H + +#include "km/rgxdefs_km.h" + + +/* Utility defines to convert regions to virtual addresses and remaps */ +#define RGXRISCVFW_GET_REGION_BASE(r) IMG_UINT32_C((r) << 28) +#define RGXRISCVFW_GET_REGION(a) IMG_UINT32_C((a) >> 28) +#define RGXRISCVFW_MAX_REGION_SIZE IMG_UINT32_C(1 << 28) +#define RGXRISCVFW_GET_REMAP(r) (TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG + ((r) * 8U)) + +/* RISCV remap output is aligned to 4K */ +#define RGXRISCVFW_REMAP_CONFIG_DEVVADDR_ALIGN (0x1000U) + +/* + * FW bootloader defines + */ +#define RGXRISCVFW_BOOTLDR_CODE_REGION IMG_UINT32_C(0xC) +#define RGXRISCVFW_BOOTLDR_DATA_REGION IMG_UINT32_C(0x5) +#define RGXRISCVFW_BOOTLDR_CODE_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_BOOTLDR_CODE_REGION)) +#define RGXRISCVFW_BOOTLDR_DATA_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_BOOTLDR_DATA_REGION)) +#define RGXRISCVFW_BOOTLDR_CODE_REMAP (RGXRISCVFW_GET_REMAP(RGXRISCVFW_BOOTLDR_CODE_REGION)) +#define RGXRISCVFW_BOOTLDR_DATA_REMAP (RGXRISCVFW_GET_REMAP(RGXRISCVFW_BOOTLDR_DATA_REGION)) + +/* Bootloader data offset in dwords from the beginning of the FW data allocation */ 
+#define RGXRISCVFW_BOOTLDR_CONF_OFFSET (0x0) + + +/* + * Host-FW shared data defines + */ +#define RGXRISCVFW_SHARED_CACHED_DATA_REGION (0x6U) +#define RGXRISCVFW_SHARED_UNCACHED_DATA_REGION (0xDU) +#define RGXRISCVFW_SHARED_CACHED_DATA_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_SHARED_CACHED_DATA_REGION)) +#define RGXRISCVFW_SHARED_UNCACHED_DATA_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_SHARED_UNCACHED_DATA_REGION)) +#define RGXRISCVFW_SHARED_CACHED_DATA_REMAP (RGXRISCVFW_GET_REMAP(RGXRISCVFW_SHARED_CACHED_DATA_REGION)) +#define RGXRISCVFW_SHARED_UNCACHED_DATA_REMAP (RGXRISCVFW_GET_REMAP(RGXRISCVFW_SHARED_UNCACHED_DATA_REGION)) + + +/* The things that follow are excluded when compiling assembly sources */ +#if !defined(RGXRISCVFW_ASSEMBLY_CODE) +#include "img_types.h" + +#define RGXFW_PROCESSOR_RISCV "RISCV" +#define RGXRISCVFW_CORE_ID_VALUE (0x00450B01U) + +typedef struct +{ + IMG_UINT64 ui64CorememCodeDevVAddr; + IMG_UINT64 ui64CorememDataDevVAddr; + IMG_UINT32 ui32CorememCodeFWAddr; + IMG_UINT32 ui32CorememDataFWAddr; + IMG_UINT32 ui32CorememCodeSize; + IMG_UINT32 ui32CorememDataSize; + IMG_UINT32 ui32Flags; + IMG_UINT32 ui32Reserved; +} RGXRISCVFW_BOOT_DATA; + +#endif /* RGXRISCVFW_ASSEMBLY_CODE */ + +#endif /* RGX_RISCV_H */ diff --git a/drivers/mcst/gpu-imgtec/include/ri_typedefs.h b/drivers/mcst/gpu-imgtec/include/ri_typedefs.h new file mode 100644 index 000000000000..77be10e2ab03 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/ri_typedefs.h @@ -0,0 +1,52 @@ +/*************************************************************************/ /*! +@File +@Title Resource Information (RI) Management +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Client side part of RI management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RI_TYPEDEFS_H +#define RI_TYPEDEFS_H + +#include "img_types.h" + +typedef struct RI_SUBLIST_ENTRY RI_ENTRY; +typedef RI_ENTRY* RI_HANDLE; + +#endif /* #ifndef RI_TYPEDEFS_H */ diff --git a/drivers/mcst/gpu-imgtec/include/rogue/rgx_common.h b/drivers/mcst/gpu-imgtec/include/rogue/rgx_common.h new file mode 100644 index 000000000000..324b21e5e63c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/rogue/rgx_common.h @@ -0,0 +1,225 @@ +/*************************************************************************/ /*! +@File +@Title RGX Common Types and Defines Header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Common types and definitions for RGX software +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef RGX_COMMON_H +#define RGX_COMMON_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "img_defs.h" + +/* Included to get the BVNC_KM_N defined and other feature defs */ +#include "km/rgxdefs_km.h" + +/*! This macro represents a mask of LSBs that must be zero on data structure + * sizes and offsets to ensure they are 8-byte granular on types shared between + * the FW and host driver */ +#define RGX_FW_ALIGNMENT_LSB (7U) + +/*! Macro to test structure size alignment */ +#define RGX_FW_STRUCT_SIZE_ASSERT(_a) \ + static_assert((sizeof(_a) & RGX_FW_ALIGNMENT_LSB) == 0U, \ + "Size of " #_a " is not properly aligned") + +/*! 
Macro to test structure member alignment */ +#define RGX_FW_STRUCT_OFFSET_ASSERT(_a, _b) \ + static_assert((offsetof(_a, _b) & RGX_FW_ALIGNMENT_LSB) == 0U, \ + "Offset of " #_a "." #_b " is not properly aligned") + + +/* The following enum assumes only one of RGX_FEATURE_TLA or + * RGX_FEATURE_FASTRENDER_DM feature is present. + * In case this is no more true, fail build to fix code. */ +#if defined(RGX_FEATURE_TLA) && defined(RGX_FEATURE_FASTRENDER_DM) +#error "Both RGX_FEATURE_TLA and RGX_FEATURE_FASTRENDER_DM defined. Fix code to handle this!" +#endif + +/*! The master definition for data masters known to the firmware of RGX. + * When a new DM is added to this list, relevant entry should be added to + * RGX_HWPERF_DM enum list. + * The DM in a V1 HWPerf packet uses this definition. */ + +typedef IMG_UINT32 RGXFWIF_DM; + +#define RGXFWIF_DM_GP IMG_UINT32_C(0) +/* Either TDM or 2D DM is present. The above build time error is present to verify this */ +#define RGXFWIF_DM_2D IMG_UINT32_C(1) /* when RGX_FEATURE_TLA defined */ +#define RGXFWIF_DM_TDM IMG_UINT32_C(1) /* when RGX_FEATURE_FASTRENDER_DM defined */ + +#define RGXFWIF_DM_GEOM IMG_UINT32_C(2) +#define RGXFWIF_DM_3D IMG_UINT32_C(3) +#define RGXFWIF_DM_CDM IMG_UINT32_C(4) + +#define RGXFWIF_DM_LAST RGXFWIF_DM_CDM + +typedef enum _RGX_KICK_TYPE_DM_ +{ + RGX_KICK_TYPE_DM_GP = 0x001, + RGX_KICK_TYPE_DM_TDM_2D = 0x002, + RGX_KICK_TYPE_DM_TA = 0x004, + RGX_KICK_TYPE_DM_3D = 0x008, + RGX_KICK_TYPE_DM_CDM = 0x010, + RGX_KICK_TYPE_DM_RTU = 0x020, + RGX_KICK_TYPE_DM_SHG = 0x040, + RGX_KICK_TYPE_DM_TQ2D = 0x080, + RGX_KICK_TYPE_DM_TQ3D = 0x100, + RGX_KICK_TYPE_DM_LAST = 0x200 +} RGX_KICK_TYPE_DM; + +/* Maximum number of DM in use: GP, 2D/TDM, TA, 3D, CDM, SHG, RTU */ +#define RGXFWIF_DM_DEFAULT_MAX (RGXFWIF_DM_LAST + 1U) + +/* Maximum number of DM in use: GP, 2D/TDM, TA, 3D, CDM*/ +#define RGXFWIF_DM_MAX (5U) +#define RGXFWIF_HWDM_MAX (RGXFWIF_DM_MAX) + +/* Min/Max number of HW DMs (all but GP) */ +#if 
defined(RGX_FEATURE_TLA) +#define RGXFWIF_HWDM_MIN (1U) +#else +#if defined(RGX_FEATURE_FASTRENDER_DM) +#define RGXFWIF_HWDM_MIN (1U) +#else +#define RGXFWIF_HWDM_MIN (2U) +#endif +#endif + +/* + * Data Master Tags to be appended to resources created on behalf of each RGX + * Context. + */ +#define RGX_RI_DM_TAG_KS 'K' +#define RGX_RI_DM_TAG_CDM 'C' +#define RGX_RI_DM_TAG_RC 'R' /* To be removed once TA/3D Timelines are split */ +#define RGX_RI_DM_TAG_TA 'V' +#define RGX_RI_DM_TAG_3D 'P' +#define RGX_RI_DM_TAG_TDM 'T' +#define RGX_RI_DM_TAG_TQ2D '2' +#define RGX_RI_DM_TAG_TQ3D 'Q' + +/* + * Client API Tags to be appended to resources created on behalf of each + * Client API. + */ +#define RGX_RI_CLIENT_API_GLES1 '1' +#define RGX_RI_CLIENT_API_GLES3 '3' +#define RGX_RI_CLIENT_API_VULKAN 'V' +#define RGX_RI_CLIENT_API_EGL 'E' +#define RGX_RI_CLIENT_API_OPENCL 'C' +#define RGX_RI_CLIENT_API_OPENGL 'G' +#define RGX_RI_CLIENT_API_SERVICES 'S' +#define RGX_RI_CLIENT_API_WSEGL 'W' +#define RGX_RI_CLIENT_API_ANDROID 'A' +#define RGX_RI_CLIENT_API_LWS 'L' + +/* + * Format a RI annotation for a given RGX Data Master context + */ +#define RGX_RI_FORMAT_DM_ANNOTATION(annotation, dmTag, clientAPI) do \ + { \ + (annotation)[0] = (dmTag); \ + (annotation)[1] = (clientAPI); \ + (annotation)[2] = '\0'; \ + } while (false) + +/*! + ****************************************************************************** + * RGXFW Compiler alignment definitions + *****************************************************************************/ +#if defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES) +#define RGXFW_ALIGN __attribute__ ((aligned (8))) +#elif defined(_MSC_VER) +#define RGXFW_ALIGN __declspec(align(8)) +#pragma warning (disable : 4324) +#else +#error "Align MACROS need to be defined for this compiler" +#endif + +/*! + ****************************************************************************** + * Force 8-byte alignment for structures allocated uncached. 
+ *****************************************************************************/ +#define UNCACHED_ALIGN RGXFW_ALIGN + + +/*! + ****************************************************************************** + * GPU Utilisation states + *****************************************************************************/ +#define RGXFWIF_GPU_UTIL_STATE_IDLE (0U) +#define RGXFWIF_GPU_UTIL_STATE_ACTIVE (1U) +#define RGXFWIF_GPU_UTIL_STATE_BLOCKED (2U) +#define RGXFWIF_GPU_UTIL_STATE_NUM (3U) +#define RGXFWIF_GPU_UTIL_STATE_MASK IMG_UINT64_C(0x0000000000000003) + + +/* + * Use of the 32-bit context property flags mask + * ( X = taken/in use, - = available/unused ) + * + * 0 + * | + * -------------------------------x + */ + /* + * Context creation flags + * (specify a context's properties at creation time) + */ +#define RGX_CONTEXT_FLAG_DISABLESLR (1UL << 0) /*!< Disable SLR */ + +/* List of attributes that may be set for a context */ +typedef enum _RGX_CONTEXT_PROPERTY_ +{ + RGX_CONTEXT_PROPERTY_FLAGS = 0, /*!< Context flags */ +} RGX_CONTEXT_PROPERTY; + +#if defined(__cplusplus) +} +#endif + +#endif /* RGX_COMMON_H */ + +/****************************************************************************** + End of file +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/rogue/rgx_fwif_alignchecks.h b/drivers/mcst/gpu-imgtec/include/rogue/rgx_fwif_alignchecks.h new file mode 100644 index 000000000000..a5431fb0fe45 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/rogue/rgx_fwif_alignchecks.h @@ -0,0 +1,192 @@ +/*************************************************************************/ /*! +@File +@Title RGX fw interface alignment checks +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Checks to avoid disalignment in RGX fw data structures + shared with the host +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGX_FWIF_ALIGNCHECKS_H) +#define RGX_FWIF_ALIGNCHECKS_H + +/* for the offsetof macro */ +#if defined(__KERNEL__) && defined(LINUX) +#include +#else +#include +#endif + +/*! + ****************************************************************************** + * Alignment UM/FW checks array + *****************************************************************************/ + +#define RGXFW_ALIGN_CHECKS_UM_MAX 128U + +#define RGXFW_ALIGN_CHECKS_INIT0 \ + sizeof(RGXFWIF_TRACEBUF), \ + offsetof(RGXFWIF_TRACEBUF, ui32LogType), \ + offsetof(RGXFWIF_TRACEBUF, sTraceBuf), \ + offsetof(RGXFWIF_TRACEBUF, ui32TraceBufSizeInDWords), \ + offsetof(RGXFWIF_TRACEBUF, ui32TracebufFlags), \ + \ + sizeof(RGXFWIF_SYSDATA), \ + offsetof(RGXFWIF_SYSDATA, ePowState), \ + offsetof(RGXFWIF_SYSDATA, ui32HWPerfDropCount), \ + offsetof(RGXFWIF_SYSDATA, ui32LastDropOrdinal), \ + offsetof(RGXFWIF_SYSDATA, ui32FWFaults), \ + offsetof(RGXFWIF_SYSDATA, ui32HWRStateFlags), \ + \ + sizeof(RGXFWIF_OSDATA), \ + offsetof(RGXFWIF_OSDATA, ui32HostSyncCheckMark), \ + offsetof(RGXFWIF_OSDATA, ui32KCCBCmdsExecuted), \ + \ + sizeof(RGXFWIF_HWRINFOBUF), \ + offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmLockedUpCount), \ + offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmOverranCount), \ + offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmRecoveredCount), \ + offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmFalseDetectCount), \ + \ + /* RGXFWIF_CMDTA checks */ \ + sizeof(RGXFWIF_CMDTA), \ + offsetof(RGXFWIF_CMDTA, sGeomRegs), \ + \ + /* RGXFWIF_CMD3D checks */ \ + sizeof(RGXFWIF_CMD3D), \ + offsetof(RGXFWIF_CMD3D, s3DRegs), \ + \ + /* RGXFWIF_CMDTRANSFER checks */ \ + sizeof(RGXFWIF_CMDTRANSFER), \ + offsetof(RGXFWIF_CMDTRANSFER, sTransRegs), \ + \ + \ + /* RGXFWIF_CMD_COMPUTE checks */ \ + sizeof(RGXFWIF_CMD_COMPUTE), \ + offsetof(RGXFWIF_CMD_COMPUTE, sCDMRegs), \ + \ + /* RGXFWIF_FREELIST checks */ \ + sizeof(RGXFWIF_FREELIST), \ + 
offsetof(RGXFWIF_FREELIST, psFreeListDevVAddr), \ + offsetof(RGXFWIF_FREELIST, ui32MaxPages), \ + offsetof(RGXFWIF_FREELIST, ui32CurrentPages), \ + \ + /* RGXFWIF_HWRTDATA checks */ \ + sizeof(RGXFWIF_HWRTDATA), \ + offsetof(RGXFWIF_HWRTDATA, psVHeapTableDevVAddr), \ + offsetof(RGXFWIF_HWRTDATA, psPMMListDevVAddr), \ + offsetof(RGXFWIF_HWRTDATA, apsFreeLists), \ + offsetof(RGXFWIF_HWRTDATA, ui64VCECatBase), \ + offsetof(RGXFWIF_HWRTDATA, eState), \ + \ + /* RGXFWIF_HWRTDATA_COMMON checks */ \ + sizeof(RGXFWIF_HWRTDATA_COMMON), \ + offsetof(RGXFWIF_HWRTDATA_COMMON, bTACachesNeedZeroing),\ + \ + /* RGXFWIF_HWPERF_CTL_BLK checks */ \ + sizeof(RGXFWIF_HWPERF_CTL_BLK), \ + offsetof(RGXFWIF_HWPERF_CTL_BLK, aui64CounterCfg), \ + \ + /* RGXFWIF_HWPERF_CTL checks */ \ + sizeof(RGXFWIF_HWPERF_CTL), \ + offsetof(RGXFWIF_HWPERF_CTL, SelCntr) + +#if defined(RGX_FEATURE_TLA) +#define RGXFW_ALIGN_CHECKS_INIT1 \ + RGXFW_ALIGN_CHECKS_INIT0, \ + /* RGXFWIF_CMD2D checks */ \ + sizeof(RGXFWIF_CMD2D), \ + offsetof(RGXFWIF_CMD2D, s2DRegs) +#else +#define RGXFW_ALIGN_CHECKS_INIT1 RGXFW_ALIGN_CHECKS_INIT0 +#endif /* RGX_FEATURE_TLA */ + + +#if defined(RGX_FEATURE_FASTRENDER_DM) +#define RGXFW_ALIGN_CHECKS_INIT \ + RGXFW_ALIGN_CHECKS_INIT1, \ + /* RGXFWIF_CMDTDM checks */ \ + sizeof(RGXFWIF_CMDTDM), \ + offsetof(RGXFWIF_CMDTDM, sTDMRegs) +#else +#define RGXFW_ALIGN_CHECKS_INIT RGXFW_ALIGN_CHECKS_INIT1 +#endif /* ! RGX_FEATURE_FASTRENDER_DM */ + + + +/*! 
+ ****************************************************************************** + * Alignment KM checks array + *****************************************************************************/ + +#define RGXFW_ALIGN_CHECKS_INIT_KM \ + sizeof(RGXFWIF_SYSINIT), \ + offsetof(RGXFWIF_SYSINIT, sFaultPhysAddr), \ + offsetof(RGXFWIF_SYSINIT, sPDSExecBase), \ + offsetof(RGXFWIF_SYSINIT, sUSCExecBase), \ + offsetof(RGXFWIF_SYSINIT, asSigBufCtl), \ + offsetof(RGXFWIF_SYSINIT, sTraceBufCtl), \ + offsetof(RGXFWIF_SYSINIT, sFwSysData), \ + sizeof(RGXFWIF_OSINIT), \ + offsetof(RGXFWIF_OSINIT, psKernelCCBCtl), \ + offsetof(RGXFWIF_OSINIT, psKernelCCB), \ + offsetof(RGXFWIF_OSINIT, psFirmwareCCBCtl), \ + offsetof(RGXFWIF_OSINIT, psFirmwareCCB), \ + offsetof(RGXFWIF_OSINIT, sFwOsData), \ + offsetof(RGXFWIF_OSINIT, sRGXCompChecks), \ + \ + /* RGXFWIF_FWRENDERCONTEXT checks */ \ + sizeof(RGXFWIF_FWRENDERCONTEXT), \ + offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext), \ + offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext), \ + \ + sizeof(RGXFWIF_FWCOMMONCONTEXT), \ + offsetof(RGXFWIF_FWCOMMONCONTEXT, psFWMemContext), \ + offsetof(RGXFWIF_FWCOMMONCONTEXT, sRunNode), \ + offsetof(RGXFWIF_FWCOMMONCONTEXT, psCCB), \ + \ + sizeof(RGXFWIF_MMUCACHEDATA), \ + offsetof(RGXFWIF_MMUCACHEDATA, ui32Flags), \ + offsetof(RGXFWIF_MMUCACHEDATA, sMMUCacheSync), \ + offsetof(RGXFWIF_MMUCACHEDATA, ui32MMUCacheSyncUpdateValue) + +#endif /* RGX_FWIF_ALIGNCHECKS_H */ + +/****************************************************************************** + End of file (rgx_fwif_alignchecks.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/rogue/rgx_fwif_shared.h b/drivers/mcst/gpu-imgtec/include/rogue/rgx_fwif_shared.h new file mode 100644 index 000000000000..fc35fd869c8d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/rogue/rgx_fwif_shared.h @@ -0,0 +1,245 @@ +/*************************************************************************/ /*! 
+@File +@Title RGX firmware interface structures +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX firmware interface structures shared by both host client + and host server +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGX_FWIF_SHARED_H) +#define RGX_FWIF_SHARED_H + +#include "img_types.h" +#include "img_defs.h" +#include "rgx_common.h" +#include "powervr/mem_types.h" + +/* Maximum number of UFOs in a CCB command. + * The number is based on having 32 sync prims (as originally), plus 32 sync + * checkpoints. + * Once the use of sync prims is no longer supported, we will retain + * the same total (64) as the number of sync checkpoints which may be + * supporting a fence is not visible to the client driver and has to + * allow for the number of different timelines involved in fence merges. + */ +#define RGXFWIF_CCB_CMD_MAX_UFOS (32U+32U) + +/* + * This is a generic limit imposed on any DM (TA,3D,CDM,TDM,2D,TRANSFER) + * command passed through the bridge. + * Just across the bridge in the server, any incoming kick command size is + * checked against this maximum limit. + * In case the incoming command size is larger than the specified limit, + * the bridge call is retired with error. 
+ */ +#define RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE (1024U) + +#define RGXFWIF_PRBUFFER_START IMG_UINT32_C(0) +#define RGXFWIF_PRBUFFER_ZSBUFFER IMG_UINT32_C(0) +#define RGXFWIF_PRBUFFER_MSAABUFFER IMG_UINT32_C(1) +#define RGXFWIF_PRBUFFER_MAXSUPPORTED IMG_UINT32_C(2) + +typedef struct RGXFWIF_DEV_VIRTADDR_ +{ + IMG_UINT32 ui32Addr; +} RGXFWIF_DEV_VIRTADDR; + +typedef struct +{ + IMG_DEV_VIRTADDR RGXFW_ALIGN psDevVirtAddr; + RGXFWIF_DEV_VIRTADDR pbyFWAddr; +} UNCACHED_ALIGN RGXFWIF_DMA_ADDR; + +typedef IMG_UINT8 RGXFWIF_CCCB; + +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_UFO_ADDR; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CLEANUP_CTL; + + +typedef struct +{ + PRGXFWIF_UFO_ADDR puiAddrUFO; + IMG_UINT32 ui32Value; +} RGXFWIF_UFO; + +typedef struct +{ + IMG_UINT32 ui32SubmittedCommands; /*!< Number of commands received by the FW */ + IMG_UINT32 ui32ExecutedCommands; /*!< Number of commands executed by the FW */ +} UNCACHED_ALIGN RGXFWIF_CLEANUP_CTL; + +/* + * TA and 3D commands require set of firmware addresses that are stored in the + * Kernel. Client has handle(s) to Kernel containers storing these addresses, + * instead of raw addresses. We have to patch/write these addresses in KM to + * prevent UM from controlling FW addresses directly. + * Typedefs for TA and 3D commands are shared between Client and Firmware (both + * single-BVNC). Kernel is implemented in a multi-BVNC manner, so it can't use + * TA|3D CMD type definitions directly. Therefore we have a SHARED block that + * is shared between UM-KM-FW across all BVNC configurations. + */ +typedef struct +{ + RGXFWIF_DEV_VIRTADDR sHWRTData; /* RTData associated with this command, + this is used for context selection and for storing out HW-context, + when TA is switched out for continuing later */ + + RGXFWIF_DEV_VIRTADDR asPRBuffer[RGXFWIF_PRBUFFER_MAXSUPPORTED]; /* Supported PR Buffers like Z/S/MSAA Scratch */ + +} CMDTA3D_SHARED; + +/*! + * Client Circular Command Buffer (CCCB) control structure. 
+ * This is shared between the Server and the Firmware and holds byte offsets + * into the CCCB as well as the wrapping mask to aid wrap around. A given + * snapshot of this queue with Cmd 1 running on the GPU might be: + * + * Roff Doff Woff + * [..........|-1----------|=2===|=3===|=4===|~5~~~~|~6~~~~|~7~~~~|..........] + * < runnable commands >< !ready to run > + * + * Cmd 1 : Currently executing on the GPU data master. + * Cmd 2,3,4: Fence dependencies met, commands runnable. + * Cmd 5... : Fence dependency not met yet. + */ +typedef struct +{ + IMG_UINT32 ui32WriteOffset; /*!< Host write offset into CCB. This + * must be aligned to 16 bytes. */ + IMG_UINT32 ui32ReadOffset; /*!< Firmware read offset into CCB. + Points to the command that is + * runnable on GPU, if R!=W */ + IMG_UINT32 ui32DepOffset; /*!< Firmware fence dependency offset. + * Points to commands not ready, i.e. + * fence dependencies are not met. */ + IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask, total capacity + * in bytes of the CCB-1 */ +} UNCACHED_ALIGN RGXFWIF_CCCB_CTL; + + +typedef IMG_UINT32 RGXFW_FREELIST_TYPE; + +#define RGXFW_LOCAL_FREELIST IMG_UINT32_C(0) +#define RGXFW_GLOBAL_FREELIST IMG_UINT32_C(1) +#define RGXFW_FREELIST_TYPE_LAST RGXFW_GLOBAL_FREELIST +#define RGXFW_MAX_FREELISTS (RGXFW_FREELIST_TYPE_LAST + 1U) + + +typedef struct +{ + IMG_UINT64 uTAReg_VDM_CONTEXT_STATE_BASE_ADDR; + IMG_UINT64 uTAReg_VDM_CONTEXT_STATE_RESUME_ADDR; + IMG_UINT64 uTAReg_TA_CONTEXT_STATE_BASE_ADDR; + + struct + { + IMG_UINT64 uTAReg_VDM_CONTEXT_STORE_TASK0; + IMG_UINT64 uTAReg_VDM_CONTEXT_STORE_TASK1; + IMG_UINT64 uTAReg_VDM_CONTEXT_STORE_TASK2; + + /* VDM resume state update controls */ + IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK0; + IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK1; + IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK2; + + IMG_UINT64 uTAReg_VDM_CONTEXT_STORE_TASK3; + IMG_UINT64 uTAReg_VDM_CONTEXT_STORE_TASK4; + + IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK3; + IMG_UINT64 
uTAReg_VDM_CONTEXT_RESUME_TASK4; + } asTAState[2]; + +} RGXFWIF_TAREGISTERS_CSWITCH; + +#define RGXFWIF_TAREGISTERS_CSWITCH_SIZE sizeof(RGXFWIF_TAREGISTERS_CSWITCH) + +typedef struct +{ + IMG_UINT64 uCDMReg_CDM_CONTEXT_STATE_BASE_ADDR; + IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS0; + IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS1; + IMG_UINT64 uCDMReg_CDM_TERMINATE_PDS; + IMG_UINT64 uCDMReg_CDM_TERMINATE_PDS1; + + /* CDM resume controls */ + IMG_UINT64 uCDMReg_CDM_RESUME_PDS0; + IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS0_B; + IMG_UINT64 uCDMReg_CDM_RESUME_PDS0_B; + +} RGXFWIF_CDM_REGISTERS_CSWITCH; + +typedef struct +{ + RGXFWIF_TAREGISTERS_CSWITCH RGXFW_ALIGN sCtxSwitch_Regs; /*!< Geom registers for ctx switch */ +} RGXFWIF_STATIC_RENDERCONTEXT_STATE; + +#define RGXFWIF_STATIC_RENDERCONTEXT_SIZE sizeof(RGXFWIF_STATIC_RENDERCONTEXT_STATE) + +typedef struct +{ + RGXFWIF_CDM_REGISTERS_CSWITCH RGXFW_ALIGN sCtxSwitch_Regs; /*!< CDM registers for ctx switch */ +} RGXFWIF_STATIC_COMPUTECONTEXT_STATE; + +#define RGXFWIF_STATIC_COMPUTECONTEXT_SIZE sizeof(RGXFWIF_STATIC_COMPUTECONTEXT_STATE) + +typedef IMG_UINT32 RGXFWIF_PRBUFFER_TYPE; + +typedef enum +{ + RGXFWIF_PRBUFFER_UNBACKED = 0, + RGXFWIF_PRBUFFER_BACKED, + RGXFWIF_PRBUFFER_BACKING_PENDING, + RGXFWIF_PRBUFFER_UNBACKING_PENDING, +}RGXFWIF_PRBUFFER_STATE; + +typedef struct +{ + IMG_UINT32 ui32BufferID; /*!< Buffer ID*/ + IMG_BOOL bOnDemand; /*!< Needs On-demand Z/S/MSAA Buffer allocation */ + RGXFWIF_PRBUFFER_STATE eState; /*!< Z/S/MSAA -Buffer state */ + RGXFWIF_CLEANUP_CTL sCleanupState; /*!< Cleanup state */ + IMG_UINT32 ui32PRBufferFlags; /*!< Compatibility and other flags */ +} UNCACHED_ALIGN RGXFWIF_PRBUFFER; + + +#endif /* RGX_FWIF_SHARED_H */ + +/****************************************************************************** + End of file (rgx_fwif_shared.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/rogue/rgx_heaps.h 
b/drivers/mcst/gpu-imgtec/include/rogue/rgx_heaps.h new file mode 100644 index 000000000000..c923804ea426 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/rogue/rgx_heaps.h @@ -0,0 +1,165 @@ +/*************************************************************************/ /*! +@File +@Title RGX heap definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGX_HEAPS_H) +#define RGX_HEAPS_H + +#include "km/rgxdefs_km.h" +#include "img_defs.h" +#include "log2.h" +#include "pvr_debug.h" + +/* RGX Heap IDs, note: not all heaps are available to clients */ +#define RGX_UNDEFINED_HEAP_ID (~0LU) /*!< RGX Undefined Heap ID */ +#define RGX_GENERAL_SVM_HEAP_ID 0 /*!< RGX General SVM (shared virtual memory) Heap ID */ +#define RGX_GENERAL_HEAP_ID 1 /*!< RGX General Heap ID */ +#define RGX_GENERAL_NON4K_HEAP_ID 2 /*!< RGX General none-4K Heap ID */ +#define RGX_RGNHDR_BRN_63142_HEAP_ID 3 /*!< RGX RgnHdr BRN63142 Heap ID */ +#define RGX_MMU_INIA_BRN_65273_ID 4 /*!< RGX MMU INIA Heap ID */ +#define RGX_MMU_INIB_BRN_65273_ID 5 /*!< RGX MMU INIB Heap ID */ +#define RGX_PDSCODEDATA_HEAP_ID 6 /*!< RGX PDS Code/Data Heap ID */ +#define RGX_USCCODE_HEAP_ID 7 /*!< RGX USC Code Heap ID */ +#define RGX_FIRMWARE_MAIN_HEAP_ID 8 /*!< RGX Main Firmware Heap ID */ +#define RGX_TQ3DPARAMETERS_HEAP_ID 9 /*!< RGX Firmware Heap ID */ +#define RGX_SIGNALS_HEAP_ID 10 /*!< Signals Heap ID */ +#define RGX_TDM_TPU_YUV_COEFFS_HEAP_ID 11 +#define RGX_FIRMWARE_CONFIG_HEAP_ID 12 /*!< Additional OSIDs Firmware */ +#define RGX_GUEST_FIRMWARE_RAW_HEAP_ID 13 /*!< Additional OSIDs Firmware */ +#define RGX_MAX_HEAP_ID (RGX_GUEST_FIRMWARE_RAW_HEAP_ID + RGX_NUM_OS_SUPPORTED) /*!< Max Valid Heap ID */ + +/* + Following heaps from the above HEAP 
IDs can have virtual address space reserved at the start + of the heap. Offsets within this reserved range are intended to be shared between RGX clients + and FW. Naming convention for these macros: Just replace the 'ID' suffix by 'RESERVED_SIZE' + in heap ID macros. + + Reserved VA space of a heap must always be multiple of RGX_HEAP_RESERVED_SIZE_GRANULARITY, + this check is validated in the DDK. Note this is only reserving "Virtual Address" space and + physical allocations (and mappings thereon) should only be done as much as required (to avoid + wastage). + Granularity has been chosen to support the max possible practically used OS page size. +*/ +#define RGX_HEAP_RESERVED_SIZE_GRANULARITY 0x10000 /* 64KB is MAX anticipated OS page size */ +#define RGX_GENERAL_HEAP_RESERVED_SIZE (1 * RGX_HEAP_RESERVED_SIZE_GRANULARITY) +#define RGX_VK_CAPT_REPLAY_BUF_HEAP_RESERVED_SIZE (0) +#define RGX_PDSCODEDATA_HEAP_RESERVED_SIZE (1 * RGX_HEAP_RESERVED_SIZE_GRANULARITY) +#define RGX_USCCODE_HEAP_RESERVED_SIZE (1 * RGX_HEAP_RESERVED_SIZE_GRANULARITY) +#define RGX_SIGNALS_HEAP_RESERVED_SIZE 0 +#define RGX_VISTEST_HEAP_RESERVED_SIZE 0 + +/* + Identify heaps by their names +*/ +#define RGX_GENERAL_SVM_HEAP_IDENT "General SVM" /*!< RGX General SVM (shared virtual memory) Heap Identifier */ +#define RGX_GENERAL_HEAP_IDENT "General" /*!< RGX General Heap Identifier */ +#define RGX_VK_CAPT_REPLAY_BUF_HEAP_IDENT "Vulkan capture replay buffer" /*!< RGX vulkan capture replay buffer Heap Identifier */ +#define RGX_GENERAL_NON4K_HEAP_IDENT "General NON-4K" /*!< RGX General non-4K Heap Identifier */ +#define RGX_RGNHDR_BRN_63142_HEAP_IDENT "RgnHdr BRN63142" /*!< RGX RgnHdr BRN63142 Heap Identifier */ +#define RGX_MMU_INIA_BRN_65273_HEAP_IDENT "MMU INIA BRN65273" /*!< MMU BRN65273 Heap A Identifier */ +#define RGX_MMU_INIB_BRN_65273_HEAP_IDENT "MMU INIB BRN65273" /*!< MMU BRN65273 Heap B Identifier */ +#define RGX_PDSCODEDATA_HEAP_IDENT "PDS Code and Data" /*!< RGX PDS Code/Data Heap 
Identifier */ +#define RGX_USCCODE_HEAP_IDENT "USC Code" /*!< RGX USC Code Heap Identifier */ +#define RGX_TQ3DPARAMETERS_HEAP_IDENT "TQ3DParameters" /*!< RGX TQ 3D Parameters Heap Identifier */ +#define RGX_SIGNALS_HEAP_IDENT "Signals" /*!< Signals Heap Identifier */ +#define RGX_VISTEST_HEAP_IDENT "VisTest" /*!< VisTest heap */ +#define RGX_TDM_TPU_YUV_COEFFS_HEAP_IDENT "TDM TPU YUV Coeffs" +#define RGX_FIRMWARE_MAIN_HEAP_IDENT "FwMain" /*!< RGX Main Firmware Heap identifier */ +#define RGX_FIRMWARE_CONFIG_HEAP_IDENT "FwConfig" /*!< RGX Config firmware Heap identifier */ +#define RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT "FwRawOSID%d" /*!< RGX Raw Firmware Heap identifier */ +/* + * Supported log2 page size values for RGX_GENERAL_NON_4K_HEAP_ID + */ +#define RGX_HEAP_4KB_PAGE_SHIFT (12U) +#define RGX_HEAP_16KB_PAGE_SHIFT (14U) +#define RGX_HEAP_64KB_PAGE_SHIFT (16U) +#define RGX_HEAP_256KB_PAGE_SHIFT (18U) +#define RGX_HEAP_1MB_PAGE_SHIFT (20U) +#define RGX_HEAP_2MB_PAGE_SHIFT (21U) + +/* Takes a log2 page size parameter and calculates a suitable page size + * for the RGX heaps. 
Returns 0 if parameter is wrong.*/ +static INLINE IMG_UINT32 RGXHeapDerivePageSize(IMG_UINT32 uiLog2PageSize) +{ + IMG_BOOL bFound = IMG_FALSE; + + /* OS page shift must be at least RGX_HEAP_4KB_PAGE_SHIFT, + * max RGX_HEAP_2MB_PAGE_SHIFT, non-zero and a power of two*/ + if (uiLog2PageSize == 0U || + (uiLog2PageSize < RGX_HEAP_4KB_PAGE_SHIFT) || + (uiLog2PageSize > RGX_HEAP_2MB_PAGE_SHIFT)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Provided incompatible log2 page size %u", + __func__, + uiLog2PageSize)); + PVR_ASSERT(0); + return 0; + } + + do + { + switch (uiLog2PageSize) + { + case RGX_HEAP_4KB_PAGE_SHIFT: + case RGX_HEAP_16KB_PAGE_SHIFT: + case RGX_HEAP_64KB_PAGE_SHIFT: + case RGX_HEAP_256KB_PAGE_SHIFT: + case RGX_HEAP_1MB_PAGE_SHIFT: + case RGX_HEAP_2MB_PAGE_SHIFT: + /* All good, RGX page size equals given page size + * => use it as default for heaps */ + bFound = IMG_TRUE; + break; + default: + /* We have to fall back to a smaller device + * page size than given page size because there + * is no exact match for any supported size. */ + uiLog2PageSize -= 1U; + break; + } + } while (!bFound); + + return uiLog2PageSize; +} + + +#endif /* RGX_HEAPS_H */ diff --git a/drivers/mcst/gpu-imgtec/include/rogue/rgx_hwperf.h b/drivers/mcst/gpu-imgtec/include/rogue/rgx_hwperf.h new file mode 100644 index 000000000000..dfb72d32fcbf --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/rogue/rgx_hwperf.h @@ -0,0 +1,1621 @@ +/*************************************************************************/ /*! +@File +@Title RGX HWPerf and Debug Types and Defines Header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Common data types definitions for hardware performance API +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef RGX_HWPERF_H_ +#define RGX_HWPERF_H_ + +#if defined(__cplusplus) +extern "C" { +#endif + +/* These structures are used on both GPU and CPU and must be a size that is a + * multiple of 64 bits, 8 bytes to allow the FW to write 8 byte quantities at + * 8 byte aligned addresses. RGX_FW_STRUCT_*_ASSERT() is used to check this. + */ + +/****************************************************************************** + * Includes and Defines + *****************************************************************************/ + +#include "img_types.h" +#include "img_defs.h" + +#include "rgx_common.h" +#include "pvrsrv_tlcommon.h" +#include "pvrsrv_sync_km.h" + + +#if !defined(__KERNEL__) +/* User-mode and Firmware definitions only */ + +#if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER) + +/* HWPerf interface assumption checks */ +static_assert(RGX_FEATURE_NUM_CLUSTERS <= 16U, "Cluster count too large for HWPerf protocol definition"); + +/*! The number of indirectly addressable TPU_MSC blocks in the GPU */ +# define RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST MAX(((IMG_UINT32)RGX_FEATURE_NUM_CLUSTERS >> 1), 1U) + +/*! The number of indirectly addressable USC blocks in the GPU */ +# define RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER (RGX_FEATURE_NUM_CLUSTERS) + +# if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) + + /*! Defines the number of performance counter blocks that are directly + * addressable in the RGX register map for S. */ +# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS 1 /* JONES */ +# define RGX_HWPERF_INDIRECT_BY_PHANTOM (RGX_NUM_PHANTOMS) +# define RGX_HWPERF_PHANTOM_NONDUST_BLKS 1 /* BLACKPEARL */ +# define RGX_HWPERF_PHANTOM_DUST_BLKS 2 /* TPU, TEXAS */ +# define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 2 /* USC, PBE */ +# define RGX_HWPERF_DOPPLER_BX_TU_BLKS 0 + +# elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) + + /*! 
Defines the number of performance counter blocks that are directly + * addressable in the RGX register map. */ +# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS 2 /* TORNADO, TA */ +# define RGX_HWPERF_DOPPLER_BX_TU_BLKS 0 + +# define RGX_HWPERF_INDIRECT_BY_PHANTOM (RGX_NUM_PHANTOMS) +# define RGX_HWPERF_PHANTOM_NONDUST_BLKS 2 /* RASTER, TEXAS */ +# define RGX_HWPERF_PHANTOM_DUST_BLKS 1 /* TPU */ +# define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 1 /* USC */ + +# else /* !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && !defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) i.e. S6 */ + + /*! Defines the number of performance counter blocks that are + * addressable in the RGX register map for Series 6. */ +# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS 3 /* TA, RASTER, HUB */ +# define RGX_HWPERF_INDIRECT_BY_PHANTOM 0 /* PHANTOM is not there in Rogue1. Just using it to keep naming same as later series (RogueXT n Rogue XT+) */ +# define RGX_HWPERF_PHANTOM_NONDUST_BLKS 0 +# define RGX_HWPERF_PHANTOM_DUST_BLKS 1 /* TPU */ +# define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 1 /* USC */ +# define RGX_HWPERF_DOPPLER_BX_TU_BLKS 0 + +# endif + +/*! 
The number of performance counters in each layout block defined for UM/FW code */ +#if defined(RGX_FEATURE_CLUSTER_GROUPING) + #define RGX_HWPERF_CNTRS_IN_BLK 6 + #else + #define RGX_HWPERF_CNTRS_IN_BLK 4 +#endif + +#endif /* #if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER) */ +#else /* defined(__KERNEL__) */ +/* Kernel/server definitions - not used, hence invalid definitions */ + +# define RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC 0xFF + +# define RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC +# define RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC + +# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC +# define RGX_HWPERF_INDIRECT_BY_PHANTOM RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC +# define RGX_HWPERF_PHANTOM_NONDUST_BLKS RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC +# define RGX_HWPERF_PHANTOM_DUST_BLKS RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC +# define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC + +# define RGX_HWPERF_DOPPLER_BX_TU_BLKS 0U + +#endif + +/*! The number of custom non-mux counter blocks supported */ +#define RGX_HWPERF_MAX_CUSTOM_BLKS 5U + +/*! The number of counters supported in each non-mux counter block */ +#define RGX_HWPERF_MAX_CUSTOM_CNTRS 8U + + +/****************************************************************************** + * Packet Event Type Enumerations + *****************************************************************************/ + +/*! Type used to encode the event that generated the packet. + * NOTE: When this type is updated the corresponding hwperfbin2json tool + * source needs to be updated as well. The RGX_HWPERF_EVENT_MASK_* macros will + * also need updating when adding new types. 
+ */ +typedef IMG_UINT32 RGX_HWPERF_EVENT_TYPE; + +#define RGX_HWPERF_INVALID 0x00U + +/* FW types 0x01..0x06 */ +#define RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE 0x01U + +#define RGX_HWPERF_FW_BGSTART 0x01U +#define RGX_HWPERF_FW_BGEND 0x02U +#define RGX_HWPERF_FW_IRQSTART 0x03U + +#define RGX_HWPERF_FW_IRQEND 0x04U +#define RGX_HWPERF_FW_DBGSTART 0x05U +#define RGX_HWPERF_FW_DBGEND 0x06U + +#define RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE 0x06U + +/* HW types 0x07..0x19 */ +#define RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE 0x07U + +#define RGX_HWPERF_HW_PMOOM_TAPAUSE 0x07U + +#define RGX_HWPERF_HW_TAKICK 0x08U +#define RGX_HWPERF_HW_TAFINISHED 0x09U +#define RGX_HWPERF_HW_3DTQKICK 0x0AU +#define RGX_HWPERF_HW_3DKICK 0x0BU +#define RGX_HWPERF_HW_3DFINISHED 0x0CU +#define RGX_HWPERF_HW_CDMKICK 0x0DU +#define RGX_HWPERF_HW_CDMFINISHED 0x0EU +#define RGX_HWPERF_HW_TLAKICK 0x0FU +#define RGX_HWPERF_HW_TLAFINISHED 0x10U +#define RGX_HWPERF_HW_3DSPMKICK 0x11U +#define RGX_HWPERF_HW_PERIODIC 0x12U +#define RGX_HWPERF_HW_RTUKICK 0x13U +#define RGX_HWPERF_HW_RTUFINISHED 0x14U +#define RGX_HWPERF_HW_SHGKICK 0x15U +#define RGX_HWPERF_HW_SHGFINISHED 0x16U +#define RGX_HWPERF_HW_3DTQFINISHED 0x17U +#define RGX_HWPERF_HW_3DSPMFINISHED 0x18U + +#define RGX_HWPERF_HW_PMOOM_TARESUME 0x19U + +/* HW_EVENT_RANGE0 used up. 
Use next empty range below to add new hardware events */ +#define RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE 0x19U + + /* other types 0x1A..0x1F */ +#define RGX_HWPERF_CLKS_CHG 0x1AU +#define RGX_HWPERF_GPU_STATE_CHG 0x1BU + +/* power types 0x20..0x27 */ +#define RGX_HWPERF_PWR_EST_RANGE_FIRST_TYPE 0x20U +#define RGX_HWPERF_PWR_EST_REQUEST 0x20U +#define RGX_HWPERF_PWR_EST_READY 0x21U +#define RGX_HWPERF_PWR_EST_RESULT 0x22U +#define RGX_HWPERF_PWR_EST_RANGE_LAST_TYPE 0x22U + +#define RGX_HWPERF_PWR_CHG 0x23U + +/* HW_EVENT_RANGE1 0x28..0x2F, for accommodating new hardware events */ +#define RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE 0x28U + +#define RGX_HWPERF_HW_TDMKICK 0x28U +#define RGX_HWPERF_HW_TDMFINISHED 0x29U +#define RGX_HWPERF_HW_NULLKICK 0x2AU + +#define RGX_HWPERF_HW_EVENT_RANGE1_LAST_TYPE 0x2AU + +/* context switch types 0x30..0x31 */ +#define RGX_HWPERF_CSW_START 0x30U +#define RGX_HWPERF_CSW_FINISHED 0x31U + +/* DVFS events */ +#define RGX_HWPERF_DVFS 0x32U + +/* firmware misc 0x38..0x39 */ +#define RGX_HWPERF_UFO 0x38U +#define RGX_HWPERF_FWACT 0x39U + +/* last */ +#define RGX_HWPERF_LAST_TYPE 0x3BU + +/* This enumeration must have a value that is a power of two as it is + * used in masks and a filter bit field (currently 64 bits long). + */ +#define RGX_HWPERF_MAX_TYPE 0x40U + + +/* The event type values are incrementing integers for use as a shift ordinal + * in the event filtering process at the point events are generated. + * This scheme thus implies a limit of 63 event types. 
+ */ +static_assert(RGX_HWPERF_LAST_TYPE < RGX_HWPERF_MAX_TYPE, "Too many HWPerf event types"); + +/* Macro used to check if an event type ID is present in the known set of hardware type events */ +#define HWPERF_PACKET_IS_HW_TYPE(_etype) (((_etype) >= RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE && (_etype) <= RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE) || \ + ((_etype) >= RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE && (_etype) <= RGX_HWPERF_HW_EVENT_RANGE1_LAST_TYPE)) + +#define HWPERF_PACKET_IS_FW_TYPE(_etype) \ + ((_etype) >= RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE && \ + (_etype) <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE) + + +typedef enum { + RGX_HWPERF_HOST_INVALID = 0x00, + RGX_HWPERF_HOST_ENQ = 0x01, + RGX_HWPERF_HOST_UFO = 0x02, + RGX_HWPERF_HOST_ALLOC = 0x03, + RGX_HWPERF_HOST_CLK_SYNC = 0x04, + RGX_HWPERF_HOST_FREE = 0x05, + RGX_HWPERF_HOST_MODIFY = 0x06, + RGX_HWPERF_HOST_DEV_INFO = 0x07, + RGX_HWPERF_HOST_INFO = 0x08, + RGX_HWPERF_HOST_SYNC_FENCE_WAIT = 0x09, + RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE = 0x0A, + + /* last */ + RGX_HWPERF_HOST_LAST_TYPE, + + /* This enumeration must have a value that is a power of two as it is + * used in masks and a filter bit field (currently 32 bits long). + */ + RGX_HWPERF_HOST_MAX_TYPE = 0x20 +} RGX_HWPERF_HOST_EVENT_TYPE; + +/* The event type values are incrementing integers for use as a shift ordinal + * in the event filtering process at the point events are generated. + * This scheme thus implies a limit of 31 event types. + */ +static_assert(RGX_HWPERF_HOST_LAST_TYPE < RGX_HWPERF_HOST_MAX_TYPE, "Too many HWPerf host event types"); + + +/****************************************************************************** + * Packet Header Format Version 2 Types + *****************************************************************************/ + +/*! Major version number of the protocol in operation + */ +#define RGX_HWPERF_V2_FORMAT 2 + +/*! 
Signature ASCII pattern 'HWP2' found in the first word of a HWPerfV2 packet + */ +#define HWPERF_PACKET_V2_SIG 0x48575032 + +/*! Signature ASCII pattern 'HWPA' found in the first word of a HWPerfV2a packet + */ +#define HWPERF_PACKET_V2A_SIG 0x48575041 + +/*! Signature ASCII pattern 'HWPB' found in the first word of a HWPerfV2b packet + */ +#define HWPERF_PACKET_V2B_SIG 0x48575042 + +#define HWPERF_PACKET_ISVALID(_ptr) (((_ptr) == HWPERF_PACKET_V2_SIG) || ((_ptr) == HWPERF_PACKET_V2A_SIG)|| ((_ptr) == HWPERF_PACKET_V2B_SIG)) + +/*! Type defines the HWPerf packet header common to all events. */ +typedef struct +{ + IMG_UINT32 ui32Sig; /*!< Always the value HWPERF_PACKET_SIG */ + IMG_UINT32 ui32Size; /*!< Overall packet size in bytes */ + IMG_UINT32 eTypeId; /*!< Event type information field */ + IMG_UINT32 ui32Ordinal; /*!< Sequential number of the packet */ + IMG_UINT64 ui64Timestamp; /*!< Event timestamp */ +} RGX_HWPERF_V2_PACKET_HDR, *RGX_PHWPERF_V2_PACKET_HDR; + +#ifndef __CHECKER__ +RGX_FW_STRUCT_OFFSET_ASSERT(RGX_HWPERF_V2_PACKET_HDR, ui64Timestamp); + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_HDR); +#endif + + +/*! Mask for use with the IMG_UINT32 ui32Size header field */ +#define RGX_HWPERF_SIZE_MASK 0xFFFFU + +/*! This macro defines an upper limit to which the size of the largest variable + * length HWPerf packet must fall within, currently 3KB. This constant may be + * used to allocate a buffer to hold one packet. + * This upper limit is policed by packet producing code. + */ +#define RGX_HWPERF_MAX_PACKET_SIZE 0xC00U + +/*! Defines an upper limit to the size of a variable length packet payload. + */ +#define RGX_HWPERF_MAX_PAYLOAD_SIZE ((IMG_UINT32)(RGX_HWPERF_MAX_PACKET_SIZE-\ + sizeof(RGX_HWPERF_V2_PACKET_HDR))) + + +/*! Macro which takes a structure name and provides the packet size for + * a fixed size payload packet, rounded up to 8 bytes to align packets + * for 64 bit architectures. 
*/ +#define RGX_HWPERF_MAKE_SIZE_FIXED(_struct) ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&(sizeof(RGX_HWPERF_V2_PACKET_HDR)+PVR_ALIGN(sizeof(_struct), PVRSRVTL_PACKET_ALIGNMENT)))) + +/*! Macro which takes the number of bytes written in the data payload of a + * packet for a variable size payload packet, rounded up to 8 bytes to + * align packets for 64 bit architectures. */ +#define RGX_HWPERF_MAKE_SIZE_VARIABLE(_size) ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&(sizeof(RGX_HWPERF_V2_PACKET_HDR)+PVR_ALIGN((_size), PVRSRVTL_PACKET_ALIGNMENT)))) + +/*! Macro to obtain the size of the packet */ +#define RGX_HWPERF_GET_SIZE(_packet_addr) ((IMG_UINT16)(((_packet_addr)->ui32Size) & RGX_HWPERF_SIZE_MASK)) + +/*! Macro to obtain the size of the packet data */ +#define RGX_HWPERF_GET_DATA_SIZE(_packet_addr) (RGX_HWPERF_GET_SIZE(_packet_addr) - sizeof(RGX_HWPERF_V2_PACKET_HDR)) + + + +/*! Masks for use with the IMG_UINT32 eTypeId header field */ +#define RGX_HWPERF_TYPEID_MASK 0x7FFFFU +#define RGX_HWPERF_TYPEID_EVENT_MASK 0x07FFFU +#define RGX_HWPERF_TYPEID_THREAD_MASK 0x08000U +#define RGX_HWPERF_TYPEID_STREAM_MASK 0x70000U +#define RGX_HWPERF_TYPEID_META_DMA_MASK 0x80000U +#define RGX_HWPERF_TYPEID_OSID_MASK 0xFF000000U + +/*! Meta thread macros for encoding the ID into the type field of a packet */ +#define RGX_HWPERF_META_THREAD_SHIFT 15U +#define RGX_HWPERF_META_THREAD_ID0 0x0U +#define RGX_HWPERF_META_THREAD_ID1 0x1U +/*! Obsolete, kept for source compatibility */ +#define RGX_HWPERF_META_THREAD_MASK 0x1U +/*! Stream ID macros for encoding the ID into the type field of a packet */ +#define RGX_HWPERF_STREAM_SHIFT 16U +/*! Meta DMA macro for encoding how the packet was generated into the type field of a packet */ +#define RGX_HWPERF_META_DMA_SHIFT 19U +/*! 
OSID bit-shift macro used for encoding OSID into type field of a packet */ +#define RGX_HWPERF_OSID_SHIFT 24U +typedef enum { + RGX_HWPERF_STREAM_ID0_FW, /*!< Events from the Firmware/GPU */ + RGX_HWPERF_STREAM_ID1_HOST, /*!< Events from the Server host driver component */ + RGX_HWPERF_STREAM_ID2_CLIENT, /*!< Events from the Client host driver component */ + RGX_HWPERF_STREAM_ID_LAST, +} RGX_HWPERF_STREAM_ID; + +/* Checks if all stream IDs can fit under RGX_HWPERF_TYPEID_STREAM_MASK. */ +static_assert(((IMG_UINT32)RGX_HWPERF_STREAM_ID_LAST - 1U) < (RGX_HWPERF_TYPEID_STREAM_MASK >> RGX_HWPERF_STREAM_SHIFT), + "Too many HWPerf stream IDs."); + +/*! Macros used to set the packet type and encode meta thread ID (0|1), HWPerf stream ID, and OSID within */ +#define RGX_HWPERF_MAKE_TYPEID(_stream, _type, _thread, _metadma, _osid)\ + ((IMG_UINT32) ((RGX_HWPERF_TYPEID_STREAM_MASK&((IMG_UINT32)(_stream) << RGX_HWPERF_STREAM_SHIFT)) | \ + (RGX_HWPERF_TYPEID_THREAD_MASK & ((IMG_UINT32)(_thread) << RGX_HWPERF_META_THREAD_SHIFT)) | \ + (RGX_HWPERF_TYPEID_EVENT_MASK & (IMG_UINT32)(_type)) | \ + (RGX_HWPERF_TYPEID_META_DMA_MASK & ((IMG_UINT32)(_metadma) << RGX_HWPERF_META_DMA_SHIFT)) | \ + (RGX_HWPERF_TYPEID_OSID_MASK & ((IMG_UINT32)(_osid) << RGX_HWPERF_OSID_SHIFT)))) + +/*! Obtains the event type that generated the packet */ +#define RGX_HWPERF_GET_TYPE(_packet_addr) (((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_EVENT_MASK) + +/*! Obtains the META Thread number that generated the packet */ +#define RGX_HWPERF_GET_THREAD_ID(_packet_addr) (((((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_THREAD_MASK) >> RGX_HWPERF_META_THREAD_SHIFT)) + +/*! Obtains the guest OSID which resulted in packet generation */ +#define RGX_HWPERF_GET_OSID(_packet_addr) (((_packet_addr)->eTypeId & RGX_HWPERF_TYPEID_OSID_MASK) >> RGX_HWPERF_OSID_SHIFT) + +/*! 
Obtain stream id */ +#define RGX_HWPERF_GET_STREAM_ID(_packet_addr) (((((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_STREAM_MASK) >> RGX_HWPERF_STREAM_SHIFT)) + +/*! Obtain information about how the packet was generated, which might affect payload total size */ +#define RGX_HWPERF_GET_META_DMA_INFO(_packet_addr) (((((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_META_DMA_MASK) >> RGX_HWPERF_META_DMA_SHIFT)) + +/*! Macros to obtain a typed pointer to a packet or data structure given a packet address */ +#define RGX_HWPERF_GET_PACKET(_buffer_addr) ((RGX_HWPERF_V2_PACKET_HDR *)(void *) (_buffer_addr)) +#define RGX_HWPERF_GET_PACKET_DATA_BYTES(_packet_addr) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR))) +#define RGX_HWPERF_GET_NEXT_PACKET(_packet_addr) ((RGX_HWPERF_V2_PACKET_HDR *) (IMG_OFFSET_ADDR((_packet_addr), RGX_HWPERF_SIZE_MASK&((_packet_addr)->ui32Size)))) + +/*! Obtains a typed pointer to a packet header given the packed data address */ +#define RGX_HWPERF_GET_PACKET_HEADER(_packet_addr) ((RGX_HWPERF_V2_PACKET_HDR *) (IMG_OFFSET_ADDR((_packet_addr), -sizeof(RGX_HWPERF_V2_PACKET_HDR)))) + + +/****************************************************************************** + * Other Common Defines + *****************************************************************************/ + +/* This macro is not a real array size, but indicates the array has a variable + * length only known at run-time but always contains at least 1 element. The + * final size of the array is deduced from the size field of a packet header. + */ +#define RGX_HWPERF_ONE_OR_MORE_ELEMENTS 1U + +/* This macro is not a real array size, but indicates the array is optional + * and if present has a variable length only known at run-time. The final + * size of the array is deduced from the size field of a packet header. */ +#define RGX_HWPERF_ZERO_OR_MORE_ELEMENTS 1U + + +/*! 
Masks for use with the IMG_UINT32 ui32BlkInfo field */ +#define RGX_HWPERF_BLKINFO_BLKCOUNT_MASK 0xFFFF0000U +#define RGX_HWPERF_BLKINFO_BLKOFFSET_MASK 0x0000FFFFU + +/*! Shift for the NumBlocks and counter block offset field in ui32BlkInfo */ +#define RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT 16U +#define RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT 0U + +/*! Macro used to set the block info word as a combination of two 16-bit integers */ +#define RGX_HWPERF_MAKE_BLKINFO(_numblks, _blkoffset) ((IMG_UINT32) ((RGX_HWPERF_BLKINFO_BLKCOUNT_MASK&((_numblks) << RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT)) | (RGX_HWPERF_BLKINFO_BLKOFFSET_MASK&((_blkoffset) << RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT)))) + +/*! Macro used to obtain get the number of counter blocks present in the packet */ +#define RGX_HWPERF_GET_BLKCOUNT(_blkinfo) (((_blkinfo) & RGX_HWPERF_BLKINFO_BLKCOUNT_MASK) >> RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT) + +/*! Obtains the offset of the counter block stream in the packet */ +#define RGX_HWPERF_GET_BLKOFFSET(_blkinfo) (((_blkinfo) & RGX_HWPERF_BLKINFO_BLKOFFSET_MASK) >> RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT) + +/* This macro gets the number of blocks depending on the packet version */ +#define RGX_HWPERF_GET_NUMBLKS(_sig, _packet_data, _numblocks) \ + if (HWPERF_PACKET_V2B_SIG == (_sig)) \ + { \ + (_numblocks) = RGX_HWPERF_GET_BLKCOUNT((_packet_data)->ui32BlkInfo); \ + } \ + else \ + { \ + IMG_UINT32 ui32VersionOffset = (((_sig) == HWPERF_PACKET_V2_SIG) ? 
1 : 3); \ + (_numblocks) = *(IMG_UINT16 *)(IMG_OFFSET_ADDR(&(_packet_data)->ui32WorkTarget, ui32VersionOffset)); \ + } + +/* This macro gets the counter stream pointer depending on the packet version */ +#define RGX_HWPERF_GET_CNTSTRM(_sig, _hw_packet_data, _cntstream_ptr) \ +{ \ + if (HWPERF_PACKET_V2B_SIG == (_sig)) \ + { \ + (_cntstream_ptr) = (IMG_UINT32 *)(IMG_OFFSET_ADDR((_hw_packet_data), RGX_HWPERF_GET_BLKOFFSET((_hw_packet_data)->ui32BlkInfo))); \ + } \ + else \ + { \ + IMG_UINT32 ui32BlkStreamOffsetInWords = (((_sig) == HWPERF_PACKET_V2_SIG) ? 6 : 8); \ + (_cntstream_ptr) = (IMG_UINT32 *)(IMG_OFFSET_ADDR((_hw_packet_data), ui32BlkStreamOffsetInWords)); \ + } \ +} + +/* This is the maximum frame contexts that are supported in the driver at the moment */ +#define RGX_HWPERF_HW_MAX_WORK_CONTEXT 2 + +/*! Masks for use with the RGX_HWPERF_UFO_EV eEvType field */ +#define RGX_HWPERF_UFO_STREAMSIZE_MASK 0xFFFF0000U +#define RGX_HWPERF_UFO_STREAMOFFSET_MASK 0x0000FFFFU + +/*! Shift for the UFO count and data stream fields */ +#define RGX_HWPERF_UFO_STREAMSIZE_SHIFT 16U +#define RGX_HWPERF_UFO_STREAMOFFSET_SHIFT 0U + +/*! Macro used to set UFO stream info word as a combination of two 16-bit integers */ +#define RGX_HWPERF_MAKE_UFOPKTINFO(_ssize, _soff) \ + ((IMG_UINT32) ((RGX_HWPERF_UFO_STREAMSIZE_MASK&((_ssize) << RGX_HWPERF_UFO_STREAMSIZE_SHIFT)) | \ + (RGX_HWPERF_UFO_STREAMOFFSET_MASK&((_soff) << RGX_HWPERF_UFO_STREAMOFFSET_SHIFT)))) + +/*! Macro used to obtain UFO count*/ +#define RGX_HWPERF_GET_UFO_STREAMSIZE(_streaminfo) \ + (((_streaminfo) & RGX_HWPERF_UFO_STREAMSIZE_MASK) >> RGX_HWPERF_UFO_STREAMSIZE_SHIFT) + +/*! 
Obtains the offset of the UFO stream in the packet */ +#define RGX_HWPERF_GET_UFO_STREAMOFFSET(_streaminfo) \ + (((_streaminfo) & RGX_HWPERF_UFO_STREAMOFFSET_MASK) >> RGX_HWPERF_UFO_STREAMOFFSET_SHIFT) + + + +/****************************************************************************** + * Data Stream Common Types + *****************************************************************************/ + +/* All the Data Masters HWPerf is aware of. When a new DM is added to this + * list, it should be appended at the end to maintain backward compatibility + * of HWPerf data */ +typedef enum { + + RGX_HWPERF_DM_GP, + RGX_HWPERF_DM_2D, + RGX_HWPERF_DM_TA, + RGX_HWPERF_DM_3D, + RGX_HWPERF_DM_CDM, + RGX_HWPERF_DM_RTU, + RGX_HWPERF_DM_SHG, + RGX_HWPERF_DM_TDM, + + RGX_HWPERF_DM_LAST, + + RGX_HWPERF_DM_INVALID = 0x1FFFFFFF +} RGX_HWPERF_DM; + +/* Enum containing bit pos for 32bit feature flags used in hwperf and api */ +typedef enum { + RGX_HWPERF_FEATURE_PERFBUS_FLAG = 0x001, + RGX_HWPERF_FEATURE_S7_TOP_INFRASTRUCTURE_FLAG = 0x002, + RGX_HWPERF_FEATURE_XT_TOP_INFRASTRUCTURE_FLAG = 0x004, + RGX_HWPERF_FEATURE_PERF_COUNTER_BATCH_FLAG = 0x008, + RGX_HWPERF_FEATURE_ROGUEXE_FLAG = 0x010, + RGX_HWPERF_FEATURE_DUST_POWER_ISLAND_S7_FLAG = 0x020, + RGX_HWPERF_FEATURE_PBE2_IN_XE_FLAG = 0x040, + RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION = 0x080 +} RGX_HWPERF_FEATURE_FLAGS; + +/*! This structure holds the data of a firmware packet. */ +typedef struct +{ + RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */ + IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ + IMG_UINT32 ui32FWPerfCount0; /*!< Meta/MIPS PERF_COUNT0 register */ + IMG_UINT32 ui32FWPerfCount1; /*!< Meta/MIPS PERF_COUNT1 register */ + IMG_UINT32 ui32TimeCorrIndex; + IMG_UINT32 ui32Padding; +} RGX_HWPERF_FW_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FW_DATA); + +/*! This structure holds the data of a hardware packet, including counters. 
*/ +typedef struct +{ + IMG_UINT32 ui32DMCyc; /*!< DataMaster cycle count register, 0 if none */ + IMG_UINT32 ui32FrameNum; /*!< Frame number, undefined on some DataMasters */ + IMG_UINT32 ui32PID; /*!< Process identifier */ + IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ + IMG_UINT32 ui32WorkTarget; /*!< RenderTarget for a TA,3D; Frame context for RTU, 0x0 otherwise */ + IMG_UINT32 ui32ExtJobRef; /*!< Client driver context job reference used for tracking/debugging */ + IMG_UINT32 ui32IntJobRef; /*!< RGX Data master context job reference used for tracking/debugging */ + IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the time correlation at the time the packet was generated */ + IMG_UINT32 ui32BlkInfo; /*!< <31..16> NumBlocks <15..0> Counter block stream offset */ + IMG_UINT32 ui32WorkCtx; /*!< Work context: Render Context for TA/3D; RayTracing Context for RTU/SHG; 0x0 otherwise */ + IMG_UINT32 ui32CtxPriority; /*!< Context priority */ + IMG_UINT32 ui32Padding1; /* To ensure correct alignment */ + IMG_UINT32 aui32CountBlksStream[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS]; /*!< Counter data */ + IMG_UINT32 ui32Padding2; /* To ensure correct alignment */ +} RGX_HWPERF_HW_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_HW_DATA); + +/*! Mask for use with the aui32CountBlksStream field when decoding the + * counter block ID and mask word. */ +#define RGX_HWPERF_CNTBLK_ID_MASK 0xFFFF0000U +#define RGX_HWPERF_CNTBLK_ID_SHIFT 16U + +/*! Obtains the counter block ID from the supplied RGX_HWPERF_HW_DATA address + * and stream index. May be used in decoding the counter block stream words of + * a RGX_HWPERF_HW_DATA structure. */ +#define RGX_HWPERF_GET_CNTBLK_IDW(_word) ((IMG_UINT16)(((_word)&RGX_HWPERF_CNTBLK_ID_MASK)>>RGX_HWPERF_CNTBLK_ID_SHIFT)) +#define RGX_HWPERF_GET_CNTBLK_ID(_data_addr, _idx) RGX_HWPERF_GET_CNTBLK_IDW((_data_addr)->aui32CountBlksStream[(_idx)]) + +/*! Obtains the counter mask from the supplied RGX_HWPERF_HW_DATA address + * and stream index. 
May be used in decoding the counter block stream words + * of a RGX_HWPERF_HW_DATA structure. */ +#define RGX_HWPERF_GET_CNT_MASKW(_word) ((IMG_UINT16)((_word)&~RGX_HWPERF_CNTBLK_ID_MASK)) +#define RGX_HWPERF_GET_CNT_MASK(_data_addr, _idx) RGX_HWPERF_GET_CNT_MASKW((_data_addr)->aui32CountBlksStream[(_idx)]) + + +typedef struct +{ + RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */ + IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ + IMG_UINT32 ui32FrameNum; /*!< Frame number */ + IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ + IMG_UINT32 ui32PerfCycle; /*!< Cycle count. Used to measure HW context store latency */ + IMG_UINT32 ui32PerfPhase; /*!< Phase. Used to determine geometry content */ + IMG_UINT32 ui32Padding[2]; /*!< Padding to 8 DWords */ +} RGX_HWPERF_CSW_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CSW_DATA); + +/*! Enumeration of clocks supporting this event */ +typedef enum +{ + RGX_HWPERF_CLKS_CHG_INVALID = 0, + + RGX_HWPERF_CLKS_CHG_NAME_CORE = 1, + + RGX_HWPERF_CLKS_CHG_LAST, +} RGX_HWPERF_CLKS_CHG_NAME; + +/*! This structure holds the data of a clocks change packet. */ +typedef struct +{ + IMG_UINT64 ui64NewClockSpeed; /*!< New Clock Speed (in Hz) */ + RGX_HWPERF_CLKS_CHG_NAME eClockName; /*!< Clock name */ + IMG_UINT32 ui32CalibratedClockSpeed; /*!< Calibrated new GPU clock speed (in Hz) */ + IMG_UINT64 ui64OSTimeStamp; /*!< OSTimeStamp sampled by the host */ + IMG_UINT64 ui64CRTimeStamp; /*!< CRTimeStamp sampled by the host and + correlated to OSTimeStamp */ +} RGX_HWPERF_CLKS_CHG_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CLKS_CHG_DATA); + +/*! Enumeration of GPU utilisation states supported by this event */ +typedef IMG_UINT32 RGX_HWPERF_GPU_STATE; + +/*! This structure holds the data of a GPU utilisation state change packet. 
*/ +typedef struct +{ + RGX_HWPERF_GPU_STATE eState; /*!< New GPU utilisation state */ + IMG_UINT32 uiUnused1; /*!< Padding */ + IMG_UINT32 uiUnused2; /*!< Padding */ + IMG_UINT32 uiUnused3; /*!< Padding */ +} RGX_HWPERF_GPU_STATE_CHG_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_GPU_STATE_CHG_DATA); + + +/*! Signature pattern 'HPE1' found in the first word of a PWR_EST packet data */ +#define HWPERF_PWR_EST_V1_SIG 0x48504531 + +/*! Macros to obtain a component field from a counter ID word */ +#define RGX_HWPERF_GET_PWR_EST_HIGH_FLAG(_word) (((_word)&0x80000000)>>31) +#define RGX_HWPERF_GET_PWR_EST_UNIT(_word) (((_word)&0x0F000000)>>24) +#define RGX_HWPERF_GET_PWR_EST_NUMBER(_word) ((_word)&0x0000FFFF) + +/*! This macro constructs a counter ID for a power estimate data stream from + * the component parts of: high word flag, unit id, counter number */ +#define RGX_HWPERF_MAKE_PWR_EST_COUNTERID(_high, _unit, _number) \ + ((IMG_UINT32)(((IMG_UINT32)((IMG_UINT32)(_high)&0x1U)<<31) | ((IMG_UINT32)((IMG_UINT32)(_unit)&0xFU)<<24) | \ + ((_number)&0x0000FFFFU))) + +/*! This structure holds the data for a power estimate packet. */ +typedef struct +{ + IMG_UINT32 ui32StreamVersion; /*!< HWPERF_PWR_EST_V1_SIG */ + IMG_UINT32 ui32StreamSize; /*!< Size of array in bytes of stream data + held in the aui32StreamData member */ + IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; /*!< Counter data */ + IMG_UINT32 ui32Padding; /* To ensure correct alignment */ +} RGX_HWPERF_PWR_EST_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_PWR_EST_DATA); + +/*! 
Enumeration of the kinds of power change events that can occur */ +typedef enum +{ + RGX_HWPERF_PWR_UNDEFINED = 0, + RGX_HWPERF_PWR_ON = 1, /*!< Whole device powered on */ + RGX_HWPERF_PWR_OFF = 2, /*!< Whole device powered off */ + RGX_HWPERF_PWR_UP = 3, /*!< Power turned on to a HW domain */ + RGX_HWPERF_PWR_DOWN = 4, /*!< Power turned off to a HW domain */ + RGX_HWPERF_PWR_PHR_PARTIAL = 5, /*!< Periodic HW partial Rascal/Dust(S6) Reset */ + RGX_HWPERF_PWR_PHR_FULL = 6, /*!< Periodic HW full GPU Reset */ + + RGX_HWPERF_PWR_LAST, +} RGX_HWPERF_PWR; + +/*! This structure holds the data of a power packet. */ +typedef struct +{ + RGX_HWPERF_PWR eChange; /*!< Defines the type of power change */ + IMG_UINT32 ui32Domains; /*!< HW Domains affected */ + IMG_UINT64 ui64OSTimeStamp; /*!< OSTimeStamp sampled by the host */ + IMG_UINT64 ui64CRTimeStamp; /*!< CRTimeStamp sampled by the host and + correlated to OSTimeStamp */ + IMG_UINT32 ui32CalibratedClockSpeed; /*!< GPU clock speed (in Hz) at the time + the two timers were correlated */ + IMG_UINT32 ui32Unused1; /*!< Padding */ +} RGX_HWPERF_PWR_CHG_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_PWR_CHG_DATA); + + +/* + * PDVFS, GPU clock frequency changes and workload estimation profiling + * data. + */ +/*! DVFS and work estimation events. */ +typedef enum +{ + RGX_HWPERF_DVFS_EV_INVALID, /*! Invalid value. */ + RGX_HWPERF_DVFS_EV_PROACTIVE_EST_START, /*! Proactive DVFS estimate start */ + RGX_HWPERF_DVFS_EV_PROACTIVE_EST_FINISHED, /*! Proactive DVFS estimate finished */ + RGX_HWPERF_DVFS_EV_REACTIVE_EST_START, /*! Reactive DVFS estimate start */ + RGX_HWPERF_DVFS_EV_REACTIVE_EST_FINISHED, /*! Reactive DVFS estimate finished */ + /* workload estimation */ + RGX_HWPERF_DVFS_EV_WORK_EST_START, /*! Workload estimation start */ + RGX_HWPERF_DVFS_EV_WORK_EST_FINISHED, /*! Workload estimation finished */ + RGX_HWPERF_DVFS_EV_FREQ_CHG, /*! DVFS OPP/clock frequency change */ + + RGX_HWPERF_DVFS_EV_LAST /*! Number of element. 
*/ +} RGX_HWPERF_DVFS_EV; + +/*! Enumeration of DVFS transitions that can occur */ +typedef enum +{ + RGX_HWPERF_DVFS_OPP_NONE = 0x0, /*!< No OPP change, already operating at required freq */ +#if defined(SUPPORT_PDVFS_IDLE) + RGX_HWPERF_DVFS_OPP_IDLE = 0x1, /*!< GPU is idle, defer the OPP change */ +#endif + /* 0x2 to 0xF reserved */ + RGX_HWPERF_DVFS_OPP_UPDATE = 0x10, /*!< OPP change, new point is encoded in bits [3:0] */ + RGX_HWPERF_DVFS_OPP_LAST = 0x20, +} RGX_HWPERF_DVFS_OPP; + +typedef union +{ + /*! This structure holds the data of a proactive DVFS calculation packet. */ + struct + { + IMG_UINT64 ui64DeadlineInus; /*!< Next deadline in microseconds */ + IMG_UINT32 ui32Frequency; /*!< Required freq to meet deadline at 90% utilisation */ + IMG_UINT32 ui32WorkloadCycles; /*!< Current workload estimate in cycles */ + IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ + } sProDVFSCalc; + + /*! This structure holds the data of a reactive DVFS calculation packet. */ + struct + { + IMG_UINT32 ui32Frequency; /*!< Required freq to achieve average 90% utilisation */ + IMG_UINT32 ui32Utilisation; /*!< GPU utilisation since last update */ + IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ + } sDVFSCalc; + + /*! This structure holds the data of a work estimation packet. */ + struct + { + IMG_UINT64 ui64CyclesPrediction; /*!< Predicted cycle count for this workload */ + IMG_UINT64 ui64CyclesTaken; /*!< Actual cycle count for this workload */ + RGXFWIF_DM eDM; /*!< Target DM */ + IMG_UINT32 ui32ReturnDataIndex; /*!< Index into workload estimation table */ + IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ + } sWorkEst; + + /*! This structure holds the data of an OPP clock frequency transition packet. 
*/ + struct + { + IMG_UINT32 ui32OPPData; /*!< OPP transition */ + } sOPP; + +} RGX_HWPERF_DVFS_DETAIL; + +typedef struct { + RGX_HWPERF_DVFS_EV eEventType; /*!< DVFS sub-event type */ + RGX_HWPERF_DVFS_DETAIL uData; /*!< DVFS sub-event data */ +} RGX_HWPERF_DVFS_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_DVFS_DATA); + + +/*! Firmware Activity event. */ +typedef enum +{ + RGX_HWPERF_FWACT_EV_INVALID, /*! Invalid value. */ + RGX_HWPERF_FWACT_EV_REGS_SET, /*! Registers set. */ + RGX_HWPERF_FWACT_EV_HWR_DETECTED, /*! HWR detected. */ + RGX_HWPERF_FWACT_EV_HWR_RESET_REQUIRED, /*! Reset required. */ + RGX_HWPERF_FWACT_EV_HWR_RECOVERED, /*! HWR recovered. */ + RGX_HWPERF_FWACT_EV_HWR_FREELIST_READY, /*! Freelist ready. */ + RGX_HWPERF_FWACT_EV_FEATURES, /*! Features present */ + + RGX_HWPERF_FWACT_EV_LAST /*! Number of element. */ +} RGX_HWPERF_FWACT_EV; + +/*! Cause of the HWR event. */ +typedef enum +{ + RGX_HWPERF_HWR_REASON_INVALID, /*! Invalid value. */ + RGX_HWPERF_HWR_REASON_LOCKUP, /*! Lockup. */ + RGX_HWPERF_HWR_REASON_PAGEFAULT, /*! Page fault. */ + RGX_HWPERF_HWR_REASON_POLLFAIL, /*! Poll fail. */ + RGX_HWPERF_HWR_REASON_DEADLINE_OVERRUN, /*! Deadline overrun. */ + RGX_HWPERF_HWR_REASON_CSW_DEADLINE_OVERRUN, /*! Hard Context Switch deadline overrun. */ + + RGX_HWPERF_HWR_REASON_LAST /*! Number of elements. */ +} RGX_HWPERF_HWR_REASON; + + +/* Fixed size for BVNC string so it does not alter packet data format + * Check it is large enough against official BVNC string length maximum + */ +#define RGX_HWPERF_MAX_BVNC_LEN (24) +static_assert((RGX_HWPERF_MAX_BVNC_LEN >= RGX_BVNC_STR_SIZE_MAX), + "Space inside HWPerf packet data for BVNC string insufficient"); + +#define RGX_HWPERF_MAX_BVNC_BLOCK_LEN (16U) + +/*! BVNC Features */ +typedef struct +{ + /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */ + IMG_UINT16 ui16BlockID; + + /*! Number of counters in this block type */ + IMG_UINT16 ui16NumCounters; + + /*! 
Number of blocks of this type */ + IMG_UINT16 ui16NumBlocks; + + IMG_UINT16 ui16Reserved; +} RGX_HWPERF_BVNC_BLOCK; + +/*! BVNC Features */ +typedef struct +{ + IMG_CHAR aszBvncString[RGX_HWPERF_MAX_BVNC_LEN]; /*! BVNC string */ + IMG_UINT32 ui32BvncKmFeatureFlags; /*! See RGX_HWPERF_FEATURE_FLAGS */ + IMG_UINT16 ui16BvncBlocks; /*! Number of blocks described in aBvncBlocks */ + IMG_UINT16 ui16Reserved1; /*! Align to 32bit */ + RGX_HWPERF_BVNC_BLOCK aBvncBlocks[RGX_HWPERF_MAX_BVNC_BLOCK_LEN]; /*! Supported Performance Blocks for BVNC */ +} RGX_HWPERF_BVNC; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_BVNC); + +/*! Sub-event's data. */ +typedef union +{ + struct + { + RGX_HWPERF_DM eDM; /*!< Data Master ID. */ + RGX_HWPERF_HWR_REASON eReason; /*!< Reason of the HWR. */ + IMG_UINT32 ui32DMContext; /*!< FW render context */ + } sHWR; /*!< HWR sub-event data. */ + + RGX_HWPERF_BVNC sBVNC; /*!< BVNC Features */ +} RGX_HWPERF_FWACT_DETAIL; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DETAIL); + +/*! This structure holds the data of a FW activity event packet */ +typedef struct +{ + RGX_HWPERF_FWACT_EV eEvType; /*!< Event type. */ + RGX_HWPERF_FWACT_DETAIL uFwActDetail; /*!< Data of the sub-event. */ + IMG_UINT32 ui32Padding; +} RGX_HWPERF_FWACT_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DATA); + + + +typedef enum { + RGX_HWPERF_UFO_EV_UPDATE, + RGX_HWPERF_UFO_EV_CHECK_SUCCESS, + RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS, + RGX_HWPERF_UFO_EV_CHECK_FAIL, + RGX_HWPERF_UFO_EV_PRCHECK_FAIL, + RGX_HWPERF_UFO_EV_FORCE_UPDATE, + + RGX_HWPERF_UFO_EV_LAST +} RGX_HWPERF_UFO_EV; + +/*! Data stream tuple. */ +typedef union +{ + struct + { + IMG_UINT32 ui32FWAddr; + IMG_UINT32 ui32Value; + } sCheckSuccess; + struct + { + IMG_UINT32 ui32FWAddr; + IMG_UINT32 ui32Value; + IMG_UINT32 ui32Required; + } sCheckFail; + struct + { + IMG_UINT32 ui32FWAddr; + IMG_UINT32 ui32OldValue; + IMG_UINT32 ui32NewValue; + } sUpdate; +} RGX_HWPERF_UFO_DATA_ELEMENT; + +/*! 
This structure holds the packet payload data for UFO event. */ +typedef struct +{ + RGX_HWPERF_UFO_EV eEvType; + IMG_UINT32 ui32TimeCorrIndex; + IMG_UINT32 ui32PID; + IMG_UINT32 ui32ExtJobRef; + IMG_UINT32 ui32IntJobRef; + IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ + IMG_UINT32 ui32StreamInfo; + RGX_HWPERF_DM eDM; + IMG_UINT32 ui32Padding; + IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; +} RGX_HWPERF_UFO_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_UFO_DATA); + + + +typedef enum +{ + RGX_HWPERF_KICK_TYPE_TA3D, /*!< Replaced by separate TA and 3D types */ + RGX_HWPERF_KICK_TYPE_TQ2D, + RGX_HWPERF_KICK_TYPE_TQ3D, + RGX_HWPERF_KICK_TYPE_CDM, + RGX_HWPERF_KICK_TYPE_RS, + RGX_HWPERF_KICK_TYPE_VRDM, + RGX_HWPERF_KICK_TYPE_TQTDM, + RGX_HWPERF_KICK_TYPE_SYNC, + RGX_HWPERF_KICK_TYPE_TA, + RGX_HWPERF_KICK_TYPE_3D, + RGX_HWPERF_KICK_TYPE_LAST, + + RGX_HWPERF_KICK_TYPE_FORCE_32BIT = 0x7fffffff +} RGX_HWPERF_KICK_TYPE; + +typedef struct +{ + RGX_HWPERF_KICK_TYPE ui32EnqType; + IMG_UINT32 ui32PID; + IMG_UINT32 ui32ExtJobRef; + IMG_UINT32 ui32IntJobRef; + IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ + IMG_UINT32 ui32Padding; + IMG_UINT64 ui64CheckFence_UID; + IMG_UINT64 ui64UpdateFence_UID; + IMG_UINT64 ui64DeadlineInus; /*!< Workload deadline in system monotonic time */ + IMG_UINT64 ui64CycleEstimate; /*!< Estimated cycle time for the workload */ + PVRSRV_FENCE hCheckFence; /*!< Fence this enqueue task waits for, before starting */ + PVRSRV_FENCE hUpdateFence; /*!< Fence this enqueue task signals, on completion */ + PVRSRV_TIMELINE hUpdateTimeline; /*!< Timeline on which the above hUpdateFence is created */ + + IMG_UINT32 ui32Pad; /* Align structure size to 8 bytes */ +} RGX_HWPERF_HOST_ENQ_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. 
*/ +static_assert((sizeof(RGX_HWPERF_HOST_ENQ_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_ENQ_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef struct +{ + RGX_HWPERF_UFO_EV eEvType; + IMG_UINT32 ui32StreamInfo; + IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; + IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */ +} RGX_HWPERF_HOST_UFO_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_UFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_UFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef enum +{ + RGX_HWPERF_HOST_RESOURCE_TYPE_INVALID, + RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC, /* PRIM */ + RGX_HWPERF_HOST_RESOURCE_TYPE_TIMELINE_DEPRECATED, /* Timeline resource packets are now + emitted in client hwperf buffer */ + RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR, /* Fence for use on GPU (SYNC_CP backed) */ + RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP, + RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW, /* Fence created on SW timeline */ + + RGX_HWPERF_HOST_RESOURCE_TYPE_LAST +} RGX_HWPERF_HOST_RESOURCE_TYPE; + +typedef union +{ + struct + { + IMG_UINT32 uiPid; + IMG_UINT64 ui64Timeline_UID1; + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */ + } sTimelineAlloc; + + struct + { + IMG_PID uiPID; + PVRSRV_FENCE hFence; + IMG_UINT32 ui32CheckPt_FWAddr; + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + } sFenceAlloc; + + struct + { + IMG_UINT32 ui32CheckPt_FWAddr; + PVRSRV_TIMELINE hTimeline; + IMG_PID uiPID; + PVRSRV_FENCE hFence; + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + } sSyncCheckPointAlloc; + + struct + { + IMG_PID uiPID; + PVRSRV_FENCE hSWFence; + PVRSRV_TIMELINE hSWTimeline; + IMG_UINT64 ui64SyncPtIndex; + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + } sSWFenceAlloc; + + struct + { + IMG_UINT32 ui32FWAddr; + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + } 
sSyncAlloc; +} RGX_HWPERF_HOST_ALLOC_DETAIL; + +typedef struct +{ + RGX_HWPERF_HOST_RESOURCE_TYPE ui32AllocType; + RGX_HWPERF_HOST_ALLOC_DETAIL RGXFW_ALIGN uAllocDetail; +} RGX_HWPERF_HOST_ALLOC_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_ALLOC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_ALLOC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef union +{ + struct + { + IMG_UINT32 uiPid; + IMG_UINT64 ui64Timeline_UID1; + IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */ + } sTimelineDestroy; + + struct + { + IMG_UINT64 ui64Fence_UID; + IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */ + } sFenceDestroy; + + struct + { + IMG_UINT32 ui32CheckPt_FWAddr; + } sSyncCheckPointFree; + + struct + { + IMG_UINT32 ui32FWAddr; + } sSyncFree; +} RGX_HWPERF_HOST_FREE_DETAIL; + +typedef struct +{ + RGX_HWPERF_HOST_RESOURCE_TYPE ui32FreeType; + RGX_HWPERF_HOST_FREE_DETAIL uFreeDetail; + IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */ +} RGX_HWPERF_HOST_FREE_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_FREE_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_FREE_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef struct +{ + IMG_UINT64 ui64CRTimestamp; + IMG_UINT64 ui64OSTimestamp; + IMG_UINT32 ui32ClockSpeed; + IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */ +} RGX_HWPERF_HOST_CLK_SYNC_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. 
*/ +static_assert((sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef union +{ + struct + { + IMG_UINT64 ui64NewFence_UID; + IMG_UINT64 ui64InFence1_UID; + IMG_UINT64 ui64InFence2_UID; + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */ + } sFenceMerge; +} RGX_HWPERF_HOST_MODIFY_DETAIL; + +typedef struct +{ + RGX_HWPERF_HOST_RESOURCE_TYPE ui32ModifyType; + RGX_HWPERF_HOST_MODIFY_DETAIL uModifyDetail; +} RGX_HWPERF_HOST_MODIFY_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_MODIFY_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_MODIFY_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef enum +{ + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED = 0, + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK, + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_RESPONDING, + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD, + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT, + + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_LAST +} RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS; + +typedef enum +{ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED = 0, + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE, + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED, + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING, + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS, + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT, + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED, + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING, + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING, + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS, + + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_LAST +} RGX_HWPERF_HOST_DEVICE_HEALTH_REASON; + +typedef enum +{ + RGX_HWPERF_DEV_INFO_EV_HEALTH, + + RGX_HWPERF_DEV_INFO_EV_LAST +} RGX_HWPERF_DEV_INFO_EV; + +typedef union +{ + struct + { + 
RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS eDeviceHealthStatus; + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON eDeviceHealthReason; + } sDeviceStatus; +} RGX_HWPERF_HOST_DEV_INFO_DETAIL; + +typedef struct +{ + IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */ + RGX_HWPERF_DEV_INFO_EV eEvType; + RGX_HWPERF_HOST_DEV_INFO_DETAIL uDevInfoDetail; +} RGX_HWPERF_HOST_DEV_INFO_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef enum +{ + RGX_HWPERF_INFO_EV_MEM_USAGE, + + RGX_HWPERF_INFO_EV_LAST +} RGX_HWPERF_INFO_EV; + +typedef union +{ + struct + { + IMG_UINT32 ui32TotalMemoryUsage; + struct + { + IMG_UINT32 ui32Pid; + IMG_UINT32 ui32KernelMemUsage; + IMG_UINT32 ui32GraphicsMemUsage; + } sPerProcessUsage[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS]; + } sMemUsageStats; +} RGX_HWPERF_HOST_INFO_DETAIL; + +typedef struct +{ + IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */ + RGX_HWPERF_INFO_EV eEvType; + RGX_HWPERF_HOST_INFO_DETAIL uInfoDetail; +} RGX_HWPERF_HOST_INFO_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. 
*/ +static_assert((sizeof(RGX_HWPERF_HOST_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef enum +{ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN = 0, + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END, + + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_LAST, +} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE; + +typedef enum +{ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_INVALID = 0, + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_TIMEOUT, + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_PASSED, + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_ERROR, + + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_LAST, +} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT; + +typedef union +{ + struct + { + IMG_UINT32 ui32TimeoutInMs; + } sBegin; + + struct + { + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT eResult; + } sEnd; +} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL; + +typedef struct +{ + IMG_PID uiPID; + PVRSRV_FENCE hFence; + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType; + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL uDetail; + +} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA; + +static_assert((sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef struct +{ + IMG_PID uiPID; + PVRSRV_TIMELINE hTimeline; + IMG_UINT64 ui64SyncPtIndex; + +} RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA; + +static_assert((sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef enum +{ + RGX_HWPERF_RESOURCE_CAPTURE_TYPE_NONE, + RGX_HWPERF_RESOURCE_CAPTURE_TYPE_DEFAULT_FRAMEBUFFER, + RGX_HWPERF_RESOURCE_CAPTURE_TYPE_OFFSCREEN_FB_ATTACHMENTS, + RGX_HWPERF_RESOURCE_CAPTURE_TYPE_TILE_LIFETIME_DATA, + + RGX_HWPERF_RESOURCE_TYPE_COUNT +} RGX_HWPERF_RESOURCE_CAPTURE_TYPE; + +typedef struct +{ + IMG_UINT32 ui32Height; + IMG_UINT32 ui32Width; + 
IMG_UINT32 ui32BPP; + IMG_UINT32 ui32PixFormat; +} RGX_RESOURCE_PER_SURFACE_INFO, *PRGX_RESOURCE_PER_SURFACE_INFO; + +typedef struct +{ + IMG_INT32 i32XOffset; /*!< render surface X shift */ + IMG_INT32 i32YOffset; /*!< render surface Y shift */ + IMG_UINT32 ui32WidthInTiles; /*!< number of TLT data points in X */ + IMG_UINT32 ui32HeightInTiles; /*!< number of TLT data points in Y */ +} RGX_RESOURCE_PER_TLT_BUFFER_INFO, *PRGX_RESOURCE_PER_TLT_BUFFER_INFO; + +typedef union +{ + struct RGX_RESOURCE_CAPTURE_RENDER_SURFACES + { + IMG_UINT32 ui32RenderSurfaceCount; + RGX_RESOURCE_PER_SURFACE_INFO sSurface[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; + } sRenderSurfaces; + + struct RGX_RESOURCE_CAPTURE_TILE_LIFETIME_BUFFERS + { + RGX_RESOURCE_PER_TLT_BUFFER_INFO sTLTBufInfo[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; + } sTLTBuffers; +} RGX_RESOURCE_CAPTURE_DETAIL; + +typedef struct +{ + RGX_HWPERF_RESOURCE_CAPTURE_TYPE eType; + IMG_PID uPID; + IMG_UINT32 ui32ContextID; + IMG_UINT32 ui32FrameNum; + IMG_UINT32 ui32CapturedTaskJobRef; /* The job ref of the HW task that emitted the data */ + IMG_INT32 eClientModule; /* RGX_HWPERF_CLIENT_API - ID that the capture is originating from. */ + RGX_RESOURCE_CAPTURE_DETAIL uDetail; /* eType determines the value of the union */ +} RGX_RESOURCE_CAPTURE_INFO, *PRGX_RESOURCE_CAPTURE_INFO; + +#define RGX_RESOURCE_CAPTURE_INFO_BASE_SIZE() offsetof(RGX_RESOURCE_CAPTURE_INFO, uDetail) + +#define RGX_TLT_HARDWARE_HDR_SIZE (16U) + +/* PVRSRVGetHWPerfResourceCaptureResult */ +typedef enum +{ + RGX_HWPERF_RESOURCE_CAPTURE_RESULT_NONE = 0, + RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK, /* We got data ok, expect more packets for this request. */ + RGX_HWPERF_RESOURCE_CAPTURE_RESULT_NOT_READY, /* Signals a timeout on the connection - no data available yet. */ + RGX_HWPERF_RESOURCE_CAPTURE_RESULT_COMPLETE_SUCCESS, /* The request completed successfully, signals the end of packets for the request. 
*/ + RGX_HWPERF_RESOURCE_CAPTURE_RESULT_COMPLETE_FAILURE /* The request failed, signals the end of packets for the request. */ +} RGX_HWPERF_RESOURCE_CAPTURE_RESULT_STATUS; + +typedef struct +{ + IMG_PID uPID; /* In case of a failed request pass the caller the PID and context ID. */ + IMG_UINT32 ui32CtxID; + RGX_RESOURCE_CAPTURE_INFO *psInfo; /* Various meta-data regarding the captured resource which aid the requester when, + unpacking the resource data, valid if RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK is returned. */ + IMG_BYTE *pbData; /* Buffer containing the captured resource data, valid if RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK is returned. */ +} RGX_RESOURCE_CAPTURE_RESULT; + +/*! This type is a union of packet payload data structures associated with + * various FW and Host events */ +typedef union +{ + RGX_HWPERF_FW_DATA sFW; /*!< Firmware event packet data */ + RGX_HWPERF_HW_DATA sHW; /*!< Hardware event packet data */ + RGX_HWPERF_CLKS_CHG_DATA sCLKSCHG; /*!< Clock change event packet data */ + RGX_HWPERF_GPU_STATE_CHG_DATA sGPUSTATECHG; /*!< GPU utilisation state change event packet data */ + RGX_HWPERF_PWR_EST_DATA sPWREST; /*!< Power estimate event packet data */ + RGX_HWPERF_PWR_CHG_DATA sPWR; /*!< Power event packet data */ + RGX_HWPERF_CSW_DATA sCSW; /*!< Context switch packet data */ + RGX_HWPERF_UFO_DATA sUFO; /*!< UFO data */ + RGX_HWPERF_FWACT_DATA sFWACT; /*!< Firmware activity event packet data */ + RGX_HWPERF_DVFS_DATA sDVFS; /*!< DVFS activity data */ + /* */ + RGX_HWPERF_HOST_ENQ_DATA sENQ; /*!< Host ENQ data */ + RGX_HWPERF_HOST_UFO_DATA sHUFO; /*!< Host UFO data */ + RGX_HWPERF_HOST_ALLOC_DATA sHALLOC; /*!< Host Alloc data */ + RGX_HWPERF_HOST_CLK_SYNC_DATA sHCLKSYNC; /*!< Host CLK_SYNC data */ + RGX_HWPERF_HOST_FREE_DATA sHFREE; /*!< Host Free data */ + RGX_HWPERF_HOST_MODIFY_DATA sHMOD; /*!< Host Modify data */ + RGX_HWPERF_HOST_DEV_INFO_DATA sHDEVINFO; /*!< Host device info data */ + RGX_HWPERF_HOST_INFO_DATA sHINFO; /*!< Host info data */ 
+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA sWAIT; /*!< Host fence-wait data */ + RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA sSWTLADV; /*!< Host SW-timeline advance data */ +} RGX_HWPERF_V2_PACKET_DATA_, *RGX_PHWPERF_V2_PACKET_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_DATA_); + +#define RGX_HWPERF_GET_PACKET_DATA(_packet_addr) ((RGX_PHWPERF_V2_PACKET_DATA) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR)))) + +#define RGX_HWPERF_GET_DVFS_EVENT_TYPE_PTR(_packet_addr) \ + ((RGX_HWPERF_DVFS_EV*) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR) + offsetof(RGX_HWPERF_DVFS_DATA,eEventType)))) + +/****************************************************************************** + * API Types + *****************************************************************************/ + +/*! Counter block IDs for all the hardware blocks with counters. + * Directly addressable blocks must have a value between 0..15. + * First hex digit represents a group number and the second hex digit + * represents the unit within the group. Group 0 is the direct group, + * all others are indirect groups. 
+ */ +typedef IMG_UINT32 RGX_HWPERF_CNTBLK_ID; + +/* Directly addressable counter blocks */ +#define RGX_CNTBLK_ID_TA 0x0000U +#define RGX_CNTBLK_ID_RASTER 0x0001U /* Non-cluster grouping cores */ +#define RGX_CNTBLK_ID_HUB 0x0002U /* Non-cluster grouping cores */ +#define RGX_CNTBLK_ID_TORNADO 0x0003U /* XT cores */ +#define RGX_CNTBLK_ID_JONES 0x0004U /* S7 cores */ +#define RGX_CNTBLK_ID_BF 0x0005U /* Doppler unit */ +#define RGX_CNTBLK_ID_BT 0x0006U /* Doppler unit */ +#define RGX_CNTBLK_ID_RT 0x0007U /* Doppler unit */ +#define RGX_CNTBLK_ID_SH 0x0008U /* Ray tracing unit */ + +#define RGX_CNTBLK_ID_DIRECT_LAST 0x0009U + +/* Indirectly addressable counter blocks */ +#define RGX_CNTBLK_ID_TPU_MCU0 0x0010U /* Addressable by Dust */ +#define RGX_CNTBLK_ID_TPU_MCU1 0x0011U +#define RGX_CNTBLK_ID_TPU_MCU2 0x0012U +#define RGX_CNTBLK_ID_TPU_MCU3 0x0013U +#define RGX_CNTBLK_ID_TPU_MCU4 0x0014U +#define RGX_CNTBLK_ID_TPU_MCU5 0x0015U +#define RGX_CNTBLK_ID_TPU_MCU6 0x0016U +#define RGX_CNTBLK_ID_TPU_MCU7 0x0017U +#define RGX_CNTBLK_ID_TPU_MCU_ALL 0x4010U + +#define RGX_CNTBLK_ID_USC0 0x0020U /* Addressable by Cluster */ +#define RGX_CNTBLK_ID_USC1 0x0021U +#define RGX_CNTBLK_ID_USC2 0x0022U +#define RGX_CNTBLK_ID_USC3 0x0023U +#define RGX_CNTBLK_ID_USC4 0x0024U +#define RGX_CNTBLK_ID_USC5 0x0025U +#define RGX_CNTBLK_ID_USC6 0x0026U +#define RGX_CNTBLK_ID_USC7 0x0027U +#define RGX_CNTBLK_ID_USC8 0x0028U +#define RGX_CNTBLK_ID_USC9 0x0029U +#define RGX_CNTBLK_ID_USC10 0x002AU +#define RGX_CNTBLK_ID_USC11 0x002BU +#define RGX_CNTBLK_ID_USC12 0x002CU +#define RGX_CNTBLK_ID_USC13 0x002DU +#define RGX_CNTBLK_ID_USC14 0x002EU +#define RGX_CNTBLK_ID_USC15 0x002FU +#define RGX_CNTBLK_ID_USC_ALL 0x4020U + +#define RGX_CNTBLK_ID_TEXAS0 0x0030U /* Addressable by Phantom in XT, Dust in S7 */ +#define RGX_CNTBLK_ID_TEXAS1 0x0031U +#define RGX_CNTBLK_ID_TEXAS2 0x0032U +#define RGX_CNTBLK_ID_TEXAS3 0x0033U +#define RGX_CNTBLK_ID_TEXAS4 0x0034U +#define RGX_CNTBLK_ID_TEXAS5 0x0035U 
+#define RGX_CNTBLK_ID_TEXAS6 0x0036U +#define RGX_CNTBLK_ID_TEXAS7 0x0037U +#define RGX_CNTBLK_ID_TEXAS_ALL 0x4030U + +#define RGX_CNTBLK_ID_RASTER0 0x0040U /* Addressable by Phantom, XT only */ +#define RGX_CNTBLK_ID_RASTER1 0x0041U +#define RGX_CNTBLK_ID_RASTER2 0x0042U +#define RGX_CNTBLK_ID_RASTER3 0x0043U +#define RGX_CNTBLK_ID_RASTER_ALL 0x4040U + +#define RGX_CNTBLK_ID_BLACKPEARL0 0x0050U /* Addressable by Phantom, S7, only */ +#define RGX_CNTBLK_ID_BLACKPEARL1 0x0051U +#define RGX_CNTBLK_ID_BLACKPEARL2 0x0052U +#define RGX_CNTBLK_ID_BLACKPEARL3 0x0053U +#define RGX_CNTBLK_ID_BLACKPEARL_ALL 0x4050U + +#define RGX_CNTBLK_ID_PBE0 0x0060U /* Addressable by Cluster in S7 and PBE2_IN_XE */ +#define RGX_CNTBLK_ID_PBE1 0x0061U +#define RGX_CNTBLK_ID_PBE2 0x0062U +#define RGX_CNTBLK_ID_PBE3 0x0063U +#define RGX_CNTBLK_ID_PBE4 0x0064U +#define RGX_CNTBLK_ID_PBE5 0x0065U +#define RGX_CNTBLK_ID_PBE6 0x0066U +#define RGX_CNTBLK_ID_PBE7 0x0067U +#define RGX_CNTBLK_ID_PBE8 0x0068U +#define RGX_CNTBLK_ID_PBE9 0x0069U +#define RGX_CNTBLK_ID_PBE10 0x006AU +#define RGX_CNTBLK_ID_PBE11 0x006BU +#define RGX_CNTBLK_ID_PBE12 0x006CU +#define RGX_CNTBLK_ID_PBE13 0x006DU +#define RGX_CNTBLK_ID_PBE14 0x006EU +#define RGX_CNTBLK_ID_PBE15 0x006FU +#define RGX_CNTBLK_ID_PBE_ALL 0x4060U + +#define RGX_CNTBLK_ID_BX_TU0 0x0070U /* Doppler unit, XT only */ +#define RGX_CNTBLK_ID_BX_TU1 0x0071U +#define RGX_CNTBLK_ID_BX_TU2 0x0072U +#define RGX_CNTBLK_ID_BX_TU3 0x0073U +#define RGX_CNTBLK_ID_BX_TU_ALL 0x4070U + +#define RGX_CNTBLK_ID_LAST 0x0074U + +#define RGX_CNTBLK_ID_CUSTOM0 0x7FF0U +#define RGX_CNTBLK_ID_CUSTOM1 0x7FF1U +#define RGX_CNTBLK_ID_CUSTOM2 0x7FF2U +#define RGX_CNTBLK_ID_CUSTOM3 0x7FF3U +#define RGX_CNTBLK_ID_CUSTOM4_FW 0x7FF4U /* Custom block used for getting statistics held in the FW */ + + +/* Masks for the counter block ID*/ +#define RGX_CNTBLK_ID_GROUP_MASK (0x00F0U) +#define RGX_CNTBLK_ID_GROUP_SHIFT (4U) +#define RGX_CNTBLK_ID_UNIT_ALL_MASK (0x4000U) +#define 
RGX_CNTBLK_ID_UNIT_MASK (0xfU) + +#define RGX_CNTBLK_INDIRECT_COUNT(_class, _n) ((IMG_UINT32)(RGX_CNTBLK_ID_ ## _class ## _n) - (IMG_UINT32)(RGX_CNTBLK_ID_ ## _class ## 0) + 1u) + +/*! The number of layout blocks defined with configurable multiplexed + * performance counters, hence excludes custom counter blocks. + */ +#define RGX_HWPERF_MAX_DEFINED_BLKS (\ + (IMG_UINT32)RGX_CNTBLK_ID_DIRECT_LAST +\ + RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7)+\ + RGX_CNTBLK_INDIRECT_COUNT(USC, 15)+\ + RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7)+\ + RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3)+\ + RGX_CNTBLK_INDIRECT_COUNT(BLACKPEARL, 3)+\ + RGX_CNTBLK_INDIRECT_COUNT(PBE, 15)+\ + RGX_CNTBLK_INDIRECT_COUNT(BX_TU, 3) ) + +static_assert( + ((RGX_CNTBLK_ID_DIRECT_LAST + ((RGX_CNTBLK_ID_LAST & RGX_CNTBLK_ID_GROUP_MASK) >> RGX_CNTBLK_ID_GROUP_SHIFT)) <= RGX_HWPERF_MAX_BVNC_BLOCK_LEN), + "RGX_HWPERF_MAX_BVNC_BLOCK_LEN insufficient"); + +#define RGX_HWPERF_EVENT_MASK_VALUE(e) (IMG_UINT64_C(1) << (e)) + +#define RGX_CUSTOM_FW_CNTRS \ + X(TA_LOCAL_FL_SIZE, 0x0, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED)) \ + \ + X(TA_GLOBAL_FL_SIZE, 0x1, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED)) \ + \ + X(3D_LOCAL_FL_SIZE, 0x2, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED)) \ + \ + X(3D_GLOBAL_FL_SIZE, 0x3, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED)) \ + \ + X(ISP_TILES_IN_FLIGHT, 0x4, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DSPMKICK)) + +/*! 
Counter IDs for the firmware held statistics */ +typedef enum +{ +#define X(ctr, id, allow_mask) RGX_CUSTOM_FW_CNTR_##ctr = id, + RGX_CUSTOM_FW_CNTRS +#undef X + + /* always the last entry in the list */ + RGX_CUSTOM_FW_CNTR_LAST +} RGX_HWPERF_CUSTOM_FW_CNTR_ID; + +/*! Identifier for each counter in a performance counting module */ +typedef IMG_UINT32 RGX_HWPERF_CNTBLK_COUNTER_ID; + +#define RGX_CNTBLK_COUNTER0_ID 0U +#define RGX_CNTBLK_COUNTER1_ID 1U +#define RGX_CNTBLK_COUNTER2_ID 2U +#define RGX_CNTBLK_COUNTER3_ID 3U +#define RGX_CNTBLK_COUNTER4_ID 4U +#define RGX_CNTBLK_COUNTER5_ID 5U + /* MAX value used in server handling of counter config arrays */ +#define RGX_CNTBLK_COUNTERS_MAX 6U + + +/* sets all the bits from bit _b1 to _b2, in a IMG_UINT64 type */ +#define MASK_RANGE_IMPL(b1, b2) ((IMG_UINT64)((IMG_UINT64_C(1) << ((IMG_UINT32)(b2)-(IMG_UINT32)(b1) + 1U)) - 1U) << (IMG_UINT32)(b1)) +#define MASK_RANGE(R) MASK_RANGE_IMPL(R##_FIRST_TYPE, R##_LAST_TYPE) +#define RGX_HWPERF_HOST_EVENT_MASK_VALUE(e) (IMG_UINT32_C(1) << (e)) + +/*! Mask macros for use with RGXCtrlHWPerf() API. + */ +#define RGX_HWPERF_EVENT_MASK_NONE (IMG_UINT64_C(0x0000000000000000)) +#define RGX_HWPERF_EVENT_MASK_ALL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) + +/*! HWPerf Firmware event masks + * Next macro covers all FW Start/End/Debug (SED) events. 
+ */ +#define RGX_HWPERF_EVENT_MASK_FW_SED (MASK_RANGE(RGX_HWPERF_FW_EVENT_RANGE)) + +#define RGX_HWPERF_EVENT_MASK_FW_UFO (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO)) +#define RGX_HWPERF_EVENT_MASK_FW_CSW (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_START) |\ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_FINISHED)) +#define RGX_HWPERF_EVENT_MASK_ALL_FW (RGX_HWPERF_EVENT_MASK_FW_SED |\ + RGX_HWPERF_EVENT_MASK_FW_UFO |\ + RGX_HWPERF_EVENT_MASK_FW_CSW) + +#define RGX_HWPERF_EVENT_MASK_HW_PERIODIC (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PERIODIC)) +#define RGX_HWPERF_EVENT_MASK_HW_KICKFINISH ((MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE0) |\ + MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE1)) &\ + ~(RGX_HWPERF_EVENT_MASK_HW_PERIODIC)) + +#define RGX_HWPERF_EVENT_MASK_ALL_HW (RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |\ + RGX_HWPERF_EVENT_MASK_HW_PERIODIC) + +#define RGX_HWPERF_EVENT_MASK_ALL_PWR_EST (MASK_RANGE(RGX_HWPERF_PWR_EST_RANGE)) + +#define RGX_HWPERF_EVENT_MASK_ALL_PWR (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CLKS_CHG) |\ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_GPU_STATE_CHG) |\ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_CHG)) + +/*! HWPerf Host event masks + */ +#define RGX_HWPERF_EVENT_MASK_HOST_WORK_ENQ (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_ENQ)) +#define RGX_HWPERF_EVENT_MASK_HOST_ALL_UFO (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_UFO)) +#define RGX_HWPERF_EVENT_MASK_HOST_ALL_PWR (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_CLK_SYNC)) + + +/*! Type used in the RGX API RGXConfigureAndEnableHWPerfCounters() */ +typedef struct +{ + /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */ + IMG_UINT16 ui16BlockID; + + /*! 4 or 6 LSBs used to select counters to configure in this block. */ + IMG_UINT8 ui8CounterSelect; + + /*! 4 or 6 LSBs used as MODE bits for the counters in the group. */ + IMG_UINT8 ui8Mode; + + /*! 5 or 6 LSBs used as the GROUP_SELECT value for the counter. */ + IMG_UINT8 aui8GroupSelect[RGX_CNTBLK_COUNTERS_MAX]; + + /*! 
16 LSBs used as the BIT_SELECT value for the counter. */ + IMG_UINT16 aui16BitSelect[RGX_CNTBLK_COUNTERS_MAX]; + + /*! 14 LSBs used as the BATCH_MAX value for the counter. */ + IMG_UINT32 aui32BatchMax[RGX_CNTBLK_COUNTERS_MAX]; + + /*! 14 LSBs used as the BATCH_MIN value for the counter. */ + IMG_UINT32 aui32BatchMin[RGX_CNTBLK_COUNTERS_MAX]; +} UNCACHED_ALIGN RGX_HWPERF_CONFIG_CNTBLK; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CONFIG_CNTBLK); + + +#if defined(__cplusplus) +} +#endif + +#endif /* RGX_HWPERF_H_ */ + +/****************************************************************************** + End of file +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/rogue/rgx_options.h b/drivers/mcst/gpu-imgtec/include/rogue/rgx_options.h new file mode 100644 index 000000000000..2661f1786d34 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/rogue/rgx_options.h @@ -0,0 +1,261 @@ +/*************************************************************************/ /*! +@File +@Title RGX build options +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* Each build option listed here is packed into a dword which provides up to + * log2(RGX_BUILD_OPTIONS_MASK_KM + 1) flags for KM and + * (32 - log2(RGX_BUILD_OPTIONS_MASK_KM + 1)) flags for UM. + * The corresponding bit is set if the build option was enabled at compile + * time. + * + * In order to extract the enabled build flags the INTERNAL_TEST switch should + * be enabled in a client program which includes this header. 
Then the client + * can test specific build flags by reading the bit value at + * ##OPTIONNAME##_SET_OFFSET + * in RGX_BUILD_OPTIONS_KM or RGX_BUILD_OPTIONS. + * + * IMPORTANT: add new options to unused bits or define a new dword + * (e.g. RGX_BUILD_OPTIONS_KM2 or RGX_BUILD_OPTIONS2) so that the bitfield + * remains backwards compatible. + */ + +#ifndef RGX_OPTIONS_H +#define RGX_OPTIONS_H + +#define RGX_BUILD_OPTIONS_MASK_KM 0x0000FFFFUL + +#if defined(NO_HARDWARE) || defined(INTERNAL_TEST) + #define NO_HARDWARE_SET_OFFSET OPTIONS_BIT0 + #define OPTIONS_BIT0 (0x1UL << 0) + #if OPTIONS_BIT0 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT0 0x0UL +#endif /* NO_HARDWARE */ + + +#if defined(PDUMP) || defined(INTERNAL_TEST) + #define PDUMP_SET_OFFSET OPTIONS_BIT1 + #define OPTIONS_BIT1 (0x1UL << 1) + #if OPTIONS_BIT1 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT1 0x0UL +#endif /* PDUMP */ + + +#if defined(INTERNAL_TEST) + #define UNUSED_SET_OFFSET OPTIONS_BIT2 + #define OPTIONS_BIT2 (0x1UL << 2) + #if OPTIONS_BIT2 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT2 0x0UL +#endif + +/* No longer used */ +#if defined(INTERNAL_TEST) + #define OPTIONS_BIT3 (0x1UL << 3) + #if OPTIONS_BIT3 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT3 0x0UL +#endif + + +#if defined(SUPPORT_RGX) || defined(INTERNAL_TEST) + #define SUPPORT_RGX_SET_OFFSET OPTIONS_BIT4 + #define OPTIONS_BIT4 (0x1UL << 4) + #if OPTIONS_BIT4 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT4 0x0UL +#endif /* SUPPORT_RGX */ + + +#if defined(SUPPORT_SECURE_EXPORT) || defined(INTERNAL_TEST) + #define SUPPORT_SECURE_EXPORT_SET_OFFSET OPTIONS_BIT5 + #define OPTIONS_BIT5 (0x1UL << 5) + #if OPTIONS_BIT5 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit 
exceeds reserved range" + #endif +#else + #define OPTIONS_BIT5 0x0UL +#endif /* SUPPORT_SECURE_EXPORT */ + + +#if defined(SUPPORT_INSECURE_EXPORT) || defined(INTERNAL_TEST) + #define SUPPORT_INSECURE_EXPORT_SET_OFFSET OPTIONS_BIT6 + #define OPTIONS_BIT6 (0x1UL << 6) + #if OPTIONS_BIT6 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT6 0x0UL +#endif /* SUPPORT_INSECURE_EXPORT */ + + +#if defined(SUPPORT_VFP) || defined(INTERNAL_TEST) + #define SUPPORT_VFP_SET_OFFSET OPTIONS_BIT7 + #define OPTIONS_BIT7 (0x1UL << 7) + #if OPTIONS_BIT7 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT7 0x0UL +#endif /* SUPPORT_VFP */ + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) || defined(INTERNAL_TEST) + #define SUPPORT_WORKLOAD_ESTIMATION_OFFSET OPTIONS_BIT8 + #define OPTIONS_BIT8 (0x1UL << 8) + #if OPTIONS_BIT8 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT8 0x0UL +#endif /* SUPPORT_WORKLOAD_ESTIMATION */ +#define OPTIONS_WORKLOAD_ESTIMATION_MASK (0x1UL << 8) + +#if defined(SUPPORT_PDVFS) || defined(INTERNAL_TEST) + #define SUPPORT_PDVFS_OFFSET OPTIONS_BIT9 + #define OPTIONS_BIT9 (0x1UL << 9) + #if OPTIONS_BIT9 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT9 0x0UL +#endif /* SUPPORT_PDVFS */ +#define OPTIONS_PDVFS_MASK (0x1UL << 9) + +#if defined(DEBUG) || defined(INTERNAL_TEST) + #define DEBUG_SET_OFFSET OPTIONS_BIT10 + #define OPTIONS_BIT10 (0x1UL << 10) + #if OPTIONS_BIT10 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT10 0x0UL +#endif /* DEBUG */ +/* The bit position of this should be the same as DEBUG_SET_OFFSET option + * when defined. 
+ */ +#define OPTIONS_DEBUG_MASK (0x1UL << 10) + +#if defined(SUPPORT_BUFFER_SYNC) || defined(INTERNAL_TEST) + #define SUPPORT_BUFFER_SYNC_SET_OFFSET OPTIONS_BIT11 + #define OPTIONS_BIT11 (0x1UL << 11) + #if OPTIONS_BIT11 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT11 0x0UL +#endif /* SUPPORT_BUFFER_SYNC */ + +#if defined(RGX_FW_IRQ_OS_COUNTERS) || defined(INTERNAL_TEST) + #define SUPPORT_FW_IRQ_REG_COUNTERS OPTIONS_BIT12 + #define OPTIONS_BIT12 (0x1UL << 12) + #if OPTIONS_BIT12 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT12 0x0UL +#endif /* RGX_FW_IRQ_OS_COUNTERS */ + +#if defined(SUPPORT_AUTOVZ) + #define SUPPORT_AUTOVZ_OFFSET OPTIONS_BIT14 + #define OPTIONS_BIT14 (0x1UL << 14) + #if OPTIONS_BIT14 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT14 0x0UL +#endif + +#if defined(SUPPORT_AUTOVZ_HW_REGS) + #define SUPPORT_AUTOVZ_HW_REGS_OFFSET OPTIONS_BIT15 + #define OPTIONS_BIT15 (0x1UL << 15) + #if OPTIONS_BIT15 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT15 0x0UL +#endif + + +#define RGX_BUILD_OPTIONS_KM \ + (OPTIONS_BIT0 |\ + OPTIONS_BIT1 |\ + OPTIONS_BIT2 |\ + OPTIONS_BIT3 |\ + OPTIONS_BIT4 |\ + OPTIONS_BIT6 |\ + OPTIONS_BIT7 |\ + OPTIONS_BIT8 |\ + OPTIONS_BIT9 |\ + OPTIONS_BIT10 |\ + OPTIONS_BIT11 |\ + OPTIONS_BIT12 |\ + OPTIONS_BIT14 |\ + OPTIONS_BIT15) + +#define RGX_BUILD_OPTIONS_MASK_FW \ + (RGX_BUILD_OPTIONS_MASK_KM & \ + ~OPTIONS_BIT11) + +#define OPTIONS_BIT31 (0x1UL << 31) +#if OPTIONS_BIT31 <= RGX_BUILD_OPTIONS_MASK_KM +#error "Bit exceeds reserved range" +#endif +#define SUPPORT_PERCONTEXT_FREELIST_SET_OFFSET OPTIONS_BIT31 + +#define RGX_BUILD_OPTIONS (RGX_BUILD_OPTIONS_KM | OPTIONS_BIT31) + +#define OPTIONS_STRICT (RGX_BUILD_OPTIONS & \ + ~(OPTIONS_DEBUG_MASK | \ + OPTIONS_WORKLOAD_ESTIMATION_MASK | \ + OPTIONS_PDVFS_MASK)) 
+ +#endif /* RGX_OPTIONS_H */ diff --git a/drivers/mcst/gpu-imgtec/include/rogue/system/rgx_tc/apollo_clocks.h b/drivers/mcst/gpu-imgtec/include/rogue/system/rgx_tc/apollo_clocks.h new file mode 100644 index 000000000000..8bf67a4325b8 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/rogue/system/rgx_tc/apollo_clocks.h @@ -0,0 +1,151 @@ +/*************************************************************************/ /*! +@File +@Title System Description Header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides system-specific declarations and macros +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(APOLLO_CLOCKS_H) +#define APOLLO_CLOCKS_H + +/* + * The core clock speed is passed through a multiplier depending on the TC + * version. + * + * On TC_ES1: Multiplier = x3, final speed = 270MHz + * On TC_ES2: Multiplier = x6, final speed = 540MHz + * On TCF5: Multiplier = 1x final speed = 45MHz + * + * + * The base (unmultiplied speed) can be adjusted using a module parameter + * called "sys_core_clk_speed", a number in Hz. + * As an example: + * + * PVR_SRVKM_PARAMS="sys_core_clk_speed=60000000" /etc/init.d/rc.pvr start + * + * would result in a core speed of 60MHz xMultiplier. + * + * + * The memory clock is unmultiplied and can be adjusted using a module + * parameter called "sys_mem_clk_speed", this should be the number in Hz for + * the memory clock speed. + * As an example: + * + * PVR_SRVKM_PARAMS="sys_mem_clk_speed=100000000" /etc/init.d/rc.pvr start + * + * would attempt to start the driver with the memory clock speed set to 100MHz. + * + * + * Same applies to the system interface clock speed, "sys_sysif_clk_speed". + * Needed for TCF5 but not for TC_ES2/ES1. 
+ * As an example: + * + * PVR_SRVKM_PARAMS="sys_sysif_clk_speed=45000000" /etc/init.d/rc.pvr start + * + * would attempt to start the driver with the system clock speed set to 45MHz. + * + * + * All parameters can be specified at once, e.g., + * PVR_SRVKM_PARAMS="sys_mem_clk_speed=MEMORY_SPEED sys_core_clk_speed=CORE_SPEED sys_sysif_clk_speed=SYSIF_SPEED" /etc/init.d/rc.pvr start + */ + +#define RGX_TC_SYS_CLOCK_SPEED (25000000) /*< At the moment just used for TCF5 */ + +#if defined(TC_APOLLO_TCF5_22_46_54_330) + #undef RGX_TC_SYS_CLOCK_SPEED + #define RGX_TC_CORE_CLOCK_SPEED (100000000) + #define RGX_TC_MEM_CLOCK_SPEED (45000000) + #define RGX_TC_SYS_CLOCK_SPEED (45000000) +#elif defined(TC_APOLLO_TCF5_22_49_21_16) || \ + defined(TC_APOLLO_TCF5_22_60_22_29) || \ + defined(TC_APOLLO_TCF5_22_75_22_25) + #define RGX_TC_CORE_CLOCK_SPEED (20000000) + #define RGX_TC_MEM_CLOCK_SPEED (50000000) +#elif defined(TC_APOLLO_TCF5_22_67_54_30) + #define RGX_TC_CORE_CLOCK_SPEED (100000000) + #define RGX_TC_MEM_CLOCK_SPEED (45000000) +#elif defined(TC_APOLLO_TCF5_22_89_204_18) + #define RGX_TC_CORE_CLOCK_SPEED (50000000) + #define RGX_TC_MEM_CLOCK_SPEED (25000000) +#elif defined(TC_APOLLO_TCF5_22_86_104_218) + #define RGX_TC_CORE_CLOCK_SPEED (30000000) + #define RGX_TC_MEM_CLOCK_SPEED (40000000) +#elif defined(TC_APOLLO_TCF5_22_88_104_318) + #define RGX_TC_CORE_CLOCK_SPEED (28000000) + #define RGX_TC_MEM_CLOCK_SPEED (40000000) +#elif defined(TC_APOLLO_TCF5_22_98_54_230) + #define RGX_TC_CORE_CLOCK_SPEED (100000000) + #define RGX_TC_MEM_CLOCK_SPEED (40000000) +#elif defined(TC_APOLLO_TCF5_22_102_54_38) + #define RGX_TC_CORE_CLOCK_SPEED (80000000) + #define RGX_TC_MEM_CLOCK_SPEED (25000000) +#elif defined(TC_APOLLO_TCF5_BVNC_NOT_SUPPORTED) + /* TC TCF5 (22.*) fallback frequencies */ + #undef RGX_TC_SYS_CLOCK_SPEED + #define RGX_TC_CORE_CLOCK_SPEED (20000000) + #define RGX_TC_MEM_CLOCK_SPEED (50000000) + #define RGX_TC_SYS_CLOCK_SPEED (25000000) +#elif 
defined(TC_APOLLO_TCF5_33_8_22_1) + #define RGX_TC_CORE_CLOCK_SPEED (25000000) + #define RGX_TC_MEM_CLOCK_SPEED (45000000) +#elif defined(TC_APOLLO_TCF5_REFERENCE) + /* TC TCF5 (Reference bitfile) */ + #undef RGX_TC_SYS_CLOCK_SPEED + #define RGX_TC_CORE_CLOCK_SPEED (50000000) + #define RGX_TC_MEM_CLOCK_SPEED (50000000) + #define RGX_TC_SYS_CLOCK_SPEED (45000000) +#elif defined(TC_APOLLO_BONNIE) + /* TC Bonnie */ + #define RGX_TC_CORE_CLOCK_SPEED (18000000) + #define RGX_TC_MEM_CLOCK_SPEED (65000000) +#elif defined(TC_APOLLO_ES2) + /* TC ES2 */ + #define RGX_TC_CORE_CLOCK_SPEED (90000000) + #define RGX_TC_MEM_CLOCK_SPEED (104000000) +#elif defined(TC_ORION) + #define RGX_TC_CORE_CLOCK_SPEED (40000000) + #define RGX_TC_MEM_CLOCK_SPEED (100000000) + #define RGX_TC_SYS_CLOCK_SPEED (25000000) +#else + /* TC ES1 */ + #define RGX_TC_CORE_CLOCK_SPEED (90000000) + #define RGX_TC_MEM_CLOCK_SPEED (65000000) +#endif + +#endif /* if !defined(APOLLO_CLOCKS_H) */ diff --git a/drivers/mcst/gpu-imgtec/include/services_km.h b/drivers/mcst/gpu-imgtec/include/services_km.h new file mode 100644 index 000000000000..ca76cf9395a2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/services_km.h @@ -0,0 +1,168 @@ +/*************************************************************************/ /*! +@File +@Title Services API Kernel mode Header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exported services API details +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef SERVICES_KM_H +#define SERVICES_KM_H + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#include "virt_validation_defs.h" +#endif + +/*! 4k page size definition */ +#define PVRSRV_4K_PAGE_SIZE 4096UL /*!< Size of a 4K Page */ +#define PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT 12 /*!< Amount to shift an address by so that + it is always page-aligned */ +/*! 16k page size definition */ +#define PVRSRV_16K_PAGE_SIZE 16384UL /*!< Size of a 16K Page */ +#define PVRSRV_16K_PAGE_SIZE_ALIGNSHIFT 14 /*!< Amount to shift an address by so that + it is always page-aligned */ +/*! 64k page size definition */ +#define PVRSRV_64K_PAGE_SIZE 65536UL /*!< Size of a 64K Page */ +#define PVRSRV_64K_PAGE_SIZE_ALIGNSHIFT 16 /*!< Amount to shift an address by so that + it is always page-aligned */ +/*! 256k page size definition */ +#define PVRSRV_256K_PAGE_SIZE 262144UL /*!< Size of a 256K Page */ +#define PVRSRV_256K_PAGE_SIZE_ALIGNSHIFT 18 /*!< Amount to shift an address by so that + it is always page-aligned */ +/*! 1MB page size definition */ +#define PVRSRV_1M_PAGE_SIZE 1048576UL /*!< Size of a 1M Page */ +#define PVRSRV_1M_PAGE_SIZE_ALIGNSHIFT 20 /*!< Amount to shift an address by so that + it is always page-aligned */ +/*! 2MB page size definition */ +#define PVRSRV_2M_PAGE_SIZE 2097152UL /*!< Size of a 2M Page */ +#define PVRSRV_2M_PAGE_SIZE_ALIGNSHIFT 21 /*!< Amount to shift an address by so that + it is always page-aligned */ + +#ifndef PVRSRV_DEV_CONNECTION_TYPEDEF +#define PVRSRV_DEV_CONNECTION_TYPEDEF +/*! + * Forward declaration (look on connection.h) + */ +typedef struct PVRSRV_DEV_CONNECTION_TAG PVRSRV_DEV_CONNECTION; +#endif + +/*! + Flags for Services connection. 
+ Allows a per-client policy to be defined for Services +*/ +/* + * Use of the 32-bit connection flags mask + * ( X = taken/in use, - = available/unused ) + * + * 31 27 20 6 4 0 + * | | | | | | + * X---XXXXXXXX-------------XXX---- + */ + +#define SRV_NO_HWPERF_CLIENT_STREAM (1U << 4) /*!< Don't create HWPerf for this connection */ +#define SRV_FLAGS_CLIENT_64BIT_COMPAT (1U << 5) /*!< This flag gets set if the client is 64 Bit compatible. */ +#define SRV_FLAGS_CLIENT_SLR_DISABLED (1U << 6) /*!< This flag is set if the client does not want Sync Lockup Recovery (SLR) enabled. */ +#define SRV_FLAGS_PDUMPCTRL (1U << 31) /*!< PDump Ctrl client flag */ + +/* + * Bits 20 - 27 are used to pass information needed for validation + * of the GPU Virtualisation Validation mechanism. In particular: + * + * Bits: + * [20 - 22]: OSid of the memory region that will be used for allocations + * [23 - 25]: OSid that will be emitted by the Firmware for all memory accesses + * regarding that memory context. + * [26]: If the AXI Protection register will be set to secure for that OSid + * [27]: If the Emulator Wrapper Register checking for protection violation + * will be set to secure for that OSid + */ + +#define VIRTVAL_FLAG_OSID_SHIFT (20) +#define SRV_VIRTVAL_FLAG_OSID_MASK (7U << VIRTVAL_FLAG_OSID_SHIFT) + +#define VIRTVAL_FLAG_OSIDREG_SHIFT (23) +#define SRV_VIRTVAL_FLAG_OSIDREG_MASK (7U << VIRTVAL_FLAG_OSIDREG_SHIFT) + +#define VIRTVAL_FLAG_AXIPREG_SHIFT (26) +#define SRV_VIRTVAL_FLAG_AXIPREG_MASK (1U << VIRTVAL_FLAG_AXIPREG_SHIFT) + +#define VIRTVAL_FLAG_AXIPTD_SHIFT (27) +#define SRV_VIRTVAL_FLAG_AXIPTD_MASK (1U << VIRTVAL_FLAG_AXIPTD_SHIFT) + + +/* Size of pointer on a 64 bit machine */ +#define POINTER_SIZE_64BIT (8) + + +/* + Pdump flags which are accessible to Services clients +*/ +#define PDUMP_NONE 0x00000000U /*puiAddrUFO.ui32Addr)) + +/* Maximum number of sync checkpoints the firmware supports in one fence */ +#define MAX_SYNC_CHECKPOINTS_PER_FENCE 32U + +/*! 
+ * Define to be used with SyncCheckpointAlloc() to indicate a checkpoint which + * represents a foreign sync point or collection of foreign sync points. + */ +#define SYNC_CHECKPOINT_FOREIGN_CHECKPOINT ((PVRSRV_TIMELINE) - 2U) + +#endif /* SYNC_CHECKPOINT_EXTERNAL_H */ diff --git a/drivers/mcst/gpu-imgtec/include/sync_prim_internal.h b/drivers/mcst/gpu-imgtec/include/sync_prim_internal.h new file mode 100644 index 000000000000..262f5b58a890 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/sync_prim_internal.h @@ -0,0 +1,84 @@ +/*************************************************************************/ /*! +@File +@Title Services internal synchronisation typedef header +@Description Defines synchronisation types that are used internally + only +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef SYNC_INTERNAL_H +#define SYNC_INTERNAL_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include + +/* These are included here as the typedefs are required + * internally. + */ + +typedef struct SYNC_PRIM_CONTEXT *PSYNC_PRIM_CONTEXT; +typedef struct PVRSRV_CLIENT_SYNC_PRIM +{ + volatile uint32_t __iomem *pui32LinAddr; /*!< User pointer to the primitive */ +} PVRSRV_CLIENT_SYNC_PRIM; + +/*! 
+ * Bundled information for a sync prim operation + * + * Structure: #PVRSRV_CLIENT_SYNC_PRIM_OP + * Typedef: ::PVRSRV_CLIENT_SYNC_PRIM_OP + */ +typedef struct PVRSRV_CLIENT_SYNC_PRIM_OP +{ + #define PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK (1U << 0) + #define PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE (1U << 1) + #define PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE (PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE | (1U<<2)) + uint32_t ui32Flags; /*!< Operation flags: PVRSRV_CLIENT_SYNC_PRIM_OP_XXX */ + PVRSRV_CLIENT_SYNC_PRIM *psSync; /*!< Pointer to the client sync primitive */ + uint32_t ui32FenceValue; /*!< The Fence value (only used if PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK is set) */ + uint32_t ui32UpdateValue; /*!< The Update value (only used if PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE is set) */ +} PVRSRV_CLIENT_SYNC_PRIM_OP; + +#if defined(__cplusplus) +} +#endif +#endif /* SYNC_INTERNAL_H */ diff --git a/drivers/mcst/gpu-imgtec/include/system/rgx_tc/apollo_regs.h b/drivers/mcst/gpu-imgtec/include/system/rgx_tc/apollo_regs.h new file mode 100644 index 000000000000..1f711e1cc960 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/system/rgx_tc/apollo_regs.h @@ -0,0 +1,108 @@ +/*************************************************************************/ /*! +@File +@Title System Description Header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides system-specific declarations and macros +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(APOLLO_REGS_H) +#define APOLLO_REGS_H + +#include "apollo_clocks.h" + +/* TC TCF5 */ +#define TC5_SYS_APOLLO_REG_PCI_BASENUM (1) +#define TC5_SYS_APOLLO_REG_PDP2_OFFSET (0x800000) +#define TC5_SYS_APOLLO_REG_PDP2_SIZE (0x7C4) + +#define TC5_SYS_APOLLO_REG_PDP2_FBDC_OFFSET (0xA00000) +#define TC5_SYS_APOLLO_REG_PDP2_FBDC_SIZE (0x14) + +#define TC5_SYS_APOLLO_REG_HDMI_OFFSET (0xC00000) +#define TC5_SYS_APOLLO_REG_HDMI_SIZE (0x1C) + +/* TC ES2 */ +#define TCF_TEMP_SENSOR_SPI_OFFSET 0xe +#define TCF_TEMP_SENSOR_TO_C(raw) (((raw) * 248 / 4096) - 54) + +/* Number of bytes that are broken */ +#define SYS_DEV_MEM_BROKEN_BYTES (1024 * 1024) +#define SYS_DEV_MEM_REGION_SIZE (0x40000000 - SYS_DEV_MEM_BROKEN_BYTES) + +/* Apollo reg on base register 0 */ +#define SYS_APOLLO_REG_PCI_BASENUM (0) +#define SYS_APOLLO_REG_REGION_SIZE (0x00010000) + +#define SYS_APOLLO_REG_SYS_OFFSET (0x0000) +#define SYS_APOLLO_REG_SYS_SIZE (0x0400) + +#define SYS_APOLLO_REG_PLL_OFFSET (0x1000) +#define SYS_APOLLO_REG_PLL_SIZE (0x0400) + +#define SYS_APOLLO_REG_HOST_OFFSET (0x4050) +#define SYS_APOLLO_REG_HOST_SIZE (0x0014) + +#define SYS_APOLLO_REG_PDP1_OFFSET (0xC000) +#define SYS_APOLLO_REG_PDP1_SIZE (0x2000) + +/* Offsets for flashing Apollo PROMs from base 0 */ +#define APOLLO_FLASH_STAT_OFFSET (0x4058) +#define APOLLO_FLASH_DATA_WRITE_OFFSET (0x4050) +#define APOLLO_FLASH_RESET_OFFSET (0x4060) + +#define APOLLO_FLASH_FIFO_STATUS_MASK (0xF) +#define APOLLO_FLASH_FIFO_STATUS_SHIFT (0) +#define APOLLO_FLASH_PROGRAM_STATUS_MASK (0xF) +#define APOLLO_FLASH_PROGRAM_STATUS_SHIFT (16) + +#define APOLLO_FLASH_PROG_COMPLETE_BIT (0x1) +#define APOLLO_FLASH_PROG_PROGRESS_BIT (0x2) +#define APOLLO_FLASH_PROG_FAILED_BIT (0x4) +#define APOLLO_FLASH_INV_FILETYPE_BIT (0x8) + +#define APOLLO_FLASH_FIFO_SIZE (8) + +/* RGX reg on base register 1 */ +#define SYS_RGX_REG_PCI_BASENUM (1) +#define SYS_RGX_REG_REGION_SIZE 
(0x7FFFF) + +/* Device memory (including HP mapping) on base register 2 */ +#define SYS_DEV_MEM_PCI_BASENUM (2) + +#endif /* APOLLO_REGS_H */ diff --git a/drivers/mcst/gpu-imgtec/include/system/rgx_tc/bonnie_tcf.h b/drivers/mcst/gpu-imgtec/include/system/rgx_tc/bonnie_tcf.h new file mode 100644 index 000000000000..fc87ec790df9 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/system/rgx_tc/bonnie_tcf.h @@ -0,0 +1,68 @@ +/*************************************************************************/ /*! +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* bonnie_tcf.h - Bonnie TCF register definitions */ + +/* tab size 4 */ + +#ifndef BONNIE_TCF_DEFS_H +#define BONNIE_TCF_DEFS_H + +#define BONNIE_TCF_OFFSET_BONNIETC_REGBANK 0x00000000 +#define BONNIE_TCF_OFFSET_TC_IFACE_COUNTERS 0x00004000 +#define BONNIE_TCF_OFFSET_TC_TEST_MODULE_IMGV4_RTM_TOP 0x00008000 +#define BONNIE_TCF_OFFSET_TC_TEST_MODULE_TCF_SCRATCH_PAD_SECN 0x0000C000 +#define BONNIE_TCF_OFFSET_TC_TEST_MODULE_TCF_SCRATCH_PAD_DBG 0x00010000 +#define BONNIE_TCF_OFFSET_MULTI_CLK_ALIGN 0x00014000 +#define BONNIE_TCF_OFFSET_ALIGN_DATA_TX 0x00018000 +#define BONNIE_TCF_OFFSET_SAI_RX_1 0x0001C000 +#define BONNIE_TCF_OFFSET_SAI_RX_SDR 0x00040000 +#define BONNIE_TCF_OFFSET_SAI_TX_1 0x00044000 +#define BONNIE_TCF_OFFSET_SAI_TX_SDR 0x00068000 + +#define BONNIE_TCF_OFFSET_SAI_RX_DELTA 0x00004000 +#define BONNIE_TCF_OFFSET_SAI_TX_DELTA 0x00004000 + +#define BONNIE_TCF_OFFSET_SAI_CLK_TAPS 0x0000000C +#define BONNIE_TCF_OFFSET_SAI_EYES 0x00000010 +#define BONNIE_TCF_OFFSET_SAI_TRAIN_ACK 0x00000018 + + +#endif /* BONNIE_TCF_DEFS_H */ diff --git a/drivers/mcst/gpu-imgtec/include/system/rgx_tc/odin_defs.h b/drivers/mcst/gpu-imgtec/include/system/rgx_tc/odin_defs.h new file mode 100644 index 000000000000..61b4d44f896e --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/include/system/rgx_tc/odin_defs.h @@ -0,0 +1,307 @@ +/**************************************************************************** +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Odin Memory Map - View from PCIe +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +****************************************************************************/ + +#ifndef _ODIN_DEFS_H_ +#define _ODIN_DEFS_H_ + +/* These defines have not been autogenerated */ + +#define PCI_VENDOR_ID_ODIN (0x1AEE) +#define DEVICE_ID_ODIN (0x1010) + +/* PCI BAR 0 contains the PDP regs and the Odin system regs */ +#define ODN_SYS_BAR 0 +#define ODN_SYS_REGION_SIZE 0x000800000 /* 8MB */ + +#define ODN_SYS_REGS_OFFSET 0 +#define ODN_SYS_REGS_SIZE 0x000400000 /* 4MB */ + +#define ODN_PDP_REGS_OFFSET 0x000440000 +#define ODN_PDP_REGS_SIZE 0x000040000 /* 256k */ + + +/* PCI BAR 2 contains the Device Under Test SOCIF 64MB region */ +#define ODN_DUT_SOCIF_BAR 2 +#define ODN_DUT_SOCIF_OFFSET 0x000000000 +#define ODN_DUT_SOCIF_SIZE 0x004000000 /* 64MB */ + +/* PCI BAR 4 contains the on-board 1GB DDR memory */ +#define ODN_DDR_BAR 4 +#define ODN_DDR_MEM_OFFSET 0x000000000 +#define ODN_DDR_MEM_SIZE 0x040000000 /* 1GB */ + +/* Odin system register banks */ +#define ODN_REG_BANK_CORE 0x00000 +#define ODN_REG_BANK_TCF_SPI_MASTER 0x02000 +#define ODN_REG_BANK_ODN_CLK_BLK 0x0A000 +#define ODN_REG_BANK_ODN_MCU_COMMUNICATOR 0x0C000 +#define ODN_REG_BANK_DB_TYPE_ID 0x0C200 +#define ODN_REG_BANK_DB_TYPE_ID_TYPE_TCFVUOCTA 0x000000C6U +#define ODN_REG_BANK_DB_TYPE_ID_TYPE_MASK 0x000000C0U +#define ODN_REG_BANK_DB_TYPE_ID_TYPE_SHIFT 0x6 +#define ODN_REG_BANK_ODN_I2C 0x0E000 +#define ODN_REG_BANK_MULTI_CLK_ALIGN 0x20000 +#define ODN_REG_BANK_ALIGN_DATA_TX 
0x22000 +#define ODN_REG_BANK_SAI_RX_DDR_0 0x24000 +#define ODN_REG_BANK_SAI_RX_DDR(n) (ODN_REG_BANK_SAI_RX_DDR_0 + (0x02000*n)) +#define ODN_REG_BANK_SAI_TX_DDR_0 0x3A000 +#define ODN_REG_BANK_SAI_TX_DDR(n) (ODN_REG_BANK_SAI_TX_DDR_0 + (0x02000*n)) +#define ODN_REG_BANK_SAI_TX_SDR 0x4E000 + +/* Odin SPI regs */ +#define ODN_SPI_MST_ADDR_RDNWR 0x0000 +#define ODN_SPI_MST_WDATA 0x0004 +#define ODN_SPI_MST_RDATA 0x0008 +#define ODN_SPI_MST_STATUS 0x000C +#define ODN_SPI_MST_GO 0x0010 + +/* Odin C2C link regs */ +#define CR_C2C_CHANNEL_STATUS_BASEB (0x20000) +#define CR_C2C_CHANNEL_CTRL_BASEB (0x20004) +#define CR_C2C_CHANNEL_STATUS_DAUGHTB (0x1800) +#define CR_C2C_CHANNEL_CTRL_DAUGHTB (0x1804) + +#define C2C_RESETVAL (0xB010) +#define C2C_DEFVAL (0xB011) + +/* C2C link number of attempts and timeout */ +#define C2C_READY_WAIT_ATTEMPTS (10) +#define C2C_READY_WAIT_MS (3000) + +/* Link status: BB = baseboard, DB = daughterboard */ +#define C2C_BB_STATUS_SHIFT (0) +#define C2C_BB_STATUS_OK (1 << C2C_BB_STATUS_SHIFT) +#define C2C_DB_STATUS_SHIFT (4) +#define C2C_DB_STATUS_OK (1 << C2C_DB_STATUS_SHIFT) + + +/* + Odin CLK regs - the odn_clk_blk module defs are not auto generated + */ +#define ODN_PDP_P_CLK_OUT_DIVIDER_REG1 0x620 +#define ODN_PDP_PCLK_ODIV1_LO_TIME_MASK 0x0000003FU +#define ODN_PDP_PCLK_ODIV1_LO_TIME_SHIFT 0 +#define ODN_PDP_PCLK_ODIV1_HI_TIME_MASK 0x00000FC0U +#define ODN_PDP_PCLK_ODIV1_HI_TIME_SHIFT 6 + +#define ODN_PDP_P_CLK_OUT_DIVIDER_REG2 0x624 +#define ODN_PDP_PCLK_ODIV2_NOCOUNT_MASK 0x00000040U +#define ODN_PDP_PCLK_ODIV2_NOCOUNT_SHIFT 6 +#define ODN_PDP_PCLK_ODIV2_EDGE_MASK 0x00000080U +#define ODN_PDP_PCLK_ODIV2_EDGE_SHIFT 7 + +#define ODN_PDP_P_CLK_OUT_DIVIDER_REG3 0x61C + +#define ODN_PDP_M_CLK_OUT_DIVIDER_REG1 0x628 +#define ODN_PDP_MCLK_ODIV1_LO_TIME_MASK 0x0000003FU +#define ODN_PDP_MCLK_ODIV1_LO_TIME_SHIFT 0 +#define ODN_PDP_MCLK_ODIV1_HI_TIME_MASK 0x00000FC0U +#define ODN_PDP_MCLK_ODIV1_HI_TIME_SHIFT 6 + +#define 
ODN_PDP_M_CLK_OUT_DIVIDER_REG2 0x62C +#define ODN_PDP_MCLK_ODIV2_NOCOUNT_MASK 0x00000040U +#define ODN_PDP_MCLK_ODIV2_NOCOUNT_SHIFT 6 +#define ODN_PDP_MCLK_ODIV2_EDGE_MASK 0x00000080U +#define ODN_PDP_MCLK_ODIV2_EDGE_SHIFT 7 + +#define ODN_PDP_P_CLK_MULTIPLIER_REG1 0x650 +#define ODN_PDP_PCLK_MUL1_LO_TIME_MASK 0x0000003FU +#define ODN_PDP_PCLK_MUL1_LO_TIME_SHIFT 0 +#define ODN_PDP_PCLK_MUL1_HI_TIME_MASK 0x00000FC0U +#define ODN_PDP_PCLK_MUL1_HI_TIME_SHIFT 6 + +#define ODN_PDP_P_CLK_MULTIPLIER_REG2 0x654 +#define ODN_PDP_PCLK_MUL2_NOCOUNT_MASK 0x00000040U +#define ODN_PDP_PCLK_MUL2_NOCOUNT_SHIFT 6 +#define ODN_PDP_PCLK_MUL2_EDGE_MASK 0x00000080U +#define ODN_PDP_PCLK_MUL2_EDGE_SHIFT 7 + +#define ODN_PDP_P_CLK_MULTIPLIER_REG3 0x64C + +#define ODN_PDP_P_CLK_IN_DIVIDER_REG 0x658 +#define ODN_PDP_PCLK_IDIV_LO_TIME_MASK 0x0000003FU +#define ODN_PDP_PCLK_IDIV_LO_TIME_SHIFT 0 +#define ODN_PDP_PCLK_IDIV_HI_TIME_MASK 0x00000FC0U +#define ODN_PDP_PCLK_IDIV_HI_TIME_SHIFT 6 +#define ODN_PDP_PCLK_IDIV_NOCOUNT_MASK 0x00001000U +#define ODN_PDP_PCLK_IDIV_NOCOUNT_SHIFT 12 +#define ODN_PDP_PCLK_IDIV_EDGE_MASK 0x00002000U +#define ODN_PDP_PCLK_IDIV_EDGE_SHIFT 13 + +/* + * DUT core clock input divider, multiplier and out divider. 
+ */ +#define ODN_DUT_CORE_CLK_OUT_DIVIDER1 (0x0028) +#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_MASK (0x00000FC0U) +#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_SHIFT (6) +#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_MASK (0x0000003FU) +#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_SHIFT (0) + +#define ODN_DUT_CORE_CLK_OUT_DIVIDER2 (0x002C) +#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_MASK (0x00000080U) +#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_SHIFT (7) +#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_MASK (0x00000040U) +#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT (6) + +#define ODN_DUT_CORE_CLK_MULTIPLIER1 (0x0050) +#define ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME_MASK (0x00000FC0U) +#define ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME_SHIFT (6) +#define ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME_MASK (0x0000003FU) +#define ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME_SHIFT (0) + +#define ODN_DUT_CORE_CLK_MULTIPLIER2 (0x0054) +#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_MASK (0x00007000U) +#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_SHIFT (12) +#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_EN_MASK (0x00000800U) +#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_EN_SHIFT (11) +#define ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE_MASK (0x00000080U) +#define ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE_SHIFT (7) +#define ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT_MASK (0x00000040U) +#define ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT_SHIFT (6) + +#define ODN_DUT_CORE_CLK_IN_DIVIDER1 (0x0058) +#define ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE_MASK (0x00002000U) +#define ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE_SHIFT (13) +#define ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT_MASK (0x00001000U) +#define ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT_SHIFT (12) +#define ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME_MASK (0x00000FC0U) +#define ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME_SHIFT (6) +#define ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME_MASK (0x0000003FU) +#define ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME_SHIFT (0) + +/* + * DUT interface clock input divider, multiplier and out 
divider. + */ +#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1 (0x0220) +#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME_MASK (0x00000FC0U) +#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME_SHIFT (6) +#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME_MASK (0x0000003FU) +#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME_SHIFT (0) + +#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2 (0x0224) +#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE_MASK (0x00000080U) +#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE_SHIFT (7) +#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT_MASK (0x00000040U) +#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT (6) + +#define ODN_DUT_IFACE_CLK_MULTIPLIER1 (0x0250) +#define ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME_MASK (0x00000FC0U) +#define ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME_SHIFT (6) +#define ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME_MASK (0x0000003FU) +#define ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME_SHIFT (0) + +#define ODN_DUT_IFACE_CLK_MULTIPLIER2 (0x0254) +#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_MASK (0x00007000U) +#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_SHIFT (12) +#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_EN_MASK (0x00000800U) +#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_EN_SHIFT (11) +#define ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE_MASK (0x00000080U) +#define ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE_SHIFT (7) +#define ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT_MASK (0x00000040U) +#define ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT_SHIFT (6) + +#define ODN_DUT_IFACE_CLK_IN_DIVIDER1 (0x0258) +#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE_MASK (0x00002000U) +#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE_SHIFT (13) +#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT_MASK (0x00001000U) +#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT_SHIFT (12) +#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME_MASK (0x00000FC0U) +#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME_SHIFT (6) +#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME_MASK (0x0000003FU) +#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME_SHIFT (0) + + +/* + * Min max 
values from Xilinx Virtex7 data sheet DS183, for speed grade 2 + * All in Hz + */ +#define ODN_INPUT_CLOCK_SPEED (100000000U) +#define ODN_INPUT_CLOCK_SPEED_MIN (10000000U) +#define ODN_INPUT_CLOCK_SPEED_MAX (933000000U) +#define ODN_OUTPUT_CLOCK_SPEED_MIN (4690000U) +#define ODN_OUTPUT_CLOCK_SPEED_MAX (933000000U) +#define ODN_VCO_MIN (600000000U) +#define ODN_VCO_MAX (1440000000U) +#define ODN_PFD_MIN (10000000U) +#define ODN_PFD_MAX (500000000U) + +/* + * Max values that can be set in DRP registers + */ +#define ODN_OREG_VALUE_MAX (126.875f) +#define ODN_MREG_VALUE_MAX (126.875f) +#define ODN_DREG_VALUE_MAX (126U) + + +#define ODN_MMCM_LOCK_STATUS_DUT_CORE (0x00000001U) +#define ODN_MMCM_LOCK_STATUS_DUT_IF (0x00000002U) +#define ODN_MMCM_LOCK_STATUS_PDPP (0x00000008U) + +/* + Odin interrupt flags +*/ +#define ODN_INTERRUPT_ENABLE_PDP1 (1 << ODN_INTERRUPT_ENABLE_PDP1_SHIFT) +#define ODN_INTERRUPT_ENABLE_DUT (1 << ODN_INTERRUPT_ENABLE_DUT_SHIFT) +#define ODN_INTERRUPT_STATUS_PDP1 (1 << ODN_INTERRUPT_STATUS_PDP1_SHIFT) +#define ODN_INTERRUPT_STATUS_DUT (1 << ODN_INTERRUPT_STATUS_DUT_SHIFT) +#define ODN_INTERRUPT_CLEAR_PDP1 (1 << ODN_INTERRUPT_CLR_PDP1_SHIFT) +#define ODN_INTERRUPT_CLEAR_DUT (1 << ODN_INTERRUPT_CLR_DUT_SHIFT) + +/* + Other defines +*/ +#define ODN_STREAM_OFF 0 +#define ODN_STREAM_ON 1 +#define ODN_SYNC_GEN_DISABLE 0 +#define ODN_SYNC_GEN_ENABLE 1 +#define ODN_INTERLACE_DISABLE 0 +#define ODN_INTERLACE_ENABLE 1 +#define ODN_PIXEL_CLOCK_INVERTED 1 +#define ODN_HSYNC_POLARITY_ACTIVE_HIGH 1 + +#define ODN_PDP_INTCLR_ALL 0x000FFFFFU +#define ODN_PDP_INTSTAT_ALL_OURUN_MASK 0x000FFFF0U + +#endif /* _ODIN_DEFS_H_ */ + +/***************************************************************************** + End of file (odn_defs.h) +*****************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/system/rgx_tc/odin_pdp_regs.h b/drivers/mcst/gpu-imgtec/include/system/rgx_tc/odin_pdp_regs.h new file mode 
100644 index 000000000000..da47a253db31 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/system/rgx_tc/odin_pdp_regs.h @@ -0,0 +1,8540 @@ +/*************************************************************************/ /*! +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* tab size 4 */ + +#ifndef ODN_PDP_REGS_H +#define ODN_PDP_REGS_H + +/* Odin-PDP hardware register definitions */ + + +#define ODN_PDP_GRPH1SURF_OFFSET (0x0000) + +/* PDP, GRPH1SURF, GRPH1PIXFMT +*/ +#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_MASK (0xF8000000) +#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_LSBMASK (0x0000001F) +#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT (27) +#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_LENGTH (5) +#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1SURF, GRPH1USEGAMMA +*/ +#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_MASK (0x04000000) +#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_LSBMASK (0x00000001) +#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_SHIFT (26) +#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_LENGTH (1) +#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1SURF, GRPH1USECSC +*/ +#define ODN_PDP_GRPH1SURF_GRPH1USECSC_MASK (0x02000000) +#define ODN_PDP_GRPH1SURF_GRPH1USECSC_LSBMASK (0x00000001) +#define ODN_PDP_GRPH1SURF_GRPH1USECSC_SHIFT (25) +#define ODN_PDP_GRPH1SURF_GRPH1USECSC_LENGTH (1) +#define ODN_PDP_GRPH1SURF_GRPH1USECSC_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1SURF, GRPH1LUTRWCHOICE +*/ +#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_MASK (0x01000000) +#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LSBMASK (0x00000001) +#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SHIFT (24) +#define 
ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LENGTH (1) +#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1SURF, GRPH1USELUT +*/ +#define ODN_PDP_GRPH1SURF_GRPH1USELUT_MASK (0x00800000) +#define ODN_PDP_GRPH1SURF_GRPH1USELUT_LSBMASK (0x00000001) +#define ODN_PDP_GRPH1SURF_GRPH1USELUT_SHIFT (23) +#define ODN_PDP_GRPH1SURF_GRPH1USELUT_LENGTH (1) +#define ODN_PDP_GRPH1SURF_GRPH1USELUT_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH2SURF_OFFSET (0x0004) + +/* PDP, GRPH2SURF, GRPH2PIXFMT +*/ +#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_MASK (0xF8000000) +#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_LSBMASK (0x0000001F) +#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_SHIFT (27) +#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_LENGTH (5) +#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2SURF, GRPH2USEGAMMA +*/ +#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_MASK (0x04000000) +#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_LSBMASK (0x00000001) +#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_SHIFT (26) +#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_LENGTH (1) +#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2SURF, GRPH2USECSC +*/ +#define ODN_PDP_GRPH2SURF_GRPH2USECSC_MASK (0x02000000) +#define ODN_PDP_GRPH2SURF_GRPH2USECSC_LSBMASK (0x00000001) +#define ODN_PDP_GRPH2SURF_GRPH2USECSC_SHIFT (25) +#define ODN_PDP_GRPH2SURF_GRPH2USECSC_LENGTH (1) +#define ODN_PDP_GRPH2SURF_GRPH2USECSC_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2SURF, GRPH2LUTRWCHOICE +*/ +#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_MASK (0x01000000) +#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LSBMASK (0x00000001) +#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SHIFT (24) +#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LENGTH (1) +#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2SURF, GRPH2USELUT +*/ +#define ODN_PDP_GRPH2SURF_GRPH2USELUT_MASK (0x00800000) +#define ODN_PDP_GRPH2SURF_GRPH2USELUT_LSBMASK (0x00000001) +#define ODN_PDP_GRPH2SURF_GRPH2USELUT_SHIFT (23) +#define 
ODN_PDP_GRPH2SURF_GRPH2USELUT_LENGTH (1) +#define ODN_PDP_GRPH2SURF_GRPH2USELUT_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH3SURF_OFFSET (0x0008) + +/* PDP, GRPH3SURF, GRPH3PIXFMT +*/ +#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_MASK (0xF8000000) +#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_LSBMASK (0x0000001F) +#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_SHIFT (27) +#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_LENGTH (5) +#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3SURF, GRPH3USEGAMMA +*/ +#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_MASK (0x04000000) +#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_LSBMASK (0x00000001) +#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_SHIFT (26) +#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_LENGTH (1) +#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3SURF, GRPH3USECSC +*/ +#define ODN_PDP_GRPH3SURF_GRPH3USECSC_MASK (0x02000000) +#define ODN_PDP_GRPH3SURF_GRPH3USECSC_LSBMASK (0x00000001) +#define ODN_PDP_GRPH3SURF_GRPH3USECSC_SHIFT (25) +#define ODN_PDP_GRPH3SURF_GRPH3USECSC_LENGTH (1) +#define ODN_PDP_GRPH3SURF_GRPH3USECSC_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3SURF, GRPH3LUTRWCHOICE +*/ +#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_MASK (0x01000000) +#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LSBMASK (0x00000001) +#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SHIFT (24) +#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LENGTH (1) +#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3SURF, GRPH3USELUT +*/ +#define ODN_PDP_GRPH3SURF_GRPH3USELUT_MASK (0x00800000) +#define ODN_PDP_GRPH3SURF_GRPH3USELUT_LSBMASK (0x00000001) +#define ODN_PDP_GRPH3SURF_GRPH3USELUT_SHIFT (23) +#define ODN_PDP_GRPH3SURF_GRPH3USELUT_LENGTH (1) +#define ODN_PDP_GRPH3SURF_GRPH3USELUT_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH4SURF_OFFSET (0x000C) + +/* PDP, GRPH4SURF, GRPH4PIXFMT +*/ +#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_MASK (0xF8000000) +#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_LSBMASK (0x0000001F) +#define 
ODN_PDP_GRPH4SURF_GRPH4PIXFMT_SHIFT (27) +#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_LENGTH (5) +#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4SURF, GRPH4USEGAMMA +*/ +#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_MASK (0x04000000) +#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_LSBMASK (0x00000001) +#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_SHIFT (26) +#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_LENGTH (1) +#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4SURF, GRPH4USECSC +*/ +#define ODN_PDP_GRPH4SURF_GRPH4USECSC_MASK (0x02000000) +#define ODN_PDP_GRPH4SURF_GRPH4USECSC_LSBMASK (0x00000001) +#define ODN_PDP_GRPH4SURF_GRPH4USECSC_SHIFT (25) +#define ODN_PDP_GRPH4SURF_GRPH4USECSC_LENGTH (1) +#define ODN_PDP_GRPH4SURF_GRPH4USECSC_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4SURF, GRPH4LUTRWCHOICE +*/ +#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_MASK (0x01000000) +#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LSBMASK (0x00000001) +#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SHIFT (24) +#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LENGTH (1) +#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4SURF, GRPH4USELUT +*/ +#define ODN_PDP_GRPH4SURF_GRPH4USELUT_MASK (0x00800000) +#define ODN_PDP_GRPH4SURF_GRPH4USELUT_LSBMASK (0x00000001) +#define ODN_PDP_GRPH4SURF_GRPH4USELUT_SHIFT (23) +#define ODN_PDP_GRPH4SURF_GRPH4USELUT_LENGTH (1) +#define ODN_PDP_GRPH4SURF_GRPH4USELUT_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1SURF_OFFSET (0x0010) + +/* PDP, VID1SURF, VID1PIXFMT +*/ +#define ODN_PDP_VID1SURF_VID1PIXFMT_MASK (0xF8000000) +#define ODN_PDP_VID1SURF_VID1PIXFMT_LSBMASK (0x0000001F) +#define ODN_PDP_VID1SURF_VID1PIXFMT_SHIFT (27) +#define ODN_PDP_VID1SURF_VID1PIXFMT_LENGTH (5) +#define ODN_PDP_VID1SURF_VID1PIXFMT_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SURF, VID1USEGAMMA +*/ +#define ODN_PDP_VID1SURF_VID1USEGAMMA_MASK (0x04000000) +#define ODN_PDP_VID1SURF_VID1USEGAMMA_LSBMASK (0x00000001) +#define 
ODN_PDP_VID1SURF_VID1USEGAMMA_SHIFT (26) +#define ODN_PDP_VID1SURF_VID1USEGAMMA_LENGTH (1) +#define ODN_PDP_VID1SURF_VID1USEGAMMA_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SURF, VID1USECSC +*/ +#define ODN_PDP_VID1SURF_VID1USECSC_MASK (0x02000000) +#define ODN_PDP_VID1SURF_VID1USECSC_LSBMASK (0x00000001) +#define ODN_PDP_VID1SURF_VID1USECSC_SHIFT (25) +#define ODN_PDP_VID1SURF_VID1USECSC_LENGTH (1) +#define ODN_PDP_VID1SURF_VID1USECSC_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SURF, VID1USEI2P +*/ +#define ODN_PDP_VID1SURF_VID1USEI2P_MASK (0x01000000) +#define ODN_PDP_VID1SURF_VID1USEI2P_LSBMASK (0x00000001) +#define ODN_PDP_VID1SURF_VID1USEI2P_SHIFT (24) +#define ODN_PDP_VID1SURF_VID1USEI2P_LENGTH (1) +#define ODN_PDP_VID1SURF_VID1USEI2P_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SURF, VID1COSITED +*/ +#define ODN_PDP_VID1SURF_VID1COSITED_MASK (0x00800000) +#define ODN_PDP_VID1SURF_VID1COSITED_LSBMASK (0x00000001) +#define ODN_PDP_VID1SURF_VID1COSITED_SHIFT (23) +#define ODN_PDP_VID1SURF_VID1COSITED_LENGTH (1) +#define ODN_PDP_VID1SURF_VID1COSITED_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SURF, VID1USEHQCD +*/ +#define ODN_PDP_VID1SURF_VID1USEHQCD_MASK (0x00400000) +#define ODN_PDP_VID1SURF_VID1USEHQCD_LSBMASK (0x00000001) +#define ODN_PDP_VID1SURF_VID1USEHQCD_SHIFT (22) +#define ODN_PDP_VID1SURF_VID1USEHQCD_LENGTH (1) +#define ODN_PDP_VID1SURF_VID1USEHQCD_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SURF, VID1USEINSTREAM +*/ +#define ODN_PDP_VID1SURF_VID1USEINSTREAM_MASK (0x00200000) +#define ODN_PDP_VID1SURF_VID1USEINSTREAM_LSBMASK (0x00000001) +#define ODN_PDP_VID1SURF_VID1USEINSTREAM_SHIFT (21) +#define ODN_PDP_VID1SURF_VID1USEINSTREAM_LENGTH (1) +#define ODN_PDP_VID1SURF_VID1USEINSTREAM_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2SURF_OFFSET (0x0014) + +/* PDP, VID2SURF, VID2PIXFMT +*/ +#define ODN_PDP_VID2SURF_VID2PIXFMT_MASK (0xF8000000) +#define ODN_PDP_VID2SURF_VID2PIXFMT_LSBMASK (0x0000001F) +#define ODN_PDP_VID2SURF_VID2PIXFMT_SHIFT (27) +#define 
ODN_PDP_VID2SURF_VID2PIXFMT_LENGTH (5) +#define ODN_PDP_VID2SURF_VID2PIXFMT_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SURF, VID2COSITED +*/ +#define ODN_PDP_VID2SURF_VID2COSITED_MASK (0x00800000) +#define ODN_PDP_VID2SURF_VID2COSITED_LSBMASK (0x00000001) +#define ODN_PDP_VID2SURF_VID2COSITED_SHIFT (23) +#define ODN_PDP_VID2SURF_VID2COSITED_LENGTH (1) +#define ODN_PDP_VID2SURF_VID2COSITED_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SURF, VID2USEGAMMA +*/ +#define ODN_PDP_VID2SURF_VID2USEGAMMA_MASK (0x04000000) +#define ODN_PDP_VID2SURF_VID2USEGAMMA_LSBMASK (0x00000001) +#define ODN_PDP_VID2SURF_VID2USEGAMMA_SHIFT (26) +#define ODN_PDP_VID2SURF_VID2USEGAMMA_LENGTH (1) +#define ODN_PDP_VID2SURF_VID2USEGAMMA_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SURF, VID2USECSC +*/ +#define ODN_PDP_VID2SURF_VID2USECSC_MASK (0x02000000) +#define ODN_PDP_VID2SURF_VID2USECSC_LSBMASK (0x00000001) +#define ODN_PDP_VID2SURF_VID2USECSC_SHIFT (25) +#define ODN_PDP_VID2SURF_VID2USECSC_LENGTH (1) +#define ODN_PDP_VID2SURF_VID2USECSC_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3SURF_OFFSET (0x0018) + +/* PDP, VID3SURF, VID3PIXFMT +*/ +#define ODN_PDP_VID3SURF_VID3PIXFMT_MASK (0xF8000000) +#define ODN_PDP_VID3SURF_VID3PIXFMT_LSBMASK (0x0000001F) +#define ODN_PDP_VID3SURF_VID3PIXFMT_SHIFT (27) +#define ODN_PDP_VID3SURF_VID3PIXFMT_LENGTH (5) +#define ODN_PDP_VID3SURF_VID3PIXFMT_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3SURF, VID3COSITED +*/ +#define ODN_PDP_VID3SURF_VID3COSITED_MASK (0x00800000) +#define ODN_PDP_VID3SURF_VID3COSITED_LSBMASK (0x00000001) +#define ODN_PDP_VID3SURF_VID3COSITED_SHIFT (23) +#define ODN_PDP_VID3SURF_VID3COSITED_LENGTH (1) +#define ODN_PDP_VID3SURF_VID3COSITED_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3SURF, VID3USEGAMMA +*/ +#define ODN_PDP_VID3SURF_VID3USEGAMMA_MASK (0x04000000) +#define ODN_PDP_VID3SURF_VID3USEGAMMA_LSBMASK (0x00000001) +#define ODN_PDP_VID3SURF_VID3USEGAMMA_SHIFT (26) +#define ODN_PDP_VID3SURF_VID3USEGAMMA_LENGTH (1) +#define ODN_PDP_VID3SURF_VID3USEGAMMA_SIGNED_FIELD 
IMG_FALSE + +/* PDP, VID3SURF, VID3USECSC +*/ +#define ODN_PDP_VID3SURF_VID3USECSC_MASK (0x02000000) +#define ODN_PDP_VID3SURF_VID3USECSC_LSBMASK (0x00000001) +#define ODN_PDP_VID3SURF_VID3USECSC_SHIFT (25) +#define ODN_PDP_VID3SURF_VID3USECSC_LENGTH (1) +#define ODN_PDP_VID3SURF_VID3USECSC_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4SURF_OFFSET (0x001C) + +/* PDP, VID4SURF, VID4PIXFMT +*/ +#define ODN_PDP_VID4SURF_VID4PIXFMT_MASK (0xF8000000) +#define ODN_PDP_VID4SURF_VID4PIXFMT_LSBMASK (0x0000001F) +#define ODN_PDP_VID4SURF_VID4PIXFMT_SHIFT (27) +#define ODN_PDP_VID4SURF_VID4PIXFMT_LENGTH (5) +#define ODN_PDP_VID4SURF_VID4PIXFMT_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4SURF, VID4COSITED +*/ +#define ODN_PDP_VID4SURF_VID4COSITED_MASK (0x00800000) +#define ODN_PDP_VID4SURF_VID4COSITED_LSBMASK (0x00000001) +#define ODN_PDP_VID4SURF_VID4COSITED_SHIFT (23) +#define ODN_PDP_VID4SURF_VID4COSITED_LENGTH (1) +#define ODN_PDP_VID4SURF_VID4COSITED_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4SURF, VID4USEGAMMA +*/ +#define ODN_PDP_VID4SURF_VID4USEGAMMA_MASK (0x04000000) +#define ODN_PDP_VID4SURF_VID4USEGAMMA_LSBMASK (0x00000001) +#define ODN_PDP_VID4SURF_VID4USEGAMMA_SHIFT (26) +#define ODN_PDP_VID4SURF_VID4USEGAMMA_LENGTH (1) +#define ODN_PDP_VID4SURF_VID4USEGAMMA_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4SURF, VID4USECSC +*/ +#define ODN_PDP_VID4SURF_VID4USECSC_MASK (0x02000000) +#define ODN_PDP_VID4SURF_VID4USECSC_LSBMASK (0x00000001) +#define ODN_PDP_VID4SURF_VID4USECSC_SHIFT (25) +#define ODN_PDP_VID4SURF_VID4USECSC_LENGTH (1) +#define ODN_PDP_VID4SURF_VID4USECSC_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH1CTRL_OFFSET (0x0020) + +/* PDP, GRPH1CTRL, GRPH1STREN +*/ +#define ODN_PDP_GRPH1CTRL_GRPH1STREN_MASK (0x80000000) +#define ODN_PDP_GRPH1CTRL_GRPH1STREN_LSBMASK (0x00000001) +#define ODN_PDP_GRPH1CTRL_GRPH1STREN_SHIFT (31) +#define ODN_PDP_GRPH1CTRL_GRPH1STREN_LENGTH (1) +#define ODN_PDP_GRPH1CTRL_GRPH1STREN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1CTRL, GRPH1CKEYEN +*/ +#define 
ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_MASK (0x40000000) +#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_LSBMASK (0x00000001) +#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_SHIFT (30) +#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_LENGTH (1) +#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1CTRL, GRPH1CKEYSRC +*/ +#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_MASK (0x20000000) +#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_LSBMASK (0x00000001) +#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_SHIFT (29) +#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_LENGTH (1) +#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1CTRL, GRPH1BLEND +*/ +#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_MASK (0x18000000) +#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_LSBMASK (0x00000003) +#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_SHIFT (27) +#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_LENGTH (2) +#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1CTRL, GRPH1BLENDPOS +*/ +#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_MASK (0x07000000) +#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_LSBMASK (0x00000007) +#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_SHIFT (24) +#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_LENGTH (3) +#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1CTRL, GRPH1DITHEREN +*/ +#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_MASK (0x00800000) +#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_LSBMASK (0x00000001) +#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_SHIFT (23) +#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_LENGTH (1) +#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH2CTRL_OFFSET (0x0024) + +/* PDP, GRPH2CTRL, GRPH2STREN +*/ +#define ODN_PDP_GRPH2CTRL_GRPH2STREN_MASK (0x80000000) +#define ODN_PDP_GRPH2CTRL_GRPH2STREN_LSBMASK (0x00000001) +#define ODN_PDP_GRPH2CTRL_GRPH2STREN_SHIFT (31) +#define ODN_PDP_GRPH2CTRL_GRPH2STREN_LENGTH (1) +#define ODN_PDP_GRPH2CTRL_GRPH2STREN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2CTRL, GRPH2CKEYEN +*/ +#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_MASK 
(0x40000000) +#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_LSBMASK (0x00000001) +#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_SHIFT (30) +#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_LENGTH (1) +#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2CTRL, GRPH2CKEYSRC +*/ +#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_MASK (0x20000000) +#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_LSBMASK (0x00000001) +#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_SHIFT (29) +#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_LENGTH (1) +#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2CTRL, GRPH2BLEND +*/ +#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_MASK (0x18000000) +#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_LSBMASK (0x00000003) +#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_SHIFT (27) +#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_LENGTH (2) +#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2CTRL, GRPH2BLENDPOS +*/ +#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_MASK (0x07000000) +#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_LSBMASK (0x00000007) +#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_SHIFT (24) +#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_LENGTH (3) +#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2CTRL, GRPH2DITHEREN +*/ +#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_MASK (0x00800000) +#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_LSBMASK (0x00000001) +#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_SHIFT (23) +#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_LENGTH (1) +#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH3CTRL_OFFSET (0x0028) + +/* PDP, GRPH3CTRL, GRPH3STREN +*/ +#define ODN_PDP_GRPH3CTRL_GRPH3STREN_MASK (0x80000000) +#define ODN_PDP_GRPH3CTRL_GRPH3STREN_LSBMASK (0x00000001) +#define ODN_PDP_GRPH3CTRL_GRPH3STREN_SHIFT (31) +#define ODN_PDP_GRPH3CTRL_GRPH3STREN_LENGTH (1) +#define ODN_PDP_GRPH3CTRL_GRPH3STREN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3CTRL, GRPH3CKEYEN +*/ +#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_MASK (0x40000000) +#define 
ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_LSBMASK (0x00000001) +#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_SHIFT (30) +#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_LENGTH (1) +#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3CTRL, GRPH3CKEYSRC +*/ +#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_MASK (0x20000000) +#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_LSBMASK (0x00000001) +#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_SHIFT (29) +#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_LENGTH (1) +#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3CTRL, GRPH3BLEND +*/ +#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_MASK (0x18000000) +#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_LSBMASK (0x00000003) +#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_SHIFT (27) +#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_LENGTH (2) +#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3CTRL, GRPH3BLENDPOS +*/ +#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_MASK (0x07000000) +#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_LSBMASK (0x00000007) +#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_SHIFT (24) +#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_LENGTH (3) +#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3CTRL, GRPH3DITHEREN +*/ +#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_MASK (0x00800000) +#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_LSBMASK (0x00000001) +#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_SHIFT (23) +#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_LENGTH (1) +#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH4CTRL_OFFSET (0x002C) + +/* PDP, GRPH4CTRL, GRPH4STREN +*/ +#define ODN_PDP_GRPH4CTRL_GRPH4STREN_MASK (0x80000000) +#define ODN_PDP_GRPH4CTRL_GRPH4STREN_LSBMASK (0x00000001) +#define ODN_PDP_GRPH4CTRL_GRPH4STREN_SHIFT (31) +#define ODN_PDP_GRPH4CTRL_GRPH4STREN_LENGTH (1) +#define ODN_PDP_GRPH4CTRL_GRPH4STREN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4CTRL, GRPH4CKEYEN +*/ +#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_MASK (0x40000000) +#define 
ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_LSBMASK (0x00000001) +#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_SHIFT (30) +#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_LENGTH (1) +#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4CTRL, GRPH4CKEYSRC +*/ +#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_MASK (0x20000000) +#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_LSBMASK (0x00000001) +#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_SHIFT (29) +#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_LENGTH (1) +#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4CTRL, GRPH4BLEND +*/ +#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_MASK (0x18000000) +#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_LSBMASK (0x00000003) +#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_SHIFT (27) +#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_LENGTH (2) +#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4CTRL, GRPH4BLENDPOS +*/ +#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_MASK (0x07000000) +#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_LSBMASK (0x00000007) +#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_SHIFT (24) +#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_LENGTH (3) +#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4CTRL, GRPH4DITHEREN +*/ +#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_MASK (0x00800000) +#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_LSBMASK (0x00000001) +#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_SHIFT (23) +#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_LENGTH (1) +#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1CTRL_OFFSET (0x0030) + +/* PDP, VID1CTRL, VID1STREN +*/ +#define ODN_PDP_VID1CTRL_VID1STREN_MASK (0x80000000) +#define ODN_PDP_VID1CTRL_VID1STREN_LSBMASK (0x00000001) +#define ODN_PDP_VID1CTRL_VID1STREN_SHIFT (31) +#define ODN_PDP_VID1CTRL_VID1STREN_LENGTH (1) +#define ODN_PDP_VID1CTRL_VID1STREN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1CTRL, VID1CKEYEN +*/ +#define ODN_PDP_VID1CTRL_VID1CKEYEN_MASK (0x40000000) +#define ODN_PDP_VID1CTRL_VID1CKEYEN_LSBMASK (0x00000001) 
+#define ODN_PDP_VID1CTRL_VID1CKEYEN_SHIFT (30) +#define ODN_PDP_VID1CTRL_VID1CKEYEN_LENGTH (1) +#define ODN_PDP_VID1CTRL_VID1CKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1CTRL, VID1CKEYSRC +*/ +#define ODN_PDP_VID1CTRL_VID1CKEYSRC_MASK (0x20000000) +#define ODN_PDP_VID1CTRL_VID1CKEYSRC_LSBMASK (0x00000001) +#define ODN_PDP_VID1CTRL_VID1CKEYSRC_SHIFT (29) +#define ODN_PDP_VID1CTRL_VID1CKEYSRC_LENGTH (1) +#define ODN_PDP_VID1CTRL_VID1CKEYSRC_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1CTRL, VID1BLEND +*/ +#define ODN_PDP_VID1CTRL_VID1BLEND_MASK (0x18000000) +#define ODN_PDP_VID1CTRL_VID1BLEND_LSBMASK (0x00000003) +#define ODN_PDP_VID1CTRL_VID1BLEND_SHIFT (27) +#define ODN_PDP_VID1CTRL_VID1BLEND_LENGTH (2) +#define ODN_PDP_VID1CTRL_VID1BLEND_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1CTRL, VID1BLENDPOS +*/ +#define ODN_PDP_VID1CTRL_VID1BLENDPOS_MASK (0x07000000) +#define ODN_PDP_VID1CTRL_VID1BLENDPOS_LSBMASK (0x00000007) +#define ODN_PDP_VID1CTRL_VID1BLENDPOS_SHIFT (24) +#define ODN_PDP_VID1CTRL_VID1BLENDPOS_LENGTH (3) +#define ODN_PDP_VID1CTRL_VID1BLENDPOS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1CTRL, VID1DITHEREN +*/ +#define ODN_PDP_VID1CTRL_VID1DITHEREN_MASK (0x00800000) +#define ODN_PDP_VID1CTRL_VID1DITHEREN_LSBMASK (0x00000001) +#define ODN_PDP_VID1CTRL_VID1DITHEREN_SHIFT (23) +#define ODN_PDP_VID1CTRL_VID1DITHEREN_LENGTH (1) +#define ODN_PDP_VID1CTRL_VID1DITHEREN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2CTRL_OFFSET (0x0034) + +/* PDP, VID2CTRL, VID2STREN +*/ +#define ODN_PDP_VID2CTRL_VID2STREN_MASK (0x80000000) +#define ODN_PDP_VID2CTRL_VID2STREN_LSBMASK (0x00000001) +#define ODN_PDP_VID2CTRL_VID2STREN_SHIFT (31) +#define ODN_PDP_VID2CTRL_VID2STREN_LENGTH (1) +#define ODN_PDP_VID2CTRL_VID2STREN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2CTRL, VID2CKEYEN +*/ +#define ODN_PDP_VID2CTRL_VID2CKEYEN_MASK (0x40000000) +#define ODN_PDP_VID2CTRL_VID2CKEYEN_LSBMASK (0x00000001) +#define ODN_PDP_VID2CTRL_VID2CKEYEN_SHIFT (30) +#define ODN_PDP_VID2CTRL_VID2CKEYEN_LENGTH (1) +#define 
ODN_PDP_VID2CTRL_VID2CKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2CTRL, VID2CKEYSRC +*/ +#define ODN_PDP_VID2CTRL_VID2CKEYSRC_MASK (0x20000000) +#define ODN_PDP_VID2CTRL_VID2CKEYSRC_LSBMASK (0x00000001) +#define ODN_PDP_VID2CTRL_VID2CKEYSRC_SHIFT (29) +#define ODN_PDP_VID2CTRL_VID2CKEYSRC_LENGTH (1) +#define ODN_PDP_VID2CTRL_VID2CKEYSRC_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2CTRL, VID2BLEND +*/ +#define ODN_PDP_VID2CTRL_VID2BLEND_MASK (0x18000000) +#define ODN_PDP_VID2CTRL_VID2BLEND_LSBMASK (0x00000003) +#define ODN_PDP_VID2CTRL_VID2BLEND_SHIFT (27) +#define ODN_PDP_VID2CTRL_VID2BLEND_LENGTH (2) +#define ODN_PDP_VID2CTRL_VID2BLEND_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2CTRL, VID2BLENDPOS +*/ +#define ODN_PDP_VID2CTRL_VID2BLENDPOS_MASK (0x07000000) +#define ODN_PDP_VID2CTRL_VID2BLENDPOS_LSBMASK (0x00000007) +#define ODN_PDP_VID2CTRL_VID2BLENDPOS_SHIFT (24) +#define ODN_PDP_VID2CTRL_VID2BLENDPOS_LENGTH (3) +#define ODN_PDP_VID2CTRL_VID2BLENDPOS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2CTRL, VID2DITHEREN +*/ +#define ODN_PDP_VID2CTRL_VID2DITHEREN_MASK (0x00800000) +#define ODN_PDP_VID2CTRL_VID2DITHEREN_LSBMASK (0x00000001) +#define ODN_PDP_VID2CTRL_VID2DITHEREN_SHIFT (23) +#define ODN_PDP_VID2CTRL_VID2DITHEREN_LENGTH (1) +#define ODN_PDP_VID2CTRL_VID2DITHEREN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3CTRL_OFFSET (0x0038) + +/* PDP, VID3CTRL, VID3STREN +*/ +#define ODN_PDP_VID3CTRL_VID3STREN_MASK (0x80000000) +#define ODN_PDP_VID3CTRL_VID3STREN_LSBMASK (0x00000001) +#define ODN_PDP_VID3CTRL_VID3STREN_SHIFT (31) +#define ODN_PDP_VID3CTRL_VID3STREN_LENGTH (1) +#define ODN_PDP_VID3CTRL_VID3STREN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3CTRL, VID3CKEYEN +*/ +#define ODN_PDP_VID3CTRL_VID3CKEYEN_MASK (0x40000000) +#define ODN_PDP_VID3CTRL_VID3CKEYEN_LSBMASK (0x00000001) +#define ODN_PDP_VID3CTRL_VID3CKEYEN_SHIFT (30) +#define ODN_PDP_VID3CTRL_VID3CKEYEN_LENGTH (1) +#define ODN_PDP_VID3CTRL_VID3CKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3CTRL, VID3CKEYSRC +*/ +#define 
ODN_PDP_VID3CTRL_VID3CKEYSRC_MASK (0x20000000) +#define ODN_PDP_VID3CTRL_VID3CKEYSRC_LSBMASK (0x00000001) +#define ODN_PDP_VID3CTRL_VID3CKEYSRC_SHIFT (29) +#define ODN_PDP_VID3CTRL_VID3CKEYSRC_LENGTH (1) +#define ODN_PDP_VID3CTRL_VID3CKEYSRC_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3CTRL, VID3BLEND +*/ +#define ODN_PDP_VID3CTRL_VID3BLEND_MASK (0x18000000) +#define ODN_PDP_VID3CTRL_VID3BLEND_LSBMASK (0x00000003) +#define ODN_PDP_VID3CTRL_VID3BLEND_SHIFT (27) +#define ODN_PDP_VID3CTRL_VID3BLEND_LENGTH (2) +#define ODN_PDP_VID3CTRL_VID3BLEND_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3CTRL, VID3BLENDPOS +*/ +#define ODN_PDP_VID3CTRL_VID3BLENDPOS_MASK (0x07000000) +#define ODN_PDP_VID3CTRL_VID3BLENDPOS_LSBMASK (0x00000007) +#define ODN_PDP_VID3CTRL_VID3BLENDPOS_SHIFT (24) +#define ODN_PDP_VID3CTRL_VID3BLENDPOS_LENGTH (3) +#define ODN_PDP_VID3CTRL_VID3BLENDPOS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3CTRL, VID3DITHEREN +*/ +#define ODN_PDP_VID3CTRL_VID3DITHEREN_MASK (0x00800000) +#define ODN_PDP_VID3CTRL_VID3DITHEREN_LSBMASK (0x00000001) +#define ODN_PDP_VID3CTRL_VID3DITHEREN_SHIFT (23) +#define ODN_PDP_VID3CTRL_VID3DITHEREN_LENGTH (1) +#define ODN_PDP_VID3CTRL_VID3DITHEREN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4CTRL_OFFSET (0x003C) + +/* PDP, VID4CTRL, VID4STREN +*/ +#define ODN_PDP_VID4CTRL_VID4STREN_MASK (0x80000000) +#define ODN_PDP_VID4CTRL_VID4STREN_LSBMASK (0x00000001) +#define ODN_PDP_VID4CTRL_VID4STREN_SHIFT (31) +#define ODN_PDP_VID4CTRL_VID4STREN_LENGTH (1) +#define ODN_PDP_VID4CTRL_VID4STREN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4CTRL, VID4CKEYEN +*/ +#define ODN_PDP_VID4CTRL_VID4CKEYEN_MASK (0x40000000) +#define ODN_PDP_VID4CTRL_VID4CKEYEN_LSBMASK (0x00000001) +#define ODN_PDP_VID4CTRL_VID4CKEYEN_SHIFT (30) +#define ODN_PDP_VID4CTRL_VID4CKEYEN_LENGTH (1) +#define ODN_PDP_VID4CTRL_VID4CKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4CTRL, VID4CKEYSRC +*/ +#define ODN_PDP_VID4CTRL_VID4CKEYSRC_MASK (0x20000000) +#define ODN_PDP_VID4CTRL_VID4CKEYSRC_LSBMASK (0x00000001) 
+#define ODN_PDP_VID4CTRL_VID4CKEYSRC_SHIFT (29) +#define ODN_PDP_VID4CTRL_VID4CKEYSRC_LENGTH (1) +#define ODN_PDP_VID4CTRL_VID4CKEYSRC_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4CTRL, VID4BLEND +*/ +#define ODN_PDP_VID4CTRL_VID4BLEND_MASK (0x18000000) +#define ODN_PDP_VID4CTRL_VID4BLEND_LSBMASK (0x00000003) +#define ODN_PDP_VID4CTRL_VID4BLEND_SHIFT (27) +#define ODN_PDP_VID4CTRL_VID4BLEND_LENGTH (2) +#define ODN_PDP_VID4CTRL_VID4BLEND_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4CTRL, VID4BLENDPOS +*/ +#define ODN_PDP_VID4CTRL_VID4BLENDPOS_MASK (0x07000000) +#define ODN_PDP_VID4CTRL_VID4BLENDPOS_LSBMASK (0x00000007) +#define ODN_PDP_VID4CTRL_VID4BLENDPOS_SHIFT (24) +#define ODN_PDP_VID4CTRL_VID4BLENDPOS_LENGTH (3) +#define ODN_PDP_VID4CTRL_VID4BLENDPOS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4CTRL, VID4DITHEREN +*/ +#define ODN_PDP_VID4CTRL_VID4DITHEREN_MASK (0x00800000) +#define ODN_PDP_VID4CTRL_VID4DITHEREN_LSBMASK (0x00000001) +#define ODN_PDP_VID4CTRL_VID4DITHEREN_SHIFT (23) +#define ODN_PDP_VID4CTRL_VID4DITHEREN_LENGTH (1) +#define ODN_PDP_VID4CTRL_VID4DITHEREN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1UCTRL_OFFSET (0x0050) + +/* PDP, VID1UCTRL, VID1UVHALFSTR +*/ +#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_MASK (0xC0000000) +#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_LSBMASK (0x00000003) +#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_SHIFT (30) +#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_LENGTH (2) +#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2UCTRL_OFFSET (0x0054) + +/* PDP, VID2UCTRL, VID2UVHALFSTR +*/ +#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_MASK (0xC0000000) +#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_LSBMASK (0x00000003) +#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_SHIFT (30) +#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_LENGTH (2) +#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3UCTRL_OFFSET (0x0058) + +/* PDP, VID3UCTRL, VID3UVHALFSTR +*/ +#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_MASK (0xC0000000) +#define 
ODN_PDP_VID3UCTRL_VID3UVHALFSTR_LSBMASK (0x00000003) +#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_SHIFT (30) +#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_LENGTH (2) +#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4UCTRL_OFFSET (0x005C) + +/* PDP, VID4UCTRL, VID4UVHALFSTR +*/ +#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_MASK (0xC0000000) +#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_LSBMASK (0x00000003) +#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_SHIFT (30) +#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_LENGTH (2) +#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH1STRIDE_OFFSET (0x0060) + +/* PDP, GRPH1STRIDE, GRPH1STRIDE +*/ +#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_MASK (0xFFC00000) +#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_SHIFT (22) +#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_LENGTH (10) +#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH2STRIDE_OFFSET (0x0064) + +/* PDP, GRPH2STRIDE, GRPH2STRIDE +*/ +#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_MASK (0xFFC00000) +#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_SHIFT (22) +#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_LENGTH (10) +#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH3STRIDE_OFFSET (0x0068) + +/* PDP, GRPH3STRIDE, GRPH3STRIDE +*/ +#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_MASK (0xFFC00000) +#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_SHIFT (22) +#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_LENGTH (10) +#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH4STRIDE_OFFSET (0x006C) + +/* PDP, GRPH4STRIDE, GRPH4STRIDE +*/ +#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_MASK (0xFFC00000) +#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_SHIFT (22) +#define 
ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_LENGTH (10) +#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1STRIDE_OFFSET (0x0070) + +/* PDP, VID1STRIDE, VID1STRIDE +*/ +#define ODN_PDP_VID1STRIDE_VID1STRIDE_MASK (0xFFC00000) +#define ODN_PDP_VID1STRIDE_VID1STRIDE_LSBMASK (0x000003FF) +#define ODN_PDP_VID1STRIDE_VID1STRIDE_SHIFT (22) +#define ODN_PDP_VID1STRIDE_VID1STRIDE_LENGTH (10) +#define ODN_PDP_VID1STRIDE_VID1STRIDE_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2STRIDE_OFFSET (0x0074) + +/* PDP, VID2STRIDE, VID2STRIDE +*/ +#define ODN_PDP_VID2STRIDE_VID2STRIDE_MASK (0xFFC00000) +#define ODN_PDP_VID2STRIDE_VID2STRIDE_LSBMASK (0x000003FF) +#define ODN_PDP_VID2STRIDE_VID2STRIDE_SHIFT (22) +#define ODN_PDP_VID2STRIDE_VID2STRIDE_LENGTH (10) +#define ODN_PDP_VID2STRIDE_VID2STRIDE_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3STRIDE_OFFSET (0x0078) + +/* PDP, VID3STRIDE, VID3STRIDE +*/ +#define ODN_PDP_VID3STRIDE_VID3STRIDE_MASK (0xFFC00000) +#define ODN_PDP_VID3STRIDE_VID3STRIDE_LSBMASK (0x000003FF) +#define ODN_PDP_VID3STRIDE_VID3STRIDE_SHIFT (22) +#define ODN_PDP_VID3STRIDE_VID3STRIDE_LENGTH (10) +#define ODN_PDP_VID3STRIDE_VID3STRIDE_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4STRIDE_OFFSET (0x007C) + +/* PDP, VID4STRIDE, VID4STRIDE +*/ +#define ODN_PDP_VID4STRIDE_VID4STRIDE_MASK (0xFFC00000) +#define ODN_PDP_VID4STRIDE_VID4STRIDE_LSBMASK (0x000003FF) +#define ODN_PDP_VID4STRIDE_VID4STRIDE_SHIFT (22) +#define ODN_PDP_VID4STRIDE_VID4STRIDE_LENGTH (10) +#define ODN_PDP_VID4STRIDE_VID4STRIDE_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH1SIZE_OFFSET (0x0080) + +/* PDP, GRPH1SIZE, GRPH1WIDTH +*/ +#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_MASK (0x0FFF0000) +#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_LSBMASK (0x00000FFF) +#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_SHIFT (16) +#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_LENGTH (12) +#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1SIZE, GRPH1HEIGHT +*/ +#define 
ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_MASK (0x00000FFF) +#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_LSBMASK (0x00000FFF) +#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_SHIFT (0) +#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_LENGTH (12) +#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH2SIZE_OFFSET (0x0084) + +/* PDP, GRPH2SIZE, GRPH2WIDTH +*/ +#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_MASK (0x0FFF0000) +#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_LSBMASK (0x00000FFF) +#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_SHIFT (16) +#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_LENGTH (12) +#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2SIZE, GRPH2HEIGHT +*/ +#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_MASK (0x00000FFF) +#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_LSBMASK (0x00000FFF) +#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_SHIFT (0) +#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_LENGTH (12) +#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH3SIZE_OFFSET (0x0088) + +/* PDP, GRPH3SIZE, GRPH3WIDTH +*/ +#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_MASK (0x0FFF0000) +#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_LSBMASK (0x00000FFF) +#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_SHIFT (16) +#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_LENGTH (12) +#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3SIZE, GRPH3HEIGHT +*/ +#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_MASK (0x00000FFF) +#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_LSBMASK (0x00000FFF) +#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_SHIFT (0) +#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_LENGTH (12) +#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH4SIZE_OFFSET (0x008C) + +/* PDP, GRPH4SIZE, GRPH4WIDTH +*/ +#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_MASK (0x0FFF0000) +#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_LSBMASK (0x00000FFF) +#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_SHIFT (16) +#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_LENGTH (12) +#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4SIZE, 
GRPH4HEIGHT +*/ +#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_MASK (0x00000FFF) +#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_LSBMASK (0x00000FFF) +#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_SHIFT (0) +#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_LENGTH (12) +#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1SIZE_OFFSET (0x0090) + +/* PDP, VID1SIZE, VID1WIDTH +*/ +#define ODN_PDP_VID1SIZE_VID1WIDTH_MASK (0x0FFF0000) +#define ODN_PDP_VID1SIZE_VID1WIDTH_LSBMASK (0x00000FFF) +#define ODN_PDP_VID1SIZE_VID1WIDTH_SHIFT (16) +#define ODN_PDP_VID1SIZE_VID1WIDTH_LENGTH (12) +#define ODN_PDP_VID1SIZE_VID1WIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SIZE, VID1HEIGHT +*/ +#define ODN_PDP_VID1SIZE_VID1HEIGHT_MASK (0x00000FFF) +#define ODN_PDP_VID1SIZE_VID1HEIGHT_LSBMASK (0x00000FFF) +#define ODN_PDP_VID1SIZE_VID1HEIGHT_SHIFT (0) +#define ODN_PDP_VID1SIZE_VID1HEIGHT_LENGTH (12) +#define ODN_PDP_VID1SIZE_VID1HEIGHT_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2SIZE_OFFSET (0x0094) + +/* PDP, VID2SIZE, VID2WIDTH +*/ +#define ODN_PDP_VID2SIZE_VID2WIDTH_MASK (0x0FFF0000) +#define ODN_PDP_VID2SIZE_VID2WIDTH_LSBMASK (0x00000FFF) +#define ODN_PDP_VID2SIZE_VID2WIDTH_SHIFT (16) +#define ODN_PDP_VID2SIZE_VID2WIDTH_LENGTH (12) +#define ODN_PDP_VID2SIZE_VID2WIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SIZE, VID2HEIGHT +*/ +#define ODN_PDP_VID2SIZE_VID2HEIGHT_MASK (0x00000FFF) +#define ODN_PDP_VID2SIZE_VID2HEIGHT_LSBMASK (0x00000FFF) +#define ODN_PDP_VID2SIZE_VID2HEIGHT_SHIFT (0) +#define ODN_PDP_VID2SIZE_VID2HEIGHT_LENGTH (12) +#define ODN_PDP_VID2SIZE_VID2HEIGHT_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3SIZE_OFFSET (0x0098) + +/* PDP, VID3SIZE, VID3WIDTH +*/ +#define ODN_PDP_VID3SIZE_VID3WIDTH_MASK (0x0FFF0000) +#define ODN_PDP_VID3SIZE_VID3WIDTH_LSBMASK (0x00000FFF) +#define ODN_PDP_VID3SIZE_VID3WIDTH_SHIFT (16) +#define ODN_PDP_VID3SIZE_VID3WIDTH_LENGTH (12) +#define ODN_PDP_VID3SIZE_VID3WIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3SIZE, VID3HEIGHT +*/ +#define 
ODN_PDP_VID3SIZE_VID3HEIGHT_MASK (0x00000FFF) +#define ODN_PDP_VID3SIZE_VID3HEIGHT_LSBMASK (0x00000FFF) +#define ODN_PDP_VID3SIZE_VID3HEIGHT_SHIFT (0) +#define ODN_PDP_VID3SIZE_VID3HEIGHT_LENGTH (12) +#define ODN_PDP_VID3SIZE_VID3HEIGHT_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4SIZE_OFFSET (0x009C) + +/* PDP, VID4SIZE, VID4WIDTH +*/ +#define ODN_PDP_VID4SIZE_VID4WIDTH_MASK (0x0FFF0000) +#define ODN_PDP_VID4SIZE_VID4WIDTH_LSBMASK (0x00000FFF) +#define ODN_PDP_VID4SIZE_VID4WIDTH_SHIFT (16) +#define ODN_PDP_VID4SIZE_VID4WIDTH_LENGTH (12) +#define ODN_PDP_VID4SIZE_VID4WIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4SIZE, VID4HEIGHT +*/ +#define ODN_PDP_VID4SIZE_VID4HEIGHT_MASK (0x00000FFF) +#define ODN_PDP_VID4SIZE_VID4HEIGHT_LSBMASK (0x00000FFF) +#define ODN_PDP_VID4SIZE_VID4HEIGHT_SHIFT (0) +#define ODN_PDP_VID4SIZE_VID4HEIGHT_LENGTH (12) +#define ODN_PDP_VID4SIZE_VID4HEIGHT_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH1POSN_OFFSET (0x00A0) + +/* PDP, GRPH1POSN, GRPH1XSTART +*/ +#define ODN_PDP_GRPH1POSN_GRPH1XSTART_MASK (0x0FFF0000) +#define ODN_PDP_GRPH1POSN_GRPH1XSTART_LSBMASK (0x00000FFF) +#define ODN_PDP_GRPH1POSN_GRPH1XSTART_SHIFT (16) +#define ODN_PDP_GRPH1POSN_GRPH1XSTART_LENGTH (12) +#define ODN_PDP_GRPH1POSN_GRPH1XSTART_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1POSN, GRPH1YSTART +*/ +#define ODN_PDP_GRPH1POSN_GRPH1YSTART_MASK (0x00000FFF) +#define ODN_PDP_GRPH1POSN_GRPH1YSTART_LSBMASK (0x00000FFF) +#define ODN_PDP_GRPH1POSN_GRPH1YSTART_SHIFT (0) +#define ODN_PDP_GRPH1POSN_GRPH1YSTART_LENGTH (12) +#define ODN_PDP_GRPH1POSN_GRPH1YSTART_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH2POSN_OFFSET (0x00A4) + +/* PDP, GRPH2POSN, GRPH2XSTART +*/ +#define ODN_PDP_GRPH2POSN_GRPH2XSTART_MASK (0x0FFF0000) +#define ODN_PDP_GRPH2POSN_GRPH2XSTART_LSBMASK (0x00000FFF) +#define ODN_PDP_GRPH2POSN_GRPH2XSTART_SHIFT (16) +#define ODN_PDP_GRPH2POSN_GRPH2XSTART_LENGTH (12) +#define ODN_PDP_GRPH2POSN_GRPH2XSTART_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2POSN, GRPH2YSTART +*/ +#define 
ODN_PDP_GRPH2POSN_GRPH2YSTART_MASK (0x00000FFF) +#define ODN_PDP_GRPH2POSN_GRPH2YSTART_LSBMASK (0x00000FFF) +#define ODN_PDP_GRPH2POSN_GRPH2YSTART_SHIFT (0) +#define ODN_PDP_GRPH2POSN_GRPH2YSTART_LENGTH (12) +#define ODN_PDP_GRPH2POSN_GRPH2YSTART_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH3POSN_OFFSET (0x00A8) + +/* PDP, GRPH3POSN, GRPH3XSTART +*/ +#define ODN_PDP_GRPH3POSN_GRPH3XSTART_MASK (0x0FFF0000) +#define ODN_PDP_GRPH3POSN_GRPH3XSTART_LSBMASK (0x00000FFF) +#define ODN_PDP_GRPH3POSN_GRPH3XSTART_SHIFT (16) +#define ODN_PDP_GRPH3POSN_GRPH3XSTART_LENGTH (12) +#define ODN_PDP_GRPH3POSN_GRPH3XSTART_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3POSN, GRPH3YSTART +*/ +#define ODN_PDP_GRPH3POSN_GRPH3YSTART_MASK (0x00000FFF) +#define ODN_PDP_GRPH3POSN_GRPH3YSTART_LSBMASK (0x00000FFF) +#define ODN_PDP_GRPH3POSN_GRPH3YSTART_SHIFT (0) +#define ODN_PDP_GRPH3POSN_GRPH3YSTART_LENGTH (12) +#define ODN_PDP_GRPH3POSN_GRPH3YSTART_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH4POSN_OFFSET (0x00AC) + +/* PDP, GRPH4POSN, GRPH4XSTART +*/ +#define ODN_PDP_GRPH4POSN_GRPH4XSTART_MASK (0x0FFF0000) +#define ODN_PDP_GRPH4POSN_GRPH4XSTART_LSBMASK (0x00000FFF) +#define ODN_PDP_GRPH4POSN_GRPH4XSTART_SHIFT (16) +#define ODN_PDP_GRPH4POSN_GRPH4XSTART_LENGTH (12) +#define ODN_PDP_GRPH4POSN_GRPH4XSTART_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4POSN, GRPH4YSTART +*/ +#define ODN_PDP_GRPH4POSN_GRPH4YSTART_MASK (0x00000FFF) +#define ODN_PDP_GRPH4POSN_GRPH4YSTART_LSBMASK (0x00000FFF) +#define ODN_PDP_GRPH4POSN_GRPH4YSTART_SHIFT (0) +#define ODN_PDP_GRPH4POSN_GRPH4YSTART_LENGTH (12) +#define ODN_PDP_GRPH4POSN_GRPH4YSTART_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1POSN_OFFSET (0x00B0) + +/* PDP, VID1POSN, VID1XSTART +*/ +#define ODN_PDP_VID1POSN_VID1XSTART_MASK (0x0FFF0000) +#define ODN_PDP_VID1POSN_VID1XSTART_LSBMASK (0x00000FFF) +#define ODN_PDP_VID1POSN_VID1XSTART_SHIFT (16) +#define ODN_PDP_VID1POSN_VID1XSTART_LENGTH (12) +#define ODN_PDP_VID1POSN_VID1XSTART_SIGNED_FIELD IMG_FALSE + +/* PDP, 
VID1POSN, VID1YSTART +*/ +#define ODN_PDP_VID1POSN_VID1YSTART_MASK (0x00000FFF) +#define ODN_PDP_VID1POSN_VID1YSTART_LSBMASK (0x00000FFF) +#define ODN_PDP_VID1POSN_VID1YSTART_SHIFT (0) +#define ODN_PDP_VID1POSN_VID1YSTART_LENGTH (12) +#define ODN_PDP_VID1POSN_VID1YSTART_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2POSN_OFFSET (0x00B4) + +/* PDP, VID2POSN, VID2XSTART +*/ +#define ODN_PDP_VID2POSN_VID2XSTART_MASK (0x0FFF0000) +#define ODN_PDP_VID2POSN_VID2XSTART_LSBMASK (0x00000FFF) +#define ODN_PDP_VID2POSN_VID2XSTART_SHIFT (16) +#define ODN_PDP_VID2POSN_VID2XSTART_LENGTH (12) +#define ODN_PDP_VID2POSN_VID2XSTART_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2POSN, VID2YSTART +*/ +#define ODN_PDP_VID2POSN_VID2YSTART_MASK (0x00000FFF) +#define ODN_PDP_VID2POSN_VID2YSTART_LSBMASK (0x00000FFF) +#define ODN_PDP_VID2POSN_VID2YSTART_SHIFT (0) +#define ODN_PDP_VID2POSN_VID2YSTART_LENGTH (12) +#define ODN_PDP_VID2POSN_VID2YSTART_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3POSN_OFFSET (0x00B8) + +/* PDP, VID3POSN, VID3XSTART +*/ +#define ODN_PDP_VID3POSN_VID3XSTART_MASK (0x0FFF0000) +#define ODN_PDP_VID3POSN_VID3XSTART_LSBMASK (0x00000FFF) +#define ODN_PDP_VID3POSN_VID3XSTART_SHIFT (16) +#define ODN_PDP_VID3POSN_VID3XSTART_LENGTH (12) +#define ODN_PDP_VID3POSN_VID3XSTART_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3POSN, VID3YSTART +*/ +#define ODN_PDP_VID3POSN_VID3YSTART_MASK (0x00000FFF) +#define ODN_PDP_VID3POSN_VID3YSTART_LSBMASK (0x00000FFF) +#define ODN_PDP_VID3POSN_VID3YSTART_SHIFT (0) +#define ODN_PDP_VID3POSN_VID3YSTART_LENGTH (12) +#define ODN_PDP_VID3POSN_VID3YSTART_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4POSN_OFFSET (0x00BC) + +/* PDP, VID4POSN, VID4XSTART +*/ +#define ODN_PDP_VID4POSN_VID4XSTART_MASK (0x0FFF0000) +#define ODN_PDP_VID4POSN_VID4XSTART_LSBMASK (0x00000FFF) +#define ODN_PDP_VID4POSN_VID4XSTART_SHIFT (16) +#define ODN_PDP_VID4POSN_VID4XSTART_LENGTH (12) +#define ODN_PDP_VID4POSN_VID4XSTART_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4POSN, VID4YSTART +*/ +#define 
ODN_PDP_VID4POSN_VID4YSTART_MASK (0x00000FFF) +#define ODN_PDP_VID4POSN_VID4YSTART_LSBMASK (0x00000FFF) +#define ODN_PDP_VID4POSN_VID4YSTART_SHIFT (0) +#define ODN_PDP_VID4POSN_VID4YSTART_LENGTH (12) +#define ODN_PDP_VID4POSN_VID4YSTART_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH1GALPHA_OFFSET (0x00C0) + +/* PDP, GRPH1GALPHA, GRPH1GALPHA +*/ +#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_MASK (0x000003FF) +#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_SHIFT (0) +#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_LENGTH (10) +#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH2GALPHA_OFFSET (0x00C4) + +/* PDP, GRPH2GALPHA, GRPH2GALPHA +*/ +#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_MASK (0x000003FF) +#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_SHIFT (0) +#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_LENGTH (10) +#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH3GALPHA_OFFSET (0x00C8) + +/* PDP, GRPH3GALPHA, GRPH3GALPHA +*/ +#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_MASK (0x000003FF) +#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_SHIFT (0) +#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_LENGTH (10) +#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH4GALPHA_OFFSET (0x00CC) + +/* PDP, GRPH4GALPHA, GRPH4GALPHA +*/ +#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_MASK (0x000003FF) +#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_SHIFT (0) +#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_LENGTH (10) +#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1GALPHA_OFFSET (0x00D0) + +/* PDP, VID1GALPHA, VID1GALPHA +*/ +#define ODN_PDP_VID1GALPHA_VID1GALPHA_MASK (0x000003FF) +#define ODN_PDP_VID1GALPHA_VID1GALPHA_LSBMASK (0x000003FF) +#define 
ODN_PDP_VID1GALPHA_VID1GALPHA_SHIFT (0) +#define ODN_PDP_VID1GALPHA_VID1GALPHA_LENGTH (10) +#define ODN_PDP_VID1GALPHA_VID1GALPHA_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2GALPHA_OFFSET (0x00D4) + +/* PDP, VID2GALPHA, VID2GALPHA +*/ +#define ODN_PDP_VID2GALPHA_VID2GALPHA_MASK (0x000003FF) +#define ODN_PDP_VID2GALPHA_VID2GALPHA_LSBMASK (0x000003FF) +#define ODN_PDP_VID2GALPHA_VID2GALPHA_SHIFT (0) +#define ODN_PDP_VID2GALPHA_VID2GALPHA_LENGTH (10) +#define ODN_PDP_VID2GALPHA_VID2GALPHA_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3GALPHA_OFFSET (0x00D8) + +/* PDP, VID3GALPHA, VID3GALPHA +*/ +#define ODN_PDP_VID3GALPHA_VID3GALPHA_MASK (0x000003FF) +#define ODN_PDP_VID3GALPHA_VID3GALPHA_LSBMASK (0x000003FF) +#define ODN_PDP_VID3GALPHA_VID3GALPHA_SHIFT (0) +#define ODN_PDP_VID3GALPHA_VID3GALPHA_LENGTH (10) +#define ODN_PDP_VID3GALPHA_VID3GALPHA_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4GALPHA_OFFSET (0x00DC) + +/* PDP, VID4GALPHA, VID4GALPHA +*/ +#define ODN_PDP_VID4GALPHA_VID4GALPHA_MASK (0x000003FF) +#define ODN_PDP_VID4GALPHA_VID4GALPHA_LSBMASK (0x000003FF) +#define ODN_PDP_VID4GALPHA_VID4GALPHA_SHIFT (0) +#define ODN_PDP_VID4GALPHA_VID4GALPHA_LENGTH (10) +#define ODN_PDP_VID4GALPHA_VID4GALPHA_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH1CKEY_R_OFFSET (0x00E0) + +/* PDP, GRPH1CKEY_R, GRPH1CKEY_R +*/ +#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_MASK (0x000003FF) +#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_SHIFT (0) +#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_LENGTH (10) +#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH1CKEY_GB_OFFSET (0x00E4) + +/* PDP, GRPH1CKEY_GB, GRPH1CKEY_G +*/ +#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_MASK (0x03FF0000) +#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SHIFT (16) +#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LENGTH (10) +#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SIGNED_FIELD 
IMG_FALSE + +/* PDP, GRPH1CKEY_GB, GRPH1CKEY_B +*/ +#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_MASK (0x000003FF) +#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SHIFT (0) +#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LENGTH (10) +#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH2CKEY_R_OFFSET (0x00E8) + +/* PDP, GRPH2CKEY_R, GRPH2CKEY_R +*/ +#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_MASK (0x000003FF) +#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_SHIFT (0) +#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_LENGTH (10) +#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH2CKEY_GB_OFFSET (0x00EC) + +/* PDP, GRPH2CKEY_GB, GRPH2CKEY_G +*/ +#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_MASK (0x03FF0000) +#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SHIFT (16) +#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LENGTH (10) +#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2CKEY_GB, GRPH2CKEY_B +*/ +#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_MASK (0x000003FF) +#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SHIFT (0) +#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LENGTH (10) +#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH3CKEY_R_OFFSET (0x00F0) + +/* PDP, GRPH3CKEY_R, GRPH3CKEY_R +*/ +#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_MASK (0x000003FF) +#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_SHIFT (0) +#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_LENGTH (10) +#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH3CKEY_GB_OFFSET (0x00F4) + +/* PDP, GRPH3CKEY_GB, GRPH3CKEY_G +*/ +#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_MASK (0x03FF0000) +#define 
ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SHIFT (16) +#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LENGTH (10) +#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3CKEY_GB, GRPH3CKEY_B +*/ +#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_MASK (0x000003FF) +#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SHIFT (0) +#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LENGTH (10) +#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH4CKEY_R_OFFSET (0x00F8) + +/* PDP, GRPH4CKEY_R, GRPH4CKEY_R +*/ +#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_MASK (0x000003FF) +#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_SHIFT (0) +#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_LENGTH (10) +#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH4CKEY_GB_OFFSET (0x00FC) + +/* PDP, GRPH4CKEY_GB, GRPH4CKEY_G +*/ +#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_MASK (0x03FF0000) +#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SHIFT (16) +#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LENGTH (10) +#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4CKEY_GB, GRPH4CKEY_B +*/ +#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_MASK (0x000003FF) +#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SHIFT (0) +#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LENGTH (10) +#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1CKEY_R_OFFSET (0x0100) + +/* PDP, VID1CKEY_R, VID1CKEY_R +*/ +#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_MASK (0x000003FF) +#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_LSBMASK (0x000003FF) +#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_SHIFT (0) +#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_LENGTH (10) +#define 
ODN_PDP_VID1CKEY_R_VID1CKEY_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1CKEY_GB_OFFSET (0x0104) + +/* PDP, VID1CKEY_GB, VID1CKEY_G +*/ +#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_MASK (0x03FF0000) +#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_LSBMASK (0x000003FF) +#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_SHIFT (16) +#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_LENGTH (10) +#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1CKEY_GB, VID1CKEY_B +*/ +#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_MASK (0x000003FF) +#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_LSBMASK (0x000003FF) +#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_SHIFT (0) +#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_LENGTH (10) +#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2CKEY_R_OFFSET (0x0108) + +/* PDP, VID2CKEY_R, VID2CKEY_R +*/ +#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_MASK (0x000003FF) +#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_LSBMASK (0x000003FF) +#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_SHIFT (0) +#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_LENGTH (10) +#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2CKEY_GB_OFFSET (0x010C) + +/* PDP, VID2CKEY_GB, VID2CKEY_G +*/ +#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_MASK (0x03FF0000) +#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_LSBMASK (0x000003FF) +#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_SHIFT (16) +#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_LENGTH (10) +#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2CKEY_GB, VID2CKEY_B +*/ +#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_MASK (0x000003FF) +#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_LSBMASK (0x000003FF) +#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_SHIFT (0) +#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_LENGTH (10) +#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3CKEY_R_OFFSET (0x0110) + +/* PDP, VID3CKEY_R, VID3CKEY_R +*/ +#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_MASK (0x000003FF) +#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_LSBMASK 
(0x000003FF) +#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_SHIFT (0) +#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_LENGTH (10) +#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3CKEY_GB_OFFSET (0x0114) + +/* PDP, VID3CKEY_GB, VID3CKEY_G +*/ +#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_MASK (0x03FF0000) +#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_LSBMASK (0x000003FF) +#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_SHIFT (16) +#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_LENGTH (10) +#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3CKEY_GB, VID3CKEY_B +*/ +#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_MASK (0x000003FF) +#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_LSBMASK (0x000003FF) +#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_SHIFT (0) +#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_LENGTH (10) +#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4CKEY_R_OFFSET (0x0118) + +/* PDP, VID4CKEY_R, VID4CKEY_R +*/ +#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_MASK (0x000003FF) +#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_LSBMASK (0x000003FF) +#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_SHIFT (0) +#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_LENGTH (10) +#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4CKEY_GB_OFFSET (0x011C) + +/* PDP, VID4CKEY_GB, VID4CKEY_G +*/ +#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_MASK (0x03FF0000) +#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_LSBMASK (0x000003FF) +#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_SHIFT (16) +#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_LENGTH (10) +#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4CKEY_GB, VID4CKEY_B +*/ +#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_MASK (0x000003FF) +#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_LSBMASK (0x000003FF) +#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_SHIFT (0) +#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_LENGTH (10) +#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH1BLND2_R_OFFSET (0x0120) + +/* PDP, GRPH1BLND2_R, 
GRPH1PIXDBL +*/ +#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_MASK (0x80000000) +#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_LSBMASK (0x00000001) +#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_SHIFT (31) +#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_LENGTH (1) +#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1BLND2_R, GRPH1LINDBL +*/ +#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_MASK (0x20000000) +#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_LSBMASK (0x00000001) +#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_SHIFT (29) +#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_LENGTH (1) +#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1BLND2_R, GRPH1CKEYMASK_R +*/ +#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_MASK (0x000003FF) +#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SHIFT (0) +#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LENGTH (10) +#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH1BLND2_GB_OFFSET (0x0124) + +/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_G +*/ +#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_MASK (0x03FF0000) +#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SHIFT (16) +#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LENGTH (10) +#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_B +*/ +#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_MASK (0x000003FF) +#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SHIFT (0) +#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LENGTH (10) +#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH2BLND2_R_OFFSET (0x0128) + +/* PDP, GRPH2BLND2_R, GRPH2PIXDBL +*/ +#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_MASK (0x80000000) +#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_LSBMASK (0x00000001) 
+#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_SHIFT (31) +#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_LENGTH (1) +#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2BLND2_R, GRPH2LINDBL +*/ +#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_MASK (0x20000000) +#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_LSBMASK (0x00000001) +#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_SHIFT (29) +#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_LENGTH (1) +#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2BLND2_R, GRPH2CKEYMASK_R +*/ +#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_MASK (0x000003FF) +#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SHIFT (0) +#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LENGTH (10) +#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH2BLND2_GB_OFFSET (0x012C) + +/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_G +*/ +#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_MASK (0x03FF0000) +#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SHIFT (16) +#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LENGTH (10) +#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_B +*/ +#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_MASK (0x000003FF) +#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SHIFT (0) +#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LENGTH (10) +#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH3BLND2_R_OFFSET (0x0130) + +/* PDP, GRPH3BLND2_R, GRPH3PIXDBL +*/ +#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_MASK (0x80000000) +#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_LSBMASK (0x00000001) +#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_SHIFT (31) +#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_LENGTH (1) +#define 
ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3BLND2_R, GRPH3LINDBL +*/ +#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_MASK (0x20000000) +#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_LSBMASK (0x00000001) +#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_SHIFT (29) +#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_LENGTH (1) +#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3BLND2_R, GRPH3CKEYMASK_R +*/ +#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_MASK (0x000003FF) +#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SHIFT (0) +#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LENGTH (10) +#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH3BLND2_GB_OFFSET (0x0134) + +/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_G +*/ +#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_MASK (0x03FF0000) +#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SHIFT (16) +#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LENGTH (10) +#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_B +*/ +#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_MASK (0x000003FF) +#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SHIFT (0) +#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LENGTH (10) +#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH4BLND2_R_OFFSET (0x0138) + +/* PDP, GRPH4BLND2_R, GRPH4PIXDBL +*/ +#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_MASK (0x80000000) +#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_LSBMASK (0x00000001) +#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_SHIFT (31) +#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_LENGTH (1) +#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4BLND2_R, GRPH4LINDBL +*/ +#define 
ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_MASK (0x20000000) +#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_LSBMASK (0x00000001) +#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_SHIFT (29) +#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_LENGTH (1) +#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4BLND2_R, GRPH4CKEYMASK_R +*/ +#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_MASK (0x000003FF) +#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SHIFT (0) +#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LENGTH (10) +#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH4BLND2_GB_OFFSET (0x013C) + +/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_G +*/ +#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_MASK (0x03FF0000) +#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SHIFT (16) +#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LENGTH (10) +#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_B +*/ +#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_MASK (0x000003FF) +#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LSBMASK (0x000003FF) +#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SHIFT (0) +#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LENGTH (10) +#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1BLND2_R_OFFSET (0x0140) + +/* PDP, VID1BLND2_R, VID1CKEYMASK_R +*/ +#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_MASK (0x000003FF) +#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_LSBMASK (0x000003FF) +#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_SHIFT (0) +#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_LENGTH (10) +#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1BLND2_GB_OFFSET (0x0144) + +/* PDP, VID1BLND2_GB, VID1CKEYMASK_G +*/ +#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_MASK (0x03FF0000) +#define 
ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_LSBMASK (0x000003FF) +#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_SHIFT (16) +#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_LENGTH (10) +#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1BLND2_GB, VID1CKEYMASK_B +*/ +#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_MASK (0x000003FF) +#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_LSBMASK (0x000003FF) +#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_SHIFT (0) +#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_LENGTH (10) +#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2BLND2_R_OFFSET (0x0148) + +/* PDP, VID2BLND2_R, VID2CKEYMASK_R +*/ +#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_MASK (0x000003FF) +#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_LSBMASK (0x000003FF) +#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_SHIFT (0) +#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_LENGTH (10) +#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2BLND2_GB_OFFSET (0x014C) + +/* PDP, VID2BLND2_GB, VID2CKEYMASK_G +*/ +#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_MASK (0x03FF0000) +#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_LSBMASK (0x000003FF) +#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_SHIFT (16) +#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_LENGTH (10) +#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2BLND2_GB, VID2CKEYMASK_B +*/ +#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_MASK (0x000003FF) +#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_LSBMASK (0x000003FF) +#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_SHIFT (0) +#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_LENGTH (10) +#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3BLND2_R_OFFSET (0x0150) + +/* PDP, VID3BLND2_R, VID3CKEYMASK_R +*/ +#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_MASK (0x000003FF) +#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_LSBMASK (0x000003FF) +#define 
ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_SHIFT (0) +#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_LENGTH (10) +#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3BLND2_GB_OFFSET (0x0154) + +/* PDP, VID3BLND2_GB, VID3CKEYMASK_G +*/ +#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_MASK (0x03FF0000) +#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_LSBMASK (0x000003FF) +#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_SHIFT (16) +#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_LENGTH (10) +#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3BLND2_GB, VID3CKEYMASK_B +*/ +#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_MASK (0x000003FF) +#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_LSBMASK (0x000003FF) +#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_SHIFT (0) +#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_LENGTH (10) +#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4BLND2_R_OFFSET (0x0158) + +/* PDP, VID4BLND2_R, VID4CKEYMASK_R +*/ +#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_MASK (0x000003FF) +#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_LSBMASK (0x000003FF) +#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_SHIFT (0) +#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_LENGTH (10) +#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4BLND2_GB_OFFSET (0x015C) + +/* PDP, VID4BLND2_GB, VID4CKEYMASK_G +*/ +#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_MASK (0x03FF0000) +#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_LSBMASK (0x000003FF) +#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_SHIFT (16) +#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_LENGTH (10) +#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4BLND2_GB, VID4CKEYMASK_B +*/ +#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_MASK (0x000003FF) +#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_LSBMASK (0x000003FF) +#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_SHIFT (0) +#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_LENGTH 
(10) +#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH1INTERLEAVE_CTRL_OFFSET (0x0160) + +/* PDP, GRPH1INTERLEAVE_CTRL, GRPH1INTFIELD +*/ +#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_MASK (0x00000001) +#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LSBMASK (0x00000001) +#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SHIFT (0) +#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LENGTH (1) +#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH2INTERLEAVE_CTRL_OFFSET (0x0164) + +/* PDP, GRPH2INTERLEAVE_CTRL, GRPH2INTFIELD +*/ +#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_MASK (0x00000001) +#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LSBMASK (0x00000001) +#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SHIFT (0) +#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LENGTH (1) +#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH3INTERLEAVE_CTRL_OFFSET (0x0168) + +/* PDP, GRPH3INTERLEAVE_CTRL, GRPH3INTFIELD +*/ +#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_MASK (0x00000001) +#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LSBMASK (0x00000001) +#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SHIFT (0) +#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LENGTH (1) +#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH4INTERLEAVE_CTRL_OFFSET (0x016C) + +/* PDP, GRPH4INTERLEAVE_CTRL, GRPH4INTFIELD +*/ +#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_MASK (0x00000001) +#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LSBMASK (0x00000001) +#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SHIFT (0) +#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LENGTH (1) +#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1INTERLEAVE_CTRL_OFFSET (0x0170) + +/* PDP, VID1INTERLEAVE_CTRL, VID1INTFIELD +*/ +#define 
ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_MASK (0x00000001) +#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LSBMASK (0x00000001) +#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SHIFT (0) +#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LENGTH (1) +#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2INTERLEAVE_CTRL_OFFSET (0x0174) + +/* PDP, VID2INTERLEAVE_CTRL, VID2INTFIELD +*/ +#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_MASK (0x00000001) +#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LSBMASK (0x00000001) +#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SHIFT (0) +#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LENGTH (1) +#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3INTERLEAVE_CTRL_OFFSET (0x0178) + +/* PDP, VID3INTERLEAVE_CTRL, VID3INTFIELD +*/ +#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_MASK (0x00000001) +#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LSBMASK (0x00000001) +#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SHIFT (0) +#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LENGTH (1) +#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4INTERLEAVE_CTRL_OFFSET (0x017C) + +/* PDP, VID4INTERLEAVE_CTRL, VID4INTFIELD +*/ +#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_MASK (0x00000001) +#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LSBMASK (0x00000001) +#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SHIFT (0) +#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LENGTH (1) +#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH1BASEADDR_OFFSET (0x0180) + +/* PDP, GRPH1BASEADDR, GRPH1BASEADDR +*/ +#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_MASK (0xFFFFFFE0) +#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_LSBMASK (0x07FFFFFF) +#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_SHIFT (5) +#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_LENGTH (27) +#define 
ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH2BASEADDR_OFFSET (0x0184) + +/* PDP, GRPH2BASEADDR, GRPH2BASEADDR +*/ +#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_MASK (0xFFFFFFE0) +#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_LSBMASK (0x07FFFFFF) +#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_SHIFT (5) +#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_LENGTH (27) +#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH3BASEADDR_OFFSET (0x0188) + +/* PDP, GRPH3BASEADDR, GRPH3BASEADDR +*/ +#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_MASK (0xFFFFFFE0) +#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_LSBMASK (0x07FFFFFF) +#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_SHIFT (5) +#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_LENGTH (27) +#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH4BASEADDR_OFFSET (0x018C) + +/* PDP, GRPH4BASEADDR, GRPH4BASEADDR +*/ +#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_MASK (0xFFFFFFE0) +#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_LSBMASK (0x07FFFFFF) +#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_SHIFT (5) +#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_LENGTH (27) +#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1BASEADDR_OFFSET (0x0190) + +/* PDP, VID1BASEADDR, VID1BASEADDR +*/ +#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_MASK (0xFFFFFFE0) +#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_LSBMASK (0x07FFFFFF) +#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_SHIFT (5) +#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_LENGTH (27) +#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2BASEADDR_OFFSET (0x0194) + +/* PDP, VID2BASEADDR, VID2BASEADDR +*/ +#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_MASK (0xFFFFFFE0) +#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_LSBMASK (0x07FFFFFF) +#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_SHIFT (5) +#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_LENGTH (27) +#define 
ODN_PDP_VID2BASEADDR_VID2BASEADDR_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3BASEADDR_OFFSET (0x0198) + +/* PDP, VID3BASEADDR, VID3BASEADDR +*/ +#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_MASK (0xFFFFFFE0) +#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_LSBMASK (0x07FFFFFF) +#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_SHIFT (5) +#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_LENGTH (27) +#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4BASEADDR_OFFSET (0x019C) + +/* PDP, VID4BASEADDR, VID4BASEADDR +*/ +#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_MASK (0xFFFFFFE0) +#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_LSBMASK (0x07FFFFFF) +#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_SHIFT (5) +#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_LENGTH (27) +#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1UBASEADDR_OFFSET (0x01B0) + +/* PDP, VID1UBASEADDR, VID1UBASEADDR +*/ +#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_MASK (0xFFFFFFE0) +#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_LSBMASK (0x07FFFFFF) +#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_SHIFT (5) +#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH (27) +#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2UBASEADDR_OFFSET (0x01B4) + +/* PDP, VID2UBASEADDR, VID2UBASEADDR +*/ +#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_MASK (0xFFFFFFE0) +#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_LSBMASK (0x07FFFFFF) +#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_SHIFT (5) +#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_LENGTH (27) +#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3UBASEADDR_OFFSET (0x01B8) + +/* PDP, VID3UBASEADDR, VID3UBASEADDR +*/ +#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_MASK (0xFFFFFFE0) +#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_LSBMASK (0x07FFFFFF) +#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_SHIFT (5) +#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_LENGTH (27) +#define 
ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4UBASEADDR_OFFSET (0x01BC) + +/* PDP, VID4UBASEADDR, VID4UBASEADDR +*/ +#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_MASK (0xFFFFFFE0) +#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_LSBMASK (0x07FFFFFF) +#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_SHIFT (5) +#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_LENGTH (27) +#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1VBASEADDR_OFFSET (0x01D0) + +/* PDP, VID1VBASEADDR, VID1VBASEADDR +*/ +#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_MASK (0xFFFFFFE0) +#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_LSBMASK (0x07FFFFFF) +#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_SHIFT (5) +#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH (27) +#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2VBASEADDR_OFFSET (0x01D4) + +/* PDP, VID2VBASEADDR, VID2VBASEADDR +*/ +#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_MASK (0xFFFFFFE0) +#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_LSBMASK (0x07FFFFFF) +#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_SHIFT (5) +#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_LENGTH (27) +#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3VBASEADDR_OFFSET (0x01D8) + +/* PDP, VID3VBASEADDR, VID3VBASEADDR +*/ +#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_MASK (0xFFFFFFE0) +#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_LSBMASK (0x07FFFFFF) +#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_SHIFT (5) +#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_LENGTH (27) +#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4VBASEADDR_OFFSET (0x01DC) + +/* PDP, VID4VBASEADDR, VID4VBASEADDR +*/ +#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_MASK (0xFFFFFFE0) +#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_LSBMASK (0x07FFFFFF) +#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_SHIFT (5) +#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_LENGTH 
(27) +#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1POSTSKIPCTRL_OFFSET (0x0230) + +/* PDP, VID1POSTSKIPCTRL, VID1HPOSTCLIP +*/ +#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_MASK (0x007F0000) +#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LSBMASK (0x0000007F) +#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SHIFT (16) +#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LENGTH (7) +#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1POSTSKIPCTRL, VID1VPOSTCLIP +*/ +#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_MASK (0x0000003F) +#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LSBMASK (0x0000003F) +#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SHIFT (0) +#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LENGTH (6) +#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2POSTSKIPCTRL_OFFSET (0x0234) + +/* PDP, VID2POSTSKIPCTRL, VID2HPOSTCLIP +*/ +#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_MASK (0x007F0000) +#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LSBMASK (0x0000007F) +#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SHIFT (16) +#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LENGTH (7) +#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2POSTSKIPCTRL, VID2VPOSTCLIP +*/ +#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_MASK (0x0000003F) +#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LSBMASK (0x0000003F) +#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SHIFT (0) +#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LENGTH (6) +#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3POSTSKIPCTRL_OFFSET (0x0238) + +/* PDP, VID3POSTSKIPCTRL, VID3HPOSTCLIP +*/ +#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_MASK (0x007F0000) +#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LSBMASK (0x0000007F) +#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SHIFT (16) +#define 
ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LENGTH (7) +#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3POSTSKIPCTRL, VID3VPOSTCLIP +*/ +#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_MASK (0x0000003F) +#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LSBMASK (0x0000003F) +#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SHIFT (0) +#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LENGTH (6) +#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4POSTSKIPCTRL_OFFSET (0x023C) + +/* PDP, VID4POSTSKIPCTRL, VID4HPOSTCLIP +*/ +#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_MASK (0x007F0000) +#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LSBMASK (0x0000007F) +#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SHIFT (16) +#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LENGTH (7) +#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4POSTSKIPCTRL, VID4VPOSTCLIP +*/ +#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_MASK (0x0000003F) +#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LSBMASK (0x0000003F) +#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SHIFT (0) +#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LENGTH (6) +#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH1DECIMATE_CTRL_OFFSET (0x0240) + +/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_COUNT +*/ +#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) +#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) +#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) +#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) +#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_MODE +*/ +#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) +#define 
ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) +#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SHIFT (3) +#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LENGTH (1) +#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_PIXEL_HALVE +*/ +#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_MASK (0x00000004) +#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) +#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SHIFT (2) +#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LENGTH (1) +#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_EN +*/ +#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_MASK (0x00000001) +#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LSBMASK (0x00000001) +#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SHIFT (0) +#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LENGTH (1) +#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH2DECIMATE_CTRL_OFFSET (0x0244) + +/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_COUNT +*/ +#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) +#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) +#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) +#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) +#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_MODE +*/ +#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) +#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) +#define 
ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SHIFT (3) +#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LENGTH (1) +#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_PIXEL_HALVE +*/ +#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_MASK (0x00000004) +#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) +#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SHIFT (2) +#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LENGTH (1) +#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_EN +*/ +#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_MASK (0x00000001) +#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LSBMASK (0x00000001) +#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SHIFT (0) +#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LENGTH (1) +#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH3DECIMATE_CTRL_OFFSET (0x0248) + +/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_COUNT +*/ +#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) +#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) +#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) +#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) +#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_MODE +*/ +#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) +#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) +#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SHIFT (3) +#define 
ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LENGTH (1) +#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_PIXEL_HALVE +*/ +#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_MASK (0x00000004) +#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) +#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SHIFT (2) +#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LENGTH (1) +#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_EN +*/ +#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_MASK (0x00000001) +#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LSBMASK (0x00000001) +#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SHIFT (0) +#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LENGTH (1) +#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH4DECIMATE_CTRL_OFFSET (0x024C) + +/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_COUNT +*/ +#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) +#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) +#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) +#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) +#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_MODE +*/ +#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) +#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) +#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SHIFT (3) +#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LENGTH (1) +#define 
ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_PIXEL_HALVE +*/ +#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_MASK (0x00000004) +#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) +#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SHIFT (2) +#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LENGTH (1) +#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_EN +*/ +#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_MASK (0x00000001) +#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LSBMASK (0x00000001) +#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SHIFT (0) +#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LENGTH (1) +#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1DECIMATE_CTRL_OFFSET (0x0250) + +/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_COUNT +*/ +#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) +#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) +#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) +#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) +#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_MODE +*/ +#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) +#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) +#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SHIFT (3) +#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LENGTH (1) +#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1DECIMATE_CTRL, 
VID1DECIMATE_PIXEL_HALVE +*/ +#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_MASK (0x00000004) +#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) +#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SHIFT (2) +#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LENGTH (1) +#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_EN +*/ +#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_MASK (0x00000001) +#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LSBMASK (0x00000001) +#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SHIFT (0) +#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LENGTH (1) +#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2DECIMATE_CTRL_OFFSET (0x0254) + +/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_COUNT +*/ +#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) +#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) +#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) +#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) +#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_MODE +*/ +#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) +#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) +#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SHIFT (3) +#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LENGTH (1) +#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_PIXEL_HALVE +*/ +#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_MASK (0x00000004) +#define 
ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) +#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SHIFT (2) +#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LENGTH (1) +#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_EN +*/ +#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_MASK (0x00000001) +#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LSBMASK (0x00000001) +#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SHIFT (0) +#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LENGTH (1) +#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3DECIMATE_CTRL_OFFSET (0x0258) + +/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_COUNT +*/ +#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) +#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) +#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) +#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) +#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_MODE +*/ +#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) +#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) +#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SHIFT (3) +#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LENGTH (1) +#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_PIXEL_HALVE +*/ +#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_MASK (0x00000004) +#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) +#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SHIFT (2) +#define 
ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LENGTH (1) +#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_EN +*/ +#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_MASK (0x00000001) +#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LSBMASK (0x00000001) +#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SHIFT (0) +#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LENGTH (1) +#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4DECIMATE_CTRL_OFFSET (0x025C) + +/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_COUNT +*/ +#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) +#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) +#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) +#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) +#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_MODE +*/ +#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) +#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) +#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SHIFT (3) +#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LENGTH (1) +#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_PIXEL_HALVE +*/ +#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_MASK (0x00000004) +#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) +#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SHIFT (2) +#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LENGTH (1) +#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE + +/* 
PDP, VID4DECIMATE_CTRL, VID4DECIMATE_EN +*/ +#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_MASK (0x00000001) +#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LSBMASK (0x00000001) +#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SHIFT (0) +#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LENGTH (1) +#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1SKIPCTRL_OFFSET (0x0270) + +/* PDP, VID1SKIPCTRL, VID1HSKIP +*/ +#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_MASK (0x0FFF0000) +#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_LSBMASK (0x00000FFF) +#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_SHIFT (16) +#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_LENGTH (12) +#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SKIPCTRL, VID1VSKIP +*/ +#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_MASK (0x00000FFF) +#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_LSBMASK (0x00000FFF) +#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_SHIFT (0) +#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_LENGTH (12) +#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2SKIPCTRL_OFFSET (0x0274) + +/* PDP, VID2SKIPCTRL, VID2HSKIP +*/ +#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_MASK (0x0FFF0000) +#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_LSBMASK (0x00000FFF) +#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_SHIFT (16) +#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_LENGTH (12) +#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SKIPCTRL, VID2VSKIP +*/ +#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_MASK (0x00000FFF) +#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_LSBMASK (0x00000FFF) +#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_SHIFT (0) +#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_LENGTH (12) +#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3SKIPCTRL_OFFSET (0x0278) + +/* PDP, VID3SKIPCTRL, VID3HSKIP +*/ +#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_MASK (0x0FFF0000) +#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_LSBMASK (0x00000FFF) +#define 
ODN_PDP_VID3SKIPCTRL_VID3HSKIP_SHIFT (16) +#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_LENGTH (12) +#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3SKIPCTRL, VID3VSKIP +*/ +#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_MASK (0x00000FFF) +#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_LSBMASK (0x00000FFF) +#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_SHIFT (0) +#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_LENGTH (12) +#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4SKIPCTRL_OFFSET (0x027C) + +/* PDP, VID4SKIPCTRL, VID4HSKIP +*/ +#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_MASK (0x0FFF0000) +#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_LSBMASK (0x00000FFF) +#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_SHIFT (16) +#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_LENGTH (12) +#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4SKIPCTRL, VID4VSKIP +*/ +#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_MASK (0x00000FFF) +#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_LSBMASK (0x00000FFF) +#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_SHIFT (0) +#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_LENGTH (12) +#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1SCALECTRL_OFFSET (0x0460) + +/* PDP, VID1SCALECTRL, VID1HSCALEBP +*/ +#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_MASK (0x80000000) +#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_LSBMASK (0x00000001) +#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_SHIFT (31) +#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_LENGTH (1) +#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SCALECTRL, VID1VSCALEBP +*/ +#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_MASK (0x40000000) +#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_LSBMASK (0x00000001) +#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_SHIFT (30) +#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_LENGTH (1) +#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SCALECTRL, VID1HSBEFOREVS +*/ +#define 
ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_MASK (0x20000000) +#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_LSBMASK (0x00000001) +#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_SHIFT (29) +#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_LENGTH (1) +#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SCALECTRL, VID1VSURUNCTRL +*/ +#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_MASK (0x08000000) +#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_LSBMASK (0x00000001) +#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_SHIFT (27) +#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_LENGTH (1) +#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SCALECTRL, VID1PAN_EN +*/ +#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_MASK (0x00040000) +#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_LSBMASK (0x00000001) +#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_SHIFT (18) +#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_LENGTH (1) +#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SCALECTRL, VID1VORDER +*/ +#define ODN_PDP_VID1SCALECTRL_VID1VORDER_MASK (0x00030000) +#define ODN_PDP_VID1SCALECTRL_VID1VORDER_LSBMASK (0x00000003) +#define ODN_PDP_VID1SCALECTRL_VID1VORDER_SHIFT (16) +#define ODN_PDP_VID1SCALECTRL_VID1VORDER_LENGTH (2) +#define ODN_PDP_VID1SCALECTRL_VID1VORDER_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SCALECTRL, VID1VPITCH +*/ +#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_MASK (0x0000FFFF) +#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_LSBMASK (0x0000FFFF) +#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_SHIFT (0) +#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_LENGTH (16) +#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1VSINIT_OFFSET (0x0464) + +/* PDP, VID1VSINIT, VID1VINITIAL1 +*/ +#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_MASK (0xFFFF0000) +#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_LSBMASK (0x0000FFFF) +#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_SHIFT (16) +#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_LENGTH (16) +#define 
ODN_PDP_VID1VSINIT_VID1VINITIAL1_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1VSINIT, VID1VINITIAL0 +*/ +#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_MASK (0x0000FFFF) +#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_LSBMASK (0x0000FFFF) +#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_SHIFT (0) +#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_LENGTH (16) +#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1VCOEFF0_OFFSET (0x0468) + +/* PDP, VID1VCOEFF0, VID1VCOEFF0 +*/ +#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_MASK (0xFFFFFFFF) +#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_SHIFT (0) +#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_LENGTH (32) +#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1VCOEFF1_OFFSET (0x046C) + +/* PDP, VID1VCOEFF1, VID1VCOEFF1 +*/ +#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_MASK (0xFFFFFFFF) +#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_SHIFT (0) +#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_LENGTH (32) +#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1VCOEFF2_OFFSET (0x0470) + +/* PDP, VID1VCOEFF2, VID1VCOEFF2 +*/ +#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_MASK (0xFFFFFFFF) +#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_SHIFT (0) +#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_LENGTH (32) +#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1VCOEFF3_OFFSET (0x0474) + +/* PDP, VID1VCOEFF3, VID1VCOEFF3 +*/ +#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_MASK (0xFFFFFFFF) +#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_SHIFT (0) +#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_LENGTH (32) +#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1VCOEFF4_OFFSET (0x0478) + +/* PDP, VID1VCOEFF4, VID1VCOEFF4 +*/ +#define 
ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_MASK (0xFFFFFFFF) +#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_SHIFT (0) +#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_LENGTH (32) +#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1VCOEFF5_OFFSET (0x047C) + +/* PDP, VID1VCOEFF5, VID1VCOEFF5 +*/ +#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_MASK (0xFFFFFFFF) +#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_SHIFT (0) +#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_LENGTH (32) +#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1VCOEFF6_OFFSET (0x0480) + +/* PDP, VID1VCOEFF6, VID1VCOEFF6 +*/ +#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_MASK (0xFFFFFFFF) +#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_SHIFT (0) +#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_LENGTH (32) +#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1VCOEFF7_OFFSET (0x0484) + +/* PDP, VID1VCOEFF7, VID1VCOEFF7 +*/ +#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_MASK (0xFFFFFFFF) +#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_SHIFT (0) +#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_LENGTH (32) +#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1VCOEFF8_OFFSET (0x0488) + +/* PDP, VID1VCOEFF8, VID1VCOEFF8 +*/ +#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_MASK (0x000000FF) +#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_LSBMASK (0x000000FF) +#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_SHIFT (0) +#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_LENGTH (8) +#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1HSINIT_OFFSET (0x048C) + +/* PDP, VID1HSINIT, VID1HINITIAL +*/ +#define ODN_PDP_VID1HSINIT_VID1HINITIAL_MASK (0xFFFF0000) +#define ODN_PDP_VID1HSINIT_VID1HINITIAL_LSBMASK (0x0000FFFF) +#define 
ODN_PDP_VID1HSINIT_VID1HINITIAL_SHIFT (16) +#define ODN_PDP_VID1HSINIT_VID1HINITIAL_LENGTH (16) +#define ODN_PDP_VID1HSINIT_VID1HINITIAL_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1HSINIT, VID1HPITCH +*/ +#define ODN_PDP_VID1HSINIT_VID1HPITCH_MASK (0x0000FFFF) +#define ODN_PDP_VID1HSINIT_VID1HPITCH_LSBMASK (0x0000FFFF) +#define ODN_PDP_VID1HSINIT_VID1HPITCH_SHIFT (0) +#define ODN_PDP_VID1HSINIT_VID1HPITCH_LENGTH (16) +#define ODN_PDP_VID1HSINIT_VID1HPITCH_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1HCOEFF0_OFFSET (0x0490) + +/* PDP, VID1HCOEFF0, VID1HCOEFF0 +*/ +#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_MASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_SHIFT (0) +#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_LENGTH (32) +#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1HCOEFF1_OFFSET (0x0494) + +/* PDP, VID1HCOEFF1, VID1HCOEFF1 +*/ +#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_MASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_SHIFT (0) +#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_LENGTH (32) +#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1HCOEFF2_OFFSET (0x0498) + +/* PDP, VID1HCOEFF2, VID1HCOEFF2 +*/ +#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_MASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_SHIFT (0) +#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_LENGTH (32) +#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1HCOEFF3_OFFSET (0x049C) + +/* PDP, VID1HCOEFF3, VID1HCOEFF3 +*/ +#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_MASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_SHIFT (0) +#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_LENGTH (32) +#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_SIGNED_FIELD IMG_FALSE + +#define 
ODN_PDP_VID1HCOEFF4_OFFSET (0x04A0) + +/* PDP, VID1HCOEFF4, VID1HCOEFF4 +*/ +#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_MASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_SHIFT (0) +#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_LENGTH (32) +#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1HCOEFF5_OFFSET (0x04A4) + +/* PDP, VID1HCOEFF5, VID1HCOEFF5 +*/ +#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_MASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_SHIFT (0) +#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_LENGTH (32) +#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1HCOEFF6_OFFSET (0x04A8) + +/* PDP, VID1HCOEFF6, VID1HCOEFF6 +*/ +#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_MASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_SHIFT (0) +#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_LENGTH (32) +#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1HCOEFF7_OFFSET (0x04AC) + +/* PDP, VID1HCOEFF7, VID1HCOEFF7 +*/ +#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_MASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_SHIFT (0) +#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_LENGTH (32) +#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1HCOEFF8_OFFSET (0x04B0) + +/* PDP, VID1HCOEFF8, VID1HCOEFF8 +*/ +#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_MASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_SHIFT (0) +#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_LENGTH (32) +#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1HCOEFF9_OFFSET (0x04B4) + +/* PDP, VID1HCOEFF9, VID1HCOEFF9 +*/ +#define 
ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_MASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_SHIFT (0) +#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_LENGTH (32) +#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1HCOEFF10_OFFSET (0x04B8) + +/* PDP, VID1HCOEFF10, VID1HCOEFF10 +*/ +#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_MASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_SHIFT (0) +#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_LENGTH (32) +#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1HCOEFF11_OFFSET (0x04BC) + +/* PDP, VID1HCOEFF11, VID1HCOEFF11 +*/ +#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_MASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_SHIFT (0) +#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_LENGTH (32) +#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1HCOEFF12_OFFSET (0x04C0) + +/* PDP, VID1HCOEFF12, VID1HCOEFF12 +*/ +#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_MASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_SHIFT (0) +#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_LENGTH (32) +#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1HCOEFF13_OFFSET (0x04C4) + +/* PDP, VID1HCOEFF13, VID1HCOEFF13 +*/ +#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_MASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_SHIFT (0) +#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_LENGTH (32) +#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1HCOEFF14_OFFSET (0x04C8) + +/* PDP, VID1HCOEFF14, VID1HCOEFF14 +*/ +#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_MASK (0xFFFFFFFF) +#define 
ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_SHIFT (0) +#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_LENGTH (32) +#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1HCOEFF15_OFFSET (0x04CC) + +/* PDP, VID1HCOEFF15, VID1HCOEFF15 +*/ +#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_MASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_SHIFT (0) +#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_LENGTH (32) +#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1HCOEFF16_OFFSET (0x04D0) + +/* PDP, VID1HCOEFF16, VID1HCOEFF16 +*/ +#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_MASK (0x000000FF) +#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_LSBMASK (0x000000FF) +#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_SHIFT (0) +#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_LENGTH (8) +#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1SCALESIZE_OFFSET (0x04D4) + +/* PDP, VID1SCALESIZE, VID1SCALEWIDTH +*/ +#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_MASK (0x0FFF0000) +#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_LSBMASK (0x00000FFF) +#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_SHIFT (16) +#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_LENGTH (12) +#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SCALESIZE, VID1SCALEHEIGHT +*/ +#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_MASK (0x00000FFF) +#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LSBMASK (0x00000FFF) +#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SHIFT (0) +#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LENGTH (12) +#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_CORE_ID_OFFSET (0x04E0) + +/* PDP, PVR_ODN_PDP_CORE_ID, GROUP_ID +*/ +#define ODN_PDP_CORE_ID_GROUP_ID_MASK (0xFF000000) +#define ODN_PDP_CORE_ID_GROUP_ID_LSBMASK (0x000000FF) +#define 
ODN_PDP_CORE_ID_GROUP_ID_SHIFT (24) +#define ODN_PDP_CORE_ID_GROUP_ID_LENGTH (8) +#define ODN_PDP_CORE_ID_GROUP_ID_SIGNED_FIELD IMG_FALSE + +/* PDP, PVR_ODN_PDP_CORE_ID, CORE_ID +*/ +#define ODN_PDP_CORE_ID_CORE_ID_MASK (0x00FF0000) +#define ODN_PDP_CORE_ID_CORE_ID_LSBMASK (0x000000FF) +#define ODN_PDP_CORE_ID_CORE_ID_SHIFT (16) +#define ODN_PDP_CORE_ID_CORE_ID_LENGTH (8) +#define ODN_PDP_CORE_ID_CORE_ID_SIGNED_FIELD IMG_FALSE + +/* PDP, PVR_ODN_PDP_CORE_ID, CONFIG_ID +*/ +#define ODN_PDP_CORE_ID_CONFIG_ID_MASK (0x0000FFFF) +#define ODN_PDP_CORE_ID_CONFIG_ID_LSBMASK (0x0000FFFF) +#define ODN_PDP_CORE_ID_CONFIG_ID_SHIFT (0) +#define ODN_PDP_CORE_ID_CONFIG_ID_LENGTH (16) +#define ODN_PDP_CORE_ID_CONFIG_ID_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_CORE_REV_OFFSET (0x04F0) + +/* PDP, PVR_ODN_PDP_CORE_REV, MAJOR_REV +*/ +#define ODN_PDP_CORE_REV_MAJOR_REV_MASK (0x00FF0000) +#define ODN_PDP_CORE_REV_MAJOR_REV_LSBMASK (0x000000FF) +#define ODN_PDP_CORE_REV_MAJOR_REV_SHIFT (16) +#define ODN_PDP_CORE_REV_MAJOR_REV_LENGTH (8) +#define ODN_PDP_CORE_REV_MAJOR_REV_SIGNED_FIELD IMG_FALSE + +/* PDP, PVR_ODN_PDP_CORE_REV, MINOR_REV +*/ +#define ODN_PDP_CORE_REV_MINOR_REV_MASK (0x0000FF00) +#define ODN_PDP_CORE_REV_MINOR_REV_LSBMASK (0x000000FF) +#define ODN_PDP_CORE_REV_MINOR_REV_SHIFT (8) +#define ODN_PDP_CORE_REV_MINOR_REV_LENGTH (8) +#define ODN_PDP_CORE_REV_MINOR_REV_SIGNED_FIELD IMG_FALSE + +/* PDP, PVR_ODN_PDP_CORE_REV, MAINT_REV +*/ +#define ODN_PDP_CORE_REV_MAINT_REV_MASK (0x000000FF) +#define ODN_PDP_CORE_REV_MAINT_REV_LSBMASK (0x000000FF) +#define ODN_PDP_CORE_REV_MAINT_REV_SHIFT (0) +#define ODN_PDP_CORE_REV_MAINT_REV_LENGTH (8) +#define ODN_PDP_CORE_REV_MAINT_REV_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2SCALECTRL_OFFSET (0x0500) + +/* PDP, VID2SCALECTRL, VID2HSCALEBP +*/ +#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_MASK (0x80000000) +#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_LSBMASK (0x00000001) +#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_SHIFT (31) +#define 
ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_LENGTH (1) +#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SCALECTRL, VID2VSCALEBP +*/ +#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_MASK (0x40000000) +#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_LSBMASK (0x00000001) +#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_SHIFT (30) +#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_LENGTH (1) +#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SCALECTRL, VID2HSBEFOREVS +*/ +#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_MASK (0x20000000) +#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_LSBMASK (0x00000001) +#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_SHIFT (29) +#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_LENGTH (1) +#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SCALECTRL, VID2VSURUNCTRL +*/ +#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_MASK (0x08000000) +#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_LSBMASK (0x00000001) +#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_SHIFT (27) +#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_LENGTH (1) +#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SCALECTRL, VID2PAN_EN +*/ +#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_MASK (0x00040000) +#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_LSBMASK (0x00000001) +#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_SHIFT (18) +#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_LENGTH (1) +#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SCALECTRL, VID2VORDER +*/ +#define ODN_PDP_VID2SCALECTRL_VID2VORDER_MASK (0x00030000) +#define ODN_PDP_VID2SCALECTRL_VID2VORDER_LSBMASK (0x00000003) +#define ODN_PDP_VID2SCALECTRL_VID2VORDER_SHIFT (16) +#define ODN_PDP_VID2SCALECTRL_VID2VORDER_LENGTH (2) +#define ODN_PDP_VID2SCALECTRL_VID2VORDER_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SCALECTRL, VID2VPITCH +*/ +#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_MASK (0x0000FFFF) +#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_LSBMASK 
(0x0000FFFF) +#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_SHIFT (0) +#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_LENGTH (16) +#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2VSINIT_OFFSET (0x0504) + +/* PDP, VID2VSINIT, VID2VINITIAL1 +*/ +#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_MASK (0xFFFF0000) +#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_LSBMASK (0x0000FFFF) +#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_SHIFT (16) +#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_LENGTH (16) +#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2VSINIT, VID2VINITIAL0 +*/ +#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_MASK (0x0000FFFF) +#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_LSBMASK (0x0000FFFF) +#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_SHIFT (0) +#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_LENGTH (16) +#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2VCOEFF0_OFFSET (0x0508) + +/* PDP, VID2VCOEFF0, VID2VCOEFF0 +*/ +#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_SHIFT (0) +#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_LENGTH (32) +#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2VCOEFF1_OFFSET (0x050C) + +/* PDP, VID2VCOEFF1, VID2VCOEFF1 +*/ +#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_SHIFT (0) +#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_LENGTH (32) +#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2VCOEFF2_OFFSET (0x0510) + +/* PDP, VID2VCOEFF2, VID2VCOEFF2 +*/ +#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_SHIFT (0) +#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_LENGTH (32) +#define 
ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2VCOEFF3_OFFSET (0x0514) + +/* PDP, VID2VCOEFF3, VID2VCOEFF3 +*/ +#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_SHIFT (0) +#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_LENGTH (32) +#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2VCOEFF4_OFFSET (0x0518) + +/* PDP, VID2VCOEFF4, VID2VCOEFF4 +*/ +#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_SHIFT (0) +#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_LENGTH (32) +#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2VCOEFF5_OFFSET (0x051C) + +/* PDP, VID2VCOEFF5, VID2VCOEFF5 +*/ +#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_SHIFT (0) +#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_LENGTH (32) +#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2VCOEFF6_OFFSET (0x0520) + +/* PDP, VID2VCOEFF6, VID2VCOEFF6 +*/ +#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_SHIFT (0) +#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_LENGTH (32) +#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2VCOEFF7_OFFSET (0x0524) + +/* PDP, VID2VCOEFF7, VID2VCOEFF7 +*/ +#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_SHIFT (0) +#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_LENGTH (32) +#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2VCOEFF8_OFFSET (0x0528) + +/* PDP, 
VID2VCOEFF8, VID2VCOEFF8 +*/ +#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_MASK (0x000000FF) +#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_LSBMASK (0x000000FF) +#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_SHIFT (0) +#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_LENGTH (8) +#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2HSINIT_OFFSET (0x052C) + +/* PDP, VID2HSINIT, VID2HINITIAL +*/ +#define ODN_PDP_VID2HSINIT_VID2HINITIAL_MASK (0xFFFF0000) +#define ODN_PDP_VID2HSINIT_VID2HINITIAL_LSBMASK (0x0000FFFF) +#define ODN_PDP_VID2HSINIT_VID2HINITIAL_SHIFT (16) +#define ODN_PDP_VID2HSINIT_VID2HINITIAL_LENGTH (16) +#define ODN_PDP_VID2HSINIT_VID2HINITIAL_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2HSINIT, VID2HPITCH +*/ +#define ODN_PDP_VID2HSINIT_VID2HPITCH_MASK (0x0000FFFF) +#define ODN_PDP_VID2HSINIT_VID2HPITCH_LSBMASK (0x0000FFFF) +#define ODN_PDP_VID2HSINIT_VID2HPITCH_SHIFT (0) +#define ODN_PDP_VID2HSINIT_VID2HPITCH_LENGTH (16) +#define ODN_PDP_VID2HSINIT_VID2HPITCH_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2HCOEFF0_OFFSET (0x0530) + +/* PDP, VID2HCOEFF0, VID2HCOEFF0 +*/ +#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_SHIFT (0) +#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_LENGTH (32) +#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2HCOEFF1_OFFSET (0x0534) + +/* PDP, VID2HCOEFF1, VID2HCOEFF1 +*/ +#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_SHIFT (0) +#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_LENGTH (32) +#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2HCOEFF2_OFFSET (0x0538) + +/* PDP, VID2HCOEFF2, VID2HCOEFF2 +*/ +#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_LSBMASK (0xFFFFFFFF) +#define 
ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_SHIFT (0) +#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_LENGTH (32) +#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2HCOEFF3_OFFSET (0x053C) + +/* PDP, VID2HCOEFF3, VID2HCOEFF3 +*/ +#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_SHIFT (0) +#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_LENGTH (32) +#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2HCOEFF4_OFFSET (0x0540) + +/* PDP, VID2HCOEFF4, VID2HCOEFF4 +*/ +#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_SHIFT (0) +#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_LENGTH (32) +#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2HCOEFF5_OFFSET (0x0544) + +/* PDP, VID2HCOEFF5, VID2HCOEFF5 +*/ +#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_SHIFT (0) +#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_LENGTH (32) +#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2HCOEFF6_OFFSET (0x0548) + +/* PDP, VID2HCOEFF6, VID2HCOEFF6 +*/ +#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_SHIFT (0) +#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_LENGTH (32) +#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2HCOEFF7_OFFSET (0x054C) + +/* PDP, VID2HCOEFF7, VID2HCOEFF7 +*/ +#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_SHIFT (0) +#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_LENGTH (32) +#define 
ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2HCOEFF8_OFFSET (0x0550) + +/* PDP, VID2HCOEFF8, VID2HCOEFF8 +*/ +#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_SHIFT (0) +#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_LENGTH (32) +#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2HCOEFF9_OFFSET (0x0554) + +/* PDP, VID2HCOEFF9, VID2HCOEFF9 +*/ +#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_SHIFT (0) +#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_LENGTH (32) +#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2HCOEFF10_OFFSET (0x0558) + +/* PDP, VID2HCOEFF10, VID2HCOEFF10 +*/ +#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_SHIFT (0) +#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_LENGTH (32) +#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2HCOEFF11_OFFSET (0x055C) + +/* PDP, VID2HCOEFF11, VID2HCOEFF11 +*/ +#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_SHIFT (0) +#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_LENGTH (32) +#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2HCOEFF12_OFFSET (0x0560) + +/* PDP, VID2HCOEFF12, VID2HCOEFF12 +*/ +#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_SHIFT (0) +#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_LENGTH (32) +#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_SIGNED_FIELD IMG_FALSE + +#define 
ODN_PDP_VID2HCOEFF13_OFFSET (0x0564) + +/* PDP, VID2HCOEFF13, VID2HCOEFF13 +*/ +#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_SHIFT (0) +#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_LENGTH (32) +#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2HCOEFF14_OFFSET (0x0568) + +/* PDP, VID2HCOEFF14, VID2HCOEFF14 +*/ +#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_SHIFT (0) +#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_LENGTH (32) +#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2HCOEFF15_OFFSET (0x056C) + +/* PDP, VID2HCOEFF15, VID2HCOEFF15 +*/ +#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_MASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_SHIFT (0) +#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_LENGTH (32) +#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2HCOEFF16_OFFSET (0x0570) + +/* PDP, VID2HCOEFF16, VID2HCOEFF16 +*/ +#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_MASK (0x000000FF) +#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_LSBMASK (0x000000FF) +#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_SHIFT (0) +#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_LENGTH (8) +#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2SCALESIZE_OFFSET (0x0574) + +/* PDP, VID2SCALESIZE, VID2SCALEWIDTH +*/ +#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_MASK (0x0FFF0000) +#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_LSBMASK (0x00000FFF) +#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_SHIFT (16) +#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_LENGTH (12) +#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SCALESIZE, VID2SCALEHEIGHT +*/ 
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_MASK (0x00000FFF) +#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LSBMASK (0x00000FFF) +#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SHIFT (0) +#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LENGTH (12) +#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3SCALECTRL_OFFSET (0x0578) + +/* PDP, VID3SCALECTRL, VID3HSCALEBP +*/ +#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_MASK (0x80000000) +#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_LSBMASK (0x00000001) +#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_SHIFT (31) +#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_LENGTH (1) +#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3SCALECTRL, VID3VSCALEBP +*/ +#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_MASK (0x40000000) +#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_LSBMASK (0x00000001) +#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_SHIFT (30) +#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_LENGTH (1) +#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3SCALECTRL, VID3HSBEFOREVS +*/ +#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_MASK (0x20000000) +#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_LSBMASK (0x00000001) +#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_SHIFT (29) +#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_LENGTH (1) +#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3SCALECTRL, VID3VSURUNCTRL +*/ +#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_MASK (0x08000000) +#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_LSBMASK (0x00000001) +#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_SHIFT (27) +#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_LENGTH (1) +#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3SCALECTRL, VID3PAN_EN +*/ +#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_MASK (0x00040000) +#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_LSBMASK (0x00000001) +#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_SHIFT (18) 
+#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_LENGTH (1) +#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3SCALECTRL, VID3VORDER +*/ +#define ODN_PDP_VID3SCALECTRL_VID3VORDER_MASK (0x00030000) +#define ODN_PDP_VID3SCALECTRL_VID3VORDER_LSBMASK (0x00000003) +#define ODN_PDP_VID3SCALECTRL_VID3VORDER_SHIFT (16) +#define ODN_PDP_VID3SCALECTRL_VID3VORDER_LENGTH (2) +#define ODN_PDP_VID3SCALECTRL_VID3VORDER_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3SCALECTRL, VID3VPITCH +*/ +#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_MASK (0x0000FFFF) +#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_LSBMASK (0x0000FFFF) +#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_SHIFT (0) +#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_LENGTH (16) +#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3VSINIT_OFFSET (0x057C) + +/* PDP, VID3VSINIT, VID3VINITIAL1 +*/ +#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_MASK (0xFFFF0000) +#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_LSBMASK (0x0000FFFF) +#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_SHIFT (16) +#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_LENGTH (16) +#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3VSINIT, VID3VINITIAL0 +*/ +#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_MASK (0x0000FFFF) +#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_LSBMASK (0x0000FFFF) +#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_SHIFT (0) +#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_LENGTH (16) +#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3VCOEFF0_OFFSET (0x0580) + +/* PDP, VID3VCOEFF0, VID3VCOEFF0 +*/ +#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_SHIFT (0) +#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_LENGTH (32) +#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3VCOEFF1_OFFSET (0x0584) + +/* PDP, VID3VCOEFF1, VID3VCOEFF1 +*/ +#define 
ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_SHIFT (0) +#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_LENGTH (32) +#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3VCOEFF2_OFFSET (0x0588) + +/* PDP, VID3VCOEFF2, VID3VCOEFF2 +*/ +#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_SHIFT (0) +#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_LENGTH (32) +#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3VCOEFF3_OFFSET (0x058C) + +/* PDP, VID3VCOEFF3, VID3VCOEFF3 +*/ +#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_SHIFT (0) +#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_LENGTH (32) +#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3VCOEFF4_OFFSET (0x0590) + +/* PDP, VID3VCOEFF4, VID3VCOEFF4 +*/ +#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_SHIFT (0) +#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_LENGTH (32) +#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3VCOEFF5_OFFSET (0x0594) + +/* PDP, VID3VCOEFF5, VID3VCOEFF5 +*/ +#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_SHIFT (0) +#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_LENGTH (32) +#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3VCOEFF6_OFFSET (0x0598) + +/* PDP, VID3VCOEFF6, VID3VCOEFF6 +*/ +#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_LSBMASK (0xFFFFFFFF) 
+#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_SHIFT (0) +#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_LENGTH (32) +#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3VCOEFF7_OFFSET (0x059C) + +/* PDP, VID3VCOEFF7, VID3VCOEFF7 +*/ +#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_SHIFT (0) +#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_LENGTH (32) +#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3VCOEFF8_OFFSET (0x05A0) + +/* PDP, VID3VCOEFF8, VID3VCOEFF8 +*/ +#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_MASK (0x000000FF) +#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_LSBMASK (0x000000FF) +#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_SHIFT (0) +#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_LENGTH (8) +#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3HSINIT_OFFSET (0x05A4) + +/* PDP, VID3HSINIT, VID3HINITIAL +*/ +#define ODN_PDP_VID3HSINIT_VID3HINITIAL_MASK (0xFFFF0000) +#define ODN_PDP_VID3HSINIT_VID3HINITIAL_LSBMASK (0x0000FFFF) +#define ODN_PDP_VID3HSINIT_VID3HINITIAL_SHIFT (16) +#define ODN_PDP_VID3HSINIT_VID3HINITIAL_LENGTH (16) +#define ODN_PDP_VID3HSINIT_VID3HINITIAL_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3HSINIT, VID3HPITCH +*/ +#define ODN_PDP_VID3HSINIT_VID3HPITCH_MASK (0x0000FFFF) +#define ODN_PDP_VID3HSINIT_VID3HPITCH_LSBMASK (0x0000FFFF) +#define ODN_PDP_VID3HSINIT_VID3HPITCH_SHIFT (0) +#define ODN_PDP_VID3HSINIT_VID3HPITCH_LENGTH (16) +#define ODN_PDP_VID3HSINIT_VID3HPITCH_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3HCOEFF0_OFFSET (0x05A8) + +/* PDP, VID3HCOEFF0, VID3HCOEFF0 +*/ +#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_SHIFT (0) +#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_LENGTH (32) +#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_SIGNED_FIELD IMG_FALSE + +#define 
ODN_PDP_VID3HCOEFF1_OFFSET (0x05AC) + +/* PDP, VID3HCOEFF1, VID3HCOEFF1 +*/ +#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_SHIFT (0) +#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_LENGTH (32) +#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3HCOEFF2_OFFSET (0x05B0) + +/* PDP, VID3HCOEFF2, VID3HCOEFF2 +*/ +#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_SHIFT (0) +#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_LENGTH (32) +#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3HCOEFF3_OFFSET (0x05B4) + +/* PDP, VID3HCOEFF3, VID3HCOEFF3 +*/ +#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_SHIFT (0) +#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_LENGTH (32) +#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3HCOEFF4_OFFSET (0x05B8) + +/* PDP, VID3HCOEFF4, VID3HCOEFF4 +*/ +#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_SHIFT (0) +#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_LENGTH (32) +#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3HCOEFF5_OFFSET (0x05BC) + +/* PDP, VID3HCOEFF5, VID3HCOEFF5 +*/ +#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_SHIFT (0) +#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_LENGTH (32) +#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3HCOEFF6_OFFSET (0x05C0) + +/* PDP, VID3HCOEFF6, VID3HCOEFF6 +*/ +#define 
ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_SHIFT (0) +#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_LENGTH (32) +#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3HCOEFF7_OFFSET (0x05C4) + +/* PDP, VID3HCOEFF7, VID3HCOEFF7 +*/ +#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_SHIFT (0) +#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_LENGTH (32) +#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3HCOEFF8_OFFSET (0x05C8) + +/* PDP, VID3HCOEFF8, VID3HCOEFF8 +*/ +#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_SHIFT (0) +#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_LENGTH (32) +#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3HCOEFF9_OFFSET (0x05CC) + +/* PDP, VID3HCOEFF9, VID3HCOEFF9 +*/ +#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_SHIFT (0) +#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_LENGTH (32) +#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3HCOEFF10_OFFSET (0x05D0) + +/* PDP, VID3HCOEFF10, VID3HCOEFF10 +*/ +#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_SHIFT (0) +#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_LENGTH (32) +#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3HCOEFF11_OFFSET (0x05D4) + +/* PDP, VID3HCOEFF11, VID3HCOEFF11 +*/ +#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_LSBMASK 
(0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_SHIFT (0) +#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_LENGTH (32) +#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3HCOEFF12_OFFSET (0x05D8) + +/* PDP, VID3HCOEFF12, VID3HCOEFF12 +*/ +#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_SHIFT (0) +#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_LENGTH (32) +#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3HCOEFF13_OFFSET (0x05DC) + +/* PDP, VID3HCOEFF13, VID3HCOEFF13 +*/ +#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_SHIFT (0) +#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_LENGTH (32) +#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3HCOEFF14_OFFSET (0x05E0) + +/* PDP, VID3HCOEFF14, VID3HCOEFF14 +*/ +#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_SHIFT (0) +#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_LENGTH (32) +#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3HCOEFF15_OFFSET (0x05E4) + +/* PDP, VID3HCOEFF15, VID3HCOEFF15 +*/ +#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_MASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_SHIFT (0) +#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_LENGTH (32) +#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3HCOEFF16_OFFSET (0x05E8) + +/* PDP, VID3HCOEFF16, VID3HCOEFF16 +*/ +#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_MASK (0x000000FF) +#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_LSBMASK (0x000000FF) +#define 
ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_SHIFT (0) +#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_LENGTH (8) +#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3SCALESIZE_OFFSET (0x05EC) + +/* PDP, VID3SCALESIZE, VID3SCALEWIDTH +*/ +#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_MASK (0x0FFF0000) +#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_LSBMASK (0x00000FFF) +#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_SHIFT (16) +#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_LENGTH (12) +#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3SCALESIZE, VID3SCALEHEIGHT +*/ +#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_MASK (0x00000FFF) +#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LSBMASK (0x00000FFF) +#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SHIFT (0) +#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LENGTH (12) +#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4SCALECTRL_OFFSET (0x05F0) + +/* PDP, VID4SCALECTRL, VID4HSCALEBP +*/ +#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_MASK (0x80000000) +#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_LSBMASK (0x00000001) +#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_SHIFT (31) +#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_LENGTH (1) +#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4SCALECTRL, VID4VSCALEBP +*/ +#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_MASK (0x40000000) +#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_LSBMASK (0x00000001) +#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_SHIFT (30) +#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_LENGTH (1) +#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4SCALECTRL, VID4HSBEFOREVS +*/ +#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_MASK (0x20000000) +#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_LSBMASK (0x00000001) +#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_SHIFT (29) +#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_LENGTH (1) +#define 
ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4SCALECTRL, VID4VSURUNCTRL +*/ +#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_MASK (0x08000000) +#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_LSBMASK (0x00000001) +#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_SHIFT (27) +#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_LENGTH (1) +#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4SCALECTRL, VID4PAN_EN +*/ +#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_MASK (0x00040000) +#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_LSBMASK (0x00000001) +#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_SHIFT (18) +#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_LENGTH (1) +#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4SCALECTRL, VID4VORDER +*/ +#define ODN_PDP_VID4SCALECTRL_VID4VORDER_MASK (0x00030000) +#define ODN_PDP_VID4SCALECTRL_VID4VORDER_LSBMASK (0x00000003) +#define ODN_PDP_VID4SCALECTRL_VID4VORDER_SHIFT (16) +#define ODN_PDP_VID4SCALECTRL_VID4VORDER_LENGTH (2) +#define ODN_PDP_VID4SCALECTRL_VID4VORDER_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4SCALECTRL, VID4VPITCH +*/ +#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_MASK (0x0000FFFF) +#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_LSBMASK (0x0000FFFF) +#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_SHIFT (0) +#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_LENGTH (16) +#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4VSINIT_OFFSET (0x05F4) + +/* PDP, VID4VSINIT, VID4VINITIAL1 +*/ +#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_MASK (0xFFFF0000) +#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_LSBMASK (0x0000FFFF) +#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_SHIFT (16) +#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_LENGTH (16) +#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4VSINIT, VID4VINITIAL0 +*/ +#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_MASK (0x0000FFFF) +#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_LSBMASK (0x0000FFFF) +#define 
ODN_PDP_VID4VSINIT_VID4VINITIAL0_SHIFT (0) +#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_LENGTH (16) +#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4VCOEFF0_OFFSET (0x05F8) + +/* PDP, VID4VCOEFF0, VID4VCOEFF0 +*/ +#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_SHIFT (0) +#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_LENGTH (32) +#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4VCOEFF1_OFFSET (0x05FC) + +/* PDP, VID4VCOEFF1, VID4VCOEFF1 +*/ +#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_SHIFT (0) +#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_LENGTH (32) +#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4VCOEFF2_OFFSET (0x0600) + +/* PDP, VID4VCOEFF2, VID4VCOEFF2 +*/ +#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_SHIFT (0) +#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_LENGTH (32) +#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4VCOEFF3_OFFSET (0x0604) + +/* PDP, VID4VCOEFF3, VID4VCOEFF3 +*/ +#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_SHIFT (0) +#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_LENGTH (32) +#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4VCOEFF4_OFFSET (0x0608) + +/* PDP, VID4VCOEFF4, VID4VCOEFF4 +*/ +#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_SHIFT (0) +#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_LENGTH (32) +#define 
ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4VCOEFF5_OFFSET (0x060C) + +/* PDP, VID4VCOEFF5, VID4VCOEFF5 +*/ +#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_SHIFT (0) +#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_LENGTH (32) +#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4VCOEFF6_OFFSET (0x0610) + +/* PDP, VID4VCOEFF6, VID4VCOEFF6 +*/ +#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_SHIFT (0) +#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_LENGTH (32) +#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4VCOEFF7_OFFSET (0x0614) + +/* PDP, VID4VCOEFF7, VID4VCOEFF7 +*/ +#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_SHIFT (0) +#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_LENGTH (32) +#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4VCOEFF8_OFFSET (0x0618) + +/* PDP, VID4VCOEFF8, VID4VCOEFF8 +*/ +#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_MASK (0x000000FF) +#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_LSBMASK (0x000000FF) +#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_SHIFT (0) +#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_LENGTH (8) +#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4HSINIT_OFFSET (0x061C) + +/* PDP, VID4HSINIT, VID4HINITIAL +*/ +#define ODN_PDP_VID4HSINIT_VID4HINITIAL_MASK (0xFFFF0000) +#define ODN_PDP_VID4HSINIT_VID4HINITIAL_LSBMASK (0x0000FFFF) +#define ODN_PDP_VID4HSINIT_VID4HINITIAL_SHIFT (16) +#define ODN_PDP_VID4HSINIT_VID4HINITIAL_LENGTH (16) +#define ODN_PDP_VID4HSINIT_VID4HINITIAL_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4HSINIT, VID4HPITCH +*/ +#define 
ODN_PDP_VID4HSINIT_VID4HPITCH_MASK (0x0000FFFF) +#define ODN_PDP_VID4HSINIT_VID4HPITCH_LSBMASK (0x0000FFFF) +#define ODN_PDP_VID4HSINIT_VID4HPITCH_SHIFT (0) +#define ODN_PDP_VID4HSINIT_VID4HPITCH_LENGTH (16) +#define ODN_PDP_VID4HSINIT_VID4HPITCH_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4HCOEFF0_OFFSET (0x0620) + +/* PDP, VID4HCOEFF0, VID4HCOEFF0 +*/ +#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_SHIFT (0) +#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_LENGTH (32) +#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4HCOEFF1_OFFSET (0x0624) + +/* PDP, VID4HCOEFF1, VID4HCOEFF1 +*/ +#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_SHIFT (0) +#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_LENGTH (32) +#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4HCOEFF2_OFFSET (0x0628) + +/* PDP, VID4HCOEFF2, VID4HCOEFF2 +*/ +#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_SHIFT (0) +#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_LENGTH (32) +#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4HCOEFF3_OFFSET (0x062C) + +/* PDP, VID4HCOEFF3, VID4HCOEFF3 +*/ +#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_SHIFT (0) +#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_LENGTH (32) +#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4HCOEFF4_OFFSET (0x0630) + +/* PDP, VID4HCOEFF4, VID4HCOEFF4 +*/ +#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_LSBMASK (0xFFFFFFFF) +#define 
ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_SHIFT (0) +#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_LENGTH (32) +#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4HCOEFF5_OFFSET (0x0634) + +/* PDP, VID4HCOEFF5, VID4HCOEFF5 +*/ +#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_SHIFT (0) +#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_LENGTH (32) +#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4HCOEFF6_OFFSET (0x0638) + +/* PDP, VID4HCOEFF6, VID4HCOEFF6 +*/ +#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_SHIFT (0) +#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_LENGTH (32) +#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4HCOEFF7_OFFSET (0x063C) + +/* PDP, VID4HCOEFF7, VID4HCOEFF7 +*/ +#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_SHIFT (0) +#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_LENGTH (32) +#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4HCOEFF8_OFFSET (0x0640) + +/* PDP, VID4HCOEFF8, VID4HCOEFF8 +*/ +#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_SHIFT (0) +#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_LENGTH (32) +#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4HCOEFF9_OFFSET (0x0644) + +/* PDP, VID4HCOEFF9, VID4HCOEFF9 +*/ +#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_SHIFT (0) +#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_LENGTH (32) +#define 
ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4HCOEFF10_OFFSET (0x0648) + +/* PDP, VID4HCOEFF10, VID4HCOEFF10 +*/ +#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_SHIFT (0) +#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_LENGTH (32) +#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4HCOEFF11_OFFSET (0x064C) + +/* PDP, VID4HCOEFF11, VID4HCOEFF11 +*/ +#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_SHIFT (0) +#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_LENGTH (32) +#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4HCOEFF12_OFFSET (0x0650) + +/* PDP, VID4HCOEFF12, VID4HCOEFF12 +*/ +#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_SHIFT (0) +#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_LENGTH (32) +#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4HCOEFF13_OFFSET (0x0654) + +/* PDP, VID4HCOEFF13, VID4HCOEFF13 +*/ +#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_SHIFT (0) +#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_LENGTH (32) +#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4HCOEFF14_OFFSET (0x0658) + +/* PDP, VID4HCOEFF14, VID4HCOEFF14 +*/ +#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_SHIFT (0) +#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_LENGTH (32) +#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_SIGNED_FIELD IMG_FALSE 
+ +#define ODN_PDP_VID4HCOEFF15_OFFSET (0x065C) + +/* PDP, VID4HCOEFF15, VID4HCOEFF15 +*/ +#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_MASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_LSBMASK (0xFFFFFFFF) +#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_SHIFT (0) +#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_LENGTH (32) +#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4HCOEFF16_OFFSET (0x0660) + +/* PDP, VID4HCOEFF16, VID4HCOEFF16 +*/ +#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_MASK (0x000000FF) +#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_LSBMASK (0x000000FF) +#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_SHIFT (0) +#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_LENGTH (8) +#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4SCALESIZE_OFFSET (0x0664) + +/* PDP, VID4SCALESIZE, VID4SCALEWIDTH +*/ +#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_MASK (0x0FFF0000) +#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_LSBMASK (0x00000FFF) +#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_SHIFT (16) +#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_LENGTH (12) +#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4SCALESIZE, VID4SCALEHEIGHT +*/ +#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_MASK (0x00000FFF) +#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LSBMASK (0x00000FFF) +#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SHIFT (0) +#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LENGTH (12) +#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_PORTER_BLND0_OFFSET (0x0668) + +/* PDP, PORTER_BLND0, BLND0BLENDTYPE +*/ +#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_MASK (0x00000010) +#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_LSBMASK (0x00000001) +#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_SHIFT (4) +#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_LENGTH (1) +#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_SIGNED_FIELD IMG_FALSE + +/* PDP, PORTER_BLND0, BLND0PORTERMODE +*/ 
+#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_MASK (0x0000000F) +#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_LSBMASK (0x0000000F) +#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_SHIFT (0) +#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_LENGTH (4) +#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_PORTER_BLND1_OFFSET (0x066C) + +/* PDP, PORTER_BLND1, BLND1BLENDTYPE +*/ +#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_MASK (0x00000010) +#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_LSBMASK (0x00000001) +#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_SHIFT (4) +#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_LENGTH (1) +#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_SIGNED_FIELD IMG_FALSE + +/* PDP, PORTER_BLND1, BLND1PORTERMODE +*/ +#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_MASK (0x0000000F) +#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_LSBMASK (0x0000000F) +#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_SHIFT (0) +#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_LENGTH (4) +#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_PORTER_BLND2_OFFSET (0x0670) + +/* PDP, PORTER_BLND2, BLND2BLENDTYPE +*/ +#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_MASK (0x00000010) +#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_LSBMASK (0x00000001) +#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_SHIFT (4) +#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_LENGTH (1) +#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_SIGNED_FIELD IMG_FALSE + +/* PDP, PORTER_BLND2, BLND2PORTERMODE +*/ +#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_MASK (0x0000000F) +#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_LSBMASK (0x0000000F) +#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_SHIFT (0) +#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_LENGTH (4) +#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_PORTER_BLND3_OFFSET (0x0674) + +/* PDP, PORTER_BLND3, BLND3BLENDTYPE +*/ +#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_MASK (0x00000010) +#define 
ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_LSBMASK (0x00000001) +#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_SHIFT (4) +#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_LENGTH (1) +#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_SIGNED_FIELD IMG_FALSE + +/* PDP, PORTER_BLND3, BLND3PORTERMODE +*/ +#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_MASK (0x0000000F) +#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_LSBMASK (0x0000000F) +#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_SHIFT (0) +#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_LENGTH (4) +#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_PORTER_BLND4_OFFSET (0x0678) + +/* PDP, PORTER_BLND4, BLND4BLENDTYPE +*/ +#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_MASK (0x00000010) +#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_LSBMASK (0x00000001) +#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_SHIFT (4) +#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_LENGTH (1) +#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_SIGNED_FIELD IMG_FALSE + +/* PDP, PORTER_BLND4, BLND4PORTERMODE +*/ +#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_MASK (0x0000000F) +#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_LSBMASK (0x0000000F) +#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_SHIFT (0) +#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_LENGTH (4) +#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_PORTER_BLND5_OFFSET (0x067C) + +/* PDP, PORTER_BLND5, BLND5BLENDTYPE +*/ +#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_MASK (0x00000010) +#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_LSBMASK (0x00000001) +#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_SHIFT (4) +#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_LENGTH (1) +#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_SIGNED_FIELD IMG_FALSE + +/* PDP, PORTER_BLND5, BLND5PORTERMODE +*/ +#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_MASK (0x0000000F) +#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_LSBMASK (0x0000000F) +#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_SHIFT (0) +#define 
ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_LENGTH (4) +#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_PORTER_BLND6_OFFSET (0x0680) + +/* PDP, PORTER_BLND6, BLND6BLENDTYPE +*/ +#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_MASK (0x00000010) +#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_LSBMASK (0x00000001) +#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_SHIFT (4) +#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_LENGTH (1) +#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_SIGNED_FIELD IMG_FALSE + +/* PDP, PORTER_BLND6, BLND6PORTERMODE +*/ +#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_MASK (0x0000000F) +#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_LSBMASK (0x0000000F) +#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_SHIFT (0) +#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_LENGTH (4) +#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_PORTER_BLND7_OFFSET (0x0684) + +/* PDP, PORTER_BLND7, BLND7BLENDTYPE +*/ +#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_MASK (0x00000010) +#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_LSBMASK (0x00000001) +#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_SHIFT (4) +#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_LENGTH (1) +#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_SIGNED_FIELD IMG_FALSE + +/* PDP, PORTER_BLND7, BLND7PORTERMODE +*/ +#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_MASK (0x0000000F) +#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_LSBMASK (0x0000000F) +#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_SHIFT (0) +#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_LENGTH (4) +#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06C8) + +/* PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_TRANS +*/ +#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_MASK (0x03FF0000) +#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) +#define 
ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SHIFT (16) +#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LENGTH (10) +#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_OPAQUE +*/ +#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) +#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) +#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SHIFT (0) +#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LENGTH (10) +#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06CC) + +/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMAX +*/ +#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_MASK (0x03FF0000) +#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LSBMASK (0x000003FF) +#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SHIFT (16) +#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LENGTH (10) +#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMIN +*/ +#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_MASK (0x000003FF) +#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LSBMASK (0x000003FF) +#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SHIFT (0) +#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LENGTH (10) +#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1LUMAKEY_C_RG_OFFSET (0x06D0) + +/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_R +*/ +#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_MASK (0x0FFF0000) +#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LSBMASK (0x00000FFF) +#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SHIFT (16) +#define 
ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LENGTH (12) +#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_G +*/ +#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_MASK (0x00000FFF) +#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LSBMASK (0x00000FFF) +#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SHIFT (0) +#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LENGTH (12) +#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1LUMAKEY_C_B_OFFSET (0x06D4) + +/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYALPHAMULT +*/ +#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_MASK (0x20000000) +#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LSBMASK (0x00000001) +#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SHIFT (29) +#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LENGTH (1) +#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYEN +*/ +#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_MASK (0x10000000) +#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LSBMASK (0x00000001) +#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SHIFT (28) +#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LENGTH (1) +#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYOUTOFF +*/ +#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_MASK (0x03FF0000) +#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LSBMASK (0x000003FF) +#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SHIFT (16) +#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LENGTH (10) +#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYC_B +*/ +#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_MASK (0x00000FFF) +#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LSBMASK (0x00000FFF) +#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SHIFT (0) +#define 
ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LENGTH (12) +#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06D8) + +/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_TRANS +*/ +#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_MASK (0x03FF0000) +#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) +#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SHIFT (16) +#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LENGTH (10) +#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_OPAQUE +*/ +#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) +#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) +#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SHIFT (0) +#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LENGTH (10) +#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06DC) + +/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMAX +*/ +#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_MASK (0x03FF0000) +#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LSBMASK (0x000003FF) +#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SHIFT (16) +#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LENGTH (10) +#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMIN +*/ +#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_MASK (0x000003FF) +#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LSBMASK (0x000003FF) +#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SHIFT (0) +#define 
ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LENGTH (10) +#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2LUMAKEY_C_RG_OFFSET (0x06E0) + +/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_R +*/ +#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_MASK (0x0FFF0000) +#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LSBMASK (0x00000FFF) +#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SHIFT (16) +#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LENGTH (12) +#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_G +*/ +#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_MASK (0x00000FFF) +#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LSBMASK (0x00000FFF) +#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SHIFT (0) +#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LENGTH (12) +#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2LUMAKEY_C_B_OFFSET (0x06E4) + +/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYALPHAMULT +*/ +#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_MASK (0x20000000) +#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LSBMASK (0x00000001) +#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SHIFT (29) +#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LENGTH (1) +#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYEN +*/ +#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_MASK (0x10000000) +#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LSBMASK (0x00000001) +#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SHIFT (28) +#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LENGTH (1) +#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYOUTOFF +*/ +#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_MASK (0x03FF0000) +#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LSBMASK (0x000003FF) +#define 
ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SHIFT (16) +#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LENGTH (10) +#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYC_B +*/ +#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_MASK (0x00000FFF) +#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LSBMASK (0x00000FFF) +#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SHIFT (0) +#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LENGTH (12) +#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06E8) + +/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_TRANS +*/ +#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_MASK (0x03FF0000) +#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) +#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SHIFT (16) +#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LENGTH (10) +#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_OPAQUE +*/ +#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) +#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) +#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SHIFT (0) +#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LENGTH (10) +#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06EC) + +/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMAX +*/ +#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_MASK (0x03FF0000) +#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LSBMASK (0x000003FF) +#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SHIFT (16) 
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LENGTH (10) +#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMIN +*/ +#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_MASK (0x000003FF) +#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LSBMASK (0x000003FF) +#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SHIFT (0) +#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LENGTH (10) +#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3LUMAKEY_C_RG_OFFSET (0x06F0) + +/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_R +*/ +#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_MASK (0x0FFF0000) +#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LSBMASK (0x00000FFF) +#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SHIFT (16) +#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LENGTH (12) +#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_G +*/ +#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_MASK (0x00000FFF) +#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LSBMASK (0x00000FFF) +#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SHIFT (0) +#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LENGTH (12) +#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3LUMAKEY_C_B_OFFSET (0x06F4) + +/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYALPHAMULT +*/ +#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_MASK (0x20000000) +#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LSBMASK (0x00000001) +#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SHIFT (29) +#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LENGTH (1) +#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYEN +*/ +#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_MASK (0x10000000) +#define 
ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LSBMASK (0x00000001) +#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SHIFT (28) +#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LENGTH (1) +#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYOUTOFF +*/ +#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_MASK (0x03FF0000) +#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LSBMASK (0x000003FF) +#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SHIFT (16) +#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LENGTH (10) +#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYC_B +*/ +#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_MASK (0x00000FFF) +#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LSBMASK (0x00000FFF) +#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SHIFT (0) +#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LENGTH (12) +#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06F8) + +/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_TRANS +*/ +#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_MASK (0x03FF0000) +#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) +#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SHIFT (16) +#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LENGTH (10) +#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_OPAQUE +*/ +#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) +#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) +#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SHIFT (0) +#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LENGTH 
(10) +#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06FC) + +/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMAX +*/ +#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_MASK (0x03FF0000) +#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LSBMASK (0x000003FF) +#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SHIFT (16) +#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LENGTH (10) +#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMIN +*/ +#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_MASK (0x000003FF) +#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LSBMASK (0x000003FF) +#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SHIFT (0) +#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LENGTH (10) +#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4LUMAKEY_C_RG_OFFSET (0x0700) + +/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_R +*/ +#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_MASK (0x0FFF0000) +#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LSBMASK (0x00000FFF) +#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SHIFT (16) +#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LENGTH (12) +#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_G +*/ +#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_MASK (0x00000FFF) +#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LSBMASK (0x00000FFF) +#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SHIFT (0) +#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LENGTH (12) +#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4LUMAKEY_C_B_OFFSET (0x0704) + +/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYALPHAMULT +*/ +#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_MASK 
(0x20000000) +#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LSBMASK (0x00000001) +#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SHIFT (29) +#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LENGTH (1) +#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYEN +*/ +#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_MASK (0x10000000) +#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LSBMASK (0x00000001) +#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SHIFT (28) +#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LENGTH (1) +#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYOUTOFF +*/ +#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_MASK (0x03FF0000) +#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LSBMASK (0x000003FF) +#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SHIFT (16) +#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LENGTH (10) +#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYC_B +*/ +#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_MASK (0x00000FFF) +#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LSBMASK (0x00000FFF) +#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SHIFT (0) +#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LENGTH (12) +#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_CSCCOEFF0_OFFSET (0x0708) + +/* PDP, CSCCOEFF0, CSCCOEFFRU +*/ +#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_MASK (0x003FF800) +#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_LSBMASK (0x000007FF) +#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_SHIFT (11) +#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_LENGTH (11) +#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_SIGNED_FIELD IMG_FALSE + +/* PDP, CSCCOEFF0, CSCCOEFFRY +*/ +#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_MASK (0x000007FF) +#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_LSBMASK (0x000007FF) +#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_SHIFT (0) +#define 
ODN_PDP_CSCCOEFF0_CSCCOEFFRY_LENGTH (11) +#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_CSCCOEFF1_OFFSET (0x070C) + +/* PDP, CSCCOEFF1, CSCCOEFFGY +*/ +#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_MASK (0x003FF800) +#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_LSBMASK (0x000007FF) +#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_SHIFT (11) +#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_LENGTH (11) +#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_SIGNED_FIELD IMG_FALSE + +/* PDP, CSCCOEFF1, CSCCOEFFRV +*/ +#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_MASK (0x000007FF) +#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_LSBMASK (0x000007FF) +#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_SHIFT (0) +#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_LENGTH (11) +#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_CSCCOEFF2_OFFSET (0x0710) + +/* PDP, CSCCOEFF2, CSCCOEFFGV +*/ +#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_MASK (0x003FF800) +#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_LSBMASK (0x000007FF) +#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_SHIFT (11) +#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_LENGTH (11) +#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_SIGNED_FIELD IMG_FALSE + +/* PDP, CSCCOEFF2, CSCCOEFFGU +*/ +#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_MASK (0x000007FF) +#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_LSBMASK (0x000007FF) +#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_SHIFT (0) +#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_LENGTH (11) +#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_CSCCOEFF3_OFFSET (0x0714) + +/* PDP, CSCCOEFF3, CSCCOEFFBU +*/ +#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_MASK (0x003FF800) +#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_LSBMASK (0x000007FF) +#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_SHIFT (11) +#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_LENGTH (11) +#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_SIGNED_FIELD IMG_FALSE + +/* PDP, CSCCOEFF3, CSCCOEFFBY +*/ +#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_MASK (0x000007FF) +#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_LSBMASK (0x000007FF) +#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_SHIFT (0) 
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_LENGTH (11) +#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_CSCCOEFF4_OFFSET (0x0718) + +/* PDP, CSCCOEFF4, CSCCOEFFBV +*/ +#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_MASK (0x000007FF) +#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_LSBMASK (0x000007FF) +#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_SHIFT (0) +#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_LENGTH (11) +#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_BGNDCOL_AR_OFFSET (0x071C) + +/* PDP, BGNDCOL_AR, BGNDCOL_A +*/ +#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_MASK (0x03FF0000) +#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_LSBMASK (0x000003FF) +#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_SHIFT (16) +#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_LENGTH (10) +#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_SIGNED_FIELD IMG_FALSE + +/* PDP, BGNDCOL_AR, BGNDCOL_R +*/ +#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_MASK (0x000003FF) +#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_LSBMASK (0x000003FF) +#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_SHIFT (0) +#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_LENGTH (10) +#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_BGNDCOL_GB_OFFSET (0x0720) + +/* PDP, BGNDCOL_GB, BGNDCOL_G +*/ +#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_MASK (0x03FF0000) +#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_LSBMASK (0x000003FF) +#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_SHIFT (16) +#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_LENGTH (10) +#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_SIGNED_FIELD IMG_FALSE + +/* PDP, BGNDCOL_GB, BGNDCOL_B +*/ +#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_MASK (0x000003FF) +#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_LSBMASK (0x000003FF) +#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_SHIFT (0) +#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_LENGTH (10) +#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_BORDCOL_R_OFFSET (0x0724) + +/* PDP, BORDCOL_R, BORDCOL_R +*/ +#define ODN_PDP_BORDCOL_R_BORDCOL_R_MASK (0x000003FF) +#define ODN_PDP_BORDCOL_R_BORDCOL_R_LSBMASK (0x000003FF) 
+#define ODN_PDP_BORDCOL_R_BORDCOL_R_SHIFT (0) +#define ODN_PDP_BORDCOL_R_BORDCOL_R_LENGTH (10) +#define ODN_PDP_BORDCOL_R_BORDCOL_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_BORDCOL_GB_OFFSET (0x0728) + +/* PDP, BORDCOL_GB, BORDCOL_G +*/ +#define ODN_PDP_BORDCOL_GB_BORDCOL_G_MASK (0x03FF0000) +#define ODN_PDP_BORDCOL_GB_BORDCOL_G_LSBMASK (0x000003FF) +#define ODN_PDP_BORDCOL_GB_BORDCOL_G_SHIFT (16) +#define ODN_PDP_BORDCOL_GB_BORDCOL_G_LENGTH (10) +#define ODN_PDP_BORDCOL_GB_BORDCOL_G_SIGNED_FIELD IMG_FALSE + +/* PDP, BORDCOL_GB, BORDCOL_B +*/ +#define ODN_PDP_BORDCOL_GB_BORDCOL_B_MASK (0x000003FF) +#define ODN_PDP_BORDCOL_GB_BORDCOL_B_LSBMASK (0x000003FF) +#define ODN_PDP_BORDCOL_GB_BORDCOL_B_SHIFT (0) +#define ODN_PDP_BORDCOL_GB_BORDCOL_B_LENGTH (10) +#define ODN_PDP_BORDCOL_GB_BORDCOL_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_LINESTAT_OFFSET (0x0734) + +/* PDP, LINESTAT, LINENO +*/ +#define ODN_PDP_LINESTAT_LINENO_MASK (0x00001FFF) +#define ODN_PDP_LINESTAT_LINENO_LSBMASK (0x00001FFF) +#define ODN_PDP_LINESTAT_LINENO_SHIFT (0) +#define ODN_PDP_LINESTAT_LINENO_LENGTH (13) +#define ODN_PDP_LINESTAT_LINENO_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_OFFSET (0x0738) + +/* PDP, CR_ODN_PDP_PROCAMP_C11C12, CR_PROCAMP_C12 +*/ +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_MASK (0x3FFF0000) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LSBMASK (0x00003FFF) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SHIFT (16) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LENGTH (14) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SIGNED_FIELD IMG_FALSE + +/* PDP, CR_ODN_PDP_PROCAMP_C11C12, CR_PROCAMP_C11 +*/ +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_MASK (0x00003FFF) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LSBMASK (0x00003FFF) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SHIFT (0) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LENGTH (14) 
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_OFFSET (0x073C) + +/* PDP, CR_ODN_PDP_PROCAMP_C13C21, CR_PROCAMP_C21 +*/ +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_MASK (0x3FFF0000) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LSBMASK (0x00003FFF) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SHIFT (16) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LENGTH (14) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SIGNED_FIELD IMG_FALSE + +/* PDP, CR_ODN_PDP_PROCAMP_C13C21, CR_PROCAMP_C13 +*/ +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_MASK (0x00003FFF) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LSBMASK (0x00003FFF) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SHIFT (0) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LENGTH (14) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_OFFSET (0x0740) + +/* PDP, CR_ODN_PDP_PROCAMP_C22C23, CR_PROCAMP_C23 +*/ +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_MASK (0x3FFF0000) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LSBMASK (0x00003FFF) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SHIFT (16) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LENGTH (14) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SIGNED_FIELD IMG_FALSE + +/* PDP, CR_ODN_PDP_PROCAMP_C22C23, CR_PROCAMP_C22 +*/ +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_MASK (0x00003FFF) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LSBMASK (0x00003FFF) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SHIFT (0) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LENGTH (14) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_OFFSET (0x0744) + +/* 
PDP, CR_ODN_PDP_PROCAMP_C31C32, CR_PROCAMP_C32 +*/ +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_MASK (0x3FFF0000) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LSBMASK (0x00003FFF) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SHIFT (16) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LENGTH (14) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SIGNED_FIELD IMG_FALSE + +/* PDP, CR_ODN_PDP_PROCAMP_C31C32, CR_PROCAMP_C31 +*/ +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_MASK (0x00003FFF) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LSBMASK (0x00003FFF) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SHIFT (0) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LENGTH (14) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_OFFSET (0x0748) + +/* PDP, CR_ODN_PDP_PROCAMP_C33, CR_PROCAMP_C33 +*/ +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_MASK (0x3FFF0000) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_LSBMASK (0x00003FFF) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_SHIFT (16) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_LENGTH (14) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_SIGNED_FIELD IMG_FALSE + +/* PDP, CR_ODN_PDP_PROCAMP_C33, CR_PROCAMP_RANGE +*/ +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_MASK (0x00000030) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LSBMASK (0x00000003) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SHIFT (4) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LENGTH (2) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SIGNED_FIELD IMG_FALSE + +/* PDP, CR_ODN_PDP_PROCAMP_C33, CR_PROCAMP_EN +*/ +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_MASK (0x00000001) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_LSBMASK (0x00000001) +#define 
ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_SHIFT (0) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_LENGTH (1) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_OFFSET (0x074C) + +/* PDP, CR_ODN_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_G +*/ +#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_MASK (0x0FFF0000) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LSBMASK (0x00000FFF) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SHIFT (16) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LENGTH (12) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SIGNED_FIELD IMG_FALSE + +/* PDP, CR_ODN_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_B +*/ +#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_MASK (0x00000FFF) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LSBMASK (0x00000FFF) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SHIFT (0) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LENGTH (12) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_OFFSET (0x0750) + +/* PDP, CR_ODN_PDP_PROCAMP_OUTOFFSET_R, CR_PROCAMP_OUTOFF_R +*/ +#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_MASK (0x00000FFF) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LSBMASK (0x00000FFF) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SHIFT (0) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LENGTH (12) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_OFFSET (0x0754) + +/* PDP, CR_ODN_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_G +*/ +#define 
ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_MASK (0x03FF0000) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LSBMASK (0x000003FF) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SHIFT (16) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LENGTH (10) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SIGNED_FIELD IMG_FALSE + +/* PDP, CR_ODN_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_B +*/ +#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_MASK (0x000003FF) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LSBMASK (0x000003FF) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SHIFT (0) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LENGTH (10) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_OFFSET (0x0758) + +/* PDP, CR_ODN_PDP_PROCAMP_INOFFSET_R, CR_PROCAMP_INOFF_R +*/ +#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_MASK (0x000003FF) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LSBMASK (0x000003FF) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SHIFT (0) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LENGTH (10) +#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_SIGNAT_R_OFFSET (0x075C) + +/* PDP, SIGNAT_R, SIGNATURE_R +*/ +#define ODN_PDP_SIGNAT_R_SIGNATURE_R_MASK (0x000003FF) +#define ODN_PDP_SIGNAT_R_SIGNATURE_R_LSBMASK (0x000003FF) +#define ODN_PDP_SIGNAT_R_SIGNATURE_R_SHIFT (0) +#define ODN_PDP_SIGNAT_R_SIGNATURE_R_LENGTH (10) +#define ODN_PDP_SIGNAT_R_SIGNATURE_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_SIGNAT_GB_OFFSET (0x0760) + +/* PDP, SIGNAT_GB, SIGNATURE_G +*/ +#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_MASK (0x03FF0000) +#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_LSBMASK (0x000003FF) 
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_SHIFT (16) +#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_LENGTH (10) +#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_SIGNED_FIELD IMG_FALSE + +/* PDP, SIGNAT_GB, SIGNATURE_B +*/ +#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_MASK (0x000003FF) +#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_LSBMASK (0x000003FF) +#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_SHIFT (0) +#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_LENGTH (10) +#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_REGISTER_UPDATE_CTRL_OFFSET (0x0764) + +/* PDP, REGISTER_UPDATE_CTRL, BYPASS_DOUBLE_BUFFERING +*/ +#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_MASK (0x00000004) +#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LSBMASK (0x00000001) +#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SHIFT (2) +#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LENGTH (1) +#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SIGNED_FIELD IMG_FALSE + +/* PDP, REGISTER_UPDATE_CTRL, REGISTERS_VALID +*/ +#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_MASK (0x00000002) +#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LSBMASK (0x00000001) +#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SHIFT (1) +#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LENGTH (1) +#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SIGNED_FIELD IMG_FALSE + +/* PDP, REGISTER_UPDATE_CTRL, USE_VBLANK +*/ +#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_MASK (0x00000001) +#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LSBMASK (0x00000001) +#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SHIFT (0) +#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LENGTH (1) +#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_REGISTER_UPDATE_STATUS_OFFSET (0x0768) + +/* PDP, REGISTER_UPDATE_STATUS, REGISTERS_UPDATED +*/ +#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_MASK (0x00000002) +#define 
ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LSBMASK (0x00000001) +#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SHIFT (1) +#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LENGTH (1) +#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_DBGCTRL_OFFSET (0x076C) + +/* PDP, DBGCTRL, DBG_READ +*/ +#define ODN_PDP_DBGCTRL_DBG_READ_MASK (0x00000002) +#define ODN_PDP_DBGCTRL_DBG_READ_LSBMASK (0x00000001) +#define ODN_PDP_DBGCTRL_DBG_READ_SHIFT (1) +#define ODN_PDP_DBGCTRL_DBG_READ_LENGTH (1) +#define ODN_PDP_DBGCTRL_DBG_READ_SIGNED_FIELD IMG_FALSE + +/* PDP, DBGCTRL, DBG_ENAB +*/ +#define ODN_PDP_DBGCTRL_DBG_ENAB_MASK (0x00000001) +#define ODN_PDP_DBGCTRL_DBG_ENAB_LSBMASK (0x00000001) +#define ODN_PDP_DBGCTRL_DBG_ENAB_SHIFT (0) +#define ODN_PDP_DBGCTRL_DBG_ENAB_LENGTH (1) +#define ODN_PDP_DBGCTRL_DBG_ENAB_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_DBGDATA_R_OFFSET (0x0770) + +/* PDP, DBGDATA_R, DBG_DATA_R +*/ +#define ODN_PDP_DBGDATA_R_DBG_DATA_R_MASK (0x000003FF) +#define ODN_PDP_DBGDATA_R_DBG_DATA_R_LSBMASK (0x000003FF) +#define ODN_PDP_DBGDATA_R_DBG_DATA_R_SHIFT (0) +#define ODN_PDP_DBGDATA_R_DBG_DATA_R_LENGTH (10) +#define ODN_PDP_DBGDATA_R_DBG_DATA_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_DBGDATA_GB_OFFSET (0x0774) + +/* PDP, DBGDATA_GB, DBG_DATA_G +*/ +#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_MASK (0x03FF0000) +#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_LSBMASK (0x000003FF) +#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_SHIFT (16) +#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_LENGTH (10) +#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_SIGNED_FIELD IMG_FALSE + +/* PDP, DBGDATA_GB, DBG_DATA_B +*/ +#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_MASK (0x000003FF) +#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_LSBMASK (0x000003FF) +#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_SHIFT (0) +#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_LENGTH (10) +#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_DBGSIDE_OFFSET (0x0778) + +/* PDP, 
DBGSIDE, DBG_VAL +*/ +#define ODN_PDP_DBGSIDE_DBG_VAL_MASK (0x00000008) +#define ODN_PDP_DBGSIDE_DBG_VAL_LSBMASK (0x00000001) +#define ODN_PDP_DBGSIDE_DBG_VAL_SHIFT (3) +#define ODN_PDP_DBGSIDE_DBG_VAL_LENGTH (1) +#define ODN_PDP_DBGSIDE_DBG_VAL_SIGNED_FIELD IMG_FALSE + +/* PDP, DBGSIDE, DBG_SIDE +*/ +#define ODN_PDP_DBGSIDE_DBG_SIDE_MASK (0x00000007) +#define ODN_PDP_DBGSIDE_DBG_SIDE_LSBMASK (0x00000007) +#define ODN_PDP_DBGSIDE_DBG_SIDE_SHIFT (0) +#define ODN_PDP_DBGSIDE_DBG_SIDE_LENGTH (3) +#define ODN_PDP_DBGSIDE_DBG_SIDE_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_OUTPUT_OFFSET (0x077C) + +/* PDP, OUTPUT, EIGHT_BIT_OUTPUT +*/ +#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_MASK (0x00000002) +#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_LSBMASK (0x00000001) +#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_SHIFT (1) +#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_LENGTH (1) +#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_SIGNED_FIELD IMG_FALSE + +/* PDP, OUTPUT, OUTPUT_CONFIG +*/ +#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_MASK (0x00000001) +#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_LSBMASK (0x00000001) +#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_SHIFT (0) +#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_LENGTH (1) +#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_SYNCCTRL_OFFSET (0x0780) + +/* PDP, SYNCCTRL, SYNCACTIVE +*/ +#define ODN_PDP_SYNCCTRL_SYNCACTIVE_MASK (0x80000000) +#define ODN_PDP_SYNCCTRL_SYNCACTIVE_LSBMASK (0x00000001) +#define ODN_PDP_SYNCCTRL_SYNCACTIVE_SHIFT (31) +#define ODN_PDP_SYNCCTRL_SYNCACTIVE_LENGTH (1) +#define ODN_PDP_SYNCCTRL_SYNCACTIVE_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, ODN_PDP_RST +*/ +#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_MASK (0x20000000) +#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_LSBMASK (0x00000001) +#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_SHIFT (29) +#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_LENGTH (1) +#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, POWERDN +*/ +#define ODN_PDP_SYNCCTRL_POWERDN_MASK (0x10000000) +#define 
ODN_PDP_SYNCCTRL_POWERDN_LSBMASK (0x00000001) +#define ODN_PDP_SYNCCTRL_POWERDN_SHIFT (28) +#define ODN_PDP_SYNCCTRL_POWERDN_LENGTH (1) +#define ODN_PDP_SYNCCTRL_POWERDN_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, LOWPWRMODE +*/ +#define ODN_PDP_SYNCCTRL_LOWPWRMODE_MASK (0x08000000) +#define ODN_PDP_SYNCCTRL_LOWPWRMODE_LSBMASK (0x00000001) +#define ODN_PDP_SYNCCTRL_LOWPWRMODE_SHIFT (27) +#define ODN_PDP_SYNCCTRL_LOWPWRMODE_LENGTH (1) +#define ODN_PDP_SYNCCTRL_LOWPWRMODE_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, UPDSYNCTRL +*/ +#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_MASK (0x04000000) +#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_LSBMASK (0x00000001) +#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_SHIFT (26) +#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_LENGTH (1) +#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, UPDINTCTRL +*/ +#define ODN_PDP_SYNCCTRL_UPDINTCTRL_MASK (0x02000000) +#define ODN_PDP_SYNCCTRL_UPDINTCTRL_LSBMASK (0x00000001) +#define ODN_PDP_SYNCCTRL_UPDINTCTRL_SHIFT (25) +#define ODN_PDP_SYNCCTRL_UPDINTCTRL_LENGTH (1) +#define ODN_PDP_SYNCCTRL_UPDINTCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, UPDCTRL +*/ +#define ODN_PDP_SYNCCTRL_UPDCTRL_MASK (0x01000000) +#define ODN_PDP_SYNCCTRL_UPDCTRL_LSBMASK (0x00000001) +#define ODN_PDP_SYNCCTRL_UPDCTRL_SHIFT (24) +#define ODN_PDP_SYNCCTRL_UPDCTRL_LENGTH (1) +#define ODN_PDP_SYNCCTRL_UPDCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, UPDWAIT +*/ +#define ODN_PDP_SYNCCTRL_UPDWAIT_MASK (0x000F0000) +#define ODN_PDP_SYNCCTRL_UPDWAIT_LSBMASK (0x0000000F) +#define ODN_PDP_SYNCCTRL_UPDWAIT_SHIFT (16) +#define ODN_PDP_SYNCCTRL_UPDWAIT_LENGTH (4) +#define ODN_PDP_SYNCCTRL_UPDWAIT_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, FIELD_EN +*/ +#define ODN_PDP_SYNCCTRL_FIELD_EN_MASK (0x00002000) +#define ODN_PDP_SYNCCTRL_FIELD_EN_LSBMASK (0x00000001) +#define ODN_PDP_SYNCCTRL_FIELD_EN_SHIFT (13) +#define ODN_PDP_SYNCCTRL_FIELD_EN_LENGTH (1) +#define ODN_PDP_SYNCCTRL_FIELD_EN_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, 
CSYNC_EN +*/ +#define ODN_PDP_SYNCCTRL_CSYNC_EN_MASK (0x00001000) +#define ODN_PDP_SYNCCTRL_CSYNC_EN_LSBMASK (0x00000001) +#define ODN_PDP_SYNCCTRL_CSYNC_EN_SHIFT (12) +#define ODN_PDP_SYNCCTRL_CSYNC_EN_LENGTH (1) +#define ODN_PDP_SYNCCTRL_CSYNC_EN_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, CLKPOL +*/ +#define ODN_PDP_SYNCCTRL_CLKPOL_MASK (0x00000800) +#define ODN_PDP_SYNCCTRL_CLKPOL_LSBMASK (0x00000001) +#define ODN_PDP_SYNCCTRL_CLKPOL_SHIFT (11) +#define ODN_PDP_SYNCCTRL_CLKPOL_LENGTH (1) +#define ODN_PDP_SYNCCTRL_CLKPOL_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, VS_SLAVE +*/ +#define ODN_PDP_SYNCCTRL_VS_SLAVE_MASK (0x00000080) +#define ODN_PDP_SYNCCTRL_VS_SLAVE_LSBMASK (0x00000001) +#define ODN_PDP_SYNCCTRL_VS_SLAVE_SHIFT (7) +#define ODN_PDP_SYNCCTRL_VS_SLAVE_LENGTH (1) +#define ODN_PDP_SYNCCTRL_VS_SLAVE_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, HS_SLAVE +*/ +#define ODN_PDP_SYNCCTRL_HS_SLAVE_MASK (0x00000040) +#define ODN_PDP_SYNCCTRL_HS_SLAVE_LSBMASK (0x00000001) +#define ODN_PDP_SYNCCTRL_HS_SLAVE_SHIFT (6) +#define ODN_PDP_SYNCCTRL_HS_SLAVE_LENGTH (1) +#define ODN_PDP_SYNCCTRL_HS_SLAVE_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, BLNKPOL +*/ +#define ODN_PDP_SYNCCTRL_BLNKPOL_MASK (0x00000020) +#define ODN_PDP_SYNCCTRL_BLNKPOL_LSBMASK (0x00000001) +#define ODN_PDP_SYNCCTRL_BLNKPOL_SHIFT (5) +#define ODN_PDP_SYNCCTRL_BLNKPOL_LENGTH (1) +#define ODN_PDP_SYNCCTRL_BLNKPOL_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, BLNKDIS +*/ +#define ODN_PDP_SYNCCTRL_BLNKDIS_MASK (0x00000010) +#define ODN_PDP_SYNCCTRL_BLNKDIS_LSBMASK (0x00000001) +#define ODN_PDP_SYNCCTRL_BLNKDIS_SHIFT (4) +#define ODN_PDP_SYNCCTRL_BLNKDIS_LENGTH (1) +#define ODN_PDP_SYNCCTRL_BLNKDIS_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, VSPOL +*/ +#define ODN_PDP_SYNCCTRL_VSPOL_MASK (0x00000008) +#define ODN_PDP_SYNCCTRL_VSPOL_LSBMASK (0x00000001) +#define ODN_PDP_SYNCCTRL_VSPOL_SHIFT (3) +#define ODN_PDP_SYNCCTRL_VSPOL_LENGTH (1) +#define ODN_PDP_SYNCCTRL_VSPOL_SIGNED_FIELD IMG_FALSE + +/* PDP, 
SYNCCTRL, VSDIS +*/ +#define ODN_PDP_SYNCCTRL_VSDIS_MASK (0x00000004) +#define ODN_PDP_SYNCCTRL_VSDIS_LSBMASK (0x00000001) +#define ODN_PDP_SYNCCTRL_VSDIS_SHIFT (2) +#define ODN_PDP_SYNCCTRL_VSDIS_LENGTH (1) +#define ODN_PDP_SYNCCTRL_VSDIS_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, HSPOL +*/ +#define ODN_PDP_SYNCCTRL_HSPOL_MASK (0x00000002) +#define ODN_PDP_SYNCCTRL_HSPOL_LSBMASK (0x00000001) +#define ODN_PDP_SYNCCTRL_HSPOL_SHIFT (1) +#define ODN_PDP_SYNCCTRL_HSPOL_LENGTH (1) +#define ODN_PDP_SYNCCTRL_HSPOL_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, HSDIS +*/ +#define ODN_PDP_SYNCCTRL_HSDIS_MASK (0x00000001) +#define ODN_PDP_SYNCCTRL_HSDIS_LSBMASK (0x00000001) +#define ODN_PDP_SYNCCTRL_HSDIS_SHIFT (0) +#define ODN_PDP_SYNCCTRL_HSDIS_LENGTH (1) +#define ODN_PDP_SYNCCTRL_HSDIS_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_HSYNC1_OFFSET (0x0784) + +/* PDP, HSYNC1, HBPS +*/ +#define ODN_PDP_HSYNC1_HBPS_MASK (0x1FFF0000) +#define ODN_PDP_HSYNC1_HBPS_LSBMASK (0x00001FFF) +#define ODN_PDP_HSYNC1_HBPS_SHIFT (16) +#define ODN_PDP_HSYNC1_HBPS_LENGTH (13) +#define ODN_PDP_HSYNC1_HBPS_SIGNED_FIELD IMG_FALSE + +/* PDP, HSYNC1, HT +*/ +#define ODN_PDP_HSYNC1_HT_MASK (0x00001FFF) +#define ODN_PDP_HSYNC1_HT_LSBMASK (0x00001FFF) +#define ODN_PDP_HSYNC1_HT_SHIFT (0) +#define ODN_PDP_HSYNC1_HT_LENGTH (13) +#define ODN_PDP_HSYNC1_HT_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_HSYNC2_OFFSET (0x0788) + +/* PDP, HSYNC2, HAS +*/ +#define ODN_PDP_HSYNC2_HAS_MASK (0x1FFF0000) +#define ODN_PDP_HSYNC2_HAS_LSBMASK (0x00001FFF) +#define ODN_PDP_HSYNC2_HAS_SHIFT (16) +#define ODN_PDP_HSYNC2_HAS_LENGTH (13) +#define ODN_PDP_HSYNC2_HAS_SIGNED_FIELD IMG_FALSE + +/* PDP, HSYNC2, HLBS +*/ +#define ODN_PDP_HSYNC2_HLBS_MASK (0x00001FFF) +#define ODN_PDP_HSYNC2_HLBS_LSBMASK (0x00001FFF) +#define ODN_PDP_HSYNC2_HLBS_SHIFT (0) +#define ODN_PDP_HSYNC2_HLBS_LENGTH (13) +#define ODN_PDP_HSYNC2_HLBS_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_HSYNC3_OFFSET (0x078C) + +/* PDP, HSYNC3, HFPS +*/ +#define 
ODN_PDP_HSYNC3_HFPS_MASK (0x1FFF0000) +#define ODN_PDP_HSYNC3_HFPS_LSBMASK (0x00001FFF) +#define ODN_PDP_HSYNC3_HFPS_SHIFT (16) +#define ODN_PDP_HSYNC3_HFPS_LENGTH (13) +#define ODN_PDP_HSYNC3_HFPS_SIGNED_FIELD IMG_FALSE + +/* PDP, HSYNC3, HRBS +*/ +#define ODN_PDP_HSYNC3_HRBS_MASK (0x00001FFF) +#define ODN_PDP_HSYNC3_HRBS_LSBMASK (0x00001FFF) +#define ODN_PDP_HSYNC3_HRBS_SHIFT (0) +#define ODN_PDP_HSYNC3_HRBS_LENGTH (13) +#define ODN_PDP_HSYNC3_HRBS_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VSYNC1_OFFSET (0x0790) + +/* PDP, VSYNC1, VBPS +*/ +#define ODN_PDP_VSYNC1_VBPS_MASK (0x1FFF0000) +#define ODN_PDP_VSYNC1_VBPS_LSBMASK (0x00001FFF) +#define ODN_PDP_VSYNC1_VBPS_SHIFT (16) +#define ODN_PDP_VSYNC1_VBPS_LENGTH (13) +#define ODN_PDP_VSYNC1_VBPS_SIGNED_FIELD IMG_FALSE + +/* PDP, VSYNC1, VT +*/ +#define ODN_PDP_VSYNC1_VT_MASK (0x00001FFF) +#define ODN_PDP_VSYNC1_VT_LSBMASK (0x00001FFF) +#define ODN_PDP_VSYNC1_VT_SHIFT (0) +#define ODN_PDP_VSYNC1_VT_LENGTH (13) +#define ODN_PDP_VSYNC1_VT_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VSYNC2_OFFSET (0x0794) + +/* PDP, VSYNC2, VAS +*/ +#define ODN_PDP_VSYNC2_VAS_MASK (0x1FFF0000) +#define ODN_PDP_VSYNC2_VAS_LSBMASK (0x00001FFF) +#define ODN_PDP_VSYNC2_VAS_SHIFT (16) +#define ODN_PDP_VSYNC2_VAS_LENGTH (13) +#define ODN_PDP_VSYNC2_VAS_SIGNED_FIELD IMG_FALSE + +/* PDP, VSYNC2, VTBS +*/ +#define ODN_PDP_VSYNC2_VTBS_MASK (0x00001FFF) +#define ODN_PDP_VSYNC2_VTBS_LSBMASK (0x00001FFF) +#define ODN_PDP_VSYNC2_VTBS_SHIFT (0) +#define ODN_PDP_VSYNC2_VTBS_LENGTH (13) +#define ODN_PDP_VSYNC2_VTBS_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VSYNC3_OFFSET (0x0798) + +/* PDP, VSYNC3, VFPS +*/ +#define ODN_PDP_VSYNC3_VFPS_MASK (0x1FFF0000) +#define ODN_PDP_VSYNC3_VFPS_LSBMASK (0x00001FFF) +#define ODN_PDP_VSYNC3_VFPS_SHIFT (16) +#define ODN_PDP_VSYNC3_VFPS_LENGTH (13) +#define ODN_PDP_VSYNC3_VFPS_SIGNED_FIELD IMG_FALSE + +/* PDP, VSYNC3, VBBS +*/ +#define ODN_PDP_VSYNC3_VBBS_MASK (0x00001FFF) +#define ODN_PDP_VSYNC3_VBBS_LSBMASK (0x00001FFF) 
+#define ODN_PDP_VSYNC3_VBBS_SHIFT (0) +#define ODN_PDP_VSYNC3_VBBS_LENGTH (13) +#define ODN_PDP_VSYNC3_VBBS_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_INTSTAT_OFFSET (0x079C) + +/* PDP, INTSTAT, INTS_VID4ORUN +*/ +#define ODN_PDP_INTSTAT_INTS_VID4ORUN_MASK (0x00080000) +#define ODN_PDP_INTSTAT_INTS_VID4ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTSTAT_INTS_VID4ORUN_SHIFT (19) +#define ODN_PDP_INTSTAT_INTS_VID4ORUN_LENGTH (1) +#define ODN_PDP_INTSTAT_INTS_VID4ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_VID3ORUN +*/ +#define ODN_PDP_INTSTAT_INTS_VID3ORUN_MASK (0x00040000) +#define ODN_PDP_INTSTAT_INTS_VID3ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTSTAT_INTS_VID3ORUN_SHIFT (18) +#define ODN_PDP_INTSTAT_INTS_VID3ORUN_LENGTH (1) +#define ODN_PDP_INTSTAT_INTS_VID3ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_VID2ORUN +*/ +#define ODN_PDP_INTSTAT_INTS_VID2ORUN_MASK (0x00020000) +#define ODN_PDP_INTSTAT_INTS_VID2ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTSTAT_INTS_VID2ORUN_SHIFT (17) +#define ODN_PDP_INTSTAT_INTS_VID2ORUN_LENGTH (1) +#define ODN_PDP_INTSTAT_INTS_VID2ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_VID1ORUN +*/ +#define ODN_PDP_INTSTAT_INTS_VID1ORUN_MASK (0x00010000) +#define ODN_PDP_INTSTAT_INTS_VID1ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTSTAT_INTS_VID1ORUN_SHIFT (16) +#define ODN_PDP_INTSTAT_INTS_VID1ORUN_LENGTH (1) +#define ODN_PDP_INTSTAT_INTS_VID1ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_GRPH4ORUN +*/ +#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_MASK (0x00008000) +#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_SHIFT (15) +#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_LENGTH (1) +#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_GRPH3ORUN +*/ +#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_MASK (0x00004000) +#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_SHIFT (14) +#define 
ODN_PDP_INTSTAT_INTS_GRPH3ORUN_LENGTH (1) +#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_GRPH2ORUN +*/ +#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_MASK (0x00002000) +#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_SHIFT (13) +#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_LENGTH (1) +#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_GRPH1ORUN +*/ +#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_MASK (0x00001000) +#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_SHIFT (12) +#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_LENGTH (1) +#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_VID4URUN +*/ +#define ODN_PDP_INTSTAT_INTS_VID4URUN_MASK (0x00000800) +#define ODN_PDP_INTSTAT_INTS_VID4URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTSTAT_INTS_VID4URUN_SHIFT (11) +#define ODN_PDP_INTSTAT_INTS_VID4URUN_LENGTH (1) +#define ODN_PDP_INTSTAT_INTS_VID4URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_VID3URUN +*/ +#define ODN_PDP_INTSTAT_INTS_VID3URUN_MASK (0x00000400) +#define ODN_PDP_INTSTAT_INTS_VID3URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTSTAT_INTS_VID3URUN_SHIFT (10) +#define ODN_PDP_INTSTAT_INTS_VID3URUN_LENGTH (1) +#define ODN_PDP_INTSTAT_INTS_VID3URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_VID2URUN +*/ +#define ODN_PDP_INTSTAT_INTS_VID2URUN_MASK (0x00000200) +#define ODN_PDP_INTSTAT_INTS_VID2URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTSTAT_INTS_VID2URUN_SHIFT (9) +#define ODN_PDP_INTSTAT_INTS_VID2URUN_LENGTH (1) +#define ODN_PDP_INTSTAT_INTS_VID2URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_VID1URUN +*/ +#define ODN_PDP_INTSTAT_INTS_VID1URUN_MASK (0x00000100) +#define ODN_PDP_INTSTAT_INTS_VID1URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTSTAT_INTS_VID1URUN_SHIFT (8) +#define ODN_PDP_INTSTAT_INTS_VID1URUN_LENGTH (1) +#define 
ODN_PDP_INTSTAT_INTS_VID1URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_GRPH4URUN +*/ +#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_MASK (0x00000080) +#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_SHIFT (7) +#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_LENGTH (1) +#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_GRPH3URUN +*/ +#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_MASK (0x00000040) +#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_SHIFT (6) +#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_LENGTH (1) +#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_GRPH2URUN +*/ +#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_MASK (0x00000020) +#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_SHIFT (5) +#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_LENGTH (1) +#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_GRPH1URUN +*/ +#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_MASK (0x00000010) +#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_SHIFT (4) +#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_LENGTH (1) +#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_VBLNK1 +*/ +#define ODN_PDP_INTSTAT_INTS_VBLNK1_MASK (0x00000008) +#define ODN_PDP_INTSTAT_INTS_VBLNK1_LSBMASK (0x00000001) +#define ODN_PDP_INTSTAT_INTS_VBLNK1_SHIFT (3) +#define ODN_PDP_INTSTAT_INTS_VBLNK1_LENGTH (1) +#define ODN_PDP_INTSTAT_INTS_VBLNK1_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_VBLNK0 +*/ +#define ODN_PDP_INTSTAT_INTS_VBLNK0_MASK (0x00000004) +#define ODN_PDP_INTSTAT_INTS_VBLNK0_LSBMASK (0x00000001) +#define ODN_PDP_INTSTAT_INTS_VBLNK0_SHIFT (2) +#define ODN_PDP_INTSTAT_INTS_VBLNK0_LENGTH (1) +#define ODN_PDP_INTSTAT_INTS_VBLNK0_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_HBLNK1 +*/ +#define 
ODN_PDP_INTSTAT_INTS_HBLNK1_MASK (0x00000002) +#define ODN_PDP_INTSTAT_INTS_HBLNK1_LSBMASK (0x00000001) +#define ODN_PDP_INTSTAT_INTS_HBLNK1_SHIFT (1) +#define ODN_PDP_INTSTAT_INTS_HBLNK1_LENGTH (1) +#define ODN_PDP_INTSTAT_INTS_HBLNK1_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_HBLNK0 +*/ +#define ODN_PDP_INTSTAT_INTS_HBLNK0_MASK (0x00000001) +#define ODN_PDP_INTSTAT_INTS_HBLNK0_LSBMASK (0x00000001) +#define ODN_PDP_INTSTAT_INTS_HBLNK0_SHIFT (0) +#define ODN_PDP_INTSTAT_INTS_HBLNK0_LENGTH (1) +#define ODN_PDP_INTSTAT_INTS_HBLNK0_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_INTENAB_OFFSET (0x07A0) + +/* PDP, INTENAB, INTEN_VID4ORUN +*/ +#define ODN_PDP_INTENAB_INTEN_VID4ORUN_MASK (0x00080000) +#define ODN_PDP_INTENAB_INTEN_VID4ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTENAB_INTEN_VID4ORUN_SHIFT (19) +#define ODN_PDP_INTENAB_INTEN_VID4ORUN_LENGTH (1) +#define ODN_PDP_INTENAB_INTEN_VID4ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_VID3ORUN +*/ +#define ODN_PDP_INTENAB_INTEN_VID3ORUN_MASK (0x00040000) +#define ODN_PDP_INTENAB_INTEN_VID3ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTENAB_INTEN_VID3ORUN_SHIFT (18) +#define ODN_PDP_INTENAB_INTEN_VID3ORUN_LENGTH (1) +#define ODN_PDP_INTENAB_INTEN_VID3ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_VID2ORUN +*/ +#define ODN_PDP_INTENAB_INTEN_VID2ORUN_MASK (0x00020000) +#define ODN_PDP_INTENAB_INTEN_VID2ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTENAB_INTEN_VID2ORUN_SHIFT (17) +#define ODN_PDP_INTENAB_INTEN_VID2ORUN_LENGTH (1) +#define ODN_PDP_INTENAB_INTEN_VID2ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_VID1ORUN +*/ +#define ODN_PDP_INTENAB_INTEN_VID1ORUN_MASK (0x00010000) +#define ODN_PDP_INTENAB_INTEN_VID1ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTENAB_INTEN_VID1ORUN_SHIFT (16) +#define ODN_PDP_INTENAB_INTEN_VID1ORUN_LENGTH (1) +#define ODN_PDP_INTENAB_INTEN_VID1ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_GRPH4ORUN +*/ +#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_MASK (0x00008000) 
+#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_SHIFT (15) +#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_LENGTH (1) +#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_GRPH3ORUN +*/ +#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_MASK (0x00004000) +#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_SHIFT (14) +#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_LENGTH (1) +#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_GRPH2ORUN +*/ +#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_MASK (0x00002000) +#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_SHIFT (13) +#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_LENGTH (1) +#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_GRPH1ORUN +*/ +#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_MASK (0x00001000) +#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_SHIFT (12) +#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_LENGTH (1) +#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_VID4URUN +*/ +#define ODN_PDP_INTENAB_INTEN_VID4URUN_MASK (0x00000800) +#define ODN_PDP_INTENAB_INTEN_VID4URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTENAB_INTEN_VID4URUN_SHIFT (11) +#define ODN_PDP_INTENAB_INTEN_VID4URUN_LENGTH (1) +#define ODN_PDP_INTENAB_INTEN_VID4URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_VID3URUN +*/ +#define ODN_PDP_INTENAB_INTEN_VID3URUN_MASK (0x00000400) +#define ODN_PDP_INTENAB_INTEN_VID3URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTENAB_INTEN_VID3URUN_SHIFT (10) +#define ODN_PDP_INTENAB_INTEN_VID3URUN_LENGTH (1) +#define ODN_PDP_INTENAB_INTEN_VID3URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_VID2URUN +*/ +#define ODN_PDP_INTENAB_INTEN_VID2URUN_MASK (0x00000200) +#define 
ODN_PDP_INTENAB_INTEN_VID2URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTENAB_INTEN_VID2URUN_SHIFT (9) +#define ODN_PDP_INTENAB_INTEN_VID2URUN_LENGTH (1) +#define ODN_PDP_INTENAB_INTEN_VID2URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_VID1URUN +*/ +#define ODN_PDP_INTENAB_INTEN_VID1URUN_MASK (0x00000100) +#define ODN_PDP_INTENAB_INTEN_VID1URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTENAB_INTEN_VID1URUN_SHIFT (8) +#define ODN_PDP_INTENAB_INTEN_VID1URUN_LENGTH (1) +#define ODN_PDP_INTENAB_INTEN_VID1URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_GRPH4URUN +*/ +#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_MASK (0x00000080) +#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_SHIFT (7) +#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_LENGTH (1) +#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_GRPH3URUN +*/ +#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_MASK (0x00000040) +#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_SHIFT (6) +#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_LENGTH (1) +#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_GRPH2URUN +*/ +#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_MASK (0x00000020) +#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_SHIFT (5) +#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_LENGTH (1) +#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_GRPH1URUN +*/ +#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_MASK (0x00000010) +#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_SHIFT (4) +#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_LENGTH (1) +#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_VBLNK1 +*/ +#define ODN_PDP_INTENAB_INTEN_VBLNK1_MASK (0x00000008) +#define ODN_PDP_INTENAB_INTEN_VBLNK1_LSBMASK 
(0x00000001) +#define ODN_PDP_INTENAB_INTEN_VBLNK1_SHIFT (3) +#define ODN_PDP_INTENAB_INTEN_VBLNK1_LENGTH (1) +#define ODN_PDP_INTENAB_INTEN_VBLNK1_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_VBLNK0 +*/ +#define ODN_PDP_INTENAB_INTEN_VBLNK0_MASK (0x00000004) +#define ODN_PDP_INTENAB_INTEN_VBLNK0_LSBMASK (0x00000001) +#define ODN_PDP_INTENAB_INTEN_VBLNK0_SHIFT (2) +#define ODN_PDP_INTENAB_INTEN_VBLNK0_LENGTH (1) +#define ODN_PDP_INTENAB_INTEN_VBLNK0_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_HBLNK1 +*/ +#define ODN_PDP_INTENAB_INTEN_HBLNK1_MASK (0x00000002) +#define ODN_PDP_INTENAB_INTEN_HBLNK1_LSBMASK (0x00000001) +#define ODN_PDP_INTENAB_INTEN_HBLNK1_SHIFT (1) +#define ODN_PDP_INTENAB_INTEN_HBLNK1_LENGTH (1) +#define ODN_PDP_INTENAB_INTEN_HBLNK1_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_HBLNK0 +*/ +#define ODN_PDP_INTENAB_INTEN_HBLNK0_MASK (0x00000001) +#define ODN_PDP_INTENAB_INTEN_HBLNK0_LSBMASK (0x00000001) +#define ODN_PDP_INTENAB_INTEN_HBLNK0_SHIFT (0) +#define ODN_PDP_INTENAB_INTEN_HBLNK0_LENGTH (1) +#define ODN_PDP_INTENAB_INTEN_HBLNK0_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_INTCLR_OFFSET (0x07A4) + +/* PDP, INTCLR, INTCLR_VID4ORUN +*/ +#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_MASK (0x00080000) +#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_SHIFT (19) +#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_LENGTH (1) +#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_VID3ORUN +*/ +#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_MASK (0x00040000) +#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_SHIFT (18) +#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_LENGTH (1) +#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_VID2ORUN +*/ +#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_MASK (0x00020000) +#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_SHIFT (17) 
+#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_LENGTH (1) +#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_VID1ORUN +*/ +#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_MASK (0x00010000) +#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_SHIFT (16) +#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_LENGTH (1) +#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_GRPH4ORUN +*/ +#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_MASK (0x00008000) +#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_SHIFT (15) +#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_LENGTH (1) +#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_GRPH3ORUN +*/ +#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_MASK (0x00004000) +#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_SHIFT (14) +#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_LENGTH (1) +#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_GRPH2ORUN +*/ +#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_MASK (0x00002000) +#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_SHIFT (13) +#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_LENGTH (1) +#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_GRPH1ORUN +*/ +#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_MASK (0x00001000) +#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_LSBMASK (0x00000001) +#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_SHIFT (12) +#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_LENGTH (1) +#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_VID4URUN +*/ +#define ODN_PDP_INTCLR_INTCLR_VID4URUN_MASK (0x00000800) +#define ODN_PDP_INTCLR_INTCLR_VID4URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTCLR_INTCLR_VID4URUN_SHIFT (11) +#define 
ODN_PDP_INTCLR_INTCLR_VID4URUN_LENGTH (1) +#define ODN_PDP_INTCLR_INTCLR_VID4URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_VID3URUN +*/ +#define ODN_PDP_INTCLR_INTCLR_VID3URUN_MASK (0x00000400) +#define ODN_PDP_INTCLR_INTCLR_VID3URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTCLR_INTCLR_VID3URUN_SHIFT (10) +#define ODN_PDP_INTCLR_INTCLR_VID3URUN_LENGTH (1) +#define ODN_PDP_INTCLR_INTCLR_VID3URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_VID2URUN +*/ +#define ODN_PDP_INTCLR_INTCLR_VID2URUN_MASK (0x00000200) +#define ODN_PDP_INTCLR_INTCLR_VID2URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTCLR_INTCLR_VID2URUN_SHIFT (9) +#define ODN_PDP_INTCLR_INTCLR_VID2URUN_LENGTH (1) +#define ODN_PDP_INTCLR_INTCLR_VID2URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_VID1URUN +*/ +#define ODN_PDP_INTCLR_INTCLR_VID1URUN_MASK (0x00000100) +#define ODN_PDP_INTCLR_INTCLR_VID1URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTCLR_INTCLR_VID1URUN_SHIFT (8) +#define ODN_PDP_INTCLR_INTCLR_VID1URUN_LENGTH (1) +#define ODN_PDP_INTCLR_INTCLR_VID1URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_GRPH4URUN +*/ +#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_MASK (0x00000080) +#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_SHIFT (7) +#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_LENGTH (1) +#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_GRPH3URUN +*/ +#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_MASK (0x00000040) +#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_SHIFT (6) +#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_LENGTH (1) +#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_GRPH2URUN +*/ +#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_MASK (0x00000020) +#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_SHIFT (5) +#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_LENGTH (1) 
+#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_GRPH1URUN +*/ +#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_MASK (0x00000010) +#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_LSBMASK (0x00000001) +#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_SHIFT (4) +#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_LENGTH (1) +#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_VBLNK1 +*/ +#define ODN_PDP_INTCLR_INTCLR_VBLNK1_MASK (0x00000008) +#define ODN_PDP_INTCLR_INTCLR_VBLNK1_LSBMASK (0x00000001) +#define ODN_PDP_INTCLR_INTCLR_VBLNK1_SHIFT (3) +#define ODN_PDP_INTCLR_INTCLR_VBLNK1_LENGTH (1) +#define ODN_PDP_INTCLR_INTCLR_VBLNK1_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_VBLNK0 +*/ +#define ODN_PDP_INTCLR_INTCLR_VBLNK0_MASK (0x00000004) +#define ODN_PDP_INTCLR_INTCLR_VBLNK0_LSBMASK (0x00000001) +#define ODN_PDP_INTCLR_INTCLR_VBLNK0_SHIFT (2) +#define ODN_PDP_INTCLR_INTCLR_VBLNK0_LENGTH (1) +#define ODN_PDP_INTCLR_INTCLR_VBLNK0_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_HBLNK1 +*/ +#define ODN_PDP_INTCLR_INTCLR_HBLNK1_MASK (0x00000002) +#define ODN_PDP_INTCLR_INTCLR_HBLNK1_LSBMASK (0x00000001) +#define ODN_PDP_INTCLR_INTCLR_HBLNK1_SHIFT (1) +#define ODN_PDP_INTCLR_INTCLR_HBLNK1_LENGTH (1) +#define ODN_PDP_INTCLR_INTCLR_HBLNK1_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_HBLNK0 +*/ +#define ODN_PDP_INTCLR_INTCLR_HBLNK0_MASK (0x00000001) +#define ODN_PDP_INTCLR_INTCLR_HBLNK0_LSBMASK (0x00000001) +#define ODN_PDP_INTCLR_INTCLR_HBLNK0_SHIFT (0) +#define ODN_PDP_INTCLR_INTCLR_HBLNK0_LENGTH (1) +#define ODN_PDP_INTCLR_INTCLR_HBLNK0_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_MEMCTRL_OFFSET (0x07A8) + +/* PDP, MEMCTRL, MEMREFRESH +*/ +#define ODN_PDP_MEMCTRL_MEMREFRESH_MASK (0xC0000000) +#define ODN_PDP_MEMCTRL_MEMREFRESH_LSBMASK (0x00000003) +#define ODN_PDP_MEMCTRL_MEMREFRESH_SHIFT (30) +#define ODN_PDP_MEMCTRL_MEMREFRESH_LENGTH (2) +#define ODN_PDP_MEMCTRL_MEMREFRESH_SIGNED_FIELD IMG_FALSE + +/* PDP, MEMCTRL, 
BURSTLEN +*/ +#define ODN_PDP_MEMCTRL_BURSTLEN_MASK (0x000000FF) +#define ODN_PDP_MEMCTRL_BURSTLEN_LSBMASK (0x000000FF) +#define ODN_PDP_MEMCTRL_BURSTLEN_SHIFT (0) +#define ODN_PDP_MEMCTRL_BURSTLEN_LENGTH (8) +#define ODN_PDP_MEMCTRL_BURSTLEN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_MEM_THRESH_OFFSET (0x07AC) + +/* PDP, MEM_THRESH, UVTHRESHOLD +*/ +#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_MASK (0xFF000000) +#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_LSBMASK (0x000000FF) +#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_SHIFT (24) +#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_LENGTH (8) +#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, MEM_THRESH, YTHRESHOLD +*/ +#define ODN_PDP_MEM_THRESH_YTHRESHOLD_MASK (0x001FF000) +#define ODN_PDP_MEM_THRESH_YTHRESHOLD_LSBMASK (0x000001FF) +#define ODN_PDP_MEM_THRESH_YTHRESHOLD_SHIFT (12) +#define ODN_PDP_MEM_THRESH_YTHRESHOLD_LENGTH (9) +#define ODN_PDP_MEM_THRESH_YTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, MEM_THRESH, THRESHOLD +*/ +#define ODN_PDP_MEM_THRESH_THRESHOLD_MASK (0x000001FF) +#define ODN_PDP_MEM_THRESH_THRESHOLD_LSBMASK (0x000001FF) +#define ODN_PDP_MEM_THRESH_THRESHOLD_SHIFT (0) +#define ODN_PDP_MEM_THRESH_THRESHOLD_LENGTH (9) +#define ODN_PDP_MEM_THRESH_THRESHOLD_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_ALTERNATE_3D_CTRL_OFFSET (0x07B0) + +/* PDP, ALTERNATE_3D_CTRL, ALT3D_ON +*/ +#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_MASK (0x00000010) +#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LSBMASK (0x00000001) +#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SHIFT (4) +#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LENGTH (1) +#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SIGNED_FIELD IMG_FALSE + +/* PDP, ALTERNATE_3D_CTRL, ALT3D_BLENDSEL +*/ +#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_MASK (0x00000007) +#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LSBMASK (0x00000007) +#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SHIFT (0) +#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LENGTH (3) +#define 
ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA0_R_OFFSET (0x07B4) + +/* PDP, GAMMA0_R, GAMMA0_R +*/ +#define ODN_PDP_GAMMA0_R_GAMMA0_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA0_R_GAMMA0_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA0_R_GAMMA0_R_SHIFT (0) +#define ODN_PDP_GAMMA0_R_GAMMA0_R_LENGTH (10) +#define ODN_PDP_GAMMA0_R_GAMMA0_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA0_GB_OFFSET (0x07B8) + +/* PDP, GAMMA0_GB, GAMMA0_G +*/ +#define ODN_PDP_GAMMA0_GB_GAMMA0_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA0_GB_GAMMA0_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA0_GB_GAMMA0_G_SHIFT (16) +#define ODN_PDP_GAMMA0_GB_GAMMA0_G_LENGTH (10) +#define ODN_PDP_GAMMA0_GB_GAMMA0_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA0_GB, GAMMA0_B +*/ +#define ODN_PDP_GAMMA0_GB_GAMMA0_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA0_GB_GAMMA0_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA0_GB_GAMMA0_B_SHIFT (0) +#define ODN_PDP_GAMMA0_GB_GAMMA0_B_LENGTH (10) +#define ODN_PDP_GAMMA0_GB_GAMMA0_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA1_R_OFFSET (0x07BC) + +/* PDP, GAMMA1_R, GAMMA1_R +*/ +#define ODN_PDP_GAMMA1_R_GAMMA1_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA1_R_GAMMA1_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA1_R_GAMMA1_R_SHIFT (0) +#define ODN_PDP_GAMMA1_R_GAMMA1_R_LENGTH (10) +#define ODN_PDP_GAMMA1_R_GAMMA1_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA1_GB_OFFSET (0x07C0) + +/* PDP, GAMMA1_GB, GAMMA1_G +*/ +#define ODN_PDP_GAMMA1_GB_GAMMA1_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA1_GB_GAMMA1_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA1_GB_GAMMA1_G_SHIFT (16) +#define ODN_PDP_GAMMA1_GB_GAMMA1_G_LENGTH (10) +#define ODN_PDP_GAMMA1_GB_GAMMA1_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA1_GB, GAMMA1_B +*/ +#define ODN_PDP_GAMMA1_GB_GAMMA1_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA1_GB_GAMMA1_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA1_GB_GAMMA1_B_SHIFT (0) +#define ODN_PDP_GAMMA1_GB_GAMMA1_B_LENGTH (10) +#define 
ODN_PDP_GAMMA1_GB_GAMMA1_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA2_R_OFFSET (0x07C4) + +/* PDP, GAMMA2_R, GAMMA2_R +*/ +#define ODN_PDP_GAMMA2_R_GAMMA2_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA2_R_GAMMA2_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA2_R_GAMMA2_R_SHIFT (0) +#define ODN_PDP_GAMMA2_R_GAMMA2_R_LENGTH (10) +#define ODN_PDP_GAMMA2_R_GAMMA2_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA2_GB_OFFSET (0x07C8) + +/* PDP, GAMMA2_GB, GAMMA2_G +*/ +#define ODN_PDP_GAMMA2_GB_GAMMA2_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA2_GB_GAMMA2_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA2_GB_GAMMA2_G_SHIFT (16) +#define ODN_PDP_GAMMA2_GB_GAMMA2_G_LENGTH (10) +#define ODN_PDP_GAMMA2_GB_GAMMA2_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA2_GB, GAMMA2_B +*/ +#define ODN_PDP_GAMMA2_GB_GAMMA2_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA2_GB_GAMMA2_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA2_GB_GAMMA2_B_SHIFT (0) +#define ODN_PDP_GAMMA2_GB_GAMMA2_B_LENGTH (10) +#define ODN_PDP_GAMMA2_GB_GAMMA2_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA3_R_OFFSET (0x07CC) + +/* PDP, GAMMA3_R, GAMMA3_R +*/ +#define ODN_PDP_GAMMA3_R_GAMMA3_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA3_R_GAMMA3_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA3_R_GAMMA3_R_SHIFT (0) +#define ODN_PDP_GAMMA3_R_GAMMA3_R_LENGTH (10) +#define ODN_PDP_GAMMA3_R_GAMMA3_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA3_GB_OFFSET (0x07D0) + +/* PDP, GAMMA3_GB, GAMMA3_G +*/ +#define ODN_PDP_GAMMA3_GB_GAMMA3_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA3_GB_GAMMA3_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA3_GB_GAMMA3_G_SHIFT (16) +#define ODN_PDP_GAMMA3_GB_GAMMA3_G_LENGTH (10) +#define ODN_PDP_GAMMA3_GB_GAMMA3_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA3_GB, GAMMA3_B +*/ +#define ODN_PDP_GAMMA3_GB_GAMMA3_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA3_GB_GAMMA3_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA3_GB_GAMMA3_B_SHIFT (0) +#define ODN_PDP_GAMMA3_GB_GAMMA3_B_LENGTH (10) +#define 
ODN_PDP_GAMMA3_GB_GAMMA3_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA4_R_OFFSET (0x07D4) + +/* PDP, GAMMA4_R, GAMMA4_R +*/ +#define ODN_PDP_GAMMA4_R_GAMMA4_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA4_R_GAMMA4_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA4_R_GAMMA4_R_SHIFT (0) +#define ODN_PDP_GAMMA4_R_GAMMA4_R_LENGTH (10) +#define ODN_PDP_GAMMA4_R_GAMMA4_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA4_GB_OFFSET (0x07D8) + +/* PDP, GAMMA4_GB, GAMMA4_G +*/ +#define ODN_PDP_GAMMA4_GB_GAMMA4_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA4_GB_GAMMA4_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA4_GB_GAMMA4_G_SHIFT (16) +#define ODN_PDP_GAMMA4_GB_GAMMA4_G_LENGTH (10) +#define ODN_PDP_GAMMA4_GB_GAMMA4_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA4_GB, GAMMA4_B +*/ +#define ODN_PDP_GAMMA4_GB_GAMMA4_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA4_GB_GAMMA4_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA4_GB_GAMMA4_B_SHIFT (0) +#define ODN_PDP_GAMMA4_GB_GAMMA4_B_LENGTH (10) +#define ODN_PDP_GAMMA4_GB_GAMMA4_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA5_R_OFFSET (0x07DC) + +/* PDP, GAMMA5_R, GAMMA5_R +*/ +#define ODN_PDP_GAMMA5_R_GAMMA5_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA5_R_GAMMA5_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA5_R_GAMMA5_R_SHIFT (0) +#define ODN_PDP_GAMMA5_R_GAMMA5_R_LENGTH (10) +#define ODN_PDP_GAMMA5_R_GAMMA5_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA5_GB_OFFSET (0x07E0) + +/* PDP, GAMMA5_GB, GAMMA5_G +*/ +#define ODN_PDP_GAMMA5_GB_GAMMA5_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA5_GB_GAMMA5_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA5_GB_GAMMA5_G_SHIFT (16) +#define ODN_PDP_GAMMA5_GB_GAMMA5_G_LENGTH (10) +#define ODN_PDP_GAMMA5_GB_GAMMA5_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA5_GB, GAMMA5_B +*/ +#define ODN_PDP_GAMMA5_GB_GAMMA5_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA5_GB_GAMMA5_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA5_GB_GAMMA5_B_SHIFT (0) +#define ODN_PDP_GAMMA5_GB_GAMMA5_B_LENGTH (10) +#define 
ODN_PDP_GAMMA5_GB_GAMMA5_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA6_R_OFFSET (0x07E4) + +/* PDP, GAMMA6_R, GAMMA6_R +*/ +#define ODN_PDP_GAMMA6_R_GAMMA6_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA6_R_GAMMA6_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA6_R_GAMMA6_R_SHIFT (0) +#define ODN_PDP_GAMMA6_R_GAMMA6_R_LENGTH (10) +#define ODN_PDP_GAMMA6_R_GAMMA6_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA6_GB_OFFSET (0x07E8) + +/* PDP, GAMMA6_GB, GAMMA6_G +*/ +#define ODN_PDP_GAMMA6_GB_GAMMA6_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA6_GB_GAMMA6_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA6_GB_GAMMA6_G_SHIFT (16) +#define ODN_PDP_GAMMA6_GB_GAMMA6_G_LENGTH (10) +#define ODN_PDP_GAMMA6_GB_GAMMA6_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA6_GB, GAMMA6_B +*/ +#define ODN_PDP_GAMMA6_GB_GAMMA6_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA6_GB_GAMMA6_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA6_GB_GAMMA6_B_SHIFT (0) +#define ODN_PDP_GAMMA6_GB_GAMMA6_B_LENGTH (10) +#define ODN_PDP_GAMMA6_GB_GAMMA6_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA7_R_OFFSET (0x07EC) + +/* PDP, GAMMA7_R, GAMMA7_R +*/ +#define ODN_PDP_GAMMA7_R_GAMMA7_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA7_R_GAMMA7_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA7_R_GAMMA7_R_SHIFT (0) +#define ODN_PDP_GAMMA7_R_GAMMA7_R_LENGTH (10) +#define ODN_PDP_GAMMA7_R_GAMMA7_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA7_GB_OFFSET (0x07F0) + +/* PDP, GAMMA7_GB, GAMMA7_G +*/ +#define ODN_PDP_GAMMA7_GB_GAMMA7_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA7_GB_GAMMA7_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA7_GB_GAMMA7_G_SHIFT (16) +#define ODN_PDP_GAMMA7_GB_GAMMA7_G_LENGTH (10) +#define ODN_PDP_GAMMA7_GB_GAMMA7_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA7_GB, GAMMA7_B +*/ +#define ODN_PDP_GAMMA7_GB_GAMMA7_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA7_GB_GAMMA7_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA7_GB_GAMMA7_B_SHIFT (0) +#define ODN_PDP_GAMMA7_GB_GAMMA7_B_LENGTH (10) +#define 
ODN_PDP_GAMMA7_GB_GAMMA7_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA8_R_OFFSET (0x07F4) + +/* PDP, GAMMA8_R, GAMMA8_R +*/ +#define ODN_PDP_GAMMA8_R_GAMMA8_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA8_R_GAMMA8_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA8_R_GAMMA8_R_SHIFT (0) +#define ODN_PDP_GAMMA8_R_GAMMA8_R_LENGTH (10) +#define ODN_PDP_GAMMA8_R_GAMMA8_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA8_GB_OFFSET (0x07F8) + +/* PDP, GAMMA8_GB, GAMMA8_G +*/ +#define ODN_PDP_GAMMA8_GB_GAMMA8_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA8_GB_GAMMA8_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA8_GB_GAMMA8_G_SHIFT (16) +#define ODN_PDP_GAMMA8_GB_GAMMA8_G_LENGTH (10) +#define ODN_PDP_GAMMA8_GB_GAMMA8_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA8_GB, GAMMA8_B +*/ +#define ODN_PDP_GAMMA8_GB_GAMMA8_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA8_GB_GAMMA8_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA8_GB_GAMMA8_B_SHIFT (0) +#define ODN_PDP_GAMMA8_GB_GAMMA8_B_LENGTH (10) +#define ODN_PDP_GAMMA8_GB_GAMMA8_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA9_R_OFFSET (0x07FC) + +/* PDP, GAMMA9_R, GAMMA9_R +*/ +#define ODN_PDP_GAMMA9_R_GAMMA9_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA9_R_GAMMA9_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA9_R_GAMMA9_R_SHIFT (0) +#define ODN_PDP_GAMMA9_R_GAMMA9_R_LENGTH (10) +#define ODN_PDP_GAMMA9_R_GAMMA9_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA9_GB_OFFSET (0x0800) + +/* PDP, GAMMA9_GB, GAMMA9_G +*/ +#define ODN_PDP_GAMMA9_GB_GAMMA9_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA9_GB_GAMMA9_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA9_GB_GAMMA9_G_SHIFT (16) +#define ODN_PDP_GAMMA9_GB_GAMMA9_G_LENGTH (10) +#define ODN_PDP_GAMMA9_GB_GAMMA9_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA9_GB, GAMMA9_B +*/ +#define ODN_PDP_GAMMA9_GB_GAMMA9_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA9_GB_GAMMA9_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA9_GB_GAMMA9_B_SHIFT (0) +#define ODN_PDP_GAMMA9_GB_GAMMA9_B_LENGTH (10) +#define 
ODN_PDP_GAMMA9_GB_GAMMA9_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA10_R_OFFSET (0x0804) + +/* PDP, GAMMA10_R, GAMMA10_R +*/ +#define ODN_PDP_GAMMA10_R_GAMMA10_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA10_R_GAMMA10_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA10_R_GAMMA10_R_SHIFT (0) +#define ODN_PDP_GAMMA10_R_GAMMA10_R_LENGTH (10) +#define ODN_PDP_GAMMA10_R_GAMMA10_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA10_GB_OFFSET (0x0808) + +/* PDP, GAMMA10_GB, GAMMA10_G +*/ +#define ODN_PDP_GAMMA10_GB_GAMMA10_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA10_GB_GAMMA10_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA10_GB_GAMMA10_G_SHIFT (16) +#define ODN_PDP_GAMMA10_GB_GAMMA10_G_LENGTH (10) +#define ODN_PDP_GAMMA10_GB_GAMMA10_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA10_GB, GAMMA10_B +*/ +#define ODN_PDP_GAMMA10_GB_GAMMA10_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA10_GB_GAMMA10_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA10_GB_GAMMA10_B_SHIFT (0) +#define ODN_PDP_GAMMA10_GB_GAMMA10_B_LENGTH (10) +#define ODN_PDP_GAMMA10_GB_GAMMA10_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA11_R_OFFSET (0x080C) + +/* PDP, GAMMA11_R, GAMMA11_R +*/ +#define ODN_PDP_GAMMA11_R_GAMMA11_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA11_R_GAMMA11_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA11_R_GAMMA11_R_SHIFT (0) +#define ODN_PDP_GAMMA11_R_GAMMA11_R_LENGTH (10) +#define ODN_PDP_GAMMA11_R_GAMMA11_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA11_GB_OFFSET (0x0810) + +/* PDP, GAMMA11_GB, GAMMA11_G +*/ +#define ODN_PDP_GAMMA11_GB_GAMMA11_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA11_GB_GAMMA11_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA11_GB_GAMMA11_G_SHIFT (16) +#define ODN_PDP_GAMMA11_GB_GAMMA11_G_LENGTH (10) +#define ODN_PDP_GAMMA11_GB_GAMMA11_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA11_GB, GAMMA11_B +*/ +#define ODN_PDP_GAMMA11_GB_GAMMA11_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA11_GB_GAMMA11_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA11_GB_GAMMA11_B_SHIFT (0) +#define 
ODN_PDP_GAMMA11_GB_GAMMA11_B_LENGTH (10) +#define ODN_PDP_GAMMA11_GB_GAMMA11_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA12_R_OFFSET (0x0814) + +/* PDP, GAMMA12_R, GAMMA12_R +*/ +#define ODN_PDP_GAMMA12_R_GAMMA12_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA12_R_GAMMA12_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA12_R_GAMMA12_R_SHIFT (0) +#define ODN_PDP_GAMMA12_R_GAMMA12_R_LENGTH (10) +#define ODN_PDP_GAMMA12_R_GAMMA12_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA12_GB_OFFSET (0x0818) + +/* PDP, GAMMA12_GB, GAMMA12_G +*/ +#define ODN_PDP_GAMMA12_GB_GAMMA12_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA12_GB_GAMMA12_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA12_GB_GAMMA12_G_SHIFT (16) +#define ODN_PDP_GAMMA12_GB_GAMMA12_G_LENGTH (10) +#define ODN_PDP_GAMMA12_GB_GAMMA12_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA12_GB, GAMMA12_B +*/ +#define ODN_PDP_GAMMA12_GB_GAMMA12_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA12_GB_GAMMA12_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA12_GB_GAMMA12_B_SHIFT (0) +#define ODN_PDP_GAMMA12_GB_GAMMA12_B_LENGTH (10) +#define ODN_PDP_GAMMA12_GB_GAMMA12_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA13_R_OFFSET (0x081C) + +/* PDP, GAMMA13_R, GAMMA13_R +*/ +#define ODN_PDP_GAMMA13_R_GAMMA13_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA13_R_GAMMA13_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA13_R_GAMMA13_R_SHIFT (0) +#define ODN_PDP_GAMMA13_R_GAMMA13_R_LENGTH (10) +#define ODN_PDP_GAMMA13_R_GAMMA13_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA13_GB_OFFSET (0x0820) + +/* PDP, GAMMA13_GB, GAMMA13_G +*/ +#define ODN_PDP_GAMMA13_GB_GAMMA13_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA13_GB_GAMMA13_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA13_GB_GAMMA13_G_SHIFT (16) +#define ODN_PDP_GAMMA13_GB_GAMMA13_G_LENGTH (10) +#define ODN_PDP_GAMMA13_GB_GAMMA13_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA13_GB, GAMMA13_B +*/ +#define ODN_PDP_GAMMA13_GB_GAMMA13_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA13_GB_GAMMA13_B_LSBMASK (0x000003FF) +#define 
ODN_PDP_GAMMA13_GB_GAMMA13_B_SHIFT (0) +#define ODN_PDP_GAMMA13_GB_GAMMA13_B_LENGTH (10) +#define ODN_PDP_GAMMA13_GB_GAMMA13_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA14_R_OFFSET (0x0824) + +/* PDP, GAMMA14_R, GAMMA14_R +*/ +#define ODN_PDP_GAMMA14_R_GAMMA14_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA14_R_GAMMA14_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA14_R_GAMMA14_R_SHIFT (0) +#define ODN_PDP_GAMMA14_R_GAMMA14_R_LENGTH (10) +#define ODN_PDP_GAMMA14_R_GAMMA14_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA14_GB_OFFSET (0x0828) + +/* PDP, GAMMA14_GB, GAMMA14_G +*/ +#define ODN_PDP_GAMMA14_GB_GAMMA14_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA14_GB_GAMMA14_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA14_GB_GAMMA14_G_SHIFT (16) +#define ODN_PDP_GAMMA14_GB_GAMMA14_G_LENGTH (10) +#define ODN_PDP_GAMMA14_GB_GAMMA14_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA14_GB, GAMMA14_B +*/ +#define ODN_PDP_GAMMA14_GB_GAMMA14_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA14_GB_GAMMA14_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA14_GB_GAMMA14_B_SHIFT (0) +#define ODN_PDP_GAMMA14_GB_GAMMA14_B_LENGTH (10) +#define ODN_PDP_GAMMA14_GB_GAMMA14_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA15_R_OFFSET (0x082C) + +/* PDP, GAMMA15_R, GAMMA15_R +*/ +#define ODN_PDP_GAMMA15_R_GAMMA15_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA15_R_GAMMA15_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA15_R_GAMMA15_R_SHIFT (0) +#define ODN_PDP_GAMMA15_R_GAMMA15_R_LENGTH (10) +#define ODN_PDP_GAMMA15_R_GAMMA15_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA15_GB_OFFSET (0x0830) + +/* PDP, GAMMA15_GB, GAMMA15_G +*/ +#define ODN_PDP_GAMMA15_GB_GAMMA15_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA15_GB_GAMMA15_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA15_GB_GAMMA15_G_SHIFT (16) +#define ODN_PDP_GAMMA15_GB_GAMMA15_G_LENGTH (10) +#define ODN_PDP_GAMMA15_GB_GAMMA15_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA15_GB, GAMMA15_B +*/ +#define ODN_PDP_GAMMA15_GB_GAMMA15_B_MASK (0x000003FF) +#define 
ODN_PDP_GAMMA15_GB_GAMMA15_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA15_GB_GAMMA15_B_SHIFT (0) +#define ODN_PDP_GAMMA15_GB_GAMMA15_B_LENGTH (10) +#define ODN_PDP_GAMMA15_GB_GAMMA15_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA16_R_OFFSET (0x0834) + +/* PDP, GAMMA16_R, GAMMA16_R +*/ +#define ODN_PDP_GAMMA16_R_GAMMA16_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA16_R_GAMMA16_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA16_R_GAMMA16_R_SHIFT (0) +#define ODN_PDP_GAMMA16_R_GAMMA16_R_LENGTH (10) +#define ODN_PDP_GAMMA16_R_GAMMA16_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA16_GB_OFFSET (0x0838) + +/* PDP, GAMMA16_GB, GAMMA16_G +*/ +#define ODN_PDP_GAMMA16_GB_GAMMA16_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA16_GB_GAMMA16_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA16_GB_GAMMA16_G_SHIFT (16) +#define ODN_PDP_GAMMA16_GB_GAMMA16_G_LENGTH (10) +#define ODN_PDP_GAMMA16_GB_GAMMA16_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA16_GB, GAMMA16_B +*/ +#define ODN_PDP_GAMMA16_GB_GAMMA16_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA16_GB_GAMMA16_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA16_GB_GAMMA16_B_SHIFT (0) +#define ODN_PDP_GAMMA16_GB_GAMMA16_B_LENGTH (10) +#define ODN_PDP_GAMMA16_GB_GAMMA16_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA17_R_OFFSET (0x083C) + +/* PDP, GAMMA17_R, GAMMA17_R +*/ +#define ODN_PDP_GAMMA17_R_GAMMA17_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA17_R_GAMMA17_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA17_R_GAMMA17_R_SHIFT (0) +#define ODN_PDP_GAMMA17_R_GAMMA17_R_LENGTH (10) +#define ODN_PDP_GAMMA17_R_GAMMA17_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA17_GB_OFFSET (0x0840) + +/* PDP, GAMMA17_GB, GAMMA17_G +*/ +#define ODN_PDP_GAMMA17_GB_GAMMA17_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA17_GB_GAMMA17_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA17_GB_GAMMA17_G_SHIFT (16) +#define ODN_PDP_GAMMA17_GB_GAMMA17_G_LENGTH (10) +#define ODN_PDP_GAMMA17_GB_GAMMA17_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA17_GB, GAMMA17_B +*/ +#define 
ODN_PDP_GAMMA17_GB_GAMMA17_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA17_GB_GAMMA17_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA17_GB_GAMMA17_B_SHIFT (0) +#define ODN_PDP_GAMMA17_GB_GAMMA17_B_LENGTH (10) +#define ODN_PDP_GAMMA17_GB_GAMMA17_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA18_R_OFFSET (0x0844) + +/* PDP, GAMMA18_R, GAMMA18_R +*/ +#define ODN_PDP_GAMMA18_R_GAMMA18_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA18_R_GAMMA18_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA18_R_GAMMA18_R_SHIFT (0) +#define ODN_PDP_GAMMA18_R_GAMMA18_R_LENGTH (10) +#define ODN_PDP_GAMMA18_R_GAMMA18_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA18_GB_OFFSET (0x0848) + +/* PDP, GAMMA18_GB, GAMMA18_G +*/ +#define ODN_PDP_GAMMA18_GB_GAMMA18_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA18_GB_GAMMA18_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA18_GB_GAMMA18_G_SHIFT (16) +#define ODN_PDP_GAMMA18_GB_GAMMA18_G_LENGTH (10) +#define ODN_PDP_GAMMA18_GB_GAMMA18_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA18_GB, GAMMA18_B +*/ +#define ODN_PDP_GAMMA18_GB_GAMMA18_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA18_GB_GAMMA18_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA18_GB_GAMMA18_B_SHIFT (0) +#define ODN_PDP_GAMMA18_GB_GAMMA18_B_LENGTH (10) +#define ODN_PDP_GAMMA18_GB_GAMMA18_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA19_R_OFFSET (0x084C) + +/* PDP, GAMMA19_R, GAMMA19_R +*/ +#define ODN_PDP_GAMMA19_R_GAMMA19_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA19_R_GAMMA19_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA19_R_GAMMA19_R_SHIFT (0) +#define ODN_PDP_GAMMA19_R_GAMMA19_R_LENGTH (10) +#define ODN_PDP_GAMMA19_R_GAMMA19_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA19_GB_OFFSET (0x0850) + +/* PDP, GAMMA19_GB, GAMMA19_G +*/ +#define ODN_PDP_GAMMA19_GB_GAMMA19_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA19_GB_GAMMA19_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA19_GB_GAMMA19_G_SHIFT (16) +#define ODN_PDP_GAMMA19_GB_GAMMA19_G_LENGTH (10) +#define ODN_PDP_GAMMA19_GB_GAMMA19_G_SIGNED_FIELD IMG_FALSE + +/* 
PDP, GAMMA19_GB, GAMMA19_B +*/ +#define ODN_PDP_GAMMA19_GB_GAMMA19_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA19_GB_GAMMA19_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA19_GB_GAMMA19_B_SHIFT (0) +#define ODN_PDP_GAMMA19_GB_GAMMA19_B_LENGTH (10) +#define ODN_PDP_GAMMA19_GB_GAMMA19_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA20_R_OFFSET (0x0854) + +/* PDP, GAMMA20_R, GAMMA20_R +*/ +#define ODN_PDP_GAMMA20_R_GAMMA20_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA20_R_GAMMA20_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA20_R_GAMMA20_R_SHIFT (0) +#define ODN_PDP_GAMMA20_R_GAMMA20_R_LENGTH (10) +#define ODN_PDP_GAMMA20_R_GAMMA20_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA20_GB_OFFSET (0x0858) + +/* PDP, GAMMA20_GB, GAMMA20_G +*/ +#define ODN_PDP_GAMMA20_GB_GAMMA20_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA20_GB_GAMMA20_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA20_GB_GAMMA20_G_SHIFT (16) +#define ODN_PDP_GAMMA20_GB_GAMMA20_G_LENGTH (10) +#define ODN_PDP_GAMMA20_GB_GAMMA20_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA20_GB, GAMMA20_B +*/ +#define ODN_PDP_GAMMA20_GB_GAMMA20_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA20_GB_GAMMA20_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA20_GB_GAMMA20_B_SHIFT (0) +#define ODN_PDP_GAMMA20_GB_GAMMA20_B_LENGTH (10) +#define ODN_PDP_GAMMA20_GB_GAMMA20_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA21_R_OFFSET (0x085C) + +/* PDP, GAMMA21_R, GAMMA21_R +*/ +#define ODN_PDP_GAMMA21_R_GAMMA21_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA21_R_GAMMA21_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA21_R_GAMMA21_R_SHIFT (0) +#define ODN_PDP_GAMMA21_R_GAMMA21_R_LENGTH (10) +#define ODN_PDP_GAMMA21_R_GAMMA21_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA21_GB_OFFSET (0x0860) + +/* PDP, GAMMA21_GB, GAMMA21_G +*/ +#define ODN_PDP_GAMMA21_GB_GAMMA21_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA21_GB_GAMMA21_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA21_GB_GAMMA21_G_SHIFT (16) +#define ODN_PDP_GAMMA21_GB_GAMMA21_G_LENGTH (10) +#define 
ODN_PDP_GAMMA21_GB_GAMMA21_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA21_GB, GAMMA21_B +*/ +#define ODN_PDP_GAMMA21_GB_GAMMA21_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA21_GB_GAMMA21_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA21_GB_GAMMA21_B_SHIFT (0) +#define ODN_PDP_GAMMA21_GB_GAMMA21_B_LENGTH (10) +#define ODN_PDP_GAMMA21_GB_GAMMA21_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA22_R_OFFSET (0x0864) + +/* PDP, GAMMA22_R, GAMMA22_R +*/ +#define ODN_PDP_GAMMA22_R_GAMMA22_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA22_R_GAMMA22_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA22_R_GAMMA22_R_SHIFT (0) +#define ODN_PDP_GAMMA22_R_GAMMA22_R_LENGTH (10) +#define ODN_PDP_GAMMA22_R_GAMMA22_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA22_GB_OFFSET (0x0868) + +/* PDP, GAMMA22_GB, GAMMA22_G +*/ +#define ODN_PDP_GAMMA22_GB_GAMMA22_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA22_GB_GAMMA22_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA22_GB_GAMMA22_G_SHIFT (16) +#define ODN_PDP_GAMMA22_GB_GAMMA22_G_LENGTH (10) +#define ODN_PDP_GAMMA22_GB_GAMMA22_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA22_GB, GAMMA22_B +*/ +#define ODN_PDP_GAMMA22_GB_GAMMA22_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA22_GB_GAMMA22_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA22_GB_GAMMA22_B_SHIFT (0) +#define ODN_PDP_GAMMA22_GB_GAMMA22_B_LENGTH (10) +#define ODN_PDP_GAMMA22_GB_GAMMA22_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA23_R_OFFSET (0x086C) + +/* PDP, GAMMA23_R, GAMMA23_R +*/ +#define ODN_PDP_GAMMA23_R_GAMMA23_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA23_R_GAMMA23_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA23_R_GAMMA23_R_SHIFT (0) +#define ODN_PDP_GAMMA23_R_GAMMA23_R_LENGTH (10) +#define ODN_PDP_GAMMA23_R_GAMMA23_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA23_GB_OFFSET (0x0870) + +/* PDP, GAMMA23_GB, GAMMA23_G +*/ +#define ODN_PDP_GAMMA23_GB_GAMMA23_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA23_GB_GAMMA23_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA23_GB_GAMMA23_G_SHIFT (16) +#define 
ODN_PDP_GAMMA23_GB_GAMMA23_G_LENGTH (10) +#define ODN_PDP_GAMMA23_GB_GAMMA23_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA23_GB, GAMMA23_B +*/ +#define ODN_PDP_GAMMA23_GB_GAMMA23_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA23_GB_GAMMA23_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA23_GB_GAMMA23_B_SHIFT (0) +#define ODN_PDP_GAMMA23_GB_GAMMA23_B_LENGTH (10) +#define ODN_PDP_GAMMA23_GB_GAMMA23_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA24_R_OFFSET (0x0874) + +/* PDP, GAMMA24_R, GAMMA24_R +*/ +#define ODN_PDP_GAMMA24_R_GAMMA24_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA24_R_GAMMA24_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA24_R_GAMMA24_R_SHIFT (0) +#define ODN_PDP_GAMMA24_R_GAMMA24_R_LENGTH (10) +#define ODN_PDP_GAMMA24_R_GAMMA24_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA24_GB_OFFSET (0x0878) + +/* PDP, GAMMA24_GB, GAMMA24_G +*/ +#define ODN_PDP_GAMMA24_GB_GAMMA24_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA24_GB_GAMMA24_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA24_GB_GAMMA24_G_SHIFT (16) +#define ODN_PDP_GAMMA24_GB_GAMMA24_G_LENGTH (10) +#define ODN_PDP_GAMMA24_GB_GAMMA24_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA24_GB, GAMMA24_B +*/ +#define ODN_PDP_GAMMA24_GB_GAMMA24_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA24_GB_GAMMA24_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA24_GB_GAMMA24_B_SHIFT (0) +#define ODN_PDP_GAMMA24_GB_GAMMA24_B_LENGTH (10) +#define ODN_PDP_GAMMA24_GB_GAMMA24_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA25_R_OFFSET (0x087C) + +/* PDP, GAMMA25_R, GAMMA25_R +*/ +#define ODN_PDP_GAMMA25_R_GAMMA25_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA25_R_GAMMA25_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA25_R_GAMMA25_R_SHIFT (0) +#define ODN_PDP_GAMMA25_R_GAMMA25_R_LENGTH (10) +#define ODN_PDP_GAMMA25_R_GAMMA25_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA25_GB_OFFSET (0x0880) + +/* PDP, GAMMA25_GB, GAMMA25_G +*/ +#define ODN_PDP_GAMMA25_GB_GAMMA25_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA25_GB_GAMMA25_G_LSBMASK (0x000003FF) +#define 
ODN_PDP_GAMMA25_GB_GAMMA25_G_SHIFT (16) +#define ODN_PDP_GAMMA25_GB_GAMMA25_G_LENGTH (10) +#define ODN_PDP_GAMMA25_GB_GAMMA25_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA25_GB, GAMMA25_B +*/ +#define ODN_PDP_GAMMA25_GB_GAMMA25_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA25_GB_GAMMA25_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA25_GB_GAMMA25_B_SHIFT (0) +#define ODN_PDP_GAMMA25_GB_GAMMA25_B_LENGTH (10) +#define ODN_PDP_GAMMA25_GB_GAMMA25_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA26_R_OFFSET (0x0884) + +/* PDP, GAMMA26_R, GAMMA26_R +*/ +#define ODN_PDP_GAMMA26_R_GAMMA26_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA26_R_GAMMA26_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA26_R_GAMMA26_R_SHIFT (0) +#define ODN_PDP_GAMMA26_R_GAMMA26_R_LENGTH (10) +#define ODN_PDP_GAMMA26_R_GAMMA26_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA26_GB_OFFSET (0x0888) + +/* PDP, GAMMA26_GB, GAMMA26_G +*/ +#define ODN_PDP_GAMMA26_GB_GAMMA26_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA26_GB_GAMMA26_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA26_GB_GAMMA26_G_SHIFT (16) +#define ODN_PDP_GAMMA26_GB_GAMMA26_G_LENGTH (10) +#define ODN_PDP_GAMMA26_GB_GAMMA26_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA26_GB, GAMMA26_B +*/ +#define ODN_PDP_GAMMA26_GB_GAMMA26_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA26_GB_GAMMA26_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA26_GB_GAMMA26_B_SHIFT (0) +#define ODN_PDP_GAMMA26_GB_GAMMA26_B_LENGTH (10) +#define ODN_PDP_GAMMA26_GB_GAMMA26_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA27_R_OFFSET (0x088C) + +/* PDP, GAMMA27_R, GAMMA27_R +*/ +#define ODN_PDP_GAMMA27_R_GAMMA27_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA27_R_GAMMA27_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA27_R_GAMMA27_R_SHIFT (0) +#define ODN_PDP_GAMMA27_R_GAMMA27_R_LENGTH (10) +#define ODN_PDP_GAMMA27_R_GAMMA27_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA27_GB_OFFSET (0x0890) + +/* PDP, GAMMA27_GB, GAMMA27_G +*/ +#define ODN_PDP_GAMMA27_GB_GAMMA27_G_MASK (0x03FF0000) +#define 
ODN_PDP_GAMMA27_GB_GAMMA27_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA27_GB_GAMMA27_G_SHIFT (16) +#define ODN_PDP_GAMMA27_GB_GAMMA27_G_LENGTH (10) +#define ODN_PDP_GAMMA27_GB_GAMMA27_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA27_GB, GAMMA27_B +*/ +#define ODN_PDP_GAMMA27_GB_GAMMA27_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA27_GB_GAMMA27_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA27_GB_GAMMA27_B_SHIFT (0) +#define ODN_PDP_GAMMA27_GB_GAMMA27_B_LENGTH (10) +#define ODN_PDP_GAMMA27_GB_GAMMA27_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA28_R_OFFSET (0x0894) + +/* PDP, GAMMA28_R, GAMMA28_R +*/ +#define ODN_PDP_GAMMA28_R_GAMMA28_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA28_R_GAMMA28_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA28_R_GAMMA28_R_SHIFT (0) +#define ODN_PDP_GAMMA28_R_GAMMA28_R_LENGTH (10) +#define ODN_PDP_GAMMA28_R_GAMMA28_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA28_GB_OFFSET (0x0898) + +/* PDP, GAMMA28_GB, GAMMA28_G +*/ +#define ODN_PDP_GAMMA28_GB_GAMMA28_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA28_GB_GAMMA28_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA28_GB_GAMMA28_G_SHIFT (16) +#define ODN_PDP_GAMMA28_GB_GAMMA28_G_LENGTH (10) +#define ODN_PDP_GAMMA28_GB_GAMMA28_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA28_GB, GAMMA28_B +*/ +#define ODN_PDP_GAMMA28_GB_GAMMA28_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA28_GB_GAMMA28_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA28_GB_GAMMA28_B_SHIFT (0) +#define ODN_PDP_GAMMA28_GB_GAMMA28_B_LENGTH (10) +#define ODN_PDP_GAMMA28_GB_GAMMA28_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA29_R_OFFSET (0x089C) + +/* PDP, GAMMA29_R, GAMMA29_R +*/ +#define ODN_PDP_GAMMA29_R_GAMMA29_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA29_R_GAMMA29_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA29_R_GAMMA29_R_SHIFT (0) +#define ODN_PDP_GAMMA29_R_GAMMA29_R_LENGTH (10) +#define ODN_PDP_GAMMA29_R_GAMMA29_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA29_GB_OFFSET (0x08A0) + +/* PDP, GAMMA29_GB, GAMMA29_G +*/ +#define 
ODN_PDP_GAMMA29_GB_GAMMA29_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA29_GB_GAMMA29_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA29_GB_GAMMA29_G_SHIFT (16) +#define ODN_PDP_GAMMA29_GB_GAMMA29_G_LENGTH (10) +#define ODN_PDP_GAMMA29_GB_GAMMA29_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA29_GB, GAMMA29_B +*/ +#define ODN_PDP_GAMMA29_GB_GAMMA29_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA29_GB_GAMMA29_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA29_GB_GAMMA29_B_SHIFT (0) +#define ODN_PDP_GAMMA29_GB_GAMMA29_B_LENGTH (10) +#define ODN_PDP_GAMMA29_GB_GAMMA29_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA30_R_OFFSET (0x08A4) + +/* PDP, GAMMA30_R, GAMMA30_R +*/ +#define ODN_PDP_GAMMA30_R_GAMMA30_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA30_R_GAMMA30_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA30_R_GAMMA30_R_SHIFT (0) +#define ODN_PDP_GAMMA30_R_GAMMA30_R_LENGTH (10) +#define ODN_PDP_GAMMA30_R_GAMMA30_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA30_GB_OFFSET (0x08A8) + +/* PDP, GAMMA30_GB, GAMMA30_G +*/ +#define ODN_PDP_GAMMA30_GB_GAMMA30_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA30_GB_GAMMA30_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA30_GB_GAMMA30_G_SHIFT (16) +#define ODN_PDP_GAMMA30_GB_GAMMA30_G_LENGTH (10) +#define ODN_PDP_GAMMA30_GB_GAMMA30_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA30_GB, GAMMA30_B +*/ +#define ODN_PDP_GAMMA30_GB_GAMMA30_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA30_GB_GAMMA30_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA30_GB_GAMMA30_B_SHIFT (0) +#define ODN_PDP_GAMMA30_GB_GAMMA30_B_LENGTH (10) +#define ODN_PDP_GAMMA30_GB_GAMMA30_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA31_R_OFFSET (0x08AC) + +/* PDP, GAMMA31_R, GAMMA31_R +*/ +#define ODN_PDP_GAMMA31_R_GAMMA31_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA31_R_GAMMA31_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA31_R_GAMMA31_R_SHIFT (0) +#define ODN_PDP_GAMMA31_R_GAMMA31_R_LENGTH (10) +#define ODN_PDP_GAMMA31_R_GAMMA31_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA31_GB_OFFSET (0x08B0) + +/* 
PDP, GAMMA31_GB, GAMMA31_G +*/ +#define ODN_PDP_GAMMA31_GB_GAMMA31_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA31_GB_GAMMA31_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA31_GB_GAMMA31_G_SHIFT (16) +#define ODN_PDP_GAMMA31_GB_GAMMA31_G_LENGTH (10) +#define ODN_PDP_GAMMA31_GB_GAMMA31_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA31_GB, GAMMA31_B +*/ +#define ODN_PDP_GAMMA31_GB_GAMMA31_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA31_GB_GAMMA31_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA31_GB_GAMMA31_B_SHIFT (0) +#define ODN_PDP_GAMMA31_GB_GAMMA31_B_LENGTH (10) +#define ODN_PDP_GAMMA31_GB_GAMMA31_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA32_R_OFFSET (0x08B4) + +/* PDP, GAMMA32_R, GAMMA32_R +*/ +#define ODN_PDP_GAMMA32_R_GAMMA32_R_MASK (0x000003FF) +#define ODN_PDP_GAMMA32_R_GAMMA32_R_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA32_R_GAMMA32_R_SHIFT (0) +#define ODN_PDP_GAMMA32_R_GAMMA32_R_LENGTH (10) +#define ODN_PDP_GAMMA32_R_GAMMA32_R_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GAMMA32_GB_OFFSET (0x08B8) + +/* PDP, GAMMA32_GB, GAMMA32_G +*/ +#define ODN_PDP_GAMMA32_GB_GAMMA32_G_MASK (0x03FF0000) +#define ODN_PDP_GAMMA32_GB_GAMMA32_G_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA32_GB_GAMMA32_G_SHIFT (16) +#define ODN_PDP_GAMMA32_GB_GAMMA32_G_LENGTH (10) +#define ODN_PDP_GAMMA32_GB_GAMMA32_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA32_GB, GAMMA32_B +*/ +#define ODN_PDP_GAMMA32_GB_GAMMA32_B_MASK (0x000003FF) +#define ODN_PDP_GAMMA32_GB_GAMMA32_B_LSBMASK (0x000003FF) +#define ODN_PDP_GAMMA32_GB_GAMMA32_B_SHIFT (0) +#define ODN_PDP_GAMMA32_GB_GAMMA32_B_LENGTH (10) +#define ODN_PDP_GAMMA32_GB_GAMMA32_B_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VEVENT_OFFSET (0x08BC) + +/* PDP, VEVENT, VEVENT +*/ +#define ODN_PDP_VEVENT_VEVENT_MASK (0x1FFF0000) +#define ODN_PDP_VEVENT_VEVENT_LSBMASK (0x00001FFF) +#define ODN_PDP_VEVENT_VEVENT_SHIFT (16) +#define ODN_PDP_VEVENT_VEVENT_LENGTH (13) +#define ODN_PDP_VEVENT_VEVENT_SIGNED_FIELD IMG_FALSE + +/* PDP, VEVENT, VFETCH +*/ +#define 
ODN_PDP_VEVENT_VFETCH_MASK (0x00001FFF) +#define ODN_PDP_VEVENT_VFETCH_LSBMASK (0x00001FFF) +#define ODN_PDP_VEVENT_VFETCH_SHIFT (0) +#define ODN_PDP_VEVENT_VFETCH_LENGTH (13) +#define ODN_PDP_VEVENT_VFETCH_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_HDECTRL_OFFSET (0x08C0) + +/* PDP, HDECTRL, HDES +*/ +#define ODN_PDP_HDECTRL_HDES_MASK (0x1FFF0000) +#define ODN_PDP_HDECTRL_HDES_LSBMASK (0x00001FFF) +#define ODN_PDP_HDECTRL_HDES_SHIFT (16) +#define ODN_PDP_HDECTRL_HDES_LENGTH (13) +#define ODN_PDP_HDECTRL_HDES_SIGNED_FIELD IMG_FALSE + +/* PDP, HDECTRL, HDEF +*/ +#define ODN_PDP_HDECTRL_HDEF_MASK (0x00001FFF) +#define ODN_PDP_HDECTRL_HDEF_LSBMASK (0x00001FFF) +#define ODN_PDP_HDECTRL_HDEF_SHIFT (0) +#define ODN_PDP_HDECTRL_HDEF_LENGTH (13) +#define ODN_PDP_HDECTRL_HDEF_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VDECTRL_OFFSET (0x08C4) + +/* PDP, VDECTRL, VDES +*/ +#define ODN_PDP_VDECTRL_VDES_MASK (0x1FFF0000) +#define ODN_PDP_VDECTRL_VDES_LSBMASK (0x00001FFF) +#define ODN_PDP_VDECTRL_VDES_SHIFT (16) +#define ODN_PDP_VDECTRL_VDES_LENGTH (13) +#define ODN_PDP_VDECTRL_VDES_SIGNED_FIELD IMG_FALSE + +/* PDP, VDECTRL, VDEF +*/ +#define ODN_PDP_VDECTRL_VDEF_MASK (0x00001FFF) +#define ODN_PDP_VDECTRL_VDEF_LSBMASK (0x00001FFF) +#define ODN_PDP_VDECTRL_VDEF_SHIFT (0) +#define ODN_PDP_VDECTRL_VDEF_LENGTH (13) +#define ODN_PDP_VDECTRL_VDEF_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_OPMASK_R_OFFSET (0x08C8) + +/* PDP, OPMASK_R, MASKLEVEL +*/ +#define ODN_PDP_OPMASK_R_MASKLEVEL_MASK (0x80000000) +#define ODN_PDP_OPMASK_R_MASKLEVEL_LSBMASK (0x00000001) +#define ODN_PDP_OPMASK_R_MASKLEVEL_SHIFT (31) +#define ODN_PDP_OPMASK_R_MASKLEVEL_LENGTH (1) +#define ODN_PDP_OPMASK_R_MASKLEVEL_SIGNED_FIELD IMG_FALSE + +/* PDP, OPMASK_R, BLANKLEVEL +*/ +#define ODN_PDP_OPMASK_R_BLANKLEVEL_MASK (0x40000000) +#define ODN_PDP_OPMASK_R_BLANKLEVEL_LSBMASK (0x00000001) +#define ODN_PDP_OPMASK_R_BLANKLEVEL_SHIFT (30) +#define ODN_PDP_OPMASK_R_BLANKLEVEL_LENGTH (1) +#define 
ODN_PDP_OPMASK_R_BLANKLEVEL_SIGNED_FIELD IMG_FALSE + +/* PDP, OPMASK_R, MASKR +*/ +#define ODN_PDP_OPMASK_R_MASKR_MASK (0x000003FF) +#define ODN_PDP_OPMASK_R_MASKR_LSBMASK (0x000003FF) +#define ODN_PDP_OPMASK_R_MASKR_SHIFT (0) +#define ODN_PDP_OPMASK_R_MASKR_LENGTH (10) +#define ODN_PDP_OPMASK_R_MASKR_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_OPMASK_GB_OFFSET (0x08CC) + +/* PDP, OPMASK_GB, MASKG +*/ +#define ODN_PDP_OPMASK_GB_MASKG_MASK (0x03FF0000) +#define ODN_PDP_OPMASK_GB_MASKG_LSBMASK (0x000003FF) +#define ODN_PDP_OPMASK_GB_MASKG_SHIFT (16) +#define ODN_PDP_OPMASK_GB_MASKG_LENGTH (10) +#define ODN_PDP_OPMASK_GB_MASKG_SIGNED_FIELD IMG_FALSE + +/* PDP, OPMASK_GB, MASKB +*/ +#define ODN_PDP_OPMASK_GB_MASKB_MASK (0x000003FF) +#define ODN_PDP_OPMASK_GB_MASKB_LSBMASK (0x000003FF) +#define ODN_PDP_OPMASK_GB_MASKB_SHIFT (0) +#define ODN_PDP_OPMASK_GB_MASKB_LENGTH (10) +#define ODN_PDP_OPMASK_GB_MASKB_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_REGLD_ADDR_CTRL_OFFSET (0x08D0) + +/* PDP, REGLD_ADDR_CTRL, REGLD_ADDRIN +*/ +#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_MASK (0xFFFFFFF0) +#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LSBMASK (0x0FFFFFFF) +#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SHIFT (4) +#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LENGTH (28) +#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_REGLD_ADDR_STAT_OFFSET (0x08D4) + +/* PDP, REGLD_ADDR_STAT, REGLD_ADDROUT +*/ +#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_MASK (0xFFFFFFF0) +#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LSBMASK (0x0FFFFFFF) +#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SHIFT (4) +#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LENGTH (28) +#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_REGLD_STAT_OFFSET (0x08D8) + +/* PDP, REGLD_STAT, REGLD_ADDREN +*/ +#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_MASK (0x00800000) +#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_LSBMASK (0x00000001) +#define 
ODN_PDP_REGLD_STAT_REGLD_ADDREN_SHIFT (23) +#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_LENGTH (1) +#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_REGLD_CTRL_OFFSET (0x08DC) + +/* PDP, REGLD_CTRL, REGLD_ADDRLEN +*/ +#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_MASK (0xFF000000) +#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_LSBMASK (0x000000FF) +#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_SHIFT (24) +#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_LENGTH (8) +#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_SIGNED_FIELD IMG_FALSE + +/* PDP, REGLD_CTRL, REGLD_VAL +*/ +#define ODN_PDP_REGLD_CTRL_REGLD_VAL_MASK (0x00800000) +#define ODN_PDP_REGLD_CTRL_REGLD_VAL_LSBMASK (0x00000001) +#define ODN_PDP_REGLD_CTRL_REGLD_VAL_SHIFT (23) +#define ODN_PDP_REGLD_CTRL_REGLD_VAL_LENGTH (1) +#define ODN_PDP_REGLD_CTRL_REGLD_VAL_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_UPDCTRL_OFFSET (0x08E0) + +/* PDP, UPDCTRL, UPDFIELD +*/ +#define ODN_PDP_UPDCTRL_UPDFIELD_MASK (0x00000001) +#define ODN_PDP_UPDCTRL_UPDFIELD_LSBMASK (0x00000001) +#define ODN_PDP_UPDCTRL_UPDFIELD_SHIFT (0) +#define ODN_PDP_UPDCTRL_UPDFIELD_LENGTH (1) +#define ODN_PDP_UPDCTRL_UPDFIELD_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_INTCTRL_OFFSET (0x08E4) + +/* PDP, PVR_ODN_PDP_INTCTRL, HBLNK_LINE +*/ +#define ODN_PDP_INTCTRL_HBLNK_LINE_MASK (0x00010000) +#define ODN_PDP_INTCTRL_HBLNK_LINE_LSBMASK (0x00000001) +#define ODN_PDP_INTCTRL_HBLNK_LINE_SHIFT (16) +#define ODN_PDP_INTCTRL_HBLNK_LINE_LENGTH (1) +#define ODN_PDP_INTCTRL_HBLNK_LINE_SIGNED_FIELD IMG_FALSE + +/* PDP, PVR_ODN_PDP_INTCTRL, HBLNK_LINENO +*/ +#define ODN_PDP_INTCTRL_HBLNK_LINENO_MASK (0x00001FFF) +#define ODN_PDP_INTCTRL_HBLNK_LINENO_LSBMASK (0x00001FFF) +#define ODN_PDP_INTCTRL_HBLNK_LINENO_SHIFT (0) +#define ODN_PDP_INTCTRL_HBLNK_LINENO_LENGTH (13) +#define ODN_PDP_INTCTRL_HBLNK_LINENO_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_PDISETUP_OFFSET (0x0900) + +/* PDP, PDISETUP, PDI_BLNKLVL +*/ +#define ODN_PDP_PDISETUP_PDI_BLNKLVL_MASK (0x00000040) 
+#define ODN_PDP_PDISETUP_PDI_BLNKLVL_LSBMASK (0x00000001) +#define ODN_PDP_PDISETUP_PDI_BLNKLVL_SHIFT (6) +#define ODN_PDP_PDISETUP_PDI_BLNKLVL_LENGTH (1) +#define ODN_PDP_PDISETUP_PDI_BLNKLVL_SIGNED_FIELD IMG_FALSE + +/* PDP, PDISETUP, PDI_BLNK +*/ +#define ODN_PDP_PDISETUP_PDI_BLNK_MASK (0x00000020) +#define ODN_PDP_PDISETUP_PDI_BLNK_LSBMASK (0x00000001) +#define ODN_PDP_PDISETUP_PDI_BLNK_SHIFT (5) +#define ODN_PDP_PDISETUP_PDI_BLNK_LENGTH (1) +#define ODN_PDP_PDISETUP_PDI_BLNK_SIGNED_FIELD IMG_FALSE + +/* PDP, PDISETUP, PDI_PWR +*/ +#define ODN_PDP_PDISETUP_PDI_PWR_MASK (0x00000010) +#define ODN_PDP_PDISETUP_PDI_PWR_LSBMASK (0x00000001) +#define ODN_PDP_PDISETUP_PDI_PWR_SHIFT (4) +#define ODN_PDP_PDISETUP_PDI_PWR_LENGTH (1) +#define ODN_PDP_PDISETUP_PDI_PWR_SIGNED_FIELD IMG_FALSE + +/* PDP, PDISETUP, PDI_EN +*/ +#define ODN_PDP_PDISETUP_PDI_EN_MASK (0x00000008) +#define ODN_PDP_PDISETUP_PDI_EN_LSBMASK (0x00000001) +#define ODN_PDP_PDISETUP_PDI_EN_SHIFT (3) +#define ODN_PDP_PDISETUP_PDI_EN_LENGTH (1) +#define ODN_PDP_PDISETUP_PDI_EN_SIGNED_FIELD IMG_FALSE + +/* PDP, PDISETUP, PDI_GDEN +*/ +#define ODN_PDP_PDISETUP_PDI_GDEN_MASK (0x00000004) +#define ODN_PDP_PDISETUP_PDI_GDEN_LSBMASK (0x00000001) +#define ODN_PDP_PDISETUP_PDI_GDEN_SHIFT (2) +#define ODN_PDP_PDISETUP_PDI_GDEN_LENGTH (1) +#define ODN_PDP_PDISETUP_PDI_GDEN_SIGNED_FIELD IMG_FALSE + +/* PDP, PDISETUP, PDI_NFEN +*/ +#define ODN_PDP_PDISETUP_PDI_NFEN_MASK (0x00000002) +#define ODN_PDP_PDISETUP_PDI_NFEN_LSBMASK (0x00000001) +#define ODN_PDP_PDISETUP_PDI_NFEN_SHIFT (1) +#define ODN_PDP_PDISETUP_PDI_NFEN_LENGTH (1) +#define ODN_PDP_PDISETUP_PDI_NFEN_SIGNED_FIELD IMG_FALSE + +/* PDP, PDISETUP, PDI_CR +*/ +#define ODN_PDP_PDISETUP_PDI_CR_MASK (0x00000001) +#define ODN_PDP_PDISETUP_PDI_CR_LSBMASK (0x00000001) +#define ODN_PDP_PDISETUP_PDI_CR_SHIFT (0) +#define ODN_PDP_PDISETUP_PDI_CR_LENGTH (1) +#define ODN_PDP_PDISETUP_PDI_CR_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_PDITIMING0_OFFSET (0x0904) + +/* PDP, 
PDITIMING0, PDI_PWRSVGD +*/ +#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_MASK (0x0F000000) +#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_LSBMASK (0x0000000F) +#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_SHIFT (24) +#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_LENGTH (4) +#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_SIGNED_FIELD IMG_FALSE + +/* PDP, PDITIMING0, PDI_LSDEL +*/ +#define ODN_PDP_PDITIMING0_PDI_LSDEL_MASK (0x007F0000) +#define ODN_PDP_PDITIMING0_PDI_LSDEL_LSBMASK (0x0000007F) +#define ODN_PDP_PDITIMING0_PDI_LSDEL_SHIFT (16) +#define ODN_PDP_PDITIMING0_PDI_LSDEL_LENGTH (7) +#define ODN_PDP_PDITIMING0_PDI_LSDEL_SIGNED_FIELD IMG_FALSE + +/* PDP, PDITIMING0, PDI_PWRSV2GD2 +*/ +#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_MASK (0x000003FF) +#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_LSBMASK (0x000003FF) +#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_SHIFT (0) +#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_LENGTH (10) +#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_PDITIMING1_OFFSET (0x0908) + +/* PDP, PDITIMING1, PDI_NLDEL +*/ +#define ODN_PDP_PDITIMING1_PDI_NLDEL_MASK (0x000F0000) +#define ODN_PDP_PDITIMING1_PDI_NLDEL_LSBMASK (0x0000000F) +#define ODN_PDP_PDITIMING1_PDI_NLDEL_SHIFT (16) +#define ODN_PDP_PDITIMING1_PDI_NLDEL_LENGTH (4) +#define ODN_PDP_PDITIMING1_PDI_NLDEL_SIGNED_FIELD IMG_FALSE + +/* PDP, PDITIMING1, PDI_ACBDEL +*/ +#define ODN_PDP_PDITIMING1_PDI_ACBDEL_MASK (0x000003FF) +#define ODN_PDP_PDITIMING1_PDI_ACBDEL_LSBMASK (0x000003FF) +#define ODN_PDP_PDITIMING1_PDI_ACBDEL_SHIFT (0) +#define ODN_PDP_PDITIMING1_PDI_ACBDEL_LENGTH (10) +#define ODN_PDP_PDITIMING1_PDI_ACBDEL_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_PDICOREID_OFFSET (0x090C) + +/* PDP, PDICOREID, PDI_GROUP_ID +*/ +#define ODN_PDP_PDICOREID_PDI_GROUP_ID_MASK (0xFF000000) +#define ODN_PDP_PDICOREID_PDI_GROUP_ID_LSBMASK (0x000000FF) +#define ODN_PDP_PDICOREID_PDI_GROUP_ID_SHIFT (24) +#define ODN_PDP_PDICOREID_PDI_GROUP_ID_LENGTH (8) +#define ODN_PDP_PDICOREID_PDI_GROUP_ID_SIGNED_FIELD IMG_FALSE 
+ +/* PDP, PDICOREID, PDI_CORE_ID +*/ +#define ODN_PDP_PDICOREID_PDI_CORE_ID_MASK (0x00FF0000) +#define ODN_PDP_PDICOREID_PDI_CORE_ID_LSBMASK (0x000000FF) +#define ODN_PDP_PDICOREID_PDI_CORE_ID_SHIFT (16) +#define ODN_PDP_PDICOREID_PDI_CORE_ID_LENGTH (8) +#define ODN_PDP_PDICOREID_PDI_CORE_ID_SIGNED_FIELD IMG_FALSE + +/* PDP, PDICOREID, PDI_CONFIG_ID +*/ +#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_MASK (0x0000FFFF) +#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_LSBMASK (0x0000FFFF) +#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_SHIFT (0) +#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_LENGTH (16) +#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_PDICOREREV_OFFSET (0x0910) + +/* PDP, PDICOREREV, PDI_MAJOR_REV +*/ +#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_MASK (0x00FF0000) +#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_LSBMASK (0x000000FF) +#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_SHIFT (16) +#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_LENGTH (8) +#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_SIGNED_FIELD IMG_FALSE + +/* PDP, PDICOREREV, PDI_MINOR_REV +*/ +#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_MASK (0x0000FF00) +#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_LSBMASK (0x000000FF) +#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_SHIFT (8) +#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_LENGTH (8) +#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_SIGNED_FIELD IMG_FALSE + +/* PDP, PDICOREREV, PDI_MAINT_REV +*/ +#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_MASK (0x000000FF) +#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_LSBMASK (0x000000FF) +#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_SHIFT (0) +#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_LENGTH (8) +#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_DITHERMATRIX2_OFFSET (0x0920) + +/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y1 +*/ +#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_MASK (0x000000C0) +#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LSBMASK (0x00000003) +#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SHIFT (6) +#define 
ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LENGTH (2) +#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y1 +*/ +#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_MASK (0x00000030) +#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LSBMASK (0x00000003) +#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SHIFT (4) +#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LENGTH (2) +#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y0 +*/ +#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_MASK (0x0000000C) +#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LSBMASK (0x00000003) +#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SHIFT (2) +#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LENGTH (2) +#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y0 +*/ +#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_MASK (0x00000003) +#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LSBMASK (0x00000003) +#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SHIFT (0) +#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LENGTH (2) +#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_DITHERMATRIX4_0_OFFSET (0x0924) + +/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y1 +*/ +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_MASK (0xF0000000) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LSBMASK (0x0000000F) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SHIFT (28) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LENGTH (4) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y1 +*/ +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_MASK (0x0F000000) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LSBMASK (0x0000000F) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SHIFT (24) +#define 
ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LENGTH (4) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y1 +*/ +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_MASK (0x00F00000) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LSBMASK (0x0000000F) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SHIFT (20) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LENGTH (4) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y1 +*/ +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_MASK (0x000F0000) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LSBMASK (0x0000000F) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SHIFT (16) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LENGTH (4) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y0 +*/ +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_MASK (0x0000F000) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LSBMASK (0x0000000F) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SHIFT (12) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LENGTH (4) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y0 +*/ +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_MASK (0x00000F00) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LSBMASK (0x0000000F) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SHIFT (8) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LENGTH (4) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y0 +*/ +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_MASK (0x000000F0) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LSBMASK (0x0000000F) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SHIFT (4) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LENGTH (4) 
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y0 +*/ +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_MASK (0x0000000F) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LSBMASK (0x0000000F) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SHIFT (0) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LENGTH (4) +#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_DITHERMATRIX4_1_OFFSET (0x0928) + +/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y3 +*/ +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_MASK (0xF0000000) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LSBMASK (0x0000000F) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SHIFT (28) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LENGTH (4) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y3 +*/ +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_MASK (0x0F000000) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LSBMASK (0x0000000F) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SHIFT (24) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LENGTH (4) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y3 +*/ +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_MASK (0x00F00000) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LSBMASK (0x0000000F) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SHIFT (20) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LENGTH (4) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y3 +*/ +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_MASK (0x000F0000) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LSBMASK (0x0000000F) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SHIFT (16) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LENGTH (4) 
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y2 +*/ +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_MASK (0x0000F000) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LSBMASK (0x0000000F) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SHIFT (12) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LENGTH (4) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y2 +*/ +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_MASK (0x00000F00) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LSBMASK (0x0000000F) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SHIFT (8) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LENGTH (4) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y2 +*/ +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_MASK (0x000000F0) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LSBMASK (0x0000000F) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SHIFT (4) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LENGTH (4) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y2 +*/ +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_MASK (0x0000000F) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LSBMASK (0x0000000F) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SHIFT (0) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LENGTH (4) +#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_DITHERMATRIX8_0_OFFSET (0x092C) + +/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X4Y0 +*/ +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_MASK (0x3F000000) +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SHIFT (24) +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LENGTH (6) 
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X3Y0 +*/ +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_MASK (0x00FC0000) +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SHIFT (18) +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X2Y0 +*/ +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_MASK (0x0003F000) +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SHIFT (12) +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X1Y0 +*/ +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_MASK (0x00000FC0) +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SHIFT (6) +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X0Y0 +*/ +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_MASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SHIFT (0) +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_DITHERMATRIX8_1_OFFSET (0x0930) + +/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X1Y1 +*/ +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_MASK (0x3F000000) +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SHIFT (24) +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LENGTH (6) 
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X0Y1 +*/ +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_MASK (0x00FC0000) +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SHIFT (18) +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X7Y0 +*/ +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_MASK (0x0003F000) +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SHIFT (12) +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X6Y0 +*/ +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_MASK (0x00000FC0) +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SHIFT (6) +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X5Y0 +*/ +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_MASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SHIFT (0) +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_DITHERMATRIX8_2_OFFSET (0x0934) + +/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X6Y1 +*/ +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_MASK (0x3F000000) +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SHIFT (24) +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LENGTH (6) 
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X5Y1 +*/ +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_MASK (0x00FC0000) +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SHIFT (18) +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X4Y1 +*/ +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_MASK (0x0003F000) +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SHIFT (12) +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X3Y1 +*/ +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_MASK (0x00000FC0) +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SHIFT (6) +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X2Y1 +*/ +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_MASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SHIFT (0) +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_DITHERMATRIX8_3_OFFSET (0x0938) + +/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X3Y2 +*/ +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_MASK (0x3F000000) +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SHIFT (24) +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LENGTH (6) 
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X2Y2 +*/ +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_MASK (0x00FC0000) +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SHIFT (18) +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X1Y2 +*/ +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_MASK (0x0003F000) +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SHIFT (12) +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X0Y2 +*/ +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_MASK (0x00000FC0) +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SHIFT (6) +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X7Y1 +*/ +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_MASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SHIFT (0) +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_DITHERMATRIX8_4_OFFSET (0x093C) + +/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X0Y3 +*/ +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_MASK (0x3F000000) +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SHIFT (24) +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LENGTH (6) 
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X7Y2 +*/ +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_MASK (0x00FC0000) +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SHIFT (18) +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X6Y2 +*/ +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_MASK (0x0003F000) +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SHIFT (12) +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X5Y2 +*/ +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_MASK (0x00000FC0) +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SHIFT (6) +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X4Y2 +*/ +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_MASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SHIFT (0) +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_DITHERMATRIX8_5_OFFSET (0x0940) + +/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X5Y3 +*/ +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_MASK (0x3F000000) +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SHIFT (24) +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LENGTH (6) 
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X4Y3 +*/ +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_MASK (0x00FC0000) +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SHIFT (18) +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X3Y3 +*/ +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_MASK (0x0003F000) +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SHIFT (12) +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X2Y3 +*/ +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_MASK (0x00000FC0) +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SHIFT (6) +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X1Y3 +*/ +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_MASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SHIFT (0) +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_DITHERMATRIX8_6_OFFSET (0x0944) + +/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X2Y4 +*/ +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_MASK (0x3F000000) +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SHIFT (24) +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LENGTH (6) 
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X1Y4 +*/ +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_MASK (0x00FC0000) +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SHIFT (18) +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X0Y4 +*/ +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_MASK (0x0003F000) +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SHIFT (12) +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X7Y3 +*/ +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_MASK (0x00000FC0) +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SHIFT (6) +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X6Y3 +*/ +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_MASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SHIFT (0) +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_DITHERMATRIX8_7_OFFSET (0x0948) + +/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X7Y4 +*/ +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_MASK (0x3F000000) +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SHIFT (24) +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LENGTH (6) 
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X6Y4 +*/ +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_MASK (0x00FC0000) +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SHIFT (18) +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X5Y4 +*/ +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_MASK (0x0003F000) +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SHIFT (12) +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X4Y4 +*/ +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_MASK (0x00000FC0) +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SHIFT (6) +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X3Y4 +*/ +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_MASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SHIFT (0) +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_DITHERMATRIX8_8_OFFSET (0x094C) + +/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X4Y5 +*/ +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_MASK (0x3F000000) +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SHIFT (24) +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LENGTH (6) 
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X3Y5 +*/ +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_MASK (0x00FC0000) +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SHIFT (18) +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X2Y5 +*/ +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_MASK (0x0003F000) +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SHIFT (12) +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X1Y5 +*/ +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_MASK (0x00000FC0) +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SHIFT (6) +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X0Y5 +*/ +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_MASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SHIFT (0) +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_DITHERMATRIX8_9_OFFSET (0x0950) + +/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X1Y6 +*/ +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_MASK (0x3F000000) +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SHIFT (24) +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LENGTH (6) 
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X0Y6 +*/ +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_MASK (0x00FC0000) +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SHIFT (18) +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X7Y5 +*/ +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_MASK (0x0003F000) +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SHIFT (12) +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X6Y5 +*/ +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_MASK (0x00000FC0) +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SHIFT (6) +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X5Y5 +*/ +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_MASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SHIFT (0) +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_DITHERMATRIX8_10_OFFSET (0x0954) + +/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X6Y6 +*/ +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_MASK (0x3F000000) +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SHIFT (24) +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LENGTH (6) 
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X5Y6 +*/ +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_MASK (0x00FC0000) +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SHIFT (18) +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X4Y6 +*/ +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_MASK (0x0003F000) +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SHIFT (12) +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X3Y6 +*/ +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_MASK (0x00000FC0) +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SHIFT (6) +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X2Y6 +*/ +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_MASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SHIFT (0) +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_DITHERMATRIX8_11_OFFSET (0x0958) + +/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X3Y7 +*/ +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_MASK (0x3F000000) +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SHIFT (24) +#define 
ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X2Y7 +*/ +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_MASK (0x00FC0000) +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SHIFT (18) +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X1Y7 +*/ +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_MASK (0x0003F000) +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SHIFT (12) +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X0Y7 +*/ +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_MASK (0x00000FC0) +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SHIFT (6) +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X7Y6 +*/ +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_MASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SHIFT (0) +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_DITHERMATRIX8_12_OFFSET (0x095C) + +/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X7Y7 +*/ +#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_MASK (0x00FC0000) +#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LSBMASK (0x0000003F) +#define 
ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SHIFT (18) +#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X6Y7 +*/ +#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_MASK (0x0003F000) +#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SHIFT (12) +#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X5Y7 +*/ +#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_MASK (0x00000FC0) +#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SHIFT (6) +#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X4Y7 +*/ +#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_MASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LSBMASK (0x0000003F) +#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SHIFT (0) +#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LENGTH (6) +#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH1_MEMCTRL_OFFSET (0x0960) + +/* PDP, GRPH1_MEMCTRL, GRPH1_LOCAL_GLOBAL_MEMCTRL +*/ +#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) +#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) +#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) +#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) +#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1_MEMCTRL, GRPH1_BURSTLEN +*/ +#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_MASK (0x000000FF) +#define 
ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LSBMASK (0x000000FF) +#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SHIFT (0) +#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LENGTH (8) +#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH1_MEM_THRESH_OFFSET (0x0964) + +/* PDP, GRPH1_MEM_THRESH, GRPH1_UVTHRESHOLD +*/ +#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_MASK (0xFF000000) +#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LSBMASK (0x000000FF) +#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SHIFT (24) +#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LENGTH (8) +#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1_MEM_THRESH, GRPH1_YTHRESHOLD +*/ +#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_MASK (0x001FF000) +#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LSBMASK (0x000001FF) +#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SHIFT (12) +#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LENGTH (9) +#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1_MEM_THRESH, GRPH1_THRESHOLD +*/ +#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_MASK (0x000001FF) +#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LSBMASK (0x000001FF) +#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SHIFT (0) +#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LENGTH (9) +#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH2_MEMCTRL_OFFSET (0x0968) + +/* PDP, GRPH2_MEMCTRL, GRPH2_LOCAL_GLOBAL_MEMCTRL +*/ +#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) +#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) +#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) +#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) +#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2_MEMCTRL, GRPH2_BURSTLEN +*/ +#define 
ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_MASK (0x000000FF) +#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LSBMASK (0x000000FF) +#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SHIFT (0) +#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LENGTH (8) +#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH2_MEM_THRESH_OFFSET (0x096C) + +/* PDP, GRPH2_MEM_THRESH, GRPH2_UVTHRESHOLD +*/ +#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_MASK (0xFF000000) +#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LSBMASK (0x000000FF) +#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SHIFT (24) +#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LENGTH (8) +#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2_MEM_THRESH, GRPH2_YTHRESHOLD +*/ +#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_MASK (0x001FF000) +#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LSBMASK (0x000001FF) +#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SHIFT (12) +#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LENGTH (9) +#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2_MEM_THRESH, GRPH2_THRESHOLD +*/ +#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_MASK (0x000001FF) +#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LSBMASK (0x000001FF) +#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SHIFT (0) +#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LENGTH (9) +#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH3_MEMCTRL_OFFSET (0x0970) + +/* PDP, GRPH3_MEMCTRL, GRPH3_LOCAL_GLOBAL_MEMCTRL +*/ +#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) +#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) +#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) +#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) +#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD 
IMG_FALSE + +/* PDP, GRPH3_MEMCTRL, GRPH3_BURSTLEN +*/ +#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_MASK (0x000000FF) +#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LSBMASK (0x000000FF) +#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SHIFT (0) +#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LENGTH (8) +#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH3_MEM_THRESH_OFFSET (0x0974) + +/* PDP, GRPH3_MEM_THRESH, GRPH3_UVTHRESHOLD +*/ +#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_MASK (0xFF000000) +#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LSBMASK (0x000000FF) +#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SHIFT (24) +#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LENGTH (8) +#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3_MEM_THRESH, GRPH3_YTHRESHOLD +*/ +#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_MASK (0x001FF000) +#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LSBMASK (0x000001FF) +#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SHIFT (12) +#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LENGTH (9) +#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3_MEM_THRESH, GRPH3_THRESHOLD +*/ +#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_MASK (0x000001FF) +#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LSBMASK (0x000001FF) +#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SHIFT (0) +#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LENGTH (9) +#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH4_MEMCTRL_OFFSET (0x0978) + +/* PDP, GRPH4_MEMCTRL, GRPH4_LOCAL_GLOBAL_MEMCTRL +*/ +#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) +#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) +#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) +#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) +#define 
ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4_MEMCTRL, GRPH4_BURSTLEN +*/ +#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_MASK (0x000000FF) +#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LSBMASK (0x000000FF) +#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SHIFT (0) +#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LENGTH (8) +#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH4_MEM_THRESH_OFFSET (0x097C) + +/* PDP, GRPH4_MEM_THRESH, GRPH4_UVTHRESHOLD +*/ +#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_MASK (0xFF000000) +#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LSBMASK (0x000000FF) +#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SHIFT (24) +#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LENGTH (8) +#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4_MEM_THRESH, GRPH4_YTHRESHOLD +*/ +#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_MASK (0x001FF000) +#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LSBMASK (0x000001FF) +#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SHIFT (12) +#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LENGTH (9) +#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4_MEM_THRESH, GRPH4_THRESHOLD +*/ +#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_MASK (0x000001FF) +#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LSBMASK (0x000001FF) +#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SHIFT (0) +#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LENGTH (9) +#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1_MEMCTRL_OFFSET (0x0980) + +/* PDP, VID1_MEMCTRL, VID1_LOCAL_GLOBAL_MEMCTRL +*/ +#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) +#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) +#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) +#define 
ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) +#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1_MEMCTRL, VID1_BURSTLEN +*/ +#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_MASK (0x000000FF) +#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_LSBMASK (0x000000FF) +#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_SHIFT (0) +#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_LENGTH (8) +#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1_MEM_THRESH_OFFSET (0x0984) + +/* PDP, VID1_MEM_THRESH, VID1_UVTHRESHOLD +*/ +#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_MASK (0xFF000000) +#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LSBMASK (0x000000FF) +#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SHIFT (24) +#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LENGTH (8) +#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1_MEM_THRESH, VID1_YTHRESHOLD +*/ +#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_MASK (0x001FF000) +#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LSBMASK (0x000001FF) +#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SHIFT (12) +#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LENGTH (9) +#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1_MEM_THRESH, VID1_THRESHOLD +*/ +#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_MASK (0x000001FF) +#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LSBMASK (0x000001FF) +#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SHIFT (0) +#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LENGTH (9) +#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2_MEMCTRL_OFFSET (0x0988) + +/* PDP, VID2_MEMCTRL, VID2_LOCAL_GLOBAL_MEMCTRL +*/ +#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) +#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) +#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) +#define 
ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) +#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2_MEMCTRL, VID2_BURSTLEN +*/ +#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_MASK (0x000000FF) +#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_LSBMASK (0x000000FF) +#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_SHIFT (0) +#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_LENGTH (8) +#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2_MEM_THRESH_OFFSET (0x098C) + +/* PDP, VID2_MEM_THRESH, VID2_UVTHRESHOLD +*/ +#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_MASK (0xFF000000) +#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LSBMASK (0x000000FF) +#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SHIFT (24) +#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LENGTH (8) +#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2_MEM_THRESH, VID2_YTHRESHOLD +*/ +#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_MASK (0x001FF000) +#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LSBMASK (0x000001FF) +#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SHIFT (12) +#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LENGTH (9) +#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2_MEM_THRESH, VID2_THRESHOLD +*/ +#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_MASK (0x000001FF) +#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LSBMASK (0x000001FF) +#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SHIFT (0) +#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LENGTH (9) +#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3_MEMCTRL_OFFSET (0x0990) + +/* PDP, VID3_MEMCTRL, VID3_LOCAL_GLOBAL_MEMCTRL +*/ +#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) +#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) +#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) +#define 
ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) +#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3_MEMCTRL, VID3_BURSTLEN +*/ +#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_MASK (0x000000FF) +#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_LSBMASK (0x000000FF) +#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_SHIFT (0) +#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_LENGTH (8) +#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3_MEM_THRESH_OFFSET (0x0994) + +/* PDP, VID3_MEM_THRESH, VID3_UVTHRESHOLD +*/ +#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_MASK (0xFF000000) +#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LSBMASK (0x000000FF) +#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SHIFT (24) +#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LENGTH (8) +#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3_MEM_THRESH, VID3_YTHRESHOLD +*/ +#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_MASK (0x001FF000) +#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LSBMASK (0x000001FF) +#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SHIFT (12) +#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LENGTH (9) +#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3_MEM_THRESH, VID3_THRESHOLD +*/ +#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_MASK (0x000001FF) +#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LSBMASK (0x000001FF) +#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SHIFT (0) +#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LENGTH (9) +#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4_MEMCTRL_OFFSET (0x0998) + +/* PDP, VID4_MEMCTRL, VID4_LOCAL_GLOBAL_MEMCTRL +*/ +#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) +#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) +#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) +#define 
ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) +#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4_MEMCTRL, VID4_BURSTLEN +*/ +#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_MASK (0x000000FF) +#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_LSBMASK (0x000000FF) +#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_SHIFT (0) +#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_LENGTH (8) +#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4_MEM_THRESH_OFFSET (0x099C) + +/* PDP, VID4_MEM_THRESH, VID4_UVTHRESHOLD +*/ +#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_MASK (0xFF000000) +#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LSBMASK (0x000000FF) +#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SHIFT (24) +#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LENGTH (8) +#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4_MEM_THRESH, VID4_YTHRESHOLD +*/ +#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_MASK (0x001FF000) +#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LSBMASK (0x000001FF) +#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SHIFT (12) +#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LENGTH (9) +#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4_MEM_THRESH, VID4_THRESHOLD +*/ +#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_MASK (0x000001FF) +#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LSBMASK (0x000001FF) +#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SHIFT (0) +#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LENGTH (9) +#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH1_PANIC_THRESH_OFFSET (0x09A0) + +/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_ENABLE +*/ +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_MASK (0x80000000) +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LSBMASK (0x00000001) +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SHIFT (31) 
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LENGTH (1) +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_ENABLE +*/ +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_MASK (0x40000000) +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LSBMASK (0x00000001) +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SHIFT (30) +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LENGTH (1) +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MAX +*/ +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SHIFT (23) +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LENGTH (7) +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MIN +*/ +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SHIFT (16) +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LENGTH (7) +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MAX +*/ +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SHIFT (8) +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LENGTH (8) +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, 
GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MIN +*/ +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SHIFT (0) +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LENGTH (8) +#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH2_PANIC_THRESH_OFFSET (0x09A4) + +/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_ENABLE +*/ +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_MASK (0x80000000) +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LSBMASK (0x00000001) +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SHIFT (31) +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LENGTH (1) +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_ENABLE +*/ +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_MASK (0x40000000) +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LSBMASK (0x00000001) +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SHIFT (30) +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LENGTH (1) +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MAX +*/ +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SHIFT (23) +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LENGTH (7) +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MIN +*/ +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) +#define 
ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SHIFT (16) +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LENGTH (7) +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MAX +*/ +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SHIFT (8) +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LENGTH (8) +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MIN +*/ +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SHIFT (0) +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LENGTH (8) +#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH3_PANIC_THRESH_OFFSET (0x09A8) + +/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_ENABLE +*/ +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_MASK (0x80000000) +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LSBMASK (0x00000001) +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SHIFT (31) +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LENGTH (1) +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_ENABLE +*/ +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_MASK (0x40000000) +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LSBMASK (0x00000001) +#define 
ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SHIFT (30) +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LENGTH (1) +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MAX +*/ +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SHIFT (23) +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LENGTH (7) +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MIN +*/ +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SHIFT (16) +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LENGTH (7) +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MAX +*/ +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SHIFT (8) +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LENGTH (8) +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MIN +*/ +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SHIFT (0) +#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LENGTH (8) 
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_GRPH4_PANIC_THRESH_OFFSET (0x09AC) + +/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_ENABLE +*/ +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_MASK (0x80000000) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LSBMASK (0x00000001) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SHIFT (31) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LENGTH (1) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_ENABLE +*/ +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_MASK (0x40000000) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LSBMASK (0x00000001) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SHIFT (30) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LENGTH (1) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MAX +*/ +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SHIFT (23) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LENGTH (7) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MIN +*/ +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SHIFT (16) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LENGTH (7) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4_PANIC_THRESH, 
GRPH4_ALERT_Y_THRESHOLD_MAX +*/ +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SHIFT (8) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LENGTH (8) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_THRESHOLD_MIN +*/ +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SHIFT (0) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LENGTH (8) +#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID1_PANIC_THRESH_OFFSET (0x09B0) + +/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_ENABLE +*/ +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_MASK (0x80000000) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LSBMASK (0x00000001) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SHIFT (31) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LENGTH (1) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_ENABLE +*/ +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_MASK (0x40000000) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LSBMASK (0x00000001) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SHIFT (30) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LENGTH (1) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MAX +*/ +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LSBMASK 
(0x0000007F) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SHIFT (23) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LENGTH (7) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MIN +*/ +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SHIFT (16) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LENGTH (7) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MAX +*/ +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SHIFT (8) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LENGTH (8) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MIN +*/ +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SHIFT (0) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LENGTH (8) +#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID2_PANIC_THRESH_OFFSET (0x09B4) + +/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_ENABLE +*/ +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_MASK (0x80000000) +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LSBMASK (0x00000001) +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SHIFT (31) +#define 
ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LENGTH (1) +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_ENABLE +*/ +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_MASK (0x40000000) +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LSBMASK (0x00000001) +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SHIFT (30) +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LENGTH (1) +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MAX +*/ +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SHIFT (23) +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LENGTH (7) +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MIN +*/ +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SHIFT (16) +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LENGTH (7) +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MAX +*/ +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SHIFT (8) +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LENGTH (8) +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MIN +*/ +#define 
ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SHIFT (0) +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LENGTH (8) +#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID3_PANIC_THRESH_OFFSET (0x09B8) + +/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_ENABLE +*/ +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_MASK (0x80000000) +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LSBMASK (0x00000001) +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SHIFT (31) +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LENGTH (1) +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_ENABLE +*/ +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_MASK (0x40000000) +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LSBMASK (0x00000001) +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SHIFT (30) +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LENGTH (1) +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_THRESHOLD_MAX +*/ +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SHIFT (23) +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LENGTH (7) +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_THRESHOLD_MIN +*/ +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) +#define 
ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SHIFT (16) +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LENGTH (7) +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MAX +*/ +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SHIFT (8) +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LENGTH (8) +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MIN +*/ +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SHIFT (0) +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LENGTH (8) +#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_VID4_PANIC_THRESH_OFFSET (0x09BC) + +/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_ENABLE +*/ +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_MASK (0x80000000) +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LSBMASK (0x00000001) +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SHIFT (31) +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LENGTH (1) +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_ENABLE +*/ +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_MASK (0x40000000) +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LSBMASK (0x00000001) +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SHIFT (30) +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LENGTH (1) +#define 
ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MAX +*/ +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SHIFT (23) +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LENGTH (7) +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MIN +*/ +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SHIFT (16) +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LENGTH (7) +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MAX +*/ +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SHIFT (8) +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LENGTH (8) +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MIN +*/ +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SHIFT (0) +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LENGTH (8) +#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +#define ODN_PDP_BURST_BOUNDARY_OFFSET (0x09C0) + +/* PDP, BURST_BOUNDARY, BURST_BOUNDARY +*/ 
+#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_MASK (0x0000003F) +#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_LSBMASK (0x0000003F) +#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_SHIFT (0) +#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_LENGTH (6) +#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_SIGNED_FIELD IMG_FALSE + + +/* ---------------------- End of register definitions ---------------------- */ + +/* NUMREG defines the extent of register address space. +*/ + +#define ODN_PDP_NUMREG ((0x09C0 >> 2)+1) + +/* Info about video plane addresses */ +#define ODN_PDP_YADDR_BITS (ODN_PDP_VID1BASEADDR_VID1BASEADDR_LENGTH) +#define ODN_PDP_YADDR_ALIGN 5 +#define ODN_PDP_UADDR_BITS (ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH) +#define ODN_PDP_UADDR_ALIGN 5 +#define ODN_PDP_VADDR_BITS (ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH) +#define ODN_PDP_VADDR_ALIGN 5 + +#define ODN_PDP_YSTRIDE_BITS (ODN_PDP_VID1STRIDE_VID1STRIDE_LENGTH) +#define ODN_PDP_YSTRIDE_ALIGN 5 + +#define ODN_PDP_MAX_INPUT_WIDTH (ODN_PDP_VID1SIZE_VID1WIDTH_LSBMASK + 1) +#define ODN_PDP_MAX_INPUT_HEIGHT (ODN_PDP_VID1SIZE_VID1HEIGHT_LSBMASK + 1) + +/* Maximum 6 bytes per pixel for RGB161616 */ +#define ODN_PDP_MAX_IMAGE_BYTES (ODN_PDP_MAX_INPUT_WIDTH * ODN_PDP_MAX_INPUT_HEIGHT * 6) + +/* Round up */ +#define ODN_PDP_MAX_IMAGE_PAGES ((ODN_PDP_MAX_IMAGE_BYTES+PAGE_SIZE-1)/PAGE_SIZE) + +#define ODN_PDP_YADDR_MAX (((1 << ODN_PDP_YADDR_BITS) - 1) << ODN_PDP_YADDR_ALIGN) +#define ODN_PDP_UADDR_MAX (((1 << ODN_PDP_UADDR_BITS) - 1) << ODN_PDP_UADDR_ALIGN) +#define ODN_PDP_VADDR_MAX (((1 << ODN_PDP_VADDR_BITS) - 1) << ODN_PDP_VADDR_ALIGN) +#define ODN_PDP_YSTRIDE_MAX ((1 << ODN_PDP_YSTRIDE_BITS) << ODN_PDP_YSTRIDE_ALIGN) +#define ODN_PDP_YADDR_ALIGNMASK ((1 << ODN_PDP_YADDR_ALIGN) - 1) +#define ODN_PDP_UADDR_ALIGNMASK ((1 << ODN_PDP_UADDR_ALIGN) - 1) +#define ODN_PDP_VADDR_ALIGNMASK ((1 << ODN_PDP_VADDR_ALIGN) - 1) +#define ODN_PDP_YSTRIDE_ALIGNMASK ((1 << ODN_PDP_YSTRIDE_ALIGN) - 1) + +/* Field Values (some are 
reserved for future use) */ +#define ODN_PDP_SURF_PIXFMT_RGB332 0x3 +#define ODN_PDP_SURF_PIXFMT_ARGB4444 0x4 +#define ODN_PDP_SURF_PIXFMT_ARGB1555 0x5 +#define ODN_PDP_SURF_PIXFMT_RGB888 0x6 +#define ODN_PDP_SURF_PIXFMT_RGB565 0x7 +#define ODN_PDP_SURF_PIXFMT_ARGB8888 0x8 +#define ODN_PDP_SURF_PIXFMT_420_PL8 0x9 +#define ODN_PDP_SURF_PIXFMT_420_PL8IVU 0xA +#define ODN_PDP_SURF_PIXFMT_420_PL8IUV 0xB +#define ODN_PDP_SURF_PIXFMT_422_UY0VY1_8888 0xC +#define ODN_PDP_SURF_PIXFMT_422_VY0UY1_8888 0xD +#define ODN_PDP_SURF_PIXFMT_422_Y0UY1V_8888 0xE +#define ODN_PDP_SURF_PIXFMT_422_Y0VY1U_8888 0xF +#define ODN_PDP_SURF_PIXFMT_AYUV8888 0x10 +#define ODN_PDP_SURF_PIXFMT_YUV101010 0x15 +#define ODN_PDP_SURF_PIXFMT_RGB101010 0x17 +#define ODN_PDP_SURF_PIXFMT_420_PL10IUV 0x18 +#define ODN_PDP_SURF_PIXFMT_420_PL10IVU 0x19 +#define ODN_PDP_SURF_PIXFMT_422_PL10IUV 0x1A +#define ODN_PDP_SURF_PIXFMT_422_PL10IVU 0x1B +#define ODN_PDP_SURF_PIXFMT_RGB121212 0x1E +#define ODN_PDP_SURF_PIXFMT_RGB161616 0x1F + +#define ODN_PDP_CTRL_CKEYSRC_PREV 0x0 +#define ODN_PDP_CTRL_CKEYSRC_CUR 0x1 + +#define ODN_PDP_MEMCTRL_MEMREFRESH_ALWAYS 0x0 +#define ODN_PDP_MEMCTRL_MEMREFRESH_HBLNK 0x1 +#define ODN_PDP_MEMCTRL_MEMREFRESH_VBLNK 0x2 +#define ODN_PDP_MEMCTRL_MEMREFRESH_BOTH 0x3 + +#define ODN_PDP_3D_CTRL_BLENDSEL_BGND_WITH_POS0 0x0 +#define ODN_PDP_3D_CTRL_BLENDSEL_POS0_WITH_POS1 0x1 +#define ODN_PDP_3D_CTRL_BLENDSEL_POS1_WITH_POS2 0x2 +#define ODN_PDP_3D_CTRL_BLENDSEL_POS2_WITH_POS3 0x3 +#define ODN_PDP_3D_CTRL_BLENDSEL_POS3_WITH_POS4 0x4 +#define ODN_PDP_3D_CTRL_BLENDSEL_POS4_WITH_POS5 0x5 +#define ODN_PDP_3D_CTRL_BLENDSEL_POS5_WITH_POS6 0x6 +#define ODN_PDP_3D_CTRL_BLENDSEL_POS6_WITH_POS7 0x7 + +#define ODN_PDP_UADDR_UV_STRIDE_EQUAL_TO_Y_STRIDE 0x0 +#define ODN_PDP_UADDR_UV_STRIDE_EQUAL_TO_DOUBLE_Y_STRIDE 0x1 +#define ODN_PDP_UADDR_UV_STRIDE_EQUAL_TO_HALF_Y_STRIDE 0x2 + +#define ODN_PDP_PROCAMP_OUTPUT_OFFSET_FRACTIONAL_BITS 1 +#define ODN_PDP_PROCAMP_COEFFICIENT_FRACTIONAL_BITS 10 + 
+/*---------------------------------------------------------------------------*/ + +#endif /* ODN_PDP_REGS_H */ diff --git a/drivers/mcst/gpu-imgtec/include/system/rgx_tc/odin_regs.h b/drivers/mcst/gpu-imgtec/include/system/rgx_tc/odin_regs.h new file mode 100644 index 000000000000..01bd5979fed4 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/system/rgx_tc/odin_regs.h @@ -0,0 +1,924 @@ +/**************************************************************************** +@Title Odin system control register definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +@Description Odin FPGA register defs for IMG 3rd generation TCF + + Auto generated headers, eg. odn_core.h: + regconv -d . -a 8 odn_core.def + + Source files : + odn_core.def + mca_debug.def + sai_rx_debug.def + sai_tx_debug.def + ad_tx.def + + Changes: + Removed obsolete copyright dates + Changed lower case to upper case + (eg. odn_core changed to ODN_CORE) + Changed PVR5__ to ODN_ + Merged multiple .def files into one header + +****************************************************************************/ + +/* tab size 4 */ + +#ifndef _ODIN_REGS_H_ +#define _ODIN_REGS_H_ + +/****************************** + Generated from: odn_core.def +*******************************/ + +/* + Register ID +*/ +#define ODN_CORE_ID 0x0000 +#define ODN_ID_VARIANT_MASK 0x0000FFFFU +#define ODN_ID_VARIANT_SHIFT 0 +#define ODN_ID_VARIANT_SIGNED 0 + +#define ODN_ID_ID_MASK 0xFFFF0000U +#define ODN_ID_ID_SHIFT 16 +#define ODN_ID_ID_SIGNED 0 + +/* + Register REVISION +*/ +#define ODN_CORE_REVISION 0x0004 +#define ODN_REVISION_MINOR_MASK 0x0000FFFFU +#define ODN_REVISION_MINOR_SHIFT 0 +#define ODN_REVISION_MINOR_SIGNED 0 + +#define ODN_REVISION_MAJOR_MASK 0xFFFF0000U +#define ODN_REVISION_MAJOR_SHIFT 16 +#define ODN_REVISION_MAJOR_SIGNED 0 + +/* + Register CHANGE_SET +*/ +#define ODN_CORE_CHANGE_SET 0x0008 +#define ODN_CHANGE_SET_SET_MASK 0xFFFFFFFFU +#define 
ODN_CHANGE_SET_SET_SHIFT 0 +#define ODN_CHANGE_SET_SET_SIGNED 0 + +/* + Register USER_ID +*/ +#define ODN_CORE_USER_ID 0x000C +#define ODN_USER_ID_ID_MASK 0x0000000FU +#define ODN_USER_ID_ID_SHIFT 0 +#define ODN_USER_ID_ID_SIGNED 0 + +/* + Register USER_BUILD +*/ +#define ODN_CORE_USER_BUILD 0x0010 +#define ODN_USER_BUILD_BUILD_MASK 0xFFFFFFFFU +#define ODN_USER_BUILD_BUILD_SHIFT 0 +#define ODN_USER_BUILD_BUILD_SIGNED 0 + +/* + Register SW_IF_VERSION +*/ +#define ODN_CORE_SW_IF_VERSION 0x0014 +#define ODN_SW_IF_VERSION_VERSION_MASK 0x0000FFFFU +#define ODN_SW_IF_VERSION_VERSION_SHIFT 0 +#define ODN_SW_IF_VERSION_VERSION_SIGNED 0 + +/* + Register INTERNAL_RESETN +*/ +#define ODN_CORE_INTERNAL_RESETN 0x0080 +#define ODN_INTERNAL_RESETN_DDR_MASK 0x00000001U +#define ODN_INTERNAL_RESETN_DDR_SHIFT 0 +#define ODN_INTERNAL_RESETN_DDR_SIGNED 0 + +#define ODN_INTERNAL_RESETN_MIG0_MASK 0x00000002U +#define ODN_INTERNAL_RESETN_MIG0_SHIFT 1 +#define ODN_INTERNAL_RESETN_MIG0_SIGNED 0 + +#define ODN_INTERNAL_RESETN_MIG1_MASK 0x00000004U +#define ODN_INTERNAL_RESETN_MIG1_SHIFT 2 +#define ODN_INTERNAL_RESETN_MIG1_SIGNED 0 + +#define ODN_INTERNAL_RESETN_PDP1_MASK 0x00000008U +#define ODN_INTERNAL_RESETN_PDP1_SHIFT 3 +#define ODN_INTERNAL_RESETN_PDP1_SIGNED 0 + +#define ODN_INTERNAL_RESETN_PDP2_MASK 0x00000010U +#define ODN_INTERNAL_RESETN_PDP2_SHIFT 4 +#define ODN_INTERNAL_RESETN_PDP2_SIGNED 0 + +#define ODN_INTERNAL_RESETN_PERIP_MASK 0x00000020U +#define ODN_INTERNAL_RESETN_PERIP_SHIFT 5 +#define ODN_INTERNAL_RESETN_PERIP_SIGNED 0 + +#define ODN_INTERNAL_RESETN_GIST_MASK 0x00000040U +#define ODN_INTERNAL_RESETN_GIST_SHIFT 6 +#define ODN_INTERNAL_RESETN_GIST_SIGNED 0 + +#define ODN_INTERNAL_RESETN_PIKE_MASK 0x00000080U +#define ODN_INTERNAL_RESETN_PIKE_SHIFT 7 +#define ODN_INTERNAL_RESETN_PIKE_SIGNED 0 + +/* + Register EXTERNAL_RESETN +*/ +#define ODN_CORE_EXTERNAL_RESETN 0x0084 +#define ODN_EXTERNAL_RESETN_DUT_MASK 0x00000001U +#define ODN_EXTERNAL_RESETN_DUT_SHIFT 0 +#define 
ODN_EXTERNAL_RESETN_DUT_SIGNED 0 + +#define ODN_EXTERNAL_RESETN_DUT_SPI_MASK 0x00000002U +#define ODN_EXTERNAL_RESETN_DUT_SPI_SHIFT 1 +#define ODN_EXTERNAL_RESETN_DUT_SPI_SIGNED 0 + +/* + Register EXTERNAL_RESET +*/ +#define ODN_CORE_EXTERNAL_RESET 0x0088 +#define ODN_EXTERNAL_RESET_PVT_CAL_MASK 0x00000001U +#define ODN_EXTERNAL_RESET_PVT_CAL_SHIFT 0 +#define ODN_EXTERNAL_RESET_PVT_CAL_SIGNED 0 + +#define ODN_EXTERNAL_RESET_PLL_MASK 0x00000002U +#define ODN_EXTERNAL_RESET_PLL_SHIFT 1 +#define ODN_EXTERNAL_RESET_PLL_SIGNED 0 + +/* + Register INTERNAL_AUTO_RESETN +*/ +#define ODN_CORE_INTERNAL_AUTO_RESETN 0x008C +#define ODN_INTERNAL_AUTO_RESETN_AUX_MASK 0x00000001U +#define ODN_INTERNAL_AUTO_RESETN_AUX_SHIFT 0 +#define ODN_INTERNAL_AUTO_RESETN_AUX_SIGNED 0 + +/* + Register CLK_GEN_RESET +*/ +#define ODN_CORE_CLK_GEN_RESET 0x0090 +#define ODN_CLK_GEN_RESET_DUT_CORE_MMCM_MASK 0x00000001U +#define ODN_CLK_GEN_RESET_DUT_CORE_MMCM_SHIFT 0 +#define ODN_CLK_GEN_RESET_DUT_CORE_MMCM_SIGNED 0 + +#define ODN_CLK_GEN_RESET_DUT_IF_MMCM_MASK 0x00000002U +#define ODN_CLK_GEN_RESET_DUT_IF_MMCM_SHIFT 1 +#define ODN_CLK_GEN_RESET_DUT_IF_MMCM_SIGNED 0 + +#define ODN_CLK_GEN_RESET_MULTI_MMCM_MASK 0x00000004U +#define ODN_CLK_GEN_RESET_MULTI_MMCM_SHIFT 2 +#define ODN_CLK_GEN_RESET_MULTI_MMCM_SIGNED 0 + +#define ODN_CLK_GEN_RESET_PDP_MMCM_MASK 0x00000008U +#define ODN_CLK_GEN_RESET_PDP_MMCM_SHIFT 3 +#define ODN_CLK_GEN_RESET_PDP_MMCM_SIGNED 0 + +/* + Register INTERRUPT_STATUS +*/ +#define ODN_CORE_INTERRUPT_STATUS 0x0100 +#define ODN_INTERRUPT_STATUS_DUT_MASK 0x00000001U +#define ODN_INTERRUPT_STATUS_DUT_SHIFT 0 +#define ODN_INTERRUPT_STATUS_DUT_SIGNED 0 + +#define ODN_INTERRUPT_STATUS_PDP1_MASK 0x00000002U +#define ODN_INTERRUPT_STATUS_PDP1_SHIFT 1 +#define ODN_INTERRUPT_STATUS_PDP1_SIGNED 0 + +#define ODN_INTERRUPT_STATUS_PDP2_MASK 0x00000004U +#define ODN_INTERRUPT_STATUS_PDP2_SHIFT 2 +#define ODN_INTERRUPT_STATUS_PDP2_SIGNED 0 + +#define ODN_INTERRUPT_STATUS_PERIP_MASK 0x00000008U 
+#define ODN_INTERRUPT_STATUS_PERIP_SHIFT 3 +#define ODN_INTERRUPT_STATUS_PERIP_SIGNED 0 + +#define ODN_INTERRUPT_STATUS_UART_MASK 0x00000010U +#define ODN_INTERRUPT_STATUS_UART_SHIFT 4 +#define ODN_INTERRUPT_STATUS_UART_SIGNED 0 + +#define ODN_INTERRUPT_STATUS_GIST_IN_LNK_ERR_MASK 0x00000020U +#define ODN_INTERRUPT_STATUS_GIST_IN_LNK_ERR_SHIFT 5 +#define ODN_INTERRUPT_STATUS_GIST_IN_LNK_ERR_SIGNED 0 + +#define ODN_INTERRUPT_STATUS_GIST_IN_MB_ERR_MASK 0x00000040U +#define ODN_INTERRUPT_STATUS_GIST_IN_MB_ERR_SHIFT 6 +#define ODN_INTERRUPT_STATUS_GIST_IN_MB_ERR_SIGNED 0 + +#define ODN_INTERRUPT_STATUS_GIST_OUT_LNK_ERR_MASK 0x00000080U +#define ODN_INTERRUPT_STATUS_GIST_OUT_LNK_ERR_SHIFT 7 +#define ODN_INTERRUPT_STATUS_GIST_OUT_LNK_ERR_SIGNED 0 + +#define ODN_INTERRUPT_STATUS_GIST_OUT_MB_ERR_MASK 0x00000100U +#define ODN_INTERRUPT_STATUS_GIST_OUT_MB_ERR_SHIFT 8 +#define ODN_INTERRUPT_STATUS_GIST_OUT_MB_ERR_SIGNED 0 + +#define ODN_INTERRUPT_STATUS_IRQ_TEST_MASK 0x40000000U +#define ODN_INTERRUPT_STATUS_IRQ_TEST_SHIFT 30 +#define ODN_INTERRUPT_STATUS_IRQ_TEST_SIGNED 0 + +#define ODN_INTERRUPT_STATUS_MASTER_STATUS_MASK 0x80000000U +#define ODN_INTERRUPT_STATUS_MASTER_STATUS_SHIFT 31 +#define ODN_INTERRUPT_STATUS_MASTER_STATUS_SIGNED 0 + +/* + Register INTERRUPT_ENABLE +*/ +#define ODN_CORE_INTERRUPT_ENABLE 0x0104 +#define ODN_INTERRUPT_ENABLE_DUT_MASK 0x00000001U +#define ODN_INTERRUPT_ENABLE_DUT_SHIFT 0 +#define ODN_INTERRUPT_ENABLE_DUT_SIGNED 0 + +#define ODN_INTERRUPT_ENABLE_PDP1_MASK 0x00000002U +#define ODN_INTERRUPT_ENABLE_PDP1_SHIFT 1 +#define ODN_INTERRUPT_ENABLE_PDP1_SIGNED 0 + +#define ODN_INTERRUPT_ENABLE_PDP2_MASK 0x00000004U +#define ODN_INTERRUPT_ENABLE_PDP2_SHIFT 2 +#define ODN_INTERRUPT_ENABLE_PDP2_SIGNED 0 + +#define ODN_INTERRUPT_ENABLE_PERIP_MASK 0x00000008U +#define ODN_INTERRUPT_ENABLE_PERIP_SHIFT 3 +#define ODN_INTERRUPT_ENABLE_PERIP_SIGNED 0 + +#define ODN_INTERRUPT_ENABLE_UART_MASK 0x00000010U +#define ODN_INTERRUPT_ENABLE_UART_SHIFT 4 +#define 
ODN_INTERRUPT_ENABLE_UART_SIGNED 0 + +#define ODN_INTERRUPT_ENABLE_GIST_IN_LNK_ERR_MASK 0x00000020U +#define ODN_INTERRUPT_ENABLE_GIST_IN_LNK_ERR_SHIFT 5 +#define ODN_INTERRUPT_ENABLE_GIST_IN_LNK_ERR_SIGNED 0 + +#define ODN_INTERRUPT_ENABLE_GIST_IN_MB_ERR_MASK 0x00000040U +#define ODN_INTERRUPT_ENABLE_GIST_IN_MB_ERR_SHIFT 6 +#define ODN_INTERRUPT_ENABLE_GIST_IN_MB_ERR_SIGNED 0 + +#define ODN_INTERRUPT_ENABLE_GIST_OUT_LNK_ERR_MASK 0x00000080U +#define ODN_INTERRUPT_ENABLE_GIST_OUT_LNK_ERR_SHIFT 7 +#define ODN_INTERRUPT_ENABLE_GIST_OUT_LNK_ERR_SIGNED 0 + +#define ODN_INTERRUPT_ENABLE_GIST_OUT_MB_ERR_MASK 0x00000100U +#define ODN_INTERRUPT_ENABLE_GIST_OUT_MB_ERR_SHIFT 8 +#define ODN_INTERRUPT_ENABLE_GIST_OUT_MB_ERR_SIGNED 0 + +#define ODN_INTERRUPT_ENABLE_IRQ_TEST_MASK 0x40000000U +#define ODN_INTERRUPT_ENABLE_IRQ_TEST_SHIFT 30 +#define ODN_INTERRUPT_ENABLE_IRQ_TEST_SIGNED 0 + +#define ODN_INTERRUPT_ENABLE_MASTER_ENABLE_MASK 0x80000000U +#define ODN_INTERRUPT_ENABLE_MASTER_ENABLE_SHIFT 31 +#define ODN_INTERRUPT_ENABLE_MASTER_ENABLE_SIGNED 0 + +/* + Register INTERRUPT_CLR +*/ +#define ODN_CORE_INTERRUPT_CLR 0x010C +#define ODN_INTERRUPT_CLR_DUT_MASK 0x00000001U +#define ODN_INTERRUPT_CLR_DUT_SHIFT 0 +#define ODN_INTERRUPT_CLR_DUT_SIGNED 0 + +#define ODN_INTERRUPT_CLR_PDP1_MASK 0x00000002U +#define ODN_INTERRUPT_CLR_PDP1_SHIFT 1 +#define ODN_INTERRUPT_CLR_PDP1_SIGNED 0 + +#define ODN_INTERRUPT_CLR_PDP2_MASK 0x00000004U +#define ODN_INTERRUPT_CLR_PDP2_SHIFT 2 +#define ODN_INTERRUPT_CLR_PDP2_SIGNED 0 + +#define ODN_INTERRUPT_CLR_PERIP_MASK 0x00000008U +#define ODN_INTERRUPT_CLR_PERIP_SHIFT 3 +#define ODN_INTERRUPT_CLR_PERIP_SIGNED 0 + +#define ODN_INTERRUPT_CLR_UART_MASK 0x00000010U +#define ODN_INTERRUPT_CLR_UART_SHIFT 4 +#define ODN_INTERRUPT_CLR_UART_SIGNED 0 + +#define ODN_INTERRUPT_CLR_GIST_IN_LNK_ERR_MASK 0x00000020U +#define ODN_INTERRUPT_CLR_GIST_IN_LNK_ERR_SHIFT 5 +#define ODN_INTERRUPT_CLR_GIST_IN_LNK_ERR_SIGNED 0 + +#define ODN_INTERRUPT_CLR_GIST_IN_MB_ERR_MASK 
0x00000040U +#define ODN_INTERRUPT_CLR_GIST_IN_MB_ERR_SHIFT 6 +#define ODN_INTERRUPT_CLR_GIST_IN_MB_ERR_SIGNED 0 + +#define ODN_INTERRUPT_CLR_GIST_OUT_LNK_ERR_MASK 0x00000080U +#define ODN_INTERRUPT_CLR_GIST_OUT_LNK_ERR_SHIFT 7 +#define ODN_INTERRUPT_CLR_GIST_OUT_LNK_ERR_SIGNED 0 + +#define ODN_INTERRUPT_CLR_GIST_OUT_MB_ERR_MASK 0x00000100U +#define ODN_INTERRUPT_CLR_GIST_OUT_MB_ERR_SHIFT 8 +#define ODN_INTERRUPT_CLR_GIST_OUT_MB_ERR_SIGNED 0 + +#define ODN_INTERRUPT_CLR_IRQ_TEST_MASK 0x40000000U +#define ODN_INTERRUPT_CLR_IRQ_TEST_SHIFT 30 +#define ODN_INTERRUPT_CLR_IRQ_TEST_SIGNED 0 + +#define ODN_INTERRUPT_CLR_MASTER_CLEAR_MASK 0x80000000U +#define ODN_INTERRUPT_CLR_MASTER_CLEAR_SHIFT 31 +#define ODN_INTERRUPT_CLR_MASTER_CLEAR_SIGNED 0 + +/* + Register INTERRUPT_TEST +*/ +#define ODN_CORE_INTERRUPT_TEST 0x0110 +#define ODN_INTERRUPT_TEST_INTERRUPT_TEST_MASK 0x00000001U +#define ODN_INTERRUPT_TEST_INTERRUPT_TEST_SHIFT 0 +#define ODN_INTERRUPT_TEST_INTERRUPT_TEST_SIGNED 0 + +/* + Register SYSTEM_ID +*/ +#define ODN_CORE_SYSTEM_ID 0x0120 +#define ODN_SYSTEM_ID_ID_MASK 0x0000FFFFU +#define ODN_SYSTEM_ID_ID_SHIFT 0 +#define ODN_SYSTEM_ID_ID_SIGNED 0 + +/* + Register NUM_GPIO +*/ +#define ODN_CORE_NUM_GPIO 0x0180 +#define ODN_NUM_GPIO_NUMBER_MASK 0x0000000FU +#define ODN_NUM_GPIO_NUMBER_SHIFT 0 +#define ODN_NUM_GPIO_NUMBER_SIGNED 0 + +/* + Register GPIO_EN +*/ +#define ODN_CORE_GPIO_EN 0x0184 +#define ODN_GPIO_EN_DIRECTION_MASK 0x000000FFU +#define ODN_GPIO_EN_DIRECTION_SHIFT 0 +#define ODN_GPIO_EN_DIRECTION_SIGNED 0 + +/* + Register GPIO +*/ +#define ODN_CORE_GPIO 0x0188 +#define ODN_GPIO_GPIO_MASK 0x000000FFU +#define ODN_GPIO_GPIO_SHIFT 0 +#define ODN_GPIO_GPIO_SIGNED 0 + +/* + Register NUM_DUT_CTRL +*/ +#define ODN_CORE_NUM_DUT_CTRL 0x0190 +#define ODN_NUM_DUT_CTRL_NUM_PINS_MASK 0xFFFFFFFFU +#define ODN_NUM_DUT_CTRL_NUM_PINS_SHIFT 0 +#define ODN_NUM_DUT_CTRL_NUM_PINS_SIGNED 0 + +/* + Register DUT_CTRL1 +*/ +#define ODN_CORE_DUT_CTRL1 0x0194 +#define 
ODN_DUT_CTRL1_CONTROL1_MASK 0x3FFFFFFFU +#define ODN_DUT_CTRL1_CONTROL1_SHIFT 0 +#define ODN_DUT_CTRL1_CONTROL1_SIGNED 0 + +#define ODN_DUT_CTRL1_FBDC_BYPASS_MASK 0x40000000U +#define ODN_DUT_CTRL1_FBDC_BYPASS_SHIFT 30 +#define ODN_DUT_CTRL1_FBDC_BYPASS_SIGNED 0 + +#define ODN_DUT_CTRL1_DUT_MST_OFFSET_MASK 0x80000000U +#define ODN_DUT_CTRL1_DUT_MST_OFFSET_SHIFT 31 +#define ODN_DUT_CTRL1_DUT_MST_OFFSET_SIGNED 0 + +/* + Register DUT_CTRL2 +*/ +#define ODN_CORE_DUT_CTRL2 0x0198 +#define ODN_DUT_CTRL2_CONTROL2_MASK 0xFFFFFFFFU +#define ODN_DUT_CTRL2_CONTROL2_SHIFT 0 +#define ODN_DUT_CTRL2_CONTROL2_SIGNED 0 + +/* + Register NUM_DUT_STAT +*/ +#define ODN_CORE_NUM_DUT_STAT 0x019C +#define ODN_NUM_DUT_STAT_NUM_PINS_MASK 0xFFFFFFFFU +#define ODN_NUM_DUT_STAT_NUM_PINS_SHIFT 0 +#define ODN_NUM_DUT_STAT_NUM_PINS_SIGNED 0 + +/* + Register DUT_STAT1 +*/ +#define ODN_CORE_DUT_STAT1 0x01A0 +#define ODN_DUT_STAT1_STATUS1_MASK 0xFFFFFFFFU +#define ODN_DUT_STAT1_STATUS1_SHIFT 0 +#define ODN_DUT_STAT1_STATUS1_SIGNED 0 + +/* + Register DUT_STAT2 +*/ +#define ODN_CORE_DUT_STAT2 0x01A4 +#define ODN_DUT_STAT2_STATUS2_MASK 0xFFFFFFFFU +#define ODN_DUT_STAT2_STATUS2_SHIFT 0 +#define ODN_DUT_STAT2_STATUS2_SIGNED 0 + +/* + Register DASH_LEDS +*/ +#define ODN_CORE_DASH_LEDS 0x01A8 +#define ODN_DASH_LEDS_REPA_MASK 0xFFF00000U +#define ODN_DASH_LEDS_REPA_SHIFT 20 +#define ODN_DASH_LEDS_REPA_SIGNED 0 + +#define ODN_DASH_LEDS_PIKE_MASK 0x00000FFFU +#define ODN_DASH_LEDS_PIKE_SHIFT 0 +#define ODN_DASH_LEDS_PIKE_SIGNED 0 + +/* + Register DUT_CLK_INFO +*/ +#define ODN_CORE_DUT_CLK_INFO 0x01B0 +#define ODN_DUT_CLK_INFO_CORE_MASK 0x0000FFFFU +#define ODN_DUT_CLK_INFO_CORE_SHIFT 0 +#define ODN_DUT_CLK_INFO_CORE_SIGNED 0 + +#define ODN_DUT_CLK_INFO_MEM_MASK 0xFFFF0000U +#define ODN_DUT_CLK_INFO_MEM_SHIFT 16 +#define ODN_DUT_CLK_INFO_MEM_SIGNED 0 + +/* + Register DUT_CLK_PHSE +*/ +#define ODN_CORE_DUT_CLK_PHSE 0x01B4 +#define ODN_DUT_CLK_PHSE_MEM_REQ_MASK 0x0000FFFFU +#define 
ODN_DUT_CLK_PHSE_MEM_REQ_SHIFT 0 +#define ODN_DUT_CLK_PHSE_MEM_REQ_SIGNED 0 + +#define ODN_DUT_CLK_PHSE_MEM_RD_MASK 0xFFFF0000U +#define ODN_DUT_CLK_PHSE_MEM_RD_SHIFT 16 +#define ODN_DUT_CLK_PHSE_MEM_RD_SIGNED 0 + +/* + Register CORE_STATUS +*/ +#define ODN_CORE_CORE_STATUS 0x0200 +#define ODN_CORE_STATUS_PCIE_USER_LNK_UP_MASK 0x00000001U +#define ODN_CORE_STATUS_PCIE_USER_LNK_UP_SHIFT 0 +#define ODN_CORE_STATUS_PCIE_USER_LNK_UP_SIGNED 0 + +#define ODN_CORE_STATUS_MIG_C0_MMCM_LOCKED_MASK 0x00000010U +#define ODN_CORE_STATUS_MIG_C0_MMCM_LOCKED_SHIFT 4 +#define ODN_CORE_STATUS_MIG_C0_MMCM_LOCKED_SIGNED 0 + +#define ODN_CORE_STATUS_MIG_C0_INIT_CALIB_COMPLETE_MASK 0x00000020U +#define ODN_CORE_STATUS_MIG_C0_INIT_CALIB_COMPLETE_SHIFT 5 +#define ODN_CORE_STATUS_MIG_C0_INIT_CALIB_COMPLETE_SIGNED 0 + +#define ODN_CORE_STATUS_MIG_C1_MMCM_LOCKED_MASK 0x00000040U +#define ODN_CORE_STATUS_MIG_C1_MMCM_LOCKED_SHIFT 6 +#define ODN_CORE_STATUS_MIG_C1_MMCM_LOCKED_SIGNED 0 + +#define ODN_CORE_STATUS_MIG_C1_INIT_CALIB_COMPLETE_MASK 0x00000080U +#define ODN_CORE_STATUS_MIG_C1_INIT_CALIB_COMPLETE_SHIFT 7 +#define ODN_CORE_STATUS_MIG_C1_INIT_CALIB_COMPLETE_SIGNED 0 + +#define ODN_CORE_STATUS_PERIP_IMG2AXI_IDLE_MASK 0x00000100U +#define ODN_CORE_STATUS_PERIP_IMG2AXI_IDLE_SHIFT 8 +#define ODN_CORE_STATUS_PERIP_IMG2AXI_IDLE_SIGNED 0 + +#define ODN_CORE_STATUS_PERIP_AXI2IMG_IDLE_MASK 0x00000200U +#define ODN_CORE_STATUS_PERIP_AXI2IMG_IDLE_SHIFT 9 +#define ODN_CORE_STATUS_PERIP_AXI2IMG_IDLE_SIGNED 0 + +#define ODN_CORE_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_MASK 0x00001000U +#define ODN_CORE_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SHIFT 12 +#define ODN_CORE_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SIGNED 0 + +#define ODN_CORE_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_MASK 0x00002000U +#define ODN_CORE_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SHIFT 13 +#define ODN_CORE_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SIGNED 0 + +/* + Register CORE_CONTROL +*/ +#define ODN_CORE_CORE_CONTROL 0x0204 +#define 
ODN_CORE_CONTROL_BAR4_OFFSET_MASK 0x0000001FU +#define ODN_CORE_CONTROL_BAR4_OFFSET_SHIFT 0 +#define ODN_CORE_CONTROL_BAR4_OFFSET_SIGNED 0 + +#define ODN_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_MASK 0x00000300U +#define ODN_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SHIFT 8 +#define ODN_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SIGNED 0 + +#define ODN_CORE_CONTROL_HDMI_MODULE_EN_MASK 0x00001C00U +#define ODN_CORE_CONTROL_HDMI_MODULE_EN_SHIFT 10 +#define ODN_CORE_CONTROL_HDMI_MODULE_EN_SIGNED 0 + +#define ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_MASK 0x00002000U +#define ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_SHIFT 13 +#define ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_SIGNED 0 + +#define ODN_CORE_CONTROL_PDP1_OFFSET_MASK 0x00070000U +#define ODN_CORE_CONTROL_PDP1_OFFSET_SHIFT 16 +#define ODN_CORE_CONTROL_PDP1_OFFSET_SIGNED 0 + +#define ODN_CORE_CONTROL_PDP2_OFFSET_MASK 0x00700000U +#define ODN_CORE_CONTROL_PDP2_OFFSET_SHIFT 20 +#define ODN_CORE_CONTROL_PDP2_OFFSET_SIGNED 0 + +#define ODN_CORE_CONTROL_DUT_OFFSET_MASK 0x07000000U +#define ODN_CORE_CONTROL_DUT_OFFSET_SHIFT 24 +#define ODN_CORE_CONTROL_DUT_OFFSET_SIGNED 0 + +/* + Register REG_BANK_STATUS +*/ +#define ODN_CORE_REG_BANK_STATUS 0x0208 +#define ODN_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_MASK 0xFFFFFFFFU +#define ODN_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SHIFT 0 +#define ODN_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SIGNED 0 + +/* + Register MMCM_LOCK_STATUS +*/ +#define ODN_CORE_MMCM_LOCK_STATUS 0x020C +#define ODN_MMCM_LOCK_STATUS_DUT_CORE_MASK 0x00000001U +#define ODN_MMCM_LOCK_STATUS_DUT_CORE_SHIFT 0 +#define ODN_MMCM_LOCK_STATUS_DUT_CORE_SIGNED 0 + +#define ODN_MMCM_LOCK_STATUS_DUT_IF_MASK 0x00000002U +#define ODN_MMCM_LOCK_STATUS_DUT_IF_SHIFT 1 +#define ODN_MMCM_LOCK_STATUS_DUT_IF_SIGNED 0 + +#define ODN_MMCM_LOCK_STATUS_MULTI_MASK 0x00000004U +#define ODN_MMCM_LOCK_STATUS_MULTI_SHIFT 2 +#define ODN_MMCM_LOCK_STATUS_MULTI_SIGNED 0 + +#define ODN_MMCM_LOCK_STATUS_PDPP_MASK 0x00000008U +#define ODN_MMCM_LOCK_STATUS_PDPP_SHIFT 3 +#define 
ODN_MMCM_LOCK_STATUS_PDPP_SIGNED 0 + +/* + Register GIST_STATUS +*/ +#define ODN_CORE_GIST_STATUS 0x0210 +#define ODN_GIST_STATUS_MST_MASK 0x000001FFU +#define ODN_GIST_STATUS_MST_SHIFT 0 +#define ODN_GIST_STATUS_MST_SIGNED 0 + +#define ODN_GIST_STATUS_SLV_MASK 0x001FF000U +#define ODN_GIST_STATUS_SLV_SHIFT 12 +#define ODN_GIST_STATUS_SLV_SIGNED 0 + +#define ODN_GIST_STATUS_SLV_OUT_MASK 0x03000000U +#define ODN_GIST_STATUS_SLV_OUT_SHIFT 24 +#define ODN_GIST_STATUS_SLV_OUT_SIGNED 0 + +#define ODN_GIST_STATUS_MST_OUT_MASK 0x70000000U +#define ODN_GIST_STATUS_MST_OUT_SHIFT 28 +#define ODN_GIST_STATUS_MST_OUT_SIGNED 0 + +/* + Register DUT_MST_ADD +*/ +#define ODN_CORE_DUT_MST_ADD 0x0214 +#define ODN_DUT_MST_ADD_SLV_OUT_MASK 0x0000003FU +#define ODN_DUT_MST_ADD_SLV_OUT_SHIFT 0 +#define ODN_DUT_MST_ADD_SLV_OUT_SIGNED 0 + + +/**************************** + Generated from: ad_tx.def +*****************************/ + +/* + Register ADT_CONTROL +*/ +#define ODN_AD_TX_DEBUG_ADT_CONTROL 0x0000 +#define ODN_SET_ADTX_READY_MASK 0x00000004U +#define ODN_SET_ADTX_READY_SHIFT 2 +#define ODN_SET_ADTX_READY_SIGNED 0 + +#define ODN_SEND_ALIGN_DATA_MASK 0x00000002U +#define ODN_SEND_ALIGN_DATA_SHIFT 1 +#define ODN_SEND_ALIGN_DATA_SIGNED 0 + +#define ODN_ENABLE_FLUSHING_MASK 0x00000001U +#define ODN_ENABLE_FLUSHING_SHIFT 0 +#define ODN_ENABLE_FLUSHING_SIGNED 0 + +/* + Register ADT_STATUS +*/ +#define ODN_AD_TX_DEBUG_ADT_STATUS 0x0004 +#define ODN_REQUEST_COMPLETE_MASK 0x00000001U +#define ODN_REQUEST_COMPLETE_SHIFT 0 +#define ODN_REQUEST_COMPLETE_SIGNED 0 + + +/****************************** + Generated from: mca_debug.def +*******************************/ + +/* + Register MCA_CONTROL +*/ +#define ODN_MCA_DEBUG_MCA_CONTROL 0x0000 +#define ODN_ALIGN_START_MASK 0x00000001U +#define ODN_ALIGN_START_SHIFT 0 +#define ODN_ALIGN_START_SIGNED 0 + +/* + Register MCA_STATUS +*/ +#define ODN_MCA_DEBUG_MCA_STATUS 0x0004 +#define ODN_TCHECK_SDEBUG_MASK 0x40000000U +#define ODN_TCHECK_SDEBUG_SHIFT 30 
+#define ODN_TCHECK_SDEBUG_SIGNED 0 + +#define ODN_CHECK_SDEBUG_MASK 0x20000000U +#define ODN_CHECK_SDEBUG_SHIFT 29 +#define ODN_CHECK_SDEBUG_SIGNED 0 + +#define ODN_ALIGN_SDEBUG_MASK 0x10000000U +#define ODN_ALIGN_SDEBUG_SHIFT 28 +#define ODN_ALIGN_SDEBUG_SIGNED 0 + +#define ODN_FWAIT_SDEBUG_MASK 0x08000000U +#define ODN_FWAIT_SDEBUG_SHIFT 27 +#define ODN_FWAIT_SDEBUG_SIGNED 0 + +#define ODN_IDLE_SDEBUG_MASK 0x04000000U +#define ODN_IDLE_SDEBUG_SHIFT 26 +#define ODN_IDLE_SDEBUG_SIGNED 0 + +#define ODN_FIFO_FULL_MASK 0x03FF0000U +#define ODN_FIFO_FULL_SHIFT 16 +#define ODN_FIFO_FULL_SIGNED 0 + +#define ODN_FIFO_EMPTY_MASK 0x0000FFC0U +#define ODN_FIFO_EMPTY_SHIFT 6 +#define ODN_FIFO_EMPTY_SIGNED 0 + +#define ODN_TAG_CHECK_ERROR_MASK 0x00000020U +#define ODN_TAG_CHECK_ERROR_SHIFT 5 +#define ODN_TAG_CHECK_ERROR_SIGNED 0 + +#define ODN_ALIGN_CHECK_ERROR_MASK 0x00000010U +#define ODN_ALIGN_CHECK_ERROR_SHIFT 4 +#define ODN_ALIGN_CHECK_ERROR_SIGNED 0 + +#define ODN_ALIGN_ERROR_MASK 0x00000008U +#define ODN_ALIGN_ERROR_SHIFT 3 +#define ODN_ALIGN_ERROR_SIGNED 0 + +#define ODN_TAG_CHECKING_OK_MASK 0x00000004U +#define ODN_TAG_CHECKING_OK_SHIFT 2 +#define ODN_TAG_CHECKING_OK_SIGNED 0 + +#define ODN_ALIGN_CHECK_OK_MASK 0x00000002U +#define ODN_ALIGN_CHECK_OK_SHIFT 1 +#define ODN_ALIGN_CHECK_OK_SIGNED 0 + +#define ODN_ALIGNMENT_FOUND_MASK 0x00000001U +#define ODN_ALIGNMENT_FOUND_SHIFT 0 +#define ODN_ALIGNMENT_FOUND_SIGNED 0 + + +/********************************* + Generated from: sai_rx_debug.def +**********************************/ + +/* + Register SIG_RESULT +*/ +#define ODN_SAI_RX_DEBUG_SIG_RESULT 0x0000 +#define ODN_SIG_RESULT_VALUE_MASK 0xFFFFFFFFU +#define ODN_SIG_RESULT_VALUE_SHIFT 0 +#define ODN_SIG_RESULT_VALUE_SIGNED 0 + +/* + Register INIT_SIG +*/ +#define ODN_SAI_RX_DEBUG_INIT_SIG 0x0004 +#define ODN_INIT_SIG_VALUE_MASK 0x00000001U +#define ODN_INIT_SIG_VALUE_SHIFT 0 +#define ODN_INIT_SIG_VALUE_SIGNED 0 + +/* + Register SAI_BYPASS +*/ +#define 
ODN_SAI_RX_DEBUG_SAI_BYPASS 0x0008 +#define ODN_BYPASS_CLK_TAPS_VALUE_MASK 0x000003FFU +#define ODN_BYPASS_CLK_TAPS_VALUE_SHIFT 0 +#define ODN_BYPASS_CLK_TAPS_VALUE_SIGNED 0 + +#define ODN_BYPASS_SET_MASK 0x00010000U +#define ODN_BYPASS_SET_SHIFT 16 +#define ODN_BYPASS_SET_SIGNED 0 + +#define ODN_BYPASS_EN_MASK 0x00100000U +#define ODN_BYPASS_EN_SHIFT 20 +#define ODN_BYPASS_EN_SIGNED 0 + +#define ODN_EN_STATUS_MASK 0x01000000U +#define ODN_EN_STATUS_SHIFT 24 +#define ODN_EN_STATUS_SIGNED 0 + +/* + Register SAI_CLK_TAPS +*/ +#define ODN_SAI_RX_DEBUG_SAI_CLK_TAPS 0x000C +#define ODN_CLK_TAPS_VALUE_MASK 0x000003FFU +#define ODN_CLK_TAPS_VALUE_SHIFT 0 +#define ODN_CLK_TAPS_VALUE_SIGNED 0 + +#define ODN_TRAINING_COMPLETE_MASK 0x00010000U +#define ODN_TRAINING_COMPLETE_SHIFT 16 +#define ODN_TRAINING_COMPLETE_SIGNED 0 + +/* + Register SAI_EYES +*/ +#define ODN_SAI_RX_DEBUG_SAI_EYES 0x0010 +#define ODN_MIN_EYE_END_MASK 0x0000FFFFU +#define ODN_MIN_EYE_END_SHIFT 0 +#define ODN_MIN_EYE_END_SIGNED 0 + +#define ODN_MAX_EYE_START_MASK 0xFFFF0000U +#define ODN_MAX_EYE_START_SHIFT 16 +#define ODN_MAX_EYE_START_SIGNED 0 + +/* + Register SAI_DDR_INVERT +*/ +#define ODN_SAI_RX_DEBUG_SAI_DDR_INVERT 0x0014 +#define ODN_DDR_INVERT_MASK 0x00000001U +#define ODN_DDR_INVERT_SHIFT 0 +#define ODN_DDR_INVERT_SIGNED 0 + +#define ODN_OVERIDE_VALUE_MASK 0x00010000U +#define ODN_OVERIDE_VALUE_SHIFT 16 +#define ODN_OVERIDE_VALUE_SIGNED 0 + +#define ODN_INVERT_OVERIDE_MASK 0x00100000U +#define ODN_INVERT_OVERIDE_SHIFT 20 +#define ODN_INVERT_OVERIDE_SIGNED 0 + +/* + Register SAI_TRAIN_ACK +*/ +#define ODN_SAI_RX_DEBUG_SAI_TRAIN_ACK 0x0018 +#define ODN_TRAIN_ACK_FAIL_MASK 0x00000001U +#define ODN_TRAIN_ACK_FAIL_SHIFT 0 +#define ODN_TRAIN_ACK_FAIL_SIGNED 0 + +#define ODN_TRAIN_ACK_FAIL_COUNT_MASK 0x000000F0U +#define ODN_TRAIN_ACK_FAIL_COUNT_SHIFT 4 +#define ODN_TRAIN_ACK_FAIL_COUNT_SIGNED 0 + +#define ODN_TRAIN_ACK_COMPLETE_MASK 0x00000100U +#define ODN_TRAIN_ACK_COMPLETE_SHIFT 8 +#define 
ODN_TRAIN_ACK_COMPLETE_SIGNED 0 + +#define ODN_TRAIN_ACK_OVERIDE_MASK 0x00001000U +#define ODN_TRAIN_ACK_OVERIDE_SHIFT 12 +#define ODN_TRAIN_ACK_OVERIDE_SIGNED 0 + +/* + Register SAI_TRAIN_ACK_COUNT +*/ +#define ODN_SAI_RX_DEBUG_SAI_TRAIN_ACK_COUNT 0x001C +#define ODN_TRAIN_COUNT_MASK 0xFFFFFFFFU +#define ODN_TRAIN_COUNT_SHIFT 0 +#define ODN_TRAIN_COUNT_SIGNED 0 + +/* + Register SAI_CHANNEL_NUMBER +*/ +#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_NUMBER 0x0020 +#define ODN_CHANNEL_NUMBER_MASK 0x0000FFFFU +#define ODN_CHANNEL_NUMBER_SHIFT 0 +#define ODN_CHANNEL_NUMBER_SIGNED 0 + +/* + Register SAI_CHANNEL_EYE_START +*/ +#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_START 0x0024 +#define ODN_CHANNEL_EYE_START_MASK 0xFFFFFFFFU +#define ODN_CHANNEL_EYE_START_SHIFT 0 +#define ODN_CHANNEL_EYE_START_SIGNED 0 + +/* + Register SAI_CHANNEL_EYE_END +*/ +#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_END 0x0028 +#define ODN_CHANNEL_EYE_END_MASK 0xFFFFFFFFU +#define ODN_CHANNEL_EYE_END_SHIFT 0 +#define ODN_CHANNEL_EYE_END_SIGNED 0 + +/* + Register SAI_CHANNEL_EYE_PATTERN +*/ +#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_PATTERN 0x002C +#define ODN_CHANNEL_EYE_PATTERN_MASK 0xFFFFFFFFU +#define ODN_CHANNEL_EYE_PATTERN_SHIFT 0 +#define ODN_CHANNEL_EYE_PATTERN_SIGNED 0 + +/* + Register SAI_CHANNEL_EYE_DEBUG +*/ +#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_DEBUG 0x0030 +#define ODN_CHANNEL_EYE_SENSE_MASK 0x00000001U +#define ODN_CHANNEL_EYE_SENSE_SHIFT 0 +#define ODN_CHANNEL_EYE_SENSE_SIGNED 0 + +#define ODN_CHANNEL_EYE_COMPLETE_MASK 0x00000002U +#define ODN_CHANNEL_EYE_COMPLETE_SHIFT 1 +#define ODN_CHANNEL_EYE_COMPLETE_SIGNED 0 + + +/********************************* + Generated from: sai_tx_debug.def +**********************************/ + +/* + Register SIG_RESULT +*/ +#define ODN_SAI_TX_DEBUG_SIG_RESULT 0x0000 +#define ODN_TX_SIG_RESULT_VALUE_MASK 0xFFFFFFFFU +#define ODN_TX_SIG_RESULT_VALUE_SHIFT 0 +#define ODN_TX_SIG_RESULT_VALUE_SIGNED 0 + +/* + Register INIT_SIG +*/ +#define ODN_SAI_TX_DEBUG_INIT_SIG 
0x0004 +#define ODN_TX_INIT_SIG_VALUE_MASK 0x00000001U +#define ODN_TX_INIT_SIG_VALUE_SHIFT 0 +#define ODN_TX_INIT_SIG_VALUE_SIGNED 0 + +/* + Register SAI_BYPASS +*/ +#define ODN_SAI_TX_DEBUG_SAI_BYPASS 0x0008 +#define ODN_TX_BYPASS_EN_MASK 0x00000001U +#define ODN_TX_BYPASS_EN_SHIFT 0 +#define ODN_TX_BYPASS_EN_SIGNED 0 + +#define ODN_TX_ACK_RESEND_MASK 0x00000002U +#define ODN_TX_ACK_RESEND_SHIFT 1 +#define ODN_TX_ACK_RESEND_SIGNED 0 + +#define ODN_TX_DISABLE_ACK_SEND_MASK 0x00000004U +#define ODN_TX_DISABLE_ACK_SEND_SHIFT 2 +#define ODN_TX_DISABLE_ACK_SEND_SIGNED 0 + +/* + Register SAI_STATUS +*/ +#define ODN_SAI_TX_DEBUG_SAI_STATUS 0x000C +#define ODN_TX_TRAINING_COMPLETE_MASK 0x00000001U +#define ODN_TX_TRAINING_COMPLETE_SHIFT 0 +#define ODN_TX_TRAINING_COMPLETE_SIGNED 0 + +#define ODN_TX_TRAINING_ACK_COMPLETE_MASK 0x00000002U +#define ODN_TX_TRAINING_ACK_COMPLETE_SHIFT 1 +#define ODN_TX_TRAINING_ACK_COMPLETE_SIGNED 0 + + + +#endif /* _ODIN_REGS_H_ */ + +/***************************************************************************** + End of file (odin_regs.h) +*****************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/system/rgx_tc/orion_defs.h b/drivers/mcst/gpu-imgtec/include/system/rgx_tc/orion_defs.h new file mode 100644 index 000000000000..1691151de58d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/system/rgx_tc/orion_defs.h @@ -0,0 +1,183 @@ +/**************************************************************************** +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Orion Memory Map - View from PCIe +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+****************************************************************************/ + +#ifndef _ORION_DEFS_H_ +#define _ORION_DEFS_H_ + +/* + * These defines have not been autogenerated + * Only values different from Odin will be included here + */ + +#define DEVICE_ID_ORION 0x1020 + +/* Odin system register banks */ +#define SRS_REG_BANK_ODN_CLK_BLK 0x02000 + +/* + * Orion CLK regs - the srs_clk_blk module defs are not auto generated + */ +#define SRS_PDP_P_CLK_OUT_DIVIDER_REG1 0x620 +#define SRS_PDP_PCLK_ODIV1_LO_TIME_MASK 0x0000003FU +#define SRS_PDP_PCLK_ODIV1_LO_TIME_SHIFT 0 +#define SRS_PDP_PCLK_ODIV1_HI_TIME_MASK 0x00000FC0U +#define SRS_PDP_PCLK_ODIV1_HI_TIME_SHIFT 6 + +#define SRS_PDP_P_CLK_OUT_DIVIDER_REG2 0x624 +#define SRS_PDP_PCLK_ODIV2_NOCOUNT_MASK 0x00000040U +#define SRS_PDP_PCLK_ODIV2_NOCOUNT_SHIFT 6 +#define SRS_PDP_PCLK_ODIV2_EDGE_MASK 0x00000080U +#define SRS_PDP_PCLK_ODIV2_EDGE_SHIFT 7 +#define SRS_PDP_PCLK_ODIV2_FRAC_MASK 0x00007C00U +#define SRS_PDP_PCLK_ODIV2_FRAC_SHIFT 10 + +#define SRS_PDP_P_CLK_OUT_DIVIDER_REG3 0x61C + +#define SRS_PDP_M_CLK_OUT_DIVIDER_REG1 0x628 +#define SRS_PDP_MCLK_ODIV1_LO_TIME_MASK 0x0000003FU +#define SRS_PDP_MCLK_ODIV1_LO_TIME_SHIFT 0 +#define SRS_PDP_MCLK_ODIV1_HI_TIME_MASK 0x00000FC0U +#define SRS_PDP_MCLK_ODIV1_HI_TIME_SHIFT 6 + +#define SRS_PDP_M_CLK_OUT_DIVIDER_REG2 0x62C +#define SRS_PDP_MCLK_ODIV2_NOCOUNT_MASK 0x00000040U +#define SRS_PDP_MCLK_ODIV2_NOCOUNT_SHIFT 6 +#define SRS_PDP_MCLK_ODIV2_EDGE_MASK 0x00000080U +#define SRS_PDP_MCLK_ODIV2_EDGE_SHIFT 7 + +#define SRS_PDP_P_CLK_MULTIPLIER_REG1 0x650 +#define SRS_PDP_PCLK_MUL1_LO_TIME_MASK 0x0000003FU +#define SRS_PDP_PCLK_MUL1_LO_TIME_SHIFT 0 +#define SRS_PDP_PCLK_MUL1_HI_TIME_MASK 0x00000FC0U +#define SRS_PDP_PCLK_MUL1_HI_TIME_SHIFT 6 + +#define SRS_PDP_P_CLK_MULTIPLIER_REG2 0x654 +#define SRS_PDP_PCLK_MUL2_NOCOUNT_MASK 0x00000040U +#define SRS_PDP_PCLK_MUL2_NOCOUNT_SHIFT 6 +#define SRS_PDP_PCLK_MUL2_EDGE_MASK 0x00000080U +#define SRS_PDP_PCLK_MUL2_EDGE_SHIFT 
7 +#define SRS_PDP_PCLK_MUL2_FRAC_MASK 0x00007C00U +#define SRS_PDP_PCLK_MUL2_FRAC_SHIFT 10 + +#define SRS_PDP_P_CLK_MULTIPLIER_REG3 0x64C + +#define SRS_PDP_P_CLK_IN_DIVIDER_REG 0x658 +#define SRS_PDP_PCLK_IDIV_LO_TIME_MASK 0x0000003FU +#define SRS_PDP_PCLK_IDIV_LO_TIME_SHIFT 0 +#define SRS_PDP_PCLK_IDIV_HI_TIME_MASK 0x00000FC0U +#define SRS_PDP_PCLK_IDIV_HI_TIME_SHIFT 6 +#define SRS_PDP_PCLK_IDIV_NOCOUNT_MASK 0x00001000U +#define SRS_PDP_PCLK_IDIV_NOCOUNT_SHIFT 12 +#define SRS_PDP_PCLK_IDIV_EDGE_MASK 0x00002000U +#define SRS_PDP_PCLK_IDIV_EDGE_SHIFT 13 + +/* + * DUT core clock input divider, DUT reference clock input divider + */ +#define SRS_DUT_CORE_CLK_OUT_DIVIDER1 0x0020 +#define SRS_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_MASK 0x00000FC0U +#define SRS_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_SHIFT 6 +#define SRS_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_MASK 0x0000003FU +#define SRS_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_SHIFT 0 + +#define SRS_DUT_CORE_CLK_OUT_DIVIDER2 0x0024 +#define SRS_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_MASK 0x00000080U +#define SRS_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_SHIFT 7 +#define SRS_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_MASK 0x00000040U +#define SRS_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT 6 + +#define SRS_DUT_REF_CLK_OUT_DIVIDER1 0x0028 +#define SRS_DUT_REF_CLK_OUT_DIVIDER1_HI_TIME_MASK 0x00000FC0U +#define SRS_DUT_REF_CLK_OUT_DIVIDER1_HI_TIME_SHIFT 6 +#define SRS_DUT_REF_CLK_OUT_DIVIDER1_LO_TIME_MASK 0x0000003FU +#define SRS_DUT_REF_CLK_OUT_DIVIDER1_LO_TIME_SHIFT 0 + +#define SRS_DUT_REF_CLK_OUT_DIVIDER2 0x002C +#define SRS_DUT_REF_CLK_OUT_DIVIDER2_EDGE_MASK 0x00000080U +#define SRS_DUT_REF_CLK_OUT_DIVIDER2_EDGE_SHIFT 7 +#define SRS_DUT_REF_CLK_OUT_DIVIDER2_NOCOUNT_MASK 0x00000040U +#define SRS_DUT_REF_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT 6 + +/* + * DUT interface reference clock input divider + */ + +#define SRS_DUT_MEM_CLK_OUT_DIVIDER1 0x0228 +#define SRS_DUT_MEM_CLK_OUT_DIVIDER1_HI_TIME_MASK 0x00000FC0U +#define SRS_DUT_MEM_CLK_OUT_DIVIDER1_HI_TIME_SHIFT 6 +#define 
SRS_DUT_MEM_CLK_OUT_DIVIDER1_LO_TIME_MASK 0x0000003FU +#define SRS_DUT_MEM_CLK_OUT_DIVIDER1_LO_TIME_SHIFT 0 + +#define SRS_DUT_MEM_CLK_OUT_DIVIDER2 0x022C +#define SRS_DUT_MEM_CLK_OUT_DIVIDER2_EDGE_MASK 0x00000080U +#define SRS_DUT_MEM_CLK_OUT_DIVIDER2_EDGE_SHIFT 7 +#define SRS_DUT_MEM_CLK_OUT_DIVIDER2_NOCOUNT_MASK 0x00000040U +#define SRS_DUT_MEM_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT 6 + +/* + * Min max values from Xilinx Virtex Ultrascale data sheet DS893, + * for speed grade 1. All in Hz. + */ +#define SRS_INPUT_CLOCK_SPEED 100000000U +#define SRS_INPUT_CLOCK_SPEED_MIN 10000000U +#define SRS_INPUT_CLOCK_SPEED_MAX 800000000U +#define SRS_OUTPUT_CLOCK_SPEED_MIN 4690000U +#define SRS_OUTPUT_CLOCK_SPEED_MAX 630000000U +#define SRS_VCO_MIN 600000000U +#define SRS_VCO_MAX 1200000000U +#define SRS_PFD_MIN 10000000U +#define SRS_PFD_MAX 450000000U + +/* + * Orion interrupt flags + */ +#define SRS_INTERRUPT_ENABLE_PDP1 (1 << SRS_INTERRUPT_ENABLE_PDP_SHIFT) +#define SRS_INTERRUPT_ENABLE_DUT (1 << SRS_INTERRUPT_ENABLE_DUT_SHIFT) +#define SRS_INTERRUPT_STATUS_PDP1 (1 << SRS_INTERRUPT_STATUS_PDP_SHIFT) +#define SRS_INTERRUPT_STATUS_DUT (1 << SRS_INTERRUPT_STATUS_DUT_SHIFT) +#define SRS_INTERRUPT_CLEAR_PDP1 (1 << SRS_INTERRUPT_CLR_PDP_SHIFT) +#define SRS_INTERRUPT_CLEAR_DUT (1 << SRS_INTERRUPT_CLR_DUT_SHIFT) + +#endif /* _ORION_DEFS_H_ */ + +/***************************************************************************** + End of file (orion_defs.h) +*****************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/system/rgx_tc/orion_regs.h b/drivers/mcst/gpu-imgtec/include/system/rgx_tc/orion_regs.h new file mode 100644 index 000000000000..fcbc43006641 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/system/rgx_tc/orion_regs.h @@ -0,0 +1,439 @@ +/**************************************************************************** +@Title Orion system control register definitions +@Copyright Copyright (c) Imagination Technologies 
Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +@Description Orion FPGA register defs for Sirius RTL +@Author Autogenerated +****************************************************************************/ + +#ifndef _OUT_DRV_H_ +#define _OUT_DRV_H_ + +/* + Register ID +*/ +#define SRS_CORE_ID 0x0000 +#define SRS_ID_VARIANT_MASK 0x0000FFFFU +#define SRS_ID_VARIANT_SHIFT 0 +#define SRS_ID_VARIANT_SIGNED 0 + +#define SRS_ID_ID_MASK 0xFFFF0000U +#define SRS_ID_ID_SHIFT 16 +#define SRS_ID_ID_SIGNED 0 + +/* + Register REVISION +*/ +#define SRS_CORE_REVISION 0x0004 +#define SRS_REVISION_MINOR_MASK 0x000000FFU +#define SRS_REVISION_MINOR_SHIFT 0 +#define SRS_REVISION_MINOR_SIGNED 0 + +#define SRS_REVISION_MAJOR_MASK 0x00000F00U +#define SRS_REVISION_MAJOR_SHIFT 8 +#define SRS_REVISION_MAJOR_SIGNED 0 + +/* + Register CHANGE_SET +*/ +#define SRS_CORE_CHANGE_SET 0x0008 +#define SRS_CHANGE_SET_SET_MASK 0xFFFFFFFFU +#define SRS_CHANGE_SET_SET_SHIFT 0 +#define SRS_CHANGE_SET_SET_SIGNED 0 + +/* + Register USER_ID +*/ +#define SRS_CORE_USER_ID 0x000C +#define SRS_USER_ID_ID_MASK 0x0000000FU +#define SRS_USER_ID_ID_SHIFT 0 +#define SRS_USER_ID_ID_SIGNED 0 + +/* + Register USER_BUILD +*/ +#define SRS_CORE_USER_BUILD 0x0010 +#define SRS_USER_BUILD_BUILD_MASK 0xFFFFFFFFU +#define SRS_USER_BUILD_BUILD_SHIFT 0 +#define SRS_USER_BUILD_BUILD_SIGNED 0 + +/* + Register SOFT_RESETN +*/ +#define SRS_CORE_SOFT_RESETN 0x0080 +#define SRS_SOFT_RESETN_DDR_MASK 0x00000001U +#define SRS_SOFT_RESETN_DDR_SHIFT 0 +#define 
SRS_SOFT_RESETN_DDR_SIGNED 0 + +#define SRS_SOFT_RESETN_USB_MASK 0x00000002U +#define SRS_SOFT_RESETN_USB_SHIFT 1 +#define SRS_SOFT_RESETN_USB_SIGNED 0 + +#define SRS_SOFT_RESETN_PDP_MASK 0x00000004U +#define SRS_SOFT_RESETN_PDP_SHIFT 2 +#define SRS_SOFT_RESETN_PDP_SIGNED 0 + +#define SRS_SOFT_RESETN_GIST_MASK 0x00000008U +#define SRS_SOFT_RESETN_GIST_SHIFT 3 +#define SRS_SOFT_RESETN_GIST_SIGNED 0 + +/* + Register DUT_SOFT_RESETN +*/ +#define SRS_CORE_DUT_SOFT_RESETN 0x0084 +#define SRS_DUT_SOFT_RESETN_EXTERNAL_MASK 0x00000001U +#define SRS_DUT_SOFT_RESETN_EXTERNAL_SHIFT 0 +#define SRS_DUT_SOFT_RESETN_EXTERNAL_SIGNED 0 + +/* + Register SOFT_AUTO_RESETN +*/ +#define SRS_CORE_SOFT_AUTO_RESETN 0x0088 +#define SRS_SOFT_AUTO_RESETN_CFG_MASK 0x00000001U +#define SRS_SOFT_AUTO_RESETN_CFG_SHIFT 0 +#define SRS_SOFT_AUTO_RESETN_CFG_SIGNED 0 + +/* + Register CLK_GEN_RESET +*/ +#define SRS_CORE_CLK_GEN_RESET 0x0090 +#define SRS_CLK_GEN_RESET_DUT_CORE_MMCM_MASK 0x00000001U +#define SRS_CLK_GEN_RESET_DUT_CORE_MMCM_SHIFT 0 +#define SRS_CLK_GEN_RESET_DUT_CORE_MMCM_SIGNED 0 + +#define SRS_CLK_GEN_RESET_DUT_IF_MMCM_MASK 0x00000002U +#define SRS_CLK_GEN_RESET_DUT_IF_MMCM_SHIFT 1 +#define SRS_CLK_GEN_RESET_DUT_IF_MMCM_SIGNED 0 + +#define SRS_CLK_GEN_RESET_MULTI_MMCM_MASK 0x00000004U +#define SRS_CLK_GEN_RESET_MULTI_MMCM_SHIFT 2 +#define SRS_CLK_GEN_RESET_MULTI_MMCM_SIGNED 0 + +#define SRS_CLK_GEN_RESET_PDP_MMCM_MASK 0x00000008U +#define SRS_CLK_GEN_RESET_PDP_MMCM_SHIFT 3 +#define SRS_CLK_GEN_RESET_PDP_MMCM_SIGNED 0 + +/* + Register DUT_MEM +*/ +#define SRS_CORE_DUT_MEM 0x0120 +#define SRS_DUT_MEM_READ_RESPONSE_LATENCY_MASK 0x0000FFFFU +#define SRS_DUT_MEM_READ_RESPONSE_LATENCY_SHIFT 0 +#define SRS_DUT_MEM_READ_RESPONSE_LATENCY_SIGNED 0 + +#define SRS_DUT_MEM_WRITE_RESPONSE_LATENCY_MASK 0xFFFF0000U +#define SRS_DUT_MEM_WRITE_RESPONSE_LATENCY_SHIFT 16 +#define SRS_DUT_MEM_WRITE_RESPONSE_LATENCY_SIGNED 0 + +/* + Register APM +*/ +#define SRS_CORE_APM 0x0150 +#define 
SRS_APM_RESET_EVENT_MASK 0x00000001U +#define SRS_APM_RESET_EVENT_SHIFT 0 +#define SRS_APM_RESET_EVENT_SIGNED 0 + +#define SRS_APM_CAPTURE_EVENT_MASK 0x00000002U +#define SRS_APM_CAPTURE_EVENT_SHIFT 1 +#define SRS_APM_CAPTURE_EVENT_SIGNED 0 + +/* + Register NUM_GPIO +*/ +#define SRS_CORE_NUM_GPIO 0x0180 +#define SRS_NUM_GPIO_NUMBER_MASK 0x0000000FU +#define SRS_NUM_GPIO_NUMBER_SHIFT 0 +#define SRS_NUM_GPIO_NUMBER_SIGNED 0 + +/* + Register GPIO_EN +*/ +#define SRS_CORE_GPIO_EN 0x0184 +#define SRS_GPIO_EN_DIRECTION_MASK 0x000000FFU +#define SRS_GPIO_EN_DIRECTION_SHIFT 0 +#define SRS_GPIO_EN_DIRECTION_SIGNED 0 + +/* + Register GPIO +*/ +#define SRS_CORE_GPIO 0x0188 +#define SRS_GPIO_GPIO_MASK 0x000000FFU +#define SRS_GPIO_GPIO_SHIFT 0 +#define SRS_GPIO_GPIO_SIGNED 0 + +/* + Register SPI_MASTER_IFACE +*/ +#define SRS_CORE_SPI_MASTER_IFACE 0x018C +#define SRS_SPI_MASTER_IFACE_ENABLE_MASK 0x00000001U +#define SRS_SPI_MASTER_IFACE_ENABLE_SHIFT 0 +#define SRS_SPI_MASTER_IFACE_ENABLE_SIGNED 0 + +/* + Register SRS_IP_STATUS +*/ +#define SRS_CORE_SRS_IP_STATUS 0x0200 +#define SRS_SRS_IP_STATUS_PCIE_USER_LNK_UP_MASK 0x00000001U +#define SRS_SRS_IP_STATUS_PCIE_USER_LNK_UP_SHIFT 0 +#define SRS_SRS_IP_STATUS_PCIE_USER_LNK_UP_SIGNED 0 + +#define SRS_SRS_IP_STATUS_MIG_INIT_CALIB_COMPLETE_MASK 0x00000002U +#define SRS_SRS_IP_STATUS_MIG_INIT_CALIB_COMPLETE_SHIFT 1 +#define SRS_SRS_IP_STATUS_MIG_INIT_CALIB_COMPLETE_SIGNED 0 + +#define SRS_SRS_IP_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_MASK 0x00000004U +#define SRS_SRS_IP_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SHIFT 2 +#define SRS_SRS_IP_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SIGNED 0 + +#define SRS_SRS_IP_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_MASK 0x00000008U +#define SRS_SRS_IP_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SHIFT 3 +#define SRS_SRS_IP_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SIGNED 0 + +/* + Register CORE_CONTROL +*/ +#define SRS_CORE_CORE_CONTROL 0x0204 +#define SRS_CORE_CONTROL_BAR4_OFFSET_MASK 0x0000001FU +#define 
SRS_CORE_CONTROL_BAR4_OFFSET_SHIFT 0 +#define SRS_CORE_CONTROL_BAR4_OFFSET_SIGNED 0 + +#define SRS_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_MASK 0x00000300U +#define SRS_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SHIFT 8 +#define SRS_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SIGNED 0 + +#define SRS_CORE_CONTROL_HDMI_MODULE_EN_MASK 0x00001C00U +#define SRS_CORE_CONTROL_HDMI_MODULE_EN_SHIFT 10 +#define SRS_CORE_CONTROL_HDMI_MODULE_EN_SIGNED 0 + +/* + Register REG_BANK_STATUS +*/ +#define SRS_CORE_REG_BANK_STATUS 0x0208 +#define SRS_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_MASK 0xFFFFFFFFU +#define SRS_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SHIFT 0 +#define SRS_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SIGNED 0 + +/* + Register MMCM_LOCK_STATUS +*/ +#define SRS_CORE_MMCM_LOCK_STATUS 0x020C +#define SRS_MMCM_LOCK_STATUS_DUT_CORE_MASK 0x00000001U +#define SRS_MMCM_LOCK_STATUS_DUT_CORE_SHIFT 0 +#define SRS_MMCM_LOCK_STATUS_DUT_CORE_SIGNED 0 + +#define SRS_MMCM_LOCK_STATUS_DUT_IF_MASK 0x00000002U +#define SRS_MMCM_LOCK_STATUS_DUT_IF_SHIFT 1 +#define SRS_MMCM_LOCK_STATUS_DUT_IF_SIGNED 0 + +#define SRS_MMCM_LOCK_STATUS_MULTI_MASK 0x00000004U +#define SRS_MMCM_LOCK_STATUS_MULTI_SHIFT 2 +#define SRS_MMCM_LOCK_STATUS_MULTI_SIGNED 0 + +#define SRS_MMCM_LOCK_STATUS_PDP_MASK 0x00000008U +#define SRS_MMCM_LOCK_STATUS_PDP_SHIFT 3 +#define SRS_MMCM_LOCK_STATUS_PDP_SIGNED 0 + +/* + Register GIST_STATUS +*/ +#define SRS_CORE_GIST_STATUS 0x0210 +#define SRS_GIST_STATUS_MST_MASK 0x000001FFU +#define SRS_GIST_STATUS_MST_SHIFT 0 +#define SRS_GIST_STATUS_MST_SIGNED 0 + +#define SRS_GIST_STATUS_SLV_MASK 0x001FF000U +#define SRS_GIST_STATUS_SLV_SHIFT 12 +#define SRS_GIST_STATUS_SLV_SIGNED 0 + +#define SRS_GIST_STATUS_SLV_OUT_MASK 0x03000000U +#define SRS_GIST_STATUS_SLV_OUT_SHIFT 24 +#define SRS_GIST_STATUS_SLV_OUT_SIGNED 0 + +#define SRS_GIST_STATUS_MST_OUT_MASK 0x70000000U +#define SRS_GIST_STATUS_MST_OUT_SHIFT 28 +#define SRS_GIST_STATUS_MST_OUT_SIGNED 0 + +/* + Register SENSOR_BOARD +*/ +#define SRS_CORE_SENSOR_BOARD 0x0214 
+#define SRS_SENSOR_BOARD_ID_MASK 0x00000003U +#define SRS_SENSOR_BOARD_ID_SHIFT 0 +#define SRS_SENSOR_BOARD_ID_SIGNED 0 + +/* + Register INTERRUPT_STATUS +*/ +#define SRS_CORE_INTERRUPT_STATUS 0x0218 +#define SRS_INTERRUPT_STATUS_DUT_MASK 0x00000001U +#define SRS_INTERRUPT_STATUS_DUT_SHIFT 0 +#define SRS_INTERRUPT_STATUS_DUT_SIGNED 0 + +#define SRS_INTERRUPT_STATUS_PDP_MASK 0x00000002U +#define SRS_INTERRUPT_STATUS_PDP_SHIFT 1 +#define SRS_INTERRUPT_STATUS_PDP_SIGNED 0 + +#define SRS_INTERRUPT_STATUS_I2C_MASK 0x00000004U +#define SRS_INTERRUPT_STATUS_I2C_SHIFT 2 +#define SRS_INTERRUPT_STATUS_I2C_SIGNED 0 + +#define SRS_INTERRUPT_STATUS_SPI_MASK 0x00000008U +#define SRS_INTERRUPT_STATUS_SPI_SHIFT 3 +#define SRS_INTERRUPT_STATUS_SPI_SIGNED 0 + +#define SRS_INTERRUPT_STATUS_APM_MASK 0x00000010U +#define SRS_INTERRUPT_STATUS_APM_SHIFT 4 +#define SRS_INTERRUPT_STATUS_APM_SIGNED 0 + +#define SRS_INTERRUPT_STATUS_OS_IRQ_MASK 0x00001FE0U +#define SRS_INTERRUPT_STATUS_OS_IRQ_SHIFT 5 +#define SRS_INTERRUPT_STATUS_OS_IRQ_SIGNED 0 + +#define SRS_INTERRUPT_STATUS_IRQ_TEST_MASK 0x40000000U +#define SRS_INTERRUPT_STATUS_IRQ_TEST_SHIFT 30 +#define SRS_INTERRUPT_STATUS_IRQ_TEST_SIGNED 0 + +#define SRS_INTERRUPT_STATUS_MASTER_STATUS_MASK 0x80000000U +#define SRS_INTERRUPT_STATUS_MASTER_STATUS_SHIFT 31 +#define SRS_INTERRUPT_STATUS_MASTER_STATUS_SIGNED 0 + +/* + Register INTERRUPT_ENABLE +*/ +#define SRS_CORE_INTERRUPT_ENABLE 0x021C +#define SRS_INTERRUPT_ENABLE_DUT_MASK 0x00000001U +#define SRS_INTERRUPT_ENABLE_DUT_SHIFT 0 +#define SRS_INTERRUPT_ENABLE_DUT_SIGNED 0 + +#define SRS_INTERRUPT_ENABLE_PDP_MASK 0x00000002U +#define SRS_INTERRUPT_ENABLE_PDP_SHIFT 1 +#define SRS_INTERRUPT_ENABLE_PDP_SIGNED 0 + +#define SRS_INTERRUPT_ENABLE_I2C_MASK 0x00000004U +#define SRS_INTERRUPT_ENABLE_I2C_SHIFT 2 +#define SRS_INTERRUPT_ENABLE_I2C_SIGNED 0 + +#define SRS_INTERRUPT_ENABLE_SPI_MASK 0x00000008U +#define SRS_INTERRUPT_ENABLE_SPI_SHIFT 3 +#define SRS_INTERRUPT_ENABLE_SPI_SIGNED 0 + +#define 
SRS_INTERRUPT_ENABLE_APM_MASK 0x00000010U +#define SRS_INTERRUPT_ENABLE_APM_SHIFT 4 +#define SRS_INTERRUPT_ENABLE_APM_SIGNED 0 + +#define SRS_INTERRUPT_ENABLE_OS_IRQ_MASK 0x00001FE0U +#define SRS_INTERRUPT_ENABLE_OS_IRQ_SHIFT 5 +#define SRS_INTERRUPT_ENABLE_OS_IRQ_SIGNED 0 + +#define SRS_INTERRUPT_ENABLE_IRQ_TEST_MASK 0x40000000U +#define SRS_INTERRUPT_ENABLE_IRQ_TEST_SHIFT 30 +#define SRS_INTERRUPT_ENABLE_IRQ_TEST_SIGNED 0 + +#define SRS_INTERRUPT_ENABLE_MASTER_ENABLE_MASK 0x80000000U +#define SRS_INTERRUPT_ENABLE_MASTER_ENABLE_SHIFT 31 +#define SRS_INTERRUPT_ENABLE_MASTER_ENABLE_SIGNED 0 + +/* + Register INTERRUPT_CLR +*/ +#define SRS_CORE_INTERRUPT_CLR 0x0220 +#define SRS_INTERRUPT_CLR_DUT_MASK 0x00000001U +#define SRS_INTERRUPT_CLR_DUT_SHIFT 0 +#define SRS_INTERRUPT_CLR_DUT_SIGNED 0 + +#define SRS_INTERRUPT_CLR_PDP_MASK 0x00000002U +#define SRS_INTERRUPT_CLR_PDP_SHIFT 1 +#define SRS_INTERRUPT_CLR_PDP_SIGNED 0 + +#define SRS_INTERRUPT_CLR_I2C_MASK 0x00000004U +#define SRS_INTERRUPT_CLR_I2C_SHIFT 2 +#define SRS_INTERRUPT_CLR_I2C_SIGNED 0 + +#define SRS_INTERRUPT_CLR_SPI_MASK 0x00000008U +#define SRS_INTERRUPT_CLR_SPI_SHIFT 3 +#define SRS_INTERRUPT_CLR_SPI_SIGNED 0 + +#define SRS_INTERRUPT_CLR_APM_MASK 0x00000010U +#define SRS_INTERRUPT_CLR_APM_SHIFT 4 +#define SRS_INTERRUPT_CLR_APM_SIGNED 0 + +#define SRS_INTERRUPT_CLR_OS_IRQ_MASK 0x00001FE0U +#define SRS_INTERRUPT_CLR_OS_IRQ_SHIFT 5 +#define SRS_INTERRUPT_CLR_OS_IRQ_SIGNED 0 + +#define SRS_INTERRUPT_CLR_IRQ_TEST_MASK 0x40000000U +#define SRS_INTERRUPT_CLR_IRQ_TEST_SHIFT 30 +#define SRS_INTERRUPT_CLR_IRQ_TEST_SIGNED 0 + +#define SRS_INTERRUPT_CLR_MASTER_CLEAR_MASK 0x80000000U +#define SRS_INTERRUPT_CLR_MASTER_CLEAR_SHIFT 31 +#define SRS_INTERRUPT_CLR_MASTER_CLEAR_SIGNED 0 + +/* + Register INTERRUPT_TEST +*/ +#define SRS_CORE_INTERRUPT_TEST 0x0224 +#define SRS_INTERRUPT_TEST_INTERRUPT_TEST_MASK 0x00000001U +#define SRS_INTERRUPT_TEST_INTERRUPT_TEST_SHIFT 0 +#define SRS_INTERRUPT_TEST_INTERRUPT_TEST_SIGNED 0 + +/* 
+ Register INTERRUPT_TIMEOUT_CLR +*/ +#define SRS_CORE_INTERRUPT_TIMEOUT_CLR 0x0228 +#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_MASK 0x00000002U +#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_SHIFT 1 +#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_SIGNED 0 + +#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_MASK 0x00000001U +#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_SHIFT 0 +#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_SIGNED 0 + +/* + Register INTERRUPT_TIMEOUT +*/ +#define SRS_CORE_INTERRUPT_TIMEOUT 0x022C +#define SRS_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_MASK 0xFFFFFFFFU +#define SRS_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_SHIFT 0 +#define SRS_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_SIGNED 0 + +#endif /* _OUT_DRV_H_ */ + +/***************************************************************************** + End of file (out_drv.h) +*****************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/system/rgx_tc/pdp_regs.h b/drivers/mcst/gpu-imgtec/include/system/rgx_tc/pdp_regs.h new file mode 100644 index 000000000000..bd26b0617f95 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/system/rgx_tc/pdp_regs.h @@ -0,0 +1,75 @@ +/*************************************************************************/ /*! +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__PDP_REGS_H__) +#define __PDP_REGS_H__ + +/*************************************************************************/ /*! + PCI Device Information +*/ /**************************************************************************/ + +#define DCPDP_VENDOR_ID_POWERVR (0x1010) + +#define DCPDP_DEVICE_ID_PCI_APOLLO_FPGA (0x1CF1) +#define DCPDP_DEVICE_ID_PCIE_APOLLO_FPGA (0x1CF2) + +/*************************************************************************/ /*! + PCI Device Base Address Information +*/ /**************************************************************************/ + +/* PLL and PDP registers on base address register 0 */ +#define DCPDP_REG_PCI_BASENUM (0) + +#define DCPDP_PCI_PLL_REG_OFFSET (0x1000) +#define DCPDP_PCI_PLL_REG_SIZE (0x0400) + +#define DCPDP_PCI_PDP_REG_OFFSET (0xC000) +#define DCPDP_PCI_PDP_REG_SIZE (0x2000) + +/*************************************************************************/ /*! + Misc register information +*/ /**************************************************************************/ + +/* This information isn't captured in tcf_rgbpdp_regs.h so define it here */ +#define DCPDP_STR1SURF_FORMAT_ARGB8888 (0xE) +#define DCPDP_STR1ADDRCTRL_BASE_ADDR_SHIFT (4) +#define DCPDP_STR1POSN_STRIDE_SHIFT (4) + +#endif /* !defined(__PDP_REGS_H__) */ diff --git a/drivers/mcst/gpu-imgtec/include/system/rgx_tc/tcf_clk_ctrl.h b/drivers/mcst/gpu-imgtec/include/system/rgx_tc/tcf_clk_ctrl.h new file mode 100644 index 000000000000..cc7b10fd8116 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/system/rgx_tc/tcf_clk_ctrl.h @@ -0,0 +1,1018 @@ +/*************************************************************************/ /*! +@Title Test Chip Framework system control register definitions +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Autogenerated C -- do not edit + Generated from: tcf_clk_ctrl.def +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(_TCF_CLK_CTRL_H_) +#define _TCF_CLK_CTRL_H_ + +/* + * The following register definitions are valid if register 0x28 has value 0. + */ + +/* + Register FPGA_ID_REG +*/ +#define TCF_CLK_CTRL_FPGA_ID_REG 0x0000 +#define FPGA_ID_REG_CORE_CFG_MASK 0x0000FFFFU +#define FPGA_ID_REG_CORE_CFG_SHIFT 0 +#define FPGA_ID_REG_CORE_CFG_SIGNED 0 + +#define FPGA_ID_REG_CORE_ID_MASK 0xFFFF0000U +#define FPGA_ID_REG_CORE_ID_SHIFT 16 +#define FPGA_ID_REG_CORE_ID_SIGNED 0 + +/* + Register FPGA_REV_REG +*/ +#define TCF_CLK_CTRL_FPGA_REV_REG 0x0008 +#define FPGA_REV_REG_MAINT_MASK 0x000000FFU +#define FPGA_REV_REG_MAINT_SHIFT 0 +#define FPGA_REV_REG_MAINT_SIGNED 0 + +#define FPGA_REV_REG_MINOR_MASK 0x0000FF00U +#define FPGA_REV_REG_MINOR_SHIFT 8 +#define FPGA_REV_REG_MINOR_SIGNED 0 + +#define FPGA_REV_REG_MAJOR_MASK 0x00FF0000U +#define FPGA_REV_REG_MAJOR_SHIFT 16 +#define FPGA_REV_REG_MAJOR_SIGNED 0 + +#define FPGA_REV_REG_DESIGNER_MASK 0xFF000000U +#define FPGA_REV_REG_DESIGNER_SHIFT 24 +#define FPGA_REV_REG_DESIGNER_SIGNED 0 + +/* + Register FPGA_DES_REV_1 +*/ +#define TCF_CLK_CTRL_FPGA_DES_REV_1 0x0010 +#define FPGA_DES_REV_1_MASK 0xFFFFFFFFU +#define FPGA_DES_REV_1_SHIFT 0 +#define FPGA_DES_REV_1_SIGNED 0 + +/* + Register FPGA_DES_REV_2 +*/ +#define TCF_CLK_CTRL_FPGA_DES_REV_2 0x0018 +#define FPGA_DES_REV_2_MASK 0xFFFFFFFFU +#define FPGA_DES_REV_2_SHIFT 0 
+#define FPGA_DES_REV_2_SIGNED 0 + +/* + Register TCF_CORE_ID_REG +*/ +#define TCF_CLK_CTRL_TCF_CORE_ID_REG 0x0020 +#define TCF_CORE_ID_REG_CORE_CFG_MASK 0x0000FFFFU +#define TCF_CORE_ID_REG_CORE_CFG_SHIFT 0 +#define TCF_CORE_ID_REG_CORE_CFG_SIGNED 0 + +#define TCF_CORE_ID_REG_CORE_ID_MASK 0xFFFF0000U +#define TCF_CORE_ID_REG_CORE_ID_SHIFT 16 +#define TCF_CORE_ID_REG_CORE_ID_SIGNED 0 + +/* + Register TCF_CORE_REV_REG +*/ +#define TCF_CLK_CTRL_TCF_CORE_REV_REG 0x0028 +#define TCF_CORE_REV_REG_MAINT_MASK 0x000000FFU +#define TCF_CORE_REV_REG_MAINT_SHIFT 0 +#define TCF_CORE_REV_REG_MAINT_SIGNED 0 + +#define TCF_CORE_REV_REG_MINOR_MASK 0x0000FF00U +#define TCF_CORE_REV_REG_MINOR_SHIFT 8 +#define TCF_CORE_REV_REG_MINOR_SIGNED 0 + +#define TCF_CORE_REV_REG_MAJOR_MASK 0x00FF0000U +#define TCF_CORE_REV_REG_MAJOR_SHIFT 16 +#define TCF_CORE_REV_REG_MAJOR_SIGNED 0 + +#define TCF_CORE_REV_REG_DESIGNER_MASK 0xFF000000U +#define TCF_CORE_REV_REG_DESIGNER_SHIFT 24 +#define TCF_CORE_REV_REG_DESIGNER_SIGNED 0 + +/* + Register TCF_CORE_DES_REV_1 +*/ +#define TCF_CLK_CTRL_TCF_CORE_DES_REV_1 0x0030 +#define TCF_CORE_DES_REV_1_MASK 0xFFFFFFFFU +#define TCF_CORE_DES_REV_1_SHIFT 0 +#define TCF_CORE_DES_REV_1_SIGNED 0 + +/* + Register TCF_CORE_DES_REV_2 +*/ +#define TCF_CLK_CTRL_TCF_CORE_DES_REV_2 0x0038 +#define TCF_CORE_DES_REV_2_MASK 0xFFFFFFFFU +#define TCF_CORE_DES_REV_2_SHIFT 0 +#define TCF_CORE_DES_REV_2_SIGNED 0 + + +/* + * The following register definitions are valid if register 0x28 has value 1. 
+ */ + +/* + Register ID +*/ +#define TCF_CLK_CTRL_ID 0x0000 +#define VARIANT_MASK 0x0000FFFFU +#define VARIANT_SHIFT 0 +#define VARIANT_SIGNED 0 + +#define ID_MASK 0xFFFF0000U +#define ID_SHIFT 16 +#define ID_SIGNED 0 + +/* + Register REL +*/ +#define TCF_CLK_CTRL_REL 0x0008 +#define MINOR_MASK 0x0000FFFFU +#define MINOR_SHIFT 0 +#define MINOR_SIGNED 0 + +#define MAJOR_MASK 0xFFFF0000U +#define MAJOR_SHIFT 16 +#define MAJOR_SIGNED 0 + +/* + Register CHANGE_SET +*/ +#define TCF_CLK_CTRL_CHANGE_SET 0x0010 +#define SET_MASK 0xFFFFFFFFU +#define SET_SHIFT 0 +#define SET_SIGNED 0 + +/* + Register USER_ID +*/ +#define TCF_CLK_CTRL_USER_ID 0x0018 +#define USER_ID_MASK 0x0000000FU +#define USER_ID_SHIFT 0 +#define USER_ID_SIGNED 0 + +/* + Register USER_BUILD +*/ +#define TCF_CLK_CTRL_USER_BUILD 0x0020 +#define BUILD_MASK 0xFFFFFFFFU +#define BUILD_SHIFT 0 +#define BUILD_SIGNED 0 + +/* + Register SW_IF_VERSION +*/ +#define TCF_CLK_CTRL_SW_IF_VERSION 0x0028 +#define VERSION_MASK 0x0000FFFFU +#define VERSION_SHIFT 0 +#define VERSION_SIGNED 0 + +/* + * The following register definitions are valid for all Apollo builds, + * even if some of the registers are not available for certain cores. 
+ */ + +/* + Register SCB_GENERAL_CONTROL +*/ +#define TCF_CLK_CTRL_SCB_GENERAL_CONTROL 0x0040 +#define SCB_GC_TRANS_HALT_MASK 0x00000200U +#define SCB_GC_TRANS_HALT_SHIFT 9 +#define SCB_GC_TRANS_HALT_SIGNED 0 + +#define SCB_GC_CKD_REGS_MASK 0x00000100U +#define SCB_GC_CKD_REGS_SHIFT 8 +#define SCB_GC_CKD_REGS_SIGNED 0 + +#define SCB_GC_CKD_SLAVE_MASK 0x00000080U +#define SCB_GC_CKD_SLAVE_SHIFT 7 +#define SCB_GC_CKD_SLAVE_SIGNED 0 + +#define SCB_GC_CKD_MASTER_MASK 0x00000040U +#define SCB_GC_CKD_MASTER_SHIFT 6 +#define SCB_GC_CKD_MASTER_SIGNED 0 + +#define SCB_GC_CKD_XDATA_MASK 0x00000020U +#define SCB_GC_CKD_XDATA_SHIFT 5 +#define SCB_GC_CKD_XDATA_SIGNED 0 + +#define SCB_GC_SFR_REG_MASK 0x00000010U +#define SCB_GC_SFR_REG_SHIFT 4 +#define SCB_GC_SFR_REG_SIGNED 0 + +#define SCB_GC_SFR_SLAVE_MASK 0x00000008U +#define SCB_GC_SFR_SLAVE_SHIFT 3 +#define SCB_GC_SFR_SLAVE_SIGNED 0 + +#define SCB_GC_SFR_MASTER_MASK 0x00000004U +#define SCB_GC_SFR_MASTER_SHIFT 2 +#define SCB_GC_SFR_MASTER_SIGNED 0 + +#define SCB_GC_SFR_DET_DATA_MASK 0x00000002U +#define SCB_GC_SFR_DET_DATA_SHIFT 1 +#define SCB_GC_SFR_DET_DATA_SIGNED 0 + +#define SCB_GC_SFR_GEN_DATA_MASK 0x00000001U +#define SCB_GC_SFR_GEN_DATA_SHIFT 0 +#define SCB_GC_SFR_GEN_DATA_SIGNED 0 + +/* + Register SCB_MASTER_READ_COUNT +*/ +#define TCF_CLK_CTRL_SCB_MASTER_READ_COUNT 0x0048 +#define MASTER_READ_COUNT_MASK 0x0000FFFFU +#define MASTER_READ_COUNT_SHIFT 0 +#define MASTER_READ_COUNT_SIGNED 0 + +/* + Register SCB_MASTER_READ_DATA +*/ +#define TCF_CLK_CTRL_SCB_MASTER_READ_DATA 0x0050 +#define MASTER_READ_DATA_MASK 0x000000FFU +#define MASTER_READ_DATA_SHIFT 0 +#define MASTER_READ_DATA_SIGNED 0 + +/* + Register SCB_MASTER_ADDRESS +*/ +#define TCF_CLK_CTRL_SCB_MASTER_ADDRESS 0x0058 +#define SCB_MASTER_ADDRESS_MASK 0x000003FFU +#define SCB_MASTER_ADDRESS_SHIFT 0 +#define SCB_MASTER_ADDRESS_SIGNED 0 + +/* + Register SCB_MASTER_WRITE_DATA +*/ +#define TCF_CLK_CTRL_SCB_MASTER_WRITE_DATA 0x0060 +#define MASTER_WRITE_DATA_MASK 
0x000000FFU +#define MASTER_WRITE_DATA_SHIFT 0 +#define MASTER_WRITE_DATA_SIGNED 0 + +/* + Register SCB_MASTER_WRITE_COUNT +*/ +#define TCF_CLK_CTRL_SCB_MASTER_WRITE_COUNT 0x0068 +#define MASTER_WRITE_COUNT_MASK 0x0000FFFFU +#define MASTER_WRITE_COUNT_SHIFT 0 +#define MASTER_WRITE_COUNT_SIGNED 0 + +/* + Register SCB_BUS_SELECT +*/ +#define TCF_CLK_CTRL_SCB_BUS_SELECT 0x0070 +#define BUS_SELECT_MASK 0x00000003U +#define BUS_SELECT_SHIFT 0 +#define BUS_SELECT_SIGNED 0 + +/* + Register SCB_MASTER_FILL_STATUS +*/ +#define TCF_CLK_CTRL_SCB_MASTER_FILL_STATUS 0x0078 +#define MASTER_WRITE_FIFO_EMPTY_MASK 0x00000008U +#define MASTER_WRITE_FIFO_EMPTY_SHIFT 3 +#define MASTER_WRITE_FIFO_EMPTY_SIGNED 0 + +#define MASTER_WRITE_FIFO_FULL_MASK 0x00000004U +#define MASTER_WRITE_FIFO_FULL_SHIFT 2 +#define MASTER_WRITE_FIFO_FULL_SIGNED 0 + +#define MASTER_READ_FIFO_EMPTY_MASK 0x00000002U +#define MASTER_READ_FIFO_EMPTY_SHIFT 1 +#define MASTER_READ_FIFO_EMPTY_SIGNED 0 + +#define MASTER_READ_FIFO_FULL_MASK 0x00000001U +#define MASTER_READ_FIFO_FULL_SHIFT 0 +#define MASTER_READ_FIFO_FULL_SIGNED 0 + +/* + Register CLK_AND_RST_CTRL +*/ +#define TCF_CLK_CTRL_CLK_AND_RST_CTRL 0x0080 +#define GLB_CLKG_EN_MASK 0x00020000U +#define GLB_CLKG_EN_SHIFT 17 +#define GLB_CLKG_EN_SIGNED 0 + +#define CLK_GATE_CNTL_MASK 0x00010000U +#define CLK_GATE_CNTL_SHIFT 16 +#define CLK_GATE_CNTL_SIGNED 0 + +#define DUT_DCM_RESETN_MASK 0x00000400U +#define DUT_DCM_RESETN_SHIFT 10 +#define DUT_DCM_RESETN_SIGNED 0 + +#define MEM_RESYNC_BYPASS_MASK 0x00000200U +#define MEM_RESYNC_BYPASS_SHIFT 9 +#define MEM_RESYNC_BYPASS_SIGNED 0 + +#define SYS_RESYNC_BYPASS_MASK 0x00000100U +#define SYS_RESYNC_BYPASS_SHIFT 8 +#define SYS_RESYNC_BYPASS_SIGNED 0 + +#define SCB_RESETN_MASK 0x00000010U +#define SCB_RESETN_SHIFT 4 +#define SCB_RESETN_SIGNED 0 + +#define PDP2_RESETN_MASK 0x00000008U +#define PDP2_RESETN_SHIFT 3 +#define PDP2_RESETN_SIGNED 0 + +#define PDP1_RESETN_MASK 0x00000004U +#define PDP1_RESETN_SHIFT 2 +#define 
PDP1_RESETN_SIGNED 0 + +#define DDR_RESETN_MASK 0x00000002U +#define DDR_RESETN_SHIFT 1 +#define DDR_RESETN_SIGNED 0 + +#define DUT_RESETN_MASK 0x00000001U +#define DUT_RESETN_SHIFT 0 +#define DUT_RESETN_SIGNED 0 + +/* + Register TEST_REG_OUT +*/ +#define TCF_CLK_CTRL_TEST_REG_OUT 0x0088 +#define TEST_REG_OUT_MASK 0xFFFFFFFFU +#define TEST_REG_OUT_SHIFT 0 +#define TEST_REG_OUT_SIGNED 0 + +/* + Register TEST_REG_IN +*/ +#define TCF_CLK_CTRL_TEST_REG_IN 0x0090 +#define TEST_REG_IN_MASK 0xFFFFFFFFU +#define TEST_REG_IN_SHIFT 0 +#define TEST_REG_IN_SIGNED 0 + +/* + Register TEST_CTRL +*/ +#define TCF_CLK_CTRL_TEST_CTRL 0x0098 +#define PCI_TEST_OFFSET_MASK 0xF8000000U +#define PCI_TEST_OFFSET_SHIFT 27 +#define PCI_TEST_OFFSET_SIGNED 0 + +#define PDP1_HOST_MEM_SELECT_MASK 0x00000200U +#define PDP1_HOST_MEM_SELECT_SHIFT 9 +#define PDP1_HOST_MEM_SELECT_SIGNED 0 + +#define HOST_PHY_MODE_MASK 0x00000100U +#define HOST_PHY_MODE_SHIFT 8 +#define HOST_PHY_MODE_SIGNED 0 + +#define HOST_ONLY_MODE_MASK 0x00000080U +#define HOST_ONLY_MODE_SHIFT 7 +#define HOST_ONLY_MODE_SIGNED 0 + +#define PCI_TEST_MODE_MASK 0x00000040U +#define PCI_TEST_MODE_SHIFT 6 +#define PCI_TEST_MODE_SIGNED 0 + +#define TURN_OFF_DDR_MASK 0x00000020U +#define TURN_OFF_DDR_SHIFT 5 +#define TURN_OFF_DDR_SIGNED 0 + +#define SYS_RD_CLK_INV_MASK 0x00000010U +#define SYS_RD_CLK_INV_SHIFT 4 +#define SYS_RD_CLK_INV_SIGNED 0 + +#define MEM_REQ_CLK_INV_MASK 0x00000008U +#define MEM_REQ_CLK_INV_SHIFT 3 +#define MEM_REQ_CLK_INV_SIGNED 0 + +#define BURST_SPLIT_MASK 0x00000004U +#define BURST_SPLIT_SHIFT 2 +#define BURST_SPLIT_SIGNED 0 + +#define CLK_INVERSION_MASK 0x00000002U +#define CLK_INVERSION_SHIFT 1 +#define CLK_INVERSION_SIGNED 0 + +#define ADDRESS_FORCE_MASK 0x00000001U +#define ADDRESS_FORCE_SHIFT 0 +#define ADDRESS_FORCE_SIGNED 0 + +/* + Register CLEAR_HOST_MEM_SIG +*/ +#define TCF_CLK_CTRL_CLEAR_HOST_MEM_SIG 0x00A0 +#define SIGNATURE_TAG_ID_MASK 0x00000F00U +#define SIGNATURE_TAG_ID_SHIFT 8 +#define 
SIGNATURE_TAG_ID_SIGNED 0 + +#define CLEAR_HOST_MEM_SIGNATURE_MASK 0x00000001U +#define CLEAR_HOST_MEM_SIGNATURE_SHIFT 0 +#define CLEAR_HOST_MEM_SIGNATURE_SIGNED 0 + +/* + Register HOST_MEM_SIGNATURE +*/ +#define TCF_CLK_CTRL_HOST_MEM_SIGNATURE 0x00A8 +#define HOST_MEM_SIGNATURE_MASK 0xFFFFFFFFU +#define HOST_MEM_SIGNATURE_SHIFT 0 +#define HOST_MEM_SIGNATURE_SIGNED 0 + +/* + Register INTERRUPT_STATUS +*/ +#define TCF_CLK_CTRL_INTERRUPT_STATUS 0x00C8 +#define INTERRUPT_MASTER_STATUS_MASK 0x80000000U +#define INTERRUPT_MASTER_STATUS_SHIFT 31 +#define INTERRUPT_MASTER_STATUS_SIGNED 0 + +#define OTHER_INTS_MASK 0x7FFE0000U +#define OTHER_INTS_SHIFT 17 +#define OTHER_INTS_SIGNED 0 + +#define HOST_MST_NORESPONSE_MASK 0x00010000U +#define HOST_MST_NORESPONSE_SHIFT 16 +#define HOST_MST_NORESPONSE_SIGNED 0 + +#define PDP2_INT_MASK 0x00008000U +#define PDP2_INT_SHIFT 15 +#define PDP2_INT_SIGNED 0 + +#define PDP1_INT_MASK 0x00004000U +#define PDP1_INT_SHIFT 14 +#define PDP1_INT_SIGNED 0 + +#define EXT_INT_MASK 0x00002000U +#define EXT_INT_SHIFT 13 +#define EXT_INT_SIGNED 0 + +#define SCB_MST_HLT_BIT_MASK 0x00001000U +#define SCB_MST_HLT_BIT_SHIFT 12 +#define SCB_MST_HLT_BIT_SIGNED 0 + +#define SCB_SLV_EVENT_MASK 0x00000800U +#define SCB_SLV_EVENT_SHIFT 11 +#define SCB_SLV_EVENT_SIGNED 0 + +#define SCB_TDONE_RX_MASK 0x00000400U +#define SCB_TDONE_RX_SHIFT 10 +#define SCB_TDONE_RX_SIGNED 0 + +#define SCB_SLV_WT_RD_DAT_MASK 0x00000200U +#define SCB_SLV_WT_RD_DAT_SHIFT 9 +#define SCB_SLV_WT_RD_DAT_SIGNED 0 + +#define SCB_SLV_WT_PRV_RD_MASK 0x00000100U +#define SCB_SLV_WT_PRV_RD_SHIFT 8 +#define SCB_SLV_WT_PRV_RD_SIGNED 0 + +#define SCB_SLV_WT_WR_DAT_MASK 0x00000080U +#define SCB_SLV_WT_WR_DAT_SHIFT 7 +#define SCB_SLV_WT_WR_DAT_SIGNED 0 + +#define SCB_MST_WT_RD_DAT_MASK 0x00000040U +#define SCB_MST_WT_RD_DAT_SHIFT 6 +#define SCB_MST_WT_RD_DAT_SIGNED 0 + +#define SCB_ADD_ACK_ERR_MASK 0x00000020U +#define SCB_ADD_ACK_ERR_SHIFT 5 +#define SCB_ADD_ACK_ERR_SIGNED 0 + +#define 
SCB_WR_ACK_ERR_MASK 0x00000010U +#define SCB_WR_ACK_ERR_SHIFT 4 +#define SCB_WR_ACK_ERR_SIGNED 0 + +#define SCB_SDAT_LO_TIM_MASK 0x00000008U +#define SCB_SDAT_LO_TIM_SHIFT 3 +#define SCB_SDAT_LO_TIM_SIGNED 0 + +#define SCB_SCLK_LO_TIM_MASK 0x00000004U +#define SCB_SCLK_LO_TIM_SHIFT 2 +#define SCB_SCLK_LO_TIM_SIGNED 0 + +#define SCB_UNEX_START_BIT_MASK 0x00000002U +#define SCB_UNEX_START_BIT_SHIFT 1 +#define SCB_UNEX_START_BIT_SIGNED 0 + +#define SCB_BUS_INACTIVE_MASK 0x00000001U +#define SCB_BUS_INACTIVE_SHIFT 0 +#define SCB_BUS_INACTIVE_SIGNED 0 + +/* + Register INTERRUPT_OP_CFG +*/ +#define TCF_CLK_CTRL_INTERRUPT_OP_CFG 0x00D0 +#define PULSE_NLEVEL_MASK 0x80000000U +#define PULSE_NLEVEL_SHIFT 31 +#define PULSE_NLEVEL_SIGNED 0 + +#define INT_SENSE_MASK 0x40000000U +#define INT_SENSE_SHIFT 30 +#define INT_SENSE_SIGNED 0 + +#define INTERRUPT_DEST_MASK 0x0000000FU +#define INTERRUPT_DEST_SHIFT 0 +#define INTERRUPT_DEST_SIGNED 0 + +/* + Register INTERRUPT_ENABLE +*/ +#define TCF_CLK_CTRL_INTERRUPT_ENABLE 0x00D8 +#define INTERRUPT_MASTER_ENABLE_MASK 0x80000000U +#define INTERRUPT_MASTER_ENABLE_SHIFT 31 +#define INTERRUPT_MASTER_ENABLE_SIGNED 0 + +#define INTERRUPT_ENABLE_MASK 0x7FFFFFFFU +#define INTERRUPT_ENABLE_SHIFT 0 +#define INTERRUPT_ENABLE_SIGNED 0 + +/* + Register INTERRUPT_CLEAR +*/ +#define TCF_CLK_CTRL_INTERRUPT_CLEAR 0x00E0 +#define INTERRUPT_MASTER_CLEAR_MASK 0x80000000U +#define INTERRUPT_MASTER_CLEAR_SHIFT 31 +#define INTERRUPT_MASTER_CLEAR_SIGNED 0 + +#define INTERRUPT_CLEAR_MASK 0x7FFFFFFFU +#define INTERRUPT_CLEAR_SHIFT 0 +#define INTERRUPT_CLEAR_SIGNED 0 + +/* + Register YCC_RGB_CTRL +*/ +#define TCF_CLK_CTRL_YCC_RGB_CTRL 0x00E8 +#define RGB_CTRL1_MASK 0x000001FFU +#define RGB_CTRL1_SHIFT 0 +#define RGB_CTRL1_SIGNED 0 + +#define RGB_CTRL2_MASK 0x01FF0000U +#define RGB_CTRL2_SHIFT 16 +#define RGB_CTRL2_SIGNED 0 + +/* + Register EXP_BRD_CTRL +*/ +#define TCF_CLK_CTRL_EXP_BRD_CTRL 0x00F8 +#define PDP1_DATA_EN_MASK 0x00000003U +#define PDP1_DATA_EN_SHIFT 
0 +#define PDP1_DATA_EN_SIGNED 0 + +#define PDP2_DATA_EN_MASK 0x00000030U +#define PDP2_DATA_EN_SHIFT 4 +#define PDP2_DATA_EN_SIGNED 0 + +#define EXP_BRD_OUTPUT_MASK 0xFFFFFF00U +#define EXP_BRD_OUTPUT_SHIFT 8 +#define EXP_BRD_OUTPUT_SIGNED 0 + +/* + Register HOSTIF_CONTROL +*/ +#define TCF_CLK_CTRL_HOSTIF_CONTROL 0x0100 +#define HOSTIF_CTRL_MASK 0x000000FFU +#define HOSTIF_CTRL_SHIFT 0 +#define HOSTIF_CTRL_SIGNED 0 + +/* + Register DUT_CONTROL_1 +*/ +#define TCF_CLK_CTRL_DUT_CONTROL_1 0x0108 +#define DUT_CTRL_1_MASK 0xFFFFFFFFU +#define DUT_CTRL_1_SHIFT 0 +#define DUT_CTRL_1_SIGNED 0 + +/* TC ES2 additional needs those: */ +#define DUT_CTRL_TEST_MODE_SHIFT 0 +#define DUT_CTRL_TEST_MODE_MASK 0x3 + +#define DUT_CTRL_VCC_0V9EN (1<<12) +#define DUT_CTRL_VCC_1V8EN (1<<13) +#define DUT_CTRL_VCC_IO_INH (1<<14) +#define DUT_CTRL_VCC_CORE_INH (1<<15) + +/* + Register DUT_STATUS_1 +*/ +#define TCF_CLK_CTRL_DUT_STATUS_1 0x0110 +#define DUT_STATUS_1_MASK 0xFFFFFFFFU +#define DUT_STATUS_1_SHIFT 0 +#define DUT_STATUS_1_SIGNED 0 + +/* + Register DUT_CTRL_NOT_STAT_1 +*/ +#define TCF_CLK_CTRL_DUT_CTRL_NOT_STAT_1 0x0118 +#define DUT_STAT_NOT_CTRL_1_MASK 0xFFFFFFFFU +#define DUT_STAT_NOT_CTRL_1_SHIFT 0 +#define DUT_STAT_NOT_CTRL_1_SIGNED 0 + +/* + Register DUT_CONTROL_2 +*/ +#define TCF_CLK_CTRL_DUT_CONTROL_2 0x0120 +#define DUT_CTRL_2_MASK 0xFFFFFFFFU +#define DUT_CTRL_2_SHIFT 0 +#define DUT_CTRL_2_SIGNED 0 + +/* + Register DUT_STATUS_2 +*/ +#define TCF_CLK_CTRL_DUT_STATUS_2 0x0128 +#define DUT_STATUS_2_MASK 0xFFFFFFFFU +#define DUT_STATUS_2_SHIFT 0 +#define DUT_STATUS_2_SIGNED 0 + +/* + Register DUT_CTRL_NOT_STAT_2 +*/ +#define TCF_CLK_CTRL_DUT_CTRL_NOT_STAT_2 0x0130 +#define DUT_CTRL_NOT_STAT_2_MASK 0xFFFFFFFFU +#define DUT_CTRL_NOT_STAT_2_SHIFT 0 +#define DUT_CTRL_NOT_STAT_2_SIGNED 0 + +/* + Register BUS_CAP_BASE_ADDR +*/ +#define TCF_CLK_CTRL_BUS_CAP_BASE_ADDR 0x0138 +#define BUS_CAP_BASE_ADDR_MASK 0xFFFFFFFFU +#define BUS_CAP_BASE_ADDR_SHIFT 0 +#define BUS_CAP_BASE_ADDR_SIGNED 
0 + +/* + Register BUS_CAP_ENABLE +*/ +#define TCF_CLK_CTRL_BUS_CAP_ENABLE 0x0140 +#define BUS_CAP_ENABLE_MASK 0x00000001U +#define BUS_CAP_ENABLE_SHIFT 0 +#define BUS_CAP_ENABLE_SIGNED 0 + +/* + Register BUS_CAP_COUNT +*/ +#define TCF_CLK_CTRL_BUS_CAP_COUNT 0x0148 +#define BUS_CAP_COUNT_MASK 0xFFFFFFFFU +#define BUS_CAP_COUNT_SHIFT 0 +#define BUS_CAP_COUNT_SIGNED 0 + +/* + Register DCM_LOCK_STATUS +*/ +#define TCF_CLK_CTRL_DCM_LOCK_STATUS 0x0150 +#define DCM_LOCK_STATUS_MASK 0x00000007U +#define DCM_LOCK_STATUS_SHIFT 0 +#define DCM_LOCK_STATUS_SIGNED 0 + +/* + Register AUX_DUT_RESETNS +*/ +#define TCF_CLK_CTRL_AUX_DUT_RESETNS 0x0158 +#define AUX_DUT_RESETNS_MASK 0x0000000FU +#define AUX_DUT_RESETNS_SHIFT 0 +#define AUX_DUT_RESETNS_SIGNED 0 + +/* + Register TCF_SPI_MST_ADDR_RDNWR +*/ +#define TCF_CLK_CTRL_TCF_SPI_MST_ADDR_RDNWR 0x0160 +#define TCF_SPI_MST_ADDR_MASK 0x0003FFFFU +#define TCF_SPI_MST_ADDR_SHIFT 0 +#define TCF_SPI_MST_ADDR_SIGNED 0 + +#define TCF_SPI_MST_RDNWR_MASK 0x00040000U +#define TCF_SPI_MST_RDNWR_SHIFT 18 +#define TCF_SPI_MST_RDNWR_SIGNED 0 + +#define TCF_SPI_MST_SLAVE_ID_MASK 0x00080000U +#define TCF_SPI_MST_SLAVE_ID_SHIFT 19 +#define TCF_SPI_MST_SLAVE_ID_SIGNED 0 + +#define TCF_SPI_MST_MASTER_ID_MASK 0x00300000U +#define TCF_SPI_MST_MASTER_ID_SHIFT 20 +#define TCF_SPI_MST_MASTER_ID_SIGNED 0 + +/* + Register TCF_SPI_MST_WDATA +*/ +#define TCF_CLK_CTRL_TCF_SPI_MST_WDATA 0x0168 +#define TCF_SPI_MST_WDATA_MASK 0xFFFFFFFFU +#define TCF_SPI_MST_WDATA_SHIFT 0 +#define TCF_SPI_MST_WDATA_SIGNED 0 + +/* + Register TCF_SPI_MST_RDATA +*/ +#define TCF_CLK_CTRL_TCF_SPI_MST_RDATA 0x0170 +#define TCF_SPI_MST_RDATA_MASK 0xFFFFFFFFU +#define TCF_SPI_MST_RDATA_SHIFT 0 +#define TCF_SPI_MST_RDATA_SIGNED 0 + +/* + Register TCF_SPI_MST_STATUS +*/ +#define TCF_CLK_CTRL_TCF_SPI_MST_STATUS 0x0178 +#define TCF_SPI_MST_STATUS_MASK 0x0000000FU +#define TCF_SPI_MST_STATUS_SHIFT 0 +#define TCF_SPI_MST_STATUS_SIGNED 0 + +/* + Register TCF_SPI_MST_GO +*/ +#define 
TCF_CLK_CTRL_TCF_SPI_MST_GO 0x0180 +#define TCF_SPI_MST_GO_MASK 0x00000001U +#define TCF_SPI_MST_GO_SHIFT 0 +#define TCF_SPI_MST_GO_SIGNED 0 + +/* + Register EXT_SIG_CTRL +*/ +#define TCF_CLK_CTRL_EXT_SIG_CTRL 0x0188 +#define EXT_SYS_REQ_SIG_START_MASK 0x00000001U +#define EXT_SYS_REQ_SIG_START_SHIFT 0 +#define EXT_SYS_REQ_SIG_START_SIGNED 0 + +#define EXT_SYS_RD_SIG_START_MASK 0x00000002U +#define EXT_SYS_RD_SIG_START_SHIFT 1 +#define EXT_SYS_RD_SIG_START_SIGNED 0 + +#define EXT_MEM_REQ_SIG_START_MASK 0x00000004U +#define EXT_MEM_REQ_SIG_START_SHIFT 2 +#define EXT_MEM_REQ_SIG_START_SIGNED 0 + +#define EXT_MEM_RD_SIG_START_MASK 0x00000008U +#define EXT_MEM_RD_SIG_START_SHIFT 3 +#define EXT_MEM_RD_SIG_START_SIGNED 0 + +/* + Register EXT_SYS_REQ_SIG +*/ +#define TCF_CLK_CTRL_EXT_SYS_REQ_SIG 0x0190 +#define EXT_SYS_REQ_SIG_MASK 0xFFFFFFFFU +#define EXT_SYS_REQ_SIG_SHIFT 0 +#define EXT_SYS_REQ_SIG_SIGNED 0 + +/* + Register EXT_SYS_RD_SIG +*/ +#define TCF_CLK_CTRL_EXT_SYS_RD_SIG 0x0198 +#define EXT_SYS_RD_SIG_MASK 0xFFFFFFFFU +#define EXT_SYS_RD_SIG_SHIFT 0 +#define EXT_SYS_RD_SIG_SIGNED 0 + +/* + Register EXT_MEM_REQ_SIG +*/ +#define TCF_CLK_CTRL_EXT_MEM_REQ_SIG 0x01A0 +#define EXT_MEM_REQ_SIG_MASK 0xFFFFFFFFU +#define EXT_MEM_REQ_SIG_SHIFT 0 +#define EXT_MEM_REQ_SIG_SIGNED 0 + +/* + Register EXT_MEM_RD_SIG +*/ +#define TCF_CLK_CTRL_EXT_MEM_RD_SIG 0x01A8 +#define EXT_MEM_RD_SIG_MASK 0xFFFFFFFFU +#define EXT_MEM_RD_SIG_SHIFT 0 +#define EXT_MEM_RD_SIG_SIGNED 0 + +/* + Register EXT_SYS_REQ_WR_CNT +*/ +#define TCF_CLK_CTRL_EXT_SYS_REQ_WR_CNT 0x01B0 +#define EXT_SYS_REQ_WR_CNT_MASK 0xFFFFFFFFU +#define EXT_SYS_REQ_WR_CNT_SHIFT 0 +#define EXT_SYS_REQ_WR_CNT_SIGNED 0 + +/* + Register EXT_SYS_REQ_RD_CNT +*/ +#define TCF_CLK_CTRL_EXT_SYS_REQ_RD_CNT 0x01B8 +#define EXT_SYS_REQ_RD_CNT_MASK 0xFFFFFFFFU +#define EXT_SYS_REQ_RD_CNT_SHIFT 0 +#define EXT_SYS_REQ_RD_CNT_SIGNED 0 + +/* + Register EXT_SYS_RD_CNT +*/ +#define TCF_CLK_CTRL_EXT_SYS_RD_CNT 0x01C0 +#define EXT_SYS_RD_CNT_MASK 
0xFFFFFFFFU +#define EXT_SYS_RD_CNT_SHIFT 0 +#define EXT_SYS_RD_CNT_SIGNED 0 + +/* + Register EXT_MEM_REQ_WR_CNT +*/ +#define TCF_CLK_CTRL_EXT_MEM_REQ_WR_CNT 0x01C8 +#define EXT_MEM_REQ_WR_CNT_MASK 0xFFFFFFFFU +#define EXT_MEM_REQ_WR_CNT_SHIFT 0 +#define EXT_MEM_REQ_WR_CNT_SIGNED 0 + +/* + Register EXT_MEM_REQ_RD_CNT +*/ +#define TCF_CLK_CTRL_EXT_MEM_REQ_RD_CNT 0x01D0 +#define EXT_MEM_REQ_RD_CNT_MASK 0xFFFFFFFFU +#define EXT_MEM_REQ_RD_CNT_SHIFT 0 +#define EXT_MEM_REQ_RD_CNT_SIGNED 0 + +/* + Register EXT_MEM_RD_CNT +*/ +#define TCF_CLK_CTRL_EXT_MEM_RD_CNT 0x01D8 +#define EXT_MEM_RD_CNT_MASK 0xFFFFFFFFU +#define EXT_MEM_RD_CNT_SHIFT 0 +#define EXT_MEM_RD_CNT_SIGNED 0 + +/* + Register TCF_CORE_TARGET_BUILD_CFG +*/ +#define TCF_CLK_CTRL_TCF_CORE_TARGET_BUILD_CFG 0x01E0 +#define TCF_CORE_TARGET_BUILD_ID_MASK 0x000000FFU +#define TCF_CORE_TARGET_BUILD_ID_SHIFT 0 +#define TCF_CORE_TARGET_BUILD_ID_SIGNED 0 + +/* + Register MEM_THROUGH_SYS +*/ +#define TCF_CLK_CTRL_MEM_THROUGH_SYS 0x01E8 +#define MEM_THROUGH_SYS_MASK 0x00000001U +#define MEM_THROUGH_SYS_SHIFT 0 +#define MEM_THROUGH_SYS_SIGNED 0 + +/* + Register HOST_PHY_OFFSET +*/ +#define TCF_CLK_CTRL_HOST_PHY_OFFSET 0x01F0 +#define HOST_PHY_OFFSET_MASK 0xFFFFFFFFU +#define HOST_PHY_OFFSET_SHIFT 0 +#define HOST_PHY_OFFSET_SIGNED 0 + +/* + Register DEBUG_REG_SEL +*/ +#define TCF_CLK_CTRL_DEBUG_REG_SEL 0x01F8 +#define DEBUG_REG_SELECT_MASK 0xFFFFFFFFU +#define DEBUG_REG_SELECT_SHIFT 0 +#define DEBUG_REG_SELECT_SIGNED 0 + +/* + Register DEBUG_REG +*/ +#define TCF_CLK_CTRL_DEBUG_REG 0x0200 +#define DEBUG_REG_VALUE_MASK 0xFFFFFFFFU +#define DEBUG_REG_VALUE_SHIFT 0 +#define DEBUG_REG_VALUE_SIGNED 0 + +/* + Register JTAG_CTRL +*/ +#define TCF_CLK_CTRL_JTAG_CTRL 0x0208 +#define JTAG_TRST_MASK 0x00000001U +#define JTAG_TRST_SHIFT 0 +#define JTAG_TRST_SIGNED 0 + +#define JTAG_TMS_MASK 0x00000002U +#define JTAG_TMS_SHIFT 1 +#define JTAG_TMS_SIGNED 0 + +#define JTAG_TCK_MASK 0x00000004U +#define JTAG_TCK_SHIFT 2 +#define 
JTAG_TCK_SIGNED 0 + +#define JTAG_TDO_MASK 0x00000008U +#define JTAG_TDO_SHIFT 3 +#define JTAG_TDO_SIGNED 0 + +#define JTAG_TDI_MASK 0x00000010U +#define JTAG_TDI_SHIFT 4 +#define JTAG_TDI_SIGNED 0 + +#define JTAG_DASH_N_REG_MASK 0x40000000U +#define JTAG_DASH_N_REG_SHIFT 30 +#define JTAG_DASH_N_REG_SIGNED 0 + +#define JTAG_DISABLE_MASK 0x80000000U +#define JTAG_DISABLE_SHIFT 31 +#define JTAG_DISABLE_SIGNED 0 + +/* + Register SAI_DEBUG_RDNWR +*/ +#define TCF_CLK_CTRL_SAI_DEBUG_RDNWR 0x0300 +#define SAI_DEBUG_REG_ADDR_MASK 0x000001FFU +#define SAI_DEBUG_REG_ADDR_SHIFT 0 +#define SAI_DEBUG_REG_ADDR_SIGNED 0 + +#define SAI_DEBUG_REG_RDNWR_MASK 0x00000200U +#define SAI_DEBUG_REG_RDNWR_SHIFT 9 +#define SAI_DEBUG_REG_RDNWR_SIGNED 0 + +/* + Register SAI_DEBUG_WDATA +*/ +#define TCF_CLK_CTRL_SAI_DEBUG_WDATA 0x0308 +#define SAI_DEBUG_REG_WDATA_MASK 0xFFFFFFFFU +#define SAI_DEBUG_REG_WDATA_SHIFT 0 +#define SAI_DEBUG_REG_WDATA_SIGNED 0 + +/* + Register SAI_DEBUG_RDATA +*/ +#define TCF_CLK_CTRL_SAI_DEBUG_RDATA 0x0310 +#define SAI_DEBUG_REG_RDATA_MASK 0xFFFFFFFFU +#define SAI_DEBUG_REG_RDATA_SHIFT 0 +#define SAI_DEBUG_REG_RDATA_SIGNED 0 + +/* + Register SAI_DEBUG_GO +*/ +#define TCF_CLK_CTRL_SAI_DEBUG_GO 0x0318 +#define SAI_DEBUG_REG_GO_MASK 0x00000001U +#define SAI_DEBUG_REG_GO_SHIFT 0 +#define SAI_DEBUG_REG_GO_SIGNED 0 + +/* + Register AUX_DUT_RESETS +*/ +#define TCF_CLK_CTRL_AUX_DUT_RESETS 0x0320 +#define AUX_DUT_RESETS_MASK 0x0000000FU +#define AUX_DUT_RESETS_SHIFT 0 +#define AUX_DUT_RESETS_SIGNED 0 + +/* + Register DUT_CLK_CTRL +*/ +#define TCF_CLK_CTRL_DUT_CLK_CTRL 0x0328 +#define MEM_REQ_PHSE_MASK 0x0000FFFFU +#define MEM_REQ_PHSE_SHIFT 0 +#define MEM_REQ_PHSE_SIGNED 0 + +/* + Register DUT_CLK_STATUS +*/ +#define TCF_CLK_CTRL_DUT_CLK_STATUS 0x0330 +#define MEM_REQ_PHSE_SET_MASK 0x00000003U +#define MEM_REQ_PHSE_SET_SHIFT 0 +#define MEM_REQ_PHSE_SET_SIGNED 0 + +/* + Register DUT_CLK_INFO +*/ +#define TCF_CLK_CTRL_DUT_CLK_INFO 0x0340 +#define CORE_MASK 0x0000FFFFU +#define 
CORE_SHIFT 0 +#define CORE_SIGNED 0 + +#define MEM_MASK 0xFFFF0000U +#define MEM_SHIFT 16 +#define MEM_SIGNED 0 + +/* + Register DUT_CLK_PHSE +*/ +#define TCF_CLK_CTRL_DUT_CLK_PHSE 0x0348 +#define MEM_REQ_MASK 0x0000FFFFU +#define MEM_REQ_SHIFT 0 +#define MEM_REQ_SIGNED 0 + +#define MEM_RD_MASK 0xFFFF0000U +#define MEM_RD_SHIFT 16 +#define MEM_RD_SIGNED 0 + +#endif /* !defined(_TCF_CLK_CTRL_H_) */ + +/***************************************************************************** + End of file (tcf_clk_ctrl.h) +*****************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/system/rgx_tc/tcf_pll.h b/drivers/mcst/gpu-imgtec/include/system/rgx_tc/tcf_pll.h new file mode 100644 index 000000000000..71eaf924bbd6 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/system/rgx_tc/tcf_pll.h @@ -0,0 +1,311 @@ +/*************************************************************************/ /*! +@Title Test Chip Framework PDP register definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Autogenerated C -- do not edit + Generated from tcf_pll.def +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(_TCF_PLL_H_) +#define _TCF_PLL_H_ + +/* + Register PLL_DDR2_CLK0 +*/ +#define TCF_PLL_PLL_DDR2_CLK0 0x0000 +#define DDR2_PLL_CLK0_PHS_MASK 0x00300000U +#define DDR2_PLL_CLK0_PHS_SHIFT 20 +#define DDR2_PLL_CLK0_PHS_SIGNED 0 + +#define DDR2_PLL_CLK0_MS_MASK 0x00030000U +#define DDR2_PLL_CLK0_MS_SHIFT 16 +#define DDR2_PLL_CLK0_MS_SIGNED 0 + +#define DDR2_PLL_CLK0_FREQ_MASK 0x000001FFU +#define DDR2_PLL_CLK0_FREQ_SHIFT 0 +#define DDR2_PLL_CLK0_FREQ_SIGNED 0 + +/* + Register PLL_DDR2_CLK1TO5 +*/ +#define TCF_PLL_PLL_DDR2_CLK1TO5 0x0008 +#define DDR2_PLL_CLK1TO5_PHS_MASK 0x3FF00000U +#define DDR2_PLL_CLK1TO5_PHS_SHIFT 20 +#define DDR2_PLL_CLK1TO5_PHS_SIGNED 0 + +#define DDR2_PLL_CLK1TO5_MS_MASK 0x000FFC00U +#define DDR2_PLL_CLK1TO5_MS_SHIFT 10 +#define DDR2_PLL_CLK1TO5_MS_SIGNED 0 + +#define DDR2_PLL_CLK1TO5_FREQ_MASK 0x000003FFU +#define DDR2_PLL_CLK1TO5_FREQ_SHIFT 0 +#define DDR2_PLL_CLK1TO5_FREQ_SIGNED 0 + +/* + Register PLL_DDR2_DRP_GO +*/ +#define TCF_PLL_PLL_DDR2_DRP_GO 0x0010 +#define PLL_DDR2_DRP_GO_MASK 0x00000001U +#define PLL_DDR2_DRP_GO_SHIFT 0 +#define PLL_DDR2_DRP_GO_SIGNED 0 + +/* + Register PLL_PDP_CLK0 +*/ +#define TCF_PLL_PLL_PDP_CLK0 0x0018 +#define PDP_PLL_CLK0_PHS_MASK 0x00300000U +#define PDP_PLL_CLK0_PHS_SHIFT 20 +#define PDP_PLL_CLK0_PHS_SIGNED 0 + +#define PDP_PLL_CLK0_MS_MASK 0x00030000U +#define PDP_PLL_CLK0_MS_SHIFT 16 +#define PDP_PLL_CLK0_MS_SIGNED 0 + +#define PDP_PLL_CLK0_FREQ_MASK 0x000001FFU +#define PDP_PLL_CLK0_FREQ_SHIFT 0 +#define PDP_PLL_CLK0_FREQ_SIGNED 0 + +/* + Register PLL_PDP_CLK1TO5 +*/ +#define TCF_PLL_PLL_PDP_CLK1TO5 0x0020 +#define PDP_PLL_CLK1TO5_PHS_MASK 0x3FF00000U +#define PDP_PLL_CLK1TO5_PHS_SHIFT 20 +#define PDP_PLL_CLK1TO5_PHS_SIGNED 0 + +#define PDP_PLL_CLK1TO5_MS_MASK 0x000FFC00U +#define PDP_PLL_CLK1TO5_MS_SHIFT 10 +#define PDP_PLL_CLK1TO5_MS_SIGNED 0 + +#define PDP_PLL_CLK1TO5_FREQ_MASK 0x000003FFU +#define 
PDP_PLL_CLK1TO5_FREQ_SHIFT 0 +#define PDP_PLL_CLK1TO5_FREQ_SIGNED 0 + +/* + Register PLL_PDP_DRP_GO +*/ +#define TCF_PLL_PLL_PDP_DRP_GO 0x0028 +#define PLL_PDP_DRP_GO_MASK 0x00000001U +#define PLL_PDP_DRP_GO_SHIFT 0 +#define PLL_PDP_DRP_GO_SIGNED 0 + +/* + Register PLL_PDP2_CLK0 +*/ +#define TCF_PLL_PLL_PDP2_CLK0 0x0030 +#define PDP2_PLL_CLK0_PHS_MASK 0x00300000U +#define PDP2_PLL_CLK0_PHS_SHIFT 20 +#define PDP2_PLL_CLK0_PHS_SIGNED 0 + +#define PDP2_PLL_CLK0_MS_MASK 0x00030000U +#define PDP2_PLL_CLK0_MS_SHIFT 16 +#define PDP2_PLL_CLK0_MS_SIGNED 0 + +#define PDP2_PLL_CLK0_FREQ_MASK 0x000001FFU +#define PDP2_PLL_CLK0_FREQ_SHIFT 0 +#define PDP2_PLL_CLK0_FREQ_SIGNED 0 + +/* + Register PLL_PDP2_CLK1TO5 +*/ +#define TCF_PLL_PLL_PDP2_CLK1TO5 0x0038 +#define PDP2_PLL_CLK1TO5_PHS_MASK 0x3FF00000U +#define PDP2_PLL_CLK1TO5_PHS_SHIFT 20 +#define PDP2_PLL_CLK1TO5_PHS_SIGNED 0 + +#define PDP2_PLL_CLK1TO5_MS_MASK 0x000FFC00U +#define PDP2_PLL_CLK1TO5_MS_SHIFT 10 +#define PDP2_PLL_CLK1TO5_MS_SIGNED 0 + +#define PDP2_PLL_CLK1TO5_FREQ_MASK 0x000003FFU +#define PDP2_PLL_CLK1TO5_FREQ_SHIFT 0 +#define PDP2_PLL_CLK1TO5_FREQ_SIGNED 0 + +/* + Register PLL_PDP2_DRP_GO +*/ +#define TCF_PLL_PLL_PDP2_DRP_GO 0x0040 +#define PLL_PDP2_DRP_GO_MASK 0x00000001U +#define PLL_PDP2_DRP_GO_SHIFT 0 +#define PLL_PDP2_DRP_GO_SIGNED 0 + +/* + Register PLL_CORE_CLK0 +*/ +#define TCF_PLL_PLL_CORE_CLK0 0x0048 +#define CORE_PLL_CLK0_PHS_MASK 0x00300000U +#define CORE_PLL_CLK0_PHS_SHIFT 20 +#define CORE_PLL_CLK0_PHS_SIGNED 0 + +#define CORE_PLL_CLK0_MS_MASK 0x00030000U +#define CORE_PLL_CLK0_MS_SHIFT 16 +#define CORE_PLL_CLK0_MS_SIGNED 0 + +#define CORE_PLL_CLK0_FREQ_MASK 0x000001FFU +#define CORE_PLL_CLK0_FREQ_SHIFT 0 +#define CORE_PLL_CLK0_FREQ_SIGNED 0 + +/* + Register PLL_CORE_CLK1TO5 +*/ +#define TCF_PLL_PLL_CORE_CLK1TO5 0x0050 +#define CORE_PLL_CLK1TO5_PHS_MASK 0x3FF00000U +#define CORE_PLL_CLK1TO5_PHS_SHIFT 20 +#define CORE_PLL_CLK1TO5_PHS_SIGNED 0 + +#define CORE_PLL_CLK1TO5_MS_MASK 0x000FFC00U 
+#define CORE_PLL_CLK1TO5_MS_SHIFT 10 +#define CORE_PLL_CLK1TO5_MS_SIGNED 0 + +#define CORE_PLL_CLK1TO5_FREQ_MASK 0x000003FFU +#define CORE_PLL_CLK1TO5_FREQ_SHIFT 0 +#define CORE_PLL_CLK1TO5_FREQ_SIGNED 0 + +/* + Register PLL_CORE_DRP_GO +*/ +#define TCF_PLL_PLL_CORE_DRP_GO 0x0058 +#define PLL_CORE_DRP_GO_MASK 0x00000001U +#define PLL_CORE_DRP_GO_SHIFT 0 +#define PLL_CORE_DRP_GO_SIGNED 0 + +/* + Register PLL_SYSIF_CLK0 +*/ +#define TCF_PLL_PLL_SYSIF_CLK0 0x0060 +#define SYSIF_PLL_CLK0_PHS_MASK 0x00300000U +#define SYSIF_PLL_CLK0_PHS_SHIFT 20 +#define SYSIF_PLL_CLK0_PHS_SIGNED 0 + +#define SYSIF_PLL_CLK0_MS_MASK 0x00030000U +#define SYSIF_PLL_CLK0_MS_SHIFT 16 +#define SYSIF_PLL_CLK0_MS_SIGNED 0 + +#define SYSIF_PLL_CLK0_FREQ_MASK 0x000001FFU +#define SYSIF_PLL_CLK0_FREQ_SHIFT 0 +#define SYSIF_PLL_CLK0_FREQ_SIGNED 0 + +/* + Register PLL_SYSIF_CLK1TO5 +*/ +#define TCF_PLL_PLL_SYSIF_CLK1TO5 0x0068 +#define SYSIF_PLL_CLK1TO5_PHS_MASK 0x3FF00000U +#define SYSIF_PLL_CLK1TO5_PHS_SHIFT 20 +#define SYSIF_PLL_CLK1TO5_PHS_SIGNED 0 + +#define SYSIF_PLL_CLK1TO5_MS_MASK 0x000FFC00U +#define SYSIF_PLL_CLK1TO5_MS_SHIFT 10 +#define SYSIF_PLL_CLK1TO5_MS_SIGNED 0 + +#define SYSIF_PLL_CLK1TO5_FREQ_MASK 0x000003FFU +#define SYSIF_PLL_CLK1TO5_FREQ_SHIFT 0 +#define SYSIF_PLL_CLK1TO5_FREQ_SIGNED 0 + +/* + Register PLL_SYS_DRP_GO +*/ +#define TCF_PLL_PLL_SYS_DRP_GO 0x0070 +#define PLL_SYS_DRP_GO_MASK 0x00000001U +#define PLL_SYS_DRP_GO_SHIFT 0 +#define PLL_SYS_DRP_GO_SIGNED 0 + +/* + Register PLL_MEMIF_CLK0 +*/ +#define TCF_PLL_PLL_MEMIF_CLK0 0x0078 +#define MEMIF_PLL_CLK0_PHS_MASK 0x00300000U +#define MEMIF_PLL_CLK0_PHS_SHIFT 20 +#define MEMIF_PLL_CLK0_PHS_SIGNED 0 + +#define MEMIF_PLL_CLK0_MS_MASK 0x00030000U +#define MEMIF_PLL_CLK0_MS_SHIFT 16 +#define MEMIF_PLL_CLK0_MS_SIGNED 0 + +#define MEMIF_PLL_CLK0_FREQ_MASK 0x000001FFU +#define MEMIF_PLL_CLK0_FREQ_SHIFT 0 +#define MEMIF_PLL_CLK0_FREQ_SIGNED 0 + +/* + Register PLL_MEMIF_CLK1TO5 +*/ +#define TCF_PLL_PLL_MEMIF_CLK1TO5 0x0080 +#define 
MEMIF_PLL_CLK1TO5_PHS_MASK 0x3FF00000U +#define MEMIF_PLL_CLK1TO5_PHS_SHIFT 20 +#define MEMIF_PLL_CLK1TO5_PHS_SIGNED 0 + +#define MEMIF_PLL_CLK1TO5_MS_MASK 0x000FFC00U +#define MEMIF_PLL_CLK1TO5_MS_SHIFT 10 +#define MEMIF_PLL_CLK1TO5_MS_SIGNED 0 + +#define MEMIF_PLL_CLK1TO5_FREQ_MASK 0x000003FFU +#define MEMIF_PLL_CLK1TO5_FREQ_SHIFT 0 +#define MEMIF_PLL_CLK1TO5_FREQ_SIGNED 0 + +/* + Register PLL_MEM_DRP_GO +*/ +#define TCF_PLL_PLL_MEM_DRP_GO 0x0088 +#define PLL_MEM_DRP_GO_MASK 0x00000001U +#define PLL_MEM_DRP_GO_SHIFT 0 +#define PLL_MEM_DRP_GO_SIGNED 0 + +/* + Register PLL_ALL_DRP_GO +*/ +#define TCF_PLL_PLL_ALL_DRP_GO 0x0090 +#define PLL_ALL_DRP_GO_MASK 0x00000001U +#define PLL_ALL_DRP_GO_SHIFT 0 +#define PLL_ALL_DRP_GO_SIGNED 0 + +/* + Register PLL_DRP_STATUS +*/ +#define TCF_PLL_PLL_DRP_STATUS 0x0098 +#define PLL_LOCKS_MASK 0x00003F00U +#define PLL_LOCKS_SHIFT 8 +#define PLL_LOCKS_SIGNED 0 + +#define PLL_DRP_GOOD_MASK 0x0000003FU +#define PLL_DRP_GOOD_SHIFT 0 +#define PLL_DRP_GOOD_SIGNED 0 + +#endif /* !defined(_TCF_PLL_H_) */ + +/***************************************************************************** + End of file (tcf_pll.h) +*****************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/system/rgx_tc/tcf_rgbpdp_regs.h b/drivers/mcst/gpu-imgtec/include/system/rgx_tc/tcf_rgbpdp_regs.h new file mode 100644 index 000000000000..e87ba6152411 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/system/rgx_tc/tcf_rgbpdp_regs.h @@ -0,0 +1,559 @@ +/*************************************************************************/ /*! +@Title Test Chip Framework PDP register definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Autogenerated C -- do not edit + Generated from: tcf_rgbpdp_regs.def +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(_TCF_RGBPDP_REGS_H_) +#define _TCF_RGBPDP_REGS_H_ + +/* + Register PVR_TCF_RGBPDP_STR1SURF +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_STR1SURF 0x0000 +#define STR1HEIGHT_MASK 0x000007FFU +#define STR1HEIGHT_SHIFT 0 +#define STR1HEIGHT_SIGNED 0 + +#define STR1WIDTH_MASK 0x003FF800U +#define STR1WIDTH_SHIFT 11 +#define STR1WIDTH_SIGNED 0 + +#define STR1PIXFMT_MASK 0x0F000000U +#define STR1PIXFMT_SHIFT 24 +#define STR1PIXFMT_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_STR1ADDRCTRL +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL 0x0004 +#define STR1BASE_MASK 0x03FFFFFFU +#define STR1BASE_SHIFT 0 +#define STR1BASE_SIGNED 0 + +#define STR1INTFIELD_MASK 0x40000000U +#define STR1INTFIELD_SHIFT 30 +#define STR1INTFIELD_SIGNED 0 + +#define STR1STREN_MASK 0x80000000U +#define STR1STREN_SHIFT 31 +#define STR1STREN_SIGNED 0 + +/* + Register PVR_PDP_STR1POSN +*/ +#define TCF_RGBPDP_PVR_PDP_STR1POSN 0x0008 +#define STR1STRIDE_MASK 0x000003FFU +#define STR1STRIDE_SHIFT 0 +#define STR1STRIDE_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_MEMCTRL +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_MEMCTRL 0x000C +#define MEMREFRESH_MASK 0xC0000000U +#define MEMREFRESH_SHIFT 30 +#define MEMREFRESH_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_STRCTRL +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_STRCTRL 0x0010 +#define BURSTLEN_GFX_MASK 0x000000FFU +#define BURSTLEN_GFX_SHIFT 0 +#define BURSTLEN_GFX_SIGNED 0 + +#define THRESHOLD_GFX_MASK 0x0000FF00U +#define THRESHOLD_GFX_SHIFT 8 +#define THRESHOLD_GFX_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_SYNCCTRL +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL 0x0014 +#define HSDIS_MASK 0x00000001U +#define HSDIS_SHIFT 0 +#define HSDIS_SIGNED 0 + +#define HSPOL_MASK 0x00000002U +#define HSPOL_SHIFT 1 +#define HSPOL_SIGNED 0 + +#define VSDIS_MASK 0x00000004U +#define VSDIS_SHIFT 2 +#define VSDIS_SIGNED 0 + +#define VSPOL_MASK 0x00000008U +#define VSPOL_SHIFT 3 +#define VSPOL_SIGNED 0 
+ +#define BLNKDIS_MASK 0x00000010U +#define BLNKDIS_SHIFT 4 +#define BLNKDIS_SIGNED 0 + +#define BLNKPOL_MASK 0x00000020U +#define BLNKPOL_SHIFT 5 +#define BLNKPOL_SIGNED 0 + +#define HS_SLAVE_MASK 0x00000040U +#define HS_SLAVE_SHIFT 6 +#define HS_SLAVE_SIGNED 0 + +#define VS_SLAVE_MASK 0x00000080U +#define VS_SLAVE_SHIFT 7 +#define VS_SLAVE_SIGNED 0 + +#define INTERLACE_MASK 0x00000100U +#define INTERLACE_SHIFT 8 +#define INTERLACE_SIGNED 0 + +#define FIELDPOL_MASK 0x00000200U +#define FIELDPOL_SHIFT 9 +#define FIELDPOL_SIGNED 0 + +#define CLKPOL_MASK 0x00000800U +#define CLKPOL_SHIFT 11 +#define CLKPOL_SIGNED 0 + +#define CSYNC_EN_MASK 0x00001000U +#define CSYNC_EN_SHIFT 12 +#define CSYNC_EN_SIGNED 0 + +#define FIELD_EN_MASK 0x00002000U +#define FIELD_EN_SHIFT 13 +#define FIELD_EN_SIGNED 0 + +#define UPDWAIT_MASK 0x000F0000U +#define UPDWAIT_SHIFT 16 +#define UPDWAIT_SIGNED 0 + +#define UPDCTRL_MASK 0x01000000U +#define UPDCTRL_SHIFT 24 +#define UPDCTRL_SIGNED 0 + +#define UPDINTCTRL_MASK 0x02000000U +#define UPDINTCTRL_SHIFT 25 +#define UPDINTCTRL_SIGNED 0 + +#define UPDSYNCTRL_MASK 0x04000000U +#define UPDSYNCTRL_SHIFT 26 +#define UPDSYNCTRL_SIGNED 0 + +#define POWERDN_MASK 0x10000000U +#define POWERDN_SHIFT 28 +#define POWERDN_SIGNED 0 + +#define DISP_RST_MASK 0x20000000U +#define DISP_RST_SHIFT 29 +#define DISP_RST_SIGNED 0 + +#define SYNCACTIVE_MASK 0x80000000U +#define SYNCACTIVE_SHIFT 31 +#define SYNCACTIVE_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_BORDCOL +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_BORDCOL 0x0018 +#define BORDCOL_MASK 0x00FFFFFFU +#define BORDCOL_SHIFT 0 +#define BORDCOL_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_UPDCTRL +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_UPDCTRL 0x001C +#define UPDFIELD_MASK 0x00000001U +#define UPDFIELD_SHIFT 0 +#define UPDFIELD_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_HSYNC1 +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC1 0x0020 +#define HT_MASK 0x00000FFFU +#define HT_SHIFT 0 +#define HT_SIGNED 0 + +#define HBPS_MASK 
0x0FFF0000U +#define HBPS_SHIFT 16 +#define HBPS_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_HSYNC2 +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC2 0x0024 +#define HLBS_MASK 0x00000FFFU +#define HLBS_SHIFT 0 +#define HLBS_SIGNED 0 + +#define HAS_MASK 0x0FFF0000U +#define HAS_SHIFT 16 +#define HAS_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_HSYNC3 +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC3 0x0028 +#define HRBS_MASK 0x00000FFFU +#define HRBS_SHIFT 0 +#define HRBS_SIGNED 0 + +#define HFPS_MASK 0x0FFF0000U +#define HFPS_SHIFT 16 +#define HFPS_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_VSYNC1 +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC1 0x002C +#define VT_MASK 0x00000FFFU +#define VT_SHIFT 0 +#define VT_SIGNED 0 + +#define VBPS_MASK 0x0FFF0000U +#define VBPS_SHIFT 16 +#define VBPS_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_VSYNC2 +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC2 0x0030 +#define VTBS_MASK 0x00000FFFU +#define VTBS_SHIFT 0 +#define VTBS_SIGNED 0 + +#define VAS_MASK 0x0FFF0000U +#define VAS_SHIFT 16 +#define VAS_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_VSYNC3 +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC3 0x0034 +#define VBBS_MASK 0x00000FFFU +#define VBBS_SHIFT 0 +#define VBBS_SIGNED 0 + +#define VFPS_MASK 0x0FFF0000U +#define VFPS_SHIFT 16 +#define VFPS_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_HDECTRL +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_HDECTRL 0x0038 +#define HDEF_MASK 0x00000FFFU +#define HDEF_SHIFT 0 +#define HDEF_SIGNED 0 + +#define HDES_MASK 0x0FFF0000U +#define HDES_SHIFT 16 +#define HDES_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_VDECTRL +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_VDECTRL 0x003C +#define VDEF_MASK 0x00000FFFU +#define VDEF_SHIFT 0 +#define VDEF_SIGNED 0 + +#define VDES_MASK 0x0FFF0000U +#define VDES_SHIFT 16 +#define VDES_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_VEVENT +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_VEVENT 0x0040 +#define VFETCH_MASK 0x00000FFFU +#define VFETCH_SHIFT 0 +#define VFETCH_SIGNED 0 + +#define VEVENT_MASK 0x0FFF0000U +#define 
VEVENT_SHIFT 16 +#define VEVENT_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_OPMASK +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_OPMASK 0x0044 +#define MASKR_MASK 0x000000FFU +#define MASKR_SHIFT 0 +#define MASKR_SIGNED 0 + +#define MASKG_MASK 0x0000FF00U +#define MASKG_SHIFT 8 +#define MASKG_SIGNED 0 + +#define MASKB_MASK 0x00FF0000U +#define MASKB_SHIFT 16 +#define MASKB_SIGNED 0 + +#define BLANKLEVEL_MASK 0x40000000U +#define BLANKLEVEL_SHIFT 30 +#define BLANKLEVEL_SIGNED 0 + +#define MASKLEVEL_MASK 0x80000000U +#define MASKLEVEL_SHIFT 31 +#define MASKLEVEL_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_INTSTAT +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTSTAT 0x0048 +#define INTS_HBLNK0_MASK 0x00000001U +#define INTS_HBLNK0_SHIFT 0 +#define INTS_HBLNK0_SIGNED 0 + +#define INTS_HBLNK1_MASK 0x00000002U +#define INTS_HBLNK1_SHIFT 1 +#define INTS_HBLNK1_SIGNED 0 + +#define INTS_VBLNK0_MASK 0x00000004U +#define INTS_VBLNK0_SHIFT 2 +#define INTS_VBLNK0_SIGNED 0 + +#define INTS_VBLNK1_MASK 0x00000008U +#define INTS_VBLNK1_SHIFT 3 +#define INTS_VBLNK1_SIGNED 0 + +#define INTS_STR1URUN_MASK 0x00000010U +#define INTS_STR1URUN_SHIFT 4 +#define INTS_STR1URUN_SIGNED 0 + +#define INTS_STR1ORUN_MASK 0x00000020U +#define INTS_STR1ORUN_SHIFT 5 +#define INTS_STR1ORUN_SIGNED 0 + +#define INTS_DISPURUN_MASK 0x00000040U +#define INTS_DISPURUN_SHIFT 6 +#define INTS_DISPURUN_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_INTENAB +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB 0x004C +#define INTEN_HBLNK0_MASK 0x00000001U +#define INTEN_HBLNK0_SHIFT 0 +#define INTEN_HBLNK0_SIGNED 0 + +#define INTEN_HBLNK1_MASK 0x00000002U +#define INTEN_HBLNK1_SHIFT 1 +#define INTEN_HBLNK1_SIGNED 0 + +#define INTEN_VBLNK0_MASK 0x00000004U +#define INTEN_VBLNK0_SHIFT 2 +#define INTEN_VBLNK0_SIGNED 0 + +#define INTEN_VBLNK1_MASK 0x00000008U +#define INTEN_VBLNK1_SHIFT 3 +#define INTEN_VBLNK1_SIGNED 0 + +#define INTEN_STR1URUN_MASK 0x00000010U +#define INTEN_STR1URUN_SHIFT 4 +#define INTEN_STR1URUN_SIGNED 0 + +#define 
INTEN_STR1ORUN_MASK 0x00000020U +#define INTEN_STR1ORUN_SHIFT 5 +#define INTEN_STR1ORUN_SIGNED 0 + +#define INTEN_DISPURUN_MASK 0x00000040U +#define INTEN_DISPURUN_SHIFT 6 +#define INTEN_DISPURUN_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_INTCLEAR +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTCLEAR 0x0050 +#define INTCLR_HBLNK0_MASK 0x00000001U +#define INTCLR_HBLNK0_SHIFT 0 +#define INTCLR_HBLNK0_SIGNED 0 + +#define INTCLR_HBLNK1_MASK 0x00000002U +#define INTCLR_HBLNK1_SHIFT 1 +#define INTCLR_HBLNK1_SIGNED 0 + +#define INTCLR_VBLNK0_MASK 0x00000004U +#define INTCLR_VBLNK0_SHIFT 2 +#define INTCLR_VBLNK0_SIGNED 0 + +#define INTCLR_VBLNK1_MASK 0x00000008U +#define INTCLR_VBLNK1_SHIFT 3 +#define INTCLR_VBLNK1_SIGNED 0 + +#define INTCLR_STR1URUN_MASK 0x00000010U +#define INTCLR_STR1URUN_SHIFT 4 +#define INTCLR_STR1URUN_SIGNED 0 + +#define INTCLR_STR1ORUN_MASK 0x00000020U +#define INTCLR_STR1ORUN_SHIFT 5 +#define INTCLR_STR1ORUN_SIGNED 0 + +#define INTCLR_DISPURUN_MASK 0x00000040U +#define INTCLR_DISPURUN_SHIFT 6 +#define INTCLR_DISPURUN_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_INTCTRL +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTCTRL 0x0054 +#define HBLNK_LINENO_MASK 0x00000FFFU +#define HBLNK_LINENO_SHIFT 0 +#define HBLNK_LINENO_SIGNED 0 + +#define HBLNK_LINE_MASK 0x00010000U +#define HBLNK_LINE_SHIFT 16 +#define HBLNK_LINE_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_SIGNAT +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_SIGNAT 0x0058 +#define SIGNATURE_MASK 0xFFFFFFFFU +#define SIGNATURE_SHIFT 0 +#define SIGNATURE_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_LINESTAT +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_LINESTAT 0x005C +#define LINENO_MASK 0x00000FFFU +#define LINENO_SHIFT 0 +#define LINENO_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_DBGCTRL +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGCTRL 0x0060 +#define DBG_ENAB_MASK 0x00000001U +#define DBG_ENAB_SHIFT 0 +#define DBG_ENAB_SIGNED 0 + +#define DBG_READ_MASK 0x00000002U +#define DBG_READ_SHIFT 1 +#define DBG_READ_SIGNED 0 + +/* + Register 
PVR_TCF_RGBPDP_DBGDATA +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGDATA 0x0064 +#define DBG_DATA_MASK 0x00FFFFFFU +#define DBG_DATA_SHIFT 0 +#define DBG_DATA_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_DBGSIDE +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGSIDE 0x0068 +#define DBG_SIDE_MASK 0x00000007U +#define DBG_SIDE_SHIFT 0 +#define DBG_SIDE_SIGNED 0 + +#define DBG_VAL_MASK 0x00000008U +#define DBG_VAL_SHIFT 3 +#define DBG_VAL_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_REGLD_STAT +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_REGLD_STAT 0x0070 +#define REGLD_ADDROUT_MASK 0x00FFFFFFU +#define REGLD_ADDROUT_SHIFT 0 +#define REGLD_ADDROUT_SIGNED 0 + +#define REGLD_ADDREN_MASK 0x80000000U +#define REGLD_ADDREN_SHIFT 31 +#define REGLD_ADDREN_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_REGLD_CTRL +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_REGLD_CTRL 0x0074 +#define REGLD_ADDRIN_MASK 0x00FFFFFFU +#define REGLD_ADDRIN_SHIFT 0 +#define REGLD_ADDRIN_SIGNED 0 + +#define REGLD_VAL_MASK 0x01000000U +#define REGLD_VAL_SHIFT 24 +#define REGLD_VAL_SIGNED 0 + +#define REGLD_ADDRLEN_MASK 0xFE000000U +#define REGLD_ADDRLEN_SHIFT 25 +#define REGLD_ADDRLEN_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_CORE_ID +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_CORE_ID 0x0078 +#define CONFIG_ID_MASK 0x0000FFFFU +#define CONFIG_ID_SHIFT 0 +#define CONFIG_ID_SIGNED 0 + +#define CORE_ID_MASK 0x00FF0000U +#define CORE_ID_SHIFT 16 +#define CORE_ID_SIGNED 0 + +#define GROUP_ID_MASK 0xFF000000U +#define GROUP_ID_SHIFT 24 +#define GROUP_ID_SIGNED 0 + +/* + Register PVR_TCF_RGBPDP_CORE_REV +*/ +#define TCF_RGBPDP_PVR_TCF_RGBPDP_CORE_REV 0x007C +#define MAINT_REV_MASK 0x000000FFU +#define MAINT_REV_SHIFT 0 +#define MAINT_REV_SIGNED 0 + +#define MINOR_REV_MASK 0x0000FF00U +#define MINOR_REV_SHIFT 8 +#define MINOR_REV_SIGNED 0 + +#define MAJOR_REV_MASK 0x00FF0000U +#define MAJOR_REV_SHIFT 16 +#define MAJOR_REV_SIGNED 0 + +#endif /* !defined(_TCF_RGBPDP_REGS_H_) */ + 
+/***************************************************************************** + End of file (tcf_rgbpdp_regs.h) +*****************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/volcanic/rgx_common.h b/drivers/mcst/gpu-imgtec/include/volcanic/rgx_common.h new file mode 100644 index 000000000000..be9644c933b7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/volcanic/rgx_common.h @@ -0,0 +1,219 @@ +/*************************************************************************/ /*! +@File +@Title RGX Common Types and Defines Header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Common types and definitions for RGX software +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef RGX_COMMON_H +#define RGX_COMMON_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "img_defs.h" + +/* Included to get the BVNC_KM_N defined and other feature defs */ +#include "km/rgxdefs_km.h" + +/*! This macro represents a mask of LSBs that must be zero on data structure + * sizes and offsets to ensure they are 8-byte granular on types shared between + * the FW and host driver */ +#define RGX_FW_ALIGNMENT_LSB (7U) + +/*! Macro to test structure size alignment */ +#define RGX_FW_STRUCT_SIZE_ASSERT(_a) \ + static_assert(((sizeof(_a) & RGX_FW_ALIGNMENT_LSB)?0:1), \ + "Size of " #_a " is not properly aligned") + +/*! Macro to test structure member alignment */ +#define RGX_FW_STRUCT_OFFSET_ASSERT(_a, _b) \ + static_assert( ((offsetof(_a, _b) & RGX_FW_ALIGNMENT_LSB)?0:1), \ + "Offset of " #_a "." #_b " is not properly aligned") + + +/*! The master definition for data masters known to the firmware of RGX. + * When a new DM is added to this list, relevant entry should be added to + * RGX_HWPERF_DM enum list. + * The DM in a V1 HWPerf packet uses this definition. 
*/ +typedef IMG_UINT32 RGXFWIF_DM; + +#define RGXFWIF_DM_GP IMG_UINT32_C(0) +#define RGXFWIF_DM_TDM IMG_UINT32_C(1) +#define RGXFWIF_DM_GEOM IMG_UINT32_C(2) +#define RGXFWIF_DM_3D IMG_UINT32_C(3) +#define RGXFWIF_DM_CDM IMG_UINT32_C(4) + +#define RGXFWIF_DM_LAST RGXFWIF_DM_CDM + + + +typedef enum _RGX_KICK_TYPE_DM_ +{ + RGX_KICK_TYPE_DM_GP = 0x001, + RGX_KICK_TYPE_DM_TDM_2D = 0x002, + RGX_KICK_TYPE_DM_GEOM = 0x004, + RGX_KICK_TYPE_DM_3D = 0x008, + RGX_KICK_TYPE_DM_CDM = 0x010, + RGX_KICK_TYPE_DM_TQ2D = 0x020, + RGX_KICK_TYPE_DM_TQ3D = 0x040, + RGX_KICK_TYPE_DM_LAST = 0x080 +} RGX_KICK_TYPE_DM; + +/* Maximum number of DM in use: GP, 2D, GEOM, 3D, CDM */ +#define RGXFWIF_DM_DEFAULT_MAX (RGXFWIF_DM_LAST + 1U) + +#if !defined(__KERNEL__) +/* Maximum number of DM in use: GP, 2D, GEOM, 3D, CDM */ +#define RGXFWIF_DM_MAX (RGXFWIF_DM_LAST + 1) + +/* Min/Max number of HW DMs (all but GP) */ +#define RGXFWIF_HWDM_MIN (1U) +#define RGXFWIF_HWDM_MAX (RGXFWIF_DM_MAX) +#else /* !defined(__KERNEL__) */ + #define RGXFWIF_DM_MIN_MTS_CNT (6) + #define RGXFWIF_DM_MIN_CNT (5) + #define RGXFWIF_DM_MAX (RGXFWIF_DM_MIN_CNT) +#endif /* !defined(__KERNEL__) */ + +/* + * Data Master Tags to be appended to resources created on behalf of each RGX + * Context. + */ +#define RGX_RI_DM_TAG_KS 'K' +#define RGX_RI_DM_TAG_CDM 'C' +#define RGX_RI_DM_TAG_RC 'R' /* To be removed once TA/3D Timelines are split */ +#define RGX_RI_DM_TAG_GEOM 'V' +#define RGX_RI_DM_TAG_3D 'P' +#define RGX_RI_DM_TAG_TDM 'T' + +/* + * Client API Tags to be appended to resources created on behalf of each + * Client API. 
+ */ +#define RGX_RI_CLIENT_API_GLES1 '1' +#define RGX_RI_CLIENT_API_GLES3 '3' +#define RGX_RI_CLIENT_API_VULKAN 'V' +#define RGX_RI_CLIENT_API_EGL 'E' +#define RGX_RI_CLIENT_API_OPENCL 'C' +#define RGX_RI_CLIENT_API_OPENGL 'G' +#define RGX_RI_CLIENT_API_SERVICES 'S' +#define RGX_RI_CLIENT_API_WSEGL 'W' +#define RGX_RI_CLIENT_API_ANDROID 'A' +#define RGX_RI_CLIENT_API_LWS 'L' + +/* + * Format a RI annotation for a given RGX Data Master context + */ +#define RGX_RI_FORMAT_DM_ANNOTATION(annotation, dmTag, clientAPI) do \ + { \ + annotation[0] = dmTag; \ + annotation[1] = clientAPI; \ + annotation[2] = '\0'; \ + } while (0) + +/*! + ****************************************************************************** + * RGXFW Compiler alignment definitions + *****************************************************************************/ +#if defined(__GNUC__) || defined(INTEGRITY_OS) +#define RGXFW_ALIGN __attribute__ ((aligned (8))) +#define RGXFW_ALIGN_DCACHEL __attribute__((aligned (64))) +#elif defined(_MSC_VER) +#define RGXFW_ALIGN __declspec(align(8)) +#define RGXFW_ALIGN_DCACHEL __declspec(align(64)) +#pragma warning (disable : 4324) +#else +#error "Align MACROS need to be defined for this compiler" +#endif + +/*! + ****************************************************************************** + * Force 8-byte alignment for structures allocated uncached. + *****************************************************************************/ +#define UNCACHED_ALIGN RGXFW_ALIGN + + +/*! 
+ ****************************************************************************** + * GPU Utilisation states + *****************************************************************************/ +#define RGXFWIF_GPU_UTIL_STATE_IDLE (0U) +#define RGXFWIF_GPU_UTIL_STATE_ACTIVE (1U) +#define RGXFWIF_GPU_UTIL_STATE_BLOCKED (2U) +#define RGXFWIF_GPU_UTIL_STATE_NUM (3U) +#define RGXFWIF_GPU_UTIL_STATE_MASK IMG_UINT64_C(0x0000000000000003) + + +/* + * Maximum amount of register writes that can be done by the register + * programmer (FW or META DMA). This is not a HW limitation, it is only + * a protection against malformed inputs to the register programmer. + */ +#define RGX_MAX_NUM_REGISTER_PROGRAMMER_WRITES (128U) + +/* + * Use of the 32-bit context property flags mask + * ( X = taken/in use, - = available/unused ) + * + * 0 + * | + * -------------------------------x + */ + /* + * Context creation flags + * (specify a context's properties at creation time) + */ +#define RGX_CONTEXT_FLAG_DISABLESLR (1UL << 0) /*!< Disable SLR */ + +/* List of attributes that may be set for a context */ +typedef enum _RGX_CONTEXT_PROPERTY_ +{ + RGX_CONTEXT_PROPERTY_FLAGS = 0, /*!< Context flags */ +} RGX_CONTEXT_PROPERTY; + +#if defined(__cplusplus) +} +#endif + +#endif /* RGX_COMMON_H */ + +/****************************************************************************** + End of file +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/volcanic/rgx_fwif_alignchecks.h b/drivers/mcst/gpu-imgtec/include/volcanic/rgx_fwif_alignchecks.h new file mode 100644 index 000000000000..947d75041ab0 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/volcanic/rgx_fwif_alignchecks.h @@ -0,0 +1,192 @@ +/*************************************************************************/ /*! +@File +@Title RGX fw interface alignment checks +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Checks to avoid disalignment in RGX fw data structures + shared with the host +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGX_FWIF_ALIGNCHECKS_H) +#define RGX_FWIF_ALIGNCHECKS_H + +/* for the offsetof macro */ +#if defined(__KERNEL__) && defined(LINUX) +#include +#else +#include +#endif + +/*! + ****************************************************************************** + * Alignment UM/FW checks array + *****************************************************************************/ + +#define RGXFW_ALIGN_CHECKS_UM_MAX 128U + +#if defined(PM_INTERACTIVE_MODE) +#define HWRTDATA_PM_OFFSET offsetof(RGXFWIF_HWRTDATA, sPMMListDevVAddr), +#define HWRTDATA_HEAPTABLE_OFFSET offsetof(RGXFWIF_HWRTDATA, psVHeapTableDevVAddr), +#else +#define HWRTDATA_PM_OFFSET offsetof(RGXFWIF_HWRTDATA, sPMRenderStateDevVAddr), +#define HWRTDATA_HEAPTABLE_OFFSET +#endif + +#define RGXFW_ALIGN_CHECKS_INIT0 \ + sizeof(RGXFWIF_TRACEBUF), \ + offsetof(RGXFWIF_TRACEBUF, ui32LogType), \ + offsetof(RGXFWIF_TRACEBUF, sTraceBuf), \ + offsetof(RGXFWIF_TRACEBUF, ui32TraceBufSizeInDWords), \ + offsetof(RGXFWIF_TRACEBUF, ui32TracebufFlags), \ + \ + sizeof(RGXFWIF_SYSDATA), \ + offsetof(RGXFWIF_SYSDATA, ePowState), \ + offsetof(RGXFWIF_SYSDATA, ui32HWPerfDropCount), \ + offsetof(RGXFWIF_SYSDATA, ui32LastDropOrdinal), \ + offsetof(RGXFWIF_SYSDATA, ui32FWFaults), \ + offsetof(RGXFWIF_SYSDATA, ui32HWRStateFlags), \ + \ + sizeof(RGXFWIF_OSDATA), \ + offsetof(RGXFWIF_OSDATA, ui32HostSyncCheckMark), \ + 
offsetof(RGXFWIF_OSDATA, ui32KCCBCmdsExecuted), \ + \ + sizeof(RGXFWIF_HWRINFOBUF), \ + offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmLockedUpCount), \ + offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmOverranCount), \ + offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmRecoveredCount), \ + offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmFalseDetectCount), \ + \ + /* RGXFWIF_CMDTA checks */ \ + sizeof(RGXFWIF_CMDTA), \ + offsetof(RGXFWIF_CMDTA, sGeomRegs), \ + \ + /* RGXFWIF_CMD3D checks */ \ + sizeof(RGXFWIF_CMD3D), \ + offsetof(RGXFWIF_CMD3D, s3DRegs), \ + \ + /* RGXFWIF_CMD_COMPUTE checks */ \ + sizeof(RGXFWIF_CMD_COMPUTE), \ + offsetof(RGXFWIF_CMD_COMPUTE, sCDMRegs), \ + \ + /* RGXFWIF_FREELIST checks */ \ + sizeof(RGXFWIF_FREELIST), \ + offsetof(RGXFWIF_FREELIST, sFreeListBaseDevVAddr),\ + offsetof(RGXFWIF_FREELIST, sFreeListStateDevVAddr),\ + offsetof(RGXFWIF_FREELIST, sFreeListLastGrowDevVAddr),\ + offsetof(RGXFWIF_FREELIST, ui32MaxPages),\ + offsetof(RGXFWIF_FREELIST, ui32CurrentPages),\ + \ + /* RGXFWIF_HWRTDATA checks */ \ + sizeof(RGXFWIF_HWRTDATA), \ + HWRTDATA_PM_OFFSET \ + HWRTDATA_HEAPTABLE_OFFSET \ + offsetof(RGXFWIF_HWRTDATA, apsFreeLists),\ + /*offsetof(RGXFWIF_HWRTDATA, ui64VCECatBase),*/ \ + offsetof(RGXFWIF_HWRTDATA, eState), \ + \ +\ + sizeof(RGXFWIF_HWPERF_CTL), \ + offsetof(RGXFWIF_HWPERF_CTL, sBlkCfg), \ + sizeof(RGXFWIF_CMDTDM), \ + offsetof(RGXFWIF_CMDTDM, sTDMRegs) + +#define RGXFW_ALIGN_CHECKS_INIT RGXFW_ALIGN_CHECKS_INIT0 + + + +/*! 
+ ****************************************************************************** + * Alignment KM checks array + *****************************************************************************/ + +#define RGXFW_ALIGN_CHECKS_INIT_KM0 \ + sizeof(RGXFWIF_SYSINIT), \ + offsetof(RGXFWIF_SYSINIT, sFaultPhysAddr), \ + offsetof(RGXFWIF_SYSINIT, sPDSExecBase), \ + offsetof(RGXFWIF_SYSINIT, sUSCExecBase), \ + offsetof(RGXFWIF_SYSINIT, asSigBufCtl), \ + offsetof(RGXFWIF_SYSINIT, sTraceBufCtl), \ + offsetof(RGXFWIF_SYSINIT, sFwSysData), \ + \ + sizeof(RGXFWIF_OSINIT), \ + offsetof(RGXFWIF_OSINIT, psKernelCCBCtl), \ + offsetof(RGXFWIF_OSINIT, psKernelCCB), \ + offsetof(RGXFWIF_OSINIT, psFirmwareCCBCtl), \ + offsetof(RGXFWIF_OSINIT, psFirmwareCCB), \ + offsetof(RGXFWIF_OSINIT, sFwOsData), \ + offsetof(RGXFWIF_OSINIT, sRGXCompChecks), \ + \ + /* RGXFWIF_FWRENDERCONTEXT checks */ \ + sizeof(RGXFWIF_FWRENDERCONTEXT), \ + offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext), \ + offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext), \ + \ + sizeof(RGXFWIF_FWCOMPUTECONTEXT), \ + offsetof(RGXFWIF_FWCOMPUTECONTEXT, sCDMContext), \ + offsetof(RGXFWIF_FWCOMPUTECONTEXT, sStaticComputeContextState),\ + offsetof(RGXFWIF_FWCOMPUTECONTEXT, ui32WorkEstCCBSubmitted),\ + \ + sizeof(RGXFWIF_FWTDMCONTEXT), \ + offsetof(RGXFWIF_FWTDMCONTEXT, sTDMContext), \ + offsetof(RGXFWIF_FWTDMCONTEXT, ui32WorkEstCCBSubmitted),\ + \ + sizeof(RGXFWIF_FWCOMMONCONTEXT), \ + offsetof(RGXFWIF_FWCOMMONCONTEXT, psFWMemContext), \ + offsetof(RGXFWIF_FWCOMMONCONTEXT, sRunNode), \ + offsetof(RGXFWIF_FWCOMMONCONTEXT, psCCB), \ + \ + sizeof(RGXFWIF_MMUCACHEDATA), \ + offsetof(RGXFWIF_MMUCACHEDATA, sMemoryContext), \ + offsetof(RGXFWIF_MMUCACHEDATA, ui32Flags), \ + offsetof(RGXFWIF_MMUCACHEDATA, sMMUCacheSync), \ + offsetof(RGXFWIF_MMUCACHEDATA, ui32MMUCacheSyncUpdateValue) + +#if defined(SUPPORT_TRP) +#define RGXFW_ALIGN_CHECKS_INIT_KM \ + RGXFW_ALIGN_CHECKS_INIT_KM0, \ + offsetof(RGXFWIF_FWTDMCONTEXT, ui32TRPState), \ + 
offsetof(RGXFWIF_FWTDMCONTEXT, aui64TRPChecksums2D) +#else +#define RGXFW_ALIGN_CHECKS_INIT_KM RGXFW_ALIGN_CHECKS_INIT_KM0 +#endif + +#endif /* RGX_FWIF_ALIGNCHECKS_H */ + +/****************************************************************************** + End of file (rgx_fwif_alignchecks.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/volcanic/rgx_fwif_shared.h b/drivers/mcst/gpu-imgtec/include/volcanic/rgx_fwif_shared.h new file mode 100644 index 000000000000..d6bf9fbdffa3 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/volcanic/rgx_fwif_shared.h @@ -0,0 +1,259 @@ +/*************************************************************************/ /*! +@File +@Title RGX firmware interface structures +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX firmware interface structures shared by both host client + and host server +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGX_FWIF_SHARED_H) +#define RGX_FWIF_SHARED_H + +#include "img_types.h" +#include "img_defs.h" +#include "rgx_common.h" +#include "powervr/mem_types.h" + +/* Maximum number of UFOs in a CCB command. + * The number is based on having 32 sync prims (as originally), plus 32 sync + * checkpoints. + * Once the use of sync prims is no longer supported, we will retain + * the same total (64) as the number of sync checkpoints which may be + * supporting a fence is not visible to the client driver and has to + * allow for the number of different timelines involved in fence merges. + */ +#define RGXFWIF_CCB_CMD_MAX_UFOS (32U+32U) + +/* + * This is a generic limit imposed on any DM (TA,3D,CDM,TDM,2D,TRANSFER) + * command passed through the bridge. 
+ * Just across the bridge in the server, any incoming kick command size is + * checked against this maximum limit. + * In case the incoming command size is larger than the specified limit, + * the bridge call is retired with error. + */ +#define RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE (1024U) + +typedef struct RGXFWIF_DEV_VIRTADDR_ +{ + IMG_UINT32 ui32Addr; +} RGXFWIF_DEV_VIRTADDR; + +typedef struct +{ + IMG_DEV_VIRTADDR RGXFW_ALIGN psDevVirtAddr; + RGXFWIF_DEV_VIRTADDR pbyFWAddr; +} UNCACHED_ALIGN RGXFWIF_DMA_ADDR; + +typedef IMG_UINT8 RGXFWIF_CCCB; + +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_UFO_ADDR; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CLEANUP_CTL; + + +typedef struct +{ + PRGXFWIF_UFO_ADDR puiAddrUFO; + IMG_UINT32 ui32Value; +} RGXFWIF_UFO; + +typedef struct +{ + IMG_UINT32 ui32SubmittedCommands; /*!< Number of commands received by the FW */ + IMG_UINT32 ui32ExecutedCommands; /*!< Number of commands executed by the FW */ +} UNCACHED_ALIGN RGXFWIF_CLEANUP_CTL; + +typedef enum +{ + RGXFWIF_PRBUFFER_START = 0, + RGXFWIF_PRBUFFER_ZSBUFFER = 0, + RGXFWIF_PRBUFFER_MSAABUFFER, + RGXFWIF_PRBUFFER_MAXSUPPORTED, +}RGXFWIF_PRBUFFER_TYPE; + +typedef enum +{ + RGXFWIF_PRBUFFER_UNBACKED = 0, + RGXFWIF_PRBUFFER_BACKED, + RGXFWIF_PRBUFFER_BACKING_PENDING, + RGXFWIF_PRBUFFER_UNBACKING_PENDING, +}RGXFWIF_PRBUFFER_STATE; + +typedef struct +{ + IMG_UINT32 ui32BufferID; /*!< Buffer ID*/ + IMG_BOOL bOnDemand; /*!< Needs On-demand Z/S/MSAA Buffer allocation */ + RGXFWIF_PRBUFFER_STATE eState; /*!< Z/S/MSAA -Buffer state */ + RGXFWIF_CLEANUP_CTL sCleanupState; /*!< Cleanup state */ + IMG_UINT32 ui32FWZSBufferFlags; /*!< Compatibility and other flags */ +} UNCACHED_ALIGN RGXFWIF_PRBUFFER; + +/* + * TA and 3D commands require set of firmware addresses that are stored in the + * Kernel. Client has handle(s) to Kernel containers storing these addresses, + * instead of raw addresses. We have to patch/write these addresses in KM to + * prevent UM from controlling FW addresses directly. 
+ * Typedefs for TA and 3D commands are shared between Client and Firmware (both + * single-BVNC). Kernel is implemented in a multi-BVNC manner, so it can't use + * TA|3D CMD type definitions directly. Therefore we have a SHARED block that + * is shared between UM-KM-FW across all BVNC configurations. + */ +typedef struct +{ + RGXFWIF_DEV_VIRTADDR sHWRTData; /* RTData associated with this command, + this is used for context selection and for storing out HW-context, + when TA is switched out for continuing later */ + + RGXFWIF_DEV_VIRTADDR asPRBuffer[RGXFWIF_PRBUFFER_MAXSUPPORTED]; /* Supported PR Buffers like Z/S/MSAA Scratch */ +} CMDTA3D_SHARED; + +/*! + * Client Circular Command Buffer (CCCB) control structure. + * This is shared between the Server and the Firmware and holds byte offsets + * into the CCCB as well as the wrapping mask to aid wrap around. A given + * snapshot of this queue with Cmd 1 running on the GPU might be: + * + * Roff Doff Woff + * [..........|-1----------|=2===|=3===|=4===|~5~~~~|~6~~~~|~7~~~~|..........] + * < runnable commands >< !ready to run > + * + * Cmd 1 : Currently executing on the GPU data master. + * Cmd 2,3,4: Fence dependencies met, commands runnable. + * Cmd 5... : Fence dependency not met yet. + */ +typedef struct +{ + IMG_UINT32 ui32WriteOffset; /*!< Host write offset into CCB. This + * must be aligned to 16 bytes. */ + IMG_UINT32 ui32ReadOffset; /*!< Firmware read offset into CCB. + Points to the command that is + * runnable on GPU, if R!=W */ + IMG_UINT32 ui32DepOffset; /*!< Firmware fence dependency offset. + * Points to commands not ready, i.e. + * fence dependencies are not met. 
*/ + IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask, total capacity + * in bytes of the CCB-1 */ +} UNCACHED_ALIGN RGXFWIF_CCCB_CTL; + + +typedef IMG_UINT32 RGXFW_FREELIST_TYPE; + +#define RGXFW_LOCAL_FREELIST IMG_UINT32_C(0) +#define RGXFW_GLOBAL_FREELIST IMG_UINT32_C(1) +#define RGXFW_MAX_FREELISTS (RGXFW_GLOBAL_FREELIST + 1U) + + +typedef struct +{ + IMG_UINT64 uTAReg_DCE_ROOT_CTRL_STREAM; + IMG_UINT64 uTAReg_DCE_CONTEXT_STATE_BASE_ADDR; + IMG_UINT64 uTAReg_TA_CONTEXT_STATE_BASE_ADDR; + + struct + { + IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_VDM0; + IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_VDM1; + IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_VDM2; + + IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_DDM0; + IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_DDM1; + IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_DDM2; + IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_XFB; + + /* VDM resume state update controls */ + IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_VDM0; + IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_VDM1; + IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_VDM2; + + + IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_DDM0; + IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_DDM1; + IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_DDM2; + IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_XFB; + } asTAState[2]; + +} RGXFWIF_TAREGISTERS_CSWITCH; + +typedef struct +{ + IMG_UINT64 u3DReg_IPP_CONTEXT_ADDR; +} RGXFWIF_3DREGISTERS_CSWITCH; + +typedef struct +{ + IMG_UINT64 uCDMReg_CDM_CONTEXT_STATE_BASE_ADDR; + IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS0; + IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS1; + IMG_UINT64 uCDMReg_CDM_TERMINATE_PDS; + IMG_UINT64 uCDMReg_CDM_TERMINATE_PDS1; + + /* CDM resume controls */ + IMG_UINT64 uCDMReg_CDM_RESUME_PDS0; + IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS0_B; + IMG_UINT64 uCDMReg_CDM_RESUME_PDS0_B; + +} RGXFWIF_CDM_REGISTERS_CSWITCH; + +static_assert(sizeof(RGXFWIF_CDM_REGISTERS_CSWITCH) % 8U == 0U, + "the size of the structure must be multiple of 8"); + +#define RGXFWIF_CDM_REGISTERS_CSWITCH_SIZE 
sizeof(RGXFWIF_CDM_REGISTERS_CSWITCH) + + +typedef struct +{ + RGXFWIF_TAREGISTERS_CSWITCH RGXFW_ALIGN sCtxSwitch_GeomRegs;/*!< Geometry registers for ctx switch */ + RGXFWIF_3DREGISTERS_CSWITCH RGXFW_ALIGN sCtxSwitch_3DRegs; /*!< 3D registers for ctx switch */ +} RGXFWIF_STATIC_RENDERCONTEXT_STATE; + +#define RGXFWIF_STATIC_RENDERCONTEXT_SIZE sizeof(RGXFWIF_STATIC_RENDERCONTEXT_STATE) + +typedef struct +{ + RGXFWIF_CDM_REGISTERS_CSWITCH RGXFW_ALIGN sCtxSwitch_Regs; /*!< CDM registers for ctx switch */ +} RGXFWIF_STATIC_COMPUTECONTEXT_STATE; + +#define RGXFWIF_STATIC_COMPUTECONTEXT_SIZE sizeof(RGXFWIF_STATIC_COMPUTECONTEXT_STATE) + + +#endif /* RGX_FWIF_SHARED_H */ + +/****************************************************************************** + End of file (rgx_fwif_shared.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/volcanic/rgx_heaps.h b/drivers/mcst/gpu-imgtec/include/volcanic/rgx_heaps.h new file mode 100644 index 000000000000..db291de9cf31 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/volcanic/rgx_heaps.h @@ -0,0 +1,175 @@ +/*************************************************************************/ /*! +@File +@Title RGX heap definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGX_HEAPS_H) +#define RGX_HEAPS_H + +#include "km/rgxdefs_km.h" +#include "img_defs.h" +#include "log2.h" +#include "pvr_debug.h" + +/* RGX Heap IDs, note: not all heaps are available to clients */ +#define RGX_UNDEFINED_HEAP_ID (~0LU) /*!< RGX Undefined Heap ID */ +#define RGX_GENERAL_SVM_HEAP_ID 0 /*!< RGX General SVM (shared virtual memory) Heap ID */ +#define RGX_GENERAL_HEAP_ID 1 /*!< RGX General Heap ID */ +#define RGX_GENERAL_NON4K_HEAP_ID 2 /*!< RGX General none-4K Heap ID */ +#define RGX_PDSCODEDATA_HEAP_ID 3 /*!< RGX PDS Code/Data Heap ID */ +#define RGX_USCCODE_HEAP_ID 4 /*!< RGX USC Code Heap ID */ +#define RGX_FIRMWARE_MAIN_HEAP_ID 5 /*!< RGX Main Firmware Heap ID */ +#define RGX_TQ3DPARAMETERS_HEAP_ID 6 /*!< RGX Firmware Heap ID */ +#define RGX_SIGNALS_HEAP_ID 7 /*!< Compute Signals Heap ID */ +#define RGX_COMPONENT_CTRL_HEAP_ID 8 /*!< DCE Component Ctrl Heap ID */ +#define RGX_FBCDC_HEAP_ID 9 /*!< FBCDC State Table Heap ID */ +#define RGX_FBCDC_LARGE_HEAP_ID 10 /*!< FBCDC Large State Table Heap ID */ +#define RGX_PDS_INDIRECT_STATE_HEAP_ID 11 /*!< PDS Indirect State Table Heap ID */ +#define RGX_TEXTURE_STATE_HEAP_ID 12 /*!< Texture State Heap ID */ +#define RGX_TDM_TPU_YUV_COEFFS_HEAP_ID 13 +#define RGX_VISIBILITY_TEST_HEAP_ID 14 +#define RGX_FIRMWARE_CONFIG_HEAP_ID 15 /*!< RGX Main Firmware Heap ID */ +#define RGX_GUEST_FIRMWARE_RAW_HEAP_ID 16 /*!< Additional OSIDs Firmware */ +#define RGX_MAX_HEAP_ID (RGX_GUEST_FIRMWARE_RAW_HEAP_ID + RGX_NUM_OS_SUPPORTED) /*!< Max Valid Heap ID */ + +/* + Following heaps from the above HEAP IDs can have virtual address space reserved at the start + of the heap. Offsets within this reserved range are intended to be shared between RGX clients + and FW. Naming convention for these macros: Just replace the 'ID' suffix by 'RESERVED_SIZE' + in heap ID macros. 
+ Reserved VA space of a heap must always be multiple of RGX_HEAP_RESERVED_SIZE_GRANULARITY, + this check is validated in the DDK. Note this is only reserving "Virtual Address" space and + physical allocations (and mappings thereon) should only be done as much as required (to avoid + wastage). + Granularity has been chosen to support the max possible practically used OS page size. +*/ +#define RGX_HEAP_RESERVED_SIZE_GRANULARITY 0x10000 /* 64KB is MAX anticipated OS page size */ +#define RGX_GENERAL_HEAP_RESERVED_SIZE 1 * RGX_HEAP_RESERVED_SIZE_GRANULARITY +#define RGX_PDSCODEDATA_HEAP_RESERVED_SIZE 1 * RGX_HEAP_RESERVED_SIZE_GRANULARITY +#define RGX_USCCODE_HEAP_RESERVED_SIZE 1 * RGX_HEAP_RESERVED_SIZE_GRANULARITY +#define RGX_VK_CAPT_REPLAY_BUF_HEAP_RESERVED_SIZE (0) +#define RGX_SIGNALS_HEAP_RESERVED_SIZE 0 +#define RGX_COMPONENT_CTRL_HEAP_RESERVED_SIZE 0 +#define RGX_FBCDC_HEAP_RESERVED_SIZE 0 +#define RGX_FBCDC_LARGE_HEAP_RESERVED_SIZE 0 +#define RGX_PDS_INDIRECT_STATE_HEAP_RESERVED_SIZE 0 +#define RGX_TEXTURE_STATE_HEAP_RESERVED_SIZE 0 +#define RGX_VISIBILITY_TEST_HEAP_RESERVED_SIZE 0 + +/* + Identify heaps by their names +*/ +#define RGX_GENERAL_SVM_HEAP_IDENT "General SVM" /*!< SVM (shared virtual memory) Heap Identifier */ +#define RGX_GENERAL_HEAP_IDENT "General" /*!< RGX General Heap Identifier */ +#define RGX_GENERAL_NON4K_HEAP_IDENT "General NON-4K" /*!< RGX General non-4K Heap Identifier */ +#define RGX_PDSCODEDATA_HEAP_IDENT "PDS Code and Data" /*!< RGX PDS Code/Data Heap Identifier */ +#define RGX_USCCODE_HEAP_IDENT "USC Code" /*!< RGX USC Code Heap Identifier */ +#define RGX_TQ3DPARAMETERS_HEAP_IDENT "TQ3DParameters" /*!< RGX TQ 3D Parameters Heap Identifier */ +#define RGX_SIGNALS_HEAP_IDENT "Signals" /*!< Compute Signals Heap Identifier */ +#define RGX_COMPONENT_CTRL_HEAP_IDENT "Component Control" /*!< RGX DCE Component Control Heap Identifier */ +#define RGX_FBCDC_HEAP_IDENT "FBCDC" /*!< RGX FBCDC State Table Heap Identifier */ +#define 
RGX_FBCDC_LARGE_HEAP_IDENT "Large FBCDC" /*!< RGX Large FBCDC State Table Heap Identifier */ +#define RGX_PDS_INDIRECT_STATE_HEAP_IDENT "PDS Indirect State" /*!< PDS Indirect State Table Heap Identifier */ +#define RGX_TEXTURE_STATE_HEAP_IDENT "Texture State" /*!< Texture State Heap Identifier */ +#define RGX_TDM_TPU_YUV_COEFFS_HEAP_IDENT "TDM TPU YUV Coeffs" +#define RGX_VISIBILITY_TEST_HEAP_IDENT "Visibility Test" +#define RGX_FIRMWARE_MAIN_HEAP_IDENT "FwMain" /*!< RGX Main Firmware Heap identifier */ +#define RGX_FIRMWARE_CONFIG_HEAP_IDENT "FwConfig" /*!< RGX Config firmware Heap identifier */ +#define RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT "FwRawOSID%d" /*!< RGX Raw Firmware Heap identifier */ +#define RGX_VK_CAPT_REPLAY_BUF_HEAP_IDENT "Vulkan capture replay buffer" /*!< RGX vulkan capture replay buffer Heap Identifier */ + +/* + * Supported log2 page size values for RGX_GENERAL_NON_4K_HEAP_ID + */ +#define RGX_HEAP_4KB_PAGE_SHIFT (12U) +#define RGX_HEAP_16KB_PAGE_SHIFT (14U) +#define RGX_HEAP_64KB_PAGE_SHIFT (16U) +#define RGX_HEAP_256KB_PAGE_SHIFT (18U) +#define RGX_HEAP_1MB_PAGE_SHIFT (20U) +#define RGX_HEAP_2MB_PAGE_SHIFT (21U) + +/* Takes a log2 page size parameter and calculates a suitable page size + * for the RGX heaps. 
Returns 0 if parameter is wrong.*/ +static INLINE IMG_UINT32 RGXHeapDerivePageSize(IMG_UINT32 uiLog2PageSize) +{ + IMG_BOOL bFound = IMG_FALSE; + + /* OS page shift must be at least RGX_HEAP_4KB_PAGE_SHIFT, + * max RGX_HEAP_2MB_PAGE_SHIFT, non-zero and a power of two*/ + if (uiLog2PageSize == 0U || + (uiLog2PageSize < RGX_HEAP_4KB_PAGE_SHIFT) || + (uiLog2PageSize > RGX_HEAP_2MB_PAGE_SHIFT)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Provided incompatible log2 page size %u", + __func__, + uiLog2PageSize)); + PVR_ASSERT(0); + return 0; + } + + do + { + switch (uiLog2PageSize) + { + case RGX_HEAP_4KB_PAGE_SHIFT: + case RGX_HEAP_16KB_PAGE_SHIFT: + case RGX_HEAP_64KB_PAGE_SHIFT: + case RGX_HEAP_256KB_PAGE_SHIFT: + case RGX_HEAP_1MB_PAGE_SHIFT: + case RGX_HEAP_2MB_PAGE_SHIFT: + /* All good, RGX page size equals given page size + * => use it as default for heaps */ + bFound = IMG_TRUE; + break; + default: + /* We have to fall back to a smaller device + * page size than given page size because there + * is no exact match for any supported size. */ + uiLog2PageSize -= 1U; + break; + } + } while (!bFound); + + return uiLog2PageSize; +} + + +#endif /* RGX_HEAPS_H */ diff --git a/drivers/mcst/gpu-imgtec/include/volcanic/rgx_hwperf.h b/drivers/mcst/gpu-imgtec/include/volcanic/rgx_hwperf.h new file mode 100644 index 000000000000..b5cb7d0b789a --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/volcanic/rgx_hwperf.h @@ -0,0 +1,1534 @@ +/*************************************************************************/ /*! +@File +@Title RGX HWPerf and Debug Types and Defines Header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Common data types definitions for hardware performance API +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef RGX_HWPERF_H_ +#define RGX_HWPERF_H_ + +#if defined(__cplusplus) +extern "C" { +#endif + +/* These structures are used on both GPU and CPU and must be a size that is a + * multiple of 64 bits, 8 bytes to allow the FW to write 8 byte quantities at + * 8 byte aligned addresses. RGX_FW_STRUCT_*_ASSERT() is used to check this. + */ + +/****************************************************************************** + * Includes and Defines + *****************************************************************************/ + +#include "img_types.h" +#include "img_defs.h" + +#include "rgx_common.h" +#include "pvrsrv_tlcommon.h" +#include "pvrsrv_sync_km.h" + + +#if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER) +/* HWPerf interface assumption checks */ +static_assert(RGX_FEATURE_NUM_CLUSTERS <= 16U, + "Cluster count too large for HWPerf protocol definition"); +#endif + +/*! Perf counter control words */ +#define RGX_HWPERF_CTRL_NOP (0) /*!< only update HW counters */ +#define RGX_HWPERF_CTRL_STATE_UPDATE_EN (1U << 31) /*!< persistent state update; see other flags below */ +#define RGX_HWPERF_CTRL_GEOM_FULLRANGE (1U) /*!< selectable geom and 3D counters are full range */ +#define RGX_HWPERF_CTRL_COMP_FULLRANGE (2U) /*!< selectable compute counters are full range */ +#define RGX_HWPERF_CTRL_TDM_FULLRANGE (4U) /*!< selectable TDM counters are full range */ + +/****************************************************************************** + * Packet Event Type Enumerations + *****************************************************************************/ + +/*! Type used to encode the event that generated the packet. + * NOTE: When this type is updated the corresponding hwperfbin2json tool + * source needs to be updated as well. The RGX_HWPERF_EVENT_MASK_* macros will + * also need updating when adding new types. 
+ */ +typedef IMG_UINT32 RGX_HWPERF_EVENT_TYPE; + +#define RGX_HWPERF_INVALID 0x00U + +/* FW types 0x01..0x06 */ +#define RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE 0x01U + +#define RGX_HWPERF_FW_BGSTART 0x01U +#define RGX_HWPERF_FW_BGEND 0x02U +#define RGX_HWPERF_FW_IRQSTART 0x03U + +#define RGX_HWPERF_FW_IRQEND 0x04U +#define RGX_HWPERF_FW_DBGSTART 0x05U +#define RGX_HWPERF_FW_DBGEND 0x06U + +#define RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE 0x06U + +/* HW types 0x07..0x19 */ +#define RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE 0x07U + +#define RGX_HWPERF_HW_PMOOM_TAPAUSE 0x07U + +#define RGX_HWPERF_HW_TAKICK 0x08U +#define RGX_HWPERF_HW_TAFINISHED 0x09U +#define RGX_HWPERF_HW_3DTQKICK 0x0AU +#define RGX_HWPERF_HW_3DKICK 0x0BU +#define RGX_HWPERF_HW_3DFINISHED 0x0CU +#define RGX_HWPERF_HW_CDMKICK 0x0DU +#define RGX_HWPERF_HW_CDMFINISHED 0x0EU +#define RGX_HWPERF_HW_TLAKICK 0x0FU +#define RGX_HWPERF_HW_TLAFINISHED 0x10U +#define RGX_HWPERF_HW_3DSPMKICK 0x11U +#define RGX_HWPERF_HW_PERIODIC 0x12U +#define RGX_HWPERF_HW_RTUKICK 0x13U +#define RGX_HWPERF_HW_RTUFINISHED 0x14U +#define RGX_HWPERF_HW_SHGKICK 0x15U +#define RGX_HWPERF_HW_SHGFINISHED 0x16U +#define RGX_HWPERF_HW_3DTQFINISHED 0x17U +#define RGX_HWPERF_HW_3DSPMFINISHED 0x18U + +#define RGX_HWPERF_HW_PMOOM_TARESUME 0x19U + +/* HW_EVENT_RANGE0 used up. 
Use next empty range below to add new hardware events */ +#define RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE 0x19U + +/* other types 0x1A..0x1F */ +#define RGX_HWPERF_CLKS_CHG 0x1AU +#define RGX_HWPERF_GPU_STATE_CHG 0x1BU + +/* power types 0x20..0x27 */ +#define RGX_HWPERF_PWR_EST_RANGE_FIRST_TYPE 0x20U +#define RGX_HWPERF_PWR_EST_REQUEST 0x20U +#define RGX_HWPERF_PWR_EST_READY 0x21U +#define RGX_HWPERF_PWR_EST_RESULT 0x22U +#define RGX_HWPERF_PWR_EST_RANGE_LAST_TYPE 0x22U + +#define RGX_HWPERF_PWR_CHG 0x23U + +/* HW_EVENT_RANGE1 0x28..0x2F, for accommodating new hardware events */ +#define RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE 0x28U + +#define RGX_HWPERF_HW_TDMKICK 0x28U +#define RGX_HWPERF_HW_TDMFINISHED 0x29U +#define RGX_HWPERF_HW_NULLKICK 0x2AU + +#define RGX_HWPERF_HW_EVENT_RANGE1_LAST_TYPE 0x2AU + +/* context switch types 0x30..0x31 */ +#define RGX_HWPERF_CSW_START 0x30U +#define RGX_HWPERF_CSW_FINISHED 0x31U + +/* DVFS events */ +#define RGX_HWPERF_DVFS 0x32U + +/* firmware misc 0x38..0x39 */ +#define RGX_HWPERF_UFO 0x38U +#define RGX_HWPERF_FWACT 0x39U + +/* last */ +#define RGX_HWPERF_LAST_TYPE 0x3BU + +/* This enumeration must have a value that is a power of two as it is + * used in masks and a filter bit field (currently 64 bits long). + */ +#define RGX_HWPERF_MAX_TYPE 0x40U + + +/* The event type values are incrementing integers for use as a shift ordinal + * in the event filtering process at the point events are generated. + * This scheme thus implies a limit of 63 event types. 
+ */ +static_assert(RGX_HWPERF_LAST_TYPE < RGX_HWPERF_MAX_TYPE, "Too many HWPerf event types"); + +/* Macro used to check if an event type ID is present in the known set of hardware type events */ +#define HWPERF_PACKET_IS_HW_TYPE(_etype) (((_etype) >= RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE && (_etype) <= RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE) || \ + ((_etype) >= RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE && (_etype) <= RGX_HWPERF_HW_EVENT_RANGE1_LAST_TYPE)) + +#define HWPERF_PACKET_IS_FW_TYPE(_etype) \ + ((_etype) >= RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE && \ + (_etype) <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE) + + +typedef enum { + RGX_HWPERF_HOST_INVALID = 0x00, + RGX_HWPERF_HOST_ENQ = 0x01, + RGX_HWPERF_HOST_UFO = 0x02, + RGX_HWPERF_HOST_ALLOC = 0x03, + RGX_HWPERF_HOST_CLK_SYNC = 0x04, + RGX_HWPERF_HOST_FREE = 0x05, + RGX_HWPERF_HOST_MODIFY = 0x06, + RGX_HWPERF_HOST_DEV_INFO = 0x07, + RGX_HWPERF_HOST_INFO = 0x08, + RGX_HWPERF_HOST_SYNC_FENCE_WAIT = 0x09, + RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE = 0x0A, + + /* last */ + RGX_HWPERF_HOST_LAST_TYPE, + + /* This enumeration must have a value that is a power of two as it is + * used in masks and a filter bit field (currently 32 bits long). + */ + RGX_HWPERF_HOST_MAX_TYPE = 0x20 +} RGX_HWPERF_HOST_EVENT_TYPE; + +/* The event type values are incrementing integers for use as a shift ordinal + * in the event filtering process at the point events are generated. + * This scheme thus implies a limit of 31 event types. + */ +static_assert(RGX_HWPERF_HOST_LAST_TYPE < RGX_HWPERF_HOST_MAX_TYPE, "Too many HWPerf host event types"); + + +/****************************************************************************** + * Packet Header Format Version 2 Types + *****************************************************************************/ + +/*! Major version number of the protocol in operation + */ +#define RGX_HWPERF_V2_FORMAT 2 + +/*! 
Signature ASCII pattern 'HWP2' found in the first word of a HWPerfV2 packet + */ +#define HWPERF_PACKET_V2_SIG 0x48575032 + +/*! Signature ASCII pattern 'HWPA' found in the first word of a HWPerfV2a packet + */ +#define HWPERF_PACKET_V2A_SIG 0x48575041 + +/*! Signature ASCII pattern 'HWPB' found in the first word of a HWPerfV2b packet + */ +#define HWPERF_PACKET_V2B_SIG 0x48575042 + +/*! Signature ASCII pattern 'HWPC' found in the first word of a HWPerfV2c packet + */ +#define HWPERF_PACKET_V2C_SIG 0x48575043 + +#define HWPERF_PACKET_ISVALID(_val) (((_val) == HWPERF_PACKET_V2_SIG) || ((_val) == HWPERF_PACKET_V2A_SIG) || ((_val) == HWPERF_PACKET_V2B_SIG) || ((_val) == HWPERF_PACKET_V2C_SIG)) + +/*! Type defines the HWPerf packet header common to all events. */ +typedef struct +{ + IMG_UINT32 ui32Sig; /*!< Always the value HWPERF_PACKET_SIG */ + IMG_UINT32 ui32Size; /*!< Overall packet size in bytes */ + IMG_UINT32 eTypeId; /*!< Event type information field */ + IMG_UINT32 ui32Ordinal; /*!< Sequential number of the packet */ + IMG_UINT64 ui64Timestamp; /*!< Event timestamp */ +} RGX_HWPERF_V2_PACKET_HDR, *RGX_PHWPERF_V2_PACKET_HDR; + +RGX_FW_STRUCT_OFFSET_ASSERT(RGX_HWPERF_V2_PACKET_HDR, ui64Timestamp); + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_HDR); + + +/*! Mask for use with the IMG_UINT32 ui32Size header field */ +#define RGX_HWPERF_SIZE_MASK 0xFFFFU + +/*! This macro defines an upper limit to which the size of the largest variable + * length HWPerf packet must fall within, currently 3KB. This constant may be + * used to allocate a buffer to hold one packet. + * This upper limit is policed by packet producing code. + */ +#define RGX_HWPERF_MAX_PACKET_SIZE 0xC00U + +/*! Defines an upper limit to the size of a variable length packet payload. + */ +#define RGX_HWPERF_MAX_PAYLOAD_SIZE ((IMG_UINT32)(RGX_HWPERF_MAX_PACKET_SIZE-\ + sizeof(RGX_HWPERF_V2_PACKET_HDR))) + + +/*! 
Macro which takes a structure name and provides the packet size for + * a fixed size payload packet, rounded up to 8 bytes to align packets + * for 64 bit architectures. */ +#define RGX_HWPERF_MAKE_SIZE_FIXED(_struct) ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&(sizeof(RGX_HWPERF_V2_PACKET_HDR)+PVR_ALIGN(sizeof(_struct), PVRSRVTL_PACKET_ALIGNMENT)))) + +/*! Macro which takes the number of bytes written in the data payload of a + * packet for a variable size payload packet, rounded up to 8 bytes to + * align packets for 64 bit architectures. */ +#define RGX_HWPERF_MAKE_SIZE_VARIABLE(_size) ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&(sizeof(RGX_HWPERF_V2_PACKET_HDR)+PVR_ALIGN(_size, PVRSRVTL_PACKET_ALIGNMENT)))) + +/*! Macro to obtain the size of the packet */ +#define RGX_HWPERF_GET_SIZE(_packet_addr) ((IMG_UINT16)(((_packet_addr)->ui32Size) & RGX_HWPERF_SIZE_MASK)) + +/*! Macro to obtain the size of the packet data */ +#define RGX_HWPERF_GET_DATA_SIZE(_packet_addr) (RGX_HWPERF_GET_SIZE(_packet_addr) - sizeof(RGX_HWPERF_V2_PACKET_HDR)) + + + +/*! Masks for use with the IMG_UINT32 eTypeId header field */ +#define RGX_HWPERF_TYPEID_MASK 0x7FFFFU +#define RGX_HWPERF_TYPEID_EVENT_MASK 0x07FFFU +#define RGX_HWPERF_TYPEID_THREAD_MASK 0x08000U +#define RGX_HWPERF_TYPEID_STREAM_MASK 0x70000U +#define RGX_HWPERF_TYPEID_META_DMA_MASK 0x80000U +#define RGX_HWPERF_TYPEID_OSID_MASK 0xFF000000U + +/*! Meta thread macros for encoding the ID into the type field of a packet */ +#define RGX_HWPERF_META_THREAD_SHIFT 15U +#define RGX_HWPERF_META_THREAD_ID0 0x0U +#define RGX_HWPERF_META_THREAD_ID1 0x1U +/*! Obsolete, kept for source compatibility */ +#define RGX_HWPERF_META_THREAD_MASK 0x1U +/*! Stream ID macros for encoding the ID into the type field of a packet */ +#define RGX_HWPERF_STREAM_SHIFT 16U +/*! Meta DMA macro for encoding how the packet was generated into the type field of a packet */ +#define RGX_HWPERF_META_DMA_SHIFT 19U +/*! 
OSID bit-shift macro used for encoding OSID into type field of a packet */ +#define RGX_HWPERF_OSID_SHIFT 24U +typedef enum { + RGX_HWPERF_STREAM_ID0_FW, /*!< Events from the Firmware/GPU */ + RGX_HWPERF_STREAM_ID1_HOST, /*!< Events from the Server host driver component */ + RGX_HWPERF_STREAM_ID2_CLIENT, /*!< Events from the Client host driver component */ + RGX_HWPERF_STREAM_ID_LAST, +} RGX_HWPERF_STREAM_ID; + +/* Checks if all stream IDs can fit under RGX_HWPERF_TYPEID_STREAM_MASK. */ +static_assert(((IMG_UINT32)RGX_HWPERF_STREAM_ID_LAST - 1U) < (RGX_HWPERF_TYPEID_STREAM_MASK >> RGX_HWPERF_STREAM_SHIFT), + "Too many HWPerf stream IDs."); + +/*! Macros used to set the packet type and encode meta thread ID (0|1), HWPerf stream ID, and OSID within */ +#define RGX_HWPERF_MAKE_TYPEID(_stream, _type, _thread, _metadma, _osid)\ + ((IMG_UINT32) ((RGX_HWPERF_TYPEID_STREAM_MASK&((IMG_UINT32)(_stream) << RGX_HWPERF_STREAM_SHIFT)) | \ + (RGX_HWPERF_TYPEID_THREAD_MASK & ((IMG_UINT32)(_thread) << RGX_HWPERF_META_THREAD_SHIFT)) | \ + (RGX_HWPERF_TYPEID_EVENT_MASK & (IMG_UINT32)(_type)) | \ + (RGX_HWPERF_TYPEID_META_DMA_MASK & ((IMG_UINT32)(_metadma) << RGX_HWPERF_META_DMA_SHIFT)) | \ + (RGX_HWPERF_TYPEID_OSID_MASK & ((IMG_UINT32)(_osid) << RGX_HWPERF_OSID_SHIFT)))) + +/*! Obtains the event type that generated the packet */ +#define RGX_HWPERF_GET_TYPE(_packet_addr) (((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_EVENT_MASK) + +/*! Obtains the META Thread number that generated the packet */ +#define RGX_HWPERF_GET_THREAD_ID(_packet_addr) (((((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_THREAD_MASK) >> RGX_HWPERF_META_THREAD_SHIFT)) + +/*! Obtains the guest OSID which resulted in packet generation */ +#define RGX_HWPERF_GET_OSID(_packet_addr) (((_packet_addr)->eTypeId & RGX_HWPERF_TYPEID_OSID_MASK) >> RGX_HWPERF_OSID_SHIFT) + +/*! 
Obtain stream id */ +#define RGX_HWPERF_GET_STREAM_ID(_packet_addr) (((((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_STREAM_MASK) >> RGX_HWPERF_STREAM_SHIFT)) + +/*! Obtain information about how the packet was generated, which might affect payload total size */ +#define RGX_HWPERF_GET_META_DMA_INFO(_packet_addr) (((((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_META_DMA_MASK) >> RGX_HWPERF_META_DMA_SHIFT)) + +/*! Macros to obtain a typed pointer to a packet or data structure given a packet address */ +#define RGX_HWPERF_GET_PACKET(_buffer_addr) ((RGX_HWPERF_V2_PACKET_HDR *)(void *) (_buffer_addr)) +#define RGX_HWPERF_GET_PACKET_DATA_BYTES(_packet_addr) (IMG_OFFSET_ADDR(_packet_addr, sizeof(RGX_HWPERF_V2_PACKET_HDR))) +#define RGX_HWPERF_GET_NEXT_PACKET(_packet_addr) ((RGX_HWPERF_V2_PACKET_HDR *) (IMG_OFFSET_ADDR(_packet_addr, RGX_HWPERF_SIZE_MASK&(_packet_addr->ui32Size)))) + +/*! Obtains a typed pointer to a packet header given the packed data address */ +#define RGX_HWPERF_GET_PACKET_HEADER(_packet_addr) ((RGX_HWPERF_V2_PACKET_HDR *) (IMG_OFFSET_ADDR((_packet_addr), -sizeof(RGX_HWPERF_V2_PACKET_HDR)))) + + +/****************************************************************************** + * Other Common Defines + *****************************************************************************/ + +/* This macro is not a real array size, but indicates the array has a variable + * length only known at run-time but always contains at least 1 element. The + * final size of the array is deduced from the size field of a packet header. + */ +#define RGX_HWPERF_ONE_OR_MORE_ELEMENTS 1U + +/* This macro is not a real array size, but indicates the array is optional + * and if present has a variable length only known at run-time. The final + * size of the array is deduced from the size field of a packet header. */ +#define RGX_HWPERF_ZERO_OR_MORE_ELEMENTS 1U + + +/*! 
Masks for use with the IMG_UINT32 ui32BlkInfo field */ +#define RGX_HWPERF_BLKINFO_BLKCOUNT_MASK 0xFFFF0000U +#define RGX_HWPERF_BLKINFO_BLKOFFSET_MASK 0x0000FFFFU + +/*! Shift for the NumBlocks and counter block offset field in ui32BlkInfo */ +#define RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT 16U +#define RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT 0U + +/*! Macro used to set the block info word as a combination of two 16-bit integers */ +#define RGX_HWPERF_MAKE_BLKINFO(_numblks, _blkoffset) ((IMG_UINT32) ((RGX_HWPERF_BLKINFO_BLKCOUNT_MASK&((_numblks) << RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT)) | (RGX_HWPERF_BLKINFO_BLKOFFSET_MASK&((_blkoffset) << RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT)))) + +/*! Macro used to obtain get the number of counter blocks present in the packet */ +#define RGX_HWPERF_GET_BLKCOUNT(_blkinfo) (((_blkinfo) & RGX_HWPERF_BLKINFO_BLKCOUNT_MASK) >> RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT) + +/*! Obtains the offset of the counter block stream in the packet */ +#define RGX_HWPERF_GET_BLKOFFSET(_blkinfo) ((_blkinfo & RGX_HWPERF_BLKINFO_BLKOFFSET_MASK) >> RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT) + +/* This macro gets the number of blocks depending on the packet version */ +#define RGX_HWPERF_GET_NUMBLKS(_sig, _packet_data, _numblocks) \ + do { \ + if (HWPERF_PACKET_V2B_SIG == _sig || HWPERF_PACKET_V2C_SIG == _sig) \ + { \ + (_numblocks) = RGX_HWPERF_GET_BLKCOUNT((_packet_data)->ui32BlkInfo);\ + } \ + else \ + { \ + IMG_UINT32 ui32VersionOffset = (((_sig) == HWPERF_PACKET_V2_SIG) ? 
1 : 3); \ + (_numblocks) = *(IMG_UINT16 *)(IMG_OFFSET_ADDR(&(_packet_data)->ui32WorkTarget, ui32VersionOffset)); \ + } \ + } while (0) + +/* This macro gets the counter stream pointer depending on the packet version */ +#define RGX_HWPERF_GET_CNTSTRM(_sig, _hw_packet_data, _cntstream_ptr) \ +{ \ + if (HWPERF_PACKET_V2B_SIG == _sig || HWPERF_PACKET_V2C_SIG == _sig) \ + { \ + (_cntstream_ptr) = (IMG_UINT32 *)(IMG_OFFSET_ADDR((_hw_packet_data), RGX_HWPERF_GET_BLKOFFSET((_hw_packet_data)->ui32BlkInfo))); \ + } \ + else \ + { \ + IMG_UINT32 ui32BlkStreamOffsetInWords = ((_sig == HWPERF_PACKET_V2_SIG) ? 6 : 8); \ + (_cntstream_ptr) = (IMG_UINT32 *)(IMG_OFFSET_ADDR((_hw_packet_data), ui32BlkStreamOffsetInWords)); \ + } \ +} + +/* This is the maximum frame contexts that are supported in the driver at the moment */ +#define RGX_HWPERF_HW_MAX_WORK_CONTEXT 2 + +/*! Masks for use with the RGX_HWPERF_UFO_EV eEvType field */ +#define RGX_HWPERF_UFO_STREAMSIZE_MASK 0xFFFF0000U +#define RGX_HWPERF_UFO_STREAMOFFSET_MASK 0x0000FFFFU + +/*! Shift for the UFO count and data stream fields */ +#define RGX_HWPERF_UFO_STREAMSIZE_SHIFT 16U +#define RGX_HWPERF_UFO_STREAMOFFSET_SHIFT 0U + +/*! Macro used to set UFO stream info word as a combination of two 16-bit integers */ +#define RGX_HWPERF_MAKE_UFOPKTINFO(_ssize, _soff) \ + ((IMG_UINT32) ((RGX_HWPERF_UFO_STREAMSIZE_MASK&((_ssize) << RGX_HWPERF_UFO_STREAMSIZE_SHIFT)) | \ + (RGX_HWPERF_UFO_STREAMOFFSET_MASK&((_soff) << RGX_HWPERF_UFO_STREAMOFFSET_SHIFT)))) + +/*! Macro used to obtain UFO count*/ +#define RGX_HWPERF_GET_UFO_STREAMSIZE(_streaminfo) \ + ((_streaminfo & RGX_HWPERF_UFO_STREAMSIZE_MASK) >> RGX_HWPERF_UFO_STREAMSIZE_SHIFT) + +/*! 
Obtains the offset of the UFO stream in the packet */ +#define RGX_HWPERF_GET_UFO_STREAMOFFSET(_streaminfo) \ + ((_streaminfo & RGX_HWPERF_UFO_STREAMOFFSET_MASK) >> RGX_HWPERF_UFO_STREAMOFFSET_SHIFT) + + + +/****************************************************************************** + * Data Stream Common Types + *****************************************************************************/ + +/* All the Data Masters HWPerf is aware of. When a new DM is added to this + * list, it should be appended at the end to maintain backward compatibility + * of HWPerf data. + * These are valid for Furian Series 8XT only. + */ +typedef enum { + + RGX_HWPERF_DM_GP, + RGX_HWPERF_DM_TDM, + RGX_HWPERF_DM_GEOM, + RGX_HWPERF_DM_3D, + RGX_HWPERF_DM_CDM, + RGX_HWPERF_DM_RTU, + + RGX_HWPERF_DM_LAST, + + RGX_HWPERF_DM_INVALID = 0x1FFFFFFF +} RGX_HWPERF_DM; + +/* Enum containing bit pos for 32bit feature flags used in hwperf and api */ +typedef enum { + RGX_HWPERF_FEATURE_PERFBUS_FLAG = 0x001, + RGX_HWPERF_FEATURE_S7_TOP_INFRASTRUCTURE_FLAG = 0x002, + RGX_HWPERF_FEATURE_XT_TOP_INFRASTRUCTURE_FLAG = 0x004, + RGX_HWPERF_FEATURE_PERF_COUNTER_BATCH_FLAG = 0x008, + RGX_HWPERF_FEATURE_ROGUEXE_FLAG = 0x010, + RGX_HWPERF_FEATURE_DUST_POWER_ISLAND_S7_FLAG = 0x020, + RGX_HWPERF_FEATURE_PBE2_IN_XE_FLAG = 0x040, + RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION = 0x080 +} RGX_HWPERF_FEATURE_FLAGS; + +/*! This structure holds the data of a firmware packet. */ +typedef struct +{ + RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */ + IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ + IMG_UINT32 ui32FWPerfCount0; /*!< Meta/MIPS PERF_COUNT0 register */ + IMG_UINT32 ui32FWPerfCount1; /*!< Meta/MIPS PERF_COUNT1 register */ + IMG_UINT32 ui32TimeCorrIndex; + IMG_UINT32 ui32Padding; +} RGX_HWPERF_FW_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FW_DATA); + +/*! This structure holds the data of a hardware packet, including counters. 
*/ +typedef struct +{ + IMG_UINT32 ui32DMCyc; /*!< DataMaster cycle count register, 0 if none */ + IMG_UINT32 ui32FrameNum; /*!< Frame number, undefined on some DataMasters */ + IMG_UINT32 ui32PID; /*!< Process identifier */ + IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ + IMG_UINT32 ui32WorkTarget; /*!< RenderTarget for a TA,3D; Frame context for RTU, 0x0 otherwise */ + IMG_UINT32 ui32ExtJobRef; /*!< Client driver context job reference used for tracking/debugging */ + IMG_UINT32 ui32IntJobRef; /*!< RGX Data master context job reference used for tracking/debugging */ + IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the time correlation at the time the packet was generated */ + IMG_UINT32 ui32BlkInfo; /*!< <31..16> NumBlocks <15..0> Counter block stream offset */ + IMG_UINT32 ui32WorkCtx; /*!< Work context: Render Context for TA/3D; RayTracing Context for RTU/SHG; 0x0 otherwise */ + IMG_UINT32 ui32CtxPriority; /*!< Context priority */ + IMG_UINT32 ui32Padding1; /* To ensure correct alignment */ + IMG_UINT32 aui32CountBlksStream[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS]; /*!< Counter data */ + IMG_UINT32 ui32Padding2; /* To ensure correct alignment */ +} RGX_HWPERF_HW_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_HW_DATA); + +/*! Mask for use with the aui32CountBlksStream field when decoding the + * counter block ID and mask word. */ +#define RGX_HWPERF_CNTBLK_ID_MASK 0xFFFF0000U +#define RGX_HWPERF_CNTBLK_ID_SHIFT 16U + +/*! MAX value used in server handling of counter config arrays */ +#if defined(SUPPORT_VALIDATION) +#define RGX_CNTBLK_COUNTERS_MAX 64 +#else +#define RGX_CNTBLK_COUNTERS_MAX 12 +#endif + + +/*! Obtains the counter block ID from the supplied RGX_HWPERF_HW_DATA address + * and stream index. May be used in decoding the counter block stream words of + * a RGX_HWPERF_HW_DATA structure. 
*/ +#define RGX_HWPERF_GET_CNTBLK_IDW(_word) ((IMG_UINT16)(((_word)&RGX_HWPERF_CNTBLK_ID_MASK)>>RGX_HWPERF_CNTBLK_ID_SHIFT)) +#define RGX_HWPERF_GET_CNTBLK_ID(_data_addr, _idx) RGX_HWPERF_GET_CNTBLK_IDW((_data_addr)->aui32CountBlksStream[(_idx)]) + +/*! Obtains the counter mask from the supplied RGX_HWPERF_HW_DATA address + * and stream index. May be used in decoding the counter block stream words + * of a RGX_HWPERF_HW_DATA structure. */ +#define RGX_HWPERF_GET_CNT_MASKW(_word) ((IMG_UINT16)((_word)&(~RGX_HWPERF_CNTBLK_ID_MASK))) +#define RGX_HWPERF_GET_CNT_MASK(_data_addr, _idx) RGX_HWPERF_GET_CNT_MASKW((_data_addr)->aui32CountBlksStream[(_idx)]) + + +typedef struct +{ + RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */ + IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ + IMG_UINT32 ui32FrameNum; /*!< Frame number */ + IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ + IMG_UINT32 ui32PerfCycle; /*!< Cycle count. Used to measure HW context store latency */ + IMG_UINT32 ui32PerfPhase; /*!< Phase. Used to determine geometry content */ + IMG_UINT32 ui32Padding[2]; /*!< Padding to 8 DWords */ +} RGX_HWPERF_CSW_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CSW_DATA); + +/*! Enumeration of clocks supporting this event */ +typedef enum +{ + RGX_HWPERF_CLKS_CHG_INVALID = 0, + + RGX_HWPERF_CLKS_CHG_NAME_CORE = 1, + + RGX_HWPERF_CLKS_CHG_LAST, +} RGX_HWPERF_CLKS_CHG_NAME; + +/*! This structure holds the data of a clocks change packet. */ +typedef struct +{ + IMG_UINT64 ui64NewClockSpeed; /*!< New Clock Speed (in Hz) */ + RGX_HWPERF_CLKS_CHG_NAME eClockName; /*!< Clock name */ + IMG_UINT32 ui32CalibratedClockSpeed; /*!< Calibrated new GPU clock speed (in Hz) */ + IMG_UINT64 ui64OSTimeStamp; /*!< OSTimeStamp sampled by the host */ + IMG_UINT64 ui64CRTimeStamp; /*!< CRTimeStamp sampled by the host and + correlated to OSTimeStamp */ +} RGX_HWPERF_CLKS_CHG_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CLKS_CHG_DATA); + +/*! 
Enumeration of GPU utilisation states supported by this event */ +typedef IMG_UINT32 RGX_HWPERF_GPU_STATE; + +/*! This structure holds the data of a GPU utilisation state change packet. */ +typedef struct +{ + RGX_HWPERF_GPU_STATE eState; /*!< New GPU utilisation state */ + IMG_UINT32 uiUnused1; /*!< Padding */ + IMG_UINT32 uiUnused2; /*!< Padding */ + IMG_UINT32 uiUnused3; /*!< Padding */ +} RGX_HWPERF_GPU_STATE_CHG_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_GPU_STATE_CHG_DATA); + + +/*! Signature pattern 'HPE1' found in the first word of a PWR_EST packet data */ +#define HWPERF_PWR_EST_V1_SIG 0x48504531 + +/*! Macros to obtain a component field from a counter ID word */ +#define RGX_HWPERF_GET_PWR_EST_HIGH_FLAG(_word) (((_word)&0x80000000)>>31) +#define RGX_HWPERF_GET_PWR_EST_UNIT(_word) (((_word)&0x0F000000)>>24) +#define RGX_HWPERF_GET_PWR_EST_NUMBER(_word) ((_word)&0x0000FFFF) + +/*! This macro constructs a counter ID for a power estimate data stream from + * the component parts of: high word flag, unit id, counter number */ +#define RGX_HWPERF_MAKE_PWR_EST_COUNTERID(_high, _unit, _number) \ + ((IMG_UINT32)(((IMG_UINT32)((IMG_UINT32)(_high)&0x1U)<<31) | ((IMG_UINT32)((IMG_UINT32)(_unit)&0xFU)<<24) | \ + ((_number)&0x0000FFFFU))) + +/*! This structure holds the data for a power estimate packet. */ +typedef struct +{ + IMG_UINT32 ui32StreamVersion; /*!< HWPERF_PWR_EST_V1_SIG */ + IMG_UINT32 ui32StreamSize; /*!< Size of array in bytes of stream data + held in the aui32StreamData member */ + IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; /*!< Counter data */ + IMG_UINT32 ui32Padding; /* To ensure correct alignment */ +} RGX_HWPERF_PWR_EST_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_PWR_EST_DATA); + +/*! 
Enumeration of the kinds of power change events that can occur */ +typedef enum +{ + RGX_HWPERF_PWR_UNDEFINED = 0, + RGX_HWPERF_PWR_ON = 1, /*!< Whole device powered on */ + RGX_HWPERF_PWR_OFF = 2, /*!< Whole device powered off */ + RGX_HWPERF_PWR_UP = 3, /*!< Power turned on to a HW domain */ + RGX_HWPERF_PWR_DOWN = 4, /*!< Power turned off to a HW domain */ + RGX_HWPERF_PWR_PHR_PARTIAL = 5, /*!< Periodic HW partial Rascal/Dust(S6) Reset */ + RGX_HWPERF_PWR_PHR_FULL = 6, /*!< Periodic HW full GPU Reset */ + + RGX_HWPERF_PWR_LAST, +} RGX_HWPERF_PWR; + +/*! This structure holds the data of a power packet. */ +typedef struct +{ + RGX_HWPERF_PWR eChange; /*!< Defines the type of power change */ + IMG_UINT32 ui32Domains; /*!< HW Domains affected */ + IMG_UINT64 ui64OSTimeStamp; /*!< OSTimeStamp sampled by the host */ + IMG_UINT64 ui64CRTimeStamp; /*!< CRTimeStamp sampled by the host and + correlated to OSTimeStamp */ + IMG_UINT32 ui32CalibratedClockSpeed; /*!< GPU clock speed (in Hz) at the time + the two timers were correlated */ + IMG_UINT32 ui32Unused1; /*!< Padding */ +} RGX_HWPERF_PWR_CHG_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_PWR_CHG_DATA); + + +/* + * PDVFS, GPU clock frequency changes and workload estimation profiling + * data. + */ +/*! DVFS and work estimation events. */ +typedef enum +{ + RGX_HWPERF_DVFS_EV_INVALID, /*! Invalid value. */ + RGX_HWPERF_DVFS_EV_PROACTIVE_EST_START, /*! Proactive DVFS estimate start */ + RGX_HWPERF_DVFS_EV_PROACTIVE_EST_FINISHED, /*! Proactive DVFS estimate finished */ + RGX_HWPERF_DVFS_EV_REACTIVE_EST_START, /*! Reactive DVFS estimate start */ + RGX_HWPERF_DVFS_EV_REACTIVE_EST_FINISHED, /*! Reactive DVFS estimate finished */ + /* workload estimation */ + RGX_HWPERF_DVFS_EV_WORK_EST_START, /*! Workload estimation start */ + RGX_HWPERF_DVFS_EV_WORK_EST_FINISHED, /*! Workload estimation finished */ + RGX_HWPERF_DVFS_EV_FREQ_CHG, /*! DVFS OPP/clock frequency change */ + + RGX_HWPERF_DVFS_EV_LAST /*! Number of element. 
*/ +} RGX_HWPERF_DVFS_EV; + +/*! Enumeration of DVFS transitions that can occur */ +typedef enum +{ + RGX_HWPERF_DVFS_OPP_NONE = 0x0, /*!< No OPP change, already operating at required freq */ +#if defined(SUPPORT_PDVFS_IDLE) + RGX_HWPERF_DVFS_OPP_IDLE = 0x1, /*!< GPU is idle, defer the OPP change */ +#endif + /* 0x2 to 0xF reserved */ + RGX_HWPERF_DVFS_OPP_UPDATE = 0x10, /*!< OPP change, new point is encoded in bits [3:0] */ + RGX_HWPERF_DVFS_OPP_LAST = 0x20, +} RGX_HWPERF_DVFS_OPP; + +typedef union +{ + /*! This structure holds the data of a proactive DVFS calculation packet. */ + struct + { + IMG_UINT64 ui64DeadlineInus; /*!< Next deadline in microseconds */ + IMG_UINT32 ui32Frequency; /*!< Required freq to meet deadline at 90% utilisation */ + IMG_UINT32 ui32WorkloadCycles; /*!< Current workload estimate in cycles */ + IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ + } sProDVFSCalc; + + /*! This structure holds the data of a reactive DVFS calculation packet. */ + struct + { + IMG_UINT32 ui32Frequency; /*!< Required freq to achieve average 90% utilisation */ + IMG_UINT32 ui32Utilisation; /*!< GPU utilisation since last update */ + IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ + } sDVFSCalc; + + /*! This structure holds the data of a work estimation packet. */ + struct + { + IMG_UINT64 ui64CyclesPrediction; /*!< Predicted cycle count for this workload */ + IMG_UINT64 ui64CyclesTaken; /*!< Actual cycle count for this workload */ + RGXFWIF_DM eDM; /*!< Target DM */ + IMG_UINT32 ui32ReturnDataIndex; /*!< Index into workload estimation table */ + IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ + } sWorkEst; + + /*! This structure holds the data of an OPP clock frequency transition packet. 
*/ + struct + { + IMG_UINT32 ui32OPPData; /*!< OPP transition */ + } sOPP; + +} RGX_HWPERF_DVFS_DETAIL; + +typedef struct { + RGX_HWPERF_DVFS_EV eEventType; /*!< DVFS sub-event type */ + RGX_HWPERF_DVFS_DETAIL uData; /*!< DVFS sub-event data */ +} RGX_HWPERF_DVFS_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_DVFS_DATA); + + +/*! Firmware Activity event. */ +typedef enum +{ + RGX_HWPERF_FWACT_EV_INVALID, /*! Invalid value. */ + RGX_HWPERF_FWACT_EV_REGS_SET, /*! Registers set. */ + RGX_HWPERF_FWACT_EV_HWR_DETECTED, /*! HWR detected. */ + RGX_HWPERF_FWACT_EV_HWR_RESET_REQUIRED, /*! Reset required. */ + RGX_HWPERF_FWACT_EV_HWR_RECOVERED, /*! HWR recovered. */ + RGX_HWPERF_FWACT_EV_HWR_FREELIST_READY, /*! Freelist ready. */ + RGX_HWPERF_FWACT_EV_FEATURES, /*! Features present */ + + RGX_HWPERF_FWACT_EV_LAST /*! Number of element. */ +} RGX_HWPERF_FWACT_EV; + +/*! Cause of the HWR event. */ +typedef enum +{ + RGX_HWPERF_HWR_REASON_INVALID, /*! Invalid value. */ + RGX_HWPERF_HWR_REASON_LOCKUP, /*! Lockup. */ + RGX_HWPERF_HWR_REASON_PAGEFAULT, /*! Page fault. */ + RGX_HWPERF_HWR_REASON_POLLFAIL, /*! Poll fail. */ + RGX_HWPERF_HWR_REASON_DEADLINE_OVERRUN, /*! Deadline overrun. */ + RGX_HWPERF_HWR_REASON_CSW_DEADLINE_OVERRUN, /*! Hard Context Switch deadline overrun. */ + + RGX_HWPERF_HWR_REASON_LAST /*! Number of elements. */ +} RGX_HWPERF_HWR_REASON; + + +/* Fixed size for BVNC string so it does not alter packet data format + * Check it is large enough against official BVNC string length maximum + */ +#define RGX_HWPERF_MAX_BVNC_LEN (24) +static_assert((RGX_HWPERF_MAX_BVNC_LEN >= RGX_BVNC_STR_SIZE_MAX), + "Space inside HWPerf packet data for BVNC string insufficient"); + +#define RGX_HWPERF_MAX_BVNC_BLOCK_LEN (20U) + +/*! BVNC Features */ +typedef struct +{ + /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */ + IMG_UINT16 ui16BlockID; + + /*! Number of counters in this block type */ + IMG_UINT16 ui16NumCounters; + + /*! 
Number of blocks of this type */ + IMG_UINT16 ui16NumBlocks; + + IMG_UINT16 ui16Reserved; +} RGX_HWPERF_BVNC_BLOCK; + +/*! BVNC Features */ +typedef struct +{ + IMG_CHAR aszBvncString[RGX_HWPERF_MAX_BVNC_LEN]; /*! BVNC string */ + IMG_UINT32 ui32BvncKmFeatureFlags; /*! See RGX_HWPERF_FEATURE_FLAGS */ + IMG_UINT16 ui16BvncBlocks; /*! Number of blocks described in aBvncBlocks */ + IMG_UINT16 ui16Reserved1; /*! Align to 32bit */ + RGX_HWPERF_BVNC_BLOCK aBvncBlocks[RGX_HWPERF_MAX_BVNC_BLOCK_LEN]; /*! Supported Performance Blocks for BVNC */ +} RGX_HWPERF_BVNC; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_BVNC); + +/*! Performance Counter Config */ +typedef struct +{ + IMG_UINT32 ui32BlockID; + IMG_UINT32 ui32NumCounters; + IMG_UINT32 ui32CounterVals[RGX_CNTBLK_COUNTERS_MAX]; +} RGX_HWPERF_COUNTER_CFG_DATA_EL; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_COUNTER_CFG_DATA_EL); + +typedef struct +{ + IMG_UINT32 ui32EnabledBlocks; /*!< Number of Enabled Blocks. */ + RGX_HWPERF_COUNTER_CFG_DATA_EL uData; /*!< Start of variable length data */ + IMG_UINT32 ui32Padding; +} RGX_HWPERF_COUNTER_CFG; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_COUNTER_CFG); + +/*! Sub-event's data. */ +typedef union +{ + struct + { + RGX_HWPERF_DM eDM; /*!< Data Master ID. */ + RGX_HWPERF_HWR_REASON eReason; /*!< Reason of the HWR. */ + IMG_UINT32 ui32DMContext; /*!< FW render context */ + } sHWR; /*!< HWR sub-event data. */ + + RGX_HWPERF_BVNC sBVNC; /*!< BVNC Features */ + + RGX_HWPERF_COUNTER_CFG sPCC; /*!< Performance Counter Config */ +} RGX_HWPERF_FWACT_DETAIL; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DETAIL); + +/*! This structure holds the data of a FW activity event packet */ +typedef struct +{ + RGX_HWPERF_FWACT_EV eEvType; /*!< Event type. */ + RGX_HWPERF_FWACT_DETAIL uFwActDetail; /*!< Data of the sub-event. 
*/ + IMG_UINT32 ui32Padding; +} RGX_HWPERF_FWACT_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DATA); + + + +typedef enum { + RGX_HWPERF_UFO_EV_UPDATE, + RGX_HWPERF_UFO_EV_CHECK_SUCCESS, + RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS, + RGX_HWPERF_UFO_EV_CHECK_FAIL, + RGX_HWPERF_UFO_EV_PRCHECK_FAIL, + RGX_HWPERF_UFO_EV_FORCE_UPDATE, + + RGX_HWPERF_UFO_EV_LAST +} RGX_HWPERF_UFO_EV; + +/*! Data stream tuple. */ +typedef union +{ + struct + { + IMG_UINT32 ui32FWAddr; + IMG_UINT32 ui32Value; + } sCheckSuccess; + struct + { + IMG_UINT32 ui32FWAddr; + IMG_UINT32 ui32Value; + IMG_UINT32 ui32Required; + } sCheckFail; + struct + { + IMG_UINT32 ui32FWAddr; + IMG_UINT32 ui32OldValue; + IMG_UINT32 ui32NewValue; + } sUpdate; +} RGX_HWPERF_UFO_DATA_ELEMENT; + +/*! This structure holds the packet payload data for UFO event. */ +typedef struct +{ + RGX_HWPERF_UFO_EV eEvType; + IMG_UINT32 ui32TimeCorrIndex; + IMG_UINT32 ui32PID; + IMG_UINT32 ui32ExtJobRef; + IMG_UINT32 ui32IntJobRef; + IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ + IMG_UINT32 ui32StreamInfo; + RGX_HWPERF_DM eDM; + IMG_UINT32 ui32Padding; + IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; +} RGX_HWPERF_UFO_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_UFO_DATA); + + + +typedef enum +{ + RGX_HWPERF_KICK_TYPE_TA3D, + RGX_HWPERF_KICK_TYPE_CDM, + RGX_HWPERF_KICK_TYPE_RS, + RGX_HWPERF_KICK_TYPE_SHG, + RGX_HWPERF_KICK_TYPE_TQTDM, + RGX_HWPERF_KICK_TYPE_SYNC, + RGX_HWPERF_KICK_TYPE_LAST, + + RGX_HWPERF_KICK_TYPE_FORCE_32BIT = 0x7fffffff +} RGX_HWPERF_KICK_TYPE; + +typedef struct +{ + RGX_HWPERF_KICK_TYPE ui32EnqType; + IMG_UINT32 ui32PID; + IMG_UINT32 ui32ExtJobRef; + IMG_UINT32 ui32IntJobRef; + IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ + IMG_UINT32 ui32Padding; + IMG_UINT64 ui64CheckFence_UID; + IMG_UINT64 ui64UpdateFence_UID; + IMG_UINT64 ui64DeadlineInus; /*!< Workload deadline in system monotonic time */ + IMG_UINT64 ui64CycleEstimate; /*!< Estimated cycle time for the 
workload */ + PVRSRV_FENCE hCheckFence; /*!< Fence this enqueue task waits for, before starting */ + PVRSRV_FENCE hUpdateFence; /*!< Fence this enqueue task signals, on completion */ + PVRSRV_TIMELINE hUpdateTimeline; /*!< Timeline on which the above hUpdateFence is created */ + + IMG_UINT32 ui32Pad; /* Align structure size to 8 bytes */ +} RGX_HWPERF_HOST_ENQ_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_ENQ_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_ENQ_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef struct +{ + RGX_HWPERF_UFO_EV eEvType; + IMG_UINT32 ui32StreamInfo; + IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; + IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */ +} RGX_HWPERF_HOST_UFO_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_UFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_UFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef enum +{ + RGX_HWPERF_HOST_RESOURCE_TYPE_INVALID, + RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC, /* PRIM */ + RGX_HWPERF_HOST_RESOURCE_TYPE_TIMELINE_DEPRECATED, /* Timeline resource packets are now + emitted in client hwperf buffer */ + RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR, /* Fence for use on GPU (SYNC_CP backed) */ + RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP, + RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW, /* Fence created on SW timeline */ + + RGX_HWPERF_HOST_RESOURCE_TYPE_LAST +} RGX_HWPERF_HOST_RESOURCE_TYPE; + +typedef union +{ + struct + { + IMG_UINT32 uiPid; + IMG_UINT64 ui64Timeline_UID1; + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */ + } sTimelineAlloc; + + struct + { + IMG_PID uiPID; + PVRSRV_FENCE hFence; + IMG_UINT32 ui32CheckPt_FWAddr; + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + } sFenceAlloc; + + 
struct + { + IMG_UINT32 ui32CheckPt_FWAddr; + PVRSRV_TIMELINE hTimeline; + IMG_PID uiPID; + PVRSRV_FENCE hFence; + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + } sSyncCheckPointAlloc; + + struct + { + IMG_PID uiPID; + PVRSRV_FENCE hSWFence; + PVRSRV_TIMELINE hSWTimeline; + IMG_UINT64 ui64SyncPtIndex; + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + } sSWFenceAlloc; + + struct + { + IMG_UINT32 ui32FWAddr; + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + } sSyncAlloc; +} RGX_HWPERF_HOST_ALLOC_DETAIL; + +typedef struct +{ + RGX_HWPERF_HOST_RESOURCE_TYPE ui32AllocType; + RGX_HWPERF_HOST_ALLOC_DETAIL RGXFW_ALIGN uAllocDetail; +} RGX_HWPERF_HOST_ALLOC_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_ALLOC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_ALLOC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef union +{ + struct + { + IMG_UINT32 uiPid; + IMG_UINT64 ui64Timeline_UID1; + IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */ + } sTimelineDestroy; + + struct + { + IMG_UINT64 ui64Fence_UID; + IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */ + } sFenceDestroy; + + struct + { + IMG_UINT32 ui32CheckPt_FWAddr; + } sSyncCheckPointFree; + + struct + { + IMG_UINT32 ui32FWAddr; + } sSyncFree; +} RGX_HWPERF_HOST_FREE_DETAIL; + +typedef struct +{ + RGX_HWPERF_HOST_RESOURCE_TYPE ui32FreeType; + RGX_HWPERF_HOST_FREE_DETAIL uFreeDetail; + IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */ +} RGX_HWPERF_HOST_FREE_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. 
*/ +static_assert((sizeof(RGX_HWPERF_HOST_FREE_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_FREE_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef struct +{ + IMG_UINT64 ui64CRTimestamp; + IMG_UINT64 ui64OSTimestamp; + IMG_UINT32 ui32ClockSpeed; + IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */ +} RGX_HWPERF_HOST_CLK_SYNC_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef union +{ + struct + { + IMG_UINT64 ui64NewFence_UID; + IMG_UINT64 ui64InFence1_UID; + IMG_UINT64 ui64InFence2_UID; + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */ + } sFenceMerge; +} RGX_HWPERF_HOST_MODIFY_DETAIL; + +typedef struct +{ + RGX_HWPERF_HOST_RESOURCE_TYPE ui32ModifyType; + RGX_HWPERF_HOST_MODIFY_DETAIL uModifyDetail; +} RGX_HWPERF_HOST_MODIFY_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. 
*/ +static_assert((sizeof(RGX_HWPERF_HOST_MODIFY_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_MODIFY_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef enum +{ + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED = 0, + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK, + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_RESPONDING, + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD, + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT, + + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_LAST +} RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS; + +typedef enum +{ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED = 0, + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE, + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED, + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING, + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS, + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT, + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED, + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING, + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING, + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS, + + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_LAST +} RGX_HWPERF_HOST_DEVICE_HEALTH_REASON; + +typedef enum +{ + RGX_HWPERF_DEV_INFO_EV_HEALTH, + + RGX_HWPERF_DEV_INFO_EV_LAST +} RGX_HWPERF_DEV_INFO_EV; + +typedef union +{ + struct + { + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS eDeviceHealthStatus; + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON eDeviceHealthReason; + } sDeviceStatus; +} RGX_HWPERF_HOST_DEV_INFO_DETAIL; + +typedef struct +{ + IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */ + RGX_HWPERF_DEV_INFO_EV eEvType; + RGX_HWPERF_HOST_DEV_INFO_DETAIL uDevInfoDetail; +} RGX_HWPERF_HOST_DEV_INFO_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. 
*/ +static_assert((sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef enum +{ + RGX_HWPERF_INFO_EV_MEM_USAGE, + + RGX_HWPERF_INFO_EV_LAST +} RGX_HWPERF_INFO_EV; + +typedef union +{ + struct + { + IMG_UINT32 ui32TotalMemoryUsage; + struct + { + IMG_UINT32 ui32Pid; + IMG_UINT32 ui32KernelMemUsage; + IMG_UINT32 ui32GraphicsMemUsage; + } sPerProcessUsage[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS]; + } sMemUsageStats; +} RGX_HWPERF_HOST_INFO_DETAIL; + +typedef struct +{ + IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */ + RGX_HWPERF_INFO_EV eEvType; + RGX_HWPERF_HOST_INFO_DETAIL uInfoDetail; +} RGX_HWPERF_HOST_INFO_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef enum +{ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN = 0, + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END, + + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_LAST, +} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE; + +typedef enum +{ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_INVALID = 0, + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_TIMEOUT, + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_PASSED, + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_ERROR, + + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_LAST, +} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT; + +typedef union +{ + struct + { + IMG_UINT32 ui32TimeoutInMs; + } sBegin; + + struct + { + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT eResult; + } sEnd; +} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL; + +typedef struct +{ + IMG_PID uiPID; + PVRSRV_FENCE hFence; + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType; + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL uDetail; + +} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA; + +static_assert((sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) & 
(PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef struct +{ + IMG_PID uiPID; + PVRSRV_TIMELINE hTimeline; + IMG_UINT64 ui64SyncPtIndex; + +} RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA; + +static_assert((sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef enum +{ + RGX_HWPERF_RESOURCE_CAPTURE_TYPE_NONE, + RGX_HWPERF_RESOURCE_CAPTURE_TYPE_DEFAULT_FRAMEBUFFER, + RGX_HWPERF_RESOURCE_CAPTURE_TYPE_OFFSCREEN_FB_ATTACHMENTS, + RGX_HWPERF_RESOURCE_CAPTURE_TYPE_TILE_LIFETIME_DATA, + + RGX_HWPERF_RESOURCE_TYPE_COUNT +} RGX_HWPERF_RESOURCE_CAPTURE_TYPE; + +typedef struct +{ + IMG_UINT32 ui32Height; + IMG_UINT32 ui32Width; + IMG_UINT32 ui32BPP; + IMG_UINT32 ui32PixFormat; +} RGX_RESOURCE_PER_SURFACE_INFO, *PRGX_RESOURCE_PER_SURFACE_INFO; + +typedef struct +{ + IMG_INT32 i32XOffset; /*!< render surface X shift */ + IMG_INT32 i32YOffset; /*!< render surface Y shift */ + IMG_UINT32 ui32WidthInTiles; /*!< number of TLT data points in X */ + IMG_UINT32 ui32HeightInTiles; /*!< number of TLT data points in Y */ +} RGX_RESOURCE_PER_TLT_BUFFER_INFO, *PRGX_RESOURCE_PER_TLT_BUFFER_INFO; + +typedef union +{ + struct RGX_RESOURCE_CAPTURE_RENDER_SURFACES + { + IMG_UINT32 ui32RenderSurfaceCount; + RGX_RESOURCE_PER_SURFACE_INFO sSurface[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; + } sRenderSurfaces; + + struct RGX_RESOURCE_CAPTURE_TILE_LIFETIME_BUFFERS + { + RGX_RESOURCE_PER_TLT_BUFFER_INFO sTLTBufInfo[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; + } sTLTBuffers; +} RGX_RESOURCE_CAPTURE_DETAIL; + +typedef struct +{ + RGX_HWPERF_RESOURCE_CAPTURE_TYPE eType; + IMG_PID uPID; + IMG_UINT32 ui32ContextID; + IMG_UINT32 ui32FrameNum; + IMG_UINT32 ui32CapturedTaskJobRef; /* The job ref of the HW task that emitted the data */ + IMG_INT32 eClientModule; /* RGX_HWPERF_CLIENT_API - ID that the capture 
is originating from. */ + RGX_RESOURCE_CAPTURE_DETAIL uDetail; /* eType determines the value of the union */ +} RGX_RESOURCE_CAPTURE_INFO, *PRGX_RESOURCE_CAPTURE_INFO; + +#define RGX_RESOURCE_CAPTURE_INFO_BASE_SIZE() offsetof(RGX_RESOURCE_CAPTURE_INFO, uDetail) + +#define RGX_TLT_HARDWARE_HDR_SIZE (16U) + +/* PVRSRVGetHWPerfResourceCaptureResult */ +typedef enum +{ + RGX_HWPERF_RESOURCE_CAPTURE_RESULT_NONE = 0, + RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK, /* We got data ok, expect more packets for this request. */ + RGX_HWPERF_RESOURCE_CAPTURE_RESULT_NOT_READY, /* Signals a timeout on the connection - no data available yet. */ + RGX_HWPERF_RESOURCE_CAPTURE_RESULT_COMPLETE_SUCCESS, /* The request completed successfully, signals the end of packets for the request. */ + RGX_HWPERF_RESOURCE_CAPTURE_RESULT_COMPLETE_FAILURE /* The request failed, signals the end of packets for the request. */ +} RGX_HWPERF_RESOURCE_CAPTURE_RESULT_STATUS; + +typedef struct +{ + IMG_PID uPID; /* In case of a failed request pass the caller the PID and context ID. */ + IMG_UINT32 ui32CtxID; + RGX_RESOURCE_CAPTURE_INFO *psInfo; /* Various meta-data regarding the captured resource which aid the requester when, + unpacking the resource data, valid if RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK is returned. */ + IMG_BYTE *pbData; /* Buffer containing the captured resource data, valid if RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK is returned. */ +} RGX_RESOURCE_CAPTURE_RESULT; + +/*! 
This type is a union of packet payload data structures associated with + * various FW and Host events */ +typedef union +{ + RGX_HWPERF_FW_DATA sFW; /*!< Firmware event packet data */ + RGX_HWPERF_HW_DATA sHW; /*!< Hardware event packet data */ + RGX_HWPERF_CLKS_CHG_DATA sCLKSCHG; /*!< Clock change event packet data */ + RGX_HWPERF_GPU_STATE_CHG_DATA sGPUSTATECHG; /*!< GPU utilisation state change event packet data */ + RGX_HWPERF_PWR_EST_DATA sPWREST; /*!< Power estimate event packet data */ + RGX_HWPERF_PWR_CHG_DATA sPWR; /*!< Power event packet data */ + RGX_HWPERF_CSW_DATA sCSW; /*!< Context switch packet data */ + RGX_HWPERF_UFO_DATA sUFO; /*!< UFO data */ + RGX_HWPERF_FWACT_DATA sFWACT; /*!< Firmware activity event packet data */ + RGX_HWPERF_DVFS_DATA sDVFS; /*!< DVFS activity data */ + /* */ + RGX_HWPERF_HOST_ENQ_DATA sENQ; /*!< Host ENQ data */ + RGX_HWPERF_HOST_UFO_DATA sHUFO; /*!< Host UFO data */ + RGX_HWPERF_HOST_ALLOC_DATA sHALLOC; /*!< Host Alloc data */ + RGX_HWPERF_HOST_CLK_SYNC_DATA sHCLKSYNC; /*!< Host CLK_SYNC data */ + RGX_HWPERF_HOST_FREE_DATA sHFREE; /*!< Host Free data */ + RGX_HWPERF_HOST_MODIFY_DATA sHMOD; /*!< Host Modify data */ + RGX_HWPERF_HOST_DEV_INFO_DATA sHDEVINFO; /*!< Host device info data */ + RGX_HWPERF_HOST_INFO_DATA sHINFO; /*!< Host info data */ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA sWAIT; /*!< Host fence-wait data */ + RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA sSWTLADV; /*!< Host SW-timeline advance data */ +} RGX_HWPERF_V2_PACKET_DATA_, *RGX_PHWPERF_V2_PACKET_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_DATA_); + +#define RGX_HWPERF_GET_PACKET_DATA(_packet_addr) ((RGX_PHWPERF_V2_PACKET_DATA) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR)))) + +#define RGX_HWPERF_GET_DVFS_EVENT_TYPE_PTR(_packet_addr) \ + ((RGX_HWPERF_DVFS_EV*) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR) + offsetof(RGX_HWPERF_DVFS_DATA,eEventType)))) + 
+/****************************************************************************** + * API Types + *****************************************************************************/ + +/*! Counter block IDs for all the hardware blocks with counters. + * Directly addressable blocks must have a value between 0..15 [0..0xF]. + * Indirect groups have following encoding: + * First hex digit (LSB) represents a unit number within the group + * and the second hex digit represents the group number. + * Group 0 is the direct group, all others are indirect groups. + */ +typedef IMG_UINT32 RGX_HWPERF_CNTBLK_ID; + +/* Directly addressable non bank-switched counter blocks */ +#define RGX_CNTBLK_ID_JONES 0x0000U +#define RGX_CNTBLK_ID_SLC 0x0001U // SLC-specific counter control +#define RGX_CNTBLK_ID_FBCDC 0x0002U +#define RGX_CNTBLK_ID_FW_CUSTOM 0x0003U // Custom FW provided counters + +/* Directly addressable SLC counter blocks - presence depends on GPU */ +#define RGX_CNTBLK_ID_SLC0 0x0004U // SLC0 specific counter control +#define RGX_CNTBLK_ID_SLC1 0x0005U // SLC1 specific counter control +#define RGX_CNTBLK_ID_SLC2 0x0006U // SLC2 specific counter control +#define RGX_CNTBLK_ID_SLC3 0x0007U // SLC3 specific counter control + +#define RGX_CNTBLK_ID_PIPELINE_STATS 0x0008U // PIPELINE_STATS counters +#define RGX_CNTBLK_ID_DIRECT_LAST 0x0009U // Last Direct block ID + +/* Indirectly addressable counter blocks */ +#define RGX_CNTBLK_ID_ISP0 0x0010U // ISP 1..#ISP +#define RGX_CNTBLK_ID_ISP1 0x0011U +#define RGX_CNTBLK_ID_ISP2 0x0012U +#define RGX_CNTBLK_ID_ISP3 0x0013U +#define RGX_CNTBLK_ID_ISP4 0x0014U +#define RGX_CNTBLK_ID_ISP5 0x0015U +#define RGX_CNTBLK_ID_ISP6 0x0016U +#define RGX_CNTBLK_ID_ISP7 0x0017U +#define RGX_CNTBLK_ID_ISP_ALL 0x4010U + +#define RGX_CNTBLK_ID_MERCER0 0x0020U // MERCER 1..#MERCER +#define RGX_CNTBLK_ID_MERCER1 0x0021U +#define RGX_CNTBLK_ID_MERCER2 0x0022U +#define RGX_CNTBLK_ID_MERCER3 0x0023U +#define RGX_CNTBLK_ID_MERCER4 0x0024U +#define 
RGX_CNTBLK_ID_MERCER5 0x0025U +#define RGX_CNTBLK_ID_MERCER6 0x0026U +#define RGX_CNTBLK_ID_MERCER7 0x0027U +#define RGX_CNTBLK_ID_MERCER_ALL 0x4020U + +#define RGX_CNTBLK_ID_PBE0 0x0030U // PBE 1..#PBE_PER_SPU x #SPU +#define RGX_CNTBLK_ID_PBE1 0x0031U +#define RGX_CNTBLK_ID_PBE2 0x0032U +#define RGX_CNTBLK_ID_PBE3 0x0033U +#define RGX_CNTBLK_ID_PBE4 0x0034U +#define RGX_CNTBLK_ID_PBE5 0x0035U +#define RGX_CNTBLK_ID_PBE6 0x0036U +#define RGX_CNTBLK_ID_PBE7 0x0037U +#define RGX_CNTBLK_ID_PBE_ALL 0x4030U + +#define RGX_CNTBLK_ID_PBE_SHARED0 0x0040U // PBE_SHARED 1..#SPU +#define RGX_CNTBLK_ID_PBE_SHARED1 0x0041U +#define RGX_CNTBLK_ID_PBE_SHARED2 0x0042U +#define RGX_CNTBLK_ID_PBE_SHARED3 0x0043U +#define RGX_CNTBLK_ID_PBE_SHARED_ALL 0x4040U + +#define RGX_CNTBLK_ID_USC0 0x0050U // USC 1..#USC +#define RGX_CNTBLK_ID_USC1 0x0051U +#define RGX_CNTBLK_ID_USC2 0x0052U +#define RGX_CNTBLK_ID_USC3 0x0053U +#define RGX_CNTBLK_ID_USC4 0x0054U +#define RGX_CNTBLK_ID_USC5 0x0055U +#define RGX_CNTBLK_ID_USC6 0x0056U +#define RGX_CNTBLK_ID_USC7 0x0057U +#define RGX_CNTBLK_ID_USC_ALL 0x4050U + +#define RGX_CNTBLK_ID_TPU0 0x0060U // TPU 1..#TPU +#define RGX_CNTBLK_ID_TPU1 0x0061U +#define RGX_CNTBLK_ID_TPU2 0x0062U +#define RGX_CNTBLK_ID_TPU3 0x0063U +#define RGX_CNTBLK_ID_TPU_ALL 0x4060U + +#define RGX_CNTBLK_ID_SWIFT0 0x0070U // SWIFT 1..#SWIFT +#define RGX_CNTBLK_ID_SWIFT1 0x0071U +#define RGX_CNTBLK_ID_SWIFT2 0x0072U +#define RGX_CNTBLK_ID_SWIFT3 0x0073U +#define RGX_CNTBLK_ID_SWIFT_ALL 0x4070U + +#define RGX_CNTBLK_ID_TEXAS0 0x0080U // TEXAS 1..#TEXAS +#define RGX_CNTBLK_ID_TEXAS1 0x0081U +#define RGX_CNTBLK_ID_TEXAS2 0x0082U +#define RGX_CNTBLK_ID_TEXAS3 0x0083U +#define RGX_CNTBLK_ID_TEXAS_ALL 0x4080U + +#define RGX_CNTBLK_ID_LAST 0x0084U + +/* Masks for the counter block ID*/ +#define RGX_CNTBLK_ID_GROUP_MASK (0x00F0U) +#define RGX_CNTBLK_ID_GROUP_SHIFT (4U) +#define RGX_CNTBLK_ID_UNIT_ALL_MASK (0x4000U) +#define RGX_CNTBLK_ID_UNIT_MASK (0xfU) + +static_assert( + 
((RGX_CNTBLK_ID_DIRECT_LAST + ((RGX_CNTBLK_ID_LAST & RGX_CNTBLK_ID_GROUP_MASK) >> RGX_CNTBLK_ID_GROUP_SHIFT)) <= RGX_HWPERF_MAX_BVNC_BLOCK_LEN), + "RGX_HWPERF_MAX_BVNC_BLOCK_LEN insufficient"); + +#define RGX_HWPERF_EVENT_MASK_VALUE(e) (IMG_UINT64_C(1) << (e)) + +/* When adding new counters here, make sure changes are made to rgxfw_hwperf_fwblk_valid() as well */ +#define RGX_CUSTOM_FW_CNTRS \ + X(TA_LOCAL_FL_SIZE, 0x0, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED)) \ + \ + X(TA_GLOBAL_FL_SIZE, 0x1, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED)) \ + \ + X(3D_LOCAL_FL_SIZE, 0x2, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED)) \ + \ + X(3D_GLOBAL_FL_SIZE, 0x3, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED)) \ + \ + X(ISP_TILES_IN_FLIGHT, 0x4, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DSPMKICK)) + +/*! Counter IDs for the firmware held statistics */ +typedef enum +{ +#define X(ctr, id, allow_mask) RGX_CUSTOM_FW_CNTR_##ctr = id, + RGX_CUSTOM_FW_CNTRS +#undef X + + /* always the last entry in the list */ + RGX_CUSTOM_FW_CNTR_LAST +} RGX_HWPERF_CUSTOM_FW_CNTR_ID; + +/*! 
Identifier for each counter in a performance counting module */ +typedef IMG_UINT32 RGX_HWPERF_CNTBLK_COUNTER_ID; + +/* sets all the bits from bit _b1 to _b2, in a IMG_UINT64 type */ +#define MASK_RANGE_IMPL(b1, b2) ((IMG_UINT64)((IMG_UINT64_C(1) << ((IMG_UINT32)(b2)-(IMG_UINT32)(b1) + 1U)) - 1U) << (IMG_UINT32)b1) +#define MASK_RANGE(R) MASK_RANGE_IMPL(R##_FIRST_TYPE, R##_LAST_TYPE) +#define RGX_HWPERF_HOST_EVENT_MASK_VALUE(e) (IMG_UINT32_C(1) << (e)) + +/*! Mask macros for use with RGXCtrlHWPerf() API. + */ +#define RGX_HWPERF_EVENT_MASK_NONE (IMG_UINT64_C(0x0000000000000000)) +#define RGX_HWPERF_EVENT_MASK_ALL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) + +/*! HWPerf Firmware event masks + * Next macro covers all FW Start/End/Debug (SED) events. + */ +#define RGX_HWPERF_EVENT_MASK_FW_SED (MASK_RANGE(RGX_HWPERF_FW_EVENT_RANGE)) + +#define RGX_HWPERF_EVENT_MASK_FW_UFO (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO)) +#define RGX_HWPERF_EVENT_MASK_FW_CSW (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_START) |\ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_FINISHED)) +#define RGX_HWPERF_EVENT_MASK_ALL_FW (RGX_HWPERF_EVENT_MASK_FW_SED |\ + RGX_HWPERF_EVENT_MASK_FW_UFO |\ + RGX_HWPERF_EVENT_MASK_FW_CSW) + +#define RGX_HWPERF_EVENT_MASK_HW_PERIODIC (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PERIODIC)) +#define RGX_HWPERF_EVENT_MASK_HW_KICKFINISH ((MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE0) |\ + MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE1)) &\ + ~(RGX_HWPERF_EVENT_MASK_HW_PERIODIC)) + +#define RGX_HWPERF_EVENT_MASK_ALL_HW (RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |\ + RGX_HWPERF_EVENT_MASK_HW_PERIODIC) + +#define RGX_HWPERF_EVENT_MASK_ALL_PWR_EST (MASK_RANGE(RGX_HWPERF_PWR_EST_RANGE)) + +#define RGX_HWPERF_EVENT_MASK_ALL_PWR (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CLKS_CHG) |\ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_GPU_STATE_CHG) |\ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_CHG)) + +/*! 
HWPerf Host event masks + */ +#define RGX_HWPERF_EVENT_MASK_HOST_WORK_ENQ (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_ENQ)) +#define RGX_HWPERF_EVENT_MASK_HOST_ALL_UFO (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_UFO)) +#define RGX_HWPERF_EVENT_MASK_HOST_ALL_PWR (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_CLK_SYNC)) + + +/*! Type used in the RGX API RGXConfigHWPerfCounters() */ +typedef struct +{ + /*! Reserved for future use */ + IMG_UINT32 ui32Reserved; + + /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */ + IMG_UINT16 ui16BlockID; + + /*! Number of configured counters within this block */ + IMG_UINT16 ui16NumCounters; + + /*! Counter register values */ + IMG_UINT16 ui16Counters[RGX_CNTBLK_COUNTERS_MAX]; + +} UNCACHED_ALIGN RGX_HWPERF_CONFIG_CNTBLK; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CONFIG_CNTBLK); + + +#if defined(__cplusplus) +} +#endif + +#endif /* RGX_HWPERF_H_ */ + +/****************************************************************************** + End of file +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/include/volcanic/rgx_options.h b/drivers/mcst/gpu-imgtec/include/volcanic/rgx_options.h new file mode 100644 index 000000000000..c132d197c70e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/include/volcanic/rgx_options.h @@ -0,0 +1,227 @@ +/*************************************************************************/ /*! +@File +@Title RGX build options +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
/* Each build option listed here is packed into a dword which provides up to
 * log2(RGX_BUILD_OPTIONS_MASK_KM + 1) flags for KM and
 * (32 - log2(RGX_BUILD_OPTIONS_MASK_KM + 1)) flags for UM.
 * The corresponding bit is set if the build option was enabled at compile
 * time.
 *
 * In order to extract the enabled build flags the INTERNAL_TEST switch should
 * be enabled in a client program which includes this header. Then the client
 * can test specific build flags by reading the bit value at
 * ##OPTIONNAME##_SET_OFFSET
 * in RGX_BUILD_OPTIONS_KM or RGX_BUILD_OPTIONS.
 *
 * IMPORTANT: add new options to unused bits or define a new dword
 * (e.g. RGX_BUILD_OPTIONS_KM2 or RGX_BUILD_OPTIONS2) so that the bitfield
 * remains backwards compatible.
 */

#ifndef RGX_OPTIONS_H
#define RGX_OPTIONS_H

/* Low 16 bits of the options dword are reserved for kernel-mode flags;
 * the remaining bits belong to user-mode options. */
#define RGX_BUILD_OPTIONS_MASK_KM 0x0000FFFFUL

/* Bit 0: driver built without real hardware (simulation/no-HW). */
#if defined(NO_HARDWARE) || defined(INTERNAL_TEST)
	#define NO_HARDWARE_SET_OFFSET	OPTIONS_BIT0
	#define OPTIONS_BIT0		(0x1ul << 0)
	#if OPTIONS_BIT0 > RGX_BUILD_OPTIONS_MASK_KM
	#error "Bit exceeds reserved range"
	#endif
#else
	#define OPTIONS_BIT0		0x0UL
#endif /* NO_HARDWARE */

/* Bit 1: parameter-dump (PDUMP) capture support built in. */
#if defined(PDUMP) || defined(INTERNAL_TEST)
	#define PDUMP_SET_OFFSET	OPTIONS_BIT1
	#define OPTIONS_BIT1		(0x1ul << 1)
	#if OPTIONS_BIT1 > RGX_BUILD_OPTIONS_MASK_KM
	#error "Bit exceeds reserved range"
	#endif
#else
	#define OPTIONS_BIT1		0x0UL
#endif /* PDUMP */

/* Bit 2: currently unused; only set for INTERNAL_TEST builds. */
#if defined(INTERNAL_TEST)
	#define UNUSED_SET_OFFSET	OPTIONS_BIT2
	#define OPTIONS_BIT2		(0x1ul << 2)
	#if OPTIONS_BIT2 > RGX_BUILD_OPTIONS_MASK_KM
	#error "Bit exceeds reserved range"
	#endif
#else
	#define OPTIONS_BIT2		0x0UL
#endif

/* Bit 3: no longer used (kept so existing bit positions stay stable). */
#if defined(INTERNAL_TEST)
	#define OPTIONS_BIT3		(0x1ul << 3)
	#if OPTIONS_BIT3 > RGX_BUILD_OPTIONS_MASK_KM
	#error "Bit exceeds reserved range"
	#endif
#else
	#define OPTIONS_BIT3		0x0UL
#endif

/* Bit 4: RGX support compiled in. */
#if defined(SUPPORT_RGX) || defined(INTERNAL_TEST)
	#define SUPPORT_RGX_SET_OFFSET	OPTIONS_BIT4
	#define OPTIONS_BIT4		(0x1ul << 4)
	#if OPTIONS_BIT4 > RGX_BUILD_OPTIONS_MASK_KM
	#error "Bit exceeds reserved range"
	#endif
#else
	#define OPTIONS_BIT4		0x0UL
#endif /* SUPPORT_RGX */

/* Bit 5: secure buffer export support. */
#if defined(SUPPORT_SECURE_EXPORT) || defined(INTERNAL_TEST)
	#define SUPPORT_SECURE_EXPORT_SET_OFFSET	OPTIONS_BIT5
	#define OPTIONS_BIT5		(0x1ul << 5)
	#if OPTIONS_BIT5 > RGX_BUILD_OPTIONS_MASK_KM
	#error "Bit exceeds reserved range"
	#endif
#else
	#define OPTIONS_BIT5		0x0UL
#endif /* SUPPORT_SECURE_EXPORT */

/* Bit 6: insecure buffer export support. */
#if defined(SUPPORT_INSECURE_EXPORT) || defined(INTERNAL_TEST)
	#define SUPPORT_INSECURE_EXPORT_SET_OFFSET	OPTIONS_BIT6
	#define OPTIONS_BIT6		(0x1ul << 6)
	#if OPTIONS_BIT6 > RGX_BUILD_OPTIONS_MASK_KM
	#error "Bit exceeds reserved range"
	#endif
#else
	#define OPTIONS_BIT6		0x0UL
#endif /* SUPPORT_INSECURE_EXPORT */

/* Bit 7: virtual frame-pointer support. */
#if defined(SUPPORT_VFP) || defined(INTERNAL_TEST)
	#define SUPPORT_VFP_SET_OFFSET	OPTIONS_BIT7
	#define OPTIONS_BIT7		(0x1ul << 7)
	#if OPTIONS_BIT7 > RGX_BUILD_OPTIONS_MASK_KM
	#error "Bit exceeds reserved range"
	#endif
#else
	#define OPTIONS_BIT7		0x0UL
#endif /* SUPPORT_VFP */

/* Bit 8: workload estimation. */
#if defined(SUPPORT_WORKLOAD_ESTIMATION) || defined(INTERNAL_TEST)
	#define SUPPORT_WORKLOAD_ESTIMATION_OFFSET	OPTIONS_BIT8
	#define OPTIONS_BIT8		(0x1ul << 8)
	#if OPTIONS_BIT8 > RGX_BUILD_OPTIONS_MASK_KM
	#error "Bit exceeds reserved range"
	#endif
#else
	#define OPTIONS_BIT8		0x0UL
#endif /* SUPPORT_WORKLOAD_ESTIMATION */
/* Unconditional mask so mismatch checks can ignore this option. */
#define OPTIONS_WORKLOAD_ESTIMATION_MASK	(0x1ul << 8)

/* Bit 9: proactive DVFS. */
#if defined(SUPPORT_PDVFS) || defined(INTERNAL_TEST)
	#define SUPPORT_PDVFS_OFFSET	OPTIONS_BIT9
	#define OPTIONS_BIT9		(0x1ul << 9)
	#if OPTIONS_BIT9 > RGX_BUILD_OPTIONS_MASK_KM
	#error "Bit exceeds reserved range"
	#endif
#else
	#define OPTIONS_BIT9		0x0UL
#endif /* SUPPORT_PDVFS */
/* Unconditional mask so mismatch checks can ignore this option. */
#define OPTIONS_PDVFS_MASK	(0x1ul << 9)

/* Bit 10: debug build. */
#if defined(DEBUG) || defined(INTERNAL_TEST)
	#define DEBUG_SET_OFFSET	OPTIONS_BIT10
	#define OPTIONS_BIT10		(0x1ul << 10)
	#if OPTIONS_BIT10 > RGX_BUILD_OPTIONS_MASK_KM
	#error "Bit exceeds reserved range"
	#endif
#else
	#define OPTIONS_BIT10		0x0UL
#endif /* DEBUG */
/* The bit position of this should be the same as DEBUG_SET_OFFSET option
 * when defined. */
#define OPTIONS_DEBUG_MASK	(0x1ul << 10)

/* Bit 11: dma-buf fence ("buffer sync") integration. */
#if defined(SUPPORT_BUFFER_SYNC) || defined(INTERNAL_TEST)
	#define SUPPORT_BUFFER_SYNC_SET_OFFSET	OPTIONS_BIT11
	#define OPTIONS_BIT11		(0x1ul << 11)
	#if OPTIONS_BIT11 > RGX_BUILD_OPTIONS_MASK_KM
	#error "Bit exceeds reserved range"
	#endif
#else
	#define OPTIONS_BIT11		0x0UL
#endif /* SUPPORT_BUFFER_SYNC */

/* Aggregate of all kernel-mode option bits enabled in this build.
 * NOTE(review): OPTIONS_BIT5 (SUPPORT_SECURE_EXPORT) is absent from this
 * aggregate in the original source — confirm against upstream before
 * "fixing", as this changes KM/UM build-option mismatch detection. */
#define RGX_BUILD_OPTIONS_KM	\
	(OPTIONS_BIT0  |\
	 OPTIONS_BIT1  |\
	 OPTIONS_BIT2  |\
	 OPTIONS_BIT3  |\
	 OPTIONS_BIT4  |\
	 OPTIONS_BIT6  |\
	 OPTIONS_BIT7  |\
	 OPTIONS_BIT8  |\
	 OPTIONS_BIT9  |\
	 OPTIONS_BIT10 |\
	 OPTIONS_BIT11 )

/* Firmware does not care about buffer sync, so exclude it from the
 * firmware-facing mask. */
#define RGX_BUILD_OPTIONS_MASK_FW	\
	(RGX_BUILD_OPTIONS_MASK_KM & \
	 ~OPTIONS_BIT11)

/* Bit 31 is a user-mode option and must lie outside the KM range. */
#define OPTIONS_BIT31 (0x1ul << 31)
#if OPTIONS_BIT31 <= RGX_BUILD_OPTIONS_MASK_KM
#error "Bit exceeds reserved range"
#endif
#define SUPPORT_PERCONTEXT_FREELIST_SET_OFFSET	OPTIONS_BIT31

/* Full (KM + UM) build-options dword. */
#define RGX_BUILD_OPTIONS (RGX_BUILD_OPTIONS_KM | OPTIONS_BIT31)

/* Options that must match strictly between components: everything except
 * debug, workload estimation and PDVFS, which may legitimately differ. */
#define OPTIONS_STRICT (RGX_BUILD_OPTIONS & \
			~(OPTIONS_DEBUG_MASK | \
			  OPTIONS_WORKLOAD_ESTIMATION_MASK | \
			  OPTIONS_PDVFS_MASK))

#endif /* RGX_OPTIONS_H */
All Rights Reserved +@Description This header provides system-specific declarations and macros +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
/*************************************************************************/ /*!
@File           apollo_clocks.h
@Title          System Description Header
@Description    Default clock speeds for the Apollo/Odin test-chip system.
@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
@License        Dual MIT/GPLv2 (full text in MIT-COPYING / GPL-COPYING)
*/ /**************************************************************************/

#if !defined(APOLLO_CLOCKS_H)
#define APOLLO_CLOCKS_H

/*
 * The core clock speed is passed through a multiplier depending on the TC
 * version:
 *
 *   TC_ES1: multiplier x3 -> final speed 270MHz
 *   TC_ES2: multiplier x6 -> final speed 540MHz
 *   TCF5:   multiplier x1 -> final speed 45MHz
 *
 * The base (unmultiplied) speed can be adjusted with the module parameter
 * "sys_core_clk_speed" (a value in Hz), e.g.:
 *
 *   PVR_SRVKM_PARAMS="sys_core_clk_speed=60000000" /etc/init.d/rc.pvr start
 *
 * which gives a core speed of 60MHz x multiplier.
 *
 * The memory clock is unmultiplied and is adjusted the same way via
 * "sys_mem_clk_speed" (Hz), e.g.:
 *
 *   PVR_SRVKM_PARAMS="sys_mem_clk_speed=100000000" /etc/init.d/rc.pvr start
 *
 * The system interface clock ("sys_sysif_clk_speed", Hz) works identically;
 * it is needed for TCF5 but not for TC_ES1/TC_ES2:
 *
 *   PVR_SRVKM_PARAMS="sys_sysif_clk_speed=45000000" /etc/init.d/rc.pvr start
 *
 * All parameters can be specified at once:
 *
 *   PVR_SRVKM_PARAMS="sys_mem_clk_speed=MEMORY_SPEED sys_core_clk_speed=CORE_SPEED sys_sysif_clk_speed=SYSIF_SPEED" /etc/init.d/rc.pvr start
 */

#define RGX_TC_SYS_CLOCK_SPEED (45000000) /*< Unused */

#if 1
	/* FPGA tcfvuquad with Odin.
	 * NOTE(review): the original trailing comments ("3.125MHz"/"3.75MHz")
	 * disagree with the defined values (50MHz/40MHz) — possibly the real
	 * FPGA rate after an internal divider. Values kept unchanged; verify
	 * the comments against the board documentation before relying on
	 * them. */
	#define RGX_TC_CORE_CLOCK_SPEED (50000000) /* 3.125MHz */
	#define RGX_TC_MEM_CLOCK_SPEED  (40000000) /* 3.75MHz */
#endif

#endif /* if !defined(APOLLO_CLOCKS_H) */
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +ccflags-y := \ + -I$(TOP)/kernel/drivers/staging/imgtec \ + -I$(TOP)/kernel/drivers/staging/imgtec/tc \ + -I$(TOP)/kernel/drivers/staging/imgtec/e2c3_gpu \ + -I$(TOP)/kernel/drivers/staging/imgtec/rk3368 \ + -I$(TOP)/kernel/drivers/staging/imgtec/plato \ + -I$(TOP)/kernel/drivers/staging/imgtec/plato/hdmi \ + -I$(TOP)/include/$(PVR_ARCH)/system/rgx_tc -I$(TOP)/include/system/rgx_tc \ + -I$(TOP)/include/drm \ + -I$(TOP)/hwdefs/$(PVR_ARCH) \ + $(ccflags-y) + +adf_pdp-y += \ + kernel/drivers/staging/imgtec/tc/adf_pdp.o \ + kernel/drivers/staging/imgtec/tc/pdp_apollo.o \ + kernel/drivers/staging/imgtec/tc/pdp_odin.o \ + kernel/drivers/staging/imgtec/adf_common.o \ + kernel/drivers/staging/imgtec/debugfs_dma_buf.o + +tc-y += \ + kernel/drivers/staging/imgtec/tc/tc_apollo.o \ + kernel/drivers/staging/imgtec/tc/tc_odin.o \ + kernel/drivers/staging/imgtec/tc/tc_drv.o + +ifeq ($(SUPPORT_APOLLO_FPGA),1) +tc-y += \ + kernel/drivers/staging/imgtec/tc/tc_apollo_debugfs.o +endif + +ifeq ($(SUPPORT_ION),1) +tc-y += \ + kernel/drivers/staging/imgtec/tc/tc_ion.o \ + kernel/drivers/staging/imgtec/tc/ion_lma_heap.o \ + kernel/drivers/staging/imgtec/ion_fbcdc_clear.o +endif + +e2c3_gpu-y += \ + kernel/drivers/staging/imgtec/e2c3_gpu/e2c3_gpu_drv.o + +drm_nulldisp-y += \ + kernel/drivers/staging/imgtec/drm_nulldisp_drv.o \ + kernel/drivers/staging/imgtec/drm_nulldisp_netlink.o \ + kernel/drivers/staging/imgtec/drm_netlink_gem.o + +ifeq ($(LMA),1) +drm_nulldisp-y += \ + kernel/drivers/staging/imgtec/tc/drm_pdp_gem.o +else +drm_nulldisp-y += \ + kernel/drivers/staging/imgtec/drm_nulldisp_gem.o +endif + +drm_pdp-y += \ + kernel/drivers/staging/imgtec/tc/drm_pdp_debugfs.o \ + kernel/drivers/staging/imgtec/tc/drm_pdp_drv.o \ + kernel/drivers/staging/imgtec/tc/drm_pdp_gem.o \ + kernel/drivers/staging/imgtec/tc/drm_pdp_modeset.o \ + kernel/drivers/staging/imgtec/tc/drm_pdp_plane.o \ + 
kernel/drivers/staging/imgtec/tc/drm_pdp_crtc.o \ + kernel/drivers/staging/imgtec/tc/drm_pdp_dvi.o \ + kernel/drivers/staging/imgtec/tc/drm_pdp_tmds.o \ + kernel/drivers/staging/imgtec/tc/drm_pdp_fb.o \ + kernel/drivers/staging/imgtec/tc/pdp_apollo.o \ + kernel/drivers/staging/imgtec/tc/pdp_odin.o \ + kernel/drivers/staging/imgtec/tc/pdp_plato.o + +plato-y += \ + kernel/drivers/staging/imgtec/plato/plato_drv.o \ + kernel/drivers/staging/imgtec/plato/plato_init.o + +drm_pdp2_hdmi-y += \ + kernel/drivers/staging/imgtec/plato/hdmi/hdmi_core.o \ + kernel/drivers/staging/imgtec/plato/hdmi/hdmi_video.o \ + kernel/drivers/staging/imgtec/plato/hdmi/hdmi_i2c.o \ + kernel/drivers/staging/imgtec/plato/hdmi/hdmi_phy.o + +drm_rk-y += \ + kernel/drivers/staging/imgtec/rk3368/drm_rk_drv.o \ + kernel/drivers/staging/imgtec/rk3368/drm_rk_gem.o \ + kernel/drivers/staging/imgtec/rk3368/drm_rk_modeset.o \ + kernel/drivers/staging/imgtec/rk3368/drm_rk_crtc.o \ + kernel/drivers/staging/imgtec/rk3368/drm_rk_hdmi.o \ + kernel/drivers/staging/imgtec/rk3368/drm_rk_encoder.o diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/Linux.mk b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/Linux.mk new file mode 100644 index 000000000000..65712c926146 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/Linux.mk @@ -0,0 +1,73 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
### ###########################################################################

# Out-of-tree kernel modules built from this directory. Every module follows
# the same pattern: <name> builds <name>.ko via this directory's Kbuild.mk.
modules := adf_pdp tc e2c3_gpu drm_nulldisp drm_pdp plato drm_rk drm_pdp2_hdmi

# Stamp the per-module <name>_type/_target/_makefile variables from a single
# template instead of repeating the identical triplet eight times. The
# generated assignments are byte-for-byte what the hand-written versions were.
define _kernel_module_rules
$(1)_type := kernel_module
$(1)_target := $(1).ko
$(1)_makefile := $$(THIS_DIR)/Kbuild.mk
endef

$(foreach _m,$(modules),$(eval $(call _kernel_module_rules,$(_m))))
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#include +#include +#endif + +#include "drm_netlink_gem.h" + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) +#include +#endif + +#include + +#include "kernel_compatibility.h" + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) +static int netlink_gem_mmap_capsys(struct file *file, + struct vm_area_struct *vma) +{ + struct drm_file *file_priv = file->private_data; + struct drm_device *dev = file_priv->minor->dev; + struct drm_vma_offset_node *node; + struct drm_gem_object *obj = NULL; + int err; + + drm_vma_offset_lock_lookup(dev->vma_offset_manager); + node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager, + vma->vm_pgoff, + vma_pages(vma)); + if (node) { + obj = container_of(node, struct drm_gem_object, vma_node); + + /* Don't mmap an object that is being destroyed */ + if (!kref_get_unless_zero(&obj->refcount)) + obj = NULL; + } + drm_vma_offset_unlock_lookup(dev->vma_offset_manager); + + if (!obj) + return -EINVAL; + + err = drm_vma_node_allow(node, file_priv); + if (!err) { + err = drm_gem_mmap(file, vma); + + drm_vma_node_revoke(node, file_priv); + } + + drm_gem_object_put_unlocked(obj); + + return err; +} + +int netlink_gem_mmap(struct file *file, struct vm_area_struct *vma) +{ + int err; + + err = drm_gem_mmap(file, vma); + if (!!err && capable(CAP_SYS_RAWIO)) + err = netlink_gem_mmap_capsys(file, vma); + + return err; +} +#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) */ +int netlink_gem_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct drm_file *file_priv = file->private_data; + struct drm_device *dev = file_priv->minor->dev; + struct drm_vma_offset_node *node; + struct drm_gem_object *obj; + int err; + + mutex_lock(&dev->struct_mutex); + + node = drm_vma_offset_exact_lookup(dev->vma_offset_manager, + vma->vm_pgoff, + vma_pages(vma)); + if (!node) { + err = -EINVAL; 
+ goto exit_unlock; + } + + /* Allow Netlink clients to mmap any object for reading */ + if (!capable(CAP_SYS_RAWIO) || (vma->vm_flags & VM_WRITE)) { + if (!drm_vma_node_is_allowed(node, file_priv)) { + err = -EACCES; + goto exit_unlock; + } + } + + obj = container_of(node, struct drm_gem_object, vma_node); + + err = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma); + +exit_unlock: + mutex_unlock(&dev->struct_mutex); + return err; +} +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_netlink_gem.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_netlink_gem.h new file mode 100644 index 000000000000..f3b4f08070f6 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_netlink_gem.h @@ -0,0 +1,62 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__DRM_NETLINK_GEM_H__) +#define __DRM_NETLINK_GEM_H__ + +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +struct file; +struct vm_area_struct; +#else +#include +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) +#include +#endif + +int netlink_gem_mmap(struct file *file, struct vm_area_struct *vma); + +#endif /* !defined(__DRM_NETLINK_GEM_H__) */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_drv.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_drv.c new file mode 100644 index 000000000000..54517bc8e469 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_drv.c @@ -0,0 +1,2622 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#include +#include +#include +#include +#include "pvr_linux_fence.h" +#include +#include +#include +#include +#include +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#include +#include +#include +#include +#include +#else +#include +#endif + +#include +#include +#include +#include +#include +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) +#include +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0)) +#include +#endif + +#include "pvr_dma_resv.h" + +#include "img_drm_fourcc_internal.h" +#include + +#include + +#include "drm_nulldisp_drv.h" +#if defined(LMA) +#include "tc_drv.h" +#include "drm_pdp_gem.h" +#include "pdp_drm.h" +#else +#include "drm_nulldisp_gem.h" +#endif +#include "nulldisp_drm.h" +#include "drm_netlink_gem.h" +#include "drm_nulldisp_netlink.h" + +#if defined(NULLDISP_USE_ATOMIC) +#include +#include +#include +#endif + +#include "kernel_compatibility.h" + +#define DRIVER_NAME "nulldisp" +#define DRIVER_DESC "Imagination Technologies Null DRM Display Driver" +#define DRIVER_DATE "20150612" + +#if defined(NULLDISP_USE_ATOMIC) +#define NULLDISP_DRIVER_ATOMIC DRIVER_ATOMIC +#else +#define NULLDISP_DRIVER_ATOMIC 0 +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) +#define NULLDISP_DRIVER_PRIME 0 +#else +#define NULLDISP_DRIVER_PRIME DRIVER_PRIME +#endif + +#define NULLDISP_FB_WIDTH_MIN 0 +#define NULLDISP_FB_WIDTH_MAX 8192 +#define NULLDISP_FB_HEIGHT_MIN 0 +#define NULLDISP_FB_HEIGHT_MAX 8192 + +#define NULLDISP_DEFAULT_WIDTH 640 +#define NULLDISP_DEFAULT_HEIGHT 480 +#define NULLDISP_DEFAULT_REFRESH_RATE 60 + +#define NULLDISP_MAX_PLANES 3 + +#if defined(NULLDISP_USE_ATOMIC) +#define NULLDISP_NETLINK_TIMEOUT 5 +#else +#define NULLDISP_NETLINK_TIMEOUT 30 +#endif +#define NULLDISP_NETLINK_TIMEOUT_MAX 300 +#define NULLDISP_NETLINK_TIMEOUT_MIN 1 + +enum nulldisp_crtc_flip_status { + 
NULLDISP_CRTC_FLIP_STATUS_NONE = 0, +#if !defined(NULLDISP_USE_ATOMIC) + NULLDISP_CRTC_FLIP_STATUS_PENDING, +#endif + NULLDISP_CRTC_FLIP_STATUS_DONE, +}; + +struct nulldisp_flip_data { + struct dma_fence_cb base; + struct drm_crtc *crtc; + struct dma_fence *wait_fence; +}; + +struct nulldisp_crtc { + struct drm_crtc base; + struct delayed_work vb_work; +#if defined(NULLDISP_USE_ATOMIC) + struct drm_framebuffer *fb; + struct completion flip_done; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)) + struct completion copy_done; +#endif +#else + struct work_struct flip_work; + struct delayed_work flip_to_work; + struct delayed_work copy_to_work; + + struct completion flip_scheduled; + struct completion copy_done; +#endif + + /* Reuse the drm_device event_lock to protect these */ + atomic_t flip_status; + struct drm_pending_vblank_event *flip_event; +#if !defined(NULLDISP_USE_ATOMIC) + struct drm_framebuffer *old_fb; + struct nulldisp_flip_data *flip_data; +#endif + bool flip_async; +}; + +struct nulldisp_display_device { + struct drm_device *dev; + + struct workqueue_struct *workqueue; + struct nulldisp_crtc *nulldisp_crtc; + struct nlpvrdpy *nlpvrdpy; +#if defined(LMA) + struct pdp_gem_private *pdp_gem_priv; +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) + struct drm_connector *connector; +#endif +}; + +#if !defined(NULLDISP_USE_ATOMIC) +struct nulldisp_framebuffer { + struct drm_framebuffer base; + struct drm_gem_object *obj[NULLDISP_MAX_PLANES]; +}; + +#define to_nulldisp_framebuffer(framebuffer) \ + container_of(framebuffer, struct nulldisp_framebuffer, base) +#endif + +struct nulldisp_module_params { + unsigned int hdisplay; + unsigned int vdisplay; + unsigned int vrefresh; + unsigned int updateto; +}; + +#define to_nulldisp_crtc(crtc) \ + container_of(crtc, struct nulldisp_crtc, base) + +#if defined(LMA) +#define obj_to_resv(obj) pdp_gem_get_resv(obj) +#else +#define obj_to_resv(obj) 
nulldisp_gem_get_resv(obj) +#endif + +/* + * The order of this array helps determine the order in which EGL configs are + * returned to an application using eglGetConfigs. As such, RGB 8888 formats + * should appear first, followed by RGB 565 configs. YUV configs should appear + * last. + */ +static const uint32_t nulldisp_modeset_formats[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_RGB565, + DRM_FORMAT_ABGR2101010, +#ifdef DRM_FORMAT_ABGR16_IMG + DRM_FORMAT_ABGR16_IMG, +#endif + DRM_FORMAT_NV12, + DRM_FORMAT_NV21, + DRM_FORMAT_YUYV, + DRM_FORMAT_YUV444, + DRM_FORMAT_YVU420, +}; + +/* + * Note that nulldisp, being a no-hardware display controller driver, + * "supports" a number different decompression hardware + * versions (V0, V1, V2 ...). Real, hardware display controllers are + * likely to support only a single version. + */ +static const uint64_t nulldisp_primary_plane_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_PVR_FBCDC_8x8_V0, + DRM_FORMAT_MOD_PVR_FBCDC_8x8_V0_FIX, + DRM_FORMAT_MOD_PVR_FBCDC_8x8_V1, + DRM_FORMAT_MOD_PVR_FBCDC_8x8_V2, + DRM_FORMAT_MOD_PVR_FBCDC_8x8_V3, + DRM_FORMAT_MOD_PVR_FBCDC_8x8_V7, + DRM_FORMAT_MOD_PVR_FBCDC_8x8_V8, + DRM_FORMAT_MOD_PVR_FBCDC_8x8_V10, + DRM_FORMAT_MOD_PVR_FBCDC_8x8_V12, + DRM_FORMAT_MOD_PVR_FBCDC_16x4_V0, + DRM_FORMAT_MOD_PVR_FBCDC_16x4_V0_FIX, + DRM_FORMAT_MOD_PVR_FBCDC_16x4_V1, + DRM_FORMAT_MOD_PVR_FBCDC_16x4_V2, + DRM_FORMAT_MOD_PVR_FBCDC_16x4_V3, + DRM_FORMAT_MOD_PVR_FBCDC_16x4_V7, + DRM_FORMAT_MOD_PVR_FBCDC_16x4_V8, + DRM_FORMAT_MOD_PVR_FBCDC_16x4_V10, + DRM_FORMAT_MOD_PVR_FBCDC_16x4_V12, + DRM_FORMAT_MOD_PVR_FBCDC_32x2_V1, + DRM_FORMAT_MOD_PVR_FBCDC_32x2_V3, + DRM_FORMAT_MOD_PVR_FBCDC_32x2_V8, + DRM_FORMAT_MOD_PVR_FBCDC_32x2_V10, + DRM_FORMAT_MOD_PVR_FBCDC_32x2_V12, + DRM_FORMAT_MOD_INVALID +}; + +static struct nulldisp_module_params module_params = { + .hdisplay = NULLDISP_DEFAULT_WIDTH, + .vdisplay = NULLDISP_DEFAULT_HEIGHT, + .vrefresh = NULLDISP_DEFAULT_REFRESH_RATE, + 
.updateto = NULLDISP_NETLINK_TIMEOUT, +}; + +static int updateto_param_set(const char *val, const struct kernel_param *kp); + +static const struct kernel_param_ops updateto_ops = { + .set = updateto_param_set, + .get = param_get_uint, +}; + +module_param_named(width, module_params.hdisplay, uint, 0444); +module_param_named(height, module_params.vdisplay, uint, 0444); +module_param_named(refreshrate, module_params.vrefresh, uint, 0444); +module_param_cb(updateto, &updateto_ops, &module_params.updateto, 0644); + +MODULE_PARM_DESC(width, "Preferred display width in pixels"); +MODULE_PARM_DESC(height, "Preferred display height in pixels"); +MODULE_PARM_DESC(refreshrate, "Preferred display refresh rate"); +MODULE_PARM_DESC(updateto, "Preferred remote update timeout (in seconds)"); + +/* + * Please use this function to obtain the module parameters instead of + * accessing the global "module_params" structure directly. + */ +static inline const struct nulldisp_module_params * +nulldisp_get_module_params(void) +{ + return &module_params; +} + +static int updateto_param_set(const char *val, const struct kernel_param *kp) +{ + unsigned int updateto; + int err; + + err = kstrtouint(val, 10, &updateto); + if (err) + return err; + + if (updateto < NULLDISP_NETLINK_TIMEOUT_MIN || + updateto > NULLDISP_NETLINK_TIMEOUT_MAX) + return -EINVAL; + + return param_set_uint(val, kp); +} + +static unsigned long nulldisp_netlink_timeout(void) +{ + const struct nulldisp_module_params *module_params = + nulldisp_get_module_params(); + unsigned int updateto; + +#if !defined(CHROMIUMOS_KERNEL) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)) + kparam_block_sysfs_write(updateto); +#else + kernel_param_lock(THIS_MODULE); +#endif + + updateto = module_params->updateto; + +#if !defined(CHROMIUMOS_KERNEL) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)) + kparam_unblock_sysfs_write(updateto); +#else + kernel_param_unlock(THIS_MODULE); +#endif + + return msecs_to_jiffies(updateto * 1000); 
+} + +/****************************************************************************** + * Linux compatibility functions + ******************************************************************************/ +static inline void +nulldisp_drm_fb_set_format(struct drm_framebuffer *fb, + u32 pixel_format) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) + fb->format = drm_format_info(pixel_format); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) + const struct drm_format_info *format = drm_format_info(pixel_format); + + fb->pixel_format = pixel_format; + + fb->depth = format->depth; + fb->bits_per_pixel = format->depth ? (format->cpp[0] * 8) : 0; +#else + fb->pixel_format = pixel_format; + + switch (pixel_format) { + case DRM_FORMAT_NV12: + case DRM_FORMAT_YUYV: + /* Unused for YUV formats */ + fb->depth = 0; + fb->bits_per_pixel = 0; + break; + + default: /* RGB */ + drm_fb_get_bpp_depth(pixel_format, + &fb->depth, + &fb->bits_per_pixel); + } +#endif +} + +static inline void nulldisp_drm_fb_set_modifier(struct drm_framebuffer *fb, + uint64_t value) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) + fb->modifier = value; +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + /* FB modifier values must be the same for all planes */ + fb->modifier[0] = value; + fb->modifier[1] = value; + fb->modifier[2] = value; + fb->modifier[3] = value; +#else + /* Modifiers are not supported */ +#endif +} + +/****************************************************************************** + * Plane functions + ******************************************************************************/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) +static bool nulldisp_primary_format_mod_supported(struct drm_plane *plane, + uint32_t format, + uint64_t modifier) +{ + /* + * All 'nulldisp_modeset_formats' are supported for every modifier + * in the 'nulldisp_primary_plane_modifiers' array. 
+ */ + return true; +} +#endif + +#if defined(NULLDISP_USE_ATOMIC) +static int nulldisp_plane_helper_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct drm_crtc_state *crtc_new_state; + + if (!state->crtc) + return 0; + + crtc_new_state = drm_atomic_get_new_crtc_state(state->state, + state->crtc); + + return drm_atomic_helper_check_plane_state(state, crtc_new_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + false, true); +} + +static void +nulldisp_plane_helper_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_plane_state *state = plane->state; + + if (state->crtc) { + struct nulldisp_crtc *nulldisp_crtc = + to_nulldisp_crtc(state->crtc); + + nulldisp_crtc->fb = state->fb; + } +} + +static const struct drm_plane_helper_funcs nulldisp_plane_helper_funcs = { + .prepare_fb = drm_gem_fb_prepare_fb, + .atomic_check = nulldisp_plane_helper_atomic_check, + .atomic_update = nulldisp_plane_helper_atomic_update, +}; + +static const struct drm_plane_funcs nulldisp_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_primary_helper_destroy, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, + .format_mod_supported = nulldisp_primary_format_mod_supported, +}; +#else /* defined(NULLDISP_USE_ATOMIC) */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) +static int nulldisp_primary_helper_update(struct drm_plane *plane, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, + unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h, + struct drm_modeset_acquire_ctx *ctx) +{ + struct nulldisp_display_device *nulldisp_dev = crtc->dev->dev_private; + struct drm_plane_state plane_state = { + .plane = plane, 
+ .crtc = crtc, + .fb = fb, + .crtc_x = crtc_x, + .crtc_y = crtc_y, + .crtc_w = crtc_w, + .crtc_h = crtc_h, + .src_x = src_x, + .src_y = src_y, + .src_w = src_w, + .src_h = src_h, + .alpha = DRM_BLEND_ALPHA_OPAQUE, + .rotation = DRM_MODE_ROTATE_0, + }; + struct drm_crtc_state crtc_state = { + .crtc = crtc, + .enable = crtc->enabled, + .adjusted_mode = crtc->mode, + .mode = crtc->mode, + }; + struct drm_mode_set set = { + .fb = fb, + .crtc = crtc, + .mode = &crtc->mode, + .x = src_x >> 16, /* convert from fixed point */ + .y = src_y >> 16, /* convert from fixed point */ + .connectors = &nulldisp_dev->connector, + .num_connectors = 1, + }; + int err; + + BUG_ON(nulldisp_dev->connector->encoder == NULL); + BUG_ON(nulldisp_dev->connector->encoder->crtc != crtc); + + err = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + false, false); + if (err) + return err; + + if (!plane_state.visible) + return -EINVAL; + + return crtc->funcs->set_config(&set, ctx); +} + +static int nulldisp_primary_helper_disable(struct drm_plane *plane, + struct drm_modeset_acquire_ctx *ctx) +{ + return -EINVAL; +} +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) */ + +static const struct drm_plane_funcs nulldisp_plane_funcs = { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) + .update_plane = nulldisp_primary_helper_update, + .disable_plane = nulldisp_primary_helper_disable, +#else + .update_plane = drm_primary_helper_update, + .disable_plane = drm_primary_helper_disable, +#endif + .destroy = drm_primary_helper_destroy, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) + .format_mod_supported = nulldisp_primary_format_mod_supported, +#endif +}; +#endif /* defined(NULLDISP_USE_ATOMIC) */ + +/****************************************************************************** + * CRTC functions + ******************************************************************************/ + +static bool 
+nulldisp_crtc_helper_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + /* + * Fix up mode so that it's compatible with the hardware. The results + * should be stored in adjusted_mode (i.e. mode should be untouched). + */ + return true; +} + +static void nulldisp_crtc_helper_disable(struct drm_crtc *crtc) +{ + struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc); + +#if !defined(NULLDISP_USE_ATOMIC) + if (atomic_read(&nulldisp_crtc->flip_status) == + NULLDISP_CRTC_FLIP_STATUS_PENDING) + wait_for_completion(&nulldisp_crtc->flip_scheduled); + + /* + * Flush any outstanding page flip related work. The order this + * is done is important, to ensure there are no outstanding + * page flips. + */ + flush_work(&nulldisp_crtc->flip_work); + flush_delayed_work(&nulldisp_crtc->flip_to_work); +#endif + flush_delayed_work(&nulldisp_crtc->vb_work); + + drm_crtc_vblank_off(crtc); + flush_delayed_work(&nulldisp_crtc->vb_work); + + /* + * Vblank has been disabled, so the vblank handler shouldn't be + * able to reschedule itself. 
+ */ + BUG_ON(cancel_delayed_work(&nulldisp_crtc->vb_work)); + + BUG_ON(atomic_read(&nulldisp_crtc->flip_status) != + NULLDISP_CRTC_FLIP_STATUS_NONE); + +#if !defined(NULLDISP_USE_ATOMIC) + /* Flush any remaining dirty FB work */ + flush_delayed_work(&nulldisp_crtc->copy_to_work); +#endif +} + +static void nulldisp_crtc_flip_complete(struct drm_crtc *crtc) +{ + struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc); + unsigned long flags; + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + + /* The flipping process has been completed so reset the flip state */ + atomic_set(&nulldisp_crtc->flip_status, NULLDISP_CRTC_FLIP_STATUS_NONE); + nulldisp_crtc->flip_async = false; + +#if !defined(NULLDISP_USE_ATOMIC) + if (nulldisp_crtc->flip_data) { + dma_fence_put(nulldisp_crtc->flip_data->wait_fence); + kfree(nulldisp_crtc->flip_data); + nulldisp_crtc->flip_data = NULL; + } +#endif + if (nulldisp_crtc->flip_event) { + drm_crtc_send_vblank_event(crtc, nulldisp_crtc->flip_event); + nulldisp_crtc->flip_event = NULL; + } + + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); +} + +#if defined(NULLDISP_USE_ATOMIC) +static void nulldisp_crtc_helper_mode_set_nofb(struct drm_crtc *crtc) +{ +} + +static void nulldisp_crtc_helper_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc); + + if (!crtc->state->active || !old_state->active) + return; + + if (nulldisp_crtc->fb) { + struct nulldisp_display_device *nulldisp_dev = + crtc->dev->dev_private; + + reinit_completion(&nulldisp_crtc->flip_done); + + if (!nlpvrdpy_send_flip(nulldisp_dev->nlpvrdpy, + nulldisp_crtc->fb, + &nulldisp_crtc->fb->obj[0])) { + unsigned long res; + + res = wait_for_completion_timeout( + &nulldisp_crtc->flip_done, + nulldisp_netlink_timeout()); + + if (!res) + DRM_ERROR( + "timed out waiting for remote update\n"); + } + + nulldisp_crtc->fb = NULL; + } + + if (crtc->state->event) { + unsigned long flags; + +#if 
(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) + nulldisp_crtc->flip_async = crtc->state->async_flip; +#else + nulldisp_crtc->flip_async = !!(crtc->state->pageflip_flags + & DRM_MODE_PAGE_FLIP_ASYNC); +#endif + if (nulldisp_crtc->flip_async) + WARN_ON(drm_crtc_vblank_get(crtc) != 0); + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + nulldisp_crtc->flip_event = crtc->state->event; + crtc->state->event = NULL; + + atomic_set(&nulldisp_crtc->flip_status, + NULLDISP_CRTC_FLIP_STATUS_DONE); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + + if (nulldisp_crtc->flip_async) + nulldisp_crtc_flip_complete(crtc); + } +} + +static void nulldisp_crtc_set_enabled(struct drm_crtc *crtc, bool enable) +{ + if (enable) + drm_crtc_vblank_on(crtc); + else + nulldisp_crtc_helper_disable(crtc); +} + +static void +nulldisp_crtc_helper_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + nulldisp_crtc_set_enabled(crtc, true); + + if (crtc->state->event) { + struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc); + unsigned long flags; + + WARN_ON(drm_crtc_vblank_get(crtc) != 0); + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + nulldisp_crtc->flip_event = crtc->state->event; + crtc->state->event = NULL; + + atomic_set(&nulldisp_crtc->flip_status, + NULLDISP_CRTC_FLIP_STATUS_DONE); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + } +} + +static void +nulldisp_crtc_helper_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc); + + nulldisp_crtc_set_enabled(crtc, false); + + nulldisp_crtc->fb = NULL; + + if (crtc->state->event) { + unsigned long flags; + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + } +} +#else /* defined(NULLDISP_USE_ATOMIC) */ +static void 
nulldisp_crtc_helper_dpms(struct drm_crtc *crtc, + int mode) +{ + /* + * Change the power state of the display/pipe/port/etc. If the mode + * passed in is unsupported, the provider must use the next lowest + * power level. + */ +} + +static void nulldisp_crtc_helper_prepare(struct drm_crtc *crtc) +{ + drm_crtc_vblank_off(crtc); + + /* + * Prepare the display/pipe/port/etc for a mode change e.g. put them + * in a low power state/turn them off + */ +} + +static void nulldisp_crtc_helper_commit(struct drm_crtc *crtc) +{ + /* Turn the display/pipe/port/etc back on */ + + drm_crtc_vblank_on(crtc); +} + +static int +nulldisp_crtc_helper_mode_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, + enum mode_set_atomic atomic) +{ + /* Set the display base address or offset from the base address */ + return 0; +} + +static int nulldisp_crtc_helper_mode_set_base(struct drm_crtc *crtc, + int x, int y, + struct drm_framebuffer *old_fb) +{ + return nulldisp_crtc_helper_mode_set_base_atomic(crtc, + crtc->primary->fb, + x, + y, + 0); +} + +static int +nulldisp_crtc_helper_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, + struct drm_framebuffer *old_fb) +{ + /* Setup the the new mode and/or framebuffer */ + return nulldisp_crtc_helper_mode_set_base(crtc, x, y, old_fb); +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) +static void nulldisp_crtc_helper_load_lut(struct drm_crtc *crtc) +{ +} +#endif +#endif /* defined(NULLDISP_USE_ATOMIC) */ + +static void nulldisp_crtc_destroy(struct drm_crtc *crtc) +{ + struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc); + + DRM_DEBUG_DRIVER("[CRTC:%d]\n", crtc->base.id); + + drm_crtc_cleanup(crtc); + + BUG_ON(atomic_read(&nulldisp_crtc->flip_status) != + NULLDISP_CRTC_FLIP_STATUS_NONE); + + kfree(nulldisp_crtc); +} + +#if !defined(NULLDISP_USE_ATOMIC) +static void nulldisp_crtc_flip_done(struct nulldisp_crtc *nulldisp_crtc) 
+{ + struct drm_crtc *crtc = &nulldisp_crtc->base; + + struct drm_framebuffer *old_fb; + + WARN_ON(atomic_read(&nulldisp_crtc->flip_status) != + NULLDISP_CRTC_FLIP_STATUS_PENDING); + + old_fb = nulldisp_crtc->old_fb; + nulldisp_crtc->old_fb = NULL; + + (void) nulldisp_crtc_helper_mode_set_base(crtc, crtc->x, crtc->y, + old_fb); + + atomic_set(&nulldisp_crtc->flip_status, NULLDISP_CRTC_FLIP_STATUS_DONE); + + if (nulldisp_crtc->flip_async) + nulldisp_crtc_flip_complete(crtc); +} + +static bool nulldisp_set_flip_to(struct nulldisp_crtc *nulldisp_crtc) +{ + struct drm_crtc *crtc = &nulldisp_crtc->base; + struct nulldisp_display_device *nulldisp_dev = crtc->dev->dev_private; + + /* Returns false if work already queued, else true */ + return queue_delayed_work(nulldisp_dev->workqueue, + &nulldisp_crtc->flip_to_work, + nulldisp_netlink_timeout()); +} + +static bool nulldisp_set_copy_to(struct nulldisp_crtc *nulldisp_crtc) +{ + struct drm_crtc *crtc = &nulldisp_crtc->base; + struct nulldisp_display_device *nulldisp_dev = crtc->dev->dev_private; + + /* Returns false if work already queued, else true */ + return queue_delayed_work(nulldisp_dev->workqueue, + &nulldisp_crtc->copy_to_work, + nulldisp_netlink_timeout()); +} + +static void nulldisp_flip_to_work(struct work_struct *w) +{ + struct delayed_work *dw = + container_of(w, struct delayed_work, work); + struct nulldisp_crtc *nulldisp_crtc = + container_of(dw, struct nulldisp_crtc, flip_to_work); + + if (atomic_read(&nulldisp_crtc->flip_status) == + NULLDISP_CRTC_FLIP_STATUS_PENDING) + nulldisp_crtc_flip_done(nulldisp_crtc); +} + +static void nulldisp_copy_to_work(struct work_struct *w) +{ + struct delayed_work *dw = + container_of(w, struct delayed_work, work); + struct nulldisp_crtc *nulldisp_crtc = + container_of(dw, struct nulldisp_crtc, copy_to_work); + + complete(&nulldisp_crtc->copy_done); +} + +static void nulldisp_flip_work(struct work_struct *w) +{ + struct nulldisp_crtc *nulldisp_crtc = + container_of(w, struct 
nulldisp_crtc, flip_work); + struct drm_crtc *crtc = &nulldisp_crtc->base; + struct drm_device *dev = crtc->dev; + struct nulldisp_display_device *nulldisp_dev = dev->dev_private; + struct nulldisp_framebuffer *nulldisp_fb = + to_nulldisp_framebuffer(crtc->primary->fb); + + /* + * To prevent races with disconnect requests from user space, + * set the timeout before sending the flip request. + */ + nulldisp_set_flip_to(nulldisp_crtc); + + if (nlpvrdpy_send_flip(nulldisp_dev->nlpvrdpy, + &nulldisp_fb->base, + &nulldisp_fb->obj[0])) + goto fail_cancel; + + return; + +fail_cancel: + /* + * We can't flush the work, as we are running on the same + * single threaded workqueue as the work to be flushed. + */ + cancel_delayed_work(&nulldisp_crtc->flip_to_work); + + nulldisp_crtc_flip_done(nulldisp_crtc); +} + +static void nulldisp_crtc_flip_cb(struct dma_fence *fence, + struct dma_fence_cb *cb) +{ + struct nulldisp_flip_data *flip_data = + container_of(cb, struct nulldisp_flip_data, base); + struct drm_crtc *crtc = flip_data->crtc; + struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct nulldisp_display_device *nulldisp_dev = dev->dev_private; + + (void) queue_work(nulldisp_dev->workqueue, + &nulldisp_crtc->flip_work); + + complete_all(&nulldisp_crtc->flip_scheduled); +} + +static void nulldisp_crtc_flip_schedule_cb(struct dma_fence *fence, + struct dma_fence_cb *cb) +{ + struct nulldisp_flip_data *flip_data = + container_of(cb, struct nulldisp_flip_data, base); + int err = 0; + + if (flip_data->wait_fence) + err = dma_fence_add_callback(flip_data->wait_fence, + &flip_data->base, + nulldisp_crtc_flip_cb); + + if (!flip_data->wait_fence || err) { + if (err && err != -ENOENT) + DRM_ERROR("flip failed to wait on old buffer\n"); + nulldisp_crtc_flip_cb(flip_data->wait_fence, &flip_data->base); + } +} + +static int nulldisp_crtc_flip_schedule(struct drm_crtc *crtc, + struct drm_gem_object *obj, + struct drm_gem_object 
*old_obj) +{ + struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc); + struct dma_resv *resv = obj_to_resv(obj); + struct dma_resv *old_resv = obj_to_resv(old_obj); + struct nulldisp_flip_data *flip_data; + struct dma_fence *fence; + int err; + + flip_data = kmalloc(sizeof(*flip_data), GFP_KERNEL); + if (!flip_data) + return -ENOMEM; + + flip_data->crtc = crtc; + + ww_mutex_lock(&old_resv->lock, NULL); + flip_data->wait_fence = + dma_fence_get(dma_resv_get_excl(old_resv)); + + if (old_resv != resv) { + ww_mutex_unlock(&old_resv->lock); + ww_mutex_lock(&resv->lock, NULL); + } + + fence = dma_fence_get(dma_resv_get_excl(resv)); + ww_mutex_unlock(&resv->lock); + + nulldisp_crtc->flip_data = flip_data; + reinit_completion(&nulldisp_crtc->flip_scheduled); + atomic_set(&nulldisp_crtc->flip_status, + NULLDISP_CRTC_FLIP_STATUS_PENDING); + + if (fence) { + err = dma_fence_add_callback(fence, &flip_data->base, + nulldisp_crtc_flip_schedule_cb); + dma_fence_put(fence); + if (err && err != -ENOENT) + goto err_set_flip_status_none; + } + + if (!fence || err == -ENOENT) { + nulldisp_crtc_flip_schedule_cb(fence, &flip_data->base); + err = 0; + } + + return err; + +err_set_flip_status_none: + atomic_set(&nulldisp_crtc->flip_status, NULLDISP_CRTC_FLIP_STATUS_NONE); + dma_fence_put(flip_data->wait_fence); + kfree(flip_data); + return err; +} + +static int nulldisp_crtc_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) + , struct drm_modeset_acquire_ctx *ctx +#endif + ) +{ + struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc); + struct nulldisp_framebuffer *nulldisp_fb = to_nulldisp_framebuffer(fb); + struct nulldisp_framebuffer *nulldisp_old_fb = + to_nulldisp_framebuffer(crtc->primary->fb); + enum nulldisp_crtc_flip_status status; + unsigned long flags; + int err; + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + status = 
atomic_read(&nulldisp_crtc->flip_status); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + + if (status != NULLDISP_CRTC_FLIP_STATUS_NONE) + return -EBUSY; + + if (!(page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC)) { + err = drm_crtc_vblank_get(crtc); + if (err) + return err; + } + + nulldisp_crtc->old_fb = crtc->primary->fb; + nulldisp_crtc->flip_event = event; + nulldisp_crtc->flip_async = !!(page_flip_flags & + DRM_MODE_PAGE_FLIP_ASYNC); + + /* Set the crtc to point to the new framebuffer */ + crtc->primary->fb = fb; + + err = nulldisp_crtc_flip_schedule(crtc, nulldisp_fb->obj[0], + nulldisp_old_fb->obj[0]); + if (err) { + crtc->primary->fb = nulldisp_crtc->old_fb; + nulldisp_crtc->old_fb = NULL; + nulldisp_crtc->flip_event = NULL; + nulldisp_crtc->flip_async = false; + + DRM_ERROR("failed to schedule flip (err=%d)\n", err); + goto err_vblank_put; + } + + return 0; + +err_vblank_put: + if (!(page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC)) + drm_crtc_vblank_put(crtc); + return err; +} +#endif /* !defined(NULLDISP_USE_ATOMIC) */ + +static const struct drm_crtc_helper_funcs nulldisp_crtc_helper_funcs = { + .mode_fixup = nulldisp_crtc_helper_mode_fixup, +#if defined(NULLDISP_USE_ATOMIC) + .mode_set_nofb = nulldisp_crtc_helper_mode_set_nofb, + .atomic_flush = nulldisp_crtc_helper_atomic_flush, + .atomic_enable = nulldisp_crtc_helper_atomic_enable, + .atomic_disable = nulldisp_crtc_helper_atomic_disable, +#else + .dpms = nulldisp_crtc_helper_dpms, + .prepare = nulldisp_crtc_helper_prepare, + .commit = nulldisp_crtc_helper_commit, + .mode_set = nulldisp_crtc_helper_mode_set, + .mode_set_base = nulldisp_crtc_helper_mode_set_base, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) + .load_lut = nulldisp_crtc_helper_load_lut, +#endif + .mode_set_base_atomic = nulldisp_crtc_helper_mode_set_base_atomic, + .disable = nulldisp_crtc_helper_disable, +#endif /* defined(NULLDISP_USE_ATOMIC) */ +}; + +static const struct drm_crtc_funcs nulldisp_crtc_funcs = { + .destroy = 
nulldisp_crtc_destroy, +#if defined(NULLDISP_USE_ATOMIC) + .reset = drm_atomic_helper_crtc_reset, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +#else + .reset = NULL, + .cursor_set = NULL, + .cursor_move = NULL, + .gamma_set = NULL, + .set_config = drm_crtc_helper_set_config, + .page_flip = nulldisp_crtc_page_flip, +#endif +}; + +static bool nulldisp_queue_vblank_work(struct nulldisp_crtc *nulldisp_crtc) +{ + struct drm_crtc *crtc = &nulldisp_crtc->base; + struct nulldisp_display_device *nulldisp_dev = crtc->dev->dev_private; + int vrefresh; + const int vrefresh_default = 60; + + if (crtc->hwmode.vrefresh) { + vrefresh = crtc->hwmode.vrefresh; + } else { + vrefresh = vrefresh_default; + DRM_INFO_ONCE( + "vertical refresh rate is zero, defaulting to %d\n", + vrefresh); + } + + /* Returns false if work already queued, else true */ + return queue_delayed_work(nulldisp_dev->workqueue, + &nulldisp_crtc->vb_work, + usecs_to_jiffies(1000000/vrefresh)); +} + +static void nulldisp_handle_vblank(struct work_struct *w) +{ + struct delayed_work *dw = + container_of(w, struct delayed_work, work); + struct nulldisp_crtc *nulldisp_crtc = + container_of(dw, struct nulldisp_crtc, vb_work); + struct drm_crtc *crtc = &nulldisp_crtc->base; + struct drm_device *dev = crtc->dev; + enum nulldisp_crtc_flip_status status; + + /* + * Reschedule the handler, if necessary. This is done before + * calling drm_crtc_vblank_put, so that the work can be cancelled + * if vblank events are disabled. 
+ */ + if (drm_handle_vblank(dev, 0)) + (void) nulldisp_queue_vblank_work(nulldisp_crtc); + + status = atomic_read(&nulldisp_crtc->flip_status); + if (status == NULLDISP_CRTC_FLIP_STATUS_DONE) { + if (!nulldisp_crtc->flip_async) + nulldisp_crtc_flip_complete(crtc); +#if !defined(NULLDISP_USE_ATOMIC) + drm_crtc_vblank_put(crtc); +#endif + } + +} + +static struct nulldisp_crtc * +nulldisp_crtc_create(struct nulldisp_display_device *nulldisp_dev) +{ + struct nulldisp_crtc *nulldisp_crtc; + struct drm_crtc *crtc; + struct drm_plane *primary; + + nulldisp_crtc = kzalloc(sizeof(*nulldisp_crtc), GFP_KERNEL); + if (!nulldisp_crtc) + goto err_return; + + primary = kzalloc(sizeof(*primary), GFP_KERNEL); + if (!primary) + goto err_free_crtc; + + crtc = &nulldisp_crtc->base; + + atomic_set(&nulldisp_crtc->flip_status, NULLDISP_CRTC_FLIP_STATUS_NONE); +#if defined(NULLDISP_USE_ATOMIC) + init_completion(&nulldisp_crtc->flip_done); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)) + init_completion(&nulldisp_crtc->copy_done); +#endif +#else + init_completion(&nulldisp_crtc->flip_scheduled); + init_completion(&nulldisp_crtc->copy_done); +#endif + + if (drm_universal_plane_init(nulldisp_dev->dev, primary, 0, + &nulldisp_plane_funcs, + nulldisp_modeset_formats, + ARRAY_SIZE(nulldisp_modeset_formats), + nulldisp_primary_plane_modifiers, + DRM_PLANE_TYPE_PRIMARY, NULL)) { + goto err_free_primary; + } + +#if defined(NULLDISP_USE_ATOMIC) + drm_plane_helper_add(primary, &nulldisp_plane_helper_funcs); +#endif + + if (drm_crtc_init_with_planes(nulldisp_dev->dev, crtc, primary, + NULL, &nulldisp_crtc_funcs, NULL)) { + goto err_cleanup_plane; + } + + drm_crtc_helper_add(crtc, &nulldisp_crtc_helper_funcs); + + INIT_DELAYED_WORK(&nulldisp_crtc->vb_work, nulldisp_handle_vblank); +#if !defined(NULLDISP_USE_ATOMIC) + INIT_WORK(&nulldisp_crtc->flip_work, nulldisp_flip_work); + INIT_DELAYED_WORK(&nulldisp_crtc->copy_to_work, nulldisp_copy_to_work); + 
INIT_DELAYED_WORK(&nulldisp_crtc->flip_to_work, nulldisp_flip_to_work); +#endif + + DRM_DEBUG_DRIVER("[CRTC:%d]\n", crtc->base.id); + + return nulldisp_crtc; + +err_cleanup_plane: + drm_plane_cleanup(primary); +err_free_primary: + kfree(primary); +err_free_crtc: + kfree(nulldisp_crtc); +err_return: + return NULL; +} + + +/****************************************************************************** + * Connector functions + ******************************************************************************/ + +static int +nulldisp_validate_module_parameters(void) +{ + const struct nulldisp_module_params *module_params = + nulldisp_get_module_params(); + + if (!module_params->hdisplay || + !module_params->vdisplay || + !module_params->vrefresh || + (module_params->hdisplay > NULLDISP_FB_WIDTH_MAX) || + (module_params->vdisplay > NULLDISP_FB_HEIGHT_MAX)) + return -EINVAL; + + return 0; +} + +static bool +nulldisp_set_preferred_mode(struct drm_connector *connector, + uint32_t hdisplay, + uint32_t vdisplay, + uint32_t vrefresh) +{ + struct drm_display_mode *mode; + + /* + * Mark the first mode, matching the hdisplay, vdisplay and + * vrefresh, preferred. 
+ */ + list_for_each_entry(mode, &connector->probed_modes, head) + if (mode->hdisplay == hdisplay && + mode->vdisplay == vdisplay && + drm_mode_vrefresh(mode) == vrefresh) { + mode->type |= DRM_MODE_TYPE_PREFERRED; + return true; + } + + return false; +} + +static bool +nulldisp_connector_add_preferred_mode(struct drm_connector *connector, + uint32_t hdisplay, + uint32_t vdisplay, + uint32_t vrefresh) +{ + struct drm_display_mode *preferred_mode; + + preferred_mode = drm_cvt_mode(connector->dev, + hdisplay, vdisplay, vrefresh, + false, false, false); + if (!preferred_mode) { + DRM_DEBUG_DRIVER("[CONNECTOR:%s]:create mode %dx%d@%d failed\n", + connector->name, + hdisplay, + vdisplay, + vrefresh); + + return false; + } + + preferred_mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; + + drm_mode_probed_add(connector, preferred_mode); + + return true; +} + +/* + * Gather modes. Here we can get the EDID data from the monitor and + * turn it into drm_display_mode structures. + */ +static int +nulldisp_connector_helper_get_modes(struct drm_connector *connector) +{ + int modes_count; + struct drm_device *dev = connector->dev; + const struct nulldisp_module_params *module_params = + nulldisp_get_module_params(); + uint32_t hdisplay = module_params->hdisplay; + uint32_t vdisplay = module_params->vdisplay; + uint32_t vrefresh = module_params->vrefresh; + + /* Add common modes */ + modes_count = drm_add_modes_noedid(connector, + dev->mode_config.max_width, + dev->mode_config.max_height); + + /* + * Check if any of the connector modes match the preferred mode + * criteria specified by the module parameters. If the mode is + * found - flag it as preferred. Otherwise create the preferred + * mode based on the module parameters criteria, and flag it as + * preferred. 
+ */ + if (!nulldisp_set_preferred_mode(connector, + hdisplay, + vdisplay, + vrefresh)) + if (nulldisp_connector_add_preferred_mode(connector, + hdisplay, + vdisplay, + vrefresh)) + modes_count++; + + /* Sort the connector modes by relevance */ + drm_mode_sort(&connector->probed_modes); + + return modes_count; +} + +static int +nulldisp_connector_helper_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + /* + * This function is called on each gathered mode (e.g. via EDID) + * and gives the driver a chance to reject it if the hardware + * cannot support it. + */ + return MODE_OK; +} + +#if !defined(NULLDISP_USE_ATOMIC) +static struct drm_encoder * +nulldisp_connector_helper_best_encoder(struct drm_connector *connector) +{ + /* Pick the first encoder we find */ + if (connector->encoder_ids[0] != 0) { + struct drm_encoder *encoder; + + encoder = drm_encoder_find(connector->dev, + NULL, + connector->encoder_ids[0]); + if (encoder) { + DRM_DEBUG_DRIVER( + "[ENCODER:%d:%s] best for [CONNECTOR:%d:%s]\n", + encoder->base.id, + encoder->name, + connector->base.id, + connector->name); + return encoder; + } + } + + return NULL; +} +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) +static enum drm_connector_status +nulldisp_connector_detect(struct drm_connector *connector, + bool force) +{ + /* Return whether or not a monitor is attached to the connector */ + return connector_status_connected; +} +#endif + +static void nulldisp_connector_destroy(struct drm_connector *connector) +{ + DRM_DEBUG_DRIVER("[CONNECTOR:%d:%s]\n", + connector->base.id, + connector->name); + + drm_connector_update_edid_property(connector, NULL); + drm_connector_cleanup(connector); + + kfree(connector); +} + +static void nulldisp_connector_force(struct drm_connector *connector) +{ +} + +static const struct drm_connector_helper_funcs +nulldisp_connector_helper_funcs = { + .get_modes = nulldisp_connector_helper_get_modes, + .mode_valid = 
nulldisp_connector_helper_mode_valid, + /* + * For atomic, don't set atomic_best_encoder or best_encoder. This will + * cause the DRM core to fallback to drm_atomic_helper_best_encoder(). + * This is fine as we only have a single connector and encoder. + */ +#if !defined(NULLDISP_USE_ATOMIC) + .best_encoder = nulldisp_connector_helper_best_encoder, +#endif +}; + +static const struct drm_connector_funcs nulldisp_connector_funcs = { +#if defined(NULLDISP_USE_ATOMIC) + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +#else + .dpms = drm_helper_connector_dpms, + .reset = NULL, +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) + .detect = nulldisp_connector_detect, +#endif + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = nulldisp_connector_destroy, + .force = nulldisp_connector_force, +}; + +static struct drm_connector * +nulldisp_connector_create(struct nulldisp_display_device *nulldisp_dev, + int type) +{ + struct drm_connector *connector; + + connector = kzalloc(sizeof(*connector), GFP_KERNEL); + if (!connector) + return NULL; + + drm_connector_init(nulldisp_dev->dev, + connector, + &nulldisp_connector_funcs, + type); + drm_connector_helper_add(connector, &nulldisp_connector_helper_funcs); + + connector->dpms = DRM_MODE_DPMS_OFF; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; + connector->display_info.subpixel_order = SubPixelUnknown; + + DRM_DEBUG_DRIVER("[CONNECTOR:%d:%s]\n", + connector->base.id, + connector->name); + + return connector; +} + + +/****************************************************************************** + * Encoder functions + ******************************************************************************/ + +static void nulldisp_encoder_helper_dpms(struct drm_encoder *encoder, + int mode) +{ + /* + * Set the display power state or active 
encoder based on the mode. If + * the mode passed in is unsupported, the provider must use the next + * lowest power level. + */ +} + +static bool +nulldisp_encoder_helper_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + /* + * Fix up mode so that it's compatible with the hardware. The results + * should be stored in adjusted_mode (i.e. mode should be untouched). + */ + return true; +} + +static void nulldisp_encoder_helper_prepare(struct drm_encoder *encoder) +{ + /* + * Prepare the encoder for a mode change e.g. set the active encoder + * accordingly/turn the encoder off + */ +} + +static void nulldisp_encoder_helper_commit(struct drm_encoder *encoder) +{ + /* Turn the encoder back on/set the active encoder */ +} + +static void +nulldisp_encoder_helper_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + /* Setup the encoder for the new mode */ +} + +static void nulldisp_encoder_destroy(struct drm_encoder *encoder) +{ + DRM_DEBUG_DRIVER("[ENCODER:%d:%s]\n", encoder->base.id, encoder->name); + + drm_encoder_cleanup(encoder); + kfree(encoder); +} + +static const struct drm_encoder_helper_funcs nulldisp_encoder_helper_funcs = { + .dpms = nulldisp_encoder_helper_dpms, + .mode_fixup = nulldisp_encoder_helper_mode_fixup, + .prepare = nulldisp_encoder_helper_prepare, + .commit = nulldisp_encoder_helper_commit, + .mode_set = nulldisp_encoder_helper_mode_set, + .get_crtc = NULL, + .detect = NULL, + .disable = NULL, +}; + +static const struct drm_encoder_funcs nulldisp_encoder_funcs = { + .reset = NULL, + .destroy = nulldisp_encoder_destroy, +}; + +static struct drm_encoder * +nulldisp_encoder_create(struct nulldisp_display_device *nulldisp_dev, + int type) +{ + struct drm_encoder *encoder; + int err; + + encoder = kzalloc(sizeof(*encoder), GFP_KERNEL); + if (!encoder) + return ERR_PTR(-ENOMEM); + + err = 
drm_encoder_init(nulldisp_dev->dev, + encoder, + &nulldisp_encoder_funcs, + type, + NULL); + if (err) { + DRM_ERROR("Failed to initialise encoder\n"); + return ERR_PTR(err); + } + drm_encoder_helper_add(encoder, &nulldisp_encoder_helper_funcs); + + /* + * This is a bit field that's used to determine which + * CRTCs can drive this encoder. + */ + encoder->possible_crtcs = 0x1; + + DRM_DEBUG_DRIVER("[ENCODER:%d:%s]\n", encoder->base.id, encoder->name); + + return encoder; +} + + +/****************************************************************************** + * Framebuffer functions + ******************************************************************************/ + +#if defined(NULLDISP_USE_ATOMIC) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)) +static int +nulldisp_framebuffer_dirty(struct drm_framebuffer *framebuffer, + struct drm_file *file_priv, + unsigned int flags, + unsigned int color, + struct drm_clip_rect *clips, + unsigned int num_clips) +{ + struct nulldisp_display_device *nulldisp_dev = + framebuffer->dev->dev_private; + struct nulldisp_crtc *nulldisp_crtc = nulldisp_dev->nulldisp_crtc; + + reinit_completion(&nulldisp_crtc->copy_done); + + if (!nlpvrdpy_send_copy(nulldisp_dev->nlpvrdpy, + framebuffer, + &framebuffer->obj[0])) { + unsigned long res; + + res = wait_for_completion_timeout(&nulldisp_crtc->copy_done, + nulldisp_netlink_timeout()); + + if (!res) + DRM_ERROR("timed out waiting for remote update\n"); + } + + return 0; +} + +static const struct drm_framebuffer_funcs nulldisp_framebuffer_funcs = { + .destroy = drm_gem_fb_destroy, + .create_handle = drm_gem_fb_create_handle, + .dirty = nulldisp_framebuffer_dirty, +}; + +static struct drm_framebuffer * +nulldisp_fb_create(struct drm_device *dev, struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + return drm_gem_fb_create_with_funcs(dev, file, mode_cmd, + &nulldisp_framebuffer_funcs); +} +#else /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)) */ +#define nulldisp_fb_create 
drm_gem_fb_create_with_dirty +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)) */ +#else /* defined(NULLDISP_USE_ATOMIC) */ +static void nulldisp_framebuffer_destroy(struct drm_framebuffer *framebuffer) +{ + struct nulldisp_framebuffer *nulldisp_framebuffer = + to_nulldisp_framebuffer(framebuffer); + int i; + + DRM_DEBUG_DRIVER("[FB:%d]\n", framebuffer->base.id); + + drm_framebuffer_cleanup(framebuffer); + + for (i = 0; i < nulldisp_drm_fb_num_planes(framebuffer); i++) + drm_gem_object_put_unlocked(nulldisp_framebuffer->obj[i]); + + kfree(nulldisp_framebuffer); +} + +static int +nulldisp_framebuffer_create_handle(struct drm_framebuffer *framebuffer, + struct drm_file *file_priv, + unsigned int *handle) +{ + struct nulldisp_framebuffer *nulldisp_framebuffer = + to_nulldisp_framebuffer(framebuffer); + + DRM_DEBUG_DRIVER("[FB:%d]\n", framebuffer->base.id); + + return drm_gem_handle_create(file_priv, + nulldisp_framebuffer->obj[0], + handle); +} + +static int +nulldisp_framebuffer_dirty(struct drm_framebuffer *framebuffer, + struct drm_file *file_priv, + unsigned int flags, + unsigned int color, + struct drm_clip_rect *clips, + unsigned int num_clips) +{ + struct nulldisp_framebuffer *nulldisp_fb = + to_nulldisp_framebuffer(framebuffer); + struct nulldisp_display_device *nulldisp_dev = + framebuffer->dev->dev_private; + struct nulldisp_crtc *nulldisp_crtc = nulldisp_dev->nulldisp_crtc; + + /* + * To prevent races with disconnect requests from user space, + * set the timeout before sending the copy request. 
+ */ + nulldisp_set_copy_to(nulldisp_crtc); + + if (nlpvrdpy_send_copy(nulldisp_dev->nlpvrdpy, + &nulldisp_fb->base, + &nulldisp_fb->obj[0])) + goto fail_flush; + + wait_for_completion(&nulldisp_crtc->copy_done); + + return 0; + +fail_flush: + flush_delayed_work(&nulldisp_crtc->copy_to_work); + + wait_for_completion(&nulldisp_crtc->copy_done); + + return 0; + +} + +static const struct drm_framebuffer_funcs nulldisp_framebuffer_funcs = { + .destroy = nulldisp_framebuffer_destroy, + .create_handle = nulldisp_framebuffer_create_handle, + .dirty = nulldisp_framebuffer_dirty, +}; + +static int +nulldisp_framebuffer_init(struct drm_device *dev, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || \ + (defined(CHROMIUMOS_KERNEL) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) + const +#endif + struct drm_mode_fb_cmd2 *mode_cmd, + struct nulldisp_framebuffer *nulldisp_framebuffer, + struct drm_gem_object **obj) +{ + struct drm_framebuffer *fb = &nulldisp_framebuffer->base; + int err; + int i; + + fb->dev = dev; + + nulldisp_drm_fb_set_format(fb, mode_cmd->pixel_format); + + fb->width = mode_cmd->width; + fb->height = mode_cmd->height; + fb->flags = mode_cmd->flags; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + nulldisp_drm_fb_set_modifier(fb, mode_cmd->modifier[0]); +#endif + + for (i = 0; i < nulldisp_drm_fb_num_planes(fb); i++) { + fb->pitches[i] = mode_cmd->pitches[i]; + fb->offsets[i] = mode_cmd->offsets[i]; + + nulldisp_framebuffer->obj[i] = obj[i]; + } + + err = drm_framebuffer_init(dev, fb, &nulldisp_framebuffer_funcs); + if (err) { + DRM_ERROR("failed to initialise framebuffer structure (%d)\n", + err); + return err; + } + + DRM_DEBUG_DRIVER("[FB:%d]\n", fb->base.id); + + return 0; +} + +static struct drm_framebuffer * +nulldisp_fb_create(struct drm_device *dev, + struct drm_file *file_priv, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || \ + (defined(CHROMIUMOS_KERNEL) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) + const 
+#endif + struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct drm_gem_object *obj[NULLDISP_MAX_PLANES]; + struct nulldisp_framebuffer *nulldisp_framebuffer; + int err; + int i; + + nulldisp_framebuffer = kzalloc(sizeof(*nulldisp_framebuffer), + GFP_KERNEL); + if (!nulldisp_framebuffer) { + err = -ENOMEM; + goto fail_exit; + } + + for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) { + obj[i] = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]); + if (!obj[i]) { + DRM_ERROR("failed to find buffer with handle %u\n", + mode_cmd->handles[i]); + err = -ENOENT; + goto fail_unreference; + } + } + + err = nulldisp_framebuffer_init(dev, + mode_cmd, + nulldisp_framebuffer, + obj); + if (err) + goto fail_unreference; + + DRM_DEBUG_DRIVER("[FB:%d]\n", nulldisp_framebuffer->base.base.id); + + return &nulldisp_framebuffer->base; + +fail_unreference: + kfree(nulldisp_framebuffer); + + while (i--) + drm_gem_object_put_unlocked(obj[i]); + +fail_exit: + return ERR_PTR(err); +} +#endif /* defined(NULLDISP_USE_ATOMIC) */ + +static const struct drm_mode_config_funcs nulldisp_mode_config_funcs = { + .fb_create = nulldisp_fb_create, + .output_poll_changed = NULL, +#if defined(NULLDISP_USE_ATOMIC) + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +#endif +}; + +static int nulldisp_nl_flipped_cb(void *data) +{ + struct nulldisp_crtc *nulldisp_crtc = data; + +#if defined(NULLDISP_USE_ATOMIC) + complete(&nulldisp_crtc->flip_done); +#else + flush_delayed_work(&nulldisp_crtc->flip_to_work); +#endif + flush_delayed_work(&nulldisp_crtc->vb_work); + + return 0; +} + +static int nulldisp_nl_copied_cb(void *data) +{ +#if defined(NULLDISP_USE_ATOMIC) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)) + struct nulldisp_crtc *nulldisp_crtc = data; + + complete(&nulldisp_crtc->copy_done); +#endif +#else + struct nulldisp_crtc *nulldisp_crtc = data; + + flush_delayed_work(&nulldisp_crtc->copy_to_work); +#endif + return 0; +} + +static void 
nulldisp_nl_disconnect_cb(void *data) +{ + struct nulldisp_crtc *nulldisp_crtc = data; + +#if defined(NULLDISP_USE_ATOMIC) + complete(&nulldisp_crtc->flip_done); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)) + complete(&nulldisp_crtc->copy_done); +#endif +#else + flush_delayed_work(&nulldisp_crtc->flip_to_work); + flush_delayed_work(&nulldisp_crtc->copy_to_work); +#endif +} + +static int nulldisp_early_load(struct drm_device *dev) +{ + struct nulldisp_display_device *nulldisp_dev; + struct drm_connector *connector; + struct drm_encoder *encoder; + int err; + + platform_set_drvdata(to_platform_device(dev->dev), dev); + + nulldisp_dev = kzalloc(sizeof(*nulldisp_dev), GFP_KERNEL); + if (!nulldisp_dev) + return -ENOMEM; + + dev->dev_private = nulldisp_dev; + nulldisp_dev->dev = dev; + + drm_mode_config_init(dev); + + dev->mode_config.funcs = (void *)&nulldisp_mode_config_funcs; + dev->mode_config.min_width = NULLDISP_FB_WIDTH_MIN; + dev->mode_config.max_width = NULLDISP_FB_WIDTH_MAX; + dev->mode_config.min_height = NULLDISP_FB_HEIGHT_MIN; + dev->mode_config.max_height = NULLDISP_FB_HEIGHT_MAX; + dev->mode_config.fb_base = 0; + dev->mode_config.async_page_flip = true; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + dev->mode_config.allow_fb_modifiers = true; +#endif + + nulldisp_dev->nulldisp_crtc = nulldisp_crtc_create(nulldisp_dev); + if (!nulldisp_dev->nulldisp_crtc) { + DRM_ERROR("failed to create a CRTC.\n"); + + err = -ENOMEM; + goto err_config_cleanup; + } + + connector = nulldisp_connector_create(nulldisp_dev, + DRM_MODE_CONNECTOR_Unknown); + if (!connector) { + DRM_ERROR("failed to create a connector.\n"); + + err = -ENOMEM; + goto err_config_cleanup; + } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) + nulldisp_dev->connector = connector; +#endif + encoder = nulldisp_encoder_create(nulldisp_dev, + DRM_MODE_ENCODER_NONE); + if (IS_ERR(encoder)) { + DRM_ERROR("failed to create an encoder.\n"); + + err = PTR_ERR(encoder); + goto 
err_config_cleanup; + } + + err = drm_connector_attach_encoder(connector, encoder); + if (err) { + DRM_ERROR("failed to attach [ENCODER:%d:%s] to " + "[CONNECTOR:%d:%s] (err=%d)\n", + encoder->base.id, + encoder->name, + connector->base.id, + connector->name, + err); + goto err_config_cleanup; + } + +#if defined(LMA) + nulldisp_dev->pdp_gem_priv = pdp_gem_init(dev); + if (!nulldisp_dev->pdp_gem_priv) { + err = -ENOMEM; + goto err_config_cleanup; + } +#endif + nulldisp_dev->workqueue = + create_singlethread_workqueue(DRIVER_NAME); + if (!nulldisp_dev->workqueue) { + DRM_ERROR("failed to create work queue\n"); + goto err_gem_cleanup; + } + + err = drm_vblank_init(nulldisp_dev->dev, 1); + if (err) { + DRM_ERROR("failed to complete vblank init (err=%d)\n", err); + goto err_workqueue_cleanup; + } + + dev->irq_enabled = true; + + nulldisp_dev->nlpvrdpy = nlpvrdpy_create(dev, + nulldisp_nl_disconnect_cb, + nulldisp_dev->nulldisp_crtc, + nulldisp_nl_flipped_cb, + nulldisp_dev->nulldisp_crtc, + nulldisp_nl_copied_cb, + nulldisp_dev->nulldisp_crtc); + if (!nulldisp_dev->nlpvrdpy) { + DRM_ERROR("Netlink initialisation failed (err=%d)\n", err); + goto err_vblank_cleanup; + } + + return 0; + +err_vblank_cleanup: +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) + /* Called by drm_dev_fini in Linux 4.11.0 and later */ + drm_vblank_cleanup(dev); +#endif +err_workqueue_cleanup: + destroy_workqueue(nulldisp_dev->workqueue); + dev->irq_enabled = false; +err_gem_cleanup: +#if defined(LMA) + pdp_gem_cleanup(nulldisp_dev->pdp_gem_priv); +#endif +err_config_cleanup: + drm_mode_config_cleanup(dev); + kfree(nulldisp_dev); + return err; +} + +static int nulldisp_late_load(struct drm_device *dev) +{ + drm_mode_config_reset(dev); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) + { + struct nulldisp_display_device *nulldisp_dev = dev->dev_private; + int err; + + err = drm_connector_register(nulldisp_dev->connector); + if (err) { + DRM_ERROR( + "[CONNECTOR:%d:%s] failed to register 
(err=%d)\n", + nulldisp_dev->connector->base.id, + nulldisp_dev->connector->name, + err); + return err; + } + } +#endif + return 0; +} + +static void nulldisp_early_unload(struct drm_device *dev) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) + struct nulldisp_display_device *nulldisp_dev = dev->dev_private; + + drm_connector_unregister(nulldisp_dev->connector); +#endif +} + +static void nulldisp_late_unload(struct drm_device *dev) +{ + struct nulldisp_display_device *nulldisp_dev = dev->dev_private; + + nlpvrdpy_send_disconnect(nulldisp_dev->nlpvrdpy); + nlpvrdpy_destroy(nulldisp_dev->nlpvrdpy); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) + /* Called by drm_dev_fini in Linux 4.11.0 and later */ + drm_vblank_cleanup(dev); +#endif + destroy_workqueue(nulldisp_dev->workqueue); + + dev->irq_enabled = false; + +#if defined(LMA) + pdp_gem_cleanup(nulldisp_dev->pdp_gem_priv); +#endif + drm_mode_config_cleanup(dev); + + kfree(nulldisp_dev); +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) +static int nulldisp_load(struct drm_device *dev, unsigned long flags) +{ + int err; + + err = nulldisp_early_load(dev); + if (err) + return err; + + err = nulldisp_late_load(dev); + if (err) { + nulldisp_late_unload(dev); + return err; + } + + return 0; +} + +static int nulldisp_unload(struct drm_device *dev) +{ + nulldisp_early_unload(dev); + nulldisp_late_unload(dev); + + return 0; +} +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) +static void +nulldisp_crtc_flip_event_cancel(struct drm_crtc *crtc, struct drm_file *file) +{ + struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc); + unsigned long flags; + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + + if (nulldisp_crtc->flip_event && + nulldisp_crtc->flip_event->base.file_priv == file) { + struct drm_pending_event *pending_event = + &nulldisp_crtc->flip_event->base; + + pending_event->destroy(pending_event); + nulldisp_crtc->flip_event = NULL; + } + + 
spin_unlock_irqrestore(&crtc->dev->event_lock, flags); +} +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) +static void nulldisp_preclose(struct drm_device *dev, struct drm_file *file) +{ + struct drm_crtc *crtc; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) + nulldisp_crtc_flip_event_cancel(crtc, file); +} +#endif + +static void nulldisp_lastclose(struct drm_device *dev) +{ +#if defined(NULLDISP_USE_ATOMIC) + drm_atomic_helper_shutdown(dev); +#else + struct drm_crtc *crtc; + + drm_modeset_lock_all(dev); + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + if (crtc->primary->fb) { + struct drm_mode_set mode_set = { .crtc = crtc }; + int err; + + err = drm_mode_set_config_internal(&mode_set); + if (err) + DRM_ERROR( + "failed to disable crtc %p (err=%d)\n", + crtc, err); + } + } + drm_modeset_unlock_all(dev); +#endif +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) || \ + (defined(CHROMIUMOS_KERNEL) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) +static int nulldisp_enable_vblank(struct drm_device *dev, unsigned int crtc) +#else +static int nulldisp_enable_vblank(struct drm_device *dev, int crtc) +#endif +{ + struct nulldisp_display_device *nulldisp_dev = dev->dev_private; + + switch (crtc) { + case 0: + break; + default: +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) + DRM_ERROR("invalid crtc %u\n", crtc); +#else + DRM_ERROR("invalid crtc %d\n", crtc); +#endif + return -EINVAL; + } + + if (!nulldisp_queue_vblank_work(nulldisp_dev->nulldisp_crtc)) { + DRM_ERROR("work already queued\n"); + return -1; + } + + return 0; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) || \ + (defined(CHROMIUMOS_KERNEL) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) +static void nulldisp_disable_vblank(struct drm_device *dev, unsigned int crtc) +#else +static void nulldisp_disable_vblank(struct drm_device *dev, int crtc) +#endif +{ + struct nulldisp_display_device *nulldisp_dev = dev->dev_private; 
+ + switch (crtc) { + case 0: + break; + default: +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) + DRM_ERROR("invalid crtc %u\n", crtc); +#else + DRM_ERROR("invalid crtc %d\n", crtc); +#endif + return; + } + + /* + * Vblank events may be disabled from within the vblank handler, + * so don't wait for the work to complete. + */ + (void) cancel_delayed_work(&nulldisp_dev->nulldisp_crtc->vb_work); +} + +static const struct vm_operations_struct nulldisp_gem_vm_ops = { +#if defined(LMA) + .fault = pdp_gem_object_vm_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +#else + .fault = nulldisp_gem_object_vm_fault, + .open = nulldisp_gem_vm_open, + .close = nulldisp_gem_vm_close, +#endif +}; + +#if defined(LMA) +static int pdp_gem_dumb_create(struct drm_file *file, + struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + struct nulldisp_display_device *nulldisp_dev = dev->dev_private; + + return pdp_gem_dumb_create_priv(file, + dev, + nulldisp_dev->pdp_gem_priv, + args); +} + +static int nulldisp_gem_object_create_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file) +{ + struct drm_nulldisp_gem_create *args = data; + struct nulldisp_display_device *nulldisp_dev = dev->dev_private; + struct drm_pdp_gem_create pdp_args; + int err; + + if (args->flags) { + DRM_ERROR("invalid flags: %#08x\n", args->flags); + return -EINVAL; + } + + if (args->handle) { + DRM_ERROR("invalid handle (this should always be 0)\n"); + return -EINVAL; + } + + /* + * Remapping of nulldisp create args to pdp create args. + * + * Note: even though the nulldisp and pdp args are identical + * in this case, they may potentially change in future. 
+ */ + pdp_args.size = args->size; + pdp_args.flags = args->flags; + pdp_args.handle = args->handle; + + err = pdp_gem_object_create_ioctl_priv(dev, + nulldisp_dev->pdp_gem_priv, + &pdp_args, + file); + if (!err) + args->handle = pdp_args.handle; + + return err; +} + +static int nulldisp_gem_object_mmap_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file) +{ + struct drm_nulldisp_gem_mmap *args = data; + struct drm_pdp_gem_mmap pdp_args; + int err; + + pdp_args.handle = args->handle; + pdp_args.pad = args->pad; + pdp_args.offset = args->offset; + + err = pdp_gem_object_mmap_ioctl(dev, &pdp_args, file); + + if (!err) + args->offset = pdp_args.offset; + + return err; +} + +static int nulldisp_gem_object_cpu_prep_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file) +{ + struct drm_nulldisp_gem_cpu_prep *args = + (struct drm_nulldisp_gem_cpu_prep *)data; + struct drm_pdp_gem_cpu_prep pdp_args; + + pdp_args.handle = args->handle; + pdp_args.flags = args->flags; + + return pdp_gem_object_cpu_prep_ioctl(dev, &pdp_args, file); +} + +static int nulldisp_gem_object_cpu_fini_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file) +{ + struct drm_nulldisp_gem_cpu_fini *args = + (struct drm_nulldisp_gem_cpu_fini *)data; + struct drm_pdp_gem_cpu_fini pdp_args; + + pdp_args.handle = args->handle; + pdp_args.pad = args->pad; + + return pdp_gem_object_cpu_fini_ioctl(dev, &pdp_args, file); +} + +static void pdp_gem_object_free(struct drm_gem_object *obj) +{ + struct nulldisp_display_device *nulldisp_dev = obj->dev->dev_private; + + pdp_gem_object_free_priv(nulldisp_dev->pdp_gem_priv, obj); +} +#endif + +static const struct drm_ioctl_desc nulldisp_ioctls[] = { + DRM_IOCTL_DEF_DRV(NULLDISP_GEM_CREATE, + nulldisp_gem_object_create_ioctl, + DRM_AUTH | DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NULLDISP_GEM_MMAP, + nulldisp_gem_object_mmap_ioctl, + DRM_AUTH | DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NULLDISP_GEM_CPU_PREP, + 
nulldisp_gem_object_cpu_prep_ioctl, + DRM_AUTH | DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NULLDISP_GEM_CPU_FINI, + nulldisp_gem_object_cpu_fini_ioctl, + DRM_AUTH | DRM_UNLOCKED), +}; + +static int nulldisp_gem_mmap(struct file *file, struct vm_area_struct *vma) +{ + int err; + + err = netlink_gem_mmap(file, vma); +#if !defined(LMA) + if (!err) { + struct drm_file *file_priv = file->private_data; + struct drm_device *dev = file_priv->minor->dev; + struct drm_gem_object *obj; + + mutex_lock(&dev->struct_mutex); + obj = vma->vm_private_data; + + if (obj->import_attach) + err = dma_buf_mmap(obj->dma_buf, vma, 0); + else + err = nulldisp_gem_object_get_pages(obj); + + mutex_unlock(&dev->struct_mutex); + } +#endif + return err; +} + +static const struct file_operations nulldisp_driver_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, + .mmap = nulldisp_gem_mmap, + .poll = drm_poll, + .read = drm_read, + .llseek = noop_llseek, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif +}; + +static struct drm_driver nulldisp_drm_driver = { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + .load = NULL, + .unload = NULL, +#else + .load = nulldisp_load, + .unload = nulldisp_unload, +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) + .preclose = nulldisp_preclose, +#endif + .lastclose = nulldisp_lastclose, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) + .set_busid = drm_platform_set_busid, +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) + .get_vblank_counter = NULL, +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) + .get_vblank_counter = drm_vblank_no_hw_counter, +#else + .get_vblank_counter = drm_vblank_count, +#endif + .enable_vblank = nulldisp_enable_vblank, + .disable_vblank = nulldisp_disable_vblank, + + + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + 
+#if defined(LMA) + .gem_free_object = pdp_gem_object_free, + .gem_prime_export = pdp_gem_prime_export, + .gem_prime_import = pdp_gem_prime_import, + .gem_prime_import_sg_table = pdp_gem_prime_import_sg_table, + + .dumb_create = pdp_gem_dumb_create, + .dumb_map_offset = pdp_gem_dumb_map_offset, +#else + .gem_free_object = nulldisp_gem_object_free, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) + .gem_prime_export = nulldisp_gem_prime_export, +#else + .gem_prime_export = drm_gem_prime_export, +#endif + .gem_prime_import = drm_gem_prime_import, + .gem_prime_pin = nulldisp_gem_prime_pin, + .gem_prime_unpin = nulldisp_gem_prime_unpin, + .gem_prime_get_sg_table = nulldisp_gem_prime_get_sg_table, + .gem_prime_import_sg_table = nulldisp_gem_prime_import_sg_table, + .gem_prime_vmap = nulldisp_gem_prime_vmap, + .gem_prime_vunmap = nulldisp_gem_prime_vunmap, + .gem_prime_mmap = nulldisp_gem_prime_mmap, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) + .gem_prime_res_obj = nulldisp_gem_prime_res_obj, +#endif + .dumb_create = nulldisp_gem_dumb_create, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) + .dumb_map_offset = nulldisp_gem_dumb_map_offset, +#endif +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) + .dumb_destroy = drm_gem_dumb_destroy, +#endif + + .gem_vm_ops = &nulldisp_gem_vm_ops, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = PVRVERSION_MAJ, + .minor = PVRVERSION_MIN, + .patchlevel = PVRVERSION_BUILD, + + .driver_features = DRIVER_GEM | + DRIVER_MODESET | + NULLDISP_DRIVER_PRIME | + NULLDISP_DRIVER_ATOMIC, + .ioctls = nulldisp_ioctls, + .num_ioctls = ARRAY_SIZE(nulldisp_ioctls), + .fops = &nulldisp_driver_fops, +}; + +static int nulldisp_probe(struct platform_device *pdev) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + struct drm_device *ddev; + int ret; + + ddev = drm_dev_alloc(&nulldisp_drm_driver, &pdev->dev); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) + if (IS_ERR(ddev)) + return 
PTR_ERR(ddev); +#else + if (!ddev) + return -ENOMEM; +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) + /* Needed by drm_platform_set_busid */ + ddev->platformdev = pdev; +#endif + /* + * The load callback, called from drm_dev_register, is deprecated, + * because of potential race conditions. + */ + BUG_ON(nulldisp_drm_driver.load != NULL); + + ret = nulldisp_early_load(ddev); + if (ret) + goto err_drm_dev_put; + + ret = drm_dev_register(ddev, 0); + if (ret) + goto err_drm_dev_late_unload; + + ret = nulldisp_late_load(ddev); + if (ret) + goto err_drm_dev_unregister; + + drm_mode_config_reset(ddev); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", + nulldisp_drm_driver.name, + nulldisp_drm_driver.major, + nulldisp_drm_driver.minor, + nulldisp_drm_driver.patchlevel, + nulldisp_drm_driver.date, + ddev->primary->index); +#endif + return 0; + +err_drm_dev_unregister: + drm_dev_unregister(ddev); +err_drm_dev_late_unload: + nulldisp_late_unload(ddev); +err_drm_dev_put: + drm_dev_put(ddev); + return ret; +#else + return drm_platform_init(&nulldisp_drm_driver, pdev); +#endif +} + +static int nulldisp_remove(struct platform_device *pdev) +{ + struct drm_device *ddev = platform_get_drvdata(pdev); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + /* + * The unload callback, called from drm_dev_unregister, is + * deprecated. 
+ */ + BUG_ON(nulldisp_drm_driver.unload != NULL); + + nulldisp_early_unload(ddev); + + drm_dev_unregister(ddev); + + nulldisp_late_unload(ddev); + + drm_dev_put(ddev); +#else + drm_put_dev(ddev); +#endif + return 0; +} + +static void nulldisp_shutdown(struct platform_device *pdev) +{ +} + +static struct platform_device_id nulldisp_platform_device_id_table[] = { +#if defined(LMA) + { .name = APOLLO_DEVICE_NAME_PDP, .driver_data = 0 }, + { .name = ODN_DEVICE_NAME_PDP, .driver_data = 0 }, +#else + { .name = "nulldisp", .driver_data = 0 }, +#endif + { }, +}; + +static struct platform_driver nulldisp_platform_driver = { + .probe = nulldisp_probe, + .remove = nulldisp_remove, + .shutdown = nulldisp_shutdown, + .driver = { + .owner = THIS_MODULE, + .name = DRIVER_NAME, + }, + .id_table = nulldisp_platform_device_id_table, +}; + + +#if !defined(LMA) +static struct platform_device_info nulldisp_device_info = { + .name = "nulldisp", + .id = -1, +#if defined(NO_HARDWARE) + /* + * Not all cores have 40 bit physical support, but this + * will work unless > 32 bit address is returned on those cores. + * In the future this will be fixed properly. 
+ */ + .dma_mask = DMA_BIT_MASK(40), +#else + .dma_mask = DMA_BIT_MASK(32), +#endif +}; + +static struct platform_device *nulldisp_dev; +#endif + +static int __init nulldisp_init(void) +{ + int err; + + err = nulldisp_validate_module_parameters(); + if (err) { + DRM_ERROR("invalid module parameters (err=%d)\n", err); + return err; + } + + err = nlpvrdpy_register(); + if (err) { + DRM_ERROR("failed to register with netlink (err=%d)\n", err); + return err; + } + +#if !defined(LMA) + nulldisp_dev = platform_device_register_full(&nulldisp_device_info); + if (IS_ERR(nulldisp_dev)) { + err = PTR_ERR(nulldisp_dev); + nulldisp_dev = NULL; + goto err_unregister_family; + } +#endif + err = platform_driver_register(&nulldisp_platform_driver); + if (err) + goto err_unregister_family; + + return 0; + +err_unregister_family: + (void) nlpvrdpy_unregister(); + return err; +} + +static void __exit nulldisp_exit(void) +{ + int err; + + err = nlpvrdpy_unregister(); + BUG_ON(err); + +#if !defined(LMA) + if (nulldisp_dev) + platform_device_unregister(nulldisp_dev); +#endif + platform_driver_unregister(&nulldisp_platform_driver); +} + +module_init(nulldisp_init); +module_exit(nulldisp_exit); + +MODULE_AUTHOR("Imagination Technologies Ltd. "); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_drv.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_drv.h new file mode 100644 index 000000000000..969c06d548aa --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_drv.h @@ -0,0 +1,94 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef __DRM_NULLDISP_DRV_H__ +#define __DRM_NULLDISP_DRV_H__ + +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) +#include +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) +#define NULLDISP_USE_ATOMIC +#endif + +struct drm_framebuffer; + +/****************************************************************************** + * Linux compatibility functions + ******************************************************************************/ +static inline u32 nulldisp_drm_fb_format(struct drm_framebuffer *fb) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) + return fb->format->format; +#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) */ + return fb->pixel_format; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) */ +} + +static inline u64 nulldisp_drm_fb_modifier(struct drm_framebuffer *fb) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) + return fb->modifier; +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + return fb->modifier[0]; +#else + /* 0 represents DRM_FORMAT_MOD_NONE, doesn't exist before 4.1 */ + return 0; +#endif +} + +/****************************************************************************** + * DRM framebuffer support functions + ******************************************************************************/ +static inline int nulldisp_drm_fb_num_planes(struct drm_framebuffer *fb) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) + return fb->format->num_planes; +#else + return drm_format_num_planes(nulldisp_drm_fb_format(fb)); +#endif +} +#endif /* __DRM_NULLDISP_DRV_H__ */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_gem.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_gem.c new file mode 100644 index 000000000000..73b453ddc95d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_gem.c @@ -0,0 +1,640 
@@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pvr_dma_resv.h" +#include "drm_nulldisp_gem.h" +#include "nulldisp_drm.h" +#include "kernel_compatibility.h" + +struct nulldisp_gem_object { + struct drm_gem_object base; + + atomic_t pg_refcnt; + struct page **pages; + dma_addr_t *addrs; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) + struct dma_resv _resv; +#endif + struct dma_resv *resv; + + bool cpu_prep; + struct sg_table *import_sgt; +}; + +#define to_nulldisp_obj(obj) \ + container_of(obj, struct nulldisp_gem_object, base) + +int nulldisp_gem_object_get_pages(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct nulldisp_gem_object *nulldisp_obj = to_nulldisp_obj(obj); + struct page **pages; + int err; + + if (WARN_ON(obj->import_attach)) + return -EEXIST; + + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + + if (atomic_inc_return(&nulldisp_obj->pg_refcnt) == 1) { + unsigned int npages = obj->size >> PAGE_SHIFT; + dma_addr_t *addrs; + unsigned int i; + + pages = drm_gem_get_pages(obj); + if (IS_ERR(pages)) { + err = PTR_ERR(pages); + goto dec_refcnt; + } + + addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL); + if (!addrs) { + err = -ENOMEM; + goto free_pages; + } + + for (i = 0; i < npages; i++) { + addrs[i] = dma_map_page(dev->dev, pages[i], + 0, PAGE_SIZE, + 
DMA_BIDIRECTIONAL); + } + + nulldisp_obj->pages = pages; + nulldisp_obj->addrs = addrs; + } + + return 0; + +free_pages: + drm_gem_put_pages(obj, pages, false, false); +dec_refcnt: + atomic_dec(&nulldisp_obj->pg_refcnt); + return err; +} + +static void nulldisp_gem_object_put_pages(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct nulldisp_gem_object *nulldisp_obj = to_nulldisp_obj(obj); + + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + + if (WARN_ON(atomic_read(&nulldisp_obj->pg_refcnt) == 0)) + return; + + if (atomic_dec_and_test(&nulldisp_obj->pg_refcnt)) { + unsigned int npages = obj->size >> PAGE_SHIFT; + unsigned int i; + + for (i = 0; i < npages; i++) { + dma_unmap_page(dev->dev, nulldisp_obj->addrs[i], + PAGE_SIZE, DMA_BIDIRECTIONAL); + } + + kfree(nulldisp_obj->addrs); + nulldisp_obj->addrs = NULL; + + drm_gem_put_pages(obj, nulldisp_obj->pages, true, true); + nulldisp_obj->pages = NULL; + } +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) +vm_fault_t nulldisp_gem_object_vm_fault(struct vm_fault *vmf) +#else +int nulldisp_gem_object_vm_fault(struct vm_area_struct *vma, + struct vm_fault *vmf) +#endif +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) + struct vm_area_struct *vma = vmf->vma; +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) + unsigned long addr = vmf->address; +#else + unsigned long addr = (unsigned long)vmf->virtual_address; +#endif + struct drm_gem_object *obj = vma->vm_private_data; + struct nulldisp_gem_object *nulldisp_obj = to_nulldisp_obj(obj); + unsigned long pg_off; + struct page *page; + + /* + * nulldisp_gem_object_get_pages should have been called in + * nulldisp_gem_mmap so there's no need to do it here. 
+ */ + if (WARN_ON(atomic_read(&nulldisp_obj->pg_refcnt) == 0)) + return VM_FAULT_SIGBUS; + + pg_off = (addr - vma->vm_start) >> PAGE_SHIFT; + page = nulldisp_obj->pages[pg_off]; + + get_page(page); + vmf->page = page; + + return 0; +} + +void nulldisp_gem_vm_open(struct vm_area_struct *vma) +{ + struct drm_gem_object *obj = vma->vm_private_data; + + drm_gem_vm_open(vma); + + if (!obj->import_attach) { + struct drm_device *dev = obj->dev; + + mutex_lock(&dev->struct_mutex); + (void) nulldisp_gem_object_get_pages(obj); + mutex_unlock(&dev->struct_mutex); + } +} + +void nulldisp_gem_vm_close(struct vm_area_struct *vma) +{ + struct drm_gem_object *obj = vma->vm_private_data; + + if (!obj->import_attach) { + struct drm_device *dev = obj->dev; + + mutex_lock(&dev->struct_mutex); + (void) nulldisp_gem_object_put_pages(obj); + mutex_unlock(&dev->struct_mutex); + } + + drm_gem_vm_close(vma); +} + +void nulldisp_gem_object_free(struct drm_gem_object *obj) +{ + struct nulldisp_gem_object *nulldisp_obj = to_nulldisp_obj(obj); + + WARN_ON(atomic_read(&nulldisp_obj->pg_refcnt) != 0); + + if (obj->import_attach) { + kfree(nulldisp_obj->pages); + kfree(nulldisp_obj->addrs); + drm_gem_free_mmap_offset(obj); + drm_prime_gem_destroy(obj, nulldisp_obj->import_sgt); + } else { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) + dma_resv_fini(&nulldisp_obj->_resv); +#endif + drm_gem_object_release(obj); + } + + kfree(nulldisp_obj); +} + +int nulldisp_gem_prime_pin(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + int err; + + mutex_lock(&dev->struct_mutex); + err = nulldisp_gem_object_get_pages(obj); + mutex_unlock(&dev->struct_mutex); + + return err; +} + +void nulldisp_gem_prime_unpin(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + + mutex_lock(&dev->struct_mutex); + nulldisp_gem_object_put_pages(obj); + mutex_unlock(&dev->struct_mutex); +} + +struct sg_table * +nulldisp_gem_prime_get_sg_table(struct drm_gem_object *obj) +{ + struct 
nulldisp_gem_object *nulldisp_obj = to_nulldisp_obj(obj); + int nr_pages = obj->size >> PAGE_SHIFT; + + /* + * nulldisp_gem_prime_pin should have been called in which case we don't + * need to call nulldisp_gem_object_get_pages. + */ + if (WARN_ON(atomic_read(&nulldisp_obj->pg_refcnt) == 0)) + return NULL; + + return drm_prime_pages_to_sg(nulldisp_obj->pages, nr_pages); +} + +struct drm_gem_object * +nulldisp_gem_prime_import_sg_table(struct drm_device *dev, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + struct dma_buf_attachment *attach, +#else + size_t size, +#endif + struct sg_table *sgt) +{ + struct nulldisp_gem_object *nulldisp_obj; + struct drm_gem_object *obj; + struct page **pages; + dma_addr_t *addrs; + unsigned int npages; + + nulldisp_obj = kzalloc(sizeof(*nulldisp_obj), GFP_KERNEL); + if (!nulldisp_obj) + return NULL; + + nulldisp_obj->resv = attach->dmabuf->resv; + + obj = &nulldisp_obj->base; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)) + obj->resv = nulldisp_obj->resv; +#endif + + drm_gem_private_object_init(dev, obj, attach->dmabuf->size); + + npages = obj->size >> PAGE_SHIFT; + + pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL); + addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL); + if (!pages || !addrs) + goto exit_free_arrays; + + if (drm_prime_sg_to_page_addr_arrays(sgt, pages, addrs, npages)) + goto exit_free_arrays; + + nulldisp_obj->import_sgt = sgt; + nulldisp_obj->pages = pages; + nulldisp_obj->addrs = addrs; + + return obj; + +exit_free_arrays: + kfree(pages); + kfree(addrs); + drm_prime_gem_destroy(obj, sgt); + kfree(nulldisp_obj); + return NULL; +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) +struct dma_buf *nulldisp_gem_prime_export( + struct drm_device *dev, + struct drm_gem_object *obj, + int flags) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) + /* Read/write access required */ + flags |= O_RDWR; +#endif + return drm_gem_prime_export(dev, obj, flags); +} +#endif + +void 
*nulldisp_gem_prime_vmap(struct drm_gem_object *obj) +{ + struct nulldisp_gem_object *nulldisp_obj = to_nulldisp_obj(obj); + int nr_pages = obj->size >> PAGE_SHIFT; + + /* + * nulldisp_gem_prime_pin should have been called in which case we don't + * need to call nulldisp_gem_object_get_pages. + */ + if (WARN_ON(atomic_read(&nulldisp_obj->pg_refcnt) == 0)) + return NULL; + + + return vmap(nulldisp_obj->pages, nr_pages, 0, PAGE_KERNEL); +} + +void nulldisp_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +{ + vunmap(vaddr); +} + +int nulldisp_gem_prime_mmap(struct drm_gem_object *obj, + struct vm_area_struct *vma) +{ + int err; + + mutex_lock(&obj->dev->struct_mutex); + err = nulldisp_gem_object_get_pages(obj); + if (!err) + err = drm_gem_mmap_obj(obj, obj->size, vma); + mutex_unlock(&obj->dev->struct_mutex); + + return err; +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) +struct dma_resv * +nulldisp_gem_prime_res_obj(struct drm_gem_object *obj) +{ + struct nulldisp_gem_object *nulldisp_obj = to_nulldisp_obj(obj); + + return nulldisp_obj->resv; +} +#endif + +int nulldisp_gem_object_mmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_nulldisp_gem_mmap *args = + (struct drm_nulldisp_gem_mmap *)data; + + if (args->pad) { + DRM_ERROR("invalid pad (this should always be 0)\n"); + return -EINVAL; + } + + if (args->offset) { + DRM_ERROR("invalid offset (this should always be 0)\n"); + return -EINVAL; + } + + return nulldisp_gem_dumb_map_offset(file, dev, args->handle, + &args->offset); +} + +int nulldisp_gem_object_cpu_prep_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_nulldisp_gem_cpu_prep *args = + (struct drm_nulldisp_gem_cpu_prep *)data; + + struct drm_gem_object *obj; + struct nulldisp_gem_object *nulldisp_obj; + bool write = !!(args->flags & NULLDISP_GEM_CPU_PREP_WRITE); + bool wait = !(args->flags & NULLDISP_GEM_CPU_PREP_NOWAIT); + int err; + + if (args->flags & 
~(NULLDISP_GEM_CPU_PREP_READ | + NULLDISP_GEM_CPU_PREP_WRITE | + NULLDISP_GEM_CPU_PREP_NOWAIT)) { + DRM_ERROR("invalid flags: %#08x\n", args->flags); + return -EINVAL; + } + + mutex_lock(&dev->struct_mutex); + + obj = drm_gem_object_lookup(file, args->handle); + if (!obj) { + err = -ENOENT; + goto exit_unlock; + } + + nulldisp_obj = to_nulldisp_obj(obj); + + if (nulldisp_obj->cpu_prep) { + err = -EBUSY; + goto exit_unref; + } + + if (wait) { + long lerr; + + lerr = dma_resv_wait_timeout_rcu(nulldisp_obj->resv, + write, + true, + 30 * HZ); + + /* Remap return value (0 indicates busy state, > 0 success) */ + if (lerr > 0) + err = 0; + else if (!lerr) + err = -EBUSY; + else + err = lerr; + } else { + /* + * Remap return value (false indicates busy state, + * true success). + */ + if (!dma_resv_test_signaled_rcu(nulldisp_obj->resv, + write)) + err = -EBUSY; + else + err = 0; + } + + if (!err) + nulldisp_obj->cpu_prep = true; +exit_unref: + drm_gem_object_put_unlocked(obj); +exit_unlock: + mutex_unlock(&dev->struct_mutex); + return err; +} + +int nulldisp_gem_object_cpu_fini_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_nulldisp_gem_cpu_fini *args = + (struct drm_nulldisp_gem_cpu_fini *)data; + + struct drm_gem_object *obj; + struct nulldisp_gem_object *nulldisp_obj; + int err; + + if (args->pad) { + DRM_ERROR("invalid pad (this should always be 0)\n"); + return -EINVAL; + } + + mutex_lock(&dev->struct_mutex); + + obj = drm_gem_object_lookup(file, args->handle); + if (!obj) { + err = -ENOENT; + goto exit_unlock; + } + + nulldisp_obj = to_nulldisp_obj(obj); + + if (!nulldisp_obj->cpu_prep) { + err = -EINVAL; + goto exit_unref; + } + + nulldisp_obj->cpu_prep = false; + err = 0; +exit_unref: + drm_gem_object_put_unlocked(obj); +exit_unlock: + mutex_unlock(&dev->struct_mutex); + return err; +} + +static int nulldisp_gem_object_create_priv(struct drm_file *file, + struct drm_device *dev, + u64 size, + u32 *handle) +{ + struct 
nulldisp_gem_object *nulldisp_obj; + struct drm_gem_object *obj; + struct address_space *mapping; + int err; + + nulldisp_obj = kzalloc(sizeof(*nulldisp_obj), GFP_KERNEL); + if (!nulldisp_obj) + return -ENOMEM; + + obj = &nulldisp_obj->base; + + err = drm_gem_object_init(dev, obj, size); + if (err) { + kfree(nulldisp_obj); + return err; + } + + mapping = file_inode(obj->filp)->i_mapping; + mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32 | __GFP_NORETRY); + + err = drm_gem_handle_create(file, obj, handle); + if (err) + goto exit; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) + dma_resv_init(&nulldisp_obj->_resv); + nulldisp_obj->resv = &nulldisp_obj->_resv; +#else + nulldisp_obj->resv = nulldisp_obj->base.resv; +#endif + +exit: + drm_gem_object_put_unlocked(obj); + return err; +} + +int nulldisp_gem_object_create_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file) +{ + struct drm_nulldisp_gem_create *args = data; + u32 handle; + int err; + u64 aligned_size; + + if (args->flags) { + DRM_ERROR("invalid flags: %#08x\n", args->flags); + return -EINVAL; + } + + if (args->handle) { + DRM_ERROR("invalid handle (this should always be 0)\n"); + return -EINVAL; + } + + aligned_size = PAGE_ALIGN(args->size); + + err = nulldisp_gem_object_create_priv(file, dev, aligned_size, &handle); + if (!err) + args->handle = handle; + + return err; +} + +int nulldisp_gem_dumb_create(struct drm_file *file, + struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + u32 handle; + u32 pitch; + size_t size; + int err; + + pitch = args->width * (ALIGN(args->bpp, 8) >> 3); + size = PAGE_ALIGN(pitch * args->height); + + err = nulldisp_gem_object_create_priv(file, dev, size, &handle); + if (!err) { + args->handle = handle; + args->pitch = pitch; + args->size = size; + } + + return err; +} + +int nulldisp_gem_dumb_map_offset(struct drm_file *file, + struct drm_device *dev, + uint32_t handle, + uint64_t *offset) +{ + struct drm_gem_object *obj; + int err; + + 
mutex_lock(&dev->struct_mutex); + + obj = drm_gem_object_lookup(file, handle); + if (!obj) { + err = -ENOENT; + goto exit_unlock; + } + + err = drm_gem_create_mmap_offset(obj); + if (err) + goto exit_obj_unref; + + *offset = drm_vma_node_offset_addr(&obj->vma_node); + +exit_obj_unref: + drm_gem_object_put_unlocked(obj); +exit_unlock: + mutex_unlock(&dev->struct_mutex); + return err; +} + +struct dma_resv *nulldisp_gem_get_resv(struct drm_gem_object *obj) +{ + return (to_nulldisp_obj(obj)->resv); +} diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_gem.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_gem.h new file mode 100644 index 000000000000..d4f961fda963 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_gem.h @@ -0,0 +1,145 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__DRM_NULLDISP_H__) +#define __DRM_NULLDISP_H__ + +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#include +#include +#include +#include + +struct dma_buf_attachment; +struct vm_area_struct; +struct vm_fault; +#else +#include +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) +#include +#endif + +int nulldisp_gem_object_get_pages(struct drm_gem_object *obj); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)) +typedef int vm_fault_t; +#endif +vm_fault_t nulldisp_gem_object_vm_fault(struct vm_fault *vmf); +#else +int nulldisp_gem_object_vm_fault(struct vm_area_struct *vma, + struct vm_fault *vmf); +#endif + +void nulldisp_gem_vm_open(struct vm_area_struct *vma); + +void nulldisp_gem_vm_close(struct vm_area_struct *vma); + +void nulldisp_gem_object_free(struct drm_gem_object *obj); + +int nulldisp_gem_prime_pin(struct drm_gem_object *obj); + +void nulldisp_gem_prime_unpin(struct drm_gem_object *obj); + +struct sg_table *nulldisp_gem_prime_get_sg_table(struct drm_gem_object *obj); + +struct drm_gem_object * +nulldisp_gem_prime_import_sg_table(struct drm_device *dev, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + struct dma_buf_attachment *attach, +#else + size_t size, +#endif + struct sg_table *sgt); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) +struct dma_buf *nulldisp_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *obj, + int flags); +#endif + +void *nulldisp_gem_prime_vmap(struct drm_gem_object *obj); + +void nulldisp_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); + +int nulldisp_gem_prime_mmap(struct drm_gem_object *obj, + struct vm_area_struct *vma); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) +struct dma_resv * +nulldisp_gem_prime_res_obj(struct drm_gem_object *obj); +#endif + +int nulldisp_gem_dumb_create(struct drm_file 
*file, + struct drm_device *dev, + struct drm_mode_create_dumb *args); + +int nulldisp_gem_dumb_map_offset(struct drm_file *file, + struct drm_device *dev, + uint32_t handle, + uint64_t *offset); + +/* internal interfaces */ +struct dma_resv *nulldisp_gem_get_resv(struct drm_gem_object *obj); + +int nulldisp_gem_object_mmap_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file); + +int nulldisp_gem_object_cpu_prep_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file); + +int nulldisp_gem_object_cpu_fini_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file); + +int nulldisp_gem_object_create_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file); + +#endif /* !defined(__DRM_NULLDISP_H__) */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_netlink.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_netlink.c new file mode 100644 index 000000000000..27d3e1f602e2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_netlink.c @@ -0,0 +1,656 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#include +#include +#include +#include +#include +#else +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "drm_netlink_gem.h" +#include "drm_nulldisp_drv.h" +#include "drm_nulldisp_netlink.h" +#include "kernel_compatibility.h" + +#include "netlink.h" + +struct nlpvrdpy { + atomic_t connected; + struct net *net; + u32 dst_portid; + struct drm_device *dev; + nlpvrdpy_disconnect_cb disconnect_cb; + void *disconnect_cb_data; + nlpvrdpy_flipped_cb flipped_cb; + void *flipped_cb_data; + nlpvrdpy_copied_cb copied_cb; + void *copied_cb_data; + struct mutex mutex; + struct list_head nl_list; + bool gem_names_required; +}; +#define NLPVRDPY_MINOR(nlpvrdpy) \ + ((unsigned int)((nlpvrdpy)->dev->primary->index)) + +/* Command internal flags */ +#define NLPVRDPY_CIF_NLPVRDPY_NOT_CONNECTED 0x00000001 +#define NLPVRDPY_CIF_NLPVRDPY 0x00000002 + +static LIST_HEAD(nlpvrdpy_list); +static DEFINE_MUTEX(nlpvrdpy_list_mutex); + +static inline void nlpvrdpy_lock(struct nlpvrdpy *nlpvrdpy) +{ + mutex_lock(&nlpvrdpy->mutex); +} + +static inline void nlpvrdpy_unlock(struct nlpvrdpy *nlpvrdpy) +{ + mutex_unlock(&nlpvrdpy->mutex); +} + +struct nlpvrdpy *nlpvrdpy_create(struct drm_device *dev, + nlpvrdpy_disconnect_cb disconnect_cb, + void *disconnect_cb_data, + nlpvrdpy_flipped_cb flipped_cb, + void *flipped_cb_data, + nlpvrdpy_copied_cb copied_cb, + void *copied_cb_data) +{ + struct nlpvrdpy *nlpvrdpy = kzalloc(sizeof(*nlpvrdpy), GFP_KERNEL); + + if (!nlpvrdpy) + return NULL; + + mutex_init(&nlpvrdpy->mutex); + INIT_LIST_HEAD(&nlpvrdpy->nl_list); + + atomic_set(&nlpvrdpy->connected, 0); + + nlpvrdpy->dev = dev; + nlpvrdpy->disconnect_cb = disconnect_cb; + nlpvrdpy->disconnect_cb_data = disconnect_cb_data; + nlpvrdpy->flipped_cb = flipped_cb; + 
nlpvrdpy->flipped_cb_data = flipped_cb_data; + nlpvrdpy->copied_cb = copied_cb; + nlpvrdpy->copied_cb_data = copied_cb_data; + + mutex_lock(&nlpvrdpy_list_mutex); + list_add_tail(&nlpvrdpy->nl_list, &nlpvrdpy_list); + mutex_unlock(&nlpvrdpy_list_mutex); + + return nlpvrdpy; +} + +void nlpvrdpy_destroy(struct nlpvrdpy *nlpvrdpy) +{ + if (!nlpvrdpy) + return; + + mutex_lock(&nlpvrdpy_list_mutex); + nlpvrdpy_lock(nlpvrdpy); + list_del(&nlpvrdpy->nl_list); + nlpvrdpy_unlock(nlpvrdpy); + mutex_unlock(&nlpvrdpy_list_mutex); + + mutex_destroy(&nlpvrdpy->mutex); + + kfree(nlpvrdpy); +} + +static struct nlpvrdpy *nlpvrdpy_lookup(u32 minor) +{ + struct nlpvrdpy *nlpvrdpy = NULL; + struct nlpvrdpy *iter; + + mutex_lock(&nlpvrdpy_list_mutex); + list_for_each_entry(iter, &nlpvrdpy_list, nl_list) { + if (NLPVRDPY_MINOR(iter) == minor) { + nlpvrdpy = iter; + nlpvrdpy_lock(nlpvrdpy); + break; + } + } + mutex_unlock(&nlpvrdpy_list_mutex); + + return nlpvrdpy; +} + +static int nlpvrdpy_pre_cmd(const struct genl_ops *ops, + struct sk_buff *skb, + struct genl_info *info) +{ + struct nlattr **attrs = info->attrs; + struct nlpvrdpy *nlpvrdpy = NULL; + int ret; + + if (ops->internal_flags & NLPVRDPY_CIF_NLPVRDPY_NOT_CONNECTED) { + if (!(ops->flags & GENL_ADMIN_PERM)) + return -EINVAL; + } + + if (ops->internal_flags & (NLPVRDPY_CIF_NLPVRDPY_NOT_CONNECTED | + NLPVRDPY_CIF_NLPVRDPY)) { + u32 minor; + + if (!attrs[NLPVRDPY_ATTR_MINOR]) + return -EINVAL; + + minor = nla_get_u32(attrs[NLPVRDPY_ATTR_MINOR]); + + nlpvrdpy = nlpvrdpy_lookup(minor); + if (!nlpvrdpy) + return -ENODEV; + + if (ops->internal_flags & NLPVRDPY_CIF_NLPVRDPY) { + if (!atomic_read(&nlpvrdpy->connected)) { + ret = -ENOTCONN; + goto err_unlock; + } + if ((nlpvrdpy->net != genl_info_net(info)) || + (nlpvrdpy->dst_portid != info->snd_portid)) { + ret = -EPROTO; + goto err_unlock; + } + } + + info->user_ptr[0] = nlpvrdpy; + } + + ret = 0; + +err_unlock: + nlpvrdpy_unlock(nlpvrdpy); + return ret; +} + +static void 
nlpvrdpy_post_cmd(const struct genl_ops *ops, + struct sk_buff *skb, + struct genl_info *info) +{ +} + +static struct genl_family nlpvrdpy_family = { + .name = "nlpvrdpy", + .version = 1, + .maxattr = NLPVRDPY_ATTR_MAX, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)) + .policy = nlpvrdpy_policy, +#endif + .pre_doit = &nlpvrdpy_pre_cmd, + .post_doit = &nlpvrdpy_post_cmd +}; + +/* Must be called with the struct nlpvrdpy mutex held */ +static int nlpvrdpy_send_msg_locked(struct nlpvrdpy *nlpvrdpy, + struct sk_buff *msg) +{ + int err; + + if (atomic_read(&nlpvrdpy->connected)) { + err = genlmsg_unicast(nlpvrdpy->net, msg, nlpvrdpy->dst_portid); + if (err == -ECONNREFUSED) + atomic_set(&nlpvrdpy->connected, 0); + } else { + err = -ENOTCONN; + nlmsg_free(msg); + } + + return err; +} + +static int nlpvrdpy_send_msg(struct nlpvrdpy *nlpvrdpy, struct sk_buff *msg) +{ + int err; + + nlpvrdpy_lock(nlpvrdpy); + err = nlpvrdpy_send_msg_locked(nlpvrdpy, msg); + nlpvrdpy_unlock(nlpvrdpy); + + return err; +} + +void nlpvrdpy_send_disconnect(struct nlpvrdpy *nlpvrdpy) +{ + struct sk_buff *msg; + void *hdr; + int err; + + if (!atomic_read(&nlpvrdpy->connected)) + return; + + msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!msg) + return; + + hdr = genlmsg_put(msg, nlpvrdpy->dst_portid, 0, + &nlpvrdpy_family, 0, NLPVRDPY_CMD_DISCONNECT); + if (!hdr) + goto err_msg_free; + + err = nla_put_u32(msg, NLPVRDPY_ATTR_MINOR, NLPVRDPY_MINOR(nlpvrdpy)); + if (err) + goto err_msg_free; + + genlmsg_end(msg, hdr); + + nlpvrdpy_lock(nlpvrdpy); + + (void) nlpvrdpy_send_msg_locked(nlpvrdpy, msg); + + atomic_set(&nlpvrdpy->connected, 0); + nlpvrdpy->net = NULL; + nlpvrdpy->dst_portid = 0; + + nlpvrdpy_unlock(nlpvrdpy); + + return; + +err_msg_free: + nlmsg_free(msg); +} + +static int nlpvrdpy_get_offsets_and_sizes(struct drm_framebuffer *fb, + struct drm_gem_object **objs, + u64 *addr, u64 *size) +{ + int i; + + for (i = 0; i < nulldisp_drm_fb_num_planes(fb); i++) { + int err; + struct 
drm_gem_object *obj = objs[i]; + + err = drm_gem_create_mmap_offset(obj); + if (err) { + DRM_ERROR( + "Failed to get mmap offset for buffer[%d] = %p\n", + i, obj); + return err; + } + + addr[i] = drm_vma_node_offset_addr(&obj->vma_node); + size[i] = obj->size; + } + + return 0; +} + +static int nlpvrdpy_put_fb_attributes(struct sk_buff *msg, + struct drm_framebuffer *fb, + struct nlpvrdpy *nlpvrdpy, + struct drm_gem_object **objs) +{ +#define RETURN_ON_ERROR(f) \ + { \ + int err = (f); \ + if (err) { \ + pr_err("%s: command failed: %s", __func__, #f); \ + return err; \ + } \ + } + + int i; + const int num_planes = nulldisp_drm_fb_num_planes(fb); + u64 plane_addr[NLPVRDPY_MAX_NUM_PLANES], + plane_size[NLPVRDPY_MAX_NUM_PLANES]; + + RETURN_ON_ERROR(nlpvrdpy_get_offsets_and_sizes(fb, objs, &plane_addr[0], + &plane_size[0])); + + RETURN_ON_ERROR(nla_put_u32(msg, NLPVRDPY_ATTR_MINOR, + NLPVRDPY_MINOR(nlpvrdpy))); + + RETURN_ON_ERROR(nla_put_u8(msg, NLPVRDPY_ATTR_NUM_PLANES, num_planes)); + + RETURN_ON_ERROR(nla_put_u32(msg, NLPVRDPY_ATTR_WIDTH, fb->width)); + RETURN_ON_ERROR(nla_put_u32(msg, NLPVRDPY_ATTR_HEIGHT, fb->height)); + RETURN_ON_ERROR(nla_put_u32(msg, NLPVRDPY_ATTR_PIXFMT, + nulldisp_drm_fb_format(fb))); + RETURN_ON_ERROR(nla_put_u64_64bit(msg, + NLPVRDPY_ATTR_FB_MODIFIER, + nulldisp_drm_fb_modifier(fb), + NLPVRDPY_ATTR_PAD)); + + /* IMG_COLORSPACE_BT601_CONFORMANT_RANGE */ + RETURN_ON_ERROR(nla_put_u8(msg, NLPVRDPY_ATTR_YUV_CSC, 1)); + + /* 8-bit per sample */ + RETURN_ON_ERROR(nla_put_u8(msg, NLPVRDPY_ATTR_YUV_BPP, 8)); + + for (i = 0; i < num_planes; i++) { + RETURN_ON_ERROR(nla_put_u64_64bit(msg, + NLPVRDPY_ATTR_PLANE(i, ADDR), + plane_addr[i], + NLPVRDPY_ATTR_PAD)); + RETURN_ON_ERROR(nla_put_u64_64bit(msg, + NLPVRDPY_ATTR_PLANE(i, SIZE), + plane_size[i], + NLPVRDPY_ATTR_PAD)); + RETURN_ON_ERROR(nla_put_u64_64bit(msg, + NLPVRDPY_ATTR_PLANE(i, + OFFSET), + fb->offsets[i], + NLPVRDPY_ATTR_PAD)); + RETURN_ON_ERROR(nla_put_u64_64bit(msg, + 
NLPVRDPY_ATTR_PLANE(i, PITCH), + fb->pitches[i], + NLPVRDPY_ATTR_PAD)); + RETURN_ON_ERROR(nla_put_u32(msg, + NLPVRDPY_ATTR_PLANE(i, + GEM_OBJ_NAME), + (u32)objs[0]->name)); + } + + WARN_ONCE(num_planes > NLPVRDPY_MAX_NUM_PLANES, + "NLPVRDPY_MAX_NUM_PLANES = [%d], num_planes = [%d]\n", + NLPVRDPY_MAX_NUM_PLANES, num_planes); + + return 0; +#undef RETURN_ON_ERROR +} + +static int nlpvrdpy_name_gem_obj(struct drm_device *dev, + struct drm_gem_object *obj) +{ + int ret; + + mutex_lock(&dev->object_name_lock); + if (!obj->name) { + ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL); + if (ret < 0) + goto exit_unlock; + + obj->name = ret; + } + + ret = 0; + +exit_unlock: + mutex_unlock(&dev->object_name_lock); + return ret; +} + +static int nlpvrdpy_name_gem_objs(struct drm_framebuffer *fb, + struct drm_gem_object **objs) +{ + int i; + struct drm_device *dev = fb->dev; + + for (i = 0; i < nulldisp_drm_fb_num_planes(fb); i++) { + int err = nlpvrdpy_name_gem_obj(dev, objs[i]); + + if (err < 0) + return err; + } + + return 0; +} + +int nlpvrdpy_send_flip(struct nlpvrdpy *nlpvrdpy, + struct drm_framebuffer *fb, + struct drm_gem_object **objs) +{ + struct sk_buff *msg; + void *hdr; + int err; + + if (!atomic_read(&nlpvrdpy->connected)) + return -ENOTCONN; + + if (nlpvrdpy->gem_names_required) { + err = nlpvrdpy_name_gem_objs(fb, objs); + if (err) + return err; + } + + msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + hdr = genlmsg_put(msg, nlpvrdpy->dst_portid, 0, + &nlpvrdpy_family, 0, NLPVRDPY_CMD_FLIP); + if (!hdr) { + err = -ENOMEM; + goto err_msg_free; + } + + err = nlpvrdpy_put_fb_attributes(msg, fb, nlpvrdpy, objs); + if (err) + goto err_msg_free; + + genlmsg_end(msg, hdr); + + return nlpvrdpy_send_msg(nlpvrdpy, msg); + +err_msg_free: + nlmsg_free(msg); + return err; +} + +int nlpvrdpy_send_copy(struct nlpvrdpy *nlpvrdpy, + struct drm_framebuffer *fb, + struct drm_gem_object **objs) +{ + struct sk_buff *msg; + void *hdr; + int 
err; + + if (!atomic_read(&nlpvrdpy->connected)) + return -ENOTCONN; + + if (nlpvrdpy->gem_names_required) { + err = nlpvrdpy_name_gem_objs(fb, objs); + if (err) + return err; + } + + msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + hdr = genlmsg_put(msg, nlpvrdpy->dst_portid, 0, + &nlpvrdpy_family, 0, NLPVRDPY_CMD_COPY); + if (!hdr) { + err = -ENOMEM; + goto err_msg_free; + } + + err = nlpvrdpy_put_fb_attributes(msg, fb, nlpvrdpy, objs); + if (err) + goto err_msg_free; + + genlmsg_end(msg, hdr); + + return nlpvrdpy_send_msg(nlpvrdpy, msg); + +err_msg_free: + nlmsg_free(msg); + return err; +} + +static int nlpvrdpy_cmd_connect(struct sk_buff *skb, struct genl_info *info) +{ + struct nlpvrdpy *nlpvrdpy = info->user_ptr[0]; + struct sk_buff *msg; + void *hdr; + int err; + + if (info->attrs[NLPVRDPY_ATTR_NAMING_REQUIRED]) + nlpvrdpy->gem_names_required = true; + + msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + hdr = genlmsg_put_reply(msg, info, &nlpvrdpy_family, + 0, NLPVRDPY_CMD_CONNECTED); + if (!hdr) { + err = -ENOMEM; + goto err_msg_free; + } + + err = nla_put_string(msg, NLPVRDPY_ATTR_NAME, + nlpvrdpy->dev->driver->name); + if (err) + goto err_msg_free; + + genlmsg_end(msg, hdr); + + err = genlmsg_reply(msg, info); + + if (!err) { + nlpvrdpy_lock(nlpvrdpy); + + nlpvrdpy->net = genl_info_net(info); + nlpvrdpy->dst_portid = info->snd_portid; + atomic_set(&nlpvrdpy->connected, 1); + + nlpvrdpy_unlock(nlpvrdpy); + } + + return err; + +err_msg_free: + nlmsg_free(msg); + return err; +} + +static int nlpvrdpy_cmd_disconnect(struct sk_buff *skb, struct genl_info *info) +{ + struct nlpvrdpy *nlpvrdpy = info->user_ptr[0]; + + atomic_set(&nlpvrdpy->connected, 0); + + if (nlpvrdpy->disconnect_cb) + nlpvrdpy->disconnect_cb(nlpvrdpy->disconnect_cb_data); + + return 0; +} + +static int nlpvrdpy_cmd_flipped(struct sk_buff *skb, struct genl_info *info) +{ + struct nlpvrdpy *nlpvrdpy = info->user_ptr[0]; + + return 
(nlpvrdpy->flipped_cb) ? + nlpvrdpy->flipped_cb(nlpvrdpy->flipped_cb_data) : + 0; +} + +static int nlpvrdpy_cmd_copied(struct sk_buff *skb, struct genl_info *info) +{ + struct nlpvrdpy *nlpvrdpy = info->user_ptr[0]; + + return (nlpvrdpy->copied_cb) ? + nlpvrdpy->copied_cb(nlpvrdpy->copied_cb_data) : + 0; +} + +static struct genl_ops nlpvrdpy_ops[] = { + { + .cmd = NLPVRDPY_CMD_CONNECT, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) + .policy = nlpvrdpy_policy, +#endif + .doit = nlpvrdpy_cmd_connect, + .flags = GENL_ADMIN_PERM, + .internal_flags = NLPVRDPY_CIF_NLPVRDPY_NOT_CONNECTED + }, + { + .cmd = NLPVRDPY_CMD_DISCONNECT, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) + .policy = nlpvrdpy_policy, +#endif + .doit = nlpvrdpy_cmd_disconnect, + .flags = 0, + .internal_flags = NLPVRDPY_CIF_NLPVRDPY + }, + { + .cmd = NLPVRDPY_CMD_FLIPPED, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) + .policy = nlpvrdpy_policy, +#endif + .doit = nlpvrdpy_cmd_flipped, + .flags = 0, + .internal_flags = NLPVRDPY_CIF_NLPVRDPY + }, + { + .cmd = NLPVRDPY_CMD_COPIED, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) + .policy = nlpvrdpy_policy, +#endif + .doit = nlpvrdpy_cmd_copied, + .flags = 0, + .internal_flags = NLPVRDPY_CIF_NLPVRDPY + } +}; + +int nlpvrdpy_register(void) +{ + nlpvrdpy_family.module = THIS_MODULE; + nlpvrdpy_family.ops = nlpvrdpy_ops; + nlpvrdpy_family.n_ops = ARRAY_SIZE(nlpvrdpy_ops); + + return genl_register_family(&nlpvrdpy_family); +} + +int nlpvrdpy_unregister(void) +{ + return genl_unregister_family(&nlpvrdpy_family); +} diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_netlink.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_netlink.h new file mode 100644 index 000000000000..d1833a7cd584 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/drm_nulldisp_netlink.h @@ -0,0 +1,78 @@ +/* vi: set ts=8 sw=8 sts=8: */ 
+/*************************************************************************/ /*! +@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __DRM_NULLDISP_NETLINK_H__ +#define __DRM_NULLDISP_NETLINK_H__ + +#include +#include + +typedef void (*nlpvrdpy_disconnect_cb)(void *data); +typedef int (*nlpvrdpy_flipped_cb)(void *data); +typedef int (*nlpvrdpy_copied_cb)(void *data); + +struct nlpvrdpy *nlpvrdpy_create(struct drm_device *dev, + nlpvrdpy_disconnect_cb disconnect_cb, + void *disconnect_cb_data, + nlpvrdpy_flipped_cb flipped_cb, + void *flipped_cb_data, + nlpvrdpy_copied_cb copied_cb, + void *copied_cb_data); + +void nlpvrdpy_destroy(struct nlpvrdpy *nlpvrdpy); + +int nlpvrdpy_send_flip(struct nlpvrdpy *nlpvrdpy, + struct drm_framebuffer *fb, + struct drm_gem_object **objs); + +int nlpvrdpy_send_copy(struct nlpvrdpy *nlpvrdpy, + struct drm_framebuffer *fb, + struct drm_gem_object **objs); + +void nlpvrdpy_send_disconnect(struct nlpvrdpy *nlpvrdpy); + +int nlpvrdpy_register(void); + +int nlpvrdpy_unregister(void); + +#endif /* __DRM_NULLDISP_NETLINK_H__ */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/e2c3_gpu/e2c3_gpu_drv.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/e2c3_gpu/e2c3_gpu_drv.c new file mode 100644 index 000000000000..0df591ff466b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/e2c3_gpu/e2c3_gpu_drv.c @@ -0,0 +1,426 @@ +/* vi: set ts=8 sw=8 sts=8: */ 
+/*************************************************************************/ /*! +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Copyright Copyright (c) MCST +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* + * This is a device driver for the E2C3 GPU. + */ + +#include +#include +#include +#include +#include + +#if defined(CONFIG_MTRR) +#include +#endif + +#include "pvrmodule.h" + +#include "e2c3_gpu_drv_internal.h" + +/* RGX regs on BAR0 */ +#define E2C3_GPU_RGX_REG_PCI_BASENUM 0 + +MODULE_DESCRIPTION("PowerVR E2C3 GPU driver"); + +int request_pci_io_addr(struct pci_dev *pdev, u32 index, resource_size_t offset, + resource_size_t length) +{ + resource_size_t start, end; + + start = pci_resource_start(pdev, index); + end = pci_resource_end(pdev, index); + + if ((start + offset + length - 1) > end) + return -EIO; + if (pci_resource_flags(pdev, index) & IORESOURCE_IO) { + if (request_region(start + offset, length, DRV_NAME) == NULL) + return -EIO; + } else { + if (request_mem_region(start + offset, length, DRV_NAME) == + NULL) + return -EIO; + } + return 0; +} + +void release_pci_io_addr(struct pci_dev *pdev, u32 index, resource_size_t start, + resource_size_t length) +{ + if (pci_resource_flags(pdev, index) & IORESOURCE_IO) + release_region(start, length); + else + release_mem_region(start, length); +} + +int setup_io_region(struct pci_dev *pdev, struct e2c3_gpu_io_region *region, + u32 index, resource_size_t offset, resource_size_t size) +{ + int err; + resource_size_t pci_phys_addr; + + err = request_pci_io_addr(pdev, index, offset, size); + if (err) { + 
dev_err(&pdev->dev, + "Failed to request E2C3 GPU registers (err=%d)\n", err); + return -EIO; + } + pci_phys_addr = pci_resource_start(pdev, index); + region->region.base = pci_phys_addr + offset; + region->region.size = size; + + region->registers = ioremap(region->region.base, region->region.size); + + if (!region->registers) { + dev_err(&pdev->dev, "Failed to map E2C3 GPU registers\n"); + release_pci_io_addr(pdev, index, region->region.base, + region->region.size); + return -EIO; + } + return 0; +} + +int e2c3_gpu_register_ext_device(struct e2c3_gpu_device *e2c3_gpu) +{ + int err = 0; + struct resource rogue_resources[] = { + DEFINE_RES_MEM_NAMED( + pci_resource_start(e2c3_gpu->pdev, + E2C3_GPU_RGX_REG_PCI_BASENUM), + E2C3_GPU_RGX_REG_REGION_SIZE, "rogue-regs"), + }; + struct platform_device_info rogue_device_info = { + .parent = &e2c3_gpu->pdev->dev, + .name = E2C3_GPU_DEVICE_NAME_ROGUE, + .id = PLATFORM_DEVID_AUTO, + .res = rogue_resources, + .num_res = ARRAY_SIZE(rogue_resources), + .data = NULL, + .size_data = 0, + .dma_mask = DMA_BIT_MASK(40), + }; + + e2c3_gpu->ext_dev = platform_device_register_full(&rogue_device_info); + + if (IS_ERR(e2c3_gpu->ext_dev)) { + err = PTR_ERR(e2c3_gpu->ext_dev); + dev_err(&e2c3_gpu->pdev->dev, + "Failed to register rogue device (%d)\n", err); + e2c3_gpu->ext_dev = NULL; + return err; + } + return err; +} + +irqreturn_t e2c3_gpu_irq_handler(int irq, void *data) +{ + unsigned long flags; + irqreturn_t ret = IRQ_NONE; + struct e2c3_gpu_device *e2c3_gpu = (struct e2c3_gpu_device *)data; + struct e2c3_gpu_interrupt_handler *ext_int; + + spin_lock_irqsave(&e2c3_gpu->interrupt_handler_lock, flags); + + ext_int = &e2c3_gpu->interrupt_handler; + if (ext_int->enabled && ext_int->handler_function) { + ext_int->handler_function(ext_int->handler_data); + } + ret = IRQ_HANDLED; + + spin_unlock_irqrestore(&e2c3_gpu->interrupt_handler_lock, flags); + + return ret; +} + +#if defined(E2C3_GPU_FAKE_INTERRUPTS) +void 
e2c3_gpu_irq_fake_wrapper(unsigned long data) +{ + struct e2c3_gpu_device *e2c3_gpu = (struct e2c3_gpu_device *)data; + + e2c3_gpu_irq_handler(0, e2c3_gpu); + mod_timer(&e2c3_gpu->timer, + jiffies + msecs_to_jiffies(E2C3_GPU_FAKE_INTERRUPT_TIME_MS)); +} +#endif + +static void e2c3_gpu_devres_release(struct device *dev, void *res) +{ + /* No extra cleanup needed */ +} + +static int e2c3_gpu_cleanup(struct pci_dev *pdev) +{ + struct e2c3_gpu_device *e2c3_gpu = + devres_find(&pdev->dev, e2c3_gpu_devres_release, NULL, NULL); + int err = 0; + + if (!e2c3_gpu) { + dev_err(&pdev->dev, "No E2C3 GPU device resources found\n"); + return -ENODEV; + } + + if (e2c3_gpu->interrupt_handler.enabled) + e2c3_gpu_disable_interrupt(&pdev->dev); + +#if defined(E2C3_GPU_FAKE_INTERRUPTS) + del_timer_sync(&e2c3_gpu->timer); +#else + free_irq(e2c3_gpu->pdev->irq, e2c3_gpu); +#endif + + return err; +} + +static int e2c3_gpu_init(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct e2c3_gpu_device *e2c3_gpu; + int err = 0; + + if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) + return -ENOMEM; + + e2c3_gpu = devres_alloc(e2c3_gpu_devres_release, sizeof(*e2c3_gpu), + GFP_KERNEL); + if (!e2c3_gpu) { + err = -ENOMEM; + goto err_out; + } + + devres_add(&pdev->dev, e2c3_gpu); + + err = e2c3_gpu_enable(&pdev->dev); + if (err) { + dev_err(&pdev->dev, "e2c3_gpu_enable failed %d\n", err); + goto err_release; + } + + e2c3_gpu->pdev = pdev; + + spin_lock_init(&e2c3_gpu->interrupt_handler_lock); + spin_lock_init(&e2c3_gpu->interrupt_enable_lock); + +#if defined(E2C3_GPU_FAKE_INTERRUPTS) + dev_warn(&pdev->dev, "WARNING: Faking interrupts every %d ms", + FAKE_INTERRUPT_TIME_MS); + setup_timer(&tc->timer, tc_irq_fake_wrapper, (unsigned long)tc); + mod_timer(&tc->timer, + jiffies + msecs_to_jiffies(E2C3_GPU_FAKE_INTERRUPT_TIME_MS)); +#else + err = request_irq(e2c3_gpu->pdev->irq, e2c3_gpu_irq_handler, + IRQF_SHARED, DRV_NAME, e2c3_gpu); + if (err) { + dev_err(&pdev->dev, + "e2c3_gpu_enable 
request irq #%d failed %d\n", + e2c3_gpu->pdev->irq, err); + goto err_dev_cleanup; + } +#endif + + err = e2c3_gpu_register_ext_device(e2c3_gpu); + if (err) + goto err_dev_cleanup; + + devres_remove_group(&pdev->dev, NULL); + +err_out: + if (err) + dev_err(&pdev->dev, "%s: failed\n", __func__); + + return err; + +err_dev_cleanup: + e2c3_gpu_cleanup(pdev); + e2c3_gpu_disable(&pdev->dev); +err_release: + devres_release_group(&pdev->dev, NULL); + goto err_out; +} + +static void e2c3_gpu_exit(struct pci_dev *pdev) +{ + struct e2c3_gpu_device *e2c3_gpu = + devres_find(&pdev->dev, e2c3_gpu_devres_release, NULL, NULL); + if (!e2c3_gpu) { + dev_err(&pdev->dev, "No E2C3 GPU device resources found\n"); + return; + } + + if (e2c3_gpu->ext_dev) + platform_device_unregister(e2c3_gpu->ext_dev); + + e2c3_gpu_cleanup(pdev); + + e2c3_gpu_disable(&pdev->dev); +} + +static struct pci_device_id e2c3_gpu_pci_tbl[] = { + { PCI_VDEVICE(MCST_TMP, PCI_DEVICE_ID_MCST_3D_IMAGINATION_GX6650) }, + {}, +}; + +static struct pci_driver e2c3_gpu_pci_driver = { + .name = DRV_NAME, + .id_table = e2c3_gpu_pci_tbl, + .probe = e2c3_gpu_init, + .remove = e2c3_gpu_exit, +}; + +module_pci_driver(e2c3_gpu_pci_driver); + +MODULE_DEVICE_TABLE(pci, e2c3_gpu_pci_tbl); + +int e2c3_gpu_enable(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int err; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "error - pci_enable_device returned %d\n", + err); + goto err_out; + } + + /* Enable BUS master */ + pci_set_master(pdev); + + if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))) + goto err_disable_device; + if ((err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)))) + goto err_disable_device; +err_out: + return err; + +err_disable_device: + pci_disable_device(pdev); + return err; +} +EXPORT_SYMBOL(e2c3_gpu_enable); + +void e2c3_gpu_disable(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + pci_disable_device(pdev); +} +EXPORT_SYMBOL(e2c3_gpu_disable); + 
+int e2c3_gpu_set_interrupt_handler(struct device *dev, + void (*handler_function)(void *), void *data) +{ + struct e2c3_gpu_device *e2c3_gpu = + devres_find(dev, e2c3_gpu_devres_release, NULL, NULL); + int err = 0; + unsigned long flags; + + if (!e2c3_gpu) { + dev_err(dev, "No E2C3 GPU device resources found\n"); + err = -ENODEV; + goto err_out; + } + + spin_lock_irqsave(&e2c3_gpu->interrupt_handler_lock, flags); + + e2c3_gpu->interrupt_handler.handler_function = handler_function; + e2c3_gpu->interrupt_handler.handler_data = data; + + spin_unlock_irqrestore(&e2c3_gpu->interrupt_handler_lock, flags); + +err_out: + return err; +} +EXPORT_SYMBOL(e2c3_gpu_set_interrupt_handler); + +int e2c3_gpu_enable_interrupt(struct device *dev) +{ + struct e2c3_gpu_device *e2c3_gpu = + devres_find(dev, e2c3_gpu_devres_release, NULL, NULL); + int err = 0; + unsigned long flags; + + if (!e2c3_gpu) { + dev_err(dev, "No E2C3 GPU device resources found\n"); + err = -ENODEV; + goto err_out; + } + spin_lock_irqsave(&e2c3_gpu->interrupt_enable_lock, flags); + + if (e2c3_gpu->interrupt_handler.enabled) { + dev_warn(dev, "Interrupt already enabled\n"); + err = -EEXIST; + goto err_unlock; + } + e2c3_gpu->interrupt_handler.enabled = true; + +err_unlock: + spin_unlock_irqrestore(&e2c3_gpu->interrupt_enable_lock, flags); +err_out: + return err; +} +EXPORT_SYMBOL(e2c3_gpu_enable_interrupt); + +int e2c3_gpu_disable_interrupt(struct device *dev) +{ + struct e2c3_gpu_device *e2c3_gpu = + devres_find(dev, e2c3_gpu_devres_release, NULL, NULL); + int err = 0; + unsigned long flags; + + if (!e2c3_gpu) { + dev_err(dev, "No E2C3 GPU device resources found\n"); + err = -ENODEV; + goto err_out; + } + spin_lock_irqsave(&e2c3_gpu->interrupt_enable_lock, flags); + + if (!e2c3_gpu->interrupt_handler.enabled) { + dev_warn(dev, "Interrupt already disabled\n"); + } + e2c3_gpu->interrupt_handler.enabled = false; + + spin_unlock_irqrestore(&e2c3_gpu->interrupt_enable_lock, flags); +err_out: + return err; +} 
+EXPORT_SYMBOL(e2c3_gpu_disable_interrupt); diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/e2c3_gpu/e2c3_gpu_drv.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/e2c3_gpu/e2c3_gpu_drv.h new file mode 100644 index 000000000000..98f7d3f23d02 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/e2c3_gpu/e2c3_gpu_drv.h @@ -0,0 +1,69 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Copyright Copyright (c) MCST +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _E2C3_GPU_DRV_H +#define _E2C3_GPU_DRV_H + +/* + * This contains the hooks for the E2C3 GPU driver + */ + +#include +#include +#include + +int e2c3_gpu_enable(struct device *dev); +void e2c3_gpu_disable(struct device *dev); + +int e2c3_gpu_enable_interrupt(struct device *dev); +int e2c3_gpu_disable_interrupt(struct device *dev); + +int e2c3_gpu_set_interrupt_handler(struct device *dev, + void (*handler_function)(void *), + void *handler_data); + +#define E2C3_GPU_DEVICE_NAME_ROGUE "e2c3_gpu_rogue" + +#define E2C3_GPU_RGX_REG_REGION_SIZE (0x7FFFF) + +#endif /* _E2C3_GPU_DRV_H */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/e2c3_gpu/e2c3_gpu_drv_internal.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/e2c3_gpu/e2c3_gpu_drv_internal.h new file mode 100644 index 000000000000..209874f8a819 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/e2c3_gpu/e2c3_gpu_drv_internal.h @@ -0,0 +1,107 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! 
+@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Copyright Copyright (c) MCST +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _E2C3_DRV_INTERNAL_H +#define _E2C3_DRV_INTERNAL_H + +#include "e2c3_gpu_drv.h" + +#include <linux/pci.h> + +#if defined(E2C3_GPU_FAKE_INTERRUPTS) +#define E2C3_GPU_FAKE_INTERRUPT_TIME_MS 1600 +#include <linux/timer.h> +#include <linux/time.h> +#endif + +#define DRV_NAME "e2c3-gpu" + +/* Convert a byte offset to a 32 bit dword offset */ +#define DWORD_OFFSET(byte_offset) ((byte_offset) >> 2) + +#define HEX2DEC(v) ((((v) >> 4) * 10) + ((v)&0x0F)) + +struct e2c3_gpu_interrupt_handler { + bool enabled; + void (*handler_function)(void *); + void *handler_data; +}; + +struct e2c3_gpu_region { + resource_size_t base; + resource_size_t size; +}; + +struct e2c3_gpu_io_region { + struct e2c3_gpu_region region; + void __iomem *registers; +}; + +struct e2c3_gpu_device { + struct pci_dev *pdev; + + spinlock_t interrupt_handler_lock; + spinlock_t interrupt_enable_lock; + + struct e2c3_gpu_interrupt_handler interrupt_handler; + + struct platform_device *ext_dev; + +#if defined(E2C3_GPU_FAKE_INTERRUPTS) + struct timer_list timer; +#endif +}; + +int request_pci_io_addr(struct pci_dev *pdev, u32 index, resource_size_t offset, + resource_size_t length); +void release_pci_io_addr(struct pci_dev *pdev, u32 index, resource_size_t start, + resource_size_t length); + +int setup_io_region(struct pci_dev *pdev, struct e2c3_gpu_io_region *region, + u32 index, resource_size_t offset, resource_size_t size); + +#if
defined(E2C3_GPU_FAKE_INTERRUPTS) +void e2c3_gpu_irq_fake_wrapper(unsigned long data); +#endif /* defined(E2C3_GPU_FAKE_INTERRUPTS) */ + +#endif /* _E2C3_DRV_INTERNAL_H */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/kernel_compatibility.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/kernel_compatibility.h new file mode 100644 index 000000000000..9afa3a55f12c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/kernel_compatibility.h @@ -0,0 +1,465 @@ +/*************************************************************************/ /*! +@Title Kernel versions compatibility macros +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Per-version macros to allow code to seamlessly use older kernel +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __KERNEL_COMPATIBILITY_H__ +#define __KERNEL_COMPATIBILITY_H__ + +#include <linux/version.h> + +/* + * Stop supporting an old kernel? Remove the top block. + * New incompatible kernel? Append a new block at the bottom. + * + * Please write you version test as `VERSION < X.Y`, and use the earliest + * possible version :) + */ + +/* Linux 3.6 introduced seq_vprintf(). Earlier versions don't have this + * so we work around the limitation by vsnprintf() + seq_puts().
+ */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) +#define seq_vprintf(seq_file, fmt, args) \ +do { \ + char aszBuffer[512]; /* maximum message buffer size */ \ + vsnprintf(aszBuffer, sizeof(aszBuffer), fmt, args); \ + seq_puts(seq_file, aszBuffer); \ +} while (0) +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) + +/* Linux 3.7 split VM_RESERVED into VM_DONTDUMP and VM_DONTEXPAND */ +#define VM_DONTDUMP VM_RESERVED + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) */ + +/* + * Note: this fix had to be written backwards because get_unused_fd_flags + * was already defined but not exported on kernels < 3.7 + * + * When removing support for kernels < 3.7, this block should be removed + * and all `get_unused_fd()` should be manually replaced with + * `get_unused_fd_flags(0)` + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) + +/* Linux 3.19 removed get_unused_fd() */ +/* get_unused_fd_flags was introduced in 3.7 */ +#define get_unused_fd() get_unused_fd_flags(0) + +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) + +/* + * Headers shouldn't normally be included by this file but this is a special + * case as it's not obvious from the name that devfreq_add_device needs this + * include. 
+ */ +#include <linux/devfreq.h> + +#define devfreq_add_device(dev, profile, name, data) \ + ({ \ + struct devfreq *__devfreq; \ + if (name && !strcmp(name, "simple_ondemand")) \ + __devfreq = devfreq_add_device(dev, profile, \ + &devfreq_simple_ondemand, data); \ + else \ + __devfreq = ERR_PTR(-EINVAL); \ + __devfreq; \ + }) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) */ + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) + +#define DRIVER_RENDER 0 +#define DRM_RENDER_ALLOW 0 + +/* Linux 3.12 introduced a new shrinker API */ +#define SHRINK_STOP (~0UL) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) */ + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) + +#define dev_pm_opp_get_opp_count(dev) opp_get_opp_count(dev) +#define dev_pm_opp_get_freq(opp) opp_get_freq(opp) +#define dev_pm_opp_get_voltage(opp) opp_get_voltage(opp) +#define dev_pm_opp_add(dev, freq, u_volt) opp_add(dev, freq, u_volt) +#define dev_pm_opp_find_freq_ceil(dev, freq) opp_find_freq_ceil(dev, freq) + +#if defined(CONFIG_ARM) +/* Linux 3.13 renamed ioremap_cached to ioremap_cache */ +#define ioremap_cache(cookie, size) ioremap_cached(cookie, size) +#endif /* defined(CONFIG_ARM) */ + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) */ + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) + +/* Linux 3.14 introduced a new set of sized min and max defines */ +#ifndef U32_MAX +#define U32_MAX ((u32)UINT_MAX) +#endif + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) */ + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) + +/* Linux 3.17 changed the 3rd argument from a `struct page ***pages` to + * `struct page **pages` */ +#define map_vm_area(area, prot, pages) map_vm_area(area, prot, &pages) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */ + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) + +/* + * Linux 4.7 removed this function but its replacement was available since 3.19.
+ */ +#define drm_crtc_send_vblank_event(crtc, e) drm_send_vblank_event((crtc)->dev, drm_crtc_index(crtc), e) + +/* seq_has_overflowed() was introduced in 3.19 but the structure elements + * have been available since 2.x + */ +#include +static inline bool seq_has_overflowed(struct seq_file *m) +{ + return m->count == m->size; +} + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) */ + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)) + +#define debugfs_create_file_size(name, mode, parent, data, fops, file_size) \ + ({ \ + struct dentry *de; \ + de = debugfs_create_file(name, mode, parent, data, fops); \ + if (de) \ + de->d_inode->i_size = file_size; \ + de; \ + }) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)) +#define drm_fb_helper_unregister_fbi(fb_helper) \ + ({ \ + if ((fb_helper) && (fb_helper)->fbdev) \ + unregister_framebuffer((fb_helper)->fbdev); \ + }) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) + +/* Linux 4.4 renamed GFP_WAIT to GFP_RECLAIM */ +#define __GFP_RECLAIM __GFP_WAIT + +#if !defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) +#define dev_pm_opp_of_add_table(dev) of_init_opp_table(dev) +#define dev_pm_opp_of_remove_table(dev) of_free_opp_table(dev) +#else +#define sync_fence_create(data_name, sync_pt) sync_fence_create(data_name, &(sync_pt)->base) +#endif + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) */ + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) && \ + (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) + +/* Linux 4.5 added a new printf-style parameter for debug messages */ + +#define drm_encoder_init(dev, encoder, funcs, encoder_type, name, ...) \ + drm_encoder_init(dev, encoder, funcs, encoder_type) + +#define drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, format_modifiers, type, name, ...) 
\ + ({ (void) format_modifiers; drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, type); }) + +#define drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs, name, ...) \ + drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs) + +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) + +#define drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, format_modifiers, type, name, ...) \ + ({ (void) format_modifiers; drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, type, name, ##__VA_ARGS__); }) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) */ + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) + +/* + * Linux 4.6 removed the first two parameters, the "struct task_struct" type + * pointer "current" is defined in asm/current.h, which makes it pointless + * to pass it on every function call. +*/ +#define get_user_pages(start, nr_pages, gup_flags, pages, vmas) \ + get_user_pages(current, current->mm, start, nr_pages, gup_flags & FOLL_WRITE, gup_flags & FOLL_FORCE, pages, vmas) + +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) + +/* Linux 4.9 replaced the write/force parameters with "gup_flags" */ +#define get_user_pages(start, nr_pages, gup_flags, pages, vmas) \ + get_user_pages(start, nr_pages, gup_flags & FOLL_WRITE, gup_flags & FOLL_FORCE, pages, vmas) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) */ + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \ + (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) + +/* + * Linux 4.6 removed the start and end arguments as it now always maps + * the entire DMA-BUF. + * Additionally, dma_buf_end_cpu_access() now returns an int error. 
+ */ +#define dma_buf_begin_cpu_access(DMABUF, DIRECTION) dma_buf_begin_cpu_access(DMABUF, 0, DMABUF->size, DIRECTION) +#define dma_buf_end_cpu_access(DMABUF, DIRECTION) ({ dma_buf_end_cpu_access(DMABUF, 0, DMABUF->size, DIRECTION); 0; }) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \ + (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)) + +/* Linux 4.7 removed the first arguments as it's never been used */ +#define drm_gem_object_lookup(filp, handle) drm_gem_object_lookup((filp)->minor->dev, filp, handle) + +/* Linux 4.7 replaced nla_put_u64 with nla_put_u64_64bit */ +#define nla_put_u64_64bit(skb, attrtype, value, padattr) nla_put_u64(skb, attrtype, value) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) + +/* Linux 4.9 changed the second argument to a drm_file pointer */ +#define drm_vma_node_is_allowed(node, file_priv) drm_vma_node_is_allowed(node, (file_priv)->filp) +#define drm_vma_node_allow(node, file_priv) drm_vma_node_allow(node, (file_priv)->filp) +#define drm_vma_node_revoke(node, file_priv) drm_vma_node_revoke(node, (file_priv)->filp) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) +#define refcount_read(r) atomic_read(r) +#define drm_mm_insert_node(mm, node, size) drm_mm_insert_node(mm, node, size, 0, DRM_MM_SEARCH_DEFAULT) + +#define drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd) drm_helper_mode_fill_fb_struct(fb, mode_cmd) +#define drm_fb_helper_init(dev, helper, max_conn_count) \ + drm_fb_helper_init(dev, helper, 1, max_conn_count) + +/* + * In Linux Kernels >= 4.12 for x86 another level of page tables has been + * added. The added level (p4d) sits between pgd and pud, so when it + * doesn`t exist, pud_offset function takes pgd as a parameter instead + * of p4d. 
+ */ +#define p4d_t pgd_t +#define p4d_offset(pgd, address) (pgd) +#define p4d_none(p4d) (0) +#define p4d_bad(p4d) (0) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) */ + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + +#define drm_mode_object_get(obj) drm_mode_object_reference(obj) +#define drm_mode_object_put(obj) drm_mode_object_unreference(obj) +#define drm_connector_get(obj) drm_connector_reference(obj) +#define drm_connector_put(obj) drm_connector_unreference(obj) +#define drm_framebuffer_get(obj) drm_framebuffer_reference(obj) +#define drm_framebuffer_put(obj) drm_framebuffer_unreference(obj) +#define drm_gem_object_get(obj) drm_gem_object_reference(obj) +#define drm_gem_object_put(obj) drm_gem_object_unreference(obj) +#define __drm_gem_object_put(obj) __drm_gem_object_unreference(obj) +#define drm_gem_object_put_unlocked(obj) drm_gem_object_unreference_unlocked(obj) +#define drm_property_blob_get(obj) drm_property_reference_blob(obj) +#define drm_property_blob_put(obj) drm_property_unreference_blob(obj) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)) + +#define drm_dev_put(dev) drm_dev_unref(dev) + +#define drm_mode_object_find(dev, file_priv, id, type) drm_mode_object_find(dev, id, type) +#define drm_encoder_find(dev, file_priv, id) drm_encoder_find(dev, id) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + +#define drm_atomic_helper_check_plane_state(plane_state, crtc_state, \ + min_scale, max_scale, \ + can_position, can_update_disabled) \ + ({ \ + const struct drm_rect __clip = { \ + .x2 = crtc_state->crtc->mode.hdisplay, \ + .y2 = crtc_state->crtc->mode.vdisplay, \ + }; \ + int __ret = drm_plane_helper_check_state(plane_state, \ + &__clip, \ + min_scale, max_scale, \ + can_position, \ + can_update_disabled); \ + __ret; \ + }) + +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)) + 
+#define drm_atomic_helper_check_plane_state(plane_state, crtc_state, \ + min_scale, max_scale, \ + can_position, can_update_disabled) \ + ({ \ + const struct drm_rect __clip = { \ + .x2 = crtc_state->crtc->mode.hdisplay, \ + .y2 = crtc_state->crtc->mode.vdisplay, \ + }; \ + int __ret = drm_atomic_helper_check_plane_state(plane_state, \ + crtc_state, \ + &__clip, \ + min_scale, max_scale, \ + can_position, \ + can_update_disabled); \ + __ret; \ + }) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) + +#define drm_connector_attach_encoder(connector, encoder) \ + drm_mode_connector_attach_encoder(connector, encoder) + +#define drm_connector_update_edid_property(connector, edid) \ + drm_mode_connector_update_edid_property(connector, edid) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) */ + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)) + +/* + * Work around architectures, e.g. MIPS, that define copy_from_user and + * copy_to_user as macros that call access_ok, as this gets redefined below. + * As of kernel 4.12, these functions are no longer defined per-architecture + * so this work around isn't needed. + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) +#if defined(copy_from_user) + /* + * NOTE: This function should not be called directly as it exists simply to + * work around copy_from_user being defined as a macro that calls access_ok. + */ +static inline int +__pvr_copy_from_user(void *to, const void __user *from, unsigned long n) +{ + return copy_from_user(to, from, n); +} + +#undef copy_from_user +#define copy_from_user(to, from, n) __copy_from_user(to, from, n) +#endif + +#if defined(copy_to_user) + /* + * NOTE: This function should not be called directly as it exists simply to + * work around copy_to_user being defined as a macro that calls access_ok. 
+ */ +static inline int +__pvr_copy_to_user(void __user *to, const void *from, unsigned long n) +{ + return copy_to_user(to, from, n); +} + +#undef copy_to_user +#define copy_to_user(to, from, n) __copy_to_user(to, from, n) +#endif +#endif + +/* + * Linux 5.0 dropped the type argument. + * + * This is unused in at least Linux 3.4 and above for all architectures other + * than 'um' (User Mode Linux), which stopped using it in 4.2. + */ +#if defined(access_ok) + /* + * NOTE: This function should not be called directly as it exists simply to + * work around access_ok being defined as a macro. + */ +static inline int +__pvr_access_ok_compat(int type, const void __user * addr, unsigned long size) +{ + return access_ok(type, addr, size); +} + +#undef access_ok +#define access_ok(addr, size) __pvr_access_ok_compat(0, addr, size) +#else +#define access_ok(addr, size) access_ok(0, addr, size) +#endif + +#endif + +#if defined(CONFIG_L4) + +/* + * Headers shouldn't normally be included by this file but this is a special + * case to access the memory translation API when running on the L4 ukernel + */ +#include + +#undef page_to_phys +#define page_to_phys(x) l4x_virt_to_phys((void *)((phys_addr_t)page_to_pfn(x) << PAGE_SHIFT)) + +#endif /* defined(CONFIG_L4) */ + +#endif /* __KERNEL_COMPATIBILITY_H__ */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/kernel_config_compatibility.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/kernel_config_compatibility.h new file mode 100644 index 000000000000..63effd65ecf9 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/kernel_config_compatibility.h @@ -0,0 +1,54 @@ +/*************************************************************************/ /*! +@Title Kernel config compatibility define options +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This file is exclusively for Linux config kernel options. 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __KERNEL_CONFIG_COMPATIBILITY_H__ +#define __KERNEL_CONFIG_COMPATIBILITY_H__ + +#include <linux/version.h> + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)) +#ifdef SUPPORT_DRM_FBDEV_EMULATION +#define CONFIG_DRM_FBDEV_EMULATION +#endif +#endif + +#endif /* __KERNEL_CONFIG_COMPATIBILITY_H__ */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/kernel_nospec.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/kernel_nospec.h new file mode 100644 index 000000000000..e27a3ebc2ac6 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/kernel_nospec.h @@ -0,0 +1,71 @@ +/*************************************************************************/ /*! +@Title Macro to limit CPU speculative execution in kernel code +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Per-version macros to allow code to seamlessly use older kernel +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef __KERNEL_NOSPEC_H__ +#define __KERNEL_NOSPEC_H__ + +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 2) || \ + (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) && \ + LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 18)) || \ + (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) && \ + LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 81)) || \ + (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) && \ + LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 118))) +#include +#include +#include +#else +#define array_index_nospec(index, size) (index) +#endif + +/* + * For Ubuntu kernels, the features available for a given Linux version code + * may not match those in upstream kernels. This is the case for the + * availability of the array_index_nospec macro. + */ +#if !defined(array_index_nospec) +#define array_index_nospec(index, size) (index) +#endif + +#endif /* __KERNEL_NOSPEC_H__ */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi.h new file mode 100644 index 000000000000..4ba54eadb715 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi.h @@ -0,0 +1,198 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +@Copyright Portions Copyright (c) Synopsys Ltd. 
All Rights Reserved +@License Synopsys Permissive License + +The Synopsys Software Driver and documentation (hereinafter "Software") +is an unsupported proprietary work of Synopsys, Inc. unless otherwise +expressly agreed to in writing between Synopsys and you. + +The Software IS NOT an item of Licensed Software or Licensed Product under +any End User Software License Agreement or Agreement for Licensed Product +with Synopsys or any supplement thereto. Permission is hereby granted, +free of charge, to any person obtaining a copy of this software annotated +with this license and the Software, to deal in the Software without +restriction, including without limitation the rights to use, copy, modify, +merge, publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, subject +to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. 
+ +*/ /**************************************************************************/ + +#ifndef _DRM_HDMI_H_ +#define _DRM_HDMI_H_ + +#include + +#include +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0)) +#include +#endif + +#include +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) +#include +#endif + +#define HDMI_EDID_LEN 512 + +#define ENCODING_RGB 0 +#define ENCODING_YCC444 1 +#define ENCODING_YCC422_16BITS 2 +#define ENCODING_YCC422_8BITS 3 +#define ENCODING_XVYCC444 4 + +struct hdmi_device_vmode { + bool dvi; + bool hsync_polarity; + bool vsync_polarity; + bool interlaced; + bool data_enable_polarity; + + u32 pixel_clock; + u32 pixel_repetition_input; + u32 pixel_repetition_output; +}; + +struct hdmi_device_data_info { + u32 enc_in_format; + u32 enc_out_format; + u32 enc_color_depth; + u32 colorimetry; + u32 pix_repet_factor; + u32 pp_default_phase; + u32 hdcp_enable; + u32 active_aspect_ratio; + int vic; + struct hdmi_device_vmode video_mode; +}; + +struct hdmi_i2c { + struct i2c_adapter adap; + + u32 ddc_addr; + u32 segment_ptr; + + struct mutex lock; + struct completion comp; +}; + +struct hdmi_device { + struct drm_device *drm_dev; + struct drm_connector connector; + struct drm_encoder encoder; + struct hdmi_i2c *i2c; + struct device *dev; + + struct workqueue_struct *workq; + struct work_struct hpd_work; + + /* Video parameters */ + struct hdmi_device_data_info hdmi_data; + struct edid *edid; + struct drm_crtc *crtc; + struct drm_display_mode *native_mode; + + bool hpd_detect; + bool phy_enabled; + void __iomem *core_regs; + void __iomem *top_regs; +}; + +#define connector_to_hdmi(c) \ + struct hdmi_device *hdmi = container_of((c), struct hdmi_device, connector) +#define encoder_to_hdmi(e) \ + struct hdmi_device *hdmi = container_of((e), struct hdmi_device, encoder) + + +enum HDMI_STATUS { + HDMI_INIT_SUCCESS = 0, + HDMI_INIT_FAILED_VIDEO, + HDMI_INIT_FAILED_PHY, + HDMI_INIT_FAILED_EDID, + HDMI_INIT_FAILED_HDCP, +}; + +/* 
Core APIs */ +inline void hdmi_write_reg32(struct hdmi_device *hdmi, int offset, u32 val); +inline u32 hdmi_read_reg32(struct hdmi_device *hdmi, int offset); +inline void hdmi_mod_reg32(struct hdmi_device *hdmi, u32 offset, u32 data, u32 mask); + +/* I2C APIs */ +int hdmi_i2c_init(struct hdmi_device *hdmi); +void hdmi_i2c_deinit(struct hdmi_device *hdmi); + +#if defined(PRINT_HDMI_REGISTERS) +void PrintVideoRegisters(struct hdmi_device *hdmi); +#endif + +#define IS_BIT_SET(value, bit) ((value) & (1 << (bit))) + +#if defined(HDMI_DEBUG) + #define hdmi_info(hdmi, fmt, ...) dev_info(hdmi->dev, fmt, ##__VA_ARGS__) + #define hdmi_error(hdmi, fmt, ...) dev_err(hdmi->dev, fmt, ##__VA_ARGS__) + #define HDMI_CHECKPOINT pr_debug("%s: line %d\n", __func__, __LINE__) +#else + #define hdmi_info(hdmi, fmt, ...) + #define hdmi_error(hdmi, fmt, ...) + #define HDMI_CHECKPOINT +#endif + +#endif diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_core.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_core.c new file mode 100644 index 000000000000..0e351770e68d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_core.c @@ -0,0 +1,1018 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +@Copyright Portions Copyright (c) Synopsys Ltd. 
All Rights Reserved +@License Synopsys Permissive License + +The Synopsys Software Driver and documentation (hereinafter "Software") +is an unsupported proprietary work of Synopsys, Inc. unless otherwise +expressly agreed to in writing between Synopsys and you. + +The Software IS NOT an item of Licensed Software or Licensed Product under +any End User Software License Agreement or Agreement for Licensed Product +with Synopsys or any supplement thereto. Permission is hereby granted, +free of charge, to any person obtaining a copy of this software annotated +with this license and the Software, to deal in the Software without +restriction, including without limitation the rights to use, copy, modify, +merge, publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, subject +to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. 
+ +*/ /**************************************************************************/ + +#include "drm_pdp_drv.h" + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#if defined(PDP_USE_ATOMIC) +#include +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0)) +#include +#endif + +#include "hdmi.h" +#include "hdmi_regs.h" +#include "hdmi_phy.h" +#include "hdmi_video.h" +#include "plato_drv.h" + +#include "kernel_compatibility.h" + +#define DRIVER_NAME "hdmi" +#define DRIVER_DESC "Imagination Technologies HDMI Driver" +#define DRIVER_DATE "20160809" +#define HDMI_VERSION_PLATO 1 + +#if defined(PRINT_HDMI_REGISTERS) +static u32 gs_hdmi_registers[] = { + HDMI_ID_DESIGN_ID_OFFSET, + HDMI_ID_REVISION_ID_OFFSET, + HDMI_ID_PRODUCT_ID0_OFFSET, + HDMI_ID_PRODUCT_ID1_OFFSET, + HDMI_ID_CONFIG0_ID_OFFSET, + HDMI_ID_CONFIG1_ID_OFFSET, + HDMI_ID_CONFIG2_ID_OFFSET, + HDMI_ID_CONFIG3_ID_OFFSET, + HDMI_IH_FC_STAT0_OFFSET, + HDMI_IH_FC_STAT1_OFFSET, + HDMI_IH_FC_STAT2_OFFSET, + HDMI_IH_AS_STAT0_OFFSET, + HDMI_IH_PHY_STAT0_OFFSET, + HDMI_IH_I2CM_STAT0_OFFSET, + HDMI_IH_CEC_STAT0_OFFSET, + HDMI_IH_VP_STAT0_OFFSET, + HDMI_IH_I2CMPHY_STAT0_OFFSET, + HDMI_IH_AHBDMAAUD_STAT0_OFFSET, + HDMI_IH_DECODE_OFFSET, + HDMI_IH_MUTE_FC_STAT0_OFFSET, + HDMI_IH_MUTE_FC_STAT1_OFFSET, + HDMI_IH_MUTE_FC_STAT2_OFFSET, + HDMI_IH_MUTE_AS_STAT0_OFFSET, + HDMI_IH_MUTE_PHY_STAT0_OFFSET, + HDMI_IH_MUTE_I2CM_STAT0_OFFSET, + HDMI_IH_MUTE_CEC_STAT0_OFFSET, + HDMI_IH_MUTE_VP_STAT0_OFFSET, + HDMI_IH_MUTE_I2CMPHY_STAT0_OFFSET, + HDMI_IH_MUTE_AHBDMAAUD_STAT0_OFFSET, + HDMI_IH_MUTE_OFFSET, + HDMI_TX_INVID0_OFFSET, + HDMI_TX_INSTUFFING_OFFSET, + HDMI_TX_GYDATA0_OFFSET, + HDMI_TX_GYDATA1_OFFSET, + HDMI_TX_RCRDATA0_OFFSET, + HDMI_TX_RCRDATA1_OFFSET, + HDMI_TX_BCBDATA0_OFFSET, + HDMI_TX_BCBDATA1_OFFSET, + HDMI_VP_STATUS_OFFSET, + HDMI_VP_PR_CD_OFFSET, + HDMI_VP_STUFF_OFFSET, + HDMI_VP_REMAP_OFFSET, + HDMI_VP_CONF_OFFSET, + HDMI_VP_MASK_OFFSET, + 
HDMI_FC_INVIDCONF_OFFSET, + HDMI_FC_INHACTIV0_OFFSET, + HDMI_FC_INHACTIV1_OFFSET, + HDMI_FC_INHBLANK0_OFFSET, + HDMI_FC_INHBLANK1_OFFSET, + HDMI_FC_INVACTIV0_OFFSET, + HDMI_FC_INVACTIV1_OFFSET, + HDMI_FC_INVBLANK_OFFSET, + HDMI_FC_HSYNCINDELAY0_OFFSET, + HDMI_FC_HSYNCINDELAY1_OFFSET, + HDMI_FC_HSYNCINWIDTH0_OFFSET, + HDMI_FC_HSYNCINWIDTH1_OFFSET, + HDMI_FC_VSYNCINDELAY_OFFSET, + HDMI_FC_VSYNCINWIDTH_OFFSET, + HDMI_FC_INFREQ0_OFFSET, + HDMI_FC_INFREQ1_OFFSET, + HDMI_FC_INFREQ2_OFFSET, + HDMI_FC_CTRLDUR_OFFSET, + HDMI_FC_EXCTRLDUR_OFFSET, + HDMI_FC_EXCTRLSPAC_OFFSET, + HDMI_FC_CH0PREAM_OFFSET, + HDMI_FC_CH1PREAM_OFFSET, + HDMI_FC_CH2PREAM_OFFSET, + HDMI_FC_AVICONF3_OFFSET, + HDMI_FC_GCP_OFFSET, + HDMI_FC_AVICONF0_OFFSET, + HDMI_FC_AVICONF1_OFFSET, + HDMI_FC_AVICONF2_OFFSET, + HDMI_FC_AVIVID_OFFSET, + HDMI_FC_AVIETB0_OFFSET, + HDMI_FC_AVIETB1_OFFSET, + HDMI_FC_AVISBB0_OFFSET, + HDMI_FC_AVISBB1_OFFSET, + HDMI_FC_AVIELB0_OFFSET, + HDMI_FC_AVIELB1_OFFSET, + HDMI_FC_AVISRB0_OFFSET, + HDMI_FC_AVISRB1_OFFSET, + HDMI_FC_PRCONF_OFFSET, + HDMI_FC_SCRAMBLER_CTRL_OFFSET, + HDMI_FC_MULTISTREAM_CTRL_OFFSET, + HDMI_FC_PACKET_TX_EN_OFFSET, + HDMI_FC_DBGFORCE_OFFSET, + HDMI_FC_DBGTMDS_0_OFFSET, + HDMI_FC_DBGTMDS_1_OFFSET, + HDMI_FC_DBGTMDS_2_OFFSET, + HDMI_PHY_CONF0_OFFSET, + HDMI_PHY_TST0_OFFSET, + HDMI_PHY_TST1_OFFSET, + HDMI_PHY_TST2_OFFSET, + HDMI_PHY_STAT0_OFFSET, + HDMI_PHY_INT0_OFFSET, + HDMI_PHY_MASK0_OFFSET, + HDMI_PHY_POL0_OFFSET, + HDMI_PHY_I2CM_SLAVE_OFFSET, + HDMI_PHY_I2CM_ADDRESS_OFFSET, + HDMI_PHY_I2CM_DATAO_1_OFFSET, + HDMI_PHY_I2CM_DATAO_0_OFFSET, + HDMI_PHY_I2CM_DATAI_1_OFFSET, + HDMI_PHY_I2CM_DATAI_0_OFFSET, + HDMI_PHY_I2CM_OPERATION_OFFSET, + HDMI_AUDIO_SAMPLER_OFFSET, + HDMI_AUD_N1_OFFSET, + HDMI_AUD_N2_OFFSET, + HDMI_AUD_N3_OFFSET, + HDMI_AUD_CTS1_OFFSET, + HDMI_AUD_CTS2_OFFSET, + HDMI_AUD_CTS3_OFFSET, + HDMI_AUD_INPUTCLKFS_OFFSET, + HDMI_AUDIO_DMA_OFFSET, + HDMI_MC_CLKDIS_OFFSET, + HDMI_MC_SWRSTZREQ_OFFSET, + HDMI_MC_OPCTRL_OFFSET, + HDMI_MC_FLOWCTRL_OFFSET, + 
HDMI_MC_PHYRSTZ_OFFSET, + HDMI_MC_LOCKONCLOCK_OFFSET, + HDMI_MC_HEACPHY_RST_OFFSET, + HDMI_MC_LOCKONCLOCK_2_OFFSET, + HDMI_MC_SWRSTZREQ_2_OFFSET, + HDMI_COLOR_SPACE_CONVERTER_OFFSET, + HDMI_HDCP_ENCRYPTION_OFFSET, + HDMI_I2CM_SLAVE_OFFSET, + HDMI_I2CM_ADDRESS_OFFSET, + HDMI_I2CM_DATAO_OFFSET, + HDMI_I2CM_DATAI_OFFSET, + HDMI_I2CM_OPERATION_OFFSET, + HDMI_I2CM_INT_OFFSET, + HDMI_I2CM_CTLINT_OFFSET, + HDMI_I2CM_DIV_OFFSET, + HDMI_I2CM_SEGADDR_OFFSET, + HDMI_I2CM_SOFTRSTZ_OFFSET, + HDMI_I2CM_SEGPTR_OFFSET, + HDMI_I2CM_SS_SCL_HCNT_1_OFFSET, + HDMI_I2CM_SS_SCL_HCNT_0_OFFSET, + HDMI_I2CM_SS_SCL_LCNT_1_OFFSET, + HDMI_I2CM_SS_SCL_LCNT_0_OFFSET, + HDMI_I2CM_FS_SCL_HCNT_1_OFFSET, + HDMI_I2CM_FS_SCL_HCNT_0_OFFSET, + HDMI_I2CM_FS_SCL_LCNT_1_OFFSET, + HDMI_I2CM_FS_SCL_LCNT_0_OFFSET, + HDMI_I2CM_SDA_HOLD_OFFSET, + HDMI_I2CM_SCDC_READ_UPDATE_OFFSET, +}; + +void PrintVideoRegisters(struct hdmi_device *hdmi) +{ + u32 n; + + dev_info(hdmi->dev, "HDMI register dump:\n"); + for (n = 0; n < ARRAY_SIZE(gs_hdmi_registers); ++n) { + u32 value = hdmi_read_reg32(hdmi, gs_hdmi_registers[n]); + + dev_info(hdmi->dev, "Reg 0x%04x = 0x%08x\n", gs_hdmi_registers[n], value); + } +} +#endif // PRINT_HDMI_REGISTERS + +/* 16 - 1920x1080@60Hz */ +static struct drm_display_mode forced_mode = { + DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, +}; + + +inline void hdmi_write_reg32(struct hdmi_device *hdmi, int offset, u32 val) +{ + plato_write_reg32(hdmi->core_regs, offset * 4, val); +} + +inline u32 hdmi_read_reg32(struct hdmi_device *hdmi, int offset) +{ + return plato_read_reg32(hdmi->core_regs, offset * 4); +} + +inline void hdmi_mod_reg32(struct hdmi_device *hdmi, u32 offset, u32 data, u32 mask) +{ + u32 val = hdmi_read_reg32(hdmi, offset) & ~mask; + + val |= data & mask; + hdmi_write_reg32(hdmi, offset, val); +} + +static 
void hdmi_mode_setup(struct hdmi_device *hdmi, struct drm_display_mode *mode) +{ + int err; + + HDMI_CHECKPOINT; + + plato_enable_pixel_clock(hdmi->drm_dev->dev->parent, mode->clock); + + /* Step D: Configure video mode */ + err = video_configure_mode(hdmi, mode); + if (err != 0) { + hdmi_error(hdmi, "%s: Failed to configure video mode\n", __func__); + return; + } + + // No need to configure audio on Plato, + // skip to step F: Configure InfoFrames + err = video_configure_infoframes(hdmi, mode); + if (err != 0) { + hdmi_error(hdmi, "%s: Failed to initialise PHY\n", __func__); + return; + } + + hdmi_info(hdmi, "%s: Final HDMI timing configuration:\n\nVIC: %d\nPixel clock: %d\nMode: %dx%d@%d\n\n", + __func__, hdmi->hdmi_data.vic, mode->clock, + mode->hdisplay, mode->vdisplay, mode->vrefresh); + +#if defined(PRINT_HDMI_REGISTERS) + PrintVideoRegisters(hdmi); +#endif +} + +static void hdmi_poweron(struct hdmi_device *hdmi) +{ + HDMI_CHECKPOINT; + mdelay(100); + hdmi_mode_setup(hdmi, hdmi->native_mode); +} + +static void hdmi_poweroff(struct hdmi_device *hdmi) +{ + phy_power_down(hdmi); +} + +static void +hdmi_helper_connector_destroy(struct drm_connector *connector) +{ + connector_to_hdmi(connector); + + HDMI_CHECKPOINT; + + /* Disable all interrupts */ + hdmi_write_reg32(hdmi, HDMI_IH_MUTE_PHY_STAT0_OFFSET, ~0); + kfree(hdmi->edid); + drm_connector_cleanup(connector); +} + +static enum drm_connector_status +hdmi_helper_connector_detect(struct drm_connector *connector, bool force) +{ + connector_to_hdmi(connector); + enum drm_connector_status status; + u32 phy_status; + + HDMI_CHECKPOINT; + + phy_status = hdmi_read_reg32(hdmi, HDMI_PHY_STAT0_OFFSET); + + hdmi_info(hdmi, "%s: HDMI HPD status %d\n", __func__, phy_status); + + if (phy_status & HDMI_PHY_STAT0_HPD_MASK) + status = connector_status_connected; + else + status = connector_status_disconnected; + + return status; +} + +static struct drm_display_mode * +hdmi_connector_native_mode(struct drm_connector *connector) 
+{ + connector_to_hdmi(connector); + const struct drm_connector_helper_funcs *helper = connector->helper_private; + struct drm_device *dev = connector->dev; + struct drm_display_mode *mode, *largest = NULL; + int high_w = 0, high_h = 0, high_v = 0; + + list_for_each_entry(mode, &hdmi->connector.probed_modes, head) { + mode->vrefresh = drm_mode_vrefresh(mode); + if (helper->mode_valid(connector, mode) != MODE_OK || + (mode->flags & DRM_MODE_FLAG_INTERLACE)) + continue; + + /* Use preferred mode if there is one.. */ + if (mode->type & DRM_MODE_TYPE_PREFERRED) { + hdmi_info(hdmi, "Retrieving native mode from preferred\n"); + return drm_mode_duplicate(dev, mode); + } + + /* + * Otherwise, take the resolution with the largest width, then + * height, then vertical refresh + */ + if (mode->hdisplay < high_w) + continue; + + if (mode->hdisplay == high_w && mode->vdisplay < high_h) + continue; + + if (mode->hdisplay == high_w && mode->vdisplay == high_h && + mode->vrefresh < high_v) + continue; + + high_w = mode->hdisplay; + high_h = mode->vdisplay; + high_v = mode->vrefresh; + largest = mode; + } + + hdmi_info(hdmi, "native mode from largest: %dx%d@%d\n", + high_w, high_h, high_v); + + return largest ? drm_mode_duplicate(dev, largest) : NULL; +} + +static int hdmi_connector_get_modes(struct drm_connector *connector) +{ + connector_to_hdmi(connector); + struct drm_device *dev = connector->dev; + int count = 0; + + HDMI_CHECKPOINT; + + if (!hdmi->edid) { + hdmi->edid = drm_get_edid(connector, &hdmi->i2c->adap); + if (!hdmi->edid) { + dev_err(hdmi->dev, "failed to get edid\n"); + return 0; + } + } + + drm_connector_update_edid_property(connector, hdmi->edid); + + /* + * Destroy the native mode, the attached monitor could have changed. 
+ */ + if (hdmi->native_mode) { + drm_mode_destroy(dev, hdmi->native_mode); + hdmi->native_mode = NULL; + } + + count = drm_add_edid_modes(connector, hdmi->edid); + hdmi->native_mode = hdmi_connector_native_mode(connector); + if (count == 0 && hdmi->native_mode) { + /* + * Find the native mode if this is a digital panel, if we didn't + * find any modes through DDC previously add the native mode to + * the list of modes. + */ + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(dev, hdmi->native_mode); + drm_mode_probed_add(connector, mode); + count = 1; + } + + hdmi_info(hdmi, "Native mode: %dx%d@%d\n", hdmi->native_mode->hdisplay, + hdmi->native_mode->vdisplay, hdmi->native_mode->vrefresh); + + hdmi_info(hdmi, "Edid: width[%d] x height[%d]\n", + hdmi->edid->width_cm, hdmi->edid->height_cm); + hdmi_info(hdmi, "\nVersion: %d\nRevision: %d\nExtensions: %d\n# of modes: %d\n", + hdmi->edid->version, hdmi->edid->revision, + hdmi->edid->extensions, count); + + return count; +} + +#if !defined(PDP_USE_ATOMIC) +static struct drm_encoder *hdmi_connector_best_encoder( + struct drm_connector *connector) +{ + connector_to_hdmi(connector); + + HDMI_CHECKPOINT; + + return &hdmi->encoder; +} +#endif + +static void hdmi_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + encoder_to_hdmi(encoder); + + HDMI_CHECKPOINT; + + hdmi_mode_setup(hdmi, mode); +} + +static bool hdmi_encoder_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + HDMI_CHECKPOINT; + + return true; +} + +static void hdmi_encoder_dpms(struct drm_encoder *encoder, int mode) +{ + encoder_to_hdmi(encoder); + + HDMI_CHECKPOINT; + + if (mode) + hdmi_poweroff(hdmi); + else + hdmi_poweron(hdmi); +} + +static void hdmi_encoder_prepare(struct drm_encoder *encoder) +{ + encoder_to_hdmi(encoder); + + HDMI_CHECKPOINT; + + hdmi_poweroff(hdmi); +} + +static void 
hdmi_encoder_commit(struct drm_encoder *encoder) +{ + encoder_to_hdmi(encoder); + + HDMI_CHECKPOINT; + + hdmi_poweron(hdmi); +} + +static int hdmi_helper_connector_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t value) +{ +#if defined(HDMI_DEBUG) + connector_to_hdmi(connector); + HDMI_CHECKPOINT; + hdmi_info(hdmi, "property name: %s\n", property->name); +#endif + return 0; +} + +static int hdmi_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + return MODE_OK; +} + +static struct drm_crtc * +hdmi_encoder_crtc_get(struct drm_encoder *encoder) +{ + encoder_to_hdmi(encoder); + + HDMI_CHECKPOINT; + return hdmi->crtc; +} + +static void +hdmi_encoder_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); +} + +static const struct drm_encoder_funcs hdmi_encoder_funcs = { + .destroy = hdmi_encoder_destroy, +}; + +static const struct drm_encoder_helper_funcs hdmi_encoder_helper_funcs = { + .dpms = hdmi_encoder_dpms, + .prepare = hdmi_encoder_prepare, + .commit = hdmi_encoder_commit, + .mode_set = hdmi_encoder_mode_set, + .mode_fixup = hdmi_encoder_mode_fixup, + .get_crtc = hdmi_encoder_crtc_get, +}; + +static const struct drm_connector_funcs hdmi_connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .detect = hdmi_helper_connector_detect, + .destroy = hdmi_helper_connector_destroy, + .set_property = hdmi_helper_connector_set_property, +#if defined(PDP_USE_ATOMIC) + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +#else + .dpms = drm_helper_connector_dpms, +#endif +}; + +static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = { + .get_modes = hdmi_connector_get_modes, + .mode_valid = hdmi_mode_valid, + /* + * For atomic, don't set atomic_best_encoder or best_encoder. 
This will + * cause the DRM core to fallback to drm_atomic_helper_best_encoder(). + * This is fine as we only have a single connector and encoder. + */ +#if !defined(PDP_USE_ATOMIC) + .best_encoder = hdmi_connector_best_encoder, +#endif +}; + +static void hdmi_init_interrupts(struct hdmi_device *hdmi) +{ + hdmi_write_reg32(hdmi, HDMI_IH_MUTE_PHY_STAT0_OFFSET, 0x3e); + + /* Mute all other interrupts */ + hdmi_write_reg32(hdmi, HDMI_IH_MUTE_FC_STAT0_OFFSET, 0xff); + hdmi_write_reg32(hdmi, HDMI_IH_MUTE_FC_STAT1_OFFSET, 0xff); + hdmi_write_reg32(hdmi, HDMI_IH_MUTE_FC_STAT2_OFFSET, 0xff); + hdmi_write_reg32(hdmi, HDMI_IH_MUTE_AS_STAT0_OFFSET, 0xff); + hdmi_write_reg32(hdmi, HDMI_IH_MUTE_I2CM_STAT0_OFFSET, 0xff); + hdmi_write_reg32(hdmi, HDMI_IH_MUTE_CEC_STAT0_OFFSET, 0xff); + hdmi_write_reg32(hdmi, HDMI_IH_MUTE_VP_STAT0_OFFSET, 0xff); + hdmi_write_reg32(hdmi, HDMI_IH_MUTE_I2CMPHY_STAT0_OFFSET, 0xff); + hdmi_write_reg32(hdmi, HDMI_IH_MUTE_AHBDMAAUD_STAT0_OFFSET, 0xff); +} + +static void hdmi_disable_hpd(struct hdmi_device *hdmi) +{ + /* Disable hot plug interrupts */ + HDMI_CHECKPOINT; + + /* Clear HPD interrupt by writing 1*/ + if (IS_BIT_SET(hdmi_read_reg32(hdmi, HDMI_IH_PHY_STAT0_OFFSET), + HDMI_IH_PHY_STAT0_HPD_START)) { + hdmi_write_reg32(hdmi, HDMI_IH_PHY_STAT0_OFFSET, + SET_FIELD(HDMI_IH_PHY_STAT0_HPD_START, + HDMI_IH_PHY_STAT0_HPD_MASK, 1)); + } + + hdmi_init_interrupts(hdmi); // actually disables all (ready to enable just what we want). 
+ + /* Power off */ + hdmi_write_reg32(hdmi, HDMI_PHY_CONF0_OFFSET, + SET_FIELD(HDMI_PHY_CONF0_TXPWRON_START, HDMI_PHY_CONF0_TXPWRON_MASK, 0) | + SET_FIELD(HDMI_PHY_CONF0_PDDQ_START, HDMI_PHY_CONF0_PDDQ_MASK, 1) | + SET_FIELD(HDMI_PHY_CONF0_SVSRET_START, HDMI_PHY_CONF0_SVSRET_MASK, 1)); + /* Now flip the master switch to mute */ + hdmi_write_reg32(hdmi, HDMI_IH_MUTE_OFFSET, HDMI_IH_MUTE_ALL_MASK); + +} + +#if defined(HDMI_USE_HPD_INTERRUPTS) + +static void hdmi_enable_hpd(struct hdmi_device *hdmi) +{ + /* Enable hot plug interrupts */ + HDMI_CHECKPOINT; + + /* Clear HPD interrupt by writing 1*/ + if (IS_BIT_SET(hdmi_read_reg32(hdmi, HDMI_IH_PHY_STAT0_OFFSET), + HDMI_IH_PHY_STAT0_HPD_START)) { + hdmi_write_reg32(hdmi, HDMI_IH_PHY_STAT0_OFFSET, + SET_FIELD(HDMI_IH_PHY_STAT0_HPD_START, + HDMI_IH_PHY_STAT0_HPD_MASK, 1)); + } + + hdmi_init_interrupts(hdmi); + + /* Power off */ + hdmi_write_reg32(hdmi, HDMI_PHY_CONF0_OFFSET, + SET_FIELD(HDMI_PHY_CONF0_TXPWRON_START, HDMI_PHY_CONF0_TXPWRON_MASK, 0) | + SET_FIELD(HDMI_PHY_CONF0_PDDQ_START, HDMI_PHY_CONF0_PDDQ_MASK, 1) | + SET_FIELD(HDMI_PHY_CONF0_SVSRET_START, HDMI_PHY_CONF0_SVSRET_MASK, 1)); + + /* Enable hot plug detection */ + hdmi_mod_reg32(hdmi, HDMI_PHY_CONF0_OFFSET, + HDMI_PHY_CONF0_ENHPDRXSENSE_MASK, + HDMI_PHY_CONF0_ENHPDRXSENSE_MASK); + + /* Now flip the master switch to unmute */ + hdmi_write_reg32(hdmi, HDMI_IH_MUTE_OFFSET, + SET_FIELD(HDMI_IH_MUTE_ALL_START, + HDMI_IH_MUTE_ALL_MASK, 0)); + +} + +static void hdmi_irq_handler(void *data) +{ + struct hdmi_device *hdmi = (struct hdmi_device *)data; + u32 phy_stat0 = 0; + u32 ih_phy_stat0 = 0; + u32 hpd_polarity = 0; + u32 decode = 0; + + HDMI_CHECKPOINT; + + /* Mute all interrupts */ + hdmi_write_reg32(hdmi, HDMI_IH_MUTE_OFFSET, 0x03); + + decode = hdmi_read_reg32(hdmi, HDMI_IH_DECODE_OFFSET); + + /* Only support interrupts on PHY (eg HPD) */ + if (!IS_BIT_SET(decode, HDMI_IH_DECODE_PHY_START)) { + hdmi_info(hdmi, "%s: Unknown interrupt generated, decode: %x\n", + 
__func__, hdmi_read_reg32(hdmi, HDMI_IH_DECODE_OFFSET)); + return; + } + + phy_stat0 = hdmi_read_reg32(hdmi, HDMI_PHY_STAT0_OFFSET); + hpd_polarity = GET_FIELD(hdmi_read_reg32(hdmi, HDMI_PHY_POL0_OFFSET), + HDMI_PHY_POL0_HPD_START, HDMI_PHY_POL0_HPD_MASK); + ih_phy_stat0 = hdmi_read_reg32(hdmi, HDMI_IH_PHY_STAT0_OFFSET); + + hdmi_info(hdmi, "%s: Hdmi interrupt detected PHYStat0: 0x%x, HPD Polarity: 0x%x, IH Decode: 0x%x, IH PhyStat0: 0x%x", + __func__, phy_stat0, hpd_polarity, decode, ih_phy_stat0); + + /* Check if hot-plugging occurred */ + if (GET_FIELD(phy_stat0, HDMI_PHY_STAT0_HPD_START, HDMI_PHY_STAT0_HPD_MASK) == 1 && + GET_FIELD(ih_phy_stat0, HDMI_IH_PHY_STAT0_HPD_START, HDMI_IH_PHY_STAT0_HPD_MASK) == 1) { + hdmi_info(hdmi, "%s: Hot plug detected", __func__); + + /* Flip polarity */ + hdmi_write_reg32(hdmi, HDMI_PHY_POL0_OFFSET, + SET_FIELD(HDMI_PHY_POL0_HPD_START, HDMI_PHY_POL0_HPD_MASK, !hpd_polarity)); + + /* Write 1 to clear interrupt */ + hdmi_write_reg32(hdmi, HDMI_IH_PHY_STAT0_OFFSET, ih_phy_stat0); + + /* Finish mode setup */ + if (hdmi->hpd_detect == 0) { + hdmi->hpd_detect = 1; + //hdmi_poweron(hdmi); // do indelayed work + } + + /* Mute non-HPD interrupts */ + hdmi_init_interrupts(hdmi); + + /* Now flip the master switch to unmute */ + hdmi_write_reg32(hdmi, HDMI_IH_MUTE_OFFSET, 0); + + queue_work(hdmi->workq, &hdmi->hpd_work); + } else { + hdmi_info(hdmi, "%s: Cable unplugged\n", __func__); + + /* Flip polarity */ + hdmi_write_reg32(hdmi, HDMI_PHY_POL0_OFFSET, + SET_FIELD(HDMI_PHY_POL0_HPD_START, HDMI_PHY_POL0_HPD_MASK, !hpd_polarity)); + + /* Write 1 to clear interrupts */ + hdmi_write_reg32(hdmi, HDMI_IH_PHY_STAT0_OFFSET, ih_phy_stat0); + + /* Unmute and enable HPD interrupt */ + hdmi_enable_hpd(hdmi); + + /* Cable was unplugged */ + hdmi->hpd_detect = 0; + } +} +#endif // HDMI_USE_HPD_INTERRUPTS + +static void hdmi_delayed_hpd(struct work_struct *work) +{ + struct hdmi_device *hdmi = container_of(work, struct hdmi_device, hpd_work); + struct 
drm_connector *connector = &hdmi->connector; + + HDMI_CHECKPOINT; + + drm_kms_helper_hotplug_event(connector->dev); +} + +static int hdmi_register(struct hdmi_device *hdmi) +{ + struct pdp_drm_private *pdp_priv = hdmi->drm_dev->dev_private; + + // we should make our own container connector object and put queue into that. + INIT_WORK(&hdmi->hpd_work, hdmi_delayed_hpd); + + hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD; + hdmi->connector.interlace_allowed = 0; + hdmi->connector.doublescan_allowed = 0; + drm_connector_init(hdmi->drm_dev, &hdmi->connector, &hdmi_connector_funcs, + DRM_MODE_CONNECTOR_HDMIA); + drm_connector_helper_add(&hdmi->connector, + &hdmi_connector_helper_funcs); + + hdmi->encoder.possible_crtcs = (BIT(0) | BIT(1)); + hdmi->encoder.possible_clones = 1 << 1; + drm_encoder_init(hdmi->drm_dev, &hdmi->encoder, &hdmi_encoder_funcs, + DRM_MODE_ENCODER_TMDS, NULL); + drm_encoder_helper_add(&hdmi->encoder, &hdmi_encoder_helper_funcs); + + drm_connector_attach_encoder(&hdmi->connector, &hdmi->encoder); + + // save the connector in the pdp priv which gets freed by tc layer. + pdp_priv->connector = &hdmi->connector; + + drm_kms_helper_poll_init(hdmi->drm_dev); + + return 0; +} + +static void hdmi_destroy(struct hdmi_device *hdmi) +{ + // disable hot plug detection, its interrupt + hdmi_disable_hpd(hdmi); + plato_disable_interrupt(hdmi->dev->parent, PLATO_INTERRUPT_HDMI); + plato_set_interrupt_handler(hdmi->dev->parent, PLATO_INTERRUPT_HDMI, NULL, NULL); + + if (hdmi->workq) { + flush_workqueue(hdmi->workq); + destroy_workqueue(hdmi->workq); + hdmi->workq = NULL; + } + + hdmi_i2c_deinit(hdmi); + + // all allocs and mappings use devm so will be freed/unmapped on device shutdown. 
+ dev_set_drvdata(hdmi->dev, NULL); +} + + +static int hdmi_init(struct device *dev, struct device *master, void *data) +{ + int err; + struct hdmi_device *hdmi; + struct resource *regs_resource; + struct platform_device *pdev = to_platform_device(dev); + + hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); + if (!hdmi) + return -ENOMEM; + + hdmi->dev = dev; + hdmi->drm_dev = data; + + hdmi_info(hdmi, "DRM device: %p\n", hdmi->drm_dev); + + hdmi_info(hdmi, "max_width is %d\n", + hdmi->drm_dev->mode_config.max_width); + hdmi_info(hdmi, "max_height is %d\n", + hdmi->drm_dev->mode_config.max_height); + + regs_resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, PLATO_HDMI_RESOURCE_REGS); + if (regs_resource == NULL) { + dev_err(dev, "%s: failed to get register resource %s", __func__, PLATO_HDMI_RESOURCE_REGS); + return -ENXIO; + } + + /* Use managed mmio, OS handles free on destroy */ + hdmi->core_regs = devm_ioremap_resource(dev, regs_resource); + if (IS_ERR(hdmi->core_regs)) { + dev_err(dev, "%s: Failed to map HDMI registers", __func__); + return PTR_ERR(hdmi->core_regs); + } + + /* Product and revision IDs */ + hdmi_info(hdmi, "\n\nDesign ID: %d\nRev ID: %d\nProduct ID: %d\nProduct ID HDCP: %d\n", + hdmi_read_reg32(hdmi, HDMI_ID_DESIGN_ID_OFFSET), + hdmi_read_reg32(hdmi, HDMI_ID_REVISION_ID_OFFSET), + hdmi_read_reg32(hdmi, HDMI_ID_PRODUCT_ID0_OFFSET), + GET_FIELD(hdmi_read_reg32(hdmi, HDMI_ID_PRODUCT_ID1_OFFSET), + HDMI_ID_PRODUCT_ID1_HDCP_START, HDMI_ID_PRODUCT_ID1_HDCP_MASK)); + + hdmi_info(hdmi, "\nHDCP Present: %d\nHDMI 1.4: %d\nHDMI 2.0: %d\nPHY Type: %d\n\n", + GET_FIELD(hdmi_read_reg32(hdmi, HDMI_ID_CONFIG0_ID_OFFSET), + HDMI_ID_CONFIG0_ID_HDCP_START, HDMI_ID_CONFIG0_ID_HDCP_MASK), + GET_FIELD(hdmi_read_reg32(hdmi, HDMI_ID_CONFIG0_ID_OFFSET), + HDMI_ID_CONFIG0_ID_HDMI14_START, HDMI_ID_CONFIG0_ID_HDMI14_MASK), + GET_FIELD(hdmi_read_reg32(hdmi, HDMI_ID_CONFIG1_ID_OFFSET), + HDMI_ID_CONFIG1_ID_HDMI20_START, HDMI_ID_CONFIG1_ID_HDMI20_MASK), + 
hdmi_read_reg32(hdmi, HDMI_ID_CONFIG2_ID_OFFSET)); + + /* Step A: Initialise PHY */ + err = phy_init(hdmi); + if (err != 0) { + hdmi_error(hdmi, "%s: Failed to initialise PHY (err %d)\n", __func__, err); + return err; + } + + /* Step B: Initialise video/frame composer and DTD for VGA/DVI mode */ + /* Don't need this with DRM? */ + err = video_init(hdmi); + if (err != 0) { + hdmi_error(hdmi, "%s: Failed to initialise Video (err %d)\n", __func__, err); + return err; + } + + /* Step B: Initialise EDID/I2C */ + err = hdmi_i2c_init(hdmi); + if (err != 0) { + hdmi_error(hdmi, "Failed to initialise I2C interface (err %d)", err); + return err; + } + + /* 16 - 1920x1080@60Hz */ + hdmi->hdmi_data.vic = 16; + hdmi->native_mode = drm_mode_duplicate(hdmi->drm_dev, &forced_mode); + + hdmi->hdmi_data.video_mode.pixel_repetition_input = 0; + hdmi->hdmi_data.enc_in_format = ENCODING_RGB; + hdmi->hdmi_data.enc_out_format = ENCODING_RGB; + hdmi->hdmi_data.enc_color_depth = 8; // Nick's original was 10. Why? 
+ hdmi->hdmi_data.pix_repet_factor = 0; + hdmi->hdmi_data.pp_default_phase = 0; + hdmi->hdmi_data.hdcp_enable = 0; + hdmi->hdmi_data.colorimetry = 0; + hdmi->hdmi_data.active_aspect_ratio = 8; + + /* Non-drm mode information */ + hdmi->hdmi_data.video_mode.data_enable_polarity = true; + hdmi->hdmi_data.video_mode.dvi = 0; + hdmi->hdmi_data.video_mode.hsync_polarity = 1; + hdmi->hdmi_data.video_mode.vsync_polarity = 1; + hdmi->hdmi_data.video_mode.interlaced = 0; + hdmi->hdmi_data.video_mode.pixel_repetition_input = 0; + hdmi->hdmi_data.video_mode.pixel_repetition_output = 0; + + // drm_mode_probed_add(hdmi->connector, hdmi->native_mode); + + hdmi_mode_setup(hdmi, hdmi->native_mode); + + hdmi->workq = alloc_ordered_workqueue("plato_hdmi_hpdq", 0); + if (!hdmi->workq) { + dev_info(dev, "failed to alloc ordered workqueue\n"); + return 1; + } + +#if defined(HDMI_USE_HPD_INTERRUPTS) + err = plato_set_interrupt_handler(hdmi->dev->parent, + PLATO_INTERRUPT_HDMI, + hdmi_irq_handler, + hdmi); + if (err) { + dev_info(dev, "failed to set interrupt handler (err=%d)\n", err); + return err; + } + + err = plato_enable_interrupt(hdmi->dev->parent, PLATO_INTERRUPT_HDMI); + if (err) { + dev_info(dev, "failed to enable HDMI interrupts (err=%d)\n", err); + return err; + } + + hdmi_enable_hpd(hdmi); +#endif // HDMI_USE_HPD_INTERRUPTS + + mdelay(100); + err = hdmi_register(hdmi); + if (err) { + dev_err(dev, "Failed to register HDMI device (err %d)", err); + return err; + } + + hdmi_info(hdmi, "%s: Number of FBs: %d\n", __func__, + hdmi->drm_dev->mode_config.num_fb); + + dev_set_drvdata(hdmi->dev, hdmi); + + return 0; +} + +static int hdmi_component_bind(struct device *dev, struct device *master, void *data) +{ + HDMI_CHECKPOINT; + dev_info(dev, "loading platform device"); + + return hdmi_init(dev, master, data); +} + +static void hdmi_component_unbind(struct device *dev, struct device *master, void *data) +{ + struct hdmi_device *hdmi = dev_get_drvdata(dev); + + HDMI_CHECKPOINT; + 
dev_info(dev, "unloading platform device"); + + hdmi_destroy(hdmi); +} + +static const struct component_ops hdmi_component_ops = { + .bind = hdmi_component_bind, + .unbind = hdmi_component_unbind, +}; + +static int hdmi_probe(struct platform_device *pdev) +{ + HDMI_CHECKPOINT; + return component_add(&pdev->dev, &hdmi_component_ops); +} + +static int hdmi_remove(struct platform_device *pdev) +{ + HDMI_CHECKPOINT; + component_del(&pdev->dev, &hdmi_component_ops); + return 0; +} + +static struct platform_device_id hdmi_platform_device_id_table[] = { + { .name = PLATO_DEVICE_NAME_HDMI, .driver_data = HDMI_VERSION_PLATO }, + { }, +}; + +static struct platform_driver hdmi_platform_driver = { + .probe = hdmi_probe, + .remove = hdmi_remove, + .driver = { + .owner = THIS_MODULE, + .name = DRIVER_NAME, + }, + .id_table = hdmi_platform_device_id_table, +}; + +module_platform_driver(hdmi_platform_driver); + +MODULE_AUTHOR("Imagination Technologies Ltd. "); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_i2c.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_i2c.c new file mode 100644 index 000000000000..346e606c4866 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_i2c.c @@ -0,0 +1,257 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +@Copyright Portions Copyright (c) Synopsys Ltd. 
All Rights Reserved +@License Synopsys Permissive License + +The Synopsys Software Driver and documentation (hereinafter "Software") +is an unsupported proprietary work of Synopsys, Inc. unless otherwise +expressly agreed to in writing between Synopsys and you. + +The Software IS NOT an item of Licensed Software or Licensed Product under +any End User Software License Agreement or Agreement for Licensed Product +with Synopsys or any supplement thereto. Permission is hereby granted, +free of charge, to any person obtaining a copy of this software annotated +with this license and the Software, to deal in the Software without +restriction, including without limitation the rights to use, copy, modify, +merge, publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, subject +to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. + +*/ /**************************************************************************/ +#include +#include + +#include "hdmi.h" +#include "hdmi_regs.h" + +/*************************************************************************/ /*! 
+ EDID Information +*/ /**************************************************************************/ +#define EDID_SIGNATURE 0x00ffffffffffff00ull +#define EDID_BLOCK_SIZE 128 +#define EDID_CHECKSUM 0 // per VESA spec, 8 bit checksum of base block +#define EDID_SLAVE_ADDRESS 0x50 +#define EDID_SEGMENT_ADDRESS 0x00 +#define EDID_REFRESH_RATE 60000 +#define EDID_I2C_TIMEOUT_MS 1000 + +static int hdmi_i2c_read(struct hdmi_device *hdmi, struct i2c_msg *msg) +{ + u32 intr_reg = 0; + u32 poll = 0; + u8 *buf = msg->buf; + u32 len = msg->len; + u32 i = 0; + + if (len != EDID_BLOCK_SIZE) + return 0; + + /* NOT THE RIGHT WAY TO DO THIS, NEED TO USE ADDR FIELD IN I2C_MSG */ + if (hdmi->i2c->ddc_addr >= 0xFF) { + hdmi->i2c->ddc_addr = 0; + hdmi->i2c->segment_ptr++; + } + hdmi_write_reg32(hdmi, HDMI_I2CM_SEGPTR_OFFSET, hdmi->i2c->segment_ptr); + + memset(buf, 0, len); + + for (i = 0; i < msg->len; i++) { + /* Write 1 to clear interrupt status */ + hdmi_write_reg32(hdmi, HDMI_IH_I2CM_STAT0_OFFSET, + SET_FIELD(HDMI_IH_I2CM_STAT0_I2CMASTERDONE_START, + HDMI_IH_I2CM_STAT0_I2CMASTERDONE_MASK, 1)); + + /* Setup EDID base address */ + hdmi_write_reg32(hdmi, HDMI_I2CM_SLAVE_OFFSET, EDID_SLAVE_ADDRESS); + hdmi_write_reg32(hdmi, HDMI_I2CM_SEGADDR_OFFSET, EDID_SEGMENT_ADDRESS); + + /* Address offset */ + hdmi_write_reg32(hdmi, HDMI_I2CM_ADDRESS_OFFSET, hdmi->i2c->ddc_addr); + + /* Set operation to normal read */ + hdmi_write_reg32(hdmi, HDMI_I2CM_OPERATION_OFFSET, + SET_FIELD(HDMI_I2CM_OPERATION_RD_START, + HDMI_I2CM_OPERATION_RD_MASK, 1)); + + intr_reg = hdmi_read_reg32(hdmi, HDMI_IH_I2CM_STAT0_OFFSET); + while (!IS_BIT_SET(intr_reg, HDMI_IH_I2CM_STAT0_I2CMASTERDONE_START) && + !IS_BIT_SET(intr_reg, HDMI_IH_I2CM_STAT0_I2CMASTERERROR_START)) { + intr_reg = hdmi_read_reg32(hdmi, HDMI_IH_I2CM_STAT0_OFFSET); + mdelay(1); + poll += 1; + if (poll > EDID_I2C_TIMEOUT_MS) { + dev_err(hdmi->dev, "%s: Timeout polling on I2CMasterDoneBit (STAT0: %d)\n", __func__, intr_reg); + return -ETIMEDOUT; + } + 
} + + if (IS_BIT_SET(intr_reg, HDMI_IH_I2CM_STAT0_I2CMASTERERROR_START)) { + dev_err(hdmi->dev, "%s: I2C EDID read failed, Master error bit is set\n", __func__); + return -ETIMEDOUT; + } + + *buf++ = hdmi_read_reg32(hdmi, HDMI_I2CM_DATAI_OFFSET) & 0xFF; + hdmi->i2c->ddc_addr++; + } + + return 0; +} + +static int hdmi_i2c_write(struct hdmi_device *hdmi, struct i2c_msg *msg) +{ + if (msg->addr == EDID_SLAVE_ADDRESS && msg->len == 0x01) + hdmi->i2c->ddc_addr = msg->buf[0]; + + return 0; + +} +static int hdmi_i2c_master_xfer(struct i2c_adapter *adap, + struct i2c_msg *msgs, int num) +{ + struct hdmi_device *hdmi = i2c_get_adapdata(adap); + int i; + + for (i = 0; i < num; i++) { + struct i2c_msg *msg = &msgs[i]; + + hdmi_info(hdmi, "[msg: %d out of %d][len: %d][type: %s][addr: 0x%x][buf: %d]\n", + i+1, num, msg->len, + (msg->flags & I2C_M_RD) ? "read" : "write", msg->addr, msg->buf[0]); + + if (msg->flags & I2C_M_RD) + hdmi_i2c_read(hdmi, msg); + else + hdmi_i2c_write(hdmi, msg); + } + + return i; +} + +static u32 hdmi_i2c_func(struct i2c_adapter *adapter) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm hdmi_algo = { + .master_xfer = hdmi_i2c_master_xfer, + .functionality = hdmi_i2c_func, +}; + +int hdmi_i2c_init(struct hdmi_device *hdmi) +{ + int ret; + + hdmi->i2c = devm_kzalloc(hdmi->dev, sizeof(*hdmi->i2c), GFP_KERNEL); + if (!hdmi->i2c) + return -ENOMEM; + + mutex_init(&hdmi->i2c->lock); + init_completion(&hdmi->i2c->comp); + + hdmi->i2c->adap.class = I2C_CLASS_DDC; + hdmi->i2c->adap.owner = THIS_MODULE; + hdmi->i2c->adap.dev.parent = hdmi->dev; + hdmi->i2c->adap.dev.of_node = hdmi->dev->of_node; + hdmi->i2c->adap.algo = &hdmi_algo; + snprintf(hdmi->i2c->adap.name, sizeof(hdmi->i2c->adap.name), "Plato HDMI"); + i2c_set_adapdata(&hdmi->i2c->adap, hdmi); + + ret = i2c_add_adapter(&hdmi->i2c->adap); + if (ret) { + dev_err(hdmi->dev, "Failed to add %s i2c adapter\n", + hdmi->i2c->adap.name); + devm_kfree(hdmi->dev, hdmi->i2c); + 
return ret; + } + + hdmi->i2c->segment_ptr = 0; + hdmi->i2c->ddc_addr = 0; + + /* Mask interrupts */ + hdmi_write_reg32(hdmi, HDMI_I2CM_INT_OFFSET, + SET_FIELD(HDMI_I2CM_INT_DONE_MASK_START, + HDMI_I2CM_INT_DONE_MASK_MASK, 1) | + SET_FIELD(HDMI_I2CM_INT_READ_REQ_MASK_START, + HDMI_I2CM_INT_READ_REQ_MASK_MASK, 1)); + hdmi_write_reg32(hdmi, HDMI_I2CM_CTLINT_OFFSET, + SET_FIELD(HDMI_I2CM_CTLINT_ARB_MASK_START, + HDMI_I2CM_CTLINT_ARB_MASK_START, 1) | + SET_FIELD(HDMI_I2CM_CTLINT_NACK_MASK_START, + HDMI_I2CM_CTLINT_NACK_MASK_START, 1)); + + hdmi_write_reg32(hdmi, HDMI_I2CM_DIV_OFFSET, + SET_FIELD(HDMI_I2CM_DIV_FAST_STD_MODE_START, + HDMI_I2CM_DIV_FAST_STD_MODE_START, 0)); + + /* Re-enable interrupts */ + hdmi_write_reg32(hdmi, HDMI_I2CM_INT_OFFSET, + SET_FIELD(HDMI_I2CM_INT_DONE_MASK_START, + HDMI_I2CM_INT_DONE_MASK_MASK, 0)); + hdmi_write_reg32(hdmi, HDMI_I2CM_CTLINT_OFFSET, + SET_FIELD(HDMI_I2CM_CTLINT_ARB_MASK_START, + HDMI_I2CM_CTLINT_ARB_MASK_START, 0) | + SET_FIELD(HDMI_I2CM_CTLINT_NACK_MASK_START, + HDMI_I2CM_CTLINT_NACK_MASK_START, 0)); + + hdmi_info(hdmi, "registered %s I2C bus driver\n", hdmi->i2c->adap.name); + + return ret; +} + +void hdmi_i2c_deinit(struct hdmi_device *hdmi) +{ + i2c_del_adapter(&hdmi->i2c->adap); +} diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_phy.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_phy.c new file mode 100644 index 000000000000..ce75e3c79fe4 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_phy.c @@ -0,0 +1,693 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +@Copyright Portions Copyright (c) Synopsys Ltd. 
All Rights Reserved +@License Synopsys Permissive License + +The Synopsys Software Driver and documentation (hereinafter "Software") +is an unsupported proprietary work of Synopsys, Inc. unless otherwise +expressly agreed to in writing between Synopsys and you. + +The Software IS NOT an item of Licensed Software or Licensed Product under +any End User Software License Agreement or Agreement for Licensed Product +with Synopsys or any supplement thereto. Permission is hereby granted, +free of charge, to any person obtaining a copy of this software annotated +with this license and the Software, to deal in the Software without +restriction, including without limitation the rights to use, copy, modify, +merge, publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, subject +to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. 
+ +*/ /**************************************************************************/ + +#include +#include "hdmi_phy.h" +#include "hdmi_regs.h" + +/* PHY Gen2 slave addr */ +#if defined(EMULATOR) +#define HDMI_PHY_SLAVE_ADDRESS 0xA0 +#else +#define HDMI_PHY_SLAVE_ADDRESS 0x00 +#endif + +#if defined(VIRTUAL_PLATFORM) +#define HDMI_PHY_TIMEOUT_MS 1 +#define HDMI_PHY_POLL_INTERVAL_MS 1 +/* PHY dependent time for reset */ +#define HDMI_PHY_RESET_TIME_MS 1 +#else +/* Timeout for waiting on PHY operations */ +#define HDMI_PHY_TIMEOUT_MS 200 +#define HDMI_PHY_POLL_INTERVAL_MS 100 +/* PHY dependent time for reset */ +#define HDMI_PHY_RESET_TIME_MS 10 +#endif + +/* PHY I2C register definitions */ +#define PHY_PWRCTRL_OFFSET (0x00) +#define PHY_SERDIVCTRL_OFFSET (0x01) +#define PHY_SERCKCTRL_OFFSET (0x02) +#define PHY_SERCKKILLCTRL_OFFSET (0x03) +#define PHY_TXRESCTRL_OFFSET (0x04) +#define PHY_CKCALCTRL_OFFSET (0x05) +#define PHY_OPMODE_PLLCFG_OFFSET (0x06) +#define PHY_CLKMEASCTRL_OFFSET (0x07) +#define PHY_TXMEASCTRL_OFFSET (0x08) +#define PHY_CKSYMTXCTRL_OFFSET (0x09) +#define PHY_CMPSEQCTRL_OFFSET (0x0A) +#define PHY_CMPPWRCTRL_OFFSET (0x0B) +#define PHY_CMPMODECTRL_OFFSET (0x0C) +#define PHY_MEASCTRL_OFFSET (0x0D) +#define PHY_VLEVCTRL_OFFSET (0x0E) +#define PHY_D2ACTRL_OFFSET (0x0F) +#define PHY_PLLCURRCTRL_OFFSET (0x10) +#define PHY_PLLDRVANACTRL_OFFSET (0x11) +#define PHY_PLLCTRL_OFFSET (0x14) +#define PHY_PLLGMPCTRL_OFFSET (0x15) +#define PHY_PLLMEASCTRL_OFFSET (0x16) +#define PHY_PLLCLKBISTPHASE_OFFSET (0x17) +#define PHY_COMPRCAL_OFFSET (0x18) +#define PHY_TXTERM_OFFSET (0x19) +#define PHY_PWRSEQ_PATGENSKIP_OFFSET (0x1A) +/* Leaving out BIST regs */ + +static bool phy_poll(struct hdmi_device *hdmi, int offset, u32 mask) +{ + u16 timeout = 0; + + while (!(hdmi_read_reg32(hdmi, offset) & mask) && + timeout < HDMI_PHY_TIMEOUT_MS) { + msleep(HDMI_PHY_POLL_INTERVAL_MS); + timeout += HDMI_PHY_POLL_INTERVAL_MS; + } + if (timeout == HDMI_PHY_TIMEOUT_MS) { + 
dev_err(hdmi->dev, "%s: PHY timed out polling on 0x%x in register offset 0x%x\n", __func__, mask, offset); + return false; + } + return true; +} + +#if defined(NEED_PHY_READ) +static u16 phy_i2c_read(struct hdmi_device *hdmi, u8 addr) +{ + u16 data; + + hdmi_write_reg32(hdmi, HDMI_PHY_I2CM_SLAVE_OFFSET, HDMI_PHY_SLAVE_ADDRESS); + hdmi_write_reg32(hdmi, HDMI_PHY_I2CM_ADDRESS_OFFSET, addr); + hdmi_write_reg32(hdmi, HDMI_PHY_I2CM_SLAVE_OFFSET, HDMI_PHY_SLAVE_ADDRESS); + hdmi_write_reg32(hdmi, HDMI_PHY_I2CM_ADDRESS_OFFSET, addr); + hdmi_write_reg32(hdmi, HDMI_PHY_I2CM_OPERATION_OFFSET, + SET_FIELD(HDMI_PHY_I2CM_OPERATION_RD_START, + HDMI_PHY_I2CM_OPERATION_RD_MASK, 1)); + + /* Wait for done indication */ + //phy_poll(base, HDMI_IH_I2CMPHY_STAT0_OFFSET, HDMI_IH_I2CMPHY_STAT0_I2CMPHYDONE_MASK, ERROR); + mdelay(10); + /* read the data registers */ + data = (hdmi_read_reg32(hdmi, HDMI_PHY_I2CM_DATAI_1_OFFSET) & 0xFF) << 8; + data |= (hdmi_read_reg32(hdmi, HDMI_PHY_I2CM_DATAI_0_OFFSET) & 0xFF); + + return data; +} +#endif // NEED_PHY_READ + +static int phy_i2c_write(struct hdmi_device *hdmi, u8 addr, u16 data) +{ + HDMI_CHECKPOINT; + + hdmi_write_reg32(hdmi, HDMI_PHY_I2CM_SLAVE_OFFSET, HDMI_PHY_SLAVE_ADDRESS); + hdmi_write_reg32(hdmi, HDMI_PHY_I2CM_ADDRESS_OFFSET, addr); + hdmi_write_reg32(hdmi, HDMI_PHY_I2CM_DATAO_1_OFFSET, (data >> 8)); + hdmi_write_reg32(hdmi, HDMI_PHY_I2CM_DATAO_0_OFFSET, data & 0xFF); + hdmi_write_reg32(hdmi, HDMI_PHY_I2CM_OPERATION_OFFSET, + SET_FIELD(HDMI_PHY_I2CM_OPERATION_WR_START, + HDMI_PHY_I2CM_OPERATION_WR_MASK, 1)); + + /* Wait for done interrupt */ + if (!phy_poll(hdmi, HDMI_IH_I2CMPHY_STAT0_OFFSET, + HDMI_IH_I2CMPHY_STAT0_I2CMPHYDONE_MASK)) + return -ETIMEDOUT; + + hdmi_write_reg32(hdmi, HDMI_IH_I2CMPHY_STAT0_OFFSET, 0x3); + return 0; +} + +/* + * Configure PHY based on pixel clock, color resolution, pixel repetition, and PHY model + * NOTE: This assumes PHY model TSMC 28-nm HPM/ 1.8V + */ +void phy_configure_mode(struct hdmi_device *hdmi, 
struct drm_display_mode *mode) +{ + int pixel_clock; + u8 color_depth; + + HDMI_CHECKPOINT; + + pixel_clock = mode->clock / 10; + color_depth = hdmi->hdmi_data.enc_color_depth; + + hdmi_write_reg32(hdmi, HDMI_PHY_CONF0_OFFSET, + SET_FIELD(HDMI_PHY_CONF0_SELDATAENPOL_START, + HDMI_PHY_CONF0_SELDATAENPOL_MASK, 1) | + SET_FIELD(HDMI_PHY_CONF0_ENHPDRXSENSE_START, + HDMI_PHY_CONF0_ENHPDRXSENSE_MASK, 1) | + SET_FIELD(HDMI_PHY_CONF0_PDDQ_START, + HDMI_PHY_CONF0_PDDQ_MASK, 1)); + + /* RESISTANCE TERM 133Ohm Cfg */ + phy_i2c_write(hdmi, PHY_TXTERM_OFFSET, 0x0004); /* TXTERM */ + /* REMOVE CLK TERM */ + //phy_i2c_write(hdmi, PHY_CKCALCTRL_OFFSET, 0x8000); /* CKCALCTRL */ + + hdmi_info(hdmi, " %s: pixel clock: %d, color_depth: %d\n", __func__, + pixel_clock, color_depth); + + switch (pixel_clock) { + case 2520: + switch (color_depth) { + case 8: + /* PLL/MPLL Cfg */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x00b3); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0018); /* CURRCTRL */ + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0000); /* GMPCTRL */ + break; + case 10: /* TMDS = 31.50MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x2153); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0018); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0000); + break; + case 12: /* TMDS = 37.80MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x40F3); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0018); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0000); + break; + case 16: /* TMDS = 50.40MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x60B2); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0028); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0001); + break; + default: + dev_err(hdmi->dev, "%s: Color depth not supported (%d)", + __func__, color_depth); + break; + } + /* PREEMP Cgf 0.00 */ + //phy_i2c_write(hdmi, PHY_CKSYMTXCTRL_OFFSET, 0x8009); /* CKSYMTXCTRL */ + /* TX/CK LVL 10 */ + //phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0251); /* VLEVCTRL */ + 
break; + case 2700: + switch (color_depth) { + case 8: + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x00B3); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0018); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0000); + break; + case 10: /* TMDS = 33.75MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x2153); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0018); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0000); + break; + case 12: /* TMDS = 40.50MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x40F3); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0018); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0000); + break; + case 16: /* TMDS = 54MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x60B2); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0028); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0001); + break; + default: + dev_err(hdmi->dev, "%s: Color depth not supported (%d)", + __func__, color_depth); + break; + } + //phy_i2c_write(hdmi, 0x8009, 0x09); + //phy_i2c_write(hdmi, 0x0251, 0x0E); + break; + case 5040: + switch (color_depth) { + case 8: + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x0072); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0028); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0001); + break; + case 10: /* TMDS = 63MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x2142); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0028); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0001); + break; + case 12: /* TMDS = 75.60MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x40A2); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0028); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0001); + break; + case 16: /* TMDS = 100.80MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x6071); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0002); + break; + default: + dev_err(hdmi->dev, "%s: Color depth not supported (%d)", + __func__, color_depth); + break; + 
} + //phy_i2c_write(hdmi, 0x8009, 0x09); + //phy_i2c_write(hdmi, 0x0251, 0x0E); + break; + case 5400: + switch (color_depth) { + case 8: + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x0072); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0028); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0001); + break; + case 10: /* TMDS = 67.50MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x2142); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0028); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0001); + break; + case 12: /* TMDS = 81MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x40A2); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0028); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0001); + break; + case 16: /* TMDS = 108MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x6071); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0002); + break; + default: + dev_err(hdmi->dev, "%s: Color depth not supported (%d)", + __func__, color_depth); + break; + } + //phy_i2c_write(hdmi, 0x8009, 0x09); + //phy_i2c_write(hdmi, 0x0251, 0x0E); + break; + case 5940: + switch (color_depth) { + case 8: + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x0072); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0028); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0001); + break; + case 10: /* TMDS = 74.25MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x2142); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0028); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0001); + break; + case 12: /* TMDS = 89.10MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x40A2); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0028); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0001); + break; + case 16: /* TMDS = 118.80MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x6071); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0002); + break; + default: + 
dev_err(hdmi->dev, "%s: Color depth not supported (%d)", + __func__, color_depth); + break; + } + //phy_i2c_write(hdmi, 0x8009, 0x09); + //phy_i2c_write(hdmi, 0x0251, 0x0E); + break; + case 7200: + switch (color_depth) { + case 8: + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x0072); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0028); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0001); + break; + case 10: /* TMDS = 90MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x2142); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0028); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0001); + break; + case 12: /* TMDS = 108MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x4061); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0002); + break; + case 16: /* TMDS = 144MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x6071); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0002); + break; + default: + dev_err(hdmi->dev, "%s: Color depth not supported (%d)", + __func__, color_depth); + break; + } + //phy_i2c_write(hdmi, 0x8009, 0x09); + //phy_i2c_write(hdmi, 0x0251, 0x0E); + break; + /* 74.25 MHz pixel clock (720p)*/ + case 7425: + switch (color_depth) { + case 8: + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x0072); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0028); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0001); + break; + case 10: /* TMDS = 92.812MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x2145); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0002); + break; + case 12: /* TMDS = 111.375MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x4061); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0002); + break; + case 16: /* TMDS = 148.5MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x6071); + 
phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0002); + break; + default: + dev_err(hdmi->dev, "%s: Color depth not supported (%d)", + __func__, color_depth); + break; + } + //phy_i2c_write(hdmi, PHY_CKSYMTXCTRL_OFFSET, 0x8009); + //phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0251); + break; + case 10080: + switch (color_depth) { + case 8: + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x0051); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0002); + phy_i2c_write(hdmi, PHY_CKSYMTXCTRL_OFFSET, 0x8009); + phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0251); + break; + case 10: /* TMDS = 126MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x2145); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0002); + phy_i2c_write(hdmi, PHY_CKSYMTXCTRL_OFFSET, 0x8009); + phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0251); + break; + case 12: /* TMDS = 151.20MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x4061); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0002); + phy_i2c_write(hdmi, PHY_CKSYMTXCTRL_OFFSET, 0x8009); + phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0251); + break; + case 16: /* TMDS = 201.60MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x6050); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0003); + phy_i2c_write(hdmi, PHY_CKSYMTXCTRL_OFFSET, 0x8009); + phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0211); + break; + default: + dev_err(hdmi->dev, "%s: Color depth not supported (%d)", + __func__, color_depth); + break; + } + break; + case 10100: + case 10225: + case 10650: + case 10800: + switch (color_depth) { + case 8: + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x0051); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0002); + 
//phy_i2c_write(hdmi, PHY_CKSYMTXCTRL_OFFSET, 0x8009); + phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0251); + break; + case 10: /* TMDS = 135MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x2145); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0002); + phy_i2c_write(hdmi, PHY_CKSYMTXCTRL_OFFSET, 0x8009); + phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0251); + break; + case 12: /* TMDS = 162MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x4061); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0002); + phy_i2c_write(hdmi, PHY_CKSYMTXCTRL_OFFSET, 0x8009); + phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0211); + break; + case 16: /* TMDS = 216MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x6050); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0003); + phy_i2c_write(hdmi, PHY_CKSYMTXCTRL_OFFSET, 0x8009); + phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0211); + break; + default: + dev_err(hdmi->dev, "%s: Color depth not supported (%d)", + __func__, color_depth); + break; + } + break; + case 11880: + switch (color_depth) { + case 8: + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x0051); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0002); + phy_i2c_write(hdmi, PHY_CKSYMTXCTRL_OFFSET, 0x8009); + phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0251); + break; + case 10: /* TMDS = 148.50MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x2145); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0002); + phy_i2c_write(hdmi, PHY_CKSYMTXCTRL_OFFSET, 0x8009); + phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0251); + break; + case 12: /* TMDS = 178.20MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x4061); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, 
PHY_PLLGMPCTRL_OFFSET, 0x0002); + phy_i2c_write(hdmi, PHY_CKSYMTXCTRL_OFFSET, 0x8009); + phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0211); + break; + case 16: /* TMDS = 237.60MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x6050); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0003); + phy_i2c_write(hdmi, PHY_CKSYMTXCTRL_OFFSET, 0x8009); + phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0211); + break; + default: + dev_err(hdmi->dev, "%s: Color depth not supported (%d)", + __func__, color_depth); + break; + } + break; + case 14400: + switch (color_depth) { + case 8: + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x0051); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0002); + phy_i2c_write(hdmi, PHY_CKSYMTXCTRL_OFFSET, 0x8009); + phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0251); + break; + case 10: /* TMDS = 180MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x2145); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0002); + phy_i2c_write(hdmi, PHY_CKSYMTXCTRL_OFFSET, 0x8009); + phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0211); + break; + case 12: /* TMDS = 216MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x4064); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0002); + phy_i2c_write(hdmi, PHY_CKSYMTXCTRL_OFFSET, 0x8009); + phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0211); + break; + case 16: /* TMDS = 288MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x6050); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0003); + phy_i2c_write(hdmi, PHY_CKSYMTXCTRL_OFFSET, 0x8009); + phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0211); + break; + default: + dev_err(hdmi->dev, "%s: Color depth not supported (%d)", + __func__, color_depth); + break; + } + break; + case 14625: + switch 
(color_depth) { + case 8: + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x0051); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0002); + phy_i2c_write(hdmi, PHY_CKSYMTXCTRL_OFFSET, 0x8009); + phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0251); + break; + default: + dev_err(hdmi->dev, "%s: Color depth not supported (%d)", + __func__, color_depth); + break; + } + break; + case 14850: + switch (color_depth) { + case 8: + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x0051); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0002); + phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0251); + break; + case 10: /* TMDS = 185.62MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x214C); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0003); + phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0211); + break; + case 12: /* TMDS = 222.75MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x4064); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0003); + phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0211); + break; + case 16: /* TMDS = 297MHz */ + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x6050); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0003); + phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0273); + break; + default: + dev_err(hdmi->dev, "%s: Color depth not supported (%d)", + __func__, color_depth); + break; + } + phy_i2c_write(hdmi, PHY_CKSYMTXCTRL_OFFSET, 0x8009); + break; + case 15400: + switch (color_depth) { + case 8: + phy_i2c_write(hdmi, PHY_OPMODE_PLLCFG_OFFSET, 0x0051); + phy_i2c_write(hdmi, PHY_PLLCURRCTRL_OFFSET, 0x0038); + phy_i2c_write(hdmi, PHY_PLLGMPCTRL_OFFSET, 0x0002); + phy_i2c_write(hdmi, PHY_VLEVCTRL_OFFSET, 0x0251); + break; + default: + dev_err(hdmi->dev, "%s: Color depth not supported 
(%d)", + __func__, color_depth); + break; + } + break; + default: + dev_err(hdmi->dev, "%s:\n\n*****Unsupported pixel clock ******\n\n!", + __func__); + break; + } + + { + u32 value = hdmi_read_reg32(hdmi, HDMI_PHY_CONF0_OFFSET); + + value = SET_FIELD(HDMI_PHY_CONF0_SELDATAENPOL_START, + HDMI_PHY_CONF0_SELDATAENPOL_MASK, 1) | + SET_FIELD(HDMI_PHY_CONF0_ENHPDRXSENSE_START, + HDMI_PHY_CONF0_ENHPDRXSENSE_MASK, 1) | + SET_FIELD(HDMI_PHY_CONF0_TXPWRON_START, + HDMI_PHY_CONF0_TXPWRON_MASK, 1) | + SET_FIELD(HDMI_PHY_CONF0_SVSRET_START, + HDMI_PHY_CONF0_SVSRET_MASK, 1); + hdmi_write_reg32(hdmi, HDMI_PHY_CONF0_OFFSET, value); + } +} + +static void phy_reset(struct hdmi_device *hdmi, u8 value) +{ + /* Handle different types of PHY here... */ + hdmi_write_reg32(hdmi, HDMI_MC_PHYRSTZ_OFFSET, value & 0x1); + msleep(HDMI_PHY_RESET_TIME_MS); +} + +int phy_power_down(struct hdmi_device *hdmi) +{ + /* For HDMI TX 1.4 PHY, power down by placing PHY in reset */ + phy_reset(hdmi, 0); + phy_reset(hdmi, 1); + phy_reset(hdmi, 0); + phy_reset(hdmi, 1); + return 0; +} + +int phy_wait_lock(struct hdmi_device *hdmi) +{ + if (!phy_poll(hdmi, HDMI_PHY_STAT0_OFFSET, HDMI_PHY_STAT0_TX_PHY_LOCK_MASK)) + return -ETIMEDOUT; + + return 0; +} + +int phy_init(struct hdmi_device *hdmi) +{ + /* Init slave address */ + hdmi_write_reg32(hdmi, HDMI_PHY_I2CM_SLAVE_OFFSET, HDMI_PHY_SLAVE_ADDRESS); + return 0; +} diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_phy.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_phy.h new file mode 100644 index 000000000000..ee2bc5ca08d8 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_phy.h @@ -0,0 +1,83 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +@Copyright Portions Copyright (c) Synopsys Ltd. All Rights Reserved +@License Synopsys Permissive License + +The Synopsys Software Driver and documentation (hereinafter "Software") +is an unsupported proprietary work of Synopsys, Inc. unless otherwise +expressly agreed to in writing between Synopsys and you. + +The Software IS NOT an item of Licensed Software or Licensed Product under +any End User Software License Agreement or Agreement for Licensed Product +with Synopsys or any supplement thereto. Permission is hereby granted, +free of charge, to any person obtaining a copy of this software annotated +with this license and the Software, to deal in the Software without +restriction, including without limitation the rights to use, copy, modify, +merge, publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, subject +to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. +*/ /**************************************************************************/ +#ifndef _HDMI_PHY_H_ +#define _HDMI_PHY_H_ + +#include "hdmi.h" + +int phy_init(struct hdmi_device *hdmi); +int phy_power_down(struct hdmi_device *hdmi); +int phy_wait_lock(struct hdmi_device *hdmi); +void phy_configure_mode(struct hdmi_device *hdmi, struct drm_display_mode *mode); + +#endif diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_regs.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_regs.h new file mode 100644 index 000000000000..844554a0fd02 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_regs.h @@ -0,0 +1,583 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _HDMI_REGS_H +#define _HDMI_REGS_H + +#define SET_FIELD(start, mask, value) ((value << start) & mask) +#define GET_FIELD(value, start, mask) ((value & mask) >> start) + +/* Hardware Register definitions + * NOTE: These are HDMI core address offsets, SoC level offset will be * 4 + */ + +/* Identification registers */ +#define HDMI_ID_DESIGN_ID_OFFSET (0x0000) + +#define HDMI_ID_REVISION_ID_OFFSET (0x0001) + +#define HDMI_ID_PRODUCT_ID0_OFFSET (0x0002) + +#define HDMI_ID_PRODUCT_ID1_OFFSET (0x0003) +#define HDMI_ID_PRODUCT_ID1_TX_START (0) +#define HDMI_ID_PRODUCT_ID1_TX_MASK (0x1) +#define HDMI_ID_PRODUCT_ID1_RX_START (1) +#define HDMI_ID_PRODUCT_ID1_RX_MASK (0x2) +#define HDMI_ID_PRODUCT_ID1_HDCP_START (6) +#define HDMI_ID_PRODUCT_ID1_HDCP_MASK (0xC0) + +#define HDMI_ID_CONFIG0_ID_OFFSET (0x0004) +#define HDMI_ID_CONFIG0_ID_HDCP_START (0) +#define HDMI_ID_CONFIG0_ID_HDCP_MASK (0x1) +#define HDMI_ID_CONFIG0_ID_CEC_START (1) +#define HDMI_ID_CONFIG0_ID_CEC_MASK (0x2) +#define HDMI_ID_CONFIG0_ID_CSC_START (2) +#define HDMI_ID_CONFIG0_ID_CSC_MASK (0x4) +#define HDMI_ID_CONFIG0_ID_HDMI14_START (3) +#define HDMI_ID_CONFIG0_ID_HDMI14_MASK (0x8) +#define HDMI_ID_CONFIG0_ID_AUDI2S_START (4) +#define HDMI_ID_CONFIG0_ID_AUDI2S_MASK (0x10) +#define HDMI_ID_CONFIG0_ID_AUDSPDIF_START (5) +#define HDMI_ID_CONFIG0_ID_AUDSPDIF_MASK (0x20) +#define HDMI_ID_CONFIG0_ID_PREPEN_START (7) +#define HDMI_ID_CONFIG0_ID_PREPEN_MASK (0x80) + +#define HDMI_ID_CONFIG1_ID_OFFSET (0x0005) +#define HDMI_ID_CONFIG1_ID_CONFAPB_START (1) +#define HDMI_ID_CONFIG1_ID_CONFAPB_MASK (0x2) +#define HDMI_ID_CONFIG1_ID_HDMI20_START (5) +#define HDMI_ID_CONFIG1_ID_HDMI20_MASK (0x20) +#define HDMI_ID_CONFIG1_ID_HDCP22_START (6) +#define HDMI_ID_CONFIG1_ID_HDCP22_MASK (0x40) + +#define HDMI_ID_CONFIG2_ID_OFFSET (0x0006) + +#define HDMI_ID_CONFIG3_ID_OFFSET (0x0007) + +/* Interrupt Registers */ +#define 
HDMI_IH_FC_STAT0_OFFSET (0x0100) + +#define HDMI_IH_FC_STAT1_OFFSET (0x0101) + +#define HDMI_IH_FC_STAT2_OFFSET (0x0102) + +#define HDMI_IH_AS_STAT0_OFFSET (0x0103) + +#define HDMI_IH_PHY_STAT0_OFFSET (0x0104) +#define HDMI_IH_PHY_STAT0_HPD_START (0) +#define HDMI_IH_PHY_STAT0_HPD_MASK (0x1) + +#define HDMI_IH_I2CM_STAT0_OFFSET (0x0105) +#define HDMI_IH_I2CM_STAT0_I2CMASTERERROR_START (0) +#define HDMI_IH_I2CM_STAT0_I2CMASTERERROR_MASK (0x1) +#define HDMI_IH_I2CM_STAT0_I2CMASTERDONE_START (1) +#define HDMI_IH_I2CM_STAT0_I2CMASTERDONE_MASK (0x2) +#define HDMI_IH_I2CM_STAT0_SCDC_READREQ_START (2) +#define HDMI_IH_I2CM_STAT0_SCDC_READREQ_MASK (0x4) + +#define HDMI_IH_CEC_STAT0_OFFSET (0x0106) + +#define HDMI_IH_VP_STAT0_OFFSET (0x0107) + +#define HDMI_IH_I2CMPHY_STAT0_OFFSET (0x0108) +#define HDMI_IH_I2CMPHY_STAT0_I2CMPHYERROR_START (0) +#define HDMI_IH_I2CMPHY_STAT0_I2CMPHYERROR_MASK (0x1) +#define HDMI_IH_I2CMPHY_STAT0_I2CMPHYDONE_START (1) +#define HDMI_IH_I2CMPHY_STAT0_I2CMPHYDONE_MASK (0x2) + +#define HDMI_IH_AHBDMAAUD_STAT0_OFFSET (0x0109) + +#define HDMI_IH_DECODE_OFFSET (0x0170) +#define HDMI_IH_DECODE_PHY_START (3) +#define HDMI_IH_DECODE_PHY_MASK (0x8) + +#define HDMI_IH_MUTE_FC_STAT0_OFFSET (0x0180) + +#define HDMI_IH_MUTE_FC_STAT1_OFFSET (0x0181) + +#define HDMI_IH_MUTE_FC_STAT2_OFFSET (0x0182) + +#define HDMI_IH_MUTE_AS_STAT0_OFFSET (0x0183) + +#define HDMI_IH_MUTE_PHY_STAT0_OFFSET (0x0184) + +#define HDMI_IH_MUTE_I2CM_STAT0_OFFSET (0x0185) + +#define HDMI_IH_MUTE_CEC_STAT0_OFFSET (0x0186) + +#define HDMI_IH_MUTE_VP_STAT0_OFFSET (0x0187) + +#define HDMI_IH_MUTE_I2CMPHY_STAT0_OFFSET (0x0188) + +#define HDMI_IH_MUTE_AHBDMAAUD_STAT0_OFFSET (0x0189) + +#define HDMI_IH_MUTE_OFFSET (0x01ff) +#define HDMI_IH_MUTE_ALL_START (0) +#define HDMI_IH_MUTE_ALL_MASK (0x3) + +/* Video Sampler registers */ +#define HDMI_TX_INVID0_OFFSET (0x0200) +#define HDMI_TX_INVID0_VIDEO_MAPPING_START (0) +#define HDMI_TX_INVID0_VIDEO_MAPPING_MASK (0x1F) +#define 
HDMI_TX_INVID0_INTERNAL_DE_GEN_START (7) +#define HDMI_TX_INVID0_INTERNAL_DE_GEN_MASK (0x80) + +#define HDMI_TX_INSTUFFING_OFFSET (0x201) + +#define HDMI_TX_GYDATA0_OFFSET (0x202) + +#define HDMI_TX_GYDATA1_OFFSET (0x203) + +#define HDMI_TX_RCRDATA0_OFFSET (0x204) + +#define HDMI_TX_RCRDATA1_OFFSET (0x205) + +#define HDMI_TX_BCBDATA0_OFFSET (0x206) + +#define HDMI_TX_BCBDATA1_OFFSET (0x207) + +/* Video Packetizer */ +#define HDMI_VP_STATUS_OFFSET (0x0800) + +#define HDMI_VP_PR_CD_OFFSET (0x0801) +#define HDMI_VP_PR_CD_DESIRED_PR_FACTOR_START (0) +#define HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK (0xF) +#define HDMI_VP_PR_CD_COLOR_DEPTH_START (4) +#define HDMI_VP_PR_CD_COLOR_DEPTH_MASK (0xF0) + +#define HDMI_VP_STUFF_OFFSET (0x0802) +#define HDMI_VP_STUFF_PR_STUFFING_START (0) +#define HDMI_VP_STUFF_PR_STUFFING_MASK (0x1) +#define HDMI_VP_STUFF_PP_STUFFING_START (1) +#define HDMI_VP_STUFF_PP_STUFFING_MASK (0x2) +#define HDMI_VP_STUFF_YCC422_STUFFING_START (2) +#define HDMI_VP_STUFF_YCC422_STUFFING_MASK (0x4) +#define HDMI_VP_STUFF_ICX_GOTO_P0_ST_START (3) +#define HDMI_VP_STUFF_ICX_GOTO_P0_ST_MASK (0x8) +#define HDMI_VP_STUFF_IFIX_PP_TO_LAST_START (4) +#define HDMI_VP_STUFF_IFIX_PP_TO_LAST_MASK (0x10) +#define HDMI_VP_STUFF_IDEFAULT_PHASE_START (5) +#define HDMI_VP_STUFF_IDEFAULT_PHASE_MASK (0x20) + +#define HDMI_VP_REMAP_OFFSET (0x0803) +#define HDMI_VP_REMAP_YCC422_SIZE_START (0) +#define HDMI_VP_REMAP_YCC422_SIZE_MASK (0x3) + +#define HDMI_VP_CONF_OFFSET (0x0804) +#define HDMI_VP_CONF_OUTPUT_SELECTOR_START (0) +#define HDMI_VP_CONF_OUTPUT_SELECTOR_MASK (0x3) +#define HDMI_VP_CONF_BYPASS_SELECT_START (2) +#define HDMI_VP_CONF_BYPASS_SELECT_MASK (0x4) +#define HDMI_VP_CONF_YCC422_EN_START (3) +#define HDMI_VP_CONF_YCC422_EN_MASK (0x8) +#define HDMI_VP_CONF_PR_EN_START (4) +#define HDMI_VP_CONF_PR_EN_MASK (0x10) +#define HDMI_VP_CONF_PP_EN_START (5) +#define HDMI_VP_CONF_PP_EN_MASK (0x20) +#define HDMI_VP_CONF_BYPASS_EN_START (6) +#define HDMI_VP_CONF_BYPASS_EN_MASK 
(0x40) + +#define HDMI_VP_MASK_OFFSET (0x0807) + +/* Frame Composer */ +#define HDMI_FC_INVIDCONF_OFFSET (0x1000) +#define HDMI_FC_INVIDCONF_IN_I_P_START (0) +#define HDMI_FC_INVIDCONF_IN_I_P_MASK (0x1) +#define HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_START (1) +#define HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_MASK (0x2) +#define HDMI_FC_INVIDCONF_DVI_MODEZ_START (3) +#define HDMI_FC_INVIDCONF_DVI_MODEZ_MASK (0x8) +#define HDMI_FC_INVIDCONF_DE_IN_POLARITY_START (4) +#define HDMI_FC_INVIDCONF_DE_IN_POLARITY_MASK (0x10) +#define HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_START (5) +#define HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_MASK (0x20) +#define HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_START (6) +#define HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_MASK (0x40) +#define HDMI_FC_INVIDCONF_HDCP_KEEPOUT_START (7) +#define HDMI_FC_INVIDCONF_HDCP_KEEPOUT_MASK (0x80) + +#define HDMI_FC_INHACTIV0_OFFSET (0x1001) +#define HDMI_FC_INHACTIV0_H_IN_ACTIV_START (0) +#define HDMI_FC_INHACTIV0_H_IN_ACTIV_MASK (0xFF) + +#define HDMI_FC_INHACTIV1_OFFSET (0x1002) +#define HDMI_FC_INHACTIV1_H_IN_ACTIV_UPPER_START (0) +#define HDMI_FC_INHACTIV1_H_IN_ACTIV_UPPER_MASK (0x3F) + +#define HDMI_FC_INHBLANK0_OFFSET (0x1003) +#define HDMI_FC_INHBLANK0_H_IN_BLANK_START (0) +#define HDMI_FC_INHBLANK0_H_IN_BLANK_MASK (0xFF) + +#define HDMI_FC_INHBLANK1_OFFSET (0x1004) +#define HDMI_FC_INHBLANK1_H_IN_BLANK_UPPER_START (0) +#define HDMI_FC_INHBLANK1_H_IN_BLANK_UPPER_MASK (0x3F) + +#define HDMI_FC_INVACTIV0_OFFSET (0x1005) +#define HDMI_FC_INVACTIV0_V_IN_ACTIV_START (0) +#define HDMI_FC_INVACTIV0_V_IN_ACTIV_MASK (0xFF) + +#define HDMI_FC_INVACTIV1_OFFSET (0x1006) +#define HDMI_FC_INVACTIV1_V_IN_ACTIV_UPPER_START (0) +#define HDMI_FC_INVACTIV1_V_IN_ACTIV_UPPER_MASK (0x3F) + +#define HDMI_FC_INVBLANK_OFFSET (0x1007) +#define HDMI_FC_INVBLANK_V_IN_BLANK_START (0) +#define HDMI_FC_INVBLANK_V_IN_BLANK_MASK (0xFF) + +#define HDMI_FC_HSYNCINDELAY0_OFFSET (0x1008) +#define HDMI_FC_HSYNCINDELAY0_H_IN_DELAY_START (0) +#define 
HDMI_FC_HSYNCINDELAY0_H_IN_DELAY_MASK (0xFF) + +#define HDMI_FC_HSYNCINDELAY1_OFFSET (0x1009) +#define HDMI_FC_HSYNCINDELAY1_H_IN_DELAY_UPPER_START (0) +#define HDMI_FC_HSYNCINDELAY1_H_IN_DELAY_UPPER_MASK (0x3F) + +#define HDMI_FC_HSYNCINWIDTH0_OFFSET (0x100a) +#define HDMI_FC_HSYNCINWIDTH0_H_IN_WIDTH_START (0) +#define HDMI_FC_HSYNCINWIDTH0_H_IN_WIDTH_MASK (0xFF) + +#define HDMI_FC_HSYNCINWIDTH1_OFFSET (0x100b) +#define HDMI_FC_HSYNCINWIDTH1_H_IN_WIDTH_UPPER_START (0) +#define HDMI_FC_HSYNCINWIDTH1_H_IN_WIDTH_UPPER_MASK (0x3) + +#define HDMI_FC_VSYNCINDELAY_OFFSET (0x100c) +#define HDMI_FC_VSYNCINDELAY_V_IN_DELAY_START (0) +#define HDMI_FC_VSYNCINDELAY_V_IN_DELAY_MASK (0xFF) + +#define HDMI_FC_VSYNCINWIDTH_OFFSET (0x100d) +#define HDMI_FC_VSYNCINWIDTH_V_IN_WIDTH_START (0) +#define HDMI_FC_VSYNCINWIDTH_V_IN_WIDTH_MASK (0x3F) + +#define HDMI_FC_INFREQ0_OFFSET (0x100e) + +#define HDMI_FC_INFREQ1_OFFSET (0x100f) + +#define HDMI_FC_INFREQ2_OFFSET (0x1010) + +#define HDMI_FC_CTRLDUR_OFFSET (0x1011) + +#define HDMI_FC_EXCTRLDUR_OFFSET (0x1012) + +#define HDMI_FC_EXCTRLSPAC_OFFSET (0x1013) + +#define HDMI_FC_CH0PREAM_OFFSET (0x1014) + +#define HDMI_FC_CH1PREAM_OFFSET (0x1015) + +#define HDMI_FC_CH2PREAM_OFFSET (0x1016) + +#define HDMI_FC_AVICONF3_OFFSET (0x1017) + +#define HDMI_FC_GCP_OFFSET (0x1018) + +#define HDMI_FC_AVICONF0_OFFSET (0x1019) +#define HDMI_FC_AVICONF0_RGBYCC_START (0) +#define HDMI_FC_AVICONF0_RGBYCC_MASK (0x3) +#define HDMI_FC_AVICONF0_VBAR_VALID_START (2) +#define HDMI_FC_AVICONF0_VBAR_VALID_MASK (0x4) +#define HDMI_FC_AVICONF0_HBAR_VALID_START (3) +#define HDMI_FC_AVICONF0_HBAR_VALID_MASK (0x8) +#define HDMI_FC_AVICONF0_SCAN_INFO_START (4) +#define HDMI_FC_AVICONF0_SCAN_INFO_MASK (0x30) +#define HDMI_FC_AVICONF0_ACTIVE_FORMAT_PRESENT_START (6) +#define HDMI_FC_AVICONF0_ACTIVE_FORMAT_PRESENT_MASK (0x40) +#define HDMI_FC_AVICONF0_RGBYCC_2_START (7) +#define HDMI_FC_AVICONF0_RGBYCC_2_MASK (0x80) + +#define HDMI_FC_AVICONF1_OFFSET (0x101a) +#define 
HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_START (0) +#define HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_MASK (0xF) +#define HDMI_FC_AVICONF1_PIC_ASPECT_RATIO_START (4) +#define HDMI_FC_AVICONF1_PIC_ASPECT_RATIO_MASK (0x30) +#define HDMI_FC_AVICONF1_COLORIMETRY_START (6) +#define HDMI_FC_AVICONF1_COLORIMETRY_MASK (0xC0) + +#define HDMI_FC_AVICONF2_OFFSET (0x101b) +#define HDMI_FC_AVICONF2_NON_UNIFORM_PIC_SCALING_START (0) +#define HDMI_FC_AVICONF2_NON_UNIFORM_PIC_SCALING_MASK (0x3) +#define HDMI_FC_AVICONF2_QUANTIZATION_RANGE_START (2) +#define HDMI_FC_AVICONF2_QUANTIZATION_RANGE_MASK (0xC) +#define HDMI_FC_AVICONF2_EXT_COLORIMETRY_START (4) +#define HDMI_FC_AVICONF2_EXT_COLORIMETRY_MASK (0x70) +#define HDMI_FC_AVICONF2_IT_CONTENT_START (7) +#define HDMI_FC_AVICONF2_IT_CONTENT_MASK (0x80) + +#define HDMI_FC_AVIVID_OFFSET (0x101c) +#define HDMI_FC_AVIVID_START (0) +#define HDMI_FC_AVIVID_MASK (0xFF) + +#define HDMI_FC_AVIETB0_OFFSET (0x101d) + +#define HDMI_FC_AVIETB1_OFFSET (0x101e) + +#define HDMI_FC_AVISBB0_OFFSET (0x101f) + +#define HDMI_FC_AVISBB1_OFFSET (0x1020) + +#define HDMI_FC_AVIELB0_OFFSET (0x1021) + +#define HDMI_FC_AVIELB1_OFFSET (0x1022) + +#define HDMI_FC_AVISRB0_OFFSET (0x1023) + +#define HDMI_FC_AVISRB1_OFFSET (0x1024) + +/* A lot of registers related to audio, ACP, ISRC, etc */ + +#define HDMI_FC_PRCONF_OFFSET (0x10e0) +#define HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_START (0) +#define HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK (0xF) +#define HDMI_FC_PRCONF_INCOMING_PR_FACTOR_START (4) +#define HDMI_FC_PRCONF_INCOMING_PR_FACTOR_MASK (0xF0) + +#define HDMI_FC_SCRAMBLER_CTRL_OFFSET (0x10e1) + +#define HDMI_FC_MULTISTREAM_CTRL_OFFSET (0x10e2) + +#define HDMI_FC_PACKET_TX_EN_OFFSET (0x10e3) + +#define HDMI_FC_DBGFORCE_OFFSET (0x1200) +#define HDMI_FC_DBGFORCE_FORCE_VIDEO_START (0) +#define HDMI_FC_DBGFORCE_FORCE_VIDEO_MASK (0x1) +#define HDMI_FC_DBGFORCE_FORCE_AUDIO_START (4) +#define HDMI_FC_DBGFORCE_FORCE_AUDIO_MASK (0x10) + +#define HDMI_FC_DBGTMDS_0_OFFSET (0x1219) + 
+#define HDMI_FC_DBGTMDS_1_OFFSET (0x121A) + +#define HDMI_FC_DBGTMDS_2_OFFSET (0x121B) + +/* HDMI Source PHY */ +#define HDMI_PHY_CONF0_OFFSET (0x3000) +#define HDMI_PHY_CONF0_SELDIPIF_START (0) +#define HDMI_PHY_CONF0_SELDIPIF_MASK (0x1) +#define HDMI_PHY_CONF0_SELDATAENPOL_START (1) +#define HDMI_PHY_CONF0_SELDATAENPOL_MASK (0x2) +#define HDMI_PHY_CONF0_ENHPDRXSENSE_START (2) +#define HDMI_PHY_CONF0_ENHPDRXSENSE_MASK (0x4) +#define HDMI_PHY_CONF0_TXPWRON_START (3) +#define HDMI_PHY_CONF0_TXPWRON_MASK (0x8) +#define HDMI_PHY_CONF0_PDDQ_START (4) +#define HDMI_PHY_CONF0_PDDQ_MASK (0x10) +#define HDMI_PHY_CONF0_SVSRET_START (5) +#define HDMI_PHY_CONF0_SVSRET_MASK (0x20) +#define HDMI_PHY_CONF0_ENTMDS_START (6) +#define HDMI_PHY_CONF0_ENTMDS_MASK (0x40) +#define HDMI_PHY_CONF0_PDZ_START (7) +#define HDMI_PHY_CONF0_PDZ_MASK (0x80) + +#define HDMI_PHY_TST0_OFFSET (0x3001) + +#define HDMI_PHY_TST1_OFFSET (0x3002) + +#define HDMI_PHY_TST2_OFFSET (0x3003) + +#define HDMI_PHY_STAT0_OFFSET (0x3004) +#define HDMI_PHY_STAT0_TX_PHY_LOCK_START (0) +#define HDMI_PHY_STAT0_TX_PHY_LOCK_MASK (0x1) +#define HDMI_PHY_STAT0_HPD_START (1) +#define HDMI_PHY_STAT0_HPD_MASK (0x2) + +#define HDMI_PHY_INT0_OFFSET (0x3005) + +#define HDMI_PHY_MASK0_OFFSET (0x3006) + +#define HDMI_PHY_POL0_OFFSET (0x3007) +#define HDMI_PHY_POL0_HPD_START (1) +#define HDMI_PHY_POL0_HPD_MASK (0x2) + +/* I2C Master PHY */ +#define HDMI_PHY_I2CM_SLAVE_OFFSET (0x3020) +#define HDMI_PHY_I2CM_SLAVE_SLAVEADDR_START (0) +#define HDMI_PHY_I2CM_SLAVE_SLAVEADDR_MASK (0x7F) + +#define HDMI_PHY_I2CM_ADDRESS_OFFSET (0x3021) +#define HDMI_PHY_I2CM_ADDRESS_START (0) +#define HDMI_PHY_I2CM_ADDRESS_MASK (0xFF) + +#define HDMI_PHY_I2CM_DATAO_1_OFFSET (0x3022) +#define HDMI_PHY_I2CM_DATAO_1_START (0) +#define HDMI_PHY_I2CM_DATAO_1_MASK (0xFF) + +#define HDMI_PHY_I2CM_DATAO_0_OFFSET (0x3023) +#define HDMI_PHY_I2CM_DATAO_0_START (0) +#define HDMI_PHY_I2CM_DATAO_0_MASK (0xFF) + +#define HDMI_PHY_I2CM_DATAI_1_OFFSET (0x3024) 
+#define HDMI_PHY_I2CM_DATAI_1_DATAI_START (0) +#define HDMI_PHY_I2CM_DATAI_1_DATAI_MASK (0xFF) + +#define HDMI_PHY_I2CM_DATAI_0_OFFSET (0x3025) +#define HDMI_PHY_I2CM_DATAI_0_DATAI_START (0) +#define HDMI_PHY_I2CM_DATAI_0_DATAI_MASK (0xFF) + +#define HDMI_PHY_I2CM_OPERATION_OFFSET (0x3026) +#define HDMI_PHY_I2CM_OPERATION_RD_START (0) +#define HDMI_PHY_I2CM_OPERATION_RD_MASK (0x1) +#define HDMI_PHY_I2CM_OPERATION_WR_START (4) +#define HDMI_PHY_I2CM_OPERATION_WR_MASK (0x10) +// Many more... + +/* Audio Sampler */ +#define HDMI_AUDIO_SAMPLER_OFFSET (0x3100) + +/* Audio packetizer */ +#define HDMI_AUD_N1_OFFSET (0x3200) + +#define HDMI_AUD_N2_OFFSET (0x3201) + +#define HDMI_AUD_N3_OFFSET (0x3202) + +#define HDMI_AUD_CTS1_OFFSET (0x3203) + +#define HDMI_AUD_CTS2_OFFSET (0x3204) + +#define HDMI_AUD_CTS3_OFFSET (0x3205) + +#define HDMI_AUD_INPUTCLKFS_OFFSET (0x3206) + +/* Generic parallel audio interface */ +#define HDMI_PARALLEL_AUDIO_INTF_OFFFSET (0x3500) + +/* Audio DMA */ +#define HDMI_AUDIO_DMA_OFFSET (0x3600) + +/* Main Controller Registers */ +#define HDMI_MC_CLKDIS_OFFSET (0x4001) +#define HDMI_MC_CLKDIS_PIXELCLK_DIS_START (0) +#define HDMI_MC_CLKDIS_PIXELCLK_DIS_MASK (0x1) +#define HDMI_MC_CLKDIS_TMDSCLK_DIS_START (1) +#define HDMI_MC_CLKDIS_TMDSCLK_DIS_MASK (0x2) +#define HDMI_MC_CLKDIS_PREPCLK_DIS_START (2) +#define HDMI_MC_CLKDIS_PREPCLK_DIS_MASK (0x4) +#define HDMI_MC_CLKDIS_AUDCLK_DIS_START (3) +#define HDMI_MC_CLKDIS_AUDCLK_DIS_MASK (0x8) +#define HDMI_MC_CLKDIS_CSCCLK_DIS_START (4) +#define HDMI_MC_CLKDIS_CSCCLK_DIS_MASK (0x10) +#define HDMI_MC_CLKDIS_CECCLK_DIS_START (5) +#define HDMI_MC_CLKDIS_CECCLK_DIS_MASK (0x20) +#define HDMI_MC_CLKDIS_HDCPCLK_DIS_START (6) +#define HDMI_MC_CLKDIS_HDCPCLK_DIS_MASK (0x40) + +#define HDMI_MC_SWRSTZREQ_OFFSET (0x4002) +#define HDMI_MC_OPCTRL_OFFSET (0x4003) +#define HDMI_MC_FLOWCTRL_OFFSET (0x4004) + +#define HDMI_MC_PHYRSTZ_OFFSET (0x4005) +#define HDMI_MC_PHYRSTZ_PHYRSTZ_START (0) +#define 
HDMI_MC_PHYRSTZ_PHYRSTZ_MASK (0x1) + +#define HDMI_MC_LOCKONCLOCK_OFFSET (0x4006) +#define HDMI_MC_HEACPHY_RST_OFFSET (0x4007) +#define HDMI_MC_LOCKONCLOCK_2_OFFSET (0x4008) +#define HDMI_MC_SWRSTZREQ_2_OFFSET (0x4009) + +/* Color Space Converter */ +#define HDMI_COLOR_SPACE_CONVERTER_OFFSET (0x4100) + +/* HDCP Encryption */ +#define HDMI_HDCP_ENCRYPTION_OFFSET (0x5000) + +/* I2C Master (E-EDID/EDDC) */ +#define HDMI_I2CM_SLAVE_OFFSET (0x7e00) + +#define HDMI_I2CM_ADDRESS_OFFSET (0x7e01) + +#define HDMI_I2CM_DATAO_OFFSET (0x7e02) + +#define HDMI_I2CM_DATAI_OFFSET (0x7e03) + +#define HDMI_I2CM_OPERATION_OFFSET (0x7e04) +#define HDMI_I2CM_OPERATION_RD_START (0) +#define HDMI_I2CM_OPERATION_RD_MASK (0x1) +#define HDMI_I2CM_OPERATION_RD_EXT_START (1) +#define HDMI_I2CM_OPERATION_RD_EXT_MASK (0x2) +#define HDMI_I2CM_OPERATION_RD_8_START (2) +#define HDMI_I2CM_OPERATION_RD_8_MASK (0x4) +#define HDMI_I2CM_OPERATION_RD_8_EXT_START (3) +#define HDMI_I2CM_OPERATION_RD_8_EXT_MASK (0x8) +#define HDMI_I2CM_OPERATION_WR_START (4) +#define HDMI_I2CM_OPERATION_WR_MASK (0x10) + +#define HDMI_I2CM_INT_OFFSET (0x7e05) +#define HDMI_I2CM_INT_DONE_MASK_START (2) +#define HDMI_I2CM_INT_DONE_MASK_MASK (0x4) +#define HDMI_I2CM_INT_READ_REQ_MASK_START (6) +#define HDMI_I2CM_INT_READ_REQ_MASK_MASK (0x40) + +#define HDMI_I2CM_CTLINT_OFFSET (0x7e06) +#define HDMI_I2CM_CTLINT_ARB_MASK_START (2) +#define HDMI_I2CM_CTLINT_ARB_MASK_MASK (0x4) +#define HDMI_I2CM_CTLINT_NACK_MASK_START (6) +#define HDMI_I2CM_CTLINT_NACK_MASK_MASK (0x40) + +#define HDMI_I2CM_DIV_OFFSET (0x7e07) +#define HDMI_I2CM_DIV_FAST_STD_MODE_START (3) +#define HDMI_I2CM_DIV_FAST_STD_MODE_MASK (0x8) + +#define HDMI_I2CM_SEGADDR_OFFSET (0x7e08) + +#define HDMI_I2CM_SOFTRSTZ_OFFSET (0x7e09) + +#define HDMI_I2CM_SEGPTR_OFFSET (0x7e0a) + +#define HDMI_I2CM_SS_SCL_HCNT_1_OFFSET (0x7e0b) + +#define HDMI_I2CM_SS_SCL_HCNT_0_OFFSET (0x7e0c) + +#define HDMI_I2CM_SS_SCL_LCNT_1_OFFSET (0x7e0d) + +#define HDMI_I2CM_SS_SCL_LCNT_0_OFFSET 
(0x7e0e) + +#define HDMI_I2CM_FS_SCL_HCNT_1_OFFSET (0x7e0f) + +#define HDMI_I2CM_FS_SCL_HCNT_0_OFFSET (0x7e10) + +#define HDMI_I2CM_FS_SCL_LCNT_1_OFFSET (0x7e11) + +#define HDMI_I2CM_FS_SCL_LCNT_0_OFFSET (0x7e12) + +#define HDMI_I2CM_SDA_HOLD_OFFSET (0x7e13) + +#define HDMI_I2CM_SCDC_READ_UPDATE_OFFSET (0x7e14) + +#endif diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_video.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_video.c new file mode 100644 index 000000000000..09b879f1b6a8 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_video.c @@ -0,0 +1,592 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +@Copyright Portions Copyright (c) Synopsys Ltd. All Rights Reserved +@License Synopsys Permissive License + +The Synopsys Software Driver and documentation (hereinafter "Software") +is an unsupported proprietary work of Synopsys, Inc. unless otherwise +expressly agreed to in writing between Synopsys and you. + +The Software IS NOT an item of Licensed Software or Licensed Product under +any End User Software License Agreement or Agreement for Licensed Product +with Synopsys or any supplement thereto. 
Permission is hereby granted, +free of charge, to any person obtaining a copy of this software annotated +with this license and the Software, to deal in the Software without +restriction, including without limitation the rights to use, copy, modify, +merge, publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, subject +to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. 
+*/ /**************************************************************************/ + +#include "hdmi_regs.h" +#include "hdmi_video.h" +#include "hdmi_phy.h" +#include "plato_drv.h" +#include + +enum hdmi_datamap { + RGB444_8B = 0x01, + RGB444_10B = 0x03, + RGB444_12B = 0x05, + RGB444_16B = 0x07, + YCbCr444_8B = 0x09, + YCbCr444_10B = 0x0B, + YCbCr444_12B = 0x0D, + YCbCr444_16B = 0x0F, + YCbCr422_8B = 0x16, + YCbCr422_10B = 0x14, + YCbCr422_12B = 0x12, +}; + +/* + * Sets up Video Packetizer registers + */ +static bool video_configure_vp(struct hdmi_device *hdmi) +{ + u8 reg = 0; + u8 output_select = 0; + u8 remap_size = 0; + u8 color_depth = 0; + + HDMI_CHECKPOINT; + + if (hdmi->hdmi_data.enc_out_format == ENCODING_RGB || + hdmi->hdmi_data.enc_out_format == ENCODING_YCC444) { + if (hdmi->hdmi_data.enc_color_depth == 0) { + output_select = 3; + } else if (hdmi->hdmi_data.enc_color_depth == 8) { + color_depth = 4; + output_select = 3; + } else if (hdmi->hdmi_data.enc_color_depth == 10) { + color_depth = 5; + } else if (hdmi->hdmi_data.enc_color_depth == 12) { + color_depth = 6; + } else if (hdmi->hdmi_data.enc_color_depth == 16) { + color_depth = 7; + } else { + hdmi_error(hdmi, "- %s: Found unsupported color depth (%d)", __func__, hdmi->hdmi_data.enc_color_depth); + return false; + } + } else if (hdmi->hdmi_data.enc_out_format == ENCODING_YCC422_8BITS) { + if ((hdmi->hdmi_data.enc_color_depth == 8) + || (hdmi->hdmi_data.enc_color_depth == 0)) { + remap_size = 0; + } else if (hdmi->hdmi_data.enc_color_depth == 10) { + remap_size = 1; + } else if (hdmi->hdmi_data.enc_color_depth == 12) { + remap_size = 2; + } else { + hdmi_error(hdmi, "- %s: Invalid color remap size (%d)", __func__, hdmi->hdmi_data.enc_color_depth); + return false; + } + output_select = 1; + } else { + hdmi_error(hdmi, "- %s: Invalid output encoding type: %d", + __func__, hdmi->hdmi_data.enc_out_format); + return false; + } + + hdmi_info(hdmi, " - %s: output_select: %d, color_depth: %d\n", + __func__, 
output_select, color_depth); + + reg = SET_FIELD(HDMI_VP_PR_CD_COLOR_DEPTH_START, + HDMI_VP_PR_CD_COLOR_DEPTH_MASK, + color_depth) | + SET_FIELD(HDMI_VP_PR_CD_DESIRED_PR_FACTOR_START, + HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK, + hdmi->hdmi_data.pix_repet_factor); + hdmi_write_reg32(hdmi, HDMI_VP_PR_CD_OFFSET, reg); + + reg = SET_FIELD(HDMI_VP_STUFF_IDEFAULT_PHASE_START, + HDMI_VP_STUFF_IDEFAULT_PHASE_MASK, + hdmi->hdmi_data.pp_default_phase); + hdmi_write_reg32(hdmi, HDMI_VP_STUFF_OFFSET, reg); + + reg = SET_FIELD(HDMI_VP_REMAP_YCC422_SIZE_START, + HDMI_VP_REMAP_YCC422_SIZE_MASK, + remap_size); + hdmi_write_reg32(hdmi, HDMI_VP_REMAP_OFFSET, reg); + + /* Grabbed from reference driver */ + reg = 0; + if (output_select == 0) { + /* pixel packing */ + reg = SET_FIELD(HDMI_VP_CONF_PP_EN_START, + HDMI_VP_CONF_PP_EN_MASK, + 1); + } else if (output_select == 1) { + /* YCC422 */ + reg = SET_FIELD(HDMI_VP_CONF_YCC422_EN_START, + HDMI_VP_CONF_YCC422_EN_MASK, + 1); + } else if (output_select == 2 || output_select == 3) { + /* Enable bypass */ + reg = SET_FIELD(HDMI_VP_CONF_BYPASS_EN_START, + HDMI_VP_CONF_BYPASS_EN_MASK, + 1) | + SET_FIELD(HDMI_VP_CONF_BYPASS_SELECT_START, + HDMI_VP_CONF_BYPASS_SELECT_MASK, + 1); + } + + reg |= SET_FIELD(HDMI_VP_CONF_OUTPUT_SELECTOR_START, + HDMI_VP_CONF_OUTPUT_SELECTOR_MASK, + output_select); + +#if defined(EMULATOR) + reg |= SET_FIELD(HDMI_VP_CONF_BYPASS_EN_START, + HDMI_VP_CONF_BYPASS_EN_MASK, + 1) | + SET_FIELD(HDMI_VP_CONF_BYPASS_SELECT_START, + HDMI_VP_CONF_BYPASS_SELECT_MASK, + 1); +#endif + + /* pixel packing */ + reg |= SET_FIELD(HDMI_VP_CONF_PP_EN_START, + HDMI_VP_CONF_PP_EN_MASK, + 1); + hdmi_write_reg32(hdmi, HDMI_VP_CONF_OFFSET, reg); + + /* YCC422 and pixel packing stuffing */ + reg = SET_FIELD(HDMI_VP_STUFF_IDEFAULT_PHASE_START, + HDMI_VP_STUFF_IDEFAULT_PHASE_MASK, + hdmi->hdmi_data.pp_default_phase) | + SET_FIELD(HDMI_VP_STUFF_YCC422_STUFFING_START, + HDMI_VP_STUFF_YCC422_STUFFING_MASK, + 1) | + 
SET_FIELD(HDMI_VP_STUFF_PP_STUFFING_START, + HDMI_VP_STUFF_PP_STUFFING_MASK, + 1); + hdmi_write_reg32(hdmi, HDMI_VP_STUFF_OFFSET, reg); + + return true; +} + +/* + *Sets up Video Sampler registers + */ +static bool video_configure_vs(struct hdmi_device *hdmi) +{ + u8 map_code = 0; + u8 reg = 0; + + HDMI_CHECKPOINT; + + if (hdmi->hdmi_data.enc_in_format == ENCODING_RGB || + hdmi->hdmi_data.enc_in_format == ENCODING_YCC444) { + if (hdmi->hdmi_data.enc_color_depth == 8 || hdmi->hdmi_data.enc_color_depth == 0) { + map_code = 1; + } else if (hdmi->hdmi_data.enc_color_depth == 10) { + map_code = 3; + } else if (hdmi->hdmi_data.enc_color_depth == 12) { + map_code = 5; + } else if (hdmi->hdmi_data.enc_color_depth == 16) { + map_code = 7; + } else { + hdmi_error(hdmi, "- %s: Invalid color depth\n", __func__); + return false; + } + map_code += (hdmi->hdmi_data.enc_in_format == ENCODING_YCC444) ? 8 : 0; + } else if (hdmi->hdmi_data.enc_in_format == ENCODING_YCC422_8BITS) { + /* YCC422 mapping is discontinued - only map 1 is supported */ + if (hdmi->hdmi_data.enc_color_depth == 12) { + map_code = 18; + } else if (hdmi->hdmi_data.enc_color_depth == 10) { + map_code = 20; + } else if ((hdmi->hdmi_data.enc_color_depth == 8) + || (hdmi->hdmi_data.enc_color_depth == 0)) { + map_code = 22; + } else { + hdmi_error(hdmi, "- %s: Invalid color remap size: %d", + __func__, hdmi->hdmi_data.enc_color_depth); + return false; + } + } else { + hdmi_error(hdmi, "- %s: Invalid input encoding type: %d", + __func__, hdmi->hdmi_data.enc_in_format); + return false; + } + + reg = SET_FIELD(HDMI_TX_INVID0_INTERNAL_DE_GEN_START, HDMI_TX_INVID0_INTERNAL_DE_GEN_MASK, 0); + reg |= SET_FIELD(HDMI_TX_INVID0_VIDEO_MAPPING_START, HDMI_TX_INVID0_VIDEO_MAPPING_MASK, map_code); + hdmi_write_reg32(hdmi, HDMI_TX_INVID0_OFFSET, reg); + +#if !defined(EMULATOR) + hdmi_write_reg32(hdmi, HDMI_TX_GYDATA0_OFFSET, 0); + hdmi_write_reg32(hdmi, HDMI_TX_GYDATA1_OFFSET, 0); + hdmi_write_reg32(hdmi, HDMI_TX_RCRDATA0_OFFSET, 
0); + hdmi_write_reg32(hdmi, HDMI_TX_RCRDATA1_OFFSET, 0); + hdmi_write_reg32(hdmi, HDMI_TX_BCBDATA0_OFFSET, 0); + hdmi_write_reg32(hdmi, HDMI_TX_BCBDATA1_OFFSET, 0); + /* Sets stuffing enable for BDBDATA, RCRDATA, and GYDATA */ + hdmi_write_reg32(hdmi, HDMI_TX_INSTUFFING_OFFSET, 0x7); +#endif + + return true; +} + +/* + * Frame composer + */ +static bool video_configure_fc(struct hdmi_device *hdmi, struct drm_display_mode *mode) +{ + u32 reg = 0; + u32 hblank, vblank, hsync_delay, vsync_delay, hsync_length, vsync_length; + + /* + *Input video configuration + */ + reg = SET_FIELD(HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_START, + HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_MASK, + hdmi->hdmi_data.video_mode.vsync_polarity) | + SET_FIELD(HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_START, + HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_MASK, + hdmi->hdmi_data.video_mode.hsync_polarity) | + SET_FIELD(HDMI_FC_INVIDCONF_DE_IN_POLARITY_START, + HDMI_FC_INVIDCONF_DE_IN_POLARITY_MASK, + hdmi->hdmi_data.video_mode.data_enable_polarity) | + SET_FIELD(HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_START, + HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_MASK, + hdmi->hdmi_data.video_mode.interlaced) | + SET_FIELD(HDMI_FC_INVIDCONF_IN_I_P_START, + HDMI_FC_INVIDCONF_IN_I_P_MASK, + hdmi->hdmi_data.video_mode.interlaced) | + SET_FIELD(HDMI_FC_INVIDCONF_DVI_MODEZ_START, + HDMI_FC_INVIDCONF_DVI_MODEZ_MASK, + hdmi->hdmi_data.video_mode.dvi); // Nick's original was !dvi + hdmi_write_reg32(hdmi, HDMI_FC_INVIDCONF_OFFSET, reg); + + /* + * Input HActive Pixels + */ + reg = SET_FIELD(HDMI_FC_INHACTIV0_H_IN_ACTIV_START, + HDMI_FC_INHACTIV0_H_IN_ACTIV_MASK, + mode->hdisplay & 0xFF); + hdmi_write_reg32(hdmi, HDMI_FC_INHACTIV0_OFFSET, reg); + + reg = SET_FIELD(HDMI_FC_INHACTIV1_H_IN_ACTIV_UPPER_START, + HDMI_FC_INHACTIV1_H_IN_ACTIV_UPPER_MASK, + mode->hdisplay >> 8); + hdmi_write_reg32(hdmi, HDMI_FC_INHACTIV1_OFFSET, reg); + + /* + * Input VActive Pixels + */ + reg = SET_FIELD(HDMI_FC_INVACTIV0_V_IN_ACTIV_START, + HDMI_FC_INVACTIV0_V_IN_ACTIV_MASK, 
+ mode->vdisplay & 0xFF); + hdmi_write_reg32(hdmi, HDMI_FC_INVACTIV0_OFFSET, reg); + + reg = SET_FIELD(HDMI_FC_INVACTIV1_V_IN_ACTIV_UPPER_START, + HDMI_FC_INVACTIV1_V_IN_ACTIV_UPPER_MASK, + mode->vdisplay >> 8); + hdmi_write_reg32(hdmi, HDMI_FC_INVACTIV1_OFFSET, reg); + + /* + * Input HBlank Pixels + */ + hblank = mode->htotal - mode->hdisplay; + reg = SET_FIELD(HDMI_FC_INHBLANK0_H_IN_BLANK_START, + HDMI_FC_INHBLANK0_H_IN_BLANK_MASK, + hblank & 0xFF); + hdmi_write_reg32(hdmi, HDMI_FC_INHBLANK0_OFFSET, reg); + + reg = SET_FIELD(HDMI_FC_INHBLANK1_H_IN_BLANK_UPPER_START, + HDMI_FC_INHBLANK1_H_IN_BLANK_UPPER_MASK, + hblank >> 8); + hdmi_write_reg32(hdmi, HDMI_FC_INHBLANK1_OFFSET, reg); + + /* + * Input VBlank Pixels + */ + vblank = mode->vtotal - mode->vdisplay; + reg = SET_FIELD(HDMI_FC_INVBLANK_V_IN_BLANK_START, + HDMI_FC_INVBLANK_V_IN_BLANK_MASK, + vblank & 0xFF); + hdmi_write_reg32(hdmi, HDMI_FC_INVBLANK_OFFSET, reg); + + /* + * Input HSync Front Porch (pixel clock cycles from "de" non active edge of the last "de" valid period) + */ + hsync_delay = mode->hsync_start - mode->hdisplay; + reg = SET_FIELD(HDMI_FC_HSYNCINDELAY0_H_IN_DELAY_START, + HDMI_FC_HSYNCINDELAY0_H_IN_DELAY_MASK, + hsync_delay & 0xFF); + hdmi_write_reg32(hdmi, HDMI_FC_HSYNCINDELAY0_OFFSET, reg); + + reg = SET_FIELD(HDMI_FC_HSYNCINDELAY1_H_IN_DELAY_UPPER_START, + HDMI_FC_HSYNCINDELAY1_H_IN_DELAY_UPPER_MASK, + hsync_delay >> 8); + hdmi_write_reg32(hdmi, HDMI_FC_HSYNCINDELAY1_OFFSET, reg); + + /* + * Input VSync Front porch + */ + vsync_delay = mode->vsync_start - mode->vdisplay; + reg = SET_FIELD(HDMI_FC_VSYNCINDELAY_V_IN_DELAY_START, + HDMI_FC_VSYNCINDELAY_V_IN_DELAY_MASK, + vsync_delay & 0xFF); + hdmi_write_reg32(hdmi, HDMI_FC_VSYNCINDELAY_OFFSET, reg); + + /* + * Input HSync pulse width + */ + hsync_length = mode->hsync_end - mode->hsync_start; + reg = SET_FIELD(HDMI_FC_HSYNCINWIDTH0_H_IN_WIDTH_START, + HDMI_FC_HSYNCINWIDTH0_H_IN_WIDTH_MASK, + hsync_length & 0xFF); + hdmi_write_reg32(hdmi, 
HDMI_FC_HSYNCINWIDTH0_OFFSET, reg); + + reg = SET_FIELD(HDMI_FC_HSYNCINWIDTH1_H_IN_WIDTH_UPPER_START, + HDMI_FC_HSYNCINWIDTH1_H_IN_WIDTH_UPPER_MASK, + hsync_length >> 8); + hdmi_write_reg32(hdmi, HDMI_FC_HSYNCINWIDTH1_OFFSET, reg); + + /* + * Input Vsync pulse width + */ + vsync_length = mode->vsync_end - mode->vsync_start; + reg = SET_FIELD(HDMI_FC_VSYNCINWIDTH_V_IN_WIDTH_START, + HDMI_FC_VSYNCINWIDTH_V_IN_WIDTH_MASK, + vsync_length & 0xFF); + hdmi_write_reg32(hdmi, HDMI_FC_VSYNCINWIDTH_OFFSET, reg); + + return true; + +} + +/* + * Steps for Configuring video mode: + * 1. Configure pixel clock, VP, VS, and FC + * 2. Power down Tx PHY + * 3. Set desired video mode (based on EDID sink information) + * 4. Power-on, PLL configuration, and reset Tx PHY + * 5. Wait for PHY lock to assert + */ +int video_configure_mode(struct hdmi_device *hdmi, struct drm_display_mode *mode) +{ + int err = 0; + + HDMI_CHECKPOINT; + + hdmi_info(hdmi, " - %s: Configuring video mode for VIC %d\n", __func__, hdmi->hdmi_data.vic); + + hdmi->hdmi_data.video_mode.vsync_polarity = mode->flags & DRM_MODE_FLAG_PVSYNC; + hdmi->hdmi_data.video_mode.hsync_polarity = mode->flags & DRM_MODE_FLAG_PHSYNC; + hdmi->hdmi_data.video_mode.interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE; + + /* + * Step D.2: Set desired video mode based on current VideoParams structure + */ + if (!video_configure_vp(hdmi)) { + err = HDMI_INIT_FAILED_VIDEO; + goto EXIT; + } + + if (!video_configure_vs(hdmi)) { + err = HDMI_INIT_FAILED_VIDEO; + goto EXIT; + } + + if (!video_configure_fc(hdmi, mode)) { + err = HDMI_INIT_FAILED_VIDEO; + goto EXIT; + } + + /* Place phy in reset */ + phy_power_down(hdmi); + + /* Audio N and CTS value */ + hdmi_write_reg32(hdmi, HDMI_AUD_N1_OFFSET, 0xE5); + hdmi_write_reg32(hdmi, HDMI_AUD_N2_OFFSET, 0x0F); + hdmi_write_reg32(hdmi, HDMI_AUD_N3_OFFSET, 0x00); + hdmi_write_reg32(hdmi, HDMI_AUD_CTS1_OFFSET, 0x19); + hdmi_write_reg32(hdmi, HDMI_AUD_CTS2_OFFSET, 0xD5); + hdmi_write_reg32(hdmi, 
HDMI_AUD_CTS3_OFFSET, 0x02); + + /* Setup PHY for intended video mode */ + phy_configure_mode(hdmi, mode); + + /* Wait for PHY lock */ + err = phy_wait_lock(hdmi); + if (err != HDMI_INIT_SUCCESS) + goto EXIT; + + /* From pdump */ + hdmi_write_reg32(hdmi, HDMI_FC_CTRLDUR_OFFSET, 0x0C); + hdmi_write_reg32(hdmi, HDMI_FC_EXCTRLDUR_OFFSET, 0x20); + hdmi_write_reg32(hdmi, HDMI_FC_EXCTRLSPAC_OFFSET, 0x01); + hdmi_write_reg32(hdmi, HDMI_FC_CH0PREAM_OFFSET, 0x0B); + hdmi_write_reg32(hdmi, HDMI_FC_CH1PREAM_OFFSET, 0x16); + hdmi_write_reg32(hdmi, HDMI_FC_CH2PREAM_OFFSET, 0x21); + hdmi_write_reg32(hdmi, HDMI_MC_CLKDIS_OFFSET, + SET_FIELD(HDMI_MC_CLKDIS_HDCPCLK_DIS_START, HDMI_MC_CLKDIS_HDCPCLK_DIS_MASK, 1)); + +EXIT: + return err; +} + +/* + * See HDMI ref driver packets.c + */ +int video_configure_infoframes(struct hdmi_device *hdmi, struct drm_display_mode *mode) +{ + u32 reg; + + HDMI_CHECKPOINT; + +#if defined(EMULATOR) + return 0; // This breaks on emu +#endif + + /* Only relevant for HDMI */ + if (!hdmi->hdmi_data.video_mode.dvi) { // Nick's original was without ! + hdmi_info(hdmi, "- %s: Sink is DVI, not configuring infoframes\n", __func__); + return 0; + } + + /* AVI CONF0 setup */ + reg = SET_FIELD(HDMI_FC_AVICONF0_RGBYCC_START, HDMI_FC_AVICONF0_RGBYCC_MASK, 0); + reg |= SET_FIELD(HDMI_FC_AVICONF0_SCAN_INFO_START, + HDMI_FC_AVICONF0_SCAN_INFO_MASK, + 0); + hdmi_write_reg32(hdmi, HDMI_FC_AVICONF0_OFFSET, reg); + + /* AVI CONF1 setup */ + reg = SET_FIELD(HDMI_FC_AVICONF1_PIC_ASPECT_RATIO_START, + HDMI_FC_AVICONF1_PIC_ASPECT_RATIO_START, + (mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_16_9) ? 
2 : 1); + reg |= SET_FIELD(HDMI_FC_AVICONF1_COLORIMETRY_START, + HDMI_FC_AVICONF1_COLORIMETRY_MASK, + hdmi->hdmi_data.colorimetry); + reg |= SET_FIELD(HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_START, + HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_MASK, + hdmi->hdmi_data.active_aspect_ratio); + hdmi_write_reg32(hdmi, HDMI_FC_AVICONF1_OFFSET, reg); + + /* Go back and set active format valid bit */ + reg = SET_FIELD(HDMI_FC_AVICONF0_ACTIVE_FORMAT_PRESENT_START, + HDMI_FC_AVICONF0_ACTIVE_FORMAT_PRESENT_MASK, + 1); + hdmi_mod_reg32(hdmi, HDMI_FC_AVICONF0_OFFSET, + reg, HDMI_FC_AVICONF0_ACTIVE_FORMAT_PRESENT_MASK); + + /* AVI CONF2 setup */ + reg = SET_FIELD(HDMI_FC_AVICONF2_IT_CONTENT_START, + HDMI_FC_AVICONF2_IT_CONTENT_MASK, + 0); + reg |= SET_FIELD(HDMI_FC_AVICONF2_QUANTIZATION_RANGE_START, + HDMI_FC_AVICONF2_QUANTIZATION_RANGE_MASK, + 0); + reg |= SET_FIELD(HDMI_FC_AVICONF2_NON_UNIFORM_PIC_SCALING_START, + HDMI_FC_AVICONF2_NON_UNIFORM_PIC_SCALING_MASK, + 0); + hdmi_write_reg32(hdmi, HDMI_FC_AVICONF2_OFFSET, reg); + + /* AVI VID setup */ + reg = SET_FIELD(HDMI_FC_AVIVID_START, HDMI_FC_AVIVID_MASK, hdmi->hdmi_data.vic); + hdmi_write_reg32(hdmi, HDMI_FC_AVIVID_OFFSET, reg); + + /* Set horizontal bars to 0 */ + hdmi_write_reg32(hdmi, HDMI_FC_AVIETB0_OFFSET, 0); + hdmi_write_reg32(hdmi, HDMI_FC_AVIETB1_OFFSET, 0); + hdmi_write_reg32(hdmi, HDMI_FC_AVISBB0_OFFSET, 0); + hdmi_write_reg32(hdmi, HDMI_FC_AVISBB1_OFFSET, 0); + hdmi_mod_reg32(hdmi, HDMI_FC_AVICONF0_OFFSET, + SET_FIELD(HDMI_FC_AVICONF0_HBAR_VALID_START, HDMI_FC_AVICONF0_HBAR_VALID_MASK, 1), + HDMI_FC_AVICONF0_HBAR_VALID_MASK); + + /* Set vertical bars to 0 */ + hdmi_write_reg32(hdmi, HDMI_FC_AVIELB0_OFFSET, 0); + hdmi_write_reg32(hdmi, HDMI_FC_AVIELB1_OFFSET, 0); + hdmi_write_reg32(hdmi, HDMI_FC_AVISRB0_OFFSET, 0); + hdmi_write_reg32(hdmi, HDMI_FC_AVISRB1_OFFSET, 0); + hdmi_mod_reg32(hdmi, HDMI_FC_AVICONF0_OFFSET, + SET_FIELD(HDMI_FC_AVICONF0_VBAR_VALID_START, HDMI_FC_AVICONF0_VBAR_VALID_MASK, 1), + 
HDMI_FC_AVICONF0_VBAR_VALID_MASK); + + /* Set out pixel repetition factor */ + hdmi_mod_reg32(hdmi, HDMI_FC_PRCONF_OFFSET, + SET_FIELD(HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_START, + HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK, + ((hdmi->hdmi_data.video_mode.pixel_repetition_input + 1) * + (hdmi->hdmi_data.pix_repet_factor + 1) - 1)), + HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK); + + return 0; +} + +/* + * Initialize routine will setup video mode for VGA DVI mode to + * start EDID communication + */ +int video_init(struct hdmi_device *hdmi) +{ + int err = 0; + + HDMI_CHECKPOINT; + + /* Force video output on init */ + hdmi_write_reg32(hdmi, HDMI_FC_DBGTMDS_2_OFFSET, 0x00); /* R */ + hdmi_write_reg32(hdmi, HDMI_FC_DBGTMDS_1_OFFSET, 0xFF); /* G */ + hdmi_write_reg32(hdmi, HDMI_FC_DBGTMDS_0_OFFSET, 0x00); /* B */ + hdmi_write_reg32(hdmi, HDMI_FC_DBGFORCE_OFFSET, + SET_FIELD(HDMI_FC_DBGFORCE_FORCE_VIDEO_START, + HDMI_FC_DBGFORCE_FORCE_VIDEO_MASK, 0)); + + return err; +} diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_video.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_video.h new file mode 100644 index 000000000000..9ea35a7259ea --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/hdmi/hdmi_video.h @@ -0,0 +1,91 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +@Copyright Portions Copyright (c) Synopsys Ltd. 
All Rights Reserved +@License Synopsys Permissive License + +The Synopsys Software Driver and documentation (hereinafter "Software") +is an unsupported proprietary work of Synopsys, Inc. unless otherwise +expressly agreed to in writing between Synopsys and you. + +The Software IS NOT an item of Licensed Software or Licensed Product under +any End User Software License Agreement or Agreement for Licensed Product +with Synopsys or any supplement thereto. Permission is hereby granted, +free of charge, to any person obtaining a copy of this software annotated +with this license and the Software, to deal in the Software without +restriction, including without limitation the rights to use, copy, modify, +merge, publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, subject +to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. 
+*/ /**************************************************************************/ +#ifndef _HDMI_VIDEO_H_ +#define _HDMI_VIDEO_H_ + +#include "hdmi.h" + +/* + * Initialize video regs to default values (VGA mode) + */ +int video_init(struct hdmi_device *hdmi); + +/* + * Configure the pixel clock, video packetizer, sampler, frame composer, + * and phy for the given mode. + */ +void hdmi_enable_pclock(struct hdmi_device *hdmi, u32 pixel_clock); +int video_configure_mode(struct hdmi_device *hdmi, struct drm_display_mode *mode); +int video_configure_infoframes(struct hdmi_device *hdmi, struct drm_display_mode *mode); + +#endif diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/pdp2_mmu_regs.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/pdp2_mmu_regs.h new file mode 100644 index 000000000000..6164c5814140 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/pdp2_mmu_regs.h @@ -0,0 +1,764 @@ +/*************************************************************************/ /*! +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + + +#ifndef _PDP2_MMU_REGS_H +#define _PDP2_MMU_REGS_H + +/* Hardware register definitions */ + +#define PDP_BIF_DIR_BASE_ADDR_OFFSET (0x0020) +#define PDP_BIF_DIR_BASE_ADDR_STRIDE (4) +#define PDP_BIF_DIR_BASE_ADDR_NO_ENTRIES (4) + +/* PDP_BIF, DIR_BASE_ADDR, MMU_DIR_BASE_ADDR +Base address in physical memory for MMU Directory n Entries. When MMU_ENABLE_EXT_ADDRESSING is '1', the bits 31:0 are assigned to the address 31+EXT_ADDR_RANGE:0+EXT_ADDR_RANGE, but then any address offset within a page is forced to 0. 
When MMU_ENABLE_EXT_ADDRESSING is '0', bits 31:12 are assigned to address 31:12 +*/ +#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_MASK (0xFFFFFFFF) +#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_LSBMASK (0xFFFFFFFF) +#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_SHIFT (0) +#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_LENGTH (32) +#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_TILE_CFG_OFFSET (0x0040) +#define PDP_BIF_TILE_CFG_STRIDE (4) +#define PDP_BIF_TILE_CFG_NO_ENTRIES (4) + +/* PDP_BIF, TILE_CFG, TILE_128INTERLEAVE +*/ +#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_MASK (0x00000010) +#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_LSBMASK (0x00000001) +#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_SHIFT (4) +#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_LENGTH (1) +#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, TILE_CFG, TILE_ENABLE +*/ +#define PDP_BIF_TILE_CFG_TILE_ENABLE_MASK (0x00000008) +#define PDP_BIF_TILE_CFG_TILE_ENABLE_LSBMASK (0x00000001) +#define PDP_BIF_TILE_CFG_TILE_ENABLE_SHIFT (3) +#define PDP_BIF_TILE_CFG_TILE_ENABLE_LENGTH (1) +#define PDP_BIF_TILE_CFG_TILE_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, TILE_CFG, TILE_STRIDE +*/ +#define PDP_BIF_TILE_CFG_TILE_STRIDE_MASK (0x00000007) +#define PDP_BIF_TILE_CFG_TILE_STRIDE_LSBMASK (0x00000007) +#define PDP_BIF_TILE_CFG_TILE_STRIDE_SHIFT (0) +#define PDP_BIF_TILE_CFG_TILE_STRIDE_LENGTH (3) +#define PDP_BIF_TILE_CFG_TILE_STRIDE_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_TILE_MIN_ADDR_OFFSET (0x0050) +#define PDP_BIF_TILE_MIN_ADDR_STRIDE (4) +#define PDP_BIF_TILE_MIN_ADDR_NO_ENTRIES (4) + +/* PDP_BIF, TILE_MIN_ADDR, TILE_MIN_ADDR +*/ +#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_MASK (0xFFFFFFFF) +#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_LSBMASK (0xFFFFFFFF) +#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_SHIFT (0) +#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_LENGTH (32) +#define 
PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_TILE_MAX_ADDR_OFFSET (0x0060) +#define PDP_BIF_TILE_MAX_ADDR_STRIDE (4) +#define PDP_BIF_TILE_MAX_ADDR_NO_ENTRIES (4) + +/* PDP_BIF, TILE_MAX_ADDR, TILE_MAX_ADDR +*/ +#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_MASK (0xFFFFFFFF) +#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_LSBMASK (0xFFFFFFFF) +#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_SHIFT (0) +#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_LENGTH (32) +#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_CONTROL0_OFFSET (0x0000) + +/* PDP_BIF, CONTROL0, MMU_TILING_SCHEME +*/ +#define PDP_BIF_CONTROL0_MMU_TILING_SCHEME_MASK (0x00000001) +#define PDP_BIF_CONTROL0_MMU_TILING_SCHEME_LSBMASK (0x00000001) +#define PDP_BIF_CONTROL0_MMU_TILING_SCHEME_SHIFT (0) +#define PDP_BIF_CONTROL0_MMU_TILING_SCHEME_LENGTH (1) +#define PDP_BIF_CONTROL0_MMU_TILING_SCHEME_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONTROL0, MMU_CACHE_POLICY +*/ +#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_MASK (0x00000100) +#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_LSBMASK (0x00000001) +#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_SHIFT (8) +#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_LENGTH (1) +#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONTROL0, FORCE_CACHE_POLICY_BYPASS +*/ +#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_MASK (0x00000200) +#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_LSBMASK (0x00000001) +#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_SHIFT (9) +#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_LENGTH (1) +#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONTROL0, STALL_ON_PROTOCOL_FAULT +*/ +#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_MASK (0x00001000) +#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_LSBMASK (0x00000001) +#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_SHIFT (12) +#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_LENGTH 
(1) +#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_CONTROL1_OFFSET (0x0008) + +/* PDP_BIF, CONTROL1, MMU_FLUSH0 +*/ +#define PDP_BIF_CONTROL1_MMU_FLUSH0_MASK (0x00000001) +#define PDP_BIF_CONTROL1_MMU_FLUSH0_LSBMASK (0x00000001) +#define PDP_BIF_CONTROL1_MMU_FLUSH0_SHIFT (0) +#define PDP_BIF_CONTROL1_MMU_FLUSH0_LENGTH (1) +#define PDP_BIF_CONTROL1_MMU_FLUSH0_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONTROL1, MMU_FLUSH1 +*/ +#define PDP_BIF_CONTROL1_MMU_FLUSH1_MASK (0x00000002) +#define PDP_BIF_CONTROL1_MMU_FLUSH1_LSBMASK (0x00000001) +#define PDP_BIF_CONTROL1_MMU_FLUSH1_SHIFT (1) +#define PDP_BIF_CONTROL1_MMU_FLUSH1_LENGTH (1) +#define PDP_BIF_CONTROL1_MMU_FLUSH1_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONTROL1, MMU_FLUSH2 +*/ +#define PDP_BIF_CONTROL1_MMU_FLUSH2_MASK (0x00000004) +#define PDP_BIF_CONTROL1_MMU_FLUSH2_LSBMASK (0x00000001) +#define PDP_BIF_CONTROL1_MMU_FLUSH2_SHIFT (2) +#define PDP_BIF_CONTROL1_MMU_FLUSH2_LENGTH (1) +#define PDP_BIF_CONTROL1_MMU_FLUSH2_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONTROL1, MMU_FLUSH3 +*/ +#define PDP_BIF_CONTROL1_MMU_FLUSH3_MASK (0x00000008) +#define PDP_BIF_CONTROL1_MMU_FLUSH3_LSBMASK (0x00000001) +#define PDP_BIF_CONTROL1_MMU_FLUSH3_SHIFT (3) +#define PDP_BIF_CONTROL1_MMU_FLUSH3_LENGTH (1) +#define PDP_BIF_CONTROL1_MMU_FLUSH3_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONTROL1, MMU_INVALDC0 +*/ +#define PDP_BIF_CONTROL1_MMU_INVALDC0_MASK (0x00000100) +#define PDP_BIF_CONTROL1_MMU_INVALDC0_LSBMASK (0x00000001) +#define PDP_BIF_CONTROL1_MMU_INVALDC0_SHIFT (8) +#define PDP_BIF_CONTROL1_MMU_INVALDC0_LENGTH (1) +#define PDP_BIF_CONTROL1_MMU_INVALDC0_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONTROL1, MMU_INVALDC1 +*/ +#define PDP_BIF_CONTROL1_MMU_INVALDC1_MASK (0x00000200) +#define PDP_BIF_CONTROL1_MMU_INVALDC1_LSBMASK (0x00000001) +#define PDP_BIF_CONTROL1_MMU_INVALDC1_SHIFT (9) +#define PDP_BIF_CONTROL1_MMU_INVALDC1_LENGTH (1) +#define PDP_BIF_CONTROL1_MMU_INVALDC1_SIGNED_FIELD IMG_FALSE + +/* 
PDP_BIF, CONTROL1, MMU_INVALDC2 +*/ +#define PDP_BIF_CONTROL1_MMU_INVALDC2_MASK (0x00000400) +#define PDP_BIF_CONTROL1_MMU_INVALDC2_LSBMASK (0x00000001) +#define PDP_BIF_CONTROL1_MMU_INVALDC2_SHIFT (10) +#define PDP_BIF_CONTROL1_MMU_INVALDC2_LENGTH (1) +#define PDP_BIF_CONTROL1_MMU_INVALDC2_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONTROL1, MMU_INVALDC3 +*/ +#define PDP_BIF_CONTROL1_MMU_INVALDC3_MASK (0x00000800) +#define PDP_BIF_CONTROL1_MMU_INVALDC3_LSBMASK (0x00000001) +#define PDP_BIF_CONTROL1_MMU_INVALDC3_SHIFT (11) +#define PDP_BIF_CONTROL1_MMU_INVALDC3_LENGTH (1) +#define PDP_BIF_CONTROL1_MMU_INVALDC3_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONTROL1, MMU_FAULT_CLEAR +*/ +#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_MASK (0x00010000) +#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_LSBMASK (0x00000001) +#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_SHIFT (16) +#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_LENGTH (1) +#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONTROL1, PROTOCOL_FAULT_CLEAR +*/ +#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_MASK (0x00100000) +#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_LSBMASK (0x00000001) +#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_SHIFT (20) +#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_LENGTH (1) +#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONTROL1, MMU_PAUSE_SET +*/ +#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_MASK (0x01000000) +#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_LSBMASK (0x00000001) +#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_SHIFT (24) +#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_LENGTH (1) +#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONTROL1, MMU_PAUSE_CLEAR +*/ +#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_MASK (0x02000000) +#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_LSBMASK (0x00000001) +#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_SHIFT (25) +#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_LENGTH (1) +#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_SIGNED_FIELD 
IMG_FALSE + +/* PDP_BIF, CONTROL1, MMU_SOFT_RESET +*/ +#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_MASK (0x10000000) +#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_LSBMASK (0x00000001) +#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_SHIFT (28) +#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_LENGTH (1) +#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_BANK_INDEX_OFFSET (0x0010) + +/* PDP_BIF, BANK_INDEX, MMU_BANK_INDEX +*/ +#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_MASK (0xC0000000) +#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_LSBMASK (0x00000003) +#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_SHIFT (30) +#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_LENGTH (2) +#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_SIGNED_FIELD IMG_FALSE +#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_NO_REPS (16) +#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_SIZE (2) + +#define PDP_BIF_REQUEST_PRIORITY_ENABLE_OFFSET (0x0018) + +/* PDP_BIF, REQUEST_PRIORITY_ENABLE, CMD_PRIORITY_ENABLE +*/ +#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_MASK (0x00008000) +#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_LSBMASK (0x00000001) +#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_SHIFT (15) +#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_LENGTH (1) +#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_SIGNED_FIELD IMG_FALSE +#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_NO_REPS (16) +#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_SIZE (1) + +/* PDP_BIF, REQUEST_PRIORITY_ENABLE, CMD_MMU_PRIORITY_ENABLE +*/ +#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_MASK (0x00010000) +#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_LSBMASK (0x00000001) +#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_SHIFT (16) +#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_LENGTH (1) +#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_SIGNED_FIELD IMG_FALSE + +#define 
PDP_BIF_REQUEST_LIMITED_THROUGHPUT_OFFSET (0x001C) + +/* PDP_BIF, REQUEST_LIMITED_THROUGHPUT, LIMITED_WORDS +*/ +#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_MASK (0x000003FF) +#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_LSBMASK (0x000003FF) +#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_SHIFT (0) +#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_LENGTH (10) +#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, REQUEST_LIMITED_THROUGHPUT, REQUEST_GAP +*/ +#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_MASK (0x0FFF0000) +#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_LSBMASK (0x00000FFF) +#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_SHIFT (16) +#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_LENGTH (12) +#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_ADDRESS_CONTROL_OFFSET (0x0070) + +/* PDP_BIF, ADDRESS_CONTROL, MMU_BYPASS +*/ +#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_MASK (0x00000001) +#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_LSBMASK (0x00000001) +#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_SHIFT (0) +#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_LENGTH (1) +#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, ADDRESS_CONTROL, MMU_ENABLE_EXT_ADDRESSING +*/ +#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_MASK (0x00000010) +#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_LSBMASK (0x00000001) +#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_SHIFT (4) +#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_LENGTH (1) +#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, ADDRESS_CONTROL, UPPER_ADDRESS_FIXED +*/ +#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_MASK (0x00FF0000) +#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_LSBMASK (0x000000FF) +#define 
PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_SHIFT (16) +#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_LENGTH (8) +#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_CONFIG0_OFFSET (0x0080) + +/* PDP_BIF, CONFIG0, NUM_REQUESTORS +*/ +#define PDP_BIF_CONFIG0_NUM_REQUESTORS_MASK (0x0000000F) +#define PDP_BIF_CONFIG0_NUM_REQUESTORS_LSBMASK (0x0000000F) +#define PDP_BIF_CONFIG0_NUM_REQUESTORS_SHIFT (0) +#define PDP_BIF_CONFIG0_NUM_REQUESTORS_LENGTH (4) +#define PDP_BIF_CONFIG0_NUM_REQUESTORS_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONFIG0, EXTENDED_ADDR_RANGE +*/ +#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_MASK (0x000000F0) +#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_LSBMASK (0x0000000F) +#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_SHIFT (4) +#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_LENGTH (4) +#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONFIG0, GROUP_OVERRIDE_SIZE +*/ +#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_MASK (0x00000700) +#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_LSBMASK (0x00000007) +#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_SHIFT (8) +#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_LENGTH (3) +#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONFIG0, ADDR_COHERENCY_SUPPORTED +*/ +#define PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_MASK (0x00001000) +#define PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_LSBMASK (0x00000001) +#define PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_SHIFT (12) +#define PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_LENGTH (1) +#define PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONFIG0, MMU_SUPPORTED +*/ +#define PDP_BIF_CONFIG0_MMU_SUPPORTED_MASK (0x00002000) +#define PDP_BIF_CONFIG0_MMU_SUPPORTED_LSBMASK (0x00000001) +#define PDP_BIF_CONFIG0_MMU_SUPPORTED_SHIFT (13) +#define PDP_BIF_CONFIG0_MMU_SUPPORTED_LENGTH (1) +#define PDP_BIF_CONFIG0_MMU_SUPPORTED_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, 
CONFIG0, TILE_ADDR_GRANULARITY +*/ +#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_MASK (0x001F0000) +#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_LSBMASK (0x0000001F) +#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_SHIFT (16) +#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_LENGTH (5) +#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONFIG0, NO_READ_REORDER +*/ +#define PDP_BIF_CONFIG0_NO_READ_REORDER_MASK (0x00200000) +#define PDP_BIF_CONFIG0_NO_READ_REORDER_LSBMASK (0x00000001) +#define PDP_BIF_CONFIG0_NO_READ_REORDER_SHIFT (21) +#define PDP_BIF_CONFIG0_NO_READ_REORDER_LENGTH (1) +#define PDP_BIF_CONFIG0_NO_READ_REORDER_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONFIG0, TAGS_SUPPORTED +*/ +#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_MASK (0xFFC00000) +#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_LSBMASK (0x000003FF) +#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_SHIFT (22) +#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_LENGTH (10) +#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_CONFIG1_OFFSET (0x0084) + +/* PDP_BIF, CONFIG1, PAGE_SIZE +*/ +#define PDP_BIF_CONFIG1_PAGE_SIZE_MASK (0x0000000F) +#define PDP_BIF_CONFIG1_PAGE_SIZE_LSBMASK (0x0000000F) +#define PDP_BIF_CONFIG1_PAGE_SIZE_SHIFT (0) +#define PDP_BIF_CONFIG1_PAGE_SIZE_LENGTH (4) +#define PDP_BIF_CONFIG1_PAGE_SIZE_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONFIG1, PAGE_CACHE_ENTRIES +*/ +#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_MASK (0x0000FF00) +#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_LSBMASK (0x000000FF) +#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_SHIFT (8) +#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_LENGTH (8) +#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONFIG1, DIR_CACHE_ENTRIES +*/ +#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_MASK (0x001F0000) +#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_LSBMASK (0x0000001F) +#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_SHIFT (16) +#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_LENGTH (5) +#define 
PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONFIG1, BANDWIDTH_COUNT_SUPPORTED +*/ +#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_MASK (0x01000000) +#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_LSBMASK (0x00000001) +#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_SHIFT (24) +#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_LENGTH (1) +#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONFIG1, STALL_COUNT_SUPPORTED +*/ +#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_MASK (0x02000000) +#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_LSBMASK (0x00000001) +#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_SHIFT (25) +#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_LENGTH (1) +#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONFIG1, LATENCY_COUNT_SUPPORTED +*/ +#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_MASK (0x04000000) +#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_LSBMASK (0x00000001) +#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_SHIFT (26) +#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_LENGTH (1) +#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, CONFIG1, SUPPORT_READ_INTERLEAVE +*/ +#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_MASK (0x10000000) +#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_LSBMASK (0x00000001) +#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_SHIFT (28) +#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_LENGTH (1) +#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_STATUS0_OFFSET (0x0088) + +/* PDP_BIF, STATUS0, MMU_PF_N_RW +*/ +#define PDP_BIF_STATUS0_MMU_PF_N_RW_MASK (0x00000001) +#define PDP_BIF_STATUS0_MMU_PF_N_RW_LSBMASK (0x00000001) +#define PDP_BIF_STATUS0_MMU_PF_N_RW_SHIFT (0) +#define PDP_BIF_STATUS0_MMU_PF_N_RW_LENGTH (1) +#define PDP_BIF_STATUS0_MMU_PF_N_RW_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, STATUS0, MMU_FAULT_ADDR +*/ +#define 
PDP_BIF_STATUS0_MMU_FAULT_ADDR_MASK (0xFFFFF000) +#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_LSBMASK (0x000FFFFF) +#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_SHIFT (12) +#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_LENGTH (20) +#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_STATUS1_OFFSET (0x008C) + +/* PDP_BIF, STATUS1, MMU_FAULT_REQ_STAT +*/ +#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_MASK (0x0000FFFF) +#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_LSBMASK (0x0000FFFF) +#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_SHIFT (0) +#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_LENGTH (16) +#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, STATUS1, MMU_FAULT_REQ_ID +*/ +#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_MASK (0x000F0000) +#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_LSBMASK (0x0000000F) +#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_SHIFT (16) +#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_LENGTH (4) +#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, STATUS1, MMU_FAULT_INDEX +*/ +#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_MASK (0x03000000) +#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_LSBMASK (0x00000003) +#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_SHIFT (24) +#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_LENGTH (2) +#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, STATUS1, MMU_FAULT_RNW +*/ +#define PDP_BIF_STATUS1_MMU_FAULT_RNW_MASK (0x10000000) +#define PDP_BIF_STATUS1_MMU_FAULT_RNW_LSBMASK (0x00000001) +#define PDP_BIF_STATUS1_MMU_FAULT_RNW_SHIFT (28) +#define PDP_BIF_STATUS1_MMU_FAULT_RNW_LENGTH (1) +#define PDP_BIF_STATUS1_MMU_FAULT_RNW_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_MEM_REQ_OFFSET (0x0090) + +/* PDP_BIF, MEM_REQ, TAG_OUTSTANDING +*/ +#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_MASK (0x000003FF) +#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_LSBMASK (0x000003FF) +#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_SHIFT (0) +#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_LENGTH (10) +#define 
PDP_BIF_MEM_REQ_TAG_OUTSTANDING_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, MEM_REQ, EXT_WRRESP_FAULT +*/ +#define PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_MASK (0x00001000) +#define PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_LSBMASK (0x00000001) +#define PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_SHIFT (12) +#define PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_LENGTH (1) +#define PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, MEM_REQ, EXT_RDRESP_FAULT +*/ +#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_MASK (0x00002000) +#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_LSBMASK (0x00000001) +#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_SHIFT (13) +#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_LENGTH (1) +#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, MEM_REQ, EXT_READ_BURST_FAULT +*/ +#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_MASK (0x00004000) +#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_LSBMASK (0x00000001) +#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_SHIFT (14) +#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_LENGTH (1) +#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, MEM_REQ, INT_PROTOCOL_FAULT +*/ +#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_MASK (0x80000000) +#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_LSBMASK (0x00000001) +#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_SHIFT (31) +#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_LENGTH (1) +#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_SIGNED_FIELD IMG_FALSE +#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_NO_REPS (16) +#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_SIZE (1) + +#define PDP_BIF_MEM_EXT_OUTSTANDING_OFFSET (0x0094) + +/* PDP_BIF, MEM_EXT_OUTSTANDING, READ_WORDS_OUTSTANDING +*/ +#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_MASK (0x0000FFFF) +#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_LSBMASK (0x0000FFFF) +#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_SHIFT (0) +#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_LENGTH (16) +#define 
PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_FAULT_SELECT_OFFSET (0x00A0) + +/* PDP_BIF, FAULT_SELECT, MMU_FAULT_SELECT +*/ +#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_MASK (0x0000000F) +#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_LSBMASK (0x0000000F) +#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_SHIFT (0) +#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_LENGTH (4) +#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_PROTOCOL_FAULT_OFFSET (0x00A8) + +/* PDP_BIF, PROTOCOL_FAULT, FAULT_PAGE_BREAK +*/ +#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_MASK (0x00000001) +#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_LSBMASK (0x00000001) +#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_SHIFT (0) +#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_LENGTH (1) +#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, PROTOCOL_FAULT, FAULT_WRITE +*/ +#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_MASK (0x00000010) +#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_LSBMASK (0x00000001) +#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_SHIFT (4) +#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_LENGTH (1) +#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, PROTOCOL_FAULT, FAULT_READ +*/ +#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_MASK (0x00000020) +#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_LSBMASK (0x00000001) +#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_SHIFT (5) +#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_LENGTH (1) +#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_TOTAL_READ_REQ_OFFSET (0x0100) + +/* PDP_BIF, TOTAL_READ_REQ, TOTAL_READ_REQ +*/ +#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_MASK (0xFFFFFFFF) +#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_LSBMASK (0xFFFFFFFF) +#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_SHIFT (0) +#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_LENGTH (32) +#define 
PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_TOTAL_WRITE_REQ_OFFSET (0x0104) + +/* PDP_BIF, TOTAL_WRITE_REQ, TOTAL_WRITE_REQ +*/ +#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_MASK (0xFFFFFFFF) +#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_LSBMASK (0xFFFFFFFF) +#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_SHIFT (0) +#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_LENGTH (32) +#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_READS_LESS_64_REQ_OFFSET (0x0108) + +/* PDP_BIF, READS_LESS_64_REQ, READS_LESS_64_REQ +*/ +#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_MASK (0xFFFFFFFF) +#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_LSBMASK (0xFFFFFFFF) +#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_SHIFT (0) +#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_LENGTH (32) +#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_WRITES_LESS_64_REQ_OFFSET (0x010C) + +/* PDP_BIF, WRITES_LESS_64_REQ, WRITES_LESS_64_REQ +*/ +#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_MASK (0xFFFFFFFF) +#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_LSBMASK (0xFFFFFFFF) +#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_SHIFT (0) +#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_LENGTH (32) +#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_EXT_CMD_STALL_OFFSET (0x0120) + +/* PDP_BIF, EXT_CMD_STALL, EXT_CMD_STALL +*/ +#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_MASK (0xFFFFFFFF) +#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_LSBMASK (0xFFFFFFFF) +#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_SHIFT (0) +#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_LENGTH (32) +#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_WRITE_REQ_STALL_OFFSET (0x0124) + +/* PDP_BIF, WRITE_REQ_STALL, WRITE_REQ_STALL +*/ +#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_MASK (0xFFFFFFFF) 
+#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_LSBMASK (0xFFFFFFFF) +#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_SHIFT (0) +#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_LENGTH (32) +#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_MISS_STALL_OFFSET (0x0128) + +/* PDP_BIF, MISS_STALL, MMU_MISS_STALL +*/ +#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_MASK (0xFFFFFFFF) +#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_LSBMASK (0xFFFFFFFF) +#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_SHIFT (0) +#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_LENGTH (32) +#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_ADDRESS_STALL_OFFSET (0x012C) + +/* PDP_BIF, ADDRESS_STALL, ADDRESS_STALL +*/ +#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_MASK (0xFFFFFFFF) +#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_LSBMASK (0xFFFFFFFF) +#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_SHIFT (0) +#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_LENGTH (32) +#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_TAG_STALL_OFFSET (0x0130) + +/* PDP_BIF, TAG_STALL, TAG_STALL +*/ +#define PDP_BIF_TAG_STALL_TAG_STALL_MASK (0xFFFFFFFF) +#define PDP_BIF_TAG_STALL_TAG_STALL_LSBMASK (0xFFFFFFFF) +#define PDP_BIF_TAG_STALL_TAG_STALL_SHIFT (0) +#define PDP_BIF_TAG_STALL_TAG_STALL_LENGTH (32) +#define PDP_BIF_TAG_STALL_TAG_STALL_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_PEAK_READ_OUTSTANDING_OFFSET (0x0140) + +/* PDP_BIF, PEAK_READ_OUTSTANDING, PEAK_TAG_OUTSTANDING +*/ +#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_MASK (0x000003FF) +#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_LSBMASK (0x000003FF) +#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_SHIFT (0) +#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_LENGTH (10) +#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, PEAK_READ_OUTSTANDING, PEAK_READ_LATENCY +*/ +#define 
PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_MASK (0xFFFF0000) +#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_LSBMASK (0x0000FFFF) +#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_SHIFT (16) +#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_LENGTH (16) +#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_AVERAGE_READ_LATENCY_OFFSET (0x0144) + +/* PDP_BIF, AVERAGE_READ_LATENCY, AVERAGE_READ_LATENCY +*/ +#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_MASK (0xFFFFFFFF) +#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_LSBMASK (0xFFFFFFFF) +#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_SHIFT (0) +#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_LENGTH (32) +#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_STATISTICS_CONTROL_OFFSET (0x0160) + +/* PDP_BIF, STATISTICS_CONTROL, BANDWIDTH_STATS_INIT +*/ +#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_MASK (0x00000001) +#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_LSBMASK (0x00000001) +#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_SHIFT (0) +#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_LENGTH (1) +#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, STATISTICS_CONTROL, STALL_STATS_INIT +*/ +#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_MASK (0x00000002) +#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_LSBMASK (0x00000001) +#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_SHIFT (1) +#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_LENGTH (1) +#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, STATISTICS_CONTROL, LATENCY_STATS_INIT +*/ +#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_MASK (0x00000004) +#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_LSBMASK (0x00000001) +#define 
PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_SHIFT (2) +#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_LENGTH (1) +#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_SIGNED_FIELD IMG_FALSE + +#define PDP_BIF_VERSION_OFFSET (0x01D0) + +/* PDP_BIF, VERSION, MMU_MAJOR_REV +*/ +#define PDP_BIF_VERSION_MMU_MAJOR_REV_MASK (0x00FF0000) +#define PDP_BIF_VERSION_MMU_MAJOR_REV_LSBMASK (0x000000FF) +#define PDP_BIF_VERSION_MMU_MAJOR_REV_SHIFT (16) +#define PDP_BIF_VERSION_MMU_MAJOR_REV_LENGTH (8) +#define PDP_BIF_VERSION_MMU_MAJOR_REV_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, VERSION, MMU_MINOR_REV +*/ +#define PDP_BIF_VERSION_MMU_MINOR_REV_MASK (0x0000FF00) +#define PDP_BIF_VERSION_MMU_MINOR_REV_LSBMASK (0x000000FF) +#define PDP_BIF_VERSION_MMU_MINOR_REV_SHIFT (8) +#define PDP_BIF_VERSION_MMU_MINOR_REV_LENGTH (8) +#define PDP_BIF_VERSION_MMU_MINOR_REV_SIGNED_FIELD IMG_FALSE + +/* PDP_BIF, VERSION, MMU_MAINT_REV +*/ +#define PDP_BIF_VERSION_MMU_MAINT_REV_MASK (0x000000FF) +#define PDP_BIF_VERSION_MMU_MAINT_REV_LSBMASK (0x000000FF) +#define PDP_BIF_VERSION_MMU_MAINT_REV_SHIFT (0) +#define PDP_BIF_VERSION_MMU_MAINT_REV_LENGTH (8) +#define PDP_BIF_VERSION_MMU_MAINT_REV_SIGNED_FIELD IMG_FALSE + +#endif /* _PDP2_MMU_REGS_H */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/pdp2_regs.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/pdp2_regs.h new file mode 100644 index 000000000000..bf85386b1df5 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/pdp2_regs.h @@ -0,0 +1,8565 @@ +/*************************************************************************/ /*! +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + + +#ifndef _PDP2_REGS_H +#define _PDP2_REGS_H + +/* + * Bitfield operations + * For each argument field, the following preprocessor macros must exist + * field##_MASK - the number of bits in the bit field + * field##_SHIFT - offset from the first bit + */ +#define PLACE_FIELD(field, val) \ + (((u32)(val) << (field##_SHIFT)) & (field##_MASK)) + +#define ADJ_FIELD(x, field, val) \ + (((x) & ~(field##_MASK)) \ + | PLACE_FIELD(field, val)) + +#define SET_FIELD(x, field, val) \ + (x) = ADJ_FIELD(x, field, val) + +#define GET_FIELD(x, field) \ + (((x) & (field##_MASK)) >> (field##_SHIFT)) + +/* Keeps most significant bits */ +#define MOVE_FIELD(x, o1, l1, o2, l2) \ + (((x) >> ((o1) + (l1) - (l2))) << (o2)) + +#define MAX_FIELD_VALUE(field) \ + ((field##_MASK) >> (field##_SHIFT)) + +/* Hardware register definitions */ + +#define PDP_GRPH1SURF_OFFSET (0x0000) + +/* PDP, GRPH1SURF, GRPH1PIXFMT +*/ +#define PDP_GRPH1SURF_GRPH1PIXFMT_MASK (0xF8000000) +#define PDP_GRPH1SURF_GRPH1PIXFMT_LSBMASK (0x0000001F) +#define PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT (27) +#define PDP_GRPH1SURF_GRPH1PIXFMT_LENGTH (5) +#define PDP_GRPH1SURF_GRPH1PIXFMT_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1SURF, GRPH1USEGAMMA +*/ +#define PDP_GRPH1SURF_GRPH1USEGAMMA_MASK (0x04000000) +#define PDP_GRPH1SURF_GRPH1USEGAMMA_LSBMASK (0x00000001) +#define PDP_GRPH1SURF_GRPH1USEGAMMA_SHIFT (26) +#define PDP_GRPH1SURF_GRPH1USEGAMMA_LENGTH (1) +#define PDP_GRPH1SURF_GRPH1USEGAMMA_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1SURF, GRPH1USECSC +*/ +#define PDP_GRPH1SURF_GRPH1USECSC_MASK (0x02000000) +#define PDP_GRPH1SURF_GRPH1USECSC_LSBMASK (0x00000001) +#define PDP_GRPH1SURF_GRPH1USECSC_SHIFT (25) +#define PDP_GRPH1SURF_GRPH1USECSC_LENGTH (1) +#define PDP_GRPH1SURF_GRPH1USECSC_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1SURF, GRPH1LUTRWCHOICE +*/ +#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_MASK (0x01000000) +#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LSBMASK 
(0x00000001) +#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SHIFT (24) +#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LENGTH (1) +#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1SURF, GRPH1USELUT +*/ +#define PDP_GRPH1SURF_GRPH1USELUT_MASK (0x00800000) +#define PDP_GRPH1SURF_GRPH1USELUT_LSBMASK (0x00000001) +#define PDP_GRPH1SURF_GRPH1USELUT_SHIFT (23) +#define PDP_GRPH1SURF_GRPH1USELUT_LENGTH (1) +#define PDP_GRPH1SURF_GRPH1USELUT_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH2SURF_OFFSET (0x0004) + +/* PDP, GRPH2SURF, GRPH2PIXFMT +*/ +#define PDP_GRPH2SURF_GRPH2PIXFMT_MASK (0xF8000000) +#define PDP_GRPH2SURF_GRPH2PIXFMT_LSBMASK (0x0000001F) +#define PDP_GRPH2SURF_GRPH2PIXFMT_SHIFT (27) +#define PDP_GRPH2SURF_GRPH2PIXFMT_LENGTH (5) +#define PDP_GRPH2SURF_GRPH2PIXFMT_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2SURF, GRPH2USEGAMMA +*/ +#define PDP_GRPH2SURF_GRPH2USEGAMMA_MASK (0x04000000) +#define PDP_GRPH2SURF_GRPH2USEGAMMA_LSBMASK (0x00000001) +#define PDP_GRPH2SURF_GRPH2USEGAMMA_SHIFT (26) +#define PDP_GRPH2SURF_GRPH2USEGAMMA_LENGTH (1) +#define PDP_GRPH2SURF_GRPH2USEGAMMA_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2SURF, GRPH2USECSC +*/ +#define PDP_GRPH2SURF_GRPH2USECSC_MASK (0x02000000) +#define PDP_GRPH2SURF_GRPH2USECSC_LSBMASK (0x00000001) +#define PDP_GRPH2SURF_GRPH2USECSC_SHIFT (25) +#define PDP_GRPH2SURF_GRPH2USECSC_LENGTH (1) +#define PDP_GRPH2SURF_GRPH2USECSC_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2SURF, GRPH2LUTRWCHOICE +*/ +#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_MASK (0x01000000) +#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LSBMASK (0x00000001) +#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SHIFT (24) +#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LENGTH (1) +#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2SURF, GRPH2USELUT +*/ +#define PDP_GRPH2SURF_GRPH2USELUT_MASK (0x00800000) +#define PDP_GRPH2SURF_GRPH2USELUT_LSBMASK (0x00000001) +#define PDP_GRPH2SURF_GRPH2USELUT_SHIFT (23) +#define PDP_GRPH2SURF_GRPH2USELUT_LENGTH (1) +#define 
PDP_GRPH2SURF_GRPH2USELUT_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH3SURF_OFFSET (0x0008) + +/* PDP, GRPH3SURF, GRPH3PIXFMT +*/ +#define PDP_GRPH3SURF_GRPH3PIXFMT_MASK (0xF8000000) +#define PDP_GRPH3SURF_GRPH3PIXFMT_LSBMASK (0x0000001F) +#define PDP_GRPH3SURF_GRPH3PIXFMT_SHIFT (27) +#define PDP_GRPH3SURF_GRPH3PIXFMT_LENGTH (5) +#define PDP_GRPH3SURF_GRPH3PIXFMT_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3SURF, GRPH3USEGAMMA +*/ +#define PDP_GRPH3SURF_GRPH3USEGAMMA_MASK (0x04000000) +#define PDP_GRPH3SURF_GRPH3USEGAMMA_LSBMASK (0x00000001) +#define PDP_GRPH3SURF_GRPH3USEGAMMA_SHIFT (26) +#define PDP_GRPH3SURF_GRPH3USEGAMMA_LENGTH (1) +#define PDP_GRPH3SURF_GRPH3USEGAMMA_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3SURF, GRPH3USECSC +*/ +#define PDP_GRPH3SURF_GRPH3USECSC_MASK (0x02000000) +#define PDP_GRPH3SURF_GRPH3USECSC_LSBMASK (0x00000001) +#define PDP_GRPH3SURF_GRPH3USECSC_SHIFT (25) +#define PDP_GRPH3SURF_GRPH3USECSC_LENGTH (1) +#define PDP_GRPH3SURF_GRPH3USECSC_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3SURF, GRPH3LUTRWCHOICE +*/ +#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_MASK (0x01000000) +#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LSBMASK (0x00000001) +#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SHIFT (24) +#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LENGTH (1) +#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3SURF, GRPH3USELUT +*/ +#define PDP_GRPH3SURF_GRPH3USELUT_MASK (0x00800000) +#define PDP_GRPH3SURF_GRPH3USELUT_LSBMASK (0x00000001) +#define PDP_GRPH3SURF_GRPH3USELUT_SHIFT (23) +#define PDP_GRPH3SURF_GRPH3USELUT_LENGTH (1) +#define PDP_GRPH3SURF_GRPH3USELUT_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH4SURF_OFFSET (0x000C) + +/* PDP, GRPH4SURF, GRPH4PIXFMT +*/ +#define PDP_GRPH4SURF_GRPH4PIXFMT_MASK (0xF8000000) +#define PDP_GRPH4SURF_GRPH4PIXFMT_LSBMASK (0x0000001F) +#define PDP_GRPH4SURF_GRPH4PIXFMT_SHIFT (27) +#define PDP_GRPH4SURF_GRPH4PIXFMT_LENGTH (5) +#define PDP_GRPH4SURF_GRPH4PIXFMT_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4SURF, GRPH4USEGAMMA +*/ 
+#define PDP_GRPH4SURF_GRPH4USEGAMMA_MASK (0x04000000) +#define PDP_GRPH4SURF_GRPH4USEGAMMA_LSBMASK (0x00000001) +#define PDP_GRPH4SURF_GRPH4USEGAMMA_SHIFT (26) +#define PDP_GRPH4SURF_GRPH4USEGAMMA_LENGTH (1) +#define PDP_GRPH4SURF_GRPH4USEGAMMA_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4SURF, GRPH4USECSC +*/ +#define PDP_GRPH4SURF_GRPH4USECSC_MASK (0x02000000) +#define PDP_GRPH4SURF_GRPH4USECSC_LSBMASK (0x00000001) +#define PDP_GRPH4SURF_GRPH4USECSC_SHIFT (25) +#define PDP_GRPH4SURF_GRPH4USECSC_LENGTH (1) +#define PDP_GRPH4SURF_GRPH4USECSC_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4SURF, GRPH4LUTRWCHOICE +*/ +#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_MASK (0x01000000) +#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LSBMASK (0x00000001) +#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SHIFT (24) +#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LENGTH (1) +#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4SURF, GRPH4USELUT +*/ +#define PDP_GRPH4SURF_GRPH4USELUT_MASK (0x00800000) +#define PDP_GRPH4SURF_GRPH4USELUT_LSBMASK (0x00000001) +#define PDP_GRPH4SURF_GRPH4USELUT_SHIFT (23) +#define PDP_GRPH4SURF_GRPH4USELUT_LENGTH (1) +#define PDP_GRPH4SURF_GRPH4USELUT_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1SURF_OFFSET (0x0010) + +/* PDP, VID1SURF, VID1PIXFMT +*/ +#define PDP_VID1SURF_VID1PIXFMT_MASK (0xF8000000) +#define PDP_VID1SURF_VID1PIXFMT_LSBMASK (0x0000001F) +#define PDP_VID1SURF_VID1PIXFMT_SHIFT (27) +#define PDP_VID1SURF_VID1PIXFMT_LENGTH (5) +#define PDP_VID1SURF_VID1PIXFMT_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SURF, VID1USEGAMMA +*/ +#define PDP_VID1SURF_VID1USEGAMMA_MASK (0x04000000) +#define PDP_VID1SURF_VID1USEGAMMA_LSBMASK (0x00000001) +#define PDP_VID1SURF_VID1USEGAMMA_SHIFT (26) +#define PDP_VID1SURF_VID1USEGAMMA_LENGTH (1) +#define PDP_VID1SURF_VID1USEGAMMA_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SURF, VID1USECSC +*/ +#define PDP_VID1SURF_VID1USECSC_MASK (0x02000000) +#define PDP_VID1SURF_VID1USECSC_LSBMASK (0x00000001) +#define PDP_VID1SURF_VID1USECSC_SHIFT (25) 
+#define PDP_VID1SURF_VID1USECSC_LENGTH (1) +#define PDP_VID1SURF_VID1USECSC_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SURF, VID1USEI2P +*/ +#define PDP_VID1SURF_VID1USEI2P_MASK (0x01000000) +#define PDP_VID1SURF_VID1USEI2P_LSBMASK (0x00000001) +#define PDP_VID1SURF_VID1USEI2P_SHIFT (24) +#define PDP_VID1SURF_VID1USEI2P_LENGTH (1) +#define PDP_VID1SURF_VID1USEI2P_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SURF, VID1COSITED +*/ +#define PDP_VID1SURF_VID1COSITED_MASK (0x00800000) +#define PDP_VID1SURF_VID1COSITED_LSBMASK (0x00000001) +#define PDP_VID1SURF_VID1COSITED_SHIFT (23) +#define PDP_VID1SURF_VID1COSITED_LENGTH (1) +#define PDP_VID1SURF_VID1COSITED_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SURF, VID1USEHQCD +*/ +#define PDP_VID1SURF_VID1USEHQCD_MASK (0x00400000) +#define PDP_VID1SURF_VID1USEHQCD_LSBMASK (0x00000001) +#define PDP_VID1SURF_VID1USEHQCD_SHIFT (22) +#define PDP_VID1SURF_VID1USEHQCD_LENGTH (1) +#define PDP_VID1SURF_VID1USEHQCD_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SURF, VID1USEINSTREAM +*/ +#define PDP_VID1SURF_VID1USEINSTREAM_MASK (0x00200000) +#define PDP_VID1SURF_VID1USEINSTREAM_LSBMASK (0x00000001) +#define PDP_VID1SURF_VID1USEINSTREAM_SHIFT (21) +#define PDP_VID1SURF_VID1USEINSTREAM_LENGTH (1) +#define PDP_VID1SURF_VID1USEINSTREAM_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2SURF_OFFSET (0x0014) + +/* PDP, VID2SURF, VID2PIXFMT +*/ +#define PDP_VID2SURF_VID2PIXFMT_MASK (0xF8000000) +#define PDP_VID2SURF_VID2PIXFMT_LSBMASK (0x0000001F) +#define PDP_VID2SURF_VID2PIXFMT_SHIFT (27) +#define PDP_VID2SURF_VID2PIXFMT_LENGTH (5) +#define PDP_VID2SURF_VID2PIXFMT_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SURF, VID2COSITED +*/ +#define PDP_VID2SURF_VID2COSITED_MASK (0x00800000) +#define PDP_VID2SURF_VID2COSITED_LSBMASK (0x00000001) +#define PDP_VID2SURF_VID2COSITED_SHIFT (23) +#define PDP_VID2SURF_VID2COSITED_LENGTH (1) +#define PDP_VID2SURF_VID2COSITED_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SURF, VID2USEGAMMA +*/ +#define PDP_VID2SURF_VID2USEGAMMA_MASK (0x04000000) +#define 
PDP_VID2SURF_VID2USEGAMMA_LSBMASK (0x00000001) +#define PDP_VID2SURF_VID2USEGAMMA_SHIFT (26) +#define PDP_VID2SURF_VID2USEGAMMA_LENGTH (1) +#define PDP_VID2SURF_VID2USEGAMMA_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SURF, VID2USECSC +*/ +#define PDP_VID2SURF_VID2USECSC_MASK (0x02000000) +#define PDP_VID2SURF_VID2USECSC_LSBMASK (0x00000001) +#define PDP_VID2SURF_VID2USECSC_SHIFT (25) +#define PDP_VID2SURF_VID2USECSC_LENGTH (1) +#define PDP_VID2SURF_VID2USECSC_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3SURF_OFFSET (0x0018) + +/* PDP, VID3SURF, VID3PIXFMT +*/ +#define PDP_VID3SURF_VID3PIXFMT_MASK (0xF8000000) +#define PDP_VID3SURF_VID3PIXFMT_LSBMASK (0x0000001F) +#define PDP_VID3SURF_VID3PIXFMT_SHIFT (27) +#define PDP_VID3SURF_VID3PIXFMT_LENGTH (5) +#define PDP_VID3SURF_VID3PIXFMT_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3SURF, VID3COSITED +*/ +#define PDP_VID3SURF_VID3COSITED_MASK (0x00800000) +#define PDP_VID3SURF_VID3COSITED_LSBMASK (0x00000001) +#define PDP_VID3SURF_VID3COSITED_SHIFT (23) +#define PDP_VID3SURF_VID3COSITED_LENGTH (1) +#define PDP_VID3SURF_VID3COSITED_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3SURF, VID3USEGAMMA +*/ +#define PDP_VID3SURF_VID3USEGAMMA_MASK (0x04000000) +#define PDP_VID3SURF_VID3USEGAMMA_LSBMASK (0x00000001) +#define PDP_VID3SURF_VID3USEGAMMA_SHIFT (26) +#define PDP_VID3SURF_VID3USEGAMMA_LENGTH (1) +#define PDP_VID3SURF_VID3USEGAMMA_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3SURF, VID3USECSC +*/ +#define PDP_VID3SURF_VID3USECSC_MASK (0x02000000) +#define PDP_VID3SURF_VID3USECSC_LSBMASK (0x00000001) +#define PDP_VID3SURF_VID3USECSC_SHIFT (25) +#define PDP_VID3SURF_VID3USECSC_LENGTH (1) +#define PDP_VID3SURF_VID3USECSC_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4SURF_OFFSET (0x001C) + +/* PDP, VID4SURF, VID4PIXFMT +*/ +#define PDP_VID4SURF_VID4PIXFMT_MASK (0xF8000000) +#define PDP_VID4SURF_VID4PIXFMT_LSBMASK (0x0000001F) +#define PDP_VID4SURF_VID4PIXFMT_SHIFT (27) +#define PDP_VID4SURF_VID4PIXFMT_LENGTH (5) +#define PDP_VID4SURF_VID4PIXFMT_SIGNED_FIELD 
IMG_FALSE + +/* PDP, VID4SURF, VID4COSITED +*/ +#define PDP_VID4SURF_VID4COSITED_MASK (0x00800000) +#define PDP_VID4SURF_VID4COSITED_LSBMASK (0x00000001) +#define PDP_VID4SURF_VID4COSITED_SHIFT (23) +#define PDP_VID4SURF_VID4COSITED_LENGTH (1) +#define PDP_VID4SURF_VID4COSITED_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4SURF, VID4USEGAMMA +*/ +#define PDP_VID4SURF_VID4USEGAMMA_MASK (0x04000000) +#define PDP_VID4SURF_VID4USEGAMMA_LSBMASK (0x00000001) +#define PDP_VID4SURF_VID4USEGAMMA_SHIFT (26) +#define PDP_VID4SURF_VID4USEGAMMA_LENGTH (1) +#define PDP_VID4SURF_VID4USEGAMMA_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4SURF, VID4USECSC +*/ +#define PDP_VID4SURF_VID4USECSC_MASK (0x02000000) +#define PDP_VID4SURF_VID4USECSC_LSBMASK (0x00000001) +#define PDP_VID4SURF_VID4USECSC_SHIFT (25) +#define PDP_VID4SURF_VID4USECSC_LENGTH (1) +#define PDP_VID4SURF_VID4USECSC_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH1CTRL_OFFSET (0x0020) + +/* PDP, GRPH1CTRL, GRPH1STREN +*/ +#define PDP_GRPH1CTRL_GRPH1STREN_MASK (0x80000000) +#define PDP_GRPH1CTRL_GRPH1STREN_LSBMASK (0x00000001) +#define PDP_GRPH1CTRL_GRPH1STREN_SHIFT (31) +#define PDP_GRPH1CTRL_GRPH1STREN_LENGTH (1) +#define PDP_GRPH1CTRL_GRPH1STREN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1CTRL, GRPH1CKEYEN +*/ +#define PDP_GRPH1CTRL_GRPH1CKEYEN_MASK (0x40000000) +#define PDP_GRPH1CTRL_GRPH1CKEYEN_LSBMASK (0x00000001) +#define PDP_GRPH1CTRL_GRPH1CKEYEN_SHIFT (30) +#define PDP_GRPH1CTRL_GRPH1CKEYEN_LENGTH (1) +#define PDP_GRPH1CTRL_GRPH1CKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1CTRL, GRPH1CKEYSRC +*/ +#define PDP_GRPH1CTRL_GRPH1CKEYSRC_MASK (0x20000000) +#define PDP_GRPH1CTRL_GRPH1CKEYSRC_LSBMASK (0x00000001) +#define PDP_GRPH1CTRL_GRPH1CKEYSRC_SHIFT (29) +#define PDP_GRPH1CTRL_GRPH1CKEYSRC_LENGTH (1) +#define PDP_GRPH1CTRL_GRPH1CKEYSRC_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1CTRL, GRPH1BLEND +*/ +#define PDP_GRPH1CTRL_GRPH1BLEND_MASK (0x18000000) +#define PDP_GRPH1CTRL_GRPH1BLEND_LSBMASK (0x00000003) +#define PDP_GRPH1CTRL_GRPH1BLEND_SHIFT 
(27) +#define PDP_GRPH1CTRL_GRPH1BLEND_LENGTH (2) +#define PDP_GRPH1CTRL_GRPH1BLEND_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1CTRL, GRPH1BLENDPOS +*/ +#define PDP_GRPH1CTRL_GRPH1BLENDPOS_MASK (0x07000000) +#define PDP_GRPH1CTRL_GRPH1BLENDPOS_LSBMASK (0x00000007) +#define PDP_GRPH1CTRL_GRPH1BLENDPOS_SHIFT (24) +#define PDP_GRPH1CTRL_GRPH1BLENDPOS_LENGTH (3) +#define PDP_GRPH1CTRL_GRPH1BLENDPOS_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1CTRL, GRPH1DITHEREN +*/ +#define PDP_GRPH1CTRL_GRPH1DITHEREN_MASK (0x00800000) +#define PDP_GRPH1CTRL_GRPH1DITHEREN_LSBMASK (0x00000001) +#define PDP_GRPH1CTRL_GRPH1DITHEREN_SHIFT (23) +#define PDP_GRPH1CTRL_GRPH1DITHEREN_LENGTH (1) +#define PDP_GRPH1CTRL_GRPH1DITHEREN_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH2CTRL_OFFSET (0x0024) + +/* PDP, GRPH2CTRL, GRPH2STREN +*/ +#define PDP_GRPH2CTRL_GRPH2STREN_MASK (0x80000000) +#define PDP_GRPH2CTRL_GRPH2STREN_LSBMASK (0x00000001) +#define PDP_GRPH2CTRL_GRPH2STREN_SHIFT (31) +#define PDP_GRPH2CTRL_GRPH2STREN_LENGTH (1) +#define PDP_GRPH2CTRL_GRPH2STREN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2CTRL, GRPH2CKEYEN +*/ +#define PDP_GRPH2CTRL_GRPH2CKEYEN_MASK (0x40000000) +#define PDP_GRPH2CTRL_GRPH2CKEYEN_LSBMASK (0x00000001) +#define PDP_GRPH2CTRL_GRPH2CKEYEN_SHIFT (30) +#define PDP_GRPH2CTRL_GRPH2CKEYEN_LENGTH (1) +#define PDP_GRPH2CTRL_GRPH2CKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2CTRL, GRPH2CKEYSRC +*/ +#define PDP_GRPH2CTRL_GRPH2CKEYSRC_MASK (0x20000000) +#define PDP_GRPH2CTRL_GRPH2CKEYSRC_LSBMASK (0x00000001) +#define PDP_GRPH2CTRL_GRPH2CKEYSRC_SHIFT (29) +#define PDP_GRPH2CTRL_GRPH2CKEYSRC_LENGTH (1) +#define PDP_GRPH2CTRL_GRPH2CKEYSRC_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2CTRL, GRPH2BLEND +*/ +#define PDP_GRPH2CTRL_GRPH2BLEND_MASK (0x18000000) +#define PDP_GRPH2CTRL_GRPH2BLEND_LSBMASK (0x00000003) +#define PDP_GRPH2CTRL_GRPH2BLEND_SHIFT (27) +#define PDP_GRPH2CTRL_GRPH2BLEND_LENGTH (2) +#define PDP_GRPH2CTRL_GRPH2BLEND_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2CTRL, GRPH2BLENDPOS +*/ +#define 
PDP_GRPH2CTRL_GRPH2BLENDPOS_MASK (0x07000000) +#define PDP_GRPH2CTRL_GRPH2BLENDPOS_LSBMASK (0x00000007) +#define PDP_GRPH2CTRL_GRPH2BLENDPOS_SHIFT (24) +#define PDP_GRPH2CTRL_GRPH2BLENDPOS_LENGTH (3) +#define PDP_GRPH2CTRL_GRPH2BLENDPOS_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2CTRL, GRPH2DITHEREN +*/ +#define PDP_GRPH2CTRL_GRPH2DITHEREN_MASK (0x00800000) +#define PDP_GRPH2CTRL_GRPH2DITHEREN_LSBMASK (0x00000001) +#define PDP_GRPH2CTRL_GRPH2DITHEREN_SHIFT (23) +#define PDP_GRPH2CTRL_GRPH2DITHEREN_LENGTH (1) +#define PDP_GRPH2CTRL_GRPH2DITHEREN_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH3CTRL_OFFSET (0x0028) + +/* PDP, GRPH3CTRL, GRPH3STREN +*/ +#define PDP_GRPH3CTRL_GRPH3STREN_MASK (0x80000000) +#define PDP_GRPH3CTRL_GRPH3STREN_LSBMASK (0x00000001) +#define PDP_GRPH3CTRL_GRPH3STREN_SHIFT (31) +#define PDP_GRPH3CTRL_GRPH3STREN_LENGTH (1) +#define PDP_GRPH3CTRL_GRPH3STREN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3CTRL, GRPH3CKEYEN +*/ +#define PDP_GRPH3CTRL_GRPH3CKEYEN_MASK (0x40000000) +#define PDP_GRPH3CTRL_GRPH3CKEYEN_LSBMASK (0x00000001) +#define PDP_GRPH3CTRL_GRPH3CKEYEN_SHIFT (30) +#define PDP_GRPH3CTRL_GRPH3CKEYEN_LENGTH (1) +#define PDP_GRPH3CTRL_GRPH3CKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3CTRL, GRPH3CKEYSRC +*/ +#define PDP_GRPH3CTRL_GRPH3CKEYSRC_MASK (0x20000000) +#define PDP_GRPH3CTRL_GRPH3CKEYSRC_LSBMASK (0x00000001) +#define PDP_GRPH3CTRL_GRPH3CKEYSRC_SHIFT (29) +#define PDP_GRPH3CTRL_GRPH3CKEYSRC_LENGTH (1) +#define PDP_GRPH3CTRL_GRPH3CKEYSRC_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3CTRL, GRPH3BLEND +*/ +#define PDP_GRPH3CTRL_GRPH3BLEND_MASK (0x18000000) +#define PDP_GRPH3CTRL_GRPH3BLEND_LSBMASK (0x00000003) +#define PDP_GRPH3CTRL_GRPH3BLEND_SHIFT (27) +#define PDP_GRPH3CTRL_GRPH3BLEND_LENGTH (2) +#define PDP_GRPH3CTRL_GRPH3BLEND_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3CTRL, GRPH3BLENDPOS +*/ +#define PDP_GRPH3CTRL_GRPH3BLENDPOS_MASK (0x07000000) +#define PDP_GRPH3CTRL_GRPH3BLENDPOS_LSBMASK (0x00000007) +#define PDP_GRPH3CTRL_GRPH3BLENDPOS_SHIFT (24) 
+#define PDP_GRPH3CTRL_GRPH3BLENDPOS_LENGTH (3) +#define PDP_GRPH3CTRL_GRPH3BLENDPOS_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3CTRL, GRPH3DITHEREN +*/ +#define PDP_GRPH3CTRL_GRPH3DITHEREN_MASK (0x00800000) +#define PDP_GRPH3CTRL_GRPH3DITHEREN_LSBMASK (0x00000001) +#define PDP_GRPH3CTRL_GRPH3DITHEREN_SHIFT (23) +#define PDP_GRPH3CTRL_GRPH3DITHEREN_LENGTH (1) +#define PDP_GRPH3CTRL_GRPH3DITHEREN_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH4CTRL_OFFSET (0x002C) + +/* PDP, GRPH4CTRL, GRPH4STREN +*/ +#define PDP_GRPH4CTRL_GRPH4STREN_MASK (0x80000000) +#define PDP_GRPH4CTRL_GRPH4STREN_LSBMASK (0x00000001) +#define PDP_GRPH4CTRL_GRPH4STREN_SHIFT (31) +#define PDP_GRPH4CTRL_GRPH4STREN_LENGTH (1) +#define PDP_GRPH4CTRL_GRPH4STREN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4CTRL, GRPH4CKEYEN +*/ +#define PDP_GRPH4CTRL_GRPH4CKEYEN_MASK (0x40000000) +#define PDP_GRPH4CTRL_GRPH4CKEYEN_LSBMASK (0x00000001) +#define PDP_GRPH4CTRL_GRPH4CKEYEN_SHIFT (30) +#define PDP_GRPH4CTRL_GRPH4CKEYEN_LENGTH (1) +#define PDP_GRPH4CTRL_GRPH4CKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4CTRL, GRPH4CKEYSRC +*/ +#define PDP_GRPH4CTRL_GRPH4CKEYSRC_MASK (0x20000000) +#define PDP_GRPH4CTRL_GRPH4CKEYSRC_LSBMASK (0x00000001) +#define PDP_GRPH4CTRL_GRPH4CKEYSRC_SHIFT (29) +#define PDP_GRPH4CTRL_GRPH4CKEYSRC_LENGTH (1) +#define PDP_GRPH4CTRL_GRPH4CKEYSRC_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4CTRL, GRPH4BLEND +*/ +#define PDP_GRPH4CTRL_GRPH4BLEND_MASK (0x18000000) +#define PDP_GRPH4CTRL_GRPH4BLEND_LSBMASK (0x00000003) +#define PDP_GRPH4CTRL_GRPH4BLEND_SHIFT (27) +#define PDP_GRPH4CTRL_GRPH4BLEND_LENGTH (2) +#define PDP_GRPH4CTRL_GRPH4BLEND_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4CTRL, GRPH4BLENDPOS +*/ +#define PDP_GRPH4CTRL_GRPH4BLENDPOS_MASK (0x07000000) +#define PDP_GRPH4CTRL_GRPH4BLENDPOS_LSBMASK (0x00000007) +#define PDP_GRPH4CTRL_GRPH4BLENDPOS_SHIFT (24) +#define PDP_GRPH4CTRL_GRPH4BLENDPOS_LENGTH (3) +#define PDP_GRPH4CTRL_GRPH4BLENDPOS_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4CTRL, GRPH4DITHEREN +*/ +#define 
PDP_GRPH4CTRL_GRPH4DITHEREN_MASK (0x00800000) +#define PDP_GRPH4CTRL_GRPH4DITHEREN_LSBMASK (0x00000001) +#define PDP_GRPH4CTRL_GRPH4DITHEREN_SHIFT (23) +#define PDP_GRPH4CTRL_GRPH4DITHEREN_LENGTH (1) +#define PDP_GRPH4CTRL_GRPH4DITHEREN_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1CTRL_OFFSET (0x0030) + +/* PDP, VID1CTRL, VID1STREN +*/ +#define PDP_VID1CTRL_VID1STREN_MASK (0x80000000) +#define PDP_VID1CTRL_VID1STREN_LSBMASK (0x00000001) +#define PDP_VID1CTRL_VID1STREN_SHIFT (31) +#define PDP_VID1CTRL_VID1STREN_LENGTH (1) +#define PDP_VID1CTRL_VID1STREN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1CTRL, VID1CKEYEN +*/ +#define PDP_VID1CTRL_VID1CKEYEN_MASK (0x40000000) +#define PDP_VID1CTRL_VID1CKEYEN_LSBMASK (0x00000001) +#define PDP_VID1CTRL_VID1CKEYEN_SHIFT (30) +#define PDP_VID1CTRL_VID1CKEYEN_LENGTH (1) +#define PDP_VID1CTRL_VID1CKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1CTRL, VID1CKEYSRC +*/ +#define PDP_VID1CTRL_VID1CKEYSRC_MASK (0x20000000) +#define PDP_VID1CTRL_VID1CKEYSRC_LSBMASK (0x00000001) +#define PDP_VID1CTRL_VID1CKEYSRC_SHIFT (29) +#define PDP_VID1CTRL_VID1CKEYSRC_LENGTH (1) +#define PDP_VID1CTRL_VID1CKEYSRC_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1CTRL, VID1BLEND +*/ +#define PDP_VID1CTRL_VID1BLEND_MASK (0x18000000) +#define PDP_VID1CTRL_VID1BLEND_LSBMASK (0x00000003) +#define PDP_VID1CTRL_VID1BLEND_SHIFT (27) +#define PDP_VID1CTRL_VID1BLEND_LENGTH (2) +#define PDP_VID1CTRL_VID1BLEND_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1CTRL, VID1BLENDPOS +*/ +#define PDP_VID1CTRL_VID1BLENDPOS_MASK (0x07000000) +#define PDP_VID1CTRL_VID1BLENDPOS_LSBMASK (0x00000007) +#define PDP_VID1CTRL_VID1BLENDPOS_SHIFT (24) +#define PDP_VID1CTRL_VID1BLENDPOS_LENGTH (3) +#define PDP_VID1CTRL_VID1BLENDPOS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1CTRL, VID1DITHEREN +*/ +#define PDP_VID1CTRL_VID1DITHEREN_MASK (0x00800000) +#define PDP_VID1CTRL_VID1DITHEREN_LSBMASK (0x00000001) +#define PDP_VID1CTRL_VID1DITHEREN_SHIFT (23) +#define PDP_VID1CTRL_VID1DITHEREN_LENGTH (1) +#define 
PDP_VID1CTRL_VID1DITHEREN_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2CTRL_OFFSET (0x0034) + +/* PDP, VID2CTRL, VID2STREN +*/ +#define PDP_VID2CTRL_VID2STREN_MASK (0x80000000) +#define PDP_VID2CTRL_VID2STREN_LSBMASK (0x00000001) +#define PDP_VID2CTRL_VID2STREN_SHIFT (31) +#define PDP_VID2CTRL_VID2STREN_LENGTH (1) +#define PDP_VID2CTRL_VID2STREN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2CTRL, VID2CKEYEN +*/ +#define PDP_VID2CTRL_VID2CKEYEN_MASK (0x40000000) +#define PDP_VID2CTRL_VID2CKEYEN_LSBMASK (0x00000001) +#define PDP_VID2CTRL_VID2CKEYEN_SHIFT (30) +#define PDP_VID2CTRL_VID2CKEYEN_LENGTH (1) +#define PDP_VID2CTRL_VID2CKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2CTRL, VID2CKEYSRC +*/ +#define PDP_VID2CTRL_VID2CKEYSRC_MASK (0x20000000) +#define PDP_VID2CTRL_VID2CKEYSRC_LSBMASK (0x00000001) +#define PDP_VID2CTRL_VID2CKEYSRC_SHIFT (29) +#define PDP_VID2CTRL_VID2CKEYSRC_LENGTH (1) +#define PDP_VID2CTRL_VID2CKEYSRC_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2CTRL, VID2BLEND +*/ +#define PDP_VID2CTRL_VID2BLEND_MASK (0x18000000) +#define PDP_VID2CTRL_VID2BLEND_LSBMASK (0x00000003) +#define PDP_VID2CTRL_VID2BLEND_SHIFT (27) +#define PDP_VID2CTRL_VID2BLEND_LENGTH (2) +#define PDP_VID2CTRL_VID2BLEND_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2CTRL, VID2BLENDPOS +*/ +#define PDP_VID2CTRL_VID2BLENDPOS_MASK (0x07000000) +#define PDP_VID2CTRL_VID2BLENDPOS_LSBMASK (0x00000007) +#define PDP_VID2CTRL_VID2BLENDPOS_SHIFT (24) +#define PDP_VID2CTRL_VID2BLENDPOS_LENGTH (3) +#define PDP_VID2CTRL_VID2BLENDPOS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2CTRL, VID2DITHEREN +*/ +#define PDP_VID2CTRL_VID2DITHEREN_MASK (0x00800000) +#define PDP_VID2CTRL_VID2DITHEREN_LSBMASK (0x00000001) +#define PDP_VID2CTRL_VID2DITHEREN_SHIFT (23) +#define PDP_VID2CTRL_VID2DITHEREN_LENGTH (1) +#define PDP_VID2CTRL_VID2DITHEREN_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3CTRL_OFFSET (0x0038) + +/* PDP, VID3CTRL, VID3STREN +*/ +#define PDP_VID3CTRL_VID3STREN_MASK (0x80000000) +#define PDP_VID3CTRL_VID3STREN_LSBMASK (0x00000001) 
+#define PDP_VID3CTRL_VID3STREN_SHIFT (31) +#define PDP_VID3CTRL_VID3STREN_LENGTH (1) +#define PDP_VID3CTRL_VID3STREN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3CTRL, VID3CKEYEN +*/ +#define PDP_VID3CTRL_VID3CKEYEN_MASK (0x40000000) +#define PDP_VID3CTRL_VID3CKEYEN_LSBMASK (0x00000001) +#define PDP_VID3CTRL_VID3CKEYEN_SHIFT (30) +#define PDP_VID3CTRL_VID3CKEYEN_LENGTH (1) +#define PDP_VID3CTRL_VID3CKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3CTRL, VID3CKEYSRC +*/ +#define PDP_VID3CTRL_VID3CKEYSRC_MASK (0x20000000) +#define PDP_VID3CTRL_VID3CKEYSRC_LSBMASK (0x00000001) +#define PDP_VID3CTRL_VID3CKEYSRC_SHIFT (29) +#define PDP_VID3CTRL_VID3CKEYSRC_LENGTH (1) +#define PDP_VID3CTRL_VID3CKEYSRC_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3CTRL, VID3BLEND +*/ +#define PDP_VID3CTRL_VID3BLEND_MASK (0x18000000) +#define PDP_VID3CTRL_VID3BLEND_LSBMASK (0x00000003) +#define PDP_VID3CTRL_VID3BLEND_SHIFT (27) +#define PDP_VID3CTRL_VID3BLEND_LENGTH (2) +#define PDP_VID3CTRL_VID3BLEND_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3CTRL, VID3BLENDPOS +*/ +#define PDP_VID3CTRL_VID3BLENDPOS_MASK (0x07000000) +#define PDP_VID3CTRL_VID3BLENDPOS_LSBMASK (0x00000007) +#define PDP_VID3CTRL_VID3BLENDPOS_SHIFT (24) +#define PDP_VID3CTRL_VID3BLENDPOS_LENGTH (3) +#define PDP_VID3CTRL_VID3BLENDPOS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3CTRL, VID3DITHEREN +*/ +#define PDP_VID3CTRL_VID3DITHEREN_MASK (0x00800000) +#define PDP_VID3CTRL_VID3DITHEREN_LSBMASK (0x00000001) +#define PDP_VID3CTRL_VID3DITHEREN_SHIFT (23) +#define PDP_VID3CTRL_VID3DITHEREN_LENGTH (1) +#define PDP_VID3CTRL_VID3DITHEREN_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4CTRL_OFFSET (0x003C) + +/* PDP, VID4CTRL, VID4STREN +*/ +#define PDP_VID4CTRL_VID4STREN_MASK (0x80000000) +#define PDP_VID4CTRL_VID4STREN_LSBMASK (0x00000001) +#define PDP_VID4CTRL_VID4STREN_SHIFT (31) +#define PDP_VID4CTRL_VID4STREN_LENGTH (1) +#define PDP_VID4CTRL_VID4STREN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4CTRL, VID4CKEYEN +*/ +#define PDP_VID4CTRL_VID4CKEYEN_MASK (0x40000000) 
+#define PDP_VID4CTRL_VID4CKEYEN_LSBMASK (0x00000001) +#define PDP_VID4CTRL_VID4CKEYEN_SHIFT (30) +#define PDP_VID4CTRL_VID4CKEYEN_LENGTH (1) +#define PDP_VID4CTRL_VID4CKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4CTRL, VID4CKEYSRC +*/ +#define PDP_VID4CTRL_VID4CKEYSRC_MASK (0x20000000) +#define PDP_VID4CTRL_VID4CKEYSRC_LSBMASK (0x00000001) +#define PDP_VID4CTRL_VID4CKEYSRC_SHIFT (29) +#define PDP_VID4CTRL_VID4CKEYSRC_LENGTH (1) +#define PDP_VID4CTRL_VID4CKEYSRC_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4CTRL, VID4BLEND +*/ +#define PDP_VID4CTRL_VID4BLEND_MASK (0x18000000) +#define PDP_VID4CTRL_VID4BLEND_LSBMASK (0x00000003) +#define PDP_VID4CTRL_VID4BLEND_SHIFT (27) +#define PDP_VID4CTRL_VID4BLEND_LENGTH (2) +#define PDP_VID4CTRL_VID4BLEND_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4CTRL, VID4BLENDPOS +*/ +#define PDP_VID4CTRL_VID4BLENDPOS_MASK (0x07000000) +#define PDP_VID4CTRL_VID4BLENDPOS_LSBMASK (0x00000007) +#define PDP_VID4CTRL_VID4BLENDPOS_SHIFT (24) +#define PDP_VID4CTRL_VID4BLENDPOS_LENGTH (3) +#define PDP_VID4CTRL_VID4BLENDPOS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4CTRL, VID4DITHEREN +*/ +#define PDP_VID4CTRL_VID4DITHEREN_MASK (0x00800000) +#define PDP_VID4CTRL_VID4DITHEREN_LSBMASK (0x00000001) +#define PDP_VID4CTRL_VID4DITHEREN_SHIFT (23) +#define PDP_VID4CTRL_VID4DITHEREN_LENGTH (1) +#define PDP_VID4CTRL_VID4DITHEREN_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1UCTRL_OFFSET (0x0050) + +/* PDP, VID1UCTRL, VID1UVHALFSTR +*/ +#define PDP_VID1UCTRL_VID1UVHALFSTR_MASK (0xC0000000) +#define PDP_VID1UCTRL_VID1UVHALFSTR_LSBMASK (0x00000003) +#define PDP_VID1UCTRL_VID1UVHALFSTR_SHIFT (30) +#define PDP_VID1UCTRL_VID1UVHALFSTR_LENGTH (2) +#define PDP_VID1UCTRL_VID1UVHALFSTR_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2UCTRL_OFFSET (0x0054) + +/* PDP, VID2UCTRL, VID2UVHALFSTR +*/ +#define PDP_VID2UCTRL_VID2UVHALFSTR_MASK (0xC0000000) +#define PDP_VID2UCTRL_VID2UVHALFSTR_LSBMASK (0x00000003) +#define PDP_VID2UCTRL_VID2UVHALFSTR_SHIFT (30) +#define PDP_VID2UCTRL_VID2UVHALFSTR_LENGTH 
(2) +#define PDP_VID2UCTRL_VID2UVHALFSTR_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3UCTRL_OFFSET (0x0058) + +/* PDP, VID3UCTRL, VID3UVHALFSTR +*/ +#define PDP_VID3UCTRL_VID3UVHALFSTR_MASK (0xC0000000) +#define PDP_VID3UCTRL_VID3UVHALFSTR_LSBMASK (0x00000003) +#define PDP_VID3UCTRL_VID3UVHALFSTR_SHIFT (30) +#define PDP_VID3UCTRL_VID3UVHALFSTR_LENGTH (2) +#define PDP_VID3UCTRL_VID3UVHALFSTR_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4UCTRL_OFFSET (0x005C) + +/* PDP, VID4UCTRL, VID4UVHALFSTR +*/ +#define PDP_VID4UCTRL_VID4UVHALFSTR_MASK (0xC0000000) +#define PDP_VID4UCTRL_VID4UVHALFSTR_LSBMASK (0x00000003) +#define PDP_VID4UCTRL_VID4UVHALFSTR_SHIFT (30) +#define PDP_VID4UCTRL_VID4UVHALFSTR_LENGTH (2) +#define PDP_VID4UCTRL_VID4UVHALFSTR_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH1STRIDE_OFFSET (0x0060) + +/* PDP, GRPH1STRIDE, GRPH1STRIDE +*/ +#define PDP_GRPH1STRIDE_GRPH1STRIDE_MASK (0xFFC00000) +#define PDP_GRPH1STRIDE_GRPH1STRIDE_LSBMASK (0x000003FF) +#define PDP_GRPH1STRIDE_GRPH1STRIDE_SHIFT (22) +#define PDP_GRPH1STRIDE_GRPH1STRIDE_LENGTH (10) +#define PDP_GRPH1STRIDE_GRPH1STRIDE_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH2STRIDE_OFFSET (0x0064) + +/* PDP, GRPH2STRIDE, GRPH2STRIDE +*/ +#define PDP_GRPH2STRIDE_GRPH2STRIDE_MASK (0xFFC00000) +#define PDP_GRPH2STRIDE_GRPH2STRIDE_LSBMASK (0x000003FF) +#define PDP_GRPH2STRIDE_GRPH2STRIDE_SHIFT (22) +#define PDP_GRPH2STRIDE_GRPH2STRIDE_LENGTH (10) +#define PDP_GRPH2STRIDE_GRPH2STRIDE_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH3STRIDE_OFFSET (0x0068) + +/* PDP, GRPH3STRIDE, GRPH3STRIDE +*/ +#define PDP_GRPH3STRIDE_GRPH3STRIDE_MASK (0xFFC00000) +#define PDP_GRPH3STRIDE_GRPH3STRIDE_LSBMASK (0x000003FF) +#define PDP_GRPH3STRIDE_GRPH3STRIDE_SHIFT (22) +#define PDP_GRPH3STRIDE_GRPH3STRIDE_LENGTH (10) +#define PDP_GRPH3STRIDE_GRPH3STRIDE_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH4STRIDE_OFFSET (0x006C) + +/* PDP, GRPH4STRIDE, GRPH4STRIDE +*/ +#define PDP_GRPH4STRIDE_GRPH4STRIDE_MASK (0xFFC00000) +#define 
PDP_GRPH4STRIDE_GRPH4STRIDE_LSBMASK (0x000003FF) +#define PDP_GRPH4STRIDE_GRPH4STRIDE_SHIFT (22) +#define PDP_GRPH4STRIDE_GRPH4STRIDE_LENGTH (10) +#define PDP_GRPH4STRIDE_GRPH4STRIDE_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1STRIDE_OFFSET (0x0070) + +/* PDP, VID1STRIDE, VID1STRIDE +*/ +#define PDP_VID1STRIDE_VID1STRIDE_MASK (0xFFC00000) +#define PDP_VID1STRIDE_VID1STRIDE_LSBMASK (0x000003FF) +#define PDP_VID1STRIDE_VID1STRIDE_SHIFT (22) +#define PDP_VID1STRIDE_VID1STRIDE_LENGTH (10) +#define PDP_VID1STRIDE_VID1STRIDE_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2STRIDE_OFFSET (0x0074) + +/* PDP, VID2STRIDE, VID2STRIDE +*/ +#define PDP_VID2STRIDE_VID2STRIDE_MASK (0xFFC00000) +#define PDP_VID2STRIDE_VID2STRIDE_LSBMASK (0x000003FF) +#define PDP_VID2STRIDE_VID2STRIDE_SHIFT (22) +#define PDP_VID2STRIDE_VID2STRIDE_LENGTH (10) +#define PDP_VID2STRIDE_VID2STRIDE_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3STRIDE_OFFSET (0x0078) + +/* PDP, VID3STRIDE, VID3STRIDE +*/ +#define PDP_VID3STRIDE_VID3STRIDE_MASK (0xFFC00000) +#define PDP_VID3STRIDE_VID3STRIDE_LSBMASK (0x000003FF) +#define PDP_VID3STRIDE_VID3STRIDE_SHIFT (22) +#define PDP_VID3STRIDE_VID3STRIDE_LENGTH (10) +#define PDP_VID3STRIDE_VID3STRIDE_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4STRIDE_OFFSET (0x007C) + +/* PDP, VID4STRIDE, VID4STRIDE +*/ +#define PDP_VID4STRIDE_VID4STRIDE_MASK (0xFFC00000) +#define PDP_VID4STRIDE_VID4STRIDE_LSBMASK (0x000003FF) +#define PDP_VID4STRIDE_VID4STRIDE_SHIFT (22) +#define PDP_VID4STRIDE_VID4STRIDE_LENGTH (10) +#define PDP_VID4STRIDE_VID4STRIDE_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH1SIZE_OFFSET (0x0080) + +/* PDP, GRPH1SIZE, GRPH1WIDTH +*/ +#define PDP_GRPH1SIZE_GRPH1WIDTH_MASK (0x0FFF0000) +#define PDP_GRPH1SIZE_GRPH1WIDTH_LSBMASK (0x00000FFF) +#define PDP_GRPH1SIZE_GRPH1WIDTH_SHIFT (16) +#define PDP_GRPH1SIZE_GRPH1WIDTH_LENGTH (12) +#define PDP_GRPH1SIZE_GRPH1WIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1SIZE, GRPH1HEIGHT +*/ +#define PDP_GRPH1SIZE_GRPH1HEIGHT_MASK (0x00000FFF) +#define 
PDP_GRPH1SIZE_GRPH1HEIGHT_LSBMASK (0x00000FFF) +#define PDP_GRPH1SIZE_GRPH1HEIGHT_SHIFT (0) +#define PDP_GRPH1SIZE_GRPH1HEIGHT_LENGTH (12) +#define PDP_GRPH1SIZE_GRPH1HEIGHT_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH2SIZE_OFFSET (0x0084) + +/* PDP, GRPH2SIZE, GRPH2WIDTH +*/ +#define PDP_GRPH2SIZE_GRPH2WIDTH_MASK (0x0FFF0000) +#define PDP_GRPH2SIZE_GRPH2WIDTH_LSBMASK (0x00000FFF) +#define PDP_GRPH2SIZE_GRPH2WIDTH_SHIFT (16) +#define PDP_GRPH2SIZE_GRPH2WIDTH_LENGTH (12) +#define PDP_GRPH2SIZE_GRPH2WIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2SIZE, GRPH2HEIGHT +*/ +#define PDP_GRPH2SIZE_GRPH2HEIGHT_MASK (0x00000FFF) +#define PDP_GRPH2SIZE_GRPH2HEIGHT_LSBMASK (0x00000FFF) +#define PDP_GRPH2SIZE_GRPH2HEIGHT_SHIFT (0) +#define PDP_GRPH2SIZE_GRPH2HEIGHT_LENGTH (12) +#define PDP_GRPH2SIZE_GRPH2HEIGHT_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH3SIZE_OFFSET (0x0088) + +/* PDP, GRPH3SIZE, GRPH3WIDTH +*/ +#define PDP_GRPH3SIZE_GRPH3WIDTH_MASK (0x0FFF0000) +#define PDP_GRPH3SIZE_GRPH3WIDTH_LSBMASK (0x00000FFF) +#define PDP_GRPH3SIZE_GRPH3WIDTH_SHIFT (16) +#define PDP_GRPH3SIZE_GRPH3WIDTH_LENGTH (12) +#define PDP_GRPH3SIZE_GRPH3WIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3SIZE, GRPH3HEIGHT +*/ +#define PDP_GRPH3SIZE_GRPH3HEIGHT_MASK (0x00000FFF) +#define PDP_GRPH3SIZE_GRPH3HEIGHT_LSBMASK (0x00000FFF) +#define PDP_GRPH3SIZE_GRPH3HEIGHT_SHIFT (0) +#define PDP_GRPH3SIZE_GRPH3HEIGHT_LENGTH (12) +#define PDP_GRPH3SIZE_GRPH3HEIGHT_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH4SIZE_OFFSET (0x008C) + +/* PDP, GRPH4SIZE, GRPH4WIDTH +*/ +#define PDP_GRPH4SIZE_GRPH4WIDTH_MASK (0x0FFF0000) +#define PDP_GRPH4SIZE_GRPH4WIDTH_LSBMASK (0x00000FFF) +#define PDP_GRPH4SIZE_GRPH4WIDTH_SHIFT (16) +#define PDP_GRPH4SIZE_GRPH4WIDTH_LENGTH (12) +#define PDP_GRPH4SIZE_GRPH4WIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4SIZE, GRPH4HEIGHT +*/ +#define PDP_GRPH4SIZE_GRPH4HEIGHT_MASK (0x00000FFF) +#define PDP_GRPH4SIZE_GRPH4HEIGHT_LSBMASK (0x00000FFF) +#define PDP_GRPH4SIZE_GRPH4HEIGHT_SHIFT (0) +#define 
PDP_GRPH4SIZE_GRPH4HEIGHT_LENGTH (12) +#define PDP_GRPH4SIZE_GRPH4HEIGHT_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1SIZE_OFFSET (0x0090) + +/* PDP, VID1SIZE, VID1WIDTH +*/ +#define PDP_VID1SIZE_VID1WIDTH_MASK (0x0FFF0000) +#define PDP_VID1SIZE_VID1WIDTH_LSBMASK (0x00000FFF) +#define PDP_VID1SIZE_VID1WIDTH_SHIFT (16) +#define PDP_VID1SIZE_VID1WIDTH_LENGTH (12) +#define PDP_VID1SIZE_VID1WIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SIZE, VID1HEIGHT +*/ +#define PDP_VID1SIZE_VID1HEIGHT_MASK (0x00000FFF) +#define PDP_VID1SIZE_VID1HEIGHT_LSBMASK (0x00000FFF) +#define PDP_VID1SIZE_VID1HEIGHT_SHIFT (0) +#define PDP_VID1SIZE_VID1HEIGHT_LENGTH (12) +#define PDP_VID1SIZE_VID1HEIGHT_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2SIZE_OFFSET (0x0094) + +/* PDP, VID2SIZE, VID2WIDTH +*/ +#define PDP_VID2SIZE_VID2WIDTH_MASK (0x0FFF0000) +#define PDP_VID2SIZE_VID2WIDTH_LSBMASK (0x00000FFF) +#define PDP_VID2SIZE_VID2WIDTH_SHIFT (16) +#define PDP_VID2SIZE_VID2WIDTH_LENGTH (12) +#define PDP_VID2SIZE_VID2WIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SIZE, VID2HEIGHT +*/ +#define PDP_VID2SIZE_VID2HEIGHT_MASK (0x00000FFF) +#define PDP_VID2SIZE_VID2HEIGHT_LSBMASK (0x00000FFF) +#define PDP_VID2SIZE_VID2HEIGHT_SHIFT (0) +#define PDP_VID2SIZE_VID2HEIGHT_LENGTH (12) +#define PDP_VID2SIZE_VID2HEIGHT_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3SIZE_OFFSET (0x0098) + +/* PDP, VID3SIZE, VID3WIDTH +*/ +#define PDP_VID3SIZE_VID3WIDTH_MASK (0x0FFF0000) +#define PDP_VID3SIZE_VID3WIDTH_LSBMASK (0x00000FFF) +#define PDP_VID3SIZE_VID3WIDTH_SHIFT (16) +#define PDP_VID3SIZE_VID3WIDTH_LENGTH (12) +#define PDP_VID3SIZE_VID3WIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3SIZE, VID3HEIGHT +*/ +#define PDP_VID3SIZE_VID3HEIGHT_MASK (0x00000FFF) +#define PDP_VID3SIZE_VID3HEIGHT_LSBMASK (0x00000FFF) +#define PDP_VID3SIZE_VID3HEIGHT_SHIFT (0) +#define PDP_VID3SIZE_VID3HEIGHT_LENGTH (12) +#define PDP_VID3SIZE_VID3HEIGHT_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4SIZE_OFFSET (0x009C) + +/* PDP, VID4SIZE, VID4WIDTH +*/ +#define 
PDP_VID4SIZE_VID4WIDTH_MASK (0x0FFF0000) +#define PDP_VID4SIZE_VID4WIDTH_LSBMASK (0x00000FFF) +#define PDP_VID4SIZE_VID4WIDTH_SHIFT (16) +#define PDP_VID4SIZE_VID4WIDTH_LENGTH (12) +#define PDP_VID4SIZE_VID4WIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4SIZE, VID4HEIGHT +*/ +#define PDP_VID4SIZE_VID4HEIGHT_MASK (0x00000FFF) +#define PDP_VID4SIZE_VID4HEIGHT_LSBMASK (0x00000FFF) +#define PDP_VID4SIZE_VID4HEIGHT_SHIFT (0) +#define PDP_VID4SIZE_VID4HEIGHT_LENGTH (12) +#define PDP_VID4SIZE_VID4HEIGHT_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH1POSN_OFFSET (0x00A0) + +/* PDP, GRPH1POSN, GRPH1XSTART +*/ +#define PDP_GRPH1POSN_GRPH1XSTART_MASK (0x0FFF0000) +#define PDP_GRPH1POSN_GRPH1XSTART_LSBMASK (0x00000FFF) +#define PDP_GRPH1POSN_GRPH1XSTART_SHIFT (16) +#define PDP_GRPH1POSN_GRPH1XSTART_LENGTH (12) +#define PDP_GRPH1POSN_GRPH1XSTART_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1POSN, GRPH1YSTART +*/ +#define PDP_GRPH1POSN_GRPH1YSTART_MASK (0x00000FFF) +#define PDP_GRPH1POSN_GRPH1YSTART_LSBMASK (0x00000FFF) +#define PDP_GRPH1POSN_GRPH1YSTART_SHIFT (0) +#define PDP_GRPH1POSN_GRPH1YSTART_LENGTH (12) +#define PDP_GRPH1POSN_GRPH1YSTART_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH2POSN_OFFSET (0x00A4) + +/* PDP, GRPH2POSN, GRPH2XSTART +*/ +#define PDP_GRPH2POSN_GRPH2XSTART_MASK (0x0FFF0000) +#define PDP_GRPH2POSN_GRPH2XSTART_LSBMASK (0x00000FFF) +#define PDP_GRPH2POSN_GRPH2XSTART_SHIFT (16) +#define PDP_GRPH2POSN_GRPH2XSTART_LENGTH (12) +#define PDP_GRPH2POSN_GRPH2XSTART_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2POSN, GRPH2YSTART +*/ +#define PDP_GRPH2POSN_GRPH2YSTART_MASK (0x00000FFF) +#define PDP_GRPH2POSN_GRPH2YSTART_LSBMASK (0x00000FFF) +#define PDP_GRPH2POSN_GRPH2YSTART_SHIFT (0) +#define PDP_GRPH2POSN_GRPH2YSTART_LENGTH (12) +#define PDP_GRPH2POSN_GRPH2YSTART_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH3POSN_OFFSET (0x00A8) + +/* PDP, GRPH3POSN, GRPH3XSTART +*/ +#define PDP_GRPH3POSN_GRPH3XSTART_MASK (0x0FFF0000) +#define PDP_GRPH3POSN_GRPH3XSTART_LSBMASK (0x00000FFF) +#define 
PDP_GRPH3POSN_GRPH3XSTART_SHIFT (16) +#define PDP_GRPH3POSN_GRPH3XSTART_LENGTH (12) +#define PDP_GRPH3POSN_GRPH3XSTART_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3POSN, GRPH3YSTART +*/ +#define PDP_GRPH3POSN_GRPH3YSTART_MASK (0x00000FFF) +#define PDP_GRPH3POSN_GRPH3YSTART_LSBMASK (0x00000FFF) +#define PDP_GRPH3POSN_GRPH3YSTART_SHIFT (0) +#define PDP_GRPH3POSN_GRPH3YSTART_LENGTH (12) +#define PDP_GRPH3POSN_GRPH3YSTART_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH4POSN_OFFSET (0x00AC) + +/* PDP, GRPH4POSN, GRPH4XSTART +*/ +#define PDP_GRPH4POSN_GRPH4XSTART_MASK (0x0FFF0000) +#define PDP_GRPH4POSN_GRPH4XSTART_LSBMASK (0x00000FFF) +#define PDP_GRPH4POSN_GRPH4XSTART_SHIFT (16) +#define PDP_GRPH4POSN_GRPH4XSTART_LENGTH (12) +#define PDP_GRPH4POSN_GRPH4XSTART_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4POSN, GRPH4YSTART +*/ +#define PDP_GRPH4POSN_GRPH4YSTART_MASK (0x00000FFF) +#define PDP_GRPH4POSN_GRPH4YSTART_LSBMASK (0x00000FFF) +#define PDP_GRPH4POSN_GRPH4YSTART_SHIFT (0) +#define PDP_GRPH4POSN_GRPH4YSTART_LENGTH (12) +#define PDP_GRPH4POSN_GRPH4YSTART_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1POSN_OFFSET (0x00B0) + +/* PDP, VID1POSN, VID1XSTART +*/ +#define PDP_VID1POSN_VID1XSTART_MASK (0x0FFF0000) +#define PDP_VID1POSN_VID1XSTART_LSBMASK (0x00000FFF) +#define PDP_VID1POSN_VID1XSTART_SHIFT (16) +#define PDP_VID1POSN_VID1XSTART_LENGTH (12) +#define PDP_VID1POSN_VID1XSTART_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1POSN, VID1YSTART +*/ +#define PDP_VID1POSN_VID1YSTART_MASK (0x00000FFF) +#define PDP_VID1POSN_VID1YSTART_LSBMASK (0x00000FFF) +#define PDP_VID1POSN_VID1YSTART_SHIFT (0) +#define PDP_VID1POSN_VID1YSTART_LENGTH (12) +#define PDP_VID1POSN_VID1YSTART_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2POSN_OFFSET (0x00B4) + +/* PDP, VID2POSN, VID2XSTART +*/ +#define PDP_VID2POSN_VID2XSTART_MASK (0x0FFF0000) +#define PDP_VID2POSN_VID2XSTART_LSBMASK (0x00000FFF) +#define PDP_VID2POSN_VID2XSTART_SHIFT (16) +#define PDP_VID2POSN_VID2XSTART_LENGTH (12) +#define 
PDP_VID2POSN_VID2XSTART_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2POSN, VID2YSTART +*/ +#define PDP_VID2POSN_VID2YSTART_MASK (0x00000FFF) +#define PDP_VID2POSN_VID2YSTART_LSBMASK (0x00000FFF) +#define PDP_VID2POSN_VID2YSTART_SHIFT (0) +#define PDP_VID2POSN_VID2YSTART_LENGTH (12) +#define PDP_VID2POSN_VID2YSTART_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3POSN_OFFSET (0x00B8) + +/* PDP, VID3POSN, VID3XSTART +*/ +#define PDP_VID3POSN_VID3XSTART_MASK (0x0FFF0000) +#define PDP_VID3POSN_VID3XSTART_LSBMASK (0x00000FFF) +#define PDP_VID3POSN_VID3XSTART_SHIFT (16) +#define PDP_VID3POSN_VID3XSTART_LENGTH (12) +#define PDP_VID3POSN_VID3XSTART_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3POSN, VID3YSTART +*/ +#define PDP_VID3POSN_VID3YSTART_MASK (0x00000FFF) +#define PDP_VID3POSN_VID3YSTART_LSBMASK (0x00000FFF) +#define PDP_VID3POSN_VID3YSTART_SHIFT (0) +#define PDP_VID3POSN_VID3YSTART_LENGTH (12) +#define PDP_VID3POSN_VID3YSTART_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4POSN_OFFSET (0x00BC) + +/* PDP, VID4POSN, VID4XSTART +*/ +#define PDP_VID4POSN_VID4XSTART_MASK (0x0FFF0000) +#define PDP_VID4POSN_VID4XSTART_LSBMASK (0x00000FFF) +#define PDP_VID4POSN_VID4XSTART_SHIFT (16) +#define PDP_VID4POSN_VID4XSTART_LENGTH (12) +#define PDP_VID4POSN_VID4XSTART_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4POSN, VID4YSTART +*/ +#define PDP_VID4POSN_VID4YSTART_MASK (0x00000FFF) +#define PDP_VID4POSN_VID4YSTART_LSBMASK (0x00000FFF) +#define PDP_VID4POSN_VID4YSTART_SHIFT (0) +#define PDP_VID4POSN_VID4YSTART_LENGTH (12) +#define PDP_VID4POSN_VID4YSTART_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH1GALPHA_OFFSET (0x00C0) + +/* PDP, GRPH1GALPHA, GRPH1GALPHA +*/ +#define PDP_GRPH1GALPHA_GRPH1GALPHA_MASK (0x000003FF) +#define PDP_GRPH1GALPHA_GRPH1GALPHA_LSBMASK (0x000003FF) +#define PDP_GRPH1GALPHA_GRPH1GALPHA_SHIFT (0) +#define PDP_GRPH1GALPHA_GRPH1GALPHA_LENGTH (10) +#define PDP_GRPH1GALPHA_GRPH1GALPHA_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH2GALPHA_OFFSET (0x00C4) + +/* PDP, GRPH2GALPHA, GRPH2GALPHA +*/ +#define 
PDP_GRPH2GALPHA_GRPH2GALPHA_MASK (0x000003FF) +#define PDP_GRPH2GALPHA_GRPH2GALPHA_LSBMASK (0x000003FF) +#define PDP_GRPH2GALPHA_GRPH2GALPHA_SHIFT (0) +#define PDP_GRPH2GALPHA_GRPH2GALPHA_LENGTH (10) +#define PDP_GRPH2GALPHA_GRPH2GALPHA_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH3GALPHA_OFFSET (0x00C8) + +/* PDP, GRPH3GALPHA, GRPH3GALPHA +*/ +#define PDP_GRPH3GALPHA_GRPH3GALPHA_MASK (0x000003FF) +#define PDP_GRPH3GALPHA_GRPH3GALPHA_LSBMASK (0x000003FF) +#define PDP_GRPH3GALPHA_GRPH3GALPHA_SHIFT (0) +#define PDP_GRPH3GALPHA_GRPH3GALPHA_LENGTH (10) +#define PDP_GRPH3GALPHA_GRPH3GALPHA_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH4GALPHA_OFFSET (0x00CC) + +/* PDP, GRPH4GALPHA, GRPH4GALPHA +*/ +#define PDP_GRPH4GALPHA_GRPH4GALPHA_MASK (0x000003FF) +#define PDP_GRPH4GALPHA_GRPH4GALPHA_LSBMASK (0x000003FF) +#define PDP_GRPH4GALPHA_GRPH4GALPHA_SHIFT (0) +#define PDP_GRPH4GALPHA_GRPH4GALPHA_LENGTH (10) +#define PDP_GRPH4GALPHA_GRPH4GALPHA_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1GALPHA_OFFSET (0x00D0) + +/* PDP, VID1GALPHA, VID1GALPHA +*/ +#define PDP_VID1GALPHA_VID1GALPHA_MASK (0x000003FF) +#define PDP_VID1GALPHA_VID1GALPHA_LSBMASK (0x000003FF) +#define PDP_VID1GALPHA_VID1GALPHA_SHIFT (0) +#define PDP_VID1GALPHA_VID1GALPHA_LENGTH (10) +#define PDP_VID1GALPHA_VID1GALPHA_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2GALPHA_OFFSET (0x00D4) + +/* PDP, VID2GALPHA, VID2GALPHA +*/ +#define PDP_VID2GALPHA_VID2GALPHA_MASK (0x000003FF) +#define PDP_VID2GALPHA_VID2GALPHA_LSBMASK (0x000003FF) +#define PDP_VID2GALPHA_VID2GALPHA_SHIFT (0) +#define PDP_VID2GALPHA_VID2GALPHA_LENGTH (10) +#define PDP_VID2GALPHA_VID2GALPHA_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3GALPHA_OFFSET (0x00D8) + +/* PDP, VID3GALPHA, VID3GALPHA +*/ +#define PDP_VID3GALPHA_VID3GALPHA_MASK (0x000003FF) +#define PDP_VID3GALPHA_VID3GALPHA_LSBMASK (0x000003FF) +#define PDP_VID3GALPHA_VID3GALPHA_SHIFT (0) +#define PDP_VID3GALPHA_VID3GALPHA_LENGTH (10) +#define PDP_VID3GALPHA_VID3GALPHA_SIGNED_FIELD IMG_FALSE + +#define 
PDP_VID4GALPHA_OFFSET (0x00DC) + +/* PDP, VID4GALPHA, VID4GALPHA +*/ +#define PDP_VID4GALPHA_VID4GALPHA_MASK (0x000003FF) +#define PDP_VID4GALPHA_VID4GALPHA_LSBMASK (0x000003FF) +#define PDP_VID4GALPHA_VID4GALPHA_SHIFT (0) +#define PDP_VID4GALPHA_VID4GALPHA_LENGTH (10) +#define PDP_VID4GALPHA_VID4GALPHA_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH1CKEY_R_OFFSET (0x00E0) + +/* PDP, GRPH1CKEY_R, GRPH1CKEY_R +*/ +#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_MASK (0x000003FF) +#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_LSBMASK (0x000003FF) +#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_SHIFT (0) +#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_LENGTH (10) +#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH1CKEY_GB_OFFSET (0x00E4) + +/* PDP, GRPH1CKEY_GB, GRPH1CKEY_G +*/ +#define PDP_GRPH1CKEY_GB_GRPH1CKEY_G_MASK (0x03FF0000) +#define PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LSBMASK (0x000003FF) +#define PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SHIFT (16) +#define PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LENGTH (10) +#define PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1CKEY_GB, GRPH1CKEY_B +*/ +#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_MASK (0x000003FF) +#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LSBMASK (0x000003FF) +#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SHIFT (0) +#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LENGTH (10) +#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH2CKEY_R_OFFSET (0x00E8) + +/* PDP, GRPH2CKEY_R, GRPH2CKEY_R +*/ +#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_MASK (0x000003FF) +#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_LSBMASK (0x000003FF) +#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_SHIFT (0) +#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_LENGTH (10) +#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH2CKEY_GB_OFFSET (0x00EC) + +/* PDP, GRPH2CKEY_GB, GRPH2CKEY_G +*/ +#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_MASK (0x03FF0000) +#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LSBMASK (0x000003FF) +#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SHIFT (16) +#define 
PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LENGTH (10) +#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2CKEY_GB, GRPH2CKEY_B +*/ +#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_MASK (0x000003FF) +#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LSBMASK (0x000003FF) +#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SHIFT (0) +#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LENGTH (10) +#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH3CKEY_R_OFFSET (0x00F0) + +/* PDP, GRPH3CKEY_R, GRPH3CKEY_R +*/ +#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_MASK (0x000003FF) +#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_LSBMASK (0x000003FF) +#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_SHIFT (0) +#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_LENGTH (10) +#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH3CKEY_GB_OFFSET (0x00F4) + +/* PDP, GRPH3CKEY_GB, GRPH3CKEY_G +*/ +#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_MASK (0x03FF0000) +#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LSBMASK (0x000003FF) +#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SHIFT (16) +#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LENGTH (10) +#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3CKEY_GB, GRPH3CKEY_B +*/ +#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_MASK (0x000003FF) +#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LSBMASK (0x000003FF) +#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SHIFT (0) +#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LENGTH (10) +#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH4CKEY_R_OFFSET (0x00F8) + +/* PDP, GRPH4CKEY_R, GRPH4CKEY_R +*/ +#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_MASK (0x000003FF) +#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_LSBMASK (0x000003FF) +#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_SHIFT (0) +#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_LENGTH (10) +#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH4CKEY_GB_OFFSET (0x00FC) + +/* PDP, GRPH4CKEY_GB, GRPH4CKEY_G +*/ +#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_MASK (0x03FF0000) +#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LSBMASK (0x000003FF) 
+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SHIFT (16) +#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LENGTH (10) +#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4CKEY_GB, GRPH4CKEY_B +*/ +#define PDP_GRPH4CKEY_GB_GRPH4CKEY_B_MASK (0x000003FF) +#define PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LSBMASK (0x000003FF) +#define PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SHIFT (0) +#define PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LENGTH (10) +#define PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1CKEY_R_OFFSET (0x0100) + +/* PDP, VID1CKEY_R, VID1CKEY_R +*/ +#define PDP_VID1CKEY_R_VID1CKEY_R_MASK (0x000003FF) +#define PDP_VID1CKEY_R_VID1CKEY_R_LSBMASK (0x000003FF) +#define PDP_VID1CKEY_R_VID1CKEY_R_SHIFT (0) +#define PDP_VID1CKEY_R_VID1CKEY_R_LENGTH (10) +#define PDP_VID1CKEY_R_VID1CKEY_R_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1CKEY_GB_OFFSET (0x0104) + +/* PDP, VID1CKEY_GB, VID1CKEY_G +*/ +#define PDP_VID1CKEY_GB_VID1CKEY_G_MASK (0x03FF0000) +#define PDP_VID1CKEY_GB_VID1CKEY_G_LSBMASK (0x000003FF) +#define PDP_VID1CKEY_GB_VID1CKEY_G_SHIFT (16) +#define PDP_VID1CKEY_GB_VID1CKEY_G_LENGTH (10) +#define PDP_VID1CKEY_GB_VID1CKEY_G_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1CKEY_GB, VID1CKEY_B +*/ +#define PDP_VID1CKEY_GB_VID1CKEY_B_MASK (0x000003FF) +#define PDP_VID1CKEY_GB_VID1CKEY_B_LSBMASK (0x000003FF) +#define PDP_VID1CKEY_GB_VID1CKEY_B_SHIFT (0) +#define PDP_VID1CKEY_GB_VID1CKEY_B_LENGTH (10) +#define PDP_VID1CKEY_GB_VID1CKEY_B_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2CKEY_R_OFFSET (0x0108) + +/* PDP, VID2CKEY_R, VID2CKEY_R +*/ +#define PDP_VID2CKEY_R_VID2CKEY_R_MASK (0x000003FF) +#define PDP_VID2CKEY_R_VID2CKEY_R_LSBMASK (0x000003FF) +#define PDP_VID2CKEY_R_VID2CKEY_R_SHIFT (0) +#define PDP_VID2CKEY_R_VID2CKEY_R_LENGTH (10) +#define PDP_VID2CKEY_R_VID2CKEY_R_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2CKEY_GB_OFFSET (0x010C) + +/* PDP, VID2CKEY_GB, VID2CKEY_G +*/ +#define PDP_VID2CKEY_GB_VID2CKEY_G_MASK (0x03FF0000) +#define PDP_VID2CKEY_GB_VID2CKEY_G_LSBMASK (0x000003FF) 
+#define PDP_VID2CKEY_GB_VID2CKEY_G_SHIFT (16) +#define PDP_VID2CKEY_GB_VID2CKEY_G_LENGTH (10) +#define PDP_VID2CKEY_GB_VID2CKEY_G_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2CKEY_GB, VID2CKEY_B +*/ +#define PDP_VID2CKEY_GB_VID2CKEY_B_MASK (0x000003FF) +#define PDP_VID2CKEY_GB_VID2CKEY_B_LSBMASK (0x000003FF) +#define PDP_VID2CKEY_GB_VID2CKEY_B_SHIFT (0) +#define PDP_VID2CKEY_GB_VID2CKEY_B_LENGTH (10) +#define PDP_VID2CKEY_GB_VID2CKEY_B_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3CKEY_R_OFFSET (0x0110) + +/* PDP, VID3CKEY_R, VID3CKEY_R +*/ +#define PDP_VID3CKEY_R_VID3CKEY_R_MASK (0x000003FF) +#define PDP_VID3CKEY_R_VID3CKEY_R_LSBMASK (0x000003FF) +#define PDP_VID3CKEY_R_VID3CKEY_R_SHIFT (0) +#define PDP_VID3CKEY_R_VID3CKEY_R_LENGTH (10) +#define PDP_VID3CKEY_R_VID3CKEY_R_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3CKEY_GB_OFFSET (0x0114) + +/* PDP, VID3CKEY_GB, VID3CKEY_G +*/ +#define PDP_VID3CKEY_GB_VID3CKEY_G_MASK (0x03FF0000) +#define PDP_VID3CKEY_GB_VID3CKEY_G_LSBMASK (0x000003FF) +#define PDP_VID3CKEY_GB_VID3CKEY_G_SHIFT (16) +#define PDP_VID3CKEY_GB_VID3CKEY_G_LENGTH (10) +#define PDP_VID3CKEY_GB_VID3CKEY_G_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3CKEY_GB, VID3CKEY_B +*/ +#define PDP_VID3CKEY_GB_VID3CKEY_B_MASK (0x000003FF) +#define PDP_VID3CKEY_GB_VID3CKEY_B_LSBMASK (0x000003FF) +#define PDP_VID3CKEY_GB_VID3CKEY_B_SHIFT (0) +#define PDP_VID3CKEY_GB_VID3CKEY_B_LENGTH (10) +#define PDP_VID3CKEY_GB_VID3CKEY_B_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4CKEY_R_OFFSET (0x0118) + +/* PDP, VID4CKEY_R, VID4CKEY_R +*/ +#define PDP_VID4CKEY_R_VID4CKEY_R_MASK (0x000003FF) +#define PDP_VID4CKEY_R_VID4CKEY_R_LSBMASK (0x000003FF) +#define PDP_VID4CKEY_R_VID4CKEY_R_SHIFT (0) +#define PDP_VID4CKEY_R_VID4CKEY_R_LENGTH (10) +#define PDP_VID4CKEY_R_VID4CKEY_R_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4CKEY_GB_OFFSET (0x011C) + +/* PDP, VID4CKEY_GB, VID4CKEY_G +*/ +#define PDP_VID4CKEY_GB_VID4CKEY_G_MASK (0x03FF0000) +#define PDP_VID4CKEY_GB_VID4CKEY_G_LSBMASK (0x000003FF) +#define 
PDP_VID4CKEY_GB_VID4CKEY_G_SHIFT (16) +#define PDP_VID4CKEY_GB_VID4CKEY_G_LENGTH (10) +#define PDP_VID4CKEY_GB_VID4CKEY_G_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4CKEY_GB, VID4CKEY_B +*/ +#define PDP_VID4CKEY_GB_VID4CKEY_B_MASK (0x000003FF) +#define PDP_VID4CKEY_GB_VID4CKEY_B_LSBMASK (0x000003FF) +#define PDP_VID4CKEY_GB_VID4CKEY_B_SHIFT (0) +#define PDP_VID4CKEY_GB_VID4CKEY_B_LENGTH (10) +#define PDP_VID4CKEY_GB_VID4CKEY_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH1BLND2_R_OFFSET (0x0120) + +/* PDP, GRPH1BLND2_R, GRPH1PIXDBL +*/ +#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_MASK (0x80000000) +#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_LSBMASK (0x00000001) +#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_SHIFT (31) +#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_LENGTH (1) +#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1BLND2_R, GRPH1LINDBL +*/ +#define PDP_GRPH1BLND2_R_GRPH1LINDBL_MASK (0x20000000) +#define PDP_GRPH1BLND2_R_GRPH1LINDBL_LSBMASK (0x00000001) +#define PDP_GRPH1BLND2_R_GRPH1LINDBL_SHIFT (29) +#define PDP_GRPH1BLND2_R_GRPH1LINDBL_LENGTH (1) +#define PDP_GRPH1BLND2_R_GRPH1LINDBL_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1BLND2_R, GRPH1CKEYMASK_R +*/ +#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_MASK (0x000003FF) +#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LSBMASK (0x000003FF) +#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SHIFT (0) +#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LENGTH (10) +#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH1BLND2_GB_OFFSET (0x0124) + +/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_G +*/ +#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_MASK (0x03FF0000) +#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LSBMASK (0x000003FF) +#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SHIFT (16) +#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LENGTH (10) +#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_B +*/ +#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_MASK (0x000003FF) +#define 
PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LSBMASK (0x000003FF) +#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SHIFT (0) +#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LENGTH (10) +#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH2BLND2_R_OFFSET (0x0128) + +/* PDP, GRPH2BLND2_R, GRPH2PIXDBL +*/ +#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_MASK (0x80000000) +#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_LSBMASK (0x00000001) +#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_SHIFT (31) +#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_LENGTH (1) +#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2BLND2_R, GRPH2LINDBL +*/ +#define PDP_GRPH2BLND2_R_GRPH2LINDBL_MASK (0x20000000) +#define PDP_GRPH2BLND2_R_GRPH2LINDBL_LSBMASK (0x00000001) +#define PDP_GRPH2BLND2_R_GRPH2LINDBL_SHIFT (29) +#define PDP_GRPH2BLND2_R_GRPH2LINDBL_LENGTH (1) +#define PDP_GRPH2BLND2_R_GRPH2LINDBL_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2BLND2_R, GRPH2CKEYMASK_R +*/ +#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_MASK (0x000003FF) +#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LSBMASK (0x000003FF) +#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SHIFT (0) +#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LENGTH (10) +#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH2BLND2_GB_OFFSET (0x012C) + +/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_G +*/ +#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_MASK (0x03FF0000) +#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LSBMASK (0x000003FF) +#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SHIFT (16) +#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LENGTH (10) +#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_B +*/ +#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_MASK (0x000003FF) +#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LSBMASK (0x000003FF) +#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SHIFT (0) +#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LENGTH (10) +#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SIGNED_FIELD IMG_FALSE + +#define 
PDP_GRPH3BLND2_R_OFFSET (0x0130) + +/* PDP, GRPH3BLND2_R, GRPH3PIXDBL +*/ +#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_MASK (0x80000000) +#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_LSBMASK (0x00000001) +#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_SHIFT (31) +#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_LENGTH (1) +#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3BLND2_R, GRPH3LINDBL +*/ +#define PDP_GRPH3BLND2_R_GRPH3LINDBL_MASK (0x20000000) +#define PDP_GRPH3BLND2_R_GRPH3LINDBL_LSBMASK (0x00000001) +#define PDP_GRPH3BLND2_R_GRPH3LINDBL_SHIFT (29) +#define PDP_GRPH3BLND2_R_GRPH3LINDBL_LENGTH (1) +#define PDP_GRPH3BLND2_R_GRPH3LINDBL_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3BLND2_R, GRPH3CKEYMASK_R +*/ +#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_MASK (0x000003FF) +#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LSBMASK (0x000003FF) +#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SHIFT (0) +#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LENGTH (10) +#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH3BLND2_GB_OFFSET (0x0134) + +/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_G +*/ +#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_MASK (0x03FF0000) +#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LSBMASK (0x000003FF) +#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SHIFT (16) +#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LENGTH (10) +#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_B +*/ +#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_MASK (0x000003FF) +#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LSBMASK (0x000003FF) +#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SHIFT (0) +#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LENGTH (10) +#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH4BLND2_R_OFFSET (0x0138) + +/* PDP, GRPH4BLND2_R, GRPH4PIXDBL +*/ +#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_MASK (0x80000000) +#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_LSBMASK (0x00000001) +#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_SHIFT (31) +#define 
PDP_GRPH4BLND2_R_GRPH4PIXDBL_LENGTH (1) +#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4BLND2_R, GRPH4LINDBL +*/ +#define PDP_GRPH4BLND2_R_GRPH4LINDBL_MASK (0x20000000) +#define PDP_GRPH4BLND2_R_GRPH4LINDBL_LSBMASK (0x00000001) +#define PDP_GRPH4BLND2_R_GRPH4LINDBL_SHIFT (29) +#define PDP_GRPH4BLND2_R_GRPH4LINDBL_LENGTH (1) +#define PDP_GRPH4BLND2_R_GRPH4LINDBL_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4BLND2_R, GRPH4CKEYMASK_R +*/ +#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_MASK (0x000003FF) +#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LSBMASK (0x000003FF) +#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SHIFT (0) +#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LENGTH (10) +#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH4BLND2_GB_OFFSET (0x013C) + +/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_G +*/ +#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_MASK (0x03FF0000) +#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LSBMASK (0x000003FF) +#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SHIFT (16) +#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LENGTH (10) +#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_B +*/ +#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_MASK (0x000003FF) +#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LSBMASK (0x000003FF) +#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SHIFT (0) +#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LENGTH (10) +#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1BLND2_R_OFFSET (0x0140) + +/* PDP, VID1BLND2_R, VID1CKEYMASK_R +*/ +#define PDP_VID1BLND2_R_VID1CKEYMASK_R_MASK (0x000003FF) +#define PDP_VID1BLND2_R_VID1CKEYMASK_R_LSBMASK (0x000003FF) +#define PDP_VID1BLND2_R_VID1CKEYMASK_R_SHIFT (0) +#define PDP_VID1BLND2_R_VID1CKEYMASK_R_LENGTH (10) +#define PDP_VID1BLND2_R_VID1CKEYMASK_R_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1BLND2_GB_OFFSET (0x0144) + +/* PDP, VID1BLND2_GB, VID1CKEYMASK_G +*/ +#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_MASK 
(0x03FF0000) +#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_LSBMASK (0x000003FF) +#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_SHIFT (16) +#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_LENGTH (10) +#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1BLND2_GB, VID1CKEYMASK_B +*/ +#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_MASK (0x000003FF) +#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_LSBMASK (0x000003FF) +#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_SHIFT (0) +#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_LENGTH (10) +#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2BLND2_R_OFFSET (0x0148) + +/* PDP, VID2BLND2_R, VID2CKEYMASK_R +*/ +#define PDP_VID2BLND2_R_VID2CKEYMASK_R_MASK (0x000003FF) +#define PDP_VID2BLND2_R_VID2CKEYMASK_R_LSBMASK (0x000003FF) +#define PDP_VID2BLND2_R_VID2CKEYMASK_R_SHIFT (0) +#define PDP_VID2BLND2_R_VID2CKEYMASK_R_LENGTH (10) +#define PDP_VID2BLND2_R_VID2CKEYMASK_R_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2BLND2_GB_OFFSET (0x014C) + +/* PDP, VID2BLND2_GB, VID2CKEYMASK_G +*/ +#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_MASK (0x03FF0000) +#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_LSBMASK (0x000003FF) +#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_SHIFT (16) +#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_LENGTH (10) +#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2BLND2_GB, VID2CKEYMASK_B +*/ +#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_MASK (0x000003FF) +#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_LSBMASK (0x000003FF) +#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_SHIFT (0) +#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_LENGTH (10) +#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3BLND2_R_OFFSET (0x0150) + +/* PDP, VID3BLND2_R, VID3CKEYMASK_R +*/ +#define PDP_VID3BLND2_R_VID3CKEYMASK_R_MASK (0x000003FF) +#define PDP_VID3BLND2_R_VID3CKEYMASK_R_LSBMASK (0x000003FF) +#define PDP_VID3BLND2_R_VID3CKEYMASK_R_SHIFT (0) +#define PDP_VID3BLND2_R_VID3CKEYMASK_R_LENGTH (10) +#define 
PDP_VID3BLND2_R_VID3CKEYMASK_R_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3BLND2_GB_OFFSET (0x0154) + +/* PDP, VID3BLND2_GB, VID3CKEYMASK_G +*/ +#define PDP_VID3BLND2_GB_VID3CKEYMASK_G_MASK (0x03FF0000) +#define PDP_VID3BLND2_GB_VID3CKEYMASK_G_LSBMASK (0x000003FF) +#define PDP_VID3BLND2_GB_VID3CKEYMASK_G_SHIFT (16) +#define PDP_VID3BLND2_GB_VID3CKEYMASK_G_LENGTH (10) +#define PDP_VID3BLND2_GB_VID3CKEYMASK_G_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3BLND2_GB, VID3CKEYMASK_B +*/ +#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_MASK (0x000003FF) +#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_LSBMASK (0x000003FF) +#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_SHIFT (0) +#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_LENGTH (10) +#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4BLND2_R_OFFSET (0x0158) + +/* PDP, VID4BLND2_R, VID4CKEYMASK_R +*/ +#define PDP_VID4BLND2_R_VID4CKEYMASK_R_MASK (0x000003FF) +#define PDP_VID4BLND2_R_VID4CKEYMASK_R_LSBMASK (0x000003FF) +#define PDP_VID4BLND2_R_VID4CKEYMASK_R_SHIFT (0) +#define PDP_VID4BLND2_R_VID4CKEYMASK_R_LENGTH (10) +#define PDP_VID4BLND2_R_VID4CKEYMASK_R_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4BLND2_GB_OFFSET (0x015C) + +/* PDP, VID4BLND2_GB, VID4CKEYMASK_G +*/ +#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_MASK (0x03FF0000) +#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_LSBMASK (0x000003FF) +#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_SHIFT (16) +#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_LENGTH (10) +#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4BLND2_GB, VID4CKEYMASK_B +*/ +#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_MASK (0x000003FF) +#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_LSBMASK (0x000003FF) +#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_SHIFT (0) +#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_LENGTH (10) +#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH1INTERLEAVE_CTRL_OFFSET (0x0160) + +/* PDP, GRPH1INTERLEAVE_CTRL, GRPH1INTFIELD +*/ +#define 
PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_MASK (0x00000001) +#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LSBMASK (0x00000001) +#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SHIFT (0) +#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LENGTH (1) +#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH2INTERLEAVE_CTRL_OFFSET (0x0164) + +/* PDP, GRPH2INTERLEAVE_CTRL, GRPH2INTFIELD +*/ +#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_MASK (0x00000001) +#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LSBMASK (0x00000001) +#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SHIFT (0) +#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LENGTH (1) +#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH3INTERLEAVE_CTRL_OFFSET (0x0168) + +/* PDP, GRPH3INTERLEAVE_CTRL, GRPH3INTFIELD +*/ +#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_MASK (0x00000001) +#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LSBMASK (0x00000001) +#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SHIFT (0) +#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LENGTH (1) +#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH4INTERLEAVE_CTRL_OFFSET (0x016C) + +/* PDP, GRPH4INTERLEAVE_CTRL, GRPH4INTFIELD +*/ +#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_MASK (0x00000001) +#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LSBMASK (0x00000001) +#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SHIFT (0) +#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LENGTH (1) +#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1INTERLEAVE_CTRL_OFFSET (0x0170) + +/* PDP, VID1INTERLEAVE_CTRL, VID1INTFIELD +*/ +#define PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_MASK (0x00000001) +#define PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LSBMASK (0x00000001) +#define PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SHIFT (0) +#define PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LENGTH (1) +#define PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SIGNED_FIELD 
IMG_FALSE + +#define PDP_VID2INTERLEAVE_CTRL_OFFSET (0x0174) + +/* PDP, VID2INTERLEAVE_CTRL, VID2INTFIELD +*/ +#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_MASK (0x00000001) +#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LSBMASK (0x00000001) +#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SHIFT (0) +#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LENGTH (1) +#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3INTERLEAVE_CTRL_OFFSET (0x0178) + +/* PDP, VID3INTERLEAVE_CTRL, VID3INTFIELD +*/ +#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_MASK (0x00000001) +#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LSBMASK (0x00000001) +#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SHIFT (0) +#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LENGTH (1) +#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4INTERLEAVE_CTRL_OFFSET (0x017C) + +/* PDP, VID4INTERLEAVE_CTRL, VID4INTFIELD +*/ +#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_MASK (0x00000001) +#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LSBMASK (0x00000001) +#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SHIFT (0) +#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LENGTH (1) +#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH1BASEADDR_OFFSET (0x0180) + +/* PDP, GRPH1BASEADDR, GRPH1BASEADDR +*/ +#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_MASK (0xFFFFFFE0) +#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_LSBMASK (0x07FFFFFF) +#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_SHIFT (5) +#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_LENGTH (27) +#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH2BASEADDR_OFFSET (0x0184) + +/* PDP, GRPH2BASEADDR, GRPH2BASEADDR +*/ +#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_MASK (0xFFFFFFE0) +#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_LSBMASK (0x07FFFFFF) +#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_SHIFT (5) +#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_LENGTH (27) +#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_SIGNED_FIELD 
IMG_FALSE + +#define PDP_GRPH3BASEADDR_OFFSET (0x0188) + +/* PDP, GRPH3BASEADDR, GRPH3BASEADDR +*/ +#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_MASK (0xFFFFFFE0) +#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_LSBMASK (0x07FFFFFF) +#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_SHIFT (5) +#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_LENGTH (27) +#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH4BASEADDR_OFFSET (0x018C) + +/* PDP, GRPH4BASEADDR, GRPH4BASEADDR +*/ +#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_MASK (0xFFFFFFE0) +#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_LSBMASK (0x07FFFFFF) +#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_SHIFT (5) +#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_LENGTH (27) +#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1BASEADDR_OFFSET (0x0190) + +/* PDP, VID1BASEADDR, VID1BASEADDR +*/ +#define PDP_VID1BASEADDR_VID1BASEADDR_MASK (0xFFFFFFE0) +#define PDP_VID1BASEADDR_VID1BASEADDR_LSBMASK (0x07FFFFFF) +#define PDP_VID1BASEADDR_VID1BASEADDR_SHIFT (5) +#define PDP_VID1BASEADDR_VID1BASEADDR_LENGTH (27) +#define PDP_VID1BASEADDR_VID1BASEADDR_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2BASEADDR_OFFSET (0x0194) + +/* PDP, VID2BASEADDR, VID2BASEADDR +*/ +#define PDP_VID2BASEADDR_VID2BASEADDR_MASK (0xFFFFFFE0) +#define PDP_VID2BASEADDR_VID2BASEADDR_LSBMASK (0x07FFFFFF) +#define PDP_VID2BASEADDR_VID2BASEADDR_SHIFT (5) +#define PDP_VID2BASEADDR_VID2BASEADDR_LENGTH (27) +#define PDP_VID2BASEADDR_VID2BASEADDR_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3BASEADDR_OFFSET (0x0198) + +/* PDP, VID3BASEADDR, VID3BASEADDR +*/ +#define PDP_VID3BASEADDR_VID3BASEADDR_MASK (0xFFFFFFE0) +#define PDP_VID3BASEADDR_VID3BASEADDR_LSBMASK (0x07FFFFFF) +#define PDP_VID3BASEADDR_VID3BASEADDR_SHIFT (5) +#define PDP_VID3BASEADDR_VID3BASEADDR_LENGTH (27) +#define PDP_VID3BASEADDR_VID3BASEADDR_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4BASEADDR_OFFSET (0x019C) + +/* PDP, VID4BASEADDR, VID4BASEADDR +*/ +#define PDP_VID4BASEADDR_VID4BASEADDR_MASK 
(0xFFFFFFE0) +#define PDP_VID4BASEADDR_VID4BASEADDR_LSBMASK (0x07FFFFFF) +#define PDP_VID4BASEADDR_VID4BASEADDR_SHIFT (5) +#define PDP_VID4BASEADDR_VID4BASEADDR_LENGTH (27) +#define PDP_VID4BASEADDR_VID4BASEADDR_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1UBASEADDR_OFFSET (0x01B0) + +/* PDP, VID1UBASEADDR, VID1UBASEADDR +*/ +#define PDP_VID1UBASEADDR_VID1UBASEADDR_MASK (0xFFFFFFE0) +#define PDP_VID1UBASEADDR_VID1UBASEADDR_LSBMASK (0x07FFFFFF) +#define PDP_VID1UBASEADDR_VID1UBASEADDR_SHIFT (5) +#define PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH (27) +#define PDP_VID1UBASEADDR_VID1UBASEADDR_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2UBASEADDR_OFFSET (0x01B4) + +/* PDP, VID2UBASEADDR, VID2UBASEADDR +*/ +#define PDP_VID2UBASEADDR_VID2UBASEADDR_MASK (0xFFFFFFE0) +#define PDP_VID2UBASEADDR_VID2UBASEADDR_LSBMASK (0x07FFFFFF) +#define PDP_VID2UBASEADDR_VID2UBASEADDR_SHIFT (5) +#define PDP_VID2UBASEADDR_VID2UBASEADDR_LENGTH (27) +#define PDP_VID2UBASEADDR_VID2UBASEADDR_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3UBASEADDR_OFFSET (0x01B8) + +/* PDP, VID3UBASEADDR, VID3UBASEADDR +*/ +#define PDP_VID3UBASEADDR_VID3UBASEADDR_MASK (0xFFFFFFE0) +#define PDP_VID3UBASEADDR_VID3UBASEADDR_LSBMASK (0x07FFFFFF) +#define PDP_VID3UBASEADDR_VID3UBASEADDR_SHIFT (5) +#define PDP_VID3UBASEADDR_VID3UBASEADDR_LENGTH (27) +#define PDP_VID3UBASEADDR_VID3UBASEADDR_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4UBASEADDR_OFFSET (0x01BC) + +/* PDP, VID4UBASEADDR, VID4UBASEADDR +*/ +#define PDP_VID4UBASEADDR_VID4UBASEADDR_MASK (0xFFFFFFE0) +#define PDP_VID4UBASEADDR_VID4UBASEADDR_LSBMASK (0x07FFFFFF) +#define PDP_VID4UBASEADDR_VID4UBASEADDR_SHIFT (5) +#define PDP_VID4UBASEADDR_VID4UBASEADDR_LENGTH (27) +#define PDP_VID4UBASEADDR_VID4UBASEADDR_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1VBASEADDR_OFFSET (0x01D0) + +/* PDP, VID1VBASEADDR, VID1VBASEADDR +*/ +#define PDP_VID1VBASEADDR_VID1VBASEADDR_MASK (0xFFFFFFE0) +#define PDP_VID1VBASEADDR_VID1VBASEADDR_LSBMASK (0x07FFFFFF) +#define 
PDP_VID1VBASEADDR_VID1VBASEADDR_SHIFT (5) +#define PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH (27) +#define PDP_VID1VBASEADDR_VID1VBASEADDR_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2VBASEADDR_OFFSET (0x01D4) + +/* PDP, VID2VBASEADDR, VID2VBASEADDR +*/ +#define PDP_VID2VBASEADDR_VID2VBASEADDR_MASK (0xFFFFFFE0) +#define PDP_VID2VBASEADDR_VID2VBASEADDR_LSBMASK (0x07FFFFFF) +#define PDP_VID2VBASEADDR_VID2VBASEADDR_SHIFT (5) +#define PDP_VID2VBASEADDR_VID2VBASEADDR_LENGTH (27) +#define PDP_VID2VBASEADDR_VID2VBASEADDR_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3VBASEADDR_OFFSET (0x01D8) + +/* PDP, VID3VBASEADDR, VID3VBASEADDR +*/ +#define PDP_VID3VBASEADDR_VID3VBASEADDR_MASK (0xFFFFFFE0) +#define PDP_VID3VBASEADDR_VID3VBASEADDR_LSBMASK (0x07FFFFFF) +#define PDP_VID3VBASEADDR_VID3VBASEADDR_SHIFT (5) +#define PDP_VID3VBASEADDR_VID3VBASEADDR_LENGTH (27) +#define PDP_VID3VBASEADDR_VID3VBASEADDR_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4VBASEADDR_OFFSET (0x01DC) + +/* PDP, VID4VBASEADDR, VID4VBASEADDR +*/ +#define PDP_VID4VBASEADDR_VID4VBASEADDR_MASK (0xFFFFFFE0) +#define PDP_VID4VBASEADDR_VID4VBASEADDR_LSBMASK (0x07FFFFFF) +#define PDP_VID4VBASEADDR_VID4VBASEADDR_SHIFT (5) +#define PDP_VID4VBASEADDR_VID4VBASEADDR_LENGTH (27) +#define PDP_VID4VBASEADDR_VID4VBASEADDR_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1POSTSKIPCTRL_OFFSET (0x0230) + +/* PDP, VID1POSTSKIPCTRL, VID1HPOSTCLIP +*/ +#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_MASK (0x007F0000) +#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LSBMASK (0x0000007F) +#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SHIFT (16) +#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LENGTH (7) +#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1POSTSKIPCTRL, VID1VPOSTCLIP +*/ +#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_MASK (0x0000003F) +#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LSBMASK (0x0000003F) +#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SHIFT (0) +#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LENGTH (6) +#define 
PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2POSTSKIPCTRL_OFFSET (0x0234) + +/* PDP, VID2POSTSKIPCTRL, VID2HPOSTCLIP +*/ +#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_MASK (0x007F0000) +#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LSBMASK (0x0000007F) +#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SHIFT (16) +#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LENGTH (7) +#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2POSTSKIPCTRL, VID2VPOSTCLIP +*/ +#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_MASK (0x0000003F) +#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LSBMASK (0x0000003F) +#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SHIFT (0) +#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LENGTH (6) +#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3POSTSKIPCTRL_OFFSET (0x0238) + +/* PDP, VID3POSTSKIPCTRL, VID3HPOSTCLIP +*/ +#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_MASK (0x007F0000) +#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LSBMASK (0x0000007F) +#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SHIFT (16) +#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LENGTH (7) +#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3POSTSKIPCTRL, VID3VPOSTCLIP +*/ +#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_MASK (0x0000003F) +#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LSBMASK (0x0000003F) +#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SHIFT (0) +#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LENGTH (6) +#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4POSTSKIPCTRL_OFFSET (0x023C) + +/* PDP, VID4POSTSKIPCTRL, VID4HPOSTCLIP +*/ +#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_MASK (0x007F0000) +#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LSBMASK (0x0000007F) +#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SHIFT (16) +#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LENGTH (7) +#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4POSTSKIPCTRL, VID4VPOSTCLIP 
+*/ +#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_MASK (0x0000003F) +#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LSBMASK (0x0000003F) +#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SHIFT (0) +#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LENGTH (6) +#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH1DECIMATE_CTRL_OFFSET (0x0240) + +/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_COUNT +*/ +#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) +#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) +#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) +#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) +#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_MODE +*/ +#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) +#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) +#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SHIFT (3) +#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LENGTH (1) +#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_PIXEL_HALVE +*/ +#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_MASK (0x00000004) +#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) +#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SHIFT (2) +#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LENGTH (1) +#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_EN +*/ +#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_MASK (0x00000001) +#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LSBMASK (0x00000001) +#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SHIFT (0) +#define 
PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LENGTH (1) +#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH2DECIMATE_CTRL_OFFSET (0x0244) + +/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_COUNT +*/ +#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) +#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) +#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) +#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) +#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_MODE +*/ +#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) +#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) +#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SHIFT (3) +#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LENGTH (1) +#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_PIXEL_HALVE +*/ +#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_MASK (0x00000004) +#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) +#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SHIFT (2) +#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LENGTH (1) +#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_EN +*/ +#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_MASK (0x00000001) +#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LSBMASK (0x00000001) +#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SHIFT (0) +#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LENGTH (1) +#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH3DECIMATE_CTRL_OFFSET (0x0248) + +/* PDP, 
GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_COUNT +*/ +#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) +#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) +#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) +#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) +#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_MODE +*/ +#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) +#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) +#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SHIFT (3) +#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LENGTH (1) +#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_PIXEL_HALVE +*/ +#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_MASK (0x00000004) +#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) +#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SHIFT (2) +#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LENGTH (1) +#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_EN +*/ +#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_MASK (0x00000001) +#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LSBMASK (0x00000001) +#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SHIFT (0) +#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LENGTH (1) +#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH4DECIMATE_CTRL_OFFSET (0x024C) + +/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_COUNT +*/ +#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) +#define 
PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) +#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) +#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) +#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_MODE +*/ +#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) +#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) +#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SHIFT (3) +#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LENGTH (1) +#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_PIXEL_HALVE +*/ +#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_MASK (0x00000004) +#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) +#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SHIFT (2) +#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LENGTH (1) +#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_EN +*/ +#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_MASK (0x00000001) +#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LSBMASK (0x00000001) +#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SHIFT (0) +#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LENGTH (1) +#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1DECIMATE_CTRL_OFFSET (0x0250) + +/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_COUNT +*/ +#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) +#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) +#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) +#define 
PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) +#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_MODE +*/ +#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) +#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) +#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SHIFT (3) +#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LENGTH (1) +#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_PIXEL_HALVE +*/ +#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_MASK (0x00000004) +#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) +#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SHIFT (2) +#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LENGTH (1) +#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_EN +*/ +#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_MASK (0x00000001) +#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LSBMASK (0x00000001) +#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SHIFT (0) +#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LENGTH (1) +#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2DECIMATE_CTRL_OFFSET (0x0254) + +/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_COUNT +*/ +#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) +#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) +#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) +#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) +#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_MODE +*/ +#define 
PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) +#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) +#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SHIFT (3) +#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LENGTH (1) +#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_PIXEL_HALVE +*/ +#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_MASK (0x00000004) +#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) +#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SHIFT (2) +#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LENGTH (1) +#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_EN +*/ +#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_MASK (0x00000001) +#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LSBMASK (0x00000001) +#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SHIFT (0) +#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LENGTH (1) +#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3DECIMATE_CTRL_OFFSET (0x0258) + +/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_COUNT +*/ +#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) +#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) +#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) +#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) +#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_MODE +*/ +#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) +#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) +#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SHIFT (3) +#define 
PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LENGTH (1) +#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_PIXEL_HALVE +*/ +#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_MASK (0x00000004) +#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) +#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SHIFT (2) +#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LENGTH (1) +#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_EN +*/ +#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_MASK (0x00000001) +#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LSBMASK (0x00000001) +#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SHIFT (0) +#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LENGTH (1) +#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4DECIMATE_CTRL_OFFSET (0x025C) + +/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_COUNT +*/ +#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) +#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) +#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) +#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) +#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_MODE +*/ +#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) +#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) +#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SHIFT (3) +#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LENGTH (1) +#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_PIXEL_HALVE +*/ +#define 
PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_MASK (0x00000004) +#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) +#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SHIFT (2) +#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LENGTH (1) +#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_EN +*/ +#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_MASK (0x00000001) +#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LSBMASK (0x00000001) +#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SHIFT (0) +#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LENGTH (1) +#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1SKIPCTRL_OFFSET (0x0270) + +/* PDP, VID1SKIPCTRL, VID1HSKIP +*/ +#define PDP_VID1SKIPCTRL_VID1HSKIP_MASK (0x0FFF0000) +#define PDP_VID1SKIPCTRL_VID1HSKIP_LSBMASK (0x00000FFF) +#define PDP_VID1SKIPCTRL_VID1HSKIP_SHIFT (16) +#define PDP_VID1SKIPCTRL_VID1HSKIP_LENGTH (12) +#define PDP_VID1SKIPCTRL_VID1HSKIP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SKIPCTRL, VID1VSKIP +*/ +#define PDP_VID1SKIPCTRL_VID1VSKIP_MASK (0x00000FFF) +#define PDP_VID1SKIPCTRL_VID1VSKIP_LSBMASK (0x00000FFF) +#define PDP_VID1SKIPCTRL_VID1VSKIP_SHIFT (0) +#define PDP_VID1SKIPCTRL_VID1VSKIP_LENGTH (12) +#define PDP_VID1SKIPCTRL_VID1VSKIP_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2SKIPCTRL_OFFSET (0x0274) + +/* PDP, VID2SKIPCTRL, VID2HSKIP +*/ +#define PDP_VID2SKIPCTRL_VID2HSKIP_MASK (0x0FFF0000) +#define PDP_VID2SKIPCTRL_VID2HSKIP_LSBMASK (0x00000FFF) +#define PDP_VID2SKIPCTRL_VID2HSKIP_SHIFT (16) +#define PDP_VID2SKIPCTRL_VID2HSKIP_LENGTH (12) +#define PDP_VID2SKIPCTRL_VID2HSKIP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SKIPCTRL, VID2VSKIP +*/ +#define PDP_VID2SKIPCTRL_VID2VSKIP_MASK (0x00000FFF) +#define PDP_VID2SKIPCTRL_VID2VSKIP_LSBMASK (0x00000FFF) +#define PDP_VID2SKIPCTRL_VID2VSKIP_SHIFT (0) +#define PDP_VID2SKIPCTRL_VID2VSKIP_LENGTH (12) +#define 
PDP_VID2SKIPCTRL_VID2VSKIP_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3SKIPCTRL_OFFSET (0x0278) + +/* PDP, VID3SKIPCTRL, VID3HSKIP +*/ +#define PDP_VID3SKIPCTRL_VID3HSKIP_MASK (0x0FFF0000) +#define PDP_VID3SKIPCTRL_VID3HSKIP_LSBMASK (0x00000FFF) +#define PDP_VID3SKIPCTRL_VID3HSKIP_SHIFT (16) +#define PDP_VID3SKIPCTRL_VID3HSKIP_LENGTH (12) +#define PDP_VID3SKIPCTRL_VID3HSKIP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3SKIPCTRL, VID3VSKIP +*/ +#define PDP_VID3SKIPCTRL_VID3VSKIP_MASK (0x00000FFF) +#define PDP_VID3SKIPCTRL_VID3VSKIP_LSBMASK (0x00000FFF) +#define PDP_VID3SKIPCTRL_VID3VSKIP_SHIFT (0) +#define PDP_VID3SKIPCTRL_VID3VSKIP_LENGTH (12) +#define PDP_VID3SKIPCTRL_VID3VSKIP_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4SKIPCTRL_OFFSET (0x027C) + +/* PDP, VID4SKIPCTRL, VID4HSKIP +*/ +#define PDP_VID4SKIPCTRL_VID4HSKIP_MASK (0x0FFF0000) +#define PDP_VID4SKIPCTRL_VID4HSKIP_LSBMASK (0x00000FFF) +#define PDP_VID4SKIPCTRL_VID4HSKIP_SHIFT (16) +#define PDP_VID4SKIPCTRL_VID4HSKIP_LENGTH (12) +#define PDP_VID4SKIPCTRL_VID4HSKIP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4SKIPCTRL, VID4VSKIP +*/ +#define PDP_VID4SKIPCTRL_VID4VSKIP_MASK (0x00000FFF) +#define PDP_VID4SKIPCTRL_VID4VSKIP_LSBMASK (0x00000FFF) +#define PDP_VID4SKIPCTRL_VID4VSKIP_SHIFT (0) +#define PDP_VID4SKIPCTRL_VID4VSKIP_LENGTH (12) +#define PDP_VID4SKIPCTRL_VID4VSKIP_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1SCALECTRL_OFFSET (0x0460) + +/* PDP, VID1SCALECTRL, VID1HSCALEBP +*/ +#define PDP_VID1SCALECTRL_VID1HSCALEBP_MASK (0x80000000) +#define PDP_VID1SCALECTRL_VID1HSCALEBP_LSBMASK (0x00000001) +#define PDP_VID1SCALECTRL_VID1HSCALEBP_SHIFT (31) +#define PDP_VID1SCALECTRL_VID1HSCALEBP_LENGTH (1) +#define PDP_VID1SCALECTRL_VID1HSCALEBP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SCALECTRL, VID1VSCALEBP +*/ +#define PDP_VID1SCALECTRL_VID1VSCALEBP_MASK (0x40000000) +#define PDP_VID1SCALECTRL_VID1VSCALEBP_LSBMASK (0x00000001) +#define PDP_VID1SCALECTRL_VID1VSCALEBP_SHIFT (30) +#define PDP_VID1SCALECTRL_VID1VSCALEBP_LENGTH (1) +#define 
PDP_VID1SCALECTRL_VID1VSCALEBP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SCALECTRL, VID1HSBEFOREVS +*/ +#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_MASK (0x20000000) +#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_LSBMASK (0x00000001) +#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_SHIFT (29) +#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_LENGTH (1) +#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SCALECTRL, VID1VSURUNCTRL +*/ +#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_MASK (0x08000000) +#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_LSBMASK (0x00000001) +#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_SHIFT (27) +#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_LENGTH (1) +#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SCALECTRL, VID1PAN_EN +*/ +#define PDP_VID1SCALECTRL_VID1PAN_EN_MASK (0x00040000) +#define PDP_VID1SCALECTRL_VID1PAN_EN_LSBMASK (0x00000001) +#define PDP_VID1SCALECTRL_VID1PAN_EN_SHIFT (18) +#define PDP_VID1SCALECTRL_VID1PAN_EN_LENGTH (1) +#define PDP_VID1SCALECTRL_VID1PAN_EN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SCALECTRL, VID1VORDER +*/ +#define PDP_VID1SCALECTRL_VID1VORDER_MASK (0x00030000) +#define PDP_VID1SCALECTRL_VID1VORDER_LSBMASK (0x00000003) +#define PDP_VID1SCALECTRL_VID1VORDER_SHIFT (16) +#define PDP_VID1SCALECTRL_VID1VORDER_LENGTH (2) +#define PDP_VID1SCALECTRL_VID1VORDER_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SCALECTRL, VID1VPITCH +*/ +#define PDP_VID1SCALECTRL_VID1VPITCH_MASK (0x0000FFFF) +#define PDP_VID1SCALECTRL_VID1VPITCH_LSBMASK (0x0000FFFF) +#define PDP_VID1SCALECTRL_VID1VPITCH_SHIFT (0) +#define PDP_VID1SCALECTRL_VID1VPITCH_LENGTH (16) +#define PDP_VID1SCALECTRL_VID1VPITCH_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1VSINIT_OFFSET (0x0464) + +/* PDP, VID1VSINIT, VID1VINITIAL1 +*/ +#define PDP_VID1VSINIT_VID1VINITIAL1_MASK (0xFFFF0000) +#define PDP_VID1VSINIT_VID1VINITIAL1_LSBMASK (0x0000FFFF) +#define PDP_VID1VSINIT_VID1VINITIAL1_SHIFT (16) +#define PDP_VID1VSINIT_VID1VINITIAL1_LENGTH (16) +#define 
PDP_VID1VSINIT_VID1VINITIAL1_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1VSINIT, VID1VINITIAL0 +*/ +#define PDP_VID1VSINIT_VID1VINITIAL0_MASK (0x0000FFFF) +#define PDP_VID1VSINIT_VID1VINITIAL0_LSBMASK (0x0000FFFF) +#define PDP_VID1VSINIT_VID1VINITIAL0_SHIFT (0) +#define PDP_VID1VSINIT_VID1VINITIAL0_LENGTH (16) +#define PDP_VID1VSINIT_VID1VINITIAL0_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1VCOEFF0_OFFSET (0x0468) + +/* PDP, VID1VCOEFF0, VID1VCOEFF0 +*/ +#define PDP_VID1VCOEFF0_VID1VCOEFF0_MASK (0xFFFFFFFF) +#define PDP_VID1VCOEFF0_VID1VCOEFF0_LSBMASK (0xFFFFFFFF) +#define PDP_VID1VCOEFF0_VID1VCOEFF0_SHIFT (0) +#define PDP_VID1VCOEFF0_VID1VCOEFF0_LENGTH (32) +#define PDP_VID1VCOEFF0_VID1VCOEFF0_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1VCOEFF1_OFFSET (0x046C) + +/* PDP, VID1VCOEFF1, VID1VCOEFF1 +*/ +#define PDP_VID1VCOEFF1_VID1VCOEFF1_MASK (0xFFFFFFFF) +#define PDP_VID1VCOEFF1_VID1VCOEFF1_LSBMASK (0xFFFFFFFF) +#define PDP_VID1VCOEFF1_VID1VCOEFF1_SHIFT (0) +#define PDP_VID1VCOEFF1_VID1VCOEFF1_LENGTH (32) +#define PDP_VID1VCOEFF1_VID1VCOEFF1_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1VCOEFF2_OFFSET (0x0470) + +/* PDP, VID1VCOEFF2, VID1VCOEFF2 +*/ +#define PDP_VID1VCOEFF2_VID1VCOEFF2_MASK (0xFFFFFFFF) +#define PDP_VID1VCOEFF2_VID1VCOEFF2_LSBMASK (0xFFFFFFFF) +#define PDP_VID1VCOEFF2_VID1VCOEFF2_SHIFT (0) +#define PDP_VID1VCOEFF2_VID1VCOEFF2_LENGTH (32) +#define PDP_VID1VCOEFF2_VID1VCOEFF2_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1VCOEFF3_OFFSET (0x0474) + +/* PDP, VID1VCOEFF3, VID1VCOEFF3 +*/ +#define PDP_VID1VCOEFF3_VID1VCOEFF3_MASK (0xFFFFFFFF) +#define PDP_VID1VCOEFF3_VID1VCOEFF3_LSBMASK (0xFFFFFFFF) +#define PDP_VID1VCOEFF3_VID1VCOEFF3_SHIFT (0) +#define PDP_VID1VCOEFF3_VID1VCOEFF3_LENGTH (32) +#define PDP_VID1VCOEFF3_VID1VCOEFF3_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1VCOEFF4_OFFSET (0x0478) + +/* PDP, VID1VCOEFF4, VID1VCOEFF4 +*/ +#define PDP_VID1VCOEFF4_VID1VCOEFF4_MASK (0xFFFFFFFF) +#define PDP_VID1VCOEFF4_VID1VCOEFF4_LSBMASK (0xFFFFFFFF) +#define 
PDP_VID1VCOEFF4_VID1VCOEFF4_SHIFT (0) +#define PDP_VID1VCOEFF4_VID1VCOEFF4_LENGTH (32) +#define PDP_VID1VCOEFF4_VID1VCOEFF4_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1VCOEFF5_OFFSET (0x047C) + +/* PDP, VID1VCOEFF5, VID1VCOEFF5 +*/ +#define PDP_VID1VCOEFF5_VID1VCOEFF5_MASK (0xFFFFFFFF) +#define PDP_VID1VCOEFF5_VID1VCOEFF5_LSBMASK (0xFFFFFFFF) +#define PDP_VID1VCOEFF5_VID1VCOEFF5_SHIFT (0) +#define PDP_VID1VCOEFF5_VID1VCOEFF5_LENGTH (32) +#define PDP_VID1VCOEFF5_VID1VCOEFF5_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1VCOEFF6_OFFSET (0x0480) + +/* PDP, VID1VCOEFF6, VID1VCOEFF6 +*/ +#define PDP_VID1VCOEFF6_VID1VCOEFF6_MASK (0xFFFFFFFF) +#define PDP_VID1VCOEFF6_VID1VCOEFF6_LSBMASK (0xFFFFFFFF) +#define PDP_VID1VCOEFF6_VID1VCOEFF6_SHIFT (0) +#define PDP_VID1VCOEFF6_VID1VCOEFF6_LENGTH (32) +#define PDP_VID1VCOEFF6_VID1VCOEFF6_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1VCOEFF7_OFFSET (0x0484) + +/* PDP, VID1VCOEFF7, VID1VCOEFF7 +*/ +#define PDP_VID1VCOEFF7_VID1VCOEFF7_MASK (0xFFFFFFFF) +#define PDP_VID1VCOEFF7_VID1VCOEFF7_LSBMASK (0xFFFFFFFF) +#define PDP_VID1VCOEFF7_VID1VCOEFF7_SHIFT (0) +#define PDP_VID1VCOEFF7_VID1VCOEFF7_LENGTH (32) +#define PDP_VID1VCOEFF7_VID1VCOEFF7_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1VCOEFF8_OFFSET (0x0488) + +/* PDP, VID1VCOEFF8, VID1VCOEFF8 +*/ +#define PDP_VID1VCOEFF8_VID1VCOEFF8_MASK (0x000000FF) +#define PDP_VID1VCOEFF8_VID1VCOEFF8_LSBMASK (0x000000FF) +#define PDP_VID1VCOEFF8_VID1VCOEFF8_SHIFT (0) +#define PDP_VID1VCOEFF8_VID1VCOEFF8_LENGTH (8) +#define PDP_VID1VCOEFF8_VID1VCOEFF8_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1HSINIT_OFFSET (0x048C) + +/* PDP, VID1HSINIT, VID1HINITIAL +*/ +#define PDP_VID1HSINIT_VID1HINITIAL_MASK (0xFFFF0000) +#define PDP_VID1HSINIT_VID1HINITIAL_LSBMASK (0x0000FFFF) +#define PDP_VID1HSINIT_VID1HINITIAL_SHIFT (16) +#define PDP_VID1HSINIT_VID1HINITIAL_LENGTH (16) +#define PDP_VID1HSINIT_VID1HINITIAL_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1HSINIT, VID1HPITCH +*/ +#define PDP_VID1HSINIT_VID1HPITCH_MASK (0x0000FFFF) 
+#define PDP_VID1HSINIT_VID1HPITCH_LSBMASK (0x0000FFFF) +#define PDP_VID1HSINIT_VID1HPITCH_SHIFT (0) +#define PDP_VID1HSINIT_VID1HPITCH_LENGTH (16) +#define PDP_VID1HSINIT_VID1HPITCH_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1HCOEFF0_OFFSET (0x0490) + +/* PDP, VID1HCOEFF0, VID1HCOEFF0 +*/ +#define PDP_VID1HCOEFF0_VID1HCOEFF0_MASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF0_VID1HCOEFF0_LSBMASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF0_VID1HCOEFF0_SHIFT (0) +#define PDP_VID1HCOEFF0_VID1HCOEFF0_LENGTH (32) +#define PDP_VID1HCOEFF0_VID1HCOEFF0_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1HCOEFF1_OFFSET (0x0494) + +/* PDP, VID1HCOEFF1, VID1HCOEFF1 +*/ +#define PDP_VID1HCOEFF1_VID1HCOEFF1_MASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF1_VID1HCOEFF1_LSBMASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF1_VID1HCOEFF1_SHIFT (0) +#define PDP_VID1HCOEFF1_VID1HCOEFF1_LENGTH (32) +#define PDP_VID1HCOEFF1_VID1HCOEFF1_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1HCOEFF2_OFFSET (0x0498) + +/* PDP, VID1HCOEFF2, VID1HCOEFF2 +*/ +#define PDP_VID1HCOEFF2_VID1HCOEFF2_MASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF2_VID1HCOEFF2_LSBMASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF2_VID1HCOEFF2_SHIFT (0) +#define PDP_VID1HCOEFF2_VID1HCOEFF2_LENGTH (32) +#define PDP_VID1HCOEFF2_VID1HCOEFF2_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1HCOEFF3_OFFSET (0x049C) + +/* PDP, VID1HCOEFF3, VID1HCOEFF3 +*/ +#define PDP_VID1HCOEFF3_VID1HCOEFF3_MASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF3_VID1HCOEFF3_LSBMASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF3_VID1HCOEFF3_SHIFT (0) +#define PDP_VID1HCOEFF3_VID1HCOEFF3_LENGTH (32) +#define PDP_VID1HCOEFF3_VID1HCOEFF3_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1HCOEFF4_OFFSET (0x04A0) + +/* PDP, VID1HCOEFF4, VID1HCOEFF4 +*/ +#define PDP_VID1HCOEFF4_VID1HCOEFF4_MASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF4_VID1HCOEFF4_LSBMASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF4_VID1HCOEFF4_SHIFT (0) +#define PDP_VID1HCOEFF4_VID1HCOEFF4_LENGTH (32) +#define PDP_VID1HCOEFF4_VID1HCOEFF4_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1HCOEFF5_OFFSET 
(0x04A4) + +/* PDP, VID1HCOEFF5, VID1HCOEFF5 +*/ +#define PDP_VID1HCOEFF5_VID1HCOEFF5_MASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF5_VID1HCOEFF5_LSBMASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF5_VID1HCOEFF5_SHIFT (0) +#define PDP_VID1HCOEFF5_VID1HCOEFF5_LENGTH (32) +#define PDP_VID1HCOEFF5_VID1HCOEFF5_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1HCOEFF6_OFFSET (0x04A8) + +/* PDP, VID1HCOEFF6, VID1HCOEFF6 +*/ +#define PDP_VID1HCOEFF6_VID1HCOEFF6_MASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF6_VID1HCOEFF6_LSBMASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF6_VID1HCOEFF6_SHIFT (0) +#define PDP_VID1HCOEFF6_VID1HCOEFF6_LENGTH (32) +#define PDP_VID1HCOEFF6_VID1HCOEFF6_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1HCOEFF7_OFFSET (0x04AC) + +/* PDP, VID1HCOEFF7, VID1HCOEFF7 +*/ +#define PDP_VID1HCOEFF7_VID1HCOEFF7_MASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF7_VID1HCOEFF7_LSBMASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF7_VID1HCOEFF7_SHIFT (0) +#define PDP_VID1HCOEFF7_VID1HCOEFF7_LENGTH (32) +#define PDP_VID1HCOEFF7_VID1HCOEFF7_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1HCOEFF8_OFFSET (0x04B0) + +/* PDP, VID1HCOEFF8, VID1HCOEFF8 +*/ +#define PDP_VID1HCOEFF8_VID1HCOEFF8_MASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF8_VID1HCOEFF8_LSBMASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF8_VID1HCOEFF8_SHIFT (0) +#define PDP_VID1HCOEFF8_VID1HCOEFF8_LENGTH (32) +#define PDP_VID1HCOEFF8_VID1HCOEFF8_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1HCOEFF9_OFFSET (0x04B4) + +/* PDP, VID1HCOEFF9, VID1HCOEFF9 +*/ +#define PDP_VID1HCOEFF9_VID1HCOEFF9_MASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF9_VID1HCOEFF9_LSBMASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF9_VID1HCOEFF9_SHIFT (0) +#define PDP_VID1HCOEFF9_VID1HCOEFF9_LENGTH (32) +#define PDP_VID1HCOEFF9_VID1HCOEFF9_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1HCOEFF10_OFFSET (0x04B8) + +/* PDP, VID1HCOEFF10, VID1HCOEFF10 +*/ +#define PDP_VID1HCOEFF10_VID1HCOEFF10_MASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF10_VID1HCOEFF10_LSBMASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF10_VID1HCOEFF10_SHIFT (0) +#define 
PDP_VID1HCOEFF10_VID1HCOEFF10_LENGTH (32) +#define PDP_VID1HCOEFF10_VID1HCOEFF10_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1HCOEFF11_OFFSET (0x04BC) + +/* PDP, VID1HCOEFF11, VID1HCOEFF11 +*/ +#define PDP_VID1HCOEFF11_VID1HCOEFF11_MASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF11_VID1HCOEFF11_LSBMASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF11_VID1HCOEFF11_SHIFT (0) +#define PDP_VID1HCOEFF11_VID1HCOEFF11_LENGTH (32) +#define PDP_VID1HCOEFF11_VID1HCOEFF11_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1HCOEFF12_OFFSET (0x04C0) + +/* PDP, VID1HCOEFF12, VID1HCOEFF12 +*/ +#define PDP_VID1HCOEFF12_VID1HCOEFF12_MASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF12_VID1HCOEFF12_LSBMASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF12_VID1HCOEFF12_SHIFT (0) +#define PDP_VID1HCOEFF12_VID1HCOEFF12_LENGTH (32) +#define PDP_VID1HCOEFF12_VID1HCOEFF12_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1HCOEFF13_OFFSET (0x04C4) + +/* PDP, VID1HCOEFF13, VID1HCOEFF13 +*/ +#define PDP_VID1HCOEFF13_VID1HCOEFF13_MASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF13_VID1HCOEFF13_LSBMASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF13_VID1HCOEFF13_SHIFT (0) +#define PDP_VID1HCOEFF13_VID1HCOEFF13_LENGTH (32) +#define PDP_VID1HCOEFF13_VID1HCOEFF13_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1HCOEFF14_OFFSET (0x04C8) + +/* PDP, VID1HCOEFF14, VID1HCOEFF14 +*/ +#define PDP_VID1HCOEFF14_VID1HCOEFF14_MASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF14_VID1HCOEFF14_LSBMASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF14_VID1HCOEFF14_SHIFT (0) +#define PDP_VID1HCOEFF14_VID1HCOEFF14_LENGTH (32) +#define PDP_VID1HCOEFF14_VID1HCOEFF14_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1HCOEFF15_OFFSET (0x04CC) + +/* PDP, VID1HCOEFF15, VID1HCOEFF15 +*/ +#define PDP_VID1HCOEFF15_VID1HCOEFF15_MASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF15_VID1HCOEFF15_LSBMASK (0xFFFFFFFF) +#define PDP_VID1HCOEFF15_VID1HCOEFF15_SHIFT (0) +#define PDP_VID1HCOEFF15_VID1HCOEFF15_LENGTH (32) +#define PDP_VID1HCOEFF15_VID1HCOEFF15_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1HCOEFF16_OFFSET (0x04D0) + +/* PDP, VID1HCOEFF16, 
VID1HCOEFF16 +*/ +#define PDP_VID1HCOEFF16_VID1HCOEFF16_MASK (0x000000FF) +#define PDP_VID1HCOEFF16_VID1HCOEFF16_LSBMASK (0x000000FF) +#define PDP_VID1HCOEFF16_VID1HCOEFF16_SHIFT (0) +#define PDP_VID1HCOEFF16_VID1HCOEFF16_LENGTH (8) +#define PDP_VID1HCOEFF16_VID1HCOEFF16_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1SCALESIZE_OFFSET (0x04D4) + +/* PDP, VID1SCALESIZE, VID1SCALEWIDTH +*/ +#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_MASK (0x0FFF0000) +#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_LSBMASK (0x00000FFF) +#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_SHIFT (16) +#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_LENGTH (12) +#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1SCALESIZE, VID1SCALEHEIGHT +*/ +#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_MASK (0x00000FFF) +#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LSBMASK (0x00000FFF) +#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SHIFT (0) +#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LENGTH (12) +#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SIGNED_FIELD IMG_FALSE + +#define PDP_CORE_ID_OFFSET (0x04E0) + +/* PDP, PVR_PDP_CORE_ID, GROUP_ID +*/ +#define PDP_CORE_ID_GROUP_ID_MASK (0xFF000000) +#define PDP_CORE_ID_GROUP_ID_LSBMASK (0x000000FF) +#define PDP_CORE_ID_GROUP_ID_SHIFT (24) +#define PDP_CORE_ID_GROUP_ID_LENGTH (8) +#define PDP_CORE_ID_GROUP_ID_SIGNED_FIELD IMG_FALSE + +/* PDP, PVR_PDP_CORE_ID, CORE_ID +*/ +#define PDP_CORE_ID_CORE_ID_MASK (0x00FF0000) +#define PDP_CORE_ID_CORE_ID_LSBMASK (0x000000FF) +#define PDP_CORE_ID_CORE_ID_SHIFT (16) +#define PDP_CORE_ID_CORE_ID_LENGTH (8) +#define PDP_CORE_ID_CORE_ID_SIGNED_FIELD IMG_FALSE + +/* PDP, PVR_PDP_CORE_ID, CONFIG_ID +*/ +#define PDP_CORE_ID_CONFIG_ID_MASK (0x0000FFFF) +#define PDP_CORE_ID_CONFIG_ID_LSBMASK (0x0000FFFF) +#define PDP_CORE_ID_CONFIG_ID_SHIFT (0) +#define PDP_CORE_ID_CONFIG_ID_LENGTH (16) +#define PDP_CORE_ID_CONFIG_ID_SIGNED_FIELD IMG_FALSE + +#define PDP_CORE_REV_OFFSET (0x04F0) + +/* PDP, PVR_PDP_CORE_REV, MAJOR_REV +*/ +#define 
PDP_CORE_REV_MAJOR_REV_MASK (0x00FF0000) +#define PDP_CORE_REV_MAJOR_REV_LSBMASK (0x000000FF) +#define PDP_CORE_REV_MAJOR_REV_SHIFT (16) +#define PDP_CORE_REV_MAJOR_REV_LENGTH (8) +#define PDP_CORE_REV_MAJOR_REV_SIGNED_FIELD IMG_FALSE + +/* PDP, PVR_PDP_CORE_REV, MINOR_REV +*/ +#define PDP_CORE_REV_MINOR_REV_MASK (0x0000FF00) +#define PDP_CORE_REV_MINOR_REV_LSBMASK (0x000000FF) +#define PDP_CORE_REV_MINOR_REV_SHIFT (8) +#define PDP_CORE_REV_MINOR_REV_LENGTH (8) +#define PDP_CORE_REV_MINOR_REV_SIGNED_FIELD IMG_FALSE + +/* PDP, PVR_PDP_CORE_REV, MAINT_REV +*/ +#define PDP_CORE_REV_MAINT_REV_MASK (0x000000FF) +#define PDP_CORE_REV_MAINT_REV_LSBMASK (0x000000FF) +#define PDP_CORE_REV_MAINT_REV_SHIFT (0) +#define PDP_CORE_REV_MAINT_REV_LENGTH (8) +#define PDP_CORE_REV_MAINT_REV_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2SCALECTRL_OFFSET (0x0500) + +/* PDP, VID2SCALECTRL, VID2HSCALEBP +*/ +#define PDP_VID2SCALECTRL_VID2HSCALEBP_MASK (0x80000000) +#define PDP_VID2SCALECTRL_VID2HSCALEBP_LSBMASK (0x00000001) +#define PDP_VID2SCALECTRL_VID2HSCALEBP_SHIFT (31) +#define PDP_VID2SCALECTRL_VID2HSCALEBP_LENGTH (1) +#define PDP_VID2SCALECTRL_VID2HSCALEBP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SCALECTRL, VID2VSCALEBP +*/ +#define PDP_VID2SCALECTRL_VID2VSCALEBP_MASK (0x40000000) +#define PDP_VID2SCALECTRL_VID2VSCALEBP_LSBMASK (0x00000001) +#define PDP_VID2SCALECTRL_VID2VSCALEBP_SHIFT (30) +#define PDP_VID2SCALECTRL_VID2VSCALEBP_LENGTH (1) +#define PDP_VID2SCALECTRL_VID2VSCALEBP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SCALECTRL, VID2HSBEFOREVS +*/ +#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_MASK (0x20000000) +#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_LSBMASK (0x00000001) +#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_SHIFT (29) +#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_LENGTH (1) +#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SCALECTRL, VID2VSURUNCTRL +*/ +#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_MASK (0x08000000) +#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_LSBMASK 
(0x00000001) +#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_SHIFT (27) +#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_LENGTH (1) +#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SCALECTRL, VID2PAN_EN +*/ +#define PDP_VID2SCALECTRL_VID2PAN_EN_MASK (0x00040000) +#define PDP_VID2SCALECTRL_VID2PAN_EN_LSBMASK (0x00000001) +#define PDP_VID2SCALECTRL_VID2PAN_EN_SHIFT (18) +#define PDP_VID2SCALECTRL_VID2PAN_EN_LENGTH (1) +#define PDP_VID2SCALECTRL_VID2PAN_EN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SCALECTRL, VID2VORDER +*/ +#define PDP_VID2SCALECTRL_VID2VORDER_MASK (0x00030000) +#define PDP_VID2SCALECTRL_VID2VORDER_LSBMASK (0x00000003) +#define PDP_VID2SCALECTRL_VID2VORDER_SHIFT (16) +#define PDP_VID2SCALECTRL_VID2VORDER_LENGTH (2) +#define PDP_VID2SCALECTRL_VID2VORDER_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SCALECTRL, VID2VPITCH +*/ +#define PDP_VID2SCALECTRL_VID2VPITCH_MASK (0x0000FFFF) +#define PDP_VID2SCALECTRL_VID2VPITCH_LSBMASK (0x0000FFFF) +#define PDP_VID2SCALECTRL_VID2VPITCH_SHIFT (0) +#define PDP_VID2SCALECTRL_VID2VPITCH_LENGTH (16) +#define PDP_VID2SCALECTRL_VID2VPITCH_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2VSINIT_OFFSET (0x0504) + +/* PDP, VID2VSINIT, VID2VINITIAL1 +*/ +#define PDP_VID2VSINIT_VID2VINITIAL1_MASK (0xFFFF0000) +#define PDP_VID2VSINIT_VID2VINITIAL1_LSBMASK (0x0000FFFF) +#define PDP_VID2VSINIT_VID2VINITIAL1_SHIFT (16) +#define PDP_VID2VSINIT_VID2VINITIAL1_LENGTH (16) +#define PDP_VID2VSINIT_VID2VINITIAL1_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2VSINIT, VID2VINITIAL0 +*/ +#define PDP_VID2VSINIT_VID2VINITIAL0_MASK (0x0000FFFF) +#define PDP_VID2VSINIT_VID2VINITIAL0_LSBMASK (0x0000FFFF) +#define PDP_VID2VSINIT_VID2VINITIAL0_SHIFT (0) +#define PDP_VID2VSINIT_VID2VINITIAL0_LENGTH (16) +#define PDP_VID2VSINIT_VID2VINITIAL0_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2VCOEFF0_OFFSET (0x0508) + +/* PDP, VID2VCOEFF0, VID2VCOEFF0 +*/ +#define PDP_VID2VCOEFF0_VID2VCOEFF0_MASK (0xFFFFFFFF) +#define PDP_VID2VCOEFF0_VID2VCOEFF0_LSBMASK (0xFFFFFFFF) 
+#define PDP_VID2VCOEFF0_VID2VCOEFF0_SHIFT (0) +#define PDP_VID2VCOEFF0_VID2VCOEFF0_LENGTH (32) +#define PDP_VID2VCOEFF0_VID2VCOEFF0_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2VCOEFF1_OFFSET (0x050C) + +/* PDP, VID2VCOEFF1, VID2VCOEFF1 +*/ +#define PDP_VID2VCOEFF1_VID2VCOEFF1_MASK (0xFFFFFFFF) +#define PDP_VID2VCOEFF1_VID2VCOEFF1_LSBMASK (0xFFFFFFFF) +#define PDP_VID2VCOEFF1_VID2VCOEFF1_SHIFT (0) +#define PDP_VID2VCOEFF1_VID2VCOEFF1_LENGTH (32) +#define PDP_VID2VCOEFF1_VID2VCOEFF1_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2VCOEFF2_OFFSET (0x0510) + +/* PDP, VID2VCOEFF2, VID2VCOEFF2 +*/ +#define PDP_VID2VCOEFF2_VID2VCOEFF2_MASK (0xFFFFFFFF) +#define PDP_VID2VCOEFF2_VID2VCOEFF2_LSBMASK (0xFFFFFFFF) +#define PDP_VID2VCOEFF2_VID2VCOEFF2_SHIFT (0) +#define PDP_VID2VCOEFF2_VID2VCOEFF2_LENGTH (32) +#define PDP_VID2VCOEFF2_VID2VCOEFF2_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2VCOEFF3_OFFSET (0x0514) + +/* PDP, VID2VCOEFF3, VID2VCOEFF3 +*/ +#define PDP_VID2VCOEFF3_VID2VCOEFF3_MASK (0xFFFFFFFF) +#define PDP_VID2VCOEFF3_VID2VCOEFF3_LSBMASK (0xFFFFFFFF) +#define PDP_VID2VCOEFF3_VID2VCOEFF3_SHIFT (0) +#define PDP_VID2VCOEFF3_VID2VCOEFF3_LENGTH (32) +#define PDP_VID2VCOEFF3_VID2VCOEFF3_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2VCOEFF4_OFFSET (0x0518) + +/* PDP, VID2VCOEFF4, VID2VCOEFF4 +*/ +#define PDP_VID2VCOEFF4_VID2VCOEFF4_MASK (0xFFFFFFFF) +#define PDP_VID2VCOEFF4_VID2VCOEFF4_LSBMASK (0xFFFFFFFF) +#define PDP_VID2VCOEFF4_VID2VCOEFF4_SHIFT (0) +#define PDP_VID2VCOEFF4_VID2VCOEFF4_LENGTH (32) +#define PDP_VID2VCOEFF4_VID2VCOEFF4_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2VCOEFF5_OFFSET (0x051C) + +/* PDP, VID2VCOEFF5, VID2VCOEFF5 +*/ +#define PDP_VID2VCOEFF5_VID2VCOEFF5_MASK (0xFFFFFFFF) +#define PDP_VID2VCOEFF5_VID2VCOEFF5_LSBMASK (0xFFFFFFFF) +#define PDP_VID2VCOEFF5_VID2VCOEFF5_SHIFT (0) +#define PDP_VID2VCOEFF5_VID2VCOEFF5_LENGTH (32) +#define PDP_VID2VCOEFF5_VID2VCOEFF5_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2VCOEFF6_OFFSET (0x0520) + +/* PDP, VID2VCOEFF6, VID2VCOEFF6 +*/ 
+#define PDP_VID2VCOEFF6_VID2VCOEFF6_MASK (0xFFFFFFFF) +#define PDP_VID2VCOEFF6_VID2VCOEFF6_LSBMASK (0xFFFFFFFF) +#define PDP_VID2VCOEFF6_VID2VCOEFF6_SHIFT (0) +#define PDP_VID2VCOEFF6_VID2VCOEFF6_LENGTH (32) +#define PDP_VID2VCOEFF6_VID2VCOEFF6_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2VCOEFF7_OFFSET (0x0524) + +/* PDP, VID2VCOEFF7, VID2VCOEFF7 +*/ +#define PDP_VID2VCOEFF7_VID2VCOEFF7_MASK (0xFFFFFFFF) +#define PDP_VID2VCOEFF7_VID2VCOEFF7_LSBMASK (0xFFFFFFFF) +#define PDP_VID2VCOEFF7_VID2VCOEFF7_SHIFT (0) +#define PDP_VID2VCOEFF7_VID2VCOEFF7_LENGTH (32) +#define PDP_VID2VCOEFF7_VID2VCOEFF7_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2VCOEFF8_OFFSET (0x0528) + +/* PDP, VID2VCOEFF8, VID2VCOEFF8 +*/ +#define PDP_VID2VCOEFF8_VID2VCOEFF8_MASK (0x000000FF) +#define PDP_VID2VCOEFF8_VID2VCOEFF8_LSBMASK (0x000000FF) +#define PDP_VID2VCOEFF8_VID2VCOEFF8_SHIFT (0) +#define PDP_VID2VCOEFF8_VID2VCOEFF8_LENGTH (8) +#define PDP_VID2VCOEFF8_VID2VCOEFF8_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2HSINIT_OFFSET (0x052C) + +/* PDP, VID2HSINIT, VID2HINITIAL +*/ +#define PDP_VID2HSINIT_VID2HINITIAL_MASK (0xFFFF0000) +#define PDP_VID2HSINIT_VID2HINITIAL_LSBMASK (0x0000FFFF) +#define PDP_VID2HSINIT_VID2HINITIAL_SHIFT (16) +#define PDP_VID2HSINIT_VID2HINITIAL_LENGTH (16) +#define PDP_VID2HSINIT_VID2HINITIAL_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2HSINIT, VID2HPITCH +*/ +#define PDP_VID2HSINIT_VID2HPITCH_MASK (0x0000FFFF) +#define PDP_VID2HSINIT_VID2HPITCH_LSBMASK (0x0000FFFF) +#define PDP_VID2HSINIT_VID2HPITCH_SHIFT (0) +#define PDP_VID2HSINIT_VID2HPITCH_LENGTH (16) +#define PDP_VID2HSINIT_VID2HPITCH_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2HCOEFF0_OFFSET (0x0530) + +/* PDP, VID2HCOEFF0, VID2HCOEFF0 +*/ +#define PDP_VID2HCOEFF0_VID2HCOEFF0_MASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF0_VID2HCOEFF0_LSBMASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF0_VID2HCOEFF0_SHIFT (0) +#define PDP_VID2HCOEFF0_VID2HCOEFF0_LENGTH (32) +#define PDP_VID2HCOEFF0_VID2HCOEFF0_SIGNED_FIELD IMG_FALSE + +#define 
PDP_VID2HCOEFF1_OFFSET (0x0534) + +/* PDP, VID2HCOEFF1, VID2HCOEFF1 +*/ +#define PDP_VID2HCOEFF1_VID2HCOEFF1_MASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF1_VID2HCOEFF1_LSBMASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF1_VID2HCOEFF1_SHIFT (0) +#define PDP_VID2HCOEFF1_VID2HCOEFF1_LENGTH (32) +#define PDP_VID2HCOEFF1_VID2HCOEFF1_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2HCOEFF2_OFFSET (0x0538) + +/* PDP, VID2HCOEFF2, VID2HCOEFF2 +*/ +#define PDP_VID2HCOEFF2_VID2HCOEFF2_MASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF2_VID2HCOEFF2_LSBMASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF2_VID2HCOEFF2_SHIFT (0) +#define PDP_VID2HCOEFF2_VID2HCOEFF2_LENGTH (32) +#define PDP_VID2HCOEFF2_VID2HCOEFF2_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2HCOEFF3_OFFSET (0x053C) + +/* PDP, VID2HCOEFF3, VID2HCOEFF3 +*/ +#define PDP_VID2HCOEFF3_VID2HCOEFF3_MASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF3_VID2HCOEFF3_LSBMASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF3_VID2HCOEFF3_SHIFT (0) +#define PDP_VID2HCOEFF3_VID2HCOEFF3_LENGTH (32) +#define PDP_VID2HCOEFF3_VID2HCOEFF3_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2HCOEFF4_OFFSET (0x0540) + +/* PDP, VID2HCOEFF4, VID2HCOEFF4 +*/ +#define PDP_VID2HCOEFF4_VID2HCOEFF4_MASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF4_VID2HCOEFF4_LSBMASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF4_VID2HCOEFF4_SHIFT (0) +#define PDP_VID2HCOEFF4_VID2HCOEFF4_LENGTH (32) +#define PDP_VID2HCOEFF4_VID2HCOEFF4_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2HCOEFF5_OFFSET (0x0544) + +/* PDP, VID2HCOEFF5, VID2HCOEFF5 +*/ +#define PDP_VID2HCOEFF5_VID2HCOEFF5_MASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF5_VID2HCOEFF5_LSBMASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF5_VID2HCOEFF5_SHIFT (0) +#define PDP_VID2HCOEFF5_VID2HCOEFF5_LENGTH (32) +#define PDP_VID2HCOEFF5_VID2HCOEFF5_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2HCOEFF6_OFFSET (0x0548) + +/* PDP, VID2HCOEFF6, VID2HCOEFF6 +*/ +#define PDP_VID2HCOEFF6_VID2HCOEFF6_MASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF6_VID2HCOEFF6_LSBMASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF6_VID2HCOEFF6_SHIFT (0) 
+#define PDP_VID2HCOEFF6_VID2HCOEFF6_LENGTH (32) +#define PDP_VID2HCOEFF6_VID2HCOEFF6_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2HCOEFF7_OFFSET (0x054C) + +/* PDP, VID2HCOEFF7, VID2HCOEFF7 +*/ +#define PDP_VID2HCOEFF7_VID2HCOEFF7_MASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF7_VID2HCOEFF7_LSBMASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF7_VID2HCOEFF7_SHIFT (0) +#define PDP_VID2HCOEFF7_VID2HCOEFF7_LENGTH (32) +#define PDP_VID2HCOEFF7_VID2HCOEFF7_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2HCOEFF8_OFFSET (0x0550) + +/* PDP, VID2HCOEFF8, VID2HCOEFF8 +*/ +#define PDP_VID2HCOEFF8_VID2HCOEFF8_MASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF8_VID2HCOEFF8_LSBMASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF8_VID2HCOEFF8_SHIFT (0) +#define PDP_VID2HCOEFF8_VID2HCOEFF8_LENGTH (32) +#define PDP_VID2HCOEFF8_VID2HCOEFF8_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2HCOEFF9_OFFSET (0x0554) + +/* PDP, VID2HCOEFF9, VID2HCOEFF9 +*/ +#define PDP_VID2HCOEFF9_VID2HCOEFF9_MASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF9_VID2HCOEFF9_LSBMASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF9_VID2HCOEFF9_SHIFT (0) +#define PDP_VID2HCOEFF9_VID2HCOEFF9_LENGTH (32) +#define PDP_VID2HCOEFF9_VID2HCOEFF9_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2HCOEFF10_OFFSET (0x0558) + +/* PDP, VID2HCOEFF10, VID2HCOEFF10 +*/ +#define PDP_VID2HCOEFF10_VID2HCOEFF10_MASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF10_VID2HCOEFF10_LSBMASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF10_VID2HCOEFF10_SHIFT (0) +#define PDP_VID2HCOEFF10_VID2HCOEFF10_LENGTH (32) +#define PDP_VID2HCOEFF10_VID2HCOEFF10_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2HCOEFF11_OFFSET (0x055C) + +/* PDP, VID2HCOEFF11, VID2HCOEFF11 +*/ +#define PDP_VID2HCOEFF11_VID2HCOEFF11_MASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF11_VID2HCOEFF11_LSBMASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF11_VID2HCOEFF11_SHIFT (0) +#define PDP_VID2HCOEFF11_VID2HCOEFF11_LENGTH (32) +#define PDP_VID2HCOEFF11_VID2HCOEFF11_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2HCOEFF12_OFFSET (0x0560) + +/* PDP, VID2HCOEFF12, VID2HCOEFF12 +*/ +#define 
PDP_VID2HCOEFF12_VID2HCOEFF12_MASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF12_VID2HCOEFF12_LSBMASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF12_VID2HCOEFF12_SHIFT (0) +#define PDP_VID2HCOEFF12_VID2HCOEFF12_LENGTH (32) +#define PDP_VID2HCOEFF12_VID2HCOEFF12_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2HCOEFF13_OFFSET (0x0564) + +/* PDP, VID2HCOEFF13, VID2HCOEFF13 +*/ +#define PDP_VID2HCOEFF13_VID2HCOEFF13_MASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF13_VID2HCOEFF13_LSBMASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF13_VID2HCOEFF13_SHIFT (0) +#define PDP_VID2HCOEFF13_VID2HCOEFF13_LENGTH (32) +#define PDP_VID2HCOEFF13_VID2HCOEFF13_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2HCOEFF14_OFFSET (0x0568) + +/* PDP, VID2HCOEFF14, VID2HCOEFF14 +*/ +#define PDP_VID2HCOEFF14_VID2HCOEFF14_MASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF14_VID2HCOEFF14_LSBMASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF14_VID2HCOEFF14_SHIFT (0) +#define PDP_VID2HCOEFF14_VID2HCOEFF14_LENGTH (32) +#define PDP_VID2HCOEFF14_VID2HCOEFF14_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2HCOEFF15_OFFSET (0x056C) + +/* PDP, VID2HCOEFF15, VID2HCOEFF15 +*/ +#define PDP_VID2HCOEFF15_VID2HCOEFF15_MASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF15_VID2HCOEFF15_LSBMASK (0xFFFFFFFF) +#define PDP_VID2HCOEFF15_VID2HCOEFF15_SHIFT (0) +#define PDP_VID2HCOEFF15_VID2HCOEFF15_LENGTH (32) +#define PDP_VID2HCOEFF15_VID2HCOEFF15_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2HCOEFF16_OFFSET (0x0570) + +/* PDP, VID2HCOEFF16, VID2HCOEFF16 +*/ +#define PDP_VID2HCOEFF16_VID2HCOEFF16_MASK (0x000000FF) +#define PDP_VID2HCOEFF16_VID2HCOEFF16_LSBMASK (0x000000FF) +#define PDP_VID2HCOEFF16_VID2HCOEFF16_SHIFT (0) +#define PDP_VID2HCOEFF16_VID2HCOEFF16_LENGTH (8) +#define PDP_VID2HCOEFF16_VID2HCOEFF16_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2SCALESIZE_OFFSET (0x0574) + +/* PDP, VID2SCALESIZE, VID2SCALEWIDTH +*/ +#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_MASK (0x0FFF0000) +#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_LSBMASK (0x00000FFF) +#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_SHIFT (16) 
+#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_LENGTH (12) +#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2SCALESIZE, VID2SCALEHEIGHT +*/ +#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_MASK (0x00000FFF) +#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LSBMASK (0x00000FFF) +#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SHIFT (0) +#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LENGTH (12) +#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3SCALECTRL_OFFSET (0x0578) + +/* PDP, VID3SCALECTRL, VID3HSCALEBP +*/ +#define PDP_VID3SCALECTRL_VID3HSCALEBP_MASK (0x80000000) +#define PDP_VID3SCALECTRL_VID3HSCALEBP_LSBMASK (0x00000001) +#define PDP_VID3SCALECTRL_VID3HSCALEBP_SHIFT (31) +#define PDP_VID3SCALECTRL_VID3HSCALEBP_LENGTH (1) +#define PDP_VID3SCALECTRL_VID3HSCALEBP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3SCALECTRL, VID3VSCALEBP +*/ +#define PDP_VID3SCALECTRL_VID3VSCALEBP_MASK (0x40000000) +#define PDP_VID3SCALECTRL_VID3VSCALEBP_LSBMASK (0x00000001) +#define PDP_VID3SCALECTRL_VID3VSCALEBP_SHIFT (30) +#define PDP_VID3SCALECTRL_VID3VSCALEBP_LENGTH (1) +#define PDP_VID3SCALECTRL_VID3VSCALEBP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3SCALECTRL, VID3HSBEFOREVS +*/ +#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_MASK (0x20000000) +#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_LSBMASK (0x00000001) +#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_SHIFT (29) +#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_LENGTH (1) +#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3SCALECTRL, VID3VSURUNCTRL +*/ +#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_MASK (0x08000000) +#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_LSBMASK (0x00000001) +#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_SHIFT (27) +#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_LENGTH (1) +#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3SCALECTRL, VID3PAN_EN +*/ +#define PDP_VID3SCALECTRL_VID3PAN_EN_MASK (0x00040000) +#define PDP_VID3SCALECTRL_VID3PAN_EN_LSBMASK (0x00000001) 
+#define PDP_VID3SCALECTRL_VID3PAN_EN_SHIFT (18) +#define PDP_VID3SCALECTRL_VID3PAN_EN_LENGTH (1) +#define PDP_VID3SCALECTRL_VID3PAN_EN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3SCALECTRL, VID3VORDER +*/ +#define PDP_VID3SCALECTRL_VID3VORDER_MASK (0x00030000) +#define PDP_VID3SCALECTRL_VID3VORDER_LSBMASK (0x00000003) +#define PDP_VID3SCALECTRL_VID3VORDER_SHIFT (16) +#define PDP_VID3SCALECTRL_VID3VORDER_LENGTH (2) +#define PDP_VID3SCALECTRL_VID3VORDER_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3SCALECTRL, VID3VPITCH +*/ +#define PDP_VID3SCALECTRL_VID3VPITCH_MASK (0x0000FFFF) +#define PDP_VID3SCALECTRL_VID3VPITCH_LSBMASK (0x0000FFFF) +#define PDP_VID3SCALECTRL_VID3VPITCH_SHIFT (0) +#define PDP_VID3SCALECTRL_VID3VPITCH_LENGTH (16) +#define PDP_VID3SCALECTRL_VID3VPITCH_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3VSINIT_OFFSET (0x057C) + +/* PDP, VID3VSINIT, VID3VINITIAL1 +*/ +#define PDP_VID3VSINIT_VID3VINITIAL1_MASK (0xFFFF0000) +#define PDP_VID3VSINIT_VID3VINITIAL1_LSBMASK (0x0000FFFF) +#define PDP_VID3VSINIT_VID3VINITIAL1_SHIFT (16) +#define PDP_VID3VSINIT_VID3VINITIAL1_LENGTH (16) +#define PDP_VID3VSINIT_VID3VINITIAL1_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3VSINIT, VID3VINITIAL0 +*/ +#define PDP_VID3VSINIT_VID3VINITIAL0_MASK (0x0000FFFF) +#define PDP_VID3VSINIT_VID3VINITIAL0_LSBMASK (0x0000FFFF) +#define PDP_VID3VSINIT_VID3VINITIAL0_SHIFT (0) +#define PDP_VID3VSINIT_VID3VINITIAL0_LENGTH (16) +#define PDP_VID3VSINIT_VID3VINITIAL0_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3VCOEFF0_OFFSET (0x0580) + +/* PDP, VID3VCOEFF0, VID3VCOEFF0 +*/ +#define PDP_VID3VCOEFF0_VID3VCOEFF0_MASK (0xFFFFFFFF) +#define PDP_VID3VCOEFF0_VID3VCOEFF0_LSBMASK (0xFFFFFFFF) +#define PDP_VID3VCOEFF0_VID3VCOEFF0_SHIFT (0) +#define PDP_VID3VCOEFF0_VID3VCOEFF0_LENGTH (32) +#define PDP_VID3VCOEFF0_VID3VCOEFF0_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3VCOEFF1_OFFSET (0x0584) + +/* PDP, VID3VCOEFF1, VID3VCOEFF1 +*/ +#define PDP_VID3VCOEFF1_VID3VCOEFF1_MASK (0xFFFFFFFF) +#define PDP_VID3VCOEFF1_VID3VCOEFF1_LSBMASK 
(0xFFFFFFFF) +#define PDP_VID3VCOEFF1_VID3VCOEFF1_SHIFT (0) +#define PDP_VID3VCOEFF1_VID3VCOEFF1_LENGTH (32) +#define PDP_VID3VCOEFF1_VID3VCOEFF1_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3VCOEFF2_OFFSET (0x0588) + +/* PDP, VID3VCOEFF2, VID3VCOEFF2 +*/ +#define PDP_VID3VCOEFF2_VID3VCOEFF2_MASK (0xFFFFFFFF) +#define PDP_VID3VCOEFF2_VID3VCOEFF2_LSBMASK (0xFFFFFFFF) +#define PDP_VID3VCOEFF2_VID3VCOEFF2_SHIFT (0) +#define PDP_VID3VCOEFF2_VID3VCOEFF2_LENGTH (32) +#define PDP_VID3VCOEFF2_VID3VCOEFF2_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3VCOEFF3_OFFSET (0x058C) + +/* PDP, VID3VCOEFF3, VID3VCOEFF3 +*/ +#define PDP_VID3VCOEFF3_VID3VCOEFF3_MASK (0xFFFFFFFF) +#define PDP_VID3VCOEFF3_VID3VCOEFF3_LSBMASK (0xFFFFFFFF) +#define PDP_VID3VCOEFF3_VID3VCOEFF3_SHIFT (0) +#define PDP_VID3VCOEFF3_VID3VCOEFF3_LENGTH (32) +#define PDP_VID3VCOEFF3_VID3VCOEFF3_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3VCOEFF4_OFFSET (0x0590) + +/* PDP, VID3VCOEFF4, VID3VCOEFF4 +*/ +#define PDP_VID3VCOEFF4_VID3VCOEFF4_MASK (0xFFFFFFFF) +#define PDP_VID3VCOEFF4_VID3VCOEFF4_LSBMASK (0xFFFFFFFF) +#define PDP_VID3VCOEFF4_VID3VCOEFF4_SHIFT (0) +#define PDP_VID3VCOEFF4_VID3VCOEFF4_LENGTH (32) +#define PDP_VID3VCOEFF4_VID3VCOEFF4_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3VCOEFF5_OFFSET (0x0594) + +/* PDP, VID3VCOEFF5, VID3VCOEFF5 +*/ +#define PDP_VID3VCOEFF5_VID3VCOEFF5_MASK (0xFFFFFFFF) +#define PDP_VID3VCOEFF5_VID3VCOEFF5_LSBMASK (0xFFFFFFFF) +#define PDP_VID3VCOEFF5_VID3VCOEFF5_SHIFT (0) +#define PDP_VID3VCOEFF5_VID3VCOEFF5_LENGTH (32) +#define PDP_VID3VCOEFF5_VID3VCOEFF5_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3VCOEFF6_OFFSET (0x0598) + +/* PDP, VID3VCOEFF6, VID3VCOEFF6 +*/ +#define PDP_VID3VCOEFF6_VID3VCOEFF6_MASK (0xFFFFFFFF) +#define PDP_VID3VCOEFF6_VID3VCOEFF6_LSBMASK (0xFFFFFFFF) +#define PDP_VID3VCOEFF6_VID3VCOEFF6_SHIFT (0) +#define PDP_VID3VCOEFF6_VID3VCOEFF6_LENGTH (32) +#define PDP_VID3VCOEFF6_VID3VCOEFF6_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3VCOEFF7_OFFSET (0x059C) + +/* PDP, VID3VCOEFF7, 
VID3VCOEFF7 +*/ +#define PDP_VID3VCOEFF7_VID3VCOEFF7_MASK (0xFFFFFFFF) +#define PDP_VID3VCOEFF7_VID3VCOEFF7_LSBMASK (0xFFFFFFFF) +#define PDP_VID3VCOEFF7_VID3VCOEFF7_SHIFT (0) +#define PDP_VID3VCOEFF7_VID3VCOEFF7_LENGTH (32) +#define PDP_VID3VCOEFF7_VID3VCOEFF7_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3VCOEFF8_OFFSET (0x05A0) + +/* PDP, VID3VCOEFF8, VID3VCOEFF8 +*/ +#define PDP_VID3VCOEFF8_VID3VCOEFF8_MASK (0x000000FF) +#define PDP_VID3VCOEFF8_VID3VCOEFF8_LSBMASK (0x000000FF) +#define PDP_VID3VCOEFF8_VID3VCOEFF8_SHIFT (0) +#define PDP_VID3VCOEFF8_VID3VCOEFF8_LENGTH (8) +#define PDP_VID3VCOEFF8_VID3VCOEFF8_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3HSINIT_OFFSET (0x05A4) + +/* PDP, VID3HSINIT, VID3HINITIAL +*/ +#define PDP_VID3HSINIT_VID3HINITIAL_MASK (0xFFFF0000) +#define PDP_VID3HSINIT_VID3HINITIAL_LSBMASK (0x0000FFFF) +#define PDP_VID3HSINIT_VID3HINITIAL_SHIFT (16) +#define PDP_VID3HSINIT_VID3HINITIAL_LENGTH (16) +#define PDP_VID3HSINIT_VID3HINITIAL_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3HSINIT, VID3HPITCH +*/ +#define PDP_VID3HSINIT_VID3HPITCH_MASK (0x0000FFFF) +#define PDP_VID3HSINIT_VID3HPITCH_LSBMASK (0x0000FFFF) +#define PDP_VID3HSINIT_VID3HPITCH_SHIFT (0) +#define PDP_VID3HSINIT_VID3HPITCH_LENGTH (16) +#define PDP_VID3HSINIT_VID3HPITCH_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3HCOEFF0_OFFSET (0x05A8) + +/* PDP, VID3HCOEFF0, VID3HCOEFF0 +*/ +#define PDP_VID3HCOEFF0_VID3HCOEFF0_MASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF0_VID3HCOEFF0_LSBMASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF0_VID3HCOEFF0_SHIFT (0) +#define PDP_VID3HCOEFF0_VID3HCOEFF0_LENGTH (32) +#define PDP_VID3HCOEFF0_VID3HCOEFF0_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3HCOEFF1_OFFSET (0x05AC) + +/* PDP, VID3HCOEFF1, VID3HCOEFF1 +*/ +#define PDP_VID3HCOEFF1_VID3HCOEFF1_MASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF1_VID3HCOEFF1_LSBMASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF1_VID3HCOEFF1_SHIFT (0) +#define PDP_VID3HCOEFF1_VID3HCOEFF1_LENGTH (32) +#define PDP_VID3HCOEFF1_VID3HCOEFF1_SIGNED_FIELD IMG_FALSE + +#define 
PDP_VID3HCOEFF2_OFFSET (0x05B0) + +/* PDP, VID3HCOEFF2, VID3HCOEFF2 +*/ +#define PDP_VID3HCOEFF2_VID3HCOEFF2_MASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF2_VID3HCOEFF2_LSBMASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF2_VID3HCOEFF2_SHIFT (0) +#define PDP_VID3HCOEFF2_VID3HCOEFF2_LENGTH (32) +#define PDP_VID3HCOEFF2_VID3HCOEFF2_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3HCOEFF3_OFFSET (0x05B4) + +/* PDP, VID3HCOEFF3, VID3HCOEFF3 +*/ +#define PDP_VID3HCOEFF3_VID3HCOEFF3_MASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF3_VID3HCOEFF3_LSBMASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF3_VID3HCOEFF3_SHIFT (0) +#define PDP_VID3HCOEFF3_VID3HCOEFF3_LENGTH (32) +#define PDP_VID3HCOEFF3_VID3HCOEFF3_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3HCOEFF4_OFFSET (0x05B8) + +/* PDP, VID3HCOEFF4, VID3HCOEFF4 +*/ +#define PDP_VID3HCOEFF4_VID3HCOEFF4_MASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF4_VID3HCOEFF4_LSBMASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF4_VID3HCOEFF4_SHIFT (0) +#define PDP_VID3HCOEFF4_VID3HCOEFF4_LENGTH (32) +#define PDP_VID3HCOEFF4_VID3HCOEFF4_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3HCOEFF5_OFFSET (0x05BC) + +/* PDP, VID3HCOEFF5, VID3HCOEFF5 +*/ +#define PDP_VID3HCOEFF5_VID3HCOEFF5_MASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF5_VID3HCOEFF5_LSBMASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF5_VID3HCOEFF5_SHIFT (0) +#define PDP_VID3HCOEFF5_VID3HCOEFF5_LENGTH (32) +#define PDP_VID3HCOEFF5_VID3HCOEFF5_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3HCOEFF6_OFFSET (0x05C0) + +/* PDP, VID3HCOEFF6, VID3HCOEFF6 +*/ +#define PDP_VID3HCOEFF6_VID3HCOEFF6_MASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF6_VID3HCOEFF6_LSBMASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF6_VID3HCOEFF6_SHIFT (0) +#define PDP_VID3HCOEFF6_VID3HCOEFF6_LENGTH (32) +#define PDP_VID3HCOEFF6_VID3HCOEFF6_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3HCOEFF7_OFFSET (0x05C4) + +/* PDP, VID3HCOEFF7, VID3HCOEFF7 +*/ +#define PDP_VID3HCOEFF7_VID3HCOEFF7_MASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF7_VID3HCOEFF7_LSBMASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF7_VID3HCOEFF7_SHIFT (0) 
+#define PDP_VID3HCOEFF7_VID3HCOEFF7_LENGTH (32) +#define PDP_VID3HCOEFF7_VID3HCOEFF7_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3HCOEFF8_OFFSET (0x05C8) + +/* PDP, VID3HCOEFF8, VID3HCOEFF8 +*/ +#define PDP_VID3HCOEFF8_VID3HCOEFF8_MASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF8_VID3HCOEFF8_LSBMASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF8_VID3HCOEFF8_SHIFT (0) +#define PDP_VID3HCOEFF8_VID3HCOEFF8_LENGTH (32) +#define PDP_VID3HCOEFF8_VID3HCOEFF8_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3HCOEFF9_OFFSET (0x05CC) + +/* PDP, VID3HCOEFF9, VID3HCOEFF9 +*/ +#define PDP_VID3HCOEFF9_VID3HCOEFF9_MASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF9_VID3HCOEFF9_LSBMASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF9_VID3HCOEFF9_SHIFT (0) +#define PDP_VID3HCOEFF9_VID3HCOEFF9_LENGTH (32) +#define PDP_VID3HCOEFF9_VID3HCOEFF9_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3HCOEFF10_OFFSET (0x05D0) + +/* PDP, VID3HCOEFF10, VID3HCOEFF10 +*/ +#define PDP_VID3HCOEFF10_VID3HCOEFF10_MASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF10_VID3HCOEFF10_LSBMASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF10_VID3HCOEFF10_SHIFT (0) +#define PDP_VID3HCOEFF10_VID3HCOEFF10_LENGTH (32) +#define PDP_VID3HCOEFF10_VID3HCOEFF10_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3HCOEFF11_OFFSET (0x05D4) + +/* PDP, VID3HCOEFF11, VID3HCOEFF11 +*/ +#define PDP_VID3HCOEFF11_VID3HCOEFF11_MASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF11_VID3HCOEFF11_LSBMASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF11_VID3HCOEFF11_SHIFT (0) +#define PDP_VID3HCOEFF11_VID3HCOEFF11_LENGTH (32) +#define PDP_VID3HCOEFF11_VID3HCOEFF11_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3HCOEFF12_OFFSET (0x05D8) + +/* PDP, VID3HCOEFF12, VID3HCOEFF12 +*/ +#define PDP_VID3HCOEFF12_VID3HCOEFF12_MASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF12_VID3HCOEFF12_LSBMASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF12_VID3HCOEFF12_SHIFT (0) +#define PDP_VID3HCOEFF12_VID3HCOEFF12_LENGTH (32) +#define PDP_VID3HCOEFF12_VID3HCOEFF12_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3HCOEFF13_OFFSET (0x05DC) + +/* PDP, VID3HCOEFF13, VID3HCOEFF13 +*/ 
+#define PDP_VID3HCOEFF13_VID3HCOEFF13_MASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF13_VID3HCOEFF13_LSBMASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF13_VID3HCOEFF13_SHIFT (0) +#define PDP_VID3HCOEFF13_VID3HCOEFF13_LENGTH (32) +#define PDP_VID3HCOEFF13_VID3HCOEFF13_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3HCOEFF14_OFFSET (0x05E0) + +/* PDP, VID3HCOEFF14, VID3HCOEFF14 +*/ +#define PDP_VID3HCOEFF14_VID3HCOEFF14_MASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF14_VID3HCOEFF14_LSBMASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF14_VID3HCOEFF14_SHIFT (0) +#define PDP_VID3HCOEFF14_VID3HCOEFF14_LENGTH (32) +#define PDP_VID3HCOEFF14_VID3HCOEFF14_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3HCOEFF15_OFFSET (0x05E4) + +/* PDP, VID3HCOEFF15, VID3HCOEFF15 +*/ +#define PDP_VID3HCOEFF15_VID3HCOEFF15_MASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF15_VID3HCOEFF15_LSBMASK (0xFFFFFFFF) +#define PDP_VID3HCOEFF15_VID3HCOEFF15_SHIFT (0) +#define PDP_VID3HCOEFF15_VID3HCOEFF15_LENGTH (32) +#define PDP_VID3HCOEFF15_VID3HCOEFF15_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3HCOEFF16_OFFSET (0x05E8) + +/* PDP, VID3HCOEFF16, VID3HCOEFF16 +*/ +#define PDP_VID3HCOEFF16_VID3HCOEFF16_MASK (0x000000FF) +#define PDP_VID3HCOEFF16_VID3HCOEFF16_LSBMASK (0x000000FF) +#define PDP_VID3HCOEFF16_VID3HCOEFF16_SHIFT (0) +#define PDP_VID3HCOEFF16_VID3HCOEFF16_LENGTH (8) +#define PDP_VID3HCOEFF16_VID3HCOEFF16_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3SCALESIZE_OFFSET (0x05EC) + +/* PDP, VID3SCALESIZE, VID3SCALEWIDTH +*/ +#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_MASK (0x0FFF0000) +#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_LSBMASK (0x00000FFF) +#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_SHIFT (16) +#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_LENGTH (12) +#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3SCALESIZE, VID3SCALEHEIGHT +*/ +#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_MASK (0x00000FFF) +#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LSBMASK (0x00000FFF) +#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SHIFT (0) +#define 
PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LENGTH (12) +#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4SCALECTRL_OFFSET (0x05F0) + +/* PDP, VID4SCALECTRL, VID4HSCALEBP +*/ +#define PDP_VID4SCALECTRL_VID4HSCALEBP_MASK (0x80000000) +#define PDP_VID4SCALECTRL_VID4HSCALEBP_LSBMASK (0x00000001) +#define PDP_VID4SCALECTRL_VID4HSCALEBP_SHIFT (31) +#define PDP_VID4SCALECTRL_VID4HSCALEBP_LENGTH (1) +#define PDP_VID4SCALECTRL_VID4HSCALEBP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4SCALECTRL, VID4VSCALEBP +*/ +#define PDP_VID4SCALECTRL_VID4VSCALEBP_MASK (0x40000000) +#define PDP_VID4SCALECTRL_VID4VSCALEBP_LSBMASK (0x00000001) +#define PDP_VID4SCALECTRL_VID4VSCALEBP_SHIFT (30) +#define PDP_VID4SCALECTRL_VID4VSCALEBP_LENGTH (1) +#define PDP_VID4SCALECTRL_VID4VSCALEBP_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4SCALECTRL, VID4HSBEFOREVS +*/ +#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_MASK (0x20000000) +#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_LSBMASK (0x00000001) +#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_SHIFT (29) +#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_LENGTH (1) +#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4SCALECTRL, VID4VSURUNCTRL +*/ +#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_MASK (0x08000000) +#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_LSBMASK (0x00000001) +#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_SHIFT (27) +#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_LENGTH (1) +#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4SCALECTRL, VID4PAN_EN +*/ +#define PDP_VID4SCALECTRL_VID4PAN_EN_MASK (0x00040000) +#define PDP_VID4SCALECTRL_VID4PAN_EN_LSBMASK (0x00000001) +#define PDP_VID4SCALECTRL_VID4PAN_EN_SHIFT (18) +#define PDP_VID4SCALECTRL_VID4PAN_EN_LENGTH (1) +#define PDP_VID4SCALECTRL_VID4PAN_EN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4SCALECTRL, VID4VORDER +*/ +#define PDP_VID4SCALECTRL_VID4VORDER_MASK (0x00030000) +#define PDP_VID4SCALECTRL_VID4VORDER_LSBMASK (0x00000003) +#define 
PDP_VID4SCALECTRL_VID4VORDER_SHIFT (16) +#define PDP_VID4SCALECTRL_VID4VORDER_LENGTH (2) +#define PDP_VID4SCALECTRL_VID4VORDER_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4SCALECTRL, VID4VPITCH +*/ +#define PDP_VID4SCALECTRL_VID4VPITCH_MASK (0x0000FFFF) +#define PDP_VID4SCALECTRL_VID4VPITCH_LSBMASK (0x0000FFFF) +#define PDP_VID4SCALECTRL_VID4VPITCH_SHIFT (0) +#define PDP_VID4SCALECTRL_VID4VPITCH_LENGTH (16) +#define PDP_VID4SCALECTRL_VID4VPITCH_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4VSINIT_OFFSET (0x05F4) + +/* PDP, VID4VSINIT, VID4VINITIAL1 +*/ +#define PDP_VID4VSINIT_VID4VINITIAL1_MASK (0xFFFF0000) +#define PDP_VID4VSINIT_VID4VINITIAL1_LSBMASK (0x0000FFFF) +#define PDP_VID4VSINIT_VID4VINITIAL1_SHIFT (16) +#define PDP_VID4VSINIT_VID4VINITIAL1_LENGTH (16) +#define PDP_VID4VSINIT_VID4VINITIAL1_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4VSINIT, VID4VINITIAL0 +*/ +#define PDP_VID4VSINIT_VID4VINITIAL0_MASK (0x0000FFFF) +#define PDP_VID4VSINIT_VID4VINITIAL0_LSBMASK (0x0000FFFF) +#define PDP_VID4VSINIT_VID4VINITIAL0_SHIFT (0) +#define PDP_VID4VSINIT_VID4VINITIAL0_LENGTH (16) +#define PDP_VID4VSINIT_VID4VINITIAL0_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4VCOEFF0_OFFSET (0x05F8) + +/* PDP, VID4VCOEFF0, VID4VCOEFF0 +*/ +#define PDP_VID4VCOEFF0_VID4VCOEFF0_MASK (0xFFFFFFFF) +#define PDP_VID4VCOEFF0_VID4VCOEFF0_LSBMASK (0xFFFFFFFF) +#define PDP_VID4VCOEFF0_VID4VCOEFF0_SHIFT (0) +#define PDP_VID4VCOEFF0_VID4VCOEFF0_LENGTH (32) +#define PDP_VID4VCOEFF0_VID4VCOEFF0_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4VCOEFF1_OFFSET (0x05FC) + +/* PDP, VID4VCOEFF1, VID4VCOEFF1 +*/ +#define PDP_VID4VCOEFF1_VID4VCOEFF1_MASK (0xFFFFFFFF) +#define PDP_VID4VCOEFF1_VID4VCOEFF1_LSBMASK (0xFFFFFFFF) +#define PDP_VID4VCOEFF1_VID4VCOEFF1_SHIFT (0) +#define PDP_VID4VCOEFF1_VID4VCOEFF1_LENGTH (32) +#define PDP_VID4VCOEFF1_VID4VCOEFF1_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4VCOEFF2_OFFSET (0x0600) + +/* PDP, VID4VCOEFF2, VID4VCOEFF2 +*/ +#define PDP_VID4VCOEFF2_VID4VCOEFF2_MASK (0xFFFFFFFF) +#define 
PDP_VID4VCOEFF2_VID4VCOEFF2_LSBMASK (0xFFFFFFFF) +#define PDP_VID4VCOEFF2_VID4VCOEFF2_SHIFT (0) +#define PDP_VID4VCOEFF2_VID4VCOEFF2_LENGTH (32) +#define PDP_VID4VCOEFF2_VID4VCOEFF2_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4VCOEFF3_OFFSET (0x0604) + +/* PDP, VID4VCOEFF3, VID4VCOEFF3 +*/ +#define PDP_VID4VCOEFF3_VID4VCOEFF3_MASK (0xFFFFFFFF) +#define PDP_VID4VCOEFF3_VID4VCOEFF3_LSBMASK (0xFFFFFFFF) +#define PDP_VID4VCOEFF3_VID4VCOEFF3_SHIFT (0) +#define PDP_VID4VCOEFF3_VID4VCOEFF3_LENGTH (32) +#define PDP_VID4VCOEFF3_VID4VCOEFF3_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4VCOEFF4_OFFSET (0x0608) + +/* PDP, VID4VCOEFF4, VID4VCOEFF4 +*/ +#define PDP_VID4VCOEFF4_VID4VCOEFF4_MASK (0xFFFFFFFF) +#define PDP_VID4VCOEFF4_VID4VCOEFF4_LSBMASK (0xFFFFFFFF) +#define PDP_VID4VCOEFF4_VID4VCOEFF4_SHIFT (0) +#define PDP_VID4VCOEFF4_VID4VCOEFF4_LENGTH (32) +#define PDP_VID4VCOEFF4_VID4VCOEFF4_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4VCOEFF5_OFFSET (0x060C) + +/* PDP, VID4VCOEFF5, VID4VCOEFF5 +*/ +#define PDP_VID4VCOEFF5_VID4VCOEFF5_MASK (0xFFFFFFFF) +#define PDP_VID4VCOEFF5_VID4VCOEFF5_LSBMASK (0xFFFFFFFF) +#define PDP_VID4VCOEFF5_VID4VCOEFF5_SHIFT (0) +#define PDP_VID4VCOEFF5_VID4VCOEFF5_LENGTH (32) +#define PDP_VID4VCOEFF5_VID4VCOEFF5_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4VCOEFF6_OFFSET (0x0610) + +/* PDP, VID4VCOEFF6, VID4VCOEFF6 +*/ +#define PDP_VID4VCOEFF6_VID4VCOEFF6_MASK (0xFFFFFFFF) +#define PDP_VID4VCOEFF6_VID4VCOEFF6_LSBMASK (0xFFFFFFFF) +#define PDP_VID4VCOEFF6_VID4VCOEFF6_SHIFT (0) +#define PDP_VID4VCOEFF6_VID4VCOEFF6_LENGTH (32) +#define PDP_VID4VCOEFF6_VID4VCOEFF6_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4VCOEFF7_OFFSET (0x0614) + +/* PDP, VID4VCOEFF7, VID4VCOEFF7 +*/ +#define PDP_VID4VCOEFF7_VID4VCOEFF7_MASK (0xFFFFFFFF) +#define PDP_VID4VCOEFF7_VID4VCOEFF7_LSBMASK (0xFFFFFFFF) +#define PDP_VID4VCOEFF7_VID4VCOEFF7_SHIFT (0) +#define PDP_VID4VCOEFF7_VID4VCOEFF7_LENGTH (32) +#define PDP_VID4VCOEFF7_VID4VCOEFF7_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4VCOEFF8_OFFSET 
(0x0618) + +/* PDP, VID4VCOEFF8, VID4VCOEFF8 +*/ +#define PDP_VID4VCOEFF8_VID4VCOEFF8_MASK (0x000000FF) +#define PDP_VID4VCOEFF8_VID4VCOEFF8_LSBMASK (0x000000FF) +#define PDP_VID4VCOEFF8_VID4VCOEFF8_SHIFT (0) +#define PDP_VID4VCOEFF8_VID4VCOEFF8_LENGTH (8) +#define PDP_VID4VCOEFF8_VID4VCOEFF8_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4HSINIT_OFFSET (0x061C) + +/* PDP, VID4HSINIT, VID4HINITIAL +*/ +#define PDP_VID4HSINIT_VID4HINITIAL_MASK (0xFFFF0000) +#define PDP_VID4HSINIT_VID4HINITIAL_LSBMASK (0x0000FFFF) +#define PDP_VID4HSINIT_VID4HINITIAL_SHIFT (16) +#define PDP_VID4HSINIT_VID4HINITIAL_LENGTH (16) +#define PDP_VID4HSINIT_VID4HINITIAL_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4HSINIT, VID4HPITCH +*/ +#define PDP_VID4HSINIT_VID4HPITCH_MASK (0x0000FFFF) +#define PDP_VID4HSINIT_VID4HPITCH_LSBMASK (0x0000FFFF) +#define PDP_VID4HSINIT_VID4HPITCH_SHIFT (0) +#define PDP_VID4HSINIT_VID4HPITCH_LENGTH (16) +#define PDP_VID4HSINIT_VID4HPITCH_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4HCOEFF0_OFFSET (0x0620) + +/* PDP, VID4HCOEFF0, VID4HCOEFF0 +*/ +#define PDP_VID4HCOEFF0_VID4HCOEFF0_MASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF0_VID4HCOEFF0_LSBMASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF0_VID4HCOEFF0_SHIFT (0) +#define PDP_VID4HCOEFF0_VID4HCOEFF0_LENGTH (32) +#define PDP_VID4HCOEFF0_VID4HCOEFF0_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4HCOEFF1_OFFSET (0x0624) + +/* PDP, VID4HCOEFF1, VID4HCOEFF1 +*/ +#define PDP_VID4HCOEFF1_VID4HCOEFF1_MASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF1_VID4HCOEFF1_LSBMASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF1_VID4HCOEFF1_SHIFT (0) +#define PDP_VID4HCOEFF1_VID4HCOEFF1_LENGTH (32) +#define PDP_VID4HCOEFF1_VID4HCOEFF1_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4HCOEFF2_OFFSET (0x0628) + +/* PDP, VID4HCOEFF2, VID4HCOEFF2 +*/ +#define PDP_VID4HCOEFF2_VID4HCOEFF2_MASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF2_VID4HCOEFF2_LSBMASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF2_VID4HCOEFF2_SHIFT (0) +#define PDP_VID4HCOEFF2_VID4HCOEFF2_LENGTH (32) +#define 
PDP_VID4HCOEFF2_VID4HCOEFF2_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4HCOEFF3_OFFSET (0x062C) + +/* PDP, VID4HCOEFF3, VID4HCOEFF3 +*/ +#define PDP_VID4HCOEFF3_VID4HCOEFF3_MASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF3_VID4HCOEFF3_LSBMASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF3_VID4HCOEFF3_SHIFT (0) +#define PDP_VID4HCOEFF3_VID4HCOEFF3_LENGTH (32) +#define PDP_VID4HCOEFF3_VID4HCOEFF3_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4HCOEFF4_OFFSET (0x0630) + +/* PDP, VID4HCOEFF4, VID4HCOEFF4 +*/ +#define PDP_VID4HCOEFF4_VID4HCOEFF4_MASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF4_VID4HCOEFF4_LSBMASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF4_VID4HCOEFF4_SHIFT (0) +#define PDP_VID4HCOEFF4_VID4HCOEFF4_LENGTH (32) +#define PDP_VID4HCOEFF4_VID4HCOEFF4_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4HCOEFF5_OFFSET (0x0634) + +/* PDP, VID4HCOEFF5, VID4HCOEFF5 +*/ +#define PDP_VID4HCOEFF5_VID4HCOEFF5_MASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF5_VID4HCOEFF5_LSBMASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF5_VID4HCOEFF5_SHIFT (0) +#define PDP_VID4HCOEFF5_VID4HCOEFF5_LENGTH (32) +#define PDP_VID4HCOEFF5_VID4HCOEFF5_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4HCOEFF6_OFFSET (0x0638) + +/* PDP, VID4HCOEFF6, VID4HCOEFF6 +*/ +#define PDP_VID4HCOEFF6_VID4HCOEFF6_MASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF6_VID4HCOEFF6_LSBMASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF6_VID4HCOEFF6_SHIFT (0) +#define PDP_VID4HCOEFF6_VID4HCOEFF6_LENGTH (32) +#define PDP_VID4HCOEFF6_VID4HCOEFF6_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4HCOEFF7_OFFSET (0x063C) + +/* PDP, VID4HCOEFF7, VID4HCOEFF7 +*/ +#define PDP_VID4HCOEFF7_VID4HCOEFF7_MASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF7_VID4HCOEFF7_LSBMASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF7_VID4HCOEFF7_SHIFT (0) +#define PDP_VID4HCOEFF7_VID4HCOEFF7_LENGTH (32) +#define PDP_VID4HCOEFF7_VID4HCOEFF7_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4HCOEFF8_OFFSET (0x0640) + +/* PDP, VID4HCOEFF8, VID4HCOEFF8 +*/ +#define PDP_VID4HCOEFF8_VID4HCOEFF8_MASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF8_VID4HCOEFF8_LSBMASK 
(0xFFFFFFFF) +#define PDP_VID4HCOEFF8_VID4HCOEFF8_SHIFT (0) +#define PDP_VID4HCOEFF8_VID4HCOEFF8_LENGTH (32) +#define PDP_VID4HCOEFF8_VID4HCOEFF8_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4HCOEFF9_OFFSET (0x0644) + +/* PDP, VID4HCOEFF9, VID4HCOEFF9 +*/ +#define PDP_VID4HCOEFF9_VID4HCOEFF9_MASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF9_VID4HCOEFF9_LSBMASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF9_VID4HCOEFF9_SHIFT (0) +#define PDP_VID4HCOEFF9_VID4HCOEFF9_LENGTH (32) +#define PDP_VID4HCOEFF9_VID4HCOEFF9_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4HCOEFF10_OFFSET (0x0648) + +/* PDP, VID4HCOEFF10, VID4HCOEFF10 +*/ +#define PDP_VID4HCOEFF10_VID4HCOEFF10_MASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF10_VID4HCOEFF10_LSBMASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF10_VID4HCOEFF10_SHIFT (0) +#define PDP_VID4HCOEFF10_VID4HCOEFF10_LENGTH (32) +#define PDP_VID4HCOEFF10_VID4HCOEFF10_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4HCOEFF11_OFFSET (0x064C) + +/* PDP, VID4HCOEFF11, VID4HCOEFF11 +*/ +#define PDP_VID4HCOEFF11_VID4HCOEFF11_MASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF11_VID4HCOEFF11_LSBMASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF11_VID4HCOEFF11_SHIFT (0) +#define PDP_VID4HCOEFF11_VID4HCOEFF11_LENGTH (32) +#define PDP_VID4HCOEFF11_VID4HCOEFF11_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4HCOEFF12_OFFSET (0x0650) + +/* PDP, VID4HCOEFF12, VID4HCOEFF12 +*/ +#define PDP_VID4HCOEFF12_VID4HCOEFF12_MASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF12_VID4HCOEFF12_LSBMASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF12_VID4HCOEFF12_SHIFT (0) +#define PDP_VID4HCOEFF12_VID4HCOEFF12_LENGTH (32) +#define PDP_VID4HCOEFF12_VID4HCOEFF12_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4HCOEFF13_OFFSET (0x0654) + +/* PDP, VID4HCOEFF13, VID4HCOEFF13 +*/ +#define PDP_VID4HCOEFF13_VID4HCOEFF13_MASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF13_VID4HCOEFF13_LSBMASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF13_VID4HCOEFF13_SHIFT (0) +#define PDP_VID4HCOEFF13_VID4HCOEFF13_LENGTH (32) +#define PDP_VID4HCOEFF13_VID4HCOEFF13_SIGNED_FIELD IMG_FALSE + +#define 
PDP_VID4HCOEFF14_OFFSET (0x0658) + +/* PDP, VID4HCOEFF14, VID4HCOEFF14 +*/ +#define PDP_VID4HCOEFF14_VID4HCOEFF14_MASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF14_VID4HCOEFF14_LSBMASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF14_VID4HCOEFF14_SHIFT (0) +#define PDP_VID4HCOEFF14_VID4HCOEFF14_LENGTH (32) +#define PDP_VID4HCOEFF14_VID4HCOEFF14_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4HCOEFF15_OFFSET (0x065C) + +/* PDP, VID4HCOEFF15, VID4HCOEFF15 +*/ +#define PDP_VID4HCOEFF15_VID4HCOEFF15_MASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF15_VID4HCOEFF15_LSBMASK (0xFFFFFFFF) +#define PDP_VID4HCOEFF15_VID4HCOEFF15_SHIFT (0) +#define PDP_VID4HCOEFF15_VID4HCOEFF15_LENGTH (32) +#define PDP_VID4HCOEFF15_VID4HCOEFF15_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4HCOEFF16_OFFSET (0x0660) + +/* PDP, VID4HCOEFF16, VID4HCOEFF16 +*/ +#define PDP_VID4HCOEFF16_VID4HCOEFF16_MASK (0x000000FF) +#define PDP_VID4HCOEFF16_VID4HCOEFF16_LSBMASK (0x000000FF) +#define PDP_VID4HCOEFF16_VID4HCOEFF16_SHIFT (0) +#define PDP_VID4HCOEFF16_VID4HCOEFF16_LENGTH (8) +#define PDP_VID4HCOEFF16_VID4HCOEFF16_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4SCALESIZE_OFFSET (0x0664) + +/* PDP, VID4SCALESIZE, VID4SCALEWIDTH +*/ +#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_MASK (0x0FFF0000) +#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_LSBMASK (0x00000FFF) +#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_SHIFT (16) +#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_LENGTH (12) +#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4SCALESIZE, VID4SCALEHEIGHT +*/ +#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_MASK (0x00000FFF) +#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LSBMASK (0x00000FFF) +#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SHIFT (0) +#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LENGTH (12) +#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SIGNED_FIELD IMG_FALSE + +#define PDP_PORTER_BLND0_OFFSET (0x0668) + +/* PDP, PORTER_BLND0, BLND0BLENDTYPE +*/ +#define PDP_PORTER_BLND0_BLND0BLENDTYPE_MASK (0x00000010) +#define 
PDP_PORTER_BLND0_BLND0BLENDTYPE_LSBMASK (0x00000001) +#define PDP_PORTER_BLND0_BLND0BLENDTYPE_SHIFT (4) +#define PDP_PORTER_BLND0_BLND0BLENDTYPE_LENGTH (1) +#define PDP_PORTER_BLND0_BLND0BLENDTYPE_SIGNED_FIELD IMG_FALSE + +/* PDP, PORTER_BLND0, BLND0PORTERMODE +*/ +#define PDP_PORTER_BLND0_BLND0PORTERMODE_MASK (0x0000000F) +#define PDP_PORTER_BLND0_BLND0PORTERMODE_LSBMASK (0x0000000F) +#define PDP_PORTER_BLND0_BLND0PORTERMODE_SHIFT (0) +#define PDP_PORTER_BLND0_BLND0PORTERMODE_LENGTH (4) +#define PDP_PORTER_BLND0_BLND0PORTERMODE_SIGNED_FIELD IMG_FALSE + +#define PDP_PORTER_BLND1_OFFSET (0x066C) + +/* PDP, PORTER_BLND1, BLND1BLENDTYPE +*/ +#define PDP_PORTER_BLND1_BLND1BLENDTYPE_MASK (0x00000010) +#define PDP_PORTER_BLND1_BLND1BLENDTYPE_LSBMASK (0x00000001) +#define PDP_PORTER_BLND1_BLND1BLENDTYPE_SHIFT (4) +#define PDP_PORTER_BLND1_BLND1BLENDTYPE_LENGTH (1) +#define PDP_PORTER_BLND1_BLND1BLENDTYPE_SIGNED_FIELD IMG_FALSE + +/* PDP, PORTER_BLND1, BLND1PORTERMODE +*/ +#define PDP_PORTER_BLND1_BLND1PORTERMODE_MASK (0x0000000F) +#define PDP_PORTER_BLND1_BLND1PORTERMODE_LSBMASK (0x0000000F) +#define PDP_PORTER_BLND1_BLND1PORTERMODE_SHIFT (0) +#define PDP_PORTER_BLND1_BLND1PORTERMODE_LENGTH (4) +#define PDP_PORTER_BLND1_BLND1PORTERMODE_SIGNED_FIELD IMG_FALSE + +#define PDP_PORTER_BLND2_OFFSET (0x0670) + +/* PDP, PORTER_BLND2, BLND2BLENDTYPE +*/ +#define PDP_PORTER_BLND2_BLND2BLENDTYPE_MASK (0x00000010) +#define PDP_PORTER_BLND2_BLND2BLENDTYPE_LSBMASK (0x00000001) +#define PDP_PORTER_BLND2_BLND2BLENDTYPE_SHIFT (4) +#define PDP_PORTER_BLND2_BLND2BLENDTYPE_LENGTH (1) +#define PDP_PORTER_BLND2_BLND2BLENDTYPE_SIGNED_FIELD IMG_FALSE + +/* PDP, PORTER_BLND2, BLND2PORTERMODE +*/ +#define PDP_PORTER_BLND2_BLND2PORTERMODE_MASK (0x0000000F) +#define PDP_PORTER_BLND2_BLND2PORTERMODE_LSBMASK (0x0000000F) +#define PDP_PORTER_BLND2_BLND2PORTERMODE_SHIFT (0) +#define PDP_PORTER_BLND2_BLND2PORTERMODE_LENGTH (4) +#define PDP_PORTER_BLND2_BLND2PORTERMODE_SIGNED_FIELD IMG_FALSE + +#define 
PDP_PORTER_BLND3_OFFSET (0x0674) + +/* PDP, PORTER_BLND3, BLND3BLENDTYPE +*/ +#define PDP_PORTER_BLND3_BLND3BLENDTYPE_MASK (0x00000010) +#define PDP_PORTER_BLND3_BLND3BLENDTYPE_LSBMASK (0x00000001) +#define PDP_PORTER_BLND3_BLND3BLENDTYPE_SHIFT (4) +#define PDP_PORTER_BLND3_BLND3BLENDTYPE_LENGTH (1) +#define PDP_PORTER_BLND3_BLND3BLENDTYPE_SIGNED_FIELD IMG_FALSE + +/* PDP, PORTER_BLND3, BLND3PORTERMODE +*/ +#define PDP_PORTER_BLND3_BLND3PORTERMODE_MASK (0x0000000F) +#define PDP_PORTER_BLND3_BLND3PORTERMODE_LSBMASK (0x0000000F) +#define PDP_PORTER_BLND3_BLND3PORTERMODE_SHIFT (0) +#define PDP_PORTER_BLND3_BLND3PORTERMODE_LENGTH (4) +#define PDP_PORTER_BLND3_BLND3PORTERMODE_SIGNED_FIELD IMG_FALSE + +#define PDP_PORTER_BLND4_OFFSET (0x0678) + +/* PDP, PORTER_BLND4, BLND4BLENDTYPE +*/ +#define PDP_PORTER_BLND4_BLND4BLENDTYPE_MASK (0x00000010) +#define PDP_PORTER_BLND4_BLND4BLENDTYPE_LSBMASK (0x00000001) +#define PDP_PORTER_BLND4_BLND4BLENDTYPE_SHIFT (4) +#define PDP_PORTER_BLND4_BLND4BLENDTYPE_LENGTH (1) +#define PDP_PORTER_BLND4_BLND4BLENDTYPE_SIGNED_FIELD IMG_FALSE + +/* PDP, PORTER_BLND4, BLND4PORTERMODE +*/ +#define PDP_PORTER_BLND4_BLND4PORTERMODE_MASK (0x0000000F) +#define PDP_PORTER_BLND4_BLND4PORTERMODE_LSBMASK (0x0000000F) +#define PDP_PORTER_BLND4_BLND4PORTERMODE_SHIFT (0) +#define PDP_PORTER_BLND4_BLND4PORTERMODE_LENGTH (4) +#define PDP_PORTER_BLND4_BLND4PORTERMODE_SIGNED_FIELD IMG_FALSE + +#define PDP_PORTER_BLND5_OFFSET (0x067C) + +/* PDP, PORTER_BLND5, BLND5BLENDTYPE +*/ +#define PDP_PORTER_BLND5_BLND5BLENDTYPE_MASK (0x00000010) +#define PDP_PORTER_BLND5_BLND5BLENDTYPE_LSBMASK (0x00000001) +#define PDP_PORTER_BLND5_BLND5BLENDTYPE_SHIFT (4) +#define PDP_PORTER_BLND5_BLND5BLENDTYPE_LENGTH (1) +#define PDP_PORTER_BLND5_BLND5BLENDTYPE_SIGNED_FIELD IMG_FALSE + +/* PDP, PORTER_BLND5, BLND5PORTERMODE +*/ +#define PDP_PORTER_BLND5_BLND5PORTERMODE_MASK (0x0000000F) +#define PDP_PORTER_BLND5_BLND5PORTERMODE_LSBMASK (0x0000000F) +#define 
PDP_PORTER_BLND5_BLND5PORTERMODE_SHIFT (0) +#define PDP_PORTER_BLND5_BLND5PORTERMODE_LENGTH (4) +#define PDP_PORTER_BLND5_BLND5PORTERMODE_SIGNED_FIELD IMG_FALSE + +#define PDP_PORTER_BLND6_OFFSET (0x0680) + +/* PDP, PORTER_BLND6, BLND6BLENDTYPE +*/ +#define PDP_PORTER_BLND6_BLND6BLENDTYPE_MASK (0x00000010) +#define PDP_PORTER_BLND6_BLND6BLENDTYPE_LSBMASK (0x00000001) +#define PDP_PORTER_BLND6_BLND6BLENDTYPE_SHIFT (4) +#define PDP_PORTER_BLND6_BLND6BLENDTYPE_LENGTH (1) +#define PDP_PORTER_BLND6_BLND6BLENDTYPE_SIGNED_FIELD IMG_FALSE + +/* PDP, PORTER_BLND6, BLND6PORTERMODE +*/ +#define PDP_PORTER_BLND6_BLND6PORTERMODE_MASK (0x0000000F) +#define PDP_PORTER_BLND6_BLND6PORTERMODE_LSBMASK (0x0000000F) +#define PDP_PORTER_BLND6_BLND6PORTERMODE_SHIFT (0) +#define PDP_PORTER_BLND6_BLND6PORTERMODE_LENGTH (4) +#define PDP_PORTER_BLND6_BLND6PORTERMODE_SIGNED_FIELD IMG_FALSE + +#define PDP_PORTER_BLND7_OFFSET (0x0684) + +/* PDP, PORTER_BLND7, BLND7BLENDTYPE +*/ +#define PDP_PORTER_BLND7_BLND7BLENDTYPE_MASK (0x00000010) +#define PDP_PORTER_BLND7_BLND7BLENDTYPE_LSBMASK (0x00000001) +#define PDP_PORTER_BLND7_BLND7BLENDTYPE_SHIFT (4) +#define PDP_PORTER_BLND7_BLND7BLENDTYPE_LENGTH (1) +#define PDP_PORTER_BLND7_BLND7BLENDTYPE_SIGNED_FIELD IMG_FALSE + +/* PDP, PORTER_BLND7, BLND7PORTERMODE +*/ +#define PDP_PORTER_BLND7_BLND7PORTERMODE_MASK (0x0000000F) +#define PDP_PORTER_BLND7_BLND7PORTERMODE_LSBMASK (0x0000000F) +#define PDP_PORTER_BLND7_BLND7PORTERMODE_SHIFT (0) +#define PDP_PORTER_BLND7_BLND7PORTERMODE_LENGTH (4) +#define PDP_PORTER_BLND7_BLND7PORTERMODE_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06C8) + +/* PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_TRANS +*/ +#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_MASK (0x03FF0000) +#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) +#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SHIFT (16) +#define 
PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LENGTH (10) +#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_OPAQUE +*/ +#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) +#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) +#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SHIFT (0) +#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LENGTH (10) +#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06CC) + +/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMAX +*/ +#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_MASK (0x03FF0000) +#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LSBMASK (0x000003FF) +#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SHIFT (16) +#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LENGTH (10) +#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMIN +*/ +#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_MASK (0x000003FF) +#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LSBMASK (0x000003FF) +#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SHIFT (0) +#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LENGTH (10) +#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1LUMAKEY_C_RG_OFFSET (0x06D0) + +/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_R +*/ +#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_MASK (0x0FFF0000) +#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LSBMASK (0x00000FFF) +#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SHIFT (16) +#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LENGTH (12) +#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_G +*/ +#define 
PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_MASK (0x00000FFF) +#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LSBMASK (0x00000FFF) +#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SHIFT (0) +#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LENGTH (12) +#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1LUMAKEY_C_B_OFFSET (0x06D4) + +/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYALPHAMULT +*/ +#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_MASK (0x20000000) +#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LSBMASK (0x00000001) +#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SHIFT (29) +#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LENGTH (1) +#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYEN +*/ +#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_MASK (0x10000000) +#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LSBMASK (0x00000001) +#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SHIFT (28) +#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LENGTH (1) +#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYOUTOFF +*/ +#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_MASK (0x03FF0000) +#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LSBMASK (0x000003FF) +#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SHIFT (16) +#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LENGTH (10) +#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYC_B +*/ +#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_MASK (0x00000FFF) +#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LSBMASK (0x00000FFF) +#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SHIFT (0) +#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LENGTH (12) +#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06D8) + +/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_TRANS +*/ +#define 
PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_MASK (0x03FF0000) +#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) +#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SHIFT (16) +#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LENGTH (10) +#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_OPAQUE +*/ +#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) +#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) +#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SHIFT (0) +#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LENGTH (10) +#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06DC) + +/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMAX +*/ +#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_MASK (0x03FF0000) +#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LSBMASK (0x000003FF) +#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SHIFT (16) +#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LENGTH (10) +#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMIN +*/ +#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_MASK (0x000003FF) +#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LSBMASK (0x000003FF) +#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SHIFT (0) +#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LENGTH (10) +#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2LUMAKEY_C_RG_OFFSET (0x06E0) + +/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_R +*/ +#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_MASK (0x0FFF0000) +#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LSBMASK 
(0x00000FFF) +#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SHIFT (16) +#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LENGTH (12) +#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_G +*/ +#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_MASK (0x00000FFF) +#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LSBMASK (0x00000FFF) +#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SHIFT (0) +#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LENGTH (12) +#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2LUMAKEY_C_B_OFFSET (0x06E4) + +/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYALPHAMULT +*/ +#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_MASK (0x20000000) +#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LSBMASK (0x00000001) +#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SHIFT (29) +#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LENGTH (1) +#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYEN +*/ +#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_MASK (0x10000000) +#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LSBMASK (0x00000001) +#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SHIFT (28) +#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LENGTH (1) +#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYOUTOFF +*/ +#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_MASK (0x03FF0000) +#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LSBMASK (0x000003FF) +#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SHIFT (16) +#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LENGTH (10) +#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYC_B +*/ +#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_MASK (0x00000FFF) +#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LSBMASK (0x00000FFF) +#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SHIFT (0) +#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LENGTH (12) +#define 
PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06E8) + +/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_TRANS +*/ +#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_MASK (0x03FF0000) +#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) +#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SHIFT (16) +#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LENGTH (10) +#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_OPAQUE +*/ +#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) +#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) +#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SHIFT (0) +#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LENGTH (10) +#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06EC) + +/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMAX +*/ +#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_MASK (0x03FF0000) +#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LSBMASK (0x000003FF) +#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SHIFT (16) +#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LENGTH (10) +#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMIN +*/ +#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_MASK (0x000003FF) +#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LSBMASK (0x000003FF) +#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SHIFT (0) +#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LENGTH (10) +#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE + +#define 
PDP_VID3LUMAKEY_C_RG_OFFSET (0x06F0) + +/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_R +*/ +#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_MASK (0x0FFF0000) +#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LSBMASK (0x00000FFF) +#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SHIFT (16) +#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LENGTH (12) +#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_G +*/ +#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_MASK (0x00000FFF) +#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LSBMASK (0x00000FFF) +#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SHIFT (0) +#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LENGTH (12) +#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3LUMAKEY_C_B_OFFSET (0x06F4) + +/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYALPHAMULT +*/ +#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_MASK (0x20000000) +#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LSBMASK (0x00000001) +#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SHIFT (29) +#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LENGTH (1) +#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYEN +*/ +#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_MASK (0x10000000) +#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LSBMASK (0x00000001) +#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SHIFT (28) +#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LENGTH (1) +#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYOUTOFF +*/ +#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_MASK (0x03FF0000) +#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LSBMASK (0x000003FF) +#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SHIFT (16) +#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LENGTH (10) +#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYC_B +*/ +#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_MASK 
(0x00000FFF) +#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LSBMASK (0x00000FFF) +#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SHIFT (0) +#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LENGTH (12) +#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06F8) + +/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_TRANS +*/ +#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_MASK (0x03FF0000) +#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) +#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SHIFT (16) +#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LENGTH (10) +#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_OPAQUE +*/ +#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) +#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) +#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SHIFT (0) +#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LENGTH (10) +#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06FC) + +/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMAX +*/ +#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_MASK (0x03FF0000) +#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LSBMASK (0x000003FF) +#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SHIFT (16) +#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LENGTH (10) +#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMIN +*/ +#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_MASK (0x000003FF) +#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LSBMASK (0x000003FF) +#define 
PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SHIFT (0) +#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LENGTH (10) +#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4LUMAKEY_C_RG_OFFSET (0x0700) + +/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_R +*/ +#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_MASK (0x0FFF0000) +#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LSBMASK (0x00000FFF) +#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SHIFT (16) +#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LENGTH (12) +#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_G +*/ +#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_MASK (0x00000FFF) +#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LSBMASK (0x00000FFF) +#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SHIFT (0) +#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LENGTH (12) +#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4LUMAKEY_C_B_OFFSET (0x0704) + +/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYALPHAMULT +*/ +#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_MASK (0x20000000) +#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LSBMASK (0x00000001) +#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SHIFT (29) +#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LENGTH (1) +#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYEN +*/ +#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_MASK (0x10000000) +#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LSBMASK (0x00000001) +#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SHIFT (28) +#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LENGTH (1) +#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYOUTOFF +*/ +#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_MASK (0x03FF0000) +#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LSBMASK (0x000003FF) +#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SHIFT (16) +#define 
PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LENGTH (10) +#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYC_B +*/ +#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_MASK (0x00000FFF) +#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LSBMASK (0x00000FFF) +#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SHIFT (0) +#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LENGTH (12) +#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SIGNED_FIELD IMG_FALSE + +#define PDP_CSCCOEFF0_OFFSET (0x0708) + +/* PDP, CSCCOEFF0, CSCCOEFFRU +*/ +#define PDP_CSCCOEFF0_CSCCOEFFRU_MASK (0x003FF800) +#define PDP_CSCCOEFF0_CSCCOEFFRU_LSBMASK (0x000007FF) +#define PDP_CSCCOEFF0_CSCCOEFFRU_SHIFT (11) +#define PDP_CSCCOEFF0_CSCCOEFFRU_LENGTH (11) +#define PDP_CSCCOEFF0_CSCCOEFFRU_SIGNED_FIELD IMG_FALSE + +/* PDP, CSCCOEFF0, CSCCOEFFRY +*/ +#define PDP_CSCCOEFF0_CSCCOEFFRY_MASK (0x000007FF) +#define PDP_CSCCOEFF0_CSCCOEFFRY_LSBMASK (0x000007FF) +#define PDP_CSCCOEFF0_CSCCOEFFRY_SHIFT (0) +#define PDP_CSCCOEFF0_CSCCOEFFRY_LENGTH (11) +#define PDP_CSCCOEFF0_CSCCOEFFRY_SIGNED_FIELD IMG_FALSE + +#define PDP_CSCCOEFF1_OFFSET (0x070C) + +/* PDP, CSCCOEFF1, CSCCOEFFGY +*/ +#define PDP_CSCCOEFF1_CSCCOEFFGY_MASK (0x003FF800) +#define PDP_CSCCOEFF1_CSCCOEFFGY_LSBMASK (0x000007FF) +#define PDP_CSCCOEFF1_CSCCOEFFGY_SHIFT (11) +#define PDP_CSCCOEFF1_CSCCOEFFGY_LENGTH (11) +#define PDP_CSCCOEFF1_CSCCOEFFGY_SIGNED_FIELD IMG_FALSE + +/* PDP, CSCCOEFF1, CSCCOEFFRV +*/ +#define PDP_CSCCOEFF1_CSCCOEFFRV_MASK (0x000007FF) +#define PDP_CSCCOEFF1_CSCCOEFFRV_LSBMASK (0x000007FF) +#define PDP_CSCCOEFF1_CSCCOEFFRV_SHIFT (0) +#define PDP_CSCCOEFF1_CSCCOEFFRV_LENGTH (11) +#define PDP_CSCCOEFF1_CSCCOEFFRV_SIGNED_FIELD IMG_FALSE + +#define PDP_CSCCOEFF2_OFFSET (0x0710) + +/* PDP, CSCCOEFF2, CSCCOEFFGV +*/ +#define PDP_CSCCOEFF2_CSCCOEFFGV_MASK (0x003FF800) +#define PDP_CSCCOEFF2_CSCCOEFFGV_LSBMASK (0x000007FF) +#define PDP_CSCCOEFF2_CSCCOEFFGV_SHIFT (11) +#define PDP_CSCCOEFF2_CSCCOEFFGV_LENGTH (11) 
+#define PDP_CSCCOEFF2_CSCCOEFFGV_SIGNED_FIELD IMG_FALSE + +/* PDP, CSCCOEFF2, CSCCOEFFGU +*/ +#define PDP_CSCCOEFF2_CSCCOEFFGU_MASK (0x000007FF) +#define PDP_CSCCOEFF2_CSCCOEFFGU_LSBMASK (0x000007FF) +#define PDP_CSCCOEFF2_CSCCOEFFGU_SHIFT (0) +#define PDP_CSCCOEFF2_CSCCOEFFGU_LENGTH (11) +#define PDP_CSCCOEFF2_CSCCOEFFGU_SIGNED_FIELD IMG_FALSE + +#define PDP_CSCCOEFF3_OFFSET (0x0714) + +/* PDP, CSCCOEFF3, CSCCOEFFBU +*/ +#define PDP_CSCCOEFF3_CSCCOEFFBU_MASK (0x003FF800) +#define PDP_CSCCOEFF3_CSCCOEFFBU_LSBMASK (0x000007FF) +#define PDP_CSCCOEFF3_CSCCOEFFBU_SHIFT (11) +#define PDP_CSCCOEFF3_CSCCOEFFBU_LENGTH (11) +#define PDP_CSCCOEFF3_CSCCOEFFBU_SIGNED_FIELD IMG_FALSE + +/* PDP, CSCCOEFF3, CSCCOEFFBY +*/ +#define PDP_CSCCOEFF3_CSCCOEFFBY_MASK (0x000007FF) +#define PDP_CSCCOEFF3_CSCCOEFFBY_LSBMASK (0x000007FF) +#define PDP_CSCCOEFF3_CSCCOEFFBY_SHIFT (0) +#define PDP_CSCCOEFF3_CSCCOEFFBY_LENGTH (11) +#define PDP_CSCCOEFF3_CSCCOEFFBY_SIGNED_FIELD IMG_FALSE + +#define PDP_CSCCOEFF4_OFFSET (0x0718) + +/* PDP, CSCCOEFF4, CSCCOEFFBV +*/ +#define PDP_CSCCOEFF4_CSCCOEFFBV_MASK (0x000007FF) +#define PDP_CSCCOEFF4_CSCCOEFFBV_LSBMASK (0x000007FF) +#define PDP_CSCCOEFF4_CSCCOEFFBV_SHIFT (0) +#define PDP_CSCCOEFF4_CSCCOEFFBV_LENGTH (11) +#define PDP_CSCCOEFF4_CSCCOEFFBV_SIGNED_FIELD IMG_FALSE + +#define PDP_BGNDCOL_AR_OFFSET (0x071C) + +/* PDP, BGNDCOL_AR, BGNDCOL_A +*/ +#define PDP_BGNDCOL_AR_BGNDCOL_A_MASK (0x03FF0000) +#define PDP_BGNDCOL_AR_BGNDCOL_A_LSBMASK (0x000003FF) +#define PDP_BGNDCOL_AR_BGNDCOL_A_SHIFT (16) +#define PDP_BGNDCOL_AR_BGNDCOL_A_LENGTH (10) +#define PDP_BGNDCOL_AR_BGNDCOL_A_SIGNED_FIELD IMG_FALSE + +/* PDP, BGNDCOL_AR, BGNDCOL_R +*/ +#define PDP_BGNDCOL_AR_BGNDCOL_R_MASK (0x000003FF) +#define PDP_BGNDCOL_AR_BGNDCOL_R_LSBMASK (0x000003FF) +#define PDP_BGNDCOL_AR_BGNDCOL_R_SHIFT (0) +#define PDP_BGNDCOL_AR_BGNDCOL_R_LENGTH (10) +#define PDP_BGNDCOL_AR_BGNDCOL_R_SIGNED_FIELD IMG_FALSE + +#define PDP_BGNDCOL_GB_OFFSET (0x0720) + +/* PDP, BGNDCOL_GB, 
BGNDCOL_G +*/ +#define PDP_BGNDCOL_GB_BGNDCOL_G_MASK (0x03FF0000) +#define PDP_BGNDCOL_GB_BGNDCOL_G_LSBMASK (0x000003FF) +#define PDP_BGNDCOL_GB_BGNDCOL_G_SHIFT (16) +#define PDP_BGNDCOL_GB_BGNDCOL_G_LENGTH (10) +#define PDP_BGNDCOL_GB_BGNDCOL_G_SIGNED_FIELD IMG_FALSE + +/* PDP, BGNDCOL_GB, BGNDCOL_B +*/ +#define PDP_BGNDCOL_GB_BGNDCOL_B_MASK (0x000003FF) +#define PDP_BGNDCOL_GB_BGNDCOL_B_LSBMASK (0x000003FF) +#define PDP_BGNDCOL_GB_BGNDCOL_B_SHIFT (0) +#define PDP_BGNDCOL_GB_BGNDCOL_B_LENGTH (10) +#define PDP_BGNDCOL_GB_BGNDCOL_B_SIGNED_FIELD IMG_FALSE + +#define PDP_BORDCOL_R_OFFSET (0x0724) + +/* PDP, BORDCOL_R, BORDCOL_R +*/ +#define PDP_BORDCOL_R_BORDCOL_R_MASK (0x000003FF) +#define PDP_BORDCOL_R_BORDCOL_R_LSBMASK (0x000003FF) +#define PDP_BORDCOL_R_BORDCOL_R_SHIFT (0) +#define PDP_BORDCOL_R_BORDCOL_R_LENGTH (10) +#define PDP_BORDCOL_R_BORDCOL_R_SIGNED_FIELD IMG_FALSE + +#define PDP_BORDCOL_GB_OFFSET (0x0728) + +/* PDP, BORDCOL_GB, BORDCOL_G +*/ +#define PDP_BORDCOL_GB_BORDCOL_G_MASK (0x03FF0000) +#define PDP_BORDCOL_GB_BORDCOL_G_LSBMASK (0x000003FF) +#define PDP_BORDCOL_GB_BORDCOL_G_SHIFT (16) +#define PDP_BORDCOL_GB_BORDCOL_G_LENGTH (10) +#define PDP_BORDCOL_GB_BORDCOL_G_SIGNED_FIELD IMG_FALSE + +/* PDP, BORDCOL_GB, BORDCOL_B +*/ +#define PDP_BORDCOL_GB_BORDCOL_B_MASK (0x000003FF) +#define PDP_BORDCOL_GB_BORDCOL_B_LSBMASK (0x000003FF) +#define PDP_BORDCOL_GB_BORDCOL_B_SHIFT (0) +#define PDP_BORDCOL_GB_BORDCOL_B_LENGTH (10) +#define PDP_BORDCOL_GB_BORDCOL_B_SIGNED_FIELD IMG_FALSE + +#define PDP_LINESTAT_OFFSET (0x0734) + +/* PDP, LINESTAT, LINENO +*/ +#define PDP_LINESTAT_LINENO_MASK (0x00001FFF) +#define PDP_LINESTAT_LINENO_LSBMASK (0x00001FFF) +#define PDP_LINESTAT_LINENO_SHIFT (0) +#define PDP_LINESTAT_LINENO_LENGTH (13) +#define PDP_LINESTAT_LINENO_SIGNED_FIELD IMG_FALSE + +#define PDP_CR_PDP_PROCAMP_C11C12_OFFSET (0x0738) + +/* PDP, CR_PDP_PROCAMP_C11C12, CR_PROCAMP_C12 +*/ +#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_MASK (0x3FFF0000) +#define 
PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LSBMASK (0x00003FFF) +#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SHIFT (16) +#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LENGTH (14) +#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SIGNED_FIELD IMG_FALSE + +/* PDP, CR_PDP_PROCAMP_C11C12, CR_PROCAMP_C11 +*/ +#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_MASK (0x00003FFF) +#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LSBMASK (0x00003FFF) +#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SHIFT (0) +#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LENGTH (14) +#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SIGNED_FIELD IMG_FALSE + +#define PDP_CR_PDP_PROCAMP_C13C21_OFFSET (0x073C) + +/* PDP, CR_PDP_PROCAMP_C13C21, CR_PROCAMP_C21 +*/ +#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_MASK (0x3FFF0000) +#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LSBMASK (0x00003FFF) +#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SHIFT (16) +#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LENGTH (14) +#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SIGNED_FIELD IMG_FALSE + +/* PDP, CR_PDP_PROCAMP_C13C21, CR_PROCAMP_C13 +*/ +#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_MASK (0x00003FFF) +#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LSBMASK (0x00003FFF) +#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SHIFT (0) +#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LENGTH (14) +#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SIGNED_FIELD IMG_FALSE + +#define PDP_CR_PDP_PROCAMP_C22C23_OFFSET (0x0740) + +/* PDP, CR_PDP_PROCAMP_C22C23, CR_PROCAMP_C23 +*/ +#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_MASK (0x3FFF0000) +#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LSBMASK (0x00003FFF) +#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SHIFT (16) +#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LENGTH (14) +#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SIGNED_FIELD IMG_FALSE + +/* PDP, CR_PDP_PROCAMP_C22C23, CR_PROCAMP_C22 +*/ +#define 
PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_MASK (0x00003FFF) +#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LSBMASK (0x00003FFF) +#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SHIFT (0) +#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LENGTH (14) +#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SIGNED_FIELD IMG_FALSE + +#define PDP_CR_PDP_PROCAMP_C31C32_OFFSET (0x0744) + +/* PDP, CR_PDP_PROCAMP_C31C32, CR_PROCAMP_C32 +*/ +#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_MASK (0x3FFF0000) +#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LSBMASK (0x00003FFF) +#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SHIFT (16) +#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LENGTH (14) +#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SIGNED_FIELD IMG_FALSE + +/* PDP, CR_PDP_PROCAMP_C31C32, CR_PROCAMP_C31 +*/ +#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_MASK (0x00003FFF) +#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LSBMASK (0x00003FFF) +#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SHIFT (0) +#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LENGTH (14) +#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SIGNED_FIELD IMG_FALSE + +#define PDP_CR_PDP_PROCAMP_C33_OFFSET (0x0748) + +/* PDP, CR_PDP_PROCAMP_C33, CR_PROCAMP_C33 +*/ +#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_MASK (0x3FFF0000) +#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_LSBMASK (0x00003FFF) +#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_SHIFT (16) +#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_LENGTH (14) +#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_SIGNED_FIELD IMG_FALSE + +/* PDP, CR_PDP_PROCAMP_C33, CR_PROCAMP_RANGE +*/ +#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_MASK (0x00000030) +#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LSBMASK (0x00000003) +#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SHIFT (4) +#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LENGTH (2) +#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SIGNED_FIELD IMG_FALSE + +/* PDP, CR_PDP_PROCAMP_C33, CR_PROCAMP_EN 
+*/ +#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_MASK (0x00000001) +#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_LSBMASK (0x00000001) +#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_SHIFT (0) +#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_LENGTH (1) +#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_SIGNED_FIELD IMG_FALSE + +#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_OFFSET (0x074C) + +/* PDP, CR_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_G +*/ +#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_MASK (0x0FFF0000) +#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LSBMASK (0x00000FFF) +#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SHIFT (16) +#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LENGTH (12) +#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SIGNED_FIELD IMG_FALSE + +/* PDP, CR_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_B +*/ +#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_MASK (0x00000FFF) +#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LSBMASK (0x00000FFF) +#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SHIFT (0) +#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LENGTH (12) +#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SIGNED_FIELD IMG_FALSE + +#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_OFFSET (0x0750) + +/* PDP, CR_PDP_PROCAMP_OUTOFFSET_R, CR_PROCAMP_OUTOFF_R +*/ +#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_MASK (0x00000FFF) +#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LSBMASK (0x00000FFF) +#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SHIFT (0) +#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LENGTH (12) +#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SIGNED_FIELD IMG_FALSE + +#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_OFFSET (0x0754) + +/* PDP, CR_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_G +*/ +#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_MASK (0x03FF0000) +#define 
PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LSBMASK (0x000003FF) +#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SHIFT (16) +#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LENGTH (10) +#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SIGNED_FIELD IMG_FALSE + +/* PDP, CR_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_B +*/ +#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_MASK (0x000003FF) +#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LSBMASK (0x000003FF) +#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SHIFT (0) +#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LENGTH (10) +#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SIGNED_FIELD IMG_FALSE + +#define PDP_CR_PDP_PROCAMP_INOFFSET_R_OFFSET (0x0758) + +/* PDP, CR_PDP_PROCAMP_INOFFSET_R, CR_PROCAMP_INOFF_R +*/ +#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_MASK (0x000003FF) +#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LSBMASK (0x000003FF) +#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SHIFT (0) +#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LENGTH (10) +#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SIGNED_FIELD IMG_FALSE + +#define PDP_SIGNAT_R_OFFSET (0x075C) + +/* PDP, SIGNAT_R, SIGNATURE_R +*/ +#define PDP_SIGNAT_R_SIGNATURE_R_MASK (0x000003FF) +#define PDP_SIGNAT_R_SIGNATURE_R_LSBMASK (0x000003FF) +#define PDP_SIGNAT_R_SIGNATURE_R_SHIFT (0) +#define PDP_SIGNAT_R_SIGNATURE_R_LENGTH (10) +#define PDP_SIGNAT_R_SIGNATURE_R_SIGNED_FIELD IMG_FALSE + +#define PDP_SIGNAT_GB_OFFSET (0x0760) + +/* PDP, SIGNAT_GB, SIGNATURE_G +*/ +#define PDP_SIGNAT_GB_SIGNATURE_G_MASK (0x03FF0000) +#define PDP_SIGNAT_GB_SIGNATURE_G_LSBMASK (0x000003FF) +#define PDP_SIGNAT_GB_SIGNATURE_G_SHIFT (16) +#define PDP_SIGNAT_GB_SIGNATURE_G_LENGTH (10) +#define PDP_SIGNAT_GB_SIGNATURE_G_SIGNED_FIELD IMG_FALSE + +/* PDP, SIGNAT_GB, SIGNATURE_B +*/ +#define PDP_SIGNAT_GB_SIGNATURE_B_MASK (0x000003FF) +#define 
PDP_SIGNAT_GB_SIGNATURE_B_LSBMASK (0x000003FF) +#define PDP_SIGNAT_GB_SIGNATURE_B_SHIFT (0) +#define PDP_SIGNAT_GB_SIGNATURE_B_LENGTH (10) +#define PDP_SIGNAT_GB_SIGNATURE_B_SIGNED_FIELD IMG_FALSE + +#define PDP_REGISTER_UPDATE_CTRL_OFFSET (0x0764) + +/* PDP, REGISTER_UPDATE_CTRL, BYPASS_DOUBLE_BUFFERING +*/ +#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_MASK (0x00000004) +#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LSBMASK (0x00000001) +#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SHIFT (2) +#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LENGTH (1) +#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SIGNED_FIELD IMG_FALSE + +/* PDP, REGISTER_UPDATE_CTRL, REGISTERS_VALID +*/ +#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_MASK (0x00000002) +#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LSBMASK (0x00000001) +#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SHIFT (1) +#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LENGTH (1) +#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SIGNED_FIELD IMG_FALSE + +/* PDP, REGISTER_UPDATE_CTRL, USE_VBLANK +*/ +#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_MASK (0x00000001) +#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LSBMASK (0x00000001) +#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SHIFT (0) +#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LENGTH (1) +#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SIGNED_FIELD IMG_FALSE + +#define PDP_REGISTER_UPDATE_STATUS_OFFSET (0x0768) + +/* PDP, REGISTER_UPDATE_STATUS, REGISTERS_UPDATED +*/ +#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_MASK (0x00000002) +#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LSBMASK (0x00000001) +#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SHIFT (1) +#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LENGTH (1) +#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SIGNED_FIELD IMG_FALSE + +#define PDP_DBGCTRL_OFFSET (0x076C) + +/* PDP, DBGCTRL, DBG_READ +*/ +#define PDP_DBGCTRL_DBG_READ_MASK 
(0x00000002) +#define PDP_DBGCTRL_DBG_READ_LSBMASK (0x00000001) +#define PDP_DBGCTRL_DBG_READ_SHIFT (1) +#define PDP_DBGCTRL_DBG_READ_LENGTH (1) +#define PDP_DBGCTRL_DBG_READ_SIGNED_FIELD IMG_FALSE + +/* PDP, DBGCTRL, DBG_ENAB +*/ +#define PDP_DBGCTRL_DBG_ENAB_MASK (0x00000001) +#define PDP_DBGCTRL_DBG_ENAB_LSBMASK (0x00000001) +#define PDP_DBGCTRL_DBG_ENAB_SHIFT (0) +#define PDP_DBGCTRL_DBG_ENAB_LENGTH (1) +#define PDP_DBGCTRL_DBG_ENAB_SIGNED_FIELD IMG_FALSE + +#define PDP_DBGDATA_R_OFFSET (0x0770) + +/* PDP, DBGDATA_R, DBG_DATA_R +*/ +#define PDP_DBGDATA_R_DBG_DATA_R_MASK (0x000003FF) +#define PDP_DBGDATA_R_DBG_DATA_R_LSBMASK (0x000003FF) +#define PDP_DBGDATA_R_DBG_DATA_R_SHIFT (0) +#define PDP_DBGDATA_R_DBG_DATA_R_LENGTH (10) +#define PDP_DBGDATA_R_DBG_DATA_R_SIGNED_FIELD IMG_FALSE + +#define PDP_DBGDATA_GB_OFFSET (0x0774) + +/* PDP, DBGDATA_GB, DBG_DATA_G +*/ +#define PDP_DBGDATA_GB_DBG_DATA_G_MASK (0x03FF0000) +#define PDP_DBGDATA_GB_DBG_DATA_G_LSBMASK (0x000003FF) +#define PDP_DBGDATA_GB_DBG_DATA_G_SHIFT (16) +#define PDP_DBGDATA_GB_DBG_DATA_G_LENGTH (10) +#define PDP_DBGDATA_GB_DBG_DATA_G_SIGNED_FIELD IMG_FALSE + +/* PDP, DBGDATA_GB, DBG_DATA_B +*/ +#define PDP_DBGDATA_GB_DBG_DATA_B_MASK (0x000003FF) +#define PDP_DBGDATA_GB_DBG_DATA_B_LSBMASK (0x000003FF) +#define PDP_DBGDATA_GB_DBG_DATA_B_SHIFT (0) +#define PDP_DBGDATA_GB_DBG_DATA_B_LENGTH (10) +#define PDP_DBGDATA_GB_DBG_DATA_B_SIGNED_FIELD IMG_FALSE + +#define PDP_DBGSIDE_OFFSET (0x0778) + +/* PDP, DBGSIDE, DBG_VAL +*/ +#define PDP_DBGSIDE_DBG_VAL_MASK (0x00000008) +#define PDP_DBGSIDE_DBG_VAL_LSBMASK (0x00000001) +#define PDP_DBGSIDE_DBG_VAL_SHIFT (3) +#define PDP_DBGSIDE_DBG_VAL_LENGTH (1) +#define PDP_DBGSIDE_DBG_VAL_SIGNED_FIELD IMG_FALSE + +/* PDP, DBGSIDE, DBG_SIDE +*/ +#define PDP_DBGSIDE_DBG_SIDE_MASK (0x00000007) +#define PDP_DBGSIDE_DBG_SIDE_LSBMASK (0x00000007) +#define PDP_DBGSIDE_DBG_SIDE_SHIFT (0) +#define PDP_DBGSIDE_DBG_SIDE_LENGTH (3) +#define PDP_DBGSIDE_DBG_SIDE_SIGNED_FIELD IMG_FALSE + 
+#define PDP_OUTPUT_OFFSET (0x077C) + +/* PDP, OUTPUT, EIGHT_BIT_OUTPUT +*/ +#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_MASK (0x00000002) +#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_LSBMASK (0x00000001) +#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_SHIFT (1) +#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_LENGTH (1) +#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_SIGNED_FIELD IMG_FALSE + +/* PDP, OUTPUT, OUTPUT_CONFIG +*/ +#define PDP_OUTPUT_OUTPUT_CONFIG_MASK (0x00000001) +#define PDP_OUTPUT_OUTPUT_CONFIG_LSBMASK (0x00000001) +#define PDP_OUTPUT_OUTPUT_CONFIG_SHIFT (0) +#define PDP_OUTPUT_OUTPUT_CONFIG_LENGTH (1) +#define PDP_OUTPUT_OUTPUT_CONFIG_SIGNED_FIELD IMG_FALSE + +#define PDP_SYNCCTRL_OFFSET (0x0780) + +/* PDP, SYNCCTRL, SYNCACTIVE +*/ +#define PDP_SYNCCTRL_SYNCACTIVE_MASK (0x80000000) +#define PDP_SYNCCTRL_SYNCACTIVE_LSBMASK (0x00000001) +#define PDP_SYNCCTRL_SYNCACTIVE_SHIFT (31) +#define PDP_SYNCCTRL_SYNCACTIVE_LENGTH (1) +#define PDP_SYNCCTRL_SYNCACTIVE_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, PDP_RST +*/ +#define PDP_SYNCCTRL_PDP_RST_MASK (0x20000000) +#define PDP_SYNCCTRL_PDP_RST_LSBMASK (0x00000001) +#define PDP_SYNCCTRL_PDP_RST_SHIFT (29) +#define PDP_SYNCCTRL_PDP_RST_LENGTH (1) +#define PDP_SYNCCTRL_PDP_RST_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, POWERDN +*/ +#define PDP_SYNCCTRL_POWERDN_MASK (0x10000000) +#define PDP_SYNCCTRL_POWERDN_LSBMASK (0x00000001) +#define PDP_SYNCCTRL_POWERDN_SHIFT (28) +#define PDP_SYNCCTRL_POWERDN_LENGTH (1) +#define PDP_SYNCCTRL_POWERDN_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, LOWPWRMODE +*/ +#define PDP_SYNCCTRL_LOWPWRMODE_MASK (0x08000000) +#define PDP_SYNCCTRL_LOWPWRMODE_LSBMASK (0x00000001) +#define PDP_SYNCCTRL_LOWPWRMODE_SHIFT (27) +#define PDP_SYNCCTRL_LOWPWRMODE_LENGTH (1) +#define PDP_SYNCCTRL_LOWPWRMODE_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, UPDSYNCTRL +*/ +#define PDP_SYNCCTRL_UPDSYNCTRL_MASK (0x04000000) +#define PDP_SYNCCTRL_UPDSYNCTRL_LSBMASK (0x00000001) +#define PDP_SYNCCTRL_UPDSYNCTRL_SHIFT (26) +#define 
PDP_SYNCCTRL_UPDSYNCTRL_LENGTH (1) +#define PDP_SYNCCTRL_UPDSYNCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, UPDINTCTRL +*/ +#define PDP_SYNCCTRL_UPDINTCTRL_MASK (0x02000000) +#define PDP_SYNCCTRL_UPDINTCTRL_LSBMASK (0x00000001) +#define PDP_SYNCCTRL_UPDINTCTRL_SHIFT (25) +#define PDP_SYNCCTRL_UPDINTCTRL_LENGTH (1) +#define PDP_SYNCCTRL_UPDINTCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, UPDCTRL +*/ +#define PDP_SYNCCTRL_UPDCTRL_MASK (0x01000000) +#define PDP_SYNCCTRL_UPDCTRL_LSBMASK (0x00000001) +#define PDP_SYNCCTRL_UPDCTRL_SHIFT (24) +#define PDP_SYNCCTRL_UPDCTRL_LENGTH (1) +#define PDP_SYNCCTRL_UPDCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, UPDWAIT +*/ +#define PDP_SYNCCTRL_UPDWAIT_MASK (0x000F0000) +#define PDP_SYNCCTRL_UPDWAIT_LSBMASK (0x0000000F) +#define PDP_SYNCCTRL_UPDWAIT_SHIFT (16) +#define PDP_SYNCCTRL_UPDWAIT_LENGTH (4) +#define PDP_SYNCCTRL_UPDWAIT_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, FIELD_EN +*/ +#define PDP_SYNCCTRL_FIELD_EN_MASK (0x00002000) +#define PDP_SYNCCTRL_FIELD_EN_LSBMASK (0x00000001) +#define PDP_SYNCCTRL_FIELD_EN_SHIFT (13) +#define PDP_SYNCCTRL_FIELD_EN_LENGTH (1) +#define PDP_SYNCCTRL_FIELD_EN_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, CSYNC_EN +*/ +#define PDP_SYNCCTRL_CSYNC_EN_MASK (0x00001000) +#define PDP_SYNCCTRL_CSYNC_EN_LSBMASK (0x00000001) +#define PDP_SYNCCTRL_CSYNC_EN_SHIFT (12) +#define PDP_SYNCCTRL_CSYNC_EN_LENGTH (1) +#define PDP_SYNCCTRL_CSYNC_EN_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, CLKPOL +*/ +#define PDP_SYNCCTRL_CLKPOL_MASK (0x00000800) +#define PDP_SYNCCTRL_CLKPOL_LSBMASK (0x00000001) +#define PDP_SYNCCTRL_CLKPOL_SHIFT (11) +#define PDP_SYNCCTRL_CLKPOL_LENGTH (1) +#define PDP_SYNCCTRL_CLKPOL_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, VS_SLAVE +*/ +#define PDP_SYNCCTRL_VS_SLAVE_MASK (0x00000080) +#define PDP_SYNCCTRL_VS_SLAVE_LSBMASK (0x00000001) +#define PDP_SYNCCTRL_VS_SLAVE_SHIFT (7) +#define PDP_SYNCCTRL_VS_SLAVE_LENGTH (1) +#define PDP_SYNCCTRL_VS_SLAVE_SIGNED_FIELD IMG_FALSE + +/* PDP, 
SYNCCTRL, HS_SLAVE +*/ +#define PDP_SYNCCTRL_HS_SLAVE_MASK (0x00000040) +#define PDP_SYNCCTRL_HS_SLAVE_LSBMASK (0x00000001) +#define PDP_SYNCCTRL_HS_SLAVE_SHIFT (6) +#define PDP_SYNCCTRL_HS_SLAVE_LENGTH (1) +#define PDP_SYNCCTRL_HS_SLAVE_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, BLNKPOL +*/ +#define PDP_SYNCCTRL_BLNKPOL_MASK (0x00000020) +#define PDP_SYNCCTRL_BLNKPOL_LSBMASK (0x00000001) +#define PDP_SYNCCTRL_BLNKPOL_SHIFT (5) +#define PDP_SYNCCTRL_BLNKPOL_LENGTH (1) +#define PDP_SYNCCTRL_BLNKPOL_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, BLNKDIS +*/ +#define PDP_SYNCCTRL_BLNKDIS_MASK (0x00000010) +#define PDP_SYNCCTRL_BLNKDIS_LSBMASK (0x00000001) +#define PDP_SYNCCTRL_BLNKDIS_SHIFT (4) +#define PDP_SYNCCTRL_BLNKDIS_LENGTH (1) +#define PDP_SYNCCTRL_BLNKDIS_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, VSPOL +*/ +#define PDP_SYNCCTRL_VSPOL_MASK (0x00000008) +#define PDP_SYNCCTRL_VSPOL_LSBMASK (0x00000001) +#define PDP_SYNCCTRL_VSPOL_SHIFT (3) +#define PDP_SYNCCTRL_VSPOL_LENGTH (1) +#define PDP_SYNCCTRL_VSPOL_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, VSDIS +*/ +#define PDP_SYNCCTRL_VSDIS_MASK (0x00000004) +#define PDP_SYNCCTRL_VSDIS_LSBMASK (0x00000001) +#define PDP_SYNCCTRL_VSDIS_SHIFT (2) +#define PDP_SYNCCTRL_VSDIS_LENGTH (1) +#define PDP_SYNCCTRL_VSDIS_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, HSPOL +*/ +#define PDP_SYNCCTRL_HSPOL_MASK (0x00000002) +#define PDP_SYNCCTRL_HSPOL_LSBMASK (0x00000001) +#define PDP_SYNCCTRL_HSPOL_SHIFT (1) +#define PDP_SYNCCTRL_HSPOL_LENGTH (1) +#define PDP_SYNCCTRL_HSPOL_SIGNED_FIELD IMG_FALSE + +/* PDP, SYNCCTRL, HSDIS +*/ +#define PDP_SYNCCTRL_HSDIS_MASK (0x00000001) +#define PDP_SYNCCTRL_HSDIS_LSBMASK (0x00000001) +#define PDP_SYNCCTRL_HSDIS_SHIFT (0) +#define PDP_SYNCCTRL_HSDIS_LENGTH (1) +#define PDP_SYNCCTRL_HSDIS_SIGNED_FIELD IMG_FALSE + +#define PDP_HSYNC1_OFFSET (0x0784) + +/* PDP, HSYNC1, HBPS +*/ +#define PDP_HSYNC1_HBPS_MASK (0x1FFF0000) +#define PDP_HSYNC1_HBPS_LSBMASK (0x00001FFF) +#define PDP_HSYNC1_HBPS_SHIFT (16) 
+#define PDP_HSYNC1_HBPS_LENGTH (13) +#define PDP_HSYNC1_HBPS_SIGNED_FIELD IMG_FALSE + +/* PDP, HSYNC1, HT +*/ +#define PDP_HSYNC1_HT_MASK (0x00001FFF) +#define PDP_HSYNC1_HT_LSBMASK (0x00001FFF) +#define PDP_HSYNC1_HT_SHIFT (0) +#define PDP_HSYNC1_HT_LENGTH (13) +#define PDP_HSYNC1_HT_SIGNED_FIELD IMG_FALSE + +#define PDP_HSYNC2_OFFSET (0x0788) + +/* PDP, HSYNC2, HAS +*/ +#define PDP_HSYNC2_HAS_MASK (0x1FFF0000) +#define PDP_HSYNC2_HAS_LSBMASK (0x00001FFF) +#define PDP_HSYNC2_HAS_SHIFT (16) +#define PDP_HSYNC2_HAS_LENGTH (13) +#define PDP_HSYNC2_HAS_SIGNED_FIELD IMG_FALSE + +/* PDP, HSYNC2, HLBS +*/ +#define PDP_HSYNC2_HLBS_MASK (0x00001FFF) +#define PDP_HSYNC2_HLBS_LSBMASK (0x00001FFF) +#define PDP_HSYNC2_HLBS_SHIFT (0) +#define PDP_HSYNC2_HLBS_LENGTH (13) +#define PDP_HSYNC2_HLBS_SIGNED_FIELD IMG_FALSE + +#define PDP_HSYNC3_OFFSET (0x078C) + +/* PDP, HSYNC3, HFPS +*/ +#define PDP_HSYNC3_HFPS_MASK (0x1FFF0000) +#define PDP_HSYNC3_HFPS_LSBMASK (0x00001FFF) +#define PDP_HSYNC3_HFPS_SHIFT (16) +#define PDP_HSYNC3_HFPS_LENGTH (13) +#define PDP_HSYNC3_HFPS_SIGNED_FIELD IMG_FALSE + +/* PDP, HSYNC3, HRBS +*/ +#define PDP_HSYNC3_HRBS_MASK (0x00001FFF) +#define PDP_HSYNC3_HRBS_LSBMASK (0x00001FFF) +#define PDP_HSYNC3_HRBS_SHIFT (0) +#define PDP_HSYNC3_HRBS_LENGTH (13) +#define PDP_HSYNC3_HRBS_SIGNED_FIELD IMG_FALSE + +#define PDP_VSYNC1_OFFSET (0x0790) + +/* PDP, VSYNC1, VBPS +*/ +#define PDP_VSYNC1_VBPS_MASK (0x1FFF0000) +#define PDP_VSYNC1_VBPS_LSBMASK (0x00001FFF) +#define PDP_VSYNC1_VBPS_SHIFT (16) +#define PDP_VSYNC1_VBPS_LENGTH (13) +#define PDP_VSYNC1_VBPS_SIGNED_FIELD IMG_FALSE + +/* PDP, VSYNC1, VT +*/ +#define PDP_VSYNC1_VT_MASK (0x00001FFF) +#define PDP_VSYNC1_VT_LSBMASK (0x00001FFF) +#define PDP_VSYNC1_VT_SHIFT (0) +#define PDP_VSYNC1_VT_LENGTH (13) +#define PDP_VSYNC1_VT_SIGNED_FIELD IMG_FALSE + +#define PDP_VSYNC2_OFFSET (0x0794) + +/* PDP, VSYNC2, VAS +*/ +#define PDP_VSYNC2_VAS_MASK (0x1FFF0000) +#define PDP_VSYNC2_VAS_LSBMASK (0x00001FFF) +#define 
PDP_VSYNC2_VAS_SHIFT (16) +#define PDP_VSYNC2_VAS_LENGTH (13) +#define PDP_VSYNC2_VAS_SIGNED_FIELD IMG_FALSE + +/* PDP, VSYNC2, VTBS +*/ +#define PDP_VSYNC2_VTBS_MASK (0x00001FFF) +#define PDP_VSYNC2_VTBS_LSBMASK (0x00001FFF) +#define PDP_VSYNC2_VTBS_SHIFT (0) +#define PDP_VSYNC2_VTBS_LENGTH (13) +#define PDP_VSYNC2_VTBS_SIGNED_FIELD IMG_FALSE + +#define PDP_VSYNC3_OFFSET (0x0798) + +/* PDP, VSYNC3, VFPS +*/ +#define PDP_VSYNC3_VFPS_MASK (0x1FFF0000) +#define PDP_VSYNC3_VFPS_LSBMASK (0x00001FFF) +#define PDP_VSYNC3_VFPS_SHIFT (16) +#define PDP_VSYNC3_VFPS_LENGTH (13) +#define PDP_VSYNC3_VFPS_SIGNED_FIELD IMG_FALSE + +/* PDP, VSYNC3, VBBS +*/ +#define PDP_VSYNC3_VBBS_MASK (0x00001FFF) +#define PDP_VSYNC3_VBBS_LSBMASK (0x00001FFF) +#define PDP_VSYNC3_VBBS_SHIFT (0) +#define PDP_VSYNC3_VBBS_LENGTH (13) +#define PDP_VSYNC3_VBBS_SIGNED_FIELD IMG_FALSE + +#define PDP_INTSTAT_OFFSET (0x079C) + +/* PDP, INTSTAT, INTS_VID4ORUN +*/ +#define PDP_INTSTAT_INTS_VID4ORUN_MASK (0x00080000) +#define PDP_INTSTAT_INTS_VID4ORUN_LSBMASK (0x00000001) +#define PDP_INTSTAT_INTS_VID4ORUN_SHIFT (19) +#define PDP_INTSTAT_INTS_VID4ORUN_LENGTH (1) +#define PDP_INTSTAT_INTS_VID4ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_VID3ORUN +*/ +#define PDP_INTSTAT_INTS_VID3ORUN_MASK (0x00040000) +#define PDP_INTSTAT_INTS_VID3ORUN_LSBMASK (0x00000001) +#define PDP_INTSTAT_INTS_VID3ORUN_SHIFT (18) +#define PDP_INTSTAT_INTS_VID3ORUN_LENGTH (1) +#define PDP_INTSTAT_INTS_VID3ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_VID2ORUN +*/ +#define PDP_INTSTAT_INTS_VID2ORUN_MASK (0x00020000) +#define PDP_INTSTAT_INTS_VID2ORUN_LSBMASK (0x00000001) +#define PDP_INTSTAT_INTS_VID2ORUN_SHIFT (17) +#define PDP_INTSTAT_INTS_VID2ORUN_LENGTH (1) +#define PDP_INTSTAT_INTS_VID2ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_VID1ORUN +*/ +#define PDP_INTSTAT_INTS_VID1ORUN_MASK (0x00010000) +#define PDP_INTSTAT_INTS_VID1ORUN_LSBMASK (0x00000001) +#define PDP_INTSTAT_INTS_VID1ORUN_SHIFT (16) +#define 
PDP_INTSTAT_INTS_VID1ORUN_LENGTH (1) +#define PDP_INTSTAT_INTS_VID1ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_GRPH4ORUN +*/ +#define PDP_INTSTAT_INTS_GRPH4ORUN_MASK (0x00008000) +#define PDP_INTSTAT_INTS_GRPH4ORUN_LSBMASK (0x00000001) +#define PDP_INTSTAT_INTS_GRPH4ORUN_SHIFT (15) +#define PDP_INTSTAT_INTS_GRPH4ORUN_LENGTH (1) +#define PDP_INTSTAT_INTS_GRPH4ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_GRPH3ORUN +*/ +#define PDP_INTSTAT_INTS_GRPH3ORUN_MASK (0x00004000) +#define PDP_INTSTAT_INTS_GRPH3ORUN_LSBMASK (0x00000001) +#define PDP_INTSTAT_INTS_GRPH3ORUN_SHIFT (14) +#define PDP_INTSTAT_INTS_GRPH3ORUN_LENGTH (1) +#define PDP_INTSTAT_INTS_GRPH3ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_GRPH2ORUN +*/ +#define PDP_INTSTAT_INTS_GRPH2ORUN_MASK (0x00002000) +#define PDP_INTSTAT_INTS_GRPH2ORUN_LSBMASK (0x00000001) +#define PDP_INTSTAT_INTS_GRPH2ORUN_SHIFT (13) +#define PDP_INTSTAT_INTS_GRPH2ORUN_LENGTH (1) +#define PDP_INTSTAT_INTS_GRPH2ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_GRPH1ORUN +*/ +#define PDP_INTSTAT_INTS_GRPH1ORUN_MASK (0x00001000) +#define PDP_INTSTAT_INTS_GRPH1ORUN_LSBMASK (0x00000001) +#define PDP_INTSTAT_INTS_GRPH1ORUN_SHIFT (12) +#define PDP_INTSTAT_INTS_GRPH1ORUN_LENGTH (1) +#define PDP_INTSTAT_INTS_GRPH1ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_VID4URUN +*/ +#define PDP_INTSTAT_INTS_VID4URUN_MASK (0x00000800) +#define PDP_INTSTAT_INTS_VID4URUN_LSBMASK (0x00000001) +#define PDP_INTSTAT_INTS_VID4URUN_SHIFT (11) +#define PDP_INTSTAT_INTS_VID4URUN_LENGTH (1) +#define PDP_INTSTAT_INTS_VID4URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_VID3URUN +*/ +#define PDP_INTSTAT_INTS_VID3URUN_MASK (0x00000400) +#define PDP_INTSTAT_INTS_VID3URUN_LSBMASK (0x00000001) +#define PDP_INTSTAT_INTS_VID3URUN_SHIFT (10) +#define PDP_INTSTAT_INTS_VID3URUN_LENGTH (1) +#define PDP_INTSTAT_INTS_VID3URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_VID2URUN +*/ +#define PDP_INTSTAT_INTS_VID2URUN_MASK (0x00000200) 
+#define PDP_INTSTAT_INTS_VID2URUN_LSBMASK (0x00000001) +#define PDP_INTSTAT_INTS_VID2URUN_SHIFT (9) +#define PDP_INTSTAT_INTS_VID2URUN_LENGTH (1) +#define PDP_INTSTAT_INTS_VID2URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_VID1URUN +*/ +#define PDP_INTSTAT_INTS_VID1URUN_MASK (0x00000100) +#define PDP_INTSTAT_INTS_VID1URUN_LSBMASK (0x00000001) +#define PDP_INTSTAT_INTS_VID1URUN_SHIFT (8) +#define PDP_INTSTAT_INTS_VID1URUN_LENGTH (1) +#define PDP_INTSTAT_INTS_VID1URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_GRPH4URUN +*/ +#define PDP_INTSTAT_INTS_GRPH4URUN_MASK (0x00000080) +#define PDP_INTSTAT_INTS_GRPH4URUN_LSBMASK (0x00000001) +#define PDP_INTSTAT_INTS_GRPH4URUN_SHIFT (7) +#define PDP_INTSTAT_INTS_GRPH4URUN_LENGTH (1) +#define PDP_INTSTAT_INTS_GRPH4URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_GRPH3URUN +*/ +#define PDP_INTSTAT_INTS_GRPH3URUN_MASK (0x00000040) +#define PDP_INTSTAT_INTS_GRPH3URUN_LSBMASK (0x00000001) +#define PDP_INTSTAT_INTS_GRPH3URUN_SHIFT (6) +#define PDP_INTSTAT_INTS_GRPH3URUN_LENGTH (1) +#define PDP_INTSTAT_INTS_GRPH3URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_GRPH2URUN +*/ +#define PDP_INTSTAT_INTS_GRPH2URUN_MASK (0x00000020) +#define PDP_INTSTAT_INTS_GRPH2URUN_LSBMASK (0x00000001) +#define PDP_INTSTAT_INTS_GRPH2URUN_SHIFT (5) +#define PDP_INTSTAT_INTS_GRPH2URUN_LENGTH (1) +#define PDP_INTSTAT_INTS_GRPH2URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_GRPH1URUN +*/ +#define PDP_INTSTAT_INTS_GRPH1URUN_MASK (0x00000010) +#define PDP_INTSTAT_INTS_GRPH1URUN_LSBMASK (0x00000001) +#define PDP_INTSTAT_INTS_GRPH1URUN_SHIFT (4) +#define PDP_INTSTAT_INTS_GRPH1URUN_LENGTH (1) +#define PDP_INTSTAT_INTS_GRPH1URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_VBLNK1 +*/ +#define PDP_INTSTAT_INTS_VBLNK1_MASK (0x00000008) +#define PDP_INTSTAT_INTS_VBLNK1_LSBMASK (0x00000001) +#define PDP_INTSTAT_INTS_VBLNK1_SHIFT (3) +#define PDP_INTSTAT_INTS_VBLNK1_LENGTH (1) +#define PDP_INTSTAT_INTS_VBLNK1_SIGNED_FIELD IMG_FALSE + +/* 
PDP, INTSTAT, INTS_VBLNK0 +*/ +#define PDP_INTSTAT_INTS_VBLNK0_MASK (0x00000004) +#define PDP_INTSTAT_INTS_VBLNK0_LSBMASK (0x00000001) +#define PDP_INTSTAT_INTS_VBLNK0_SHIFT (2) +#define PDP_INTSTAT_INTS_VBLNK0_LENGTH (1) +#define PDP_INTSTAT_INTS_VBLNK0_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_HBLNK1 +*/ +#define PDP_INTSTAT_INTS_HBLNK1_MASK (0x00000002) +#define PDP_INTSTAT_INTS_HBLNK1_LSBMASK (0x00000001) +#define PDP_INTSTAT_INTS_HBLNK1_SHIFT (1) +#define PDP_INTSTAT_INTS_HBLNK1_LENGTH (1) +#define PDP_INTSTAT_INTS_HBLNK1_SIGNED_FIELD IMG_FALSE + +/* PDP, INTSTAT, INTS_HBLNK0 +*/ +#define PDP_INTSTAT_INTS_HBLNK0_MASK (0x00000001) +#define PDP_INTSTAT_INTS_HBLNK0_LSBMASK (0x00000001) +#define PDP_INTSTAT_INTS_HBLNK0_SHIFT (0) +#define PDP_INTSTAT_INTS_HBLNK0_LENGTH (1) +#define PDP_INTSTAT_INTS_HBLNK0_SIGNED_FIELD IMG_FALSE + +#define PDP_INTENAB_OFFSET (0x07A0) + +/* PDP, INTENAB, INTEN_VID4ORUN +*/ +#define PDP_INTENAB_INTEN_VID4ORUN_MASK (0x00080000) +#define PDP_INTENAB_INTEN_VID4ORUN_LSBMASK (0x00000001) +#define PDP_INTENAB_INTEN_VID4ORUN_SHIFT (19) +#define PDP_INTENAB_INTEN_VID4ORUN_LENGTH (1) +#define PDP_INTENAB_INTEN_VID4ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_VID3ORUN +*/ +#define PDP_INTENAB_INTEN_VID3ORUN_MASK (0x00040000) +#define PDP_INTENAB_INTEN_VID3ORUN_LSBMASK (0x00000001) +#define PDP_INTENAB_INTEN_VID3ORUN_SHIFT (18) +#define PDP_INTENAB_INTEN_VID3ORUN_LENGTH (1) +#define PDP_INTENAB_INTEN_VID3ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_VID2ORUN +*/ +#define PDP_INTENAB_INTEN_VID2ORUN_MASK (0x00020000) +#define PDP_INTENAB_INTEN_VID2ORUN_LSBMASK (0x00000001) +#define PDP_INTENAB_INTEN_VID2ORUN_SHIFT (17) +#define PDP_INTENAB_INTEN_VID2ORUN_LENGTH (1) +#define PDP_INTENAB_INTEN_VID2ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_VID1ORUN +*/ +#define PDP_INTENAB_INTEN_VID1ORUN_MASK (0x00010000) +#define PDP_INTENAB_INTEN_VID1ORUN_LSBMASK (0x00000001) +#define PDP_INTENAB_INTEN_VID1ORUN_SHIFT (16) +#define 
PDP_INTENAB_INTEN_VID1ORUN_LENGTH (1) +#define PDP_INTENAB_INTEN_VID1ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_GRPH4ORUN +*/ +#define PDP_INTENAB_INTEN_GRPH4ORUN_MASK (0x00008000) +#define PDP_INTENAB_INTEN_GRPH4ORUN_LSBMASK (0x00000001) +#define PDP_INTENAB_INTEN_GRPH4ORUN_SHIFT (15) +#define PDP_INTENAB_INTEN_GRPH4ORUN_LENGTH (1) +#define PDP_INTENAB_INTEN_GRPH4ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_GRPH3ORUN +*/ +#define PDP_INTENAB_INTEN_GRPH3ORUN_MASK (0x00004000) +#define PDP_INTENAB_INTEN_GRPH3ORUN_LSBMASK (0x00000001) +#define PDP_INTENAB_INTEN_GRPH3ORUN_SHIFT (14) +#define PDP_INTENAB_INTEN_GRPH3ORUN_LENGTH (1) +#define PDP_INTENAB_INTEN_GRPH3ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_GRPH2ORUN +*/ +#define PDP_INTENAB_INTEN_GRPH2ORUN_MASK (0x00002000) +#define PDP_INTENAB_INTEN_GRPH2ORUN_LSBMASK (0x00000001) +#define PDP_INTENAB_INTEN_GRPH2ORUN_SHIFT (13) +#define PDP_INTENAB_INTEN_GRPH2ORUN_LENGTH (1) +#define PDP_INTENAB_INTEN_GRPH2ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_GRPH1ORUN +*/ +#define PDP_INTENAB_INTEN_GRPH1ORUN_MASK (0x00001000) +#define PDP_INTENAB_INTEN_GRPH1ORUN_LSBMASK (0x00000001) +#define PDP_INTENAB_INTEN_GRPH1ORUN_SHIFT (12) +#define PDP_INTENAB_INTEN_GRPH1ORUN_LENGTH (1) +#define PDP_INTENAB_INTEN_GRPH1ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_VID4URUN +*/ +#define PDP_INTENAB_INTEN_VID4URUN_MASK (0x00000800) +#define PDP_INTENAB_INTEN_VID4URUN_LSBMASK (0x00000001) +#define PDP_INTENAB_INTEN_VID4URUN_SHIFT (11) +#define PDP_INTENAB_INTEN_VID4URUN_LENGTH (1) +#define PDP_INTENAB_INTEN_VID4URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_VID3URUN +*/ +#define PDP_INTENAB_INTEN_VID3URUN_MASK (0x00000400) +#define PDP_INTENAB_INTEN_VID3URUN_LSBMASK (0x00000001) +#define PDP_INTENAB_INTEN_VID3URUN_SHIFT (10) +#define PDP_INTENAB_INTEN_VID3URUN_LENGTH (1) +#define PDP_INTENAB_INTEN_VID3URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_VID2URUN +*/ +#define 
PDP_INTENAB_INTEN_VID2URUN_MASK (0x00000200) +#define PDP_INTENAB_INTEN_VID2URUN_LSBMASK (0x00000001) +#define PDP_INTENAB_INTEN_VID2URUN_SHIFT (9) +#define PDP_INTENAB_INTEN_VID2URUN_LENGTH (1) +#define PDP_INTENAB_INTEN_VID2URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_VID1URUN +*/ +#define PDP_INTENAB_INTEN_VID1URUN_MASK (0x00000100) +#define PDP_INTENAB_INTEN_VID1URUN_LSBMASK (0x00000001) +#define PDP_INTENAB_INTEN_VID1URUN_SHIFT (8) +#define PDP_INTENAB_INTEN_VID1URUN_LENGTH (1) +#define PDP_INTENAB_INTEN_VID1URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_GRPH4URUN +*/ +#define PDP_INTENAB_INTEN_GRPH4URUN_MASK (0x00000080) +#define PDP_INTENAB_INTEN_GRPH4URUN_LSBMASK (0x00000001) +#define PDP_INTENAB_INTEN_GRPH4URUN_SHIFT (7) +#define PDP_INTENAB_INTEN_GRPH4URUN_LENGTH (1) +#define PDP_INTENAB_INTEN_GRPH4URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_GRPH3URUN +*/ +#define PDP_INTENAB_INTEN_GRPH3URUN_MASK (0x00000040) +#define PDP_INTENAB_INTEN_GRPH3URUN_LSBMASK (0x00000001) +#define PDP_INTENAB_INTEN_GRPH3URUN_SHIFT (6) +#define PDP_INTENAB_INTEN_GRPH3URUN_LENGTH (1) +#define PDP_INTENAB_INTEN_GRPH3URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_GRPH2URUN +*/ +#define PDP_INTENAB_INTEN_GRPH2URUN_MASK (0x00000020) +#define PDP_INTENAB_INTEN_GRPH2URUN_LSBMASK (0x00000001) +#define PDP_INTENAB_INTEN_GRPH2URUN_SHIFT (5) +#define PDP_INTENAB_INTEN_GRPH2URUN_LENGTH (1) +#define PDP_INTENAB_INTEN_GRPH2URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_GRPH1URUN +*/ +#define PDP_INTENAB_INTEN_GRPH1URUN_MASK (0x00000010) +#define PDP_INTENAB_INTEN_GRPH1URUN_LSBMASK (0x00000001) +#define PDP_INTENAB_INTEN_GRPH1URUN_SHIFT (4) +#define PDP_INTENAB_INTEN_GRPH1URUN_LENGTH (1) +#define PDP_INTENAB_INTEN_GRPH1URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_VBLNK1 +*/ +#define PDP_INTENAB_INTEN_VBLNK1_MASK (0x00000008) +#define PDP_INTENAB_INTEN_VBLNK1_LSBMASK (0x00000001) +#define PDP_INTENAB_INTEN_VBLNK1_SHIFT (3) +#define 
PDP_INTENAB_INTEN_VBLNK1_LENGTH (1) +#define PDP_INTENAB_INTEN_VBLNK1_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_VBLNK0 +*/ +#define PDP_INTENAB_INTEN_VBLNK0_MASK (0x00000004) +#define PDP_INTENAB_INTEN_VBLNK0_LSBMASK (0x00000001) +#define PDP_INTENAB_INTEN_VBLNK0_SHIFT (2) +#define PDP_INTENAB_INTEN_VBLNK0_LENGTH (1) +#define PDP_INTENAB_INTEN_VBLNK0_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_HBLNK1 +*/ +#define PDP_INTENAB_INTEN_HBLNK1_MASK (0x00000002) +#define PDP_INTENAB_INTEN_HBLNK1_LSBMASK (0x00000001) +#define PDP_INTENAB_INTEN_HBLNK1_SHIFT (1) +#define PDP_INTENAB_INTEN_HBLNK1_LENGTH (1) +#define PDP_INTENAB_INTEN_HBLNK1_SIGNED_FIELD IMG_FALSE + +/* PDP, INTENAB, INTEN_HBLNK0 +*/ +#define PDP_INTENAB_INTEN_HBLNK0_MASK (0x00000001) +#define PDP_INTENAB_INTEN_HBLNK0_LSBMASK (0x00000001) +#define PDP_INTENAB_INTEN_HBLNK0_SHIFT (0) +#define PDP_INTENAB_INTEN_HBLNK0_LENGTH (1) +#define PDP_INTENAB_INTEN_HBLNK0_SIGNED_FIELD IMG_FALSE + +#define PDP_INTCLR_OFFSET (0x07A4) + +/* PDP, INTCLR, INTCLR_VID4ORUN +*/ +#define PDP_INTCLR_INTCLR_VID4ORUN_MASK (0x00080000) +#define PDP_INTCLR_INTCLR_VID4ORUN_LSBMASK (0x00000001) +#define PDP_INTCLR_INTCLR_VID4ORUN_SHIFT (19) +#define PDP_INTCLR_INTCLR_VID4ORUN_LENGTH (1) +#define PDP_INTCLR_INTCLR_VID4ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_VID3ORUN +*/ +#define PDP_INTCLR_INTCLR_VID3ORUN_MASK (0x00040000) +#define PDP_INTCLR_INTCLR_VID3ORUN_LSBMASK (0x00000001) +#define PDP_INTCLR_INTCLR_VID3ORUN_SHIFT (18) +#define PDP_INTCLR_INTCLR_VID3ORUN_LENGTH (1) +#define PDP_INTCLR_INTCLR_VID3ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_VID2ORUN +*/ +#define PDP_INTCLR_INTCLR_VID2ORUN_MASK (0x00020000) +#define PDP_INTCLR_INTCLR_VID2ORUN_LSBMASK (0x00000001) +#define PDP_INTCLR_INTCLR_VID2ORUN_SHIFT (17) +#define PDP_INTCLR_INTCLR_VID2ORUN_LENGTH (1) +#define PDP_INTCLR_INTCLR_VID2ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_VID1ORUN +*/ +#define PDP_INTCLR_INTCLR_VID1ORUN_MASK 
(0x00010000) +#define PDP_INTCLR_INTCLR_VID1ORUN_LSBMASK (0x00000001) +#define PDP_INTCLR_INTCLR_VID1ORUN_SHIFT (16) +#define PDP_INTCLR_INTCLR_VID1ORUN_LENGTH (1) +#define PDP_INTCLR_INTCLR_VID1ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_GRPH4ORUN +*/ +#define PDP_INTCLR_INTCLR_GRPH4ORUN_MASK (0x00008000) +#define PDP_INTCLR_INTCLR_GRPH4ORUN_LSBMASK (0x00000001) +#define PDP_INTCLR_INTCLR_GRPH4ORUN_SHIFT (15) +#define PDP_INTCLR_INTCLR_GRPH4ORUN_LENGTH (1) +#define PDP_INTCLR_INTCLR_GRPH4ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_GRPH3ORUN +*/ +#define PDP_INTCLR_INTCLR_GRPH3ORUN_MASK (0x00004000) +#define PDP_INTCLR_INTCLR_GRPH3ORUN_LSBMASK (0x00000001) +#define PDP_INTCLR_INTCLR_GRPH3ORUN_SHIFT (14) +#define PDP_INTCLR_INTCLR_GRPH3ORUN_LENGTH (1) +#define PDP_INTCLR_INTCLR_GRPH3ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_GRPH2ORUN +*/ +#define PDP_INTCLR_INTCLR_GRPH2ORUN_MASK (0x00002000) +#define PDP_INTCLR_INTCLR_GRPH2ORUN_LSBMASK (0x00000001) +#define PDP_INTCLR_INTCLR_GRPH2ORUN_SHIFT (13) +#define PDP_INTCLR_INTCLR_GRPH2ORUN_LENGTH (1) +#define PDP_INTCLR_INTCLR_GRPH2ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_GRPH1ORUN +*/ +#define PDP_INTCLR_INTCLR_GRPH1ORUN_MASK (0x00001000) +#define PDP_INTCLR_INTCLR_GRPH1ORUN_LSBMASK (0x00000001) +#define PDP_INTCLR_INTCLR_GRPH1ORUN_SHIFT (12) +#define PDP_INTCLR_INTCLR_GRPH1ORUN_LENGTH (1) +#define PDP_INTCLR_INTCLR_GRPH1ORUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_VID4URUN +*/ +#define PDP_INTCLR_INTCLR_VID4URUN_MASK (0x00000800) +#define PDP_INTCLR_INTCLR_VID4URUN_LSBMASK (0x00000001) +#define PDP_INTCLR_INTCLR_VID4URUN_SHIFT (11) +#define PDP_INTCLR_INTCLR_VID4URUN_LENGTH (1) +#define PDP_INTCLR_INTCLR_VID4URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_VID3URUN +*/ +#define PDP_INTCLR_INTCLR_VID3URUN_MASK (0x00000400) +#define PDP_INTCLR_INTCLR_VID3URUN_LSBMASK (0x00000001) +#define PDP_INTCLR_INTCLR_VID3URUN_SHIFT (10) +#define 
PDP_INTCLR_INTCLR_VID3URUN_LENGTH (1) +#define PDP_INTCLR_INTCLR_VID3URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_VID2URUN +*/ +#define PDP_INTCLR_INTCLR_VID2URUN_MASK (0x00000200) +#define PDP_INTCLR_INTCLR_VID2URUN_LSBMASK (0x00000001) +#define PDP_INTCLR_INTCLR_VID2URUN_SHIFT (9) +#define PDP_INTCLR_INTCLR_VID2URUN_LENGTH (1) +#define PDP_INTCLR_INTCLR_VID2URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_VID1URUN +*/ +#define PDP_INTCLR_INTCLR_VID1URUN_MASK (0x00000100) +#define PDP_INTCLR_INTCLR_VID1URUN_LSBMASK (0x00000001) +#define PDP_INTCLR_INTCLR_VID1URUN_SHIFT (8) +#define PDP_INTCLR_INTCLR_VID1URUN_LENGTH (1) +#define PDP_INTCLR_INTCLR_VID1URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_GRPH4URUN +*/ +#define PDP_INTCLR_INTCLR_GRPH4URUN_MASK (0x00000080) +#define PDP_INTCLR_INTCLR_GRPH4URUN_LSBMASK (0x00000001) +#define PDP_INTCLR_INTCLR_GRPH4URUN_SHIFT (7) +#define PDP_INTCLR_INTCLR_GRPH4URUN_LENGTH (1) +#define PDP_INTCLR_INTCLR_GRPH4URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_GRPH3URUN +*/ +#define PDP_INTCLR_INTCLR_GRPH3URUN_MASK (0x00000040) +#define PDP_INTCLR_INTCLR_GRPH3URUN_LSBMASK (0x00000001) +#define PDP_INTCLR_INTCLR_GRPH3URUN_SHIFT (6) +#define PDP_INTCLR_INTCLR_GRPH3URUN_LENGTH (1) +#define PDP_INTCLR_INTCLR_GRPH3URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_GRPH2URUN +*/ +#define PDP_INTCLR_INTCLR_GRPH2URUN_MASK (0x00000020) +#define PDP_INTCLR_INTCLR_GRPH2URUN_LSBMASK (0x00000001) +#define PDP_INTCLR_INTCLR_GRPH2URUN_SHIFT (5) +#define PDP_INTCLR_INTCLR_GRPH2URUN_LENGTH (1) +#define PDP_INTCLR_INTCLR_GRPH2URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_GRPH1URUN +*/ +#define PDP_INTCLR_INTCLR_GRPH1URUN_MASK (0x00000010) +#define PDP_INTCLR_INTCLR_GRPH1URUN_LSBMASK (0x00000001) +#define PDP_INTCLR_INTCLR_GRPH1URUN_SHIFT (4) +#define PDP_INTCLR_INTCLR_GRPH1URUN_LENGTH (1) +#define PDP_INTCLR_INTCLR_GRPH1URUN_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_VBLNK1 +*/ +#define 
PDP_INTCLR_INTCLR_VBLNK1_MASK (0x00000008) +#define PDP_INTCLR_INTCLR_VBLNK1_LSBMASK (0x00000001) +#define PDP_INTCLR_INTCLR_VBLNK1_SHIFT (3) +#define PDP_INTCLR_INTCLR_VBLNK1_LENGTH (1) +#define PDP_INTCLR_INTCLR_VBLNK1_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_VBLNK0 +*/ +#define PDP_INTCLR_INTCLR_VBLNK0_MASK (0x00000004) +#define PDP_INTCLR_INTCLR_VBLNK0_LSBMASK (0x00000001) +#define PDP_INTCLR_INTCLR_VBLNK0_SHIFT (2) +#define PDP_INTCLR_INTCLR_VBLNK0_LENGTH (1) +#define PDP_INTCLR_INTCLR_VBLNK0_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_HBLNK1 +*/ +#define PDP_INTCLR_INTCLR_HBLNK1_MASK (0x00000002) +#define PDP_INTCLR_INTCLR_HBLNK1_LSBMASK (0x00000001) +#define PDP_INTCLR_INTCLR_HBLNK1_SHIFT (1) +#define PDP_INTCLR_INTCLR_HBLNK1_LENGTH (1) +#define PDP_INTCLR_INTCLR_HBLNK1_SIGNED_FIELD IMG_FALSE + +/* PDP, INTCLR, INTCLR_HBLNK0 +*/ +#define PDP_INTCLR_INTCLR_HBLNK0_MASK (0x00000001) +#define PDP_INTCLR_INTCLR_HBLNK0_LSBMASK (0x00000001) +#define PDP_INTCLR_INTCLR_HBLNK0_SHIFT (0) +#define PDP_INTCLR_INTCLR_HBLNK0_LENGTH (1) +#define PDP_INTCLR_INTCLR_HBLNK0_SIGNED_FIELD IMG_FALSE + +#define PDP_MEMCTRL_OFFSET (0x07A8) + +/* PDP, MEMCTRL, MEMREFRESH +*/ +#define PDP_MEMCTRL_MEMREFRESH_MASK (0xC0000000) +#define PDP_MEMCTRL_MEMREFRESH_LSBMASK (0x00000003) +#define PDP_MEMCTRL_MEMREFRESH_SHIFT (30) +#define PDP_MEMCTRL_MEMREFRESH_LENGTH (2) +#define PDP_MEMCTRL_MEMREFRESH_SIGNED_FIELD IMG_FALSE + +/* PDP, MEMCTRL, BURSTLEN +*/ +#define PDP_MEMCTRL_BURSTLEN_MASK (0x000000FF) +#define PDP_MEMCTRL_BURSTLEN_LSBMASK (0x000000FF) +#define PDP_MEMCTRL_BURSTLEN_SHIFT (0) +#define PDP_MEMCTRL_BURSTLEN_LENGTH (8) +#define PDP_MEMCTRL_BURSTLEN_SIGNED_FIELD IMG_FALSE + +#define PDP_MEM_THRESH_OFFSET (0x07AC) + +/* PDP, MEM_THRESH, UVTHRESHOLD +*/ +#define PDP_MEM_THRESH_UVTHRESHOLD_MASK (0xFF000000) +#define PDP_MEM_THRESH_UVTHRESHOLD_LSBMASK (0x000000FF) +#define PDP_MEM_THRESH_UVTHRESHOLD_SHIFT (24) +#define PDP_MEM_THRESH_UVTHRESHOLD_LENGTH (8) +#define 
PDP_MEM_THRESH_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, MEM_THRESH, YTHRESHOLD +*/ +#define PDP_MEM_THRESH_YTHRESHOLD_MASK (0x001FF000) +#define PDP_MEM_THRESH_YTHRESHOLD_LSBMASK (0x000001FF) +#define PDP_MEM_THRESH_YTHRESHOLD_SHIFT (12) +#define PDP_MEM_THRESH_YTHRESHOLD_LENGTH (9) +#define PDP_MEM_THRESH_YTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, MEM_THRESH, THRESHOLD +*/ +#define PDP_MEM_THRESH_THRESHOLD_MASK (0x000001FF) +#define PDP_MEM_THRESH_THRESHOLD_LSBMASK (0x000001FF) +#define PDP_MEM_THRESH_THRESHOLD_SHIFT (0) +#define PDP_MEM_THRESH_THRESHOLD_LENGTH (9) +#define PDP_MEM_THRESH_THRESHOLD_SIGNED_FIELD IMG_FALSE + +#define PDP_ALTERNATE_3D_CTRL_OFFSET (0x07B0) + +/* PDP, ALTERNATE_3D_CTRL, ALT3D_ON +*/ +#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_MASK (0x00000010) +#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LSBMASK (0x00000001) +#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SHIFT (4) +#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LENGTH (1) +#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SIGNED_FIELD IMG_FALSE + +/* PDP, ALTERNATE_3D_CTRL, ALT3D_BLENDSEL +*/ +#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_MASK (0x00000007) +#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LSBMASK (0x00000007) +#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SHIFT (0) +#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LENGTH (3) +#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA0_R_OFFSET (0x07B4) + +/* PDP, GAMMA0_R, GAMMA0_R +*/ +#define PDP_GAMMA0_R_GAMMA0_R_MASK (0x000003FF) +#define PDP_GAMMA0_R_GAMMA0_R_LSBMASK (0x000003FF) +#define PDP_GAMMA0_R_GAMMA0_R_SHIFT (0) +#define PDP_GAMMA0_R_GAMMA0_R_LENGTH (10) +#define PDP_GAMMA0_R_GAMMA0_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA0_GB_OFFSET (0x07B8) + +/* PDP, GAMMA0_GB, GAMMA0_G +*/ +#define PDP_GAMMA0_GB_GAMMA0_G_MASK (0x03FF0000) +#define PDP_GAMMA0_GB_GAMMA0_G_LSBMASK (0x000003FF) +#define PDP_GAMMA0_GB_GAMMA0_G_SHIFT (16) +#define PDP_GAMMA0_GB_GAMMA0_G_LENGTH (10) +#define PDP_GAMMA0_GB_GAMMA0_G_SIGNED_FIELD 
IMG_FALSE + +/* PDP, GAMMA0_GB, GAMMA0_B +*/ +#define PDP_GAMMA0_GB_GAMMA0_B_MASK (0x000003FF) +#define PDP_GAMMA0_GB_GAMMA0_B_LSBMASK (0x000003FF) +#define PDP_GAMMA0_GB_GAMMA0_B_SHIFT (0) +#define PDP_GAMMA0_GB_GAMMA0_B_LENGTH (10) +#define PDP_GAMMA0_GB_GAMMA0_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA1_R_OFFSET (0x07BC) + +/* PDP, GAMMA1_R, GAMMA1_R +*/ +#define PDP_GAMMA1_R_GAMMA1_R_MASK (0x000003FF) +#define PDP_GAMMA1_R_GAMMA1_R_LSBMASK (0x000003FF) +#define PDP_GAMMA1_R_GAMMA1_R_SHIFT (0) +#define PDP_GAMMA1_R_GAMMA1_R_LENGTH (10) +#define PDP_GAMMA1_R_GAMMA1_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA1_GB_OFFSET (0x07C0) + +/* PDP, GAMMA1_GB, GAMMA1_G +*/ +#define PDP_GAMMA1_GB_GAMMA1_G_MASK (0x03FF0000) +#define PDP_GAMMA1_GB_GAMMA1_G_LSBMASK (0x000003FF) +#define PDP_GAMMA1_GB_GAMMA1_G_SHIFT (16) +#define PDP_GAMMA1_GB_GAMMA1_G_LENGTH (10) +#define PDP_GAMMA1_GB_GAMMA1_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA1_GB, GAMMA1_B +*/ +#define PDP_GAMMA1_GB_GAMMA1_B_MASK (0x000003FF) +#define PDP_GAMMA1_GB_GAMMA1_B_LSBMASK (0x000003FF) +#define PDP_GAMMA1_GB_GAMMA1_B_SHIFT (0) +#define PDP_GAMMA1_GB_GAMMA1_B_LENGTH (10) +#define PDP_GAMMA1_GB_GAMMA1_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA2_R_OFFSET (0x07C4) + +/* PDP, GAMMA2_R, GAMMA2_R +*/ +#define PDP_GAMMA2_R_GAMMA2_R_MASK (0x000003FF) +#define PDP_GAMMA2_R_GAMMA2_R_LSBMASK (0x000003FF) +#define PDP_GAMMA2_R_GAMMA2_R_SHIFT (0) +#define PDP_GAMMA2_R_GAMMA2_R_LENGTH (10) +#define PDP_GAMMA2_R_GAMMA2_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA2_GB_OFFSET (0x07C8) + +/* PDP, GAMMA2_GB, GAMMA2_G +*/ +#define PDP_GAMMA2_GB_GAMMA2_G_MASK (0x03FF0000) +#define PDP_GAMMA2_GB_GAMMA2_G_LSBMASK (0x000003FF) +#define PDP_GAMMA2_GB_GAMMA2_G_SHIFT (16) +#define PDP_GAMMA2_GB_GAMMA2_G_LENGTH (10) +#define PDP_GAMMA2_GB_GAMMA2_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA2_GB, GAMMA2_B +*/ +#define PDP_GAMMA2_GB_GAMMA2_B_MASK (0x000003FF) +#define PDP_GAMMA2_GB_GAMMA2_B_LSBMASK (0x000003FF) +#define 
PDP_GAMMA2_GB_GAMMA2_B_SHIFT (0) +#define PDP_GAMMA2_GB_GAMMA2_B_LENGTH (10) +#define PDP_GAMMA2_GB_GAMMA2_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA3_R_OFFSET (0x07CC) + +/* PDP, GAMMA3_R, GAMMA3_R +*/ +#define PDP_GAMMA3_R_GAMMA3_R_MASK (0x000003FF) +#define PDP_GAMMA3_R_GAMMA3_R_LSBMASK (0x000003FF) +#define PDP_GAMMA3_R_GAMMA3_R_SHIFT (0) +#define PDP_GAMMA3_R_GAMMA3_R_LENGTH (10) +#define PDP_GAMMA3_R_GAMMA3_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA3_GB_OFFSET (0x07D0) + +/* PDP, GAMMA3_GB, GAMMA3_G +*/ +#define PDP_GAMMA3_GB_GAMMA3_G_MASK (0x03FF0000) +#define PDP_GAMMA3_GB_GAMMA3_G_LSBMASK (0x000003FF) +#define PDP_GAMMA3_GB_GAMMA3_G_SHIFT (16) +#define PDP_GAMMA3_GB_GAMMA3_G_LENGTH (10) +#define PDP_GAMMA3_GB_GAMMA3_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA3_GB, GAMMA3_B +*/ +#define PDP_GAMMA3_GB_GAMMA3_B_MASK (0x000003FF) +#define PDP_GAMMA3_GB_GAMMA3_B_LSBMASK (0x000003FF) +#define PDP_GAMMA3_GB_GAMMA3_B_SHIFT (0) +#define PDP_GAMMA3_GB_GAMMA3_B_LENGTH (10) +#define PDP_GAMMA3_GB_GAMMA3_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA4_R_OFFSET (0x07D4) + +/* PDP, GAMMA4_R, GAMMA4_R +*/ +#define PDP_GAMMA4_R_GAMMA4_R_MASK (0x000003FF) +#define PDP_GAMMA4_R_GAMMA4_R_LSBMASK (0x000003FF) +#define PDP_GAMMA4_R_GAMMA4_R_SHIFT (0) +#define PDP_GAMMA4_R_GAMMA4_R_LENGTH (10) +#define PDP_GAMMA4_R_GAMMA4_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA4_GB_OFFSET (0x07D8) + +/* PDP, GAMMA4_GB, GAMMA4_G +*/ +#define PDP_GAMMA4_GB_GAMMA4_G_MASK (0x03FF0000) +#define PDP_GAMMA4_GB_GAMMA4_G_LSBMASK (0x000003FF) +#define PDP_GAMMA4_GB_GAMMA4_G_SHIFT (16) +#define PDP_GAMMA4_GB_GAMMA4_G_LENGTH (10) +#define PDP_GAMMA4_GB_GAMMA4_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA4_GB, GAMMA4_B +*/ +#define PDP_GAMMA4_GB_GAMMA4_B_MASK (0x000003FF) +#define PDP_GAMMA4_GB_GAMMA4_B_LSBMASK (0x000003FF) +#define PDP_GAMMA4_GB_GAMMA4_B_SHIFT (0) +#define PDP_GAMMA4_GB_GAMMA4_B_LENGTH (10) +#define PDP_GAMMA4_GB_GAMMA4_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA5_R_OFFSET (0x07DC) + 
+/* PDP, GAMMA5_R, GAMMA5_R +*/ +#define PDP_GAMMA5_R_GAMMA5_R_MASK (0x000003FF) +#define PDP_GAMMA5_R_GAMMA5_R_LSBMASK (0x000003FF) +#define PDP_GAMMA5_R_GAMMA5_R_SHIFT (0) +#define PDP_GAMMA5_R_GAMMA5_R_LENGTH (10) +#define PDP_GAMMA5_R_GAMMA5_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA5_GB_OFFSET (0x07E0) + +/* PDP, GAMMA5_GB, GAMMA5_G +*/ +#define PDP_GAMMA5_GB_GAMMA5_G_MASK (0x03FF0000) +#define PDP_GAMMA5_GB_GAMMA5_G_LSBMASK (0x000003FF) +#define PDP_GAMMA5_GB_GAMMA5_G_SHIFT (16) +#define PDP_GAMMA5_GB_GAMMA5_G_LENGTH (10) +#define PDP_GAMMA5_GB_GAMMA5_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA5_GB, GAMMA5_B +*/ +#define PDP_GAMMA5_GB_GAMMA5_B_MASK (0x000003FF) +#define PDP_GAMMA5_GB_GAMMA5_B_LSBMASK (0x000003FF) +#define PDP_GAMMA5_GB_GAMMA5_B_SHIFT (0) +#define PDP_GAMMA5_GB_GAMMA5_B_LENGTH (10) +#define PDP_GAMMA5_GB_GAMMA5_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA6_R_OFFSET (0x07E4) + +/* PDP, GAMMA6_R, GAMMA6_R +*/ +#define PDP_GAMMA6_R_GAMMA6_R_MASK (0x000003FF) +#define PDP_GAMMA6_R_GAMMA6_R_LSBMASK (0x000003FF) +#define PDP_GAMMA6_R_GAMMA6_R_SHIFT (0) +#define PDP_GAMMA6_R_GAMMA6_R_LENGTH (10) +#define PDP_GAMMA6_R_GAMMA6_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA6_GB_OFFSET (0x07E8) + +/* PDP, GAMMA6_GB, GAMMA6_G +*/ +#define PDP_GAMMA6_GB_GAMMA6_G_MASK (0x03FF0000) +#define PDP_GAMMA6_GB_GAMMA6_G_LSBMASK (0x000003FF) +#define PDP_GAMMA6_GB_GAMMA6_G_SHIFT (16) +#define PDP_GAMMA6_GB_GAMMA6_G_LENGTH (10) +#define PDP_GAMMA6_GB_GAMMA6_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA6_GB, GAMMA6_B +*/ +#define PDP_GAMMA6_GB_GAMMA6_B_MASK (0x000003FF) +#define PDP_GAMMA6_GB_GAMMA6_B_LSBMASK (0x000003FF) +#define PDP_GAMMA6_GB_GAMMA6_B_SHIFT (0) +#define PDP_GAMMA6_GB_GAMMA6_B_LENGTH (10) +#define PDP_GAMMA6_GB_GAMMA6_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA7_R_OFFSET (0x07EC) + +/* PDP, GAMMA7_R, GAMMA7_R +*/ +#define PDP_GAMMA7_R_GAMMA7_R_MASK (0x000003FF) +#define PDP_GAMMA7_R_GAMMA7_R_LSBMASK (0x000003FF) +#define PDP_GAMMA7_R_GAMMA7_R_SHIFT (0) 
+#define PDP_GAMMA7_R_GAMMA7_R_LENGTH (10) +#define PDP_GAMMA7_R_GAMMA7_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA7_GB_OFFSET (0x07F0) + +/* PDP, GAMMA7_GB, GAMMA7_G +*/ +#define PDP_GAMMA7_GB_GAMMA7_G_MASK (0x03FF0000) +#define PDP_GAMMA7_GB_GAMMA7_G_LSBMASK (0x000003FF) +#define PDP_GAMMA7_GB_GAMMA7_G_SHIFT (16) +#define PDP_GAMMA7_GB_GAMMA7_G_LENGTH (10) +#define PDP_GAMMA7_GB_GAMMA7_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA7_GB, GAMMA7_B +*/ +#define PDP_GAMMA7_GB_GAMMA7_B_MASK (0x000003FF) +#define PDP_GAMMA7_GB_GAMMA7_B_LSBMASK (0x000003FF) +#define PDP_GAMMA7_GB_GAMMA7_B_SHIFT (0) +#define PDP_GAMMA7_GB_GAMMA7_B_LENGTH (10) +#define PDP_GAMMA7_GB_GAMMA7_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA8_R_OFFSET (0x07F4) + +/* PDP, GAMMA8_R, GAMMA8_R +*/ +#define PDP_GAMMA8_R_GAMMA8_R_MASK (0x000003FF) +#define PDP_GAMMA8_R_GAMMA8_R_LSBMASK (0x000003FF) +#define PDP_GAMMA8_R_GAMMA8_R_SHIFT (0) +#define PDP_GAMMA8_R_GAMMA8_R_LENGTH (10) +#define PDP_GAMMA8_R_GAMMA8_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA8_GB_OFFSET (0x07F8) + +/* PDP, GAMMA8_GB, GAMMA8_G +*/ +#define PDP_GAMMA8_GB_GAMMA8_G_MASK (0x03FF0000) +#define PDP_GAMMA8_GB_GAMMA8_G_LSBMASK (0x000003FF) +#define PDP_GAMMA8_GB_GAMMA8_G_SHIFT (16) +#define PDP_GAMMA8_GB_GAMMA8_G_LENGTH (10) +#define PDP_GAMMA8_GB_GAMMA8_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA8_GB, GAMMA8_B +*/ +#define PDP_GAMMA8_GB_GAMMA8_B_MASK (0x000003FF) +#define PDP_GAMMA8_GB_GAMMA8_B_LSBMASK (0x000003FF) +#define PDP_GAMMA8_GB_GAMMA8_B_SHIFT (0) +#define PDP_GAMMA8_GB_GAMMA8_B_LENGTH (10) +#define PDP_GAMMA8_GB_GAMMA8_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA9_R_OFFSET (0x07FC) + +/* PDP, GAMMA9_R, GAMMA9_R +*/ +#define PDP_GAMMA9_R_GAMMA9_R_MASK (0x000003FF) +#define PDP_GAMMA9_R_GAMMA9_R_LSBMASK (0x000003FF) +#define PDP_GAMMA9_R_GAMMA9_R_SHIFT (0) +#define PDP_GAMMA9_R_GAMMA9_R_LENGTH (10) +#define PDP_GAMMA9_R_GAMMA9_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA9_GB_OFFSET (0x0800) + +/* PDP, GAMMA9_GB, GAMMA9_G +*/ 
+#define PDP_GAMMA9_GB_GAMMA9_G_MASK (0x03FF0000) +#define PDP_GAMMA9_GB_GAMMA9_G_LSBMASK (0x000003FF) +#define PDP_GAMMA9_GB_GAMMA9_G_SHIFT (16) +#define PDP_GAMMA9_GB_GAMMA9_G_LENGTH (10) +#define PDP_GAMMA9_GB_GAMMA9_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA9_GB, GAMMA9_B +*/ +#define PDP_GAMMA9_GB_GAMMA9_B_MASK (0x000003FF) +#define PDP_GAMMA9_GB_GAMMA9_B_LSBMASK (0x000003FF) +#define PDP_GAMMA9_GB_GAMMA9_B_SHIFT (0) +#define PDP_GAMMA9_GB_GAMMA9_B_LENGTH (10) +#define PDP_GAMMA9_GB_GAMMA9_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA10_R_OFFSET (0x0804) + +/* PDP, GAMMA10_R, GAMMA10_R +*/ +#define PDP_GAMMA10_R_GAMMA10_R_MASK (0x000003FF) +#define PDP_GAMMA10_R_GAMMA10_R_LSBMASK (0x000003FF) +#define PDP_GAMMA10_R_GAMMA10_R_SHIFT (0) +#define PDP_GAMMA10_R_GAMMA10_R_LENGTH (10) +#define PDP_GAMMA10_R_GAMMA10_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA10_GB_OFFSET (0x0808) + +/* PDP, GAMMA10_GB, GAMMA10_G +*/ +#define PDP_GAMMA10_GB_GAMMA10_G_MASK (0x03FF0000) +#define PDP_GAMMA10_GB_GAMMA10_G_LSBMASK (0x000003FF) +#define PDP_GAMMA10_GB_GAMMA10_G_SHIFT (16) +#define PDP_GAMMA10_GB_GAMMA10_G_LENGTH (10) +#define PDP_GAMMA10_GB_GAMMA10_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA10_GB, GAMMA10_B +*/ +#define PDP_GAMMA10_GB_GAMMA10_B_MASK (0x000003FF) +#define PDP_GAMMA10_GB_GAMMA10_B_LSBMASK (0x000003FF) +#define PDP_GAMMA10_GB_GAMMA10_B_SHIFT (0) +#define PDP_GAMMA10_GB_GAMMA10_B_LENGTH (10) +#define PDP_GAMMA10_GB_GAMMA10_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA11_R_OFFSET (0x080C) + +/* PDP, GAMMA11_R, GAMMA11_R +*/ +#define PDP_GAMMA11_R_GAMMA11_R_MASK (0x000003FF) +#define PDP_GAMMA11_R_GAMMA11_R_LSBMASK (0x000003FF) +#define PDP_GAMMA11_R_GAMMA11_R_SHIFT (0) +#define PDP_GAMMA11_R_GAMMA11_R_LENGTH (10) +#define PDP_GAMMA11_R_GAMMA11_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA11_GB_OFFSET (0x0810) + +/* PDP, GAMMA11_GB, GAMMA11_G +*/ +#define PDP_GAMMA11_GB_GAMMA11_G_MASK (0x03FF0000) +#define PDP_GAMMA11_GB_GAMMA11_G_LSBMASK (0x000003FF) +#define 
PDP_GAMMA11_GB_GAMMA11_G_SHIFT (16) +#define PDP_GAMMA11_GB_GAMMA11_G_LENGTH (10) +#define PDP_GAMMA11_GB_GAMMA11_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA11_GB, GAMMA11_B +*/ +#define PDP_GAMMA11_GB_GAMMA11_B_MASK (0x000003FF) +#define PDP_GAMMA11_GB_GAMMA11_B_LSBMASK (0x000003FF) +#define PDP_GAMMA11_GB_GAMMA11_B_SHIFT (0) +#define PDP_GAMMA11_GB_GAMMA11_B_LENGTH (10) +#define PDP_GAMMA11_GB_GAMMA11_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA12_R_OFFSET (0x0814) + +/* PDP, GAMMA12_R, GAMMA12_R +*/ +#define PDP_GAMMA12_R_GAMMA12_R_MASK (0x000003FF) +#define PDP_GAMMA12_R_GAMMA12_R_LSBMASK (0x000003FF) +#define PDP_GAMMA12_R_GAMMA12_R_SHIFT (0) +#define PDP_GAMMA12_R_GAMMA12_R_LENGTH (10) +#define PDP_GAMMA12_R_GAMMA12_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA12_GB_OFFSET (0x0818) + +/* PDP, GAMMA12_GB, GAMMA12_G +*/ +#define PDP_GAMMA12_GB_GAMMA12_G_MASK (0x03FF0000) +#define PDP_GAMMA12_GB_GAMMA12_G_LSBMASK (0x000003FF) +#define PDP_GAMMA12_GB_GAMMA12_G_SHIFT (16) +#define PDP_GAMMA12_GB_GAMMA12_G_LENGTH (10) +#define PDP_GAMMA12_GB_GAMMA12_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA12_GB, GAMMA12_B +*/ +#define PDP_GAMMA12_GB_GAMMA12_B_MASK (0x000003FF) +#define PDP_GAMMA12_GB_GAMMA12_B_LSBMASK (0x000003FF) +#define PDP_GAMMA12_GB_GAMMA12_B_SHIFT (0) +#define PDP_GAMMA12_GB_GAMMA12_B_LENGTH (10) +#define PDP_GAMMA12_GB_GAMMA12_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA13_R_OFFSET (0x081C) + +/* PDP, GAMMA13_R, GAMMA13_R +*/ +#define PDP_GAMMA13_R_GAMMA13_R_MASK (0x000003FF) +#define PDP_GAMMA13_R_GAMMA13_R_LSBMASK (0x000003FF) +#define PDP_GAMMA13_R_GAMMA13_R_SHIFT (0) +#define PDP_GAMMA13_R_GAMMA13_R_LENGTH (10) +#define PDP_GAMMA13_R_GAMMA13_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA13_GB_OFFSET (0x0820) + +/* PDP, GAMMA13_GB, GAMMA13_G +*/ +#define PDP_GAMMA13_GB_GAMMA13_G_MASK (0x03FF0000) +#define PDP_GAMMA13_GB_GAMMA13_G_LSBMASK (0x000003FF) +#define PDP_GAMMA13_GB_GAMMA13_G_SHIFT (16) +#define PDP_GAMMA13_GB_GAMMA13_G_LENGTH (10) +#define 
PDP_GAMMA13_GB_GAMMA13_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA13_GB, GAMMA13_B +*/ +#define PDP_GAMMA13_GB_GAMMA13_B_MASK (0x000003FF) +#define PDP_GAMMA13_GB_GAMMA13_B_LSBMASK (0x000003FF) +#define PDP_GAMMA13_GB_GAMMA13_B_SHIFT (0) +#define PDP_GAMMA13_GB_GAMMA13_B_LENGTH (10) +#define PDP_GAMMA13_GB_GAMMA13_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA14_R_OFFSET (0x0824) + +/* PDP, GAMMA14_R, GAMMA14_R +*/ +#define PDP_GAMMA14_R_GAMMA14_R_MASK (0x000003FF) +#define PDP_GAMMA14_R_GAMMA14_R_LSBMASK (0x000003FF) +#define PDP_GAMMA14_R_GAMMA14_R_SHIFT (0) +#define PDP_GAMMA14_R_GAMMA14_R_LENGTH (10) +#define PDP_GAMMA14_R_GAMMA14_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA14_GB_OFFSET (0x0828) + +/* PDP, GAMMA14_GB, GAMMA14_G +*/ +#define PDP_GAMMA14_GB_GAMMA14_G_MASK (0x03FF0000) +#define PDP_GAMMA14_GB_GAMMA14_G_LSBMASK (0x000003FF) +#define PDP_GAMMA14_GB_GAMMA14_G_SHIFT (16) +#define PDP_GAMMA14_GB_GAMMA14_G_LENGTH (10) +#define PDP_GAMMA14_GB_GAMMA14_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA14_GB, GAMMA14_B +*/ +#define PDP_GAMMA14_GB_GAMMA14_B_MASK (0x000003FF) +#define PDP_GAMMA14_GB_GAMMA14_B_LSBMASK (0x000003FF) +#define PDP_GAMMA14_GB_GAMMA14_B_SHIFT (0) +#define PDP_GAMMA14_GB_GAMMA14_B_LENGTH (10) +#define PDP_GAMMA14_GB_GAMMA14_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA15_R_OFFSET (0x082C) + +/* PDP, GAMMA15_R, GAMMA15_R +*/ +#define PDP_GAMMA15_R_GAMMA15_R_MASK (0x000003FF) +#define PDP_GAMMA15_R_GAMMA15_R_LSBMASK (0x000003FF) +#define PDP_GAMMA15_R_GAMMA15_R_SHIFT (0) +#define PDP_GAMMA15_R_GAMMA15_R_LENGTH (10) +#define PDP_GAMMA15_R_GAMMA15_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA15_GB_OFFSET (0x0830) + +/* PDP, GAMMA15_GB, GAMMA15_G +*/ +#define PDP_GAMMA15_GB_GAMMA15_G_MASK (0x03FF0000) +#define PDP_GAMMA15_GB_GAMMA15_G_LSBMASK (0x000003FF) +#define PDP_GAMMA15_GB_GAMMA15_G_SHIFT (16) +#define PDP_GAMMA15_GB_GAMMA15_G_LENGTH (10) +#define PDP_GAMMA15_GB_GAMMA15_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA15_GB, GAMMA15_B +*/ +#define 
PDP_GAMMA15_GB_GAMMA15_B_MASK (0x000003FF) +#define PDP_GAMMA15_GB_GAMMA15_B_LSBMASK (0x000003FF) +#define PDP_GAMMA15_GB_GAMMA15_B_SHIFT (0) +#define PDP_GAMMA15_GB_GAMMA15_B_LENGTH (10) +#define PDP_GAMMA15_GB_GAMMA15_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA16_R_OFFSET (0x0834) + +/* PDP, GAMMA16_R, GAMMA16_R +*/ +#define PDP_GAMMA16_R_GAMMA16_R_MASK (0x000003FF) +#define PDP_GAMMA16_R_GAMMA16_R_LSBMASK (0x000003FF) +#define PDP_GAMMA16_R_GAMMA16_R_SHIFT (0) +#define PDP_GAMMA16_R_GAMMA16_R_LENGTH (10) +#define PDP_GAMMA16_R_GAMMA16_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA16_GB_OFFSET (0x0838) + +/* PDP, GAMMA16_GB, GAMMA16_G +*/ +#define PDP_GAMMA16_GB_GAMMA16_G_MASK (0x03FF0000) +#define PDP_GAMMA16_GB_GAMMA16_G_LSBMASK (0x000003FF) +#define PDP_GAMMA16_GB_GAMMA16_G_SHIFT (16) +#define PDP_GAMMA16_GB_GAMMA16_G_LENGTH (10) +#define PDP_GAMMA16_GB_GAMMA16_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA16_GB, GAMMA16_B +*/ +#define PDP_GAMMA16_GB_GAMMA16_B_MASK (0x000003FF) +#define PDP_GAMMA16_GB_GAMMA16_B_LSBMASK (0x000003FF) +#define PDP_GAMMA16_GB_GAMMA16_B_SHIFT (0) +#define PDP_GAMMA16_GB_GAMMA16_B_LENGTH (10) +#define PDP_GAMMA16_GB_GAMMA16_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA17_R_OFFSET (0x083C) + +/* PDP, GAMMA17_R, GAMMA17_R +*/ +#define PDP_GAMMA17_R_GAMMA17_R_MASK (0x000003FF) +#define PDP_GAMMA17_R_GAMMA17_R_LSBMASK (0x000003FF) +#define PDP_GAMMA17_R_GAMMA17_R_SHIFT (0) +#define PDP_GAMMA17_R_GAMMA17_R_LENGTH (10) +#define PDP_GAMMA17_R_GAMMA17_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA17_GB_OFFSET (0x0840) + +/* PDP, GAMMA17_GB, GAMMA17_G +*/ +#define PDP_GAMMA17_GB_GAMMA17_G_MASK (0x03FF0000) +#define PDP_GAMMA17_GB_GAMMA17_G_LSBMASK (0x000003FF) +#define PDP_GAMMA17_GB_GAMMA17_G_SHIFT (16) +#define PDP_GAMMA17_GB_GAMMA17_G_LENGTH (10) +#define PDP_GAMMA17_GB_GAMMA17_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA17_GB, GAMMA17_B +*/ +#define PDP_GAMMA17_GB_GAMMA17_B_MASK (0x000003FF) +#define PDP_GAMMA17_GB_GAMMA17_B_LSBMASK (0x000003FF) 
+#define PDP_GAMMA17_GB_GAMMA17_B_SHIFT (0) +#define PDP_GAMMA17_GB_GAMMA17_B_LENGTH (10) +#define PDP_GAMMA17_GB_GAMMA17_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA18_R_OFFSET (0x0844) + +/* PDP, GAMMA18_R, GAMMA18_R +*/ +#define PDP_GAMMA18_R_GAMMA18_R_MASK (0x000003FF) +#define PDP_GAMMA18_R_GAMMA18_R_LSBMASK (0x000003FF) +#define PDP_GAMMA18_R_GAMMA18_R_SHIFT (0) +#define PDP_GAMMA18_R_GAMMA18_R_LENGTH (10) +#define PDP_GAMMA18_R_GAMMA18_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA18_GB_OFFSET (0x0848) + +/* PDP, GAMMA18_GB, GAMMA18_G +*/ +#define PDP_GAMMA18_GB_GAMMA18_G_MASK (0x03FF0000) +#define PDP_GAMMA18_GB_GAMMA18_G_LSBMASK (0x000003FF) +#define PDP_GAMMA18_GB_GAMMA18_G_SHIFT (16) +#define PDP_GAMMA18_GB_GAMMA18_G_LENGTH (10) +#define PDP_GAMMA18_GB_GAMMA18_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA18_GB, GAMMA18_B +*/ +#define PDP_GAMMA18_GB_GAMMA18_B_MASK (0x000003FF) +#define PDP_GAMMA18_GB_GAMMA18_B_LSBMASK (0x000003FF) +#define PDP_GAMMA18_GB_GAMMA18_B_SHIFT (0) +#define PDP_GAMMA18_GB_GAMMA18_B_LENGTH (10) +#define PDP_GAMMA18_GB_GAMMA18_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA19_R_OFFSET (0x084C) + +/* PDP, GAMMA19_R, GAMMA19_R +*/ +#define PDP_GAMMA19_R_GAMMA19_R_MASK (0x000003FF) +#define PDP_GAMMA19_R_GAMMA19_R_LSBMASK (0x000003FF) +#define PDP_GAMMA19_R_GAMMA19_R_SHIFT (0) +#define PDP_GAMMA19_R_GAMMA19_R_LENGTH (10) +#define PDP_GAMMA19_R_GAMMA19_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA19_GB_OFFSET (0x0850) + +/* PDP, GAMMA19_GB, GAMMA19_G +*/ +#define PDP_GAMMA19_GB_GAMMA19_G_MASK (0x03FF0000) +#define PDP_GAMMA19_GB_GAMMA19_G_LSBMASK (0x000003FF) +#define PDP_GAMMA19_GB_GAMMA19_G_SHIFT (16) +#define PDP_GAMMA19_GB_GAMMA19_G_LENGTH (10) +#define PDP_GAMMA19_GB_GAMMA19_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA19_GB, GAMMA19_B +*/ +#define PDP_GAMMA19_GB_GAMMA19_B_MASK (0x000003FF) +#define PDP_GAMMA19_GB_GAMMA19_B_LSBMASK (0x000003FF) +#define PDP_GAMMA19_GB_GAMMA19_B_SHIFT (0) +#define PDP_GAMMA19_GB_GAMMA19_B_LENGTH (10) 
+#define PDP_GAMMA19_GB_GAMMA19_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA20_R_OFFSET (0x0854) + +/* PDP, GAMMA20_R, GAMMA20_R +*/ +#define PDP_GAMMA20_R_GAMMA20_R_MASK (0x000003FF) +#define PDP_GAMMA20_R_GAMMA20_R_LSBMASK (0x000003FF) +#define PDP_GAMMA20_R_GAMMA20_R_SHIFT (0) +#define PDP_GAMMA20_R_GAMMA20_R_LENGTH (10) +#define PDP_GAMMA20_R_GAMMA20_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA20_GB_OFFSET (0x0858) + +/* PDP, GAMMA20_GB, GAMMA20_G +*/ +#define PDP_GAMMA20_GB_GAMMA20_G_MASK (0x03FF0000) +#define PDP_GAMMA20_GB_GAMMA20_G_LSBMASK (0x000003FF) +#define PDP_GAMMA20_GB_GAMMA20_G_SHIFT (16) +#define PDP_GAMMA20_GB_GAMMA20_G_LENGTH (10) +#define PDP_GAMMA20_GB_GAMMA20_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA20_GB, GAMMA20_B +*/ +#define PDP_GAMMA20_GB_GAMMA20_B_MASK (0x000003FF) +#define PDP_GAMMA20_GB_GAMMA20_B_LSBMASK (0x000003FF) +#define PDP_GAMMA20_GB_GAMMA20_B_SHIFT (0) +#define PDP_GAMMA20_GB_GAMMA20_B_LENGTH (10) +#define PDP_GAMMA20_GB_GAMMA20_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA21_R_OFFSET (0x085C) + +/* PDP, GAMMA21_R, GAMMA21_R +*/ +#define PDP_GAMMA21_R_GAMMA21_R_MASK (0x000003FF) +#define PDP_GAMMA21_R_GAMMA21_R_LSBMASK (0x000003FF) +#define PDP_GAMMA21_R_GAMMA21_R_SHIFT (0) +#define PDP_GAMMA21_R_GAMMA21_R_LENGTH (10) +#define PDP_GAMMA21_R_GAMMA21_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA21_GB_OFFSET (0x0860) + +/* PDP, GAMMA21_GB, GAMMA21_G +*/ +#define PDP_GAMMA21_GB_GAMMA21_G_MASK (0x03FF0000) +#define PDP_GAMMA21_GB_GAMMA21_G_LSBMASK (0x000003FF) +#define PDP_GAMMA21_GB_GAMMA21_G_SHIFT (16) +#define PDP_GAMMA21_GB_GAMMA21_G_LENGTH (10) +#define PDP_GAMMA21_GB_GAMMA21_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA21_GB, GAMMA21_B +*/ +#define PDP_GAMMA21_GB_GAMMA21_B_MASK (0x000003FF) +#define PDP_GAMMA21_GB_GAMMA21_B_LSBMASK (0x000003FF) +#define PDP_GAMMA21_GB_GAMMA21_B_SHIFT (0) +#define PDP_GAMMA21_GB_GAMMA21_B_LENGTH (10) +#define PDP_GAMMA21_GB_GAMMA21_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA22_R_OFFSET (0x0864) 
+ +/* PDP, GAMMA22_R, GAMMA22_R +*/ +#define PDP_GAMMA22_R_GAMMA22_R_MASK (0x000003FF) +#define PDP_GAMMA22_R_GAMMA22_R_LSBMASK (0x000003FF) +#define PDP_GAMMA22_R_GAMMA22_R_SHIFT (0) +#define PDP_GAMMA22_R_GAMMA22_R_LENGTH (10) +#define PDP_GAMMA22_R_GAMMA22_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA22_GB_OFFSET (0x0868) + +/* PDP, GAMMA22_GB, GAMMA22_G +*/ +#define PDP_GAMMA22_GB_GAMMA22_G_MASK (0x03FF0000) +#define PDP_GAMMA22_GB_GAMMA22_G_LSBMASK (0x000003FF) +#define PDP_GAMMA22_GB_GAMMA22_G_SHIFT (16) +#define PDP_GAMMA22_GB_GAMMA22_G_LENGTH (10) +#define PDP_GAMMA22_GB_GAMMA22_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA22_GB, GAMMA22_B +*/ +#define PDP_GAMMA22_GB_GAMMA22_B_MASK (0x000003FF) +#define PDP_GAMMA22_GB_GAMMA22_B_LSBMASK (0x000003FF) +#define PDP_GAMMA22_GB_GAMMA22_B_SHIFT (0) +#define PDP_GAMMA22_GB_GAMMA22_B_LENGTH (10) +#define PDP_GAMMA22_GB_GAMMA22_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA23_R_OFFSET (0x086C) + +/* PDP, GAMMA23_R, GAMMA23_R +*/ +#define PDP_GAMMA23_R_GAMMA23_R_MASK (0x000003FF) +#define PDP_GAMMA23_R_GAMMA23_R_LSBMASK (0x000003FF) +#define PDP_GAMMA23_R_GAMMA23_R_SHIFT (0) +#define PDP_GAMMA23_R_GAMMA23_R_LENGTH (10) +#define PDP_GAMMA23_R_GAMMA23_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA23_GB_OFFSET (0x0870) + +/* PDP, GAMMA23_GB, GAMMA23_G +*/ +#define PDP_GAMMA23_GB_GAMMA23_G_MASK (0x03FF0000) +#define PDP_GAMMA23_GB_GAMMA23_G_LSBMASK (0x000003FF) +#define PDP_GAMMA23_GB_GAMMA23_G_SHIFT (16) +#define PDP_GAMMA23_GB_GAMMA23_G_LENGTH (10) +#define PDP_GAMMA23_GB_GAMMA23_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA23_GB, GAMMA23_B +*/ +#define PDP_GAMMA23_GB_GAMMA23_B_MASK (0x000003FF) +#define PDP_GAMMA23_GB_GAMMA23_B_LSBMASK (0x000003FF) +#define PDP_GAMMA23_GB_GAMMA23_B_SHIFT (0) +#define PDP_GAMMA23_GB_GAMMA23_B_LENGTH (10) +#define PDP_GAMMA23_GB_GAMMA23_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA24_R_OFFSET (0x0874) + +/* PDP, GAMMA24_R, GAMMA24_R +*/ +#define PDP_GAMMA24_R_GAMMA24_R_MASK (0x000003FF) +#define 
PDP_GAMMA24_R_GAMMA24_R_LSBMASK (0x000003FF) +#define PDP_GAMMA24_R_GAMMA24_R_SHIFT (0) +#define PDP_GAMMA24_R_GAMMA24_R_LENGTH (10) +#define PDP_GAMMA24_R_GAMMA24_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA24_GB_OFFSET (0x0878) + +/* PDP, GAMMA24_GB, GAMMA24_G +*/ +#define PDP_GAMMA24_GB_GAMMA24_G_MASK (0x03FF0000) +#define PDP_GAMMA24_GB_GAMMA24_G_LSBMASK (0x000003FF) +#define PDP_GAMMA24_GB_GAMMA24_G_SHIFT (16) +#define PDP_GAMMA24_GB_GAMMA24_G_LENGTH (10) +#define PDP_GAMMA24_GB_GAMMA24_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA24_GB, GAMMA24_B +*/ +#define PDP_GAMMA24_GB_GAMMA24_B_MASK (0x000003FF) +#define PDP_GAMMA24_GB_GAMMA24_B_LSBMASK (0x000003FF) +#define PDP_GAMMA24_GB_GAMMA24_B_SHIFT (0) +#define PDP_GAMMA24_GB_GAMMA24_B_LENGTH (10) +#define PDP_GAMMA24_GB_GAMMA24_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA25_R_OFFSET (0x087C) + +/* PDP, GAMMA25_R, GAMMA25_R +*/ +#define PDP_GAMMA25_R_GAMMA25_R_MASK (0x000003FF) +#define PDP_GAMMA25_R_GAMMA25_R_LSBMASK (0x000003FF) +#define PDP_GAMMA25_R_GAMMA25_R_SHIFT (0) +#define PDP_GAMMA25_R_GAMMA25_R_LENGTH (10) +#define PDP_GAMMA25_R_GAMMA25_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA25_GB_OFFSET (0x0880) + +/* PDP, GAMMA25_GB, GAMMA25_G +*/ +#define PDP_GAMMA25_GB_GAMMA25_G_MASK (0x03FF0000) +#define PDP_GAMMA25_GB_GAMMA25_G_LSBMASK (0x000003FF) +#define PDP_GAMMA25_GB_GAMMA25_G_SHIFT (16) +#define PDP_GAMMA25_GB_GAMMA25_G_LENGTH (10) +#define PDP_GAMMA25_GB_GAMMA25_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA25_GB, GAMMA25_B +*/ +#define PDP_GAMMA25_GB_GAMMA25_B_MASK (0x000003FF) +#define PDP_GAMMA25_GB_GAMMA25_B_LSBMASK (0x000003FF) +#define PDP_GAMMA25_GB_GAMMA25_B_SHIFT (0) +#define PDP_GAMMA25_GB_GAMMA25_B_LENGTH (10) +#define PDP_GAMMA25_GB_GAMMA25_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA26_R_OFFSET (0x0884) + +/* PDP, GAMMA26_R, GAMMA26_R +*/ +#define PDP_GAMMA26_R_GAMMA26_R_MASK (0x000003FF) +#define PDP_GAMMA26_R_GAMMA26_R_LSBMASK (0x000003FF) +#define PDP_GAMMA26_R_GAMMA26_R_SHIFT (0) +#define 
PDP_GAMMA26_R_GAMMA26_R_LENGTH (10) +#define PDP_GAMMA26_R_GAMMA26_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA26_GB_OFFSET (0x0888) + +/* PDP, GAMMA26_GB, GAMMA26_G +*/ +#define PDP_GAMMA26_GB_GAMMA26_G_MASK (0x03FF0000) +#define PDP_GAMMA26_GB_GAMMA26_G_LSBMASK (0x000003FF) +#define PDP_GAMMA26_GB_GAMMA26_G_SHIFT (16) +#define PDP_GAMMA26_GB_GAMMA26_G_LENGTH (10) +#define PDP_GAMMA26_GB_GAMMA26_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA26_GB, GAMMA26_B +*/ +#define PDP_GAMMA26_GB_GAMMA26_B_MASK (0x000003FF) +#define PDP_GAMMA26_GB_GAMMA26_B_LSBMASK (0x000003FF) +#define PDP_GAMMA26_GB_GAMMA26_B_SHIFT (0) +#define PDP_GAMMA26_GB_GAMMA26_B_LENGTH (10) +#define PDP_GAMMA26_GB_GAMMA26_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA27_R_OFFSET (0x088C) + +/* PDP, GAMMA27_R, GAMMA27_R +*/ +#define PDP_GAMMA27_R_GAMMA27_R_MASK (0x000003FF) +#define PDP_GAMMA27_R_GAMMA27_R_LSBMASK (0x000003FF) +#define PDP_GAMMA27_R_GAMMA27_R_SHIFT (0) +#define PDP_GAMMA27_R_GAMMA27_R_LENGTH (10) +#define PDP_GAMMA27_R_GAMMA27_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA27_GB_OFFSET (0x0890) + +/* PDP, GAMMA27_GB, GAMMA27_G +*/ +#define PDP_GAMMA27_GB_GAMMA27_G_MASK (0x03FF0000) +#define PDP_GAMMA27_GB_GAMMA27_G_LSBMASK (0x000003FF) +#define PDP_GAMMA27_GB_GAMMA27_G_SHIFT (16) +#define PDP_GAMMA27_GB_GAMMA27_G_LENGTH (10) +#define PDP_GAMMA27_GB_GAMMA27_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA27_GB, GAMMA27_B +*/ +#define PDP_GAMMA27_GB_GAMMA27_B_MASK (0x000003FF) +#define PDP_GAMMA27_GB_GAMMA27_B_LSBMASK (0x000003FF) +#define PDP_GAMMA27_GB_GAMMA27_B_SHIFT (0) +#define PDP_GAMMA27_GB_GAMMA27_B_LENGTH (10) +#define PDP_GAMMA27_GB_GAMMA27_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA28_R_OFFSET (0x0894) + +/* PDP, GAMMA28_R, GAMMA28_R +*/ +#define PDP_GAMMA28_R_GAMMA28_R_MASK (0x000003FF) +#define PDP_GAMMA28_R_GAMMA28_R_LSBMASK (0x000003FF) +#define PDP_GAMMA28_R_GAMMA28_R_SHIFT (0) +#define PDP_GAMMA28_R_GAMMA28_R_LENGTH (10) +#define PDP_GAMMA28_R_GAMMA28_R_SIGNED_FIELD IMG_FALSE + 
+#define PDP_GAMMA28_GB_OFFSET (0x0898) + +/* PDP, GAMMA28_GB, GAMMA28_G +*/ +#define PDP_GAMMA28_GB_GAMMA28_G_MASK (0x03FF0000) +#define PDP_GAMMA28_GB_GAMMA28_G_LSBMASK (0x000003FF) +#define PDP_GAMMA28_GB_GAMMA28_G_SHIFT (16) +#define PDP_GAMMA28_GB_GAMMA28_G_LENGTH (10) +#define PDP_GAMMA28_GB_GAMMA28_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA28_GB, GAMMA28_B +*/ +#define PDP_GAMMA28_GB_GAMMA28_B_MASK (0x000003FF) +#define PDP_GAMMA28_GB_GAMMA28_B_LSBMASK (0x000003FF) +#define PDP_GAMMA28_GB_GAMMA28_B_SHIFT (0) +#define PDP_GAMMA28_GB_GAMMA28_B_LENGTH (10) +#define PDP_GAMMA28_GB_GAMMA28_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA29_R_OFFSET (0x089C) + +/* PDP, GAMMA29_R, GAMMA29_R +*/ +#define PDP_GAMMA29_R_GAMMA29_R_MASK (0x000003FF) +#define PDP_GAMMA29_R_GAMMA29_R_LSBMASK (0x000003FF) +#define PDP_GAMMA29_R_GAMMA29_R_SHIFT (0) +#define PDP_GAMMA29_R_GAMMA29_R_LENGTH (10) +#define PDP_GAMMA29_R_GAMMA29_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA29_GB_OFFSET (0x08A0) + +/* PDP, GAMMA29_GB, GAMMA29_G +*/ +#define PDP_GAMMA29_GB_GAMMA29_G_MASK (0x03FF0000) +#define PDP_GAMMA29_GB_GAMMA29_G_LSBMASK (0x000003FF) +#define PDP_GAMMA29_GB_GAMMA29_G_SHIFT (16) +#define PDP_GAMMA29_GB_GAMMA29_G_LENGTH (10) +#define PDP_GAMMA29_GB_GAMMA29_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA29_GB, GAMMA29_B +*/ +#define PDP_GAMMA29_GB_GAMMA29_B_MASK (0x000003FF) +#define PDP_GAMMA29_GB_GAMMA29_B_LSBMASK (0x000003FF) +#define PDP_GAMMA29_GB_GAMMA29_B_SHIFT (0) +#define PDP_GAMMA29_GB_GAMMA29_B_LENGTH (10) +#define PDP_GAMMA29_GB_GAMMA29_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA30_R_OFFSET (0x08A4) + +/* PDP, GAMMA30_R, GAMMA30_R +*/ +#define PDP_GAMMA30_R_GAMMA30_R_MASK (0x000003FF) +#define PDP_GAMMA30_R_GAMMA30_R_LSBMASK (0x000003FF) +#define PDP_GAMMA30_R_GAMMA30_R_SHIFT (0) +#define PDP_GAMMA30_R_GAMMA30_R_LENGTH (10) +#define PDP_GAMMA30_R_GAMMA30_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA30_GB_OFFSET (0x08A8) + +/* PDP, GAMMA30_GB, GAMMA30_G +*/ +#define 
PDP_GAMMA30_GB_GAMMA30_G_MASK (0x03FF0000) +#define PDP_GAMMA30_GB_GAMMA30_G_LSBMASK (0x000003FF) +#define PDP_GAMMA30_GB_GAMMA30_G_SHIFT (16) +#define PDP_GAMMA30_GB_GAMMA30_G_LENGTH (10) +#define PDP_GAMMA30_GB_GAMMA30_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA30_GB, GAMMA30_B +*/ +#define PDP_GAMMA30_GB_GAMMA30_B_MASK (0x000003FF) +#define PDP_GAMMA30_GB_GAMMA30_B_LSBMASK (0x000003FF) +#define PDP_GAMMA30_GB_GAMMA30_B_SHIFT (0) +#define PDP_GAMMA30_GB_GAMMA30_B_LENGTH (10) +#define PDP_GAMMA30_GB_GAMMA30_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA31_R_OFFSET (0x08AC) + +/* PDP, GAMMA31_R, GAMMA31_R +*/ +#define PDP_GAMMA31_R_GAMMA31_R_MASK (0x000003FF) +#define PDP_GAMMA31_R_GAMMA31_R_LSBMASK (0x000003FF) +#define PDP_GAMMA31_R_GAMMA31_R_SHIFT (0) +#define PDP_GAMMA31_R_GAMMA31_R_LENGTH (10) +#define PDP_GAMMA31_R_GAMMA31_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA31_GB_OFFSET (0x08B0) + +/* PDP, GAMMA31_GB, GAMMA31_G +*/ +#define PDP_GAMMA31_GB_GAMMA31_G_MASK (0x03FF0000) +#define PDP_GAMMA31_GB_GAMMA31_G_LSBMASK (0x000003FF) +#define PDP_GAMMA31_GB_GAMMA31_G_SHIFT (16) +#define PDP_GAMMA31_GB_GAMMA31_G_LENGTH (10) +#define PDP_GAMMA31_GB_GAMMA31_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA31_GB, GAMMA31_B +*/ +#define PDP_GAMMA31_GB_GAMMA31_B_MASK (0x000003FF) +#define PDP_GAMMA31_GB_GAMMA31_B_LSBMASK (0x000003FF) +#define PDP_GAMMA31_GB_GAMMA31_B_SHIFT (0) +#define PDP_GAMMA31_GB_GAMMA31_B_LENGTH (10) +#define PDP_GAMMA31_GB_GAMMA31_B_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA32_R_OFFSET (0x08B4) + +/* PDP, GAMMA32_R, GAMMA32_R +*/ +#define PDP_GAMMA32_R_GAMMA32_R_MASK (0x000003FF) +#define PDP_GAMMA32_R_GAMMA32_R_LSBMASK (0x000003FF) +#define PDP_GAMMA32_R_GAMMA32_R_SHIFT (0) +#define PDP_GAMMA32_R_GAMMA32_R_LENGTH (10) +#define PDP_GAMMA32_R_GAMMA32_R_SIGNED_FIELD IMG_FALSE + +#define PDP_GAMMA32_GB_OFFSET (0x08B8) + +/* PDP, GAMMA32_GB, GAMMA32_G +*/ +#define PDP_GAMMA32_GB_GAMMA32_G_MASK (0x03FF0000) +#define PDP_GAMMA32_GB_GAMMA32_G_LSBMASK (0x000003FF) 
+#define PDP_GAMMA32_GB_GAMMA32_G_SHIFT (16) +#define PDP_GAMMA32_GB_GAMMA32_G_LENGTH (10) +#define PDP_GAMMA32_GB_GAMMA32_G_SIGNED_FIELD IMG_FALSE + +/* PDP, GAMMA32_GB, GAMMA32_B +*/ +#define PDP_GAMMA32_GB_GAMMA32_B_MASK (0x000003FF) +#define PDP_GAMMA32_GB_GAMMA32_B_LSBMASK (0x000003FF) +#define PDP_GAMMA32_GB_GAMMA32_B_SHIFT (0) +#define PDP_GAMMA32_GB_GAMMA32_B_LENGTH (10) +#define PDP_GAMMA32_GB_GAMMA32_B_SIGNED_FIELD IMG_FALSE + +#define PDP_VEVENT_OFFSET (0x08BC) + +/* PDP, VEVENT, VEVENT +*/ +#define PDP_VEVENT_VEVENT_MASK (0x1FFF0000) +#define PDP_VEVENT_VEVENT_LSBMASK (0x00001FFF) +#define PDP_VEVENT_VEVENT_SHIFT (16) +#define PDP_VEVENT_VEVENT_LENGTH (13) +#define PDP_VEVENT_VEVENT_SIGNED_FIELD IMG_FALSE + +/* PDP, VEVENT, VFETCH +*/ +#define PDP_VEVENT_VFETCH_MASK (0x00001FFF) +#define PDP_VEVENT_VFETCH_LSBMASK (0x00001FFF) +#define PDP_VEVENT_VFETCH_SHIFT (0) +#define PDP_VEVENT_VFETCH_LENGTH (13) +#define PDP_VEVENT_VFETCH_SIGNED_FIELD IMG_FALSE + +#define PDP_HDECTRL_OFFSET (0x08C0) + +/* PDP, HDECTRL, HDES +*/ +#define PDP_HDECTRL_HDES_MASK (0x1FFF0000) +#define PDP_HDECTRL_HDES_LSBMASK (0x00001FFF) +#define PDP_HDECTRL_HDES_SHIFT (16) +#define PDP_HDECTRL_HDES_LENGTH (13) +#define PDP_HDECTRL_HDES_SIGNED_FIELD IMG_FALSE + +/* PDP, HDECTRL, HDEF +*/ +#define PDP_HDECTRL_HDEF_MASK (0x00001FFF) +#define PDP_HDECTRL_HDEF_LSBMASK (0x00001FFF) +#define PDP_HDECTRL_HDEF_SHIFT (0) +#define PDP_HDECTRL_HDEF_LENGTH (13) +#define PDP_HDECTRL_HDEF_SIGNED_FIELD IMG_FALSE + +#define PDP_VDECTRL_OFFSET (0x08C4) + +/* PDP, VDECTRL, VDES +*/ +#define PDP_VDECTRL_VDES_MASK (0x1FFF0000) +#define PDP_VDECTRL_VDES_LSBMASK (0x00001FFF) +#define PDP_VDECTRL_VDES_SHIFT (16) +#define PDP_VDECTRL_VDES_LENGTH (13) +#define PDP_VDECTRL_VDES_SIGNED_FIELD IMG_FALSE + +/* PDP, VDECTRL, VDEF +*/ +#define PDP_VDECTRL_VDEF_MASK (0x00001FFF) +#define PDP_VDECTRL_VDEF_LSBMASK (0x00001FFF) +#define PDP_VDECTRL_VDEF_SHIFT (0) +#define PDP_VDECTRL_VDEF_LENGTH (13) +#define 
PDP_VDECTRL_VDEF_SIGNED_FIELD IMG_FALSE + +#define PDP_OPMASK_R_OFFSET (0x08C8) + +/* PDP, OPMASK_R, MASKLEVEL +*/ +#define PDP_OPMASK_R_MASKLEVEL_MASK (0x80000000) +#define PDP_OPMASK_R_MASKLEVEL_LSBMASK (0x00000001) +#define PDP_OPMASK_R_MASKLEVEL_SHIFT (31) +#define PDP_OPMASK_R_MASKLEVEL_LENGTH (1) +#define PDP_OPMASK_R_MASKLEVEL_SIGNED_FIELD IMG_FALSE + +/* PDP, OPMASK_R, BLANKLEVEL +*/ +#define PDP_OPMASK_R_BLANKLEVEL_MASK (0x40000000) +#define PDP_OPMASK_R_BLANKLEVEL_LSBMASK (0x00000001) +#define PDP_OPMASK_R_BLANKLEVEL_SHIFT (30) +#define PDP_OPMASK_R_BLANKLEVEL_LENGTH (1) +#define PDP_OPMASK_R_BLANKLEVEL_SIGNED_FIELD IMG_FALSE + +/* PDP, OPMASK_R, MASKR +*/ +#define PDP_OPMASK_R_MASKR_MASK (0x000003FF) +#define PDP_OPMASK_R_MASKR_LSBMASK (0x000003FF) +#define PDP_OPMASK_R_MASKR_SHIFT (0) +#define PDP_OPMASK_R_MASKR_LENGTH (10) +#define PDP_OPMASK_R_MASKR_SIGNED_FIELD IMG_FALSE + +#define PDP_OPMASK_GB_OFFSET (0x08CC) + +/* PDP, OPMASK_GB, MASKG +*/ +#define PDP_OPMASK_GB_MASKG_MASK (0x03FF0000) +#define PDP_OPMASK_GB_MASKG_LSBMASK (0x000003FF) +#define PDP_OPMASK_GB_MASKG_SHIFT (16) +#define PDP_OPMASK_GB_MASKG_LENGTH (10) +#define PDP_OPMASK_GB_MASKG_SIGNED_FIELD IMG_FALSE + +/* PDP, OPMASK_GB, MASKB +*/ +#define PDP_OPMASK_GB_MASKB_MASK (0x000003FF) +#define PDP_OPMASK_GB_MASKB_LSBMASK (0x000003FF) +#define PDP_OPMASK_GB_MASKB_SHIFT (0) +#define PDP_OPMASK_GB_MASKB_LENGTH (10) +#define PDP_OPMASK_GB_MASKB_SIGNED_FIELD IMG_FALSE + +#define PDP_REGLD_ADDR_CTRL_OFFSET (0x08D0) + +/* PDP, REGLD_ADDR_CTRL, REGLD_ADDRIN +*/ +#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_MASK (0xFFFFFFF0) +#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LSBMASK (0x0FFFFFFF) +#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SHIFT (4) +#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LENGTH (28) +#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SIGNED_FIELD IMG_FALSE + +#define PDP_REGLD_ADDR_STAT_OFFSET (0x08D4) + +/* PDP, REGLD_ADDR_STAT, REGLD_ADDROUT +*/ +#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_MASK 
(0xFFFFFFF0) +#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LSBMASK (0x0FFFFFFF) +#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SHIFT (4) +#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LENGTH (28) +#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SIGNED_FIELD IMG_FALSE + +#define PDP_REGLD_STAT_OFFSET (0x08D8) + +/* PDP, REGLD_STAT, REGLD_ADDREN +*/ +#define PDP_REGLD_STAT_REGLD_ADDREN_MASK (0x00800000) +#define PDP_REGLD_STAT_REGLD_ADDREN_LSBMASK (0x00000001) +#define PDP_REGLD_STAT_REGLD_ADDREN_SHIFT (23) +#define PDP_REGLD_STAT_REGLD_ADDREN_LENGTH (1) +#define PDP_REGLD_STAT_REGLD_ADDREN_SIGNED_FIELD IMG_FALSE + +#define PDP_REGLD_CTRL_OFFSET (0x08DC) + +/* PDP, REGLD_CTRL, REGLD_ADDRLEN +*/ +#define PDP_REGLD_CTRL_REGLD_ADDRLEN_MASK (0xFF000000) +#define PDP_REGLD_CTRL_REGLD_ADDRLEN_LSBMASK (0x000000FF) +#define PDP_REGLD_CTRL_REGLD_ADDRLEN_SHIFT (24) +#define PDP_REGLD_CTRL_REGLD_ADDRLEN_LENGTH (8) +#define PDP_REGLD_CTRL_REGLD_ADDRLEN_SIGNED_FIELD IMG_FALSE + +/* PDP, REGLD_CTRL, REGLD_VAL +*/ +#define PDP_REGLD_CTRL_REGLD_VAL_MASK (0x00800000) +#define PDP_REGLD_CTRL_REGLD_VAL_LSBMASK (0x00000001) +#define PDP_REGLD_CTRL_REGLD_VAL_SHIFT (23) +#define PDP_REGLD_CTRL_REGLD_VAL_LENGTH (1) +#define PDP_REGLD_CTRL_REGLD_VAL_SIGNED_FIELD IMG_FALSE + +#define PDP_UPDCTRL_OFFSET (0x08E0) + +/* PDP, UPDCTRL, UPDFIELD +*/ +#define PDP_UPDCTRL_UPDFIELD_MASK (0x00000001) +#define PDP_UPDCTRL_UPDFIELD_LSBMASK (0x00000001) +#define PDP_UPDCTRL_UPDFIELD_SHIFT (0) +#define PDP_UPDCTRL_UPDFIELD_LENGTH (1) +#define PDP_UPDCTRL_UPDFIELD_SIGNED_FIELD IMG_FALSE + +#define PDP_INTCTRL_OFFSET (0x08E4) + +/* PDP, PVR_PDP_INTCTRL, HBLNK_LINE +*/ +#define PDP_INTCTRL_HBLNK_LINE_MASK (0x00010000) +#define PDP_INTCTRL_HBLNK_LINE_LSBMASK (0x00000001) +#define PDP_INTCTRL_HBLNK_LINE_SHIFT (16) +#define PDP_INTCTRL_HBLNK_LINE_LENGTH (1) +#define PDP_INTCTRL_HBLNK_LINE_SIGNED_FIELD IMG_FALSE + +/* PDP, PVR_PDP_INTCTRL, HBLNK_LINENO +*/ +#define PDP_INTCTRL_HBLNK_LINENO_MASK (0x00001FFF) +#define 
PDP_INTCTRL_HBLNK_LINENO_LSBMASK (0x00001FFF) +#define PDP_INTCTRL_HBLNK_LINENO_SHIFT (0) +#define PDP_INTCTRL_HBLNK_LINENO_LENGTH (13) +#define PDP_INTCTRL_HBLNK_LINENO_SIGNED_FIELD IMG_FALSE + +#define PDP_PDISETUP_OFFSET (0x0900) + +/* PDP, PDISETUP, PDI_BLNKLVL +*/ +#define PDP_PDISETUP_PDI_BLNKLVL_MASK (0x00000040) +#define PDP_PDISETUP_PDI_BLNKLVL_LSBMASK (0x00000001) +#define PDP_PDISETUP_PDI_BLNKLVL_SHIFT (6) +#define PDP_PDISETUP_PDI_BLNKLVL_LENGTH (1) +#define PDP_PDISETUP_PDI_BLNKLVL_SIGNED_FIELD IMG_FALSE + +/* PDP, PDISETUP, PDI_BLNK +*/ +#define PDP_PDISETUP_PDI_BLNK_MASK (0x00000020) +#define PDP_PDISETUP_PDI_BLNK_LSBMASK (0x00000001) +#define PDP_PDISETUP_PDI_BLNK_SHIFT (5) +#define PDP_PDISETUP_PDI_BLNK_LENGTH (1) +#define PDP_PDISETUP_PDI_BLNK_SIGNED_FIELD IMG_FALSE + +/* PDP, PDISETUP, PDI_PWR +*/ +#define PDP_PDISETUP_PDI_PWR_MASK (0x00000010) +#define PDP_PDISETUP_PDI_PWR_LSBMASK (0x00000001) +#define PDP_PDISETUP_PDI_PWR_SHIFT (4) +#define PDP_PDISETUP_PDI_PWR_LENGTH (1) +#define PDP_PDISETUP_PDI_PWR_SIGNED_FIELD IMG_FALSE + +/* PDP, PDISETUP, PDI_EN +*/ +#define PDP_PDISETUP_PDI_EN_MASK (0x00000008) +#define PDP_PDISETUP_PDI_EN_LSBMASK (0x00000001) +#define PDP_PDISETUP_PDI_EN_SHIFT (3) +#define PDP_PDISETUP_PDI_EN_LENGTH (1) +#define PDP_PDISETUP_PDI_EN_SIGNED_FIELD IMG_FALSE + +/* PDP, PDISETUP, PDI_GDEN +*/ +#define PDP_PDISETUP_PDI_GDEN_MASK (0x00000004) +#define PDP_PDISETUP_PDI_GDEN_LSBMASK (0x00000001) +#define PDP_PDISETUP_PDI_GDEN_SHIFT (2) +#define PDP_PDISETUP_PDI_GDEN_LENGTH (1) +#define PDP_PDISETUP_PDI_GDEN_SIGNED_FIELD IMG_FALSE + +/* PDP, PDISETUP, PDI_NFEN +*/ +#define PDP_PDISETUP_PDI_NFEN_MASK (0x00000002) +#define PDP_PDISETUP_PDI_NFEN_LSBMASK (0x00000001) +#define PDP_PDISETUP_PDI_NFEN_SHIFT (1) +#define PDP_PDISETUP_PDI_NFEN_LENGTH (1) +#define PDP_PDISETUP_PDI_NFEN_SIGNED_FIELD IMG_FALSE + +/* PDP, PDISETUP, PDI_CR +*/ +#define PDP_PDISETUP_PDI_CR_MASK (0x00000001) +#define PDP_PDISETUP_PDI_CR_LSBMASK (0x00000001) 
+#define PDP_PDISETUP_PDI_CR_SHIFT (0) +#define PDP_PDISETUP_PDI_CR_LENGTH (1) +#define PDP_PDISETUP_PDI_CR_SIGNED_FIELD IMG_FALSE + +#define PDP_PDITIMING0_OFFSET (0x0904) + +/* PDP, PDITIMING0, PDI_PWRSVGD +*/ +#define PDP_PDITIMING0_PDI_PWRSVGD_MASK (0x0F000000) +#define PDP_PDITIMING0_PDI_PWRSVGD_LSBMASK (0x0000000F) +#define PDP_PDITIMING0_PDI_PWRSVGD_SHIFT (24) +#define PDP_PDITIMING0_PDI_PWRSVGD_LENGTH (4) +#define PDP_PDITIMING0_PDI_PWRSVGD_SIGNED_FIELD IMG_FALSE + +/* PDP, PDITIMING0, PDI_LSDEL +*/ +#define PDP_PDITIMING0_PDI_LSDEL_MASK (0x007F0000) +#define PDP_PDITIMING0_PDI_LSDEL_LSBMASK (0x0000007F) +#define PDP_PDITIMING0_PDI_LSDEL_SHIFT (16) +#define PDP_PDITIMING0_PDI_LSDEL_LENGTH (7) +#define PDP_PDITIMING0_PDI_LSDEL_SIGNED_FIELD IMG_FALSE + +/* PDP, PDITIMING0, PDI_PWRSV2GD2 +*/ +#define PDP_PDITIMING0_PDI_PWRSV2GD2_MASK (0x000003FF) +#define PDP_PDITIMING0_PDI_PWRSV2GD2_LSBMASK (0x000003FF) +#define PDP_PDITIMING0_PDI_PWRSV2GD2_SHIFT (0) +#define PDP_PDITIMING0_PDI_PWRSV2GD2_LENGTH (10) +#define PDP_PDITIMING0_PDI_PWRSV2GD2_SIGNED_FIELD IMG_FALSE + +#define PDP_PDITIMING1_OFFSET (0x0908) + +/* PDP, PDITIMING1, PDI_NLDEL +*/ +#define PDP_PDITIMING1_PDI_NLDEL_MASK (0x000F0000) +#define PDP_PDITIMING1_PDI_NLDEL_LSBMASK (0x0000000F) +#define PDP_PDITIMING1_PDI_NLDEL_SHIFT (16) +#define PDP_PDITIMING1_PDI_NLDEL_LENGTH (4) +#define PDP_PDITIMING1_PDI_NLDEL_SIGNED_FIELD IMG_FALSE + +/* PDP, PDITIMING1, PDI_ACBDEL +*/ +#define PDP_PDITIMING1_PDI_ACBDEL_MASK (0x000003FF) +#define PDP_PDITIMING1_PDI_ACBDEL_LSBMASK (0x000003FF) +#define PDP_PDITIMING1_PDI_ACBDEL_SHIFT (0) +#define PDP_PDITIMING1_PDI_ACBDEL_LENGTH (10) +#define PDP_PDITIMING1_PDI_ACBDEL_SIGNED_FIELD IMG_FALSE + +#define PDP_PDICOREID_OFFSET (0x090C) + +/* PDP, PDICOREID, PDI_GROUP_ID +*/ +#define PDP_PDICOREID_PDI_GROUP_ID_MASK (0xFF000000) +#define PDP_PDICOREID_PDI_GROUP_ID_LSBMASK (0x000000FF) +#define PDP_PDICOREID_PDI_GROUP_ID_SHIFT (24) +#define PDP_PDICOREID_PDI_GROUP_ID_LENGTH (8) 
+#define PDP_PDICOREID_PDI_GROUP_ID_SIGNED_FIELD IMG_FALSE + +/* PDP, PDICOREID, PDI_CORE_ID +*/ +#define PDP_PDICOREID_PDI_CORE_ID_MASK (0x00FF0000) +#define PDP_PDICOREID_PDI_CORE_ID_LSBMASK (0x000000FF) +#define PDP_PDICOREID_PDI_CORE_ID_SHIFT (16) +#define PDP_PDICOREID_PDI_CORE_ID_LENGTH (8) +#define PDP_PDICOREID_PDI_CORE_ID_SIGNED_FIELD IMG_FALSE + +/* PDP, PDICOREID, PDI_CONFIG_ID +*/ +#define PDP_PDICOREID_PDI_CONFIG_ID_MASK (0x0000FFFF) +#define PDP_PDICOREID_PDI_CONFIG_ID_LSBMASK (0x0000FFFF) +#define PDP_PDICOREID_PDI_CONFIG_ID_SHIFT (0) +#define PDP_PDICOREID_PDI_CONFIG_ID_LENGTH (16) +#define PDP_PDICOREID_PDI_CONFIG_ID_SIGNED_FIELD IMG_FALSE + +#define PDP_PDICOREREV_OFFSET (0x0910) + +/* PDP, PDICOREREV, PDI_MAJOR_REV +*/ +#define PDP_PDICOREREV_PDI_MAJOR_REV_MASK (0x00FF0000) +#define PDP_PDICOREREV_PDI_MAJOR_REV_LSBMASK (0x000000FF) +#define PDP_PDICOREREV_PDI_MAJOR_REV_SHIFT (16) +#define PDP_PDICOREREV_PDI_MAJOR_REV_LENGTH (8) +#define PDP_PDICOREREV_PDI_MAJOR_REV_SIGNED_FIELD IMG_FALSE + +/* PDP, PDICOREREV, PDI_MINOR_REV +*/ +#define PDP_PDICOREREV_PDI_MINOR_REV_MASK (0x0000FF00) +#define PDP_PDICOREREV_PDI_MINOR_REV_LSBMASK (0x000000FF) +#define PDP_PDICOREREV_PDI_MINOR_REV_SHIFT (8) +#define PDP_PDICOREREV_PDI_MINOR_REV_LENGTH (8) +#define PDP_PDICOREREV_PDI_MINOR_REV_SIGNED_FIELD IMG_FALSE + +/* PDP, PDICOREREV, PDI_MAINT_REV +*/ +#define PDP_PDICOREREV_PDI_MAINT_REV_MASK (0x000000FF) +#define PDP_PDICOREREV_PDI_MAINT_REV_LSBMASK (0x000000FF) +#define PDP_PDICOREREV_PDI_MAINT_REV_SHIFT (0) +#define PDP_PDICOREREV_PDI_MAINT_REV_LENGTH (8) +#define PDP_PDICOREREV_PDI_MAINT_REV_SIGNED_FIELD IMG_FALSE + +#define PDP_DITHERMATRIX2_OFFSET (0x0920) + +/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y1 +*/ +#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_MASK (0x000000C0) +#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LSBMASK (0x00000003) +#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SHIFT (6) +#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LENGTH (2) +#define 
PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y1 +*/ +#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_MASK (0x00000030) +#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LSBMASK (0x00000003) +#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SHIFT (4) +#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LENGTH (2) +#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y0 +*/ +#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_MASK (0x0000000C) +#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LSBMASK (0x00000003) +#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SHIFT (2) +#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LENGTH (2) +#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y0 +*/ +#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_MASK (0x00000003) +#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LSBMASK (0x00000003) +#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SHIFT (0) +#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LENGTH (2) +#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SIGNED_FIELD IMG_FALSE + +#define PDP_DITHERMATRIX4_0_OFFSET (0x0924) + +/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y1 +*/ +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_MASK (0xF0000000) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LSBMASK (0x0000000F) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SHIFT (28) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LENGTH (4) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y1 +*/ +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_MASK (0x0F000000) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LSBMASK (0x0000000F) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SHIFT (24) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LENGTH (4) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y1 +*/ +#define 
PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_MASK (0x00F00000) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LSBMASK (0x0000000F) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SHIFT (20) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LENGTH (4) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y1 +*/ +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_MASK (0x000F0000) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LSBMASK (0x0000000F) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SHIFT (16) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LENGTH (4) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y0 +*/ +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_MASK (0x0000F000) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LSBMASK (0x0000000F) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SHIFT (12) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LENGTH (4) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y0 +*/ +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_MASK (0x00000F00) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LSBMASK (0x0000000F) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SHIFT (8) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LENGTH (4) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y0 +*/ +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_MASK (0x000000F0) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LSBMASK (0x0000000F) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SHIFT (4) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LENGTH (4) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y0 +*/ +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_MASK (0x0000000F) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LSBMASK (0x0000000F) +#define 
PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SHIFT (0) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LENGTH (4) +#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SIGNED_FIELD IMG_FALSE + +#define PDP_DITHERMATRIX4_1_OFFSET (0x0928) + +/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y3 +*/ +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_MASK (0xF0000000) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LSBMASK (0x0000000F) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SHIFT (28) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LENGTH (4) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y3 +*/ +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_MASK (0x0F000000) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LSBMASK (0x0000000F) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SHIFT (24) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LENGTH (4) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y3 +*/ +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_MASK (0x00F00000) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LSBMASK (0x0000000F) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SHIFT (20) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LENGTH (4) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y3 +*/ +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_MASK (0x000F0000) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LSBMASK (0x0000000F) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SHIFT (16) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LENGTH (4) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y2 +*/ +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_MASK (0x0000F000) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LSBMASK (0x0000000F) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SHIFT (12) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LENGTH (4) 
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y2 +*/ +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_MASK (0x00000F00) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LSBMASK (0x0000000F) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SHIFT (8) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LENGTH (4) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y2 +*/ +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_MASK (0x000000F0) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LSBMASK (0x0000000F) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SHIFT (4) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LENGTH (4) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y2 +*/ +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_MASK (0x0000000F) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LSBMASK (0x0000000F) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SHIFT (0) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LENGTH (4) +#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SIGNED_FIELD IMG_FALSE + +#define PDP_DITHERMATRIX8_0_OFFSET (0x092C) + +/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X4Y0 +*/ +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_MASK (0x3F000000) +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SHIFT (24) +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LENGTH (6) +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X3Y0 +*/ +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_MASK (0x00FC0000) +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SHIFT (18) +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LENGTH (6) +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_0, 
DITHERMATRIX8X2Y0 +*/ +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_MASK (0x0003F000) +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SHIFT (12) +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LENGTH (6) +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X1Y0 +*/ +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_MASK (0x00000FC0) +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SHIFT (6) +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LENGTH (6) +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X0Y0 +*/ +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_MASK (0x0000003F) +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SHIFT (0) +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LENGTH (6) +#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SIGNED_FIELD IMG_FALSE + +#define PDP_DITHERMATRIX8_1_OFFSET (0x0930) + +/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X1Y1 +*/ +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_MASK (0x3F000000) +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SHIFT (24) +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LENGTH (6) +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X0Y1 +*/ +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_MASK (0x00FC0000) +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SHIFT (18) +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LENGTH (6) +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X7Y0 +*/ +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_MASK (0x0003F000) +#define 
PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SHIFT (12) +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LENGTH (6) +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X6Y0 +*/ +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_MASK (0x00000FC0) +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SHIFT (6) +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LENGTH (6) +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X5Y0 +*/ +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_MASK (0x0000003F) +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SHIFT (0) +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LENGTH (6) +#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SIGNED_FIELD IMG_FALSE + +#define PDP_DITHERMATRIX8_2_OFFSET (0x0934) + +/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X6Y1 +*/ +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_MASK (0x3F000000) +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SHIFT (24) +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LENGTH (6) +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X5Y1 +*/ +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_MASK (0x00FC0000) +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SHIFT (18) +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LENGTH (6) +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X4Y1 +*/ +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_MASK (0x0003F000) +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LSBMASK (0x0000003F) +#define 
PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SHIFT (12) +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LENGTH (6) +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X3Y1 +*/ +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_MASK (0x00000FC0) +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SHIFT (6) +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LENGTH (6) +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X2Y1 +*/ +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_MASK (0x0000003F) +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SHIFT (0) +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LENGTH (6) +#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SIGNED_FIELD IMG_FALSE + +#define PDP_DITHERMATRIX8_3_OFFSET (0x0938) + +/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X3Y2 +*/ +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_MASK (0x3F000000) +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SHIFT (24) +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LENGTH (6) +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X2Y2 +*/ +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_MASK (0x00FC0000) +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SHIFT (18) +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LENGTH (6) +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X1Y2 +*/ +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_MASK (0x0003F000) +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SHIFT (12) +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LENGTH (6) 
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X0Y2 +*/ +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_MASK (0x00000FC0) +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SHIFT (6) +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LENGTH (6) +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X7Y1 +*/ +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_MASK (0x0000003F) +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SHIFT (0) +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LENGTH (6) +#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SIGNED_FIELD IMG_FALSE + +#define PDP_DITHERMATRIX8_4_OFFSET (0x093C) + +/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X0Y3 +*/ +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_MASK (0x3F000000) +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SHIFT (24) +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LENGTH (6) +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X7Y2 +*/ +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_MASK (0x00FC0000) +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SHIFT (18) +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LENGTH (6) +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X6Y2 +*/ +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_MASK (0x0003F000) +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SHIFT (12) +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LENGTH (6) +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_4, 
DITHERMATRIX8X5Y2 +*/ +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_MASK (0x00000FC0) +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SHIFT (6) +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LENGTH (6) +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X4Y2 +*/ +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_MASK (0x0000003F) +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SHIFT (0) +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LENGTH (6) +#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SIGNED_FIELD IMG_FALSE + +#define PDP_DITHERMATRIX8_5_OFFSET (0x0940) + +/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X5Y3 +*/ +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_MASK (0x3F000000) +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SHIFT (24) +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LENGTH (6) +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X4Y3 +*/ +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_MASK (0x00FC0000) +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SHIFT (18) +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LENGTH (6) +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X3Y3 +*/ +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_MASK (0x0003F000) +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SHIFT (12) +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LENGTH (6) +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X2Y3 +*/ +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_MASK (0x00000FC0) +#define 
PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SHIFT (6) +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LENGTH (6) +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X1Y3 +*/ +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_MASK (0x0000003F) +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SHIFT (0) +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LENGTH (6) +#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SIGNED_FIELD IMG_FALSE + +#define PDP_DITHERMATRIX8_6_OFFSET (0x0944) + +/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X2Y4 +*/ +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_MASK (0x3F000000) +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SHIFT (24) +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LENGTH (6) +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X1Y4 +*/ +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_MASK (0x00FC0000) +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SHIFT (18) +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LENGTH (6) +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X0Y4 +*/ +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_MASK (0x0003F000) +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SHIFT (12) +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LENGTH (6) +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X7Y3 +*/ +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_MASK (0x00000FC0) +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LSBMASK (0x0000003F) +#define 
PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SHIFT (6) +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LENGTH (6) +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X6Y3 +*/ +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_MASK (0x0000003F) +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SHIFT (0) +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LENGTH (6) +#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SIGNED_FIELD IMG_FALSE + +#define PDP_DITHERMATRIX8_7_OFFSET (0x0948) + +/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X7Y4 +*/ +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_MASK (0x3F000000) +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SHIFT (24) +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LENGTH (6) +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X6Y4 +*/ +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_MASK (0x00FC0000) +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SHIFT (18) +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LENGTH (6) +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X5Y4 +*/ +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_MASK (0x0003F000) +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SHIFT (12) +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LENGTH (6) +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X4Y4 +*/ +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_MASK (0x00000FC0) +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SHIFT (6) +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LENGTH (6) 
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X3Y4 +*/ +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_MASK (0x0000003F) +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SHIFT (0) +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LENGTH (6) +#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SIGNED_FIELD IMG_FALSE + +#define PDP_DITHERMATRIX8_8_OFFSET (0x094C) + +/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X4Y5 +*/ +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_MASK (0x3F000000) +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SHIFT (24) +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LENGTH (6) +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X3Y5 +*/ +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_MASK (0x00FC0000) +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SHIFT (18) +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LENGTH (6) +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X2Y5 +*/ +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_MASK (0x0003F000) +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SHIFT (12) +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LENGTH (6) +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X1Y5 +*/ +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_MASK (0x00000FC0) +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SHIFT (6) +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LENGTH (6) +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_8, 
DITHERMATRIX8X0Y5 +*/ +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_MASK (0x0000003F) +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SHIFT (0) +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LENGTH (6) +#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SIGNED_FIELD IMG_FALSE + +#define PDP_DITHERMATRIX8_9_OFFSET (0x0950) + +/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X1Y6 +*/ +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_MASK (0x3F000000) +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SHIFT (24) +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LENGTH (6) +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X0Y6 +*/ +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_MASK (0x00FC0000) +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SHIFT (18) +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LENGTH (6) +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X7Y5 +*/ +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_MASK (0x0003F000) +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SHIFT (12) +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LENGTH (6) +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X6Y5 +*/ +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_MASK (0x00000FC0) +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SHIFT (6) +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LENGTH (6) +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X5Y5 +*/ +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_MASK (0x0000003F) +#define 
PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SHIFT (0) +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LENGTH (6) +#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SIGNED_FIELD IMG_FALSE + +#define PDP_DITHERMATRIX8_10_OFFSET (0x0954) + +/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X6Y6 +*/ +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_MASK (0x3F000000) +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SHIFT (24) +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LENGTH (6) +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X5Y6 +*/ +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_MASK (0x00FC0000) +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SHIFT (18) +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LENGTH (6) +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X4Y6 +*/ +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_MASK (0x0003F000) +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SHIFT (12) +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LENGTH (6) +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X3Y6 +*/ +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_MASK (0x00000FC0) +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SHIFT (6) +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LENGTH (6) +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X2Y6 +*/ +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_MASK (0x0000003F) +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LSBMASK (0x0000003F) +#define 
PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SHIFT (0) +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LENGTH (6) +#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SIGNED_FIELD IMG_FALSE + +#define PDP_DITHERMATRIX8_11_OFFSET (0x0958) + +/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X3Y7 +*/ +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_MASK (0x3F000000) +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SHIFT (24) +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LENGTH (6) +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X2Y7 +*/ +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_MASK (0x00FC0000) +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SHIFT (18) +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LENGTH (6) +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X1Y7 +*/ +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_MASK (0x0003F000) +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SHIFT (12) +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LENGTH (6) +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X0Y7 +*/ +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_MASK (0x00000FC0) +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SHIFT (6) +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LENGTH (6) +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X7Y6 +*/ +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_MASK (0x0000003F) +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SHIFT (0) +#define 
PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LENGTH (6) +#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SIGNED_FIELD IMG_FALSE + +#define PDP_DITHERMATRIX8_12_OFFSET (0x095C) + +/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X7Y7 +*/ +#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_MASK (0x00FC0000) +#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SHIFT (18) +#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LENGTH (6) +#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X6Y7 +*/ +#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_MASK (0x0003F000) +#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SHIFT (12) +#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LENGTH (6) +#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X5Y7 +*/ +#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_MASK (0x00000FC0) +#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SHIFT (6) +#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LENGTH (6) +#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SIGNED_FIELD IMG_FALSE + +/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X4Y7 +*/ +#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_MASK (0x0000003F) +#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LSBMASK (0x0000003F) +#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SHIFT (0) +#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LENGTH (6) +#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH1_MEMCTRL_OFFSET (0x0960) + +/* PDP, GRPH1_MEMCTRL, GRPH1_LOCAL_GLOBAL_MEMCTRL +*/ +#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) +#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) +#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) +#define 
PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) +#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1_MEMCTRL, GRPH1_BURSTLEN +*/ +#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_MASK (0x000000FF) +#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LSBMASK (0x000000FF) +#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SHIFT (0) +#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LENGTH (8) +#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH1_MEM_THRESH_OFFSET (0x0964) + +/* PDP, GRPH1_MEM_THRESH, GRPH1_UVTHRESHOLD +*/ +#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_MASK (0xFF000000) +#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LSBMASK (0x000000FF) +#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SHIFT (24) +#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LENGTH (8) +#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1_MEM_THRESH, GRPH1_YTHRESHOLD +*/ +#define PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_MASK (0x001FF000) +#define PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LSBMASK (0x000001FF) +#define PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SHIFT (12) +#define PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LENGTH (9) +#define PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1_MEM_THRESH, GRPH1_THRESHOLD +*/ +#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_MASK (0x000001FF) +#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LSBMASK (0x000001FF) +#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SHIFT (0) +#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LENGTH (9) +#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH2_MEMCTRL_OFFSET (0x0968) + +/* PDP, GRPH2_MEMCTRL, GRPH2_LOCAL_GLOBAL_MEMCTRL +*/ +#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) +#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) +#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) +#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LENGTH 
(1) +#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2_MEMCTRL, GRPH2_BURSTLEN +*/ +#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_MASK (0x000000FF) +#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LSBMASK (0x000000FF) +#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SHIFT (0) +#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LENGTH (8) +#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH2_MEM_THRESH_OFFSET (0x096C) + +/* PDP, GRPH2_MEM_THRESH, GRPH2_UVTHRESHOLD +*/ +#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_MASK (0xFF000000) +#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LSBMASK (0x000000FF) +#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SHIFT (24) +#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LENGTH (8) +#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2_MEM_THRESH, GRPH2_YTHRESHOLD +*/ +#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_MASK (0x001FF000) +#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LSBMASK (0x000001FF) +#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SHIFT (12) +#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LENGTH (9) +#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2_MEM_THRESH, GRPH2_THRESHOLD +*/ +#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_MASK (0x000001FF) +#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LSBMASK (0x000001FF) +#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SHIFT (0) +#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LENGTH (9) +#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH3_MEMCTRL_OFFSET (0x0970) + +/* PDP, GRPH3_MEMCTRL, GRPH3_LOCAL_GLOBAL_MEMCTRL +*/ +#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) +#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) +#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) +#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) +#define 
PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3_MEMCTRL, GRPH3_BURSTLEN +*/ +#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_MASK (0x000000FF) +#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LSBMASK (0x000000FF) +#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SHIFT (0) +#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LENGTH (8) +#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH3_MEM_THRESH_OFFSET (0x0974) + +/* PDP, GRPH3_MEM_THRESH, GRPH3_UVTHRESHOLD +*/ +#define PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_MASK (0xFF000000) +#define PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LSBMASK (0x000000FF) +#define PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SHIFT (24) +#define PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LENGTH (8) +#define PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3_MEM_THRESH, GRPH3_YTHRESHOLD +*/ +#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_MASK (0x001FF000) +#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LSBMASK (0x000001FF) +#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SHIFT (12) +#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LENGTH (9) +#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3_MEM_THRESH, GRPH3_THRESHOLD +*/ +#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_MASK (0x000001FF) +#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LSBMASK (0x000001FF) +#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SHIFT (0) +#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LENGTH (9) +#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH4_MEMCTRL_OFFSET (0x0978) + +/* PDP, GRPH4_MEMCTRL, GRPH4_LOCAL_GLOBAL_MEMCTRL +*/ +#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) +#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) +#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) +#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) +#define 
PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4_MEMCTRL, GRPH4_BURSTLEN +*/ +#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_MASK (0x000000FF) +#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LSBMASK (0x000000FF) +#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SHIFT (0) +#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LENGTH (8) +#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH4_MEM_THRESH_OFFSET (0x097C) + +/* PDP, GRPH4_MEM_THRESH, GRPH4_UVTHRESHOLD +*/ +#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_MASK (0xFF000000) +#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LSBMASK (0x000000FF) +#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SHIFT (24) +#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LENGTH (8) +#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4_MEM_THRESH, GRPH4_YTHRESHOLD +*/ +#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_MASK (0x001FF000) +#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LSBMASK (0x000001FF) +#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SHIFT (12) +#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LENGTH (9) +#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4_MEM_THRESH, GRPH4_THRESHOLD +*/ +#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_MASK (0x000001FF) +#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LSBMASK (0x000001FF) +#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SHIFT (0) +#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LENGTH (9) +#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1_MEMCTRL_OFFSET (0x0980) + +/* PDP, VID1_MEMCTRL, VID1_LOCAL_GLOBAL_MEMCTRL +*/ +#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) +#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) +#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) +#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) +#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD 
IMG_FALSE + +/* PDP, VID1_MEMCTRL, VID1_BURSTLEN +*/ +#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_MASK (0x000000FF) +#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_LSBMASK (0x000000FF) +#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_SHIFT (0) +#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_LENGTH (8) +#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1_MEM_THRESH_OFFSET (0x0984) + +/* PDP, VID1_MEM_THRESH, VID1_UVTHRESHOLD +*/ +#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_MASK (0xFF000000) +#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LSBMASK (0x000000FF) +#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SHIFT (24) +#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LENGTH (8) +#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1_MEM_THRESH, VID1_YTHRESHOLD +*/ +#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_MASK (0x001FF000) +#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LSBMASK (0x000001FF) +#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SHIFT (12) +#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LENGTH (9) +#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1_MEM_THRESH, VID1_THRESHOLD +*/ +#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_MASK (0x000001FF) +#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LSBMASK (0x000001FF) +#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SHIFT (0) +#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LENGTH (9) +#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2_MEMCTRL_OFFSET (0x0988) + +/* PDP, VID2_MEMCTRL, VID2_LOCAL_GLOBAL_MEMCTRL +*/ +#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) +#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) +#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) +#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) +#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2_MEMCTRL, VID2_BURSTLEN +*/ +#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_MASK (0x000000FF) 
+#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_LSBMASK (0x000000FF) +#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_SHIFT (0) +#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_LENGTH (8) +#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2_MEM_THRESH_OFFSET (0x098C) + +/* PDP, VID2_MEM_THRESH, VID2_UVTHRESHOLD +*/ +#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_MASK (0xFF000000) +#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LSBMASK (0x000000FF) +#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SHIFT (24) +#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LENGTH (8) +#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2_MEM_THRESH, VID2_YTHRESHOLD +*/ +#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_MASK (0x001FF000) +#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LSBMASK (0x000001FF) +#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SHIFT (12) +#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LENGTH (9) +#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2_MEM_THRESH, VID2_THRESHOLD +*/ +#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_MASK (0x000001FF) +#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LSBMASK (0x000001FF) +#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SHIFT (0) +#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LENGTH (9) +#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3_MEMCTRL_OFFSET (0x0990) + +/* PDP, VID3_MEMCTRL, VID3_LOCAL_GLOBAL_MEMCTRL +*/ +#define PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) +#define PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) +#define PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) +#define PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) +#define PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3_MEMCTRL, VID3_BURSTLEN +*/ +#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_MASK (0x000000FF) +#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_LSBMASK (0x000000FF) +#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_SHIFT (0) 
+#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_LENGTH (8) +#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3_MEM_THRESH_OFFSET (0x0994) + +/* PDP, VID3_MEM_THRESH, VID3_UVTHRESHOLD +*/ +#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_MASK (0xFF000000) +#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LSBMASK (0x000000FF) +#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SHIFT (24) +#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LENGTH (8) +#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3_MEM_THRESH, VID3_YTHRESHOLD +*/ +#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_MASK (0x001FF000) +#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LSBMASK (0x000001FF) +#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SHIFT (12) +#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LENGTH (9) +#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3_MEM_THRESH, VID3_THRESHOLD +*/ +#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_MASK (0x000001FF) +#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LSBMASK (0x000001FF) +#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SHIFT (0) +#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LENGTH (9) +#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4_MEMCTRL_OFFSET (0x0998) + +/* PDP, VID4_MEMCTRL, VID4_LOCAL_GLOBAL_MEMCTRL +*/ +#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) +#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) +#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) +#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) +#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4_MEMCTRL, VID4_BURSTLEN +*/ +#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_MASK (0x000000FF) +#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_LSBMASK (0x000000FF) +#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_SHIFT (0) +#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_LENGTH (8) +#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_SIGNED_FIELD IMG_FALSE 
+ +#define PDP_VID4_MEM_THRESH_OFFSET (0x099C) + +/* PDP, VID4_MEM_THRESH, VID4_UVTHRESHOLD +*/ +#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_MASK (0xFF000000) +#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LSBMASK (0x000000FF) +#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SHIFT (24) +#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LENGTH (8) +#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4_MEM_THRESH, VID4_YTHRESHOLD +*/ +#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_MASK (0x001FF000) +#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LSBMASK (0x000001FF) +#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SHIFT (12) +#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LENGTH (9) +#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4_MEM_THRESH, VID4_THRESHOLD +*/ +#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_MASK (0x000001FF) +#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LSBMASK (0x000001FF) +#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SHIFT (0) +#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LENGTH (9) +#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH1_PANIC_THRESH_OFFSET (0x09A0) + +/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_ENABLE +*/ +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_MASK (0x80000000) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LSBMASK (0x00000001) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SHIFT (31) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LENGTH (1) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_ENABLE +*/ +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_MASK (0x40000000) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LSBMASK (0x00000001) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SHIFT (30) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LENGTH (1) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, 
GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MAX +*/ +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SHIFT (23) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LENGTH (7) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MIN +*/ +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SHIFT (16) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LENGTH (7) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MAX +*/ +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SHIFT (8) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LENGTH (8) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MIN +*/ +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SHIFT (0) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LENGTH (8) +#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH2_PANIC_THRESH_OFFSET (0x09A4) + +/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_ENABLE +*/ +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_MASK (0x80000000) +#define 
PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LSBMASK (0x00000001) +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SHIFT (31) +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LENGTH (1) +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_ENABLE +*/ +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_MASK (0x40000000) +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LSBMASK (0x00000001) +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SHIFT (30) +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LENGTH (1) +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MAX +*/ +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SHIFT (23) +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LENGTH (7) +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MIN +*/ +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SHIFT (16) +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LENGTH (7) +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MAX +*/ +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SHIFT (8) +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LENGTH (8) +#define 
PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MIN +*/ +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SHIFT (0) +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LENGTH (8) +#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH3_PANIC_THRESH_OFFSET (0x09A8) + +/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_ENABLE +*/ +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_MASK (0x80000000) +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LSBMASK (0x00000001) +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SHIFT (31) +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LENGTH (1) +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_ENABLE +*/ +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_MASK (0x40000000) +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LSBMASK (0x00000001) +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SHIFT (30) +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LENGTH (1) +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MAX +*/ +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SHIFT (23) +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LENGTH (7) +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MIN +*/ +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) +#define 
PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SHIFT (16) +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LENGTH (7) +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MAX +*/ +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SHIFT (8) +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LENGTH (8) +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MIN +*/ +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SHIFT (0) +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LENGTH (8) +#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +#define PDP_GRPH4_PANIC_THRESH_OFFSET (0x09AC) + +/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_ENABLE +*/ +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_MASK (0x80000000) +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LSBMASK (0x00000001) +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SHIFT (31) +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LENGTH (1) +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_ENABLE +*/ +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_MASK (0x40000000) +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LSBMASK (0x00000001) +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SHIFT (30) +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LENGTH (1) +#define 
PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MAX +*/ +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SHIFT (23) +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LENGTH (7) +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MIN +*/ +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SHIFT (16) +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LENGTH (7) +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_THRESHOLD_MAX +*/ +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SHIFT (8) +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LENGTH (8) +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_THRESHOLD_MIN +*/ +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SHIFT (0) +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LENGTH (8) +#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +#define PDP_VID1_PANIC_THRESH_OFFSET (0x09B0) + +/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_ENABLE +*/ +#define 
PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_MASK (0x80000000) +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LSBMASK (0x00000001) +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SHIFT (31) +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LENGTH (1) +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_ENABLE +*/ +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_MASK (0x40000000) +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LSBMASK (0x00000001) +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SHIFT (30) +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LENGTH (1) +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MAX +*/ +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SHIFT (23) +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LENGTH (7) +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MIN +*/ +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SHIFT (16) +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LENGTH (7) +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MAX +*/ +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SHIFT (8) +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LENGTH (8) +#define 
PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MIN +*/ +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SHIFT (0) +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LENGTH (8) +#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +#define PDP_VID2_PANIC_THRESH_OFFSET (0x09B4) + +/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_ENABLE +*/ +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_MASK (0x80000000) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LSBMASK (0x00000001) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SHIFT (31) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LENGTH (1) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_ENABLE +*/ +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_MASK (0x40000000) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LSBMASK (0x00000001) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SHIFT (30) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LENGTH (1) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MAX +*/ +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SHIFT (23) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LENGTH (7) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MIN +*/ +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LSBMASK 
(0x0000007F) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SHIFT (16) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LENGTH (7) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MAX +*/ +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SHIFT (8) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LENGTH (8) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MIN +*/ +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SHIFT (0) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LENGTH (8) +#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +#define PDP_VID3_PANIC_THRESH_OFFSET (0x09B8) + +/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_ENABLE +*/ +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_MASK (0x80000000) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LSBMASK (0x00000001) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SHIFT (31) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LENGTH (1) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_ENABLE +*/ +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_MASK (0x40000000) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LSBMASK (0x00000001) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SHIFT (30) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LENGTH (1) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3_PANIC_THRESH, 
VID3_ALERT_UV_THRESHOLD_MAX +*/ +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SHIFT (23) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LENGTH (7) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_THRESHOLD_MIN +*/ +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SHIFT (16) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LENGTH (7) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MAX +*/ +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SHIFT (8) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LENGTH (8) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MIN +*/ +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SHIFT (0) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LENGTH (8) +#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +#define PDP_VID4_PANIC_THRESH_OFFSET (0x09BC) + +/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_ENABLE +*/ +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_MASK (0x80000000) +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LSBMASK (0x00000001) +#define 
PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SHIFT (31) +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LENGTH (1) +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_ENABLE +*/ +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_MASK (0x40000000) +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LSBMASK (0x00000001) +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SHIFT (30) +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LENGTH (1) +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MAX +*/ +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SHIFT (23) +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LENGTH (7) +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MIN +*/ +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SHIFT (16) +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LENGTH (7) +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MAX +*/ +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SHIFT (8) +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LENGTH (8) +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE + +/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MIN +*/ +#define 
PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SHIFT (0) +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LENGTH (8) +#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE + +#define PDP_BURST_BOUNDARY_OFFSET (0x09C0) + +/* PDP, BURST_BOUNDARY, BURST_BOUNDARY +*/ +#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_MASK (0x0000003F) +#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_LSBMASK (0x0000003F) +#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_SHIFT (0) +#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_LENGTH (6) +#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_SIGNED_FIELD IMG_FALSE + + +/* ------------------------ End of register definitions ------------------------ */ + +/* +// NUMREG defines the extent of register address space. +*/ + +#define PDP_NUMREG ((0x09C0 >> 2)+1) + +/* Info about video plane addresses */ +#define PDP_YADDR_BITS (PDP_VID1BASEADDR_VID1BASEADDR_LENGTH) +#define PDP_YADDR_ALIGN 5 +#define PDP_UADDR_BITS (PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH) +#define PDP_UADDR_ALIGN 5 +#define PDP_VADDR_BITS (PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH) +#define PDP_VADDR_ALIGN 5 + +#define PDP_YSTRIDE_BITS (PDP_VID1STRIDE_VID1STRIDE_LENGTH) +#define PDP_YSTRIDE_ALIGN 5 + +#define PDP_MAX_INPUT_WIDTH (PDP_VID1SIZE_VID1WIDTH_LSBMASK + 1) +#define PDP_MAX_INPUT_HEIGHT (PDP_VID1SIZE_VID1HEIGHT_LSBMASK + 1) + +/* Maximum 6 bytes per pixel for RGB161616 */ +#define PDP_MAX_IMAGE_BYTES (PDP_MAX_INPUT_WIDTH * PDP_MAX_INPUT_HEIGHT * 6) + +/* Round up */ +#define PDP_MAX_IMAGE_PAGES ((PDP_MAX_IMAGE_BYTES+PAGE_SIZE-1)/PAGE_SIZE) + +#define PDP_YADDR_MAX (((1 << PDP_YADDR_BITS) - 1) << PDP_YADDR_ALIGN) +#define PDP_UADDR_MAX (((1 << PDP_UADDR_BITS) - 1) << PDP_UADDR_ALIGN) +#define PDP_VADDR_MAX (((1 << PDP_VADDR_BITS) - 1) << PDP_VADDR_ALIGN) +#define PDP_YSTRIDE_MAX ((1 << PDP_YSTRIDE_BITS) << 
PDP_YSTRIDE_ALIGN) +#define PDP_YADDR_ALIGNMASK ((1 << PDP_YADDR_ALIGN) - 1) +#define PDP_UADDR_ALIGNMASK ((1 << PDP_UADDR_ALIGN) - 1) +#define PDP_VADDR_ALIGNMASK ((1 << PDP_VADDR_ALIGN) - 1) +#define PDP_YSTRIDE_ALIGNMASK ((1 << PDP_YSTRIDE_ALIGN) - 1) + +/* Field Values */ +#define PDP_SURF_PIXFMT_RGB332 0x3 +#define PDP_SURF_PIXFMT_ARGB4444 0x4 +#define PDP_SURF_PIXFMT_ARGB1555 0x5 +#define PDP_SURF_PIXFMT_RGB888 0x6 +#define PDP_SURF_PIXFMT_RGB565 0x7 +#define PDP_SURF_PIXFMT_ARGB8888 0x8 +#define PDP_SURF_PIXFMT_420_PL8 0x9 +#define PDP_SURF_PIXFMT_420_PL8IVU 0xA +#define PDP_SURF_PIXFMT_420_PL8IUV 0xB +#define PDP_SURF_PIXFMT_422_UY0VY1_8888 0xC +#define PDP_SURF_PIXFMT_422_VY0UY1_8888 0xD +#define PDP_SURF_PIXFMT_422_Y0UY1V_8888 0xE +#define PDP_SURF_PIXFMT_422_Y0VY1U_8888 0xF +#define PDP_SURF_PIXFMT_AYUV8888 0x10 +#define PDP_SURF_PIXFMT_YUV101010 0x15 +#define PDP_SURF_PIXFMT_RGB101010 0x17 +#define PDP_SURF_PIXFMT_420_PL10IUV 0x18 +#define PDP_SURF_PIXFMT_420_PL10IVU 0x19 +#define PDP_SURF_PIXFMT_422_PL10IUV 0x1A +#define PDP_SURF_PIXFMT_422_PL10IVU 0x1B +#define PDP_SURF_PIXFMT_RGB121212 0x1E +#define PDP_SURF_PIXFMT_RGB161616 0x1F + +#define PDP_CTRL_CKEYSRC_PREV 0x0 +#define PDP_CTRL_CKEYSRC_CUR 0x1 + +#define PDP_MEMCTRL_MEMREFRESH_ALWAYS 0x0 +#define PDP_MEMCTRL_MEMREFRESH_HBLNK 0x1 +#define PDP_MEMCTRL_MEMREFRESH_VBLNK 0x2 +#define PDP_MEMCTRL_MEMREFRESH_BOTH 0x3 + +#define PDP_3D_CTRL_BLENDSEL_BGND_WITH_POS0 0x0 +#define PDP_3D_CTRL_BLENDSEL_POS0_WITH_POS1 0x1 +#define PDP_3D_CTRL_BLENDSEL_POS1_WITH_POS2 0x2 +#define PDP_3D_CTRL_BLENDSEL_POS2_WITH_POS3 0x3 +#define PDP_3D_CTRL_BLENDSEL_POS3_WITH_POS4 0x4 +#define PDP_3D_CTRL_BLENDSEL_POS4_WITH_POS5 0x5 +#define PDP_3D_CTRL_BLENDSEL_POS5_WITH_POS6 0x6 +#define PDP_3D_CTRL_BLENDSEL_POS6_WITH_POS7 0x7 + +#define PDP_UADDR_UV_STRIDE_EQUAL_TO_Y_STRIDE 0x0 +#define PDP_UADDR_UV_STRIDE_EQUAL_TO_DOUBLE_Y_STRIDE 0x1 +#define PDP_UADDR_UV_STRIDE_EQUAL_TO_HALF_Y_STRIDE 0x2 + +#define 
PDP_PROCAMP_OUTPUT_OFFSET_FRACTIONAL_BITS 1 +#define PDP_PROCAMP_COEFFICIENT_FRACTIONAL_BITS 10 + +/*-------------------------------------------------------------------------------*/ + +#endif /* _PDP2_REGS_H */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_aon_regs.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_aon_regs.h new file mode 100644 index 000000000000..6494ac6d8643 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_aon_regs.h @@ -0,0 +1,606 @@ +/*************************************************************************/ /*! +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* Autogenerated - don't edit. Generated from aon_regs.def. Regconv 0.2_r110 */ + +#ifndef _PLATO_AON_REGS_H_ +#define _PLATO_AON_REGS_H_ + +/* + Register CR_BM_STATUS +*/ +#define PLATO_AON_CR_BM_STATUS 0x0000 +#define PLATO_CR_GPIO_DEBUG_MASK 0x00000020U +#define PLATO_CR_GPIO_DEBUG_SHIFT 5 +#define PLATO_CR_GPIO_DEBUG_SIGNED 0 + +#define PLATO_CR_SAFE_MODE_MASK 0x00000010U +#define PLATO_CR_SAFE_MODE_SHIFT 4 +#define PLATO_CR_SAFE_MODE_SIGNED 0 + +#define PLATO_CR_BM_DIS_MASK 0x00000008U +#define PLATO_CR_BM_DIS_SHIFT 3 +#define PLATO_CR_BM_DIS_SIGNED 0 + +#define PLATO_CR_BM_DR_MASK 0x00000004U +#define PLATO_CR_BM_DR_SHIFT 2 +#define PLATO_CR_BM_DR_SIGNED 0 + +#define PLATO_CR_BM_CPU_MASK 0x00000002U +#define PLATO_CR_BM_CPU_SHIFT 1 +#define PLATO_CR_BM_CPU_SIGNED 0 + +#define PLATO_CR_BM_PCI_MASK 0x00000001U +#define PLATO_CR_BM_PCI_SHIFT 0 +#define PLATO_CR_BM_PCI_SIGNED 0 + +/* + Register CR_PD_CORE_CTRL +*/ +#define PLATO_AON_CR_PD_CORE_CTRL 0x0004 +#define PLATO_CR_PD_CORE_ISO_DLY_MASK 0x0000F000U +#define PLATO_CR_PD_CORE_ISO_DLY_SHIFT 12 +#define PLATO_CR_PD_CORE_ISO_DLY_SIGNED 0 + +#define PLATO_CR_PD_CORE_CKE_DLY_MASK 
0x00000F00U +#define PLATO_CR_PD_CORE_CKE_DLY_SHIFT 8 +#define PLATO_CR_PD_CORE_CKE_DLY_SIGNED 0 + +#define PLATO_CR_PD_CORE_RST_DLY_MASK 0x000000F0U +#define PLATO_CR_PD_CORE_RST_DLY_SHIFT 4 +#define PLATO_CR_PD_CORE_RST_DLY_SIGNED 0 + +#define PLATO_CR_PD_CORE_POWER_DLY_MASK 0x0000000FU +#define PLATO_CR_PD_CORE_POWER_DLY_SHIFT 0 +#define PLATO_CR_PD_CORE_POWER_DLY_SIGNED 0 + +/* + Register CR_ISO_CTRL +*/ +#define PLATO_AON_CR_ISO_CTRL 0x0008 +#define PLATO_CR_DDR_RET_EN_MASK 0x00000100U +#define PLATO_CR_DDR_RET_EN_SHIFT 8 +#define PLATO_CR_DDR_RET_EN_SIGNED 0 + +#define PLATO_CR_GPU_CLK_E_MASK 0x00000010U +#define PLATO_CR_GPU_CLK_E_SHIFT 4 +#define PLATO_CR_GPU_CLK_E_SIGNED 0 + +#define PLATO_CR_HDMI_PHY_ISO_E_MASK 0x00000008U +#define PLATO_CR_HDMI_PHY_ISO_E_SHIFT 3 +#define PLATO_CR_HDMI_PHY_ISO_E_SIGNED 0 + +#define PLATO_CR_DDR_B_PHY_ISO_E_MASK 0x00000004U +#define PLATO_CR_DDR_B_PHY_ISO_E_SHIFT 2 +#define PLATO_CR_DDR_B_PHY_ISO_E_SIGNED 0 + +#define PLATO_CR_USB_PHY_ISO_E_MASK 0x00000002U +#define PLATO_CR_USB_PHY_ISO_E_SHIFT 1 +#define PLATO_CR_USB_PHY_ISO_E_SIGNED 0 + +#define PLATO_CR_GPU_ISO_E_MASK 0x00000001U +#define PLATO_CR_GPU_ISO_E_SHIFT 0 +#define PLATO_CR_GPU_ISO_E_SIGNED 0 + +/* + Register CR_RESET_CTRL +*/ +#define PLATO_AON_CR_RESET_CTRL 0x000C +#define PLATO_CR_EFUSE_RESET_N_MASK 0x00200000U +#define PLATO_CR_EFUSE_RESET_N_SHIFT 21 +#define PLATO_CR_EFUSE_RESET_N_SIGNED 0 + +#define PLATO_CR_VOLT_RESET_N_MASK 0x00100000U +#define PLATO_CR_VOLT_RESET_N_SHIFT 20 +#define PLATO_CR_VOLT_RESET_N_SIGNED 0 + +#define PLATO_CR_TEMP_RESET_N_MASK 0x00080000U +#define PLATO_CR_TEMP_RESET_N_SHIFT 19 +#define PLATO_CR_TEMP_RESET_N_SIGNED 0 + +#define PLATO_CR_SPI_RESET_N_MASK 0x00040000U +#define PLATO_CR_SPI_RESET_N_SHIFT 18 +#define PLATO_CR_SPI_RESET_N_SIGNED 0 + +#define PLATO_CR_I2C_RESET_N_MASK 0x00020000U +#define PLATO_CR_I2C_RESET_N_SHIFT 17 +#define PLATO_CR_I2C_RESET_N_SIGNED 0 + +#define PLATO_CR_UART_RESET_N_MASK 0x00010000U +#define 
PLATO_CR_UART_RESET_N_SHIFT 16 +#define PLATO_CR_UART_RESET_N_SIGNED 0 + +#define PLATO_CR_DDR_B_CTRL_RESET_N_MASK 0x00000800U +#define PLATO_CR_DDR_B_CTRL_RESET_N_SHIFT 11 +#define PLATO_CR_DDR_B_CTRL_RESET_N_SIGNED 0 + +#define PLATO_CR_DDR_B_DATA_RESET_N_MASK 0x00000400U +#define PLATO_CR_DDR_B_DATA_RESET_N_SHIFT 10 +#define PLATO_CR_DDR_B_DATA_RESET_N_SIGNED 0 + +#define PLATO_CR_DDR_A_CTRL_RESET_N_MASK 0x00000200U +#define PLATO_CR_DDR_A_CTRL_RESET_N_SHIFT 9 +#define PLATO_CR_DDR_A_CTRL_RESET_N_SIGNED 0 + +#define PLATO_CR_DDR_A_DATA_RESET_N_MASK 0x00000100U +#define PLATO_CR_DDR_A_DATA_RESET_N_SHIFT 8 +#define PLATO_CR_DDR_A_DATA_RESET_N_SIGNED 0 + +#define PLATO_CR_SOFT_RESET_REQ_NOCPU_MASK 0x00000040U +#define PLATO_CR_SOFT_RESET_REQ_NOCPU_SHIFT 6 +#define PLATO_CR_SOFT_RESET_REQ_NOCPU_SIGNED 0 + +#define PLATO_CR_DISPLAY_RESET_MASK 0x00000020U +#define PLATO_CR_DISPLAY_RESET_SHIFT 5 +#define PLATO_CR_DISPLAY_RESET_SIGNED 0 + +#define PLATO_CR_USB_PHY_RESET_MASK 0x00000010U +#define PLATO_CR_USB_PHY_RESET_SHIFT 4 +#define PLATO_CR_USB_PHY_RESET_SIGNED 0 + +#define PLATO_CR_USB_PMU_RESET_N_MASK 0x00000008U +#define PLATO_CR_USB_PMU_RESET_N_SHIFT 3 +#define PLATO_CR_USB_PMU_RESET_N_SIGNED 0 + +#define PLATO_CR_USB_CTRL_RESET_N_MASK 0x00000004U +#define PLATO_CR_USB_CTRL_RESET_N_SHIFT 2 +#define PLATO_CR_USB_CTRL_RESET_N_SIGNED 0 + +#define PLATO_CR_GPU_RESET_N_MASK 0x00000002U +#define PLATO_CR_GPU_RESET_N_SHIFT 1 +#define PLATO_CR_GPU_RESET_N_SIGNED 0 + +#define PLATO_CR_SOFT_RESET_REQ_MASK 0x00000001U +#define PLATO_CR_SOFT_RESET_REQ_SHIFT 0 +#define PLATO_CR_SOFT_RESET_REQ_SIGNED 0 + +/* + Register CR_OVERRIDE_RESET_CTRL +*/ +#define PLATO_AON_CR_OVERRIDE_RESET_CTRL 0x0010 +#define PLATO_CR_CPU_NOC_RESET_N_MASK 0x00001000U +#define PLATO_CR_CPU_NOC_RESET_N_SHIFT 12 +#define PLATO_CR_CPU_NOC_RESET_N_SIGNED 0 + +#define PLATO_CR_CPU_NOC_RESET_SEL_MASK 0x00000800U +#define PLATO_CR_CPU_NOC_RESET_SEL_SHIFT 11 +#define PLATO_CR_CPU_NOC_RESET_SEL_SIGNED 0 + 
+#define PLATO_CR_CPU_CLK_EN_MASK 0x00000400U +#define PLATO_CR_CPU_CLK_EN_SHIFT 10 +#define PLATO_CR_CPU_CLK_EN_SIGNED 0 + +#define PLATO_CR_CPU_CLK_EN_SEL_MASK 0x00000200U +#define PLATO_CR_CPU_CLK_EN_SEL_SHIFT 9 +#define PLATO_CR_CPU_CLK_EN_SEL_SIGNED 0 + +#define PLATO_CR_WDT_RESET_N_MASK 0x00000100U +#define PLATO_CR_WDT_RESET_N_SHIFT 8 +#define PLATO_CR_WDT_RESET_N_SIGNED 0 + +#define PLATO_CR_WDT_RESET_SEL_MASK 0x00000080U +#define PLATO_CR_WDT_RESET_SEL_SHIFT 7 +#define PLATO_CR_WDT_RESET_SEL_SIGNED 0 + +#define PLATO_CR_PCI_PHY_RESET_N_MASK 0x00000040U +#define PLATO_CR_PCI_PHY_RESET_N_SHIFT 6 +#define PLATO_CR_PCI_PHY_RESET_N_SIGNED 0 + +#define PLATO_CR_PCI_CTRL_RESET_N_MASK 0x00000020U +#define PLATO_CR_PCI_CTRL_RESET_N_SHIFT 5 +#define PLATO_CR_PCI_CTRL_RESET_N_SIGNED 0 + +#define PLATO_CR_PCI_RESET_SEL_MASK 0x00000010U +#define PLATO_CR_PCI_RESET_SEL_SHIFT 4 +#define PLATO_CR_PCI_RESET_SEL_SIGNED 0 + +#define PLATO_CR_REG_RESET_N_MASK 0x00000008U +#define PLATO_CR_REG_RESET_N_SHIFT 3 +#define PLATO_CR_REG_RESET_N_SIGNED 0 + +#define PLATO_CR_REG_RESET_SEL_MASK 0x00000004U +#define PLATO_CR_REG_RESET_SEL_SHIFT 2 +#define PLATO_CR_REG_RESET_SEL_SIGNED 0 + +#define PLATO_CR_CPU_RESET_N_MASK 0x00000002U +#define PLATO_CR_CPU_RESET_N_SHIFT 1 +#define PLATO_CR_CPU_RESET_N_SIGNED 0 + +#define PLATO_CR_CPU_RESET_SEL_MASK 0x00000001U +#define PLATO_CR_CPU_RESET_SEL_SHIFT 0 +#define PLATO_CR_CPU_RESET_SEL_SIGNED 0 + +/* + Register CR_USB_PMU_CTRL +*/ +#define PLATO_AON_CR_USB_PMU_CTRL 0x0020 +#define PLATO_CR_USB_PM_POWER_STATE_REQUEST_MASK 0x00000003U +#define PLATO_CR_USB_PM_POWER_STATE_REQUEST_SHIFT 0 +#define PLATO_CR_USB_PM_POWER_STATE_REQUEST_SIGNED 0 + +/* + Register CR_USB_PMU_STATUS +*/ +#define PLATO_AON_CR_USB_PMU_STATUS 0x0024 +#define PLATO_CR_PME_GENERATION_U3PMU_MASK 0x00000080U +#define PLATO_CR_PME_GENERATION_U3PMU_SHIFT 7 +#define PLATO_CR_PME_GENERATION_U3PMU_SIGNED 0 + +#define PLATO_CR_PME_GENERATION_U2PMU_MASK 0x00000040U +#define 
PLATO_CR_PME_GENERATION_U2PMU_SHIFT 6 +#define PLATO_CR_PME_GENERATION_U2PMU_SIGNED 0 + +#define PLATO_CR_CONNECT_STATE_U3PMU_MASK 0x00000020U +#define PLATO_CR_CONNECT_STATE_U3PMU_SHIFT 5 +#define PLATO_CR_CONNECT_STATE_U3PMU_SIGNED 0 + +#define PLATO_CR_CONNECT_STATE_U2PMU_MASK 0x00000010U +#define PLATO_CR_CONNECT_STATE_U2PMU_SHIFT 4 +#define PLATO_CR_CONNECT_STATE_U2PMU_SIGNED 0 + +#define PLATO_CR_CURRENT_POWER_STATE_U3PMU_MASK 0x0000000CU +#define PLATO_CR_CURRENT_POWER_STATE_U3PMU_SHIFT 2 +#define PLATO_CR_CURRENT_POWER_STATE_U3PMU_SIGNED 0 + +#define PLATO_CR_CURRENT_POWER_STATE_U2PMU_MASK 0x00000003U +#define PLATO_CR_CURRENT_POWER_STATE_U2PMU_SHIFT 0 +#define PLATO_CR_CURRENT_POWER_STATE_U2PMU_SIGNED 0 + +/* + Register CR_USB_SUSPEND_CLK_CTRL +*/ +#define PLATO_AON_CR_USB_SUSPEND_CLK_CTRL 0x0028 +#define PLATO_CR_USB_SUSP_CLK_EN_MASK 0x00001000U +#define PLATO_CR_USB_SUSP_CLK_EN_SHIFT 12 +#define PLATO_CR_USB_SUSP_CLK_EN_SIGNED 0 + +#define PLATO_CR_USB_SUSP_CLK_DIV_MASK 0x000003FFU +#define PLATO_CR_USB_SUSP_CLK_DIV_SHIFT 0 +#define PLATO_CR_USB_SUSP_CLK_DIV_SIGNED 0 + +/* + Register CR_USBPHY_CLK_CFG +*/ +#define PLATO_AON_CR_USBPHY_CLK_CFG 0x002C +#define PLATO_CR_USBP_SSC_RANGE_MASK 0x0000001CU +#define PLATO_CR_USBP_SSC_RANGE_SHIFT 2 +#define PLATO_CR_USBP_SSC_RANGE_SIGNED 0 + +#define PLATO_CR_USBP_RETENABLEN_MASK 0x00000002U +#define PLATO_CR_USBP_RETENABLEN_SHIFT 1 +#define PLATO_CR_USBP_RETENABLEN_SIGNED 0 + +#define PLATO_CR_USBP_REF_SSP_EN_MASK 0x00000001U +#define PLATO_CR_USBP_REF_SSP_EN_SHIFT 0 +#define PLATO_CR_USBP_REF_SSP_EN_SIGNED 0 + +/* + Register CR_USBPHY_DTCT_ADJ +*/ +#define PLATO_AON_CR_USBPHY_DTCT_ADJ 0x0030 +#define PLATO_CR_USBP_LOS_BIAS_MASK 0x00380000U +#define PLATO_CR_USBP_LOS_BIAS_SHIFT 19 +#define PLATO_CR_USBP_LOS_BIAS_SIGNED 0 + +#define PLATO_CR_USBP_LOS_MASK_VAL_MASK 0x0007FE00U +#define PLATO_CR_USBP_LOS_MASK_VAL_SHIFT 9 +#define PLATO_CR_USBP_LOS_MASK_VAL_SIGNED 0 + +#define PLATO_CR_USBP_OTGTUNE_MASK 0x000001C0U 
+#define PLATO_CR_USBP_OTGTUNE_SHIFT 6 +#define PLATO_CR_USBP_OTGTUNE_SIGNED 0 + +#define PLATO_CR_USBP_COMPDISTUNE_MASK 0x00000038U +#define PLATO_CR_USBP_COMPDISTUNE_SHIFT 3 +#define PLATO_CR_USBP_COMPDISTUNE_SIGNED 0 + +#define PLATO_CR_USBP_SQRXTUNE_MASK 0x00000007U +#define PLATO_CR_USBP_SQRXTUNE_SHIFT 0 +#define PLATO_CR_USBP_SQRXTUNE_SIGNED 0 + +/* + Register CR_USBPHY_CUR_ADJ +*/ +#define PLATO_AON_CR_USBPHY_CUR_ADJ 0x0034 +#define PLATO_CR_USBP_TXPREEMPPULSETUNE_MASK 0x00000100U +#define PLATO_CR_USBP_TXPREEMPPULSETUNE_SHIFT 8 +#define PLATO_CR_USBP_TXPREEMPPULSETUNE_SIGNED 0 + +#define PLATO_CR_USBP_TXPREEMPAMPTUNE_MASK 0x000000C0U +#define PLATO_CR_USBP_TXPREEMPAMPTUNE_SHIFT 6 +#define PLATO_CR_USBP_TXPREEMPAMPTUNE_SIGNED 0 + +#define PLATO_CR_USBP_TXFSLSTUNE_MASK 0x0000003CU +#define PLATO_CR_USBP_TXFSLSTUNE_SHIFT 2 +#define PLATO_CR_USBP_TXFSLSTUNE_SIGNED 0 + +#define PLATO_CR_USBP_TXRESTUNE_MASK 0x00000003U +#define PLATO_CR_USBP_TXRESTUNE_SHIFT 0 +#define PLATO_CR_USBP_TXRESTUNE_SIGNED 0 + +/* + Register CR_USBPHY_VADJ +*/ +#define PLATO_AON_CR_USBPHY_VADJ 0x0038 +#define PLATO_CR_USBP_TXVBOOST_LVL_MASK 0x38000000U +#define PLATO_CR_USBP_TXVBOOST_LVL_SHIFT 27 +#define PLATO_CR_USBP_TXVBOOST_LVL_SIGNED 0 + +#define PLATO_CR_USBP_TXSWING_FULL_MASK 0x07F00000U +#define PLATO_CR_USBP_TXSWING_FULL_SHIFT 20 +#define PLATO_CR_USBP_TXSWING_FULL_SIGNED 0 + +#define PLATO_CR_USBP_TXDEEMPH_6DB_MASK 0x000FC000U +#define PLATO_CR_USBP_TXDEEMPH_6DB_SHIFT 14 +#define PLATO_CR_USBP_TXDEEMPH_6DB_SIGNED 0 + +#define PLATO_CR_USBP_TXDEEMPH_3P5DB_MASK 0x00003F00U +#define PLATO_CR_USBP_TXDEEMPH_3P5DB_SHIFT 8 +#define PLATO_CR_USBP_TXDEEMPH_3P5DB_SIGNED 0 + +#define PLATO_CR_USBP_TXRISETUNE_MASK 0x000000C0U +#define PLATO_CR_USBP_TXRISETUNE_SHIFT 6 +#define PLATO_CR_USBP_TXRISETUNE_SIGNED 0 + +#define PLATO_CR_USBP_TXVREFTUNE_MASK 0x0000003CU +#define PLATO_CR_USBP_TXVREFTUNE_SHIFT 2 +#define PLATO_CR_USBP_TXVREFTUNE_SIGNED 0 + +#define PLATO_CR_USBP_TXHSXVTUNE_MASK 
0x00000003U +#define PLATO_CR_USBP_TXHSXVTUNE_SHIFT 0 +#define PLATO_CR_USBP_TXHSXVTUNE_SIGNED 0 + +/* + Register CR_OVERRIDE_GPIO_DEBUG +*/ +#define PLATO_AON_CR_OVERRIDE_GPIO_DEBUG 0x0040 +#define PLATO_CR_GPIO_DEBUG_SEL_MASK 0x00000002U +#define PLATO_CR_GPIO_DEBUG_SEL_SHIFT 1 +#define PLATO_CR_GPIO_DEBUG_SEL_SIGNED 0 + +#define PLATO_CR_GPIO_DEBUG_SW_MASK 0x00000001U +#define PLATO_CR_GPIO_DEBUG_SW_SHIFT 0 +#define PLATO_CR_GPIO_DEBUG_SW_SIGNED 0 + +/* + Register CR_GPU_GPIO_SEL +*/ +#define PLATO_AON_CR_GPU_GPIO_SEL 0x0044 +#define PLATO_CR_GPU_GPIO_SEL_MASK 0x00000001U +#define PLATO_CR_GPU_GPIO_SEL_SHIFT 0 +#define PLATO_CR_GPU_GPIO_SEL_SIGNED 0 + +/* + Register CR_GPU_PLL_CTRL_0 +*/ +#define PLATO_AON_CR_GPU_PLL_CTRL_0 0x0060 +#define PLATO_CR_GPU_PLL_PD_MASK 0x10000000U +#define PLATO_CR_GPU_PLL_PD_SHIFT 28 +#define PLATO_CR_GPU_PLL_PD_SIGNED 0 + +#define PLATO_CR_GPU_PLL_POSTDIV2_MASK 0x07000000U +#define PLATO_CR_GPU_PLL_POSTDIV2_SHIFT 24 +#define PLATO_CR_GPU_PLL_POSTDIV2_SIGNED 0 + +#define PLATO_CR_GPU_PLL_POSTDIV1_MASK 0x00700000U +#define PLATO_CR_GPU_PLL_POSTDIV1_SHIFT 20 +#define PLATO_CR_GPU_PLL_POSTDIV1_SIGNED 0 + +#define PLATO_CR_GPU_PLL_REFDIV_MASK 0x0003F000U +#define PLATO_CR_GPU_PLL_REFDIV_SHIFT 12 +#define PLATO_CR_GPU_PLL_REFDIV_SIGNED 0 + +#define PLATO_CR_GPU_PLL_FBDIV_MASK 0x00000FFFU +#define PLATO_CR_GPU_PLL_FBDIV_SHIFT 0 +#define PLATO_CR_GPU_PLL_FBDIV_SIGNED 0 + +/* + Register CR_GPU_PLL_CTRL_1 +*/ +#define PLATO_AON_CR_GPU_PLL_CTRL_1 0x0064 +#define PLATO_CR_GPU_PLL_DSMPD_MASK 0x01000000U +#define PLATO_CR_GPU_PLL_DSMPD_SHIFT 24 +#define PLATO_CR_GPU_PLL_DSMPD_SIGNED 0 + +#define PLATO_CR_GPU_PLL_FRAC_MASK 0x00FFFFFFU +#define PLATO_CR_GPU_PLL_FRAC_SHIFT 0 +#define PLATO_CR_GPU_PLL_FRAC_SIGNED 0 + +/* + Register CR_DDR_PLL_CTRL_0 +*/ +#define PLATO_AON_CR_DDR_PLL_CTRL_0 0x0068 +#define PLATO_CR_DDR_PLL_PD_MASK 0x10000000U +#define PLATO_CR_DDR_PLL_PD_SHIFT 28 +#define PLATO_CR_DDR_PLL_PD_SIGNED 0 + +#define 
PLATO_CR_DDR_PLL_POSTDIV2_MASK 0x07000000U +#define PLATO_CR_DDR_PLL_POSTDIV2_SHIFT 24 +#define PLATO_CR_DDR_PLL_POSTDIV2_SIGNED 0 + +#define PLATO_CR_DDR_PLL_POSTDIV1_MASK 0x00700000U +#define PLATO_CR_DDR_PLL_POSTDIV1_SHIFT 20 +#define PLATO_CR_DDR_PLL_POSTDIV1_SIGNED 0 + +#define PLATO_CR_DDR_PLL_REFDIV_MASK 0x0003F000U +#define PLATO_CR_DDR_PLL_REFDIV_SHIFT 12 +#define PLATO_CR_DDR_PLL_REFDIV_SIGNED 0 + +#define PLATO_CR_DDR_PLL_FBDIV_MASK 0x00000FFFU +#define PLATO_CR_DDR_PLL_FBDIV_SHIFT 0 +#define PLATO_CR_DDR_PLL_FBDIV_SIGNED 0 + +/* + Register CR_DDR_PLL_CTRL_1 +*/ +#define PLATO_AON_CR_DDR_PLL_CTRL_1 0x006C +#define PLATO_CR_DDR_PLL_DSMPD_MASK 0x01000000U +#define PLATO_CR_DDR_PLL_DSMPD_SHIFT 24 +#define PLATO_CR_DDR_PLL_DSMPD_SIGNED 0 + +#define PLATO_CR_DDR_PLL_FRAC_MASK 0x00FFFFFFU +#define PLATO_CR_DDR_PLL_FRAC_SHIFT 0 +#define PLATO_CR_DDR_PLL_FRAC_SIGNED 0 + +/* + Register CR_PLL_STATUS +*/ +#define PLATO_AON_CR_PLL_STATUS 0x0070 +#define PLATO_CR_DDR_PLL_LOCK_MASK 0x00000002U +#define PLATO_CR_DDR_PLL_LOCK_SHIFT 1 +#define PLATO_CR_DDR_PLL_LOCK_SIGNED 0 + +#define PLATO_CR_GPU_PLL_LOCK_MASK 0x00000001U +#define PLATO_CR_GPU_PLL_LOCK_SHIFT 0 +#define PLATO_CR_GPU_PLL_LOCK_SIGNED 0 + +/* + Register CR_PLL_BYPASS +*/ +#define PLATO_AON_CR_PLL_BYPASS 0x0080 +#define PLATO_CR_PLL_BYPASS_MASK 0x00000001U +#define PLATO_CR_PLL_BYPASS_SHIFT 0 +#define PLATO_CR_PLL_BYPASS_SIGNED 0 + +/* + Register CR_CPU_CLK_CTRL +*/ +#define PLATO_AON_CR_CPU_CLK_CTRL 0x0084 +#define PLATO_CR_CPUV1_DIV_0_MASK 0x00000700U +#define PLATO_CR_CPUV1_DIV_0_SHIFT 8 +#define PLATO_CR_CPUV1_DIV_0_SIGNED 0 + +#define PLATO_CR_CPUV0_DIV_0_MASK 0x00000030U +#define PLATO_CR_CPUV0_DIV_0_SHIFT 4 +#define PLATO_CR_CPUV0_DIV_0_SIGNED 0 + +#define PLATO_CR_CS_CPU_0_SW_MASK 0x00000001U +#define PLATO_CR_CS_CPU_0_SW_SHIFT 0 +#define PLATO_CR_CS_CPU_0_SW_SIGNED 0 + +/* + Register CR_NOC_CLK_CTRL +*/ +#define PLATO_AON_CR_NOC_CLK_CTRL 0x0088 +#define PLATO_CR_NOCV1_DIV_0_MASK 0x00000070U 
+#define PLATO_CR_NOCV1_DIV_0_SHIFT 4 +#define PLATO_CR_NOCV1_DIV_0_SIGNED 0 + +#define PLATO_CR_NOCV0_DIV_0_MASK 0x00000003U +#define PLATO_CR_NOCV0_DIV_0_SHIFT 0 +#define PLATO_CR_NOCV0_DIV_0_SIGNED 0 + +/* + Register CR_BOOT_VECTOR +*/ +#define PLATO_AON_CR_BOOT_VECTOR 0x0090 +#define PLATO_CR_BOOT_VECTOR_MASK 0xFFFFFFFFU +#define PLATO_CR_BOOT_VECTOR_SHIFT 0 +#define PLATO_CR_BOOT_VECTOR_SIGNED 0 + +/* + Register CR_TESTBENCH_ADDRESS +*/ +#define PLATO_AON_CR_TESTBENCH_ADDRESS 0x1800 +#define PLATO_CR_TESTBENCH_ADDRESS_MASK 0xFFFFFFFFU +#define PLATO_CR_TESTBENCH_ADDRESS_SHIFT 0 +#define PLATO_CR_TESTBENCH_ADDRESS_SIGNED 0 + +/* + Register CR_TESTBENCH_WDATA +*/ +#define PLATO_AON_CR_TESTBENCH_WDATA 0x1804 +#define PLATO_CR_TESTBENCH_WDATA_MASK 0xFFFFFFFFU +#define PLATO_CR_TESTBENCH_WDATA_SHIFT 0 +#define PLATO_CR_TESTBENCH_WDATA_SIGNED 0 + +/* + Register CR_TESTBENCH_RDATA +*/ +#define PLATO_AON_CR_TESTBENCH_RDATA 0x1808 +#define PLATO_CR_TESTBENCH_RDATA_MASK 0xFFFFFFFFU +#define PLATO_CR_TESTBENCH_RDATA_SHIFT 0 +#define PLATO_CR_TESTBENCH_RDATA_SIGNED 0 + +/* + Register CR_TESTBENCH_COMMAND +*/ +#define PLATO_AON_CR_TESTBENCH_COMMAND 0x180C +#define PLATO_CR_TESTBENCH_COMMAND_MASK 0xFFFFFFFFU +#define PLATO_CR_TESTBENCH_COMMAND_SHIFT 0 +#define PLATO_CR_TESTBENCH_COMMAND_SIGNED 0 + +/* + Register CR_TESTBENCH_STATUS +*/ +#define PLATO_AON_CR_TESTBENCH_STATUS 0x1810 +#define PLATO_CR_TESTBENCH_STATUS_MASK 0xFFFFFFFFU +#define PLATO_CR_TESTBENCH_STATUS_SHIFT 0 +#define PLATO_CR_TESTBENCH_STATUS_SIGNED 0 + +/* + Register CR_TESTBENCH_RESULT +*/ +#define PLATO_AON_CR_TESTBENCH_RESULT 0x1814 +#define PLATO_CR_TESTBENCH_RESULT_MASK 0xFFFFFFFFU +#define PLATO_CR_TESTBENCH_RESULT_SHIFT 0 +#define PLATO_CR_TESTBENCH_RESULT_SIGNED 0 + +/* + Register CR_TESTBENCH_RUNNING +*/ +#define PLATO_AON_CR_TESTBENCH_RUNNING 0x1818 +#define PLATO_CR_TESTBENCH_RUNNING_MASK 0xFFFFFFFFU +#define PLATO_CR_TESTBENCH_RUNNING_SHIFT 0 +#define PLATO_CR_TESTBENCH_RUNNING_SIGNED 0 + +/* + 
Register CR_TESTBENCH_MARKER +*/ +#define PLATO_AON_CR_TESTBENCH_MARKER 0x181C +#define PLATO_CR_TESTBENCH_MARKER_MASK 0xFFFFFFFFU +#define PLATO_CR_TESTBENCH_MARKER_SHIFT 0 +#define PLATO_CR_TESTBENCH_MARKER_SIGNED 0 + +#endif /* _PLATO_AON_REGS_H_ */ + +/***************************************************************************** + End of file (plato_aon_regs.h) +*****************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_ddr_ctrl_regs.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_ddr_ctrl_regs.h new file mode 100644 index 000000000000..9b7c417bc051 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_ddr_ctrl_regs.h @@ -0,0 +1,138 @@ +/*************************************************************************/ /*! +@Title Plato DDR CTRL register definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _PLATO_DDR_CTRL_REGS_H_ +#define _PLATO_DDR_CTRL_REGS_H_ + +#define PLATO_DDR_CTRL_MSTR 0x000 /* Master Register */ +#define PLATO_DDR_CTRL_STAT 0x004 /* Operating Mode Status Register */ +#define PLATO_DDR_CTRL_MRCTRL0 0x0010 /* Mode Register Read/Write Control Register 0. Note: Do not enable more than one of the following... 
*/ +#define PLATO_DDR_CTRL_MRCTRL1 0x0014 /* Mode Register Read/Write Control Register 1 */ +#define PLATO_DDR_CTRL_MRSTAT 0x0018 /* Mode Register Read/Write Status Register */ +#define PLATO_DDR_CTRL_MRCTRL2 0x001c /* Mode Register Read/Write Control Register 2 */ +#define PLATO_DDR_CTRL_DERATEEN 0x0020 /* Temperature Derate Enable Register */ +#define PLATO_DDR_CTRL_DERATEINT 0x0024 /* Temperature Derate Interval Register */ +#define PLATO_DDR_CTRL_PWRCTL 0x0030 /* Low Power Control Register */ +#define PLATO_DDR_CTRL_PWRTMG 0x0034 /* Low Power Timing Register */ +#define PLATO_DDR_CTRL_HWLPCTL 0x0038 /* Hardware Low Power Control Register */ +#define PLATO_DDR_CTRL_RFSHCTL0 0x0050 /* Refresh Control Register 0 */ +#define PLATO_DDR_CTRL_RFSHCTL1 0x0054 /* Refresh Control Register 1 */ +#define PLATO_DDR_CTRL_RFSHCTL2 0x0058 /* Refresh Control Register 2 */ +#define PLATO_DDR_CTRL_RFSHCTL3 0x0060 /* Refresh Control Register 3 */ +#define PLATO_DDR_CTRL_RFSHTMG 0x0064 /* Refresh Timing Register */ +#define PLATO_DDR_CTRL_CRCPARCTL0 0x00c0 /* CRC Parity Control Register0 */ +#define PLATO_DDR_CTRL_CRCPARCTL1 0x00c4 /* CRC Parity Control Register1 */ +#define PLATO_DDR_CTRL_CRCPARSTAT 0x00cc /* CRC Parity Status Register */ +#define PLATO_DDR_CTRL_INIT0 0x00d0 /* SDRAM Initialization Register 0 */ +#define PLATO_DDR_CTRL_INIT1 0x00d4 /* SDRAM Initialization Register 1 */ +#define PLATO_DDR_CTRL_INIT2 0x00d8 /* SDRAM Initialization Register 2 */ +#define PLATO_DDR_CTRL_INIT3 0x00dc /* SDRAM Initialization Register 3 */ +#define PLATO_DDR_CTRL_INIT4 0x00e0 /* SDRAM Initialization Register 4 */ +#define PLATO_DDR_CTRL_INIT5 0x00e4 /* SDRAM Initialization Register 5 */ +#define PLATO_DDR_CTRL_INIT6 0x00e8 /* SDRAM Initialization Register 6 */ +#define PLATO_DDR_CTRL_INIT7 0x00ec /* SDRAM Initialization Register 7 */ +#define PLATO_DDR_CTRL_DIMMCTL 0x00f0 /* DIMM Control Register */ +#define PLATO_DDR_CTRL_RANKCTL 0x00f4 /* Rank Control Register */ +#define
PLATO_DDR_CTRL_DRAMTMG0 0x00100 /* SDRAM Timing Register 0 */ +#define PLATO_DDR_CTRL_DRAMTMG1 0x00104 /* SDRAM Timing Register 1 */ +#define PLATO_DDR_CTRL_DRAMTMG2 0x00108 /* SDRAM Timing Register 2 */ +#define PLATO_DDR_CTRL_DRAMTMG3 0x0010c /* SDRAM Timing Register 3 */ +#define PLATO_DDR_CTRL_DRAMTMG4 0x00110 /* SDRAM Timing Register 4 */ +#define PLATO_DDR_CTRL_DRAMTMG5 0x00114 /* SDRAM Timing Register 5 */ +#define PLATO_DDR_CTRL_DRAMTMG6 0x00118 /* SDRAM Timing Register 6 */ +#define PLATO_DDR_CTRL_DRAMTMG7 0x0011c /* SDRAM Timing Register 7 */ +#define PLATO_DDR_CTRL_DRAMTMG8 0x00120 /* SDRAM Timing Register 8 */ +#define PLATO_DDR_CTRL_DRAMTMG9 0x00124 /* SDRAM Timing Register 9 */ +#define PLATO_DDR_CTRL_DRAMTMG10 0x00128 /* SDRAM Timing Register 10 */ +#define PLATO_DDR_CTRL_DRAMTMG11 0x0012c /* SDRAM Timing Register 11 */ +#define PLATO_DDR_CTRL_DRAMTMG12 0x00130 /* SDRAM Timing Register 12 */ +#define PLATO_DDR_CTRL_ZQCTL0 0x00180 /* ZQ Control Register 0 */ +#define PLATO_DDR_CTRL_ZQCTL1 0x00184 /* ZQ Control Register 1 */ +#define PLATO_DDR_CTRL_ZQCTL2 0x00188 /* ZQ Control Register 2 */ +#define PLATO_DDR_CTRL_ZQSTAT 0x0018c /* ZQ Status Register */ +#define PLATO_DDR_CTRL_DFITMG0 0x00190 /* DFI Timing Register 0 */ +#define PLATO_DDR_CTRL_DFITMG1 0x00194 /* DFI Timing Register 1 */ +#define PLATO_DDR_CTRL_DFILPCFG0 0x00198 /* DFI Low Power Configuration Register 0 */ +#define PLATO_DDR_CTRL_DFILPCFG1 0x0019c /* DFI Low Power Configuration Register 1 */ +#define PLATO_DDR_CTRL_DFIUPD0 0x001a0 /* DFI Update Register 0 */ +#define PLATO_DDR_CTRL_DFIUPD1 0x001a4 /* DFI Update Register 1 */ +#define PLATO_DDR_CTRL_DFIUPD2 0x001a8 /* DFI Update Register 2 */ +#define PLATO_DDR_CTRL_DFIUPD3 0x001ac /* DFI Update Register 3 */ +#define PLATO_DDR_CTRL_DFIMISC 0x001b0 /* DFI Miscellaneous Control Register */ +#define PLATO_DDR_CTRL_DFIUPD4 0x001b8 /* DFI Update Register 4 */ +#define PLATO_DDR_CTRL_DBICTL 0x001c0 /* DM/DBI Control Register */ +#define 
PLATO_DDR_CTRL_ADDRMAP0 0x00200 /* Address Map Register 0 */ +#define PLATO_DDR_CTRL_ADDRMAP1 0x00204 /* Address Map Register 1 */ +#define PLATO_DDR_CTRL_ADDRMAP2 0x00208 /* Address Map Register 2 */ +#define PLATO_DDR_CTRL_ADDRMAP3 0x0020c /* Address Map Register 3 */ +#define PLATO_DDR_CTRL_ADDRMAP4 0x00210 /* Address Map Register 4 */ +#define PLATO_DDR_CTRL_ADDRMAP5 0x00214 /* Address Map Register 5 */ +#define PLATO_DDR_CTRL_ADDRMAP6 0x00218 /* Address Map Register 6 */ +#define PLATO_DDR_CTRL_ADDRMAP7 0x0021c /* Address Map Register 7 */ +#define PLATO_DDR_CTRL_ADDRMAP8 0x00220 /* Address Map Register 8 */ +#define PLATO_DDR_CTRL_ODTCFG 0x00240 /* ODT Configuration Register */ +#define PLATO_DDR_CTRL_ODTMAP 0x00244 /* ODT/Rank Map Register */ +#define PLATO_DDR_CTRL_SCHED 0x00250 /* Scheduler Control Register */ +#define PLATO_DDR_CTRL_SCHED1 0x00254 /* Scheduler Control Register 1 */ +#define PLATO_DDR_CTRL_PERFHPR1 0x0025c /* High Priority Read CAM Register 1 */ +#define PLATO_DDR_CTRL_PERFLPR1 0x00264 /* Low Priority Read CAM Register 1 */ +#define PLATO_DDR_CTRL_PERFWR1 0x0026c /* Write CAM Register 1 */ +#define PLATO_DDR_CTRL_DBG0 0x00300 /* Debug Register 0 */ +#define PLATO_DDR_CTRL_DBG1 0x00304 /* Debug Register 1 */ +#define PLATO_DDR_CTRL_DBGCAM 0x00308 /* CAM Debug Register */ +#define PLATO_DDR_CTRL_DBGCMD 0x0030c /* Command Debug Register */ +#define PLATO_DDR_CTRL_DBGSTAT 0x00310 /* Status Debug Register */ +#define PLATO_DDR_CTRL_SWCTL 0x00320 /* Software register programming control enable */ +#define PLATO_DDR_CTRL_SWSTAT 0x00324 /* Software register programming control status */ + +/* Multi port registers */ +#define PLATO_DDR_CTRL_PSTAT 0x003fc +#define PLATO_DDR_CTRL_PCCFG 0x00400 +#define PLATO_DDR_CTRL_PCFGR 0x00404 +#define PLATO_DDR_CTRL_PCFGW 0x00408 +#define PLATO_DDR_CTRL_PCFGC 0x0040c +#define PLATO_DDR_CTRL_PCFGIDMASKCH_START 0x00410 /* Offset + m*0x08, where m is the virtual channel */ +#define 
PLATO_DDR_CTRL_PCFGIDVALUECH_START 0x00414 /* Offset + m*0x08, where m is the virtual channel */ +#define PLATO_DDR_CTRL_PCTRL 0x00490 + + +#endif /* _PLATO_DDR_CTRL_REGS_H_ */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_ddr_publ_regs.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_ddr_publ_regs.h new file mode 100644 index 000000000000..762eda907f5b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_ddr_publ_regs.h @@ -0,0 +1,121 @@ +/*************************************************************************/ /*! +@Title Plato DDR PUBL register definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _PLATO_DDR_PUBL_REGS_H_ +#define _PLATO_DDR_PUBL_REGS_H_ + +#define PLATO_DDR_PUBL_RIDR_OFFSET (0x0000) +#define PLATO_DDR_PUBL_PIR_OFFSET (0x0004) // PHY init +#define PLATO_DDR_PUBL_PGCR0_OFFSET (0x0010) // PHY General configuration +#define PLATO_DDR_PUBL_PGCR1_OFFSET (0x0014) +#define PLATO_DDR_PUBL_PGCR2_OFFSET (0x0018) +#define PLATO_DDR_PUBL_PGCR3_OFFSET (0x001C) +#define PLATO_DDR_PUBL_PGCR4_OFFSET (0x0020) +#define PLATO_DDR_PUBL_PGCR5_OFFSET (0x0024) +#define PLATO_DDR_PUBL_PGCR6_OFFSET (0x0028) +#define PLATO_DDR_PUBL_PGCR7_OFFSET (0x002C) +#define PLATO_DDR_PUBL_PGSR0_OFFSET (0x0030) // PHY general status +#define PLATO_DDR_PUBL_PGSR1_OFFSET (0x0034) + +#define PLATO_DDR_PUBL_PTR0_OFFSET (0x0040) +#define PLATO_DDR_PUBL_PTR1_OFFSET (0x0044) +#define PLATO_DDR_PUBL_PTR2_OFFSET (0x0048) +#define PLATO_DDR_PUBL_PTR3_OFFSET (0x004C) +#define PLATO_DDR_PUBL_PTR4_OFFSET (0x0050) + +#define PLATO_DDR_PUBL_DXCCR_OFFSET (0x0088) // DXCCR +#define PLATO_DDR_PUBL_DSGCR_OFFSET (0x0090) // DDR System general configuration + +#define PLATO_DDR_PUBL_DCR_OFFSET (0x0100) // DRAM Configuration + +#define PLATO_DDR_PUBL_DTPR0_OFFSET (0x0110) +#define PLATO_DDR_PUBL_DTPR1_OFFSET (0x0114) +#define PLATO_DDR_PUBL_DTPR2_OFFSET (0x0118) +#define PLATO_DDR_PUBL_DTPR3_OFFSET (0x011C) +#define PLATO_DDR_PUBL_DTPR4_OFFSET (0x0120) +#define PLATO_DDR_PUBL_DTPR5_OFFSET (0x0124) +#define PLATO_DDR_PUBL_DTPR6_OFFSET (0x0128) + +#define PLATO_DDR_PUBL_RDIMMGCR0_OFFSET (0x0140) +#define PLATO_DDR_PUBL_RDIMMGCR1_OFFSET (0x0144) +#define PLATO_DDR_PUBL_RDIMMGCR2_OFFSET (0x0148) + +#define PLATO_DDR_PUBL_RDIMMCR0_OFFSET (0x0150) +#define PLATO_DDR_PUBL_RDIMMCR1_OFFSET (0x0154) +#define PLATO_DDR_PUBL_RDIMMCR2_OFFSET (0x0158) +#define PLATO_DDR_PUBL_RDIMMCR3_OFFSET (0x015C) +#define PLATO_DDR_PUBL_RDIMMCR4_OFFSET (0x0160) + +#define PLATO_DDR_PUBL_SCHCR0_OFFSET (0x0168) +#define 
PLATO_DDR_PUBL_SCHCR1_OFFSET (0x016C) + +#define PLATO_DDR_PUBL_MR0_OFFSET (0x0180) +#define PLATO_DDR_PUBL_MR1_OFFSET (0x0184) +#define PLATO_DDR_PUBL_MR2_OFFSET (0x0188) +#define PLATO_DDR_PUBL_MR3_OFFSET (0x018C) +#define PLATO_DDR_PUBL_MR4_OFFSET (0x0190) +#define PLATO_DDR_PUBL_MR5_OFFSET (0x0194) +#define PLATO_DDR_PUBL_MR6_OFFSET (0x0198) +#define PLATO_DDR_PUBL_MR7_OFFSET (0x019C) + +#define PLATO_DDR_PUBL_MR11_OFFSET (0x01AC) + +#define PLATO_DDR_PUBL_DTCR0_OFFSET (0x0200) +#define PLATO_DDR_PUBL_DTCR1_OFFSET (0x0204) + +#define PLATO_DDR_PUBL_DATX8_OFFSET (0x0700) +// DATX8 registers have 0x100 bytes per lane, reg is register number +#define PLATO_DDR_PUBL_DATX_OFFSET(offset, lane, reg) (offset + (lane * 0x100) + (reg * 4)) +#define PLATO_DDR_PUBL_DATX_LANE_COUNT (8) + +// DATX8 Bit delay line registers +#define PLATO_DDR_PUBL_DX0BDLR0_OFFSET (0x0740) +#define PLATO_DDR_PUBL_DXBDLR_REGS_PER_LANE (3) +#define PLATO_DDR_PUBL_DXnBDLR_OFFSET(lane, reg) PLATO_DDR_PUBL_DATX_OFFSET(PLATO_DDR_PUBL_DX0BDLR0_OFFSET, lane, reg) + +// DATX8 General Status Registers +#define PLATO_DDR_PUBL_DX0GSR0_OFFSET (0x07E0) +#define PLATO_DDR_PUBL_DXGSR_REGS_PER_LANE (4) +#define PLATO_DDR_PUBL_DXnGSR_OFFSET(lane, reg) PLATO_DDR_PUBL_DATX_OFFSET(PLATO_DDR_PUBL_DX0GSR0_OFFSET, lane, reg) + + +#endif diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_drv.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_drv.c new file mode 100644 index 000000000000..fdd4c0004a29 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_drv.c @@ -0,0 +1,946 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File plato_drv.c +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +/* + * This is a device driver for the plato PCI card. It creates + * platform devices for the pdp, hdmi, and ext sub-devices, and + * exports functions to manage the shared interrupt handling + */ + +#include +#include +#include +#include + +#include "sysinfo.h" +#include "plato_drv.h" +#include "plato_aon_regs.h" +#include "plato_top_regs.h" +#include "plato_ddr_ctrl_regs.h" +#include "plato_ddr_publ_regs.h" + +#include "pvrmodule.h" + +#include "kernel_compatibility.h" + +MODULE_DESCRIPTION("Plato PCI driver"); + +/* Clock speed module parameters */ +static unsigned int mem_clock_speed = PLATO_MEM_CLOCK_SPEED; +module_param(mem_clock_speed, uint, 0444); +MODULE_PARM_DESC(mem_clock_speed, + "Plato memory clock speed in Hz (600000000 - 800000000)"); + +static unsigned int core_clock_speed = PLATO_RGX_CORE_CLOCK_SPEED; +module_param(core_clock_speed, uint, 0444); +MODULE_PARM_DESC(core_clock_speed, + "Plato core clock speed in Hz (396000000 - 742500000)"); + +static int plato_init(struct pci_dev *pdev, const struct pci_device_id *id); +static void plato_exit(struct pci_dev *pdev); + +struct pci_device_id plato_pci_tbl[] = { + { PCI_VDEVICE(PLATO, PCI_DEVICE_ID_PLATO) }, + { }, +}; + +static struct pci_driver plato_pci_driver = { + .name = PLATO_SYSTEM_NAME, + .id_table = plato_pci_tbl, + .probe = plato_init, + .remove = plato_exit, +}; + +module_pci_driver(plato_pci_driver); +MODULE_DEVICE_TABLE(pci, plato_pci_tbl); + +static struct plato_debug_register plato_noc_regs[] = { + {"NOC Offset 0x00", 0x00, 0}, + {"NOC Offset 0x04", 0x04, 0}, + {"NOC Offset 0x08", 0x08, 0}, + {"NOC Offset 0x0C", 0x0C, 0}, + {"NOC Offset 0x10", 0x10, 0}, + {"NOC Offset 0x14", 0x14, 0}, + {"NOC Offset 0x18", 0x18, 0}, + {"NOC Offset 0x1C", 0x1C, 0}, + {"NOC Offset 0x50", 0x50, 0}, + {"NOC Offset 0x54", 0x54, 0}, + {"NOC Offset 0x58", 0x58, 0}, + {"DDR A Ctrl", SYS_PLATO_REG_NOC_DBG_DDR_A_CTRL_OFFSET, 0}, + 
{"DDR A Data", SYS_PLATO_REG_NOC_DBG_DDR_A_DATA_OFFSET, 0}, + {"DDR A Publ", SYS_PLATO_REG_NOC_DBG_DDR_A_PUBL_OFFSET, 0}, + {"DDR B Ctrl", SYS_PLATO_REG_NOC_DBG_DDR_B_CTRL_OFFSET, 0}, + {"DDR B Data", SYS_PLATO_REG_NOC_DBG_DDR_B_DATA_OFFSET, 0}, + {"DDR B Publ", SYS_PLATO_REG_NOC_DBG_DDR_B_PUBL_OFFSET, 0}, + {"Display S", SYS_PLATO_REG_NOC_DBG_DISPLAY_S_OFFSET, 0}, + {"GPIO 0 S", SYS_PLATO_REG_NOC_DBG_GPIO_0_S_OFFSET, 0}, + {"GPIO 1 S", SYS_PLATO_REG_NOC_DBG_GPIO_1_S_OFFSET, 0}, + {"GPU S", SYS_PLATO_REG_NOC_DBG_GPU_S_OFFSET, 0}, + {"PCI PHY", SYS_PLATO_REG_NOC_DBG_PCI_PHY_OFFSET, 0}, + {"PCI Reg", SYS_PLATO_REG_NOC_DBG_PCI_REG_OFFSET, 0}, + {"PCI S", SYS_PLATO_REG_NOC_DBG_PCI_S_OFFSET, 0}, + {"Periph S", SYS_PLATO_REG_NOC_DBG_PERIPH_S_OFFSET, 0}, + {"Ret Reg", SYS_PLATO_REG_NOC_DBG_RET_REG_OFFSET, 0}, + {"Service", SYS_PLATO_REG_NOC_DBG_SERVICE_OFFSET, 0}, +}; + +static struct plato_debug_register plato_aon_regs[] = { + {"AON Offset 0x0000", 0x0000, 0}, + {"AON Offset 0x0070", 0x0070, 0}, +}; + +static int poll_pr(struct device *dev, void *base, u32 reg, u32 val, + u32 msk, u32 cnt, u32 intrvl) +{ + u32 polnum; + + for (polnum = 0; polnum < cnt; polnum++) { + if ((plato_read_reg32(base, reg) & msk) == val) + break; + plato_sleep_ms(intrvl); + } + if (polnum == cnt) { + dev_info(dev, + "Poll failed for register: 0x%08X. 
Expected 0x%08X Received 0x%08X", + (unsigned int)reg, val, + plato_read_reg32(base, reg) & msk); + return -ETIME; + } + + return 0; +} + +#define poll(dev, base, reg, val, msk) poll_pr(dev, base, reg, val, msk, 10, 10) + +int request_pci_io_addr(struct pci_dev *pdev, u32 index, + resource_size_t offset, resource_size_t length) +{ + resource_size_t start, end; + + start = pci_resource_start(pdev, index); + end = pci_resource_end(pdev, index); + + if ((start + offset + length - 1) > end) + return -EIO; + if (pci_resource_flags(pdev, index) & IORESOURCE_IO) { + if (request_region(start + offset, length, PVRSRV_MODNAME) + == NULL) + return -EIO; + } else { + if (request_mem_region(start + offset, length, PVRSRV_MODNAME) + == NULL) + return -EIO; + } + return PLATO_INIT_SUCCESS; +} + +void release_pci_io_addr(struct pci_dev *pdev, u32 index, + resource_size_t start, resource_size_t length) +{ + if (pci_resource_flags(pdev, index) & IORESOURCE_IO) + release_region(start, length); + else + release_mem_region(start, length); +} + +static void plato_devres_release(struct device *dev, void *res) +{ + /* No extra cleanup needed */ +} + +static irqreturn_t plato_irq_handler(int irq, void *data) +{ + u32 interrupt_status; + unsigned long flags; + irqreturn_t ret = IRQ_NONE; + struct plato_device *plato = (struct plato_device *)data; +#if !defined(VIRTUAL_PLATFORM) + void *perip_regs = plato->sys_io.registers + SYS_PLATO_REG_PERIP_OFFSET; +#endif + + spin_lock_irqsave(&plato->interrupt_handler_lock, flags); + +#if defined(VIRTUAL_PLATFORM) + /* On virtual platform all interrupt handlers need to be called */ + interrupt_status = + (0x1 << PLATO_INT_SHIFT_GPU) | (0x1 << PLATO_INT_SHIFT_PDP); +#else + interrupt_status = + plato_read_reg32(perip_regs, PLATO_TOP_CR_INT_STATUS); +#endif + + if (interrupt_status & (1 << PLATO_INT_SHIFT_GPU)) { + struct plato_interrupt_handler *rogue_int = + &plato->interrupt_handlers[PLATO_INTERRUPT_GPU]; + + if (rogue_int->enabled && 
rogue_int->handler_function) + rogue_int->handler_function(rogue_int->handler_data); + ret = IRQ_HANDLED; + } + if (interrupt_status & (1 << PLATO_INT_SHIFT_PDP)) { + struct plato_interrupt_handler *pdp_int = + &plato->interrupt_handlers[PLATO_INTERRUPT_PDP]; + + if (pdp_int->enabled && pdp_int->handler_function) + pdp_int->handler_function(pdp_int->handler_data); + ret = IRQ_HANDLED; + } + if (interrupt_status & (1 << PLATO_INT_SHIFT_HDMI)) { + struct plato_interrupt_handler *hdmi_int = + &plato->interrupt_handlers[PLATO_INTERRUPT_HDMI]; + + if (hdmi_int->enabled && hdmi_int->handler_function) + hdmi_int->handler_function(hdmi_int->handler_data); + ret = IRQ_HANDLED; + } + + spin_unlock_irqrestore(&plato->interrupt_handler_lock, flags); + + return ret; +} + +static u32 hash_noc_addr(u32 addr) +{ + u32 hash_bits = (addr & 0x4000 ? 1 : 0); + + if (addr & 0x2000) + ++hash_bits; + if (addr & 0x1000) + ++hash_bits; + if (addr & 0x0800) + ++hash_bits; + if ((hash_bits == 1) || (hash_bits == 3)) + addr ^= 0x100; + + return addr; +} + +int plato_debug_info(struct device *dev, + struct plato_debug_register *noc_dbg_regs, + struct plato_debug_register *aon_dbg_regs) +{ + int offset = 0; + int i = 0; + void *noc_regs; + void *aon_regs; + struct plato_device *plato = devres_find(dev, plato_devres_release, + NULL, NULL); + + if (!plato) { + dev_err(dev, "No plato device resources found\n"); + return -ENODEV; + } + + noc_regs = plato->sys_io.registers + SYS_PLATO_REG_NOC_OFFSET; + aon_regs = plato->aon_regs.registers; + + /* First set of NOC regs don't need to be hashed */ + for (i = noc_dbg_regs[0].offset; + i < SYS_PLATO_REG_NOC_DBG_DDR_A_CTRL_OFFSET; + i = noc_dbg_regs[offset].offset) { + noc_dbg_regs[offset++].value = plato_read_reg32(noc_regs, i); + } + + /* NOC regs that need to be hashed */ + for (i = offset; + i < ARRAY_SIZE(plato_noc_regs); + i++) { + u32 hashed_offset = hash_noc_addr(noc_dbg_regs[i].offset + 0xC); + + noc_dbg_regs[offset++].value = + 
plato_read_reg32(noc_regs, hashed_offset); + } + + /* Fill in AON regs */ + for (i = 0; + i < ARRAY_SIZE(plato_aon_regs); + i++) { + aon_dbg_regs[i].value = + plato_read_reg32(aon_regs, aon_dbg_regs[i].offset); + } + + return 0; +} +EXPORT_SYMBOL(plato_debug_info); + +static int plato_enable_irq(struct plato_device *plato) +{ + int err = PLATO_INIT_SUCCESS; + +#if !defined(VIRTUAL_PLATFORM) + err = request_irq(plato->pdev->irq, plato_irq_handler, + IRQF_SHARED, PLATO_SYSTEM_NAME, plato); +#endif + + return err; +} + +static void plato_disable_irq(struct plato_device *plato) +{ + free_irq(plato->pdev->irq, plato); +} + +static int register_rogue_device(struct plato_device *plato) +{ + int err = 0; + struct resource rogue_resources[] = { + DEFINE_RES_MEM_NAMED(pci_resource_start(plato->pdev, + SYS_PLATO_REG_PCI_BASENUM) + SYS_PLATO_REG_RGX_OFFSET, + SYS_PLATO_REG_RGX_SIZE, PLATO_ROGUE_RESOURCE_REGS), + }; + + struct plato_rogue_platform_data pdata = { + .plato_memory_base = plato->rogue_mem.base, + .has_nonmappable = plato->has_nonmappable, + .rogue_heap_dev_addr = plato->dev_mem_base, + .rogue_heap_mappable = plato->rogue_heap_mappable, + .rogue_heap_nonmappable = plato->rogue_heap_nonmappable, + #if defined(SUPPORT_PLATO_DISPLAY) + .pdp_heap = plato->pdp_heap, + #endif + }; + struct platform_device_info rogue_device_info = { + .parent = &plato->pdev->dev, + .name = PLATO_DEVICE_NAME_ROGUE, + .id = -2, + .res = rogue_resources, + .num_res = ARRAY_SIZE(rogue_resources), + .data = &pdata, + .size_data = sizeof(pdata), + .dma_mask = DMA_BIT_MASK(40), + }; + + plato->rogue_dev = platform_device_register_full(&rogue_device_info); + + if (IS_ERR(plato->rogue_dev)) { + err = PTR_ERR(plato->rogue_dev); + dev_err(&plato->pdev->dev, + "Failed to register rogue device (%d)\n", err); + plato->rogue_dev = NULL; + } + return err; +} + + +#if defined(SUPPORT_PLATO_DISPLAY) + +static int register_pdp_device(struct plato_device *plato) +{ + int err = 0; + + struct resource 
pdp_resources[] = { + DEFINE_RES_MEM_NAMED(pci_resource_start(plato->pdev, + SYS_PLATO_REG_PCI_BASENUM) + SYS_PLATO_REG_PDP_OFFSET, + SYS_PLATO_REG_PDP_SIZE, PLATO_PDP_RESOURCE_REGS), + DEFINE_RES_MEM_NAMED(pci_resource_start(plato->pdev, + SYS_PLATO_REG_PCI_BASENUM) + SYS_PLATO_REG_PDP_BIF_OFFSET, + SYS_PLATO_REG_PDP_BIF_SIZE, PLATO_PDP_RESOURCE_BIF_REGS), + }; + struct plato_pdp_platform_data pdata = { + // .memory_base is translate factor from CPU (PCI) address to equivalent + // GPU address. GEM PDP memory is allocated in GPU space as dev_addr. + // The cpu_addr = dev_addr - memory_base + .memory_base = plato->rogue_mem.base - plato->dev_mem_base, + .pdp_heap_memory_base = plato->pdp_heap.base, + .pdp_heap_memory_size = plato->pdp_heap.size, + }; + struct platform_device_info pdp_device_info = { + .parent = &plato->pdev->dev, + .name = PLATO_DEVICE_NAME_PDP, + .id = -2, + .data = &pdata, + .size_data = sizeof(pdata), + .dma_mask = DMA_BIT_MASK(32), + }; + + pdp_device_info.res = pdp_resources; + pdp_device_info.num_res = ARRAY_SIZE(pdp_resources); + + plato->pdp_dev = platform_device_register_full(&pdp_device_info); + if (IS_ERR(plato->pdp_dev)) { + err = PTR_ERR(plato->pdp_dev); + dev_err(&plato->pdev->dev, + "Failed to register PDP device (%d)\n", err); + plato->pdp_dev = NULL; + goto err; + } +err: + return err; +} + +static int register_hdmi_device(struct plato_device *plato) +{ + int err = 0; + + struct resource hdmi_resources[] = { + DEFINE_RES_MEM_NAMED(pci_resource_start(plato->pdev, + SYS_PLATO_REG_PCI_BASENUM) + SYS_PLATO_REG_HDMI_OFFSET, + SYS_PLATO_REG_HDMI_SIZE, PLATO_HDMI_RESOURCE_REGS), + }; + struct plato_hdmi_platform_data pdata = { + .plato_memory_base = plato->rogue_mem.base, + }; + struct platform_device_info hdmi_device_info = { + .parent = &plato->pdev->dev, + .name = PLATO_DEVICE_NAME_HDMI, + .id = -2, + .data = &pdata, + .size_data = sizeof(pdata), + .dma_mask = DMA_BIT_MASK(32), + }; + + hdmi_device_info.res = hdmi_resources; + 
hdmi_device_info.num_res = ARRAY_SIZE(hdmi_resources); + + plato->hdmi_dev = platform_device_register_full(&hdmi_device_info); + if (IS_ERR(plato->hdmi_dev)) { + err = PTR_ERR(plato->hdmi_dev); + dev_err(&plato->pdev->dev, + "Failed to register HDMI device (%d)\n", err); + plato->hdmi_dev = NULL; + goto err; + } +err: + return err; +} + +#endif + +unsigned int plato_mem_clock_speed(struct device *dev) +{ + unsigned int ret = mem_clock_speed; + + (mem_clock_speed > PLATO_MAX_MEM_CLOCK_SPEED) ? + ret = PLATO_MAX_MEM_CLOCK_SPEED : 0; + (mem_clock_speed < PLATO_MIN_MEM_CLOCK_SPEED) ? + ret = PLATO_MIN_MEM_CLOCK_SPEED : 0; + + return ret; +} +EXPORT_SYMBOL(plato_mem_clock_speed); + +unsigned int plato_core_clock_speed(struct device *dev) +{ + unsigned int ret = core_clock_speed; + + (core_clock_speed > PLATO_RGX_MAX_CORE_CLOCK_SPEED) ? + ret = PLATO_RGX_MAX_CORE_CLOCK_SPEED : 0; + (core_clock_speed < PLATO_RGX_MIN_CORE_CLOCK_SPEED) ? + ret = PLATO_RGX_MIN_CORE_CLOCK_SPEED : 0; + + return ret; +} +EXPORT_SYMBOL(plato_core_clock_speed); + +unsigned int plato_pll_clock_speed(struct device *dev, + unsigned int clock_speed) +{ + /* + * Force the lowest possible PLL clock in case when the requested + * clock speed is higher than the largest value known by the function. + */ + unsigned int pll_clock_speed = clock_speed; + unsigned int acc_clock_speed = clock_speed; + + /* Tweak the values if the requested clock speed is a supported one */ + (clock_speed <= 742500000) ? + (pll_clock_speed = 742500000, acc_clock_speed = 742500000) : 0; + (clock_speed <= 668250000) ? + (pll_clock_speed = 1336500000, acc_clock_speed = 668250000) : 0; + (clock_speed <= 594000000) ? + (pll_clock_speed = 1188000000, acc_clock_speed = 594000000) : 0; + (clock_speed <= 544500000) ? + (pll_clock_speed = 1633500000, acc_clock_speed = 544500000) : 0; + (clock_speed <= 519750000) ? + (pll_clock_speed = 1039500000, acc_clock_speed = 519750000) : 0; + (clock_speed <= 495000000) ? 
+ (pll_clock_speed = 1485000000, acc_clock_speed = 495000000) : 0; + (clock_speed <= 445500000) ? + (pll_clock_speed = 891000000, acc_clock_speed = 445500000) : 0; + (clock_speed <= 408375000) ? + (pll_clock_speed = 1633500000, acc_clock_speed = 408375000) : 0; + (clock_speed <= 396000000) ? + (pll_clock_speed = 1188000000, acc_clock_speed = 396000000) : 0; + + /* + * Do fine grained adjustment if the requested clock speed + * is different than expected. + */ + return ((((unsigned long long int)pll_clock_speed << 32) / + acc_clock_speed) >> 32) * clock_speed; +} +EXPORT_SYMBOL(plato_pll_clock_speed); + +static int setup_io_region(struct pci_dev *pdev, + struct plato_io_region *region, u32 index, + resource_size_t offset, resource_size_t size) +{ + int err; + resource_size_t pci_phys_addr; + + err = request_pci_io_addr(pdev, index, offset, size); + if (err != PLATO_INIT_SUCCESS) { + dev_err(&pdev->dev, + "Failed to request plato registers (err=%d)\n", err); + return -EIO; + } + pci_phys_addr = pci_resource_start(pdev, index); + region->region.base = pci_phys_addr + offset; + region->region.size = size; + + region->registers = ioremap(region->region.base, region->region.size); + + if (!region->registers) { + dev_err(&pdev->dev, "Failed to map plato registers\n"); + release_pci_io_addr(pdev, index, + region->region.base, region->region.size); + return -EIO; + } + return 0; +} + +static int plato_dev_init(struct plato_device *plato, struct pci_dev *pdev) +{ + int err; + + plato->pdev = pdev; + + spin_lock_init(&plato->interrupt_handler_lock); + spin_lock_init(&plato->interrupt_enable_lock); + + /* Reserve and map the plato sys registers up to the rogue regs */ + err = setup_io_region(pdev, &plato->sys_io, + SYS_PLATO_REG_PCI_BASENUM, + SYS_PLATO_REG_SYS_OFFSET, SYS_PLATO_REG_RGX_OFFSET); + if (err) + goto err_out; + + plato_dev_info(&pdev->dev, + "Initialized system register region at base 0x%llx and size 0x%llx", + (u64)plato->sys_io.region.base, 
(u64)plato->sys_io.region.size); + + /* Now map AON regs which come after rogue regs */ + err = setup_io_region(pdev, &plato->aon_regs, + SYS_PLATO_REG_PCI_BASENUM, + SYS_PLATO_REG_AON_OFFSET, SYS_PLATO_REG_AON_SIZE); + if (err) + goto err_unmap_sys; + + plato_dev_info(&pdev->dev, + "Initialized AON register region at base 0x%llx and size 0x%llx", + (u64)plato->aon_regs.region.base, + (u64)plato->aon_regs.region.size); + + err = plato_memory_init(plato); + if (err) { + plato_dev_info(&plato->pdev->dev, + "plato_memory_init failed (%d)", err); + goto err_unmap_aon; + } + +err_out: + return err; +err_unmap_aon: + iounmap(plato->aon_regs.registers); + release_pci_io_addr(pdev, SYS_PLATO_REG_PCI_BASENUM, + plato->aon_regs.region.base, plato->aon_regs.region.size); +err_unmap_sys: + iounmap(plato->sys_io.registers); + release_pci_io_addr(pdev, SYS_PLATO_REG_PCI_BASENUM, + plato->sys_io.region.base, plato->sys_io.region.size); + goto err_out; +} + +static void plato_dev_cleanup(struct plato_device *plato) +{ + plato_memory_deinit(plato); + + iounmap(plato->aon_regs.registers); + release_pci_io_addr(plato->pdev, SYS_PLATO_REG_PCI_BASENUM, + plato->aon_regs.region.base, plato->aon_regs.region.size); + + iounmap(plato->sys_io.registers); + release_pci_io_addr(plato->pdev, SYS_PLATO_REG_PCI_BASENUM, + plato->sys_io.region.base, plato->sys_io.region.size); +} + + +static int plato_init(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct plato_device *plato; + int err = 0; + + if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) + return -ENOMEM; + + plato = devres_alloc(plato_devres_release, + sizeof(*plato), GFP_KERNEL); + if (!plato) { + err = -ENOMEM; + goto err_out; + } + + devres_add(&pdev->dev, plato); + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, + "error - pci_enable_device returned %d\n", err); + goto err_out; + } + + /* Sanity check */ + if (pdev->vendor == PCI_VENDOR_ID_PLATO && + pdev->device == PCI_DEVICE_ID_PLATO) { + + err 
= plato_dev_init(plato, pdev); + if (err) + goto err_disable_device; + + err = plato_cfg_init(plato); + if (err) + goto err_dev_cleanup; + + err = plato_enable_irq(plato); + if (err) { + dev_err(&pdev->dev, "Failed to initialise IRQ\n"); + goto err_dev_cleanup; + } + } else { + dev_err(&pdev->dev, "WARNING: vendor/device ID is not correct"); + goto err_disable_device; + } + + /* Register rogue device */ + register_rogue_device(plato); + + /* Register display devices */ +#if defined(SUPPORT_PLATO_DISPLAY) + register_pdp_device(plato); + register_hdmi_device(plato); +#endif + + devres_remove_group(&pdev->dev, NULL); + + goto plato_init_return; + +err_dev_cleanup: + plato_dev_cleanup(plato); +err_disable_device: + pci_disable_device(pdev); +err_out: + devres_release_group(&pdev->dev, NULL); + +plato_init_return: + return err; +} + +static void plato_exit(struct pci_dev *pdev) +{ + int i; + struct plato_device *plato; + + plato_dev_info(&pdev->dev, "%s entry\n", __func__); + + plato = devres_find(&pdev->dev, plato_devres_release, NULL, NULL); + + if (!plato) + return; + + if (plato->rogue_dev) { + plato_dev_info(&pdev->dev, + "%s: platform_device_unregister rogue_dev\n", + __func__); + platform_device_unregister(plato->rogue_dev); + } + +#if defined(SUPPORT_PLATO_DISPLAY) + if (plato->pdp_dev) { + plato_dev_info(&pdev->dev, "platform_device_unregister pdp_dev\n"); + platform_device_unregister(plato->pdp_dev); + } + + if (plato->hdmi_dev) { + plato_dev_info(&pdev->dev, "platform_device_unregister hdmi_dev\n"); + platform_device_unregister(plato->hdmi_dev); + } +#endif + + for (i = 0; i < PLATO_INTERRUPT_MAX; i++) + plato_disable_interrupt(&pdev->dev, i); + + plato_disable_irq(plato); + plato_dev_cleanup(plato); + + plato_dev_info(&pdev->dev, "pci_disable_device\n"); + + pci_disable_device(pdev); + + plato_dev_info(&pdev->dev, "%s exit\n"); +} + +static u32 plato_interrupt_id_to_flag(enum PLATO_INTERRUPT interrupt_id) +{ + switch (interrupt_id) { + case 
PLATO_INTERRUPT_GPU: + return (1 << PLATO_INT_SHIFT_GPU); + case PLATO_INTERRUPT_PDP: + return (1 << PLATO_INT_SHIFT_PDP); + case PLATO_INTERRUPT_HDMI: + return (1 << PLATO_INT_SHIFT_HDMI); + default: + BUG(); + } +} + +int plato_enable(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int err; + + err = pci_enable_device(pdev); + if (!err) + pci_set_master(pdev); + + return err; +} +EXPORT_SYMBOL(plato_enable); + +void plato_disable(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + pci_disable_device(pdev); +} +EXPORT_SYMBOL(plato_disable); + +int plato_set_interrupt_handler(struct device *dev, + enum PLATO_INTERRUPT interrupt_id, + void (*handler_function)(void *), + void *data) +{ + int err = 0; + unsigned long flags; + struct plato_device *plato = devres_find(dev, plato_devres_release, + NULL, NULL); + + if (!plato) { + dev_err(dev, "No plato device resources found\n"); + err = -ENODEV; + goto err_out; + } + + if (interrupt_id < 0 || interrupt_id >= PLATO_INTERRUPT_MAX) { + dev_err(dev, "Invalid interrupt ID (%d)\n", interrupt_id); + err = -EINVAL; + goto err_out; + } + + spin_lock_irqsave(&plato->interrupt_handler_lock, flags); + + plato->interrupt_handlers[interrupt_id].handler_function = + handler_function; + plato->interrupt_handlers[interrupt_id].handler_data = data; + + spin_unlock_irqrestore(&plato->interrupt_handler_lock, flags); + +err_out: + return err; +} +EXPORT_SYMBOL(plato_set_interrupt_handler); + +int plato_enable_interrupt(struct device *dev, enum PLATO_INTERRUPT interrupt_id) +{ + int err = 0; + unsigned long flags; + void *perip_regs; + int value; + struct plato_device *plato = devres_find(dev, plato_devres_release, + NULL, NULL); + + if (!plato) { + dev_err(dev, "No plato device resources found\n"); + err = -ENODEV; + goto err_out; + } + if (interrupt_id < 0 || interrupt_id >= PLATO_INTERRUPT_MAX) { + dev_err(dev, "Invalid interrupt ID (%d)\n", interrupt_id); + err = -EINVAL; + goto err_out; + } + 
spin_lock_irqsave(&plato->interrupt_enable_lock, flags); + + if (plato->interrupt_handlers[interrupt_id].enabled) { + plato_dev_warn(dev, "Interrupt ID %d already enabled\n", + interrupt_id); + err = -EEXIST; + goto err_unlock; + } + plato->interrupt_handlers[interrupt_id].enabled = true; + +#if !defined(VIRTUAL_PLATFORM) + perip_regs = plato->sys_io.registers + SYS_PLATO_REG_PERIP_OFFSET; + + value = plato_read_reg32(perip_regs, PLATO_TOP_CR_PCI_INT_MASK); + value |= plato_interrupt_id_to_flag(interrupt_id); + plato_write_reg32(perip_regs, PLATO_TOP_CR_PCI_INT_MASK, value); + + (void) plato_read_reg32(perip_regs, PLATO_TOP_CR_PCI_INT_MASK); +#endif + +err_unlock: + spin_unlock_irqrestore(&plato->interrupt_enable_lock, flags); +err_out: + return err; +} +EXPORT_SYMBOL(plato_enable_interrupt); + +int plato_disable_interrupt(struct device *dev, enum PLATO_INTERRUPT interrupt_id) +{ + int err = 0; + unsigned long flags; + void *perip_regs; + int value; + struct plato_device *plato = devres_find(dev, plato_devres_release, + NULL, NULL); + + if (!plato) { + dev_err(dev, "No plato device resources found\n"); + err = -ENODEV; + goto err_out; + } + if (interrupt_id < 0 || interrupt_id >= PLATO_INTERRUPT_MAX) { + dev_err(dev, "Invalid interrupt ID (%d)\n", interrupt_id); + err = -EINVAL; + goto err_out; + } + spin_lock_irqsave(&plato->interrupt_enable_lock, flags); + + if (!plato->interrupt_handlers[interrupt_id].enabled) { + plato_dev_warn(dev, "Interrupt ID %d already disabled\n", + interrupt_id); + } + plato->interrupt_handlers[interrupt_id].enabled = false; + +#if !defined(VIRTUAL_PLATFORM) + perip_regs = plato->sys_io.registers + SYS_PLATO_REG_PERIP_OFFSET; + + value = plato_read_reg32(perip_regs, PLATO_TOP_CR_PCI_INT_MASK); + value &= ~(plato_interrupt_id_to_flag(interrupt_id)); + plato_write_reg32(perip_regs, PLATO_TOP_CR_PCI_INT_MASK, value); + + (void) plato_read_reg32(perip_regs, PLATO_TOP_CR_PCI_INT_MASK); +#endif + + 
spin_unlock_irqrestore(&plato->interrupt_enable_lock, flags); +err_out: + return err; +} +EXPORT_SYMBOL(plato_disable_interrupt); + +void plato_enable_pdp_clock(struct device *dev) +{ + void *perip_regs; + struct plato_device *plato = devres_find(dev, plato_devres_release, + NULL, NULL); + + perip_regs = plato->sys_io.registers + SYS_PLATO_REG_PERIP_OFFSET; + + /* Enabling PDP gated clock output - 198 MHz + * 0x1210 sets the dividers to (1+1)*(2+1) = 6, + * and GPU_PLL defaults to 1188MHz + */ + plato_write_reg32(perip_regs, PLATO_TOP_CR_PDP_CLK_CTRL, 0x00001210); + poll(dev, perip_regs, PLATO_TOP_CR_PDP_CLK_CTRL, + 0x00001210, 0x00001210); + udelay(100); +} +EXPORT_SYMBOL(plato_enable_pdp_clock); + +/* + * Pixel Clock setup + */ +void plato_enable_pixel_clock(struct device *dev, u32 pixel_clock) +{ + u32 div0; + u32 div1; + u32 reg; + u32 pclock_mhz = pixel_clock / 1000; + u32 core_clock = plato_core_clock_speed(dev); + u32 pll_mhz = (plato_pll_clock_speed(dev, core_clock)) / 1000000; + void *top_regs; + struct plato_device *plato = devres_find(dev, plato_devres_release, + NULL, NULL); + + top_regs = plato->sys_io.registers + SYS_PLATO_REG_CHIP_LEVEL_OFFSET; + + /* + * Obtain divisor, round to nearest. + */ + div1 = (pll_mhz + pclock_mhz / 2) / pclock_mhz; + + if (div1 <= 32) { + if (div1 < 17) { + div0 = 0; + div1--; + } else { + div0 = 1; + div1 /= 2; + div1--; + } + } else { + dev_warn(dev, "- %s: Cannot correctly set up dividers to set pixel clock to requested value! 
Setting dividers to maximum values ", __func__); + div0 = 1; + div1 = 15; + } + + reg = (PLATO_CR_HDMIG_GATE_EN_MASK | (div1 << PLATO_CR_HDMIV1_DIV_0_SHIFT) | + (div0 << PLATO_CR_HDMIV0_DIV_0_SHIFT)); + + plato_write_reg32(top_regs, PLATO_TOP_CR_HDMI_CLK_CTRL, reg); + poll(dev, top_regs, PLATO_TOP_CR_HDMI_CLK_CTRL, reg, reg); + + plato_dev_info(dev, "- %s: Set up Pixel Clock dividers for %d: Div0=%d(+1) and Div1=%d(+1)", __func__, pixel_clock, div0, div1); + +} +EXPORT_SYMBOL(plato_enable_pixel_clock); diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_drv.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_drv.h new file mode 100644 index 000000000000..e760124b866e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_drv.h @@ -0,0 +1,415 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File plato_drv.h +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _PLATO_DRV_H +#define _PLATO_DRV_H + +/* + * This contains the hooks for the plato pci driver, as used by the + * Rogue and PDP sub-devices, and the platform data passed to each of their + * drivers + */ + +#include +#include +#include +#include +#include +#include + +// Debug output: +// Sometimes will want to always output info or error even in release mode. +// In that case use dev_info, dev_err directly. +#if defined(PLATO_DRM_DEBUG) + #define plato_dev_info(dev, fmt, ...) \ + dev_info(dev, fmt, ##__VA_ARGS__) + #define plato_dev_warn(dev, fmt, ...) \ + dev_warn(dev, fmt, ##__VA_ARGS__) + #define plato_dev_error(dev, fmt, ...) 
\ + dev_err(dev, fmt, ##__VA_ARGS__) + #define PLATO_DRM_CHECKPOINT pr_info("line %d\n", __LINE__) +#else + #define plato_dev_info(dev, fmt, ...) + #define plato_dev_warn(dev, fmt, ...) + #define plato_dev_error(dev, fmt, ...) + #define PLATO_DRM_CHECKPOINT +#endif + +#define PLATO_INIT_SUCCESS 0 +#define PLATO_INIT_FAILURE 1 +#define PLATO_INIT_RETRY 2 + +#define PCI_VENDOR_ID_PLATO (0x1AEE) +#define PCI_DEVICE_ID_PLATO (0x0003) + +#define PLATO_SYSTEM_NAME "Plato" + +/* Interrupt defines */ +enum PLATO_INTERRUPT { + PLATO_INTERRUPT_GPU = 0, + PLATO_INTERRUPT_PDP, + PLATO_INTERRUPT_HDMI, + PLATO_INTERRUPT_MAX, +}; + +#define PLATO_INT_SHIFT_GPU (0) +#define PLATO_INT_SHIFT_PDP (8) +#define PLATO_INT_SHIFT_HDMI (9) +#define PLATO_INT_SHIFT_HDMI_WAKEUP (11) +#define PLATO_INT_SHIFT_TEMP_A (12) + + +struct plato_region { + resource_size_t base; + resource_size_t size; +}; + +struct plato_io_region { + struct plato_region region; + void __iomem *registers; +}; + +/* The following structs are initialised and passed down by the parent plato + * driver to the respective sub-drivers + */ + +#define PLATO_DEVICE_NAME_PDP "plato_pdp" +#define PLATO_PDP_RESOURCE_REGS "pdp-regs" +#define PLATO_PDP_RESOURCE_BIF_REGS "pdp-bif-regs" + +#define PLATO_DEVICE_NAME_HDMI "plato_hdmi" +#define PLATO_HDMI_RESOURCE_REGS "hdmi-regs" + +struct plato_pdp_platform_data { + resource_size_t memory_base; + + /* The following is used by the drm_pdp driver as it manages the + * pdp memory + */ + resource_size_t pdp_heap_memory_base; + resource_size_t pdp_heap_memory_size; + + /* Used to export host address instead of pdp address, + * defaults to false. 
+ */ + bool dma_map_export_host_addr; +}; + +struct plato_hdmi_platform_data { + resource_size_t plato_memory_base; +}; + + +#define PLATO_DEVICE_NAME_ROGUE "plato_rogue" +#define PLATO_ROGUE_RESOURCE_REGS "rogue-regs" + +struct plato_rogue_platform_data { + + /* The base address of the plato memory (CPU physical address) - + * used to convert from CPU-Physical to device-physical addresses + */ + resource_size_t plato_memory_base; + + /* The following is used to setup the services heaps */ + int has_nonmappable; + struct plato_region rogue_heap_mappable; + resource_size_t rogue_heap_dev_addr; + struct plato_region rogue_heap_nonmappable; +#if defined(SUPPORT_PLATO_DISPLAY) + struct plato_region pdp_heap; +#endif +}; + +struct plato_interrupt_handler { + bool enabled; + void (*handler_function)(void *); + void *handler_data; +}; + +struct plato_device { + struct pci_dev *pdev; + + struct plato_io_region sys_io; + struct plato_io_region aon_regs; + + spinlock_t interrupt_handler_lock; + spinlock_t interrupt_enable_lock; + + struct plato_interrupt_handler interrupt_handlers[PLATO_INTERRUPT_MAX]; + + struct plato_region rogue_mem; + struct plato_region rogue_heap_mappable; + struct plato_region rogue_heap_nonmappable; + int has_nonmappable; + + resource_size_t dev_mem_base; /* Pointer to device memory base */ + + struct platform_device *rogue_dev; + +#if defined(SUPPORT_PLATO_DISPLAY) + struct platform_device *pdp_dev; + struct plato_region pdp_heap; + + struct platform_device *hdmi_dev; +#endif + +#if defined(CONFIG_MTRR) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + int mtrr; +#endif +}; + +#if defined(PLATO_LOG_CHECKPOINTS) +#define PLATO_CHECKPOINT(p) dev_info(&p->pdev->dev, \ + "- %s: %d", __func__, __LINE__) +#else +#define PLATO_CHECKPOINT(p) +#endif + +#define plato_write_reg32(base, offset, value) \ + iowrite32(value, (base) + (offset)) +#define plato_read_reg32(base, offset) ioread32(base + offset) +#define plato_sleep_ms(x) msleep(x) +#define 
plato_sleep_us(x) msleep(x/1000) + +/* Valid values for the PLATO_MEMORY_CONFIG configuration option */ +#define PLATO_MEMORY_LOCAL (1) +#define PLATO_MEMORY_HOST (2) +#define PLATO_MEMORY_HYBRID (3) + +#if defined(PLATO_MEMORY_CONFIG) +#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID) +#define PVRSRV_DEVICE_PHYS_HEAP_PDP_LOCAL 2 +#elif (PLATO_MEMORY_CONFIG == PLATO_MEMORY_LOCAL) +#define PVRSRV_DEVICE_PHYS_HEAP_PDP_LOCAL 1 +#endif +#endif /* PLATO_MEMORY_CONFIG */ + +#define DCPDP_PHYS_HEAP_ID PVRSRV_DEVICE_PHYS_HEAP_PDP_LOCAL + +#define PLATO_PDP_MEM_SIZE (384 * 1024 * 1024) + +#define SYS_PLATO_REG_PCI_BASENUM (1) +#define SYS_PLATO_REG_REGION_SIZE (4 * 1024 * 1024) + +/* + * Give system region a whole span of the reg space including + * RGX registers. That's because there are sys register segments + * both before and after the RGX segment. + */ +#define SYS_PLATO_REG_SYS_OFFSET (0x0) +#define SYS_PLATO_REG_SYS_SIZE (4 * 1024 * 1024) + +/* Entire Peripheral region */ +#define SYS_PLATO_REG_PERIP_OFFSET (0x20000) +#define SYS_PLATO_REG_PERIP_SIZE (164 * 1024) + +/* Chip level registers */ +#define SYS_PLATO_REG_CHIP_LEVEL_OFFSET (SYS_PLATO_REG_PERIP_OFFSET) +#define SYS_PLATO_REG_CHIP_LEVEL_SIZE (64 * 1024) + +#define SYS_PLATO_REG_TEMPA_OFFSET (0x80000) +#define SYS_PLATO_REG_TEMPA_SIZE (64 * 1024) + +/* USB, DMA not included */ + +#define SYS_PLATO_REG_DDR_A_CTRL_OFFSET (0x120000) +#define SYS_PLATO_REG_DDR_A_CTRL_SIZE (64 * 1024) + +#define SYS_PLATO_REG_DDR_B_CTRL_OFFSET (0x130000) +#define SYS_PLATO_REG_DDR_B_CTRL_SIZE (64 * 1024) + +#define SYS_PLATO_REG_DDR_A_PUBL_OFFSET (0x140000) +#define SYS_PLATO_REG_DDR_A_PUBL_SIZE (64 * 1024) + +#define SYS_PLATO_REG_DDR_B_PUBL_OFFSET (0x150000) +#define SYS_PLATO_REG_DDR_B_PUBL_SIZE (64 * 1024) + +#define SYS_PLATO_REG_NOC_OFFSET (0x160000) +#define SYS_PLATO_REG_NOC_SIZE (64 * 1024) + +/* Debug NOC registers */ +#define SYS_PLATO_REG_NOC_DBG_DDR_A_CTRL_OFFSET (0x1500) +#define SYS_PLATO_REG_NOC_DBG_DDR_A_DATA_OFFSET 
(0x1580) +#define SYS_PLATO_REG_NOC_DBG_DDR_A_PUBL_OFFSET (0x1600) +#define SYS_PLATO_REG_NOC_DBG_DDR_B_CTRL_OFFSET (0x1680) +#define SYS_PLATO_REG_NOC_DBG_DDR_B_DATA_OFFSET (0x1700) +#define SYS_PLATO_REG_NOC_DBG_DDR_B_PUBL_OFFSET (0x1780) +#define SYS_PLATO_REG_NOC_DBG_DISPLAY_S_OFFSET (0x1800) +#define SYS_PLATO_REG_NOC_DBG_GPIO_0_S_OFFSET (0x1900) +#define SYS_PLATO_REG_NOC_DBG_GPIO_1_S_OFFSET (0x1980) +#define SYS_PLATO_REG_NOC_DBG_GPU_S_OFFSET (0x1A00) +#define SYS_PLATO_REG_NOC_DBG_PCI_PHY_OFFSET (0x1A80) +#define SYS_PLATO_REG_NOC_DBG_PCI_REG_OFFSET (0x1B00) +#define SYS_PLATO_REG_NOC_DBG_PCI_S_OFFSET (0x1B80) +#define SYS_PLATO_REG_NOC_DBG_PERIPH_S_OFFSET (0x1c00) +#define SYS_PLATO_REG_NOC_DBG_RET_REG_OFFSET (0x1D00) +#define SYS_PLATO_REG_NOC_DBG_SERVICE_OFFSET (0x1E00) + +#define SYS_PLATO_REG_RGX_OFFSET (0x170000) +#define SYS_PLATO_REG_RGX_SIZE (64 * 1024) + +#define SYS_PLATO_REG_AON_OFFSET (0x180000) +#define SYS_PLATO_REG_AON_SIZE (64 * 1024) + +#define SYS_PLATO_REG_PDP_OFFSET (0x200000) +#define SYS_PLATO_REG_PDP_SIZE (0x1000) + +#define SYS_PLATO_REG_PDP_BIF_OFFSET \ + (SYS_PLATO_REG_PDP_OFFSET + SYS_PLATO_REG_PDP_SIZE) +#define SYS_PLATO_REG_PDP_BIF_SIZE (0x200) + +#define SYS_PLATO_REG_HDMI_OFFSET \ + (SYS_PLATO_REG_PDP_OFFSET + 0x20000) +#define SYS_PLATO_REG_HDMI_SIZE (128 * 1024) + +/* Device memory (including HP mapping) on base register 4 */ +#define SYS_DEV_MEM_PCI_BASENUM (4) + +/* Device memory size */ +#define ONE_GB_IN_BYTES (0x40000000ULL) +#define SYS_DEV_MEM_REGION_SIZE \ + (PLATO_MEMORY_SIZE_GIGABYTES * ONE_GB_IN_BYTES) + +/* Plato DDR offset in device memory map at 32GB */ +#define PLATO_DDR_DEV_PHYSICAL_BASE (0x800000000) + +/* DRAM is split at 48GB */ +#define PLATO_DRAM_SPLIT_ADDR (0xc00000000) + +/* + * Plato DDR region is aliased if less than 32GB memory is present. + * This defines memory base closest to the DRAM split point. 
+ * If 32GB is present this is equal to PLATO_DDR_DEV_PHYSICAL_BASE + */ +#define PLATO_DDR_ALIASED_DEV_PHYSICAL_BASE \ + (PLATO_DRAM_SPLIT_ADDR - (SYS_DEV_MEM_REGION_SIZE >> 1)) + +#define PLATO_DDR_ALIASED_DEV_PHYSICAL_END \ + (PLATO_DRAM_SPLIT_ADDR + (SYS_DEV_MEM_REGION_SIZE >> 1)) + +#define PLATO_DDR_ALIASED_DEV_SEGMENT_SIZE \ + ((32ULL / PLATO_MEMORY_SIZE_GIGABYTES) * ONE_GB_IN_BYTES) + +/* Plato Host memory offset in device memory map at 512GB */ +#define PLATO_HOSTRAM_DEV_PHYSICAL_BASE (0x8000000000) + +/* Plato PLL, DDR/GPU, PDP and HDMI-SFR/CEC clocks */ +#define PLATO_PLL_REF_CLOCK_SPEED (19200000) + +/* 600 MHz */ +#define PLATO_MEM_CLOCK_SPEED (600000000) +#define PLATO_MIN_MEM_CLOCK_SPEED (600000000) +#define PLATO_MAX_MEM_CLOCK_SPEED (800000000) + +/* 396 MHz (~400 MHz) on HW, around 1MHz on the emulator */ +#if defined(EMULATOR) || defined(VIRTUAL_PLATFORM) +#define PLATO_RGX_CORE_CLOCK_SPEED (1000000) +#else + +#define PLATO_RGX_CORE_CLOCK_SPEED (396000000) +#define PLATO_RGX_MIN_CORE_CLOCK_SPEED (396000000) +#define PLATO_RGX_MAX_CORE_CLOCK_SPEED (742500000) +#endif + +#define PLATO_MIN_PDP_CLOCK_SPEED (165000000) +#define PLATO_TARGET_HDMI_SFR_CLOCK_SPEED (27000000) +#define PLATO_TARGET_HDMI_CEC_CLOCK_SPEED (32768) + +#define REG_TO_CELSIUS(reg) (((reg) * 352/4096) - 109) +#define CELSIUS_TO_REG(temp) ((((temp) + 109) * 4096) / 352) +#define PLATO_MAX_TEMP_CELSIUS (100) + +#define PLATO_LMA_HEAP_REGION_MAPPABLE 0 +#define PLATO_LMA_HEAP_REGION_NONMAPPABLE 1 + +struct plato_debug_register { + char *description; + unsigned int offset; + unsigned int value; +}; + +#if defined(ENABLE_PLATO_HDMI) + +#if defined(HDMI_PDUMP) +/* Hard coded video formats for pdump type run only */ +#define VIDEO_FORMAT_1280_720p 0 +#define VIDEO_FORMAT_1920_1080p 1 +#define DC_DEFAULT_VIDEO_FORMAT (VIDEO_FORMAT_1920_1080p) +#endif + +#endif /* ENABLE_PLATO_HDMI */ + +/* Exposed APIs */ +int plato_enable(struct device *dev); +void plato_disable(struct device *dev); + 
+int plato_enable_interrupt(struct device *dev, + enum PLATO_INTERRUPT interrupt_id); +int plato_disable_interrupt(struct device *dev, + enum PLATO_INTERRUPT interrupt_id); + +int plato_set_interrupt_handler(struct device *dev, + enum PLATO_INTERRUPT interrupt_id, + void (*handler_function)(void *), + void *handler_data); +unsigned int plato_core_clock_speed(struct device *dev); +unsigned int plato_mem_clock_speed(struct device *dev); +unsigned int plato_pll_clock_speed(struct device *dev, + unsigned int clock_speed); +void plato_enable_pdp_clock(struct device *dev); +void plato_enable_pixel_clock(struct device *dev, u32 pixel_clock); + +int plato_debug_info(struct device *dev, + struct plato_debug_register *noc_dbg_regs, + struct plato_debug_register *aon_dbg_regs); + +/* Internal */ +int plato_memory_init(struct plato_device *plato); +void plato_memory_deinit(struct plato_device *plato); +int plato_cfg_init(struct plato_device *plato); +int request_pci_io_addr(struct pci_dev *pdev, u32 index, + resource_size_t offset, resource_size_t length); +void release_pci_io_addr(struct pci_dev *pdev, u32 index, + resource_size_t start, resource_size_t length); + +#endif /* _PLATO_DRV_H */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_init.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_init.c new file mode 100644 index 000000000000..4f4a73fde4fa --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_init.c @@ -0,0 +1,1890 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File plato_init.c +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ /**************************************************************************/

/*
 * Low level device initialization for the Plato TCF board: DRAM controller
 * (Synopsys DWC umctl2) and DDR PHY (PUBL) bring-up, driven over PCI BARs.
 */
#if defined(CONFIG_MTRR)
#include <asm/mtrr.h>	/* NOTE(review): include target lost in extraction — confirm header name */
#endif

#include "plato_drv.h"
#include "plato_aon_regs.h"
#include "plato_top_regs.h"
#include "plato_ddr_ctrl_regs.h"
#include "plato_ddr_publ_regs.h"

/* Board population: Kingston DDR parts (selects timing constants below). */
#define PLATO_DDR_KINGSTON 1

/* True when the PCI BAR maps less than the full device memory region. */
#define PLATO_HAS_NON_MAPPABLE(dev) (dev->rogue_mem.size < SYS_DEV_MEM_REGION_SIZE)

/*
 * Poll register @reg (relative to @base) until (value & @msk) == @val,
 * retrying @cnt times with @intrvl ms between reads.
 *
 * Returns 0 on match, -ETIME if the value never matched.
 *
 * NOTE(review): @cnt is u32, so callers passing -1 get ~4e9 iterations,
 * i.e. an effectively unbounded poll — presumably intentional; confirm.
 */
static int poll_pr(struct device *dev, void *base, u32 reg,
		u32 val, u32 msk, u32 cnt, u32 intrvl)
{
	u32 polnum;

	for (polnum = 0; polnum < cnt; polnum++) {
		if ((plato_read_reg32(base, reg) & msk) == val)
			break;
		plato_sleep_ms(intrvl);
	}
	if (polnum == cnt) {
		dev_info(dev,
			"Poll failed for register: 0x%08X. Expected 0x%08X Received 0x%08X",
			(unsigned int)reg, val,
			plato_read_reg32(base, reg) & msk);
		return -ETIME;
	}

	return 0;
}

/* Default poll: 10 attempts, 10 ms apart (~100 ms budget). */
#define poll(dev, base, reg, val, msk) poll_pr(dev, base, reg, val, msk, 10, 10)

/*
 * Initialise one DRAM channel: program the umctl2 controller registers,
 * release the channel resets given in @reset_flags, then run PHY (PUBL)
 * initialisation and DDR training.
 *
 * @plato:       owning device (used for logging/checkpoints)
 * @publ_regs:   mapped PHY (PUBL) register block for this channel
 * @ctrl_regs:   mapped umctl2 controller register block for this channel
 * @aon_regs:    mapped always-on register block (reset control)
 * @bldr_data:   per-lane bit-delay-line register values (only used when
 *               PLATO_DDR_BDLR_TRAINING is defined; "bldr" is presumably a
 *               typo for "bdlr" — kept for interface compatibility)
 * @reset_flags: PLATO_AON_CR_RESET_CTRL bits to OR in when de-asserting
 *               this channel's reset
 *
 * Returns PLATO_INIT_SUCCESS or PLATO_INIT_FAILURE (training failure).
 *
 * NOTE(review): most intermediate poll()/poll_pr() results are ignored;
 * only the final PGSR0 readback gates success. That matches the original
 * flow (some polls are documented as expected to fail on emulation).
 */
static int plato_dram_init(struct plato_device *plato,
		void *publ_regs,
		void *ctrl_regs,
		void *aon_regs,
		u32 bldr_data[PLATO_DDR_PUBL_DATX_LANE_COUNT][PLATO_DDR_PUBL_DXBDLR_REGS_PER_LANE],
		u32 reset_flags)
{
	struct device *dev = &plato->pdev->dev;
	/*
	 * Phase 1: Program the DWC_ddr_umctl2 registers
	 */

	/* Single rank only for Kingston DDRs*/
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_MSTR, 0x41040001);

	plato_sleep_us(100);

	/*refresh timings*/
#if defined(PLATO_DDR_KINGSTON)
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_RFSHTMG, 0x0081008B);
#else
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_RFSHTMG, 0x006100BB);
#endif

	/* Train DDR sequence */
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_INIT0, 0x00020100);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_INIT1, 0x00010000);

#if defined(PLATO_DDR_KINGSTON)
	/* write recovery */
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_INIT3, 0x01700000);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_INIT4, 0x00280000);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_INIT5, 0x0012000c);

	/* DRAM timing parameters (Kingston parts) */
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DRAMTMG0, 0x0f0e2112);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DRAMTMG1, 0x00040618);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DRAMTMG2, 0x0506040B);

	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DRAMTMG3, 0x00002008);

	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DRAMTMG4, 0x06020307);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DRAMTMG5, 0x090e0403);

	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DRAMTMG6, 0x0d0e000e);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DRAMTMG7, 0x00000c0e);

	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DRAMTMG8, 0x01010a05);

	/*impedance registers */
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_ZQCTL0, 0x30ab002b);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_ZQCTL1, 0x00000070);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_ZQCTL2, 0x00000000);

	/*refresh control*/
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_RFSHCTL0, 0x00e01020);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_RFSHCTL1, 0x0078007e);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_RFSHCTL2, 0x0057000e);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_RFSHCTL3, 0x00000000);

	/*DFI Timings*/
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DFITMG0, 0x02878208);

	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DFITMG1, 0x00020202);

	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DFIUPD0, 0x00400003);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DFIUPD1, 0x00f000ff);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DFIUPD2, 0x80100010);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DFIUPD3, 0x088105c3);

	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DFIMISC, 0x00000000);
#else
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_INIT3, 0x0d700000);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_INIT4, 0x00180000);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_INIT5, 0x00090009);

	/* DRAM timing parameters (non-Kingston parts) */
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DRAMTMG0, 0x0c101a0e);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DRAMTMG1, 0x000a0313);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DRAMTMG2, 0x04050509);

	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DRAMTMG3, 0x00002008);

	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DRAMTMG4, 0x06020306);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DRAMTMG5, 0x070c0202);

	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DRAMTMG6, 0x0d0e000e);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DRAMTMG7, 0x00000c0e);

	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DRAMTMG8, 0x01010a07);

	/*impedance registers */
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_ZQCTL0, 0x10800020);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_ZQCTL1, 0x00000070);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_ZQCTL2, 0x00000000);

	/*refresh control*/
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_RFSHCTL0, 0x00e06010);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_RFSHCTL1, 0x00600031);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_RFSHCTL2, 0x0004002a);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_RFSHCTL3, 0x00000000);

	/*DFI Timings*/
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DFITMG0, 0x02878206);

	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DFITMG1, 0x00020202);

	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DFILPCFG0, 0x07111031);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DFILPCFG1, 0x00000050);

	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DFIUPD0, 0x00400003);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DFIUPD1, 0x006a006f);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DFIUPD2, 0x0d0b02b6);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DFIUPD3, 0x00100010);

	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DFIMISC, 0x00000001);
#endif

	/* Single rank only on Kingston */
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_ADDRMAP0, 0x00001F1F);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_ADDRMAP1, 0x00070707);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_ADDRMAP2, 0x00000000);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_ADDRMAP3, 0x0F000000);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_ADDRMAP4, 0x00000F0F);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_ADDRMAP5, 0x06060606);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_ADDRMAP6, 0x06060606);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_ADDRMAP7, 0x00000F0F);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_ADDRMAP8, 0x00000707);

	/* On-die termination configuration/mapping */
#if defined(PLATO_DDR_KINGSTON)
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_ODTCFG, 0x06000604);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_ODTMAP, 0x99c5b050);
#else
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_ODTCFG, 0x0d0f0740);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_ODTMAP, 0x99c5b050);
#endif

	/* Scheduler / QoS thresholds */
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_PERFHPR1, 0x9f008f23);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_SCHED, 0x00003f00);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_PERFLPR1, 0x18000064);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_PERFWR1, 0x18000096);

	/* Setup the virtual channels */
	/* Pairs of (mask, value) registers mapping 15 virtual channels 0..14 */
	plato_write_reg32(ctrl_regs, 0x00000410, 0x0000000F);
	plato_write_reg32(ctrl_regs, 0x00000414, 0x00000000);
	plato_write_reg32(ctrl_regs, 0x00000418, 0x0000000F);
	plato_write_reg32(ctrl_regs, 0x0000041C, 0x00000001);
	plato_write_reg32(ctrl_regs, 0x00000420, 0x0000000F);
	plato_write_reg32(ctrl_regs, 0x00000424, 0x00000002);
	plato_write_reg32(ctrl_regs, 0x00000428, 0x0000000F);
	plato_write_reg32(ctrl_regs, 0x0000042C, 0x00000003);
	plato_write_reg32(ctrl_regs, 0x00000430, 0x0000000F);
	plato_write_reg32(ctrl_regs, 0x00000434, 0x00000004);
	plato_write_reg32(ctrl_regs, 0x00000438, 0x0000000F);
	plato_write_reg32(ctrl_regs, 0x0000043C, 0x00000005);
	plato_write_reg32(ctrl_regs, 0x00000440, 0x0000000F);
	plato_write_reg32(ctrl_regs, 0x00000444, 0x00000006);
	plato_write_reg32(ctrl_regs, 0x00000448, 0x0000000F);
	plato_write_reg32(ctrl_regs, 0x0000044C, 0x00000007);
	plato_write_reg32(ctrl_regs, 0x00000450, 0x0000000F);
	plato_write_reg32(ctrl_regs, 0x00000454, 0x00000008);
	plato_write_reg32(ctrl_regs, 0x00000458, 0x0000000F);
	plato_write_reg32(ctrl_regs, 0x0000045C, 0x00000009);
	plato_write_reg32(ctrl_regs, 0x00000460, 0x0000000F);
	plato_write_reg32(ctrl_regs, 0x00000464, 0x0000000A);
	plato_write_reg32(ctrl_regs, 0x00000468, 0x0000000F);
	plato_write_reg32(ctrl_regs, 0x0000046C, 0x0000000B);
	plato_write_reg32(ctrl_regs, 0x00000470, 0x0000000F);
	plato_write_reg32(ctrl_regs, 0x00000474, 0x0000000C);
	plato_write_reg32(ctrl_regs, 0x00000478, 0x0000000F);
	plato_write_reg32(ctrl_regs, 0x0000047C, 0x0000000D);
	plato_write_reg32(ctrl_regs, 0x00000480, 0x0000000F);
	plato_write_reg32(ctrl_regs, 0x00000484, 0x0000000E);

	/* Read back the last virtual-channel write (result ignored) */
	poll(dev, ctrl_regs, 0x484, 0x0000000E, 0x0000000F);

	plato_sleep_us(1000);

	PLATO_CHECKPOINT(plato);

	/*
	 * Phase 2: Deassert soft reset signal core_ddrc_rstn
	 */

	/* Now getting DRAM controller out of reset */
	plato_write_reg32(aon_regs, PLATO_AON_CR_RESET_CTRL,
			reset_flags |
			plato_read_reg32(aon_regs, PLATO_AON_CR_RESET_CTRL));

	plato_sleep_us(1000);

	/* ECC disable */
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DBG1, 0x00000000);
	/* power related */
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_PWRCTL, 0x00000000);
	/* Enabling AXI input port (Port control) */
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_PCTRL, 0x00000001);

	PLATO_CHECKPOINT(plato);

	/*
	 * Phase 7: Set DFIMISC.dfi_init_complete_en to 1
	 */
	/* NOTE(review): "Phase 7" comment appears out of order here in the
	 * original; the DFIMISC write actually happens after Phase 5 below.
	 */

	/*
	 * Phase 3: Start PHY initialization by accessing relevant PUB registers
	 */
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_DCR_OFFSET, 0x0000040B);

#if defined(PLATO_DDR_KINGSTON)
	/* IF DDR_RISE_RISE */
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_DSGCR_OFFSET, 0x0064641F);

	/* MR Registers */
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_MR0_OFFSET, 0x170);
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_MR1_OFFSET, 0x00000400);
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_MR2_OFFSET, 0x00000228);
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_MR3_OFFSET, 0x00000000);

	/* DRAM timing parameter registers (PHY copy of controller timings) */
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_DTPR0_OFFSET, 0x06220308);
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_DTPR1_OFFSET, 0x281b1004);
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_DTPR2_OFFSET, 0x00060120);
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_DTPR3_OFFSET, 0x02000101);
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_DTPR4_OFFSET, 0x01200807);
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_DTPR5_OFFSET, 0x00300c08);

	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_PGCR1_OFFSET, 0x020046A0);
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_PGCR2_OFFSET, 0x00F09088);
#else
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_PTR0_OFFSET, 0x10000010);
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_PTR1_OFFSET, 0x271012c0);

	/* IF DDR_RISE_RISE */
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_DSGCR_OFFSET, 0x0064641F);

	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_MR0_OFFSET, 0xd70);
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_MR1_OFFSET, 0x00000000);
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_MR2_OFFSET, 0x00000018);
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_MR3_OFFSET, 0x00000000);

	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_DTPR0_OFFSET, 0x061c0B06);
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_DTPR1_OFFSET, 0x28200400);
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_DTPR2_OFFSET, 0x00040005);
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_DTPR3_OFFSET, 0x02000101);
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_DTPR4_OFFSET, 0x01180805);
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_DTPR5_OFFSET, 0x00250B06);
	/* IF TRAIN_DDR */
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_PGCR2_OFFSET, 0x00F09088);
#endif

	/* DISABLE VT COMPENSATION */
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_DXCCR_OFFSET, 0x20C01884);

	/* VREF CHANGE */
	/* NOTE(review): dead experimental block kept from the original. */
#if 0
	plato_write_reg32(publ_regs, 0x0710, 0x0E00083C);
	plato_write_reg32(publ_regs, 0x0810, 0x0E00083C);
	plato_write_reg32(publ_regs, 0x0910, 0x0E00083C);
	plato_write_reg32(publ_regs, 0x0A10, 0x0E00083C);
	plato_write_reg32(publ_regs, 0x0B10, 0x0E00083C);
	plato_write_reg32(publ_regs, 0x0C10, 0x0E00083C);
	plato_write_reg32(publ_regs, 0x0D10, 0x0E00083C);
	plato_write_reg32(publ_regs, 0x0E10, 0x0E00083C);
#endif

	PLATO_CHECKPOINT(plato);

	/*
	 * Phase 4: Trigger PHY initialization: Impedance, PLL, and DDL; assert PHY reset
	 */
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_PIR_OFFSET, 0x00000073);

	/*
	 * Phase 5: Monitor PHY initialization status by polling the PUB register PGSR0
	 * (not done on emu)
	 */
	poll(dev, publ_regs, PLATO_DDR_PUBL_PGSR0_OFFSET, 0xF, 0xF);

	/* Quasi-dynamic write: unlock SWCTL, set dfi_init_complete_en, relock */
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_SWCTL, 0x00000000);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_DFIMISC, 0x00000001);
	plato_write_reg32(ctrl_regs, PLATO_DDR_CTRL_SWCTL, 0x00000001);

	PLATO_CHECKPOINT(plato);

	/*
	 * Phase 6: Indicate to the PUB that the controller will perform SDRAM
	 * initialization by setting PIR.INIT and PIR.CTLDINIT, and
	 * poll PGSR0.IDONE
	 */
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_PIR_OFFSET, 0x00040001);
	poll(dev, publ_regs, PLATO_DDR_PUBL_PGSR0_OFFSET, 0x11, 0x11);

	/*
	 * Phase 8: Wait for DWC_ddr_umctl2 to move to "normal" operating
	 * mode by monitoring STAT.operating_mode signal
	 */
	poll_pr(dev, ctrl_regs, PLATO_DDR_CTRL_STAT, 0x01, 0x01, 10, 100);

	plato_sleep_us(100);

	/* IF TRAIN_DDR */
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_DTCR0_OFFSET, 0x8000B1C7);
	/*single rank only for Kingston */
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_DTCR1_OFFSET, 0x00010237);

#if defined(PLATO_DDR_BDLR_TRAINING)
	/* Bit delay line register training */
	{
		u8 lane = 0;
		u8 reg = 0;

		/* Program the caller-supplied per-lane delay-line values */
		for (lane = 0; lane < PLATO_DDR_PUBL_DATX_LANE_COUNT; lane++) {
			for (reg = 0; reg < PLATO_DDR_PUBL_DXBDLR_REGS_PER_LANE; reg++) {
				plato_write_reg32(publ_regs,
					PLATO_DDR_PUBL_DXnBDLR_OFFSET(lane, reg),
					bldr_data[lane][reg]);
			}
		}
	}

	/* poll on general status register 2 for each lane */
	{
		u8 lane;

		for (lane = 0; lane < PLATO_DDR_PUBL_DATX_LANE_COUNT; lane++)
			poll(dev, publ_regs,
				PLATO_DDR_PUBL_DXnGSR_OFFSET(lane, 2),
				0, 0x001FFFFF);
	}
#endif

	/* Kick full data training (write leveling, eye training, etc.) */
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_PIR_OFFSET, 0x0000ff72);
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_PIR_OFFSET, 0x0000ff73);
	poll(dev, publ_regs, PLATO_DDR_PUBL_PGSR0_OFFSET, 0x80000fff, 0xfff80fff);
	poll(dev, ctrl_regs, PLATO_DDR_CTRL_STAT, 0x1, 0x1);

	/* Setting the Anti Glitch OFF (?), Disabling On Die pullup/pulldowns */
#if defined(PLATO_DDR_KINGSTON)
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_DXCCR_OFFSET, 0x02401884);
#else
	plato_write_reg32(publ_regs, PLATO_DDR_PUBL_DXCCR_OFFSET, 0x02400004);
#endif

	/* Only this final readback decides success/failure of training. */
	/* NOTE(review): the leading '-' in the message below looks like a typo. */
	if (plato_read_reg32(publ_regs, PLATO_DDR_PUBL_PGSR0_OFFSET) != 0x80000fff) {
		dev_err(dev, "-%s: DDR Training failed", __func__);
		return PLATO_INIT_FAILURE;
	}

	plato_sleep_us(100);

#if defined(PLATO_DRM_DEBUG)
	/* Dump every lane's general status registers for debugging */
	{
		u8 lane = 0;
		u8 reg = 0;

		for (lane = 0; lane < PLATO_DDR_PUBL_DATX_LANE_COUNT; lane++) {
			for (reg = 0; reg < PLATO_DDR_PUBL_DXGSR_REGS_PER_LANE; reg++) {
				plato_dev_info(dev, "DX%dGSR%d: 0x%08x", lane, reg,
					plato_read_reg32(publ_regs,
						PLATO_DDR_PUBL_DXnGSR_OFFSET(lane, reg)));
			}
		}
	}
#endif

	return PLATO_INIT_SUCCESS;
}

/* PDP core-id register used to sanity check the display pipeline */
#define PLATO_PDP_REGISTER_CORE_ID_OFFSET (0x04e0)
#define VALID_PDP_CORE_ID (0x7010003)

/*
 * Helpers for getting integer and fractional values which are needed when
 * programming PLATO_AON_CR_{GPU|DDR}_PLL_CTRL_{0|1} registers for DDR/GPU PLLs.
 *
 * PLL_CLOCK_SPEED = (PLL_REF_CLOCK_SPEED * REFDIV) *
 *	(PLL_INT + (PLL_FRAC / 2^24)) / POSTDIV1 / POSTDIV2
 *
 * NOTE: It's assumed that REFDIV, POSTDIV1 and POSTDIV2 are '1' in all cases.
 */

/* Integer part of the PLL feedback divider for @pll_clock. */
static u32 get_plato_pll_int(u32 pll_clock)
{
	return pll_clock / PLATO_PLL_REF_CLOCK_SPEED;
}

/* 24-bit fractional part of the PLL feedback divider for @pll_clock. */
static u32 get_plato_pll_frac(u32 pll_clock)
{
	/* Shift to get 24 bits for fractional part after div */
	u64 shift = (u64)pll_clock << 24;

	/* Div and return only fractional part of the result */
	return (shift / PLATO_PLL_REF_CLOCK_SPEED) & ((1 << 24) - 1);
}

/*
 * Helper for getting value of integer divider for GPU clock.
 *
 * The register encodes (divider - 1); the result is biased by -1 with
 * saturation at 0 and clipped/saturated to the field width.
 */
static u32 get_plato_gpuv_div0(u32 pll_clock, u32 core_clock)
{
	u32 div, ret;

	div = pll_clock / core_clock;

	/* Bias the result by (-1) with saturation, then clip it */
	ret = (div - (div > 0)) &
		(PLATO_CR_GPUV_DIV_0_MASK >> PLATO_CR_GPUV_DIV_0_SHIFT);

	/* Check for lost result after clipping, saturate if so */
	return (div > 1) && (ret != (div - (div > 0))) ?
		(PLATO_CR_GPUV_DIV_0_MASK >> PLATO_CR_GPUV_DIV_0_SHIFT) : ret;
}

/*
 * Helpers for getting values of integer dividers for PDP clock.
 *
 * NOTE: Use only if PLL clock speed > ~350 MHz.
 */
static u32 get_plato_pdpv0_div0(u32 pll_clock)
{
	u32 div, ret;

	div = pll_clock / PLATO_MIN_PDP_CLOCK_SPEED;

	/* Bias the result by (-1) with saturation, then clip it */
	ret = (div - (div > 0)) &
		(PLATO_CR_PDPV0_DIV_0_MASK >> PLATO_CR_PDPV0_DIV_0_SHIFT);

	/* Check for lost result after clipping, saturate if so */
	return (div > 1) && (ret != (div - (div > 0))) ?
		(PLATO_CR_PDPV0_DIV_0_MASK >> PLATO_CR_PDPV0_DIV_0_SHIFT) : ret;
}

/* Second-stage PDP divider, applied after the v0 divider above. */
static u32 get_plato_pdpv1_div0(u32 pll_clock)
{
	u32 div, ret;

	div = (pll_clock / (get_plato_pdpv0_div0(pll_clock) + 1)) /
		PLATO_MIN_PDP_CLOCK_SPEED;

	/* Bias the result by (-1) with saturation, then clip it */
	ret = (div - (div > 0)) &
		(PLATO_CR_PDPV1_DIV_0_MASK >> PLATO_CR_PDPV1_DIV_0_SHIFT);

	/* Check for lost result after clipping, saturate if so */
	return (div > 1) && (ret != (div - (div > 0))) ?
		(PLATO_CR_PDPV1_DIV_0_MASK >> PLATO_CR_PDPV1_DIV_0_SHIFT) : ret;
}

#if defined(ENABLE_PLATO_HDMI)
/*
 * Helpers for getting values of integer dividers for HDMICEC clocks.
 *
 * NOTE: They strive to get clock speed of HDMI-SFR as close to
 * 27 MHz as possible.
 */
static u32 get_plato_hdmicecv0_div0(u32 pll_clock)
{
	u32 hdmicecv0_div0, hdmicecv0_div0_limit;
	u32 hdmicecv1_div0, hdmicecv1_div0_limit;
	u32 hdmisfr_clock_speed;

	hdmicecv0_div0_limit = PLATO_CR_HDMICECV0_DIV_0_MASK >>
		PLATO_CR_HDMICECV0_DIV_0_SHIFT;
	hdmicecv1_div0_limit = PLATO_CR_HDMICECV1_DIV_0_MASK >>
		PLATO_CR_HDMICECV1_DIV_0_SHIFT;

	/* Exhaustively search (v0, v1) pairs for the first v0 that can
	 * bring the SFR clock at or below the 27 MHz target.
	 */
	hdmicecv0_div0 = 0;
	while (hdmicecv0_div0 < hdmicecv0_div0_limit) {
		hdmicecv1_div0 = 0;
		while (hdmicecv1_div0 < hdmicecv1_div0_limit) {
			hdmisfr_clock_speed = pll_clock /
				(hdmicecv0_div0 + 1) / (hdmicecv1_div0 + 1);

			if (hdmisfr_clock_speed <= PLATO_TARGET_HDMI_SFR_CLOCK_SPEED) {
				/* Done, value of the divider found */
				return hdmicecv0_div0;
			}

			hdmicecv1_div0++;
		}

		hdmicecv0_div0++;
	}

	// Here the function returns the highest possible value of the divider
	return hdmicecv0_div0;
}

/* Second-stage CEC divider: brings pll/(v0+1) down to the 27 MHz SFR target. */
static u32 get_plato_hdmicecv1_div0(u32 pll_clock)
{
	u64 div, ret;

	/* Calculate the divider using 32.32 fixed point math */
	div = (u64)pll_clock << 32;
	div /= get_plato_hdmicecv0_div0(pll_clock) + 1;
	div /= PLATO_TARGET_HDMI_SFR_CLOCK_SPEED;

	/* Round up if the fractional part is present */
	div = (div >> 32) + ((div & 0xFFFFFFFF) > 0);

	/* Bias the result by (-1) with saturation, then clip it */
	ret = (div - (div > 0)) &
		(PLATO_CR_HDMICECV1_DIV_0_MASK >>
		PLATO_CR_HDMICECV1_DIV_0_SHIFT);

	/* Check for lost result after clipping, saturate if so */
	return (div > 1) && (ret != (div - (div > 0))) ?
		(PLATO_CR_HDMICECV1_DIV_0_MASK >>
		PLATO_CR_HDMICECV1_DIV_0_SHIFT) : ret;
}

/* Third-stage divider: derives the 32.768 kHz CEC clock from the SFR clock. */
static u32 get_plato_hdmicecv2_div0(u32 pll_clock)
{
	u64 div, ret;

	/* Calculate the divider using 32.32 fixed point math */
	div = (u64)pll_clock << 32;
	div /= get_plato_hdmicecv0_div0(pll_clock) + 1;
	div /= get_plato_hdmicecv1_div0(pll_clock) + 1;
	div /= PLATO_TARGET_HDMI_CEC_CLOCK_SPEED;

	/* Round up if the fractional part is present */
	div = (div >> 32) + ((div & 0xFFFFFFFF) > 0);

	/* Bias the result by (-1) with saturation, then clip it */
	ret = (div - (div > 0)) &
		(PLATO_CR_HDMICECV2_DIV_0_MASK >>
		PLATO_CR_HDMICECV2_DIV_0_SHIFT);

	/* Check for lost result after clipping, saturate if so */
	return (div > 1) && (ret != (div - (div > 0))) ?
		(PLATO_CR_HDMICECV2_DIV_0_MASK >>
		PLATO_CR_HDMICECV2_DIV_0_SHIFT) : ret;
}
#endif

#if defined(PLATO_DUAL_CHANNEL_DDR)

/*
 * Full board bring-up for the dual-channel (interleaved A+B) DDR
 * configuration: soft reset, NoC interleave mode, DDR/GPU PLL setup,
 * gated clock enables (DDR, GPU, PDP, HDMI, CEC), then per-channel DRAM
 * init via plato_dram_init() and final de-assert of GPU/DDR resets.
 *
 * Returns 0 / PLATO_INIT_SUCCESS on success, an error code or
 * PLATO_INIT_FAILURE otherwise.
 *
 * NOTE(review): many intermediate poll() results are ignored (original
 * behavior); only the initial reset poll, the DRAM inits and the final
 * reset poll gate the return value.
 */
static int plato_dual_channel_init(struct plato_device *plato)
{
	struct device *dev = &plato->pdev->dev;
	int err = 0;
	void *dbg_perip_regs = plato->sys_io.registers + SYS_PLATO_REG_PERIP_OFFSET;
	void *ddra_ctrl_regs = plato->sys_io.registers + SYS_PLATO_REG_DDR_A_CTRL_OFFSET;
	void *ddra_publ_regs = plato->sys_io.registers + SYS_PLATO_REG_DDR_A_PUBL_OFFSET;
	void *ddrb_ctrl_regs = plato->sys_io.registers + SYS_PLATO_REG_DDR_B_CTRL_OFFSET;
	void *ddrb_publ_regs = plato->sys_io.registers + SYS_PLATO_REG_DDR_B_PUBL_OFFSET;
	void *noc_regs = plato->sys_io.registers + SYS_PLATO_REG_NOC_OFFSET;
	void *aon_regs = plato->aon_regs.registers;
	/* Board-tuned per-lane bit-delay-line values for channel A */
	u32 bdlr_setup_ddra[PLATO_DDR_PUBL_DATX_LANE_COUNT][PLATO_DDR_PUBL_DXBDLR_REGS_PER_LANE] = {
		{0x0F0F0F0F, 0x0F0F0F0F, 0x000F0F0F},
		{0x0F0F0F0F, 0x0F0F0F0F, 0x000F0F0F},
		{0x14141414, 0x14141414, 0x00141414},
		{0x0F0F0F0F, 0x0F0F0F0F, 0x000F0F0F},
		{0x14141414, 0x14141414, 0x00141414},
		{0x14141414, 0x14141414, 0x00141414},
		{0x0F0F0F0F, 0x0F0F0F0F, 0x000F0F0F},
		{0x0F0F0F0F, 0x0F0F0F0F, 0x000F0F0F} };
	/* Board-tuned per-lane bit-delay-line values for channel B */
	u32 bdlr_setup_ddrb[PLATO_DDR_PUBL_DATX_LANE_COUNT][PLATO_DDR_PUBL_DXBDLR_REGS_PER_LANE] = {
		{0x0F0F0F0F, 0x0F0F0F0F, 0x000F0F0F},
		{0x14141414, 0x14141414, 0x00141414},
		{0x14141414, 0x14141414, 0x00141414},
		{0x0F0F0F0F, 0x0F0F0F0F, 0x000F0F0F},
		{0x0F0F0F0F, 0x0F0F0F0F, 0x000F0F0F},
		{0x14141414, 0x14141414, 0x00141414},
		{0x14141414, 0x14141414, 0x00141414},
		{0x0F0F0F0F, 0x0F0F0F0F, 0x000F0F0F} };
	u32 mem_clock_speed, mem_pll_clock_speed;
	u32 mem_clock_pll_control0, mem_clock_pll_control1;
	u32 mem_clock_control;
	u32 core_clock_speed, core_pll_clock_speed;
	u32 core_clock_pll_control0, core_clock_pll_control1;
	u32 core_clock_control;
#if defined(ENABLE_PLATO_HDMI)
	u32 hdmicec_clock_control;
#endif
	u32 pdp_clock_control, hdmi_clock_control;

	/* Plato Soft reset */
	plato_write_reg32(aon_regs, PLATO_AON_CR_RESET_CTRL, 0x40);
	plato_sleep_ms(100);

	// Temporary fix for non-32GB PCI BAR Plato boards that seem
	// to not signal getting out reset
	if (!PLATO_HAS_NON_MAPPABLE(plato)) {
		err = poll(dev, aon_regs, PLATO_AON_CR_RESET_CTRL, 0x30, 0xF0);
		if (err) {
			dev_err(dev, "%s: Plato failed to come out of reset!", __func__);
			return PLATO_INIT_FAILURE;
		}
	}

	// On non-32GB PCI BAR Plato boards, bring display subsystem
	// out of reset with PLL bypassed.
	if (PLATO_HAS_NON_MAPPABLE(plato)) {
		plato_write_reg32(aon_regs, PLATO_AON_CR_PLL_BYPASS, 0x0);
		plato_write_reg32(aon_regs, PLATO_AON_CR_RESET_CTRL, PLATO_CR_DISPLAY_RESET_MASK);
		plato_write_reg32(aon_regs, PLATO_AON_CR_PLL_BYPASS, 0x1);
	}

	/* Setting dual memory interleaved mode */
	plato_write_reg32(noc_regs, 0x00000050, 0x01);
	poll_pr(dev, noc_regs, 0x00000058, 0x01, 0x1, 1, 10);

	plato_write_reg32(aon_regs, PLATO_AON_CR_NOC_CLK_CTRL, 0x1);
	poll(dev, aon_regs, PLATO_AON_CR_NOC_CLK_CTRL, 0x1, 0x1);

	/* Setup DDR PLL's */
	mem_clock_speed = plato_mem_clock_speed(dev);
	mem_pll_clock_speed = mem_clock_speed;

	/* Feedback divider integer part; REFDIV/POSTDIV1/POSTDIV2 all 1 */
	mem_clock_pll_control0 =
		(get_plato_pll_int(mem_pll_clock_speed) <<
		PLATO_CR_DDR_PLL_FBDIV_SHIFT);
	mem_clock_pll_control0 &= PLATO_CR_DDR_PLL_FBDIV_MASK;
	mem_clock_pll_control0 |= (1 << PLATO_CR_DDR_PLL_REFDIV_SHIFT);
	mem_clock_pll_control0 |= (1 << PLATO_CR_DDR_PLL_POSTDIV1_SHIFT);
	mem_clock_pll_control0 |= (1 << PLATO_CR_DDR_PLL_POSTDIV2_SHIFT);

	mem_clock_pll_control1 =
		(get_plato_pll_frac(mem_pll_clock_speed) <<
		PLATO_CR_DDR_PLL_FRAC_SHIFT);
	mem_clock_pll_control1 &= PLATO_CR_DDR_PLL_FRAC_MASK;

	plato_write_reg32(aon_regs, PLATO_AON_CR_DDR_PLL_CTRL_0,
			mem_clock_pll_control0);
	poll(dev, aon_regs, PLATO_AON_CR_DDR_PLL_CTRL_0,
			mem_clock_pll_control0, mem_clock_pll_control0);

	plato_write_reg32(aon_regs, PLATO_AON_CR_DDR_PLL_CTRL_1,
			mem_clock_pll_control1);
	poll(dev, aon_regs, PLATO_AON_CR_DDR_PLL_CTRL_1,
			mem_clock_pll_control1, mem_clock_pll_control1);

	dev_info(dev,
		"%s: DDR clock: %u", __func__, mem_clock_speed);

	/* Setup GPU PLL's */
	core_clock_speed = plato_core_clock_speed(dev);
	core_pll_clock_speed = plato_pll_clock_speed(dev,
			core_clock_speed);

	core_clock_pll_control0 =
		(get_plato_pll_int(core_pll_clock_speed) <<
		PLATO_CR_GPU_PLL_FBDIV_SHIFT);
	core_clock_pll_control0 &= PLATO_CR_GPU_PLL_FBDIV_MASK;
	core_clock_pll_control0 |= (1 << PLATO_CR_GPU_PLL_REFDIV_SHIFT);
	core_clock_pll_control0 |= (1 << PLATO_CR_GPU_PLL_POSTDIV1_SHIFT);
	core_clock_pll_control0 |= (1 << PLATO_CR_GPU_PLL_POSTDIV2_SHIFT);

	core_clock_pll_control1 =
		(get_plato_pll_frac(core_pll_clock_speed) <<
		PLATO_CR_GPU_PLL_FRAC_SHIFT);
	core_clock_pll_control1 &= PLATO_CR_GPU_PLL_FRAC_MASK;

	plato_write_reg32(aon_regs, PLATO_AON_CR_GPU_PLL_CTRL_0,
			core_clock_pll_control0);
	poll(dev, aon_regs, PLATO_AON_CR_GPU_PLL_CTRL_0,
			core_clock_pll_control0, core_clock_pll_control0);

	plato_write_reg32(aon_regs, PLATO_AON_CR_GPU_PLL_CTRL_1,
			core_clock_pll_control1);
	poll(dev, aon_regs, PLATO_AON_CR_GPU_PLL_CTRL_1,
			core_clock_pll_control1, core_clock_pll_control1);

	dev_info(dev,
		"%s: GPU clock: %u", __func__, core_clock_speed);

#if defined(ENABLE_PLATO_HDMI)
	/* Setup HDMI CEC clock outputs */
	hdmicec_clock_control = 0;
	hdmicec_clock_control |=
		(get_plato_hdmicecv0_div0(core_pll_clock_speed) <<
		PLATO_CR_HDMICECV0_DIV_0_SHIFT);
	hdmicec_clock_control |=
		(get_plato_hdmicecv1_div0(core_pll_clock_speed) <<
		PLATO_CR_HDMICECV1_DIV_0_SHIFT);
	hdmicec_clock_control |=
		(get_plato_hdmicecv2_div0(core_pll_clock_speed) <<
		PLATO_CR_HDMICECV2_DIV_0_SHIFT);

	plato_write_reg32(dbg_perip_regs, PLATO_TOP_CR_HDMI_CEC_CLK_CTRL,
			hdmicec_clock_control);
	poll(dev, dbg_perip_regs, PLATO_TOP_CR_HDMI_CEC_CLK_CTRL,
			hdmicec_clock_control, hdmicec_clock_control);

	plato_write_reg32(dbg_perip_regs, PLATO_TOP_CR_I2C_CLK_CTRL, 0x1);
	poll(dev, dbg_perip_regs, PLATO_TOP_CR_I2C_CLK_CTRL, 0x1, 0x1);
#endif

	PLATO_CHECKPOINT(plato);

	/* Waiting for DDR and GPU PLL's to lock */
	/* NOTE(review): cnt == -1 wraps to U32_MAX — effectively unbounded */
	poll_pr(dev, aon_regs, PLATO_AON_CR_PLL_STATUS, 0x3, 0x3, -1, 10);

	plato_write_reg32(aon_regs, PLATO_AON_CR_PLL_BYPASS, 0x01);
	poll(dev, aon_regs, PLATO_AON_CR_PLL_BYPASS, 0x01, 0x01);
	/* PLL Lock is done */

	plato_sleep_us(1000);

	/* Enabling gated clock output for DDR A/B */
	mem_clock_control = (1 << PLATO_CR_DDRAG_GATE_EN_SHIFT) |
		(1 << PLATO_CR_DDRBG_GATE_EN_SHIFT);

	plato_write_reg32(dbg_perip_regs,
			PLATO_TOP_CR_DDR_CLK_CTRL, mem_clock_control);
	poll(dev, dbg_perip_regs,
			PLATO_TOP_CR_DDR_CLK_CTRL, mem_clock_control, mem_clock_control);

	/* Enabling gated clock output for GPU and dividing the clock */
	core_clock_control = (1 << PLATO_CR_GPUG_GATE_EN_SHIFT);
	core_clock_control |=
		(get_plato_gpuv_div0(core_pll_clock_speed, core_clock_speed)
		<< PLATO_CR_GPUV_DIV_0_SHIFT);

	plato_write_reg32(dbg_perip_regs,
			PLATO_TOP_CR_GPU_CLK_CTRL, core_clock_control);
	poll(dev, dbg_perip_regs,
			PLATO_TOP_CR_GPU_CLK_CTRL, core_clock_control, core_clock_control);

	PLATO_CHECKPOINT(plato);

	plato_sleep_us(100);

	/* Enabling PDP gated clock output >= 165 MHz for <= 1080p */
	pdp_clock_control = (1 << PLATO_CR_PDPG_GATE_EN_SHIFT);
	pdp_clock_control |=
		(get_plato_pdpv0_div0(core_pll_clock_speed)
		<< PLATO_CR_PDPV0_DIV_0_SHIFT);
	pdp_clock_control |=
		(get_plato_pdpv1_div0(core_pll_clock_speed)
		<< PLATO_CR_PDPV1_DIV_0_SHIFT);

	plato_write_reg32(dbg_perip_regs, PLATO_TOP_CR_PDP_CLK_CTRL,
			pdp_clock_control);
	poll(dev, dbg_perip_regs, PLATO_TOP_CR_PDP_CLK_CTRL,
			pdp_clock_control, pdp_clock_control);

	plato_sleep_us(100);

	/*
	 * Enabling HDMI gated clock output,
	 * PDP needs HDMI clocks on for framegrabber.
	 *
	 * NOTE: The dividers will be reconfigured in video.c,
	 * for now they are set to their highest values.
	 */
	hdmi_clock_control = (1 << PLATO_CR_HDMIG_GATE_EN_SHIFT);
	hdmi_clock_control |= (PLATO_CR_HDMIV0_DIV_0_MASK);
	hdmi_clock_control |= (PLATO_CR_HDMIV1_DIV_0_MASK);
	plato_write_reg32(dbg_perip_regs, PLATO_TOP_CR_HDMI_CLK_CTRL,
			hdmi_clock_control);
	poll(dev, dbg_perip_regs, PLATO_TOP_CR_HDMI_CLK_CTRL,
			hdmi_clock_control, hdmi_clock_control);

	plato_sleep_us(100);

	plato_dev_info(dev, "%s: Enabled PDP and HDMI clocks", __func__);
	PLATO_CHECKPOINT(plato);

	/* Now putting DRAM controller out of reset */
	plato_write_reg32(aon_regs, PLATO_AON_CR_RESET_CTRL,
			PLATO_CR_DDR_A_DATA_RESET_N_MASK |
			PLATO_CR_DDR_A_CTRL_RESET_N_MASK |
			PLATO_CR_DDR_B_DATA_RESET_N_MASK |
			PLATO_CR_DDR_B_CTRL_RESET_N_MASK);
	plato_sleep_us(100);

	/* Now putting DRAM controller into reset */
	plato_write_reg32(aon_regs, PLATO_AON_CR_RESET_CTRL,
			PLATO_CR_DDR_A_CTRL_RESET_N_MASK |
			PLATO_CR_DDR_B_CTRL_RESET_N_MASK);

	/* Always configure DDR A */
	err = plato_dram_init(plato, ddra_publ_regs, ddra_ctrl_regs, aon_regs,
			bdlr_setup_ddra,
			PLATO_CR_DDR_A_DATA_RESET_N_MASK |
			PLATO_CR_DDR_A_CTRL_RESET_N_MASK);
	if (err != 0) {
		dev_err(dev, "DDR Bank A setup failed. Init cannot proceed.");
		return err;
	}

	plato_dev_info(dev, "%s: Finished DDR A Setup", __func__);

	/* Configure DDR B */
	err = plato_dram_init(plato, ddrb_publ_regs, ddrb_ctrl_regs, aon_regs,
			bdlr_setup_ddrb,
			PLATO_CR_DDR_B_DATA_RESET_N_MASK |
			PLATO_CR_DDR_B_CTRL_RESET_N_MASK);
	if (err != 0) {
		dev_err(dev, "DDR Bank B setup failed. Init cannot proceed.");
		return err;
	}

	plato_dev_info(dev, "%s: Finished DDR B Setup", __func__);

	/* Getting GPU And DDR A/B out of reset */
	plato_write_reg32(aon_regs, PLATO_AON_CR_RESET_CTRL, 0x00000F12);
	err = poll_pr(dev, aon_regs,
			PLATO_AON_CR_RESET_CTRL,
			0x00000F12, 0x00000F12, -1, 100);
	if (err)
		return err;

	/* setting CR_ISO_CTRL:CR_GPU_CLK_E */
	plato_write_reg32(aon_regs, PLATO_AON_CR_ISO_CTRL, 0x000001F);

	return err;

}
#else

/*
 * Single-channel (DDR A only) variant of the board bring-up; same overall
 * sequence as the dual-channel path minus NoC interleaving and channel B.
 * (Definition continues past this chunk.)
 */
static int plato_single_channel_init(struct plato_device *plato)
{
	struct device *dev = &plato->pdev->dev;
	int err = 0;
	void *dbg_perip_regs = plato->sys_io.registers + SYS_PLATO_REG_PERIP_OFFSET;
	void *ddra_ctrl_regs = plato->sys_io.registers + SYS_PLATO_REG_DDR_A_CTRL_OFFSET;
	void *ddra_publ_regs = plato->sys_io.registers + SYS_PLATO_REG_DDR_A_PUBL_OFFSET;
	void *aon_regs = plato->aon_regs.registers;
	u32 mem_clock_speed, mem_pll_clock_speed;
	u32 mem_clock_pll_control0, mem_clock_pll_control1;
	u32 mem_clock_control;
	u32 core_clock_speed, core_pll_clock_speed;
	u32 core_clock_pll_control0, core_clock_pll_control1;
	u32 core_clock_control;
#if defined(ENABLE_PLATO_HDMI)
	u32 hdmicec_clock_control;
#endif
	u32 hdmi_clock_control, pdp_clock_control;

	/* Plato Soft reset */
	plato_write_reg32(aon_regs, PLATO_AON_CR_RESET_CTRL, 0x40);
	plato_sleep_ms(1000);
	err = poll(dev, aon_regs, PLATO_AON_CR_RESET_CTRL, 0x030, 0xF0);
	if (err)
		return err;

	PLATO_CHECKPOINT(plato);

	plato_write_reg32(aon_regs, PLATO_AON_CR_NOC_CLK_CTRL, 0x1);
	poll(dev, aon_regs, PLATO_AON_CR_NOC_CLK_CTRL, 0x1, 0x1);

	/* Setup DDR PLL's */
	mem_clock_speed = plato_mem_clock_speed(dev);
	mem_pll_clock_speed = mem_clock_speed;

	mem_clock_pll_control0 =
		(get_plato_pll_int(mem_pll_clock_speed) <<
		PLATO_CR_DDR_PLL_FBDIV_SHIFT);
	mem_clock_pll_control0 &= PLATO_CR_DDR_PLL_FBDIV_MASK;
	mem_clock_pll_control0 |= (1 << PLATO_CR_DDR_PLL_REFDIV_SHIFT);
	mem_clock_pll_control0 |= (1 <<
PLATO_CR_DDR_PLL_POSTDIV1_SHIFT); + mem_clock_pll_control0 |= (1 << PLATO_CR_DDR_PLL_POSTDIV2_SHIFT); + + mem_clock_pll_control1 = + (get_plato_pll_frac(mem_pll_clock_speed) << + PLATO_CR_DDR_PLL_FRAC_SHIFT); + mem_clock_pll_control1 &= PLATO_CR_DDR_PLL_FRAC_MASK; + + plato_write_reg32(aon_regs, PLATO_AON_CR_DDR_PLL_CTRL_0, + mem_clock_pll_control0); + poll(dev, aon_regs, PLATO_AON_CR_DDR_PLL_CTRL_0, + mem_clock_pll_control0, mem_clock_pll_control0); + + plato_write_reg32(aon_regs, PLATO_AON_CR_DDR_PLL_CTRL_1, + mem_clock_pll_control1); + poll(dev, aon_regs, PLATO_AON_CR_DDR_PLL_CTRL_1, + mem_clock_pll_control1, mem_clock_pll_control1); + + dev_info(dev, + "%s: DDR clock: %u", __func__, mem_clock_speed); + + /* Setup GPU PLL's */ + core_clock_speed = plato_core_clock_speed(dev); + core_pll_clock_speed = plato_pll_clock_speed(dev, + core_clock_speed); + + core_clock_pll_control0 = + (get_plato_pll_int(core_pll_clock_speed) << + PLATO_CR_GPU_PLL_FBDIV_SHIFT); + core_clock_pll_control0 &= PLATO_CR_GPU_PLL_FBDIV_MASK; + core_clock_pll_control0 |= (1 << PLATO_CR_GPU_PLL_REFDIV_SHIFT); + core_clock_pll_control0 |= (1 << PLATO_CR_GPU_PLL_POSTDIV1_SHIFT); + core_clock_pll_control0 |= (1 << PLATO_CR_GPU_PLL_POSTDIV2_SHIFT); + + core_clock_pll_control1 = + (get_plato_pll_frac(core_pll_clock_speed) << + PLATO_CR_GPU_PLL_FRAC_SHIFT); + core_clock_pll_control1 &= PLATO_CR_GPU_PLL_FRAC_MASK; + + plato_write_reg32(aon_regs, PLATO_AON_CR_GPU_PLL_CTRL_0, + core_clock_pll_control0); + poll(dev, aon_regs, PLATO_AON_CR_GPU_PLL_CTRL_0, + core_clock_pll_control0, core_clock_pll_control0); + + plato_write_reg32(aon_regs, PLATO_AON_CR_GPU_PLL_CTRL_1, + core_clock_pll_control1); + poll(dev, aon_regs, PLATO_AON_CR_GPU_PLL_CTRL_1, + core_clock_pll_control1, core_clock_pll_control1); + + dev_info(dev, + "%s: GPU clock: %u", __func__, core_clock_speed); + +#if defined(ENABLE_PLATO_HDMI) + /* Setup HDMI CEC clock outputs */ + hdmicec_clock_control = 0; + hdmicec_clock_control |= + 
(get_plato_hdmicecv0_div0(core_pll_clock_speed) << + PLATO_CR_HDMICECV0_DIV_0_SHIFT); + hdmicec_clock_control |= + (get_plato_hdmicecv1_div0(core_pll_clock_speed) << + PLATO_CR_HDMICECV1_DIV_0_SHIFT); + hdmicec_clock_control |= + (get_plato_hdmicecv2_div0(core_pll_clock_speed) << + PLATO_CR_HDMICECV2_DIV_0_SHIFT); + + plato_write_reg32(dbg_perip_regs, PLATO_TOP_CR_HDMI_CEC_CLK_CTRL, + hdmicec_clock_control); + poll(dev, dbg_perip_regs, PLATO_TOP_CR_HDMI_CEC_CLK_CTRL, + hdmicec_clock_control, hdmicec_clock_control); + + plato_write_reg32(dbg_perip_regs, PLATO_TOP_CR_I2C_CLK_CTRL, 0x1); + poll(dev, dbg_perip_regs, PLATO_TOP_CR_I2C_CLK_CTRL, 0x1, 0x1); +#endif + + PLATO_CHECKPOINT(plato); + + /* Waiting for DDR and GPU PLL's to lock */ + poll_pr(dev, aon_regs, PLATO_AON_CR_PLL_STATUS, 0x3, 0x3, -1, 10); + + plato_write_reg32(aon_regs, PLATO_AON_CR_PLL_BYPASS, 0x01); + poll(dev, aon_regs, PLATO_AON_CR_PLL_BYPASS, 0x01, 0x01); + /* PLL Lock is done */ + + plato_sleep_us(100); + + /* Enabling gated clock output for DDR A/B */ + mem_clock_control = (1 << PLATO_CR_DDRAG_GATE_EN_SHIFT) | + (1 << PLATO_CR_DDRBG_GATE_EN_SHIFT); + + plato_write_reg32(dbg_perip_regs, + PLATO_TOP_CR_DDR_CLK_CTRL, mem_clock_control); + poll(dev, dbg_perip_regs, + PLATO_TOP_CR_DDR_CLK_CTRL, mem_clock_control, mem_clock_control); + + /* Enabling gated clock output for GPU and dividing the clock */ + core_clock_control = (1 << PLATO_CR_GPUG_GATE_EN_SHIFT); + core_clock_control |= + (get_plato_gpuv_div0(core_pll_clock_speed, core_clock_speed) + << PLATO_CR_GPUV_DIV_0_SHIFT); + + plato_write_reg32(dbg_perip_regs, + PLATO_TOP_CR_GPU_CLK_CTRL, core_clock_control); + poll(dev, dbg_perip_regs, + PLATO_TOP_CR_GPU_CLK_CTRL, core_clock_control, core_clock_control); + + PLATO_CHECKPOINT(plato); + + plato_sleep_us(1000); + + /* Enabling PDP gated clock output >= 165 MHz for <= 1080p */ + pdp_clock_control = (1 << PLATO_CR_PDPG_GATE_EN_SHIFT); + pdp_clock_control |= + (get_plato_pdpv0_div0(core_pll_clock_speed) 
+ << PLATO_CR_PDPV0_DIV_0_SHIFT); + pdp_clock_control |= + (get_plato_pdpv1_div0(core_pll_clock_speed) + << PLATO_CR_PDPV1_DIV_0_SHIFT); + + plato_write_reg32(dbg_perip_regs, PLATO_TOP_CR_PDP_CLK_CTRL, + pdp_clock_control); + poll(dev, dbg_perip_regs, PLATO_TOP_CR_PDP_CLK_CTRL, + pdp_clock_control, pdp_clock_control); + + plato_sleep_us(100); + + /* + * Enabling HDMI gated clock output. PDP needs HDMI clocks on for framegrabber. + * + * NOTE: The dividers will be reconfigured in video.c, + * for now they are set to their highest values. + */ + hdmi_clock_control = (1 << PLATO_CR_HDMIG_GATE_EN_SHIFT); + hdmi_clock_control |= (PLATO_CR_HDMIV0_DIV_0_MASK); + hdmi_clock_control |= (PLATO_CR_HDMIV1_DIV_0_MASK); + plato_write_reg32(dbg_perip_regs, PLATO_TOP_CR_HDMI_CLK_CTRL, + hdmi_clock_control); + poll(dev, dbg_perip_regs, PLATO_TOP_CR_HDMI_CLK_CTRL, + hdmi_clock_control, hdmi_clock_control); + + plato_sleep_us(100); + + plato_dev_info(dev, "%s: Enabled PDP and HDMI clocks", __func__); + + PLATO_CHECKPOINT(plato); + + /* Now putting DRAM A controller out of reset */ + plato_write_reg32(aon_regs, PLATO_AON_CR_RESET_CTRL, 0x0300); + plato_sleep_us(100); + + /* Now putting DRAM A controller into reset */ + plato_write_reg32(aon_regs, PLATO_AON_CR_RESET_CTRL, 0x0200); + + /* Configure DRAM A control and publ regs */ + err = plato_dram_init(ddra_publ_regs, ddra_ctrl_regs, aon_regs, + PLATO_CR_DDR_A_DATA_RESET_N_MASK | PLATO_CR_DDR_A_CTRL_RESET_N_MASK); + if (err != 0) { + dev_err(dev, "DDR Bank setup failed. 
Init cannot proceed."); + return err; + } + plato_dev_info(dev, "- %s: Finished DDR A Setup", __func__); + + /* Getting GPU and DDR A out of reset */ + plato_write_reg32(aon_regs, PLATO_AON_CR_RESET_CTRL, 0x00000312); + err = poll_pr(dev, aon_regs, + PLATO_AON_CR_RESET_CTRL, + 0x00000312, 0x00000312, -1, 100); + if (err) + return err; + + /* setting CR_ISO_CTRL:CR_GPU_CLK_E */ + plato_write_reg32(aon_regs, PLATO_AON_CR_ISO_CTRL, 0x000001F); + + return err; +} + +#endif /* PLATO_DUAL_CHANNEL_DDR */ + +static int plato_memory_test(struct plato_device *plato) +{ + struct device *dev = &plato->pdev->dev; + u64 i, j = 0; + u32 tmp = 0; + u32 chunk = sizeof(u32) * 10; + u64 mem_base = plato->rogue_heap_mappable.base; + u64 mem_size = plato->rogue_heap_mappable.size + PLATO_PDP_MEM_SIZE; // test PDP memory heap too. + + plato_dev_info(dev, "Starting Local memory test from 0x%llx to 0x%llx (in CPU space)", + mem_base, mem_base + mem_size); + + while (j < mem_size - chunk) { + u64 p_addr = mem_base + j; + u32 *v_addr = (u32 *)ioremap(p_addr, chunk); + + for (i = 0; i < chunk/sizeof(u32); i++) { + *(v_addr + i) = 0xdeadbeef; + tmp = *(v_addr + i); + if (tmp != 0xdeadbeef) { + dev_err(dev, + "Local memory read-write test failed at address=0x%llx: written 0x%x, read 0x%x", + mem_base + ((i * sizeof(u32)) + j), (u32) 0xdeadbeef, tmp); + + iounmap(v_addr); + return PLATO_INIT_FAILURE; + } + } + + iounmap(v_addr); + + j += (1024 * 1024 * 500); + } + + dev_info(dev, "Local memory read-write test passed!"); + + return PLATO_INIT_SUCCESS; +} + + +#if defined(CONFIG_MTRR) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)) + +/* + * A return value of: + * 0 or more means success + * -1 means we were unable to add an mtrr but we should continue + * -2 means we were unable to add an mtrr but we shouldn't continue + */ +static int mtrr_setup(struct pci_dev *pdev, + resource_size_t mem_start, + resource_size_t mem_size) +{ + int err; + int mtrr; + + /* Reset MTRR */ + mtrr = 
mtrr_add(mem_start, mem_size, MTRR_TYPE_UNCACHABLE, 0); + if (mtrr < 0) { + dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n", + __LINE__, __func__, mtrr); + mtrr = -2; + goto err_out; + } + + err = mtrr_del(mtrr, mem_start, mem_size); + if (err < 0) { + dev_err(&pdev->dev, "%d - %s: mtrr_del failed (%d)\n", + __LINE__, __func__, err); + mtrr = -2; + goto err_out; + } + + mtrr = mtrr_add(mem_start, mem_size, MTRR_TYPE_WRBACK, 0); + if (mtrr < 0) { + /* Stop, but not an error as this may be already be setup */ + dev_warn(&pdev->dev, + "%d - %s: mtrr_add failed (%d) - probably means the mtrr is already setup\n", + __LINE__, __func__, mtrr); + mtrr = -1; + goto err_out; + } + + err = mtrr_del(mtrr, mem_start, mem_size); + if (err < 0) { + dev_err(&pdev->dev, "%d - %s: mtrr_del failed (%d)\n", + __LINE__, __func__, err); + mtrr = -2; + goto err_out; + } + + if (mtrr == 0) { + /* Replace 0 with a non-overlapping WRBACK mtrr */ + err = mtrr_add(0, mem_start, MTRR_TYPE_WRBACK, 0); + if (err < 0) { + dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n", + __LINE__, __func__, err); + mtrr = -2; + goto err_out; + } + } + + mtrr = mtrr_add(mem_start, mem_size, MTRR_TYPE_WRCOMB, 0); + if (mtrr < 0) { + dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n", + __LINE__, __func__, mtrr); + mtrr = -1; + } + +err_out: + return mtrr; +} + +#endif /* defined(CONFIG_MTRR) && (LINUX_VERSION_CODEpdev->dev; + + /* Setup card memory */ + plato->rogue_mem.size = pci_resource_len(plato->pdev, SYS_DEV_MEM_PCI_BASENUM); + if (request_pci_io_addr(plato->pdev, SYS_DEV_MEM_PCI_BASENUM, 0, plato->rogue_mem.size) + != PLATO_INIT_SUCCESS) { + dev_err(dev, "Failed to request PCI memory region"); + return PLATO_INIT_FAILURE; + } + + plato->rogue_mem.base = pci_resource_start(plato->pdev, SYS_DEV_MEM_PCI_BASENUM); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) + if (arch_io_reserve_memtype_wc(plato->rogue_mem.base, + 
plato->rogue_mem.size)) + return PLATO_INIT_FAILURE; +#endif + plato->mtrr = arch_phys_wc_add(plato->rogue_mem.base, + plato->rogue_mem.size); + if (plato->mtrr < 0) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) + arch_io_free_memtype_wc(plato->rogue_mem.base, + plato->rogue_mem.size); +#endif + return PLATO_INIT_FAILURE; + } +#elif defined(CONFIG_MTRR) + plato->mtrr = mtrr_setup(plato->pdev, plato->rogue_mem.base, + plato->rogue_mem.size); + if (plato->mtrr == -2) + return PLATO_INIT_FAILURE; +#endif + + plato->rogue_heap_mappable.base = plato->rogue_mem.base; + plato->rogue_heap_mappable.size = plato->rogue_mem.size; + + if (!PLATO_HAS_NON_MAPPABLE(plato)) { + plato->has_nonmappable = false; + /* + * With a BAR size that's greater than the actual memory size, + * move base CPU address of the heap region to use last + * aliased part of odd region and first aliased part of even region. + * + * This allows to use full available memory in one contiguous heap region. + */ + dev_info(dev, "System does NOT have non-mappable memory (32GB BAR)"); + plato->rogue_heap_mappable.base += + PLATO_DRAM_SPLIT_ADDR - (SYS_DEV_MEM_REGION_SIZE >> 1) - PLATO_DDR_DEV_PHYSICAL_BASE; + plato->rogue_heap_mappable.size = SYS_DEV_MEM_REGION_SIZE; + + plato->dev_mem_base = PLATO_DDR_ALIASED_DEV_PHYSICAL_BASE; + } else { + u64 preceding_region_base = 0; + u64 preceding_region_size = 0; + u64 following_region_base = 0; + u64 following_region_size = 0; + + plato->has_nonmappable = true; + + dev_info(dev, "System has non-mappable memory (<8GB BAR)"); + + plato->dev_mem_base = PLATO_DDR_DEV_PHYSICAL_BASE; + + /* + * With a BAR size less than the actual memory size (8GB), + * we need to dynamically calculate the device base address + * that the PCI memory window is pointing to. The address depends on + * what address in host memory space the memory BAR was assigned by the BIOS. 
+ * + * Bits x to 34 in host CPU base address decide on where + * within the DRAM region the BAR points to, where x is the shift calculated + * below based on the mapped memory size (BAR). + */ + { + u32 shift = __builtin_ffsll(plato->rogue_heap_mappable.size) - 1; + u64 mask = (1ULL << (35-shift)) - 1ULL; + + plato->dev_mem_base += plato->rogue_heap_mappable.base & (mask << shift); + } + + /* + * Our aliased address can point anywhere in the 32GB - 64GB range, + * we now need to determine the closest aliased address to the split point (48GB). + * This is done by first finding the offset from the previous segment (mod below) + * and adding it to either the DRAM split point or the start of the aliased + * region that's closest to the split point. + */ + if (plato->dev_mem_base >= PLATO_DRAM_SPLIT_ADDR) + plato->dev_mem_base = PLATO_DRAM_SPLIT_ADDR + + (plato->dev_mem_base % PLATO_DDR_ALIASED_DEV_SEGMENT_SIZE); + else + plato->dev_mem_base = PLATO_DDR_ALIASED_DEV_PHYSICAL_BASE + + (plato->dev_mem_base % PLATO_DDR_ALIASED_DEV_SEGMENT_SIZE); + + // Setup non-mappable region if BAR size is less than + // actual memory size (8GB) + plato->rogue_heap_nonmappable.base = PLATO_DDR_DEV_PHYSICAL_BASE; + + /* + * If mapped region is not at the base of memory, + * then it is preceded by a non-mappable region + */ + preceding_region_base = PLATO_DDR_ALIASED_DEV_PHYSICAL_BASE; + preceding_region_size = plato->dev_mem_base - preceding_region_base; + + /* + * If mapped region is not at the end of memory, + * then it is followed by a non-mappable region + */ + following_region_base = plato->dev_mem_base + plato->rogue_heap_mappable.size; + following_region_size = PLATO_DDR_ALIASED_DEV_PHYSICAL_END - + (plato->dev_mem_base + plato->rogue_heap_mappable.size); + + /* Use only bigger region for now */ + if (following_region_size > preceding_region_size) { + plato->rogue_heap_nonmappable.base = following_region_base; + plato->rogue_heap_nonmappable.size = following_region_size; + } else 
{ + plato->rogue_heap_nonmappable.base = preceding_region_base; + plato->rogue_heap_nonmappable.size = preceding_region_size; + } + } + +#if defined(SUPPORT_PLATO_DISPLAY) + if (plato->rogue_heap_mappable.size < PLATO_PDP_MEM_SIZE) { + dev_err(dev, "Not enough memory for the PDP (0x%llx < 0x%llx)", + (u64)plato->rogue_heap_mappable.size, (u64)PLATO_PDP_MEM_SIZE); + plato_memory_deinit(plato); + return PLATO_INIT_FAILURE; + } + + plato->rogue_heap_mappable.size -= PLATO_PDP_MEM_SIZE; + /* Setup ranges for the device heaps */ + plato->pdp_heap.size = PLATO_PDP_MEM_SIZE; + plato->pdp_heap.base = plato->rogue_heap_mappable.base + plato->rogue_heap_mappable.size; +#endif + + plato_dev_info(dev, "Initialized rogue heap with base 0x%llx and size 0x%llx", + (u64)plato->rogue_heap_mappable.base, (u64)plato->rogue_heap_mappable.size); + + if (plato_memory_test(plato) != PLATO_INIT_SUCCESS) { + plato_memory_deinit(plato); + return PLATO_INIT_FAILURE; + } + + return PLATO_INIT_SUCCESS; +} + +void plato_memory_deinit(struct plato_device *plato) +{ + PLATO_DRM_CHECKPOINT; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + if (plato->mtrr >= 0) { + arch_phys_wc_del(plato->mtrr); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) + arch_io_free_memtype_wc(plato->rogue_mem.base, + plato->rogue_mem.size); +#endif + } +#elif defined(CONFIG_MTRR) + if (plato->mtrr >= 0) { + int err; + + err = mtrr_del(plato->mtrr, plato->rogue_mem.base, + plato->rogue_mem.size); + if (err < 0) + dev_err(&plato->pdev->dev, + "%d - %s: mtrr_del failed (%d)\n", + __LINE__, __func__, err); + } +#endif + + release_pci_io_addr(plato->pdev, SYS_DEV_MEM_PCI_BASENUM, + plato->rogue_mem.base, plato->rogue_mem.size); +} + +#if defined(EMULATOR) +static int plato_emu_init(struct plato_device *plato) +{ + struct device *dev = &plato->pdev->dev; + void *perip_regs = plato->sys_io.registers + SYS_PLATO_REG_PERIP_OFFSET; + void *ddra_ctrl_regs = plato->sys_io.registers + SYS_PLATO_REG_DDR_A_CTRL_OFFSET; + 
void *ddra_publ_regs = plato->sys_io.registers + SYS_PLATO_REG_DDR_A_PUBL_OFFSET; + void *ddrb_ctrl_regs = plato->sys_io.registers + SYS_PLATO_REG_DDR_B_CTRL_OFFSET; + void *ddrb_publ_regs = plato->sys_io.registers + SYS_PLATO_REG_DDR_B_PUBL_OFFSET; + void *noc_regs = plato->sys_io.registers + SYS_PLATO_REG_NOC_OFFSET; + void *aon_regs = plato->aon_regs.registers; + +#if defined(ENABLE_PLATO_HDMI) + plato_write_reg32(perip_regs, PLATO_TOP_CR_HDMI_CEC_CLK_CTRL, 0x3370A03); + poll(dev, perip_regs, PLATO_TOP_CR_HDMI_CEC_CLK_CTRL, 0x3370A03, 0x3370A03); + plato_write_reg32(perip_regs, PLATO_TOP_CR_I2C_CLK_CTRL, 0x1); + poll(dev, perip_regs, PLATO_TOP_CR_I2C_CLK_CTRL, 0x1, 0x1); +#endif + + plato_write_reg32(aon_regs, PLATO_AON_CR_NOC_CLK_CTRL, 0x1); + poll(dev, aon_regs, PLATO_AON_CR_NOC_CLK_CTRL, 0x1, 0x1); + + plato_write_reg32(aon_regs, PLATO_AON_CR_DDR_PLL_CTRL_0, 0x01101037); + poll(dev, aon_regs, PLATO_AON_CR_DDR_PLL_CTRL_0, 0x01101037, 0x01101037); + plato_write_reg32(aon_regs, PLATO_AON_CR_DDR_PLL_CTRL_1, 0x00780000); + poll(dev, aon_regs, PLATO_AON_CR_DDR_PLL_CTRL_1, 0x00780000, 0x00780000); + + /* Waiting for DDR PLL getting locked */ + poll_pr(dev, aon_regs, PLATO_AON_CR_PLL_STATUS, 0x2, 0x2, -1, 10); + + plato_write_reg32(aon_regs, PLATO_AON_CR_PLL_BYPASS, 0x01); + poll(dev, aon_regs, PLATO_AON_CR_PLL_BYPASS, 0x01, 0x01); + + plato_sleep_us(100); + +#if defined(PLATO_DUAL_CHANNEL_DDR) + plato_write_reg32(perip_regs, PLATO_TOP_CR_DDR_CLK_CTRL, 0x011); + poll(dev, perip_regs, PLATO_TOP_CR_DDR_CLK_CTRL, 0x011, 0x011); +#else + plato_write_reg32(perip_regs, PLATO_TOP_CR_DDR_CLK_CTRL, 0x01); + poll(dev, perip_regs, PLATO_TOP_CR_DDR_CLK_CTRL, 0x01, 0x01); +#endif + /* PLL Lock is done */ + + plato_sleep_us(1000); + + /* Enabling PDP gated clock output - 198 MHz */ + + plato_write_reg32(perip_regs, PLATO_TOP_CR_PDP_CLK_CTRL, 0x00001210); + poll(dev, perip_regs, PLATO_TOP_CR_PDP_CLK_CTRL, 0x00001210, 0x00001210); + + plato_sleep_us(100); + + /* PDP needs HDMI 
clocks on for framegrabber, start them here */ + /* Enabling HDMI gated clock output - 148.5 MHz */ + plato_write_reg32(perip_regs, PLATO_TOP_CR_HDMI_CLK_CTRL, 0x00001310); + poll(dev, perip_regs, PLATO_TOP_CR_HDMI_CLK_CTRL, 0x00001310, 0x00001310); + + plato_sleep_us(100); + plato_dev_info(dev, "%s: Enabled PDP and HDMI clocks", __func__); + + /* GPU PLL configuration */ + plato_write_reg32(aon_regs, PLATO_AON_CR_GPU_PLL_CTRL_0, 0x0110103D); + poll(dev, aon_regs, PLATO_AON_CR_GPU_PLL_CTRL_0, 0x0110103D, 0x0110103D); + + plato_write_reg32(aon_regs, PLATO_AON_CR_GPU_PLL_CTRL_1, 0x00E00000); + poll(dev, aon_regs, PLATO_AON_CR_GPU_PLL_CTRL_1, 0x00E00000, 0x00E00000); + /* Waiting for GPU PLL getting locked */ + + poll_pr(dev, aon_regs, PLATO_AON_CR_PLL_STATUS, 0x3, 0x3, -1, 10); + /* GPU and DDR PLL Locked */ + + plato_write_reg32(aon_regs, PLATO_AON_CR_PLL_BYPASS, 0x01); + poll(dev, aon_regs, PLATO_AON_CR_PLL_BYPASS, 0x1, 0x1); + + plato_sleep_us(100); + + /* Enabling gated clock output */ + plato_write_reg32(perip_regs, PLATO_TOP_CR_GPU_CLK_CTRL, 0x011); + poll(dev, perip_regs, PLATO_TOP_CR_GPU_CLK_CTRL, 0x011, 0x011); + + plato_sleep_us(1000); + +#if defined(PLATO_DUAL_CHANNEL_DDR) + /* Setting dual memory interleaved mode */ + plato_write_reg32(noc_regs, 0x00000050, 0x01); + poll_pr(dev, noc_regs, 0x00000058, 0x01, 0x1, 1, 10); + + /* Now putting DRAM controller out of reset */ + plato_write_reg32(aon_regs, PLATO_AON_CR_RESET_CTRL, 0x0F30); + if (poll(dev, aon_regs, PLATO_AON_CR_RESET_CTRL, 0x0F30, 0x0F30)) + return PLATO_INIT_FAILURE; + + plato_sleep_us(10); + + plato_write_reg32(aon_regs, PLATO_AON_CR_RESET_CTRL, 0x0A30); + poll(dev, aon_regs, PLATO_AON_CR_RESET_CTRL, 0x0A30, 0xA30); + +#else + plato_write_reg32(aon_regs, PLATO_AON_CR_RESET_CTRL, 0x0330); + if (poll(dev, aon_regs, PLATO_AON_CR_RESET_CTRL, 0x0330, 0x0330)) + return PLATO_INIT_FAILURE; + + plato_sleep_us(10); + + /* Now putting DRAM controller into reset */ + plato_write_reg32(aon_regs, 
PLATO_AON_CR_RESET_CTRL, 0x0230); + poll(dev, aon_regs, PLATO_AON_CR_RESET_CTRL, 0x0230, 0x0230); + +#endif + + /* + * Phase 1: Program the DWC_ddr_umctl2 registers + */ + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_MSTR, 0x8F040001); + poll(dev, ddra_ctrl_regs, PLATO_DDR_CTRL_MSTR, 0x8F040001, 0x8F040001); + +#if defined(PLATO_DUAL_CHANNEL_DDR) + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_MSTR, 0x8F040001); + poll(dev, ddrb_ctrl_regs, PLATO_DDR_CTRL_MSTR, 0x8F040001, 0x8F040001); +#endif + + plato_sleep_us(100); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_RFSHTMG, 0x007f0056); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_RFSHTMG, 0x007f0056); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_INIT3, 0x01140000); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_INIT3, 0x01140000); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_INIT1, 0x00010000); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_INIT1, 0x00010000); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_INIT0, 0x00020001); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_INIT0, 0x00020001); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_INIT4, 0x00280000); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_INIT4, 0x00280000); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_INIT5, 0x000C000C); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_INIT5, 0x000C000C); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_DRAMTMG0, 0x0f132312); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_DRAMTMG0, 0x0f132312); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_DRAMTMG1, 0x00080419); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_DRAMTMG1, 0x00080419); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_DRAMTMG2, 0x0507050b); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_DRAMTMG2, 0x0507050b); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_DRAMTMG3, 0x00002008); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_DRAMTMG3, 0x00002008); + 
+ plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_DRAMTMG4, 0x07020407); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_DRAMTMG4, 0x07020407); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_DRAMTMG5, 0x090e0403); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_DRAMTMG5, 0x090e0403); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_DRAMTMG6, 0x020e000e); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_DRAMTMG6, 0x020e000e); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_DRAMTMG7, 0x00000c0e); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_DRAMTMG7, 0x00000c0e); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_DRAMTMG8, 0x01010a05); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_DRAMTMG8, 0x01010a05); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_ZQCTL0, 0x30ab002b); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_ZQCTL0, 0x30ab002b); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_ZQCTL1, 0x00000070); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_ZQCTL1, 0x00000070); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_ZQCTL2, 0x00000000); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_ZQCTL2, 0x00000000); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_RFSHCTL0, 0x00e01020); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_RFSHCTL0, 0x00e01020); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_RFSHCTL1, 0x0078007e); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_RFSHCTL1, 0x0078007e); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_RFSHCTL2, 0x0057000e); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_RFSHCTL2, 0x0057000e); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_RFSHCTL3, 0x00000000); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_RFSHCTL3, 0x00000000); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_DFITMG0, 0x028A8208); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_DFITMG0, 0x028A8208); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_DFITMG1, 
0x00020202); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_DFITMG1, 0x00020202); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_DFIUPD0, 0x00400003); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_DFIUPD0, 0x00400003); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_DFIUPD1, 0x00F000FF); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_DFIUPD1, 0x00F000FF); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_DFIUPD2, 0x80100010); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_DFIUPD2, 0x80100010); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_DFIUPD3, 0x00100010); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_DFIUPD3, 0x00100010); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_DFIMISC, 0x00000000); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_DFIMISC, 0x00000000); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_ADDRMAP0, 0x00001414); + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_ADDRMAP1, 0x00070707); + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_ADDRMAP2, 0x00000000); + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_ADDRMAP3, 0x0F000000); + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_ADDRMAP4, 0x00000F0F); + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_ADDRMAP5, 0x06060606); + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_ADDRMAP6, 0x0F0F0606); + + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_ADDRMAP0, 0x00001414); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_ADDRMAP1, 0x00070707); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_ADDRMAP2, 0x00000000); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_ADDRMAP3, 0x0F000000); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_ADDRMAP4, 0x00000F0F); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_ADDRMAP5, 0x06060606); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_ADDRMAP6, 0x0F0F0606); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_ODTCFG, 0x04000400); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_ODTCFG, 
0x04000400); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_SCHED, 0x00003F00); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_SCHED, 0x00003F00); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_PERFHPR1, 0x0F0000FF); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_PERFHPR1, 0x0F0000FF); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_PERFLPR1, 0x0F0000FF); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_PERFLPR1, 0x0F0000FF); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_PERFWR1, 0x0F0000FF); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_PERFWR1, 0x0F0000FF); + + /* Setup the virtual channels */ + plato_write_reg32(ddra_ctrl_regs, 0x00000410, 0x0000000F); + plato_write_reg32(ddra_ctrl_regs, 0x00000414, 0x00000000); + plato_write_reg32(ddra_ctrl_regs, 0x00000418, 0x0000000F); + plato_write_reg32(ddra_ctrl_regs, 0x0000041C, 0x00000001); + plato_write_reg32(ddra_ctrl_regs, 0x00000420, 0x0000000F); + plato_write_reg32(ddra_ctrl_regs, 0x00000424, 0x00000002); + plato_write_reg32(ddra_ctrl_regs, 0x00000428, 0x0000000F); + plato_write_reg32(ddra_ctrl_regs, 0x0000042C, 0x00000003); + plato_write_reg32(ddra_ctrl_regs, 0x00000430, 0x0000000F); + plato_write_reg32(ddra_ctrl_regs, 0x00000434, 0x00000004); + plato_write_reg32(ddra_ctrl_regs, 0x00000438, 0x0000000F); + plato_write_reg32(ddra_ctrl_regs, 0x0000043C, 0x00000005); + plato_write_reg32(ddra_ctrl_regs, 0x00000440, 0x0000000F); + plato_write_reg32(ddra_ctrl_regs, 0x00000444, 0x00000006); + plato_write_reg32(ddra_ctrl_regs, 0x00000448, 0x0000000F); + plato_write_reg32(ddra_ctrl_regs, 0x0000044C, 0x00000007); + plato_write_reg32(ddra_ctrl_regs, 0x00000450, 0x0000000F); + plato_write_reg32(ddra_ctrl_regs, 0x00000454, 0x00000008); + plato_write_reg32(ddra_ctrl_regs, 0x00000458, 0x0000000F); + plato_write_reg32(ddra_ctrl_regs, 0x0000045C, 0x00000009); + plato_write_reg32(ddra_ctrl_regs, 0x00000460, 0x0000000F); + plato_write_reg32(ddra_ctrl_regs, 0x00000464, 0x0000000A); + 
plato_write_reg32(ddra_ctrl_regs, 0x00000468, 0x0000000F); + plato_write_reg32(ddra_ctrl_regs, 0x0000046C, 0x0000000B); + plato_write_reg32(ddra_ctrl_regs, 0x00000470, 0x0000000F); + plato_write_reg32(ddra_ctrl_regs, 0x00000474, 0x0000000C); + plato_write_reg32(ddra_ctrl_regs, 0x00000478, 0x0000000F); + plato_write_reg32(ddra_ctrl_regs, 0x0000047C, 0x0000000D); + plato_write_reg32(ddra_ctrl_regs, 0x00000480, 0x0000000F); + plato_write_reg32(ddra_ctrl_regs, 0x00000484, 0x0000000E); + + plato_write_reg32(ddrb_ctrl_regs, 0x00000410, 0x0000000F); + plato_write_reg32(ddrb_ctrl_regs, 0x00000414, 0x00000000); + plato_write_reg32(ddrb_ctrl_regs, 0x00000418, 0x0000000F); + plato_write_reg32(ddrb_ctrl_regs, 0x0000041C, 0x00000001); + plato_write_reg32(ddrb_ctrl_regs, 0x00000420, 0x0000000F); + plato_write_reg32(ddrb_ctrl_regs, 0x00000424, 0x00000002); + plato_write_reg32(ddrb_ctrl_regs, 0x00000428, 0x0000000F); + plato_write_reg32(ddrb_ctrl_regs, 0x0000042C, 0x00000003); + plato_write_reg32(ddrb_ctrl_regs, 0x00000430, 0x0000000F); + plato_write_reg32(ddrb_ctrl_regs, 0x00000434, 0x00000004); + plato_write_reg32(ddrb_ctrl_regs, 0x00000438, 0x0000000F); + plato_write_reg32(ddrb_ctrl_regs, 0x0000043C, 0x00000005); + plato_write_reg32(ddrb_ctrl_regs, 0x00000440, 0x0000000F); + plato_write_reg32(ddrb_ctrl_regs, 0x00000444, 0x00000006); + plato_write_reg32(ddrb_ctrl_regs, 0x00000448, 0x0000000F); + plato_write_reg32(ddrb_ctrl_regs, 0x0000044C, 0x00000007); + plato_write_reg32(ddrb_ctrl_regs, 0x00000450, 0x0000000F); + plato_write_reg32(ddrb_ctrl_regs, 0x00000454, 0x00000008); + plato_write_reg32(ddrb_ctrl_regs, 0x00000458, 0x0000000F); + plato_write_reg32(ddrb_ctrl_regs, 0x0000045C, 0x00000009); + plato_write_reg32(ddrb_ctrl_regs, 0x00000460, 0x0000000F); + plato_write_reg32(ddrb_ctrl_regs, 0x00000464, 0x0000000A); + plato_write_reg32(ddrb_ctrl_regs, 0x00000468, 0x0000000F); + plato_write_reg32(ddrb_ctrl_regs, 0x0000046C, 0x0000000B); + plato_write_reg32(ddrb_ctrl_regs, 0x00000470, 
0x0000000F); + plato_write_reg32(ddrb_ctrl_regs, 0x00000474, 0x0000000C); + plato_write_reg32(ddrb_ctrl_regs, 0x00000478, 0x0000000F); + plato_write_reg32(ddrb_ctrl_regs, 0x0000047C, 0x0000000D); + plato_write_reg32(ddrb_ctrl_regs, 0x00000480, 0x0000000F); + plato_write_reg32(ddrb_ctrl_regs, 0x00000484, 0x0000000E); + + /* DRAM controller registers configuration done */ + + plato_sleep_us(100); + + /* + * Phase 2: Deassert soft reset signal core_ddrc_rstn + */ + + /* Now getting DRAM controller out of reset */ +#if defined(PLATO_DUAL_CHANNEL_DDR) + plato_write_reg32(aon_regs, PLATO_AON_CR_RESET_CTRL, 0x00000F30); +#else + plato_write_reg32(aon_regs, PLATO_AON_CR_RESET_CTRL, 0x00000330); +#endif + + plato_sleep_us(100); + + /* ECC disable */ + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_DBG1, 0x00000000); + /* power related */ + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_PWRCTL, 0x00000000); + /* Enabling AXI input port (Port control) */ + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_PCTRL, 0x00000001); + + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_DBG1, 0x00000000); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_PWRCTL, 0x00000000); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_PCTRL, 0x00000001); + + /* + * Phase 7: Set DFIMISC.dfi_init_complete_en to 1 + * (skipped on emu?) 
+ */ + + /* + * Phase 3: Start PHY initialization by accessing relevant PUB registers + */ + plato_write_reg32(ddra_publ_regs, PLATO_DDR_PUBL_DCR_OFFSET, 0x0000040B); + plato_write_reg32(ddrb_publ_regs, PLATO_DDR_PUBL_DCR_OFFSET, 0x0000040B); + + plato_write_reg32(ddra_publ_regs, PLATO_DDR_PUBL_PTR0_OFFSET, 0x10000010); + plato_write_reg32(ddrb_publ_regs, PLATO_DDR_PUBL_PTR0_OFFSET, 0x10000010); + + plato_write_reg32(ddra_publ_regs, PLATO_DDR_PUBL_PTR1_OFFSET, 0x00800080); + plato_write_reg32(ddrb_publ_regs, PLATO_DDR_PUBL_PTR1_OFFSET, 0x00800080); + + plato_write_reg32(ddra_publ_regs, PLATO_DDR_PUBL_PTR2_OFFSET, 0x00080421); + plato_write_reg32(ddrb_publ_regs, PLATO_DDR_PUBL_PTR2_OFFSET, 0x00080421); + + plato_write_reg32(ddra_publ_regs, PLATO_DDR_PUBL_DSGCR_OFFSET, 0x0020641F); + plato_write_reg32(ddrb_publ_regs, PLATO_DDR_PUBL_DSGCR_OFFSET, 0x0020641F); + + plato_write_reg32(ddra_publ_regs, PLATO_DDR_PUBL_MR0_OFFSET, 0x00000114); + plato_write_reg32(ddra_publ_regs, PLATO_DDR_PUBL_MR1_OFFSET, 0x00000000); + plato_write_reg32(ddra_publ_regs, PLATO_DDR_PUBL_MR2_OFFSET, 0x00000028); + plato_write_reg32(ddra_publ_regs, PLATO_DDR_PUBL_MR3_OFFSET, 0x00000000); + + plato_write_reg32(ddrb_publ_regs, PLATO_DDR_PUBL_MR0_OFFSET, 0x00000114); + plato_write_reg32(ddrb_publ_regs, PLATO_DDR_PUBL_MR1_OFFSET, 0x00000000); + plato_write_reg32(ddrb_publ_regs, PLATO_DDR_PUBL_MR2_OFFSET, 0x00000028); + plato_write_reg32(ddrb_publ_regs, PLATO_DDR_PUBL_MR3_OFFSET, 0x00000000); + + plato_write_reg32(ddra_publ_regs, PLATO_DDR_PUBL_DTPR0_OFFSET, 0x040F0406); + plato_write_reg32(ddrb_publ_regs, PLATO_DDR_PUBL_DTPR0_OFFSET, 0x040F0406); + + plato_write_reg32(ddra_publ_regs, PLATO_DDR_PUBL_DTPR1_OFFSET, 0x28110402); + plato_write_reg32(ddrb_publ_regs, PLATO_DDR_PUBL_DTPR1_OFFSET, 0x28110402); + + plato_write_reg32(ddra_publ_regs, PLATO_DDR_PUBL_DTPR2_OFFSET, 0x00030002); + plato_write_reg32(ddrb_publ_regs, PLATO_DDR_PUBL_DTPR2_OFFSET, 0x00030002); + + plato_write_reg32(ddra_publ_regs, 
PLATO_DDR_PUBL_DTPR3_OFFSET, 0x02000101); + plato_write_reg32(ddrb_publ_regs, PLATO_DDR_PUBL_DTPR3_OFFSET, 0x02000101); + + plato_write_reg32(ddra_publ_regs, PLATO_DDR_PUBL_DTPR4_OFFSET, 0x00190602); + plato_write_reg32(ddrb_publ_regs, PLATO_DDR_PUBL_DTPR4_OFFSET, 0x00190602); + + plato_write_reg32(ddra_publ_regs, PLATO_DDR_PUBL_DTPR5_OFFSET, 0x0018040B); + plato_write_reg32(ddrb_publ_regs, PLATO_DDR_PUBL_DTPR5_OFFSET, 0x0018040B); + + plato_write_reg32(ddra_publ_regs, PLATO_DDR_PUBL_PGCR1_OFFSET, 0x020046A0); + plato_write_reg32(ddrb_publ_regs, PLATO_DDR_PUBL_PGCR1_OFFSET, 0x020046A0); + + /* + * Phase 4: Trigger PHY initialization: Impedance, PLL, and DDL; + * assert PHY reset + */ + plato_write_reg32(ddra_publ_regs, PLATO_DDR_PUBL_PIR_OFFSET, 0x00000073); + plato_write_reg32(ddrb_publ_regs, PLATO_DDR_PUBL_PIR_OFFSET, 0x00000073); + + /* + * Phase 5: Monitor PHY initialization status by polling the + * PUB register PGSR0 + */ + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_SWCTL, 0x00000000); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_SWCTL, 0x00000000); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_DFIMISC, 0x00000001); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_DFIMISC, 0x00000001); + + plato_write_reg32(ddra_ctrl_regs, PLATO_DDR_CTRL_SWCTL, 0x00000001); + plato_write_reg32(ddrb_ctrl_regs, PLATO_DDR_CTRL_SWCTL, 0x00000001); + + /* + * Phase 6: Indicate to the PUB that the controller will perform SDRAM + * initialization by setting PIR.INIT and PIR.CTLDINIT, and + * poll PGSR0.IDONE + */ + plato_write_reg32(ddra_publ_regs, PLATO_DDR_PUBL_PIR_OFFSET, 0x00040001); + plato_write_reg32(ddrb_publ_regs, PLATO_DDR_PUBL_PIR_OFFSET, 0x00040001); + + /* + * Phase 8: Wait for DWC_ddr_umctl2 to move to "normal" operating + * mode by monitoring STAT.operating_mode signal + */ + poll_pr(dev, ddra_ctrl_regs, PLATO_DDR_CTRL_STAT, 0x01, 0x01, 10, 100); +#if defined(PLATO_DUAL_CHANNEL_DDR) + poll_pr(dev, ddrb_ctrl_regs, PLATO_DDR_CTRL_STAT, 0x01, 0x01, 
10, 100); +#endif + plato_sleep_us(100); + + /* Getting GPU And DDR A out of reset */ +#if defined(PLATO_DUAL_CHANNEL_DDR) + plato_write_reg32(aon_regs, PLATO_AON_CR_RESET_CTRL, 0x00000F12); + poll_pr(dev, aon_regs, + PLATO_AON_CR_RESET_CTRL, + 0x00000F12, 0x00000F12, -1, 100); +#else + plato_write_reg32(aon_regs, PLATO_AON_CR_RESET_CTRL, 0x00000312); + poll_pr(dev, aon_regs, + PLATO_AON_CR_RESET_CTRL, + 0x00000312, 0x00000312, -1, 100); +#endif + + /* setting CR_ISO_CTRL:CR_GPU_CLK_E */ + plato_write_reg32(aon_regs, PLATO_AON_CR_ISO_CTRL, 0x000001F); + + return PLATO_INIT_SUCCESS; +} + +#else /* Actual HW init */ +static int plato_hw_init(struct plato_device *plato) +{ + int err = 0; + u32 max_restart = PLATO_PDP_RELOADS_MAX; + u32 restart_count = 0; + + PLATO_CHECKPOINT(plato); + + /* Config Plato until PDP registers become accessible */ + do { + #if defined(PLATO_DUAL_CHANNEL_DDR) + err = plato_dual_channel_init(plato); + #else + err = plato_single_channel_init(plato); + #endif + + restart_count++; + + if (err == PLATO_INIT_SUCCESS) + break; + + } while (restart_count < max_restart && + err == PLATO_INIT_RETRY); + + plato_dev_info(dev, "%s: status %d, number of tries %d", + __func__, err, restart_count); + + return err; +} + +#endif + +int plato_cfg_init(struct plato_device *plato) +{ + #if defined(EMULATOR) + return plato_emu_init(plato); + #elif defined(VIRTUAL_PLATFORM) + return PLATO_INIT_SUCCESS; + #else + return plato_hw_init(plato); + #endif +} diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_top_regs.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_top_regs.h new file mode 100644 index 000000000000..6d7125a0301b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/plato/plato_top_regs.h @@ -0,0 +1,306 @@ +/*************************************************************************/ /*! +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* Autogenerated - don't edit. Generated from top_regs.def. Regconv 0.2_r110 */ + +#ifndef _PLATO_TOP_REGS_H_ +#define _PLATO_TOP_REGS_H_ + +/* + Register CR_SPI_CLK_CTRL +*/ +#define PLATO_TOP_CR_SPI_CLK_CTRL 0x0000 +#define PLATO_CR_SPIV1_DIV_0_MASK 0x00007000U +#define PLATO_CR_SPIV1_DIV_0_SHIFT 12 +#define PLATO_CR_SPIV1_DIV_0_SIGNED 0 + +#define PLATO_CR_SPIV0_DIV_0_MASK 0x00000300U +#define PLATO_CR_SPIV0_DIV_0_SHIFT 8 +#define PLATO_CR_SPIV0_DIV_0_SIGNED 0 + +#define PLATO_CR_SPIG_GATE_EN_MASK 0x00000010U +#define PLATO_CR_SPIG_GATE_EN_SHIFT 4 +#define PLATO_CR_SPIG_GATE_EN_SIGNED 0 + +#define PLATO_CR_CS_SPI_0_SW_MASK 0x00000001U +#define PLATO_CR_CS_SPI_0_SW_SHIFT 0 +#define PLATO_CR_CS_SPI_0_SW_SIGNED 0 + +/* + Register CR_PDP_CLK_CTRL +*/ +#define PLATO_TOP_CR_PDP_CLK_CTRL 0x0004 +#define PLATO_CR_PDPV1_DIV_0_MASK 0x00003000U +#define PLATO_CR_PDPV1_DIV_0_SHIFT 12 +#define PLATO_CR_PDPV1_DIV_0_SIGNED 0 + +#define PLATO_CR_PDPV0_DIV_0_MASK 0x00000300U +#define PLATO_CR_PDPV0_DIV_0_SHIFT 8 +#define PLATO_CR_PDPV0_DIV_0_SIGNED 0 + +#define PLATO_CR_PDPG_GATE_EN_MASK 0x00000010U +#define PLATO_CR_PDPG_GATE_EN_SHIFT 4 +#define PLATO_CR_PDPG_GATE_EN_SIGNED 0 + +#define PLATO_CR_CS_PDP_0_SW_MASK 0x00000001U +#define PLATO_CR_CS_PDP_0_SW_SHIFT 0 +#define PLATO_CR_CS_PDP_0_SW_SIGNED 0 + +/* + Register CR_HDMI_CEC_CLK_CTRL +*/ +#define 
PLATO_TOP_CR_HDMI_CEC_CLK_CTRL 0x0008 +#define PLATO_CR_HDMICECV2_DIV_0_MASK 0x03FF0000U +#define PLATO_CR_HDMICECV2_DIV_0_SHIFT 16 +#define PLATO_CR_HDMICECV2_DIV_0_SIGNED 0 + +#define PLATO_CR_HDMICECV1_DIV_0_MASK 0x00001F00U +#define PLATO_CR_HDMICECV1_DIV_0_SHIFT 8 +#define PLATO_CR_HDMICECV1_DIV_0_SIGNED 0 + +#define PLATO_CR_HDMICECV0_DIV_0_MASK 0x00000003U +#define PLATO_CR_HDMICECV0_DIV_0_SHIFT 0 +#define PLATO_CR_HDMICECV0_DIV_0_SIGNED 0 + +/* + Register CR_HDMI_CLK_CTRL +*/ +#define PLATO_TOP_CR_HDMI_CLK_CTRL 0x000C +#define PLATO_CR_HDMIV1_DIV_0_MASK 0x0000F000U +#define PLATO_CR_HDMIV1_DIV_0_SHIFT 12 +#define PLATO_CR_HDMIV1_DIV_0_SIGNED 0 + +#define PLATO_CR_HDMIV0_DIV_0_MASK 0x00000300U +#define PLATO_CR_HDMIV0_DIV_0_SHIFT 8 +#define PLATO_CR_HDMIV0_DIV_0_SIGNED 0 + +#define PLATO_CR_HDMIG_GATE_EN_MASK 0x00000010U +#define PLATO_CR_HDMIG_GATE_EN_SHIFT 4 +#define PLATO_CR_HDMIG_GATE_EN_SIGNED 0 + +#define PLATO_CR_CS_HDMI_0_SW_MASK 0x00000001U +#define PLATO_CR_CS_HDMI_0_SW_SHIFT 0 +#define PLATO_CR_CS_HDMI_0_SW_SIGNED 0 + +/* + Register CR_DDR_CLK_CTRL +*/ +#define PLATO_TOP_CR_DDR_CLK_CTRL 0x0010 +#define PLATO_CR_DDRBG_GATE_EN_MASK 0x00000010U +#define PLATO_CR_DDRBG_GATE_EN_SHIFT 4 +#define PLATO_CR_DDRBG_GATE_EN_SIGNED 0 + +#define PLATO_CR_DDRAG_GATE_EN_MASK 0x00000001U +#define PLATO_CR_DDRAG_GATE_EN_SHIFT 0 +#define PLATO_CR_DDRAG_GATE_EN_SIGNED 0 + +/* + Register CR_GPU_CLK_CTRL +*/ +#define PLATO_TOP_CR_GPU_CLK_CTRL 0x0014 +#define PLATO_CR_GPUD_DEL_0_MASK 0x0003FF00U +#define PLATO_CR_GPUD_DEL_0_SHIFT 8 +#define PLATO_CR_GPUD_DEL_0_SIGNED 0 + +#define PLATO_CR_GPUV_DIV_0_MASK 0x00000030U +#define PLATO_CR_GPUV_DIV_0_SHIFT 4 +#define PLATO_CR_GPUV_DIV_0_SIGNED 0 + +#define PLATO_CR_GPUG_GATE_EN_MASK 0x00000001U +#define PLATO_CR_GPUG_GATE_EN_SHIFT 0 +#define PLATO_CR_GPUG_GATE_EN_SIGNED 0 + +/* + Register CR_UART_CLK_CTRL +*/ +#define PLATO_TOP_CR_UART_CLK_CTRL 0x0018 +#define PLATO_CR_UARTG_GATE_EN_MASK 0x00000001U +#define 
PLATO_CR_UARTG_GATE_EN_SHIFT 0 +#define PLATO_CR_UARTG_GATE_EN_SIGNED 0 + +/* + Register CR_I2C_CLK_CTRL +*/ +#define PLATO_TOP_CR_I2C_CLK_CTRL 0x001C +#define PLATO_CR_I2CG_GATE_EN_MASK 0x00000001U +#define PLATO_CR_I2CG_GATE_EN_SHIFT 0 +#define PLATO_CR_I2CG_GATE_EN_SIGNED 0 + +/* + Register CR_SENSOR_CLK_CTRL +*/ +#define PLATO_TOP_CR_SENSOR_CLK_CTRL 0x0020 +#define PLATO_CR_SNRV_DIV_0_MASK 0x000000F0U +#define PLATO_CR_SNRV_DIV_0_SHIFT 4 +#define PLATO_CR_SNRV_DIV_0_SIGNED 0 + +#define PLATO_CR_SNRG_GATE_EN_MASK 0x00000001U +#define PLATO_CR_SNRG_GATE_EN_SHIFT 0 +#define PLATO_CR_SNRG_GATE_EN_SIGNED 0 + +/* + Register CR_WDT_CLK_CTRL +*/ +#define PLATO_TOP_CR_WDT_CLK_CTRL 0x0024 +#define PLATO_CR_WDTV_DIV_0_MASK 0x00003FF0U +#define PLATO_CR_WDTV_DIV_0_SHIFT 4 +#define PLATO_CR_WDTV_DIV_0_SIGNED 0 + +#define PLATO_CR_WDTG_GATE_EN_MASK 0x00000001U +#define PLATO_CR_WDTG_GATE_EN_SHIFT 0 +#define PLATO_CR_WDTG_GATE_EN_SIGNED 0 + +/* + Register CR_USB_CLK_ENABLE +*/ +#define PLATO_TOP_CR_USB_CLK_ENABLE 0x0028 +#define PLATO_CR_USB_CLK_ENABLE_MASK 0x00000001U +#define PLATO_CR_USB_CLK_ENABLE_SHIFT 0 +#define PLATO_CR_USB_CLK_ENABLE_SIGNED 0 + +/* + Register CR_RING_OSC_CTRL +*/ +#define PLATO_TOP_CR_RING_OSC_CTRL 0x0030 +#define PLATO_CR_OSC_EN_MASK 0x00000001U +#define PLATO_CR_OSC_EN_SHIFT 0 +#define PLATO_CR_OSC_EN_SIGNED 0 + +/* + Register CR_RING_OSC0_VAL +*/ +#define PLATO_TOP_CR_RING_OSC0_VAL 0x0034 +#define PLATO_CR_RING_OSC0_VAL_MASK 0xFFFFFFFFU +#define PLATO_CR_RING_OSC0_VAL_SHIFT 0 +#define PLATO_CR_RING_OSC0_VAL_SIGNED 0 + +/* + Register CR_RING_OSC1_VAL +*/ +#define PLATO_TOP_CR_RING_OSC1_VAL 0x0038 +#define PLATO_CR_RING_OSC1_VAL_MASK 0xFFFFFFFFU +#define PLATO_CR_RING_OSC1_VAL_SHIFT 0 +#define PLATO_CR_RING_OSC1_VAL_SIGNED 0 + +/* + Register CR_RING_OSC2_VAL +*/ +#define PLATO_TOP_CR_RING_OSC2_VAL 0x003C +#define PLATO_CR_RING_OSC2_VAL_MASK 0xFFFFFFFFU +#define PLATO_CR_RING_OSC2_VAL_SHIFT 0 +#define PLATO_CR_RING_OSC2_VAL_SIGNED 0 + +/* + Register 
CR_RING_OSC3_VAL +*/ +#define PLATO_TOP_CR_RING_OSC3_VAL 0x0040 +#define PLATO_CR_RING_OSC3_VAL_MASK 0xFFFFFFFFU +#define PLATO_CR_RING_OSC3_VAL_SHIFT 0 +#define PLATO_CR_RING_OSC3_VAL_SIGNED 0 + +/* + Register CR_PCI_CTRL +*/ +#define PLATO_TOP_CR_PCI_CTRL 0x0080 +#define PLATO_CR_PCI_I_CLK_IN_NS_MASK 0xFFFF0000U +#define PLATO_CR_PCI_I_CLK_IN_NS_SHIFT 16 +#define PLATO_CR_PCI_I_CLK_IN_NS_SIGNED 0 + +#define PLATO_CR_PCI_AXPCIEATTR_MASK 0x00000007U +#define PLATO_CR_PCI_AXPCIEATTR_SHIFT 0 +#define PLATO_CR_PCI_AXPCIEATTR_SIGNED 0 + +/* + Register CR_MAIL_BOX +*/ +#define PLATO_TOP_CR_MAIL_BOX 0x0084 +#define PLATO_CR_MAIL_BOX_MASK 0xFFFFFFFFU +#define PLATO_CR_MAIL_BOX_SHIFT 0 +#define PLATO_CR_MAIL_BOX_SIGNED 0 + +/* + Register CR_PCI_INT_MASK +*/ +#define PLATO_TOP_CR_PCI_INT_MASK 0x0088 +#define PLATO_CR_PCI_INT_MASK_MASK 0xFFFFFFFFU +#define PLATO_CR_PCI_INT_MASK_SHIFT 0 +#define PLATO_CR_PCI_INT_MASK_SIGNED 0 + +/* + Register CR_PCI_PHY_STATUS +*/ +#define PLATO_TOP_CR_PCI_PHY_STATUS 0x008C +#define PLATO_CR_PCI_PHY_READY_MASK 0x00000001U +#define PLATO_CR_PCI_PHY_READY_SHIFT 0 +#define PLATO_CR_PCI_PHY_READY_SIGNED 0 + +/* + Register CR_INT_STATUS +*/ +#define PLATO_TOP_CR_INT_STATUS 0x0090 +#define PLATO_CR_INT_STATUS_MASK 0x00FFFFFFU +#define PLATO_CR_INT_STATUS_SHIFT 0 +#define PLATO_CR_INT_STATUS_SIGNED 0 + +/* + Register CR_PLATO_REV +*/ +#define PLATO_TOP_CR_PLATO_REV 0x009C +#define PLATO_CR_PLATO_MAINT_REV_MASK 0x000000FFU +#define PLATO_CR_PLATO_MAINT_REV_SHIFT 0 +#define PLATO_CR_PLATO_MAINT_REV_SIGNED 0 + +#define PLATO_CR_PLATO_MINOR_REV_MASK 0x0000FF00U +#define PLATO_CR_PLATO_MINOR_REV_SHIFT 8 +#define PLATO_CR_PLATO_MINOR_REV_SIGNED 0 + +#define PLATO_CR_PLATO_MAJOR_REV_MASK 0x00FF0000U +#define PLATO_CR_PLATO_MAJOR_REV_SHIFT 16 +#define PLATO_CR_PLATO_MAJOR_REV_SIGNED 0 + +#define PLATO_CR_PLATO_DESIGNER_MASK 0xFF000000U +#define PLATO_CR_PLATO_DESIGNER_SHIFT 24 +#define PLATO_CR_PLATO_DESIGNER_SIGNED 0 + +#endif /* _PLATO_TOP_REGS_H_ */ + 
+/***************************************************************************** + End of file (plato_top_regs.h) +*****************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_buffer_sync.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_buffer_sync.c new file mode 100644 index 000000000000..722989d32e90 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_buffer_sync.c @@ -0,0 +1,592 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Title Linux buffer sync interface +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#include "services_kernel_client.h" +#include "pvr_dma_resv.h" +#include "pvr_buffer_sync.h" +#include "pvr_buffer_sync_shared.h" +#include "pvr_drv.h" +#include "pvr_fence.h" + + +struct pvr_buffer_sync_context { + struct mutex ctx_lock; + struct pvr_fence_context *fence_ctx; + struct ww_acquire_ctx acquire_ctx; +}; + +struct pvr_buffer_sync_check_data { + struct dma_fence_cb base; + + u32 nr_fences; + struct pvr_fence **fences; +}; + +struct pvr_buffer_sync_append_data { + struct pvr_buffer_sync_context *ctx; + + u32 nr_pmrs; + struct _PMR_ **pmrs; + u32 *pmr_flags; + + struct pvr_fence *update_fence; + struct pvr_buffer_sync_check_data *check_data; +}; + + +static struct dma_resv * +pmr_reservation_object_get(struct _PMR_ *pmr) +{ + struct dma_buf *dmabuf; + + dmabuf = PhysmemGetDmaBuf(pmr); + if (dmabuf) + return dmabuf->resv; + + return NULL; +} + +static int +pvr_buffer_sync_pmrs_lock(struct pvr_buffer_sync_context *ctx, + u32 nr_pmrs, + struct _PMR_ **pmrs) +{ + struct dma_resv *resv, *cresv = NULL, *lresv = NULL; + int i, err; + struct ww_acquire_ctx *acquire_ctx = &ctx->acquire_ctx; + + mutex_lock(&ctx->ctx_lock); + + ww_acquire_init(acquire_ctx, &reservation_ww_class); +retry: + for (i = 0; i < nr_pmrs; i++) { + resv = pmr_reservation_object_get(pmrs[i]); + if (!resv) { + pr_err("%s: Failed to get reservation object from pmr %p\n", + __func__, pmrs[i]); + err = -EINVAL; + goto fail; + } + + if (resv != lresv) { + err = ww_mutex_lock_interruptible(&resv->lock, + acquire_ctx); + if (err) { + cresv = (err == -EDEADLK) ? 
resv : NULL; + goto fail; + } + } else { + lresv = NULL; + } + } + + ww_acquire_done(acquire_ctx); + + return 0; + +fail: + while (i--) { + resv = pmr_reservation_object_get(pmrs[i]); + if (WARN_ON_ONCE(!resv)) + continue; + ww_mutex_unlock(&resv->lock); + } + + if (lresv) + ww_mutex_unlock(&lresv->lock); + + if (cresv) { + err = ww_mutex_lock_slow_interruptible(&cresv->lock, + acquire_ctx); + if (!err) { + lresv = cresv; + cresv = NULL; + goto retry; + } + } + + ww_acquire_fini(acquire_ctx); + + mutex_unlock(&ctx->ctx_lock); + return err; +} + +static void +pvr_buffer_sync_pmrs_unlock(struct pvr_buffer_sync_context *ctx, + u32 nr_pmrs, + struct _PMR_ **pmrs) +{ + struct dma_resv *resv; + int i; + struct ww_acquire_ctx *acquire_ctx = &ctx->acquire_ctx; + + for (i = 0; i < nr_pmrs; i++) { + resv = pmr_reservation_object_get(pmrs[i]); + if (WARN_ON_ONCE(!resv)) + continue; + ww_mutex_unlock(&resv->lock); + } + + ww_acquire_fini(acquire_ctx); + + mutex_unlock(&ctx->ctx_lock); +} + +static u32 +pvr_buffer_sync_pmrs_fence_count(u32 nr_pmrs, struct _PMR_ **pmrs, + u32 *pmr_flags) +{ + struct dma_resv *resv; + struct dma_resv_list *resv_list; + struct dma_fence *fence; + u32 fence_count = 0; + bool exclusive; + int i; + + for (i = 0; i < nr_pmrs; i++) { + exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE); + + resv = pmr_reservation_object_get(pmrs[i]); + if (WARN_ON_ONCE(!resv)) + continue; + + resv_list = dma_resv_get_list(resv); + fence = dma_resv_get_excl(resv); + + if (fence && + (!exclusive || !resv_list || !resv_list->shared_count)) + fence_count++; + + if (exclusive && resv_list) + fence_count += resv_list->shared_count; + } + + return fence_count; +} + +static struct pvr_buffer_sync_check_data * +pvr_buffer_sync_check_fences_create(struct pvr_fence_context *fence_ctx, + PSYNC_CHECKPOINT_CONTEXT sync_checkpoint_ctx, + u32 nr_pmrs, + struct _PMR_ **pmrs, + u32 *pmr_flags) +{ + struct pvr_buffer_sync_check_data *data; + struct dma_resv *resv; + struct 
dma_resv_list *resv_list; + struct dma_fence *fence; + u32 fence_count; + bool exclusive; + int i, j; + int err; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return NULL; + + fence_count = pvr_buffer_sync_pmrs_fence_count(nr_pmrs, pmrs, + pmr_flags); + if (fence_count) { + data->fences = kcalloc(fence_count, sizeof(*data->fences), + GFP_KERNEL); + if (!data->fences) + goto err_check_data_free; + } + + for (i = 0; i < nr_pmrs; i++) { + resv = pmr_reservation_object_get(pmrs[i]); + if (WARN_ON_ONCE(!resv)) + continue; + + exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE); + if (!exclusive) { + err = dma_resv_reserve_shared(resv +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) + , 1 +#endif + ); + if (err) + goto err_destroy_fences; + } + + resv_list = dma_resv_get_list(resv); + fence = dma_resv_get_excl(resv); + + if (fence && + (!exclusive || !resv_list || !resv_list->shared_count)) { + data->fences[data->nr_fences++] = + pvr_fence_create_from_fence(fence_ctx, + sync_checkpoint_ctx, + fence, + PVRSRV_NO_FENCE, + "exclusive check fence"); + if (!data->fences[data->nr_fences - 1]) { + data->nr_fences--; + PVR_FENCE_TRACE(fence, + "waiting on exclusive fence\n"); + WARN_ON(dma_fence_wait(fence, true) <= 0); + } + } + + if (exclusive && resv_list) { + for (j = 0; j < resv_list->shared_count; j++) { + fence = rcu_dereference_protected(resv_list->shared[j], + dma_resv_held(resv)); + data->fences[data->nr_fences++] = + pvr_fence_create_from_fence(fence_ctx, + sync_checkpoint_ctx, + fence, + PVRSRV_NO_FENCE, + "check fence"); + if (!data->fences[data->nr_fences - 1]) { + data->nr_fences--; + PVR_FENCE_TRACE(fence, + "waiting on non-exclusive fence\n"); + WARN_ON(dma_fence_wait(fence, true) <= 0); + } + } + } + } + + WARN_ON((i != nr_pmrs) || (data->nr_fences != fence_count)); + + return data; + +err_destroy_fences: + for (i = 0; i < data->nr_fences; i++) + pvr_fence_destroy(data->fences[i]); + kfree(data->fences); +err_check_data_free: + kfree(data); 
+ return NULL; +} + +static void +pvr_buffer_sync_check_fences_destroy(struct pvr_buffer_sync_check_data *data) +{ + int i; + + for (i = 0; i < data->nr_fences; i++) + pvr_fence_destroy(data->fences[i]); + + kfree(data->fences); + kfree(data); +} + +struct pvr_buffer_sync_context * +pvr_buffer_sync_context_create(struct device *dev, const char *name) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct pvr_drm_private *priv = ddev->dev_private; + struct pvr_buffer_sync_context *ctx; + int err; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) { + err = -ENOMEM; + goto err_exit; + } + + ctx->fence_ctx = pvr_fence_context_create(priv->dev_node, + priv->fence_status_wq, + name); + if (!ctx->fence_ctx) { + err = -ENOMEM; + goto err_free_ctx; + } + + mutex_init(&ctx->ctx_lock); + + return ctx; + +err_free_ctx: + kfree(ctx); +err_exit: + return ERR_PTR(err); +} + +void +pvr_buffer_sync_context_destroy(struct pvr_buffer_sync_context *ctx) +{ + pvr_fence_context_destroy(ctx->fence_ctx); + kfree(ctx); +} + +int +pvr_buffer_sync_resolve_and_create_fences(struct pvr_buffer_sync_context *ctx, + PSYNC_CHECKPOINT_CONTEXT sync_checkpoint_ctx, + u32 nr_pmrs, + struct _PMR_ **pmrs, + u32 *pmr_flags, + u32 *nr_fence_checkpoints_out, + PSYNC_CHECKPOINT **fence_checkpoints_out, + PSYNC_CHECKPOINT *update_checkpoints_out, + struct pvr_buffer_sync_append_data **data_out) +{ + struct pvr_buffer_sync_append_data *data; + PSYNC_CHECKPOINT *fence_checkpoints; + const size_t data_size = sizeof(*data); + const size_t pmrs_size = sizeof(*pmrs) * nr_pmrs; + const size_t pmr_flags_size = sizeof(*pmr_flags) * nr_pmrs; + int i; + int j; + int err; + + if (unlikely((nr_pmrs && !(pmrs && pmr_flags)) || + !nr_fence_checkpoints_out || !fence_checkpoints_out || + !update_checkpoints_out)) + return -EINVAL; + + for (i = 0; i < nr_pmrs; i++) { + if (unlikely(!(pmr_flags[i] & PVR_BUFFER_FLAG_MASK))) { + pr_err("%s: Invalid flags %#08x for pmr %p\n", + __func__, pmr_flags[i], pmrs[i]); + 
return -EINVAL; + } + } + +#if defined(NO_HARDWARE) + /* + * For NO_HARDWARE there's no checking or updating of sync checkpoints + * which means SW waits on our fences will cause a deadlock (since they + * will never be signalled). Avoid this by not creating any fences. + */ + nr_pmrs = 0; +#endif + + if (!nr_pmrs) { + *nr_fence_checkpoints_out = 0; + *fence_checkpoints_out = NULL; + *update_checkpoints_out = NULL; + *data_out = NULL; + + return 0; + } + + data = kzalloc(data_size + pmrs_size + pmr_flags_size, GFP_KERNEL); + if (unlikely(!data)) + return -ENOMEM; + + data->ctx = ctx; + data->pmrs = (struct _PMR_ **)(void *)(data + 1); + data->pmr_flags = (u32 *)(void *)(data->pmrs + nr_pmrs); + + /* + * It's expected that user space will provide a set of unique PMRs + * but, as a PMR can have multiple handles, it's still possible to + * end up here with duplicates. Take this opportunity to filter out + * any remaining duplicates (updating flags when necessary) before + * trying to process them further. 
+ */ + for (i = 0; i < nr_pmrs; i++) { + for (j = 0; j < data->nr_pmrs; j++) { + if (data->pmrs[j] == pmrs[i]) { + data->pmr_flags[j] |= pmr_flags[i]; + break; + } + } + + if (j == data->nr_pmrs) { + data->pmrs[j] = pmrs[i]; + data->pmr_flags[j] = pmr_flags[i]; + data->nr_pmrs++; + } + } + + err = pvr_buffer_sync_pmrs_lock(ctx, data->nr_pmrs, data->pmrs); + if (unlikely(err)) { + pr_err("%s: failed to lock pmrs (errno=%d)\n", + __func__, err); + goto err_free_data; + } + + /* create the check data */ + data->check_data = pvr_buffer_sync_check_fences_create(ctx->fence_ctx, + sync_checkpoint_ctx, + data->nr_pmrs, + data->pmrs, + data->pmr_flags); + if (unlikely(!data->check_data)) { + err = -ENOMEM; + goto err_pmrs_unlock; + } + + fence_checkpoints = kcalloc(data->check_data->nr_fences, + sizeof(*fence_checkpoints), + GFP_KERNEL); + if (fence_checkpoints) { + pvr_fence_get_checkpoints(data->check_data->fences, + data->check_data->nr_fences, + fence_checkpoints); + } else { + if (unlikely(data->check_data->nr_fences)) { + err = -ENOMEM; + goto err_free_check_data; + } + } + + /* create the update fence */ + data->update_fence = pvr_fence_create(ctx->fence_ctx, + sync_checkpoint_ctx, + SYNC_CHECKPOINT_FOREIGN_CHECKPOINT, "update fence"); + if (unlikely(!data->update_fence)) { + err = -ENOMEM; + goto err_free_fence_checkpoints; + } + + /* + * We need to clean up the fences once the HW has finished with them. + * We can do this using fence callbacks. However, instead of adding a + * callback to every fence, which would result in more work, we can + * simply add one to the update fence since this will be the last fence + * to be signalled. This callback can do all the necessary clean up. + * + * Note: we take an additional reference on the update fence in case + * it signals before we can add it to a reservation object. 
+ */ + PVR_FENCE_TRACE(&data->update_fence->base, + "create fence calling dma_fence_get\n"); + dma_fence_get(&data->update_fence->base); + + *nr_fence_checkpoints_out = data->check_data->nr_fences; + *fence_checkpoints_out = fence_checkpoints; + *update_checkpoints_out = pvr_fence_get_checkpoint(data->update_fence); + *data_out = data; + + return 0; + +err_free_fence_checkpoints: + kfree(fence_checkpoints); +err_free_check_data: + pvr_buffer_sync_check_fences_destroy(data->check_data); +err_pmrs_unlock: + pvr_buffer_sync_pmrs_unlock(ctx, data->nr_pmrs, data->pmrs); +err_free_data: + kfree(data); + return err; +} + +void +pvr_buffer_sync_kick_succeeded(struct pvr_buffer_sync_append_data *data) +{ + struct dma_resv *resv; + int i; + + dma_fence_enable_sw_signaling(&data->update_fence->base); + + for (i = 0; i < data->nr_pmrs; i++) { + resv = pmr_reservation_object_get(data->pmrs[i]); + if (WARN_ON_ONCE(!resv)) + continue; + + if (data->pmr_flags[i] & PVR_BUFFER_FLAG_WRITE) { + PVR_FENCE_TRACE(&data->update_fence->base, + "added exclusive fence (%s) to resv %p\n", + data->update_fence->name, resv); + dma_resv_add_excl_fence(resv, + &data->update_fence->base); + } else if (data->pmr_flags[i] & PVR_BUFFER_FLAG_READ) { + PVR_FENCE_TRACE(&data->update_fence->base, + "added non-exclusive fence (%s) to resv %p\n", + data->update_fence->name, resv); + dma_resv_add_shared_fence(resv, + &data->update_fence->base); + } + } + + /* + * Now that the fence has been added to the necessary + * reservation objects we can safely drop the extra reference + * we took in pvr_buffer_sync_resolve_and_create_fences(). 
+ */ + dma_fence_put(&data->update_fence->base); + pvr_buffer_sync_pmrs_unlock(data->ctx, data->nr_pmrs, + data->pmrs); + + /* destroy the check fences */ + pvr_buffer_sync_check_fences_destroy(data->check_data); + /* destroy the update fence */ + pvr_fence_destroy(data->update_fence); + + /* free the append data */ + kfree(data); +} + +void +pvr_buffer_sync_kick_failed(struct pvr_buffer_sync_append_data *data) +{ + + /* drop the extra reference we took on the update fence in + * pvr_buffer_sync_resolve_and_create_fences(). + */ + dma_fence_put(&data->update_fence->base); + + if (data->nr_pmrs > 0) + pvr_buffer_sync_pmrs_unlock(data->ctx, data->nr_pmrs, + data->pmrs); + + /* destroy the check fences */ + pvr_buffer_sync_check_fences_destroy(data->check_data); + /* destroy the update fence */ + pvr_fence_destroy(data->update_fence); + + /* free the append data */ + kfree(data); +} + +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) +enum tag_img_bool +pvr_buffer_sync_checkpoint_ufo_has_signalled(u32 fwaddr, u32 value) +{ + return pvr_fence_checkpoint_ufo_has_signalled(fwaddr, value); +} + +void +pvr_buffer_sync_check_state(void) +{ + pvr_fence_check_state(); +} +#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_buffer_sync.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_buffer_sync.h new file mode 100644 index 000000000000..04371cc6c818 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_buffer_sync.h @@ -0,0 +1,142 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File pvr_buffer_sync.h +@Title PowerVR Linux buffer sync interface +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef PVR_BUFFER_SYNC_H +#define PVR_BUFFER_SYNC_H + +#include +#include +#include + +struct _PMR_; +struct pvr_buffer_sync_context; +struct pvr_buffer_sync_append_data; + +/** + * pvr_buffer_sync_context_create - creates a buffer sync context + * @dev: Linux device + * @name: context name (used for debugging) + * + * pvr_buffer_sync_context_destroy() should be used to clean up the buffer + * sync context. + * + * Return: A buffer sync context or NULL if it fails for any reason. + */ +struct pvr_buffer_sync_context * +pvr_buffer_sync_context_create(struct device *dev, const char *name); + +/** + * pvr_buffer_sync_context_destroy() - frees a buffer sync context + * @ctx: buffer sync context + */ +void +pvr_buffer_sync_context_destroy(struct pvr_buffer_sync_context *ctx); + +/** + * pvr_buffer_sync_resolve_and_create_fences() - create checkpoints from + * buffers + * @ctx: buffer sync context + * @sync_checkpoint_ctx: context in which to create sync checkpoints + * @nr_pmrs: number of buffer objects (PMRs) + * @pmrs: buffer array + * @pmr_flags: internal flags + * @nr_fence_checkpoints_out: returned number of fence sync checkpoints + * @fence_checkpoints_out: returned array of fence sync checkpoints + * @update_checkpoint_out: returned update sync checkpoint + * @data_out: returned buffer sync data + * + * After this call, either pvr_buffer_sync_kick_succeeded() or + * pvr_buffer_sync_kick_failed() must be called. + * + * Return: 0 on success or an error code otherwise. 
+ */ +int +pvr_buffer_sync_resolve_and_create_fences(struct pvr_buffer_sync_context *ctx, + PSYNC_CHECKPOINT_CONTEXT sync_checkpoint_ctx, + u32 nr_pmrs, + struct _PMR_ **pmrs, + u32 *pmr_flags, + u32 *nr_fence_checkpoints_out, + PSYNC_CHECKPOINT **fence_checkpoints_out, + PSYNC_CHECKPOINT *update_checkpoint_out, + struct pvr_buffer_sync_append_data **data_out); + +/** + * pvr_buffer_sync_kick_succeeded() - cleans up after a successful kick + * operation + * @data: buffer sync data returned by + * pvr_buffer_sync_resolve_and_create_fences() + * + * Should only be called following pvr_buffer_sync_resolve_and_create_fences(). + */ +void +pvr_buffer_sync_kick_succeeded(struct pvr_buffer_sync_append_data *data); + +/** + * pvr_buffer_sync_kick_failed() - cleans up after a failed kick operation + * @data: buffer sync data returned by + * pvr_buffer_sync_resolve_and_create_fences() + * + * Should only be called following pvr_buffer_sync_resolve_and_create_fences(). + */ +void +pvr_buffer_sync_kick_failed(struct pvr_buffer_sync_append_data *data); + +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) +/** + * pvr_buffer_sync_checkpoint_ufo_has_signalled() - signals that a checkpoint's + * state has been updated + * @fwaddr: firmware address of the updated checkpoint + * @value: the new value of the checkpoint + */ +enum tag_img_bool +pvr_buffer_sync_checkpoint_ufo_has_signalled(u32 fwaddr, u32 value); + +/** + * pvr_buffer_sync_check_state() - performs a full sync state check + */ +void +pvr_buffer_sync_check_state(void); +#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB)*/ +#endif /* PVR_BUFFER_SYNC_H */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_counting_timeline.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_counting_timeline.c new file mode 100644 index 000000000000..dddcf07b730a --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_counting_timeline.c @@ -0,0 +1,308 @@ +/* vi: set ts=8 sw=8 sts=8: */ 
+/*************************************************************************/ /*! +@File +@Title PowerVR Linux software "counting" timeline fence implementation +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include +#include +#include + +#include "services_kernel_client.h" +#include "pvr_counting_timeline.h" +#include "pvr_sw_fence.h" + +#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) \ + do { \ + if (pfnDumpDebugPrintf) \ + pfnDumpDebugPrintf(pvDumpDebugFile, fmt, \ + ## __VA_ARGS__); \ + else \ + pr_err(fmt "\n", ## __VA_ARGS__); \ + } while (0) + +struct pvr_counting_fence_timeline { + struct pvr_sw_fence_context *context; + + void *dbg_request_handle; + + spinlock_t active_fences_lock; + u64 current_value; /* guarded by active_fences_lock */ + u64 next_value; /* guarded by active_fences_lock */ + struct list_head active_fences; + + struct kref kref; +}; + +struct pvr_counting_fence { + u64 value; + struct dma_fence *fence; + struct list_head active_list_entry; +}; + +void pvr_counting_fence_timeline_dump_timeline( + void *data, + DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, + void *dump_debug_file) +{ + + struct pvr_counting_fence_timeline *timeline = + (struct pvr_counting_fence_timeline *) data; + unsigned long flags; + + spin_lock_irqsave(&timeline->active_fences_lock, flags); + + PVR_DUMPDEBUG_LOG(dump_debug_printf, + dump_debug_file, + "TL:%s SeqNum: %llu/%llu", + pvr_sw_fence_context_name( + timeline->context), + timeline->current_value, + timeline->next_value); + + spin_unlock_irqrestore(&timeline->active_fences_lock, flags); 
+} + +static void +pvr_counting_fence_timeline_debug_request(void *data, u32 verbosity, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + struct pvr_counting_fence_timeline *timeline = + (struct pvr_counting_fence_timeline *)data; + struct pvr_counting_fence *obj; + unsigned long flags; + char value[128]; + + if (DD_VERB_LVL_ENABLED(verbosity, DEBUG_REQUEST_VERBOSITY_MEDIUM)) { + spin_lock_irqsave(&timeline->active_fences_lock, flags); + pvr_sw_fence_context_value_str(timeline->context, value, + sizeof(value)); + PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, + "sw: %s @%s cur=%llu", + pvr_sw_fence_context_name(timeline->context), + value, timeline->current_value); + list_for_each_entry(obj, &timeline->active_fences, + active_list_entry) { + obj->fence->ops->fence_value_str(obj->fence, + value, sizeof(value)); + PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, + " @%s: val=%llu", value, obj->value); + } + spin_unlock_irqrestore(&timeline->active_fences_lock, flags); + } +} + +struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_create( + void *dev_cookie, + const char *name) +{ + PVRSRV_ERROR srv_err; + struct pvr_counting_fence_timeline *timeline = + kmalloc(sizeof(*timeline), GFP_KERNEL); + + if (!timeline) + goto err_out; + + timeline->context = pvr_sw_fence_context_create(name, + "pvr_sw_sync"); + if (!timeline->context) + goto err_free_timeline; + + srv_err = PVRSRVRegisterDbgRequestNotify(&timeline->dbg_request_handle, + dev_cookie, + pvr_counting_fence_timeline_debug_request, + DEBUG_REQUEST_LINUXFENCE, + timeline); + if (srv_err != PVRSRV_OK) { + pr_err("%s: failed to register debug request callback (%s)\n", + __func__, PVRSRVGetErrorString(srv_err)); + goto err_free_timeline_ctx; + } + + timeline->current_value = 0; + timeline->next_value = 1; + kref_init(&timeline->kref); + spin_lock_init(&timeline->active_fences_lock); + INIT_LIST_HEAD(&timeline->active_fences); + +err_out: + return timeline; + 
+err_free_timeline_ctx: + pvr_sw_fence_context_destroy(timeline->context); + +err_free_timeline: + kfree(timeline); + timeline = NULL; + goto err_out; +} + +void pvr_counting_fence_timeline_force_complete( + struct pvr_counting_fence_timeline *timeline) +{ + struct list_head *entry, *tmp; + unsigned long flags; + + spin_lock_irqsave(&timeline->active_fences_lock, flags); + + /* This is just a safety measurement. Normally we should never see any + * unsignaled sw fences when we come here. Warn if we still do! */ + WARN_ON(!list_empty(&timeline->active_fences)); + + list_for_each_safe(entry, tmp, &timeline->active_fences) { + struct pvr_counting_fence *fence = + list_entry(entry, struct pvr_counting_fence, + active_list_entry); + dma_fence_signal(fence->fence); + dma_fence_put(fence->fence); + fence->fence = NULL; + list_del(&fence->active_list_entry); + kfree(fence); + } + spin_unlock_irqrestore(&timeline->active_fences_lock, flags); +} + +static void pvr_counting_fence_timeline_destroy( + struct kref *kref) +{ + struct pvr_counting_fence_timeline *timeline = + container_of(kref, struct pvr_counting_fence_timeline, kref); + + WARN_ON(!list_empty(&timeline->active_fences)); + + PVRSRVUnregisterDbgRequestNotify(timeline->dbg_request_handle); + + pvr_sw_fence_context_destroy(timeline->context); + kfree(timeline); +} + +void pvr_counting_fence_timeline_put( + struct pvr_counting_fence_timeline *timeline) +{ + kref_put(&timeline->kref, pvr_counting_fence_timeline_destroy); +} + +struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_get( + struct pvr_counting_fence_timeline *timeline) +{ + if (!timeline) + return NULL; + kref_get(&timeline->kref); + return timeline; +} + +struct dma_fence *pvr_counting_fence_create( + struct pvr_counting_fence_timeline *timeline, u64 *sync_pt_idx) +{ + unsigned long flags; + struct dma_fence *sw_fence; + struct pvr_counting_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL); + + if (!fence) + return NULL; + + sw_fence = 
pvr_sw_fence_create(timeline->context); + if (!sw_fence) + goto err_free_fence; + + fence->fence = dma_fence_get(sw_fence); + + spin_lock_irqsave(&timeline->active_fences_lock, flags); + + fence->value = timeline->next_value++; + if (sync_pt_idx) + *sync_pt_idx = fence->value; + + list_add_tail(&fence->active_list_entry, &timeline->active_fences); + + spin_unlock_irqrestore(&timeline->active_fences_lock, flags); + + /* Counting fences can be signalled any time after creation */ + dma_fence_enable_sw_signaling(sw_fence); + + return sw_fence; + +err_free_fence: + kfree(fence); + return NULL; +} + +bool pvr_counting_fence_timeline_inc( + struct pvr_counting_fence_timeline *timeline, u64 *sync_pt_idx) +{ + struct list_head *entry, *tmp; + unsigned long flags; + bool res; + + spin_lock_irqsave(&timeline->active_fences_lock, flags); + + if (timeline->current_value == timeline->next_value-1) { + res = false; + goto exit_unlock; + } + + timeline->current_value++; + + if (sync_pt_idx) { + *sync_pt_idx = timeline->current_value; + } + + list_for_each_safe(entry, tmp, &timeline->active_fences) { + struct pvr_counting_fence *fence = + list_entry(entry, struct pvr_counting_fence, + active_list_entry); + if (fence->value <= timeline->current_value) { + dma_fence_signal(fence->fence); + dma_fence_put(fence->fence); + fence->fence = NULL; + list_del(&fence->active_list_entry); + kfree(fence); + } + } + + res = true; + +exit_unlock: + spin_unlock_irqrestore(&timeline->active_fences_lock, flags); + + return res; +} diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_counting_timeline.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_counting_timeline.h new file mode 100644 index 000000000000..ab8bbf7a0e98 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_counting_timeline.h @@ -0,0 +1,70 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! 
+@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__PVR_COUNTING_TIMELINE_H__) +#define __PVR_COUNTING_TIMELINE_H__ + +#include "pvr_linux_fence.h" + +struct pvr_counting_fence_timeline; + +void pvr_counting_fence_timeline_dump_timeline( + void *data, + DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, + void *dump_debug_file); + +struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_create( + void *dev_cookie, + const char *name); +void pvr_counting_fence_timeline_put( + struct pvr_counting_fence_timeline *fence_timeline); +struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_get( + struct pvr_counting_fence_timeline *fence_timeline); +struct dma_fence *pvr_counting_fence_create( + struct pvr_counting_fence_timeline *fence_timeline, u64 *sync_pt_idx); +bool pvr_counting_fence_timeline_inc( + struct pvr_counting_fence_timeline *fence_timeline, u64 *sync_pt_idx); +void pvr_counting_fence_timeline_force_complete( + struct pvr_counting_fence_timeline *fence_timeline); + +#endif /* !defined(__PVR_COUNTING_TIMELINE_H__) */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_dma_resv.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_dma_resv.h new file mode 100644 index 000000000000..c95069d8d31f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_dma_resv.h @@ -0,0 +1,71 @@ +/* vi: set ts=8 sw=8 sts=8: */ 
+/*************************************************************************/ /*! +@Title Kernel reservation object compatibility header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Per-version macros to allow code to seamlessly use older kernel +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __PVR_DMA_RESV_H__ +#define __PVR_DMA_RESV_H__ + +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) +#include +#else +#include + +/* Reservation object types */ +#define dma_resv reservation_object +#define dma_resv_list reservation_object_list + +/* Reservation object functions */ +#define dma_resv_add_excl_fence reservation_object_add_excl_fence +#define dma_resv_add_shared_fence reservation_object_add_shared_fence +#define dma_resv_fini reservation_object_fini +#define dma_resv_get_excl reservation_object_get_excl +#define dma_resv_get_list reservation_object_get_list +#define dma_resv_held reservation_object_held +#define dma_resv_init reservation_object_init +#define dma_resv_reserve_shared reservation_object_reserve_shared +#define dma_resv_test_signaled_rcu reservation_object_test_signaled_rcu +#define dma_resv_wait_timeout_rcu reservation_object_wait_timeout_rcu +#endif + +#endif /* __PVR_DMA_RESV_H__ */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_drm.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_drm.c new file mode 100644 index 000000000000..e1ceb91a3de9 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_drm.c @@ -0,0 +1,305 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! 
+@File +@Title PowerVR DRM driver +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#include + +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#include +#include +#include +#include +#else +#include /* include before drm_crtc.h for kernels older than 3.9 */ +#endif + +#include +#include +#include +#include +#include +#include + +#include "module_common.h" +#include "pvr_drm.h" +#include "pvr_drv.h" +#include "pvrversion.h" +#include "services_kernel_client.h" + +#include "kernel_compatibility.h" + +#define PVR_DRM_DRIVER_NAME PVR_DRM_NAME +#define PVR_DRM_DRIVER_DESC "Imagination Technologies PVR DRM" +#define PVR_DRM_DRIVER_DATE "20170530" + + +static int pvr_pm_suspend(struct device *dev) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct pvr_drm_private *priv = ddev->dev_private; + + DRM_DEBUG_DRIVER("device %p\n", dev); + + return PVRSRVDeviceSuspend(priv->dev_node); +} + +static int pvr_pm_resume(struct device *dev) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct pvr_drm_private *priv = ddev->dev_private; + + DRM_DEBUG_DRIVER("device %p\n", dev); + + return PVRSRVDeviceResume(priv->dev_node); +} + +const struct dev_pm_ops pvr_pm_ops = { + .suspend = pvr_pm_suspend, + .resume = pvr_pm_resume, +}; + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) +static +#endif +int pvr_drm_load(struct drm_device *ddev, unsigned long flags) +{ + struct pvr_drm_private *priv; + enum PVRSRV_ERROR srv_err; + 
int err, deviceId; + + DRM_DEBUG_DRIVER("device %p\n", ddev->dev); + + dev_set_drvdata(ddev->dev, ddev); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) + /* + * Older kernels do not have render drm_minor member in drm_device, + * so we fallback to primary node for device identification + */ + deviceId = ddev->primary->index; +#else + if (ddev->render) + deviceId = ddev->render->index; + else /* when render node is NULL, fallback to primary node */ + deviceId = ddev->primary->index; +#endif + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + err = -ENOMEM; + goto err_exit; + } + ddev->dev_private = priv; + + if (!ddev->dev->dma_parms) + ddev->dev->dma_parms = &priv->dma_parms; + dma_set_max_seg_size(ddev->dev, DMA_BIT_MASK(32)); + +#if defined(SUPPORT_BUFFER_SYNC) || defined(SUPPORT_NATIVE_FENCE_SYNC) + priv->fence_status_wq = create_freezable_workqueue("pvr_fce_status"); + if (!priv->fence_status_wq) { + DRM_ERROR("failed to create fence status workqueue\n"); + err = -ENOMEM; + goto err_unset_dma_parms; + } +#endif + + srv_err = PVRSRVCommonDeviceCreate(ddev->dev, deviceId, &priv->dev_node); + if (srv_err != PVRSRV_OK) { + DRM_ERROR("failed to create device node for device %p (%s)\n", + ddev->dev, PVRSRVGetErrorString(srv_err)); + if (srv_err == PVRSRV_ERROR_PROBE_DEFER) + err = -EPROBE_DEFER; + else + err = -ENODEV; + goto err_workqueue_destroy; + } + + err = PVRSRVDeviceInit(priv->dev_node); + if (err) { + DRM_ERROR("device %p initialisation failed (err=%d)\n", + ddev->dev, err); + goto err_device_destroy; + } + + drm_mode_config_init(ddev); + + return 0; + +err_device_destroy: + PVRSRVCommonDeviceDestroy(priv->dev_node); +err_workqueue_destroy: +#if defined(SUPPORT_BUFFER_SYNC) || defined(SUPPORT_NATIVE_FENCE_SYNC) + destroy_workqueue(priv->fence_status_wq); +err_unset_dma_parms: +#endif + if (ddev->dev->dma_parms == &priv->dma_parms) + ddev->dev->dma_parms = NULL; + kfree(priv); +err_exit: + return err; +} + +#if (LINUX_VERSION_CODE < 
KERNEL_VERSION(3, 18, 0)) +static +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) +int pvr_drm_unload(struct drm_device *ddev) +#else +void pvr_drm_unload(struct drm_device *ddev) +#endif +{ + struct pvr_drm_private *priv = ddev->dev_private; + + DRM_DEBUG_DRIVER("device %p\n", ddev->dev); + + drm_mode_config_cleanup(ddev); + + PVRSRVDeviceDeinit(priv->dev_node); + + PVRSRVCommonDeviceDestroy(priv->dev_node); + +#if defined(SUPPORT_BUFFER_SYNC) || defined(SUPPORT_NATIVE_FENCE_SYNC) + destroy_workqueue(priv->fence_status_wq); +#endif + + if (ddev->dev->dma_parms == &priv->dma_parms) + ddev->dev->dma_parms = NULL; + + kfree(priv); + ddev->dev_private = NULL; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + return 0; +#endif +} + +static int pvr_drm_open(struct drm_device *ddev, struct drm_file *dfile) +{ + struct pvr_drm_private *priv = ddev->dev_private; + int err; + + if (!try_module_get(THIS_MODULE)) { + DRM_ERROR("failed to get module reference\n"); + return -ENOENT; + } + + err = PVRSRVDeviceOpen(priv->dev_node, dfile); + if (err) + module_put(THIS_MODULE); + + return err; +} + +static void pvr_drm_release(struct drm_device *ddev, struct drm_file *dfile) +{ + struct pvr_drm_private *priv = ddev->dev_private; + + PVRSRVDeviceRelease(priv->dev_node, dfile); + + module_put(THIS_MODULE); +} + +/* + * The DRM global lock is taken for ioctls unless the DRM_UNLOCKED flag is set. 
+ */ +static struct drm_ioctl_desc pvr_drm_ioctls[] = { + DRM_IOCTL_DEF_DRV(PVR_SRVKM_CMD, PVRSRV_BridgeDispatchKM, DRM_RENDER_ALLOW | DRM_UNLOCKED) +}; + +#if defined(CONFIG_COMPAT) +static long pvr_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + unsigned int nr = DRM_IOCTL_NR(cmd); + + if (nr < DRM_COMMAND_BASE) + return drm_compat_ioctl(file, cmd, arg); + + return drm_ioctl(file, cmd, arg); +} +#endif /* defined(CONFIG_COMPAT) */ + +static const struct file_operations pvr_drm_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, +#if defined(CONFIG_COMPAT) + .compat_ioctl = pvr_compat_ioctl, +#endif + .mmap = PVRSRV_MMap, + .poll = drm_poll, + .read = drm_read, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) + .fasync = drm_fasync, +#endif +}; + +const struct drm_driver pvr_drm_generic_driver = { + .driver_features = DRIVER_MODESET | DRIVER_RENDER, + + .dev_priv_size = 0, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + .load = NULL, + .unload = NULL, +#else + .load = pvr_drm_load, + .unload = pvr_drm_unload, +#endif + .open = pvr_drm_open, + .postclose = pvr_drm_release, + + .ioctls = pvr_drm_ioctls, + .num_ioctls = ARRAY_SIZE(pvr_drm_ioctls), + .fops = &pvr_drm_fops, + + .name = PVR_DRM_DRIVER_NAME, + .desc = PVR_DRM_DRIVER_DESC, + .date = PVR_DRM_DRIVER_DATE, + .major = PVRVERSION_MAJ, + .minor = PVRVERSION_MIN, + .patchlevel = PVRVERSION_BUILD, +}; diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_drv.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_drv.h new file mode 100644 index 000000000000..0b28c3c6a22e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_drv.h @@ -0,0 +1,99 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! 
+@File +@Title PowerVR DRM driver +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__PVR_DRV_H__) +#define __PVR_DRV_H__ + +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#include +#include +#include +#else +#include +#endif + +#include + +struct file; +struct _PVRSRV_DEVICE_NODE_; +struct workqueue_struct; +struct vm_area_struct; + +/* This structure is used to store Linux specific per-device information. */ +struct pvr_drm_private { + struct _PVRSRV_DEVICE_NODE_ *dev_node; + + /* + * This is needed for devices that don't already have their own dma + * parameters structure, e.g. platform devices, and, if necessary, will + * be assigned to the 'struct device' during device initialisation. It + * should therefore never be accessed directly via this structure as + * this may not be the version of dma parameters in use. 
+ */ + struct device_dma_parameters dma_parms; + +#if defined(SUPPORT_BUFFER_SYNC) || defined(SUPPORT_NATIVE_FENCE_SYNC) + struct workqueue_struct *fence_status_wq; +#endif +}; + +extern const struct dev_pm_ops pvr_pm_ops; +extern const struct drm_driver pvr_drm_generic_driver; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) +int pvr_drm_load(struct drm_device *ddev, unsigned long flags); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) +int pvr_drm_unload(struct drm_device *ddev); +#else +void pvr_drm_unload(struct drm_device *ddev); +#endif +#endif + +int PVRSRV_BridgeDispatchKM(struct drm_device *dev, void *arg, + struct drm_file *file); +int PVRSRV_MMap(struct file *file, struct vm_area_struct *ps_vma); + +#endif /* !defined(__PVR_DRV_H__) */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_fence.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_fence.c new file mode 100644 index 000000000000..e430101c0912 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_fence.c @@ -0,0 +1,1173 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Title PowerVR Linux fence interface +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include +#include +#include +#include +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) +#include +#endif + +#include "pvr_fence.h" +#include "services_kernel_client.h" +#include "sync_checkpoint_external.h" + +#define CREATE_TRACE_POINTS +#include "pvr_fence_trace.h" + +/* This header must always be included last */ +#include "kernel_compatibility.h" + +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) +/* Set size of the hashtable to 4096 entries. Empirical experiments showed + * that the actual required size doesn't exceed few hundreds of entries (~200). + * Even if assumed 5 times higher threshold 4k entries should be enough + * to hold all of them with minimal number of conflicts. + */ +static DEFINE_HASHTABLE(pvr_fence_ufo_lut, 12); +static DEFINE_SPINLOCK(pvr_fence_ufo_lut_spinlock); +#endif + +/* Global kmem_cache for pvr_fence object allocations */ +static struct kmem_cache *pvr_fence_cache; +static DEFINE_MUTEX(pvr_fence_cache_mutex); +static u32 pvr_fence_cache_refcount; + +#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) 
\ + do { \ + if (pfnDumpDebugPrintf) \ + pfnDumpDebugPrintf(pvDumpDebugFile, fmt, \ + ## __VA_ARGS__); \ + else \ + pr_err(fmt "\n", ## __VA_ARGS__); \ + } while (0) + +static inline void +pvr_fence_sync_signal(struct pvr_fence *pvr_fence, u32 fence_sync_flags) +{ + SyncCheckpointSignal(pvr_fence->sync_checkpoint, fence_sync_flags); +} + +static inline bool +pvr_fence_sync_is_signaled(struct pvr_fence *pvr_fence, u32 fence_sync_flags) +{ + return SyncCheckpointIsSignalled(pvr_fence->sync_checkpoint, + fence_sync_flags); +} + +static inline u32 +pvr_fence_sync_value(struct pvr_fence *pvr_fence) +{ + if (SyncCheckpointIsErrored(pvr_fence->sync_checkpoint, + PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) + return PVRSRV_SYNC_CHECKPOINT_ERRORED; + else if (SyncCheckpointIsSignalled(pvr_fence->sync_checkpoint, + PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) + return PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + else + return PVRSRV_SYNC_CHECKPOINT_ACTIVE; +} + +static void +pvr_fence_context_check_status(struct work_struct *data) +{ + PVRSRVCheckStatus(NULL); +} + +void +pvr_context_value_str(struct pvr_fence_context *fctx, char *str, int size) +{ + snprintf(str, size, + "%u ctx=%llu refs=%u", + atomic_read(&fctx->fence_seqno), + fctx->fence_context, + refcount_read(&fctx->kref.refcount)); +} + +static void +pvr_fence_context_fences_dump(struct pvr_fence_context *fctx, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + struct pvr_fence *pvr_fence; + unsigned long flags; + char value[128]; + + spin_lock_irqsave(&fctx->list_lock, flags); + pvr_context_value_str(fctx, value, sizeof(value)); + PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, + "%s: @%s", fctx->name, value); + list_for_each_entry(pvr_fence, &fctx->fence_list, fence_head) { + struct dma_fence *fence = pvr_fence->fence; + const char *timeline_value_str = "unknown timeline value"; + const char *fence_value_str = "unknown fence value"; + + pvr_fence->base.ops->fence_value_str(&pvr_fence->base, value, + 
sizeof(value)); + PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, + " @%s", value); + + if (is_pvr_fence(fence)) + continue; + + if (fence->ops->timeline_value_str) { + fence->ops->timeline_value_str(fence, value, + sizeof(value)); + timeline_value_str = value; + } + + PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, + " | %s: %s (driver: %s)", + fence->ops->get_timeline_name(fence), + timeline_value_str, + fence->ops->get_driver_name(fence)); + + if (fence->ops->fence_value_str) { + fence->ops->fence_value_str(fence, value, + sizeof(value)); + fence_value_str = value; + } + + PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, + " | @%s (foreign)", value); + } + spin_unlock_irqrestore(&fctx->list_lock, flags); +} + +static inline unsigned int +pvr_fence_context_seqno_next(struct pvr_fence_context *fctx) +{ + return atomic_inc_return(&fctx->fence_seqno) - 1; +} + +/* This function prepends seqno to fence name */ +static inline void +pvr_fence_prepare_name(char *fence_name, size_t fence_name_size, + const char *name, unsigned int seqno) +{ + unsigned int len; + + len = OSStringUINT32ToStr(fence_name, fence_name_size, seqno); + if (likely((len > 0) && (fence_name_size >= (len + 1)))) { + fence_name[len] = '-'; + fence_name[len + 1] = '\0'; + } + strlcat(fence_name, name, fence_name_size); +} + +static void +pvr_fence_sched_free(struct rcu_head *rcu) +{ + struct pvr_fence *pvr_fence = container_of(rcu, struct pvr_fence, rcu); + kmem_cache_free(pvr_fence_cache, pvr_fence); +} + +static inline void +pvr_fence_context_free_deferred(struct pvr_fence_context *fctx) +{ + struct pvr_fence *pvr_fence, *tmp; + LIST_HEAD(deferred_free_list); + unsigned long flags; + + spin_lock_irqsave(&fctx->list_lock, flags); + list_for_each_entry_safe(pvr_fence, tmp, + &fctx->deferred_free_list, + fence_head) + list_move(&pvr_fence->fence_head, &deferred_free_list); + spin_unlock_irqrestore(&fctx->list_lock, flags); + + list_for_each_entry_safe(pvr_fence, tmp, + 
&deferred_free_list, + fence_head) { + list_del(&pvr_fence->fence_head); + SyncCheckpointFree(pvr_fence->sync_checkpoint); + call_rcu(&pvr_fence->rcu, pvr_fence_sched_free); + module_put(THIS_MODULE); + } +} + +void +pvr_fence_context_free_deferred_callback(void *data) +{ + struct pvr_fence_context *fctx = (struct pvr_fence_context *)data; + + /* + * Free up any fence objects we have deferred freeing. + */ + pvr_fence_context_free_deferred(fctx); +} + +static void +pvr_fence_context_signal_fences(void *data) +{ + struct pvr_fence_context *fctx = (struct pvr_fence_context *)data; + struct pvr_fence *pvr_fence, *tmp; + unsigned long flags1; +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) + unsigned long flags2; +#endif + LIST_HEAD(signal_list); + + /* + * We can't call fence_signal while holding the lock as we can end up + * in a situation whereby pvr_fence_foreign_signal_sync, which also + * takes the list lock, ends up being called as a result of the + * fence_signal below, i.e. fence_signal(fence) -> fence->callback() + * -> fence_signal(foreign_fence) -> foreign_fence->callback() where + * the foreign_fence callback is pvr_fence_foreign_signal_sync. + * + * So extract the items we intend to signal and add them to their own + * queue. 
+ */ +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) + spin_lock_irqsave(&pvr_fence_ufo_lut_spinlock, flags2); + spin_lock_irqsave(&fctx->list_lock, flags1); + list_for_each_entry_safe(pvr_fence, tmp, &fctx->signal_list, + signal_head) { + if (pvr_fence_sync_is_signaled(pvr_fence, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) { + list_move_tail(&pvr_fence->signal_head, &signal_list); + hash_del(&pvr_fence->ufo_lookup); + } + } + spin_unlock_irqrestore(&fctx->list_lock, flags1); + spin_unlock_irqrestore(&pvr_fence_ufo_lut_spinlock, flags2); +#else + spin_lock_irqsave(&fctx->list_lock, flags1); + list_for_each_entry_safe(pvr_fence, tmp, &fctx->signal_list, + signal_head) { + if (pvr_fence_sync_is_signaled(pvr_fence, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) + list_move_tail(&pvr_fence->signal_head, &signal_list); + } + spin_unlock_irqrestore(&fctx->list_lock, flags1); +#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */ + + list_for_each_entry_safe(pvr_fence, tmp, &signal_list, signal_head) { + + PVR_FENCE_TRACE(&pvr_fence->base, "signalled fence (%s)\n", + pvr_fence->name); + trace_pvr_fence_signal_fence(pvr_fence); + spin_lock_irqsave(&pvr_fence->fctx->list_lock, flags1); + list_del(&pvr_fence->signal_head); + spin_unlock_irqrestore(&pvr_fence->fctx->list_lock, flags1); + dma_fence_signal(pvr_fence->fence); + dma_fence_put(pvr_fence->fence); + } + + /* + * Take this opportunity to free up any fence objects we + * have deferred freeing. 
+ */ + pvr_fence_context_free_deferred(fctx); +} + +void +pvr_fence_context_signal_fences_nohw(void *data) +{ + pvr_fence_context_signal_fences(data); +} + +static void +pvr_fence_context_destroy_work(struct work_struct *data) +{ + struct pvr_fence_context *fctx = + container_of(data, struct pvr_fence_context, destroy_work); + + pvr_fence_context_free_deferred(fctx); + + if (WARN_ON(!list_empty_careful(&fctx->fence_list))) + pvr_fence_context_fences_dump(fctx, NULL, NULL); + + PVRSRVUnregisterDbgRequestNotify(fctx->dbg_request_handle); + PVRSRVUnregisterCmdCompleteNotify(fctx->cmd_complete_handle); + + /* Destroy pvr_fence object cache, if no one is using it */ + WARN_ON(pvr_fence_cache == NULL); + mutex_lock(&pvr_fence_cache_mutex); + if (--pvr_fence_cache_refcount == 0) + kmem_cache_destroy(pvr_fence_cache); + mutex_unlock(&pvr_fence_cache_mutex); + + kfree(fctx); +} + +static void +pvr_fence_context_debug_request(void *data, u32 verbosity, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + struct pvr_fence_context *fctx = (struct pvr_fence_context *)data; + + if (DD_VERB_LVL_ENABLED(verbosity, DEBUG_REQUEST_VERBOSITY_MEDIUM)) + pvr_fence_context_fences_dump(fctx, pfnDumpDebugPrintf, + pvDumpDebugFile); +} + +/** + * pvr_fence_context_create - creates a PVR fence context + * @dev_cookie: services device cookie + * @name: context name (used for debugging) + * + * Creates a PVR fence context that can be used to create PVR fences or to + * create PVR fences from an existing fence. + * + * pvr_fence_context_destroy should be called to clean up the fence context. + * + * Returns NULL if a context cannot be created. 
+ */ +struct pvr_fence_context * +pvr_fence_context_create(void *dev_cookie, + struct workqueue_struct *fence_status_wq, + const char *name) +{ + struct pvr_fence_context *fctx; + PVRSRV_ERROR srv_err; + + fctx = kzalloc(sizeof(*fctx), GFP_KERNEL); + if (!fctx) + return NULL; + + spin_lock_init(&fctx->lock); + atomic_set(&fctx->fence_seqno, 0); + INIT_WORK(&fctx->check_status_work, pvr_fence_context_check_status); + INIT_WORK(&fctx->destroy_work, pvr_fence_context_destroy_work); + spin_lock_init(&fctx->list_lock); + INIT_LIST_HEAD(&fctx->signal_list); + INIT_LIST_HEAD(&fctx->fence_list); + INIT_LIST_HEAD(&fctx->deferred_free_list); + + fctx->fence_wq = fence_status_wq; + + fctx->fence_context = dma_fence_context_alloc(1); + strlcpy(fctx->name, name, sizeof(fctx->name)); + + srv_err = PVRSRVRegisterCmdCompleteNotify(&fctx->cmd_complete_handle, +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) + pvr_fence_context_free_deferred_callback, +#else + pvr_fence_context_signal_fences, +#endif + fctx); + if (srv_err != PVRSRV_OK) { + pr_err("%s: failed to register command complete callback (%s)\n", + __func__, PVRSRVGetErrorString(srv_err)); + goto err_free_fctx; + } + + /* Create pvr_fence object cache, if not already created */ + mutex_lock(&pvr_fence_cache_mutex); + if (pvr_fence_cache_refcount == 0) { + pvr_fence_cache = KMEM_CACHE(pvr_fence, 0); + if (!pvr_fence_cache) { + pr_err("%s: failed to allocate pvr_fence cache\n", + __func__); + mutex_unlock(&pvr_fence_cache_mutex); + goto err_unregister_cmd_complete_notify; + } + } + pvr_fence_cache_refcount++; + mutex_unlock(&pvr_fence_cache_mutex); + + srv_err = PVRSRVRegisterDbgRequestNotify(&fctx->dbg_request_handle, + dev_cookie, + pvr_fence_context_debug_request, + DEBUG_REQUEST_LINUXFENCE, + fctx); + if (srv_err != PVRSRV_OK) { + pr_err("%s: failed to register debug request callback (%s)\n", + __func__, PVRSRVGetErrorString(srv_err)); + goto err_free_pvr_fence_cache; + } + + kref_init(&fctx->kref); + + PVR_FENCE_CTX_TRACE(fctx, 
"created fence context (%s)\n", name); + trace_pvr_fence_context_create(fctx); + + return fctx; + +err_free_pvr_fence_cache: + mutex_lock(&pvr_fence_cache_mutex); + if (--pvr_fence_cache_refcount == 0) + kmem_cache_destroy(pvr_fence_cache); + mutex_unlock(&pvr_fence_cache_mutex); +err_unregister_cmd_complete_notify: + PVRSRVUnregisterCmdCompleteNotify(fctx->cmd_complete_handle); +err_free_fctx: + kfree(fctx); + return NULL; +} + +static void pvr_fence_context_destroy_kref(struct kref *kref) +{ + struct pvr_fence_context *fctx = + container_of(kref, struct pvr_fence_context, kref); + + PVR_FENCE_CTX_TRACE(fctx, "destroyed fence context (%s)\n", fctx->name); + + trace_pvr_fence_context_destroy_kref(fctx); + + schedule_work(&fctx->destroy_work); +} + +/** + * pvr_fence_context_destroy - destroys a context + * @fctx: PVR fence context to destroy + * + * Destroys a PVR fence context with the expectation that all fences have been + * destroyed. + */ +void +pvr_fence_context_destroy(struct pvr_fence_context *fctx) +{ + trace_pvr_fence_context_destroy(fctx); + + kref_put(&fctx->kref, pvr_fence_context_destroy_kref); +} + +static const char * +pvr_fence_get_driver_name(struct dma_fence *fence) +{ + return PVR_LDM_DRIVER_REGISTRATION_NAME; +} + +static const char * +pvr_fence_get_timeline_name(struct dma_fence *fence) +{ + struct pvr_fence *pvr_fence = to_pvr_fence(fence); + + if (pvr_fence) + return pvr_fence->fctx->name; + return NULL; +} + +static +void pvr_fence_fence_value_str(struct dma_fence *fence, char *str, int size) +{ + struct pvr_fence *pvr_fence = to_pvr_fence(fence); + + if (!pvr_fence) + return; + + snprintf(str, size, + "%llu: (%s%s) refs=%u fwaddr=%#08x enqueue=%u status=%-9s %s%s", + (u64) pvr_fence->fence->seqno, + test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &pvr_fence->fence->flags) ? "+" : "-", + test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &pvr_fence->fence->flags) ? 
"+" : "-", + refcount_read(&pvr_fence->fence->refcount.refcount), + SyncCheckpointGetFirmwareAddr( + pvr_fence->sync_checkpoint), + SyncCheckpointGetEnqueuedCount(pvr_fence->sync_checkpoint), + SyncCheckpointGetStateString(pvr_fence->sync_checkpoint), + pvr_fence->name, + (&pvr_fence->base != pvr_fence->fence) ? + "(foreign)" : ""); +} + +static +void pvr_fence_timeline_value_str(struct dma_fence *fence, char *str, int size) +{ + struct pvr_fence *pvr_fence = to_pvr_fence(fence); + + if (pvr_fence) + pvr_context_value_str(pvr_fence->fctx, str, size); +} + +static bool +pvr_fence_enable_signaling(struct dma_fence *fence) +{ + struct pvr_fence *pvr_fence = to_pvr_fence(fence); + unsigned long flags; + + if (!pvr_fence) + return false; + + WARN_ON_SMP(!spin_is_locked(&pvr_fence->fctx->lock)); + + if (pvr_fence_sync_is_signaled(pvr_fence, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) + return false; + + dma_fence_get(&pvr_fence->base); + + spin_lock_irqsave(&pvr_fence->fctx->list_lock, flags); + list_add_tail(&pvr_fence->signal_head, &pvr_fence->fctx->signal_list); + spin_unlock_irqrestore(&pvr_fence->fctx->list_lock, flags); + +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) + spin_lock_irqsave(&pvr_fence_ufo_lut_spinlock, flags); + hash_add(pvr_fence_ufo_lut, &pvr_fence->ufo_lookup, + SyncCheckpointGetFirmwareAddr(pvr_fence->sync_checkpoint)); + spin_unlock_irqrestore(&pvr_fence_ufo_lut_spinlock, flags); +#endif + + PVR_FENCE_TRACE(&pvr_fence->base, "signalling enabled (%s)\n", + pvr_fence->name); + trace_pvr_fence_enable_signaling(pvr_fence); + + return true; +} + +static bool +pvr_fence_is_signaled(struct dma_fence *fence) +{ + struct pvr_fence *pvr_fence = to_pvr_fence(fence); + + if (pvr_fence) + return pvr_fence_sync_is_signaled(pvr_fence, + PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT); + return false; +} + +static void +pvr_fence_release(struct dma_fence *fence) +{ + struct pvr_fence *pvr_fence = to_pvr_fence(fence); + unsigned long flags; + + if (pvr_fence) { + struct pvr_fence_context 
*fctx = pvr_fence->fctx; + + PVR_FENCE_TRACE(&pvr_fence->base, "released fence (%s)\n", + pvr_fence->name); + trace_pvr_fence_release(pvr_fence); + + spin_lock_irqsave(&fctx->list_lock, flags); + list_move(&pvr_fence->fence_head, + &fctx->deferred_free_list); + spin_unlock_irqrestore(&fctx->list_lock, flags); + + kref_put(&fctx->kref, pvr_fence_context_destroy_kref); + } +} + +const struct dma_fence_ops pvr_fence_ops = { + .get_driver_name = pvr_fence_get_driver_name, + .get_timeline_name = pvr_fence_get_timeline_name, + .fence_value_str = pvr_fence_fence_value_str, + .timeline_value_str = pvr_fence_timeline_value_str, + .enable_signaling = pvr_fence_enable_signaling, + .signaled = pvr_fence_is_signaled, + .wait = dma_fence_default_wait, + .release = pvr_fence_release, +}; + +/** + * pvr_fence_create - creates a PVR fence + * @fctx: PVR fence context on which the PVR fence should be created + * @sync_checkpoint_ctx: context in which to create sync checkpoints + * @timeline_fd: timeline on which the PVR fence should be created + * @name: PVR fence name (used for debugging) + * + * Creates a PVR fence. + * + * Once the fence is finished with, pvr_fence_destroy should be called. + * + * Returns NULL if a PVR fence cannot be created. 
+ */ +struct pvr_fence * +pvr_fence_create(struct pvr_fence_context *fctx, + struct _SYNC_CHECKPOINT_CONTEXT *sync_checkpoint_ctx, + int timeline_fd, const char *name) +{ + struct pvr_fence *pvr_fence; + unsigned int seqno; + unsigned long flags; + PVRSRV_ERROR srv_err; + + if (!try_module_get(THIS_MODULE)) + goto err_exit; + + /* Note: As kmem_cache is used to allocate pvr_fence objects, + * make sure that all members of pvr_fence struct are initialized + * here + */ + pvr_fence = kmem_cache_alloc(pvr_fence_cache, GFP_KERNEL); + if (unlikely(!pvr_fence)) { + goto err_module_put; + } + + srv_err = SyncCheckpointAlloc(sync_checkpoint_ctx, + (PVRSRV_TIMELINE) timeline_fd, PVRSRV_NO_FENCE, + name, &pvr_fence->sync_checkpoint); + if (unlikely(srv_err != PVRSRV_OK)) + goto err_free_fence; + + INIT_LIST_HEAD(&pvr_fence->fence_head); + INIT_LIST_HEAD(&pvr_fence->signal_head); + pvr_fence->fctx = fctx; + seqno = pvr_fence_context_seqno_next(fctx); + /* Add the seqno to the fence name for easier debugging */ + pvr_fence_prepare_name(pvr_fence->name, sizeof(pvr_fence->name), + name, seqno); + + /* Reset cb to zero */ + memset(&pvr_fence->cb, 0, sizeof(pvr_fence->cb)); + pvr_fence->fence = &pvr_fence->base; + + dma_fence_init(&pvr_fence->base, &pvr_fence_ops, &fctx->lock, + fctx->fence_context, seqno); + + spin_lock_irqsave(&fctx->list_lock, flags); + list_add_tail(&pvr_fence->fence_head, &fctx->fence_list); + spin_unlock_irqrestore(&fctx->list_lock, flags); + + kref_get(&fctx->kref); + + PVR_FENCE_TRACE(&pvr_fence->base, "created fence (%s)\n", name); + trace_pvr_fence_create(pvr_fence); + + return pvr_fence; + +err_free_fence: + kmem_cache_free(pvr_fence_cache, pvr_fence); +err_module_put: + module_put(THIS_MODULE); +err_exit: + return NULL; +} + +static const char * +pvr_fence_foreign_get_driver_name(struct dma_fence *fence) +{ + return PVR_LDM_DRIVER_REGISTRATION_NAME; +} + +static const char * +pvr_fence_foreign_get_timeline_name(struct dma_fence *fence) +{ + return 
"foreign"; +} + +static +void pvr_fence_foreign_fence_value_str(struct dma_fence *fence, char *str, + int size) +{ + struct pvr_fence *pvr_fence = to_pvr_fence(fence); + u32 sync_addr = 0; + u32 sync_value_next; + + if (WARN_ON(!pvr_fence)) + return; + + sync_addr = SyncCheckpointGetFirmwareAddr(pvr_fence->sync_checkpoint); + sync_value_next = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + + /* + * Include the fence flag bits from the foreign fence instead of our + * shadow copy. This is done as the shadow fence flag bits aren't used. + */ + snprintf(str, size, + "%llu: (%s%s) refs=%u fwaddr=%#08x cur=%#08x nxt=%#08x %s", + (u64) fence->seqno, + test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &pvr_fence->fence->flags) ? "+" : "-", + test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &pvr_fence->fence->flags) ? "+" : "-", + refcount_read(&fence->refcount.refcount), + sync_addr, + pvr_fence_sync_value(pvr_fence), + sync_value_next, + pvr_fence->name); +} + +static +void pvr_fence_foreign_timeline_value_str(struct dma_fence *fence, char *str, + int size) +{ + struct pvr_fence *pvr_fence = to_pvr_fence(fence); + + if (pvr_fence) + pvr_context_value_str(pvr_fence->fctx, str, size); +} + +static bool +pvr_fence_foreign_enable_signaling(struct dma_fence *fence) +{ + WARN_ON("cannot enable signalling on foreign fence"); + return false; +} + +static signed long +pvr_fence_foreign_wait(struct dma_fence *fence, bool intr, signed long timeout) +{ + WARN_ON("cannot wait on foreign fence"); + return 0; +} + +static void +pvr_fence_foreign_release(struct dma_fence *fence) +{ + struct pvr_fence *pvr_fence = to_pvr_fence(fence); + unsigned long flags; + + if (pvr_fence) { + struct pvr_fence_context *fctx = pvr_fence->fctx; + struct dma_fence *foreign_fence = pvr_fence->fence; + + PVR_FENCE_TRACE(&pvr_fence->base, + "released fence for foreign fence %llu#%d (%s)\n", + (u64) pvr_fence->fence->context, + pvr_fence->fence->seqno, pvr_fence->name); + trace_pvr_fence_foreign_release(pvr_fence); + + 
spin_lock_irqsave(&fctx->list_lock, flags); + list_move(&pvr_fence->fence_head, + &fctx->deferred_free_list); + spin_unlock_irqrestore(&fctx->list_lock, flags); + + dma_fence_put(foreign_fence); + + kref_put(&fctx->kref, + pvr_fence_context_destroy_kref); + } +} + +const struct dma_fence_ops pvr_fence_foreign_ops = { + .get_driver_name = pvr_fence_foreign_get_driver_name, + .get_timeline_name = pvr_fence_foreign_get_timeline_name, + .fence_value_str = pvr_fence_foreign_fence_value_str, + .timeline_value_str = pvr_fence_foreign_timeline_value_str, + .enable_signaling = pvr_fence_foreign_enable_signaling, + .wait = pvr_fence_foreign_wait, + .release = pvr_fence_foreign_release, +}; + +static void +pvr_fence_foreign_signal_sync(struct dma_fence *fence, struct dma_fence_cb *cb) +{ + struct pvr_fence *pvr_fence = container_of(cb, struct pvr_fence, cb); + struct pvr_fence_context *fctx = pvr_fence->fctx; + + WARN_ON_ONCE(is_pvr_fence(fence)); + + /* Callback registered by dma_fence_add_callback can be called from an atomic ctx */ + pvr_fence_sync_signal(pvr_fence, PVRSRV_FENCE_FLAG_CTX_ATOMIC); + + trace_pvr_fence_foreign_signal(pvr_fence); + + queue_work(fctx->fence_wq, &fctx->check_status_work); + + PVR_FENCE_TRACE(&pvr_fence->base, + "foreign fence %llu#%d signalled (%s)\n", + (u64) pvr_fence->fence->context, + pvr_fence->fence->seqno, pvr_fence->name); + + /* Drop the reference on the base fence */ + dma_fence_put(&pvr_fence->base); +} + +/** + * pvr_fence_create_from_fence - creates a PVR fence from a fence + * @fctx: PVR fence context on which the PVR fence should be created + * @sync_checkpoint_ctx: context in which to create sync checkpoints + * @fence: fence from which the PVR fence should be created + * @fence_fd: fd for the sync file to which the fence belongs. If it doesn't + * belong to a sync file then PVRSRV_NO_FENCE should be given + * instead. + * @name: PVR fence name (used for debugging) + * + * Creates a PVR fence from an existing fence. 
If the fence is a foreign fence, + * i.e. one that doesn't originate from a PVR fence context, then a new PVR + * fence will be created using the specified sync_checkpoint_context. + * Otherwise, a reference will be taken on the underlying fence and the PVR + * fence will be returned. + * + * Once the fence is finished with, pvr_fence_destroy should be called. + * + * Returns NULL if a PVR fence cannot be created. + */ + +struct pvr_fence * +pvr_fence_create_from_fence(struct pvr_fence_context *fctx, + struct _SYNC_CHECKPOINT_CONTEXT *sync_checkpoint_ctx, + struct dma_fence *fence, + PVRSRV_FENCE fence_fd, + const char *name) +{ + struct pvr_fence *pvr_fence = to_pvr_fence(fence); + unsigned int seqno; + unsigned long flags; + PVRSRV_ERROR srv_err; + int err; + + if (pvr_fence) { + if (WARN_ON(fence->ops == &pvr_fence_foreign_ops)) + return NULL; + dma_fence_get(fence); + + PVR_FENCE_TRACE(fence, "created fence from PVR fence (%s)\n", + name); + return pvr_fence; + } + + if (!try_module_get(THIS_MODULE)) + goto err_exit; + + /* Note: As kmem_cache is used to allocate pvr_fence objects, + * make sure that all members of pvr_fence struct are initialized + * here + */ + pvr_fence = kmem_cache_alloc(pvr_fence_cache, GFP_KERNEL); + if (!pvr_fence) { + goto err_module_put; + } + + srv_err = SyncCheckpointAlloc(sync_checkpoint_ctx, + SYNC_CHECKPOINT_FOREIGN_CHECKPOINT, + fence_fd, + name, &pvr_fence->sync_checkpoint); + if (srv_err != PVRSRV_OK) + goto err_free_pvr_fence; + + INIT_LIST_HEAD(&pvr_fence->fence_head); + INIT_LIST_HEAD(&pvr_fence->signal_head); + pvr_fence->fctx = fctx; + pvr_fence->fence = dma_fence_get(fence); + seqno = pvr_fence_context_seqno_next(fctx); + /* Add the seqno to the fence name for easier debugging */ + pvr_fence_prepare_name(pvr_fence->name, sizeof(pvr_fence->name), + name, seqno); + + /* + * We use the base fence to refcount the PVR fence and to do the + * necessary clean up once the refcount drops to 0. 
+ */ + dma_fence_init(&pvr_fence->base, &pvr_fence_foreign_ops, &fctx->lock, + fctx->fence_context, seqno); + + /* + * Take an extra reference on the base fence that gets dropped when the + * foreign fence is signalled. + */ + dma_fence_get(&pvr_fence->base); + + spin_lock_irqsave(&fctx->list_lock, flags); + list_add_tail(&pvr_fence->fence_head, &fctx->fence_list); + spin_unlock_irqrestore(&fctx->list_lock, flags); + kref_get(&fctx->kref); + + PVR_FENCE_TRACE(&pvr_fence->base, + "created fence from foreign fence %llu#%d (%s)\n", + (u64) pvr_fence->fence->context, + pvr_fence->fence->seqno, name); + + err = dma_fence_add_callback(fence, &pvr_fence->cb, + pvr_fence_foreign_signal_sync); + if (err) { + if (err != -ENOENT) { + pr_err("%s: failed to add fence callback (err=%d)", + __func__, err); + goto err_put_ref; + } + + /* + * The fence has already signalled so set the sync as signalled. + * The "signalled" hwperf packet should be emitted because the + * callback won't be called for already signalled fence hence, + * PVRSRV_FENCE_FLAG_NONE flag. + */ + pvr_fence_sync_signal(pvr_fence, PVRSRV_FENCE_FLAG_NONE); + PVR_FENCE_TRACE(&pvr_fence->base, + "foreign fence %llu#%d already signaled (%s)\n", + (u64) pvr_fence->fence->context, + pvr_fence->fence->seqno, + name); + dma_fence_put(&pvr_fence->base); + } + + trace_pvr_fence_foreign_create(pvr_fence); + + return pvr_fence; + +err_put_ref: + kref_put(&fctx->kref, pvr_fence_context_destroy_kref); + spin_lock_irqsave(&fctx->list_lock, flags); + list_del(&pvr_fence->fence_head); + spin_unlock_irqrestore(&fctx->list_lock, flags); + SyncCheckpointFree(pvr_fence->sync_checkpoint); +err_free_pvr_fence: + kmem_cache_free(pvr_fence_cache, pvr_fence); +err_module_put: + module_put(THIS_MODULE); +err_exit: + return NULL; +} + +/** + * pvr_fence_destroy - destroys a PVR fence + * @pvr_fence: PVR fence to destroy + * + * Destroys a PVR fence. 
Upon return, the PVR fence may still exist if something + * else still references the underlying fence, e.g. a reservation object, or if + * software signalling has been enabled and the fence hasn't yet been signalled. + */ +void +pvr_fence_destroy(struct pvr_fence *pvr_fence) +{ + PVR_FENCE_TRACE(&pvr_fence->base, "destroyed fence (%s)\n", + pvr_fence->name); + + dma_fence_put(&pvr_fence->base); +} + +/** + * pvr_fence_sw_signal - signals a PVR fence sync + * @pvr_fence: PVR fence to signal + * + * Sets the PVR fence sync value to signalled. + * + * Returns -EINVAL if the PVR fence represents a foreign fence. + */ +int +pvr_fence_sw_signal(struct pvr_fence *pvr_fence) +{ +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) + unsigned long flags; +#endif + + if (!is_our_fence(pvr_fence->fctx, &pvr_fence->base)) + return -EINVAL; + + pvr_fence_sync_signal(pvr_fence, PVRSRV_FENCE_FLAG_NONE); +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) + dma_fence_put(pvr_fence->fence); +#endif + +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) + spin_lock_irqsave(&pvr_fence_ufo_lut_spinlock, flags); + hash_del(&pvr_fence->ufo_lookup); + spin_unlock_irqrestore(&pvr_fence_ufo_lut_spinlock, flags); +#endif + + queue_work(pvr_fence->fctx->fence_wq, + &pvr_fence->fctx->check_status_work); + + PVR_FENCE_TRACE(&pvr_fence->base, "sw set fence sync signalled (%s)\n", + pvr_fence->name); + + return 0; +} + +/** + * pvr_fence_sw_error - errors the sync checkpoint backing a PVR fence + * @pvr_fence: PVR fence to error + * + * Sets the PVR fence sync checkpoint value to errored. + * + * Returns -EINVAL if the PVR fence represents a foreign fence. 
+ */ +int +pvr_fence_sw_error(struct pvr_fence *pvr_fence) +{ + if (!is_our_fence(pvr_fence->fctx, &pvr_fence->base)) + return -EINVAL; + + SyncCheckpointError(pvr_fence->sync_checkpoint, PVRSRV_FENCE_FLAG_NONE); + PVR_FENCE_TRACE(&pvr_fence->base, "sw set fence sync errored (%s)\n", + pvr_fence->name); + + return 0; +} + +int +pvr_fence_get_checkpoints(struct pvr_fence **pvr_fences, u32 nr_fences, + struct _SYNC_CHECKPOINT **fence_checkpoints) +{ + struct _SYNC_CHECKPOINT **next_fence_checkpoint = fence_checkpoints; + struct pvr_fence **next_pvr_fence = pvr_fences; + int fence_checkpoint_idx; + + if (nr_fences > 0) { + + for (fence_checkpoint_idx = 0; fence_checkpoint_idx < nr_fences; + fence_checkpoint_idx++) { + struct pvr_fence *next_fence = *next_pvr_fence++; + *next_fence_checkpoint++ = next_fence->sync_checkpoint; + /* Take reference on sync checkpoint (will be dropped + * later by kick code) + */ + SyncCheckpointTakeRef(next_fence->sync_checkpoint); + } + } + + return 0; +} + +struct _SYNC_CHECKPOINT * +pvr_fence_get_checkpoint(struct pvr_fence *update_fence) +{ + return update_fence->sync_checkpoint; +} + +/** + * pvr_fence_dump_info_on_stalled_ufos - displays debug + * information on a native fence associated with any of + * the ufos provided. This function will be called from + * pvr_sync_file.c if the driver determines any GPU work + * is stuck waiting for a sync checkpoint representing a + * foreign sync to be signalled. + * @nr_ufos: number of ufos in vaddrs + * @vaddrs: array of FW addresses of UFOs which the + * driver is waiting on. + * + * Output debug information to kernel log on linux fences + * which would be responsible for signalling the sync + * checkpoints indicated by the ufo vaddresses. + * + * Returns the number of ufos in the array which were found + * to be associated with foreign syncs. 
+ */ +u32 pvr_fence_dump_info_on_stalled_ufos(struct pvr_fence_context *fctx, + u32 nr_ufos, u32 *vaddrs) +{ + int our_ufo_ct = 0; + struct pvr_fence *pvr_fence; + unsigned long flags; + + spin_lock_irqsave(&fctx->list_lock, flags); + /* dump info on any ufos in our active list */ + list_for_each_entry(pvr_fence, &fctx->fence_list, fence_head) { + u32 *this_ufo_vaddr = vaddrs; + int ufo_num; + DUMPDEBUG_PRINTF_FUNC *pfnDummy = NULL; + + for (ufo_num = 0; ufo_num < nr_ufos; ufo_num++) { + struct _SYNC_CHECKPOINT *checkpoint = + pvr_fence->sync_checkpoint; + const u32 fence_ufo_addr = + SyncCheckpointGetFirmwareAddr(checkpoint); + + if (fence_ufo_addr != this_ufo_vaddr[ufo_num]) + continue; + + /* Dump sync info */ + PVR_DUMPDEBUG_LOG(pfnDummy, NULL, + "\tSyncID = %d, FWAddr = 0x%08x: TLID = %d (Foreign Fence - [%p] %s)", + SyncCheckpointGetId(checkpoint), + fence_ufo_addr, + SyncCheckpointGetTimeline(checkpoint), + pvr_fence->fence, + pvr_fence->name); + our_ufo_ct++; + } + } + spin_unlock_irqrestore(&fctx->list_lock, flags); + return our_ufo_ct; +} + +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) +enum tag_img_bool pvr_fence_checkpoint_ufo_has_signalled(u32 fwaddr, u32 value) +{ + struct pvr_fence *pvr_fence = NULL; + unsigned long flags; + + spin_lock_irqsave(&pvr_fence_ufo_lut_spinlock, flags); + hash_for_each_possible(pvr_fence_ufo_lut, pvr_fence, + ufo_lookup, fwaddr) { + struct _SYNC_CHECKPOINT *checkpoint = + pvr_fence->sync_checkpoint; + + if (SyncCheckpointGetFirmwareAddr(checkpoint) == fwaddr) { + hash_del(&pvr_fence->ufo_lookup); + break; + } + } + spin_unlock_irqrestore(&pvr_fence_ufo_lut_spinlock, flags); + + if (!pvr_fence) + return IMG_FALSE; + + PVR_FENCE_TRACE(&pvr_fence->base, "signalled fence (%s)\n", + pvr_fence->name); + + trace_pvr_fence_signal_fence(pvr_fence); + spin_lock_irqsave(&pvr_fence->fctx->list_lock, flags); + list_del(&pvr_fence->signal_head); + spin_unlock_irqrestore(&pvr_fence->fctx->list_lock, flags); + 
dma_fence_signal(pvr_fence->fence); + dma_fence_put(pvr_fence->fence); + + return IMG_TRUE; +} + +void +pvr_fence_check_state(void) +{ + int bkt; + unsigned long flags, flags2; + struct hlist_node *tmp1; + struct pvr_fence *pvr_fence, *tmp2; + LIST_HEAD(signal_list); + + /* + * Cannot call dma_fence_signal whilst holding spinlock, since + * dma_fence_signal will take fctx->lock and in + * pvr_fence_enable_signalling these are taken the other way around. + */ + spin_lock_irqsave(&pvr_fence_ufo_lut_spinlock, flags); + hash_for_each_safe(pvr_fence_ufo_lut, bkt, tmp1, pvr_fence, ufo_lookup) { + if (pvr_fence_sync_is_signaled(pvr_fence, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) { + spin_lock_irqsave(&pvr_fence->fctx->list_lock, flags2); + list_move_tail(&pvr_fence->signal_head, &signal_list); + spin_unlock_irqrestore(&pvr_fence->fctx->list_lock, flags2); + hash_del(&pvr_fence->ufo_lookup); + } + } + spin_unlock_irqrestore(&pvr_fence_ufo_lut_spinlock, flags); + + list_for_each_entry_safe(pvr_fence, tmp2, &signal_list, signal_head) { + PVR_FENCE_TRACE(&pvr_fence->base, "signalled fence (%s)\n", + pvr_fence->name); + + trace_pvr_fence_signal_fence(pvr_fence); + spin_lock_irqsave(&pvr_fence->fctx->list_lock, flags); + list_del(&pvr_fence->signal_head); + spin_unlock_irqrestore(&pvr_fence->fctx->list_lock, flags); + dma_fence_signal(pvr_fence->fence); + dma_fence_put(pvr_fence->fence); + } +} +#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_fence.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_fence.h new file mode 100644 index 000000000000..55e72529d395 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_fence.h @@ -0,0 +1,244 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Title PowerVR Linux fence interface +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__PVR_FENCE_H__) +#define __PVR_FENCE_H__ + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) +static inline void pvr_fence_cleanup(void) +{ +} +#else +#include "services_kernel_client.h" +#include "pvr_linux_fence.h" +#include +#include +#include + +struct _SYNC_CHECKPOINT_CONTEXT; +struct _SYNC_CHECKPOINT; + +/** + * pvr_fence_context - PVR fence context used to create and manage PVR fences + * @lock: protects the context and fences created on the context + * @name: fence context name (used for debugging) + * @dbg_request_handle: handle for callback used to dump debug data + * @fence_context: fence context with which to associate fences + * @fence_seqno: sequence number to use for the next fence + * @fence_wq: work queue for signalled fence work + * @check_status_work: work item used to inform services when a foreign fence + * has signalled + * @cmd_complete_handle: handle for callback used to signal fences when fence + * syncs are met + * @list_lock: protects the active and active foreign lists + * @signal_list: list of fences waiting to be signalled + * @fence_list: list of fences (used for debugging) + * @deferred_free_list: list of fences that we will free when we are no longer + * holding spinlocks. The frees get implemented when an update fence is + * signalled or the context is freed. 
+ */ +struct pvr_fence_context { + spinlock_t lock; + char name[32]; + void *dbg_request_handle; + u64 fence_context; + atomic_t fence_seqno; + + struct workqueue_struct *fence_wq; + struct work_struct check_status_work; + + void *cmd_complete_handle; + + spinlock_t list_lock; + struct list_head signal_list; + struct list_head fence_list; + struct list_head deferred_free_list; + + struct kref kref; + struct work_struct destroy_work; +}; + +/** + * pvr_fence - PVR fence that represents both native and foreign fences + * @base: fence structure + * @fctx: fence context on which this fence was created + * @name: fence name (used for debugging) + * @fence: pointer to base fence structure or foreign fence + * @sync_checkpoint: services sync checkpoint used by hardware + * @fence_head: entry on the context fence and deferred free list + * @signal_head: entry on the context signal list + * @cb: foreign fence callback to set the sync to signalled + */ +struct pvr_fence { + struct dma_fence base; + struct pvr_fence_context *fctx; + char name[32]; + + struct dma_fence *fence; + struct _SYNC_CHECKPOINT *sync_checkpoint; + +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) + struct hlist_node ufo_lookup; +#endif + + struct list_head fence_head; + struct list_head signal_head; + struct dma_fence_cb cb; + struct rcu_head rcu; +}; + +extern const struct dma_fence_ops pvr_fence_ops; +extern const struct dma_fence_ops pvr_fence_foreign_ops; + +static inline bool is_our_fence(struct pvr_fence_context *fctx, + struct dma_fence *fence) +{ + return (fence->context == fctx->fence_context); +} + +static inline bool is_pvr_fence(struct dma_fence *fence) +{ + return ((fence->ops == &pvr_fence_ops) || + (fence->ops == &pvr_fence_foreign_ops)); +} + +static inline struct pvr_fence *to_pvr_fence(struct dma_fence *fence) +{ + if (is_pvr_fence(fence)) + return container_of(fence, struct pvr_fence, base); + + return NULL; +} + +struct pvr_fence_context * +pvr_fence_context_create(void *dev_cookie, + struct 
workqueue_struct *fence_status_wq, + const char *name); +void pvr_fence_context_destroy(struct pvr_fence_context *fctx); +void pvr_context_value_str(struct pvr_fence_context *fctx, char *str, int size); + +struct pvr_fence * +pvr_fence_create(struct pvr_fence_context *fctx, + struct _SYNC_CHECKPOINT_CONTEXT *sync_checkpoint_ctx, + int timeline_fd, const char *name); +struct pvr_fence * +pvr_fence_create_from_fence(struct pvr_fence_context *fctx, + struct _SYNC_CHECKPOINT_CONTEXT *sync_checkpoint_ctx, + struct dma_fence *fence, + PVRSRV_FENCE fence_fd, + const char *name); +void pvr_fence_destroy(struct pvr_fence *pvr_fence); +int pvr_fence_sw_signal(struct pvr_fence *pvr_fence); +int pvr_fence_sw_error(struct pvr_fence *pvr_fence); + +int pvr_fence_get_checkpoints(struct pvr_fence **pvr_fences, u32 nr_fences, + struct _SYNC_CHECKPOINT **fence_checkpoints); +struct _SYNC_CHECKPOINT * +pvr_fence_get_checkpoint(struct pvr_fence *update_fence); + +void pvr_fence_context_signal_fences_nohw(void *data); + +void pvr_fence_context_free_deferred_callback(void *data); + +u32 pvr_fence_dump_info_on_stalled_ufos(struct pvr_fence_context *fctx, + u32 nr_ufos, + u32 *vaddrs); + +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) +enum tag_img_bool pvr_fence_checkpoint_ufo_has_signalled(u32 fwaddr, u32 value); + +void pvr_fence_check_state(void); +#endif + +static inline void pvr_fence_cleanup(void) +{ + /* + * Ensure all PVR fence contexts have been destroyed, by flushing + * the global workqueue. + */ + flush_scheduled_work(); +} + +#if defined(PVR_FENCE_DEBUG) +#define PVR_FENCE_CTX_TRACE(c, fmt, ...) \ + do { \ + struct pvr_fence_context *__fctx = (c); \ + pr_err("c %llu: (PVR) " fmt, (u64) __fctx->fence_context, \ + ## __VA_ARGS__); \ + } while (0) +#else +#define PVR_FENCE_CTX_TRACE(c, fmt, ...) +#endif + +#define PVR_FENCE_CTX_WARN(c, fmt, ...) 
\ + do { \ + struct pvr_fence_context *__fctx = (c); \ + pr_warn("c %llu: (PVR) " fmt, (u64) __fctx->fence_context, \ + ## __VA_ARGS__); \ + } while (0) + +#define PVR_FENCE_CTX_ERR(c, fmt, ...) \ + do { \ + struct pvr_fence_context *__fctx = (c); \ + pr_err("c %llu: (PVR) " fmt, (u64) __fctx->fence_context, \ + ## __VA_ARGS__); \ + } while (0) + +#if defined(PVR_FENCE_DEBUG) +#define PVR_FENCE_TRACE(f, fmt, ...) \ + DMA_FENCE_ERR(f, "(PVR) " fmt, ## __VA_ARGS__) +#else +#define PVR_FENCE_TRACE(f, fmt, ...) +#endif + +#define PVR_FENCE_WARN(f, fmt, ...) \ + DMA_FENCE_WARN(f, "(PVR) " fmt, ## __VA_ARGS__) + +#define PVR_FENCE_ERR(f, fmt, ...) \ + DMA_FENCE_ERR(f, "(PVR) " fmt, ## __VA_ARGS__) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */ +#endif /* !defined(__PVR_FENCE_H__) */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_fence_trace.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_fence_trace.h new file mode 100644 index 000000000000..50623c2b6b99 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_fence_trace.h @@ -0,0 +1,226 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM pvr_fence + +#if !defined(_TRACE_PVR_FENCE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_PVR_FENCE_H + +#include + +struct pvr_fence; +struct pvr_fence_context; + +DECLARE_EVENT_CLASS(pvr_fence_context, + + TP_PROTO(struct pvr_fence_context *fctx), + TP_ARGS(fctx), + + TP_STRUCT__entry( + __string(name, fctx->name) + __array(char, val, 128) + ), + + TP_fast_assign( + __assign_str(name, fctx->name) + pvr_context_value_str(fctx, __entry->val, + sizeof(__entry->val)); + ), + + TP_printk("name=%s val=%s", + __get_str(name), + __entry->val + ) +); + +DEFINE_EVENT(pvr_fence_context, pvr_fence_context_create, + TP_PROTO(struct pvr_fence_context *fctx), + TP_ARGS(fctx) +); + +DEFINE_EVENT(pvr_fence_context, pvr_fence_context_destroy, + TP_PROTO(struct pvr_fence_context *fctx), + TP_ARGS(fctx) +); + +DEFINE_EVENT(pvr_fence_context, pvr_fence_context_destroy_kref, + TP_PROTO(struct pvr_fence_context *fctx), + TP_ARGS(fctx) +); + +DEFINE_EVENT(pvr_fence_context, pvr_fence_context_signal_fences, + TP_PROTO(struct pvr_fence_context *fctx), + TP_ARGS(fctx) +); + +DECLARE_EVENT_CLASS(pvr_fence, + TP_PROTO(struct pvr_fence *fence), + TP_ARGS(fence), + + TP_STRUCT__entry( + __string(driver, + fence->base.ops->get_driver_name(&fence->base)) + __string(timeline, + fence->base.ops->get_timeline_name(&fence->base)) + __array(char, val, 128) + __field(u64, context) + ), + + TP_fast_assign( + __assign_str(driver, + fence->base.ops->get_driver_name(&fence->base)) + __assign_str(timeline, + fence->base.ops->get_timeline_name(&fence->base)) + fence->base.ops->fence_value_str(&fence->base, + __entry->val, sizeof(__entry->val)); + __entry->context = fence->base.context; + ), + + TP_printk("driver=%s timeline=%s ctx=%llu val=%s", + __get_str(driver), __get_str(timeline), + __entry->context, __entry->val + ) +); + +DEFINE_EVENT(pvr_fence, pvr_fence_create, + 
TP_PROTO(struct pvr_fence *fence), + TP_ARGS(fence) +); + +DEFINE_EVENT(pvr_fence, pvr_fence_release, + TP_PROTO(struct pvr_fence *fence), + TP_ARGS(fence) +); + +DEFINE_EVENT(pvr_fence, pvr_fence_enable_signaling, + TP_PROTO(struct pvr_fence *fence), + TP_ARGS(fence) +); + +DEFINE_EVENT(pvr_fence, pvr_fence_signal_fence, + TP_PROTO(struct pvr_fence *fence), + TP_ARGS(fence) +); + +DECLARE_EVENT_CLASS(pvr_fence_foreign, + TP_PROTO(struct pvr_fence *fence), + TP_ARGS(fence), + + TP_STRUCT__entry( + __string(driver, + fence->base.ops->get_driver_name(&fence->base)) + __string(timeline, + fence->base.ops->get_timeline_name(&fence->base)) + __array(char, val, 128) + __field(u64, context) + __string(foreign_driver, + fence->fence->ops->get_driver_name ? + fence->fence->ops->get_driver_name(fence->fence) : + "unknown") + __string(foreign_timeline, + fence->fence->ops->get_timeline_name ? + fence->fence->ops->get_timeline_name(fence->fence) : + "unknown") + __array(char, foreign_val, 128) + __field(u64, foreign_context) + ), + + TP_fast_assign( + __assign_str(driver, + fence->base.ops->get_driver_name(&fence->base)) + __assign_str(timeline, + fence->base.ops->get_timeline_name(&fence->base)) + fence->base.ops->fence_value_str(&fence->base, __entry->val, + sizeof(__entry->val)); + __entry->context = fence->base.context; + __assign_str(foreign_driver, + fence->fence->ops->get_driver_name ? + fence->fence->ops->get_driver_name(fence->fence) : + "unknown") + __assign_str(foreign_timeline, + fence->fence->ops->get_timeline_name ? + fence->fence->ops->get_timeline_name(fence->fence) : + "unknown") + fence->fence->ops->fence_value_str ? 
+ fence->fence->ops->fence_value_str( + fence->fence, __entry->foreign_val, + sizeof(__entry->foreign_val)) : + (void) strlcpy(__entry->foreign_val, + "unknown", sizeof(__entry->foreign_val)); + __entry->foreign_context = fence->fence->context; + ), + + TP_printk("driver=%s timeline=%s ctx=%llu val=%s foreign: driver=%s timeline=%s ctx=%llu val=%s", + __get_str(driver), __get_str(timeline), __entry->context, + __entry->val, __get_str(foreign_driver), + __get_str(foreign_timeline), __entry->foreign_context, + __entry->foreign_val + ) +); + +DEFINE_EVENT(pvr_fence_foreign, pvr_fence_foreign_create, + TP_PROTO(struct pvr_fence *fence), + TP_ARGS(fence) +); + +DEFINE_EVENT(pvr_fence_foreign, pvr_fence_foreign_release, + TP_PROTO(struct pvr_fence *fence), + TP_ARGS(fence) +); + +DEFINE_EVENT(pvr_fence_foreign, pvr_fence_foreign_signal, + TP_PROTO(struct pvr_fence *fence), + TP_ARGS(fence) +); + +#endif /* _TRACE_PVR_FENCE_H */ + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH . + +/* This is needed because the name of this file doesn't match TRACE_SYSTEM. */ +#define TRACE_INCLUDE_FILE pvr_fence_trace + +/* This part must be outside protection */ +#include diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_linux_fence.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_linux_fence.h new file mode 100644 index 000000000000..5cff7aa504a5 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_linux_fence.h @@ -0,0 +1,104 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Title PowerVR Linux fence compatibility header +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__PVR_LINUX_FENCE_H__) +#define __PVR_LINUX_FENCE_H__ + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) && \ + !defined(CHROMIUMOS_KERNEL_HAS_DMA_FENCE) +#include +#else +#include +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) && \ + !defined(CHROMIUMOS_KERNEL_HAS_DMA_FENCE) +/* Structures */ +#define dma_fence fence +#define dma_fence_array fence_array +#define dma_fence_cb fence_cb +#define dma_fence_ops fence_ops + +/* Defines and Enums */ +#define DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT FENCE_FLAG_ENABLE_SIGNAL_BIT +#define DMA_FENCE_FLAG_SIGNALED_BIT FENCE_FLAG_SIGNALED_BIT +#define DMA_FENCE_FLAG_USER_BITS FENCE_FLAG_USER_BITS + +#define DMA_FENCE_ERR FENCE_ERR +#define DMA_FENCE_TRACE FENCE_TRACE +#define DMA_FENCE_WARN FENCE_WARN + +/* Functions */ +#define dma_fence_add_callback fence_add_callback +#define dma_fence_context_alloc fence_context_alloc +#define dma_fence_default_wait fence_default_wait +#define dma_fence_is_signaled fence_is_signaled +#define dma_fence_enable_sw_signaling fence_enable_sw_signaling +#define dma_fence_free fence_free +#define dma_fence_get fence_get +#define dma_fence_get_rcu fence_get_rcu +#define dma_fence_init fence_init +#define dma_fence_is_array fence_is_array +#define dma_fence_put fence_put +#define dma_fence_signal fence_signal +#define dma_fence_wait fence_wait +#define to_dma_fence_array to_fence_array + +static inline signed long +dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout) +{ + signed long lret; + + lret = fence_wait_timeout(fence, intr, timeout); + if (lret || timeout) + return lret; + + return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ? 
1 : 0; +} + +#endif + +#endif /* !defined(__PVR_LINUX_FENCE_H__) */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_platform_drv.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_platform_drv.c new file mode 100644 index 000000000000..b2ca31a7a9aa --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_platform_drv.c @@ -0,0 +1,326 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Title PowerVR DRM platform driver +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#include +#include +#include +#include +#include +#include +#else +#include +#endif + +#include +#include + +#include "module_common.h" +#include "pvr_drv.h" +#include "pvrmodule.h" +#include "sysinfo.h" + +/* This header must always be included last */ +#include "kernel_compatibility.h" + +static struct drm_driver pvr_drm_platform_driver; + +#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) +/* + * This is an arbitrary value. If it's changed then the 'num_devices' module + * parameter description should also be updated to match. 
+ */ +#define MAX_DEVICES 16 + +static unsigned int pvr_num_devices = 1; +static struct platform_device **pvr_devices; + +#if defined(NO_HARDWARE) +static int pvr_num_devices_set(const char *val, + const struct kernel_param *param) +{ + int err; + + err = param_set_uint(val, param); + if (err) + return err; + + if (pvr_num_devices == 0 || pvr_num_devices > MAX_DEVICES) + return -EINVAL; + + return 0; +} + +static const struct kernel_param_ops pvr_num_devices_ops = { + .set = pvr_num_devices_set, + .get = param_get_uint, +}; + +module_param_cb(num_devices, &pvr_num_devices_ops, &pvr_num_devices, 0444); +MODULE_PARM_DESC(num_devices, + "Number of platform devices to register (default: 1 - max: 16)"); +#endif /* defined(NO_HARDWARE) */ +#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */ + +static int pvr_devices_register(void) +{ +#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) + struct platform_device_info pvr_dev_info = { + .name = SYS_RGX_DEV_NAME, + .id = -2, +#if defined(NO_HARDWARE) + /* Not all cores have 40 bit physical support, but this + * will work unless > 32 bit address is returned on those cores. + * In the future this will be fixed more correctly. 
+ */ + .dma_mask = DMA_BIT_MASK(40), +#else + .dma_mask = DMA_BIT_MASK(32), +#endif + }; + unsigned int i; + + BUG_ON(pvr_num_devices == 0 || pvr_num_devices > MAX_DEVICES); + + pvr_devices = kmalloc_array(pvr_num_devices, sizeof(*pvr_devices), + GFP_KERNEL); + if (!pvr_devices) + return -ENOMEM; + + for (i = 0; i < pvr_num_devices; i++) { + pvr_devices[i] = platform_device_register_full(&pvr_dev_info); + if (IS_ERR(pvr_devices[i])) { + DRM_ERROR("unable to register device %u (err=%ld)\n", + i, PTR_ERR(pvr_devices[i])); + pvr_devices[i] = NULL; + return -ENODEV; + } + } +#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */ + + return 0; +} + +static void pvr_devices_unregister(void) +{ +#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) + unsigned int i; + + BUG_ON(!pvr_devices); + + for (i = 0; i < pvr_num_devices && pvr_devices[i]; i++) + platform_device_unregister(pvr_devices[i]); + + kfree(pvr_devices); + pvr_devices = NULL; +#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */ +} + +static int pvr_probe(struct platform_device *pdev) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + struct drm_device *ddev; + int ret; + + DRM_DEBUG_DRIVER("device %p\n", &pdev->dev); + + ddev = drm_dev_alloc(&pvr_drm_platform_driver, &pdev->dev); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) + if (IS_ERR(ddev)) + return PTR_ERR(ddev); +#else + if (!ddev) + return -ENOMEM; +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) + /* Needed by drm_platform_set_busid */ + ddev->platformdev = pdev; +#endif + + /* + * The load callback, called from drm_dev_register, is deprecated, + * because of potential race conditions. Calling the function here, + * before calling drm_dev_register, avoids those potential races. 
+ */ + BUG_ON(pvr_drm_platform_driver.load != NULL); + ret = pvr_drm_load(ddev, 0); + if (ret) + goto err_drm_dev_put; + + ret = drm_dev_register(ddev, 0); + if (ret) + goto err_drm_dev_unload; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", + pvr_drm_platform_driver.name, + pvr_drm_platform_driver.major, + pvr_drm_platform_driver.minor, + pvr_drm_platform_driver.patchlevel, + pvr_drm_platform_driver.date, + ddev->primary->index); +#endif + return 0; + +err_drm_dev_unload: + pvr_drm_unload(ddev); +err_drm_dev_put: + drm_dev_put(ddev); + return ret; +#else + DRM_DEBUG_DRIVER("device %p\n", &pdev->dev); + + return drm_platform_init(&pvr_drm_platform_driver, pdev); +#endif +} + +static int pvr_remove(struct platform_device *pdev) +{ + struct drm_device *ddev = platform_get_drvdata(pdev); + + DRM_DEBUG_DRIVER("device %p\n", &pdev->dev); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + drm_dev_unregister(ddev); + + /* The unload callback, called from drm_dev_unregister, is + * deprecated. Call the unload function directly. 
+ */ + BUG_ON(pvr_drm_platform_driver.unload != NULL); + pvr_drm_unload(ddev); + + drm_dev_put(ddev); +#else + drm_put_dev(ddev); +#endif + return 0; +} + +static void pvr_shutdown(struct platform_device *pdev) +{ + struct drm_device *ddev = platform_get_drvdata(pdev); + struct pvr_drm_private *priv = ddev->dev_private; + + DRM_DEBUG_DRIVER("device %p\n", &pdev->dev); + + PVRSRVDeviceShutdown(priv->dev_node); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) +static struct of_device_id pvr_of_ids[] = { +#if defined(SYS_RGX_OF_COMPATIBLE) + { .compatible = SYS_RGX_OF_COMPATIBLE, }, +#endif + {}, +}; + +#if !defined(CHROMIUMOS_KERNEL) || !defined(MODULE) +MODULE_DEVICE_TABLE(of, pvr_of_ids); +#endif +#endif + +static struct platform_device_id pvr_platform_ids[] = { +#if defined(SYS_RGX_DEV_NAME) + { SYS_RGX_DEV_NAME, 0 }, +#endif + { } +}; + +#if !defined(CHROMIUMOS_KERNEL) || !defined(MODULE) +MODULE_DEVICE_TABLE(platform, pvr_platform_ids); +#endif + +static struct platform_driver pvr_platform_driver = { + .driver = { + .name = DRVNAME, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) + .of_match_table = of_match_ptr(pvr_of_ids), +#endif + .pm = &pvr_pm_ops, + }, + .id_table = pvr_platform_ids, + .probe = pvr_probe, + .remove = pvr_remove, + .shutdown = pvr_shutdown, +}; + +static int __init pvr_init(void) +{ + int err; + + DRM_DEBUG_DRIVER("\n"); + + pvr_drm_platform_driver = pvr_drm_generic_driver; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) + pvr_drm_platform_driver.set_busid = drm_platform_set_busid; +#endif + + err = PVRSRVDriverInit(); + if (err) + return err; + + err = platform_driver_register(&pvr_platform_driver); + if (err) + return err; + + return pvr_devices_register(); +} + +static void __exit pvr_exit(void) +{ + DRM_DEBUG_DRIVER("\n"); + + pvr_devices_unregister(); + platform_driver_unregister(&pvr_platform_driver); + PVRSRVDriverDeinit(); + + DRM_DEBUG_DRIVER("done\n"); +} + 
+late_initcall(pvr_init); +module_exit(pvr_exit); diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_sw_fence.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_sw_fence.c new file mode 100644 index 000000000000..e8f643072a5c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_sw_fence.c @@ -0,0 +1,200 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include +#include +#include +#include +#include +#include + +#include "pvr_sw_fence.h" + +struct pvr_sw_fence_context { + struct kref kref; + unsigned int context; + char context_name[32]; + char driver_name[32]; + atomic_t seqno; + atomic_t fence_count; +}; + +struct pvr_sw_fence { + struct dma_fence base; + struct pvr_sw_fence_context *fence_context; + spinlock_t lock; +}; + +#define to_pvr_sw_fence(fence) container_of(fence, struct pvr_sw_fence, base) + +const char *pvr_sw_fence_context_name(struct pvr_sw_fence_context *fctx) +{ + return fctx->context_name; +} + +void pvr_sw_fence_context_value_str(struct pvr_sw_fence_context *fctx, + char *str, int size) +{ + snprintf(str, size, "%d", atomic_read(&fctx->seqno)); +} + +static inline unsigned +pvr_sw_fence_context_seqno_next(struct pvr_sw_fence_context *fence_context) +{ + return atomic_inc_return(&fence_context->seqno) - 1; +} + +static const char *pvr_sw_fence_get_driver_name(struct dma_fence *fence) +{ + struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence); + + return pvr_sw_fence->fence_context->driver_name; +} + +static const char *pvr_sw_fence_get_timeline_name(struct dma_fence *fence) +{ + struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence); + + return 
pvr_sw_fence_context_name(pvr_sw_fence->fence_context); +} + +static void pvr_sw_fence_value_str(struct dma_fence *fence, char *str, int size) +{ + snprintf(str, size, "%llu", (u64) fence->seqno); +} + +static void pvr_sw_fence_timeline_value_str(struct dma_fence *fence, + char *str, int size) +{ + struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence); + + pvr_sw_fence_context_value_str(pvr_sw_fence->fence_context, str, size); +} + +static bool pvr_sw_fence_enable_signaling(struct dma_fence *fence) +{ + return true; +} + +static void pvr_sw_fence_context_destroy_kref(struct kref *kref) +{ + struct pvr_sw_fence_context *fence_context = + container_of(kref, struct pvr_sw_fence_context, kref); + unsigned int fence_count; + + fence_count = atomic_read(&fence_context->fence_count); + if (WARN_ON(fence_count)) + pr_debug("%s context has %u fence(s) remaining\n", + fence_context->context_name, fence_count); + + kfree(fence_context); +} + +static void pvr_sw_fence_release(struct dma_fence *fence) +{ + struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence); + + atomic_dec(&pvr_sw_fence->fence_context->fence_count); + kref_put(&pvr_sw_fence->fence_context->kref, + pvr_sw_fence_context_destroy_kref); + kfree(pvr_sw_fence); +} + +static const struct dma_fence_ops pvr_sw_fence_ops = { + .get_driver_name = pvr_sw_fence_get_driver_name, + .get_timeline_name = pvr_sw_fence_get_timeline_name, + .fence_value_str = pvr_sw_fence_value_str, + .timeline_value_str = pvr_sw_fence_timeline_value_str, + .enable_signaling = pvr_sw_fence_enable_signaling, + .wait = dma_fence_default_wait, + .release = pvr_sw_fence_release, +}; + +struct pvr_sw_fence_context * +pvr_sw_fence_context_create(const char *context_name, const char *driver_name) +{ + struct pvr_sw_fence_context *fence_context; + + fence_context = kmalloc(sizeof(*fence_context), GFP_KERNEL); + if (!fence_context) + return NULL; + + fence_context->context = dma_fence_context_alloc(1); + strlcpy(fence_context->context_name, 
context_name, + sizeof(fence_context->context_name)); + strlcpy(fence_context->driver_name, driver_name, + sizeof(fence_context->driver_name)); + atomic_set(&fence_context->seqno, 0); + atomic_set(&fence_context->fence_count, 0); + kref_init(&fence_context->kref); + + return fence_context; +} + +void pvr_sw_fence_context_destroy(struct pvr_sw_fence_context *fence_context) +{ + kref_put(&fence_context->kref, pvr_sw_fence_context_destroy_kref); +} + +struct dma_fence * +pvr_sw_fence_create(struct pvr_sw_fence_context *fence_context) +{ + struct pvr_sw_fence *pvr_sw_fence; + unsigned int seqno; + + pvr_sw_fence = kmalloc(sizeof(*pvr_sw_fence), GFP_KERNEL); + if (!pvr_sw_fence) + return NULL; + + spin_lock_init(&pvr_sw_fence->lock); + pvr_sw_fence->fence_context = fence_context; + + seqno = pvr_sw_fence_context_seqno_next(fence_context); + dma_fence_init(&pvr_sw_fence->base, &pvr_sw_fence_ops, + &pvr_sw_fence->lock, fence_context->context, seqno); + + atomic_inc(&fence_context->fence_count); + kref_get(&fence_context->kref); + + return &pvr_sw_fence->base; +} diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_sw_fence.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_sw_fence.h new file mode 100644 index 000000000000..4c7d1f325834 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_sw_fence.h @@ -0,0 +1,61 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__PVR_SW_FENCES_H__) +#define __PVR_SW_FENCES_H__ + +#include "pvr_linux_fence.h" + +struct pvr_sw_fence_context; + +struct pvr_sw_fence_context *pvr_sw_fence_context_create(const char *name, + const char *driver_name); +void pvr_sw_fence_context_destroy(struct pvr_sw_fence_context *fence_context); +struct dma_fence *pvr_sw_fence_create(struct pvr_sw_fence_context * + fence_context); + +const char *pvr_sw_fence_context_name(struct pvr_sw_fence_context *fctx); +void pvr_sw_fence_context_value_str(struct pvr_sw_fence_context *fctx, + char *str, int size); + +#endif /* !defined(__PVR_SW_FENCES_H__) */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_sync.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_sync.h new file mode 100644 index 000000000000..e1c57d4e00f1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_sync.h @@ -0,0 +1,101 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File pvr_sync.h +@Title Kernel driver for Android's sync mechanism +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _PVR_SYNC_H +#define _PVR_SYNC_H + +#include + +#include "pvr_fd_sync_kernel.h" +#include "services_kernel_client.h" + + +/* Services internal interface */ + +/** + * pvr_sync_init() - create an internal sync context + * @dev: Linux device + * + * Return: PVRSRV_OK on success. + */ +enum PVRSRV_ERROR pvr_sync_init(struct device *dev); + + +/** + * pvr_sync_deinit() - destroy an internal sync context + * + * Drains any work items with outstanding sync fence updates/dependencies. 
+ */ +void pvr_sync_deinit(void); + +enum PVRSRV_ERROR pvr_sync_fence_wait(void *fence, u32 timeout_in_ms); + +enum PVRSRV_ERROR pvr_sync_fence_release(void *fence); + +enum PVRSRV_ERROR pvr_sync_fence_get(int fence_fd, void **fence_out); + +enum PVRSRV_ERROR pvr_sync_sw_timeline_fence_create(int timeline_fd, + const char *fence_name, + int *fence_fd_out, + u64 *sync_pt_idx); + +enum PVRSRV_ERROR pvr_sync_sw_timeline_advance(void *timeline, + u64 *sync_pt_idx); + +enum PVRSRV_ERROR pvr_sync_sw_timeline_release(void *timeline); + +enum PVRSRV_ERROR pvr_sync_sw_timeline_get(int timeline_fd, + void **timeline_out); + +enum PVRSRV_ERROR +sync_dump_fence(void *sw_fence_obj, + DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, + void *dump_debug_file); + +enum PVRSRV_ERROR +sync_sw_dump_timeline(void *sw_timeline_obj, + DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, + void *dump_debug_file); + +#endif /* _PVR_SYNC_H */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_sync2.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_sync2.c new file mode 100644 index 000000000000..88ee4a2ee980 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_sync2.c @@ -0,0 +1,2812 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File pvr_sync.c +@Title Kernel driver for Android's sync mechanism +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pvr_drv.h" +#include "pvr_fd_sync_kernel.h" +#include "services_kernel_client.h" +#include "pvr_sync.h" +#include "pvrsrv_sync_km.h" +#include "sync_checkpoint_external.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) +#include +#include +#else +#include <../drivers/staging/android/sync.h> +#include <../drivers/staging/android/sw_sync.h> +#endif + +#include "linux_sw_sync.h" + +#include "kernel_compatibility.h" + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) + +static inline int sync_fence_get_status(struct sync_fence *psFence) +{ + return psFence->status; +} + +static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt) +{ + return pt->parent; +} + +static inline int sync_pt_get_status(struct sync_pt *pt) +{ + return pt->status; +} + +static inline ktime_t sync_pt_get_timestamp(struct sync_pt *pt) +{ + return pt->timestamp; +} + +#define for_each_sync_pt(s, f, c) \ + list_for_each_entry((s), &(f)->pt_list_head, pt_list) + +#else /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */ + +static inline int sync_fence_get_status(struct sync_fence *psFence) +{ + int iStatus = atomic_read(&psFence->status); + + /* + * When Android sync was rebased on top of fences the sync_fence status + * values changed from 0 meaning 'active' to 'signalled' and, likewise, + * values greater than 0 went from meaning 'signalled' to 'active' + * (where the value corresponds to the number of active sync points). + * + * Convert to the old style status values. + */ + return iStatus > 0 ? 0 : iStatus ? iStatus : 1; +} + +static inline int sync_pt_get_status(struct sync_pt *pt) +{ + /* No error state for raw dma-buf fences */ + return fence_is_signaled(&pt->base) ? 
1 : 0; +} + +static inline ktime_t sync_pt_get_timestamp(struct sync_pt *pt) +{ + return pt->base.timestamp; +} + +#define for_each_sync_pt(s, f, c) \ + for ((c) = 0, (s) = (f)->num_fences == 0 ? \ + NULL : (struct sync_pt *)(f)->cbs[0].sync_pt; \ + (c) < (f)->num_fences; \ + (c)++, (s) = (c) < (f)->num_fences ? \ + (struct sync_pt *)(f)->cbs[c].sync_pt : NULL) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */ + +/* #define DEBUG_OUTPUT 1 */ + +#ifdef DEBUG_OUTPUT +#define DPF(fmt, ...) pr_err("pvr_sync2: " fmt "\n", __VA_ARGS__) +#else +#define DPF(fmt, ...) do {} while (0) +#endif + +#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, ...) \ + do { \ + if (pfnDumpDebugPrintf) { \ + pfnDumpDebugPrintf(pvDumpDebugFile, __VA_ARGS__); \ + } else { \ + pr_info("pvr_sync2: " __VA_ARGS__); \ + } \ + } while (0) + +#if defined(PDUMP) +#define SYNC_MAX_POOL_SIZE 0 +#else +#define SYNC_MAX_POOL_SIZE 10 +#endif + +enum { + SYNC_TL_TYPE = 0, + SYNC_PT_FENCE_TYPE = 1, + SYNC_PT_CLEANUP_TYPE = 2, + SYNC_PT_FOREIGN_FENCE_TYPE = 3, + SYNC_PT_FOREIGN_CLEANUP_TYPE = 4, +}; + +/* Services client sync prim wrapper. This is used to hold debug information + * and make it possible to cache unused syncs. + */ +struct pvr_sync_native_sync_prim { + /* List for the sync pool support. */ + struct list_head list; + + /* Base services sync prim structure */ + struct PVRSRV_CLIENT_SYNC_PRIM *client_sync; + + /* The next queued value which should be used */ + u32 next_value; + + /* Every sync data will get some unique id */ + u32 id; + + /* FWAddr used by the client sync */ + u32 vaddr; + + /* The type this sync is used for in our driver. Used in + * pvr_sync_debug_request(). + */ + u8 type; + + /* A debug class name also printed in pvr_sync_debug_request(). */ + char class[32]; +}; + +struct pvr_sync_native_sync_checkpoint { + /* List for the sync pool support. 
*/ + struct list_head list; + + /* Base services sync checkpoint */ + PSYNC_CHECKPOINT client_sync_checkpoint; + + /* Every sync data will get some unique id */ + u32 id; + + /* FWAddr used by the client sync */ + u32 vaddr; + + /* The type this sync is used for in our driver. Used in + * pvr_sync_debug_request(). + */ + u8 type; + + /* A debug class name also printed in pvr_sync_debug_request(). */ + char class[32]; + + /* We store the foreign sync fence (if applicable), for debug purposes. */ + struct sync_fence *foreign_sync_fence; + char foreign_sync_fence_name[32]; +}; + +struct pvr_sw_sync_timeline { + /* sw_sync_timeline must come first to allow casting of a ptr */ + /* to the wrapping struct to a ptr to the sw_sync_timeline */ + struct sw_sync_timeline *sw_sync_timeline; + u64 current_value; + u64 next_value; + /* Reference count for this object */ + struct kref kref; +}; + +/* This is the actual timeline metadata. We might keep this around after the + * base sync driver has destroyed the pvr_sync_timeline_wrapper object. + */ +struct pvr_sync_timeline { + /* Back reference to the sync_timeline. Not always valid */ + struct sync_timeline *obj; + + /* Global timeline list support */ + struct list_head list; + + /* List of sync points alive on this timeline. */ + struct list_head sync_list; + + /* Timeline sync */ + struct pvr_sync_timeline_kernel_pair *kernel; + + /* Reference count for this object */ + struct kref kref; + + /* Used only by pvr_sync_update_all_timelines(). False if the timeline + * has been detected as racing with pvr_sync_destroy_timeline(). + */ + bool valid; +}; + +/* This is the IMG extension of a sync_timeline */ +struct pvr_sync_timeline_wrapper { + /* Original timeline struct. Needs to come first. */ + struct sync_timeline obj; + + /* Pointer to extra timeline data. Separated life-cycle. 
*/ + struct pvr_sync_timeline *timeline; +}; + +struct pvr_sync_timeline_kernel_pair { + /* Binary sync point representing the android native sync in hw. */ + struct pvr_sync_native_sync_prim *fence_sync; + + /* Sync points can go away when there are deferred hardware operations + * still outstanding. We must not free the SERVER_SYNC_PRIMITIVE until + * the hardware is finished, so we add it to a defer list which is + * processed periodically ("defer-free"). + * + * Note that the defer-free list is global, not per-timeline. + */ + struct list_head list; +}; + +struct pvr_sync_kernel_pair { + /* Binary sync point representing the android native sync in hw. */ + struct pvr_sync_native_sync_checkpoint *fence_sync; + + /* Sync points can go away when there are deferred hardware operations + * still outstanding. We must not free the SERVER_SYNC_PRIMITIVE until + * the hardware is finished, so we add it to a defer list which is + * processed periodically ("defer-free"). + * + * Note that the defer-free list is global, not per-timeline. + */ + struct list_head list; +}; + +struct pvr_sync_data { + /* Every sync point has a services sync object. This object is used + * by the hardware to enforce ordering -- it is attached as a source + * dependency to various commands. + */ + struct pvr_sync_kernel_pair *kernel; + + /* The timeline update value for this sync point. */ + u32 timeline_update_value; + + /* This refcount is incremented at create and dup time, and decremented + * at free time. It ensures the object doesn't start the defer-free + * process until it is no longer referenced. + */ + struct kref kref; +}; + +/* This is the IMG extension of a sync_pt */ +struct pvr_sync_pt { + /* Original sync_pt structure. Needs to come first. 
*/ + struct sync_pt pt; + + /* Private shared data */ + struct pvr_sync_data *sync_data; + + /* The timeline on which this pvr_sync_pt was created */ + struct pvr_sync_timeline *timeline; +}; + +/* This is the IMG extension of a sync_fence */ +struct pvr_sync_fence { + /* Original sync_fence structure. Needs to come first. */ + struct sync_fence *fence; + + /* To ensure callbacks are always received for fences / sync_pts, even + * after the fence has been 'put' (freed), we must take a reference to + * the fence. We still need to 'put' the fence ourselves, but this might + * happen in irq context, where fput() is not allowed (in kernels <3.6). + * We must add the fence to a list which is processed in WQ context. + */ + struct list_head list; +}; + +/* Any sync point from a foreign (non-PVR) timeline needs to have a "shadow" + * sync prim. This is modelled as a software operation. The foreign driver + * completes the operation by calling a callback we registered with it. + */ +struct pvr_sync_fence_waiter { + /* Base sync driver waiter structure */ + struct sync_fence_waiter waiter; + + /* "Shadow" sync prim backing the foreign driver's sync_pt */ + struct pvr_sync_kernel_pair *kernel; + + /* Optimizes lookup of fence for defer-put operation */ + struct pvr_sync_fence *sync_fence; +}; + +/* Global data for the sync driver */ +static struct { + /* Complete notify handle */ + void *command_complete_handle; + + /* Defer-free workqueue. Syncs may still be in use by the HW when freed, + * so we have to keep them around until the HW is done with them at + * some later time. This workqueue iterates over the list of free'd + * syncs, checks if they are in use, and frees the sync device memory + * when done with. + */ + struct workqueue_struct *defer_free_wq; + struct work_struct defer_free_work; + + /* check_status workqueue: When a foreign point is completed, a SW + * operation marks the sync as completed to allow the operations to + * continue. 
This completion may require the hardware to be notified, + * which may be expensive/take locks, so we push that to a workqueue + */ + struct workqueue_struct *check_status_wq; + struct work_struct check_status_work; + + /* Context used to create client sync prims. */ + struct SYNC_PRIM_CONTEXT *sync_prim_context; + + /* Debug notify handle */ + void *debug_notify_handle; + + /* Unique id counter for the sync prims */ + atomic_t sync_id; + + /* The global event object (used to wait between checks for + * deferred-free sync status). + */ + void *event_object_handle; + + /* struct used to register with sync_checkpoint.c */ + PFN_SYNC_CHECKPOINT_STRUCT sync_checkpoint_ops; +} pvr_sync_data; + +/* List of timelines created by this driver */ +static LIST_HEAD(timeline_list); +static DEFINE_SPINLOCK(timeline_list_lock); + +/* Sync pool support */ +static LIST_HEAD(sync_pool_free_list); +static LIST_HEAD(sync_pool_active_list); +static DEFINE_MUTEX(sync_pool_mutex); +static s32 sync_pool_size;// = 0; +static u32 sync_pool_created;// = 0; +static u32 sync_pool_reused;// = 0; + +/* pvr_sync_pt_active_list is used for debug - when a + * pvr sync_native_sync_checkpoint is created it is added + * to this list (which contains all existing points for + * all pvr timelines). + */ +static LIST_HEAD(pvr_sync_pt_active_list); +static DEFINE_SPINLOCK(pvr_sync_pt_active_list_spinlock); +/* pvr_sw_sync_pt_active_list is used for debug - when a + * pvr sw_sync_native_sync_checkpoint is created it is added + * to this list (which contains all existing points for + * all pvr sw timelines). + */ +static LIST_HEAD(pvr_sw_sync_pt_active_list); +static DEFINE_MUTEX(pvr_sw_sync_pt_active_list_mutex); + +/* The "defer-free" sync_checkpoint list. Driver global. */ +static LIST_HEAD(sync_checkpoint_free_list); +static DEFINE_SPINLOCK(sync_checkpoint_free_list_spinlock); + +/* The "defer-free-timeline" object list. Driver global. 
*/ +static LIST_HEAD(timeline_free_list); +static DEFINE_SPINLOCK(timeline_free_list_spinlock); + +/* The "defer-put" object list. Driver global. */ +static LIST_HEAD(sync_fence_put_list); +static DEFINE_SPINLOCK(sync_fence_put_list_spinlock); + +static void pvr_sync_update_all_timelines(void *command_complete_handle); +static void pvr_sync_free_checkpoint_list_mem(void *mem_ptr); + +static void _dump_fence(struct sync_fence *fence, + DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, + void *dump_debug_file) +{ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 10, 0)) + struct sync_pt *sync_point; + char time_str[16] = { '\0' }; + char pt_value_str[64] = { '\0' }; + char timeline_value_str[64] = { '\0' }; + char value_str[132] = { '\0' }; + int status = sync_fence_get_status(fence); + int i; + + PVR_DUMPDEBUG_LOG(dump_debug_printf, + dump_debug_file, + "[%p] %s: %s ref=%u Sync Points:\n", + fence, + fence->name, + (status > 0 ? + "Signalled" : status ? + "Error" : "Active"), + atomic_read(&fence->kref.refcount)); + + for_each_sync_pt(sync_point, fence, i) { + + struct sync_timeline *timeline = sync_pt_parent(sync_point); + ktime_t timestamp = sync_pt_get_timestamp(sync_point); + struct timeval tv = ktime_to_timeval(timestamp); + int i_pt_status = sync_pt_get_status(sync_point); + + char time_pt[16] = { '\0' }; + const struct fence_ops *fence_ops = sync_point->base.ops; + + snprintf(time_str, + sizeof(time_str), + "@%ld.%06ld", + tv.tv_sec, + tv.tv_usec); + + if (timeline->ops->pt_value_str && + timeline->ops->timeline_value_str) { + timeline->ops->pt_value_str(sync_point, + pt_value_str, + sizeof(pt_value_str)); + timeline->ops->timeline_value_str(timeline, + timeline_value_str, + sizeof(timeline_value_str)); + snprintf(value_str, + sizeof(value_str), + "%s / %s", + timeline_value_str, + pt_value_str); + } + fence_ops->timeline_value_str(&sync_point->base, + time_pt, + sizeof(time_pt)); + + PVR_DUMPDEBUG_LOG(dump_debug_printf, + dump_debug_file, + "\t@%u Ref=%u TS=%s State=%s 
%s TLN=%s\n", + sync_point->base.seqno, + atomic_read(&sync_point->base.refcount.refcount), + time_pt, + (i_pt_status > 0 ? + "signalled" : i_pt_status ? + "error" : "active"), + value_str, + fence_ops->get_timeline_name(&sync_point->base)); + } +#else + PVR_DUMPDEBUG_LOG(dump_debug_printf, + dump_debug_file, + "Fence stats not available on this platform!"); +#endif +} + +/* Sync prim helpers */ +static inline void set_sync_prim_value(struct pvr_sync_native_sync_prim *sync, + u32 value) +{ + *(sync->client_sync->pui32LinAddr) = value; +} + +static inline u32 get_sync_prim_value(struct pvr_sync_native_sync_prim *sync) +{ + return *(sync->client_sync->pui32LinAddr); +} + +static inline void complete_sync_prim(struct pvr_sync_native_sync_prim *sync) +{ + *(sync->client_sync->pui32LinAddr) = sync->next_value; +} + +static inline int is_sync_prim_met(struct pvr_sync_native_sync_prim *sync) +{ + return *(sync->client_sync->pui32LinAddr) == sync->next_value; +} + +/* Checkpoint helpers */ +static inline u32 get_sync_checkpoint_value(struct pvr_sync_native_sync_checkpoint *sync) +{ + PVRSRV_SYNC_CHECKPOINT_STATE checkpoint_state = PVRSRV_SYNC_CHECKPOINT_ACTIVE; + + if (SyncCheckpointIsSignalled(sync->client_sync_checkpoint, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) + checkpoint_state = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + else if (SyncCheckpointIsErrored(sync->client_sync_checkpoint, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) + checkpoint_state = PVRSRV_SYNC_CHECKPOINT_ERRORED; + + return (u32)checkpoint_state; +} + +static inline char get_sync_checkpoint_char(struct pvr_sync_native_sync_checkpoint *sync) +{ + char cState = 'A'; + + if (SyncCheckpointIsSignalled(sync->client_sync_checkpoint, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) + cState = 'S'; + else if (SyncCheckpointIsErrored(sync->client_sync_checkpoint, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) + cState = 'E'; + + return cState; +} + +static inline void error_sync_checkpoint(struct pvr_sync_native_sync_checkpoint *sync, u32 
fence_sync_flags) +{ + SyncCheckpointError(sync->client_sync_checkpoint, fence_sync_flags); +} + +static inline void complete_sync_checkpoint(struct pvr_sync_native_sync_checkpoint *sync, u32 fence_sync_flags) +{ + SyncCheckpointSignal(sync->client_sync_checkpoint, fence_sync_flags); +} + +static inline int is_sync_checkpoint_met(struct pvr_sync_native_sync_checkpoint *sync, u32 fence_sync_flags) +{ + return (int)SyncCheckpointIsSignalled(sync->client_sync_checkpoint, fence_sync_flags); +} + +static inline int is_sync_checkpoint_errored(struct pvr_sync_native_sync_checkpoint *sync, u32 fence_sync_flags) +{ + return (int)SyncCheckpointIsErrored(sync->client_sync_checkpoint, fence_sync_flags); +} + +/* Timeline helpers */ +static inline struct pvr_sync_timeline *get_timeline(struct sync_timeline *obj) +{ + return ((struct pvr_sync_timeline_wrapper *)obj)->timeline; +} + +static inline struct pvr_sync_timeline *get_timeline_pt(struct sync_pt *pt) +{ + return get_timeline(sync_pt_parent(pt)); +} + +static inline int +pvr_sync_has_kernel_signaled(struct pvr_sync_kernel_pair *kernel, u32 fence_sync_flags) +{ + /* Idle syncs are always signaled */ + if (!kernel) + return 1; + + return is_sync_checkpoint_met(kernel->fence_sync, fence_sync_flags); +} + +#ifdef DEBUG_OUTPUT + +static char *debug_info_timeline(struct pvr_sync_timeline *timeline) +{ + static char info[256]; + + if (timeline->kernel->fence_sync) { + snprintf(info, sizeof(info), + "n='%s' id=%u fw=0x%x tl_curr=%u tl_next=%u", + timeline->obj ? timeline->obj->name : "?", + timeline->kernel->fence_sync->id, + timeline->kernel->fence_sync->vaddr, + get_sync_prim_value(timeline->kernel->fence_sync), + timeline->kernel->fence_sync->next_value); + } else { + snprintf(info, sizeof(info), + "n='%s' id=n/a fw=n/a tl_curr=n/a tl_next=n/a", + timeline->obj ? 
timeline->obj->name : "?"); + } + + return info; +} + +static char *debug_info_sync_pt(struct sync_pt *pt) +{ + //struct pvr_sync_timeline *timeline = get_timeline_pt(pt); + //struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)pt; + //struct pvr_sync_kernel_pair *kernel = pvr_pt->sync_data->kernel; + static char info[256], info1[256]; + +#if 1 + info[0] = '\0'; + info1[0] = '\0'; +#else + if (kernel) { + if (timeline->kernel->fence_sync) { + snprintf(info, sizeof(info), + "status=%d tl_taken=%u ref=%d # sync: id=%u fw=0x%x curr=%u next=%u%s # tl: %s", + pvr_sync_has_kernel_signaled(kernel, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT), + pvr_pt->sync_data->timeline_update_value, + atomic_read(&pvr_pt->sync_data->kref.refcount), + kernel->fence_sync->id, + kernel->fence_sync->vaddr, + get_sync_prim_value(timeline->kernel->fence_sync), + kernel->fence_sync->next_value, + info1, debug_info_timeline(timeline)); + } + } else { + snprintf(info, sizeof(info), + "status=%d tl_taken=%u ref=%d # sync: idle # tl: %s", + pvr_sync_has_kernel_signaled(kernel, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT), + pvr_pt->sync_data->timeline_update_value, + atomic_read(&pvr_pt->sync_data->kref.refcount), + debug_info_timeline(timeline)); + } +#endif + return info; +} + +#endif /* DEBUG_OUTPUT */ + +static u32 sync_pool_get_callers; +static enum PVRSRV_ERROR +sync_pool_get(struct pvr_sync_native_sync_prim **_sync, + const char *class_name, u8 type) +{ + struct pvr_sync_native_sync_prim *sync; + enum PVRSRV_ERROR error = PVRSRV_OK; + u32 sync_addr; + + mutex_lock(&sync_pool_mutex); + sync_pool_get_callers++; + + if (list_empty(&sync_pool_free_list)) { + /* If there is nothing in the pool, create a new sync prim. 
*/ + sync = kmalloc(sizeof(*sync), + GFP_KERNEL); + if (!sync) { + pr_err("pvr_sync2: %s: Failed to allocate sync data\n", + __func__); + error = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_unlock; + } + + error = SyncPrimAlloc(pvr_sync_data.sync_prim_context, + &sync->client_sync, class_name); + if (error != PVRSRV_OK) { + pr_err("pvr_sync2: %s: Failed to allocate sync prim (%s)\n", + __func__, PVRSRVGetErrorString(error)); + goto err_free; + } + + error = SyncPrimGetFirmwareAddr(sync->client_sync, &sync_addr); + if (error != PVRSRV_OK) { + pr_err("pvr_sync2: %s: Failed to get FW address (%s)\n", + __func__, PVRSRVGetErrorString(error)); + goto err_sync_prim_free; + } + sync->vaddr = sync_addr; + + list_add_tail(&sync->list, &sync_pool_active_list); + ++sync_pool_created; + } else { + sync = list_first_entry(&sync_pool_free_list, + struct pvr_sync_native_sync_prim, list); + list_move_tail(&sync->list, &sync_pool_active_list); + --sync_pool_size; + ++sync_pool_reused; + } + + sync->id = atomic_inc_return(&pvr_sync_data.sync_id); + sync->type = type; + + strncpy(sync->class, class_name, sizeof(sync->class)); + sync->class[sizeof(sync->class) - 1] = '\0'; + /* It's crucial to reset the sync to zero */ + set_sync_prim_value(sync, 0); + sync->next_value = 0; + + *_sync = sync; + +err_unlock: + sync_pool_get_callers--; + mutex_unlock(&sync_pool_mutex); + return error; + +err_sync_prim_free: + SyncPrimFree(sync->client_sync); + +err_free: + kfree(sync); + goto err_unlock; +} + +static u32 sync_pool_put_callers; + +static void sync_pool_put(struct pvr_sync_native_sync_prim *sync) +{ + mutex_lock(&sync_pool_mutex); + sync_pool_put_callers++; + + if (sync_pool_size < SYNC_MAX_POOL_SIZE) { + /* Mark it as unused */ + set_sync_prim_value(sync, 0xffffffff); + + list_move(&sync->list, &sync_pool_free_list); + ++sync_pool_size; + } else { + /* Mark it as invalid */ + set_sync_prim_value(sync, 0xdeadbeef); + + list_del(&sync->list); + SyncPrimFree(sync->client_sync); + kfree(sync); + 
} + + sync_pool_put_callers--; + mutex_unlock(&sync_pool_mutex); +} + +static void sync_pool_clear(void) +{ + struct pvr_sync_native_sync_prim *sync, *n; + + mutex_lock(&sync_pool_mutex); + + list_for_each_entry_safe(sync, n, &sync_pool_free_list, list) { + /* Mark it as invalid */ + set_sync_prim_value(sync, 0xdeadbeef); + + list_del(&sync->list); + SyncPrimFree(sync->client_sync); + kfree(sync); + --sync_pool_size; + } + + mutex_unlock(&sync_pool_mutex); +} + +static void pvr_sync_debug_request(void *hDebugRequestHandle, + u32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + struct pvr_sync_timeline *tl; + struct pvr_sync_native_sync_checkpoint *sync; + unsigned long flags; + + static const char *const type_names[] = { + "Timeline", "Fence", "Cleanup", + "Foreign Fence", "Foreign Cleanup" + }; + + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) { + /* if timeline_list_lock and pvr_sync_pt_active_list_spinlock + * are acquired together timeline_list_lock must be always acquired + * first */ + spin_lock_irqsave(&timeline_list_lock, flags); + spin_lock(&pvr_sync_pt_active_list_spinlock); + + PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, + "------[ Native Fence Sync: timelines ]------"); + PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, + "foreign timeline:\n"); + + list_for_each_entry(sync, &pvr_sync_pt_active_list, list) { + BUG_ON(sync->type >= ARRAY_SIZE(type_names)); + + PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, + " @%u: fwaddr=%#08x enqu=%u ref=%u state=%s %s (%s)\n", + sync->id, + sync->vaddr, + SyncCheckpointGetEnqueuedCount(sync->client_sync_checkpoint), + SyncCheckpointGetReferenceCount(sync->client_sync_checkpoint), + SyncCheckpointGetStateString(sync->client_sync_checkpoint), + sync->class, + type_names[sync->type]); + } + + list_for_each_entry(tl, &timeline_list, list) { + if (tl->kernel->fence_sync) { + PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, + 
"%s: @%u/%u refs=%u fwaddr=%#08x\n", + tl->obj->name, + get_sync_prim_value(tl->kernel->fence_sync), + tl->kernel->fence_sync->next_value, + refcount_read(&tl->kref.refcount), + tl->kernel->fence_sync->vaddr); + } else { + PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, + "%s: refs=%u\n", + tl->obj->name, + refcount_read(&tl->kref.refcount)); + } + + list_for_each_entry(sync, &tl->sync_list, list) { + BUG_ON(sync->type >= ARRAY_SIZE(type_names)); + + PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, + " @%u: fwaddr=%#08x enq=%u ref=%u state=%s %s (%s)\n", + sync->id, + sync->vaddr, + SyncCheckpointGetEnqueuedCount(sync->client_sync_checkpoint), + SyncCheckpointGetReferenceCount(sync->client_sync_checkpoint), + SyncCheckpointGetStateString(sync->client_sync_checkpoint), + sync->class, + type_names[sync->type]); + } + } + + spin_unlock(&pvr_sync_pt_active_list_spinlock); + spin_unlock_irqrestore(&timeline_list_lock, flags); + } +} + +static struct sync_pt *pvr_sync_dup(struct sync_pt *sync_pt) +{ + struct pvr_sync_pt *pvr_pt_a = (struct pvr_sync_pt *)sync_pt; + struct pvr_sync_pt *pvr_pt_b = NULL; + + DPF("%s: # %s", __func__, debug_info_sync_pt(sync_pt)); + + pvr_pt_b = (struct pvr_sync_pt *) + sync_pt_create(sync_pt_parent(sync_pt), + sizeof(*pvr_pt_b)); + if (!pvr_pt_b) { + pr_err("pvr_sync2: %s: Failed to dup sync pt\n", __func__); + goto err_out; + } + + kref_get(&pvr_pt_a->sync_data->kref); + + pvr_pt_b->sync_data = pvr_pt_a->sync_data; + +err_out: + return (struct sync_pt *)pvr_pt_b; +} + +static int pvr_sync_has_signaled(struct sync_pt *sync_pt) +{ + struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt; + + DPF("%s: # %s", __func__, debug_info_sync_pt(sync_pt)); + + return pvr_sync_has_kernel_signaled(pvr_pt->sync_data->kernel, + PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT); +} + +static int pvr_sync_compare(struct sync_pt *a, struct sync_pt *b) +{ + u32 a1 = ((struct pvr_sync_pt *)a)->sync_data->timeline_update_value; + u32 b1 = ((struct pvr_sync_pt 
*)b)->sync_data->timeline_update_value; + + DPF("%s: a # %s", __func__, debug_info_sync_pt(a)); + DPF("%s: b # %s", __func__, debug_info_sync_pt(b)); + + if (a1 == b1) + return 0; + + /* Take integer wrapping into account */ + return ((s32)a1 - (s32)b1) < 0 ? -1 : 1; +} + +static void check_for_sync_prim(struct pvr_sync_native_sync_prim *sync) +{ +#ifndef NO_HARDWARE + void *event_object; + enum PVRSRV_ERROR error = PVRSRV_OK; + + if (!sync || is_sync_prim_met(sync)) + return; + + error = OSEventObjectOpen( + pvr_sync_data.event_object_handle, + &event_object); + if (error != PVRSRV_OK) { + pr_err("pvr_sync2: %s: Error opening event object (%s)\n", + __func__, + PVRSRVGetErrorString(error)); + return; + } + + if (!is_sync_prim_met(sync)) { + /* This debug will indicate if pvr_sync is stuck waiting for a sync prim */ + pr_err("pvr_sync2: %s: sync prim<%p> %s (%d != %d)\n", + __func__, sync->client_sync, sync->class, + *(sync->client_sync->pui32LinAddr), sync->next_value); + } + + OSEventObjectClose(event_object); +#endif /* NO_HARDWARE */ +} + +static void pvr_sync_defer_free_checkpoints(struct pvr_sync_kernel_pair *kernel) +{ + unsigned long flags; + + spin_lock_irqsave(&sync_checkpoint_free_list_spinlock, flags); + list_add_tail(&kernel->list, &sync_checkpoint_free_list); + spin_unlock_irqrestore(&sync_checkpoint_free_list_spinlock, flags); + + queue_work(pvr_sync_data.defer_free_wq, &pvr_sync_data.defer_free_work); +} + +static void pvr_sync_timeline_defer_free(struct pvr_sync_timeline_kernel_pair *kernel) +{ + unsigned long flags; + + spin_lock_irqsave(&timeline_free_list_spinlock, flags); + list_add_tail(&kernel->list, &timeline_free_list); + spin_unlock_irqrestore(&timeline_free_list_spinlock, flags); + + queue_work(pvr_sync_data.defer_free_wq, &pvr_sync_data.defer_free_work); +} + +/* This function assumes the timeline_list_lock is held while it runs */ + +static void pvr_sync_destroy_timeline_locked(struct kref *kref) +{ + unsigned long flags; + struct 
pvr_sync_timeline *timeline = (struct pvr_sync_timeline *) + container_of(kref, struct pvr_sync_timeline, kref); + + pvr_sync_timeline_defer_free(timeline->kernel); + /* timeline_list_lock is already locked so it's safe to acquire + * this here */ + spin_lock_irqsave(&pvr_sync_pt_active_list_spinlock, flags); + list_del(&timeline->sync_list); + spin_unlock_irqrestore(&pvr_sync_pt_active_list_spinlock, flags); + list_del(&timeline->list); + kfree(timeline); +} + +static void pvr_sw_sync_destroy_timeline(struct kref *kref) +{ + struct pvr_sw_sync_timeline *pvr_sw_timeline = (struct pvr_sw_sync_timeline *) + container_of(kref, struct pvr_sw_sync_timeline, kref); + struct sync_timeline *obj = (void *)pvr_sw_timeline->sw_sync_timeline; + u32 unsignalled_points = 0; + + /* signal any unsignalled points on the sw timeline */ + while (pvr_sw_timeline->current_value < pvr_sw_timeline->next_value-1) { + pvr_sync_sw_timeline_advance(pvr_sw_timeline, NULL); + unsignalled_points++; + } + + if (unsignalled_points > 0) + { + pr_err("pvr_sync2: %s: signalled %d sw sync pts for timeline <%p> %s\n", + __func__, unsignalled_points, pvr_sw_timeline, obj->name); + } + + sync_timeline_destroy(obj); + kfree(pvr_sw_timeline); +} + +static void pvr_sync_release_timeline(struct sync_timeline *obj) +{ + struct pvr_sync_timeline *timeline = get_timeline(obj); + unsigned long flags; + + /* If pvr_sync_open failed after calling sync_timeline_create, this + * can be called with a timeline that has not got a timeline sync + * or been added to our timeline list. Use a NULL timeline to + * detect and handle this condition + */ + if (!timeline) + return; + + DPF("%s: # %s", __func__, debug_info_timeline(timeline)); + + if (timeline->kernel->fence_sync) + check_for_sync_prim(timeline->kernel->fence_sync); + + /* Take timeline_list_lock before clearing timeline->obj, to + * avoid the chance of doing so while the list is being iterated + * by pvr_sync_update_all_timelines(). 
+ */ + spin_lock_irqsave(&timeline_list_lock, flags); + + /* Whether or not we're the last reference, obj is going away + * after this function returns, so remove our back reference + * to it. + */ + timeline->obj = NULL; + + /* This might be the last reference to the timeline object. + * If so, we'll go ahead and delete it now. + */ + kref_put(&timeline->kref, pvr_sync_destroy_timeline_locked); + + spin_unlock_irqrestore(&timeline_list_lock, flags); +} + +/* The print_obj() and print_pt() functions have been removed, so we're forced + * to use the timeline_value_str() and pt_value_str() functions. These are + * worse because we're limited to 64 characters, and the strings for sync + * pts have to be formatted like: + * + * pt active: pt_info / tl_info + * + * For us, the tl_info is complicated and doesn't need to be repeated over + * and over. So try to detect the way sync_print_pt() calls the two value_str + * functions and change what pvr_sync_timeline_value_str() returns dynamically. 
+ */ +static struct sync_timeline *last_pt_timeline; + +static void pvr_sync_timeline_value_str(struct sync_timeline *sync_timeline, + char *str, int size) +{ + struct pvr_sync_timeline *timeline = get_timeline(sync_timeline); + + if (timeline->kernel->fence_sync) { + if (sync_timeline != last_pt_timeline) { + snprintf(str, size, "%u 0x%x %u/%u", + timeline->kernel->fence_sync->id, + timeline->kernel->fence_sync->vaddr, + get_sync_prim_value(timeline->kernel->fence_sync), + timeline->kernel->fence_sync->next_value); + } else { + snprintf(str, size, "%u", + get_sync_prim_value(timeline->kernel->fence_sync)); + } + } else { + snprintf(str, size, "n/a"); + } +} + +static void pvr_sync_pt_value_str(struct sync_pt *sync_pt, char *str, int size) +{ + struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt; + struct pvr_sync_kernel_pair *kernel; + + if (!pvr_pt->sync_data) + return; + + kernel = pvr_pt->sync_data->kernel; + + /* Messages must be at most 64 bytes (including the null terminator): + * + * 123456789012345678901234567890123456789012345678901234567890123 + * + * ID FW ADDR C/N # REF TAKEN + * 123456 0xdeadbeef 0/1 # r=2 123456 + * + * ID FW ADDR C/N # ID FW ADDR C/N # REF TAKEN + * 123456 0xdeadbeef 0/1 # 123456 0xdeadbeef 0/1 # r=2 123456 + */ + if (kernel && kernel->fence_sync) { + snprintf(str, size, + "%u 0x%x %c e=%d r=%d %u", + kernel->fence_sync->id, + kernel->fence_sync->vaddr, + get_sync_checkpoint_char(kernel->fence_sync), + SyncCheckpointGetEnqueuedCount(kernel->fence_sync->client_sync_checkpoint), + atomic_read(&pvr_pt->sync_data->kref.refcount), + pvr_pt->sync_data->timeline_update_value); + } else { + snprintf(str, size, "idle # r=%d %u", + atomic_read(&pvr_pt->sync_data->kref.refcount), + pvr_pt->sync_data->timeline_update_value); + } + + last_pt_timeline = sync_pt_parent(sync_pt); +} + +/* pvr_sync_create_sync_data() should be called with the bridge lock held */ +static struct pvr_sync_data * +pvr_sync_create_sync_data(struct 
pvr_sync_timeline *timeline, + const s32 timeline_fd, + PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, + const char *pt_name) +{ + struct pvr_sync_data *sync_data = NULL; + enum PVRSRV_ERROR error; + unsigned long flags; + + sync_data = kzalloc(sizeof(*sync_data), GFP_KERNEL); + if (!sync_data) + goto err_out; + + kref_init(&sync_data->kref); + + sync_data->kernel = + kzalloc(sizeof(*sync_data->kernel), + GFP_KERNEL); + + if (!sync_data->kernel) + goto err_free_data; + + INIT_LIST_HEAD(&sync_data->kernel->list); + + sync_data->kernel->fence_sync = + kzalloc(sizeof(struct pvr_sync_native_sync_checkpoint), GFP_KERNEL); + if (!sync_data->kernel->fence_sync) + goto err_free_kernel; + INIT_LIST_HEAD(&sync_data->kernel->fence_sync->list); + + error = SyncCheckpointAlloc(psSyncCheckpointContext, + (PVRSRV_TIMELINE)timeline_fd, + PVRSRV_NO_FENCE, + pt_name, + &sync_data->kernel->fence_sync->client_sync_checkpoint); + if (error != PVRSRV_OK) { + pr_err("pvr_sync2: %s: Failed to allocate sync checkpoint (%s)\n", + __func__, PVRSRVGetErrorString(error)); + goto err_free_fence; + } + + sync_data->kernel->fence_sync->foreign_sync_fence = NULL; + sync_data->kernel->fence_sync->foreign_sync_fence_name[0] = '\0'; + + sync_data->kernel->fence_sync->vaddr = + SyncCheckpointGetFirmwareAddr(sync_data->kernel->fence_sync->client_sync_checkpoint); + sync_data->kernel->fence_sync->id = + SyncCheckpointGetId(sync_data->kernel->fence_sync->client_sync_checkpoint); + sync_data->kernel->fence_sync->type = SYNC_PT_FENCE_TYPE; + strncpy(sync_data->kernel->fence_sync->class, pt_name, + sizeof(sync_data->kernel->fence_sync->class)); + + /* Update list (for debug ) */ + spin_lock_irqsave(&pvr_sync_pt_active_list_spinlock, flags); + list_add_tail(&sync_data->kernel->fence_sync->list, &timeline->sync_list); + spin_unlock_irqrestore(&pvr_sync_pt_active_list_spinlock, flags); + +err_out: + return sync_data; + +err_free_fence: + kfree(sync_data->kernel->fence_sync); +err_free_kernel: + 
kfree(sync_data->kernel); +err_free_data: + kfree(sync_data); + sync_data = NULL; + goto err_out; +} + +static void pvr_sync_free_sync_data(struct kref *kref) +{ + struct pvr_sync_data *sync_data = (struct pvr_sync_data *) + container_of(kref, struct pvr_sync_data, kref); + + if (sync_data->kernel) + pvr_sync_defer_free_checkpoints(sync_data->kernel); + + kfree(sync_data); +} + +static void pvr_sync_free_sync(struct sync_pt *sync_pt) +{ + struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt; + + DPF("%s: # %s", __func__, debug_info_sync_pt(sync_pt)); + + kref_put(&pvr_pt->sync_data->kref, pvr_sync_free_sync_data); +} + +/* this function uses pvr_sync_timeline_ops defined below */ +static int pvr_sync_fill_driver_data(struct sync_pt *, void *, int); + +static struct sync_timeline_ops pvr_sync_timeline_ops = { + .driver_name = PVRSYNC_MODNAME, + .dup = pvr_sync_dup, + .has_signaled = pvr_sync_has_signaled, + .compare = pvr_sync_compare, + .free_pt = pvr_sync_free_sync, + .release_obj = pvr_sync_release_timeline, + .timeline_value_str = pvr_sync_timeline_value_str, + .pt_value_str = pvr_sync_pt_value_str, + .fill_driver_data = pvr_sync_fill_driver_data, +}; + +static inline bool is_pvr_timeline(struct sync_timeline *obj) +{ + return obj->ops == &pvr_sync_timeline_ops; +} + +static inline bool is_pvr_timeline_pt(struct sync_pt *pt) +{ + return is_pvr_timeline(sync_pt_parent(pt)); +} + +static int +pvr_sync_fill_driver_data(struct sync_pt *sync_pt, void *data, int size) +{ + struct pvr_sync_pt_info *info = (struct pvr_sync_pt_info *)data; + struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt; + struct pvr_sync_data *sync_data = pvr_pt->sync_data; + struct pvr_sync_kernel_pair *kernel = sync_data->kernel; + + if (size < sizeof(*info)) + return -ENOMEM; + + info->ui32TlTaken = sync_data->timeline_update_value; + + if (kernel && kernel->fence_sync) { + info->id = kernel->fence_sync->id; + info->ui32FWAddr = kernel->fence_sync->vaddr; + info->ui32CurrOp = 
get_sync_checkpoint_value(kernel->fence_sync); + info->ui32NextOp = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + } else { + info->id = 0; + info->ui32FWAddr = 0; + info->ui32CurrOp = 0; + info->ui32NextOp = 0; + } + + return sizeof(*info); +} + +/* foreign sync handling */ + +static void pvr_sync_foreign_sync_pt_signaled(struct sync_fence *fence, + struct sync_fence_waiter *_waiter) +{ + struct pvr_sync_fence_waiter *waiter = + (struct pvr_sync_fence_waiter *)_waiter; + unsigned long flags; + + /* Complete the SW operation and free the sync if we can. If we can't, + * it will be checked by a later workqueue kick. + */ + if (is_sync_checkpoint_errored(waiter->kernel->fence_sync, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT) || + !is_sync_checkpoint_met(waiter->kernel->fence_sync, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) { + + if (!is_sync_checkpoint_met(waiter->kernel->fence_sync, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) + complete_sync_checkpoint(waiter->kernel->fence_sync, PVRSRV_FENCE_FLAG_CTX_ATOMIC); + + /* We can 'put' the fence now, but this function might be called in + * irq context so we must defer to WQ. + * This WQ is triggered in pvr_sync_defer_free, so adding it to the + * put list before that should guarantee it's cleaned up on the next + * wq run. + */ + spin_lock_irqsave(&sync_fence_put_list_spinlock, flags); + list_add_tail(&waiter->sync_fence->list, &sync_fence_put_list); + spin_unlock_irqrestore(&sync_fence_put_list_spinlock, flags); + + pvr_sync_defer_free_checkpoints(waiter->kernel); + + /* The completed sw-sync may allow other tasks to complete, + * so we need to allow them to progress. 
+ */ + queue_work(pvr_sync_data.check_status_wq, + &pvr_sync_data.check_status_work); + + kfree(waiter); + } else { + pr_err("pvr_sync2: %s: this sync checkpoint has already been signalled - why are we asked to do this more than once?!\n", __func__); + } +} + +static PSYNC_CHECKPOINT +pvr_sync_create_waiter_for_foreign_sync(int fd, PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext) +{ + struct pvr_sync_kernel_pair *kernel = NULL; + struct pvr_sync_fence_waiter *waiter; + struct pvr_sync_fence *sync_fence; + PSYNC_CHECKPOINT checkpoint = NULL; + struct sync_fence *fence; + enum PVRSRV_ERROR error; + int err; + unsigned long flags; + + fence = sync_fence_fdget(fd); + if (!fence) { + pr_err("pvr_sync2: %s: Failed to take reference on fence\n", + __func__); + goto err_out; + } + + kernel = kmalloc(sizeof(*kernel), GFP_KERNEL); + if (!kernel) { + pr_err("pvr_sync2: %s: Failed to allocate sync kernel\n", + __func__); + goto err_put_fence; + } + + sync_fence = kmalloc(sizeof(*sync_fence), GFP_KERNEL); + if (!sync_fence) { + pr_err("pvr_sync2: %s: Failed to allocate pvr sync fence\n", + __func__); + goto err_free_kernel; + } + + sync_fence->fence = fence; + + kernel->fence_sync = kzalloc(sizeof(struct pvr_sync_native_sync_checkpoint), GFP_KERNEL); + if (!kernel->fence_sync) + goto err_free_fence; + + INIT_LIST_HEAD(&kernel->fence_sync->list); + + /* Create sync checkpoint for the foreign sync, with an invalid timeline (as we do not know it) */ + error = SyncCheckpointAlloc(psSyncCheckpointContext, + SYNC_CHECKPOINT_FOREIGN_CHECKPOINT, + fd, /* fence_to_resolve */ + fence->name, + &checkpoint); + if (error != PVRSRV_OK) { + pr_err("pvr_sync2: %s: Failed to allocate sync checkpoint (%s)\n", + __func__, PVRSRVGetErrorString(error)); + goto err_free_fence_sync; + } + kernel->fence_sync->client_sync_checkpoint = checkpoint; + + kernel->fence_sync->foreign_sync_fence = fence; + strncpy(kernel->fence_sync->foreign_sync_fence_name, fence->name, 32); + + kernel->fence_sync->vaddr = 
+ SyncCheckpointGetFirmwareAddr(kernel->fence_sync->client_sync_checkpoint); + kernel->fence_sync->id = + SyncCheckpointGetId(kernel->fence_sync->client_sync_checkpoint); + kernel->fence_sync->type = SYNC_PT_FOREIGN_FENCE_TYPE; + strlcpy(kernel->fence_sync->class, fence->name, sizeof(kernel->fence_sync->class)); + + /* The custom waiter structure is freed in the waiter callback */ + waiter = kmalloc(sizeof(*waiter), GFP_KERNEL); + if (!waiter) { + pr_err("pvr_sync2: %s: Failed to allocate waiter\n", __func__); + goto err_free_cleanup_sync; + } + + waiter->kernel = kernel; + waiter->sync_fence = sync_fence; + + /* Take an extra ref on the checkpoint for the reference handed over to + * the firmware. + * This must be done before the waiter_init, as the waiter can be called + * and it's reference dropped at _any time_ + */ + SyncCheckpointTakeRef(checkpoint); + + sync_fence_waiter_init(&waiter->waiter, + pvr_sync_foreign_sync_pt_signaled); + + spin_lock_irqsave(&pvr_sync_pt_active_list_spinlock, flags); + err = sync_fence_wait_async(fence, &waiter->waiter); + if (err) { + spin_unlock_irqrestore(&pvr_sync_pt_active_list_spinlock, flags); + /* -1 means the fence was broken, 1 means the fence already + * signalled. In either case, roll back what we've done and + * skip using this sync_pt for synchronisation. 
+ */ + goto err_put_checkpoint_ref; + } + + /* Update list (for debug ) */ + list_add_tail(&kernel->fence_sync->list, &pvr_sync_pt_active_list); + spin_unlock_irqrestore(&pvr_sync_pt_active_list_spinlock, flags); + +err_out: + return checkpoint; +err_put_checkpoint_ref: + SyncCheckpointDropRef(checkpoint); + kfree(waiter); +err_free_cleanup_sync: + SyncCheckpointFree(checkpoint); + checkpoint = NULL; +err_free_fence_sync: + kfree(kernel->fence_sync); + kernel->fence_sync = NULL; +err_free_fence: + kfree(sync_fence); + sync_fence = NULL; +err_free_kernel: + kfree(kernel); + kernel = NULL; +err_put_fence: + sync_fence_put(fence); + goto err_out; +} + +static +struct pvr_sync_pt *pvr_sync_create_pt(struct pvr_sync_timeline *timeline, + const s32 timeline_fd, + PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, + const char *pt_name) +{ + struct pvr_sync_data *sync_data; + struct pvr_sync_pt *pvr_pt = NULL; + + sync_data = pvr_sync_create_sync_data(timeline, timeline_fd, + psSyncCheckpointContext, pt_name); + if (!sync_data) { + pr_err("pvr_sync2: %s: Failed to create sync data\n", __func__); + goto err_out; + } + + pvr_pt = (struct pvr_sync_pt *) + sync_pt_create(timeline->obj, sizeof(struct pvr_sync_pt)); + if (!pvr_pt) { + pr_err("pvr_sync2: %s: Failed to create sync pt\n", __func__); + goto err_rollback_fence; + } + + pvr_pt->sync_data = sync_data; + + pvr_pt->timeline = timeline; + + /* Increment the timeline next value */ + pvr_pt->sync_data->timeline_update_value = + timeline->kernel->fence_sync->next_value++; + + return pvr_pt; + +err_rollback_fence: + /* Error the sync checkpoint (so the deferred free considers it 'met') */ + error_sync_checkpoint(sync_data->kernel->fence_sync, PVRSRV_FENCE_FLAG_NONE); + kref_put(&sync_data->kref, pvr_sync_free_sync_data); +err_out: + return NULL; +} + +/* Predeclare the pvr_sync_fops as it's used for comparison to ensure the + * update_timeline_fd passed in to pvr_sync_append_fences() is a pvr_sync + * timeline. 
+ */ +static const struct file_operations pvr_sync_fops; + + +/* ioctl and fops handling */ + +static int pvr_sync_open(struct inode *inode, struct file *file) +{ + struct pvr_sync_timeline_wrapper *timeline_wrapper; + struct pvr_sync_timeline *timeline; + char task_comm[TASK_COMM_LEN]; + int err = -ENOMEM; + unsigned long flags; + + get_task_comm(task_comm, current); + + timeline_wrapper = (struct pvr_sync_timeline_wrapper *) + sync_timeline_create(&pvr_sync_timeline_ops, + sizeof(*timeline_wrapper), task_comm); + if (!timeline_wrapper) { + pr_err("pvr_sync2: %s: sync_timeline_create failed\n", __func__); + goto err_out; + } + + timeline = kmalloc(sizeof(*timeline), GFP_KERNEL); + if (!timeline) { + pr_err("pvr_sync2: %s: Out of memory\n", __func__); + goto err_free_timeline_wrapper; + } + + timeline->kernel = kzalloc(sizeof(*timeline->kernel), + GFP_KERNEL); + if (!timeline->kernel) { + pr_err("pvr_sync2: %s: Out of memory\n", __func__); + goto err_free_timeline; + } + + timeline_wrapper->timeline = timeline; + + timeline->obj = &timeline_wrapper->obj; + kref_init(&timeline->kref); + INIT_LIST_HEAD(&timeline->sync_list); + + spin_lock_irqsave(&timeline_list_lock, flags); + list_add_tail(&timeline->list, &timeline_list); + spin_unlock_irqrestore(&timeline_list_lock, flags); + + DPF("%s: # %s", __func__, debug_info_timeline(timeline)); + + file->private_data = timeline_wrapper; + err = 0; +err_out: + return err; + +err_free_timeline: + kfree(timeline); + + /* Use a NULL timeline to detect this partially-setup timeline in the + * timeline release function (called by sync_timeline_destroy) and + * handle it appropriately. 
+ */ + timeline_wrapper->timeline = NULL; +err_free_timeline_wrapper: + sync_timeline_destroy(&timeline_wrapper->obj); + goto err_out; +} + +static int pvr_sync_close(struct inode *inode, struct file *file) +{ + struct sync_timeline *obj = file->private_data; + + if (is_pvr_timeline(obj)) { + DPF("%s: # %s", __func__, + debug_info_timeline(get_timeline(obj))); + + sync_timeline_destroy(obj); + } else { + struct pvr_sw_sync_timeline *pvr_sw_sync_timeline = file->private_data; + + /* SW timeline */ + kref_put(&pvr_sw_sync_timeline->kref, pvr_sw_sync_destroy_timeline); + } + return 0; +} + +/* + * This is the function that kick code will call in order to 'finalise' a + * created output fence just prior to returning from the kick function. + * The OS native sync code needs to implement a function meeting this + * specification - the implementation may be a nop if the OS does not need + * to perform any actions at this point. + * + * Input: fence_fd The PVRSRV_FENCE to be 'finalised'. This value + * will have been returned by an earlier call to + * pvr_sync_create_fence(). + * Input: finalise_data The finalise data returned by an earlier call + * to pvr_sync_create_fence(). + */ +static enum PVRSRV_ERROR +pvr_sync_finalise_fence(PVRSRV_FENCE fence_fd, void *finalise_data) +{ + struct sync_fence *native_fence = (struct sync_fence *)finalise_data; + + if (!native_fence || (fence_fd < 0)) + return PVRSRV_ERROR_INVALID_PARAMS; + + sync_fence_install(native_fence, fence_fd); + return PVRSRV_OK; +} + +/* + * This is the function that kick code will call in order to obtain a new + * PVRSRV_FENCE from the OS native sync code and the PSYNC_CHECKPOINT used + * in that fence. The OS native sync code needs to implement a function + * meeting this specification. + * + * Input: fence_name A string to annotate the fence with (for + * debug). + * Input: timeline The timeline on which the new fence is to be + * created. 
 * Output: new_fence             The new PVRSRV_FENCE to be returned by the
 *                               kick call.
 * Output: fence_uid             Unique ID of the update fence.
 * Output: fence_finalise_data   Pointer to data needed to finalise the fence.
 * Output: new_checkpoint_handle The PSYNC_CHECKPOINT used by the new fence.
 */
static enum PVRSRV_ERROR
pvr_sync_create_fence(const char *fence_name,
		      PVRSRV_TIMELINE new_fence_timeline,
		      PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
		      PVRSRV_FENCE *new_fence,
		      u64 *fence_uid,
		      void **fence_finalise_data,
		      PSYNC_CHECKPOINT *new_checkpoint_handle,
		      void **timeline_update_sync,
		      __u32 *timeline_update_value)
{
	PVRSRV_ERROR err = PVRSRV_OK;
	PVRSRV_FENCE new_fence_fd = -1;
	struct file *timeline_file;
	struct pvr_sync_timeline *timeline;
	struct pvr_sync_pt *native_sync_point = NULL;
	struct sync_fence *native_fence = NULL;
	struct pvr_sync_kernel_pair *sync_kernel;

	/* NOTE(review): fence_uid is written on both success and error paths
	 * below but is not validated here — assumes the caller always passes
	 * a non-NULL fence_uid; TODO confirm against kick code.
	 */
	if (new_fence_timeline < 0 || !new_fence || !new_checkpoint_handle || !fence_finalise_data) {
		err = PVRSRV_ERROR_INVALID_PARAMS;
		goto err_out;
	}

	/* We reserve the new fence FD before taking any operations
	 * as we do not want to fail (e.g. run out of FDs)
	 */
	new_fence_fd = get_unused_fd_flags(O_CLOEXEC);
	if (new_fence_fd < 0) {
		err = PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE;
		goto err_out;
	}

	timeline_file = fget(new_fence_timeline);
	if (!timeline_file) {
		pr_err("pvr_sync2: %s: Failed to open supplied timeline fd (%d)\n",
		       __func__, new_fence_timeline);
		err = PVRSRV_ERROR_INVALID_PARAMS;
		goto err_put_fd;
	}

	/* Reject fds that are not pvr_sync timeline device fds. */
	if (timeline_file->f_op != &pvr_sync_fops) {
		pr_err("pvr_sync2: %s: Supplied timeline not pvr_sync timeline\n",
		       __func__);
		err = PVRSRV_ERROR_INVALID_PARAMS;
		goto err_put_timeline;
	}

	timeline = get_timeline(timeline_file->private_data);

	/* We know this will not free the timeline as the user still
	 * has the fd referencing it.
	 */
	fput(timeline_file);

	if (!timeline) {
		pr_err("pvr_sync2: %s: Supplied timeline has no private data\n",
		       __func__);
		err = PVRSRV_ERROR_INVALID_PARAMS;
		goto err_put_fd;
	}

	/* Check if this timeline already has a sync prim, if not create it now */
	if (!timeline->kernel->fence_sync) {
		err = sync_pool_get(&timeline->kernel->fence_sync,
				    timeline->obj->name,
				    SYNC_TL_TYPE);

		if (err != PVRSRV_OK) {
			pr_err("pvr_sync2: %s: Failed to allocate timeline sync prim (%s)\n",
			       __func__, PVRSRVGetErrorString(err));
			err = PVRSRV_ERROR_OUT_OF_MEMORY;
			goto err_put_fd;
		}
	}

	native_sync_point = pvr_sync_create_pt(timeline, new_fence_timeline,
					       psSyncCheckpointContext, fence_name);
	if (!native_sync_point) {
		pr_err("pvr_sync2: %s: Failed to create sync point\n",
		       __func__);
		err = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto err_put_fd;
	}

	native_fence = sync_fence_create(fence_name, &native_sync_point->pt);
	if (!native_fence) {
		struct pvr_sync_native_sync_prim *timeline_prim =
			timeline->kernel->fence_sync;

		pr_err("pvr_sync2: %s: Failed to create sync fence\n",
		       __func__);
		err = PVRSRV_ERROR_OUT_OF_MEMORY;

		/* If the point was created but the fence failed to be
		 * created, the point must be manually freed as a
		 * fence has not yet taken ownership.
		 */
		timeline_prim->next_value--;
		/* Error the new fence's sync checkpoint (so the deferred free considers it 'met') */
		error_sync_checkpoint(native_sync_point->sync_data->kernel->fence_sync, PVRSRV_FENCE_FLAG_NONE);
		pvr_sync_free_sync(&native_sync_point->pt);
		goto err_put_fd;
	}

	sync_kernel = native_sync_point->sync_data->kernel;

	/* For Linux, we do not return the fence fd here, but via
	 * pvr_sync_finalise_fence() - this is because once we
	 * associate the fd with the fence, it can only be closed
	 * from client code so it should only be done once we
	 * know we will definitely require it.
	 */
	*new_fence = new_fence_fd;
	*fence_finalise_data = (void *)native_fence;
	*new_checkpoint_handle = sync_kernel->fence_sync->client_sync_checkpoint;

	if (timeline_update_sync && timeline_update_value) {
		*timeline_update_sync = (void *)timeline->kernel->fence_sync->client_sync;
		*timeline_update_value = timeline->kernel->fence_sync->next_value;
	}

	/* UID = (caller PID << 32) | fd — unique while the fd is open. */
	*fence_uid = OSGetCurrentClientProcessIDKM();
	*fence_uid = (*fence_uid << 32) | (new_fence_fd & U32_MAX);

	goto err_out;

err_put_timeline:
	fput(timeline_file);
err_put_fd:
	pr_err("pvr_sync2: %s: putting fd %d back to unused\n", __func__, new_fence_fd);
	put_unused_fd(new_fence_fd);
	*fence_uid = PVRSRV_NO_FENCE;
err_out:
	return err;
}

/*
 * This is the function that kick code will call in order to 'rollback' a
 * created output fence should an error occur when submitting the kick.
 * The OS native sync code needs to implement a function meeting this
 * specification.
 *
 * Input: fence_to_rollback The PVRSRV_FENCE to be 'rolled back'. The fence
 *                          should be destroyed and any actions taken due to
 *                          its creation that need to be undone should be
 *                          reverted.
 * Input: finalise_data     The finalise data for the fence to be 'rolled back'.
 */
static enum PVRSRV_ERROR
pvr_sync_rollback_fence_data(PVRSRV_FENCE fence_to_rollback,
			     void *fence_data_to_rollback)
{
	PVRSRV_ERROR err = PVRSRV_OK;
	struct sync_fence *sync_fence = (struct sync_fence *)fence_data_to_rollback;
	struct sync_pt *sync_pt;
	struct pvr_sync_pt *pvr_pt = NULL;
	int j = 0;

	if (!sync_fence) {
		pr_err("pvr_sync2: %s: Failed to recognise fence_to_rollback(%d)\n", __func__, fence_to_rollback);
		err = PVRSRV_ERROR_INVALID_PARAMS;
		goto err_out;
	}

	/* j is only used by some for_each_sync_pt() implementations; the
	 * cast silences unused-variable warnings on the others.
	 */
	(void)j;
	for_each_sync_pt(sync_pt, sync_fence, j) {
		if (!is_pvr_timeline_pt(sync_pt)) {
			pr_err("pvr_sync2: %s: Fence(%d) contains non-pvr timeline sync_pt\n", __func__, fence_to_rollback);
			err = PVRSRV_ERROR_INVALID_PARAMS;
			goto err_out2;
		}

		pvr_pt = (struct pvr_sync_pt *)sync_pt;

		/* Error the checkpoint so anything waiting on it unblocks and
		 * the deferred-free worker treats it as 'met'.
		 */
		SyncCheckpointError(pvr_pt->sync_data->kernel->fence_sync->client_sync_checkpoint, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT);

		/* rollback timeline next_value */
		pvr_pt->timeline->kernel->fence_sync->next_value--;
	}

	/* close the fence */
	sync_fence_put(sync_fence);

	/* NOTE: the success path deliberately falls through to err_out2 —
	 * the fence fd was only reserved by pvr_sync_create_fence() (never
	 * installed, as finalise was not reached), so it must be returned
	 * to the unused pool in both the success and error cases.
	 */
err_out2:
	put_unused_fd(fence_to_rollback);

err_out:
	return err;
}

/*
 * This is the function that kick code will call in order to obtain a list of
 * the PSYNC_CHECKPOINTs for a given PVRSRV_FENCE passed to a kick function.
 * The OS native sync code will allocate the memory to hold the returned list
 * of PSYNC_CHECKPOINT ptrs. The caller will free this memory once it has
 * finished referencing it.
 *
 * Input: fence                  The input (check) fence
 * Output: nr_checkpoints        The number of PVRSRV_SYNC_CHECKPOINT ptrs
 *                               returned in the checkpoint_handles
 *                               parameter.
 * Output: fence_uid             Unique ID of the check fence
 * Input/Output: checkpoint_handles The returned list of PVRSRV_SYNC_CHECKPOINTs.
+ */ +static enum PVRSRV_ERROR +pvr_sync_resolve_fence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, + PVRSRV_FENCE fence_to_resolve, u32 *nr_checkpoints, + PSYNC_CHECKPOINT **checkpoint_handles, u64 *fence_uid) +{ + PVRSRV_ERROR err = PVRSRV_OK; + + if (!nr_checkpoints || !checkpoint_handles) { + err = PVRSRV_ERROR_INVALID_PARAMS; + goto err_out; + } + + if (fence_to_resolve < 0) { + /* Null fence passed, so return 0 checkpoints */ + *nr_checkpoints = 0; + *checkpoint_handles = NULL; + *fence_uid = 0; + } else { + struct sync_fence *sync_fence = sync_fence_fdget(fence_to_resolve); + struct sync_pt *sync_pt; + struct pvr_sync_kernel_pair *sync_kernel; + u32 points_on_fence = 0; + PSYNC_CHECKPOINT foreign_checkpoint = NULL; + PSYNC_CHECKPOINT *next_checkpoint; + bool add_foreign_sync = true; + int j = 0; + + if (!sync_fence) { + pr_err("pvr_sync2: %s: Failed to read sync private data for fd %d\n", + __func__, fence_to_resolve); + err = PVRSRV_ERROR_HANDLE_NOT_FOUND; + goto err_out; + } + + /* Alloc memory to hold list of PSYNC_CHECKPOINTs */ + /* (Alloc memory for MAX_SYNC_CHECKPOINTS_PER_FENCE sync checkpoint handles) */ + *checkpoint_handles = + kmalloc_array(MAX_SYNC_CHECKPOINTS_PER_FENCE, + sizeof(PSYNC_CHECKPOINT), GFP_KERNEL); + if (!(*checkpoint_handles)) { + pr_err("pvr_sync2: %s: Failed to alloc memory for returned list of sync checkpoints\n", + __func__); + err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_out2; + } + + next_checkpoint = *checkpoint_handles; + + (void)j; + for_each_sync_pt(sync_pt, sync_fence, j) { + struct pvr_sync_pt *pvr_pt = NULL; + + /* Make sure that we do not overrun the memory we allocated */ + if (points_on_fence >= MAX_SYNC_CHECKPOINTS_PER_FENCE) { + pr_err("pvr_sync2: Maximum number of sync checkpoints in a fence exceeded (greater than %d)", MAX_SYNC_CHECKPOINTS_PER_FENCE); + err = PVRSRV_ERROR_INVALID_PARAMS; + + for (j = 0; j < points_on_fence; j++) + SyncCheckpointDropRef((*checkpoint_handles)[j]); + + 
kfree(*checkpoint_handles); + goto err_out2; + } + + if (is_pvr_timeline_pt(sync_pt)) { + pvr_pt = (struct pvr_sync_pt *)sync_pt; + sync_kernel = pvr_pt->sync_data->kernel; + + if (!sync_kernel || + is_sync_checkpoint_met(sync_kernel->fence_sync, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) { + continue; + } + + /* Take ref on sync_checkpoint - this will be dropped by the caller + * (Kick code) once it has incremented the checkpoint's CCB enqueued count. + * We only really need to do this for foreign sync checkpoints, to prevent + * the sync_checkpoint from being destroyed if it gets signalled while being + * processed by the Kick code, but the Kick code has no knowledge of whether a + * sync_checkpoint is foreign, so we take a ref on all checkpoints. + */ + SyncCheckpointTakeRef(sync_kernel->fence_sync->client_sync_checkpoint); + + *next_checkpoint = sync_kernel->fence_sync->client_sync_checkpoint; + next_checkpoint++; + points_on_fence++; + } else if (add_foreign_sync) { + foreign_checkpoint = pvr_sync_create_waiter_for_foreign_sync(fence_to_resolve, psSyncCheckpointContext); + + if (foreign_checkpoint) { + /* Take ref on sync_checkpoint - this will be dropped by the caller + * (see comment for the other call to SyncCheckpointTakeRef, above). 
+ */ + /* For foreign points, an extra + * checkpoint reference was taken at + * creation time to ensure it wasn't + * completed and free'd before we got + * here, so ownership of that reference + * is effectively passed to the firmware + */ + *next_checkpoint = foreign_checkpoint; + next_checkpoint++; + points_on_fence++; + add_foreign_sync = false; + } + } + } + + if (0) { + int ii; + + pr_err("pvr_sync2: %s: returning nr_checkpoints=%d\n", __func__, points_on_fence); + for (ii = 0; ii < points_on_fence; ii++) { + PSYNC_CHECKPOINT *psTmp = *(checkpoint_handles + ii); + + pr_err("pvr_sync2: %s: pt %d: sync checkpoint <%p>,\n", __func__, ii, psTmp); + pr_err("pvr_sync2: %s: ID=%d\n", __func__, SyncCheckpointGetId(*psTmp)); + } + } + *nr_checkpoints = points_on_fence; + *fence_uid = OSGetCurrentClientProcessIDKM(); + *fence_uid = (*fence_uid << 32) | (fence_to_resolve & U32_MAX); + +err_out2: + sync_fence_put(sync_fence); + } + +err_out: + return err; +} + +#if defined(PDUMP) +static enum PVRSRV_ERROR +pvr_sync_fence_get_checkpoints(PVRSRV_FENCE fence_to_pdump, u32 *nr_checkpoints, + struct _SYNC_CHECKPOINT ***checkpoint_handles) +{ + enum PVRSRV_ERROR err; + struct sync_fence *sync_fence; + struct sync_pt *sync_pt; + struct pvr_sync_kernel_pair *sync_kernel; + u32 points_on_fence = 0; + struct _SYNC_CHECKPOINT **next_checkpoint; + struct _SYNC_CHECKPOINT **checkpoints = NULL; + int j = 0; + + if (!nr_checkpoints || !checkpoint_handles) { + err = PVRSRV_ERROR_INVALID_PARAMS; + goto err_out; + } + + if (fence_to_pdump < 0) { + /* Null fence passed, so return 0 checkpoints */ + err = PVRSRV_ERROR_INVALID_PARAMS; + goto err_out; + } + + sync_fence = sync_fence_fdget(fence_to_pdump); + if (!sync_fence) { + pr_err("pvr_sync2: %s: Failed to read sync private data for fd %d\n", + __func__, fence_to_pdump); + err = PVRSRV_ERROR_HANDLE_NOT_FOUND; + goto err_out; + } + + /* Alloc memory to hold list of PSYNC_CHECKPOINTs */ + checkpoints = 
kmalloc_array(MAX_SYNC_CHECKPOINTS_PER_FENCE, + sizeof(*checkpoints), GFP_KERNEL); + if (!checkpoints) { + pr_err("pvr_sync2: %s: Failed to alloc memory for returned list of sync checkpoints\n", + __func__); + err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_put_fence; + } + + next_checkpoint = checkpoints; + + (void)j; + for_each_sync_pt(sync_pt, sync_fence, j) { + struct pvr_sync_pt *pvr_pt = NULL; + + /* Make sure that we do not overrun the memory we allocated */ + if (points_on_fence >= MAX_SYNC_CHECKPOINTS_PER_FENCE) { + pr_err("pvr_sync2: Maximum number of sync checkpoints in a fence exceeded (greater than %d)", MAX_SYNC_CHECKPOINTS_PER_FENCE); + err = PVRSRV_ERROR_INVALID_PARAMS; + kfree(*checkpoint_handles); + goto err_put_fence; + } + + if (is_pvr_timeline_pt(sync_pt)) { + pvr_pt = (struct pvr_sync_pt *)sync_pt; + sync_kernel = pvr_pt->sync_data->kernel; + if (!sync_kernel) { + continue; + } + *next_checkpoint = sync_kernel->fence_sync->client_sync_checkpoint; + next_checkpoint++; + points_on_fence++; + } + } + + *checkpoint_handles = checkpoints; + *nr_checkpoints = points_on_fence; + err = PVRSRV_OK; +err_put_fence: + sync_fence_put(sync_fence); +err_out: + return err; + +} +#endif + +static u32 +pvr_sync_dump_info_on_stalled_ufos(u32 nr_ufos, u32 *vaddrs) +{ + u32 our_ufo_ct = 0; + struct pvr_sync_native_sync_checkpoint *sync; + unsigned long flags; + + spin_lock_irqsave(&pvr_sync_pt_active_list_spinlock, flags); + /* dump info on any ufos in our active list */ + list_for_each_entry(sync, &pvr_sync_pt_active_list, list) { + u32 *this_ufo_vaddr = vaddrs; + u32 ufo_num; + DUMPDEBUG_PRINTF_FUNC *pfnDummy = NULL; + + for (ufo_num = 0; ufo_num < nr_ufos; ufo_num++, this_ufo_vaddr++) { + if (sync->vaddr == *this_ufo_vaddr) { + static const char *const type_names[] = { + "Timeline", "Fence", "Cleanup", + "Foreign Fence", "Foreign Cleanup" + }; + + /* Dump sync info */ + PVR_DUMPDEBUG_LOG(pfnDummy, NULL, + "\tSyncID = %d, FWAddr = 0x%08x: %s (%s - [%p] %s)", + 
sync->id, sync->vaddr, + sync->class, + type_names[sync->type], + sync->foreign_sync_fence, + sync->foreign_sync_fence_name); + our_ufo_ct++; + } + } + } + spin_unlock_irqrestore(&pvr_sync_pt_active_list_spinlock, flags); + return our_ufo_ct; +} + +static long pvr_sync_ioctl_rename(struct pvr_sync_timeline *timeline, + void __user *user_data) +{ + int err = 0; + struct pvr_sync_rename_ioctl_data data; + + if (!access_ok(user_data, sizeof(data))) { + err = -EFAULT; + goto err; + } + + if (copy_from_user(&data, user_data, sizeof(data))) { + err = -EFAULT; + goto err; + } + + data.szName[sizeof(data.szName) - 1] = '\0'; + strlcpy(timeline->obj->name, data.szName, sizeof(timeline->obj->name)); +err: + return err; +} + +static long pvr_sync_ioctl_force_sw_only(struct pvr_sync_timeline *timeline, + void **private_data) +{ + struct pvr_sw_sync_timeline *pvr_sw_sync_timeline; + + /* We can only convert an empty GPU timeline */ + if (timeline->kernel->fence_sync && + timeline->kernel->fence_sync->next_value) { + pr_err("pvr_sync2: %s ERROR! timeline->kernel->fence_sync=<%p>, timeline->kernel->fence_sync->next_value=%d\n", __func__, timeline->kernel->fence_sync, timeline->kernel->fence_sync->next_value); + return -EFAULT; + } + + /* Create a pvr_sw_sync timeline */ + pvr_sw_sync_timeline = kmalloc(sizeof(*pvr_sw_sync_timeline), GFP_KERNEL); + if (!pvr_sw_sync_timeline) { + pr_err("pvr_sync2: %s ERROR! no memory to allocate pvr_sw_sync_timeline struct\n", __func__); + return -ENOMEM; + } + + pvr_sw_sync_timeline->current_value = 0; + pvr_sw_sync_timeline->next_value = 1; + kref_init(&pvr_sw_sync_timeline->kref); + + /* Create a sw_sync timeline with the old GPU timeline's name */ + pvr_sw_sync_timeline->sw_sync_timeline = sw_sync_timeline_create(timeline->obj->name); + if (!pvr_sw_sync_timeline->sw_sync_timeline) { + pr_err("pvr_sync2: %s ERROR! 
error returned from sw_sync_timeline_create() for timeline->obj->name '%s'\n", __func__, timeline->obj->name); + kfree(pvr_sw_sync_timeline); + return -ENOMEM; + } + + /* Destroy the old GPU timeline and update the struct file */ + DPF("%s: # %s", __func__, debug_info_timeline(timeline)); + + sync_timeline_destroy(timeline->obj); + DPF("%s pvr_sw_sync_timeline<%p>, sw_sync_timeline<%p> curr=%llu,next=%llu", pvr_sw_sync_timeline, pvr_sw_sync_timeline->sw_sync_timeline, pvr_sw_sync_timeline->current_value, pvr_sw_sync_timeline->next_value); + *private_data = pvr_sw_sync_timeline; + return 0; +} + +static long pvr_sync_ioctl_sw_create_fence(struct pvr_sw_sync_timeline *pvr_sw_timeline, + void __user *user_data) +{ + struct pvr_sw_sync_create_fence_data data; + struct sync_fence *fence; + int fd = get_unused_fd_flags(O_CLOEXEC); + struct sync_pt *sync_pt; + struct sw_sync_timeline *timeline; + int err = -EFAULT; + + if (fd < 0) { + pr_err("pvr_sync2: %s: Failed to find unused fd (%d)\n", + __func__, fd); + err = -EMFILE; + goto err_out; + } + + if (!pvr_sw_timeline) { + pr_err("pvr_sync2: %s: passed a NULL pvr_sw_timeline\n", __func__); + goto err_out; + } + + timeline = pvr_sw_timeline->sw_sync_timeline; + + if (copy_from_user(&data, user_data, sizeof(data))) + goto err_put_fd; + + sync_pt = sw_sync_pt_create(timeline, pvr_sw_timeline->next_value); + if (!sync_pt) { + pr_err("pvr_sync2: %s: Failed to create a sync point (%d)\n", + __func__, fd); + err = -ENOMEM; + goto err_put_fd; + } + + data.name[sizeof(data.name) - 1] = '\0'; + fence = sync_fence_create(data.name, sync_pt); + if (!fence) { + pr_err("pvr_sync2: %s: Failed to create a fence (%d)\n", + __func__, fd); + sync_pt_free(sync_pt); + err = -ENOMEM; + goto err_put_fd; + } + + data.fence = fd; + data.sync_pt_idx = pvr_sw_timeline->next_value; + + if (copy_to_user(user_data, &data, sizeof(data))) + goto err_put_fence; + + sync_fence_install(fence, fd); + pvr_sw_timeline->next_value++; + + err = 0; +err_out: + 
return err; +err_put_fence: + sync_fence_put(fence); +err_put_fd: + pr_err("pvr_sync2: %s: putting fd %d back to unused\n", __func__, fd); + put_unused_fd(fd); + goto err_out; +} + +static long pvr_sync_ioctl_sw_inc(struct pvr_sw_sync_timeline *pvr_timeline, + void __user *user_data) +{ + struct sw_sync_timeline *timeline; + struct pvr_sw_timeline_advance_data data; + + if (!pvr_timeline) { + pr_err("pvr_sync2: %s: passed a NULL pvr_timeline\n", __func__); + return -EFAULT; + } + + timeline = pvr_timeline->sw_sync_timeline; + + /* Don't allow sw timeline to be advanced beyond the last defined point */ + if (pvr_timeline->current_value == (pvr_timeline->next_value-1)) { + pr_err("pvr_sync2: attempt to advance SW timeline beyond last defined point\n"); + return -EPERM; + } + + sw_sync_timeline_inc(timeline, 1); + pvr_timeline->current_value++; + data.sync_pt_idx = pvr_timeline->current_value; + + if (copy_to_user(user_data, &data, sizeof(data))) { + pr_err("pvr_sync2: %s: Failed copy to user\n", __func__); + return -EFAULT; + } + + return 0; +} + +static long +pvr_sync_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct sync_timeline *obj = file->private_data; + void __user *user_data = (void __user *)arg; + long err = -ENOTTY; + + if (is_pvr_timeline(obj)) { + struct pvr_sync_timeline *pvr = get_timeline(obj); + + switch (cmd) { + case PVR_SYNC_IOC_RENAME: + err = pvr_sync_ioctl_rename(pvr, user_data); + break; + case PVR_SYNC_IOC_FORCE_SW_ONLY: + err = pvr_sync_ioctl_force_sw_only(pvr, + &file->private_data); + break; + default: + break; + } + } else { + struct pvr_sw_sync_timeline *sw = file->private_data; + + switch (cmd) { + case PVR_SW_SYNC_IOC_CREATE_FENCE: + err = pvr_sync_ioctl_sw_create_fence(sw, user_data); + break; + case PVR_SW_SYNC_IOC_INC: + err = pvr_sync_ioctl_sw_inc(sw, user_data); + break; + default: + break; + } + } + + return err; +} + +static void +pvr_sync_check_status_work_queue_function(struct work_struct *data) +{ + /* 
A completed SW operation may un-block the GPU */ + PVRSRVCheckStatus(NULL); +} + +/* Returns true if the freelist still has entries, else false if empty */ +static bool +pvr_sync_clean_freelist(void) +{ + struct pvr_sync_kernel_pair *kernel, *k; + struct pvr_sync_timeline_kernel_pair *tl_kernel, *tl_k; + struct pvr_sync_fence *sync_fence, *f; + LIST_HEAD(unlocked_free_checkpoint_list); + LIST_HEAD(unlocked_free_timeline_list); + LIST_HEAD(unlocked_free_list); + unsigned long flags; + bool freelist_empty; + + /* We can't call PVRSRVServerSyncFreeKM directly in this loop because + * that will take the mmap mutex. We can't take mutexes while we have + * this list locked with a spinlock. So move all the items we want to + * free to another, local list (no locking required) and process it + * in a second loop. + */ + + spin_lock_irqsave(&sync_checkpoint_free_list_spinlock, flags); + list_for_each_entry_safe(kernel, k, &sync_checkpoint_free_list, list) { + /* Check if this sync is not used anymore. */ + if ((kernel->fence_sync) && + !is_sync_checkpoint_met(kernel->fence_sync, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) { + continue; + } + + /* Remove the entry from the free list. */ + list_move_tail(&kernel->list, &unlocked_free_checkpoint_list); + } + + /* Wait and loop if there are still syncs on the free list (IE + * are still in use by the HW). + */ + freelist_empty = list_empty(&sync_checkpoint_free_list); + + spin_unlock_irqrestore(&sync_checkpoint_free_list_spinlock, flags); + + spin_lock_irqsave(&timeline_free_list_spinlock, flags); + list_for_each_entry_safe(tl_kernel, tl_k, &timeline_free_list, list) { + /* Check if this sync is not used anymore. */ + if (tl_kernel->fence_sync && !is_sync_prim_met(tl_kernel->fence_sync)) + continue; + + /* Remove the entry from the free list. */ + list_move_tail(&tl_kernel->list, &unlocked_free_timeline_list); + } + + /* Wait and loop if there are still syncs on the free list (IE + * are still in use by the HW). 
+ */ + freelist_empty &= list_empty(&timeline_free_list); + + spin_unlock_irqrestore(&timeline_free_list_spinlock, flags); + + + list_for_each_entry_safe(kernel, k, &unlocked_free_checkpoint_list, list) { + list_del(&kernel->list); + + if (kernel->fence_sync && kernel->fence_sync->client_sync_checkpoint) { + spin_lock_irqsave(&pvr_sync_pt_active_list_spinlock, flags); + if (!list_empty(&kernel->fence_sync->list)) + list_del_init(&kernel->fence_sync->list); + + spin_unlock_irqrestore(&pvr_sync_pt_active_list_spinlock, flags); + SyncCheckpointFree(kernel->fence_sync->client_sync_checkpoint); + kernel->fence_sync->client_sync_checkpoint = NULL; + } + kfree(kernel->fence_sync); + kfree(kernel); + } + + list_for_each_entry_safe(tl_kernel, tl_k, &unlocked_free_timeline_list, list) { + list_del(&tl_kernel->list); + + if (tl_kernel->fence_sync) + sync_pool_put(tl_kernel->fence_sync); + kfree(tl_kernel); + } + + /* sync_fence_put() must be called from process/WQ context + * because it uses fput(), which is not allowed to be called + * from interrupt context in kernels <3.6. 
+ */ + INIT_LIST_HEAD(&unlocked_free_list); + + spin_lock_irqsave(&sync_fence_put_list_spinlock, flags); + list_for_each_entry_safe(sync_fence, f, &sync_fence_put_list, list) { + list_move_tail(&sync_fence->list, &unlocked_free_list); + } + spin_unlock_irqrestore(&sync_fence_put_list_spinlock, flags); + + list_for_each_entry_safe(sync_fence, f, &unlocked_free_list, list) { + list_del(&sync_fence->list); + sync_fence_put(sync_fence->fence); + kfree(sync_fence); + } + + return !freelist_empty; +} + +static void +pvr_sync_defer_free_work_queue_function(struct work_struct *data) +{ + enum PVRSRV_ERROR error = PVRSRV_OK; + void *event_object; + + error = OSEventObjectOpen(pvr_sync_data.event_object_handle, + &event_object); + if (error != PVRSRV_OK) { + pr_err("pvr_sync2: %s: Error opening event object (%s)\n", + __func__, PVRSRVGetErrorString(error)); + return; + + } + + while (pvr_sync_clean_freelist()) { + + error = OSEventObjectWait(event_object); + + switch (error) { + + case PVRSRV_OK: + case PVRSRV_ERROR_TIMEOUT: + /* Timeout is normal behaviour */ + continue; + default: + pr_err("pvr_sync2: %s: Error waiting for event object (%s)\n", + __func__, PVRSRVGetErrorString(error)); + break; + } + } + error = OSEventObjectClose(event_object); + if (error != PVRSRV_OK) { + pr_err("pvr_sync2: %s: Error closing event object (%s)\n", + __func__, PVRSRVGetErrorString(error)); + } +} + +static const struct file_operations pvr_sync_fops = { + .owner = THIS_MODULE, + .open = pvr_sync_open, + .release = pvr_sync_close, + .unlocked_ioctl = pvr_sync_ioctl, + .compat_ioctl = pvr_sync_ioctl, +}; + +static struct miscdevice pvr_sync_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = PVRSYNC_MODNAME, + .fops = &pvr_sync_fops, +}; + +static +void pvr_sync_free_checkpoint_list_mem(void *mem_ptr) +{ + kfree(mem_ptr); +} + +static +void pvr_sync_update_all_timelines(void *command_complete_handle) +{ + struct pvr_sync_timeline *timeline, *n; + u32 num_signalled = 0; + unsigned long flags; 
+ + spin_lock_irqsave(&timeline_list_lock, flags); + + list_for_each_entry(timeline, &timeline_list, list) { + /* If a timeline is destroyed via pvr_sync_release_timeline() + * in parallel with a call to pvr_sync_update_all_timelines(), + * the timeline_list_lock will block destruction of the + * 'timeline' pointer. Use kref_get_unless_zero() to detect + * and handle this race. Skip the timeline if it's being + * destroyed, blocked only on the timeline_list_lock. + */ + timeline->valid = + kref_get_unless_zero(&timeline->kref) ? true : false; + } + + list_for_each_entry_safe(timeline, n, &timeline_list, list) { + /* We know timeline is valid at this point because we're + * holding the list lock (so pvr_sync_destroy_timeline() has + * to wait). + */ + void *obj = timeline->obj; + + /* If we're racing with pvr_sync_release_timeline(), ignore */ + if (!timeline->valid) + continue; + + /* If syncs have signaled on the GPU, echo this in pvr_sync. + * + * At this point we know the timeline is valid, but obj might + * have raced and been set to NULL. It's only important that + * we use NULL / non-NULL consistently with the if() and call + * to sync_timeline_signal() -- the timeline->obj can't be + * freed (pvr_sync_release_timeline() will be stuck waiting + * for the timeline_list_lock) but it might have been made + * invalid by the base sync driver, in which case this call + * will bounce harmlessly. 
+ */ + if (obj) { + sync_timeline_signal(obj); + num_signalled++; + } + + /* We're already holding the timeline_list_lock */ + kref_put(&timeline->kref, pvr_sync_destroy_timeline_locked); + } + + spin_unlock_irqrestore(&timeline_list_lock, flags); +} + +enum PVRSRV_ERROR pvr_sync_init(struct device *dev) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct pvr_drm_private *priv = ddev->dev_private; + enum PVRSRV_ERROR error; + int err; + + DPF("%s", __func__); + + atomic_set(&pvr_sync_data.sync_id, 0); + + error = PVRSRVAcquireGlobalEventObjectKM( + &pvr_sync_data.event_object_handle); + if (error != PVRSRV_OK) { + pr_err("pvr_sync2: %s: Failed to acquire global event object (%s)\n", + __func__, PVRSRVGetErrorString(error)); + goto err_out; + } + + + error = SyncPrimContextCreate(priv->dev_node, + &pvr_sync_data.sync_prim_context); + if (error != PVRSRV_OK) { + pr_err("pvr_sync2: %s: Failed to create sync prim context (%s)\n", + __func__, PVRSRVGetErrorString(error)); + goto err_release_event_object; + } + + + /* Initialise struct and register with sync_checkpoint.c */ + pvr_sync_data.sync_checkpoint_ops.pfnFenceResolve = pvr_sync_resolve_fence; + pvr_sync_data.sync_checkpoint_ops.pfnFenceCreate = pvr_sync_create_fence; + pvr_sync_data.sync_checkpoint_ops.pfnFenceDataRollback = pvr_sync_rollback_fence_data; + pvr_sync_data.sync_checkpoint_ops.pfnFenceFinalise = pvr_sync_finalise_fence; + pvr_sync_data.sync_checkpoint_ops.pfnNoHWUpdateTimelines = pvr_sync_update_all_timelines; + pvr_sync_data.sync_checkpoint_ops.pfnFreeCheckpointListMem = pvr_sync_free_checkpoint_list_mem; + pvr_sync_data.sync_checkpoint_ops.pfnDumpInfoOnStalledUFOs = pvr_sync_dump_info_on_stalled_ufos; +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) + pvr_sync_data.sync_checkpoint_ops.pfnCheckpointHasSignalled = NULL; + pvr_sync_data.sync_checkpoint_ops.pfnCheckState = NULL; + pvr_sync_data.sync_checkpoint_ops.pfnSignalWaiters = NULL; +#endif + 
strlcpy(pvr_sync_data.sync_checkpoint_ops.pszImplName, "pvr_sync2", SYNC_CHECKPOINT_IMPL_MAX_STRLEN); +#if defined(PDUMP) + pvr_sync_data.sync_checkpoint_ops.pfnSyncFenceGetCheckpoints = pvr_sync_fence_get_checkpoints; +#endif + + SyncCheckpointRegisterFunctions(&pvr_sync_data.sync_checkpoint_ops); + + pvr_sync_data.defer_free_wq = + create_freezable_workqueue("pvr_sync_defer_free_workqueue"); + if (!pvr_sync_data.defer_free_wq) { + pr_err("pvr_sync2: %s: Failed to create pvr_sync defer_free workqueue\n", + __func__); + goto err_free_sync_context; + } + + INIT_WORK(&pvr_sync_data.defer_free_work, + pvr_sync_defer_free_work_queue_function); + + /* Use the shared workqueue */ + pvr_sync_data.check_status_wq = priv->fence_status_wq; + + INIT_WORK(&pvr_sync_data.check_status_work, + pvr_sync_check_status_work_queue_function); + error = PVRSRVRegisterCmdCompleteNotify( + &pvr_sync_data.command_complete_handle, + &pvr_sync_update_all_timelines, + &priv->dev_node); + if (error != PVRSRV_OK) { + pr_err("pvr_sync2: %s: Failed to register MISR notification (%s)\n", + __func__, PVRSRVGetErrorString(error)); + goto err_destroy_defer_free_wq; + } + + error = PVRSRVRegisterDbgRequestNotify( + &pvr_sync_data.debug_notify_handle, + priv->dev_node, + pvr_sync_debug_request, + DEBUG_REQUEST_ANDROIDSYNC, + NULL); + if (error != PVRSRV_OK) { + pr_err("pvr_sync2: %s: Failed to register debug notifier (%s)\n", + __func__, PVRSRVGetErrorString(error)); + goto err_unregister_cmd_complete; + } + + err = misc_register(&pvr_sync_device); + if (err) { + pr_err("pvr_sync2: %s: Failed to register pvr_sync device (%d)\n", + __func__, err); + error = PVRSRV_ERROR_RESOURCE_UNAVAILABLE; + goto err_unregister_dbg; + } + + error = PVRSRV_OK; + return error; + +err_unregister_dbg: + PVRSRVUnregisterDbgRequestNotify(pvr_sync_data.debug_notify_handle); +err_unregister_cmd_complete: + PVRSRVUnregisterCmdCompleteNotify( + pvr_sync_data.command_complete_handle); +err_destroy_defer_free_wq: + 
destroy_workqueue(pvr_sync_data.defer_free_wq); +err_free_sync_context: + SyncPrimContextDestroy(pvr_sync_data.sync_prim_context); +err_release_event_object: + PVRSRVReleaseGlobalEventObjectKM(pvr_sync_data.event_object_handle); +err_out: + + return error; +} + +void pvr_sync_deinit(void) +{ + DPF("%s", __func__); + + misc_deregister(&pvr_sync_device); + + PVRSRVUnregisterDbgRequestNotify(pvr_sync_data.debug_notify_handle); + + PVRSRVUnregisterCmdCompleteNotify( + pvr_sync_data.command_complete_handle); + + /* This will drain the workqueue, so we guarantee that all deferred + * syncs are free'd before returning. + */ + destroy_workqueue(pvr_sync_data.defer_free_wq); + + sync_pool_clear(); + + SyncPrimContextDestroy(pvr_sync_data.sync_prim_context); + + + PVRSRVReleaseGlobalEventObjectKM(pvr_sync_data.event_object_handle); +} + +enum PVRSRV_ERROR pvr_sync_fence_wait(void *fence, u32 timeout_in_ms) +{ + int err; + + DPF("fence<%p>, to=%d", fence, timeout_in_ms); + + err = sync_fence_wait(fence, timeout_in_ms); + /* -ETIME means active. In this case we will retry later again. If the + * return value is an error or zero we will close this fence and + * proceed. This makes sure that we are not getting stuck here when a + * fence changes into an error state for whatever reason. 
*/ + if (err == -ETIME) { + DPF("timeout", __func__); +#ifdef DEBUG_OUTPUT + _dump_fence(fence, NULL, NULL); +#endif + return PVRSRV_ERROR_TIMEOUT; + } else if (err != 0) { + pr_err("%s: failed dependencies\n", __func__); + return PVRSRV_ERROR_FAILED_DEPENDENCIES; + } + + return PVRSRV_OK; +} + +enum PVRSRV_ERROR pvr_sync_fence_release(void *fence) +{ + sync_fence_put(fence); + + return PVRSRV_OK; +} + +enum PVRSRV_ERROR pvr_sync_fence_get(int fence_fd, void **pfence) +{ + struct file *file; + + file = fget(fence_fd); + if (file == NULL || file->private_data == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + *pfence = file->private_data; + + return PVRSRV_OK; +} + +enum PVRSRV_ERROR pvr_sync_sw_timeline_fence_create(int timeline_fd, + const char *fence_name, + int *fence_fd_out, + u64 *sync_pt_idx) +{ + enum PVRSRV_ERROR srv_err; + struct file *file; + struct pvr_sw_sync_timeline *pvr_sw_timeline; + struct sync_fence *fence = NULL; + struct sync_pt *sync_point; + int fd; + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) { + pr_err("%s: invalid fd\n", __func__); + + return PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; + } + + file = fget(timeline_fd); + if (!file || !file->private_data) { + /* unrecognised timeline */ + pr_err("%s: unrecognised timeline\n", __func__); + + srv_err = PVRSRV_ERROR_INVALID_PARAMS; + if (file) + fput(file); + goto err_put_fd; + } + + pvr_sw_timeline = (struct pvr_sw_sync_timeline *)file->private_data; + + DPF("pvr_sw_timeline<%p>", pvr_sw_timeline); + DPF("psSWTimeline<%p>", pvr_sw_timeline->sw_sync_timeline); + + sync_point = sw_sync_pt_create(pvr_sw_timeline->sw_sync_timeline, + pvr_sw_timeline->next_value); + fput(file); + if (!sync_point) { + srv_err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_put_fd; + } + + fence = sync_fence_create(fence_name, sync_point); + if (!fence) { + srv_err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_free_pt; + } + + sync_fence_install(fence, fd); + + if (sync_pt_idx) { + *sync_pt_idx = pvr_sw_timeline->next_value; + 
} + pvr_sw_timeline->next_value++; + + *fence_fd_out = fd; + + DPF("returned fence fd %d <%p> '%s'", *fence_fd_out, fence, fence_name); + + return PVRSRV_OK; + +err_free_pt: + sync_pt_free(sync_point); +err_put_fd: + put_unused_fd(fd); + return srv_err; +} + +enum PVRSRV_ERROR pvr_sync_sw_timeline_advance(void *timeline, u64 *sync_pt_idx) +{ + struct sw_sync_timeline *sw_timeline; + struct pvr_sw_sync_timeline *pvr_sw_timeline; + + if (timeline == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + pvr_sw_timeline = (struct pvr_sw_sync_timeline *)timeline; + sw_timeline = (struct sw_sync_timeline *)pvr_sw_timeline->sw_sync_timeline; + + if (pvr_sw_timeline->current_value == (pvr_sw_timeline->next_value - 1)) { + pr_err("%s: attempt to advance SW timeline beyond last defined point\n", + __func__); + return PVRSRV_ERROR_SW_TIMELINE_AT_LATEST_POINT; + } + + sw_sync_timeline_inc(sw_timeline, 1); + pvr_sw_timeline->current_value++; + + if (sync_pt_idx) { + *sync_pt_idx = pvr_sw_timeline->current_value; + } + + return PVRSRV_OK; +} + +enum PVRSRV_ERROR pvr_sync_sw_timeline_release(void *timeline) +{ + struct pvr_sw_sync_timeline *pvr_sw_timeline; + + if (timeline == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + pvr_sw_timeline = (struct pvr_sw_sync_timeline *)timeline; + kref_put(&pvr_sw_timeline->kref, pvr_sw_sync_destroy_timeline); + + return PVRSRV_OK; +} + +enum PVRSRV_ERROR pvr_sync_sw_timeline_get(int timeline_fd, + void **timeline_out) +{ + enum PVRSRV_ERROR srv_err; + struct file *file; + struct pvr_sw_sync_timeline *pvr_sw_timeline; + int ret; + + file = fget(timeline_fd); + if (file == NULL || file->private_data == NULL) { + pr_err("%s: invalid params\n", __func__); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + *timeline_out = file->private_data; + pvr_sw_timeline = (struct pvr_sw_sync_timeline *)file->private_data; + + /* Take ref on pvr_sw_timeline */ + ret = kref_get_unless_zero(&pvr_sw_timeline->kref); + if (ret) + srv_err = PVRSRV_OK; + else + srv_err = 
PVRSRV_ERROR_INVALID_PARAMS; + + /* Release reference taken on file */ + fput(file); + + DPF("pvr_sw_timeline=<%p>, pvr_sw_timeline->c=%llu, n=%llu", + pvr_sw_timeline->sw_sync_timeline, pvr_sw_timeline->current_value, + pvr_sw_timeline->next_value); + DPF("&pvr_sw_timeline->current_value=<%p>", + &pvr_sw_timeline->current_value); + DPF("returned, *timeline_out=<%p>", *timeline_out); + + return srv_err; +} + +enum PVRSRV_ERROR sync_dump_fence(void *sw_fence_obj, + DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, + void *dump_debug_file) +{ + struct sync_fence *fence = (struct sync_fence *) sw_fence_obj; + + _dump_fence(fence, dump_debug_printf, dump_debug_file); + + return PVRSRV_OK; +} + +enum PVRSRV_ERROR sync_sw_dump_timeline(void *sw_timeline_obj, + DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, + void *dump_debug_file) +{ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 10, 0)) + struct pvr_sw_sync_timeline *timeline = + (struct pvr_sw_sync_timeline *) sw_timeline_obj; + + PVR_DUMPDEBUG_LOG(dump_debug_printf, + dump_debug_file, + "TL:%s SeqNum: %llu/%llu", + timeline->sw_sync_timeline->obj.name, + timeline->current_value, + timeline->next_value); +#else + PVR_DUMPDEBUG_LOG(dump_debug_printf, + dump_debug_file, + "Timeline Stats not available on this kernel!"); +#endif + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_sync_file.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_sync_file.c new file mode 100644 index 000000000000..db7f2e63d2d7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/pvr_sync_file.c @@ -0,0 +1,1133 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File pvr_sync_file.c +@Title Kernel driver for Android's sync mechanism +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "services_kernel_client.h" +#include "pvr_drv.h" +#include "pvr_sync.h" +#include "pvr_fence.h" +#include "pvr_counting_timeline.h" + +#include "linux_sw_sync.h" + +#include +#include +#include +#include +#include +#include +#include + +/* This header must always be included last */ +#include "kernel_compatibility.h" + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)) && !defined(CHROMIUMOS_KERNEL) +#define sync_file_user_name(s) ((s)->name) +#else +#define sync_file_user_name(s) ((s)->user_name) +#endif + +#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) 
\ + do { \ + if (pfnDumpDebugPrintf) \ + pfnDumpDebugPrintf(pvDumpDebugFile, fmt, \ + ## __VA_ARGS__); \ + else \ + pr_err(fmt "\n", ## __VA_ARGS__); \ + } while (0) + +#define FILE_NAME "pvr_sync_file" + +struct sw_sync_create_fence_data { + __u32 value; + char name[32]; + __s32 fence; +}; +#define SW_SYNC_IOC_MAGIC 'W' +#define SW_SYNC_IOC_CREATE_FENCE \ + (_IOWR(SW_SYNC_IOC_MAGIC, 0, struct sw_sync_create_fence_data)) +#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32) + +/* Global data for the sync driver */ +static struct { + void *dev_cookie; + void *dbg_request_handle; + struct workqueue_struct *fence_status_wq; + struct pvr_fence_context *foreign_fence_context; +#if defined(NO_HARDWARE) + spinlock_t pvr_timeline_active_list_lock; + struct list_head pvr_timeline_active_list; +#endif + PFN_SYNC_CHECKPOINT_STRUCT sync_checkpoint_ops; +} pvr_sync_data; + +static const struct file_operations pvr_sync_fops; + +/* This is the actual timeline metadata. We might keep this around after the + * base sync driver has destroyed the pvr_sync_timeline_wrapper object. 
+ */ +struct pvr_sync_timeline { + char name[32]; + struct file *file; + bool is_sw; + /* Fence context used for hw fences */ + struct pvr_fence_context *hw_fence_context; + /* Timeline and context for sw fences */ + struct pvr_counting_fence_timeline *sw_fence_timeline; +#if defined(NO_HARDWARE) + /* List of all timelines (used to advance all timelines in nohw builds) */ + struct list_head list; +#endif +}; + +static +void pvr_sync_free_checkpoint_list_mem(void *mem_ptr) +{ + kfree(mem_ptr); +} + +#if defined(NO_HARDWARE) +/* function used to signal pvr fence in nohw builds */ +static +void pvr_sync_nohw_signal_fence(void *fence_data_to_signal) +{ + struct pvr_sync_timeline *this_timeline; + unsigned long flags; + + spin_lock_irqsave(&pvr_sync_data.pvr_timeline_active_list_lock, flags); + list_for_each_entry(this_timeline, &pvr_sync_data.pvr_timeline_active_list, list) { + pvr_fence_context_signal_fences_nohw(this_timeline->hw_fence_context); + } + spin_unlock_irqrestore(&pvr_sync_data.pvr_timeline_active_list_lock, flags); +} +#endif + +static bool is_pvr_timeline(struct file *file) +{ + return file->f_op == &pvr_sync_fops; +} + +static struct pvr_sync_timeline *pvr_sync_timeline_fget(int fd) +{ + struct file *file = fget(fd); + + if (!file) + return NULL; + + if (!is_pvr_timeline(file)) { + fput(file); + return NULL; + } + + return file->private_data; +} + +static void pvr_sync_timeline_fput(struct pvr_sync_timeline *timeline) +{ + fput(timeline->file); +} + +/* ioctl and fops handling */ + +static int pvr_sync_open(struct inode *inode, struct file *file) +{ + struct pvr_sync_timeline *timeline; + char task_comm[TASK_COMM_LEN]; + int err = -ENOMEM; + + get_task_comm(task_comm, current); + + timeline = kzalloc(sizeof(*timeline), GFP_KERNEL); + if (!timeline) + goto err_out; + + strlcpy(timeline->name, task_comm, sizeof(timeline->name)); + timeline->file = file; + timeline->is_sw = false; + + file->private_data = timeline; + err = 0; +err_out: + return err; +} + 
+static int pvr_sync_close(struct inode *inode, struct file *file) +{ + struct pvr_sync_timeline *timeline = file->private_data; + + if (timeline->sw_fence_timeline) { + /* This makes sure any outstanding SW syncs are marked as + * complete at timeline close time. Otherwise it'll leak the + * timeline (as outstanding fences hold a ref) and possibly + * wedge the system if something is waiting on one of those + * fences + */ + pvr_counting_fence_timeline_force_complete( + timeline->sw_fence_timeline); + pvr_counting_fence_timeline_put(timeline->sw_fence_timeline); + } + + if (timeline->hw_fence_context) { +#if defined(NO_HARDWARE) + list_del(&timeline->list); +#endif + pvr_fence_context_destroy(timeline->hw_fence_context); + } + + kfree(timeline); + + return 0; +} + +/* + * This is the function that kick code will call in order to 'finalise' a + * created output fence just prior to returning from the kick function. + * The OS native sync code needs to implement a function meeting this + * specification - the implementation may be a nop if the OS does not need + * to perform any actions at this point. + * + * Input: fence_fd The PVRSRV_FENCE to be 'finalised'. This value + * will have been returned by an earlier call to + * pvr_sync_create_fence(). + * Input: finalise_data The finalise data returned by an earlier call + * to pvr_sync_create_fence(). 
+ */ +static enum PVRSRV_ERROR +pvr_sync_finalise_fence(PVRSRV_FENCE fence_fd, void *finalise_data) +{ + struct sync_file *sync_file = finalise_data; + struct pvr_fence *pvr_fence; + + if (!sync_file || (fence_fd < 0)) { + pr_err(FILE_NAME ": %s: Invalid input fence\n", __func__); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + pvr_fence = to_pvr_fence(sync_file->fence); + + /* pvr fences can be signalled any time after creation */ + dma_fence_enable_sw_signaling(&pvr_fence->base); + + fd_install(fence_fd, sync_file->file); + + return PVRSRV_OK; +} + +/* + * This is the function that kick code will call in order to obtain a new + * PVRSRV_FENCE from the OS native sync code and the PSYNC_CHECKPOINT used + * in that fence. The OS native sync code needs to implement a function + * meeting this specification. + * + * Input: fence_name A string to annotate the fence with (for + * debug). + * Input: timeline The timeline on which the new fence is to be + * created. + * Output: new_fence The new PVRSRV_FENCE to be returned by the + * kick call. + * Output: fence_uid Unique ID of the update fence. + * Output: fence_finalise_data Pointer to data needed to finalise the fence. + * Output: new_checkpoint_handle The PSYNC_CHECKPOINT used by the new fence. 
+ */ +static enum PVRSRV_ERROR +pvr_sync_create_fence(const char *fence_name, + PVRSRV_TIMELINE new_fence_timeline, + PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, + PVRSRV_FENCE *new_fence, u64 *fence_uid, + void **fence_finalise_data, + PSYNC_CHECKPOINT *new_checkpoint_handle, + void **timeline_update_sync, + __u32 *timeline_update_value) +{ + PVRSRV_ERROR err = PVRSRV_OK; + PVRSRV_FENCE new_fence_fd = -1; + struct pvr_sync_timeline *timeline; + struct pvr_fence *pvr_fence; + PSYNC_CHECKPOINT checkpoint; + struct sync_file *sync_file; + + if (new_fence_timeline < 0 || !new_fence || !new_checkpoint_handle + || !fence_finalise_data) { + pr_err(FILE_NAME ": %s: Invalid input params\n", __func__); + err = PVRSRV_ERROR_INVALID_PARAMS; + goto err_out; + } + + /* We reserve the new fence FD before taking any operations + * as we do not want to fail (e.g. run out of FDs) + */ + new_fence_fd = get_unused_fd_flags(O_CLOEXEC); + if (new_fence_fd < 0) { + pr_err(FILE_NAME ": %s: Failed to get fd\n", __func__); + err = PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; + goto err_out; + } + + timeline = pvr_sync_timeline_fget(new_fence_timeline); + if (!timeline) { + pr_err(FILE_NAME ": %s: Failed to open supplied timeline fd (%d)\n", + __func__, new_fence_timeline); + err = PVRSRV_ERROR_INVALID_PARAMS; + goto err_put_fd; + } + + if (timeline->is_sw) { + /* This should never happen! */ + pr_err(FILE_NAME ": %s: Request to create a pvr fence on sw timeline (%d)\n", + __func__, new_fence_timeline); + err = PVRSRV_ERROR_INVALID_PARAMS; + goto err_put_timeline; + } + + if (!timeline->hw_fence_context) { +#if defined(NO_HARDWARE) + unsigned long flags; +#endif + /* First time we use this timeline, so create a context. 
*/ + timeline->hw_fence_context = + pvr_fence_context_create(pvr_sync_data.dev_cookie, + pvr_sync_data.fence_status_wq, + timeline->name); + if (!timeline->hw_fence_context) { + pr_err(FILE_NAME ": %s: Failed to create fence context (%d)\n", + __func__, new_fence_timeline); + err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_put_timeline; + } +#if defined(NO_HARDWARE) + /* Add timeline to active list */ + INIT_LIST_HEAD(&timeline->list); + spin_lock_irqsave(&pvr_sync_data.pvr_timeline_active_list_lock, flags); + list_add_tail(&timeline->list, &pvr_sync_data.pvr_timeline_active_list); + spin_unlock_irqrestore(&pvr_sync_data.pvr_timeline_active_list_lock, flags); +#endif + } + + pvr_fence = pvr_fence_create(timeline->hw_fence_context, + psSyncCheckpointContext, + new_fence_timeline, + fence_name); + if (!pvr_fence) { + pr_err(FILE_NAME ": %s: Failed to create new pvr_fence\n", + __func__); + err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_put_timeline; + } + + checkpoint = pvr_fence_get_checkpoint(pvr_fence); + if (!checkpoint) { + pr_err(FILE_NAME ": %s: Failed to get fence checkpoint\n", + __func__); + err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_destroy_fence; + } + + sync_file = sync_file_create(&pvr_fence->base); + if (!sync_file) { + pr_err(FILE_NAME ": %s: Failed to create sync_file\n", + __func__); + err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_destroy_fence; + } + strlcpy(sync_file_user_name(sync_file), + pvr_fence->name, + sizeof(sync_file_user_name(sync_file))); + dma_fence_put(&pvr_fence->base); + + *new_fence = new_fence_fd; + *fence_finalise_data = sync_file; + *new_checkpoint_handle = checkpoint; + *fence_uid = OSGetCurrentClientProcessIDKM(); + *fence_uid = (*fence_uid << 32) | (new_fence_fd & U32_MAX); + /* not used but don't want to return dangling pointers */ + *timeline_update_sync = NULL; + *timeline_update_value = 0; + + pvr_sync_timeline_fput(timeline); +err_out: + return err; + +err_destroy_fence: + pvr_fence_destroy(pvr_fence); +err_put_timeline: + 
pvr_sync_timeline_fput(timeline); +err_put_fd: + put_unused_fd(new_fence_fd); + *fence_uid = PVRSRV_NO_FENCE; + goto err_out; +} + +/* + * This is the function that kick code will call in order to 'rollback' a + * created output fence should an error occur when submitting the kick. + * The OS native sync code needs to implement a function meeting this + * specification. + * + * Input: fence_to_rollback The PVRSRV_FENCE to be 'rolled back'. The fence + * should be destroyed and any actions taken due to + * its creation that need to be undone should be + * reverted. + * Input: finalise_data The finalise data for the fence to be 'rolled back'. + */ +static enum PVRSRV_ERROR +pvr_sync_rollback_fence_data(PVRSRV_FENCE fence_to_rollback, + void *fence_data_to_rollback) +{ + struct sync_file *sync_file = fence_data_to_rollback; + struct pvr_fence *pvr_fence; + + if (!sync_file || fence_to_rollback < 0) { + pr_err(FILE_NAME ": %s: Invalid fence (%d)\n", __func__, + fence_to_rollback); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + pvr_fence = to_pvr_fence(sync_file->fence); + if (!pvr_fence) { + pr_err(FILE_NAME + ": %s: Non-PVR fence (%p)\n", + __func__, sync_file->fence); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + fput(sync_file->file); + + put_unused_fd(fence_to_rollback); + + return PVRSRV_OK; +} + +/* + * This is the function that kick code will call in order to obtain a list of + * the PSYNC_CHECKPOINTs for a given PVRSRV_FENCE passed to a kick function. + * The OS native sync code will allocate the memory to hold the returned list + * of PSYNC_CHECKPOINT ptrs. The caller will free this memory once it has + * finished referencing it. + * + * Input: fence The input (check) fence + * Output: nr_checkpoints The number of PVRSRV_SYNC_CHECKPOINT ptrs + * returned in the checkpoint_handles + * parameter. + * Output: fence_uid Unique ID of the check fence + * Input/Output: checkpoint_handles The returned list of PVRSRV_SYNC_CHECKPOINTs. 
+ */ +static enum PVRSRV_ERROR +pvr_sync_resolve_fence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, + PVRSRV_FENCE fence_to_resolve, u32 *nr_checkpoints, + PSYNC_CHECKPOINT **checkpoint_handles, u64 *fence_uid) +{ + PSYNC_CHECKPOINT *checkpoints = NULL; + unsigned int i, num_fences, num_used_fences = 0; + struct dma_fence **fences = NULL; + struct dma_fence *fence; + PVRSRV_ERROR err = PVRSRV_OK; + + if (!nr_checkpoints || !checkpoint_handles || !fence_uid) { + pr_err(FILE_NAME ": %s: Invalid input checkpoint pointer\n", + __func__); + err = PVRSRV_ERROR_INVALID_PARAMS; + goto err_out; + } + + *nr_checkpoints = 0; + *checkpoint_handles = NULL; + *fence_uid = 0; + + if (fence_to_resolve < 0) + goto err_out; + + fence = sync_file_get_fence(fence_to_resolve); + if (!fence) { + pr_err(FILE_NAME ": %s: Failed to read sync private data for fd %d\n", + __func__, fence_to_resolve); + err = PVRSRV_ERROR_HANDLE_NOT_FOUND; + goto err_out; + } + + if (dma_fence_is_array(fence)) { + struct dma_fence_array *array = to_dma_fence_array(fence); + + fences = array->fences; + num_fences = array->num_fences; + } else { + fences = &fence; + num_fences = 1; + } + + checkpoints = kmalloc_array(num_fences, sizeof(PSYNC_CHECKPOINT), + GFP_KERNEL); + if (!checkpoints) { + err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_put_fence; + } + for (i = 0; i < num_fences; i++) { + /* Only return the checkpoint if the fence is still active. 
*/ + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &fences[i]->flags)) { + struct pvr_fence *pvr_fence = + pvr_fence_create_from_fence( + pvr_sync_data.foreign_fence_context, + psSyncCheckpointContext, + fences[i], + fence_to_resolve, + "foreign"); + if (!pvr_fence) { + pr_err(FILE_NAME ": %s: Failed to create fence\n", + __func__); + err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_free_checkpoints; + } + checkpoints[num_used_fences] = + pvr_fence_get_checkpoint(pvr_fence); + SyncCheckpointTakeRef(checkpoints[num_used_fences]); + ++num_used_fences; + dma_fence_put(&pvr_fence->base); + } + } + /* If we don't return any checkpoints, delete the array because + * the caller will not. + */ + if (num_used_fences == 0) { + kfree(checkpoints); + checkpoints = NULL; + } + + *checkpoint_handles = checkpoints; + *nr_checkpoints = num_used_fences; + *fence_uid = OSGetCurrentClientProcessIDKM(); + *fence_uid = (*fence_uid << 32) | (fence_to_resolve & U32_MAX); + +err_put_fence: + dma_fence_put(fence); +err_out: + return err; + +err_free_checkpoints: + for (i = 0; i < num_used_fences; i++) { + if (checkpoints[i]) + SyncCheckpointDropRef(checkpoints[i]); + } + kfree(checkpoints); + goto err_put_fence; +} + +/* + * This is the function that driver code will call in order to request the + * sync implementation to output debug information relating to any sync + * checkpoints it may have created which appear in the provided array of + * FW addresses of Unified Fence Objects (UFOs). + * + * Input: nr_ufos The number of FW addresses provided in the + * vaddrs parameter. + * Input: vaddrs The array of FW addresses of UFOs. The sync + * implementation should check each of these to + * see if any relate to sync checkpoints it has + * created and where they do output debug information + * pertaining to the native/fallback sync with + * which it is associated. 
+ */ +static u32 +pvr_sync_dump_info_on_stalled_ufos(u32 nr_ufos, u32 *vaddrs) +{ + return pvr_fence_dump_info_on_stalled_ufos(pvr_sync_data.foreign_fence_context, + nr_ufos, + vaddrs); +} + +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) +static enum tag_img_bool pvr_sync_checkpoint_ufo_has_signalled(u32 fwaddr, + u32 value) +{ + return pvr_fence_checkpoint_ufo_has_signalled(fwaddr, value); +} + +static void pvr_sync_check_state(void) +{ + pvr_fence_check_state(); +} +#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */ + +#if defined(PDUMP) +static enum PVRSRV_ERROR +pvr_sync_fence_get_checkpoints(PVRSRV_FENCE fence_to_pdump, u32 *nr_checkpoints, + struct _SYNC_CHECKPOINT ***checkpoint_handles) +{ + struct dma_fence **fences = NULL; + struct dma_fence *fence; + struct pvr_fence *pvr_fence; + struct _SYNC_CHECKPOINT **checkpoints = NULL; + unsigned int i, num_fences, num_used_fences = 0; + enum PVRSRV_ERROR err; + + if (fence_to_pdump < 0) { + err = PVRSRV_ERROR_INVALID_PARAMS; + goto err_out; + } + + if (!nr_checkpoints || !checkpoint_handles) { + pr_err(FILE_NAME ": %s: Invalid input checkpoint pointer\n", + __func__); + err = PVRSRV_ERROR_INVALID_PARAMS; + goto err_out; + } + + fence = sync_file_get_fence(fence_to_pdump); + if (!fence) { + pr_err(FILE_NAME ": %s: Failed to read sync private data for fd %d\n", + __func__, fence_to_pdump); + err = PVRSRV_ERROR_HANDLE_NOT_FOUND; + goto err_out; + } + + if (dma_fence_is_array(fence)) { + struct dma_fence_array *array = to_dma_fence_array(fence); + + fences = array->fences; + num_fences = array->num_fences; + } else { + fences = &fence; + num_fences = 1; + } + + checkpoints = kmalloc_array(num_fences, sizeof(*checkpoints), + GFP_KERNEL); + if (!checkpoints) { + pr_err("pvr_sync_file: %s: Failed to alloc memory for returned list of sync checkpoints\n", + __func__); + err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_put_fence; + } + + for (i = 0; i < num_fences; i++) { + pvr_fence = to_pvr_fence(fences[i]); + if (!pvr_fence) { + 
continue; + } + checkpoints[num_used_fences] = pvr_fence_get_checkpoint(pvr_fence); + ++num_used_fences; + } + + *checkpoint_handles = checkpoints; + *nr_checkpoints = num_used_fences; + err = PVRSRV_OK; + +err_put_fence: + dma_fence_put(fence); +err_out: + return err; +} +#endif + +static long pvr_sync_ioctl_rename(struct pvr_sync_timeline *timeline, + void __user *user_data) +{ + int err = 0; + struct pvr_sync_rename_ioctl_data data; + + if (!access_ok(user_data, sizeof(data))) { + err = -EFAULT; + goto err; + } + + if (copy_from_user(&data, user_data, sizeof(data))) { + err = -EFAULT; + goto err; + } + + data.szName[sizeof(data.szName) - 1] = '\0'; + strlcpy(timeline->name, data.szName, sizeof(timeline->name)); + if (timeline->hw_fence_context) + strlcpy(timeline->hw_fence_context->name, data.szName, + sizeof(timeline->hw_fence_context->name)); + +err: + return err; +} + +static long pvr_sync_ioctl_force_sw_only(struct pvr_sync_timeline *timeline, + void **private_data) +{ + /* Already in SW mode? 
*/ + if (timeline->sw_fence_timeline) + return 0; + + /* Create a sw_sync timeline with the old GPU timeline's name */ + timeline->sw_fence_timeline = pvr_counting_fence_timeline_create( + pvr_sync_data.dev_cookie, + timeline->name); + if (!timeline->sw_fence_timeline) + return -ENOMEM; + + timeline->is_sw = true; + + return 0; +} + +static long pvr_sync_ioctl_sw_create_fence(struct pvr_sync_timeline *timeline, + void __user *user_data) +{ + struct pvr_sw_sync_create_fence_data data; + struct sync_file *sync_file; + int fd = get_unused_fd_flags(O_CLOEXEC); + struct dma_fence *fence; + int err = -EFAULT; + + if (fd < 0) { + pr_err(FILE_NAME ": %s: Failed to find unused fd (%d)\n", + __func__, fd); + err = -EMFILE; + goto err_out; + } + + if (copy_from_user(&data, user_data, sizeof(data))) { + pr_err(FILE_NAME ": %s: Failed copy from user\n", __func__); + goto err_put_fd; + } + + fence = pvr_counting_fence_create(timeline->sw_fence_timeline, &data.sync_pt_idx); + if (!fence) { + pr_err(FILE_NAME ": %s: Failed to create a sync point (%d)\n", + __func__, fd); + err = -ENOMEM; + goto err_put_fd; + } + + sync_file = sync_file_create(fence); + if (!sync_file) { + pr_err(FILE_NAME ": %s: Failed to create a sync point (%d)\n", + __func__, fd); + err = -ENOMEM; + goto err_put_fence; + } + + data.fence = fd; + + if (copy_to_user(user_data, &data, sizeof(data))) { + pr_err(FILE_NAME ": %s: Failed copy to user\n", __func__); + goto err_put_fence; + } + + fd_install(fd, sync_file->file); + err = 0; + + dma_fence_put(fence); +err_out: + return err; + +err_put_fence: + dma_fence_put(fence); +err_put_fd: + put_unused_fd(fd); + goto err_out; +} + +static long pvr_sync_ioctl_sw_inc(struct pvr_sync_timeline *timeline, + void __user *user_data) +{ + bool res; + struct pvr_sw_timeline_advance_data data; + + res = pvr_counting_fence_timeline_inc(timeline->sw_fence_timeline, &data.sync_pt_idx); + + /* pvr_counting_fence_timeline_inc won't allow sw timeline to be + * advanced beyond the 
last defined point + */ + if (!res) { + pr_err("pvr_sync_file: attempt to advance SW timeline beyond last defined point\n"); + return -EPERM; + } + + if (copy_to_user(user_data, &data, sizeof(data))) { + pr_err(FILE_NAME ": %s: Failed copy to user\n", __func__); + return -EFAULT; + } + + return 0; +} + +static long +pvr_sync_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + void __user *user_data = (void __user *)arg; + long err = -ENOTTY; + struct pvr_sync_timeline *timeline = file->private_data; + + if (!timeline->is_sw) { + + switch (cmd) { + case PVR_SYNC_IOC_RENAME: + err = pvr_sync_ioctl_rename(timeline, user_data); + break; + case PVR_SYNC_IOC_FORCE_SW_ONLY: + err = pvr_sync_ioctl_force_sw_only(timeline, + &file->private_data); + break; + default: + break; + } + } else { + + switch (cmd) { + case PVR_SW_SYNC_IOC_CREATE_FENCE: + err = pvr_sync_ioctl_sw_create_fence(timeline, + user_data); + break; + case PVR_SW_SYNC_IOC_INC: + err = pvr_sync_ioctl_sw_inc(timeline, user_data); + break; + default: + break; + } + } + + return err; +} + +static const struct file_operations pvr_sync_fops = { + .owner = THIS_MODULE, + .open = pvr_sync_open, + .release = pvr_sync_close, + .unlocked_ioctl = pvr_sync_ioctl, + .compat_ioctl = pvr_sync_ioctl, +}; + +static struct miscdevice pvr_sync_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = PVRSYNC_MODNAME, + .fops = &pvr_sync_fops, +}; + +static void +pvr_sync_debug_request_heading(void *data, u32 verbosity, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + if (DD_VERB_LVL_ENABLED(verbosity, DEBUG_REQUEST_VERBOSITY_MEDIUM)) + PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, "------[ Native Fence Sync: timelines ]------"); +} + +enum PVRSRV_ERROR pvr_sync_init(struct device *dev) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct pvr_drm_private *priv = ddev->dev_private; + enum PVRSRV_ERROR error; + int err; + + error = 
PVRSRVRegisterDbgRequestNotify(&pvr_sync_data.dbg_request_handle, + priv->dev_node, + pvr_sync_debug_request_heading, + DEBUG_REQUEST_LINUXFENCE, + NULL); + if (error != PVRSRV_OK) { + pr_err("%s: failed to register debug request callback (%s)\n", + __func__, PVRSRVGetErrorString(error)); + goto err_out; + } + + pvr_sync_data.dev_cookie = priv->dev_node; + pvr_sync_data.fence_status_wq = priv->fence_status_wq; + + pvr_sync_data.foreign_fence_context = + pvr_fence_context_create(pvr_sync_data.dev_cookie, + pvr_sync_data.fence_status_wq, + "foreign_sync"); + if (!pvr_sync_data.foreign_fence_context) { + pr_err(FILE_NAME ": %s: Failed to create foreign sync context\n", + __func__); + error = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_out; + } + +#if defined(NO_HARDWARE) + INIT_LIST_HEAD(&pvr_sync_data.pvr_timeline_active_list); +#endif + + /* Register the resolve fence and create fence functions with + * sync_checkpoint.c + * The pvr_fence context registers its own EventObject callback to + * update sync status + */ + /* Initialise struct and register with sync_checkpoint.c */ + pvr_sync_data.sync_checkpoint_ops.pfnFenceResolve = pvr_sync_resolve_fence; + pvr_sync_data.sync_checkpoint_ops.pfnFenceCreate = pvr_sync_create_fence; + pvr_sync_data.sync_checkpoint_ops.pfnFenceDataRollback = pvr_sync_rollback_fence_data; + pvr_sync_data.sync_checkpoint_ops.pfnFenceFinalise = pvr_sync_finalise_fence; +#if defined(NO_HARDWARE) + pvr_sync_data.sync_checkpoint_ops.pfnNoHWUpdateTimelines = pvr_sync_nohw_signal_fence; +#else + pvr_sync_data.sync_checkpoint_ops.pfnNoHWUpdateTimelines = NULL; +#endif + pvr_sync_data.sync_checkpoint_ops.pfnFreeCheckpointListMem = pvr_sync_free_checkpoint_list_mem; + pvr_sync_data.sync_checkpoint_ops.pfnDumpInfoOnStalledUFOs = pvr_sync_dump_info_on_stalled_ufos; +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) + pvr_sync_data.sync_checkpoint_ops.pfnCheckpointHasSignalled = pvr_sync_checkpoint_ufo_has_signalled; + pvr_sync_data.sync_checkpoint_ops.pfnCheckState = 
pvr_sync_check_state; + pvr_sync_data.sync_checkpoint_ops.pfnSignalWaiters = NULL; +#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */ + strlcpy(pvr_sync_data.sync_checkpoint_ops.pszImplName, "pvr_sync_file", SYNC_CHECKPOINT_IMPL_MAX_STRLEN); +#if defined(PDUMP) + pvr_sync_data.sync_checkpoint_ops.pfnSyncFenceGetCheckpoints = pvr_sync_fence_get_checkpoints; +#endif + + SyncCheckpointRegisterFunctions(&pvr_sync_data.sync_checkpoint_ops); + + err = misc_register(&pvr_sync_device); + if (err) { + pr_err(FILE_NAME ": %s: Failed to register pvr_sync device (%d)\n", + __func__, err); + error = PVRSRV_ERROR_RESOURCE_UNAVAILABLE; + goto err_unregister_checkpoint_funcs; + } + error = PVRSRV_OK; + +err_out: + return error; + +err_unregister_checkpoint_funcs: + SyncCheckpointRegisterFunctions(NULL); + pvr_fence_context_destroy(pvr_sync_data.foreign_fence_context); + goto err_out; +} + +void pvr_sync_deinit(void) +{ + SyncCheckpointRegisterFunctions(NULL); + misc_deregister(&pvr_sync_device); + pvr_fence_context_destroy(pvr_sync_data.foreign_fence_context); + PVRSRVUnregisterDbgRequestNotify(pvr_sync_data.dbg_request_handle); +} + +enum PVRSRV_ERROR pvr_sync_fence_wait(void *fence, u32 timeout_in_ms) +{ + long timeout = msecs_to_jiffies(timeout_in_ms); + int err; + + err = dma_fence_wait_timeout(fence, true, timeout); + /* + * dma_fence_wait_timeout returns: + * - the remaining timeout on success + * - 0 on timeout + * - -ERESTARTSYS if interrupted + */ + if (err > 0) + return PVRSRV_OK; + else if (err == 0) + return PVRSRV_ERROR_TIMEOUT; + + return PVRSRV_ERROR_FAILED_DEPENDENCIES; +} + +enum PVRSRV_ERROR pvr_sync_fence_release(void *fence) +{ + dma_fence_put(fence); + + return PVRSRV_OK; +} + +enum PVRSRV_ERROR pvr_sync_fence_get(int fence_fd, void **fence_out) +{ + struct dma_fence *fence; + + fence = sync_file_get_fence(fence_fd); + if (fence == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + *fence_out = fence; + + return PVRSRV_OK; +} + +enum PVRSRV_ERROR 
pvr_sync_sw_timeline_fence_create(int timeline_fd, + const char *fence_name, + int *fence_fd_out, + u64 *sync_pt_idx) +{ + enum PVRSRV_ERROR srv_err; + struct pvr_sync_timeline *timeline; + struct dma_fence *fence = NULL; + struct sync_file *sync_file = NULL; + int fd; + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) + return PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; + + timeline = pvr_sync_timeline_fget(timeline_fd); + if (!timeline) { + /* unrecognised timeline */ + srv_err = PVRSRV_ERROR_RESOURCE_UNAVAILABLE; + goto err_put_fd; + } + + fence = pvr_counting_fence_create(timeline->sw_fence_timeline, sync_pt_idx); + pvr_sync_timeline_fput(timeline); + if (!fence) { + srv_err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_put_fd; + } + + sync_file = sync_file_create(fence); + if (!sync_file) { + srv_err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_put_fence; + } + + fd_install(fd, sync_file->file); + + *fence_fd_out = fd; + + return PVRSRV_OK; + +err_put_fence: + dma_fence_put(fence); +err_put_fd: + put_unused_fd(fd); + return srv_err; +} + +enum PVRSRV_ERROR pvr_sync_sw_timeline_advance(void *timeline, u64 *sync_pt_idx) +{ + if (timeline == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + pvr_counting_fence_timeline_inc(timeline, sync_pt_idx); + + return PVRSRV_OK; +} + +enum PVRSRV_ERROR pvr_sync_sw_timeline_release(void *timeline) +{ + if (timeline == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + pvr_counting_fence_timeline_put(timeline); + + return PVRSRV_OK; +} + +enum PVRSRV_ERROR pvr_sync_sw_timeline_get(int timeline_fd, + void **timeline_out) +{ + struct pvr_counting_fence_timeline *sw_timeline; + struct pvr_sync_timeline *timeline; + + timeline = pvr_sync_timeline_fget(timeline_fd); + if (!timeline) + return PVRSRV_ERROR_INVALID_PARAMS; + + sw_timeline = + pvr_counting_fence_timeline_get(timeline->sw_fence_timeline); + pvr_sync_timeline_fput(timeline); + if (!sw_timeline) + return PVRSRV_ERROR_INVALID_PARAMS; + + *timeline_out = sw_timeline; + + return PVRSRV_OK; +} 
+static void _dump_sync_point(struct dma_fence *fence, + DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, + void *dump_debug_file) +{ + const struct dma_fence_ops *fence_ops = fence->ops; + bool signaled = dma_fence_is_signaled(fence); + char time[16] = { '\0' }; + + fence_ops->timeline_value_str(fence, time, sizeof(time)); + + PVR_DUMPDEBUG_LOG(dump_debug_printf, + dump_debug_file, + "<%p> Seq#=%llu TS=%s State=%s TLN=%s", + fence, + (u64) fence->seqno, + time, + (signaled) ? "Signalled" : "Active", + fence_ops->get_timeline_name(fence)); +} + +static void _dump_fence(struct dma_fence *fence, + DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, + void *dump_debug_file) +{ + if (dma_fence_is_array(fence)) { + struct dma_fence_array *fence_array = to_dma_fence_array(fence); + int i; + + PVR_DUMPDEBUG_LOG(dump_debug_printf, + dump_debug_file, + "Fence: [%p] Sync Points:\n", + fence_array); + + for (i = 0; i < fence_array->num_fences; i++) + _dump_sync_point(fence_array->fences[i], + dump_debug_printf, + dump_debug_file); + + } else { + _dump_sync_point(fence, dump_debug_printf, dump_debug_file); + } +} + +enum PVRSRV_ERROR +sync_dump_fence(void *sw_fence_obj, + DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, + void *dump_debug_file) +{ + struct dma_fence *fence = (struct dma_fence *) sw_fence_obj; + + _dump_fence(fence, dump_debug_printf, dump_debug_file); + + return PVRSRV_OK; +} + +enum PVRSRV_ERROR +sync_sw_dump_timeline(void *sw_timeline_obj, + DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, + void *dump_debug_file) +{ + pvr_counting_fence_timeline_dump_timeline(sw_timeline_obj, + dump_debug_printf, + dump_debug_file); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/services_kernel_client.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/services_kernel_client.h new file mode 100644 index 000000000000..79d8155c6ffa --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/services_kernel_client.h @@ -0,0 +1,269 @@ +/* vi: 
set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File services_kernel_client.h +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* This file contains a partial redefinition of the PowerVR Services 5 + * interface for use by components which are checkpatch clean. This + * header is included by the unrefined, non-checkpatch clean headers + * to ensure that prototype/typedef/macro changes break the build. + */ + +#ifndef __SERVICES_KERNEL_CLIENT__ +#define __SERVICES_KERNEL_CLIENT__ + +#include "pvrsrv_error.h" + +#include + +#include "pvrsrv_sync_km.h" +#include "sync_checkpoint_external.h" + +#ifndef __pvrsrv_defined_struct_enum__ + +/* sync_external.h */ + +struct PVRSRV_CLIENT_SYNC_PRIM { + volatile __u32 *pui32LinAddr; +}; + +struct PVRSRV_CLIENT_SYNC_PRIM_OP { + __u32 ui32Flags; + struct pvrsrv_sync_prim *psSync; + __u32 ui32FenceValue; + __u32 ui32UpdateValue; +}; + +#else /* __pvrsrv_defined_struct_enum__ */ + +struct PVRSRV_CLIENT_SYNC_PRIM; +struct PVRSRV_CLIENT_SYNC_PRIM_OP; + +enum tag_img_bool; + +#endif /* __pvrsrv_defined_struct_enum__ */ + +struct _PMR_; +struct _PVRSRV_DEVICE_NODE_; +struct dma_buf; +struct SYNC_PRIM_CONTEXT; + +/* pvr_notifier.h */ + +#ifndef _CMDCOMPNOTIFY_PFN_ +typedef void (*PFN_CMDCOMP_NOTIFY)(void *hCmdCompHandle); +#define _CMDCOMPNOTIFY_PFN_ +#endif +enum PVRSRV_ERROR PVRSRVRegisterCmdCompleteNotify(void **phNotify, + PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify, void *hPrivData); +enum PVRSRV_ERROR PVRSRVUnregisterCmdCompleteNotify(void *hNotify); 
+void PVRSRVCheckStatus(void *hCmdCompCallerHandle); + +#define DEBUG_REQUEST_DC 0 +#define DEBUG_REQUEST_SYNCTRACKING 1 +#define DEBUG_REQUEST_SYS 2 +#define DEBUG_REQUEST_ANDROIDSYNC 3 +#define DEBUG_REQUEST_LINUXFENCE 4 +#define DEBUG_REQUEST_SYNCCHECKPOINT 5 +#define DEBUG_REQUEST_HTB 6 +#define DEBUG_REQUEST_APPHINT 7 +#define DEBUG_REQUEST_FALLBACKSYNC 8 + +#define DEBUG_REQUEST_VERBOSITY_LOW 0 +#define DEBUG_REQUEST_VERBOSITY_MEDIUM 1 +#define DEBUG_REQUEST_VERBOSITY_HIGH 2 +#define DEBUG_REQUEST_VERBOSITY_MAX DEBUG_REQUEST_VERBOSITY_HIGH + +#define DD_VERB_LVL_ENABLED(_verbLvl, _verbLvlChk) ((_verbLvl) >= (_verbLvlChk)) + +#ifndef _DBGNOTIFY_PFNS_ +typedef void (DUMPDEBUG_PRINTF_FUNC)(void *pvDumpDebugFile, + const char *fmt, ...) __printf(2, 3); +typedef void (*PFN_DBGREQ_NOTIFY) (void *hDebugRequestHandle, + __u32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); +#define _DBGNOTIFY_PFNS_ +#endif +enum PVRSRV_ERROR PVRSRVRegisterDbgRequestNotify(void **phNotify, + struct _PVRSRV_DEVICE_NODE_ *psDevNode, + PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, + __u32 ui32RequesterID, + void *hDbgRequestHandle); +enum PVRSRV_ERROR PVRSRVUnregisterDbgRequestNotify(void *hNotify); + +/* physmem_dmabuf.h */ + +struct dma_buf *PhysmemGetDmaBuf(struct _PMR_ *psPMR); + +/* pvrsrv.h */ + +enum PVRSRV_ERROR PVRSRVAcquireGlobalEventObjectKM(void **phGlobalEventObject); +enum PVRSRV_ERROR PVRSRVReleaseGlobalEventObjectKM(void *hGlobalEventObject); + +/* sync.h */ + +enum PVRSRV_ERROR SyncPrimContextCreate( + struct _PVRSRV_DEVICE_NODE_ *psDevConnection, + struct SYNC_PRIM_CONTEXT **phSyncPrimContext); +void SyncPrimContextDestroy(struct SYNC_PRIM_CONTEXT *hSyncPrimContext); + +enum PVRSRV_ERROR SyncPrimAlloc(struct SYNC_PRIM_CONTEXT *hSyncPrimContext, + struct PVRSRV_CLIENT_SYNC_PRIM **ppsSync, const char *pszClassName); +enum PVRSRV_ERROR SyncPrimFree(struct PVRSRV_CLIENT_SYNC_PRIM *psSync); +enum PVRSRV_ERROR SyncPrimGetFirmwareAddr( + struct 
PVRSRV_CLIENT_SYNC_PRIM *psSync, + __u32 *sync_addr); + +/* osfunc.h */ +enum PVRSRV_ERROR OSEventObjectWait(void *hOSEventKM); +enum PVRSRV_ERROR OSEventObjectOpen(void *hEventObject, void **phOSEventKM); +enum PVRSRV_ERROR OSEventObjectClose(void *hOSEventKM); +__u32 OSGetCurrentClientProcessIDKM(void); +__u32 OSStringUINT32ToStr(char *pszBuf, size_t uSize, __u32 ui32Num); + +/* srvkm.h */ + +enum PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice, + int i32UMIdentifier, + struct _PVRSRV_DEVICE_NODE_ **ppsDeviceNode); +enum PVRSRV_ERROR PVRSRVCommonDeviceDestroy( + struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); +const char *PVRSRVGetErrorString(enum PVRSRV_ERROR eError); + +#ifndef _CHECKPOINT_PFNS_ +typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN)(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, PVRSRV_FENCE fence, u32 *nr_checkpoints, PSYNC_CHECKPOINT **checkpoint_handles, u64 *fence_uid); + +#ifndef _CHECKPOINT_PFNS_ +typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN)( + const char *fence_name, + PVRSRV_TIMELINE timeline, + PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, + PVRSRV_FENCE *new_fence, + u64 *fence_uid, + void **fence_finalise_data, + PSYNC_CHECKPOINT *new_checkpoint_handle, + void **timeline_update_sync, + __u32 *timeline_update_value); +#endif + +#ifndef _CHECKPOINT_PFNS_ +typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN)(PVRSRV_FENCE fence_to_rollback, void *finalise_data); +#endif + +#ifndef _CHECKPOINT_PFNS_ +typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN)(PVRSRV_FENCE fence_to_finalise, void *finalise_data); +#endif + +#ifndef _CHECKPOINT_PFNS_ +typedef __u32 (*PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN)(__u32 num_ufos, __u32 *vaddrs); +#endif + +#ifndef _CHECKPOINT_PFNS_ +typedef enum tag_img_bool (*PFN_SYNC_CHECKPOINT_UFO_HAS_SIGNALLED_FN)( + __u32 ui32FwAddr, __u32 ui32Value); +typedef enum PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_SIGNAL_WAITERS_FN)(void); +typedef 
void(*PFN_SYNC_CHECKPOINT_CHECK_STATE_FN)(void); +#if defined(PDUMP) +typedef PVRSRV_ERROR(*PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN)(PVRSRV_FENCE iFence, + IMG_UINT32 *puiNumCheckpoints, + PSYNC_CHECKPOINT **papsCheckpoints); +#endif +#endif + +/* This is the function that kick code will call in a NO_HARDWARE build only after + * sync checkpoints have been manually signalled, to allow the OS native sync + * implementation to update its timelines (as the usual callback notification + * of signalled checkpoints is not supported for NO_HARDWARE). + */ +#ifndef _CHECKPOINT_PFNS_ +typedef void (*PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN)(void *private_data); +typedef void (*PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN)(void *mem_ptr); + +#define SYNC_CHECKPOINT_IMPL_MAX_STRLEN 20 + +typedef struct { + PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve; + PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate; + PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN pfnFenceDataRollback; + PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN pfnFenceFinalise; + PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN pfnNoHWUpdateTimelines; + PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN pfnFreeCheckpointListMem; + PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN pfnDumpInfoOnStalledUFOs; +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) + PFN_SYNC_CHECKPOINT_UFO_HAS_SIGNALLED_FN pfnCheckpointHasSignalled; + PFN_SYNC_CHECKPOINT_CHECK_STATE_FN pfnCheckState; + PFN_SYNC_CHECKPOINT_SIGNAL_WAITERS_FN pfnSignalWaiters; +#endif + char pszImplName[SYNC_CHECKPOINT_IMPL_MAX_STRLEN]; +#if defined(PDUMP) + PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN pfnSyncFenceGetCheckpoints; +#endif +} PFN_SYNC_CHECKPOINT_STRUCT; + +enum PVRSRV_ERROR SyncCheckpointRegisterFunctions(PFN_SYNC_CHECKPOINT_STRUCT *psSyncCheckpointPfns); + +#define _CHECKPOINT_PFNS_ +#endif + +/* sync_checkpoint.h */ +enum PVRSRV_ERROR SyncCheckpointContextCreate(struct _PVRSRV_DEVICE_NODE_ *psDevConnection, PSYNC_CHECKPOINT_CONTEXT 
*phSyncCheckpointContext); +enum PVRSRV_ERROR SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext); +void SyncCheckpointContextRef(PSYNC_CHECKPOINT_CONTEXT psContext); +void SyncCheckpointContextUnref(PSYNC_CHECKPOINT_CONTEXT psContext); +enum PVRSRV_ERROR SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext, PVRSRV_TIMELINE timeline, PVRSRV_FENCE fence, const char *pszCheckpointName, PSYNC_CHECKPOINT *ppsSyncCheckpoint); +void SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags); +void SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags); +enum tag_img_bool SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags); +enum tag_img_bool SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags); +enum PVRSRV_ERROR SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint); +enum PVRSRV_ERROR SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint); +void SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint); +__u32 SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint); +void SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint); +__u32 SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint); +__u32 SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint); +__u32 SyncCheckpointGetReferenceCount(PSYNC_CHECKPOINT psSyncCheckpoint); +PVRSRV_TIMELINE SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint); +const char *SyncCheckpointGetStateString(PSYNC_CHECKPOINT psSyncCheckpoint); + +#endif + +#endif /* __SERVICES_KERNEL_CLIENT__ */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_crtc.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_crtc.c new file mode 100644 index 000000000000..59769fd695da --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_crtc.c @@ -0,0 +1,996 @@ +/* vi: set ts=8 sw=8 sts=8: */ 
+/*************************************************************************/ /*! +@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "pvr_linux_fence.h" +#include "drm_pdp_drv.h" + +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#include +#else +#include +#endif + +#include +#include +#include + +#include "pvr_dma_resv.h" +#include "drm_pdp_gem.h" + +#include "pdp_apollo.h" +#include "pdp_odin.h" +#include "pdp_plato.h" + +#include "plato_drv.h" + +#if defined(PDP_USE_ATOMIC) +#include +#endif + +#include "kernel_compatibility.h" + +enum pdp_crtc_flip_status { + PDP_CRTC_FLIP_STATUS_NONE = 0, + PDP_CRTC_FLIP_STATUS_PENDING, + PDP_CRTC_FLIP_STATUS_DONE, +}; + +struct pdp_flip_data { + struct dma_fence_cb base; + struct drm_crtc *crtc; + struct dma_fence *wait_fence; +}; + +/* returns true for ok, false for fail */ +static bool pdp_clocks_set(struct drm_crtc *crtc, + struct drm_display_mode *adjusted_mode) +{ + struct pdp_drm_private *dev_priv = crtc->dev->dev_private; + struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); + bool res; + + switch (dev_priv->version) { + case PDP_VERSION_ODIN: { + pdp_odin_set_updates_enabled(crtc->dev->dev, + pdp_crtc->pdp_reg, false); + res = pdp_odin_clocks_set(crtc->dev->dev, + pdp_crtc->pdp_reg, pdp_crtc->pll_reg, + 0, /* apollo only */ + pdp_crtc->odn_core_reg, /* odin only */ + adjusted_mode->hdisplay, + adjusted_mode->vdisplay, + dev_priv->subversion); + pdp_odin_set_updates_enabled(crtc->dev->dev, + pdp_crtc->pdp_reg, true); + + 
break; + } + case PDP_VERSION_APOLLO: { + int clock_in_mhz = adjusted_mode->clock / 1000; + + pdp_apollo_set_updates_enabled(crtc->dev->dev, + pdp_crtc->pdp_reg, false); + res = pdp_apollo_clocks_set(crtc->dev->dev, + pdp_crtc->pdp_reg, pdp_crtc->pll_reg, + clock_in_mhz, /* apollo only */ + NULL, /* odin only */ + adjusted_mode->hdisplay, + adjusted_mode->vdisplay); + pdp_apollo_set_updates_enabled(crtc->dev->dev, + pdp_crtc->pdp_reg, true); + + DRM_DEBUG_DRIVER("pdp clock set to %dMhz\n", clock_in_mhz); + + break; + } + case PDP_VERSION_PLATO: +#if defined(SUPPORT_PLATO_DISPLAY) + plato_enable_pdp_clock(dev_priv->dev->dev->parent); + res = true; +#else + DRM_ERROR("Trying to enable plato PDP clock on non-Plato build\n"); + res = false; +#endif + break; + default: + BUG(); + } + + return res; +} + +void pdp_crtc_set_plane_enabled(struct drm_crtc *crtc, bool enable) +{ + struct pdp_drm_private *dev_priv = crtc->dev->dev_private; + struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); + + switch (dev_priv->version) { + case PDP_VERSION_ODIN: + pdp_odin_set_plane_enabled(crtc->dev->dev, + pdp_crtc->pdp_reg, + 0, enable); + break; + case PDP_VERSION_APOLLO: + pdp_apollo_set_plane_enabled(crtc->dev->dev, + pdp_crtc->pdp_reg, + 0, enable); + break; + case PDP_VERSION_PLATO: + pdp_plato_set_plane_enabled(crtc->dev->dev, + pdp_crtc->pdp_reg, + 0, enable); + break; + default: + BUG(); + } +} + +static void pdp_crtc_set_syncgen_enabled(struct drm_crtc *crtc, bool enable) +{ + struct pdp_drm_private *dev_priv = crtc->dev->dev_private; + struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); + + switch (dev_priv->version) { + case PDP_VERSION_ODIN: + pdp_odin_set_syncgen_enabled(crtc->dev->dev, + pdp_crtc->pdp_reg, + enable); + break; + case PDP_VERSION_APOLLO: + pdp_apollo_set_syncgen_enabled(crtc->dev->dev, + pdp_crtc->pdp_reg, + enable); + break; + case PDP_VERSION_PLATO: + pdp_plato_set_syncgen_enabled(crtc->dev->dev, + pdp_crtc->pdp_reg, + enable); + break; + default: + BUG(); + } +} + 
+static void pdp_crtc_set_enabled(struct drm_crtc *crtc, bool enable) +{ + struct pdp_drm_private *dev_priv = crtc->dev->dev_private; + + if (enable) { + pdp_crtc_set_syncgen_enabled(crtc, enable); + pdp_crtc_set_plane_enabled(crtc, dev_priv->display_enabled); + drm_crtc_vblank_on(crtc); + } else { + drm_crtc_vblank_off(crtc); + pdp_crtc_set_plane_enabled(crtc, enable); + pdp_crtc_set_syncgen_enabled(crtc, enable); + } +} + +static void pdp_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *adjusted_mode) +{ + /* + * ht = horizontal total + * hbps = horizontal back porch start + * has = horizontal active start + * hlbs = horizontal left border start + * hfps = horizontal front porch start + * hrbs = horizontal right border start + * + * vt = vertical total + * vbps = vertical back porch start + * vas = vertical active start + * vtbs = vertical top border start + * vfps = vertical front porch start + * vbbs = vertical bottom border start + */ + struct pdp_drm_private *dev_priv = crtc->dev->dev_private; + struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); + uint32_t ht = adjusted_mode->htotal; + uint32_t hbps = adjusted_mode->hsync_end - adjusted_mode->hsync_start; + uint32_t has = (adjusted_mode->htotal - adjusted_mode->hsync_start); + uint32_t hlbs = has; + uint32_t hfps = (hlbs + adjusted_mode->hdisplay); + uint32_t hrbs = hfps; + uint32_t vt = adjusted_mode->vtotal; + uint32_t vbps = adjusted_mode->vsync_end - adjusted_mode->vsync_start; + uint32_t vas = (adjusted_mode->vtotal - adjusted_mode->vsync_start); + uint32_t vtbs = vas; + uint32_t vfps = (vtbs + adjusted_mode->vdisplay); + uint32_t vbbs = vfps; + bool ok; + + ok = pdp_clocks_set(crtc, adjusted_mode); + + if (!ok) { + dev_info(crtc->dev->dev, "%s failed\n", __func__); + return; + } + + switch (dev_priv->version) { + case PDP_VERSION_ODIN: + pdp_odin_set_updates_enabled(crtc->dev->dev, + pdp_crtc->pdp_reg, false); + pdp_odin_reset_planes(crtc->dev->dev, + pdp_crtc->pdp_reg); + 
pdp_odin_mode_set(crtc->dev->dev, + pdp_crtc->pdp_reg, + adjusted_mode->hdisplay, adjusted_mode->vdisplay, + hbps, ht, has, + hlbs, hfps, hrbs, + vbps, vt, vas, + vtbs, vfps, vbbs, + adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC, + adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC); + pdp_odin_set_powerdwn_enabled(crtc->dev->dev, + pdp_crtc->pdp_reg, false); + pdp_odin_set_updates_enabled(crtc->dev->dev, + pdp_crtc->pdp_reg, true); + break; + case PDP_VERSION_APOLLO: + pdp_apollo_set_updates_enabled(crtc->dev->dev, + pdp_crtc->pdp_reg, false); + pdp_apollo_reset_planes(crtc->dev->dev, + pdp_crtc->pdp_reg); + pdp_apollo_mode_set(crtc->dev->dev, + pdp_crtc->pdp_reg, + adjusted_mode->hdisplay, adjusted_mode->vdisplay, + hbps, ht, has, + hlbs, hfps, hrbs, + vbps, vt, vas, + vtbs, vfps, vbbs, + adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC, + adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC); + pdp_apollo_set_powerdwn_enabled(crtc->dev->dev, + pdp_crtc->pdp_reg, false); + pdp_apollo_set_updates_enabled(crtc->dev->dev, + pdp_crtc->pdp_reg, true); + break; + case PDP_VERSION_PLATO: + pdp_plato_mode_set(crtc->dev->dev, + pdp_crtc->pdp_reg, + adjusted_mode->hdisplay, + adjusted_mode->vdisplay, + hbps, ht, has, + hlbs, hfps, hrbs, + vbps, vt, vas, + vtbs, vfps, vbbs, + adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC, + adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC); + break; + default: + BUG(); + } +} + + +static bool pdp_crtc_helper_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct pdp_drm_private *dev_priv = crtc->dev->dev_private; + + if (dev_priv->version == PDP_VERSION_ODIN + && mode->hdisplay == 1920 + && mode->vdisplay == 1080) { + + /* 1080p 60Hz */ + const int h_total = 2200; + const int h_active_start = 192; + const int h_back_porch_start = 44; + const int v_total = 1125; + const int v_active_start = 41; + const int v_back_porch_start = 5; + + adjusted_mode->htotal = h_total; + adjusted_mode->hsync_start = 
adjusted_mode->htotal - + h_active_start; + adjusted_mode->hsync_end = adjusted_mode->hsync_start + + h_back_porch_start; + adjusted_mode->vtotal = v_total; + adjusted_mode->vsync_start = adjusted_mode->vtotal - + v_active_start; + adjusted_mode->vsync_end = adjusted_mode->vsync_start + + v_back_porch_start; + } + return true; +} + +static void pdp_crtc_flip_complete(struct drm_crtc *crtc); + +#if defined(PDP_USE_ATOMIC) +static void pdp_crtc_helper_mode_set_nofb(struct drm_crtc *crtc) +{ + pdp_crtc_mode_set(crtc, &crtc->state->adjusted_mode); +} + +static void pdp_crtc_helper_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct drm_crtc_state *new_crtc_state = crtc->state; + + if (!new_crtc_state->active || !old_crtc_state->active) + return; + + if (crtc->state->event) { + struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); + unsigned long flags; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) + pdp_crtc->flip_async = new_crtc_state->async_flip; +#else + pdp_crtc->flip_async = !!(new_crtc_state->pageflip_flags + & DRM_MODE_PAGE_FLIP_ASYNC); +#endif + if (pdp_crtc->flip_async) + WARN_ON(drm_crtc_vblank_get(crtc) != 0); + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + pdp_crtc->flip_event = crtc->state->event; + crtc->state->event = NULL; + + atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_DONE); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + + if (pdp_crtc->flip_async) + pdp_crtc_flip_complete(crtc); + } +} + +static void pdp_crtc_helper_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + pdp_crtc_set_enabled(crtc, true); + + if (crtc->state->event) { + struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); + unsigned long flags; + + WARN_ON(drm_crtc_vblank_get(crtc) != 0); + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + pdp_crtc->flip_event = crtc->state->event; + crtc->state->event = NULL; + + atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_DONE); + 
spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + } +} + +static void pdp_crtc_helper_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + pdp_crtc_set_enabled(crtc, false); + + if (crtc->state->event) { + unsigned long flags; + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + } +} +#else +static void pdp_crtc_helper_dpms(struct drm_crtc *crtc, int mode) +{ +} + +static void pdp_crtc_helper_prepare(struct drm_crtc *crtc) +{ + pdp_crtc_set_enabled(crtc, false); +} + +static void pdp_crtc_helper_commit(struct drm_crtc *crtc) +{ + pdp_crtc_set_enabled(crtc, true); +} + +static int pdp_crtc_helper_mode_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, + enum mode_set_atomic atomic) +{ + if (x < 0 || y < 0) + return -EINVAL; + + pdp_plane_set_surface(crtc, crtc->primary, fb, + (uint32_t) x, (uint32_t) y); + + return 0; +} + +static int pdp_crtc_helper_mode_set_base(struct drm_crtc *crtc, + int x, int y, + struct drm_framebuffer *old_fb) +{ + if (!crtc->primary->fb) { + DRM_ERROR("no framebuffer\n"); + return 0; + } + + return pdp_crtc_helper_mode_set_base_atomic(crtc, + crtc->primary->fb, + x, y, + 0); +} + +static int pdp_crtc_helper_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, + struct drm_framebuffer *old_fb) +{ + pdp_crtc_mode_set(crtc, adjusted_mode); + + return pdp_crtc_helper_mode_set_base(crtc, x, y, old_fb); +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) +static void pdp_crtc_helper_load_lut(struct drm_crtc *crtc) +{ +} +#endif + +static void pdp_crtc_helper_disable(struct drm_crtc *crtc) +{ + struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); + enum pdp_crtc_flip_status status; + + pdp_crtc_set_enabled(crtc, false); + + status = 
atomic_read(&pdp_crtc->flip_status); + if (status != PDP_CRTC_FLIP_STATUS_NONE) { + long lerr; + + lerr = wait_event_timeout( + pdp_crtc->flip_pending_wait_queue, + atomic_read(&pdp_crtc->flip_status) + != PDP_CRTC_FLIP_STATUS_PENDING, + 30 * HZ); + if (!lerr) + DRM_ERROR("Failed to wait for pending flip\n"); + else if (!pdp_crtc->flip_async) + pdp_crtc_flip_complete(crtc); + } +} +#endif /* defined(PDP_USE_ATOMIC) */ + +static void pdp_crtc_destroy(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct pdp_drm_private *dev_priv = dev->dev_private; + struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); + + DRM_DEBUG_DRIVER("[CRTC:%d]\n", crtc->base.id); + + drm_crtc_cleanup(crtc); + + iounmap(pdp_crtc->pll_reg); + + iounmap(pdp_crtc->pdp_reg); + release_mem_region(pdp_crtc->pdp_reg_phys_base, pdp_crtc->pdp_reg_size); + + kfree(pdp_crtc); + dev_priv->crtc = NULL; +} + +static void pdp_crtc_flip_complete(struct drm_crtc *crtc) +{ + struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); + unsigned long flags; + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + + /* The flipping process has been completed so reset the flip state */ + atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_NONE); + pdp_crtc->flip_async = false; + +#if !defined(PDP_USE_ATOMIC) + if (pdp_crtc->flip_data) { + dma_fence_put(pdp_crtc->flip_data->wait_fence); + kfree(pdp_crtc->flip_data); + pdp_crtc->flip_data = NULL; + } +#endif + + if (pdp_crtc->flip_event) { + drm_crtc_send_vblank_event(crtc, pdp_crtc->flip_event); + pdp_crtc->flip_event = NULL; + } + + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); +} + +#if !defined(PDP_USE_ATOMIC) +static void pdp_crtc_flip(struct drm_crtc *crtc) +{ + struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); + struct drm_framebuffer *old_fb; + + WARN_ON(atomic_read(&to_pdp_crtc(crtc)->flip_status) + != PDP_CRTC_FLIP_STATUS_PENDING); + + old_fb = pdp_crtc->old_fb; + pdp_crtc->old_fb = NULL; + + /* + * The graphics stream registers latch on vsync so 
we can go ahead and + * do the flip now. + */ + (void) pdp_crtc_helper_mode_set_base(crtc, crtc->x, crtc->y, old_fb); + + atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_DONE); + wake_up(&pdp_crtc->flip_pending_wait_queue); + + if (pdp_crtc->flip_async) + pdp_crtc_flip_complete(crtc); +} + +static void pdp_crtc_flip_cb(struct dma_fence *fence, struct dma_fence_cb *cb) +{ + struct pdp_flip_data *flip_data = + container_of(cb, struct pdp_flip_data, base); + + pdp_crtc_flip(flip_data->crtc); +} + +static void pdp_crtc_flip_schedule_cb(struct dma_fence *fence, + struct dma_fence_cb *cb) +{ + struct pdp_flip_data *flip_data = + container_of(cb, struct pdp_flip_data, base); + int err = 0; + + if (flip_data->wait_fence) + err = dma_fence_add_callback(flip_data->wait_fence, + &flip_data->base, + pdp_crtc_flip_cb); + + if (!flip_data->wait_fence || err) { + if (err && err != -ENOENT) + DRM_ERROR("flip failed to wait on old buffer\n"); + pdp_crtc_flip_cb(flip_data->wait_fence, &flip_data->base); + } +} + +static int pdp_crtc_flip_schedule(struct drm_crtc *crtc, + struct drm_gem_object *obj, + struct drm_gem_object *old_obj) +{ + struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); + struct dma_resv *resv = pdp_gem_get_resv(obj); + struct dma_resv *old_resv = pdp_gem_get_resv(old_obj); + struct pdp_flip_data *flip_data; + struct dma_fence *fence; + int err; + + flip_data = kmalloc(sizeof(*flip_data), GFP_KERNEL); + if (!flip_data) + return -ENOMEM; + + flip_data->crtc = crtc; + + ww_mutex_lock(&old_resv->lock, NULL); + flip_data->wait_fence = + dma_fence_get(dma_resv_get_excl(old_resv)); + + if (old_resv != resv) { + ww_mutex_unlock(&old_resv->lock); + ww_mutex_lock(&resv->lock, NULL); + } + + fence = dma_fence_get(dma_resv_get_excl(resv)); + ww_mutex_unlock(&resv->lock); + + pdp_crtc->flip_data = flip_data; + atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_PENDING); + + if (fence) { + err = dma_fence_add_callback(fence, &flip_data->base, + 
pdp_crtc_flip_schedule_cb); + dma_fence_put(fence); + if (err && err != -ENOENT) + goto err_set_flip_status_none; + } + + if (!fence || err == -ENOENT) { + pdp_crtc_flip_schedule_cb(fence, &flip_data->base); + err = 0; + } + + return err; + +err_set_flip_status_none: + atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_NONE); + dma_fence_put(flip_data->wait_fence); + kfree(flip_data); + return err; +} + +static int pdp_crtc_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) + , struct drm_modeset_acquire_ctx *ctx +#endif + ) +{ + struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); + struct pdp_framebuffer *pdp_fb = to_pdp_framebuffer(fb); + struct pdp_framebuffer *pdp_old_fb = + to_pdp_framebuffer(crtc->primary->fb); + enum pdp_crtc_flip_status status; + unsigned long flags; + int err; + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + status = atomic_read(&pdp_crtc->flip_status); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + + if (status != PDP_CRTC_FLIP_STATUS_NONE) + return -EBUSY; + + if (!(page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC)) { + err = drm_crtc_vblank_get(crtc); + if (err) + return err; + } + + pdp_crtc->old_fb = crtc->primary->fb; + pdp_crtc->flip_event = event; + pdp_crtc->flip_async = !!(page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC); + + /* Set the crtc primary plane to point to the new framebuffer */ + crtc->primary->fb = fb; + + err = pdp_crtc_flip_schedule(crtc, pdp_fb->obj[0], pdp_old_fb->obj[0]); + if (err) { + crtc->primary->fb = pdp_crtc->old_fb; + pdp_crtc->old_fb = NULL; + pdp_crtc->flip_event = NULL; + pdp_crtc->flip_async = false; + + DRM_ERROR("failed to schedule flip (err=%d)\n", err); + goto err_vblank_put; + } + + return 0; + +err_vblank_put: + if (!(page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC)) + drm_crtc_vblank_put(crtc); + return err; +} +#endif /* !defined(PDP_USE_ATOMIC) */ + 
+static const struct drm_crtc_helper_funcs pdp_crtc_helper_funcs = { + .mode_fixup = pdp_crtc_helper_mode_fixup, +#if defined(PDP_USE_ATOMIC) + .mode_set_nofb = pdp_crtc_helper_mode_set_nofb, + .atomic_flush = pdp_crtc_helper_atomic_flush, + .atomic_enable = pdp_crtc_helper_atomic_enable, + .atomic_disable = pdp_crtc_helper_atomic_disable, +#else + .dpms = pdp_crtc_helper_dpms, + .prepare = pdp_crtc_helper_prepare, + .commit = pdp_crtc_helper_commit, + .mode_set = pdp_crtc_helper_mode_set, + .mode_set_base = pdp_crtc_helper_mode_set_base, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) + .load_lut = pdp_crtc_helper_load_lut, +#endif + .mode_set_base_atomic = pdp_crtc_helper_mode_set_base_atomic, + .disable = pdp_crtc_helper_disable, +#endif +}; + +static const struct drm_crtc_funcs pdp_crtc_funcs = { + .destroy = pdp_crtc_destroy, +#if defined(PDP_USE_ATOMIC) + .reset = drm_atomic_helper_crtc_reset, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +#else + .set_config = drm_crtc_helper_set_config, + .page_flip = pdp_crtc_page_flip, +#endif +}; + + +struct drm_crtc *pdp_crtc_create(struct drm_device *dev, uint32_t number, + struct drm_plane *primary_plane) +{ + struct pdp_drm_private *dev_priv = dev->dev_private; + struct pdp_crtc *pdp_crtc; + const char *crtc_name = NULL; + int err; + + pdp_crtc = kzalloc(sizeof(*pdp_crtc), GFP_KERNEL); + if (!pdp_crtc) { + err = -ENOMEM; + goto err_exit; + } + + init_waitqueue_head(&pdp_crtc->flip_pending_wait_queue); + atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_NONE); + pdp_crtc->number = number; + + switch (number) { + case 0: + { + struct resource *regs; + + regs = platform_get_resource_byname( + to_platform_device(dev->dev), + IORESOURCE_MEM, + "pdp-regs"); + if (!regs) { + DRM_ERROR("missing pdp register info\n"); + err = -ENXIO; + goto 
err_crtc_free; + } + + pdp_crtc->pdp_reg_phys_base = regs->start; + pdp_crtc->pdp_reg_size = resource_size(regs); + + if (dev_priv->version == PDP_VERSION_ODIN || + dev_priv->version == PDP_VERSION_APOLLO) { + regs = platform_get_resource_byname( + to_platform_device(dev->dev), + IORESOURCE_MEM, + "pll-regs"); + if (!regs) { + DRM_ERROR("missing pll register info\n"); + err = -ENXIO; + goto err_crtc_free; + } + + pdp_crtc->pll_reg_phys_base = regs->start; + pdp_crtc->pll_reg_size = resource_size(regs); + + pdp_crtc->pll_reg = ioremap(pdp_crtc->pll_reg_phys_base, + pdp_crtc->pll_reg_size); + if (!pdp_crtc->pll_reg) { + DRM_ERROR("failed to map pll registers\n"); + err = -ENOMEM; + goto err_crtc_free; + } + } else if (dev_priv->version == PDP_VERSION_PLATO) { + regs = platform_get_resource_byname( + to_platform_device(dev->dev), + IORESOURCE_MEM, + PLATO_PDP_RESOURCE_BIF_REGS); + if (!regs) { + DRM_ERROR("missing pdp-bif register info\n"); + err = -ENXIO; + goto err_crtc_free; + } + + pdp_crtc->pdp_bif_reg_phys_base = regs->start; + pdp_crtc->pdp_bif_reg_size = resource_size(regs); + + if (!request_mem_region(pdp_crtc->pdp_bif_reg_phys_base, + pdp_crtc->pdp_bif_reg_size, + crtc_name)) { + DRM_ERROR("failed to reserve pdp-bif registers\n"); + err = -EBUSY; + goto err_crtc_free; + } + + pdp_crtc->pdp_bif_reg = + ioremap(pdp_crtc->pdp_bif_reg_phys_base, + pdp_crtc->pdp_bif_reg_size); + if (!pdp_crtc->pdp_bif_reg) { + DRM_ERROR("failed to map pdp-bif registers\n"); + err = -ENOMEM; + goto err_iounmap_regs; + } + } + + if (dev_priv->version == PDP_VERSION_ODIN) { + regs = platform_get_resource_byname( + to_platform_device(dev->dev), + IORESOURCE_MEM, + "odn-core"); + if (!regs) { + DRM_ERROR("missing odn-core info\n"); + err = -ENXIO; + goto err_crtc_free; + } + + pdp_crtc->odn_core_phys_base = regs->start; + pdp_crtc->odn_core_size = resource_size(regs); + + pdp_crtc->odn_core_reg + = ioremap(pdp_crtc->odn_core_phys_base, + pdp_crtc->odn_core_size); + if 
(!pdp_crtc->odn_core_reg) { + DRM_ERROR("failed to map pdp reset register\n"); + err = -ENOMEM; + goto err_iounmap_regs; + } + } + + crtc_name = "crtc-0"; + break; + } + default: + DRM_ERROR("invalid crtc number %u\n", number); + err = -EINVAL; + goto err_crtc_free; + } + + if (!request_mem_region(pdp_crtc->pdp_reg_phys_base, + pdp_crtc->pdp_reg_size, + crtc_name)) { + DRM_ERROR("failed to reserve pdp registers\n"); + err = -EBUSY; + goto err_crtc_free; + } + + pdp_crtc->pdp_reg = ioremap(pdp_crtc->pdp_reg_phys_base, + pdp_crtc->pdp_reg_size); + if (!pdp_crtc->pdp_reg) { + DRM_ERROR("failed to map pdp registers\n"); + err = -ENOMEM; + goto err_release_mem_region; + } + + err = drm_crtc_init_with_planes(dev, &pdp_crtc->base, primary_plane, + NULL, &pdp_crtc_funcs, NULL); + if (err) { + DRM_ERROR("CRTC init with planes failed"); + goto err_iounmap_regs; + } + + drm_crtc_helper_add(&pdp_crtc->base, &pdp_crtc_helper_funcs); + + DRM_DEBUG_DRIVER("[CRTC:%d]\n", pdp_crtc->base.base.id); + + return &pdp_crtc->base; + +err_iounmap_regs: + iounmap(pdp_crtc->pdp_reg); + if (pdp_crtc->odn_core_reg) + iounmap(pdp_crtc->odn_core_reg); + if (pdp_crtc->pdp_bif_reg) + iounmap(pdp_crtc->pdp_bif_reg); +err_release_mem_region: + release_mem_region(pdp_crtc->pdp_reg_phys_base, pdp_crtc->pdp_reg_size); +err_crtc_free: + kfree(pdp_crtc); +err_exit: + return ERR_PTR(err); +} + +void pdp_crtc_set_vblank_enabled(struct drm_crtc *crtc, bool enable) +{ + struct pdp_drm_private *dev_priv = crtc->dev->dev_private; + struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); + + switch (dev_priv->version) { + case PDP_VERSION_ODIN: + pdp_odin_set_vblank_enabled(crtc->dev->dev, + pdp_crtc->pdp_reg, + enable); + break; + case PDP_VERSION_APOLLO: + pdp_apollo_set_vblank_enabled(crtc->dev->dev, + pdp_crtc->pdp_reg, + enable); + break; + case PDP_VERSION_PLATO: + pdp_plato_set_vblank_enabled(crtc->dev->dev, + pdp_crtc->pdp_reg, + enable); + break; + default: + BUG(); + } +} + +void pdp_crtc_irq_handler(struct 
drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct pdp_drm_private *dev_priv = dev->dev_private; + struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); + bool handled; + + switch (dev_priv->version) { + case PDP_VERSION_ODIN: + handled = pdp_odin_check_and_clear_vblank(dev->dev, + pdp_crtc->pdp_reg); + break; + case PDP_VERSION_APOLLO: + handled = pdp_apollo_check_and_clear_vblank(dev->dev, + pdp_crtc->pdp_reg); + break; + case PDP_VERSION_PLATO: + handled = pdp_plato_check_and_clear_vblank(dev->dev, + pdp_crtc->pdp_reg); + break; + default: + handled = false; + break; + } + + if (handled) { + enum pdp_crtc_flip_status status; + + drm_handle_vblank(dev, pdp_crtc->number); + + status = atomic_read(&pdp_crtc->flip_status); + if (status == PDP_CRTC_FLIP_STATUS_DONE) { + if (!pdp_crtc->flip_async) { + pdp_crtc_flip_complete(crtc); +#if !defined(PDP_USE_ATOMIC) + drm_crtc_vblank_put(crtc); +#endif + } + } + } +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) +void pdp_crtc_flip_event_cancel(struct drm_crtc *crtc, struct drm_file *file) +{ + struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); + unsigned long flags; + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + + if (pdp_crtc->flip_event && + pdp_crtc->flip_event->base.file_priv == file) { + pdp_crtc->flip_event->base.destroy(&pdp_crtc->flip_event->base); + pdp_crtc->flip_event = NULL; + } + + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); +} +#endif diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_debugfs.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_debugfs.c new file mode 100644 index 000000000000..619c88af6438 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_debugfs.c @@ -0,0 +1,179 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include + +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#include +#include +#include +#endif + +#include "drm_pdp_drv.h" + +#define PDP_DEBUGFS_DISPLAY_ENABLED "display_enabled" + +static int display_enabled_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + + return 0; +} + +static ssize_t display_enabled_read(struct file *file, + char __user *user_buffer, + size_t count, + loff_t *position_ptr) +{ + struct drm_device *dev = file->private_data; + struct pdp_drm_private *dev_priv = dev->dev_private; + loff_t position = *position_ptr; + char buffer[] = "N\n"; + size_t buffer_size = ARRAY_SIZE(buffer); + int err; + + if (position < 0) + return -EINVAL; + else if (position >= buffer_size || count == 0) + return 0; + + if (dev_priv->display_enabled) + buffer[0] = 'Y'; + + if (count > buffer_size - position) + count = buffer_size - position; + + err = copy_to_user(user_buffer, &buffer[position], count); + if (err) + return -EFAULT; + + *position_ptr = position + count; + + return count; +} + +static ssize_t display_enabled_write(struct file *file, + const char __user *user_buffer, + size_t count, + loff_t *position) +{ + struct drm_device *dev = file->private_data; + struct pdp_drm_private *dev_priv = dev->dev_private; + char buffer[3]; + int err; + + count = min(count, ARRAY_SIZE(buffer) - 1); + + err 
= copy_from_user(buffer, user_buffer, count); + if (err) + return -EFAULT; + buffer[count] = '\0'; + + if (!strtobool(buffer, &dev_priv->display_enabled) && dev_priv->crtc) + pdp_crtc_set_plane_enabled(dev_priv->crtc, dev_priv->display_enabled); + + return count; +} + +static const struct file_operations pdp_display_enabled_fops = { + .owner = THIS_MODULE, + .open = display_enabled_open, + .read = display_enabled_read, + .write = display_enabled_write, + .llseek = default_llseek, +}; + +static int pdp_debugfs_create(struct drm_minor *minor, const char *name, + umode_t mode, const struct file_operations *fops) +{ + struct drm_info_node *node; + + /* + * We can't get access to our driver private data when this function is + * called so we fake up a node so that we can clean up entries later on. + */ + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + + node->dent = debugfs_create_file(name, mode, minor->debugfs_root, + minor->dev, fops); + if (!node->dent) { + kfree(node); + return -ENOMEM; + } + + node->minor = minor; + node->info_ent = (void *) fops; + + mutex_lock(&minor->debugfs_lock); + list_add(&node->list, &minor->debugfs_list); + mutex_unlock(&minor->debugfs_lock); + + return 0; +} + +int pdp_debugfs_init(struct drm_minor *minor) +{ + int err; + + err = pdp_debugfs_create(minor, PDP_DEBUGFS_DISPLAY_ENABLED, + 0100644, + &pdp_display_enabled_fops); + if (err) { + DRM_INFO("failed to create '%s' debugfs entry\n", + PDP_DEBUGFS_DISPLAY_ENABLED); + } + + return err; +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) +void pdp_debugfs_cleanup(struct drm_minor *minor) +{ + drm_debugfs_remove_files((struct drm_info_list *) &pdp_display_enabled_fops, + 1, minor); +} +#endif diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_drv.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_drv.c new file mode 100644 index 000000000000..45e08386c4b0 --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_drv.c @@ -0,0 +1,830 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include +#include +#include +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#include +#include +#include +#include +#include +#include +#else +#include +#endif + +#include "tc_drv.h" +#include "pvrversion.h" + +#include "drm_pdp_drv.h" +#include "drm_pdp_gem.h" +#include "pdp_drm.h" + +#include "odin_defs.h" + +#if defined(SUPPORT_PLATO_DISPLAY) +#include "plato_drv.h" +#include "pdp2_regs.h" +#include "pdp2_mmu_regs.h" +#endif + +#define DRIVER_NAME "pdp" +#define DRIVER_DESC "Imagination Technologies PDP DRM Display Driver" +#define DRIVER_DATE "20150612" + +#if defined(PDP_USE_ATOMIC) +#include + +#define PVR_DRIVER_ATOMIC DRIVER_ATOMIC +#else +#define PVR_DRIVER_ATOMIC 0 +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) +#define PVR_DRIVER_PRIME 0 +#else +#define PVR_DRIVER_PRIME DRIVER_PRIME +#endif + +/* This header must always be included last */ +#include "kernel_compatibility.h" + +static bool display_enable = true; + +module_param(display_enable, bool, 0444); +MODULE_PARM_DESC(display_enable, "Enable all displays (default: Y)"); + + +static void pdp_irq_handler(void *data) +{ + struct drm_device *dev = data; + struct drm_crtc *crtc; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) + pdp_crtc_irq_handler(crtc); +} + +static int pdp_early_load(struct drm_device *dev) +{ + struct pdp_drm_private 
*dev_priv; + int err; + + DRM_DEBUG("loading %s device\n", to_platform_device(dev->dev)->name); + + platform_set_drvdata(to_platform_device(dev->dev), dev); + + dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); + if (!dev_priv) + return -ENOMEM; + + dev->dev_private = dev_priv; + dev_priv->dev = dev; + dev_priv->version = (enum pdp_version) + to_platform_device(dev->dev)->id_entry->driver_data; + dev_priv->display_enabled = display_enable; + + if (dev_priv->version == PDP_VERSION_APOLLO || + dev_priv->version == PDP_VERSION_ODIN) { +#if !defined(SUPPORT_PLATO_DISPLAY) + err = tc_enable(dev->dev->parent); + if (err) { + DRM_ERROR("failed to enable parent device (err=%d)\n", err); + goto err_dev_priv_free; + } + + /* + * check whether it's Orion PDP for picking + * the right display mode list later on + */ + if (dev_priv->version == PDP_VERSION_ODIN) + dev_priv->subversion = (enum pdp_odin_subversion) + tc_odin_subvers(dev->dev->parent); +#endif + } + +#if defined(SUPPORT_PLATO_DISPLAY) + else if (dev_priv->version == PDP_VERSION_PLATO) { +// XXX do we we need to do this? Plato driver has already enabled device. 
+ err = plato_enable(dev->dev->parent); + if (err) { + DRM_ERROR("failed to enable parent device (err=%d)\n", err); + goto err_dev_priv_free; + } + } +#endif + + dev_priv->gem_priv = pdp_gem_init(dev); + if (!dev_priv->gem_priv) { + DRM_ERROR("gem initialisation failed\n"); + err = -ENOMEM; + goto err_disable_parent_device; + } + + err = pdp_modeset_early_init(dev_priv); + if (err) { + DRM_ERROR("early modeset initialisation failed (err=%d)\n", + err); + goto err_gem_cleanup; + } + + err = drm_vblank_init(dev_priv->dev, 1); + if (err) { + DRM_ERROR("failed to complete vblank init (err=%d)\n", err); + goto err_modeset_late_cleanup; + } + + if (dev_priv->version == PDP_VERSION_APOLLO || + dev_priv->version == PDP_VERSION_ODIN) { +#if !defined(SUPPORT_PLATO_DISPLAY) + err = tc_set_interrupt_handler(dev->dev->parent, + TC_INTERRUPT_PDP, + pdp_irq_handler, + dev); + if (err) { + DRM_ERROR("failed to set interrupt handler (err=%d)\n", + err); + goto err_vblank_cleanup; + } + + err = tc_enable_interrupt(dev->dev->parent, TC_INTERRUPT_PDP); + if (err) { + DRM_ERROR("failed to enable pdp interrupts (err=%d)\n", + err); + goto err_uninstall_interrupt_handle; + } +#endif + } +#if defined(SUPPORT_PLATO_DISPLAY) + else if (dev_priv->version == PDP_VERSION_PLATO) { + err = plato_set_interrupt_handler(dev->dev->parent, + PLATO_INTERRUPT_PDP, + pdp_irq_handler, + dev); + if (err) { + DRM_ERROR("failed to set interrupt handler (err=%d)\n", + err); + goto err_vblank_cleanup; + } + + err = plato_enable_interrupt(dev->dev->parent, PLATO_INTERRUPT_PDP); + if (err) { + DRM_ERROR("failed to enable pdp interrupts (err=%d)\n", + err); + goto err_uninstall_interrupt_handle; + } + } +#endif + + dev->irq_enabled = true; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)) + dev->vblank_disable_allowed = 1; +#endif + + return 0; + +err_uninstall_interrupt_handle: + if (dev_priv->version == PDP_VERSION_APOLLO || + dev_priv->version == PDP_VERSION_ODIN) { +#if !defined(SUPPORT_PLATO_DISPLAY) + 
tc_set_interrupt_handler(dev->dev->parent, + TC_INTERRUPT_PDP, + NULL, + NULL); +#endif + } +#if defined(SUPPORT_PLATO_DISPLAY) + else if (dev_priv->version == PDP_VERSION_PLATO) { + plato_set_interrupt_handler(dev->dev->parent, + PLATO_INTERRUPT_PDP, + NULL, + NULL); + } +#endif +err_vblank_cleanup: +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) + /* Called by drm_dev_fini in Linux 4.11.0 and later */ + drm_vblank_cleanup(dev_priv->dev); +#endif +err_modeset_late_cleanup: + pdp_modeset_late_cleanup(dev_priv); +err_gem_cleanup: + pdp_gem_cleanup(dev_priv->gem_priv); +err_disable_parent_device: + if (dev_priv->version == PDP_VERSION_APOLLO || + dev_priv->version == PDP_VERSION_ODIN) { +#if !defined(SUPPORT_PLATO_DISPLAY) + tc_disable(dev->dev->parent); +#endif + } +#if defined(SUPPORT_PLATO_DISPLAY) + else if (dev_priv->version == PDP_VERSION_PLATO) + plato_disable(dev->dev->parent); +#endif +err_dev_priv_free: + kfree(dev_priv); + return err; +} + +static int pdp_late_load(struct drm_device *dev) +{ + struct pdp_drm_private *dev_priv = dev->dev_private; + int err; + + err = pdp_modeset_late_init(dev_priv); + if (err) { + DRM_ERROR("late modeset initialisation failed (err=%d)\n", + err); + return err; + } + + return 0; +} + +static void pdp_early_unload(struct drm_device *dev) +{ + struct pdp_drm_private *dev_priv = dev->dev_private; + +#if defined(CONFIG_DRM_FBDEV_EMULATION) && defined(PDP_USE_ATOMIC) + drm_atomic_helper_shutdown(dev); +#endif + pdp_modeset_early_cleanup(dev_priv); +} + +static void pdp_late_unload(struct drm_device *dev) +{ + struct pdp_drm_private *dev_priv = dev->dev_private; + + DRM_INFO("unloading %s device.\n", to_platform_device(dev->dev)->name); + if (dev_priv->version == PDP_VERSION_APOLLO || + dev_priv->version == PDP_VERSION_ODIN) { +#if !defined(SUPPORT_PLATO_DISPLAY) + tc_disable_interrupt(dev->dev->parent, TC_INTERRUPT_PDP); + tc_set_interrupt_handler(dev->dev->parent, + TC_INTERRUPT_PDP, + NULL, + NULL); +#endif + } +#if 
defined(SUPPORT_PLATO_DISPLAY) + else if (dev_priv->version == PDP_VERSION_PLATO) { + plato_disable_interrupt(dev->dev->parent, PLATO_INTERRUPT_PDP); + plato_set_interrupt_handler(dev->dev->parent, + PLATO_INTERRUPT_PDP, + NULL, + NULL); + } +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) + /* Called by drm_dev_fini in Linux 4.11.0 and later */ + drm_vblank_cleanup(dev_priv->dev); +#endif + pdp_modeset_late_cleanup(dev_priv); + pdp_gem_cleanup(dev_priv->gem_priv); + + if (dev_priv->version == PDP_VERSION_APOLLO || + dev_priv->version == PDP_VERSION_ODIN) { +#if !defined(SUPPORT_PLATO_DISPLAY) + tc_disable(dev->dev->parent); +#endif + } +#if defined(SUPPORT_PLATO_DISPLAY) + else if (dev_priv->version == PDP_VERSION_PLATO) + plato_disable(dev->dev->parent); +#endif + + kfree(dev_priv); +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) +static int pdp_load(struct drm_device *dev, unsigned long flags) +{ + int err; + + err = pdp_early_load(dev); + if (err) + return err; + + err = pdp_late_load(dev); + if (err) { + pdp_late_unload(dev); + return err; + } + + return 0; +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) +static int pdp_unload(struct drm_device *dev) +#else +static void pdp_unload(struct drm_device *dev) +#endif +{ + pdp_early_unload(dev); + pdp_late_unload(dev); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + return 0; +#endif +} +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) +static void pdp_preclose(struct drm_device *dev, struct drm_file *file) +{ + struct drm_crtc *crtc; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) + pdp_crtc_flip_event_cancel(crtc, file); +} +#endif + +static inline void pdp_teardown_drm_config(struct drm_device *dev) +{ +#if defined(PDP_USE_ATOMIC) + drm_atomic_helper_shutdown(dev); +#else + struct drm_crtc *crtc; + + DRM_INFO("%s: %s device\n", __func__, to_platform_device(dev->dev)->name); + + /* + * When non atomic driver is in use, manually trigger 
->set_config + * with an empty mode set associated to this crtc. + */ + drm_modeset_lock_all(dev); + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + if (crtc->primary->fb) { + struct drm_mode_set mode_set = { .crtc = crtc }; + int err; + + err = drm_mode_set_config_internal(&mode_set); + if (err) + DRM_ERROR("failed to disable crtc %p (err=%d)\n", + crtc, err); + } + } + drm_modeset_unlock_all(dev); +#endif +} + +static void pdp_lastclose(struct drm_device *dev) +{ +#if defined(CONFIG_DRM_FBDEV_EMULATION) + struct pdp_drm_private *dev_priv = dev->dev_private; + struct pdp_fbdev *fbdev = dev_priv->fbdev; + int err; + + if (fbdev) { + /* + * This is a fbdev driver, therefore never attempt to shutdown + * on a client disconnecting. + */ + err = drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev->helper); + if (err) + DRM_ERROR("failed to restore mode (err=%d)\n", err); + } +#else + pdp_teardown_drm_config(dev); +#endif +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) +static int pdp_enable_vblank(struct drm_device *dev, unsigned int crtc) +#else +static int pdp_enable_vblank(struct drm_device *dev, int crtc) +#endif +{ + struct pdp_drm_private *dev_priv = dev->dev_private; + + switch (crtc) { + case 0: + pdp_crtc_set_vblank_enabled(dev_priv->crtc, true); + break; + default: +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) + DRM_ERROR("invalid crtc %u\n", crtc); +#else + DRM_ERROR("invalid crtc %d\n", crtc); +#endif + return -EINVAL; + } + + DRM_DEBUG("vblank interrupts enabled for crtc %d\n", crtc); + + return 0; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) +static void pdp_disable_vblank(struct drm_device *dev, unsigned int crtc) +#else +static void pdp_disable_vblank(struct drm_device *dev, int crtc) +#endif +{ + struct pdp_drm_private *dev_priv = dev->dev_private; + + switch (crtc) { + case 0: + pdp_crtc_set_vblank_enabled(dev_priv->crtc, false); + break; + default: +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) + 
DRM_ERROR("invalid crtc %u\n", crtc); +#else + DRM_ERROR("invalid crtc %d\n", crtc); +#endif + return; + } + + DRM_DEBUG("vblank interrupts disabled for crtc %d\n", crtc); +} + +static int pdp_gem_object_create_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file) +{ + struct pdp_drm_private *dev_priv = dev->dev_private; + + return pdp_gem_object_create_ioctl_priv(dev, + dev_priv->gem_priv, + data, + file); +} + +static int pdp_gem_dumb_create(struct drm_file *file, + struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + struct pdp_drm_private *dev_priv = dev->dev_private; + + return pdp_gem_dumb_create_priv(file, + dev, + dev_priv->gem_priv, + args); +} + +static void pdp_gem_object_free(struct drm_gem_object *obj) +{ + struct pdp_drm_private *dev_priv = obj->dev->dev_private; + + pdp_gem_object_free_priv(dev_priv->gem_priv, obj); +} + +static const struct vm_operations_struct pdp_gem_vm_ops = { + .fault = pdp_gem_object_vm_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static const struct drm_ioctl_desc pdp_ioctls[] = { + DRM_IOCTL_DEF_DRV(PDP_GEM_CREATE, pdp_gem_object_create_ioctl, + DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(PDP_GEM_MMAP, pdp_gem_object_mmap_ioctl, + DRM_AUTH | DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(PDP_GEM_CPU_PREP, pdp_gem_object_cpu_prep_ioctl, + DRM_AUTH | DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(PDP_GEM_CPU_FINI, pdp_gem_object_cpu_fini_ioctl, + DRM_AUTH | DRM_UNLOCKED), +}; + +static const struct file_operations pdp_driver_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, + .mmap = drm_gem_mmap, + .poll = drm_poll, + .read = drm_read, + .llseek = noop_llseek, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif +}; + +static struct drm_driver pdp_drm_driver = { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + .load = NULL, + .unload = NULL, +#else + .load = pdp_load, + .unload = pdp_unload, +#endif 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) + .preclose = pdp_preclose, +#endif + .lastclose = pdp_lastclose, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) + .set_busid = drm_platform_set_busid, +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) + .get_vblank_counter = NULL, +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) + .get_vblank_counter = drm_vblank_no_hw_counter, +#else + .get_vblank_counter = drm_vblank_count, +#endif + .enable_vblank = pdp_enable_vblank, + .disable_vblank = pdp_disable_vblank, + + .debugfs_init = pdp_debugfs_init, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + .debugfs_cleanup = pdp_debugfs_cleanup, +#endif + + .gem_free_object = pdp_gem_object_free, + + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_export = pdp_gem_prime_export, + .gem_prime_import = pdp_gem_prime_import, + .gem_prime_import_sg_table = pdp_gem_prime_import_sg_table, + + // Set dumb_create to NULL to avoid xorg owning the display (if xorg is running). 
+ .dumb_create = pdp_gem_dumb_create, + .dumb_map_offset = pdp_gem_dumb_map_offset, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) + .dumb_destroy = drm_gem_dumb_destroy, +#endif + + .gem_vm_ops = &pdp_gem_vm_ops, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = PVRVERSION_MAJ, + .minor = PVRVERSION_MIN, + .patchlevel = PVRVERSION_BUILD, + + .driver_features = DRIVER_GEM | + DRIVER_MODESET | + PVR_DRIVER_PRIME | + PVR_DRIVER_ATOMIC, + .ioctls = pdp_ioctls, + .num_ioctls = ARRAY_SIZE(pdp_ioctls), + .fops = &pdp_driver_fops, +}; + +#if defined(SUPPORT_PLATO_DISPLAY) + +static int compare_parent_dev(struct device *dev, void *data) +{ + struct device *pdp_dev = data; + + return dev->parent && dev->parent == pdp_dev->parent; +} + +static int pdp_component_bind(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct drm_device *ddev; + int ret; + + dev_info(dev, "Loading platform device\n"); + ddev = drm_dev_alloc(&pdp_drm_driver, &pdev->dev); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) + if (IS_ERR(ddev)) + return PTR_ERR(ddev); +#else + if (!ddev) + return -ENOMEM; +#endif + + // XXX no need to do this as happens in pdp_early_load + platform_set_drvdata(pdev, ddev); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) + /* Needed by drm_platform_set_busid */ + ddev->platformdev = pdev; +#endif + BUG_ON(pdp_drm_driver.load != NULL); + + ret = pdp_early_load(ddev); + if (ret) + goto err_drm_dev_put; + + DRM_DEBUG_DRIVER("Binding other components\n"); + /* Bind other components, including HDMI encoder/connector */ + ret = component_bind_all(dev, ddev); + if (ret) { + DRM_ERROR("Failed to bind other components (ret=%d)\n", ret); + goto err_drm_dev_late_unload; + } + + ret = drm_dev_register(ddev, 0); + if (ret) + goto err_drm_dev_late_unload; + + ret = pdp_late_load(ddev); + if (ret) + goto err_drm_dev_unregister; + + return 0; + +err_drm_dev_unregister: + drm_dev_unregister(ddev); 
+err_drm_dev_late_unload: + pdp_late_unload(ddev); +err_drm_dev_put: + drm_dev_put(ddev); + return ret; +} + +static void pdp_component_unbind(struct device *dev) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + + dev_info(dev, "Unloading platform device\n"); + BUG_ON(pdp_drm_driver.unload != NULL); + pdp_early_unload(ddev); + drm_dev_unregister(ddev); + pdp_late_unload(ddev); + component_unbind_all(dev, ddev); + drm_dev_put(ddev); +} + +static const struct component_master_ops pdp_component_ops = { + .bind = pdp_component_bind, + .unbind = pdp_component_unbind, +}; + + +static int pdp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct component_match *match = NULL; + + component_match_add(dev, &match, compare_parent_dev, dev); + return component_master_add_with_match(dev, &pdp_component_ops, match); +} + +static int pdp_remove(struct platform_device *pdev) +{ + component_master_del(&pdev->dev, &pdp_component_ops); + return 0; +} + +#else // !SUPPORT_PLATO_DISPLAY + +static int pdp_probe(struct platform_device *pdev) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + struct drm_device *ddev; + int ret; + + ddev = drm_dev_alloc(&pdp_drm_driver, &pdev->dev); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) + if (IS_ERR(ddev)) + return PTR_ERR(ddev); +#else + if (!ddev) + return -ENOMEM; +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) + /* Needed by drm_platform_set_busid */ + ddev->platformdev = pdev; +#endif + /* + * The load callback, called from drm_dev_register, is deprecated, + * because of potential race conditions. 
+ */ + BUG_ON(pdp_drm_driver.load != NULL); + + ret = pdp_early_load(ddev); + if (ret) + goto err_drm_dev_put; + + ret = drm_dev_register(ddev, 0); + if (ret) + goto err_drm_dev_late_unload; + + ret = pdp_late_load(ddev); + if (ret) + goto err_drm_dev_unregister; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", + pdp_drm_driver.name, + pdp_drm_driver.major, + pdp_drm_driver.minor, + pdp_drm_driver.patchlevel, + pdp_drm_driver.date, + ddev->primary->index); +#endif + return 0; + +err_drm_dev_unregister: + drm_dev_unregister(ddev); +err_drm_dev_late_unload: + pdp_late_unload(ddev); +err_drm_dev_put: + drm_dev_put(ddev); + return ret; +#else + return drm_platform_init(&pdp_drm_driver, pdev); +#endif +} + +static int pdp_remove(struct platform_device *pdev) +{ + struct drm_device *ddev = platform_get_drvdata(pdev); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + /* + * The unload callback, called from drm_dev_unregister, is + * deprecated. 
+ */ + BUG_ON(pdp_drm_driver.unload != NULL); + + pdp_early_unload(ddev); + + drm_dev_unregister(ddev); + + pdp_late_unload(ddev); + + drm_dev_put(ddev); +#else + drm_put_dev(ddev); +#endif + return 0; +} + +#endif // SUPPORT_PLATO_DISPLAY + +static void pdp_shutdown(struct platform_device *pdev) +{ +} + +static struct platform_device_id pdp_platform_device_id_table[] = { + { .name = APOLLO_DEVICE_NAME_PDP, .driver_data = PDP_VERSION_APOLLO }, + { .name = ODN_DEVICE_NAME_PDP, .driver_data = PDP_VERSION_ODIN }, +#if defined(SUPPORT_PLATO_DISPLAY) + { .name = PLATO_DEVICE_NAME_PDP, .driver_data = PDP_VERSION_PLATO }, +#endif // SUPPORT_PLATO_DISPLAY + { }, +}; + +static struct platform_driver pdp_platform_driver = { + .probe = pdp_probe, + .remove = pdp_remove, + .shutdown = pdp_shutdown, + .driver = { + .owner = THIS_MODULE, + .name = DRIVER_NAME, + }, + .id_table = pdp_platform_device_id_table, +}; + +module_platform_driver(pdp_platform_driver); + +MODULE_AUTHOR("Imagination Technologies Ltd. "); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_DEVICE_TABLE(platform, pdp_platform_device_id_table); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_drv.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_drv.h new file mode 100644 index 000000000000..8fe6f989ef27 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_drv.h @@ -0,0 +1,221 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__DRM_PDP_DRV_H__) +#define __DRM_PDP_DRV_H__ + +#include +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#include +#else +#include +#endif + +#include +#include +#include +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) +#include +#endif + +#include "pdp_common.h" + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && \ + !defined(PVR_ANDROID_USE_PDP_LEGACY) +#define PDP_USE_ATOMIC +#endif + +struct pdp_gem_context; +enum pdp_crtc_flip_status; +struct pdp_flip_data; +struct pdp_gem_private; + +#if !defined(SUPPORT_PLATO_DISPLAY) +struct tc_pdp_platform_data; +#else +struct plato_pdp_platform_data; +#endif + +struct pdp_drm_private { + struct drm_device *dev; +#if defined(CONFIG_DRM_FBDEV_EMULATION) + struct pdp_fbdev *fbdev; +#endif + + enum pdp_version version; + + /* differentiate Orion from base Odin PDP */ + enum pdp_odin_subversion subversion; + + /* created by pdp_gem_init */ + struct pdp_gem_private *gem_priv; + + /* initialised by pdp_modeset_early_init */ + struct drm_plane *plane; + struct drm_crtc *crtc; + struct drm_connector *connector; + struct drm_encoder *encoder; + + bool display_enabled; +}; + +struct pdp_crtc { + struct drm_crtc base; + + uint32_t number; + + resource_size_t pdp_reg_size; + resource_size_t pdp_reg_phys_base; + void __iomem *pdp_reg; + + resource_size_t pdp_bif_reg_size; + resource_size_t pdp_bif_reg_phys_base; + void __iomem *pdp_bif_reg; + + resource_size_t pll_reg_size; + resource_size_t pll_reg_phys_base; + void __iomem *pll_reg; + + resource_size_t odn_core_size; /* needed for odin pdp clk reset */ + resource_size_t odn_core_phys_base; + void __iomem *odn_core_reg; + + wait_queue_head_t flip_pending_wait_queue; + + /* Reuse the drm_device event_lock to protect these */ + atomic_t flip_status; + struct drm_pending_vblank_event *flip_event; + struct drm_framebuffer *old_fb; + struct 
pdp_flip_data *flip_data; + bool flip_async; +}; + +#define to_pdp_crtc(crtc) container_of(crtc, struct pdp_crtc, base) + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) +struct drm_gem_object; + +struct pdp_framebuffer { + struct drm_framebuffer base; + struct drm_gem_object *obj[1]; +}; + +#define to_pdp_framebuffer(fb) container_of(fb, struct pdp_framebuffer, base) +#define to_drm_framebuffer(fb) (&(fb)->base) +#else +#define pdp_framebuffer drm_framebuffer +#define to_pdp_framebuffer(fb) (fb) +#define to_drm_framebuffer(fb) (fb) +#endif + +#if defined(CONFIG_DRM_FBDEV_EMULATION) +struct pdp_fbdev { + struct drm_fb_helper helper; + struct pdp_framebuffer fb; + struct pdp_drm_private *priv; + u8 preferred_bpp; +}; +#endif + +static inline u32 pdp_drm_fb_cpp(struct drm_framebuffer *fb) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) + return fb->format->cpp[0]; +#else + return fb->bits_per_pixel / 8; +#endif +} + +static inline u32 pdp_drm_fb_format(struct drm_framebuffer *fb) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) + return fb->format->format; +#else + return fb->pixel_format; +#endif +} + +int pdp_debugfs_init(struct drm_minor *minor); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) +void pdp_debugfs_cleanup(struct drm_minor *minor); +#endif + +struct drm_plane *pdp_plane_create(struct drm_device *dev, + enum drm_plane_type type); +void pdp_plane_set_surface(struct drm_crtc *crtc, struct drm_plane *plane, + struct drm_framebuffer *fb, + const uint32_t src_x, const uint32_t src_y); + +struct drm_crtc *pdp_crtc_create(struct drm_device *dev, uint32_t number, + struct drm_plane *primary_plane); +void pdp_crtc_set_plane_enabled(struct drm_crtc *crtc, bool enable); +void pdp_crtc_set_vblank_enabled(struct drm_crtc *crtc, bool enable); +void pdp_crtc_irq_handler(struct drm_crtc *crtc); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) +void pdp_crtc_flip_event_cancel(struct drm_crtc *crtc, struct drm_file *file); +#endif + 
+struct drm_connector *pdp_dvi_connector_create(struct drm_device *dev); + +struct drm_encoder *pdp_tmds_encoder_create(struct drm_device *dev); + +int pdp_modeset_early_init(struct pdp_drm_private *dev_priv); +int pdp_modeset_late_init(struct pdp_drm_private *dev_priv); +void pdp_modeset_early_cleanup(struct pdp_drm_private *dev_priv); +void pdp_modeset_late_cleanup(struct pdp_drm_private *dev_priv); + +#if defined(CONFIG_DRM_FBDEV_EMULATION) +struct pdp_fbdev *pdp_fbdev_create(struct pdp_drm_private *dev); +void pdp_fbdev_destroy(struct pdp_fbdev *fbdev); +#endif + +int pdp_modeset_validate_init(struct pdp_drm_private *dev_priv, + struct drm_mode_fb_cmd2 *mode_cmd, + struct pdp_framebuffer *pdp_fb, + struct drm_gem_object *obj); +#endif /* !defined(__DRM_PDP_DRV_H__) */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_dvi.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_dvi.c new file mode 100644 index 000000000000..05c588200f5d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_dvi.c @@ -0,0 +1,309 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "drm_pdp_drv.h" + +#include +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#else +#include +#endif + +#include +#include + +#if defined(PDP_USE_ATOMIC) +#include +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0)) +#include +#endif + +#include "kernel_compatibility.h" + +struct pdp_mode_data { + int hdisplay; + int vdisplay; + int vrefresh; + bool reduced_blanking; + bool interlaced; + bool margins; +}; + +static const struct pdp_mode_data pdp_extra_modes[] = { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)) + { + .hdisplay = 1280, + .vdisplay = 720, + .vrefresh = 60, + .reduced_blanking = false, + .interlaced = false, + .margins = false, + }, + { + .hdisplay = 1920, + .vdisplay = 1080, + .vrefresh = 60, + .reduced_blanking = false, + .interlaced = false, + .margins = false, + }, +#endif +}; + +static char preferred_mode_name[DRM_DISPLAY_MODE_LEN] = "\0"; + +module_param_string(dvi_preferred_mode, + preferred_mode_name, + DRM_DISPLAY_MODE_LEN, + 0444); + +MODULE_PARM_DESC(dvi_preferred_mode, + "Specify the preferred mode (if supported), e.g. 
1280x1024."); + + +static int pdp_dvi_add_extra_modes(struct drm_connector *connector) +{ + struct drm_display_mode *mode; + int num_modes; + int i; + + for (i = 0, num_modes = 0; i < ARRAY_SIZE(pdp_extra_modes); i++) { + mode = drm_cvt_mode(connector->dev, + pdp_extra_modes[i].hdisplay, + pdp_extra_modes[i].vdisplay, + pdp_extra_modes[i].vrefresh, + pdp_extra_modes[i].reduced_blanking, + pdp_extra_modes[i].interlaced, + pdp_extra_modes[i].margins); + if (mode) { + drm_mode_probed_add(connector, mode); + num_modes++; + } + } + + return num_modes; +} + +static int pdp_dvi_connector_helper_get_modes(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + int num_modes; + int len = strlen(preferred_mode_name); + + if (len) + dev_info(dev->dev, "detected dvi_preferred_mode=%s\n", + preferred_mode_name); + else + dev_info(dev->dev, "no dvi_preferred_mode\n"); + + num_modes = drm_add_modes_noedid(connector, + dev->mode_config.max_width, + dev->mode_config.max_height); + + num_modes += pdp_dvi_add_extra_modes(connector); + if (num_modes) { + struct drm_display_mode *pref_mode = NULL; + + if (len) { + struct drm_display_mode *mode; + struct list_head *entry; + + list_for_each(entry, &connector->probed_modes) { + mode = list_entry(entry, + struct drm_display_mode, + head); + if (!strcmp(mode->name, preferred_mode_name)) { + pref_mode = mode; + break; + } + } + } + + if (pref_mode) + pref_mode->type |= DRM_MODE_TYPE_PREFERRED; + else + drm_set_preferred_mode(connector, + dev->mode_config.max_width, + dev->mode_config.max_height); + } + + drm_mode_sort(&connector->probed_modes); + + DRM_DEBUG_DRIVER("[CONNECTOR:%d:%s] found %d modes\n", + connector->base.id, + connector->name, + num_modes); + + return num_modes; +} + +static enum drm_mode_status +pdp_dvi_connector_helper_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + return MODE_NO_INTERLACE; + else if (mode->flags & 
DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; + + return MODE_OK; +} + +#if !defined(PDP_USE_ATOMIC) +static struct drm_encoder * +pdp_dvi_connector_helper_best_encoder(struct drm_connector *connector) +{ + /* Pick the first encoder we find */ + if (connector->encoder_ids[0] != 0) { + struct drm_encoder *encoder; + + encoder = drm_encoder_find(connector->dev, + NULL, + connector->encoder_ids[0]); + if (encoder) { + DRM_DEBUG_DRIVER("[ENCODER:%d:%s] best for " + "[CONNECTOR:%d:%s]\n", + encoder->base.id, + encoder->name, + connector->base.id, + connector->name); + return encoder; + } + } + + return NULL; +} +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) +static enum drm_connector_status +pdp_dvi_connector_detect(struct drm_connector *connector, + bool force) +{ + /* + * It appears that there is no way to determine if a monitor + * is connected. This needs to be set to connected otherwise + * DPMS never gets set to ON. + */ + return connector_status_connected; +} +#endif + +static void pdp_dvi_connector_destroy(struct drm_connector *connector) +{ + struct pdp_drm_private *dev_priv = connector->dev->dev_private; + + DRM_DEBUG_DRIVER("[CONNECTOR:%d:%s]\n", + connector->base.id, + connector->name); + + drm_connector_cleanup(connector); + + kfree(connector); + dev_priv->connector = NULL; +} + +static void pdp_dvi_connector_force(struct drm_connector *connector) +{ +} + +static struct drm_connector_helper_funcs pdp_dvi_connector_helper_funcs = { + .get_modes = pdp_dvi_connector_helper_get_modes, + .mode_valid = pdp_dvi_connector_helper_mode_valid, + /* + * For atomic, don't set atomic_best_encoder or best_encoder. This will + * cause the DRM core to fallback to drm_atomic_helper_best_encoder(). + * This is fine as we only have a single connector and encoder. 
+ */ +#if !defined(PDP_USE_ATOMIC) + .best_encoder = pdp_dvi_connector_helper_best_encoder, +#endif +}; + +static const struct drm_connector_funcs pdp_dvi_connector_funcs = { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) + .detect = pdp_dvi_connector_detect, +#endif + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = pdp_dvi_connector_destroy, + .force = pdp_dvi_connector_force, +#if defined(PDP_USE_ATOMIC) + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +#else + .dpms = drm_helper_connector_dpms, +#endif +}; + + +struct drm_connector * +pdp_dvi_connector_create(struct drm_device *dev) +{ + struct drm_connector *connector; + + connector = kzalloc(sizeof(*connector), GFP_KERNEL); + if (!connector) + return ERR_PTR(-ENOMEM); + + drm_connector_init(dev, + connector, + &pdp_dvi_connector_funcs, + DRM_MODE_CONNECTOR_DVID); + drm_connector_helper_add(connector, &pdp_dvi_connector_helper_funcs); + + connector->dpms = DRM_MODE_DPMS_OFF; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; + connector->display_info.subpixel_order = SubPixelHorizontalRGB; + + DRM_DEBUG_DRIVER("[CONNECTOR:%d:%s]\n", + connector->base.id, + connector->name); + + return connector; +} diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_fb.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_fb.c new file mode 100644 index 000000000000..0bf2522d02d2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_fb.c @@ -0,0 +1,294 @@ +/*************************************************************************/ /*! +@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if defined(CONFIG_DRM_FBDEV_EMULATION) +#include +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0)) +#include +#endif +#include +#include + +#include "drm_pdp_gem.h" +#include "kernel_compatibility.h" + +#define FBDEV_NAME "pdpdrmfb" + +static struct fb_ops pdp_fbdev_ops = { + .owner = THIS_MODULE, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, + .fb_pan_display = drm_fb_helper_pan_display, + .fb_blank = drm_fb_helper_blank, + .fb_setcmap = drm_fb_helper_setcmap, + .fb_debug_enter = drm_fb_helper_debug_enter, + .fb_debug_leave = drm_fb_helper_debug_leave, +}; + + +static struct fb_info * +pdp_fbdev_helper_alloc(struct drm_fb_helper *helper) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)) + struct device *dev = helper->dev->dev; + struct fb_info *info; + int ret; + + info = framebuffer_alloc(0, dev); + if (!info) + return ERR_PTR(-ENOMEM); + + ret = fb_alloc_cmap(&info->cmap, 256, 0); + if (ret) + goto err_release; + + info->apertures = alloc_apertures(1); + if (!info->apertures) { + ret = -ENOMEM; + goto err_free_cmap; + } + + helper->fbdev = info; + + return info; + +err_free_cmap: + fb_dealloc_cmap(&info->cmap); +err_release: + framebuffer_release(info); + return ERR_PTR(ret); +#else + return drm_fb_helper_alloc_fbi(helper); +#endif +} + +static inline void +pdp_fbdev_helper_fill_info(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes, + struct fb_info *info, + struct drm_mode_fb_cmd2 __maybe_unused *mode_cmd) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + drm_fb_helper_fill_fix(info, mode_cmd->pitches[0], helper->fb->depth); + drm_fb_helper_fill_var(info, helper, sizes->fb_width, + sizes->fb_height); +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) + drm_fb_helper_fill_fix(info, 
mode_cmd->pitches[0], + helper->fb->format->depth); + drm_fb_helper_fill_var(info, helper, helper->fb->width, + helper->fb->height); +#else + drm_fb_helper_fill_info(info, helper, sizes); +#endif +} + +static int pdp_fbdev_probe(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct pdp_fbdev *pdp_fbdev = + container_of(helper, struct pdp_fbdev, helper); + struct drm_framebuffer *fb = + to_drm_framebuffer(&pdp_fbdev->fb); + struct pdp_gem_private *gem_priv = pdp_fbdev->priv->gem_priv; + struct drm_device *dev = helper->dev; + struct drm_mode_fb_cmd2 mode_cmd; + struct pdp_gem_object *pdp_obj; + struct drm_gem_object *obj; + struct fb_info *info; + void __iomem *vaddr; + size_t obj_size; + int err; + + if (helper->fb) + return 0; + + mutex_lock(&dev->struct_mutex); + + /* Create a framebuffer */ + info = pdp_fbdev_helper_alloc(helper); + if (!info) { + err = -ENOMEM; + goto err_unlock_dev; + } + + memset(&mode_cmd, 0, sizeof(mode_cmd)); + mode_cmd.pitches[0] = + sizes->surface_width * DIV_ROUND_UP(sizes->surface_bpp, 8); + mode_cmd.width = sizes->surface_width; + mode_cmd.height = sizes->surface_height; + mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, + sizes->surface_depth); + obj_size = mode_cmd.height * mode_cmd.pitches[0]; + + obj = pdp_gem_object_create(dev, gem_priv, obj_size, 0); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto err_unlock_dev; + } + + pdp_obj = to_pdp_obj(obj); + + vaddr = ioremap(pdp_obj->cpu_addr, obj->size); + if (!vaddr) { + err = PTR_ERR(vaddr); + goto err_gem_destroy; + } + + /* Zero fb memory, fb_memset accounts for iomem address space */ + fb_memset(vaddr, 0, obj_size); + + err = pdp_modeset_validate_init(pdp_fbdev->priv, &mode_cmd, + &pdp_fbdev->fb, obj); + if (err) + goto err_gem_unmap; + + helper->fb = fb; + helper->fbdev = info; + + /* Fill out the Linux framebuffer info */ + strlcpy(info->fix.id, FBDEV_NAME, sizeof(info->fix.id)); + pdp_fbdev_helper_fill_info(helper, sizes, 
info, &mode_cmd); + info->par = helper; + info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0)) + info->flags |= FBINFO_CAN_FORCE_OUTPUT; +#endif + info->fbops = &pdp_fbdev_ops; + info->fix.smem_start = pdp_obj->cpu_addr; + info->fix.smem_len = obj_size; + info->screen_base = vaddr; + info->screen_size = obj_size; + info->apertures->ranges[0].base = pdp_obj->cpu_addr; + info->apertures->ranges[0].size = obj_size; + + mutex_unlock(&dev->struct_mutex); + return 0; + +err_gem_unmap: + iounmap(vaddr); + +err_gem_destroy: + pdp_gem_object_free_priv(gem_priv, obj); + +err_unlock_dev: + mutex_unlock(&dev->struct_mutex); + + DRM_ERROR(FBDEV_NAME " - %s failed (err=%d)\n", __func__, err); + return err; +} + +static const struct drm_fb_helper_funcs pdp_fbdev_helper_funcs = { + .fb_probe = pdp_fbdev_probe, +}; + +struct pdp_fbdev *pdp_fbdev_create(struct pdp_drm_private *dev_priv) +{ + struct pdp_fbdev *pdp_fbdev; + int err; + + pdp_fbdev = kzalloc(sizeof(*pdp_fbdev), GFP_KERNEL); + if (!pdp_fbdev) + return ERR_PTR(-ENOMEM); + + drm_fb_helper_prepare(dev_priv->dev, &pdp_fbdev->helper, + &pdp_fbdev_helper_funcs); + + err = drm_fb_helper_init(dev_priv->dev, &pdp_fbdev->helper, 1); + if (err) + goto err_free_fbdev; + + pdp_fbdev->priv = dev_priv; + drm_fb_helper_single_add_all_connectors(&pdp_fbdev->helper); + pdp_fbdev->preferred_bpp = 32; + + /* Call ->fb_probe() */ + err = drm_fb_helper_initial_config(&pdp_fbdev->helper, pdp_fbdev->preferred_bpp); + if (err) + goto err_fb_helper_fini; + + DRM_DEBUG_DRIVER(FBDEV_NAME " - fb device registered\n"); + return pdp_fbdev; + +err_fb_helper_fini: + drm_fb_helper_fini(&pdp_fbdev->helper); + +err_free_fbdev: + kfree(pdp_fbdev); + + DRM_ERROR(FBDEV_NAME " - %s, failed (err=%d)\n", __func__, err); + return ERR_PTR(err); +} + +void pdp_fbdev_destroy(struct pdp_fbdev *pdp_fbdev) +{ + struct pdp_framebuffer *pdp_fb; + struct pdp_gem_object *pdp_obj; + struct drm_framebuffer *fb; + struct 
fb_info *info; + + if (!pdp_fbdev) + return; + + drm_fb_helper_unregister_fbi(&pdp_fbdev->helper); + pdp_fb = &pdp_fbdev->fb; + + pdp_obj = to_pdp_obj(pdp_fb->obj[0]); + if (pdp_obj) { + info = pdp_fbdev->helper.fbdev; + iounmap((void __iomem *)info->screen_base); + } + + drm_gem_object_put_unlocked(pdp_fb->obj[0]); + + drm_fb_helper_fini(&pdp_fbdev->helper); + + fb = to_drm_framebuffer(pdp_fb); + drm_framebuffer_cleanup(fb); + + kfree(pdp_fbdev); +} +#endif /* CONFIG_DRM_FBDEV_EMULATION */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_gem.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_gem.c new file mode 100644 index 000000000000..d145842ee7f4 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_gem.c @@ -0,0 +1,725 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#include +#include +#include +#endif + +#include +#include +#include +#include + +#include + +#if defined(SUPPORT_PLATO_DISPLAY) +#include "plato_drv.h" +#else +#include "tc_drv.h" +#endif + +#include "drm_pdp_gem.h" +#include "pdp_drm.h" +#include "kernel_compatibility.h" + +#if defined(SUPPORT_PLATO_DISPLAY) + typedef struct plato_pdp_platform_data pdp_gem_platform_data; +#else + typedef struct tc_pdp_platform_data pdp_gem_platform_data; +#endif + +struct pdp_gem_private { + struct mutex vram_lock; + struct drm_mm vram; +}; + +static struct pdp_gem_object * +pdp_gem_private_object_create(struct drm_device *dev, + size_t size, + struct dma_resv *resv) +{ + struct pdp_gem_object *pdp_obj; + + WARN_ON(PAGE_ALIGN(size) != size); + + pdp_obj = kzalloc(sizeof(*pdp_obj), GFP_KERNEL); + if (!pdp_obj) + return ERR_PTR(-ENOMEM); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) + if (!resv) + dma_resv_init(&pdp_obj->_resv); +#else + pdp_obj->base.resv = resv; +#endif + drm_gem_private_object_init(dev, &pdp_obj->base, size); + + return pdp_obj; +} + +struct drm_gem_object *pdp_gem_object_create(struct drm_device *dev, + struct pdp_gem_private *gem_priv, + size_t size, + u32 flags) +{ + pdp_gem_platform_data *pdata = + to_platform_device(dev->dev)->dev.platform_data; + struct pdp_gem_object *pdp_obj; + struct drm_mm_node *node; + int err = 0; + + pdp_obj = pdp_gem_private_object_create(dev, size, NULL); + if (!pdp_obj) { + err = -ENOMEM; + goto err_exit; + } + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) { + err = -ENOMEM; + goto err_unref; + } + + mutex_lock(&gem_priv->vram_lock); + err = drm_mm_insert_node(&gem_priv->vram, node, size); + mutex_unlock(&gem_priv->vram_lock); + if (err) + goto err_free_node; + + pdp_obj->vram = node; + pdp_obj->dev_addr = pdp_obj->vram->start; + pdp_obj->cpu_addr = 
pdata->memory_base + pdp_obj->dev_addr; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) + pdp_obj->resv = &pdp_obj->_resv; +#else + pdp_obj->resv = pdp_obj->base.resv; +#endif + pdp_obj->dma_map_export_host_addr = pdata->dma_map_export_host_addr; + + return &pdp_obj->base; + +err_free_node: + kfree(node); +err_unref: + pdp_gem_object_free_priv(gem_priv, &pdp_obj->base); +err_exit: + return ERR_PTR(err); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) +vm_fault_t pdp_gem_object_vm_fault(struct vm_fault *vmf) +#else +int pdp_gem_object_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +#endif +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) + struct vm_area_struct *vma = vmf->vma; +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) + unsigned long addr = vmf->address; +#else + unsigned long addr = (unsigned long)vmf->virtual_address; +#endif + struct drm_gem_object *obj = vma->vm_private_data; + struct pdp_gem_object *pdp_obj = to_pdp_obj(obj); + unsigned long off; + unsigned long pfn; + + off = addr - vma->vm_start; + pfn = (pdp_obj->cpu_addr + off) >> PAGE_SHIFT; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) + return vmf_insert_pfn(vma, addr, pfn); +#else + { + int err; + + err = vm_insert_pfn(vma, addr, pfn); + switch (err) { + case 0: + case -EBUSY: + return VM_FAULT_NOPAGE; + case -ENOMEM: + return VM_FAULT_OOM; + default: + return VM_FAULT_SIGBUS; + } + } +#endif +} + +void pdp_gem_object_free_priv(struct pdp_gem_private *gem_priv, + struct drm_gem_object *obj) +{ + struct pdp_gem_object *pdp_obj = to_pdp_obj(obj); + + drm_gem_free_mmap_offset(obj); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) + if (&pdp_obj->_resv == pdp_obj->resv) + dma_resv_fini(&pdp_obj->_resv); +#endif + if (pdp_obj->vram) { + mutex_lock(&gem_priv->vram_lock); + drm_mm_remove_node(pdp_obj->vram); + mutex_unlock(&gem_priv->vram_lock); + + kfree(pdp_obj->vram); + } else if (obj->import_attach) { + drm_prime_gem_destroy(obj, 
pdp_obj->sgt); + } + + drm_gem_object_release(&pdp_obj->base); + kfree(pdp_obj); +} + +static int pdp_gem_prime_attach(struct dma_buf *dma_buf, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) + struct device *dev, +#endif + struct dma_buf_attachment *attach) +{ + struct drm_gem_object *obj = dma_buf->priv; + + /* Restrict access to Rogue */ + if (WARN_ON(!obj->dev->dev->parent) || + obj->dev->dev->parent != attach->dev->parent) + return -EPERM; + + return 0; +} + +static struct sg_table * +pdp_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, + enum dma_data_direction dir) +{ + struct drm_gem_object *obj = attach->dmabuf->priv; + struct pdp_gem_object *pdp_obj = to_pdp_obj(obj); + struct sg_table *sgt; + + sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return NULL; + + if (sg_alloc_table(sgt, 1, GFP_KERNEL)) + goto err_free_sgt; + + if (pdp_obj->dma_map_export_host_addr) + sg_dma_address(sgt->sgl) = pdp_obj->cpu_addr; + else + sg_dma_address(sgt->sgl) = pdp_obj->dev_addr; + + sg_dma_len(sgt->sgl) = obj->size; + + return sgt; + +err_free_sgt: + kfree(sgt); + return NULL; +} + +static void pdp_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach, + struct sg_table *sgt, + enum dma_data_direction dir) +{ + sg_free_table(sgt); + kfree(sgt); +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) +static void *pdp_gem_prime_kmap_atomic(struct dma_buf *dma_buf, + unsigned long page_num) +{ + return NULL; +} +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) +static void *pdp_gem_prime_kmap(struct dma_buf *dma_buf, + unsigned long page_num) +{ + return NULL; +} +#endif + +static int pdp_gem_prime_mmap(struct dma_buf *dma_buf, + struct vm_area_struct *vma) +{ + struct drm_gem_object *obj = dma_buf->priv; + int err; + + mutex_lock(&obj->dev->struct_mutex); + err = drm_gem_mmap_obj(obj, obj->size, vma); + mutex_unlock(&obj->dev->struct_mutex); + + return err; +} + +#if defined(CONFIG_X86) +static void *pdp_gem_prime_vmap(struct dma_buf 
*dma_buf) +{ + struct drm_gem_object *obj = dma_buf->priv; + struct pdp_gem_object *pdp_obj = to_pdp_obj(obj); + void *vaddr; + + mutex_lock(&obj->dev->struct_mutex); + + /* + * On x86 platforms, the pointer returned by ioremap can be dereferenced + * directly. As such, explicitly cast away the __ioremap qualifier. + */ + vaddr = (void __force *)ioremap(pdp_obj->cpu_addr, obj->size); + if (vaddr == NULL) + DRM_DEBUG_DRIVER("ioremap failed"); + + mutex_unlock(&obj->dev->struct_mutex); + + return vaddr; +} + +static void pdp_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr) +{ + struct drm_gem_object *obj = dma_buf->priv; + + mutex_lock(&obj->dev->struct_mutex); + iounmap((void __iomem *)vaddr); + mutex_unlock(&obj->dev->struct_mutex); +} +#endif + +static const struct dma_buf_ops pdp_gem_prime_dmabuf_ops = { + .attach = pdp_gem_prime_attach, + .map_dma_buf = pdp_gem_prime_map_dma_buf, + .unmap_dma_buf = pdp_gem_prime_unmap_dma_buf, + .release = drm_gem_dmabuf_release, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) + .map_atomic = pdp_gem_prime_kmap_atomic, +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) + .map = pdp_gem_prime_kmap, +#endif +#else + .kmap_atomic = pdp_gem_prime_kmap_atomic, + .kmap = pdp_gem_prime_kmap, +#endif + .mmap = pdp_gem_prime_mmap, +#if defined(CONFIG_X86) + .vmap = pdp_gem_prime_vmap, + .vunmap = pdp_gem_prime_vunmap +#endif +}; + + +static int +pdp_gem_lookup_our_object(struct drm_file *file, u32 handle, + struct drm_gem_object **objp) + +{ + struct drm_gem_object *obj; + + obj = drm_gem_object_lookup(file, handle); + if (!obj) + return -ENOENT; + + if (obj->import_attach) { + /* + * The dmabuf associated with the object is not one of ours. + * Our own buffers are handled differently on import. 
+ */ + drm_gem_object_put_unlocked(obj); + return -EINVAL; + } + + *objp = obj; + return 0; +} + +struct dma_buf *pdp_gem_prime_export( +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) + struct drm_device *dev, +#endif + struct drm_gem_object *obj, + int flags) +{ + struct pdp_gem_object *pdp_obj = to_pdp_obj(obj); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + DEFINE_DMA_BUF_EXPORT_INFO(export_info); + + export_info.ops = &pdp_gem_prime_dmabuf_ops; + export_info.size = obj->size; + export_info.flags = flags; + export_info.resv = pdp_obj->resv; + export_info.priv = obj; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) + return drm_gem_dmabuf_export(obj->dev, &export_info); +#else +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) + return drm_gem_dmabuf_export(dev, &export_info); +#else + return dma_buf_export(&export_info); +#endif +#endif +#else + return dma_buf_export(obj, &pdp_gem_prime_dmabuf_ops, obj->size, + flags, pdp_obj->resv); +#endif +} + +struct drm_gem_object * +pdp_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) +{ + struct drm_gem_object *obj = dma_buf->priv; + + if (obj->dev == dev) { + BUG_ON(dma_buf->ops != &pdp_gem_prime_dmabuf_ops); + + /* + * The dmabuf is one of ours, so return the associated + * PDP GEM object, rather than create a new one. 
+ */ + drm_gem_object_get(obj); + + return obj; + } + + return drm_gem_prime_import(dev, dma_buf); +} + +struct drm_gem_object * +pdp_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt) +{ + pdp_gem_platform_data *pdata = + to_platform_device(dev->dev)->dev.platform_data; + struct pdp_gem_object *pdp_obj; + int err; + + pdp_obj = pdp_gem_private_object_create(dev, + attach->dmabuf->size, + attach->dmabuf->resv); + if (!pdp_obj) { + err = -ENOMEM; + goto err_exit; + } + + pdp_obj->sgt = sgt; + + /* We only expect a single entry for card memory */ + if (pdp_obj->sgt->nents != 1) { + err = -EINVAL; + goto err_obj_unref; + } + + pdp_obj->dev_addr = sg_dma_address(pdp_obj->sgt->sgl); + pdp_obj->cpu_addr = pdata->memory_base + pdp_obj->dev_addr; + pdp_obj->resv = attach->dmabuf->resv; + + return &pdp_obj->base; + +err_obj_unref: + drm_gem_object_put_unlocked(&pdp_obj->base); +err_exit: + return ERR_PTR(err); +} + +int pdp_gem_dumb_create_priv(struct drm_file *file, + struct drm_device *dev, + struct pdp_gem_private *gem_priv, + struct drm_mode_create_dumb *args) +{ + struct drm_gem_object *obj; + u32 handle; + u32 pitch; + size_t size; + int err; + + pitch = args->width * (ALIGN(args->bpp, 8) >> 3); + size = PAGE_ALIGN(pitch * args->height); + + obj = pdp_gem_object_create(dev, gem_priv, size, 0); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + err = drm_gem_handle_create(file, obj, &handle); + if (err) + goto exit; + + args->handle = handle; + args->pitch = pitch; + args->size = size; + +exit: + drm_gem_object_put_unlocked(obj); + return err; +} + +int pdp_gem_dumb_map_offset(struct drm_file *file, + struct drm_device *dev, + uint32_t handle, + uint64_t *offset) +{ + struct drm_gem_object *obj; + int err; + + mutex_lock(&dev->struct_mutex); + + err = pdp_gem_lookup_our_object(file, handle, &obj); + if (err) + goto exit_unlock; + + err = drm_gem_create_mmap_offset(obj); + if (err) + goto exit_obj_unref; + + *offset 
= drm_vma_node_offset_addr(&obj->vma_node); + +exit_obj_unref: + drm_gem_object_put_unlocked(obj); +exit_unlock: + mutex_unlock(&dev->struct_mutex); + return err; +} + +struct pdp_gem_private *pdp_gem_init(struct drm_device *dev) +{ +#if !defined(SUPPORT_ION) || defined(SUPPORT_GEM_ALLOC) + pdp_gem_platform_data *pdata = + to_platform_device(dev->dev)->dev.platform_data; +#endif + struct pdp_gem_private *gem_priv = + kmalloc(sizeof(*gem_priv), GFP_KERNEL); + + if (!gem_priv) + return NULL; + + mutex_init(&gem_priv->vram_lock); + + memset(&gem_priv->vram, 0, sizeof(gem_priv->vram)); + +#if defined(SUPPORT_ION) && !defined(SUPPORT_GEM_ALLOC) + drm_mm_init(&gem_priv->vram, 0, 0); + DRM_INFO("%s has no directly allocatable memory; the memory is managed by ION\n", + dev->driver->name); +#else + drm_mm_init(&gem_priv->vram, + pdata->pdp_heap_memory_base - pdata->memory_base, + pdata->pdp_heap_memory_size); + + DRM_INFO("%s has %pa bytes of allocatable memory at 0x%llx = (0x%llx - 0x%llx)\n", + dev->driver->name, &pdata->pdp_heap_memory_size, + (u64)(pdata->pdp_heap_memory_base - pdata->memory_base), + (u64)pdata->pdp_heap_memory_base, (u64)pdata->memory_base); +#endif + return gem_priv; +} + +void pdp_gem_cleanup(struct pdp_gem_private *gem_priv) +{ + drm_mm_takedown(&gem_priv->vram); + mutex_destroy(&gem_priv->vram_lock); + + kfree(gem_priv); +} + +struct dma_resv *pdp_gem_get_resv(struct drm_gem_object *obj) +{ + return (to_pdp_obj(obj)->resv); +} + +u64 pdp_gem_get_dev_addr(struct drm_gem_object *obj) +{ + struct pdp_gem_object *pdp_obj = to_pdp_obj(obj); + + return pdp_obj->dev_addr; +} + +int pdp_gem_object_create_ioctl_priv(struct drm_device *dev, + struct pdp_gem_private *gem_priv, + void *data, + struct drm_file *file) +{ + struct drm_pdp_gem_create *args = data; + struct drm_gem_object *obj; + int err; + + if (args->flags) { + DRM_ERROR("invalid flags: %#08x\n", args->flags); + return -EINVAL; + } + + if (args->handle) { + DRM_ERROR("invalid handle (this should 
always be 0)\n"); + return -EINVAL; + } + + obj = pdp_gem_object_create(dev, + gem_priv, + PAGE_ALIGN(args->size), + args->flags); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + err = drm_gem_handle_create(file, obj, &args->handle); + drm_gem_object_put_unlocked(obj); + + return err; + +} + +int pdp_gem_object_mmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_pdp_gem_mmap *args = (struct drm_pdp_gem_mmap *)data; + + if (args->pad) { + DRM_ERROR("invalid pad (this should always be 0)\n"); + return -EINVAL; + } + + if (args->offset) { + DRM_ERROR("invalid offset (this should always be 0)\n"); + return -EINVAL; + } + + return pdp_gem_dumb_map_offset(file, dev, args->handle, &args->offset); +} + +int pdp_gem_object_cpu_prep_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_pdp_gem_cpu_prep *args = (struct drm_pdp_gem_cpu_prep *)data; + struct drm_gem_object *obj; + struct pdp_gem_object *pdp_obj; + bool write = !!(args->flags & PDP_GEM_CPU_PREP_WRITE); + bool wait = !(args->flags & PDP_GEM_CPU_PREP_NOWAIT); + int err = 0; + + if (args->flags & ~(PDP_GEM_CPU_PREP_READ | + PDP_GEM_CPU_PREP_WRITE | + PDP_GEM_CPU_PREP_NOWAIT)) { + DRM_ERROR("invalid flags: %#08x\n", args->flags); + return -EINVAL; + } + + mutex_lock(&dev->struct_mutex); + + err = pdp_gem_lookup_our_object(file, args->handle, &obj); + if (err) + goto exit_unlock; + + pdp_obj = to_pdp_obj(obj); + + if (pdp_obj->cpu_prep) { + err = -EBUSY; + goto exit_unref; + } + + if (wait) { + long lerr; + + lerr = dma_resv_wait_timeout_rcu(pdp_obj->resv, + write, + true, + 30 * HZ); + if (!lerr) + err = -EBUSY; + else if (lerr < 0) + err = lerr; + } else { + if (!dma_resv_test_signaled_rcu(pdp_obj->resv, + write)) + err = -EBUSY; + } + + if (!err) + pdp_obj->cpu_prep = true; + +exit_unref: + drm_gem_object_put_unlocked(obj); +exit_unlock: + mutex_unlock(&dev->struct_mutex); + return err; +} + +int pdp_gem_object_cpu_fini_ioctl(struct drm_device *dev, 
void *data, + struct drm_file *file) +{ + struct drm_pdp_gem_cpu_fini *args = (struct drm_pdp_gem_cpu_fini *)data; + struct drm_gem_object *obj; + struct pdp_gem_object *pdp_obj; + int err = 0; + + if (args->pad) { + DRM_ERROR("invalid pad (this should always be 0)\n"); + return -EINVAL; + } + + mutex_lock(&dev->struct_mutex); + + err = pdp_gem_lookup_our_object(file, args->handle, &obj); + if (err) + goto exit_unlock; + + pdp_obj = to_pdp_obj(obj); + + if (!pdp_obj->cpu_prep) { + err = -EINVAL; + goto exit_unref; + } + + pdp_obj->cpu_prep = false; + +exit_unref: + drm_gem_object_put_unlocked(obj); +exit_unlock: + mutex_unlock(&dev->struct_mutex); + return err; +} diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_gem.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_gem.h new file mode 100644 index 000000000000..54526c9cd10f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_gem.h @@ -0,0 +1,151 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__DRM_PDP_GEM_H__) +#define __DRM_PDP_GEM_H__ + +#include +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#include +#include +#else +#include +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) +#include +#endif + +#include "drm_pdp_drv.h" +#include "pvr_dma_resv.h" + +struct pdp_gem_private; + +struct pdp_gem_object { + struct drm_gem_object base; + + /* Non-null if backing originated from this driver */ + struct drm_mm_node *vram; + + /* Non-null if backing was imported */ + struct sg_table *sgt; + + bool dma_map_export_host_addr; + phys_addr_t cpu_addr; + dma_addr_t dev_addr; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) + struct dma_resv _resv; +#endif + struct dma_resv *resv; + + bool cpu_prep; +}; + +#define to_pdp_obj(obj) container_of(obj, struct pdp_gem_object, base) + +struct pdp_gem_private *pdp_gem_init(struct drm_device *dev); + +void pdp_gem_cleanup(struct pdp_gem_private *dev_priv); + +/* ioctl functions */ +int pdp_gem_object_create_ioctl_priv(struct drm_device *dev, + struct pdp_gem_private *gem_priv, + void *data, + struct drm_file *file); +int pdp_gem_object_mmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int pdp_gem_object_cpu_prep_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int pdp_gem_object_cpu_fini_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); + +/* drm driver functions */ +struct drm_gem_object *pdp_gem_object_create(struct drm_device *dev, + struct pdp_gem_private *gem_priv, + size_t size, + u32 flags); + +void pdp_gem_object_free_priv(struct pdp_gem_private *gem_priv, + struct drm_gem_object *obj); + +struct dma_buf *pdp_gem_prime_export( +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) + struct drm_device *dev, +#endif + struct drm_gem_object *obj, + int flags); + +struct drm_gem_object *pdp_gem_prime_import(struct drm_device 
*dev, + struct dma_buf *dma_buf); + +struct drm_gem_object * +pdp_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt); + +int pdp_gem_dumb_create_priv(struct drm_file *file, + struct drm_device *dev, + struct pdp_gem_private *gem_priv, + struct drm_mode_create_dumb *args); + +int pdp_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, + uint32_t handle, uint64_t *offset); + +/* vm operation functions */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)) +typedef int vm_fault_t; +#endif +vm_fault_t pdp_gem_object_vm_fault(struct vm_fault *vmf); +#else +int pdp_gem_object_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf); +#endif + +/* internal interfaces */ +struct dma_resv *pdp_gem_get_resv(struct drm_gem_object *obj); +u64 pdp_gem_get_dev_addr(struct drm_gem_object *obj); + +#endif /* !defined(__DRM_PDP_GEM_H__) */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_modeset.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_modeset.c new file mode 100644 index 000000000000..2b4fc8a8e340 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_modeset.c @@ -0,0 +1,462 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "drm_pdp_drv.h" + +#include +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#include +#include +#else +#include +#endif + +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) +#include +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) +#define drm_gem_fb_create(...) pdp_framebuffer_create(__VA_ARGS__) +#else +#include +#endif + +#if defined(PDP_USE_ATOMIC) +#include +#endif + +#include "kernel_compatibility.h" + +#define PDP_WIDTH_MIN 640 +#define PDP_WIDTH_MAX 1280 +#define PDP_HEIGHT_MIN 480 +#define PDP_HEIGHT_MAX 1024 + +#define ODIN_PDP_WIDTH_MAX 1920 +#define ODIN_PDP_HEIGHT_MAX 1080 + +#define ORION_PDP_WIDTH_MAX 1280 +#define ORION_PDP_HEIGHT_MAX 720 + +#define PLATO_PDP_WIDTH_MAX 1920 +#define PLATO_PDP_HEIGHT_MAX 1080 + +static bool async_flip_enable = true; + +module_param(async_flip_enable, bool, 0444); + +MODULE_PARM_DESC(async_flip_enable, + "Enable support for 'faked' async flipping (default: Y)"); + +static inline int +drm_mode_fb_cmd2_validate(const struct drm_mode_fb_cmd2 *mode_cmd) +{ + switch (mode_cmd->pixel_format) { + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_RGB565: + break; + default: + DRM_ERROR_RATELIMITED("pixel format not supported (format = %u)\n", + mode_cmd->pixel_format); + return -EINVAL; + } + + if (mode_cmd->flags & DRM_MODE_FB_INTERLACED) { + DRM_ERROR_RATELIMITED("interlaced framebuffers not supported\n"); + return -EINVAL; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + if (mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE) { + DRM_ERROR_RATELIMITED("format modifier 0x%llx is not supported\n", + mode_cmd->modifier[0]); + return -EINVAL; + } +#endif + + return 0; +} + +static void pdp_framebuffer_destroy(struct drm_framebuffer *fb) +{ + struct pdp_framebuffer *pdp_fb = to_pdp_framebuffer(fb); + + DRM_DEBUG_DRIVER("[FB:%d]\n", fb->base.id); + + 
drm_framebuffer_cleanup(fb); + + drm_gem_object_put_unlocked(pdp_fb->obj[0]); + + kfree(pdp_fb); +} + +static int pdp_framebuffer_create_handle(struct drm_framebuffer *fb, + struct drm_file *file, + unsigned int *handle) +{ + struct pdp_framebuffer *pdp_fb = to_pdp_framebuffer(fb); + + DRM_DEBUG_DRIVER("[FB:%d]\n", fb->base.id); + + return drm_gem_handle_create(file, pdp_fb->obj[0], handle); +} + +static const struct drm_framebuffer_funcs pdp_framebuffer_funcs = { + .destroy = pdp_framebuffer_destroy, + .create_handle = pdp_framebuffer_create_handle, + .dirty = NULL, +}; + +static inline int +pdp_framebuffer_init(struct pdp_drm_private *dev_priv, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || \ + (defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) + const +#endif + struct drm_mode_fb_cmd2 *mode_cmd, + struct pdp_framebuffer *pdp_fb, + struct drm_gem_object *obj) +{ + struct drm_framebuffer *fb; + + if (!pdp_fb) + return -EINVAL; + + fb = to_drm_framebuffer(pdp_fb); + pdp_fb->obj[0] = obj; + + drm_helper_mode_fill_fb_struct(dev_priv->dev, fb, mode_cmd); + + return drm_framebuffer_init(dev_priv->dev, fb, &pdp_framebuffer_funcs); +} + +int pdp_modeset_validate_init(struct pdp_drm_private *dev_priv, + struct drm_mode_fb_cmd2 *mode_cmd, + struct pdp_framebuffer *pdp_fb, + struct drm_gem_object *obj) +{ + int err; + + err = drm_mode_fb_cmd2_validate(mode_cmd); + if (err) + return err; + + return pdp_framebuffer_init(dev_priv, mode_cmd, pdp_fb, obj); +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) +static struct drm_framebuffer * +pdp_framebuffer_create(struct drm_device *dev, + struct drm_file *file, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || \ + (defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) + const +#endif + struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct pdp_drm_private *dev_priv = dev->dev_private; + struct drm_gem_object *obj; + struct pdp_framebuffer *pdp_fb; + int err; + 
+ obj = drm_gem_object_lookup(file, mode_cmd->handles[0]); + if (!obj) { + DRM_ERROR("failed to find buffer with handle %u\n", + mode_cmd->handles[0]); + err = -ENOENT; + goto err_out; + } + + pdp_fb = kzalloc(sizeof(*pdp_fb), GFP_KERNEL); + if (!pdp_fb) { + err = -ENOMEM; + goto err_obj_put; + } + + err = pdp_framebuffer_init(dev_priv, mode_cmd, pdp_fb, obj); + if (err) { + DRM_ERROR("failed to initialise framebuffer (err=%d)\n", err); + goto err_free_fb; + } + + DRM_DEBUG_DRIVER("[FB:%d]\n", pdp_fb->base.base.id); + + return &pdp_fb->base; + +err_free_fb: + kfree(pdp_fb); +err_obj_put: + drm_gem_object_put_unlocked(obj); +err_out: + return ERR_PTR(err); +} +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) */ + + +/************************************************************************* + * DRM mode config callbacks + **************************************************************************/ + +static struct drm_framebuffer * +pdp_fb_create(struct drm_device *dev, + struct drm_file *file, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || \ + (defined(CHROMIUMOS_KERNEL) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) + const +#endif + struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct drm_framebuffer *fb; + int err; + + err = drm_mode_fb_cmd2_validate(mode_cmd); + if (err) + return ERR_PTR(err); + + fb = drm_gem_fb_create(dev, file, mode_cmd); + if (IS_ERR(fb)) + goto out; + + DRM_DEBUG_DRIVER("[FB:%d]\n", fb->base.id); + +out: + return fb; +} + +static const struct drm_mode_config_funcs pdp_mode_config_funcs = { + .fb_create = pdp_fb_create, + .output_poll_changed = NULL, +#if defined(PDP_USE_ATOMIC) + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +#endif +}; + + +int pdp_modeset_early_init(struct pdp_drm_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + int err; + + drm_mode_config_init(dev); + + dev->mode_config.funcs = &pdp_mode_config_funcs; + dev->mode_config.min_width = 
PDP_WIDTH_MIN; + dev->mode_config.min_height = PDP_HEIGHT_MIN; + + switch (dev_priv->version) { + case PDP_VERSION_APOLLO: + dev->mode_config.max_width = PDP_WIDTH_MAX; + dev->mode_config.max_height = PDP_HEIGHT_MAX; + break; + case PDP_VERSION_ODIN: + if (dev_priv->subversion == PDP_ODIN_ORION) { + dev->mode_config.max_width = ORION_PDP_WIDTH_MAX; + dev->mode_config.max_height = ORION_PDP_HEIGHT_MAX; + } else { + dev->mode_config.max_width = ODIN_PDP_WIDTH_MAX; + dev->mode_config.max_height = ODIN_PDP_HEIGHT_MAX; + } + break; + case PDP_VERSION_PLATO: + dev->mode_config.max_width = PLATO_PDP_WIDTH_MAX; + dev->mode_config.max_height = PLATO_PDP_HEIGHT_MAX; + break; + default: + BUG(); + } + + DRM_INFO("max_width is %d\n", + dev->mode_config.max_width); + DRM_INFO("max_height is %d\n", + dev->mode_config.max_height); + + dev->mode_config.fb_base = 0; + dev->mode_config.async_page_flip = async_flip_enable; + + DRM_INFO("%s async flip support is %s\n", + dev->driver->name, async_flip_enable ? 
"enabled" : "disabled"); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + dev->mode_config.allow_fb_modifiers = true; +#endif + + dev_priv->plane = pdp_plane_create(dev, DRM_PLANE_TYPE_PRIMARY); + if (IS_ERR(dev_priv->plane)) { + DRM_ERROR("failed to create a primary plane\n"); + err = PTR_ERR(dev_priv->plane); + goto err_config_cleanup; + } + + dev_priv->crtc = pdp_crtc_create(dev, 0, dev_priv->plane); + if (IS_ERR(dev_priv->crtc)) { + DRM_ERROR("failed to create a CRTC\n"); + err = PTR_ERR(dev_priv->crtc); + goto err_config_cleanup; + } + + switch (dev_priv->version) { + case PDP_VERSION_APOLLO: + case PDP_VERSION_ODIN: + dev_priv->connector = pdp_dvi_connector_create(dev); + if (IS_ERR(dev_priv->connector)) { + DRM_ERROR("failed to create a connector\n"); + err = PTR_ERR(dev_priv->connector); + goto err_config_cleanup; + } + + dev_priv->encoder = pdp_tmds_encoder_create(dev); + if (IS_ERR(dev_priv->encoder)) { + DRM_ERROR("failed to create an encoder\n"); + err = PTR_ERR(dev_priv->encoder); + goto err_config_cleanup; + } + + err = drm_connector_attach_encoder(dev_priv->connector, + dev_priv->encoder); + if (err) { + DRM_ERROR("failed to attach [ENCODER:%d:%s] to [CONNECTOR:%d:%s] (err=%d)\n", + dev_priv->encoder->base.id, + dev_priv->encoder->name, + dev_priv->connector->base.id, + dev_priv->connector->name, + err); + goto err_config_cleanup; + } + break; + case PDP_VERSION_PLATO: + // PLATO connectors are created in HDMI component driver + break; + default: + BUG(); + } + + DRM_DEBUG_DRIVER("initialised\n"); + + return 0; + +err_config_cleanup: + drm_mode_config_cleanup(dev); + + return err; +} + +static inline int pdp_modeset_init_fbdev(struct pdp_drm_private *dev_priv) +{ +#if defined(CONFIG_DRM_FBDEV_EMULATION) + struct pdp_fbdev *fbdev; + int err; + + fbdev = pdp_fbdev_create(dev_priv); + if (IS_ERR(fbdev)) { + DRM_ERROR("failed to create a fb device"); + return PTR_ERR(fbdev); + } + dev_priv->fbdev = fbdev; + + /* + * pdpdrmfb is registered and 
available for userspace to use. If this + * is the only or primary device, fbcon has already bound a tty to it, + * and the following call will take no effect. However, this may be + * essential in order to sync the display when fbcon was already bound + * to a different tty (and fbdev). This triggers ->set_config() which + * will in turn set up a config and then do a modeset. + */ + err = drm_fb_helper_restore_fbdev_mode_unlocked(&dev_priv->fbdev->helper); + if (err) { + DRM_ERROR("failed to set mode (err=%d)\n", err); + return err; + } +#endif + return 0; +} + +int pdp_modeset_late_init(struct pdp_drm_private *dev_priv) +{ + struct drm_device *ddev = dev_priv->dev; + int err; + + drm_mode_config_reset(ddev); + + err = pdp_modeset_init_fbdev(dev_priv); + if (err) + DRM_INFO("fbdev init failure is not fatal, continue anyway.\n"); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) + if (dev_priv->connector != NULL) { + err = drm_connector_register(dev_priv->connector); + if (err) { + DRM_ERROR("[CONNECTOR:%d:%s] failed to register (err=%d)\n", + dev_priv->connector->base.id, + dev_priv->connector->name, + err); + return err; + } + } +#endif + return 0; +} + +void pdp_modeset_early_cleanup(struct pdp_drm_private *dev_priv) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) + if (dev_priv->connector != NULL) + drm_connector_unregister(dev_priv->connector); +#endif +} + +void pdp_modeset_late_cleanup(struct pdp_drm_private *dev_priv) +{ +#if defined(CONFIG_DRM_FBDEV_EMULATION) + pdp_fbdev_destroy(dev_priv->fbdev); +#endif + drm_mode_config_cleanup(dev_priv->dev); + + DRM_DEBUG_DRIVER("cleaned up\n"); +} diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_plane.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_plane.c new file mode 100644 index 000000000000..79fe9d1cfa8c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_plane.c @@ -0,0 +1,278 @@ +/* vi: set ts=8 sw=8 sts=8: */ 
+/*************************************************************************/ /*! +@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include + +#include "drm_pdp_drv.h" + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0)) +#include +#endif + +#include + +#if defined(PDP_USE_ATOMIC) +#include +#include +#include +#endif + +#include "drm_pdp_gem.h" +#include "pdp_apollo.h" +#include "pdp_odin.h" +#include "pdp_plato.h" + +#include "kernel_compatibility.h" + + +#if defined(PDP_USE_ATOMIC) +static int pdp_plane_helper_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct drm_crtc_state *crtc_new_state; + + if (!state->crtc) + return 0; + + crtc_new_state = drm_atomic_get_new_crtc_state(state->state, + state->crtc); + + return drm_atomic_helper_check_plane_state(state, crtc_new_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + false, true); +} + +static void pdp_plane_helper_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_plane_state *plane_state = plane->state; + struct drm_framebuffer *fb = plane_state->fb; + + if (fb) { + pdp_plane_set_surface(plane_state->crtc, plane, fb, + plane_state->src_x, plane_state->src_y); + } +} + +static const struct drm_plane_helper_funcs pdp_plane_helper_funcs = { + .prepare_fb = drm_gem_fb_prepare_fb, + .atomic_check = pdp_plane_helper_atomic_check, + .atomic_update = pdp_plane_helper_atomic_update, +}; + +static const struct drm_plane_funcs pdp_plane_funcs = { + 
.update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_primary_helper_destroy, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; +#else +#define pdp_plane_funcs drm_primary_helper_funcs +#endif + +struct drm_plane *pdp_plane_create(struct drm_device *dev, + enum drm_plane_type type) +{ + struct pdp_drm_private *dev_priv = dev->dev_private; + struct drm_plane *plane; + const uint32_t *supported_formats; + uint32_t num_supported_formats; + const uint32_t apollo_plato_formats[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, + }; + const uint32_t odin_formats[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_RGB565, + }; + int err; + + switch (dev_priv->version) { + case PDP_VERSION_ODIN: + supported_formats = odin_formats; + num_supported_formats = ARRAY_SIZE(odin_formats); + break; + case PDP_VERSION_APOLLO: + case PDP_VERSION_PLATO: + supported_formats = apollo_plato_formats; + num_supported_formats = ARRAY_SIZE(apollo_plato_formats); + break; + default: + DRM_ERROR("Unsupported PDP version\n"); + err = -EINVAL; + goto err_exit; + } + + plane = kzalloc(sizeof(*plane), GFP_KERNEL); + if (!plane) { + err = -ENOMEM; + goto err_exit; + } + + err = drm_universal_plane_init(dev, plane, 0, &pdp_plane_funcs, + supported_formats, + num_supported_formats, + NULL, type, NULL); + if (err) + goto err_plane_free; + +#if defined(PDP_USE_ATOMIC) + drm_plane_helper_add(plane, &pdp_plane_helper_funcs); +#endif + + DRM_DEBUG_DRIVER("[PLANE:%d]\n", plane->base.id); + + return plane; + +err_plane_free: + kfree(plane); +err_exit: + return ERR_PTR(err); +} + +void pdp_plane_set_surface(struct drm_crtc *crtc, struct drm_plane *plane, + struct drm_framebuffer *fb, + const uint32_t src_x, const uint32_t src_y) +{ + struct pdp_drm_private *dev_priv = plane->dev->dev_private; + struct 
pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); + struct pdp_framebuffer *pdp_fb = to_pdp_framebuffer(fb); + unsigned int pitch = fb->pitches[0]; + uint64_t address = pdp_gem_get_dev_addr(pdp_fb->obj[0]); + uint32_t format; + + /* + * User space specifies 'x' and 'y' and this is used to tell the display + * to scan out from part way through a buffer. + */ + address += ((src_y * pitch) + (src_x * (pdp_drm_fb_cpp(fb)))); + + /* + * NOTE: If the buffer dimensions are less than the current mode then + * the output will appear in the top left of the screen. This can be + * centered by adjusting horizontal active start, right border start, + * vertical active start and bottom border start. At this point it's + * not entirely clear where this should be done. On the one hand it's + * related to pdp_crtc_helper_mode_set but on the other hand there + * might not always be a call to pdp_crtc_helper_mode_set. This needs + * to be investigated. + */ + switch (dev_priv->version) { + case PDP_VERSION_APOLLO: + switch (pdp_drm_fb_format(fb)) { + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_XRGB8888: + format = 0xE; + break; + default: + DRM_ERROR("unsupported pixel format (format = %d)\n", + pdp_drm_fb_format(fb)); + return; + } + + pdp_apollo_set_surface(plane->dev->dev, + pdp_crtc->pdp_reg, + 0, + address, + 0, 0, + fb->width, fb->height, pitch, + format, + 255, + false); + break; + case PDP_VERSION_ODIN: + switch (pdp_drm_fb_format(fb)) { + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_XRGB8888: + format = ODN_PDP_SURF_PIXFMT_ARGB8888; + break; + case DRM_FORMAT_RGB565: + format = ODN_PDP_SURF_PIXFMT_RGB565; + break; + default: + DRM_ERROR("unsupported pixel format (format = %d)\n", + pdp_drm_fb_format(fb)); + return; + } + + pdp_odin_set_surface(plane->dev->dev, + pdp_crtc->pdp_reg, + 0, + address, + 0, 0, + fb->width, fb->height, pitch, + format, + 255, + false); + break; + case PDP_VERSION_PLATO: + switch (pdp_drm_fb_format(fb)) { + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_XRGB8888: 
+ format = PLATO_PDP_PIXEL_FORMAT_ARGB8; + break; + default: + DRM_ERROR("unsupported pixel format (format = %d)\n", + pdp_drm_fb_format(fb)); + return; + } + + pdp_plato_set_surface(crtc->dev->dev, + pdp_crtc->pdp_reg, + pdp_crtc->pdp_bif_reg, + 0, + address, + 0, 0, + fb->width, fb->height, pitch, + format, + 255, + false); + break; + default: + BUG(); + } +} diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_tmds.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_tmds.c new file mode 100644 index 000000000000..3e03cdd9fd0f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/drm_pdp_tmds.c @@ -0,0 +1,145 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0)) +#include +#endif + +#include + +#include "drm_pdp_drv.h" + +#include "kernel_compatibility.h" + +static void pdp_tmds_encoder_helper_dpms(struct drm_encoder *encoder, int mode) +{ +} + +static bool +pdp_tmds_encoder_helper_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static void pdp_tmds_encoder_helper_prepare(struct drm_encoder *encoder) +{ +} + +static void pdp_tmds_encoder_helper_commit(struct drm_encoder *encoder) +{ +} + +static void +pdp_tmds_encoder_helper_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ +} + +static void pdp_tmds_encoder_destroy(struct drm_encoder *encoder) +{ + struct pdp_drm_private *dev_priv = encoder->dev->dev_private; + + DRM_DEBUG_DRIVER("[ENCODER:%d:%s]\n", + encoder->base.id, + encoder->name); + + drm_encoder_cleanup(encoder); + + kfree(encoder); + dev_priv->encoder = NULL; +} + +static const struct drm_encoder_helper_funcs pdp_tmds_encoder_helper_funcs = { + .dpms = pdp_tmds_encoder_helper_dpms, + .mode_fixup = pdp_tmds_encoder_helper_mode_fixup, + .prepare = pdp_tmds_encoder_helper_prepare, + .commit = pdp_tmds_encoder_helper_commit, + .mode_set = pdp_tmds_encoder_helper_mode_set, + .get_crtc = NULL, + .detect = NULL, + .disable = NULL, +}; + +static const struct drm_encoder_funcs pdp_tmds_encoder_funcs = { + .reset = NULL, + .destroy = pdp_tmds_encoder_destroy, +}; + +struct drm_encoder * +pdp_tmds_encoder_create(struct drm_device *dev) +{ + struct drm_encoder *encoder; + int err; + + encoder = kzalloc(sizeof(*encoder), GFP_KERNEL); + if (!encoder) + return ERR_PTR(-ENOMEM); + + err = drm_encoder_init(dev, + encoder, + &pdp_tmds_encoder_funcs, + DRM_MODE_ENCODER_TMDS, + NULL); + if (err) { + DRM_ERROR("Failed to 
initialise encoder"); + return ERR_PTR(err); + } + drm_encoder_helper_add(encoder, &pdp_tmds_encoder_helper_funcs); + + /* + * This is a bit field that's used to determine which + * CRTCs can drive this encoder. + */ + encoder->possible_crtcs = 0x1; + + DRM_DEBUG_DRIVER("[ENCODER:%d:%s]\n", + encoder->base.id, + encoder->name); + + return encoder; +} diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_apollo.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_apollo.c new file mode 100644 index 000000000000..935c67b7671d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_apollo.c @@ -0,0 +1,333 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include + +#include "pdp_apollo.h" +#include "pdp_common.h" +#include "pdp_regs.h" +#include "tcf_rgbpdp_regs.h" +#include "tcf_pll.h" + +/* Map a register to the "pll-regs" region */ +#define PLL_REG(n) ((n) - TCF_PLL_PLL_PDP_CLK0) + +bool pdp_apollo_clocks_set(struct device *dev, + void __iomem *pdp_reg, void __iomem *pll_reg, + u32 clock_in_mhz, + void __iomem *odn_core_reg, + u32 hdisplay, u32 vdisplay) +{ + /* + * Setup TCF_CR_PLL_PDP_CLK1TO5 based on the main clock speed + * (clock 0 or 3) + */ + const u32 clock = (clock_in_mhz >= 50) ? 
0 : 0x3; + + /* Set phase 0, ratio 50:50 and frequency in MHz */ + pll_wreg32(pll_reg, PLL_REG(TCF_PLL_PLL_PDP_CLK0), clock_in_mhz); + + pll_wreg32(pll_reg, PLL_REG(TCF_PLL_PLL_PDP_CLK1TO5), clock); + + /* Now initiate reprogramming of the PLLs */ + pll_wreg32(pll_reg, PLL_REG(TCF_PLL_PLL_PDP_DRP_GO), 0x1); + + udelay(1000); + + pll_wreg32(pll_reg, PLL_REG(TCF_PLL_PLL_PDP_DRP_GO), 0x0); + + return true; +} + +void pdp_apollo_set_updates_enabled(struct device *dev, void __iomem *pdp_reg, + bool enable) +{ +#ifdef PDP_VERBOSE + dev_info(dev, "Set updates: %s\n", enable ? "enable" : "disable"); +#endif + /* nothing to do here */ +} + +void pdp_apollo_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg, + bool enable) +{ + u32 value; + +#ifdef PDP_VERBOSE + dev_info(dev, "Set syncgen: %s\n", enable ? "enable" : "disable"); +#endif + + value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL); + value = REG_VALUE_SET(value, enable ? 0x1 : 0x0, + SYNCACTIVE_SHIFT, SYNCACTIVE_MASK); + pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL, value); +} + +void pdp_apollo_set_powerdwn_enabled(struct device *dev, void __iomem *pdp_reg, + bool enable) +{ + u32 value; + +#ifdef PDP_VERBOSE + dev_info(dev, "Set powerdwn: %s\n", enable ? "enable" : "disable"); +#endif + + value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL); + value = REG_VALUE_SET(value, enable ? 0x1 : 0x0, + POWERDN_SHIFT, POWERDN_MASK); + pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL, value); +} + +void pdp_apollo_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg, + bool enable) +{ + u32 value; + +#ifdef PDP_VERBOSE + dev_info(dev, "Set vblank: %s\n", enable ? "enable" : "disable"); +#endif + + value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB); + value = REG_VALUE_SET(value, enable ? 
0x1 : 0x0, + INTEN_VBLNK0_SHIFT, INTEN_VBLNK0_MASK); + pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB, value); +} + +bool pdp_apollo_check_and_clear_vblank(struct device *dev, + void __iomem *pdp_reg) +{ + u32 value; + + value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_INTSTAT); + + if (REG_VALUE_GET(value, INTS_VBLNK0_SHIFT, INTS_VBLNK0_MASK)) { + value = REG_VALUE_SET(0, 0x1, + INTCLR_VBLNK0_SHIFT, INTCLR_VBLNK0_MASK); + pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_INTCLEAR, value); + return true; + } + return false; +} + +void pdp_apollo_set_plane_enabled(struct device *dev, void __iomem *pdp_reg, + u32 plane, bool enable) +{ + u32 value; + +#ifdef PDP_VERBOSE + dev_info(dev, "Set plane %u: %s\n", + plane, enable ? "enable" : "disable"); +#endif + + if (plane > 0) { + dev_err(dev, "Maximum of 1 plane is supported\n"); + return; + } + + value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL); + value = REG_VALUE_SET(value, enable ? 0x1 : 0x0, + STR1STREN_SHIFT, STR1STREN_MASK); + pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL, value); +} + +void pdp_apollo_reset_planes(struct device *dev, void __iomem *pdp_reg) +{ +#ifdef PDP_VERBOSE + dev_info(dev, "Reset planes\n"); +#endif + + pdp_apollo_set_plane_enabled(dev, pdp_reg, 0, false); +} + +void pdp_apollo_set_surface(struct device *dev, void __iomem *pdp_reg, + u32 plane, u32 address, + u32 posx, u32 posy, + u32 width, u32 height, u32 stride, + u32 format, u32 alpha, bool blend) +{ + u32 value; + +#ifdef PDP_VERBOSE + dev_info(dev, + "Set surface: size=%dx%d stride=%d format=%d address=0x%x\n", + width, height, stride, format, address); +#endif + + if (plane > 0) { + dev_err(dev, "Maximum of 1 plane is supported\n"); + return; + } + + /* Size & format */ + value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1SURF); + value = REG_VALUE_SET(value, width - 1, + STR1WIDTH_SHIFT, STR1WIDTH_MASK); + value = REG_VALUE_SET(value, height - 1, + STR1HEIGHT_SHIFT, STR1HEIGHT_MASK); + 
value = REG_VALUE_SET(value, format, + STR1PIXFMT_SHIFT, STR1PIXFMT_MASK); + pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1SURF, value); + /* Stride */ + value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_PDP_STR1POSN); + value = REG_VALUE_SET(value, + (stride >> DCPDP_STR1POSN_STRIDE_SHIFT) - 1, + STR1STRIDE_SHIFT, STR1STRIDE_MASK); + pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_PDP_STR1POSN, value); + /* Disable interlaced output */ + value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL); + value = REG_VALUE_SET(value, 0x0, + STR1INTFIELD_SHIFT, + STR1INTFIELD_MASK); + /* Frame buffer base address */ + value = REG_VALUE_SET(value, + address >> DCPDP_STR1ADDRCTRL_BASE_ADDR_SHIFT, + STR1BASE_SHIFT, STR1BASE_MASK); + pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL, value); +} + +void pdp_apollo_mode_set(struct device *dev, void __iomem *pdp_reg, + u32 h_display, u32 v_display, + u32 hbps, u32 ht, u32 has, + u32 hlbs, u32 hfps, u32 hrbs, + u32 vbps, u32 vt, u32 vas, + u32 vtbs, u32 vfps, u32 vbbs, + bool nhsync, bool nvsync) +{ + u32 value; + + dev_info(dev, "Set mode: %dx%d\n", h_display, v_display); +#ifdef PDP_VERBOSE + dev_info(dev, " ht: %d hbps %d has %d hlbs %d hfps %d hrbs %d\n", + ht, hbps, has, hlbs, hfps, hrbs); + dev_info(dev, " vt: %d vbps %d vas %d vtbs %d vfps %d vbbs %d\n", + vt, vbps, vas, vtbs, vfps, vbbs); +#endif + +#if 0 + /* I don't really know what this is doing but it was in the Android + * implementation (not in the Linux one). Seems not to be necessary + * though! 
+ */ + if (pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STRCTRL) + != 0x0000C010) { + /* Buffer request threshold */ + pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STRCTRL, + 0x00001C10); + } +#endif + + /* Border colour */ + value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_BORDCOL); + value = REG_VALUE_SET(value, 0x0, BORDCOL_SHIFT, BORDCOL_MASK); + pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_BORDCOL, value); + + /* Update control */ + value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_UPDCTRL); + value = REG_VALUE_SET(value, 0x0, UPDFIELD_SHIFT, UPDFIELD_MASK); + pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_UPDCTRL, value); + + /* Set hsync timings */ + value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC1); + value = REG_VALUE_SET(value, hbps, HBPS_SHIFT, HBPS_MASK); + value = REG_VALUE_SET(value, ht, HT_SHIFT, HT_MASK); + pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC1, value); + + value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC2); + value = REG_VALUE_SET(value, has, HAS_SHIFT, HAS_MASK); + value = REG_VALUE_SET(value, hlbs, HLBS_SHIFT, HLBS_MASK); + pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC2, value); + + value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC3); + value = REG_VALUE_SET(value, hfps, HFPS_SHIFT, HFPS_MASK); + value = REG_VALUE_SET(value, hrbs, HRBS_SHIFT, HRBS_MASK); + pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC3, value); + + /* Set vsync timings */ + value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC1); + value = REG_VALUE_SET(value, vbps, VBPS_SHIFT, VBPS_MASK); + value = REG_VALUE_SET(value, vt, VT_SHIFT, VT_MASK); + pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC1, value); + + value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC2); + value = REG_VALUE_SET(value, vas, VAS_SHIFT, VAS_MASK); + value = REG_VALUE_SET(value, vtbs, VTBS_SHIFT, VTBS_MASK); + pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC2, value); + + value = pdp_rreg32(pdp_reg, 
TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC3); + value = REG_VALUE_SET(value, vfps, VFPS_SHIFT, VFPS_MASK); + value = REG_VALUE_SET(value, vbbs, VBBS_SHIFT, VBBS_MASK); + pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC3, value); + + /* Horizontal data enable */ + value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HDECTRL); + value = REG_VALUE_SET(value, hlbs, HDES_SHIFT, HDES_MASK); + value = REG_VALUE_SET(value, hfps, HDEF_SHIFT, HDEF_MASK); + pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HDECTRL, value); + + /* Vertical data enable */ + value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VDECTRL); + value = REG_VALUE_SET(value, vtbs, VDES_SHIFT, VDES_MASK); + value = REG_VALUE_SET(value, vfps, VDEF_SHIFT, VDEF_MASK); + pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VDECTRL, value); + + /* Vertical event start and vertical fetch start */ + value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VEVENT); + value = REG_VALUE_SET(value, vbps, VFETCH_SHIFT, VFETCH_MASK); + value = REG_VALUE_SET(value, vfps, VEVENT_SHIFT, VEVENT_MASK); + pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VEVENT, value); + + /* Set up polarities of sync/blank */ + value = REG_VALUE_SET(0, 0x1, BLNKPOL_SHIFT, BLNKPOL_MASK); + + /* + * Enable this if you want vblnk1. You also need to change to vblnk1 + * in the interrupt handler. 
+ */ +#if 0 + value = REG_VALUE_SET(value, 0x1, FIELDPOL_SHIFT, FIELDPOL_MASK); +#endif + if (nhsync) + value = REG_VALUE_SET(value, 0x1, HSPOL_SHIFT, HSPOL_MASK); + if (nvsync) + value = REG_VALUE_SET(value, 0x1, VSPOL_SHIFT, VSPOL_MASK); + pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL, value); +} diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_apollo.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_apollo.h new file mode 100644 index 000000000000..2530393c510d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_apollo.h @@ -0,0 +1,89 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__PDP_APOLLO_H__) +#define __PDP_APOLLO_H__ + +#include +#include + +bool pdp_apollo_clocks_set(struct device *dev, + void __iomem *pdp_reg, void __iomem *pll_reg, + u32 clock_in_mhz, + void __iomem *odn_core_reg, + u32 hdisplay, u32 vdisplay); + +void pdp_apollo_set_updates_enabled(struct device *dev, void __iomem *pdp_reg, + bool enable); + +void pdp_apollo_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg, + bool enable); + +void pdp_apollo_set_powerdwn_enabled(struct device *dev, void __iomem *pdp_reg, + bool enable); + +void pdp_apollo_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg, + bool enable); + +bool pdp_apollo_check_and_clear_vblank(struct device *dev, + void __iomem *pdp_reg); + +void pdp_apollo_set_plane_enabled(struct device *dev, void __iomem *pdp_reg, + u32 plane, bool enable); + +void pdp_apollo_reset_planes(struct device *dev, void __iomem *pdp_reg); + +void pdp_apollo_set_surface(struct device *dev, void __iomem *pdp_reg, + u32 plane, u32 address, + u32 posx, u32 posy, + u32 width, u32 height, u32 stride, + u32 format, u32 alpha, bool blend); + +void pdp_apollo_mode_set(struct device *dev, void __iomem *pdp_reg, + u32 h_display, u32 v_display, + u32 hbps, u32 ht, u32 has, + u32 hlbs, u32 hfps, u32 hrbs, + u32 vbps, u32 vt, u32 vas, + u32 vtbs, u32 vfps, u32 vbbs, + bool nhsync, bool nvsync); + +#endif /* __PDP_APOLLO_H__ */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_common.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_common.h new file mode 100644 index 000000000000..a802b19f83d2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_common.h @@ -0,0 +1,103 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! 
+@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__PDP_COMMON_H__) +#define __PDP_COMMON_H__ + +#include + +/*#define PDP_VERBOSE*/ + +#define REG_VALUE_GET(v, s, m) \ + (u32)(((v) & (m)) >> (s)) +#define REG_VALUE_SET(v, b, s, m) \ + (u32)(((v) & (u32)~(m)) | (u32)(((b) << (s)) & (m))) +/* Active low */ +#define REG_VALUE_LO(v, b, s, m) \ + (u32)((v) & ~(u32)(((b) << (s)) & (m))) + +enum pdp_version { + PDP_VERSION_APOLLO, + PDP_VERSION_ODIN, + PDP_VERSION_PLATO, +}; + +enum pdp_odin_subversion { + PDP_ODIN_NONE = 0, + PDP_ODIN_ORION, +}; + +/* Register R-W */ +static inline u32 core_rreg32(void __iomem *base, resource_size_t reg) +{ + return ioread32(base + reg); +} + +static inline void core_wreg32(void __iomem *base, resource_size_t reg, + u32 value) +{ + iowrite32(value, base + reg); +} + +static inline u32 pdp_rreg32(void __iomem *base, resource_size_t reg) +{ + return ioread32(base + reg); +} + +static inline void pdp_wreg32(void __iomem *base, resource_size_t reg, + u32 value) +{ + iowrite32(value, base + reg); +} + +static inline u32 pll_rreg32(void __iomem *base, resource_size_t reg) +{ + return ioread32(base + reg); +} + +static inline void pll_wreg32(void __iomem *base, resource_size_t reg, + u32 value) +{ + iowrite32(value, base + reg); +} + +#endif /* __PDP_COMMON_H__ */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_odin.c 
b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_odin.c new file mode 100644 index 000000000000..511df5058965 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_odin.c @@ -0,0 +1,989 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include <linux/delay.h> + +#include "pdp_common.h" +#include "pdp_odin.h" +#include "odin_defs.h" +#include "odin_regs.h" +#include "orion_defs.h" +#include "orion_regs.h" + +#define ODIN_PLL_REG(n) ((n) - ODN_PDP_P_CLK_OUT_DIVIDER_REG1) + +struct odin_displaymode { + int w; /* display width */ + int h; /* display height */ + int id; /* pixel clock input divider */ + int m; /* pixel clock multiplier */ + int od1; /* pixel clock output divider */ + int od2; /* mem clock output divider */ +}; + +/* + * For Odin, only the listed modes below are supported. + * 1080p id=5, m=37, od1=5, od2=5 + * 720p id=5, m=37, od1=10, od2=5 + * 1280x1024 id=1, m=14, od1=13, od2=10 + * 1440x900 id=5, m=53, od1=10, od2=8 + * 1280x960 id=3, m=40, od1=13, od2=9 + * 1024x768 id=1, m=13, od1=20, od2=10 + * 800x600 id=2, m=20, od1=25, od2=7 + * 640x480 id=1, m=12, od1=48, od2=9 + * ... 
where id is the PDP_P_CLK input divider, + * m is PDP_P_CLK multiplier regs 1 to 3 + * od1 is PDP_P_clk output divider regs 1 to 3 + * od2 is PDP_M_clk output divider regs 1 to 2 + */ +static const struct odin_displaymode odin_modes[] = { + {.w = 1920, .h = 1080, .id = 5, .m = 37, .od1 = 5, .od2 = 5}, + {.w = 1280, .h = 720, .id = 5, .m = 37, .od1 = 10, .od2 = 5}, + {.w = 1280, .h = 1024, .id = 1, .m = 14, .od1 = 13, .od2 = 10}, + {.w = 1440, .h = 900, .id = 5, .m = 53, .od1 = 10, .od2 = 8}, + {.w = 1280, .h = 960, .id = 3, .m = 40, .od1 = 13, .od2 = 9}, + {.w = 1024, .h = 768, .id = 1, .m = 13, .od1 = 20, .od2 = 10}, + {.w = 800, .h = 600, .id = 2, .m = 20, .od1 = 25, .od2 = 7}, + {.w = 640, .h = 480, .id = 1, .m = 12, .od1 = 48, .od2 = 9}, + {.w = 0, .h = 0, .id = 0, .m = 0, .od1 = 0, .od2 = 0} +}; + +/* + * For Orion, only the listed modes below are supported. + * 1920x1080 mode is currently not supported. + */ +static const struct odin_displaymode orion_modes[] = { + {.w = 1280, .h = 720, .id = 5, .m = 37, .od1 = 10, .od2 = 7}, + {.w = 1280, .h = 1024, .id = 1, .m = 12, .od1 = 11, .od2 = 10}, + {.w = 1440, .h = 900, .id = 5, .m = 53, .od1 = 10, .od2 = 9}, + {.w = 1280, .h = 960, .id = 5, .m = 51, .od1 = 10, .od2 = 9}, + {.w = 1024, .h = 768, .id = 3, .m = 33, .od1 = 17, .od2 = 10}, + {.w = 800, .h = 600, .id = 2, .m = 24, .od1 = 31, .od2 = 12}, + {.w = 640, .h = 480, .id = 1, .m = 12, .od1 = 50, .od2 = 12}, + {.w = 0, .h = 0, .id = 0, .m = 0, .od1 = 0, .od2 = 0} +}; + +static const u32 GRPH_SURF_OFFSET[] = { + ODN_PDP_GRPH1SURF_OFFSET, + ODN_PDP_GRPH2SURF_OFFSET, + ODN_PDP_VID1SURF_OFFSET, + ODN_PDP_GRPH4SURF_OFFSET +}; +static const u32 GRPH_SURF_GRPH_PIXFMT_SHIFT[] = { + ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT, + ODN_PDP_GRPH2SURF_GRPH2PIXFMT_SHIFT, + ODN_PDP_VID1SURF_VID1PIXFMT_SHIFT, + ODN_PDP_GRPH4SURF_GRPH4PIXFMT_SHIFT +}; +static const u32 GRPH_SURF_GRPH_PIXFMT_MASK[] = { + ODN_PDP_GRPH1SURF_GRPH1PIXFMT_MASK, + ODN_PDP_GRPH2SURF_GRPH2PIXFMT_MASK, + 
ODN_PDP_VID1SURF_VID1PIXFMT_MASK, + ODN_PDP_GRPH4SURF_GRPH4PIXFMT_MASK +}; +static const u32 GRPH_GALPHA_OFFSET[] = { + ODN_PDP_GRPH1GALPHA_OFFSET, + ODN_PDP_GRPH2GALPHA_OFFSET, + ODN_PDP_VID1GALPHA_OFFSET, + ODN_PDP_GRPH4GALPHA_OFFSET +}; +static const u32 GRPH_GALPHA_GRPH_GALPHA_SHIFT[] = { + ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_SHIFT, + ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_SHIFT, + ODN_PDP_VID1GALPHA_VID1GALPHA_SHIFT, + ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_SHIFT +}; +static const u32 GRPH_GALPHA_GRPH_GALPHA_MASK[] = { + ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_MASK, + ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_MASK, + ODN_PDP_VID1GALPHA_VID1GALPHA_MASK, + ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_MASK +}; +static const u32 GRPH_CTRL_OFFSET[] = { + ODN_PDP_GRPH1CTRL_OFFSET, + ODN_PDP_GRPH2CTRL_OFFSET, + ODN_PDP_VID1CTRL_OFFSET, + ODN_PDP_GRPH4CTRL_OFFSET, +}; +static const u32 GRPH_CTRL_GRPH_BLEND_SHIFT[] = { + ODN_PDP_GRPH1CTRL_GRPH1BLEND_SHIFT, + ODN_PDP_GRPH2CTRL_GRPH2BLEND_SHIFT, + ODN_PDP_VID1CTRL_VID1BLEND_SHIFT, + ODN_PDP_GRPH4CTRL_GRPH4BLEND_SHIFT +}; +static const u32 GRPH_CTRL_GRPH_BLEND_MASK[] = { + ODN_PDP_GRPH1CTRL_GRPH1BLEND_MASK, + ODN_PDP_GRPH2CTRL_GRPH2BLEND_MASK, + ODN_PDP_VID1CTRL_VID1BLEND_MASK, + ODN_PDP_GRPH4CTRL_GRPH4BLEND_MASK +}; +static const u32 GRPH_CTRL_GRPH_BLENDPOS_SHIFT[] = { + ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_SHIFT, + ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_SHIFT, + ODN_PDP_VID1CTRL_VID1BLENDPOS_SHIFT, + ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_SHIFT +}; +static const u32 GRPH_CTRL_GRPH_BLENDPOS_MASK[] = { + ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_MASK, + ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_MASK, + ODN_PDP_VID1CTRL_VID1BLENDPOS_MASK, + ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_MASK +}; +static const u32 GRPH_CTRL_GRPH_STREN_SHIFT[] = { + ODN_PDP_GRPH1CTRL_GRPH1STREN_SHIFT, + ODN_PDP_GRPH2CTRL_GRPH2STREN_SHIFT, + ODN_PDP_VID1CTRL_VID1STREN_SHIFT, + ODN_PDP_GRPH4CTRL_GRPH4STREN_SHIFT +}; +static const u32 GRPH_CTRL_GRPH_STREN_MASK[] = { + ODN_PDP_GRPH1CTRL_GRPH1STREN_MASK, + ODN_PDP_GRPH2CTRL_GRPH2STREN_MASK, + 
ODN_PDP_VID1CTRL_VID1STREN_MASK, + ODN_PDP_GRPH4CTRL_GRPH4STREN_MASK +}; +static const u32 GRPH_POSN_OFFSET[] = { + ODN_PDP_GRPH1POSN_OFFSET, + ODN_PDP_GRPH2POSN_OFFSET, + ODN_PDP_VID1POSN_OFFSET, + ODN_PDP_GRPH4POSN_OFFSET +}; +static const u32 GRPH_POSN_GRPH_XSTART_SHIFT[] = { + ODN_PDP_GRPH1POSN_GRPH1XSTART_SHIFT, + ODN_PDP_GRPH2POSN_GRPH2XSTART_SHIFT, + ODN_PDP_VID1POSN_VID1XSTART_SHIFT, + ODN_PDP_GRPH4POSN_GRPH4XSTART_SHIFT, +}; +static const u32 GRPH_POSN_GRPH_XSTART_MASK[] = { + ODN_PDP_GRPH1POSN_GRPH1XSTART_MASK, + ODN_PDP_GRPH2POSN_GRPH2XSTART_MASK, + ODN_PDP_VID1POSN_VID1XSTART_MASK, + ODN_PDP_GRPH4POSN_GRPH4XSTART_MASK, +}; +static const u32 GRPH_POSN_GRPH_YSTART_SHIFT[] = { + ODN_PDP_GRPH1POSN_GRPH1YSTART_SHIFT, + ODN_PDP_GRPH2POSN_GRPH2YSTART_SHIFT, + ODN_PDP_VID1POSN_VID1YSTART_SHIFT, + ODN_PDP_GRPH4POSN_GRPH4YSTART_SHIFT, +}; +static const u32 GRPH_POSN_GRPH_YSTART_MASK[] = { + ODN_PDP_GRPH1POSN_GRPH1YSTART_MASK, + ODN_PDP_GRPH2POSN_GRPH2YSTART_MASK, + ODN_PDP_VID1POSN_VID1YSTART_MASK, + ODN_PDP_GRPH4POSN_GRPH4YSTART_MASK, +}; +static const u32 GRPH_SIZE_OFFSET[] = { + ODN_PDP_GRPH1SIZE_OFFSET, + ODN_PDP_GRPH2SIZE_OFFSET, + ODN_PDP_VID1SIZE_OFFSET, + ODN_PDP_GRPH4SIZE_OFFSET, +}; +static const u32 GRPH_SIZE_GRPH_WIDTH_SHIFT[] = { + ODN_PDP_GRPH1SIZE_GRPH1WIDTH_SHIFT, + ODN_PDP_GRPH2SIZE_GRPH2WIDTH_SHIFT, + ODN_PDP_VID1SIZE_VID1WIDTH_SHIFT, + ODN_PDP_GRPH4SIZE_GRPH4WIDTH_SHIFT +}; +static const u32 GRPH_SIZE_GRPH_WIDTH_MASK[] = { + ODN_PDP_GRPH1SIZE_GRPH1WIDTH_MASK, + ODN_PDP_GRPH2SIZE_GRPH2WIDTH_MASK, + ODN_PDP_VID1SIZE_VID1WIDTH_MASK, + ODN_PDP_GRPH4SIZE_GRPH4WIDTH_MASK +}; +static const u32 GRPH_SIZE_GRPH_HEIGHT_SHIFT[] = { + ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_SHIFT, + ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_SHIFT, + ODN_PDP_VID1SIZE_VID1HEIGHT_SHIFT, + ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_SHIFT +}; +static const u32 GRPH_SIZE_GRPH_HEIGHT_MASK[] = { + ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_MASK, + ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_MASK, + ODN_PDP_VID1SIZE_VID1HEIGHT_MASK, + 
ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_MASK +}; +static const u32 GRPH_STRIDE_OFFSET[] = { + ODN_PDP_GRPH1STRIDE_OFFSET, + ODN_PDP_GRPH2STRIDE_OFFSET, + ODN_PDP_VID1STRIDE_OFFSET, + ODN_PDP_GRPH4STRIDE_OFFSET +}; +static const u32 GRPH_STRIDE_GRPH_STRIDE_SHIFT[] = { + ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_SHIFT, + ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_SHIFT, + ODN_PDP_VID1STRIDE_VID1STRIDE_SHIFT, + ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_SHIFT +}; +static const u32 GRPH_STRIDE_GRPH_STRIDE_MASK[] = { + ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_MASK, + ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_MASK, + ODN_PDP_VID1STRIDE_VID1STRIDE_MASK, + ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_MASK +}; +static const u32 GRPH_INTERLEAVE_CTRL_OFFSET[] = { + ODN_PDP_GRPH1INTERLEAVE_CTRL_OFFSET, + ODN_PDP_GRPH2INTERLEAVE_CTRL_OFFSET, + ODN_PDP_VID1INTERLEAVE_CTRL_OFFSET, + ODN_PDP_GRPH4INTERLEAVE_CTRL_OFFSET +}; +static const u32 GRPH_INTERLEAVE_CTRL_GRPH_INTFIELD_SHIFT[] = { + ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SHIFT, + ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SHIFT, + ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SHIFT, + ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SHIFT +}; +static const u32 GRPH_INTERLEAVE_CTRL_GRPH_INTFIELD_MASK[] = { + ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_MASK, + ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_MASK, + ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_MASK, + ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_MASK +}; +static const u32 GRPH_BASEADDR_OFFSET[] = { + ODN_PDP_GRPH1BASEADDR_OFFSET, + ODN_PDP_GRPH2BASEADDR_OFFSET, + ODN_PDP_VID1BASEADDR_OFFSET, + ODN_PDP_GRPH4BASEADDR_OFFSET +}; + + +static void get_odin_clock_settings(u32 value, u32 *lo_time, u32 *hi_time, + u32 *no_count, u32 *edge) +{ + u32 lt, ht; + + /* If the value is 1, High Time & Low Time are both set to 1 + * and the NOCOUNT bit is set to 1. + */ + if (value == 1) { + *lo_time = 1; + *hi_time = 1; + + /* If od is an odd number then write 1 to NO_COUNT + * otherwise write 0. 
+ */ + *no_count = 1; + + /* If m is an odd number then write 1 to EDGE bit of MR2 + * otherwise write 0. + * If id is an odd number then write 1 to EDGE bit of ID + * otherwise write 0. + */ + *edge = 0; + return; + } + *no_count = 0; + + /* High Time & Low time is half the value listed for each PDP mode */ + lt = value>>1; + ht = lt; + + /* If the value is odd, Low Time is rounded up to nearest integer + * and High Time is rounded down, and Edge is set to 1. + */ + if (value & 1) { + lt++; + + /* If m is an odd number then write 1 to EDGE bit of MR2 + * otherwise write 0. + * If id is an odd number then write 1 to EDGE bit of ID + * otherwise write 0. + */ + *edge = 1; + + } else { + *edge = 0; + } + *hi_time = ht; + *lo_time = lt; +} + +static const struct odin_displaymode *get_odin_mode(int w, int h, + enum pdp_odin_subversion pv) +{ + struct odin_displaymode *pdp_modes; + int n = 0; + + if (pv == PDP_ODIN_ORION) + pdp_modes = (struct odin_displaymode *)orion_modes; + else + pdp_modes = (struct odin_displaymode *)odin_modes; + + do { + if ((pdp_modes[n].w == w) && (pdp_modes[n].h == h)) + return pdp_modes+n; + + } while (pdp_modes[n++].w); + + return NULL; +} + +bool pdp_odin_clocks_set(struct device *dev, + void __iomem *pdp_reg, void __iomem *pll_reg, + u32 clock_freq, + void __iomem *odn_core_reg, + u32 hdisplay, u32 vdisplay, + enum pdp_odin_subversion pdpsubv) +{ + u32 value; + const struct odin_displaymode *odispl; + u32 hi_time, lo_time, no_count, edge; + u32 core_id, core_rev; + + core_id = pdp_rreg32(pdp_reg, ODN_PDP_CORE_ID_OFFSET); + dev_info(dev, "Odin-PDP CORE_ID %08X\n", core_id); + + core_rev = pdp_rreg32(odn_core_reg, ODN_PDP_CORE_REV_OFFSET); + dev_info(dev, "Odin-PDP CORE_REV %08X\n", core_rev); + + odispl = get_odin_mode(hdisplay, vdisplay, pdpsubv); + if (!odispl) { + dev_err(dev, "Display mode not supported.\n"); + return false; + } + + /* + * The PDP uses a Xilinx clock that requires read + * modify write for all registers. 
+ * It is essential that only the specified bits are changed + * because other bits are in use. + * To change PDP clocks reset PDP & PDP mmcm (PLL) first, + * then apply changes and then un-reset mmcm & PDP. + * Warm reset will keep the changes. + * wr 0x000080 0x1f7 ; # reset pdp + * wr 0x000090 8 ; # reset pdp mmcm + * then apply clock changes, then + * wr 0x000090 0x0 ; # un-reset pdp mmcm + * wr 0x000080 0x1ff ; # un-reset pdp + */ + + /* + * Hold Odin PDP1 in reset while changing the clock regs. + * Set the PDP1 bit of ODN_CORE_INTERNAL_RESETN low to reset. + * set bit 3 to 0 (active low) + */ + if (pdpsubv == PDP_ODIN_ORION) { + value = core_rreg32(odn_core_reg, SRS_CORE_SOFT_RESETN); + value = REG_VALUE_LO(value, 1, SRS_SOFT_RESETN_PDP_SHIFT, + SRS_SOFT_RESETN_PDP_MASK); + core_wreg32(odn_core_reg, SRS_CORE_SOFT_RESETN, value); + } else { + value = core_rreg32(odn_core_reg, ODN_CORE_INTERNAL_RESETN); + value = REG_VALUE_LO(value, 1, ODN_INTERNAL_RESETN_PDP1_SHIFT, + ODN_INTERNAL_RESETN_PDP1_MASK); + core_wreg32(odn_core_reg, ODN_CORE_INTERNAL_RESETN, value); + } + + /* + * Hold the PDP MMCM in reset while changing the clock regs. + * Set the PDP1 bit of ODN_CORE_CLK_GEN_RESET high to reset. 
+ */ + value = core_rreg32(odn_core_reg, ODN_CORE_CLK_GEN_RESET); + value = REG_VALUE_SET(value, 0x1, + ODN_CLK_GEN_RESET_PDP_MMCM_SHIFT, + ODN_CLK_GEN_RESET_PDP_MMCM_MASK); + core_wreg32(odn_core_reg, ODN_CORE_CLK_GEN_RESET, value); + + /* Pixel clock Input divider */ + get_odin_clock_settings(odispl->id, &lo_time, &hi_time, + &no_count, &edge); + + value = pll_rreg32(pll_reg, + ODIN_PLL_REG(ODN_PDP_P_CLK_IN_DIVIDER_REG)); + value = REG_VALUE_SET(value, lo_time, + ODN_PDP_PCLK_IDIV_LO_TIME_SHIFT, + ODN_PDP_PCLK_IDIV_LO_TIME_MASK); + value = REG_VALUE_SET(value, hi_time, + ODN_PDP_PCLK_IDIV_HI_TIME_SHIFT, + ODN_PDP_PCLK_IDIV_HI_TIME_MASK); + value = REG_VALUE_SET(value, no_count, + ODN_PDP_PCLK_IDIV_NOCOUNT_SHIFT, + ODN_PDP_PCLK_IDIV_NOCOUNT_MASK); + value = REG_VALUE_SET(value, edge, + ODN_PDP_PCLK_IDIV_EDGE_SHIFT, + ODN_PDP_PCLK_IDIV_EDGE_MASK); + pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_P_CLK_IN_DIVIDER_REG), + value); + + /* Pixel clock Output divider */ + get_odin_clock_settings(odispl->od1, &lo_time, &hi_time, + &no_count, &edge); + + /* Pixel clock Output divider reg1 */ + value = pll_rreg32(pll_reg, + ODIN_PLL_REG(ODN_PDP_P_CLK_OUT_DIVIDER_REG1)); + value = REG_VALUE_SET(value, lo_time, + ODN_PDP_PCLK_ODIV1_LO_TIME_SHIFT, + ODN_PDP_PCLK_ODIV1_LO_TIME_MASK); + value = REG_VALUE_SET(value, hi_time, + ODN_PDP_PCLK_ODIV1_HI_TIME_SHIFT, + ODN_PDP_PCLK_ODIV1_HI_TIME_MASK); + pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_P_CLK_OUT_DIVIDER_REG1), + value); + + /* Pixel clock Output divider reg2 */ + value = pll_rreg32(pll_reg, + ODIN_PLL_REG(ODN_PDP_P_CLK_OUT_DIVIDER_REG2)); + value = REG_VALUE_SET(value, no_count, + ODN_PDP_PCLK_ODIV2_NOCOUNT_SHIFT, + ODN_PDP_PCLK_ODIV2_NOCOUNT_MASK); + value = REG_VALUE_SET(value, edge, + ODN_PDP_PCLK_ODIV2_EDGE_SHIFT, + ODN_PDP_PCLK_ODIV2_EDGE_MASK); + if (pdpsubv == PDP_ODIN_ORION) { + /* + * Fractional divide for PLL registers currently does not work + * on Sirius, as duly mentioned on the TRM. 
However, owing to + * what most likely is a design flaw in the RTL, the + * following register and a later one have their fractional + * divide fields set to values other than 0 by default, + * unlike on Odin. This prevents the PDP device from working + * on Orion + */ + value = REG_VALUE_LO(value, 0x1F, SRS_PDP_PCLK_ODIV2_FRAC_SHIFT, + SRS_PDP_PCLK_ODIV2_FRAC_MASK); + } + pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_P_CLK_OUT_DIVIDER_REG2), + value); + + /* Pixel clock Multiplier */ + get_odin_clock_settings(odispl->m, &lo_time, &hi_time, + &no_count, &edge); + + /* Pixel clock Multiplier reg1 */ + value = pll_rreg32(pll_reg, + ODIN_PLL_REG(ODN_PDP_P_CLK_MULTIPLIER_REG1)); + value = REG_VALUE_SET(value, lo_time, + ODN_PDP_PCLK_MUL1_LO_TIME_SHIFT, + ODN_PDP_PCLK_MUL1_LO_TIME_MASK); + value = REG_VALUE_SET(value, hi_time, + ODN_PDP_PCLK_MUL1_HI_TIME_SHIFT, + ODN_PDP_PCLK_MUL1_HI_TIME_MASK); + pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_P_CLK_MULTIPLIER_REG1), + value); + + /* Pixel clock Multiplier reg2 */ + value = pll_rreg32(pll_reg, + ODIN_PLL_REG(ODN_PDP_P_CLK_MULTIPLIER_REG2)); + value = REG_VALUE_SET(value, no_count, + ODN_PDP_PCLK_MUL2_NOCOUNT_SHIFT, + ODN_PDP_PCLK_MUL2_NOCOUNT_MASK); + value = REG_VALUE_SET(value, edge, + ODN_PDP_PCLK_MUL2_EDGE_SHIFT, + ODN_PDP_PCLK_MUL2_EDGE_MASK); + if (pdpsubv == PDP_ODIN_ORION) { + /* Zero out fractional divide fields */ + value = REG_VALUE_LO(value, 0x1F, SRS_PDP_PCLK_MUL2_FRAC_SHIFT, + SRS_PDP_PCLK_MUL2_FRAC_MASK); + } + pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_P_CLK_MULTIPLIER_REG2), + value); + + /* Mem clock Output divider */ + get_odin_clock_settings(odispl->od2, &lo_time, &hi_time, + &no_count, &edge); + + /* Mem clock Output divider reg1 */ + value = pll_rreg32(pll_reg, + ODIN_PLL_REG(ODN_PDP_M_CLK_OUT_DIVIDER_REG1)); + value = REG_VALUE_SET(value, lo_time, + ODN_PDP_MCLK_ODIV1_LO_TIME_SHIFT, + ODN_PDP_MCLK_ODIV1_LO_TIME_MASK); + value = REG_VALUE_SET(value, hi_time, + ODN_PDP_MCLK_ODIV1_HI_TIME_SHIFT, + 
ODN_PDP_MCLK_ODIV1_HI_TIME_MASK); + pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_M_CLK_OUT_DIVIDER_REG1), + value); + + /* Mem clock Output divider reg2 */ + value = pll_rreg32(pll_reg, + ODIN_PLL_REG(ODN_PDP_M_CLK_OUT_DIVIDER_REG2)); + value = REG_VALUE_SET(value, no_count, + ODN_PDP_MCLK_ODIV2_NOCOUNT_SHIFT, + ODN_PDP_MCLK_ODIV2_NOCOUNT_MASK); + value = REG_VALUE_SET(value, edge, + ODN_PDP_MCLK_ODIV2_EDGE_SHIFT, + ODN_PDP_MCLK_ODIV2_EDGE_MASK); + pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_M_CLK_OUT_DIVIDER_REG2), + value); + + /* + * Take the PDP MMCM out of reset. + * Set the PDP1 bit of ODN_CORE_CLK_GEN_RESET to 0. + */ + value = core_rreg32(odn_core_reg, ODN_CORE_CLK_GEN_RESET); + value = REG_VALUE_LO(value, 1, ODN_CLK_GEN_RESET_PDP_MMCM_SHIFT, + ODN_CLK_GEN_RESET_PDP_MMCM_MASK); + core_wreg32(odn_core_reg, ODN_CORE_CLK_GEN_RESET, value); + + /* + * Wait until MMCM_LOCK_STATUS_PDPP bit is '1' in register + * MMCM_LOCK_STATUS. Issue an error if this does not + * go to '1' within 500ms. + */ + { + int count; + bool locked = false; + + for (count = 0; count < 10; count++) { + value = core_rreg32(odn_core_reg, + ODN_CORE_MMCM_LOCK_STATUS); + if (value & ODN_MMCM_LOCK_STATUS_PDPP) { + locked = true; + break; + } + msleep(50); + } + + if (!locked) { + dev_err(dev, "The MMCM pll did not lock\n"); + return false; + } + } + + /* + * Take Odin-PDP1 out of reset: + * Set the PDP1 bit of ODN_CORE_INTERNAL_RESETN to 1. 
+ */ + if (pdpsubv == PDP_ODIN_ORION) { + value = core_rreg32(odn_core_reg, SRS_CORE_SOFT_RESETN); + value = REG_VALUE_SET(value, 1, SRS_SOFT_RESETN_PDP_SHIFT, + SRS_SOFT_RESETN_PDP_MASK); + core_wreg32(odn_core_reg, SRS_CORE_SOFT_RESETN, value); + } else { + value = core_rreg32(odn_core_reg, ODN_CORE_INTERNAL_RESETN); + value = REG_VALUE_SET(value, 1, ODN_INTERNAL_RESETN_PDP1_SHIFT, + ODN_INTERNAL_RESETN_PDP1_MASK); + core_wreg32(odn_core_reg, ODN_CORE_INTERNAL_RESETN, value); + } + + return true; +} + +void pdp_odin_set_updates_enabled(struct device *dev, void __iomem *pdp_reg, + bool enable) +{ + u32 value = enable ? + (1 << ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SHIFT | + 1 << ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SHIFT) : + 0x0; + +#ifdef PDP_VERBOSE + dev_info(dev, "Set updates: %s\n", enable ? "enable" : "disable"); +#endif + + pdp_wreg32(pdp_reg, ODN_PDP_REGISTER_UPDATE_CTRL_OFFSET, value); +} + +void pdp_odin_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg, + bool enable) +{ + u32 value; + +#ifdef PDP_VERBOSE + dev_info(dev, "Set syncgen: %s\n", enable ? "enable" : "disable"); +#endif + + value = pdp_rreg32(pdp_reg, ODN_PDP_SYNCCTRL_OFFSET); + + value = REG_VALUE_SET(value, + enable ? ODN_SYNC_GEN_ENABLE : ODN_SYNC_GEN_DISABLE, + ODN_PDP_SYNCCTRL_SYNCACTIVE_SHIFT, + ODN_PDP_SYNCCTRL_SYNCACTIVE_MASK); + + /* Invert the pixel clock */ + value = REG_VALUE_SET(value, ODN_PIXEL_CLOCK_INVERTED, + ODN_PDP_SYNCCTRL_CLKPOL_SHIFT, + ODN_PDP_SYNCCTRL_CLKPOL_MASK); + + /* Set the Horizontal Sync Polarity to active high */ + value = REG_VALUE_LO(value, ODN_HSYNC_POLARITY_ACTIVE_HIGH, + ODN_PDP_SYNCCTRL_HSPOL_SHIFT, + ODN_PDP_SYNCCTRL_HSPOL_MASK); + + pdp_wreg32(pdp_reg, ODN_PDP_SYNCCTRL_OFFSET, value); + + /* Check for underruns when the sync generator + * is being turned off. + */ + if (!enable) { + value = pdp_rreg32(pdp_reg, ODN_PDP_INTSTAT_OFFSET); + value &= ODN_PDP_INTSTAT_ALL_OURUN_MASK; + + if (value) { + dev_warn(dev, "underruns detected. 
status=0x%08X\n", + value); + } else { + dev_info(dev, "no underruns detected\n"); + } + } +} + +void pdp_odin_set_powerdwn_enabled(struct device *dev, void __iomem *pdp_reg, + bool enable) +{ + u32 value; + +#ifdef PDP_VERBOSE + dev_info(dev, "Set powerdwn: %s\n", enable ? "enable" : "disable"); +#endif + + value = pdp_rreg32(pdp_reg, ODN_PDP_SYNCCTRL_OFFSET); + + value = REG_VALUE_SET(value, enable ? 0x1 : 0x0, + ODN_PDP_SYNCCTRL_POWERDN_SHIFT, + ODN_PDP_SYNCCTRL_POWERDN_MASK); + + pdp_wreg32(pdp_reg, ODN_PDP_SYNCCTRL_OFFSET, value); +} + +void pdp_odin_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg, + bool enable) +{ + u32 value; + +#ifdef PDP_VERBOSE + dev_info(dev, "Set vblank: %s\n", enable ? "enable" : "disable"); +#endif + + pdp_wreg32(pdp_reg, ODN_PDP_INTCLR_OFFSET, ODN_PDP_INTCLR_ALL); + + value = pdp_rreg32(pdp_reg, ODN_PDP_INTENAB_OFFSET); + value = REG_VALUE_SET(value, enable ? 0x1 : 0x0, + ODN_PDP_INTENAB_INTEN_VBLNK0_SHIFT, + ODN_PDP_INTENAB_INTEN_VBLNK0_MASK); + value = enable ? (1 << ODN_PDP_INTENAB_INTEN_VBLNK0_SHIFT) : 0; + pdp_wreg32(pdp_reg, ODN_PDP_INTENAB_OFFSET, value); +} + +bool pdp_odin_check_and_clear_vblank(struct device *dev, + void __iomem *pdp_reg) +{ + u32 value; + + value = pdp_rreg32(pdp_reg, ODN_PDP_INTSTAT_OFFSET); + + if (REG_VALUE_GET(value, + ODN_PDP_INTSTAT_INTS_VBLNK0_SHIFT, + ODN_PDP_INTSTAT_INTS_VBLNK0_MASK)) { + pdp_wreg32(pdp_reg, ODN_PDP_INTCLR_OFFSET, + (1 << ODN_PDP_INTCLR_INTCLR_VBLNK0_SHIFT)); + + return true; + } + return false; +} + +void pdp_odin_set_plane_enabled(struct device *dev, void __iomem *pdp_reg, + u32 plane, bool enable) +{ + u32 value; + +#ifdef PDP_VERBOSE + dev_info(dev, "Set plane %u: %s\n", + plane, enable ? "enable" : "disable"); +#endif + + if (plane > 3) { + dev_err(dev, "Maximum of 4 planes are supported\n"); + return; + } + + value = pdp_rreg32(pdp_reg, GRPH_CTRL_OFFSET[plane]); + value = REG_VALUE_SET(value, enable ? 
0x1 : 0x0, + GRPH_CTRL_GRPH_STREN_SHIFT[plane], + GRPH_CTRL_GRPH_STREN_MASK[plane]); + pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[plane], value); +} + +void pdp_odin_reset_planes(struct device *dev, void __iomem *pdp_reg) +{ +#ifdef PDP_VERBOSE + dev_info(dev, "Reset planes\n"); +#endif + + pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[0], 0x00000000); + pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[1], 0x01000000); + pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[2], 0x02000000); + pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[3], 0x03000000); +} + +void pdp_odin_set_surface(struct device *dev, void __iomem *pdp_reg, + u32 plane, u32 address, + u32 posx, u32 posy, + u32 width, u32 height, u32 stride, + u32 format, u32 alpha, bool blend) +{ + /* + * Use a blender based on the plane number (this defines the Z + * ordering) + */ + static const int GRPH_BLEND_POS[] = { 0x0, 0x1, 0x2, 0x3 }; + u32 blend_mode; + u32 value; + +#ifdef PDP_VERBOSE + dev_info(dev, + "Set surface: plane=%d pos=%d:%d size=%dx%d stride=%d " + "format=%d alpha=%d address=0x%x\n", + plane, posx, posy, width, height, stride, + format, alpha, address); +#endif + + if (plane > 3) { + dev_err(dev, "Maximum of 4 planes are supported\n"); + return; + } + + if (address & 0xf) + dev_warn(dev, "The frame buffer address is not aligned\n"); + + /* Frame buffer base address */ + pdp_wreg32(pdp_reg, GRPH_BASEADDR_OFFSET[plane], address); + + /* Pos */ + value = REG_VALUE_SET(0x0, posx, + GRPH_POSN_GRPH_XSTART_SHIFT[plane], + GRPH_POSN_GRPH_XSTART_MASK[plane]); + value = REG_VALUE_SET(value, posy, + GRPH_POSN_GRPH_YSTART_SHIFT[plane], + GRPH_POSN_GRPH_YSTART_MASK[plane]); + pdp_wreg32(pdp_reg, GRPH_POSN_OFFSET[plane], value); + + /* Size */ + value = REG_VALUE_SET(0x0, width - 1, + GRPH_SIZE_GRPH_WIDTH_SHIFT[plane], + GRPH_SIZE_GRPH_WIDTH_MASK[plane]); + value = REG_VALUE_SET(value, height - 1, + GRPH_SIZE_GRPH_HEIGHT_SHIFT[plane], + GRPH_SIZE_GRPH_HEIGHT_MASK[plane]); + pdp_wreg32(pdp_reg, GRPH_SIZE_OFFSET[plane], value); + + /* Stride */ + value = 
REG_VALUE_SET(0x0, (stride >> 4) - 1, + GRPH_STRIDE_GRPH_STRIDE_SHIFT[plane], + GRPH_STRIDE_GRPH_STRIDE_MASK[plane]); + pdp_wreg32(pdp_reg, GRPH_STRIDE_OFFSET[plane], value); + + /* Interlace mode: progressive */ + value = REG_VALUE_SET(0x0, ODN_INTERLACE_DISABLE, + GRPH_INTERLEAVE_CTRL_GRPH_INTFIELD_SHIFT[plane], + GRPH_INTERLEAVE_CTRL_GRPH_INTFIELD_MASK[plane]); + pdp_wreg32(pdp_reg, GRPH_INTERLEAVE_CTRL_OFFSET[plane], value); + + /* Format */ + value = REG_VALUE_SET(0x0, format, + GRPH_SURF_GRPH_PIXFMT_SHIFT[plane], + GRPH_SURF_GRPH_PIXFMT_MASK[plane]); + pdp_wreg32(pdp_reg, GRPH_SURF_OFFSET[plane], value); + + /* Global alpha (0...1023) */ + value = REG_VALUE_SET(0x0, ((1024 * 256) / 255 * alpha) / 256, + GRPH_GALPHA_GRPH_GALPHA_SHIFT[plane], + GRPH_GALPHA_GRPH_GALPHA_MASK[plane]); + pdp_wreg32(pdp_reg, GRPH_GALPHA_OFFSET[plane], value); + value = pdp_rreg32(pdp_reg, GRPH_CTRL_OFFSET[plane]); + + /* Blend mode */ + if (blend) { + if (alpha != 255) + blend_mode = 0x2; /* 0b10 = global alpha blending */ + else + blend_mode = 0x3; /* 0b11 = pixel alpha blending */ + } else { + blend_mode = 0x0; /* 0b00 = no blending */ + } + value = REG_VALUE_SET(value, blend_mode, + GRPH_CTRL_GRPH_BLEND_SHIFT[plane], + GRPH_CTRL_GRPH_BLEND_MASK[plane]); + + /* Blend position */ + value = REG_VALUE_SET(value, GRPH_BLEND_POS[plane], + GRPH_CTRL_GRPH_BLENDPOS_SHIFT[plane], + GRPH_CTRL_GRPH_BLENDPOS_MASK[plane]); + pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[plane], value); +} + +void pdp_odin_mode_set(struct device *dev, void __iomem *pdp_reg, + u32 h_display, u32 v_display, + u32 hbps, u32 ht, u32 has, + u32 hlbs, u32 hfps, u32 hrbs, + u32 vbps, u32 vt, u32 vas, + u32 vtbs, u32 vfps, u32 vbbs, + bool nhsync, bool nvsync) +{ + u32 value; + + dev_info(dev, "Set mode: %dx%d\n", h_display, v_display); +#ifdef PDP_VERBOSE + dev_info(dev, " ht: %d hbps %d has %d hlbs %d hfps %d hrbs %d\n", + ht, hbps, has, hlbs, hfps, hrbs); + dev_info(dev, " vt: %d vbps %d vas %d vtbs %d vfps %d vbbs %d\n", + 
vt, vbps, vas, vtbs, vfps, vbbs); +#endif + + /* Border colour: 10bits per channel */ + pdp_wreg32(pdp_reg, ODN_PDP_BORDCOL_R_OFFSET, 0x0); + pdp_wreg32(pdp_reg, ODN_PDP_BORDCOL_GB_OFFSET, 0x0); + + /* Background: 10bits per channel */ + value = pdp_rreg32(pdp_reg, ODN_PDP_BGNDCOL_AR_OFFSET); + value = REG_VALUE_SET(value, 0x3ff, + ODN_PDP_BGNDCOL_AR_BGNDCOL_A_SHIFT, + ODN_PDP_BGNDCOL_AR_BGNDCOL_A_MASK); + value = REG_VALUE_SET(value, 0x0, + ODN_PDP_BGNDCOL_AR_BGNDCOL_R_SHIFT, + ODN_PDP_BGNDCOL_AR_BGNDCOL_R_MASK); + pdp_wreg32(pdp_reg, ODN_PDP_BGNDCOL_AR_OFFSET, value); + + value = pdp_rreg32(pdp_reg, ODN_PDP_BGNDCOL_GB_OFFSET); + value = REG_VALUE_SET(value, 0x0, + ODN_PDP_BGNDCOL_GB_BGNDCOL_G_SHIFT, + ODN_PDP_BGNDCOL_GB_BGNDCOL_G_MASK); + value = REG_VALUE_SET(value, 0x0, + ODN_PDP_BGNDCOL_GB_BGNDCOL_B_SHIFT, + ODN_PDP_BGNDCOL_GB_BGNDCOL_B_MASK); + pdp_wreg32(pdp_reg, ODN_PDP_BGNDCOL_GB_OFFSET, value); + pdp_wreg32(pdp_reg, ODN_PDP_BORDCOL_GB_OFFSET, 0x0); + + /* Update control */ + value = pdp_rreg32(pdp_reg, ODN_PDP_UPDCTRL_OFFSET); + value = REG_VALUE_SET(value, 0x0, + ODN_PDP_UPDCTRL_UPDFIELD_SHIFT, + ODN_PDP_UPDCTRL_UPDFIELD_MASK); + pdp_wreg32(pdp_reg, ODN_PDP_UPDCTRL_OFFSET, value); + + /* Horizontal timing */ + value = pdp_rreg32(pdp_reg, ODN_PDP_HSYNC1_OFFSET); + value = REG_VALUE_SET(value, hbps, + ODN_PDP_HSYNC1_HBPS_SHIFT, + ODN_PDP_HSYNC1_HBPS_MASK); + value = REG_VALUE_SET(value, ht, + ODN_PDP_HSYNC1_HT_SHIFT, + ODN_PDP_HSYNC1_HT_MASK); + pdp_wreg32(pdp_reg, ODN_PDP_HSYNC1_OFFSET, value); + + value = pdp_rreg32(pdp_reg, ODN_PDP_HSYNC2_OFFSET); + value = REG_VALUE_SET(value, has, + ODN_PDP_HSYNC2_HAS_SHIFT, + ODN_PDP_HSYNC2_HAS_MASK); + value = REG_VALUE_SET(value, hlbs, + ODN_PDP_HSYNC2_HLBS_SHIFT, + ODN_PDP_HSYNC2_HLBS_MASK); + pdp_wreg32(pdp_reg, ODN_PDP_HSYNC2_OFFSET, value); + + value = pdp_rreg32(pdp_reg, ODN_PDP_HSYNC3_OFFSET); + value = REG_VALUE_SET(value, hfps, + ODN_PDP_HSYNC3_HFPS_SHIFT, + ODN_PDP_HSYNC3_HFPS_MASK); + value = 
REG_VALUE_SET(value, hrbs, + ODN_PDP_HSYNC3_HRBS_SHIFT, + ODN_PDP_HSYNC3_HRBS_MASK); + pdp_wreg32(pdp_reg, ODN_PDP_HSYNC3_OFFSET, value); + + /* Vertical timing */ + value = pdp_rreg32(pdp_reg, ODN_PDP_VSYNC1_OFFSET); + value = REG_VALUE_SET(value, vbps, + ODN_PDP_VSYNC1_VBPS_SHIFT, + ODN_PDP_VSYNC1_VBPS_MASK); + value = REG_VALUE_SET(value, vt, + ODN_PDP_VSYNC1_VT_SHIFT, + ODN_PDP_VSYNC1_VT_MASK); + pdp_wreg32(pdp_reg, ODN_PDP_VSYNC1_OFFSET, value); + + value = pdp_rreg32(pdp_reg, ODN_PDP_VSYNC2_OFFSET); + value = REG_VALUE_SET(value, vas, + ODN_PDP_VSYNC2_VAS_SHIFT, + ODN_PDP_VSYNC2_VAS_MASK); + value = REG_VALUE_SET(value, vtbs, + ODN_PDP_VSYNC2_VTBS_SHIFT, + ODN_PDP_VSYNC2_VTBS_MASK); + pdp_wreg32(pdp_reg, ODN_PDP_VSYNC2_OFFSET, value); + + value = pdp_rreg32(pdp_reg, ODN_PDP_VSYNC3_OFFSET); + value = REG_VALUE_SET(value, vfps, + ODN_PDP_VSYNC3_VFPS_SHIFT, + ODN_PDP_VSYNC3_VFPS_MASK); + value = REG_VALUE_SET(value, vbbs, + ODN_PDP_VSYNC3_VBBS_SHIFT, + ODN_PDP_VSYNC3_VBBS_MASK); + pdp_wreg32(pdp_reg, ODN_PDP_VSYNC3_OFFSET, value); + + /* Horizontal data enable */ + value = pdp_rreg32(pdp_reg, ODN_PDP_HDECTRL_OFFSET); + value = REG_VALUE_SET(value, hlbs, + ODN_PDP_HDECTRL_HDES_SHIFT, + ODN_PDP_HDECTRL_HDES_MASK); + value = REG_VALUE_SET(value, hfps, + ODN_PDP_HDECTRL_HDEF_SHIFT, + ODN_PDP_HDECTRL_HDEF_MASK); + pdp_wreg32(pdp_reg, ODN_PDP_HDECTRL_OFFSET, value); + + /* Vertical data enable */ + value = pdp_rreg32(pdp_reg, ODN_PDP_VDECTRL_OFFSET); + value = REG_VALUE_SET(value, vtbs, + ODN_PDP_VDECTRL_VDES_SHIFT, + ODN_PDP_VDECTRL_VDES_MASK); + value = REG_VALUE_SET(value, vfps, + ODN_PDP_VDECTRL_VDEF_SHIFT, + ODN_PDP_VDECTRL_VDEF_MASK); + pdp_wreg32(pdp_reg, ODN_PDP_VDECTRL_OFFSET, value); + + /* Vertical event start and vertical fetch start */ + value = pdp_rreg32(pdp_reg, ODN_PDP_VEVENT_OFFSET); + value = REG_VALUE_SET(value, vbps, + ODN_PDP_VEVENT_VFETCH_SHIFT, + ODN_PDP_VEVENT_VFETCH_MASK); + pdp_wreg32(pdp_reg, ODN_PDP_VEVENT_OFFSET, value); + + /* Set up 
polarities of sync/blank */ + value = REG_VALUE_SET(0, 0x1, + ODN_PDP_SYNCCTRL_BLNKPOL_SHIFT, + ODN_PDP_SYNCCTRL_BLNKPOL_MASK); + if (nhsync) + value = REG_VALUE_SET(value, 0x1, + ODN_PDP_SYNCCTRL_HSPOL_SHIFT, + ODN_PDP_SYNCCTRL_HSPOL_MASK); + if (nvsync) + value = REG_VALUE_SET(value, 0x1, + ODN_PDP_SYNCCTRL_VSPOL_SHIFT, + ODN_PDP_SYNCCTRL_VSPOL_MASK); + pdp_wreg32(pdp_reg, ODN_PDP_SYNCCTRL_OFFSET, value); +} diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_odin.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_odin.h new file mode 100644 index 000000000000..c19a542c5de0 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_odin.h @@ -0,0 +1,94 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__PDP_ODIN_H__) +#define __PDP_ODIN_H__ + +#include +#include + +/* include here for ODN_PDP_SURF_PIXFMT_ARGB8888 as this is part of the API */ +#include "odin_pdp_regs.h" +#include "pdp_common.h" + +bool pdp_odin_clocks_set(struct device *dev, + void __iomem *pdp_reg, void __iomem *pll_reg, + u32 clock_freq, + void __iomem *odn_core_reg, + u32 hdisplay, u32 vdisplay, + enum pdp_odin_subversion); + +void pdp_odin_set_updates_enabled(struct device *dev, void __iomem *pdp_reg, + bool enable); + +void pdp_odin_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg, + bool enable); + +void pdp_odin_set_powerdwn_enabled(struct device *dev, void __iomem *pdp_reg, + bool enable); + +void pdp_odin_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg, + bool enable); + +bool pdp_odin_check_and_clear_vblank(struct device *dev, + void __iomem *pdp_reg); + +void pdp_odin_set_plane_enabled(struct device *dev, void __iomem *pdp_reg, + u32 plane, bool enable); + +void pdp_odin_reset_planes(struct device *dev, void __iomem *pdp_reg); + +void pdp_odin_set_surface(struct device *dev, void __iomem *pdp_reg, + u32 plane, u32 address, + u32 posx, u32 posy, + u32 width, u32 height, u32 stride, + u32 format, u32 alpha, bool blend); + +void pdp_odin_mode_set(struct device *dev, void __iomem *pdp_reg, + u32 h_display, u32 v_display, + u32 hbps, u32 ht, u32 has, + u32 hlbs, u32 hfps, u32 hrbs, + u32 vbps, u32 vt, u32 vas, + u32 vtbs, u32 vfps, u32 vbbs, + bool nhsync, bool nvsync); + +#endif /* __PDP_ODIN_H__ */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_plato.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_plato.c new file mode 100644 index 000000000000..d3e5c5cda820 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_plato.c @@ -0,0 +1,340 @@ +/* vi: set ts=8 sw=8 sts=8: */ 
+/*************************************************************************/ /*! +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "pdp_common.h" +#include "pdp_plato.h" +#include "pdp2_mmu_regs.h" +#include "pdp2_regs.h" + +#define PLATO_PDP_STRIDE_SHIFT 5 + + +void pdp_plato_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg, + bool enable) +{ + u32 value; + +#ifdef PDP_VERBOSE + dev_info(dev, "Set syncgen: %s\n", enable ? "enable" : "disable"); +#endif + + value = pdp_rreg32(pdp_reg, PDP_SYNCCTRL_OFFSET); + /* Starts Sync Generator. */ + value = REG_VALUE_SET(value, enable ? 0x1 : 0x0, + PDP_SYNCCTRL_SYNCACTIVE_SHIFT, + PDP_SYNCCTRL_SYNCACTIVE_MASK); + /* Controls polarity of pixel clock: Pixel clock is inverted */ + value = REG_VALUE_SET(value, 0x01, + PDP_SYNCCTRL_CLKPOL_SHIFT, + PDP_SYNCCTRL_CLKPOL_MASK); + pdp_wreg32(pdp_reg, PDP_SYNCCTRL_OFFSET, value); +} + +void pdp_plato_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg, + bool enable) +{ + u32 value; + +#ifdef PDP_VERBOSE + dev_info(dev, "Set vblank: %s\n", enable ? "enable" : "disable"); +#endif + + pdp_wreg32(pdp_reg, PDP_INTCLR_OFFSET, 0xFFFFFFFF); + + value = pdp_rreg32(pdp_reg, PDP_INTENAB_OFFSET); + value = REG_VALUE_SET(value, enable ? 
0x1 : 0x0, + PDP_INTENAB_INTEN_VBLNK0_SHIFT, + PDP_INTENAB_INTEN_VBLNK0_MASK); + pdp_wreg32(pdp_reg, PDP_INTENAB_OFFSET, value); +} + +bool pdp_plato_check_and_clear_vblank(struct device *dev, + void __iomem *pdp_reg) +{ + u32 value; + + value = pdp_rreg32(pdp_reg, PDP_INTSTAT_OFFSET); + + if (REG_VALUE_GET(value, + PDP_INTSTAT_INTS_VBLNK0_SHIFT, + PDP_INTSTAT_INTS_VBLNK0_MASK)) { + pdp_wreg32(pdp_reg, PDP_INTCLR_OFFSET, + (1 << PDP_INTCLR_INTCLR_VBLNK0_SHIFT)); + return true; + } + + return false; +} + +void pdp_plato_set_plane_enabled(struct device *dev, void __iomem *pdp_reg, + u32 plane, bool enable) +{ + u32 value; + +#ifdef PDP_VERBOSE + dev_info(dev, "Set plane %u: %s\n", + plane, enable ? "enable" : "disable"); +#endif + value = pdp_rreg32(pdp_reg, PDP_GRPH1CTRL_OFFSET); + value = REG_VALUE_SET(value, enable ? 0x1 : 0x0, + PDP_GRPH1CTRL_GRPH1STREN_SHIFT, + PDP_GRPH1CTRL_GRPH1STREN_MASK); + pdp_wreg32(pdp_reg, PDP_GRPH1CTRL_OFFSET, value); +} + +void pdp_plato_set_surface(struct device *dev, + void __iomem *pdp_reg, void __iomem *pdp_bif_reg, + u32 plane, u64 address, + u32 posx, u32 posy, + u32 width, u32 height, u32 stride, + u32 format, u32 alpha, bool blend) +{ + u32 value; + +#ifdef PDP_VERBOSE + dev_info(dev, + "Set surface: size=%dx%d stride=%d format=%d address=0x%llx\n", + width, height, stride, format, address); +#endif + + pdp_wreg32(pdp_reg, PDP_REGISTER_UPDATE_CTRL_OFFSET, 0x0); + /* + * Set the offset position to (0,0) as we've already added any offset + * to the base address. + */ + pdp_wreg32(pdp_reg, PDP_GRPH1POSN_OFFSET, 0); + + /* Set the frame buffer base address */ + if (address & 0xF) + dev_warn(dev, "The frame buffer address is not aligned\n"); + + pdp_wreg32(pdp_reg, PDP_GRPH1BASEADDR_OFFSET, + (u32)address & PDP_GRPH1BASEADDR_GRPH1BASEADDR_MASK); + + /* + * Write 8 msb of the address to address extension bits in the PDP + * MMU control register. 
+ */ + value = pdp_rreg32(pdp_bif_reg, PDP_BIF_ADDRESS_CONTROL_OFFSET); + value = REG_VALUE_SET(value, address >> 32, + PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_SHIFT, + PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_MASK); + value = REG_VALUE_SET(value, 0x00, + PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_SHIFT, + PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_MASK); + value = REG_VALUE_SET(value, 0x01, + PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_SHIFT, + PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_MASK); + pdp_wreg32(pdp_bif_reg, PDP_BIF_ADDRESS_CONTROL_OFFSET, value); + + /* Set the framebuffer pixel format */ + value = pdp_rreg32(pdp_reg, PDP_GRPH1SURF_OFFSET); + value = REG_VALUE_SET(value, format, + PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT, + PDP_GRPH1SURF_GRPH1PIXFMT_MASK); + pdp_wreg32(pdp_reg, PDP_GRPH1SURF_OFFSET, value); + /* + * Set the framebuffer size (this might be smaller than the resolution) + */ + value = REG_VALUE_SET(0, width - 1, + PDP_GRPH1SIZE_GRPH1WIDTH_SHIFT, + PDP_GRPH1SIZE_GRPH1WIDTH_MASK); + value = REG_VALUE_SET(value, height - 1, + PDP_GRPH1SIZE_GRPH1HEIGHT_SHIFT, + PDP_GRPH1SIZE_GRPH1HEIGHT_MASK); + pdp_wreg32(pdp_reg, PDP_GRPH1SIZE_OFFSET, value); + + /* Set the framebuffer stride in 16byte words */ + value = REG_VALUE_SET(0, (stride >> PLATO_PDP_STRIDE_SHIFT) - 1, + PDP_GRPH1STRIDE_GRPH1STRIDE_SHIFT, + PDP_GRPH1STRIDE_GRPH1STRIDE_MASK); + pdp_wreg32(pdp_reg, PDP_GRPH1STRIDE_OFFSET, value); + + /* Enable the register writes on the next vblank */ + pdp_wreg32(pdp_reg, PDP_REGISTER_UPDATE_CTRL_OFFSET, 0x3); + + /* + * Issues with NoC sending interleaved read responses to PDP require + * burst to be 1. 
+ */ + value = REG_VALUE_SET(0, 0x02, + PDP_MEMCTRL_MEMREFRESH_SHIFT, + PDP_MEMCTRL_MEMREFRESH_MASK); + value = REG_VALUE_SET(value, 0x01, + PDP_MEMCTRL_BURSTLEN_SHIFT, + PDP_MEMCTRL_BURSTLEN_MASK); + pdp_wreg32(pdp_reg, PDP_MEMCTRL_OFFSET, value); +} + +void pdp_plato_mode_set(struct device *dev, void __iomem *pdp_reg, + u32 h_display, u32 v_display, + u32 hbps, u32 ht, u32 has, + u32 hlbs, u32 hfps, u32 hrbs, + u32 vbps, u32 vt, u32 vas, + u32 vtbs, u32 vfps, u32 vbbs, + bool nhsync, bool nvsync) +{ + u32 value; + + dev_info(dev, "Set mode: %dx%d\n", h_display, v_display); +#ifdef PDP_VERBOSE + dev_info(dev, " ht: %d hbps %d has %d hlbs %d hfps %d hrbs %d\n", + ht, hbps, has, hlbs, hfps, hrbs); + dev_info(dev, " vt: %d vbps %d vas %d vtbs %d vfps %d vbbs %d\n", + vt, vbps, vas, vtbs, vfps, vbbs); +#endif + + /* Update control */ + value = pdp_rreg32(pdp_reg, PDP_REGISTER_UPDATE_CTRL_OFFSET); + value = REG_VALUE_SET(value, 0x0, + PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SHIFT, + PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_MASK); + pdp_wreg32(pdp_reg, PDP_REGISTER_UPDATE_CTRL_OFFSET, value); + + /* Set hsync timings */ + value = pdp_rreg32(pdp_reg, PDP_HSYNC1_OFFSET); + value = REG_VALUE_SET(value, hbps, + PDP_HSYNC1_HBPS_SHIFT, + PDP_HSYNC1_HBPS_MASK); + value = REG_VALUE_SET(value, ht, + PDP_HSYNC1_HT_SHIFT, + PDP_HSYNC1_HT_MASK); + pdp_wreg32(pdp_reg, PDP_HSYNC1_OFFSET, value); + + value = pdp_rreg32(pdp_reg, PDP_HSYNC2_OFFSET); + value = REG_VALUE_SET(value, has, + PDP_HSYNC2_HAS_SHIFT, + PDP_HSYNC2_HAS_MASK); + value = REG_VALUE_SET(value, hlbs, + PDP_HSYNC2_HLBS_SHIFT, + PDP_HSYNC2_HLBS_MASK); + pdp_wreg32(pdp_reg, PDP_HSYNC2_OFFSET, value); + + value = pdp_rreg32(pdp_reg, PDP_HSYNC3_OFFSET); + value = REG_VALUE_SET(value, hfps, + PDP_HSYNC3_HFPS_SHIFT, + PDP_HSYNC3_HFPS_MASK); + value = REG_VALUE_SET(value, hrbs, + PDP_HSYNC3_HRBS_SHIFT, + PDP_HSYNC3_HRBS_MASK); + pdp_wreg32(pdp_reg, PDP_HSYNC3_OFFSET, value); + + /* Set vsync timings */ + value = 
pdp_rreg32(pdp_reg, PDP_VSYNC1_OFFSET); + value = REG_VALUE_SET(value, vbps, + PDP_VSYNC1_VBPS_SHIFT, + PDP_VSYNC1_VBPS_MASK); + value = REG_VALUE_SET(value, vt, + PDP_VSYNC1_VT_SHIFT, + PDP_VSYNC1_VT_MASK); + pdp_wreg32(pdp_reg, PDP_VSYNC1_OFFSET, value); + + value = pdp_rreg32(pdp_reg, PDP_VSYNC2_OFFSET); + value = REG_VALUE_SET(value, vas, + PDP_VSYNC2_VAS_SHIFT, + PDP_VSYNC2_VAS_MASK); + value = REG_VALUE_SET(value, vtbs, + PDP_VSYNC2_VTBS_SHIFT, + PDP_VSYNC2_VTBS_MASK); + pdp_wreg32(pdp_reg, PDP_VSYNC2_OFFSET, value); + + value = pdp_rreg32(pdp_reg, PDP_VSYNC3_OFFSET); + value = REG_VALUE_SET(value, vfps, + PDP_VSYNC3_VFPS_SHIFT, + PDP_VSYNC3_VFPS_MASK); + value = REG_VALUE_SET(value, vbbs, + PDP_VSYNC3_VBBS_SHIFT, + PDP_VSYNC3_VBBS_MASK); + pdp_wreg32(pdp_reg, PDP_VSYNC3_OFFSET, value); + + /* Horizontal data enable */ + value = pdp_rreg32(pdp_reg, PDP_HDECTRL_OFFSET); + value = REG_VALUE_SET(value, has, + PDP_HDECTRL_HDES_SHIFT, + PDP_HDECTRL_HDES_MASK); + value = REG_VALUE_SET(value, hrbs, + PDP_HDECTRL_HDEF_SHIFT, + PDP_HDECTRL_HDEF_MASK); + pdp_wreg32(pdp_reg, PDP_HDECTRL_OFFSET, value); + + /* Vertical data enable */ + value = pdp_rreg32(pdp_reg, PDP_VDECTRL_OFFSET); + value = REG_VALUE_SET(value, vtbs, /* XXX: we're setting this to VAS */ + PDP_VDECTRL_VDES_SHIFT, + PDP_VDECTRL_VDES_MASK); + value = REG_VALUE_SET(value, vfps, /* XXX: set to VBBS */ + PDP_VDECTRL_VDEF_SHIFT, + PDP_VDECTRL_VDEF_MASK); + pdp_wreg32(pdp_reg, PDP_VDECTRL_OFFSET, value); + + /* Vertical event start and vertical fetch start */ + value = 0; + value = REG_VALUE_SET(value, 0, + PDP_VEVENT_VEVENT_SHIFT, + PDP_VEVENT_VEVENT_MASK); + value = REG_VALUE_SET(value, vbps, + PDP_VEVENT_VFETCH_SHIFT, + PDP_VEVENT_VFETCH_MASK); + value = REG_VALUE_SET(value, vfps, + PDP_VEVENT_VEVENT_SHIFT, + PDP_VEVENT_VEVENT_MASK); + pdp_wreg32(pdp_reg, PDP_VEVENT_OFFSET, value); + + /* Set up polarities of sync/blank */ + value = REG_VALUE_SET(0, 0x1, + PDP_SYNCCTRL_BLNKPOL_SHIFT, + 
PDP_SYNCCTRL_BLNKPOL_MASK); + + if (nhsync) + value = REG_VALUE_SET(value, 0x1, + PDP_SYNCCTRL_HSPOL_SHIFT, + PDP_SYNCCTRL_HSPOL_MASK); + + if (nvsync) + value = REG_VALUE_SET(value, 0x1, + PDP_SYNCCTRL_VSPOL_SHIFT, + PDP_SYNCCTRL_VSPOL_MASK); + + pdp_wreg32(pdp_reg, + PDP_SYNCCTRL_OFFSET, + value); +} diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_plato.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_plato.h new file mode 100644 index 000000000000..028c2e45e6f1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/pdp_plato.h @@ -0,0 +1,87 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__PDP_PLATO_H__) +#define __PDP_PLATO_H__ + +#include +#include + +#define PLATO_PDP_PIXEL_FORMAT_G (0x00) +#define PLATO_PDP_PIXEL_FORMAT_ARGB4 (0x04) +#define PLATO_PDP_PIXEL_FORMAT_ARGB1555 (0x05) +#define PLATO_PDP_PIXEL_FORMAT_RGB8 (0x06) +#define PLATO_PDP_PIXEL_FORMAT_RGB565 (0x07) +#define PLATO_PDP_PIXEL_FORMAT_ARGB8 (0x08) +#define PLATO_PDP_PIXEL_FORMAT_AYUV8 (0x10) +#define PLATO_PDP_PIXEL_FORMAT_YUV10 (0x15) +#define PLATO_PDP_PIXEL_FORMAT_RGBA8 (0x16) + + +void pdp_plato_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg, + bool enable); + +void pdp_plato_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg, + bool enable); + +bool pdp_plato_check_and_clear_vblank(struct device *dev, + void __iomem *pdp_reg); + +void pdp_plato_set_plane_enabled(struct device *dev, void __iomem *pdp_reg, + u32 plane, bool enable); + +void pdp_plato_set_surface(struct device *dev, + void __iomem *pdp_reg, void __iomem *pdp_bif_reg, + u32 plane, u64 address, + u32 posx, u32 posy, + u32 width, u32 height, u32 stride, + u32 format, u32 alpha, bool blend); + +void pdp_plato_mode_set(struct device *dev, void __iomem *pdp_reg, + u32 h_display, u32 v_display, + u32 hbps, u32 ht, u32 has, + u32 hlbs, u32 hfps, u32 hrbs, + u32 vbps, u32 vt, u32 vas, + u32 vtbs, u32 vfps, u32 vbbs, + bool nhsync, bool nvsync); + +#endif /* __PDP_PLATO_H__ */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_apollo.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_apollo.c new file mode 100644 index 000000000000..2addaebc5ea7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_apollo.c @@ -0,0 +1,1411 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* + * This is a device driver for the apollo testchip framework. It creates + * platform devices for the pdp and ext sub-devices, and exports functions to + * manage the shared interrupt handling + */ + +#include +#include +#include +#include +#include +#include + +#include "tc_drv_internal.h" +#include "tc_apollo.h" +#include "tc_ion.h" + +#include "apollo_regs.h" +#include "tcf_clk_ctrl.h" +#include "tcf_pll.h" + +#if defined(SUPPORT_APOLLO_FPGA) +#include "tc_apollo_debugfs.h" +#endif /* defined(SUPPORT_APOLLO_FPGA) */ + +#define TC_INTERRUPT_FLAG_PDP (1 << PDP1_INT_SHIFT) +#define TC_INTERRUPT_FLAG_EXT (1 << EXT_INT_SHIFT) + +#define PCI_VENDOR_ID_POWERVR 0x1010 +#define DEVICE_ID_PCI_APOLLO_FPGA 0x1CF1 +#define DEVICE_ID_PCIE_APOLLO_FPGA 0x1CF2 + +#define APOLLO_MEM_PCI_BASENUM (2) + +static struct { + struct thermal_zone_device *thermal_zone; + +#if defined(SUPPORT_APOLLO_FPGA) + struct tc_io_region fpga; + struct apollo_debugfs_fpga_entries fpga_entries; +#endif +} apollo_pdata; + +#if defined(SUPPORT_APOLLO_FPGA) + +#define APOLLO_DEVICE_NAME_FPGA "apollo_fpga" + +struct apollo_fpga_platform_data { + /* The testchip memory mode (LMA, HOST or HYBRID) */ + int mem_mode; + + resource_size_t tc_memory_base; + + resource_size_t pdp_heap_memory_base; + resource_size_t pdp_heap_memory_size; +}; + +#endif /* defined(SUPPORT_APOLLO_FPGA) */ + +static void 
spi_write(struct tc_device *tc, u32 off, u32 val) +{ + iowrite32(off, tc->tcf.registers + + TCF_CLK_CTRL_TCF_SPI_MST_ADDR_RDNWR); + iowrite32(val, tc->tcf.registers + + TCF_CLK_CTRL_TCF_SPI_MST_WDATA); + iowrite32(TCF_SPI_MST_GO_MASK, tc->tcf.registers + + TCF_CLK_CTRL_TCF_SPI_MST_GO); + udelay(1000); +} + +static int spi_read(struct tc_device *tc, u32 off, u32 *val) +{ + int cnt = 0; + u32 spi_mst_status; + + iowrite32(0x40000 | off, tc->tcf.registers + + TCF_CLK_CTRL_TCF_SPI_MST_ADDR_RDNWR); + iowrite32(TCF_SPI_MST_GO_MASK, tc->tcf.registers + + TCF_CLK_CTRL_TCF_SPI_MST_GO); + + udelay(100); + + do { + spi_mst_status = ioread32(tc->tcf.registers + + TCF_CLK_CTRL_TCF_SPI_MST_STATUS); + + if (cnt++ > 10000) { + dev_err(&tc->pdev->dev, + "%s: Time out reading SPI reg (0x%x)\n", + __func__, off); + return -1; + } + + } while (spi_mst_status != 0x08); + + *val = ioread32(tc->tcf.registers + + TCF_CLK_CTRL_TCF_SPI_MST_RDATA); + + return 0; +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)) +static int apollo_thermal_get_temp(struct thermal_zone_device *thermal, + unsigned long *t) +#else +static int apollo_thermal_get_temp(struct thermal_zone_device *thermal, + int *t) +#endif +{ + struct tc_device *tc; + int err = -ENODEV; + u32 tmp; + + if (!thermal) + goto err_out; + + tc = (struct tc_device *)thermal->devdata; + + if (!tc) + goto err_out; + + if (spi_read(tc, TCF_TEMP_SENSOR_SPI_OFFSET, &tmp)) { + dev_err(&tc->pdev->dev, + "Failed to read apollo temperature sensor\n"); + + goto err_out; + } + + /* Report this in millidegree Celsius */ + *t = TCF_TEMP_SENSOR_TO_C(tmp) * 1000; + + err = 0; + +err_out: + return err; +} + +static struct thermal_zone_device_ops apollo_thermal_dev_ops = { + .get_temp = apollo_thermal_get_temp, +}; + +#if defined(SUPPORT_RGX) + +static void pll_write_reg(struct tc_device *tc, + resource_size_t reg_offset, u32 reg_value) +{ + BUG_ON(reg_offset < TCF_PLL_PLL_CORE_CLK0); + BUG_ON(reg_offset > tc->tcf_pll.region.size + + 
TCF_PLL_PLL_CORE_CLK0 - 4); + + /* Tweak the offset because we haven't mapped the full pll region */ + iowrite32(reg_value, tc->tcf_pll.registers + + reg_offset - TCF_PLL_PLL_CORE_CLK0); +} + +static u32 sai_read_es2(struct tc_device *tc, u32 addr) +{ + iowrite32(0x200 | addr, tc->tcf.registers + 0x300); + iowrite32(0x1 | addr, tc->tcf.registers + 0x318); + return ioread32(tc->tcf.registers + 0x310); +} + +static int apollo_align_interface_es2(struct tc_device *tc) +{ + u32 reg = 0; + u32 reg_reset_n; + int reset_cnt = 0; + int err = -EFAULT; + bool aligned = false; + + /* Try to enable the core clock PLL */ + spi_write(tc, 0x1, 0x0); + reg = ioread32(tc->tcf.registers + 0x320); + reg |= 0x1; + iowrite32(reg, tc->tcf.registers + 0x320); + reg &= 0xfffffffe; + iowrite32(reg, tc->tcf.registers + 0x320); + msleep(1000); + + if (spi_read(tc, 0x2, ®)) { + dev_err(&tc->pdev->dev, + "Unable to read PLL status\n"); + goto err_out; + } + + if (reg == 0x1) { + /* Select DUT PLL as core clock */ + reg = ioread32(tc->tcf.registers + + TCF_CLK_CTRL_DUT_CONTROL_1); + reg &= 0xfffffff7; + iowrite32(reg, tc->tcf.registers + + TCF_CLK_CTRL_DUT_CONTROL_1); + } else { + dev_err(&tc->pdev->dev, + "PLL has failed to lock, status = %x\n", reg); + goto err_out; + } + + reg_reset_n = ioread32(tc->tcf.registers + + TCF_CLK_CTRL_CLK_AND_RST_CTRL); + + while (!aligned && reset_cnt < 10 && + tc->version != APOLLO_VERSION_TCF_5) { + int bank; + u32 eyes; + u32 clk_taps; + u32 train_ack; + + ++reset_cnt; + + /* Reset the DUT to allow the SAI to retrain */ + reg_reset_n &= ~(0x1 << DUT_RESETN_SHIFT); + iowrite32(reg_reset_n, tc->tcf.registers + + TCF_CLK_CTRL_CLK_AND_RST_CTRL); + udelay(100); + reg_reset_n |= (0x1 << DUT_RESETN_SHIFT); + iowrite32(reg_reset_n, tc->tcf.registers + + TCF_CLK_CTRL_CLK_AND_RST_CTRL); + udelay(100); + + /* Assume alignment passed, if any bank fails on either DUT or + * FPGA we will set this to false and try again for a max of 10 + * times. 
+ */ + aligned = true; + + /* For each of the banks */ + for (bank = 0; bank < 10; bank++) { + int bank_aligned = 0; + /* Check alignment on the DUT */ + u32 bank_base = 0x7000 + (0x1000 * bank); + + spi_read(tc, bank_base + 0x4, &eyes); + spi_read(tc, bank_base + 0x3, &clk_taps); + spi_read(tc, bank_base + 0x6, &train_ack); + + bank_aligned = tc_is_interface_aligned( + eyes, clk_taps, train_ack); + if (!bank_aligned) { + dev_warn(&tc->pdev->dev, + "Alignment check failed, retrying\n"); + aligned = false; + break; + } + + /* Check alignment on the FPGA */ + bank_base = 0xb0 + (0x10 * bank); + + eyes = sai_read_es2(tc, bank_base + 0x4); + clk_taps = sai_read_es2(tc, bank_base + 0x3); + train_ack = sai_read_es2(tc, bank_base + 0x6); + + bank_aligned = tc_is_interface_aligned( + eyes, clk_taps, train_ack); + + if (!bank_aligned) { + dev_warn(&tc->pdev->dev, + "Alignment check failed, retrying\n"); + aligned = false; + break; + } + } + } + + if (!aligned) { + dev_err(&tc->pdev->dev, "Unable to initialise the testchip (interface alignment failure), please restart the system.\n"); + /* We are not returning an error here, cause VP doesn't + * implement the necessary registers although they claim to be + * TC compatible. */ + } + + if (reset_cnt > 1) { + dev_dbg(&tc->pdev->dev, "Note: The testchip required more than one reset to find a good interface alignment!\n"); + dev_dbg(&tc->pdev->dev, " This should be harmless, but if you do suspect foul play, please reset the machine.\n"); + dev_dbg(&tc->pdev->dev, " If you continue to see this message you may want to report it to PowerVR Verification Platforms.\n"); + } + + err = 0; +err_out: + return err; +} + +static void apollo_set_clocks(struct tc_device *tc, + int core_clock, int mem_clock, int sys_clock) +{ + u32 val; + + /* This is disabled for TCF2 since the current FPGA builds do not + * like their core clocks being set (it takes apollo down). 
+ */ + if (tc->version != APOLLO_VERSION_TCF_2) { + val = core_clock / 1000000; + pll_write_reg(tc, TCF_PLL_PLL_CORE_CLK0, val); + + val = 0x1 << PLL_CORE_DRP_GO_SHIFT; + pll_write_reg(tc, TCF_PLL_PLL_CORE_DRP_GO, val); + } + + val = mem_clock / 1000000; + pll_write_reg(tc, TCF_PLL_PLL_MEMIF_CLK0, val); + + val = 0x1 << PLL_MEM_DRP_GO_SHIFT; + pll_write_reg(tc, TCF_PLL_PLL_MEM_DRP_GO, val); + + if (tc->version == APOLLO_VERSION_TCF_5) { + val = sys_clock / 1000000; + pll_write_reg(tc, TCF_PLL_PLL_SYSIF_CLK0, val); + + val = 0x1 << PLL_MEM_DRP_GO_SHIFT; + pll_write_reg(tc, TCF_PLL_PLL_SYS_DRP_GO, val); + } + + dev_info(&tc->pdev->dev, "Setting clocks to %uMHz/%uMHz\n", + core_clock / 1000000, + mem_clock / 1000000); + udelay(400); +} + +static void apollo_set_mem_latency(struct tc_device *tc, + int mem_latency, int mem_wresp_latency) +{ + u32 regval = 0; + + if (mem_latency <= 4) { + /* The total memory read latency cannot be lower than the + * amount of cycles consumed by the hardware to do a read. + * Set the memory read latency to 0 cycles. + */ + mem_latency = 0; + } else { + mem_latency -= 4; + + dev_info(&tc->pdev->dev, + "Setting memory read latency to %i cycles\n", + mem_latency); + } + + if (mem_wresp_latency <= 2) { + /* The total memory write latency cannot be lower than the + * amount of cycles consumed by the hardware to do a write. + * Set the memory write latency to 0 cycles. 
+ */ + mem_wresp_latency = 0; + } else { + mem_wresp_latency -= 2; + + dev_info(&tc->pdev->dev, + "Setting memory write response latency to %i cycles\n", + mem_wresp_latency); + } + + mem_latency |= mem_wresp_latency << 16; + + spi_write(tc, 0x1009, mem_latency); + + if (spi_read(tc, 0x1009, ®val) != 0) { + dev_err(&tc->pdev->dev, + "Failed to read back memory latency register"); + return; + } + + if (mem_latency != regval) { + dev_err(&tc->pdev->dev, + "Memory latency register doesn't match requested value" + " (actual: %#08x, expected: %#08x)\n", + regval, mem_latency); + } +} + +static void apollo_fpga_update_dut_clk_freq(struct tc_device *tc, + int *core_clock, int *mem_clock) +{ + struct device *dev = &tc->pdev->dev; + +#if defined(SUPPORT_FPGA_DUT_CLK_INFO) + u32 reg; + + /* DUT_CLK_INFO available only if SW_IF_VERSION >= 1 */ + reg = ioread32(tc->tcf.registers + TCF_CLK_CTRL_SW_IF_VERSION); + reg = (reg & VERSION_MASK) >> VERSION_SHIFT; + + if (reg >= 1) { + reg = ioread32(tc->tcf.registers + TCF_CLK_CTRL_DUT_CLK_INFO); + + if ((reg != 0) && (reg != 0xbaadface) && (reg != 0xffffffff)) { + dev_info(dev, "TCF_CLK_CTRL_DUT_CLK_INFO = %08x\n", reg); + dev_info(dev, "Overriding provided DUT clock values: " + "core %i, mem %i\n", + *core_clock, *mem_clock); + + *core_clock = ((reg & CORE_MASK) >> CORE_SHIFT) * 1000000; + *mem_clock = ((reg & MEM_MASK) >> MEM_SHIFT) * 1000000; + } + } +#endif + + dev_info(dev, "DUT clock values: core %i, mem %i\n", + *core_clock, *mem_clock); +} + +#endif /* defined(SUPPORT_RGX) */ + +static int apollo_hard_reset(struct tc_device *tc, + int core_clock, int mem_clock, int sys_clock) +{ + u32 reg; + u32 reg_reset_n = 0; + + int err = 0; + + /* This is required for SPI reset which is not yet implemented. 
*/ + /*u32 aux_reset_n;*/ + + if (tc->version == APOLLO_VERSION_TCF_2) { + /* Power down */ + reg = ioread32(tc->tcf.registers + + TCF_CLK_CTRL_DUT_CONTROL_1); + reg &= ~DUT_CTRL_VCC_0V9EN; + reg &= ~DUT_CTRL_VCC_1V8EN; + reg |= DUT_CTRL_VCC_IO_INH; + reg |= DUT_CTRL_VCC_CORE_INH; + iowrite32(reg, tc->tcf.registers + + TCF_CLK_CTRL_DUT_CONTROL_1); + msleep(500); + } + + /* Put everything into reset */ + iowrite32(reg_reset_n, tc->tcf.registers + + TCF_CLK_CTRL_CLK_AND_RST_CTRL); + + /* Take PDP1 and PDP2 out of reset */ + reg_reset_n |= (0x1 << PDP1_RESETN_SHIFT); + reg_reset_n |= (0x1 << PDP2_RESETN_SHIFT); + + iowrite32(reg_reset_n, tc->tcf.registers + + TCF_CLK_CTRL_CLK_AND_RST_CTRL); + msleep(100); + + /* Take DDR out of reset */ + reg_reset_n |= (0x1 << DDR_RESETN_SHIFT); + iowrite32(reg_reset_n, tc->tcf.registers + + TCF_CLK_CTRL_CLK_AND_RST_CTRL); + +#if defined(SUPPORT_RGX) + if (tc->version == APOLLO_VERSION_TCF_5) + apollo_fpga_update_dut_clk_freq(tc, &core_clock, &mem_clock); + + /* Set clock speed here, before reset. 
*/ + apollo_set_clocks(tc, core_clock, mem_clock, sys_clock); + + /* Take GLB_CLKG and SCB out of reset */ + reg_reset_n |= (0x1 << GLB_CLKG_EN_SHIFT); + reg_reset_n |= (0x1 << SCB_RESETN_SHIFT); + iowrite32(reg_reset_n, tc->tcf.registers + + TCF_CLK_CTRL_CLK_AND_RST_CTRL); + msleep(100); + + if (tc->version == APOLLO_VERSION_TCF_2) { + /* Enable the voltage control regulators on DUT */ + reg = ioread32(tc->tcf.registers + + TCF_CLK_CTRL_DUT_CONTROL_1); + reg |= DUT_CTRL_VCC_0V9EN; + reg |= DUT_CTRL_VCC_1V8EN; + reg &= ~DUT_CTRL_VCC_IO_INH; + reg &= ~DUT_CTRL_VCC_CORE_INH; + iowrite32(reg, tc->tcf.registers + + TCF_CLK_CTRL_DUT_CONTROL_1); + msleep(300); + } + + /* Take DUT_DCM out of reset */ + reg_reset_n |= (0x1 << DUT_DCM_RESETN_SHIFT); + iowrite32(reg_reset_n, tc->tcf.registers + + TCF_CLK_CTRL_CLK_AND_RST_CTRL); + msleep(100); + + + err = tc_iopol32_nonzero(DCM_LOCK_STATUS_MASK, + tc->tcf.registers + TCF_CLK_CTRL_DCM_LOCK_STATUS); + + if (err != 0) + goto err_out; + + if (tc->version == APOLLO_VERSION_TCF_2) { + /* Set ODT to a specific value that seems to provide the most + * stable signals. 
+ */ + spi_write(tc, 0x11, 0x413130); + } + + /* Take DUT out of reset */ + reg_reset_n |= (0x1 << DUT_RESETN_SHIFT); + iowrite32(reg_reset_n, tc->tcf.registers + + TCF_CLK_CTRL_CLK_AND_RST_CTRL); + msleep(100); + + if (tc->version != APOLLO_VERSION_TCF_5) { + err = apollo_align_interface_es2(tc); + if (err) + goto err_out; + } + +#endif /* defined(SUPPORT_RGX) */ + + if (tc->version == APOLLO_VERSION_TCF_2) { + /* Enable the temperature sensor */ + spi_write(tc, 0xc, 0); /* power up */ + spi_write(tc, 0xc, 2); /* reset */ + spi_write(tc, 0xc, 6); /* init & run */ + + /* Register a new thermal zone */ + apollo_pdata.thermal_zone = + thermal_zone_device_register("apollo", 0, 0, tc, + &apollo_thermal_dev_ops, + NULL, 0, 0); + if (IS_ERR(apollo_pdata.thermal_zone)) { + dev_warn(&tc->pdev->dev, "Couldn't register thermal zone"); + apollo_pdata.thermal_zone = NULL; + } + } + + reg = ioread32(tc->tcf.registers + TCF_CLK_CTRL_SW_IF_VERSION); + reg = (reg & VERSION_MASK) >> VERSION_SHIFT; + + if (reg == 0) { + u32 build_inc; + u32 build_owner; + + /* Check the build */ + reg = ioread32(tc->tcf.registers + TCF_CLK_CTRL_FPGA_DES_REV_1); + build_inc = (reg >> 12) & 0xff; + build_owner = (reg >> 20) & 0xf; + + if (build_inc) { + dev_alert(&tc->pdev->dev, + "BE WARNED: You are not running a tagged release of the FPGA!\n"); + + dev_alert(&tc->pdev->dev, "Owner: 0x%01x, Inc: 0x%02x\n", + build_owner, build_inc); + } + + dev_info(&tc->pdev->dev, "FPGA Release: %u.%02u\n", + reg >> 8 & 0xf, reg & 0xff); + } + +#if defined(SUPPORT_RGX) +err_out: +#endif /* defined(SUPPORT_RGX) */ + return err; +} + +static void apollo_set_mem_mode_lma(struct tc_device *tc) +{ + u32 val; + + val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_TEST_CTRL); + val &= ~(ADDRESS_FORCE_MASK | PCI_TEST_MODE_MASK | HOST_ONLY_MODE_MASK + | HOST_PHY_MODE_MASK); + val |= (0x1 << ADDRESS_FORCE_SHIFT); + iowrite32(val, tc->tcf.registers + TCF_CLK_CTRL_TEST_CTRL); +} + +static void apollo_set_mem_mode_hybrid(struct 
tc_device *tc) +{ + u32 val; + + val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_TEST_CTRL); + val &= ~(ADDRESS_FORCE_MASK | PCI_TEST_MODE_MASK | HOST_ONLY_MODE_MASK + | HOST_PHY_MODE_MASK); + val |= ((0x1 << HOST_ONLY_MODE_SHIFT) | (0x1 << HOST_PHY_MODE_SHIFT)); + iowrite32(val, tc->tcf.registers + TCF_CLK_CTRL_TEST_CTRL); + + /* Setup apollo to pass 1GB window of address space to the local memory. + * This is a sub-mode of the host only mode, meaning that the apollo TC + * can address the system memory with a 1GB window of address space + * routed to the device local memory. The simplest approach is to mirror + * the CPU physical address space, by moving the device local memory + * window where it is mapped in the CPU physical address space. + */ + iowrite32(tc->tc_mem.base, + tc->tcf.registers + TCF_CLK_CTRL_HOST_PHY_OFFSET); +} + +static int apollo_set_mem_mode(struct tc_device *tc, int mem_mode) +{ + switch (mem_mode) { + case TC_MEMORY_HYBRID: + apollo_set_mem_mode_hybrid(tc); + dev_info(&tc->pdev->dev, "Memory mode: TC_MEMORY_HYBRID\n"); + break; + case TC_MEMORY_LOCAL: + apollo_set_mem_mode_lma(tc); + dev_info(&tc->pdev->dev, "Memory mode: TC_MEMORY_LOCAL\n"); + break; + default: + dev_err(&tc->pdev->dev, "unsupported memory mode = %d\n", + mem_mode); + return -ENOSYS; + }; + + tc->mem_mode = mem_mode; + + return 0; +} + +static bool apollo_pdp_export_host_addr(struct tc_device *tc) +{ + return tc->mem_mode == TC_MEMORY_HYBRID; +} + +static u64 apollo_get_pdp_dma_mask(struct tc_device *tc) +{ + /* The PDP does not access system memory, so there is no + * DMA limitation. 
+ */ + if ((tc->mem_mode == TC_MEMORY_LOCAL) || + (tc->mem_mode == TC_MEMORY_HYBRID)) + return DMA_BIT_MASK(64); + + return DMA_BIT_MASK(32); +} + +#if defined(SUPPORT_RGX) +static u64 apollo_get_rogue_dma_mask(struct tc_device *tc) +#else /* SUPPORT_APOLLO_FPGA */ +static u64 apollo_get_fpga_dma_mask(struct tc_device *tc) +#endif /* defined(SUPPORT_RGX) */ +{ + /* Does not access system memory, so there is no DMA limitation */ + if (tc->mem_mode == TC_MEMORY_LOCAL) + return DMA_BIT_MASK(64); + + return DMA_BIT_MASK(32); +} + +static int apollo_hw_init(struct tc_device *tc, + int core_clock, int mem_clock, int sys_clock, + int mem_latency, int mem_wresp_latency, int mem_mode) +{ + int err = 0; + + err = apollo_hard_reset(tc, core_clock, mem_clock, sys_clock); + if (err) + goto err_out; + + err = apollo_set_mem_mode(tc, mem_mode); + if (err) + goto err_out; + +#if defined(SUPPORT_RGX) + if (tc->version == APOLLO_VERSION_TCF_BONNIE) { + u32 reg; + /* Enable ASTC via SPI */ + if (spi_read(tc, 0xf, ®)) { + dev_err(&tc->pdev->dev, + "Failed to read apollo ASTC register\n"); + err = -ENODEV; + goto err_out; + } + + reg |= 0x1 << 4; + spi_write(tc, 0xf, reg); + } else if (tc->version == APOLLO_VERSION_TCF_5) { + apollo_set_mem_latency(tc, mem_latency, mem_wresp_latency); + } +#endif /* defined(SUPPORT_RGX) */ + +err_out: + return err; +} + +static int apollo_enable_irq(struct tc_device *tc) +{ + int err = 0; + +#if defined(TC_FAKE_INTERRUPTS) + setup_timer(&tc->timer, tc_irq_fake_wrapper, + (unsigned long)tc); + mod_timer(&tc->timer, + jiffies + msecs_to_jiffies(FAKE_INTERRUPT_TIME_MS)); +#else + { + u32 val; + + iowrite32(0, tc->tcf.registers + + TCF_CLK_CTRL_INTERRUPT_ENABLE); + iowrite32(0xffffffff, tc->tcf.registers + + TCF_CLK_CTRL_INTERRUPT_CLEAR); + + /* Set sense to active high */ + val = ioread32(tc->tcf.registers + + TCF_CLK_CTRL_INTERRUPT_OP_CFG) & ~(INT_SENSE_MASK); + iowrite32(val, tc->tcf.registers + + TCF_CLK_CTRL_INTERRUPT_OP_CFG); + + err = 
request_irq(tc->pdev->irq, apollo_irq_handler, + IRQF_SHARED, DRV_NAME, tc); + } +#endif + return err; +} + +static void apollo_disable_irq(struct tc_device *tc) +{ +#if defined(TC_FAKE_INTERRUPTS) + del_timer_sync(&tc->timer); +#else + iowrite32(0, tc->tcf.registers + + TCF_CLK_CTRL_INTERRUPT_ENABLE); + iowrite32(0xffffffff, tc->tcf.registers + + TCF_CLK_CTRL_INTERRUPT_CLEAR); + + free_irq(tc->pdev->irq, tc); +#endif +} + +static enum tc_version_t +apollo_detect_tc_version(struct tc_device *tc) +{ + u32 val = ioread32(tc->tcf.registers + + TCF_CLK_CTRL_TCF_CORE_TARGET_BUILD_CFG); + + switch (val) { + default: + dev_err(&tc->pdev->dev, + "Unknown TCF core target build ID (0x%x) - assuming Hood ES2 - PLEASE REPORT TO ANDROID TEAM\n", + val); + /* Fall-through */ + case 5: + dev_err(&tc->pdev->dev, "Looks like a Hood ES2 TC\n"); + return APOLLO_VERSION_TCF_2; + case 1: + dev_err(&tc->pdev->dev, "Looks like a TCF5\n"); + return APOLLO_VERSION_TCF_5; + case 6: + dev_err(&tc->pdev->dev, "Looks like a Bonnie TC\n"); + return APOLLO_VERSION_TCF_BONNIE; + } +} + +static u32 apollo_interrupt_id_to_flag(int interrupt_id) +{ + switch (interrupt_id) { + case TC_INTERRUPT_PDP: + return TC_INTERRUPT_FLAG_PDP; + case TC_INTERRUPT_EXT: + return TC_INTERRUPT_FLAG_EXT; + default: + BUG(); + } +} + +static int apollo_dev_init(struct tc_device *tc, struct pci_dev *pdev, + int pdp_mem_size, int secure_mem_size) +{ + int err; + + /* Reserve and map the tcf_clk / "sys" registers */ + err = setup_io_region(pdev, &tc->tcf, + SYS_APOLLO_REG_PCI_BASENUM, + SYS_APOLLO_REG_SYS_OFFSET, SYS_APOLLO_REG_SYS_SIZE); + if (err) + goto err_out; + + /* Reserve and map the tcf_pll registers */ + err = setup_io_region(pdev, &tc->tcf_pll, + SYS_APOLLO_REG_PCI_BASENUM, + SYS_APOLLO_REG_PLL_OFFSET + TCF_PLL_PLL_CORE_CLK0, + TCF_PLL_PLL_DRP_STATUS - TCF_PLL_PLL_CORE_CLK0 + 4); + if (err) + goto err_unmap_sys_registers; + +#if defined(SUPPORT_APOLLO_FPGA) +#define FPGA_REGISTERS_SIZE 4 + /* If this is a 
special 'fpga' build, have the apollo driver manage + * the second register bar. + */ + err = setup_io_region(pdev, &apollo_pdata.fpga, + SYS_RGX_REG_PCI_BASENUM, 0, FPGA_REGISTERS_SIZE); + if (err) + goto err_unmap_pll_registers; +#endif + + /* Detect testchip version */ + tc->version = apollo_detect_tc_version(tc); + + /* Setup card memory */ + tc->tc_mem.base = + pci_resource_start(pdev, APOLLO_MEM_PCI_BASENUM); + tc->tc_mem.size = + pci_resource_len(pdev, APOLLO_MEM_PCI_BASENUM); + + if (tc->tc_mem.size < pdp_mem_size) { + dev_err(&pdev->dev, + "Apollo MEM region (bar %d) has size of %lu which is smaller than the requested PDP heap of %lu", + APOLLO_MEM_PCI_BASENUM, + (unsigned long)tc->tc_mem.size, + (unsigned long)pdp_mem_size); + err = -EIO; + goto err_unmap_fpga_registers; + } + +#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) + if (tc->tc_mem.size < + (pdp_mem_size + secure_mem_size)) { + dev_err(&pdev->dev, + "Apollo MEM region (bar %d) has size of %lu which is smaller than the requested PDP heap of %lu plus the requested secure heap size %lu", + APOLLO_MEM_PCI_BASENUM, + (unsigned long)tc->tc_mem.size, + (unsigned long)pdp_mem_size, + (unsigned long)secure_mem_size); + err = -EIO; + goto err_unmap_fpga_registers; + } +#endif + + err = tc_mtrr_setup(tc); + if (err) + goto err_unmap_fpga_registers; + + /* Setup ranges for the device heaps */ + tc->pdp_heap_mem_size = pdp_mem_size; + + /* We know ext_heap_mem_size won't underflow as we've compared + * tc_mem.size against the pdp_mem_size value earlier + */ + tc->ext_heap_mem_size = + tc->tc_mem.size - tc->pdp_heap_mem_size; + +#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) + tc->ext_heap_mem_size -= secure_mem_size; +#endif + + if (tc->ext_heap_mem_size < TC_EXT_MINIMUM_MEM_SIZE) { + dev_warn(&pdev->dev, + "Apollo MEM region (bar %d) has size of %lu, with %lu pdp_mem_size only %lu bytes are left for ext device, which looks too small", + APOLLO_MEM_PCI_BASENUM, + (unsigned long)tc->tc_mem.size, + (unsigned 
long)pdp_mem_size, + (unsigned long)tc->ext_heap_mem_size); + /* Continue as this is only a 'helpful warning' not a hard + * requirement + */ + } + + tc->ext_heap_mem_base = tc->tc_mem.base; + tc->pdp_heap_mem_base = + tc->tc_mem.base + tc->ext_heap_mem_size; +#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) + tc->secure_heap_mem_base = tc->pdp_heap_mem_base + + tc->pdp_heap_mem_size; + tc->secure_heap_mem_size = secure_mem_size; +#endif + +#if defined(SUPPORT_ION) + err = tc_ion_init(tc, APOLLO_MEM_PCI_BASENUM); + if (err) { + dev_err(&pdev->dev, "Failed to initialise ION\n"); + goto err_unmap_fpga_registers; + } +#endif + +#if defined(SUPPORT_APOLLO_FPGA) + apollo_debugfs_add_fpga_entries(tc, &apollo_pdata.fpga, + &apollo_pdata.fpga_entries); +#endif /* defined(SUPPORT_APOLLO_FPGA) */ + +err_out: + return err; +err_unmap_fpga_registers: +#if defined(SUPPORT_APOLLO_FPGA) + iounmap(apollo_pdata.fpga.registers); + release_pci_io_addr(pdev, SYS_RGX_REG_PCI_BASENUM, + apollo_pdata.fpga.region.base, apollo_pdata.fpga.region.size); +err_unmap_pll_registers: +#endif /* defined(SUPPORT_APOLLO_FPGA) */ + iounmap(tc->tcf_pll.registers); + release_pci_io_addr(pdev, SYS_APOLLO_REG_PCI_BASENUM, + tc->tcf_pll.region.base, tc->tcf_pll.region.size); +err_unmap_sys_registers: + iounmap(tc->tcf.registers); + release_pci_io_addr(pdev, SYS_APOLLO_REG_PCI_BASENUM, + tc->tcf.region.base, tc->tcf.region.size); + goto err_out; +} + +static void apollo_dev_cleanup(struct tc_device *tc) +{ +#if defined(SUPPORT_APOLLO_FPGA) + apollo_debugfs_remove_fpga_entries(&apollo_pdata.fpga_entries); +#endif + +#if defined(SUPPORT_ION) + tc_ion_deinit(tc, APOLLO_MEM_PCI_BASENUM); +#endif + + tc_mtrr_cleanup(tc); + +#if defined(SUPPORT_APOLLO_FPGA) + iounmap(apollo_pdata.fpga.registers); + release_pci_io_addr(tc->pdev, SYS_RGX_REG_PCI_BASENUM, + apollo_pdata.fpga.region.base, apollo_pdata.fpga.region.size); +#endif + + iounmap(tc->tcf_pll.registers); + release_pci_io_addr(tc->pdev, SYS_APOLLO_REG_PCI_BASENUM, 
+ tc->tcf_pll.region.base, tc->tcf_pll.region.size); + + iounmap(tc->tcf.registers); + release_pci_io_addr(tc->pdev, SYS_APOLLO_REG_PCI_BASENUM, + tc->tcf.region.base, tc->tcf.region.size); + + if (apollo_pdata.thermal_zone) + thermal_zone_device_unregister(apollo_pdata.thermal_zone); +} + +int apollo_init(struct tc_device *tc, struct pci_dev *pdev, + int core_clock, int mem_clock, int sys_clock, + int pdp_mem_size, int secure_mem_size, + int mem_latency, int mem_wresp_latency, int mem_mode) +{ + int err = 0; + + err = apollo_dev_init(tc, pdev, pdp_mem_size, secure_mem_size); + if (err) { + dev_err(&pdev->dev, "apollo_dev_init failed\n"); + goto err_out; + } + + err = apollo_hw_init(tc, core_clock, mem_clock, sys_clock, + mem_latency, mem_wresp_latency, mem_mode); + if (err) { + dev_err(&pdev->dev, "apollo_hw_init failed\n"); + goto err_dev_cleanup; + } + + err = apollo_enable_irq(tc); + if (err) { + dev_err(&pdev->dev, + "Failed to initialise IRQ\n"); + goto err_dev_cleanup; + } + +err_out: + return err; + +err_dev_cleanup: + apollo_dev_cleanup(tc); + goto err_out; +} + +int apollo_cleanup(struct tc_device *tc) +{ + apollo_disable_irq(tc); + apollo_dev_cleanup(tc); + + return 0; +} + +int apollo_register_pdp_device(struct tc_device *tc) +{ + int err = 0; + resource_size_t reg_start = + pci_resource_start(tc->pdev, + SYS_APOLLO_REG_PCI_BASENUM); + struct resource pdp_resources_es2[] = { + DEFINE_RES_MEM_NAMED(reg_start + SYS_APOLLO_REG_PDP1_OFFSET, + SYS_APOLLO_REG_PDP1_SIZE, "pdp-regs"), + DEFINE_RES_MEM_NAMED(reg_start + + SYS_APOLLO_REG_PLL_OFFSET + + TCF_PLL_PLL_PDP_CLK0, + TCF_PLL_PLL_PDP2_DRP_GO - + TCF_PLL_PLL_PDP_CLK0 + 4, "pll-regs"), + }; + struct resource pdp_resources_tcf5[] = { + DEFINE_RES_MEM_NAMED(reg_start + SYS_APOLLO_REG_PDP1_OFFSET, + SYS_APOLLO_REG_PDP1_SIZE, "pdp-regs"), + DEFINE_RES_MEM_NAMED(reg_start + + SYS_APOLLO_REG_PLL_OFFSET + + TCF_PLL_PLL_PDP_CLK0, + TCF_PLL_PLL_PDP2_DRP_GO - + TCF_PLL_PLL_PDP_CLK0 + 4, "pll-regs"), + 
DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev, + TC5_SYS_APOLLO_REG_PCI_BASENUM) + + TC5_SYS_APOLLO_REG_PDP2_OFFSET, + TC5_SYS_APOLLO_REG_PDP2_SIZE, "tc5-pdp2-regs"), + + DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev, + TC5_SYS_APOLLO_REG_PCI_BASENUM) + + TC5_SYS_APOLLO_REG_PDP2_FBDC_OFFSET, + TC5_SYS_APOLLO_REG_PDP2_FBDC_SIZE, + "tc5-pdp2-fbdc-regs"), + + DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev, + TC5_SYS_APOLLO_REG_PCI_BASENUM) + + TC5_SYS_APOLLO_REG_HDMI_OFFSET, + TC5_SYS_APOLLO_REG_HDMI_SIZE, + "tc5-adv5711-regs"), + }; + + struct tc_pdp_platform_data pdata = { +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + .ion_device = tc->ion_device, + .ion_heap_id = ION_HEAP_TC_PDP, +#endif + .memory_base = tc->tc_mem.base, + .pdp_heap_memory_base = tc->pdp_heap_mem_base, + .pdp_heap_memory_size = tc->pdp_heap_mem_size, + .dma_map_export_host_addr = apollo_pdp_export_host_addr(tc), + }; + struct platform_device_info pdp_device_info = { + .parent = &tc->pdev->dev, + .name = APOLLO_DEVICE_NAME_PDP, + .id = -2, + .data = &pdata, + .size_data = sizeof(pdata), + .dma_mask = apollo_get_pdp_dma_mask(tc), + }; + + if (tc->version == APOLLO_VERSION_TCF_5) { + pdp_device_info.res = pdp_resources_tcf5; + pdp_device_info.num_res = ARRAY_SIZE(pdp_resources_tcf5); + } else if (tc->version == APOLLO_VERSION_TCF_2 || + tc->version == APOLLO_VERSION_TCF_BONNIE) { + pdp_device_info.res = pdp_resources_es2; + pdp_device_info.num_res = ARRAY_SIZE(pdp_resources_es2); + } else { + dev_err(&tc->pdev->dev, + "Unable to set PDP resource info for unknown apollo device\n"); + } + + tc->pdp_dev = platform_device_register_full(&pdp_device_info); + if (IS_ERR(tc->pdp_dev)) { + err = PTR_ERR(tc->pdp_dev); + dev_err(&tc->pdev->dev, + "Failed to register PDP device (%d)\n", err); + tc->pdp_dev = NULL; + goto err; + } +err: + return err; +} + +#if defined(SUPPORT_RGX) + +int apollo_register_ext_device(struct tc_device *tc) +{ + int err = 0; + struct resource 
rogue_resources[] = { + DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev, + SYS_RGX_REG_PCI_BASENUM), + SYS_RGX_REG_REGION_SIZE, "rogue-regs"), + }; + struct tc_rogue_platform_data pdata = { +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + .ion_device = tc->ion_device, + .ion_heap_id = ION_HEAP_TC_ROGUE, +#endif + .mem_mode = tc->mem_mode, + .tc_memory_base = tc->tc_mem.base, + .pdp_heap_memory_base = tc->pdp_heap_mem_base, + .pdp_heap_memory_size = tc->pdp_heap_mem_size, + .rogue_heap_memory_base = tc->ext_heap_mem_base, + .rogue_heap_memory_size = tc->ext_heap_mem_size, +#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) + .secure_heap_memory_base = tc->secure_heap_mem_base, + .secure_heap_memory_size = tc->secure_heap_mem_size, +#endif + }; + struct platform_device_info rogue_device_info = { + .parent = &tc->pdev->dev, + .name = TC_DEVICE_NAME_ROGUE, + .id = -2, + .res = rogue_resources, + .num_res = ARRAY_SIZE(rogue_resources), + .data = &pdata, + .size_data = sizeof(pdata), + .dma_mask = apollo_get_rogue_dma_mask(tc), + }; + + tc->ext_dev + = platform_device_register_full(&rogue_device_info); + + if (IS_ERR(tc->ext_dev)) { + err = PTR_ERR(tc->ext_dev); + dev_err(&tc->pdev->dev, + "Failed to register rogue device (%d)\n", err); + tc->ext_dev = NULL; + } + return err; +} + +#elif defined(SUPPORT_APOLLO_FPGA) + +int apollo_register_ext_device(struct tc_device *tc) +{ + int err = 0; + struct resource fpga_resources[] = { + /* For the 'fpga' build, we don't use the Rogue, but reuse the + * define that mentions RGX. 
+ */ + DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev, + SYS_RGX_REG_PCI_BASENUM), + SYS_RGX_REG_REGION_SIZE, "fpga-regs"), + }; + struct apollo_fpga_platform_data pdata = { + .mem_mode = tc->mem_mode, + .tc_memory_base = tc->tc_mem.base, + .pdp_heap_memory_base = tc->pdp_heap_mem_base, + .pdp_heap_memory_size = tc->pdp_heap_mem_size, + }; + struct platform_device_info fpga_device_info = { + .parent = &tc->pdev->dev, + .name = APOLLO_DEVICE_NAME_FPGA, + .id = -1, + .res = fpga_resources, + .num_res = ARRAY_SIZE(fpga_resources), + .data = &pdata, + .size_data = sizeof(pdata), + .dma_mask = apollo_get_fpga_dma_mask(tc), + }; + + tc->ext_dev = platform_device_register_full(&fpga_device_info); + if (IS_ERR(tc->ext_dev)) { + err = PTR_ERR(tc->ext_dev); + dev_err(&tc->pdev->dev, + "Failed to register fpga device (%d)\n", err); + tc->ext_dev = NULL; + /* Fall through */ + } + + return err; +} + +#else /* defined(SUPPORT_APOLLO_FPGA) */ + +int apollo_register_ext_device(struct tc_device *tc) +{ + return 0; +} + +#endif /* defined(SUPPORT_RGX) */ + +void apollo_enable_interrupt_register(struct tc_device *tc, + int interrupt_id) +{ + u32 val; + + if (interrupt_id == TC_INTERRUPT_PDP || + interrupt_id == TC_INTERRUPT_EXT) { + val = ioread32( + tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE); + val |= apollo_interrupt_id_to_flag(interrupt_id); + iowrite32(val, + tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE); + } +} + +void apollo_disable_interrupt_register(struct tc_device *tc, + int interrupt_id) +{ + u32 val; + + if (interrupt_id == TC_INTERRUPT_PDP || + interrupt_id == TC_INTERRUPT_EXT) { + val = ioread32( + tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE); + val &= ~(apollo_interrupt_id_to_flag(interrupt_id)); + iowrite32(val, + tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE); + } +} + +irqreturn_t apollo_irq_handler(int irq, void *data) +{ + u32 interrupt_status; + u32 interrupt_clear = 0; + unsigned long flags; + irqreturn_t ret = IRQ_NONE; + struct 
tc_device *tc = (struct tc_device *)data; + + spin_lock_irqsave(&tc->interrupt_handler_lock, flags); + +#if defined(TC_FAKE_INTERRUPTS) + /* If we're faking interrupts pretend we got both ext and PDP ints */ + interrupt_status = TC_INTERRUPT_FLAG_EXT + | TC_INTERRUPT_FLAG_PDP; +#else + interrupt_status = ioread32(tc->tcf.registers + + TCF_CLK_CTRL_INTERRUPT_STATUS); +#endif + + if (interrupt_status & TC_INTERRUPT_FLAG_EXT) { + struct tc_interrupt_handler *ext_int = + &tc->interrupt_handlers[TC_INTERRUPT_EXT]; + + if (ext_int->enabled && ext_int->handler_function) { + ext_int->handler_function(ext_int->handler_data); + interrupt_clear |= TC_INTERRUPT_FLAG_EXT; + } + ret = IRQ_HANDLED; + } + if (interrupt_status & TC_INTERRUPT_FLAG_PDP) { + struct tc_interrupt_handler *pdp_int = + &tc->interrupt_handlers[TC_INTERRUPT_PDP]; + + if (pdp_int->enabled && pdp_int->handler_function) { + pdp_int->handler_function(pdp_int->handler_data); + interrupt_clear |= TC_INTERRUPT_FLAG_PDP; + } + ret = IRQ_HANDLED; + } + + if (tc->version == APOLLO_VERSION_TCF_5) { + /* On TC5 the interrupt is not by the TC framework, but + * by the PDP itself. So we always have to callback to the tc5 + * pdp code regardless of the interrupt status of the TCF. 
+ */ + struct tc_interrupt_handler *pdp_int = + &tc->interrupt_handlers[TC_INTERRUPT_TC5_PDP]; + + if (pdp_int->enabled && pdp_int->handler_function) { + pdp_int->handler_function(pdp_int->handler_data); + ret = IRQ_HANDLED; + } + } + + if (interrupt_clear) + iowrite32(0xffffffff, + tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_CLEAR); + + spin_unlock_irqrestore(&tc->interrupt_handler_lock, flags); + + return ret; +} + +int apollo_sys_info(struct tc_device *tc, u32 *tmp, u32 *pll) +{ + int err = 0; + + *tmp = 0; + *pll = 0; + + if (tc->version == APOLLO_VERSION_TCF_5) + /* Not implemented on TCF5 */ + goto err_out; + else if (tc->version == APOLLO_VERSION_TCF_2) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)) + unsigned long t; +#else + int t; +#endif + + err = apollo_thermal_get_temp(apollo_pdata.thermal_zone, &t); + if (err) + goto err_out; + *tmp = t / 1000; + } + + if (spi_read(tc, 0x2, pll)) { + dev_err(&tc->pdev->dev, "Failed to read PLL status\n"); + err = -ENODEV; + goto err_out; + } + +err_out: + return err; +} + +int apollo_sys_strings(struct tc_device *tc, + char *str_fpga_rev, size_t size_fpga_rev, + char *str_tcf_core_rev, size_t size_tcf_core_rev, + char *str_tcf_core_target_build_id, + size_t size_tcf_core_target_build_id, + char *str_pci_ver, size_t size_pci_ver, + char *str_macro_ver, size_t size_macro_ver) +{ + int err = 0; + u32 val; + resource_size_t host_fpga_base; + void __iomem *host_fpga_registers; + + /* To get some of the version information we need to read from a + * register that we don't normally have mapped. Map it temporarily + * (without trying to reserve it) to get the information we need. 
+ */ + host_fpga_base = + pci_resource_start(tc->pdev, SYS_APOLLO_REG_PCI_BASENUM) + + 0x40F0; + + host_fpga_registers = ioremap(host_fpga_base, 0x04); + if (!host_fpga_registers) { + dev_err(&tc->pdev->dev, + "Failed to map host fpga registers\n"); + err = -EIO; + goto err_out; + } + + /* Create the components of the PCI and macro versions */ + val = ioread32(host_fpga_registers); + snprintf(str_pci_ver, size_pci_ver, "%d", + HEX2DEC((val & 0x00FF0000) >> 16)); + snprintf(str_macro_ver, size_macro_ver, "%d.%d", + (val & 0x00000F00) >> 8, + HEX2DEC((val & 0x000000FF) >> 0)); + + /* Unmap the register now that we no longer need it */ + iounmap(host_fpga_registers); + + /* + * Check bits 7:0 of register 0x28 (TCF_CORE_REV_REG or SW_IF_VERSION + * depending on its own value) to find out how the driver should + * generate the strings for FPGA and core revision. + */ + val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_SW_IF_VERSION); + val = (val & VERSION_MASK) >> VERSION_SHIFT; + + if (val == 0) { + /* Create the components of the TCF core revision number */ + val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_TCF_CORE_REV_REG); + snprintf(str_tcf_core_rev, size_tcf_core_rev, "%d.%d.%d", + HEX2DEC((val & TCF_CORE_REV_REG_MAJOR_MASK) + >> TCF_CORE_REV_REG_MAJOR_SHIFT), + HEX2DEC((val & TCF_CORE_REV_REG_MINOR_MASK) + >> TCF_CORE_REV_REG_MINOR_SHIFT), + HEX2DEC((val & TCF_CORE_REV_REG_MAINT_MASK) + >> TCF_CORE_REV_REG_MAINT_SHIFT)); + + /* Create the components of the FPGA revision number */ + val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_FPGA_REV_REG); + snprintf(str_fpga_rev, size_fpga_rev, "%d.%d.%d", + HEX2DEC((val & FPGA_REV_REG_MAJOR_MASK) + >> FPGA_REV_REG_MAJOR_SHIFT), + HEX2DEC((val & FPGA_REV_REG_MINOR_MASK) + >> FPGA_REV_REG_MINOR_SHIFT), + HEX2DEC((val & FPGA_REV_REG_MAINT_MASK) + >> FPGA_REV_REG_MAINT_SHIFT)); + } else if (val == 1) { + /* Create the components of the TCF core revision number */ + snprintf(str_tcf_core_rev, size_tcf_core_rev, "%d", val); + + /* 
Create the components of the FPGA revision number */ + val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_REL); + snprintf(str_fpga_rev, size_fpga_rev, "%d.%d", + HEX2DEC((val & MAJOR_MASK) >> MAJOR_SHIFT), + HEX2DEC((val & MINOR_MASK) >> MINOR_SHIFT)); + } else { + dev_warn(&tc->pdev->dev, + "%s: unrecognised SW_IF_VERSION %#08x\n", + __func__, val); + + /* Create the components of the TCF core revision number */ + snprintf(str_tcf_core_rev, size_tcf_core_rev, "%d", val); + + /* Create the components of the FPGA revision number */ + snprintf(str_fpga_rev, size_fpga_rev, "N/A"); + } + + /* Create the component of the TCF core target build ID */ + val = ioread32(tc->tcf.registers + + TCF_CLK_CTRL_TCF_CORE_TARGET_BUILD_CFG); + snprintf(str_tcf_core_target_build_id, size_tcf_core_target_build_id, + "%d", + (val & TCF_CORE_TARGET_BUILD_ID_MASK) + >> TCF_CORE_TARGET_BUILD_ID_SHIFT); + +err_out: + return err; +} diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_apollo.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_apollo.h new file mode 100644 index 000000000000..bd20d6e72d7f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_apollo.h @@ -0,0 +1,78 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _APOLLO_DRV_H +#define _APOLLO_DRV_H + +#include "tc_drv_internal.h" +#include "apollo_regs.h" + +#if defined(SUPPORT_RGX) && defined(SUPPORT_APOLLO_FPGA) +#error Define either SUPPORT_RGX or SUPPORT_APOLLO_FGPA, not both +#endif + +int apollo_init(struct tc_device *tc, struct pci_dev *pdev, + int core_clock, int mem_clock, int sys_clock, + int pdp_mem_size, int secure_mem_size, + int mem_latency, int mem_wresp_latency, int mem_mode); +int apollo_cleanup(struct tc_device *tc); + +int apollo_register_pdp_device(struct tc_device *tc); +int apollo_register_ext_device(struct tc_device *tc); + +void apollo_enable_interrupt_register(struct tc_device *tc, + int interrupt_id); +void apollo_disable_interrupt_register(struct tc_device *tc, + int interrupt_id); + +irqreturn_t apollo_irq_handler(int irq, void *data); + +int apollo_sys_info(struct tc_device *tc, u32 *tmp, u32 *pll); +int apollo_sys_strings(struct tc_device *tc, + char *str_fpga_rev, size_t size_fpga_rev, + char *str_tcf_core_rev, size_t size_tcf_core_rev, + char *str_tcf_core_target_build_id, + size_t size_tcf_core_target_build_id, + char *str_pci_ver, size_t size_pci_ver, + char *str_macro_ver, size_t size_macro_ver); + +#endif /* _APOLLO_DRV_H */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_drv.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_drv.c new file mode 100644 index 000000000000..a0019f3a74ee --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_drv.c @@ -0,0 +1,856 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +/* + * This is a device driver for the testchip framework. It creates platform + * devices for the pdp and ext sub-devices, and exports functions to manage the + * shared interrupt handling + */ + +#include +#include +#include +#include +#include + +#if defined(CONFIG_MTRR) +#include +#endif + +#include "pvrmodule.h" + +#include "tc_apollo.h" +#include "tc_odin.h" + +/* How much memory to give to the PDP heap (used for pdp buffers). */ +#define TC_PDP_MEM_SIZE_BYTES ((TC_DISPLAY_MEM_SIZE)*1024*1024) + +#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) +/* How much memory to give to the secure heap. */ +#define TC_SECURE_MEM_SIZE_BYTES ((TC_SECURE_MEM_SIZE)*1024*1024) +#endif + +#define PCI_VENDOR_ID_POWERVR 0x1010 +#define DEVICE_ID_PCI_APOLLO_FPGA 0x1CF1 +#define DEVICE_ID_PCIE_APOLLO_FPGA 0x1CF2 + +MODULE_DESCRIPTION("PowerVR testchip framework driver"); + +static int tc_core_clock = RGX_TC_CORE_CLOCK_SPEED; +module_param(tc_core_clock, int, 0444); +MODULE_PARM_DESC(tc_core_clock, "TC core clock speed"); + +static int tc_mem_clock = RGX_TC_MEM_CLOCK_SPEED; +module_param(tc_mem_clock, int, 0444); +MODULE_PARM_DESC(tc_mem_clock, "TC memory clock speed"); + +static int tc_sys_clock = RGX_TC_SYS_CLOCK_SPEED; +module_param(tc_sys_clock, int, 0444); +MODULE_PARM_DESC(tc_sys_clock, "TC system clock speed (TCF5 only)"); + +static int tc_mem_latency; +module_param(tc_mem_latency, int, 0444); +MODULE_PARM_DESC(tc_mem_latency, "TC memory read latency in cycles (TCF5 only)"); + +static unsigned long tc_mem_mode = TC_MEMORY_CONFIG; +module_param(tc_mem_mode, ulong, 0444); +MODULE_PARM_DESC(tc_mem_mode, "TC memory mode (local = 1, hybrid = 2, host = 3)"); + +static int tc_wresp_latency; +module_param(tc_wresp_latency, int, 0444); +MODULE_PARM_DESC(tc_wresp_latency, "TC memory write response latency in cycles (TCF5 only)"); + +static unsigned long tc_pdp_mem_size = TC_PDP_MEM_SIZE_BYTES; 
+module_param(tc_pdp_mem_size, ulong, 0444); +MODULE_PARM_DESC(tc_pdp_mem_size, + "TC PDP reserved memory size in bytes"); + +#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) +static unsigned long tc_secure_mem_size = TC_SECURE_MEM_SIZE_BYTES; +module_param(tc_secure_mem_size, ulong, 0444); +MODULE_PARM_DESC(tc_secure_mem_size, + "TC secure reserved memory size in bytes"); +#endif + +static struct debugfs_blob_wrapper tc_debugfs_rogue_name_blobs[] = { + [APOLLO_VERSION_TCF_2] = { + .data = "hood", /* probably */ + .size = sizeof("hood") - 1, + }, + [APOLLO_VERSION_TCF_5] = { + .data = "fpga (unknown)", + .size = sizeof("fpga (unknown)") - 1, + }, + [APOLLO_VERSION_TCF_BONNIE] = { + .data = "bonnie", + .size = sizeof("bonnie") - 1, + }, + [ODIN_VERSION_TCF_BONNIE] = { + .data = "bonnie", + .size = sizeof("bonnie") - 1, + }, + [ODIN_VERSION_FPGA] = { + .data = "fpga (unknown)", + .size = sizeof("fpga (unknown)") - 1, + }, + [ODIN_VERSION_ORION] = { + .data = "orion", + .size = sizeof("orion") - 1, + }, +}; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) +/* forward declaration */ +static void tc_devres_release(struct device *dev, void *res); + +static ssize_t rogue_name_show(struct device_driver *drv, char *buf) +{ + struct pci_dev *pci_dev; + struct tc_device *tc; + struct device *dev; + + dev = driver_find_next_device(drv, NULL); + if (!dev) + return -ENODEV; + + pci_dev = to_pci_dev(dev); + if (!pci_dev) + return -ENODEV; + + tc = devres_find(&pci_dev->dev, tc_devres_release, NULL, NULL); + if (!tc) + return -ENODEV; + + return sprintf(buf, "%s\n", (const char *) + tc_debugfs_rogue_name_blobs[tc->version].data); +} + +static DRIVER_ATTR_RO(rogue_name); + +static struct attribute *tc_attrs[] = { + &driver_attr_rogue_name.attr, + NULL, +}; + +static struct attribute_group tc_attr_group = { + .attrs = tc_attrs, +}; + +static const struct attribute_group *tc_attr_groups[] = { + &tc_attr_group, + NULL, +}; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) */ + 
+#if defined(CONFIG_MTRR) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)) +/* + * A return value of: + * 0 or more means success + * -1 means we were unable to add an mtrr but we should continue + * -2 means we were unable to add an mtrr but we shouldn't continue + */ +static int mtrr_setup(struct pci_dev *pdev, + resource_size_t mem_start, + resource_size_t mem_size) +{ + int err; + int mtrr; + + /* Reset MTRR */ + mtrr = mtrr_add(mem_start, mem_size, MTRR_TYPE_UNCACHABLE, 0); + if (mtrr < 0) { + dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n", + __LINE__, __func__, mtrr); + mtrr = -2; + goto err_out; + } + + err = mtrr_del(mtrr, mem_start, mem_size); + if (err < 0) { + dev_err(&pdev->dev, "%d - %s: mtrr_del failed (%d)\n", + __LINE__, __func__, err); + mtrr = -2; + goto err_out; + } + + mtrr = mtrr_add(mem_start, mem_size, MTRR_TYPE_WRBACK, 0); + if (mtrr < 0) { + /* Stop, but not an error as this may be already be setup */ + dev_dbg(&pdev->dev, + "%d - %s: mtrr_add failed (%d) - probably means the mtrr is already setup\n", + __LINE__, __func__, mtrr); + mtrr = -1; + goto err_out; + } + + err = mtrr_del(mtrr, mem_start, mem_size); + if (err < 0) { + dev_err(&pdev->dev, "%d - %s: mtrr_del failed (%d)\n", + __LINE__, __func__, err); + mtrr = -2; + goto err_out; + } + + if (mtrr == 0) { + /* Replace 0 with a non-overlapping WRBACK mtrr */ + err = mtrr_add(0, mem_start, MTRR_TYPE_WRBACK, 0); + if (err < 0) { + dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n", + __LINE__, __func__, err); + mtrr = -2; + goto err_out; + } + } + + mtrr = mtrr_add(mem_start, mem_size, MTRR_TYPE_WRCOMB, 0); + if (mtrr < 0) { + dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n", + __LINE__, __func__, mtrr); + mtrr = -1; + } + +err_out: + return mtrr; +} +#endif /* defined(CONFIG_MTRR) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)) */ + +int tc_mtrr_setup(struct tc_device *tc) +{ + int err = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) +#if (LINUX_VERSION_CODE 
>= KERNEL_VERSION(4, 9, 0)) + /* Register the LMA as write combined */ + err = arch_io_reserve_memtype_wc(tc->tc_mem.base, + tc->tc_mem.size); + if (err) + return -ENODEV; +#endif + /* Enable write combining */ + tc->mtrr = arch_phys_wc_add(tc->tc_mem.base, + tc->tc_mem.size); + if (tc->mtrr < 0) { + err = -ENODEV; + goto err_out; + } + +#elif defined(CONFIG_MTRR) + /* Enable mtrr region caching */ + tc->mtrr = mtrr_setup(tc->pdev, + tc->tc_mem.base, + tc->tc_mem.size); + if (tc->mtrr == -2) { + err = -ENODEV; + goto err_out; + } +#endif + return err; + +err_out: +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) + arch_io_free_memtype_wc(tc->tc_mem.base, + tc->tc_mem.size); +#endif + return err; +} + +void tc_mtrr_cleanup(struct tc_device *tc) +{ + if (tc->mtrr >= 0) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + arch_phys_wc_del(tc->mtrr); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) + arch_io_free_memtype_wc(tc->tc_mem.base, + tc->tc_mem.size); +#endif +#elif defined(CONFIG_MTRR) + int err; + + err = mtrr_del(tc->mtrr, + tc->tc_mem.base, + tc->tc_mem.size); + if (err < 0) + dev_err(&tc->pdev->dev, + "mtrr_del failed (%d)\n", err); +#endif + } +} + +int tc_is_interface_aligned(u32 eyes, u32 clk_taps, u32 train_ack) +{ + u32 max_eye_start = eyes >> 16; + u32 min_eye_end = eyes & 0xffff; + + /* If either the training or training ack failed, we haven't aligned */ + if (!(clk_taps & 0x10000) || !(train_ack & 0x100)) + return 0; + + /* If the max eye >= min eye it means the readings are nonsense */ + if (max_eye_start >= min_eye_end) + return 0; + + /* If we failed the ack pattern more than 4 times */ + if (((train_ack & 0xf0) >> 4) > 4) + return 0; + + /* If there is less than 7 taps (240ps @40ps/tap, this number should be + * lower for the fpga, since its taps are bigger We should really + * calculate the "7" based on the interface clock speed. 
+ */ + if ((min_eye_end - max_eye_start) < 7) + return 0; + + return 1; +} + +int tc_iopol32_nonzero(u32 mask, void __iomem *addr) +{ + int polnum; + u32 read_value; + + for (polnum = 0; polnum < 50; polnum++) { + read_value = ioread32(addr) & mask; + if (read_value != 0) + break; + msleep(20); + } + if (polnum == 50) { + pr_err(DRV_NAME " iopol32_nonzero timeout\n"); + return -ETIME; + } + return 0; +} + +int request_pci_io_addr(struct pci_dev *pdev, u32 index, + resource_size_t offset, resource_size_t length) +{ + resource_size_t start, end; + + start = pci_resource_start(pdev, index); + end = pci_resource_end(pdev, index); + + if ((start + offset + length - 1) > end) + return -EIO; + if (pci_resource_flags(pdev, index) & IORESOURCE_IO) { + if (request_region(start + offset, length, DRV_NAME) == NULL) + return -EIO; + } else { + if (request_mem_region(start + offset, length, DRV_NAME) + == NULL) + return -EIO; + } + return 0; +} + +void release_pci_io_addr(struct pci_dev *pdev, u32 index, + resource_size_t start, resource_size_t length) +{ + if (pci_resource_flags(pdev, index) & IORESOURCE_IO) + release_region(start, length); + else + release_mem_region(start, length); +} + +int setup_io_region(struct pci_dev *pdev, + struct tc_io_region *region, u32 index, + resource_size_t offset, resource_size_t size) +{ + int err; + resource_size_t pci_phys_addr; + + err = request_pci_io_addr(pdev, index, offset, size); + if (err) { + dev_err(&pdev->dev, + "Failed to request tc registers (err=%d)\n", err); + return -EIO; + } + pci_phys_addr = pci_resource_start(pdev, index); + region->region.base = pci_phys_addr + offset; + region->region.size = size; + + region->registers = ioremap(region->region.base, region->region.size); + + if (!region->registers) { + dev_err(&pdev->dev, "Failed to map tc registers\n"); + release_pci_io_addr(pdev, index, + region->region.base, region->region.size); + return -EIO; + } + return 0; +} + +#if defined(TC_FAKE_INTERRUPTS) +void 
tc_irq_fake_wrapper(unsigned long data) +{ + struct tc_device *tc = (struct tc_device *)data; + + if (tc->odin) + odin_irq_handler(0, tc); + else + apollo_irq_handler(0, tc); + + mod_timer(&tc->timer, + jiffies + msecs_to_jiffies(FAKE_INTERRUPT_TIME_MS)); +} +#endif + +static int tc_register_pdp_device(struct tc_device *tc) +{ + int err = 0; + + if (tc->odin || tc->orion) + err = odin_register_pdp_device(tc); + else + err = apollo_register_pdp_device(tc); + + return err; +} + +static int tc_register_ext_device(struct tc_device *tc) +{ + int err = 0; + + if (tc->odin || tc->orion) + err = odin_register_ext_device(tc); + else + err = apollo_register_ext_device(tc); + + return err; +} + +static void tc_devres_release(struct device *dev, void *res) +{ + /* No extra cleanup needed */ +} + +static int tc_cleanup(struct pci_dev *pdev) +{ + struct tc_device *tc = devres_find(&pdev->dev, + tc_devres_release, NULL, NULL); + int i, err = 0; + + if (!tc) { + dev_err(&pdev->dev, "No tc device resources found\n"); + return -ENODEV; + } + + debugfs_remove(tc->debugfs_rogue_name); + + for (i = 0; i < TC_INTERRUPT_COUNT; i++) + if (tc->interrupt_handlers[i].enabled) + tc_disable_interrupt(&pdev->dev, i); + + if (tc->odin || tc->orion) + err = odin_cleanup(tc); + else + err = apollo_cleanup(tc); + + debugfs_remove(tc->debugfs_tc_dir); + + return err; +} + +static int tc_init(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct tc_device *tc; + int err = 0; +#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) + int sec_mem_size = TC_SECURE_MEM_SIZE_BYTES; +#else /* defined(SUPPORT_FAKE_SECURE_ION_HEAP) */ + int sec_mem_size = 0; +#endif /* defined(SUPPORT_FAKE_SECURE_ION_HEAP) */ + + if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) + return -ENOMEM; + + tc = devres_alloc(tc_devres_release, + sizeof(*tc), GFP_KERNEL); + if (!tc) { + err = -ENOMEM; + goto err_out; + } + + devres_add(&pdev->dev, tc); + + err = tc_enable(&pdev->dev); + if (err) { + dev_err(&pdev->dev, + 
"tc_enable failed %d\n", err); + goto err_release; + } + + tc->pdev = pdev; + + spin_lock_init(&tc->interrupt_handler_lock); + spin_lock_init(&tc->interrupt_enable_lock); + + tc->debugfs_tc_dir = debugfs_create_dir(DRV_NAME, NULL); + + if (pdev->vendor == PCI_VENDOR_ID_ODIN) { + + if (pdev->device == DEVICE_ID_ODIN) + tc->odin = true; + else if (pdev->device == DEVICE_ID_ORION) + tc->orion = true; + + dev_info(&pdev->dev, "%s detected. Core %d MHz, mem %d MHz\n", + odin_tc_name(tc), + tc_core_clock / 1000000, + tc_mem_clock / 1000000); + + err = odin_init(tc, pdev, + tc_core_clock, tc_mem_clock, + tc_pdp_mem_size, sec_mem_size, + tc_mem_latency, tc_wresp_latency, + tc_mem_mode); + if (err) + goto err_dev_cleanup; + + } else { + dev_info(&pdev->dev, "Apollo detected"); + tc->odin = false; + + err = apollo_init(tc, pdev, + tc_core_clock, tc_mem_clock, tc_sys_clock, + tc_pdp_mem_size, sec_mem_size, + tc_mem_latency, tc_wresp_latency, + tc_mem_mode); + if (err) + goto err_dev_cleanup; + } + + /* Add the rogue name debugfs entry */ + tc->debugfs_rogue_name = + debugfs_create_blob("rogue-name", 0444, + tc->debugfs_tc_dir, + &tc_debugfs_rogue_name_blobs[tc->version]); + +#if defined(TC_FAKE_INTERRUPTS) + dev_warn(&pdev->dev, "WARNING: Faking interrupts every %d ms", + FAKE_INTERRUPT_TIME_MS); +#endif + + /* Register pdp and ext platform devices */ + err = tc_register_pdp_device(tc); + if (err) + goto err_dev_cleanup; + + err = tc_register_ext_device(tc); + if (err) + goto err_dev_cleanup; + + devres_remove_group(&pdev->dev, NULL); + +err_out: + if (err) + dev_err(&pdev->dev, "%s: failed\n", __func__); + + return err; + +err_dev_cleanup: + tc_cleanup(pdev); + tc_disable(&pdev->dev); +err_release: + devres_release_group(&pdev->dev, NULL); + goto err_out; +} + +static void tc_exit(struct pci_dev *pdev) +{ + struct tc_device *tc = devres_find(&pdev->dev, + tc_devres_release, NULL, NULL); + + if (!tc) { + dev_err(&pdev->dev, "No tc device resources found\n"); + return; + } + + 
if (tc->pdp_dev) + platform_device_unregister(tc->pdp_dev); + + if (tc->ext_dev) + platform_device_unregister(tc->ext_dev); + + tc_cleanup(pdev); + + tc_disable(&pdev->dev); +} + +static struct pci_device_id tc_pci_tbl[] = { + { PCI_VDEVICE(POWERVR, DEVICE_ID_PCI_APOLLO_FPGA) }, + { PCI_VDEVICE(POWERVR, DEVICE_ID_PCIE_APOLLO_FPGA) }, + { PCI_VDEVICE(ODIN, DEVICE_ID_ODIN) }, + { PCI_VDEVICE(ODIN, DEVICE_ID_ORION) }, + { }, +}; + +static struct pci_driver tc_pci_driver = { + .name = DRV_NAME, + .id_table = tc_pci_tbl, + .probe = tc_init, + .remove = tc_exit, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) + .groups = tc_attr_groups, +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) */ +}; + +module_pci_driver(tc_pci_driver); + +MODULE_DEVICE_TABLE(pci, tc_pci_tbl); + +int tc_enable(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + return pci_enable_device(pdev); +} +EXPORT_SYMBOL(tc_enable); + +void tc_disable(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + pci_disable_device(pdev); +} +EXPORT_SYMBOL(tc_disable); + +int tc_set_interrupt_handler(struct device *dev, int interrupt_id, + void (*handler_function)(void *), void *data) +{ + struct tc_device *tc = devres_find(dev, tc_devres_release, + NULL, NULL); + int err = 0; + unsigned long flags; + + if (!tc) { + dev_err(dev, "No tc device resources found\n"); + err = -ENODEV; + goto err_out; + } + + if (interrupt_id < 0 || interrupt_id >= TC_INTERRUPT_COUNT) { + dev_err(dev, "Invalid interrupt ID (%d)\n", interrupt_id); + err = -EINVAL; + goto err_out; + } + + spin_lock_irqsave(&tc->interrupt_handler_lock, flags); + + tc->interrupt_handlers[interrupt_id].handler_function = + handler_function; + tc->interrupt_handlers[interrupt_id].handler_data = data; + + spin_unlock_irqrestore(&tc->interrupt_handler_lock, flags); + +err_out: + return err; +} +EXPORT_SYMBOL(tc_set_interrupt_handler); + +int tc_enable_interrupt(struct device *dev, int interrupt_id) +{ + struct 
tc_device *tc = devres_find(dev, tc_devres_release, + NULL, NULL); + int err = 0; + unsigned long flags; + + if (!tc) { + dev_err(dev, "No tc device resources found\n"); + err = -ENODEV; + goto err_out; + } + if (interrupt_id < 0 || interrupt_id >= TC_INTERRUPT_COUNT) { + dev_err(dev, "Invalid interrupt ID (%d)\n", interrupt_id); + err = -EINVAL; + goto err_out; + } + spin_lock_irqsave(&tc->interrupt_enable_lock, flags); + + if (tc->interrupt_handlers[interrupt_id].enabled) { + dev_warn(dev, "Interrupt ID %d already enabled\n", + interrupt_id); + err = -EEXIST; + goto err_unlock; + } + tc->interrupt_handlers[interrupt_id].enabled = true; + + if (tc->odin || tc->orion) + odin_enable_interrupt_register(tc, interrupt_id); + else + apollo_enable_interrupt_register(tc, interrupt_id); + +err_unlock: + spin_unlock_irqrestore(&tc->interrupt_enable_lock, flags); +err_out: + return err; +} +EXPORT_SYMBOL(tc_enable_interrupt); + +int tc_disable_interrupt(struct device *dev, int interrupt_id) +{ + struct tc_device *tc = devres_find(dev, tc_devres_release, + NULL, NULL); + int err = 0; + unsigned long flags; + + if (!tc) { + dev_err(dev, "No tc device resources found\n"); + err = -ENODEV; + goto err_out; + } + if (interrupt_id < 0 || interrupt_id >= TC_INTERRUPT_COUNT) { + dev_err(dev, "Invalid interrupt ID (%d)\n", interrupt_id); + err = -EINVAL; + goto err_out; + } + spin_lock_irqsave(&tc->interrupt_enable_lock, flags); + + if (!tc->interrupt_handlers[interrupt_id].enabled) { + dev_warn(dev, "Interrupt ID %d already disabled\n", + interrupt_id); + } + tc->interrupt_handlers[interrupt_id].enabled = false; + + if (tc->odin || tc->orion) + odin_disable_interrupt_register(tc, interrupt_id); + else + apollo_disable_interrupt_register(tc, interrupt_id); + + spin_unlock_irqrestore(&tc->interrupt_enable_lock, flags); +err_out: + return err; +} +EXPORT_SYMBOL(tc_disable_interrupt); + +int tc_sys_info(struct device *dev, u32 *tmp, u32 *pll) +{ + int err = -ENODEV; + struct tc_device 
*tc = devres_find(dev, tc_devres_release, + NULL, NULL); + + if (!tc) { + dev_err(dev, "No tc device resources found\n"); + goto err_out; + } + + if (tc->odin || tc->orion) + err = odin_sys_info(tc, tmp, pll); + else + err = apollo_sys_info(tc, tmp, pll); + +err_out: + return err; +} +EXPORT_SYMBOL(tc_sys_info); + +int tc_sys_strings(struct device *dev, + char *str_fpga_rev, size_t size_fpga_rev, + char *str_tcf_core_rev, size_t size_tcf_core_rev, + char *str_tcf_core_target_build_id, + size_t size_tcf_core_target_build_id, + char *str_pci_ver, size_t size_pci_ver, + char *str_macro_ver, size_t size_macro_ver) +{ + int err = -ENODEV; + + struct tc_device *tc = devres_find(dev, tc_devres_release, + NULL, NULL); + + if (!tc) { + dev_err(dev, "No tc device resources found\n"); + goto err_out; + } + + if (!str_fpga_rev || + !size_fpga_rev || + !str_tcf_core_rev || + !size_tcf_core_rev || + !str_tcf_core_target_build_id || + !size_tcf_core_target_build_id || + !str_pci_ver || + !size_pci_ver || + !str_macro_ver || + !size_macro_ver) { + + err = -EINVAL; + goto err_out; + } + + if (tc->odin || tc->orion) { + err = odin_sys_strings(tc, + str_fpga_rev, size_fpga_rev, + str_tcf_core_rev, size_tcf_core_rev, + str_tcf_core_target_build_id, + size_tcf_core_target_build_id, + str_pci_ver, size_pci_ver, + str_macro_ver, size_macro_ver); + } else { + err = apollo_sys_strings(tc, + str_fpga_rev, size_fpga_rev, + str_tcf_core_rev, size_tcf_core_rev, + str_tcf_core_target_build_id, + size_tcf_core_target_build_id, + str_pci_ver, size_pci_ver, + str_macro_ver, size_macro_ver); + } + +err_out: + return err; +} +EXPORT_SYMBOL(tc_sys_strings); + +int tc_core_clock_speed(struct device *dev) +{ + return tc_core_clock; +} +EXPORT_SYMBOL(tc_core_clock_speed); + +unsigned int tc_odin_subvers(struct device *dev) +{ + struct tc_device *tc = devres_find(dev, tc_devres_release, + NULL, NULL); + + if (tc->orion) + return 1; + else + return 0; +} +EXPORT_SYMBOL(tc_odin_subvers); diff --git 
a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_drv.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_drv.h new file mode 100644 index 000000000000..13ff4a515921 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_drv.h @@ -0,0 +1,162 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _TC_DRV_H +#define _TC_DRV_H + +/* + * This contains the hooks for the testchip driver, as used by the Rogue and + * PDP sub-devices, and the platform data passed to each of their drivers + */ + +#include +#include +#include + +/* Valid values for the TC_MEMORY_CONFIG configuration option */ +#define TC_MEMORY_LOCAL 1 +#define TC_MEMORY_HOST 2 +#define TC_MEMORY_HYBRID 3 + +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + +#include PVR_ANDROID_ION_HEADER + +/* NOTE: This should be kept in sync with the user side (in buffer_generic.c) */ +#if defined(SUPPORT_RGX) +#define ION_HEAP_TC_ROGUE (ION_HEAP_TYPE_CUSTOM+1) +#endif +#define ION_HEAP_TC_PDP (ION_HEAP_TYPE_CUSTOM+2) + +#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) +#define ION_HEAP_TC_SECURE (ION_HEAP_TYPE_CUSTOM+3) +#endif + +#endif /* defined(SUPPORT_ION) */ + +#define TC_INTERRUPT_PDP 0 +#define TC_INTERRUPT_EXT 1 +#define TC_INTERRUPT_TC5_PDP 2 +#define TC_INTERRUPT_COUNT 3 + +int tc_enable(struct device *dev); +void tc_disable(struct device *dev); + +int tc_enable_interrupt(struct device *dev, int interrupt_id); +int tc_disable_interrupt(struct device *dev, int interrupt_id); + +int tc_set_interrupt_handler(struct device *dev, int interrupt_id, + void (*handler_function)(void *), void *handler_data); + +int tc_sys_info(struct device *dev, u32 *tmp, u32 *pll); +int 
tc_sys_strings(struct device *dev, + char *str_fpga_rev, size_t size_fpga_rev, char *str_tcf_core_rev, + size_t size_tcf_core_rev, char *str_tcf_core_target_build_id, + size_t size_tcf_core_target_build_id, char *str_pci_ver, + size_t size_pci_ver, char *str_macro_ver, size_t size_macro_ver); +int tc_core_clock_speed(struct device *dev); + +unsigned int tc_odin_subvers(struct device *dev); + +#define APOLLO_DEVICE_NAME_PDP "apollo_pdp" +#define ODN_DEVICE_NAME_PDP "odin_pdp" + +/* The following structs are initialised and passed down by the parent tc + * driver to the respective sub-drivers + */ + +struct tc_pdp_platform_data { +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + struct ion_device *ion_device; + int ion_heap_id; +#endif + resource_size_t memory_base; + + /* The following is used by the drm_pdp driver as it manages the + * pdp memory + */ + resource_size_t pdp_heap_memory_base; + resource_size_t pdp_heap_memory_size; + + /* Used to export host address instead of pdp address, depends on the + * TC memory mode. + * + * PDP phys address space is from 0 to end of local device memory, + * however if the TC is configured to operate in hybrid mode then the + * GPU is configured to match the CPU phys address space view. 
+ */ + bool dma_map_export_host_addr; +}; + +#if defined(SUPPORT_RGX) + +#define TC_DEVICE_NAME_ROGUE "tc_rogue" + +struct tc_rogue_platform_data { +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + struct ion_device *ion_device; + int ion_heap_id; +#endif + /* The testchip memory mode (LOCAL, HOST or HYBRID) */ + int mem_mode; + + /* The base address of the testchip memory (CPU physical address) - + * used to convert from CPU-Physical to device-physical addresses + */ + resource_size_t tc_memory_base; + + /* The following is used to setup the services heaps that map to the + * ion heaps + */ + resource_size_t pdp_heap_memory_base; + resource_size_t pdp_heap_memory_size; + resource_size_t rogue_heap_memory_base; + resource_size_t rogue_heap_memory_size; +#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) + resource_size_t secure_heap_memory_base; + resource_size_t secure_heap_memory_size; +#endif +}; + +#endif /* defined(SUPPORT_RGX) */ + +#endif /* _TC_DRV_H */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_drv_internal.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_drv_internal.h new file mode 100644 index 000000000000..1f5a9c3269b4 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_drv_internal.h @@ -0,0 +1,183 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _TC_DRV_INTERNAL_H +#define _TC_DRV_INTERNAL_H + +#include "tc_drv.h" + +#include + +#if defined(TC_FAKE_INTERRUPTS) +#define FAKE_INTERRUPT_TIME_MS 1600 +#include +#include +#endif + +#define DRV_NAME "tc" + +/* This is a guess of what's a minimum sensible size for the ext heap + * It is only used for a warning if the ext heap is smaller, and does + * not affect the functional logic in any way + */ +#define TC_EXT_MINIMUM_MEM_SIZE (10*1024*1024) + +#if defined(SUPPORT_ION) + #if defined(SUPPORT_RGX) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + #define TC_ION_HEAP_BASE_COUNT 3 + #else + #define TC_ION_HEAP_BASE_COUNT 2 + #endif + + #if defined(SUPPORT_FAKE_SECURE_ION_HEAP) + #define TC_ION_HEAP_COUNT (TC_ION_HEAP_BASE_COUNT + 1) + #else + #define TC_ION_HEAP_COUNT TC_ION_HEAP_BASE_COUNT + #endif +#endif /* defined(SUPPORT_ION) */ + +/* Convert a byte offset to a 32 bit dword offset */ +#define DWORD_OFFSET(byte_offset) ((byte_offset)>>2) + +#define HEX2DEC(v) ((((v) >> 4) * 10) + ((v) & 0x0F)) + +enum tc_version_t { + TC_INVALID_VERSION, + APOLLO_VERSION_TCF_2, + APOLLO_VERSION_TCF_5, + APOLLO_VERSION_TCF_BONNIE, + ODIN_VERSION_TCF_BONNIE, + ODIN_VERSION_FPGA, + ODIN_VERSION_ORION, +}; + +struct tc_interrupt_handler { + bool enabled; + void (*handler_function)(void *); + void *handler_data; +}; + +struct tc_region { + resource_size_t base; + resource_size_t size; +}; + +struct tc_io_region { + struct tc_region region; + void __iomem *registers; +}; + +struct tc_device { + struct pci_dev *pdev; + + enum tc_version_t version; + bool odin; + bool orion; + + int mem_mode; + + struct tc_io_region tcf; + struct tc_io_region tcf_pll; + + struct tc_region tc_mem; + + struct platform_device *pdp_dev; + + resource_size_t pdp_heap_mem_base; + resource_size_t pdp_heap_mem_size; + + struct platform_device *ext_dev; + + resource_size_t ext_heap_mem_base; + resource_size_t 
ext_heap_mem_size; + +#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) + resource_size_t secure_heap_mem_base; + resource_size_t secure_heap_mem_size; +#endif + +#if defined(CONFIG_MTRR) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + int mtrr; +#endif + spinlock_t interrupt_handler_lock; + spinlock_t interrupt_enable_lock; + + struct tc_interrupt_handler + interrupt_handlers[TC_INTERRUPT_COUNT]; + +#if defined(TC_FAKE_INTERRUPTS) + struct timer_list timer; +#endif + +#if defined(SUPPORT_ION) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + struct ion_device *ion_device; +#endif + struct ion_heap *ion_heaps[TC_ION_HEAP_COUNT]; + int ion_heap_count; +#endif + + struct dentry *debugfs_tc_dir; + struct dentry *debugfs_rogue_name; +}; + +int tc_mtrr_setup(struct tc_device *tc); +void tc_mtrr_cleanup(struct tc_device *tc); + +int tc_is_interface_aligned(u32 eyes, u32 clk_taps, u32 train_ack); + +int tc_iopol32_nonzero(u32 mask, void __iomem *addr); + +int request_pci_io_addr(struct pci_dev *pdev, u32 index, + resource_size_t offset, resource_size_t length); +void release_pci_io_addr(struct pci_dev *pdev, u32 index, + resource_size_t start, resource_size_t length); + +int setup_io_region(struct pci_dev *pdev, + struct tc_io_region *region, u32 index, + resource_size_t offset, resource_size_t size); + +#if defined(TC_FAKE_INTERRUPTS) +void tc_irq_fake_wrapper(unsigned long data); +#endif /* defined(TC_FAKE_INTERRUPTS) */ + +#endif /* _TC_DRV_INTERNAL_H */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_ion.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_ion.h new file mode 100644 index 000000000000..139b8c35bcec --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_ion.h @@ -0,0 +1,53 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _TC_ION_H +#define _TC_ION_H + +struct ion_client; +struct tc_device; + +int tc_ion_init(struct tc_device *tc, int mem_bar); + +void tc_ion_deinit(struct tc_device *tc, int mem_bar); + +#endif /* _TC_ION_H */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_odin.c b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_odin.c new file mode 100644 index 000000000000..7e4a94591ad4 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_odin.c @@ -0,0 +1,1948 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +/* + * This is a device driver for the odin testchip framework. It creates + * platform devices for the pdp and ext sub-devices, and exports functions + * to manage the shared interrupt handling + */ + +#include +#include +#include +#include +#include + +#include "tc_drv_internal.h" +#include "tc_odin.h" +#include "tc_ion.h" + +/* Odin (3rd gen TCF FPGA) */ +#include "odin_defs.h" +#include "odin_regs.h" +#include "bonnie_tcf.h" + +/* Orion demo platform */ +#include "orion_defs.h" +#include "orion_regs.h" + +/* Odin/Orion common registers */ +#include "tc_odin_common_regs.h" + +/* Macros to set and get register fields */ +#define REG_FIELD_GET(v, str) \ + (u32)(((v) & (str##_MASK)) >> (str##_SHIFT)) +#define REG_FIELD_SET(v, f, str) \ + v = (u32)(((v) & (u32)~(str##_MASK)) | \ + (u32)(((f) << (str##_SHIFT)) & (str##_MASK))) + +#define SAI_STATUS_UNALIGNED 0 +#define SAI_STATUS_ALIGNED 1 +#define SAI_STATUS_ERROR 2 + +/* Odin/Orion shared masks */ +static const u32 REVISION_MAJOR_MASK[] = { + ODN_REVISION_MAJOR_MASK, + SRS_REVISION_MAJOR_MASK +}; +static const u32 REVISION_MAJOR_SHIFT[] = { + ODN_REVISION_MAJOR_SHIFT, + SRS_REVISION_MAJOR_SHIFT +}; +static const u32 REVISION_MINOR_MASK[] = { + ODN_REVISION_MINOR_MASK, + SRS_REVISION_MINOR_MASK +}; +static const u32 REVISION_MINOR_SHIFT[] = { + ODN_REVISION_MINOR_SHIFT, + SRS_REVISION_MINOR_SHIFT +}; +static const u32 CHANGE_SET_SET_MASK[] = { + ODN_CHANGE_SET_SET_MASK, + SRS_CHANGE_SET_SET_MASK +}; +static const u32 CHANGE_SET_SET_SHIFT[] = { + ODN_CHANGE_SET_SET_SHIFT, + SRS_CHANGE_SET_SET_SHIFT +}; +static const u32 USER_ID_ID_MASK[] = { + ODN_USER_ID_ID_MASK, + SRS_USER_ID_ID_MASK +}; +static const u32 USER_ID_ID_SHIFT[] = { + ODN_USER_ID_ID_SHIFT, + SRS_USER_ID_ID_SHIFT +}; +static const u32 USER_BUILD_BUILD_MASK[] = { + ODN_USER_BUILD_BUILD_MASK, + SRS_USER_BUILD_BUILD_MASK +}; +static const u32 USER_BUILD_BUILD_SHIFT[] = { + 
ODN_USER_BUILD_BUILD_SHIFT, + SRS_USER_BUILD_BUILD_SHIFT +}; +static const u32 INPUT_CLOCK_SPEED_MIN[] = { + ODN_INPUT_CLOCK_SPEED_MIN, + SRS_INPUT_CLOCK_SPEED_MIN +}; +static const u32 INPUT_CLOCK_SPEED_MAX[] = { + ODN_INPUT_CLOCK_SPEED_MAX, + SRS_INPUT_CLOCK_SPEED_MAX +}; +static const u32 OUTPUT_CLOCK_SPEED_MIN[] = { + ODN_OUTPUT_CLOCK_SPEED_MIN, + SRS_OUTPUT_CLOCK_SPEED_MIN +}; +static const u32 OUTPUT_CLOCK_SPEED_MAX[] = { + ODN_OUTPUT_CLOCK_SPEED_MAX, + SRS_OUTPUT_CLOCK_SPEED_MAX +}; +static const u32 VCO_MIN[] = { + ODN_VCO_MIN, + SRS_VCO_MIN +}; +static const u32 VCO_MAX[] = { + ODN_VCO_MAX, + SRS_VCO_MAX +}; +static const u32 PFD_MIN[] = { + ODN_PFD_MIN, + SRS_PFD_MIN +}; +static const u32 PFD_MAX[] = { + ODN_PFD_MAX, + SRS_PFD_MAX +}; + +#if defined(SUPPORT_RGX) + +static void spi_write(struct tc_device *tc, u32 off, u32 val) +{ + iowrite32(off, tc->tcf.registers + + ODN_REG_BANK_TCF_SPI_MASTER + + ODN_SPI_MST_ADDR_RDNWR); + iowrite32(val, tc->tcf.registers + + ODN_REG_BANK_TCF_SPI_MASTER + + ODN_SPI_MST_WDATA); + iowrite32(0x1, tc->tcf.registers + + ODN_REG_BANK_TCF_SPI_MASTER + + ODN_SPI_MST_GO); + udelay(1000); +} + +static int spi_read(struct tc_device *tc, u32 off, u32 *val) +{ + int cnt = 0; + u32 spi_mst_status; + + iowrite32(0x40000 | off, tc->tcf.registers + + ODN_REG_BANK_TCF_SPI_MASTER + + ODN_SPI_MST_ADDR_RDNWR); + iowrite32(0x1, tc->tcf.registers + + ODN_REG_BANK_TCF_SPI_MASTER + + ODN_SPI_MST_GO); + udelay(100); + + do { + spi_mst_status = ioread32(tc->tcf.registers + + ODN_REG_BANK_TCF_SPI_MASTER + + ODN_SPI_MST_STATUS); + + if (cnt++ > 10000) { + dev_err(&tc->pdev->dev, + "%s: Time out reading SPI reg (0x%x)\n", + __func__, off); + return -1; + } + + } while (spi_mst_status != 0x08); + + *val = ioread32(tc->tcf.registers + + ODN_REG_BANK_TCF_SPI_MASTER + + ODN_SPI_MST_RDATA); + + return 0; +} + +/* Returns 1 for aligned, 0 for unaligned */ +static int get_odin_sai_status(struct tc_device *tc, int bank) +{ + void __iomem *bank_addr = 
tc->tcf.registers + + ODN_REG_BANK_SAI_RX_DDR(bank); + void __iomem *reg_addr; + u32 eyes; + u32 clk_taps; + u32 train_ack; + + reg_addr = bank_addr + ODN_SAI_RX_DEBUG_SAI_EYES; + eyes = ioread32(reg_addr); + + reg_addr = bank_addr + ODN_SAI_RX_DEBUG_SAI_CLK_TAPS; + clk_taps = ioread32(reg_addr); + + reg_addr = bank_addr + ODN_SAI_RX_DEBUG_SAI_TRAIN_ACK; + train_ack = ioread32(reg_addr); + +#if 0 /* enable this to get debug info if the board is not aligning */ + dev_info(&tc->pdev->dev, + "odin bank %d align: eyes=%08x clk_taps=%08x train_ack=%08x\n", + bank, eyes, clk_taps, train_ack); +#endif + + if (tc_is_interface_aligned(eyes, clk_taps, train_ack)) + return SAI_STATUS_ALIGNED; + + dev_warn(&tc->pdev->dev, "odin bank %d is unaligned\n", bank); + return SAI_STATUS_UNALIGNED; +} + +/* Read the odin multi clocked bank align status. + * Returns 1 for aligned, 0 for unaligned + */ +static int read_odin_mca_status(struct tc_device *tc) +{ + void __iomem *bank_addr = tc->tcf.registers + + ODN_REG_BANK_MULTI_CLK_ALIGN; + void __iomem *reg_addr = bank_addr + ODN_MCA_DEBUG_MCA_STATUS; + u32 mca_status; + + mca_status = ioread32(reg_addr); + +#if 0 /* Enable this if there are alignment issues */ + dev_info(&tc->pdev->dev, + "Odin MCA_STATUS = %08x\n", mca_status); +#endif + return mca_status & ODN_ALIGNMENT_FOUND_MASK; +} + +/* Read the DUT multi clocked bank align status. 
+ * Returns 1 for aligned, 0 for unaligned + */ +static int read_dut_mca_status(struct tc_device *tc) +{ + u32 mca_status; + const int mca_status_register_offset = 1; /* not in bonnie_tcf.h */ + int spi_address = DWORD_OFFSET(BONNIE_TCF_OFFSET_MULTI_CLK_ALIGN); + + spi_address = DWORD_OFFSET(BONNIE_TCF_OFFSET_MULTI_CLK_ALIGN) + + mca_status_register_offset; + + spi_read(tc, spi_address, &mca_status); + +#if 0 /* Enable this if there are alignment issues */ + dev_info(&tc->pdev->dev, + "DUT MCA_STATUS = %08x\n", mca_status); +#endif + return mca_status & 1; /* 'alignment found' status is in bit 1 */ +} + +/* Returns 1 for aligned, 0 for unaligned */ +static int get_dut_sai_status(struct tc_device *tc, int bank) +{ + u32 eyes; + u32 clk_taps; + u32 train_ack; + const u32 bank_base = DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_RX_1 + + (BONNIE_TCF_OFFSET_SAI_RX_DELTA * bank)); + int spi_timeout; + + spi_timeout = spi_read(tc, bank_base + + DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_EYES), &eyes); + if (spi_timeout) + return SAI_STATUS_ERROR; + + spi_read(tc, bank_base + + DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_CLK_TAPS), &clk_taps); + spi_read(tc, bank_base + + DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_TRAIN_ACK), &train_ack); + +#if 0 /* enable this to get debug info if the board is not aligning */ + dev_info(&tc->pdev->dev, + "dut bank %d align: eyes=%08x clk_taps=%08x train_ack=%08x\n", + bank, eyes, clk_taps, train_ack); +#endif + + if (tc_is_interface_aligned(eyes, clk_taps, train_ack)) + return SAI_STATUS_ALIGNED; + + dev_warn(&tc->pdev->dev, "dut bank %d is unaligned\n", bank); + return SAI_STATUS_UNALIGNED; +} + +/* + * Returns the divider group register fields for the specified counter value. + * See Xilinx Application Note xapp888. 
+ */ +static void odin_mmcm_reg_param_calc(u32 value, u32 *low, u32 *high, + u32 *edge, u32 *no_count) +{ + if (value == 1U) { + *no_count = 1U; + *edge = 0; + *high = 0; + *low = 0; + } else { + *no_count = 0; + *edge = value % 2U; + *high = value >> 1; + *low = (value + *edge) >> 1U; + } +} + +/* + * Returns the MMCM Input Divider, FB Multiplier and Output Divider values for + * the specified input frequency and target output frequency. + * Function doesn't support fractional values for multiplier and output divider + * As per Xilinx 7 series FPGAs clocking resources user guide, aims for highest + * VCO and smallest D and M. + * Configured for Xilinx Virtex7 speed grade 2. + */ +static int odin_mmcm_counter_calc(struct device *dev, + u32 freq_input, u32 freq_output, + u32 *d, u32 *m, u32 *o) +{ + u32 tcver = tc_odin_subvers(dev); + u32 best_diff, d_best, m_best, o_best; + u32 m_min, m_max, m_ideal; + u32 d_cur, m_cur, o_cur; + u32 d_min, d_max; + + /* + * Check specified input frequency is within range + */ + if (freq_input < INPUT_CLOCK_SPEED_MIN[tcver]) { + dev_err(dev, "Input frequency (%u hz) below minimum supported value (%u hz)\n", + freq_input, INPUT_CLOCK_SPEED_MIN[tcver]); + return -EINVAL; + } + if (freq_input > INPUT_CLOCK_SPEED_MAX[tcver]) { + dev_err(dev, "Input frequency (%u hz) above maximum supported value (%u hz)\n", + freq_input, INPUT_CLOCK_SPEED_MAX[tcver]); + return -EINVAL; + } + + /* + * Check specified target frequency is within range + */ + if (freq_output < OUTPUT_CLOCK_SPEED_MIN[tcver]) { + dev_err(dev, "Output frequency (%u hz) below minimum supported value (%u hz)\n", + freq_input, OUTPUT_CLOCK_SPEED_MIN[tcver]); + return -EINVAL; + } + if (freq_output > OUTPUT_CLOCK_SPEED_MAX[tcver]) { + dev_err(dev, "Output frequency (%u hz) above maximum supported value (%u hz)\n", + freq_output, OUTPUT_CLOCK_SPEED_MAX[tcver]); + return -EINVAL; + } + + /* + * Calculate min and max for Input Divider. 
+ * Refer Xilinx 7 series FPGAs clocking resources user guide + * equation 3-6 and 3-7 + */ + d_min = DIV_ROUND_UP(freq_input, PFD_MAX[tcver]); + d_max = min(freq_input/PFD_MIN[tcver], (u32)ODN_DREG_VALUE_MAX); + + /* + * Calculate min and max for Input Divider. + * Refer Xilinx 7 series FPGAs clocking resources user guide. + * equation 3-8 and 3-9 + */ + m_min = DIV_ROUND_UP((VCO_MIN[tcver] * d_min), freq_input); + m_max = min(((VCO_MAX[tcver] * d_max) / freq_input), + (u32)ODN_MREG_VALUE_MAX); + + for (d_cur = d_min; d_cur <= d_max; d_cur++) { + /* + * Refer Xilinx 7 series FPGAs clocking resources user guide. + * equation 3-10 + */ + m_ideal = min(((d_cur * VCO_MAX[tcver])/freq_input), m_max); + + for (m_cur = m_ideal; m_cur >= m_min; m_cur -= 1) { + /** + * Skip if VCO for given 'm' and 'd' value is not an + * integer since fractional component is not supported + */ + if (((freq_input * m_cur) % d_cur) != 0) + continue; + + /** + * Skip if divider for given 'm' and 'd' value is not + * an integer since fractional component is not + * supported + */ + if ((freq_input * m_cur) % (d_cur * freq_output) != 0) + continue; + + /** + * Calculate output divider value. + */ + o_cur = (freq_input * m_cur)/(d_cur * freq_output); + + *d = d_cur; + *m = m_cur; + *o = o_cur; + return 0; + } + } + + /* + * Failed to find exact optimal solution with high VCO. 
Brute-force find + * a suitable config, again prioritising high VCO, to get lowest jitter + */ + d_min = 1; d_max = (u32)ODN_DREG_VALUE_MAX; + m_min = 1; m_max = (u32)ODN_MREG_VALUE_MAX; + best_diff = 0xFFFFFFFF; + + for (d_cur = d_min; d_cur <= d_max; d_cur++) { + for (m_cur = m_max; m_cur >= m_min; m_cur -= 1) { + u32 pfd, vco, o_avg, o_min, o_max; + + pfd = freq_input / d_cur; + vco = pfd * m_cur; + + if (pfd < PFD_MIN[tcver]) + continue; + + if (pfd > PFD_MAX[tcver]) + continue; + + if (vco < VCO_MIN[tcver]) + continue; + + if (vco > VCO_MAX[tcver]) + continue; + + /* A range of -1/+3 around o_avg gives us 100kHz granularity. It can be extended further. */ + o_avg = vco / freq_output; + o_min = (o_avg >= 2) ? (o_avg - 1) : 1; + o_max = o_avg + 3; + if (o_max > (u32)ODN_OREG_VALUE_MAX) + o_max = (u32)ODN_OREG_VALUE_MAX; + + for (o_cur = o_min; o_cur <= o_max; o_cur++) { + u32 freq_cur, diff_cur; + + freq_cur = vco / o_cur; + + if (freq_cur > freq_output) + continue; + + diff_cur = freq_output - freq_cur; + + if (diff_cur == 0) { + /* Found an exact match */ + *d = d_cur; + *m = m_cur; + *o = o_cur; + return 0; + } + + if (diff_cur < best_diff) { + best_diff = diff_cur; + d_best = d_cur; + m_best = m_cur; + o_best = o_cur; + } + } + } + } + + if (best_diff != 0xFFFFFFFF) { + dev_warn(dev, "Odin: Found similar freq of %u Hz\n", freq_output - best_diff); + *d = d_best; + *m = m_best; + *o = o_best; + return 0; + } + + dev_err(dev, "Odin: Unable to find integer values for d, m and o for " + "requested frequency (%u)\n", + freq_output); + + return -ERANGE; +} + +static int odin_fpga_set_dut_core_clk(struct tc_device *tc, + u32 input_clk, u32 output_clk) +{ + int err = 0; + u32 in_div, mul, out_div; + u32 high_time, low_time, edge, no_count; + u32 value; + void __iomem *base = tc->tcf.registers; + void __iomem *clk_blk_base = base + ODN_REG_BANK_ODN_CLK_BLK; + struct device *dev = &tc->pdev->dev; + + err = odin_mmcm_counter_calc(dev, input_clk, output_clk, &in_div, + 
&mul, &out_div); + if (err != 0) + return err; + + /* Put DUT into reset */ + iowrite32(ODN_EXTERNAL_RESETN_DUT_SPI_MASK, + base + ODN_CORE_EXTERNAL_RESETN); + msleep(20); + + /* Put DUT Core MMCM into reset */ + iowrite32(ODN_CLK_GEN_RESET_DUT_CORE_MMCM_MASK, + base + ODN_CORE_CLK_GEN_RESET); + msleep(20); + + /* Calculate the register fields for output divider */ + odin_mmcm_reg_param_calc(out_div, &high_time, &low_time, + &edge, &no_count); + + /* Read-modify-write the required fields to output divider register 1 */ + value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_OUT_DIVIDER1); + REG_FIELD_SET(value, high_time, + ODN_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME); + REG_FIELD_SET(value, low_time, + ODN_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME); + iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_OUT_DIVIDER1); + + /* Read-modify-write the required fields to output divider register 2 */ + value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_OUT_DIVIDER2); + REG_FIELD_SET(value, edge, + ODN_DUT_CORE_CLK_OUT_DIVIDER2_EDGE); + REG_FIELD_SET(value, no_count, + ODN_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT); + iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_OUT_DIVIDER2); + + /* Calculate the register fields for multiplier */ + odin_mmcm_reg_param_calc(mul, &high_time, &low_time, + &edge, &no_count); + + /* Read-modify-write the required fields to multiplier register 1 */ + value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER1); + REG_FIELD_SET(value, high_time, + ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME); + REG_FIELD_SET(value, low_time, + ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME); + iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER1); + + /* Read-modify-write the required fields to multiplier register 2 */ + value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER2); + REG_FIELD_SET(value, edge, + ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE); + REG_FIELD_SET(value, no_count, + ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT); + iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER2); + + /* 
Calculate the register fields for input divider */ + odin_mmcm_reg_param_calc(in_div, &high_time, &low_time, + &edge, &no_count); + + /* Read-modify-write the required fields to input divider register 1 */ + value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_IN_DIVIDER1); + REG_FIELD_SET(value, high_time, + ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME); + REG_FIELD_SET(value, low_time, + ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME); + REG_FIELD_SET(value, edge, + ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE); + REG_FIELD_SET(value, no_count, + ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT); + iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_IN_DIVIDER1); + + /* Bring DUT clock MMCM out of reset */ + iowrite32(0, tc->tcf.registers + ODN_CORE_CLK_GEN_RESET); + + err = tc_iopol32_nonzero(ODN_MMCM_LOCK_STATUS_DUT_CORE, + base + ODN_CORE_MMCM_LOCK_STATUS); + if (err != 0) { + dev_err(dev, "MMCM failed to lock for DUT core\n"); + return err; + } + + /* Bring DUT out of reset */ + iowrite32(ODN_EXTERNAL_RESETN_DUT_SPI_MASK | + ODN_EXTERNAL_RESETN_DUT_MASK, + tc->tcf.registers + ODN_CORE_EXTERNAL_RESETN); + msleep(20); + + dev_info(dev, "DUT core clock set-up successful\n"); + + return err; +} + +static int odin_fpga_set_dut_if_clk(struct tc_device *tc, + u32 input_clk, u32 output_clk) +{ + int err = 0; + u32 in_div, mul, out_div; + u32 high_time, low_time, edge, no_count; + u32 value; + void __iomem *base = tc->tcf.registers; + void __iomem *clk_blk_base = base + ODN_REG_BANK_ODN_CLK_BLK; + struct device *dev = &tc->pdev->dev; + + err = odin_mmcm_counter_calc(dev, input_clk, output_clk, + &in_div, &mul, &out_div); + if (err != 0) + return err; + + /* Put DUT into reset */ + iowrite32(ODN_EXTERNAL_RESETN_DUT_SPI_MASK, + base + ODN_CORE_EXTERNAL_RESETN); + msleep(20); + + /* Put DUT Core MMCM into reset */ + iowrite32(ODN_CLK_GEN_RESET_DUT_IF_MMCM_MASK, + base + ODN_CORE_CLK_GEN_RESET); + msleep(20); + + /* Calculate the register fields for output divider */ + odin_mmcm_reg_param_calc(out_div, &high_time, 
&low_time, + &edge, &no_count); + + /* Read-modify-write the required fields to output divider register 1 */ + value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER1); + REG_FIELD_SET(value, high_time, + ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME); + REG_FIELD_SET(value, low_time, + ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME); + iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER1); + + /* Read-modify-write the required fields to output divider register 2 */ + value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER2); + REG_FIELD_SET(value, edge, + ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE); + REG_FIELD_SET(value, no_count, + ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT); + iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER2); + + /* Calculate the register fields for multiplier */ + odin_mmcm_reg_param_calc(mul, &high_time, &low_time, &edge, &no_count); + + /* Read-modify-write the required fields to multiplier register 1 */ + value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER1); + REG_FIELD_SET(value, high_time, + ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME); + REG_FIELD_SET(value, low_time, + ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME); + iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER1); + + /* Read-modify-write the required fields to multiplier register 2 */ + value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER2); + REG_FIELD_SET(value, edge, + ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE); + REG_FIELD_SET(value, no_count, + ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT); + iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER2); + + /* Calculate the register fields for input divider */ + odin_mmcm_reg_param_calc(in_div, &high_time, &low_time, + &edge, &no_count); + + /* Read-modify-write the required fields to input divider register 1 */ + value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_IN_DIVIDER1); + REG_FIELD_SET(value, high_time, + ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME); + REG_FIELD_SET(value, low_time, + 
ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME); + REG_FIELD_SET(value, edge, + ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE); + REG_FIELD_SET(value, no_count, + ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT); + iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_IN_DIVIDER1); + + /* Bring DUT interface clock MMCM out of reset */ + iowrite32(0, tc->tcf.registers + ODN_CORE_CLK_GEN_RESET); + + err = tc_iopol32_nonzero(ODN_MMCM_LOCK_STATUS_DUT_IF, + base + ODN_CORE_MMCM_LOCK_STATUS); + if (err != 0) { + dev_err(dev, "MMCM failed to lock for DUT IF\n"); + return err; + } + + /* Bring DUT out of reset */ + iowrite32(ODN_EXTERNAL_RESETN_DUT_SPI_MASK | + ODN_EXTERNAL_RESETN_DUT_MASK, + tc->tcf.registers + ODN_CORE_EXTERNAL_RESETN); + msleep(20); + + dev_info(dev, "DUT IF clock set-up successful\n"); + + return err; +} + +static void odin_fpga_update_dut_clk_freq(struct tc_device *tc, + int *core_clock, int *mem_clock) +{ + struct device *dev = &tc->pdev->dev; + +#if defined(SUPPORT_FPGA_DUT_CLK_INFO) + int dut_clk_info = ioread32(tc->tcf.registers + ODN_CORE_DUT_CLK_INFO); + + if ((dut_clk_info != 0) && (dut_clk_info != 0xbaadface) && (dut_clk_info != 0xffffffff)) { + dev_info(dev, "ODN_DUT_CLK_INFO = %08x\n", dut_clk_info); + dev_info(dev, "Overriding provided DUT clock values: core %i, mem %i\n", + *core_clock, *mem_clock); + + *core_clock = ((dut_clk_info & ODN_DUT_CLK_INFO_CORE_MASK) + >> ODN_DUT_CLK_INFO_CORE_SHIFT) * 1000000; + + *mem_clock = ((dut_clk_info & ODN_DUT_CLK_INFO_MEM_MASK) + >> ODN_DUT_CLK_INFO_MEM_SHIFT) * 1000000; + } +#endif + + dev_info(dev, "DUT clock values: core %i, mem %i\n", + *core_clock, *mem_clock); +} + +static int odin_hard_reset_fpga(struct tc_device *tc, + int core_clock, int mem_clock) +{ + int err = 0; + + odin_fpga_update_dut_clk_freq(tc, &core_clock, &mem_clock); + + err = odin_fpga_set_dut_core_clk(tc, ODN_INPUT_CLOCK_SPEED, core_clock); + if (err != 0) + goto err_out; + + err = odin_fpga_set_dut_if_clk(tc, ODN_INPUT_CLOCK_SPEED, mem_clock); + +err_out: + 
return err; +} + +static int odin_hard_reset_bonnie(struct tc_device *tc) +{ + int reset_cnt = 0; + bool aligned = false; + int alignment_found; + + msleep(100); + + /* It is essential to do an SPI reset once on power-up before + * doing any DUT reads via the SPI interface. + */ + iowrite32(1, tc->tcf.registers /* set bit 1 low */ + + ODN_CORE_EXTERNAL_RESETN); + msleep(20); + + iowrite32(3, tc->tcf.registers /* set bit 1 high */ + + ODN_CORE_EXTERNAL_RESETN); + msleep(20); + + while (!aligned && (reset_cnt < 20)) { + + int bank; + + /* Reset the DUT to allow the SAI to retrain */ + iowrite32(2, /* set bit 0 low */ + tc->tcf.registers + + ODN_CORE_EXTERNAL_RESETN); + + /* Hold the DUT in reset for 50ms */ + msleep(50); + + /* Take the DUT out of reset */ + iowrite32(3, /* set bit 0 hi */ + tc->tcf.registers + + ODN_CORE_EXTERNAL_RESETN); + reset_cnt++; + + /* Wait 200ms for the DUT to stabilise */ + msleep(200); + + /* Check the odin Multi Clocked bank Align status */ + alignment_found = read_odin_mca_status(tc); + dev_info(&tc->pdev->dev, + "Odin mca_status indicates %s\n", + (alignment_found)?"aligned":"UNALIGNED"); + + /* Check the DUT MCA status */ + alignment_found = read_dut_mca_status(tc); + dev_info(&tc->pdev->dev, + "DUT mca_status indicates %s\n", + (alignment_found)?"aligned":"UNALIGNED"); + + /* If all banks have aligned then the reset was successful */ + for (bank = 0; bank < 10; bank++) { + + int dut_aligned = 0; + int odin_aligned = 0; + + odin_aligned = get_odin_sai_status(tc, bank); + dut_aligned = get_dut_sai_status(tc, bank); + + if (dut_aligned == SAI_STATUS_ERROR) + return SAI_STATUS_ERROR; + + if (!dut_aligned || !odin_aligned) { + aligned = false; + break; + } + aligned = true; + } + + if (aligned) { + dev_info(&tc->pdev->dev, + "all banks have aligned\n"); + break; + } + + dev_warn(&tc->pdev->dev, + "Warning- not all banks have aligned. 
Trying again.\n"); + } + + if (!aligned) + dev_warn(&tc->pdev->dev, "odin_hard_reset failed\n"); + + return (aligned) ? 0 : 1; /* return 0 for success */ +} + +static void odin_set_mem_latency(struct tc_device *tc, + int mem_latency, int mem_wresp_latency) +{ + u32 regval = 0; + + if (mem_latency <= 4) { + /* The total memory read latency cannot be lower than the + * amount of cycles consumed by the hardware to do a read. + * Set the memory read latency to 0 cycles. + */ + mem_latency = 0; + } else { + mem_latency -= 4; + + dev_info(&tc->pdev->dev, + "Setting memory read latency to %i cycles\n", + mem_latency); + } + + if (mem_wresp_latency <= 2) { + /* The total memory write latency cannot be lower than the + * amount of cycles consumed by the hardware to do a write. + * Set the memory write latency to 0 cycles. + */ + mem_wresp_latency = 0; + } else { + mem_wresp_latency -= 2; + + dev_info(&tc->pdev->dev, + "Setting memory write response latency to %i cycles\n", + mem_wresp_latency); + } + + mem_latency |= mem_wresp_latency << 16; + + spi_write(tc, 0x1009, mem_latency); + + if (spi_read(tc, 0x1009, ®val) != 0) { + dev_err(&tc->pdev->dev, + "Failed to read back memory latency register"); + return; + } + + if (mem_latency != regval) { + dev_err(&tc->pdev->dev, + "Memory latency register doesn't match requested value" + " (actual: %#08x, expected: %#08x)\n", + regval, mem_latency); + } +} + +static int check_c2c_link_status(struct tc_device *tc) +{ + struct device *dev = &tc->pdev->dev; + int status, status2; + + status = ioread32(tc->tcf.registers + CR_C2C_CHANNEL_STATUS_BASEB); + dev_info(dev, "C2C link: baseboard status = %d\n", status); + + spi_read(tc, CR_C2C_CHANNEL_STATUS_DAUGHTB, &status2); + dev_info(dev, "C2C link: daughterboard status = %d\n", status2); + + return (status << C2C_BB_STATUS_SHIFT) | (status2 << C2C_DB_STATUS_SHIFT); +} + +static int reset_c2c_link(struct tc_device *tc) +{ + struct device *dev = &tc->pdev->dev; + int bb_ctrl, db_ctrl; + int 
i, status; + + bb_ctrl = ioread32(tc->tcf.registers + CR_C2C_CHANNEL_CTRL_BASEB); + spi_read(tc, CR_C2C_CHANNEL_CTRL_DAUGHTB, &db_ctrl); + + if (bb_ctrl != C2C_DEFVAL || db_ctrl != C2C_DEFVAL) { + dev_warn(dev, + "Control status of baseboard/daughterboard is " + "0x%x/0x%x (expected 0x%x).\n", + bb_ctrl, db_ctrl, C2C_DEFVAL); + } + + /* Reset link */ + iowrite32(C2C_RESETVAL, tc->tcf.registers + CR_C2C_CHANNEL_CTRL_BASEB); + spi_write(tc, CR_C2C_CHANNEL_CTRL_DAUGHTB, C2C_RESETVAL); + msleep(1000); + + /* Take out of reset (baseboard last to maximise link success rate) */ + spi_write(tc, CR_C2C_CHANNEL_CTRL_DAUGHTB, C2C_DEFVAL); + msleep(1000); + iowrite32(C2C_DEFVAL, tc->tcf.registers + CR_C2C_CHANNEL_CTRL_BASEB); + msleep(C2C_READY_WAIT_MS); + + /* Check link */ + for (i = 0; i < C2C_READY_WAIT_ATTEMPTS; i++) { + status = check_c2c_link_status(tc); + + if (status == (C2C_BB_STATUS_OK | C2C_DB_STATUS_OK)) { + dev_warn(dev, "C2C link successful ...\n"); + break; + } + msleep(C2C_READY_WAIT_MS); + } + + if (status != (C2C_BB_STATUS_OK | C2C_DB_STATUS_OK)) + dev_err(dev, "C2C link not ready! 
(status = 0x%x)\n", status); + + return status; +} + + +#endif /* defined(SUPPORT_RGX) */ + +static int orion_set_dut_core_clk(struct tc_device *tc, + u32 input_clk, + u32 output_clk) +{ + void __iomem *base = tc->tcf.registers; + void __iomem *clk_blk_base = base + SRS_REG_BANK_ODN_CLK_BLK; + struct device *dev = &tc->pdev->dev; + u32 high_time, low_time, edge, no_count; + u32 in_div, mul, out_div; + u32 value; + int err; + + err = odin_mmcm_counter_calc(dev, input_clk, output_clk, &in_div, + &mul, &out_div); + if (err != 0) + return err; + + /* Put DUT into reset */ + iowrite32(0, base + SRS_CORE_DUT_SOFT_RESETN); + msleep(20); + + /* Put DUT Core MMCM into reset */ + iowrite32(SRS_CLK_GEN_RESET_DUT_CORE_MMCM_MASK, + base + SRS_CORE_CLK_GEN_RESET); + msleep(20); + + /* Calculate the register fields for input divider */ + odin_mmcm_reg_param_calc(in_div, &high_time, &low_time, + &edge, &no_count); + + /* Read-modify-write the required fields to input divider register 1 */ + value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_IN_DIVIDER1); + REG_FIELD_SET(value, high_time, + ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME); + REG_FIELD_SET(value, low_time, + ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME); + REG_FIELD_SET(value, edge, + ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE); + REG_FIELD_SET(value, no_count, + ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT); + iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_IN_DIVIDER1); + + /* Calculate the register fields for multiplier */ + odin_mmcm_reg_param_calc(mul, &high_time, &low_time, + &edge, &no_count); + + /* Read-modify-write the required fields to multiplier register 1 */ + value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER1); + REG_FIELD_SET(value, high_time, + ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME); + REG_FIELD_SET(value, low_time, + ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME); + iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER1); + + /* Read-modify-write the required fields to multiplier register 2 */ + value = ioread32(clk_blk_base + 
ODN_DUT_CORE_CLK_MULTIPLIER2); + REG_FIELD_SET(value, edge, + ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE); + REG_FIELD_SET(value, no_count, + ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT); + iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER2); + + /* Calculate the register fields for output divider */ + odin_mmcm_reg_param_calc(out_div, &high_time, &low_time, + &edge, &no_count); + + /* + * Read-modify-write the required fields to + * core output divider register 1 + */ + value = ioread32(clk_blk_base + SRS_DUT_CORE_CLK_OUT_DIVIDER1); + REG_FIELD_SET(value, high_time, + SRS_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME); + REG_FIELD_SET(value, low_time, + SRS_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME); + iowrite32(value, clk_blk_base + SRS_DUT_CORE_CLK_OUT_DIVIDER1); + + /* + * Read-modify-write the required fields to core output + * divider register 2 + */ + value = ioread32(clk_blk_base + SRS_DUT_CORE_CLK_OUT_DIVIDER2); + REG_FIELD_SET(value, edge, + SRS_DUT_CORE_CLK_OUT_DIVIDER2_EDGE); + REG_FIELD_SET(value, no_count, + SRS_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT); + iowrite32(value, clk_blk_base + SRS_DUT_CORE_CLK_OUT_DIVIDER2); + + /* + * Read-modify-write the required fields to + * reference output divider register 1 + */ + value = ioread32(clk_blk_base + SRS_DUT_REF_CLK_OUT_DIVIDER1); + REG_FIELD_SET(value, high_time, + SRS_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME); + REG_FIELD_SET(value, low_time, + SRS_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME); + iowrite32(value, clk_blk_base + SRS_DUT_REF_CLK_OUT_DIVIDER1); + + /* + * Read-modify-write the required fields to + * reference output divider register 2 + */ + value = ioread32(clk_blk_base + SRS_DUT_REF_CLK_OUT_DIVIDER2); + REG_FIELD_SET(value, edge, + SRS_DUT_REF_CLK_OUT_DIVIDER2_EDGE); + REG_FIELD_SET(value, no_count, + SRS_DUT_REF_CLK_OUT_DIVIDER2_NOCOUNT); + iowrite32(value, clk_blk_base + SRS_DUT_REF_CLK_OUT_DIVIDER2); + + /* Bring DUT IF clock MMCM out of reset */ + iowrite32(0, tc->tcf.registers + SRS_CORE_CLK_GEN_RESET); + + err = 
tc_iopol32_nonzero(SRS_MMCM_LOCK_STATUS_DUT_CORE_MASK, + base + SRS_CORE_MMCM_LOCK_STATUS); + if (err != 0) { + dev_err(dev, "MMCM failed to lock for DUT core\n"); + return err; + } + + /* Bring DUT out of reset */ + iowrite32(SRS_DUT_SOFT_RESETN_EXTERNAL_MASK, + tc->tcf.registers + SRS_CORE_DUT_SOFT_RESETN); + msleep(20); + + dev_info(dev, "DUT core clock set-up successful\n"); + + return err; +} + +static int orion_set_dut_sys_mem_clk(struct tc_device *tc, + u32 input_clk, + u32 output_clk) +{ + void __iomem *base = tc->tcf.registers; + void __iomem *clk_blk_base = base + SRS_REG_BANK_ODN_CLK_BLK; + struct device *dev = &tc->pdev->dev; + u32 high_time, low_time, edge, no_count; + u32 in_div, mul, out_div; + u32 value; + int err; + + err = odin_mmcm_counter_calc(dev, input_clk, output_clk, &in_div, + &mul, &out_div); + if (err != 0) + return err; + + /* Put DUT into reset */ + iowrite32(0, base + SRS_CORE_DUT_SOFT_RESETN); + msleep(20); + + /* Put DUT Core MMCM into reset */ + iowrite32(SRS_CLK_GEN_RESET_DUT_IF_MMCM_MASK, + base + SRS_CORE_CLK_GEN_RESET); + msleep(20); + + /* Calculate the register fields for input divider */ + odin_mmcm_reg_param_calc(in_div, &high_time, &low_time, + &edge, &no_count); + + /* Read-modify-write the required fields to input divider register 1 */ + value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_IN_DIVIDER1); + REG_FIELD_SET(value, high_time, + ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME); + REG_FIELD_SET(value, low_time, + ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME); + REG_FIELD_SET(value, edge, + ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE); + REG_FIELD_SET(value, no_count, + ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT); + iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_IN_DIVIDER1); + + /* Calculate the register fields for multiplier */ + odin_mmcm_reg_param_calc(mul, &high_time, &low_time, + &edge, &no_count); + + /* Read-modify-write the required fields to multiplier register 1 */ + value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER1); + 
REG_FIELD_SET(value, high_time, + ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME); + REG_FIELD_SET(value, low_time, + ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME); + iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER1); + + /* Read-modify-write the required fields to multiplier register 2 */ + value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER2); + REG_FIELD_SET(value, edge, + ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE); + REG_FIELD_SET(value, no_count, + ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT); + iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER2); + + /* Calculate the register fields for output divider */ + odin_mmcm_reg_param_calc(out_div, &high_time, &low_time, + &edge, &no_count); + + /* Read-modify-write the required fields to output divider register 1 */ + value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER1); + REG_FIELD_SET(value, high_time, + ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME); + REG_FIELD_SET(value, low_time, + ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME); + iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER1); + + /* Read-modify-write the required fields to output divider register 2 */ + value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER2); + REG_FIELD_SET(value, edge, + ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE); + REG_FIELD_SET(value, no_count, + ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT); + iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER2); + + /* + * New to Orion, registers undocumented in the TRM, assumed high_time, + * low_time, edge and no_count are in the same bit fields as the + * previous two registers Even though these registers seem to be + * undocumented, setting them is essential for the DUT not to show + * abnormal behaviour, like the firmware jumping to random addresses + */ + + /* + * Read-modify-write the required fields to memory clock output divider + * register 1 + */ + value = ioread32(clk_blk_base + SRS_DUT_MEM_CLK_OUT_DIVIDER1); + REG_FIELD_SET(value, high_time, + 
SRS_DUT_MEM_CLK_OUT_DIVIDER1_HI_TIME); + REG_FIELD_SET(value, low_time, + SRS_DUT_MEM_CLK_OUT_DIVIDER1_LO_TIME); + iowrite32(value, clk_blk_base + SRS_DUT_MEM_CLK_OUT_DIVIDER1); + + /* + * Read-modify-write the required fields to memory clock output divider + * register 1 + */ + value = ioread32(clk_blk_base + SRS_DUT_MEM_CLK_OUT_DIVIDER2); + REG_FIELD_SET(value, edge, + SRS_DUT_MEM_CLK_OUT_DIVIDER2_EDGE); + REG_FIELD_SET(value, no_count, + SRS_DUT_MEM_CLK_OUT_DIVIDER2_NOCOUNT); + iowrite32(value, clk_blk_base + SRS_DUT_MEM_CLK_OUT_DIVIDER2); + + /* Bring DUT clock MMCM out of reset */ + iowrite32(0, tc->tcf.registers + SRS_CORE_CLK_GEN_RESET); + + err = tc_iopol32_nonzero(SRS_MMCM_LOCK_STATUS_DUT_IF_MASK, + base + SRS_CORE_MMCM_LOCK_STATUS); + if (err != 0) { + dev_err(dev, "MMCM failed to lock for DUT IF\n"); + return err; + } + + /* Bring DUT out of reset */ + iowrite32(SRS_DUT_SOFT_RESETN_EXTERNAL_MASK, + tc->tcf.registers + SRS_CORE_DUT_SOFT_RESETN); + msleep(20); + + dev_info(dev, "DUT IF clock set-up successful\n"); + + return err; +} + + +static int orion_hard_reset(struct tc_device *tc, int core_clock, int mem_clock) +{ + int err; + + err = orion_set_dut_core_clk(tc, SRS_INPUT_CLOCK_SPEED, core_clock); + if (err != 0) + goto err_out; + + err = orion_set_dut_sys_mem_clk(tc, SRS_INPUT_CLOCK_SPEED, mem_clock); + +err_out: + return err; +} + +/* Do a hard reset on the DUT */ +static int odin_hard_reset(struct tc_device *tc, int core_clock, int mem_clock) +{ +#if defined(SUPPORT_RGX) + if (tc->version == ODIN_VERSION_TCF_BONNIE) + return odin_hard_reset_bonnie(tc); + if (tc->version == ODIN_VERSION_FPGA) + return odin_hard_reset_fpga(tc, core_clock, mem_clock); + if (tc->version == ODIN_VERSION_ORION) + return orion_hard_reset(tc, core_clock, mem_clock); + + dev_err(&tc->pdev->dev, "Invalid Odin version"); + return 1; +#else /* defined(SUPPORT_RGX) */ + return 0; +#endif /* defined(SUPPORT_RGX) */ +} + +static void odin_set_mem_mode_lma(struct tc_device *tc) +{ 
+ u32 val; + + if (tc->version != ODIN_VERSION_FPGA) + return; + + /* Enable memory offset to be applied to DUT and PDP1 */ + iowrite32(0x80000A10, tc->tcf.registers + ODN_CORE_DUT_CTRL1); + + /* Apply memory offset to GPU and PDP1 to point to DDR memory. + * Enable HDMI. + */ + val = (0x4 << ODN_CORE_CONTROL_DUT_OFFSET_SHIFT) | + (0x4 << ODN_CORE_CONTROL_PDP1_OFFSET_SHIFT) | + (0x2 << ODN_CORE_CONTROL_HDMI_MODULE_EN_SHIFT) | + (0x1 << ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_SHIFT); + iowrite32(val, tc->tcf.registers + ODN_CORE_CORE_CONTROL); +} + +static int odin_set_mem_mode(struct tc_device *tc, int mem_mode) +{ + switch (mem_mode) { + case TC_MEMORY_LOCAL: + odin_set_mem_mode_lma(tc); + dev_info(&tc->pdev->dev, "Memory mode: TC_MEMORY_LOCAL\n"); + break; + default: + dev_err(&tc->pdev->dev, "unsupported memory mode = %d\n", + mem_mode); + return -ENOSYS; + }; + + tc->mem_mode = mem_mode; + + return 0; +} + +static u64 odin_get_pdp_dma_mask(struct tc_device *tc) +{ + /* Does not access system memory, so there is no DMA limitation */ + if ((tc->mem_mode == TC_MEMORY_LOCAL) || + (tc->mem_mode == TC_MEMORY_HYBRID)) + return DMA_BIT_MASK(64); + + return DMA_BIT_MASK(32); +} + +static u64 odin_get_rogue_dma_mask(struct tc_device *tc) +{ + /* Does not access system memory, so there is no DMA limitation */ + if (tc->mem_mode == TC_MEMORY_LOCAL) + return DMA_BIT_MASK(64); + + return DMA_BIT_MASK(32); +} + +static int odin_hw_init(struct tc_device *tc, int core_clock, int mem_clock, + int mem_latency, int mem_wresp_latency, int mem_mode) +{ + int err; + + err = odin_hard_reset(tc, core_clock, mem_clock); + if (err) { + dev_err(&tc->pdev->dev, "Failed to initialise Odin"); + goto err_out; + } + + err = odin_set_mem_mode(tc, mem_mode); + if (err) + goto err_out; + +#if defined(SUPPORT_RGX) + if (tc->version == ODIN_VERSION_FPGA) + odin_set_mem_latency(tc, mem_latency, mem_wresp_latency); + + /* + * Reset C2C if you are using a TCFVUOcta: + * - Read BAR0 0x0000C200 + * 
TCFVUOCTA returns 0x000000C6 + * - reset_c2c_link(struct tc_device *tc) + */ + if (tc->version == ODIN_VERSION_FPGA) { + if (ioread32(tc->tcf.registers + ODN_REG_BANK_DB_TYPE_ID) == + ODN_REG_BANK_DB_TYPE_ID_TYPE_TCFVUOCTA) { + reset_c2c_link(tc); + } + } +#endif /* defined(SUPPORT_RGX) */ + +err_out: + return err; +} + +static int odin_enable_irq(struct tc_device *tc) +{ + int err = 0; + +#if defined(TC_FAKE_INTERRUPTS) + setup_timer(&tc->timer, tc_irq_fake_wrapper, + (unsigned long)tc); + mod_timer(&tc->timer, + jiffies + msecs_to_jiffies(FAKE_INTERRUPT_TIME_MS)); +#else + iowrite32(0, tc->tcf.registers + + common_reg_offset(tc, CORE_INTERRUPT_ENABLE)); + iowrite32(0xffffffff, tc->tcf.registers + + common_reg_offset(tc, CORE_INTERRUPT_CLR)); + + dev_info(&tc->pdev->dev, + "Registering IRQ %d for use by %s\n", + tc->pdev->irq, + odin_tc_name(tc)); + + err = request_irq(tc->pdev->irq, odin_irq_handler, + IRQF_SHARED, DRV_NAME, tc); + + if (err) { + dev_err(&tc->pdev->dev, + "Error - IRQ %d failed to register\n", + tc->pdev->irq); + } else { + dev_info(&tc->pdev->dev, + "IRQ %d was successfully registered for use by %s\n", + tc->pdev->irq, + odin_tc_name(tc)); + } +#endif + return err; +} + +static void odin_disable_irq(struct tc_device *tc) +{ +#if defined(TC_FAKE_INTERRUPTS) + del_timer_sync(&tc->timer); +#else + iowrite32(0, tc->tcf.registers + + common_reg_offset(tc, CORE_INTERRUPT_ENABLE)); + iowrite32(0xffffffff, tc->tcf.registers + + common_reg_offset(tc, CORE_INTERRUPT_CLR)); + + free_irq(tc->pdev->irq, tc); +#endif +} + +static enum tc_version_t +odin_detect_daughterboard_version(struct tc_device *tc) +{ + u32 reg = ioread32(tc->tcf.registers + ODN_REG_BANK_DB_TYPE_ID); + u32 val = reg; + + if (tc->orion) + return ODIN_VERSION_ORION; + + val = (val & ODN_REG_BANK_DB_TYPE_ID_TYPE_MASK) >> + ODN_REG_BANK_DB_TYPE_ID_TYPE_SHIFT; + + switch (val) { + default: + dev_err(&tc->pdev->dev, + "Unknown odin version ID type %#x " + "(DB_TYPE_ID: %#08x)\n", + val, reg); 
+ return TC_INVALID_VERSION; + case 1: + dev_info(&tc->pdev->dev, "DUT: Bonnie TC\n"); + return ODIN_VERSION_TCF_BONNIE; + case 2: + case 3: + dev_info(&tc->pdev->dev, "DUT: FPGA\n"); + return ODIN_VERSION_FPGA; + } +} + +static int odin_dev_init(struct tc_device *tc, struct pci_dev *pdev, + int pdp_mem_size, int secure_mem_size) +{ + int err; + u32 val; + + /* Reserve and map the tcf system registers */ + err = setup_io_region(pdev, &tc->tcf, + ODN_SYS_BAR, ODN_SYS_REGS_OFFSET, ODN_SYS_REGS_SIZE); + if (err) + goto err_out; + + tc->version = odin_detect_daughterboard_version(tc); + if (tc->version == TC_INVALID_VERSION) { + err = -EIO; + goto err_odin_unmap_sys_registers; + } + + /* Setup card memory */ + tc->tc_mem.base = pci_resource_start(pdev, ODN_DDR_BAR); + tc->tc_mem.size = pci_resource_len(pdev, ODN_DDR_BAR); + + if (tc->tc_mem.size < pdp_mem_size) { + dev_err(&pdev->dev, + "%s MEM region (bar %d) has size of %lu which is " + "smaller than the requested PDP heap of %lu", + odin_tc_name(tc), + ODN_DDR_BAR, + (unsigned long)tc->tc_mem.size, + (unsigned long)pdp_mem_size); + + err = -EIO; + goto err_odin_unmap_sys_registers; + } + +#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) + if (tc->tc_mem.size < + (pdp_mem_size + secure_mem_size)) { + dev_err(&pdev->dev, + "Odin MEM region (bar %d) has size of %lu which is " + "smaller than the requested PDP heap of %lu plus " + "the requested secure heap size %lu", + ODN_DDR_BAR, + (unsigned long)tc->tc_mem.size, + (unsigned long)pdp_mem_size, + (unsigned long)secure_mem_size); + err = -EIO; + goto err_odin_unmap_sys_registers; + } +#endif + + err = tc_mtrr_setup(tc); + if (err) + goto err_odin_unmap_sys_registers; + + /* Setup ranges for the device heaps */ + tc->pdp_heap_mem_size = pdp_mem_size; + + /* We know ext_heap_mem_size won't underflow as we've compared + * tc_mem.size against the pdp_mem_size value earlier + */ + tc->ext_heap_mem_size = + tc->tc_mem.size - tc->pdp_heap_mem_size; + +#if 
defined(SUPPORT_FAKE_SECURE_ION_HEAP) + tc->ext_heap_mem_size -= secure_mem_size; +#endif + + if (tc->ext_heap_mem_size < TC_EXT_MINIMUM_MEM_SIZE) { + dev_warn(&pdev->dev, + "%s MEM region (bar 4) has size of %lu, with %lu " + "pdp_mem_size only %lu bytes are left for ext device, " + "which looks too small", + odin_tc_name(tc), + (unsigned long)tc->tc_mem.size, + (unsigned long)pdp_mem_size, + (unsigned long)tc->ext_heap_mem_size); + /* Continue as this is only a 'helpful warning' not a hard + * requirement + */ + } + tc->ext_heap_mem_base = tc->tc_mem.base; + tc->pdp_heap_mem_base = + tc->tc_mem.base + tc->ext_heap_mem_size; +#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) + tc->secure_heap_mem_base = tc->pdp_heap_mem_base + + tc->pdp_heap_mem_size; + tc->secure_heap_mem_size = secure_mem_size; +#endif + +#if defined(SUPPORT_ION) + err = tc_ion_init(tc, ODN_DDR_BAR); + if (err) { + dev_err(&pdev->dev, "Failed to initialise ION\n"); + goto err_odin_unmap_sys_registers; + } +#endif + + val = ioread32(tc->tcf.registers + + common_reg_offset(tc, CORE_REVISION)); + dev_info(&pdev->dev, "%s = 0x%08x\n", + common_reg_name(tc, CORE_REVISION), val); + + val = ioread32(tc->tcf.registers + + common_reg_offset(tc, CORE_CHANGE_SET)); + dev_info(&pdev->dev, "%s = 0x%08x\n", + common_reg_name(tc, CORE_CHANGE_SET), val); + + val = ioread32(tc->tcf.registers + + common_reg_offset(tc, CORE_USER_ID)); + dev_info(&pdev->dev, "%s = 0x%08x\n", + common_reg_name(tc, CORE_USER_ID), val); + + val = ioread32(tc->tcf.registers + + common_reg_offset(tc, CORE_USER_BUILD)); + dev_info(&pdev->dev, "%s = 0x%08x\n", + common_reg_name(tc, CORE_USER_BUILD), val); + +err_out: + return err; + +err_odin_unmap_sys_registers: + dev_info(&pdev->dev, + "%s: failed - unmapping the io regions.\n", __func__); + + iounmap(tc->tcf.registers); + release_pci_io_addr(pdev, ODN_SYS_BAR, + tc->tcf.region.base, tc->tcf.region.size); + goto err_out; +} + +static void odin_dev_cleanup(struct tc_device *tc) +{ +#if 
defined(SUPPORT_ION) + tc_ion_deinit(tc, ODN_DDR_BAR); +#endif + + tc_mtrr_cleanup(tc); + + iounmap(tc->tcf.registers); + + release_pci_io_addr(tc->pdev, + ODN_SYS_BAR, + tc->tcf.region.base, + tc->tcf.region.size); +} + +static u32 odin_interrupt_id_to_flag(int interrupt_id) +{ + switch (interrupt_id) { + case TC_INTERRUPT_PDP: + return ODN_INTERRUPT_ENABLE_PDP1; + case TC_INTERRUPT_EXT: + return ODN_INTERRUPT_ENABLE_DUT; + default: + BUG(); + } +} + +int odin_init(struct tc_device *tc, struct pci_dev *pdev, + int core_clock, int mem_clock, + int pdp_mem_size, int secure_mem_size, + int mem_latency, int mem_wresp_latency, int mem_mode) +{ + int err = 0; + + err = odin_dev_init(tc, pdev, pdp_mem_size, secure_mem_size); + if (err) { + dev_err(&pdev->dev, "odin_dev_init failed\n"); + goto err_out; + } + + err = odin_hw_init(tc, core_clock, mem_clock, + mem_latency, mem_wresp_latency, mem_mode); + if (err) { + dev_err(&pdev->dev, "odin_hw_init failed\n"); + goto err_dev_cleanup; + } + + err = odin_enable_irq(tc); + if (err) { + dev_err(&pdev->dev, + "Failed to initialise IRQ\n"); + goto err_dev_cleanup; + } + +err_out: + return err; + +err_dev_cleanup: + odin_dev_cleanup(tc); + goto err_out; +} + +int odin_cleanup(struct tc_device *tc) +{ + /* + * Make sure we don't attempt to clean-up after an invalid device. + * We'll have already unmapped the PCI i/o space so cannot access + * anything now. 
+ */ + if (tc->version != TC_INVALID_VERSION) { + odin_disable_irq(tc); + odin_dev_cleanup(tc); + } + + return 0; +} + +int odin_register_pdp_device(struct tc_device *tc) +{ + int err = 0; + resource_size_t reg_start = pci_resource_start(tc->pdev, ODN_SYS_BAR); + struct resource pdp_resources_odin[] = { + DEFINE_RES_MEM_NAMED(reg_start + + ODN_PDP_REGS_OFFSET, /* start */ + ODN_PDP_REGS_SIZE, /* size */ + "pdp-regs"), + DEFINE_RES_MEM_NAMED(reg_start + + ODN_SYS_REGS_OFFSET + + common_reg_offset(tc, REG_BANK_ODN_CLK_BLK) + + ODN_PDP_P_CLK_OUT_DIVIDER_REG1, /* start */ + ODN_PDP_P_CLK_IN_DIVIDER_REG - + ODN_PDP_P_CLK_OUT_DIVIDER_REG1 + 4, /* size */ + "pll-regs"), + DEFINE_RES_MEM_NAMED(reg_start + + ODN_SYS_REGS_OFFSET + + ODN_REG_BANK_CORE, /* start */ + ODN_CORE_MMCM_LOCK_STATUS + 4, /* size */ + "odn-core"), + }; + + struct tc_pdp_platform_data pdata = { +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + .ion_device = tc->ion_device, + .ion_heap_id = ION_HEAP_TC_PDP, +#endif + .memory_base = tc->tc_mem.base, + .pdp_heap_memory_base = tc->pdp_heap_mem_base, + .pdp_heap_memory_size = tc->pdp_heap_mem_size, + }; + struct platform_device_info pdp_device_info = { + .parent = &tc->pdev->dev, + .name = ODN_DEVICE_NAME_PDP, + .id = -2, + .data = &pdata, + .size_data = sizeof(pdata), + .dma_mask = odin_get_pdp_dma_mask(tc), + }; + + pdp_device_info.res = pdp_resources_odin; + pdp_device_info.num_res = ARRAY_SIZE(pdp_resources_odin); + + tc->pdp_dev = platform_device_register_full(&pdp_device_info); + if (IS_ERR(tc->pdp_dev)) { + err = PTR_ERR(tc->pdp_dev); + dev_err(&tc->pdev->dev, + "Failed to register PDP device (%d)\n", err); + tc->pdp_dev = NULL; + goto err_out; + } + +err_out: + return err; +} + +int odin_register_ext_device(struct tc_device *tc) +{ +#if defined(SUPPORT_RGX) + int err = 0; + struct resource odin_rogue_resources[] = { + DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev, + ODN_DUT_SOCIF_BAR), + ODN_DUT_SOCIF_SIZE, 
"rogue-regs"), + }; + struct tc_rogue_platform_data pdata = { +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + .ion_device = tc->ion_device, + .ion_heap_id = ION_HEAP_TC_ROGUE, +#endif + .mem_mode = tc->mem_mode, + .tc_memory_base = tc->tc_mem.base, + .pdp_heap_memory_base = tc->pdp_heap_mem_base, + .pdp_heap_memory_size = tc->pdp_heap_mem_size, + .rogue_heap_memory_base = tc->ext_heap_mem_base, + .rogue_heap_memory_size = tc->ext_heap_mem_size, +#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) + .secure_heap_memory_base = tc->secure_heap_mem_base, + .secure_heap_memory_size = tc->secure_heap_mem_size, +#endif + }; + struct platform_device_info odin_rogue_dev_info = { + .parent = &tc->pdev->dev, + .name = TC_DEVICE_NAME_ROGUE, + .id = -2, + .res = odin_rogue_resources, + .num_res = ARRAY_SIZE(odin_rogue_resources), + .data = &pdata, + .size_data = sizeof(pdata), + .dma_mask = odin_get_rogue_dma_mask(tc), + }; + + tc->ext_dev + = platform_device_register_full(&odin_rogue_dev_info); + + if (IS_ERR(tc->ext_dev)) { + err = PTR_ERR(tc->ext_dev); + dev_err(&tc->pdev->dev, + "Failed to register rogue device (%d)\n", err); + tc->ext_dev = NULL; + } + return err; +#else /* defined(SUPPORT_RGX) */ + return 0; +#endif /* defined(SUPPORT_RGX) */ +} + +void odin_enable_interrupt_register(struct tc_device *tc, + int interrupt_id) +{ + u32 val; + u32 flag; + + switch (interrupt_id) { + case TC_INTERRUPT_PDP: + dev_info(&tc->pdev->dev, + "Enabling Odin PDP interrupts\n"); + break; + case TC_INTERRUPT_EXT: + dev_info(&tc->pdev->dev, + "Enabling Odin DUT interrupts\n"); + break; + default: + dev_err(&tc->pdev->dev, + "Error - illegal interrupt id\n"); + return; + } + + val = ioread32(tc->tcf.registers + + common_reg_offset(tc, CORE_INTERRUPT_ENABLE)); + flag = odin_interrupt_id_to_flag(interrupt_id); + val |= flag; + iowrite32(val, tc->tcf.registers + + common_reg_offset(tc, CORE_INTERRUPT_ENABLE)); +} + +void odin_disable_interrupt_register(struct tc_device 
*tc, + int interrupt_id) +{ + u32 val; + + switch (interrupt_id) { + case TC_INTERRUPT_PDP: + dev_info(&tc->pdev->dev, + "Disabling Odin PDP interrupts\n"); + break; + case TC_INTERRUPT_EXT: + dev_info(&tc->pdev->dev, + "Disabling Odin DUT interrupts\n"); + break; + default: + dev_err(&tc->pdev->dev, + "Error - illegal interrupt id\n"); + return; + } + val = ioread32(tc->tcf.registers + + common_reg_offset(tc, CORE_INTERRUPT_ENABLE)); + val &= ~(odin_interrupt_id_to_flag(interrupt_id)); + iowrite32(val, tc->tcf.registers + + common_reg_offset(tc, CORE_INTERRUPT_ENABLE)); +} + +irqreturn_t odin_irq_handler(int irq, void *data) +{ + u32 interrupt_status; + u32 interrupt_clear = 0; + unsigned long flags; + irqreturn_t ret = IRQ_NONE; + struct tc_device *tc = (struct tc_device *)data; + + spin_lock_irqsave(&tc->interrupt_handler_lock, flags); + +#if defined(TC_FAKE_INTERRUPTS) + /* If we're faking interrupts pretend we got both ext and PDP ints */ + interrupt_status = ODN_INTERRUPT_STATUS_DUT + | ODN_INTERRUPT_STATUS_PDP1; +#else + interrupt_status = ioread32(tc->tcf.registers + + common_reg_offset(tc, + CORE_INTERRUPT_STATUS)); +#endif + + if (interrupt_status & ODN_INTERRUPT_STATUS_DUT) { + struct tc_interrupt_handler *ext_int = + &tc->interrupt_handlers[TC_INTERRUPT_EXT]; + + if (ext_int->enabled && ext_int->handler_function) { + ext_int->handler_function(ext_int->handler_data); + interrupt_clear |= ODN_INTERRUPT_CLEAR_DUT; + } + ret = IRQ_HANDLED; + } + if (interrupt_status & ODN_INTERRUPT_STATUS_PDP1) { + struct tc_interrupt_handler *pdp_int = + &tc->interrupt_handlers[TC_INTERRUPT_PDP]; + + if (pdp_int->enabled && pdp_int->handler_function) { + pdp_int->handler_function(pdp_int->handler_data); + interrupt_clear |= ODN_INTERRUPT_CLEAR_PDP1; + } + ret = IRQ_HANDLED; + } + + if (interrupt_clear) + iowrite32(interrupt_clear, + tc->tcf.registers + + common_reg_offset(tc, CORE_INTERRUPT_CLR)); + + /* + * Orion PDP interrupts are occasionally masked because, for unknown 
+ * reasons, a vblank goes without being asserted for about 1000 ms. This + * feature is not present on Odin, and setting the + * INTERRUPT_TIMEOUT_THRESHOLD register to 0 does not seem to disable it + * either. This is probably caused by a bug in some versions of Sirius + * RTL. Also this bug seems to only affect PDP interrupts, but not the + * DUT. This might sometimes lead to a sudden jitter effect in the + * render. Further investigation is pending before this code can + * be safely removed. + */ + + if (tc->orion) { + if (REG_FIELD_GET(ioread32(tc->tcf.registers + + SRS_CORE_INTERRUPT_TIMEOUT_CLR), + SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT)) { + dev_warn(&tc->pdev->dev, + "Orion PDP interrupts were masked, " + "clearing now\n"); + iowrite32(SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_MASK, + tc->tcf.registers + SRS_CORE_INTERRUPT_TIMEOUT_CLR); + } + } + + spin_unlock_irqrestore(&tc->interrupt_handler_lock, flags); + + return ret; +} + +int odin_sys_info(struct tc_device *tc, u32 *tmp, u32 *pll) +{ + *tmp = 0; + *pll = 0; + return 0; +} + +int odin_sys_strings(struct tc_device *tc, + char *str_fpga_rev, size_t size_fpga_rev, + char *str_tcf_core_rev, size_t size_tcf_core_rev, + char *str_tcf_core_target_build_id, + size_t size_tcf_core_target_build_id, + char *str_pci_ver, size_t size_pci_ver, + char *str_macro_ver, size_t size_macro_ver) +{ + u32 tcver = tc_odin_subvers(&tc->pdev->dev); + char temp_str[12]; + u32 val; + + /* Read the Odin major and minor revision ID register Rx-xx */ + val = ioread32(tc->tcf.registers + + common_reg_offset(tc, CORE_REVISION)); + + snprintf(str_tcf_core_rev, + size_tcf_core_rev, + "%d.%d", + HEX2DEC((val & REVISION_MAJOR_MASK[tcver]) + >> REVISION_MAJOR_SHIFT[tcver]), + HEX2DEC((val & REVISION_MINOR_MASK[tcver]) + >> REVISION_MINOR_SHIFT[tcver])); + + dev_info(&tc->pdev->dev, "%s core revision %s\n", + odin_tc_name(tc), str_tcf_core_rev); + + /* Read the Odin register containing the Perforce changelist + * value 
that the FPGA build was generated from + */ + val = ioread32(tc->tcf.registers + + common_reg_offset(tc, CORE_CHANGE_SET)); + + snprintf(str_tcf_core_target_build_id, + size_tcf_core_target_build_id, + "%d", + (val & CHANGE_SET_SET_MASK[tcver]) + >> CHANGE_SET_SET_SHIFT[tcver]); + + /* Read the Odin User_ID register containing the User ID for + * identification of a modified build + */ + val = ioread32(tc->tcf.registers + common_reg_offset(tc, CORE_USER_ID)); + + snprintf(temp_str, + sizeof(temp_str), + "%d", + HEX2DEC((val & USER_ID_ID_MASK[tcver]) + >> USER_ID_ID_SHIFT[tcver])); + + /* Read the Odin User_Build register containing the User build + * number for identification of modified builds + */ + val = ioread32(tc->tcf.registers + + common_reg_offset(tc, CORE_USER_BUILD)); + + snprintf(temp_str, + sizeof(temp_str), + "%d", + HEX2DEC((val & USER_BUILD_BUILD_MASK[tcver]) + >> USER_BUILD_BUILD_SHIFT[tcver])); + + return 0; +} + +const char *odin_tc_name(struct tc_device *tc) +{ + if (tc->odin) + return "Odin"; + else if (tc->orion) + return "Orion"; + else + return "Unknown TC"; +} diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_odin.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_odin.h new file mode 100644 index 000000000000..63b7ca593109 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_odin.h @@ -0,0 +1,76 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _ODIN_DRV_H +#define _ODIN_DRV_H + +#include "tc_drv_internal.h" +#include "odin_defs.h" +#include "orion_defs.h" + +int odin_init(struct tc_device *tc, struct pci_dev *pdev, + int core_clock, int mem_clock, + int pdp_mem_size, int secure_mem_size, + int mem_latency, int mem_wresp_latency, int mem_mode); +int odin_cleanup(struct tc_device *tc); + +int odin_register_pdp_device(struct tc_device *tc); +int odin_register_ext_device(struct tc_device *tc); + +void odin_enable_interrupt_register(struct tc_device *tc, + int interrupt_id); +void odin_disable_interrupt_register(struct tc_device *tc, + int interrupt_id); + +irqreturn_t odin_irq_handler(int irq, void *data); + +int odin_sys_info(struct tc_device *tc, u32 *tmp, u32 *pll); +int odin_sys_strings(struct tc_device *tc, + char *str_fpga_rev, size_t size_fpga_rev, + char *str_tcf_core_rev, size_t size_tcf_core_rev, + char *str_tcf_core_target_build_id, + size_t size_tcf_core_target_build_id, + char *str_pci_ver, size_t size_pci_ver, + char *str_macro_ver, size_t size_macro_ver); + +const char *odin_tc_name(struct tc_device *tc); +#endif /* _ODIN_DRV_H */ diff --git a/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_odin_common_regs.h b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_odin_common_regs.h new file mode 100644 index 000000000000..046969ab8f9e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/kernel/drivers/staging/imgtec/tc/tc_odin_common_regs.h @@ -0,0 +1,107 @@ +/*************************************************************************/ /*! +@File odin_common_regs.h +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef __TC_ODIN_COMMON_REGS_H__ +#define __TC_ODIN_COMMON_REGS_H__ + +#include +#include + +struct tc_device; + +enum odin_common_regs { + CORE_REVISION = 0, + CORE_CHANGE_SET, + CORE_USER_ID, + CORE_USER_BUILD, + CORE_INTERRUPT_ENABLE, + CORE_INTERRUPT_CLR, + CORE_INTERRUPT_STATUS, + REG_BANK_ODN_CLK_BLK, +}; + +#define ODIN_REGNAME(REG_NAME) "ODN_" __stringify(REG_NAME) +#define ORION_REGNAME(REG_NAME) "SRS_" __stringify(REG_NAME) + +struct odin_orion_reg { + u32 odin_offset; + u32 orion_offset; + const char *odin_name; + const char *orion_name; +}; + +#define COMMON_REG_ENTRY(REG) \ + [REG] = { \ + .odin_offset = ODN_##REG, \ + .orion_offset = SRS_##REG, \ + .odin_name = ODIN_REGNAME(REG), \ + .orion_name = ORION_REGNAME(REG), \ + } + +static const struct odin_orion_reg common_regs[] = { + COMMON_REG_ENTRY(CORE_REVISION), + COMMON_REG_ENTRY(CORE_CHANGE_SET), + COMMON_REG_ENTRY(CORE_USER_ID), + COMMON_REG_ENTRY(CORE_USER_BUILD), + COMMON_REG_ENTRY(CORE_INTERRUPT_ENABLE), + COMMON_REG_ENTRY(CORE_INTERRUPT_CLR), + COMMON_REG_ENTRY(CORE_INTERRUPT_STATUS), + COMMON_REG_ENTRY(REG_BANK_ODN_CLK_BLK), +}; + +static inline const u32 common_reg_offset(struct tc_device *tc, u32 reg) +{ + if (tc->odin) + return common_regs[reg].odin_offset; + else + return common_regs[reg].orion_offset; +} + +static inline const char *common_reg_name(struct tc_device *tc, u32 reg) +{ + if (tc->odin) + return common_regs[reg].odin_name; + else + return common_regs[reg].orion_name; +} + +#endif /* __TC_ODIN_COMMON_REGS_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/include/htbuffer_sf.h b/drivers/mcst/gpu-imgtec/services/include/htbuffer_sf.h new file mode 100644 index 000000000000..27881cf27db7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/htbuffer_sf.h @@ -0,0 +1,237 @@ +/*************************************************************************/ /*! 
+@File htbuffer_sf.h +@Title Host Trace Buffer interface string format specifiers +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the Host Trace Buffer logging messages. The following + list are the messages the host driver prints. Changing anything + but the first column or spelling mistakes in the strings will + break compatibility with log files created with older/newer + driver versions. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef HTBUFFER_SF_H +#define HTBUFFER_SF_H + +#if defined(__cplusplus) +extern "C" { +#endif + + +/****************************************************************************** + * *DO*NOT* rearrange or delete lines in SFIDLIST or SFGROUPLIST or you + * WILL BREAK host tracing message compatibility with previous + * driver versions. Only add new ones, if so required. + *****************************************************************************/ + + +/* String used in pvrdebug -h output */ +#define HTB_LOG_GROUPS_STRING_LIST "ctrl,mmu,sync,main,brg" + +/* Used in print statements to display log group state, one %s per group defined */ +#define HTB_LOG_ENABLED_GROUPS_LIST_PFSPEC "%s%s%s%s%s" + +/* Available log groups - Master template + * + * Group usage is as follows: + * CTRL - Internal Host Trace information and synchronisation data + * MMU - MMU page mapping information + * SYNC - Synchronisation debug + * MAIN - Data master kicks, etc. 
tying in with the MAIN group in FWTrace + * DBG - Temporary debugging group, logs not to be left in the driver + * + */ +#define HTB_LOG_SFGROUPLIST \ + X( HTB_GROUP_NONE, NONE ) \ +/* gid, group flag / apphint name */ \ + X( HTB_GROUP_CTRL, CTRL ) \ + X( HTB_GROUP_MMU, MMU ) \ + X( HTB_GROUP_SYNC, SYNC ) \ + X( HTB_GROUP_MAIN, MAIN ) \ + X( HTB_GROUP_BRG, BRG ) \ +/* Debug group HTB_GROUP_DBG must always be last */ \ + X( HTB_GROUP_DBG, DBG ) + + +/* Table of String Format specifiers, the group they belong and the number of + * arguments each expects. Xmacro styled macros are used to generate what is + * needed without requiring hand editing. + * + * id : unique id within a group + * gid : group id as defined above + * sym name : symbolic name of enumerations used to identify message strings + * string : Actual string + * #args : number of arguments the string format requires + */ +#define HTB_LOG_SFIDLIST \ +/*id, gid, sym name, string, # arguments */ \ +X( 0, HTB_GROUP_NONE, HTB_SF_FIRST, "You should not use this string", 0) \ +\ +X( 1, HTB_GROUP_CTRL, HTB_SF_CTRL_LOGMODE, "HTB log mode set to %d (1- all PID, 2 - restricted PID)\n", 1) \ +X( 2, HTB_GROUP_CTRL, HTB_SF_CTRL_ENABLE_PID, "HTB enable logging for PID %d\n", 1) \ +X( 3, HTB_GROUP_CTRL, HTB_SF_CTRL_ENABLE_GROUP, "HTB enable logging groups 0x%08x\n", 1) \ +X( 4, HTB_GROUP_CTRL, HTB_SF_CTRL_LOG_LEVEL, "HTB log level set to %d\n", 1) \ +X( 5, HTB_GROUP_CTRL, HTB_SF_CTRL_OPMODE, "HTB operating mode set to %d (1 - droplatest, 2 - drop oldest, 3 - block)\n", 1) \ +X( 6, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_SCALE, "HTBFWSync OSTS=%08x%08x CRTS=%08x%08x CalcClkSpd=%d\n", 5) \ +X( 7, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_SCALE_RPT, "FW Sync scale info OSTS=%08x%08x CRTS=%08x%08x CalcClkSpd=%d\n", 5) \ +X( 8, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_MARK, "FW Sync Partition marker: %d\n", 1) \ +X( 9, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_MARK_RPT, "FW Sync Partition repeat: %d\n", 1) \ +X( 10, HTB_GROUP_CTRL, 
HTB_SF_CTRL_FWSYNC_MARK_SCALE, "Text not used", 6)\ +\ +X( 1, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_TABLE, "MMU page op table entry page_id=%08x%08x index=%d level=%d val=%08x%08x map=%d\n", 7) \ +X( 2, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_ALLOC, "MMU allocating DevVAddr from %08x%08x to %08x%08x\n", 4) \ +X( 3, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_FREE, "MMU freeing DevVAddr from %08x%08x to %08x%08x\n", 4) \ +X( 4, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_MAP, "MMU mapping DevVAddr %08x%08x to DevPAddr %08x%08x\n", 4) \ +X( 5, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_PMRMAP, "MMU mapping PMR DevVAddr %08x%08x to DevPAddr %08x%08x\n", 4) \ +X( 6, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_UNMAP, "MMU unmapping DevVAddr %08x%08x\n", 2) \ +\ +X( 1, HTB_GROUP_SYNC, HTB_SF_SYNC_SERVER_ALLOC, "Server sync allocation [%08X]\n", 1) \ +X( 2, HTB_GROUP_SYNC, HTB_SF_SYNC_SERVER_UNREF, "Server sync unreferenced [%08X]\n", 1) \ +X( 3, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_CREATE, "Sync OP create 0x%08x, block count=%d, server syncs=%d, client syncs=%d\n", 4) \ +X( 4, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_TAKE, "Sync OP take 0x%08x server syncs=%d, client syncs=%d\n", 3) \ +X( 5, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_COMPLETE, "Sync OP complete 0x%08x\n", 1) \ +X( 6, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_DESTROY, "Sync OP destroy 0x%08x\n", 1) \ +\ +X( 1, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_TA, "Kick TA: FWCtx %08X @ %d\n", 2) \ +X( 2, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_3D, "Kick 3D: FWCtx %08X @ %d\n", 2) \ +X( 3, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_CDM, "Kick CDM: FWCtx %08X @ %d\n", 2) \ +X( 4, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_RTU, "Kick RTU: FWCtx %08X @ %d\n", 2) \ +X( 5, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_SHG, "Kick SHG: FWCtx %08X @ %d\n", 2) \ +X( 6, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_2D, "Kick 2D: FWCtx %08X @ %d\n", 2) \ +X( 7, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_UNCOUNTED, "Kick (uncounted) for all DMs\n", 0) \ +X( 8, HTB_GROUP_MAIN, HTB_SF_MAIN_FWCCB_CMD, "FW CCB Cmd: %d\n", 1) \ +X( 9, HTB_GROUP_MAIN, 
HTB_SF_MAIN_PRE_POWER, "Pre-power duration @ phase [%d] (0-shutdown,1-startup) RGX: %llu ns SYS: %llu ns\n", 3) \ +X(10, HTB_GROUP_MAIN, HTB_SF_MAIN_POST_POWER, "Post-power duration @ phase [%d] (0-shutdown,1-startup) SYS: %llu ns RGX: %llu ns\n", 3) \ +\ +X( 1, HTB_GROUP_BRG, HTB_SF_BRG_BRIDGE_CALL, "Bridge call: start: %010u: bid %03d fid %d\n", 3) \ +X( 2, HTB_GROUP_BRG, HTB_SF_BRG_BRIDGE_CALL_ERR, "Bridge call: start: %010u: bid %03d fid %d error %d\n", 4) \ +\ +X( 1, HTB_GROUP_DBG, HTB_SF_DBG_INTPAIR, "0x%8.8x 0x%8.8x\n", 2) \ +\ +X( 65535, HTB_GROUP_NONE, HTB_SF_LAST, "You should not use this string\n", 15) + + + +/* gid - Group numbers */ +typedef enum _HTB_LOG_SFGROUPS { +#define X(A,B) A, + HTB_LOG_SFGROUPLIST +#undef X +} HTB_LOG_SFGROUPS; + + +/* Group flags are stored in an array of elements. + * Each of which have a certain number of bits. + */ +#define HTB_FLAG_EL_T IMG_UINT32 +#define HTB_FLAG_NUM_BITS_IN_EL (sizeof(HTB_FLAG_EL_T) * 8) + +#define HTB_LOG_GROUP_FLAG_GROUP(gid) ((gid-1) / HTB_FLAG_NUM_BITS_IN_EL) +#define HTB_LOG_GROUP_FLAG(gid) (gid ? (0x1 << ((gid-1)%HTB_FLAG_NUM_BITS_IN_EL)) : 0) +#define HTB_LOG_GROUP_FLAG_NAME(gid) HTB_LOG_TYPE_ ## gid + +/* Group enable flags */ +typedef enum _HTB_LOG_TYPE { +#define X(a, b) HTB_LOG_GROUP_FLAG_NAME(b) = HTB_LOG_GROUP_FLAG(a), + HTB_LOG_SFGROUPLIST +#undef X +} HTB_LOG_TYPE; + + + +/* The symbolic names found in the table above are assigned an ui32 value of + * the following format: + * 31 30 28 27 20 19 16 15 12 11 0 bits + * - --- ---- ---- ---- ---- ---- ---- ---- + * 0-11: id number + * 12-15: group id number + * 16-19: number of parameters + * 20-27: unused + * 28-30: active: identify SF packet, otherwise regular int32 + * 31: reserved for signed/unsigned compatibility + * + * The following macro assigns those values to the enum generated SF ids list. 
+ */ +#define HTB_LOG_IDMARKER (0x70000000) +#define HTB_LOG_CREATESFID(a,b,e) (((a) | (b << 12) | (e << 16)) | HTB_LOG_IDMARKER) + +#define HTB_LOG_IDMASK (0xFFF00000) +#define HTB_LOG_VALIDID(I) ( ((I) & HTB_LOG_IDMASK) == HTB_LOG_IDMARKER ) + +typedef enum HTB_LOG_SFids { +#define X(a, b, c, d, e) c = HTB_LOG_CREATESFID(a,b,e), + HTB_LOG_SFIDLIST +#undef X +} HTB_LOG_SFids; + +/* Return the group id that the given (enum generated) id belongs to */ +#define HTB_SF_GID(x) (((x)>>12) & 0xf) +/* Future improvement to support log levels */ +#define HTB_SF_LVL(x) (0) +/* Returns how many arguments the SF(string format) for the given + * (enum generated) id requires. + */ +#define HTB_SF_PARAMNUM(x) (((x)>>16) & 0xf) +/* Returns the id of given enum */ +#define HTB_SF_ID(x) (x & 0xfff) + +/* Format of messages is: SF:PID:TIMEPT1:TIMEPT2:[PARn]* + */ +#define HTB_LOG_HEADER_SIZE 4 +#define HTB_LOG_MAX_PARAMS 15 + +#if defined(__cplusplus) +} +#endif + +/* Defines for handling MARK_SCALE special case */ +#define HTB_GID_CTRL 1 +#define HTB_ID_MARK_SCALE 10 +#define HTB_MARK_SCALE_ARG_ARRAY_SIZE 6 + +/* Defines for extracting args from array for special case MARK_SCALE */ +#define HTB_ARG_SYNCMARK 0 +#define HTB_ARG_OSTS_PT1 1 +#define HTB_ARG_OSTS_PT2 2 +#define HTB_ARG_CRTS_PT1 3 +#define HTB_ARG_CRTS_PT2 4 +#define HTB_ARG_CLKSPD 5 + +#endif /* HTBUFFER_SF_H */ +/***************************************************************************** + End of file (htbuffer_sf.h) +*****************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/include/htbuffer_types.h b/drivers/mcst/gpu-imgtec/services/include/htbuffer_types.h new file mode 100644 index 000000000000..a404bf8b7b10 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/htbuffer_types.h @@ -0,0 +1,118 @@ +/*************************************************************************/ /*! +@File htbuffer_types.h +@Title Host Trace Buffer types. 
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Host Trace Buffer provides a mechanism to log Host events to a + buffer in a similar way to the Firmware Trace mechanism. + Host Trace Buffer logs data using a Transport Layer buffer. + The Transport Layer and pvrtld tool provides the mechanism to + retrieve the trace data. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef HTBUFFER_TYPES_H +#define HTBUFFER_TYPES_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "img_defs.h" +#include "htbuffer_sf.h" + +/* The group flags array of ints large enough to store all the group flags */ +#define HTB_FLAG_NUM_EL (((HTB_GROUP_DBG-1) / HTB_FLAG_NUM_BITS_IN_EL) + 1) +extern IMG_INTERNAL HTB_FLAG_EL_T g_auiHTBGroupEnable[HTB_FLAG_NUM_EL]; + +#define HTB_GROUP_ENABLED(SF) (g_auiHTBGroupEnable[HTB_LOG_GROUP_FLAG_GROUP(HTB_SF_GID(SF))] & HTB_LOG_GROUP_FLAG(HTB_SF_GID(SF))) + +/*************************************************************************/ /*! + Host Trace Buffer operation mode + Care must be taken if changing this enum to ensure the MapFlags[] array + in htbserver.c is kept in-step. +*/ /**************************************************************************/ +typedef enum +{ + /*! Undefined operation mode */ + HTB_OPMODE_UNDEF = 0, + + /*! Drop latest, intended for continuous logging to a UM daemon. + * If the daemon does not keep up, the most recent log data + * will be dropped + */ + HTB_OPMODE_DROPLATEST, + + /*! Drop oldest, intended for crash logging. + * Data will be continuously written to a circular buffer. + * After a crash the buffer will contain events leading up to the crash + */ + HTB_OPMODE_DROPOLDEST, + + /*! 
Block write if buffer is full */ + HTB_OPMODE_BLOCK, + + HTB_OPMODE_LAST = HTB_OPMODE_BLOCK +} HTB_OPMODE_CTRL; + + +/*************************************************************************/ /*! + Host Trace Buffer log mode control +*/ /**************************************************************************/ +typedef enum +{ + /*! Undefined log mode, used if update is not applied */ + HTB_LOGMODE_UNDEF = 0, + + /*! Log trace messages for all PIDs. */ + HTB_LOGMODE_ALLPID, + + /*! Log trace messages for specific PIDs only. */ + HTB_LOGMODE_RESTRICTEDPID, + + HTB_LOGMODE_LAST = HTB_LOGMODE_RESTRICTEDPID +} HTB_LOGMODE_CTRL; + + +#if defined(__cplusplus) +} +#endif + +#endif /* HTBUFFER_TYPES_H */ + +/****************************************************************************** + End of file (htbuffer_types.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/include/info_page_client.h b/drivers/mcst/gpu-imgtec/services/include/info_page_client.h new file mode 100644 index 000000000000..9df2461b55fb --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/info_page_client.h @@ -0,0 +1,89 @@ +/*************************************************************************/ /*! +@File +@Title Kernel/User mode general purpose shared memory. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description General purpose shared memory (i.e. information page) mapped by + kernel space driver and user space clients. All info page + entries are sizeof(IMG_UINT32) on both 32/64-bit environments. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef INFO_PAGE_CLIENT_H +#define INFO_PAGE_CLIENT_H + +#include "device_connection.h" +#include "info_page_defs.h" +#if defined(__KERNEL__) +#include "pvrsrv.h" +#endif + +/*************************************************************************/ /*! +@Function GetInfoPage + +@Description Return Info Page address + +@Input hDevConnection - Services device connection + +@Return Info Page address +*/ +/*****************************************************************************/ +static INLINE IMG_PUINT32 GetInfoPage(SHARED_DEV_CONNECTION hDevConnection) +{ +#if defined(__KERNEL__) + return (PVRSRVGetPVRSRVData())->pui32InfoPage; +#else + return hDevConnection->pui32InfoPage; +#endif +} + +/*************************************************************************/ /*! +@Function GetInfoPageDebugFlags + +@Description Return Info Page debug flags + +@Input hDevConnection - Services device connection + +@Return Info Page debug flags +*/ +/*****************************************************************************/ +static INLINE IMG_UINT32 GetInfoPageDebugFlags(SHARED_DEV_CONNECTION hDevConnection) +{ + return GetInfoPage(hDevConnection)[DEBUG_FEATURE_FLAGS]; +} + +#endif /* INFO_PAGE_CLIENT_H */ diff --git a/drivers/mcst/gpu-imgtec/services/include/info_page_defs.h b/drivers/mcst/gpu-imgtec/services/include/info_page_defs.h new file mode 100644 index 000000000000..e4420861bac3 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/info_page_defs.h @@ -0,0 +1,90 @@ +/*************************************************************************/ /*! +@File +@Title Kernel/User mode general purpose shared memory. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description General purpose shared memory (i.e. information page) mapped by + kernel space driver and user space clients. 
All information page + entries are sizeof(IMG_UINT32) on both 32/64-bit environments. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _INFO_PAGE_DEFS_H_ +#define _INFO_PAGE_DEFS_H_ + + +/* CacheOp information page entries */ +#define CACHEOP_INFO_IDX_START 0x00 +#define CACHEOP_INFO_UMKMTHRESHLD (CACHEOP_INFO_IDX_START + 1) /*!< UM=>KM routing threshold in bytes */ +#define CACHEOP_INFO_KMDFTHRESHLD (CACHEOP_INFO_IDX_START + 2) /*!< KM/DF threshold in bytes */ +#define CACHEOP_INFO_LINESIZE (CACHEOP_INFO_IDX_START + 3) /*!< CPU data cache line size */ +#define CACHEOP_INFO_PGSIZE (CACHEOP_INFO_IDX_START + 4) /*!< CPU MMU page size */ +#define CACHEOP_INFO_IDX_END (CACHEOP_INFO_IDX_START + 5) + +/* HWPerf information page entries */ +#define HWPERF_INFO_IDX_START (CACHEOP_INFO_IDX_END) +#define HWPERF_FILTER_SERVICES_IDX (HWPERF_INFO_IDX_START + 0) +#define HWPERF_FILTER_EGL_IDX (HWPERF_INFO_IDX_START + 1) +#define HWPERF_FILTER_OPENGLES_IDX (HWPERF_INFO_IDX_START + 2) +#define HWPERF_FILTER_OPENCL_IDX (HWPERF_INFO_IDX_START + 3) +#define HWPERF_FILTER_VULKAN_IDX (HWPERF_INFO_IDX_START + 4) +#define HWPERF_INFO_IDX_END (HWPERF_INFO_IDX_START + 5) + +/* timeout values */ +#define TIMEOUT_INFO_IDX_START (HWPERF_INFO_IDX_END) +#define TIMEOUT_INFO_VALUE_RETRIES (TIMEOUT_INFO_IDX_START + 0) +#define TIMEOUT_INFO_VALUE_TIMEOUT_MS (TIMEOUT_INFO_IDX_START + 1) +#define TIMEOUT_INFO_CONDITION_RETRIES (TIMEOUT_INFO_IDX_START + 2) +#define TIMEOUT_INFO_CONDITION_TIMEOUT_MS 
(TIMEOUT_INFO_IDX_START + 3) +#define TIMEOUT_INFO_EVENT_OBJECT_RETRIES (TIMEOUT_INFO_IDX_START + 4) +#define TIMEOUT_INFO_EVENT_OBJECT_TIMEOUT_MS (TIMEOUT_INFO_IDX_START + 5) +#define TIMEOUT_INFO_IDX_END (TIMEOUT_INFO_IDX_START + 6) + +/* Bridge Info */ +#define BRIDGE_INFO_IDX_START (TIMEOUT_INFO_IDX_END) +#define BRIDGE_INFO_RGX_BRIDGES (BRIDGE_INFO_IDX_START + 0) +#define BRIDGE_INFO_PVR_BRIDGES (BRIDGE_INFO_IDX_START + 1) +#define BRIDGE_INFO_IDX_END (BRIDGE_INFO_IDX_START + 2) + +/* Debug features */ +#define DEBUG_FEATURE_FLAGS (BRIDGE_INFO_IDX_END) +#define DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED 0x1 +#define DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED 0x2 +#define DEBUG_FEATURE_FLAGS_IDX_END (DEBUG_FEATURE_FLAGS + 1) + + +#endif /* _INFO_PAGE_DEFS_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/include/km_apphint_defs_common.h b/drivers/mcst/gpu-imgtec/services/include/km_apphint_defs_common.h new file mode 100644 index 000000000000..424c5d272e0c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/km_apphint_defs_common.h @@ -0,0 +1,269 @@ +/*************************************************************************/ /*! +@File +@Title Services AppHint definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + + +#ifndef KM_APPHINT_DEFS_COMMON_H +#define KM_APPHINT_DEFS_COMMON_H + +/* +******************************************************************************* + Build variables + All of these should be configurable only through the 'default' value +******************************************************************************/ +#define APPHINT_LIST_BUILDVAR_COMMON \ +/* name, type, class, default, helper, */ \ +X(HWRDebugDumpLimit, UINT32, ALWAYS, PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT, NO_PARAM_TABLE ) \ +X(EnableTrustedDeviceAceConfig, BOOL, GPUVIRT_VAL, PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG, NO_PARAM_TABLE ) \ +X(CleanupThreadPriority, UINT32, NEVER, PVRSRV_APPHINT_CLEANUPTHREADPRIORITY, NO_PARAM_TABLE ) \ +X(WatchdogThreadPriority, UINT32, NEVER, PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY, NO_PARAM_TABLE ) \ +X(HWPerfClientBufferSize, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE, NO_PARAM_TABLE ) \ + +/* +******************************************************************************* + Module parameters +******************************************************************************/ +#define APPHINT_LIST_MODPARAM_COMMON \ +/* name, type, class, default, helper, */ \ +X(GeneralNon4KHeapPageSize, UINT32, ALWAYS, PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE, NO_PARAM_TABLE ) \ +\ +X(EnableSignatureChecks, BOOL, PDUMP, PVRSRV_APPHINT_ENABLESIGNATURECHECKS, NO_PARAM_TABLE ) \ +X(SignatureChecksBufSize, UINT32, PDUMP, PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE, NO_PARAM_TABLE ) \ +\ +X(DisableClockGating, BOOL, ALWAYS, PVRSRV_APPHINT_DISABLECLOCKGATING, NO_PARAM_TABLE ) \ +X(DisableDMOverlap, BOOL, ALWAYS, PVRSRV_APPHINT_DISABLEDMOVERLAP, NO_PARAM_TABLE ) \ +\ +X(EnableRandomContextSwitch, BOOL, VALIDATION, PVRSRV_APPHINT_ENABLERANDOMCONTEXTSWITCH, NO_PARAM_TABLE ) \ +X(EnableSoftResetContextSwitch, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLESOFTRESETCNTEXTSWITCH, NO_PARAM_TABLE ) \ 
+X(EnableFWContextSwitch, UINT32, ALWAYS, PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH, NO_PARAM_TABLE ) \ +X(FWContextSwitchProfile, UINT32, VALIDATION, PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE, NO_PARAM_TABLE ) \ +\ +X(EnableRDPowerIsland, UINT32, ALWAYS, PVRSRV_APPHINT_ENABLERDPOWERISLAND, NO_PARAM_TABLE ) \ +\ +X(DriverMode, UINT32, ALWAYS, PVRSRV_APPHINT_DRIVERMODE, NO_PARAM_TABLE ) \ +\ +X(FirmwarePerf, UINT32, VALIDATION, PVRSRV_APPHINT_FIRMWAREPERF, NO_PARAM_TABLE ) \ +\ +X(HWPerfFWBufSizeInKB, UINT32, VALIDATION, PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB, NO_PARAM_TABLE ) \ +X(HWPerfHostBufSizeInKB, UINT32, VALIDATION, PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB, NO_PARAM_TABLE ) \ +X(HWPerfHostThreadTimeoutInMS, UINT32, VALIDATION, PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS, NO_PARAM_TABLE ) \ +\ +X(JonesDisableMask, UINT32, VALIDATION, PVRSRV_APPHINT_JONESDISABLEMASK, NO_PARAM_TABLE ) \ +X(NewFilteringMode, BOOL, VALIDATION, PVRSRV_APPHINT_NEWFILTERINGMODE, NO_PARAM_TABLE ) \ +X(TruncateMode, UINT32, VALIDATION, PVRSRV_APPHINT_TRUNCATEMODE, NO_PARAM_TABLE ) \ +X(EmuMaxFreq, UINT32, ALWAYS, PVRSRV_APPHINT_EMUMAXFREQ, NO_PARAM_TABLE ) \ +X(GPIOValidationMode, UINT32, VALIDATION, PVRSRV_APPHINT_GPIOVALIDATIONMODE, NO_PARAM_TABLE ) \ +X(RGXBVNC, STRING, ALWAYS, PVRSRV_APPHINT_RGXBVNC, NO_PARAM_TABLE ) \ +\ +X(FWContextSwitchCrossDM, UINT32, ALWAYS, 0, NO_PARAM_TABLE ) \ +X(ValidateIrq, BOOL, VALIDATION, PVRSRV_APPHINT_VALIDATEIRQ, NO_PARAM_TABLE ) \ +\ +X(OSidRegion0Min, STRING, GPUVIRT_VAL, PVRSRV_APPHINT_OSIDREGION0MIN, NO_PARAM_TABLE ) \ +X(OSidRegion0Max, STRING, GPUVIRT_VAL, PVRSRV_APPHINT_OSIDREGION0MAX, NO_PARAM_TABLE ) \ +X(OSidRegion1Min, STRING, GPUVIRT_VAL, PVRSRV_APPHINT_OSIDREGION1MIN, NO_PARAM_TABLE ) \ +X(OSidRegion1Max, STRING, GPUVIRT_VAL, PVRSRV_APPHINT_OSIDREGION1MAX, NO_PARAM_TABLE ) \ +\ +X(TPUTrilinearFracMaskPDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE ) \ +X(TPUTrilinearFracMaskVDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE ) \ +X(TPUTrilinearFracMaskCDM, 
UINT32, VALIDATION, 0xF, NO_PARAM_TABLE ) \ +X(TPUTrilinearFracMaskTDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE ) \ +X(HTBufferSizeInKB, UINT32, ALWAYS, PVRSRV_APPHINT_HTBUFFERSIZE, NO_PARAM_TABLE ) \ +X(FWTraceBufSizeInDWords, UINT32, ALWAYS, PVRSRV_APPHINT_FWTRACEBUFSIZEINDWORDS, NO_PARAM_TABLE ) \ +\ +X(EnablePageFaultDebug, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG, NO_PARAM_TABLE ) \ +X(EnableFullSyncTracking, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING, NO_PARAM_TABLE ) \ +X(IgnoreHWReportedBVNC, BOOL, ALWAYS, PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC, NO_PARAM_TABLE ) \ +\ +X(PhysMemTestPasses, UINT32, ALWAYS, PVRSRV_APPHINT_PHYSMEMTESTPASSES, NO_PARAM_TABLE ) \ +\ +X(FBCDCVersionOverride, UINT32, VALIDATION, PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE, NO_PARAM_TABLE ) \ +X(TestSLRInterval, UINT32, VALIDATION, PVRSRV_APPHINT_TESTSLRINTERVAL, NO_PARAM_TABLE ) + +/* +******************************************************************************* + Debugfs parameters - driver configuration +******************************************************************************/ +#define APPHINT_LIST_DEBUGFS_COMMON \ +/* name, type, class, default, helper, */ \ +X(EnableHTBLogGroup, UINT32Bitfield, ALWAYS, PVRSRV_APPHINT_ENABLEHTBLOGGROUP, htb_loggroup_tbl ) \ +X(HTBOperationMode, UINT32List, ALWAYS, PVRSRV_APPHINT_HTBOPERATIONMODE, htb_opmode_tbl ) \ +X(EnableFTraceGPU, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLEFTRACEGPU, NO_PARAM_TABLE ) \ +X(HWPerfFWFilter, UINT64, ALWAYS, PVRSRV_APPHINT_HWPERFFWFILTER, NO_PARAM_TABLE ) \ +X(HWPerfHostFilter, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFHOSTFILTER, NO_PARAM_TABLE ) \ +X(HWPerfClientFilter_Services, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_SERVICES, NO_PARAM_TABLE ) \ +X(HWPerfClientFilter_EGL, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_EGL, NO_PARAM_TABLE ) \ +X(HWPerfClientFilter_OpenGLES, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGLES, NO_PARAM_TABLE ) \ +X(HWPerfClientFilter_OpenCL, UINT32, ALWAYS, 
PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENCL, NO_PARAM_TABLE ) \ +X(HWPerfClientFilter_Vulkan, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_VULKAN, NO_PARAM_TABLE ) \ +X(CacheOpConfig, UINT32, ALWAYS, PVRSRV_APPHINT_CACHEOPCONFIG, NO_PARAM_TABLE ) \ +X(CacheOpUMKMThresholdSize, UINT32, ALWAYS, PVRSRV_APPHINT_CACHEOPUMKMHRESHOLDSIZE, NO_PARAM_TABLE ) \ +X(TimeCorrClock, UINT32List, ALWAYS, PVRSRV_APPHINT_TIMECORRCLOCK, timecorr_clk_tbl ) + +/* +******************************************************************************* + Debugfs parameters - device configuration +******************************************************************************/ +#define APPHINT_LIST_DEBUGFS_DEVICE_COMMON \ +/* name, type, class, default, helper, */ \ +/* Device Firmware config */\ +X(AssertOnHWRTrigger, BOOL, ALWAYS, PVRSRV_APPHINT_ASSERTONHWRTRIGGER, NO_PARAM_TABLE ) \ +X(AssertOutOfMemory, BOOL, ALWAYS, PVRSRV_APPHINT_ASSERTOUTOFMEMORY, NO_PARAM_TABLE ) \ +X(CheckMList, BOOL, ALWAYS, PVRSRV_APPHINT_CHECKMLIST, NO_PARAM_TABLE ) \ +X(EnableHWR, BOOL, ALWAYS, APPHNT_BLDVAR_ENABLEHWR, NO_PARAM_TABLE ) \ +X(EnableLogGroup, UINT32Bitfield, ALWAYS, PVRSRV_APPHINT_ENABLELOGGROUP, fwt_loggroup_tbl ) \ +X(FirmwareLogType, UINT32List, ALWAYS, PVRSRV_APPHINT_FIRMWARELOGTYPE, fwt_logtype_tbl ) \ +/* Device host config */ \ +X(EnableAPM, UINT32, ALWAYS, PVRSRV_APPHINT_ENABLEAPM, NO_PARAM_TABLE ) \ +X(DisableFEDLogging, BOOL, ALWAYS, PVRSRV_APPHINT_DISABLEFEDLOGGING, NO_PARAM_TABLE ) \ +X(ZeroFreelist, BOOL, ALWAYS, PVRSRV_APPHINT_ZEROFREELIST, NO_PARAM_TABLE ) \ +X(DisablePDumpPanic, BOOL, PDUMP, PVRSRV_APPHINT_DISABLEPDUMPPANIC, NO_PARAM_TABLE ) \ +X(EnableFWPoisonOnFree, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLEFWPOISONONFREE, NO_PARAM_TABLE ) \ +X(GPUUnitsPowerChange, BOOL, VALIDATION, PVRSRV_APPHINT_GPUUNITSPOWERCHANGE, NO_PARAM_TABLE ) \ + +/* +******************************************************************************* + * Types used in the APPHINT_LIST_ lists must be defined here. 
+ * New types require specific handling code to be added +******************************************************************************/ +#define APPHINT_DATA_TYPE_LIST \ +X(BOOL) \ +X(UINT64) \ +X(UINT32) \ +X(UINT32Bitfield) \ +X(UINT32List) \ +X(STRING) + +#define APPHINT_CLASS_LIST \ +X(ALWAYS) \ +X(NEVER) \ +X(DEBUG) \ +X(PDUMP) \ +X(VALIDATION) \ +X(GPUVIRT_VAL) + +/* +******************************************************************************* + Visibility control for module parameters + These bind build variables to AppHint Visibility Groups. +******************************************************************************/ +#define APPHINT_ENABLED_CLASS_ALWAYS IMG_TRUE +#define APPHINT_ENABLED_CLASS_NEVER IMG_FALSE +#define apphint_modparam_class_ALWAYS(a, b, c) apphint_modparam_enable(a, b, c) +#if defined(DEBUG) + #define APPHINT_ENABLED_CLASS_DEBUG IMG_TRUE + #define apphint_modparam_class_DEBUG(a, b, c) apphint_modparam_enable(a, b, c) +#else + #define APPHINT_ENABLED_CLASS_DEBUG IMG_FALSE + #define apphint_modparam_class_DEBUG(a, b, c) +#endif +#if defined(PDUMP) + #define APPHINT_ENABLED_CLASS_PDUMP IMG_TRUE + #define apphint_modparam_class_PDUMP(a, b, c) apphint_modparam_enable(a, b, c) +#else + #define APPHINT_ENABLED_CLASS_PDUMP IMG_FALSE + #define apphint_modparam_class_PDUMP(a, b, c) +#endif +#if defined(SUPPORT_VALIDATION) + #define APPHINT_ENABLED_CLASS_VALIDATION IMG_TRUE + #define apphint_modparam_class_VALIDATION(a, b, c) apphint_modparam_enable(a, b, c) +#else + #define APPHINT_ENABLED_CLASS_VALIDATION IMG_FALSE + #define apphint_modparam_class_VALIDATION(a, b, c) +#endif +#if defined(SUPPORT_GPUVIRT_VALIDATION) + #define APPHINT_ENABLED_CLASS_GPUVIRT_VAL IMG_TRUE + #define apphint_modparam_class_GPUVIRT_VAL(a, b, c) apphint_modparam_enable(a, b, c) +#else + #define APPHINT_ENABLED_CLASS_GPUVIRT_VAL IMG_FALSE + #define apphint_modparam_class_GPUVIRT_VAL(a, b, c) +#endif + +/* 
+******************************************************************************* + AppHint defaults based on other build parameters +******************************************************************************/ +#if defined(HWR_DEFAULT_ENABLED) + #define APPHNT_BLDVAR_ENABLEHWR 1 +#else + #define APPHNT_BLDVAR_ENABLEHWR 0 +#endif +#if defined(DEBUG) + #define APPHNT_BLDVAR_DEBUG 1 + #define APPHNT_BLDVAR_DBGDUMPLIMIT RGXFWIF_HWR_DEBUG_DUMP_ALL +#else + #define APPHNT_BLDVAR_DEBUG 0 + #define APPHNT_BLDVAR_DBGDUMPLIMIT 1 +#endif +#if defined(PDUMP) +#define APPHNT_BLDVAR_ENABLESIGNATURECHECKS IMG_TRUE +#else +#define APPHNT_BLDVAR_ENABLESIGNATURECHECKS IMG_FALSE +#endif +#if defined(DEBUG) || defined(SUPPORT_VALIDATION) +#define APPHNT_BLDVAR_ENABLEPAGEFAULTDEBUG IMG_TRUE +#else +#define APPHNT_BLDVAR_ENABLEPAGEFAULTDEBUG IMG_FALSE +#endif + +#if defined(DEBUG) + #define APPHNT_PHYSMEMTEST_ENABLE 1 +#else + #define APPHNT_PHYSMEMTEST_ENABLE 0 +#endif + +/* Data types and actions */ +typedef enum { + APPHINT_DATA_TYPE_INVALID = 0, +#define X(a) APPHINT_DATA_TYPE_ ## a, + APPHINT_DATA_TYPE_LIST +#undef X + APPHINT_DATA_TYPE_MAX +} APPHINT_DATA_TYPE; + +typedef enum { +#define X(a) APPHINT_CLASS_ ## a, + APPHINT_CLASS_LIST +#undef X + APPHINT_CLASS_MAX +} APPHINT_CLASS; + +#endif /* KM_APPHINT_DEFS_COMMON_H */ diff --git a/drivers/mcst/gpu-imgtec/services/include/os_cpu_cache.h b/drivers/mcst/gpu-imgtec/services/include/os_cpu_cache.h new file mode 100644 index 000000000000..e2fc4133b9bf --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/os_cpu_cache.h @@ -0,0 +1,68 @@ +/*************************************************************************/ /*! +@File +@Title OS and CPU d-cache maintenance mechanisms +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Defines for cache management which are visible internally only +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _OS_CPU_CACHE_H_ +#define _OS_CPU_CACHE_H_ + +#include "info_page_defs.h" + +#define PVRSRV_CACHE_OP_TIMELINE 0x8 /*!< Request SW_SYNC timeline notification when executed */ + +#define CACHEFLUSH_ISA_X86 0x1 /*!< x86/x64 specific UM range-based cache flush */ +#define CACHEFLUSH_ISA_ARM64 0x2 /*!< Aarch64 specific UM range-based cache flush */ +#define CACHEFLUSH_ISA_GENERIC 0x3 /*!< Other ISA's without UM range-based cache flush */ +#ifndef CACHEFLUSH_ISA_TYPE + #if defined(__i386__) || defined(__x86_64__) + #define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_X86 + #elif defined(__arm64__) || defined(__aarch64__) + #define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_ARM64 + #else + #define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_GENERIC + #endif +#endif + +#if (CACHEFLUSH_ISA_TYPE == CACHEFLUSH_ISA_X86) || (CACHEFLUSH_ISA_TYPE == CACHEFLUSH_ISA_ARM64) +#define CACHEFLUSH_ISA_SUPPORTS_UM_FLUSH /*!< x86/x86_64/ARM64 supports user-mode d-cache flush */ +#endif + +#endif /* _OS_CPU_CACHE_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/include/pdump.h b/drivers/mcst/gpu-imgtec/services/include/pdump.h new file mode 100644 index 000000000000..bd6fc74c6f12 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/pdump.h @@ -0,0 +1,232 @@ +/*************************************************************************/ /*! +@File +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef SERVICES_PDUMP_H +#define SERVICES_PDUMP_H + +#include "img_types.h" +#include "services_km.h" + + +/* A PDump out2.txt script is made up of 3 sections from three buffers: + * * + * - Init phase buffer - holds PDump data written during driver + * initialisation, non-volatile. + * - Main phase buffer - holds PDump data written after driver init, + * volatile. + * - Deinit phase buffer - holds PDump data needed to shutdown HW/play back, + * written only during driver initialisation using + * the DEINIT flag. + * + * Volatile in this sense means that the buffer is drained and cleared when + * the pdump capture application connects and transfers the data to file. + * + * The PDump sub-system uses the driver state (init/post-init), whether + * the pdump capture application is connected or not (capture range set/unset) + * and, if pdump connected whether the frame is in the range set, to decide + * which of the 3 buffers to write the PDump data. Hence there are several + * key time periods in the lifetime of the kernel driver that is enabled + * with PDUMP=1 (flag XX labels below time line): + * + * Events:load init pdump enter exit pdump + * driver done connects range range disconnects + * |__________________|____________|__________|______________|____________|______ . . . + * State: | init phase | no capture | <- capture client connected -> | no capture + * | | | | + * |__________________|____________|______________________________________|_____ . . . + * Flag: | CT,DI | NONE,CT,PR | NONE,CT,PR | See no + * | Never NONE or PR | Never DI | Never DI | capture + * |__________________|____________|______________________________________|_____ . . . 
+ * Write | NONE -undef | -No write | -No write | -Main buf | -No write | See no + * buffer | CT -Init buf | -Main buf | -Main buf | -Main buf | -Main buf | capture + * | PR -undef | -Init buf | -undef | -Init & Main | -undef | + * | DI -Deinit buf | -undef | -undef | -undef | -undef | + * |__________________|____________|___________|______________|___________|_____ . . . + * + * Note: The time line could repeat if the pdump capture application is + * disconnected and reconnected without unloading the driver module. + * + * The DEINIT (DI) | CONTINUOUS (CT) | PERSISTENT (PR) flags must never + * be OR'd together and given to a PDump call since undefined behaviour may + * result and produce an invalid PDump which does not play back cleanly. + * + * The decision on which flag to use comes down to which time period the + * client or server driver makes the PDump write call AND the nature/purpose + * of the data. + * + * Note: This is a simplified time line, not all conditions represented. + * + */ + +typedef IMG_UINT32 PDUMP_FLAGS_T; + +#define PDUMP_FLAGS_NONE PDUMP_NONE /* +# define PVR_COMPAT_ASSERT assert +#endif + +/* 64bit endian conversion macros */ +#if defined(__BIG_ENDIAN__) +#define RGX_INT64_TO_BE(N) (N) +#define RGX_INT64_FROM_BE(N) (N) +#define RGX_INT32_TO_BE(N) (N) +#define RGX_INT32_FROM_BE(N) (N) +#else +#define RGX_INT64_TO_BE(N) \ + ((((N) >> 56) & 0xff) \ + | (((N) >> 40) & 0xff00) \ + | (((N) >> 24) & 0xff0000) \ + | (((N) >> 8) & 0xff000000U) \ + | ((N) << 56) \ + | (((N) & 0xff00) << 40) \ + | (((N) & 0xff0000) << 24) \ + | (((N) & 0xff000000U) << 8)) +#define RGX_INT64_FROM_BE(N) RGX_INT64_TO_BE(N) + +#define RGX_INT32_TO_BE(N) \ + ((((N) >> 24) & 0xff) \ + | (((N) >> 8) & 0xff00) \ + | ((N) << 24) \ + | ((((N) & 0xff00) << 8))) +#define RGX_INT32_FROM_BE(N) RGX_INT32_TO_BE(N) +#endif + +/****************************************************************************** + * RGX Version packed into 64-bit (BVNC) to be used by Compatibility Check + 
*****************************************************************************/ + +#define RGX_BVNC_PACK_SHIFT_B 48 +#define RGX_BVNC_PACK_SHIFT_V 32 +#define RGX_BVNC_PACK_SHIFT_N 16 +#define RGX_BVNC_PACK_SHIFT_C 0 + +#define RGX_BVNC_PACK_MASK_B (IMG_UINT64_C(0xFFFF000000000000)) +#define RGX_BVNC_PACK_MASK_V (IMG_UINT64_C(0x0000FFFF00000000)) +#define RGX_BVNC_PACK_MASK_N (IMG_UINT64_C(0x00000000FFFF0000)) +#define RGX_BVNC_PACK_MASK_C (IMG_UINT64_C(0x000000000000FFFF)) + +#define RGX_BVNC_PACKED_EXTR_B(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_B) >> RGX_BVNC_PACK_SHIFT_B)) +#define RGX_BVNC_PACKED_EXTR_V(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_V) >> RGX_BVNC_PACK_SHIFT_V)) +#define RGX_BVNC_PACKED_EXTR_N(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_N) >> RGX_BVNC_PACK_SHIFT_N)) +#define RGX_BVNC_PACKED_EXTR_C(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_C) >> RGX_BVNC_PACK_SHIFT_C)) + +#define RGX_BVNC_EQUAL(L,R,all,version,bvnc) do { \ + (bvnc) = IMG_FALSE; \ + (version) = ((L).ui32LayoutVersion == (R).ui32LayoutVersion); \ + if (version) \ + { \ + (bvnc) = ((L).ui64BVNC == (R).ui64BVNC); \ + } \ + (all) = (version) && (bvnc); \ + } while (0) + + +/**************************************************************************//** + * Utility function for packing BVNC + *****************************************************************************/ +static inline IMG_UINT64 rgx_bvnc_pack(IMG_UINT32 ui32B, IMG_UINT32 ui32V, IMG_UINT32 ui32N, IMG_UINT32 ui32C) +{ + /* + * Test for input B, V, N and C exceeding max bit width. 
+ */ + PVR_COMPAT_ASSERT((ui32B & (~(RGX_BVNC_PACK_MASK_B >> RGX_BVNC_PACK_SHIFT_B))) == 0); + PVR_COMPAT_ASSERT((ui32V & (~(RGX_BVNC_PACK_MASK_V >> RGX_BVNC_PACK_SHIFT_V))) == 0); + PVR_COMPAT_ASSERT((ui32N & (~(RGX_BVNC_PACK_MASK_N >> RGX_BVNC_PACK_SHIFT_N))) == 0); + PVR_COMPAT_ASSERT((ui32C & (~(RGX_BVNC_PACK_MASK_C >> RGX_BVNC_PACK_SHIFT_C))) == 0); + + return (((IMG_UINT64)ui32B << RGX_BVNC_PACK_SHIFT_B) | + ((IMG_UINT64)ui32V << RGX_BVNC_PACK_SHIFT_V) | + ((IMG_UINT64)ui32N << RGX_BVNC_PACK_SHIFT_N) | + ((IMG_UINT64)ui32C << RGX_BVNC_PACK_SHIFT_C)); +} + + +#endif /* RGX_COMPAT_BVNC_H */ + +/****************************************************************************** + End of file (rgx_compat_bvnc.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/include/rgx_fw_info.h b/drivers/mcst/gpu-imgtec/services/include/rgx_fw_info.h new file mode 100644 index 000000000000..2f012d59ba5a --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/rgx_fw_info.h @@ -0,0 +1,135 @@ +/*************************************************************************/ /*! +@File +@Title FW image information + +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Utility functions used internally for HWPerf data retrieval +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGX_FW_INFO_H) +#define RGX_FW_INFO_H + +#include "img_types.h" +#include "rgx_common.h" + +/* + * Firmware binary block unit in bytes. + * Raw data stored in FW binary will be aligned to this size. + */ +#define FW_BLOCK_SIZE 4096L + +typedef enum +{ + META_CODE = 0, + META_PRIVATE_DATA, + META_COREMEM_CODE, + META_COREMEM_DATA, + MIPS_CODE, + MIPS_EXCEPTIONS_CODE, + MIPS_BOOT_CODE, + MIPS_PRIVATE_DATA, + MIPS_BOOT_DATA, + MIPS_STACK, + RISCV_UNCACHED_CODE, + RISCV_CACHED_CODE, + RISCV_PRIVATE_DATA, + RISCV_COREMEM_CODE, + RISCV_COREMEM_DATA, +} RGX_FW_SECTION_ID; + +typedef enum +{ + NONE = 0, + FW_CODE, + FW_DATA, + FW_COREMEM_CODE, + FW_COREMEM_DATA +} RGX_FW_SECTION_TYPE; + + +/* + * FW binary format with FW info attached: + * + * Contents Offset + * +-----------------+ + * | | 0 + * | | + * | Original binary | + * | file | + * | (.ldr/.elf) | + * | | + * | | + * +-----------------+ + * | FW info header | FILE_SIZE - 4K + * +-----------------+ + * | | + * | FW layout table | + * | | + * +-----------------+ + * FILE_SIZE + */ + +#define FW_INFO_VERSION (1) + +typedef struct +{ + IMG_UINT32 ui32InfoVersion; /* FW info version */ + IMG_UINT32 ui32HeaderLen; /* Header length */ + IMG_UINT32 ui32LayoutEntryNum; /* Number of entries in the layout table */ + IMG_UINT32 ui32LayoutEntrySize; /* Size of an entry in the layout table */ + IMG_UINT64 RGXFW_ALIGN ui64BVNC; /* BVNC */ + IMG_UINT32 ui32FwPageSize; /* Page size of processor on which firmware executes */ + IMG_UINT32 ui32Flags; /* Compatibility flags */ +} RGX_FW_INFO_HEADER; + +typedef struct +{ + RGX_FW_SECTION_ID eId; + RGX_FW_SECTION_TYPE eType; + IMG_UINT32 ui32BaseAddr; + IMG_UINT32 ui32MaxSize; + IMG_UINT32 ui32AllocSize; + IMG_UINT32 ui32AllocOffset; +} RGX_FW_LAYOUT_ENTRY; + +#endif /* RGX_FW_INFO_H */ + +/****************************************************************************** + End of file 
(rgx_fw_info.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/include/rgx_fwif_sf.h b/drivers/mcst/gpu-imgtec/services/include/rgx_fwif_sf.h new file mode 100644 index 000000000000..c0727c28747e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/rgx_fwif_sf.h @@ -0,0 +1,830 @@ +/*************************************************************************/ /*! +@File rgx_fwif_sf.h +@Title RGX firmware interface string format specifiers +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the rgx firmware logging messages. The following + list are the messages the firmware prints. Changing anything + but the first column or spelling mistakes in the strings will + break compatibility with log files created with older/newer + firmware versions. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef RGX_FWIF_SF_H +#define RGX_FWIF_SF_H + +/****************************************************************************** + * *DO*NOT* rearrange or delete lines in SFIDLIST or SFGROUPLIST or you + * WILL BREAK fw tracing message compatibility with previous + * fw versions. Only add new ones, if so required. 
+ *****************************************************************************/ +/* Available log groups */ +#define RGXFW_LOG_SFGROUPLIST \ + X(RGXFW_GROUP_NULL,NULL) \ + X(RGXFW_GROUP_MAIN,MAIN) \ + X(RGXFW_GROUP_CLEANUP,CLEANUP) \ + X(RGXFW_GROUP_CSW,CSW) \ + X(RGXFW_GROUP_PM, PM) \ + X(RGXFW_GROUP_RTD,RTD) \ + X(RGXFW_GROUP_SPM,SPM) \ + X(RGXFW_GROUP_MTS,MTS) \ + X(RGXFW_GROUP_BIF,BIF) \ + X(RGXFW_GROUP_MISC,MISC) \ + X(RGXFW_GROUP_POW,POW) \ + X(RGXFW_GROUP_HWR,HWR) \ + X(RGXFW_GROUP_HWP,HWP) \ + X(RGXFW_GROUP_RPM,RPM) \ + X(RGXFW_GROUP_DMA,DMA) \ + X(RGXFW_GROUP_DBG,DBG) + +enum RGXFW_LOG_SFGROUPS { +#define X(A,B) A, + RGXFW_LOG_SFGROUPLIST +#undef X +}; + +#define IMG_SF_STRING_MAX_SIZE 256U + +typedef struct { + IMG_UINT32 ui32Id; + IMG_CHAR sName[IMG_SF_STRING_MAX_SIZE]; +} RGXFW_STID_FMT; /* pair of string format id and string formats */ + +typedef struct { + IMG_UINT32 ui32Id; + const IMG_CHAR *psName; +} RGXKM_STID_FMT; /* pair of string format id and string formats */ + +/* Table of String Format specifiers, the group they belong and the number of + * arguments each expects. Xmacro styled macros are used to generate what is + * needed without requiring hand editing. + * + * id : id within a group + * gid : group id + * Sym name : name of enumerations used to identify message strings + * String : Actual string + * #args : number of arguments the string format requires + */ +#define RGXFW_LOG_SFIDLIST \ +/*id, gid, id name, string, # arguments */ \ +X( 0, RGXFW_GROUP_NULL, RGXFW_SF_FIRST, "You should not use this string", 0) \ +\ +X( 1, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D_DEPRECATED, "Kick 3D: FWCtx 0x%08.8x @ %d, RTD 0x%08x. 
Partial render:%d, CSW resume:%d, prio:%d", 6) \ +X( 2, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_FINISHED, "3D finished, HWRTData0State=%x, HWRTData1State=%x", 2) \ +X( 3, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK3D_TQ_DEPRECATED, "Kick 3D TQ: FWCtx 0x%08.8x @ %d, CSW resume:%d, prio: %d", 4) \ +X( 4, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_TQ_FINISHED, "3D Transfer finished", 0) \ +X( 5, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_DEPRECATED, "Kick Compute: FWCtx 0x%08.8x @ %d, prio: %d", 3) \ +X( 6, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_FINISHED, "Compute finished", 0) \ +X( 7, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA_DEPRECATED, "Kick TA: FWCtx 0x%08.8x @ %d, RTD 0x%08x. First kick:%d, Last kick:%d, CSW resume:%d, prio:%d", 7) \ +X( 8, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_FINISHED, "TA finished", 0) \ +X( 9, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESTART_AFTER_PRENDER, "Restart TA after partial render", 0) \ +X( 10, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESUME_WOUT_PRENDER, "Resume TA without partial render", 0) \ +X( 11, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OOM, "Out of memory! 
Context 0x%08x, HWRTData 0x%x", 2) \ +X( 12, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA_DEPRECATED, "Kick TLA: FWCtx 0x%08.8x @ %d, prio:%d", 3) \ +X( 13, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TLA_FINISHED, "TLA finished", 0) \ +X( 14, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CCCB_WOFF_UPDATE, "cCCB Woff update = %d, DM = %d, FWCtx = 0x%08.8x", 3) \ +X( 16, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_START, "UFO Checks for FWCtx 0x%08.8x @ %d", 2) \ +X( 17, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK, "UFO Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \ +X( 18, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_SUCCEEDED, "UFO Checks succeeded", 0) \ +X( 19, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_PR_CHECK, "UFO PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x", 3) \ +X( 20, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_START, "UFO SPM PR-Checks for FWCtx 0x%08.8x", 1) \ +X( 21, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_DEPRECATED, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= ????????, [0x%08.8x] is ???????? 
requires 0x%08.8x", 4) \ +X( 22, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE_START, "UFO Updates for FWCtx 0x%08.8x @ %d", 2) \ +X( 23, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE, "UFO Update: [0x%08.8x] = 0x%08.8x", 2) \ +X( 24, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ASSERT_FAILED, "ASSERT Failed: line %d of:", 1) \ +X( 25, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_LOCKUP_DEPRECATED, "HWR: Lockup detected on DM%d, FWCtx: 0x%08.8x", 2) \ +X( 26, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_FW_DEPRECATED, "HWR: Reset fw state for DM%d, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x", 3) \ +X( 27, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_HW_DEPRECATED, "HWR: Reset HW", 0) \ +X( 28, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_TERMINATED_DEPRECATED, "HWR: Lockup recovered.", 0) \ +X( 29, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_FALSE_LOCKUP_DEPRECATED, "HWR: False lockup detected for DM%u", 1) \ +X( 30, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ALIGN_FAILED, "Alignment check %d failed: host = 0x%x, fw = 0x%x", 3) \ +X( 31, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GP_USC_TRIGGERED, "GP USC triggered", 0) \ +X( 32, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_OVERALLOC_REGS, "Overallocating %u temporary registers and %u shared registers for breakpoint handler", 2) \ +X( 33, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET_DEPRECATED, "Setting breakpoint: Addr 0x%08.8x", 1) \ +X( 34, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_STORE, "Store breakpoint state", 0) \ +X( 35, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_UNSET, "Unsetting BP Registers", 0) \ +X( 36, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NONZERO_RT, "Active RTs expected to be zero, actually %u", 1) \ +X( 37, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTC_PRESENT, "RTC present, %u active render targets", 1) \ +X( 38, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_EST_POWER_DEPRECATED, "Estimated Power 0x%x", 1) \ +X( 39, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_TARGET, "RTA render target %u", 1) \ +X( 40, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_KICK_RENDER, "Kick RTA render %u of %u", 2) \ +X( 41, RGXFW_GROUP_MAIN, 
RGXFW_SF_MAIN_HWR_SIZES_CHECK_DEPRECATED, "HWR sizes check %d failed: addresses = %d, sizes = %d", 3) \ +X( 42, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_ENABLE_DEPRECATED, "Pow: DUSTS_ENABLE = 0x%x", 1) \ +X( 43, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_HWREQ_DEPRECATED, "Pow: On(1)/Off(0): %d, Units: 0x%08.8x", 2) \ +X( 44, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_DEPRECATED, "Pow: Changing number of dusts from %d to %d", 2) \ +X( 45, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_SIDEKICK_IDLE_DEPRECATED, "Pow: Sidekick ready to be powered down", 0) \ +X( 46, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_REQ_DEPRECATED, "Pow: Request to change num of dusts to %d (bPowRascalDust=%d)", 2) \ +X( 47, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_STORE, "No ZS Buffer used for partial render (store)", 0) \ +X( 48, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_LOAD, "No Depth/Stencil Buffer used for partial render (load)", 0) \ +X( 49, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SET_LOCKUP_DEPRECATED, "HWR: Lock-up DM%d FWCtx: 0x%08.8x", 2) \ +X( 50, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE_DEPRECATED, "MLIST%d checker: CatBase TE=0x%08x (%d Pages), VCE=0x%08x (%d Pages), ALIST=0x%08x, IsTA=%d", 7) \ +X( 51, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_MLIST_VALUE, "MLIST%d checker: MList[%d] = 0x%08x", 3) \ +X( 52, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_OK, "MLIST%d OK", 1) \ +X( 53, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_EMPTY, "MLIST%d is empty", 1) \ +X( 54, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE, "MLIST%d checker: CatBase TE=0x%08x%08x, VCE=0x%08x%08x, ALIST=0x%08x%08x, IsTA=%d", 8) \ +X( 55, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_40480KICK, "3D OQ flush kick", 0) \ +X( 56, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWP_UNSUPPORTED_BLOCK, "HWPerf block ID (0x%x) unsupported by device", 1) \ +X( 57, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET_DEPRECATED2, "Setting breakpoint: Addr 0x%08.8x DM%u", 2) \ +X( 58, 
RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED, "Kick RTU: FWCtx 0x%08.8x @ %d, prio: %d", 3) \ +X( 59, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_FINISHED_DEPRECATED, "RDM finished on context %u", 1) \ +X( 60, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED, "Kick SHG: FWCtx 0x%08.8x @ %d, prio: %d", 3) \ +X( 61, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SHG_FINISHED_DEPRECATED, "SHG finished", 0) \ +X( 62, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBA_FINISHED_DEPRECATED, "FBA finished on context %u", 1) \ +X( 63, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_FAILED, "UFO Checks failed", 0) \ +X( 64, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_START, "Kill DM%d start", 1) \ +X( 65, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_COMPLETE, "Kill DM%d complete", 1) \ +X( 66, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FC_CCB_UPDATE_DEPRECATED, "FC%u cCCB Woff update = %u", 2) \ +X( 67, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED2, "Kick RTU: FWCtx 0x%08.8x @ %d, prio: %d, Frame Context: %d", 4) \ +X( 68, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_INIT, "GPU init", 0) \ +X( 69, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNITS_INIT, "GPU Units init (# mask: 0x%x)", 1) \ +X( 70, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGTIMES, "Register access cycles: read: %d cycles, write: %d cycles, iterations: %d", 3) \ +X( 71, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_ADD, "Register configuration added. Address: 0x%x Value: 0x%x%x", 3) \ +X( 72, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_SET, "Register configuration applied to type %d. 
(0:pow on, 1:Rascal/dust init, 2-5: TA,3D,CDM,TLA, 6:All)", 1) \ +X( 73, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TPC_FLUSH, "Perform TPC flush.", 0) \ +X( 74, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP_DEPRECATED, "GPU has locked up (see HWR logs for more info)", 0) \ +X( 75, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_OUTOFTIME, "HWR has been triggered - GPU has overrun its deadline (see HWR logs)", 0) \ +X( 76, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_POLLFAILURE, "HWR has been triggered - GPU has failed a poll (see HWR logs)", 0) \ +X( 77, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DOPPLER_OOM_DEPRECATED, "Doppler out of memory event for FC %u", 1) \ +X( 78, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK1, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x", 3) \ +X( 79, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK2, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \ +X( 80, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TIMESTAMP, "TIMESTAMP -> [0x%08.8x]", 1) \ +X( 81, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE_START_DEPRECATED, "UFO RMW Updates for FWCtx 0x%08.8x @ %d", 2) \ +X( 82, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE_DEPRECATED, "UFO Update: [0x%08.8x] = 0x%08.8x", 2) \ +X( 83, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NULLCMD, "Kick Null cmd: FWCtx 0x%08.8x @ %d", 2) \ +X( 84, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RPM_OOM_DEPRECATED, "RPM Out of memory! 
Context 0x%08x, SH requestor %d", 2) \ +X( 85, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_ABORT_DISCARD_DEPRECATED, "Discard RTU due to RPM abort: FWCtx 0x%08.8x @ %d, prio: %d, Frame Context: %d", 4) \ +X( 86, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED, "Deferring DM%u from running context 0x%08x @ %d (deferred DMs = 0x%08x)", 4) \ +X( 87, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_WAITING_TURN_DEPRECATED, "Deferring DM%u from running context 0x%08x @ %d to let other deferred DMs run (deferred DMs = 0x%08x)", 4) \ +X( 88, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_NO_LONGER, "No longer deferring DM%u from running context = 0x%08x @ %d (deferred DMs = 0x%08x)", 4) \ +X( 89, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB_DEPRECATED, "FWCCB for DM%u is full, we will have to wait for space! (Roff = %u, Woff = %u)", 3) \ +X( 90, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB, "FWCCB for OSid %u is full, we will have to wait for space! (Roff = %u, Woff = %u)", 3) \ +X( 91, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART, "Host Sync Partition marker: %d", 1) \ +X( 92, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART_RPT, "Host Sync Partition repeat: %d", 1) \ +X( 93, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CLOCK_SPEED_CHANGE, "Core clock set to %d Hz", 1) \ +X( 94, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_OFFSETS, "Compute Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 7) \ +X( 95, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_DEPRECATED, "Signal check failed, Required Data: 0x%x, Address: 0x%08x%08x", 3) \ +X( 96, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE_DEPRECATED, "Signal update, Snoop Filter: %u, MMU Ctx: %u, Signal Id: %u, Signals Base: 0x%08x%08x", 5) \ +X( 97, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWCONTEXT_SIGNALED, "Signalled the previously waiting FWCtx: 0x%08.8x, OSId: %u, Signal Address: 0x%08x%08x", 4) \ +X( 98, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_STALLED_DEPRECATED, "Compute stalled", 0) \ +X( 99, RGXFW_GROUP_MAIN, 
RGXFW_SF_MAIN_COMPUTE_STALLED, "Compute stalled (Roff = %u, Woff = %u, Size = %u)", 3) \ +X(100, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_RESUMED_FROM_STALL, "Compute resumed (Roff = %u, Woff = %u, Size = %u)", 3) \ +X(101, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_SIGNAL_UPDATE, "Signal update notification from the host, PC Physical Address: 0x%08x%08x, Signal Virtual Address: 0x%08x%08x", 4) \ +X(102, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE_OSID_DM_DEPRECATED, "Signal update from DM: %u, OSId: %u, PC Physical Address: 0x%08x%08x", 4) \ +X(103, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_DM_DEPRECATED, "DM: %u signal check failed", 1) \ +X(104, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED, "Kick TDM: FWCtx 0x%08.8x @ %d, prio:%d", 3) \ +X(105, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_FINISHED, "TDM finished", 0) \ +X(106, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TE_PIPE_STATUS_DEPRECATED, "MMU_PM_CAT_BASE_TE[%d]_PIPE[%d]: 0x%08x 0x%08x)", 4) \ +X(107, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_HIT_DEPRECATED, "BRN 54141 HIT", 0) \ +X(108, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_APPLYING_DUMMY_TA_DEPRECATED, "BRN 54141 Dummy TA kicked", 0) \ +X(109, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_RESUME_TA_DEPRECATED, "BRN 54141 resume TA", 0) \ +X(110, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_DOUBLE_HIT_DEPRECATED, "BRN 54141 double hit after applying WA", 0) \ +X(111, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_DUMMY_TA_VDM_BASE_DEPRECATED, "BRN 54141 Dummy TA VDM base address: 0x%08x%08x", 2) \ +X(112, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_WITH_CURRENT, "Signal check failed, Required Data: 0x%x, Current Data: 0x%x, Address: 0x%08x%08x", 4) \ +X(113, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BUFFER_STALL, "TDM stalled (Roff = %u, Woff = %u)", 2) \ +X(114, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_WRITE_OFFSET_UPDATE, "Write Offset update notification for stalled FWCtx 0x%08.8x", 1) \ +X(115, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE, "Changing OSid 
%d's priority from %u to %u", 3) \ +X(116, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_RESUMED, "Compute resumed", 0) \ +X(117, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA, "Kick TLA: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \ +X(118, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM, "Kick TDM: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \ +X(119, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA, "Kick TA: FWCtx 0x%08.8x @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 11) \ +X(120, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D, "Kick 3D: FWCtx 0x%08.8x @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 10) \ +X(121, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3DTQ, "Kick 3D TQ: FWCtx 0x%08.8x @ %d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8) \ +X(122, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE, "Kick Compute: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, ext:0x%08x, int:0x%08x)", 6) \ +X(123, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED3, "Kick RTU: FWCtx 0x%08.8x @ %d, Frame Context:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8) \ +X(124, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED2, "Kick SHG: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \ +X(125, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CSRM_RECONFIG, "Reconfigure CSRM: special coeff support enable %d.", 1) \ +X(127, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_REQ_MAX_COEFFS, "TA requires max coeff mode, deferring: %d.", 1) \ +X(128, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_REQ_MAX_COEFFS, "3D requires max coeff mode, deferring: %d.", 1) \ +X(129, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_FAILED, "Kill DM%d failed", 1) \ +X(130, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE, "Thread Queue is full, we will have to wait for space! 
(Roff = %u, Woff = %u)", 2) \ +X(131, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE_FENCE, "Thread Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)", 3) \ +X(132, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_HCS_TRIGGERED, "DM %d failed to Context Switch on time. Triggered HCS (see HWR logs).", 1) \ +X(133, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HCS_SET, "HCS changed to %d ms", 1) \ +X(134, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UPDATE_TILES_IN_FLIGHT, "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x%08x)", 4) \ +X(135, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_TILES_IN_FLIGHT, " Phantom %d: USCTiles=%d", 2) \ +X(136, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF_OFF, "Isolation grouping is disabled", 0) \ +X(137, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF, "Isolation group configured with a priority threshold of %d", 1) \ +X(138, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_ONLINE_DEPRECATED, "OS %d has come online", 1) \ +X(139, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_OFFLINE_DEPRECATED, "OS %d has gone offline", 1) \ +X(140, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWCONTEXT_SIGNAL_REKICK, "Signalled the previously stalled FWCtx: 0x%08.8x, OSId: %u, Signal Address: 0x%08x%08x", 4) \ +X(141, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSETS, "TDM Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 7) \ +X(142, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSET_READ_RESET, "Reset TDM Queue Read Offset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes 0, Woff = %u, Size = %u)", 6) \ +X(143, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UMQ_MISMATCHED_READ_OFFSET, "User Mode Queue mismatched stream start: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u, StreamStartOffset = %u)", 5) \ +X(144, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_DEINIT, "GPU deinit", 0) \ +X(145, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNITS_DEINIT, "GPU units deinit", 0) \ +X(146, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG, "Initialised OS %d with config flags 0x%08x", 2) \ +X(147, 
RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_LIMIT, "UFO limit exceeded %d/%d", 2) \ +X(148, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_62850KICK, "3D Dummy stencil store", 0) \ +X(149, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG_DEPRECATED, "Initialised OS %d with config flags 0x%08x and extended config flags 0x%08x", 3) \ +X(150, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_COMMAND_DEPRECATED, "Unknown Command (eCmdType=0x%08x)", 1) \ +X(151, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE, "UFO forced update: FWCtx 0x%08.8x @ %d [0x%08.8x] = 0x%08.8x", 4) \ +X(152, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE_NOP, "UFO forced update NOP: FWCtx 0x%08.8x @ %d [0x%08.8x] = 0x%08.8x, reason %d", 5) \ +X(153, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN66075_CHECK, "TDM context switch check: Roff %u points to 0x%08x, Match=%u", 3) \ +X(154, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CCBS_WOUT_CHKPT, "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x", 6) \ +X(155, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWIRQ, "FW IRQ # %u @ %u", 2) \ +X(156, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET, "Setting breakpoint: Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u", 3) \ +X(157, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KERNEL_CCB_DEPRECATED, "Invalid KCCB setup for OSid %u: KCCB 0x%08x, KCCB Ctrl 0x%08x", 3) \ +X(158, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KERNEL_CCB_CMD, "Invalid KCCB cmd (%u) for OSid %u @ KCCB 0x%08x", 3) \ +X(159, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FW_FAULT, "FW FAULT: At line %d in file 0x%08x%08x, additional data=0x%08x", 4) \ +X(160, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_INVALID, "Invalid breakpoint: MemCtx 0x%08x Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u", 4) \ +X(161, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FLUSHINVAL_CMD_INVALID_DEPRECATED, "Discarding invalid SLC flushinval command for OSid %u: DM %u, FWCtx 0x%08x", 3) \ +X(162, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_NOTIFY_WRITE_OFFSET_UPDATE_DEPRECATED, "Invalid Write Offset update 
notification from OSid %u to DM %u: FWCtx 0x%08x, MemCtx 0x%08x", 4) \ +X(163, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KCCB_KICK_CMD_DEPRECATED, "Null FWCtx in KCCB kick cmd for OSid %u: KCCB 0x%08x, ROff %u, WOff %u", 4) \ +X(164, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FULL_CHPTCCB, "Checkpoint CCB for OSid %u is full, signalling host for full check state (Roff = %u, Woff = %u)", 3) \ +X(165, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CCBS, "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x chptCCBCtl@0x%x chptCCB@0x%x", 8) \ +X(166, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_STATE_CHANGE, "OSid %d fw state transition request: from %d to %d (0-offline 1-ready 2-active 3-offloading). Status %d (1-ok 0-fail)", 4) \ +X(167, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_STALE_KCCB_CMDS, "OSid %u has %u stale commands in its KCCB", 2) \ +X(168, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_VCE_PAUSE, "Applying VCE pause", 0) \ +X(169, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KCCB_UPDATE_RTN_SLOT_DEPRECATED, "OSid %u KCCB slot %u value updated to %u", 3) \ +X(170, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_KCCB_COMMAND, "Unknown KCCB Command: KCCBCtl=0x%08x, KCCB=0x%08x, Roff=%u, Woff=%u, Wrap=%u, Cmd=0x%08x, CmdType=0x%08x", 7) \ +X(171, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_CCB_COMMAND1, "Unknown Client CCB Command processing fences: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u", 10) \ +X(172, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_CCB_COMMAND2, "Unknown Client CCB Command executing kick: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u", 10) \ +X(173, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KCCB_KICK_CMD, "Null FWCtx in KCCB kick cmd for OSid %u with WOff %u", 2) \ +X(174, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FLUSHINVAL_CMD_INVALID, "Discarding invalid SLC flushinval command for OSid %u, FWCtx 0x%08x", 2) \ +X(175, RGXFW_GROUP_MAIN, 
RGXFW_SF_MAIN_INVALID_NOTIFY_WRITE_OFFSET_UPDATE, "Invalid Write Offset update notification from OSid %u: FWCtx 0x%08x, MemCtx 0x%08x", 3) \ +X(176, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FW_INIT_CONFIG, "Initialised Firmware with config flags 0x%08x and extended config flags 0x%08x", 2) \ +X(177, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_CONFIG, "Set Periodic Hardware Reset Mode: %d", 1) \ +X(179, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_TRIG, "PHR mode %d, FW state: 0x%08x, HWR flags: 0x%08x", 3) \ +X(180, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_RESET, "PHR mode %d triggered a reset", 1) \ +X(181, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE, "Signal update, Snoop Filter: %u, Signal Id: %u", 2) \ +X(182, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FIXME_SERIES8, "WARNING: Skipping FW KCCB Cmd type %d which is not yet supported on Series8.", 1) \ +X(183, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INCONSISTENT_MMU_FLAGS, "MMU context cache data NULL, but cache flags=0x%x (sync counter=%u, update value=%u) OSId=%u", 4) \ +X(184, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SLC_FLUSH, "SLC range based flush: Context=%u VAddr=0x%02x%08x, Size=0x%08x, Invalidate=%d", 5) \ +X(185, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBSC_INVAL, "FBSC invalidate for Context [0x%08x]: Entry mask 0x%08x%08x.", 3) \ +X(186, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN66284_UPDATE, "TDM context switch check: Roff %u was not valid for kick starting at %u, moving back to %u", 3) \ +X(187, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SPFILTER_UPDATES, "Signal updates: FIFO: %u, Signals: 0x%08x", 2) \ +X(188, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_FBSC_CMD, "Invalid FBSC cmd: FWCtx 0x%08x, MemCtx 0x%08x", 2) \ +X(189, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN68497_BLIT, "Insert BRN68497 WA blit after TDM Context store.", 0) \ +X(190, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PENDING_UFO_UPDATE_START, "UFO Updates for previously finished FWCtx 0x%08.8x", 1) \ +X(191, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTC_RTA_PRESENT, "RTC with RTA present, %u active render targets", 1) \ +X(192, 
RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NULL_RTAS, "Invalid RTA Set-up. The ValidRenderTargets array in RTACtl is Null!", 0) \ +X(193, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_COUNTER, "Block 0x%x / Counter 0x%x INVALID and ignored", 2) \ +X(194, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ECC_FAULT, "ECC fault GPU=0x%08x FW=0x%08x", 2) \ +X(195, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PROCESS_XPU_EVENT, "Processing XPU event on DM = %d", 1) \ +X(196, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_VZ_WDG_TRIGGER, "OSid %u failed to respond to the virtualisation watchdog in time. Timestamp of its last input = %u", 2) \ +X(197, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP, "GPU-%d has locked up (see HWR logs for more info)", 1) \ +\ +X( 1, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED, "Bg Task DM = %u, counted = %d", 2) \ +X( 2, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE_DEPRECATED, "Bg Task complete DM = %u", 1) \ +X( 3, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_KICK, "Irq Task DM = %u, Breq = %d, SBIrq = 0x%x", 3) \ +X( 4, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE_DEPRECATED, "Irq Task complete DM = %u", 1) \ +X( 5, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_BG_ALL_DEPRECATED, "Kick MTS Bg task DM=All", 0) \ +X( 6, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_IRQ, "Kick MTS Irq task DM=%d", 1) \ +X( 7, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED, "Ready queue debug DM = %u, celltype = %d", 2) \ +X( 8, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN_DEPRECATED, "Ready-to-run debug DM = %u, item = 0x%x", 2) \ +X( 9, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMDHEADER, "Client command header DM = %u, client CCB = 0x%x, cmd = 0x%x", 3) \ +X( 10, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN, "Ready-to-run debug OSid = %u, DM = %u, item = 0x%x", 3) \ +X( 11, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED2, "Ready queue debug DM = %u, celltype = %d, OSid = %u", 3) \ +X( 12, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED2, "Bg Task DM = %u, counted = %d, OSid = %u", 3) \ +X( 13, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE, "Bg Task 
complete DM Bitfield: %u", 1) \ +X( 14, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE, "Irq Task complete.", 0) \ +X( 15, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMD_DISCARD, "Discarded Command Type: %d OS ID = %d PID = %d context = 0x%08x cccb ROff = 0x%x, due to USC breakpoint hit by OS ID = %d PID = %d.", 7) \ +X( 16, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_EXEC_DEPRECATED, "KCCB Slot %u: DM=%u, Cmd=0x%08x, OSid=%u", 4) \ +X( 17, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_RTN_VALUE, "KCCB Slot %u: Return value %u", 2) \ +X( 18, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK, "Bg Task OSid = %u", 1) \ +X( 19, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_EXEC, "KCCB Slot %u: Cmd=0x%08x, OSid=%u", 3) \ +\ +X( 1, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_CLEANUP, "FwCommonContext [0x%08x] cleaned", 1) \ +X( 2, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_BUSY, "FwCommonContext [0x%08x] is busy: ReadOffset = %d, WriteOffset = %d", 3) \ +X( 3, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANUP_DEPRECATED, "HWRTData [0x%08x] for DM=%d, received cleanup request", 2) \ +X( 4, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED_FOR_DM_DEPRECATED, "HWRTData [0x%08x] HW Context cleaned for DM%u, executed commands = %d", 3) \ +X( 5, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED, "HWRTData [0x%08x] HW Context for DM%u is busy", 2) \ +X( 6, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED_DEPRECATED, "HWRTData [0x%08x] HW Context %u cleaned", 2) \ +X( 7, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FL_CLEANED, "Freelist [0x%08x] cleaned", 1) \ +X( 8, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_CLEANED, "ZSBuffer [0x%08x] cleaned", 1) \ +X( 9, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_BUSY, "ZSBuffer [0x%08x] is busy: submitted = %d, executed = %d", 3) \ +X( 10, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED2, "HWRTData [0x%08x] HW Context for DM%u is busy: submitted = %d, executed = %d", 4) \ +X( 11, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANUP_DEPRECATED, "HW Ray Frame 
data [0x%08x] for DM=%d, received cleanup request", 2) \ +X( 12, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED_FOR_DM_DEPRECATED, "HW Ray Frame Data [0x%08x] cleaned for DM%u, executed commands = %d", 3) \ +X( 13, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_BUSY_DEPRECATED, "HW Ray Frame Data [0x%08x] for DM%u is busy: submitted = %d, executed = %d", 4) \ +X( 14, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED_DEPRECATED, "HW Ray Frame Data [0x%08x] HW Context %u cleaned", 2) \ +X( 15, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_INVALID_REQUEST, "Discarding invalid cleanup request of type 0x%x", 1) \ +X( 16, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANUP, "Received cleanup request for HWRTData [0x%08x]", 1) \ +X( 17, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY, "HWRTData [0x%08x] HW Context is busy: submitted = %d, executed = %d", 3) \ +X( 18, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED, "HWRTData [0x%08x] HW Context %u cleaned, executed commands = %d", 3) \ +\ +X( 1, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_NEEDS_RESUME, "CDM FWCtx 0x%08.8x needs resume", 1) \ +X( 2, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME_DEPRECATED, "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x", 3) \ +X( 3, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_SHARED, "CDM FWCtx shared alloc size load 0x%x", 1) \ +X( 4, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_COMPLETE, "*** CDM FWCtx store complete", 0) \ +X( 5, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_START, "*** CDM FWCtx store start", 0) \ +X( 6, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_SOFT_RESET, "CDM Soft Reset", 0) \ +X( 7, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_NEEDS_RESUME, "3D FWCtx 0x%08.8x needs resume", 1) \ +X( 8, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME, "*** 3D FWCtx 0x%08.8x resume", 1) \ +X( 9, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_COMPLETE, "*** 3D context store complete", 0) \ +X( 10, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED, "3D context store pipe state: 0x%08.8x 0x%08.8x 0x%08.8x", 3) \ +X( 11, 
RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_START, "*** 3D context store start", 0) \ +X( 12, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_TQ_RESUME, "*** 3D TQ FWCtx 0x%08.8x resume", 1) \ +X( 13, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_NEEDS_RESUME, "TA FWCtx 0x%08.8x needs resume", 1) \ +X( 14, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_RESUME, "*** TA FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x", 3) \ +X( 15, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_SHARED, "TA context shared alloc size store 0x%x, load 0x%x", 2) \ +X( 16, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_COMPLETE, "*** TA context store complete", 0) \ +X( 17, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_START, "*** TA context store start", 0) \ +X( 18, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED_DEPRECATED, "Higher priority context scheduled for DM %u, old prio:%d, new prio:%d", 3) \ +X( 19, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SET_CONTEXT_PRIORITY, "Set FWCtx 0x%x priority to %u", 2) \ +X( 20, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED2, "3D context store pipe%d state: 0x%08.8x", 2) \ +X( 21, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE_DEPRECATED, "3D context resume pipe%d state: 0x%08.8x", 2) \ +X( 22, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_NEEDS_RESUME_DEPRECATED, "SHG FWCtx 0x%08.8x needs resume", 1) \ +X( 23, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_RESUME_DEPRECATED, "*** SHG FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x", 3) \ +X( 24, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_SHARED_DEPRECATED, "SHG context shared alloc size store 0x%x, load 0x%x", 2) \ +X( 25, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_COMPLETE_DEPRECATED, "*** SHG context store complete", 0) \ +X( 26, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_START_DEPRECATED, "*** SHG context store start", 0) \ +X( 27, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_PIPE_INDIRECT, "Performing TA indirection, last used pipe %d", 1) \ +X( 28, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_CTRL_STREAM_TERMINATE, "CDM context store hit ctrl stream terminate. 
Skip resume.", 0) \ +X( 29, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME_AB_BUFFER, "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x, shader state %u", 4) \ +X( 30, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STATE_BUFFER_FLIP, "TA PDS/USC state buffer flip (%d->%d)", 2) \ +X( 31, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_52563_HIT_DEPRECATED, "TA context store hit BRN 52563: vertex store tasks outstanding", 0) \ +X( 32, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_USC_POLL_FAILED, "TA USC poll failed (USC vertex task count: %d)", 1) \ +X( 33, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_DEFERRED_DEPRECATED, "TA context store deferred due to BRN 54141.", 0) \ +X( 34, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED_DEPRECATED2, "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u", 7) \ +X( 35, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_START, "*** TDM context store start", 0) \ +X( 36, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_COMPLETE, "*** TDM context store complete", 0) \ +X( 37, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_NEEDS_RESUME_DEPRECATED, "TDM context needs resume, header [0x%08.8x, 0x%08.8x]", 2) \ +X( 38, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED, "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u. 
Hard Context Switching: %u", 8) \ +X( 39, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE, "3D context store pipe %2d (%2d) state: 0x%08.8x", 3) \ +X( 40, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE, "3D context resume pipe %2d (%2d) state: 0x%08.8x", 3) \ +X( 41, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_START_VOLCANIC, "*** 3D context store start version %d (1=IPP_TILE, 2=ISP_TILE)", 1) \ +X( 42, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_VOLCANIC, "3D context store pipe%d state: 0x%08.8x%08x", 3) \ +X( 43, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE_VOLCANIC, "3D context resume pipe%d state: 0x%08.8x%08x", 3) \ +X( 44, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_IPP_STATE, "3D context resume IPP state: 0x%08.8x%08x", 2) \ +X( 45, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_PIPES_EMPTY, "All 3D pipes empty after ISP tile mode store! IPP_status: 0x%08x", 1) \ +X( 46, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_RESUME_PIPE_STATE, "TDM context resume pipe%d state: 0x%08.8x%08x", 3) \ +X( 47, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_LEVEL4_STORE_START, "*** 3D context store start version 4", 0) \ +\ +X( 1, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE_BIFREQ, "Activate MemCtx=0x%08x BIFreq=%d secure=%d", 3) \ +X( 2, RGXFW_GROUP_BIF, RGXFW_SF_BIF_DEACTIVATE, "Deactivate MemCtx=0x%08x", 1) \ +X( 3, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_ALLOC, "Alloc PC reg %d", 1) \ +X( 4, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_GRAB, "Grab reg %d refcount now %d", 2) \ +X( 5, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_UNGRAB, "Ungrab reg %d refcount now %d", 2) \ +X( 6, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_BIFREQ, "Setup reg=%d BIFreq=%d, expect=0x%08x%08x, actual=0x%08x%08x", 6) \ +X( 7, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TRUST, "Trust enabled:%d, for BIFreq=%d", 2) \ +X( 8, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TILECFG_DEPRECATED, "BIF Tiling Cfg %d base 0x%08x%08x len 0x%08x%08x enable %d stride %d --> 0x%08x%08x", 9) \ +X( 9, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID0, "Wrote the Value %d to OSID0, Cat Base %d, Register's 
contents are now 0x%08x 0x%08x", 4) \ +X( 10, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID1, "Wrote the Value %d to OSID1, Context %d, Register's contents are now 0x%04x", 3) \ +X( 11, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSIDx, "ui32OSid = %u, Catbase = %u, Reg Address = 0x%x, Reg index = %u, Bitshift index = %u, Val = 0x%08x%08x", 7) \ +X( 12, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY_BIFREQ, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, BIFREQ %u", 5) \ +X( 13, RGXFW_GROUP_BIF, RGXFW_SF_BIF_UNMAP_GPU_MEMORY, "Unmap GPU memory (event status 0x%x)", 1) \ +X( 14, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE_DM, "Activate MemCtx=0x%08x DM=%d secure=%d", 3) \ +X( 15, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_DM, "Setup reg=%d DM=%d, expect=0x%08x%08x, actual=0x%08x%08x", 6) \ +X( 16, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u", 4) \ +\ +X( 1, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_WRITE, "GPIO write 0x%02x", 1) \ +X( 2, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_READ, "GPIO read 0x%02x", 1) \ +X( 3, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_ENABLED, "GPIO enabled", 0) \ +X( 4, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_DISABLED, "GPIO disabled", 0) \ +X( 5, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_STATUS, "GPIO status=%d (0=OK, 1=Disabled)", 1) \ +X( 6, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_READ, "GPIO_AP: Read address=0x%02x (%d byte(s))", 2) \ +X( 7, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_WRITE, "GPIO_AP: Write address=0x%02x (%d byte(s))", 2) \ +X( 8, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_TIMEOUT, "GPIO_AP timeout!", 0) \ +X( 9, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_ERROR, "GPIO_AP error. 
GPIO status=%d (0=OK, 1=Disabled)", 1) \ +X( 10, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_ALREADY_READ, "GPIO already read 0x%02x", 1) \ +X( 11, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_CHECK_BUFFER_AVAILABLE, "SR: Check buffer %d available returned %d", 2) \ +X( 12, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAITING_BUFFER_AVAILABLE, "SR: Waiting for buffer %d", 1) \ +X( 13, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAIT_BUFFER_TIMEOUT, "SR: Timeout waiting for buffer %d (after %d ticks)", 2) \ +X( 14, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_CHECK, "SR: Skip frame check for strip %d returned %d (0=No skip, 1=Skip frame)", 2) \ +X( 15, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_REMAINING_STRIPS, "SR: Skip remaining strip %d in frame", 1) \ +X( 16, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_FRAME_SKIP_NEW_FRAME, "SR: Inform HW that strip %d is a new frame", 1) \ +X( 17, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_TIMEOUT, "SR: Timeout waiting for INTERRUPT_FRAME_SKIP (after %d ticks)", 1) \ +X( 18, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_MODE, "SR: Strip mode is %d", 1) \ +X( 19, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_INDEX, "SR: Strip Render start (strip %d)", 1) \ +X( 20, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_RENDERED, "SR: Strip Render complete (buffer %d)", 1) \ +X( 21, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_FAULT, "SR: Strip Render fault (buffer %d)", 1) \ +X( 22, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_STATE, "TRP state: %d", 1) \ +X( 23, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_FAILURE, "TRP failure: %d", 1) \ +X( 24, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SW_TRP_STATE, "SW TRP State: %d", 1) \ +X( 25, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SW_TRP_FAILURE, "SW TRP failure: %d", 1) \ +\ +X( 1, RGXFW_GROUP_PM, RGXFW_SF_PM_AMLIST, "ALIST%d SP = %u, MLIST%d SP = %u (VCE 0x%08x%08x, TE 0x%08x%08x, ALIST 0x%08x%08x)", 10) \ +X( 2, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED_DEPRECATED, "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). 
FL different between TA/3D: global:%d, local:%d, mmu:%d", 8) \ +X( 3, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE_DEPRECATED, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-3D-Base: 0x%08x%08x (SP = %u, 4PT = %u)", 14) \ +X( 4, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE_DEPRECATED, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-TA-Base: 0x%08x%08x (SP = %u, 4PT = %u)", 14) \ +X( 5, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_COMPLETE_DEPRECATED, "Freelist grow completed [0x%08x]: added pages 0x%08x, total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 5) \ +X( 6, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_DENIED_DEPRECATED, "Grow for freelist ID=0x%08x denied by host", 1) \ +X( 7, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_COMPLETE, "Freelist update completed [0x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 5) \ +X( 8, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_RECONSTRUCTION_FAILED_DEPRECATED, "Reconstruction of freelist ID=0x%08x failed", 1) \ +X( 9, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_WARNING, "Ignored attempt to pause or unpause the DM while there is no relevant operation in progress (0-TA,1-3D): %d, operation(0-unpause, 1-pause): %d", 2) \ +X( 10, RGXFW_GROUP_PM, RGXFW_SF_PM_3D_TIMEOUT_STATUS, "Force free 3D Context memory, FWCtx: 0x%08x, status(1:success, 0:fail): %d", 2)\ +X( 11, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_ALLOC, "PM pause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ +X( 12, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_ALLOC, "PM unpause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ +X( 13, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_DALLOC, "PM pause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ +X( 14, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_DALLOC, "PM unpause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ +X( 15, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_FAILED, "PM ALLOC/DALLOC change was not actioned: PM_PAGE_MANAGEOP_STATUS=0x%x", 
1) \ +X( 16, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED, "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). FL different between TA/3D: global:%d, local:%d", 7) \ +X( 17, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \ +X( 18, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \ +X( 19, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_COMPLETE_VOLCANIC, "Freelist update completed [0x%08x / FL State 0x%08x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 7) \ +X( 20, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_FAILED, "Freelist update failed [0x%08x / FL State 0x%08x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 7) \ +X( 21, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE_VOLCANIC, "UFL-3D-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \ +X( 22, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE_VOLCANIC, "UFL-TA-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \ +X( 23, RGXFW_GROUP_PM, RGXFW_SF_PM_CHECK_FL_BASEADDR, "Freelist 0x%08x base address from HW: 0x%02x%08x (expected value: 0x%02x%08x)", 5) \ +X( 24, RGXFW_GROUP_PM, RGXFW_SF_PM_ANALYSE_FL_GROW, "Analysis of FL grow: Pause=(%u,%u) Paused+Valid(%u,%u) PMStateBuffer=0x%x", 5) \ +X( 25, RGXFW_GROUP_PM, RGXFW_SF_PM_ATTEMPT_FL_GROW, "Attempt FL grow for FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u", 5) \ +X( 26, RGXFW_GROUP_PM, RGXFW_SF_PM_DEFER_FL_GROW, "Deferring FL grow for non-loaded FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u", 5) \ +X( 27, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED_ALBIORIX, "Is GEOM: %d, finished: %d (HWRTData = 0x%08x, 
MemCtx = 0x%08x)", 4) \ +X( 28, RGXFW_GROUP_PM, RGXFW_SF_PM_3D_TIMEOUT, "3D Timeout Now for FWCtx 0x%08.8x", 1) \ +X( 29, RGXFW_GROUP_PM, RGXFW_SF_PM_RECYCLE, "GEOM PM Recycle for FWCtx 0x%08.8x", 1) \ +\ +X( 1, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_DYNAMIC_STATUS_DEPRECATED, "Global link list dynamic page count: vertex 0x%x, varying 0x%x, node 0x%x", 3) \ +X( 2, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_STATIC_STATUS_DEPRECATED, "Global link list static page count: vertex 0x%x, varying 0x%x, node 0x%x", 3) \ +X( 3, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_GROW_DEPRECATED, "RPM request failed. Waiting for freelist grow.", 0) \ +X( 4, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_ABORT_DEPRECATED, "RPM request failed. Aborting the current frame.", 0) \ +X( 5, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_PENDING_GROW_DEPRECATED, "RPM waiting for pending grow on freelist 0x%08x", 1) \ +X( 6, RGXFW_GROUP_RPM, RGXFW_SF_RPM_REQUEST_HOST_GROW_DEPRECATED, "Request freelist grow [0x%08x] current pages %d, grow size %d", 3) \ +X( 7, RGXFW_GROUP_RPM, RGXFW_SF_RPM_FREELIST_LOAD_DEPRECATED, "Freelist load: SHF = 0x%08x, SHG = 0x%08x", 2) \ +X( 8, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_DEPRECATED, "SHF FPL register: 0x%08x.0x%08x", 2) \ +X( 9, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_DEPRECATED, "SHG FPL register: 0x%08x.0x%08x", 2) \ +X( 10, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_FREELIST_DEPRECATED, "Kernel requested RPM grow on freelist (type %d) at 0x%08x from current size %d to new size %d, RPM restart: %d (1=Yes)", 5) \ +X( 11, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_RESTART_DEPRECATED, "Restarting SHG", 0) \ +X( 12, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_ABORTED_DEPRECATED, "Grow failed, aborting the current frame.", 0) \ +X( 13, RGXFW_GROUP_RPM, RGXFW_SF_RPM_ABORT_COMPLETE_DEPRECATED, "RPM abort complete on HWFrameData [0x%08x].", 1) \ +X( 14, RGXFW_GROUP_RPM, RGXFW_SF_RPM_CLEANUP_NEEDS_ABORT_DEPRECATED, "RPM freelist cleanup [0x%08x] requires abort to proceed.", 1) \ +X( 15, RGXFW_GROUP_RPM, 
RGXFW_SF_RPM_RPM_PT_DEPRECATED, "RPM page table base register: 0x%08x.0x%08x", 2) \ +X( 16, RGXFW_GROUP_RPM, RGXFW_SF_RPM_OOM_ABORT_DEPRECATED, "Issuing RPM abort.", 0) \ +X( 17, RGXFW_GROUP_RPM, RGXFW_SF_RPM_OOM_TOGGLE_CHECK_FULL_DEPRECATED, "RPM OOM received but toggle bits indicate free pages available", 0) \ +X( 18, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_HW_TIMEOUT_DEPRECATED, "RPM hardware timeout. Unable to process OOM event.", 0) \ +X( 19, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_LOAD_DEPRECATED_DEPRECATED, "SHF FL (0x%08x) load, FPL: 0x%08x.0x%08x, roff: 0x%08x, woff: 0x%08x", 5) \ +X( 20, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_LOAD_DEPRECATED, "SHG FL (0x%08x) load, FPL: 0x%08x.0x%08x, roff: 0x%08x, woff: 0x%08x", 5) \ +X( 21, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_STORE_DEPRECATED, "SHF FL (0x%08x) store, roff: 0x%08x, woff: 0x%08x", 3) \ +X( 22, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_STORE_DEPRECATED, "SHG FL (0x%08x) store, roff: 0x%08x, woff: 0x%08x", 3) \ +\ +X( 1, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_FINISHED, "3D RTData 0x%08x finished on HW context %u", 2) \ +X( 2, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_READY, "3D RTData 0x%08x ready on HW context %u", 2) \ +X( 3, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO_DEPRECATED, "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %d, global: %d, mmu: %d", 4) \ +X( 4, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_3D_DEPRECATED, "Loading VFP table 0x%08x%08x for 3D", 2) \ +X( 5, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_TA_DEPRECATED, "Loading VFP table 0x%08x%08x for TA", 2) \ +X( 6, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED, "Load Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 10) \ +X( 7, RGXFW_GROUP_RTD, RGXFW_SF_RTD_VHEAP_STORE, "Perform VHEAP table store", 0) \ +X( 8, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_MATCH_FOUND, "RTData 0x%08x: found match in Context=%d: Load=No, Store=No", 2) 
\ +X( 9, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_NULL_FOUND, "RTData 0x%08x: found NULL in Context=%d: Load=Yes, Store=No", 2) \ +X( 10, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_3D_FINISHED, "RTData 0x%08x: found state 3D finished (0x%08x) in Context=%d: Load=Yes, Store=Yes", 3) \ +X( 11, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_TA_FINISHED, "RTData 0x%08x: found state TA finished (0x%08x) in Context=%d: Load=Yes, Store=Yes", 3) \ +X( 12, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_STACK_POINTERS, "Loading stack-pointers for %d (0:MidTA,1:3D) on context %d, MLIST = 0x%08x, ALIST = 0x%08x%08x", 5) \ +X( 13, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED, "Store Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 10) \ +X( 14, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_FINISHED, "TA RTData 0x%08x finished on HW context %u", 2) \ +X( 15, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED, "TA RTData 0x%08x loaded on HW context %u", 2) \ +X( 16, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED2, "Store Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ +X( 17, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED2, "Load Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ +X( 18, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG_DEPRECATED, "Freelist 0x%x RESET!!!!!!!!", 1) \ +X( 19, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG2_DEPRECATED, "Freelist 0x%x stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 5) \ +X( 20, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_DEPRECATED, "Request reconstruction of Freelist 0x%x type: %d (0:local,1:global,2:mmu) on HW context %u", 3) \ +X( 21, 
RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED, "Freelist reconstruction ACK from host (HWR state :%u)", 1) \ +X( 22, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED2, "Freelist reconstruction completed", 0) \ +X( 23, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED_DEPRECATED, "TA RTData 0x%08x loaded on HW context %u HWRTDataNeedsLoading=%d", 3) \ +X( 24, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TE_RGNHDR_INFO, "TE Region headers base 0x%08x%08x (RGNHDR Init: %d)", 3) \ +X( 25, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_BUFFER_ADDRS_DEPRECATED, "TA Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)", 8) \ +X( 26, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_LOADED_DEPRECATED, "3D RTData 0x%08x loaded on HW context %u", 2) \ +X( 27, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS_DEPRECATED, "3D Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x (MemCtx 0x%08x)", 4) \ +X( 28, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RESTART_AFTER_PR_EXECUTED, "Restarting TA after partial render, HWRTData0State=0x%x, HWRTData1State=0x%x", 2) \ +X( 29, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO, "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %d, global: %d", 3) \ +X( 30, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_FL, "Store Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ +X( 31, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL, "Load Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ +X( 32, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS_DEPRECATED2, "3D Buffers: FWCtx 0x%08x, parent RT 0x%08x, RTData 0x%08x on ctx %d, (MemCtx 0x%08x)", 5) \ +X( 33, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_BUFFER_ADDRS, "TA Buffers: FWCtx 0x%08x, RTData 0x%08x, VHeap 
0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)", 7) \ +X( 34, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS, "3D Buffers: FWCtx 0x%08x, RTData 0x%08x on ctx %d, (MemCtx 0x%08x)", 4) \ +X( 35, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_V2, "Load Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u)", 6) \ +X( 36, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILLED_TA, "TA RTData 0x%08x marked as killed.", 1) \ +X( 37, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILLED_3D, "3D RTData 0x%08x marked as killed.", 1) \ +X( 38, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILL_TA_AFTER_RESTART, "RTData 0x%08x will be killed after TA restart.", 1) \ +X( 39, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RENDERSTATE_RESET, "RTData 0x%08x Render State Buffer 0x%08x%08x will be reset.", 3) \ +\ +X( 1, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZLOAD_DEPRECATED, "Force Z-Load for partial render", 0) \ +X( 2, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSTORE_DEPRECATED, "Force Z-Store for partial render", 0) \ +X( 3, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_LOCAL_DEPRECATED, "3D MemFree: Local FL 0x%08x", 1) \ +X( 4, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_MMU_DEPRECATED, "3D MemFree: MMU FL 0x%08x", 1) \ +X( 5, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_GLOBAL_DEPRECATED, "3D MemFree: Global FL 0x%08x", 1) \ +X( 6, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_DEPRECATED, "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x, HardwareSync Fence [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 6) \ +X( 7, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_FL, "OOM TA_cmd=0x%08x, U-FL 0x%08x, N-FL 0x%08x", 3) \ +X( 8, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_MMU_FL_DEPRECATED, "OOM TA_cmd=0x%08x, OOM MMU:%d, U-FL 0x%08x, N-FL 0x%08x, MMU-FL 0x%08x", 5) \ +X( 9, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_AVOIDED_DEPRECATED, "Partial render avoided", 0) \ +X( 10, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_DISCARDED_DEPRECATED, "Partial render discarded", 0) \ +X( 11, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_FINISHED, "Partial Render finished", 0) \ +X( 
12, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_3DBG_DEPRECATED, "SPM Owner = 3D-BG", 0) \ +X( 13, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_3DIRQ_DEPRECATED, "SPM Owner = 3D-IRQ", 0) \ +X( 14, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_NONE_DEPRECATED, "SPM Owner = NONE", 0) \ +X( 15, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_TABG_DEPRECATED, "SPM Owner = TA-BG", 0) \ +X( 16, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_TAIRQ_DEPRECATED, "SPM Owner = TA-IRQ", 0) \ +X( 17, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSTORE_ADDRESS, "ZStore address 0x%08x%08x", 2) \ +X( 18, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SSTORE_ADDRESS, "SStore address 0x%08x%08x", 2) \ +X( 19, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZLOAD_ADDRESS, "ZLoad address 0x%08x%08x", 2) \ +X( 20, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SLOAD_ADDRESS, "SLoad address 0x%08x%08x", 2) \ +X( 21, RGXFW_GROUP_SPM, RGXFW_SF_SPM_NO_DEFERRED_ZSBUFFER_DEPRECATED, "No deferred ZS Buffer provided", 0) \ +X( 22, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_POPULATED, "ZS Buffer successfully populated (ID=0x%08x)", 1) \ +X( 23, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_POP_UNNEEDED_DEPRECATED, "No need to populate ZS Buffer (ID=0x%08x)", 1) \ +X( 24, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNPOPULATED, "ZS Buffer successfully unpopulated (ID=0x%08x)", 1) \ +X( 25, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNPOP_UNNEEDED_DEPRECATED, "No need to unpopulate ZS Buffer (ID=0x%08x)", 1) \ +X( 26, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_DEPRECATED, "Send ZS-Buffer backing request to host (ID=0x%08x)", 1) \ +X( 27, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNBACKING_REQUEST_DEPRECATED, "Send ZS-Buffer unbacking request to host (ID=0x%08x)", 1) \ +X( 28, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_PENDING_DEPRECATED, "Don't send ZS-Buffer backing request. Previous request still pending (ID=0x%08x)", 1) \ +X( 29, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNBACKING_REQUEST_PENDING_DEPRECATED, "Don't send ZS-Buffer unbacking request. 
Previous request still pending (ID=0x%08x)", 1) \ +X( 30, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZBUFFER_NOT_READY_DEPRECATED, "Partial Render waiting for ZBuffer to be backed (ID=0x%08x)", 1) \ +X( 31, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SBUFFER_NOT_READY_DEPRECATED, "Partial Render waiting for SBuffer to be backed (ID=0x%08x)", 1) \ +X( 32, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_NONE, "SPM State = none", 0) \ +X( 33, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_BLOCKED, "SPM State = PR blocked", 0) \ +X( 34, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_GROW, "SPM State = wait for grow", 0) \ +X( 35, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_HW, "SPM State = wait for HW", 0) \ +X( 36, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_RUNNING, "SPM State = PR running", 0) \ +X( 37, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_AVOIDED, "SPM State = PR avoided", 0) \ +X( 38, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_EXECUTED, "SPM State = PR executed", 0) \ +X( 39, RGXFW_GROUP_SPM, RGXFW_SF_SPM_FREELIST_MATCH, "3DMemFree matches freelist 0x%08x (FL type = %u)", 2) \ +X( 40, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_FLAG_SET, "Raise the 3DMemFreeDedected flag", 0) \ +X( 41, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_PENDING_GROW, "Wait for pending grow on Freelist 0x%08x", 1) \ +X( 42, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_FAILED, "ZS Buffer failed to be populated (ID=0x%08x)", 1) \ +X( 43, RGXFW_GROUP_SPM, RGXFW_SF_SPM_FL_GROW_DEBUG, "Grow update inconsistency: FL addr: 0x%02x%08x, curr pages: %u, ready: %u, new: %u", 5) \ +X( 44, RGXFW_GROUP_SPM, RGXFW_SF_SPM_RESUMED_TA_WITH_SP, "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u, SP : %u", 4) \ +X( 45, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ACK_GROW_UPDATE_DEPRECATED, "Received grow update, FL addr: 0x%02x%08x, current pages: %u, ready pages: %u, threshold: %u", 5) \ +X( 46, RGXFW_GROUP_SPM, RGXFW_SF_SPM_NO_DEFERRED_PRBUFFER, "No deferred partial render FW (Type=%d) Buffer provided", 1) \ +X( 47, RGXFW_GROUP_SPM, 
RGXFW_SF_SPM_BUFFER_POP_UNNEEDED, "No need to populate PR Buffer (ID=0x%08x)", 1) \ +X( 48, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNPOP_UNNEEDED, "No need to unpopulate PR Buffer (ID=0x%08x)", 1) \ +X( 49, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_BACKING_REQUEST, "Send PR Buffer backing request to host (ID=0x%08x)", 1) \ +X( 50, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNBACKING_REQUEST, "Send PR Buffer unbacking request to host (ID=0x%08x)", 1) \ +X( 51, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_BACKING_REQUEST_PENDING, "Don't send PR Buffer backing request. Previous request still pending (ID=0x%08x)", 1) \ +X( 52, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNBACKING_REQUEST_PENDING, "Don't send PR Buffer unbacking request. Previous request still pending (ID=0x%08x)", 1) \ +X( 53, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_NOT_READY, "Partial Render waiting for Buffer %d type to be backed (ID=0x%08x)", 2) \ +X( 54, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ACK_GROW_UPDATE, "Received grow update, FL addr: 0x%02x%08x, new pages: %u, ready pages: %u", 4) \ +X( 66, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD, "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \ +X( 67, RGXFW_GROUP_SPM, RGXFW_SF_SPM_RESUMED_TA, "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u", 3) \ +\ +X( 1, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED, "Check Pow state DM%d int: 0x%x, ext: 0x%x, pow flags: 0x%x", 4) \ +X( 2, RGXFW_GROUP_POW, RGXFW_SF_POW_GPU_IDLE, "GPU idle (might be powered down). Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ +X( 3, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ_DEPRECATED, "OS requested pow off (forced = %d), DM%d, pow flags: 0x%x", 3) \ +X( 4, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_DEPRECATED, "Initiate powoff query. Inactive DMs: %d %d %d %d", 4) \ +X( 5, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECKOFF_DEPRECATED, "Any RD-DM pending? %d, Any RD-DM Active? %d", 2) \ +X( 6, RGXFW_GROUP_POW, RGXFW_SF_POW_GPU_OFF, "GPU ready to be powered down. 
Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ +X( 7, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ, "HW Request On(1)/Off(0): %d, Units: 0x%08.8x", 2) \ +X( 8, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_REQ, "Request to change num of dusts to %d (Power flags=%d)", 2) \ +X( 9, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE, "Changing number of dusts from %d to %d", 2) \ +X( 11, RGXFW_GROUP_POW, RGXFW_SF_POW_SIDEKICK_INIT_DEPRECATED, "Sidekick init", 0) \ +X( 12, RGXFW_GROUP_POW, RGXFW_SF_POW_RD_INIT_DEPRECATED, "Rascal+Dusts init (# dusts mask: 0x%x)", 1) \ +X( 13, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_RD, "Initiate powoff query for RD-DMs.", 0) \ +X( 14, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_TLA, "Initiate powoff query for TLA-DM.", 0) \ +X( 15, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_RD, "Any RD-DM pending? %d, Any RD-DM Active? %d", 2) \ +X( 16, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_TLA, "TLA-DM pending? %d, TLA-DM Active? %d", 2) \ +X( 17, RGXFW_GROUP_POW, RGXFW_SF_POW_BRN37270_DEPRECATED, "Request power up due to BRN37270. Pow stat int: 0x%x", 1) \ +X( 18, RGXFW_GROUP_POW, RGXFW_SF_POW_REQ_CANCEL, "Cancel power off request int: 0x%x, ext: 0x%x, pow flags: 0x%x", 3) \ +X( 19, RGXFW_GROUP_POW, RGXFW_SF_POW_FORCED_IDLE, "OS requested forced IDLE, pow flags: 0x%x", 1) \ +X( 20, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_FORCED_IDLE, "OS cancelled forced IDLE, pow flags: 0x%x", 1) \ +X( 21, RGXFW_GROUP_POW, RGXFW_SF_POW_IDLE_TIMER, "Idle timer start. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ +X( 22, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_IDLE_TIMER, "Cancel idle timer. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ +X( 23, RGXFW_GROUP_POW, RGXFW_SF_POW_APM_LATENCY_CHANGE, "Active PM latency set to %dms. 
Core clock: %d Hz", 2) \ +X( 24, RGXFW_GROUP_POW, RGXFW_SF_POW_CDM_CLUSTERS, "Compute cluster mask change to 0x%x, %d dusts powered.", 2) \ +X( 25, RGXFW_GROUP_POW, RGXFW_SF_POW_NULL_CMD_INIOFF_RD, "Null command executed, repeating initiate powoff query for RD-DMs.", 0) \ +X( 26, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_ENERGY, "Power monitor: Estimate of dynamic energy %u", 1) \ +X( 27, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED2, "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x", 3) \ +X( 28, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_NEW_DEADLINE, "Proactive DVFS: New deadline, time = 0x%08x%08x", 2) \ +X( 29, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_NEW_WORKLOAD, "Proactive DVFS: New workload, cycles = 0x%08x%08x", 2) \ +X( 30, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_CALCULATE, "Proactive DVFS: Proactive frequency calculated = %u", 1) \ +X( 31, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UTILISATION, "Proactive DVFS: Reactive utilisation = %u percent", 1) \ +X( 32, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_REACT, "Proactive DVFS: Reactive frequency calculated = %u.%u", 2) \ +X( 33, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_SEND_DEPRECATED, "Proactive DVFS: OPP Point Sent = 0x%x", 1) \ +X( 34, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DEADLINE_REMOVED, "Proactive DVFS: Deadline removed = 0x%08x%08x", 2) \ +X( 35, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_WORKLOAD_REMOVED, "Proactive DVFS: Workload removed = 0x%08x%08x", 2) \ +X( 36, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_THROTTLE, "Proactive DVFS: Throttle to a maximum = 0x%x", 1) \ +X( 37, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_FAILURE, "Proactive DVFS: Failed to pass OPP point via GPIO.", 0) \ +X( 38, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_INVALID_NODE_DEPRECATED, "Proactive DVFS: Invalid node passed to function.", 0) \ +X( 39, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GUEST_BAD_ACCESS_DEPRECATED, "Proactive DVFS: Guest OS attempted to do a privileged action. 
OSid = %u", 1) \ +X( 40, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_STARTED, "Proactive DVFS: Unprofiled work started. Total unprofiled work present: %u", 1) \ +X( 41, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_FINISHED, "Proactive DVFS: Unprofiled work finished. Total unprofiled work present: %u", 1) \ +X( 42, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DISABLED, "Proactive DVFS: Disabled: Not enabled by host.", 0) \ +X( 43, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ_RESULT, "HW Request Completed(1)/Aborted(0): %d, Ticks: %d", 2) \ +X( 44, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_FIX_59042_DEPRECATED, "Allowed number of dusts is %d due to BRN59042.", 1) \ +X( 45, RGXFW_GROUP_POW, RGXFW_SF_POW_HOST_TIMEOUT_NOTIFICATION, "Host timed out while waiting for a forced idle state. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ +X( 46, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK, "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x, Fence Counters: Check: %u - Update: %u", 5) \ +X( 47, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_SEND, "Proactive DVFS: OPP Point Sent = 0x%x, Success = 0x%x", 2) \ +X( 48, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_TO_IDLE, "Proactive DVFS: GPU transitioned to idle", 0) \ +X( 49, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_TO_ACTIVE, "Proactive DVFS: GPU transitioned to active", 0) \ +X( 50, RGXFW_GROUP_POW, RGXFW_SF_POW_POWDUMP_BUFFER_SIZE, "Power counter dumping: Data truncated writing register %u. 
Buffer too small.", 1) \ +X( 51, RGXFW_GROUP_POW, RGXFW_SF_POW_POWCTRL_ABORT, "Power controller returned ABORT for last request so retrying.", 0) \ +X( 52, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_POWER_REQUEST_DEPRECATED, "Discarding invalid power request: type 0x%x, DM %u", 2) \ +X( 53, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_FORCED_IDLE_NOT_IDLE, "Detected attempt to cancel forced idle while not forced idle (pow state 0x%x, pow flags 0x%x)", 2) \ +X( 54, RGXFW_GROUP_POW, RGXFW_SF_POW_FORCED_POW_OFF_NOT_IDLE, "Detected attempt to force power off while not forced idle (pow state 0x%x, pow flags 0x%x)", 2) \ +X( 55, RGXFW_GROUP_POW, RGXFW_SF_POW_NUMDUST_CHANGE_NOT_IDLE, "Detected attempt to change dust count while not forced idle (pow state 0x%x)", 1) \ +X( 56, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_RESULT, "Power monitor: Type = %d (0 = power, 1 = energy), Estimate result = 0x%08x%08x", 3) \ +X( 57, RGXFW_GROUP_POW, RGXFW_SF_POW_MINMAX_CONFLICT, "Conflicting clock frequency range: OPP min = %u, max = %u", 2) \ +X( 58, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_FLOOR, "Proactive DVFS: Set floor to a minimum = 0x%x", 1) \ +X( 59, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ, "OS requested pow off (forced = %d), pow flags: 0x%x", 2) \ +X( 60, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_POWER_REQUEST, "Discarding invalid power request: type 0x%x", 1) \ +X( 61, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_STATE_CHANGE_REQ, "Request to change SPU power state mask from 0x%x to 0x%x. Pow flags: 0x%x", 3) \ +X( 62, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_STATE_CHANGE, "Changing SPU power state mask from 0x%x to 0x%x", 2) \ +X( 63, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_CHANGE_NOT_IDLE, "Detected attempt to change SPU power state mask while not forced idle (pow state 0x%x)", 1) \ +X( 64, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_SPU_POWER_MASK, "Invalid SPU power mask 0x%x! 
Changing to 1", 1) \ +X( 65, RGXFW_GROUP_POW, RGXFW_SF_POW_CLKDIV_UPDATE, "Proactive DVFS: Send OPP %u with clock divider value %u", 2) \ +X( 66, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_PERF_MODE, "PPA block started in perf validation mode.", 0) \ +X( 67, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_RESET, "Reset PPA block state %u (1=reset, 0=recalculate).", 1) \ +X( 68, RGXFW_GROUP_POW, RGXFW_SF_POW_POWCTRL_ABORT_WITH_CORE, "Power controller returned ABORT for Core-%d last request so retrying.", 1) \ +\ +X( 1, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DEPRECATED, "Lockup detected on DM%d, FWCtx: 0x%08.8x", 2) \ +X( 2, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_FW_DEPRECATED, "Reset fw state for DM%d, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x", 3) \ +X( 3, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED, "Reset HW", 0) \ +X( 4, RGXFW_GROUP_HWR, RGXFW_SF_HWR_TERMINATED_DEPRECATED, "Lockup recovered.", 0) \ +X( 5, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DEPRECATED, "Lock-up DM%d FWCtx: 0x%08.8x", 2) \ +X( 6, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DETECTED_DEPRECATED, "Lockup detected: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \ +X( 7, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EARLY_FAULT_DETECTION_DEPRECATED, "Early fault detection: GLB(%d->%d), PER-DM(0x%08x)", 3) \ +X( 8, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP_DEPRECATED, "Hold scheduling due lockup: GLB(%d), PER-DM(0x%08x->0x%08x)", 3) \ +X( 9, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FALSE_LOCKUP_DEPRECATED, "False lockup detected: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \ +X( 10, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729_DEPRECATED, "BRN37729: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \ +X( 11, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED, "Freelists reconstructed: GLB(%d->%d), PER-DM(0x%08x)", 3) \ +X( 12, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RECONSTRUCTING_FREELISTS_DEPRECATED, "Reconstructing freelists: %u (0-No, 1-Yes): GLB(%d->%d), PER-DM(0x%08x)", 4) \ +X( 13, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FAILED_HW_POLL, "HW poll %u (0-Unset 
1-Set) failed (reg:0x%08x val:0x%08x)", 3) \ +X( 14, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED_DEPRECATED, "Discarded cmd on DM%u FWCtx=0x%08x", 2) \ +X( 15, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED, "Discarded cmd on DM%u (reason=%u) HWRTData=0x%08x (st: %d), FWCtx 0x%08x @ %d", 6) \ +X( 16, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PM_FENCE_DEPRECATED, "PM fence WA could not be applied, Valid TA Setup: %d, RD powered off: %d", 2) \ +X( 17, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_SNAPSHOT, "FL snapshot RTD 0x%08.8x - local (0x%08.8x): %d, global (0x%08.8x): %d", 5) \ +X( 18, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_CHECK, "FL check RTD 0x%08.8x, discard: %d - local (0x%08.8x): s%d?=c%d, global (0x%08.8x): s%d?=c%d", 8) \ +X( 19, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_DEPRECATED, "FL reconstruction 0x%08.8x c%d", 2) \ +X( 20, RGXFW_GROUP_HWR, RGXFW_SF_HWR_3D_CHECK, "3D check: missing TA FWCtx 0x%08.8x @ %d, RTD 0x%08x.", 3) \ +X( 21, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED2, "Reset HW (mmu:%d, extmem: %d)", 2) \ +X( 22, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_TA_CACHES, "Zero TA caches for FWCtx: 0x%08.8x (TPC addr: 0x%08x%08x, size: %d bytes)", 4) \ +X( 23, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED2, "Recovery DM%u: Freelists reconstructed. New R-Flags=0x%08x", 2) \ +X( 24, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SKIPPED_CMD, "Recovery DM%u: FWCtx 0x%08x skipped to command @ %u. PR=%u. New R-Flags=0x%08x", 5) \ +X( 25, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_RECOVERED, "Recovery DM%u: DM fully recovered", 1) \ +X( 26, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP, "DM%u: Hold scheduling due to R-Flag = 0x%08x", 2) \ +X( 27, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_RECONSTRUCTION, "Analysis: Need freelist reconstruction", 0) \ +X( 28, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_SKIP, "Analysis DM%u: Lockup FWCtx: 0x%08.8x. Need to skip to next command", 2) \ +X( 29, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_SKIP_OOM_TA, "Analysis DM%u: Lockup while TA is OOM FWCtx: 0x%08.8x. 
Need to skip to next command", 2) \ +X( 30, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_PR_CLEANUP, "Analysis DM%u: Lockup while partial render FWCtx: 0x%08.8x. Need PR cleanup", 2) \ +X( 31, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DEPRECATED2, "GPU has locked up", 0) \ +X( 32, RGXFW_GROUP_HWR, RGXFW_SF_HWR_READY, "DM%u ready for HWR", 1) \ +X( 33, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_UPDATE_RECOVERY, "Recovery DM%u: Updated Recovery counter. New R-Flags=0x%08x", 2) \ +X( 34, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729_DEPRECATED2, "Analysis: BRN37729 detected, reset TA and re-kicked 0x%08x)", 1) \ +X( 35, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_TIMED_OUT, "DM%u timed out", 1) \ +X( 36, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EVENT_STATUS_REG, "RGX_CR_EVENT_STATUS=0x%08x", 1) \ +X( 37, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_FALSE_LOCKUP, "DM%u lockup falsely detected, R-Flags=0x%08x", 2) \ +X( 38, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_OUTOFTIME, "GPU has overrun its deadline", 0) \ +X( 39, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_POLLFAILURE, "GPU has failed a poll", 0) \ +X( 40, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PERF_PHASE_REG, "RGX DM%u phase count=0x%08x", 2) \ +X( 41, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW, "Reset HW (loop:%d, poll failures: 0x%08x)", 2) \ +X( 42, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_FAULT_EVENT, "MMU fault event: 0x%08x", 1) \ +X( 43, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BIF1_FAULT, "BIF1 page fault detected (Bank1 MMU Status: 0x%08x)", 1) \ +X( 44, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK_TRUE_DEPRECATED, "Fast CRC Failed. 
Proceeding to full register checking (DM: %u).", 1) \ +X( 45, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_META_FAULT, "Meta MMU page fault detected (Meta MMU Status: 0x%08x%08x)", 2) \ +X( 46, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK, "Fast CRC Check result for DM%u is HWRNeeded=%u", 2) \ +X( 47, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FULL_CHECK, "Full Signature Check result for DM%u is HWRNeeded=%u", 2) \ +X( 48, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINAL_RESULT, "Final result for DM%u is HWRNeeded=%u with HWRChecksToGo=%u", 3) \ +X( 49, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_CHECK, "USC Slots result for DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d", 3) \ +X( 50, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DEADLINE_CHECK_DEPRECATED, "Deadline counter for DM%u is HWRDeadline=%u", 2) \ +X( 51, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST_DEPRECATED, "Holding Scheduling on OSid %u due to pending freelist reconstruction", 1) \ +X( 52, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_REQUEST, "Requesting reconstruction for freelist 0x%x (ID=%d)", 2) \ +X( 53, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_PASSED, "Reconstruction of freelist ID=%d complete", 1) \ +X( 54, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global,2:mmu) on HW context %u", 4) \ +X( 55, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FAILED, "Reconstruction of freelist ID=%d failed", 1) \ +X( 56, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESTRICTING_PDS_TASKS, "Restricting PDS Tasks to help other stalling DMs (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)", 4) \ +X( 57, RGXFW_GROUP_HWR, RGXFW_SF_HWR_UNRESTRICTING_PDS_TASKS, "Unrestricting PDS Tasks again (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)", 4) \ +X( 58, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_USED, "USC slots: %u used by DM%u", 2) \ +X( 59, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_EMPTY, "USC slots: %u empty", 1) \ +X( 60, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HCS_FIRE, "HCS DM%d's Context Switch 
failed to meet deadline. Current time: 0x%08x%08x, deadline: 0x%08x%08x", 5) \ +X( 61, RGXFW_GROUP_HWR, RGXFW_SF_HWR_START_HW_RESET, "Begin hardware reset (HWR Counter=%d)", 1) \ +X( 62, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINISH_HW_RESET, "Finished hardware reset (HWR Counter=%d)", 1) \ +X( 63, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST, "Holding Scheduling on DM %u for OSid %u due to pending freelist reconstruction", 2) \ +X( 64, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_UMQ_READ_OFFSET, "User Mode Queue ROff reset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes StreamStartOffset = %u)", 5) \ +X( 65, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED2, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global) on HW context %u", 4) \ +X( 66, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MIPS_FAULT, "Mips page fault detected (BadVAddr: 0x%08x, EntryLo0: 0x%08x, EntryLo1: 0x%08x)", 3) \ +X( 67, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ANOTHER_CHANCE, "At least one other DM is running okay so DM%u will get another chance", 1) \ +X( 68, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FW, "Reconstructing in FW, FL: 0x%x (ID=%d)", 2) \ +X( 69, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_RTC, "Zero RTC for FWCtx: 0x%08.8x (RTC addr: 0x%08x%08x, size: %d bytes)", 4) \ +X( 70, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED3, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global) phase: %d (0:TA, 1:3D) on HW context %u", 5) \ +X( 71, RGXFW_GROUP_HWR, RGXFW_SF_HWR_START_LONG_HW_POLL, "Start long HW poll %u (0-Unset 1-Set) for (reg:0x%08x val:0x%08x)", 3) \ +X( 72, RGXFW_GROUP_HWR, RGXFW_SF_HWR_END_LONG_HW_POLL, "End long HW poll (result=%d)", 1) \ +X( 73, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DEADLINE_CHECK, "DM%u has taken %d ticks and deadline is %d ticks", 3) \ +X( 74, RGXFW_GROUP_HWR, RGXFW_SF_HWR_WATCHDOG_CHECK, "USC Watchdog result for DM%u is HWRNeeded=%u Status=%u USCs={0x%x} with HWRChecksToGo=%u", 5) \ +X( 75, RGXFW_GROUP_HWR, 
RGXFW_SF_HWR_FL_RECON_NEEDED, "Reconstruction needed for freelist 0x%x (ID=%d) OSid: %d type: %d (0:local,1:global) phase: %d (0:TA, 1:3D) on HW context %u", 6) \ +X( 76, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP, "GPU-%d has locked up", 1) \ +\ +X( 1, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CFGBLK, "Block 0x%x mapped to Config Idx %u", 2) \ +X( 2, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_OMTBLK, "Block 0x%x omitted from event - not enabled in HW", 1) \ +X( 3, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INCBLK, "Block 0x%x included in event - enabled in HW", 1) \ +X( 4, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_SELREG, "Select register state hi_0x%x lo_0x%x", 2) \ +X( 5, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CSBHDR, "Counter stream block header word 0x%x", 1) \ +X( 6, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTROFF, "Counter register offset 0x%x", 1) \ +X( 7, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CFGSKP, "Block 0x%x config unset, skipping", 1) \ +X( 8, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INDBLK, "Accessing Indirect block 0x%x", 1) \ +X( 9, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DIRBLK, "Accessing Direct block 0x%x", 1) \ +X( 10, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CNTPRG, "Programmed counter select register at offset 0x%x", 1) \ +X( 11, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKPRG, "Block register offset 0x%x and value 0x%x", 2) \ +X( 12, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKCG, "Reading config block from driver 0x%x", 1) \ +X( 13, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKRG, "Reading block range 0x%x to 0x%x", 2) \ +X( 14, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKREC, "Recording block 0x%x config from driver", 1) \ +X( 15, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKED, "Finished reading config block from driver", 0) \ +X( 16, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_COUNTER, "Custom Counter offset: 0x%x value: 0x%x", 2) \ +X( 17, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_SELECT_CNTR, "Select counter n:%u ID:0x%x", 2) \ +X( 18, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_SELECT_PACK, "The counter ID 0x%x is not allowed. 
The package [b:%u, n:%u] will be discarded", 3) \ +X( 19, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS_CUSTOM, "Custom Counters filter status %d", 1) \ +X( 20, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_WRONG_BLOCK, "The Custom block %d is not allowed. Use only blocks lower than %d. The package will be discarded", 2) \ +X( 21, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_TOO_MANY_ID, "The package will be discarded because it contains %d counters IDs while the upper limit is %d", 2) \ +X( 22, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHECK_FILTER, "Check Filter 0x%x is 0x%x ?", 2) \ +X( 23, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_RESET_CUSTOM_BLOCK, "The custom block %u is reset", 1) \ +X( 24, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INVALID_CMD_DEPRECATED, "Encountered an invalid command (%d)", 1) \ +X( 25, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_DEPRECATED, "HWPerf Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)", 2) \ +X( 26, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_FENCE_DEPRECATED, "HWPerf Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)", 3) \ +X( 27, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_BLOCK, "Custom Counter block: %d", 1) \ +X( 28, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKENA, "Block 0x%x ENABLED", 1) \ +X( 29, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKDIS, "Block 0x%x DISABLED", 1) \ +X( 30, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INDBLK_INSTANCE, "Accessing Indirect block 0x%x, instance %u", 2) \ +X( 31, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTRVAL, "Counter register 0x%x, Value 0x%x", 2) \ +X( 32, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS, "Counters filter status %d", 1) \ +X( 33, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTLBLK, "Block 0x%x mapped to Ctl Idx %u", 2) \ +X( 34, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_WORKEST_EN, "Block(s) in use for workload estimation.", 0) \ +\ +X( 1, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_REQUEST_DEPRECATED, "Transfer 0x%02x request: 0x%02x%08x -> 0x%08x, size %u", 5) \ +X( 2, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_COMPLETE, 
"Transfer of type 0x%02x expected on channel %u, 0x%02x found, status %u", 4) \ +X( 3, RGXFW_GROUP_DMA, RGXFW_SF_DMA_INT_REG, "DMA Interrupt register 0x%08x", 1) \ +X( 4, RGXFW_GROUP_DMA, RGXFW_SF_DMA_WAIT, "Waiting for transfer of type 0x%02x completion...", 1) \ +X( 5, RGXFW_GROUP_DMA, RGXFW_SF_DMA_CCB_LOADING_FAILED, "Loading of cCCB data from FW common context 0x%08x (offset: %u, size: %u) failed", 3) \ +X( 6, RGXFW_GROUP_DMA, RGXFW_SF_DMA_CCB_LOAD_INVALID, "Invalid load of cCCB data from FW common context 0x%08x (offset: %u, size: %u)", 3) \ +X( 7, RGXFW_GROUP_DMA, RGXFW_SF_DMA_POLL_FAILED, "Transfer 0x%02x request poll failure", 1) \ +X( 8, RGXFW_GROUP_DMA, RGXFW_SF_DMA_BOOT_TRANSFER_FAILED, "Boot transfer(s) failed (code? %u, data? %u), used slower memcpy instead", 2) \ +X( 9, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_REQUEST, "Transfer 0x%02x request on ch. %u: system 0x%02x%08x, coremem 0x%08x, flags 0x%x, size %u", 7) \ +\ +X( 1, RGXFW_GROUP_DBG, RGXFW_SF_DBG_INTPAIR, "0x%08x 0x%08x", 2) \ +X( 2, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1HEX, "0x%08x", 1) \ +X( 3, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2HEX, "0x%08x 0x%08x", 2) \ +X( 4, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3HEX, "0x%08x 0x%08x 0x%08x", 3) \ +X( 5, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4HEX, "0x%08x 0x%08x 0x%08x 0x%08x", 4) \ +X( 6, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 5) \ +X( 7, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 6) \ +X( 8, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 7) \ +X( 9, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 8) \ +X( 10, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1SIGNED, "%d", 1) \ +X( 11, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2SIGNED, "%d %d", 2) \ +X( 12, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3SIGNED, "%d %d %d", 3) \ +X( 13, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4SIGNED, "%d %d %d %d", 4) \ +X( 14, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5SIGNED, "%d %d %d %d %d", 5) \ +X( 15, 
RGXFW_GROUP_DBG, RGXFW_SF_DBG_6SIGNED, "%d %d %d %d %d %d", 6) \ +X( 16, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7SIGNED, "%d %d %d %d %d %d %d", 7) \ +X( 17, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8SIGNED, "%d %d %d %d %d %d %d %d", 8) \ +X( 18, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1UNSIGNED, "%u", 1) \ +X( 19, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2UNSIGNED, "%u %u", 2) \ +X( 20, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3UNSIGNED, "%u %u %u", 3) \ +X( 21, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4UNSIGNED, "%u %u %u %u", 4) \ +X( 22, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5UNSIGNED, "%u %u %u %u %u", 5) \ +X( 23, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6UNSIGNED, "%u %u %u %u %u %u", 6) \ +X( 24, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7UNSIGNED, "%u %u %u %u %u %u %u", 7) \ +X( 25, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8UNSIGNED, "%u %u %u %u %u %u %u %u", 8) \ +\ +X(65535, RGXFW_GROUP_NULL, RGXFW_SF_LAST, "You should not use this string", 15) + + +/* The symbolic names found in the table above are assigned an ui32 value of + * the following format: + * 31 30 28 27 20 19 16 15 12 11 0 bits + * - --- ---- ---- ---- ---- ---- ---- ---- + * 0-11: id number + * 12-15: group id number + * 16-19: number of parameters + * 20-27: unused + * 28-30: active: identify SF packet, otherwise regular int32 + * 31: reserved for signed/unsigned compatibility + * + * The following macro assigns those values to the enum generated SF ids list. 
+ */ +#define RGXFW_LOG_IDMARKER (0x70000000U) +#define RGXFW_LOG_CREATESFID(a,b,e) ((IMG_UINT32)(a) | ((IMG_UINT32)(b)<<12U) | ((IMG_UINT32)(e)<<16U)) | RGXFW_LOG_IDMARKER + +#define RGXFW_LOG_IDMASK (0xFFF00000) +#define RGXFW_LOG_VALIDID(I) (((I) & RGXFW_LOG_IDMASK) == RGXFW_LOG_IDMARKER) + +typedef enum { +#define X(a, b, c, d, e) c = RGXFW_LOG_CREATESFID(a,b,e), + RGXFW_LOG_SFIDLIST +#undef X +} RGXFW_LOG_SFids; + +/* Return the group id that the given (enum generated) id belongs to */ +#define RGXFW_SF_GID(x) (((IMG_UINT32)(x)>>12) & 0xfU) +/* Returns how many arguments the SF(string format) for the given (enum generated) id requires */ +#define RGXFW_SF_PARAMNUM(x) (((IMG_UINT32)(x)>>16) & 0xfU) + +#endif /* RGX_FWIF_SF_H */ diff --git a/drivers/mcst/gpu-imgtec/services/include/rgx_pdump_panics.h b/drivers/mcst/gpu-imgtec/services/include/rgx_pdump_panics.h new file mode 100644 index 000000000000..fce2b3efab69 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/rgx_pdump_panics.h @@ -0,0 +1,64 @@ +/*************************************************************************/ /*! +@File +@Title RGX PDump panic definitions header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX PDump panic definitions header +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGX_PDUMP_PANICS_H_) +#define RGX_PDUMP_PANICS_H_ + +/*! Unique device specific IMG_UINT16 panic IDs to identify the cause of an + * RGX PDump panic in a PDump script. */ +typedef enum +{ + RGX_PDUMP_PANIC_UNDEFINED = 0, + + /* These panics occur when test parameters and driver configuration + * enable features that require the firmware and host driver to + * communicate. Such features are not supported with off-line playback. 
+ */ + RGX_PDUMP_PANIC_ZSBUFFER_BACKING = 101, /*!< Requests ZSBuffer to be backed with physical pages */ + RGX_PDUMP_PANIC_ZSBUFFER_UNBACKING = 102, /*!< Requests ZSBuffer to be unbacked */ + RGX_PDUMP_PANIC_FREELIST_GROW = 103, /*!< Requests an on-demand freelist grow/shrink */ + RGX_PDUMP_PANIC_FREELISTS_RECONSTRUCTION = 104, /*!< Requests freelists reconstruction */ + RGX_PDUMP_PANIC_SPARSEMEM_SWAP = 105, /*!< Requests sparse remap memory swap feature */ +} RGX_PDUMP_PANIC; + +#endif /* RGX_PDUMP_PANICS_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/include/rgx_tq_shared.h b/drivers/mcst/gpu-imgtec/services/include/rgx_tq_shared.h new file mode 100644 index 000000000000..dc10b6eecc91 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/rgx_tq_shared.h @@ -0,0 +1,63 @@ +/*************************************************************************/ /*! +@File +@Title RGX transfer queue shared +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Shared definitions between client and server +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGX_TQ_SHARED_H +#define RGX_TQ_SHARED_H + +#define TQ_MAX_PREPARES_PER_SUBMIT 16U + +#define TQ_PREP_FLAGS_COMMAND_3D 0x0U +#define TQ_PREP_FLAGS_COMMAND_2D 0x1U +#define TQ_PREP_FLAGS_COMMAND_MASK (0xfU) +#define TQ_PREP_FLAGS_COMMAND_SHIFT 0 +#define TQ_PREP_FLAGS_PDUMPCONTINUOUS (1U << 4) +#define TQ_PREP_FLAGS_START (1U << 5) +#define TQ_PREP_FLAGS_END (1U << 6) + +#define TQ_PREP_FLAGS_COMMAND_SET(m) \ + ((TQ_PREP_FLAGS_COMMAND_##m << TQ_PREP_FLAGS_COMMAND_SHIFT) & TQ_PREP_FLAGS_COMMAND_MASK) + +#define TQ_PREP_FLAGS_COMMAND_IS(m,n) \ + (((m & TQ_PREP_FLAGS_COMMAND_MASK) >> TQ_PREP_FLAGS_COMMAND_SHIFT) == TQ_PREP_FLAGS_COMMAND_##n) + +#endif /* RGX_TQ_SHARED_H */ diff --git a/drivers/mcst/gpu-imgtec/services/include/rgxfw_log_helper.h b/drivers/mcst/gpu-imgtec/services/include/rgxfw_log_helper.h new file mode 100644 index 000000000000..2ac666d6ee03 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/rgxfw_log_helper.h @@ -0,0 +1,79 @@ +/*************************************************************************/ /*! +@File rgxfw_log_helper.h +@Title Firmware TBI logging helper function +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Platform Generic +@Description This file contains some helper code to make TBI logging possible + Specifically, it uses the SFIDLIST xmacro to trace ids back to + the original strings. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef RGXFW_LOG_HELPER_H +#define RGXFW_LOG_HELPER_H + +#include "rgx_fwif_sf.h" + +static IMG_CHAR *const groups[]= { +#define X(A,B) #B, + RGXFW_LOG_SFGROUPLIST +#undef X +}; + +/* idToStringID : Search SFs tuples {id,string} for a matching id. + * return index to array if found or RGXFW_SF_LAST if none found. + * bsearch could be used as ids are in increasing order. */ +#if defined(RGX_FIRMWARE) +static IMG_UINT32 idToStringID(IMG_UINT32 ui32CheckData, const RGXFW_STID_FMT *const psSFs) +#else +static IMG_UINT32 idToStringID(IMG_UINT32 ui32CheckData, const RGXKM_STID_FMT *const psSFs) +#endif +{ + IMG_UINT32 i = 0, ui32Id = (IMG_UINT32)RGXFW_SF_LAST; + + for ( i = 0 ; psSFs[i].ui32Id != (IMG_UINT32)RGXFW_SF_LAST ; i++) + { + if ( ui32CheckData == psSFs[i].ui32Id ) + { + ui32Id = i; + break; + } + } + return ui32Id; +} + +#endif /* RGXFW_LOG_HELPER_H */ diff --git a/drivers/mcst/gpu-imgtec/services/include/rgxtransfer_shader.h b/drivers/mcst/gpu-imgtec/services/include/rgxtransfer_shader.h new file mode 100644 index 000000000000..979f85bd4414 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/rgxtransfer_shader.h @@ -0,0 +1,61 @@ +/*************************************************************************/ /*! +@File rgxtransfer_shader.h +@Title TQ binary shader file info +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header holds info about TQ binary shader file generated + by the TQ shader factory. This header is need by shader factory + when generating the file; by services KM when reading and + loading the file into memory; and by services UM when + constructing blits using the shaders. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGXSHADERHEADER_H) +#define RGXSHADERHEADER_H + +typedef struct _RGX_SHADER_HEADER_ +{ + IMG_UINT32 ui32Version; + IMG_UINT32 ui32NumFragment; + IMG_UINT32 ui32SizeFragment; + IMG_UINT32 ui32NumTDMFragment; + IMG_UINT32 ui32SizeTDMFragment; + IMG_UINT32 ui32SizeClientMem; +} RGX_SHADER_HEADER; + +#endif /* RGXSHADERHEADER_H */ diff --git a/drivers/mcst/gpu-imgtec/services/include/rogue/km_apphint_defs.h b/drivers/mcst/gpu-imgtec/services/include/rogue/km_apphint_defs.h new file mode 100644 index 000000000000..2c1ee9b33d61 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/rogue/km_apphint_defs.h @@ -0,0 +1,144 @@ +/*************************************************************************/ /*! +@File +@Title Services AppHint definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include "km_apphint_defs_common.h" + +#ifndef KM_APPHINT_DEFS_H +#define KM_APPHINT_DEFS_H + +/* NB: The 'DEVICE' AppHints must be last in this list as they will be + * duplicated in the case of a driver supporting multiple devices + */ +#define APPHINT_LIST_ALL \ + APPHINT_LIST_BUILDVAR_COMMON \ + APPHINT_LIST_BUILDVAR \ + APPHINT_LIST_MODPARAM_COMMON \ + APPHINT_LIST_MODPARAM \ + APPHINT_LIST_DEBUGFS_COMMON \ + APPHINT_LIST_DEBUGFS \ + APPHINT_LIST_DEBUGFS_DEVICE_COMMON \ + APPHINT_LIST_DEBUGFS_DEVICE + + +/* +******************************************************************************* + Build variables (rogue-specific) + All of these should be configurable only through the 'default' value +******************************************************************************/ +#define APPHINT_LIST_BUILDVAR + +/* +******************************************************************************* + Module parameters (rogue-specific) +******************************************************************************/ +#define APPHINT_LIST_MODPARAM \ +/* name, type, class, default, helper, */ \ +X(EnableCDMKillingRandMode, BOOL, VALIDATION, PVRSRV_APPHINT_ENABLECDMKILLINGRANDMODE, NO_PARAM_TABLE ) \ +X(VDMContextSwitchMode, UINT32, VALIDATION, PVRSRV_APPHINT_VDMCONTEXTSWITCHMODE, NO_PARAM_TABLE ) \ +\ +X(HWPerfDisableCustomCounterFilter, BOOL, VALIDATION, PVRSRV_APPHINT_HWPERFDISABLECUSTOMCOUNTERFILTER, NO_PARAM_TABLE ) \ + +/* +******************************************************************************* + Debugfs parameters (rogue-specific) - driver configuration +******************************************************************************/ +#define APPHINT_LIST_DEBUGFS \ +/* name, type, class, default, helper, */ \ + +/* +******************************************************************************* + Debugfs parameters (rogue-specific) - device configuration 
+******************************************************************************/ +#define APPHINT_LIST_DEBUGFS_DEVICE \ +/* name, type, class, default, helper, */ \ + +/* +******************************************************************************* + + Table generated enums + +******************************************************************************/ +/* Unique ID for all AppHints */ +typedef enum { +#define X(a, b, c, d, e) APPHINT_ID_ ## a, + APPHINT_LIST_ALL +#undef X + APPHINT_ID_MAX +} APPHINT_ID; + +/* ID for build variable Apphints - used for build variable only structures */ +typedef enum { +#define X(a, b, c, d, e) APPHINT_BUILDVAR_ID_ ## a, + APPHINT_LIST_BUILDVAR_COMMON + APPHINT_LIST_BUILDVAR +#undef X + APPHINT_BUILDVAR_ID_MAX +} APPHINT_BUILDVAR_ID; + +/* ID for Modparam Apphints - used for modparam only structures */ +typedef enum { +#define X(a, b, c, d, e) APPHINT_MODPARAM_ID_ ## a, + APPHINT_LIST_MODPARAM_COMMON + APPHINT_LIST_MODPARAM +#undef X + APPHINT_MODPARAM_ID_MAX +} APPHINT_MODPARAM_ID; + +/* ID for Debugfs Apphints - used for debugfs only structures */ +typedef enum { +#define X(a, b, c, d, e) APPHINT_DEBUGFS_ID_ ## a, + APPHINT_LIST_DEBUGFS_COMMON + APPHINT_LIST_DEBUGFS +#undef X + APPHINT_DEBUGFS_ID_MAX +} APPHINT_DEBUGFS_ID; + +/* ID for Debugfs Device Apphints - used for debugfs device only structures */ +typedef enum { +#define X(a, b, c, d, e) APPHINT_DEBUGFS_DEVICE_ID_ ## a, + APPHINT_LIST_DEBUGFS_DEVICE_COMMON + APPHINT_LIST_DEBUGFS_DEVICE +#undef X + APPHINT_DEBUGFS_DEVICE_ID_MAX +} APPHINT_DEBUGFS_DEVICE_ID; + +#endif /* KM_APPHINT_DEFS_H */ diff --git a/drivers/mcst/gpu-imgtec/services/include/rogue/rgx_fwif_hwperf.h b/drivers/mcst/gpu-imgtec/services/include/rogue/rgx_fwif_hwperf.h new file mode 100644 index 000000000000..b3e1cd2a26d1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/rogue/rgx_fwif_hwperf.h @@ -0,0 +1,243 @@ +/*************************************************************************/ /*! 
+@File rgx_fwif_hwperf.h +@Title RGX HWPerf support +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Shared header between RGX firmware and Init process +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef RGX_FWIF_HWPERF_H +#define RGX_FWIF_HWPERF_H + +#include "rgx_fwif_shared.h" +#include "rgx_hwperf.h" +#include "rgxdefs_km.h" + + +/*****************************************************************************/ + +/* Structure to hold a block's parameters for passing between the BG context + * and the IRQ context when applying a configuration request. */ +typedef struct +{ + IMG_BOOL bValid; + IMG_BOOL bEnabled; + IMG_UINT32 eBlockID; + IMG_UINT32 uiCounterMask; + IMG_UINT64 RGXFW_ALIGN aui64CounterCfg[RGX_CNTBLK_COUNTERS_MAX]; +} RGXFWIF_HWPERF_CTL_BLK; + +/* Structure used to hold the configuration of the non-mux counters blocks */ +typedef struct +{ + IMG_UINT32 ui32NumSelectedCounters; + IMG_UINT32 aui32SelectedCountersIDs[RGX_HWPERF_MAX_CUSTOM_CNTRS]; +} RGXFW_HWPERF_SELECT; + +/* Structure to hold the whole configuration request details for all blocks + * The block masks and counts are used to optimise reading of this data. 
*/ +typedef struct +{ + IMG_UINT32 ui32HWPerfCtlFlags; + + IMG_UINT32 ui32SelectedCountersBlockMask; + RGXFW_HWPERF_SELECT RGXFW_ALIGN SelCntr[RGX_HWPERF_MAX_CUSTOM_BLKS]; + + IMG_UINT32 ui32EnabledBlksCount; + RGXFWIF_HWPERF_CTL_BLK RGXFW_ALIGN sBlkCfg[RGX_HWPERF_MAX_DEFINED_BLKS]; +} UNCACHED_ALIGN RGXFWIF_HWPERF_CTL; + +/* NOTE: The switch statement in this function must be kept in alignment with + * the enumeration RGX_HWPERF_CNTBLK_ID defined in rgx_hwperf.h. ASSERTs may + * result if not. + * The function provides a hash lookup to get a handle on the global store for + * a block's configuration store from it's block ID. + */ +#ifdef INLINE_IS_PRAGMA +#pragma inline(rgxfw_hwperf_get_block_ctl) +#endif +static INLINE RGXFWIF_HWPERF_CTL_BLK* rgxfw_hwperf_get_block_ctl( + RGX_HWPERF_CNTBLK_ID eBlockID, RGXFWIF_HWPERF_CTL *psHWPerfInitData) +{ + IMG_UINT32 ui32Idx; + + /* Hash the block ID into a control configuration array index */ + switch (eBlockID) + { + case RGX_CNTBLK_ID_TA: + case RGX_CNTBLK_ID_RASTER: + case RGX_CNTBLK_ID_HUB: + case RGX_CNTBLK_ID_TORNADO: + case RGX_CNTBLK_ID_JONES: + case RGX_CNTBLK_ID_BF: + case RGX_CNTBLK_ID_BT: + case RGX_CNTBLK_ID_RT: + case RGX_CNTBLK_ID_SH: + { + ui32Idx = eBlockID; + break; + } + case RGX_CNTBLK_ID_TPU_MCU0: + case RGX_CNTBLK_ID_TPU_MCU1: + case RGX_CNTBLK_ID_TPU_MCU2: + case RGX_CNTBLK_ID_TPU_MCU3: + case RGX_CNTBLK_ID_TPU_MCU4: + case RGX_CNTBLK_ID_TPU_MCU5: + case RGX_CNTBLK_ID_TPU_MCU6: + case RGX_CNTBLK_ID_TPU_MCU7: + { + ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + + (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); + break; + } + case RGX_CNTBLK_ID_USC0: + case RGX_CNTBLK_ID_USC1: + case RGX_CNTBLK_ID_USC2: + case RGX_CNTBLK_ID_USC3: + case RGX_CNTBLK_ID_USC4: + case RGX_CNTBLK_ID_USC5: + case RGX_CNTBLK_ID_USC6: + case RGX_CNTBLK_ID_USC7: + case RGX_CNTBLK_ID_USC8: + case RGX_CNTBLK_ID_USC9: + case RGX_CNTBLK_ID_USC10: + case RGX_CNTBLK_ID_USC11: + case RGX_CNTBLK_ID_USC12: + case RGX_CNTBLK_ID_USC13: + case 
RGX_CNTBLK_ID_USC14: + case RGX_CNTBLK_ID_USC15: + { + ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + + RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) + + (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); + break; + } + case RGX_CNTBLK_ID_TEXAS0: + case RGX_CNTBLK_ID_TEXAS1: + case RGX_CNTBLK_ID_TEXAS2: + case RGX_CNTBLK_ID_TEXAS3: + case RGX_CNTBLK_ID_TEXAS4: + case RGX_CNTBLK_ID_TEXAS5: + case RGX_CNTBLK_ID_TEXAS6: + case RGX_CNTBLK_ID_TEXAS7: + { + ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + + RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) + + RGX_CNTBLK_INDIRECT_COUNT(USC, 15) + + (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); + break; + } + case RGX_CNTBLK_ID_RASTER0: + case RGX_CNTBLK_ID_RASTER1: + case RGX_CNTBLK_ID_RASTER2: + case RGX_CNTBLK_ID_RASTER3: + { + ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + + RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) + + RGX_CNTBLK_INDIRECT_COUNT(USC, 15) + + RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) + + (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); + break; + } + case RGX_CNTBLK_ID_BLACKPEARL0: + case RGX_CNTBLK_ID_BLACKPEARL1: + case RGX_CNTBLK_ID_BLACKPEARL2: + case RGX_CNTBLK_ID_BLACKPEARL3: + { + ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + + RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) + + RGX_CNTBLK_INDIRECT_COUNT(USC, 15) + + RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) + + RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3) + + (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); + break; + } + case RGX_CNTBLK_ID_PBE0: + case RGX_CNTBLK_ID_PBE1: + case RGX_CNTBLK_ID_PBE2: + case RGX_CNTBLK_ID_PBE3: + case RGX_CNTBLK_ID_PBE4: + case RGX_CNTBLK_ID_PBE5: + case RGX_CNTBLK_ID_PBE6: + case RGX_CNTBLK_ID_PBE7: + case RGX_CNTBLK_ID_PBE8: + case RGX_CNTBLK_ID_PBE9: + case RGX_CNTBLK_ID_PBE10: + case RGX_CNTBLK_ID_PBE11: + case RGX_CNTBLK_ID_PBE12: + case RGX_CNTBLK_ID_PBE13: + case RGX_CNTBLK_ID_PBE14: + case RGX_CNTBLK_ID_PBE15: + { + ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + + RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) + + RGX_CNTBLK_INDIRECT_COUNT(USC, 15) + + RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) + + RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3) + + 
RGX_CNTBLK_INDIRECT_COUNT(BLACKPEARL, 3) + + (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); + break; + } + case RGX_CNTBLK_ID_BX_TU0: + case RGX_CNTBLK_ID_BX_TU1: + case RGX_CNTBLK_ID_BX_TU2: + case RGX_CNTBLK_ID_BX_TU3: + { + ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + + RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) + + RGX_CNTBLK_INDIRECT_COUNT(USC, 15) + + RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) + + RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3) + + RGX_CNTBLK_INDIRECT_COUNT(BLACKPEARL, 3) + + RGX_CNTBLK_INDIRECT_COUNT(PBE, 15) + + (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); + break; + } + default: + { + ui32Idx = RGX_HWPERF_MAX_DEFINED_BLKS; + break; + } + } + if (ui32Idx >= RGX_HWPERF_MAX_DEFINED_BLKS) + { + return NULL; + } + return &psHWPerfInitData->sBlkCfg[ui32Idx]; +} + +#endif diff --git a/drivers/mcst/gpu-imgtec/services/include/rogue/rgx_fwif_km.h b/drivers/mcst/gpu-imgtec/services/include/rogue/rgx_fwif_km.h new file mode 100644 index 000000000000..750966d188d7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/rogue/rgx_fwif_km.h @@ -0,0 +1,2029 @@ +/*************************************************************************/ /*! +@File +@Title RGX firmware interface structures used by pvrsrvkm +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX firmware interface structures used by pvrsrvkm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGX_FWIF_KM_H) +#define RGX_FWIF_KM_H + +#include "img_types.h" +#include "rgx_fwif_shared.h" +#include "rgxdefs_km.h" +#include "dllist.h" +#include "rgx_hwperf.h" + + +/*************************************************************************/ /*! + Logging type +*/ /**************************************************************************/ +#define RGXFWIF_LOG_TYPE_NONE 0x00000000U +#define RGXFWIF_LOG_TYPE_TRACE 0x00000001U +#define RGXFWIF_LOG_TYPE_GROUP_MAIN 0x00000002U +#define RGXFWIF_LOG_TYPE_GROUP_MTS 0x00000004U +#define RGXFWIF_LOG_TYPE_GROUP_CLEANUP 0x00000008U +#define RGXFWIF_LOG_TYPE_GROUP_CSW 0x00000010U +#define RGXFWIF_LOG_TYPE_GROUP_BIF 0x00000020U +#define RGXFWIF_LOG_TYPE_GROUP_PM 0x00000040U +#define RGXFWIF_LOG_TYPE_GROUP_RTD 0x00000080U +#define RGXFWIF_LOG_TYPE_GROUP_SPM 0x00000100U +#define RGXFWIF_LOG_TYPE_GROUP_POW 0x00000200U +#define RGXFWIF_LOG_TYPE_GROUP_HWR 0x00000400U +#define RGXFWIF_LOG_TYPE_GROUP_HWP 0x00000800U +#define RGXFWIF_LOG_TYPE_GROUP_RPM 0x00001000U +#define RGXFWIF_LOG_TYPE_GROUP_DMA 0x00002000U +#define RGXFWIF_LOG_TYPE_GROUP_MISC 0x00004000U +#define RGXFWIF_LOG_TYPE_GROUP_DEBUG 0x80000000U +#define RGXFWIF_LOG_TYPE_GROUP_MASK 0x80007FFEU +#define RGXFWIF_LOG_TYPE_MASK 0x80007FFFU + +/* String used in pvrdebug -h output */ +#define RGXFWIF_LOG_GROUPS_STRING_LIST "main,mts,cleanup,csw,bif,pm,rtd,spm,pow,hwr,hwp,rpm,dma,misc,debug" + +/* Table entry to map log group strings to log type value */ +typedef struct { + const IMG_CHAR* pszLogGroupName; + IMG_UINT32 ui32LogGroupType; +} RGXFWIF_LOG_GROUP_MAP_ENTRY; + +/* + Macro for use with the RGXFWIF_LOG_GROUP_MAP_ENTRY type to create a lookup + table where needed. Keep log group names short, no more than 20 chars. 
+*/ +#define RGXFWIF_LOG_GROUP_NAME_VALUE_MAP { "none", RGXFWIF_LOG_TYPE_NONE }, \ + { "main", RGXFWIF_LOG_TYPE_GROUP_MAIN }, \ + { "mts", RGXFWIF_LOG_TYPE_GROUP_MTS }, \ + { "cleanup", RGXFWIF_LOG_TYPE_GROUP_CLEANUP }, \ + { "csw", RGXFWIF_LOG_TYPE_GROUP_CSW }, \ + { "bif", RGXFWIF_LOG_TYPE_GROUP_BIF }, \ + { "pm", RGXFWIF_LOG_TYPE_GROUP_PM }, \ + { "rtd", RGXFWIF_LOG_TYPE_GROUP_RTD }, \ + { "spm", RGXFWIF_LOG_TYPE_GROUP_SPM }, \ + { "pow", RGXFWIF_LOG_TYPE_GROUP_POW }, \ + { "hwr", RGXFWIF_LOG_TYPE_GROUP_HWR }, \ + { "hwp", RGXFWIF_LOG_TYPE_GROUP_HWP }, \ + { "rpm", RGXFWIF_LOG_TYPE_GROUP_RPM }, \ + { "dma", RGXFWIF_LOG_TYPE_GROUP_DMA }, \ + { "misc", RGXFWIF_LOG_TYPE_GROUP_MISC }, \ + { "debug", RGXFWIF_LOG_TYPE_GROUP_DEBUG } + + +/* Used in print statements to display log group state, one %s per group defined */ +#define RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s" + +/* Used in a print statement to display log group state, one per group */ +#define RGXFWIF_LOG_ENABLED_GROUPS_LIST(types) (((types) & RGXFWIF_LOG_TYPE_GROUP_MAIN) ?("main ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_MTS) ?("mts ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_CLEANUP) ?("cleanup ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_CSW) ?("csw ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_BIF) ?("bif ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_PM) ?("pm ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_RTD) ?("rtd ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_SPM) ?("spm ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_POW) ?("pow ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_HWR) ?("hwr ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_HWP) ?("hwp ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_RPM) ?("rpm ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_DMA) ?("dma ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_MISC) ?("misc ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_DEBUG) ?("debug ") :("")) + + 
+/************************************************************************ +* RGX FW signature checks +************************************************************************/ +#define RGXFW_SIG_BUFFER_SIZE_MIN (8192) + +/*! + ****************************************************************************** + * Trace Buffer + *****************************************************************************/ + +/*! Default size of RGXFWIF_TRACEBUF_SPACE in DWords */ +#define RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS 12000U +#define RGXFW_TRACE_BUFFER_ASSERT_SIZE 200U +#if defined(RGXFW_META_SUPPORT_2ND_THREAD) +#define RGXFW_THREAD_NUM 2U +#else +#define RGXFW_THREAD_NUM 1U +#endif + +#define RGXFW_POLL_TYPE_SET 0x80000000U + +typedef struct +{ + IMG_CHAR szPath[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; + IMG_CHAR szInfo[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; + IMG_UINT32 ui32LineNum; +} UNCACHED_ALIGN RGXFWIF_FILE_INFO_BUF; + +typedef struct +{ + IMG_UINT32 ui32TracePointer; + +#if defined(RGX_FIRMWARE) + IMG_UINT32 *pui32RGXFWIfTraceBuffer; /* To be used by firmware for writing into trace buffer */ +#else + RGXFWIF_DEV_VIRTADDR pui32RGXFWIfTraceBuffer; +#endif + IMG_PUINT32 pui32TraceBuffer; /* To be used by host when reading from trace buffer */ + + RGXFWIF_FILE_INFO_BUF sAssertBuf; +} UNCACHED_ALIGN RGXFWIF_TRACEBUF_SPACE; + +#define RGXFWIF_FWFAULTINFO_MAX (8U) /* Total number of FW fault logs stored */ + +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN ui64CRTimer; + IMG_UINT64 RGXFW_ALIGN ui64OSTimer; + IMG_UINT32 RGXFW_ALIGN ui32Data; + IMG_UINT32 ui32Reserved; + RGXFWIF_FILE_INFO_BUF sFaultBuf; +} UNCACHED_ALIGN RGX_FWFAULTINFO; + + +#define RGXFWIF_POW_STATES \ + X(RGXFWIF_POW_OFF) /* idle and handshaked with the host (ready to full power down) */ \ + X(RGXFWIF_POW_ON) /* running HW commands */ \ + X(RGXFWIF_POW_FORCED_IDLE) /* forced idle */ \ + X(RGXFWIF_POW_IDLE) /* idle waiting for host handshake */ + +typedef enum +{ +#define X(NAME) NAME, + RGXFWIF_POW_STATES +#undef X +} 
RGXFWIF_POW_STATE; + +/* Firmware HWR states */ +#define RGXFWIF_HWR_HARDWARE_OK (IMG_UINT32_C(0x1) << 0U) /*!< The HW state is ok or locked up */ +#define RGXFWIF_HWR_ANALYSIS_DONE (IMG_UINT32_C(0x1) << 2U) /*!< The analysis of a GPU lockup has been performed */ +#define RGXFWIF_HWR_GENERAL_LOCKUP (IMG_UINT32_C(0x1) << 3U) /*!< A DM unrelated lockup has been detected */ +#define RGXFWIF_HWR_DM_RUNNING_OK (IMG_UINT32_C(0x1) << 4U) /*!< At least one DM is running without being close to a lockup */ +#define RGXFWIF_HWR_DM_STALLING (IMG_UINT32_C(0x1) << 5U) /*!< At least one DM is close to lockup */ +#define RGXFWIF_HWR_FW_FAULT (IMG_UINT32_C(0x1) << 6U) /*!< The FW has faulted and needs to restart */ +#define RGXFWIF_HWR_RESTART_REQUESTED (IMG_UINT32_C(0x1) << 7U) /*!< The FW has requested the host to restart it */ + +#define RGXFWIF_PHR_STATE_SHIFT (8U) +#define RGXFWIF_PHR_RESTART_REQUESTED (IMG_UINT32_C(1) << RGXFWIF_PHR_STATE_SHIFT) /*!< The FW has requested the host to restart it, per PHR configuration */ +#define RGXFWIF_PHR_RESTART_FINISHED (IMG_UINT32_C(2) << RGXFWIF_PHR_STATE_SHIFT) /*!< A PHR triggered GPU reset has just finished */ +#define RGXFWIF_PHR_RESTART_MASK (RGXFWIF_PHR_RESTART_REQUESTED | RGXFWIF_PHR_RESTART_FINISHED) + +typedef IMG_UINT32 RGXFWIF_HWR_STATEFLAGS; + +/* Firmware per-DM HWR states */ +#define RGXFWIF_DM_STATE_WORKING (0x00U) /*!< DM is working if all flags are cleared */ +#define RGXFWIF_DM_STATE_READY_FOR_HWR (IMG_UINT32_C(0x1) << 0) /*!< DM is idle and ready for HWR */ +#define RGXFWIF_DM_STATE_NEEDS_SKIP (IMG_UINT32_C(0x1) << 2) /*!< DM need to skip to next cmd before resuming processing */ +#define RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP (IMG_UINT32_C(0x1) << 3) /*!< DM need partial render cleanup before resuming processing */ +#define RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR (IMG_UINT32_C(0x1) << 4) /*!< DM need to increment Recovery Count once fully recovered */ +#define RGXFWIF_DM_STATE_GUILTY_LOCKUP (IMG_UINT32_C(0x1) << 5) /*!< DM was 
identified as locking up and causing HWR */ +#define RGXFWIF_DM_STATE_INNOCENT_LOCKUP (IMG_UINT32_C(0x1) << 6) /*!< DM was innocently affected by another lockup which caused HWR */ +#define RGXFWIF_DM_STATE_GUILTY_OVERRUNING (IMG_UINT32_C(0x1) << 7) /*!< DM was identified as over-running and causing HWR */ +#define RGXFWIF_DM_STATE_INNOCENT_OVERRUNING (IMG_UINT32_C(0x1) << 8) /*!< DM was innocently affected by another DM over-running which caused HWR */ +#define RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH (IMG_UINT32_C(0x1) << 9) /*!< DM was forced into HWR as it delayed more important workloads */ + +/* Firmware's connection state */ +typedef enum +{ + RGXFW_CONNECTION_FW_OFFLINE = 0, /*!< Firmware is offline */ + RGXFW_CONNECTION_FW_READY, /*!< Firmware is initialised */ + RGXFW_CONNECTION_FW_ACTIVE, /*!< Firmware connection is fully established */ + RGXFW_CONNECTION_FW_OFFLOADING, /*!< Firmware is clearing up connection data */ + RGXFW_CONNECTION_FW_STATE_COUNT +} RGXFWIF_CONNECTION_FW_STATE; + +/* OS' connection state */ +typedef enum +{ + RGXFW_CONNECTION_OS_OFFLINE = 0, /*!< OS is offline */ + RGXFW_CONNECTION_OS_READY, /*!< OS's KM driver is setup and waiting */ + RGXFW_CONNECTION_OS_ACTIVE, /*!< OS connection is fully established */ + RGXFW_CONNECTION_OS_STATE_COUNT +} RGXFWIF_CONNECTION_OS_STATE; + +typedef struct +{ + IMG_UINT bfOsState : 3; + IMG_UINT bfFLOk : 1; + IMG_UINT bfFLGrowPending : 1; + IMG_UINT bfIsolatedOS : 1; + IMG_UINT bfReserved : 26; +} RGXFWIF_OS_RUNTIME_FLAGS; + +typedef IMG_UINT32 RGXFWIF_HWR_RECOVERYFLAGS; + +#if defined(PVRSRV_STALLED_CCB_ACTION) +#define PVR_SLR_LOG_ENTRIES 10 +#define PVR_SLR_LOG_STRLEN 30 /*!< MAX_CLIENT_CCB_NAME not visible to this header */ + +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN ui64Timestamp; + IMG_UINT32 ui32FWCtxAddr; + IMG_UINT32 ui32NumUFOs; + IMG_CHAR aszCCBName[PVR_SLR_LOG_STRLEN]; +} UNCACHED_ALIGN RGXFWIF_SLR_ENTRY; +#endif + +/* firmware trace control data */ +typedef struct +{ + IMG_UINT32 
ui32LogType; + RGXFWIF_TRACEBUF_SPACE sTraceBuf[RGXFW_THREAD_NUM]; + IMG_UINT32 ui32TraceBufSizeInDWords; /*!< Member initialised only when sTraceBuf is actually allocated + * (in RGXTraceBufferInitOnDemandResources) */ + IMG_UINT32 ui32TracebufFlags; /*!< Compatibility and other flags */ +} UNCACHED_ALIGN RGXFWIF_TRACEBUF; + +/* firmware system data shared with the Host driver */ +typedef struct +{ + IMG_UINT32 ui32ConfigFlags; /*!< Configuration flags from host */ + IMG_UINT32 ui32ConfigFlagsExt; /*!< Extended configuration flags from host */ + volatile RGXFWIF_POW_STATE ePowState; + volatile IMG_UINT32 ui32HWPerfRIdx; + volatile IMG_UINT32 ui32HWPerfWIdx; + volatile IMG_UINT32 ui32HWPerfWrapCount; + IMG_UINT32 ui32HWPerfSize; /*!< Constant after setup, needed in FW */ + IMG_UINT32 ui32HWPerfDropCount; /*!< The number of times the FW drops a packet due to buffer full */ + + /* ui32HWPerfUt, ui32FirstDropOrdinal, ui32LastDropOrdinal only valid when FW is built with + * RGX_HWPERF_UTILIZATION & RGX_HWPERF_DROP_TRACKING defined in rgxfw_hwperf.c */ + IMG_UINT32 ui32HWPerfUt; /*!< Buffer utilisation, high watermark of bytes in use */ + IMG_UINT32 ui32FirstDropOrdinal; /*!< The ordinal of the first packet the FW dropped */ + IMG_UINT32 ui32LastDropOrdinal; /*!< The ordinal of the last packet the FW dropped */ +#if !defined(RGX_FW_IRQ_OS_COUNTERS) + volatile IMG_UINT32 aui32InterruptCount[RGXFW_THREAD_NUM]; /*!< Interrupt count from Threads > */ +#endif + RGXFWIF_OS_RUNTIME_FLAGS asOsRuntimeFlagsMirror[RGXFW_MAX_NUM_OS];/*!< State flags for each Operating System mirrored from Fw coremem */ + IMG_UINT32 aui32OSidPrioMirror[RGXFW_MAX_NUM_OS]; /*!< Priority for each Operating System mirrored from Fw coremem */ + IMG_UINT32 ui32PHRModeMirror; /*!< Periodic Hardware Reset Mode mirrored from Fw coremem */ + RGX_FWFAULTINFO sFaultInfo[RGXFWIF_FWFAULTINFO_MAX]; + IMG_UINT32 ui32FWFaults; + IMG_UINT32 aui32CrPollAddr[RGXFW_THREAD_NUM]; + IMG_UINT32 
aui32CrPollMask[RGXFW_THREAD_NUM]; + IMG_UINT32 aui32CrPollCount[RGXFW_THREAD_NUM]; + IMG_UINT64 RGXFW_ALIGN ui64StartIdleTime; +#if defined(SUPPORT_POWMON_COMPONENT) +#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) + RGXFWIF_TRACEBUF_SPACE sPowerMonBuf; + IMG_UINT32 ui32PowerMonBufSizeInDWords; +#endif + IMG_UINT32 ui32PowMonEstimate; /*!< Non-volatile power monitoring results: + * static power (by default) + * energy count (PVR_POWER_MONITOR_DYNAMIC_ENERGY) */ +#endif + +#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK) +#define RGXFWIF_STATS_FRAMEWORK_LINESIZE (8) +#define RGXFWIF_STATS_FRAMEWORK_MAX (2048*RGXFWIF_STATS_FRAMEWORK_LINESIZE) + IMG_UINT32 RGXFW_ALIGN aui32FWStatsBuf[RGXFWIF_STATS_FRAMEWORK_MAX]; +#endif + RGXFWIF_HWR_STATEFLAGS ui32HWRStateFlags; + RGXFWIF_HWR_RECOVERYFLAGS aui32HWRRecoveryFlags[RGXFWIF_DM_DEFAULT_MAX]; + IMG_UINT32 ui32FwSysDataFlags; /*!< Compatibility and other flags */ +} UNCACHED_ALIGN RGXFWIF_SYSDATA; + +/* per-os firmware shared data */ +typedef struct +{ + IMG_UINT32 ui32FwOsConfigFlags; /*!< Configuration flags from an OS */ + IMG_UINT32 ui32FWSyncCheckMark; /*!< Markers to signal that the host should perform a full sync check */ + IMG_UINT32 ui32HostSyncCheckMark; +#if defined(PVRSRV_STALLED_CCB_ACTION) + IMG_UINT32 ui32ForcedUpdatesRequested; + IMG_UINT8 ui8SLRLogWp; + RGXFWIF_SLR_ENTRY sSLRLogFirst; + RGXFWIF_SLR_ENTRY sSLRLog[PVR_SLR_LOG_ENTRIES]; + IMG_UINT64 RGXFW_ALIGN ui64LastForcedUpdateTime; +#endif + IMG_UINT32 ui32KCCBCmdsExecuted; + RGXFWIF_DEV_VIRTADDR sPowerSync; + IMG_UINT32 ui32FwOsDataFlags; /*!< Compatibility and other flags */ +} UNCACHED_ALIGN RGXFWIF_OSDATA; + +/* Firmware trace time-stamp field breakup */ + +/* RGX_CR_TIMER register read (48 bits) value*/ +#define RGXFWT_TIMESTAMP_TIME_SHIFT (0U) +#define RGXFWT_TIMESTAMP_TIME_CLRMSK (IMG_UINT64_C(0xFFFF000000000000)) + +/* Extra debug-info (16 bits) */ +#define RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT (48U) +#define RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK 
~RGXFWT_TIMESTAMP_TIME_CLRMSK + + +/* Debug-info sub-fields */ +/* Bit 0: RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT bit from RGX_CR_EVENT_STATUS register */ +#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT (0U) +#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET (1U << RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT) + +/* Bit 1: RGX_CR_BIF_MMU_ENTRY_PENDING bit from RGX_CR_BIF_MMU_ENTRY register */ +#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT (1U) +#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET (1U << RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT) + +/* Bit 2-15: Unused bits */ + +#define RGXFWT_DEBUG_INFO_STR_MAXLEN 64 +#define RGXFWT_DEBUG_INFO_STR_PREPEND " (debug info: " +#define RGXFWT_DEBUG_INFO_STR_APPEND ")" + +/* Table of debug info sub-field's masks and corresponding message strings + * to be appended to firmware trace + * + * Mask : 16 bit mask to be applied to debug-info field + * String : debug info message string + */ + +#define RGXFWT_DEBUG_INFO_MSKSTRLIST \ +/*Mask, String*/ \ +X(RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET, "mmu pf") \ +X(RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET, "mmu pending") + +/*! + ****************************************************************************** + * HWR Data + *****************************************************************************/ +typedef enum +{ + RGX_HWRTYPE_UNKNOWNFAILURE = 0, + RGX_HWRTYPE_OVERRUN = 1, + RGX_HWRTYPE_POLLFAILURE = 2, + RGX_HWRTYPE_BIF0FAULT = 3, + RGX_HWRTYPE_BIF1FAULT = 4, + RGX_HWRTYPE_TEXASBIF0FAULT = 5, + RGX_HWRTYPE_MMUFAULT = 6, + RGX_HWRTYPE_MMUMETAFAULT = 7, + RGX_HWRTYPE_MIPSTLBFAULT = 8, +} RGX_HWRTYPE; + +#define RGXFWIF_HWRTYPE_BIF_BANK_GET(eHWRType) (((eHWRType) == RGX_HWRTYPE_BIF0FAULT) ? 
0 : 1) + +#define RGXFWIF_HWRTYPE_PAGE_FAULT_GET(eHWRType) ((((eHWRType) == RGX_HWRTYPE_BIF0FAULT) || \ + ((eHWRType) == RGX_HWRTYPE_BIF1FAULT) || \ + ((eHWRType) == RGX_HWRTYPE_TEXASBIF0FAULT) || \ + ((eHWRType) == RGX_HWRTYPE_MMUFAULT) || \ + ((eHWRType) == RGX_HWRTYPE_MMUMETAFAULT) || \ + ((eHWRType) == RGX_HWRTYPE_MIPSTLBFAULT)) ? true : false) + +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN ui64BIFReqStatus; + IMG_UINT64 RGXFW_ALIGN ui64BIFMMUStatus; + IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */ + IMG_UINT64 RGXFW_ALIGN ui64Reserved; +} RGX_BIFINFO; + +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN ui64MMUStatus; + IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */ + IMG_UINT64 RGXFW_ALIGN ui64Reserved; +} RGX_MMUINFO; + +typedef struct +{ + IMG_UINT32 ui32ThreadNum; + IMG_UINT32 ui32CrPollAddr; + IMG_UINT32 ui32CrPollMask; + IMG_UINT32 ui32CrPollLastValue; + IMG_UINT64 RGXFW_ALIGN ui64Reserved; +} UNCACHED_ALIGN RGX_POLLINFO; + +typedef struct +{ + IMG_UINT32 ui32BadVAddr; + IMG_UINT32 ui32EntryLo; +} RGX_TLBINFO; + +typedef struct +{ + union + { + RGX_BIFINFO sBIFInfo; + RGX_MMUINFO sMMUInfo; + RGX_POLLINFO sPollInfo; + RGX_TLBINFO sTLBInfo; + } uHWRData; + + IMG_UINT64 RGXFW_ALIGN ui64CRTimer; + IMG_UINT64 RGXFW_ALIGN ui64OSTimer; + IMG_UINT32 ui32FrameNum; + IMG_UINT32 ui32PID; + IMG_UINT32 ui32ActiveHWRTData; + IMG_UINT32 ui32HWRNumber; + IMG_UINT32 ui32EventStatus; + IMG_UINT32 ui32HWRRecoveryFlags; + RGX_HWRTYPE eHWRType; + RGXFWIF_DM eDM; + IMG_UINT64 RGXFW_ALIGN ui64CRTimeOfKick; + IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetStart; + IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetFinish; + IMG_UINT64 RGXFW_ALIGN ui64CRTimeFreelistReady; + IMG_UINT64 RGXFW_ALIGN ui64Reserved[2]; +} UNCACHED_ALIGN RGX_HWRINFO; + +#define RGXFWIF_HWINFO_MAX_FIRST 8U /* Number of first HWR logs recorded (never overwritten by newer logs) */ +#define RGXFWIF_HWINFO_MAX_LAST 8U /* Number of latest HWR logs (older logs are 
overwritten by newer logs) */ +#define RGXFWIF_HWINFO_MAX (RGXFWIF_HWINFO_MAX_FIRST + RGXFWIF_HWINFO_MAX_LAST) /* Total number of HWR logs stored in a buffer */ +#define RGXFWIF_HWINFO_LAST_INDEX (RGXFWIF_HWINFO_MAX - 1U) /* Index of the last log in the HWR log buffer */ +typedef struct +{ + RGX_HWRINFO sHWRInfo[RGXFWIF_HWINFO_MAX]; + IMG_UINT32 ui32HwrCounter; + IMG_UINT32 ui32WriteIndex; + IMG_UINT32 ui32DDReqCount; + IMG_UINT32 ui32HWRInfoBufFlags; /* Compatibility and other flags */ + IMG_UINT32 aui32HwrDmLockedUpCount[RGXFWIF_DM_DEFAULT_MAX]; + IMG_UINT32 aui32HwrDmOverranCount[RGXFWIF_DM_DEFAULT_MAX]; + IMG_UINT32 aui32HwrDmRecoveredCount[RGXFWIF_DM_DEFAULT_MAX]; + IMG_UINT32 aui32HwrDmFalseDetectCount[RGXFWIF_DM_DEFAULT_MAX]; +} UNCACHED_ALIGN RGXFWIF_HWRINFOBUF; + +typedef enum +{ + RGX_ACTIVEPM_FORCE_OFF = 0, + RGX_ACTIVEPM_FORCE_ON = 1, + RGX_ACTIVEPM_DEFAULT = 2 +} RGX_ACTIVEPM_CONF; + +typedef enum +{ + RGX_RD_POWER_ISLAND_FORCE_OFF = 0, + RGX_RD_POWER_ISLAND_FORCE_ON = 1, + RGX_RD_POWER_ISLAND_DEFAULT = 2 +} RGX_RD_POWER_ISLAND_CONF; + +/*! 
+ ****************************************************************************** + * Querying DM state + *****************************************************************************/ + +typedef struct +{ + IMG_UINT16 ui16RegNum; /*!< Register number */ + IMG_UINT16 ui16IndirectRegNum; /*!< Indirect register number (or 0 if not used) */ + IMG_UINT16 ui16IndirectStartVal; /*!< Start value for indirect register */ + IMG_UINT16 ui16IndirectEndVal; /*!< End value for indirect register */ +} RGXFW_REGISTER_LIST; + + +#define RGXFWIF_CTXSWITCH_PROFILE_FAST_EN (IMG_UINT32_C(0x1)) +#define RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN (IMG_UINT32_C(0x2)) +#define RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN (IMG_UINT32_C(0x3)) +#define RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN (IMG_UINT32_C(0x4)) + +#define RGXFWIF_CDM_ARBITRATION_TASK_DEMAND_EN (IMG_UINT32_C(0x1)) +#define RGXFWIF_CDM_ARBITRATION_ROUND_ROBIN_EN (IMG_UINT32_C(0x2)) + +#define RGXFWIF_ISP_SCHEDMODE_VER1_IPP (IMG_UINT32_C(0x1)) +#define RGXFWIF_ISP_SCHEDMODE_VER2_ISP (IMG_UINT32_C(0x2)) +/*! 
+ ****************************************************************************** + * RGX firmware Init Config Data + *****************************************************************************/ + +/* Flag definitions affecting the firmware globally */ +#define RGXFWIF_INICFG_CTXSWITCH_MODE_RAND (IMG_UINT32_C(0x1) << 0) +#define RGXFWIF_INICFG_CTXSWITCH_SRESET_EN (IMG_UINT32_C(0x1) << 1) +#define RGXFWIF_INICFG_HWPERF_EN (IMG_UINT32_C(0x1) << 2) +#define RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN (IMG_UINT32_C(0x1) << 3) +#define RGXFWIF_INICFG_POW_RASCALDUST (IMG_UINT32_C(0x1) << 4) +#define RGXFWIF_INICFG_HWR_EN (IMG_UINT32_C(0x1) << 5) +#define RGXFWIF_INICFG_FBCDC_V3_1_EN (IMG_UINT32_C(0x1) << 6) +#define RGXFWIF_INICFG_CHECK_MLIST_EN (IMG_UINT32_C(0x1) << 7) +#define RGXFWIF_INICFG_DISABLE_CLKGATING_EN (IMG_UINT32_C(0x1) << 8) +#define RGXFWIF_INICFG_POLL_COUNTERS_EN (IMG_UINT32_C(0x1) << 9) +#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_SHIFT (10) +#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX (RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INDEX << RGXFWIF_INICFG_VDM_CTX_STORE_MODE_SHIFT) +#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INSTANCE (RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INSTANCE << RGXFWIF_INICFG_VDM_CTX_STORE_MODE_SHIFT) +#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_LIST (RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_LIST << RGXFWIF_INICFG_VDM_CTX_STORE_MODE_SHIFT) +#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_MASK (RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX |\ + RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INSTANCE |\ + RGXFWIF_INICFG_VDM_CTX_STORE_MODE_LIST) +#define RGXFWIF_INICFG_REGCONFIG_EN (IMG_UINT32_C(0x1) << 12) +#define RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY (IMG_UINT32_C(0x1) << 13) +#define RGXFWIF_INICFG_HWP_DISABLE_FILTER (IMG_UINT32_C(0x1) << 14) +#define RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN (IMG_UINT32_C(0x1) << 15) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT (16) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST (RGXFWIF_CTXSWITCH_PROFILE_FAST_EN << 
RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM (RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW (RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY (RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK (IMG_UINT32_C(0x7) << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define RGXFWIF_INICFG_DISABLE_DM_OVERLAP (IMG_UINT32_C(0x1) << 19) +#define RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER (IMG_UINT32_C(0x1) << 20) +#define RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED (IMG_UINT32_C(0x1) << 21) +#define RGXFWIF_INICFG_VALIDATE_IRQ (IMG_UINT32_C(0x1) << 22) +#define RGXFWIF_INICFG_DISABLE_PDP_EN (IMG_UINT32_C(0x1) << 23) +#define RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN (IMG_UINT32_C(0x1) << 24) +#define RGXFWIF_INICFG_WORKEST (IMG_UINT32_C(0x1) << 25) +#define RGXFWIF_INICFG_PDVFS (IMG_UINT32_C(0x1) << 26) +#define RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT (27) +#define RGXFWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND (RGXFWIF_CDM_ARBITRATION_TASK_DEMAND_EN << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) +#define RGXFWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN (RGXFWIF_CDM_ARBITRATION_ROUND_ROBIN_EN << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) +#define RGXFWIF_INICFG_CDM_ARBITRATION_MASK (IMG_UINT32_C(0x3) << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) +#define RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT (29) +#define RGXFWIF_INICFG_ISPSCHEDMODE_NONE (0) +#define RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP (RGXFWIF_ISP_SCHEDMODE_VER1_IPP << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) +#define RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP (RGXFWIF_ISP_SCHEDMODE_VER2_ISP << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) +#define RGXFWIF_INICFG_ISPSCHEDMODE_MASK (RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP |\ + RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP) +#define RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER 
(IMG_UINT32_C(0x1) << 31) + +#define RGXFWIF_INICFG_ALL (0xFFFFFFFFU) + +/* Extended Flag definitions affecting the firmware globally */ +#define RGXFWIF_INICFG_EXT_ALL (0x0U) + +/* Flag definitions affecting only workloads submitted by a particular OS */ +#define RGXFWIF_INICFG_OS_CTXSWITCH_TA_EN (IMG_UINT32_C(0x1) << 0) +#define RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN (IMG_UINT32_C(0x1) << 1) +#define RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN (IMG_UINT32_C(0x1) << 2) + +#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM (IMG_UINT32_C(0x1) << 3) +#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_TA (IMG_UINT32_C(0x1) << 4) +#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D (IMG_UINT32_C(0x1) << 5) +#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM (IMG_UINT32_C(0x1) << 6) + +#define RGXFWIF_INICFG_OS_ALL (0x7F) + +#define RGXFWIF_INICFG_SYS_CTXSWITCH_CLRMSK ~(RGXFWIF_INICFG_CTXSWITCH_MODE_RAND | \ + RGXFWIF_INICFG_CTXSWITCH_SRESET_EN) + +#define RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL (RGXFWIF_INICFG_OS_CTXSWITCH_TA_EN | \ + RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN | \ + RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN) + +#define RGXFWIF_INICFG_OS_CTXSWITCH_CLRMSK ~(RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL) + +#define RGXFWIF_FILTCFG_TRUNCATE_HALF (IMG_UINT32_C(0x1) << 3) +#define RGXFWIF_FILTCFG_TRUNCATE_INT (IMG_UINT32_C(0x1) << 2) +#define RGXFWIF_FILTCFG_NEW_FILTER_MODE (IMG_UINT32_C(0x1) << 1) + +#if defined(RGX_FW_IRQ_OS_COUNTERS) +/* Unused registers re-purposed for storing counters of the Firmware's + * interrupts for each OS + */ +#define IRQ_COUNTER_STORAGE_REGS \ + 0x2028U, /* RGX_CR_PM_TA_MMU_FSTACK */ \ + 0x2050U, /* RGX_CR_PM_3D_MMU_FSTACK */ \ + 0x2030U, /* RGX_CR_PM_START_OF_MMU_TACONTEXT*/ \ + 0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/ \ + 0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/ \ + 0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/ \ + 0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/ \ + 0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/ +#endif + +#if defined(RGX_FIRMWARE) +typedef DLLIST_NODE RGXFWIF_DLLIST_NODE; +#else 
+typedef struct {RGXFWIF_DEV_VIRTADDR p; + RGXFWIF_DEV_VIRTADDR n;} RGXFWIF_DLLIST_NODE; +#endif + +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_SIGBUFFER; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TRACEBUF; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_SYSDATA; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_OSDATA; +#if defined(SUPPORT_TBI_INTERFACE) +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TBIBUF; +#endif +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERFBUF; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWRINFOBUF; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RUNTIME_CFG; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_GPU_UTIL_FWCB; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_REG_CFG; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERF_CTL; +typedef RGXFWIF_DEV_VIRTADDR PRGX_HWPERF_CONFIG_CNTBLK; +typedef RGXFWIF_DEV_VIRTADDR PRGX_HWPERF_SELECT_CUSTOM_CNTRS; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_CTL; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_RTN_SLOTS; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWMEMCONTEXT; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWCOMMONCONTEXT; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_ZSBUFFER; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_COMMONCTX_STATE; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RF_CMD; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CORE_CLK_RATE; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_COUNTERBUFFER; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FIRMWAREGCOVBUFFER; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB_CTL; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FREELIST; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWRTDATA; + +/*! + * This number is used to represent an invalid page catalogue physical address + */ +#define RGXFWIF_INVALID_PC_PHYADDR 0xFFFFFFFFFFFFFFFFLLU + +/*! + * This number is used to represent unallocated page catalog base register + */ +#define RGXFW_BIF_INVALID_PCREG 0xFFFFFFFFU + +/*! + Firmware memory context. 
+*/ +typedef struct +{ + IMG_DEV_PHYADDR RGXFW_ALIGN sPCDevPAddr; /*!< device physical address of context's page catalogue */ + IMG_UINT32 uiPageCatBaseRegID; /*!< associated page catalog base register (RGXFW_BIF_INVALID_PCREG == unallocated) */ + IMG_UINT32 uiBreakpointAddr; /*!< breakpoint address */ + IMG_UINT32 uiBPHandlerAddr; /*!< breakpoint handler address */ + IMG_UINT32 uiBreakpointCtl; /*!< DM and enable control for BP */ + IMG_UINT32 ui32FwMemCtxFlags; /*!< Compatibility and other flags */ + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + IMG_UINT32 ui32OSid; + IMG_BOOL bOSidAxiProt; +#endif + +} UNCACHED_ALIGN RGXFWIF_FWMEMCONTEXT; + +/*! + * FW context state flags + */ +#define RGXFWIF_CONTEXT_FLAGS_NEED_RESUME (0x00000001U) +#define RGXFWIF_CONTEXT_FLAGS_MC_NEED_RESUME_MASKFULL (0x000000FFU) +#define RGXFWIF_CONTEXT_FLAGS_TDM_HEADER_STALE (0x00000100U) + +/* + * Fast scale blit renders can be divided into smaller slices. The maximum + * screen size is 8192x8192 pixels or 256x256 tiles. The blit is sliced + * into 512x512 pixel blits or 16x16 tiles. Therefore, there are at most + * 256 slices of 16x16 tiles, which means we need 8bits to count up to + * which slice we have blitted so far. 
+ */ +#define RGXFWIF_CONTEXT_SLICE_BLIT_X_MASK (0x00000F00) +#define RGXFWIF_CONTEXT_SLICE_BLIT_X_SHIFT (8) +#define RGXFWIF_CONTEXT_SLICE_BLIT_Y_MASK (0x0000F000) +#define RGXFWIF_CONTEXT_SLICE_BLIT_Y_SHIFT (12) + +typedef struct +{ + /* FW-accessible TA state which must be written out to memory on context store */ + IMG_UINT64 RGXFW_ALIGN uTAReg_VDM_CALL_STACK_POINTER; /* To store in mid-TA */ + IMG_UINT64 RGXFW_ALIGN uTAReg_VDM_CALL_STACK_POINTER_Init; /* Initial value (in case is 'lost' due to a lock-up */ + IMG_UINT64 RGXFW_ALIGN uTAReg_VDM_BATCH; + IMG_UINT64 RGXFW_ALIGN uTAReg_VBS_SO_PRIM0; + IMG_UINT64 RGXFW_ALIGN uTAReg_VBS_SO_PRIM1; + IMG_UINT64 RGXFW_ALIGN uTAReg_VBS_SO_PRIM2; + IMG_UINT64 RGXFW_ALIGN uTAReg_VBS_SO_PRIM3; + IMG_UINT16 ui16TACurrentIdx; +} UNCACHED_ALIGN RGXFWIF_TACTX_STATE; + +typedef struct +{ + /* FW-accessible ISP state which must be written out to memory on context store */ + IMG_UINT64 RGXFW_ALIGN u3DReg_PM_DEALLOCATED_MASK_STATUS; + IMG_UINT64 RGXFW_ALIGN u3DReg_PM_PDS_MTILEFREE_STATUS; + IMG_UINT32 ui32CtxStateFlags; /*!< Compatibility and other flags */ + /* au3DReg_ISP_STORE should be the last element of the structure + * as this is an array whose size is determined at runtime + * after detecting the RGX core */ + IMG_UINT32 au3DReg_ISP_STORE[]; +} UNCACHED_ALIGN RGXFWIF_3DCTX_STATE; + +#define RGXFWIF_CTX_USING_BUFFER_A (0) +#define RGXFWIF_CTX_USING_BUFFER_B (1U) + +typedef struct +{ + IMG_UINT32 ui32CtxStateFlags; /*!< Target buffer and other flags */ +} RGXFWIF_COMPUTECTX_STATE; + + +typedef struct RGXFWIF_FWCOMMONCONTEXT_ +{ + /* CCB details for this firmware context */ + PRGXFWIF_CCCB_CTL psCCBCtl; /*!< CCB control */ + PRGXFWIF_CCCB psCCB; /*!< CCB base */ + RGXFWIF_DMA_ADDR sCCBMetaDMAAddr; + + RGXFWIF_DLLIST_NODE RGXFW_ALIGN sWaitingNode; /*!< List entry for the waiting list */ + RGXFWIF_DLLIST_NODE RGXFW_ALIGN sRunNode; /*!< List entry for the run list */ + RGXFWIF_UFO sLastFailedUFO; /*!< UFO that last failed (or 
NULL) */ + + PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ + + /* Context suspend state */ + PRGXFWIF_COMMONCTX_STATE RGXFW_ALIGN psContextState; /*!< TA/3D context suspend state, read/written by FW */ + + /* Framework state + */ + PRGXFWIF_RF_CMD RGXFW_ALIGN psRFCmd; /*!< Register updates for Framework */ + + /* + * Flags e.g. for context switching + */ + IMG_UINT32 ui32FWComCtxFlags; + IMG_UINT32 ui32Priority; + IMG_UINT32 ui32PrioritySeqNum; + + /* References to the host side originators */ + IMG_UINT32 ui32ServerCommonContextID; /*!< the Server Common Context */ + IMG_UINT32 ui32PID; /*!< associated process ID */ + + /* Statistic updates waiting to be passed back to the host... */ + IMG_BOOL bStatsPending; /*!< True when some stats are pending */ + IMG_INT32 i32StatsNumStores; /*!< Number of stores on this context since last update */ + IMG_INT32 i32StatsNumOutOfMemory; /*!< Number of OOMs on this context since last update */ + IMG_INT32 i32StatsNumPartialRenders; /*!< Number of PRs on this context since last update */ + RGXFWIF_DM eDM; /*!< Data Master type */ + IMG_UINT64 RGXFW_ALIGN ui64WaitSignalAddress; /*!< Device Virtual Address of the signal the context is waiting on */ + RGXFWIF_DLLIST_NODE RGXFW_ALIGN sWaitSignalNode; /*!< List entry for the wait-signal list */ + RGXFWIF_DLLIST_NODE RGXFW_ALIGN sBufStalledNode; /*!< List entry for the buffer stalled list */ + IMG_UINT64 RGXFW_ALIGN ui64CBufQueueCtrlAddr; /*!< Address of the circular buffer queue pointers */ + + IMG_UINT64 RGXFW_ALIGN ui64RobustnessAddress; + IMG_UINT32 ui32MaxDeadlineMS; /*!< Max HWR deadline limit in ms */ + IMG_BOOL bReadOffsetNeedsReset; /*!< Following HWR circular buffer read-offset needs resetting */ +} UNCACHED_ALIGN RGXFWIF_FWCOMMONCONTEXT; + +/*! + Firmware render context. 
+*/ +typedef struct +{ + RGXFWIF_FWCOMMONCONTEXT sTAContext; /*!< Firmware context for the TA */ + RGXFWIF_FWCOMMONCONTEXT s3DContext; /*!< Firmware context for the 3D */ + + RGXFWIF_STATIC_RENDERCONTEXT_STATE sStaticRenderContextState; + + IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ + + IMG_UINT32 ui32FwRenderCtxFlags; /*!< Compatibility and other flags */ + +} UNCACHED_ALIGN RGXFWIF_FWRENDERCONTEXT; + +/*! + Firmware compute context. +*/ +typedef struct +{ + RGXFWIF_FWCOMMONCONTEXT sCDMContext; /*!< Firmware context for the CDM */ + + RGXFWIF_STATIC_COMPUTECONTEXT_STATE sStaticComputeContextState; + + IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ + + IMG_UINT32 ui32ComputeCtxFlags; /*!< Compatibility and other flags */ + +} UNCACHED_ALIGN RGXFWIF_FWCOMPUTECONTEXT; + +/*! + Firmware TDM context. +*/ +typedef struct +{ + RGXFWIF_FWCOMMONCONTEXT sTDMContext; /*!< Firmware context for the TDM */ + + IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ + +} UNCACHED_ALIGN RGXFWIF_FWTDMCONTEXT; + +/*! + ****************************************************************************** + * Defines for CMD_TYPE corruption detection and forward compatibility check + *****************************************************************************/ + +/* CMD_TYPE 32bit contains: + * 31:16 Reserved for magic value to detect corruption (16 bits) + * 15 Reserved for RGX_CCB_TYPE_TASK (1 bit) + * 14:0 Bits available for CMD_TYPEs (15 bits) */ + + +/* Magic value to detect corruption */ +#define RGX_CMD_MAGIC_DWORD IMG_UINT32_C(0x2ABC) +#define RGX_CMD_MAGIC_DWORD_MASK (0xFFFF0000U) +#define RGX_CMD_MAGIC_DWORD_SHIFT (16U) +#define RGX_CMD_MAGIC_DWORD_SHIFTED (RGX_CMD_MAGIC_DWORD << RGX_CMD_MAGIC_DWORD_SHIFT) + + +/*! 
+ ****************************************************************************** + * Kernel CCB control for RGX + *****************************************************************************/ +typedef struct +{ + volatile IMG_UINT32 ui32WriteOffset; /*!< write offset into array of commands (MUST be aligned to 16 bytes!) */ + volatile IMG_UINT32 ui32ReadOffset; /*!< read offset into array of commands */ + IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask (Total capacity of the CCB - 1) */ + IMG_UINT32 ui32CmdSize; /*!< size of each command in bytes */ +} UNCACHED_ALIGN RGXFWIF_CCB_CTL; + +/*! + ****************************************************************************** + * Kernel CCB command structure for RGX + *****************************************************************************/ + +#define RGXFWIF_MMUCACHEDATA_FLAGS_PT (0x1U) /* MMU_CTRL_INVAL_PT_EN */ +#define RGXFWIF_MMUCACHEDATA_FLAGS_PD (0x2U) /* MMU_CTRL_INVAL_PD_EN */ +#define RGXFWIF_MMUCACHEDATA_FLAGS_PC (0x4U) /* MMU_CTRL_INVAL_PC_EN */ + +#if !defined(__KERNEL) + +#if !defined(RGX_FEATURE_SLC_VIVT) +#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10U) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */ +#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8U) /* BIF_CTRL_INVAL_TLB1_EN */ +#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x0U) /* not used */ + +#else /* RGX_FEATURE_SLC_VIVT */ +#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x0) /* not used */ +#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (0x0) /* not used */ +#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */ +#endif + +#else +#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */ +#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8) /* BIF_CTRL_INVAL_TLB1_EN */ +#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800) /* 
MMU_CTRL_INVAL_ALL_CONTEXTS_EN */ +#endif + +#define RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT (0x4000000U) /* indicates FW should interrupt the host */ + +typedef struct +{ + IMG_UINT32 ui32Flags; + RGXFWIF_DEV_VIRTADDR sMMUCacheSync; + IMG_UINT32 ui32MMUCacheSyncUpdateValue; +} RGXFWIF_MMUCACHEDATA; + +#define RGXFWIF_BPDATA_FLAGS_ENABLE (1U << 0) +#define RGXFWIF_BPDATA_FLAGS_WRITE (1U << 1) +#define RGXFWIF_BPDATA_FLAGS_CTL (1U << 2) +#define RGXFWIF_BPDATA_FLAGS_REGS (1U << 3) + +typedef struct +{ + PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ + IMG_UINT32 ui32BPAddr; /*!< Breakpoint address */ + IMG_UINT32 ui32HandlerAddr; /*!< Breakpoint handler */ + IMG_UINT32 ui32BPDM; /*!< Breakpoint control */ + IMG_UINT32 ui32BPDataFlags; + IMG_UINT32 ui32TempRegs; /*!< Number of temporary registers to overallocate */ + IMG_UINT32 ui32SharedRegs; /*!< Number of shared registers to overallocate */ + RGXFWIF_DM eDM; /*!< DM associated with the breakpoint */ +} RGXFWIF_BPDATA; + +#define RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS (RGXFWIF_PRBUFFER_MAXSUPPORTED + 1U) /* +1 is RTDATASET cleanup */ + +typedef struct +{ + PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< address of the firmware context */ + IMG_UINT32 ui32CWoffUpdate; /*!< Client CCB woff update */ + IMG_UINT32 ui32CWrapMaskUpdate; /*!< Client CCB wrap mask update after CCCB growth */ + IMG_UINT32 ui32NumCleanupCtl; /*!< number of CleanupCtl pointers attached */ + PRGXFWIF_CLEANUP_CTL apsCleanupCtl[RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS]; /*!< CleanupCtl structures associated with command */ + IMG_UINT32 ui32WorkEstCmdHeaderOffset; /*!< offset to the CmdHeader which houses the workload estimation kick data. 
*/ +} RGXFWIF_KCCB_CMD_KICK_DATA; + +typedef struct +{ + RGXFWIF_KCCB_CMD_KICK_DATA sTACmdKickData; + RGXFWIF_KCCB_CMD_KICK_DATA s3DCmdKickData; +} RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA; + +typedef struct +{ + PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< address of the firmware context */ + IMG_UINT32 ui32CCBFenceOffset; /*!< Client CCB fence offset */ +} RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA; + +typedef struct +{ + IMG_UINT32 ui32PHRMode; /*!< Variable containing PHR configuration values */ +} RGXFWIF_KCCB_CMD_PHR_CFG_DATA; + +#define RGXIF_PHR_MODE_OFF (0UL) +#define RGXIF_PHR_MODE_RD_RESET (1UL) +#define RGXIF_PHR_MODE_FULL_RESET (2UL) + +typedef enum +{ + RGXFWIF_CLEANUP_FWCOMMONCONTEXT, /*!< FW common context cleanup */ + RGXFWIF_CLEANUP_HWRTDATA, /*!< FW HW RT data cleanup */ + RGXFWIF_CLEANUP_FREELIST, /*!< FW freelist cleanup */ + RGXFWIF_CLEANUP_ZSBUFFER, /*!< FW ZS Buffer cleanup */ +} RGXFWIF_CLEANUP_TYPE; + +typedef struct +{ + RGXFWIF_CLEANUP_TYPE eCleanupType; /*!< Cleanup type */ + union { + PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< FW common context to cleanup */ + PRGXFWIF_HWRTDATA psHWRTData; /*!< HW RT to cleanup */ + PRGXFWIF_FREELIST psFreelist; /*!< Freelist to cleanup */ + PRGXFWIF_ZSBUFFER psZSBuffer; /*!< ZS Buffer to cleanup */ + } uCleanupData; +} RGXFWIF_CLEANUP_REQUEST; + +typedef enum +{ + RGXFWIF_POW_OFF_REQ = 1, + RGXFWIF_POW_FORCED_IDLE_REQ, + RGXFWIF_POW_NUM_UNITS_CHANGE, + RGXFWIF_POW_APM_LATENCY_CHANGE +} RGXFWIF_POWER_TYPE; + +typedef enum +{ + RGXFWIF_POWER_FORCE_IDLE = 1, + RGXFWIF_POWER_CANCEL_FORCED_IDLE, + RGXFWIF_POWER_HOST_TIMEOUT, +} RGXFWIF_POWER_FORCE_IDLE_TYPE; + +typedef struct +{ + RGXFWIF_POWER_TYPE ePowType; /*!< Type of power request */ + union + { + IMG_UINT32 ui32NumOfDusts; /*!< Number of active Dusts */ + IMG_BOOL bForced; /*!< If the operation is mandatory */ + RGXFWIF_POWER_FORCE_IDLE_TYPE ePowRequestType; /*!< Type of Request. 
Consolidating Force Idle, Cancel Forced Idle, Host Timeout */ + IMG_UINT32 ui32ActivePMLatencyms; /*!< Number of milliseconds to set APM latency */ + } uPowerReqData; +} RGXFWIF_POWER_REQUEST; + +typedef struct +{ + PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context to fence on (only useful when bDMContext == TRUE) */ + IMG_BOOL bInval; /*!< Invalidate the cache as well as flushing */ + IMG_BOOL bDMContext; /*!< The data to flush/invalidate belongs to a specific DM context */ +} RGXFWIF_SLCFLUSHINVALDATA; + +typedef struct +{ + IMG_UINT32 ui32HCSDeadlineMS; /* New number of milliseconds C/S is allowed to last */ +} RGXFWIF_HCS_CTL; + +typedef enum{ + RGXFWIF_HWPERF_CTRL_TOGGLE = 0, + RGXFWIF_HWPERF_CTRL_SET = 1, + RGXFWIF_HWPERF_CTRL_EMIT_FEATURES_EV = 2 +} RGXFWIF_HWPERF_UPDATE_CONFIG; + +typedef struct +{ + RGXFWIF_HWPERF_UPDATE_CONFIG eOpCode; /*!< Control operation code */ + IMG_UINT64 RGXFW_ALIGN ui64Mask; /*!< Mask of events to toggle */ +} RGXFWIF_HWPERF_CTRL; + +typedef struct +{ + IMG_UINT32 ui32NumBlocks; /*!< Number of RGX_HWPERF_CONFIG_CNTBLK in the array */ + PRGX_HWPERF_CONFIG_CNTBLK sBlockConfigs; /*!< Address of the RGX_HWPERF_CONFIG_CNTBLK array */ +} RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS; + +typedef struct +{ + IMG_UINT32 ui32NewClockSpeed; /*!< New clock speed */ +} RGXFWIF_CORECLKSPEEDCHANGE_DATA; + +#define RGXFWIF_HWPERF_CTRL_BLKS_MAX 16 + +typedef struct +{ + IMG_BOOL bEnable; + IMG_UINT32 ui32NumBlocks; /*!< Number of block IDs in the array */ + IMG_UINT16 aeBlockIDs[RGXFWIF_HWPERF_CTRL_BLKS_MAX]; /*!< Array of RGX_HWPERF_CNTBLK_ID values */ +} RGXFWIF_HWPERF_CTRL_BLKS; + + +typedef struct +{ + IMG_UINT16 ui16CustomBlock; + IMG_UINT16 ui16NumCounters; + PRGX_HWPERF_SELECT_CUSTOM_CNTRS sCustomCounterIDs; +} RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS; + +typedef struct +{ + RGXFWIF_DEV_VIRTADDR sZSBufferFWDevVAddr; /*!< ZS-Buffer FW address */ + IMG_BOOL bDone; /*!< action backing/unbacking succeeded */ +} RGXFWIF_ZSBUFFER_BACKING_DATA; + +typedef struct +{ 
+ IMG_UINT32 ui32IsolationPriorityThreshold; +} RGXFWIF_OSID_ISOLATION_GROUP_DATA; + +typedef struct +{ + RGXFWIF_DEV_VIRTADDR sFreeListFWDevVAddr; /*!< Freelist FW address */ + IMG_UINT32 ui32DeltaPages; /*!< Amount of the Freelist change */ + IMG_UINT32 ui32NewPages; /*!< New amount of pages on the freelist (including ready pages) */ + IMG_UINT32 ui32ReadyPages; /*!< Number of ready pages to be held in reserve until OOM */ +} RGXFWIF_FREELIST_GS_DATA; + +#define RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT (MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS * 2U) +#define RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG 0x80000000U + +typedef struct +{ + IMG_UINT32 ui32FreelistsCount; + IMG_UINT32 aui32FreelistIDs[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT]; +} RGXFWIF_FREELISTS_RECONSTRUCTION_DATA; + + +typedef struct +{ + IMG_DEV_VIRTADDR RGXFW_ALIGN sDevSignalAddress; /*!< device virtual address of the updated signal */ + PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ +} UNCACHED_ALIGN RGXFWIF_SIGNAL_UPDATE_DATA; + + +typedef struct +{ + PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context to that may need to be resumed following write offset update */ +} UNCACHED_ALIGN RGXFWIF_WRITE_OFFSET_UPDATE_DATA; + +/*! + ****************************************************************************** + * Proactive DVFS Structures + *****************************************************************************/ +#define NUM_OPP_VALUES 16 + +typedef struct +{ + IMG_UINT32 ui32Volt; /* V */ + IMG_UINT32 ui32Freq; /* Hz */ +} UNCACHED_ALIGN PDVFS_OPP; + +typedef struct +{ + PDVFS_OPP asOPPValues[NUM_OPP_VALUES]; +#if defined(DEBUG) + IMG_UINT32 ui32MinOPPPoint; +#endif + IMG_UINT32 ui32MaxOPPPoint; +} UNCACHED_ALIGN RGXFWIF_PDVFS_OPP; + +typedef struct +{ + IMG_UINT32 ui32MaxOPPPoint; +} UNCACHED_ALIGN RGXFWIF_PDVFS_MAX_FREQ_DATA; + +typedef struct +{ + IMG_UINT32 ui32MinOPPPoint; +} UNCACHED_ALIGN RGXFWIF_PDVFS_MIN_FREQ_DATA; + +/*! 
+ ****************************************************************************** + * Register configuration structures + *****************************************************************************/ + +#define RGXFWIF_REG_CFG_MAX_SIZE 512 + +typedef enum +{ + RGXFWIF_REGCFG_CMD_ADD = 101, + RGXFWIF_REGCFG_CMD_CLEAR = 102, + RGXFWIF_REGCFG_CMD_ENABLE = 103, + RGXFWIF_REGCFG_CMD_DISABLE = 104 +} RGXFWIF_REGDATA_CMD_TYPE; + +typedef enum +{ + RGXFWIF_REG_CFG_TYPE_PWR_ON=0, /* Sidekick power event */ + RGXFWIF_REG_CFG_TYPE_DUST_CHANGE, /* Rascal / dust power event */ + RGXFWIF_REG_CFG_TYPE_TA, /* TA kick */ + RGXFWIF_REG_CFG_TYPE_3D, /* 3D kick */ + RGXFWIF_REG_CFG_TYPE_CDM, /* Compute kick */ + RGXFWIF_REG_CFG_TYPE_TLA, /* TLA kick */ + RGXFWIF_REG_CFG_TYPE_TDM, /* TDM kick */ + RGXFWIF_REG_CFG_TYPE_ALL /* Applies to all types. Keep as last element */ +} RGXFWIF_REG_CFG_TYPE; + +typedef struct +{ + IMG_UINT64 ui64Addr; + IMG_UINT64 ui64Mask; + IMG_UINT64 ui64Value; +} RGXFWIF_REG_CFG_REC; + +typedef struct +{ + RGXFWIF_REGDATA_CMD_TYPE eCmdType; + RGXFWIF_REG_CFG_TYPE eRegConfigType; + RGXFWIF_REG_CFG_REC RGXFW_ALIGN sRegConfig; + +} RGXFWIF_REGCONFIG_DATA; + +typedef struct +{ + /** + * PDump WRW command write granularity is 32 bits. + * Add padding to ensure array size is 32 bit granular. 
+ */ + IMG_UINT8 RGXFW_ALIGN aui8NumRegsType[PVR_ALIGN((IMG_UINT32)RGXFWIF_REG_CFG_TYPE_ALL,sizeof(IMG_UINT32))]; + RGXFWIF_REG_CFG_REC RGXFW_ALIGN asRegConfigs[RGXFWIF_REG_CFG_MAX_SIZE]; +} UNCACHED_ALIGN RGXFWIF_REG_CFG; + +/* OSid Scheduling Priority Change */ +typedef struct +{ + IMG_UINT32 ui32OSidNum; + IMG_UINT32 ui32Priority; +} RGXFWIF_OSID_PRIORITY_DATA; + +typedef enum +{ + RGXFWIF_OS_ONLINE = 1, + RGXFWIF_OS_OFFLINE +} RGXFWIF_OS_STATE_CHANGE; + +typedef struct +{ + IMG_UINT32 ui32OSid; + RGXFWIF_OS_STATE_CHANGE eNewOSState; +} UNCACHED_ALIGN RGXFWIF_OS_STATE_CHANGE_DATA; + +typedef enum +{ + RGXFWIF_PWR_COUNTER_DUMP_START = 1, + RGXFWIF_PWR_COUNTER_DUMP_STOP, + RGXFWIF_PWR_COUNTER_DUMP_SAMPLE, +} RGXFWIF_COUNTER_DUMP_REQUEST; + +typedef struct +{ + RGXFWIF_COUNTER_DUMP_REQUEST eCounterDumpRequest; +} RGXFW_ALIGN RGXFWIF_COUNTER_DUMP_DATA; + +typedef enum +{ + /* Common commands */ + RGXFWIF_KCCB_CMD_KICK = 101U | RGX_CMD_MAGIC_DWORD_SHIFTED, + RGXFWIF_KCCB_CMD_MMUCACHE = 102U | RGX_CMD_MAGIC_DWORD_SHIFTED, + RGXFWIF_KCCB_CMD_BP = 103U | RGX_CMD_MAGIC_DWORD_SHIFTED, + RGXFWIF_KCCB_CMD_SLCFLUSHINVAL = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< SLC flush and invalidation request */ + RGXFWIF_KCCB_CMD_CLEANUP = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests cleanup of a FW resource (type specified in the command data) */ + RGXFWIF_KCCB_CMD_POW = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Power request */ + RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Backing for on-demand ZS-Buffer done */ + RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Unbacking for on-demand ZS-Buffer done */ + RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelist Grow done */ + RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelists Reconstruction done */ + RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE = 113U | 
RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the firmware that the host has performed a signal update */ + RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE = 114U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the firmware that the host has added more data to a CDM2 Circular Buffer */ + RGXFWIF_KCCB_CMD_HEALTH_CHECK = 115U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Health check request */ + RGXFWIF_KCCB_CMD_FORCE_UPDATE = 116U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Forcing signalling of all unmet UFOs for a given CCB offset */ + + RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK = 117U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< There is a TA and a 3D command in this single kick */ + + /* Commands only permitted to the native or host OS */ + RGXFWIF_KCCB_CMD_REGCONFIG = 200U | RGX_CMD_MAGIC_DWORD_SHIFTED, + RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS = 201U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure the custom counters for HWPerf */ + RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT = 202U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks during the init process*/ + RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE = 203U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Ask the firmware to update its cached ui32LogType value from the (shared) tracebuf control structure */ + RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ = 205U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a maximum frequency/OPP point */ + /* Free slot */ + RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE = 207U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the relative scheduling priority for a particular OSid. It can only be serviced for the Host DDK */ + RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL = 208U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set or clear firmware state flags */ + RGXFWIF_KCCB_CMD_HCS_SET_DEADLINE = 209U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set hard context switching deadline */ + RGXFWIF_KCCB_CMD_OS_ISOLATION_GROUP_CHANGE = 210U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the configuration of (or even disables) the OSid Isolation scheduling group. 
It can only be serviced for the Host DDK */ + RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE = 211U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the FW that a Guest OS has come online / offline. It can only be serviced for the Host DDK */ + RGXFWIF_KCCB_CMD_COUNTER_DUMP = 212U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Controls counter dumping in the FW */ + /* Free slot */ + RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG = 214U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */ + RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS = 215U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks */ + RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS = 216U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Enable or disable multiple HWPerf blocks (reusing existing configuration) */ + RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE = 217U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Core clock speed change event */ + RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ = 218U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a minimum frequency/OPP point */ + RGXFWIF_KCCB_CMD_PHR_CFG = 219U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Periodic Hardware Reset behaviour */ +} RGXFWIF_KCCB_CMD_TYPE; + +#define RGXFWIF_LAST_ALLOWED_GUEST_KCCB_CMD (RGXFWIF_KCCB_CMD_REGCONFIG - 1) + +/* Kernel CCB command packet */ +typedef struct +{ + RGXFWIF_KCCB_CMD_TYPE eCmdType; /*!< Command type */ + IMG_UINT32 ui32KCCBFlags; /*!< Compatibility and other flags */ + + /* NOTE: Make sure that uCmdData is the last member of this struct + * This is to calculate actual command size for device mem copy. 
+ * (Refer RGXGetCmdMemCopySize()) + * */ + union + { + RGXFWIF_KCCB_CMD_KICK_DATA sCmdKickData; /*!< Data for Kick command */ + RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA sCombinedTA3DCmdKickData; /*!< Data for combined TA/3D Kick command */ + RGXFWIF_MMUCACHEDATA sMMUCacheData; /*!< Data for MMU cache command */ + RGXFWIF_BPDATA sBPData; /*!< Data for Breakpoint Commands */ + RGXFWIF_SLCFLUSHINVALDATA sSLCFlushInvalData; /*!< Data for SLC Flush/Inval commands */ + RGXFWIF_CLEANUP_REQUEST sCleanupData; /*!< Data for cleanup commands */ + RGXFWIF_POWER_REQUEST sPowData; /*!< Data for power request commands */ + RGXFWIF_HWPERF_CTRL sHWPerfCtrl; /*!< Data for HWPerf control command */ + RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS sHWPerfCfgEnableBlks; /*!< Data for HWPerf configure, clear and enable performance counter block command */ + RGXFWIF_HWPERF_CTRL_BLKS sHWPerfCtrlBlks; /*!< Data for HWPerf enable or disable performance counter block commands */ + RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS sHWPerfSelectCstmCntrs; /*!< Data for HWPerf configure the custom counters to read */ + RGXFWIF_CORECLKSPEEDCHANGE_DATA sCoreClkSpeedChangeData;/*!< Data for core clock speed change */ + RGXFWIF_ZSBUFFER_BACKING_DATA sZSBufferBackingData; /*!< Feedback for Z/S Buffer backing/unbacking */ + RGXFWIF_FREELIST_GS_DATA sFreeListGSData; /*!< Feedback for Freelist grow/shrink */ + RGXFWIF_FREELISTS_RECONSTRUCTION_DATA sFreeListsReconstructionData; /*!< Feedback for Freelists reconstruction */ + RGXFWIF_REGCONFIG_DATA sRegConfigData; /*!< Data for custom register configuration */ + RGXFWIF_SIGNAL_UPDATE_DATA sSignalUpdateData; /*!< Data for informing the FW about the signal update */ + RGXFWIF_WRITE_OFFSET_UPDATE_DATA sWriteOffsetUpdateData; /*!< Data for informing the FW about the write offset update */ + RGXFWIF_PDVFS_MAX_FREQ_DATA sPDVFSMaxFreqData; /*!< Data for setting the max frequency/OPP */ + RGXFWIF_PDVFS_MIN_FREQ_DATA sPDVFSMinFreqData; /*!< Data for setting the min frequency/OPP */ + 
RGXFWIF_OSID_PRIORITY_DATA sCmdOSidPriorityData; /*!< Data for updating an OSid priority */ + RGXFWIF_HCS_CTL sHCSCtrl; /*!< Data for Hard Context Switching */ + RGXFWIF_OSID_ISOLATION_GROUP_DATA sCmdOSidIsolationData; /*!< Data for updating the OSid isolation group */ + RGXFWIF_OS_STATE_CHANGE_DATA sCmdOSOnlineStateData; /*!< Data for updating the Guest Online states */ + RGXFWIF_DEV_VIRTADDR sTBIBuffer; /*!< Dev address for TBI buffer allocated on demand */ + RGXFWIF_COUNTER_DUMP_DATA sCounterDumpConfigData; /*!< Data for dumping of register ranges */ + RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA sForceUpdateData; /*!< Data for signalling all unmet fences for a given CCB */ + RGXFWIF_KCCB_CMD_PHR_CFG_DATA sPeriodicHwResetCfg; /*!< Data for configuring the Periodic Hw Reset behaviour */ + } UNCACHED_ALIGN uCmdData; +} UNCACHED_ALIGN RGXFWIF_KCCB_CMD; + +RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_KCCB_CMD); + +/*! + ****************************************************************************** + * Firmware CCB command structure for RGX + *****************************************************************************/ + +typedef struct +{ + IMG_UINT32 ui32ZSBufferID; +} RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA; + +typedef struct +{ + IMG_UINT32 ui32FreelistID; +} RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA; + +typedef struct +{ + IMG_UINT32 ui32FreelistsCount; + IMG_UINT32 ui32HwrCounter; + IMG_UINT32 aui32FreelistIDs[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT]; +} RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA; + +/*! + Last reset reason for a context. 
+*/ +typedef enum +{ + RGXFWIF_CONTEXT_RESET_REASON_NONE = 0, /*!< No reset reason recorded */ + RGXFWIF_CONTEXT_RESET_REASON_GUILTY_LOCKUP = 1, /*!< Caused a reset due to locking up */ + RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_LOCKUP = 2, /*!< Affected by another context locking up */ + RGXFWIF_CONTEXT_RESET_REASON_GUILTY_OVERRUNING = 3, /*!< Overran the global deadline */ + RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING = 4, /*!< Affected by another context overrunning */ + RGXFWIF_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH = 5, /*!< Forced reset to ensure scheduling requirements */ +} RGXFWIF_CONTEXT_RESET_REASON; + +typedef struct +{ + IMG_UINT32 ui32ServerCommonContextID; /*!< Context affected by the reset */ + RGXFWIF_CONTEXT_RESET_REASON eResetReason; /*!< Reason for reset */ + IMG_UINT32 ui32ResetJobRef; /*!< Job ref running at the time of reset */ + IMG_BOOL bPageFault; /*!< Did a page fault happen */ + IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< At what page catalog address */ + IMG_DEV_VIRTADDR RGXFW_ALIGN sFaultAddress; /*!< Page fault address (only when applicable) */ +} RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA; + +typedef enum +{ + RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING = 101U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests ZSBuffer to be backed with physical pages */ + RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING = 102U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests ZSBuffer to be unbacked */ + RGXFWIF_FWCCB_CMD_FREELIST_GROW = 103U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand freelist grow/shrink */ + RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION = 104U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests freelists reconstruction */ + RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Notifies host of a HWR event on a context */ + RGXFWIF_FWCCB_CMD_DEBUG_DUMP = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand debug dump */ + RGXFWIF_FWCCB_CMD_UPDATE_STATS = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand 
update on process stats */ + + RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, + RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, +} RGXFWIF_FWCCB_CMD_TYPE; + +typedef enum +{ + RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS=1, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumPartialRenders stat */ + RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumOutOfMemory stat */ + RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTAStores stat */ + RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32Num3DStores stat */ + RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumCDMStores stat */ + RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTDMStores stat */ +} RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE; + +typedef struct +{ + RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE eElementToUpdate; /*!< Element to update */ + IMG_PID pidOwner; /*!< The pid of the process whose stats are being updated */ + IMG_INT32 i32AdjustmentValue; /*!< Adjustment to be made to the statistic */ +} RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA; + +typedef struct +{ + IMG_UINT32 ui32CoreClkRate; +} UNCACHED_ALIGN RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA; + +typedef struct +{ + RGXFWIF_FWCCB_CMD_TYPE eCmdType; /*!< Command type */ + IMG_UINT32 ui32FWCCBFlags; /*!< Compatibility and other flags */ + + union + { + RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA sCmdZSBufferBacking; /*!< Data for Z/S-Buffer on-demand (un)backing*/ + RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA sCmdFreeListGS; /*!< Data for on-demand freelist grow/shrink */ + RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA 
sCmdFreeListsReconstruction; /*!< Data for freelists reconstruction */ + RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA sCmdContextResetNotification; /*!< Data for context reset notification */ + RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA sCmdUpdateStatsData; /*!< Data for updating process stats */ + RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA sCmdCoreClkRateChange; + } RGXFW_ALIGN uCmdData; +} RGXFW_ALIGN RGXFWIF_FWCCB_CMD; + +RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_FWCCB_CMD); + + +/*! + ****************************************************************************** + * Workload estimation Firmware CCB command structure for RGX + *****************************************************************************/ +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN ui64ReturnDataIndex; /*!< Index for return data array */ + IMG_UINT64 RGXFW_ALIGN ui64CyclesTaken; /*!< The cycles the workload took on the hardware */ +} RGXFWIF_WORKEST_FWCCB_CMD; + + +/*! + ****************************************************************************** + * Client CCB commands for RGX + *****************************************************************************/ + +/* Required memory alignment for 64-bit variables accessible by Meta + (The gcc meta aligns 64-bit variables to 64-bit; therefore, memory shared + between the host and meta that contains 64-bit variables has to maintain + this alignment) */ +#define RGXFWIF_FWALLOC_ALIGN sizeof(IMG_UINT64) + +#define RGX_CCB_TYPE_TASK (IMG_UINT32_C(1) << 15) +#define RGX_CCB_FWALLOC_ALIGN(size) (((size) + (RGXFWIF_FWALLOC_ALIGN-1)) & ~(RGXFWIF_FWALLOC_ALIGN - 1)) + +typedef IMG_UINT32 RGXFWIF_CCB_CMD_TYPE; + +#define RGXFWIF_CCB_CMD_TYPE_GEOM (201U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) +#define RGXFWIF_CCB_CMD_TYPE_3D (202U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) +#define RGXFWIF_CCB_CMD_TYPE_CDM (203U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) +#define RGXFWIF_CCB_CMD_TYPE_TQ_3D (204U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) +#define 
RGXFWIF_CCB_CMD_TYPE_TQ_2D (205U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) +#define RGXFWIF_CCB_CMD_TYPE_3D_PR (206U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) +#define RGXFWIF_CCB_CMD_TYPE_NULL (207U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) +/* Free slots 208-211 */ +#define RGXFWIF_CCB_CMD_TYPE_TQ_TDM (212U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) + +/* Leave a gap between CCB specific commands and generic commands */ +#define RGXFWIF_CCB_CMD_TYPE_FENCE (213U | RGX_CMD_MAGIC_DWORD_SHIFTED) +#define RGXFWIF_CCB_CMD_TYPE_UPDATE (214U | RGX_CMD_MAGIC_DWORD_SHIFTED) +/* Free slot 215 */ +#define RGXFWIF_CCB_CMD_TYPE_FENCE_PR (216U | RGX_CMD_MAGIC_DWORD_SHIFTED) +#define RGXFWIF_CCB_CMD_TYPE_PRIORITY (217U | RGX_CMD_MAGIC_DWORD_SHIFTED) +/* Free slot 218 */ +#define RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE (219U | RGX_CMD_MAGIC_DWORD_SHIFTED) +/* Free slot 220 */ + +#define RGXFWIF_CCB_CMD_TYPE_PADDING (221U | RGX_CMD_MAGIC_DWORD_SHIFTED) + + +typedef struct +{ + /* Index for the KM Workload estimation return data array */ + IMG_UINT64 RGXFW_ALIGN ui64ReturnDataIndex; + /* Deadline for the workload */ + IMG_UINT64 RGXFW_ALIGN ui64Deadline; + /* Predicted time taken to do the work in cycles */ + IMG_UINT64 RGXFW_ALIGN ui64CyclesPrediction; +} RGXFWIF_WORKEST_KICK_DATA; + +typedef struct +{ + RGXFWIF_CCB_CMD_TYPE eCmdType; + IMG_UINT32 ui32CmdSize; + IMG_UINT32 ui32ExtJobRef; /*!< external job reference - provided by client and used in debug for tracking submitted work */ + IMG_UINT32 ui32IntJobRef; /*!< internal job reference - generated by services and used in debug for tracking submitted work */ + RGXFWIF_WORKEST_KICK_DATA sWorkEstKickData; /*!< Workload Estimation - Workload Estimation Data */ +} RGXFWIF_CCB_CMD_HEADER; + +/*! 
+ ****************************************************************************** + * Client CCB commands which are only required by the kernel + *****************************************************************************/ +typedef struct +{ + IMG_UINT32 ui32Priority; +} RGXFWIF_CMD_PRIORITY; + + +/*! + ****************************************************************************** + * Signature and Checksums Buffer + *****************************************************************************/ +typedef struct +{ + PRGXFWIF_SIGBUFFER sBuffer; /*!< Ptr to Signature Buffer memory */ + IMG_UINT32 ui32LeftSizeInRegs; /*!< Amount of space left for storing regs in the buffer */ +} UNCACHED_ALIGN RGXFWIF_SIGBUF_CTL; + +typedef struct +{ + PRGXFWIF_COUNTERBUFFER sBuffer; /*!< Ptr to counter dump buffer */ + IMG_UINT32 ui32SizeInDwords; /*!< Amount of space for storing in the buffer */ +} UNCACHED_ALIGN RGXFWIF_COUNTER_DUMP_CTL; + +typedef struct +{ + PRGXFWIF_FIRMWAREGCOVBUFFER sBuffer; /*!< Ptr to firmware gcov buffer */ + IMG_UINT32 ui32Size; /*!< Amount of space for storing in the buffer */ +} UNCACHED_ALIGN RGXFWIF_FIRMWARE_GCOV_CTL; + +/*! + ***************************************************************************** + * RGX Compatibility checks + *****************************************************************************/ + +/* WARNING: Whenever the layout of RGXFWIF_COMPCHECKS_BVNC changes, the + following define should be increased by 1 to indicate to the + compatibility logic that layout has changed. 
*/ +#define RGXFWIF_COMPCHECKS_LAYOUT_VERSION 3 + +typedef struct +{ + IMG_UINT32 ui32LayoutVersion; /* WARNING: This field must be defined as first one in this structure */ + IMG_UINT64 RGXFW_ALIGN ui64BVNC; +} UNCACHED_ALIGN RGXFWIF_COMPCHECKS_BVNC; + +typedef struct +{ + IMG_UINT8 ui8OsCountSupport; +} UNCACHED_ALIGN RGXFWIF_INIT_OPTIONS; + +#define RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(name) \ + RGXFWIF_COMPCHECKS_BVNC (name) = { \ + RGXFWIF_COMPCHECKS_LAYOUT_VERSION, \ + 0, \ + } +#define RGXFWIF_COMPCHECKS_BVNC_INIT(name) \ + do { \ + (name).ui32LayoutVersion = RGXFWIF_COMPCHECKS_LAYOUT_VERSION; \ + (name).ui64BVNC = 0; \ + } while (0) + +typedef struct +{ + RGXFWIF_COMPCHECKS_BVNC sHWBVNC; /*!< hardware BVNC (from the RGX registers) */ + RGXFWIF_COMPCHECKS_BVNC sFWBVNC; /*!< firmware BVNC */ + IMG_UINT32 ui32FWProcessorVersion; /*!< identifier of the FW processor version */ + IMG_UINT32 ui32DDKVersion; /*!< software DDK version */ + IMG_UINT32 ui32DDKBuild; /*!< software DDK build no. */ + IMG_UINT32 ui32BuildOptions; /*!< build options bit-field */ + RGXFWIF_INIT_OPTIONS sInitOptions; /*!< initialisation options bit-field */ + IMG_BOOL bUpdated; /*!< Information is valid */ +} UNCACHED_ALIGN RGXFWIF_COMPCHECKS; + +/*! + ****************************************************************************** + * Updated configuration post FW data init. 
+ *****************************************************************************/ +typedef struct +{ + IMG_UINT32 ui32ActivePMLatencyms; /* APM latency in ms before signalling IDLE to the host */ + IMG_UINT32 ui32RuntimeCfgFlags; /* Compatibility and other flags */ + IMG_BOOL bActivePMLatencyPersistant; /* If set, APM latency does not reset to system default each GPU power transition */ + IMG_UINT32 ui32CoreClockSpeed; /* Core clock speed, currently only used to calculate timer ticks */ + IMG_UINT32 ui32DefaultDustsNumInit; /* Last number of dusts change requested by the host */ + PRGXFWIF_HWPERFBUF sHWPerfBuf; /* On-demand allocated HWPerf buffer address, to be passed to the FW */ +} RGXFWIF_RUNTIME_CFG; + +/*! + ***************************************************************************** + * Control data for RGX + *****************************************************************************/ + +#define RGXFWIF_HWR_DEBUG_DUMP_ALL (99999U) + +#if defined(PDUMP) + +#define RGXFWIF_PID_FILTER_MAX_NUM_PIDS 32U + +typedef enum +{ + RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT, + RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT +} RGXFWIF_PID_FILTER_MODE; + +typedef struct +{ + IMG_PID uiPID; + IMG_UINT32 ui32OSID; +} RGXFW_ALIGN RGXFWIF_PID_FILTER_ITEM; + +typedef struct +{ + RGXFWIF_PID_FILTER_MODE eMode; + /* each process in the filter list is specified by a PID and OS ID pair. + * each PID and OS pair is an item in the items array (asItems). + * if the array contains less than RGXFWIF_PID_FILTER_MAX_NUM_PIDS entries + * then it must be terminated by an item with pid of zero. 
+ */ + RGXFWIF_PID_FILTER_ITEM asItems[RGXFWIF_PID_FILTER_MAX_NUM_PIDS]; +} RGXFW_ALIGN RGXFWIF_PID_FILTER; +#endif + +#if defined(SUPPORT_SECURITY_VALIDATION) +#define RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_DATA (0x1U << 0) +#define RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_CODE (0x1U << 1) +#define RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_NONSECURE (0x1U << 2) +#define RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_SECURE (0x1U << 3) +#endif + +typedef enum +{ + RGXFWIF_TPU_DM_PDM = 0, + RGXFWIF_TPU_DM_VDM = 1, + RGXFWIF_TPU_DM_CDM = 2, + RGXFWIF_TPU_DM_TDM = 3, + RGXFWIF_TPU_DM_LAST +} RGXFWIF_TPU_DM; + +typedef enum +{ + RGXFWIF_GPIO_VAL_OFF = 0, /*!< No GPIO validation */ + RGXFWIF_GPIO_VAL_GENERAL = 1, /*!< Simple test case that + initiates by sending data via the + GPIO and then sends back any data + received over the GPIO */ + RGXFWIF_GPIO_VAL_AP = 2, /*!< More complex test case that writes + and reads data across the entire + GPIO AP address range.*/ +#if defined(SUPPORT_STRIP_RENDERING) + RGXFWIF_GPIO_VAL_SR_BASIC = 3, /*!< Strip Rendering AP based basic test.*/ + RGXFWIF_GPIO_VAL_SR_COMPLEX = 4, /*!< Strip Rendering AP based complex test.*/ +#endif + RGXFWIF_GPIO_VAL_TESTBENCH = 5, /*!< Validates the GPIO Testbench. */ + RGXFWIF_GPIO_VAL_LAST +} RGXFWIF_GPIO_VAL_MODE; + +typedef enum +{ + FW_PERF_CONF_NONE = 0, + FW_PERF_CONF_ICACHE = 1, + FW_PERF_CONF_DCACHE = 2, + FW_PERF_CONF_POLLS = 3, + FW_PERF_CONF_CUSTOM_TIMER = 4, + FW_PERF_CONF_JTLB_INSTR = 5, + FW_PERF_CONF_INSTRUCTIONS = 6 +} FW_PERF_CONF; + +typedef enum +{ + FW_BOOT_STAGE_TLB_INIT_FAILURE = -2, + FW_BOOT_STAGE_NOT_AVAILABLE = -1, + FW_BOOT_NOT_STARTED = 0, + FW_BOOT_BLDR_STARTED = 1, + FW_BOOT_CACHE_DONE, + FW_BOOT_TLB_DONE, + FW_BOOT_MAIN_STARTED, + FW_BOOT_ALIGNCHECKS_DONE, + FW_BOOT_INIT_DONE, +} FW_BOOT_STAGE; + +/* + * Kernel CCB return slot responses. Usage of bit-fields instead of bare integers + * allows FW to possibly pack-in several responses for each single kCCB command. 
+ */ +#define RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED (1U << 0) /* Command executed (return status from FW) */ +#define RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY (1U << 1) /* A cleanup was requested but resource busy */ +#define RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE (1U << 2) /* Poll failed in FW for a HW operation to complete */ + +#define RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE 0x0U /* Reset value of a kCCB return slot (set by host) */ + +typedef struct +{ + /* Fw-Os connection states */ + volatile RGXFWIF_CONNECTION_FW_STATE eConnectionFwState; + volatile RGXFWIF_CONNECTION_OS_STATE eConnectionOsState; + volatile IMG_UINT32 ui32AliveFwToken; + volatile IMG_UINT32 ui32AliveOsToken; +} UNCACHED_ALIGN RGXFWIF_CONNECTION_CTL; + +typedef struct +{ + /* Kernel CCB */ + PRGXFWIF_CCB_CTL psKernelCCBCtl; + PRGXFWIF_CCB psKernelCCB; + PRGXFWIF_CCB_RTN_SLOTS psKernelCCBRtnSlots; + + /* Firmware CCB */ + PRGXFWIF_CCB_CTL psFirmwareCCBCtl; + PRGXFWIF_CCB psFirmwareCCB; + + /* Workload Estimation Firmware CCB */ + PRGXFWIF_CCB_CTL psWorkEstFirmwareCCBCtl; + PRGXFWIF_CCB psWorkEstFirmwareCCB; + +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) + /* Checkpoint CCB */ + PRGXFWIF_CCB_CTL psCheckpointCCBCtl; + PRGXFWIF_CCB psCheckpointCCB; +#endif + + PRGXFWIF_HWRINFOBUF sRGXFWIfHWRInfoBufCtl; + + IMG_UINT32 ui32HWRDebugDumpLimit; + + PRGXFWIF_OSDATA sFwOsData; + + /* Compatibility checks to be populated by the Firmware */ + RGXFWIF_COMPCHECKS sRGXCompChecks; + +} UNCACHED_ALIGN RGXFWIF_OSINIT; + +typedef struct +{ + IMG_DEV_PHYADDR RGXFW_ALIGN sFaultPhysAddr; + + IMG_DEV_VIRTADDR RGXFW_ALIGN sPDSExecBase; + IMG_DEV_VIRTADDR RGXFW_ALIGN sUSCExecBase; + + IMG_UINT32 ui32FilterFlags; + + RGXFWIF_SIGBUF_CTL asSigBufCtl[RGXFWIF_DM_DEFAULT_MAX]; + + PRGXFWIF_RUNTIME_CFG sRuntimeCfg; + + PRGXFWIF_TRACEBUF sTraceBufCtl; + PRGXFWIF_SYSDATA sFwSysData; +#if defined(SUPPORT_TBI_INTERFACE) + PRGXFWIF_TBIBUF sTBIBuf; +#endif + IMG_UINT64 RGXFW_ALIGN ui64HWPerfFilter; + + PRGXFWIF_GPU_UTIL_FWCB sGpuUtilFWCbCtl; + 
PRGXFWIF_REG_CFG sRegCfg; + PRGXFWIF_HWPERF_CTL sHWPerfCtl; + + RGXFWIF_COUNTER_DUMP_CTL sCounterDumpCtl; + +#if defined(SUPPORT_FIRMWARE_GCOV) + RGXFWIF_FIRMWARE_GCOV_CTL sFirmwareGcovCtl; +#endif + + RGXFWIF_DEV_VIRTADDR sAlignChecks; + + /* Core clock speed at FW boot time */ + IMG_UINT32 ui32InitialCoreClockSpeed; + + /* APM latency in ms before signalling IDLE to the host */ + IMG_UINT32 ui32ActivePMLatencyms; + + /* Flag to be set by the Firmware after successful start */ + IMG_BOOL bFirmwareStarted; + + IMG_UINT32 ui32MarkerVal; + + IMG_UINT32 ui32FirmwareStartedTimeStamp; + + IMG_UINT32 ui32JonesDisableMask; + + RGXFWIF_DMA_ADDR sCorememDataStore; + + FW_PERF_CONF eFirmwarePerf; + + IMG_DEV_VIRTADDR RGXFW_ALIGN sSLC3FenceDevVAddr; + + RGXFWIF_PDVFS_OPP sPDVFSOPPInfo; + + /** + * FW Pointer to memory containing core clock rate in Hz. + * Firmware (PDVFS) updates the memory when running on non primary FW thread + * to communicate to host driver. + */ + PRGXFWIF_CORE_CLK_RATE sCoreClockRate; + +#if defined(PDUMP) + RGXFWIF_PID_FILTER sPIDFilter; +#endif + + RGXFWIF_GPIO_VAL_MODE eGPIOValidationMode; + IMG_UINT32 RGXFW_ALIGN aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_LAST]; + + /*Used in HWPerf for decoding BVNC Features*/ + RGX_HWPERF_BVNC sBvncKmFeatureFlags; + +#if defined(SUPPORT_SECURITY_VALIDATION) + IMG_UINT32 ui32SecurityTestFlags; + RGXFWIF_DEV_VIRTADDR pbSecureBuffer; + RGXFWIF_DEV_VIRTADDR pbNonSecureBuffer; +#endif + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + /* + * Used when validation is enabled to allow the host to check + * that MTS sent the correct sideband in response to a kick + * from a given OSes schedule register. 
+ * Testing is enabled if RGXFWIF_KICK_TEST_ENABLED_BIT is set + * + * Set by the host to: + * (osid << RGXFWIF_KICK_TEST_OSID_SHIFT) | RGXFWIF_KICK_TEST_ENABLED_BIT + * reset to 0 by FW when kicked by the given OSid + */ + IMG_UINT32 ui32OSKickTest; +#endif + +} UNCACHED_ALIGN RGXFWIF_SYSINIT; + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#define RGXFWIF_KICK_TEST_ENABLED_BIT 0x1 +#define RGXFWIF_KICK_TEST_OSID_SHIFT 0x1 +#endif + +/*! + ***************************************************************************** + * Timer correlation shared data and defines + *****************************************************************************/ + +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN ui64OSTimeStamp; + IMG_UINT64 RGXFW_ALIGN ui64OSMonoTimeStamp; + IMG_UINT64 RGXFW_ALIGN ui64CRTimeStamp; + + /* Utility variable used to convert CR timer deltas to OS timer deltas (nS), + * where the deltas are relative to the timestamps above: + * deltaOS = (deltaCR * K) >> decimal_shift, see full explanation below */ + IMG_UINT64 RGXFW_ALIGN ui64CRDeltaToOSDeltaKNs; + + IMG_UINT32 ui32CoreClockSpeed; + IMG_UINT32 ui32Reserved; +} UNCACHED_ALIGN RGXFWIF_TIME_CORR; + + +/* The following macros are used to help converting FW timestamps to the Host + * time domain. On the FW the RGX_CR_TIMER counter is used to keep track of + * time; it increments by 1 every 256 GPU clock ticks, so the general + * formula to perform the conversion is: + * + * [ GPU clock speed in Hz, if (scale == 10^9) then deltaOS is in nS, + * otherwise if (scale == 10^6) then deltaOS is in uS ] + * + * deltaCR * 256 256 * scale + * deltaOS = --------------- * scale = deltaCR * K [ K = --------------- ] + * GPUclockspeed GPUclockspeed + * + * The actual K is multiplied by 2^20 (and deltaCR * K is divided by 2^20) + * to get some better accuracy and to avoid returning 0 in the integer + * division 256000000/GPUfreq if GPUfreq is greater than 256MHz. + * This is the same as keeping K as a decimal number. 
+ * + * The maximum deltaOS is slightly more than 5hrs for all GPU frequencies + * (deltaCR * K is more or less a constant), and it's relative to the base + * OS timestamp sampled as a part of the timer correlation data. + * This base is refreshed on GPU power-on, DVFS transition and periodic + * frequency calibration (executed every few seconds if the FW is doing + * some work), so as long as the GPU is doing something and one of these + * events is triggered then deltaCR * K will not overflow and deltaOS will be + * correct. + */ + +#define RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT (20) + +#define RGXFWIF_GET_DELTA_OSTIME_NS(deltaCR, K) \ + (((deltaCR) * (K)) >> RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT) + + +/*! + ****************************************************************************** + * GPU Utilisation + *****************************************************************************/ + +/* See rgx_common.h for a list of GPU states */ +#define RGXFWIF_GPU_UTIL_TIME_MASK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF) & ~RGXFWIF_GPU_UTIL_STATE_MASK) + +#define RGXFWIF_GPU_UTIL_GET_TIME(word) ((word) & RGXFWIF_GPU_UTIL_TIME_MASK) +#define RGXFWIF_GPU_UTIL_GET_STATE(word) ((word) & RGXFWIF_GPU_UTIL_STATE_MASK) + +/* The OS timestamps computed by the FW are approximations of the real time, + * which means they could be slightly behind or ahead the real timer on the Host. + * In some cases we can perform subtractions between FW approximated + * timestamps and real OS timestamps, so we need a form of protection against + * negative results if for instance the FW one is a bit ahead of time. + */ +#define RGXFWIF_GPU_UTIL_GET_PERIOD(newtime,oldtime) \ + (((newtime) > (oldtime)) ? 
((newtime) - (oldtime)) : 0U) + +#define RGXFWIF_GPU_UTIL_MAKE_WORD(time,state) \ + (RGXFWIF_GPU_UTIL_GET_TIME(time) | RGXFWIF_GPU_UTIL_GET_STATE(state)) + + +/* The timer correlation array must be big enough to ensure old entries won't be + * overwritten before all the HWPerf events linked to those entries are processed + * by the MISR. The update frequency of this array depends on how fast the system + * can change state (basically how small the APM latency is) and perform DVFS transitions. + * + * The minimum size is 2 (not 1) to avoid race conditions between the FW reading + * an entry while the Host is updating it. With 2 entries in the worst case the FW + * will read old data, which is still quite ok if the Host is updating the timer + * correlation at that time. + */ +#define RGXFWIF_TIME_CORR_ARRAY_SIZE 256U +#define RGXFWIF_TIME_CORR_CURR_INDEX(seqcount) ((seqcount) % RGXFWIF_TIME_CORR_ARRAY_SIZE) + +/* Make sure the timer correlation array size is a power of 2 */ +static_assert((RGXFWIF_TIME_CORR_ARRAY_SIZE & (RGXFWIF_TIME_CORR_ARRAY_SIZE - 1U)) == 0U, + "RGXFWIF_TIME_CORR_ARRAY_SIZE must be a power of two"); + +typedef struct +{ + RGXFWIF_TIME_CORR sTimeCorr[RGXFWIF_TIME_CORR_ARRAY_SIZE]; + IMG_UINT32 ui32TimeCorrSeqCount; + + /* Last GPU state + OS time of the last state update */ + IMG_UINT64 RGXFW_ALIGN ui64LastWord; + + /* Counters for the amount of time the GPU was active/idle/blocked */ + IMG_UINT64 RGXFW_ALIGN aui64StatsCounters[RGXFWIF_GPU_UTIL_STATE_NUM]; + + IMG_UINT32 ui32GpuUtilFlags; /* Compatibility and other flags */ +} UNCACHED_ALIGN RGXFWIF_GPU_UTIL_FWCB; + + +typedef struct +{ + IMG_UINT32 ui32RenderTargetIndex; //Render number + IMG_UINT32 ui32CurrentRenderTarget; //index in RTA + IMG_UINT32 ui32ActiveRenderTargets; //total active RTs + IMG_UINT32 ui32CumulActiveRenderTargets; //total active RTs from the first TA kick, for OOM + RGXFWIF_DEV_VIRTADDR sValidRenderTargets; //Array of valid RT indices + RGXFWIF_DEV_VIRTADDR 
sRTANumPartialRenders; //Array of number of occurred partial renders per render target + IMG_UINT32 ui32MaxRTs; //Number of render targets in the array + IMG_UINT32 ui32RTACtlFlags; /* Compatibility and other flags */ +} UNCACHED_ALIGN RGXFWIF_RTA_CTL; + +typedef struct +{ + IMG_DEV_VIRTADDR RGXFW_ALIGN psFreeListDevVAddr; + IMG_UINT64 RGXFW_ALIGN ui64CurrentDevVAddr; + IMG_UINT32 ui32CurrentStackTop; + IMG_UINT32 ui32MaxPages; + IMG_UINT32 ui32GrowPages; + IMG_UINT32 ui32CurrentPages; /* HW pages */ + IMG_UINT32 ui32AllocatedPageCount; + IMG_UINT32 ui32AllocatedMMUPageCount; +#if defined(SUPPORT_SHADOW_FREELISTS) + IMG_UINT32 ui32HWRCounter; + PRGXFWIF_FWMEMCONTEXT psFWMemContext; +#endif + IMG_UINT32 ui32FreeListID; + IMG_BOOL bGrowPending; + IMG_UINT32 ui32ReadyPages; /* Pages that should be used only when OOM is reached */ + IMG_UINT32 ui32FreelistFlags; /* Compatibility and other flags */ +} UNCACHED_ALIGN RGXFWIF_FREELIST; + + +/*! + ****************************************************************************** + * HWRTData + *****************************************************************************/ + +/* HWRTData flags */ +/* Deprecated flags 1:0 */ +#define HWRTDATA_HAS_LAST_TA (1U << 2) +#define HWRTDATA_PARTIAL_RENDERED (1U << 3) +#define HWRTDATA_DISABLE_TILE_REORDERING (1U << 4) +#define HWRTDATA_NEED_BRN65101_BLIT (1U << 5) +#define HWRTDATA_FIRST_BRN65101_STRIP (1U << 6) +#define HWRTDATA_NEED_BRN67182_2ND_RENDER (1U << 7) + +typedef enum +{ + RGXFWIF_RTDATA_STATE_NONE = 0, + RGXFWIF_RTDATA_STATE_KICKTA, + RGXFWIF_RTDATA_STATE_KICKTAFIRST, + RGXFWIF_RTDATA_STATE_TAFINISHED, + RGXFWIF_RTDATA_STATE_KICK3D, + RGXFWIF_RTDATA_STATE_3DFINISHED, + RGXFWIF_RTDATA_STATE_TAOUTOFMEM, + RGXFWIF_RTDATA_STATE_PARTIALRENDERFINISHED, + /* In case of HWR, we can't set the RTDATA state to NONE, + * as this will cause any TA to become a first TA. 
+ * To ensure all related TA's are skipped, we use the HWR state */ + RGXFWIF_RTDATA_STATE_HWR, + RGXFWIF_RTDATA_STATE_UNKNOWN = 0x7FFFFFFFU +} RGXFWIF_RTDATA_STATE; + +typedef struct +{ + IMG_BOOL bTACachesNeedZeroing; + +} UNCACHED_ALIGN RGXFWIF_HWRTDATA_COMMON; + +typedef struct +{ + RGXFWIF_DEV_VIRTADDR sHWRTDataCommonFwAddr; + + IMG_UINT32 ui32HWRTDataFlags; + RGXFWIF_RTDATA_STATE eState; + + IMG_DEV_VIRTADDR RGXFW_ALIGN psPMMListDevVAddr; /*!< MList Data Store */ + + IMG_UINT64 RGXFW_ALIGN ui64VCECatBase[4]; + IMG_UINT64 RGXFW_ALIGN ui64VCELastCatBase[4]; + IMG_UINT64 RGXFW_ALIGN ui64TECatBase[4]; + IMG_UINT64 RGXFW_ALIGN ui64TELastCatBase[4]; + IMG_UINT64 RGXFW_ALIGN ui64AlistCatBase; + IMG_UINT64 RGXFW_ALIGN ui64AlistLastCatBase; + + IMG_UINT64 RGXFW_ALIGN ui64PMAListStackPointer; + IMG_UINT32 ui32PMMListStackPointer; + + PRGXFWIF_FREELIST RGXFW_ALIGN apsFreeLists[RGXFW_MAX_FREELISTS]; + IMG_UINT32 aui32FreeListHWRSnapshot[RGXFW_MAX_FREELISTS]; + + IMG_DEV_VIRTADDR RGXFW_ALIGN psVHeapTableDevVAddr; + + RGXFWIF_CLEANUP_CTL sCleanupState; + + RGXFWIF_RTA_CTL sRTACtl; + + IMG_UINT32 ui32ScreenPixelMax; + IMG_UINT64 RGXFW_ALIGN ui64MultiSampleCtl; + IMG_UINT64 ui64FlippedMultiSampleCtl; + IMG_UINT32 ui32TPCStride; + IMG_DEV_VIRTADDR RGXFW_ALIGN sTailPtrsDevVAddr; + IMG_UINT32 ui32TPCSize; + IMG_UINT32 ui32TEScreen; + IMG_UINT32 ui32MTileStride; + IMG_UINT32 ui32TEAA; + IMG_UINT32 ui32TEMTILE1; + IMG_UINT32 ui32TEMTILE2; + IMG_UINT32 ui32ISPMergeLowerX; + IMG_UINT32 ui32ISPMergeLowerY; + IMG_UINT32 ui32ISPMergeUpperX; + IMG_UINT32 ui32ISPMergeUpperY; + IMG_UINT32 ui32ISPMergeScaleX; + IMG_UINT32 ui32ISPMergeScaleY; + IMG_DEV_VIRTADDR RGXFW_ALIGN sMacrotileArrayDevVAddr; + IMG_DEV_VIRTADDR RGXFW_ALIGN sRgnHeaderDevVAddr; + IMG_DEV_VIRTADDR RGXFW_ALIGN sRTCDevVAddr; + IMG_UINT64 RGXFW_ALIGN uiRgnHeaderSize; + IMG_UINT32 ui32ISPMtileSize; +#if defined(RGX_FIRMWARE) + struct RGXFWIF_FWCOMMONCONTEXT_* RGXFW_ALIGN psOwnerGeom; +#else + RGXFWIF_DEV_VIRTADDR RGXFW_ALIGN 
pui32OwnerGeomNotUsedByHost; +#endif +} UNCACHED_ALIGN RGXFWIF_HWRTDATA; + +#endif /* RGX_FWIF_KM_H */ + +/****************************************************************************** + End of file (rgx_fwif_km.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/include/rogue/rgx_fwif_resetframework.h b/drivers/mcst/gpu-imgtec/services/include/rogue/rgx_fwif_resetframework.h new file mode 100644 index 000000000000..3a8d49f90654 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/rogue/rgx_fwif_resetframework.h @@ -0,0 +1,74 @@ +/*************************************************************************/ /*! +@File rgx_fwif_resetframework.h +@Title Post-reset work-around framework FW interface +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGX_FWIF_RESETFRAMEWORK_H) +#define RGX_FWIF_RESETFRAMEWORK_H + +#include "img_types.h" +#include "rgx_fwif_shared.h" + +typedef struct +{ +#if defined(RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT) && (RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT == 2) + IMG_UINT64 uCDMReg_CDM_CB_QUEUE; + IMG_UINT64 uCDMReg_CDM_CB_BASE; + IMG_UINT64 uCDMReg_CDM_CB; +#else + IMG_UINT64 uCDMReg_CDM_CTRL_STREAM_BASE; +#endif +} RGXFWIF_RF_REGISTERS; + +#define RGXFWIF_RF_FLAG_ENABLE 0x00000001U /*!< enables the reset framework in the firmware */ + +typedef struct +{ + IMG_UINT32 ui32Flags; + + /* THIS MUST BE THE LAST MEMBER OF THE CONTAINING STRUCTURE */ + RGXFWIF_RF_REGISTERS RGXFW_ALIGN sFWRegisters; + +} RGXFWIF_RF_CMD; + +/* to opaquely allocate and copy in the kernel */ +#define RGXFWIF_RF_CMD_SIZE sizeof(RGXFWIF_RF_CMD) + +#endif /* RGX_FWIF_RESETFRAMEWORK_H */ diff --git a/drivers/mcst/gpu-imgtec/services/include/rogue/rgxapi_km.h b/drivers/mcst/gpu-imgtec/services/include/rogue/rgxapi_km.h new file mode 100644 index 000000000000..d342f2b9d907 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/rogue/rgxapi_km.h @@ -0,0 +1,313 @@ +/*************************************************************************/ /*! +@File +@Title RGX API Header kernel mode +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exported RGX API details +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef __RGXAPI_KM_H__ +#define __RGXAPI_KM_H__ + +#if defined(SUPPORT_SHARED_SLC) +/*************************************************************************/ /*! +@Function RGXInitSLC +@Description Init the SLC after a power up. It is required to call this + function if using SUPPORT_SHARED_SLC. Otherwise, it shouldn't + be called. +@Input hDevHandle RGX Device Node +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXInitSLC(IMG_HANDLE hDevHandle); +#endif + +#include "rgx_hwperf.h" + + +/****************************************************************************** + * RGX HW Performance Profiling Control API(s) + *****************************************************************************/ + +typedef struct _RGX_HWPERF_DEVICE_ +{ + IMG_CHAR pszName[20]; /* Helps identify this device uniquely */ + IMG_HANDLE hDevData; /* Handle for the server */ + + struct _RGX_HWPERF_DEVICE_ *psNext; +} RGX_HWPERF_DEVICE; + +typedef struct +{ + RGX_HWPERF_DEVICE *psHWPerfDevList; +} RGX_HWPERF_CONNECTION; + +/*************************************************************************/ /*! +@Function RGXHWPerfLazyConnect +@Description Obtain a HWPerf connection object to the RGX device(s). The + connections to devices are not actually opened until + HWPerfOpen() is called. +@Output ppsHWPerfConnection Address of a HWPerf connection object +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfLazyConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection); + + +/*************************************************************************/ /*! +@Function RGXHWPerfOpen +@Description Opens connection(s) to the RGX device(s). 
Valid handle to the
+ connection object has to be provided which means this
+ function needs to be preceded by the call to
+ RGXHWPerfLazyConnect() function.
+@Input psHWPerfConnection HWPerf connection object
+@Return PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfOpen(RGX_HWPERF_CONNECTION* psHWPerfConnection);
+
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfConnect
+@Description Obtain a connection object to the RGX HWPerf module. Allocated
+ connection object(s) reference opened connection(s). Calling
+ this function is equivalent to calling RGXHWPerfLazyConnect
+ and RGXHWPerfOpen. This connect should be used when the caller
+ will be retrieving event data.
+@Output ppsHWPerfConnection Address of HWPerf connection object
+@Return PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection);
+
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfFreeConnection
+@Description Frees the HWPerf connection object
+@Input psHWPerfConnection Pointer to connection object as returned
+ from RGXHWPerfLazyConnect()
+@Return PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfFreeConnection(RGX_HWPERF_CONNECTION** psHWPerfConnection);
+
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfClose +@Description Closes all the opened connection(s) to RGX device(s) +@Input psHWPerfConnection Pointer to HWPerf connection object as + returned from RGXHWPerfConnect() or + RGXHWPerfOpen() +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfClose(RGX_HWPERF_CONNECTION *psHWPerfConnection); + + +/*************************************************************************/ /*! +@Function RGXHWPerfDisconnect +@Description Disconnect from the RGX device +@Input ppsHWPerfConnection Pointer to HWPerf connection object as + returned from RGXHWPerfConnect() or + RGXHWPerfOpen(). Calling this function is + an equivalent of calling RGXHWPerfClose() + and RGXHWPerfFreeConnection(). +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfDisconnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection); + + +/*************************************************************************/ /*! +@Function RGXHWPerfControl +@Description Enable or disable the generation of RGX HWPerf event packets. + See RGXCtrlHWPerf(). +@Input psHWPerfConnection Pointer to HWPerf connection object +@Input eStreamId ID of the HWPerf stream +@Input bToggle Switch to toggle or apply mask. +@Input ui64Mask Mask of events to control. +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV RGXHWPerfControl( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + RGX_HWPERF_STREAM_ID eStreamId, + IMG_BOOL bToggle, + IMG_UINT64 ui64Mask); + + +/*************************************************************************/ /*! +@Function RGXHWPerfGetFilter +@Description Reads HWPerf stream filter where stream is identified by the + given stream ID. 
+@Input hDevData Handle to connection/device object +@Input eStreamId ID of the HWPerf stream +@Output ui64Filter HWPerf filter value +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfGetFilter( + IMG_HANDLE hDevData, + RGX_HWPERF_STREAM_ID eStreamId, + IMG_UINT64 *ui64Filter +); + + +/*************************************************************************/ /*! +@Function RGXHWPerfConfigureAndEnableCounters +@Description Enable and configure the performance counter block for one or + more device layout modules. + See RGXHWPerfConfigureAndEnableCustomCounters(). +@Input psHWPerfConnection Pointer to HWPerf connection object +@Input ui32NumBlocks Number of elements in the array +@Input asBlockConfigs Address of the array of configuration blocks +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV RGXHWPerfConfigureAndEnableCounters( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + IMG_UINT32 ui32NumBlocks, + RGX_HWPERF_CONFIG_CNTBLK* asBlockConfigs); + +/*************************************************************************/ /*! 
+@Function RGXHWPerfConfigureAndEnableCustomCounters +@Description Enable and configure custom performance counters +@Input psHWPerfConnection Pointer to connection object +@Input ui16CustomBlockID ID of the custom block to configure +@Input ui16NumCustomCounters Number of custom counters +@Input pui32CustomCounterIDs Pointer to array containing custom + counter IDs +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV RGXHWPerfConfigureAndEnableCustomCounters( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + IMG_UINT16 ui16CustomBlockID, + IMG_UINT16 ui16NumCustomCounters, + IMG_UINT32 *pui32CustomCounterIDs); + +/*************************************************************************/ /*! +@Function RGXHWPerfDisableCounters +@Description Disable the performance counter block for one or more device + layout modules. +@Input psHWPerfConnection Pointer to HWPerf connection object +@Input ui32NumBlocks Number of elements in the array +@Input aeBlockIDs An array of bytes with values taken from + the RGX_HWPERF_CNTBLK_ID enumeration. +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV RGXHWPerfDisableCounters( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + IMG_UINT32 ui32NumBlocks, + IMG_UINT16* aeBlockIDs); + +/*************************************************************************/ /*! +@Function RGXHWPerfEnableCounters +@Description Enable the performance counter block for one or more device + layout modules. +@Input hDevData Handle to connection/device object +@Input ui32NumBlocks Number of elements in the array +@Input aeBlockIDs An array of bytes with values taken from the + RGX_HWPERF_CNTBLK_ID enumeration. 
+@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV RGXHWPerfEnableCounters( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + IMG_UINT32 ui32NumBlocks, + IMG_UINT16* aeBlockIDs); + +/****************************************************************************** + * RGX HW Performance Profiling Retrieval API(s) + * + * The client must ensure their use of this acquire/release API for a single + * connection/stream must not be shared with multiple execution contexts e.g. + * between a kernel thread and an ISR handler. It is the client's + * responsibility to ensure this API is not interrupted by a high priority + * thread/ISR + *****************************************************************************/ + +/*************************************************************************/ /*! +@Function RGXHWPerfAcquireEvents +@Description When there is data available to read this call returns with OK + and the address and length of the data buffer the client can + safely read. This buffer may contain one or more event packets. + When there is no data to read, this call returns with OK and + sets *puiBufLen to 0 on exit. + Clients must pair this call with a ReleaseEvents call. +@Input hDevData Handle to connection/device object +@Input eStreamId ID of the HWPerf stream +@Output ppBuf Address of a pointer to a byte buffer. On exit it + contains the address of buffer to read from +@Output pui32BufLen Pointer to an integer. On exit it is the size of + the data to read from the buffer +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfAcquireEvents( + IMG_HANDLE hDevData, + RGX_HWPERF_STREAM_ID eStreamId, + IMG_PBYTE* ppBuf, + IMG_UINT32* pui32BufLen); + + +/*************************************************************************/ /*! 
+@Function RGXHWPerfReleaseEvents +@Description Called after client has read the event data out of the buffer + retrieved from the Acquire Events call to release resources. +@Input hDevData Handle to connection/device object +@Input eStreamId ID of the HWPerf stream +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR RGXHWPerfReleaseEvents( + IMG_HANDLE hDevData, + RGX_HWPERF_STREAM_ID eStreamId); + + +/*************************************************************************/ /*! +@Function RGXHWPerfConvertCRTimeStamp +@Description Converts the timestamp given by FW events to the common OS + timestamp. The first three inputs are obtained via a CLK_SYNC + event, ui64CRTimeStamp is the CR timestamp from the FW event + to be converted. +@Input ui32ClkSpeed Clock speed given by sync event +@Input ui64CorrCRTimeStamp CR Timestamp given by sync event +@Input ui64CorrOSTimeStamp Correlating OS Timestamp given by sync + event +@Input ui64CRTimeStamp CR Timestamp to convert +@Return IMG_UINT64 Calculated OS Timestamp +*/ /**************************************************************************/ +IMG_UINT64 RGXHWPerfConvertCRTimeStamp( + IMG_UINT32 ui32ClkSpeed, + IMG_UINT64 ui64CorrCRTimeStamp, + IMG_UINT64 ui64CorrOSTimeStamp, + IMG_UINT64 ui64CRTimeStamp); + +#endif /* __RGXAPI_KM_H__ */ + +/****************************************************************************** + End of file (rgxapi_km.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/include/rogue/rgxheapconfig.h b/drivers/mcst/gpu-imgtec/services/include/rogue/rgxheapconfig.h new file mode 100644 index 000000000000..1738172793f6 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/rogue/rgxheapconfig.h @@ -0,0 +1,192 @@ +/*************************************************************************/ /*! 
+@File +@Title device configuration +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Memory heaps device specific configuration +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXHEAPCONFIG_H
+#define RGXHEAPCONFIG_H
+
+#include "rgxdefs_km.h"
+
+/*
+ RGX Device Virtual Address Space Definitions
+ NOTES:
+ Base addresses have to be a multiple of 4MiB
+
+ RGX_PDSCODEDATA_HEAP_BASE and RGX_USCCODE_HEAP_BASE will be programmed,
+ on a global basis, into RGX_CR_PDS_EXEC_BASE and RGX_CR_USC_CODE_BASE_*
+ respectively. Therefore if clients use multiple configs they must still
+ be consistent with their definitions for these heaps.
+
+ Shared virtual memory (GENERAL_SVM) support requires half of the address
+ space be reserved for SVM allocations unless BRN fixes are required in
+ which case the SVM heap is disabled. This is reflected in the device
+ connection capability bits returned to userspace.
+
+ Variable page-size heap (GENERAL_NON4K) support reserves 64GiB from the
+ available 4K page-size heap (GENERAL) space. The actual heap page-size
+ defaults to 16K; AppHint PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE
+ can be used to force it to these values: 4K,64K,256K,1M,2M.
+
+ Heaps must not start at 0x0000000000, as this is reserved for internal
+ use within device memory layer.
+*/
+
+ /* Start at 4 MiB Size of 512 GiB less 4 MiB (managed by OS/Services) */
+ #define RGX_GENERAL_SVM_HEAP_BASE IMG_UINT64_C(0x0000400000)
+ #define RGX_GENERAL_SVM_HEAP_SIZE IMG_UINT64_C(0x7FFFC00000)
+
+ /* Start at 512GiB.
Size of 255 GiB */ + #define RGX_GENERAL_HEAP_BASE IMG_UINT64_C(0x8000000000) + #define RGX_GENERAL_HEAP_SIZE IMG_UINT64_C(0x3FC0000000) + + /* Start at 767GiB. Size of 1 GiB */ + #define RGX_VK_CAPT_REPLAY_BUF_HEAP_BASE IMG_UINT64_C(0xBFC0000000) + #define RGX_VK_CAPT_REPLAY_BUF_HEAP_SIZE IMG_UINT64_C(0x0040000000) + + /* HWBRN65273 workaround requires General Heap to use a unique single 1GB PCE entry. */ + #define RGX_GENERAL_BRN_65273_HEAP_BASE IMG_UINT64_C(0x65C0000000) + #define RGX_GENERAL_BRN_65273_HEAP_SIZE IMG_UINT64_C(0x0080000000) + + /* Start at 768GiB. Size of 64 GiB */ + #define RGX_GENERAL_NON4K_HEAP_BASE IMG_UINT64_C(0xC000000000) + #define RGX_GENERAL_NON4K_HEAP_SIZE IMG_UINT64_C(0x1000000000) + + /* HWBRN65273 workaround requires Non4K memory to use a unique single 1GB PCE entry. */ + #define RGX_GENERAL_NON4K_BRN_65273_HEAP_BASE IMG_UINT64_C(0x73C0000000) + #define RGX_GENERAL_NON4K_BRN_65273_HEAP_SIZE IMG_UINT64_C(0x0080000000) + + /* Start at 832 GiB. Size of 32 GiB */ + #define RGX_BIF_TILING_NUM_HEAPS 4 + #define RGX_BIF_TILING_HEAP_SIZE IMG_UINT64_C(0x0200000000) + #define RGX_BIF_TILING_HEAP_1_BASE IMG_UINT64_C(0xD000000000) + #define RGX_BIF_TILING_HEAP_2_BASE (RGX_BIF_TILING_HEAP_1_BASE + RGX_BIF_TILING_HEAP_SIZE) + #define RGX_BIF_TILING_HEAP_3_BASE (RGX_BIF_TILING_HEAP_2_BASE + RGX_BIF_TILING_HEAP_SIZE) + #define RGX_BIF_TILING_HEAP_4_BASE (RGX_BIF_TILING_HEAP_3_BASE + RGX_BIF_TILING_HEAP_SIZE) + + /* Start at 872 GiB. Size of 4 GiB */ + #define RGX_PDSCODEDATA_HEAP_BASE IMG_UINT64_C(0xDA00000000) + #define RGX_PDSCODEDATA_HEAP_SIZE IMG_UINT64_C(0x0100000000) + + /* HWBRN65273 workaround requires PDS memory to use a unique single 1GB PCE entry. */ + #define RGX_PDSCODEDATA_BRN_65273_HEAP_BASE IMG_UINT64_C(0xA800000000) + #define RGX_PDSCODEDATA_BRN_65273_HEAP_SIZE IMG_UINT64_C(0x0040000000) + + /* HWBRN63142 workaround requires Region Header memory to be at the top + of a 16GB aligned range. 
This is so when masked with 0x03FFFFFFFF the + address will avoid aliasing PB addresses. Start at 879.75GB. Size of 256MB. */ + #define RGX_RGNHDR_BRN_63142_HEAP_BASE IMG_UINT64_C(0xDBF0000000) + #define RGX_RGNHDR_BRN_63142_HEAP_SIZE IMG_UINT64_C(0x0010000000) + + /* Start at 880 GiB, Size of 1 MiB */ + #define RGX_VISTEST_HEAP_BASE IMG_UINT64_C(0xDC00000000) + #define RGX_VISTEST_HEAP_SIZE IMG_UINT64_C(0x0000100000) + + /* HWBRN65273 workaround requires VisTest memory to use a unique single 1GB PCE entry. */ + #define RGX_VISTEST_BRN_65273_HEAP_BASE IMG_UINT64_C(0xE400000000) + #define RGX_VISTEST_BRN_65273_HEAP_SIZE IMG_UINT64_C(0x0000100000) + + /* Start at 896 GiB Size of 4 GiB */ + #define RGX_USCCODE_HEAP_BASE IMG_UINT64_C(0xE000000000) + #define RGX_USCCODE_HEAP_SIZE IMG_UINT64_C(0x0100000000) + + /* HWBRN65273 workaround requires USC memory to use a unique single 1GB PCE entry. */ + #define RGX_USCCODE_BRN_65273_HEAP_BASE IMG_UINT64_C(0xBA00000000) + #define RGX_USCCODE_BRN_65273_HEAP_SIZE IMG_UINT64_C(0x0040000000) + + /* Start at 903GiB. Firmware heaps defined in rgxdefs_km.h + RGX_FIRMWARE_RAW_HEAP_BASE + RGX_FIRMWARE_HOST_MAIN_HEAP_BASE + RGX_FIRMWARE_GUEST_MAIN_HEAP_BASE + RGX_FIRMWARE_MAIN_HEAP_SIZE + RGX_FIRMWARE_CONFIG_HEAP_SIZE + RGX_FIRMWARE_RAW_HEAP_SIZE */ + + /* HWBRN65273 workaround requires TQ memory to start at 64kB and use a unique single 0.99GB PCE entry. */ + #define RGX_TQ3DPARAMETERS_BRN_65273_HEAP_BASE IMG_UINT64_C(0x0000010000) + #define RGX_TQ3DPARAMETERS_BRN_65273_HEAP_SIZE IMG_UINT64_C(0x003FFF0000) + + /* Start at 912GiB. Size of 16 GiB. 16GB aligned to match RGX_CR_ISP_PIXEL_BASE */ + #define RGX_TQ3DPARAMETERS_HEAP_BASE IMG_UINT64_C(0xE400000000) + #define RGX_TQ3DPARAMETERS_HEAP_SIZE IMG_UINT64_C(0x0400000000) + + /* Start at 928GiB. Size of 4 GiB */ + #define RGX_DOPPLER_HEAP_BASE IMG_UINT64_C(0xE800000000) + #define RGX_DOPPLER_HEAP_SIZE IMG_UINT64_C(0x0100000000) + + /* Start at 932GiB. 
Size of 4 GiB */ + #define RGX_DOPPLER_OVERFLOW_HEAP_BASE IMG_UINT64_C(0xE900000000) + #define RGX_DOPPLER_OVERFLOW_HEAP_SIZE IMG_UINT64_C(0x0100000000) + + /* CDM Signals heap (31 signals less one reserved for Services). Start at 936GiB, 960bytes rounded up to 4K */ + #define RGX_SIGNALS_HEAP_BASE IMG_UINT64_C(0xEA00000000) + #define RGX_SIGNALS_HEAP_SIZE IMG_UINT64_C(0x0000001000) + + /* TDM TPU YUV coeffs - can be reduced to a single page */ + #define RGX_TDM_TPU_YUV_COEFFS_HEAP_BASE IMG_UINT64_C(0xEA00080000) + #define RGX_TDM_TPU_YUV_COEFFS_HEAP_SIZE IMG_UINT64_C(0x0000040000) + + /* HWBRN65273 workaround requires two Region Header buffers 4GB apart. */ + #define RGX_MMU_INIA_BRN_65273_HEAP_BASE IMG_UINT64_C(0xF800000000) + #define RGX_MMU_INIA_BRN_65273_HEAP_SIZE IMG_UINT64_C(0x0040000000) + #define RGX_MMU_INIB_BRN_65273_HEAP_BASE IMG_UINT64_C(0xF900000000) + #define RGX_MMU_INIB_BRN_65273_HEAP_SIZE IMG_UINT64_C(0x0040000000) + + /* Heaps which are barred from using the reserved-region feature (intended for clients + of Services), but need the macro definitions are buried here */ + #define RGX_GENERAL_SVM_HEAP_RESERVED_SIZE 0 /* SVM heap is exclusively managed by USER or KERNEL */ + #define RGX_GENERAL_NON4K_HEAP_RESERVED_SIZE 0 /* Non-4K can have page sizes up to 2MB, which is currently + not supported in reserved-heap implementation */ + /* ... 
and heaps which are not used outside of Services */ + #define RGX_RGNHDR_BRN_63142_HEAP_RESERVED_SIZE 0 + #define RGX_MMU_INIA_BRN_65273_HEAP_RESERVED_SIZE 0 + #define RGX_MMU_INIB_BRN_65273_HEAP_RESERVED_SIZE 0 + #define RGX_TQ3DPARAMETERS_HEAP_RESERVED_SIZE 0 + #define RGX_DOPPLER_HEAP_RESERVED_SIZE 0 + #define RGX_DOPPLER_OVERFLOW_HEAP_RESERVED_SIZE 0 + #define RGX_SERVICES_SIGNALS_HEAP_RESERVED_SIZE 0 + #define RGX_TDM_TPU_YUV_COEFFS_HEAP_RESERVED_SIZE 0 + +#endif /* RGXHEAPCONFIG_H */ + +/****************************************************************************** + End of file (rgxheapconfig.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/include/sync_checkpoint_internal.h b/drivers/mcst/gpu-imgtec/services/include/sync_checkpoint_internal.h new file mode 100644 index 000000000000..14c2258d2a0c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/sync_checkpoint_internal.h @@ -0,0 +1,270 @@ +/*************************************************************************/ /*! +@File +@Title Services internal synchronisation checkpoint interface header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Defines the internal server interface for services + synchronisation checkpoints. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef __SYNC_CHECKPOINT__ +#define __SYNC_CHECKPOINT__ + +#include "img_types.h" +#include "opaque_types.h" +#include "sync_checkpoint_internal_fw.h" +#include "sync_checkpoint_external.h" +#include "sync_checkpoint.h" +#include "ra.h" +#include "dllist.h" +#include "lock.h" +#include "devicemem.h" +#include "rgx_fwif_shared.h" + +struct SYNC_CHECKPOINT_RECORD; + +/* + Private structures +*/ + +typedef struct _SYNC_CHECKPOINT_CONTEXT_CTL_ _SYNC_CHECKPOINT_CONTEXT_CTL, *_PSYNC_CHECKPOINT_CONTEXT_CTL; + +typedef struct _SYNC_CHECKPOINT_CONTEXT_ +{ + PPVRSRV_DEVICE_NODE psDevNode; + IMG_CHAR azName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of the RA */ + RA_ARENA *psSubAllocRA; /*!< RA context */ + IMG_CHAR azSpanName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of the span RA */ + RA_ARENA *psSpanRA; /*!< RA used for span management of SubAllocRA */ + ATOMIC_T hRefCount; /*!< Ref count for this context */ + ATOMIC_T hCheckpointCount; /*!< Checkpoint count for this context */ + POS_LOCK hLock; + _PSYNC_CHECKPOINT_CONTEXT_CTL psContextCtl; +#if defined(PDUMP) + DLLIST_NODE sSyncCheckpointBlockListHead; /*!< List head for the sync chkpt blocks in this context*/ + POS_LOCK hSyncCheckpointBlockListLock; /*!< sync chkpt blocks list lock*/ + DLLIST_NODE sListNode; /*!< List node for the sync chkpt context list*/ +#endif +} _SYNC_CHECKPOINT_CONTEXT; + +typedef struct _SYNC_CHECKPOINT_BLOCK_ +{ + ATOMIC_T hRefCount; /*!< Ref count for this sync block */ + POS_LOCK hLock; + _SYNC_CHECKPOINT_CONTEXT *psContext; /*!< Our copy of the services connection */ + PPVRSRV_DEVICE_NODE psDevNode; + IMG_UINT32 ui32SyncBlockSize; /*!< Size of the sync checkpoint block */ + IMG_UINT32 ui32FirmwareAddr; /*!< Firmware address */ + DEVMEM_MEMDESC *hMemDesc; /*!< DevMem allocation for block */ + volatile IMG_UINT32 *pui32LinAddr; /*!< Server-code CPU mapping */ + IMG_UINT64 uiSpanBase; /*!< Base of this import (FW DevMem) in 
the span RA */ +#if defined(PDUMP) + DLLIST_NODE sListNode; /*!< List node for the sync chkpt blocks */ +#endif +} SYNC_CHECKPOINT_BLOCK; + +typedef struct SYNC_CHECKPOINT_RECORD* PSYNC_CHECKPOINT_RECORD_HANDLE; + +typedef struct _SYNC_CHECKPOINT_ +{ + //_SYNC_CHECKPOINT_CONTEXT *psContext; /*!< pointer to the parent context of this checkpoint */ + /* A sync checkpoint is assigned a unique ID, to avoid any confusion should + * the same memory be re-used later for a different checkpoint + */ + IMG_UINT32 ui32UID; /*!< Unique ID assigned to sync checkpoint (to distinguish checkpoints if memory is re-used)*/ + ATOMIC_T hRefCount; /*!< Ref count for this sync */ + ATOMIC_T hEnqueuedCCBCount; /*!< Num times sync has been put in CCBs */ + SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock; /*!< Synchronisation block this checkpoint is allocated on */ + IMG_UINT64 uiSpanAddr; /*!< Span address of the sync */ + volatile SYNC_CHECKPOINT_FW_OBJ *psSyncCheckpointFwObj; /*!< CPU view of the data held in the sync block */ + PRGXFWIF_UFO_ADDR sCheckpointUFOAddr; /*!< PRGXFWIF_UFO_ADDR struct used to pass update address to FW */ + IMG_CHAR azName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of the checkpoint */ + PVRSRV_TIMELINE hTimeline; /*!< Timeline on which this sync checkpoint was created */ + IMG_UINT32 ui32ValidationCheck; + IMG_PID uiProcess; /*!< The Process ID of the process which created this sync checkpoint */ + PSYNC_CHECKPOINT_RECORD_HANDLE hRecord; /*!< Sync record handle */ + DLLIST_NODE sListNode; /*!< List node for the global sync chkpt list */ + DLLIST_NODE sDeferredFreeListNode; /*!< List node for the deferred free sync chkpt list */ + IMG_UINT32 ui32FWAddr; /*!< FWAddr stored at sync checkpoint alloc time */ + PDUMP_FLAGS_T ui32PDumpFlags; /*!< Pdump Capture mode to be used for POL*/ +} _SYNC_CHECKPOINT; + + +typedef struct _SYNC_CHECKPOINT_SIGNAL_ +{ + _SYNC_CHECKPOINT asSyncCheckpoint; /*!< Store sync checkpt for deferred signal */ + IMG_UINT32 ui32Status; /*!< sync 
checkpt status signal/errored */
+} _SYNC_CHECKPOINT_DEFERRED_SIGNAL;
+
+#define GET_CP_CB_NEXT_IDX(_curridx) (((_curridx) + 1) % SYNC_CHECKPOINT_MAX_DEFERRED_SIGNAL)
+#define GET_CP_CB_BASE(_idx) (IMG_OFFSET_ADDR(psDevNode->pui8DeferredSyncCPSignal, \
+ ((_idx) * sizeof(_SYNC_CHECKPOINT_DEFERRED_SIGNAL))))
+
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointGetFirmwareAddr
+
+@Description Returns the firmware address of the given sync checkpoint.
+
+@Input psSyncCheckpoint Synchronisation checkpoint to get
+ the firmware address of
+
+@Return The firmware address of the sync checkpoint
+
+*/
+/*****************************************************************************/
+IMG_UINT32
+SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointCCBEnqueued
+
+@Description Increment the CCB enqueued reference count for a
+ synchronisation checkpoint. This indicates how many FW
+ operations (checks/update) have been placed into CCBs for the
+ sync checkpoint.
+ When the FW services these operations, it increments its own
+ reference count. When these two values are equal, we know
+ there are no outstanding FW operations for the checkpoint
+ in any CCB.
+
+@Input psSyncCheckpoint Synchronisation checkpoint for which
+ to increment the enqueued reference
+ count
+
+@Return None
+
+*/
+/*****************************************************************************/
+void
+SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointGetEnqueuedCount
+
+@Description Returns the enqueued CCB operation count of the sync checkpoint.
+
+@Input psSyncCheckpoint Synchronisation checkpoint to get
+ the enqueued count of
+
+@Return The enqueued count of the sync checkpoint
+ (i.e.
the number of FW operations (checks or updates) + currently enqueued in CCBs for the sync checkpoint) + +*/ +/*****************************************************************************/ +IMG_UINT32 +SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint); + +/*************************************************************************/ /*! +@Function SyncCheckpointGetReferenceCount + +@Description . + +@Input psSyncCheckpoint Synchronisation checkpoint to get + the reference count of + +@Return The host reference count of the sync checkpoint + +*/ +/*****************************************************************************/ +IMG_UINT32 +SyncCheckpointGetReferenceCount(PSYNC_CHECKPOINT psSyncCheckpoint); + +/*************************************************************************/ /*! +@Function SyncCheckpointGetCreator + +@Description . + +@Input psSyncCheckpoint Synchronisation checkpoint to get + the creating process of + +@Return The process id of the process which created this sync checkpoint. + +*/ +/*****************************************************************************/ +IMG_PID +SyncCheckpointGetCreator(PSYNC_CHECKPOINT psSyncCheckpoint); + +/*************************************************************************/ /*! +@Function SyncCheckpointGetId + +@Description . + +@Input psSyncCheckpoint Synchronisation checkpoint to get + the unique Id of + +@Return The unique Id of the sync checkpoint + +*/ +/*****************************************************************************/ +IMG_UINT32 +SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint); + +/*************************************************************************/ /*! +@Function SyncCheckpointGetTimeline + +@Description . 
+ +@Input psSyncCheckpoint Synchronisation checkpoint to get + the parent timeline of + +@Return The parent timeline of the sync checkpoint + +*/ +/*****************************************************************************/ +PVRSRV_TIMELINE +SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint); + +/*************************************************************************/ /*! +@Function SyncCheckpointGetRGXFWIFUFOAddr + +@Description . + +@Input psSyncCheckpoint Synchronisation checkpoint to get + the PRGXFWIF_UFO_ADDR of + +@Return The PRGXFWIF_UFO_ADDR of the sync checkpoint, used when + providing the update in server kick code. + +*/ +/*****************************************************************************/ +PRGXFWIF_UFO_ADDR* +SyncCheckpointGetRGXFWIFUFOAddr(PSYNC_CHECKPOINT psSyncCheckpoint); + +#endif /* __SYNC_CHECKPOINT__ */ diff --git a/drivers/mcst/gpu-imgtec/services/include/sync_checkpoint_internal_fw.h b/drivers/mcst/gpu-imgtec/services/include/sync_checkpoint_internal_fw.h new file mode 100644 index 000000000000..49df0cac2d4e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/sync_checkpoint_internal_fw.h @@ -0,0 +1,63 @@ +/*************************************************************************/ /*! +@File +@Title Services internal synchronisation checkpoint FW obj header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Defines the internal FW object structure for services + synchronisation checkpoints. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef SYNC_CHECKPOINT_INTERNAL_FW_H +#define SYNC_CHECKPOINT_INTERNAL_FW_H + +#include "img_types.h" + +/* Sync_checkpoint firmware object. + * This is the FW-addressable structure used to hold the sync checkpoint's + * state and other information which needs to be accessed by the firmware. + */ +typedef struct +{ + IMG_UINT32 ui32State; /*!< Holds the current state of the sync checkpoint */ + IMG_UINT32 ui32FwRefCount; /*!< Holds the FW reference count (num of fences/updates processed) */ +} SYNC_CHECKPOINT_FW_OBJ; + +/* Bit mask Firmware can use to test if a checkpoint has signalled or errored */ +#define SYNC_CHECKPOINT_SIGNALLED_MASK (0x1 << 0) + +#endif /* SYNC_CHECKPOINT_INTERNAL_FW_H */ diff --git a/drivers/mcst/gpu-imgtec/services/include/volcanic/km_apphint_defs.h b/drivers/mcst/gpu-imgtec/services/include/volcanic/km_apphint_defs.h new file mode 100644 index 000000000000..0fa98a4e64f8 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/volcanic/km_apphint_defs.h @@ -0,0 +1,165 @@ +/*************************************************************************/ /*! +@File +@Title Services AppHint definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include "km_apphint_defs_common.h" + +#ifndef KM_APPHINT_DEFS_H +#define KM_APPHINT_DEFS_H + +/* NB: The 'DEVICE' AppHints must be last in this list as they will be + * duplicated in the case of a driver supporting multiple devices + */ +#define APPHINT_LIST_ALL \ + APPHINT_LIST_BUILDVAR_COMMON \ + APPHINT_LIST_BUILDVAR \ + APPHINT_LIST_MODPARAM_COMMON \ + APPHINT_LIST_MODPARAM \ + APPHINT_LIST_DEBUGFS_COMMON \ + APPHINT_LIST_DEBUGFS \ + APPHINT_LIST_DEBUGFS_DEVICE_COMMON \ + APPHINT_LIST_DEBUGFS_DEVICE + + +/* +******************************************************************************* + Build variables (volcanic-specific) + All of these should be configurable only through the 'default' value +******************************************************************************/ +#define APPHINT_LIST_BUILDVAR + +/* +******************************************************************************* + Module parameters (volcanic-specific) +******************************************************************************/ +#define APPHINT_LIST_MODPARAM \ +/* name, type, class, default, helper, */ \ +X(FabricCoherencyOverride, UINT32, ALWAYS, PVRSRV_APPHINT_FABRICCOHERENCYOVERRIDE, NO_PARAM_TABLE ) \ +\ +X(EnableGenericDMKillingRandMode, BOOL, VALIDATION, PVRSRV_APPHINT_ENABLEDMKILLINGRANDMODE, NO_PARAM_TABLE ) \ +X(KillingCtl, UINT32, VALIDATION, PVRSRV_APPHINT_KILLINGCTL, NO_PARAM_TABLE ) \ +X(HWValEnableSPUPowerMaskChange, BOOL, VALIDATION, PVRSRV_APPHINT_HWVALENABLESPUPOWERMASKCHANGE, NO_PARAM_TABLE ) \ +X(HWValAvailableSPUMask, UINT32, VALIDATION, PVRSRV_APPHINT_HWVALAVAILABLESPUMASK, NO_PARAM_TABLE ) \ +\ +X(HWPerfDisableCounterFilter, BOOL, VALIDATION, PVRSRV_APPHINT_HWPERFDISABLECOUNTERFILTER, NO_PARAM_TABLE ) \ +\ +X(ISPSchedulingLatencyMode, UINT32, ALWAYS, PVRSRV_APPHINT_ISPSCHEDULINGLATENCYMODE, NO_PARAM_TABLE ) \ +X(ValidateSOCUSCTimer, BOOL, VALIDATION, 
PVRSRV_APPHINT_VALIDATESOCUSCTIMERS, NO_PARAM_TABLE ) \ +\ +X(USRMNumRegionsVDM, UINT32, VALIDATION, 0, NO_PARAM_TABLE ) \ +X(USRMNumRegionsCDM, UINT32, VALIDATION, 0, NO_PARAM_TABLE ) \ +X(USRMNumRegionsDDM, UINT32, VALIDATION, 0, NO_PARAM_TABLE ) \ +X(USRMNumRegionsPDM, UINT32, VALIDATION, 0, NO_PARAM_TABLE ) \ +X(USRMNumRegionsTDM, UINT32, VALIDATION, 0, NO_PARAM_TABLE ) \ +\ +X(UVBRMNumRegionsVDM, UINT64, VALIDATION, 0, NO_PARAM_TABLE ) \ +X(UVBRMNumRegionsDDM, UINT32, VALIDATION, 0, NO_PARAM_TABLE ) \ +\ +X(CDMArbitrationOverride, UINT32, ALWAYS, PVRSRV_APPHINT_CDMARBITRATIONOVERRIDE, NO_PARAM_TABLE ) \ +\ +X(DualLockstepFWProcessor, BOOL, VALIDATION, 1, NO_PARAM_TABLE ) \ +X(EnablePollOnChecksumErrorStatus, UINT32, VALIDATION, 0, NO_PARAM_TABLE ) \ + +/* +******************************************************************************* + Debugfs parameters (volcanic-specific) - driver configuration +******************************************************************************/ +#define APPHINT_LIST_DEBUGFS \ +/* name, type, class, default, helper, */ \ + +/* +******************************************************************************* + Debugfs parameters (volcanic-specific) - device configuration +******************************************************************************/ +#define APPHINT_LIST_DEBUGFS_DEVICE \ +/* name, type, class, default, helper, */ \ + +/* +******************************************************************************* + + Table generated enums + +******************************************************************************/ +/* Unique ID for all AppHints */ +typedef enum { +#define X(a, b, c, d, e) APPHINT_ID_ ## a, + APPHINT_LIST_ALL +#undef X + APPHINT_ID_MAX +} APPHINT_ID; + +/* ID for build variable Apphints - used for build variable only structures */ +typedef enum { +#define X(a, b, c, d, e) APPHINT_BUILDVAR_ID_ ## a, + APPHINT_LIST_BUILDVAR_COMMON + APPHINT_LIST_BUILDVAR +#undef X + APPHINT_BUILDVAR_ID_MAX +} 
APPHINT_BUILDVAR_ID; + +/* ID for Modparam Apphints - used for modparam only structures */ +typedef enum { +#define X(a, b, c, d, e) APPHINT_MODPARAM_ID_ ## a, + APPHINT_LIST_MODPARAM_COMMON + APPHINT_LIST_MODPARAM +#undef X + APPHINT_MODPARAM_ID_MAX +} APPHINT_MODPARAM_ID; + +/* ID for Debugfs Apphints - used for debugfs only structures */ +typedef enum { +#define X(a, b, c, d, e) APPHINT_DEBUGFS_ID_ ## a, + APPHINT_LIST_DEBUGFS_COMMON + APPHINT_LIST_DEBUGFS +#undef X + APPHINT_DEBUGFS_ID_MAX +} APPHINT_DEBUGFS_ID; + +/* ID for Debugfs Device Apphints - used for debugfs device only structures */ +typedef enum { +#define X(a, b, c, d, e) APPHINT_DEBUGFS_DEVICE_ID_ ## a, + APPHINT_LIST_DEBUGFS_DEVICE_COMMON + APPHINT_LIST_DEBUGFS_DEVICE +#undef X + APPHINT_DEBUGFS_DEVICE_ID_MAX +} APPHINT_DEBUGFS_DEVICE_ID; + +#endif /* KM_APPHINT_DEFS_H */ diff --git a/drivers/mcst/gpu-imgtec/services/include/volcanic/rgx_fwif_hwperf.h b/drivers/mcst/gpu-imgtec/services/include/volcanic/rgx_fwif_hwperf.h new file mode 100644 index 000000000000..703b0edc0866 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/volcanic/rgx_fwif_hwperf.h @@ -0,0 +1,119 @@ +/*************************************************************************/ /*! +@File rgx_fwif_hwperf.h +@Title RGX HWPerf support +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Shared header between RGX firmware and Init process +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef RGX_FWIF_HWPERF_H +#define RGX_FWIF_HWPERF_H + +#include "rgx_fwif_shared.h" +#include "rgx_hwperf.h" +#include "rgxdefs_km.h" + +/* Server and Firmware definitions only */ + +/*! The number of HWPerf blocks in the GPU */ + +#if defined(RGX_FIRMWARE) +#define RGX_HWPERF_NUM_SPU ((RGX_FEATURE_NUM_SPU)) +#define RGX_HWPERF_NUM_USC ((RGX_FEATURE_NUM_CLUSTERS)) +#define RGX_HWPERF_NUM_ISP_PER_SPU ((RGX_FEATURE_NUM_ISP_PER_SPU)) +#define RGX_HWPERF_NUM_PBE ((RGX_FEATURE_PBE_PER_SPU) * (RGX_FEATURE_NUM_SPU)) +#define RGX_HWPERF_NUM_MERCER ((RGX_FEATURE_NUM_CLUSTERS)) +#define RGX_HWPERF_NUM_PBE_SHARED ((RGX_FEATURE_NUM_SPU)) +#define RGX_HWPERF_NUM_SWIFT ((RGX_FEATURE_NUM_SPU * RGX_FEATURE_MAX_TPU_PER_SPU)) +#define RGX_HWPERF_NUM_TEXAS ((RGX_FEATURE_NUM_SPU)) +#define RGX_HWPERF_NUM_TPU ((RGX_FEATURE_NUM_SPU * RGX_FEATURE_MAX_TPU_PER_SPU)) +#define RGX_HWPERF_NUM_ISP ((RGX_FEATURE_NUM_CLUSTERS)) + +#define RGX_CNTBLK_INDIRECT_COUNT(_class) ((RGX_HWPERF_NUM_ ## _class)) + +/*! The number of layout blocks defined with configurable + * performance counters. Compile time constants. + * This is for the Series 8XT+ layout. + */ +#define RGX_HWPERF_MAX_DEFINED_BLKS (\ + (IMG_UINT32)RGX_CNTBLK_ID_DIRECT_LAST +\ + RGX_CNTBLK_INDIRECT_COUNT(ISP) +\ + RGX_CNTBLK_INDIRECT_COUNT(MERCER) +\ + RGX_CNTBLK_INDIRECT_COUNT(PBE) +\ + RGX_CNTBLK_INDIRECT_COUNT(PBE_SHARED) +\ + RGX_CNTBLK_INDIRECT_COUNT(USC) +\ + RGX_CNTBLK_INDIRECT_COUNT(TPU) +\ + RGX_CNTBLK_INDIRECT_COUNT(SWIFT) +\ + RGX_CNTBLK_INDIRECT_COUNT(TEXAS)) + +#endif /* RGX_FIRMWARE */ + +/*****************************************************************************/ + +/* Structure used in the FW's global control data to hold the performance + * counters provisioned for a given block. 
*/ +typedef struct +{ + IMG_UINT32 uiBlockID; + IMG_UINT32 uiNumCounters; // Number of counters held + // in aui32CounterCfg + // [0..RGX_CNTBLK_COUNTERS_MAX) + IMG_UINT32 uiEnabled; // 1 => enabled, 0=> disabled + RGXFWIF_DEV_VIRTADDR psModel; // link to model table for uiBlockID + IMG_UINT32 aui32CounterCfg[RGX_CNTBLK_COUNTERS_MAX]; +} RGXFWIF_HWPERF_CTL_BLK; + + +/*! + ***************************************************************************** + * Structure used in the FW's global RGXFW_CTL store, holding HWPerf counter + * block configuration. It is written to by the Server on FW initialisation + * (PDUMP=1) and by the FW BG kCCB command processing code. It is read by + * the FW IRQ register programming and HWPerf event generation routines. + * Size of the sBlkCfg[] array must be consistent between KM/UM and FW. + * FW will ASSERT if the sizes are different + * (ui32NumBlocks != RGX_HWPERF_MAX_DEFINED_BLKS) + ****************************************************************************/ +typedef struct +{ + IMG_UINT32 ui32Reserved; + IMG_UINT32 ui32CtrlWord; + IMG_UINT32 ui32EnabledBlksCount; + IMG_UINT32 ui32NumBlocks; + RGXFWIF_HWPERF_CTL_BLK RGXFW_ALIGN sBlkCfg[1]; // First array entry +} UNCACHED_ALIGN RGXFWIF_HWPERF_CTL; +#endif diff --git a/drivers/mcst/gpu-imgtec/services/include/volcanic/rgx_fwif_km.h b/drivers/mcst/gpu-imgtec/services/include/volcanic/rgx_fwif_km.h new file mode 100644 index 000000000000..27f16a0a1897 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/volcanic/rgx_fwif_km.h @@ -0,0 +1,2166 @@ +/*************************************************************************/ /*! +@File +@Title RGX firmware interface structures used by pvrsrvkm +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX firmware interface structures used by pvrsrvkm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGX_FWIF_KM_H) +#define RGX_FWIF_KM_H + +#include "img_types.h" +#include "rgx_fwif_shared.h" +#include "rgxdefs_km.h" +#include "dllist.h" +#include "rgx_hwperf.h" +#include "rgx_hw_errors.h" + + +/*************************************************************************/ /*! + Logging type +*/ /**************************************************************************/ +#define RGXFWIF_LOG_TYPE_NONE 0x00000000U +#define RGXFWIF_LOG_TYPE_TRACE 0x00000001U +#define RGXFWIF_LOG_TYPE_GROUP_MAIN 0x00000002U +#define RGXFWIF_LOG_TYPE_GROUP_MTS 0x00000004U +#define RGXFWIF_LOG_TYPE_GROUP_CLEANUP 0x00000008U +#define RGXFWIF_LOG_TYPE_GROUP_CSW 0x00000010U +#define RGXFWIF_LOG_TYPE_GROUP_BIF 0x00000020U +#define RGXFWIF_LOG_TYPE_GROUP_PM 0x00000040U +#define RGXFWIF_LOG_TYPE_GROUP_RTD 0x00000080U +#define RGXFWIF_LOG_TYPE_GROUP_SPM 0x00000100U +#define RGXFWIF_LOG_TYPE_GROUP_POW 0x00000200U +#define RGXFWIF_LOG_TYPE_GROUP_HWR 0x00000400U +#define RGXFWIF_LOG_TYPE_GROUP_HWP 0x00000800U +#define RGXFWIF_LOG_TYPE_GROUP_RPM 0x00001000U +#define RGXFWIF_LOG_TYPE_GROUP_DMA 0x00002000U +#define RGXFWIF_LOG_TYPE_GROUP_MISC 0x00004000U +#define RGXFWIF_LOG_TYPE_GROUP_DEBUG 0x80000000U +#define RGXFWIF_LOG_TYPE_GROUP_MASK 0x80007FFEU +#define RGXFWIF_LOG_TYPE_MASK 0x80007FFFU + +/* String used in pvrdebug -h output */ +#define RGXFWIF_LOG_GROUPS_STRING_LIST "main,mts,cleanup,csw,bif,pm,rtd,spm,pow,hwr,hwp,rpm,dma,misc,debug" + +/* Table entry to map log group strings to log type value */ +typedef struct { + const IMG_CHAR* pszLogGroupName; + IMG_UINT32 ui32LogGroupType; +} RGXFWIF_LOG_GROUP_MAP_ENTRY; + +/* + Macro for use with the RGXFWIF_LOG_GROUP_MAP_ENTRY type to create a lookup + table where needed. Keep log group names short, no more than 20 chars. 
+*/ +#define RGXFWIF_LOG_GROUP_NAME_VALUE_MAP { "none", RGXFWIF_LOG_TYPE_NONE }, \ + { "main", RGXFWIF_LOG_TYPE_GROUP_MAIN }, \ + { "mts", RGXFWIF_LOG_TYPE_GROUP_MTS }, \ + { "cleanup", RGXFWIF_LOG_TYPE_GROUP_CLEANUP }, \ + { "csw", RGXFWIF_LOG_TYPE_GROUP_CSW }, \ + { "bif", RGXFWIF_LOG_TYPE_GROUP_BIF }, \ + { "pm", RGXFWIF_LOG_TYPE_GROUP_PM }, \ + { "rtd", RGXFWIF_LOG_TYPE_GROUP_RTD }, \ + { "spm", RGXFWIF_LOG_TYPE_GROUP_SPM }, \ + { "pow", RGXFWIF_LOG_TYPE_GROUP_POW }, \ + { "hwr", RGXFWIF_LOG_TYPE_GROUP_HWR }, \ + { "hwp", RGXFWIF_LOG_TYPE_GROUP_HWP }, \ + { "rpm", RGXFWIF_LOG_TYPE_GROUP_RPM }, \ + { "dma", RGXFWIF_LOG_TYPE_GROUP_DMA }, \ + { "misc", RGXFWIF_LOG_TYPE_GROUP_MISC }, \ + { "debug", RGXFWIF_LOG_TYPE_GROUP_DEBUG } + + +/* Used in print statements to display log group state, one %s per group defined */ +#define RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s" + +/* Used in a print statement to display log group state, one per group */ +#define RGXFWIF_LOG_ENABLED_GROUPS_LIST(types) (((types) & RGXFWIF_LOG_TYPE_GROUP_MAIN) ?("main ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_MTS) ?("mts ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_CLEANUP) ?("cleanup ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_CSW) ?("csw ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_BIF) ?("bif ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_PM) ?("pm ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_RTD) ?("rtd ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_SPM) ?("spm ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_POW) ?("pow ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_HWR) ?("hwr ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_HWP) ?("hwp ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_RPM) ?("rpm ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_DMA) ?("dma ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_MISC) ?("misc ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_DEBUG) ?("debug ") :("")) + + 
+/************************************************************************ +* RGX FW signature checks +************************************************************************/ +#define RGXFW_SIG_BUFFER_SIZE_MIN (8192) + +/*! + ****************************************************************************** + * HWPERF + *****************************************************************************/ +/* Size of the Firmware L1 HWPERF buffer in bytes (2MB). Accessed by the + * Firmware and host driver. */ +#define RGXFW_HWPERF_L1_SIZE_MIN (16U) +#define RGXFW_HWPERF_L1_SIZE_DEFAULT PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB +#define RGXFW_HWPERF_L1_SIZE_MAX (12288U) + +/* This padding value must always be large enough to hold the biggest + * variable sized packet. */ +#define RGXFW_HWPERF_L1_PADDING_DEFAULT (RGX_HWPERF_MAX_PACKET_SIZE) + + +#define RGXFWIF_TIMEDIFF_ID ((0x1 << 28) | RGX_CR_TIMER) + +/*! + ****************************************************************************** + * Trace Buffer + *****************************************************************************/ + +/*! 
Default size of RGXFWIF_TRACEBUF_SPACE in DWords */ +#define RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS 12000U +#define RGXFW_TRACE_BUFFER_ASSERT_SIZE 200U +#if defined(RGXFW_META_SUPPORT_2ND_THREAD) +#define RGXFW_THREAD_NUM 2U +#else +#define RGXFW_THREAD_NUM 1U +#endif + +#define RGXFW_POLL_TYPE_SET 0x80000000U + +typedef struct +{ + IMG_CHAR szPath[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; + IMG_CHAR szInfo[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; + IMG_UINT32 ui32LineNum; +} UNCACHED_ALIGN RGXFWIF_FILE_INFO_BUF; + +typedef struct +{ + IMG_UINT32 ui32TracePointer; + +#if defined(RGX_FIRMWARE) + IMG_UINT32 *pui32RGXFWIfTraceBuffer; /* To be used by firmware for writing into trace buffer */ +#else + RGXFWIF_DEV_VIRTADDR pui32RGXFWIfTraceBuffer; +#endif + IMG_PUINT32 pui32TraceBuffer; /* To be used by host when reading from trace buffer */ + + RGXFWIF_FILE_INFO_BUF sAssertBuf; +} UNCACHED_ALIGN RGXFWIF_TRACEBUF_SPACE; + +#define RGXFWIF_FWFAULTINFO_MAX (8U) /* Total number of FW fault logs stored */ + +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN ui64CRTimer; + IMG_UINT64 RGXFW_ALIGN ui64OSTimer; + IMG_UINT32 RGXFW_ALIGN ui32Data; + IMG_UINT32 ui32Reserved; + RGXFWIF_FILE_INFO_BUF sFaultBuf; +} UNCACHED_ALIGN RGX_FWFAULTINFO; + + +#define RGXFWIF_POW_STATES \ + X(RGXFWIF_POW_OFF) /* idle and handshaked with the host (ready to full power down) */ \ + X(RGXFWIF_POW_ON) /* running HW commands */ \ + X(RGXFWIF_POW_FORCED_IDLE) /* forced idle */ \ + X(RGXFWIF_POW_IDLE) /* idle waiting for host handshake */ + +typedef enum +{ +#define X(NAME) NAME, + RGXFWIF_POW_STATES +#undef X +} RGXFWIF_POW_STATE; + +/* Firmware HWR states */ +#define RGXFWIF_HWR_HARDWARE_OK (0x1U << 0U) /*!< The HW state is ok or locked up */ +#define RGXFWIF_HWR_RESET_IN_PROGRESS (0x1U << 1U) /*!< Tells if a HWR reset is in progress */ +#define RGXFWIF_HWR_ANALYSIS_DONE (0x1U << 2U) /*!< The analysis of a GPU lockup has been performed */ +#define RGXFWIF_HWR_GENERAL_LOCKUP (0x1U << 3U) /*!< A DM unrelated lockup has been 
detected */ +#define RGXFWIF_HWR_DM_RUNNING_OK (0x1U << 4U) /*!< At least one DM is running without being close to a lockup */ +#define RGXFWIF_HWR_DM_STALLING (0x1U << 5U) /*!< At least one DM is close to lockup */ +#define RGXFWIF_HWR_FW_FAULT (0x1U << 6U) /*!< The FW has faulted and needs to restart */ +#define RGXFWIF_HWR_RESTART_REQUESTED (0x1U << 7U) /*!< The FW has requested the host to restart it */ + +#define RGXFWIF_PHR_STATE_SHIFT (8U) +#define RGXFWIF_PHR_RESTART_REQUESTED (IMG_UINT32_C(1) << RGXFWIF_PHR_STATE_SHIFT) /*!< The FW has requested the host to restart it, per PHR configuration */ +#define RGXFWIF_PHR_RESTART_FINISHED (IMG_UINT32_C(2) << RGXFWIF_PHR_STATE_SHIFT) /*!< A PHR triggered GPU reset has just finished */ +#define RGXFWIF_PHR_RESTART_MASK (RGXFWIF_PHR_RESTART_REQUESTED | RGXFWIF_PHR_RESTART_FINISHED) + +typedef IMG_UINT32 RGXFWIF_HWR_STATEFLAGS; + +/* Firmware per-DM HWR states */ +#define RGXFWIF_DM_STATE_WORKING (0x00U) /*!< DM is working if all flags are cleared */ +#define RGXFWIF_DM_STATE_READY_FOR_HWR (IMG_UINT32_C(0x1) << 0) /*!< DM is idle and ready for HWR */ +#define RGXFWIF_DM_STATE_NEEDS_SKIP (IMG_UINT32_C(0x1) << 2) /*!< DM need to skip to next cmd before resuming processing */ +#define RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP (IMG_UINT32_C(0x1) << 3) /*!< DM need partial render cleanup before resuming processing */ +#define RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR (IMG_UINT32_C(0x1) << 4) /*!< DM need to increment Recovery Count once fully recovered */ +#define RGXFWIF_DM_STATE_GUILTY_LOCKUP (IMG_UINT32_C(0x1) << 5) /*!< DM was identified as locking up and causing HWR */ +#define RGXFWIF_DM_STATE_INNOCENT_LOCKUP (IMG_UINT32_C(0x1) << 6) /*!< DM was innocently affected by another lockup which caused HWR */ +#define RGXFWIF_DM_STATE_GUILTY_OVERRUNING (IMG_UINT32_C(0x1) << 7) /*!< DM was identified as over-running and causing HWR */ +#define RGXFWIF_DM_STATE_INNOCENT_OVERRUNING (IMG_UINT32_C(0x1) << 8) /*!< DM was innocently affected by 
another DM over-running which caused HWR */ +#define RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH (IMG_UINT32_C(0x1) << 9) /*!< DM was forced into HWR as it delayed more important workloads */ + +/* Firmware's connection state */ +typedef enum +{ + RGXFW_CONNECTION_FW_OFFLINE = 0, /*!< Firmware is offline */ + RGXFW_CONNECTION_FW_READY, /*!< Firmware is initialised */ + RGXFW_CONNECTION_FW_ACTIVE, /*!< Firmware connection is fully established */ + RGXFW_CONNECTION_FW_OFFLOADING, /*!< Firmware is clearing up connection data */ + RGXFW_CONNECTION_FW_STATE_COUNT +} RGXFWIF_CONNECTION_FW_STATE; + +/* OS' connection state */ +typedef enum +{ + RGXFW_CONNECTION_OS_OFFLINE = 0, /*!< OS is offline */ + RGXFW_CONNECTION_OS_READY, /*!< OS's KM driver is setup and waiting */ + RGXFW_CONNECTION_OS_ACTIVE, /*!< OS connection is fully established */ + RGXFW_CONNECTION_OS_STATE_COUNT +} RGXFWIF_CONNECTION_OS_STATE; + +typedef struct +{ + IMG_UINT bfOsState : 3; + IMG_UINT bfFLOk : 1; + IMG_UINT bfFLGrowPending : 1; + IMG_UINT bfIsolatedOS : 1; + IMG_UINT bfReserved : 26; +} RGXFWIF_OS_RUNTIME_FLAGS; + +typedef IMG_UINT32 RGXFWIF_HWR_RECOVERYFLAGS; + +#if defined(PVRSRV_STALLED_CCB_ACTION) +#define PVR_SLR_LOG_ENTRIES 10 +#define PVR_SLR_LOG_STRLEN 30 /*!< MAX_CLIENT_CCB_NAME not visible to this header */ + +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN ui64Timestamp; + IMG_UINT32 ui32FWCtxAddr; + IMG_UINT32 ui32NumUFOs; + IMG_CHAR aszCCBName[PVR_SLR_LOG_STRLEN]; +} UNCACHED_ALIGN RGXFWIF_SLR_ENTRY; +#endif + +/* firmware trace control data */ +typedef struct +{ + IMG_UINT32 ui32LogType; + RGXFWIF_TRACEBUF_SPACE sTraceBuf[RGXFW_THREAD_NUM]; + IMG_UINT32 ui32TraceBufSizeInDWords; /*!< Member initialised only when sTraceBuf is actually allocated + * (in RGXTraceBufferInitOnDemandResources) */ + IMG_UINT32 ui32TracebufFlags; /*!< Compatibility and other flags */ +} UNCACHED_ALIGN RGXFWIF_TRACEBUF; + +/* firmware system data shared with the Host driver */ +typedef struct +{ + IMG_UINT32 
ui32ConfigFlags; /*!< Configuration flags from host */ + IMG_UINT32 ui32ConfigFlagsExt; /*!< Extended configuration flags from host */ + volatile RGXFWIF_POW_STATE ePowState; + volatile IMG_UINT32 ui32HWPerfRIdx; + volatile IMG_UINT32 ui32HWPerfWIdx; + volatile IMG_UINT32 ui32HWPerfWrapCount; + IMG_UINT32 ui32HWPerfSize; /*!< Constant after setup, needed in FW */ + IMG_UINT32 ui32HWPerfDropCount; /*!< The number of times the FW drops a packet due to buffer full */ + + /* ui32HWPerfUt, ui32FirstDropOrdinal, ui32LastDropOrdinal only valid when FW is built with + * RGX_HWPERF_UTILIZATION & RGX_HWPERF_DROP_TRACKING defined in rgxfw_hwperf.c */ + IMG_UINT32 ui32HWPerfUt; /*!< Buffer utilisation, high watermark of bytes in use */ + IMG_UINT32 ui32FirstDropOrdinal; /*!< The ordinal of the first packet the FW dropped */ + IMG_UINT32 ui32LastDropOrdinal; /*!< The ordinal of the last packet the FW dropped */ + volatile IMG_UINT32 aui32InterruptCount[RGXFW_THREAD_NUM]; /*!< Interrupt count from Threads > */ + RGXFWIF_OS_RUNTIME_FLAGS asOsRuntimeFlagsMirror[RGXFW_MAX_NUM_OS];/*!< State flags for each Operating System mirrored from Fw coremem */ + IMG_UINT32 aui32OSidPrioMirror[RGXFW_MAX_NUM_OS]; /*!< Priority for each Operating System mirrored from Fw coremem */ + IMG_UINT32 ui32PHRModeMirror; /*!< Periodic Hardware Reset Mode mirrored from Fw coremem */ + RGX_FWFAULTINFO sFaultInfo[RGXFWIF_FWFAULTINFO_MAX]; + IMG_UINT32 ui32FWFaults; + IMG_UINT32 aui32CrPollAddr[RGXFW_THREAD_NUM]; + IMG_UINT32 aui32CrPollMask[RGXFW_THREAD_NUM]; + IMG_UINT32 aui32CrPollCount[RGXFW_THREAD_NUM]; + IMG_UINT64 RGXFW_ALIGN ui64StartIdleTime; +#if defined(SUPPORT_POWMON_COMPONENT) +#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) + RGXFWIF_TRACEBUF_SPACE sPowerMonBuf; + IMG_UINT32 ui32PowerMonBufSizeInDWords; +#endif + IMG_UINT32 ui32PowMonEnergy; /*!< Non-volatile power monitoring results: + * static power (by default) + * energy count (PVR_POWER_MONITOR_DYNAMIC_ENERGY) */ +#endif + +#if 
defined(SUPPORT_VALIDATION) + IMG_UINT32 ui32KillingCtl; /*!< DM Killing Configuration from host */ +#endif +#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK) +#define RGXFWIF_STATS_FRAMEWORK_LINESIZE (8) +#define RGXFWIF_STATS_FRAMEWORK_MAX (2048*RGXFWIF_STATS_FRAMEWORK_LINESIZE) + IMG_UINT32 RGXFW_ALIGN aui32FWStatsBuf[RGXFWIF_STATS_FRAMEWORK_MAX]; +#endif + RGXFWIF_HWR_STATEFLAGS ui32HWRStateFlags; + RGXFWIF_HWR_RECOVERYFLAGS aui32HWRRecoveryFlags[RGXFWIF_DM_DEFAULT_MAX]; + IMG_UINT32 ui32FwSysDataFlags; /*!< Compatibility and other flags */ +} UNCACHED_ALIGN RGXFWIF_SYSDATA; + +/* per-os firmware shared data */ +typedef struct +{ + IMG_UINT32 ui32FwOsConfigFlags; /*!< Configuration flags from an OS */ + IMG_UINT32 ui32FWSyncCheckMark; /*!< Markers to signal that the host should perform a full sync check */ + IMG_UINT32 ui32HostSyncCheckMark; +#if defined(PVRSRV_STALLED_CCB_ACTION) + IMG_UINT32 ui32ForcedUpdatesRequested; + IMG_UINT8 ui8SLRLogWp; + RGXFWIF_SLR_ENTRY sSLRLogFirst; + RGXFWIF_SLR_ENTRY sSLRLog[PVR_SLR_LOG_ENTRIES]; + IMG_UINT64 RGXFW_ALIGN ui64LastForcedUpdateTime; +#endif + IMG_UINT32 ui32KCCBCmdsExecuted; + RGXFWIF_DEV_VIRTADDR sPowerSync; + IMG_UINT32 ui32FwOsDataFlags; /*!< Compatibility and other flags */ +} UNCACHED_ALIGN RGXFWIF_OSDATA; + +/* Firmware trace time-stamp field breakup */ + +/* RGX_CR_TIMER register read (48 bits) value*/ +#define RGXFWT_TIMESTAMP_TIME_SHIFT (0U) +#define RGXFWT_TIMESTAMP_TIME_CLRMSK (IMG_UINT64_C(0xFFFF000000000000)) + +/* Extra debug-info (16 bits) */ +#define RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT (48U) +#define RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK ~RGXFWT_TIMESTAMP_TIME_CLRMSK + + +/* Debug-info sub-fields */ +/* Bit 0: RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT bit from RGX_CR_EVENT_STATUS register */ +#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT (0U) +#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET (IMG_UINT16_C(1) << RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT) + +/* Bit 1: RGX_CR_BIF_MMU_ENTRY_PENDING bit from RGX_CR_BIF_MMU_ENTRY 
register */ +#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT (1U) +#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET (IMG_UINT16_C(1) << RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT) + +/* Bit 2-15: Unused bits */ + +#define RGXFWT_DEBUG_INFO_STR_MAXLEN 64 +#define RGXFWT_DEBUG_INFO_STR_PREPEND " (debug info: " +#define RGXFWT_DEBUG_INFO_STR_APPEND ")" + +/* Table of debug info sub-field's masks and corresponding message strings + * to be appended to firmware trace + * + * Mask : 16 bit mask to be applied to debug-info field + * String : debug info message string + */ + +#define RGXFWT_DEBUG_INFO_MSKSTRLIST \ +/*Mask, String*/ \ +X(RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET, "mmu pf") \ +X(RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET, "mmu pending") + +/*! + ****************************************************************************** + * HWR Data + *****************************************************************************/ +typedef enum +{ + RGX_HWRTYPE_UNKNOWNFAILURE = 0, + RGX_HWRTYPE_OVERRUN = 1, + RGX_HWRTYPE_POLLFAILURE = 2, + RGX_HWRTYPE_BIF0FAULT = 3, + RGX_HWRTYPE_BIF1FAULT = 4, + RGX_HWRTYPE_TEXASBIF0FAULT = 5, + RGX_HWRTYPE_MMUFAULT = 6, + RGX_HWRTYPE_MMUMETAFAULT = 7, + RGX_HWRTYPE_MIPSTLBFAULT = 8, + RGX_HWRTYPE_ECCFAULT = 9 +} RGX_HWRTYPE; + +#define RGXFWIF_HWRTYPE_BIF_BANK_GET(eHWRType) ((eHWRType == RGX_HWRTYPE_BIF0FAULT) ? 0 : 1) + +#define RGXFWIF_HWRTYPE_PAGE_FAULT_GET(eHWRType) ((eHWRType == RGX_HWRTYPE_BIF0FAULT || \ + eHWRType == RGX_HWRTYPE_BIF1FAULT || \ + eHWRType == RGX_HWRTYPE_TEXASBIF0FAULT || \ + eHWRType == RGX_HWRTYPE_MMUFAULT || \ + eHWRType == RGX_HWRTYPE_MMUMETAFAULT || \ + eHWRType == RGX_HWRTYPE_MIPSTLBFAULT) ? 
1 : 0) + +typedef struct +{ + IMG_UINT32 ui32FaultGPU; + IMG_UINT32 ui32FaultFW; +} RGX_ECCINFO; + +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN aui64MMUStatus[2]; + IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */ + IMG_UINT64 RGXFW_ALIGN ui64Reserved; +} RGX_MMUINFO; + +typedef struct +{ + IMG_UINT32 ui32ThreadNum; + IMG_UINT32 ui32CrPollAddr; + IMG_UINT32 ui32CrPollMask; + IMG_UINT32 ui32CrPollLastValue; + IMG_UINT64 RGXFW_ALIGN ui64Reserved; +} UNCACHED_ALIGN RGX_POLLINFO; + +typedef struct +{ + union + { + RGX_MMUINFO sMMUInfo; + RGX_POLLINFO sPollInfo; + RGX_ECCINFO sECCInfo; + } uHWRData; + + IMG_UINT64 RGXFW_ALIGN ui64CRTimer; + IMG_UINT64 RGXFW_ALIGN ui64OSTimer; + IMG_UINT32 ui32FrameNum; + IMG_UINT32 ui32PID; + IMG_UINT32 ui32ActiveHWRTData; + IMG_UINT32 ui32HWRNumber; + IMG_UINT32 ui32EventStatus; + IMG_UINT32 ui32HWRRecoveryFlags; + RGX_HWRTYPE eHWRType; + RGXFWIF_DM eDM; + RGX_HW_ERR eHWErrorCode; /*!< Error code used to determine HW fault */ + IMG_UINT64 RGXFW_ALIGN ui64CRTimeOfKick; + IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetStart; + IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetFinish; + IMG_UINT64 RGXFW_ALIGN ui64CRTimeFreelistReady; + IMG_UINT64 RGXFW_ALIGN ui64Reserved; /*!< Pad to 16 64-bit words */ +} UNCACHED_ALIGN RGX_HWRINFO; + +#define RGXFWIF_HWINFO_MAX_FIRST 8U /* Number of first HWR logs recorded (never overwritten by newer logs) */ +#define RGXFWIF_HWINFO_MAX_LAST 8U /* Number of latest HWR logs (older logs are overwritten by newer logs) */ +#define RGXFWIF_HWINFO_MAX (RGXFWIF_HWINFO_MAX_FIRST + RGXFWIF_HWINFO_MAX_LAST) /* Total number of HWR logs stored in a buffer */ +#define RGXFWIF_HWINFO_LAST_INDEX (RGXFWIF_HWINFO_MAX - 1U) /* Index of the last log in the HWR log buffer */ + +typedef struct +{ + RGX_HWRINFO sHWRInfo[RGXFWIF_HWINFO_MAX]; + IMG_UINT32 ui32HwrCounter; + IMG_UINT32 ui32WriteIndex; + IMG_UINT32 ui32DDReqCount; + IMG_UINT32 ui32HWRInfoBufFlags; /* Compatibility and other flags */ + IMG_UINT32 
aui32HwrDmLockedUpCount[RGXFWIF_DM_DEFAULT_MAX]; + IMG_UINT32 aui32HwrDmOverranCount[RGXFWIF_DM_DEFAULT_MAX]; + IMG_UINT32 aui32HwrDmRecoveredCount[RGXFWIF_DM_DEFAULT_MAX]; + IMG_UINT32 aui32HwrDmFalseDetectCount[RGXFWIF_DM_DEFAULT_MAX]; +} UNCACHED_ALIGN RGXFWIF_HWRINFOBUF; + +#define RGXFWIF_CTXSWITCH_PROFILE_FAST_EN (1U) +#define RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN (2U) +#define RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN (3U) +#define RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN (4U) + +#define RGXFWIF_CDM_ARBITRATION_TASK_DEMAND_EN (0x1) +#define RGXFWIF_CDM_ARBITRATION_ROUND_ROBIN_EN (0x2) + +#define RGXFWIF_ISP_SCHEDMODE_VER1_IPP (0x1) +#define RGXFWIF_ISP_SCHEDMODE_VER2_ISP (0x2) +/*! + ****************************************************************************** + * RGX firmware Init Config Data + * NOTE: Please be careful to keep backwards compatibility with DDKv1 for the + * CTXSWITCH controls. + *****************************************************************************/ + +/* Flag definitions affecting the firmware globally */ +#define RGXFWIF_INICFG_CTXSWITCH_MODE_RAND (0x1 << 0) /*!< Randomise context switch requests */ +#define RGXFWIF_INICFG_CTXSWITCH_SRESET_EN (0x1 << 1) +#define RGXFWIF_INICFG_HWPERF_EN (0x1 << 2) +#define RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN (0x1 << 3) /*!< Randomise DM-killing requests */ +#define RGXFWIF_INICFG_POW_RASCALDUST (0x1 << 4) +#define RGXFWIF_INICFG_HWR_EN (0x1 << 5) +#define RGXFWIF_INICFG_FBCDC_V3_1_EN (0x1 << 6) +#define RGXFWIF_INICFG_CHECK_MLIST_EN (0x1 << 7) +#define RGXFWIF_INICFG_DISABLE_CLKGATING_EN (0x1 << 8) +#define RGXFWIF_INICFG_POLL_COUNTERS_EN (0x1 << 9) +/* 10 unused */ +/* 11 unused */ +#define RGXFWIF_INICFG_REGCONFIG_EN (0x1 << 12) +#define RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY (0x1 << 13) +#define RGXFWIF_INICFG_HWP_DISABLE_FILTER (0x1 << 14) +#define RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN (0x1 << 15) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT (16) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST 
(RGXFWIF_CTXSWITCH_PROFILE_FAST_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM (RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW (RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY (RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK (IMG_UINT32_C(0x7) << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define RGXFWIF_INICFG_DISABLE_DM_OVERLAP (0x1 << 19) +#define RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER (0x1 << 20) +#define RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED (0x1 << 21) +#define RGXFWIF_INICFG_VALIDATE_IRQ (0x1 << 22) +#define RGXFWIF_INICFG_DISABLE_PDP_EN (0x1 << 23) +#define RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN (0x1 << 24) +#define RGXFWIF_INICFG_WORKEST (0x1 << 25) +#define RGXFWIF_INICFG_PDVFS (0x1 << 26) +#define RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT (27) +#define RGXFWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND (RGXFWIF_CDM_ARBITRATION_TASK_DEMAND_EN << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) +#define RGXFWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN (RGXFWIF_CDM_ARBITRATION_ROUND_ROBIN_EN << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) +#define RGXFWIF_INICFG_CDM_ARBITRATION_MASK (IMG_UINT32_C(0x3) << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) +#define RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT (29) +#define RGXFWIF_INICFG_ISPSCHEDMODE_NONE (0) +#define RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP (RGXFWIF_ISP_SCHEDMODE_VER1_IPP << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) +#define RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP (RGXFWIF_ISP_SCHEDMODE_VER2_ISP << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) +#define RGXFWIF_INICFG_ISPSCHEDMODE_MASK (RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP |\ + RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP) +#define RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER (0x1 << 31) +#define RGXFWIF_INICFG_ALL (0xFFFFF3FFU) + +/* Extended Flag 
definitions affecting the firmware globally */ +#define RGXFWIF_INICFG_EXT_ALL (0x0U) + +#define RGXFWIF_INICFG_SYS_CTXSWITCH_CLRMSK ~(RGXFWIF_INICFG_CTXSWITCH_MODE_RAND | \ + RGXFWIF_INICFG_CTXSWITCH_SRESET_EN) + +/* Flag definitions affecting only workloads submitted by a particular OS */ +#define RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN (0x1 << 0) +#define RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN (0x1 << 1) /*!< Enables GEOM-TA and GEOM-SHG context switch */ +#define RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN (0x1 << 2) +#define RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN (0x1 << 3) +/* #define RGXFWIF_INICFG_OS_CTXSWITCH_RTU_EN_DEPRECATED (0x1 << 4) !< Used for RTU DM-kill only. The RTU does not context switch */ + +#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM (0x1 << 4) +#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM (0x1 << 5) +#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D (0x1 << 6) +#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM (0x1 << 7) + +#define RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL (RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN | \ + RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN | \ + RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN | \ + RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN) + +#define RGXFWIF_INICFG_OS_ALL (0xFFU) + +#define RGXFWIF_INICFG_OS_CTXSWITCH_CLRMSK ~(RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL) + +#define RGXFWIF_FILTCFG_TRUNCATE_HALF (0x1U << 3) +#define RGXFWIF_FILTCFG_TRUNCATE_INT (0x1U << 2) +#define RGXFWIF_FILTCFG_NEW_FILTER_MODE (0x1U << 1) + +typedef enum +{ + RGX_ACTIVEPM_FORCE_OFF = 0, + RGX_ACTIVEPM_FORCE_ON = 1, + RGX_ACTIVEPM_DEFAULT = 2 +} RGX_ACTIVEPM_CONF; + +typedef enum +{ + RGX_RD_POWER_ISLAND_FORCE_OFF = 0, + RGX_RD_POWER_ISLAND_FORCE_ON = 1, + RGX_RD_POWER_ISLAND_DEFAULT = 2 +} RGX_RD_POWER_ISLAND_CONF; + +/*! 
+ ****************************************************************************** + * Querying DM state + *****************************************************************************/ + +typedef struct +{ + IMG_UINT16 ui16RegNum; /*!< Register number */ + IMG_UINT16 ui16IndirectRegNum; /*!< Indirect register number (or 0 if not used) */ + IMG_UINT16 ui16IndirectStartVal; /*!< Start value for indirect register */ + IMG_UINT16 ui16IndirectEndVal; /*!< End value for indirect register */ +} RGXFW_REGISTER_LIST; + + +#if defined(RGX_FIRMWARE) +typedef DLLIST_NODE RGXFWIF_DLLIST_NODE; +#else +typedef struct {RGXFWIF_DEV_VIRTADDR p; + RGXFWIF_DEV_VIRTADDR n;} RGXFWIF_DLLIST_NODE; +#endif + +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_SIGBUFFER; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TRACEBUF; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_SYSDATA; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_OSDATA; +#if defined(SUPPORT_TBI_INTERFACE) +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TBIBUF; +#endif +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERFBUF; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWRINFOBUF; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RUNTIME_CFG; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_GPU_UTIL_FWCB; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_REG_CFG; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERF_CTL; +typedef RGXFWIF_DEV_VIRTADDR PRGX_HWPERF_CONFIG_CNTBLK; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_CTL; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_RTN_SLOTS; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWMEMCONTEXT; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWCOMMONCONTEXT; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_ZSBUFFER; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_COMMONCTX_STATE; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CORE_CLK_RATE; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FIRMWAREGCOVBUFFER; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB_CTL; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FREELIST; +typedef RGXFWIF_DEV_VIRTADDR 
PRGXFWIF_HWRTDATA; + +/*! + * This number is used to represent an invalid page catalogue physical address + */ +#define RGXFWIF_INVALID_PC_PHYADDR 0xFFFFFFFFFFFFFFFFLLU + +/*! + * This number is used to represent unallocated page catalog base register + */ +#define RGXFW_BIF_INVALID_PCREG 0xFFFFFFFFU + +/*! + Firmware memory context. +*/ +typedef struct +{ + IMG_DEV_PHYADDR RGXFW_ALIGN sPCDevPAddr; /*!< device physical address of context's page catalogue */ + IMG_UINT32 uiPageCatBaseRegID; /*!< associated page catalog base register (RGXFW_BIF_INVALID_PCREG == unallocated) */ + IMG_UINT32 uiBreakpointAddr; /*!< breakpoint address */ + IMG_UINT32 uiBPHandlerAddr; /*!< breakpoint handler address */ + IMG_UINT32 uiBreakpointCtl; /*!< DM and enable control for BP */ + IMG_UINT64 RGXFW_ALIGN ui64FBCStateIDMask; /*!< FBCDC state descriptor IDs (non-zero means defer on mem context activation) */ + IMG_UINT32 ui32FwMemCtxFlags; /*!< Compatibility and other flags */ + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + IMG_UINT32 ui32OSid; + IMG_BOOL bOSidAxiProt; +#endif + +} UNCACHED_ALIGN RGXFWIF_FWMEMCONTEXT; + +/*! 
+ * FW context state flags + */ +#define RGXFWIF_CONTEXT_FLAGS_NEED_RESUME (0x00000001U) +#define RGXFWIF_CONTEXT_FLAGS_TDM_HEADER_STALE (0x00000002U) + +typedef struct +{ + /* FW-accessible TA state which must be written out to memory on context store */ + IMG_UINT64 RGXFW_ALIGN uTAReg_DCE_CMD0; + IMG_UINT64 RGXFW_ALIGN uTAReg_DCE_CMD1; + IMG_UINT64 RGXFW_ALIGN uTAReg_DCE_DRAW0; + IMG_UINT64 RGXFW_ALIGN uTAReg_DCE_DRAW1; + IMG_UINT64 RGXFW_ALIGN uTAReg_DCE_WRITE; + IMG_UINT32 RGXFW_ALIGN uTAReg_GTA_SO_PRIM[4]; + IMG_UINT16 ui16TACurrentIdx; +} UNCACHED_ALIGN RGXFWIF_TACTX_STATE; + +/* The following defines need to be auto generated using the HW defines + * rather than hard coding it */ +#define RGXFWIF_ISP_PIPE_COUNT_MAX (20) +#define RGXFWIF_PIPE_COUNT_PER_ISP (2) +#define RGXFWIF_IPP_RESUME_REG_COUNT (1) + +#if !defined(__KERNEL__) +#define RGXFWIF_ISP_COUNT (RGX_FEATURE_NUM_SPU * RGX_FEATURE_NUM_ISP_PER_SPU) +#define RGXFWIF_ISP_PIPE_COUNT (RGXFWIF_ISP_COUNT * RGXFWIF_PIPE_COUNT_PER_ISP) +#if RGXFWIF_ISP_PIPE_COUNT > RGXFWIF_ISP_PIPE_COUNT_MAX +#error RGXFWIF_ISP_PIPE_COUNT should not be greater than RGXFWIF_ISP_PIPE_COUNT_MAX +#endif +#endif /* !defined(__KERNEL__) */ + +typedef struct +{ +#if defined(PM_INTERACTIVE_MODE) + IMG_UINT64 RGXFW_ALIGN u3DReg_PM_DEALLOCATED_MASK_STATUS; /*!< Managed by PM HW in the non-interactive mode */ +#endif + IMG_UINT32 ui32CtxStateFlags; /*!< Compatibility and other flags */ + + /* FW-accessible ISP state which must be written out to memory on context store */ + /* au3DReg_ISP_STORE should be the last element of the structure + * as this is an array whose size is determined at runtime + * after detecting the RGX core */ + IMG_UINT64 RGXFW_ALIGN au3DReg_ISP_STORE[]; +} UNCACHED_ALIGN RGXFWIF_3DCTX_STATE; + +#define RGXFWIF_CTX_USING_BUFFER_A (0) +#define RGXFWIF_CTX_USING_BUFFER_B (1U) + +typedef struct +{ + IMG_UINT32 ui32CtxStateFlags; /*!< Target buffer and other flags */ +} RGXFWIF_COMPUTECTX_STATE; + + +typedef struct 
RGXFWIF_FWCOMMONCONTEXT_ +{ + /* CCB details for this firmware context */ + PRGXFWIF_CCCB_CTL psCCBCtl; /*!< CCB control */ + PRGXFWIF_CCCB psCCB; /*!< CCB base */ + RGXFWIF_DMA_ADDR sCCBMetaDMAAddr; + + RGXFWIF_DLLIST_NODE RGXFW_ALIGN sWaitingNode; /*!< List entry for the waiting list */ + RGXFWIF_DLLIST_NODE RGXFW_ALIGN sRunNode; /*!< List entry for the run list */ + RGXFWIF_UFO sLastFailedUFO; /*!< UFO that last failed (or NULL) */ + + PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ + + /* Context suspend state */ + PRGXFWIF_COMMONCTX_STATE RGXFW_ALIGN psContextState; /*!< TA/3D context suspend state, read/written by FW */ + + /* + * Flags e.g. for context switching + */ + IMG_UINT32 ui32FWComCtxFlags; + IMG_UINT32 ui32Priority; + IMG_UINT32 ui32PrioritySeqNum; + + /* References to the host side originators */ + IMG_UINT32 ui32ServerCommonContextID; /*!< the Server Common Context */ + IMG_UINT32 ui32PID; /*!< associated process ID */ + + /* Statistic updates waiting to be passed back to the host... 
*/ + IMG_BOOL bStatsPending; /*!< True when some stats are pending */ + IMG_INT32 i32StatsNumStores; /*!< Number of stores on this context since last update */ + IMG_INT32 i32StatsNumOutOfMemory; /*!< Number of OOMs on this context since last update */ + IMG_INT32 i32StatsNumPartialRenders; /*!< Number of PRs on this context since last update */ + RGXFWIF_DM eDM; /*!< Data Master type */ + IMG_UINT64 RGXFW_ALIGN ui64WaitSignalAddress; /*!< Device Virtual Address of the signal the context is waiting on */ + RGXFWIF_DLLIST_NODE RGXFW_ALIGN sWaitSignalNode; /*!< List entry for the wait-signal list */ + RGXFWIF_DLLIST_NODE RGXFW_ALIGN sBufStalledNode; /*!< List entry for the buffer stalled list */ + IMG_UINT64 RGXFW_ALIGN ui64CBufQueueCtrlAddr; /*!< Address of the circular buffer queue pointers */ + + IMG_UINT64 RGXFW_ALIGN ui64RobustnessAddress; + IMG_UINT32 ui32MaxDeadlineMS; /*!< Max HWR deadline limit in ms */ + IMG_BOOL bReadOffsetNeedsReset; /*!< Following HWR circular buffer read-offset needs resetting */ +} UNCACHED_ALIGN RGXFWIF_FWCOMMONCONTEXT; + +/*! + Firmware render context. +*/ +typedef struct +{ + RGXFWIF_FWCOMMONCONTEXT sTAContext; /*!< Firmware context for the TA */ + RGXFWIF_FWCOMMONCONTEXT s3DContext; /*!< Firmware context for the 3D */ + + IMG_UINT32 ui32TotalNumPartialRenders; /*!< Total number of partial renders */ + IMG_UINT32 ui32TotalNumOutOfMemory; /*!< Total number of OOMs */ + IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ + IMG_UINT32 ui32FwRenderCtxFlags; /*!< Compatibility and other flags */ + + RGXFWIF_STATIC_RENDERCONTEXT_STATE sStaticRenderContextState; + +} UNCACHED_ALIGN RGXFWIF_FWRENDERCONTEXT; + +/*! + Firmware compute context. 
+*/ +typedef struct +{ + RGXFWIF_FWCOMMONCONTEXT sCDMContext; /*!< Firmware context for the CDM */ + + RGXFWIF_STATIC_COMPUTECONTEXT_STATE sStaticComputeContextState; + + IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ + +} UNCACHED_ALIGN RGXFWIF_FWCOMPUTECONTEXT; + +/*! + Firmware TDM context. +*/ +typedef struct +{ + RGXFWIF_FWCOMMONCONTEXT sTDMContext; /*!< Firmware context for the TDM */ + + IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ +#if defined(SUPPORT_TRP) + /* TRP state + * + * Stored state is used to kick the same geometry or 3D twice, + * State is stored before first kick and restored before second to rerun the same data. + */ + IMG_UINT32 ui32TRPState; + IMG_UINT64 RGXFW_ALIGN aui64TRPChecksums2D[2]; +#endif + +} UNCACHED_ALIGN RGXFWIF_FWTDMCONTEXT; + +/*! + ****************************************************************************** + * Defines for CMD_TYPE corruption detection and forward compatibility check + *****************************************************************************/ + +/* CMD_TYPE 32bit contains: + * 31:16 Reserved for magic value to detect corruption (16 bits) + * 15 Reserved for RGX_CCB_TYPE_TASK (1 bit) + * 14:0 Bits available for CMD_TYPEs (15 bits) */ + + +/* Magic value to detect corruption */ +#define RGX_CMD_MAGIC_DWORD IMG_UINT32_C(0x2ABC) +#define RGX_CMD_MAGIC_DWORD_MASK (0xFFFF0000U) +#define RGX_CMD_MAGIC_DWORD_SHIFT (16U) +#define RGX_CMD_MAGIC_DWORD_SHIFTED (RGX_CMD_MAGIC_DWORD << RGX_CMD_MAGIC_DWORD_SHIFT) + + +/*! + ****************************************************************************** + * Kernel CCB control for RGX + *****************************************************************************/ +typedef struct +{ + volatile IMG_UINT32 ui32WriteOffset; /*!< write offset into array of commands (MUST be aligned to 16 bytes!) 
*/ + volatile IMG_UINT32 ui32ReadOffset; /*!< read offset into array of commands */ + IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask (Total capacity of the CCB - 1) */ + IMG_UINT32 ui32CmdSize; /*!< size of each command in bytes */ +} UNCACHED_ALIGN RGXFWIF_CCB_CTL; + +/*! + ****************************************************************************** + * Kernel CCB command structure for RGX + *****************************************************************************/ +#define RGXFWIF_MMUCACHEDATA_FLAGS_PT (0x1) /* MMU_CTRL_INVAL_PT_EN */ +#define RGXFWIF_MMUCACHEDATA_FLAGS_PD (0x2) /* MMU_CTRL_INVAL_PD_EN */ +#define RGXFWIF_MMUCACHEDATA_FLAGS_PC (0x4) /* MMU_CTRL_INVAL_PC_EN */ +#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */ + +#define RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT (0x4000000) /* indicates FW should interrupt the host */ + +typedef struct +{ + PRGXFWIF_FWMEMCONTEXT sMemoryContext; + IMG_UINT32 ui32Flags; + IMG_UINT32 ui32FWFlags; + RGXFWIF_DEV_VIRTADDR sMMUCacheSync; + IMG_UINT32 ui32MMUCacheSyncUpdateValue; +} RGXFWIF_MMUCACHEDATA; + +#define RGXFWIF_BPDATA_FLAGS_ENABLE (1U << 0) +#define RGXFWIF_BPDATA_FLAGS_WRITE (1U << 1) +#define RGXFWIF_BPDATA_FLAGS_CTL (1U << 2) +#define RGXFWIF_BPDATA_FLAGS_REGS (1U << 3) + +typedef struct +{ + PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ + IMG_UINT32 ui32BPAddr; /*!< Breakpoint address */ + IMG_UINT32 ui32HandlerAddr; /*!< Breakpoint handler */ + IMG_UINT32 ui32BPDM; /*!< Breakpoint control */ + IMG_UINT32 ui32BPDataFlags; + IMG_UINT32 ui32TempRegs; /*!< Number of temporary registers to overallocate */ + IMG_UINT32 ui32SharedRegs; /*!< Number of shared registers to overallocate */ + RGXFWIF_DM eDM; /*!< DM associated with the breakpoint */ +} RGXFWIF_BPDATA; + +#define RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS (RGXFWIF_PRBUFFER_MAXSUPPORTED + 1U) /* +1 is RTDATASET cleanup */ + +typedef struct +{ + PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< 
address of the firmware context */ + IMG_UINT32 ui32CWoffUpdate; /*!< Client CCB woff update */ + IMG_UINT32 ui32CWrapMaskUpdate; /*!< Client CCB wrap mask update after CCCB growth */ + IMG_UINT32 ui32NumCleanupCtl; /*!< number of CleanupCtl pointers attached */ + PRGXFWIF_CLEANUP_CTL apsCleanupCtl[RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS]; /*!< CleanupCtl structures associated with command */ + IMG_UINT32 ui32WorkEstCmdHeaderOffset; /*!< offset to the CmdHeader which houses the workload estimation kick data. */ +} RGXFWIF_KCCB_CMD_KICK_DATA; + +typedef struct +{ + RGXFWIF_KCCB_CMD_KICK_DATA sTACmdKickData; + RGXFWIF_KCCB_CMD_KICK_DATA s3DCmdKickData; +} RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA; + +typedef struct +{ + PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< address of the firmware context */ + IMG_UINT32 ui32CCBFenceOffset; /*!< Client CCB fence offset */ +} RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA; + +typedef struct +{ + IMG_UINT32 ui32PHRMode; /*!< Variable containing PHR configuration values */ +} RGXFWIF_KCCB_CMD_PHR_CFG_DATA; + +#define RGXIF_PHR_MODE_OFF (0UL) +#define RGXIF_PHR_MODE_RD_RESET (1UL) +#define RGXIF_PHR_MODE_FULL_RESET (2UL) + +typedef enum +{ + RGXFWIF_CLEANUP_FWCOMMONCONTEXT, /*!< FW common context cleanup */ + RGXFWIF_CLEANUP_HWRTDATA, /*!< FW HW RT data cleanup */ + RGXFWIF_CLEANUP_FREELIST, /*!< FW freelist cleanup */ + RGXFWIF_CLEANUP_ZSBUFFER, /*!< FW ZS Buffer cleanup */ +} RGXFWIF_CLEANUP_TYPE; + +typedef struct +{ + RGXFWIF_CLEANUP_TYPE eCleanupType; /*!< Cleanup type */ + union { + PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< FW common context to cleanup */ + PRGXFWIF_HWRTDATA psHWRTData; /*!< HW RT to cleanup */ + PRGXFWIF_FREELIST psFreelist; /*!< Freelist to cleanup */ + PRGXFWIF_ZSBUFFER psZSBuffer; /*!< ZS Buffer to cleanup */ + } uCleanupData; +} RGXFWIF_CLEANUP_REQUEST; + +typedef enum +{ + RGXFWIF_POW_OFF_REQ = 1, + RGXFWIF_POW_FORCED_IDLE_REQ, + RGXFWIF_POW_NUM_UNITS_CHANGE, + RGXFWIF_POW_APM_LATENCY_CHANGE +} 
RGXFWIF_POWER_TYPE; + +typedef enum +{ + RGXFWIF_POWER_FORCE_IDLE = 1, + RGXFWIF_POWER_CANCEL_FORCED_IDLE, + RGXFWIF_POWER_HOST_TIMEOUT, +} RGXFWIF_POWER_FORCE_IDLE_TYPE; + +typedef struct +{ + RGXFWIF_POWER_TYPE ePowType; /*!< Type of power request */ + union + { + IMG_UINT32 ui32PowUnitsStateMask;/*!< New power units state mask*/ + IMG_BOOL bForced; /*!< If the operation is mandatory */ + RGXFWIF_POWER_FORCE_IDLE_TYPE ePowRequestType; /*!< Type of Request. Consolidating Force Idle, Cancel Forced Idle, Host Timeout */ + IMG_UINT32 ui32ActivePMLatencyms; /*!< Number of milliseconds to set APM latency */ + } uPowerReqData; +} RGXFWIF_POWER_REQUEST; + +typedef struct +{ + PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context to fence on (only useful when bDMContext == TRUE) */ + IMG_BOOL bInval; /*!< Invalidate the cache as well as flushing */ + IMG_BOOL bDMContext; /*!< The data to flush/invalidate belongs to a specific DM context */ + IMG_UINT64 RGXFW_ALIGN ui64Address; /*!< Optional address of range (only useful when bDMContext == FALSE) */ + IMG_UINT64 RGXFW_ALIGN ui64Size; /*!< Optional size of range (only useful when bDMContext == FALSE) */ +} RGXFWIF_SLCFLUSHINVALDATA; + +typedef struct +{ + IMG_UINT32 ui32HCSDeadlineMS; /* New number of milliseconds C/S is allowed to last */ +} RGXFWIF_HCS_CTL; + +typedef enum{ + RGXFWIF_HWPERF_CTRL_TOGGLE = 0, + RGXFWIF_HWPERF_CTRL_SET = 1, + RGXFWIF_HWPERF_CTRL_EMIT_FEATURES_EV = 2 +} RGXFWIF_HWPERF_UPDATE_CONFIG; + +typedef struct +{ + RGXFWIF_HWPERF_UPDATE_CONFIG eOpCode; /*!< Control operation code */ + IMG_UINT64 RGXFW_ALIGN ui64Mask; /*!< Mask of events to toggle */ +} RGXFWIF_HWPERF_CTRL; + +typedef enum { + RGXFWIF_HWPERF_CNTR_NOOP = 0, /* No-Op */ + RGXFWIF_HWPERF_CNTR_ENABLE = 1, /* Enable Counters */ + RGXFWIF_HWPERF_CNTR_DISABLE = 2 /* Disable Counters */ +} RGXFWIF_HWPERF_CNTR_CONFIG; + +typedef struct +{ + IMG_UINT32 ui32CtrlWord; + IMG_UINT32 ui32NumBlocks; /*!< Number of RGX_HWPERF_CONFIG_CNTBLK in the array */ + 
PRGX_HWPERF_CONFIG_CNTBLK sBlockConfigs; /*!< Address of the RGX_HWPERF_CONFIG_CNTBLK array */ +} RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS; + +typedef struct +{ + IMG_UINT32 ui32NewClockSpeed; /*!< New clock speed */ +} RGXFWIF_CORECLKSPEEDCHANGE_DATA; + +#define RGXFWIF_HWPERF_CTRL_BLKS_MAX 16 + +typedef struct +{ + bool bEnable; + IMG_UINT32 ui32NumBlocks; /*!< Number of block IDs in the array */ + IMG_UINT16 aeBlockIDs[RGXFWIF_HWPERF_CTRL_BLKS_MAX]; /*!< Array of RGX_HWPERF_CNTBLK_ID values */ +} RGXFWIF_HWPERF_CTRL_BLKS; + +typedef struct +{ + RGXFWIF_DEV_VIRTADDR sZSBufferFWDevVAddr; /*!< ZS-Buffer FW address */ + IMG_UINT32 bDone; /*!< action backing/unbacking succeeded */ +} RGXFWIF_ZSBUFFER_BACKING_DATA; + +typedef struct +{ + IMG_UINT32 ui32IsolationPriorityThreshold; +} RGXFWIF_OSID_ISOLATION_GROUP_DATA; + +typedef struct +{ + RGXFWIF_DEV_VIRTADDR sFreeListFWDevVAddr; /*!< Freelist FW address */ + IMG_UINT32 ui32DeltaPages; /*!< Amount of the Freelist change */ + IMG_UINT32 ui32NewPages; /*!< New amount of pages on the freelist (including ready pages) */ + IMG_UINT32 ui32ReadyPages; /*!< Number of ready pages to be held in reserve until OOM */ +} RGXFWIF_FREELIST_GS_DATA; + +#define RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT (MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS * 2U) +#define RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG 0x80000000U + +typedef struct +{ + IMG_UINT32 ui32FreelistsCount; + IMG_UINT32 aui32FreelistIDs[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT]; +} RGXFWIF_FREELISTS_RECONSTRUCTION_DATA; + + +typedef struct +{ + IMG_DEV_VIRTADDR RGXFW_ALIGN sDevSignalAddress; /*!< device virtual address of the updated signal */ + PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ +} UNCACHED_ALIGN RGXFWIF_SIGNAL_UPDATE_DATA; + + +typedef struct +{ + PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context to that may need to be resumed following write offset update */ +} UNCACHED_ALIGN RGXFWIF_WRITE_OFFSET_UPDATE_DATA; + +/*! 
+ ****************************************************************************** + * Proactive DVFS Structures + *****************************************************************************/ +#define NUM_OPP_VALUES 16 + +typedef struct +{ + IMG_UINT32 ui32Volt; /* V */ + IMG_UINT32 ui32Freq; /* Hz */ +} UNCACHED_ALIGN PDVFS_OPP; + +typedef struct +{ + PDVFS_OPP asOPPValues[NUM_OPP_VALUES]; +#if defined(DEBUG) + IMG_UINT32 ui32MinOPPPoint; +#endif + IMG_UINT32 ui32MaxOPPPoint; +} UNCACHED_ALIGN RGXFWIF_PDVFS_OPP; + +typedef struct +{ + IMG_UINT32 ui32MaxOPPPoint; +} UNCACHED_ALIGN RGXFWIF_PDVFS_MAX_FREQ_DATA; + +typedef struct +{ + IMG_UINT32 ui32MinOPPPoint; +} UNCACHED_ALIGN RGXFWIF_PDVFS_MIN_FREQ_DATA; + +/*! + ****************************************************************************** + * Register configuration structures + *****************************************************************************/ + +#define RGXFWIF_REG_CFG_MAX_SIZE 512 + +typedef enum +{ + RGXFWIF_REGCFG_CMD_ADD = 101, + RGXFWIF_REGCFG_CMD_CLEAR = 102, + RGXFWIF_REGCFG_CMD_ENABLE = 103, + RGXFWIF_REGCFG_CMD_DISABLE = 104 +} RGXFWIF_REGDATA_CMD_TYPE; + +typedef enum +{ + RGXFWIF_REG_CFG_TYPE_PWR_ON=0, /* Sidekick power event */ + RGXFWIF_REG_CFG_TYPE_DUST_CHANGE, /* Rascal / dust power event */ + RGXFWIF_REG_CFG_TYPE_TA, /* TA kick */ + RGXFWIF_REG_CFG_TYPE_3D, /* 3D kick */ + RGXFWIF_REG_CFG_TYPE_CDM, /* Compute kick */ + RGXFWIF_REG_CFG_TYPE_TDM, /* TDM kick */ + RGXFWIF_REG_CFG_TYPE_ALL /* Applies to all types. Keep as last element */ +} RGXFWIF_REG_CFG_TYPE; + +typedef struct +{ + IMG_UINT64 ui64Addr; + IMG_UINT64 ui64Mask; + IMG_UINT64 ui64Value; +} RGXFWIF_REG_CFG_REC; + +typedef struct +{ + RGXFWIF_REGDATA_CMD_TYPE eCmdType; + RGXFWIF_REG_CFG_TYPE eRegConfigType; + RGXFWIF_REG_CFG_REC RGXFW_ALIGN sRegConfig; + +} RGXFWIF_REGCONFIG_DATA; + +typedef struct +{ + /** + * PDump WRW command write granularity is 32 bits. + * Add padding to ensure array size is 32 bit granular. 
+ */ + IMG_UINT8 RGXFW_ALIGN aui8NumRegsType[PVR_ALIGN(RGXFWIF_REG_CFG_TYPE_ALL,sizeof(IMG_UINT32))]; + RGXFWIF_REG_CFG_REC RGXFW_ALIGN asRegConfigs[RGXFWIF_REG_CFG_MAX_SIZE]; +} UNCACHED_ALIGN RGXFWIF_REG_CFG; + +/* OSid Scheduling Priority Change */ +typedef struct +{ + IMG_UINT32 ui32OSidNum; + IMG_UINT32 ui32Priority; +} RGXFWIF_OSID_PRIORITY_DATA; + +typedef enum +{ + RGXFWIF_OS_ONLINE = 1, + RGXFWIF_OS_OFFLINE +} RGXFWIF_OS_STATE_CHANGE; + +typedef struct +{ + IMG_UINT32 ui32OSid; + RGXFWIF_OS_STATE_CHANGE eNewOSState; +} UNCACHED_ALIGN RGXFWIF_OS_STATE_CHANGE_DATA; + +typedef enum +{ + /* Common commands */ + RGXFWIF_KCCB_CMD_KICK = 101U | RGX_CMD_MAGIC_DWORD_SHIFTED, + RGXFWIF_KCCB_CMD_MMUCACHE = 102U | RGX_CMD_MAGIC_DWORD_SHIFTED, + RGXFWIF_KCCB_CMD_BP = 103U | RGX_CMD_MAGIC_DWORD_SHIFTED, + RGXFWIF_KCCB_CMD_SLCFLUSHINVAL = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< SLC flush and invalidation request */ + RGXFWIF_KCCB_CMD_CLEANUP = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests cleanup of a FW resource (type specified in the command data) */ + RGXFWIF_KCCB_CMD_POW = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Power request */ + RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Backing for on-demand ZS-Buffer done */ + RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Unbacking for on-demand ZS-Buffer done */ + RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelist Grow done */ + RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelists Reconstruction done */ + RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE = 113U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the firmware that the host has performed a signal update */ + RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE = 114U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the firmware that the host has added more data to a CDM2 Circular Buffer */ + RGXFWIF_KCCB_CMD_HEALTH_CHECK = 
115U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Health check request */ + RGXFWIF_KCCB_CMD_FORCE_UPDATE = 116U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Forcing signalling of all unmet UFOs for a given CCB offset */ + + RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK = 117U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< There is a TA and a 3D command in this single kick */ + + /* Commands only permitted to the native or host OS */ + RGXFWIF_KCCB_CMD_REGCONFIG = 200U | RGX_CMD_MAGIC_DWORD_SHIFTED, + RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG = 201U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */ + RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS = 202U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks */ + RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS = 203U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Enable or disable multiple HWPerf blocks (reusing existing configuration) */ + RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE = 204U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Core clock speed change event */ + RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT = 205U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks during the init process */ + RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE = 206U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Ask the firmware to update its cached ui32LogType value from the (shared) tracebuf control structure */ + RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ = 208U | RGX_CMD_MAGIC_DWORD_SHIFTED, + /* Free slot */ + RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE = 210U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the relative scheduling priority for a particular OSID. 
It can only be serviced for the Host DDK */ + RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL = 211U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set or clear firmware state flags */ + RGXFWIF_KCCB_CMD_HCS_SET_DEADLINE = 212U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set hard context switching deadline */ + RGXFWIF_KCCB_CMD_OS_ISOLATION_GROUP_CHANGE = 213U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the configuration of (or even disables) the OSid Isolation scheduling group. It can only be serviced for the Host DDK */ + RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE = 214U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the FW that a Guest OS has come online / offline. It can only be serviced for the Host DDK */ + RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ = 218U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a minimum frequency/OPP point */ + RGXFWIF_KCCB_CMD_PHR_CFG = 219U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Periodic Hardware Reset behaviour */ +} RGXFWIF_KCCB_CMD_TYPE; + +#define RGXFWIF_LAST_ALLOWED_GUEST_KCCB_CMD (RGXFWIF_KCCB_CMD_REGCONFIG - 1) + +/* Kernel CCB command packet */ +typedef struct +{ + RGXFWIF_KCCB_CMD_TYPE eCmdType; /*!< Command type */ + IMG_UINT32 ui32KCCBFlags; /*!< Compatibility and other flags */ + + /* NOTE: Make sure that uCmdData is the last member of this struct + * This is to calculate actual command size for device mem copy. 
+ * (Refer RGXGetCmdMemCopySize()) + * */ + union + { + RGXFWIF_KCCB_CMD_KICK_DATA sCmdKickData; /*!< Data for Kick command */ + RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA sCombinedTA3DCmdKickData; /*!< Data for combined TA/3D Kick command */ + RGXFWIF_MMUCACHEDATA sMMUCacheData; /*!< Data for MMU cache command */ + RGXFWIF_BPDATA sBPData; /*!< Data for Breakpoint Commands */ + RGXFWIF_SLCFLUSHINVALDATA sSLCFlushInvalData; /*!< Data for SLC Flush/Inval commands */ + RGXFWIF_CLEANUP_REQUEST sCleanupData; /*!< Data for cleanup commands */ + RGXFWIF_POWER_REQUEST sPowData; /*!< Data for power request commands */ + RGXFWIF_HWPERF_CTRL sHWPerfCtrl; /*!< Data for HWPerf control command */ + RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS sHWPerfCfgEnableBlks; /*!< Data for HWPerf configure, clear and enable performance counter block command */ + RGXFWIF_HWPERF_CTRL_BLKS sHWPerfCtrlBlks; /*!< Data for HWPerf enable or disable performance counter block commands */ + RGXFWIF_CORECLKSPEEDCHANGE_DATA sCoreClkSpeedChangeData;/*!< Data for core clock speed change */ + RGXFWIF_ZSBUFFER_BACKING_DATA sZSBufferBackingData; /*!< Feedback for Z/S Buffer backing/unbacking */ + RGXFWIF_FREELIST_GS_DATA sFreeListGSData; /*!< Feedback for Freelist grow/shrink */ + RGXFWIF_FREELISTS_RECONSTRUCTION_DATA sFreeListsReconstructionData; /*!< Feedback for Freelists reconstruction */ + RGXFWIF_REGCONFIG_DATA sRegConfigData; /*!< Data for custom register configuration */ + RGXFWIF_SIGNAL_UPDATE_DATA sSignalUpdateData; /*!< Data for informing the FW about the signal update */ + RGXFWIF_WRITE_OFFSET_UPDATE_DATA sWriteOffsetUpdateData; /*!< Data for informing the FW about the write offset update */ +#if defined(SUPPORT_PDVFS) + RGXFWIF_PDVFS_MAX_FREQ_DATA sPDVFSMaxFreqData; + RGXFWIF_PDVFS_MIN_FREQ_DATA sPDVFSMinFreqData; /*!< Data for setting the min frequency/OPP */ +#endif + RGXFWIF_OSID_PRIORITY_DATA sCmdOSidPriorityData; /*!< Data for updating an OSid priority */ + RGXFWIF_HCS_CTL sHCSCtrl; /*!< Data for Hard 
Context Switching */ + RGXFWIF_OSID_ISOLATION_GROUP_DATA sCmdOSidIsolationData; /*!< Data for updating the OSid isolation group */ + RGXFWIF_OS_STATE_CHANGE_DATA sCmdOSOnlineStateData; /*!< Data for updating the Guest Online states */ + RGXFWIF_DEV_VIRTADDR sTBIBuffer; /*!< Dev address for TBI buffer allocated on demand */ + RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA sForceUpdateData; /*!< Data for signalling all unmet fences for a given CCB */ + RGXFWIF_KCCB_CMD_PHR_CFG_DATA sPeriodicHwResetCfg; /*!< Data for configuring the Periodic Hw Reset behaviour */ + } UNCACHED_ALIGN uCmdData; +} UNCACHED_ALIGN RGXFWIF_KCCB_CMD; + +RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_KCCB_CMD); + +/*! + ****************************************************************************** + * Firmware CCB command structure for RGX + *****************************************************************************/ + +typedef struct +{ + IMG_UINT32 ui32ZSBufferID; +} RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA; + +typedef struct +{ + IMG_UINT32 ui32FreelistID; +} RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA; + +typedef struct +{ + IMG_UINT32 ui32FreelistsCount; + IMG_UINT32 ui32HwrCounter; + IMG_UINT32 aui32FreelistIDs[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT]; +} RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA; + +/*! + Last reset reason for a context. 
+*/ +typedef enum +{ + RGXFWIF_CONTEXT_RESET_REASON_NONE = 0, /*!< No reset reason recorded */ + RGXFWIF_CONTEXT_RESET_REASON_GUILTY_LOCKUP = 1, /*!< Caused a reset due to locking up */ + RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_LOCKUP = 2, /*!< Affected by another context locking up */ + RGXFWIF_CONTEXT_RESET_REASON_GUILTY_OVERRUNING = 3, /*!< Overran the global deadline */ + RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING = 4, /*!< Affected by another context overrunning */ + RGXFWIF_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH = 5, /*!< Forced reset to ensure scheduling requirements */ +} RGXFWIF_CONTEXT_RESET_REASON; + +typedef struct +{ + IMG_UINT32 ui32ServerCommonContextID; /*!< Context affected by the reset */ + RGXFWIF_CONTEXT_RESET_REASON eResetReason; /*!< Reason for reset */ + IMG_UINT32 ui32ResetJobRef; /*!< Job ref running at the time of reset */ + IMG_BOOL bPageFault; /*!< Did a page fault happen */ + IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< At what page catalog address */ + IMG_DEV_VIRTADDR RGXFW_ALIGN sFaultAddress; /*!< Page fault address (only when applicable) */ +} RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA; + +typedef enum +{ + RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING = 101U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests ZSBuffer to be backed with physical pages */ + RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING = 102U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests ZSBuffer to be unbacked */ + RGXFWIF_FWCCB_CMD_FREELIST_GROW = 103U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand freelist grow/shrink */ + RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION = 104U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests freelists reconstruction */ + RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Notifies host of a HWR event on a context */ + RGXFWIF_FWCCB_CMD_DEBUG_DUMP = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand debug dump */ + RGXFWIF_FWCCB_CMD_UPDATE_STATS = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand 
update on process stats */ + /* Unused FWCCB CMD ID 108 */ + /* Unused FWCCB CMD ID 109 */ +#if defined(SUPPORT_PDVFS) + RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, + /* Unused FWCCB CMD ID 111 */ +#endif + RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, +#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) + RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS = 113U | RGX_CMD_MAGIC_DWORD_SHIFTED, +#endif +} RGXFWIF_FWCCB_CMD_TYPE; + +typedef enum +{ + RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS=1, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumPartialRenders stat */ + RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumOutOfMemory stat */ + RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTAStores stat */ + RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32Num3DStores stat */ + RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumCDMStores stat */ + RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTDMStores stat */ +} RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE; + +typedef struct +{ + RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE eElementToUpdate; /*!< Element to update */ + IMG_PID pidOwner; /*!< The pid of the process whose stats are being updated */ + IMG_INT32 i32AdjustmentValue; /*!< Adjustment to be made to the statistic */ +} RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA; + +typedef struct +{ + IMG_UINT32 ui32CoreClkRate; +} UNCACHED_ALIGN RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA; + +#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) +typedef struct +{ + IMG_UINT64 ui64timerGray; + IMG_UINT64 ui64timerBinary; + IMG_UINT64 
aui64uscTimers[RGX_FEATURE_NUM_CLUSTERS]; +} RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS_DATA; +#endif + +typedef struct +{ + RGXFWIF_FWCCB_CMD_TYPE eCmdType; /*!< Command type */ + IMG_UINT32 ui32FWCCBFlags; /*!< Compatibility and other flags */ + + union + { + RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA sCmdZSBufferBacking; /*!< Data for Z/S-Buffer on-demand (un)backing*/ + RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA sCmdFreeListGS; /*!< Data for on-demand freelist grow/shrink */ + RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA sCmdFreeListsReconstruction; /*!< Data for freelists reconstruction */ + RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA sCmdContextResetNotification; /*!< Data for context reset notification */ + RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA sCmdUpdateStatsData; /*!< Data for updating process stats */ + RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA sCmdCoreClkRateChange; +#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) + RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS_DATA sCmdTimers; +#endif + } RGXFW_ALIGN uCmdData; +} RGXFW_ALIGN RGXFWIF_FWCCB_CMD; + +RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_FWCCB_CMD); + + +/*! + ****************************************************************************** + * Workload estimation Firmware CCB command structure for RGX + *****************************************************************************/ +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN ui64ReturnDataIndex; /*!< Index for return data array */ + IMG_UINT64 RGXFW_ALIGN ui64CyclesTaken; /*!< The cycles the workload took on the hardware */ +} RGXFWIF_WORKEST_FWCCB_CMD; + + +/*! 
+ ****************************************************************************** + * Client CCB commands for RGX + *****************************************************************************/ + +/* Required memory alignment for 64-bit variables accessible by Meta + (The gcc meta aligns 64-bit variables to 64-bit; therefore, memory shared + between the host and meta that contains 64-bit variables has to maintain + this alignment) */ +#define RGXFWIF_FWALLOC_ALIGN sizeof(IMG_UINT64) + +#define RGX_CCB_TYPE_TASK (IMG_UINT32_C(1) << 15) +#define RGX_CCB_FWALLOC_ALIGN(size) (((size) + (RGXFWIF_FWALLOC_ALIGN-1)) & ~(RGXFWIF_FWALLOC_ALIGN - 1)) + +typedef IMG_UINT32 RGXFWIF_CCB_CMD_TYPE; + +#define RGXFWIF_CCB_CMD_TYPE_GEOM (201U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) + /* Unused CCB CMD ID 202 */ +#define RGXFWIF_CCB_CMD_TYPE_3D (203U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) +#define RGXFWIF_CCB_CMD_TYPE_3D_PR (204U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) +#define RGXFWIF_CCB_CMD_TYPE_CDM (205U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) +#define RGXFWIF_CCB_CMD_TYPE_TQ_TDM (206U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) +#define RGXFWIF_CCB_CMD_TYPE_FBSC_INVALIDATE (207U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) + /* Unused CCB CMD ID 208 */ + /* Unused CCB CMD ID 209 */ +#define RGXFWIF_CCB_CMD_TYPE_NULL (210U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) + +/* Leave a gap between CCB specific commands and generic commands */ +#define RGXFWIF_CCB_CMD_TYPE_FENCE (211U | RGX_CMD_MAGIC_DWORD_SHIFTED) +#define RGXFWIF_CCB_CMD_TYPE_UPDATE (212U | RGX_CMD_MAGIC_DWORD_SHIFTED) + /* Unused CCB CMD ID 213 */ +#define RGXFWIF_CCB_CMD_TYPE_FENCE_PR (214U | RGX_CMD_MAGIC_DWORD_SHIFTED) +#define RGXFWIF_CCB_CMD_TYPE_PRIORITY (215U | RGX_CMD_MAGIC_DWORD_SHIFTED) + /* Unused CCB CMD ID 216 */ +#define RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE (217U | RGX_CMD_MAGIC_DWORD_SHIFTED) + /* Unused CCB CMD ID 218 */ + +#define 
RGXFWIF_CCB_CMD_TYPE_PADDING (219U | RGX_CMD_MAGIC_DWORD_SHIFTED) + + +typedef IMG_UINT32 RGXFWIF_TRP_STATUS_TYPE; + +#define RGXFWIF_TRP_STATUS_UNKNOWN 0x000U +#define RGXFWIF_TRP_STATUS_CHECKSUMS_OK 0x001U +#define RGXFWIF_TRP_STATUS_CHECKSUMS_ERROR 0x002U + +#define RGXFWIF_CR_TRP_SIGNATURE_STATUS (RGX_CR_SCRATCH14) + +typedef struct +{ + IMG_UINT32 ui32RenderTargetSize; + IMG_UINT32 ui32NumberOfDrawCalls; + IMG_UINT32 ui32NumberOfIndices; + IMG_UINT32 ui32NumberOfMRTs; +} RGXFWIF_WORKLOAD_TA3D; + +typedef union +{ + RGXFWIF_WORKLOAD_TA3D sTA3D; + /* Created as a union to facilitate other DMs + * in the future. + * for Example: + * RGXFWIF_WORKLOAD_CDM sCDM; + */ +}RGXFWIF_WORKLOAD; + +typedef struct +{ + /* Index for the KM Workload estimation return data array */ + IMG_UINT64 RGXFW_ALIGN ui64ReturnDataIndex; + /* Deadline for the workload */ + IMG_UINT64 RGXFW_ALIGN ui64Deadline; + /* Predicted time taken to do the work in cycles */ + IMG_UINT64 RGXFW_ALIGN ui64CyclesPrediction; +} RGXFWIF_WORKEST_KICK_DATA; + +#if defined(SUPPORT_PDVFS) +typedef struct _RGXFWIF_WORKLOAD_LIST_NODE_ RGXFWIF_WORKLOAD_LIST_NODE; +typedef struct _RGXFWIF_DEADLINE_LIST_NODE_ RGXFWIF_DEADLINE_LIST_NODE; + +struct _RGXFWIF_WORKLOAD_LIST_NODE_ +{ + IMG_UINT64 RGXFW_ALIGN ui64Cycles; + IMG_UINT64 RGXFW_ALIGN ui64SelfMemDesc; + IMG_UINT64 RGXFW_ALIGN ui64WorkloadDataMemDesc; + IMG_BOOL bReleased; + RGXFWIF_WORKLOAD_LIST_NODE *psNextNode; +}; + +struct _RGXFWIF_DEADLINE_LIST_NODE_ +{ + IMG_UINT64 RGXFW_ALIGN ui64Deadline; + RGXFWIF_WORKLOAD_LIST_NODE *psWorkloadList; + IMG_UINT64 RGXFW_ALIGN ui64SelfMemDesc; + IMG_UINT64 RGXFW_ALIGN ui64WorkloadDataMemDesc; + IMG_BOOL bReleased; + RGXFWIF_DEADLINE_LIST_NODE *psNextNode; +}; +#endif +typedef struct +{ + RGXFWIF_CCB_CMD_TYPE eCmdType; + IMG_UINT32 ui32CmdSize; + IMG_UINT32 ui32ExtJobRef; /*!< external job reference - provided by client and used in debug for tracking submitted work */ + IMG_UINT32 ui32IntJobRef; /*!< internal job reference 
- generated by services and used in debug for tracking submitted work */ + RGXFWIF_WORKEST_KICK_DATA RGXFW_ALIGN sWorkEstKickData; /*!< Workload Estimation - Workload Estimation Data */ +} RGXFWIF_CCB_CMD_HEADER; + +/*! + ****************************************************************************** + * Client CCB commands which are only required by the kernel + *****************************************************************************/ +typedef struct +{ + IMG_UINT32 ui32Priority; +} RGXFWIF_CMD_PRIORITY; + + +/*! + ****************************************************************************** + * Signature and Checksums Buffer + *****************************************************************************/ +typedef struct +{ + PRGXFWIF_SIGBUFFER sBuffer; /*!< Ptr to Signature Buffer memory */ + IMG_UINT32 ui32LeftSizeInRegs; /*!< Amount of space left for storing regs in the buffer */ +} UNCACHED_ALIGN RGXFWIF_SIGBUF_CTL; + +typedef struct +{ + PRGXFWIF_FIRMWAREGCOVBUFFER sBuffer; /*!< Ptr to firmware gcov buffer */ + IMG_UINT32 ui32Size; /*!< Amount of space for storing in the buffer */ +} UNCACHED_ALIGN RGXFWIF_FIRMWARE_GCOV_CTL; + +/*! 
+ ***************************************************************************** + * RGX Compatibility checks + *****************************************************************************/ + +/* WARNING: Whenever the layout of RGXFWIF_COMPCHECKS_BVNC is a subject of change, + following define should be increased by 1 to indicate to compatibility logic, + that layout has changed */ +#define RGXFWIF_COMPCHECKS_LAYOUT_VERSION 3 + +typedef struct +{ + IMG_UINT32 ui32LayoutVersion; /* WARNING: This field must be defined as first one in this structure */ + IMG_UINT64 RGXFW_ALIGN ui64BVNC; +} UNCACHED_ALIGN RGXFWIF_COMPCHECKS_BVNC; + +typedef struct +{ + IMG_UINT8 ui8OsCountSupport; +} UNCACHED_ALIGN RGXFWIF_INIT_OPTIONS; + +#define RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(name) \ + RGXFWIF_COMPCHECKS_BVNC name = { \ + RGXFWIF_COMPCHECKS_LAYOUT_VERSION, \ + 0, \ + } +#define RGXFWIF_COMPCHECKS_BVNC_INIT(name) \ + do { \ + (name).ui32LayoutVersion = RGXFWIF_COMPCHECKS_LAYOUT_VERSION; \ + (name).ui64BVNC = 0; \ + } while (0) + +typedef struct +{ + RGXFWIF_COMPCHECKS_BVNC sHWBVNC; /*!< hardware BVNC (from the RGX registers) */ + RGXFWIF_COMPCHECKS_BVNC sFWBVNC; /*!< firmware BVNC */ + IMG_UINT32 ui32FWProcessorVersion; /*!< identifier of the FW processor version */ + IMG_UINT32 ui32DDKVersion; /*!< software DDK version */ + IMG_UINT32 ui32DDKBuild; /*!< software DDK build no. */ + IMG_UINT32 ui32BuildOptions; /*!< build options bit-field */ + RGXFWIF_INIT_OPTIONS sInitOptions; /*!< initialisation options bit-field */ + IMG_BOOL bUpdated; /*!< Information is valid */ +} UNCACHED_ALIGN RGXFWIF_COMPCHECKS; + +/*! + ****************************************************************************** + * Updated configuration post FW data init. 
+ *****************************************************************************/ +typedef struct +{ + IMG_UINT32 ui32ActivePMLatencyms; /* APM latency in ms before signalling IDLE to the host */ + IMG_UINT32 ui32RuntimeCfgFlags; /* Compatibility and other flags */ + IMG_BOOL bActivePMLatencyPersistant; /* If set, APM latency does not reset to system default each GPU power transition */ + IMG_UINT32 ui32CoreClockSpeed; /* Core clock speed, currently only used to calculate timer ticks */ + IMG_UINT32 ui32PowUnitsStateMask; /* Power Unit state mask set by the host */ + PRGXFWIF_HWPERFBUF sHWPerfBuf; /* On-demand allocated HWPerf buffer address, to be passed to the FW */ + RGXFWIF_DMA_ADDR sHWPerfDMABuf; +} RGXFWIF_RUNTIME_CFG; + +/*! + ***************************************************************************** + * Control data for RGX + *****************************************************************************/ + +#define RGXFWIF_HWR_DEBUG_DUMP_ALL (99999U) + +#if defined(PDUMP) + +#define RGXFWIF_PID_FILTER_MAX_NUM_PIDS 32U + +typedef enum +{ + RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT, + RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT +} RGXFWIF_PID_FILTER_MODE; + +typedef struct +{ + IMG_PID uiPID; + IMG_UINT32 ui32OSID; +} RGXFW_ALIGN RGXFWIF_PID_FILTER_ITEM; + +typedef struct +{ + RGXFWIF_PID_FILTER_MODE eMode; + /* each process in the filter list is specified by a PID and OS ID pair. + * each PID and OS pair is an item in the items array (asItems). + * if the array contains less than RGXFWIF_PID_FILTER_MAX_NUM_PIDS entries + * then it must be terminated by an item with pid of zero. 
+ */ + RGXFWIF_PID_FILTER_ITEM asItems[RGXFWIF_PID_FILTER_MAX_NUM_PIDS]; +} RGXFW_ALIGN RGXFWIF_PID_FILTER; +#endif + +#if defined(SUPPORT_SECURITY_VALIDATION) +#define RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_DATA (0x1U << 0) +#define RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_CODE (0x1U << 1) +#define RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_NONSECURE (0x1U << 2) +#define RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_SECURE (0x1U << 3) +#endif + +typedef enum +{ + RGXFWIF_USRM_DM_VDM = 0, + RGXFWIF_USRM_DM_DDM = 1, + RGXFWIF_USRM_DM_CDM = 2, + RGXFWIF_USRM_DM_PDM = 3, + RGXFWIF_USRM_DM_TDM = 4, + RGXFWIF_USRM_DM_LAST +} RGXFWIF_USRM_DM; + +typedef enum +{ + RGXFWIF_UVBRM_DM_VDM = 0, + RGXFWIF_UVBRM_DM_DDM = 1, + RGXFWIF_UVBRM_DM_LAST +} RGXFWIF_UVBRM_DM; + +typedef enum +{ + RGXFWIF_TPU_DM_PDM = 0, + RGXFWIF_TPU_DM_VDM = 1, + RGXFWIF_TPU_DM_CDM = 2, + RGXFWIF_TPU_DM_TDM = 3, + RGXFWIF_TPU_DM_LAST +} RGXFWIF_TPU_DM; + +typedef enum +{ + RGXFWIF_GPIO_VAL_OFF = 0, /*!< No GPIO validation */ + RGXFWIF_GPIO_VAL_GENERAL = 1, /*!< Simple test case that + initiates by sending data via the + GPIO and then sends back any data + received over the GPIO */ + RGXFWIF_GPIO_VAL_AP = 2, /*!< More complex test case that writes + and reads data across the entire + GPIO AP address range.*/ +#if defined(SUPPORT_STRIP_RENDERING) + RGXFWIF_GPIO_VAL_SR_BASIC = 3, /*!< Strip Rendering AP based basic test.*/ + RGXFWIF_GPIO_VAL_SR_COMPLEX = 4, /*!< Strip Rendering AP based complex test.*/ +#endif + RGXFWIF_GPIO_VAL_TESTBENCH = 5, /*!< Validates the GPIO Testbench. 
*/ + RGXFWIF_GPIO_VAL_LAST +} RGXFWIF_GPIO_VAL_MODE; + +typedef enum +{ + FW_PERF_CONF_NONE = 0, + FW_PERF_CONF_ICACHE = 1, + FW_PERF_CONF_DCACHE = 2, + FW_PERF_CONF_POLLS = 3, + FW_PERF_CONF_CUSTOM_TIMER = 4, + FW_PERF_CONF_JTLB_INSTR = 5, + FW_PERF_CONF_INSTRUCTIONS = 6 +} FW_PERF_CONF; + +typedef enum +{ + FW_BOOT_STAGE_TLB_INIT_FAILURE = -2, + FW_BOOT_STAGE_NOT_AVAILABLE = -1, + FW_BOOT_NOT_STARTED = 0, + FW_BOOT_BLDR_STARTED = 1, + FW_BOOT_CACHE_DONE, + FW_BOOT_TLB_DONE, + FW_BOOT_MAIN_STARTED, + FW_BOOT_ALIGNCHECKS_DONE, + FW_BOOT_INIT_DONE, +} FW_BOOT_STAGE; + +/* + * Kernel CCB return slot responses. Usage of bit-fields instead of bare integers + * allows FW to possibly pack-in several responses for each single kCCB command. + */ +#define RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED (1U << 0) /* Command executed (return status from FW) */ +#define RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY (1U << 1) /* A cleanup was requested but resource busy */ +#define RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE (1U << 2) /* Poll failed in FW for a HW operation to complete */ + +#define RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE 0x0U /* Reset value of a kCCB return slot (set by host) */ + +typedef struct +{ + /* Fw-Os connection states */ + volatile RGXFWIF_CONNECTION_FW_STATE eConnectionFwState; + volatile RGXFWIF_CONNECTION_OS_STATE eConnectionOsState; + volatile IMG_UINT32 ui32AliveFwToken; + volatile IMG_UINT32 ui32AliveOsToken; +} UNCACHED_ALIGN RGXFWIF_CONNECTION_CTL; + +typedef struct +{ + /* Kernel CCB */ + PRGXFWIF_CCB_CTL psKernelCCBCtl; + PRGXFWIF_CCB psKernelCCB; + PRGXFWIF_CCB_RTN_SLOTS psKernelCCBRtnSlots; + + /* Firmware CCB */ + PRGXFWIF_CCB_CTL psFirmwareCCBCtl; + PRGXFWIF_CCB psFirmwareCCB; + + /* Workload Estimation Firmware CCB */ + PRGXFWIF_CCB_CTL RGXFW_ALIGN psWorkEstFirmwareCCBCtl; + PRGXFWIF_CCB RGXFW_ALIGN psWorkEstFirmwareCCB; + +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) + /* Checkpoint CCB */ + PRGXFWIF_CCB_CTL psCheckpointCCBCtl; + PRGXFWIF_CCB psCheckpointCCB; +#endif + + 
PRGXFWIF_HWRINFOBUF sRGXFWIfHWRInfoBufCtl; + + IMG_UINT32 ui32HWRDebugDumpLimit; + + PRGXFWIF_OSDATA sFwOsData; + + /* Compatibility checks to be populated by the Firmware */ + RGXFWIF_COMPCHECKS sRGXCompChecks; + +} UNCACHED_ALIGN RGXFWIF_OSINIT; + +typedef struct +{ + IMG_DEV_PHYADDR RGXFW_ALIGN sFaultPhysAddr; + + IMG_DEV_VIRTADDR RGXFW_ALIGN sPDSExecBase; + IMG_DEV_VIRTADDR RGXFW_ALIGN sUSCExecBase; + IMG_DEV_VIRTADDR RGXFW_ALIGN sFBCDCStateTableBase; + IMG_DEV_VIRTADDR RGXFW_ALIGN sFBCDCLargeStateTableBase; + IMG_DEV_VIRTADDR RGXFW_ALIGN sTextureHeapBase; + IMG_DEV_VIRTADDR RGXFW_ALIGN sPDSIndirectHeapBase; + + IMG_UINT32 ui32FilterFlags; + + RGXFWIF_SIGBUF_CTL asSigBufCtl[RGXFWIF_DM_DEFAULT_MAX]; +#if defined(SUPPORT_VALIDATION) + RGXFWIF_SIGBUF_CTL asValidationSigBufCtl[RGXFWIF_DM_DEFAULT_MAX]; +#endif + + PRGXFWIF_RUNTIME_CFG sRuntimeCfg; + + PRGXFWIF_TRACEBUF sTraceBufCtl; + PRGXFWIF_SYSDATA sFwSysData; +#if defined(SUPPORT_TBI_INTERFACE) + PRGXFWIF_TBIBUF sTBIBuf; +#endif + IMG_UINT64 RGXFW_ALIGN ui64HWPerfFilter; + + PRGXFWIF_GPU_UTIL_FWCB sGpuUtilFWCbCtl; + PRGXFWIF_REG_CFG sRegCfg; + PRGXFWIF_HWPERF_CTL sHWPerfCtl; + + RGXFWIF_FIRMWARE_GCOV_CTL sFirmwareGcovCtl; + + RGXFWIF_DEV_VIRTADDR sAlignChecks; + + /* Core clock speed at FW boot time */ + IMG_UINT32 ui32InitialCoreClockSpeed; + + /* APM latency in ms before signalling IDLE to the host */ + IMG_UINT32 ui32ActivePMLatencyms; + + /* Flag to be set by the Firmware after successful start */ + IMG_BOOL bFirmwareStarted; + + IMG_UINT32 ui32MarkerVal; + + IMG_UINT32 ui32FirmwareStartedTimeStamp; + + IMG_UINT32 ui32JonesDisableMask; + + RGXFWIF_DMA_ADDR sCorememDataStore; + + FW_PERF_CONF eFirmwarePerf; + + IMG_DEV_VIRTADDR RGXFW_ALIGN sSLC3FenceDevVAddr; + +#if defined(SUPPORT_PDVFS) + RGXFWIF_PDVFS_OPP RGXFW_ALIGN sPDVFSOPPInfo; + + /** + * FW Pointer to memory containing core clock rate in Hz. 
+ * Firmware (PDVFS) updates the memory when running on non primary FW thread + * to communicate to host driver. + */ + PRGXFWIF_CORE_CLK_RATE RGXFW_ALIGN sCoreClockRate; +#endif + +#if defined(PDUMP) + RGXFWIF_PID_FILTER sPIDFilter; +#endif + + RGXFWIF_GPIO_VAL_MODE eGPIOValidationMode; + IMG_UINT32 RGXFW_ALIGN aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_LAST]; + IMG_UINT32 RGXFW_ALIGN aui32USRMNumRegions[RGXFWIF_USRM_DM_LAST]; + IMG_UINT64 RGXFW_ALIGN aui64UVBRMNumRegions[RGXFWIF_UVBRM_DM_LAST]; + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + /* + * Used when validation is enabled to allow the host to check + * that MTS sent the correct sideband in response to a kick + * from a given OSes schedule register. + * Testing is enabled if RGXFWIF_KICK_TEST_ENABLED_BIT is set + * + * Set by the host to: + * (osid << RGXFWIF_KICK_TEST_OSID_SHIFT) | RGXFWIF_KICK_TEST_ENABLED_BIT + * reset to 0 by FW when kicked by the given OSid + */ + IMG_UINT32 ui32OSKickTest; +#endif + RGX_HWPERF_BVNC sBvncKmFeatureFlags; + +#if defined(SUPPORT_SECURITY_VALIDATION) + IMG_UINT32 ui32SecurityTestFlags; + RGXFWIF_DEV_VIRTADDR pbSecureBuffer; + RGXFWIF_DEV_VIRTADDR pbNonSecureBuffer; +#endif + +} UNCACHED_ALIGN RGXFWIF_SYSINIT; + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#define RGXFWIF_KICK_TEST_ENABLED_BIT 0x1 +#define RGXFWIF_KICK_TEST_OSID_SHIFT 0x1 +#endif + +/*! 
+ ***************************************************************************** + * Timer correlation shared data and defines + *****************************************************************************/ + +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN ui64OSTimeStamp; + IMG_UINT64 RGXFW_ALIGN ui64OSMonoTimeStamp; + IMG_UINT64 RGXFW_ALIGN ui64CRTimeStamp; + + /* Utility variable used to convert CR timer deltas to OS timer deltas (nS), + * where the deltas are relative to the timestamps above: + * deltaOS = (deltaCR * K) >> decimal_shift, see full explanation below */ + IMG_UINT64 RGXFW_ALIGN ui64CRDeltaToOSDeltaKNs; + + IMG_UINT32 ui32CoreClockSpeed; + IMG_UINT32 ui32Reserved; +} UNCACHED_ALIGN RGXFWIF_TIME_CORR; + + +/* The following macros are used to help converting FW timestamps to the Host + * time domain. On the FW the RGX_CR_TIMER counter is used to keep track of + * time; it increments by 1 every 256 GPU clock ticks, so the general + * formula to perform the conversion is: + * + * [ GPU clock speed in Hz, if (scale == 10^9) then deltaOS is in nS, + * otherwise if (scale == 10^6) then deltaOS is in uS ] + * + * deltaCR * 256 256 * scale + * deltaOS = --------------- * scale = deltaCR * K [ K = --------------- ] + * GPUclockspeed GPUclockspeed + * + * The actual K is multiplied by 2^20 (and deltaCR * K is divided by 2^20) + * to get some better accuracy and to avoid returning 0 in the integer + * division 256000000/GPUfreq if GPUfreq is greater than 256MHz. + * This is the same as keeping K as a decimal number. + * + * The maximum deltaOS is slightly more than 5hrs for all GPU frequencies + * (deltaCR * K is more or less a constant), and it's relative to the base + * OS timestamp sampled as a part of the timer correlation data. 
+ * This base is refreshed on GPU power-on, DVFS transition and periodic + * frequency calibration (executed every few seconds if the FW is doing + * some work), so as long as the GPU is doing something and one of these + * events is triggered then deltaCR * K will not overflow and deltaOS will be + * correct. + */ + +#define RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT (20) + +#define RGXFWIF_GET_DELTA_OSTIME_NS(deltaCR, K) \ + (((deltaCR) * (K)) >> RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT) + + +/*! + ****************************************************************************** + * GPU Utilisation + *****************************************************************************/ + +/* See rgx_common.h for a list of GPU states */ +#define RGXFWIF_GPU_UTIL_TIME_MASK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF) & ~RGXFWIF_GPU_UTIL_STATE_MASK) + +#define RGXFWIF_GPU_UTIL_GET_TIME(word) ((word) & RGXFWIF_GPU_UTIL_TIME_MASK) +#define RGXFWIF_GPU_UTIL_GET_STATE(word) ((word) & RGXFWIF_GPU_UTIL_STATE_MASK) + +/* The OS timestamps computed by the FW are approximations of the real time, + * which means they could be slightly behind or ahead the real timer on the Host. + * In some cases we can perform subtractions between FW approximated + * timestamps and real OS timestamps, so we need a form of protection against + * negative results if for instance the FW one is a bit ahead of time. + */ +#define RGXFWIF_GPU_UTIL_GET_PERIOD(newtime,oldtime) \ + ((newtime) > (oldtime) ? ((newtime) - (oldtime)) : 0U) + +#define RGXFWIF_GPU_UTIL_MAKE_WORD(time,state) \ + (RGXFWIF_GPU_UTIL_GET_TIME(time) | RGXFWIF_GPU_UTIL_GET_STATE(state)) + + +/* The timer correlation array must be big enough to ensure old entries won't be + * overwritten before all the HWPerf events linked to those entries are processed + * by the MISR. The update frequency of this array depends on how fast the system + * can change state (basically how small the APM latency is) and perform DVFS transitions. 
+ * + * The minimum size is 2 (not 1) to avoid race conditions between the FW reading + * an entry while the Host is updating it. With 2 entries in the worst case the FW + * will read old data, which is still quite ok if the Host is updating the timer + * correlation at that time. + */ +#define RGXFWIF_TIME_CORR_ARRAY_SIZE 256U +#define RGXFWIF_TIME_CORR_CURR_INDEX(seqcount) ((seqcount) % RGXFWIF_TIME_CORR_ARRAY_SIZE) + +/* Make sure the timer correlation array size is a power of 2 */ +static_assert((RGXFWIF_TIME_CORR_ARRAY_SIZE & (RGXFWIF_TIME_CORR_ARRAY_SIZE - 1U)) == 0U, + "RGXFWIF_TIME_CORR_ARRAY_SIZE must be a power of two"); + +typedef struct +{ + RGXFWIF_TIME_CORR sTimeCorr[RGXFWIF_TIME_CORR_ARRAY_SIZE]; + IMG_UINT32 ui32TimeCorrSeqCount; + + /* Last GPU state + OS time of the last state update */ + IMG_UINT64 RGXFW_ALIGN ui64LastWord; + + /* Counters for the amount of time the GPU was active/idle/blocked */ + IMG_UINT64 RGXFW_ALIGN aui64StatsCounters[RGXFWIF_GPU_UTIL_STATE_NUM]; + + IMG_UINT32 ui32GpuUtilFlags; /* Compatibility and other flags */ +} UNCACHED_ALIGN RGXFWIF_GPU_UTIL_FWCB; + + +typedef struct +{ + IMG_UINT32 ui32RenderTargetIndex; //Render number + IMG_UINT32 ui32CurrentRenderTarget; //index in RTA + IMG_UINT32 ui32ActiveRenderTargets; //total active RTs + RGXFWIF_DEV_VIRTADDR sValidRenderTargets; //Array of valid RT indices + RGXFWIF_DEV_VIRTADDR sRTANumPartialRenders; //Array of number of occurred partial renders per render target + IMG_UINT32 ui32MaxRTs; //Number of render targets in the array + IMG_UINT32 ui32RTACtlFlags; /* Compatibility and other flags */ +} UNCACHED_ALIGN RGXFWIF_RTA_CTL; + +typedef struct +{ + IMG_DEV_VIRTADDR RGXFW_ALIGN sFreeListBaseDevVAddr; /*!< Free list device base address */ + IMG_DEV_VIRTADDR RGXFW_ALIGN sFreeListStateDevVAddr; /*!< Free list state buffer */ + IMG_DEV_VIRTADDR RGXFW_ALIGN sFreeListLastGrowDevVAddr; /*!< Free list base address at last grow */ + +#if defined(PM_INTERACTIVE_MODE) + IMG_UINT64 
RGXFW_ALIGN ui64CurrentDevVAddr; + IMG_UINT32 ui32CurrentStackTop; +#endif + + IMG_UINT32 ui32MaxPages; + IMG_UINT32 ui32GrowPages; + IMG_UINT32 ui32CurrentPages; /* HW pages */ +#if defined(PM_INTERACTIVE_MODE) + IMG_UINT32 ui32AllocatedPageCount; + IMG_UINT32 ui32AllocatedMMUPageCount; +#endif +#if defined(SUPPORT_SHADOW_FREELISTS) + IMG_UINT32 ui32HWRCounter; + PRGXFWIF_FWMEMCONTEXT psFWMemContext; +#endif + IMG_UINT32 ui32FreeListID; + IMG_BOOL bGrowPending; + IMG_UINT32 ui32ReadyPages; /* Pages that should be used only when OOM is reached */ + IMG_UINT32 ui32FreelistFlags; /* Compatibility and other flags */ + + IMG_BOOL bUpdatePending; + IMG_UINT32 ui32UpdateNewPages; + IMG_UINT32 ui32UpdateNewReadyPages; +} UNCACHED_ALIGN RGXFWIF_FREELIST; + + +/*! + ****************************************************************************** + * HWRTData + *****************************************************************************/ + +/* HWRTData flags */ +/* Deprecated flags 1:0 */ +#define HWRTDATA_HAS_LAST_TA (1U << 2) +#define HWRTDATA_PARTIAL_RENDERED (1U << 3) +#define HWRTDATA_KILLED (1U << 4) +#define HWRTDATA_KILL_AFTER_TARESTART (1U << 5) + +#if defined(SUPPORT_SW_TRP) +#define SW_TRP_SIGNATURE_FIRST_KICK 0U +#define SW_TRP_SIGNATURE_SECOND_KICK 1U +#define SW_TRP_SIGNATURE_COUNT 2U +#define SW_TRP_GEOMETRY_SIGNATURE_SIZE 8U +#define SW_TRP_FRAGMENT_SIGNATURE_SIZE 8U +/* Space for tile usage bitmap, one bit per tile on screen */ +#define RGX_FEATURE_TILE_SIZE_X (32U) +#define RGX_FEATURE_TILE_SIZE_Y (32U) +#define SW_TRP_TILE_USED_SIZE ((ROGUE_RENDERSIZE_MAXX / RGX_FEATURE_TILE_SIZE_X + ROGUE_RENDERSIZE_MAXY / RGX_FEATURE_TILE_SIZE_Y) / (8U * sizeof(IMG_UINT32))) +#endif + +/*! 
+ ****************************************************************************** + * Parameter Management (PM) control data for RGX + *****************************************************************************/ +typedef enum +{ + RGXFW_SPM_STATE_NONE = 0, + RGXFW_SPM_STATE_PR_BLOCKED, + RGXFW_SPM_STATE_WAIT_FOR_GROW, + RGXFW_SPM_STATE_WAIT_FOR_HW, + RGXFW_SPM_STATE_PR_RUNNING, + RGXFW_SPM_STATE_PR_AVOIDED, + RGXFW_SPM_STATE_PR_EXECUTED, + RGXFW_SPM_STATE_PR_FORCEFREE, +} RGXFW_SPM_STATE; + +typedef struct +{ + /* Make sure the structure is aligned to the dcache line */ + IMG_CHAR RGXFW_ALIGN_DCACHEL align[1]; + + RGXFW_SPM_STATE eSPMState; /*!< current owner of this PM data structure */ + RGXFWIF_UFO sPartialRenderTA3DFence; /*!< TA/3D fence object holding the value to let through the 3D partial command */ +#if defined(RGX_FIRMWARE) + RGXFWIF_FWCOMMONCONTEXT *ps3dContext; + RGXFWIF_CCB_CMD_HEADER *psCmdHeader; /*!< Pointer to the header of the command holding the partial render */ + struct RGXFWIF_CMD3D_STRUCT *ps3DCmd; /*!< Pointer to the 3D command holding the partial render register info*/ + RGXFWIF_PRBUFFER *apsPRBuffer[RGXFWIF_PRBUFFER_MAXSUPPORTED]; /*!< Array of pointers to PR Buffers which may be used if partial render is needed */ +#else + RGXFWIF_DEV_VIRTADDR ps3dContext; + RGXFWIF_DEV_VIRTADDR psCmdHeader; /*!< Pointer to the header of the command holding the partial render */ + RGXFWIF_DEV_VIRTADDR ps3DCmd; /*!< Pointer to the 3D command holding the partial render register info*/ + RGXFWIF_DEV_VIRTADDR apsPRBuffer[RGXFWIF_PRBUFFER_MAXSUPPORTED]; /*!< Array of pointers to PR Buffers which may be used if partial render is needed */ +#endif + RGXFW_FREELIST_TYPE eOOMFreeListType; /*!< Indicates the freelist type that went out of memory */ + bool b3DMemFreeDetected; /*!< Indicates if a 3D Memory Free has been detected, which resolves OOM */ +} RGXFW_SPMCTL; + +typedef enum +{ + RGXFWIF_RTDATA_STATE_NONE = 0, + RGXFWIF_RTDATA_STATE_KICKTA, + 
RGXFWIF_RTDATA_STATE_KICKTAFIRST, + RGXFWIF_RTDATA_STATE_TAFINISHED, + RGXFWIF_RTDATA_STATE_KICK3D, + RGXFWIF_RTDATA_STATE_3DFINISHED, + RGXFWIF_RTDATA_STATE_TAOUTOFMEM, + RGXFWIF_RTDATA_STATE_PARTIALRENDERFINISHED, + /* In case of HWR, we can't set the RTDATA state to NONE, + * as this will cause any TA to become a first TA. + * To ensure all related TA's are skipped, we use the HWR state */ + RGXFWIF_RTDATA_STATE_HWR, + RGXFWIF_RTDATA_STATE_UNKNOWN = 0x7FFFFFFFU +} RGXFWIF_RTDATA_STATE; + +typedef struct +{ + IMG_UINT32 ui32HWRTDataFlags; + RGXFWIF_RTDATA_STATE eState; + + + IMG_UINT64 RGXFW_ALIGN ui64VCECatBase[4]; + IMG_UINT64 RGXFW_ALIGN ui64VCELastCatBase[4]; + IMG_UINT64 RGXFW_ALIGN ui64TECatBase[4]; + IMG_UINT64 RGXFW_ALIGN ui64TELastCatBase[4]; + IMG_UINT64 RGXFW_ALIGN ui64AlistCatBase; + IMG_UINT64 RGXFW_ALIGN ui64AlistLastCatBase; + +#if defined(PM_INTERACTIVE_MODE) + IMG_DEV_VIRTADDR RGXFW_ALIGN psVHeapTableDevVAddr; +#endif + +#if defined(PM_INTERACTIVE_MODE) + IMG_DEV_VIRTADDR RGXFW_ALIGN sPMMListDevVAddr; +#else + /* Series8 PM State buffers */ + IMG_DEV_VIRTADDR RGXFW_ALIGN sPMRenderStateDevVAddr; + IMG_DEV_VIRTADDR RGXFW_ALIGN sPMSecureRenderStateDevVAddr; +#endif + + PRGXFWIF_FREELIST RGXFW_ALIGN apsFreeLists[RGXFW_MAX_FREELISTS]; + IMG_UINT32 aui32FreeListHWRSnapshot[RGXFW_MAX_FREELISTS]; + IMG_BOOL bRenderStateNeedsReset; + + RGXFWIF_CLEANUP_CTL sCleanupState; + + RGXFWIF_RTA_CTL sRTACtl; + + IMG_UINT32 ui32ScreenPixelMax; + IMG_UINT64 RGXFW_ALIGN ui64PPPMultiSampleCtl; + IMG_UINT32 ui32TEStride; + IMG_DEV_VIRTADDR RGXFW_ALIGN sTailPtrsDevVAddr; + IMG_UINT32 ui32TPCSize; + IMG_UINT32 ui32TEScreen; + IMG_UINT32 ui32TEAA; + IMG_UINT32 ui32TEMTILE1; + IMG_UINT32 ui32TEMTILE2; + IMG_UINT32 ui32RgnStride; + IMG_UINT32 ui32ISPMergeLowerX; + IMG_UINT32 ui32ISPMergeLowerY; + IMG_UINT32 ui32ISPMergeUpperX; + IMG_UINT32 ui32ISPMergeUpperY; + IMG_UINT32 ui32ISPMergeScaleX; + IMG_UINT32 ui32ISPMergeScaleY; +#if defined(RGX_FIRMWARE) + struct 
RGXFWIF_FWCOMMONCONTEXT_* psOwnerGeom; +#else + RGXFWIF_DEV_VIRTADDR pui32OwnerGeomNotUsedByHost; +#endif + +#if defined(PM_INTERACTIVE_MODE) + IMG_UINT64 RGXFW_ALIGN ui64PMAListStackPointer; + IMG_UINT32 ui32PMMListStackPointer; +#endif +#if defined(SUPPORT_SW_TRP) + /* SW-TRP state and signature data + * + * Stored state is used to kick the same geometry or 3D twice, + * State is stored before first kick and restored before second to rerun the same data. + * Signatures from both kicks are stored and compared */ + IMG_UINT32 aaui32GeometrySignature[SW_TRP_SIGNATURE_COUNT][SW_TRP_GEOMETRY_SIGNATURE_SIZE]; + IMG_UINT32 aaui32FragmentSignature[SW_TRP_SIGNATURE_COUNT][SW_TRP_FRAGMENT_SIGNATURE_SIZE]; + IMG_UINT32 ui32KickFlagsCopy; + IMG_UINT32 ui32SW_TRPState; + IMG_UINT32 aui32TileUsed[SW_TRP_TILE_USED_SIZE]; + RGXFW_SPMCTL sSPMCtlCopy; +#endif +#if defined(SUPPORT_TRP) + /* TRP state + * + * Stored state is used to kick the same geometry or 3D twice, + * State is stored before first kick and restored before second to rerun the same data. + */ + IMG_UINT32 ui32KickFlagsCopy; + IMG_UINT32 ui32TRPState; + IMG_UINT64 aui64TRPChecksums3D[4]; + IMG_UINT64 aui64TRPChecksumsGeom[2]; +#endif +} UNCACHED_ALIGN RGXFWIF_HWRTDATA; + +#endif /* RGX_FWIF_KM_H */ + +/****************************************************************************** + End of file (rgx_fwif_km.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/include/volcanic/rgx_hw_errors.h b/drivers/mcst/gpu-imgtec/services/include/volcanic/rgx_hw_errors.h new file mode 100644 index 000000000000..1538b4be083f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/volcanic/rgx_hw_errors.h @@ -0,0 +1,57 @@ +/*************************************************************************/ /*! +@File rgx_hw_errors.h +@Title RGX HW error codes +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Platform RGX +@Description RGX HW error codes +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGX_HW_ERRORS_H) +#define RGX_HW_ERRORS_H + +/************************ + * GPU HW error codes * + ************************/ +typedef enum +{ + RGX_HW_ERR_NA = 0x0, + RGX_HW_ERR_PRIMID_FAILURE_DURING_DMKILL = 0x101, +} RGX_HW_ERR; + +#endif diff --git a/drivers/mcst/gpu-imgtec/services/include/volcanic/rgx_hwperf_table.h b/drivers/mcst/gpu-imgtec/services/include/volcanic/rgx_hwperf_table.h new file mode 100644 index 000000000000..1d420549c040 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/volcanic/rgx_hwperf_table.h @@ -0,0 +1,473 @@ +/*************************************************************************/ /*! +@File +@Title HWPerf counter table header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Utility functions used internally for HWPerf data retrieval +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGX_HWPERF_TABLE_H +#define RGX_HWPERF_TABLE_H + +#include "img_types.h" +#include "img_defs.h" +#include "rgx_fwif_hwperf.h" + +/*****************************************************************************/ + +/* Forward declaration */ +typedef struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_ RGXFW_HWPERF_CNTBLK_TYPE_MODEL; + +/* Function pointer type for functions to check dynamic power state of + * counter block instance. Used only in firmware. */ +typedef IMG_BOOL (*PFN_RGXFW_HWPERF_CNTBLK_POWERED)( + RGX_HWPERF_CNTBLK_ID eBlkType, + IMG_UINT8 ui8UnitId); + +/* Counter block run-time info */ +typedef struct +{ + IMG_UINT32 uiNumUnits; /* Number of instances of this block type in the core */ +} RGX_HWPERF_CNTBLK_RT_INFO; + +/* Function pointer type for functions to check block is valid and present + * on that RGX Device at runtime. It may have compile logic or run-time + * logic depending on where the code executes: server, srvinit or firmware. + * Values in the psRtInfo output parameter are only valid if true returned. + */ +typedef IMG_BOOL (*PFN_RGXFW_HWPERF_CNTBLK_PRESENT)( + const struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_* psBlkTypeDesc, + void *pvDev_km, + RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo); + +/* This structure encodes properties of a type of performance counter block. + * The structure is sometimes referred to as a block type descriptor. These + * properties contained in this structure represent the columns in the block + * type model table variable below. These values vary depending on the build + * BVNC and core type. + * Each direct block has a unique type descriptor and each indirect group has + * a type descriptor. 
+ */ +struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_ +{ + IMG_UINT32 uiCntBlkIdBase; /* The starting block id for this block type */ + IMG_UINT32 uiIndirectReg; /* 0 if direct type otherwise the indirect register value to select indirect unit */ + IMG_UINT32 uiNumUnits; /* Number of instances of this block type in the core (compile time use) */ + const IMG_CHAR *pszBlockNameComment; /* Name of the PERF register. Used while dumping the perf counters to pdumps */ + PFN_RGXFW_HWPERF_CNTBLK_POWERED pfnIsBlkPowered; /* A function to determine dynamic power state for the block type */ + PFN_RGXFW_HWPERF_CNTBLK_PRESENT pfnIsBlkPresent; /* A function to determine presence on RGX Device at run-time */ + IMG_UINT16 *pszBlkCfgValid; /* Array of supported counters per block type */ +}; + +/*****************************************************************************/ + +/* Shared compile-time context ASSERT macro */ +#if defined(RGX_FIRMWARE) +/* firmware context */ +# define DBG_ASSERT(_c) RGXFW_ASSERT((_c)) +#else +/* host client/server context */ +# define DBG_ASSERT(_c) PVR_ASSERT((_c)) +#endif + +/***************************************************************************** + RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered() + + Referenced in gasCntBlkTypeModel[] table below and only called from + RGX_FIRMWARE run-time context. Therefore compile time configuration is used. 
+ *****************************************************************************/ + +#if defined(RGX_FIRMWARE) && defined(RGX_FEATURE_PERFBUS) +# include "rgxfw_pow.h" +# include "rgxfw_utils.h" + +static inline IMG_BOOL rgxfw_hwperf_pow_st_direct(RGX_HWPERF_CNTBLK_ID, IMG_UINT8); +static inline IMG_BOOL rgxfw_hwperf_pow_st_direct(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId) +{ + PVR_UNREFERENCED_PARAMETER(ui8UnitId); + + switch (eBlkType) + { + case RGX_CNTBLK_ID_JONES: + case RGX_CNTBLK_ID_SLC: + case RGX_CNTBLK_ID_SLC0: + case RGX_CNTBLK_ID_FBCDC: + case RGX_CNTBLK_ID_FW_CUSTOM: + case RGX_CNTBLK_ID_PIPELINE_STATS: + return IMG_TRUE; + + case RGX_CNTBLK_ID_SLC1: + if (RGX_FEATURE_NUM_MEMBUS > 1) + { + return IMG_TRUE; + } + else + { + return IMG_FALSE; + } + + case RGX_CNTBLK_ID_SLC2: + case RGX_CNTBLK_ID_SLC3: + if (RGX_FEATURE_NUM_MEMBUS > 2) + { + return IMG_TRUE; + } + else + { + return IMG_FALSE; + } + + default: + return IMG_FALSE; + } +} + +/* Only use conditional compilation when counter blocks appear in different + * islands for different Rogue families. + */ +static inline IMG_BOOL rgxfw_hwperf_pow_st_indirect(RGX_HWPERF_CNTBLK_ID, IMG_UINT8); +static inline IMG_BOOL rgxfw_hwperf_pow_st_indirect(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId) +{ + PVR_UNREFERENCED_PARAMETER(ui8UnitId); + + IMG_UINT32 ui32NumDustsEnabled = rgxfw_pow_get_enabled_units(); + + // We don't have any Dusts Enabled until first DC opens the GPU. This makes + // setting the PDump HWPerf trace buffers very difficult. 
+ // To work around this we special-case some of the 'have to be there' + // indirect registers (e.g., TPU0) + + switch (eBlkType) + { + case RGX_CNTBLK_ID_TPU0: + return true; + /*NOTREACHED*/ + default: + if (((gsPowCtl.eUnitsPowState & RGXFW_POW_ST_RD_ON) != 0U) && + (ui32NumDustsEnabled > 0U)) + { + return true; + } + else + { + return false; + } + /*NOTREACHED*/ + } + return true; +} + +#else /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */ + +# define rgxfw_hwperf_pow_st_direct ((void *)NULL) +# define rgxfw_hwperf_pow_st_indirect ((void *)NULL) + +#endif /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */ + +/***************************************************************************** + RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered() end + *****************************************************************************/ + +/***************************************************************************** + RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() start + + Referenced in gasCntBlkTypeModel[] table below and called from all build + contexts: + RGX_FIRMWARE, PVRSRVCTL (UM) and PVRSRVKM (Server). + + Therefore each function has two implementations, one for compile time and one + run time configuration depending on the context. The functions will inform the + caller whether this block is valid for this particular RGX device. Other + run-time dependent data is returned in psRtInfo for the caller to use. 
+ *****************************************************************************/ + + +/* Used for all block types: Direct and Indirect */ +static inline IMG_BOOL rgx_hwperf_blk_present(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo) +{ +#if defined(__KERNEL__) /* Server context -- Run-time Only */ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km; + PVRSRV_DEVICE_NODE *psNode; + IMG_UINT32 ui32MaxTPUPerSPU; + IMG_UINT32 ui32NumMemBus; + + DBG_ASSERT(psDevInfo != NULL); + DBG_ASSERT(psBlkTypeDesc != NULL); + DBG_ASSERT(psRtInfo != NULL); + + if (((psDevInfo == NULL) || (psBlkTypeDesc == NULL)) || (psRtInfo == NULL)) + { + return IMG_FALSE; + } + + psNode = psDevInfo->psDeviceNode; + DBG_ASSERT(psNode != NULL); + + if (psNode == NULL) + { + return IMG_FALSE; + } + + ui32MaxTPUPerSPU = + PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, MAX_TPU_PER_SPU); + + ui32NumMemBus = + PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_MEMBUS); + + switch (psBlkTypeDesc->uiCntBlkIdBase) + { + case RGX_CNTBLK_ID_JONES: + case RGX_CNTBLK_ID_SLC: + case RGX_CNTBLK_ID_SLC0: + case RGX_CNTBLK_ID_FBCDC: + case RGX_CNTBLK_ID_FW_CUSTOM: + case RGX_CNTBLK_ID_PIPELINE_STATS: + psRtInfo->uiNumUnits = 1; + break; + + case RGX_CNTBLK_ID_SLC1: + if (ui32NumMemBus >= 2) + { + psRtInfo->uiNumUnits = 1; + } + else + { + psRtInfo->uiNumUnits = 0; + } + break; + + case RGX_CNTBLK_ID_SLC2: + case RGX_CNTBLK_ID_SLC3: + if (ui32NumMemBus > 2) + { + psRtInfo->uiNumUnits = 1; + } + else + { + psRtInfo->uiNumUnits = 0; + } + break; + + case RGX_CNTBLK_ID_TPU0: + case RGX_CNTBLK_ID_SWIFT0: + psRtInfo->uiNumUnits = + PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_SPU); + psRtInfo->uiNumUnits *= ui32MaxTPUPerSPU; + break; + + case RGX_CNTBLK_ID_TEXAS0: + case RGX_CNTBLK_ID_PBE_SHARED0: + + psRtInfo->uiNumUnits = + PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_SPU); + break; + + case RGX_CNTBLK_ID_USC0: + case RGX_CNTBLK_ID_MERCER0: + psRtInfo->uiNumUnits 
= + PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_CLUSTERS); + break; + + case RGX_CNTBLK_ID_PBE0: + + psRtInfo->uiNumUnits = + PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, PBE_PER_SPU); + psRtInfo->uiNumUnits *= + PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_SPU); + break; + + case RGX_CNTBLK_ID_ISP0: + + psRtInfo->uiNumUnits = + PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_ISP_PER_SPU); + /* Adjust by NUM_SPU */ + + psRtInfo->uiNumUnits *= + PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_SPU); + break; + + default: + return IMG_FALSE; + } + /* Verify that we have at least one unit present */ + if (psRtInfo->uiNumUnits > 0) + { + return IMG_TRUE; + } + else + { + return IMG_FALSE; + } +#else /* FW context -- Compile-time only */ + PVR_UNREFERENCED_PARAMETER(pvDev_km); + DBG_ASSERT(psBlkTypeDesc != NULL); + + if (unlikely(psBlkTypeDesc == NULL)) + { + return IMG_FALSE; + } + + switch (psBlkTypeDesc->uiCntBlkIdBase) + { + /* Handle the dynamic-sized SLC blocks which are only present if + * RGX_FEATURE_NUM_MEMBUS is appropriately set. + */ + case RGX_CNTBLK_ID_SLC1: + if (RGX_FEATURE_NUM_MEMBUS >= 2) + { + psRtInfo->uiNumUnits = 1; + } + else + { + psRtInfo->uiNumUnits = 0; + } + break; + + case RGX_CNTBLK_ID_SLC2: + case RGX_CNTBLK_ID_SLC3: + if (RGX_FEATURE_NUM_MEMBUS > 2) + { + psRtInfo->uiNumUnits = 1; + } + else + { + psRtInfo->uiNumUnits = 0; + } + break; + + default: + psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits; + break; + } + if (psRtInfo->uiNumUnits > 0) + { + return IMG_TRUE; + } + else + { + return IMG_FALSE; + } +#endif /* defined(__KERNEL__) */ +} + +#if !defined(__KERNEL__) /* Firmware or User-mode context */ + +/* Used to instantiate a null row in the block type model table below where the + * block is not supported for a given build BVNC in firmware/user mode context. + * This is needed as the blockid to block type lookup uses the table as well + * and clients may try to access blocks not in the hardware. 
 */ +#define RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(_blkid) X(_blkid, 0, 0, #_blkid, NULL, NULL, NULL) + +#endif + +/***************************************************************************** + RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() end + *****************************************************************************/ + +/***************************************************************************** + RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] table + + This table holds the entries for the performance counter block type model. + Where the block is not present on an RGX device in question the + pfnIsBlkPresent() returns false, if valid and present it returns true. + Columns in the table with a ** indicate the value is a default and the value + returned in RGX_HWPERF_CNTBLK_RT_INFO when calling pfnIsBlkPresent() should + be used at runtime by the caller. These columns are only valid for compile + time BVNC configured contexts. + + Order of table rows must match order of counter block IDs in the enumeration + RGX_HWPERF_CNTBLK_ID. + + Table contains Xmacro styled entries. Each includer of this file must define + a gasCntBlkTypeModel[] structure which is local to itself. Only the layout is + defined here. 
+ + uiCntBlkIdBase : Block-ID + uiIndirectReg : 0 => Direct, non-zero => INDIRECT register address + uiNumUnits : Number of units present on the GPU + pszBlockNameComment : Name of the Performance Block + pfnIsBlkPowered : Function to determine power state of block + pfnIsBlkPresent : Function to determine block presence on the core + pszBlkCfgValid : Array of counters valid within this block type + *****************************************************************************/ + + // Furian 8XT V2 layout: + + /* uiCntBlkIdBase, uiIndirectReg, uiNumUnits**, pszBlockNameComment, pfnIsBlkPowered, pfnIsBlkPresent */ + + /* RGX_CNTBLK_ID_JONES */ +#if defined(RGX_FIRMWARE) || defined(__KERNEL__) + +/* Furian 8XT Direct Performance counter blocks */ + +#define RGX_CNT_BLK_TYPE_MODEL_DIRECT_LIST \ + /* uiCntBlkIdBase, uiIndirectReg, uiNumUnits**, pszBlockNameComment, pfnIsBlkPowered, pfnIsBlkPresent */ \ +X(RGX_CNTBLK_ID_JONES, 0, 1, "PERF_BLK_JONES", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiJONES), \ +X(RGX_CNTBLK_ID_SLC, 0, 1, "PERF_BLK_SLC", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiSLC), \ +X(RGX_CNTBLK_ID_FBCDC, 0, 1, "PERF_BLK_FBCDC", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiFBCDC), \ +X(RGX_CNTBLK_ID_FW_CUSTOM, 0, 1, "PERF_BLK_FW_CUSTOM", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiFWCUSTOM), \ +X(RGX_CNTBLK_ID_SLC0, 0, 1, "PERF_BLK_SLC0", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiSLC0), \ +X(RGX_CNTBLK_ID_SLC1, 0, 1, "PERF_BLK_SLC1", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiSLC1), \ +X(RGX_CNTBLK_ID_SLC2, 0, 1, "PERF_BLK_SLC2", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiSLC2), \ +X(RGX_CNTBLK_ID_SLC3, 0, 1, "PERF_BLK_SLC3", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiSLC3), \ +X(RGX_CNTBLK_ID_PIPELINE_STATS, 0, 1, "PERF_BLK_PIPELINE_STATS", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiPIPELINE) + +/* Furian 8XT Indirect Performance 
counter blocks */ + +#define RGX_CNT_BLK_TYPE_MODEL_INDIRECT_LIST \ + /* uiCntBlkIdBase, uiIndirectReg, uiNumUnits**, pszBlockNameComment, pfnIsBlkPowered, pfnIsBlkPresent */ \ +X(RGX_CNTBLK_ID_ISP0, RGX_CR_ISP_INDIRECT, RGX_HWPERF_NUM_SPU * RGX_HWPERF_NUM_ISP_PER_SPU, "PERF_BLK_ISP", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiISP), \ +X(RGX_CNTBLK_ID_MERCER0, RGX_CR_MERCER_INDIRECT, RGX_HWPERF_NUM_MERCER, "PERF_BLK_MERCER", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiMERCER), \ +X(RGX_CNTBLK_ID_PBE0, RGX_CR_PBE_INDIRECT, RGX_HWPERF_NUM_PBE, "PERF_BLK_PBE", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiPBE), \ +X(RGX_CNTBLK_ID_PBE_SHARED0, RGX_CR_PBE_SHARED_INDIRECT, RGX_HWPERF_NUM_PBE_SHARED, "PERF_BLK_PBE_SHARED", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiPBE_SHARED), \ +X(RGX_CNTBLK_ID_USC0, RGX_CR_USC_INDIRECT, RGX_HWPERF_NUM_USC, "PERF_BLK_USC", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiUSC), \ +X(RGX_CNTBLK_ID_TPU0, RGX_CR_TPU_INDIRECT, RGX_HWPERF_NUM_TPU, "PERF_BLK_TPU", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiTPU), \ +X(RGX_CNTBLK_ID_SWIFT0, RGX_CR_SWIFT_INDIRECT, RGX_HWPERF_NUM_SWIFT, "PERF_BLK_SWIFT", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiSWIFT), \ +X(RGX_CNTBLK_ID_TEXAS0, RGX_CR_TEXAS_INDIRECT, RGX_HWPERF_NUM_TEXAS, "PERF_BLK_TEXAS", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiTEXAS) + +#else /* !defined(RGX_FIRMWARE) && !defined(__KERNEL__) */ + +#error "RGX_FIRMWARE or __KERNEL__ *MUST* be defined" + +#endif /* defined(RGX_FIRMWARE) || defined(__KERNEL__) */ + +#endif /* RGX_HWPERF_TABLE_H */ + +/****************************************************************************** + End of file (rgx_hwperf_table.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/include/volcanic/rgxapi_km.h 
b/drivers/mcst/gpu-imgtec/services/include/volcanic/rgxapi_km.h new file mode 100644 index 000000000000..058ba26cd377 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/volcanic/rgxapi_km.h @@ -0,0 +1,291 @@ +/*************************************************************************/ /*! +@File +@Title RGX API Header kernel mode +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exported RGX API details +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __RGXAPI_KM_H__ +#define __RGXAPI_KM_H__ + +#include "rgx_hwperf.h" + + +/****************************************************************************** + * RGX HW Performance Profiling Control API(s) + *****************************************************************************/ + +typedef struct _RGX_HWPERF_DEVICE_ +{ + IMG_CHAR pszName[20]; /* Helps identify this device uniquely */ + IMG_HANDLE hDevData; /* Handle for the server */ + + struct _RGX_HWPERF_DEVICE_ *psNext; +} RGX_HWPERF_DEVICE; + +typedef struct +{ + RGX_HWPERF_DEVICE *psHWPerfDevList; +} RGX_HWPERF_CONNECTION; + +/*************************************************************************/ /*! +@Function RGXHWPerfLazyConnect +@Description Obtain a HWPerf connection object to the RGX device(s). The + connections to devices are not actually opened until + HWPerfOpen() is called. +@Output ppsHWPerfConnection Address of a HWPerf connection object +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfLazyConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection); + + +/*************************************************************************/ /*! +@Function RGXHWPerfOpen +@Description Opens connection(s) to the RGX device(s). 
 Valid handle to the + connection object has to be provided which means this + function needs to be preceded by the call to + RGXHWPerfLazyConnect() function. +@Input psHWPerfConnection HWPerf connection object +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfOpen(RGX_HWPERF_CONNECTION* psHWPerfConnection); + + +/*************************************************************************/ /*! +@Function RGXHWPerfConnect +@Description Obtain a connection object to the RGX HWPerf module. Allocated + connection object(s) reference opened connection(s). Calling + this function is an equivalent of calling RGXHWPerfLazyConnect + and RGXHWPerfOpen. This connect should be used when the caller + will be retrieving event data. +@Output ppsHWPerfConnection Address of HWPerf connection object +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection); + + +/*************************************************************************/ /*! +@Function RGXHWPerfFreeConnection +@Description Frees the HWPerf connection object +@Input psHWPerfConnection Pointer to connection object as returned + from RGXHWPerfLazyConnect() +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfFreeConnection(RGX_HWPERF_CONNECTION** psHWPerfConnection); + + +/*************************************************************************/ /*! 
+@Function RGXHWPerfClose +@Description Closes all the opened connection(s) to RGX device(s) +@Input psHWPerfConnection Pointer to HWPerf connection object as + returned from RGXHWPerfConnect() or + RGXHWPerfOpen() +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfClose(RGX_HWPERF_CONNECTION *psHWPerfConnection); + + +/*************************************************************************/ /*! +@Function RGXHWPerfDisconnect +@Description Disconnect from the RGX device +@Input ppsHWPerfConnection Pointer to HWPerf connection object as + returned from RGXHWPerfConnect() or + RGXHWPerfOpen(). Calling this function is + an equivalent of calling RGXHWPerfClose() + and RGXHWPerfFreeConnection(). +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfDisconnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection); + + +/*************************************************************************/ /*! +@Function RGXHWPerfControl +@Description Enable or disable the generation of RGX HWPerf event packets. + See RGXCtrlHWPerf(). +@Input psHWPerfConnection Pointer to HWPerf connection object +@Input eStreamId ID of the HWPerf stream +@Input bToggle Switch to toggle or apply mask. +@Input ui64Mask Mask of events to control. +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV RGXHWPerfControl( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + RGX_HWPERF_STREAM_ID eStreamId, + IMG_BOOL bToggle, + IMG_UINT64 ui64Mask); + + +/*************************************************************************/ /*! +@Function RGXHWPerfGetFilter +@Description Reads HWPerf stream filter where stream is identified by the + given stream ID. 
+@Input hDevData Handle to connection/device object +@Input eStreamId ID of the HWPerf stream +@Output ui64Filter HWPerf filter value +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfGetFilter( + IMG_HANDLE hDevData, + RGX_HWPERF_STREAM_ID eStreamId, + IMG_UINT64 *ui64Filter +); + + +/*************************************************************************/ /*! +@Function RGXHWPerfConfigureCounters +@Description Enable and configure the performance counter block for one or + more device layout modules. + See RGXConfigHWPerfCounters(). +@Input psHWPerfConnection Pointer to HWPerf connection object +@Input ui32CtrlWord One of RGX_HWPERF_CTRL_NOP, + RGX_HWPERF_CTRL_GEOM_FULLRANGE, + RGX_HWPERF_CTRL_COMP_FULLRANGE, + RGX_HWPERF_CTRL_TDM_FULLRANGE +@Input ui32NumBlocks Number of elements in the array +@Input asBlockConfigs Address of the array of configuration blocks +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV RGXHWPerfConfigureCounters( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + IMG_UINT32 ui32CtrlWord, + IMG_UINT32 ui32NumBlocks, + RGX_HWPERF_CONFIG_CNTBLK* asBlockConfigs); + + +/*************************************************************************/ /*! +@Function RGXHWPerfDisableCounters +@Description Disable the performance counter block for one or more device + layout modules. +@Input psHWPerfConnection Pointer to HWPerf connection object +@Input ui32NumBlocks Number of elements in the array +@Input aeBlockIDs An array of bytes with values taken from + the RGX_HWPERF_CNTBLK_ID enumeration. 
+@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV RGXHWPerfDisableCounters( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + IMG_UINT32 ui32NumBlocks, + IMG_UINT16* aeBlockIDs); + +/*************************************************************************/ /*! +@Function RGXHWPerfEnableCounters +@Description Enable the performance counter block for one or more device + layout modules. +@Input hDevData Handle to connection/device object +@Input ui32NumBlocks Number of elements in the array +@Input aeBlockIDs An array of bytes with values taken from the + RGX_HWPERF_CNTBLK_ID enumeration. +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV RGXHWPerfEnableCounters( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + IMG_UINT32 ui32NumBlocks, + IMG_UINT16* aeBlockIDs); + +/****************************************************************************** + * RGX HW Performance Profiling Retrieval API(s) + * + * The client must ensure their use of this acquire/release API for a single + * connection/stream must not be shared with multiple execution contexts e.g. + * between a kernel thread and an ISR handler. It is the client's + * responsibility to ensure this API is not interrupted by a high priority + * thread/ISR + *****************************************************************************/ + +/*************************************************************************/ /*! +@Function RGXHWPerfAcquireEvents +@Description When there is data available to read this call returns with OK + and the address and length of the data buffer the client can + safely read. This buffer may contain one or more event packets. + When there is no data to read, this call returns with OK and + sets *puiBufLen to 0 on exit. + Clients must pair this call with a ReleaseEvents call. 
+@Input hDevData Handle to connection/device object +@Input eStreamId ID of the HWPerf stream +@Output ppBuf Address of a pointer to a byte buffer. On exit it + contains the address of buffer to read from +@Output pui32BufLen Pointer to an integer. On exit it is the size of + the data to read from the buffer +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfAcquireEvents( + IMG_HANDLE hDevData, + RGX_HWPERF_STREAM_ID eStreamId, + IMG_PBYTE* ppBuf, + IMG_UINT32* pui32BufLen); + + +/*************************************************************************/ /*! +@Function RGXHWPerfReleaseEvents +@Description Called after client has read the event data out of the buffer + retrieved from the Acquire Events call to release resources. +@Input hDevData Handle to connection/device object +@Input eStreamId ID of the HWPerf stream +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR RGXHWPerfReleaseEvents( + IMG_HANDLE hDevData, + RGX_HWPERF_STREAM_ID eStreamId); + + +/*************************************************************************/ /*! +@Function RGXHWPerfConvertCRTimeStamp +@Description Converts the timestamp given by FW events to the common OS + timestamp. The first three inputs are obtained via a CLK_SYNC + event, ui64CRTimeStamp is the CR timestamp from the FW event + to be converted. 
+@Input ui32ClkSpeed Clock speed given by sync event +@Input ui64CorrCRTimeStamp CR Timestamp given by sync event +@Input ui64CorrOSTimeStamp Correlating OS Timestamp given by sync + event +@Input ui64CRTimeStamp CR Timestamp to convert +@Return IMG_UINT64 Calculated OS Timestamp +*/ /**************************************************************************/ +IMG_UINT64 RGXHWPerfConvertCRTimeStamp( + IMG_UINT32 ui32ClkSpeed, + IMG_UINT64 ui64CorrCRTimeStamp, + IMG_UINT64 ui64CorrOSTimeStamp, + IMG_UINT64 ui64CRTimeStamp); + +#endif /* __RGXAPI_KM_H__ */ + +/****************************************************************************** + End of file (rgxapi_km.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/include/volcanic/rgxheapconfig.h b/drivers/mcst/gpu-imgtec/services/include/volcanic/rgxheapconfig.h new file mode 100644 index 000000000000..e08615409d86 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/include/volcanic/rgxheapconfig.h @@ -0,0 +1,172 @@ +/*************************************************************************/ /*! +@File +@Title device configuration +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Memory heaps device specific configuration +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXHEAPCONFIG_H +#define RGXHEAPCONFIG_H + +#include "rgxdefs_km.h" + +/* + RGX Device Virtual Address Space Definitions: + + Notes: + Base addresses have to be a multiple of 4MiB + + RGX_PDSCODEDATA_HEAP_BASE and RGX_USCCODE_HEAP_BASE will be programmed, on a + global basis, into RGX_CR_PDS_EXEC_BASE and RGX_CR_USC_CODE_BASE_* + respectively. Therefore if clients use multiple configs they must still be + consistent with their definitions for these heaps. 
+ + Shared virtual memory (GENERAL_SVM) support requires half of the address + space be reserved for SVM allocations unless BRN fixes are required in + which case the SVM heap is disabled. + + Variable page-size heap (GENERAL_NON4K) support splits available fixed + 4K page-size heap (GENERAL) address space in half. The actual page size + defaults to 16K; AppHint PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE + can be used to forced it to these values: 4K,64K,256K,1M,2M. +*/ + + /* GENERAL_SVM_HEAP - Start at 4MiB, Size of 512GiB less 4MiB */ + /* 0x0000400000 - 0x7FFFC00000 */ + #define RGX_GENERAL_SVM_HEAP_BASE IMG_UINT64_C(0x0000400000) + #define RGX_GENERAL_SVM_HEAP_SIZE IMG_UINT64_C(0x7FFFC00000) + + /* GENERAL_HEAP - Start at 512GiB, Size of 64 GiB (Available 128GB) */ + /* 0x8000000000 - 0x8FFFFFFFFF */ + #define RGX_GENERAL_HEAP_BASE IMG_UINT64_C(0x8000000000) + #define RGX_GENERAL_HEAP_SIZE IMG_UINT64_C(0x1000000000) + + #define RGX_GENERAL_NON4K_HEAP_BASE (RGX_GENERAL_HEAP_BASE+RGX_GENERAL_HEAP_SIZE) + #define RGX_GENERAL_NON4K_HEAP_SIZE RGX_GENERAL_HEAP_SIZE + + /* Start at 640GiB. 
Size of 1 GiB */ + #define RGX_VK_CAPT_REPLAY_BUF_HEAP_BASE (RGX_GENERAL_NON4K_HEAP_BASE + RGX_GENERAL_NON4K_HEAP_SIZE) + #define RGX_VK_CAPT_REPLAY_BUF_HEAP_SIZE IMG_UINT64_C(0x0040000000) + + /* BIF_TILING_HEAP - 660GiB, Size of 32GiB */ + /* 0xA500000000 - 0xA6FFFFFFFF */ + #define RGX_BIF_TILING_NUM_HEAPS 4 + #define RGX_BIF_TILING_HEAP_SIZE IMG_UINT64_C(0x0200000000) + #define RGX_BIF_TILING_HEAP_1_BASE IMG_UINT64_C(0xA500000000) + #define RGX_BIF_TILING_HEAP_2_BASE (RGX_BIF_TILING_HEAP_1_BASE + RGX_BIF_TILING_HEAP_SIZE) + #define RGX_BIF_TILING_HEAP_3_BASE (RGX_BIF_TILING_HEAP_2_BASE + RGX_BIF_TILING_HEAP_SIZE) + #define RGX_BIF_TILING_HEAP_4_BASE (RGX_BIF_TILING_HEAP_3_BASE + RGX_BIF_TILING_HEAP_SIZE) + + /* PDSCODEDATA_HEAP - Start at 700GiB, Size of 4 GiB */ + /* 0xAF00000000 - 0xAFFFFFFFFF */ + #define RGX_PDSCODEDATA_HEAP_BASE IMG_UINT64_C(0xAF00000000) + #define RGX_PDSCODEDATA_HEAP_SIZE IMG_UINT64_C(0x0100000000) + + /* USCCODE_HEAP - Start at 708GiB, Size of 4GiB */ + /* 0xB100000000 - 0xB1FFFFFFFF */ + #define RGX_USCCODE_HEAP_BASE IMG_UINT64_C(0xB100000000) + #define RGX_USCCODE_HEAP_SIZE IMG_UINT64_C(0x0100000000) + + /* Start at 903GiB. Firmware heaps defined in rgxdefs_km.h + RGX_FIRMWARE_RAW_HEAP_BASE + RGX_FIRMWARE_HOST_MAIN_HEAP_BASE + RGX_FIRMWARE_GUEST_MAIN_HEAP_BASE + RGX_FIRMWARE_MAIN_HEAP_SIZE + RGX_FIRMWARE_CONFIG_HEAP_SIZE + RGX_FIRMWARE_RAW_HEAP_SIZE */ + + /* TQ3DPARAMETERS_HEAP - Start at 912GiB, Size 16GiB */ + /* 0xE400000000 - 0xE7FFFFFFFF (16GiB aligned to match RGX_CR_ISP_PIXEL_BASE) */ + #define RGX_TQ3DPARAMETERS_HEAP_BASE IMG_UINT64_C(0xE400000000) + #define RGX_TQ3DPARAMETERS_HEAP_SIZE IMG_UINT64_C(0x0400000000) + + /* CDM Signals heap (31 signals less one reserved for Services). 
Start at 936GiB, 960bytes rounded up to 4K */ + #define RGX_SIGNALS_HEAP_BASE IMG_UINT64_C(0xEA00000000) + #define RGX_SIGNALS_HEAP_SIZE IMG_UINT64_C(0x0000001000) + + /* COMPONENT_CTRL_HEAP - Start at 940GiB, Size 4GiB */ + /* 0xEB00000000 - 0xEBFFFFFFFF */ + #define RGX_COMPONENT_CTRL_HEAP_BASE IMG_UINT64_C(0xEB00000000) + #define RGX_COMPONENT_CTRL_HEAP_SIZE IMG_UINT64_C(0x0100000000) + + /* FBCDC_HEAP - Start at 944GiB, Size 2MiB */ + /* 0xEC00000000 - 0xEC001FFFFF */ + #define RGX_FBCDC_HEAP_BASE IMG_UINT64_C(0xEC00000000) + #define RGX_FBCDC_HEAP_SIZE IMG_UINT64_C(0x0000200000) + + /* FBCDC_LARGE_HEAP - Start at 945GiB, Size 2MiB */ + /* 0xEC40000000 - 0xEC401FFFFF */ + #define RGX_FBCDC_LARGE_HEAP_BASE IMG_UINT64_C(0xEC40000000) + #define RGX_FBCDC_LARGE_HEAP_SIZE IMG_UINT64_C(0x0000200000) + + /* PDS_INDIRECT_STATE_HEAP - Start at 948GiB, Size 16MiB */ + /* 0xED00000000 - 0xED00FFFFFF */ + #define RGX_PDS_INDIRECT_STATE_HEAP_BASE IMG_UINT64_C(0xED00000000) + #define RGX_PDS_INDIRECT_STATE_HEAP_SIZE IMG_UINT64_C(0x0001000000) + + /* RGX_TDM_TPU_YUV_COEFFS_HEAP - Start at 952GiB, Size 256KiB */ + /* 0xEE00080000 - 0xEE000BFFFF - (TDM TPU YUV coeffs - can fit 1 page) */ + #define RGX_TDM_TPU_YUV_COEFFS_HEAP_BASE IMG_UINT64_C(0xEE00080000) + #define RGX_TDM_TPU_YUV_COEFFS_HEAP_SIZE IMG_UINT64_C(0x0000040000) + + /* TEXTURE_STATE_HEAP - Start at 960GiB, Size 4GiB */ + /* 0xF000000000 - 0xF0FFFFFFFF (36-bit aligned) */ + #define RGX_TEXTURE_STATE_HEAP_BASE IMG_UINT64_C(0xF000000000) + #define RGX_TEXTURE_STATE_HEAP_SIZE IMG_UINT64_C(0x0100000000) + + /* VISIBILITY_TEST_HEAP - Start at 970GiB, Size 2MiB */ + #define RGX_VISIBILITY_TEST_HEAP_BASE IMG_UINT64_C(0xF280000000) + #define RGX_VISIBILITY_TEST_HEAP_SIZE IMG_UINT64_C(0x0000200000) + + /* Heaps which are barred from using the reserved-region feature (intended for clients + of Services), but need the macro definitions are buried here */ + #define RGX_GENERAL_SVM_HEAP_RESERVED_SIZE 0 /* SVM heap is exclusively 
managed by USER or KERNEL */ + #define RGX_GENERAL_NON4K_HEAP_RESERVED_SIZE 0 /* Non-4K can have page sizes up to 2MB, which is currently + not supported in reserved-heap implementation */ + /* ... and heaps which are not used outside of Services */ + #define RGX_TQ3DPARAMETERS_HEAP_RESERVED_SIZE 0 + #define RGX_TDM_TPU_YUV_COEFFS_HEAP_RESERVED_SIZE 0 + /* signal we've identified the core by the build */ + #define RGX_CORE_IDENTIFIED + +#endif /* RGXHEAPCONFIG_H */ + +/****************************************************************************** + End of file (rgxheapconfig.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/common/cache_km.c b/drivers/mcst/gpu-imgtec/services/server/common/cache_km.c new file mode 100644 index 000000000000..f026374054e7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/cache_km.c @@ -0,0 +1,2585 @@ +/*************************************************************************/ /*! +@File cache_km.c +@Title CPU d-cache maintenance operations framework +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements server side code for CPU d-cache maintenance taking + into account the idiosyncrasies of the various types of CPU + d-cache instruction-set architecture (ISA) maintenance + mechanisms. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#if defined(LINUX) +#include +#include +#include +#include +#include +#include +#endif + +#include "pmr.h" +#include "log2.h" +#include "device.h" +#include "pvrsrv.h" +#include "osfunc.h" +#include "cache_km.h" +#include "pvr_debug.h" +#include "lock_types.h" +#include "allocmem.h" +#include "process_stats.h" +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) +#include "ri_server.h" +#endif +#include "devicemem.h" +#include "pvrsrv_apphint.h" +#include "pvrsrv_sync_server.h" +#include "km_apphint_defs.h" +#include "di_server.h" + +/* This header must always be included last */ +#if defined(LINUX) +#include "kernel_compatibility.h" +#endif + +/* Top-level file-local build definitions */ +#if defined(PVRSRV_ENABLE_CACHEOP_STATS) && defined(LINUX) +#define CACHEOP_DEBUG +#define CACHEOP_STATS_ITEMS_MAX 32 +#define INCR_WRAP(x) ((x+1) >= CACHEOP_STATS_ITEMS_MAX ? 0 : (x+1)) +#define DECR_WRAP(x) ((x-1) < 0 ? 
(CACHEOP_STATS_ITEMS_MAX-1) : (x-1)) +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) +/* Refer to CacheOpStatsExecLogHeader() for header item names */ +#define CACHEOP_RI_PRINTF_HEADER "%-8s %-10s %-10s %-5s %-16s %-16s %-10s %-10s %-18s %-18s %-12s" +#define CACHEOP_RI_PRINTF "%-8d %-10s %-10s %-5s 0x%-14llx 0x%-14llx 0x%-8llx 0x%-8llx %-18llu %-18llu 0x%-10x\n" +#else +#define CACHEOP_PRINTF_HEADER "%-8s %-10s %-10s %-5s %-10s %-10s %-18s %-18s %-12s" +#define CACHEOP_PRINTF "%-8d %-10s %-10s %-5s 0x%-8llx 0x%-8llx %-18llu %-18llu 0x%-10x\n" +#endif +#endif + +//#define CACHEOP_NO_CACHE_LINE_ALIGNED_ROUNDING /* Force OS page (not cache line) flush granularity */ +#define CACHEOP_PVR_ASSERT(x) /* Define as PVR_ASSERT(x), enable for swdev & testing */ +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) +#define CACHEOP_THREAD_WAIT_TIMEOUT 0ULL /* Wait indefinitely */ +#else +#define CACHEOP_THREAD_WAIT_TIMEOUT 500000ULL /* Wait 500ms between wait unless woken-up on demand */ +#endif +#define CACHEOP_FENCE_WAIT_TIMEOUT 1000ULL /* Wait 1ms between wait events unless woken-up */ +#define CACHEOP_FENCE_RETRY_ABORT 1000ULL /* Fence retries that aborts fence operation */ +#define CACHEOP_SEQ_MIDPOINT (IMG_UINT32) 0x7FFFFFFF /* Where seqNum(s) are rebase, compared at */ +#define CACHEOP_ABORT_FENCE_ERROR_STRING "detected stalled client, retrying cacheop fence" +#define CACHEOP_DEVMEM_OOR_ERROR_STRING "cacheop device memory request is out of range" +#define CACHEOP_MAX_DEBUG_MESSAGE_LEN 160 + +typedef struct _CACHEOP_WORK_ITEM_ +{ + PMR *psPMR; + IMG_UINT32 ui32OpSeqNum; + IMG_DEVMEM_SIZE_T uiSize; + PVRSRV_CACHE_OP uiCacheOp; + IMG_DEVMEM_OFFSET_T uiOffset; + PVRSRV_TIMELINE iTimeline; + SYNC_TIMELINE_OBJ sSWTimelineObj; + PVRSRV_DEVICE_NODE *psDevNode; +#if defined(CACHEOP_DEBUG) + IMG_UINT64 ui64EnqueuedTime; + IMG_UINT64 ui64DequeuedTime; + IMG_UINT64 ui64ExecuteTime; + IMG_BOOL bDeferred; + IMG_BOOL bKMReq; + IMG_BOOL bUMF; + IMG_PID pid; +#if 
defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + RGXFWIF_DM eFenceOpType; +#endif +#endif +} CACHEOP_WORK_ITEM; + +typedef struct _CACHEOP_STATS_EXEC_ITEM_ +{ + IMG_PID pid; + IMG_UINT32 ui32OpSeqNum; + PVRSRV_CACHE_OP uiCacheOp; + IMG_DEVMEM_SIZE_T uiOffset; + IMG_DEVMEM_SIZE_T uiSize; + IMG_UINT64 ui64EnqueuedTime; + IMG_UINT64 ui64DequeuedTime; + IMG_UINT64 ui64ExecuteTime; + IMG_BOOL bIsFence; + IMG_BOOL bKMReq; + IMG_BOOL bUMF; + IMG_BOOL bDeferred; +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + IMG_DEV_VIRTADDR sDevVAddr; + IMG_DEV_PHYADDR sDevPAddr; + RGXFWIF_DM eFenceOpType; +#endif +} CACHEOP_STATS_EXEC_ITEM; + +typedef enum _CACHEOP_CONFIG_ +{ + CACHEOP_CONFIG_DEFAULT = 0, + /* cache flush mechanism types */ + CACHEOP_CONFIG_URBF = 4, + /* sw-emulated deferred flush mechanism */ + CACHEOP_CONFIG_KDF = 8, + /* pseudo configuration items */ + CACHEOP_CONFIG_LAST = 16, + CACHEOP_CONFIG_KLOG = 16, + CACHEOP_CONFIG_ALL = 31 +} CACHEOP_CONFIG; + +typedef struct _CACHEOP_WORK_QUEUE_ +{ +/* + * Init. state & primary device node framework + * is anchored on. + */ + IMG_BOOL bInit; +/* + MMU page size/shift & d-cache line size + */ + size_t uiPageSize; + IMG_UINT32 uiLineSize; + IMG_UINT32 uiLineShift; + IMG_UINT32 uiPageShift; + OS_CACHE_OP_ADDR_TYPE uiCacheOpAddrType; +/* + CacheOp deferred queueing protocol + + Implementation geared for performance, atomic counter based + - Value Space is 0 -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> n. + - Index Space is 0 -> 1 -> 2 -> 3 -> 0 -> 1 -> 2 -> 3 -> 0 -> m. + - Index = Value modulo CACHEOP_INDICES_LOG2_SIZE. + + Write counter never collides with read counter in index space + - Unless at start of day when both are initialised to zero. + - This means we sacrifice one entry when the queue is full. + - Incremented by producer + - Value space tracks total number of CacheOps queued. + - Index space identifies CacheOp CCB queue index. 
+ + Read counter increments towards write counter in value space + - Empty queue occurs when read equals write counter. + - Wrap-round logic handled by consumer as/when needed. + - Incremented by consumer + - Value space tracks total # of CacheOps executed. + - Index space identifies CacheOp CCB queue index. + + Total queued size adjusted up/down during write/read activity + - Counter might overflow but does not compromise framework. + */ + ATOMIC_T hReadCounter; + ATOMIC_T hWriteCounter; +/* + CacheOp sequence numbers + + hCommonSeqNum: + - Common sequence, numbers every CacheOp operation in both UM/KM. + - In KM + - Every deferred CacheOp (on behalf of UM) gets a unique seqNum. + - Last executed deferred CacheOp updates gsCwq.hCompletedSeqNum. + - Under debug, all CacheOp gets a unique seqNum for tracking. + - This includes all UM/KM synchronous non-deferred CacheOp(s) + - In UM + - CacheOp(s) discarding happens in both UM and KM space. + + hCompletedSeqNum: + - Tracks last executed KM/deferred RBF/Global CacheOp(s) + */ + ATOMIC_T hCommonSeqNum; + ATOMIC_T hCompletedSeqNum; +/* + CacheOp information page + + psInfoPagePMR: + - Single system-wide OS page that is multi-mapped in UM/KM. + - Mapped into clients using read-only memory protection. + - Mapped into server using read/write memory protection. + - Contains information pertaining to cache framework. + + pui32InfoPage: + - Server linear address pointer to said information page. + - Each info-page entry currently of sizeof(IMG_UINT32). 
+ */ + PMR *psInfoPagePMR; + IMG_UINT32 *pui32InfoPage; +/* + CacheOp deferred work-item queue + + CACHEOP_INDICES_LOG2_SIZE + */ +#define CACHEOP_INDICES_LOG2_SIZE (4) +#define CACHEOP_INDICES_MAX (1 << CACHEOP_INDICES_LOG2_SIZE) +#define CACHEOP_INDICES_MASK (CACHEOP_INDICES_MAX-1) + CACHEOP_WORK_ITEM asWorkItems[CACHEOP_INDICES_MAX]; +#if defined(CACHEOP_DEBUG) +/* + CacheOp statistics + */ + DI_ENTRY *psDIEntry; + IMG_HANDLE hStatsExecLock; + IMG_UINT32 ui32ServerASync; + IMG_UINT32 ui32ServerSyncVA; + IMG_UINT32 ui32ServerSync; + IMG_UINT32 ui32ServerRBF; + IMG_UINT32 ui32ServerDTL; + IMG_UINT32 ui32ClientSync; + IMG_UINT32 ui32ClientRBF; + IMG_UINT32 ui32TotalFenceOps; + IMG_UINT32 ui32TotalExecOps; + IMG_UINT32 ui32AvgExecTime; + IMG_UINT32 ui32AvgExecTimeRemainder; + IMG_UINT32 ui32AvgFenceTime; + IMG_UINT32 ui32AvgFenceTimeRemainder; + IMG_INT32 i32StatsExecWriteIdx; + CACHEOP_STATS_EXEC_ITEM asStatsExecuted[CACHEOP_STATS_ITEMS_MAX]; +#endif +/* + CacheOp (re)configuration + */ + DI_ENTRY *psConfigTune; + IMG_HANDLE hConfigLock; +/* + CacheOp deferred worker thread + + eConfig + - Runtime configuration + + hWorkerThread + - CacheOp thread handler + + hThreadWakeUpEvtObj + - Event object to drive CacheOp worker thread sleep/wake-ups. + + hClientWakeUpEvtObj + - Event object to unblock stalled clients waiting on queue. + */ + CACHEOP_CONFIG eConfig; + IMG_UINT32 ui32Config; + IMG_HANDLE hWorkerThread; + IMG_HANDLE hDeferredLock; + IMG_HANDLE hThreadWakeUpEvtObj; + IMG_HANDLE hClientWakeUpEvtObj; + IMG_UINT32 ui32FenceWaitTimeUs; + IMG_UINT32 ui32FenceRetryAbort; + IMG_BOOL bSupportsUMFlush; +} CACHEOP_WORK_QUEUE; + +/* Top-level CacheOp framework object */ +static CACHEOP_WORK_QUEUE gsCwq; + +#define CacheOpConfigSupports(e) ((gsCwq.eConfig & (e)) ? 
IMG_TRUE : IMG_FALSE) + +static INLINE IMG_UINT32 CacheOpIdxRead(ATOMIC_T *phCounter) +{ + IMG_UINT32 ui32Idx = OSAtomicRead(phCounter); + return ui32Idx & CACHEOP_INDICES_MASK; +} + +static INLINE IMG_UINT32 CacheOpIdxIncrement(ATOMIC_T *phCounter) +{ + IMG_UINT32 ui32Idx = OSAtomicIncrement(phCounter); + return ui32Idx & CACHEOP_INDICES_MASK; +} + +static INLINE IMG_UINT32 CacheOpIdxNext(ATOMIC_T *phCounter) +{ + IMG_UINT32 ui32Idx = OSAtomicRead(phCounter); + return ++ui32Idx & CACHEOP_INDICES_MASK; +} + +static INLINE IMG_UINT32 CacheOpIdxSpan(ATOMIC_T *phLhs, ATOMIC_T *phRhs) +{ + return OSAtomicRead(phLhs) - OSAtomicRead(phRhs); +} + +/* Callback to dump info of cacheop thread in debug_dump */ +static void CacheOpThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVR_DUMPDEBUG_LOG(" Configuration: QSZ: %d, UKT: %d, KDFT: %d, " + "LINESIZE: %d, PGSIZE: %d, KDF: %s, " + "URBF: %s", + CACHEOP_INDICES_MAX, + gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD], + gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD], + gsCwq.pui32InfoPage[CACHEOP_INFO_LINESIZE], + gsCwq.pui32InfoPage[CACHEOP_INFO_PGSIZE], + gsCwq.eConfig & CACHEOP_CONFIG_KDF ? "Yes" : "No", + gsCwq.eConfig & CACHEOP_CONFIG_URBF ? 
"Yes" : "No" + ); + PVR_DUMPDEBUG_LOG(" Pending deferred CacheOp entries : %u", + CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter)); +} + +#if defined(CACHEOP_DEBUG) +static INLINE void CacheOpStatsExecLogHeader(IMG_CHAR szBuffer[CACHEOP_MAX_DEBUG_MESSAGE_LEN]) +{ + OSSNPrintf(szBuffer, CACHEOP_MAX_DEBUG_MESSAGE_LEN, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + CACHEOP_RI_PRINTF_HEADER, +#else + CACHEOP_PRINTF_HEADER, +#endif + "Pid", + "CacheOp", + " Type", + "Mode", +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + "DevVAddr", + "DevPAddr", +#endif + "Offset", + "Size", + "xTime (us)", + "qTime (us)", + "SeqNum"); +} + +static void CacheOpStatsExecLogWrite(CACHEOP_WORK_ITEM *psCacheOpWorkItem) +{ + IMG_UINT64 ui64ExecuteTime; + IMG_UINT64 ui64EnqueuedTime; + IMG_INT32 i32WriteOffset; + + if (!psCacheOpWorkItem->ui32OpSeqNum && !psCacheOpWorkItem->uiCacheOp) + { + /* This breaks the logic of read-out, so we do not queue items + with zero sequence number and no CacheOp */ + return; + } + else if (psCacheOpWorkItem->bKMReq && !CacheOpConfigSupports(CACHEOP_CONFIG_KLOG)) + { + /* KM logs spams the history due to frequency, this removes it completely */ + return; + } + + OSLockAcquire(gsCwq.hStatsExecLock); + + i32WriteOffset = gsCwq.i32StatsExecWriteIdx; + gsCwq.asStatsExecuted[i32WriteOffset].pid = psCacheOpWorkItem->pid; + gsCwq.i32StatsExecWriteIdx = INCR_WRAP(gsCwq.i32StatsExecWriteIdx); + gsCwq.asStatsExecuted[i32WriteOffset].bUMF = psCacheOpWorkItem->bUMF; + gsCwq.asStatsExecuted[i32WriteOffset].uiSize = psCacheOpWorkItem->uiSize; + gsCwq.asStatsExecuted[i32WriteOffset].bKMReq = psCacheOpWorkItem->bKMReq; + gsCwq.asStatsExecuted[i32WriteOffset].uiOffset = psCacheOpWorkItem->uiOffset; + gsCwq.asStatsExecuted[i32WriteOffset].uiCacheOp = psCacheOpWorkItem->uiCacheOp; + gsCwq.asStatsExecuted[i32WriteOffset].bDeferred = psCacheOpWorkItem->bDeferred; + gsCwq.asStatsExecuted[i32WriteOffset].ui32OpSeqNum = 
psCacheOpWorkItem->ui32OpSeqNum; + gsCwq.asStatsExecuted[i32WriteOffset].ui64ExecuteTime = psCacheOpWorkItem->ui64ExecuteTime; + gsCwq.asStatsExecuted[i32WriteOffset].ui64EnqueuedTime = psCacheOpWorkItem->ui64EnqueuedTime; + gsCwq.asStatsExecuted[i32WriteOffset].ui64DequeuedTime = psCacheOpWorkItem->ui64DequeuedTime; + /* During early system initialisation, only non-fence & non-PMR CacheOps are processed */ + gsCwq.asStatsExecuted[i32WriteOffset].bIsFence = gsCwq.bInit && !psCacheOpWorkItem->psPMR; + CACHEOP_PVR_ASSERT(gsCwq.asStatsExecuted[i32WriteOffset].pid); +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + if (gsCwq.bInit && psCacheOpWorkItem->psPMR) + { + IMG_CPU_PHYADDR sDevPAddr; + PVRSRV_ERROR eError; + IMG_BOOL bValid; + + /* Get more detailed information regarding the sub allocations that + PMR has from RI manager for process that requested the CacheOp */ + eError = RIDumpProcessListKM(psCacheOpWorkItem->psPMR, + gsCwq.asStatsExecuted[i32WriteOffset].pid, + gsCwq.asStatsExecuted[i32WriteOffset].uiOffset, + &gsCwq.asStatsExecuted[i32WriteOffset].sDevVAddr); + PVR_GOTO_IF_ERROR(eError, e0); + + /* (Re)lock here as some PMR might have not been locked */ + eError = PMRLockSysPhysAddresses(psCacheOpWorkItem->psPMR); + PVR_GOTO_IF_ERROR(eError, e0); + + eError = PMR_CpuPhysAddr(psCacheOpWorkItem->psPMR, + gsCwq.uiPageShift, + 1, + gsCwq.asStatsExecuted[i32WriteOffset].uiOffset, + &sDevPAddr, + &bValid); + if (eError != PVRSRV_OK) + { + eError = PMRUnlockSysPhysAddresses(psCacheOpWorkItem->psPMR); + PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses"); + goto e0; + } + + eError = PMRUnlockSysPhysAddresses(psCacheOpWorkItem->psPMR); + PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses"); + + gsCwq.asStatsExecuted[i32WriteOffset].sDevPAddr.uiAddr = sDevPAddr.uiAddr; + } + + if (gsCwq.asStatsExecuted[i32WriteOffset].bIsFence) + { + gsCwq.asStatsExecuted[i32WriteOffset].eFenceOpType = psCacheOpWorkItem->eFenceOpType; + } +#endif + + { + /* Convert 
timing from nanoseconds to microseconds */ + IMG_UINT64 ui64ExecuteTimeNs = gsCwq.asStatsExecuted[i32WriteOffset].ui64ExecuteTime; + IMG_UINT64 ui64EnqueuedTimeNs = gsCwq.asStatsExecuted[i32WriteOffset].ui64EnqueuedTime; + + do_div(ui64ExecuteTimeNs, 1000); + do_div(ui64EnqueuedTimeNs, 1000); + + ui64ExecuteTime = ui64ExecuteTimeNs; + ui64EnqueuedTime = ui64EnqueuedTimeNs; + } + + /* Coalesced deferred CacheOps do not contribute to statistics, + as both enqueue/execute time is identical for these CacheOps */ + if (!gsCwq.asStatsExecuted[i32WriteOffset].bIsFence) + { + /* Calculate the approximate cumulative moving average execution time. + * This calculation is based on standard equation: + * + * CMAnext = (new + count * CMAprev) / (count + 1) + * + * but in simplified form: + * + * CMAnext = CMAprev + (new - CMAprev) / (count + 1) + * + * this gets rid of multiplication and prevents overflow. + * + * Also to increase accuracy that we lose with integer division, + * we hold the moving remainder of the division and add it. + * + * CMAnext = CMAprev + (new - CMAprev + CMRprev) / (count + 1) + * + * Multiple tests proved it to be the best solution for approximating + * CMA using integers. 
+ * + */ + + IMG_UINT32 ui32Time = ui64ExecuteTime - ui64EnqueuedTime; + IMG_INT32 i32Div = (IMG_INT32)ui32Time - (IMG_INT32)gsCwq.ui32AvgExecTime + (IMG_INT32)gsCwq.ui32AvgExecTimeRemainder; + + gsCwq.ui32AvgExecTime += i32Div / (IMG_INT32)(gsCwq.ui32TotalExecOps + 1); + gsCwq.ui32AvgExecTimeRemainder = i32Div % (IMG_INT32)(gsCwq.ui32TotalExecOps + 1); + + gsCwq.ui32TotalExecOps++; + } + + if (!gsCwq.asStatsExecuted[i32WriteOffset].bKMReq) + { + /* This operation queues only UM CacheOp in per-PID process statistics database */ + PVRSRVStatsUpdateCacheOpStats(gsCwq.asStatsExecuted[i32WriteOffset].uiCacheOp, + gsCwq.asStatsExecuted[i32WriteOffset].ui32OpSeqNum, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + gsCwq.asStatsExecuted[i32WriteOffset].sDevVAddr, + gsCwq.asStatsExecuted[i32WriteOffset].sDevPAddr, + gsCwq.asStatsExecuted[i32WriteOffset].eFenceOpType, +#endif + gsCwq.asStatsExecuted[i32WriteOffset].uiOffset, + gsCwq.asStatsExecuted[i32WriteOffset].uiSize, + ui64ExecuteTime-ui64EnqueuedTime, + gsCwq.asStatsExecuted[i32WriteOffset].bUMF, + gsCwq.asStatsExecuted[i32WriteOffset].bIsFence, + psCacheOpWorkItem->pid); + } + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) +e0: +#endif + OSLockRelease(gsCwq.hStatsExecLock); +} + +static int CacheOpStatsExecLogRead(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + IMG_CHAR *pszFlushype; + IMG_CHAR *pszCacheOpType; + IMG_CHAR *pszFlushSource; + IMG_INT32 i32ReadOffset; + IMG_INT32 i32WriteOffset; + IMG_UINT64 ui64EnqueuedTime; + IMG_UINT64 ui64DequeuedTime; + IMG_UINT64 ui64ExecuteTime; + IMG_CHAR szBuffer[CACHEOP_MAX_DEBUG_MESSAGE_LEN] = {0}; + PVR_UNREFERENCED_PARAMETER(pvData); + + OSLockAcquire(gsCwq.hStatsExecLock); + + DIPrintf(psEntry, + "Primary CPU d-cache architecture: LSZ: 0x%d, URBF: %s\n", + gsCwq.uiLineSize, + gsCwq.bSupportsUMFlush ? 
"Yes" : "No" + ); + + DIPrintf(psEntry, + "Configuration: QSZ: %d, UKT: %d, KDFT: %d, KDF: %s, URBF: %s\n", + CACHEOP_INDICES_MAX, + gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD], + gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD], + gsCwq.eConfig & CACHEOP_CONFIG_KDF ? "Yes" : "No", + gsCwq.eConfig & CACHEOP_CONFIG_URBF ? "Yes" : "No" + ); + + DIPrintf(psEntry, + "Summary: OP[F][TL] (tot.avg): %d.%d/%d.%d/%d, [KM][UM][A]SYNC: %d.%d/%d/%d, RBF (um/km): %d/%d\n", + gsCwq.ui32TotalExecOps, gsCwq.ui32AvgExecTime, gsCwq.ui32TotalFenceOps, gsCwq.ui32AvgFenceTime, gsCwq.ui32ServerDTL, + gsCwq.ui32ServerSync, gsCwq.ui32ServerSyncVA, gsCwq.ui32ClientSync, gsCwq.ui32ServerASync, + gsCwq.ui32ClientRBF, gsCwq.ui32ServerRBF + ); + + CacheOpStatsExecLogHeader(szBuffer); + DIPrintf(psEntry, "%s\n", szBuffer); + + i32WriteOffset = gsCwq.i32StatsExecWriteIdx; + for (i32ReadOffset = DECR_WRAP(i32WriteOffset); + i32ReadOffset != i32WriteOffset; + i32ReadOffset = DECR_WRAP(i32ReadOffset)) + { + if (!gsCwq.asStatsExecuted[i32ReadOffset].ui32OpSeqNum && + !gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp) + { + break; + } + + { + /* Convert from nano-seconds to micro-seconds */ + IMG_UINT64 ui64ExecuteTimeNs = gsCwq.asStatsExecuted[i32ReadOffset].ui64ExecuteTime; + IMG_UINT64 ui64EnqueuedTimeNs = gsCwq.asStatsExecuted[i32ReadOffset].ui64EnqueuedTime; + IMG_UINT64 ui64DequeuedTimeNs = gsCwq.asStatsExecuted[i32ReadOffset].ui64DequeuedTime; + + do_div(ui64ExecuteTimeNs, 1000); + do_div(ui64EnqueuedTimeNs, 1000); + do_div(ui64DequeuedTimeNs, 1000); + + ui64ExecuteTime = ui64ExecuteTimeNs; + ui64EnqueuedTime = ui64EnqueuedTimeNs; + ui64DequeuedTime = ui64DequeuedTimeNs; + } + + if (gsCwq.asStatsExecuted[i32ReadOffset].bIsFence) + { + IMG_CHAR *pszMode = ""; + IMG_CHAR *pszFenceType = ""; + pszCacheOpType = "Fence"; + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + switch (gsCwq.asStatsExecuted[i32ReadOffset].eFenceOpType) + { + case RGXFWIF_DM_GP: + pszFenceType = " GP "; + 
break; + + case RGXFWIF_DM_TDM: + pszFenceType = " TDM "; + break; + + case RGXFWIF_DM_GEOM: + pszFenceType = " GEOM"; + break; + + case RGXFWIF_DM_3D: + pszFenceType = " 3D "; + break; + + case RGXFWIF_DM_CDM: + pszFenceType = " CDM "; + break; + + default: + pszFenceType = " DM? "; + CACHEOP_PVR_ASSERT(0); + break; + } +#endif + + DIPrintf(psEntry, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + CACHEOP_RI_PRINTF, +#else + CACHEOP_PRINTF, +#endif + gsCwq.asStatsExecuted[i32ReadOffset].pid, + pszCacheOpType, + pszFenceType, + pszMode, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + 0ull, + 0ull, +#endif + gsCwq.asStatsExecuted[i32ReadOffset].uiOffset, + gsCwq.asStatsExecuted[i32ReadOffset].uiSize, + ui64ExecuteTime - ui64EnqueuedTime, + ui64DequeuedTime ? ui64DequeuedTime - ui64EnqueuedTime : 0, /* CacheOp might not have a valid DequeuedTime */ + gsCwq.asStatsExecuted[i32ReadOffset].ui32OpSeqNum); + } + else + { + IMG_DEVMEM_SIZE_T ui64NumOfPages; + + ui64NumOfPages = gsCwq.asStatsExecuted[i32ReadOffset].uiSize >> gsCwq.uiPageShift; + if (ui64NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC) + { + pszFlushype = "RBF.Fast"; + } + else + { + pszFlushype = "RBF.Slow"; + } + + if (gsCwq.asStatsExecuted[i32ReadOffset].bUMF) + { + pszFlushSource = " UM"; + } + else + { + /* + - Request originates directly from a KM thread or in KM (KM<), or + - Request originates from a UM thread and is KM deferred (KM+), or + */ + pszFlushSource = + gsCwq.asStatsExecuted[i32ReadOffset].bKMReq ? " KM<" : + gsCwq.asStatsExecuted[i32ReadOffset].bDeferred && gsCwq.asStatsExecuted[i32ReadOffset].ui64ExecuteTime ? " KM+" : + !gsCwq.asStatsExecuted[i32ReadOffset].ui64ExecuteTime ? 
" KM-" : " KM"; + } + + switch (gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp) + { + case PVRSRV_CACHE_OP_NONE: + pszCacheOpType = "None"; + break; + case PVRSRV_CACHE_OP_CLEAN: + pszCacheOpType = "Clean"; + break; + case PVRSRV_CACHE_OP_INVALIDATE: + pszCacheOpType = "Invalidate"; + break; + case PVRSRV_CACHE_OP_FLUSH: + pszCacheOpType = "Flush"; + break; + case PVRSRV_CACHE_OP_TIMELINE: + pszCacheOpType = "Timeline"; + pszFlushype = " "; + break; + default: + pszCacheOpType = "Unknown"; + gsCwq.asStatsExecuted[i32ReadOffset].ui32OpSeqNum = + (IMG_UINT32) gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp; + break; + } + + DIPrintf(psEntry, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + CACHEOP_RI_PRINTF, +#else + CACHEOP_PRINTF, +#endif + gsCwq.asStatsExecuted[i32ReadOffset].pid, + pszCacheOpType, + pszFlushype, + pszFlushSource, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + gsCwq.asStatsExecuted[i32ReadOffset].sDevVAddr.uiAddr, + gsCwq.asStatsExecuted[i32ReadOffset].sDevPAddr.uiAddr, +#endif + gsCwq.asStatsExecuted[i32ReadOffset].uiOffset, + gsCwq.asStatsExecuted[i32ReadOffset].uiSize, + ui64ExecuteTime - ui64EnqueuedTime, + ui64DequeuedTime ? 
ui64DequeuedTime - ui64EnqueuedTime : 0, /* CacheOp might not have a valid DequeuedTime */ + gsCwq.asStatsExecuted[i32ReadOffset].ui32OpSeqNum); + } + } + + OSLockRelease(gsCwq.hStatsExecLock); + + return 0; +} +#endif /* defined(CACHEOP_DEBUG) */ + +static INLINE void CacheOpStatsReset(void) +{ +#if defined(CACHEOP_DEBUG) + gsCwq.ui32TotalExecOps = 0; + gsCwq.ui32TotalFenceOps = 0; + gsCwq.ui32AvgExecTime = 0; + gsCwq.ui32AvgExecTimeRemainder = 0; + gsCwq.ui32AvgFenceTime = 0; + gsCwq.ui32AvgFenceTimeRemainder = 0; + gsCwq.ui32ClientRBF = 0; + gsCwq.ui32ClientSync = 0; + gsCwq.ui32ServerRBF = 0; + gsCwq.ui32ServerASync = 0; + gsCwq.ui32ServerSyncVA = 0; + gsCwq.ui32ServerSync = 0; + gsCwq.ui32ServerDTL = 0; + gsCwq.i32StatsExecWriteIdx = 0; + OSCachedMemSet(gsCwq.asStatsExecuted, 0, sizeof(gsCwq.asStatsExecuted)); +#endif +} + +static void CacheOpConfigUpdate(IMG_UINT32 ui32Config) +{ + OSLockAcquire(gsCwq.hConfigLock); + + /* Step 0, set the gsCwq.eConfig bits */ + if (!(ui32Config & (CACHEOP_CONFIG_LAST - 1))) + { + gsCwq.eConfig = CACHEOP_CONFIG_KDF; + if (gsCwq.bSupportsUMFlush) + { + gsCwq.eConfig |= CACHEOP_CONFIG_URBF; + } + } + else + { + if (ui32Config & CACHEOP_CONFIG_KDF) + { + gsCwq.eConfig |= CACHEOP_CONFIG_KDF; + } + else + { + gsCwq.eConfig &= ~CACHEOP_CONFIG_KDF; + } + + if (gsCwq.bSupportsUMFlush && (ui32Config & CACHEOP_CONFIG_URBF)) + { + gsCwq.eConfig |= CACHEOP_CONFIG_URBF; + } + else + { + gsCwq.eConfig &= ~CACHEOP_CONFIG_URBF; + } + } + + if (ui32Config & CACHEOP_CONFIG_KLOG) + { + /* Suppress logs from KM caller */ + gsCwq.eConfig |= CACHEOP_CONFIG_KLOG; + } + else + { + gsCwq.eConfig &= ~CACHEOP_CONFIG_KLOG; + } + + /* Step 1, set gsCwq.ui32Config based on gsCwq.eConfig */ + ui32Config = 0; + + if (gsCwq.eConfig & CACHEOP_CONFIG_KDF) + { + ui32Config |= CACHEOP_CONFIG_KDF; + } + if (gsCwq.eConfig & CACHEOP_CONFIG_URBF) + { + ui32Config |= CACHEOP_CONFIG_URBF; + } + if (gsCwq.eConfig & CACHEOP_CONFIG_KLOG) + { + ui32Config |= 
CACHEOP_CONFIG_KLOG; + } + gsCwq.ui32Config = ui32Config; + + + /* Step 3, in certain cases where a CacheOp/VA is provided, this threshold determines at what point + the optimisation due to the presence of said VA (i.e. us not having to remap the PMR pages in KM) + is clawed-back because of the overhead of maintaining such large request which might stalls the + user thread; so to hide this latency have these CacheOps executed on deferred CacheOp thread */ + gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD] = (IMG_UINT32)(PVR_DIRTY_BYTES_FLUSH_THRESHOLD >> 2); + + /* Step 4, if no UM support, all requests are done in KM so zero these forcing all client requests + to come down into the KM for maintenance */ + gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = 0; + + if (gsCwq.bSupportsUMFlush) + { + /* With URBF enabled we never go to the kernel */ + if (gsCwq.eConfig & CACHEOP_CONFIG_URBF) + { + gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = (IMG_UINT32)~0; + } + } + + /* Step 5, reset stats. */ + CacheOpStatsReset(); + + OSLockRelease(gsCwq.hConfigLock); +} + +static int CacheOpConfigRead(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVR_UNREFERENCED_PARAMETER(pvData); + DIPrintf(psEntry, + "KDF: %s, URBF: %s\n", + gsCwq.eConfig & CACHEOP_CONFIG_KDF ? "Yes" : "No", + gsCwq.eConfig & CACHEOP_CONFIG_URBF ? 
"Yes" : "No" + ); + return 0; +} + +static INLINE PVRSRV_ERROR CacheOpConfigQuery(const PVRSRV_DEVICE_NODE *psDevNode, + const void *psPrivate, + IMG_UINT32 *pui32Value) +{ + IMG_UINT32 ui32ID = (IMG_UINT32)(uintptr_t) psPrivate; + PVR_UNREFERENCED_PARAMETER(psDevNode); + + switch (ui32ID) + { + case APPHINT_ID_CacheOpConfig: + *pui32Value = gsCwq.ui32Config; + break; + + case APPHINT_ID_CacheOpUMKMThresholdSize: + *pui32Value = gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD]; + break; + + default: + break; + } + + return PVRSRV_OK; +} + +static INLINE PVRSRV_ERROR CacheOpConfigSet(const PVRSRV_DEVICE_NODE *psDevNode, + const void *psPrivate, + IMG_UINT32 ui32Value) +{ + IMG_UINT32 ui32ID = (IMG_UINT32)(uintptr_t) psPrivate; + PVR_UNREFERENCED_PARAMETER(psDevNode); + + switch (ui32ID) + { + case APPHINT_ID_CacheOpConfig: + CacheOpConfigUpdate(ui32Value & CACHEOP_CONFIG_ALL); + break; + + + case APPHINT_ID_CacheOpUMKMThresholdSize: + { + if (!ui32Value || !gsCwq.bSupportsUMFlush) + { + /* CPU ISA does not support UM flush, therefore every request goes down into + the KM, silently ignore request to adjust threshold */ + PVR_ASSERT(! 
gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD]); + break; + } + else if (ui32Value < gsCwq.uiPageSize) + { + /* Silently round-up to OS page size */ + ui32Value = gsCwq.uiPageSize; + } + + /* Align to OS page size */ + ui32Value &= ~(gsCwq.uiPageSize - 1); + + gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = ui32Value; + + break; + } + + default: + break; + } + + return PVRSRV_OK; +} + +static INLINE void CacheOpQItemRecycle(CACHEOP_WORK_ITEM *psCacheOpWorkItem) +{ + PVRSRV_ERROR eError; + eError = PMRUnlockSysPhysAddresses(psCacheOpWorkItem->psPMR); + PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses"); + /* Set to max as precaution should recycling this CacheOp index fail + to reset it, this is purely to safe-guard having to discard such + subsequent deferred CacheOps or signal the sw sync timeline */ + psCacheOpWorkItem->iTimeline = PVRSRV_NO_TIMELINE; + psCacheOpWorkItem->ui32OpSeqNum = (IMG_UINT32)~0; +#if defined(CACHEOP_DEBUG) + psCacheOpWorkItem->psPMR = (void *)(uintptr_t)~0; +#endif +} + +static INLINE void CacheOpQItemReadCheck(CACHEOP_WORK_ITEM *psCacheOpWorkItem) +{ +#if defined(CACHEOP_DEBUG) + CACHEOP_PVR_ASSERT(psCacheOpWorkItem->psPMR); + CACHEOP_PVR_ASSERT(psCacheOpWorkItem->psPMR != (void *)(uintptr_t)~0); + CACHEOP_PVR_ASSERT(psCacheOpWorkItem->ui32OpSeqNum != (IMG_UINT32)~0); +#else + PVR_UNREFERENCED_PARAMETER(psCacheOpWorkItem); +#endif +} + +static INLINE void CacheOpQItemWriteCheck(CACHEOP_WORK_ITEM *psCacheOpWorkItem) +{ +#if defined(CACHEOP_DEBUG) + CACHEOP_PVR_ASSERT(psCacheOpWorkItem->psPMR == (void *)(uintptr_t)~0); + CACHEOP_PVR_ASSERT(psCacheOpWorkItem->ui32OpSeqNum == (IMG_UINT32)~0); + CACHEOP_PVR_ASSERT(psCacheOpWorkItem->iTimeline == PVRSRV_NO_TIMELINE); +#else + PVR_UNREFERENCED_PARAMETER(psCacheOpWorkItem); +#endif +} + +static INLINE IMG_UINT32 CacheOpGetNextCommonSeqNum(void) +{ + IMG_UINT32 ui32SeqNum = OSAtomicIncrement(&gsCwq.hCommonSeqNum); + if (! 
ui32SeqNum) + { + ui32SeqNum = OSAtomicIncrement(&gsCwq.hCommonSeqNum); + } + return ui32SeqNum; +} + +static INLINE IMG_BOOL CacheOpFenceCheck(IMG_UINT32 ui32CompletedSeqNum, + IMG_UINT32 ui32FenceSeqNum) +{ + IMG_UINT32 ui32RebasedCompletedNum; + IMG_UINT32 ui32RebasedFenceNum; + IMG_UINT32 ui32Rebase; + + if (ui32FenceSeqNum == 0) + { + return IMG_TRUE; + } + + /* + The problem statement is how to compare two values on + a numerical sequentially incrementing timeline in the + presence of wrap around arithmetic semantics using a + single ui32 counter & atomic (increment) operations. + + The rationale for the solution here is to rebase the + incoming values to the sequence midpoint and perform + comparisons there; this allows us to handle overflow + or underflow wrap-round using only a single integer. + + NOTE: Here we assume that the absolute value of the + difference between the two incoming values in _not_ + greater than CACHEOP_SEQ_MIDPOINT. This assumption + holds as it implies that it is very _unlikely_ that 2 + billion CacheOp requests could have been made between + a single client's CacheOp request & the corresponding + fence check. This code sequence is hopefully a _more_ + hand optimised (branchless) version of this: + + x = ui32CompletedOpSeqNum + y = ui32FenceOpSeqNum + + if (|x - y| < CACHEOP_SEQ_MIDPOINT) + return (x - y) >= 0 ? true : false + else + return (y - x) >= 0 ? 
true : false + */ + ui32Rebase = CACHEOP_SEQ_MIDPOINT - ui32CompletedSeqNum; + + /* ui32Rebase could be either positive/negative, in + any case we still perform operation using unsigned + semantics as 2's complement notation always means + we end up with the correct result */ + ui32RebasedCompletedNum = ui32Rebase + ui32CompletedSeqNum; + ui32RebasedFenceNum = ui32Rebase + ui32FenceSeqNum; + + return (ui32RebasedCompletedNum >= ui32RebasedFenceNum); +} + +static INLINE PVRSRV_ERROR CacheOpTimelineBind(PVRSRV_DEVICE_NODE *psDevNode, + CACHEOP_WORK_ITEM *psCacheOpWorkItem, + PVRSRV_TIMELINE iTimeline) +{ + PVRSRV_ERROR eError; + + /* Always default the incoming CacheOp work-item to safe values */ + SyncClearTimelineObj(&psCacheOpWorkItem->sSWTimelineObj); + psCacheOpWorkItem->iTimeline = PVRSRV_NO_TIMELINE; + psCacheOpWorkItem->psDevNode = psDevNode; + if (iTimeline == PVRSRV_NO_TIMELINE) + { + return PVRSRV_OK; + } + + psCacheOpWorkItem->iTimeline = iTimeline; + eError = SyncSWGetTimelineObj(iTimeline, &psCacheOpWorkItem->sSWTimelineObj); + PVR_LOG_IF_ERROR(eError, "SyncSWGetTimelineObj"); + + return eError; +} + +static INLINE PVRSRV_ERROR CacheOpTimelineExec(CACHEOP_WORK_ITEM *psCacheOpWorkItem) +{ + PVRSRV_ERROR eError; + + if (psCacheOpWorkItem->iTimeline == PVRSRV_NO_TIMELINE) + { + return PVRSRV_OK; + } + CACHEOP_PVR_ASSERT(psCacheOpWorkItem->sSWTimelineObj.pvTlObj); + + eError = SyncSWTimelineAdvanceKM(psCacheOpWorkItem->psDevNode, + &psCacheOpWorkItem->sSWTimelineObj); + (void) SyncSWTimelineReleaseKM(&psCacheOpWorkItem->sSWTimelineObj); + + return eError; +} + +static INLINE void CacheOpExecRangeBased(PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_CACHE_OP uiCacheOp, + IMG_BYTE *pbCpuVirtAddr, + IMG_CPU_PHYADDR sCpuPhyAddr, + IMG_DEVMEM_OFFSET_T uiPgAlignedOffset, + IMG_DEVMEM_OFFSET_T uiCLAlignedStartOffset, + IMG_DEVMEM_OFFSET_T uiCLAlignedEndOffset) +{ + IMG_BYTE *pbCpuVirtAddrEnd; + IMG_BYTE *pbCpuVirtAddrStart; + IMG_CPU_PHYADDR sCpuPhyAddrEnd; + 
IMG_CPU_PHYADDR sCpuPhyAddrStart; + IMG_DEVMEM_SIZE_T uiRelFlushSize; + IMG_DEVMEM_OFFSET_T uiRelFlushOffset; + IMG_DEVMEM_SIZE_T uiNextPgAlignedOffset; + + /* These quantities allows us to perform cache operations + at cache-line granularity thereby ensuring we do not + perform more than is necessary */ + CACHEOP_PVR_ASSERT(uiPgAlignedOffset < uiCLAlignedEndOffset); + uiRelFlushSize = (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize; + uiRelFlushOffset = 0; + + if (uiCLAlignedStartOffset > uiPgAlignedOffset) + { + /* Zero unless initially starting at an in-page offset */ + uiRelFlushOffset = uiCLAlignedStartOffset - uiPgAlignedOffset; + uiRelFlushSize -= uiRelFlushOffset; + } + + /* uiRelFlushSize is gsCwq.uiPageSize unless current outstanding CacheOp + size is smaller. The 1st case handles in-page CacheOp range and + the 2nd case handles multiple-page CacheOp range with a last + CacheOp size that is less than gsCwq.uiPageSize */ + uiNextPgAlignedOffset = uiPgAlignedOffset + (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize; + if (uiNextPgAlignedOffset < uiPgAlignedOffset) + { + /* uiNextPgAlignedOffset is greater than uiCLAlignedEndOffset + by implication of this wrap-round; this only happens when + uiPgAlignedOffset is the last page aligned offset */ + uiRelFlushSize = uiRelFlushOffset ? + uiCLAlignedEndOffset - uiCLAlignedStartOffset : + uiCLAlignedEndOffset - uiPgAlignedOffset; + } + else + { + if (uiNextPgAlignedOffset > uiCLAlignedEndOffset) + { + uiRelFlushSize = uiRelFlushOffset ? 
+ uiCLAlignedEndOffset - uiCLAlignedStartOffset : + uiCLAlignedEndOffset - uiPgAlignedOffset; + } + } + + /* More efficient to request cache maintenance operation for full + relative range as opposed to multiple cache-aligned ranges */ + sCpuPhyAddrStart.uiAddr = sCpuPhyAddr.uiAddr + uiRelFlushOffset; + sCpuPhyAddrEnd.uiAddr = sCpuPhyAddrStart.uiAddr + uiRelFlushSize; + if (pbCpuVirtAddr) + { + pbCpuVirtAddrStart = pbCpuVirtAddr + uiRelFlushOffset; + pbCpuVirtAddrEnd = pbCpuVirtAddrStart + uiRelFlushSize; + } + else + { + /* Some OS/Env layer support functions expect NULL(s) */ + pbCpuVirtAddrStart = NULL; + pbCpuVirtAddrEnd = NULL; + } + + /* Perform requested CacheOp on the CPU data cache for successive cache + line worth of bytes up to page or in-page cache-line boundary */ + switch (uiCacheOp) + { + case PVRSRV_CACHE_OP_CLEAN: + OSCPUCacheCleanRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd, + sCpuPhyAddrStart, sCpuPhyAddrEnd); + break; + case PVRSRV_CACHE_OP_INVALIDATE: + OSCPUCacheInvalidateRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd, + sCpuPhyAddrStart, sCpuPhyAddrEnd); + break; + case PVRSRV_CACHE_OP_FLUSH: + OSCPUCacheFlushRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd, + sCpuPhyAddrStart, sCpuPhyAddrEnd); + break; + default: + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache operation type %d", + __func__, uiCacheOp)); + break; + } + +#if defined(CACHEOP_DEBUG) + /* Tracks the number of kernel-mode cacheline maintenance instructions */ + gsCwq.ui32ServerRBF += (uiRelFlushSize & ((IMG_DEVMEM_SIZE_T)~(gsCwq.uiLineSize - 1))) >> gsCwq.uiLineShift; +#endif +} + +static INLINE void CacheOpExecRangeBasedVA(PVRSRV_DEVICE_NODE *psDevNode, + IMG_CPU_VIRTADDR pvAddress, + IMG_DEVMEM_SIZE_T uiSize, + PVRSRV_CACHE_OP uiCacheOp) +{ + IMG_CPU_PHYADDR sCpuPhyAddrUnused = + { IMG_CAST_TO_CPUPHYADDR_UINT(0xCAFEF00DDEADBEEFULL) }; + IMG_BYTE *pbEnd = (IMG_BYTE*)((uintptr_t)pvAddress + (uintptr_t)uiSize); + IMG_BYTE *pbStart = 
(IMG_BYTE*)((uintptr_t)pvAddress & ~((uintptr_t)gsCwq.uiLineSize-1)); + + /* + If the start/end address isn't aligned to cache line size, round it up to the + nearest multiple; this ensures that we flush all the cache lines affected by + unaligned start/end addresses. + */ + pbEnd = (IMG_BYTE *) PVR_ALIGN((uintptr_t)pbEnd, (uintptr_t)gsCwq.uiLineSize); + switch (uiCacheOp) + { + case PVRSRV_CACHE_OP_CLEAN: + OSCPUCacheCleanRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused); + break; + case PVRSRV_CACHE_OP_INVALIDATE: + OSCPUCacheInvalidateRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused); + break; + case PVRSRV_CACHE_OP_FLUSH: + OSCPUCacheFlushRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused); + break; + default: + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache operation type %d", + __func__, uiCacheOp)); + break; + } + +#if defined(CACHEOP_DEBUG) + /* Tracks the number of kernel-mode cacheline maintenance instructions */ + gsCwq.ui32ServerRBF += (uiSize & ((IMG_DEVMEM_SIZE_T)~(gsCwq.uiLineSize - 1))) >> gsCwq.uiLineShift; +#endif +} + +static INLINE PVRSRV_ERROR CacheOpValidateVAOffset(PMR *psPMR, + IMG_CPU_VIRTADDR pvAddress, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + void **ppvOutAddress) +{ + PVRSRV_ERROR eError = PVRSRV_OK; +#if defined(LINUX) && !defined(CACHEFLUSH_NO_KMRBF_USING_UMVA) + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; +#endif + void __user *pvAddr; + + if (! pvAddress) + { + /* As pvAddress is optional, NULL is expected from UM/KM requests */ + pvAddr = NULL; + goto e0; + } + +#if !defined(LINUX) || defined(CACHEFLUSH_NO_KMRBF_USING_UMVA) + pvAddr = NULL; +#else + /* Validate VA, assume most basic address limit access_ok() check */ + pvAddr = (void __user *)(uintptr_t)((uintptr_t)pvAddress + uiOffset); + if (!access_ok(pvAddr, uiSize)) + { + pvAddr = NULL; + if (! 
mm) + { + /* Bad KM request, don't silently ignore */ + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_CPU_ADDR, e0); + } + } + else if (mm) + { + down_read(&mm->mmap_sem); + + vma = find_vma(mm, (unsigned long)(uintptr_t)pvAddr); + + if (!vma || + vma->vm_start > (unsigned long)(uintptr_t)pvAddr || + vma->vm_end < (unsigned long)(uintptr_t)pvAddr + uiSize || + vma->vm_private_data != psPMR) + { + /* + * Request range is not fully mapped or is not matching the PMR + * Ignore request's VA. + */ + pvAddr = NULL; + } + + up_read(&mm->mmap_sem); + } +#endif + +e0: + *ppvOutAddress = (IMG_CPU_VIRTADDR __force) pvAddr; + return eError; +} + +static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR, + IMG_CPU_VIRTADDR pvAddress, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + PVRSRV_CACHE_OP uiCacheOp, + IMG_BOOL bIsRequestValidated) + +{ + IMG_HANDLE hPrivOut = NULL; + IMG_BOOL bPMRIsSparse; + IMG_UINT32 ui32PageIndex; + IMG_UINT32 ui32NumOfPages; + size_t uiOutSize; /* Effectively unused */ + PVRSRV_DEVICE_NODE *psDevNode; + IMG_DEVMEM_SIZE_T uiPgAlignedSize; + IMG_DEVMEM_OFFSET_T uiPgAlignedOffset; + IMG_DEVMEM_OFFSET_T uiCLAlignedEndOffset; + IMG_DEVMEM_OFFSET_T uiPgAlignedEndOffset; + IMG_DEVMEM_OFFSET_T uiCLAlignedStartOffset; + IMG_DEVMEM_OFFSET_T uiPgAlignedStartOffset; + IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_CPU_PHYADDR asCpuPhyAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_CPU_PHYADDR *psCpuPhyAddr = asCpuPhyAddr; + IMG_BOOL bIsPMRInfoValid = IMG_FALSE; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_BYTE *pbCpuVirtAddr = NULL; + IMG_BOOL *pbValid = abValid; + + if (uiCacheOp == PVRSRV_CACHE_OP_NONE || uiCacheOp == PVRSRV_CACHE_OP_TIMELINE) + { + return PVRSRV_OK; + } + + if (! 
bIsRequestValidated) + { + IMG_DEVMEM_SIZE_T uiLPhysicalSize; + + /* Need to validate parameters before proceeding */ + eError = PMR_PhysicalSize(psPMR, &uiLPhysicalSize); + PVR_LOG_RETURN_IF_ERROR(eError, "uiLPhysicalSize"); + + PVR_LOG_RETURN_IF_FALSE(((uiOffset+uiSize) <= uiLPhysicalSize), CACHEOP_DEVMEM_OOR_ERROR_STRING, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE); + + eError = PMRLockSysPhysAddresses(psPMR); + PVR_LOG_RETURN_IF_ERROR(eError, "PMRLockSysPhysAddresses"); + } + + /* Fast track the request if a CPU VA is provided and CPU ISA supports VA only maintenance */ + eError = CacheOpValidateVAOffset(psPMR, pvAddress, uiOffset, uiSize, (void**)&pbCpuVirtAddr); + if (eError == PVRSRV_OK) + { + pvAddress = pbCpuVirtAddr; + + if (pvAddress && gsCwq.uiCacheOpAddrType == OS_CACHE_OP_ADDR_TYPE_VIRTUAL) + { + CacheOpExecRangeBasedVA(PMR_DeviceNode(psPMR), pvAddress, uiSize, uiCacheOp); + if (! bIsRequestValidated) + { + eError = PMRUnlockSysPhysAddresses(psPMR); + PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses"); + } +#if defined(CACHEOP_DEBUG) + gsCwq.ui32ServerSyncVA += 1; +#endif + return PVRSRV_OK; + } + else if (pvAddress) + { + /* Round down the incoming VA (if any) down to the nearest page aligned VA */ + pvAddress = (void*)((uintptr_t)pvAddress & ~((uintptr_t)gsCwq.uiPageSize-1)); +#if defined(CACHEOP_DEBUG) + gsCwq.ui32ServerSyncVA += 1; +#endif + } + } + else + { + /* + * This validation pathway has been added to accommodate any/all requests that might + * cause the kernel to Oops; essentially, KM requests should prevalidate cache maint. + * parameters but if this fails then we would rather fail gracefully than cause the + * kernel to Oops so instead we log the fact that an invalid KM virtual address was + * supplied and what action was taken to mitigate against kernel Oops(ing) if any. 
+ */ + CACHEOP_PVR_ASSERT(pbCpuVirtAddr == NULL); + + if (gsCwq.uiCacheOpAddrType == OS_CACHE_OP_ADDR_TYPE_PHYSICAL) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Invalid vaddress 0x%p in CPU d-cache maint. op, using paddress", + __func__, + pvAddress)); + + /* We can still proceed as kernel/cpu uses CPU PA for d-cache maintenance */ + pvAddress = NULL; + } + else + { + /* + * The approach here is to attempt a reacquisition of the PMR kernel VA and see if + * said VA corresponds to the parameter VA, if so fail requested cache maint. op. + * cause this indicates some kind of internal, memory and/or meta-data corruption + * else we reissue the request using this (re)acquired alias PMR kernel VA. + */ + if (PMR_IsSparse(psPMR)) + { + eError = PMRAcquireSparseKernelMappingData(psPMR, + 0, + gsCwq.uiPageSize, + (void **)&pbCpuVirtAddr, + &uiOutSize, + &hPrivOut); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireSparseKernelMappingData", e0); + } + else + { + eError = PMRAcquireKernelMappingData(psPMR, + 0, + gsCwq.uiPageSize, + (void **)&pbCpuVirtAddr, + &uiOutSize, + &hPrivOut); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", e0); + } + + /* Here, we only compare these CPU virtual addresses at granularity of the OS page size */ + if ((uintptr_t)pbCpuVirtAddr == ((uintptr_t)pvAddress & ~((uintptr_t)gsCwq.uiPageSize-1))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid vaddress 0x%p in CPU d-cache maint. op, no alt. so failing request", + __func__, + pvAddress)); + + eError = PMRReleaseKernelMappingData(psPMR, hPrivOut); + PVR_LOG_GOTO_WITH_ERROR("PMRReleaseKernelMappingData", eError, PVRSRV_ERROR_INVALID_CPU_ADDR, e0); + } + else if (gsCwq.uiCacheOpAddrType == OS_CACHE_OP_ADDR_TYPE_VIRTUAL) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Bad vaddress 0x%p in CPU d-cache maint. 
op, using reacquired vaddress 0x%p", + __func__, + pvAddress, + pbCpuVirtAddr)); + + /* Note that this might still fail if there is kernel memory/meta-data corruption; + there is not much we can do here but at the least we will be informed of this + before the kernel Oops(ing) */ + CacheOpExecRangeBasedVA(PMR_DeviceNode(psPMR), pbCpuVirtAddr, uiSize, uiCacheOp); + + eError = PMRReleaseKernelMappingData(psPMR, hPrivOut); + PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); + + eError = PVRSRV_OK; + goto e0; + } + else + { + /* At this junction, we have exhausted every possible work-around possible but we do + know that VA reacquisition returned another/alias page-aligned VA; so with this + future expectation of PMRAcquireKernelMappingData(), we proceed */ + PVR_DPF((PVR_DBG_WARNING, + "%s: Bad vaddress %p in CPU d-cache maint. op, will use reacquired vaddress", + __func__, + pvAddress)); + + eError = PMRReleaseKernelMappingData(psPMR, hPrivOut); + PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); + + /* NULL this to force per-page reacquisition down-stream */ + pvAddress = NULL; + } + } + } + + /* NULL clobbered var., OK to proceed */ + pbCpuVirtAddr = NULL; + eError = PVRSRV_OK; + + /* Need this for kernel mapping */ + bPMRIsSparse = PMR_IsSparse(psPMR); + psDevNode = PMR_DeviceNode(psPMR); + + /* Round the incoming offset down to the nearest cache-line / page aligned-address */ + uiCLAlignedEndOffset = uiOffset + uiSize; + uiCLAlignedEndOffset = PVR_ALIGN(uiCLAlignedEndOffset, (IMG_DEVMEM_SIZE_T)gsCwq.uiLineSize); + uiCLAlignedStartOffset = (uiOffset & ~((IMG_DEVMEM_OFFSET_T)gsCwq.uiLineSize-1)); + + uiPgAlignedEndOffset = uiCLAlignedEndOffset; + uiPgAlignedEndOffset = PVR_ALIGN(uiPgAlignedEndOffset, (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize); + uiPgAlignedStartOffset = (uiOffset & ~((IMG_DEVMEM_OFFSET_T)gsCwq.uiPageSize-1)); + uiPgAlignedSize = uiPgAlignedEndOffset - uiPgAlignedStartOffset; + +#if defined(CACHEOP_NO_CACHE_LINE_ALIGNED_ROUNDING) + /* For 
internal debug if cache-line optimised + flushing is suspected of causing data corruption */ + uiCLAlignedStartOffset = uiPgAlignedStartOffset; + uiCLAlignedEndOffset = uiPgAlignedEndOffset; +#endif + + /* Type of allocation backing the PMR data */ + ui32NumOfPages = uiPgAlignedSize >> gsCwq.uiPageShift; + if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC) + { + /* The pbValid array is allocated first as it is needed in + both physical/virtual cache maintenance methods */ + pbValid = OSAllocZMem(ui32NumOfPages * sizeof(IMG_BOOL)); + if (! pbValid) + { + pbValid = abValid; + } + else if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL) + { + psCpuPhyAddr = OSAllocZMem(ui32NumOfPages * sizeof(IMG_CPU_PHYADDR)); + if (! psCpuPhyAddr) + { + psCpuPhyAddr = asCpuPhyAddr; + OSFreeMem(pbValid); + pbValid = abValid; + } + } + } + + /* We always retrieve PMR data in bulk, up-front if number of pages is within + PMR_MAX_TRANSLATION_STACK_ALLOC limits else we check to ensure that a + dynamic buffer has been allocated to satisfy requests outside limits */ + if (ui32NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC || pbValid != abValid) + { + if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL) + { + /* Look-up PMR CpuPhyAddr once, if possible */ + eError = PMR_CpuPhysAddr(psPMR, + gsCwq.uiPageShift, + ui32NumOfPages, + uiPgAlignedStartOffset, + psCpuPhyAddr, + pbValid); + if (eError == PVRSRV_OK) + { + bIsPMRInfoValid = IMG_TRUE; + } + } + else + { + /* Look-up PMR per-page validity once, if possible */ + eError = PMR_IsOffsetValid(psPMR, + gsCwq.uiPageShift, + ui32NumOfPages, + uiPgAlignedStartOffset, + pbValid); + bIsPMRInfoValid = (eError == PVRSRV_OK) ? IMG_TRUE : IMG_FALSE; + } + } + + /* For each (possibly non-contiguous) PMR page(s), carry out the requested cache maint. op. 
*/ + for (uiPgAlignedOffset = uiPgAlignedStartOffset, ui32PageIndex = 0; + uiPgAlignedOffset < uiPgAlignedEndOffset; + uiPgAlignedOffset += (IMG_DEVMEM_OFFSET_T) gsCwq.uiPageSize, ui32PageIndex += 1) + { + + if (! bIsPMRInfoValid) + { + /* Never cross page boundary without looking up corresponding PMR page physical + address and/or page validity if these were not looked-up, in bulk, up-front */ + ui32PageIndex = 0; + if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL) + { + eError = PMR_CpuPhysAddr(psPMR, + gsCwq.uiPageShift, + 1, + uiPgAlignedOffset, + psCpuPhyAddr, + pbValid); + PVR_LOG_GOTO_IF_ERROR(eError, "PMR_CpuPhysAddr", e0); + } + else + { + eError = PMR_IsOffsetValid(psPMR, + gsCwq.uiPageShift, + 1, + uiPgAlignedOffset, + pbValid); + PVR_LOG_GOTO_IF_ERROR(eError, "PMR_IsOffsetValid", e0); + } + } + + /* Skip invalid PMR pages (i.e. sparse) */ + if (pbValid[ui32PageIndex] == IMG_FALSE) + { + CACHEOP_PVR_ASSERT(bPMRIsSparse); + continue; + } + + if (pvAddress) + { + /* The caller has supplied either a KM/UM CpuVA, so use it unconditionally */ + pbCpuVirtAddr = + (void *)(uintptr_t)((uintptr_t)pvAddress + (uintptr_t)(uiPgAlignedOffset-uiPgAlignedStartOffset)); + } + /* Skip CpuVA acquire if CacheOp can be maintained entirely using CpuPA */ + else if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_PHYSICAL) + { + if (bPMRIsSparse) + { + eError = + PMRAcquireSparseKernelMappingData(psPMR, + uiPgAlignedOffset, + gsCwq.uiPageSize, + (void **)&pbCpuVirtAddr, + &uiOutSize, + &hPrivOut); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireSparseKernelMappingData", e0); + } + else + { + eError = + PMRAcquireKernelMappingData(psPMR, + uiPgAlignedOffset, + gsCwq.uiPageSize, + (void **)&pbCpuVirtAddr, + &uiOutSize, + &hPrivOut); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", e0); + } + } + + /* Issue actual cache maintenance for PMR */ + CacheOpExecRangeBased(psDevNode, + uiCacheOp, + pbCpuVirtAddr, + (gsCwq.uiCacheOpAddrType != 
OS_CACHE_OP_ADDR_TYPE_VIRTUAL) ?
							  psCpuPhyAddr[ui32PageIndex] : psCpuPhyAddr[0],
							  uiPgAlignedOffset,
							  uiCLAlignedStartOffset,
							  uiCLAlignedEndOffset);

		if (! pvAddress)
		{
			/* The caller has not supplied either a KM/UM CpuVA, release mapping */
			if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_PHYSICAL)
			{
				eError = PMRReleaseKernelMappingData(psPMR, hPrivOut);
				PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData");
			}
		}
	}

e0:
	/* Common exit: free any dynamically grown look-up arrays (they alias the
	   on-stack arrays when small enough) and drop the PMR physical-address
	   lock if it was taken by this function rather than by the caller. */
	if (psCpuPhyAddr != asCpuPhyAddr)
	{
		OSFreeMem(psCpuPhyAddr);
	}

	if (pbValid != abValid)
	{
		OSFreeMem(pbValid);
	}

	if (! bIsRequestValidated)
	{
		eError = PMRUnlockSysPhysAddresses(psPMR);
		PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses");
	}

	return eError;
}

/* Drains the deferred CacheOp circular queue: for each pending work item,
   performs range-based flush/clean/invalidate via CacheOpPMRExec, publishes
   the completed sequence number, signals any bound timeline and recycles the
   queue slot. Runs on the CacheOp worker thread (single consumer). */
static PVRSRV_ERROR CacheOpQListExecRangeBased(void)
{
	IMG_UINT32 ui32NumOfEntries;
	PVRSRV_ERROR eError = PVRSRV_OK;
	CACHEOP_WORK_ITEM *psCacheOpWorkItem = NULL;

	/* Take a snapshot of the current count of deferred entries at this junction */
	ui32NumOfEntries = CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter);
	if (! ui32NumOfEntries)
	{
		return PVRSRV_OK;
	}
#if defined(CACHEOP_DEBUG)
	CACHEOP_PVR_ASSERT(ui32NumOfEntries < CACHEOP_INDICES_MAX);
#endif

	while (ui32NumOfEntries)
	{
		if (! OSAtomicRead(&gsCwq.hReadCounter))
		{
			/* Normally, the read-counter will trail the write counter until the write
			   counter wraps-round to zero. Under this condition we (re)calculate as the
			   read-counter too is wrapping around at this point */
			ui32NumOfEntries = CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter);
		}
#if defined(CACHEOP_DEBUG)
		/* Something's gone horribly wrong if these 2 counters are identical at this point */
		CACHEOP_PVR_ASSERT(OSAtomicRead(&gsCwq.hWriteCounter) != OSAtomicRead(&gsCwq.hReadCounter));
#endif

		/* Select the next pending deferred work-item for RBF cache maintenance */
		psCacheOpWorkItem = &gsCwq.asWorkItems[CacheOpIdxNext(&gsCwq.hReadCounter)];
		CacheOpQItemReadCheck(psCacheOpWorkItem);
#if defined(CACHEOP_DEBUG)
		/* The time waiting in the queue to be serviced */
		psCacheOpWorkItem->ui64DequeuedTime = OSClockns64();
#endif

		eError = CacheOpPMRExec(psCacheOpWorkItem->psPMR,
								NULL, /* No UM virtual address */
								psCacheOpWorkItem->uiOffset,
								psCacheOpWorkItem->uiSize,
								psCacheOpWorkItem->uiCacheOp,
								IMG_TRUE /* PMR is pre-validated */
								);
		if (eError != PVRSRV_OK)
		{
#if defined(CACHEOP_DEBUG)
#define PID_FMTSPEC " PID:%u"
#define CACHE_OP_WORK_PID psCacheOpWorkItem->pid
#else
#define PID_FMTSPEC "%s"
#define CACHE_OP_WORK_PID ""
#endif

			PVR_LOG(("Deferred CacheOpPMRExec failed:"
					 PID_FMTSPEC
					 " PMR:%p"
					 " Offset:%" IMG_UINT64_FMTSPECX
					 " Size:%" IMG_UINT64_FMTSPECX
					 " CacheOp:%d,"
					 " error: %d",
					 CACHE_OP_WORK_PID,
					 psCacheOpWorkItem->psPMR,
					 psCacheOpWorkItem->uiOffset,
					 psCacheOpWorkItem->uiSize,
					 psCacheOpWorkItem->uiCacheOp,
					 eError));

#undef PID_FMTSPEC
#undef CACHE_OP_WORK_PID
		}

#if defined(CACHEOP_DEBUG)
		psCacheOpWorkItem->ui64ExecuteTime = OSClockns64();
		CacheOpStatsExecLogWrite(psCacheOpWorkItem);
#endif

		/* The currently executed CacheOp item updates gsCwq.hCompletedSeqNum.
		   NOTE: This CacheOp item might be a discard item, if so its seqNum
		   still updates the gsCwq.hCompletedSeqNum */
		OSAtomicWrite(&gsCwq.hCompletedSeqNum, psCacheOpWorkItem->ui32OpSeqNum);

		/* If CacheOp is timeline(d), notify timeline waiters */
		eError = CacheOpTimelineExec(psCacheOpWorkItem);
		PVR_LOG_IF_ERROR(eError, "CacheOpTimelineExec");

		/* Indicate that this CCB work-item slot is now free for (re)use */
		CacheOpQItemRecycle(psCacheOpWorkItem);
		(void) CacheOpIdxIncrement(&gsCwq.hReadCounter);
		ui32NumOfEntries = ui32NumOfEntries - 1;
	}

	return eError;
}

/* Executes the deferred queue then wakes any client thread fenced on a
   CacheOp sequence number (clients sleep on hClientWakeUpEvtObj). */
static INLINE PVRSRV_ERROR CacheOpQListExec(void)
{
	PVRSRV_ERROR eError;

	eError = CacheOpQListExecRangeBased();
	PVR_LOG_IF_ERROR(eError, "CacheOpQListExecRangeBased");

	/* Signal any waiting threads blocked on CacheOp fence checks update
	   completed sequence number to last queue work item */
	eError = OSEventObjectSignal(gsCwq.hClientWakeUpEvtObj);
	PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");

	return eError;
}

/* Worker thread entry point: sleeps on hThreadWakeUpEvtObj and drains the
   deferred CacheOp queue each time it is woken, until the driver leaves the
   OK state or the framework is torn down. A final drain runs on exit so no
   queued items are lost during shutdown. */
static void CacheOpThread(void *pvData)
{
	PVRSRV_DATA *psPVRSRVData = pvData;
	IMG_HANDLE hOSEvent;
	PVRSRV_ERROR eError;

	/* Open CacheOp thread event object, abort driver if event object open fails */
	/* NOTE(review): a failed open is only logged here, not aborted — the wait
	   below would then operate on an invalid handle; confirm intended. */
	eError = OSEventObjectOpen(gsCwq.hThreadWakeUpEvtObj, &hOSEvent);
	PVR_LOG_IF_ERROR(eError, "OSEventObjectOpen");

	/* While driver is in good state & loaded, perform pending cache maintenance */
	while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) && gsCwq.bInit)
	{
		/* Sleep-wait here until when signalled for new queued CacheOp work items;
		   when woken-up, drain deferred queue completely before next event-wait */
		(void) OSEventObjectWaitKernel(hOSEvent, CACHEOP_THREAD_WAIT_TIMEOUT);
		while (CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter))
		{
			eError = CacheOpQListExec();
			PVR_LOG_IF_ERROR(eError, "CacheOpQListExec");
		}
	}

	/* Final drain on shutdown path */
	eError = CacheOpQListExec();
	PVR_LOG_IF_ERROR(eError, "CacheOpQListExec");

	eError = OSEventObjectClose(hOSEvent);
	PVR_LOG_IF_ERROR(eError, "OSEventObjectClose");
}

/* Queues (or, when the queue is non-empty/full, synchronously resolves) a
   timeline-only CacheOp so the timeline is signalled no earlier than all
   CacheOps it is predicated on. Android imposes a signalling deadline, which
   drives the retry-instead-of-fence behaviour below. */
static PVRSRV_ERROR CacheOpBatchExecTimeline(PVRSRV_DEVICE_NODE *psDevNode,
											 PVRSRV_TIMELINE iTimeline,
											 IMG_UINT32 ui32CurrentFenceSeqNum,
											 IMG_UINT32 *pui32NextFenceSeqNum)
{
	PVRSRV_ERROR eError;
	IMG_UINT32 ui32NextIdx;
	CACHEOP_WORK_ITEM sCacheOpWorkItem = {NULL};
	CACHEOP_WORK_ITEM *psCacheOpWorkItem = NULL;

	eError = CacheOpTimelineBind(psDevNode, &sCacheOpWorkItem, iTimeline);
	PVR_LOG_RETURN_IF_ERROR(eError, "CacheOpTimelineBind");

	OSLockAcquire(gsCwq.hDeferredLock);

	/*
	   Check if there is any deferred queueing space available and that nothing is
	   currently queued. This second check is required as Android where timelines
	   are used sets a timeline signalling deadline of 1000ms to signal timelines
	   else complains. So seeing we cannot be sure how long the CacheOp presently
	   in the queue would take we should not send this timeline down the queue as
	   well.
	*/
	ui32NextIdx = CacheOpIdxNext(&gsCwq.hWriteCounter);
	if (!CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter) &&
		CacheOpIdxRead(&gsCwq.hReadCounter) != ui32NextIdx)
	{
		/* Queue empty and a slot free: defer the timeline through the queue */
		psCacheOpWorkItem = &gsCwq.asWorkItems[ui32NextIdx];
		CacheOpQItemWriteCheck(psCacheOpWorkItem);

		psCacheOpWorkItem->sSWTimelineObj = sCacheOpWorkItem.sSWTimelineObj;
		psCacheOpWorkItem->iTimeline = sCacheOpWorkItem.iTimeline;
		psCacheOpWorkItem->psDevNode = sCacheOpWorkItem.psDevNode;
		psCacheOpWorkItem->ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
		psCacheOpWorkItem->uiCacheOp = PVRSRV_CACHE_OP_TIMELINE;
		psCacheOpWorkItem->uiOffset = (IMG_DEVMEM_OFFSET_T)0;
		psCacheOpWorkItem->uiSize = (IMG_DEVMEM_SIZE_T)0;
		/* Defer timeline using information page PMR */
		psCacheOpWorkItem->psPMR = gsCwq.psInfoPagePMR;
		eError = PMRLockSysPhysAddresses(psCacheOpWorkItem->psPMR);
		PVR_LOG_GOTO_IF_ERROR(eError, "PMRLockSysPhysAddresses", e0);
#if defined(CACHEOP_DEBUG)
		psCacheOpWorkItem->pid = OSGetCurrentClientProcessIDKM();
		psCacheOpWorkItem->ui64EnqueuedTime = OSClockns64();
		gsCwq.ui32ServerASync += 1;
		gsCwq.ui32ServerDTL += 1;
#endif

		/* Mark index ready for cache maintenance */
		(void) CacheOpIdxIncrement(&gsCwq.hWriteCounter);

		OSLockRelease(gsCwq.hDeferredLock);

		eError = OSEventObjectSignal(gsCwq.hThreadWakeUpEvtObj);
		PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
	}
	else
	{
		IMG_BOOL bExecTimeline = IMG_TRUE;
		IMG_UINT32 ui32CompletedOpSeqNum = OSAtomicRead(&gsCwq.hCompletedSeqNum);

		OSLockRelease(gsCwq.hDeferredLock);

		/*
		   This pathway requires careful handling here as the client CacheOp(s) predicated on this
		   timeline might have been broken-up (i.e. batched) into several server requests by client:
		   1 - In the first case, a CacheOp from an earlier batch is still in-flight, so we check if
		   this is the case because even though we might have executed all the CacheOps in this batch
		   synchronously, we cannot be sure that any in-flight CacheOp pending on this client is not
		   predicated on this timeline hence we need to synchronise here for safety by fencing until
		   all in-flight CacheOps are completed. NOTE: On Android, this might cause issues due to
		   timelines notification deadlines so we do not fence (i.e. cannot sleep or wait) here to
		   synchronise, instead nudge services client to retry the request.
		   2 - In the second case, there is no in-flight CacheOp for this client in which case just
		   continue processing as normal.
		*/
		if (!CacheOpFenceCheck(ui32CompletedOpSeqNum, ui32CurrentFenceSeqNum))
		{
#if defined(ANDROID)
			bExecTimeline = IMG_FALSE;
			eError = PVRSRV_ERROR_RETRY;
#else
			eError = CacheOpFence ((RGXFWIF_DM)0, ui32CurrentFenceSeqNum);
			PVR_LOG_IF_ERROR(eError, "CacheOpFence");
#endif
		}

		if (bExecTimeline)
		{
			/* CacheOp fence requirement met, signal timeline */
			eError = CacheOpTimelineExec(&sCacheOpWorkItem);
			PVR_LOG_IF_ERROR(eError, "CacheOpTimelineExec");
		}
	}

	return eError;
e0:
	if (psCacheOpWorkItem)
	{
		/* Need to ensure we leave this CacheOp QItem in the proper recycled state.
		   NB: hDeferredLock is still held on this path and is released here. */
		CacheOpQItemRecycle(psCacheOpWorkItem);
		OSLockRelease(gsCwq.hDeferredLock);
	}

	return eError;
}

/* Processes a batch of client CacheOps: each op is either executed
   synchronously (see the numbered policy comment in the loop body) or
   enqueued for the CacheOp worker thread; *pui32NextFenceSeqNum reports the
   last deferred sequence number (0 if nothing was deferred). */
static PVRSRV_ERROR CacheOpBatchExecRangeBased(PVRSRV_DEVICE_NODE *psDevNode,
											PMR **ppsPMR,
											IMG_CPU_VIRTADDR *pvAddress,
											IMG_DEVMEM_OFFSET_T *puiOffset,
											IMG_DEVMEM_SIZE_T *puiSize,
											PVRSRV_CACHE_OP *puiCacheOp,
											IMG_UINT32 ui32NumCacheOps,
											PVRSRV_TIMELINE uiTimeline,
											IMG_UINT32 uiCurrentFenceSeqNum,
											IMG_UINT32 *pui32NextFenceSeqNum)
{
	IMG_UINT32 ui32Idx;
	IMG_UINT32 ui32NextIdx;
	IMG_BOOL bBatchHasTimeline;
	IMG_BOOL bCacheOpConfigKDF;
	IMG_DEVMEM_SIZE_T uiLogicalSize;
	PVRSRV_ERROR eError = PVRSRV_OK;
	CACHEOP_WORK_ITEM *psCacheOpWorkItem = NULL;
#if defined(CACHEOP_DEBUG)
	CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
	IMG_UINT32 ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
	sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
#endif

	/* Check if batch has an associated timeline update */
	bBatchHasTimeline = puiCacheOp[ui32NumCacheOps-1] & PVRSRV_CACHE_OP_TIMELINE;
	puiCacheOp[ui32NumCacheOps-1] &= ~(PVRSRV_CACHE_OP_TIMELINE);

	/* Check if config.
   supports kernel deferring of cacheops */
	bCacheOpConfigKDF = CacheOpConfigSupports(CACHEOP_CONFIG_KDF);

	/*
	   Client expects the next fence seqNum to be zero unless the server has deferred
	   at least one CacheOp in the submitted queue in which case the server informs
	   the client of the last CacheOp seqNum deferred in this batch.
	*/
	for (*pui32NextFenceSeqNum = 0, ui32Idx = 0; ui32Idx < ui32NumCacheOps; ui32Idx++)
	{
		/* Fail UM request, don't silently ignore */
		PVR_GOTO_IF_INVALID_PARAM(puiSize[ui32Idx], eError, e0);

		if (bCacheOpConfigKDF)
		{
			/* Check if there is deferred queueing space available */
			ui32NextIdx = CacheOpIdxNext(&gsCwq.hWriteCounter);
			if (ui32NextIdx != CacheOpIdxRead(&gsCwq.hReadCounter))
			{
				psCacheOpWorkItem = &gsCwq.asWorkItems[ui32NextIdx];
			}
		}

		/*
		   Normally, we would like to defer client CacheOp(s) but we may not always be in a
		   position or is necessary to do so based on the following reasons:
		   0 - There is currently no queueing space left to enqueue this CacheOp, this might
		       imply the system is queueing more requests than can be consumed by the CacheOp
		       thread in time.
		   1 - Batch has timeline, action this now due to Android timeline signaling deadlines.
		   2 - Configuration does not support deferring of cache maintenance operations so we
		       execute the batch synchronously/immediately.
		   3 - CacheOp has an INVALIDATE, as this is used to transfer device memory buffer
		       ownership back to the processor, we cannot defer it so action it immediately.
		   4 - CacheOp size too small (single OS page size) to warrant overhead of deferment,
		   5 - CacheOp size OK for deferment, but a client virtual address is supplied so we
		       might as well just take advantage of said VA & flush immediately in UM context.
		   6 - Prevent DoS attack if a malicious client queues something very large, say 1GiB.
		       Here we upper bound this threshold to PVR_DIRTY_BYTES_FLUSH_THRESHOLD.
		*/
		if (!psCacheOpWorkItem ||
			bBatchHasTimeline ||
			!bCacheOpConfigKDF ||
			puiCacheOp[ui32Idx] & PVRSRV_CACHE_OP_INVALIDATE ||
			(puiSize[ui32Idx] <= (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize) ||
			(pvAddress[ui32Idx] && puiSize[ui32Idx] < (IMG_DEVMEM_SIZE_T)gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD]) ||
			(puiSize[ui32Idx] >= (IMG_DEVMEM_SIZE_T)(gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD] << 2)))
		{
			/* When the CacheOp thread not keeping up, trash d-cache */
#if defined(CACHEOP_DEBUG)
			sCacheOpWorkItem.ui64EnqueuedTime = OSClockns64();
			gsCwq.ui32ServerSync += 1;
#endif
			psCacheOpWorkItem = NULL;

			/* Synchronous path: execute in the caller's context right now */
			eError = CacheOpPMRExec(ppsPMR[ui32Idx],
									pvAddress[ui32Idx],
									puiOffset[ui32Idx],
									puiSize[ui32Idx],
									puiCacheOp[ui32Idx],
									IMG_FALSE);
			PVR_LOG_GOTO_IF_ERROR(eError, "CacheOpExecPMR", e0);

#if defined(CACHEOP_DEBUG)
			sCacheOpWorkItem.ui64ExecuteTime = OSClockns64();
			sCacheOpWorkItem.ui32OpSeqNum = ui32OpSeqNum;
			sCacheOpWorkItem.psPMR = ppsPMR[ui32Idx];
			sCacheOpWorkItem.uiSize = puiSize[ui32Idx];
			sCacheOpWorkItem.uiOffset = puiOffset[ui32Idx];
			sCacheOpWorkItem.uiCacheOp = puiCacheOp[ui32Idx];
			CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
#endif

			continue;
		}

		/* Need to validate request parameters here before enqueing */
		eError = PMR_LogicalSize(ppsPMR[ui32Idx], &uiLogicalSize);
		PVR_LOG_GOTO_IF_ERROR(eError, "PMR_LogicalSize", e0);
		eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
		PVR_LOG_GOTO_IF_FALSE(((puiOffset[ui32Idx]+puiSize[ui32Idx]) <= uiLogicalSize), CACHEOP_DEVMEM_OOR_ERROR_STRING, e0);
		eError = PVRSRV_OK;

		/* For safety, take reference here in user context */
		eError = PMRLockSysPhysAddresses(ppsPMR[ui32Idx]);
		PVR_LOG_GOTO_IF_ERROR(eError, "PMRLockSysPhysAddresses", e0);

		OSLockAcquire(gsCwq.hDeferredLock);

		/* Select next item off the queue to defer with */
		ui32NextIdx = CacheOpIdxNext(&gsCwq.hWriteCounter);
		if (ui32NextIdx != CacheOpIdxRead(&gsCwq.hReadCounter))
		{
			psCacheOpWorkItem = &gsCwq.asWorkItems[ui32NextIdx];
			CacheOpQItemWriteCheck(psCacheOpWorkItem);
		}
		else
		{
			/* Retry, disable KDF for this batch: drop the lock and the PMR
			   reference, step the loop index back and reprocess this op on
			   the synchronous path above. */
			OSLockRelease(gsCwq.hDeferredLock);
			bCacheOpConfigKDF = IMG_FALSE;
			psCacheOpWorkItem = NULL;
			eError = PMRUnlockSysPhysAddresses(ppsPMR[ui32Idx]);
			PVR_LOG_GOTO_IF_ERROR(eError, "PMRUnlockSysPhysAddresses", e0);
			ui32Idx = ui32Idx - 1;
			continue;
		}

		/* Timeline need to be looked-up (i.e. bind) in the user context
		   before deferring into the CacheOp thread kernel context */
		eError = CacheOpTimelineBind(psDevNode, psCacheOpWorkItem, PVRSRV_NO_TIMELINE);
		PVR_LOG_GOTO_IF_ERROR(eError, "CacheOpTimelineBind", e1);

		/* Prepare & enqueue next deferred work item for CacheOp thread */
		psCacheOpWorkItem->ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
		*pui32NextFenceSeqNum = psCacheOpWorkItem->ui32OpSeqNum;
		psCacheOpWorkItem->uiCacheOp = puiCacheOp[ui32Idx];
		psCacheOpWorkItem->uiOffset = puiOffset[ui32Idx];
		psCacheOpWorkItem->uiSize = puiSize[ui32Idx];
		psCacheOpWorkItem->psPMR = ppsPMR[ui32Idx];
#if defined(CACHEOP_DEBUG)
		psCacheOpWorkItem->ui64EnqueuedTime = OSClockns64();
		psCacheOpWorkItem->pid = sCacheOpWorkItem.pid;
		psCacheOpWorkItem->bDeferred = IMG_TRUE;
		psCacheOpWorkItem->bKMReq = IMG_FALSE;
		psCacheOpWorkItem->bUMF = IMG_FALSE;
		gsCwq.ui32ServerASync += 1;
#endif

		/* Increment deferred size & mark index ready for cache maintenance */
		(void) CacheOpIdxIncrement(&gsCwq.hWriteCounter);

		OSLockRelease(gsCwq.hDeferredLock);
		psCacheOpWorkItem = NULL;
	}

	/* Signal the CacheOp thread to ensure these items get processed */
	eError = OSEventObjectSignal(gsCwq.hThreadWakeUpEvtObj);
	PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");

e1:
	if (psCacheOpWorkItem)
	{
		/* Need to ensure we leave this CacheOp QItem in the proper recycled state.
		   NB: hDeferredLock is still held when arriving here from the
		   CacheOpTimelineBind failure path. */
		CacheOpQItemRecycle(psCacheOpWorkItem);
		OSLockRelease(gsCwq.hDeferredLock);
	}
e0:
	if (bBatchHasTimeline)
	{
		/* Timeline update was stripped off above; action it now regardless of
		   how the batch itself fared, preserving any RETRY indication. */
		PVRSRV_ERROR eError2;
		eError2 = CacheOpBatchExecTimeline(psDevNode, uiTimeline,
										   uiCurrentFenceSeqNum, pui32NextFenceSeqNum);
		eError = (eError2 == PVRSRV_ERROR_RETRY) ? eError2 : eError;
	}

	return eError;
}


/* Kernel-mode entry: performs one clean/invalidate/flush over an explicit
   VA/PA range via the OS cache primitives. Always returns PVRSRV_OK; an
   unknown op type is only logged. */
PVRSRV_ERROR CacheOpExec (PPVRSRV_DEVICE_NODE psDevNode,
						  void *pvVirtStart,
						  void *pvVirtEnd,
						  IMG_CPU_PHYADDR sCPUPhysStart,
						  IMG_CPU_PHYADDR sCPUPhysEnd,
						  PVRSRV_CACHE_OP uiCacheOp)
{
#if defined(CACHEOP_DEBUG)
	IMG_UINT64 ui64EnqueueTime = OSClockns64();
#endif

	switch (uiCacheOp)
	{
		case PVRSRV_CACHE_OP_CLEAN:
			OSCPUCacheCleanRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd);
			break;
		case PVRSRV_CACHE_OP_INVALIDATE:
			OSCPUCacheInvalidateRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd);
			break;
		case PVRSRV_CACHE_OP_FLUSH:
			OSCPUCacheFlushRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd);
			break;
		default:
			PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache operation type %d",
					__func__, uiCacheOp));
			break;
	}

#if defined(CACHEOP_DEBUG)
	if (CacheOpConfigSupports(CACHEOP_CONFIG_KLOG))
	{
		CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};

		gsCwq.ui32ServerSync += 1;
		/* Count whole cache-lines covered by the physical range */
		gsCwq.ui32ServerRBF +=
			((sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr) & ((IMG_DEVMEM_SIZE_T)~(gsCwq.uiLineSize - 1))) >> gsCwq.uiLineShift;

		sCacheOpWorkItem.uiOffset = 0;
		sCacheOpWorkItem.bKMReq = IMG_TRUE;
		sCacheOpWorkItem.uiCacheOp = uiCacheOp;
		/* Use information page PMR for logging KM request */
		sCacheOpWorkItem.psPMR = gsCwq.psInfoPagePMR;
		sCacheOpWorkItem.ui64EnqueuedTime = ui64EnqueueTime;
		sCacheOpWorkItem.ui64ExecuteTime = OSClockns64();
		sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
		sCacheOpWorkItem.ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
		sCacheOpWorkItem.uiSize = (sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr);

		CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
	}
#endif

	return PVRSRV_OK;
}

/* Synchronous CacheOp on a PMR region, optionally using the client-supplied
   UM virtual address (uiAddress, may be 0) to avoid a kernel mapping. */
PVRSRV_ERROR CacheOpValExec(PMR *psPMR,
							IMG_UINT64 uiAddress,
							IMG_DEVMEM_OFFSET_T uiOffset,
							IMG_DEVMEM_SIZE_T uiSize,
							PVRSRV_CACHE_OP uiCacheOp)
{
	PVRSRV_ERROR eError;
	IMG_CPU_VIRTADDR pvAddress = (IMG_CPU_VIRTADDR)(uintptr_t)uiAddress;
#if defined(CACHEOP_DEBUG)
	CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
	gsCwq.ui32ServerSync += 1;
	sCacheOpWorkItem.psPMR = psPMR;
	sCacheOpWorkItem.uiSize = uiSize;
	sCacheOpWorkItem.uiOffset = uiOffset;
	sCacheOpWorkItem.uiCacheOp = uiCacheOp;
	sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
	sCacheOpWorkItem.ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
	sCacheOpWorkItem.ui64EnqueuedTime = OSClockns64();
#endif

	eError = CacheOpPMRExec(psPMR,
							pvAddress,
							uiOffset,
							uiSize,
							uiCacheOp,
							IMG_FALSE);
	PVR_LOG_GOTO_IF_ERROR(eError, "CacheOpPMRExec", e0);

#if defined(CACHEOP_DEBUG)
	sCacheOpWorkItem.ui64ExecuteTime = OSClockns64();
	CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
#endif

e0:
	return eError;
}

/* Bridge entry point for client CacheOp batches; dispatches to the
   timeline-only fast path or the range-based batch handler. */
PVRSRV_ERROR CacheOpQueue (CONNECTION_DATA *psConnection,
						   PVRSRV_DEVICE_NODE *psDevNode,
						   IMG_UINT32 ui32NumCacheOps,
						   PMR **ppsPMR,
						   IMG_UINT64 *puiAddress,
						   IMG_DEVMEM_OFFSET_T *puiOffset,
						   IMG_DEVMEM_SIZE_T *puiSize,
						   PVRSRV_CACHE_OP *puiCacheOp,
						   IMG_UINT32 ui32OpTimeline,
						   IMG_UINT32 uiCurrentFenceSeqNum,
						   IMG_UINT32 *pui32NextFenceSeqNum)
{
	PVRSRV_ERROR eError;
	PVRSRV_TIMELINE uiTimeline = (PVRSRV_TIMELINE)ui32OpTimeline;
	IMG_CPU_VIRTADDR *pvAddress = (IMG_CPU_VIRTADDR*)(uintptr_t)puiAddress;

	PVR_UNREFERENCED_PARAMETER(psConnection);

	if (!gsCwq.bInit)
	{
		PVR_LOG(("CacheOp framework not initialised, failing request"));
		return PVRSRV_ERROR_NOT_INITIALISED;
	}
	else if (!
ui32NumCacheOps) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + /* Ensure any single timeline CacheOp request is processed immediately */ + else if (ui32NumCacheOps == 1 && puiCacheOp[0] == PVRSRV_CACHE_OP_TIMELINE) + { + eError = CacheOpBatchExecTimeline(psDevNode, uiTimeline, uiCurrentFenceSeqNum, pui32NextFenceSeqNum); + } + /* This is the default entry for all client requests */ + else + { + if (!(gsCwq.eConfig & (CACHEOP_CONFIG_LAST-1))) + { + /* default the configuration before execution */ + CacheOpConfigUpdate(CACHEOP_CONFIG_DEFAULT); + } + + eError = + CacheOpBatchExecRangeBased(psDevNode, + ppsPMR, + pvAddress, + puiOffset, + puiSize, + puiCacheOp, + ui32NumCacheOps, + uiTimeline, + uiCurrentFenceSeqNum, + pui32NextFenceSeqNum); + } + + return eError; +} + +PVRSRV_ERROR CacheOpFence (RGXFWIF_DM eFenceOpType, IMG_UINT32 ui32FenceOpSeqNum) +{ + IMG_HANDLE hOSEvent; + PVRSRV_ERROR eError2; + IMG_UINT32 ui32RetryAbort; + IMG_UINT32 ui32CompletedOpSeqNum; + PVRSRV_ERROR eError = PVRSRV_OK; +#if defined(CACHEOP_DEBUG) + IMG_UINT64 uiTimeNow; + CACHEOP_WORK_ITEM sCacheOpWorkItem = {0}; + sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM(); + sCacheOpWorkItem.ui32OpSeqNum = ui32FenceOpSeqNum; + sCacheOpWorkItem.ui64EnqueuedTime = OSClockns64(); + uiTimeNow = sCacheOpWorkItem.ui64EnqueuedTime; +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + sCacheOpWorkItem.eFenceOpType = eFenceOpType; +#endif + sCacheOpWorkItem.uiSize = (uintptr_t) OSAtomicRead(&gsCwq.hCompletedSeqNum); + sCacheOpWorkItem.uiOffset = 0; +#endif + PVR_UNREFERENCED_PARAMETER(eFenceOpType); + + /* If initial fence check fails, then wait-and-retry in loop */ + ui32CompletedOpSeqNum = OSAtomicRead(&gsCwq.hCompletedSeqNum); + if (CacheOpFenceCheck(ui32CompletedOpSeqNum, ui32FenceOpSeqNum)) + { +#if defined(CACHEOP_DEBUG) + sCacheOpWorkItem.uiSize = (uintptr_t) ui32CompletedOpSeqNum; +#endif + goto e0; + } + + /* Open CacheOp update event object, if event open fails return error */ + 
eError2 = OSEventObjectOpen(gsCwq.hClientWakeUpEvtObj, &hOSEvent); + PVR_LOG_GOTO_IF_ERROR(eError2, "OSEventObjectOpen", e0); + + /* Linear (i.e. use exponential?) back-off, upper bounds user wait */ + for (ui32RetryAbort = gsCwq.ui32FenceRetryAbort; ;--ui32RetryAbort) + { + /* (Re)read completed CacheOp sequence number before waiting */ + ui32CompletedOpSeqNum = OSAtomicRead(&gsCwq.hCompletedSeqNum); + if (CacheOpFenceCheck(ui32CompletedOpSeqNum, ui32FenceOpSeqNum)) + { +#if defined(CACHEOP_DEBUG) + sCacheOpWorkItem.uiSize = (uintptr_t) ui32CompletedOpSeqNum; +#endif + break; + } + + (void) OSEventObjectWaitTimeout(hOSEvent, gsCwq.ui32FenceWaitTimeUs); + + if (! ui32RetryAbort) + { +#if defined(CACHEOP_DEBUG) + sCacheOpWorkItem.uiSize = (uintptr_t) OSAtomicRead(&gsCwq.hCompletedSeqNum); + sCacheOpWorkItem.uiOffset = 0; + uiTimeNow = OSClockns64(); +#endif + PVR_LOG(("CacheOpFence() event: "CACHEOP_ABORT_FENCE_ERROR_STRING)); + eError = PVRSRV_ERROR_RETRY; + break; + } + else + { +#if defined(CACHEOP_DEBUG) + uiTimeNow = OSClockns64(); +#endif + } + } + + eError2 = OSEventObjectClose(hOSEvent); + PVR_LOG_IF_ERROR(eError2, "OSEventObjectOpen"); + +e0: +#if defined(CACHEOP_DEBUG) + sCacheOpWorkItem.ui64ExecuteTime = uiTimeNow; + if (ui32FenceOpSeqNum) + { + IMG_UINT64 ui64TimeTakenNs = sCacheOpWorkItem.ui64EnqueuedTime - sCacheOpWorkItem.ui64ExecuteTime; + IMG_UINT32 ui32Time; + IMG_INT32 i32Div; + + do_div(ui64TimeTakenNs, 1000); + ui32Time = ui64TimeTakenNs; + + /* Only fences pending on CacheOps contribute towards statistics, + * Calculate the approximate cumulative moving average fence time. + * This calculation is based on standard equation: + * + * CMAnext = (new + count * CMAprev) / (count + 1) + * + * but in simplified form: + * + * CMAnext = CMAprev + (new - CMAprev) / (count + 1) + * + * this gets rid of multiplication and prevents overflow. 
+ * + * Also to increase accuracy that we lose with integer division, + * we hold the moving remainder of the division and add it. + * + * CMAnext = CMAprev + (new - CMAprev + CMRprev) / (count + 1) + * + * Multiple tests proved it to be the best solution for approximating + * CMA using integers. + * + */ + + i32Div = (IMG_INT32)ui32Time - (IMG_INT32)gsCwq.ui32AvgFenceTime + (IMG_INT32)gsCwq.ui32AvgFenceTimeRemainder; + + + gsCwq.ui32AvgFenceTime += i32Div / (IMG_INT32)(gsCwq.ui32TotalFenceOps + 1); + gsCwq.ui32AvgFenceTimeRemainder = i32Div % (IMG_INT32)(gsCwq.ui32TotalFenceOps + 1); + + + gsCwq.ui32TotalFenceOps++; + + } + CacheOpStatsExecLogWrite(&sCacheOpWorkItem); +#endif + + return eError; +} + +PVRSRV_ERROR CacheOpLog (PMR *psPMR, + IMG_UINT64 puiAddress, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT64 ui64EnqueuedTimeUs, + IMG_UINT64 ui64ExecuteTimeUs, + IMG_UINT32 ui32NumRBF, + PVRSRV_CACHE_OP uiCacheOp) +{ +#if defined(CACHEOP_DEBUG) + CACHEOP_WORK_ITEM sCacheOpWorkItem = {0}; + PVR_UNREFERENCED_PARAMETER(puiAddress); + + sCacheOpWorkItem.psPMR = psPMR; + sCacheOpWorkItem.uiSize = uiSize; + sCacheOpWorkItem.uiOffset = uiOffset; + sCacheOpWorkItem.uiCacheOp = uiCacheOp; + sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM(); + sCacheOpWorkItem.ui32OpSeqNum = CacheOpGetNextCommonSeqNum(); + + sCacheOpWorkItem.ui64EnqueuedTime = ui64EnqueuedTimeUs; + sCacheOpWorkItem.ui64ExecuteTime = ui64ExecuteTimeUs; + sCacheOpWorkItem.bUMF = IMG_TRUE; + gsCwq.ui32ClientRBF += ui32NumRBF; + gsCwq.ui32ClientSync += 1; + + CacheOpStatsExecLogWrite(&sCacheOpWorkItem); +#else + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(uiOffset); + PVR_UNREFERENCED_PARAMETER(uiCacheOp); + PVR_UNREFERENCED_PARAMETER(ui32NumRBF); + PVR_UNREFERENCED_PARAMETER(puiAddress); + PVR_UNREFERENCED_PARAMETER(ui64ExecuteTimeUs); + PVR_UNREFERENCED_PARAMETER(ui64EnqueuedTimeUs); +#endif + return PVRSRV_OK; +} + 
/* Second-stage initialisation: creates the worker/client event objects,
   the deferred-queue and config locks, seeds the CacheOp information page,
   selects the platform configuration, spawns the "pvr_cacheop" worker
   thread and registers the DebugInfo/AppHint configuration hooks.
   On any failure the partially constructed state is torn down via
   CacheOpDeInit2(). */
PVRSRV_ERROR CacheOpInit2 (void)
{
	PVRSRV_ERROR eError;
	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();

	/* Create an event object for pending CacheOp work items */
	eError = OSEventObjectCreate("PVRSRV_CACHEOP_EVENTOBJECT", &gsCwq.hThreadWakeUpEvtObj);
	PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", e0);

	/* Create an event object for updating pending fence checks on CacheOp.
	   NOTE(review): deliberately reuses the same debug name as above —
	   confirm distinct names are not required by tooling. */
	eError = OSEventObjectCreate("PVRSRV_CACHEOP_EVENTOBJECT", &gsCwq.hClientWakeUpEvtObj);
	PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", e0);

	/* Appending work-items is not concurrent, lock protects against this */
	eError = OSLockCreate((POS_LOCK*)&gsCwq.hDeferredLock);
	PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0);

	/* Apphint read/write is not concurrent, so lock protects against this */
	eError = OSLockCreate((POS_LOCK*)&gsCwq.hConfigLock);
	PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0);

	gsCwq.ui32FenceWaitTimeUs = CACHEOP_FENCE_WAIT_TIMEOUT;
	gsCwq.ui32FenceRetryAbort = CACHEOP_FENCE_RETRY_ABORT;

#if defined(CACHEFLUSH_ISA_SUPPORTS_UM_FLUSH)
	gsCwq.bSupportsUMFlush = IMG_TRUE;
#else
	gsCwq.bSupportsUMFlush = IMG_FALSE;
#endif

	gsCwq.pui32InfoPage = psPVRSRVData->pui32InfoPage;
	gsCwq.psInfoPagePMR = psPVRSRVData->psInfoPagePMR;

	/* Normally, platforms should use their default configurations, put exceptions here */
#if defined(__i386__) || defined(__x86_64__)
#if !defined(TC_MEMORY_CONFIG)
	CacheOpConfigUpdate(CACHEOP_CONFIG_URBF | CACHEOP_CONFIG_KDF);
#else
	CacheOpConfigUpdate(CACHEOP_CONFIG_KDF);
#endif
#else /* defined(__x86__) */
	CacheOpConfigUpdate(CACHEOP_CONFIG_DEFAULT);
#endif

	/* Initialise the remaining occupants of the CacheOp information page */
	gsCwq.pui32InfoPage[CACHEOP_INFO_PGSIZE] = (IMG_UINT32)gsCwq.uiPageSize;
	gsCwq.pui32InfoPage[CACHEOP_INFO_LINESIZE] = (IMG_UINT32)gsCwq.uiLineSize;

	/* Set before spawning thread */
	gsCwq.bInit = IMG_TRUE;

	/* Create a thread which is used to execute the deferred CacheOp(s),
	   these are CacheOp(s) executed by the server on behalf of clients
	   asynchronously. All clients synchronise with the server before
	   submitting any HW operation (i.e. device kicks) to ensure that
	   client device work-load memory is coherent */
	eError = OSThreadCreatePriority(&gsCwq.hWorkerThread,
									"pvr_cacheop",
									CacheOpThread,
									CacheOpThreadDumpInfo,
									IMG_TRUE,
									psPVRSRVData,
									OS_THREAD_HIGHEST_PRIORITY);
	PVR_LOG_GOTO_IF_ERROR(eError, "OSThreadCreatePriority", e0);
	{
		DI_ITERATOR_CB sIterator = {.pfnShow = CacheOpConfigRead};
		/* Writing the unsigned integer binary encoding of CACHEOP_CONFIG
		   into this file cycles through avail. configuration(s) */
		eError = DICreateEntry("cacheop_config", NULL, &sIterator, NULL,
							   DI_ENTRY_TYPE_GENERIC, &gsCwq.psConfigTune);
		PVR_LOG_GOTO_IF_FALSE(gsCwq.psConfigTune, "DICreateEntry", e0);
	}

	/* Register the CacheOp framework (re)configuration handlers */
	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_CacheOpConfig,
										CacheOpConfigQuery,
										CacheOpConfigSet,
										APPHINT_OF_DRIVER_NO_DEVICE,
										(void *) APPHINT_ID_CacheOpConfig);

	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_CacheOpUMKMThresholdSize,
										CacheOpConfigQuery,
										CacheOpConfigSet,
										APPHINT_OF_DRIVER_NO_DEVICE,
										(void *) APPHINT_ID_CacheOpUMKMThresholdSize);

	return PVRSRV_OK;
e0:
	CacheOpDeInit2();
	return eError;
}

/* Second-stage teardown (also used as the CacheOpInit2 failure path, so every
   resource is destroyed conditionally). Order matters: clear bInit and signal
   both event objects first so the worker thread and any fenced clients wake
   up, then destroy the thread, then the event objects and locks. */
void CacheOpDeInit2 (void)
{
	PVRSRV_ERROR eError = PVRSRV_OK;

	gsCwq.bInit = IMG_FALSE;

	if (gsCwq.hThreadWakeUpEvtObj)
	{
		eError = OSEventObjectSignal(gsCwq.hThreadWakeUpEvtObj);
		PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
	}

	if (gsCwq.hClientWakeUpEvtObj)
	{
		eError = OSEventObjectSignal(gsCwq.hClientWakeUpEvtObj);
		PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
	}

	if (gsCwq.hWorkerThread)
	{
		/* Thread may need several attempts to exit; retry within a timeout */
		LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US)
		{
			eError = OSThreadDestroy(gsCwq.hWorkerThread);
			if (PVRSRV_OK == eError)
			{
				gsCwq.hWorkerThread = NULL;
				break;
			}
			OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT);
		} END_LOOP_UNTIL_TIMEOUT();
		PVR_LOG_IF_ERROR(eError, "OSThreadDestroy");
		gsCwq.hWorkerThread = NULL;
	}

	if (gsCwq.hClientWakeUpEvtObj)
	{
		eError = OSEventObjectDestroy(gsCwq.hClientWakeUpEvtObj);
		PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
		gsCwq.hClientWakeUpEvtObj = NULL;
	}

	if (gsCwq.hThreadWakeUpEvtObj)
	{
		eError = OSEventObjectDestroy(gsCwq.hThreadWakeUpEvtObj);
		PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
		gsCwq.hThreadWakeUpEvtObj = NULL;
	}

	if (gsCwq.hConfigLock)
	{
		eError = OSLockDestroy(gsCwq.hConfigLock);
		PVR_LOG_IF_ERROR(eError, "OSLockDestroy");
		gsCwq.hConfigLock = NULL;
	}

	if (gsCwq.hDeferredLock)
	{
		eError = OSLockDestroy(gsCwq.hDeferredLock);
		PVR_LOG_IF_ERROR(eError, "OSLockDestroy");
		gsCwq.hDeferredLock = NULL;
	}

	if (gsCwq.psConfigTune)
	{
		DIDestroyEntry(gsCwq.psConfigTune);
		gsCwq.psConfigTune = NULL;
	}

	gsCwq.pui32InfoPage = NULL;
	gsCwq.psInfoPagePMR = NULL;
}

/* First-stage initialisation: caches page/cache-line geometry, resets the
   sequence/queue atomics, pre-marks every queue slot as unused and (debug
   builds only) creates the stats lock and "cacheop_history" DebugInfo file. */
PVRSRV_ERROR CacheOpInit (void)
{
	IMG_UINT32 idx;
	PVRSRV_ERROR eError = PVRSRV_OK;

	/* DDK initialisation is anticipated to be performed on the boot
	   processor (little core in big/little systems) though this may
	   not always be the case. If so, the value cached here is the
	   system wide safe (i.e. smallest) L1 d-cache line size value
	   on any/such platforms with mismatched d-cache line sizes */
	gsCwq.uiPageSize = OSGetPageSize();
	gsCwq.uiPageShift = OSGetPageShift();
	gsCwq.uiLineSize = OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE);
	gsCwq.uiLineShift = ExactLog2(gsCwq.uiLineSize);
	PVR_LOG_RETURN_IF_FALSE((gsCwq.uiLineSize && gsCwq.uiPageSize && gsCwq.uiPageShift), "", PVRSRV_ERROR_INIT_FAILURE);
	gsCwq.uiCacheOpAddrType = OSCPUCacheOpAddressType();

	/* More information regarding these atomic counters can be found
	   in the CACHEOP_WORK_QUEUE type definition at top of file */
	OSAtomicWrite(&gsCwq.hCompletedSeqNum, 0);
	OSAtomicWrite(&gsCwq.hCommonSeqNum, 0);
	OSAtomicWrite(&gsCwq.hWriteCounter, 0);
	OSAtomicWrite(&gsCwq.hReadCounter, 0);

	/* Poison each slot so a stale entry is recognisable */
	for (idx = 0; idx < CACHEOP_INDICES_MAX; idx++)
	{
		gsCwq.asWorkItems[idx].iTimeline = PVRSRV_NO_TIMELINE;
		gsCwq.asWorkItems[idx].psPMR = (void *)(uintptr_t)~0;
		gsCwq.asWorkItems[idx].ui32OpSeqNum = (IMG_UINT32)~0;
	}


#if defined(CACHEOP_DEBUG)
	/* debugfs file read-out is not concurrent, so lock protects against this */
	eError = OSLockCreate((POS_LOCK*)&gsCwq.hStatsExecLock);
	PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0);

	gsCwq.i32StatsExecWriteIdx = 0;
	OSCachedMemSet(gsCwq.asStatsExecuted, 0, sizeof(gsCwq.asStatsExecuted));

	{
		DI_ITERATOR_CB sIterator = {.pfnShow = CacheOpStatsExecLogRead};
		/* File captures the most recent subset of CacheOp(s) executed */
		eError = DICreateEntry("cacheop_history", NULL, &sIterator, NULL,
							   DI_ENTRY_TYPE_GENERIC, &gsCwq.psDIEntry);
		PVR_LOG_GOTO_IF_ERROR(eError, "DICreateEntry", e0);
	}
e0:
#endif
	return eError;
}

/* First-stage teardown: debug-build-only destruction of the stats lock and
   the "cacheop_history" DebugInfo entry; no-op otherwise. */
void CacheOpDeInit (void)
{
#if defined(CACHEOP_DEBUG)
	if (gsCwq.hStatsExecLock)
	{
		(void) OSLockDestroy(gsCwq.hStatsExecLock);
		gsCwq.hStatsExecLock = NULL;
	}

	if (gsCwq.psDIEntry)
	{
		DIDestroyEntry(gsCwq.psDIEntry);
		gsCwq.psDIEntry = NULL;
	}
#endif
}
diff --git
a/drivers/mcst/gpu-imgtec/services/server/common/connection_server.c b/drivers/mcst/gpu-imgtec/services/server/common/connection_server.c new file mode 100644 index 000000000000..5d169cfbe41e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/connection_server.c @@ -0,0 +1,466 @@ +/*************************************************************************/ /*! +@File +@Title Server side connection management +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Handles connections coming from the client and the management + connection based information +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "handle.h" +#include "pvrsrv.h" +#include "connection_server.h" +#include "osconnection_server.h" +#include "allocmem.h" +#include "pvr_debug.h" +#include "sync_server.h" +#include "process_stats.h" +#include "pdump_km.h" +#include "osfunc.h" +#include "tlstream.h" + +/* PID associated with Connection currently being purged by Cleanup thread */ +static IMG_PID gCurrentPurgeConnectionPid; + +static PVRSRV_ERROR ConnectionDataDestroy(CONNECTION_DATA *psConnection) +{ + PVRSRV_ERROR eError; + PROCESS_HANDLE_BASE *psProcessHandleBase; + IMG_UINT64 ui64MaxBridgeTime; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + if (psPVRSRVData->bUnload) + { + /* driver is unloading so do not allow the bridge lock to be released */ + ui64MaxBridgeTime = 0; + } + else + { + ui64MaxBridgeTime = CONNECTION_DEFERRED_CLEANUP_TIMESLICE_NS; + } + + PVR_ASSERT(psConnection != NULL); + PVR_LOG_RETURN_IF_INVALID_PARAM(psConnection, "psConnection"); + + /* Close HWPerfClient stream here even though we created it in + * PVRSRVConnectKM(). 
*/ + if (psConnection->hClientTLStream) + { + TLStreamClose(psConnection->hClientTLStream); + psConnection->hClientTLStream = NULL; + PVR_DPF((PVR_DBG_MESSAGE, "Destroyed private stream.")); + } + + /* Get process handle base to decrement the refcount */ + psProcessHandleBase = psConnection->psProcessHandleBase; + + if (psProcessHandleBase != NULL) + { + /* acquire the lock now to ensure unref and removal from the + * hash table is atomic. + * if the refcount becomes zero then the lock needs to be held + * until the entry is removed from the hash table. + */ + OSLockAcquire(psPVRSRVData->hProcessHandleBase_Lock); + + /* In case the refcount becomes 0 we can remove the process handle base */ + if (OSAtomicDecrement(&psProcessHandleBase->iRefCount) == 0) + { + uintptr_t uiHashValue; + + uiHashValue = HASH_Remove(psPVRSRVData->psProcessHandleBase_Table, psConnection->pid); + OSLockRelease(psPVRSRVData->hProcessHandleBase_Lock); + + if (!uiHashValue) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to remove handle base from hash table.", + __func__)); + return PVRSRV_ERROR_UNABLE_TO_REMOVE_HASH_VALUE; + } + + eError = PVRSRVFreeKernelHandles(psProcessHandleBase->psHandleBase); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVFreeKernelHandles"); + + eError = PVRSRVFreeHandleBase(psProcessHandleBase->psHandleBase, ui64MaxBridgeTime); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVFreeHandleBase:1"); + + OSFreeMem(psProcessHandleBase); + } + else + { + OSLockRelease(psPVRSRVData->hProcessHandleBase_Lock); + } + + psConnection->psProcessHandleBase = NULL; + } + + /* Free handle base for this connection */ + if (psConnection->psHandleBase != NULL) + { + eError = PVRSRVFreeHandleBase(psConnection->psHandleBase, ui64MaxBridgeTime); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVFreeHandleBase:2"); + + psConnection->psHandleBase = NULL; + } + + if (psConnection->psSyncConnectionData != NULL) + { + SyncUnregisterConnection(psConnection->psSyncConnectionData); + psConnection->psSyncConnectionData = 
NULL; + } + + if (psConnection->psPDumpConnectionData != NULL) + { + PDumpUnregisterConnection(psConnection->psPDumpConnectionData); + psConnection->psPDumpConnectionData = NULL; + } + + /* Call environment specific connection data deinit function */ + if (psConnection->hOsPrivateData != NULL) + { + eError = OSConnectionPrivateDataDeInit(psConnection->hOsPrivateData); + PVR_LOG_RETURN_IF_ERROR(eError, "OSConnectionPrivateDataDeInit"); + + psConnection->hOsPrivateData = NULL; + } + + /* Close the PID stats entry as late as possible to catch all frees */ +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + if (psConnection->hProcessStats != NULL) + { + PVRSRVStatsDeregisterProcess(psConnection->hProcessStats); + psConnection->hProcessStats = NULL; + } +#endif + + OSFreeMemNoStats(psConnection); + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVCommonConnectionConnect(void **ppvPrivData, void *pvOSData) +{ + CONNECTION_DATA *psConnection; + PVRSRV_ERROR eError; + PROCESS_HANDLE_BASE *psProcessHandleBase; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + /* Allocate connection data area, no stats since process not registered yet */ + psConnection = OSAllocZMemNoStats(sizeof(*psConnection)); + PVR_LOG_RETURN_IF_NOMEM(psConnection, "psConnection"); + + /* Allocate process statistics as early as possible to catch all allocs */ +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + eError = PVRSRVStatsRegisterProcess(&psConnection->hProcessStats); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVStatsRegisterProcess", failure); +#endif + + /* Call environment specific connection data init function */ + eError = OSConnectionPrivateDataInit(&psConnection->hOsPrivateData, pvOSData); + PVR_LOG_GOTO_IF_ERROR(eError, "OSConnectionPrivateDataInit", failure); + + psConnection->pid = OSGetCurrentClientProcessIDKM(); + psConnection->vpid = OSGetCurrentVirtualProcessID(); + psConnection->tid = 
(IMG_UINT32)OSGetCurrentClientThreadIDKM(); + OSStringLCopy(psConnection->pszProcName, OSGetCurrentClientProcessNameKM(), PVRSRV_CONNECTION_PROCESS_NAME_LEN); + +#if defined(DEBUG) || defined(PDUMP) + PVR_LOG(("%s connected", psConnection->pszProcName)); +#endif + + /* Register this connection with the sync core */ + eError = SyncRegisterConnection(&psConnection->psSyncConnectionData); + PVR_LOG_GOTO_IF_ERROR(eError, "SyncRegisterConnection", failure); + + /* + * Register this connection and Sync PDump callback with + * the pdump core. Pass in the Sync connection data. + */ + eError = PDumpRegisterConnection(psConnection->psSyncConnectionData, + SyncConnectionPDumpSyncBlocks, + &psConnection->psPDumpConnectionData); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpRegisterConnection", failure); + + /* Allocate handle base for this connection */ + eError = PVRSRVAllocHandleBase(&psConnection->psHandleBase, + PVRSRV_HANDLE_BASE_TYPE_CONNECTION); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAllocHandleBase", failure); + + /* Try to get process handle base if it already exists */ + OSLockAcquire(psPVRSRVData->hProcessHandleBase_Lock); + psProcessHandleBase = (PROCESS_HANDLE_BASE*) HASH_Retrieve(PVRSRVGetPVRSRVData()->psProcessHandleBase_Table, + psConnection->pid); + + /* In case there is none we are going to allocate one */ + if (psProcessHandleBase == NULL) + { + psProcessHandleBase = OSAllocZMem(sizeof(PROCESS_HANDLE_BASE)); + PVR_LOG_GOTO_IF_NOMEM(psProcessHandleBase, eError, failureLock); + + OSAtomicWrite(&psProcessHandleBase->iRefCount, 0); + + /* Allocate handle base for this process */ + eError = PVRSRVAllocHandleBase(&psProcessHandleBase->psHandleBase, + PVRSRV_HANDLE_BASE_TYPE_PROCESS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Couldn't allocate handle base for process (%s)", + __func__, + PVRSRVGetErrorString(eError))); + OSFreeMem(psProcessHandleBase); + goto failureLock; + } + + /* Insert the handle base into the global hash table */ + if 
(!HASH_Insert(PVRSRVGetPVRSRVData()->psProcessHandleBase_Table, + psConnection->pid, + (uintptr_t) psProcessHandleBase)) + { + PVRSRVFreeHandleBase(psProcessHandleBase->psHandleBase, 0); + + OSFreeMem(psProcessHandleBase); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_UNABLE_TO_INSERT_HASH_VALUE, failureLock); + } + } + OSAtomicIncrement(&psProcessHandleBase->iRefCount); + + OSLockRelease(psPVRSRVData->hProcessHandleBase_Lock); + + /* hConnectionsLock now resides in PVRSRV_DEVICE_NODE */ + { + PVRSRV_DEVICE_NODE *psDevNode = OSGetDevNode(psConnection); + + OSLockAcquire(psDevNode->hConnectionsLock); + dllist_add_to_tail(&psDevNode->sConnections, &psConnection->sConnectionListNode); + OSLockRelease(psDevNode->hConnectionsLock); + } + + psConnection->psProcessHandleBase = psProcessHandleBase; + + *ppvPrivData = psConnection; + + return PVRSRV_OK; + +failureLock: + OSLockRelease(psPVRSRVData->hProcessHandleBase_Lock); +failure: + ConnectionDataDestroy(psConnection); + + return eError; +} + +static PVRSRV_ERROR _CleanupThreadPurgeConnectionData(void *pvConnectionData) +{ + PVRSRV_ERROR eErrorConnection, eErrorKernel; + CONNECTION_DATA *psConnectionData = pvConnectionData; + + gCurrentPurgeConnectionPid = psConnectionData->pid; + + eErrorConnection = ConnectionDataDestroy(psConnectionData); + if (eErrorConnection != PVRSRV_OK) + { + if (eErrorConnection == PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Failed to purge connection data %p " + "(deferring destruction)", + __func__, + psConnectionData)); + } + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Connection data %p deferred destruction finished", + __func__, + psConnectionData)); + } + + /* Check if possible resize the global handle base */ + eErrorKernel = PVRSRVPurgeHandles(KERNEL_HANDLE_BASE); + PVR_LOG_IF_ERROR(eErrorKernel, "PVRSRVPurgeHandles"); + + gCurrentPurgeConnectionPid = 0; + + return eErrorConnection; +} + +void PVRSRVCommonConnectionDisconnect(void *pvDataPtr) +{ + CONNECTION_DATA 
*psConnectionData = pvDataPtr; + PVRSRV_DEVICE_NODE *psDevNode = OSGetDevNode(psConnectionData); + + OSLockAcquire(psDevNode->hConnectionsLock); + dllist_remove_node(&psConnectionData->sConnectionListNode); + OSLockRelease(psDevNode->hConnectionsLock); + + /* Notify the PDump core if the pdump control client is disconnecting */ + if (psConnectionData->ui32ClientFlags & SRV_FLAGS_PDUMPCTRL) + { + PDumpDisconnectionNotify(); + } + +#if defined(DEBUG) || defined(PDUMP) + PVR_LOG(("%s disconnected", psConnectionData->pszProcName)); +#endif + +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK) +#endif + { + /* Defer the release of the connection data */ + psConnectionData->sCleanupThreadFn.pfnFree = _CleanupThreadPurgeConnectionData; + psConnectionData->sCleanupThreadFn.pvData = psConnectionData; + psConnectionData->sCleanupThreadFn.bDependsOnHW = IMG_FALSE; + CLEANUP_THREAD_SET_RETRY_COUNT(&psConnectionData->sCleanupThreadFn, + CLEANUP_THREAD_RETRY_COUNT_DEFAULT); + PVRSRVCleanupThreadAddWork(&psConnectionData->sCleanupThreadFn); + } +} + +IMG_PID PVRSRVGetPurgeConnectionPid(void) +{ + return gCurrentPurgeConnectionPid; +} + +/* Prefix for debug messages about Active Connections */ +#define DEBUG_DUMP_CONNECTION_FORMAT_STR " P%d-V%d-T%d-%s," +#define CONNECTIONS_PREFIX "Connections:" +#define MAX_CONNECTIONS_PREFIX sizeof(CONNECTIONS_PREFIX) +#define MAX_DEBUG_DUMP_CONNECTION_STR_LEN (1+10+10+10+7+PVRSRV_CONNECTION_PROCESS_NAME_LEN) +#define MAX_DEBUG_DUMP_STRING_LEN (1+MAX_CONNECTIONS_PREFIX+(3*MAX_DEBUG_DUMP_CONNECTION_STR_LEN)) + +void PVRSRVConnectionDebugNotify(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PDLLIST_NODE pNext, pNode; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDevNode; + IMG_BOOL bFoundConnections = IMG_FALSE; + + /* + * Connections are linked to each device-node within + * PVRSRV_DATA->psDeviceNodeList. 
Traverse this and display each connection + * within device node. + */ + for (psDevNode = psPVRSRVData->psDeviceNodeList; + psDevNode != NULL; + psDevNode = psDevNode->psNext) + { + /* We must check for an initialised device before accessing its mutex. + * The mutex is initialised as part of DeviceInitialize() which occurs + * on first access to the device node. + */ + if (psDevNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) + { + continue; + } + + bFoundConnections = IMG_TRUE; + + OSLockAcquire(psDevNode->hConnectionsLock); + if (dllist_is_empty(&psDevNode->sConnections)) + { + PVR_DUMPDEBUG_LOG(CONNECTIONS_PREFIX " No active connections"); + } + else + { + IMG_CHAR sActiveConnections[MAX_DEBUG_DUMP_STRING_LEN]; + IMG_UINT16 i, uiPos = 0; + IMG_BOOL bPrinted = IMG_FALSE; + size_t uiSize = sizeof(sActiveConnections); + + OSStringLCopy(sActiveConnections, CONNECTIONS_PREFIX, uiSize); + uiPos = sizeof(CONNECTIONS_PREFIX) - 1; /* Next buffer location to fill */ + uiSize -= uiPos; /* Remaining space in sActiveConnections[] */ + + dllist_foreach_node(&psDevNode->sConnections, pNode, pNext) + { + CONNECTION_DATA *sData = IMG_CONTAINER_OF(pNode, CONNECTION_DATA, sConnectionListNode); + + IMG_CHAR sTmpBuff[MAX_DEBUG_DUMP_CONNECTION_STR_LEN]; + i = OSSNPrintf(sTmpBuff, MAX_DEBUG_DUMP_CONNECTION_STR_LEN, + DEBUG_DUMP_CONNECTION_FORMAT_STR, sData->pid, sData->vpid, sData->tid, sData->pszProcName); + i = MIN(MAX_DEBUG_DUMP_CONNECTION_STR_LEN, i); + bPrinted = IMG_FALSE; + + OSStringLCopy(sActiveConnections+uiPos, sTmpBuff, uiSize); + + /* Move the write offset to the end of the current string */ + uiPos += i; + /* Update the amount of remaining space available to copy into */ + uiSize -= i; + + /* If there is not enough space to add another connection to this line, output the line */ + if (uiSize <= MAX_DEBUG_DUMP_CONNECTION_STR_LEN) + { + PVR_DUMPDEBUG_LOG("%s", sActiveConnections); + + /* + * Remove the "Connections:" prefix from the buffer. 
+ * Leave the subsequent buffer contents indented by the same + * amount to aid in interpreting the debug output. + */ + uiPos = sizeof(CONNECTIONS_PREFIX) - 1; + /* Reset the amount of space available to copy into */ + uiSize = MAX_DEBUG_DUMP_STRING_LEN - uiPos; + bPrinted = IMG_TRUE; + } + } + + /* Only print the current line if it hasn't already been printed */ + if (!bPrinted) + { + // Strip off the final comma + sActiveConnections[OSStringNLength(sActiveConnections, MAX_DEBUG_DUMP_STRING_LEN) - 1] = '\0'; + PVR_DUMPDEBUG_LOG("%s", sActiveConnections); + } +#undef MAX_DEBUG_DUMP_STRING_LEN +#undef MAX_DEBUG_DUMP_CONNECTIONS_PER_LINE + } + OSLockRelease(psDevNode->hConnectionsLock); + } + + /* Check to see if we have displayed anything from the loop above */ + if (bFoundConnections == IMG_FALSE) + { + PVR_DUMPDEBUG_LOG(CONNECTIONS_PREFIX " No active connections"); + } +} diff --git a/drivers/mcst/gpu-imgtec/services/server/common/devicemem_heapcfg.c b/drivers/mcst/gpu-imgtec/services/server/common/devicemem_heapcfg.c new file mode 100644 index 000000000000..20fb7c6ce76d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/devicemem_heapcfg.c @@ -0,0 +1,137 @@ +/*************************************************************************/ /*! +@File devicemem_heapcfg.c +@Title Temporary Device Memory 2 stuff +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device memory management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +/* our exported API */ +#include "devicemem_heapcfg.h" + +#include "device.h" +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" +#include "osfunc.h" + +#include "connection_server.h" + +PVRSRV_ERROR +HeapCfgHeapConfigCount(CONNECTION_DATA * psConnection, + const PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 *puiNumHeapConfigsOut) +{ + + PVR_UNREFERENCED_PARAMETER(psConnection); + + *puiNumHeapConfigsOut = psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs; + + return PVRSRV_OK; +} + +PVRSRV_ERROR +HeapCfgHeapCount(CONNECTION_DATA * psConnection, + const PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 uiHeapConfigIndex, + IMG_UINT32 *puiNumHeapsOut) +{ + if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs) + { + return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX; + } + + *puiNumHeapsOut = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps; + + return PVRSRV_OK; +} + +PVRSRV_ERROR +HeapCfgHeapConfigName(CONNECTION_DATA * psConnection, + const PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 uiHeapConfigIndex, + IMG_UINT32 uiHeapConfigNameBufSz, + IMG_CHAR *pszHeapConfigNameOut) +{ + if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs) + { + return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX; + } + + OSSNPrintf(pszHeapConfigNameOut, uiHeapConfigNameBufSz, "%s", psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].pszName); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +HeapCfgHeapDetails(CONNECTION_DATA * psConnection, + const PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 uiHeapConfigIndex, + IMG_UINT32 uiHeapIndex, + IMG_UINT32 uiHeapNameBufSz, + IMG_CHAR *pszHeapNameOut, + IMG_DEV_VIRTADDR *psDevVAddrBaseOut, + IMG_DEVMEM_SIZE_T *puiHeapLengthOut, + IMG_DEVMEM_SIZE_T *puiReservedRegionLengthOut, + IMG_UINT32 *puiLog2DataPageSizeOut, + 
IMG_UINT32 *puiLog2ImportAlignmentOut) +{ + DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint; + + if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs) + { + return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX; + } + + if (uiHeapIndex >= psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps) + { + return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX; + } + + psHeapBlueprint = &psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].psHeapBlueprintArray[uiHeapIndex]; + + OSSNPrintf(pszHeapNameOut, uiHeapNameBufSz, "%s", psHeapBlueprint->pszName); + *psDevVAddrBaseOut = psHeapBlueprint->sHeapBaseAddr; + *puiHeapLengthOut = psHeapBlueprint->uiHeapLength; + *puiReservedRegionLengthOut = psHeapBlueprint->uiReservedRegionLength; + *puiLog2DataPageSizeOut = psHeapBlueprint->uiLog2DataPageSize; + *puiLog2ImportAlignmentOut = psHeapBlueprint->uiLog2ImportAlignment; + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/common/devicemem_history_server.c b/drivers/mcst/gpu-imgtec/services/server/common/devicemem_history_server.c new file mode 100644 index 000000000000..e51c90969e9f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/devicemem_history_server.c @@ -0,0 +1,1897 @@ +/*************************************************************************/ /*! +@File +@Title Devicemem history functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Devicemem history functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "allocmem.h" +#include "img_defs.h" +#include "pmr.h" +#include "pvrsrv.h" +#include "pvrsrv_device.h" +#include "pvr_debug.h" +#include "devicemem_server.h" +#include "lock.h" +#include "devicemem_history_server.h" +#include "pdump_km.h" +#include "di_server.h" + +#define ALLOCATION_LIST_NUM_ENTRIES 10000 + +/* data type to hold an allocation index. + * we make it 16 bits wide if possible + */ +#if ALLOCATION_LIST_NUM_ENTRIES <= 0xFFFF +typedef uint16_t ALLOC_INDEX_T; +#else +typedef uint32_t ALLOC_INDEX_T; +#endif + +/* a record describing a single allocation known to DeviceMemHistory. + * this is an element in a doubly linked list of allocations + */ +typedef struct _RECORD_ALLOCATION_ +{ + /* time when this RECORD_ALLOCATION was created/initialised */ + IMG_UINT64 ui64CreationTime; + /* serial number of the PMR relating to this allocation */ + IMG_UINT64 ui64Serial; + /* base DevVAddr of this allocation */ + IMG_DEV_VIRTADDR sDevVAddr; + /* size in bytes of this allocation */ + IMG_DEVMEM_SIZE_T uiSize; + /* Log2 page size of this allocation's GPU pages */ + IMG_UINT32 ui32Log2PageSize; + /* Process ID (PID) this allocation belongs to */ + IMG_PID uiPID; + /* index of previous allocation in the list */ + ALLOC_INDEX_T ui32Prev; + /* index of next allocation in the list */ + ALLOC_INDEX_T ui32Next; + /* annotation/name of this allocation */ + IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN]; +} RECORD_ALLOCATION; + +/* each command in the circular buffer is prefixed with an 8-bit value + * denoting the command type + */ +typedef enum _COMMAND_TYPE_ +{ + COMMAND_TYPE_NONE, + COMMAND_TYPE_TIMESTAMP, + COMMAND_TYPE_MAP_ALL, + COMMAND_TYPE_UNMAP_ALL, + COMMAND_TYPE_MAP_RANGE, + COMMAND_TYPE_UNMAP_RANGE, + /* sentinel value */ + COMMAND_TYPE_COUNT, +} COMMAND_TYPE; + +/* Timestamp command: + * This command is inserted into the circular buffer to provide an updated + * timestamp. 
+ * The nanosecond-accuracy timestamp is packed into a 56-bit integer, in order + * for the whole command to fit into 8 bytes. + */ +typedef struct _COMMAND_TIMESTAMP_ +{ + IMG_UINT8 aui8TimeNs[7]; +} COMMAND_TIMESTAMP; + +/* MAP_ALL command: + * This command denotes the allocation at the given index was wholly mapped + * in to the GPU MMU + */ +typedef struct _COMMAND_MAP_ALL_ +{ + ALLOC_INDEX_T uiAllocIndex; +} COMMAND_MAP_ALL; + +/* UNMAP_ALL command: + * This command denotes the allocation at the given index was wholly unmapped + * from the GPU MMU + * Note: COMMAND_MAP_ALL and COMMAND_UNMAP_ALL commands have the same layout. + */ +typedef COMMAND_MAP_ALL COMMAND_UNMAP_ALL; + +/* packing attributes for the MAP_RANGE command */ +#define MAP_RANGE_MAX_START ((1 << 18) - 1) +#define MAP_RANGE_MAX_RANGE ((1 << 12) - 1) + +/* MAP_RANGE command: + * Denotes a range of pages within the given allocation being mapped. + * The range is expressed as [Page Index] + [Page Count] + * This information is packed into a 40-bit integer, in order to make + * the command size 8 bytes. + */ + +typedef struct _COMMAND_MAP_RANGE_ +{ + IMG_UINT8 aui8Data[5]; + ALLOC_INDEX_T uiAllocIndex; +} COMMAND_MAP_RANGE; + +/* UNMAP_RANGE command: + * Denotes a range of pages within the given allocation being unmapped. + * The range is expressed as [Page Index] + [Page Count] + * This information is packed into a 40-bit integer, in order to make + * the command size 8 bytes. + * Note: COMMAND_MAP_RANGE and COMMAND_UNMAP_RANGE commands have the same layout.
+ */ +typedef COMMAND_MAP_RANGE COMMAND_UNMAP_RANGE; + +/* wrapper structure for a command */ +typedef struct _COMMAND_WRAPPER_ +{ + IMG_UINT8 ui8Type; + union { + COMMAND_TIMESTAMP sTimeStamp; + COMMAND_MAP_ALL sMapAll; + COMMAND_UNMAP_ALL sUnmapAll; + COMMAND_MAP_RANGE sMapRange; + COMMAND_UNMAP_RANGE sUnmapRange; + } u; +} COMMAND_WRAPPER; + +/* target size for the circular buffer of commands */ +#define CIRCULAR_BUFFER_SIZE_KB 2048 +/* turn the circular buffer target size into a number of commands */ +#define CIRCULAR_BUFFER_NUM_COMMANDS ((CIRCULAR_BUFFER_SIZE_KB * 1024) / sizeof(COMMAND_WRAPPER)) + +/* index value denoting the end of a list */ +#define END_OF_LIST 0xFFFFFFFF +#define ALLOC_INDEX_TO_PTR(idx) (&(gsDevicememHistoryData.sRecords.pasAllocations[idx])) +#define CHECK_ALLOC_INDEX(idx) (idx < ALLOCATION_LIST_NUM_ENTRIES) + +/* wrapper structure for the allocation records and the commands circular buffer */ +typedef struct _RECORDS_ +{ + RECORD_ALLOCATION *pasAllocations; + IMG_UINT32 ui32AllocationsListHead; + + IMG_UINT32 ui32Head; + IMG_UINT32 ui32Tail; + COMMAND_WRAPPER *pasCircularBuffer; +} RECORDS; + +typedef struct _DEVICEMEM_HISTORY_DATA_ +{ + /* DI entry */ + DI_ENTRY *psDIEntry; + + RECORDS sRecords; + POS_LOCK hLock; +} DEVICEMEM_HISTORY_DATA; + +static DEVICEMEM_HISTORY_DATA gsDevicememHistoryData; + +static void DevicememHistoryLock(void) +{ + OSLockAcquire(gsDevicememHistoryData.hLock); +} + +static void DevicememHistoryUnlock(void) +{ + OSLockRelease(gsDevicememHistoryData.hLock); +} + +/* given a time stamp, calculate the age in nanoseconds */ +static IMG_UINT64 _CalculateAge(IMG_UINT64 ui64Now, + IMG_UINT64 ui64Then, + IMG_UINT64 ui64Max) +{ + if (ui64Now >= ui64Then) + { + /* no clock wrap */ + return ui64Now - ui64Then; + } + else + { + /* clock has wrapped */ + return (ui64Max - ui64Then) + ui64Now + 1; + } +} + +/* AcquireCBSlot: + * Acquire the next slot in the circular buffer and + * move the circular buffer head along by one + 
* Returns a pointer to the acquired slot. + */ +static COMMAND_WRAPPER *AcquireCBSlot(void) +{ + COMMAND_WRAPPER *psSlot; + + psSlot = &gsDevicememHistoryData.sRecords.pasCircularBuffer[gsDevicememHistoryData.sRecords.ui32Head]; + + gsDevicememHistoryData.sRecords.ui32Head = + (gsDevicememHistoryData.sRecords.ui32Head + 1) + % CIRCULAR_BUFFER_NUM_COMMANDS; + + return psSlot; +} + +/* TimeStampPack: + * Packs the given timestamp value into the COMMAND_TIMESTAMP structure. + * This takes a 64-bit nanosecond timestamp and packs it in to a 56-bit + * integer in the COMMAND_TIMESTAMP command. + */ +static void TimeStampPack(COMMAND_TIMESTAMP *psTimeStamp, IMG_UINT64 ui64Now) +{ + IMG_UINT32 i; + + for (i = 0; i < ARRAY_SIZE(psTimeStamp->aui8TimeNs); i++) + { + psTimeStamp->aui8TimeNs[i] = ui64Now & 0xFF; + ui64Now >>= 8; + } +} + +/* packing a 64-bit nanosecond into a 7-byte integer loses the + * top 8 bits of data. This must be taken into account when + * comparing a full timestamp against an unpacked timestamp + */ +#define TIME_STAMP_MASK ((1LLU << 56) - 1) +#define DO_TIME_STAMP_MASK(ns64) (ns64 & TIME_STAMP_MASK) + +/* TimeStampUnpack: + * Unpack the timestamp value from the given COMMAND_TIMESTAMP command + */ +static IMG_UINT64 TimeStampUnpack(COMMAND_TIMESTAMP *psTimeStamp) +{ + IMG_UINT64 ui64TimeNs = 0; + IMG_UINT32 i; + + for (i = ARRAY_SIZE(psTimeStamp->aui8TimeNs); i > 0; i--) + { + ui64TimeNs <<= 8; + ui64TimeNs |= (IMG_UINT64) psTimeStamp->aui8TimeNs[i - 1]; + } + + return ui64TimeNs; +} + +#if defined(PDUMP) + +static void EmitPDumpAllocation(IMG_UINT32 ui32AllocationIndex, + RECORD_ALLOCATION *psAlloc) +{ + PDUMPCOMMENT("[SrvPFD] Allocation: %u" + " Addr: " IMG_DEV_VIRTADDR_FMTSPEC + " Size: " IMG_DEVMEM_SIZE_FMTSPEC + " Page size: %u" + " PID: %u" + " Process: %s" + " Name: %s", + ui32AllocationIndex, + psAlloc->sDevVAddr.uiAddr, + psAlloc->uiSize, + 1U << psAlloc->ui32Log2PageSize, + psAlloc->uiPID, + OSGetCurrentClientProcessNameKM(), + 
psAlloc->szName); +} + +static void EmitPDumpMapUnmapAll(COMMAND_TYPE eType, + IMG_UINT32 ui32AllocationIndex) +{ + const IMG_CHAR *pszOpName; + + switch (eType) + { + case COMMAND_TYPE_MAP_ALL: + pszOpName = "MAP_ALL"; + break; + case COMMAND_TYPE_UNMAP_ALL: + pszOpName = "UNMAP_ALL"; + break; + default: + PVR_DPF((PVR_DBG_ERROR, "EmitPDumpMapUnmapAll: Invalid type: %u", + eType)); + return; + + } + + PDUMPCOMMENT("[SrvPFD] Op: %s Allocation: %u", + pszOpName, + ui32AllocationIndex); +} + +static void EmitPDumpMapUnmapRange(COMMAND_TYPE eType, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 ui32StartPage, + IMG_UINT32 ui32Count) +{ + const IMG_CHAR *pszOpName; + + switch (eType) + { + case COMMAND_TYPE_MAP_RANGE: + pszOpName = "MAP_RANGE"; + break; + case COMMAND_TYPE_UNMAP_RANGE: + pszOpName = "UNMAP_RANGE"; + break; + default: + PVR_DPF((PVR_DBG_ERROR, "EmitPDumpMapUnmapRange: Invalid type: %u", + eType)); + return; + } + + PDUMPCOMMENT("[SrvPFD] Op: %s Allocation: %u Start Page: %u Count: %u", + pszOpName, + ui32AllocationIndex, + ui32StartPage, + ui32Count); +} + +#endif + +/* InsertTimeStampCommand: + * Insert a timestamp command into the circular buffer. 
+ */ +static void InsertTimeStampCommand(IMG_UINT64 ui64Now) +{ + COMMAND_WRAPPER *psCommand; + + psCommand = AcquireCBSlot(); + + psCommand->ui8Type = COMMAND_TYPE_TIMESTAMP; + + TimeStampPack(&psCommand->u.sTimeStamp, ui64Now); +} + +/* InsertMapAllCommand: + * Insert a "MAP_ALL" command for the given allocation into the circular buffer + */ +static void InsertMapAllCommand(IMG_UINT32 ui32AllocIndex) +{ + COMMAND_WRAPPER *psCommand; + + psCommand = AcquireCBSlot(); + + psCommand->ui8Type = COMMAND_TYPE_MAP_ALL; + psCommand->u.sMapAll.uiAllocIndex = ui32AllocIndex; + +#if defined(PDUMP) + EmitPDumpMapUnmapAll(COMMAND_TYPE_MAP_ALL, ui32AllocIndex); +#endif +} + +/* InsertUnmapAllCommand: + * Insert a "UNMAP_ALL" command for the given allocation into the circular buffer + */ +static void InsertUnmapAllCommand(IMG_UINT32 ui32AllocIndex) +{ + COMMAND_WRAPPER *psCommand; + + psCommand = AcquireCBSlot(); + + psCommand->ui8Type = COMMAND_TYPE_UNMAP_ALL; + psCommand->u.sUnmapAll.uiAllocIndex = ui32AllocIndex; + +#if defined(PDUMP) + EmitPDumpMapUnmapAll(COMMAND_TYPE_UNMAP_ALL, ui32AllocIndex); +#endif +} + +/* MapRangePack: + * Pack the given StartPage and Count values into the 40-bit representation + * in the MAP_RANGE command. + */ +static void MapRangePack(COMMAND_MAP_RANGE *psMapRange, + IMG_UINT32 ui32StartPage, + IMG_UINT32 ui32Count) +{ + IMG_UINT64 ui64Data; + IMG_UINT32 i; + + /* we must encode the data into 40 bits: + * 18 bits for the start page index + * 12 bits for the range + */ + PVR_ASSERT(ui32StartPage <= MAP_RANGE_MAX_START); + PVR_ASSERT(ui32Count <= MAP_RANGE_MAX_RANGE); + + ui64Data = (((IMG_UINT64) ui32StartPage) << 12) | ui32Count; + + for (i = 0; i < ARRAY_SIZE(psMapRange->aui8Data); i++) + { + psMapRange->aui8Data[i] = ui64Data & 0xFF; + ui64Data >>= 8; + } +} + +/* MapRangePack: + * Unpack the StartPage and Count values from the 40-bit representation + * in the MAP_RANGE command. 
+ */ +static void MapRangeUnpack(COMMAND_MAP_RANGE *psMapRange, + IMG_UINT32 *pui32StartPage, + IMG_UINT32 *pui32Count) +{ + IMG_UINT64 ui64Data = 0; + IMG_UINT32 i; + + for (i = ARRAY_SIZE(psMapRange->aui8Data); i > 0; i--) + { + ui64Data <<= 8; + ui64Data |= (IMG_UINT64) psMapRange->aui8Data[i - 1]; + } + + *pui32StartPage = (ui64Data >> 12); + *pui32Count = ui64Data & ((1 << 12) - 1); +} + +/* InsertMapRangeCommand: + * Insert a MAP_RANGE command into the circular buffer with the given + * StartPage and Count values. + */ +static void InsertMapRangeCommand(IMG_UINT32 ui32AllocIndex, + IMG_UINT32 ui32StartPage, + IMG_UINT32 ui32Count) +{ + COMMAND_WRAPPER *psCommand; + + psCommand = AcquireCBSlot(); + + psCommand->ui8Type = COMMAND_TYPE_MAP_RANGE; + psCommand->u.sMapRange.uiAllocIndex = ui32AllocIndex; + + MapRangePack(&psCommand->u.sMapRange, ui32StartPage, ui32Count); + +#if defined(PDUMP) + EmitPDumpMapUnmapRange(COMMAND_TYPE_MAP_RANGE, + ui32AllocIndex, + ui32StartPage, + ui32Count); +#endif +} + +/* InsertUnmapRangeCommand: + * Insert a UNMAP_RANGE command into the circular buffer with the given + * StartPage and Count values. + */ +static void InsertUnmapRangeCommand(IMG_UINT32 ui32AllocIndex, + IMG_UINT32 ui32StartPage, + IMG_UINT32 ui32Count) +{ + COMMAND_WRAPPER *psCommand; + + psCommand = AcquireCBSlot(); + + psCommand->ui8Type = COMMAND_TYPE_UNMAP_RANGE; + psCommand->u.sMapRange.uiAllocIndex = ui32AllocIndex; + + MapRangePack(&psCommand->u.sMapRange, ui32StartPage, ui32Count); + +#if defined(PDUMP) + EmitPDumpMapUnmapRange(COMMAND_TYPE_UNMAP_RANGE, + ui32AllocIndex, + ui32StartPage, + ui32Count); +#endif +} + +/* InsertAllocationToList: + * Helper function for the allocation list. 
+ * Inserts the given allocation at the head of the list, whose current head is + * pointed to by pui32ListHead + */ +static void InsertAllocationToList(IMG_UINT32 *pui32ListHead, IMG_UINT32 ui32Alloc) +{ + RECORD_ALLOCATION *psAlloc; + + psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc); + + if (*pui32ListHead == END_OF_LIST) + { + /* list is currently empty, so just replace it */ + *pui32ListHead = ui32Alloc; + psAlloc->ui32Next = psAlloc->ui32Prev = *pui32ListHead; + } + else + { + RECORD_ALLOCATION *psHeadAlloc; + RECORD_ALLOCATION *psTailAlloc; + + psHeadAlloc = ALLOC_INDEX_TO_PTR(*pui32ListHead); + psTailAlloc = ALLOC_INDEX_TO_PTR(psHeadAlloc->ui32Prev); + + /* make the new alloc point forwards to the previous head */ + psAlloc->ui32Next = *pui32ListHead; + /* make the new alloc point backwards to the previous tail */ + psAlloc->ui32Prev = psHeadAlloc->ui32Prev; + + /* the head is now our new alloc */ + *pui32ListHead = ui32Alloc; + + /* the old head now points back to the new head */ + psHeadAlloc->ui32Prev = *pui32ListHead; + + /* the tail now points forward to the new head */ + psTailAlloc->ui32Next = ui32Alloc; + } +} + +static void InsertAllocationToBusyList(IMG_UINT32 ui32Alloc) +{ + InsertAllocationToList(&gsDevicememHistoryData.sRecords.ui32AllocationsListHead, ui32Alloc); +} + +/* RemoveAllocationFromList: + * Helper function for the allocation list. 
+ * Removes the given allocation from the list, whose head is + * pointed to by pui32ListHead + */ +static void RemoveAllocationFromList(IMG_UINT32 *pui32ListHead, IMG_UINT32 ui32Alloc) +{ + RECORD_ALLOCATION *psAlloc; + + psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc); + + /* if this is the only element in the list then just make the list empty */ + if ((*pui32ListHead == ui32Alloc) && (psAlloc->ui32Next == ui32Alloc)) + { + *pui32ListHead = END_OF_LIST; + } + else + { + RECORD_ALLOCATION *psPrev, *psNext; + + psPrev = ALLOC_INDEX_TO_PTR(psAlloc->ui32Prev); + psNext = ALLOC_INDEX_TO_PTR(psAlloc->ui32Next); + + /* remove the allocation from the list */ + psPrev->ui32Next = psAlloc->ui32Next; + psNext->ui32Prev = psAlloc->ui32Prev; + + /* if this allocation is the head then update the head */ + if (*pui32ListHead == ui32Alloc) + { + *pui32ListHead = psAlloc->ui32Prev; + } + } +} + +static void RemoveAllocationFromBusyList(IMG_UINT32 ui32Alloc) +{ + RemoveAllocationFromList(&gsDevicememHistoryData.sRecords.ui32AllocationsListHead, ui32Alloc); +} + +/* TouchBusyAllocation: + * Move the given allocation to the head of the list + */ +static void TouchBusyAllocation(IMG_UINT32 ui32Alloc) +{ + RemoveAllocationFromBusyList(ui32Alloc); + InsertAllocationToBusyList(ui32Alloc); +} + +/* GetOldestBusyAllocation: + * Returns the index of the oldest allocation in the MRU list + */ +static IMG_UINT32 GetOldestBusyAllocation(void) +{ + IMG_UINT32 ui32Alloc; + RECORD_ALLOCATION *psAlloc; + + ui32Alloc = gsDevicememHistoryData.sRecords.ui32AllocationsListHead; + + if (ui32Alloc == END_OF_LIST) + { + return END_OF_LIST; + } + + psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc); + + return psAlloc->ui32Prev; +} + +static IMG_UINT32 GetFreeAllocation(void) +{ + IMG_UINT32 ui32Alloc; + + ui32Alloc = GetOldestBusyAllocation(); + + return ui32Alloc; +} + + +/* InitialiseAllocation: + * Initialise the given allocation structure with the given properties + */ +static void 
InitialiseAllocation(RECORD_ALLOCATION *psAlloc, + const IMG_CHAR *pszName, + IMG_UINT64 ui64Serial, + IMG_PID uiPID, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 ui32Log2PageSize) +{ + OSStringLCopy(psAlloc->szName, pszName, sizeof(psAlloc->szName)); + psAlloc->ui64Serial = ui64Serial; + psAlloc->uiPID = uiPID; + psAlloc->sDevVAddr = sDevVAddr; + psAlloc->uiSize = uiSize; + psAlloc->ui32Log2PageSize = ui32Log2PageSize; + psAlloc->ui64CreationTime = OSClockns64(); +} + +/* CreateAllocation: + * Creates a new allocation with the given properties then outputs the + * index of the allocation + */ +static PVRSRV_ERROR CreateAllocation(const IMG_CHAR *pszName, + IMG_UINT64 ui64Serial, + IMG_PID uiPID, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 ui32Log2PageSize, + IMG_BOOL bAutoPurge, + IMG_UINT32 *puiAllocationIndex) +{ + IMG_UINT32 ui32Alloc; + RECORD_ALLOCATION *psAlloc; + + ui32Alloc = GetFreeAllocation(); + + psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc); + + InitialiseAllocation(ALLOC_INDEX_TO_PTR(ui32Alloc), + pszName, + ui64Serial, + uiPID, + sDevVAddr, + uiSize, + ui32Log2PageSize); + + /* put the newly initialised allocation at the front of the MRU list */ + TouchBusyAllocation(ui32Alloc); + + *puiAllocationIndex = ui32Alloc; + +#if defined(PDUMP) + EmitPDumpAllocation(ui32Alloc, psAlloc); +#endif + + return PVRSRV_OK; +} + +/* MatchAllocation: + * Tests if the allocation at the given index matches the supplied properties. + * Returns IMG_TRUE if it is a match, otherwise IMG_FALSE. 
+ */ +static IMG_BOOL MatchAllocation(IMG_UINT32 ui32AllocationIndex, + IMG_UINT64 ui64Serial, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszName, + IMG_UINT32 ui32Log2PageSize, + IMG_PID uiPID) +{ + RECORD_ALLOCATION *psAlloc; + + psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocationIndex); + + return (psAlloc->ui64Serial == ui64Serial) && + (psAlloc->sDevVAddr.uiAddr == sDevVAddr.uiAddr) && + (psAlloc->uiSize == uiSize) && + (psAlloc->ui32Log2PageSize == ui32Log2PageSize) && + (OSStringNCompare(psAlloc->szName, pszName, DEVMEM_ANNOTATION_MAX_LEN) == 0); +} + +/* FindOrCreateAllocation: + * Convenience function. + * Given a set of allocation properties (serial, DevVAddr, size, name, etc), + * this function will look for an existing record of this allocation and + * create the allocation if there is no existing record + */ +static PVRSRV_ERROR FindOrCreateAllocation(IMG_UINT32 ui32AllocationIndexHint, + IMG_UINT64 ui64Serial, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + const char *pszName, + IMG_UINT32 ui32Log2PageSize, + IMG_PID uiPID, + IMG_BOOL bSparse, + IMG_UINT32 *pui32AllocationIndexOut, + IMG_BOOL *pbCreated) +{ + IMG_UINT32 ui32AllocationIndex; + PVRSRV_ERROR eError; + + if (ui32AllocationIndexHint != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) + { + IMG_BOOL bHaveAllocation; + + /* first, try to match against the index given by the client. 
+ * if the caller provided a hint but the allocation record is no longer + * there, it must have been purged, so go ahead and create a new allocation + */ + bHaveAllocation = MatchAllocation(ui32AllocationIndexHint, + ui64Serial, + sDevVAddr, + uiSize, + pszName, + ui32Log2PageSize, + uiPID); + if (bHaveAllocation) + { + *pbCreated = IMG_FALSE; + *pui32AllocationIndexOut = ui32AllocationIndexHint; + return PVRSRV_OK; + } + } + + /* if there is no record of the allocation then we + * create it now + */ + eError = CreateAllocation(pszName, + ui64Serial, + uiPID, + sDevVAddr, + uiSize, + ui32Log2PageSize, + IMG_TRUE, + &ui32AllocationIndex); + + if (eError == PVRSRV_OK) + { + *pui32AllocationIndexOut = ui32AllocationIndex; + *pbCreated = IMG_TRUE; + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to create record for allocation %s", + __func__, + pszName)); + } + + return eError; +} + +/* GenerateMapUnmapCommandsForSparsePMR: + * Generate the MAP_RANGE or UNMAP_RANGE commands for the sparse PMR, using the PMR's + * current mapping table + * + * PMR: The PMR whose mapping table to read. + * ui32AllocIndex: The allocation to attribute the MAP_RANGE/UNMAP range commands to. + * bMap: Set to TRUE for mapping or IMG_FALSE for unmapping + * + * This function goes through every page in the PMR's mapping table and looks for + * virtually contiguous ranges to record as being mapped or unmapped. 
+ */ +static void GenerateMapUnmapCommandsForSparsePMR(PMR *psPMR, + IMG_UINT32 ui32AllocIndex, + IMG_BOOL bMap) +{ + PMR_MAPPING_TABLE *psMappingTable; + IMG_UINT32 ui32DonePages = 0; + IMG_UINT32 ui32NumPages; + IMG_UINT32 i; + IMG_BOOL bInARun = IMG_FALSE; + IMG_UINT32 ui32CurrentStart = 0; + IMG_UINT32 ui32RunCount = 0; + + psMappingTable = PMR_GetMappigTable(psPMR); + ui32NumPages = psMappingTable->ui32NumPhysChunks; + + if (ui32NumPages == 0) + { + /* nothing to do */ + return; + } + + for (i = 0; i < psMappingTable->ui32NumVirtChunks; i++) + { + if (psMappingTable->aui32Translation[i] != TRANSLATION_INVALID) + { + if (!bInARun) + { + bInARun = IMG_TRUE; + ui32CurrentStart = i; + ui32RunCount = 1; + } + else + { + ui32RunCount++; + } + } + + if (bInARun) + { + /* test if we need to end this current run and generate the command, + * either because the next page is not virtually contiguous + * to the current page, we have reached the maximum range, + * or this is the last page in the mapping table + */ + if ((psMappingTable->aui32Translation[i] == TRANSLATION_INVALID) || + (ui32RunCount == MAP_RANGE_MAX_RANGE) || + (i == (psMappingTable->ui32NumVirtChunks - 1))) + { + if (bMap) + { + InsertMapRangeCommand(ui32AllocIndex, + ui32CurrentStart, + ui32RunCount); + } + else + { + InsertUnmapRangeCommand(ui32AllocIndex, + ui32CurrentStart, + ui32RunCount); + } + + ui32DonePages += ui32RunCount; + + if (ui32DonePages == ui32NumPages) + { + break; + } + + bInARun = IMG_FALSE; + } + } + } + +} + +/* GenerateMapUnmapCommandsForChangeList: + * Generate the MAP_RANGE or UNMAP_RANGE commands for the sparse PMR, using the + * list of page change (page map or page unmap) indices given. + * + * ui32NumPages: Number of pages which have changed. + * pui32PageList: List of indices of the pages which have changed. + * ui32AllocIndex: The allocation to attribute the MAP_RANGE/UNMAP range commands to. 
+ * bMap: Set to TRUE for mapping or IMG_FALSE for unmapping + * + * This function goes through every page in the list and looks for + * virtually contiguous ranges to record as being mapped or unmapped. + */ +static void GenerateMapUnmapCommandsForChangeList(IMG_UINT32 ui32NumPages, + IMG_UINT32 *pui32PageList, + IMG_UINT32 ui32AllocIndex, + IMG_BOOL bMap) +{ + IMG_UINT32 i; + IMG_BOOL bInARun = IMG_FALSE; + IMG_UINT32 ui32CurrentStart = 0; + IMG_UINT32 ui32RunCount = 0; + + for (i = 0; i < ui32NumPages; i++) + { + if (!bInARun) + { + bInARun = IMG_TRUE; + ui32CurrentStart = pui32PageList[i]; + } + + ui32RunCount++; + + /* we flush if: + * - the next page in the list is not one greater than the current page + * - this is the last page in the list + * - we have reached the maximum range size + */ + if ((i == (ui32NumPages - 1)) || + ((pui32PageList[i] + 1) != pui32PageList[i + 1]) || + (ui32RunCount == MAP_RANGE_MAX_RANGE)) + { + if (bMap) + { + InsertMapRangeCommand(ui32AllocIndex, + ui32CurrentStart, + ui32RunCount); + } + else + { + InsertUnmapRangeCommand(ui32AllocIndex, + ui32CurrentStart, + ui32RunCount); + } + + bInARun = IMG_FALSE; + ui32RunCount = 0; + } + } +} + +/* DevicememHistoryMapKM: + * Entry point for when an allocation is mapped into the MMU GPU + * + * psPMR: The PMR to which the allocation belongs. + * ui32Offset: The offset within the PMR at which the allocation begins. + * sDevVAddr: The DevVAddr at which the allocation begins. + * szName: Annotation/name for the allocation. + * ui32Log2PageSize: Page size of the allocation, expressed in log2 form. + * ui32AllocationIndex: Allocation index as provided by the client. + * We will use this as a short-cut to find the allocation + * in our records. + * pui32AllocationIndexOut: An updated allocation index for the client. + * This may be a new value if we just created the + * allocation record. 
+ */ +PVRSRV_ERROR DevicememHistoryMapKM(PMR *psPMR, + IMG_UINT32 ui32Offset, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + const char szName[DEVMEM_ANNOTATION_MAX_LEN], + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 *pui32AllocationIndexOut) +{ + IMG_BOOL bSparse = PMR_IsSparse(psPMR); + IMG_UINT64 ui64Serial; + IMG_PID uiPID = OSGetCurrentClientProcessIDKM(); + PVRSRV_ERROR eError; + IMG_BOOL bCreated; + + if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) && + !CHECK_ALLOC_INDEX(ui32AllocationIndex)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u", + __func__, + ui32AllocationIndex)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PMRGetUID(psPMR, &ui64Serial); + + DevicememHistoryLock(); + + eError = FindOrCreateAllocation(ui32AllocationIndex, + ui64Serial, + sDevVAddr, + uiSize, + szName, + ui32Log2PageSize, + uiPID, + bSparse, + &ui32AllocationIndex, + &bCreated); + + if ((eError == PVRSRV_OK) && !bCreated) + { + /* touch the allocation so it goes to the head of our MRU list */ + TouchBusyAllocation(ui32AllocationIndex); + } + else if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)", + __func__, + szName, + PVRSRVGETERRORSTRING(eError))); + goto out_unlock; + } + + if (!bSparse) + { + InsertMapAllCommand(ui32AllocationIndex); + } + else + { + GenerateMapUnmapCommandsForSparsePMR(psPMR, + ui32AllocationIndex, + IMG_TRUE); + } + + InsertTimeStampCommand(OSClockns64()); + + *pui32AllocationIndexOut = ui32AllocationIndex; + +out_unlock: + DevicememHistoryUnlock(); + + return eError; +} + +static void VRangeInsertMapUnmapCommands(IMG_BOOL bMap, + IMG_UINT32 ui32AllocationIndex, + IMG_DEV_VIRTADDR sBaseDevVAddr, + IMG_UINT32 ui32StartPage, + IMG_UINT32 ui32NumPages, + const IMG_CHAR *pszName) +{ + while (ui32NumPages > 0) + { + IMG_UINT32 ui32PagesToAdd; + + ui32PagesToAdd = MIN(ui32NumPages, MAP_RANGE_MAX_RANGE); + + if (ui32StartPage > 
MAP_RANGE_MAX_START) + { + PVR_DPF((PVR_DBG_WARNING, "Cannot record %s range beginning at page " + "%u on allocation %s", + bMap ? "map" : "unmap", + ui32StartPage, + pszName)); + return; + } + + if (bMap) + { + InsertMapRangeCommand(ui32AllocationIndex, + ui32StartPage, + ui32PagesToAdd); + } + else + { + InsertUnmapRangeCommand(ui32AllocationIndex, + ui32StartPage, + ui32PagesToAdd); + } + + ui32StartPage += ui32PagesToAdd; + ui32NumPages -= ui32PagesToAdd; + } +} + +PVRSRV_ERROR DevicememHistoryMapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr, + IMG_UINT32 ui32StartPage, + IMG_UINT32 ui32NumPages, + IMG_DEVMEM_SIZE_T uiAllocSize, + const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN], + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 *pui32AllocationIndexOut) +{ + IMG_PID uiPID = OSGetCurrentClientProcessIDKM(); + PVRSRV_ERROR eError; + IMG_BOOL bCreated; + + if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) && + !CHECK_ALLOC_INDEX(ui32AllocationIndex)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u", + __func__, + ui32AllocationIndex)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + DevicememHistoryLock(); + + eError = FindOrCreateAllocation(ui32AllocationIndex, + 0, + sBaseDevVAddr, + uiAllocSize, + szName, + ui32Log2PageSize, + uiPID, + IMG_FALSE, + &ui32AllocationIndex, + &bCreated); + + if ((eError == PVRSRV_OK) && !bCreated) + { + /* touch the allocation so it goes to the head of our MRU list */ + TouchBusyAllocation(ui32AllocationIndex); + } + else if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)", + __func__, + szName, + PVRSRVGETERRORSTRING(eError))); + goto out_unlock; + } + + VRangeInsertMapUnmapCommands(IMG_TRUE, + ui32AllocationIndex, + sBaseDevVAddr, + ui32StartPage, + ui32NumPages, + szName); + + *pui32AllocationIndexOut = ui32AllocationIndex; + +out_unlock: + DevicememHistoryUnlock(); + + return eError; + +} + +PVRSRV_ERROR 
DevicememHistoryUnmapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr, + IMG_UINT32 ui32StartPage, + IMG_UINT32 ui32NumPages, + IMG_DEVMEM_SIZE_T uiAllocSize, + const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN], + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 *pui32AllocationIndexOut) +{ + IMG_PID uiPID = OSGetCurrentClientProcessIDKM(); + PVRSRV_ERROR eError; + IMG_BOOL bCreated; + + if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) && + !CHECK_ALLOC_INDEX(ui32AllocationIndex)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u", + __func__, + ui32AllocationIndex)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + DevicememHistoryLock(); + + eError = FindOrCreateAllocation(ui32AllocationIndex, + 0, + sBaseDevVAddr, + uiAllocSize, + szName, + ui32Log2PageSize, + uiPID, + IMG_FALSE, + &ui32AllocationIndex, + &bCreated); + + if ((eError == PVRSRV_OK) && !bCreated) + { + /* touch the allocation so it goes to the head of our MRU list */ + TouchBusyAllocation(ui32AllocationIndex); + } + else if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)", + __func__, + szName, + PVRSRVGETERRORSTRING(eError))); + goto out_unlock; + } + + VRangeInsertMapUnmapCommands(IMG_FALSE, + ui32AllocationIndex, + sBaseDevVAddr, + ui32StartPage, + ui32NumPages, + szName); + + *pui32AllocationIndexOut = ui32AllocationIndex; + +out_unlock: + DevicememHistoryUnlock(); + + return eError; +} + + + +/* DevicememHistoryUnmapKM: + * Entry point for when an allocation is unmapped from the MMU GPU + * + * psPMR: The PMR to which the allocation belongs. + * ui32Offset: The offset within the PMR at which the allocation begins. + * sDevVAddr: The DevVAddr at which the allocation begins. + * szName: Annotation/name for the allocation. + * ui32Log2PageSize: Page size of the allocation, expressed in log2 form. + * ui32AllocationIndex: Allocation index as provided by the client. 
+ * We will use this as a short-cut to find the allocation + * in our records. + * pui32AllocationIndexOut: An updated allocation index for the client. + * This may be a new value if we just created the + * allocation record. + */ +PVRSRV_ERROR DevicememHistoryUnmapKM(PMR *psPMR, + IMG_UINT32 ui32Offset, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + const char szName[DEVMEM_ANNOTATION_MAX_LEN], + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 *pui32AllocationIndexOut) +{ + IMG_BOOL bSparse = PMR_IsSparse(psPMR); + IMG_UINT64 ui64Serial; + IMG_PID uiPID = OSGetCurrentClientProcessIDKM(); + PVRSRV_ERROR eError; + IMG_BOOL bCreated; + + if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) && + !CHECK_ALLOC_INDEX(ui32AllocationIndex)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u", + __func__, + ui32AllocationIndex)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PMRGetUID(psPMR, &ui64Serial); + + DevicememHistoryLock(); + + eError = FindOrCreateAllocation(ui32AllocationIndex, + ui64Serial, + sDevVAddr, + uiSize, + szName, + ui32Log2PageSize, + uiPID, + bSparse, + &ui32AllocationIndex, + &bCreated); + + if ((eError == PVRSRV_OK) && !bCreated) + { + /* touch the allocation so it goes to the head of our MRU list */ + TouchBusyAllocation(ui32AllocationIndex); + } + else if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)", + __func__, + szName, + PVRSRVGETERRORSTRING(eError))); + goto out_unlock; + } + + if (!bSparse) + { + InsertUnmapAllCommand(ui32AllocationIndex); + } + else + { + GenerateMapUnmapCommandsForSparsePMR(psPMR, + ui32AllocationIndex, + IMG_FALSE); + } + + InsertTimeStampCommand(OSClockns64()); + + *pui32AllocationIndexOut = ui32AllocationIndex; + +out_unlock: + DevicememHistoryUnlock(); + + return eError; +} + +/* DevicememHistorySparseChangeKM: + * Entry point for when a sparse allocation is changed, such that some of the + * pages 
within the sparse allocation are mapped or unmapped. + * + * psPMR: The PMR to which the allocation belongs. + * ui32Offset: The offset within the PMR at which the allocation begins. + * sDevVAddr: The DevVAddr at which the allocation begins. + * szName: Annotation/name for the allocation. + * ui32Log2PageSize: Page size of the allocation, expressed in log2 form. + * ui32AllocPageCount: Number of pages which have been mapped. + * paui32AllocPageIndices: Indices of pages which have been mapped. + * ui32FreePageCount: Number of pages which have been unmapped. + * paui32FreePageIndices: Indices of pages which have been unmapped. + * ui32AllocationIndex: Allocation index as provided by the client. + * We will use this as a short-cut to find the allocation + * in our records. + * pui32AllocationIndexOut: An updated allocation index for the client. + * This may be a new value if we just created the + * allocation record. + */ +PVRSRV_ERROR DevicememHistorySparseChangeKM(PMR *psPMR, + IMG_UINT32 ui32Offset, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + const char szName[DEVMEM_ANNOTATION_MAX_LEN], + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *paui32AllocPageIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *paui32FreePageIndices, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 *pui32AllocationIndexOut) +{ + IMG_UINT64 ui64Serial; + IMG_PID uiPID = OSGetCurrentClientProcessIDKM(); + PVRSRV_ERROR eError; + IMG_BOOL bCreated; + + if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) && + !CHECK_ALLOC_INDEX(ui32AllocationIndex)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u", + __func__, + ui32AllocationIndex)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PMRGetUID(psPMR, &ui64Serial); + + DevicememHistoryLock(); + + eError = FindOrCreateAllocation(ui32AllocationIndex, + ui64Serial, + sDevVAddr, + uiSize, + szName, + ui32Log2PageSize, + uiPID, + IMG_TRUE /* bSparse */, + &ui32AllocationIndex, + 
&bCreated); + + if ((eError == PVRSRV_OK) && !bCreated) + { + /* touch the allocation so it goes to the head of our MRU list */ + TouchBusyAllocation(ui32AllocationIndex); + } + else if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)", + __func__, + szName, + PVRSRVGETERRORSTRING(eError))); + goto out_unlock; + } + + GenerateMapUnmapCommandsForChangeList(ui32AllocPageCount, + paui32AllocPageIndices, + ui32AllocationIndex, + IMG_TRUE); + + GenerateMapUnmapCommandsForChangeList(ui32FreePageCount, + paui32FreePageIndices, + ui32AllocationIndex, + IMG_FALSE); + + InsertTimeStampCommand(OSClockns64()); + + *pui32AllocationIndexOut = ui32AllocationIndex; + +out_unlock: + DevicememHistoryUnlock(); + + return eError; + +} + +/* CircularBufferIterateStart: + * Initialise local state for iterating over the circular buffer + */ +static void CircularBufferIterateStart(IMG_UINT32 *pui32Head, IMG_UINT32 *pui32Iter) +{ + *pui32Head = gsDevicememHistoryData.sRecords.ui32Head; + + if (*pui32Head != 0) + { + *pui32Iter = *pui32Head - 1; + } + else + { + *pui32Iter = CIRCULAR_BUFFER_NUM_COMMANDS - 1; + } +} + +/* CircularBufferIteratePrevious: + * Iterate to the previous item in the circular buffer. + * This is called repeatedly to iterate over the whole circular buffer. + */ +static COMMAND_WRAPPER *CircularBufferIteratePrevious(IMG_UINT32 ui32Head, + IMG_UINT32 *pui32Iter, + COMMAND_TYPE *peType, + IMG_BOOL *pbLast) +{ + IMG_UINT8 *pui8Header; + COMMAND_WRAPPER *psOut = NULL; + + psOut = gsDevicememHistoryData.sRecords.pasCircularBuffer + *pui32Iter; + + pui8Header = (void *) psOut; + + /* sanity check the command looks valid. + * this condition should never happen, but check for it anyway + * and try to handle it + */ + if (*pui8Header >= COMMAND_TYPE_COUNT) + { + /* invalid header detected. Circular buffer corrupted? 
*/ + PVR_DPF((PVR_DBG_ERROR, "CircularBufferIteratePrevious: " + "Invalid header: %u", + *pui8Header)); + *pbLast = IMG_TRUE; + return NULL; + } + + *peType = *pui8Header; + + if (*pui32Iter != 0) + { + (*pui32Iter)--; + } + else + { + *pui32Iter = CIRCULAR_BUFFER_NUM_COMMANDS - 1; + } + + + /* inform the caller this is the last command if either we have reached + * the head (where we started) or if we have reached an empty command, + * which means we have covered all populated entries + */ + if ((*pui32Iter == ui32Head) || (*peType == COMMAND_TYPE_NONE)) + { + /* this is the final iteration */ + *pbLast = IMG_TRUE; + } + + return psOut; +} + +/* MapUnmapCommandGetInfo: + * Helper function to get the address and mapping information from a MAP_ALL, UNMAP_ALL, + * MAP_RANGE or UNMAP_RANGE command + */ +static void MapUnmapCommandGetInfo(COMMAND_WRAPPER *psCommand, + COMMAND_TYPE eType, + IMG_DEV_VIRTADDR *psDevVAddrStart, + IMG_DEV_VIRTADDR *psDevVAddrEnd, + IMG_BOOL *pbMap, + IMG_UINT32 *pui32AllocIndex) +{ + if ((eType == COMMAND_TYPE_MAP_ALL) || ((eType == COMMAND_TYPE_UNMAP_ALL))) + { + COMMAND_MAP_ALL *psMapAll = &psCommand->u.sMapAll; + RECORD_ALLOCATION *psAlloc; + + *pbMap = (eType == COMMAND_TYPE_MAP_ALL); + *pui32AllocIndex = psMapAll->uiAllocIndex; + + psAlloc = ALLOC_INDEX_TO_PTR(psMapAll->uiAllocIndex); + + *psDevVAddrStart = psAlloc->sDevVAddr; + psDevVAddrEnd->uiAddr = psDevVAddrStart->uiAddr + psAlloc->uiSize - 1; + } + else if ((eType == COMMAND_TYPE_MAP_RANGE) || ((eType == COMMAND_TYPE_UNMAP_RANGE))) + { + COMMAND_MAP_RANGE *psMapRange = &psCommand->u.sMapRange; + RECORD_ALLOCATION *psAlloc; + IMG_UINT32 ui32StartPage, ui32Count; + + *pbMap = (eType == COMMAND_TYPE_MAP_RANGE); + *pui32AllocIndex = psMapRange->uiAllocIndex; + + psAlloc = ALLOC_INDEX_TO_PTR(psMapRange->uiAllocIndex); + + MapRangeUnpack(psMapRange, &ui32StartPage, &ui32Count); + + psDevVAddrStart->uiAddr = psAlloc->sDevVAddr.uiAddr + + ((1ULL << psAlloc->ui32Log2PageSize) * 
ui32StartPage); + + psDevVAddrEnd->uiAddr = psDevVAddrStart->uiAddr + + ((1ULL << psAlloc->ui32Log2PageSize) * ui32Count) - 1; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid command type: %u", + __func__, + eType)); + } +} + +/* DevicememHistoryQuery: + * Entry point for rgxdebug to look up addresses relating to a page fault + */ +IMG_BOOL DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn, + DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut, + IMG_UINT32 ui32PageSizeBytes, + IMG_BOOL bMatchAnyAllocInPage) +{ + IMG_UINT32 ui32Head, ui32Iter; + COMMAND_TYPE eType = COMMAND_TYPE_NONE; + COMMAND_WRAPPER *psCommand = NULL; + IMG_BOOL bLast = IMG_FALSE; + IMG_UINT64 ui64StartTime = OSClockns64(); + IMG_UINT64 ui64TimeNs = 0; + + /* initialise the results count for the caller */ + psQueryOut->ui32NumResults = 0; + + DevicememHistoryLock(); + + /* if the search is constrained to a particular PID then we + * first search the list of allocations to see if this + * PID is known to us + */ + if (psQueryIn->uiPID != DEVICEMEM_HISTORY_PID_ANY) + { + IMG_UINT32 ui32Alloc; + ui32Alloc = gsDevicememHistoryData.sRecords.ui32AllocationsListHead; + + while (ui32Alloc != END_OF_LIST) + { + RECORD_ALLOCATION *psAlloc; + + psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc); + + if (psAlloc->uiPID == psQueryIn->uiPID) + { + goto found_pid; + } + + if (ui32Alloc == gsDevicememHistoryData.sRecords.ui32AllocationsListHead) + { + /* gone through whole list */ + break; + } + } + + /* PID not found, so we do not have any suitable data for this + * page fault + */ + goto out_unlock; + } + +found_pid: + + CircularBufferIterateStart(&ui32Head, &ui32Iter); + + while (!bLast) + { + psCommand = CircularBufferIteratePrevious(ui32Head, &ui32Iter, &eType, &bLast); + + if (eType == COMMAND_TYPE_TIMESTAMP) + { + ui64TimeNs = TimeStampUnpack(&psCommand->u.sTimeStamp); + continue; + } + + if ((eType == COMMAND_TYPE_MAP_ALL) || + (eType == COMMAND_TYPE_UNMAP_ALL) || + (eType == COMMAND_TYPE_MAP_RANGE) || + 
(eType == COMMAND_TYPE_UNMAP_RANGE)) + { + RECORD_ALLOCATION *psAlloc; + IMG_DEV_VIRTADDR sAllocStartAddrOrig, sAllocEndAddrOrig; + IMG_DEV_VIRTADDR sAllocStartAddr, sAllocEndAddr; + IMG_BOOL bMap; + IMG_UINT32 ui32AllocIndex; + + MapUnmapCommandGetInfo(psCommand, + eType, + &sAllocStartAddrOrig, + &sAllocEndAddrOrig, + &bMap, + &ui32AllocIndex); + + sAllocStartAddr = sAllocStartAddrOrig; + sAllocEndAddr = sAllocEndAddrOrig; + + psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocIndex); + + /* skip this command if we need to search within + * a particular PID, and this allocation is not from + * that PID + */ + if ((psQueryIn->uiPID != DEVICEMEM_HISTORY_PID_ANY) && + (psAlloc->uiPID != psQueryIn->uiPID)) + { + continue; + } + + /* if the allocation was created after this event, then this + * event must be for an old/removed allocation, so skip it + */ + if (DO_TIME_STAMP_MASK(psAlloc->ui64CreationTime) > ui64TimeNs) + { + continue; + } + + /* if the caller wants us to match any allocation in the + * same page as the allocation then tweak the real start/end + * addresses of the allocation here + */ + if (bMatchAnyAllocInPage) + { + sAllocStartAddr.uiAddr = sAllocStartAddr.uiAddr & ~(IMG_UINT64) (ui32PageSizeBytes - 1); + sAllocEndAddr.uiAddr = (sAllocEndAddr.uiAddr + ui32PageSizeBytes - 1) & ~(IMG_UINT64) (ui32PageSizeBytes - 1); + } + + if ((psQueryIn->sDevVAddr.uiAddr >= sAllocStartAddr.uiAddr) && + (psQueryIn->sDevVAddr.uiAddr < sAllocEndAddr.uiAddr)) + { + DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult = &psQueryOut->sResults[psQueryOut->ui32NumResults]; + + OSStringLCopy(psResult->szString, psAlloc->szName, sizeof(psResult->szString)); + psResult->sBaseDevVAddr = psAlloc->sDevVAddr; + psResult->uiSize = psAlloc->uiSize; + psResult->bMap = bMap; + psResult->ui64Age = _CalculateAge(ui64StartTime, ui64TimeNs, TIME_STAMP_MASK); + psResult->ui64When = ui64TimeNs; + /* write the responsible PID in the placeholder */ + psResult->sProcessInfo.uiPID = psAlloc->uiPID; + + if ((eType == 
COMMAND_TYPE_MAP_ALL) || (eType == COMMAND_TYPE_UNMAP_ALL)) + { + psResult->bRange = IMG_FALSE; + psResult->bAll = IMG_TRUE; + } + else + { + psResult->bRange = IMG_TRUE; + MapRangeUnpack(&psCommand->u.sMapRange, + &psResult->ui32StartPage, + &psResult->ui32PageCount); + psResult->bAll = (psResult->ui32PageCount * (1U << psAlloc->ui32Log2PageSize)) + == psAlloc->uiSize; + psResult->sMapStartAddr = sAllocStartAddrOrig; + psResult->sMapEndAddr = sAllocEndAddrOrig; + } + + psQueryOut->ui32NumResults++; + + if (psQueryOut->ui32NumResults == DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS) + { + break; + } + } + } + } + +out_unlock: + DevicememHistoryUnlock(); + + return psQueryOut->ui32NumResults > 0; +} + +static void DeviceMemHistoryFmt(IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN], + IMG_PID uiPID, + const IMG_CHAR *pszName, + const IMG_CHAR *pszAction, + IMG_DEV_VIRTADDR sDevVAddrStart, + IMG_DEV_VIRTADDR sDevVAddrEnd, + IMG_UINT64 ui64TimeNs) +{ + + OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, + /* PID NAME MAP/UNMAP MIN-MAX SIZE AbsUS AgeUS*/ + "%04u %-40s %-10s " + IMG_DEV_VIRTADDR_FMTSPEC "-" IMG_DEV_VIRTADDR_FMTSPEC " " + "0x%08" IMG_UINT64_FMTSPECX " " + "%013" IMG_UINT64_FMTSPEC, /* 13 digits is over 2 hours of ns */ + uiPID, + pszName, + pszAction, + sDevVAddrStart.uiAddr, + sDevVAddrEnd.uiAddr, + sDevVAddrEnd.uiAddr - sDevVAddrStart.uiAddr + 1, + ui64TimeNs); +} + +static void DeviceMemHistoryFmtHeader(IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]) +{ + OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, + "%-4s %-40s %-6s %10s %10s %8s %13s", + "PID", + "NAME", + "ACTION", + "ADDR MIN", + "ADDR MAX", + "SIZE", + "ABS NS"); +} + +static const char *CommandTypeToString(COMMAND_TYPE eType) +{ + switch (eType) + { + case COMMAND_TYPE_MAP_ALL: + return "MapAll"; + case COMMAND_TYPE_UNMAP_ALL: + return "UnmapAll"; + case COMMAND_TYPE_MAP_RANGE: + return "MapRange"; + case COMMAND_TYPE_UNMAP_RANGE: + return "UnmapRange"; + case COMMAND_TYPE_TIMESTAMP: + return "TimeStamp"; 
+ default: + return "???"; + } +} + +static void DevicememHistoryPrintAll(OSDI_IMPL_ENTRY *psEntry) +{ + IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; + IMG_UINT32 ui32Iter; + IMG_UINT32 ui32Head; + IMG_BOOL bLast = IMG_FALSE; + IMG_UINT64 ui64TimeNs = 0; + IMG_UINT64 ui64StartTime = OSClockns64(); + + DeviceMemHistoryFmtHeader(szBuffer); + DIPrintf(psEntry, "%s\n", szBuffer); + + CircularBufferIterateStart(&ui32Head, &ui32Iter); + + while (!bLast) + { + COMMAND_WRAPPER *psCommand; + COMMAND_TYPE eType = COMMAND_TYPE_NONE; + + psCommand = CircularBufferIteratePrevious(ui32Head, &ui32Iter, &eType, + &bLast); + + if (eType == COMMAND_TYPE_TIMESTAMP) + { + ui64TimeNs = TimeStampUnpack(&psCommand->u.sTimeStamp); + continue; + } + + + if ((eType == COMMAND_TYPE_MAP_ALL) || + (eType == COMMAND_TYPE_UNMAP_ALL) || + (eType == COMMAND_TYPE_MAP_RANGE) || + (eType == COMMAND_TYPE_UNMAP_RANGE)) + { + RECORD_ALLOCATION *psAlloc; + IMG_DEV_VIRTADDR sDevVAddrStart, sDevVAddrEnd; + IMG_BOOL bMap; + IMG_UINT32 ui32AllocIndex; + + MapUnmapCommandGetInfo(psCommand, + eType, + &sDevVAddrStart, + &sDevVAddrEnd, + &bMap, + &ui32AllocIndex); + + psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocIndex); + + if (DO_TIME_STAMP_MASK(psAlloc->ui64CreationTime) > ui64TimeNs) + { + /* if this event relates to an allocation we + * are no longer tracking then do not print it + */ + continue; + } + + DeviceMemHistoryFmt(szBuffer, + psAlloc->uiPID, + psAlloc->szName, + CommandTypeToString(eType), + sDevVAddrStart, + sDevVAddrEnd, + ui64TimeNs); + + DIPrintf(psEntry, "%s\n", szBuffer); + } + } + + DIPrintf(psEntry, "\nTimestamp reference: %013" IMG_UINT64_FMTSPEC "\n", + ui64StartTime); +} + +static int DevicememHistoryPrintAllWrapper(OSDI_IMPL_ENTRY *psEntry, + void *pvData) +{ + PVR_UNREFERENCED_PARAMETER(pvData); + + DevicememHistoryLock(); + DevicememHistoryPrintAll(psEntry); + DevicememHistoryUnlock(); + + return 0; +} + +static PVRSRV_ERROR CreateRecords(void) +{ + 
gsDevicememHistoryData.sRecords.pasAllocations = + OSAllocMem(sizeof(RECORD_ALLOCATION) * ALLOCATION_LIST_NUM_ENTRIES); + + PVR_RETURN_IF_NOMEM(gsDevicememHistoryData.sRecords.pasAllocations); + + /* Allocated and initialise the circular buffer with zeros so every + * command is initialised as a command of type COMMAND_TYPE_NONE. */ + gsDevicememHistoryData.sRecords.pasCircularBuffer = + OSAllocZMem(sizeof(COMMAND_WRAPPER) * CIRCULAR_BUFFER_NUM_COMMANDS); + + if (gsDevicememHistoryData.sRecords.pasCircularBuffer == NULL) + { + OSFreeMem(gsDevicememHistoryData.sRecords.pasAllocations); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + return PVRSRV_OK; +} + +static void DestroyRecords(void) +{ + OSFreeMem(gsDevicememHistoryData.sRecords.pasCircularBuffer); + OSFreeMem(gsDevicememHistoryData.sRecords.pasAllocations); +} + +static void InitialiseRecords(void) +{ + IMG_UINT32 i; + + /* initialise the allocations list */ + + gsDevicememHistoryData.sRecords.pasAllocations[0].ui32Prev = ALLOCATION_LIST_NUM_ENTRIES - 1; + gsDevicememHistoryData.sRecords.pasAllocations[0].ui32Next = 1; + + for (i = 1; i < ALLOCATION_LIST_NUM_ENTRIES; i++) + { + gsDevicememHistoryData.sRecords.pasAllocations[i].ui32Prev = i - 1; + gsDevicememHistoryData.sRecords.pasAllocations[i].ui32Next = i + 1; + } + + gsDevicememHistoryData.sRecords.pasAllocations[ALLOCATION_LIST_NUM_ENTRIES - 1].ui32Next = 0; + + gsDevicememHistoryData.sRecords.ui32AllocationsListHead = 0; +} + +PVRSRV_ERROR DevicememHistoryInitKM(void) +{ + PVRSRV_ERROR eError; + DI_ITERATOR_CB sIterator = {.pfnShow = DevicememHistoryPrintAllWrapper}; + + eError = OSLockCreate(&gsDevicememHistoryData.hLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", err_lock); + + eError = CreateRecords(); + PVR_LOG_GOTO_IF_ERROR(eError, "CreateRecords", err_allocations); + + InitialiseRecords(); + + eError = DICreateEntry("devicemem_history", NULL, &sIterator, NULL, + DI_ENTRY_TYPE_GENERIC, + &gsDevicememHistoryData.psDIEntry); + 
PVR_LOG_GOTO_IF_ERROR(eError, "DICreateEntry", err_di_creation); + + return PVRSRV_OK; + +err_di_creation: + DestroyRecords(); +err_allocations: + OSLockDestroy(gsDevicememHistoryData.hLock); +err_lock: + return eError; +} + +void DevicememHistoryDeInitKM(void) +{ + if (gsDevicememHistoryData.psDIEntry != NULL) + { + DIDestroyEntry(gsDevicememHistoryData.psDIEntry); + } + + DestroyRecords(); + + OSLockDestroy(gsDevicememHistoryData.hLock); +} diff --git a/drivers/mcst/gpu-imgtec/services/server/common/devicemem_server.c b/drivers/mcst/gpu-imgtec/services/server/common/devicemem_server.c new file mode 100644 index 000000000000..a0b20825c7ce --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/devicemem_server.c @@ -0,0 +1,1784 @@ +/*************************************************************************/ /*! +@File +@Title Device Memory Management +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Server-side component of the Device Memory Management. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +/* our exported API */ +#include "devicemem_server.h" +#include "devicemem_utils.h" +#include "devicemem.h" + +#include "device.h" /* For device node */ +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" + +#include "mmu_common.h" +#include "pdump_km.h" +#include "pmr.h" +#include "physmem.h" +#include "pdumpdesc.h" + +#include "allocmem.h" +#include "osfunc.h" +#include "lock.h" + +#define DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE (1 << 0) + +struct _DEVMEMINT_CTX_ +{ + PVRSRV_DEVICE_NODE *psDevNode; + + /* MMU common code needs to have a context. 
There's a one-to-one + correspondence between device memory context and MMU context, + but we have the abstraction here so that we don't need to care + what the MMU does with its context, and the MMU code need not + know about us at all. */ + MMU_CONTEXT *psMMUContext; + + ATOMIC_T hRefCount; + + /* This handle is for devices that require notification when a new + memory context is created and they need to store private data that + is associated with the context. */ + IMG_HANDLE hPrivData; + + /* Protects access to sProcessNotifyListHead */ + POSWR_LOCK hListLock; + + /* The following tracks UM applications that need to be notified of a + * page fault */ + DLLIST_NODE sProcessNotifyListHead; + /* The following is a node for the list of registered devmem contexts */ + DLLIST_NODE sPageFaultNotifyListElem; + + /* Device virtual address of a page fault on this context */ + IMG_DEV_VIRTADDR sFaultAddress; + + /* General purpose flags */ + IMG_UINT32 ui32Flags; +}; + +struct _DEVMEMINT_CTX_EXPORT_ +{ + DEVMEMINT_CTX *psDevmemCtx; + PMR *psPMR; + ATOMIC_T hRefCount; + DLLIST_NODE sNode; +}; + +struct _DEVMEMINT_HEAP_ +{ + struct _DEVMEMINT_CTX_ *psDevmemCtx; + IMG_UINT32 uiLog2PageSize; + ATOMIC_T hRefCount; +}; + +struct _DEVMEMINT_RESERVATION_ +{ + struct _DEVMEMINT_HEAP_ *psDevmemHeap; + IMG_DEV_VIRTADDR sBase; + IMG_DEVMEM_SIZE_T uiLength; +}; + +struct _DEVMEMINT_MAPPING_ +{ + struct _DEVMEMINT_RESERVATION_ *psReservation; + PMR *psPMR; + IMG_UINT32 uiNumPages; +}; + +struct _DEVMEMINT_PF_NOTIFY_ +{ + IMG_UINT32 ui32PID; + DLLIST_NODE sProcessNotifyListElem; +}; + +/*************************************************************************/ /*! +@Function DevmemIntCtxAcquire +@Description Acquire a reference to the provided device memory context. 
+@Return None +*/ /**************************************************************************/ +static INLINE void DevmemIntCtxAcquire(DEVMEMINT_CTX *psDevmemCtx) +{ + OSAtomicIncrement(&psDevmemCtx->hRefCount); +} + +/*************************************************************************/ /*! +@Function DevmemIntCtxRelease +@Description Release the reference to the provided device memory context. + If this is the last reference which was taken then the + memory context will be freed. +@Return None +*/ /**************************************************************************/ +static INLINE void DevmemIntCtxRelease(DEVMEMINT_CTX *psDevmemCtx) +{ + if (OSAtomicDecrement(&psDevmemCtx->hRefCount) == 0) + { + /* The last reference has gone, destroy the context */ + PVRSRV_DEVICE_NODE *psDevNode = psDevmemCtx->psDevNode; + DLLIST_NODE *psNode, *psNodeNext; + + /* If there are any PIDs registered for page fault notification. + * Loop through the registered PIDs and free each one */ + dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext) + { + DEVMEMINT_PF_NOTIFY *psNotifyNode = + IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem); + dllist_remove_node(psNode); + OSFreeMem(psNotifyNode); + } + + /* If this context is in the list registered for a debugger, remove + * from that list */ + if (dllist_node_is_in_list(&psDevmemCtx->sPageFaultNotifyListElem)) + { + dllist_remove_node(&psDevmemCtx->sPageFaultNotifyListElem); + } + + if (psDevNode->pfnUnregisterMemoryContext) + { + psDevNode->pfnUnregisterMemoryContext(psDevmemCtx->hPrivData); + } + MMU_ContextDestroy(psDevmemCtx->psMMUContext); + + OSWRLockDestroy(psDevmemCtx->hListLock); + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Freed memory context %p", + __func__, psDevmemCtx)); + OSFreeMem(psDevmemCtx); + } +} + +/*************************************************************************/ /*! 
+@Function DevmemIntHeapAcquire +@Description Acquire a reference to the provided device memory heap. +@Return None +*/ /**************************************************************************/ +static INLINE void DevmemIntHeapAcquire(DEVMEMINT_HEAP *psDevmemHeap) +{ + OSAtomicIncrement(&psDevmemHeap->hRefCount); +} + +/*************************************************************************/ /*! +@Function DevmemIntHeapRelease +@Description Release the reference to the provided device memory heap. + If this is the last reference which was taken then the + memory context will be freed. +@Return None +*/ /**************************************************************************/ +static INLINE void DevmemIntHeapRelease(DEVMEMINT_HEAP *psDevmemHeap) +{ + OSAtomicDecrement(&psDevmemHeap->hRefCount); +} + +PVRSRV_ERROR +DevmemIntUnpin(PMR *psPMR) +{ + PVRSRV_ERROR eError; + + /* Unpin */ + eError = PMRUnpinPMR(psPMR, IMG_FALSE); + + return eError; +} + +PVRSRV_ERROR +DevmemIntUnpinInvalidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR) +{ + PVRSRV_ERROR eError; + + eError = PMRUnpinPMR(psPMR, IMG_TRUE); + PVR_GOTO_IF_ERROR(eError, e_exit); + + /* Invalidate mapping */ + eError = MMU_ChangeValidity(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext, + psDevmemMapping->psReservation->sBase, + psDevmemMapping->uiNumPages, + psDevmemMapping->psReservation->psDevmemHeap->uiLog2PageSize, + IMG_FALSE, /* !< Choose to invalidate PT entries */ + psPMR); + +e_exit: + return eError; +} + +PVRSRV_ERROR +DevmemIntPin(PMR *psPMR) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Start the pinning */ + eError = PMRPinPMR(psPMR); + + return eError; +} + +PVRSRV_ERROR +DevmemIntPinValidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR) +{ + PVRSRV_ERROR eError; + PVRSRV_ERROR eErrorMMU = PVRSRV_OK; + IMG_UINT32 uiLog2PageSize = psDevmemMapping->psReservation->psDevmemHeap->uiLog2PageSize; + + /* Start the pinning */ + eError = PMRPinPMR(psPMR); + + if (eError 
== PVRSRV_OK) + { + /* Make mapping valid again */ + eErrorMMU = MMU_ChangeValidity(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext, + psDevmemMapping->psReservation->sBase, + psDevmemMapping->uiNumPages, + uiLog2PageSize, + IMG_TRUE, /* !< Choose to make PT entries valid again */ + psPMR); + } + else if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY) + { + /* If we lost the physical backing we have to map it again because + * the old physical addresses are not valid anymore. */ + IMG_UINT32 uiFlags; + uiFlags = PMR_Flags(psPMR); + + eErrorMMU = MMU_MapPages(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext, + uiFlags, + psDevmemMapping->psReservation->sBase, + psPMR, + 0, + psDevmemMapping->uiNumPages, + NULL, + uiLog2PageSize); + } + + /* Just overwrite eError if the mappings failed. + * PMR_NEW_MEMORY has to be propagated to the user. */ + if (eErrorMMU != PVRSRV_OK) + { + eError = eErrorMMU; + } + + return eError; +} + +/*************************************************************************/ /*! +@Function DevmemServerGetImportHandle +@Description For given exportable memory descriptor returns PMR handle. +@Return Memory is exportable - Success + PVRSRV_ERROR failure code +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemServerGetImportHandle(DEVMEM_MEMDESC *psMemDesc, + IMG_HANDLE *phImport) +{ + PVRSRV_ERROR eError; + + if ((GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_EXPORTABLE) == 0) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION, e0); + } + + *phImport = psMemDesc->psImport->hPMR; + return PVRSRV_OK; + +e0: + return eError; +} + +/*************************************************************************/ /*! +@Function DevmemServerGetHeapHandle +@Description For given reservation returns the Heap handle. 
+@Return PVRSRV_ERROR failure code +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemServerGetHeapHandle(DEVMEMINT_RESERVATION *psReservation, + IMG_HANDLE *phHeap) +{ + if (psReservation == NULL || phHeap == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + *phHeap = psReservation->psDevmemHeap; + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! +@Function DevmemServerGetContext +@Description For given heap returns the context. +@Return PVRSRV_ERROR failure code +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemServerGetContext(DEVMEMINT_HEAP *psDevmemHeap, + DEVMEMINT_CTX **ppsDevmemCtxPtr) +{ + if (psDevmemHeap == NULL || ppsDevmemCtxPtr == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + *ppsDevmemCtxPtr = psDevmemHeap->psDevmemCtx; + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! +@Function DevmemServerGetPrivData +@Description For given context returns the private data handle. +@Return PVRSRV_ERROR failure code +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemServerGetPrivData(DEVMEMINT_CTX *psDevmemCtx, + IMG_HANDLE *phPrivData) +{ + if (psDevmemCtx == NULL || phPrivData == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + *phPrivData = psDevmemCtx->hPrivData; + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! +@Function DevmemIntCtxCreate +@Description Creates and initialises a device memory context. 
+@Return valid Device Memory context handle - Success + PVRSRV_ERROR failure code +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemIntCtxCreate(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bKernelMemoryCtx, + DEVMEMINT_CTX **ppsDevmemCtxPtr, + IMG_HANDLE *hPrivData, + IMG_UINT32 *pui32CPUCacheLineSize) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtx; + IMG_HANDLE hPrivDataInt = NULL; + MMU_DEVICEATTRIBS *psMMUDevAttrs = psDeviceNode->pfnGetMMUDeviceAttributes(psDeviceNode, + bKernelMemoryCtx); + + PVR_DPF((PVR_DBG_MESSAGE, "%s", __func__)); + + /* + * Ensure that we are safe to perform unaligned accesses on memory + * we mark write-combine, as the compiler might generate + * instructions operating on this memory which require this + * assumption to be true. + */ + PVR_ASSERT(OSIsWriteCombineUnalignedSafe()); + + /* allocate a Devmem context */ + psDevmemCtx = OSAllocMem(sizeof(*psDevmemCtx)); + PVR_LOG_GOTO_IF_NOMEM(psDevmemCtx, eError, fail_alloc); + + OSAtomicWrite(&psDevmemCtx->hRefCount, 1); + psDevmemCtx->psDevNode = psDeviceNode; + + /* Call down to MMU context creation */ + + eError = MMU_ContextCreate(psConnection, + psDeviceNode, + &psDevmemCtx->psMMUContext, + psMMUDevAttrs); + PVR_LOG_GOTO_IF_ERROR(eError, "MMU_ContextCreate", fail_mmucontext); + + if (psDeviceNode->pfnRegisterMemoryContext) + { + eError = psDeviceNode->pfnRegisterMemoryContext(psDeviceNode, psDevmemCtx->psMMUContext, &hPrivDataInt); + PVR_LOG_GOTO_IF_ERROR(eError, "pfnRegisterMemoryContext", fail_register); + } + + /* Store the private data as it is required to unregister the memory context */ + psDevmemCtx->hPrivData = hPrivDataInt; + *hPrivData = hPrivDataInt; + *ppsDevmemCtxPtr = psDevmemCtx; + + /* Pass the CPU cache line size through the bridge to the user mode as it can't be queried in user mode.*/ + *pui32CPUCacheLineSize = OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE); + + /* 
Initialise the PID notify list */ + OSWRLockCreate(&psDevmemCtx->hListLock); + dllist_init(&(psDevmemCtx->sProcessNotifyListHead)); + psDevmemCtx->sPageFaultNotifyListElem.psNextNode = NULL; + psDevmemCtx->sPageFaultNotifyListElem.psPrevNode = NULL; + + /* Initialise page fault address */ + psDevmemCtx->sFaultAddress.uiAddr = 0ULL; + + /* Initialise flags */ + psDevmemCtx->ui32Flags = 0; + + return PVRSRV_OK; + +fail_register: + MMU_ContextDestroy(psDevmemCtx->psMMUContext); +fail_mmucontext: + OSFreeMem(psDevmemCtx); +fail_alloc: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +/*************************************************************************/ /*! +@Function DevmemIntHeapCreate +@Description Creates and initialises a device memory heap. +@Return valid Device Memory heap handle - Success + PVRSRV_ERROR failure code +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemIntHeapCreate(DEVMEMINT_CTX *psDevmemCtx, + IMG_DEV_VIRTADDR sHeapBaseAddr, + IMG_DEVMEM_SIZE_T uiHeapLength, + IMG_UINT32 uiLog2DataPageSize, + DEVMEMINT_HEAP **ppsDevmemHeapPtr) +{ + DEVMEMINT_HEAP *psDevmemHeap; + + PVR_DPF((PVR_DBG_MESSAGE, "%s", __func__)); + + /* allocate a Devmem context */ + psDevmemHeap = OSAllocMem(sizeof(*psDevmemHeap)); + PVR_LOG_RETURN_IF_NOMEM(psDevmemHeap, "psDevmemHeap"); + + psDevmemHeap->psDevmemCtx = psDevmemCtx; + + DevmemIntCtxAcquire(psDevmemHeap->psDevmemCtx); + + OSAtomicWrite(&psDevmemHeap->hRefCount, 1); + + psDevmemHeap->uiLog2PageSize = uiLog2DataPageSize; + + *ppsDevmemHeapPtr = psDevmemHeap; + + return PVRSRV_OK; +} + +PVRSRV_ERROR DevmemIntAllocDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_DEF_PAGE *psDefPage, + IMG_INT uiInitValue, + IMG_CHAR *pcDefPageName, + IMG_BOOL bInitPage) +{ + IMG_UINT32 ui32RefCnt; + PVRSRV_ERROR eError = PVRSRV_OK; + + OSLockAcquire(psDefPage->psPgLock); + + /* We know there will not be 4G number of sparse PMR's */ + ui32RefCnt = 
OSAtomicIncrement(&psDefPage->atRefCounter); + + if (1 == ui32RefCnt) + { + IMG_DEV_PHYADDR sDevPhysAddr = {0}; + +#if defined(PDUMP) + PDUMPCOMMENT("Alloc %s page object", pcDefPageName); +#endif + + /* Allocate the dummy page required for sparse backing */ + eError = DevPhysMemAlloc(psDevNode, + (1 << psDefPage->ui32Log2PgSize), + 0, + uiInitValue, + bInitPage, +#if defined(PDUMP) + psDevNode->psMMUDevAttrs->pszMMUPxPDumpMemSpaceName, + pcDefPageName, + &psDefPage->hPdumpPg, +#endif + &psDefPage->sPageHandle, + &sDevPhysAddr); + if (PVRSRV_OK != eError) + { + OSAtomicDecrement(&psDefPage->atRefCounter); + } + else + { + psDefPage->ui64PgPhysAddr = sDevPhysAddr.uiAddr; + } + } + + OSLockRelease(psDefPage->psPgLock); + + return eError; +} + +void DevmemIntFreeDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_DEF_PAGE *psDefPage, + IMG_CHAR *pcDefPageName) +{ + IMG_UINT32 ui32RefCnt; + + ui32RefCnt = OSAtomicRead(&psDefPage->atRefCounter); + + /* For the cases where the dummy page allocation fails due to lack of memory + * The refcount can still be 0 even for a sparse allocation */ + if (0 != ui32RefCnt) + { + OSLockAcquire(psDefPage->psPgLock); + + /* We know there will not be 4G number of sparse PMR's */ + ui32RefCnt = OSAtomicDecrement(&psDefPage->atRefCounter); + + if (0 == ui32RefCnt) + { + PDUMPCOMMENT("Free %s page object", pcDefPageName); + + /* Free the dummy page when refcount reaches zero */ + DevPhysMemFree(psDevNode, +#if defined(PDUMP) + psDefPage->hPdumpPg, +#endif + &psDefPage->sPageHandle); + +#if defined(PDUMP) + psDefPage->hPdumpPg = NULL; +#endif + psDefPage->ui64PgPhysAddr = MMU_BAD_PHYS_ADDR; + } + + OSLockRelease(psDefPage->psPgLock); + } + +} + +PVRSRV_ERROR +DevmemIntMapPages(DEVMEMINT_RESERVATION *psReservation, + PMR *psPMR, + IMG_UINT32 ui32PageCount, + IMG_UINT32 ui32PhysicalPgOffset, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEV_VIRTADDR sDevVAddrBase) +{ + PVRSRV_ERROR eError; + + if (psReservation->psDevmemHeap->uiLog2PageSize > 
PMR_GetLog2Contiguity(psPMR)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Device heap and PMR have incompatible Log2Contiguity (%u - %u). " + "PMR contiguity must be a multiple of the heap contiguity!", + __func__, + psReservation->psDevmemHeap->uiLog2PageSize, + PMR_GetLog2Contiguity(psPMR))); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, e0); + } + + eError = MMU_MapPages(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext, + uiFlags, + sDevVAddrBase, + psPMR, + ui32PhysicalPgOffset, + ui32PageCount, + NULL, + psReservation->psDevmemHeap->uiLog2PageSize); + +e0: + return eError; +} + +PVRSRV_ERROR +DevmemIntUnmapPages(DEVMEMINT_RESERVATION *psReservation, + IMG_DEV_VIRTADDR sDevVAddrBase, + IMG_UINT32 ui32PageCount) +{ + /* Unmap the pages and mark them invalid in the MMU PTE */ + MMU_UnmapPages(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext, + 0, + sDevVAddrBase, + ui32PageCount, + NULL, + psReservation->psDevmemHeap->uiLog2PageSize, + 0); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap, + DEVMEMINT_RESERVATION *psReservation, + PMR *psPMR, + PVRSRV_MEMALLOCFLAGS_T uiMapFlags, + DEVMEMINT_MAPPING **ppsMappingPtr) +{ + PVRSRV_ERROR eError; + DEVMEMINT_MAPPING *psMapping; + /* number of pages (device pages) that allocation spans */ + IMG_UINT32 ui32NumDevPages; + /* device virtual address of start of allocation */ + IMG_DEV_VIRTADDR sAllocationDevVAddr; + /* and its length */ + IMG_DEVMEM_SIZE_T uiAllocationSize; + IMG_UINT32 uiLog2HeapContiguity = psDevmemHeap->uiLog2PageSize; + IMG_BOOL bIsSparse = IMG_FALSE, bNeedBacking = IMG_FALSE; + PVRSRV_DEVICE_NODE *psDevNode; + PMR_FLAGS_T uiPMRFlags; + PVRSRV_DEF_PAGE *psDefPage; + IMG_CHAR *pszPageName; + + if (uiLog2HeapContiguity > PMR_GetLog2Contiguity(psPMR)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Device heap and PMR have incompatible contiguity (%u - %u). 
" + "Heap contiguity must be a multiple of the heap contiguity!", + __func__, + uiLog2HeapContiguity, + PMR_GetLog2Contiguity(psPMR) )); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, e0); + } + psDevNode = psDevmemHeap->psDevmemCtx->psDevNode; + + /* allocate memory to record the mapping info */ + psMapping = OSAllocMem(sizeof(*psMapping)); + PVR_LOG_GOTO_IF_NOMEM(psMapping, eError, e0); + + uiAllocationSize = psReservation->uiLength; + + ui32NumDevPages = 0xffffffffU & ( ( (uiAllocationSize - 1) >> uiLog2HeapContiguity) + 1); + PVR_ASSERT((IMG_DEVMEM_SIZE_T) ui32NumDevPages << uiLog2HeapContiguity == uiAllocationSize); + + eError = PMRLockSysPhysAddresses(psPMR); + PVR_GOTO_IF_ERROR(eError, e2); + + sAllocationDevVAddr = psReservation->sBase; + + /*Check if the PMR that needs to be mapped is sparse */ + bIsSparse = PMR_IsSparse(psPMR); + if (bIsSparse) + { + /*Get the flags*/ + uiPMRFlags = PMR_Flags(psPMR); + bNeedBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiPMRFlags); + + if (bNeedBacking) + { + IMG_INT uiInitValue; + + if (PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiPMRFlags)) + { + psDefPage = &psDevmemHeap->psDevmemCtx->psDevNode->sDevZeroPage; + uiInitValue = PVR_ZERO_PAGE_INIT_VALUE; + pszPageName = DEV_ZERO_PAGE; + } + else + { + psDefPage = &psDevmemHeap->psDevmemCtx->psDevNode->sDummyPage; + uiInitValue = PVR_DUMMY_PAGE_INIT_VALUE; + pszPageName = DUMMY_PAGE; + } + + /* Error is logged with in the function if any failures. + * As the allocation fails we need to fail the map request and + * return appropriate error + * + * Allocation of dummy/zero page is done after locking the pages for PMR physically + * By implementing this way, the best case path of dummy/zero page being most likely to be + * allocated after physically locking down pages, is considered. 
+ * If the dummy/zero page allocation fails, we do unlock the physical address and the impact + * is a bit more in on demand mode of operation */ + eError = DevmemIntAllocDefBackingPage(psDevNode, + psDefPage, + uiInitValue, + pszPageName, + IMG_TRUE); + PVR_GOTO_IF_ERROR(eError, e3); + } + + /* N.B. We pass mapping permission flags to MMU_MapPages and let + * it reject the mapping if the permissions on the PMR are not compatible. */ + eError = MMU_MapPages(psDevmemHeap->psDevmemCtx->psMMUContext, + uiMapFlags, + sAllocationDevVAddr, + psPMR, + 0, + ui32NumDevPages, + NULL, + uiLog2HeapContiguity); + PVR_GOTO_IF_ERROR(eError, e4); + } + else + { + eError = MMU_MapPMRFast(psDevmemHeap->psDevmemCtx->psMMUContext, + sAllocationDevVAddr, + psPMR, + ui32NumDevPages << uiLog2HeapContiguity, + uiMapFlags, + uiLog2HeapContiguity); + PVR_GOTO_IF_ERROR(eError, e3); + } + + psMapping->psReservation = psReservation; + psMapping->uiNumPages = ui32NumDevPages; + psMapping->psPMR = psPMR; + + /* Don't bother with refcount on reservation, as a reservation + only ever holds one mapping, so we directly increment the + refcount on the heap instead */ + DevmemIntHeapAcquire(psMapping->psReservation->psDevmemHeap); + + *ppsMappingPtr = psMapping; + + return PVRSRV_OK; +e4: + if (bNeedBacking) + { + /*if the mapping failed, the allocated dummy ref count need + * to be handled accordingly */ + DevmemIntFreeDefBackingPage(psDevmemHeap->psDevmemCtx->psDevNode, + psDefPage, + pszPageName); + } +e3: + { + PVRSRV_ERROR eError1=PVRSRV_OK; + eError1 = PMRUnlockSysPhysAddresses(psPMR); + PVR_LOG_IF_ERROR(eError1, "PMRUnlockSysPhysAddresses"); + + *ppsMappingPtr = NULL; + } +e2: + OSFreeMem(psMapping); + +e0: + PVR_ASSERT (eError != PVRSRV_OK); + return eError; +} + + +PVRSRV_ERROR +DevmemIntUnmapPMR(DEVMEMINT_MAPPING *psMapping) +{ + PVRSRV_ERROR eError; + DEVMEMINT_HEAP *psDevmemHeap = psMapping->psReservation->psDevmemHeap; + /* device virtual address of start of allocation */ + 
IMG_DEV_VIRTADDR sAllocationDevVAddr; + /* number of pages (device pages) that allocation spans */ + IMG_UINT32 ui32NumDevPages; + IMG_BOOL bIsSparse = IMG_FALSE, bNeedBacking = IMG_FALSE; + PMR_FLAGS_T uiPMRFlags; + + ui32NumDevPages = psMapping->uiNumPages; + sAllocationDevVAddr = psMapping->psReservation->sBase; + + /*Check if the PMR that needs to be mapped is sparse */ + bIsSparse = PMR_IsSparse(psMapping->psPMR); + + if (bIsSparse) + { + /*Get the flags*/ + uiPMRFlags = PMR_Flags(psMapping->psPMR); + bNeedBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiPMRFlags); + + if (bNeedBacking) + { + if (PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiPMRFlags)) + { + DevmemIntFreeDefBackingPage(psDevmemHeap->psDevmemCtx->psDevNode, + &psDevmemHeap->psDevmemCtx->psDevNode->sDevZeroPage, + DEV_ZERO_PAGE); + } + else + { + DevmemIntFreeDefBackingPage(psDevmemHeap->psDevmemCtx->psDevNode, + &psDevmemHeap->psDevmemCtx->psDevNode->sDummyPage, + DUMMY_PAGE); + } + } + + MMU_UnmapPages (psDevmemHeap->psDevmemCtx->psMMUContext, + 0, + sAllocationDevVAddr, + ui32NumDevPages, + NULL, + psMapping->psReservation->psDevmemHeap->uiLog2PageSize, + 0); + } + else + { + MMU_UnmapPMRFast(psDevmemHeap->psDevmemCtx->psMMUContext, + sAllocationDevVAddr, + ui32NumDevPages, + psMapping->psReservation->psDevmemHeap->uiLog2PageSize); + } + + + + eError = PMRUnlockSysPhysAddresses(psMapping->psPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + /* Don't bother with refcount on reservation, as a reservation + only ever holds one mapping, so we directly decrement the + refcount on the heap instead */ + DevmemIntHeapRelease(psDevmemHeap); + + OSFreeMem(psMapping); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR +DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap, + IMG_DEV_VIRTADDR sAllocationDevVAddr, + IMG_DEVMEM_SIZE_T uiAllocationSize, + DEVMEMINT_RESERVATION **ppsReservationPtr) +{ + PVRSRV_ERROR eError; + DEVMEMINT_RESERVATION *psReservation; + + /* allocate memory to record the reservation info */ + 
psReservation = OSAllocMem(sizeof(*psReservation)); + PVR_LOG_GOTO_IF_NOMEM(psReservation, eError, e0); + + psReservation->sBase = sAllocationDevVAddr; + psReservation->uiLength = uiAllocationSize; + + eError = MMU_Alloc(psDevmemHeap->psDevmemCtx->psMMUContext, + uiAllocationSize, + &uiAllocationSize, + 0, /* IMG_UINT32 uiProtFlags */ + 0, /* alignment is n/a since we supply devvaddr */ + &sAllocationDevVAddr, + psDevmemHeap->uiLog2PageSize); + PVR_GOTO_IF_ERROR(eError, e1); + + /* since we supplied the virt addr, MMU_Alloc shouldn't have + chosen a new one for us */ + PVR_ASSERT(sAllocationDevVAddr.uiAddr == psReservation->sBase.uiAddr); + + DevmemIntHeapAcquire(psDevmemHeap); + + psReservation->psDevmemHeap = psDevmemHeap; + *ppsReservationPtr = psReservation; + + return PVRSRV_OK; + + /* + * error exit paths follow + */ + +e1: + OSFreeMem(psReservation); + +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR +DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psReservation) +{ + IMG_DEV_VIRTADDR sBase = psReservation->sBase; + IMG_UINT32 uiLength = psReservation->uiLength; + IMG_UINT32 uiLog2DataPageSize = psReservation->psDevmemHeap->uiLog2PageSize; + + MMU_Free(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext, + sBase, + uiLength, + uiLog2DataPageSize); + + DevmemIntHeapRelease(psReservation->psDevmemHeap); + OSFreeMem(psReservation); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR +DevmemIntHeapDestroy(DEVMEMINT_HEAP *psDevmemHeap) +{ + if (OSAtomicRead(&psDevmemHeap->hRefCount) != 1) + { + PVR_DPF((PVR_DBG_ERROR, "BUG! 
%s called but has too many references (%d) " + "which probably means allocations have been made from the heap and not freed", + __func__, + OSAtomicRead(&psDevmemHeap->hRefCount))); + + /* + * Try again later when you've freed all the memory + * + * Note: + * We don't expect the application to retry (after all this call would + * succeed if the client had freed all the memory which it should have + * done before calling this function). However, given there should be + * an associated handle, when the handle base is destroyed it will free + * any allocations leaked by the client and then it will retry this call, + * which should then succeed. + */ + return PVRSRV_ERROR_RETRY; + } + + PVR_ASSERT(OSAtomicRead(&psDevmemHeap->hRefCount) == 1); + + DevmemIntCtxRelease(psDevmemHeap->psDevmemCtx); + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Freed heap %p", __func__, psDevmemHeap)); + OSFreeMem(psDevmemHeap); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +DevmemIntChangeSparse(DEVMEMINT_HEAP *psDevmemHeap, + PMR *psPMR, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + SPARSE_MEM_RESIZE_FLAGS uiSparseFlags, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEV_VIRTADDR sDevVAddrBase, + IMG_UINT64 sCpuVAddrBase) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + IMG_UINT32 uiLog2PMRContiguity = PMR_GetLog2Contiguity(psPMR); + IMG_UINT32 uiLog2HeapContiguity = psDevmemHeap->uiLog2PageSize; + IMG_UINT32 uiOrderDiff = uiLog2PMRContiguity - uiLog2HeapContiguity; + IMG_UINT32 uiPagesPerOrder = 1 << uiOrderDiff; + + IMG_UINT32 *pai32MapIndices = pai32AllocIndices; + IMG_UINT32 *pai32UnmapIndices = pai32FreeIndices; + IMG_UINT32 uiMapPageCount = ui32AllocPageCount; + IMG_UINT32 uiUnmapPageCount = ui32FreePageCount; + + /* Special case: + * Adjust indices if we map into a heap that uses smaller page sizes + * than the physical allocation itself. 
+ * The incoming parameters are all based on the page size of the PMR + * but the mapping functions expects parameters to be in terms of heap page sizes. */ + if (uiOrderDiff != 0) + { + IMG_UINT32 uiPgIdx, uiPgOffset; + + uiMapPageCount = (uiMapPageCount << uiOrderDiff); + uiUnmapPageCount = (uiUnmapPageCount << uiOrderDiff); + + pai32MapIndices = OSAllocMem(uiMapPageCount * sizeof(*pai32MapIndices)); + PVR_GOTO_IF_NOMEM(pai32MapIndices, eError, e0); + + pai32UnmapIndices = OSAllocMem(uiUnmapPageCount * sizeof(*pai32UnmapIndices)); + if (!pai32UnmapIndices) + { + OSFreeMem(pai32MapIndices); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, e0); + } + + /* Every chunk index needs to be translated from physical indices + * into heap based indices. */ + for (uiPgIdx = 0; uiPgIdx < ui32AllocPageCount; uiPgIdx++) + { + for (uiPgOffset = 0; uiPgOffset < uiPagesPerOrder; uiPgOffset++) + { + pai32MapIndices[uiPgIdx*uiPagesPerOrder + uiPgOffset] = + pai32AllocIndices[uiPgIdx]*uiPagesPerOrder + uiPgOffset; + } + } + + for (uiPgIdx = 0; uiPgIdx < ui32FreePageCount; uiPgIdx++) + { + for (uiPgOffset = 0; uiPgOffset < uiPagesPerOrder; uiPgOffset++) + { + pai32UnmapIndices[uiPgIdx*uiPagesPerOrder + uiPgOffset] = + pai32FreeIndices[uiPgIdx]*uiPagesPerOrder + uiPgOffset; + } + } + } + + /* + * The order of steps in which this request is done is given below. The order of + * operations is very important in this case: + * + * 1. The parameters are validated in function PMR_ChangeSparseMem below. + * A successful response indicates all the parameters are correct. + * In failure case we bail out from here without processing further. + * 2. On success, get the PMR specific operations done. this includes page alloc, page free + * and the corresponding PMR status changes. + * when this call fails, it is ensured that the state of the PMR before is + * not disturbed. If it succeeds, then we can go ahead with the subsequent steps. + * 3. 
Invalidate the GPU page table entries for the pages to be freed. + * 4. Write the GPU page table entries for the pages that got allocated. + * 5. Change the corresponding CPU space map. + * + * The above steps can be selectively controlled using flags. + */ + if (uiSparseFlags & (SPARSE_REMAP_MEM | SPARSE_RESIZE_BOTH)) + { + /* Do the PMR specific changes first */ + eError = PMR_ChangeSparseMem(psPMR, + ui32AllocPageCount, + pai32AllocIndices, + ui32FreePageCount, + pai32FreeIndices, + uiSparseFlags); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Failed to do PMR specific changes.", + __func__)); + goto e1; + } + + /* Invalidate the page table entries for the free pages. + * Optimisation later would be not to touch the ones that gets re-mapped */ + if ((0 != ui32FreePageCount) && (uiSparseFlags & SPARSE_RESIZE_FREE)) + { + PMR_FLAGS_T uiPMRFlags; + + /*Get the flags*/ + uiPMRFlags = PMR_Flags(psPMR); + + if (SPARSE_REMAP_MEM != (uiSparseFlags & SPARSE_REMAP_MEM)) + { + /* Unmap the pages and mark them invalid in the MMU PTE */ + MMU_UnmapPages (psDevmemHeap->psDevmemCtx->psMMUContext, + uiFlags, + sDevVAddrBase, + uiUnmapPageCount, + pai32UnmapIndices, + uiLog2HeapContiguity, + uiPMRFlags); + } + } + + /* Wire the pages tables that got allocated */ + if ((0 != ui32AllocPageCount) && (uiSparseFlags & SPARSE_RESIZE_ALLOC)) + { + /* Map the pages and mark them Valid in the MMU PTE */ + eError = MMU_MapPages (psDevmemHeap->psDevmemCtx->psMMUContext, + uiFlags, + sDevVAddrBase, + psPMR, + 0, + uiMapPageCount, + pai32MapIndices, + uiLog2HeapContiguity); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Failed to map alloc indices.", + __func__)); + goto e1; + } + } + + /* Currently only used for debug */ + if (SPARSE_REMAP_MEM == (uiSparseFlags & SPARSE_REMAP_MEM)) + { + eError = MMU_MapPages(psDevmemHeap->psDevmemCtx->psMMUContext, + uiFlags, + sDevVAddrBase, + psPMR, + 0, + uiMapPageCount, + pai32UnmapIndices, + 
uiLog2HeapContiguity); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Failed to map Free indices.", + __func__)); + goto e1; + } + } + } + +#ifndef PVRSRV_UNMAP_ON_SPARSE_CHANGE + /* Do the changes in sparse on to the CPU virtual map accordingly */ + if (uiSparseFlags & SPARSE_MAP_CPU_ADDR) + { + if (sCpuVAddrBase != 0) + { + eError = PMR_ChangeSparseMemCPUMap(psPMR, + sCpuVAddrBase, + ui32AllocPageCount, + pai32AllocIndices, + ui32FreePageCount, + pai32FreeIndices); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Failed to map to CPU addr space.", + __func__)); + goto e0; + } + } + } +#endif + +e1: + if (pai32MapIndices != pai32AllocIndices) + { + OSFreeMem(pai32MapIndices); + } + if (pai32UnmapIndices != pai32FreeIndices) + { + OSFreeMem(pai32UnmapIndices); + } +e0: + return eError; +} + +/*************************************************************************/ /*! +@Function DevmemIntCtxDestroy +@Description Destroy that created by DevmemIntCtxCreate +@Input psDevmemCtx Device Memory context +@Return cannot fail. +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemIntCtxDestroy(DEVMEMINT_CTX *psDevmemCtx) +{ + /* + We can't determine if we should be freeing the context here + as a refcount!=1 could be due to either the fact that heap(s) + remain with allocations on them, or that this memory context + has been exported. + As the client couldn't do anything useful with this information + anyway and the fact that the refcount will ensure we only + free the context when _all_ references have been released + don't bother checking and just return OK regardless. 
+ */ + DevmemIntCtxRelease(psDevmemCtx); + return PVRSRV_OK; +} + +PVRSRV_ERROR DevmemIntIsVDevAddrValid(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + DEVMEMINT_CTX *psDevMemContext, + IMG_DEV_VIRTADDR sDevAddr) +{ + IMG_UINT32 i, j, uiLog2HeapPageSize = 0; + DEVICE_MEMORY_INFO *psDinfo = &psDevNode->sDevMemoryInfo; + DEVMEM_HEAP_CONFIG *psConfig = psDinfo->psDeviceMemoryHeapConfigArray; + + IMG_BOOL bFound = IMG_FALSE; + + for (i = 0; + i < psDinfo->uiNumHeapConfigs && !bFound; + i++) + { + for (j = 0; + j < psConfig[i].uiNumHeaps && !bFound; + j++) + { + IMG_DEV_VIRTADDR uiBase = + psConfig[i].psHeapBlueprintArray[j].sHeapBaseAddr; + IMG_DEVMEM_SIZE_T uiSize = + psConfig[i].psHeapBlueprintArray[j].uiHeapLength; + + if ((sDevAddr.uiAddr >= uiBase.uiAddr) && + (sDevAddr.uiAddr < (uiBase.uiAddr + uiSize))) + { + uiLog2HeapPageSize = + psConfig[i].psHeapBlueprintArray[j].uiLog2DataPageSize; + bFound = IMG_TRUE; + } + } + } + + if (uiLog2HeapPageSize == 0) + { + return PVRSRV_ERROR_INVALID_GPU_ADDR; + } + + return MMU_IsVDevAddrValid(psDevMemContext->psMMUContext, + uiLog2HeapPageSize, + sDevAddr) ? 
PVRSRV_OK : PVRSRV_ERROR_INVALID_GPU_ADDR; +} + +PVRSRV_ERROR +DevmemIntFlushDevSLCRange(DEVMEMINT_CTX *psDevMemContext, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + IMG_BOOL bInvalidate) +{ + PVRSRV_DEVICE_NODE *psDevNode = psDevMemContext->psDevNode; + MMU_CONTEXT *psMMUContext = psDevMemContext->psMMUContext; + + if (psDevNode->pfnInvalFBSCTable) + { + return psDevNode->pfnDevSLCFlushRange(psDevNode, + psMMUContext, + sDevVAddr, + uiSize, + bInvalidate); + } + + return PVRSRV_ERROR_NOT_SUPPORTED; +} + +PVRSRV_ERROR +DevmemIntInvalidateFBSCTable(DEVMEMINT_CTX *psDevMemContext, + IMG_UINT64 ui64FBSCEntryMask) +{ + PVRSRV_DEVICE_NODE *psDevNode = psDevMemContext->psDevNode; + MMU_CONTEXT *psMMUContext = psDevMemContext->psMMUContext; + + if (psDevNode->pfnInvalFBSCTable) + { + return psDevNode->pfnInvalFBSCTable(psDevNode, + psMMUContext, + ui64FBSCEntryMask); + } + + return PVRSRV_ERROR_NOT_SUPPORTED; +} + +PVRSRV_ERROR DevmemIntGetFaultAddress(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + DEVMEMINT_CTX *psDevMemContext, + IMG_DEV_VIRTADDR *psFaultAddress) +{ + if ((psDevMemContext->ui32Flags & DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE) == 0) + { + return PVRSRV_ERROR_RESOURCE_UNAVAILABLE; + } + + *psFaultAddress = psDevMemContext->sFaultAddress; + psDevMemContext->ui32Flags &= ~DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE; + + return PVRSRV_OK; +} + +static POSWR_LOCK g_hExportCtxListLock; +static DLLIST_NODE g_sExportCtxList; + +PVRSRV_ERROR +DevmemIntInit(void) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + dllist_init(&g_sExportCtxList); + + eError = OSWRLockCreate(&g_hExportCtxListLock); + + return eError; +} + +PVRSRV_ERROR +DevmemIntDeInit(void) +{ + PVR_ASSERT(dllist_is_empty(&g_sExportCtxList)); + + OSWRLockDestroy(g_hExportCtxListLock); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +DevmemIntExportCtx(DEVMEMINT_CTX *psContext, + PMR *psPMR, + DEVMEMINT_CTX_EXPORT **ppsContextExport) +{ + DEVMEMINT_CTX_EXPORT *psCtxExport; + + 
psCtxExport = OSAllocMem(sizeof(DEVMEMINT_CTX_EXPORT)); + PVR_LOG_RETURN_IF_NOMEM(psCtxExport, "psCtxExport"); + + DevmemIntCtxAcquire(psContext); + PMRRefPMR(psPMR); + psCtxExport->psDevmemCtx = psContext; + psCtxExport->psPMR = psPMR; + OSWRLockAcquireWrite(g_hExportCtxListLock); + dllist_add_to_tail(&g_sExportCtxList, &psCtxExport->sNode); + OSWRLockReleaseWrite(g_hExportCtxListLock); + + *ppsContextExport = psCtxExport; + + return PVRSRV_OK; +} + +PVRSRV_ERROR +DevmemIntUnexportCtx(DEVMEMINT_CTX_EXPORT *psContextExport) +{ + PMRUnrefPMR(psContextExport->psPMR); + DevmemIntCtxRelease(psContextExport->psDevmemCtx); + OSWRLockAcquireWrite(g_hExportCtxListLock); + dllist_remove_node(&psContextExport->sNode); + OSWRLockReleaseWrite(g_hExportCtxListLock); + OSFreeMem(psContextExport); + + /* Unable to find exported context, return error */ + return PVRSRV_OK; +} + +PVRSRV_ERROR +DevmemIntAcquireRemoteCtx(PMR *psPMR, + DEVMEMINT_CTX **ppsContext, + IMG_HANDLE *phPrivData) +{ + PDLLIST_NODE psListNode, psListNodeNext; + DEVMEMINT_CTX_EXPORT *psCtxExport; + + OSWRLockAcquireRead(g_hExportCtxListLock); + /* Find context from list using PMR as key */ + dllist_foreach_node(&g_sExportCtxList, psListNode, psListNodeNext) + { + psCtxExport = IMG_CONTAINER_OF(psListNode, DEVMEMINT_CTX_EXPORT, sNode); + if (psCtxExport->psPMR == psPMR) + { + OSWRLockReleaseRead(g_hExportCtxListLock); + DevmemIntCtxAcquire(psCtxExport->psDevmemCtx); + *ppsContext = psCtxExport->psDevmemCtx; + *phPrivData = psCtxExport->psDevmemCtx->hPrivData; + return PVRSRV_OK; + } + } + OSWRLockReleaseRead(g_hExportCtxListLock); + + /* Unable to find exported context, return error */ + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire remote context. Could not retrieve context with given PMR", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; +} + +/*************************************************************************/ /*! 
+@Function DevmemIntRegisterPFNotify +@Description Registers a PID to be notified when a page fault occurs on a + specific device memory context. +@Input psDevmemCtx The context to be notified about. +@Input ui32PID The PID of the process that would like to be + notified. +@Input bRegister If true, register. If false, de-register. +@Return PVRSRV_ERROR. +*/ /**************************************************************************/ +PVRSRV_ERROR DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx, + IMG_INT32 ui32PID, + IMG_BOOL bRegister) +{ + PVRSRV_DEVICE_NODE *psDevNode; + DLLIST_NODE *psNode, *psNodeNext; + DEVMEMINT_PF_NOTIFY *psNotifyNode; + IMG_BOOL bPresent = IMG_FALSE; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psDevmemCtx, "psDevmemCtx"); + + psDevNode = psDevmemCtx->psDevNode; + + if (bRegister) + { + OSWRLockAcquireRead(psDevmemCtx->hListLock); + /* If this is the first PID in the list, the device memory context + * needs to be registered for notification */ + if (dllist_is_empty(&psDevmemCtx->sProcessNotifyListHead)) + { + OSWRLockReleaseRead(psDevmemCtx->hListLock); + dllist_add_to_tail(&psDevNode->sMemoryContextPageFaultNotifyListHead, + &psDevmemCtx->sPageFaultNotifyListElem); + } + else + { + OSWRLockReleaseRead(psDevmemCtx->hListLock); + } + } + + /* Loop through the registered PIDs and check whether this one is + * present */ + OSWRLockAcquireRead(psDevmemCtx->hListLock); + dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext) + { + psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem); + + if (psNotifyNode->ui32PID == ui32PID) + { + bPresent = IMG_TRUE; + break; + } + } + OSWRLockReleaseRead(psDevmemCtx->hListLock); + + if (bRegister) + { + if (bPresent) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Trying to register a PID that is already registered", + __func__)); + return PVRSRV_ERROR_PID_ALREADY_REGISTERED; + } + + psNotifyNode = OSAllocMem(sizeof(*psNotifyNode)); + if (psNotifyNode == NULL) + { 
+ PVR_DPF((PVR_DBG_ERROR, + "%s: Unable to allocate memory for the notify list", + __func__)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + psNotifyNode->ui32PID = ui32PID; + OSWRLockAcquireWrite(psDevmemCtx->hListLock); + dllist_add_to_tail(&(psDevmemCtx->sProcessNotifyListHead), &(psNotifyNode->sProcessNotifyListElem)); + OSWRLockReleaseWrite(psDevmemCtx->hListLock); + } + else + { + if (!bPresent) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Trying to unregister a PID that is not registered", + __func__)); + return PVRSRV_ERROR_PID_NOT_REGISTERED; + } + dllist_remove_node(psNode); + psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem); + OSFreeMem(psNotifyNode); + } + + if (!bRegister) + { + /* If the last process in the list is being unregistered, then also + * unregister the device memory context from the notify list. */ + OSWRLockAcquireWrite(psDevmemCtx->hListLock); + if (dllist_is_empty(&psDevmemCtx->sProcessNotifyListHead)) + { + dllist_remove_node(&psDevmemCtx->sPageFaultNotifyListElem); + } + OSWRLockReleaseWrite(psDevmemCtx->hListLock); + } + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! +@Function DevmemIntPFNotify +@Description Notifies any processes that have registered themselves to be + notified when a page fault happens on a specific device memory + context. +@Input *psDevNode The device node. +@Input ui64FaultedPCAddress The page catalogue address that faulted. +@Input sFaultAddress The address that triggered the fault. 
+@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT64 ui64FaultedPCAddress, + IMG_DEV_VIRTADDR sFaultAddress) +{ + DLLIST_NODE *psNode, *psNodeNext; + DEVMEMINT_PF_NOTIFY *psNotifyNode; + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtx = NULL; + IMG_BOOL bFailed = IMG_FALSE; + + OSWRLockAcquireRead(psDevNode->hMemoryContextPageFaultNotifyListLock); + if (dllist_is_empty(&(psDevNode->sMemoryContextPageFaultNotifyListHead))) + { + OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock); + return PVRSRV_OK; + } + + dllist_foreach_node(&(psDevNode->sMemoryContextPageFaultNotifyListHead), psNode, psNodeNext) + { + DEVMEMINT_CTX *psThisContext = + IMG_CONTAINER_OF(psNode, DEVMEMINT_CTX, sPageFaultNotifyListElem); + IMG_DEV_PHYADDR sPCDevPAddr; + + eError = MMU_AcquireBaseAddr(psThisContext->psMMUContext, &sPCDevPAddr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to Acquire Base Address (%s)", + __func__, + PVRSRVGetErrorString(eError))); + OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock); + return eError; + } + + if (sPCDevPAddr.uiAddr == ui64FaultedPCAddress) + { + psDevmemCtx = psThisContext; + break; + } + } + OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock); + + if (psDevmemCtx == NULL) + { + /* Not found, just return */ + return PVRSRV_OK; + } + OSWRLockAcquireRead(psDevmemCtx->hListLock); + + /* + * Store the first occurrence of a page fault address, + * until that address is consumed by a client. 
+ */ + if ((psDevmemCtx->ui32Flags & DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE) == 0) + { + psDevmemCtx->sFaultAddress = sFaultAddress; + psDevmemCtx->ui32Flags |= DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE; + } + + /* Loop through each registered PID and send a signal to the process */ + dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext) + { + psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem); + + eError = OSDebugSignalPID(psNotifyNode->ui32PID); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Unable to signal process for PID: %u", + __func__, + psNotifyNode->ui32PID)); + + PVR_ASSERT(!"Unable to signal process"); + + bFailed = IMG_TRUE; + } + } + OSWRLockReleaseRead(psDevmemCtx->hListLock); + + if (bFailed) + { + return PVRSRV_ERROR_SIGNAL_FAILED; + } + + return PVRSRV_OK; +} + +#if defined(PDUMP) +IMG_UINT32 DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext) +{ + IMG_UINT32 ui32MMUContextID; + MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32MMUContextID, PDUMP_FLAGS_CONTINUOUS); + return ui32MMUContextID; +} + +PVRSRV_ERROR +DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx, + IMG_DEV_VIRTADDR sDevAddrStart, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 ui32ArraySize, + const IMG_CHAR *pszFilename, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_UINT32 uiPDumpMMUCtx; + + PVR_UNREFERENCED_PARAMETER(ui32ArraySize); + + eError = MMU_AcquirePDumpMMUContext(psDevmemCtx->psMMUContext, + &uiPDumpMMUCtx, + ui32PDumpFlags); + + PVR_ASSERT(eError == PVRSRV_OK); + + /* + The following SYSMEM refers to the 'MMU Context', hence it + should be the MMU context, not the PMR, that says what the PDump + MemSpace tag is? + From a PDump P.O.V. 
it doesn't matter which name space we use as long + as that MemSpace is used on the 'MMU Context' we're dumping from + */ + eError = PDumpMMUSAB(psDevmemCtx->psDevNode->sDevId.pszPDumpDevName, + uiPDumpMMUCtx, + sDevAddrStart, + uiSize, + pszFilename, + ui32FileOffset, + ui32PDumpFlags); + PVR_ASSERT(eError == PVRSRV_OK); + + MMU_ReleasePDumpMMUContext(psDevmemCtx->psMMUContext, ui32PDumpFlags); + return PVRSRV_OK; +} + + +PVRSRV_ERROR +DevmemIntPDumpBitmap(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32Width, + IMG_UINT32 ui32Height, + IMG_UINT32 ui32StrideInBytes, + IMG_DEV_VIRTADDR sDevBaseAddr, + DEVMEMINT_CTX *psDevMemContext, + IMG_UINT32 ui32Size, + PDUMP_PIXEL_FORMAT ePixelFormat, + IMG_UINT32 ui32AddrMode, + IMG_UINT32 ui32PDumpFlags) +{ + IMG_UINT32 ui32ContextID; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + eError = MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32ContextID, ui32PDumpFlags); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire MMU context", + __func__)); + return PVRSRV_ERROR_FAILED_TO_ALLOC_MMUCONTEXT_ID; + } + + eError = PDumpBitmapKM(psDeviceNode, + pszFileName, + ui32FileOffset, + ui32Width, + ui32Height, + ui32StrideInBytes, + sDevBaseAddr, + ui32ContextID, + ui32Size, + ePixelFormat, + ui32AddrMode, + ui32PDumpFlags); + + /* Don't care about return value */ + MMU_ReleasePDumpMMUContext(psDevMemContext->psMMUContext, ui32PDumpFlags); + + return eError; +} + +PVRSRV_ERROR +DevmemIntPDumpImageDescriptor(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_CTX *psDevMemContext, + IMG_UINT32 ui32Size, + const IMG_CHAR *pszFileName, + IMG_DEV_VIRTADDR sData, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32LogicalWidth, + IMG_UINT32 ui32LogicalHeight, + IMG_UINT32 ui32PhysicalWidth, + IMG_UINT32 ui32PhysicalHeight, + PDUMP_PIXEL_FORMAT ePixFmt, + 
IMG_MEMLAYOUT eMemLayout, + IMG_FB_COMPRESSION eFBCompression, + const IMG_UINT32 *paui32FBCClearColour, + PDUMP_FBC_SWIZZLE eFBCSwizzle, + IMG_DEV_VIRTADDR sHeader, + IMG_UINT32 ui32HeaderSize, + IMG_UINT32 ui32PDumpFlags) +{ + IMG_UINT32 ui32ContextID; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(ui32Size); + + eError = MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32ContextID, ui32PDumpFlags); + PVR_LOG_RETURN_IF_ERROR(eError, "MMU_AcquirePDumpMMUContext"); + + eError = PDumpImageDescriptor(psDeviceNode, + ui32ContextID, + (IMG_CHAR *)pszFileName, + sData, + ui32DataSize, + ui32LogicalWidth, + ui32LogicalHeight, + ui32PhysicalWidth, + ui32PhysicalHeight, + ePixFmt, + eMemLayout, + eFBCompression, + paui32FBCClearColour, + eFBCSwizzle, + sHeader, + ui32HeaderSize, + ui32PDumpFlags); + PVR_LOG_IF_ERROR(eError, "PDumpImageDescriptor"); + + /* Don't care about return value */ + (void) MMU_ReleasePDumpMMUContext(psDevMemContext->psMMUContext, ui32PDumpFlags); + + return eError; +} + +PVRSRV_ERROR +DevmemIntPDumpDataDescriptor(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_CTX *psDevMemContext, + IMG_UINT32 ui32Size, + const IMG_CHAR *pszFileName, + IMG_DEV_VIRTADDR sData, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32HeaderType, + IMG_UINT32 ui32ElementType, + IMG_UINT32 ui32ElementCount, + IMG_UINT32 ui32PDumpFlags) +{ + IMG_UINT32 ui32ContextID; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(ui32Size); + + if ((ui32HeaderType != IBIN_HEADER_TYPE) && + (ui32HeaderType != DATA_HEADER_TYPE)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid header type (%u)", + __func__, + ui32HeaderType)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32ContextID, ui32PDumpFlags); + PVR_LOG_RETURN_IF_ERROR(eError, "MMU_AcquirePDumpMMUContext"); + + eError = 
PDumpDataDescriptor(psDeviceNode, + ui32ContextID, + (IMG_CHAR *)pszFileName, + sData, + ui32DataSize, + ui32HeaderType, + ui32ElementType, + ui32ElementCount, + ui32PDumpFlags); + PVR_LOG_IF_ERROR(eError, "PDumpDataDescriptor"); + + /* Don't care about return value */ + (void) MMU_ReleasePDumpMMUContext(psDevMemContext->psMMUContext, ui32PDumpFlags); + + return eError; +} + +#endif diff --git a/drivers/mcst/gpu-imgtec/services/server/common/di_server.c b/drivers/mcst/gpu-imgtec/services/server/common/di_server.c new file mode 100644 index 000000000000..a475dc921466 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/di_server.c @@ -0,0 +1,613 @@ +/*************************************************************************/ /*! +@File +@Title Debug Info framework functions and types. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "di_server.h" +#include "osdi_impl.h" +#include "pvrsrv_error.h" +#include "dllist.h" +#include "lock.h" +#include "allocmem.h" + +#define ROOT_GROUP_NAME PVR_DRM_NAME + +/*! Implementation object. 
*/ +typedef struct DI_IMPL +{ + const IMG_CHAR *pszName; /*sCb.pfnInit(); + PVR_LOG_GOTO_IF_ERROR(eError, "psImpl->pfnInit()", return_); + + psNativeGroup = OSAllocMem(sizeof(*psNativeGroup)); + PVR_LOG_GOTO_IF_NOMEM(psNativeGroup, eError, deinit_impl_); + + eError = psImpl->sCb.pfnCreateGroup(_g_sRootGroup.pszName, NULL, + &psNativeGroup->pvHandle); + PVR_LOG_GOTO_IF_ERROR(eError, "psImpl->sCb.pfnCreateGroup", free_memory_); + + psNativeGroup->psDiImpl = psImpl; + dllist_add_to_head(&_g_sRootGroup.sNativeHandleList, + &psNativeGroup->sListNode); + + psImpl->bInitialised = IMG_TRUE; + + return PVRSRV_OK; + +free_memory_: + OSFreeMem(psNativeGroup); +deinit_impl_: + psImpl->sCb.pfnDeInit(); +return_: + return eError; +} + +PVRSRV_ERROR DIInit(void) +{ + PVRSRV_ERROR eError = OSLockCreate(&_g_hLock); + PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); + + return PVRSRV_OK; +} + +PVRSRV_ERROR DIInitImplementations(void) +{ + DLLIST_NODE *psThis, *psNext; + + OSLockAcquire(_g_hLock); + + dllist_init(&_g_sRootGroup.sNativeHandleList); + + /* Loops over all implementations and initialise it. If the initialisation + * fails throw it away and continue with the others. */ + dllist_foreach_node(&_g_sImpls, psThis, psNext) + { + DI_IMPL *psImpl = IMG_CONTAINER_OF(psThis, DI_IMPL, sListNode); + + PVRSRV_ERROR eError = _InitImpl(psImpl); + if (eError != PVRSRV_OK) + { + /* implementation could not be initialised so remove it from the + * list, free the memory and forget about it */ + + PVR_DPF((PVR_DBG_ERROR, "%s: could not initialise \"%s\" debug " + "info implementation, discarding", __func__, + psImpl->pszName)); + + dllist_remove_node(&psImpl->sListNode); + OSFreeMem(psImpl); + } + } + + /* For now don't return error because the common DI implementation is not + * ready yet and this would break OSs other than Linux and Android. 
*/ +#if 0 + if (dllist_is_empty(&_g_sImpls)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: no debug info implementation exists", + __func__)); + + return PVRSRV_ERROR_INIT_FAILURE; + } +#endif /* 0 */ + + OSLockRelease(_g_hLock); + + return PVRSRV_OK; +} + +void DIDeInit(void) +{ + DLLIST_NODE *psThis, *psNext; + + OSLockAcquire(_g_hLock); + + /* Remove all of the native instances of the root group. */ + dllist_foreach_node(&_g_sRootGroup.sNativeHandleList, psThis, psNext) + { + DI_NATIVE_HANDLE *psNativeGroup = + IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode); + DI_IMPL *psImpl = psNativeGroup->psDiImpl; + + psImpl->sCb.pfnDestroyGroup(psNativeGroup->pvHandle); + dllist_remove_node(&psNativeGroup->sListNode); + OSFreeMem(psNativeGroup); + } + + /* Remove all of the implementations. */ + dllist_foreach_node(&_g_sImpls, psThis, psNext) + { + DI_IMPL *psDiImpl = IMG_CONTAINER_OF(psThis, DI_IMPL, sListNode); + + if (psDiImpl->bInitialised) + { + psDiImpl->sCb.pfnDeInit(); + psDiImpl->bInitialised = IMG_FALSE; + } + + dllist_remove_node(&psDiImpl->sListNode); + OSFreeMem(psDiImpl); + } + + OSLockRelease(_g_hLock); + + /* all resources freed so free the lock itself too */ + + OSLockDestroy(_g_hLock); +} + +static IMG_BOOL _ValidateIteratorCb(const DI_ITERATOR_CB *psIterCb, + DI_ENTRY_TYPE eType) +{ + IMG_UINT32 uiFlags = 0; + + if (psIterCb == NULL) + { + return IMG_FALSE; + } + + if (eType == DI_ENTRY_TYPE_GENERIC) + { + uiFlags |= psIterCb->pfnShow != NULL ? BIT(0) : 0; + uiFlags |= psIterCb->pfnStart != NULL ? BIT(1) : 0; + uiFlags |= psIterCb->pfnStop != NULL ? BIT(2) : 0; + uiFlags |= psIterCb->pfnNext != NULL ? BIT(3) : 0; + + /* either only pfnShow or all callbacks need to be set */ + if (uiFlags != BIT(0) && !BITMASK_HAS(uiFlags, 0x0f)) + { + return IMG_FALSE; + } + } + else if (eType == DI_ENTRY_TYPE_RANDOM_ACCESS) + { + uiFlags |= psIterCb->pfnRead != NULL ? BIT(0) : 0; + uiFlags |= psIterCb->pfnSeek != NULL ? 
BIT(1) : 0; + + /* either only pfnRead or all callbacks need to be set */ + if (uiFlags != BIT(0) && !BITMASK_HAS(uiFlags, 0x03)) + { + return IMG_FALSE; + } + } + else + { + return IMG_FALSE; + } + + return IMG_TRUE; +} + +static PVRSRV_ERROR _CreateNativeEntry(const IMG_CHAR *pszName, + DI_ENTRY *psEntry, + const DI_NATIVE_HANDLE *psNativeParent, + void *pvPriv, + DI_ENTRY_TYPE eType, + DI_NATIVE_HANDLE **ppsNativeEntry) +{ + PVRSRV_ERROR eError; + DI_IMPL *psImpl = psNativeParent->psDiImpl; + + DI_NATIVE_HANDLE *psNativeEntry = OSAllocMem(sizeof(*psNativeEntry)); + PVR_LOG_GOTO_IF_NOMEM(psNativeEntry, eError, return_); + + eError = psImpl->sCb.pfnCreateEntry(pszName, + eType, + &psEntry->sIterCb, + pvPriv, + psNativeParent->pvHandle, + &psNativeEntry->pvHandle); + PVR_LOG_GOTO_IF_ERROR(eError, "psImpl->sCb.pfnCreateGroup", free_memory_); + + psNativeEntry->psDiImpl = psImpl; + + *ppsNativeEntry = psNativeEntry; + + return PVRSRV_OK; + +free_memory_: + OSFreeMem(psNativeEntry); +return_: + return eError; +} + +PVRSRV_ERROR DICreateEntry(const IMG_CHAR *pszName, + const DI_GROUP *psGroup, + const DI_ITERATOR_CB *psIterCb, + void *pvPriv, + DI_ENTRY_TYPE eType, + DI_ENTRY **ppsEntry) +{ + PVRSRV_ERROR eError; + DLLIST_NODE *psThis, *psNext; + DI_ENTRY *psEntry; + + PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName"); + PVR_LOG_RETURN_IF_INVALID_PARAM(_ValidateIteratorCb(psIterCb, eType), + "psIterCb"); + PVR_LOG_RETURN_IF_INVALID_PARAM(ppsEntry != NULL, "psEntry"); + + psEntry = OSAllocMem(sizeof(*psEntry)); + PVR_LOG_RETURN_IF_NOMEM(psEntry, "OSAllocMem"); + + if (psGroup == NULL) + { + psGroup = &_g_sRootGroup; + } + + psEntry->pszName = pszName; + psEntry->sIterCb = *psIterCb; + dllist_init(&psEntry->sNativeHandleList); + + OSLockAcquire(_g_hLock); + + /* Iterate over all of the native handles of parent group to create + * the entry for every registered implementation. 
*/ + dllist_foreach_node(&psGroup->sNativeHandleList, psThis, psNext) + { + DI_NATIVE_HANDLE *psNativeEntry, *psNativeGroup = + IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode); + + eError = _CreateNativeEntry(pszName, psEntry, psNativeGroup, pvPriv, + eType, &psNativeEntry); + PVR_GOTO_IF_ERROR(eError, cleanup_); + + dllist_add_to_head(&psEntry->sNativeHandleList, + &psNativeEntry->sListNode); + } + + OSLockRelease(_g_hLock); + + *ppsEntry = psEntry; + + return PVRSRV_OK; + +cleanup_: + OSLockRelease(_g_hLock); + + /* Something went wrong so if there were any native entries created remove + * them from the list, free them and free the DI entry itself. */ + dllist_foreach_node(&psEntry->sNativeHandleList, psThis, psNext) + { + DI_NATIVE_HANDLE *psNativeEntry = + IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode); + + dllist_remove_node(&psNativeEntry->sListNode); + OSFreeMem(psNativeEntry); + } + + OSFreeMem(psEntry); + + return eError; +} + +void DIDestroyEntry(DI_ENTRY *psEntry) +{ + DLLIST_NODE *psThis, *psNext; + + PVR_LOG_RETURN_VOID_IF_FALSE(psEntry != NULL, + "psEntry invalid in DIDestroyEntry()"); + + /* Iterate through all of the native entries of the DI entry, remove + * them from the list and then destroy them. After that, destroy the + * DI entry itself. */ + dllist_foreach_node(&psEntry->sNativeHandleList, psThis, psNext) + { + DI_NATIVE_HANDLE *psNative = IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, + sListNode); + + /* The implementation must ensure that entry is not removed if any + * operations are being executed on the entry. If this is the case + * the implementation should block until all of them are finished + * and prevent any further operations. + * This will guarantee proper synchronisation between the DI framework + * and underlying implementations and prevent destruction/access + * races. 
*/ + psNative->psDiImpl->sCb.pfnDestroyEntry(psNative->pvHandle); + dllist_remove_node(&psNative->sListNode); + OSFreeMem(psNative); + } + + OSFreeMem(psEntry); +} + +static PVRSRV_ERROR _CreateNativeGroup(const IMG_CHAR *pszName, + const DI_NATIVE_HANDLE *psNativeParent, + DI_NATIVE_HANDLE **ppsNativeGroup) +{ + PVRSRV_ERROR eError; + DI_IMPL *psImpl = psNativeParent->psDiImpl; + + DI_NATIVE_HANDLE *psNativeGroup = OSAllocMem(sizeof(*psNativeGroup)); + PVR_LOG_GOTO_IF_NOMEM(psNativeGroup, eError, return_); + + eError = psImpl->sCb.pfnCreateGroup(pszName, + psNativeParent->pvHandle, + &psNativeGroup->pvHandle); + PVR_LOG_GOTO_IF_ERROR(eError, "psImpl->sCb.pfnCreateGroup", free_memory_); + + psNativeGroup->psDiImpl = psImpl; + + *ppsNativeGroup = psNativeGroup; + + return PVRSRV_OK; + +free_memory_: + OSFreeMem(psNativeGroup); +return_: + return eError; +} + +PVRSRV_ERROR DICreateGroup(const IMG_CHAR *pszName, + const DI_GROUP *psParent, + DI_GROUP **ppsGroup) +{ + PVRSRV_ERROR eError; + DLLIST_NODE *psThis, *psNext; + DI_GROUP *psGroup; + + PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName"); + PVR_LOG_RETURN_IF_INVALID_PARAM(ppsGroup != NULL, "ppsDiGroup"); + + psGroup = OSAllocMem(sizeof(*psGroup)); + PVR_LOG_RETURN_IF_NOMEM(psGroup, "OSAllocMem"); + + if (psParent == NULL) + { + psParent = &_g_sRootGroup; + } + + psGroup->pszName = pszName; + psGroup->psParent = psParent; + dllist_init(&psGroup->sNativeHandleList); + + OSLockAcquire(_g_hLock); + + /* Iterate over all of the native handles of parent group to create + * the group for every registered implementation. 
*/ + dllist_foreach_node(&psParent->sNativeHandleList, psThis, psNext) + { + DI_NATIVE_HANDLE *psNativeGroup = NULL, *psNativeParent = + IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode); + + eError = _CreateNativeGroup(pszName, psNativeParent, &psNativeGroup); + PVR_GOTO_IF_ERROR(eError, cleanup_); + + dllist_add_to_head(&psGroup->sNativeHandleList, + &psNativeGroup->sListNode); + } + + OSLockRelease(_g_hLock); + + *ppsGroup = psGroup; + + return PVRSRV_OK; + +cleanup_: + OSLockRelease(_g_hLock); + + /* Something went wrong so if there were any native groups created remove + * them from the list, free them and free the DI group itself. */ + dllist_foreach_node(&psGroup->sNativeHandleList, psThis, psNext) + { + DI_NATIVE_HANDLE *psNativeGroup = + IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode); + + dllist_remove_node(&psNativeGroup->sListNode); + OSFreeMem(psNativeGroup); + } + + OSFreeMem(psGroup); + + return eError; +} + +void DIDestroyGroup(DI_GROUP *psGroup) +{ + DLLIST_NODE *psThis, *psNext; + + PVR_LOG_RETURN_VOID_IF_FALSE(psGroup != NULL, + "psGroup invalid in DIDestroyGroup()"); + + /* Iterate through all of the native groups of the DI group, remove + * them from the list and then destroy them. After that destroy the + * DI group itself. */ + dllist_foreach_node(&psGroup->sNativeHandleList, psThis, psNext) + { + DI_NATIVE_HANDLE *psNative = IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, + sListNode); + + psNative->psDiImpl->sCb.pfnDestroyGroup(psNative->pvHandle); + dllist_remove_node(&psNative->sListNode); + OSFreeMem(psNative); + } + + OSFreeMem(psGroup); +} + +void *DIGetPrivData(const OSDI_IMPL_ENTRY *psEntry) +{ + PVR_ASSERT(psEntry != NULL); + + return psEntry->pvPrivData; +} + +void DISetPrivData(OSDI_IMPL_ENTRY *psEntry, void *pvPrivData) +{ + PVR_ASSERT(psEntry != NULL); + + psEntry->pvPrivData = pvPrivData; +} + +void DIPrintf(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszFmt, ...) 
+{ + va_list args; + + PVR_ASSERT(psEntry != NULL); + PVR_ASSERT(psEntry->psCb != NULL); + PVR_ASSERT(psEntry->psCb->pfnVPrintf != NULL); + PVR_ASSERT(psEntry->pvNative != NULL); + + va_start(args, pszFmt); + psEntry->psCb->pfnVPrintf(psEntry->pvNative, pszFmt, args); + va_end(args); +} + +void DIPuts(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszStr) +{ + PVR_ASSERT(psEntry != NULL); + PVR_ASSERT(psEntry->psCb != NULL); + PVR_ASSERT(psEntry->psCb->pfnPuts != NULL); + PVR_ASSERT(psEntry->pvNative != NULL); + + psEntry->psCb->pfnPuts(psEntry->pvNative, pszStr); +} + +IMG_BOOL DIHasOverflowed(const OSDI_IMPL_ENTRY *psEntry) +{ + PVR_ASSERT(psEntry != NULL); + PVR_ASSERT(psEntry->psCb != NULL); + PVR_ASSERT(psEntry->psCb->pfnHasOverflowed != NULL); + PVR_ASSERT(psEntry->pvNative != NULL); + + return psEntry->psCb->pfnHasOverflowed(psEntry->pvNative); +} + +/* ---- OS implementation API ---------------------------------------------- */ + +static IMG_BOOL _ValidateImplCb(const OSDI_IMPL_CB *psImplCb) +{ + PVR_GOTO_IF_FALSE(psImplCb->pfnInit != NULL, failed_); + PVR_GOTO_IF_FALSE(psImplCb->pfnDeInit != NULL, failed_); + PVR_GOTO_IF_FALSE(psImplCb->pfnCreateGroup != NULL, failed_); + PVR_GOTO_IF_FALSE(psImplCb->pfnDestroyGroup != NULL, failed_); + PVR_GOTO_IF_FALSE(psImplCb->pfnCreateEntry != NULL, failed_); + PVR_GOTO_IF_FALSE(psImplCb->pfnDestroyEntry != NULL, failed_); + + return IMG_TRUE; + +failed_: + return IMG_FALSE; +} + +PVRSRV_ERROR DIRegisterImplementation(const IMG_CHAR *pszName, + const OSDI_IMPL_CB *psImplCb) +{ + DI_IMPL *psDiImpl; + + PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName"); + PVR_LOG_RETURN_IF_INVALID_PARAM(_ValidateImplCb(psImplCb), "psImplCb"); + + psDiImpl = OSAllocMem(sizeof(*psDiImpl)); + PVR_LOG_RETURN_IF_NOMEM(psDiImpl, "OSAllocMem"); + + psDiImpl->pszName = pszName; + psDiImpl->sCb = *psImplCb; + + OSLockAcquire(_g_hLock); + dllist_add_to_tail(&_g_sImpls, &psDiImpl->sListNode); + OSLockRelease(_g_hLock); + + return 
PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/common/handle.c b/drivers/mcst/gpu-imgtec/services/server/common/handle.c new file mode 100644 index 000000000000..8aef399e309f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/handle.c @@ -0,0 +1,2302 @@ +/*************************************************************************/ /*! +@File +@Title Resource Handle Manager +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provide resource handle management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +/* See handle.h for a description of the handle API. */ + +/* + * The implementation supports movable handle structures, allowing the address + * of a handle structure to change without having to fix up pointers in + * any of the handle structures. For example, the linked list mechanism + * used to link subhandles together uses handle array indices rather than + * pointers to the structures themselves. + */ + +#if defined(LINUX) +#include +#else +#include +#endif + +#include "img_defs.h" +#include "handle.h" +#include "handle_impl.h" +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvrsrv.h" + +#define HANDLE_HASH_TAB_INIT_SIZE 32 + +#define TEST_FLAG(v, f) BITMASK_HAS(v, f) +#define TEST_ALLOC_FLAG(psHandleData, f) BITMASK_HAS((psHandleData)->eFlag, f) + + +/* Linked list structure. 
Used for both the list head and list items */ +typedef struct _HANDLE_LIST_ +{ + IMG_HANDLE hPrev; + IMG_HANDLE hNext; + IMG_HANDLE hParent; +} HANDLE_LIST; + +typedef struct _HANDLE_DATA_ +{ + /* The handle that represents this structure */ + IMG_HANDLE hHandle; + + /* Handle type */ + PVRSRV_HANDLE_TYPE eType; + + /* Flags specified when the handle was allocated */ + PVRSRV_HANDLE_ALLOC_FLAG eFlag; + + /* Pointer to the data that the handle represents */ + void *pvData; + + /* + * Callback specified at handle allocation time to + * release/destroy/free the data represented by the + * handle when it's reference count reaches 0. This + * should always be NULL for subhandles. + */ + PFN_HANDLE_RELEASE pfnReleaseData; + + /* List head for subhandles of this handle */ + HANDLE_LIST sChildren; + + /* List entry for sibling subhandles */ + HANDLE_LIST sSiblings; + + /* Reference count. The pfnReleaseData callback gets called when the + * reference count hits zero + */ + IMG_UINT32 ui32RefCount; + +#if defined(PVRSRV_DEBUG_HANDLE_LOCK) + /* Store the handle base used for this handle, so we + * can later access the handle base lock (or check if + * it has been already acquired) + */ + PVRSRV_HANDLE_BASE *psBase; +#endif + +} HANDLE_DATA; + +struct _HANDLE_BASE_ +{ + /* Pointer to a handle implementations base structure */ + HANDLE_IMPL_BASE *psImplBase; + + /* + * Pointer to handle hash table. + * The hash table is used to do reverse lookups, converting data + * pointers to handles. + */ + HASH_TABLE *psHashTab; + + /* Type specific (connection/global/process) Lock handle */ + POS_LOCK hLock; + + /* Can be connection, process, global */ + PVRSRV_HANDLE_BASE_TYPE eType; +}; + +/* + * The key for the handle hash table is an array of three elements, the + * pointer to the resource, the resource type and the parent handle (or + * NULL if there is no parent). The eHandKey enumeration gives the + * array indices of the elements making up the key. 
+ */ +enum eHandKey +{ + HAND_KEY_DATA = 0, + HAND_KEY_TYPE, + HAND_KEY_PARENT, + HAND_KEY_LEN /* Must be last item in list */ +}; + +/* HAND_KEY is the type of the hash table key */ +typedef uintptr_t HAND_KEY[HAND_KEY_LEN]; + +typedef struct FREE_HANDLE_DATA_TAG +{ + PVRSRV_HANDLE_BASE *psBase; + PVRSRV_HANDLE_TYPE eHandleFreeType; + /* timing data (ns) to release bridge lock upon the deadline */ + IMG_UINT64 ui64TimeStart; + IMG_UINT64 ui64MaxBridgeTime; +} FREE_HANDLE_DATA; + +typedef struct FREE_KERNEL_HANDLE_DATA_TAG +{ + PVRSRV_HANDLE_BASE *psBase; + HANDLE_DATA *psProcessHandleData; + IMG_HANDLE hKernelHandle; +} FREE_KERNEL_HANDLE_DATA; + +/* Stores a pointer to the function table of the handle back-end in use */ +static HANDLE_IMPL_FUNCTAB const *gpsHandleFuncs; + +static POS_LOCK gKernelHandleLock; +static IMG_BOOL gbLockInitialised = IMG_FALSE; + +void LockHandle(PVRSRV_HANDLE_BASE *psBase) +{ + OSLockAcquire(psBase->hLock); +} + +void UnlockHandle(PVRSRV_HANDLE_BASE *psBase) +{ + OSLockRelease(psBase->hLock); +} + +/* + * Kernel handle base structure. This is used for handles that are not + * allocated on behalf of a particular process. + */ +PVRSRV_HANDLE_BASE *gpsKernelHandleBase = NULL; + +/* Increase the reference count on the given handle. + * The handle lock must already be acquired. + * Returns: the reference count after the increment + */ +static inline IMG_UINT32 _HandleRef(HANDLE_DATA *psHandleData) +{ +#if defined(PVRSRV_DEBUG_HANDLE_LOCK) + if (!OSLockIsLocked(psHandleData->psBase->hLock)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Handle lock is not locked", __func__)); + OSDumpStack(); + } +#endif + psHandleData->ui32RefCount++; + return psHandleData->ui32RefCount; +} + +/* Decrease the reference count on the given handle. + * The handle lock must already be acquired. 
+ * Returns: the reference count after the decrement + */ +static inline IMG_UINT32 _HandleUnref(HANDLE_DATA *psHandleData) +{ +#if defined(PVRSRV_DEBUG_HANDLE_LOCK) + if (!OSLockIsLocked(psHandleData->psBase->hLock)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Handle lock is not locked", __func__)); + OSDumpStack(); + } +#endif + PVR_ASSERT(psHandleData->ui32RefCount > 0); + psHandleData->ui32RefCount--; + + return psHandleData->ui32RefCount; +} + +#if defined(PVRSRV_NEED_PVR_DPF) +static const IMG_CHAR *HandleTypeToString(PVRSRV_HANDLE_TYPE eType) +{ + #define HANDLETYPE(x) \ + case PVRSRV_HANDLE_TYPE_##x: \ + return #x; + switch (eType) + { + #include "handle_types.h" + #undef HANDLETYPE + + default: + return "INVALID"; + } +} + +static const IMG_CHAR *HandleBaseTypeToString(PVRSRV_HANDLE_BASE_TYPE eType) +{ + #define HANDLEBASETYPE(x) \ + case PVRSRV_HANDLE_BASE_TYPE_##x: \ + return #x; + switch (eType) + { + HANDLEBASETYPE(CONNECTION); + HANDLEBASETYPE(PROCESS); + HANDLEBASETYPE(GLOBAL); + #undef HANDLEBASETYPE + + default: + return "INVALID"; + } +} +#endif + +/*! +******************************************************************************* + @Function GetHandleData + @Description Get the handle data structure for a given handle + @Input psBase - pointer to handle base structure + hHandle - handle from client + eType - handle type or PVRSRV_HANDLE_TYPE_NONE if the handle + type is not to be checked. 
+ @Output ppsHandleData - pointer to a pointer to the handle data struct + @Return Error code or PVRSRV_OK +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(GetHandleData) +#endif +static INLINE +PVRSRV_ERROR GetHandleData(PVRSRV_HANDLE_BASE *psBase, + HANDLE_DATA **ppsHandleData, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType) +{ + HANDLE_DATA *psHandleData; + PVRSRV_ERROR eError; + + eError = gpsHandleFuncs->pfnGetHandleData(psBase->psImplBase, + hHandle, + (void **)&psHandleData); + PVR_RETURN_IF_ERROR(eError); + + /* + * Unless PVRSRV_HANDLE_TYPE_NONE was passed in to this function, + * check handle is of the correct type. + */ + if (unlikely(eType != PVRSRV_HANDLE_TYPE_NONE && eType != psHandleData->eType)) + { + PVR_DPF((PVR_DBG_ERROR, + "GetHandleData: Type mismatch. Lookup request: Handle %p, type: %s (%u) but stored handle is type %s (%u)", + hHandle, + HandleTypeToString(eType), + eType, + HandleTypeToString(psHandleData->eType), + psHandleData->eType)); + return PVRSRV_ERROR_HANDLE_TYPE_MISMATCH; + } + + /* Return the handle structure */ + *ppsHandleData = psHandleData; + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + @Function HandleListInit + @Description Initialise a linked list structure embedded in a handle + structure. + @Input hHandle - handle containing the linked list structure + psList - pointer to linked list structure + hParent - parent handle or NULL +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(HandleListInit) +#endif +static INLINE +void HandleListInit(IMG_HANDLE hHandle, HANDLE_LIST *psList, IMG_HANDLE hParent) +{ + psList->hPrev = hHandle; + psList->hNext = hHandle; + psList->hParent = hParent; +} + +/*! 
+******************************************************************************* + @Function InitParentList + @Description Initialise the children list head in a handle structure. + The children are the subhandles of this handle. + @Input psHandleData - pointer to handle data structure +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(InitParentList) +#endif +static INLINE +void InitParentList(HANDLE_DATA *psHandleData) +{ + IMG_HANDLE hParent = psHandleData->hHandle; + + HandleListInit(hParent, &psHandleData->sChildren, hParent); +} + +/*! +******************************************************************************* + + @Function InitChildEntry + @Description Initialise the child list entry in a handle structure. The list + entry is used to link together subhandles of a given handle. + @Input psHandleData - pointer to handle data structure +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(InitChildEntry) +#endif +static INLINE +void InitChildEntry(HANDLE_DATA *psHandleData) +{ + HandleListInit(psHandleData->hHandle, &psHandleData->sSiblings, NULL); +} + +/*! +******************************************************************************* + @Function HandleListIsEmpty + @Description Determine whether a given linked list is empty. + @Input hHandle - handle containing the list head + psList - pointer to the list head + @Return IMG_TRUE if the list is empty, IMG_FALSE if it isn't. +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(HandleListIsEmpty) +#endif +static INLINE +IMG_BOOL HandleListIsEmpty(IMG_HANDLE hHandle, HANDLE_LIST *psList) /* Instead of passing in the handle can we not just do (psList->hPrev == psList->hNext) ? IMG_TRUE : IMG_FALSE ??? 
*/ +{ + IMG_BOOL bIsEmpty; + + bIsEmpty = (IMG_BOOL)(psList->hNext == hHandle); + +#ifdef DEBUG + { + IMG_BOOL bIsEmpty2; + + bIsEmpty2 = (IMG_BOOL)(psList->hPrev == hHandle); + PVR_ASSERT(bIsEmpty == bIsEmpty2); + } +#endif + + return bIsEmpty; +} + +#ifdef DEBUG +/*! +******************************************************************************* + @Function NoChildren + @Description Determine whether a handle has any subhandles + @Input psHandleData - pointer to handle data structure + @Return IMG_TRUE if the handle has no subhandles, IMG_FALSE if it does. +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(NoChildren) +#endif +static INLINE +IMG_BOOL NoChildren(HANDLE_DATA *psHandleData) +{ + PVR_ASSERT(psHandleData->sChildren.hParent == psHandleData->hHandle); + + return HandleListIsEmpty(psHandleData->hHandle, &psHandleData->sChildren); +} + +/*! +******************************************************************************* + @Function NoParent + @Description Determine whether a handle is a subhandle + @Input psHandleData - pointer to handle data structure + @Return IMG_TRUE if the handle is not a subhandle, IMG_FALSE if it is. +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(NoParent) +#endif +static INLINE +IMG_BOOL NoParent(HANDLE_DATA *psHandleData) +{ + if (HandleListIsEmpty(psHandleData->hHandle, &psHandleData->sSiblings)) + { + PVR_ASSERT(psHandleData->sSiblings.hParent == NULL); + + return IMG_TRUE; + } + + PVR_ASSERT(psHandleData->sSiblings.hParent != NULL); + return IMG_FALSE; +} +#endif /*DEBUG*/ + +/*! +******************************************************************************* + @Function ParentHandle + @Description Determine the parent of a handle + @Input psHandleData - pointer to handle data structure + @Return Parent handle, or NULL if the handle is not a subhandle. 
+******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(ParentHandle) +#endif +static INLINE +IMG_HANDLE ParentHandle(HANDLE_DATA *psHandleData) +{ + return psHandleData->sSiblings.hParent; +} + +/* + * GetHandleListFromHandleAndOffset is used to generate either a + * pointer to the subhandle list head, or a pointer to the linked list + * structure of an item on a subhandle list. + * The list head is itself on the list, but is at a different offset + * in the handle structure to the linked list structure for items on + * the list. The two linked list structures are differentiated by + * the third parameter, containing the parent handle. The parent field + * in the list head structure references the handle structure that contains + * it. For items on the list, the parent field in the linked list structure + * references the parent handle, which will be different from the handle + * containing the linked list structure. + */ +#ifdef INLINE_IS_PRAGMA +#pragma inline(GetHandleListFromHandleAndOffset) +#endif +static INLINE +HANDLE_LIST *GetHandleListFromHandleAndOffset(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hEntry, + IMG_HANDLE hParent, + size_t uiParentOffset, + size_t uiEntryOffset) +{ + HANDLE_DATA *psHandleData = NULL; + + PVR_ASSERT(psBase != NULL); + + if (GetHandleData(psBase, &psHandleData, hEntry, + PVRSRV_HANDLE_TYPE_NONE) != PVRSRV_OK) + { + return NULL; + } + + if (hEntry == hParent) + { + return (HANDLE_LIST *)IMG_OFFSET_ADDR(psHandleData, uiParentOffset); + } + else + { + return (HANDLE_LIST *)IMG_OFFSET_ADDR(psHandleData, uiEntryOffset); + } +} + +/*! +******************************************************************************* + @Function HandleListInsertBefore + @Description Insert a handle before a handle currently on the list. 
+ @Input hEntry - handle to be inserted after + psEntry - pointer to handle structure to be inserted after + uiParentOffset - offset to list head struct in handle structure + hNewEntry - handle to be inserted + psNewEntry - pointer to handle structure of item to be inserted + uiEntryOffset - offset of list item struct in handle structure + hParent - parent handle of hNewEntry + @Return Error code or PVRSRV_OK +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(HandleListInsertBefore) +#endif +static INLINE +PVRSRV_ERROR HandleListInsertBefore(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hEntry, + HANDLE_LIST *psEntry, + size_t uiParentOffset, + IMG_HANDLE hNewEntry, + HANDLE_LIST *psNewEntry, + size_t uiEntryOffset, + IMG_HANDLE hParent) +{ + HANDLE_LIST *psPrevEntry; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psEntry != NULL, "psEntry"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psNewEntry != NULL, "psNewEntry"); + + psPrevEntry = GetHandleListFromHandleAndOffset(psBase, + psEntry->hPrev, + hParent, + uiParentOffset, + uiEntryOffset); + if (psPrevEntry == NULL) + { + return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; + } + + PVR_ASSERT(psNewEntry->hParent == NULL); + PVR_ASSERT(hEntry == psPrevEntry->hNext); + +#if defined(DEBUG) + { + HANDLE_LIST *psParentList; + + psParentList = GetHandleListFromHandleAndOffset(psBase, + hParent, + hParent, + uiParentOffset, + uiParentOffset); + PVR_ASSERT(psParentList && psParentList->hParent == hParent); + } +#endif /* defined(DEBUG) */ + + psNewEntry->hPrev = psEntry->hPrev; + psEntry->hPrev = hNewEntry; + + psNewEntry->hNext = hEntry; + psPrevEntry->hNext = hNewEntry; + + psNewEntry->hParent = hParent; + + return PVRSRV_OK; +} + +/*! 
+******************************************************************************* + @Function AdoptChild + @Description Assign a subhandle to a handle + @Input psParentData - pointer to handle structure of parent handle + psChildData - pointer to handle structure of child subhandle + @Return Error code or PVRSRV_OK +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(AdoptChild) +#endif +static INLINE +PVRSRV_ERROR AdoptChild(PVRSRV_HANDLE_BASE *psBase, + HANDLE_DATA *psParentData, + HANDLE_DATA *psChildData) +{ + IMG_HANDLE hParent = psParentData->sChildren.hParent; + + PVR_ASSERT(hParent == psParentData->hHandle); + + return HandleListInsertBefore(psBase, + hParent, + &psParentData->sChildren, + offsetof(HANDLE_DATA, sChildren), + psChildData->hHandle, + &psChildData->sSiblings, + offsetof(HANDLE_DATA, sSiblings), + hParent); +} + +/*! +******************************************************************************* + @Function HandleListRemove + @Description Remove a handle from a list + @Input hEntry - handle to be removed + psEntry - pointer to handle structure of item to be removed + uiEntryOffset - offset of list item struct in handle structure + uiParentOffset - offset to list head struct in handle structure + @Return Error code or PVRSRV_OK +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(HandleListRemove) +#endif +static INLINE +PVRSRV_ERROR HandleListRemove(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hEntry, + HANDLE_LIST *psEntry, + size_t uiEntryOffset, + size_t uiParentOffset) +{ + PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psEntry != NULL, "psEntry"); + + if (!HandleListIsEmpty(hEntry, psEntry)) + { + HANDLE_LIST *psPrev; + HANDLE_LIST *psNext; + + psPrev = GetHandleListFromHandleAndOffset(psBase, + psEntry->hPrev, + psEntry->hParent, + uiParentOffset, + 
uiEntryOffset); + if (psPrev == NULL) + { + return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; + } + + psNext = GetHandleListFromHandleAndOffset(psBase, + psEntry->hNext, + psEntry->hParent, + uiParentOffset, + uiEntryOffset); + if (psNext == NULL) + { + return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; + } + + /* + * The list head is on the list, and we don't want to + * remove it. + */ + PVR_ASSERT(psEntry->hParent != NULL); + + psPrev->hNext = psEntry->hNext; + psNext->hPrev = psEntry->hPrev; + + HandleListInit(hEntry, psEntry, NULL); + } + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + @Function UnlinkFromParent + @Description Remove a subhandle from its parents list + @Input psHandleData - pointer to handle data structure of child + subhandle. + @Return Error code or PVRSRV_OK +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(UnlinkFromParent) +#endif +static INLINE +PVRSRV_ERROR UnlinkFromParent(PVRSRV_HANDLE_BASE *psBase, + HANDLE_DATA *psHandleData) +{ + return HandleListRemove(psBase, + psHandleData->hHandle, + &psHandleData->sSiblings, + offsetof(HANDLE_DATA, sSiblings), + offsetof(HANDLE_DATA, sChildren)); +} + +/*! 
+******************************************************************************* + @Function HandleListIterate + @Description Iterate over the items in a list + @Input psHead - pointer to list head + uiParentOffset - offset to list head struct in handle structure + uiEntryOffset - offset of list item struct in handle structure + pfnIterFunc - function to be called for each handle in the list + @Return Error code or PVRSRV_OK +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(HandleListIterate) +#endif +static INLINE +PVRSRV_ERROR HandleListIterate(PVRSRV_HANDLE_BASE *psBase, + HANDLE_LIST *psHead, + size_t uiParentOffset, + size_t uiEntryOffset, + PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, IMG_HANDLE)) +{ + IMG_HANDLE hHandle = psHead->hNext; + IMG_HANDLE hParent = psHead->hParent; + IMG_HANDLE hNext; + + PVR_ASSERT(psHead->hParent != NULL); + + /* + * Follow the next chain from the list head until we reach + * the list head again, which signifies the end of the list. + */ + while (hHandle != hParent) + { + HANDLE_LIST *psEntry; + PVRSRV_ERROR eError; + + psEntry = GetHandleListFromHandleAndOffset(psBase, + hHandle, + hParent, + uiParentOffset, + uiEntryOffset); + if (psEntry == NULL) + { + return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; + } + + PVR_ASSERT(psEntry->hParent == psHead->hParent); + + /* + * Get the next index now, in case the list item is + * modified by the iteration function. + */ + hNext = psEntry->hNext; + + eError = (*pfnIterFunc)(psBase, hHandle); + PVR_RETURN_IF_ERROR(eError); + + hHandle = hNext; + } + + return PVRSRV_OK; +} + +/*! 
+******************************************************************************* + @Function IterateOverChildren + @Description Iterate over the subhandles of a parent handle + @Input psParentData - pointer to parent handle structure + pfnIterFunc - function to be called for each subhandle + @Return Error code or PVRSRV_OK +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(IterateOverChildren) +#endif +static INLINE +PVRSRV_ERROR IterateOverChildren(PVRSRV_HANDLE_BASE *psBase, + HANDLE_DATA *psParentData, + PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, IMG_HANDLE)) +{ + return HandleListIterate(psBase, + &psParentData->sChildren, + offsetof(HANDLE_DATA, sChildren), + offsetof(HANDLE_DATA, sSiblings), + pfnIterFunc); +} + +/*! +******************************************************************************* + @Function ParentIfPrivate + @Description Return the parent handle if the handle was allocated with + PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE, else return NULL. + @Input psHandleData - pointer to handle data structure + @Return Parent handle or NULL +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(ParentIfPrivate) +#endif +static INLINE +IMG_HANDLE ParentIfPrivate(HANDLE_DATA *psHandleData) +{ + return TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ? + ParentHandle(psHandleData) : NULL; +} + +/*! 
+******************************************************************************* + @Function InitKey + @Description Initialise a hash table key for the current process + @Input aKey - pointer to key + psBase - pointer to handle base structure + pvData - pointer to the resource the handle represents + eType - type of resource +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(InitKey) +#endif +static INLINE +void InitKey(HAND_KEY aKey, + PVRSRV_HANDLE_BASE *psBase, + void *pvData, + PVRSRV_HANDLE_TYPE eType, + IMG_HANDLE hParent) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + + aKey[HAND_KEY_DATA] = (uintptr_t)pvData; + aKey[HAND_KEY_TYPE] = (uintptr_t)eType; + aKey[HAND_KEY_PARENT] = (uintptr_t)hParent; +} + +static PVRSRV_ERROR FreeHandleWrapper(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle); + +/*! +******************************************************************************* + @Function FreeHandle + @Description Free a handle data structure. 
+ @Input psBase - Pointer to handle base structure + hHandle - Handle to be freed + eType - Type of the handle to be freed + ppvData - Location for data associated with the freed handle + @Output ppvData - Points to the data associated with the freed handle + @Return PVRSRV_OK or PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR FreeHandle(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType, + void **ppvData) +{ + HANDLE_DATA *psHandleData = NULL; + HANDLE_DATA *psReleasedHandleData; + PVRSRV_ERROR eError; + + eError = GetHandleData(psBase, &psHandleData, hHandle, eType); + PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData"); + + if (_HandleUnref(psHandleData) > 0) + { + /* this handle still has references so do not destroy it + * or the underlying object yet + */ + return PVRSRV_OK; + } + + /* Call the release data callback for each reference on the handle */ + if (psHandleData->pfnReleaseData != NULL) + { + eError = psHandleData->pfnReleaseData(psHandleData->pvData); + if (eError == PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: " + "Got retry while calling release data callback for %p (type = %d)", + __func__, + hHandle, + (IMG_UINT32)psHandleData->eType)); + + /* the caller should retry, so retain a reference on the handle */ + _HandleRef(psHandleData); + + return eError; + } + else if (eError != PVRSRV_OK) + { + return eError; + } + } + + if (!TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) + { + HAND_KEY aKey; + IMG_HANDLE hRemovedHandle; + + InitKey(aKey, psBase, psHandleData->pvData, psHandleData->eType, ParentIfPrivate(psHandleData)); + + hRemovedHandle = (IMG_HANDLE)HASH_Remove_Extended(psBase->psHashTab, aKey); + + PVR_ASSERT(hRemovedHandle != NULL); + PVR_ASSERT(hRemovedHandle == psHandleData->hHandle); + PVR_UNREFERENCED_PARAMETER(hRemovedHandle); + } + + eError = UnlinkFromParent(psBase, psHandleData); + PVR_LOG_RETURN_IF_ERROR(eError, 
"UnlinkFromParent"); + + /* Free children */ + eError = IterateOverChildren(psBase, psHandleData, FreeHandleWrapper); + PVR_LOG_RETURN_IF_ERROR(eError, "IterateOverChildren"); + + eError = gpsHandleFuncs->pfnReleaseHandle(psBase->psImplBase, + psHandleData->hHandle, + (void **)&psReleasedHandleData); + if (unlikely(eError == PVRSRV_OK)) + { + PVR_ASSERT(psReleasedHandleData == psHandleData); + } + + if (ppvData) + { + *ppvData = psHandleData->pvData; + } + + OSFreeMem(psHandleData); + + return eError; +} + +static PVRSRV_ERROR FreeHandleWrapper(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle) +{ + return FreeHandle(psBase, hHandle, PVRSRV_HANDLE_TYPE_NONE, NULL); +} + +/*! +******************************************************************************* + @Function FindHandle + @Description Find handle corresponding to a resource pointer + @Input psBase - pointer to handle base structure + pvData - pointer to resource to be associated with the handle + eType - the type of resource + @Return The handle, or NULL if not found +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(FindHandle) +#endif +static INLINE +IMG_HANDLE FindHandle(PVRSRV_HANDLE_BASE *psBase, + void *pvData, + PVRSRV_HANDLE_TYPE eType, + IMG_HANDLE hParent) +{ + HAND_KEY aKey; + + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + + InitKey(aKey, psBase, pvData, eType, hParent); + + return (IMG_HANDLE) HASH_Retrieve_Extended(psBase->psHashTab, aKey); +} + +/*! 
+******************************************************************************* + @Function AllocHandle + @Description Allocate a new handle + @Input phHandle - location for new handle + pvData - pointer to resource to be associated with the handle + eType - the type of resource + hParent - parent handle or NULL + pfnReleaseData - Function to release resource at handle release + time + @Output phHandle - points to new handle + @Return Error code or PVRSRV_OK +******************************************************************************/ +static PVRSRV_ERROR AllocHandle(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE *phHandle, + void *pvData, + PVRSRV_HANDLE_TYPE eType, + PVRSRV_HANDLE_ALLOC_FLAG eFlag, + IMG_HANDLE hParent, + PFN_HANDLE_RELEASE pfnReleaseData) +{ + HANDLE_DATA *psNewHandleData; + IMG_HANDLE hHandle; + PVRSRV_ERROR eError; + + /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + PVR_ASSERT(psBase != NULL && psBase->psHashTab != NULL); + PVR_ASSERT(gpsHandleFuncs); + + if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) + { + /* Handle must not already exist */ + PVR_ASSERT(FindHandle(psBase, pvData, eType, hParent) == NULL); + } + + psNewHandleData = OSAllocZMem(sizeof(*psNewHandleData)); + PVR_LOG_RETURN_IF_NOMEM(psNewHandleData, "OSAllocZMem"); + + eError = gpsHandleFuncs->pfnAcquireHandle(psBase->psImplBase, &hHandle, + psNewHandleData); + PVR_LOG_GOTO_IF_ERROR(eError, "pfnAcquireHandle", + ErrorFreeHandleData); + + /* + * If a data pointer can be associated with multiple handles, we + * don't put the handle in the hash table, as the data pointer + * may not map to a unique handle + */ + if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) + { + HAND_KEY aKey; + + /* Initialise hash key */ + InitKey(aKey, psBase, pvData, eType, hParent); + + /* Put the new handle in the hash table */ + eError = HASH_Insert_Extended(psBase->psHashTab, aKey, (uintptr_t)hHandle) ? 
+ PVRSRV_OK : PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; + PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "couldn't add handle to hash table", + ErrorReleaseHandle); + } + + psNewHandleData->hHandle = hHandle; + psNewHandleData->eType = eType; + psNewHandleData->eFlag = eFlag; + psNewHandleData->pvData = pvData; + psNewHandleData->pfnReleaseData = pfnReleaseData; + psNewHandleData->ui32RefCount = 1; + + InitParentList(psNewHandleData); +#if defined(DEBUG) + PVR_ASSERT(NoChildren(psNewHandleData)); +#endif + + InitChildEntry(psNewHandleData); +#if defined(DEBUG) + PVR_ASSERT(NoParent(psNewHandleData)); +#endif + +#if defined(PVRSRV_DEBUG_HANDLE_LOCK) + psNewHandleData->psBase = psBase; +#endif + + /* Return the new handle to the client */ + *phHandle = psNewHandleData->hHandle; + + return PVRSRV_OK; + +ErrorReleaseHandle: + (void)gpsHandleFuncs->pfnReleaseHandle(psBase->psImplBase, hHandle, NULL); + +ErrorFreeHandleData: + OSFreeMem(psNewHandleData); + + return eError; +} + +/*! +******************************************************************************* + @Function PVRSRVAllocHandle + @Description Allocate a handle + @Input psBase - pointer to handle base structure + pvData - pointer to resource to be associated with the handle + eType - the type of resource + pfnReleaseData - Function to release resource at handle release + time + @Output phHandle - points to new handle + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE *phHandle, + void *pvData, + PVRSRV_HANDLE_TYPE eType, + PVRSRV_HANDLE_ALLOC_FLAG eFlag, + PFN_HANDLE_RELEASE pfnReleaseData) +{ + PVRSRV_ERROR eError; + + LockHandle(psBase); + eError = PVRSRVAllocHandleUnlocked(psBase, phHandle, pvData, eType, eFlag, pfnReleaseData); + UnlockHandle(psBase); + + return eError; +} + +/*! 
+******************************************************************************* + @Function PVRSRVAllocHandleUnlocked + @Description Allocate a handle without acquiring/releasing the handle lock. + The function assumes you hold the lock when called. + @Input phHandle - location for new handle + pvData - pointer to resource to be associated with the handle + eType - the type of resource + pfnReleaseData - Function to release resource at handle release + time + @Output phHandle - points to new handle + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVAllocHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE *phHandle, + void *pvData, + PVRSRV_HANDLE_TYPE eType, + PVRSRV_HANDLE_ALLOC_FLAG eFlag, + PFN_HANDLE_RELEASE pfnReleaseData) +{ + *phHandle = NULL; + + /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + PVR_ASSERT(gpsHandleFuncs); + + PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); + PVR_LOG_RETURN_IF_INVALID_PARAM(pfnReleaseData != NULL, "pfnReleaseData"); + + return AllocHandle(psBase, phHandle, pvData, eType, eFlag, NULL, pfnReleaseData); +} + +/*! 
+******************************************************************************* + @Function PVRSRVAllocSubHandle + @Description Allocate a subhandle + @Input pvData - pointer to resource to be associated with the subhandle + eType - the type of resource + hParent - parent handle + @Output phHandle - points to new subhandle + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE *phHandle, + void *pvData, + PVRSRV_HANDLE_TYPE eType, + PVRSRV_HANDLE_ALLOC_FLAG eFlag, + IMG_HANDLE hParent) +{ + PVRSRV_ERROR eError; + + LockHandle(psBase); + eError = PVRSRVAllocSubHandleUnlocked(psBase, phHandle, pvData, eType, eFlag, hParent); + UnlockHandle(psBase); + + return eError; +} + +/*! +******************************************************************************* + @Function PVRSRVAllocSubHandleUnlocked + @Description Allocate a subhandle without acquiring/releasing the handle + lock. The function assumes you hold the lock when called. 
+ @Input pvData - pointer to resource to be associated with the subhandle + eType - the type of resource + hParent - parent handle + @Output phHandle - points to new subhandle + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVAllocSubHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE *phHandle, + void *pvData, + PVRSRV_HANDLE_TYPE eType, + PVRSRV_HANDLE_ALLOC_FLAG eFlag, + IMG_HANDLE hParent) +{ + HANDLE_DATA *psPHandleData = NULL; + HANDLE_DATA *psCHandleData = NULL; + IMG_HANDLE hParentKey; + IMG_HANDLE hHandle; + PVRSRV_ERROR eError; + + *phHandle = NULL; + + /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + PVR_ASSERT(gpsHandleFuncs); + + PVR_LOG_GOTO_IF_INVALID_PARAM(psBase, eError, Exit); + + hParentKey = TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ? hParent : NULL; + + /* Lookup the parent handle */ + eError = GetHandleData(psBase, &psPHandleData, hParent, PVRSRV_HANDLE_TYPE_NONE); + PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "failed to get parent handle structure", + Exit); + + eError = AllocHandle(psBase, &hHandle, pvData, eType, eFlag, hParentKey, NULL); + PVR_GOTO_IF_ERROR(eError, Exit); + + eError = GetHandleData(psBase, &psCHandleData, hHandle, PVRSRV_HANDLE_TYPE_NONE); + /* If we were able to allocate the handle then there should be no reason why we + * can't also get it's handle structure. Otherwise something has gone badly wrong. + */ + PVR_ASSERT(eError == PVRSRV_OK); + PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "Failed to get parent handle structure", + ExitFreeHandle); + + /* + * Get the parent handle structure again, in case the handle + * structure has moved (depending on the implementation + * of AllocHandle). 
+ */ + eError = GetHandleData(psBase, &psPHandleData, hParent, PVRSRV_HANDLE_TYPE_NONE); + PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "failed to get parent handle structure", + ExitFreeHandle); + + eError = AdoptChild(psBase, psPHandleData, psCHandleData); + PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "parent handle failed to adopt subhandle", + ExitFreeHandle); + + *phHandle = hHandle; + + return PVRSRV_OK; + +ExitFreeHandle: + (void) FreeHandle(psBase, hHandle, eType, NULL); +Exit: + return eError; +} + +/*! +******************************************************************************* + @Function PVRSRVFindHandle + @Description Find handle corresponding to a resource pointer + @Input pvData - pointer to resource to be associated with the handle + eType - the type of resource + @Output phHandle - points to returned handle + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE *phHandle, + void *pvData, + PVRSRV_HANDLE_TYPE eType) +{ + PVRSRV_ERROR eError; + + LockHandle(psBase); + eError = PVRSRVFindHandleUnlocked(psBase, phHandle, pvData, eType); + UnlockHandle(psBase); + + return eError; +} + +/*! +******************************************************************************* + @Function PVRSRVFindHandleUnlocked + @Description Find handle corresponding to a resource pointer without + acquiring/releasing the handle lock. The function assumes you + hold the lock when called. 
+ @Input pvData - pointer to resource to be associated with the handle + eType - the type of resource + @Output phHandle - points to the returned handle + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVFindHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE *phHandle, + void *pvData, + PVRSRV_HANDLE_TYPE eType) +{ + IMG_HANDLE hHandle; + + /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + PVR_ASSERT(gpsHandleFuncs); + + PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); + + /* See if there is a handle for this data pointer */ + hHandle = FindHandle(psBase, pvData, eType, NULL); + if (hHandle == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error finding handle. Type %u", + __func__, + eType)); + + return PVRSRV_ERROR_HANDLE_NOT_FOUND; + } + + *phHandle = hHandle; + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + @Function PVRSRVLookupHandle + @Description Lookup the data pointer corresponding to a handle + @Input hHandle - handle from client + eType - handle type + bRef - If TRUE, a reference will be added on the handle if the + lookup is successful. + @Output ppvData - points to the return data pointer + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, + void **ppvData, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType, + IMG_BOOL bRef) +{ + PVRSRV_ERROR eError; + + LockHandle(psBase); + eError = PVRSRVLookupHandleUnlocked(psBase, ppvData, hHandle, eType, bRef); + UnlockHandle(psBase); + + return eError; +} + +/*! 
+******************************************************************************* + @Function PVRSRVLookupHandleUnlocked + @Description Lookup the data pointer corresponding to a handle without + acquiring/releasing the handle lock. The function assumes you + hold the lock when called. + @Input hHandle - handle from client + eType - handle type + bRef - If TRUE, a reference will be added on the handle if the + lookup is successful. + @Output ppvData - points to the returned data pointer + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVLookupHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, + void **ppvData, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType, + IMG_BOOL bRef) +{ + HANDLE_DATA *psHandleData = NULL; + PVRSRV_ERROR eError; + + /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + PVR_ASSERT(gpsHandleFuncs); + + PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); + + eError = GetHandleData(psBase, &psHandleData, hHandle, eType); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error looking up handle (%s) for base %p of type %s. Handle %p, type %s", + __func__, + PVRSRVGetErrorString(eError), + psBase, + HandleBaseTypeToString(psBase->eType), + (void*) hHandle, + HandleTypeToString(eType))); +#if defined(DEBUG) || defined(PVRSRV_NEED_PVR_DPF) + OSDumpStack(); +#endif + return eError; + } + + if (psHandleData->ui32RefCount == 0) + { + return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; + } + + if (bRef) + { + _HandleRef(psHandleData); + } + + *ppvData = psHandleData->pvData; + + return PVRSRV_OK; +} + +/*! 
+******************************************************************************* + @Function PVRSRVLookupSubHandle + @Description Lookup the data pointer corresponding to a subhandle + @Input hHandle - handle from client + eType - handle type + hAncestor - ancestor handle + @Output ppvData - points to the returned data pointer + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, + void **ppvData, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType, + IMG_HANDLE hAncestor) +{ + HANDLE_DATA *psPHandleData = NULL; + HANDLE_DATA *psCHandleData = NULL; + PVRSRV_ERROR eError; + + /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + PVR_ASSERT(gpsHandleFuncs); + + PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); + + LockHandle(psBase); + + eError = GetHandleData(psBase, &psCHandleData, hHandle, eType); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error looking up subhandle (%s). Handle %p, type %u", + __func__, + PVRSRVGetErrorString(eError), + (void*) hHandle, + eType)); + OSDumpStack(); + goto ExitUnlock; + } + + /* Look for hAncestor among the handle's ancestors */ + for (psPHandleData = psCHandleData; ParentHandle(psPHandleData) != hAncestor; ) + { + eError = GetHandleData(psBase, &psPHandleData, ParentHandle(psPHandleData), PVRSRV_HANDLE_TYPE_NONE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Subhandle doesn't belong to given ancestor", + __func__)); + eError = PVRSRV_ERROR_INVALID_SUBHANDLE; + goto ExitUnlock; + } + } + + *ppvData = psCHandleData->pvData; + + eError = PVRSRV_OK; + +ExitUnlock: + UnlockHandle(psBase); + + return eError; +} + + +/*! 
+******************************************************************************* + @Function PVRSRVReleaseHandle + @Description Release a handle that is no longer needed + @Input hHandle - handle from client + eType - handle type + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType) +{ + PVRSRV_ERROR eError; + + LockHandle(psBase); + eError = PVRSRVReleaseHandleUnlocked(psBase, hHandle, eType); + UnlockHandle(psBase); + + return eError; +} + + +/*! +******************************************************************************* + @Function PVRSRVReleaseHandleUnlocked + @Description Release a handle that is no longer needed without + acquiring/releasing the handle lock. The function assumes you + hold the lock when called. + @Input hHandle - handle from client + eType - handle type + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVReleaseHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType) +{ + /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + PVR_ASSERT(gpsHandleFuncs); + + PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); + + return FreeHandle(psBase, hHandle, eType, NULL); +} + +/*! 
+******************************************************************************* + @Function PVRSRVPurgeHandles + @Description Purge handles for a given handle base + @Input psBase - pointer to handle base structure + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT(gpsHandleFuncs); + + PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); + + LockHandle(psBase); + eError = gpsHandleFuncs->pfnPurgeHandles(psBase->psImplBase); + UnlockHandle(psBase); + + return eError; +} + +static PVRSRV_ERROR HandleUnrefAndMaybeMarkForFree(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType); + +static PVRSRV_ERROR HandleUnrefAndMaybeMarkForFreeWrapper(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle) +{ + return HandleUnrefAndMaybeMarkForFree(psBase, hHandle, PVRSRV_HANDLE_TYPE_NONE); +} + +static PVRSRV_ERROR HandleUnrefAndMaybeMarkForFree(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType) +{ + HANDLE_DATA *psHandleData = NULL; + PVRSRV_ERROR eError; + + eError = GetHandleData(psBase, &psHandleData, hHandle, eType); + PVR_RETURN_IF_ERROR(eError); + + if (psHandleData->ui32RefCount == 0) + { + /* the handle is already in the destruction phase + * i.e. 
its refcount has already reached 0 + */ + return PVRSRV_OK; + } + + if (_HandleUnref(psHandleData) > 0) + { + /* this handle still has references so do not destroy it + * or the underlying object yet + */ + return PVRSRV_ERROR_OBJECT_STILL_REFERENCED; + } + + /* Prepare children for destruction */ + eError = IterateOverChildren(psBase, psHandleData, + HandleUnrefAndMaybeMarkForFreeWrapper); + PVR_LOG_RETURN_IF_ERROR(eError, "IterateOverChildren->HandleUnrefAndMaybeMarkForFree"); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR HandleFreePrivData(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType); + +static PVRSRV_ERROR HandleFreePrivDataWrapper(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle) +{ + return HandleFreePrivData(psBase, hHandle, PVRSRV_HANDLE_TYPE_NONE); +} + +static PVRSRV_ERROR HandleFreePrivData(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType) +{ + HANDLE_DATA *psHandleData = NULL; + PVRSRV_ERROR eError; + + eError = GetHandleData(psBase, &psHandleData, hHandle, eType); + PVR_RETURN_IF_ERROR(eError); + + /* Call the release data callback for each reference on the handle */ + if (psHandleData->pfnReleaseData != NULL) + { + eError = psHandleData->pfnReleaseData(psHandleData->pvData); + if (eError == PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_MESSAGE, + "FreeHandle: " + "Got retry while calling release data callback for %p (type = %d)", + hHandle, + (IMG_UINT32)psHandleData->eType)); + + return eError; + } + else if (eError != PVRSRV_OK) + { + return eError; + } + + /* we don't need this so make sure it's not called on + * the pvData for the second time + */ + psHandleData->pfnReleaseData = NULL; + } + + /* Free children's data */ + eError = IterateOverChildren(psBase, psHandleData, + HandleFreePrivDataWrapper); + PVR_LOG_RETURN_IF_ERROR(eError, "IterateOverChildren->HandleFreePrivData"); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR HandleFreeDestroy(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE 
hHandle, + PVRSRV_HANDLE_TYPE eType); + +static PVRSRV_ERROR HandleFreeDestroyWrapper(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle) +{ + return HandleFreeDestroy(psBase, hHandle, PVRSRV_HANDLE_TYPE_NONE); +} + +static PVRSRV_ERROR HandleFreeDestroy(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType) +{ + HANDLE_DATA *psHandleData = NULL; + HANDLE_DATA *psReleasedHandleData; + PVRSRV_ERROR eError; + + eError = GetHandleData(psBase, &psHandleData, hHandle, eType); + PVR_RETURN_IF_ERROR(eError); + + eError = UnlinkFromParent(psBase, psHandleData); + PVR_LOG_RETURN_IF_ERROR(eError, "UnlinkFromParent"); + + if (!TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) + { + HAND_KEY aKey; + IMG_HANDLE hRemovedHandle; + + InitKey(aKey, psBase, psHandleData->pvData, psHandleData->eType, + ParentIfPrivate(psHandleData)); + + hRemovedHandle = (IMG_HANDLE) HASH_Remove_Extended(psBase->psHashTab, + aKey); + + PVR_ASSERT(hRemovedHandle != NULL); + PVR_ASSERT(hRemovedHandle == psHandleData->hHandle); + PVR_UNREFERENCED_PARAMETER(hRemovedHandle); + } + + /* Free children */ + eError = IterateOverChildren(psBase, psHandleData, HandleFreeDestroyWrapper); + PVR_LOG_RETURN_IF_ERROR(eError, "IterateOverChildren->HandleFreeDestroy"); + + eError = gpsHandleFuncs->pfnReleaseHandle(psBase->psImplBase, + psHandleData->hHandle, + (void **)&psReleasedHandleData); + OSFreeMem(psHandleData); + PVR_LOG_RETURN_IF_ERROR(eError, "pfnReleaseHandle"); + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVReleaseHandleStagedUnlock(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + PVR_ASSERT(gpsHandleFuncs); + + PVR_LOG_RETURN_IF_FALSE(psBase != NULL, "psBase invalid", + PVRSRV_ERROR_INVALID_PARAMS); + + eError = HandleUnrefAndMaybeMarkForFree(psBase, hHandle, eType); + if (eError == PVRSRV_ERROR_OBJECT_STILL_REFERENCED) + { + return PVRSRV_OK; + } + else if (eError 
!= PVRSRV_OK) + { + return eError; + } + + UnlockHandle(psBase); + + eError = HandleFreePrivData(psBase, hHandle, eType); + if (eError != PVRSRV_OK) + { + LockHandle(psBase); + return eError; + } + + LockHandle(psBase); + + return HandleFreeDestroy(psBase, hHandle, eType); +} + +/*! +******************************************************************************* + @Function PVRSRVAllocHandleBase + @Description Allocate a handle base structure for a process + @Input eType - handle type + @Output ppsBase - points to handle base structure pointer + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase, + PVRSRV_HANDLE_BASE_TYPE eType) +{ + PVRSRV_HANDLE_BASE *psBase; + PVRSRV_ERROR eError; + + PVR_LOG_RETURN_IF_FALSE(gpsHandleFuncs != NULL, "handle management not initialised", + PVRSRV_ERROR_NOT_READY); + PVR_LOG_RETURN_IF_INVALID_PARAM(ppsBase != NULL, "ppsBase"); + + psBase = OSAllocZMem(sizeof(*psBase)); + PVR_LOG_RETURN_IF_NOMEM(psBase, "psBase"); + + eError = OSLockCreate(&psBase->hLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", ErrorFreeHandleBase); + + psBase->eType = eType; + + LockHandle(psBase); + + eError = gpsHandleFuncs->pfnCreateHandleBase(&psBase->psImplBase); + PVR_GOTO_IF_ERROR(eError, ErrorUnlock); + + psBase->psHashTab = HASH_Create_Extended(HANDLE_HASH_TAB_INIT_SIZE, + sizeof(HAND_KEY), + HASH_Func_Default, + HASH_Key_Comp_Default); + PVR_LOG_GOTO_IF_FALSE(psBase->psHashTab != NULL, "couldn't create data pointer" + " hash table", ErrorDestroyHandleBase); + + *ppsBase = psBase; + + UnlockHandle(psBase); + + return PVRSRV_OK; + +ErrorDestroyHandleBase: + (void)gpsHandleFuncs->pfnDestroyHandleBase(psBase->psImplBase); + +ErrorUnlock: + UnlockHandle(psBase); + OSLockDestroy(psBase->hLock); + +ErrorFreeHandleBase: + OSFreeMem(psBase); + + return eError; +} + +#if defined(DEBUG) +typedef struct _COUNT_HANDLE_DATA_ +{ + 
#if defined(DEBUG)
typedef struct _COUNT_HANDLE_DATA_
{
	PVRSRV_HANDLE_BASE *psBase;	/* base whose handles are being counted */
	IMG_UINT32 uiHandleDataCount;	/* running count of live handle-data entries */
} COUNT_HANDLE_DATA;

/* Used to count the number of handles that have data associated with them */
static PVRSRV_ERROR CountHandleDataWrapper(IMG_HANDLE hHandle, void *pvData)
{
	COUNT_HANDLE_DATA *psData = (COUNT_HANDLE_DATA *)pvData;
	HANDLE_DATA *psHandleData = NULL;
	PVRSRV_ERROR eError;

	PVR_ASSERT(gpsHandleFuncs);

	PVR_LOG_RETURN_IF_INVALID_PARAM(psData != NULL, "psData");
	PVR_LOG_RETURN_IF_INVALID_PARAM(psData->psBase != NULL, "psData->psBase");

	/* TYPE_NONE = match any handle type. */
	eError = GetHandleData(psData->psBase,
			       &psHandleData,
			       hHandle,
			       PVRSRV_HANDLE_TYPE_NONE);
	PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData");

	if (psHandleData != NULL)
	{
		psData->uiHandleDataCount++;
	}

	return PVRSRV_OK;
}

/* Print a handle in the handle base. Used with the iterator callback. */
static PVRSRV_ERROR ListHandlesInBase(IMG_HANDLE hHandle, void *pvData)
{
	PVRSRV_HANDLE_BASE *psBase = (PVRSRV_HANDLE_BASE*) pvData;
	HANDLE_DATA *psHandleData = NULL;
	PVRSRV_ERROR eError;

	PVR_ASSERT(gpsHandleFuncs);

	PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase");

	eError = GetHandleData(psBase,
			       &psHandleData,
			       hHandle,
			       PVRSRV_HANDLE_TYPE_NONE);
	PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData");

	if (psHandleData != NULL)
	{
		PVR_DPF((PVR_DBG_WARNING, "    Handle: %6u, Refs: %3u, Type: %s (%u), pvData<%p>",
			 (IMG_UINT32) (uintptr_t) psHandleData->hHandle,
			 psHandleData->ui32RefCount,
			 HandleTypeToString(psHandleData->eType),
			 psHandleData->eType,
			 psHandleData->pvData));
	}

	return PVRSRV_OK;
}

#endif /* defined(DEBUG) */

/* True once at least ui64MaxBridgeTime ns have elapsed since ui64TimeStart. */
static INLINE IMG_BOOL _CheckIfMaxTimeExpired(IMG_UINT64 ui64TimeStart, IMG_UINT64 ui64MaxBridgeTime)
{
	/* unsigned arithmetic is well defined so this will wrap around correctly */
	return (OSClockns64() - ui64TimeStart) >= ui64MaxBridgeTime;
}

/* Iterator over KERNEL_HANDLE_BASE: records in psData->hKernelHandle the
 * kernel handle (if any) whose pvData matches the process handle currently
 * held in psData->psProcessHandleData. */
static PVRSRV_ERROR FreeKernelHandlesWrapperIterKernel(IMG_HANDLE hHandle, void *pvData)
{
	FREE_KERNEL_HANDLE_DATA *psData = (FREE_KERNEL_HANDLE_DATA *)pvData;
	HANDLE_DATA *psKernelHandleData = NULL;
	PVRSRV_ERROR eError;

	PVR_ASSERT(gpsHandleFuncs);

	/* Get kernel handle data. */
	eError = GetHandleData(KERNEL_HANDLE_BASE,
			       &psKernelHandleData,
			       hHandle,
			       PVRSRV_HANDLE_TYPE_NONE);
	PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData");

	if (psKernelHandleData->pvData == psData->psProcessHandleData->pvData)
	{
		/* This kernel handle belongs to our process handle. */
		psData->hKernelHandle = hHandle;
	}

	return PVRSRV_OK;
}

/* Iterator over a process handle base: for each process handle that may also
 * appear in the kernel handle base, find and release the matching kernel
 * handle. */
static PVRSRV_ERROR FreeKernelHandlesWrapperIterProcess(IMG_HANDLE hHandle, void *pvData)
{
	FREE_KERNEL_HANDLE_DATA *psData = (FREE_KERNEL_HANDLE_DATA *)pvData;
	PVRSRV_ERROR eError;

	PVR_ASSERT(gpsHandleFuncs);

	/* Get process handle data. */
	eError = GetHandleData(psData->psBase,
			       &psData->psProcessHandleData,
			       hHandle,
			       PVRSRV_HANDLE_TYPE_NONE);
	PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData");

	/* NOTE(review): this compares eFlag for equality with the MULTI flag
	 * rather than testing the bit (TEST_ALLOC_FLAG) as done elsewhere in
	 * this file — confirm whether handles with MULTI plus other flag bits
	 * should also be considered here. */
	if (psData->psProcessHandleData->eFlag == PVRSRV_HANDLE_ALLOC_FLAG_MULTI
#if defined(SUPPORT_INSECURE_EXPORT)
	    || psData->psProcessHandleData->eType == PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT
#endif
	    )
	{
		/* Only multi alloc process handles might be in kernel handle base. */
		psData->hKernelHandle = NULL;
		/* Iterate over kernel handles. */
		eError = gpsHandleFuncs->pfnIterateOverHandles(KERNEL_HANDLE_BASE->psImplBase,
							       &FreeKernelHandlesWrapperIterKernel,
							       (void *)psData);
		PVR_LOG_RETURN_IF_FALSE(eError == PVRSRV_OK, "failed to iterate over kernel handles",
					eError);

		if (psData->hKernelHandle)
		{
			/* Release kernel handle which belongs to our process handle. */
			eError = gpsHandleFuncs->pfnReleaseHandle(KERNEL_HANDLE_BASE->psImplBase,
								  psData->hKernelHandle,
								  NULL);
			PVR_LOG_RETURN_IF_FALSE(eError == PVRSRV_OK, "couldn't release kernel handle",
						eError);
		}
	}

	return PVRSRV_OK;
}

/* Iterator used by PVRSRVFreeHandleBase: fully releases one handle of the
 * type currently selected in psData->eHandleFreeType — drains its refcount
 * (calling the data-release callback each time), removes its hash entry,
 * clears the implementation data and frees the HANDLE_DATA. May yield the
 * CPU when the configured time slice has expired. */
static PVRSRV_ERROR FreeHandleDataWrapper(IMG_HANDLE hHandle, void *pvData)
{
	FREE_HANDLE_DATA *psData = (FREE_HANDLE_DATA *)pvData;
	HANDLE_DATA *psHandleData = NULL;
	PVRSRV_ERROR eError;

	PVR_ASSERT(gpsHandleFuncs);

	PVR_LOG_RETURN_IF_INVALID_PARAM(psData != NULL, "psData");
	PVR_LOG_RETURN_IF_INVALID_PARAM(psData->psBase != NULL, "psData->psBase");
	PVR_LOG_RETURN_IF_INVALID_PARAM(psData->eHandleFreeType != PVRSRV_HANDLE_TYPE_NONE,
					"psData->eHandleFreeType");

	eError = GetHandleData(psData->psBase,
			       &psHandleData,
			       hHandle,
			       PVRSRV_HANDLE_TYPE_NONE);
	PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData");

	/* Skip handles that are not of the type currently being freed; the
	 * caller iterates once per type in destruction order. */
	if (psHandleData == NULL || psHandleData->eType != psData->eHandleFreeType)
	{
		return PVRSRV_OK;
	}

	PVR_ASSERT(psHandleData->ui32RefCount > 0);

	/* Drop every remaining reference, invoking the release callback for
	 * each. RETRY from the callback is propagated so the whole free pass
	 * can be re-run later. */
	while (psHandleData->ui32RefCount != 0)
	{
		if (psHandleData->pfnReleaseData != NULL)
		{
			eError = psHandleData->pfnReleaseData(psHandleData->pvData);
			if (eError == PVRSRV_ERROR_RETRY)
			{
				PVR_DPF((PVR_DBG_MESSAGE,
					 "%s: "
					 "Got retry while calling release data callback for %p (type = %d)",
					 __func__,
					 hHandle,
					 (IMG_UINT32)psHandleData->eType));

				return eError;
			}
			else if (eError != PVRSRV_OK)
			{
				return eError;
			}
		}

		_HandleUnref(psHandleData);
	}

	/* Non-MULTI handles have a reverse-lookup hash entry to remove. */
	if (!TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
	{
		HAND_KEY aKey;
		IMG_HANDLE hRemovedHandle;

		InitKey(aKey,
			psData->psBase,
			psHandleData->pvData,
			psHandleData->eType,
			ParentIfPrivate(psHandleData));

		hRemovedHandle = (IMG_HANDLE)HASH_Remove_Extended(psData->psBase->psHashTab, aKey);

		PVR_ASSERT(hRemovedHandle != NULL);
		PVR_ASSERT(hRemovedHandle == psHandleData->hHandle);
		PVR_UNREFERENCED_PARAMETER(hRemovedHandle);
	}

	eError = gpsHandleFuncs->pfnSetHandleData(psData->psBase->psImplBase, hHandle, NULL);
	PVR_RETURN_IF_ERROR(eError);

	OSFreeMem(psHandleData);

	/* If we reach the end of the time slice release we can release the global
	 * lock, invoke the scheduler and reacquire the lock */
	if ((psData->ui64MaxBridgeTime != 0) && _CheckIfMaxTimeExpired(psData->ui64TimeStart, psData->ui64MaxBridgeTime))
	{
		PVR_DPF((PVR_DBG_MESSAGE,
			 "%s: Lock timeout (timeout: %" IMG_UINT64_FMTSPEC")",
			 __func__,
			 psData->ui64MaxBridgeTime));
		/* UnlockHandle(psData->psBase); - func only run in single thread ctx */
		/* Invoke the scheduler to check if other processes are waiting for the lock */
		OSReleaseThreadQuanta();
		/* LockHandle(psData->psBase); - func only run in single thread ctx */
		/* Set again lock timeout and reset the counter */
		psData->ui64TimeStart = OSClockns64();
		PVR_DPF((PVR_DBG_MESSAGE, "%s: Lock acquired again", __func__));
	}

	return PVRSRV_OK;
}
/* The Ordered Array of PVRSRV_HANDLE_TYPE Enum Entries.
 *
 * Some handles must be destroyed prior to other handles; such relationships
 * are established with respect to handle types, so the elements of this
 * array have to maintain a specific order. For example,
 * PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET must be placed before
 * PVRSRV_HANDLE_TYPE_RGX_FREELIST, because the freelist contains an internal
 * ref-count that is decremented by the destroy routine for KM_HW_RT_DATASET,
 * even though at the bridge level the two types have no explicit
 * sub-handle/parent-handle relationship.
 *
 * If the ordering is incorrect the driver may fail during cleanup. These
 * dependencies do not necessarily come from bridge-specified handle
 * attributes ('sub handle'/'parent handle'); they may come from
 * internal/private ref-counters in the objects referenced by the handles.
 *
 * BE CAREFUL when adding/deleting/moving handle types.
 */
static const PVRSRV_HANDLE_TYPE g_aeOrderedFreeList[] =
{
	PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
	PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
	PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
	/* KM_HW_RT_DATASET must precede RGX_FREELIST (see header comment). */
	PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET,
	PVRSRV_HANDLE_TYPE_RGX_FREELIST,
	PVRSRV_HANDLE_TYPE_RGX_MEMORY_BLOCK,
	PVRSRV_HANDLE_TYPE_RGX_POPULATION,
	PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
	PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
	PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
	PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
	PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
	PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT,
	PVRSRV_HANDLE_TYPE_RI_HANDLE,
	PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE,
	PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
	PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER,
	PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT,
	PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER,
	PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
	PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
	PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
	PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT,
	PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
	PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
	PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_PAGELIST,
	PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_SECURE_EXPORT,
	PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
	PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
	PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
	PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
	PVRSRV_HANDLE_TYPE_DC_PIN_HANDLE,
	PVRSRV_HANDLE_TYPE_DC_BUFFER,
	PVRSRV_HANDLE_TYPE_DC_DISPLAY_CONTEXT,
	PVRSRV_HANDLE_TYPE_DC_DEVICE,
	PVRSRV_HANDLE_TYPE_PVR_TL_SD,
	PVRSRV_HANDLE_TYPE_MM_PLAT_CLEANUP
};
/*!
*******************************************************************************
 @Function	PVRSRVFreeKernelHandles
 @Description	Free kernel handles which belong to process handles
 @Input		psBase - pointer to handle base structure
 @Return	Error code or PVRSRV_OK
******************************************************************************/
PVRSRV_ERROR PVRSRVFreeKernelHandles(PVRSRV_HANDLE_BASE *psBase)
{
	FREE_KERNEL_HANDLE_DATA sHandleData = {NULL};
	PVRSRV_ERROR eError;

	PVR_ASSERT(gpsHandleFuncs);

	LockHandle(psBase);

	sHandleData.psBase = psBase;
	/* Iterate over process handles. */
	eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
						       &FreeKernelHandlesWrapperIterProcess,
						       (void *)&sHandleData);
	PVR_LOG_GOTO_IF_ERROR(eError, "pfnIterateOverHandles", ExitUnlock);

	eError = PVRSRV_OK;

ExitUnlock:
	UnlockHandle(psBase);

	return eError;
}

/*!
*******************************************************************************
 @Function	PVRSRVRetrieveProcessHandleBase
 @Description	Returns a pointer to the process handle base for the current
		process. If the current process is the cleanup thread, then the
		process handle base for the process currently being cleaned up
		is returned
 @Return	Pointer to the process handle base, or NULL if not found.
******************************************************************************/
PVRSRV_HANDLE_BASE *PVRSRVRetrieveProcessHandleBase(void)
{
	PVRSRV_HANDLE_BASE *psHandleBase = NULL;
	PROCESS_HANDLE_BASE *psProcHandleBase = NULL;
	PVRSRV_DATA *psPvrData = PVRSRVGetPVRSRVData();
	IMG_PID ui32PurgePid = PVRSRVGetPurgeConnectionPid();

	OSLockAcquire(psPvrData->hProcessHandleBase_Lock);

	/* Check to see if we're being called from the cleanup thread... */
	if ((OSGetCurrentClientProcessIDKM() == psPvrData->cleanupThreadPid) &&
	    (ui32PurgePid > 0))
	{
		/* Check to see if the cleanup thread has already removed the
		 * process handle base from the HASH table.
		 */
		psHandleBase = psPvrData->psProcessHandleBaseBeingFreed;
		/* psHandleBase shouldn't be null, as cleanup thread
		 * should be removing this from the HASH table before
		 * we get here, so assert if not.
		 */
		PVR_ASSERT(psHandleBase);
	}
	else
	{
		/* Not being called from the cleanup thread, so return the process
		 * handle base for the current process.
		 */
		psProcHandleBase = (PROCESS_HANDLE_BASE*) HASH_Retrieve(psPvrData->psProcessHandleBase_Table,
									OSGetCurrentClientProcessIDKM());
	}
	OSLockRelease(psPvrData->hProcessHandleBase_Lock);

	/* Lookup path: dereference the PROCESS_HANDLE_BASE wrapper outside
	 * the lock. */
	if (psHandleBase == NULL && psProcHandleBase != NULL)
	{
		psHandleBase = psProcHandleBase->psHandleBase;
	}
	return psHandleBase;
}

/*!
*******************************************************************************
 @Function	PVRSRVFreeHandleBase
 @Description	Free a handle base structure: (in DEBUG builds) count and
		optionally list leftover handles, then free all handles
		type-by-type in g_aeOrderedFreeList order, delete the hash
		table and destroy the implementation base.
 @Input		psBase - pointer to handle base structure
 @Input		ui64MaxBridgeTime - time-slice (ns) after which the free loop
		yields the CPU; 0 disables yielding
 @Return	Error code or PVRSRV_OK
******************************************************************************/
PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase, IMG_UINT64 ui64MaxBridgeTime)
{
#if defined(DEBUG)
	COUNT_HANDLE_DATA sCountData = {NULL};
#endif
	FREE_HANDLE_DATA sHandleData = {NULL};
	IMG_UINT32 i;
	PVRSRV_ERROR eError;
	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
	IMG_PID uiCleanupPid = psPVRSRVData->cleanupThreadPid;

	PVR_ASSERT(gpsHandleFuncs);

	/* LockHandle(psBase); - func only run in single thread ctx */

	/* If this is a process handle base being freed by the cleanup
	 * thread, store this in psPVRSRVData->psProcessHandleBaseBeingFreed
	 * (PVRSRVRetrieveProcessHandleBase reads it while we work).
	 * NOTE(review): it is only reset to NULL on the error path below, not
	 * on success — confirm the caller clears it after a successful free. */
	if ((OSGetCurrentClientProcessIDKM() == uiCleanupPid) &&
	    (psBase->eType == PVRSRV_HANDLE_BASE_TYPE_PROCESS))
	{
		psPVRSRVData->psProcessHandleBaseBeingFreed = psBase;
	}

	sHandleData.psBase = psBase;
	sHandleData.ui64TimeStart = OSClockns64();
	sHandleData.ui64MaxBridgeTime = ui64MaxBridgeTime;


#if defined(DEBUG)

	sCountData.psBase = psBase;

	eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
						       &CountHandleDataWrapper,
						       (void *)&sCountData);
	PVR_LOG_GOTO_IF_ERROR(eError, "pfnIterateOverHandles", ExitUnlock);

	if (sCountData.uiHandleDataCount != 0)
	{
		/* Only list individual handles below a sanity threshold. */
		IMG_BOOL bList = sCountData.uiHandleDataCount < HANDLE_DEBUG_LISTING_MAX_NUM;

		PVR_DPF((PVR_DBG_WARNING,
			 "%s: %u remaining handles in handle base 0x%p "
			 "(PVRSRV_HANDLE_BASE_TYPE %u).%s",
			 __func__,
			 sCountData.uiHandleDataCount,
			 psBase,
			 psBase->eType,
			 bList ? "": " Skipping details, too many items..."));

		if (bList)
		{
			PVR_DPF((PVR_DBG_WARNING, "-------- Listing Handles --------"));
			(void) gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
								     &ListHandlesInBase,
								     psBase);
			PVR_DPF((PVR_DBG_WARNING, "-------- Done Listing    --------"));
		}
	}

#endif /* defined(DEBUG) */

	/*
	 * As we're freeing handles based on type, make sure all
	 * handles have actually had their data freed to avoid
	 * resources being leaked
	 */
	for (i = 0; i < ARRAY_SIZE(g_aeOrderedFreeList); i++)
	{
		sHandleData.eHandleFreeType = g_aeOrderedFreeList[i];

		/* Make sure all handles have been freed before destroying the handle base */
		eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
							       &FreeHandleDataWrapper,
							       (void *)&sHandleData);
		PVR_GOTO_IF_ERROR(eError, ExitUnlock);
	}


	if (psBase->psHashTab != NULL)
	{
		HASH_Delete(psBase->psHashTab);
	}

	eError = gpsHandleFuncs->pfnDestroyHandleBase(psBase->psImplBase);
	PVR_GOTO_IF_ERROR(eError, ExitUnlock);

	/* UnlockHandle(psBase); - func only run in single thread ctx */
	OSLockDestroy(psBase->hLock);
	OSFreeMem(psBase);

	return eError;

ExitUnlock:
	/* Error path: clear the being-freed marker set above so stale
	 * pointers are not observed by PVRSRVRetrieveProcessHandleBase. */
	if (OSGetCurrentClientProcessIDKM() == uiCleanupPid)
	{
		psPVRSRVData->psProcessHandleBaseBeingFreed = NULL;
	}
	/* UnlockHandle(psBase); - func only run in single thread ctx */

	return eError;
}

/*!
*******************************************************************************
 @Function	PVRSRVHandleInit
 @Description	Initialise handle management: create the global lock, fetch
		the implementation function table, allocate the kernel
		(global) handle base and enable purging on it.
 @Return	Error code or PVRSRV_OK
******************************************************************************/
PVRSRV_ERROR PVRSRVHandleInit(void)
{
	PVRSRV_ERROR eError;

	PVR_ASSERT(gpsKernelHandleBase == NULL);
	PVR_ASSERT(gpsHandleFuncs == NULL);
	PVR_ASSERT(!gbLockInitialised);

	eError = OSLockCreate(&gKernelHandleLock);
	PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate");

	gbLockInitialised = IMG_TRUE;

	eError = PVRSRVHandleGetFuncTable(&gpsHandleFuncs);
	PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVHandleGetFuncTable", ErrorHandleDeinit);

	eError = PVRSRVAllocHandleBase(&gpsKernelHandleBase,
				       PVRSRV_HANDLE_BASE_TYPE_GLOBAL);
	PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAllocHandleBase", ErrorHandleDeinit);

	eError = gpsHandleFuncs->pfnEnableHandlePurging(gpsKernelHandleBase->psImplBase);
	PVR_LOG_GOTO_IF_ERROR(eError, "pfnEnableHandlePurging",
			      ErrorHandleDeinit);

	return PVRSRV_OK;

ErrorHandleDeinit:
	/* DeInit tolerates partial initialisation, so one unwind path
	 * suffices for every failure point above. */
	(void) PVRSRVHandleDeInit();

	return eError;
}
+******************************************************************************* + @Function PVRSRVHandleDeInit + @Description De-initialise handle management + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVHandleDeInit(void) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if (gpsHandleFuncs != NULL) + { + if (gpsKernelHandleBase != NULL) + { + eError = PVRSRVFreeHandleBase(gpsKernelHandleBase, 0 /* do not release bridge lock */); + if (eError == PVRSRV_OK) + { + gpsKernelHandleBase = NULL; + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVHandleDeInit: FreeHandleBase failed (%s)", + PVRSRVGetErrorString(eError))); + } + } + + if (eError == PVRSRV_OK) + { + gpsHandleFuncs = NULL; + } + } + else + { + /* If we don't have a handle function table we shouldn't have a handle base either */ + PVR_ASSERT(gpsKernelHandleBase == NULL); + } + + if (gbLockInitialised) + { + OSLockDestroy(gKernelHandleLock); + gbLockInitialised = IMG_FALSE; + } + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/common/htb_debug.c b/drivers/mcst/gpu-imgtec/services/server/common/htb_debug.c new file mode 100644 index 000000000000..fe3f006d7265 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/htb_debug.c @@ -0,0 +1,1239 @@ +/*************************************************************************/ /*! +@File htb_debug.c +@Title Debug Functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provides kernel side debugFS Functionality. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "rgxdevice.h"
#include "htbserver.h"
#include "htbuffer.h"
#include "htbuffer_types.h"
#include "tlstream.h"
#include "tlclient.h"
#include "pvrsrv_tlcommon.h"
#include "di_server.h"
#include "img_types.h"
#include "img_defs.h"
#include "pvrsrv_error.h"
#include "osfunc.h"
#include "allocmem.h"
#include "pvr_notifier.h"
#include "pvrsrv.h"
#include "htb_debug.h"

// Global data handles for buffer manipulation and processing
typedef struct
{
	DI_ENTRY *psDumpHostDiEntry;	/* debug info entry */
	IMG_HANDLE hStream;		/* Stream handle for debugFS use */
} HTB_DBG_INFO;

static HTB_DBG_INFO g_sHTBData;

// Enable for extra debug level
//#define HTB_CHATTY 1

typedef void (DI_PRINTF)(const OSDI_IMPL_ENTRY *, const IMG_CHAR *, ...);

/******************************************************************************
 * debugFS display routines
 *****************************************************************************/
static int HTBDumpBuffer(DI_PRINTF, OSDI_IMPL_ENTRY *, void *);

/* DI 'show' callback: render one HTB message (pvData) into the DI entry.
 * Returns a negative value to terminate the DI read. */
static int _DebugHBTraceDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData)
{
	int retVal;

	PVR_ASSERT(psEntry != NULL);

	/* psEntry should never be NULL */
	if (psEntry == NULL)
	{
		return -1;
	}

	/*
	 * Ensure that we have a valid address to use to dump info from. If NULL we
	 * return a failure code to terminate the DI read call. pvData is either
	 * DI_START_TOKEN (for the initial call) or an HTB buffer address for
	 * subsequent calls [returned from the NEXT function].
	 */
	if (pvData == NULL)
	{
		return -1;
	}

	retVal = HTBDumpBuffer(DIPrintf, psEntry, pvData);

#ifdef HTB_CHATTY
	PVR_DPF((PVR_DBG_WARNING, "%s: Returning %d", __func__, retVal));
#endif /* HTB_CHATTY */

	return retVal;
}

/* Per-reader iteration state, hooked on the DI entry's private data. */
typedef struct {
	IMG_PBYTE pBuf;		/* Raw data buffer from TL stream */
	IMG_UINT32 uiBufLen;	/* Amount of data to process from 'pBuf' */
	IMG_UINT32 uiTotal;	/* Total bytes processed */
	IMG_UINT32 uiMsgLen;	/* Length of HTB message to be processed */
	IMG_PBYTE pCurr;	/* pointer to current message to be decoded */
	IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];	/* Output string */
} HTB_Sentinel_t;

static IMG_UINT32 idToLogIdx(IMG_UINT32); /* Forward declaration */
+ */ +static IMG_PBYTE HTB_GetNextMessage(HTB_Sentinel_t *); +static IMG_PBYTE HTB_GetNextMessage(HTB_Sentinel_t *pSentinel) +{ + void *pNext, *pLast, *pStart, *pData = NULL; + void *pCurrent; /* Current processing point within buffer */ + PVRSRVTL_PPACKETHDR ppHdr; /* Current packet header */ + IMG_UINT32 uiHdrType; /* Packet header type */ + IMG_UINT32 uiMsgSize; /* Message size of current packet (bytes) */ + IMG_UINT32 ui32DataSize; + IMG_UINT32 uiBufLen; + IMG_BOOL bUnrecognizedErrorPrinted = IMG_FALSE; + IMG_UINT32 ui32Data; + IMG_UINT32 ui32LogIdx; + PVRSRV_ERROR eError; + + PVR_ASSERT(NULL != pSentinel); + + uiBufLen = pSentinel->uiBufLen; + /* Convert from byte to uint32 size */ + ui32DataSize = pSentinel->uiBufLen / sizeof(IMG_UINT32); + + pLast = pSentinel->pBuf + pSentinel->uiBufLen; + + pStart = pSentinel->pBuf; + + pNext = pStart; + pSentinel->uiMsgLen = 0; // Reset count for this message + uiMsgSize = 0; // nothing processed so far + ui32LogIdx = HTB_SF_LAST; // Loop terminator condition + + do + { + /* + * If we've drained the buffer we must RELEASE and ACQUIRE some more. + */ + if (pNext >= pLast) + { + eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream); + PVR_ASSERT(eError == PVRSRV_OK); + + eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, + g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen); + + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s'", __func__, + "TLClientAcquireData", PVRSRVGETERRORSTRING(eError))); + return NULL; + } + + // Reset our limits - if we've returned an empty buffer we're done. + pLast = pSentinel->pBuf + pSentinel->uiBufLen; + pStart = pSentinel->pBuf; + pNext = pStart; + + if (pStart == NULL || pLast == NULL) + { + return NULL; + } + } + + /* + * We should have a header followed by data block(s) in the stream. 
+ */ + + pCurrent = pNext; + ppHdr = GET_PACKET_HDR(pCurrent); + + if (ppHdr == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Unexpected NULL packet in Host Trace buffer", + __func__)); + pSentinel->uiMsgLen += uiMsgSize; + return NULL; // This should never happen + } + + /* + * This should *NEVER* fire. If it does it means we have got some + * dubious packet header back from the HTB stream. In this case + * the sensible thing is to abort processing and return to + * the caller + */ + uiHdrType = GET_PACKET_TYPE(ppHdr); + + PVR_ASSERT(uiHdrType < PVRSRVTL_PACKETTYPE_LAST && + uiHdrType > PVRSRVTL_PACKETTYPE_UNDEF); + + if (uiHdrType < PVRSRVTL_PACKETTYPE_LAST && + uiHdrType > PVRSRVTL_PACKETTYPE_UNDEF) + { + /* + * We have a (potentially) valid data header. We should see if + * the associated packet header matches one of our expected + * types. + */ + pNext = GET_NEXT_PACKET_ADDR(ppHdr); + + PVR_ASSERT(pNext != NULL); + + uiMsgSize = (IMG_UINT32)((size_t)pNext - (size_t)ppHdr); + + pSentinel->uiMsgLen += uiMsgSize; + + pData = GET_PACKET_DATA_PTR(ppHdr); + + /* + * Handle non-DATA packet types. These include PAD fields which + * may have data associated and other types. We simply discard + * these as they have no decodable information within them. + */ + if (uiHdrType != PVRSRVTL_PACKETTYPE_DATA) + { + /* + * Now release the current non-data packet and proceed to the + * next entry (if any). 
+ */ + eError = TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE, + g_sHTBData.hStream, uiMsgSize); + +#ifdef HTB_CHATTY + PVR_DPF((PVR_DBG_WARNING, "%s: Packet Type %x Length %u", + __func__, uiHdrType, uiMsgSize)); +#endif + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED - '%s' message" + " size %u", __func__, "TLClientReleaseDataLess", + PVRSRVGETERRORSTRING(eError), uiMsgSize)); + } + + eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, + g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen); + + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED - %s Giving up", + __func__, "TLClientAcquireData", + PVRSRVGETERRORSTRING(eError))); + + return NULL; + } + pSentinel->uiMsgLen = 0; + // Reset our limits - if we've returned an empty buffer we're done. + pLast = pSentinel->pBuf + pSentinel->uiBufLen; + pStart = pSentinel->pBuf; + pNext = pStart; + + if (pStart == NULL || pLast == NULL) + { + return NULL; + } + continue; + } + if (pData == NULL || pData >= pLast) + { + continue; + } + ui32Data = *(IMG_UINT32 *)pData; + ui32LogIdx = idToLogIdx(ui32Data); + } + else + { + PVR_DPF((PVR_DBG_WARNING, "Unexpected Header @%p value %x", + ppHdr, uiHdrType)); + + return NULL; + } + + /* + * Check if the unrecognized ID is valid and therefore, tracebuf + * needs updating. 
+ */ + if (HTB_SF_LAST == ui32LogIdx && HTB_LOG_VALIDID(ui32Data) + && IMG_FALSE == bUnrecognizedErrorPrinted) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Unrecognised LOG value '%x' GID %x Params %d ID %x @ '%p'", + __func__, ui32Data, HTB_SF_GID(ui32Data), + HTB_SF_PARAMNUM(ui32Data), ui32Data & 0xfff, pData)); + bUnrecognizedErrorPrinted = IMG_FALSE; + } + + } while (HTB_SF_LAST == ui32LogIdx); + +#ifdef HTB_CHATTY + PVR_DPF((PVR_DBG_WARNING, "%s: Returning data @ %p Log value '%x'", + __func__, pCurrent, ui32Data)); +#endif /* HTB_CHATTY */ + + return pCurrent; +} + +/* + * HTB_GetFirstMessage + * + * Called from START to obtain the buffer address of the first message within + * pSentinel->pBuf. Will ACQUIRE data if the buffer is empty. + * + * Input: + * pSentinel + * pui64Pos Offset within the debugFS file + * + * Output: + * pSentinel->pCurr Set to reference the first valid non-NULL message within + * the buffer. If no valid message is found set to NULL. + * pSentinel + * ->pBuf if unset on entry + * ->uiBufLen if pBuf unset on entry + * + * Side-effects: + * HTB TL stream will be updated to bypass any zero-length PAD messages before + * the first non-NULL message (if any). + */ +static void HTB_GetFirstMessage(HTB_Sentinel_t *, IMG_UINT64 *); +static void HTB_GetFirstMessage(HTB_Sentinel_t *pSentinel, IMG_UINT64 *pui64Pos) +{ + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(pui64Pos); + + if (pSentinel == NULL) + return; + + if (pSentinel->pBuf == NULL) + { + /* Acquire data */ + pSentinel->uiMsgLen = 0; + + eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, + g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen); + + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s'", + __func__, "TLClientAcquireData", PVRSRVGETERRORSTRING(eError))); + + pSentinel->pBuf = NULL; + pSentinel->pCurr = NULL; + } + else + { + /* + * If there is no data available we set pSentinel->pCurr to NULL + * and return. 
/*
 * HTB_GetFirstMessage
 *
 * Called from START to obtain the buffer address of the first message within
 * pSentinel->pBuf. Will ACQUIRE data if the buffer is empty.
 *
 * Input:
 *  pSentinel
 *  pui64Pos   Offset within the debugFS file (unused here)
 *
 * Output:
 *  pSentinel->pCurr  Set to reference the first valid non-NULL message within
 *                    the buffer. If no valid message is found set to NULL.
 *  pSentinel->pBuf / ->uiBufLen  filled if pBuf was unset on entry.
 *
 * Side-effects:
 *  HTB TL stream will be updated to bypass any zero-length PAD messages before
 *  the first non-NULL message (if any).
 */
static void HTB_GetFirstMessage(HTB_Sentinel_t *, IMG_UINT64 *);
static void HTB_GetFirstMessage(HTB_Sentinel_t *pSentinel, IMG_UINT64 *pui64Pos)
{
	PVRSRV_ERROR eError;

	PVR_UNREFERENCED_PARAMETER(pui64Pos);

	if (pSentinel == NULL)
		return;

	if (pSentinel->pBuf == NULL)
	{
		/* Acquire data */
		pSentinel->uiMsgLen = 0;

		eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
		    g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen);

		if (PVRSRV_OK != eError)
		{
			PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s'",
			    __func__, "TLClientAcquireData", PVRSRVGETERRORSTRING(eError)));

			pSentinel->pBuf = NULL;
			pSentinel->pCurr = NULL;
		}
		else
		{
			/*
			 * If there is no data available we set pSentinel->pCurr to NULL
			 * and return. This is expected behaviour if we've drained the
			 * data and nothing else has yet been produced.
			 */
			if (pSentinel->uiBufLen == 0 || pSentinel->pBuf == NULL)
			{
#ifdef HTB_CHATTY
				PVR_DPF((PVR_DBG_WARNING, "%s: Empty Buffer @ %p", __func__,
				    pSentinel->pBuf));
#endif /* HTB_CHATTY */
				pSentinel->pCurr = NULL;
				return;
			}
		}
	}

	/* Locate next message within buffer. NULL => no more data to process */
	pSentinel->pCurr = HTB_GetNextMessage(pSentinel);
}

/*
 * _DebugHBTraceDIStart:
 *
 * Returns the address to use for subsequent 'Show', 'Next', 'Stop' file ops.
 * Return DI_START_TOKEN for the very first call and allocate a sentinel for
 * use by the 'Show' routine and its helpers.
 * This is stored in the psEntry's private hook field.
 *
 * We obtain access to the TLstream associated with the HTB. If this doesn't
 * exist (because no pvrdebug capture trace has been set) we simply return with
 * a NULL value which will stop the DI traversal.
 */
static void *_DebugHBTraceDIStart(OSDI_IMPL_ENTRY *psEntry,
    IMG_UINT64 *pui64Pos)
{
	HTB_Sentinel_t *pSentinel = DIGetPrivData(psEntry);
	PVRSRV_ERROR eError;
	IMG_UINT32 uiTLMode;
	void *retVal;
	IMG_HANDLE hStream;

	/* Check to see if the HTB stream has been configured yet. If not, there is
	 * nothing to display so we just return NULL to stop the stream access.
	 */
	if (!HTBIsConfigured())
	{
		return NULL;
	}

	/* Open the stream in non-blocking mode so that we can determine if there
	 * is no data to consume. Also disable the producer callback (if any) and
	 * the open callback so that we do not generate spurious trace data when
	 * accessing the stream.
	 */
	uiTLMode = PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING|
		   PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK|
		   PVRSRV_STREAM_FLAG_IGNORE_OPEN_CALLBACK;

	/* If two or more processes try to read from this file at the same time
	 * the TLClientOpenStream() function will handle this by allowing only
	 * one of them to actually open the stream. The other process will get
	 * an error stating that the stream is already open. The open function
	 * is threads safe. */
	eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE, HTB_STREAM_NAME, uiTLMode,
	    &hStream);

	if (PVRSRV_ERROR_ALREADY_OPEN == eError)
	{
		/* Stream allows only one reader so return error if it's already
		 * opened. */
#ifdef HTB_CHATTY
		PVR_DPF((PVR_DBG_WARNING, "%s: Stream handle %p already exists for %s",
		    __func__, g_sHTBData.hStream, HTB_STREAM_NAME));
#endif
		return NULL;
	}
	else if (PVRSRV_OK != eError)
	{
		/*
		 * No stream available so nothing to report
		 */
		return NULL;
	}

	/* There is a window where hStream can be NULL but the stream is already
	 * opened. This shouldn't matter since the TLClientOpenStream() will make
	 * sure that only one stream can be opened and only one process can reach
	 * this place at a time. Also the .stop function will be always called
	 * after this function returns so there should be no risk of stream
	 * not being closed. */
	PVR_ASSERT(g_sHTBData.hStream == NULL);
	g_sHTBData.hStream = hStream;

	/*
	 * Ensure we have our debug-specific data store allocated and hooked from
	 * our DI entry private data.
	 * If the allocation fails we can safely return NULL which will stop
	 * further calls from the DI routines (NULL return from START or NEXT
	 * means we have no (more) data to process)
	 */
	if (pSentinel == NULL)
	{
		/* NOTE(review): an OSAllocZMem failure is not checked here; a NULL
		 * pSentinel is tolerated by HTB_GetFirstMessage and the retVal
		 * logic below — confirm this is the intended handling. */
		pSentinel = (HTB_Sentinel_t *)OSAllocZMem(sizeof(HTB_Sentinel_t));
		DISetPrivData(psEntry, pSentinel);
	}

	/*
	 * Find the first message location within pSentinel->pBuf
	 * => for DI_START_TOKEN we must issue our first ACQUIRE, also for the
	 * subsequent re-START calls (if any).
	 */

	HTB_GetFirstMessage(pSentinel, pui64Pos);

	if (*pui64Pos == 0)
	{
		retVal = DI_START_TOKEN;
	}
	else
	{
		if (pSentinel == NULL)
		{
			retVal = NULL;
		}
		else
		{
			retVal = (void *)pSentinel->pCurr;
		}
	}

#ifdef HTB_CHATTY
	PVR_DPF((PVR_DBG_WARNING, "%s: Returning %p, Stream %s @ %p", __func__,
	    retVal, HTB_STREAM_NAME, g_sHTBData.hStream));
#endif /* HTB_CHATTY */

	return retVal;

}
*/ + if (g_sHTBData.hStream != NULL) + { + PVRSRV_ERROR eError; + + if (uiMsgLen != 0) + { + eError = TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE, + g_sHTBData.hStream, uiMsgLen); + + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED - %s, nBytes %u", + __func__, "TLClientReleaseDataLess", + PVRSRVGETERRORSTRING(eError), uiMsgLen)); + } + } + + eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", + "TLClientCloseStream", PVRSRVGETERRORSTRING(eError), + __func__)); + } + g_sHTBData.hStream = NULL; + } + + if (pSentinel != NULL) + { + DISetPrivData(psEntry, NULL); + OSFreeMem(pSentinel); + } +} + + +/* + * _DebugHBTraceDINext: + * + * This is where we release any acquired data which has been processed by the + * DIShow routine. If we have encountered a DI entry overflow we stop + * processing and return NULL. Otherwise we release the message that we + * previously processed and simply update our position pointer to the next + * valid HTB message (if any) + */ +static void *_DebugHBTraceDINext(OSDI_IMPL_ENTRY *psEntry, void *pvPriv, + IMG_UINT64 *pui64Pos) +{ + IMG_UINT64 ui64CurPos; + HTB_Sentinel_t *pSentinel = DIGetPrivData(psEntry); + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(pvPriv); + + if (pui64Pos) + { + ui64CurPos = *pui64Pos; + *pui64Pos = ui64CurPos+1; + } + + /* + * Determine if we've had an overflow on the previous 'Show' call. If so + * we leave the previously acquired data in the queue (by releasing 0 bytes) + * and return NULL to end this DI entry iteration. 
+ * If we have not overflowed we simply get the next HTB message and use that + * for our display purposes + */ + + if (DIHasOverflowed(psEntry)) + { + (void)TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream, 0); + +#ifdef HTB_CHATTY + PVR_DPF((PVR_DBG_WARNING, "%s: OVERFLOW - returning NULL", __func__)); +#endif /* HTB_CHATTY */ + + return (void *)NULL; + } + else + { + eError = TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream, + pSentinel->uiMsgLen); + + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s' @ %p Length %d", + __func__, "TLClientReleaseDataLess", + PVRSRVGETERRORSTRING(eError), pSentinel->pCurr, + pSentinel->uiMsgLen)); + PVR_DPF((PVR_DBG_WARNING, "%s: Buffer @ %p..%p", __func__, + pSentinel->pBuf, + (IMG_PBYTE)(pSentinel->pBuf+pSentinel->uiBufLen))); + + } + + eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, + g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen); + + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s'\nPrev message len %d", + __func__, "TLClientAcquireData", PVRSRVGETERRORSTRING(eError), + pSentinel->uiMsgLen)); + pSentinel->pBuf = NULL; + } + + pSentinel->uiMsgLen = 0; // We don't (yet) know the message size + } + +#ifdef HTB_CHATTY + PVR_DPF((PVR_DBG_WARNING, "%s: Returning %p Msglen %d", + __func__, pSentinel->pBuf, pSentinel->uiMsgLen)); +#endif /* HTB_CHATTY */ + + if (pSentinel->pBuf == NULL || pSentinel->uiBufLen == 0) + { + return NULL; + } + + pSentinel->pCurr = HTB_GetNextMessage(pSentinel); + + return pSentinel->pCurr; +} + +/****************************************************************************** + * HTB Dumping routines and definitions + *****************************************************************************/ +#define IS_VALID_FMT_STRING(FMT) (strchr(FMT, '%') != NULL) +#define MAX_STRING_SIZE (128) + +typedef enum +{ + TRACEBUF_ARG_TYPE_INT, + TRACEBUF_ARG_TYPE_ERR, + TRACEBUF_ARG_TYPE_NONE +} TRACEBUF_ARG_TYPE; + +/* 
+ * Array of all Host Trace log IDs used to convert the tracebuf data + */ +typedef struct _HTB_TRACEBUF_LOG_ { + HTB_LOG_SFids eSFId; + IMG_CHAR *pszName; + IMG_CHAR *pszFmt; + IMG_UINT32 ui32ArgNum; +} HTB_TRACEBUF_LOG; + +static const HTB_TRACEBUF_LOG aLogs[] = { +#define X(a, b, c, d, e) {HTB_LOG_CREATESFID(a,b,e), #c, d, e}, + HTB_LOG_SFIDLIST +#undef X +}; + +static const IMG_CHAR *aGroups[] = { +#define X(A,B) #B, + HTB_LOG_SFGROUPLIST +#undef X +}; +static const IMG_UINT32 uiMax_aGroups = ARRAY_SIZE(aGroups) - 1; + +static TRACEBUF_ARG_TYPE ExtractOneArgFmt(IMG_CHAR **, IMG_CHAR *); +/* + * ExtractOneArgFmt + * + * Scan the input 'printf-like' string *ppszFmt and return the next + * value string to be displayed. If there is no '%' format field in the + * string we return 'TRACEBUF_ARG_TYPE_NONE' and leave the input string + * untouched. + * + * Input + * ppszFmt reference to format string to be decoded + * pszOneArgFmt single field format from *ppszFmt + * + * Returns + * TRACEBUF_ARG_TYPE_ERR unrecognised argument + * TRACEBUF_ARG_TYPE_INT variable is of numeric type + * TRACEBUF_ARG_TYPE_NONE no variable reference in *ppszFmt + * + * Side-effect + * *ppszFmt is updated to reference the next part of the format string + * to be scanned + */ +static TRACEBUF_ARG_TYPE ExtractOneArgFmt( + IMG_CHAR **ppszFmt, + IMG_CHAR *pszOneArgFmt) +{ + IMG_CHAR *pszFmt; + IMG_CHAR *psT; + IMG_UINT32 ui32Count = MAX_STRING_SIZE; + IMG_UINT32 ui32OneArgSize; + TRACEBUF_ARG_TYPE eRet = TRACEBUF_ARG_TYPE_ERR; + + if (NULL == ppszFmt) + return TRACEBUF_ARG_TYPE_ERR; + + pszFmt = *ppszFmt; + if (NULL == pszFmt) + return TRACEBUF_ARG_TYPE_ERR; + + /* + * Find the first '%' + * NOTE: we can be passed a simple string to display which will have no + * parameters embedded within it. 
In this case we simply return + * TRACEBUF_ARG_TYPE_NONE and the string contents will be the full pszFmt + */ + psT = strchr(pszFmt, '%'); + if (psT == NULL) + { + return TRACEBUF_ARG_TYPE_NONE; + } + + /* Find next conversion identifier after the initial '%' */ + while ((*psT++) && (ui32Count-- > 0)) + { + switch (*psT) + { + case 'd': + case 'i': + case 'o': + case 'u': + case 'x': + case 'X': + { + eRet = TRACEBUF_ARG_TYPE_INT; + goto _found_arg; + } + case 's': + { + eRet = TRACEBUF_ARG_TYPE_ERR; + goto _found_arg; + } + } + } + + if ((psT == NULL) || (ui32Count == 0)) return TRACEBUF_ARG_TYPE_ERR; + +_found_arg: + ui32OneArgSize = psT - pszFmt + 1; + OSCachedMemCopy(pszOneArgFmt, pszFmt, ui32OneArgSize); + pszOneArgFmt[ui32OneArgSize] = '\0'; + + *ppszFmt = psT + 1; + + return eRet; +} + +static IMG_UINT32 idToLogIdx(IMG_UINT32 ui32CheckData) +{ + IMG_UINT32 i = 0; + for (i = 0; aLogs[i].eSFId != HTB_SF_LAST; i++) + { + if ( ui32CheckData == aLogs[i].eSFId ) + return i; + } + /* Nothing found, return max value */ + return HTB_SF_LAST; +} + +/* + * DecodeHTB + * + * Decode the data buffer message located at pBuf. This should be a valid + * HTB message as we are provided with the start of the buffer. If empty there + * is no message to process. We update the uiMsgLen field with the size of the + * HTB message that we have processed so that it can be returned to the system + * on successful logging of the message to the output file. 
+ * + * Input + * pSentinel reference to newly read data and pending completion data + * from a previous invocation [handle DI entry buffer overflow] + * -> pBuf reference to raw data that we are to parse + * -> uiBufLen total number of bytes of data available + * -> pCurr start of message to decode + * + * pvDumpDebugFile output file + * pfnDumpDebugPrintf output generating routine + * + * Output + * pSentinel + * -> uiMsgLen length of the decoded message which will be freed to + * the system on successful completion of the DI entry + * update via _DebugHBTraceDINext(), + * Return Value + * 0 successful decode + * -1 unsuccessful decode + */ +static int +DecodeHTB(HTB_Sentinel_t *pSentinel, OSDI_IMPL_ENTRY *pvDumpDebugFile, + DI_PRINTF pfnDumpDebugPrintf) +{ + IMG_UINT32 ui32Data, ui32LogIdx, ui32ArgsCur; + IMG_CHAR *pszFmt = NULL; + IMG_CHAR aszOneArgFmt[MAX_STRING_SIZE]; + IMG_BOOL bUnrecognizedErrorPrinted = IMG_FALSE; + + IMG_UINT32 ui32DataSize; + IMG_UINT32 uiBufLen = pSentinel->uiBufLen; + size_t nPrinted; + + void *pNext, *pLast, *pStart, *pData = NULL; + PVRSRVTL_PPACKETHDR ppHdr; /* Current packet header */ + IMG_UINT32 uiHdrType; /* Packet header type */ + IMG_UINT32 uiMsgSize; /* Message size of current packet (bytes) */ + IMG_BOOL bPacketsDropped; + + /* Convert from byte to uint32 size */ + ui32DataSize = uiBufLen / sizeof(IMG_UINT32); + + pLast = pSentinel->pBuf + pSentinel->uiBufLen; + pStart = pSentinel->pCurr; + + pNext = pStart; + pSentinel->uiMsgLen = 0; // Reset count for this message + uiMsgSize = 0; // nothing processed so far + ui32LogIdx = HTB_SF_LAST; // Loop terminator condition + +#ifdef HTB_CHATTY + PVR_DPF((PVR_DBG_WARNING, "%s: Buf @ %p..%p, Length = %d", __func__, + pStart, pLast, uiBufLen)); +#endif /* HTB_CHATTY */ + + /* + * We should have a DATA header with the necessary information following + */ + ppHdr = GET_PACKET_HDR(pStart); + + if (ppHdr == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Unexpected NULL packet in Host Trace 
buffer", __func__)); + return -1; + } + + uiHdrType = GET_PACKET_TYPE(ppHdr); + PVR_ASSERT(uiHdrType == PVRSRVTL_PACKETTYPE_DATA); + + pNext = GET_NEXT_PACKET_ADDR(ppHdr); + + PVR_ASSERT(pNext != NULL); + + uiMsgSize = (IMG_UINT32)((size_t)pNext - (size_t)ppHdr); + + pSentinel->uiMsgLen += uiMsgSize; + + pData = GET_PACKET_DATA_PTR(ppHdr); + + if (pData == NULL || pData >= pLast) + { +#ifdef HTB_CHATTY + PVR_DPF((PVR_DBG_WARNING, "%s: pData = %p, pLast = %p Returning 0", + __func__, pData, pLast)); +#endif /* HTB_CHATTY */ + return 0; + } + + ui32Data = *(IMG_UINT32 *)pData; + ui32LogIdx = idToLogIdx(ui32Data); + + /* + * Check if the unrecognised ID is valid and therefore, tracebuf + * needs updating. + */ + if (ui32LogIdx == HTB_SF_LAST) + { + if (HTB_LOG_VALIDID(ui32Data)) + { + if (!bUnrecognizedErrorPrinted) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Unrecognised LOG value '%x' GID %x Params %d ID %x @ '%p'", + __func__, ui32Data, HTB_SF_GID(ui32Data), + HTB_SF_PARAMNUM(ui32Data), ui32Data & 0xfff, pData)); + bUnrecognizedErrorPrinted = IMG_TRUE; + } + + return 0; + } + + PVR_DPF((PVR_DBG_ERROR, + "%s: Unrecognised and invalid LOG value detected '%x'", + __func__, ui32Data)); + + return -1; + } + + /* The string format we are going to display */ + /* + * The display will show the header (log-ID, group-ID, number of params) + * The maximum parameter list length = 15 (only 4bits used to encode) + * so we need HEADER + 15 * sizeof(UINT32) and the displayed string + * describing the event. We use a buffer in the per-process pSentinel + * structure to hold the data. + */ + pszFmt = aLogs[ui32LogIdx].pszFmt; + + /* add the message payload size to the running count */ + ui32ArgsCur = HTB_SF_PARAMNUM(ui32Data); + + /* Determine if we've over-filled the buffer and had to drop packets */ + bPacketsDropped = CHECK_PACKETS_DROPPED(ppHdr); + if (bPacketsDropped || + (uiHdrType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED)) + { + /* Flag this as it is useful to know ... 
*/ + + PVR_DUMPDEBUG_LOG("\n<========================== *** PACKETS DROPPED *** ======================>\n"); + } + + { + IMG_UINT32 ui32Timestampns, ui32PID; + IMG_UINT64 ui64Timestamp, ui64TimestampSec; + IMG_CHAR *szBuffer = pSentinel->szBuffer; // Buffer start + IMG_CHAR *pszBuffer = pSentinel->szBuffer; // Current place in buf + size_t uBufBytesAvailable = sizeof(pSentinel->szBuffer); + IMG_UINT32 *pui32Data = (IMG_UINT32 *)pData; + IMG_UINT32 ui_aGroupIdx; + + // Get PID field from data stream + pui32Data++; + ui32PID = *pui32Data; + // Get Timestamp part 1 from data stream + pui32Data++; + ui64Timestamp = (IMG_UINT64) *pui32Data << 32; + // Get Timestamp part 2 from data stream + pui32Data++; + ui64Timestamp |= (IMG_UINT64) *pui32Data; + // Move to start of message contents data + pui32Data++; + + /* + * We need to snprintf the data to a local in-kernel buffer + * and then PVR_DUMPDEBUG_LOG() that in one shot + */ + ui_aGroupIdx = MIN(HTB_SF_GID(ui32Data), uiMax_aGroups); + + /* Divide by 1B to get seconds & mod using output var (nanosecond resolution)*/ + ui64TimestampSec = OSDivide64r64(ui64Timestamp, 1000000000, &ui32Timestampns); + + nPrinted = OSSNPrintf(szBuffer, uBufBytesAvailable, "%010"IMG_UINT64_FMTSPEC".%09u:%5u-%s> ", + ui64TimestampSec, ui32Timestampns, ui32PID, aGroups[ui_aGroupIdx]); + if (nPrinted >= uBufBytesAvailable) + { + PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed," + " max space "IMG_SIZE_FMTSPEC"\n", nPrinted, + uBufBytesAvailable); + + nPrinted = uBufBytesAvailable; /* Ensure we don't overflow buffer */ + } + + PVR_DUMPDEBUG_LOG("%s", pszBuffer); + /* Update where our next 'output' point in the buffer is */ + pszBuffer += nPrinted; + uBufBytesAvailable -= nPrinted; + + /* + * Print one argument at a time as this simplifies handling variable + * number of arguments. Special case handling for no arguments. + * This is the case for simple format strings such as + * HTB_SF_MAIN_KICK_UNCOUNTED. 
+ */ + if (ui32ArgsCur == 0) + { + if (pszFmt) + { + nPrinted = OSStringLCopy(pszBuffer, pszFmt, uBufBytesAvailable); + if (nPrinted >= uBufBytesAvailable) + { + PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed," + " max space "IMG_SIZE_FMTSPEC"\n", nPrinted, + uBufBytesAvailable); + nPrinted = uBufBytesAvailable; /* Ensure we don't overflow buffer */ + } + PVR_DUMPDEBUG_LOG("%s", pszBuffer); + pszBuffer += nPrinted; + /* Don't update the uBufBytesAvailable as we have finished this + * message decode. pszBuffer - szBuffer is the total amount of + * data we have decoded. + */ + } + } + else + { + if (HTB_SF_GID(ui32Data) == HTB_GID_CTRL && HTB_SF_ID(ui32Data) == HTB_ID_MARK_SCALE) + { + IMG_UINT32 i; + IMG_UINT32 ui32ArgArray[HTB_MARK_SCALE_ARG_ARRAY_SIZE]; + IMG_UINT64 ui64OSTS = 0; + IMG_UINT32 ui32OSTSRem = 0; + IMG_UINT64 ui64CRTS = 0; + + /* Retrieve 6 args to an array */ + for (i = 0; i < ARRAY_SIZE(ui32ArgArray); i++) + { + ui32ArgArray[i] = *pui32Data; + pui32Data++; + --ui32ArgsCur; + } + + ui64OSTS = (IMG_UINT64) ui32ArgArray[HTB_ARG_OSTS_PT1] << 32 | ui32ArgArray[HTB_ARG_OSTS_PT2]; + ui64CRTS = (IMG_UINT64) ui32ArgArray[HTB_ARG_CRTS_PT1] << 32 | ui32ArgArray[HTB_ARG_CRTS_PT2]; + + /* Divide by 1B to get seconds, remainder in nano seconds*/ + ui64OSTS = OSDivide64r64(ui64OSTS, 1000000000, &ui32OSTSRem); + + nPrinted = OSSNPrintf(pszBuffer, + uBufBytesAvailable, + "HTBFWMkSync Mark=%u OSTS=%010" IMG_UINT64_FMTSPEC ".%09u CRTS=%" IMG_UINT64_FMTSPEC " CalcClkSpd=%u\n", + ui32ArgArray[HTB_ARG_SYNCMARK], + ui64OSTS, + ui32OSTSRem, + ui64CRTS, + ui32ArgArray[HTB_ARG_CLKSPD]); + + if (nPrinted >= uBufBytesAvailable) + { + PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed," + " max space "IMG_SIZE_FMTSPEC"\n", nPrinted, + uBufBytesAvailable); + nPrinted = uBufBytesAvailable; /* Ensure we don't overflow buffer */ + } + + PVR_DUMPDEBUG_LOG("%s", pszBuffer); + pszBuffer += nPrinted; + uBufBytesAvailable -= nPrinted; + } + else + { + while 
(IS_VALID_FMT_STRING(pszFmt) && (uBufBytesAvailable > 0)) + { + IMG_UINT32 ui32TmpArg = *pui32Data; + TRACEBUF_ARG_TYPE eArgType; + + eArgType = ExtractOneArgFmt(&pszFmt, aszOneArgFmt); + + pui32Data++; + ui32ArgsCur--; + + switch (eArgType) + { + case TRACEBUF_ARG_TYPE_INT: + nPrinted = OSSNPrintf(pszBuffer, uBufBytesAvailable, + aszOneArgFmt, ui32TmpArg); + break; + + case TRACEBUF_ARG_TYPE_NONE: + nPrinted = OSStringLCopy(pszBuffer, pszFmt, + uBufBytesAvailable); + break; + + default: + nPrinted = OSSNPrintf(pszBuffer, uBufBytesAvailable, + "Error processing arguments, type not " + "recognized (fmt: %s)", aszOneArgFmt); + break; + } + if (nPrinted >= uBufBytesAvailable) + { + PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed," + " max space "IMG_SIZE_FMTSPEC"\n", nPrinted, + uBufBytesAvailable); + nPrinted = uBufBytesAvailable; /* Ensure we don't overflow buffer */ + } + PVR_DUMPDEBUG_LOG("%s", pszBuffer); + pszBuffer += nPrinted; + uBufBytesAvailable -= nPrinted; + } + /* Display any remaining text in pszFmt string */ + if (pszFmt) + { + nPrinted = OSStringLCopy(pszBuffer, pszFmt, uBufBytesAvailable); + if (nPrinted >= uBufBytesAvailable) + { + PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed," + " max space "IMG_SIZE_FMTSPEC"\n", nPrinted, + uBufBytesAvailable); + nPrinted = uBufBytesAvailable; /* Ensure we don't overflow buffer */ + } + PVR_DUMPDEBUG_LOG("%s", pszBuffer); + pszBuffer += nPrinted; + /* Don't update the uBufBytesAvailable as we have finished this + * message decode. pszBuffer - szBuffer is the total amount of + * data we have decoded. + */ + } + } + } + + /* Update total bytes processed */ + pSentinel->uiTotal += (pszBuffer - szBuffer); + } + return 0; +} + +/* + * HTBDumpBuffer: Dump the Host Trace Buffer using the TLClient API + * + * This routine just parses *one* message from the buffer. 
+ * The stream will be opened by the Start() routine, closed by the Stop() and + * updated for data consumed by this routine once we have DebugPrintf'd it. + * We use the new TLReleaseDataLess() routine which enables us to update the + * HTB contents with just the amount of data we have successfully processed. + * If we need to leave the data available we can call this with a 0 count. + * This will happen in the case of a buffer overflow so that we can reprocess + * any data which wasn't handled before. + * + * In case of overflow or an error we return -1 otherwise 0 + * + * Input: + * pfnPrintf output routine to display data + * psEntry handle to debug frontend + * pvData data address to start dumping from + * (set by Start() / Next()) + */ +static int HTBDumpBuffer(DI_PRINTF pfnPrintf, OSDI_IMPL_ENTRY *psEntry, + void *pvData) +{ + HTB_Sentinel_t *pSentinel = DIGetPrivData(psEntry); + + PVR_ASSERT(NULL != pvData); + + if (pvData == DI_START_TOKEN) + { + if (pSentinel->pCurr == NULL) + { +#ifdef HTB_CHATTY + PVR_DPF((PVR_DBG_WARNING, "%s: DI_START_TOKEN, Empty buffer", + __func__)); +#endif /* HTB_CHATTY */ + return 0; + } + PVR_ASSERT(pSentinel->pCurr != NULL); + + /* Display a Header as we have data to process */ + pfnPrintf(psEntry, "%-20s:%-5s-%s %s\n", "Timestamp", "PID", "Group>", + "Log Entry"); + } + else + { + if (pvData != NULL) + { + PVR_ASSERT(pSentinel->pCurr == pvData); + } + } + + return DecodeHTB(pSentinel, psEntry, pfnPrintf); +} + + +/****************************************************************************** + * External Entry Point routines ... + *****************************************************************************/ +/*************************************************************************/ /*! 
 @Function      HTB_CreateDIEntry

 @Description   Create the debugFS entry-point for the host-trace-buffer

 @Returns       eError          internal error code, PVRSRV_OK on success

 */ /*************************************************************************/
PVRSRV_ERROR HTB_CreateDIEntry(void)
{
	PVRSRV_ERROR eError;

	/* Iterator callbacks implementing the Start/Stop/Next/Show protocol
	 * defined earlier in this file */
	DI_ITERATOR_CB sIterator = {
		.pfnStart = _DebugHBTraceDIStart,
		.pfnStop = _DebugHBTraceDIStop,
		.pfnNext = _DebugHBTraceDINext,
		.pfnShow = _DebugHBTraceDIShow,
	};

	eError = DICreateEntry("host_trace", NULL, &sIterator, NULL,
	                       DI_ENTRY_TYPE_GENERIC, &g_sHTBData.psDumpHostDiEntry);
	PVR_LOG_RETURN_IF_ERROR(eError, "DICreateEntry");

	return eError;
}


/*************************************************************************/ /*!
 @Function      HTB_DestroyDIEntry

 @Description   Destroy the debugFS entry-point created by earlier
                HTB_CreateDIEntry() call.
*/ /**************************************************************************/
void HTB_DestroyDIEntry(void)
{
	/* Idempotent: safe to call when the entry was never created */
	if (g_sHTBData.psDumpHostDiEntry)
	{
		DIDestroyEntry(g_sHTBData.psDumpHostDiEntry);
		g_sHTBData.psDumpHostDiEntry = NULL;
	}
}

/* EOF */
diff --git a/drivers/mcst/gpu-imgtec/services/server/common/htb_debug.h b/drivers/mcst/gpu-imgtec/services/server/common/htb_debug.h
new file mode 100644
index 000000000000..000ed3a61df1
--- /dev/null
+++ b/drivers/mcst/gpu-imgtec/services/server/common/htb_debug.h
@@ -0,0 +1,64 @@
+/*************************************************************************/ /*!
+@File           htb_debug.h
+@Title          Linux debugFS routine setup header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _HTB_DEBUG_H_ +#define _HTB_DEBUG_H_ + +/**************************************************************************/ /*! + @Function HTB_CreateDIEntry + + @Description Create the debugFS entry-point for the host-trace-buffer + + @Returns eError internal error code, PVRSRV_OK on success + + */ /**************************************************************************/ +PVRSRV_ERROR HTB_CreateDIEntry(void); + +/**************************************************************************/ /*! + @Function HTB_DestroyFSEntry + + @Description Destroy the debugFS entry-point created by earlier + HTB_CreateDIEntry() call. +*/ /**************************************************************************/ +void HTB_DestroyDIEntry(void); + +#endif /* _HTB_DEBUG_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/common/htbserver.c b/drivers/mcst/gpu-imgtec/services/server/common/htbserver.c new file mode 100644 index 000000000000..9e79978ca3d7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/htbserver.c @@ -0,0 +1,885 @@ +/*************************************************************************/ /*! +@File htbserver.c +@Title Host Trace Buffer server implementation. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Host Trace Buffer provides a mechanism to log Host events to a + buffer in a similar way to the Firmware Trace mechanism. + Host Trace Buffer logs data using a Transport Layer buffer. + The Transport Layer and pvrtld tool provides the mechanism to + retrieve the trace data. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "htbserver.h" +#include "htbuffer.h" +#include "htbuffer_types.h" +#include "tlstream.h" +#include "pvrsrv_tlcommon.h" +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" +#include "osfunc.h" +#include "allocmem.h" +#include "pvr_notifier.h" +#include "pvrsrv.h" +#include "pvrsrv_apphint.h" +#include "oskm_apphint.h" + +/* size of circular buffer controlling the maximum number of concurrent PIDs logged */ +#define HTB_MAX_NUM_PID 8 + +/* number of times to try rewriting a log entry */ +#define HTB_LOG_RETRY_COUNT 5 + +/*************************************************************************/ /*! + Host Trace Buffer control information structure +*/ /**************************************************************************/ +typedef struct +{ + IMG_UINT32 ui32BufferSize; /*!< Requested buffer size in bytes + Once set this may not be changed */ + + HTB_OPMODE_CTRL eOpMode; /*!< Control what trace data is dropped if + the buffer is full. 
+ Once set this may not be changed */ + +/* IMG_UINT32 ui32GroupEnable; */ /*!< Flags word controlling groups to be + logged */ + + IMG_UINT32 ui32LogLevel; /*!< Log level to control messages logged */ + + IMG_UINT32 aui32EnablePID[HTB_MAX_NUM_PID]; /*!< PIDs to enable logging for + a specific set of processes */ + + IMG_UINT32 ui32PIDCount; /*!< Current number of PIDs being logged */ + + IMG_UINT32 ui32PIDHead; /*!< Head of the PID circular buffer */ + + HTB_LOGMODE_CTRL eLogMode; /*!< Logging mode control */ + + IMG_BOOL bLogDropSignalled; /*!< Flag indicating if a log message has + been signalled as dropped */ + + /* synchronisation parameters */ + IMG_UINT64 ui64SyncOSTS; + IMG_UINT64 ui64SyncCRTS; + IMG_UINT32 ui32SyncCalcClkSpd; + IMG_UINT32 ui32SyncMarker; + + IMG_BOOL bInitDone; /* Set by HTBInit, reset by HTBDeInit */ + + POS_SPINLOCK hRepeatMarkerLock; /*!< Spinlock used in HTBLogKM to protect global variables + (ByteCount, OSTS, CRTS ClkSpeed) + from becoming inconsistent due to calls from + both KM and UM */ + + IMG_UINT32 ui32ByteCount; /* Byte count used for triggering repeat sync point */ + /* static variables containing details of previous sync point */ + IMG_UINT64 ui64OSTS; + IMG_UINT64 ui64CRTS; + IMG_UINT32 ui32ClkSpeed; + +} HTB_CTRL_INFO; + + +/*************************************************************************/ /*! 
+*/ /**************************************************************************/ +static const IMG_UINT32 MapFlags[] = +{ + 0, /* HTB_OPMODE_UNDEF = 0 */ + TL_OPMODE_DROP_NEWER, /* HTB_OPMODE_DROPLATEST */ + TL_OPMODE_DROP_OLDEST,/* HTB_OPMODE_DROPOLDEST */ + TL_OPMODE_BLOCK /* HTB_OPMODE_BLOCK */ +}; + +static_assert(0 == HTB_OPMODE_UNDEF, "Unexpected value for HTB_OPMODE_UNDEF"); +static_assert(1 == HTB_OPMODE_DROPLATEST, "Unexpected value for HTB_OPMODE_DROPLATEST"); +static_assert(2 == HTB_OPMODE_DROPOLDEST, "Unexpected value for HTB_OPMODE_DROPOLDEST"); +static_assert(3 == HTB_OPMODE_BLOCK, "Unexpected value for HTB_OPMODE_BLOCK"); + +static_assert(1 == TL_OPMODE_DROP_NEWER, "Unexpected value for TL_OPMODE_DROP_NEWER"); +static_assert(2 == TL_OPMODE_DROP_OLDEST, "Unexpected value for TL_OPMODE_DROP_OLDEST"); +static_assert(3 == TL_OPMODE_BLOCK, "Unexpected value for TL_OPMODE_BLOCK"); + +static const IMG_UINT32 g_ui32TLBaseFlags; //TL_FLAG_NO_SIGNAL_ON_COMMIT + +/* Minimum TL buffer size. + * Large enough for around 60 worst case messages or 200 average messages + */ +#define HTB_TL_BUFFER_SIZE_MIN (0x10000) + +/* Minimum concentration of HTB packets in a TL Stream is 60% + * If we just put the HTB header in the TL stream (12 bytes), the TL overhead + * is 8 bytes for its own header, so for the smallest possible (and most + * inefficient) packet we have 3/5 of the buffer used for actual HTB data. + * This shift is used as a guaranteed estimation on when to produce a repeat + * packet. By shifting the size of the buffer by 1 we effectively /2 this + * under the 60% boundary chance we may have overwritten the marker and thus + * guaranteed to always have a marker in the stream */ +#define HTB_MARKER_PREDICTION_THRESHOLD(val) (val >> 1) + +static HTB_CTRL_INFO g_sCtrl; +static IMG_BOOL g_bConfigured = IMG_FALSE; +static IMG_HANDLE g_hTLStream; + + +/************************************************************************/ /*! 
 @Function      _LookupFlags
 @Description   Convert HTBuffer Operation mode to TLStream flags

 @Input         eModeHTBuffer   Operation Mode

 @Return        IMG_UINT32      TLStream FLags
*/ /**************************************************************************/
static IMG_UINT32
_LookupFlags( HTB_OPMODE_CTRL eMode )
{
	/* Out-of-range modes map to 0 (no flags) rather than indexing past
	 * the MapFlags[] table */
	return (eMode < ARRAY_SIZE(MapFlags)) ? MapFlags[eMode] : 0;
}


/************************************************************************/ /*!
 @Function      _HTBLogDebugInfo
 @Description   Debug dump handler used to dump the state of the HTB module.
                Called for each verbosity level during a debug dump. Function
                only prints state when called for High verbosity.

 @Input         hDebugRequestHandle See PFN_DBGREQ_NOTIFY

 @Input         ui32VerbLevel       See PFN_DBGREQ_NOTIFY

 @Input         pfnDumpDebugPrintf  See PFN_DBGREQ_NOTIFY

 @Input         pvDumpDebugFile     See PFN_DBGREQ_NOTIFY

*/ /**************************************************************************/
static void _HTBLogDebugInfo(
	PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
	IMG_UINT32 ui32VerbLevel,
	DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
	void *pvDumpDebugFile
)
{
	PVR_UNREFERENCED_PARAMETER(hDebugRequestHandle);

	/* Only emit state at the High verbosity pass */
	if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH))
	{

		if (g_bConfigured)
		{
			IMG_INT i;

			PVR_DUMPDEBUG_LOG("------[ HTB Log state: On ]------");

			PVR_DUMPDEBUG_LOG("HTB Log mode: %d", g_sCtrl.eLogMode);
			PVR_DUMPDEBUG_LOG("HTB Log level: %d", g_sCtrl.ui32LogLevel);
			PVR_DUMPDEBUG_LOG("HTB Buffer Opmode: %d", g_sCtrl.eOpMode);

			/* One enable-mask word per group flag element */
			for (i=0; i < HTB_FLAG_NUM_EL; i++)
			{
				PVR_DUMPDEBUG_LOG("HTB Log group %d: %x", i, g_auiHTBGroupEnable[i]);
			}
		}
		else
		{
			PVR_DUMPDEBUG_LOG("------[ HTB Log state: Off ]------");
		}
	}
}

/************************************************************************/ /*!
 @Function      HTBDeviceCreate
 @Description   Initialisation actions for HTB at device creation.

 @Input         psDeviceNode    Reference to the device node in context

 @Return        eError          Internal services call returned eError error
                                number
*/ /**************************************************************************/
PVRSRV_ERROR
HTBDeviceCreate(
	PVRSRV_DEVICE_NODE *psDeviceNode
)
{
	PVRSRV_ERROR eError;

	/* Register so that HTB state is included in device debug dumps */
	eError = PVRSRVRegisterDbgRequestNotify(&psDeviceNode->hHtbDbgReqNotify,
	                                        psDeviceNode, &_HTBLogDebugInfo, DEBUG_REQUEST_HTB, NULL);
	PVR_LOG_IF_ERROR(eError, "PVRSRVRegisterDbgRequestNotify");

	return eError;
}

/************************************************************************/ /*!
 @Function      HTBDeviceDestroy
 @Description   De-initialisation actions for HTB at device destruction.

 @Input         psDeviceNode    Reference to the device node in context

*/ /**************************************************************************/
void
HTBDeviceDestroy(
	PVRSRV_DEVICE_NODE *psDeviceNode
)
{
	if (psDeviceNode->hHtbDbgReqNotify)
	{
		/* Not much we can do if it fails; the driver is unloading */
		(void)PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hHtbDbgReqNotify);
		psDeviceNode->hHtbDbgReqNotify = NULL;
	}
}

/* Configured TL buffer size in KB; may be overridden at module load time
 * via the HTBufferSizeInKB AppHint (see HTBInit) */
static IMG_UINT32 g_ui32HTBufferSize = HTB_TL_BUFFER_SIZE_MIN;

/*
 * AppHint access routine forward definitions
 */
static PVRSRV_ERROR _HTBSetLogGroup(const PVRSRV_DEVICE_NODE *, const void *,
                                    IMG_UINT32);
static PVRSRV_ERROR _HTBReadLogGroup(const PVRSRV_DEVICE_NODE *, const void *,
                                     IMG_UINT32 *);

static PVRSRV_ERROR _HTBSetOpMode(const PVRSRV_DEVICE_NODE *, const void *,
                                  IMG_UINT32);
static PVRSRV_ERROR _HTBReadOpMode(const PVRSRV_DEVICE_NODE *, const void *,
                                   IMG_UINT32 *);

static void _OnTLReaderOpenCallback(void *);

/************************************************************************/ /*!
+ @Function HTBInit + @Description Allocate and initialise the Host Trace Buffer + The buffer size may be changed by specifying + HTBufferSizeInKB=xxxx + + @Return eError Internal services call returned eError error + number +*/ /**************************************************************************/ +PVRSRV_ERROR +HTBInit(void) +{ + void *pvAppHintState = NULL; + IMG_UINT32 ui32AppHintDefault; + IMG_UINT32 ui32BufBytes; + PVRSRV_ERROR eError; + + if (g_sCtrl.bInitDone) + { + PVR_DPF((PVR_DBG_ERROR, "HTBInit: Driver already initialised")); + return PVRSRV_ERROR_ALREADY_EXISTS; + } + + /* + * Buffer Size can be configured by specifying a value in the AppHint + * This will only take effect at module load time so there is no query + * or setting mechanism available. + */ + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HTBufferSizeInKB, + NULL, + NULL, + APPHINT_OF_DRIVER_NO_DEVICE, + NULL); + + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableHTBLogGroup, + _HTBReadLogGroup, + _HTBSetLogGroup, + APPHINT_OF_DRIVER_NO_DEVICE, + NULL); + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HTBOperationMode, + _HTBReadOpMode, + _HTBSetOpMode, + APPHINT_OF_DRIVER_NO_DEVICE, + NULL); + + /* + * Now get whatever values have been configured for our AppHints + */ + OSCreateKMAppHintState(&pvAppHintState); + ui32AppHintDefault = HTB_TL_BUFFER_SIZE_MIN / 1024; + OSGetKMAppHintUINT32(pvAppHintState, HTBufferSizeInKB, + &ui32AppHintDefault, &g_ui32HTBufferSize); + OSFreeKMAppHintState(pvAppHintState); + + ui32BufBytes = g_ui32HTBufferSize * 1024; + + /* initialise rest of state */ + g_sCtrl.ui32BufferSize = + (ui32BufBytes < HTB_TL_BUFFER_SIZE_MIN) + ? 
HTB_TL_BUFFER_SIZE_MIN + : ui32BufBytes; + g_sCtrl.eOpMode = HTB_OPMODE_DROPOLDEST; + g_sCtrl.ui32LogLevel = 0; + g_sCtrl.ui32PIDCount = 0; + g_sCtrl.ui32PIDHead = 0; + g_sCtrl.eLogMode = HTB_LOGMODE_ALLPID; + g_sCtrl.bLogDropSignalled = IMG_FALSE; + + eError = OSSpinLockCreate(&g_sCtrl.hRepeatMarkerLock); + PVR_LOG_RETURN_IF_ERROR(eError, "OSSpinLockCreate"); + + g_sCtrl.bInitDone = IMG_TRUE; + + /* Log the current driver parameter setting for the HTBufferSizeInKB. + * We do this here as there is no other infrastructure for obtaining + * the value. + */ + if (g_ui32HTBufferSize != ui32AppHintDefault) + { + PVR_LOG(("Increasing HTBufferSize to %uKB", g_ui32HTBufferSize)); + } + + return PVRSRV_OK; +} + +/************************************************************************/ /*! + @Function HTBDeInit + @Description Close the Host Trace Buffer and free all resources. Must + perform a no-op if already de-initialised. + + @Return eError Internal services call returned eError error + number +*/ /**************************************************************************/ +PVRSRV_ERROR +HTBDeInit( void ) +{ + if (!g_sCtrl.bInitDone) + return PVRSRV_OK; + + if (g_hTLStream) + { + TLStreamClose( g_hTLStream ); + g_hTLStream = NULL; + } + + if (g_sCtrl.hRepeatMarkerLock != NULL) + { + OSSpinLockDestroy(g_sCtrl.hRepeatMarkerLock); + g_sCtrl.hRepeatMarkerLock = NULL; + } + + g_sCtrl.bInitDone = IMG_FALSE; + return PVRSRV_OK; +} + + +/*************************************************************************/ /*! 
+ AppHint interface functions +*/ /**************************************************************************/ +static +PVRSRV_ERROR _HTBSetLogGroup(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 ui32Value) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(psPrivate); + + return HTBControlKM(1, &ui32Value, 0, 0, + HTB_LOGMODE_UNDEF, HTB_OPMODE_UNDEF); +} + +static +PVRSRV_ERROR _HTBReadLogGroup(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 *pui32Value) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(psPrivate); + + *pui32Value = g_auiHTBGroupEnable[0]; + return PVRSRV_OK; +} + +static +PVRSRV_ERROR _HTBSetOpMode(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 ui32Value) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(psPrivate); + + return HTBControlKM(0, NULL, 0, 0, HTB_LOGMODE_UNDEF, ui32Value); +} + +static +PVRSRV_ERROR _HTBReadOpMode(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 *pui32Value) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(psPrivate); + + *pui32Value = (IMG_UINT32)g_sCtrl.eOpMode; + return PVRSRV_OK; +} + + +static void +_OnTLReaderOpenCallback( void *pvArg ) +{ + if ( g_hTLStream ) + { + IMG_UINT64 ui64Time; + OSClockMonotonicns64(&ui64Time); + (void) HTBLog((IMG_HANDLE) NULL, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, + g_sCtrl.ui32SyncMarker, + ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), + ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)), + ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), + ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)), + g_sCtrl.ui32SyncCalcClkSpd); + } + + PVR_UNREFERENCED_PARAMETER(pvArg); +} + + +/*************************************************************************/ /*! 
+ @Function HTBControlKM + @Description Update the configuration of the Host Trace Buffer + + @Input ui32NumFlagGroups Number of group enable flags words + + @Input aui32GroupEnable Flags words controlling groups to be logged + + @Input ui32LogLevel Log level to record + + @Input ui32EnablePID PID to enable logging for a specific process + + @Input eLogMode Enable logging for all or specific processes, + + @Input eOpMode Control the behaviour of the data buffer + + @Return eError Internal services call returned eError error + number +*/ /**************************************************************************/ +PVRSRV_ERROR +HTBControlKM( + const IMG_UINT32 ui32NumFlagGroups, + const IMG_UINT32 * aui32GroupEnable, + const IMG_UINT32 ui32LogLevel, + const IMG_UINT32 ui32EnablePID, + const HTB_LOGMODE_CTRL eLogMode, + const HTB_OPMODE_CTRL eOpMode +) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32RetryCount = HTB_LOG_RETRY_COUNT; + IMG_UINT32 i; + IMG_UINT64 ui64Time; + OSClockMonotonicns64(&ui64Time); + + if ( !g_bConfigured && ui32NumFlagGroups ) + { + eError = TLStreamCreate( + &g_hTLStream, + PVRSRVGetPVRSRVData()->psHostMemDeviceNode, + HTB_STREAM_NAME, + g_sCtrl.ui32BufferSize, + _LookupFlags(HTB_OPMODE_DROPOLDEST) | g_ui32TLBaseFlags, + _OnTLReaderOpenCallback, NULL, NULL, NULL); + PVR_LOG_RETURN_IF_ERROR(eError, "TLStreamCreate"); + g_bConfigured = IMG_TRUE; + } + + if (HTB_OPMODE_UNDEF != eOpMode && g_sCtrl.eOpMode != eOpMode) + { + g_sCtrl.eOpMode = eOpMode; + eError = TLStreamReconfigure(g_hTLStream, _LookupFlags(g_sCtrl.eOpMode | g_ui32TLBaseFlags)); + while ( PVRSRV_ERROR_NOT_READY == eError && ui32RetryCount-- ) + { + OSReleaseThreadQuanta(); + eError = TLStreamReconfigure(g_hTLStream, _LookupFlags(g_sCtrl.eOpMode | g_ui32TLBaseFlags)); + } + PVR_LOG_RETURN_IF_ERROR(eError, "TLStreamReconfigure"); + } + + if ( ui32EnablePID ) + { + g_sCtrl.aui32EnablePID[g_sCtrl.ui32PIDHead] = ui32EnablePID; + g_sCtrl.ui32PIDHead++; + g_sCtrl.ui32PIDHead %= 
HTB_MAX_NUM_PID; + g_sCtrl.ui32PIDCount++; + if ( g_sCtrl.ui32PIDCount > HTB_MAX_NUM_PID ) + { + g_sCtrl.ui32PIDCount = HTB_MAX_NUM_PID; + } + } + + /* HTB_LOGMODE_ALLPID overrides ui32EnablePID */ + if ( HTB_LOGMODE_ALLPID == eLogMode ) + { + OSCachedMemSet(g_sCtrl.aui32EnablePID, 0, sizeof(g_sCtrl.aui32EnablePID)); + g_sCtrl.ui32PIDCount = 0; + g_sCtrl.ui32PIDHead = 0; + } + if ( HTB_LOGMODE_UNDEF != eLogMode ) + { + g_sCtrl.eLogMode = eLogMode; + } + + if ( ui32NumFlagGroups ) + { + for (i = 0; i < HTB_FLAG_NUM_EL && i < ui32NumFlagGroups; i++) + { + g_auiHTBGroupEnable[i] = aui32GroupEnable[i]; + } + for (; i < HTB_FLAG_NUM_EL; i++) + { + g_auiHTBGroupEnable[i] = 0; + } + } + + if ( ui32LogLevel ) + { + g_sCtrl.ui32LogLevel = ui32LogLevel; + } + + /* Dump the current configuration state */ + eError = HTBLog((IMG_HANDLE) NULL, 0, ui64Time, HTB_SF_CTRL_OPMODE, g_sCtrl.eOpMode); + PVR_LOG_IF_ERROR(eError, "HTBLog"); + eError = HTBLog((IMG_HANDLE) NULL, 0, ui64Time, HTB_SF_CTRL_ENABLE_GROUP, g_auiHTBGroupEnable[0]); + PVR_LOG_IF_ERROR(eError, "HTBLog"); + eError = HTBLog((IMG_HANDLE) NULL, 0, ui64Time, HTB_SF_CTRL_LOG_LEVEL, g_sCtrl.ui32LogLevel); + PVR_LOG_IF_ERROR(eError, "HTBLog"); + eError = HTBLog((IMG_HANDLE) NULL, 0, ui64Time, HTB_SF_CTRL_LOGMODE, g_sCtrl.eLogMode); + PVR_LOG_IF_ERROR(eError, "HTBLog"); + for (i = 0; i < g_sCtrl.ui32PIDCount; i++) + { + eError = HTBLog((IMG_HANDLE) NULL, 0, ui64Time, HTB_SF_CTRL_ENABLE_PID, g_sCtrl.aui32EnablePID[i]); + PVR_LOG_IF_ERROR(eError, "HTBLog"); + } + /* Else should never be hit as we set the spd when the power state is updated */ + if (0 != g_sCtrl.ui32SyncMarker && 0 != g_sCtrl.ui32SyncCalcClkSpd) + { + eError = HTBLog((IMG_HANDLE) NULL, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, + g_sCtrl.ui32SyncMarker, + ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)), + ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), 
((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)), + g_sCtrl.ui32SyncCalcClkSpd); + PVR_LOG_IF_ERROR(eError, "HTBLog"); + } + + return eError; +} + +/*************************************************************************/ /*! +*/ /**************************************************************************/ +static IMG_BOOL +_ValidPID( IMG_UINT32 PID ) +{ + IMG_UINT32 i; + + for (i = 0; i < g_sCtrl.ui32PIDCount; i++) + { + if ( g_sCtrl.aui32EnablePID[i] == PID ) + { + return IMG_TRUE; + } + } + return IMG_FALSE; +} + + +/*************************************************************************/ /*! + @Function HTBSyncPartitionMarker + @Description Write an HTB sync partition marker to the HTB log + + @Input ui33Marker Marker value + +*/ /**************************************************************************/ +void +HTBSyncPartitionMarker( + const IMG_UINT32 ui32Marker +) +{ + g_sCtrl.ui32SyncMarker = ui32Marker; + if ( g_hTLStream ) + { + PVRSRV_ERROR eError; + IMG_UINT64 ui64Time; + OSClockMonotonicns64(&ui64Time); + + /* Else should never be hit as we set the spd when the power state is updated */ + if (0 != g_sCtrl.ui32SyncCalcClkSpd) + { + eError = HTBLog((IMG_HANDLE) NULL, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, + ui32Marker, + ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)), + ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)), + g_sCtrl.ui32SyncCalcClkSpd); + PVR_WARN_IF_ERROR(eError, "HTBLog"); + } + } +} + +/*************************************************************************/ /*! + @Function HTBSyncPartitionMarkerRepeat + @Description Write a HTB sync partition marker to the HTB log, given + the previous values to repeat. 
+ + @Input ui33Marker Marker value + @Input ui64SyncOSTS previous OSTS + @Input ui64SyncCRTS previous CRTS + @Input ui32ClkSpeed previous Clock speed + +*/ /**************************************************************************/ +void +HTBSyncPartitionMarkerRepeat( + const IMG_UINT32 ui32Marker, + const IMG_UINT64 ui64SyncOSTS, + const IMG_UINT64 ui64SyncCRTS, + const IMG_UINT32 ui32ClkSpeed +) +{ + if ( g_hTLStream ) + { + PVRSRV_ERROR eError; + IMG_UINT64 ui64Time; + OSClockMonotonicns64(&ui64Time); + + /* Else should never be hit as we set the spd when the power state is updated */ + if (0 != ui32ClkSpeed) + { + eError = HTBLog((IMG_HANDLE) NULL, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, + ui32Marker, + ((IMG_UINT32)((ui64SyncOSTS>>32)&0xffffffffU)), ((IMG_UINT32)(ui64SyncOSTS&0xffffffffU)), + ((IMG_UINT32)((ui64SyncCRTS>>32)&0xffffffffU)), ((IMG_UINT32)(ui64SyncCRTS&0xffffffffU)), + ui32ClkSpeed); + PVR_WARN_IF_ERROR(eError, "HTBLog"); + } + } +} + +/*************************************************************************/ /*! 
+ @Function HTBSyncScale + @Description Write FW-Host synchronisation data to the HTB log when clocks + change or are re-calibrated + + @Input bLogValues IMG_TRUE if value should be immediately written + out to the log + + @Input ui32OSTS OS Timestamp + + @Input ui32CRTS Rogue timestamp + + @Input ui32CalcClkSpd Calculated clock speed + +*/ /**************************************************************************/ +void +HTBSyncScale( + const IMG_BOOL bLogValues, + const IMG_UINT64 ui64OSTS, + const IMG_UINT64 ui64CRTS, + const IMG_UINT32 ui32CalcClkSpd +) +{ + g_sCtrl.ui64SyncOSTS = ui64OSTS; + g_sCtrl.ui64SyncCRTS = ui64CRTS; + g_sCtrl.ui32SyncCalcClkSpd = ui32CalcClkSpd; + if (g_hTLStream && bLogValues) + { + PVRSRV_ERROR eError; + IMG_UINT64 ui64Time; + OSClockMonotonicns64(&ui64Time); + eError = HTBLog((IMG_HANDLE) NULL, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, + g_sCtrl.ui32SyncMarker, + ((IMG_UINT32)((ui64OSTS>>32)&0xffffffff)), ((IMG_UINT32)(ui64OSTS&0xffffffff)), + ((IMG_UINT32)((ui64CRTS>>32)&0xffffffff)), ((IMG_UINT32)(ui64CRTS&0xffffffff)), + ui32CalcClkSpd); + /* + * Don't spam the log with non-failure cases + */ + PVR_WARN_IF_ERROR(eError, "HTBLog"); + } +} + + +/*************************************************************************/ /*! + @Function HTBLogKM + @Description Record a Host Trace Buffer log event + + @Input PID The PID of the process the event is associated + with. This is provided as an argument rather + than querying internally so that events associated + with a particular process, but performed by + another can be logged correctly. + + @Input ui64TimeStamp The timestamp to be associated with this log event + + @Input SF The log event ID + + @Input ... Log parameters + + @Return PVRSRV_OK Success. 

*/ /**************************************************************************/
PVRSRV_ERROR
HTBLogKM(
	IMG_UINT32 PID,
	IMG_UINT64 ui64TimeStamp,
	HTB_LOG_SFids SF,
	IMG_UINT32 ui32NumArgs,
	IMG_UINT32 * aui32Args
)
{
	OS_SPINLOCK_FLAGS uiSpinLockFlags;
	IMG_UINT32 ui32ReturnFlags = 0;

	/* Local snapshot variables of global counters */
	IMG_UINT64 ui64OSTSSnap;
	IMG_UINT64 ui64CRTSSnap;
	IMG_UINT32 ui32ClkSpeedSnap;

	/* format of messages is: SF:PID:TIMEPT1:TIMEPT2:[PARn]*
	 * Buffer is on the stack so we don't need a semaphore to guard it
	 */
	IMG_UINT32 aui32MessageBuffer[HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS];

	/* Min HTB size is HTB_TL_BUFFER_SIZE_MIN : 10000 bytes and Max message/
	 * packet size is 4*(HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS) = 72 bytes,
	 * hence with these constraints this design is unlikely to get
	 * PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED error
	 */
	PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_ENABLED;
	IMG_UINT32 ui32RetryCount = HTB_LOG_RETRY_COUNT;
	IMG_UINT32 * pui32Message = aui32MessageBuffer;
	/* Message size in bytes: 4 bytes per header word and per argument. */
	IMG_UINT32 ui32MessageSize = 4 * (HTB_LOG_HEADER_SIZE+ui32NumArgs);

	/* Argument count must match exactly what the SF event ID declares. */
	PVR_LOG_GOTO_IF_INVALID_PARAM(aui32Args != NULL, eError, ReturnError);
	PVR_LOG_GOTO_IF_INVALID_PARAM(ui32NumArgs == HTB_SF_PARAMNUM(SF), eError, ReturnError);
	PVR_LOG_GOTO_IF_INVALID_PARAM(ui32NumArgs <= HTB_LOG_MAX_PARAMS, eError, ReturnError);

	/* PID filter: 0 and ~0 appear to act as wildcard/kernel values —
	 * NOTE(review): confirm intended semantics against callers. The two
	 * commented-out group/level checks are disabled by design here. */
	if ( g_hTLStream
		 && ( 0 == PID || ~0 == PID || HTB_LOGMODE_ALLPID == g_sCtrl.eLogMode || _ValidPID(PID) )
/*	 && ( g_sCtrl.ui32GroupEnable & (0x1 << HTB_SF_GID(SF)) ) */
/*	 && ( g_sCtrl.ui32LogLevel >= HTB_SF_LVL(SF) ) */
		)
	{
		/* Pack header words: SF, PID, then the split 64-bit timestamp. */
		*pui32Message++ = SF;
		*pui32Message++ = PID;
		*pui32Message++ = ((IMG_UINT32)((ui64TimeStamp>>32)&0xffffffff));
		*pui32Message++ = ((IMG_UINT32)(ui64TimeStamp&0xffffffff));
		while ( ui32NumArgs )
		{
			ui32NumArgs--;
			pui32Message[ui32NumArgs] = aui32Args[ui32NumArgs];
		}

		eError = TLStreamWriteRetFlags( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize, &ui32ReturnFlags );
		/* Retry a bounded number of times while the stream is busy. */
		while ( PVRSRV_ERROR_NOT_READY == eError && ui32RetryCount-- )
		{
			OSReleaseThreadQuanta();
			eError = TLStreamWriteRetFlags( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize, &ui32ReturnFlags );
		}

		if ( PVRSRV_OK == eError )
		{
			g_sCtrl.bLogDropSignalled = IMG_FALSE;
		}
		else if ( PVRSRV_ERROR_STREAM_FULL != eError || !g_sCtrl.bLogDropSignalled )
		{
			/* Warn once per run of stream-full errors to avoid log spam. */
			PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "TLStreamWrite", PVRSRVGETERRORSTRING(eError), __func__));
		}
		if ( PVRSRV_ERROR_STREAM_FULL == eError )
		{
			g_sCtrl.bLogDropSignalled = IMG_TRUE;
		}

	}

	if (SF == HTB_SF_CTRL_FWSYNC_MARK_SCALE)
	{
		OSSpinLockAcquire(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags);

		/* If a marker is being placed reset byte count from last marker */
		g_sCtrl.ui32ByteCount = 0;
		g_sCtrl.ui64OSTS = (IMG_UINT64)aui32Args[HTB_ARG_OSTS_PT1] << 32 | aui32Args[HTB_ARG_OSTS_PT2];
		g_sCtrl.ui64CRTS = (IMG_UINT64)aui32Args[HTB_ARG_CRTS_PT1] << 32 | aui32Args[HTB_ARG_CRTS_PT2];
		g_sCtrl.ui32ClkSpeed = aui32Args[HTB_ARG_CLKSPD];

		OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags);
	}
	else
	{
		OSSpinLockAcquire(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags);
		/* Increase global count */
		g_sCtrl.ui32ByteCount += ui32MessageSize;

		/* Check if packet has overwritten last marker/rpt &&
		   If the packet count is over half the size of the buffer */
		if (ui32ReturnFlags & TL_FLAG_OVERWRITE_DETECTED &&
			g_sCtrl.ui32ByteCount > HTB_MARKER_PREDICTION_THRESHOLD(g_sCtrl.ui32BufferSize))
		{
			/* Take snapshot of global variables */
			ui64OSTSSnap = g_sCtrl.ui64OSTS;
			ui64CRTSSnap = g_sCtrl.ui64CRTS;
			ui32ClkSpeedSnap = g_sCtrl.ui32ClkSpeed;
			/* Reset global variable counter */
			g_sCtrl.ui32ByteCount = 0;
			/* Drop the lock before re-entering the logging path below. */
			OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags);

			/* Produce a repeat marker */
			HTBSyncPartitionMarkerRepeat(g_sCtrl.ui32SyncMarker, ui64OSTSSnap, ui64CRTSSnap, ui32ClkSpeedSnap);
+ } + else + { + OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); + } + } + +ReturnError: + return eError; +} + +/*************************************************************************/ /*! + @Function HTBIsConfigured + @Description Determine if HTB stream has been configured + + @Input none + + @Return IMG_FALSE Stream has not been configured + IMG_TRUE Stream has been configured + +*/ /**************************************************************************/ +IMG_BOOL +HTBIsConfigured(void) +{ + return g_bConfigured; +} +/* EOF */ diff --git a/drivers/mcst/gpu-imgtec/services/server/common/info_page_km.c b/drivers/mcst/gpu-imgtec/services/server/common/info_page_km.c new file mode 100644 index 000000000000..3762ce22a091 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/info_page_km.c @@ -0,0 +1,133 @@ +/*************************************************************************/ /*! +@File info_page_km.c +@Title Kernel/User space shared memory +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements general purpose shared memory between kernel driver + and user mode. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "info_page_defs.h" +#include "info_page.h" +#include "pvrsrv.h" +#include "devicemem.h" +#include "pmr.h" + +PVRSRV_ERROR InfoPageCreate(PVRSRV_DATA *psData) +{ + const DEVMEM_FLAGS_T uiMemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_CPU_LOCAL; + PVRSRV_ERROR eError; + + PVR_ASSERT(psData != NULL); + + /* Allocate single page of memory for driver information page */ + eError = DevmemAllocateExportable(psData->psHostMemDeviceNode, + OSGetPageSize(), + OSGetPageSize(), + OSGetPageShift(), + uiMemFlags, + "PVRSRVInfoPage", + &psData->psInfoPageMemDesc); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAllocateExportable", e0); + + eError = DevmemAcquireCpuVirtAddr(psData->psInfoPageMemDesc, + (void **) &psData->pui32InfoPage); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAllocateExportable", e0); + + /* Look-up the memory descriptor PMR handle */ + eError = DevmemLocalGetImportHandle(psData->psInfoPageMemDesc, + (void **) &psData->psInfoPagePMR); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemLocalGetImportHandle", e0); + + eError = OSLockCreate(&psData->hInfoPageLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0); + + return PVRSRV_OK; + +e0: + InfoPageDestroy(psData); + return eError; +} + +void InfoPageDestroy(PVRSRV_DATA *psData) +{ + if (psData->psInfoPageMemDesc) + { + if (psData->pui32InfoPage != NULL) + { + DevmemReleaseCpuVirtAddr(psData->psInfoPageMemDesc); + psData->pui32InfoPage = NULL; + } + + DevmemFree(psData->psInfoPageMemDesc); + psData->psInfoPageMemDesc = NULL; + } + + if (psData->hInfoPageLock) + { + OSLockDestroy(psData->hInfoPageLock); + psData->hInfoPageLock = NULL; + } +} + +PVRSRV_ERROR PVRSRVAcquireInfoPageKM(PMR **ppsPMR) +{ + PVRSRV_DATA *psData = PVRSRVGetPVRSRVData(); + + 
PVR_LOG_RETURN_IF_FALSE(psData->psInfoPageMemDesc != NULL, "invalid MEMDESC" + " handle", PVRSRV_ERROR_INVALID_PARAMS); + PVR_LOG_RETURN_IF_FALSE(psData->psInfoPagePMR != NULL, "invalid PMR handle", + PVRSRV_ERROR_INVALID_PARAMS); + + /* Copy the PMR import handle back */ + *ppsPMR = psData->psInfoPagePMR; + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVReleaseInfoPageKM(PMR *ppsPMR) +{ + /* Nothing to do here as PMR is singleton */ + PVR_UNREFERENCED_PARAMETER(ppsPMR); + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/common/lists.c b/drivers/mcst/gpu-imgtec/services/server/common/lists.c new file mode 100644 index 000000000000..e8e7088a3296 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/lists.c @@ -0,0 +1,60 @@ +/*************************************************************************/ /*! +@File +@Title Linked list shared functions implementation. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implementation of the list iterators for types shared among + more than one file in the services code. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "lists.h" + +/*=================================================================== + LIST ITERATOR FUNCTIONS USED IN MORE THAN ONE FILE (those used just + once are implemented locally). 
+ ===================================================================*/ + +IMPLEMENT_LIST_ANY(PVRSRV_DEVICE_NODE) +IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, IMG_BOOL, IMG_FALSE) +IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK) +IMPLEMENT_LIST_ANY_VA(PVRSRV_DEVICE_NODE) +IMPLEMENT_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK) +IMPLEMENT_LIST_FOR_EACH(PVRSRV_DEVICE_NODE) +IMPLEMENT_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE) +IMPLEMENT_LIST_INSERT_TAIL(PVRSRV_DEVICE_NODE) +IMPLEMENT_LIST_REMOVE(PVRSRV_DEVICE_NODE) diff --git a/drivers/mcst/gpu-imgtec/services/server/common/mmu_common.c b/drivers/mcst/gpu-imgtec/services/server/common/mmu_common.c new file mode 100644 index 000000000000..bfc22c87134f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/mmu_common.c @@ -0,0 +1,4349 @@ +/*************************************************************************/ /*! +@File +@Title Common MMU Management +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements basic low level control of MMU. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ /***************************************************************************/ + +#include "devicemem_server_utils.h" + +/* Our own interface */ +#include "mmu_common.h" +#include "pvr_ricommon.h" + +#include "rgxmmudefs_km.h" +/* +Interfaces to other modules: + +Let's keep this graph up-to-date: + + +-----------+ + | devicemem | + +-----------+ + | + +============+ + | mmu_common | + +============+ + | + +-----------------+ + | | + +---------+ +----------+ + | pmr | | device | + +---------+ +----------+ + */ + +#include "img_types.h" +#include "img_defs.h" +#include "osfunc.h" +#include "allocmem.h" +#if defined(PDUMP) +#include "pdump_km.h" +#include "pdump_physmem.h" +#endif +#include "pmr.h" +/* include/ */ +#include "pvr_debug.h" +#include "pvr_notifier.h" +#include "pvrsrv_error.h" +#include "pvrsrv.h" +#include "htbuffer.h" + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#include "physmem_lma.h" +#endif + +#include "dllist.h" + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#include "proc_stats.h" +#endif + +/* #define MMU_OBJECT_REFCOUNT_DEBUGING 1 */ +#if defined(MMU_OBJECT_REFCOUNT_DEBUGING) +#define MMU_OBJ_DBG(x) PVR_DPF(x) +#else +#define MMU_OBJ_DBG(x) +#endif + +/*! + * Refcounted structure that is shared between the context and + * the cleanup thread items. + * It is used to keep track of all cleanup items and whether the creating + * MMU context has been destroyed and therefore is not allowed to be + * accessed any more. + * + * The cleanup thread is used to defer the freeing of the page tables + * because we have to make sure that the MMU cache has been invalidated. + * If we don't take care of this the MMU might partially access cached + * and uncached tables which might lead to inconsistencies and in the + * worst case to MMU pending faults on random memory. + */ +typedef struct _MMU_CTX_CLEANUP_DATA_ +{ + /*! Refcount to know when this structure can be destroyed */ + ATOMIC_T iRef; + /*! 
Protect items in this structure, especially the refcount */ + POS_LOCK hCleanupLock; + /*! List of all cleanup items currently in flight */ + DLLIST_NODE sMMUCtxCleanupItemsHead; + /*! Was the MMU context destroyed and should not be accessed any more? */ + IMG_BOOL bMMUContextExists; +#if defined(SUPPORT_GPUVIRT_VALIDATION) + /*! Associated OSid for this context */ + IMG_UINT32 ui32OSid; +#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ +} MMU_CTX_CLEANUP_DATA; + + +/*! + * Structure holding one or more page tables that need to be + * freed after the MMU cache has been flushed which is signalled when + * the stored sync has a value that is <= the required value. + */ +typedef struct _MMU_CLEANUP_ITEM_ +{ + /*! Cleanup thread data */ + PVRSRV_CLEANUP_THREAD_WORK sCleanupThreadFn; + /*! List to hold all the MMU_MEMORY_MAPPINGs, i.e. page tables */ + DLLIST_NODE sMMUMappingHead; + /*! Node of the cleanup item list for the context */ + DLLIST_NODE sMMUCtxCleanupItem; + /* Pointer to the cleanup meta data */ + MMU_CTX_CLEANUP_DATA *psMMUCtxCleanupData; + /* Sync to query if the MMU cache was flushed */ + PVRSRV_CLIENT_SYNC_PRIM *psSync; + /*! The update value of the sync to signal that the cache was flushed */ + IMG_UINT32 uiRequiredSyncVal; + /*! The device node needed to free the page tables */ + PVRSRV_DEVICE_NODE *psDevNode; +} MMU_CLEANUP_ITEM; + +/*! + All physical allocations and frees are relative to this context, so + we would get all the allocations of PCs, PDs, and PTs from the same + RA. + + We have one per MMU context in case we have mixed UMA/LMA devices + within the same system. + */ +typedef struct _MMU_PHYSMEM_CONTEXT_ +{ + /*! Associated MMU_CONTEXT */ + struct _MMU_CONTEXT_ *psMMUContext; + + /*! Parent device node */ + PVRSRV_DEVICE_NODE *psDevNode; + + /*! Refcount so we know when to free up the arena */ + IMG_UINT32 uiNumAllocations; + + /*! Arena from which physical memory is derived */ + RA_ARENA *psPhysMemRA; + /*! 
Arena name */ + IMG_CHAR *pszPhysMemRAName; + /*! Size of arena name string */ + size_t uiPhysMemRANameAllocSize; + + /*! Meta data for deferred cleanup */ + MMU_CTX_CLEANUP_DATA *psCleanupData; + /*! Temporary list of all deferred MMU_MEMORY_MAPPINGs. */ + DLLIST_NODE sTmpMMUMappingHead; + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + IMG_UINT32 ui32OSid; + IMG_UINT32 ui32OSidReg; + IMG_BOOL bOSidAxiProt; +#endif + +} MMU_PHYSMEM_CONTEXT; + +/*! + Mapping structure for MMU memory allocation + */ +typedef struct _MMU_MEMORY_MAPPING_ +{ + /*! Physmem context to allocate from */ + MMU_PHYSMEM_CONTEXT *psContext; + /*! OS/system Handle for this allocation */ + PG_HANDLE sMemHandle; + /*! CPU virtual address of this allocation */ + void *pvCpuVAddr; + /*! Device physical address of this allocation */ + IMG_DEV_PHYADDR sDevPAddr; + /*! Size of this allocation */ + size_t uiSize; + /*! Number of current mappings of this allocation */ + IMG_UINT32 uiCpuVAddrRefCount; + /*! Node for the defer free list */ + DLLIST_NODE sMMUMappingItem; +} MMU_MEMORY_MAPPING; + +/*! + Memory descriptor for MMU objects. There can be more than one memory + descriptor per MMU memory allocation. + */ +typedef struct _MMU_MEMORY_DESC_ +{ + /* NB: bValid is set if this descriptor describes physical + memory. This allows "empty" descriptors to exist, such that we + can allocate them in batches. */ + /*! Does this MMU object have physical backing */ + IMG_BOOL bValid; + /*! Device Physical address of physical backing */ + IMG_DEV_PHYADDR sDevPAddr; + /*! CPU virtual address of physical backing */ + void *pvCpuVAddr; + /*! Mapping data for this MMU object */ + MMU_MEMORY_MAPPING *psMapping; + /*! Memdesc offset into the psMapping */ + IMG_UINT32 uiOffset; + /*! Size of the Memdesc */ + IMG_UINT32 uiSize; +} MMU_MEMORY_DESC; + +/*! + MMU levelx structure. This is generic and is used + for all levels (PC, PD, PT). + */ +typedef struct _MMU_Levelx_INFO_ +{ + /*! 
The Number of entries in this level */ + IMG_UINT32 ui32NumOfEntries; + + /*! Number of times this level has been reference. Note: For Level1 (PTE) + we still take/drop the reference when setting up the page tables rather + then at map/unmap time as this simplifies things */ + IMG_UINT32 ui32RefCount; + + /*! MemDesc for this level */ + MMU_MEMORY_DESC sMemDesc; + + /*! Array of infos for the next level. Must be last member in structure */ + struct _MMU_Levelx_INFO_ *apsNextLevel[1]; +} MMU_Levelx_INFO; + +/*! + MMU context structure + */ +struct _MMU_CONTEXT_ +{ + /*! Originating Connection */ + CONNECTION_DATA *psConnection; + + MMU_DEVICEATTRIBS *psDevAttrs; + + /*! For allocation and deallocation of the physical memory where + the pagetables live */ + struct _MMU_PHYSMEM_CONTEXT_ *psPhysMemCtx; + +#if defined(PDUMP) + /*! PDump context ID (required for PDump commands with virtual addresses) */ + IMG_UINT32 uiPDumpContextID; + + /*! The refcount of the PDump context ID */ + IMG_UINT32 ui32PDumpContextIDRefCount; +#endif + + /*! MMU cache invalidation flags (only used on Volcanic driver) */ + ATOMIC_T sCacheFlags; + + /*! Lock to ensure exclusive access when manipulating the MMU context or + * reading and using its content + */ + POS_LOCK hLock; + + /*! Base level info structure. Must be last member in structure */ + MMU_Levelx_INFO sBaseLevelInfo; + /* NO OTHER MEMBERS AFTER THIS STRUCTURE ! */ +}; + +static const IMG_DEV_PHYADDR gsBadDevPhyAddr = {MMU_BAD_PHYS_ADDR}; + +#if defined(DEBUG) +#include "log2.h" +#endif + + +/***************************************************************************** + * Utility functions * + *****************************************************************************/ + +/*************************************************************************/ /*! +@Function _FreeMMUMapping + +@Description Free a given dllist of MMU_MEMORY_MAPPINGs and the page tables + they represent. 
+ +@Input psDevNode Device node + +@Input psTmpMMUMappingHead List of MMU_MEMORY_MAPPINGs to free + */ +/*****************************************************************************/ +static void +_FreeMMUMapping(PVRSRV_DEVICE_NODE *psDevNode, + PDLLIST_NODE psTmpMMUMappingHead) +{ + PDLLIST_NODE psNode, psNextNode; + + /* Free the current list unconditionally */ + dllist_foreach_node(psTmpMMUMappingHead, + psNode, + psNextNode) + { + MMU_MEMORY_MAPPING *psMapping = IMG_CONTAINER_OF(psNode, + MMU_MEMORY_MAPPING, + sMMUMappingItem); + + psDevNode->sDevMMUPxSetup.pfnDevPxFree(psDevNode, &psMapping->sMemHandle); + dllist_remove_node(psNode); + OSFreeMem(psMapping); + } +} + +/*************************************************************************/ /*! +@Function _CleanupThread_FreeMMUMapping + +@Description Function to be executed by the cleanup thread to free + MMU_MEMORY_MAPPINGs after the MMU cache has been invalidated. + + This function will request a MMU cache invalidate once and + retry to free the MMU_MEMORY_MAPPINGs until the invalidate + has been executed. + + If the memory context that created this cleanup item has been + destroyed in the meantime this function will directly free the + MMU_MEMORY_MAPPINGs without waiting for any MMU cache + invalidation. + +@Input pvData Cleanup data in form of a MMU_CLEANUP_ITEM + +@Return PVRSRV_OK if successful otherwise PVRSRV_ERROR_RETRY + */ +/*****************************************************************************/ +static PVRSRV_ERROR +_CleanupThread_FreeMMUMapping(void* pvData) +{ + PVRSRV_ERROR eError; + MMU_CLEANUP_ITEM *psCleanup = (MMU_CLEANUP_ITEM *)pvData; + MMU_CTX_CLEANUP_DATA *psMMUCtxCleanupData = psCleanup->psMMUCtxCleanupData; + PVRSRV_DEVICE_NODE *psDevNode = psCleanup->psDevNode; + IMG_BOOL bFreeNow; + IMG_UINT32 uiSyncCurrent; + IMG_UINT32 uiSyncReq; + + OSLockAcquire(psMMUCtxCleanupData->hCleanupLock); + + /* Don't attempt to free anything when the context has been destroyed. 
+ * Especially don't access any device specific structures any more!*/ + if (!psMMUCtxCleanupData->bMMUContextExists) + { + OSFreeMem(psCleanup); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_OK, e0); + } + + if (psCleanup->psSync == NULL) + { + /* Kick to invalidate the MMU caches and get sync info */ + eError = psDevNode->pfnMMUCacheInvalidateKick(psDevNode, + &psCleanup->uiRequiredSyncVal, + IMG_TRUE); + if (eError != PVRSRV_OK) + { + OSLockRelease(psMMUCtxCleanupData->hCleanupLock); + return PVRSRV_ERROR_RETRY; + } + psCleanup->psSync = psDevNode->psMMUCacheSyncPrim; + } + + uiSyncCurrent = OSReadDeviceMem32(psCleanup->psSync->pui32LinAddr); + uiSyncReq = psCleanup->uiRequiredSyncVal; + + /* Has the invalidate executed */ + bFreeNow = (uiSyncCurrent >= uiSyncReq) ? + /* ... with the counter wrapped around ... + * There can't be 3*1024*1024 transactions completed, so consider wrapped */ + (((uiSyncCurrent - uiSyncReq) > 0xF0000000UL)? IMG_FALSE : IMG_TRUE): + /* There can't be 3*1024*1024 transactions pending, so consider wrapped */ + (((uiSyncReq - uiSyncCurrent) > 0xF0000000UL)? 
IMG_TRUE : IMG_FALSE); + +#if defined(NO_HARDWARE) + /* In NOHW the syncs will never be updated so just free the tables */ + bFreeNow = IMG_TRUE; +#endif + /* If the Invalidate operation is not completed, check if the operation timed out */ + if (!bFreeNow) + { + /* If the time left for the completion of invalidate operation is + * within 500ms of time-out, consider the operation as timed out */ + if ((psCleanup->sCleanupThreadFn.ui32TimeEnd - psCleanup->sCleanupThreadFn.ui32TimeStart - 500) <= + (OSClockms() - psCleanup->sCleanupThreadFn.ui32TimeStart)) + { + /* Consider the operation is timed out */ + bFreeNow = IMG_TRUE; + } + } + + /* Free if the invalidate operation completed or the operation itself timed out */ + if (bFreeNow) + { + _FreeMMUMapping(psDevNode, &psCleanup->sMMUMappingHead); + + dllist_remove_node(&psCleanup->sMMUCtxCleanupItem); + OSFreeMem(psCleanup); + + eError = PVRSRV_OK; + } + else + { + eError = PVRSRV_ERROR_RETRY; + } + +e0: + + /* If this cleanup task has been successfully executed we can + * decrease the context cleanup data refcount. Successfully + * means here that the MMU_MEMORY_MAPPINGs have been freed by + * either this cleanup task of when the MMU context has been + * destroyed. */ + if (eError == PVRSRV_OK) + { + OSLockRelease(psMMUCtxCleanupData->hCleanupLock); + + if (OSAtomicDecrement(&psMMUCtxCleanupData->iRef) == 0) + { + OSLockDestroy(psMMUCtxCleanupData->hCleanupLock); + OSFreeMem(psMMUCtxCleanupData); + } + } + else + { + OSLockRelease(psMMUCtxCleanupData->hCleanupLock); + } + + + return eError; +} + +/*************************************************************************/ /*! +@Function _SetupCleanup_FreeMMUMapping + +@Description Setup a cleanup item for the cleanup thread that will + kick off a MMU invalidate request and free the associated + MMU_MEMORY_MAPPINGs when the invalidate was successful. 
+ +@Input psPhysMemCtx The current MMU physmem context + */ +/*****************************************************************************/ +static void +_SetupCleanup_FreeMMUMapping(MMU_PHYSMEM_CONTEXT *psPhysMemCtx) +{ + + MMU_CLEANUP_ITEM *psCleanupItem; + MMU_CTX_CLEANUP_DATA *psCleanupData = psPhysMemCtx->psCleanupData; + PVRSRV_DEVICE_NODE *psDevNode = psPhysMemCtx->psDevNode; + + if (dllist_is_empty(&psPhysMemCtx->sTmpMMUMappingHead)) + { + goto e0; + } + +#if !defined(SUPPORT_MMU_PENDING_FAULT_PROTECTION) + /* If users deactivated this we immediately free the page tables */ + goto e1; +#endif + + /* Don't defer the freeing if we are currently unloading the driver + * or if the sync has been destroyed */ + if (PVRSRVGetPVRSRVData()->bUnload || + psDevNode->psMMUCacheSyncPrim == NULL) + { + goto e1; + } + + /* Allocate a cleanup item */ + psCleanupItem = OSAllocMem(sizeof(*psCleanupItem)); + if (!psCleanupItem) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to get memory for deferred page table cleanup. " + "Freeing tables immediately", + __func__)); + goto e1; + } + + /* Set sync to NULL to indicate we did not interact with + * the FW yet. Kicking off an MMU cache invalidate should + * be done in the cleanup thread to not waste time here. 
*/ + psCleanupItem->psSync = NULL; + psCleanupItem->uiRequiredSyncVal = 0; + psCleanupItem->psDevNode = psDevNode; + psCleanupItem->psMMUCtxCleanupData = psCleanupData; + + OSAtomicIncrement(&psCleanupData->iRef); + + /* Move the page tables to free to the cleanup item */ + dllist_replace_head(&psPhysMemCtx->sTmpMMUMappingHead, + &psCleanupItem->sMMUMappingHead); + + /* Add the cleanup item itself to the context list */ + dllist_add_to_tail(&psCleanupData->sMMUCtxCleanupItemsHead, + &psCleanupItem->sMMUCtxCleanupItem); + + /* Setup the cleanup thread data and add the work item */ + psCleanupItem->sCleanupThreadFn.pfnFree = _CleanupThread_FreeMMUMapping; + psCleanupItem->sCleanupThreadFn.pvData = psCleanupItem; + psCleanupItem->sCleanupThreadFn.bDependsOnHW = IMG_TRUE; + CLEANUP_THREAD_SET_RETRY_TIMEOUT(&psCleanupItem->sCleanupThreadFn, + CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT); + + PVRSRVCleanupThreadAddWork(&psCleanupItem->sCleanupThreadFn); + + return; + +e1: + /* Free the page tables now */ + _FreeMMUMapping(psDevNode, &psPhysMemCtx->sTmpMMUMappingHead); +e0: + return; +} + +/*************************************************************************/ /*! 
+@Function _CalcPCEIdx + +@Description Calculate the page catalogue index + +@Input sDevVAddr Device virtual address + +@Input psDevVAddrConfig Configuration of the virtual address + +@Input bRoundUp Round up the index + +@Return The page catalogue index + */ +/*****************************************************************************/ +static IMG_UINT32 _CalcPCEIdx(IMG_DEV_VIRTADDR sDevVAddr, + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig, + IMG_BOOL bRoundUp) +{ + IMG_DEV_VIRTADDR sTmpDevVAddr; + IMG_UINT32 ui32RetVal; + + sTmpDevVAddr = sDevVAddr; + + if (bRoundUp) + { + sTmpDevVAddr.uiAddr--; + } + ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPCIndexMask) + >> psDevVAddrConfig->uiPCIndexShift); + + if (bRoundUp) + { + ui32RetVal++; + } + + return ui32RetVal; +} + + +/*************************************************************************/ /*! +@Function _CalcPDEIdx + +@Description Calculate the page directory index + +@Input sDevVAddr Device virtual address + +@Input psDevVAddrConfig Configuration of the virtual address + +@Input bRoundUp Round up the index + +@Return The page directory index + */ +/*****************************************************************************/ +static IMG_UINT32 _CalcPDEIdx(IMG_DEV_VIRTADDR sDevVAddr, + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig, + IMG_BOOL bRoundUp) +{ + IMG_DEV_VIRTADDR sTmpDevVAddr; + IMG_UINT32 ui32RetVal; + + sTmpDevVAddr = sDevVAddr; + + if (bRoundUp) + { + sTmpDevVAddr.uiAddr--; + } + ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPDIndexMask) + >> psDevVAddrConfig->uiPDIndexShift); + + if (bRoundUp) + { + ui32RetVal++; + } + + return ui32RetVal; +} + + +/*************************************************************************/ /*! 
+@Function _CalcPTEIdx + +@Description Calculate the page entry index + +@Input sDevVAddr Device virtual address + +@Input psDevVAddrConfig Configuration of the virtual address + +@Input bRoundUp Round up the index + +@Return The page entry index + */ +/*****************************************************************************/ +static IMG_UINT32 _CalcPTEIdx(IMG_DEV_VIRTADDR sDevVAddr, + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig, + IMG_BOOL bRoundUp) +{ + IMG_DEV_VIRTADDR sTmpDevVAddr; + IMG_UINT32 ui32RetVal; + + sTmpDevVAddr = sDevVAddr; + sTmpDevVAddr.uiAddr -= psDevVAddrConfig->uiOffsetInBytes; + if (bRoundUp) + { + sTmpDevVAddr.uiAddr--; + } + ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPTIndexMask) + >> psDevVAddrConfig->uiPTIndexShift); + + if (bRoundUp) + { + ui32RetVal++; + } + + return ui32RetVal; +} + +/***************************************************************************** + * MMU memory allocation/management functions (mem desc) * + *****************************************************************************/ + +/*************************************************************************/ /*! +@Function _MMU_PhysMem_RAImportAlloc + +@Description Imports MMU Px memory into the RA. This is where the + actual allocation of physical memory happens. + +@Input hArenaHandle Handle that was passed in during the + creation of the RA + +@Input uiSize Size of the memory to import + +@Input uiFlags Flags that where passed in the allocation. 
+ +@Output puiBase The address of where to insert this import + +@Output puiActualSize The actual size of the import + +@Output phPriv Handle which will be passed back when + this import is freed + +@Return PVRSRV_OK if import alloc was successful + */ +/*****************************************************************************/ +static PVRSRV_ERROR _MMU_PhysMem_RAImportAlloc(RA_PERARENA_HANDLE hArenaHandle, + RA_LENGTH_T uiSize, + RA_FLAGS_T uiFlags, + const IMG_CHAR *pszAnnotation, + RA_BASE_T *puiBase, + RA_LENGTH_T *puiActualSize, + RA_PERISPAN_HANDLE *phPriv) +{ + MMU_PHYSMEM_CONTEXT *psPhysMemCtx = (MMU_PHYSMEM_CONTEXT *)hArenaHandle; + PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psPhysMemCtx->psDevNode; + MMU_MEMORY_MAPPING *psMapping; + PVRSRV_ERROR eError; + IMG_UINT32 uiPid = 0; + + PVR_UNREFERENCED_PARAMETER(pszAnnotation); + PVR_UNREFERENCED_PARAMETER(uiFlags); + + PVR_ASSERT(psDevNode != NULL); + PVR_GOTO_IF_INVALID_PARAM(psDevNode, eError, e0); + + psMapping = OSAllocMem(sizeof(MMU_MEMORY_MAPPING)); + PVR_GOTO_IF_NOMEM(psMapping, eError, e0); + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + uiPid = psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT ? + PVR_SYS_ALLOC_PID : OSGetCurrentClientProcessIDKM(); +#endif + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + /* + * Store the OSid in the PG_HANDLE.uiOSid field for use by the + * pfnDevPxFree() routine. 
+ */ + psMapping->sMemHandle.uiOSid = psPhysMemCtx->ui32OSid; + eError = psDevNode->sDevMMUPxSetup.pfnDevPxAllocGPV(psDevNode, + TRUNCATE_64BITS_TO_SIZE_T(uiSize), + &psMapping->sMemHandle, + &psMapping->sDevPAddr, + psPhysMemCtx->ui32OSid, + uiPid); +#else + eError = psDevNode->sDevMMUPxSetup.pfnDevPxAlloc(psDevNode, + TRUNCATE_64BITS_TO_SIZE_T(uiSize), + &psMapping->sMemHandle, + &psMapping->sDevPAddr, + uiPid); +#endif + if (eError != PVRSRV_OK) + { +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + PVRSRVStatsUpdateOOMStats(PVRSRV_PROCESS_STAT_TYPE_OOM_PHYSMEM_COUNT, + OSGetCurrentClientProcessIDKM()); +#endif + goto e1; + } + + psMapping->psContext = psPhysMemCtx; + psMapping->uiSize = TRUNCATE_64BITS_TO_SIZE_T(uiSize); + + psMapping->uiCpuVAddrRefCount = 0; + + *phPriv = (RA_PERISPAN_HANDLE) psMapping; + + /* Note: This assumes this memory never gets paged out */ + *puiBase = (RA_BASE_T)psMapping->sDevPAddr.uiAddr; + *puiActualSize = uiSize; + + return PVRSRV_OK; + +e1: + OSFreeMem(psMapping); +e0: + return eError; +} + +/*************************************************************************/ /*! +@Function _MMU_PhysMem_RAImportFree + +@Description Imports MMU Px memory into the RA. This is where the + actual free of physical memory happens. 
+ +@Input hArenaHandle Handle that was passed in during the + creation of the RA + +@Input puiBase The address of where to insert this import + +@Output phPriv Private data that the import alloc provided + +@Return None + */ +/*****************************************************************************/ +static void _MMU_PhysMem_RAImportFree(RA_PERARENA_HANDLE hArenaHandle, + RA_BASE_T uiBase, + RA_PERISPAN_HANDLE hPriv) +{ + MMU_MEMORY_MAPPING *psMapping = (MMU_MEMORY_MAPPING *)hPriv; + MMU_PHYSMEM_CONTEXT *psPhysMemCtx = (MMU_PHYSMEM_CONTEXT *)hArenaHandle; + + PVR_UNREFERENCED_PARAMETER(uiBase); + + /* Check we have dropped all CPU mappings */ + PVR_ASSERT(psMapping->uiCpuVAddrRefCount == 0); + + /* Add mapping to defer free list */ + psMapping->psContext = NULL; + dllist_add_to_tail(&psPhysMemCtx->sTmpMMUMappingHead, &psMapping->sMMUMappingItem); +} + +/*************************************************************************/ /*! +@Function _MMU_PhysMemAlloc + +@Description Allocates physical memory for MMU objects + +@Input psPhysMemCtx Physmem context to do the allocation from + +@Output psMemDesc Allocation description + +@Input uiBytes Size of the allocation in bytes + +@Input uiAlignment Alignment requirement of this allocation + +@Return PVRSRV_OK if allocation was successful + */ +/*****************************************************************************/ + +static PVRSRV_ERROR _MMU_PhysMemAlloc(MMU_PHYSMEM_CONTEXT *psPhysMemCtx, + MMU_MEMORY_DESC *psMemDesc, + size_t uiBytes, + size_t uiAlignment) +{ + PVRSRV_ERROR eError; + RA_BASE_T uiPhysAddr; + + PVR_RETURN_IF_INVALID_PARAM(psMemDesc); + PVR_RETURN_IF_INVALID_PARAM(!psMemDesc->bValid); + + eError = RA_Alloc(psPhysMemCtx->psPhysMemRA, + uiBytes, + RA_NO_IMPORT_MULTIPLIER, + 0, /* flags */ + uiAlignment, + "", + &uiPhysAddr, + NULL, + (RA_PERISPAN_HANDLE *)&psMemDesc->psMapping); + + PVR_LOG_RETURN_IF_ERROR(eError, "RA_Alloc"); + + psMemDesc->bValid = IMG_TRUE; + psMemDesc->pvCpuVAddr = NULL; + 
psMemDesc->sDevPAddr.uiAddr = (IMG_UINT64) uiPhysAddr; + + if (psMemDesc->psMapping->uiCpuVAddrRefCount == 0) + { + eError = psPhysMemCtx->psDevNode->sDevMMUPxSetup.pfnDevPxMap(psPhysMemCtx->psDevNode, + &psMemDesc->psMapping->sMemHandle, + psMemDesc->psMapping->uiSize, + &psMemDesc->psMapping->sDevPAddr, + &psMemDesc->psMapping->pvCpuVAddr); + if (eError != PVRSRV_OK) + { + RA_Free(psPhysMemCtx->psPhysMemRA, psMemDesc->sDevPAddr.uiAddr); + return eError; + } + } + + psMemDesc->psMapping->uiCpuVAddrRefCount++; + psMemDesc->uiOffset = (psMemDesc->sDevPAddr.uiAddr - psMemDesc->psMapping->sDevPAddr.uiAddr); + psMemDesc->pvCpuVAddr = (IMG_UINT8 *)psMemDesc->psMapping->pvCpuVAddr + psMemDesc->uiOffset; + psMemDesc->uiSize = uiBytes; + PVR_ASSERT(psMemDesc->pvCpuVAddr != NULL); + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! +@Function _MMU_PhysMemFree + +@Description Allocates physical memory for MMU objects + +@Input psPhysMemCtx Physmem context to do the free on + +@Input psMemDesc Allocation description + +@Return None + */ +/*****************************************************************************/ +static void _MMU_PhysMemFree(MMU_PHYSMEM_CONTEXT *psPhysMemCtx, + MMU_MEMORY_DESC *psMemDesc) +{ + RA_BASE_T uiPhysAddr; + + PVR_ASSERT(psMemDesc->bValid); + + if (--psMemDesc->psMapping->uiCpuVAddrRefCount == 0) + { + psPhysMemCtx->psDevNode->sDevMMUPxSetup.pfnDevPxUnMap(psPhysMemCtx->psDevNode, + &psMemDesc->psMapping->sMemHandle, + psMemDesc->psMapping->pvCpuVAddr); + } + + psMemDesc->pvCpuVAddr = NULL; + + uiPhysAddr = psMemDesc->sDevPAddr.uiAddr; + RA_Free(psPhysMemCtx->psPhysMemRA, uiPhysAddr); + + psMemDesc->bValid = IMG_FALSE; +} + + +/***************************************************************************** + * MMU object allocation/management functions * + *****************************************************************************/ + +static INLINE PVRSRV_ERROR 
_MMU_ConvertDevMemFlags(IMG_BOOL bInvalidate, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + MMU_PROTFLAGS_T *uiMMUProtFlags, + MMU_CONTEXT *psMMUContext) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 uiGPUCacheMode; + PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + + /* Do flag conversion between devmem flags and MMU generic flags */ + if (bInvalidate == IMG_FALSE) + { + *uiMMUProtFlags |= ((uiMappingFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK) + >> PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET) + << MMU_PROTFLAGS_DEVICE_OFFSET; + + if (PVRSRV_CHECK_GPU_READABLE(uiMappingFlags)) + { + *uiMMUProtFlags |= MMU_PROTFLAGS_READABLE; + } + if (PVRSRV_CHECK_GPU_WRITEABLE(uiMappingFlags)) + { + *uiMMUProtFlags |= MMU_PROTFLAGS_WRITEABLE; + } + + eError = DevmemDeviceCacheMode(psDevNode, uiMappingFlags, &uiGPUCacheMode); + PVR_RETURN_IF_ERROR(eError); + + switch (uiGPUCacheMode) + { + case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED: + case PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE: + break; + case PVRSRV_MEMALLOCFLAG_GPU_CACHED: + *uiMMUProtFlags |= MMU_PROTFLAGS_CACHED; + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Wrong parameters", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (DevmemDeviceCacheCoherency(psDevNode, uiMappingFlags)) + { + *uiMMUProtFlags |= MMU_PROTFLAGS_CACHE_COHERENT; + } + /* Only compile if RGX_FEATURE_MIPS_BIT_MASK is defined to avoid compilation + * errors on volcanic cores. + */ + #if defined(SUPPORT_RGX) && defined(RGX_FEATURE_MIPS_BIT_MASK) + if ((psDevNode->pfnCheckDeviceFeature) && + PVRSRV_IS_FEATURE_SUPPORTED(psDevNode, MIPS)) + { + /* If we are allocating on the MMU of the firmware processor, the + * cached/uncached attributes must depend on the FIRMWARE_CACHED + * allocation flag. 
+ */ + if (psMMUContext->psDevAttrs == psDevNode->psFirmwareMMUDevAttrs) + { + if (uiMappingFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)) + { + *uiMMUProtFlags |= MMU_PROTFLAGS_CACHED; + } + else + { + *uiMMUProtFlags &= ~MMU_PROTFLAGS_CACHED; + + } + *uiMMUProtFlags &= ~MMU_PROTFLAGS_CACHE_COHERENT; + } + } +#endif + } + else + { + *uiMMUProtFlags |= MMU_PROTFLAGS_INVALID; + } + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! +@Function _PxMemAlloc + +@Description Allocates physical memory for MMU objects, initialises + and PDumps it. + +@Input psMMUContext MMU context + +@Input uiNumEntries Number of entries to allocate + +@Input psConfig MMU Px config + +@Input eMMULevel MMU level that that allocation is for + +@Output psMemDesc Description of allocation + +@Return PVRSRV_OK if allocation was successful + */ +/*****************************************************************************/ +static PVRSRV_ERROR _PxMemAlloc(MMU_CONTEXT *psMMUContext, + IMG_UINT32 uiNumEntries, + const MMU_PxE_CONFIG *psConfig, + MMU_LEVEL eMMULevel, + MMU_MEMORY_DESC *psMemDesc, + IMG_UINT32 uiLog2Align) +{ + PVRSRV_ERROR eError; + size_t uiBytes; + size_t uiAlign; + PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + + PVR_ASSERT(psConfig->uiBytesPerEntry != 0); + + uiBytes = uiNumEntries * psConfig->uiBytesPerEntry; + /* We need here the alignment of the previous level because that is the entry for we generate here */ + uiAlign = 1 << uiLog2Align; + + /* + * If the hardware specifies an alignment requirement for a page table then + * it also requires that all memory up to the next aligned address is + * zeroed. + * + * Failing to do this can result in uninitialised data outside of the actual + * page table range being read by the MMU and treated as valid, e.g. the + * pending flag. 
+ * + * Typically this will affect 1MiB, 2MiB PT pages which have a size of 16 + * and 8 bytes respectively but an alignment requirement of 64 bytes each. + */ + uiBytes = PVR_ALIGN(uiBytes, uiAlign); + + /* allocate the object */ + eError = _MMU_PhysMemAlloc(psMMUContext->psPhysMemCtx, + psMemDesc, uiBytes, uiAlign); + if (eError != PVRSRV_OK) + { + PVR_LOG_GOTO_WITH_ERROR("_MMU_PhysMemAlloc", eError, PVRSRV_ERROR_OUT_OF_MEMORY, e0); + } + + /* + Clear the object + Note: if any MMUs are cleared with non-zero values then will need a + custom clear function + Note: 'Cached' is wrong for the LMA + ARM64 combination, but this is + unlikely + */ + OSCachedMemSet(psMemDesc->pvCpuVAddr, 0, uiBytes); + + eError = psDevNode->sDevMMUPxSetup.pfnDevPxClean(psDevNode, + &psMemDesc->psMapping->sMemHandle, + psMemDesc->uiOffset, + psMemDesc->uiSize); + PVR_GOTO_IF_ERROR(eError, e1); + +#if defined(PDUMP) + PDUMPCOMMENT("Alloc MMU object"); + + PDumpMMUMalloc(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName, + eMMULevel, + &psMemDesc->sDevPAddr, + uiBytes, + uiAlign, + psMMUContext->psDevAttrs->eMMUType); + + PDumpMMUDumpPxEntries(eMMULevel, + psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName, + psMemDesc->pvCpuVAddr, + psMemDesc->sDevPAddr, + 0, + uiNumEntries, + NULL, NULL, 0, /* pdump symbolic info is irrelevant here */ + psConfig->uiBytesPerEntry, + uiLog2Align, + psConfig->uiAddrShift, + psConfig->uiAddrMask, + psConfig->uiProtMask, + psConfig->uiValidEnMask, + 0, + psMMUContext->psDevAttrs->eMMUType); +#endif + + return PVRSRV_OK; +e1: + _MMU_PhysMemFree(psMMUContext->psPhysMemCtx, + psMemDesc); +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +/*************************************************************************/ /*! +@Function _PxMemFree + +@Description Frees physical memory for MMU objects, de-initialises + and PDumps it. 
+ +@Input psMemDesc Description of allocation + +@Return PVRSRV_OK if allocation was successful + */ +/*****************************************************************************/ + +static void _PxMemFree(MMU_CONTEXT *psMMUContext, + MMU_MEMORY_DESC *psMemDesc, MMU_LEVEL eMMULevel) +{ +#if defined(MMU_CLEARMEM_ON_FREE) + /* + Clear the MMU object + Note: if any MMUs are cleared with non-zero values then will need a + custom clear function + Note: 'Cached' is wrong for the LMA + ARM64 combination, but this is + unlikely + */ + OSCachedMemSet(psMemDesc->pvCpuVAddr, 0, psMemDesc->ui32Bytes); + +#if defined(PDUMP) + PDUMPCOMMENT("Clear MMU object before freeing it"); +#endif +#endif/* MMU_CLEARMEM_ON_FREE */ + +#if defined(PDUMP) + PDUMPCOMMENT("Free MMU object"); + PDumpMMUFree(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName, + eMMULevel, + &psMemDesc->sDevPAddr, + psMMUContext->psDevAttrs->eMMUType); +#else + PVR_UNREFERENCED_PARAMETER(eMMULevel); +#endif + /* free the PC */ + _MMU_PhysMemFree(psMMUContext->psPhysMemCtx, psMemDesc); +} + +static INLINE PVRSRV_ERROR _SetupPTE(MMU_CONTEXT *psMMUContext, + MMU_Levelx_INFO *psLevel, + IMG_UINT32 uiIndex, + const MMU_PxE_CONFIG *psConfig, + const IMG_DEV_PHYADDR *psDevPAddr, + IMG_BOOL bUnmap, +#if defined(PDUMP) + const IMG_CHAR *pszMemspaceName, + const IMG_CHAR *pszSymbolicAddr, + IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset, +#endif + IMG_UINT64 uiProtFlags) +{ + MMU_MEMORY_DESC *psMemDesc = &psLevel->sMemDesc; + IMG_UINT64 ui64PxE64; + IMG_UINT64 uiAddr = psDevPAddr->uiAddr; + PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + + if (psDevNode->pfnValidateOrTweakPhysAddrs) + { + PVRSRV_ERROR eErr = psDevNode->pfnValidateOrTweakPhysAddrs(psDevNode, + psMMUContext->psDevAttrs, + &uiAddr); + /* return if error */ + PVR_LOG_RETURN_IF_ERROR(eErr, "_SetupPTE"); + } + + /* Calculate Entry */ + ui64PxE64 = uiAddr /* Calculate the offset to that base */ + >> psConfig->uiAddrLog2Align /* Shift away the 
useless bits, because the alignment is very coarse and we address by alignment */ + << psConfig->uiAddrShift /* Shift back to fit address in the Px entry */ + & psConfig->uiAddrMask; /* Delete unused bits */ + ui64PxE64 |= uiProtFlags; + + /* Set the entry */ + if (psConfig->uiBytesPerEntry == 8) + { + IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */ + + pui64Px[uiIndex] = ui64PxE64; + } + else if (psConfig->uiBytesPerEntry == 4) + { + IMG_UINT32 *pui32Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */ + + /* assert that the result fits into 32 bits before writing + it into the 32-bit array with a cast */ + PVR_ASSERT(ui64PxE64 == (ui64PxE64 & 0xffffffffU)); + + pui32Px[uiIndex] = (IMG_UINT32) ui64PxE64; + } + else + { + return PVRSRV_ERROR_MMU_CONFIG_IS_WRONG; + } + + + /* Log modification */ + HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE, + HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel), + uiIndex, MMU_LEVEL_1, + HTBLOG_U64_BITS_HIGH(ui64PxE64), HTBLOG_U64_BITS_LOW(ui64PxE64), + !bUnmap); + +#if defined(PDUMP) + PDumpMMUDumpPxEntries(MMU_LEVEL_1, + psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName, + psMemDesc->pvCpuVAddr, + psMemDesc->sDevPAddr, + uiIndex, + 1, + pszMemspaceName, + pszSymbolicAddr, + uiSymbolicAddrOffset, + psConfig->uiBytesPerEntry, + psConfig->uiAddrLog2Align, + psConfig->uiAddrShift, + psConfig->uiAddrMask, + psConfig->uiProtMask, + psConfig->uiValidEnMask, + 0, + psMMUContext->psDevAttrs->eMMUType); +#endif /*PDUMP*/ + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! 
+@Function _SetupPxE + +@Description Sets up an entry of an MMU object to point to the + provided address + +@Input psMMUContext MMU context to operate on + +@Input psLevel Level info for MMU object + +@Input uiIndex Index into the MMU object to setup + +@Input psConfig MMU Px config + +@Input eMMULevel Level of MMU object + +@Input psDevPAddr Address to setup the MMU object to point to + +@Input pszMemspaceName Name of the PDump memory space that the entry + will point to + +@Input pszSymbolicAddr PDump symbolic address that the entry will + point to + +@Input uiProtFlags MMU protection flags + +@Return PVRSRV_OK if the setup was successful + */ +/*****************************************************************************/ +static PVRSRV_ERROR _SetupPxE(MMU_CONTEXT *psMMUContext, + MMU_Levelx_INFO *psLevel, + IMG_UINT32 uiIndex, + const MMU_PxE_CONFIG *psConfig, + MMU_LEVEL eMMULevel, + const IMG_DEV_PHYADDR *psDevPAddr, +#if defined(PDUMP) + const IMG_CHAR *pszMemspaceName, + const IMG_CHAR *pszSymbolicAddr, + IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset, +#endif + MMU_PROTFLAGS_T uiProtFlags, + IMG_UINT32 uiLog2DataPageSize) +{ + PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + MMU_MEMORY_DESC *psMemDesc = &psLevel->sMemDesc; + + IMG_UINT32 (*pfnDerivePxEProt4)(IMG_UINT32); + IMG_UINT64 (*pfnDerivePxEProt8)(IMG_UINT32, IMG_UINT32); + + if (!psDevPAddr) + { + /* Invalidate entry */ + if (~uiProtFlags & MMU_PROTFLAGS_INVALID) + { + PVR_DPF((PVR_DBG_ERROR, "Error, no physical address specified, but not invalidating entry")); + uiProtFlags |= MMU_PROTFLAGS_INVALID; + } + psDevPAddr = &gsBadDevPhyAddr; + } + else + { + if (uiProtFlags & MMU_PROTFLAGS_INVALID) + { + PVR_DPF((PVR_DBG_ERROR, "A physical address was specified when requesting invalidation of entry")); + uiProtFlags |= MMU_PROTFLAGS_INVALID; + } + } + + switch (eMMULevel) + { + case MMU_LEVEL_3: + pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePCEProt4; + pfnDerivePxEProt8 = 
psMMUContext->psDevAttrs->pfnDerivePCEProt8; + break; + + case MMU_LEVEL_2: + pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePDEProt4; + pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePDEProt8; + break; + + case MMU_LEVEL_1: + pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePTEProt4; + pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePTEProt8; + break; + + default: + PVR_DPF((PVR_DBG_ERROR, "%s: invalid MMU level", __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* How big is a PxE in bytes? */ + /* Filling the actual Px entry with an address */ + switch (psConfig->uiBytesPerEntry) + { + case 4: + { + IMG_UINT32 *pui32Px; + IMG_UINT64 ui64PxE64; + + pui32Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */ + + ui64PxE64 = psDevPAddr->uiAddr /* Calculate the offset to that base */ + >> psConfig->uiAddrLog2Align /* Shift away the unnecessary bits of the address */ + << psConfig->uiAddrShift /* Shift back to fit address in the Px entry */ + & psConfig->uiAddrMask; /* Delete unused higher bits */ + + ui64PxE64 |= (IMG_UINT64)pfnDerivePxEProt4(uiProtFlags); + /* assert that the result fits into 32 bits before writing + it into the 32-bit array with a cast */ + PVR_ASSERT(ui64PxE64 == (ui64PxE64 & 0xffffffffU)); + + /* We should never invalidate an invalid page */ + if (uiProtFlags & MMU_PROTFLAGS_INVALID) + { + PVR_ASSERT(pui32Px[uiIndex] != ui64PxE64); + } + pui32Px[uiIndex] = (IMG_UINT32) ui64PxE64; + HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE, + HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel), + uiIndex, eMMULevel, + HTBLOG_U64_BITS_HIGH(ui64PxE64), HTBLOG_U64_BITS_LOW(ui64PxE64), + (uiProtFlags & MMU_PROTFLAGS_INVALID)? 
0: 1); + break; + } + case 8: + { + IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */ + + pui64Px[uiIndex] = psDevPAddr->uiAddr /* Calculate the offset to that base */ + >> psConfig->uiAddrLog2Align /* Shift away the unnecessary bits of the address */ + << psConfig->uiAddrShift /* Shift back to fit address in the Px entry */ + & psConfig->uiAddrMask; /* Delete unused higher bits */ + pui64Px[uiIndex] |= pfnDerivePxEProt8(uiProtFlags, uiLog2DataPageSize); + + HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE, + HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel), + uiIndex, eMMULevel, + HTBLOG_U64_BITS_HIGH(pui64Px[uiIndex]), HTBLOG_U64_BITS_LOW(pui64Px[uiIndex]), + (uiProtFlags & MMU_PROTFLAGS_INVALID)? 0: 1); + break; + } + default: + PVR_DPF((PVR_DBG_ERROR, "%s: PxE size not supported (%d) for level %d", + __func__, psConfig->uiBytesPerEntry, eMMULevel)); + + return PVRSRV_ERROR_MMU_CONFIG_IS_WRONG; + } + +#if defined(PDUMP) + PDumpMMUDumpPxEntries(eMMULevel, + psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName, + psMemDesc->pvCpuVAddr, + psMemDesc->sDevPAddr, + uiIndex, + 1, + pszMemspaceName, + pszSymbolicAddr, + uiSymbolicAddrOffset, + psConfig->uiBytesPerEntry, + psConfig->uiAddrLog2Align, + psConfig->uiAddrShift, + psConfig->uiAddrMask, + psConfig->uiProtMask, + psConfig->uiValidEnMask, + 0, + psMMUContext->psDevAttrs->eMMUType); +#endif + + psDevNode->pfnMMUCacheInvalidate(psDevNode, psMMUContext, + eMMULevel, + uiProtFlags & MMU_PROTFLAGS_INVALID); + + return PVRSRV_OK; +} + +/***************************************************************************** + * MMU host control functions (Level Info) * + *****************************************************************************/ + + +/*************************************************************************/ /*! +@Function _MMU_FreeLevel + +@Description Recursively frees the specified range of Px entries. 
If any + level has its last reference dropped then the MMU object + memory and the MMU_Levelx_Info will be freed. + + At each level we might be crossing a boundary from one Px to + another. The values for auiStartArray should be by used for + the first call into each level and the values in auiEndArray + should only be used in the last call for each level. + In order to determine if this is the first/last call we pass + in bFirst and bLast. + When one level calls down to the next only if bFirst/bLast is set + and it's the first/last iteration of the loop at its level will + bFirst/bLast set for the next recursion. + This means that each iteration has the knowledge of the previous + level which is required. + +@Input psMMUContext MMU context to operate on + +@Input psLevel Level info on which to free the + specified range + +@Input auiStartArray Array of start indexes (one for each level) + +@Input auiEndArray Array of end indexes (one for each level) + +@Input auiEntriesPerPxArray Array of number of entries for the Px + (one for each level) + +@Input apsConfig Array of PxE configs (one for each level) + +@Input aeMMULevel Array of MMU levels (one for each level) + +@Input pui32CurrentLevel Pointer to a variable which is set to our + current level + +@Input uiStartIndex Start index of the range to free + +@Input uiEndIndex End index of the range to free + +@Input bFirst This is the first call for this level + +@Input bLast This is the last call for this level + +@Return IMG_TRUE if the last reference to psLevel was dropped + */ +/*****************************************************************************/ +static IMG_BOOL _MMU_FreeLevel(MMU_CONTEXT *psMMUContext, + MMU_Levelx_INFO *psLevel, + IMG_UINT32 auiStartArray[], + IMG_UINT32 auiEndArray[], + IMG_UINT32 auiEntriesPerPxArray[], + const MMU_PxE_CONFIG *apsConfig[], + MMU_LEVEL aeMMULevel[], + IMG_UINT32 *pui32CurrentLevel, + IMG_UINT32 uiStartIndex, + IMG_UINT32 uiEndIndex, + IMG_BOOL bFirst, + IMG_BOOL 
bLast, + IMG_UINT32 uiLog2DataPageSize) +{ + IMG_UINT32 uiThisLevel = *pui32CurrentLevel; + const MMU_PxE_CONFIG *psConfig = apsConfig[uiThisLevel]; + IMG_UINT32 i; + IMG_BOOL bFreed = IMG_FALSE; + PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + + /* Sanity check */ + PVR_ASSERT(*pui32CurrentLevel < MMU_MAX_LEVEL); + PVR_ASSERT(psLevel != NULL); + + MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_FreeLevel: level = %d, range %d - %d, refcount = %d", + aeMMULevel[uiThisLevel], uiStartIndex, + uiEndIndex, psLevel->ui32RefCount)); + + for (i = uiStartIndex;(i < uiEndIndex) && (psLevel != NULL);i++) + { + if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1) + { + MMU_Levelx_INFO *psNextLevel = psLevel->apsNextLevel[i]; + IMG_UINT32 uiNextStartIndex; + IMG_UINT32 uiNextEndIndex; + IMG_BOOL bNextFirst; + IMG_BOOL bNextLast; + + /* If we're crossing a Px then the start index changes */ + if (bFirst && (i == uiStartIndex)) + { + uiNextStartIndex = auiStartArray[uiThisLevel + 1]; + bNextFirst = IMG_TRUE; + } + else + { + uiNextStartIndex = 0; + bNextFirst = IMG_FALSE; + } + + /* If we're crossing a Px then the end index changes */ + if (bLast && (i == (uiEndIndex - 1))) + { + uiNextEndIndex = auiEndArray[uiThisLevel + 1]; + bNextLast = IMG_TRUE; + } + else + { + uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1]; + bNextLast = IMG_FALSE; + } + + /* Recurse into the next level */ + (*pui32CurrentLevel)++; + if (_MMU_FreeLevel(psMMUContext, psNextLevel, auiStartArray, + auiEndArray, auiEntriesPerPxArray, + apsConfig, aeMMULevel, pui32CurrentLevel, + uiNextStartIndex, uiNextEndIndex, + bNextFirst, bNextLast, uiLog2DataPageSize)) + { + PVRSRV_ERROR eError; + + /* Un-wire the entry */ + eError = _SetupPxE(psMMUContext, + psLevel, + i, + psConfig, + aeMMULevel[uiThisLevel], + NULL, +#if defined(PDUMP) + NULL, /* Only required for data page */ + NULL, /* Only required for data page */ + 0, /* Only required for data page */ +#endif + MMU_PROTFLAGS_INVALID, + 
uiLog2DataPageSize); + + PVR_ASSERT(eError == PVRSRV_OK); + + /* Free table of the level below, pointed to by this table entry. + * We don't destroy the table inside the above _MMU_FreeLevel call because we + * first have to set the table entry of the level above to invalid. */ + _PxMemFree(psMMUContext, &psNextLevel->sMemDesc, aeMMULevel[*pui32CurrentLevel]); + OSFreeMem(psNextLevel); + + /* The level below us is empty, drop the refcount and clear the pointer */ + psLevel->ui32RefCount--; + psLevel->apsNextLevel[i] = NULL; + + /* Check we haven't wrapped around */ + PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); + } + (*pui32CurrentLevel)--; + } + else + { + psLevel->ui32RefCount--; + } + + /* + Free this level if it is no longer referenced, unless it's the base + level in which case it's part of the MMU context and should be freed + when the MMU context is freed + */ + if ((psLevel->ui32RefCount == 0) && (psLevel != &psMMUContext->sBaseLevelInfo)) + { + bFreed = IMG_TRUE; + } + } + + /* Level one flushing is done when we actually write the table entries */ + if ((aeMMULevel[uiThisLevel] != MMU_LEVEL_1) && (psLevel != NULL)) + { + psDevNode->sDevMMUPxSetup.pfnDevPxClean(psDevNode, + &psLevel->sMemDesc.psMapping->sMemHandle, + uiStartIndex * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, + (uiEndIndex - uiStartIndex) * psConfig->uiBytesPerEntry); + } + + MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_FreeLevel end: level = %d, refcount = %d", + aeMMULevel[uiThisLevel], bFreed?0: (psLevel)?psLevel->ui32RefCount:-1)); + + return bFreed; +} + +/*************************************************************************/ /*! +@Function _MMU_AllocLevel + +@Description Recursively allocates the specified range of Px entries. If any + level has its last reference dropped then the MMU object + memory and the MMU_Levelx_Info will be freed. + + At each level we might be crossing a boundary from one Px to + another. 
The values for auiStartArray should be by used for + the first call into each level and the values in auiEndArray + should only be used in the last call for each level. + In order to determine if this is the first/last call we pass + in bFirst and bLast. + When one level calls down to the next only if bFirst/bLast is set + and it's the first/last iteration of the loop at its level will + bFirst/bLast set for the next recursion. + This means that each iteration has the knowledge of the previous + level which is required. + +@Input psMMUContext MMU context to operate on + +@Input psLevel Level info on which to free the + specified range + +@Input auiStartArray Array of start indexes (one for each level) + +@Input auiEndArray Array of end indexes (one for each level) + +@Input auiEntriesPerPxArray Array of number of entries for the Px + (one for each level) + +@Input apsConfig Array of PxE configs (one for each level) + +@Input aeMMULevel Array of MMU levels (one for each level) + +@Input pui32CurrentLevel Pointer to a variable which is set to our + current level + +@Input uiStartIndex Start index of the range to free + +@Input uiEndIndex End index of the range to free + +@Input bFirst This is the first call for this level + +@Input bLast This is the last call for this level + +@Return IMG_TRUE if the last reference to psLevel was dropped + */ +/*****************************************************************************/ +static PVRSRV_ERROR _MMU_AllocLevel(MMU_CONTEXT *psMMUContext, + MMU_Levelx_INFO *psLevel, + IMG_UINT32 auiStartArray[], + IMG_UINT32 auiEndArray[], + IMG_UINT32 auiEntriesPerPxArray[], + const MMU_PxE_CONFIG *apsConfig[], + MMU_LEVEL aeMMULevel[], + IMG_UINT32 *pui32CurrentLevel, + IMG_UINT32 uiStartIndex, + IMG_UINT32 uiEndIndex, + IMG_BOOL bFirst, + IMG_BOOL bLast, + IMG_UINT32 uiLog2DataPageSize) +{ + IMG_UINT32 uiThisLevel = *pui32CurrentLevel; /* Starting with 0 */ + const MMU_PxE_CONFIG *psConfig = apsConfig[uiThisLevel]; /* The table config 
for the current level */ + PVRSRV_ERROR eError = PVRSRV_ERROR_OUT_OF_MEMORY; + IMG_UINT32 uiAllocState = 99; /* Debug info to check what progress was made in the function. Updated during this function. */ + IMG_UINT32 i; + PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + + /* Sanity check */ + PVR_ASSERT(*pui32CurrentLevel < MMU_MAX_LEVEL); + + MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_AllocLevel: level = %d, range %d - %d, refcount = %d", + aeMMULevel[uiThisLevel], uiStartIndex, + uiEndIndex, psLevel->ui32RefCount)); + + /* Go from uiStartIndex to uiEndIndex through the Px */ + for (i = uiStartIndex;i < uiEndIndex;i++) + { + /* Only try an allocation if this is not the last level */ + /*Because a PT allocation is already done while setting the entry in PD */ + if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1) + { + IMG_UINT32 uiNextStartIndex; + IMG_UINT32 uiNextEndIndex; + IMG_BOOL bNextFirst; + IMG_BOOL bNextLast; + + /* If there is already a next Px level existing, do not allocate it */ + if (!psLevel->apsNextLevel[i]) + { + MMU_Levelx_INFO *psNextLevel; + IMG_UINT32 ui32AllocSize; + IMG_UINT32 uiNextEntries; + + /* Allocate and setup the next level */ + uiNextEntries = auiEntriesPerPxArray[uiThisLevel + 1]; + ui32AllocSize = sizeof(MMU_Levelx_INFO); + if (aeMMULevel[uiThisLevel + 1] != MMU_LEVEL_1) + { + ui32AllocSize += sizeof(MMU_Levelx_INFO *) * (uiNextEntries - 1); + } + psNextLevel = OSAllocZMem(ui32AllocSize); + if (psNextLevel == NULL) + { + uiAllocState = 0; + goto e0; + } + + /* Hook in this level for next time */ + psLevel->apsNextLevel[i] = psNextLevel; + + psNextLevel->ui32NumOfEntries = uiNextEntries; + psNextLevel->ui32RefCount = 0; + /* Allocate Px memory for a sub level*/ + eError = _PxMemAlloc(psMMUContext, uiNextEntries, apsConfig[uiThisLevel + 1], + aeMMULevel[uiThisLevel + 1], + &psNextLevel->sMemDesc, + psConfig->uiAddrLog2Align); + if (eError != PVRSRV_OK) + { + uiAllocState = 1; + goto e0; + } + + /* Wire up the entry */ + 
eError = _SetupPxE(psMMUContext, + psLevel, + i, + psConfig, + aeMMULevel[uiThisLevel], + &psNextLevel->sMemDesc.sDevPAddr, +#if defined(PDUMP) + NULL, /* Only required for data page */ + NULL, /* Only required for data page */ + 0, /* Only required for data page */ +#endif + 0, + uiLog2DataPageSize); + + if (eError != PVRSRV_OK) + { + uiAllocState = 2; + goto e0; + } + + psLevel->ui32RefCount++; + } + + /* If we're crossing a Px then the start index changes */ + if (bFirst && (i == uiStartIndex)) + { + uiNextStartIndex = auiStartArray[uiThisLevel + 1]; + bNextFirst = IMG_TRUE; + } + else + { + uiNextStartIndex = 0; + bNextFirst = IMG_FALSE; + } + + /* If we're crossing a Px then the end index changes */ + if (bLast && (i == (uiEndIndex - 1))) + { + uiNextEndIndex = auiEndArray[uiThisLevel + 1]; + bNextLast = IMG_TRUE; + } + else + { + uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1]; + bNextLast = IMG_FALSE; + } + + /* Recurse into the next level */ + (*pui32CurrentLevel)++; + eError = _MMU_AllocLevel(psMMUContext, psLevel->apsNextLevel[i], + auiStartArray, + auiEndArray, + auiEntriesPerPxArray, + apsConfig, + aeMMULevel, + pui32CurrentLevel, + uiNextStartIndex, + uiNextEndIndex, + bNextFirst, + bNextLast, + uiLog2DataPageSize); + (*pui32CurrentLevel)--; + if (eError != PVRSRV_OK) + { + uiAllocState = 2; + goto e0; + } + } + else + { + /* All we need to do for level 1 is bump the refcount */ + psLevel->ui32RefCount++; + } + PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); + } + + /* Level one flushing is done when we actually write the table entries */ + if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1) + { + eError = psDevNode->sDevMMUPxSetup.pfnDevPxClean(psDevNode, + &psLevel->sMemDesc.psMapping->sMemHandle, + uiStartIndex * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, + (uiEndIndex - uiStartIndex) * psConfig->uiBytesPerEntry); + PVR_GOTO_IF_ERROR(eError, e0); + } + + MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_AllocLevel end: level = %d, 
refcount = %d", + aeMMULevel[uiThisLevel], psLevel->ui32RefCount)); + return PVRSRV_OK; + +e0: + /* Sanity check that we've not come down this route unexpectedly */ + PVR_ASSERT(uiAllocState!=99); + PVR_DPF((PVR_DBG_ERROR, "_MMU_AllocLevel: Error %d allocating Px for level %d in stage %d" + ,eError, aeMMULevel[uiThisLevel], uiAllocState)); + + /* The start value of index variable i is not initialised on purpose. + * This clean-up loop deinitialises what was already initialised in + * reverse order, so the i index already has the correct value. + */ + for (/* i already set */; i>= uiStartIndex && i< uiEndIndex; i--) + { + switch (uiAllocState) + { + IMG_UINT32 uiNextStartIndex; + IMG_UINT32 uiNextEndIndex; + IMG_BOOL bNextFirst; + IMG_BOOL bNextLast; + + case 3: + /* If we're crossing a Px then the start index changes */ + if (bFirst && (i == uiStartIndex)) + { + uiNextStartIndex = auiStartArray[uiThisLevel + 1]; + bNextFirst = IMG_TRUE; + } + else + { + uiNextStartIndex = 0; + bNextFirst = IMG_FALSE; + } + + /* If we're crossing a Px then the end index changes */ + if (bLast && (i == (uiEndIndex - 1))) + { + uiNextEndIndex = auiEndArray[uiThisLevel + 1]; + bNextLast = IMG_TRUE; + } + else + { + uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1]; + bNextLast = IMG_FALSE; + } + + if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1) + { + (*pui32CurrentLevel)++; + if (_MMU_FreeLevel(psMMUContext, psLevel->apsNextLevel[i], + auiStartArray, auiEndArray, + auiEntriesPerPxArray, apsConfig, + aeMMULevel, pui32CurrentLevel, + uiNextStartIndex, uiNextEndIndex, + bNextFirst, bNextLast, uiLog2DataPageSize)) + { + psLevel->ui32RefCount--; + psLevel->apsNextLevel[i] = NULL; + + /* Check we haven't wrapped around */ + PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); + } + (*pui32CurrentLevel)--; + } + else + { + /* We should never come down this path, but it's here + for completeness */ + psLevel->ui32RefCount--; + + /* Check we haven't wrapped around */ + 
PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); + } + + __fallthrough; + case 2: + if (psLevel->apsNextLevel[i] != NULL && + psLevel->apsNextLevel[i]->ui32RefCount == 0) + { + _PxMemFree(psMMUContext, &psLevel->sMemDesc, + aeMMULevel[uiThisLevel]); + } + + __fallthrough; + case 1: + if (psLevel->apsNextLevel[i] != NULL && + psLevel->apsNextLevel[i]->ui32RefCount == 0) + { + OSFreeMem(psLevel->apsNextLevel[i]); + psLevel->apsNextLevel[i] = NULL; + } + + __fallthrough; + case 0: + uiAllocState = 3; + break; + } + } + return eError; +} + +/***************************************************************************** + * MMU page table functions * + *****************************************************************************/ + +/*************************************************************************/ /*! +@Function _MMU_GetLevelData + +@Description Get the all the level data and calculates the indexes for the + specified address range + +@Input psMMUContext MMU context to operate on + +@Input sDevVAddrStart Start device virtual address + +@Input sDevVAddrEnd End device virtual address + +@Input uiLog2DataPageSize Log2 of the page size to use + +@Input auiStartArray Array of start indexes (one for each level) + +@Input auiEndArray Array of end indexes (one for each level) + +@Input uiEntriesPerPxArray Array of number of entries for the Px + (one for each level) + +@Input apsConfig Array of PxE configs (one for each level) + +@Input aeMMULevel Array of MMU levels (one for each level) + +@Input ppsMMUDevVAddrConfig Device virtual address config + +@Input phPriv Private data of page size config + +@Return IMG_TRUE if the last reference to psLevel was dropped + */ +/*****************************************************************************/ +static void _MMU_GetLevelData(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddrStart, + IMG_DEV_VIRTADDR sDevVAddrEnd, + IMG_UINT32 uiLog2DataPageSize, + IMG_UINT32 auiStartArray[], + IMG_UINT32 
auiEndArray[], + IMG_UINT32 auiEntriesPerPx[], + const MMU_PxE_CONFIG *apsConfig[], + MMU_LEVEL aeMMULevel[], + const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig, + IMG_HANDLE *phPriv) +{ + const MMU_PxE_CONFIG *psMMUPDEConfig; + const MMU_PxE_CONFIG *psMMUPTEConfig; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs; + PVRSRV_ERROR eError; + IMG_UINT32 i = 0; + + eError = psDevAttrs->pfnGetPageSizeConfiguration(uiLog2DataPageSize, + &psMMUPDEConfig, + &psMMUPTEConfig, + ppsMMUDevVAddrConfig, + phPriv); + PVR_ASSERT(eError == PVRSRV_OK); + + psDevVAddrConfig = *ppsMMUDevVAddrConfig; + + if (psDevVAddrConfig->uiPCIndexMask != 0) + { + auiStartArray[i] = _CalcPCEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE); + auiEndArray[i] = _CalcPCEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE); + auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPC; + apsConfig[i] = psDevAttrs->psBaseConfig; + aeMMULevel[i] = MMU_LEVEL_3; + i++; + } + + if (psDevVAddrConfig->uiPDIndexMask != 0) + { + auiStartArray[i] = _CalcPDEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE); + auiEndArray[i] = _CalcPDEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE); + auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPD; + if (i == 0) + { + apsConfig[i] = psDevAttrs->psBaseConfig; + } + else + { + apsConfig[i] = psMMUPDEConfig; + } + aeMMULevel[i] = MMU_LEVEL_2; + i++; + } + + /* + There is always a PTE entry so we have a slightly different behaviour than above. + E.g. for 2 MB RGX pages the uiPTIndexMask is 0x0000000000 but still there + is a PT with one entry. + + */ + auiStartArray[i] = _CalcPTEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE); + if (psDevVAddrConfig->uiPTIndexMask !=0) + { + auiEndArray[i] = _CalcPTEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE); + } + else + { + /* + If the PTE mask is zero it means there is only 1 PTE and thus, as an + an exclusive bound, the end array index is equal to the start index + 1. 
+ */ + + auiEndArray[i] = auiStartArray[i] + 1; + } + + auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPT; + + if (i == 0) + { + apsConfig[i] = psDevAttrs->psBaseConfig; + } + else + { + apsConfig[i] = psMMUPTEConfig; + } + aeMMULevel[i] = MMU_LEVEL_1; +} + +static void _MMU_PutLevelData(MMU_CONTEXT *psMMUContext, IMG_HANDLE hPriv) +{ + MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs; + + psDevAttrs->pfnPutPageSizeConfiguration(hPriv); +} + +/*************************************************************************/ /*! +@Function _AllocPageTables + +@Description Allocate page tables and any higher level MMU objects required + for the specified virtual range + +@Input psMMUContext MMU context to operate on + +@Input sDevVAddrStart Start device virtual address + +@Input sDevVAddrEnd End device virtual address + +@Input uiLog2DataPageSize Page size of the data pages + +@Return PVRSRV_OK if the allocation was successful + */ +/*****************************************************************************/ +static PVRSRV_ERROR +_AllocPageTables(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddrStart, + IMG_DEV_VIRTADDR sDevVAddrEnd, + IMG_UINT32 uiLog2DataPageSize) +{ + PVRSRV_ERROR eError; + IMG_UINT32 auiStartArray[MMU_MAX_LEVEL]; + IMG_UINT32 auiEndArray[MMU_MAX_LEVEL]; + IMG_UINT32 auiEntriesPerPx[MMU_MAX_LEVEL]; + MMU_LEVEL aeMMULevel[MMU_MAX_LEVEL]; + const MMU_PxE_CONFIG *apsConfig[MMU_MAX_LEVEL]; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + IMG_HANDLE hPriv; + IMG_UINT32 ui32CurrentLevel = 0; + + PVR_DPF((PVR_DBG_ALLOC, + "_AllocPageTables: vaddr range: "IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC, + sDevVAddrStart.uiAddr, + sDevVAddrEnd.uiAddr + )); + +#if defined(PDUMP) + PDUMPCOMMENT("Allocating page tables for %"IMG_UINT64_FMTSPEC" bytes virtual range: " + IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC, + (IMG_UINT64)sDevVAddrEnd.uiAddr - (IMG_UINT64)sDevVAddrStart.uiAddr, + (IMG_UINT64)sDevVAddrStart.uiAddr, + 
(IMG_UINT64)sDevVAddrEnd.uiAddr); +#endif + + _MMU_GetLevelData(psMMUContext, sDevVAddrStart, sDevVAddrEnd, + (IMG_UINT32) uiLog2DataPageSize, auiStartArray, auiEndArray, + auiEntriesPerPx, apsConfig, aeMMULevel, + &psDevVAddrConfig, &hPriv); + + HTBLOGK(HTB_SF_MMU_PAGE_OP_ALLOC, + HTBLOG_U64_BITS_HIGH(sDevVAddrStart.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrStart.uiAddr), + HTBLOG_U64_BITS_HIGH(sDevVAddrEnd.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrEnd.uiAddr)); + + eError = _MMU_AllocLevel(psMMUContext, &psMMUContext->sBaseLevelInfo, + auiStartArray, auiEndArray, auiEntriesPerPx, + apsConfig, aeMMULevel, &ui32CurrentLevel, + auiStartArray[0], auiEndArray[0], + IMG_TRUE, IMG_TRUE, uiLog2DataPageSize); + + _MMU_PutLevelData(psMMUContext, hPriv); + + return eError; +} + +/*************************************************************************/ /*! +@Function _FreePageTables + +@Description Free page tables and any higher level MMU objects at are no + longer referenced for the specified virtual range. + This will fill the temporary free list of the MMU context which + needs cleanup after the call. 
+ +@Input psMMUContext MMU context to operate on + +@Input sDevVAddrStart Start device virtual address + +@Input sDevVAddrEnd End device virtual address + +@Input uiLog2DataPageSize Page size of the data pages + +@Return None + */ +/*****************************************************************************/ +static void _FreePageTables(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddrStart, + IMG_DEV_VIRTADDR sDevVAddrEnd, + IMG_UINT32 uiLog2DataPageSize) +{ + IMG_UINT32 auiStartArray[MMU_MAX_LEVEL]; + IMG_UINT32 auiEndArray[MMU_MAX_LEVEL]; + IMG_UINT32 auiEntriesPerPx[MMU_MAX_LEVEL]; + MMU_LEVEL aeMMULevel[MMU_MAX_LEVEL]; + const MMU_PxE_CONFIG *apsConfig[MMU_MAX_LEVEL]; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + IMG_UINT32 ui32CurrentLevel = 0; + IMG_HANDLE hPriv; + + PVR_DPF((PVR_DBG_ALLOC, + "_FreePageTables: vaddr range: "IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC, + sDevVAddrStart.uiAddr, + sDevVAddrEnd.uiAddr + )); + + _MMU_GetLevelData(psMMUContext, sDevVAddrStart, sDevVAddrEnd, + uiLog2DataPageSize, auiStartArray, auiEndArray, + auiEntriesPerPx, apsConfig, aeMMULevel, + &psDevVAddrConfig, &hPriv); + + HTBLOGK(HTB_SF_MMU_PAGE_OP_FREE, + HTBLOG_U64_BITS_HIGH(sDevVAddrStart.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrStart.uiAddr), + HTBLOG_U64_BITS_HIGH(sDevVAddrEnd.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrEnd.uiAddr)); + + /* ignoring return code, in this case there should be no references + * to the level anymore, and at this stage there is nothing to do with + * the return status */ + (void) _MMU_FreeLevel(psMMUContext, &psMMUContext->sBaseLevelInfo, + auiStartArray, auiEndArray, auiEntriesPerPx, + apsConfig, aeMMULevel, &ui32CurrentLevel, + auiStartArray[0], auiEndArray[0], + IMG_TRUE, IMG_TRUE, uiLog2DataPageSize); + + _MMU_PutLevelData(psMMUContext, hPriv); +} + + +/*************************************************************************/ /*! 
+@Function _MMU_GetPTInfo + +@Description Get the PT level information and PT entry index for the specified + virtual address + +@Input psMMUContext MMU context to operate on + +@Input psDevVAddr Device virtual address to get the PTE info + from. + +@Input psDevVAddrConfig The current virtual address config obtained + by another function call before. + +@Output psLevel Level info of the PT + +@Output pui32PTEIndex Index into the PT the address corresponds to + +@Return None + */ +/*****************************************************************************/ +static INLINE void _MMU_GetPTInfo(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddr, + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig, + MMU_Levelx_INFO **psLevel, + IMG_UINT32 *pui32PTEIndex) +{ + MMU_Levelx_INFO *psLocalLevel = NULL; + MMU_LEVEL eMMULevel = psMMUContext->psDevAttrs->eTopLevel; + IMG_UINT32 uiPCEIndex; + IMG_UINT32 uiPDEIndex; + + if ((eMMULevel <= MMU_LEVEL_0) || (eMMULevel >= MMU_LEVEL_LAST)) + { + PVR_DPF((PVR_DBG_ERROR, "_MMU_GetPTEInfo: Invalid MMU level")); + psLevel = NULL; + return; + } + + for (; eMMULevel > MMU_LEVEL_0; eMMULevel--) + { + if (eMMULevel == MMU_LEVEL_3) + { + /* find the page directory containing the PCE */ + uiPCEIndex = _CalcPCEIdx (sDevVAddr, psDevVAddrConfig, + IMG_FALSE); + psLocalLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiPCEIndex]; + } + + if (eMMULevel == MMU_LEVEL_2) + { + /* find the page table containing the PDE */ + uiPDEIndex = _CalcPDEIdx (sDevVAddr, psDevVAddrConfig, + IMG_FALSE); + if (psLocalLevel != NULL) + { + psLocalLevel = psLocalLevel->apsNextLevel[uiPDEIndex]; + } + else + { + psLocalLevel = + psMMUContext->sBaseLevelInfo.apsNextLevel[uiPDEIndex]; + } + } + + if (eMMULevel == MMU_LEVEL_1) + { + /* find PTE index into page table */ + *pui32PTEIndex = _CalcPTEIdx (sDevVAddr, psDevVAddrConfig, + IMG_FALSE); + if (psLocalLevel == NULL) + { + psLocalLevel = &psMMUContext->sBaseLevelInfo; + } + } + } + *psLevel = psLocalLevel; +} + 
+/*************************************************************************/ /*! +@Function _MMU_GetPTConfig + +@Description Get the level config. Call _MMU_PutPTConfig after use! + +@Input psMMUContext MMU context to operate on + +@Input uiLog2DataPageSize Log 2 of the page size + +@Output ppsConfig Config of the PTE + +@Output phPriv Private data handle to be passed back + when the info is put + +@Output ppsDevVAddrConfig Config of the device virtual addresses + +@Return None + */ +/*****************************************************************************/ +static INLINE void _MMU_GetPTConfig(MMU_CONTEXT *psMMUContext, + IMG_UINT32 uiLog2DataPageSize, + const MMU_PxE_CONFIG **ppsConfig, + IMG_HANDLE *phPriv, + const MMU_DEVVADDR_CONFIG **ppsDevVAddrConfig) +{ + MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + const MMU_PxE_CONFIG *psPDEConfig; + const MMU_PxE_CONFIG *psPTEConfig; + + if (psDevAttrs->pfnGetPageSizeConfiguration(uiLog2DataPageSize, + &psPDEConfig, + &psPTEConfig, + &psDevVAddrConfig, + phPriv) != PVRSRV_OK) + { + /* + There should be no way we got here unless uiLog2DataPageSize + has changed after the MMU_Alloc call (in which case it's a bug in + the MM code) + */ + PVR_DPF((PVR_DBG_ERROR, "_MMU_GetPTConfig: Could not get valid page size config")); + PVR_ASSERT(0); + } + + *ppsConfig = psPTEConfig; + *ppsDevVAddrConfig = psDevVAddrConfig; +} + +/*************************************************************************/ /*! +@Function _MMU_PutPTConfig + +@Description Put the level info. Has to be called after _MMU_GetPTConfig to + ensure correct refcounting. + +@Input psMMUContext MMU context to operate on + +@Input phPriv Private data handle created by + _MMU_GetPTConfig. 
+ +@Return None + */ +/*****************************************************************************/ +static INLINE void _MMU_PutPTConfig(MMU_CONTEXT *psMMUContext, + IMG_HANDLE hPriv) +{ + MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs; + + if (psDevAttrs->pfnPutPageSizeConfiguration(hPriv) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Could not put page size config", + __func__)); + PVR_ASSERT(0); + } +} + + +/***************************************************************************** + * Public interface functions * + *****************************************************************************/ + +/* + MMU_ContextCreate + */ +PVRSRV_ERROR +MMU_ContextCreate(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + MMU_CONTEXT **ppsMMUContext, + MMU_DEVICEATTRIBS *psDevAttrs) +{ + MMU_CONTEXT *psMMUContext; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + const MMU_PxE_CONFIG *psConfig; + MMU_PHYSMEM_CONTEXT *psPhysMemCtx; + IMG_UINT32 ui32BaseObjects; + IMG_UINT32 ui32Size; + IMG_CHAR sBuf[40]; + PVRSRV_ERROR eError = PVRSRV_OK; + +#if defined(PDUMP) + PDUMPCOMMENT("MMU context create"); +#endif + + psConfig = psDevAttrs->psBaseConfig; + psDevVAddrConfig = psDevAttrs->psTopLevelDevVAddrConfig; + + switch (psDevAttrs->eTopLevel) + { + case MMU_LEVEL_3: + ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPC; + break; + + case MMU_LEVEL_2: + ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPD; + break; + + case MMU_LEVEL_1: + ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPT; + break; + + default: + PVR_LOG_GOTO_WITH_ERROR("psDevAttrs->eTopLevel", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); + } + + /* Allocate the MMU context with the Level 1 Px info's */ + ui32Size = sizeof(MMU_CONTEXT) + + ((ui32BaseObjects - 1) * sizeof(MMU_Levelx_INFO *)); + + psMMUContext = OSAllocZMem(ui32Size); + PVR_LOG_GOTO_IF_NOMEM(psMMUContext, eError, e0); + +#if defined(PDUMP) + /* Clear the refcount */ + psMMUContext->ui32PDumpContextIDRefCount = 0; +#endif + /* 
Record Device specific attributes in the context for subsequent use */ + psMMUContext->psDevAttrs = psDevAttrs; + + /* + Allocate physmem context and set it up + */ + psPhysMemCtx = OSAllocZMem(sizeof(MMU_PHYSMEM_CONTEXT)); + PVR_LOG_GOTO_IF_NOMEM(psPhysMemCtx, eError, e1); + + psMMUContext->psPhysMemCtx = psPhysMemCtx; + psMMUContext->psConnection = psConnection; + + psPhysMemCtx->psDevNode = psDevNode; /* Needed for Direct Bridge case */ + psPhysMemCtx->psMMUContext = psMMUContext; /* Back-link to self */ + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + /* Save the app-specific values for external reference via MMU_GetOSids. */ + if (psConnection != NULL) + { + psPhysMemCtx->ui32OSid = psConnection->ui32OSid; + psPhysMemCtx->ui32OSidReg = psConnection->ui32OSidReg; + psPhysMemCtx->bOSidAxiProt = psConnection->bOSidAxiProtReg; + } + else + { + /* Direct Bridge calling sequence e.g. Firmware */ + psPhysMemCtx->ui32OSid = 0; + psPhysMemCtx->ui32OSidReg = 0; + psPhysMemCtx->bOSidAxiProt = IMG_FALSE; + } +#endif + + OSSNPrintf(sBuf, sizeof(sBuf), "pgtables %p", psPhysMemCtx); + psPhysMemCtx->uiPhysMemRANameAllocSize = OSStringLength(sBuf)+1; + psPhysMemCtx->pszPhysMemRAName = OSAllocMem(psPhysMemCtx->uiPhysMemRANameAllocSize); + PVR_LOG_GOTO_IF_NOMEM(psPhysMemCtx->pszPhysMemRAName, eError, e2); + + OSStringLCopy(psPhysMemCtx->pszPhysMemRAName, sBuf, psPhysMemCtx->uiPhysMemRANameAllocSize); + + psPhysMemCtx->psPhysMemRA = RA_Create(psPhysMemCtx->pszPhysMemRAName, + /* subsequent import */ + psDevNode->sDevMMUPxSetup.uiMMUPxLog2AllocGran, + RA_LOCKCLASS_1, + _MMU_PhysMem_RAImportAlloc, + _MMU_PhysMem_RAImportFree, + psPhysMemCtx, /* priv */ + IMG_FALSE); + if (psPhysMemCtx->psPhysMemRA == NULL) + { + OSFreeMem(psPhysMemCtx->pszPhysMemRAName); + psPhysMemCtx->pszPhysMemRAName = NULL; + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, e3); + } + + /* Setup cleanup meta data to check if a MMU context + * has been destroyed and should not be accessed anymore */ + 
psPhysMemCtx->psCleanupData = OSAllocMem(sizeof(*(psPhysMemCtx->psCleanupData))); + PVR_LOG_GOTO_IF_NOMEM(psPhysMemCtx->psCleanupData, eError, e4); + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + /* Record the originating OSid for all allocation / free for this context */ + psPhysMemCtx->psCleanupData->ui32OSid = psPhysMemCtx->ui32OSid; +#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ + OSLockCreate(&psPhysMemCtx->psCleanupData->hCleanupLock); + psPhysMemCtx->psCleanupData->bMMUContextExists = IMG_TRUE; + dllist_init(&psPhysMemCtx->psCleanupData->sMMUCtxCleanupItemsHead); + OSAtomicWrite(&psPhysMemCtx->psCleanupData->iRef, 1); + + /* allocate the base level object */ + /* + Note: Although this is not required by the this file until + the 1st allocation is made, a device specific callback + might request the base object address so we allocate + it up front. + */ + if (_PxMemAlloc(psMMUContext, + ui32BaseObjects, + psConfig, + psDevAttrs->eTopLevel, + &psMMUContext->sBaseLevelInfo.sMemDesc, + psDevAttrs->ui32BaseAlign)) + { + PVR_LOG_GOTO_WITH_ERROR("_PxMemAlloc", eError, PVRSRV_ERROR_OUT_OF_MEMORY, e5); + } + + dllist_init(&psMMUContext->psPhysMemCtx->sTmpMMUMappingHead); + + psMMUContext->sBaseLevelInfo.ui32NumOfEntries = ui32BaseObjects; + psMMUContext->sBaseLevelInfo.ui32RefCount = 0; + + eError = OSLockCreate(&psMMUContext->hLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e6); + + /* return context */ + *ppsMMUContext = psMMUContext; + + return PVRSRV_OK; + +e6: + _PxMemFree(psMMUContext, &psMMUContext->sBaseLevelInfo.sMemDesc, psDevAttrs->eTopLevel); +e5: + OSFreeMem(psPhysMemCtx->psCleanupData); +e4: + RA_Delete(psPhysMemCtx->psPhysMemRA); +e3: + OSFreeMem(psPhysMemCtx->pszPhysMemRAName); +e2: + OSFreeMem(psPhysMemCtx); +e1: + OSFreeMem(psMMUContext); +e0: + return eError; +} + +/* + MMU_ContextDestroy + */ +void +MMU_ContextDestroy (MMU_CONTEXT *psMMUContext) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PDLLIST_NODE psNode, psNextNode; + + 
PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psMMUContext->psPhysMemCtx->psDevNode; + MMU_CTX_CLEANUP_DATA *psCleanupData = psMMUContext->psPhysMemCtx->psCleanupData; + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Enter", __func__)); + + if (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) + { + /* There should be no way to get here with live pages unless + there is a bug in this module or the MM code */ + PVR_ASSERT(psMMUContext->sBaseLevelInfo.ui32RefCount == 0); + } + + /* Cleanup lock must be acquired before MMUContext lock. Reverse order + * may lead to a deadlock and is reported by lockdep. */ + OSLockAcquire(psCleanupData->hCleanupLock); + OSLockAcquire(psMMUContext->hLock); + + /* Free the top level MMU object - will be put on defer free list. + * This has to be done before the step below that will empty the + * defer-free list. */ + _PxMemFree(psMMUContext, + &psMMUContext->sBaseLevelInfo.sMemDesc, + psMMUContext->psDevAttrs->eTopLevel); + + /* Empty the temporary defer-free list of Px */ + _FreeMMUMapping(psDevNode, &psMMUContext->psPhysMemCtx->sTmpMMUMappingHead); + PVR_ASSERT(dllist_is_empty(&psMMUContext->psPhysMemCtx->sTmpMMUMappingHead)); + + /* Empty the defer free list so the cleanup thread will + * not have to access any MMU context related structures anymore */ + dllist_foreach_node(&psCleanupData->sMMUCtxCleanupItemsHead, + psNode, + psNextNode) + { + MMU_CLEANUP_ITEM *psCleanup = IMG_CONTAINER_OF(psNode, + MMU_CLEANUP_ITEM, + sMMUCtxCleanupItem); + + _FreeMMUMapping(psDevNode, &psCleanup->sMMUMappingHead); + + dllist_remove_node(psNode); + } + PVR_ASSERT(dllist_is_empty(&psCleanupData->sMMUCtxCleanupItemsHead)); + + psCleanupData->bMMUContextExists = IMG_FALSE; + + /* Free physmem context */ + RA_Delete(psMMUContext->psPhysMemCtx->psPhysMemRA); + psMMUContext->psPhysMemCtx->psPhysMemRA = NULL; + OSFreeMem(psMMUContext->psPhysMemCtx->pszPhysMemRAName); + psMMUContext->psPhysMemCtx->pszPhysMemRAName = NULL; + + 
OSFreeMem(psMMUContext->psPhysMemCtx); + + OSLockRelease(psMMUContext->hLock); + + OSLockRelease(psCleanupData->hCleanupLock); + + if (OSAtomicDecrement(&psCleanupData->iRef) == 0) + { + OSLockDestroy(psCleanupData->hCleanupLock); + OSFreeMem(psCleanupData); + } + + OSLockDestroy(psMMUContext->hLock); + + /* free the context itself. */ + OSFreeMem(psMMUContext); + /*not nulling pointer, copy on stack*/ + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Exit", __func__)); +} + +/* + MMU_Alloc + */ +PVRSRV_ERROR +MMU_Alloc (MMU_CONTEXT *psMMUContext, + IMG_DEVMEM_SIZE_T uSize, + IMG_DEVMEM_SIZE_T *puActualSize, + IMG_UINT32 uiProtFlags, + IMG_DEVMEM_SIZE_T uDevVAddrAlignment, + IMG_DEV_VIRTADDR *psDevVAddr, + IMG_UINT32 uiLog2PageSize) +{ + PVRSRV_ERROR eError; + IMG_DEV_VIRTADDR sDevVAddrEnd; + + const MMU_PxE_CONFIG *psPDEConfig; + const MMU_PxE_CONFIG *psPTEConfig; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + + MMU_DEVICEATTRIBS *psDevAttrs; + IMG_HANDLE hPriv; + +#if !defined(DEBUG) + PVR_UNREFERENCED_PARAMETER(uDevVAddrAlignment); +#endif + + PVR_DPF((PVR_DBG_MESSAGE, + "%s: uSize=" IMG_DEVMEM_SIZE_FMTSPEC + ", uiProtFlags=0x%x, align="IMG_DEVMEM_ALIGN_FMTSPEC, + __func__, uSize, uiProtFlags, uDevVAddrAlignment)); + + /* check params */ + PVR_LOG_RETURN_IF_INVALID_PARAM(psMMUContext, "psMMUContext"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psDevVAddr, "psDevVAddr"); + PVR_LOG_RETURN_IF_INVALID_PARAM(puActualSize, "puActualSize"); + + psDevAttrs = psMMUContext->psDevAttrs; + + eError = psDevAttrs->pfnGetPageSizeConfiguration(uiLog2PageSize, + &psPDEConfig, + &psPTEConfig, + &psDevVAddrConfig, + &hPriv); + PVR_LOG_RETURN_IF_ERROR(eError, "pfnGetPageSizeConfiguration"); + + /* size and alignment must be datapage granular */ + if (((psDevVAddr->uiAddr & psDevVAddrConfig->uiPageOffsetMask) != 0) + || ((uSize & psDevVAddrConfig->uiPageOffsetMask) != 0)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: invalid address or size granularity", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + 
sDevVAddrEnd = *psDevVAddr; + sDevVAddrEnd.uiAddr += uSize; + + OSLockAcquire(psMMUContext->hLock); + eError = _AllocPageTables(psMMUContext, *psDevVAddr, sDevVAddrEnd, uiLog2PageSize); + OSLockRelease(psMMUContext->hLock); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: _AllocPageTables failed", + __func__)); + return PVRSRV_ERROR_MMU_FAILED_TO_ALLOCATE_PAGETABLES; + } + + psDevAttrs->pfnPutPageSizeConfiguration(hPriv); + + return PVRSRV_OK; +} + +/* + MMU_Free + */ +void +MMU_Free (MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 uiLog2DataPageSize) +{ + IMG_DEV_VIRTADDR sDevVAddrEnd; + + PVR_ASSERT(psMMUContext != NULL); + PVR_LOG_RETURN_VOID_IF_FALSE(psMMUContext != NULL, "psMMUContext"); + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Freeing DevVAddr " IMG_DEV_VIRTADDR_FMTSPEC, + __func__, sDevVAddr.uiAddr)); + + /* ensure the address range to free is inside the heap */ + sDevVAddrEnd = sDevVAddr; + sDevVAddrEnd.uiAddr += uiSize; + + /* The Cleanup lock has to be taken before the MMUContext hLock to + * prevent deadlock scenarios. 
It is necessary only for parts of + * _SetupCleanup_FreeMMUMapping though.*/ + OSLockAcquire(psMMUContext->psPhysMemCtx->psCleanupData->hCleanupLock); + + OSLockAcquire(psMMUContext->hLock); + + _FreePageTables(psMMUContext, + sDevVAddr, + sDevVAddrEnd, + uiLog2DataPageSize); + + _SetupCleanup_FreeMMUMapping(psMMUContext->psPhysMemCtx); + + OSLockRelease(psMMUContext->hLock); + + OSLockRelease(psMMUContext->psPhysMemCtx->psCleanupData->hCleanupLock); + + return; + +} + +PVRSRV_ERROR +MMU_MapPages(MMU_CONTEXT *psMMUContext, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + IMG_DEV_VIRTADDR sDevVAddrBase, + PMR *psPMR, + IMG_UINT32 ui32PhysPgOffset, + IMG_UINT32 ui32MapPageCount, + IMG_UINT32 *paui32MapIndices, + IMG_UINT32 uiLog2HeapPageSize) +{ + PVRSRV_ERROR eError; + IMG_HANDLE hPriv; + + MMU_Levelx_INFO *psLevel = NULL; + + MMU_Levelx_INFO *psPrevLevel = NULL; + + IMG_UINT32 uiPTEIndex = 0; + IMG_UINT32 uiPageSize = (1 << uiLog2HeapPageSize); + IMG_UINT32 uiLoop = 0; + IMG_UINT32 ui32MappedCount = 0; + IMG_DEVMEM_OFFSET_T uiPgOffset = 0; + IMG_UINT32 uiFlushEnd = 0, uiFlushStart = 0; + + IMG_UINT64 uiProtFlags = 0, uiProtFlagsReadOnly = 0, uiDefProtFlags=0; + IMG_UINT64 uiDummyProtFlags = 0; + MMU_PROTFLAGS_T uiMMUProtFlags = 0; + + const MMU_PxE_CONFIG *psConfig; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + + IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase; + + IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_DEV_PHYADDR *psDevPAddr; + IMG_DEV_PHYADDR sDevPAddr; + IMG_BOOL *pbValid; + IMG_BOOL bValid; + IMG_BOOL bDummyBacking = IMG_FALSE, bZeroBacking = IMG_FALSE; + IMG_BOOL bNeedBacking = IMG_FALSE; + PVRSRV_DEVICE_NODE *psDevNode; + +#if defined(PDUMP) + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset; + + PDUMPCOMMENT("Wire up Page Table entries to point to the Data Pages 
(%"IMG_INT64_FMTSPECd" bytes)", + (IMG_UINT64)(ui32MapPageCount * uiPageSize)); +#endif /*PDUMP*/ + +#if defined(TC_MEMORY_CONFIG) || defined(PLATO_MEMORY_CONFIG) + /* We're aware that on TC based platforms, accesses from GPU to CPU_LOCAL + * allocated DevMem fail, so we forbid mapping such a PMR into device mmu */ + if (PMR_Flags(psPMR) & PVRSRV_MEMALLOCFLAG_CPU_LOCAL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Mapping a CPU_LOCAL PMR to device is forbidden on this platform", __func__)); + return PVRSRV_ERROR_PMR_NOT_PERMITTED; + } +#endif + + /* Validate the most essential parameters */ + PVR_LOG_GOTO_IF_INVALID_PARAM(psMMUContext, eError, e0); + PVR_LOG_GOTO_IF_INVALID_PARAM(psPMR, eError, e0); + + psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + + /* Allocate memory for page-frame-numbers and validity states, + N.B. assert could be triggered by an illegal uiSizeBytes */ + if (ui32MapPageCount > PMR_MAX_TRANSLATION_STACK_ALLOC) + { + psDevPAddr = OSAllocMem(ui32MapPageCount * sizeof(IMG_DEV_PHYADDR)); + PVR_LOG_GOTO_IF_NOMEM(psDevPAddr, eError, e0); + + pbValid = OSAllocMem(ui32MapPageCount * sizeof(IMG_BOOL)); + if (pbValid == NULL) + { + /* Should allocation fail, clean-up here before exit */ + OSFreeMem(psDevPAddr); + PVR_LOG_GOTO_WITH_ERROR("pbValid", eError, PVRSRV_ERROR_OUT_OF_MEMORY, e0); + } + } + else + { + psDevPAddr = asDevPAddr; + pbValid = abValid; + } + + /* Get the Device physical addresses of the pages we are trying to map + * In the case of non indexed mapping we can get all addresses at once */ + if (NULL == paui32MapIndices) + { + eError = PMR_DevPhysAddr(psPMR, + uiLog2HeapPageSize, + ui32MapPageCount, + (ui32PhysPgOffset << uiLog2HeapPageSize), + psDevPAddr, + pbValid); + PVR_GOTO_IF_ERROR(eError, e1); + } + + /*Get the Page table level configuration */ + _MMU_GetPTConfig(psMMUContext, + (IMG_UINT32) uiLog2HeapPageSize, + &psConfig, + &hPriv, + &psDevVAddrConfig); + + eError = _MMU_ConvertDevMemFlags(IMG_FALSE, + uiMappingFlags, + 
&uiMMUProtFlags, + psMMUContext); + PVR_GOTO_IF_ERROR(eError, e2); + + /* Callback to get device specific protection flags */ + if (psConfig->uiBytesPerEntry == 8) + { + uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize); + uiMMUProtFlags |= MMU_PROTFLAGS_READABLE; + uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt8((uiMMUProtFlags & ~MMU_PROTFLAGS_WRITEABLE), + uiLog2HeapPageSize); + } + else if (psConfig->uiBytesPerEntry == 4) + { + uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags); + uiMMUProtFlags |= MMU_PROTFLAGS_READABLE; + uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt4((uiMMUProtFlags & ~MMU_PROTFLAGS_WRITEABLE)); + } + else + { + PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_INVALID_PARAMS, e2); + } + uiDummyProtFlags = uiProtFlags; + + if (PMR_IsSparse(psPMR)) + { + /* We know there will not be 4G number of PMR's */ + bDummyBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(PMR_Flags(psPMR)); + if (bDummyBacking) + { + bZeroBacking = PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(PMR_Flags(psPMR)); + } + + if (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiMappingFlags)) + { + /* Obtain non-coherent protection flags as we cannot have multiple coherent + virtual pages pointing to the same physical page so all dummy page + mappings have to be non-coherent even in a coherent allocation */ + eError = _MMU_ConvertDevMemFlags(IMG_FALSE, + uiMappingFlags & ~PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT, + &uiMMUProtFlags, + psMMUContext); + PVR_GOTO_IF_ERROR(eError, e2); + + /* Callback to get device specific protection flags */ + if (psConfig->uiBytesPerEntry == 8) + { + uiDummyProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize); + } + else if (psConfig->uiBytesPerEntry == 4) + { + uiDummyProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags); + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: The page table 
entry byte length is not supported", + __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e2; + } + } + } + + OSLockAcquire(psMMUContext->hLock); + + for (uiLoop = 0; uiLoop < ui32MapPageCount; uiLoop++) + { + +#if defined(PDUMP) + IMG_DEVMEM_OFFSET_T uiNextSymName; +#endif /*PDUMP*/ + + if (NULL != paui32MapIndices) + { + uiPgOffset = paui32MapIndices[uiLoop]; + + /*Calculate the Device Virtual Address of the page */ + sDevVAddr.uiAddr = sDevVAddrBase.uiAddr + (uiPgOffset * uiPageSize); + + /* Get the physical address to map */ + eError = PMR_DevPhysAddr(psPMR, + uiLog2HeapPageSize, + 1, + uiPgOffset * uiPageSize, + &sDevPAddr, + &bValid); + PVR_GOTO_IF_ERROR(eError, e3); + } + else + { + uiPgOffset = uiLoop + ui32PhysPgOffset; + sDevPAddr = psDevPAddr[uiLoop]; + bValid = pbValid[uiLoop]; + } + + uiDefProtFlags = uiProtFlags; + /* + The default value of the entry is invalid so we don't need to mark + it as such if the page wasn't valid, we just advance pass that address + */ + if (bValid || bDummyBacking) + { + if (!bValid) + { + if (bZeroBacking) + { + sDevPAddr.uiAddr = psDevNode->sDevZeroPage.ui64PgPhysAddr; + /* Ensure the zero back page PTE is read only */ + uiDefProtFlags = uiProtFlagsReadOnly; + } + else + { + sDevPAddr.uiAddr = psDevNode->sDummyPage.ui64PgPhysAddr; + } + } + else + { + /* check the physical alignment of the memory to map */ + PVR_ASSERT((sDevPAddr.uiAddr & (uiPageSize-1)) == 0); + } + +#if defined(DEBUG) + { + IMG_INT32 i32FeatureVal = 0; + IMG_UINT32 ui32BitLength = FloorLog2(sDevPAddr.uiAddr); + + i32FeatureVal = PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, PHYS_BUS_WIDTH); + do { + /* i32FeatureVal can be negative for cases where this feature is undefined + * In that situation we need to bail out than go ahead with debug comparison */ + if (0 > i32FeatureVal) + break; + + if (ui32BitLength > i32FeatureVal) + { + PVR_DPF((PVR_DBG_ERROR, + "%s Failed. 
The physical address bitlength (%d)" + " is greater than the chip can handle (%d).", + __func__, ui32BitLength, i32FeatureVal)); + + PVR_ASSERT(ui32BitLength <= i32FeatureVal); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e3; + } + } while (0); + } +#endif /*DEBUG*/ + +#if defined(PDUMP) + if (bValid) + { + eError = PMR_PDumpSymbolicAddr(psPMR, uiPgOffset * uiPageSize, + sizeof(aszMemspaceName), &aszMemspaceName[0], + sizeof(aszSymbolicAddress), &aszSymbolicAddress[0], + &uiSymbolicAddrOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + } +#endif /*PDUMP*/ + + psPrevLevel = psLevel; + /* Calculate PT index and get new table descriptor */ + _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, + &psLevel, &uiPTEIndex); + + if (psPrevLevel == psLevel) + { + /* + * Sparse allocations may have page offsets which + * decrement as well as increment, so make sure we + * update the range we will flush correctly. + */ + if (uiPTEIndex > uiFlushEnd) + uiFlushEnd = uiPTEIndex; + else if (uiPTEIndex < uiFlushStart) + uiFlushStart = uiPTEIndex; + } + else + { + /* Flush if we moved to another psLevel, i.e. 
page table */ + if (psPrevLevel != NULL) + { + eError = psDevNode->sDevMMUPxSetup.pfnDevPxClean(psDevNode, + &psPrevLevel->sMemDesc.psMapping->sMemHandle, + uiFlushStart * psConfig->uiBytesPerEntry + psPrevLevel->sMemDesc.uiOffset, + (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); + PVR_GOTO_IF_ERROR(eError, e3); + } + + uiFlushStart = uiPTEIndex; + uiFlushEnd = uiFlushStart; + } + + HTBLOGK(HTB_SF_MMU_PAGE_OP_MAP, + HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr), + HTBLOG_U64_BITS_HIGH(sDevPAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevPAddr.uiAddr)); + + /* Set the PT entry with the specified address and protection flags */ + eError = _SetupPTE(psMMUContext, + psLevel, + uiPTEIndex, + psConfig, + &sDevPAddr, + IMG_FALSE, +#if defined(PDUMP) + (bValid)?aszMemspaceName:(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName), + ((bValid)?aszSymbolicAddress:((bZeroBacking)?DEV_ZERO_PAGE:DUMMY_PAGE)), + (bValid)?uiSymbolicAddrOffset:0, +#endif /*PDUMP*/ + uiDefProtFlags); + PVR_LOG_GOTO_IF_ERROR(eError, "_SetupPTE", e3); + + if (bValid) + { + PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); + PVR_DPF ((PVR_DBG_MESSAGE, + "%s: devVAddr=" IMG_DEV_VIRTADDR_FMTSPEC ", " + "size=" IMG_DEVMEM_OFFSET_FMTSPEC, + __func__, + sDevVAddr.uiAddr, + uiPgOffset * uiPageSize)); + + ui32MappedCount++; + } + } + + sDevVAddr.uiAddr += uiPageSize; + } + + /* Flush the last level we touched */ + if (psLevel != NULL) + { + eError = psDevNode->sDevMMUPxSetup.pfnDevPxClean(psDevNode, + &psLevel->sMemDesc.psMapping->sMemHandle, + uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, + (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); + PVR_GOTO_IF_ERROR(eError, e3); + } + + OSLockRelease(psMMUContext->hLock); + + _MMU_PutPTConfig(psMMUContext, hPriv); + + if (psDevPAddr != asDevPAddr) + { + OSFreeMem(pbValid); + OSFreeMem(psDevPAddr); + } + + /* Flush TLB for PTs*/ + psDevNode->pfnMMUCacheInvalidate(psDevNode, + 
psMMUContext, + MMU_LEVEL_1, + IMG_FALSE); + +#if defined(PDUMP) + PDUMPCOMMENT("Wired up %d Page Table entries (out of %d)", ui32MappedCount, ui32MapPageCount); +#endif /*PDUMP*/ + + return PVRSRV_OK; + +e3: + OSLockRelease(psMMUContext->hLock); + + if (PMR_IsSparse(psPMR) && PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiMappingFlags)) + { + bNeedBacking = IMG_TRUE; + } + + MMU_UnmapPages(psMMUContext, + (bNeedBacking) ? uiMappingFlags : 0, + sDevVAddrBase, + uiLoop, + paui32MapIndices, + uiLog2HeapPageSize, + PMR_IsSparse(psPMR)); +e2: + _MMU_PutPTConfig(psMMUContext, hPriv); +e1: + if (psDevPAddr != asDevPAddr) + { + OSFreeMem(pbValid); + OSFreeMem(psDevPAddr); + } +e0: + return eError; +} + +/* + MMU_UnmapPages + */ +void +MMU_UnmapPages(MMU_CONTEXT *psMMUContext, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + IMG_DEV_VIRTADDR sDevVAddrBase, + IMG_UINT32 ui32PageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags) +{ + IMG_UINT32 uiPTEIndex = 0, ui32Loop=0; + IMG_UINT32 uiPageSize = 1 << uiLog2PageSize; + IMG_UINT32 uiFlushEnd = 0, uiFlushStart = 0; + MMU_Levelx_INFO *psLevel = NULL; + MMU_Levelx_INFO *psPrevLevel = NULL; + IMG_HANDLE hPriv; + const MMU_PxE_CONFIG *psConfig; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + IMG_UINT64 uiProtFlags = 0, uiProtFlagsReadOnly = 0; + MMU_PROTFLAGS_T uiMMUProtFlags = 0, uiMMUReadOnlyProtFlags = 0; + IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase; + IMG_DEV_PHYADDR sBackingPgDevPhysAddr; + IMG_BOOL bUnmap = IMG_TRUE, bDummyBacking = IMG_FALSE, bZeroBacking = IMG_FALSE; + IMG_CHAR *pcBackingPageName = NULL; + PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + +#if defined(PDUMP) + PDUMPCOMMENT("Invalidate %d entries in page tables for virtual range: 0x%010"IMG_UINT64_FMTSPECX" to 0x%010"IMG_UINT64_FMTSPECX, + ui32PageCount, + (IMG_UINT64)sDevVAddr.uiAddr, + ((IMG_UINT64)sDevVAddr.uiAddr) + (uiPageSize*ui32PageCount)-1); +#endif + bDummyBacking = 
PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiMemAllocFlags); + bZeroBacking = PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiMemAllocFlags); + + if (bZeroBacking) + { + sBackingPgDevPhysAddr.uiAddr = psDevNode->sDevZeroPage.ui64PgPhysAddr; + pcBackingPageName = DEV_ZERO_PAGE; + } + else + { + sBackingPgDevPhysAddr.uiAddr = psDevNode->sDummyPage.ui64PgPhysAddr; + pcBackingPageName = DUMMY_PAGE; + } + + bUnmap = (uiMappingFlags)? !bDummyBacking : IMG_TRUE; + /* Get PT and address configs */ + _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize, + &psConfig, &hPriv, &psDevVAddrConfig); + + if (_MMU_ConvertDevMemFlags(bUnmap, + uiMappingFlags, + &uiMMUProtFlags, + psMMUContext) != PVRSRV_OK) + { + return; + } + + uiMMUReadOnlyProtFlags = (uiMMUProtFlags & ~MMU_PROTFLAGS_WRITEABLE) | MMU_PROTFLAGS_READABLE; + + /* Callback to get device specific protection flags */ + if (psConfig->uiBytesPerEntry == 4) + { + uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags); + uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUReadOnlyProtFlags); + } + else if (psConfig->uiBytesPerEntry == 8) + { + uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2PageSize); + uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUReadOnlyProtFlags, uiLog2PageSize); + } + + + OSLockAcquire(psMMUContext->hLock); + + /* Unmap page by page */ + while (ui32Loop < ui32PageCount) + { + if (NULL != pai32FreeIndices) + { + /*Calculate the Device Virtual Address of the page */ + sDevVAddr.uiAddr = sDevVAddrBase.uiAddr + + pai32FreeIndices[ui32Loop] * (IMG_UINT64) uiPageSize; + } + + psPrevLevel = psLevel; + /* Calculate PT index and get new table descriptor */ + _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, + &psLevel, &uiPTEIndex); + + if (psPrevLevel == psLevel) + { + /* + * Sparse allocations may have page offsets which + * decrement as well as increment, so make sure we + * update the range we will flush 
correctly. + */ + if (uiPTEIndex > uiFlushEnd) + uiFlushEnd = uiPTEIndex; + else if (uiPTEIndex < uiFlushStart) + uiFlushStart = uiPTEIndex; + } + else + { + /* Flush if we moved to another psLevel, i.e. page table */ + if (psPrevLevel != NULL) + { + psDevNode->sDevMMUPxSetup.pfnDevPxClean(psDevNode, + &psPrevLevel->sMemDesc.psMapping->sMemHandle, + uiFlushStart * psConfig->uiBytesPerEntry + psPrevLevel->sMemDesc.uiOffset, + (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); + } + + uiFlushStart = uiPTEIndex; + uiFlushEnd = uiFlushStart; + } + + HTBLOGK(HTB_SF_MMU_PAGE_OP_UNMAP, + HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr)); + + /* Set the PT entry to invalid and poison it with a bad address */ + if (_SetupPTE(psMMUContext, + psLevel, + uiPTEIndex, + psConfig, + (bDummyBacking)? &sBackingPgDevPhysAddr : &gsBadDevPhyAddr, + bUnmap, +#if defined(PDUMP) + (bDummyBacking)? (psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName): NULL, + (bDummyBacking)? pcBackingPageName: NULL, + 0U, +#endif + (bZeroBacking)? 
uiProtFlagsReadOnly: uiProtFlags) != PVRSRV_OK) + { + goto e0; + } + + /* Check we haven't wrapped around */ + PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); + ui32Loop++; + sDevVAddr.uiAddr += uiPageSize; + } + + /* Flush the last level we touched */ + if (psLevel != NULL) + { + psDevNode->sDevMMUPxSetup.pfnDevPxClean(psDevNode, + &psLevel->sMemDesc.psMapping->sMemHandle, + uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, + (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); + } + + OSLockRelease(psMMUContext->hLock); + + _MMU_PutPTConfig(psMMUContext, hPriv); + + /* Flush TLB for PTs*/ + psDevNode->pfnMMUCacheInvalidate(psDevNode, + psMMUContext, + MMU_LEVEL_1, + IMG_TRUE); + + return; + +e0: + _MMU_PutPTConfig(psMMUContext, hPriv); + PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Failed to map/unmap page table")); + PVR_ASSERT(0); + OSLockRelease(psMMUContext->hLock); + return; +} + +PVRSRV_ERROR +MMU_MapPMRFast (MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddrBase, + const PMR *psPMR, + IMG_DEVMEM_SIZE_T uiSizeBytes, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + IMG_UINT32 uiLog2HeapPageSize) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 uiCount, i; + IMG_UINT32 uiPageSize = 1 << uiLog2HeapPageSize; + IMG_UINT32 uiPTEIndex = 0; + IMG_UINT64 uiProtFlags; + MMU_PROTFLAGS_T uiMMUProtFlags = 0; + MMU_Levelx_INFO *psLevel = NULL; + IMG_HANDLE hPriv; + const MMU_PxE_CONFIG *psConfig; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase; + IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_DEV_PHYADDR *psDevPAddr; + IMG_BOOL *pbValid; + IMG_UINT32 uiFlushStart = 0; + PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + +#if defined(PDUMP) + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T 
uiSymbolicAddrOffset; + IMG_UINT32 ui32MappedCount = 0; + PDUMPCOMMENT("Wire up Page Table entries to point to the Data Pages (%"IMG_INT64_FMTSPECd" bytes)", uiSizeBytes); +#endif /*PDUMP*/ + + /* We should verify the size and contiguity when supporting variable page size */ + + PVR_ASSERT (psMMUContext != NULL); + PVR_ASSERT (psPMR != NULL); + +#if defined(TC_MEMORY_CONFIG) || defined(PLATO_MEMORY_CONFIG) + /* We're aware that on TC based platforms, accesses from GPU to CPU_LOCAL + * allocated DevMem fail, so we forbid mapping such a PMR into device mmu */ + if (PMR_Flags(psPMR) & PVRSRV_MEMALLOCFLAG_CPU_LOCAL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Mapping a CPU_LOCAL PMR to device is forbidden on this platform", __func__)); + return PVRSRV_ERROR_PMR_NOT_PERMITTED; + } +#endif + + /* Allocate memory for page-frame-numbers and validity states, + N.B. assert could be triggered by an illegal uiSizeBytes */ + uiCount = uiSizeBytes >> uiLog2HeapPageSize; + PVR_ASSERT((IMG_DEVMEM_OFFSET_T)uiCount << uiLog2HeapPageSize == uiSizeBytes); + if (uiCount > PMR_MAX_TRANSLATION_STACK_ALLOC) + { + psDevPAddr = OSAllocMem(uiCount * sizeof(IMG_DEV_PHYADDR)); + PVR_LOG_GOTO_IF_NOMEM(psDevPAddr, eError, return_error); + + pbValid = OSAllocMem(uiCount * sizeof(IMG_BOOL)); + if (pbValid == NULL) + { + /* Should allocation fail, clean-up here before exit */ + OSFreeMem(psDevPAddr); + PVR_LOG_GOTO_WITH_ERROR("pbValid", eError, PVRSRV_ERROR_OUT_OF_MEMORY, free_paddr_array); + } + } + else + { + psDevPAddr = asDevPAddr; + pbValid = abValid; + } + + /* Get general PT and address configs */ + _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2HeapPageSize, + &psConfig, &hPriv, &psDevVAddrConfig); + + eError = _MMU_ConvertDevMemFlags(IMG_FALSE, + uiMappingFlags, + &uiMMUProtFlags, + psMMUContext); + PVR_GOTO_IF_ERROR(eError, put_mmu_context); + + /* Callback to get device specific protection flags */ + + if (psConfig->uiBytesPerEntry == 8) + { + uiProtFlags = 
psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize); + } + else if (psConfig->uiBytesPerEntry == 4) + { + uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags); + } + else + { + PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_MMU_CONFIG_IS_WRONG, put_mmu_context); + } + + + /* "uiSize" is the amount of contiguity in the underlying + page. Normally this would be constant for the system, but, + that constant needs to be communicated, in case it's ever + different; caller guarantees that PMRLockSysPhysAddr() has + already been called */ + eError = PMR_DevPhysAddr(psPMR, + uiLog2HeapPageSize, + uiCount, + 0, + psDevPAddr, + pbValid); + PVR_GOTO_IF_ERROR(eError, put_mmu_context); + + OSLockAcquire(psMMUContext->hLock); + + _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, + &psLevel, &uiPTEIndex); + uiFlushStart = uiPTEIndex; + + /* Map in all pages of that PMR page by page*/ + for (i=0, uiCount=0; uiCount < uiSizeBytes; i++) + { +#if defined(DEBUG) + { + IMG_INT32 i32FeatureVal = 0; + IMG_UINT32 ui32BitLength = FloorLog2(psDevPAddr[i].uiAddr); + i32FeatureVal = PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, PHYS_BUS_WIDTH); + do { + if (0 > i32FeatureVal) + break; + + if (ui32BitLength > i32FeatureVal) + { + PVR_DPF((PVR_DBG_ERROR, + "%s Failed. 
The physical address bitlength (%d)" + " is greater than the chip can handle (%d).", + __func__, ui32BitLength, i32FeatureVal)); + + PVR_ASSERT(ui32BitLength <= i32FeatureVal); + OSLockRelease(psMMUContext->hLock); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, put_mmu_context); + } + } while (0); + } +#endif /*DEBUG*/ +#if defined(PDUMP) + { + IMG_DEVMEM_OFFSET_T uiNextSymName; + + eError = PMR_PDumpSymbolicAddr(psPMR, uiCount, + sizeof(aszMemspaceName), &aszMemspaceName[0], + sizeof(aszSymbolicAddress), &aszSymbolicAddress[0], + &uiSymbolicAddrOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + ui32MappedCount++; + } +#endif /*PDUMP*/ + + HTBLOGK(HTB_SF_MMU_PAGE_OP_PMRMAP, + HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr), + HTBLOG_U64_BITS_HIGH(psDevPAddr[i].uiAddr), HTBLOG_U64_BITS_LOW(psDevPAddr[i].uiAddr)); + + /* Set the PT entry with the specified address and protection flags */ + eError = _SetupPTE(psMMUContext, psLevel, uiPTEIndex, + psConfig, &psDevPAddr[i], IMG_FALSE, +#if defined(PDUMP) + aszMemspaceName, + aszSymbolicAddress, + uiSymbolicAddrOffset, +#endif /*PDUMP*/ + uiProtFlags); + PVR_GOTO_IF_ERROR(eError, unlock_mmu_context); + + sDevVAddr.uiAddr += uiPageSize; + uiCount += uiPageSize; + + /* Calculate PT index and get new table descriptor */ + if (uiPTEIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (uiCount != uiSizeBytes)) + { + uiPTEIndex++; + } + else + { + eError = psDevNode->sDevMMUPxSetup.pfnDevPxClean(psDevNode, + &psLevel->sMemDesc.psMapping->sMemHandle, + uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, + (uiPTEIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry); + PVR_GOTO_IF_ERROR(eError, unlock_mmu_context); + + + _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, + &psLevel, &uiPTEIndex); + uiFlushStart = uiPTEIndex; + } + } + + OSLockRelease(psMMUContext->hLock); + + + _MMU_PutPTConfig(psMMUContext, hPriv); + + if (psDevPAddr != asDevPAddr) + { + 
OSFreeMem(pbValid); + OSFreeMem(psDevPAddr); + } + + /* Flush TLB for PTs*/ + psDevNode->pfnMMUCacheInvalidate(psDevNode, + psMMUContext, + MMU_LEVEL_1, + IMG_FALSE); + +#if defined(PDUMP) + PDUMPCOMMENT("Wired up %d Page Table entries (out of %d)", ui32MappedCount, i); +#endif /*PDUMP*/ + + return PVRSRV_OK; + +unlock_mmu_context: + OSLockRelease(psMMUContext->hLock); + MMU_UnmapPMRFast(psMMUContext, + sDevVAddrBase, + uiSizeBytes >> uiLog2HeapPageSize, + uiLog2HeapPageSize); + +put_mmu_context: + _MMU_PutPTConfig(psMMUContext, hPriv); + + if (pbValid != abValid) + { + OSFreeMem(pbValid); + } + +free_paddr_array: + if (psDevPAddr != asDevPAddr) + { + OSFreeMem(psDevPAddr); + } + +return_error: + PVR_ASSERT(eError == PVRSRV_OK); + return eError; +} + +/* + MMU_UnmapPages + */ +void +MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddrBase, + IMG_UINT32 ui32PageCount, + IMG_UINT32 uiLog2PageSize) +{ + IMG_UINT32 uiPTEIndex = 0, ui32Loop=0; + IMG_UINT32 uiPageSize = 1 << uiLog2PageSize; + MMU_Levelx_INFO *psLevel = NULL; + IMG_HANDLE hPriv; + const MMU_PxE_CONFIG *psConfig; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase; + IMG_UINT64 uiProtFlags = 0; + MMU_PROTFLAGS_T uiMMUProtFlags = 0; + IMG_UINT64 uiEntry = 0; + IMG_UINT32 uiFlushStart = 0; + PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + +#if defined(PDUMP) + PDUMPCOMMENT("Invalidate %d entries in page tables for virtual range: 0x%010"IMG_UINT64_FMTSPECX" to 0x%010"IMG_UINT64_FMTSPECX, + ui32PageCount, + (IMG_UINT64)sDevVAddr.uiAddr, + ((IMG_UINT64)sDevVAddr.uiAddr) + (uiPageSize*ui32PageCount)-1); +#endif + + /* Get PT and address configs */ + _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize, + &psConfig, &hPriv, &psDevVAddrConfig); + + if (_MMU_ConvertDevMemFlags(IMG_TRUE, + 0, + &uiMMUProtFlags, + psMMUContext) != PVRSRV_OK) + { + return; + } + + /* Callback to get device specific protection flags */ + + if 
(psConfig->uiBytesPerEntry == 8) + { + uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2PageSize); + + /* Fill the entry with a bad address but leave space for protection flags */ + uiEntry = (gsBadDevPhyAddr.uiAddr & ~psConfig->uiProtMask) | uiProtFlags; + } + else if (psConfig->uiBytesPerEntry == 4) + { + uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags); + + /* Fill the entry with a bad address but leave space for protection flags */ + uiEntry = (((IMG_UINT32) gsBadDevPhyAddr.uiAddr) & ~psConfig->uiProtMask) | (IMG_UINT32) uiProtFlags; + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: The page table entry byte length is not supported", + __func__)); + goto e0; + } + + OSLockAcquire(psMMUContext->hLock); + + _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, + &psLevel, &uiPTEIndex); + uiFlushStart = uiPTEIndex; + + /* Unmap page by page and keep the loop as quick as possible. + * Only use parts of _SetupPTE that need to be executed. 
*/ + while (ui32Loop < ui32PageCount) + { + + /* Set the PT entry to invalid and poison it with a bad address */ + if (psConfig->uiBytesPerEntry == 8) + { + ((IMG_UINT64*)psLevel->sMemDesc.pvCpuVAddr)[uiPTEIndex] = uiEntry; + } + else if (psConfig->uiBytesPerEntry == 4) + { + ((IMG_UINT32*)psLevel->sMemDesc.pvCpuVAddr)[uiPTEIndex] = (IMG_UINT32) uiEntry; + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: The page table entry byte length is not supported", + __func__)); + goto e1; + } + + /* Log modifications */ + HTBLOGK(HTB_SF_MMU_PAGE_OP_UNMAP, + HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr)); + + HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE, + HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel), + uiPTEIndex, MMU_LEVEL_1, + HTBLOG_U64_BITS_HIGH(uiEntry), HTBLOG_U64_BITS_LOW(uiEntry), + IMG_FALSE); + +#if defined(PDUMP) + PDumpMMUDumpPxEntries(MMU_LEVEL_1, + psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName, + psLevel->sMemDesc.pvCpuVAddr, + psLevel->sMemDesc.sDevPAddr, + uiPTEIndex, + 1, + NULL, + NULL, + 0, + psConfig->uiBytesPerEntry, + psConfig->uiAddrLog2Align, + psConfig->uiAddrShift, + psConfig->uiAddrMask, + psConfig->uiProtMask, + psConfig->uiValidEnMask, + 0, + psMMUContext->psDevAttrs->eMMUType); +#endif /*PDUMP*/ + + sDevVAddr.uiAddr += uiPageSize; + ui32Loop++; + + /* Calculate PT index and get new table descriptor */ + if (uiPTEIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (ui32Loop != ui32PageCount)) + { + uiPTEIndex++; + } + else + { + psDevNode->sDevMMUPxSetup.pfnDevPxClean(psDevNode, + &psLevel->sMemDesc.psMapping->sMemHandle, + uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, + (uiPTEIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry); + + _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, + &psLevel, &uiPTEIndex); + uiFlushStart = uiPTEIndex; + } + } + + OSLockRelease(psMMUContext->hLock); + + _MMU_PutPTConfig(psMMUContext, hPriv); + + /* Flush TLB for PTs*/ + 
psDevNode->pfnMMUCacheInvalidate(psDevNode, + psMMUContext, + MMU_LEVEL_1, + IMG_TRUE); + + return; + +e1: + OSLockRelease(psMMUContext->hLock); + _MMU_PutPTConfig(psMMUContext, hPriv); +e0: + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map/unmap page table", __func__)); + PVR_ASSERT(0); + return; +} + +/* + MMU_ChangeValidity + */ +PVRSRV_ERROR +MMU_ChangeValidity(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiNumPages, + IMG_UINT32 uiLog2PageSize, + IMG_BOOL bMakeValid, + PMR *psPMR) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + IMG_HANDLE hPriv; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + const MMU_PxE_CONFIG *psConfig; + MMU_Levelx_INFO *psLevel = NULL; + IMG_UINT32 uiFlushStart = 0; + IMG_UINT32 uiPTIndex = 0; + IMG_UINT32 i; + IMG_UINT32 uiPageSize = 1 << uiLog2PageSize; + IMG_BOOL bValid; + + PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + +#if defined(PDUMP) + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + + PDUMPCOMMENT("Change valid bit of the data pages to %d (0x%"IMG_UINT64_FMTSPECX" - 0x%"IMG_UINT64_FMTSPECX")", + bMakeValid, + sDevVAddr.uiAddr, + sDevVAddr.uiAddr + (uiNumPages<uiBytesPerEntry == 8) + { + ((IMG_UINT64 *)psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] |= (psConfig->uiValidEnMask); + } + else if (psConfig->uiBytesPerEntry == 4) + { + ((IMG_UINT32 *)psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] |= (psConfig->uiValidEnMask); + } + else + { + PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_MMU_CONFIG_IS_WRONG, e_exit); + } + } + } + else + { + if (psConfig->uiBytesPerEntry == 8) + { + ((IMG_UINT64 *)psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] &= ~(psConfig->uiValidEnMask); + } + else if (psConfig->uiBytesPerEntry == 4) + { + ((IMG_UINT32 *)psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] &= ~(psConfig->uiValidEnMask); + } + 
else + { + PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_MMU_CONFIG_IS_WRONG, e_exit); + } + } + +#if defined(PDUMP) + + PMR_PDumpSymbolicAddr(psPMR, i<psDevAttrs->pszMMUPxPDumpMemSpaceName, + psLevel->sMemDesc.pvCpuVAddr, + psLevel->sMemDesc.sDevPAddr, + uiPTIndex, + 1, + aszMemspaceName, + aszSymbolicAddress, + uiSymbolicAddrOffset, + psConfig->uiBytesPerEntry, + psConfig->uiAddrLog2Align, + psConfig->uiAddrShift, + psConfig->uiAddrMask, + psConfig->uiProtMask, + psConfig->uiValidEnMask, + 0, + psMMUContext->psDevAttrs->eMMUType); +#endif /*PDUMP*/ + + sDevVAddr.uiAddr += uiPageSize; + i++; + + /* Calculate PT index and get new table descriptor */ + if (uiPTIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (i != uiNumPages)) + { + uiPTIndex++; + } + else + { + + eError = psDevNode->sDevMMUPxSetup.pfnDevPxClean(psDevNode, + &psLevel->sMemDesc.psMapping->sMemHandle, + uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, + (uiPTIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry); + PVR_GOTO_IF_ERROR(eError, e_exit); + + _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, + &psLevel, &uiPTIndex); + uiFlushStart = uiPTIndex; + } + } + +e_exit: + + _MMU_PutPTConfig(psMMUContext, hPriv); + + /* Flush TLB for PTs*/ + psDevNode->pfnMMUCacheInvalidate(psDevNode, + psMMUContext, + MMU_LEVEL_1, + !bMakeValid); + + PVR_ASSERT(eError == PVRSRV_OK); + return eError; +} + + +/* + MMU_AcquireBaseAddr + */ +PVRSRV_ERROR +MMU_AcquireBaseAddr(MMU_CONTEXT *psMMUContext, IMG_DEV_PHYADDR *psPhysAddr) +{ + if (!psMMUContext) + { + psPhysAddr->uiAddr = 0; + return PVRSRV_ERROR_INVALID_PARAMS; + } + + *psPhysAddr = psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr; + + return PVRSRV_OK; +} + +/* + MMU_ReleaseBaseAddr + */ +void +MMU_ReleaseBaseAddr(MMU_CONTEXT *psMMUContext) +{ + PVR_UNREFERENCED_PARAMETER(psMMUContext); +} + +/* + MMU_AppendCacheFlags, MMU_ExchangeCacheFlags +*/ + +void MMU_AppendCacheFlags(MMU_CONTEXT *psMMUContext, 
IMG_UINT32 ui32AppendFlags) +{ + PVR_ASSERT(psMMUContext != NULL); + + if (psMMUContext == NULL) + { + return; + } + + OSAtomicOr(&psMMUContext->sCacheFlags, (IMG_INT)ui32AppendFlags); +} + +IMG_UINT32 MMU_ExchangeCacheFlags(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32NewCacheFlags) +{ + PVR_ASSERT(psMMUContext != NULL); + + if (psMMUContext == NULL) + { + return 0; + } + + return (IMG_UINT32)OSAtomicExchange(&psMMUContext->sCacheFlags, (IMG_INT)ui32NewCacheFlags); +} + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +/* + MMU_GetOSids + */ + +void MMU_GetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 *pui32OSid, IMG_UINT32 *pui32OSidReg, IMG_BOOL *pbOSidAxiProt) +{ + *pui32OSid = psMMUContext->psPhysMemCtx->ui32OSid; + *pui32OSidReg = psMMUContext->psPhysMemCtx->ui32OSidReg; + *pbOSidAxiProt = psMMUContext->psPhysMemCtx->bOSidAxiProt; + + return; +} + +#endif + +/* + MMU_CheckFaultAddress + */ +void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR *psDevVAddr, + MMU_FAULT_DATA *psOutFaultData) +{ + /* Ideally the RGX defs should be via callbacks, but the function is only called from RGX. 
*/ +#define MMU_VALID_STR(entry,level) \ + (apszMMUValidStr[((((entry)&(RGX_MMUCTRL_##level##_DATA_ENTRY_PENDING_EN))!=0) << 1)| \ + ((((entry)&(RGX_MMUCTRL_##level##_DATA_VALID_EN))!=0) << 0)]) + static const IMG_PCHAR apszMMUValidStr[1<<2] = {/*--*/ "not valid", + /*-V*/ "valid", + /*P-*/ "pending", + /*PV*/ "inconsistent (pending and valid)"}; + MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs; + MMU_LEVEL eMMULevel = psDevAttrs->eTopLevel; + const MMU_PxE_CONFIG *psConfig; + const MMU_PxE_CONFIG *psMMUPDEConfig; + const MMU_PxE_CONFIG *psMMUPTEConfig; + const MMU_DEVVADDR_CONFIG *psMMUDevVAddrConfig; + IMG_HANDLE hPriv; + MMU_Levelx_INFO *psLevel = NULL; + PVRSRV_ERROR eError; + IMG_UINT64 uiIndex; + IMG_UINT32 ui32PCIndex = 0xFFFFFFFF; + IMG_UINT32 ui32PDIndex = 0xFFFFFFFF; + IMG_UINT32 ui32PTIndex = 0xFFFFFFFF; + IMG_UINT32 ui32Log2PageSize; + MMU_FAULT_DATA sMMUFaultData = {0}; + MMU_LEVEL_DATA *psMMULevelData; + + OSLockAcquire(psMMUContext->hLock); + + /* + At this point we don't know the page size so assume it's 4K. + When we get the PD level (MMU_LEVEL_2) we can check to see + if this assumption is correct. 
+ */ + eError = psDevAttrs->pfnGetPageSizeConfiguration(12, + &psMMUPDEConfig, + &psMMUPTEConfig, + &psMMUDevVAddrConfig, + &hPriv); + if (eError != PVRSRV_OK) + { + PVR_LOG(("Failed to get the page size info for log2 page sizeof 12")); + } + + psLevel = &psMMUContext->sBaseLevelInfo; + psConfig = psDevAttrs->psBaseConfig; + + sMMUFaultData.eTopLevel = psDevAttrs->eTopLevel; + sMMUFaultData.eType = MMU_FAULT_TYPE_NON_PM; + + + for (; eMMULevel > MMU_LEVEL_0; eMMULevel--) + { + if (eMMULevel == MMU_LEVEL_3) + { + /* Determine the PC index */ + uiIndex = psDevVAddr->uiAddr & psDevAttrs->psTopLevelDevVAddrConfig->uiPCIndexMask; + uiIndex = uiIndex >> psDevAttrs->psTopLevelDevVAddrConfig->uiPCIndexShift; + ui32PCIndex = (IMG_UINT32) uiIndex; + PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PCIndex)); + + psMMULevelData = &sMMUFaultData.sLevelData[MMU_LEVEL_3]; + psMMULevelData->uiBytesPerEntry = psConfig->uiBytesPerEntry; + psMMULevelData->ui32Index = ui32PCIndex; + + if (ui32PCIndex >= psLevel->ui32NumOfEntries) + { + psMMULevelData->ui32NumOfEntries = psLevel->ui32NumOfEntries; + break; + } + + if (psConfig->uiBytesPerEntry == 4) + { + IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr; + + psMMULevelData->ui64Address = pui32Ptr[ui32PCIndex]; + psMMULevelData->psDebugStr = MMU_VALID_STR(pui32Ptr[ui32PCIndex] & psConfig->uiProtMask, PC); + + } + else + { + IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr; + + psMMULevelData->ui64Address = pui64Ptr[ui32PCIndex]; + psMMULevelData->psDebugStr = MMU_VALID_STR(pui64Ptr[ui32PCIndex] & psConfig->uiProtMask, PC); + + } + + psLevel = psLevel->apsNextLevel[ui32PCIndex]; + if (!psLevel) + { + break; + } + psConfig = psMMUPDEConfig; + continue; /* continue to the next level */ + } + + + if (eMMULevel == MMU_LEVEL_2) + { + /* Determine the PD index */ + uiIndex = psDevVAddr->uiAddr & psDevAttrs->psTopLevelDevVAddrConfig->uiPDIndexMask; + uiIndex = uiIndex >> psDevAttrs->psTopLevelDevVAddrConfig->uiPDIndexShift; + ui32PDIndex = 
(IMG_UINT32) uiIndex; + PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PDIndex)); + + psMMULevelData = &sMMUFaultData.sLevelData[MMU_LEVEL_2]; + psMMULevelData->uiBytesPerEntry = psConfig->uiBytesPerEntry; + psMMULevelData->ui32Index = ui32PDIndex; + + if (ui32PDIndex >= psLevel->ui32NumOfEntries) + { + psMMULevelData->ui32NumOfEntries = psLevel->ui32NumOfEntries; + break; + } + + if (psConfig->uiBytesPerEntry == 4) + { + IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr; + + psMMULevelData->ui64Address = pui32Ptr[ui32PDIndex]; + psMMULevelData->psDebugStr = MMU_VALID_STR(pui32Ptr[ui32PDIndex] & psMMUPDEConfig->uiProtMask, PD); + + + if (psDevAttrs->pfnGetPageSizeFromPDE4(pui32Ptr[ui32PDIndex], &ui32Log2PageSize) != PVRSRV_OK) + { + PVR_LOG(("Failed to get the page size from the PDE")); + } + } + else + { + IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr; + + psMMULevelData->ui64Address = pui64Ptr[ui32PDIndex]; + psMMULevelData->psDebugStr = MMU_VALID_STR(pui64Ptr[ui32PDIndex] & psMMUPDEConfig->uiProtMask, PD); + + if (psDevAttrs->pfnGetPageSizeFromVirtAddr != NULL) + { + /* MMU_VERSION >= 4 */ + if (psDevAttrs->pfnGetPageSizeFromVirtAddr(psMMUContext->psPhysMemCtx->psDevNode, *psDevVAddr, &ui32Log2PageSize) != PVRSRV_OK) + { + PVR_LOG(("Failed to get the page size from the virtual address")); + } + } + else if (psDevAttrs->pfnGetPageSizeFromPDE8(pui64Ptr[ui32PDIndex], &ui32Log2PageSize) != PVRSRV_OK) + { + PVR_LOG(("Failed to get the page size from the PDE")); + } + } + + /* + We assumed the page size was 4K, now we have the actual size + from the PDE we can confirm if our assumption was correct. 
+ Until now it hasn't mattered as the PC and PD are the same + regardless of the page size + */ + if (ui32Log2PageSize != 12) + { + /* Put the 4K page size data */ + psDevAttrs->pfnPutPageSizeConfiguration(hPriv); + + /* Get the correct size data */ + eError = psDevAttrs->pfnGetPageSizeConfiguration(ui32Log2PageSize, + &psMMUPDEConfig, + &psMMUPTEConfig, + &psMMUDevVAddrConfig, + &hPriv); + if (eError != PVRSRV_OK) + { + PVR_LOG(("Failed to get the page size info for log2 page sizeof %d", ui32Log2PageSize)); + break; + } + } + psLevel = psLevel->apsNextLevel[ui32PDIndex]; + if (!psLevel) + { + break; + } + psConfig = psMMUPTEConfig; + continue; /* continue to the next level */ + } + + + if (eMMULevel == MMU_LEVEL_1) + { + /* Determine the PT index */ + uiIndex = psDevVAddr->uiAddr & psMMUDevVAddrConfig->uiPTIndexMask; + uiIndex = uiIndex >> psMMUDevVAddrConfig->uiPTIndexShift; + ui32PTIndex = (IMG_UINT32) uiIndex; + PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PTIndex)); + + psMMULevelData = &sMMUFaultData.sLevelData[MMU_LEVEL_1]; + psMMULevelData->uiBytesPerEntry = psConfig->uiBytesPerEntry; + psMMULevelData->ui32Index = ui32PTIndex; + + if (ui32PTIndex >= psLevel->ui32NumOfEntries) + { + psMMULevelData->ui32NumOfEntries = psLevel->ui32NumOfEntries; + break; + } + + if (psConfig->uiBytesPerEntry == 4) + { + IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr; + + psMMULevelData->ui64Address = pui32Ptr[ui32PTIndex]; + psMMULevelData->psDebugStr = MMU_VALID_STR(pui32Ptr[ui32PTIndex] & psMMUPTEConfig->uiProtMask, PT); + + } + else + { + IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr; + + psMMULevelData->ui64Address = pui64Ptr[ui32PTIndex]; + psMMULevelData->psDebugStr = MMU_VALID_STR(pui64Ptr[ui32PTIndex] & psMMUPTEConfig->uiProtMask, PT); + + } + goto e1; + } + + PVR_LOG(("Unsupported MMU setup: %d", eMMULevel)); + break; + } + +e1: + /* Put the page size data back */ + psDevAttrs->pfnPutPageSizeConfiguration(hPriv); + OSLockRelease(psMMUContext->hLock); + + 
*psOutFaultData = sMMUFaultData; +} + +static IMG_UINT64 MMU_GetVDevAddrPTE(MMU_CONTEXT *psMMUContext, + const MMU_PxE_CONFIG *psConfig, + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig, + IMG_UINT32 uiLog2PageSize, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_BOOL *pbStatusOut) +{ + MMU_Levelx_INFO *psLevel = NULL; + IMG_UINT32 uiIndex = 0; + IMG_BOOL bStatus = IMG_FALSE; + IMG_UINT64 ui64Entry = 0; + + OSLockAcquire(psMMUContext->hLock); + + switch (psMMUContext->psDevAttrs->eTopLevel) + { + case MMU_LEVEL_3: + uiIndex = _CalcPCEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE); + psLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiIndex]; + if (psLevel == NULL) + break; + + __fallthrough; + case MMU_LEVEL_2: + uiIndex = _CalcPDEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE); + + if (psLevel != NULL) + psLevel = psLevel->apsNextLevel[uiIndex]; + else + psLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiIndex]; + + if (psLevel == NULL) + break; + + __fallthrough; + case MMU_LEVEL_1: + uiIndex = _CalcPTEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE); + + if (psLevel == NULL) + psLevel = &psMMUContext->sBaseLevelInfo; + + ui64Entry = ((IMG_UINT64 *)psLevel->sMemDesc.pvCpuVAddr)[uiIndex]; + bStatus = ui64Entry & psConfig->uiValidEnMask; + + break; + default: + PVR_LOG(("MMU_IsVDevAddrValid: Unsupported MMU setup")); + break; + } + + OSLockRelease(psMMUContext->hLock); + + *pbStatusOut = bStatus; + + return ui64Entry; +} + +IMG_BOOL MMU_IsVDevAddrValid(MMU_CONTEXT *psMMUContext, + IMG_UINT32 uiLog2PageSize, + IMG_DEV_VIRTADDR sDevVAddr) +{ + IMG_BOOL bStatus; + const MMU_PxE_CONFIG *psConfig; + IMG_HANDLE hPriv; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + + _MMU_GetPTConfig(psMMUContext, uiLog2PageSize, &psConfig, &hPriv, &psDevVAddrConfig); + + MMU_GetVDevAddrPTE(psMMUContext, + psConfig, + psDevVAddrConfig, + uiLog2PageSize, + sDevVAddr, + &bStatus); + + _MMU_PutPTConfig(psMMUContext, hPriv); + + return bStatus; +} + +#if defined(PDUMP) +/* + MMU_ContextDerivePCPDumpSymAddr + */ 
+PVRSRV_ERROR MMU_ContextDerivePCPDumpSymAddr(MMU_CONTEXT *psMMUContext, + IMG_CHAR *pszPDumpSymbolicNameBuffer, + size_t uiPDumpSymbolicNameBufferSize) +{ + size_t uiCount; + IMG_UINT64 ui64PhysAddr; + PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psPhysMemCtx->psDevNode->sDevId; + + if (!psMMUContext->sBaseLevelInfo.sMemDesc.bValid) + { + /* We don't have any allocations. You're not allowed to ask + * for the page catalogue base address until you've made at + * least one allocation. + */ + return PVRSRV_ERROR_MMU_API_PROTOCOL_ERROR; + } + + ui64PhysAddr = (IMG_UINT64)psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr.uiAddr; + + PVR_ASSERT(uiPDumpSymbolicNameBufferSize >= (IMG_UINT32)(21 + OSStringLength(psDevId->pszPDumpDevName))); + + /* Page table Symbolic Name is formed from page table phys addr + prefixed with MMUPT_. */ + uiCount = OSSNPrintf(pszPDumpSymbolicNameBuffer, + uiPDumpSymbolicNameBufferSize, + ":%s:%s%016"IMG_UINT64_FMTSPECX, + psDevId->pszPDumpDevName, + psMMUContext->sBaseLevelInfo.sMemDesc.bValid?"MMUPC_":"XXX", + ui64PhysAddr); + + if (uiCount + 1 > uiPDumpSymbolicNameBufferSize) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + return PVRSRV_OK; +} + +/* + MMU_PDumpWritePageCatBase + */ +PVRSRV_ERROR +MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext, + const IMG_CHAR *pszSpaceName, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32WordSize, + IMG_UINT32 ui32AlignShift, + IMG_UINT32 ui32Shift, + PDUMP_FLAGS_T uiPdumpFlags) +{ + PVRSRV_ERROR eError; + IMG_CHAR aszPageCatBaseSymbolicAddr[100]; + const IMG_CHAR *pszPDumpDevName = psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName; + + eError = MMU_ContextDerivePCPDumpSymAddr(psMMUContext, + &aszPageCatBaseSymbolicAddr[0], + sizeof(aszPageCatBaseSymbolicAddr)); + if (eError == PVRSRV_OK) + { + eError = PDumpWriteSymbAddress(pszSpaceName, + uiOffset, + aszPageCatBaseSymbolicAddr, + 0, /* offset -- Could be non-zero for var. 
pgsz */ + pszPDumpDevName, + ui32WordSize, + ui32AlignShift, + ui32Shift, + uiPdumpFlags | PDUMP_FLAGS_CONTINUOUS); + } + + return eError; +} + +/* + MMU_AcquirePDumpMMUContext + */ +PVRSRV_ERROR MMU_AcquirePDumpMMUContext(MMU_CONTEXT *psMMUContext, + IMG_UINT32 *pui32PDumpMMUContextID, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psPhysMemCtx->psDevNode->sDevId; + + if (!psMMUContext->ui32PDumpContextIDRefCount) + { + PDUMP_MMU_ALLOC_MMUCONTEXT(psDevId->pszPDumpDevName, + psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr, + psMMUContext->psDevAttrs->eMMUType, + &psMMUContext->uiPDumpContextID, + ui32PDumpFlags); + } + + psMMUContext->ui32PDumpContextIDRefCount++; + *pui32PDumpMMUContextID = psMMUContext->uiPDumpContextID; + + return PVRSRV_OK; +} + +/* + MMU_ReleasePDumpMMUContext + */ +PVRSRV_ERROR MMU_ReleasePDumpMMUContext(MMU_CONTEXT *psMMUContext, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psPhysMemCtx->psDevNode->sDevId; + + PVR_ASSERT(psMMUContext->ui32PDumpContextIDRefCount != 0); + psMMUContext->ui32PDumpContextIDRefCount--; + + if (psMMUContext->ui32PDumpContextIDRefCount == 0) + { + PDUMP_MMU_FREE_MMUCONTEXT(psDevId->pszPDumpDevName, + psMMUContext->uiPDumpContextID, + ui32PDumpFlags); + } + + return PVRSRV_OK; +} +#endif + +/****************************************************************************** + End of file (mmu_common.c) + ******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/common/pdump_mmu.c b/drivers/mcst/gpu-imgtec/services/server/common/pdump_mmu.c new file mode 100644 index 000000000000..edbed285df4f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/pdump_mmu.c @@ -0,0 +1,898 @@ +/*************************************************************************/ /*! +@File +@Title MMU PDump functions +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Common PDump (MMU specific) functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#if defined(PDUMP) + +#include "img_types.h" +#include "img_defs.h" +#include "pdump_mmu.h" +#include "pdump_km.h" +#include "pdump_physmem.h" +#include "osfunc.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" +#include "allocmem.h" + +#define MAX_PDUMP_MMU_CONTEXTS (10) +static IMG_UINT32 guiPDumpMMUContextAvailabilityMask = (1< 0) + { + /* do some tests for contiguity. If it fails, we flush anyway */ + if (pvBeyondLastPointer != pvBytes || + ui32SymAddrOffset != ui32BeyondLastOffset + /* NB: ought to check that symbolic name agrees too, but + we know this always to be the case in the current use-case */ + ) + { + bFlush = IMG_TRUE; + } + } + + /* Flush if necessary */ + if (bFlush && uiAccumulatedBytes > 0) + { + eErr = PDumpWriteParameter((IMG_UINT8 *)(uintptr_t)pvBasePointer, + uiAccumulatedBytes, ui32Flags, + &ui32ParamOutPos, pszFileName); + if (eErr == PVRSRV_OK) + { + eErr = PDumpSNPrintf(hScript, ui32MaxLenScript, + "LDB %s:0x%X 0x%X 0x%X %s", + /* dest */ + pszSymbolicName, + ui32BaseOffset, + /* size */ + uiAccumulatedBytes, + /* file offset */ + ui32ParamOutPos, + /* filename */ + pszFileName); + PVR_LOG_GOTO_IF_ERROR(eErr, "PDumpSNPrintf", ErrOut); + + PDumpWriteScript(hScript, ui32Flags); + } + else if (eErr != PVRSRV_ERROR_PDUMP_NOT_ALLOWED) + { + PVR_LOG_GOTO_IF_ERROR(eErr, "PDumpWriteParameter", ErrOut); + } + else + { + /* else Write 
to parameter file prevented under the flags and + * current state of the driver so skip write to script and error IF. + * this is normal e.g. no in capture range for example. + */ + eErr = PVRSRV_OK; + } + + uiAccumulatedBytes = 0; + } + + /* Initialise offsets and pointers if necessary */ + if (uiAccumulatedBytes == 0) + { + ui32BaseOffset = ui32BeyondLastOffset = ui32SymAddrOffset; + pvBeyondLastPointer = pvBasePointer = (const IMG_CHAR *)pvBytes; + } + + /* Accumulate some bytes */ + ui32BeyondLastOffset += uiNumBytes; + pvBeyondLastPointer += uiNumBytes; + uiAccumulatedBytes += uiNumBytes; + +ErrOut: + PDUMP_RELEASE_SCRIPT_AND_FILE_STRING(); + return eErr; +} + +/************************************************************************** + * Function Name : PDumpMMUMalloc + * Inputs : + * Outputs : + * Returns : PVRSRV_ERROR + * Description : +**************************************************************************/ +PVRSRV_ERROR PDumpMMUMalloc(const IMG_CHAR *pszPDumpDevName, + MMU_LEVEL eMMULevel, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32Align, + PDUMP_MMU_TYPE eMMUType) +{ + PVRSRV_ERROR eErr = PVRSRV_OK; + IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_BLKDATA; + IMG_UINT64 ui64SymbolicAddr; + IMG_CHAR *pszMMUPX; + + PDUMP_GET_SCRIPT_STRING(); + + PVR_GOTO_IF_INVALID_PARAM(eMMULevel < MMU_LEVEL_LAST, eErr, ErrOut); + + PDUMP_LOCK(ui32Flags); + + /* + Write a comment to the PDump2 script streams indicating the memory allocation + */ + eErr = PDumpSNPrintf(hScript, + ui32MaxLen, + "-- CALLOC :%s:%s Size=0x%08X Alignment=0x%08X 0x0 DevPAddr=0x%08"IMG_UINT64_FMTSPECX, + pszPDumpDevName, + apszMMULevelStringLookup[eMMULevel], + ui32Size, + ui32Align, + psDevPAddr->uiAddr); + PVR_GOTO_IF_ERROR(eErr, ErrUnlock); + + PDumpWriteScript(hScript, ui32Flags); + + /* + construct the symbolic address + */ + ui64SymbolicAddr = (IMG_UINT64)psDevPAddr->uiAddr; + + /* + Write to the MMU script stream indicating the memory allocation 
+ */ + if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV) + { + pszMMUPX = MIPSMMUPX_FMT(eMMULevel); + } + else + { + pszMMUPX = MMUPX_FMT(eMMULevel); + } + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "CALLOC :%s:%s%016"IMG_UINT64_FMTSPECX" 0x%X 0x%X 0x0", + pszPDumpDevName, + pszMMUPX, + ui64SymbolicAddr, + ui32Size, + ui32Align + /* don't need this sDevPAddr.uiAddr*/); + PVR_GOTO_IF_ERROR(eErr, ErrUnlock); + PDumpWriteScript(hScript, ui32Flags); + +ErrUnlock: + PDUMP_UNLOCK(ui32Flags); +ErrOut: + PDUMP_RELEASE_SCRIPT_STRING(); + return eErr; +} + +/************************************************************************** + * Function Name : PDumpMMUFree + * Inputs : + * Outputs : + * Returns : PVRSRV_ERROR + * Description : +**************************************************************************/ +PVRSRV_ERROR PDumpMMUFree(const IMG_CHAR *pszPDumpDevName, + MMU_LEVEL eMMULevel, + IMG_DEV_PHYADDR *psDevPAddr, + PDUMP_MMU_TYPE eMMUType) +{ + PVRSRV_ERROR eErr = PVRSRV_OK; + IMG_UINT64 ui64SymbolicAddr; + IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_BLKDATA; + IMG_CHAR *pszMMUPX; + + PDUMP_GET_SCRIPT_STRING(); + + PVR_GOTO_IF_INVALID_PARAM(eMMULevel < MMU_LEVEL_LAST, eErr, ErrOut); + + PDUMP_LOCK(ui32Flags); + /* + Write a comment to the PDUMP2 script streams indicating the memory free + */ + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "-- FREE :%s:%s", + pszPDumpDevName, apszMMULevelStringLookup[eMMULevel]); + PVR_GOTO_IF_ERROR(eErr, ErrUnlock); + + PDumpWriteScript(hScript, ui32Flags); + + /* + construct the symbolic address + */ + ui64SymbolicAddr = (IMG_UINT64)psDevPAddr->uiAddr; + + /* + Write to the MMU script stream indicating the memory free + */ + if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV) + { + pszMMUPX = MIPSMMUPX_FMT(eMMULevel); + } + else + { + pszMMUPX = MMUPX_FMT(eMMULevel); + } + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "FREE :%s:%s%016"IMG_UINT64_FMTSPECX, + pszPDumpDevName, + pszMMUPX, + ui64SymbolicAddr); + 
PVR_GOTO_IF_ERROR(eErr, ErrUnlock); + PDumpWriteScript(hScript, ui32Flags); + +ErrUnlock: + PDUMP_UNLOCK(ui32Flags); +ErrOut: + PDUMP_RELEASE_SCRIPT_STRING(); + return eErr; +} + +/******************************************************************************************************* + * Function Name : PDumpPTBaseObjectToMem64 + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Create a PDUMP string, which represents a memory write from the baseobject + * for MIPS MMU device type +********************************************************************************************************/ +PVRSRV_ERROR PDumpPTBaseObjectToMem64(const IMG_CHAR *pszPDumpDevName, + PMR *psPMRDest, + IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource, + IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest, + IMG_UINT32 ui32Flags, + MMU_LEVEL eMMULevel, + IMG_UINT64 ui64PxSymAddr, + IMG_UINT64 ui64PxOffset) +{ + + IMG_CHAR aszMemspaceNameDest[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicNameDest[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetDest; + IMG_DEVMEM_OFFSET_T uiNextSymNameDest; + PVRSRV_ERROR eErr = PVRSRV_OK; + + PDUMP_GET_SCRIPT_STRING() + + eErr = PMR_PDumpSymbolicAddr(psPMRDest, + uiLogicalOffsetDest, + PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH, + aszMemspaceNameDest, + PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH, + aszSymbolicNameDest, + &uiPDumpSymbolicOffsetDest, + &uiNextSymNameDest); + + PVR_GOTO_IF_ERROR(eErr, Err); + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, + "WRW64 :%s:%s:0x%"IMG_UINT64_FMTSPECX" :%s:%s%016"IMG_UINT64_FMTSPECX":0x%"IMG_UINT64_FMTSPECX, + aszMemspaceNameDest, aszSymbolicNameDest, uiPDumpSymbolicOffsetDest, + pszPDumpDevName, MIPSMMUPX_FMT(eMMULevel), ui64PxSymAddr, ui64PxOffset); + + PVR_GOTO_IF_ERROR(eErr, Err); + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + PDUMP_UNLOCK(ui32Flags); + +Err: + PDUMP_RELEASE_SCRIPT_STRING(); + return eErr; +} + 
+/************************************************************************** + * Function Name : PDumpMMUDumpPxEntries + * Inputs : + * Outputs : + * Returns : PVRSRV_ERROR + * Description : +**************************************************************************/ +PVRSRV_ERROR PDumpMMUDumpPxEntries(MMU_LEVEL eMMULevel, + const IMG_CHAR *pszPDumpDevName, + void *pvPxMem, + IMG_DEV_PHYADDR sPxDevPAddr, + IMG_UINT32 uiFirstEntry, + IMG_UINT32 uiNumEntries, + const IMG_CHAR *pszMemspaceName, + const IMG_CHAR *pszSymbolicAddr, + IMG_UINT64 uiSymbolicAddrOffset, + IMG_UINT32 uiBytesPerEntry, + IMG_UINT32 uiLog2Align, + IMG_UINT32 uiAddrShift, + IMG_UINT64 uiAddrMask, + IMG_UINT64 uiPxEProtMask, + IMG_UINT64 uiDataValidEnable, + IMG_UINT32 ui32Flags, + PDUMP_MMU_TYPE eMMUType) +{ + PVRSRV_ERROR eErr = PVRSRV_OK; + IMG_UINT64 ui64PxSymAddr; + IMG_UINT64 ui64PxEValueSymAddr; + IMG_UINT32 ui32SymAddrOffset = 0; + IMG_UINT32 *pui32PxMem; + IMG_UINT64 *pui64PxMem; + IMG_BOOL bPxEValid; + IMG_UINT32 uiPxEIdx; + IMG_INT32 iShiftAmount; + IMG_CHAR *pszWrwSuffix = NULL; + void *pvRawBytes = NULL; + IMG_CHAR aszPxSymbolicAddr[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_UINT64 ui64PxE64; + IMG_UINT64 ui64Protflags64; + IMG_CHAR *pszMMUPX; + + PDUMP_GET_SCRIPT_STRING(); + ui32Flags |= (PDUMP_FLAGS_BLKDATA | PDUMP_FLAGS_CONTINUOUS); + + eErr = PDumpReady(); + if (eErr != PVRSRV_OK) + { + /* Mask suspension from caller as this is terminal & logged */ + eErr = (eErr == PVRSRV_ERROR_PDUMP_NOT_ACTIVE) ? 
PVRSRV_OK : eErr; + goto ErrOut; + } + + PVR_GOTO_IF_INVALID_PARAM(pvPxMem, eErr, ErrOut); + + /* + create the symbolic address of the Px + */ + ui64PxSymAddr = sPxDevPAddr.uiAddr; + + if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV) + { + pszMMUPX = MIPSMMUPX_FMT(eMMULevel); + } + else + { + pszMMUPX = MMUPX_FMT(eMMULevel); + } + + OSSNPrintf(aszPxSymbolicAddr, + PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH, + ":%s:%s%016"IMG_UINT64_FMTSPECX, + pszPDumpDevName, + pszMMUPX, + ui64PxSymAddr); + + PDUMP_LOCK(ui32Flags); + + /* + traverse PxEs, dumping entries + */ + for (uiPxEIdx = uiFirstEntry; + uiPxEIdx < uiFirstEntry + uiNumEntries; + uiPxEIdx++) + { + /* Calc the symbolic address offset of the PxE location + This is what we have to add to the table address to get to a certain entry */ + ui32SymAddrOffset = (uiPxEIdx*uiBytesPerEntry); + + /* Calc the symbolic address of the PxE value and HW protflags */ + /* just read it here */ + switch (uiBytesPerEntry) + { + case 4: + { + pui32PxMem = pvPxMem; + ui64PxE64 = pui32PxMem[uiPxEIdx]; + pszWrwSuffix = ""; + pvRawBytes = &pui32PxMem[uiPxEIdx]; + break; + } + case 8: + { + pui64PxMem = pvPxMem; + ui64PxE64 = pui64PxMem[uiPxEIdx]; + pszWrwSuffix = "64"; + pvRawBytes = &pui64PxMem[uiPxEIdx]; + break; + } + default: + { + PVR_DPF((PVR_DBG_ERROR, "PDumpMMUPxEntries: error")); + ui64PxE64 = 0; + //!!error + break; + } + } + + ui64PxEValueSymAddr = (ui64PxE64 & uiAddrMask) >> uiAddrShift << uiLog2Align; + ui64Protflags64 = ui64PxE64 & uiPxEProtMask; + bPxEValid = (ui64Protflags64 & uiDataValidEnable) ? 
IMG_TRUE : IMG_FALSE; + + if (!bPxEValid) + { + /* If the entry was "invalid", simply write the actual + value found to the memory location */ + eErr = _ContiguousPDumpBytes(aszPxSymbolicAddr, ui32SymAddrOffset, IMG_FALSE, + uiBytesPerEntry, pvRawBytes, + ui32Flags); + if (eErr == PVRSRV_OK) + { + goto done; + } + else + { + goto ErrUnlock; + } + } + + _ContiguousPDumpBytes(aszPxSymbolicAddr, ui32SymAddrOffset, IMG_TRUE, + 0, NULL, + ui32Flags); + + iShiftAmount = (IMG_INT32)(uiLog2Align - uiAddrShift); + + /* First put the symbolic representation of the actual + address of the entry into a pdump internal register */ + /* MOV seemed cleaner here, since (a) it's 64-bit; (b) the + target is not memory. However, MOV cannot do the + "reference" of the symbolic address. Apparently WRW is + correct. */ + + if (pszSymbolicAddr == NULL) + { + pszSymbolicAddr = "none"; + } + + if (eMMULevel == MMU_LEVEL_1) + { + if (iShiftAmount == 0) + { + eErr = PDumpSNPrintf(hScript, + ui32MaxLen, + "WRW%s :%s:%s%016"IMG_UINT64_FMTSPECX":0x%08X :%s:%s:0x%"IMG_UINT64_FMTSPECX" | 0x%"IMG_UINT64_FMTSPECX"\n", + pszWrwSuffix, + /* dest */ + pszPDumpDevName, + pszMMUPX, + ui64PxSymAddr, + ui32SymAddrOffset, + /* src */ + pszMemspaceName, + pszSymbolicAddr, + uiSymbolicAddrOffset, + /* ORing prot flags */ + ui64Protflags64); + } + else + { + eErr = PDumpSNPrintf(hScript, + ui32MaxLen, + "WRW :%s:$1 :%s:%s:0x%"IMG_UINT64_FMTSPECX"\n", + /* dest */ + pszPDumpDevName, + /* src */ + pszMemspaceName, + pszSymbolicAddr, + uiSymbolicAddrOffset); + } + } + else + { + if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV) + { + pszMMUPX = MIPSMMUPX_FMT(eMMULevel - 1); + } + else + { + pszMMUPX = MMUPX_FMT(eMMULevel - 1); + } + eErr = PDumpSNPrintf(hScript, + ui32MaxLen, + "WRW :%s:$1 :%s:%s%016"IMG_UINT64_FMTSPECX":0x0", + /* dest */ + pszPDumpDevName, + /* src */ + pszPDumpDevName, + pszMMUPX, + ui64PxEValueSymAddr); + if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV) + { + pszMMUPX = MIPSMMUPX_FMT(eMMULevel); 
+ } + else + { + pszMMUPX = MMUPX_FMT(eMMULevel); + } + } + + PVR_GOTO_IF_ERROR(eErr, ErrUnlock); + PDumpWriteScript(hScript, ui32Flags); + + /* Now shift it to the right place, if necessary: */ + /* Now shift that value down, by the "Align shift" + amount, to get it into units (ought to assert that + we get an integer - i.e. we don't shift any bits + off the bottom, don't know how to do PDUMP + assertions yet) and then back up by the right + amount to get it into the position of the field. + This is optimised into a single shift right by the + difference between the two. */ + if (iShiftAmount > 0) + { + /* Page X Address is specified in units larger + than the position in the PxE would suggest. */ + eErr = PDumpSNPrintf(hScript, + ui32MaxLen, + "SHR :%s:$1 :%s:$1 0x%X", + /* dest */ + pszPDumpDevName, + /* src A */ + pszPDumpDevName, + /* src B */ + iShiftAmount); + PVR_GOTO_IF_ERROR(eErr, ErrUnlock); + PDumpWriteScript(hScript, ui32Flags); + } + else if (iShiftAmount < 0) + { + /* Page X Address is specified in units smaller + than the position in the PxE would suggest. 
*/ + eErr = PDumpSNPrintf(hScript, + ui32MaxLen, + "SHL :%s:$1 :%s:$1 0x%X", + /* dest */ + pszPDumpDevName, + /* src A */ + pszPDumpDevName, + /* src B */ + -iShiftAmount); + PVR_GOTO_IF_ERROR(eErr, ErrUnlock); + PDumpWriteScript(hScript, ui32Flags); + } + + if (eMMULevel == MMU_LEVEL_1) + { + if (iShiftAmount != 0) + { + /* Now we can "or" in the protection flags */ + eErr = PDumpSNPrintf(hScript, + ui32MaxLen, + "OR :%s:$1 :%s:$1 0x%"IMG_UINT64_FMTSPECX, + /* dest */ + pszPDumpDevName, + /* src A */ + pszPDumpDevName, + /* src B */ + ui64Protflags64); + PVR_GOTO_IF_ERROR(eErr, ErrUnlock); + PDumpWriteScript(hScript, ui32Flags); + eErr = PDumpSNPrintf(hScript, + ui32MaxLen, + "WRW%s :%s:%s%016"IMG_UINT64_FMTSPECX":0x%08X :%s:$1", + pszWrwSuffix, + /* dest */ + pszPDumpDevName, + pszMMUPX, + ui64PxSymAddr, + ui32SymAddrOffset, + /* src */ + pszPDumpDevName); + PVR_GOTO_IF_ERROR(eErr, ErrUnlock); + PDumpWriteScript(hScript, ui32Flags); + } + } + else + { + /* Now we can "or" in the protection flags */ + eErr = PDumpSNPrintf(hScript, + ui32MaxLen, + "OR :%s:$1 :%s:$1 0x%"IMG_UINT64_FMTSPECX, + /* dest */ + pszPDumpDevName, + /* src A */ + pszPDumpDevName, + /* src B */ + ui64Protflags64); + PVR_GOTO_IF_ERROR(eErr, ErrUnlock); + PDumpWriteScript(hScript, ui32Flags); + + /* Finally, we write the register into the actual PxE */ + eErr = PDumpSNPrintf(hScript, + ui32MaxLen, + "WRW%s :%s:%s%016"IMG_UINT64_FMTSPECX":0x%08X :%s:$1", + pszWrwSuffix, + /* dest */ + pszPDumpDevName, + pszMMUPX, + ui64PxSymAddr, + ui32SymAddrOffset, + /* src */ + pszPDumpDevName); + PVR_GOTO_IF_ERROR(eErr, ErrUnlock); + PDumpWriteScript(hScript, ui32Flags); + } + } + +done: + /* flush out any partly accumulated stuff for LDB */ + _ContiguousPDumpBytes(aszPxSymbolicAddr, ui32SymAddrOffset, IMG_TRUE, + 0, NULL, + ui32Flags); + +ErrUnlock: + PDUMP_UNLOCK(ui32Flags); +ErrOut: + PDUMP_RELEASE_SCRIPT_STRING(); + return eErr; +} + 
+/************************************************************************** + * Function Name : _PdumpAllocMMUContext + * Inputs : pui32MMUContextID + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : pdump util to allocate MMU contexts +**************************************************************************/ +static PVRSRV_ERROR _PdumpAllocMMUContext(IMG_UINT32 *pui32MMUContextID) +{ + IMG_UINT32 i; + + /* there are MAX_PDUMP_MMU_CONTEXTS contexts available, find one */ + for (i=0; i +#else +#include +#endif + +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" + +#include "pdump_physmem.h" +#include "pdump_km.h" + +#include "allocmem.h" +#include "osfunc.h" + +/* #define MAX_PDUMP_MMU_CONTEXTS (10) */ +/* static IMG_UINT32 guiPDumpMMUContextAvailabilityMask = (1<= 'a' && sym <= 'z') + return IMG_TRUE; + else + return IMG_FALSE; +} + +void PDumpMakeStringValid(IMG_CHAR *pszString, + IMG_UINT32 ui32StrLen) +{ + IMG_UINT32 i; + + if (pszString) + { + for (i = 0; i < ui32StrLen; i++) + { + if (_IsAllowedSym(pszString[i])) + { + if (_IsLowerCaseSym(pszString[i])) + pszString[i] = pszString[i]-32; + else + pszString[i] = pszString[i]; + } + else + { + pszString[i] = '_'; + } + } + } +} + +/************************************************************************** + * Function Name : PDumpGetSymbolicAddr + * Inputs : + * Outputs : + * Returns : PVRSRV_ERROR + * Description : + **************************************************************************/ +PVRSRV_ERROR PDumpGetSymbolicAddr(const IMG_HANDLE hPhysmemPDumpHandle, + IMG_CHAR **ppszSymbolicAddress) +{ + PDUMP_PHYSMEM_INFO_T *psPDumpAllocationInfo; + + PVR_RETURN_IF_INVALID_PARAM(hPhysmemPDumpHandle); + + psPDumpAllocationInfo = (PDUMP_PHYSMEM_INFO_T *)hPhysmemPDumpHandle; + *ppszSymbolicAddress = psPDumpAllocationInfo->aszSymbolicAddress; + + return PVRSRV_OK; +} + +/************************************************************************** + * 
Function Name : PDumpMalloc + * Inputs : + * Outputs : + * Returns : PVRSRV_ERROR + * Description : + **************************************************************************/ +PVRSRV_ERROR PDumpMalloc(const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicAddress, + IMG_UINT64 ui64Size, + IMG_DEVMEM_ALIGN_T uiAlign, + IMG_BOOL bInitialise, + IMG_UINT32 ui32InitValue, + IMG_HANDLE *phHandlePtr, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PDUMP_PHYSMEM_INFO_T *psPDumpAllocationInfo; + + PDUMP_GET_SCRIPT_STRING() + + psPDumpAllocationInfo = OSAllocMem(sizeof*psPDumpAllocationInfo); + PVR_ASSERT(psPDumpAllocationInfo != NULL); + + /* + * PDUMP_CONT and PDUMP_PERSIST flag can't set together. + */ + if (ui32PDumpFlags == PDUMP_NONE) + { + /* + Set continuous flag because there is no way of knowing beforehand which + allocation is needed for playback of the captured range. + */ + ui32PDumpFlags |= PDUMP_FLAGS_CONTINUOUS; + } + + ui32PDumpFlags |= PDUMP_FLAGS_BLKDATA; + + /* + construct the symbolic address + */ + + OSSNPrintf(psPDumpAllocationInfo->aszSymbolicAddress, + sizeof(psPDumpAllocationInfo->aszSymbolicAddress), + ":%s:%s", + pszDevSpace, + pszSymbolicAddress); + + /* + Write to the MMU script stream indicating the memory allocation + */ + if (bInitialise) + { + eError = PDumpSNPrintf(hScript, ui32MaxLen, "CALLOC %s 0x%"IMG_UINT64_FMTSPECX" 0x%"IMG_UINT64_FMTSPECX" 0x%X\n", + psPDumpAllocationInfo->aszSymbolicAddress, + ui64Size, + uiAlign, + ui32InitValue); + } + else + { + eError = PDumpSNPrintf(hScript, ui32MaxLen, "MALLOC %s 0x%"IMG_UINT64_FMTSPECX" 0x%"IMG_UINT64_FMTSPECX"\n", + psPDumpAllocationInfo->aszSymbolicAddress, + ui64Size, + uiAlign); + } + + if (eError != PVRSRV_OK) + { + OSFreeMem(psPDumpAllocationInfo); + goto _return; + } + + PDUMP_LOCK(ui32PDumpFlags); + PDumpWriteScript(hScript, ui32PDumpFlags); + PDUMP_UNLOCK(ui32PDumpFlags); + + psPDumpAllocationInfo->ui64Size = ui64Size; + psPDumpAllocationInfo->ui32Align = 
TRUNCATE_64BITS_TO_32BITS(uiAlign); + + *phHandlePtr = (IMG_HANDLE)psPDumpAllocationInfo; + +_return: + PDUMP_RELEASE_SCRIPT_STRING(); + return eError; +} + + +/************************************************************************** + * Function Name : PDumpFree + * Inputs : + * Outputs : + * Returns : PVRSRV_ERROR + * Description : + **************************************************************************/ +PVRSRV_ERROR PDumpFree(IMG_HANDLE hPDumpAllocationInfoHandle) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_BLKDATA; + + PDUMP_PHYSMEM_INFO_T *psPDumpAllocationInfo; + + PDUMP_GET_SCRIPT_STRING() + + psPDumpAllocationInfo = (PDUMP_PHYSMEM_INFO_T *)hPDumpAllocationInfoHandle; + + /* + Write to the MMU script stream indicating the memory free + */ + eError = PDumpSNPrintf(hScript, ui32MaxLen, "FREE %s\n", + psPDumpAllocationInfo->aszSymbolicAddress); + PVR_GOTO_IF_ERROR(eError, _return); + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + OSFreeMem(psPDumpAllocationInfo); + PDUMP_UNLOCK(ui32Flags); + +_return: + PDUMP_RELEASE_SCRIPT_STRING(); + return eError; +} + +PVRSRV_ERROR +PDumpPMRWRW32(const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicName, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32Value, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PDUMP_GET_SCRIPT_STRING() + + eError = PDumpSNPrintf(hScript, + ui32MaxLen, + "WRW :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " " + PMR_VALUE32_FMTSPEC, + pszDevSpace, + pszSymbolicName, + uiOffset, + ui32Value); + PVR_GOTO_IF_ERROR(eError, _return); + + PDUMP_LOCK(uiPDumpFlags); + PDumpWriteScript(hScript, uiPDumpFlags); + PDUMP_UNLOCK(uiPDumpFlags); + +_return: + PDUMP_RELEASE_SCRIPT_STRING(); + return eError; +} + +PVRSRV_ERROR +PDumpPMRWRW32InternalVarToMem(const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicName, + IMG_DEVMEM_OFFSET_T uiOffset, + const IMG_CHAR *pszInternalVar, + PDUMP_FLAGS_T uiPDumpFlags) 
+{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PDUMP_GET_SCRIPT_STRING() + + eError = PDumpSNPrintf(hScript, + ui32MaxLen, + "WRW :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " %s", + pszDevSpace, + pszSymbolicName, + uiOffset, + pszInternalVar); + PVR_GOTO_IF_ERROR(eError, _return); + + PDUMP_LOCK(uiPDumpFlags); + PDumpWriteScript(hScript, uiPDumpFlags); + PDUMP_UNLOCK(uiPDumpFlags); + +_return: + PDUMP_RELEASE_SCRIPT_STRING(); + return eError; +} + +PVRSRV_ERROR +PDumpPMRRDW32MemToInternalVar(const IMG_CHAR *pszInternalVar, + const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicName, + IMG_DEVMEM_OFFSET_T uiOffset, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PDUMP_GET_SCRIPT_STRING() + + eError = PDumpSNPrintf(hScript, + ui32MaxLen, + "RDW %s :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC, + pszInternalVar, + pszDevSpace, + pszSymbolicName, + uiOffset); + PVR_GOTO_IF_ERROR(eError, _return); + + PDUMP_LOCK(uiPDumpFlags); + PDumpWriteScript(hScript, uiPDumpFlags); + PDUMP_UNLOCK(uiPDumpFlags); + +_return: + PDUMP_RELEASE_SCRIPT_STRING(); + return eError; +} + +PVRSRV_ERROR +PDumpPMRWRW64(const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicName, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT64 ui64Value, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PDUMP_GET_SCRIPT_STRING() + + eError = PDumpSNPrintf(hScript, + ui32MaxLen, + "WRW64 :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " " + PMR_VALUE64_FMTSPEC, + pszDevSpace, + pszSymbolicName, + uiOffset, + ui64Value); + PVR_GOTO_IF_ERROR(eError, _return); + + PDUMP_LOCK(uiPDumpFlags); + PDumpWriteScript(hScript, uiPDumpFlags); + PDUMP_UNLOCK(uiPDumpFlags); + +_return: + PDUMP_RELEASE_SCRIPT_STRING(); + return eError; +} + +PVRSRV_ERROR +PDumpPMRWRW64InternalVarToMem(const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicName, + IMG_DEVMEM_OFFSET_T uiOffset, + const IMG_CHAR *pszInternalVar, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PDUMP_GET_SCRIPT_STRING() + + 
eError = PDumpSNPrintf(hScript, + ui32MaxLen, + "WRW64 :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " %s", + pszDevSpace, + pszSymbolicName, + uiOffset, + pszInternalVar); + PVR_GOTO_IF_ERROR(eError, _return); + + PDUMP_LOCK(uiPDumpFlags); + PDumpWriteScript(hScript, uiPDumpFlags); + PDUMP_UNLOCK(uiPDumpFlags); + +_return: + PDUMP_RELEASE_SCRIPT_STRING(); + return eError; +} + +PVRSRV_ERROR +PDumpPMRRDW64MemToInternalVar(const IMG_CHAR *pszInternalVar, + const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicName, + IMG_DEVMEM_OFFSET_T uiOffset, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PDUMP_GET_SCRIPT_STRING() + + eError = PDumpSNPrintf(hScript, + ui32MaxLen, + "RDW64 %s :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC, + pszInternalVar, + pszDevSpace, + pszSymbolicName, + uiOffset); + PVR_GOTO_IF_ERROR(eError, _return); + + PDUMP_LOCK(uiPDumpFlags); + PDumpWriteScript(hScript, uiPDumpFlags); + PDUMP_UNLOCK(uiPDumpFlags); + +_return: + PDUMP_RELEASE_SCRIPT_STRING(); + return eError; +} + +PVRSRV_ERROR +PDumpPMRLDB(const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicName, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszFilename, + IMG_UINT32 uiFileOffset, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PDUMP_GET_SCRIPT_STRING() + + eError = PDumpSNPrintf(hScript, + ui32MaxLen, + "LDB :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " " + IMG_DEVMEM_SIZE_FMTSPEC " " + PDUMP_FILEOFFSET_FMTSPEC " %s\n", + pszDevSpace, + pszSymbolicName, + uiOffset, + uiSize, + uiFileOffset, + pszFilename); + PVR_GOTO_IF_ERROR(eError, _return); + + PDUMP_LOCK(uiPDumpFlags); + PDumpWriteScript(hScript, uiPDumpFlags); + PDUMP_UNLOCK(uiPDumpFlags); + +_return: + PDUMP_RELEASE_SCRIPT_STRING(); + return eError; +} + +PVRSRV_ERROR PDumpPMRSAB(const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicName, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszFileName, + IMG_UINT32 uiFileOffset) +{ + 
PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 uiPDumpFlags; + + PDUMP_GET_SCRIPT_STRING() + + uiPDumpFlags = 0; + + eError = PDumpSNPrintf(hScript, + ui32MaxLen, + "SAB :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " " + IMG_DEVMEM_SIZE_FMTSPEC " " + "0x%08X %s.bin\n", + pszDevSpace, + pszSymbolicName, + uiOffset, + uiSize, + uiFileOffset, + pszFileName); + PVR_GOTO_IF_ERROR(eError, _return); + + PDUMP_LOCK(uiPDumpFlags); + PDumpWriteScript(hScript, uiPDumpFlags); + PDUMP_UNLOCK(uiPDumpFlags); + +_return: + PDUMP_RELEASE_SCRIPT_STRING(); + return eError; +} + +PVRSRV_ERROR +PDumpPMRPOL(const IMG_CHAR *pszMemspaceName, + const IMG_CHAR *pszSymbolicName, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + IMG_UINT32 uiCount, + IMG_UINT32 uiDelay, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PDUMP_GET_SCRIPT_STRING() + + eError = PDumpSNPrintf(hScript, + ui32MaxLen, + "POL :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " " + "0x%08X 0x%08X %d %d %d\n", + pszMemspaceName, + pszSymbolicName, + uiOffset, + ui32Value, + ui32Mask, + eOperator, + uiCount, + uiDelay); + PVR_GOTO_IF_ERROR(eError, _return); + + PDUMP_LOCK(uiPDumpFlags); + PDumpWriteScript(hScript, uiPDumpFlags); + PDUMP_UNLOCK(uiPDumpFlags); + +_return: + PDUMP_RELEASE_SCRIPT_STRING(); + return eError; +} + +PVRSRV_ERROR +PDumpPMRCBP(const IMG_CHAR *pszMemspaceName, + const IMG_CHAR *pszSymbolicName, + IMG_DEVMEM_OFFSET_T uiReadOffset, + IMG_DEVMEM_OFFSET_T uiWriteOffset, + IMG_DEVMEM_SIZE_T uiPacketSize, + IMG_DEVMEM_SIZE_T uiBufferSize) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PDUMP_FLAGS_T uiPDumpFlags = 0; + + PDUMP_GET_SCRIPT_STRING() + + eError = PDumpSNPrintf(hScript, + ui32MaxLen, + "CBP :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " " + IMG_DEVMEM_OFFSET_FMTSPEC " " IMG_DEVMEM_SIZE_FMTSPEC " " IMG_DEVMEM_SIZE_FMTSPEC "\n", + pszMemspaceName, + pszSymbolicName, + uiReadOffset, + uiWriteOffset, + uiPacketSize, + uiBufferSize); + + 
PVR_GOTO_IF_ERROR(eError, _return); + + PDUMP_LOCK(uiPDumpFlags); + PDumpWriteScript(hScript, uiPDumpFlags); + PDUMP_UNLOCK(uiPDumpFlags); + +_return: + PDUMP_RELEASE_SCRIPT_STRING(); + return eError; +} + +PVRSRV_ERROR +PDumpWriteParameterBlob(IMG_UINT8 *pcBuffer, + size_t uiNumBytes, + PDUMP_FLAGS_T uiPDumpFlags, + IMG_CHAR *pszFilenameOut, + size_t uiFilenameBufSz, + PDUMP_FILEOFFSET_T *puiOffsetOut) +{ + PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_UNREFERENCED_PARAMETER(uiFilenameBufSz); + + PVR_ASSERT(uiNumBytes > 0); + + eError = PDumpReady(); + if (eError != PVRSRV_OK) + { + /* Mask suspension from caller as this is terminal & logged */ + eError = (eError == PVRSRV_ERROR_PDUMP_NOT_ACTIVE) ? + PVRSRV_ERROR_PDUMP_NOT_ALLOWED : + eError; + return eError; + } + + PVR_ASSERT(uiFilenameBufSz <= PDUMP_PARAM_MAX_FILE_NAME); + + PDUMP_LOCK(uiPDumpFlags); + + eError = PDumpWriteParameter(pcBuffer, uiNumBytes, uiPDumpFlags, puiOffsetOut, pszFilenameOut); + PDUMP_UNLOCK(uiPDumpFlags); + + if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_PDUMP_NOT_ALLOWED)) + { + PVR_LOG_RETURN_IF_ERROR(eError, "PDumpWriteParameter"); + } + /* else Write to parameter file Ok or Prevented under the flags or + * current state of the driver so skip further writes and let caller know. + */ + return eError; +} + +#endif /* PDUMP */ diff --git a/drivers/mcst/gpu-imgtec/services/server/common/pdump_server.c b/drivers/mcst/gpu-imgtec/services/server/common/pdump_server.c new file mode 100644 index 000000000000..637034872996 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/pdump_server.c @@ -0,0 +1,5587 @@ +/*************************************************************************/ /*! +@File +@Title Common Server PDump functions layer +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if defined(PDUMP) +#include + +#include "pvrversion.h" +#include "allocmem.h" +#include "osfunc.h" +#include "pvrsrv.h" +#include "pvr_debug.h" +#include "srvkm.h" +#include "pdump_physmem.h" +#include "hash.h" +#include "connection_server.h" +#include "services_km.h" +#include +#include "oskm_apphint.h" + +/* pdump headers */ +#include "tlstream.h" +#include "pdump_km.h" + +#include "pdumpdesc.h" +#include "rgxpdump.h" + +#include "tutilsdefs.h" +#include "tutils_km.h" +/* Allow temporary buffer size override */ +#if !defined(PDUMP_TEMP_BUFFER_SIZE) +#define PDUMP_TEMP_BUFFER_SIZE (64 * 1024U) +#endif + +#define PTR_PLUS(t, p, x) ((t)(((IMG_CHAR *)(p)) + (x))) +#define VPTR_PLUS(p, x) PTR_PLUS(void *, p, x) +#define VPTR_INC(p, x) ((p) = VPTR_PLUS(p, x)) +#define MAX_PDUMP_MMU_CONTEXTS (32) + +#define PRM_FILE_SIZE_MAX 0x7FDFFFFFU /*!< Default maximum file size to split output files, 2GB-2MB as fwrite limits it to 2GB-1 on 32bit systems */ + +#define MAX_PDUMP_WRITE_RETRIES 200 /*!< Max number of retries to dump pdump data in to respective buffers */ + +static ATOMIC_T g_sConnectionCount; + +/* + * Structure to store some essential attributes of a PDump stream buffer. 
+ */ +typedef struct +{ + IMG_CHAR* pszName; /*!< Name of the PDump TL Stream buffer */ + IMG_HANDLE hTL; /*!< Handle of created TL stream buffer */ + IMG_UINT32 ui32BufferSize; /*!< The size of the buffer in bytes */ + IMG_UINT32 ui32BufferFullRetries; /*!< The number of times the buffer got full */ + IMG_UINT32 ui32BufferFullAborts; /*!< The number of times we failed to write data */ + IMG_UINT32 ui32HighestRetriesWatermark; /*!< Max number of retries try to dump pdump data */ + IMG_UINT32 ui32MaxAllowedWriteSize; /*!< Max allowed write packet size */ +} PDUMP_STREAM; + +typedef struct +{ + PDUMP_STREAM sInitStream; /*!< Driver initialisation PDump stream */ + PDUMP_STREAM sMainStream; /*!< App framed PDump stream */ + PDUMP_STREAM sDeinitStream; /*!< Driver/HW de-initialisation PDump stream */ + PDUMP_STREAM sBlockStream; /*!< Block mode PDump block data stream - currently its script only */ +} PDUMP_CHANNEL; + +typedef struct +{ + PDUMP_CHANNEL sCh; /*!< Channel handles */ + IMG_UINT32 ui32FileIdx; /*!< File index gets incremented on script out file split */ +} PDUMP_SCRIPT; + +typedef struct +{ + IMG_UINT32 ui32Init; /*!< Count of bytes written to the init phase stream */ + IMG_UINT32 ui32Main; /*!< Count of bytes written to the main stream */ + IMG_UINT32 ui32Deinit; /*!< Count of bytes written to the deinit stream */ + IMG_UINT32 ui32Block; /*!< Count of bytes written to the block stream */ +} PDUMP_CHANNEL_WOFFSETS; + +typedef struct +{ + PDUMP_CHANNEL sCh; /*!< Channel handles */ + PDUMP_CHANNEL_WOFFSETS sWOff; /*!< Channel file write offsets */ + IMG_UINT32 ui32FileIdx; /*!< File index used when file size limit reached and a new file is started, parameter channel only */ + IMG_UINT32 ui32MaxFileSize; /*!< Maximum file size for parameter files */ + + PDUMP_FILEOFFSET_T uiZeroPageOffset; /*!< Offset of the zero page in the parameter file */ + size_t uiZeroPageSize; /*!< Size of the zero page in the parameter file */ + IMG_CHAR 
szZeroPageFilename[PDUMP_PARAM_MAX_FILE_NAME]; /*< PRM file name where the zero page was pdumped */ +} PDUMP_PARAMETERS; + +/* PDump lock to keep pdump write atomic. + * Which will protect g_PDumpScript & g_PDumpParameters pdump + * specific shared variable. + */ +static POS_LOCK g_hPDumpWriteLock; + +static PDUMP_SCRIPT g_PDumpScript = { { + { PDUMP_SCRIPT_INIT_STREAM_NAME, NULL, + PDUMP_SCRIPT_INIT_STREAM_SIZE, 0, 0, 0 }, + { PDUMP_SCRIPT_MAIN_STREAM_NAME, NULL, + PDUMP_SCRIPT_MAIN_STREAM_SIZE, 0, 0, 0 }, + { PDUMP_SCRIPT_DEINIT_STREAM_NAME, NULL, + PDUMP_SCRIPT_DEINIT_STREAM_SIZE, 0, 0, 0 }, + { PDUMP_SCRIPT_BLOCK_STREAM_NAME, NULL, + PDUMP_SCRIPT_BLOCK_STREAM_SIZE, 0, 0, 0 }, + }, 0 }; +static PDUMP_PARAMETERS g_PDumpParameters = { { + { PDUMP_PARAM_INIT_STREAM_NAME, NULL, + PDUMP_PARAM_INIT_STREAM_SIZE, 0, 0, 0 }, + { PDUMP_PARAM_MAIN_STREAM_NAME, NULL, + PDUMP_PARAM_MAIN_STREAM_SIZE, 0, 0, 0 }, + { PDUMP_PARAM_DEINIT_STREAM_NAME, NULL, + PDUMP_PARAM_DEINIT_STREAM_SIZE, 0, 0, 0 }, + { PDUMP_PARAM_BLOCK_STREAM_NAME, NULL, + PDUMP_PARAM_BLOCK_STREAM_SIZE, 0, 0, 0 }, + }, {0, 0, 0, 0}, 0, PRM_FILE_SIZE_MAX}; + + +#if defined(PDUMP_DEBUG_OUTFILES) +/* counter increments each time debug write is called */ +ATOMIC_T g_sEveryLineCounter; +#endif + +// #define PDUMP_DEBUG_TRANSITION +#if defined(PDUMP_DEBUG_TRANSITION) +# define DEBUG_OUTFILES_COMMENT(fmt, ...) (void)PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, fmt, __VA_ARGS__) +#else +# define DEBUG_OUTFILES_COMMENT(fmt, ...) +#endif + +#if defined(PDUMP_DEBUG) || defined(REFCOUNT_DEBUG) +# define PDUMP_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__) +#else +# define PDUMP_REFCOUNT_PRINT(fmt, ...) 
+#endif + +/* Prototype for the test/debug state dump routine used in debugging */ +void PDumpCommonDumpState(void); +#undef PDUMP_TRACE_STATE + + +/*****************************************************************************/ +/* PDump Control Module Definitions */ +/*****************************************************************************/ + +/* + * struct _PDUMP_CAPTURE_RANGE_ is interpreted differently in different modes of PDump + * + * Non-Block mode: + * ui32Start - Start frame number of range + * ui32End - End frame number of range + * ui32Interval - Frame sample rate interval + * + * Block mode: + * ui32Start - If set to '0', first PDump-block will of minimal (i.e. PDUMP_BLOCKLEN_MIN) + * length, else all blocks will be of block-length provided + * + * ui32End - By default this is set to PDUMP_FRAME_MAX so that Blocked PDump + * will be captured indefinitely till stopped externally. On force capture + * stop, this will be set to (ui32CurrentFrame + 1) to stop capture from + * next frame onwards + * + * ui32Interval - This will be interpreted as PDump block-length provided + **/ +typedef struct _PDUMP_CAPTURE_RANGE_ +{ + IMG_UINT32 ui32Start; + IMG_UINT32 ui32End; + IMG_UINT32 ui32Interval; +} PDUMP_CAPTURE_RANGE; + +/* PDump Block mode specific controls */ +typedef struct _PDUMP_BLOCK_CTRL_ +{ + IMG_UINT32 ui32BlockLength; /*!< PDump block length in term of number of frames per block */ + IMG_UINT32 ui32CurrentBlock; /*!< Current block number */ +} PDUMP_BLOCK_CTRL; + +/*! 
 PDump common module State Machine states */
+typedef enum _PDUMP_SM_
+{
+ PDUMP_SM_UNINITIALISED, /*!< Starting state */
+ PDUMP_SM_INITIALISING, /*!< Module is initialising */
+ PDUMP_SM_READY, /*!< Module is initialised and ready */
+ PDUMP_SM_READY_CLIENT_CONNECTED, /*!< Module is ready and capture client connected */
+ PDUMP_SM_FORCED_SUSPENDED, /*!< Module forced error, PDumping suspended, this is to force driver reload before next capture */
+ PDUMP_SM_ERROR_SUSPENDED, /*!< Module fatal error, PDumping suspended semi-final state */
+ PDUMP_SM_DEINITIALISED /*!< Final state */
+} PDUMP_SM;
+
+/*! PDump control flags */
+#define FLAG_IS_DRIVER_IN_INIT_PHASE 0x1 /*! Control flag that keeps track of State of driver initialisation phase */
+#define FLAG_IS_IN_CAPTURE_RANGE 0x2 /*! Control flag that keeps track of Current capture status, is current frame in range */
+
+#define CHECK_PDUMP_CONTROL_FLAG(PDUMP_CONTROL_FLAG) BITMASK_HAS(g_PDumpCtrl.ui32Flags, PDUMP_CONTROL_FLAG)
+#define SET_PDUMP_CONTROL_FLAG(PDUMP_CONTROL_FLAG) BITMASK_SET(g_PDumpCtrl.ui32Flags, PDUMP_CONTROL_FLAG)
+#define UNSET_PDUMP_CONTROL_FLAG(PDUMP_CONTROL_FLAG) BITMASK_UNSET(g_PDumpCtrl.ui32Flags, PDUMP_CONTROL_FLAG)
+
+/* No direct access to members from outside the control module - please */
+typedef struct _PDUMP_CTRL_STATE_
+{
+ PDUMP_SM eServiceState; /*!< State of the pdump_common module */
+ IMG_UINT32 ui32Flags; /*!< Bitmask of the FLAG_IS_* control flags defined above */
+
+ IMG_UINT32 ui32DefaultCapMode; /*!< Capture mode of the dump */
+ PDUMP_CAPTURE_RANGE sCaptureRange; /*!< The capture range for capture mode 'framed' */
+ IMG_UINT32 ui32CurrentFrame; /*!< Current frame number */
+
+ PDUMP_BLOCK_CTRL sBlockCtrl; /*!< Pdump block mode ctrl data */
+
+ POS_LOCK hLock; /*!< Exclusive lock to this structure */
+ IMG_PID InPowerTransitionPID;/*!< pid of thread requesting power transition */
+} PDUMP_CTRL_STATE;
+
+static PDUMP_CTRL_STATE g_PDumpCtrl =
+{
+ PDUMP_SM_UNINITIALISED,
+
+ FLAG_IS_DRIVER_IN_INIT_PHASE,
+
+ PDUMP_CAPMODE_UNSET,
+ {
+
PDUMP_FRAME_UNSET, + PDUMP_FRAME_UNSET, + 0 + }, + 0, + + { + 0, + PDUMP_BLOCKNUM_INVALID, + }, + + NULL, + 0 +}; + +static void PDumpAssertWriteLockHeld(void); + +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + +/*************************************************************************/ /*! + @Function PDumpCreateIncVarNameStr + @Description When 64 bit register access is split between two 32 bit + accesses, it needs two PDump Internal variables to store register value. + This function creates the string for the second PDump Internal variable + for example if Passed Variable name is :SYSMEM:$1 this function will + generate the string :SYSMEM:$2 + + @Input pszInternalVar String for PDump internal variable in use + + @Return IMG_CHAR* String for second PDump internal variable to be used +*/ /**************************************************************************/ +static INLINE IMG_CHAR* PDumpCreateIncVarNameStr(const IMG_CHAR* pszInternalVar) +{ + IMG_CHAR *pszPDumpVarName; + IMG_UINT32 ui32Size = (IMG_UINT32)OSStringLength(pszInternalVar); + if (ui32Size == 0) + { + return NULL; + } + + ui32Size++; + pszPDumpVarName = (IMG_CHAR*)OSAllocMem((ui32Size) * sizeof(IMG_CHAR)); + if (pszPDumpVarName == NULL) + { + return NULL; + } + + OSStringLCopy(pszPDumpVarName, pszInternalVar, ui32Size); + pszPDumpVarName[ui32Size-1] = pszPDumpVarName[ui32Size-1] + 1; + return pszPDumpVarName; +} + +/*************************************************************************/ /*! 
+ @Function PDumpFreeIncVarNameStr + @Description Free the string created by function PDumpCreateIncVarNameStr + + @Input pszPDumpVarName String to free + + @Return void +*/ /**************************************************************************/ +static INLINE void PDumpFreeIncVarNameStr(IMG_CHAR* pszPDumpVarName) +{ + if (pszPDumpVarName != NULL) + { + OSFreeMem(pszPDumpVarName); + } +} +#endif + +static PVRSRV_ERROR PDumpCtrlInit(void) +{ + g_PDumpCtrl.eServiceState = PDUMP_SM_INITIALISING; + + /* Create lock for PDUMP_CTRL_STATE struct, which is shared between pdump client + and PDumping app. This lock will help us serialize calls from pdump client + and PDumping app */ + PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&g_PDumpCtrl.hLock), "OSLockCreate"); + + return PVRSRV_OK; +} + +static void PDumpCtrlDeInit(void) +{ + if (g_PDumpCtrl.hLock) + { + OSLockDestroy(g_PDumpCtrl.hLock); + g_PDumpCtrl.hLock = NULL; + } +} + +static INLINE void PDumpCtrlLockAcquire(void) +{ + OSLockAcquire(g_PDumpCtrl.hLock); +} + +static INLINE void PDumpCtrlLockRelease(void) +{ + OSLockRelease(g_PDumpCtrl.hLock); +} + +static INLINE PDUMP_SM PDumpCtrlGetModuleState(void) +{ + return g_PDumpCtrl.eServiceState; +} + +PVRSRV_ERROR PDumpReady(void) +{ + switch (PDumpCtrlGetModuleState()) + { + case PDUMP_SM_READY: + case PDUMP_SM_READY_CLIENT_CONNECTED: + return PVRSRV_OK; + + case PDUMP_SM_FORCED_SUSPENDED: + case PDUMP_SM_ERROR_SUSPENDED: + return PVRSRV_ERROR_PDUMP_NOT_ACTIVE; + + case PDUMP_SM_UNINITIALISED: + case PDUMP_SM_INITIALISING: + case PDUMP_SM_DEINITIALISED: + return PVRSRV_ERROR_PDUMP_NOT_AVAILABLE; + + default: + /* Bad state */ + PVR_ASSERT(1); + return PVRSRV_ERROR_BAD_MAPPING; + } +} + + +/****************************************************************************** + NOTE: + The following PDumpCtrl*** functions require the PDUMP_CTRL_STATE lock be + acquired BEFORE they are called. 
This is because the PDUMP_CTRL_STATE data + is shared between the PDumping App and the PDump client, hence an exclusive + access is required. The lock can be acquired and released by using the + PDumpCtrlLockAcquire & PDumpCtrlLockRelease functions respectively. +******************************************************************************/ + +static void PDumpCtrlUpdateCaptureStatus(void) +{ + if (g_PDumpCtrl.ui32DefaultCapMode == PDUMP_CAPMODE_FRAMED) + { + if ((g_PDumpCtrl.ui32CurrentFrame >= g_PDumpCtrl.sCaptureRange.ui32Start) && + (g_PDumpCtrl.ui32CurrentFrame <= g_PDumpCtrl.sCaptureRange.ui32End) && + (((g_PDumpCtrl.ui32CurrentFrame - g_PDumpCtrl.sCaptureRange.ui32Start) % g_PDumpCtrl.sCaptureRange.ui32Interval) == 0)) + { + SET_PDUMP_CONTROL_FLAG(FLAG_IS_IN_CAPTURE_RANGE); + } + else + { + UNSET_PDUMP_CONTROL_FLAG(FLAG_IS_IN_CAPTURE_RANGE); + } + } + else if ((g_PDumpCtrl.ui32DefaultCapMode == PDUMP_CAPMODE_CONTINUOUS) || (g_PDumpCtrl.ui32DefaultCapMode == PDUMP_CAPMODE_BLOCKED)) + { + SET_PDUMP_CONTROL_FLAG(FLAG_IS_IN_CAPTURE_RANGE); + } + else if (g_PDumpCtrl.ui32DefaultCapMode == PDUMP_CAPMODE_UNSET) + { + UNSET_PDUMP_CONTROL_FLAG(FLAG_IS_IN_CAPTURE_RANGE); + } + else + { + UNSET_PDUMP_CONTROL_FLAG(FLAG_IS_IN_CAPTURE_RANGE); + PVR_DPF((PVR_DBG_ERROR, "PDumpCtrlUpdateCaptureStatus: Unexpected capture mode (%x)", g_PDumpCtrl.ui32DefaultCapMode)); + } + +} + +static INLINE IMG_UINT32 PDumpCtrlCapModIsBlocked(void) +{ + return (g_PDumpCtrl.ui32DefaultCapMode == PDUMP_CAPMODE_BLOCKED); +} + +static INLINE IMG_UINT32 PDumpCtrlMinimalFirstBlock(void) +{ + /* If ui32Start is set to zero, first block length will be set to minimum + * (i.e. PDUMP_BLOCKLEN_MIN), else it will be of same length as that of + * rest of the blocks (i.e. ui32BlockLength) + * + * Having shorter first block reduces playback time of final capture. 
+ * */
+
+ return (PDumpCtrlCapModIsBlocked() && (g_PDumpCtrl.sCaptureRange.ui32Start == 0));
+}
+
+/* Record the current block number; forced to PDUMP_BLOCKNUM_INVALID when
+ * capture mode is not blocked. */
+static void PDumpCtrlSetBlock(IMG_UINT32 ui32BlockNum)
+{
+ g_PDumpCtrl.sBlockCtrl.ui32CurrentBlock = PDumpCtrlCapModIsBlocked()? ui32BlockNum : PDUMP_BLOCKNUM_INVALID;
+}
+
+/* Current block number, or PDUMP_BLOCKNUM_INVALID outside blocked mode. */
+static INLINE IMG_UINT32 PDumpCtrlGetBlock(void)
+{
+ return (PDumpCtrlCapModIsBlocked()? g_PDumpCtrl.sBlockCtrl.ui32CurrentBlock : PDUMP_BLOCKNUM_INVALID);
+}
+
+static PVRSRV_ERROR PDumpCtrlForcedStop(void)
+{
+ /* In block-mode on forced stop request, capture will be stopped after the (current_frame + 1)th frame.
+ * This ensures that DumpAfterRender is always called on the last frame before exiting the PDump capture
+ * */
+ g_PDumpCtrl.sCaptureRange.ui32End = g_PDumpCtrl.ui32CurrentFrame + 1;
+
+ return PVRSRV_OK;
+}
+
+/* True once the frame counter has moved past the (possibly force-stopped)
+ * range end while in blocked capture mode. */
+static INLINE IMG_BOOL PDumpCtrlIsCaptureForceStopped(void)
+{
+ return (PDumpCtrlCapModIsBlocked() && (g_PDumpCtrl.ui32CurrentFrame > g_PDumpCtrl.sCaptureRange.ui32End));
+}
+
+/* Update the frame counter and re-evaluate the in-capture-range flag.
+ * Caller must hold the ctrl lock (see module NOTE above). */
+static void PDumpCtrlSetCurrentFrame(IMG_UINT32 ui32Frame)
+{
+ g_PDumpCtrl.ui32CurrentFrame = ui32Frame;
+
+ PDumpCtrlUpdateCaptureStatus();
+
+ /* Force PDump module to suspend PDumping on forced capture stop */
+ if ((PDumpCtrlGetModuleState() != PDUMP_SM_FORCED_SUSPENDED) && PDumpCtrlIsCaptureForceStopped())
+ {
+ PVR_LOG(("PDump forced capture stop received.
Suspend PDumping to force driver reload before next capture.")); + g_PDumpCtrl.eServiceState = PDUMP_SM_FORCED_SUSPENDED; + } +#if defined(PDUMP_TRACE_STATE) + PDumpCommonDumpState(); +#endif +} + +static void PDumpCtrlSetDefaultCaptureParams(IMG_UINT32 ui32Mode, IMG_UINT32 ui32Start, IMG_UINT32 ui32End, IMG_UINT32 ui32Interval) +{ + /* Set the capture range to that supplied by the PDump client tool + */ + g_PDumpCtrl.ui32DefaultCapMode = ui32Mode; + g_PDumpCtrl.sCaptureRange.ui32Start = ui32Start; + g_PDumpCtrl.sCaptureRange.ui32End = ui32End; + g_PDumpCtrl.sCaptureRange.ui32Interval = ui32Interval; + + /* Set pdump block mode ctrl variables */ + g_PDumpCtrl.sBlockCtrl.ui32BlockLength = (ui32Mode == PDUMP_CAPMODE_BLOCKED)? ui32Interval : 0; /* ui32Interval is interpreted as block length */ + g_PDumpCtrl.sBlockCtrl.ui32CurrentBlock = PDUMP_BLOCKNUM_INVALID; + + /* Change module state to record capture client connected */ + if (g_PDumpCtrl.ui32DefaultCapMode == PDUMP_CAPMODE_UNSET) + g_PDumpCtrl.eServiceState = PDUMP_SM_READY; + else + g_PDumpCtrl.eServiceState = PDUMP_SM_READY_CLIENT_CONNECTED; + + /* Reset the current frame on reset of the capture range, the helps to + * avoid inter-pdump start frame issues when the driver is not reloaded. + * No need to call PDumpCtrlUpdateCaptureStatus() direct as the set + * current frame call will. + */ + PDumpCtrlSetCurrentFrame(0); + +} + +static IMG_UINT32 PDumpCtrlGetCurrentFrame(void) +{ + return g_PDumpCtrl.ui32CurrentFrame; +} + +static INLINE IMG_BOOL PDumpCtrlCaptureOn(void) +{ + return ((g_PDumpCtrl.eServiceState == PDUMP_SM_READY_CLIENT_CONNECTED) && + CHECK_PDUMP_CONTROL_FLAG(FLAG_IS_IN_CAPTURE_RANGE)) ? 
IMG_TRUE : IMG_FALSE; +} + +static INLINE IMG_BOOL PDumpCtrlCaptureRangePast(void) +{ + return (g_PDumpCtrl.ui32CurrentFrame > g_PDumpCtrl.sCaptureRange.ui32End); +} + +static IMG_BOOL PDumpCtrlIsLastCaptureFrame(void) +{ + if (g_PDumpCtrl.ui32DefaultCapMode == PDUMP_CAPMODE_FRAMED) + { + /* Is the next capture frame within the range end limit? */ + if ((g_PDumpCtrl.ui32CurrentFrame + g_PDumpCtrl.sCaptureRange.ui32Interval) > g_PDumpCtrl.sCaptureRange.ui32End) + { + return IMG_TRUE; + } + } + else if (g_PDumpCtrl.ui32DefaultCapMode == PDUMP_CAPMODE_BLOCKED) + { + if (g_PDumpCtrl.ui32CurrentFrame == g_PDumpCtrl.sCaptureRange.ui32End) + { + return IMG_TRUE; + } + } + /* Return false for all other conditions: framed mode but not last frame, + * continuous mode; unset mode. + */ + return IMG_FALSE; +} + +static INLINE IMG_BOOL PDumpCtrlInitPhaseComplete(void) +{ + return !CHECK_PDUMP_CONTROL_FLAG(FLAG_IS_DRIVER_IN_INIT_PHASE); +} + +static INLINE void PDumpCtrlSetInitPhaseComplete(IMG_BOOL bIsComplete) +{ + PDUMP_HERE_VAR; + + if (bIsComplete) + { + UNSET_PDUMP_CONTROL_FLAG(FLAG_IS_DRIVER_IN_INIT_PHASE); + PDUMP_HEREA(102); + } + else + { + SET_PDUMP_CONTROL_FLAG(FLAG_IS_DRIVER_IN_INIT_PHASE); + PDUMP_HEREA(103); + } +} + +static INLINE void PDumpCtrlPowerTransitionStart(void) +{ + g_PDumpCtrl.InPowerTransitionPID = OSGetCurrentProcessID(); +} + +static INLINE void PDumpCtrlPowerTransitionEnd(void) +{ + g_PDumpCtrl.InPowerTransitionPID = 0; +} + +static INLINE IMG_PID PDumpCtrlInPowerTransitionPID(void) +{ + return g_PDumpCtrl.InPowerTransitionPID; +} + +static INLINE IMG_BOOL PDumpCtrlInPowerTransition(void) +{ + IMG_BOOL bPDumpInPowerTransition = IMG_FALSE; + if (PDumpCtrlInPowerTransitionPID()) + { + bPDumpInPowerTransition = IMG_TRUE; + } + return bPDumpInPowerTransition; +} + +static PVRSRV_ERROR PDumpCtrlGetState(IMG_UINT64 *ui64State) +{ + PDUMP_SM eState; + *ui64State = 0; + if (PDumpCtrlCaptureOn()) + { + *ui64State |= PDUMP_STATE_CAPTURE_FRAME; + } + + eState 
= PDumpCtrlGetModuleState(); + + if (eState == PDUMP_SM_READY_CLIENT_CONNECTED) + { + *ui64State |= PDUMP_STATE_CONNECTED; + } + + if (eState == PDUMP_SM_ERROR_SUSPENDED) + { + *ui64State |= PDUMP_STATE_SUSPENDED; + } + + return PVRSRV_OK; +} + +/****************************************************************************** + End of PDumpCtrl*** functions +******************************************************************************/ + +/* + Wrapper functions which need to be exposed in pdump_km.h for use in other + pdump_*** modules safely. These functions call the specific PDumpCtrl layer + function after acquiring the PDUMP_CTRL_STATE lock, hence making the calls + from other modules hassle free by avoiding the acquire/release CtrlLock + calls. +*/ + +static INLINE void PDumpModuleTransitionState(PDUMP_SM eNewState) +{ + PDumpCtrlLockAcquire(); + g_PDumpCtrl.eServiceState = eNewState; + PDumpCtrlLockRelease(); + return; +} + +void PDumpPowerTransitionStart(void) +{ + PDumpCtrlLockAcquire(); + PDumpCtrlPowerTransitionStart(); + PDumpCtrlLockRelease(); +} + +void PDumpPowerTransitionEnd(void) +{ + PDumpCtrlLockAcquire(); + PDumpCtrlPowerTransitionEnd(); + PDumpCtrlLockRelease(); +} + +IMG_BOOL PDumpInPowerTransition(void) +{ + IMG_BOOL bPDumpInPowerTransition = IMG_FALSE; + + PDumpCtrlLockAcquire(); + if (PDumpCtrlInPowerTransitionPID()) + { + bPDumpInPowerTransition = IMG_TRUE; + } + PDumpCtrlLockRelease(); + + return bPDumpInPowerTransition; +} + +IMG_BOOL PDumpIsContCaptureOn(void) +{ + IMG_BOOL bPDumpIsCatpureOn; + + PDumpCtrlLockAcquire(); + bPDumpIsCatpureOn = !PDumpCtrlInitPhaseComplete() || (PDumpCtrlGetModuleState() == PDUMP_SM_READY_CLIENT_CONNECTED); + PDumpCtrlLockRelease(); + + return bPDumpIsCatpureOn; +} + +static IMG_BOOL PDumpIsClientConnected(void) +{ + IMG_BOOL bPDumpClientConnected; + + PDumpCtrlLockAcquire(); + bPDumpClientConnected = (PDumpCtrlGetModuleState() == PDUMP_SM_READY_CLIENT_CONNECTED); + PDumpCtrlLockRelease(); + + return 
bPDumpClientConnected; +} + +/*****************************************************************************/ +/* PDump Common Write Layer just above common Transport Layer */ +/*****************************************************************************/ + + +/*! + * \name _PDumpOSGetStreamOffset + */ +static IMG_BOOL _PDumpSetSplitMarker(IMG_HANDLE hStream, IMG_BOOL bRemoveOld) +{ + PVRSRV_ERROR eError; + /* We have to indicate the reader that we wish to split. Insert an EOS packet in the TL stream */ + eError = TLStreamMarkEOS(hStream, bRemoveOld); + + /* If unsuccessful, return false */ + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "_PDumpSetSplitMarker: Cannot set split marker")); + + return IMG_FALSE; + } + + return IMG_TRUE; +} + +/* + Checks in this method were seeded from the original PDumpWriteILock() + and DBGDrivWriteCM() and have grown since to ensure PDump output + matches legacy output. + Note: the order of the checks in this method is important as some + writes have multiple pdump flags set! + */ +static IMG_BOOL PDumpWriteAllowed(IMG_UINT32 ui32Flags, IMG_UINT32* ui32ExitHere) +{ + PDUMP_HERE_VAR; + + /* PDUMP_FLAGS_CONTINUOUS and PDUMP_FLAGS_PERSISTENT can't come together. */ + PVR_ASSERT(IMG_FALSE == ((ui32Flags & PDUMP_FLAGS_CONTINUOUS) && (ui32Flags & PDUMP_FLAGS_PERSISTENT))); + + /* Lock down the PDUMP_CTRL_STATE struct before calling the following + PDumpCtrl*** functions. 
 This is to avoid updates to the Control data
+ while we are reading from it */
+ PDumpCtrlLockAcquire();
+
+ /* No writes if in framed mode and the capture range is already past */
+ if (PDumpCtrlCaptureRangePast())
+ {
+ PDUMP_HERE(10);
+ goto unlockAndReturnFalse;
+ }
+
+ /* No writes while PDump is not ready or is suspended */
+ if (PDumpReady() != PVRSRV_OK)
+ {
+ PDUMP_HERE(11);
+ goto unlockAndReturnFalse;
+ }
+
+ /* Prevent PDumping during a power transition */
+ if (PDumpCtrlInPowerTransition())
+ { /* except when it's flagged */
+ if (ui32Flags & PDUMP_FLAGS_POWER)
+ {
+ PDUMP_HERE(20);
+ goto unlockAndReturnTrue;
+ }
+ else if (PDumpCtrlInPowerTransitionPID() == OSGetCurrentProcessID())
+ {
+ PDUMP_HERE(16);
+ goto unlockAndReturnFalse;
+ }
+ }
+
+ /* Always allow dumping in init phase and when persistent flagged */
+ if (ui32Flags & PDUMP_FLAGS_PERSISTENT)
+ {
+ PDUMP_HERE(12);
+ goto unlockAndReturnTrue;
+ }
+ if (!PDumpCtrlInitPhaseComplete())
+ {
+ PDUMP_HERE(15);
+ goto unlockAndReturnTrue;
+ }
+
+ /* The following checks are made when the driver has completed initialisation */
+ /* No last/deinit statements allowed when not in initialisation phase */
+ /* NOTE: this 'else' pairs with the if just above, across the comments */
+ else /* init phase over */
+ {
+ if (ui32Flags & PDUMP_FLAGS_DEINIT)
+ {
+ PVR_ASSERT(0);
+ PDUMP_HERE(17);
+ goto unlockAndReturnFalse;
+ }
+ }
+
+ /* If PDump client connected allow continuous flagged writes */
+ if (PDUMP_IS_CONTINUOUS(ui32Flags))
+ {
+ if (PDumpCtrlGetModuleState() != PDUMP_SM_READY_CLIENT_CONNECTED) /* Is client connected? */
+ {
+ PDUMP_HERE(13);
+ goto unlockAndReturnFalse;
+ }
+ PDUMP_HERE(14);
+ goto unlockAndReturnTrue;
+ }
+
+ /*
+ If no flags are provided then it is FRAMED output and the frame
+ range must be checked matching expected behaviour.
+ */ + if (!PDumpCtrlCaptureOn()) + { + PDUMP_HERE(18); + goto unlockAndReturnFalse; + } + + PDUMP_HERE(19); + +unlockAndReturnTrue: + /* Allow the write to take place */ + + PDumpCtrlLockRelease(); + return IMG_TRUE; + +unlockAndReturnFalse: + PDumpCtrlLockRelease(); + if (ui32ExitHere != NULL) + { + *ui32ExitHere = here; + } + return IMG_FALSE; +} + + +/*************************************************************************/ /*! + @Function PDumpWriteToBuffer + @Description Write the supplied data to the PDump stream buffer and attempt + to handle any buffer full conditions to ensure all the data + requested to be written, is. + + @Input psStream The address of the PDump stream buffer to write to + @Input pui8Data Pointer to the data to be written + @Input ui32BCount Number of bytes to write + @Input ui32Flags PDump statement flags. + + @Return IMG_UINT32 Actual number of bytes written, may be less than + ui32BCount when buffer full condition could not + be avoided. +*/ /**************************************************************************/ +static IMG_UINT32 PDumpWriteToBuffer(PDUMP_STREAM* psStream, IMG_UINT8 *pui8Data, + IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags) +{ + IMG_UINT32 ui32BytesToBeWritten; + IMG_UINT32 ui32Off = 0; + IMG_BYTE *pbyDataBuffer; + IMG_UINT32 ui32BytesAvailable = 0; + static IMG_UINT32 ui32TotalBytesWritten; + PVRSRV_ERROR eError; + IMG_UINT32 uiRetries = 0; + + /* Check PDump stream validity */ + if (psStream->hTL == NULL) + { + PVR_DPF((PVR_DBG_WARNING, "PDumpWriteToBuffer: PDump stream '%s' is invalid", psStream->pszName)); + return 0; + } + + /* This API always called by holding pdump write lock + * to ensure atomic pdump write happened and + * if not holding pdump lock then assert. + */ + PDumpAssertWriteLockHeld(); + + /* No need to check size of data to write as this is asserted + * higher up in the call stack as 1KB and 16KB for each channel + * respectively. 
*/ + + while (ui32BCount > 0) + { + ui32BytesToBeWritten = MIN ( ui32BCount, psStream->ui32MaxAllowedWriteSize ); + + eError = TLStreamReserve2(psStream->hTL, &pbyDataBuffer, ui32BytesToBeWritten, 0, &ui32BytesAvailable, NULL); + if (eError == PVRSRV_ERROR_STREAM_FULL) + { + psStream->ui32BufferFullRetries++; + + /*! Retry write2 only if available bytes is at least 1024 or more. */ + if (ui32BytesAvailable >= 0x400) + { + ui32BytesToBeWritten = ui32BytesAvailable; + PVR_DPF((PVR_DBG_WARNING, "PDumpWriteToBuffer: TL buffer '%s' retrying write2=%u out of %u", psStream->pszName, ui32BytesToBeWritten, ui32BCount)); + eError = TLStreamReserve(psStream->hTL, &pbyDataBuffer, ui32BytesToBeWritten); + /*! Not expected to get PVRSRV_ERROR_STREAM_FULL error and other error may get */ + PVR_ASSERT(eError != PVRSRV_ERROR_STREAM_FULL); + } + else + { + uiRetries++; + PVR_DPF((PVR_DBG_WARNING, "PDumpWriteToBuffer: TL buffer '%s' full, rq=%u, av=%u, retrying write", psStream->pszName, ui32BCount, ui32BytesAvailable)); + + /* Check if we are out of retries , if so then print warning */ + if (uiRetries >= MAX_PDUMP_WRITE_RETRIES) + { + PVR_DPF((PVR_DBG_ERROR, + "PDumpWriteToBuffer: PDump writes blocked to dump %d bytes, %s TLBuffers full for %d seconds, check system", + ui32BCount, + psStream->pszName, + ((200 * uiRetries)/1000))); + + if (uiRetries > psStream->ui32HighestRetriesWatermark) + { + psStream->ui32HighestRetriesWatermark = uiRetries; + } + + psStream->ui32BufferFullAborts++; + uiRetries = 0; + + /* As uiRetries exceed max write retries that means, + * something went wrong in system and thus suspend pdump. 
+ */ + PDumpModuleTransitionState(PDUMP_SM_ERROR_SUSPENDED); + return 0; + } + + OSSleepms(100); + continue; + } + } + + if (eError == PVRSRV_OK) + { + ui32TotalBytesWritten += ui32BytesToBeWritten; + + PVR_ASSERT(pbyDataBuffer != NULL); + + OSDeviceMemCopy((void*)pbyDataBuffer, pui8Data + ui32Off, ui32BytesToBeWritten); + + eError = TLStreamCommit(psStream->hTL, ui32BytesToBeWritten); + if (PVRSRV_OK != eError) + { + return 0; + } + + if (uiRetries > psStream->ui32HighestRetriesWatermark) + { + psStream->ui32HighestRetriesWatermark = uiRetries; + } + + uiRetries = 0; + ui32Off += ui32BytesToBeWritten; + ui32BCount -= ui32BytesToBeWritten; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToBuffer: TLStreamReserve2(%s) unrecoverable error %s", psStream->pszName, PVRSRVGETERRORSTRING(eError))); + /* Fatal -suspend PDump to prevent flooding kernel log buffer */ + PVR_LOG(("Unrecoverable error, PDump suspended!")); + + PDumpModuleTransitionState(PDUMP_SM_ERROR_SUSPENDED); + return 0; + } + + /* + if the capture range is unset + (which is detected via PDumpWriteAllowed()) + */ + + if (!PDumpWriteAllowed(ui32Flags, NULL)) + { + psStream->ui32BufferFullAborts++; + break; + } + } + + return ui32Off; +} + +/*************************************************************************/ /*! + @Function PDumpWriteToChannel + @Description Write the supplied data to the PDump channel specified obeying + flags to write to the necessary channel buffers. + + @Input psChannel Address of the script or parameter channel object + @Input/Output psWOff Address of the channel write offsets object to + update on successful writing + @Input pui8Data Pointer to the data to be written + @Input ui32Size Number of bytes to write + @Input ui32Flags PDump statement flags, they may be clear (no flags) + or persistent flagged and they determine how the + which implies framed data, continuous flagged, data + is output. 
On the first test app run after driver + load, the Display Controller dumps a resource that + is persistent and this needs writing to both the + init (persistent) and main (continuous) channel + buffers to ensure the data is dumped in subsequent + test runs without reloading the driver. + In subsequent runs the PDump client 'freezes' the + init buffer so that only one dump of persistent + data for the "extended init phase" is captured to + the init buffer. + @Return IMG_BOOL True when the data has been consumed, false otherwise +*/ /**************************************************************************/ +static IMG_BOOL PDumpWriteToChannel(PDUMP_CHANNEL* psChannel, PDUMP_CHANNEL_WOFFSETS* psWOff, + IMG_UINT8* pui8Data, IMG_UINT32 ui32Size, IMG_UINT32 ui32Flags) +{ + IMG_UINT32 ui32BytesWritten = 0; + PDUMP_HERE_VAR; + + PDUMP_HERE(210); + + /* At this point, PDumpWriteAllowed() has returned TRUE (or called from + * PDumpParameterChannelZeroedPageBlock() during driver init) we know the + * write must proceed because: + * - pdump is not suspended and + * - there is not an ongoing power transition or POWER override flag is set or + * - in driver init phase with ANY flag set or + * - post init with the pdump client connected and + * - - PERSIST flag is present, xor + * - - the CONTINUOUS flag is present, xor + * - - in capture frame range + */ + PDumpAssertWriteLockHeld(); + + /* Dump data to deinit buffer when flagged as deinit */ + if (ui32Flags & PDUMP_FLAGS_DEINIT) + { + PDUMP_HERE(211); + ui32BytesWritten = PDumpWriteToBuffer(&psChannel->sDeinitStream, pui8Data, ui32Size, ui32Flags); + if (ui32BytesWritten != ui32Size) + { + PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToChannel: DEINIT Written length (%d) does not match data length (%d), PDump incomplete!", ui32BytesWritten, ui32Size)); + PDUMP_HERE(212); + return IMG_FALSE; + } + + if (psWOff) + { + psWOff->ui32Deinit += ui32Size; + } + + } + else + { + IMG_BOOL bDumpedToInitAlready = IMG_FALSE; + IMG_BOOL 
bMainStreamData = IMG_FALSE; + PDUMP_STREAM* psStream = NULL; + IMG_UINT32* pui32Offset = NULL; + + /* Always append persistent data to init phase so it's available on + * subsequent app runs, but also to the main stream if client connected */ + if (ui32Flags & PDUMP_FLAGS_PERSISTENT) + { + PDUMP_HERE(213); + ui32BytesWritten = PDumpWriteToBuffer(&psChannel->sInitStream, pui8Data, ui32Size, ui32Flags); + if (ui32BytesWritten != ui32Size) + { + PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToChannel: PERSIST Written length (%d) does not match data length (%d), PDump incomplete!", ui32BytesWritten, ui32Size)); + PDUMP_HERE(214); + return IMG_FALSE; + } + + bDumpedToInitAlready = IMG_TRUE; + if (psWOff) + { + psWOff->ui32Init += ui32Size; + } + + /* Don't write continuous data if client not connected */ + if (PDumpCtrlGetModuleState() != PDUMP_SM_READY_CLIENT_CONNECTED) + { + return IMG_TRUE; + } + } + + /* Prepare to write the data to the main stream for + * persistent, continuous or framed data. Override and use init + * stream if driver still in init phase and we have not written + * to it yet.*/ + PDumpCtrlLockAcquire(); + if (!PDumpCtrlInitPhaseComplete() && !bDumpedToInitAlready) + { + PDUMP_HERE(215); + psStream = &psChannel->sInitStream; + if (psWOff) + { + pui32Offset = &psWOff->ui32Init; + } + } + else + { + PDUMP_HERE(216); + psStream = &psChannel->sMainStream; + if (psWOff) + { + pui32Offset = &psWOff->ui32Main; + } + bMainStreamData = IMG_TRUE; + + } + PDumpCtrlLockRelease(); + + if (PDumpCtrlCapModIsBlocked() && bMainStreamData && !psWOff) + { + /* if PDUMP_FLAGS_BLKDATA flag is set in Blocked mode, Make copy of Main script stream data to Block script stream as well */ + if (ui32Flags & PDUMP_FLAGS_BLKDATA) + { + PDUMP_HERE(217); + ui32BytesWritten = PDumpWriteToBuffer(&psChannel->sBlockStream, pui8Data, ui32Size, ui32Flags); + if (ui32BytesWritten != ui32Size) + { + PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToChannel: BLOCK Written length (%d) does not match data length 
(%d), PDump incomplete!", ui32BytesWritten, ui32Size)); + PDUMP_HERE(218); + return IMG_FALSE; + } + } + } + + /* Write the data to the stream */ + ui32BytesWritten = PDumpWriteToBuffer(psStream, pui8Data, ui32Size, ui32Flags); + if (ui32BytesWritten != ui32Size) + { + PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToChannel: MAIN Written length (%d) does not match data length (%d), PDump incomplete!", ui32BytesWritten, ui32Size)); + PDUMP_HERE(219); + return IMG_FALSE; + } + + if (pui32Offset) + { + *pui32Offset += ui32BytesWritten; + } + } + + return IMG_TRUE; +} + +#if defined(PDUMP_DEBUG_OUTFILES) + +static IMG_UINT32 _GenerateChecksum(void *pvData, size_t uiSize) +{ + IMG_UINT32 ui32Sum = 0; + IMG_UINT32 *pui32Data = pvData; + IMG_UINT8 *pui8Data = pvData; + IMG_UINT32 i; + IMG_UINT32 ui32LeftOver; + + for (i = 0; i < uiSize / sizeof(IMG_UINT32); i++) + { + ui32Sum += pui32Data[i]; + } + + ui32LeftOver = uiSize % sizeof(IMG_UINT32); + + while (ui32LeftOver) + { + ui32Sum += pui8Data[uiSize - ui32LeftOver]; + ui32LeftOver--; + } + + return ui32Sum; +} + +#endif + +PVRSRV_ERROR PDumpWriteParameter(IMG_UINT8 *pui8Data, IMG_UINT32 ui32Size, IMG_UINT32 ui32Flags, + IMG_UINT32* pui32FileOffset, IMG_CHAR* aszFilenameStr) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_BOOL bPDumpCtrlInitPhaseComplete = IMG_FALSE; + IMG_UINT32 here = 0; + IMG_INT32 iCount; + + PDumpAssertWriteLockHeld(); + + PVR_ASSERT(pui8Data && (ui32Size!=0)); + PVR_ASSERT(pui32FileOffset && aszFilenameStr); + + PDUMP_HERE(1); + + /* Check if write can proceed? */ + if (!PDumpWriteAllowed(ui32Flags, &here)) + { + /* Abort write for the above reason but indicate what happened to + * caller to avoid disrupting the driver, caller should treat it as OK + * but skip any related PDump writes to the script file. 
*/ + return PVRSRV_ERROR_PDUMP_NOT_ALLOWED; + } + + PDUMP_HERE(2); + + PDumpCtrlLockAcquire(); + bPDumpCtrlInitPhaseComplete = PDumpCtrlInitPhaseComplete(); + PDumpCtrlLockRelease(); + + if (!bPDumpCtrlInitPhaseComplete || (ui32Flags & PDUMP_FLAGS_PERSISTENT)) + { + PDUMP_HERE(3); + + /* Init phase stream not expected to get above the file size max */ + PVR_ASSERT(g_PDumpParameters.sWOff.ui32Init < g_PDumpParameters.ui32MaxFileSize); + + /* Return the file write offset at which the parameter data was dumped */ + *pui32FileOffset = g_PDumpParameters.sWOff.ui32Init; + } + else + { + PDUMP_HERE(4); + + /* Do we need to signal the PDump client that a split is required? */ + if (g_PDumpParameters.sWOff.ui32Main + ui32Size > g_PDumpParameters.ui32MaxFileSize) + { + PDUMP_HERE(5); + _PDumpSetSplitMarker(g_PDumpParameters.sCh.sMainStream.hTL, IMG_FALSE); + g_PDumpParameters.ui32FileIdx++; + g_PDumpParameters.sWOff.ui32Main = 0; + } + + /* Return the file write offset at which the parameter data was dumped */ + *pui32FileOffset = g_PDumpParameters.sWOff.ui32Main; + } + + /* Create the parameter file name, based on index, to be used in the script */ + if (g_PDumpParameters.ui32FileIdx == 0) + { + iCount = OSSNPrintf(aszFilenameStr, PDUMP_PARAM_MAX_FILE_NAME, PDUMP_PARAM_0_FILE_NAME); + } + else + { + PDUMP_HERE(6); + iCount = OSSNPrintf(aszFilenameStr, PDUMP_PARAM_MAX_FILE_NAME, PDUMP_PARAM_N_FILE_NAME, g_PDumpParameters.ui32FileIdx); + } + + PVR_LOG_GOTO_IF_FALSE(((iCount != -1) && (iCount < PDUMP_PARAM_MAX_FILE_NAME)), "OSSNPrintf", errExit); + + /* Write the parameter data to the parameter channel */ + eError = PVRSRV_ERROR_PDUMP_BUFFER_FULL; + if (!PDumpWriteToChannel(&g_PDumpParameters.sCh, &g_PDumpParameters.sWOff, pui8Data, ui32Size, ui32Flags)) + { + PDUMP_HERE(7); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpWrite", errExit); + } +#if defined(PDUMP_DEBUG_OUTFILES) + else + { + IMG_UINT32 ui32Checksum; + PDUMP_GET_SCRIPT_STRING(); + + ui32Checksum = 
_GenerateChecksum(pui8Data, ui32Size); + + /* CHK CHKSUM SIZE PRMOFFSET PRMFILE */ + eError = PDumpSNPrintf(hScript, ui32MaxLen, "-- CHK 0x%08X 0x%08X 0x%08X %s", + ui32Checksum, + ui32Size, + *pui32FileOffset, + aszFilenameStr); + PVR_GOTO_IF_ERROR(eError, errExit); + + PDumpWriteScript(hScript, ui32Flags); + PDUMP_RELEASE_SCRIPT_STRING(); + } +#endif + + return PVRSRV_OK; + +errExit: + return eError; +} + + +IMG_BOOL PDumpWriteScript(IMG_HANDLE hString, IMG_UINT32 ui32Flags) +{ + PDUMP_HERE_VAR; + + PVR_ASSERT(hString); + + PDumpAssertWriteLockHeld(); + + PDUMP_HERE(201); + +#if defined(DEBUG) + /* Since buffer sizes and buffer writing/reading are a balancing act to + * avoid buffer full errors, check here our assumption on the maximum write size. + */ + { + IMG_UINT32 ui32Size = (IMG_UINT32) OSStringLength((const IMG_CHAR *)hString); + if (ui32Size > 0x400) // 1KB + { + PVR_DPF((PVR_DBG_ERROR, "PDUMP large script write %u bytes", ui32Size)); + OSDumpStack(); + } + } +#endif + + if (!PDumpWriteAllowed(ui32Flags, NULL)) + { + /* Abort write for the above reasons but indicated it was OK to + * caller to avoid disrupting the driver */ + return IMG_TRUE; + } + + if (PDumpCtrlCapModIsBlocked()) + { + if (ui32Flags & PDUMP_FLAGS_FORCESPLIT) + { + IMG_UINT32 ui32CurrentBlock; + + PDumpGetCurrentBlockKM(&ui32CurrentBlock); + /* Keep Main stream script output files belongs to first and last block only */ + if (ui32CurrentBlock == 1) + { + /* To keep first(0th) block, do not remove old script file while + * splitting to second(1st) block (i.e. bRemoveOld=IMG_FALSE). + * */ + _PDumpSetSplitMarker(g_PDumpScript.sCh.sMainStream.hTL, IMG_FALSE); + } + else + { + /* Previous block's Main script output file will be removed + * before splitting to next + * */ + _PDumpSetSplitMarker(g_PDumpScript.sCh.sMainStream.hTL, IMG_TRUE); + } + + /* Split Block stream output file + * + * We are keeping block script output files from all PDump blocks. 
+ * */ + _PDumpSetSplitMarker(g_PDumpScript.sCh.sBlockStream.hTL, IMG_FALSE); + g_PDumpScript.ui32FileIdx++; + } + } + + return PDumpWriteToChannel(&g_PDumpScript.sCh, NULL, (IMG_UINT8*) hString, (IMG_UINT32) OSStringLength((IMG_CHAR*) hString), ui32Flags); +} + + +/*****************************************************************************/ + + +struct _PDUMP_CONNECTION_DATA_ { + ATOMIC_T sRefCount; + POS_LOCK hLock; /*!< Protects access to sListHead. */ + DLLIST_NODE sListHead; + IMG_UINT32 ui32LastSetFrameNumber; + PDUMP_TRANSITION_EVENT eLastEvent; /*!< Last processed transition event */ + PDUMP_TRANSITION_EVENT eFailedEvent; /*!< Failed transition event to retry */ + PFN_PDUMP_SYNCBLOCKS pfnPDumpSyncBlocks; /*!< Callback to PDump sync blocks */ + void *hSyncPrivData; /*!< Sync private data */ +}; + +static PDUMP_CONNECTION_DATA * _PDumpConnectionAcquire(PDUMP_CONNECTION_DATA *psPDumpConnectionData) +{ + IMG_INT iRefCount = OSAtomicIncrement(&psPDumpConnectionData->sRefCount); + + PDUMP_REFCOUNT_PRINT("%s: PDump connection %p, refcount = %d", __func__, + psPDumpConnectionData, iRefCount); + PVR_UNREFERENCED_PARAMETER(iRefCount); + + return psPDumpConnectionData; +} + +static void _PDumpConnectionRelease(PDUMP_CONNECTION_DATA *psPDumpConnectionData) +{ + IMG_INT iRefCount = OSAtomicDecrement(&psPDumpConnectionData->sRefCount); + if (iRefCount == 0) + { + OSLockDestroy(psPDumpConnectionData->hLock); + PVR_ASSERT(dllist_is_empty(&psPDumpConnectionData->sListHead)); + OSFreeMem(psPDumpConnectionData); + } + + PDUMP_REFCOUNT_PRINT("%s: PDump connection %p, refcount = %d", __func__, + psPDumpConnectionData, iRefCount); +} + +/****************************************************************************** + * Function Name : PDumpInitStreams + * Outputs : None + * Returns : + * Description : Create the PDump streams +******************************************************************************/ +static PVRSRV_ERROR PDumpInitStreams(PDUMP_CHANNEL* psParam, 
PDUMP_CHANNEL* psScript) +{ + + PVRSRV_ERROR eError; + TL_STREAM_INFO sTLStreamInfo; + + /* TL - Create the streams */ + + /**************************** Parameter stream ***************************/ + + /* Parameter - Init */ + eError = TLStreamCreate(&psParam->sInitStream.hTL, PVRSRVGetPVRSRVData()->psHostMemDeviceNode, + psParam->sInitStream.pszName, psParam->sInitStream.ui32BufferSize, + TL_OPMODE_DROP_NEWER | TL_FLAG_PERMANENT_NO_WRAP, + NULL, NULL, + NULL, NULL); + PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate ParamInit", end); + + TLStreamInfo(psParam->sInitStream.hTL, &sTLStreamInfo); + psParam->sInitStream.ui32MaxAllowedWriteSize = sTLStreamInfo.maxTLpacketSize; + + /* Parameter - Main */ + eError = TLStreamCreate(&psParam->sMainStream.hTL, PVRSRVGetPVRSRVData()->psHostMemDeviceNode, + psParam->sMainStream.pszName, psParam->sMainStream.ui32BufferSize, + TL_OPMODE_DROP_NEWER , + NULL, NULL, + NULL, NULL); + PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate ParamMain", param_main_failed); + + TLStreamInfo(psParam->sMainStream.hTL, &sTLStreamInfo); + psParam->sMainStream.ui32MaxAllowedWriteSize = sTLStreamInfo.maxTLpacketSize; + + /* Parameter - Deinit */ + eError = TLStreamCreate(&psParam->sDeinitStream.hTL, PVRSRVGetPVRSRVData()->psHostMemDeviceNode, + psParam->sDeinitStream.pszName, psParam->sDeinitStream.ui32BufferSize, + TL_OPMODE_DROP_NEWER | TL_FLAG_PERMANENT_NO_WRAP, + NULL, NULL, + NULL, NULL); + PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate ParamDeinit", param_deinit_failed); + + TLStreamInfo(psParam->sDeinitStream.hTL, &sTLStreamInfo); + psParam->sDeinitStream.ui32MaxAllowedWriteSize = sTLStreamInfo.maxTLpacketSize; + + /* Parameter - Block */ + /* As in current implementation Block script stream is just a filtered + * Main script stream using PDUMP_FLAGS_BLKDATA flag, no separate + * Parameter stream is needed. Block script will be referring to the + * same Parameters as that of Main script stream. 
+ */ + + /***************************** Script streams ****************************/ + + /* Script - Init */ + eError = TLStreamCreate(&psScript->sInitStream.hTL, PVRSRVGetPVRSRVData()->psHostMemDeviceNode, + psScript->sInitStream.pszName, psScript->sInitStream.ui32BufferSize, + TL_OPMODE_DROP_NEWER | TL_FLAG_PERMANENT_NO_WRAP, + NULL, NULL, + NULL, NULL); + PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate ScriptInit", script_init_failed); + + TLStreamInfo(psScript->sInitStream.hTL, &sTLStreamInfo); + psScript->sInitStream.ui32MaxAllowedWriteSize = sTLStreamInfo.maxTLpacketSize; + + /* Script - Main */ + eError = TLStreamCreate(&psScript->sMainStream.hTL, PVRSRVGetPVRSRVData()->psHostMemDeviceNode, + psScript->sMainStream.pszName, psScript->sMainStream.ui32BufferSize, + TL_OPMODE_DROP_NEWER, + NULL, NULL, + NULL, NULL); + PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate ScriptMain", script_main_failed); + + TLStreamInfo(psScript->sMainStream.hTL, &sTLStreamInfo); + psScript->sMainStream.ui32MaxAllowedWriteSize = sTLStreamInfo.maxTLpacketSize; + + /* Script - Deinit */ + eError = TLStreamCreate(&psScript->sDeinitStream.hTL, PVRSRVGetPVRSRVData()->psHostMemDeviceNode, + psScript->sDeinitStream.pszName, psScript->sDeinitStream.ui32BufferSize, + TL_OPMODE_DROP_NEWER | TL_FLAG_PERMANENT_NO_WRAP, + NULL, NULL, + NULL, NULL); + PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate ScriptDeinit", script_deinit_failed); + + TLStreamInfo(psScript->sDeinitStream.hTL, &sTLStreamInfo); + psScript->sDeinitStream.ui32MaxAllowedWriteSize = sTLStreamInfo.maxTLpacketSize; + + /* Script - Block */ + eError = TLStreamCreate(&psScript->sBlockStream.hTL, PVRSRVGetPVRSRVData()->psHostMemDeviceNode, + psScript->sBlockStream.pszName, psScript->sBlockStream.ui32BufferSize, + TL_OPMODE_DROP_NEWER, + NULL, NULL, + NULL, NULL); + PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate ScriptBlock", script_block_failed); + + TLStreamInfo(psScript->sBlockStream.hTL, &sTLStreamInfo); + 
psScript->sBlockStream.ui32MaxAllowedWriteSize = sTLStreamInfo.maxTLpacketSize; + + return PVRSRV_OK; + +script_block_failed: + TLStreamClose(psScript->sDeinitStream.hTL); + +script_deinit_failed: + TLStreamClose(psScript->sMainStream.hTL); + +script_main_failed: + TLStreamClose(psScript->sInitStream.hTL); + +script_init_failed: + TLStreamClose(psParam->sDeinitStream.hTL); + +param_deinit_failed: + TLStreamClose(psParam->sMainStream.hTL); + +param_main_failed: + TLStreamClose(psParam->sInitStream.hTL); + +end: + return eError; +} +/****************************************************************************** + * Function Name : PDumpDeInitStreams + * Inputs : psParam, psScript + * Outputs : None + * Returns : None + * Description : Deinitialises the PDump streams +******************************************************************************/ +static void PDumpDeInitStreams(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psScript) +{ + /* Script streams */ + TLStreamClose(psScript->sDeinitStream.hTL); + TLStreamClose(psScript->sMainStream.hTL); + TLStreamClose(psScript->sInitStream.hTL); + TLStreamClose(psScript->sBlockStream.hTL); + + /* Parameter streams */ + TLStreamClose(psParam->sDeinitStream.hTL); + TLStreamClose(psParam->sMainStream.hTL); + TLStreamClose(psParam->sInitStream.hTL); + +} + +/****************************************************************************** + * Function Name : PDumpParameterChannelZeroedPageBlock + * Inputs : None + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Set up the zero page block in the parameter stream +******************************************************************************/ +static PVRSRV_ERROR PDumpParameterChannelZeroedPageBlock(void) +{ + IMG_UINT8 aui8Zero[32] = { 0 }; + size_t uiBytesToWrite; + PVRSRV_ERROR eError; + void *pvAppHintState = NULL; + IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE; + IMG_UINT32 ui32GeneralNon4KHeapPageSize; + + 
OSCreateKMAppHintState(&pvAppHintState); + OSGetKMAppHintUINT32(pvAppHintState, GeneralNon4KHeapPageSize, + &ui32AppHintDefault, &ui32GeneralNon4KHeapPageSize); + OSFreeKMAppHintState(pvAppHintState); + + /* ZeroPageSize can't be smaller than page size */ + g_PDumpParameters.uiZeroPageSize = MAX(ui32GeneralNon4KHeapPageSize, OSGetPageSize()); + + /* ensure the zero page size of a multiple of the zero source on the stack */ + PVR_ASSERT(g_PDumpParameters.uiZeroPageSize % sizeof(aui8Zero) == 0); + + /* the first write gets the parameter file name and stream offset, + * then subsequent writes do not need to know this as the data is + * contiguous in the stream + */ + PDUMP_LOCK(0); + eError = PDumpWriteParameter(aui8Zero, + sizeof(aui8Zero), + 0, + &g_PDumpParameters.uiZeroPageOffset, + g_PDumpParameters.szZeroPageFilename); + + /* Also treat PVRSRV_ERROR_PDUMP_NOT_ALLOWED as an error in this case + * as it should never happen since all writes during driver Init are + * allowed. + */ + PVR_GOTO_IF_ERROR(eError, err_write); + + uiBytesToWrite = g_PDumpParameters.uiZeroPageSize - sizeof(aui8Zero); + + while (uiBytesToWrite) + { + IMG_BOOL bOK; + + bOK = PDumpWriteToChannel(&g_PDumpParameters.sCh, &g_PDumpParameters.sWOff, + aui8Zero, + sizeof(aui8Zero), 0); + + if (!bOK) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PDUMP_BUFFER_FULL, err_write); + } + + uiBytesToWrite -= sizeof(aui8Zero); + } + +err_write: + PDUMP_UNLOCK(0); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to initialise parameter stream zero block")); + } + + return eError; +} + +/****************************************************************************** + * Function Name : PDumpGetParameterZeroPageInfo + * Inputs : None + * Outputs : puiZeroPageOffset: set to the offset of the zero page + * : puiZeroPageSize: set to the size of the zero page + * : ppszZeroPageFilename: set to a pointer to the PRM file name + * : containing the zero page + * Returns : None + * Description : Get 
information about the zero page +******************************************************************************/ +void PDumpGetParameterZeroPageInfo(PDUMP_FILEOFFSET_T *puiZeroPageOffset, + size_t *puiZeroPageSize, + const IMG_CHAR **ppszZeroPageFilename) +{ + *puiZeroPageOffset = g_PDumpParameters.uiZeroPageOffset; + *puiZeroPageSize = g_PDumpParameters.uiZeroPageSize; + *ppszZeroPageFilename = g_PDumpParameters.szZeroPageFilename; +} + +PVRSRV_ERROR PDumpInitCommon(void) +{ + PVRSRV_ERROR eError; + PDUMP_HERE_VAR; + + PDUMP_HEREA(2010); + + /* Initialised with default initial value */ + OSAtomicWrite(&g_sConnectionCount, 0); +#if defined(PDUMP_DEBUG_OUTFILES) + OSAtomicWrite(&g_sEveryLineCounter, 1); +#endif + + eError = OSLockCreate(&g_hPDumpWriteLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", errRet); + + /* Initialise PDump control module in common layer, also sets + * state to PDUMP_SM_INITIALISING. + */ + eError = PDumpCtrlInit(); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpCtrlInit", errRetLock); + + /* Call environment specific PDump initialisation Part 2*/ + eError = PDumpInitStreams(&g_PDumpParameters.sCh, &g_PDumpScript.sCh); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpInitStreams", errRetCtrl); + + /* PDump now ready for write calls */ + PDumpModuleTransitionState(PDUMP_SM_READY); + + PDUMP_HEREA(2011); + + /* Test PDump initialised and ready by logging driver details */ + eError = PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "Driver Product Version: %s - %s (%s)", PVRVERSION_STRING, PVR_BUILD_DIR, PVR_BUILD_TYPE); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpCommentWithFlags", errRetState); + + eError = PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "Start of Init Phase"); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpCommentWithFlags", errRetState); + + eError = PDumpParameterChannelZeroedPageBlock(); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpParameterChannelZeroedPageBlock", errRetState); + + PDUMP_HEREA(2012); +ret: + return eError; + + +errRetState: + 
PDumpModuleTransitionState(PDUMP_SM_UNINITIALISED); + PDumpDeInitStreams(&g_PDumpParameters.sCh, &g_PDumpScript.sCh); +errRetCtrl: + PDumpCtrlDeInit(); +errRetLock: + OSLockDestroy(g_hPDumpWriteLock); + PDUMP_HEREA(2013); +errRet: + goto ret; +} +void PDumpDeInitCommon(void) +{ + PDUMP_HERE_VAR; + + PDUMP_HEREA(2020); + + /* Suspend PDump as we want PDumpWriteAllowed to deliberately fail during PDump deinit */ + PDumpModuleTransitionState(PDUMP_SM_DEINITIALISED); + + /*Call environment specific PDump Deinitialisation */ + PDumpDeInitStreams(&g_PDumpParameters.sCh, &g_PDumpScript.sCh); + + /* DeInit the PDUMP_CTRL_STATE data */ + PDumpCtrlDeInit(); + + /* take down the global PDump lock */ + OSLockDestroy(g_hPDumpWriteLock); +} + +void PDumpStopInitPhase(void) +{ + /* output this comment to indicate init phase ending OSs */ + PDUMPCOMMENT("Stop Init Phase"); + + PDumpCtrlLockAcquire(); + PDumpCtrlSetInitPhaseComplete(IMG_TRUE); + PDumpCtrlLockRelease(); +} + +PVRSRV_ERROR PDumpIsLastCaptureFrameKM(IMG_BOOL *pbIsLastCaptureFrame) +{ + PDumpCtrlLockAcquire(); + *pbIsLastCaptureFrame = PDumpCtrlIsLastCaptureFrame(); + PDumpCtrlLockRelease(); + + return PVRSRV_OK; +} + + + +typedef struct _PDUMP_Transition_DATA_ +{ + PFN_PDUMP_TRANSITION pfnCallback; + void *hPrivData; + void *pvDevice; + PDUMP_CONNECTION_DATA *psPDumpConnectionData; + DLLIST_NODE sNode; +} PDUMP_Transition_DATA; + +PVRSRV_ERROR PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData, + PFN_PDUMP_TRANSITION pfnCallback, + void *hPrivData, + void *pvDevice, + void **ppvHandle) +{ + PDUMP_Transition_DATA *psData; + PVRSRV_ERROR eError; + + psData = OSAllocMem(sizeof(*psData)); + PVR_GOTO_IF_NOMEM(psData, eError, fail_alloc); + + /* Setup the callback and add it to the list for this process */ + psData->pfnCallback = pfnCallback; + psData->hPrivData = hPrivData; + psData->pvDevice = pvDevice; + + OSLockAcquire(psPDumpConnectionData->hLock); + 
dllist_add_to_head(&psPDumpConnectionData->sListHead, &psData->sNode); + OSLockRelease(psPDumpConnectionData->hLock); + + /* Take a reference on the connection so it doesn't get freed too early */ + psData->psPDumpConnectionData =_PDumpConnectionAcquire(psPDumpConnectionData); + *ppvHandle = psData; + + return PVRSRV_OK; + +fail_alloc: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +void PDumpUnregisterTransitionCallback(void *pvHandle) +{ + PDUMP_Transition_DATA *psData = pvHandle; + + OSLockAcquire(psData->psPDumpConnectionData->hLock); + dllist_remove_node(&psData->sNode); + OSLockRelease(psData->psPDumpConnectionData->hLock); + _PDumpConnectionRelease(psData->psPDumpConnectionData); + OSFreeMem(psData); +} + +typedef struct _PDUMP_Transition_DATA_FENCE_SYNC_ +{ + PFN_PDUMP_TRANSITION_FENCE_SYNC pfnCallback; + void *hPrivData; +} PDUMP_Transition_DATA_FENCE_SYNC; + +PVRSRV_ERROR PDumpRegisterTransitionCallbackFenceSync(void *hPrivData, + PFN_PDUMP_TRANSITION_FENCE_SYNC pfnCallback, void **ppvHandle) +{ + PDUMP_Transition_DATA_FENCE_SYNC *psData; + PVRSRV_ERROR eError; + + psData = OSAllocMem(sizeof(*psData)); + PVR_GOTO_IF_NOMEM(psData, eError, fail_alloc_exit); + + /* Setup the callback and add it to the list for this process */ + psData->pfnCallback = pfnCallback; + psData->hPrivData = hPrivData; + + *ppvHandle = psData; + return PVRSRV_OK; + +fail_alloc_exit: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +void PDumpUnregisterTransitionCallbackFenceSync(void *pvHandle) +{ + PDUMP_Transition_DATA_FENCE_SYNC *psData = pvHandle; + + OSFreeMem(psData); +} + +static PVRSRV_ERROR _PDumpTransition(PDUMP_CONNECTION_DATA *psPDumpConnectionData, PDUMP_TRANSITION_EVENT eEvent, IMG_UINT32 ui32PDumpFlags) +{ + DLLIST_NODE *psNode, *psNext; + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psThis; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + /* Only call the callbacks if we've really got new event */ + if ((eEvent != 
psPDumpConnectionData->eLastEvent) && (eEvent != PDUMP_TRANSITION_EVENT_NONE)) + { + OSLockAcquire(psPDumpConnectionData->hLock); + + dllist_foreach_node(&psPDumpConnectionData->sListHead, psNode, psNext) + { + PDUMP_Transition_DATA *psData = + IMG_CONTAINER_OF(psNode, PDUMP_Transition_DATA, sNode); + + eError = psData->pfnCallback(psData->hPrivData, psData->pvDevice, eEvent, ui32PDumpFlags); + + if (eError != PVRSRV_OK) + { + OSLockRelease(psPDumpConnectionData->hLock); + psPDumpConnectionData->eFailedEvent = eEvent; /* Save failed event to retry */ + return eError; + } + } + OSLockRelease(psPDumpConnectionData->hLock); + + /* PDump sync blocks: + * + * Client sync prims are managed in blocks. + * + * sync-blocks gets re-dumped each time we enter into capture range or + * enter into new PDump block. Ensure that live-FW thread and app-thread + * are synchronised before this. + * + * At playback time, script-thread and sim-FW threads needs to be + * synchronised before re-loading sync-blocks. 
+ * */ + psPDumpConnectionData->pfnPDumpSyncBlocks(psPDumpConnectionData->hSyncPrivData, eEvent); + + psThis = psPVRSRVData->psDeviceNodeList; + while (psThis) + { + if (psThis->hTransition) + { + PDUMP_Transition_DATA_FENCE_SYNC *psData = (PDUMP_Transition_DATA_FENCE_SYNC*)psThis->hTransition; + psData->pfnCallback(psData->hPrivData, eEvent); + } + psThis = psThis->psNext; + } + + psPDumpConnectionData->eLastEvent = eEvent; + psPDumpConnectionData->eFailedEvent = PDUMP_TRANSITION_EVENT_NONE; /* Clear failed event on success */ + } + return PVRSRV_OK; +} + +static PVRSRV_ERROR _PDumpBlockTransition(PDUMP_CONNECTION_DATA *psPDumpConnectionData, PDUMP_TRANSITION_EVENT eEvent, IMG_UINT32 ui32PDumpFlags) +{ + + /* Need to follow following sequence for Block transition: + * + * (1) _PDumpTransition with BLOCK_FINISHED event for current block + * (2) Split MAIN and Block script files + * (3) _PDumpTransition with BLOCK_STARTED event for new block + * + * */ + + PVRSRV_ERROR eError; + IMG_UINT32 ui32CurrentBlock; + IMG_UINT32 ui32Flags = (PDUMP_FLAGS_BLKDATA | PDUMP_FLAGS_CONTINUOUS); /* Internal Block mode specific PDump flags */ + + PDumpGetCurrentBlockKM(&ui32CurrentBlock); + + if (eEvent == PDUMP_TRANSITION_EVENT_BLOCK_FINISHED) + { + /* (1) Current block has finished */ + eError = _PDumpTransition(psPDumpConnectionData, PDUMP_TRANSITION_EVENT_BLOCK_FINISHED, ui32PDumpFlags); + PVR_RETURN_IF_ERROR(eError); + + (void) PDumpCommentWithFlags(ui32Flags, "}PDUMP_BLOCK_END_0x%08X", ui32CurrentBlock - 1); /* Add pdump-block end marker */ + + /* (2) Split MAIN and BLOCK script out files on current pdump-block end */ + ui32Flags |= PDUMP_FLAGS_FORCESPLIT; + + (void) PDumpCommentWithFlags(ui32Flags, "PDUMP_BLOCK_START_0x%08X{", ui32CurrentBlock); /* Add pdump-block start marker */ + } + + /* (3) New block has started */ + return _PDumpTransition(psPDumpConnectionData, PDUMP_TRANSITION_EVENT_BLOCK_STARTED, ui32PDumpFlags); +} + + +PVRSRV_ERROR 
PDumpTransition(PDUMP_CONNECTION_DATA *psPDumpConnectionData, PDUMP_TRANSITION_EVENT eEvent, IMG_UINT32 ui32PDumpFlags) +{ + if ((eEvent == PDUMP_TRANSITION_EVENT_BLOCK_FINISHED) || (eEvent == PDUMP_TRANSITION_EVENT_BLOCK_STARTED)) + { + /* Block mode transition events */ + PVR_ASSERT(PDumpCtrlCapModIsBlocked()); + return _PDumpBlockTransition(psPDumpConnectionData, eEvent, ui32PDumpFlags); + } + else + { + /* Non-block mode transition events */ + return _PDumpTransition(psPDumpConnectionData, eEvent, ui32PDumpFlags); + } +} + +PVRSRV_ERROR PDumpIsCaptureFrameKM(IMG_BOOL *bInCaptureRange) +{ + IMG_UINT64 ui64State = 0; + PVRSRV_ERROR eError; + + eError = PDumpCtrlGetState(&ui64State); + + *bInCaptureRange = (ui64State & PDUMP_STATE_CAPTURE_FRAME) ? IMG_TRUE : IMG_FALSE; + + return eError; +} + +PVRSRV_ERROR PDumpGetStateKM(IMG_UINT64 *ui64State) +{ + PVRSRV_ERROR eError; + + PDumpCtrlLockAcquire(); + eError = PDumpCtrlGetState(ui64State); + PDumpCtrlLockRelease(); + + return eError; +} + +PVRSRV_ERROR PDumpGetCurrentBlockKM(IMG_UINT32 *pui32BlockNum) +{ + PDumpCtrlLockAcquire(); + *pui32BlockNum = PDumpCtrlGetBlock(); + PDumpCtrlLockRelease(); + + return PVRSRV_OK; +} + +/****************************************************************************** + * Function Name : PDumpUpdateBlockCtrlStatus + * Inputs : ui32Frame - frame number + * Outputs : None + * Returns : IMG_TRUE if Block transition is required, else IMG_FALSE + * Description : Updates Block Ctrl status and checks if block transition + * is required or not +******************************************************************************/ +static INLINE IMG_BOOL PDumpUpdateBlockCtrlStatus(IMG_UINT32 ui32Frame) +{ + IMG_BOOL bForceBlockTransition; + + /* Default length of first block will be PDUMP_BLOCKLEN_MIN. + * User can force it to be same as block length provided (i.e. ui32BlockLength) + * through pdump client. + * + * Here is how blocks will be created. 
+ * + * Assume, + * ui32BlockLength = 20 + * PDUMP_BLOCKLEN_MIN = 10 + * + * Then different pdump blocks will have following number of frames in it: + * + * if(!PDumpCtrlMinimalFirstBlock()) + * { + * //pdump -b + * block 0 -> 00...09 -->minimal first block + * block 1 -> 10...29 + * block 2 -> 30...49 + * block 3 -> 50...69 + * ... + * } + * else + * { + * //pdump -bf + * block 0 -> 00...19 + * block 1 -> 20...39 + * block 2 -> 40...59 + * block 3 -> 60...79 + * ... + * } + * + * */ + + if (PDumpCtrlMinimalFirstBlock()) + { + bForceBlockTransition = ((ui32Frame >= PDUMP_BLOCKLEN_MIN) && !((ui32Frame - PDUMP_BLOCKLEN_MIN) % g_PDumpCtrl.sBlockCtrl.ui32BlockLength)) || (ui32Frame == 0); + } + else + { + bForceBlockTransition = !(ui32Frame % g_PDumpCtrl.sBlockCtrl.ui32BlockLength); + } + + if (bForceBlockTransition) /* Entering in new pdump-block */ + { + /* Update block number + * + * Logic below is to maintain block number and frame number mappings + * in case of some applications where setFrame(0) gets called twice + * at the start. + * */ + PDumpCtrlLockAcquire(); + PDumpCtrlSetBlock((ui32Frame == 0)? 0 : (PDumpCtrlGetBlock() + 1)); + PDumpCtrlLockRelease(); + + if (ui32Frame > 0) /* Do not do transition on first frame itself */ + { + return IMG_TRUE; /* Transition */ + } + } + return IMG_FALSE; /* No transition */ +} + +PVRSRV_ERROR PDumpForceCaptureStopKM(void) +{ + PVRSRV_ERROR eError; + + if (!PDumpCtrlCapModIsBlocked()) + { + PVR_DPF((PVR_DBG_ERROR, "%s: This call is valid only in Block mode of PDump i.e. 
pdump -b", __func__)); + return PVRSRV_ERROR_PDUMP_NOT_ALLOWED; + } + + (void) PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_BLKDATA, "PDdump forced STOP capture request received at frame %u", g_PDumpCtrl.ui32CurrentFrame); + + PDumpCtrlLockAcquire(); + eError = PDumpCtrlForcedStop(); + PDumpCtrlLockRelease(); + + return eError; +} + +static PVRSRV_ERROR _PDumpSetFrameKM(CONNECTION_DATA *psConnection, + IMG_UINT32 ui32Frame) +{ + PDUMP_CONNECTION_DATA *psPDumpConnectionData = psConnection->psPDumpConnectionData; + PDUMP_TRANSITION_EVENT eTransitionEvent = PDUMP_TRANSITION_EVENT_NONE; + IMG_BOOL bWasInCaptureRange = IMG_FALSE; + IMG_BOOL bIsInCaptureRange = IMG_FALSE; + PVRSRV_ERROR eError; + + /* + Note: + As we can't test to see if the new frame will be in capture range + before we set the frame number and we don't want to roll back the + frame number if we fail then we have to save the "transient" data + which decides if we're entering or exiting capture range along + with a failure boolean so we know what's required on a retry + */ + if (psPDumpConnectionData->ui32LastSetFrameNumber != ui32Frame) + { + (void) PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "Set pdump frame %u", ui32Frame); + + /* + The boolean values below decide if the PDump transition + should trigger because of the current context setting the + frame number, hence the functions below should execute + atomically and do not give a chance to some other context + to transition + */ + PDumpCtrlLockAcquire(); + + PDumpIsCaptureFrameKM(&bWasInCaptureRange); + PDumpCtrlSetCurrentFrame(ui32Frame); + PDumpIsCaptureFrameKM(&bIsInCaptureRange); + + PDumpCtrlLockRelease(); + + psPDumpConnectionData->ui32LastSetFrameNumber = ui32Frame; + + /* Check for any transition event only if client is connected */ + if (PDumpIsClientConnected()) + { + if (!bWasInCaptureRange && bIsInCaptureRange) + { + eTransitionEvent = PDUMP_TRANSITION_EVENT_RANGE_ENTERED; + } + else if (bWasInCaptureRange && 
!bIsInCaptureRange) + { + eTransitionEvent = PDUMP_TRANSITION_EVENT_RANGE_EXITED; + } + + if (PDumpCtrlCapModIsBlocked()) + { + /* Update block ctrl status and check for block transition */ + if (PDumpUpdateBlockCtrlStatus(ui32Frame)) + { + PVR_ASSERT(eTransitionEvent == PDUMP_TRANSITION_EVENT_NONE); /* Something went wrong, can't handle two events at same time */ + eTransitionEvent = PDUMP_TRANSITION_EVENT_BLOCK_FINISHED; + } + } + } + } + else if (psPDumpConnectionData->eFailedEvent != PDUMP_TRANSITION_EVENT_NONE) + { + /* Load the Transition data so we can try again */ + eTransitionEvent = psPDumpConnectionData->eFailedEvent; + } + else + { + /* New frame is the same as the last frame set and the last + * transition succeeded, no need to perform another transition. + */ + return PVRSRV_OK; + } + + if (eTransitionEvent != PDUMP_TRANSITION_EVENT_NONE) + { + DEBUG_OUTFILES_COMMENT("PDump transition event(%u)-begin frame %u (post)", eTransitionEvent, ui32Frame); + eError = PDumpTransition(psPDumpConnectionData, eTransitionEvent, PDUMP_FLAGS_NONE); + DEBUG_OUTFILES_COMMENT("PDump transition event(%u)-complete frame %u (post)", eTransitionEvent, ui32Frame); + PVR_RETURN_IF_ERROR(eError); + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR PDumpSetFrameKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32Frame) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + +#if defined(PDUMP_TRACE_STATE) + PVR_DPF((PVR_DBG_WARNING, "PDumpSetFrameKM: ui32Frame( %d )", ui32Frame)); +#endif + + DEBUG_OUTFILES_COMMENT("(pre) Set pdump frame %u", ui32Frame); + + eError = _PDumpSetFrameKM(psConnection, ui32Frame); + if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)) + { + PVR_LOG_ERROR(eError, "_PDumpSetFrameKM"); + } + + DEBUG_OUTFILES_COMMENT("(post) Set pdump frame %u", ui32Frame); + + return eError; +} + +PVRSRV_ERROR PDumpGetFrameKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + 
IMG_UINT32* pui32Frame) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + /* + It may be safe to avoid acquiring this lock here as all the other calls + which read/modify current frame will wait on the PDump Control bridge + lock first. Also, in no way as of now, does the PDumping app modify the + current frame through a call which acquires the global bridge lock. + Still, as a legacy we acquire and then read. + */ + PDumpCtrlLockAcquire(); + + *pui32Frame = PDumpCtrlGetCurrentFrame(); + + PDumpCtrlLockRelease(); + return eError; +} + +PVRSRV_ERROR PDumpSetDefaultCaptureParamsKM(IMG_UINT32 ui32Mode, + IMG_UINT32 ui32Start, + IMG_UINT32 ui32End, + IMG_UINT32 ui32Interval, + IMG_UINT32 ui32MaxParamFileSize) +{ + PVRSRV_ERROR eError; + + eError = PDumpReady(); + PVR_LOG_RETURN_IF_ERROR(eError, "PDumpReady"); + + /* Validate parameters */ + if ((ui32End < ui32Start) || (ui32Mode > PDUMP_CAPMODE_MAX)) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + else if (ui32Mode == PDUMP_CAPMODE_BLOCKED) + { + if ((ui32Interval < PDUMP_BLOCKLEN_MIN) || (ui32Interval > PDUMP_BLOCKLEN_MAX)) + { + /* Force client to set ui32Interval (i.e. 
block length) in valid range */ + eError = PVRSRV_ERROR_PDUMP_INVALID_BLOCKLEN; + } + + if (ui32End != PDUMP_FRAME_MAX) + { + /* Force client to set ui32End to PDUMP_FRAME_MAX */ + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + } + else if ((ui32Mode != PDUMP_CAPMODE_UNSET) && (ui32Interval < 1)) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + PVR_LOG_RETURN_IF_ERROR(eError, "PDumpSetDefaultCaptureParamsKM"); + + /* + Acquire PDUMP_CTRL_STATE struct lock before modifications as a + PDumping app may be reading the state data for some checks + */ + PDumpCtrlLockAcquire(); + PDumpCtrlSetDefaultCaptureParams(ui32Mode, ui32Start, ui32End, ui32Interval); + PDumpCtrlLockRelease(); + + if (ui32MaxParamFileSize == 0) + { + g_PDumpParameters.ui32MaxFileSize = PRM_FILE_SIZE_MAX; + } + else + { + g_PDumpParameters.ui32MaxFileSize = ui32MaxParamFileSize; + } + return PVRSRV_OK; +} + + +/****************************************************************************** + * Function Name : PDumpReg32 + * Inputs : pszPDumpDevName, Register offset, and value to write + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Create a PDUMP string, which represents a register write +******************************************************************************/ +PVRSRV_ERROR PDumpReg32(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32Reg, + IMG_UINT32 ui32Data, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING() + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW :%s:0x%08X 0x%08X", pszPDumpRegName, ui32Reg, ui32Data); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + PDUMP_UNLOCK(ui32Flags); + + PDUMP_RELEASE_SCRIPT_STRING() + return PVRSRV_OK; +} + +/****************************************************************************** + * Function Name : PDumpReg64 + * Inputs : pszPDumpDevName, Register offset, and value to write + * Outputs : None + * Returns : 
PVRSRV_ERROR + * Description : Create a PDUMP string, which represents a register write +******************************************************************************/ +PVRSRV_ERROR PDumpReg64(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32Reg, + IMG_UINT64 ui64Data, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + IMG_UINT32 ui32UpperValue = (IMG_UINT32) (ui64Data >> 32); + IMG_UINT32 ui32LowerValue = (IMG_UINT32) (ui64Data); +#endif + + PDUMP_GET_SCRIPT_STRING() + +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW :%s:0x%08X 0x%08X", + pszPDumpRegName, ui32Reg, ui32LowerValue); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW :%s:0x%08X 0x%08X", + pszPDumpRegName, ui32Reg + 4, ui32UpperValue); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + PDUMP_UNLOCK(ui32Flags); + return eErr; + } + PDumpWriteScript(hScript, ui32Flags); + PDUMP_UNLOCK(ui32Flags); +#else + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW64 :%s:0x%08X 0x%010" IMG_UINT64_FMTSPECX, pszPDumpRegName, ui32Reg, ui64Data); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + PDUMP_UNLOCK(ui32Flags); +#endif + PDUMP_RELEASE_SCRIPT_STRING() + return PVRSRV_OK; +} + +/****************************************************************************** + * Function Name : PDumpRegLabelToReg64 + * Returns : PVRSRV_ERROR + * Description : Create a PDUMP string, which represents a register write + * from a register label +******************************************************************************/ +PVRSRV_ERROR PDumpRegLabelToReg64(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegDst, + IMG_UINT32 ui32RegSrc, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + 
PDUMP_GET_SCRIPT_STRING() + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW64 :%s:0x%08X :%s:0x%08X", pszPDumpRegName, ui32RegDst, pszPDumpRegName, ui32RegSrc); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + PDUMP_UNLOCK(ui32Flags); + + PDUMP_RELEASE_SCRIPT_STRING() + return PVRSRV_OK; + +} + +/****************************************************************************** + * Function Name : PDumpRegLabelToMem32 + * Returns : PVRSRV_ERROR + * Description : Create a PDUMP string, which represents a memory write + * from a register label +******************************************************************************/ +PVRSRV_ERROR PDumpRegLabelToMem32(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32Reg, + PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + + PDUMP_GET_SCRIPT_STRING() + + eErr = PMR_PDumpSymbolicAddr(psPMR, + uiLogicalOffset, + PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH, + aszMemspaceName, + PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH, + aszSymbolicName, + &uiPDumpSymbolicOffset, + &uiNextSymName); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW :%s:%s:0x%"IMG_UINT64_FMTSPECX" :%s:0x%08X",aszMemspaceName, aszSymbolicName, + uiPDumpSymbolicOffset, pszPDumpRegName, ui32Reg); + + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + PDUMP_UNLOCK(ui32Flags); + + PDUMP_RELEASE_SCRIPT_STRING() + return PVRSRV_OK; +} + +/****************************************************************************** + * Function Name : PDumpRegLabelToMem64 + * Returns 
: PVRSRV_ERROR + * Description : Create a PDUMP string, which represents a memory write + * from a register label +******************************************************************************/ +PVRSRV_ERROR PDumpRegLabelToMem64(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32Reg, + PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + + PDUMP_GET_SCRIPT_STRING() + + eErr = PMR_PDumpSymbolicAddr(psPMR, + uiLogicalOffset, + PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH, + aszMemspaceName, + PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH, + aszSymbolicName, + &uiPDumpSymbolicOffset, + &uiNextSymName); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW64 :%s:%s:0x%"IMG_UINT64_FMTSPECX" :%s:0x%08X",aszMemspaceName, aszSymbolicName, + uiPDumpSymbolicOffset, pszPDumpRegName, ui32Reg); + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + PDUMP_UNLOCK(ui32Flags); + + PDUMP_RELEASE_SCRIPT_STRING() + return PVRSRV_OK; +} + + +/****************************************************************************** + * Function Name : PDumpPhysHandleToInternalVar64 + * Returns : PVRSRV_ERROR + * Description : Create a PDUMP string, which represents an internal var + write using a PDump pages handle +******************************************************************************/ +PVRSRV_ERROR PDumpPhysHandleToInternalVar64(IMG_CHAR *pszInternalVar, + IMG_HANDLE hPdumpPages, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + IMG_CHAR *pszSymbolicName; +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + IMG_CHAR *pszPDumpVarName; +#endif + + PDUMP_GET_SCRIPT_STRING() + + eErr = 
PDumpGetSymbolicAddr(hPdumpPages, + &pszSymbolicName); + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING(); + return eErr; + } + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, + "WRW %s %s:0x%llX", + pszInternalVar, pszSymbolicName, 0llu); + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING(); + return eErr; + } + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + pszPDumpVarName = PDumpCreateIncVarNameStr(pszInternalVar); + if (pszPDumpVarName == NULL) + { + PDUMP_RELEASE_SCRIPT_STRING() + PDUMP_UNLOCK(ui32Flags); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW %s 0x%X", pszPDumpVarName, 0); + + PDumpFreeIncVarNameStr(pszPDumpVarName); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + PDUMP_UNLOCK(ui32Flags); + return eErr; + } + + PDumpWriteScript(hScript, ui32Flags); + +#endif + PDUMP_UNLOCK(ui32Flags); + + PDUMP_RELEASE_SCRIPT_STRING(); + + return PVRSRV_OK; +} + +/****************************************************************************** + * Function Name : PDumpMemLabelToInternalVar64 + * Returns : PVRSRV_ERROR + * Description : Create a PDUMP string, which represents an internal var + * write using a memory label +******************************************************************************/ +PVRSRV_ERROR PDumpMemLabelToInternalVar64(IMG_CHAR *pszInternalVar, + PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + IMG_CHAR *pszPDumpVarName; +#endif + + PDUMP_GET_SCRIPT_STRING() + + eErr = PMR_PDumpSymbolicAddr(psPMR, + uiLogicalOffset, + PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH, + aszMemspaceName, + 
PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH, + aszSymbolicName, + &uiPDumpSymbolicOffset, + &uiNextSymName); + + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW %s :%s:%s:0x%"IMG_UINT64_FMTSPECX, pszInternalVar, + aszMemspaceName, aszSymbolicName, uiPDumpSymbolicOffset); + + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + pszPDumpVarName = PDumpCreateIncVarNameStr(pszInternalVar); + if (pszPDumpVarName == NULL) + { + PDUMP_RELEASE_SCRIPT_STRING() + PDUMP_UNLOCK(ui32Flags); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW %s 0x%X", pszPDumpVarName, 0); + + PDumpFreeIncVarNameStr(pszPDumpVarName); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + PDUMP_UNLOCK(ui32Flags); + return eErr; + } + + PDumpWriteScript(hScript, ui32Flags); + +#endif + PDUMP_UNLOCK(ui32Flags); + + PDUMP_RELEASE_SCRIPT_STRING() + return PVRSRV_OK; +} + +/****************************************************************************** + * Function Name : PDumpInternalVarToMemLabel + * Returns : PVRSRV_ERROR + * Description : Create a PDUMP string, which represents a memory label + * write using an internal var +******************************************************************************/ +PVRSRV_ERROR PDumpInternalVarToMemLabel(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_CHAR *pszInternalVar, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + IMG_CHAR *pszPDumpVarName; +#endif + + PDUMP_GET_SCRIPT_STRING() + + eErr = PMR_PDumpSymbolicAddr(psPMR, + 
uiLogicalOffset, + PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH, + aszMemspaceName, + PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH, + aszSymbolicName, + &uiPDumpSymbolicOffset, + &uiNextSymName); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + PDUMP_LOCK(ui32Flags); + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW :%s:%s:0x%"IMG_UINT64_FMTSPECX" %s", + aszMemspaceName, aszSymbolicName, uiPDumpSymbolicOffset, pszInternalVar); + + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + PDUMP_UNLOCK(ui32Flags); + return eErr; + } + + PDumpWriteScript(hScript, ui32Flags); + +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + pszPDumpVarName = PDumpCreateIncVarNameStr(pszInternalVar); + if (pszPDumpVarName == NULL) + { + PDUMP_RELEASE_SCRIPT_STRING() + PDUMP_UNLOCK(ui32Flags); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW %s 0x%X", pszPDumpVarName, 0); + + PDumpFreeIncVarNameStr(pszPDumpVarName); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + PDUMP_UNLOCK(ui32Flags); + return eErr; + } + + PDumpWriteScript(hScript, ui32Flags); + +#endif + PDUMP_UNLOCK(ui32Flags); + + PDUMP_RELEASE_SCRIPT_STRING(); + return PVRSRV_OK; +} + +/*! 
+******************************************************************************* + + @Function PDumpWriteRegORValueOp + + @Description + + Emits the PDump commands for the logical OR operation + Var <- Var OR Value + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PDumpWriteVarORValueOp(const IMG_CHAR *pszInternalVariable, + const IMG_UINT64 ui64Value, + const IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eErr; +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + IMG_CHAR *pszPDumpVarName; + IMG_UINT32 ui32UpperValue = (IMG_UINT32) (ui64Value >> 32); + IMG_UINT32 ui32LowerValue = (IMG_UINT32) (ui64Value); +#endif + + PDUMP_GET_SCRIPT_STRING(); + + eErr = PDumpSNPrintf(hScript, + ui32MaxLen, +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + "OR %s %s 0x%X", +#else + "OR %s %s 0x%"IMG_UINT64_FMTSPECX, +#endif + pszInternalVariable, + pszInternalVariable, +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + ui32LowerValue +#else + ui64Value +#endif + ); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + PDUMP_LOCK(ui32PDumpFlags); + PDumpWriteScript(hScript, ui32PDumpFlags); + +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + pszPDumpVarName = PDumpCreateIncVarNameStr(pszInternalVariable); + if (pszPDumpVarName == NULL) + { + PDUMP_RELEASE_SCRIPT_STRING() + PDUMP_UNLOCK(ui32PDumpFlags); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + eErr = PDumpSNPrintf(hScript, + ui32MaxLen, + "OR %s %s 0x%X", + pszPDumpVarName, + pszPDumpVarName, + ui32UpperValue); + + PDumpFreeIncVarNameStr(pszPDumpVarName); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + PDUMP_UNLOCK(ui32PDumpFlags); + return eErr; + } + + PDumpWriteScript(hScript, ui32PDumpFlags); +#endif + + PDUMP_UNLOCK(ui32PDumpFlags); + + PDUMP_RELEASE_SCRIPT_STRING() + return PVRSRV_OK; +} + +/*! 
+******************************************************************************* + + @Function PDumpWriteVarORVarOp + + @Description + + Emits the PDump commands for the logical OR operation + Var <- Var OR Var2 + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PDumpWriteVarORVarOp(const IMG_CHAR *pszInternalVar, + const IMG_CHAR *pszInternalVar2, + const IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eErr; + + PDUMP_GET_SCRIPT_STRING(); + + eErr = PDumpSNPrintf(hScript, + ui32MaxLen, + "OR %s %s %s", + pszInternalVar, + pszInternalVar, + pszInternalVar2); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + PDUMP_LOCK(ui32PDumpFlags); + PDumpWriteScript(hScript, ui32PDumpFlags); + + PDUMP_UNLOCK(ui32PDumpFlags); + PDUMP_RELEASE_SCRIPT_STRING() + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function PDumpWriteVarANDVarOp + + @Description + + Emits the PDump commands for the logical AND operation + Var <- Var AND Var2 + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PDumpWriteVarANDVarOp(const IMG_CHAR *pszInternalVar, + const IMG_CHAR *pszInternalVar2, + const IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eErr; + + PDUMP_GET_SCRIPT_STRING(); + + eErr = PDumpSNPrintf(hScript, + ui32MaxLen, + "AND %s %s %s", + pszInternalVar, + pszInternalVar, + pszInternalVar2); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + PDUMP_LOCK(ui32PDumpFlags); + PDumpWriteScript(hScript, ui32PDumpFlags); + + PDUMP_UNLOCK(ui32PDumpFlags); + PDUMP_RELEASE_SCRIPT_STRING() + return PVRSRV_OK; +} + + +/****************************************************************************** + * Function Name : PDumpRegLabelToInternalVar + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Create a PDUMP string, which 
writes a register label into + * an internal variable +******************************************************************************/ +PVRSRV_ERROR PDumpRegLabelToInternalVar(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32Reg, + IMG_CHAR *pszInternalVar, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + IMG_CHAR *pszPDumpVarName; +#endif + PDUMP_GET_SCRIPT_STRING() + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW %s :%s:0x%08X", pszInternalVar, pszPDumpRegName, ui32Reg); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + pszPDumpVarName = PDumpCreateIncVarNameStr(pszInternalVar); + if (pszPDumpVarName == NULL) + { + PDUMP_RELEASE_SCRIPT_STRING() + PDUMP_UNLOCK(ui32Flags); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW %s :%s:0x%08X", pszPDumpVarName, pszPDumpRegName, ui32Reg + 4); + + PDumpFreeIncVarNameStr(pszPDumpVarName); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + PDUMP_UNLOCK(ui32Flags); + return eErr; + } + + PDumpWriteScript(hScript, ui32Flags); +#endif + + PDUMP_UNLOCK(ui32Flags); + PDUMP_RELEASE_SCRIPT_STRING() + return PVRSRV_OK; + +} + +/****************************************************************************** + * Function Name : PDumpInternalVarToReg32 + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Create a PDUMP string, which represents a register write + * from an internal variable +******************************************************************************/ +PVRSRV_ERROR PDumpInternalVarToReg32(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32Reg, + IMG_CHAR *pszInternalVar, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING() + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW :%s:0x%08X %s", pszPDumpRegName, ui32Reg, pszInternalVar); + + if (eErr 
!= PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + PDUMP_UNLOCK(ui32Flags); + + PDUMP_RELEASE_SCRIPT_STRING() + return PVRSRV_OK; +} + +/****************************************************************************** + * Function Name : PDumpInternalVarToReg64 + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Create a PDUMP string, which represents a register write + * from an internal variable +******************************************************************************/ +PVRSRV_ERROR PDumpInternalVarToReg64(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32Reg, + IMG_CHAR *pszInternalVar, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + IMG_CHAR *pszPDumpVarName; +#endif + PDUMP_GET_SCRIPT_STRING() + +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW :%s:0x%08X %s", pszPDumpRegName, ui32Reg, pszInternalVar); + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + + pszPDumpVarName = PDumpCreateIncVarNameStr(pszInternalVar); + if (pszPDumpVarName == NULL) + { + PDUMP_RELEASE_SCRIPT_STRING() + PDUMP_UNLOCK(ui32Flags); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW :%s:0x%08X %s", pszPDumpRegName, ui32Reg + 4, pszPDumpVarName); + + PDumpFreeIncVarNameStr(pszPDumpVarName); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + PDUMP_UNLOCK(ui32Flags); + return eErr; + } + + PDumpWriteScript(hScript, ui32Flags); + PDUMP_UNLOCK(ui32Flags); + +#else + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW64 :%s:0x%08X %s", pszPDumpRegName, ui32Reg, pszInternalVar); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + PDUMP_UNLOCK(ui32Flags); +#endif 
+ + PDUMP_RELEASE_SCRIPT_STRING() + return PVRSRV_OK; +} + + + +/****************************************************************************** + * Function Name : PDumpMemLabelToMem32 + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Create a PDUMP string, which represents a memory write from + * a memory label +******************************************************************************/ +PVRSRV_ERROR PDumpMemLabelToMem32(PMR *psPMRSource, + PMR *psPMRDest, + IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource, + IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + IMG_CHAR aszMemspaceNameSource[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicNameSource[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_CHAR aszMemspaceNameDest[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicNameDest[PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetSource; + IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetDest; + IMG_DEVMEM_OFFSET_T uiNextSymNameSource; + IMG_DEVMEM_OFFSET_T uiNextSymNameDest; + + + PDUMP_GET_SCRIPT_STRING() + + eErr = PMR_PDumpSymbolicAddr(psPMRSource, + uiLogicalOffsetSource, + PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH, + aszMemspaceNameSource, + PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH, + aszSymbolicNameSource, + &uiPDumpSymbolicOffsetSource, + &uiNextSymNameSource); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + eErr = PMR_PDumpSymbolicAddr(psPMRDest, + uiLogicalOffsetDest, + PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH, + aszMemspaceNameDest, + PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH, + aszSymbolicNameDest, + &uiPDumpSymbolicOffsetDest, + &uiNextSymNameDest); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, + "WRW :%s:%s:0x%"IMG_UINT64_FMTSPECX" :%s:%s:0x%"IMG_UINT64_FMTSPECX, + aszMemspaceNameDest, aszSymbolicNameDest, + uiPDumpSymbolicOffsetDest, aszMemspaceNameSource, + 
aszSymbolicNameSource, uiPDumpSymbolicOffsetSource); + + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + PDUMP_UNLOCK(ui32Flags); + + PDUMP_RELEASE_SCRIPT_STRING() + return PVRSRV_OK; +} + +/****************************************************************************** + * Function Name : PDumpMemLabelToMem64 + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Create a PDUMP string, which represents a memory write from + * a memory label +******************************************************************************/ +PVRSRV_ERROR PDumpMemLabelToMem64(PMR *psPMRSource, + PMR *psPMRDest, + IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource, + IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + IMG_CHAR aszMemspaceNameSource[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicNameSource[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_CHAR aszMemspaceNameDest[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicNameDest[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetSource; + IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetDest; + IMG_DEVMEM_OFFSET_T uiNextSymNameSource; + IMG_DEVMEM_OFFSET_T uiNextSymNameDest; + + + PDUMP_GET_SCRIPT_STRING() + + eErr = PMR_PDumpSymbolicAddr(psPMRSource, + uiLogicalOffsetSource, + PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH, + aszMemspaceNameSource, + PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH, + aszSymbolicNameSource, + &uiPDumpSymbolicOffsetSource, + &uiNextSymNameSource); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + eErr = PMR_PDumpSymbolicAddr(psPMRDest, + uiLogicalOffsetDest, + PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH, + aszMemspaceNameDest, + PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH, + aszSymbolicNameDest, + &uiPDumpSymbolicOffsetDest, + &uiNextSymNameDest); + + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + eErr = 
PDumpSNPrintf(hScript, ui32MaxLen, + "WRW64 :%s:%s:0x%"IMG_UINT64_FMTSPECX" :%s:%s:0x%"IMG_UINT64_FMTSPECX, + aszMemspaceNameDest, aszSymbolicNameDest, + uiPDumpSymbolicOffsetDest, aszMemspaceNameSource, + aszSymbolicNameSource, uiPDumpSymbolicOffsetSource); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + PDUMP_UNLOCK(ui32Flags); + + PDUMP_RELEASE_SCRIPT_STRING(); + return PVRSRV_OK; +} + + + +/*! +******************************************************************************* + + @Function PDumpWriteVarSHRValueOp + + @Description + + Emits the PDump commands for the logical SHR operation + Var <- Var SHR Value + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PDumpWriteVarSHRValueOp(const IMG_CHAR *pszInternalVariable, + const IMG_UINT64 ui64Value, + const IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eErr; +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + IMG_CHAR *pszPDumpVarName; + IMG_UINT32 ui32UpperValue = (IMG_UINT32) (ui64Value >> 32); + IMG_UINT32 ui32LowerValue = (IMG_UINT32) (ui64Value); +#endif + + PDUMP_GET_SCRIPT_STRING(); + + eErr = PDumpSNPrintf(hScript, + ui32MaxLen, +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + "SHR %s %s 0x%X", +#else + "SHR %s %s 0x%"IMG_UINT64_FMTSPECX, +#endif + pszInternalVariable, + pszInternalVariable, +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + ui32LowerValue +#else + ui64Value +#endif + ); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + PDUMP_LOCK(ui32PDumpFlags); + PDumpWriteScript(hScript, ui32PDumpFlags); + +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + pszPDumpVarName = PDumpCreateIncVarNameStr(pszInternalVariable); + if (pszPDumpVarName == NULL) + { + PDUMP_RELEASE_SCRIPT_STRING() + PDUMP_UNLOCK(ui32PDumpFlags); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + eErr = PDumpSNPrintf(hScript, + 
ui32MaxLen, + "SHR %s %s 0x%X", + pszPDumpVarName, + pszPDumpVarName, + ui32UpperValue); + + PDumpFreeIncVarNameStr(pszPDumpVarName); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + PDUMP_UNLOCK(ui32PDumpFlags); + return eErr; + } + + PDumpWriteScript(hScript, ui32PDumpFlags); +#endif + + PDUMP_UNLOCK(ui32PDumpFlags); + + PDUMP_RELEASE_SCRIPT_STRING(); + return PVRSRV_OK; +} + + +/*! +******************************************************************************* + + @Function PDumpWriteRegANDValueOp + + @Description + + Emits the PDump commands for the logical AND operation + Var <- Var AND Value + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PDumpWriteVarANDValueOp(const IMG_CHAR *pszInternalVariable, + const IMG_UINT64 ui64Value, + const IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eErr; +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + IMG_CHAR *pszPDumpVarName; + IMG_UINT32 ui32UpperValue = (IMG_UINT32) (ui64Value >> 32); + IMG_UINT32 ui32LowerValue = (IMG_UINT32) (ui64Value); +#endif + + PDUMP_GET_SCRIPT_STRING(); + + eErr = PDumpSNPrintf(hScript, + ui32MaxLen, +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + "AND %s %s 0x%X", +#else + "AND %s %s 0x%"IMG_UINT64_FMTSPECX, +#endif + pszInternalVariable, + pszInternalVariable, +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + ui32LowerValue +#else + ui64Value +#endif + ); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + PDUMP_LOCK(ui32PDumpFlags); + PDumpWriteScript(hScript, ui32PDumpFlags); + +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + pszPDumpVarName = PDumpCreateIncVarNameStr(pszInternalVariable); + if (pszPDumpVarName == NULL) + { + PDUMP_RELEASE_SCRIPT_STRING() + PDUMP_UNLOCK(ui32PDumpFlags); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + eErr = PDumpSNPrintf(hScript, + ui32MaxLen, + "AND %s %s 0x%X", + pszPDumpVarName, + pszPDumpVarName, + ui32UpperValue); + + 
PDumpFreeIncVarNameStr(pszPDumpVarName); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + PDUMP_UNLOCK(ui32PDumpFlags); + return eErr; + } + + PDumpWriteScript(hScript, ui32PDumpFlags); +#endif + + PDUMP_UNLOCK(ui32PDumpFlags); + + PDUMP_RELEASE_SCRIPT_STRING(); + return PVRSRV_OK; +} + + +/****************************************************************************** + * Function Name : PDumpSAW + * Inputs : pszDevSpaceName -- device space from which to output + * ui32Offset -- offset value from register base + * ui32NumSaveBytes -- number of bytes to output + * pszOutfileName -- name of file to output to + * ui32OutfileOffsetByte -- offset into output file to write + * uiPDumpFlags -- flags to pass to PDumpOSWriteScript + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Dumps the contents of a register bank into a file + * NB: ui32NumSaveBytes must be divisible by 4 +******************************************************************************/ +PVRSRV_ERROR PDumpSAW(IMG_CHAR *pszDevSpaceName, + IMG_UINT32 ui32HPOffsetBytes, + IMG_UINT32 ui32NumSaveBytes, + IMG_CHAR *pszOutfileName, + IMG_UINT32 ui32OutfileOffsetByte, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError; + + PDUMP_GET_SCRIPT_STRING() + + PVR_DPF((PVR_DBG_ERROR, "PDumpSAW")); + + eError = PDumpSNPrintf(hScript, + ui32MaxLen, + "SAW :%s:0x%x 0x%x 0x%x %s\n", + pszDevSpaceName, + ui32HPOffsetBytes, + ui32NumSaveBytes / (IMG_UINT32)sizeof(IMG_UINT32), + ui32OutfileOffsetByte, + pszOutfileName); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PDumpSAW PDumpSNPrintf failed: eError=%u", eError)); + PDUMP_RELEASE_SCRIPT_STRING() + return eError; + } + + PDUMP_LOCK(uiPDumpFlags); + if (! 
PDumpWriteScript(hScript, uiPDumpFlags)) + { + PVR_DPF((PVR_DBG_ERROR, "PDumpSAW PDumpWriteScript failed!")); + } + PDUMP_UNLOCK(uiPDumpFlags); + + PDUMP_RELEASE_SCRIPT_STRING() + return PVRSRV_OK; + +} + + +/****************************************************************************** + * Function Name : PDumpRegPolKM + * Inputs : Description of what this register read is trying to do + * pszPDumpDevName + * Register offset + * expected value + * mask for that value + * Outputs : None + * Returns : None + * Description : Create a PDUMP string which represents a register read + * with the expected value +******************************************************************************/ +PVRSRV_ERROR PDumpRegPolKM(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Mask, + IMG_UINT32 ui32Flags, + PDUMP_POLL_OPERATOR eOperator) +{ + /* Timings correct for Linux and XP */ + /* Timings should be passed in */ + #define POLL_DELAY 1000U + #define POLL_COUNT_LONG (2000000000U / POLL_DELAY) + #define POLL_COUNT_SHORT (1000000U / POLL_DELAY) + + PVRSRV_ERROR eErr; + IMG_UINT32 ui32PollCount; + + PDUMP_GET_SCRIPT_STRING(); + + ui32PollCount = POLL_COUNT_LONG; + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "POL :%s:0x%08X 0x%08X 0x%08X %d %u %d", + pszPDumpRegName, ui32RegAddr, ui32RegValue, + ui32Mask, eOperator, ui32PollCount, POLL_DELAY); + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + PDUMP_UNLOCK(ui32Flags); + + PDUMP_RELEASE_SCRIPT_STRING() + return PVRSRV_OK; +} + +/*! 
+ * \name PDumpOSVerifyLineEnding + */ +static void _PDumpVerifyLineEnding(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax) +{ + IMG_UINT32 ui32Count; + IMG_CHAR* pszBuf = hBuffer; + + /* strlen */ + ui32Count = OSStringNLength(pszBuf, ui32BufferSizeMax); + + /* Put \n sequence at the end if it isn't already there */ + if ((ui32Count >= 1) && (pszBuf[ui32Count-1] != '\n') && (ui32Count= (PVRSRV_PDUMP_MAX_COMMENT_SIZE+80))) + { + eErr = PVRSRV_ERROR_PDUMP_BUF_OVERFLOW; + } + + /* Append the comment to the script stream */ + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "-- %s", + pszTemp); +#else + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "-- %s", + pszComment); +#endif + if ((eErr != PVRSRV_OK) && + (eErr != PVRSRV_ERROR_PDUMP_BUF_OVERFLOW)) + { + PVR_LOG_GOTO_IF_ERROR(eErr, "PDumpSNPrintf", ErrUnlock); + } + + if (!PDumpWriteScript(hScript, ui32Flags)) + { + if (PDUMP_IS_CONTINUOUS(ui32Flags)) + { + eErr = PVRSRV_ERROR_PDUMP_BUFFER_FULL; + PVR_LOG_GOTO_IF_ERROR(eErr, "PDumpWriteScript", ErrUnlock); + } + else + { + eErr = PVRSRV_ERROR_CMD_NOT_PROCESSED; + PVR_LOG_GOTO_IF_ERROR(eErr, "PDumpWriteScript", ErrUnlock); + } + } + +ErrUnlock: + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; +} + +/****************************************************************************** + * Function Name : PDumpCommentKM + * Inputs : pszComment, ui32Flags + * Outputs : None + * Returns : None + * Description : Dumps a pre-formatted comment, primarily called from the + * : bridge. 
+******************************************************************************/ +PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr = PVRSRV_OK; + + PDUMP_LOCK(ui32Flags); + + eErr = _PDumpWriteComment(pszComment, ui32Flags); + + PDUMP_UNLOCK(ui32Flags); + return eErr; +} + +/****************************************************************************** + * Function Name : PDumpCommentWithFlagsNoLock + * Inputs : ui32Flags - PDump flags + * : pszFormat - format string for comment + * : ... - args for format string + * Outputs : None + * Returns : None + * Description : PDumps a comment, caller need to acquire pdump lock + * explicitly before calling this function. +******************************************************************************/ +PVRSRV_ERROR PDumpCommentWithFlagsNoLock(IMG_UINT32 ui32Flags, IMG_CHAR * pszFormat, ...) +{ + PVRSRV_ERROR eErr = PVRSRV_OK; + va_list args; + + va_start(args, pszFormat); + PDumpCommentWithFlagsNoLockVA(ui32Flags, pszFormat, args); + va_end(args); + + return eErr; +} + +/****************************************************************************** + * Function Name : PDumpCommentWithFlagsNoLockVA + * Inputs : ui32Flags - PDump flags + * : pszFormat - format string for comment + * : args - pre-started va_list args for format string + * Outputs : None + * Returns : None + * Description : PDumps a comment, caller need to acquire pdump lock + * explicitly before calling this function +******************************************************************************/ +PVRSRV_ERROR PDumpCommentWithFlagsNoLockVA(IMG_UINT32 ui32Flags, const IMG_CHAR * pszFormat, va_list args) +{ + IMG_INT32 iCount; + PVRSRV_ERROR eErr = PVRSRV_OK; + PDUMP_GET_MSG_STRING(); + + /* Construct the string */ + iCount = OSVSNPrintf(pszMsg, ui32MaxLen, pszFormat, args); + PVR_LOG_GOTO_IF_FALSE(((iCount != -1) && (iCount < ui32MaxLen)), "OSVSNPrintf", exit); + + eErr = _PDumpWriteComment(pszMsg, ui32Flags); + 
+exit: + PDUMP_RELEASE_MSG_STRING(); + return eErr; +} + +/****************************************************************************** + * Function Name : PDumpCommentWithFlags + * Inputs : ui32Flags - PDump flags + * : pszFormat - format string for comment + * : ... - args for format string + * Outputs : None + * Returns : None + * Description : PDumps a comments +******************************************************************************/ +PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32 ui32Flags, IMG_CHAR * pszFormat, ...) +{ + PVRSRV_ERROR eErr = PVRSRV_OK; + va_list args; + + va_start(args, pszFormat); + PDumpCommentWithFlagsVA(ui32Flags, pszFormat, args); + va_end(args); + + return eErr; +} + +/****************************************************************************** + * Function Name : PDumpCommentWithFlagsVA + * Inputs : ui32Flags - PDump flags + * : pszFormat - format string for comment + * : args - pre-started va_list args for format string + * Outputs : None + * Returns : None + * Description : PDumps a comments +******************************************************************************/ +PVRSRV_ERROR PDumpCommentWithFlagsVA(IMG_UINT32 ui32Flags, const IMG_CHAR * pszFormat, va_list args) +{ + IMG_INT32 iCount; + PVRSRV_ERROR eErr = PVRSRV_OK; + PDUMP_GET_MSG_STRING(); + + /* Construct the string */ + iCount = OSVSNPrintf(pszMsg, ui32MaxLen, pszFormat, args); + PVR_LOG_GOTO_IF_FALSE(((iCount != -1) && (iCount < ui32MaxLen)), "OSVSNPrintf", exit); + + PDUMP_LOCK(ui32Flags); + eErr = _PDumpWriteComment(pszMsg, ui32Flags); + PDUMP_UNLOCK(ui32Flags); + +exit: + PDUMP_RELEASE_MSG_STRING(); + return eErr; +} + +/****************************************************************************** + * Function Name : PDumpCOMCommand + * Inputs : ui32PDumpFlags - PDump flags + * : pszPdumpStr - string for COM command + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : PDumps a COM command 
+******************************************************************************/ +PVRSRV_ERROR PDumpCOMCommand(IMG_UINT32 ui32PDumpFlags, const IMG_CHAR * pszPdumpStr) +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING() + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "COM %s\n", pszPdumpStr); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING(); + return eErr; + } + + PDUMP_LOCK(ui32PDumpFlags); + PDumpWriteScript(hScript, ui32PDumpFlags); + PDUMP_UNLOCK(ui32PDumpFlags); + + PDUMP_RELEASE_SCRIPT_STRING(); + return PVRSRV_OK; +} + +/*************************************************************************/ /*! + * Function Name : PDumpPanic + * Inputs : ui32PanicNo - Unique number for panic condition + * : pszPanicMsg - Panic reason message limited to ~90 chars + * : pszPPFunc - Function name string where panic occurred + * : ui32PPline - Source line number where panic occurred + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : PDumps a panic assertion. Used when the host driver + * : detects a condition that will lead to an invalid PDump + * : script that cannot be played back off-line. + */ /*************************************************************************/ +PVRSRV_ERROR PDumpPanic(IMG_UINT32 ui32PanicNo, + IMG_CHAR* pszPanicMsg, + const IMG_CHAR* pszPPFunc, + IMG_UINT32 ui32PPline) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PDUMP_FLAGS_T uiPDumpFlags = PDUMP_FLAGS_CONTINUOUS; + PDUMP_GET_SCRIPT_STRING(); + + /* Log the panic condition to the live kern.log in both REL and DEB mode + * to aid user PDump troubleshooting. 
*/ + PVR_LOG(("PDUMP PANIC %08x: %s", ui32PanicNo, pszPanicMsg)); + PVR_DPF((PVR_DBG_MESSAGE, "PDUMP PANIC start %s:%d", pszPPFunc, ui32PPline)); + + /* Check the supplied panic reason string is within length limits */ + PVR_ASSERT(OSStringLength(pszPanicMsg)+sizeof("PANIC ") < PVRSRV_PDUMP_MAX_COMMENT_SIZE-1); + + /* Obtain lock to keep the multi-line + * panic statement together in a single atomic write */ + PDUMP_BLKSTART(uiPDumpFlags); + + + /* Write -- Panic start (Function:line) */ + eError = PDumpSNPrintf(hScript, ui32MaxLen, "-- Panic start (%s:%d)", pszPPFunc, ui32PPline); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpSNPrintf", e1); + (void)PDumpWriteScript(hScript, uiPDumpFlags); + + /* Write COM messages */ + eError = PDumpCOMCommand(uiPDumpFlags, + "**** Script invalid and not compatible with off-line playback. ****"); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpCOMCommand", e1); + + eError = PDumpCOMCommand(uiPDumpFlags, + "**** Check test parameters and driver configuration, stop imminent. ****"); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpCOMCommand", e1); + + /* Write PANIC no msg command */ + eError = PDumpSNPrintf(hScript, ui32MaxLen, "PANIC %08x %s", ui32PanicNo, pszPanicMsg); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpSNPrintf", e1); + (void)PDumpWriteScript(hScript, uiPDumpFlags); + + /* Write -- Panic end */ + eError = PDumpSNPrintf(hScript, ui32MaxLen, "-- Panic end"); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpSNPrintf", e1); + (void)PDumpWriteScript(hScript, uiPDumpFlags); + +e1: + PDUMP_BLKEND(uiPDumpFlags); + + PDUMP_RELEASE_SCRIPT_STRING(); + return eError; +} + +/*************************************************************************/ /*! 
+ * Function Name : PDumpCaptureError + * Inputs : ui32ErrorNo - Unique number for panic condition + * : pszErrorMsg - Panic reason message limited to ~90 chars + * : pszPPFunc - Function name string where panic occurred + * : ui32PPline - Source line number where panic occurred + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : PDumps an error string to the script file to interrupt + * : play back to inform user of a fatal issue that occurred + * : during PDump capture. + */ /*************************************************************************/ +PVRSRV_ERROR PDumpCaptureError(PVRSRV_ERROR ui32ErrorNo, + IMG_CHAR* pszErrorMsg, + const IMG_CHAR* pszPPFunc, + IMG_UINT32 ui32PPline) +{ + IMG_CHAR* pszFormatStr = "DRIVER_ERROR: %3d: %s"; + PDUMP_FLAGS_T uiPDumpFlags = PDUMP_FLAGS_CONTINUOUS; + + /* Need to return an error using this macro */ + PDUMP_GET_SCRIPT_STRING(); + + /* Check the supplied panic reason string is within length limits */ + PVR_ASSERT(OSStringLength(pszErrorMsg)+sizeof(pszFormatStr) < PVRSRV_PDUMP_MAX_COMMENT_SIZE-1); + + /* Write driver error message to the script file */ + (void) PDumpSNPrintf(hScript, ui32MaxLen, pszFormatStr, ui32ErrorNo, pszErrorMsg); + + /* Obtain lock to keep the multi-line + * panic statement together in a single atomic write */ + PDUMP_LOCK(uiPDumpFlags); + (void) PDumpWriteScript(hScript, uiPDumpFlags); + PDUMP_UNLOCK(uiPDumpFlags); + + PDUMP_RELEASE_SCRIPT_STRING(); + return PVRSRV_OK; +} + +/*! 
*******************************************************************************

 @Function	PDumpBitmapKM

 @Description

 Dumps a bitmap from device memory to a file. Multi-planar YUV formats are
 emitted as a single SII command describing every plane; plain YUV8 data is
 emitted as a SAB dump instead; all other (single-plane) formats use the
 generic SII form.

 @Input    psDevId
 @Input    pszFileName
 @Input    ui32FileOffset
 @Input    ui32Width
 @Input    ui32Height
 @Input    ui32StrideInBytes
 @Input    sDevBaseAddr
 @Input    ui32Size
 @Input    ePixelFormat
 @Input    eMemFormat
 @Input    ui32PDumpFlags

 @Return   PVRSRV_ERROR :

******************************************************************************/
PVRSRV_ERROR PDumpBitmapKM(	PVRSRV_DEVICE_NODE *psDeviceNode,
				IMG_CHAR *pszFileName,
				IMG_UINT32 ui32FileOffset,
				IMG_UINT32 ui32Width,
				IMG_UINT32 ui32Height,
				IMG_UINT32 ui32StrideInBytes,
				IMG_DEV_VIRTADDR sDevBaseAddr,
				IMG_UINT32 ui32MMUContextID,
				IMG_UINT32 ui32Size,
				PDUMP_PIXEL_FORMAT ePixelFormat,
				IMG_UINT32 ui32AddrMode,
				IMG_UINT32 ui32PDumpFlags)
{
	PVRSRV_DEVICE_IDENTIFIER *psDevId = &psDeviceNode->sDevId;
	PVRSRV_ERROR eErr = PVRSRV_OK;
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
	/* NOTE(review): dereferences psRGXFWIfFwSysData without a NULL check —
	 * presumably only reachable once FW config is mapped; confirm callers. */
	IMG_BOOL bIsFBC31 = psDevInfo->psRGXFWIfFwSysData->
					ui32ConfigFlags & RGXFWIF_INICFG_FBCDC_V3_1_EN;
	PDUMP_GET_SCRIPT_STRING();

	/* Lock held across every comment/script write so the comment and its
	 * SII/SAB command stay adjacent in the script. */
	PDUMP_LOCK(ui32PDumpFlags);

	PDumpCommentWithFlagsNoLock(ui32PDumpFlags, "Dump bitmap of render.");

	/* Overwrite incoming addrmode compat field if FBC v3.1 is enabled. */
	if (bIsFBC31 &&
	    (ui32AddrMode & PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_MASK) == PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_RESOURCE)
	{
		ui32AddrMode &= ~PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_MASK;
		ui32AddrMode |= PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_1_RESOURCE;
	}

	switch (ePixelFormat)
	{
		case PVRSRV_PDUMP_PIXEL_FORMAT_YUV8:
		{
			/* Raw YUV8: no plane structure to describe, dump as SAB. */
			PDumpCommentWithFlagsNoLock(ui32PDumpFlags, "YUV data. Switching from SII to SAB. Width=0x%08X Height=0x%08X Stride=0x%08X",
						    ui32Width, ui32Height, ui32StrideInBytes);
			eErr = PDumpSNPrintf(hScript,
					     ui32MaxLen,
					     "SAB :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X %s.bin\n",
					     psDevId->pszPDumpDevName,
					     ui32MMUContextID,
					     sDevBaseAddr.uiAddr,
					     ui32Size,
					     ui32FileOffset,
					     pszFileName);

			PVR_GOTO_IF_ERROR(eErr, error);

			PDumpWriteScript(hScript, ui32PDumpFlags);
			break;
		}
		case PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8: // YUV420 2 planes
		{
			const IMG_UINT32 ui32Plane0Size = ui32StrideInBytes*ui32Height;
			const IMG_UINT32 ui32Plane1Size = ui32Plane0Size>>1; // YUV420
			const IMG_UINT32 ui32Plane1FileOffset = ui32FileOffset + ui32Plane0Size;
			const IMG_UINT32 ui32Plane1MemOffset = ui32Plane0Size;

			PDumpCommentWithFlagsNoLock(ui32PDumpFlags, "YUV420 2-plane. Width=0x%08X Height=0x%08X Stride=0x%08X",
						    ui32Width, ui32Height, ui32StrideInBytes);
			eErr = PDumpSNPrintf(hScript,
					     ui32MaxLen,
					     "SII %s %s.bin :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X",
					     pszFileName,
					     pszFileName,

					     // Plane 0 (Y)
					     psDevId->pszPDumpDevName,	// memsp
					     ui32MMUContextID,		// Context id
					     sDevBaseAddr.uiAddr,	// virtaddr
					     ui32Plane0Size,		// size
					     ui32FileOffset,		// fileoffset

					     // Plane 1 (UV)
					     psDevId->pszPDumpDevName,	// memsp
					     ui32MMUContextID,		// Context id
					     sDevBaseAddr.uiAddr+ui32Plane1MemOffset, // virtaddr
					     ui32Plane1Size,		// size
					     ui32Plane1FileOffset,	// fileoffset

					     ePixelFormat,
					     ui32Width,
					     ui32Height,
					     ui32StrideInBytes,
					     ui32AddrMode);

			PVR_GOTO_IF_ERROR(eErr, error);

			PDumpWriteScript(hScript, ui32PDumpFlags);
			break;
		}

		case PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12: // YUV420 3 planes
		{
			const IMG_UINT32 ui32Plane0Size = ui32StrideInBytes*ui32Height;
			const IMG_UINT32 ui32Plane1Size = ui32Plane0Size>>2; // YUV420
			const IMG_UINT32 ui32Plane2Size = ui32Plane1Size;
			const IMG_UINT32 ui32Plane1FileOffset = ui32FileOffset + ui32Plane0Size;
			const IMG_UINT32 ui32Plane2FileOffset = ui32Plane1FileOffset + ui32Plane1Size;
			const IMG_UINT32 ui32Plane1MemOffset = ui32Plane0Size;
			const IMG_UINT32 ui32Plane2MemOffset = ui32Plane0Size+ui32Plane1Size;

			PDumpCommentWithFlagsNoLock(ui32PDumpFlags, "YUV420 3-plane. Width=0x%08X Height=0x%08X Stride=0x%08X",
						    ui32Width, ui32Height, ui32StrideInBytes);
			eErr = PDumpSNPrintf(hScript,
					     ui32MaxLen,
					     "SII %s %s.bin :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X",
					     pszFileName,
					     pszFileName,

					     // Plane 0 (Y)
					     psDevId->pszPDumpDevName,	// memsp
					     ui32MMUContextID,		// MMU context id
					     sDevBaseAddr.uiAddr,	// virtaddr
					     ui32Plane0Size,		// size
					     ui32FileOffset,		// fileoffset

					     // Plane 1 (U)
					     psDevId->pszPDumpDevName,	// memsp
					     ui32MMUContextID,		// MMU context id
					     sDevBaseAddr.uiAddr+ui32Plane1MemOffset, // virtaddr
					     ui32Plane1Size,		// size
					     ui32Plane1FileOffset,	// fileoffset

					     // Plane 2 (V)
					     psDevId->pszPDumpDevName,	// memsp
					     ui32MMUContextID,		// MMU context id
					     sDevBaseAddr.uiAddr+ui32Plane2MemOffset, // virtaddr
					     ui32Plane2Size,		// size
					     ui32Plane2FileOffset,	// fileoffset

					     ePixelFormat,
					     ui32Width,
					     ui32Height,
					     ui32StrideInBytes,
					     ui32AddrMode);

			PVR_GOTO_IF_ERROR(eErr, error);

			PDumpWriteScript(hScript, ui32PDumpFlags);
			break;
		}

		case PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV32: // YV32 - 4 contiguous planes in the order VUYA, stride can be > width.
		{
			const IMG_UINT32 ui32PlaneSize = ui32StrideInBytes*ui32Height; // All 4 planes are the same size
			const IMG_UINT32 ui32Plane0FileOffset = ui32FileOffset + (ui32PlaneSize<<1);	// SII plane 0 is Y, which is YV32 plane 2
			const IMG_UINT32 ui32Plane1FileOffset = ui32FileOffset + ui32PlaneSize;		// SII plane 1 is U, which is YV32 plane 1
			const IMG_UINT32 ui32Plane2FileOffset = ui32FileOffset;				// SII plane 2 is V, which is YV32 plane 0
			const IMG_UINT32 ui32Plane3FileOffset = ui32Plane0FileOffset + ui32PlaneSize;	// SII plane 3 is A, which is YV32 plane 3
			const IMG_UINT32 ui32Plane0MemOffset = ui32PlaneSize<<1;
			const IMG_UINT32 ui32Plane1MemOffset = ui32PlaneSize;
			const IMG_UINT32 ui32Plane2MemOffset = 0;
			const IMG_UINT32 ui32Plane3MemOffset = ui32Plane0MemOffset + ui32PlaneSize;

			PDumpCommentWithFlagsNoLock(ui32PDumpFlags, "YV32 4 planes. Width=0x%08X Height=0x%08X Stride=0x%08X",
						    ui32Width, ui32Height, ui32StrideInBytes);

			PDumpCommentWithFlagsNoLock(ui32PDumpFlags, "YV32 plane size is 0x%08X", ui32PlaneSize);

			PDumpCommentWithFlagsNoLock(ui32PDumpFlags, "YV32 Plane 0 Mem Offset=0x%08X", ui32Plane0MemOffset);
			PDumpCommentWithFlagsNoLock(ui32PDumpFlags, "YV32 Plane 1 Mem Offset=0x%08X", ui32Plane1MemOffset);
			PDumpCommentWithFlagsNoLock(ui32PDumpFlags, "YV32 Plane 2 Mem Offset=0x%08X", ui32Plane2MemOffset);
			PDumpCommentWithFlagsNoLock(ui32PDumpFlags, "YV32 Plane 3 Mem Offset=0x%08X", ui32Plane3MemOffset);

			/*
			   SII <imageset> <filename>	:<memsp1>:v<id1>:<virtaddr1> <size1> <fileoffset1>	Y
							:<memsp2>:v<id2>:<virtaddr2> <size2> <fileoffset2>	U
							:<memsp3>:v<id3>:<virtaddr3> <size3> <fileoffset3>	V
							:<memsp4>:v<id4>:<virtaddr4> <size4> <fileoffset4>	A
							<pixfmt> <width> <height> <stride> <addrmode>
			*/
			eErr = PDumpSNPrintf(hScript,
					     ui32MaxLen,
					     "SII %s %s.bin :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X",
					     pszFileName,
					     pszFileName,

					     // Plane 0 (V)
					     psDevId->pszPDumpDevName,	// memsp
					     ui32MMUContextID,		// MMU context id
					     sDevBaseAddr.uiAddr+ui32Plane0MemOffset, // virtaddr
					     ui32PlaneSize,		// size
					     ui32Plane0FileOffset,	// fileoffset

					     // Plane 1 (U)
					     psDevId->pszPDumpDevName,	// memsp
					     ui32MMUContextID,		// MMU context id
					     sDevBaseAddr.uiAddr+ui32Plane1MemOffset, // virtaddr
					     ui32PlaneSize,		// size
					     ui32Plane1FileOffset,	// fileoffset

					     // Plane 2 (Y)
					     psDevId->pszPDumpDevName,	// memsp
					     ui32MMUContextID,		// MMU context id
					     sDevBaseAddr.uiAddr+ui32Plane2MemOffset, // virtaddr
					     ui32PlaneSize,		// size
					     ui32Plane2FileOffset,	// fileoffset

					     // Plane 3 (A)
					     psDevId->pszPDumpDevName,	// memsp
					     ui32MMUContextID,		// MMU context id
					     sDevBaseAddr.uiAddr+ui32Plane3MemOffset, // virtaddr
					     ui32PlaneSize,		// size
					     ui32Plane3FileOffset,	// fileoffset

					     ePixelFormat,
					     ui32Width,
					     ui32Height,
					     ui32StrideInBytes,
					     ui32AddrMode);

			PVR_GOTO_IF_ERROR(eErr, error);

			PDumpWriteScript(hScript, ui32PDumpFlags);
			break;
		}

		default: // Single plane formats
		{
			eErr = PDumpSNPrintf(hScript,
					     ui32MaxLen,
					     "SII %s %s.bin :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X",
					     pszFileName,
					     pszFileName,
					     psDevId->pszPDumpDevName,
					     ui32MMUContextID,
					     sDevBaseAddr.uiAddr,
					     ui32Size,
					     ui32FileOffset,
					     ePixelFormat,
					     ui32Width,
					     ui32Height,
					     ui32StrideInBytes,
					     ui32AddrMode);

			PVR_GOTO_IF_ERROR(eErr, error);

			PDumpWriteScript(hScript, ui32PDumpFlags);
			break;
		}
	}

error:
	PDUMP_UNLOCK(ui32PDumpFlags);

	PDUMP_RELEASE_SCRIPT_STRING()
	return eErr;
}

/*!
*******************************************************************************

 @Function	PDumpImageDescriptor

 @Description

 Dumps an OutputImage command and its associated header info.
+ + @Input psDeviceNode : device + @Input ui32MMUContextID : MMU context + @Input pszSABFileName : filename string + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR PDumpImageDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32MMUContextID, + IMG_CHAR *pszSABFileName, + IMG_DEV_VIRTADDR sData, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32LogicalWidth, + IMG_UINT32 ui32LogicalHeight, + IMG_UINT32 ui32PhysicalWidth, + IMG_UINT32 ui32PhysicalHeight, + PDUMP_PIXEL_FORMAT ePixFmt, + IMG_MEMLAYOUT eMemLayout, + IMG_FB_COMPRESSION eFBCompression, + const IMG_UINT32 *paui32FBCClearColour, + PDUMP_FBC_SWIZZLE eFBCSwizzle, + IMG_DEV_VIRTADDR sHeader, + IMG_UINT32 ui32HeaderSize, + IMG_UINT32 ui32PDumpFlags) +{ +#if !defined(SUPPORT_RGX) + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(ui32MMUContextID); + PVR_UNREFERENCED_PARAMETER(pszSABFileName); + PVR_UNREFERENCED_PARAMETER(sData); + PVR_UNREFERENCED_PARAMETER(ui32DataSize); + PVR_UNREFERENCED_PARAMETER(ui32LogicalWidth); + PVR_UNREFERENCED_PARAMETER(ui32LogicalHeight); + PVR_UNREFERENCED_PARAMETER(ui32PhysicalWidth); + PVR_UNREFERENCED_PARAMETER(ui32PhysicalHeight); + PVR_UNREFERENCED_PARAMETER(ePixFmt); + PVR_UNREFERENCED_PARAMETER(eMemLayout); + PVR_UNREFERENCED_PARAMETER(eFBCompression); + PVR_UNREFERENCED_PARAMETER(paui32FBCClearColour); + PVR_UNREFERENCED_PARAMETER(eFBCSwizzle); + PVR_UNREFERENCED_PARAMETER(sHeader); + PVR_UNREFERENCED_PARAMETER(ui32HeaderSize); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#else + PVRSRV_ERROR eErr = PVRSRV_OK; + IMG_CHAR *pszPDumpDevName = psDeviceNode->sDevId.pszPDumpDevName; + IMG_BYTE abyPDumpDesc[IMAGE_HEADER_SIZE]; + IMG_UINT32 ui32ParamOutPos, ui32SABOffset = 0; + + PDUMP_GET_SCRIPT_AND_FILE_STRING(); + + if (pszSABFileName == NULL) + { + eErr = PVRSRV_ERROR_INVALID_PARAMS; + goto error_release_script; + } + + /* Writing image 
descriptor to persistent buffer is not permitted */ + if (ui32PDumpFlags & PDUMP_FLAGS_PERSISTENT) + { + goto error_release_script; + } + +#if defined(SUPPORT_VALIDATION) && (defined(SUPPORT_FBCDC_SIGNATURE_CHECK) || defined(SUPPORT_TRP)) + { + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + + /* + * When the render data is deliberately corrupted, don't dump the + * render as this will fail in the image writer. + */ + if (psDevInfo->ui32ValidationFlags & RGX_VAL_SIG_CHECK_ERR_EN) + { + PDUMPCOMMENT("Deliberate FBCDC/TRP Signature mismatch. No image header written."); + goto error_release_script; + } + } +#endif + + /* Prepare OutputImage descriptor header */ + eErr = RGXPDumpPrepareOutputImageDescriptorHdr( psDeviceNode, + ui32HeaderSize, + ui32DataSize, + ui32LogicalWidth, + ui32LogicalHeight, + ui32PhysicalWidth, + ui32PhysicalHeight, + ePixFmt, + eMemLayout, + eFBCompression, + paui32FBCClearColour, + eFBCSwizzle, + &(abyPDumpDesc[0])); + PVR_LOG_GOTO_IF_ERROR(eErr, "RGXPDumpPrepareOutputImageDescriptorHdr", error_release_script); + + PDUMP_LOCK(ui32PDumpFlags); + + PDumpCommentWithFlagsNoLock(ui32PDumpFlags, "Dump Image descriptor"); + + if (ePixFmt == PVRSRV_PDUMP_PIXEL_FORMAT_YUV8 + || ePixFmt == PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12 + || ePixFmt == PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV8 + || ePixFmt == PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8 + || ePixFmt == PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV10 + || ePixFmt == PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV10 + || ePixFmt == PVRSRV_PDUMP_PIXEL_FORMAT_VY0UY1_8888 + || ePixFmt == PVRSRV_PDUMP_PIXEL_FORMAT_UY0VY1_8888 + || ePixFmt == PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888 + || ePixFmt == PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888) + { + IMG_UINT32 ui32ElementType; + IMG_UINT32 ui32ElementCount; + + PDumpCommentWithFlagsNoLock(ui32PDumpFlags, "YUV data. Switching from OutputImage to SAB. 
Width=0x%08X Height=0x%08X", + ui32LogicalWidth, ui32LogicalHeight); + + PDUMP_UNLOCK(ui32PDumpFlags); + + PDUMP_RELEASE_SCRIPT_AND_FILE_STRING(); + + ui32ElementType = 0; + ui32ElementCount = 0; + + /* Switch to CMD:OutputData with IBIN header. */ + return PDumpDataDescriptor(psDeviceNode, + ui32MMUContextID, + pszSABFileName, + sData, + ui32DataSize, + IBIN_HEADER_TYPE, + ui32ElementType, + ui32ElementCount, + ui32PDumpFlags); + } + + /* Write OutputImage descriptor header to parameter file */ + eErr = PDumpWriteParameter(abyPDumpDesc, + IMAGE_HEADER_SIZE, + ui32PDumpFlags, + &ui32ParamOutPos, + pszFileName); + if (eErr != PVRSRV_OK) + { + if (eErr != PVRSRV_ERROR_PDUMP_NOT_ALLOWED) + { + PDUMP_ERROR(eErr, "Failed to write device allocation to parameter file"); + PVR_DPF((PVR_DBG_ERROR, "Failed to write device allocation to parameter file, error %d", eErr)); + } + else + { + /* + * Write to parameter file prevented under the flags and + * current state of the driver so skip write to script and return. 
+ */ + eErr = PVRSRV_OK; + } + goto error; + } + + eErr = PDumpSNPrintf(hScript, + ui32MaxLenScript, + "MALLOC :%s:BINHEADER 0x%08X 0x%08X\n", + pszPDumpDevName, + IMAGE_HEADER_SIZE, + IMAGE_HEADER_SIZE); + PVR_GOTO_IF_ERROR(eErr, error); + PDumpWriteScript(hScript, ui32PDumpFlags); + + eErr = PDumpSNPrintf(hScript, + ui32MaxLenScript, + "LDB :%s:BINHEADER:0x00 0x%08x 0x%08x %s\n", + pszPDumpDevName, + IMAGE_HEADER_SIZE, + ui32ParamOutPos, + pszFileName); + PVR_GOTO_IF_ERROR(eErr, error); + PDumpWriteScript(hScript, ui32PDumpFlags); + + eErr = PDumpSNPrintf(hScript, + ui32MaxLenScript, + "SAB :%s:BINHEADER:0x00 0x%08X 0x00000000 %s.bin\n", + pszPDumpDevName, + IMAGE_HEADER_SIZE, + pszSABFileName); + PVR_GOTO_IF_ERROR(eErr, error); + PDumpWriteScript(hScript, ui32PDumpFlags); + + ui32SABOffset += IMAGE_HEADER_SIZE; + + /* + * Write out the header section if image is FB compressed + */ + if (eFBCompression != IMG_FB_COMPRESSION_NONE) + { + eErr = PDumpSNPrintf(hScript, + ui32MaxLenScript, + "SAB :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X %s.bin\n", + pszPDumpDevName, + ui32MMUContextID, + (IMG_UINT64)sHeader.uiAddr, + ui32HeaderSize, + ui32SABOffset, + pszSABFileName); + PVR_GOTO_IF_ERROR(eErr, error); + PDumpWriteScript(hScript, ui32PDumpFlags); + + ui32SABOffset += ui32HeaderSize; + } + + /* + * Now dump out the actual data associated with the surface + */ + eErr = PDumpSNPrintf(hScript, + ui32MaxLenScript, + "SAB :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X %s.bin\n", + pszPDumpDevName, + ui32MMUContextID, + (IMG_UINT64)sData.uiAddr, + ui32DataSize, + ui32SABOffset, + pszSABFileName); + + PVR_GOTO_IF_ERROR(eErr, error); + PDumpWriteScript(hScript, ui32PDumpFlags); + + /* + * The OutputImage command is required to trigger processing of the output + * data + */ + eErr = PDumpSNPrintf(hScript, + ui32MaxLenScript, + "CMD:OutputImage %s.bin\n", + pszSABFileName); + PVR_GOTO_IF_ERROR(eErr, error); + PDumpWriteScript(hScript, ui32PDumpFlags); + + eErr = 
PDumpSNPrintf(hScript, + ui32MaxLenScript, + "FREE :%s:BINHEADER\n", + pszPDumpDevName); + PVR_GOTO_IF_ERROR(eErr, error); + PDumpWriteScript(hScript, ui32PDumpFlags); + +error: + PDUMP_UNLOCK(ui32PDumpFlags); +error_release_script: + PDUMP_RELEASE_SCRIPT_AND_FILE_STRING() + return eErr; +#endif +} + +/*! +******************************************************************************* + + @Function PDumpDataDescriptor + + @Description + + Dumps an OutputData command and its associated header info. + + @Input psDeviceNode : device + @Input ui32MMUContextID : MMU context + @Input pszSABFileName : filename string + @Input sData : GPU virtual address of data + @Input ui32HeaderType : Header type + @Input ui32DataSize : Data size + @Input ui32ElementType : Element type being dumped + @Input ui32ElementCount : Number of elements to be dumped + @Input ui32PDumpFlags : PDump flags + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR PDumpDataDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32MMUContextID, + IMG_CHAR *pszSABFileName, + IMG_DEV_VIRTADDR sData, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32HeaderType, + IMG_UINT32 ui32ElementType, + IMG_UINT32 ui32ElementCount, + IMG_UINT32 ui32PDumpFlags) +{ +#if !defined(SUPPORT_RGX) + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(ui32MMUContextID); + PVR_UNREFERENCED_PARAMETER(pszSABFileName); + PVR_UNREFERENCED_PARAMETER(sData); + PVR_UNREFERENCED_PARAMETER(ui32DataSize); + PVR_UNREFERENCED_PARAMETER(ui32ElementType); + PVR_UNREFERENCED_PARAMETER(ui32ElementCount); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#else + PVRSRV_ERROR eErr = PVRSRV_OK; + IMG_CHAR *pszPDumpDevName = psDeviceNode->sDevId.pszPDumpDevName; + IMG_BYTE abyPDumpDesc[DATA_HEADER_SIZE]; + IMG_UINT32 ui32ParamOutPos, ui32SABOffset = 0; + IMG_UINT32 ui32HeaderSize; + + PDUMP_GET_SCRIPT_AND_FILE_STRING(); + + 
PVR_GOTO_IF_INVALID_PARAM(pszSABFileName, eErr, error_release_script); + + if (ui32HeaderType == DATA_HEADER_TYPE) + { + ui32HeaderSize = DATA_HEADER_SIZE; + } + else if (ui32HeaderType == IBIN_HEADER_TYPE) + { + ui32HeaderSize = IBIN_HEADER_SIZE; + } + else + { + PVR_GOTO_WITH_ERROR(eErr, PVRSRV_ERROR_INVALID_PARAMS, error_release_script); + } + + /* Writing data descriptor to persistent buffer is not permitted */ + if (ui32PDumpFlags & PDUMP_FLAGS_PERSISTENT) + { + goto error_release_script; + } + + /* Prepare OutputData descriptor header */ + eErr = RGXPDumpPrepareOutputDataDescriptorHdr( psDeviceNode, + ui32HeaderType, + ui32DataSize, + ui32ElementType, + ui32ElementCount, + &(abyPDumpDesc[0])); + PVR_LOG_GOTO_IF_ERROR(eErr, "RGXPDumpPrepareOutputDataDescriptorHdr", error_release_script); + + PDUMP_LOCK(ui32PDumpFlags); + + PDumpCommentWithFlagsNoLock(ui32PDumpFlags, "Dump Data descriptor"); + + /* Write OutputImage command header to parameter file */ + eErr = PDumpWriteParameter(abyPDumpDesc, + ui32HeaderSize, + ui32PDumpFlags, + &ui32ParamOutPos, + pszFileName); + if (eErr != PVRSRV_OK) + { + if (eErr != PVRSRV_ERROR_PDUMP_NOT_ALLOWED) + { + PDUMP_ERROR(eErr, "Failed to write device allocation to parameter file"); + PVR_DPF((PVR_DBG_ERROR, "Failed to write device allocation to parameter file, error %d", eErr)); + } + else + { + /* + * Write to parameter file prevented under the flags and + * current state of the driver so skip write to script and return. 
+ */ + eErr = PVRSRV_OK; + } + goto error; + } + + eErr = PDumpSNPrintf(hScript, + ui32MaxLenScript, + "MALLOC :%s:BINHEADER 0x%08X 0x%08X\n", + pszPDumpDevName, + ui32HeaderSize, + ui32HeaderSize); + PVR_GOTO_IF_ERROR(eErr, error); + PDumpWriteScript(hScript, ui32PDumpFlags); + + eErr = PDumpSNPrintf(hScript, + ui32MaxLenScript, + "LDB :%s:BINHEADER:0x00 0x%08x 0x%08x %s\n", + pszPDumpDevName, + ui32HeaderSize, + ui32ParamOutPos, + pszFileName); + PVR_GOTO_IF_ERROR(eErr, error); + PDumpWriteScript(hScript, ui32PDumpFlags); + + eErr = PDumpSNPrintf(hScript, + ui32MaxLenScript, + "SAB :%s:BINHEADER:0x00 0x%08X 0x00000000 %s.bin\n", + pszPDumpDevName, + ui32HeaderSize, + pszSABFileName); + PVR_GOTO_IF_ERROR(eErr, error); + PDumpWriteScript(hScript, ui32PDumpFlags); + + ui32SABOffset += ui32HeaderSize; + + /* + * Now dump out the actual data associated + */ + eErr = PDumpSNPrintf(hScript, + ui32MaxLenScript, + "SAB :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X %s.bin\n", + pszPDumpDevName, + ui32MMUContextID, + (IMG_UINT64)sData.uiAddr, + ui32DataSize, + ui32SABOffset, + pszSABFileName); + + PVR_GOTO_IF_ERROR(eErr, error); + PDumpWriteScript(hScript, ui32PDumpFlags); + + /* + * The OutputData command is required to trigger processing of the output + * data + */ + eErr = PDumpSNPrintf(hScript, + ui32MaxLenScript, + "CMD:OutputData %s.bin\n", + pszSABFileName); + PVR_GOTO_IF_ERROR(eErr, error); + PDumpWriteScript(hScript, ui32PDumpFlags); + + eErr = PDumpSNPrintf(hScript, + ui32MaxLenScript, + "FREE :%s:BINHEADER\n", + pszPDumpDevName); + PVR_GOTO_IF_ERROR(eErr, error); + PDumpWriteScript(hScript, ui32PDumpFlags); + +error: + PDUMP_UNLOCK(ui32PDumpFlags); +error_release_script: + PDUMP_RELEASE_SCRIPT_AND_FILE_STRING() + return eErr; +#endif +} + +/*! 
+******************************************************************************* + + @Function PDumpReadRegKM + + @Description + + Dumps a read from a device register to a file + + @Input psConnection : connection info + @Input pszFileName + @Input ui32FileOffset + @Input ui32Address + @Input ui32Size + @Input ui32PDumpFlags + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR PDumpReadRegKM ( IMG_CHAR *pszPDumpRegName, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32Address, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING(); + + PVR_UNREFERENCED_PARAMETER(ui32Size); + + eErr = PDumpSNPrintf(hScript, + ui32MaxLen, + "SAB :%s:0x%08X 0x%08X %s", + pszPDumpRegName, + ui32Address, + ui32FileOffset, + pszFileName); + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING(); + return eErr; + } + + PDUMP_LOCK(ui32PDumpFlags); + PDumpWriteScript(hScript, ui32PDumpFlags); + PDUMP_UNLOCK(ui32PDumpFlags); + + PDUMP_RELEASE_SCRIPT_STRING(); + return PVRSRV_OK; +} + +/****************************************************************************** + * Function Name : PDumpRegRead32ToInternalVar + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Create a PDUMP string, which reads register into an + * internal variable +******************************************************************************/ +PVRSRV_ERROR PDumpRegRead32ToInternalVar(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32Reg, + IMG_CHAR *pszInternalVar, + IMG_UINT32 ui32Flags) + +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING() + + eErr = PDumpSNPrintf(hScript, + ui32MaxLen, + "RDW %s :%s:0x%08X", + pszInternalVar, + pszPDumpRegName, + ui32Reg); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING(); + return eErr; + } + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + PDUMP_UNLOCK(ui32Flags); + + PDUMP_RELEASE_SCRIPT_STRING(); + 
return PVRSRV_OK; +} + +/****************************************************************************** + @name PDumpRegRead32 + @brief Dump 32-bit register read to script + @param pszPDumpDevName - pdump device name + @param ui32RegOffset - register offset + @param ui32Flags - pdump flags + @return Error +******************************************************************************/ +PVRSRV_ERROR PDumpRegRead32(IMG_CHAR *pszPDumpRegName, + const IMG_UINT32 ui32RegOffset, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING(); + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "RDW :%s:0x%X", + pszPDumpRegName, + ui32RegOffset); + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING(); + return eErr; + } + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + PDUMP_UNLOCK(ui32Flags); + + PDUMP_RELEASE_SCRIPT_STRING(); + return PVRSRV_OK; +} + +/****************************************************************************** + @name PDumpRegRead64ToInternalVar + @brief Read 64-bit register into an internal variable + @param pszPDumpDevName - pdump device name + @param ui32RegOffset - register offset + @param ui32Flags - pdump flags + @return Error +******************************************************************************/ +PVRSRV_ERROR PDumpRegRead64ToInternalVar(IMG_CHAR *pszPDumpRegName, + IMG_CHAR *pszInternalVar, + const IMG_UINT32 ui32RegOffset, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + IMG_CHAR *pszPDumpVarName; +#endif + PDUMP_GET_SCRIPT_STRING(); + +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "RDW %s :%s:0x%X", + pszInternalVar, + pszPDumpRegName, + ui32RegOffset); + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING(); + return eErr; + } + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + + pszPDumpVarName = PDumpCreateIncVarNameStr(pszInternalVar); + if (pszPDumpVarName == NULL) + { + 
PDUMP_RELEASE_SCRIPT_STRING() + PDUMP_UNLOCK(ui32Flags); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "RDW %s :%s:0x%X", + pszPDumpVarName, + pszPDumpRegName, + ui32RegOffset + 4); + + PDumpFreeIncVarNameStr(pszPDumpVarName); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING(); + PDUMP_UNLOCK(ui32Flags); + return eErr; + } + + PDumpWriteScript(hScript, ui32Flags); + PDUMP_UNLOCK(ui32Flags); + +#else + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "RDW64 %s :%s:0x%X", + pszInternalVar, + pszPDumpRegName, + ui32RegOffset); + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING(); + return eErr; + } + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + PDUMP_UNLOCK(ui32Flags); +#endif + + PDUMP_RELEASE_SCRIPT_STRING(); + return PVRSRV_OK; +} + + +/****************************************************************************** + @name PDumpRegRead64 + @brief Dump 64-bit register read to script + @param pszPDumpDevName - pdump device name + @param ui32RegOffset - register offset + @param ui32Flags - pdump flags + @return Error +******************************************************************************/ +PVRSRV_ERROR PDumpRegRead64(IMG_CHAR *pszPDumpRegName, + const IMG_UINT32 ui32RegOffset, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING(); + +#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "RDW :%s:0x%X", + pszPDumpRegName, ui32RegOffset); + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + return eErr; + } + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "RDW :%s:0x%X", + pszPDumpRegName, ui32RegOffset + 4); + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING() + PDUMP_UNLOCK(ui32Flags); + return eErr; + } + PDumpWriteScript(hScript, ui32Flags); + PDUMP_UNLOCK(ui32Flags); +#else + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "RDW64 :%s:0x%X", + 
pszPDumpRegName, + ui32RegOffset); + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING(); + return eErr; + } + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + PDUMP_UNLOCK(ui32Flags); +#endif + + PDUMP_RELEASE_SCRIPT_STRING(); + return PVRSRV_OK; +} + + +/****************************************************************************** + FUNCTION : PDumpWriteShiftedMaskedValue + + PURPOSE : Emits the PDump commands for writing a masked shifted address + into another location + + PARAMETERS : PDump symbolic name and offset of target word + PDump symbolic name and offset of source address + right shift amount + left shift amount + mask + + RETURNS : None +******************************************************************************/ +PVRSRV_ERROR +PDumpWriteShiftedMaskedValue(const IMG_CHAR *pszDestRegspaceName, + const IMG_CHAR *pszDestSymbolicName, + IMG_DEVMEM_OFFSET_T uiDestOffset, + const IMG_CHAR *pszRefRegspaceName, + const IMG_CHAR *pszRefSymbolicName, + IMG_DEVMEM_OFFSET_T uiRefOffset, + IMG_UINT32 uiSHRAmount, + IMG_UINT32 uiSHLAmount, + IMG_UINT32 uiMask, + IMG_DEVMEM_SIZE_T uiWordSize, + IMG_UINT32 uiPDumpFlags) +{ + PVRSRV_ERROR eError; + + /* Suffix of WRW command in PDump (i.e. WRW or WRW64) */ + const IMG_CHAR *pszWrwSuffix; + + /* Internal PDump register used for interim calculation */ + const IMG_CHAR *pszPDumpIntRegSpace; + IMG_UINT32 uiPDumpIntRegNum; + + PDUMP_GET_SCRIPT_STRING(); + + if ((uiWordSize != 4) && (uiWordSize != 8)) + { + PDUMP_RELEASE_SCRIPT_STRING(); + return PVRSRV_ERROR_NOT_SUPPORTED; + } + + pszWrwSuffix = (uiWordSize == 8) ? "64" : ""; + + /* Should really "Acquire" a pdump register here */ + pszPDumpIntRegSpace = pszDestRegspaceName; + uiPDumpIntRegNum = 1; + + PDUMP_LOCK(uiPDumpFlags); + eError = PDumpSNPrintf(hScript, + ui32MaxLen, + /* Should this be "MOV" instead? 
*/ + "WRW :%s:$%d :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC "\n", + /* dest */ + pszPDumpIntRegSpace, + uiPDumpIntRegNum, + /* src */ + pszRefRegspaceName, + pszRefSymbolicName, + uiRefOffset); + PVR_GOTO_IF_ERROR(eError, ErrUnlock); + + PDumpWriteScript(hScript, uiPDumpFlags); + + if (uiSHRAmount > 0) + { + eError = PDumpSNPrintf(hScript, + ui32MaxLen, + "SHR :%s:$%d :%s:$%d 0x%X\n", + /* dest */ + pszPDumpIntRegSpace, + uiPDumpIntRegNum, + /* src A */ + pszPDumpIntRegSpace, + uiPDumpIntRegNum, + /* src B */ + uiSHRAmount); + PVR_GOTO_IF_ERROR(eError, ErrUnlock); + PDumpWriteScript(hScript, uiPDumpFlags); + } + + if (uiSHLAmount > 0) + { + eError = PDumpSNPrintf(hScript, + ui32MaxLen, + "SHL :%s:$%d :%s:$%d 0x%X\n", + /* dest */ + pszPDumpIntRegSpace, + uiPDumpIntRegNum, + /* src A */ + pszPDumpIntRegSpace, + uiPDumpIntRegNum, + /* src B */ + uiSHLAmount); + PVR_GOTO_IF_ERROR(eError, ErrUnlock); + PDumpWriteScript(hScript, uiPDumpFlags); + } + + if (uiMask != (1ULL << (8*uiWordSize))-1) + { + eError = PDumpSNPrintf(hScript, + ui32MaxLen, + "AND :%s:$%d :%s:$%d 0x%X\n", + /* dest */ + pszPDumpIntRegSpace, + uiPDumpIntRegNum, + /* src A */ + pszPDumpIntRegSpace, + uiPDumpIntRegNum, + /* src B */ + uiMask); + PVR_GOTO_IF_ERROR(eError, ErrUnlock); + PDumpWriteScript(hScript, uiPDumpFlags); + } + + eError = PDumpSNPrintf(hScript, + ui32MaxLen, + "WRW%s :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " :%s:$%d\n", + pszWrwSuffix, + /* dest */ + pszDestRegspaceName, + pszDestSymbolicName, + uiDestOffset, + /* src */ + pszPDumpIntRegSpace, + uiPDumpIntRegNum); + PVR_GOTO_IF_ERROR(eError, ErrUnlock); + PDumpWriteScript(hScript, uiPDumpFlags); + +ErrUnlock: + PDUMP_UNLOCK(uiPDumpFlags); + PDUMP_RELEASE_SCRIPT_STRING(); + + return eError; +} + + +PVRSRV_ERROR +PDumpWriteSymbAddress(const IMG_CHAR *pszDestSpaceName, + IMG_DEVMEM_OFFSET_T uiDestOffset, + const IMG_CHAR *pszRefSymbolicName, + IMG_DEVMEM_OFFSET_T uiRefOffset, + const IMG_CHAR *pszPDumpDevName, + IMG_UINT32 ui32WordSize, + 
IMG_UINT32 ui32AlignShift, + IMG_UINT32 ui32Shift, + IMG_UINT32 uiPDumpFlags) +{ + const IMG_CHAR *pszWrwSuffix = ""; + PVRSRV_ERROR eError = PVRSRV_OK; + + PDUMP_GET_SCRIPT_STRING(); + + if (ui32WordSize == 8) + { + pszWrwSuffix = "64"; + } + + PDUMP_LOCK(uiPDumpFlags); + + if (ui32AlignShift != ui32Shift) + { + /* Write physical address into a variable */ + eError = PDumpSNPrintf(hScript, + ui32MaxLen, + "WRW%s :%s:$1 %s:" IMG_DEVMEM_OFFSET_FMTSPEC "\n", + pszWrwSuffix, + /* dest */ + pszPDumpDevName, + /* src */ + pszRefSymbolicName, + uiRefOffset); + PVR_GOTO_IF_ERROR(eError, symbAddress_error); + PDumpWriteScript(hScript, uiPDumpFlags); + + /* apply address alignment */ + eError = PDumpSNPrintf(hScript, + ui32MaxLen, + "SHR :%s:$1 :%s:$1 0x%X", + /* dest */ + pszPDumpDevName, + /* src A */ + pszPDumpDevName, + /* src B */ + ui32AlignShift); + PVR_GOTO_IF_ERROR(eError, symbAddress_error); + PDumpWriteScript(hScript, uiPDumpFlags); + + /* apply address shift */ + eError = PDumpSNPrintf(hScript, + ui32MaxLen, + "SHL :%s:$1 :%s:$1 0x%X", + /* dest */ + pszPDumpDevName, + /* src A */ + pszPDumpDevName, + /* src B */ + ui32Shift); + PVR_GOTO_IF_ERROR(eError, symbAddress_error); + PDumpWriteScript(hScript, uiPDumpFlags); + + + /* write result to register */ + eError = PDumpSNPrintf(hScript, + ui32MaxLen, + "WRW%s :%s:0x%08X :%s:$1", + pszWrwSuffix, + pszDestSpaceName, + (IMG_UINT32)uiDestOffset, + pszPDumpDevName); + PVR_GOTO_IF_ERROR(eError, symbAddress_error); + PDumpWriteScript(hScript, uiPDumpFlags); + } + else + { + eError = PDumpSNPrintf(hScript, + ui32MaxLen, + "WRW%s :%s:" IMG_DEVMEM_OFFSET_FMTSPEC " %s:" IMG_DEVMEM_OFFSET_FMTSPEC "\n", + pszWrwSuffix, + /* dest */ + pszDestSpaceName, + uiDestOffset, + /* src */ + pszRefSymbolicName, + uiRefOffset); + PVR_GOTO_IF_ERROR(eError, symbAddress_error); + PDumpWriteScript(hScript, uiPDumpFlags); + } + +symbAddress_error: + PDUMP_UNLOCK(uiPDumpFlags); + PDUMP_RELEASE_SCRIPT_STRING(); + + return eError; +} + 
+/****************************************************************************** + * Function Name : PDumpIDLWithFlags + * Inputs : Idle time in clocks + * Outputs : None + * Returns : Error + * Description : Dump IDL command to script +******************************************************************************/ +PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING(); + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "IDL %u", ui32Clocks); + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING(); + return eErr; + } + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + PDUMP_UNLOCK(ui32Flags); + + PDUMP_RELEASE_SCRIPT_STRING(); + return PVRSRV_OK; +} + + +/****************************************************************************** + * Function Name : PDumpIDL + * Inputs : Idle time in clocks + * Outputs : None + * Returns : Error + * Description : Dump IDL command to script +******************************************************************************/ +PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks) +{ + return PDumpIDLWithFlags(ui32Clocks, PDUMP_FLAGS_CONTINUOUS); +} + +/****************************************************************************** + * Function Name : PDumpRegBasedCBP + * Inputs : pszPDumpRegName, ui32RegOffset, ui32WPosVal, ui32PacketSize + * ui32BufferSize, ui32Flags + * Outputs : None + * Returns : Error + * Description : Dump CBP command to script +******************************************************************************/ +PVRSRV_ERROR PDumpRegBasedCBP(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegOffset, + IMG_UINT32 ui32WPosVal, + IMG_UINT32 ui32PacketSize, + IMG_UINT32 ui32BufferSize, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING(); + + eErr = PDumpSNPrintf(hScript, + ui32MaxLen, + "CBP :%s:0x%08X 0x%08X 0x%08X 0x%08X", + pszPDumpRegName, + ui32RegOffset, + ui32WPosVal, + ui32PacketSize, + ui32BufferSize); + if (eErr 
!= PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING(); + return eErr; + } + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + PDUMP_UNLOCK(ui32Flags); + + PDUMP_RELEASE_SCRIPT_STRING(); + return PVRSRV_OK; +} + +PVRSRV_ERROR PDumpTRG(IMG_CHAR *pszMemSpace, + IMG_UINT32 ui32MMUCtxID, + IMG_UINT32 ui32RegionID, + IMG_BOOL bEnable, + IMG_UINT64 ui64VAddr, + IMG_UINT64 ui64LenBytes, + IMG_UINT32 ui32XStride, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING(); + + if (bEnable) + { + eErr = PDumpSNPrintf(hScript, ui32MaxLen, + "TRG :%s:v%u %u 0x%08"IMG_UINT64_FMTSPECX" 0x%08"IMG_UINT64_FMTSPECX" %u", + pszMemSpace, ui32MMUCtxID, ui32RegionID, + ui64VAddr, ui64LenBytes, ui32XStride); + } + else + { + eErr = PDumpSNPrintf(hScript, ui32MaxLen, + "TRG :%s:v%u %u", + pszMemSpace, ui32MMUCtxID, ui32RegionID); + + } + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING(); + return eErr; + } + + PDUMP_LOCK(ui32Flags); + PDumpWriteScript(hScript, ui32Flags); + PDUMP_UNLOCK(ui32Flags); + + PDUMP_RELEASE_SCRIPT_STRING(); + + return PVRSRV_OK; +} + +/****************************************************************************** + * Function Name : PDumpConnectionNotify + * Description : Called by the srvcore to tell PDump core that the + * PDump capture and control client has connected +******************************************************************************/ +void PDumpConnectionNotify(void) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psThis; +#if defined(TL_BUFFER_STATS) + PVRSRV_ERROR eErr; +#endif + + OSAtomicIncrement(&g_sConnectionCount); + + /* Reset the parameter file attributes */ + g_PDumpParameters.sWOff.ui32Main = g_PDumpParameters.sWOff.ui32Init; + g_PDumpParameters.ui32FileIdx = 0; + + /* Reset the script file attributes */ + g_PDumpScript.ui32FileIdx = 0; + + /* The Main script & parameter buffers should be empty after the previous + * PDump capture if it completed correctly. 
+ * When PDump client is not connected, writes are prevented to Main + * buffers in PDumpWriteAllowed() since no capture range, no client, + * no writes to Main buffers for continuous flagged and regular writes. + */ + if (!TLStreamOutOfData(g_PDumpParameters.sCh.sMainStream.hTL)) /* !empty */ + { + PVR_DPF((PVR_DBG_ERROR, "PDump Main parameter buffer not empty, capture will be corrupt!")); + } + if (!TLStreamOutOfData(g_PDumpScript.sCh.sMainStream.hTL)) /* !empty */ + { + PVR_DPF((PVR_DBG_ERROR, "PDump Main script buffer not empty, capture will be corrupt!")); + } + +#if defined(TL_BUFFER_STATS) + eErr = TLStreamResetProducerByteCount(g_PDumpParameters.sCh.sMainStream.hTL, g_PDumpParameters.sWOff.ui32Init); + PVR_LOG_IF_ERROR(eErr, "TLStreamResetByteCount Parameter Main"); + + eErr = TLStreamResetProducerByteCount(g_PDumpScript.sCh.sMainStream.hTL, 0); + PVR_LOG_IF_ERROR(eErr, "TLStreamResetByteCount Script Main"); +#endif + + /* Loop over all known devices */ + psThis = psPVRSRVData->psDeviceNodeList; + while (psThis) + { + if (psThis->pfnPDumpInitDevice) + { + /* Reset pdump according to connected device */ + psThis->pfnPDumpInitDevice(psThis); + } + psThis = psThis->psNext; + } +} + +/****************************************************************************** + * Function Name : PDumpDisconnectionNotify + * Description : Called by the connection_server to tell PDump core that + * the PDump capture and control client has disconnected +******************************************************************************/ +void PDumpDisconnectionNotify(void) +{ + PVRSRV_ERROR eErr; + + if (PDumpCtrlCaptureOn()) + { + PVR_LOG(("pdump killed, capture files may be invalid or incomplete!")); + + /* Disable capture in server, in case PDump client was killed and did + * not get a chance to reset the capture parameters. + * Will set module state back to READY. 
+ */ + eErr = PDumpSetDefaultCaptureParamsKM(PDUMP_CAPMODE_UNSET, + PDUMP_FRAME_UNSET, PDUMP_FRAME_UNSET, 0, 0); + PVR_LOG_IF_ERROR(eErr, "PDumpSetDefaultCaptureParamsKM"); + } +} + +/****************************************************************************** + * Function Name : PDumpRegCondStr + * Inputs : Description of what this register read is trying to do + * pszPDumpDevName + * Register offset + * expected value + * mask for that value + * Outputs : PDump conditional test for use with 'IF' and 'DOW' + * Returns : None + * Description : Create a PDUMP conditional test. The string is allocated + * on the heap and should be freed by the caller on success. +******************************************************************************/ +PVRSRV_ERROR PDumpRegCondStr(IMG_CHAR **ppszPDumpCond, + IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Mask, + IMG_UINT32 ui32Flags, + PDUMP_POLL_OPERATOR eOperator) +{ + IMG_UINT32 ui32PollCount; + + PDUMP_GET_MSG_STRING(); + + ui32PollCount = POLL_COUNT_SHORT; + + if (0 == OSSNPrintf(pszMsg, ui32MaxLen, ":%s:0x%08X 0x%08X 0x%08X %d %u %d", + pszPDumpRegName, ui32RegAddr, ui32RegValue, + ui32Mask, eOperator, ui32PollCount, POLL_DELAY)) + { + PDUMP_RELEASE_MSG_STRING() + return PVRSRV_ERROR_INTERNAL_ERROR; + } + + *ppszPDumpCond = pszMsg; + + return PVRSRV_OK; +} + +/****************************************************************************** + * Function Name : PDumpInternalValCondStr + * Inputs : Description of what this register read is trying to do + * pszPDumpDevName + * Internal variable + * expected value + * mask for that value + * Outputs : PDump conditional test for use with 'IF' and 'DOW' + * Returns : None + * Description : Create a PDUMP conditional test. The string is allocated + * on the heap and should be freed by the caller on success. 
+******************************************************************************/ +PVRSRV_ERROR PDumpInternalValCondStr(IMG_CHAR **ppszPDumpCond, + IMG_CHAR *pszInternalVar, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Mask, + IMG_UINT32 ui32Flags, + PDUMP_POLL_OPERATOR eOperator) +{ + IMG_UINT32 ui32PollCount; + + PDUMP_GET_MSG_STRING(); + + ui32PollCount = POLL_COUNT_SHORT; + + if (0 == OSSNPrintf(pszMsg, ui32MaxLen, "%s 0x%08X 0x%08X %d %u %d", + pszInternalVar, ui32RegValue, + ui32Mask, eOperator, ui32PollCount, POLL_DELAY)) + { + PDUMP_RELEASE_MSG_STRING() + return PVRSRV_ERROR_INTERNAL_ERROR; + } + + *ppszPDumpCond = pszMsg; + + return PVRSRV_OK; +} + + +/****************************************************************************** + * Function Name : PDumpIfKM + * Inputs : pszPDumpCond - string for condition + * Outputs : None + * Returns : None + * Description : Create a PDUMP string which represents IF command + with condition. +******************************************************************************/ +PVRSRV_ERROR PDumpIfKM(IMG_CHAR *pszPDumpCond, IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING() + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "IF %s\n", pszPDumpCond); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING(); + return eErr; + } + + PDUMP_LOCK(ui32PDumpFlags); + PDumpWriteScript(hScript, ui32PDumpFlags); + PDUMP_UNLOCK(ui32PDumpFlags); + + PDUMP_RELEASE_SCRIPT_STRING(); + return PVRSRV_OK; +} + +/****************************************************************************** + * Function Name : PDumpElseKM + * Inputs : pszPDumpCond - string for condition + * Outputs : None + * Returns : None + * Description : Create a PDUMP string which represents ELSE command + with condition. 
+******************************************************************************/ +PVRSRV_ERROR PDumpElseKM(IMG_CHAR *pszPDumpCond, IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING() + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "ELSE %s\n", pszPDumpCond); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING(); + return eErr; + } + + PDUMP_LOCK(ui32PDumpFlags); + PDumpWriteScript(hScript, ui32PDumpFlags); + PDUMP_UNLOCK(ui32PDumpFlags); + + PDUMP_RELEASE_SCRIPT_STRING(); + + return PVRSRV_OK; +} + +/****************************************************************************** + * Function Name : PDumpFiKM + * Inputs : pszPDumpCond - string for condition + * Outputs : None + * Returns : None + * Description : Create a PDUMP string which represents FI command + with condition. +******************************************************************************/ +PVRSRV_ERROR PDumpFiKM(IMG_CHAR *pszPDumpCond, IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING() + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "FI %s\n", pszPDumpCond); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING(); + return eErr; + } + + PDUMP_LOCK(ui32PDumpFlags); + PDumpWriteScript(hScript, ui32PDumpFlags); + PDUMP_UNLOCK(ui32PDumpFlags); + + PDUMP_RELEASE_SCRIPT_STRING(); + + return PVRSRV_OK; +} + +/****************************************************************************** + * Function Name : PDumpStartDoLoopKM + * Inputs : None + * Outputs : None + * Returns : None + * Description : Create a PDUMP string which represents SDO command + with condition. 
+******************************************************************************/ +PVRSRV_ERROR PDumpStartDoLoopKM(IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING() + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "SDO"); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING(); + return eErr; + } + + PDUMP_LOCK(ui32PDumpFlags); + PDumpWriteScript(hScript, ui32PDumpFlags); + PDUMP_UNLOCK(ui32PDumpFlags); + + PDUMP_RELEASE_SCRIPT_STRING(); + + return PVRSRV_OK; +} + +/****************************************************************************** + * Function Name : PDumpEndDoWhileLoopKM + * Inputs : pszPDumpWhileCond - string for loop condition + * Outputs : None + * Returns : None + * Description : Create a PDUMP string which represents DOW command + with condition. +******************************************************************************/ +PVRSRV_ERROR PDumpEndDoWhileLoopKM(IMG_CHAR *pszPDumpWhileCond, IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING() + + eErr = PDumpSNPrintf(hScript, ui32MaxLen, "DOW %s\n", pszPDumpWhileCond); + + if (eErr != PVRSRV_OK) + { + PDUMP_RELEASE_SCRIPT_STRING(); + return eErr; + } + + PDUMP_LOCK(ui32PDumpFlags); + PDumpWriteScript(hScript, ui32PDumpFlags); + PDUMP_UNLOCK(ui32PDumpFlags); + + PDUMP_RELEASE_SCRIPT_STRING(); + + return PVRSRV_OK; +} + + +void PDumpLock(void) +{ + OSLockAcquire(g_hPDumpWriteLock); +} +void PDumpUnlock(void) +{ + OSLockRelease(g_hPDumpWriteLock); +} +static void PDumpAssertWriteLockHeld(void) +{ + /* It is expected to be g_hPDumpWriteLock is locked at this point. 
	 */
	PVR_ASSERT(OSLockIsLocked(g_hPDumpWriteLock));
}

/* Dump the PDump core's internal state (stream handles, retry/abort
 * counters, write offsets, capture control flags) to the kernel log for
 * debugging. Read-only: only PVR_LOG calls, no state is modified. */
void PDumpCommonDumpState(void)
{
	PVR_LOG(("--- PDUMP COMMON: g_PDumpScript.sCh.*.hTL (In, Mn, De, Bk) ( %p, %p, %p, %p )",
			g_PDumpScript.sCh.sInitStream.hTL, g_PDumpScript.sCh.sMainStream.hTL, g_PDumpScript.sCh.sDeinitStream.hTL, g_PDumpScript.sCh.sBlockStream.hTL));
	PVR_LOG(("--- PDUMP COMMON: g_PDumpScript.sCh.*.ui32BufferFullRetries (In, Mn, De, Bk) ( %5d, %5d, %5d, %5d )",
			g_PDumpScript.sCh.sInitStream.ui32BufferFullRetries,
			g_PDumpScript.sCh.sMainStream.ui32BufferFullRetries,
			g_PDumpScript.sCh.sDeinitStream.ui32BufferFullRetries,
			g_PDumpScript.sCh.sBlockStream.ui32BufferFullRetries));
	PVR_LOG(("--- PDUMP COMMON: g_PDumpScript.sCh.*.ui32BufferFullAborts (In, Mn, De, Bk) ( %5d, %5d, %5d, %5d )",
			g_PDumpScript.sCh.sInitStream.ui32BufferFullAborts,
			g_PDumpScript.sCh.sMainStream.ui32BufferFullAborts,
			g_PDumpScript.sCh.sDeinitStream.ui32BufferFullAborts,
			g_PDumpScript.sCh.sBlockStream.ui32BufferFullAborts));

	PVR_LOG(("--- PDUMP COMMON: g_PDumpScript.sCh.*.ui32HighestRetriesWatermark (In, Mn, De, Bk) ( %5d, %5d, %5d, %5d )",
			g_PDumpScript.sCh.sInitStream.ui32HighestRetriesWatermark,
			g_PDumpScript.sCh.sMainStream.ui32HighestRetriesWatermark,
			g_PDumpScript.sCh.sDeinitStream.ui32HighestRetriesWatermark,
			g_PDumpScript.sCh.sBlockStream.ui32HighestRetriesWatermark));
	PVR_LOG(("--- PDUMP COMMON: g_PDumpScript.ui32FileIdx( %d )", g_PDumpScript.ui32FileIdx));



	PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.sCh.*.hTL (In, Mn, De, Bk) ( %p, %p, %p, %p )",
			g_PDumpParameters.sCh.sInitStream.hTL, g_PDumpParameters.sCh.sMainStream.hTL, g_PDumpParameters.sCh.sDeinitStream.hTL, g_PDumpParameters.sCh.sBlockStream.hTL));
	PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.sCh.*.ui32BufferFullRetries (In, Mn, De, Bk) ( %5d, %5d, %5d, %5d )",
			g_PDumpParameters.sCh.sInitStream.ui32BufferFullRetries,
			g_PDumpParameters.sCh.sMainStream.ui32BufferFullRetries,
			g_PDumpParameters.sCh.sDeinitStream.ui32BufferFullRetries,
			g_PDumpParameters.sCh.sBlockStream.ui32BufferFullRetries));
	PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.sCh.*.ui32BufferFullAborts (In, Mn, De, Bk) ( %5d, %5d, %5d, %5d )",
			g_PDumpParameters.sCh.sInitStream.ui32BufferFullAborts,
			g_PDumpParameters.sCh.sMainStream.ui32BufferFullAborts,
			g_PDumpParameters.sCh.sDeinitStream.ui32BufferFullAborts,
			g_PDumpParameters.sCh.sBlockStream.ui32BufferFullAborts));
	PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.sCh.*.ui32HighestRetriesWatermark (In, Mn, De, Bk) ( %5d, %5d, %5d, %5d )",
			g_PDumpParameters.sCh.sInitStream.ui32HighestRetriesWatermark,
			g_PDumpParameters.sCh.sMainStream.ui32HighestRetriesWatermark,
			g_PDumpParameters.sCh.sDeinitStream.ui32HighestRetriesWatermark,
			g_PDumpParameters.sCh.sBlockStream.ui32HighestRetriesWatermark));


	PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.sWOff.* (In, Mn, De, Bk) ( %d, %d, %d, %d )",
			g_PDumpParameters.sWOff.ui32Init, g_PDumpParameters.sWOff.ui32Main, g_PDumpParameters.sWOff.ui32Deinit, g_PDumpParameters.sWOff.ui32Block));
	PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.ui32FileIdx( %d )", g_PDumpParameters.ui32FileIdx));

	PVR_LOG(("--- PDUMP COMMON: g_PDumpCtrl( %p ) eServiceState( %d ), IsDriverInInitPhase( %s ) ui32Flags( %x )",
			&g_PDumpCtrl, g_PDumpCtrl.eServiceState, CHECK_PDUMP_CONTROL_FLAG(FLAG_IS_DRIVER_IN_INIT_PHASE) ? "yes" : "no", g_PDumpCtrl.ui32Flags));
	PVR_LOG(("--- PDUMP COMMON: ui32DefaultCapMode( %d ) ui32CurrentFrame( %d )",
			g_PDumpCtrl.ui32DefaultCapMode, g_PDumpCtrl.ui32CurrentFrame));
	PVR_LOG(("--- PDUMP COMMON: sCaptureRange.ui32Start( %x ) sCaptureRange.ui32End( %x ) sCaptureRange.ui32Interval( %u )",
			g_PDumpCtrl.sCaptureRange.ui32Start, g_PDumpCtrl.sCaptureRange.ui32End, g_PDumpCtrl.sCaptureRange.ui32Interval));
	PVR_LOG(("--- PDUMP COMMON: IsInCaptureRange( %s ) InPowerTransition( %d )",
			CHECK_PDUMP_CONTROL_FLAG(FLAG_IS_IN_CAPTURE_RANGE) ? "yes" : "no", PDumpCtrlInPowerTransition()));
	PVR_LOG(("--- PDUMP COMMON: sBlockCtrl.ui32BlockLength( %d ), sBlockCtrl.ui32CurrentBlock( %d )",
			g_PDumpCtrl.sBlockCtrl.ui32BlockLength, g_PDumpCtrl.sBlockCtrl.ui32CurrentBlock));
}


/* Allocate and initialise per-connection PDump data (refcount starts at 1;
 * released via PDumpUnregisterConnection). On success ownership of the new
 * structure passes to the caller through *ppsPDumpConnectionData. */
PVRSRV_ERROR PDumpRegisterConnection(void *hSyncPrivData,
                                     PFN_PDUMP_SYNCBLOCKS pfnPDumpSyncBlocks,
                                     PDUMP_CONNECTION_DATA **ppsPDumpConnectionData)
{
	PDUMP_CONNECTION_DATA *psPDumpConnectionData;
	PVRSRV_ERROR eError;

	PVR_ASSERT(ppsPDumpConnectionData != NULL);
	PVR_ASSERT(pfnPDumpSyncBlocks != NULL);
	PVR_ASSERT(hSyncPrivData != NULL);

	psPDumpConnectionData = OSAllocMem(sizeof(*psPDumpConnectionData));
	PVR_GOTO_IF_NOMEM(psPDumpConnectionData, eError, fail_alloc);

	eError = OSLockCreate(&psPDumpConnectionData->hLock);
	PVR_GOTO_IF_ERROR(eError, fail_lockcreate);

	dllist_init(&psPDumpConnectionData->sListHead);
	OSAtomicWrite(&psPDumpConnectionData->sRefCount, 1);
	psPDumpConnectionData->ui32LastSetFrameNumber = PDUMP_FRAME_UNSET;
	psPDumpConnectionData->eLastEvent = PDUMP_TRANSITION_EVENT_NONE;
	psPDumpConnectionData->eFailedEvent = PDUMP_TRANSITION_EVENT_NONE;

	/*
	 * Although we don't take a ref count here, handle base destruction
	 * will ensure that any resource that might trigger us to do a Transition
	 * will have been freed before the sync blocks which are keeping the sync
	 * connection data alive.
	 */
	psPDumpConnectionData->hSyncPrivData = hSyncPrivData;
	psPDumpConnectionData->pfnPDumpSyncBlocks = pfnPDumpSyncBlocks;

	*ppsPDumpConnectionData = psPDumpConnectionData;

	return PVRSRV_OK;

fail_lockcreate:
	OSFreeMem(psPDumpConnectionData);
fail_alloc:
	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}

/* Drop the reference taken by PDumpRegisterConnection; the data is freed
 * when the last reference goes. */
void PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
{
	_PDumpConnectionRelease(psPDumpConnectionData);
}


/*!
 * \name	PDumpSNPrintf
 */
PVRSRV_ERROR PDumpSNPrintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...)
+{ + IMG_CHAR* pszBuf = hBuf; + IMG_INT32 n; + va_list vaArgs; + + va_start(vaArgs, pszFormat); + + n = OSVSNPrintf(pszBuf, ui32ScriptSizeMax, pszFormat, vaArgs); + + va_end(vaArgs); + + if (n>=(IMG_INT32)ui32ScriptSizeMax || n==-1) /* glibc >= 2.1 or glibc 2.0 */ + { + PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete.")); + + return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW; + } + +#if defined(PDUMP_DEBUG_OUTFILES) + OSAtomicIncrement(&g_sEveryLineCounter); +#endif + + /* Put line ending sequence at the end if it isn't already there */ + _PDumpVerifyLineEnding(pszBuf, ui32ScriptSizeMax); + + return PVRSRV_OK; +} + +#endif /* defined(PDUMP) */ diff --git a/drivers/mcst/gpu-imgtec/services/server/common/physheap.c b/drivers/mcst/gpu-imgtec/services/server/common/physheap.c new file mode 100644 index 000000000000..bf85b7a9ba5d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/physheap.c @@ -0,0 +1,343 @@ +/*************************************************************************/ /*! +@File physheap.c +@Title Physical heap management +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Management functions for the physical heap(s). A heap contains + all the information required by services when using memory from + that heap (such as CPU <> Device physical address translation). + A system must register one heap but can have more then one which + is why a heap must register with a (system) unique ID. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ /***************************************************************************/
#include "img_types.h"
#include "img_defs.h"
#include "physheap.h"
#include "allocmem.h"
#include "pvr_debug.h"
#include "osfunc.h"
#include "pvrsrv.h"

/* Descriptor for one registered physical heap. Heaps live on the
 * singly-linked global list g_psPhysHeapList and are looked up by
 * ui32PhysHeapID (see PhysHeapAcquire). */
struct _PHYS_HEAP_
{
	/*! ID of this physical memory heap */
	IMG_UINT32 ui32PhysHeapID;
	/*! The type of this heap */
	PHYS_HEAP_TYPE eType;

	/*! PDump name of this physical memory heap */
	IMG_CHAR *pszPDumpMemspaceName;
	/*! Private data for the translate routines */
	IMG_HANDLE hPrivData;
	/*! Function callbacks */
	PHYS_HEAP_FUNCTIONS *psMemFuncs;

	/*! Array of sub-regions of the heap */
	PHYS_HEAP_REGION *pasRegions;
	IMG_UINT32 ui32NumOfRegions;

	/*! Refcount */
	IMG_UINT32 ui32RefCount;
	/*! Pointer to next physical heap */
	struct _PHYS_HEAP_ *psNext;
};

/* Head of the global heap list; g_hPhysHeapLock guards acquire/release
 * refcounting (created in PhysHeapInit). */
static PHYS_HEAP *g_psPhysHeapList;
static POS_LOCK g_hPhysHeapLock;

/* With REFCOUNT_DEBUG the refcount transitions are traced; otherwise the
 * macro compiles away. */
#if defined(REFCOUNT_DEBUG)
#define PHYSHEAP_REFCOUNT_PRINT(fmt, ...)	\
	PVRSRVDebugPrintf(PVR_DBG_WARNING,	\
			  __FILE__,	\
			  __LINE__,	\
			  fmt,	\
			  __VA_ARGS__)
#else
#define PHYSHEAP_REFCOUNT_PRINT(fmt, ...)
+#endif + + +PVRSRV_ERROR PhysHeapRegister(PHYS_HEAP_CONFIG *psConfig, + PHYS_HEAP **ppsPhysHeap) +{ + PHYS_HEAP *psNew; + PHYS_HEAP *psTmp; + + PVR_DPF_ENTERED; + + if (psConfig->eType == PHYS_HEAP_TYPE_UNKNOWN) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Check this heap ID isn't already in use */ + psTmp = g_psPhysHeapList; + while (psTmp) + { + if (psTmp->ui32PhysHeapID == psConfig->ui32PhysHeapID) + { + return PVRSRV_ERROR_PHYSHEAP_ID_IN_USE; + } + psTmp = psTmp->psNext; + } + + psNew = OSAllocMem(sizeof(PHYS_HEAP)); + PVR_RETURN_IF_NOMEM(psNew); + + psNew->ui32PhysHeapID = psConfig->ui32PhysHeapID; + psNew->eType = psConfig->eType; + psNew->psMemFuncs = psConfig->psMemFuncs; + psNew->hPrivData = psConfig->hPrivData; + psNew->ui32RefCount = 0; + psNew->pszPDumpMemspaceName = psConfig->pszPDumpMemspaceName; + + psNew->pasRegions = psConfig->pasRegions; + psNew->ui32NumOfRegions = psConfig->ui32NumOfRegions; + + psNew->psNext = g_psPhysHeapList; + g_psPhysHeapList = psNew; + + *ppsPhysHeap = psNew; + + PVR_DPF_RETURN_RC1(PVRSRV_OK, *ppsPhysHeap); +} + +void PhysHeapUnregister(PHYS_HEAP *psPhysHeap) +{ + PVR_DPF_ENTERED1(psPhysHeap); + +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK) +#endif + { + PVR_ASSERT(psPhysHeap->ui32RefCount == 0); + } + + if (g_psPhysHeapList == psPhysHeap) + { + g_psPhysHeapList = psPhysHeap->psNext; + } + else + { + PHYS_HEAP *psTmp = g_psPhysHeapList; + + while (psTmp->psNext != psPhysHeap) + { + psTmp = psTmp->psNext; + } + psTmp->psNext = psPhysHeap->psNext; + } + + OSFreeMem(psPhysHeap); + + PVR_DPF_RETURN; +} + +PVRSRV_ERROR PhysHeapAcquire(IMG_UINT32 ui32PhysHeapID, + PHYS_HEAP **ppsPhysHeap) +{ + PHYS_HEAP *psTmp = g_psPhysHeapList; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_DPF_ENTERED1(ui32PhysHeapID); + + OSLockAcquire(g_hPhysHeapLock); + + while (psTmp) + { + if (psTmp->ui32PhysHeapID == ui32PhysHeapID) + { + break; + } + psTmp = psTmp->psNext; + 
} + + if (psTmp == NULL) + { + eError = PVRSRV_ERROR_PHYSHEAP_ID_INVALID; + } + else + { + psTmp->ui32RefCount++; + PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d", + __func__, psTmp, psTmp->ui32RefCount); + } + + OSLockRelease(g_hPhysHeapLock); + + *ppsPhysHeap = psTmp; + PVR_DPF_RETURN_RC1(eError, *ppsPhysHeap); +} + +void PhysHeapRelease(PHYS_HEAP *psPhysHeap) +{ + PVR_DPF_ENTERED1(psPhysHeap); + + OSLockAcquire(g_hPhysHeapLock); + psPhysHeap->ui32RefCount--; + PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d", + __func__, psPhysHeap, psPhysHeap->ui32RefCount); + OSLockRelease(g_hPhysHeapLock); + + PVR_DPF_RETURN; +} + +PHYS_HEAP_TYPE PhysHeapGetType(PHYS_HEAP *psPhysHeap) +{ + PVR_ASSERT(psPhysHeap->eType != PHYS_HEAP_TYPE_UNKNOWN); + return psPhysHeap->eType; +} + +/* + * This function will set the psDevPAddr to whatever the system layer + * has set it for the referenced region. + * It will not fail if the psDevPAddr is invalid. + */ +PVRSRV_ERROR PhysHeapRegionGetDevPAddr(PHYS_HEAP *psPhysHeap, + IMG_UINT32 ui32RegionId, + IMG_DEV_PHYADDR *psDevPAddr) +{ + if (ui32RegionId < psPhysHeap->ui32NumOfRegions) + { + *psDevPAddr = psPhysHeap->pasRegions[ui32RegionId].sCardBase; + return PVRSRV_OK; + } + else + { + return PVRSRV_ERROR_INVALID_PARAMS; + } +} + +/* + * This function will set the psCpuPAddr to whatever the system layer + * has set it for the referenced region. + * It will not fail if the psCpuPAddr is invalid. 
+ */ +PVRSRV_ERROR PhysHeapRegionGetCpuPAddr(PHYS_HEAP *psPhysHeap, + IMG_UINT32 ui32RegionId, + IMG_CPU_PHYADDR *psCpuPAddr) +{ + if (ui32RegionId < psPhysHeap->ui32NumOfRegions) + { + *psCpuPAddr = psPhysHeap->pasRegions[ui32RegionId].sStartAddr; + return PVRSRV_OK; + } + else + { + return PVRSRV_ERROR_INVALID_PARAMS; + } +} + +PVRSRV_ERROR PhysHeapRegionGetSize(PHYS_HEAP *psPhysHeap, + IMG_UINT32 ui32RegionId, + IMG_UINT64 *puiSize) +{ + if (ui32RegionId < psPhysHeap->ui32NumOfRegions) + { + *puiSize = psPhysHeap->pasRegions[ui32RegionId].uiSize; + return PVRSRV_OK; + } + else + { + return PVRSRV_ERROR_INVALID_PARAMS; + } +} + +void PhysHeapCpuPAddrToDevPAddr(PHYS_HEAP *psPhysHeap, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr) +{ + psPhysHeap->psMemFuncs->pfnCpuPAddrToDevPAddr(psPhysHeap->hPrivData, + ui32NumOfAddr, + psDevPAddr, + psCpuPAddr); +} + +void PhysHeapDevPAddrToCpuPAddr(PHYS_HEAP *psPhysHeap, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr) +{ + psPhysHeap->psMemFuncs->pfnDevPAddrToCpuPAddr(psPhysHeap->hPrivData, + ui32NumOfAddr, + psCpuPAddr, + psDevPAddr); +} + +IMG_UINT32 PhysHeapGetRegionId(PHYS_HEAP *psPhysHeap, + PVRSRV_MEMALLOCFLAGS_T uiAllocFlags) +{ + if (psPhysHeap->psMemFuncs->pfnGetRegionId == NULL) + { + return 0; + } + + return psPhysHeap->psMemFuncs->pfnGetRegionId(psPhysHeap->hPrivData, + uiAllocFlags); +} + +IMG_CHAR *PhysHeapPDumpMemspaceName(PHYS_HEAP *psPhysHeap) +{ + return psPhysHeap->pszPDumpMemspaceName; +} + +PVRSRV_ERROR PhysHeapInit(void) +{ + PVRSRV_ERROR eError; + + g_psPhysHeapList = NULL; + + eError = OSLockCreate(&g_hPhysHeapLock); + PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); + + return PVRSRV_OK; +} + +PVRSRV_ERROR PhysHeapDeinit(void) +{ + PVR_ASSERT(g_psPhysHeapList == NULL); + + OSLockDestroy(g_hPhysHeapLock); + + return PVRSRV_OK; +} + +IMG_UINT32 PhysHeapNumberOfRegions(PHYS_HEAP *psPhysHeap) +{ + return 
psPhysHeap->ui32NumOfRegions; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/common/physmem.c b/drivers/mcst/gpu-imgtec/services/server/common/physmem.c new file mode 100644 index 000000000000..c4cbf43f50df --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/physmem.c @@ -0,0 +1,655 @@ +/*************************************************************************/ /*! +@File physmem.c +@Title Physmem +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Common entry point for creation of RAM backed PMR's +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" +#include "device.h" +#include "physmem.h" +#include "pvrsrv.h" +#include "osfunc.h" +#include "pdump_physmem.h" +#include "pdump_km.h" +#include "rgx_heaps.h" +#include "pvr_ricommon.h" + +#if defined(DEBUG) +static IMG_UINT32 gPMRAllocFail; + +#if defined(LINUX) +#include + +module_param(gPMRAllocFail, uint, 0644); +MODULE_PARM_DESC(gPMRAllocFail, "When number of PMR allocs reaches " + "this value, it will fail (default value is 0 which " + "means that alloc function will behave normally)."); +#endif /* defined(LINUX) */ +#endif /* defined(DEBUG) */ + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#include "proc_stats.h" +#endif + +PVRSRV_ERROR DevPhysMemAlloc(PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32MemSize, + IMG_UINT32 ui32Log2Align, + const IMG_UINT8 u8Value, + IMG_BOOL bInitPage, +#if defined(PDUMP) + const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicAddress, + IMG_HANDLE *phHandlePtr, +#endif + IMG_HANDLE hMemHandle, + IMG_DEV_PHYADDR *psDevPhysAddr) +{ + void *pvCpuVAddr; + PVRSRV_ERROR eError; +#if defined(PDUMP) + IMG_CHAR szFilenameOut[PDUMP_PARAM_MAX_FILE_NAME]; + PDUMP_FILEOFFSET_T uiOffsetOut; + IMG_UINT32 
ui32PageSize; + IMG_UINT32 ui32PDumpMemSize = ui32MemSize; +#endif + PG_HANDLE *psMemHandle; + IMG_UINT64 uiMask; + IMG_DEV_PHYADDR sDevPhysAddr_int; + IMG_PID uiPid = 0; + + psMemHandle = hMemHandle; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + uiPid = psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT ? + PVR_SYS_ALLOC_PID : OSGetCurrentClientProcessIDKM(); +#endif + + /* Allocate the pages */ + eError = psDevNode->sDevMMUPxSetup.pfnDevPxAlloc(psDevNode, + TRUNCATE_64BITS_TO_SIZE_T(ui32MemSize), + psMemHandle, + &sDevPhysAddr_int, + uiPid); + PVR_LOG_RETURN_IF_ERROR(eError, "pfnDevPxAlloc:1"); + + /* Check to see if the page allocator returned pages with our desired + * alignment, which is not unlikely + */ + uiMask = (1 << ui32Log2Align) - 1; + if (ui32Log2Align && (sDevPhysAddr_int.uiAddr & uiMask)) + { + /* use over allocation instead */ + psDevNode->sDevMMUPxSetup.pfnDevPxFree(psDevNode, psMemHandle); + + ui32MemSize += (IMG_UINT32) uiMask; + eError = psDevNode->sDevMMUPxSetup.pfnDevPxAlloc(psDevNode, + TRUNCATE_64BITS_TO_SIZE_T(ui32MemSize), + psMemHandle, + &sDevPhysAddr_int, + uiPid); + PVR_LOG_RETURN_IF_ERROR(eError, "pfnDevPxAlloc:2"); + + sDevPhysAddr_int.uiAddr += uiMask; + sDevPhysAddr_int.uiAddr &= ~uiMask; + } + *psDevPhysAddr = sDevPhysAddr_int; + +#if defined(PDUMP) + ui32PageSize = ui32Log2Align? 
(1 << ui32Log2Align) : OSGetPageSize(); + eError = PDumpMalloc(pszDevSpace, + pszSymbolicAddress, + ui32PDumpMemSize, + ui32PageSize, + IMG_FALSE, + 0, + phHandlePtr, + PDUMP_NONE); + if (PVRSRV_OK != eError) + { + PDUMPCOMMENT("Allocating pages failed"); + *phHandlePtr = NULL; + } +#endif + + if (bInitPage) + { + /*Map the page to the CPU VA space */ + eError = psDevNode->sDevMMUPxSetup.pfnDevPxMap(psDevNode, + psMemHandle, + ui32MemSize, + &sDevPhysAddr_int, + &pvCpuVAddr); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_ERROR, "Unable to map the allocated page")); + psDevNode->sDevMMUPxSetup.pfnDevPxFree(psDevNode, psMemHandle); + return eError; + } + + /*Fill the memory with given content */ + OSDeviceMemSet(pvCpuVAddr, u8Value, ui32MemSize); + + /*Map the page to the CPU VA space */ + eError = psDevNode->sDevMMUPxSetup.pfnDevPxClean(psDevNode, + psMemHandle, + 0, + ui32MemSize); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_ERROR, "Unable to clean the allocated page")); + psDevNode->sDevMMUPxSetup.pfnDevPxUnMap(psDevNode, psMemHandle, pvCpuVAddr); + psDevNode->sDevMMUPxSetup.pfnDevPxFree(psDevNode, psMemHandle); + return eError; + } + +#if defined(PDUMP) + /* PDumping of the page contents can be done in two ways + * 1. Store the single byte init value to the .prm file + * and load the same value to the entire dummy page buffer + * This method requires lot of LDB's inserted into the out2.txt + * + * 2. Store the entire contents of the buffer to the .prm file + * and load them back. 
+ * This only needs a single LDB instruction in the .prm file + * and chosen this method + * size of .prm file might go up but that's not huge at least + * for this allocation + */ + /* Write the buffer contents to the prm file */ + eError = PDumpWriteParameterBlob(pvCpuVAddr, + ui32PDumpMemSize, + PDUMP_FLAGS_CONTINUOUS, + szFilenameOut, + sizeof(szFilenameOut), + &uiOffsetOut); + if (PVRSRV_OK == eError) + { + /* Load the buffer back to the allocated memory when playing the pdump */ + eError = PDumpPMRLDB(pszDevSpace, + pszSymbolicAddress, + 0, + ui32PDumpMemSize, + szFilenameOut, + uiOffsetOut, + PDUMP_FLAGS_CONTINUOUS); + if (PVRSRV_OK != eError) + { + PDUMP_ERROR(eError, "Failed to write LDB statement to script file"); + PVR_DPF((PVR_DBG_ERROR, "Failed to write LDB statement to script file, error %d", eError)); + } + } + else if (eError != PVRSRV_ERROR_PDUMP_NOT_ALLOWED) + { + PDUMP_ERROR(eError, "Failed to write device allocation to parameter file"); + PVR_DPF((PVR_DBG_ERROR, "Failed to write device allocation to parameter file, error %d", eError)); + } + else + { + /* Else write to parameter file prevented under the flags and + * current state of the driver so skip write to script and error IF. + * This is expected e.g., if not in the capture range. 
+ */ + eError = PVRSRV_OK; + } +#endif + + /* Unmap the page */ + psDevNode->sDevMMUPxSetup.pfnDevPxUnMap(psDevNode, + psMemHandle, + pvCpuVAddr); + } + + return PVRSRV_OK; +} + +void DevPhysMemFree(PVRSRV_DEVICE_NODE *psDevNode, +#if defined(PDUMP) + IMG_HANDLE hPDUMPMemHandle, +#endif + IMG_HANDLE hMemHandle) +{ + PG_HANDLE *psMemHandle; + + psMemHandle = hMemHandle; + psDevNode->sDevMMUPxSetup.pfnDevPxFree(psDevNode, psMemHandle); +#if defined(PDUMP) + if (NULL != hPDUMPMemHandle) + { + PDumpFree(hPDUMPMemHandle); + } +#endif + +} + + +/* Checks the input parameters and adjusts them if possible and necessary */ +static inline PVRSRV_ERROR _ValidateParams(IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 *puiLog2AllocPageSize, + IMG_DEVMEM_SIZE_T *puiSize, + PMR_SIZE_T *puiChunkSize) +{ + IMG_UINT32 uiLog2AllocPageSize = *puiLog2AllocPageSize; + IMG_DEVMEM_SIZE_T uiSize = *puiSize; + PMR_SIZE_T uiChunkSize = *puiChunkSize; + /* Sparse if we have different number of virtual and physical chunks plus + * in general all allocations with more than one virtual chunk */ + IMG_BOOL bIsSparse = (ui32NumVirtChunks != ui32NumPhysChunks || + ui32NumVirtChunks > 1) ? IMG_TRUE : IMG_FALSE; + + /* Protect against ridiculous page sizes */ + if (uiLog2AllocPageSize > RGX_HEAP_2MB_PAGE_SHIFT) + { + PVR_DPF((PVR_DBG_ERROR, "Page size is too big: 2^%u.", uiLog2AllocPageSize)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Sanity check of the alloc size */ + if (uiSize >= 0x1000000000ULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Cancelling allocation request of over 64 GB. " + "This is likely a bug." 
+ , __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Fail if requesting coherency on one side but uncached on the other */ + if (PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) && + (PVRSRV_CHECK_GPU_UNCACHED(uiFlags) || PVRSRV_CHECK_GPU_WRITE_COMBINE(uiFlags))) + { + PVR_DPF((PVR_DBG_ERROR, "Request for CPU coherency but specifying GPU uncached " + "Please use GPU cached flags for coherency.")); + return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE; + } + + if (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) && + (PVRSRV_CHECK_CPU_UNCACHED(uiFlags) || PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags))) + { + PVR_DPF((PVR_DBG_ERROR, "Request for GPU coherency but specifying CPU uncached " + "Please use CPU cached flags for coherency.")); + return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE; + } + + if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) && PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Zero on Alloc and Poison on Alloc are mutually exclusive.", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (bIsSparse) + { + /* For sparse we need correct parameters like a suitable page size.... */ + if (OSGetPageShift() > uiLog2AllocPageSize) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid log2-contiguity for sparse allocation. " + "Requested %u, required minimum %zd", + __func__, + uiLog2AllocPageSize, + OSGetPageShift() )); + + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* ... chunk size must be a equal to page size ... */ + if (uiChunkSize != (1 << uiLog2AllocPageSize)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid chunk size for sparse allocation. 
Requested " + "%#" IMG_UINT64_FMTSPECx ", must be same as page size %#x.", + __func__, uiChunkSize, 1 << uiLog2AllocPageSize)); + + return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE; + } + + if (ui32NumVirtChunks * uiChunkSize != uiSize) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Total alloc size (%#" IMG_UINT64_FMTSPECx ") " + "is not equal to virtual chunks * chunk size " + "(%#" IMG_UINT64_FMTSPECx ")", + __func__, uiSize, ui32NumVirtChunks * uiChunkSize)); + + return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE; + } + + if (ui32NumPhysChunks > ui32NumVirtChunks) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Number of physical chunks (%u) must not be greater " + "than number of virtual chunks (%u)", + __func__, + ui32NumPhysChunks, + ui32NumVirtChunks)); + + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + else + { + /* + * Silently round up alignment/pagesize if request was less that PAGE_SHIFT + * because it would never be harmful for memory to be _more_ contiguous that + * was desired. + */ + uiLog2AllocPageSize = OSGetPageShift() > uiLog2AllocPageSize ? 
+ OSGetPageShift() : uiLog2AllocPageSize; + + /* Same for total size */ + uiSize = PVR_ALIGN(uiSize, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); + *puiChunkSize = uiSize; + } + + if ((uiSize & ((1ULL << uiLog2AllocPageSize) - 1)) != 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Total size (%#" IMG_UINT64_FMTSPECx ") " + "must be a multiple of the requested contiguity (%u)", + __func__, uiSize, 1 << uiLog2AllocPageSize)); + return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE; + } + + *puiLog2AllocPageSize = uiLog2AllocPageSize; + *puiSize = uiSize; + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PhysmemNewRamBackedPMR(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T uiSize, + PMR_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 uiLog2AllocPageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 uiAnnotationLength, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_PHYS_HEAP ePhysHeapIdx; + PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE pfnCheckMemAllocSize = + psDevNode->psDevConfig->pfnCheckMemAllocSize; + + PVR_UNREFERENCED_PARAMETER(uiAnnotationLength); + + eError = _ValidateParams(ui32NumPhysChunks, + ui32NumVirtChunks, + uiFlags, + &uiLog2AllocPageSize, + &uiSize, + &uiChunkSize); + PVR_RETURN_IF_ERROR(eError); + + /* Lookup the requested physheap index to use for this PMR allocation */ + if (PVRSRV_CHECK_FW_LOCAL(uiFlags)) + { + if (PVRSRV_FW_ALLOC_TYPE(uiFlags) == FW_ALLOC_RAW) + { + /* Only a Host driver can handle memory on behalf of other drivers */ + PVR_RETURN_IF_INVALID_PARAM(PVRSRV_VZ_MODE_IS(HOST)); + ePhysHeapIdx = PVRSRV_DEVICE_PHYS_HEAP_FW_GUEST; + } + else + { + ePhysHeapIdx = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL; + } + } + else if (PVRSRV_CHECK_CPU_LOCAL(uiFlags)) + { + ePhysHeapIdx = PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL; + } + else + { + ePhysHeapIdx = PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL; + } + 
+ if (NULL == psDevNode->apsPhysHeap[ePhysHeapIdx]) + { + /* In case a heap hasn't been acquired for this type, return invalid heap error */ + PVR_DPF((PVR_DBG_ERROR, "%s: Requested allocation on device node (%p) from " + "an invalid heap (HeapIndex=%d)", + __func__, psDevNode, ePhysHeapIdx)); + return PVRSRV_ERROR_INVALID_HEAP; + } + + /* Apply memory budgeting policy */ + if (pfnCheckMemAllocSize) + { + IMG_UINT64 uiMemSize = (IMG_UINT64)uiChunkSize * ui32NumPhysChunks; + + eError = pfnCheckMemAllocSize(psDevNode->psDevConfig->hSysData, uiMemSize); + PVR_RETURN_IF_ERROR(eError); + } + +#if defined(DEBUG) + if (gPMRAllocFail > 0) + { + static IMG_UINT32 ui32AllocCount = 1; + + if (ui32AllocCount < gPMRAllocFail) + { + ui32AllocCount++; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s failed on %d allocation.", + __func__, ui32AllocCount)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + } +#endif /* defined(DEBUG) */ + + /* If the driver is in an 'init' state all of the allocated memory + * should be attributed to the driver (PID 1) rather than to the + * process those allocations are made under. Same applies to the memory + * allocated for the Firmware. 
*/ + if (psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT || + PVRSRV_CHECK_FW_LOCAL(uiFlags)) + { + uiPid = PVR_SYS_ALLOC_PID; + } + + eError = psDevNode->pfnCreateRamBackedPMR[ePhysHeapIdx](psConnection, + psDevNode, + uiSize, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + uiLog2AllocPageSize, + uiFlags, + pszAnnotation, + uiPid, + ppsPMRPtr, + ui32PDumpFlags); + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + if (eError != PVRSRV_OK) + { + PVRSRVStatsUpdateOOMStats(PVRSRV_PROCESS_STAT_TYPE_OOM_PHYSMEM_COUNT, + OSGetCurrentClientProcessIDKM()); + } +#endif + + return eError; +} + +PVRSRV_ERROR +PhysmemNewRamBackedLockedPMR(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T uiSize, + PMR_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 uiAnnotationLength, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags) +{ + + PVRSRV_ERROR eError; + eError = PhysmemNewRamBackedPMR(psConnection, + psDevNode, + uiSize, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + uiLog2PageSize, + uiFlags, + uiAnnotationLength, + pszAnnotation, + uiPid, + ppsPMRPtr, + ui32PDumpFlags); + + if (eError == PVRSRV_OK) + { + eError = PMRLockSysPhysAddresses(*ppsPMRPtr); + } + + return eError; +} + +PVRSRV_ERROR +PVRSRVGetMaxDevMemSizeKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T *puiLMASize, + IMG_DEVMEM_SIZE_T *puiUMASize ) +{ + IMG_DEVMEM_SIZE_T uiLMASize = 0; + IMG_DEVMEM_SIZE_T uiUMASize = 0; + PHYS_HEAP *psPhysHeap; + IMG_UINT uiHeapIndex; + IMG_UINT uiNumRegions; + IMG_UINT uiRegionID; + IMG_UINT64 uiRegionSize; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + /* + * psDevNode->apsPhysHeap may contain duplicates so loop over all registered + * heaps instead. 
+ */ + for (uiHeapIndex = 0; uiHeapIndex < psDevNode->ui32RegisteredPhysHeaps; uiHeapIndex++) + { + psPhysHeap = psDevNode->papsRegisteredPhysHeaps[uiHeapIndex]; + + if (psPhysHeap != psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] && + psPhysHeap != psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]) + { + continue; + } + + if (PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_LMA) + { + uiNumRegions = PhysHeapNumberOfRegions(psPhysHeap); + for (uiRegionID = 0; uiRegionID < uiNumRegions; uiRegionID++) + { + /* + * Initialise uiRegionSize to 0 on each iteration in case + * PhysHeapRegionGetSize() returns an error, which should never + * actually happen in practice. + */ + uiRegionSize = 0; + PhysHeapRegionGetSize(psPhysHeap, uiRegionID, &uiRegionSize); + uiLMASize += uiRegionSize; + } + } + else if (PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_UMA) + { + if (uiUMASize == 0) + { + uiUMASize = OSGetRAMSize(); + } + } + } + + *puiLMASize = uiLMASize; + *puiUMASize = uiUMASize; + + return PVRSRV_OK; +} + +/* 'Wrapper' function to call PMRImportPMR(), which first checks the PMR is + * for the current device. This avoids the need to do this in pmr.c, which + * would then need PVRSRV_DEVICE_NODE (defining this type in pmr.h causes a + * typedef redefinition issue). 
+ */ +PVRSRV_ERROR +PhysmemImportPMR(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + PMR_EXPORT *psPMRExport, + PMR_PASSWORD_T uiPassword, + PMR_SIZE_T uiSize, + PMR_LOG2ALIGN_T uiLog2Contig, + PMR **ppsPMR) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + + if (PMRGetExportDeviceNode(psPMRExport) != psDevNode) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device", __func__)); + return PVRSRV_ERROR_PMR_NOT_PERMITTED; + } + + return PMRImportPMR(psPMRExport, + uiPassword, + uiSize, + uiLog2Contig, + ppsPMR); +} diff --git a/drivers/mcst/gpu-imgtec/services/server/common/physmem_fwdedicatedmem.c b/drivers/mcst/gpu-imgtec/services/server/common/physmem_fwdedicatedmem.c new file mode 100644 index 000000000000..12279c2c57d6 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/physmem_fwdedicatedmem.c @@ -0,0 +1,571 @@ +/*************************************************************************/ /*! +@File +@Title PMR functions for Trusted Device firmware code memory +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Part of the memory management. This module is responsible for + implementing the function callbacks for physical memory + imported from a trusted environment. The driver cannot acquire + CPU mappings for this secure memory. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pvr_debug.h" +#include "pvrsrv.h" +#include "physmem_fwdedicatedmem.h" +#include "physheap.h" +#include "rgxdevice.h" +#include "rgx_bvnc_defs_km.h" +#include "devicemem_server_utils.h" + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +#include "ri_server.h" +#endif + +#if !defined(NO_HARDWARE) + +typedef struct _PMR_FWMEM_DATA_ { + PVRSRV_DEVICE_NODE *psDeviceNode; + PHYS_HEAP *psFWMemPhysHeap; + IMG_CPU_PHYADDR sCpuPAddr; + IMG_DEV_PHYADDR sDevPAddr; + PMR_LOG2ALIGN_T uiLog2Align; + IMG_UINT64 ui64Size; +} PMR_FWMEM_DATA; + + +/* + * Implementation of callback functions + */ + +static PVRSRV_ERROR +PMRLockPhysAddressesFWMem(PMR_IMPL_PRIVDATA pvPriv) +{ + /* There is nothing to do as we control LMA physical memory */ + PVR_UNREFERENCED_PARAMETER(pvPriv); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +PMRUnlockPhysAddressesFWMem(PMR_IMPL_PRIVDATA pvPriv) +{ + /* There is nothing to do as we control LMA physical memory */ + PVR_UNREFERENCED_PARAMETER(pvPriv); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR PMRSysPhysAddrFWMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T *puiOffset, + IMG_BOOL *pbValid, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PMR_FWMEM_DATA *psPrivData = pvPriv; + IMG_UINT32 i; + + PVR_UNREFERENCED_PARAMETER(ui32Log2PageSize); + + for (i = 0; i < ui32NumOfPages; i++) + { + if (pbValid[i]) + { + psDevPAddr[i].uiAddr = psPrivData->sDevPAddr.uiAddr + puiOffset[i]; + } + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +PMRAcquireKernelMappingDataFWMem(PMR_IMPL_PRIVDATA pvPriv, + size_t uiOffset, + size_t uiSize, + void **ppvKernelAddressOut, + IMG_HANDLE *phHandleOut, + PMR_FLAGS_T ulFlags) +{ + PMR_FWMEM_DATA *psPrivData = pvPriv; + void *pvKernLinAddr = NULL; + IMG_UINT32 ui32CPUCacheFlags; + PVRSRV_ERROR eError; + + eError = DevmemCPUCacheMode(psPrivData->psDeviceNode, ulFlags, &ui32CPUCacheFlags); + 
PVR_RETURN_IF_ERROR(eError); + + PVR_UNREFERENCED_PARAMETER(uiSize); +#ifdef CONFIG_MCST + *pvPtr = OSMapPhysToLin(psPrivData->sCpuPAddr, psPrivData->sDevPAddr, psPrivData->ui64Size, + ui32CPUCacheFlags); +#else + pvKernLinAddr = OSMapPhysToLin(psPrivData->sCpuPAddr, psPrivData->ui64Size, ui32CPUCacheFlags); +#endif + PVR_RETURN_IF_NOMEM(pvKernLinAddr); + + *ppvKernelAddressOut = ((IMG_CHAR *) pvKernLinAddr) + uiOffset; + *phHandleOut = pvKernLinAddr; + + return PVRSRV_OK; +} + +static void +PMRReleaseKernelMappingDataFWMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_HANDLE hHandle) +{ + PMR_FWMEM_DATA *psPrivData = pvPriv; + void *pvKernLinAddr = hHandle; + + OSUnMapPhysToLin(pvKernLinAddr, (size_t)psPrivData->ui64Size, 0); +} + +static PVRSRV_ERROR PMRFinalizeFWMem(PMR_IMPL_PRIVDATA pvPriv) +{ + PMR_FWMEM_DATA *psPrivData = NULL; + + psPrivData = pvPriv; + RA_Free(psPrivData->psDeviceNode->psDedicatedFWMemArena, + psPrivData->sDevPAddr.uiAddr); + OSFreeMem(psPrivData); + + return PVRSRV_OK; +} + +static PMR_IMPL_FUNCTAB _sPMRFWMemFuncTab = { + .pfnLockPhysAddresses = &PMRLockPhysAddressesFWMem, + .pfnUnlockPhysAddresses = &PMRUnlockPhysAddressesFWMem, + .pfnDevPhysAddr = &PMRSysPhysAddrFWMem, + .pfnAcquireKernelMappingData = &PMRAcquireKernelMappingDataFWMem, + .pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataFWMem, + .pfnFinalize = &PMRFinalizeFWMem, +}; + + +/* + * Public functions + */ +PVRSRV_ERROR PhysmemNewFWDedicatedMemPMR(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T uiSize, + PMR_LOG2ALIGN_T uiLog2Align, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + PMR **ppsPMRPtr) +{ + PMR_FWMEM_DATA *psPrivData = NULL; + PMR *psPMR = NULL; + RA_BASE_T uiCardAddr = 0; + RA_LENGTH_T uiActualSize = 0; + IMG_UINT32 uiMappingTable = 0; + PMR_FLAGS_T uiPMRFlags; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + /* In this instance, we simply pass flags straight through. 
Generally, + * uiFlags can include things that control the PMR factory, but we + * don't need any such thing (at the time of writing!), and our caller + * specifies all PMR flags so we don't need to adjust what was given + * to us. + */ + uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK); + + /* Check no significant bits were lost in cast due to different bit + * widths for flags. + */ + PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK)); + + if (psDevNode->psDedicatedFWMemHeap == NULL || psDevNode->psDedicatedFWMemArena == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid params (phys heap %p, arena %p)", + __func__, + psDevNode->psDedicatedFWMemHeap, + psDevNode->psDedicatedFWMemArena)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + eError = RA_Alloc(psDevNode->psDedicatedFWMemArena, + uiSize, + RA_NO_IMPORT_MULTIPLIER, + 0, /* No flags */ + 1ULL << uiLog2Align, + "FW_mem_alloc", + &uiCardAddr, + &uiActualSize, + NULL); /* No private handle */ + PVR_LOG_RETURN_IF_ERROR(eError, "RA_Alloc"); + + psPrivData = OSAllocZMem(sizeof(PMR_FWMEM_DATA)); + PVR_GOTO_IF_NOMEM(psPrivData, eError, errorOnAllocData); + + /* + * uiLog2Align is only used to get memory with the correct alignment. + * The page size is still determined by the OS. 
+ */ + psPrivData->uiLog2Align = OSGetPageShift(); + psPrivData->psFWMemPhysHeap = psDevNode->psDedicatedFWMemHeap; + psPrivData->ui64Size = uiSize; + psPrivData->psDeviceNode = psDevNode; + psPrivData->sDevPAddr.uiAddr = uiCardAddr; + + PhysHeapDevPAddrToCpuPAddr(psPrivData->psFWMemPhysHeap, + 1, + &psPrivData->sCpuPAddr, + &psPrivData->sDevPAddr); + + eError = PMRCreatePMR(psDevNode, + psPrivData->psFWMemPhysHeap, + psPrivData->ui64Size, + psPrivData->ui64Size, + 1, /* ui32NumPhysChunks */ + 1, /* ui32NumVirtChunks */ + &uiMappingTable, /* pui32MappingTable (not used) */ + uiLog2Align, /* uiLog2ContiguityGuarantee */ + uiPMRFlags, + "FWMEM_PMR", + &_sPMRFWMemFuncTab, + psPrivData, + PMR_TYPE_LMA, + &psPMR, + PDUMP_NONE); + PVR_GOTO_IF_ERROR(eError, errorOnCreatePMR); + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + eError = RIWritePMREntryKM(psPMR); + PVR_WARN_IF_ERROR(eError, "RIWritePMREntryKM"); +#endif + + *ppsPMRPtr = psPMR; + return PVRSRV_OK; + +errorOnCreatePMR: + OSFreeMem(psPrivData); +errorOnAllocData: + RA_Free(psDevNode->psDedicatedFWMemArena, + uiCardAddr); + PVR_ASSERT(eError != PVRSRV_OK); + + return eError; +} + +PVRSRV_ERROR PhysmemInitFWDedicatedMem(PVRSRV_DEVICE_NODE *psDeviceNode, + PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + IMG_DEV_PHYADDR sDevPAddr; + IMG_UINT64 ui64Size; + PVRSRV_ERROR eError; + RGX_DATA *psRGXData = (RGX_DATA *)psDevConfig->hDevData; + + if (!psRGXData->bHasFWMemPhysHeap) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Dedicated FW memory not available", __func__)); + return PVRSRV_ERROR_NOT_IMPLEMENTED; + } + + eError = PhysHeapAcquire(psRGXData->uiFWMemPhysHeapID, + &psDeviceNode->psDedicatedFWMemHeap); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire", errorOnPhysHeapAcquire); + + if (PhysHeapGetType(psDeviceNode->psDedicatedFWMemHeap) != PHYS_HEAP_TYPE_LMA || + PhysHeapNumberOfRegions(psDeviceNode->psDedicatedFWMemHeap) != 1) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Wrong heap details: type %u, number of regions %u", + __func__, + 
PhysHeapGetType(psDeviceNode->psDedicatedFWMemHeap), + PhysHeapNumberOfRegions(psDeviceNode->psDedicatedFWMemHeap))); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_HEAP, errorOnValidatePhysHeap); + } + + PhysHeapRegionGetSize(psDeviceNode->psDedicatedFWMemHeap, 0, &ui64Size); + PhysHeapRegionGetDevPAddr(psDeviceNode->psDedicatedFWMemHeap, 0, &sDevPAddr); + + eError = PVRSRVCreateRegionRA(psDevConfig, + &psDeviceNode->psDedicatedFWMemArena, + NULL, + 0, + sDevPAddr.uiAddr, + ui64Size, + 0, + "Dedicated Fw Mem"); + PVR_LOG_GOTO_IF_ERROR(eError, "CreateRegionRA(DedicatedFwMem)", errorOnRACreate); + + return PVRSRV_OK; + +errorOnRACreate: +errorOnValidatePhysHeap: + PhysHeapRelease(psDeviceNode->psDedicatedFWMemHeap); +errorOnPhysHeapAcquire: + return eError; +} + +void PhysmemDeinitFWDedicatedMem(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + /* Validate arguments before dereferencing as we are called on Init errors + * as well as normal shutdown. + */ + if (psDeviceNode->psDedicatedFWMemArena) + { + RA_Delete(psDeviceNode->psDedicatedFWMemArena); + } + if (psDeviceNode->psDedicatedFWMemHeap) + { + PhysHeapRelease(psDeviceNode->psDedicatedFWMemHeap); + } +} + +#else /* !defined(NO_HARDWARE) */ + +#include "physmem_osmem.h" + +typedef struct _PMR_FWDEDICATEDMEM_DATA_ { + PHYS_HEAP *psFWMemPhysHeap; + PMR *psOSMemPMR; + IMG_UINT32 ui32Log2PageSize; +} PMR_FWDEDICATEDMEM_DATA; + + +/* + * Implementation of callback functions + */ + +static PVRSRV_ERROR +PMRLockPhysAddressesFWMem(PMR_IMPL_PRIVDATA pvPriv) +{ + PMR_FWDEDICATEDMEM_DATA *psPrivData = pvPriv; + + return PMRLockSysPhysAddresses(psPrivData->psOSMemPMR); +} + +static PVRSRV_ERROR +PMRUnlockPhysAddressesFWMem(PMR_IMPL_PRIVDATA pvPriv) +{ + PMR_FWDEDICATEDMEM_DATA *psPrivData = pvPriv; + + return PMRUnlockSysPhysAddresses(psPrivData->psOSMemPMR); +} + +static PVRSRV_ERROR +PMRSysPhysAddrFWMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T *puiOffset, + 
IMG_BOOL *pbValid, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PMR_FWDEDICATEDMEM_DATA *psPrivData = pvPriv; + + if (psPrivData->ui32Log2PageSize != ui32Log2PageSize) + { + return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY; + } + + /* On the assumption that this PMR was created with + * NumPhysChunks == NumVirtChunks then + * puiOffset[0] == uiLogicalOffset + */ + return PMR_DevPhysAddr(psPrivData->psOSMemPMR, + ui32Log2PageSize, + ui32NumOfPages, + puiOffset[0], + psDevPAddr, + pbValid); +} + +static PVRSRV_ERROR +PMRAcquireKernelMappingDataFWMem(PMR_IMPL_PRIVDATA pvPriv, + size_t uiOffset, + size_t uiSize, + void **ppvKernelAddressOut, + IMG_HANDLE *phHandleOut, + PMR_FLAGS_T ulFlags) +{ + PMR_FWDEDICATEDMEM_DATA *psPrivData = pvPriv; + size_t uiLengthOut; + + PVR_UNREFERENCED_PARAMETER(ulFlags); + + return PMRAcquireKernelMappingData(psPrivData->psOSMemPMR, + uiOffset, + uiSize, + ppvKernelAddressOut, + &uiLengthOut, + phHandleOut); +} + +static void +PMRReleaseKernelMappingDataFWMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_HANDLE hHandle) +{ + PMR_FWDEDICATEDMEM_DATA *psPrivData = pvPriv; + + PMRReleaseKernelMappingData(psPrivData->psOSMemPMR, hHandle); +} + +static PVRSRV_ERROR PMRFinalizeFWMem(PMR_IMPL_PRIVDATA pvPriv) +{ + PMR_FWDEDICATEDMEM_DATA *psPrivData = pvPriv; + + PMRUnrefPMR(psPrivData->psOSMemPMR); + PhysHeapRelease(psPrivData->psFWMemPhysHeap); + OSFreeMem(psPrivData); + + return PVRSRV_OK; +} + +static PMR_IMPL_FUNCTAB _sPMRFWFuncTab = { + .pfnLockPhysAddresses = &PMRLockPhysAddressesFWMem, + .pfnUnlockPhysAddresses = &PMRUnlockPhysAddressesFWMem, + .pfnDevPhysAddr = &PMRSysPhysAddrFWMem, + .pfnAcquireKernelMappingData = &PMRAcquireKernelMappingDataFWMem, + .pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataFWMem, + .pfnFinalize = &PMRFinalizeFWMem, +}; + + +/* + * Public functions + */ +PVRSRV_ERROR PhysmemNewFWDedicatedMemPMR(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T uiSize, + PMR_LOG2ALIGN_T uiLog2Align, + 
                                         PVRSRV_MEMALLOCFLAGS_T uiFlags,
                                         PMR **ppsPMRPtr)
{
	RGX_DATA *psRGXData = (RGX_DATA *)(psDevNode->psDevConfig->hDevData);
	PMR_FWDEDICATEDMEM_DATA *psPrivData = NULL;
	PMR *psPMR = NULL;
	PMR *psOSPMR = NULL;
	IMG_UINT32 uiMappingTable = 0;
	PMR_FLAGS_T uiPMRFlags;
	PVRSRV_ERROR eError;

	/* In this instance, we simply pass flags straight through. Generally,
	 * uiFlags can include things that control the PMR factory, but we
	 * don't need any such thing (at the time of writing!), and our caller
	 * specifies all PMR flags so we don't need to adjust what was given
	 * to us.
	 */
	uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);

	/* Check no significant bits were lost in cast due to different bit
	 * widths for flags.
	 */
	PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));

	psPrivData = OSAllocZMem(sizeof(PMR_FWDEDICATEDMEM_DATA));
	PVR_GOTO_IF_NOMEM(psPrivData, eError, errorOnAllocData);

	/* Get required info for the dedicated FW memory physical heap */
	if (!psRGXData->bHasFWMemPhysHeap)
	{
		PVR_LOG_GOTO_WITH_ERROR("psRGXData->bHasFWMemPhysHeap", eError, PVRSRV_ERROR_NOT_IMPLEMENTED, errorOnAcquireHeap);
	}
	eError = PhysHeapAcquire(psRGXData->uiFWMemPhysHeapID,
	                         &psPrivData->psFWMemPhysHeap);
	PVR_GOTO_IF_ERROR(eError, errorOnAcquireHeap);

	/* The alignment requested by the caller is only used to generate the
	 * secure FW allocation pdump command with the correct alignment.
	 * Internally we use another PMR with OS page alignment.
	 */
	psPrivData->ui32Log2PageSize = OSGetPageShift();

	/* Note that this PMR is only used to copy the FW blob to memory and
	 * to dump this memory to pdump, it doesn't need to have the alignment
	 * requested by the caller.
	 */
	eError = PhysmemNewOSRamBackedPMR(psConnection,
	                                  psDevNode,
	                                  uiSize,
	                                  uiSize,
	                                  1,		/* ui32NumPhysChunks */
	                                  1,		/* ui32NumVirtChunks */
	                                  &uiMappingTable,
	                                  psPrivData->ui32Log2PageSize,
	                                  uiFlags,
	                                  "DEDICATEDFWMEM_OSMEM",
	                                  OSGetCurrentClientProcessIDKM(),
	                                  &psOSPMR,
	                                  PDUMP_NONE);
	PVR_GOTO_IF_ERROR(eError, errorOnCreateOSPMR);

	/* This is the primary PMR dumped with correct memspace and alignment */
	eError = PMRCreatePMR(psDevNode,
	                      psPrivData->psFWMemPhysHeap,
	                      uiSize,
	                      uiSize,
	                      1,		/* ui32NumPhysChunks */
	                      1,		/* ui32NumVirtChunks */
	                      &uiMappingTable,	/* pui32MappingTable (not used) */
	                      uiLog2Align,	/* uiLog2ContiguityGuarantee */
	                      uiPMRFlags,
	                      "DEDICATEDFWMEM_PMR",
	                      &_sPMRFWFuncTab,
	                      psPrivData,
	                      PMR_TYPE_OSMEM,
	                      &psPMR,
	                      PDUMP_NONE);
	PVR_GOTO_IF_ERROR(eError, errorOnCreatePMR);

#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
	/* Best-effort: a failure to register with the resource-information
	 * tracker is only warned about, not treated as fatal. */
	eError = RIWritePMREntryKM(psPMR);
	PVR_WARN_IF_ERROR(eError, "RIWritePMREntryKM");
#endif

	/* Success: ownership of psPrivData (and its heap/OS-PMR references)
	 * passes to the new PMR; PMRFinalizeFWMem releases them. */
	psPrivData->psOSMemPMR = psOSPMR;
	*ppsPMRPtr = psPMR;

	return PVRSRV_OK;

	/* Error unwind -- each label releases exactly what was acquired before
	 * the corresponding failure point (goto-chain cleanup). */
errorOnCreatePMR:
	PMRUnrefPMR(psOSPMR);

errorOnCreateOSPMR:
	PhysHeapRelease(psPrivData->psFWMemPhysHeap);

errorOnAcquireHeap:
	OSFreeMem(psPrivData);

errorOnAllocData:
	PVR_ASSERT(eError != PVRSRV_OK);

	return eError;
}

#endif
diff --git a/drivers/mcst/gpu-imgtec/services/server/common/physmem_hostmem.c b/drivers/mcst/gpu-imgtec/services/server/common/physmem_hostmem.c
new file mode 100644
index 000000000000..4e364fac229b
--- /dev/null
+++ b/drivers/mcst/gpu-imgtec/services/server/common/physmem_hostmem.c
@@ -0,0 +1,150 @@
/*************************************************************************/ /*!
@File           physmem_hostmem.c
@Title          Host memory device node functions
@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
@Description    Functions relevant to device memory allocations made from host
                mem device node.
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "physmem_hostmem.h" + +#include "img_defs.h" +#include "img_types.h" +#include "physheap.h" +#include "pvrsrv_device.h" + +static void HostMemCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr); + +static void HostMemDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr); + +/* heap callbacks for host driver's device's heap */ +static PHYS_HEAP_FUNCTIONS gsHostMemDevPhysHeapFuncs = +{ + /* pfnCpuPAddrToDevPAddr */ + HostMemCpuPAddrToDevPAddr, + /* pfnDevPAddrToCpuPAddr */ + HostMemDevPAddrToCpuPAddr, + /* pfnGetRegionId */ + NULL, +}; + +static PVRSRV_DEVICE_CONFIG gsHostMemDevConfig[]; + +/* heap configuration for host driver's device */ +static PHYS_HEAP_CONFIG gsPhysHeapConfigHostMemDevice[] = +{ + { + PHYS_HEAP_ID_HOSTMEM, + PHYS_HEAP_TYPE_UMA, + "SYSMEM", + &gsHostMemDevPhysHeapFuncs, + NULL, + 0, + (IMG_HANDLE)&gsHostMemDevConfig[0], + } +}; + +/* device configuration for host driver's device */ +static PVRSRV_DEVICE_CONFIG gsHostMemDevConfig[] = +{ + { + .pszName = "HostMemDevice", + .eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE, + .pasPhysHeaps = &gsPhysHeapConfigHostMemDevice[0], + .ui32PhysHeapCount = ARRAY_SIZE(gsPhysHeapConfigHostMemDevice), + .aui32PhysHeapID = { + 
		/* (continuation of gsHostMemDevConfig[0].aui32PhysHeapID: every
		 * device-level heap slot is mapped to the one host-memory heap) */
		PHYS_HEAP_ID_HOSTMEM,
		PHYS_HEAP_ID_HOSTMEM,
		PHYS_HEAP_ID_HOSTMEM
		},
	}
};

/* CPU-phys -> device-phys translation for the host-memory device node.
 * The mapping is the identity: device addresses for this pseudo-device are
 * just CPU physical addresses, copied element by element. */
static void HostMemCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
                                      IMG_UINT32 ui32NumOfAddr,
                                      IMG_DEV_PHYADDR *psDevPAddr,
                                      IMG_CPU_PHYADDR *psCpuPAddr)
{
	PVR_UNREFERENCED_PARAMETER(hPrivData);
#ifdef CONFIG_MCST
	/* NOTE(review): MCST builds make any host-mem translation a hard BUG()
	 * -- presumably the host-mem heap must never be exercised on e2k;
	 * confirm against the MCST platform code. */
	BUG();
#endif
	/* Optimise common case */
	psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr;
	if (ui32NumOfAddr > 1)
	{
		IMG_UINT32 ui32Idx;
		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
		{
			psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr;
		}
	}
}

/* Device-phys -> CPU-phys translation: identity mapping, the inverse of
 * HostMemCpuPAddrToDevPAddr, with a width-adjusting cast per element. */
static void HostMemDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
                                      IMG_UINT32 ui32NumOfAddr,
                                      IMG_CPU_PHYADDR *psCpuPAddr,
                                      IMG_DEV_PHYADDR *psDevPAddr)
{
	PVR_UNREFERENCED_PARAMETER(hPrivData);
#ifdef CONFIG_MCST
	/* NOTE(review): see matching BUG() in HostMemCpuPAddrToDevPAddr. */
	BUG();
#endif
	/* Optimise common case */
	psCpuPAddr[0].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psDevPAddr[0].uiAddr);
	if (ui32NumOfAddr > 1)
	{
		IMG_UINT32 ui32Idx;
		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
		{
			psCpuPAddr[ui32Idx].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psDevPAddr[ui32Idx].uiAddr);
		}
	}
}

/* Accessor for the file-scope host-memory device configuration table. */
PVRSRV_DEVICE_CONFIG* HostMemGetDeviceConfig(void)
{
	return &gsHostMemDevConfig[0];
}
diff --git a/drivers/mcst/gpu-imgtec/services/server/common/physmem_lma.c b/drivers/mcst/gpu-imgtec/services/server/common/physmem_lma.c
new file mode 100644
index 000000000000..75389fada729
--- /dev/null
+++ b/drivers/mcst/gpu-imgtec/services/server/common/physmem_lma.c
@@ -0,0 +1,1510 @@
/*************************************************************************/ /*!
@File           physmem_lma.c
@Title          Local card memory allocator
@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
@Description    Part of the memory management. This module is responsible for
                implementing the function callbacks for local card memory.
@License        Dual MIT/GPLv2

The contents of this file are subject to the MIT license as set out below.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" +#include "rgx_pdump_panics.h" +#include "allocmem.h" +#include "osfunc.h" +#include "pvrsrv.h" +#include "devicemem_server_utils.h" +#include "physmem_lma.h" +#include "pdump_km.h" +#include "pmr.h" +#include "pmr_impl.h" +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#endif + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#include "rgxutils.h" +#endif + +/* Since 0x0 is a valid DevPAddr, we rely on max 64-bit value to be an invalid + * page address */ +#define INVALID_PAGE_ADDR ~((IMG_UINT64)0x0) + +typedef struct _PMR_LMALLOCARRAY_DATA_ { + PVRSRV_DEVICE_NODE *psDevNode; + IMG_PID uiPid; + IMG_INT32 iNumPagesAllocated; + /* + * uiTotalNumPages: + * Total number of pages supported by this PMR. + * (Fixed as of now due the fixed Page table array size) + */ + IMG_UINT32 uiTotalNumPages; + IMG_UINT32 uiPagesToAlloc; + + IMG_UINT32 uiLog2AllocSize; + IMG_UINT32 uiContigAllocSize; + IMG_DEV_PHYADDR *pasDevPAddr; + + IMG_BOOL bZeroOnAlloc; + IMG_BOOL bPoisonOnAlloc; + IMG_BOOL bFwLocalAlloc; + FIRMWARE_ALLOC_TYPE eFwAllocType; + IMG_UINT32 ui32OSid; + + IMG_BOOL bOnDemand; + + /* + Record at alloc time whether poisoning will be required when the + PMR is freed. + */ + IMG_BOOL bPoisonOnFree; + + /* Physical heap and arena pointers for this allocation */ + PHYS_HEAP* psPhysHeap; + RA_ARENA* psArena; + PVRSRV_MEMALLOCFLAGS_T uiAllocFlags; + + /* + Connection data for this requests' originating process. 
	   NULL for
	   direct-bridge originating calls
	*/
	CONNECTION_DATA *psConnection;
} PMR_LMALLOCARRAY_DATA;

/* Map one physically contiguous card-memory allocation into the kernel.
 * The dev-phys address is translated to a CPU-phys address through either
 * the FW-local or GPU-local phys heap (selected by bFwLocalAlloc), then
 * mapped with the CPU cache mode derived from ulFlags.  On success *pvPtr
 * holds the kernel linear address; pair with _UnMapAlloc. */
static PVRSRV_ERROR _MapAlloc(PVRSRV_DEVICE_NODE *psDevNode,
                              IMG_DEV_PHYADDR *psDevPAddr,
                              size_t uiSize,
                              IMG_BOOL bFwLocalAlloc,
                              PMR_FLAGS_T ulFlags,
                              void **pvPtr)
{
	IMG_UINT32 ui32CPUCacheFlags;
	IMG_CPU_PHYADDR sCpuPAddr;
	PHYS_HEAP *psPhysHeap;
	PVRSRV_ERROR eError;

	eError = DevmemCPUCacheMode(psDevNode, ulFlags, &ui32CPUCacheFlags);
	PVR_RETURN_IF_ERROR(eError);

	if (bFwLocalAlloc)
	{
		psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
	}
	else
	{
		psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
	}

	PhysHeapDevPAddrToCpuPAddr(psPhysHeap, 1, &sCpuPAddr, psDevPAddr);

#ifdef CONFIG_MCST
	/* MCST variant of OSMapPhysToLin additionally needs the device physical
	 * address (platform-specific signature). */
	*pvPtr = OSMapPhysToLin(sCpuPAddr, *psDevPAddr, uiSize,
				ui32CPUCacheFlags);
#else
	*pvPtr = OSMapPhysToLin(sCpuPAddr, uiSize, ui32CPUCacheFlags);
#endif
	PVR_RETURN_IF_NOMEM(*pvPtr);

	return PVRSRV_OK;
}

/* Undo a _MapAlloc mapping.  psDevNode and bFwLocalAlloc are accepted for
 * signature symmetry with _MapAlloc but are not used here. */
static void _UnMapAlloc(PVRSRV_DEVICE_NODE *psDevNode,
                        size_t uiSize,
                        IMG_BOOL bFwLocalAlloc,
                        PMR_FLAGS_T ulFlags,
                        void *pvPtr)
{
	OSUnMapPhysToLin(pvPtr, uiSize, PVRSRV_CPU_CACHE_MODE(ulFlags));
}

/* Fill one allocation with a poison byte: temporarily map it uncached,
 * memset to ui8PoisonValue, unmap.  Used on alloc and/or free to catch
 * use of uninitialised/freed card memory. */
static PVRSRV_ERROR
_PoisonAlloc(PVRSRV_DEVICE_NODE *psDevNode,
             IMG_DEV_PHYADDR *psDevPAddr,
             IMG_BOOL bFwLocalAlloc,
             IMG_UINT32 uiContigAllocSize,
             IMG_BYTE ui8PoisonValue)
{
	PVRSRV_ERROR eError;
	void *pvKernLin = NULL;

	eError = _MapAlloc(psDevNode,
	                   psDevPAddr,
	                   uiContigAllocSize,
	                   bFwLocalAlloc,
	                   PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
	                   &pvKernLin);
	PVR_GOTO_IF_ERROR(eError, map_failed);

	OSDeviceMemSet(pvKernLin, ui8PoisonValue, uiContigAllocSize);

	_UnMapAlloc(psDevNode, uiContigAllocSize, bFwLocalAlloc, 0, pvKernLin);

	return PVRSRV_OK;

map_failed:
	PVR_DPF((PVR_DBG_ERROR, "Failed to poison allocation"));
	return eError;
}

/* Zero one allocation via a transient uncached mapping; the parameter list
 * continues on the next span of the file. */
static PVRSRV_ERROR
_ZeroAlloc(PVRSRV_DEVICE_NODE *psDevNode,
           IMG_DEV_PHYADDR *psDevPAddr,
           IMG_BOOL
bFwLocalAlloc, + IMG_UINT32 uiContigAllocSize) +{ + void *pvKernLin = NULL; + PVRSRV_ERROR eError; + + eError = _MapAlloc(psDevNode, + psDevPAddr, + uiContigAllocSize, + bFwLocalAlloc, + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED, + &pvKernLin); + PVR_GOTO_IF_ERROR(eError, map_failed); + + OSDeviceMemSet(pvKernLin, 0, uiContigAllocSize); + + _UnMapAlloc(psDevNode, uiContigAllocSize, bFwLocalAlloc, 0, pvKernLin); + + return PVRSRV_OK; + +map_failed: + PVR_DPF((PVR_DBG_ERROR, "Failed to zero allocation")); + return eError; +} + +static PVRSRV_ERROR +_AllocLMPageArray(PVRSRV_DEVICE_NODE *psDevNode, + PMR_SIZE_T uiSize, + PMR_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pabMappingTable, + IMG_UINT32 uiLog2AllocPageSize, + IMG_BOOL bZero, + IMG_BOOL bPoisonOnAlloc, + IMG_BOOL bPoisonOnFree, + IMG_BOOL bContig, + IMG_BOOL bOnDemand, + IMG_BOOL bFwLocalAlloc, + FIRMWARE_ALLOC_TYPE eFwAllocType, + IMG_UINT32 ui32OSid, + PHYS_HEAP* psPhysHeap, + PVRSRV_MEMALLOCFLAGS_T uiAllocFlags, + IMG_PID uiPid, + PMR_LMALLOCARRAY_DATA **ppsPageArrayDataPtr, + CONNECTION_DATA *psConnection + ) +{ + PMR_LMALLOCARRAY_DATA *psPageArrayData = NULL; + IMG_UINT32 ui32Index; + PVRSRV_ERROR eError; + + PVR_ASSERT(!bZero || !bPoisonOnAlloc); + PVR_ASSERT(OSGetPageShift() <= uiLog2AllocPageSize); + +#if defined(SUPPORT_SECURITY_VALIDATION) + /* The following check is done before any attempt to use either security flag */ + if ((PVRSRV_CHECK_SECURE_FW_CODE(uiAllocFlags) && PVRSRV_CHECK_SECURE_BUFFER(uiAllocFlags)) || + (PVRSRV_CHECK_SECURE_FW_DATA(uiAllocFlags) && PVRSRV_CHECK_SECURE_BUFFER(uiAllocFlags)) || + (PVRSRV_CHECK_SECURE_FW_CODE(uiAllocFlags) && PVRSRV_CHECK_SECURE_FW_DATA(uiAllocFlags))) + { + PVR_DPF((PVR_DBG_ERROR, "Multiple secure allocation flags are set!")); + return PVRSRV_ERROR_INVALID_PARAMS; + } +#endif + + psPageArrayData = OSAllocZMem(sizeof(PMR_LMALLOCARRAY_DATA)); + PVR_GOTO_IF_NOMEM(psPageArrayData, eError, errorOnAllocArray); + + 
if (bContig) + { + /* + Some allocations require kernel mappings in which case in order + to be virtually contiguous we also have to be physically contiguous. + */ + psPageArrayData->uiTotalNumPages = 1; + psPageArrayData->uiPagesToAlloc = psPageArrayData->uiTotalNumPages; + psPageArrayData->uiContigAllocSize = TRUNCATE_64BITS_TO_32BITS(uiSize); + psPageArrayData->uiLog2AllocSize = uiLog2AllocPageSize; + } + else + { + IMG_UINT32 uiNumPages; + + /* Use of cast below is justified by the assertion that follows to + prove that no significant bits have been truncated */ + uiNumPages = (IMG_UINT32)(((uiSize - 1) >> uiLog2AllocPageSize) + 1); + PVR_ASSERT(((PMR_SIZE_T)uiNumPages << uiLog2AllocPageSize) == uiSize); + + psPageArrayData->uiTotalNumPages = uiNumPages; + + if ((ui32NumVirtChunks != ui32NumPhysChunks) || (1 < ui32NumVirtChunks)) + { + psPageArrayData->uiPagesToAlloc = ui32NumPhysChunks; + } + else + { + psPageArrayData->uiPagesToAlloc = uiNumPages; + } + psPageArrayData->uiContigAllocSize = 1 << uiLog2AllocPageSize; + psPageArrayData->uiLog2AllocSize = uiLog2AllocPageSize; + } + psPageArrayData->psDevNode = psDevNode; + psPageArrayData->psConnection = psConnection; + psPageArrayData->uiPid = uiPid; + psPageArrayData->pasDevPAddr = OSAllocMem(sizeof(IMG_DEV_PHYADDR) * + psPageArrayData->uiTotalNumPages); + PVR_GOTO_IF_NOMEM(psPageArrayData->pasDevPAddr, eError, errorOnAllocAddr); + + /* Since no pages are allocated yet, initialise page addresses to INVALID_PAGE_ADDR */ + for (ui32Index = 0; ui32Index < psPageArrayData->uiTotalNumPages; ui32Index++) + { + psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR; + } + + psPageArrayData->iNumPagesAllocated = 0; + psPageArrayData->bZeroOnAlloc = bZero; + psPageArrayData->bPoisonOnAlloc = bPoisonOnAlloc; + psPageArrayData->bPoisonOnFree = bPoisonOnFree; + psPageArrayData->bOnDemand = bOnDemand; + psPageArrayData->bFwLocalAlloc = bFwLocalAlloc; + psPageArrayData->psPhysHeap = psPhysHeap; + 
psPageArrayData->uiAllocFlags = uiAllocFlags; + psPageArrayData->eFwAllocType = eFwAllocType; + psPageArrayData->ui32OSid = ui32OSid; + + *ppsPageArrayDataPtr = psPageArrayData; + + return PVRSRV_OK; + + /* + error exit paths follow: + */ +errorOnAllocAddr: + OSFreeMem(psPageArrayData); + +errorOnAllocArray: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + + +static PVRSRV_ERROR +_AllocLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData, IMG_UINT32 *pui32MapTable) +{ + PVRSRV_ERROR eError; + RA_BASE_T uiCardAddr; + RA_LENGTH_T uiActualSize; + IMG_UINT32 i, ui32Index = 0; + IMG_UINT32 uiContigAllocSize; + IMG_UINT32 uiLog2AllocSize; + PVRSRV_DEVICE_NODE *psDevNode; + IMG_BOOL bPoisonOnAlloc; + IMG_BOOL bZeroOnAlloc; + RA_ARENA *pArena; + + PVR_ASSERT(NULL != psPageArrayData); + PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated); + + uiContigAllocSize = psPageArrayData->uiContigAllocSize; + uiLog2AllocSize = psPageArrayData->uiLog2AllocSize; + psDevNode = psPageArrayData->psDevNode; + bPoisonOnAlloc = psPageArrayData->bPoisonOnAlloc; + bZeroOnAlloc = psPageArrayData->bZeroOnAlloc; + + if (!PVRSRV_VZ_MODE_IS(NATIVE) && psPageArrayData->bFwLocalAlloc) + { + /* Allocate from FW_LOCAL or FW_GUEST physheap via the RAs */ + switch (psPageArrayData->eFwAllocType) + { + case FW_ALLOC_CONFIG: + { + pArena = psDevNode->psKernelFwConfigMemArena; + break; + } + case FW_ALLOC_RAW: + { + pArena = psDevNode->psKernelFwRawMemArena[psPageArrayData->ui32OSid]; + break; + } + default: + { + PVR_ASSERT(psPageArrayData->eFwAllocType != FW_ALLOC_NO_FW_ACCESS); + pArena = psDevNode->psKernelFwMainMemArena; + break; + } + } + } + else + { + IMG_UINT32 uiRegionId; + + /* Get suitable local memory region for this GPU physheap allocation */ + uiRegionId = PhysHeapGetRegionId(psPageArrayData->psPhysHeap, + psPageArrayData->uiAllocFlags); + PVR_ASSERT(uiRegionId < psDevNode->ui32NumOfLocalMemArenas); + pArena = psDevNode->apsLocalDevMemArenas[uiRegionId]; + } + + if 
(psPageArrayData->uiTotalNumPages < + (psPageArrayData->iNumPagesAllocated + psPageArrayData->uiPagesToAlloc)) + { + PVR_DPF((PVR_DBG_ERROR, "Pages requested to allocate don't fit PMR alloc Size. " + "Allocated: %u + Requested: %u > Total Allowed: %u", + psPageArrayData->iNumPagesAllocated, + psPageArrayData->uiPagesToAlloc, + psPageArrayData->uiTotalNumPages)); + return PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE; + } + + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + { + IMG_UINT32 ui32OSid=0; + + /* Obtain the OSid specific data from our connection handle */ + if (psPageArrayData->psConnection != NULL) + { + ui32OSid = psPageArrayData->psConnection->ui32OSid; + } + + pArena=psDevNode->psOSidSubArena[ui32OSid]; + PVR_DPF((PVR_DBG_MESSAGE, + "(GPU Virtualization Validation): Giving from OS slot %d", + ui32OSid)); + } +#endif + + psPageArrayData->psArena = pArena; + + for (i = 0; i < psPageArrayData->uiPagesToAlloc; i++) + { + /* This part of index finding should happen before allocating the page. 
+ * Just avoiding intricate paths */ + if (psPageArrayData->uiTotalNumPages == psPageArrayData->uiPagesToAlloc) + { + ui32Index = i; + } + else + { + if (NULL == pui32MapTable) + { + PVR_LOG_GOTO_WITH_ERROR("pui32MapTable", eError, PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY, errorOnRAAlloc); + } + + ui32Index = pui32MapTable[i]; + if (ui32Index >= psPageArrayData->uiTotalNumPages) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Page alloc request Index out of bounds for PMR @0x%p", + __func__, + psPageArrayData)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, errorOnRAAlloc); + } + + if (INVALID_PAGE_ADDR != psPageArrayData->pasDevPAddr[ui32Index].uiAddr) + { + PVR_LOG_GOTO_WITH_ERROR("Mapping already exists", eError, PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS, errorOnRAAlloc); + } + } + + eError = RA_Alloc(pArena, + uiContigAllocSize, + RA_NO_IMPORT_MULTIPLIER, + 0, /* No flags */ + 1ULL << uiLog2AllocSize, + "LMA_Page_Alloc", + &uiCardAddr, + &uiActualSize, + NULL); /* No private handle */ + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to Allocate the page @index:%d, size = 0x%llx", + ui32Index, 1ULL << uiLog2AllocSize)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES, errorOnRAAlloc); + } + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +{ + PVR_DPF((PVR_DBG_MESSAGE, + "(GPU Virtualization Validation): Address: 0x%"IMG_UINT64_FMTSPECX, + uiCardAddr)); +} +#endif + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + /* Allocation is done a page at a time */ + PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, uiActualSize, psPageArrayData->uiPid); +#else + { + IMG_CPU_PHYADDR sLocalCpuPAddr; + + sLocalCpuPAddr.uiAddr = (IMG_UINT64)uiCardAddr; + PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, + NULL, + sLocalCpuPAddr, + uiActualSize, + NULL, + psPageArrayData->uiPid + DEBUG_MEMSTATS_VALUES); + } +#endif +#endif + + 
psPageArrayData->pasDevPAddr[ui32Index].uiAddr = uiCardAddr; + if (bPoisonOnAlloc) + { + eError = _PoisonAlloc(psDevNode, + &psPageArrayData->pasDevPAddr[ui32Index], + psPageArrayData->bFwLocalAlloc, + uiContigAllocSize, + PVRSRV_POISON_ON_ALLOC_VALUE); + PVR_LOG_GOTO_IF_ERROR(eError, "_PoisonAlloc", errorOnPoison); + } + + if (bZeroOnAlloc) + { + eError = _ZeroAlloc(psDevNode, + &psPageArrayData->pasDevPAddr[ui32Index], + psPageArrayData->bFwLocalAlloc, + uiContigAllocSize); + PVR_LOG_GOTO_IF_ERROR(eError, "_ZeroAlloc", errorOnZero); + } + } + psPageArrayData->iNumPagesAllocated += psPageArrayData->uiPagesToAlloc; + + return PVRSRV_OK; + + /* + error exit paths follow: + */ +errorOnZero: +errorOnPoison: + eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; +errorOnRAAlloc: + PVR_DPF((PVR_DBG_ERROR, + "%s: alloc_pages failed to honour request %d @index: %d of %d pages: (%s)", + __func__, + ui32Index, + i, + psPageArrayData->uiPagesToAlloc, + PVRSRVGetErrorString(eError))); + while (--i < psPageArrayData->uiPagesToAlloc) + { + if (psPageArrayData->uiTotalNumPages == psPageArrayData->uiPagesToAlloc) + { + ui32Index = i; + } + else + { + if (NULL == pui32MapTable) + { + break; + } + + ui32Index = pui32MapTable[i]; + } + + if (ui32Index < psPageArrayData->uiTotalNumPages) + { +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + /* Allocation is done a page at a time */ + PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, + uiContigAllocSize, + psPageArrayData->uiPid); +#else + { + PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, + psPageArrayData->pasDevPAddr[ui32Index].uiAddr, + psPageArrayData->uiPid); + } +#endif +#endif + RA_Free(pArena, psPageArrayData->pasDevPAddr[ui32Index].uiAddr); + psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR; + } + } + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +static PVRSRV_ERROR +_FreeLMPageArray(PMR_LMALLOCARRAY_DATA *psPageArrayData) +{ 
+ OSFreeMem(psPageArrayData->pasDevPAddr); + + PVR_DPF((PVR_DBG_MESSAGE, + "physmem_lma.c: freed local memory array structure for PMR @0x%p", + psPageArrayData)); + + OSFreeMem(psPageArrayData); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +_FreeLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData, + IMG_UINT32 *pui32FreeIndices, + IMG_UINT32 ui32FreePageCount) +{ + IMG_UINT32 uiContigAllocSize; + IMG_UINT32 i, ui32PagesToFree=0, ui32PagesFreed=0, ui32Index=0; + RA_ARENA *pArena = psPageArrayData->psArena; + + PVR_ASSERT(psPageArrayData->iNumPagesAllocated != 0); + + uiContigAllocSize = psPageArrayData->uiContigAllocSize; + + ui32PagesToFree = (NULL == pui32FreeIndices) ? + psPageArrayData->uiTotalNumPages : ui32FreePageCount; + + for (i = 0; i < ui32PagesToFree; i++) + { + if (NULL == pui32FreeIndices) + { + ui32Index = i; + } + else + { + ui32Index = pui32FreeIndices[i]; + } + + if (INVALID_PAGE_ADDR != psPageArrayData->pasDevPAddr[ui32Index].uiAddr) + { + ui32PagesFreed++; + if (psPageArrayData->bPoisonOnFree) + { + _PoisonAlloc(psPageArrayData->psDevNode, + &psPageArrayData->pasDevPAddr[ui32Index], + psPageArrayData->bFwLocalAlloc, + uiContigAllocSize, + PVRSRV_POISON_ON_FREE_VALUE); + } + + RA_Free(pArena, psPageArrayData->pasDevPAddr[ui32Index].uiAddr); + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + /* Allocation is done a page at a time */ + PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, + uiContigAllocSize, + psPageArrayData->uiPid); +#else + { + PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, + psPageArrayData->pasDevPAddr[ui32Index].uiAddr, + psPageArrayData->uiPid); + } +#endif +#endif + psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR; + } + } + psPageArrayData->iNumPagesAllocated -= ui32PagesFreed; + + PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated); + + PVR_DPF((PVR_DBG_MESSAGE, + "%s: freed %d local memory for PMR @0x%p", + __func__, + 
		 (ui32PagesFreed * uiContigAllocSize),
		 psPageArrayData));

	return PVRSRV_OK;
}

/*
 *
 * Implementation of callback functions
 *
 */

/* destructor func is called after last reference disappears, but
   before PMR itself is freed. */
static PVRSRV_ERROR
PMRFinalizeLocalMem(PMR_IMPL_PRIVDATA pvPriv)
{
	PVRSRV_ERROR eError;
	PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;

	psLMAllocArrayData = pvPriv;

	/* We can't free pages until now. */
	if (psLMAllocArrayData->iNumPagesAllocated != 0)
	{
		/* NULL/0 arguments mean "free every allocated page". */
		eError = _FreeLMPages(psLMAllocArrayData, NULL, 0);
		PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
	}

	eError = _FreeLMPageArray(psLMAllocArrayData);
	PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */

	/* Failures above are asserted but deliberately not propagated. */
	return PVRSRV_OK;
}

/* callback function for locking the system physical page addresses.
   As we are LMA there is nothing to do as we control physical memory. */
static PVRSRV_ERROR
PMRLockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv)
{

	PVRSRV_ERROR eError;
	PMR_LMALLOCARRAY_DATA *psLMAllocArrayData;

	psLMAllocArrayData = pvPriv;

	if (psLMAllocArrayData->bOnDemand)
	{
		/* Allocate Memory for deferred allocation */
		eError = _AllocLMPages(psLMAllocArrayData, NULL);
		PVR_RETURN_IF_ERROR(eError);
	}

	return PVRSRV_OK;
}

/* Unlock counterpart: for on-demand PMRs the backing pages are released
 * here; otherwise a no-op. */
static PVRSRV_ERROR
PMRUnlockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv)
{
	PVRSRV_ERROR eError = PVRSRV_OK;
	PMR_LMALLOCARRAY_DATA *psLMAllocArrayData;

	psLMAllocArrayData = pvPriv;

	if (psLMAllocArrayData->bOnDemand)
	{
		/* Free Memory for deferred allocation */
		eError = _FreeLMPages(psLMAllocArrayData, NULL, 0);
		PVR_RETURN_IF_ERROR(eError);
	}

	PVR_ASSERT(eError == PVRSRV_OK);
	return eError;
}

/* N.B. It is assumed that PMRLockSysPhysAddressesLocalMem() is called _before_ this function!
*/ +static PVRSRV_ERROR +PMRSysPhysAddrLocalMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T *puiOffset, + IMG_BOOL *pbValid, + IMG_DEV_PHYADDR *psDevPAddr) +{ + IMG_UINT32 idx; + IMG_UINT32 uiLog2AllocSize; + IMG_UINT32 uiNumAllocs; + IMG_UINT64 uiAllocIndex; + IMG_DEVMEM_OFFSET_T uiInAllocOffset; + PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv; + + if (psLMAllocArrayData->uiLog2AllocSize < ui32Log2PageSize) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Requested physical addresses from PMR " + "for incompatible contiguity %u!", + __func__, + ui32Log2PageSize)); + return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY; + } + + uiNumAllocs = psLMAllocArrayData->uiTotalNumPages; + if (uiNumAllocs > 1) + { + PVR_ASSERT(psLMAllocArrayData->uiLog2AllocSize != 0); + uiLog2AllocSize = psLMAllocArrayData->uiLog2AllocSize; + + for (idx=0; idx < ui32NumOfPages; idx++) + { + if (pbValid[idx]) + { + uiAllocIndex = puiOffset[idx] >> uiLog2AllocSize; + uiInAllocOffset = puiOffset[idx] - (uiAllocIndex << uiLog2AllocSize); + + PVR_ASSERT(uiAllocIndex < uiNumAllocs); + PVR_ASSERT(uiInAllocOffset < (1ULL << uiLog2AllocSize)); + + psDevPAddr[idx].uiAddr = psLMAllocArrayData->pasDevPAddr[uiAllocIndex].uiAddr + uiInAllocOffset; + } + } + } + else + { + for (idx=0; idx < ui32NumOfPages; idx++) + { + if (pbValid[idx]) + { + psDevPAddr[idx].uiAddr = psLMAllocArrayData->pasDevPAddr[0].uiAddr + puiOffset[idx]; + } + } + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +PMRAcquireKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv, + size_t uiOffset, + size_t uiSize, + void **ppvKernelAddressOut, + IMG_HANDLE *phHandleOut, + PMR_FLAGS_T ulFlags) +{ + PVRSRV_ERROR eError; + PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL; + void *pvKernLinAddr = NULL; + IMG_UINT32 ui32PageIndex = 0; + size_t uiOffsetMask = uiOffset; + + psLMAllocArrayData = pvPriv; + + /* Check that we can map this in contiguously */ + if 
(psLMAllocArrayData->uiTotalNumPages != 1) + { + size_t uiStart = uiOffset; + size_t uiEnd = uiOffset + uiSize - 1; + size_t uiPageMask = ~((1 << psLMAllocArrayData->uiLog2AllocSize) - 1); + + /* We can still map if only one page is required */ + if ((uiStart & uiPageMask) != (uiEnd & uiPageMask)) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY, e0); + } + + /* Locate the desired physical page to map in */ + ui32PageIndex = uiOffset >> psLMAllocArrayData->uiLog2AllocSize; + uiOffsetMask = (1U << psLMAllocArrayData->uiLog2AllocSize) - 1; + } + + PVR_ASSERT(ui32PageIndex < psLMAllocArrayData->uiTotalNumPages); + + eError = _MapAlloc(psLMAllocArrayData->psDevNode, + &psLMAllocArrayData->pasDevPAddr[ui32PageIndex], + psLMAllocArrayData->uiContigAllocSize, + psLMAllocArrayData->bFwLocalAlloc, + ulFlags, + &pvKernLinAddr); + + *ppvKernelAddressOut = ((IMG_CHAR *) pvKernLinAddr) + (uiOffset & uiOffsetMask); + *phHandleOut = pvKernLinAddr; + + return eError; + + /* + error exit paths follow: + */ +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +static void PMRReleaseKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_HANDLE hHandle) +{ + PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL; + void *pvKernLinAddr = NULL; + + psLMAllocArrayData = (PMR_LMALLOCARRAY_DATA *) pvPriv; + pvKernLinAddr = (void *) hHandle; + + _UnMapAlloc(psLMAllocArrayData->psDevNode, + psLMAllocArrayData->uiContigAllocSize, + psLMAllocArrayData->bFwLocalAlloc, + 0, + pvKernLinAddr); +} + + +static PVRSRV_ERROR +CopyBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes, + void (*pfnCopyBytes)(IMG_UINT8 *pcBuffer, + IMG_UINT8 *pcPMR, + size_t uiSize)) +{ + PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL; + size_t uiBytesCopied; + size_t uiBytesToCopy; + size_t uiBytesCopyableFromAlloc; + void *pvMapping = NULL; + IMG_UINT8 *pcKernelPointer = NULL; + size_t uiBufferOffset; + 
IMG_UINT64 uiAllocIndex; + IMG_DEVMEM_OFFSET_T uiInAllocOffset; + PVRSRV_ERROR eError; + + psLMAllocArrayData = pvPriv; + + uiBytesCopied = 0; + uiBytesToCopy = uiBufSz; + uiBufferOffset = 0; + + if (psLMAllocArrayData->uiTotalNumPages > 1) + { + while (uiBytesToCopy > 0) + { + /* we have to map one alloc in at a time */ + PVR_ASSERT(psLMAllocArrayData->uiLog2AllocSize != 0); + uiAllocIndex = uiOffset >> psLMAllocArrayData->uiLog2AllocSize; + uiInAllocOffset = uiOffset - (uiAllocIndex << psLMAllocArrayData->uiLog2AllocSize); + uiBytesCopyableFromAlloc = uiBytesToCopy; + if (uiBytesCopyableFromAlloc + uiInAllocOffset > (1ULL << psLMAllocArrayData->uiLog2AllocSize)) + { + uiBytesCopyableFromAlloc = TRUNCATE_64BITS_TO_SIZE_T((1ULL << psLMAllocArrayData->uiLog2AllocSize)-uiInAllocOffset); + } + + PVR_ASSERT(uiBytesCopyableFromAlloc != 0); + PVR_ASSERT(uiAllocIndex < psLMAllocArrayData->uiTotalNumPages); + PVR_ASSERT(uiInAllocOffset < (1ULL << psLMAllocArrayData->uiLog2AllocSize)); + + eError = _MapAlloc(psLMAllocArrayData->psDevNode, + &psLMAllocArrayData->pasDevPAddr[uiAllocIndex], + psLMAllocArrayData->uiContigAllocSize, + psLMAllocArrayData->bFwLocalAlloc, + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED, + &pvMapping); + PVR_GOTO_IF_ERROR(eError, e0); + pcKernelPointer = pvMapping; + pfnCopyBytes(&pcBuffer[uiBufferOffset], &pcKernelPointer[uiInAllocOffset], uiBytesCopyableFromAlloc); + + _UnMapAlloc(psLMAllocArrayData->psDevNode, + psLMAllocArrayData->uiContigAllocSize, + psLMAllocArrayData->bFwLocalAlloc, + 0, + pvMapping); + + uiBufferOffset += uiBytesCopyableFromAlloc; + uiBytesToCopy -= uiBytesCopyableFromAlloc; + uiOffset += uiBytesCopyableFromAlloc; + uiBytesCopied += uiBytesCopyableFromAlloc; + } + } + else + { + PVR_ASSERT((uiOffset + uiBufSz) <= psLMAllocArrayData->uiContigAllocSize); + PVR_ASSERT(psLMAllocArrayData->uiContigAllocSize != 0); + eError = _MapAlloc(psLMAllocArrayData->psDevNode, + &psLMAllocArrayData->pasDevPAddr[0], + 
psLMAllocArrayData->uiContigAllocSize, + psLMAllocArrayData->bFwLocalAlloc, + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED, + &pvMapping); + PVR_GOTO_IF_ERROR(eError, e0); + pcKernelPointer = pvMapping; + pfnCopyBytes(pcBuffer, &pcKernelPointer[uiOffset], uiBufSz); + + _UnMapAlloc(psLMAllocArrayData->psDevNode, + psLMAllocArrayData->uiContigAllocSize, + psLMAllocArrayData->bFwLocalAlloc, + 0, + pvMapping); + + uiBytesCopied = uiBufSz; + } + *puiNumBytes = uiBytesCopied; + return PVRSRV_OK; +e0: + *puiNumBytes = uiBytesCopied; + return eError; +} + +static void ReadLocalMem(IMG_UINT8 *pcBuffer, + IMG_UINT8 *pcPMR, + size_t uiSize) +{ + /* NOTE: 'CachedMemCopy' means the operating system default memcpy, which + * we *assume* in the LMA code will be faster, and doesn't need to + * worry about ARM64. + */ + OSCachedMemCopy(pcBuffer, pcPMR, uiSize); +} + +static PVRSRV_ERROR +PMRReadBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes) +{ + return CopyBytesLocalMem(pvPriv, + uiOffset, + pcBuffer, + uiBufSz, + puiNumBytes, + ReadLocalMem); +} + +static void WriteLocalMem(IMG_UINT8 *pcBuffer, + IMG_UINT8 *pcPMR, + size_t uiSize) +{ + /* NOTE: 'CachedMemCopy' means the operating system default memcpy, which + * we *assume* in the LMA code will be faster, and doesn't need to + * worry about ARM64. + */ + OSCachedMemCopy(pcPMR, pcBuffer, uiSize); +} + +static PVRSRV_ERROR +PMRWriteBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes) +{ + return CopyBytesLocalMem(pvPriv, + uiOffset, + pcBuffer, + uiBufSz, + puiNumBytes, + WriteLocalMem); +} + +/*************************************************************************/ /*! +@Function PMRChangeSparseMemLocalMem +@Description This function Changes the sparse mapping by allocating and + freeing of pages. It also changes the GPU maps accordingly. 
+@Return PVRSRV_ERROR failure code +*/ /**************************************************************************/ +static PVRSRV_ERROR +PMRChangeSparseMemLocalMem(PMR_IMPL_PRIVDATA pPriv, + const PMR *psPMR, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_UINT32 uiFlags) +{ + PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS; + + IMG_UINT32 ui32AdtnlAllocPages = 0; + IMG_UINT32 ui32AdtnlFreePages = 0; + IMG_UINT32 ui32CommonRequstCount = 0; + IMG_UINT32 ui32Loop = 0; + IMG_UINT32 ui32Index = 0; + IMG_UINT32 uiAllocpgidx; + IMG_UINT32 uiFreepgidx; + + PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv; + IMG_DEV_PHYADDR sPhyAddr; + +#if defined(DEBUG) + IMG_BOOL bPoisonFail = IMG_FALSE; + IMG_BOOL bZeroFail = IMG_FALSE; +#endif + + /* Fetch the Page table array represented by the PMR */ + IMG_DEV_PHYADDR *psPageArray = psPMRPageArrayData->pasDevPAddr; + PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappigTable(psPMR); + + /* The incoming request is classified into two operations independent of + * each other: alloc & free pages. + * These operations can be combined with two mapping operations as well + * which are GPU & CPU space mappings. + * + * From the alloc and free page requests, the net amount of pages to be + * allocated or freed is computed. Pages that were requested to be freed + * will be reused to fulfil alloc requests. + * + * The order of operations is: + * 1. Allocate new pages from the OS + * 2. Move the free pages from free request to alloc positions. + * 3. Free the rest of the pages not used for alloc + * + * Alloc parameters are validated at the time of allocation + * and any error will be handled then. */ + + if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH)) + { + ui32CommonRequstCount = (ui32AllocPageCount > ui32FreePageCount) ? 
+ ui32FreePageCount : ui32AllocPageCount; + + PDUMP_PANIC(SPARSEMEM_SWAP, "Request to swap alloc & free pages not supported"); + } + + if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC)) + { + ui32AdtnlAllocPages = ui32AllocPageCount - ui32CommonRequstCount; + } + else + { + ui32AllocPageCount = 0; + } + + if (SPARSE_RESIZE_FREE == (uiFlags & SPARSE_RESIZE_FREE)) + { + ui32AdtnlFreePages = ui32FreePageCount - ui32CommonRequstCount; + } + else + { + ui32FreePageCount = 0; + } + + PVR_LOG_RETURN_IF_FALSE( + (ui32CommonRequstCount | ui32AdtnlAllocPages | ui32AdtnlFreePages) != 0, + "Invalid combination of parameters: ui32CommonRequstCount," + " ui32AdtnlAllocPages and ui32AdtnlFreePages.", + PVRSRV_ERROR_INVALID_PARAMS + ); + + { + /* Validate the free page indices */ + if (ui32FreePageCount) + { + if (NULL != pai32FreeIndices) + { + for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++) + { + uiFreepgidx = pai32FreeIndices[ui32Loop]; + + if (uiFreepgidx > psPMRPageArrayData->uiTotalNumPages) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, e0); + } + + if (INVALID_PAGE_ADDR == psPageArray[uiFreepgidx].uiAddr) + { + PVR_LOG_GOTO_WITH_ERROR("psPageArray[uiFreepgidx].uiAddr", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); + } + } + }else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Given non-zero free count but missing indices array", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + + /*The following block of code verifies any issues with common alloc page indices */ + for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++) + { + uiAllocpgidx = pai32AllocIndices[ui32Loop]; + if (uiAllocpgidx > psPMRPageArrayData->uiTotalNumPages) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, e0); + } + + if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM)) + { + if ((INVALID_PAGE_ADDR != psPageArray[uiAllocpgidx].uiAddr) || + (TRANSLATION_INVALID != psPMRMapTable->aui32Translation[uiAllocpgidx])) + { + 
PVR_LOG_GOTO_WITH_ERROR("Trying to allocate already allocated page again", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); + } + } + else + { + if ((INVALID_PAGE_ADDR == psPageArray[uiAllocpgidx].uiAddr) || + (TRANSLATION_INVALID == psPMRMapTable->aui32Translation[uiAllocpgidx])) + { + PVR_LOG_GOTO_WITH_ERROR("Unable to remap memory due to missing page", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); + } + } + } + + + ui32Loop = 0; + + /* Allocate new pages */ + if (0 != ui32AdtnlAllocPages) + { + /* Say how many pages to allocate */ + psPMRPageArrayData->uiPagesToAlloc = ui32AdtnlAllocPages; + + eError = _AllocLMPages(psPMRPageArrayData, pai32AllocIndices); + PVR_LOG_GOTO_IF_ERROR(eError, "_AllocLMPages", e0); + + /* Mark the corresponding pages of translation table as valid */ + for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++) + { + psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop]; + } + + psPMRMapTable->ui32NumPhysChunks += ui32AdtnlAllocPages; + } + + ui32Index = ui32Loop; + + /* Move the corresponding free pages to alloc request */ + for (ui32Loop = 0; ui32Loop < ui32CommonRequstCount; ui32Loop++, ui32Index++) + { + + uiAllocpgidx = pai32AllocIndices[ui32Index]; + uiFreepgidx = pai32FreeIndices[ui32Loop]; + sPhyAddr = psPageArray[uiAllocpgidx]; + psPageArray[uiAllocpgidx] = psPageArray[uiFreepgidx]; + + /* Is remap mem used in real world scenario? Should it be turned to a + * debug feature? 
The condition check needs to be out of loop, will be + * done at later point though after some analysis */ + if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM)) + { + psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID; + psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx; + psPageArray[uiFreepgidx].uiAddr = INVALID_PAGE_ADDR; + } + else + { + psPageArray[uiFreepgidx] = sPhyAddr; + psPMRMapTable->aui32Translation[uiFreepgidx] = uiFreepgidx; + psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx; + } + + /* Be sure to honour the attributes associated with the allocation + * such as zeroing, poisoning etc. */ + if (psPMRPageArrayData->bPoisonOnAlloc) + { + eError = _PoisonAlloc(psPMRPageArrayData->psDevNode, + &psPMRPageArrayData->pasDevPAddr[uiAllocpgidx], + psPMRPageArrayData->bFwLocalAlloc, + psPMRPageArrayData->uiContigAllocSize, + PVRSRV_POISON_ON_ALLOC_VALUE); + + /* Consider this as a soft failure and go ahead but log error to kernel log */ + if (eError != PVRSRV_OK) + { +#if defined(DEBUG) + bPoisonFail = IMG_TRUE; +#endif + } + } + else + { + if (psPMRPageArrayData->bZeroOnAlloc) + { + eError = _ZeroAlloc(psPMRPageArrayData->psDevNode, + &psPMRPageArrayData->pasDevPAddr[uiAllocpgidx], + psPMRPageArrayData->bFwLocalAlloc, + psPMRPageArrayData->uiContigAllocSize); + /* Consider this as a soft failure and go ahead but log error to kernel log */ + if (eError != PVRSRV_OK) + { +#if defined(DEBUG) + /*Don't think we need to zero any pages further*/ + bZeroFail = IMG_TRUE; +#endif + } + } + } + } + + /*Free the additional free pages */ + if (0 != ui32AdtnlFreePages) + { + ui32Index = ui32Loop; + _FreeLMPages(psPMRPageArrayData, &pai32FreeIndices[ui32Loop], ui32AdtnlFreePages); + ui32Loop = 0; + + while (ui32Loop++ < ui32AdtnlFreePages) + { + /*Set the corresponding mapping table entry to invalid address */ + psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Index++]] = TRANSLATION_INVALID; + } + + 
psPMRMapTable->ui32NumPhysChunks -= ui32AdtnlFreePages; + } + + } + +#if defined(DEBUG) + if (IMG_TRUE == bPoisonFail) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Error in poisoning the page", __func__)); + } + + if (IMG_TRUE == bZeroFail) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Error in zeroing the page", __func__)); + } +#endif + + /* Update the PMR memory holding information */ + eError = PVRSRV_OK; + +e0: + return eError; +} + +/*************************************************************************/ /*! +@Function PMRChangeSparseMemCPUMapLocalMem +@Description This function Changes CPU maps accordingly +@Return PVRSRV_ERROR failure code +*/ /**************************************************************************/ +static +PVRSRV_ERROR PMRChangeSparseMemCPUMapLocalMem(PMR_IMPL_PRIVDATA pPriv, + const PMR *psPMR, + IMG_UINT64 sCpuVAddrBase, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices) +{ + PVRSRV_ERROR eError; + IMG_DEV_PHYADDR *psPageArray; + PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv; + uintptr_t sCpuVABase = sCpuVAddrBase; + IMG_CPU_PHYADDR sCpuAddrPtr; + IMG_BOOL bValid = IMG_FALSE; + + /*Get the base address of the heap */ + eError = PMR_CpuPhysAddr(psPMR, + psPMRPageArrayData->uiLog2AllocSize, + 1, + 0, /* offset zero here mean first page in the PMR */ + &sCpuAddrPtr, + &bValid); + PVR_LOG_RETURN_IF_ERROR(eError, "PMR_CpuPhysAddr"); + + /* Phys address of heap is computed here by subtracting the offset of this page + * basically phys address of any page = Base address of heap + offset of the page */ + sCpuAddrPtr.uiAddr -= psPMRPageArrayData->pasDevPAddr[0].uiAddr; + psPageArray = psPMRPageArrayData->pasDevPAddr; + + return OSChangeSparseMemCPUAddrMap((void **)psPageArray, + sCpuVABase, + sCpuAddrPtr, + ui32AllocPageCount, + pai32AllocIndices, + ui32FreePageCount, + pai32FreeIndices, + IMG_TRUE); +} + +static PMR_IMPL_FUNCTAB _sPMRLMAFuncTab = { + /* 
pfnLockPhysAddresses */ + &PMRLockSysPhysAddressesLocalMem, + /* pfnUnlockPhysAddresses */ + &PMRUnlockSysPhysAddressesLocalMem, + /* pfnDevPhysAddr */ + &PMRSysPhysAddrLocalMem, + /* pfnAcquireKernelMappingData */ + &PMRAcquireKernelMappingDataLocalMem, + /* pfnReleaseKernelMappingData */ + &PMRReleaseKernelMappingDataLocalMem, +#if defined(INTEGRITY_OS) + /* pfnMapMemoryObject */ + NULL, + /* pfnUnmapMemoryObject */ + NULL, +#endif + /* pfnReadBytes */ + &PMRReadBytesLocalMem, + /* pfnWriteBytes */ + &PMRWriteBytesLocalMem, + /* pfnUnpinMem */ + NULL, + /* pfnPinMem */ + NULL, + /* pfnChangeSparseMem*/ + &PMRChangeSparseMemLocalMem, + /* pfnChangeSparseMemCPUMap */ + &PMRChangeSparseMemCPUMapLocalMem, + /* pfnMMap */ + NULL, + /* pfnFinalize */ + &PMRFinalizeLocalMem +}; + +PVRSRV_ERROR +PhysmemNewLocalRamBackedPMR(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 uiLog2AllocPageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + PVRSRV_ERROR eError2; + PMR *psPMR = NULL; + PMR_LMALLOCARRAY_DATA *psPrivData = NULL; + PMR_FLAGS_T uiPMRFlags; + PHYS_HEAP *psPhysHeap; + IMG_BOOL bZero; + IMG_BOOL bPoisonOnAlloc; + IMG_BOOL bPoisonOnFree; + IMG_BOOL bOnDemand; + IMG_BOOL bContig; + IMG_BOOL bFwLocalAlloc; + IMG_BOOL bCpuLocalAlloc; + FIRMWARE_ALLOC_TYPE eFwAllocType; + IMG_UINT32 ui32OSid; + + /* For sparse requests we have to do the allocation + * in chunks rather than requesting one contiguous block */ + if (ui32NumPhysChunks != ui32NumVirtChunks || ui32NumVirtChunks > 1) + { + if (PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: LMA kernel mapping functions currently " + "don't work with discontiguous memory.", + __func__)); + 
PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, errorOnParam); + } + bContig = IMG_FALSE; + } + else + { + bContig = IMG_TRUE; + } + + bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiFlags) ? IMG_TRUE : IMG_FALSE; + bCpuLocalAlloc = PVRSRV_CHECK_CPU_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE; + bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE; + bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE; + bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags) ? IMG_TRUE : IMG_FALSE; + bFwLocalAlloc = PVRSRV_CHECK_FW_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE; + eFwAllocType = PVRSRV_FW_ALLOC_TYPE(uiFlags); + ui32OSid = PVRSRV_FW_RAW_ALLOC_OSID(uiFlags); + + if (bFwLocalAlloc) + { + psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]; + } + else if (bCpuLocalAlloc) + { + psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]; + } + else + { + psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]; + } + + /* Create Array structure that holds the physical pages */ + eError = _AllocLMPageArray(psDevNode, + uiChunkSize * ui32NumVirtChunks, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + uiLog2AllocPageSize, + bZero, + bPoisonOnAlloc, + bPoisonOnFree, + bContig, + bOnDemand, + bFwLocalAlloc, + eFwAllocType, + ui32OSid, + psPhysHeap, + uiFlags, + uiPid, + &psPrivData, + psConnection); + PVR_GOTO_IF_ERROR(eError, errorOnAllocPageArray); + + if (!bOnDemand) + { + /* Allocate the physical pages */ + eError = _AllocLMPages(psPrivData, pui32MappingTable); + PVR_GOTO_IF_ERROR(eError, errorOnAllocPages); + } + + /* In this instance, we simply pass flags straight through. + + Generically, uiFlags can include things that control the PMR + factory, but we don't need any such thing (at the time of + writing!), and our caller specifies all PMR flags so we don't + need to meddle with what was given to us. 
+ */ + uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK); + /* check no significant bits were lost in cast due to different + bit widths for flags */ + PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK)); + + if (bOnDemand) + { + PDUMPCOMMENT("Deferred Allocation PMR (LMA)"); + } + + eError = PMRCreatePMR(psDevNode, + psPhysHeap, + uiSize, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + uiLog2AllocPageSize, + uiPMRFlags, + pszAnnotation, + &_sPMRLMAFuncTab, + psPrivData, + PMR_TYPE_LMA, + &psPMR, + ui32PDumpFlags); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRCreatePMR", errorOnCreate); + + *ppsPMRPtr = psPMR; + return PVRSRV_OK; + +errorOnCreate: + if (!bOnDemand && psPrivData->iNumPagesAllocated) + { + eError2 = _FreeLMPages(psPrivData, NULL, 0); + PVR_ASSERT(eError2 == PVRSRV_OK); + } + +errorOnAllocPages: + eError2 = _FreeLMPageArray(psPrivData); + PVR_ASSERT(eError2 == PVRSRV_OK); + +errorOnAllocPageArray: +errorOnParam: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/common/physmem_tdfwmem.c b/drivers/mcst/gpu-imgtec/services/server/common/physmem_tdfwmem.c new file mode 100644 index 000000000000..15b9035cf960 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/physmem_tdfwmem.c @@ -0,0 +1,345 @@ +/*************************************************************************/ /*! +@File +@Title PMR functions for Trusted Device firmware memory +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Part of the memory management. This module is responsible for + implementing the function callbacks for physical memory + imported from a trusted environment. The driver cannot acquire + CPU mappings for this secure memory. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pvr_debug.h" +#include "pvrsrv.h" +#include "physmem_tdfwmem.h" +#include "physheap.h" +#include "rgxdevice.h" + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +#include "ri_server.h" +#endif + + +#if !defined(SUPPORT_SECURITY_VALIDATION) + +typedef struct _PMR_TDFWMEM_DATA_ { + PHYS_HEAP *psTDFWMemPhysHeap; + IMG_CPU_PHYADDR sCpuPAddr; + IMG_DEV_PHYADDR sDevPAddr; + PMR_LOG2ALIGN_T uiLog2Align; + IMG_UINT64 ui64Size; +} PMR_TDFWMEM_DATA; + + +/* + * Implementation of callback functions + */ + +static PVRSRV_ERROR PMRSysPhysAddrTDFWMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T *puiOffset, + IMG_BOOL *pbValid, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PMR_TDFWMEM_DATA *psPrivData = pvPriv; + IMG_UINT32 i; + + if (psPrivData->uiLog2Align != ui32Log2PageSize) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Incompatible contiguity (requested %u, got %u)", + __func__, ui32Log2PageSize, psPrivData->uiLog2Align)); + return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY; + } + + for (i = 0; i < ui32NumOfPages; i++) + { + if (pbValid[i]) + { + psDevPAddr[i].uiAddr = psPrivData->sDevPAddr.uiAddr + puiOffset[i]; + } + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR PMRFinalizeTDFWMem(PMR_IMPL_PRIVDATA pvPriv) +{ + PMR_TDFWMEM_DATA *psPrivData = NULL; + + psPrivData = pvPriv; + PhysHeapRelease(psPrivData->psTDFWMemPhysHeap); + OSFreeMem(psPrivData); + + return PVRSRV_OK; +} + +static PMR_IMPL_FUNCTAB _sPMRTDFWMemFuncTab = { + .pfnDevPhysAddr = &PMRSysPhysAddrTDFWMem, + .pfnFinalize = &PMRFinalizeTDFWMem, +}; + + +/* + * Public functions + */ +PVRSRV_ERROR PhysmemNewTDFWMemPMR(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T uiSize, + PMR_LOG2ALIGN_T uiLog2Align, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + PVRSRV_TD_FW_MEM_REGION eRegion, + PMR **ppsPMRPtr) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = 
psDevNode->psDevConfig; + RGX_DATA *psRGXData = (RGX_DATA *)(psDevConfig->hDevData); + PMR_TDFWMEM_DATA *psPrivData = NULL; + PMR *psPMR = NULL; + IMG_UINT32 uiMappingTable = 0; + PMR_FLAGS_T uiPMRFlags; + IMG_UINT32 ui32CacheLineSize = 0; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, SLC_CACHE_LINE_SIZE_BITS)); + + /* In this instance, we simply pass flags straight through. + * Generically, uiFlags can include things that control the PMR + * factory, but we don't need any such thing (at the time of + * writing!), and our caller specifies all PMR flags so we don't + * need to meddle with what was given to us. + */ + uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK); + + /* Check no significant bits were lost in cast due to different bit widths for flags */ + PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK)); + + /* Many flags can be dropped as the driver cannot access this memory + * and it is assumed that the trusted zone is physically contiguous + */ + uiPMRFlags &= ~(PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | + PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | + PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING); + + psPrivData = OSAllocZMem(sizeof(PMR_TDFWMEM_DATA)); + PVR_GOTO_IF_NOMEM(psPrivData, eError, errorOnAllocData); + + /* Get required info for the TD Meta Code physical heap */ + if (!psRGXData->bHasTDFWMemPhysHeap) + { + PVR_LOG_GOTO_WITH_ERROR("psRGXData->bHasTDFWMemPhysHeap", + eError, PVRSRV_ERROR_REQUEST_TDFWMEM_PAGES_FAIL, errorOnAcquireHeap); + } + + eError = PhysHeapAcquire(psRGXData->uiTDFWMemPhysHeapID, + &psPrivData->psTDFWMemPhysHeap); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Could not acquire secure physical heap %u", + __func__, psRGXData->uiTDFWMemPhysHeapID)); 
+ goto errorOnAcquireHeap; + } + + eError = PhysHeapRegionGetCpuPAddr(psPrivData->psTDFWMemPhysHeap, + eRegion, + &psPrivData->sCpuPAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapRegionGetCpuPAddr", errorOnValidateParams); + + if ((((1ULL << uiLog2Align) - 1) & psPrivData->sCpuPAddr.uiAddr) != 0) + { + PVR_DPF((PVR_DBG_ERROR, + "Trusted Device physical heap has the wrong alignment! " + "Physical address 0x%llx, alignment mask 0x%llx", + (unsigned long long) psPrivData->sCpuPAddr.uiAddr, + ((1ULL << uiLog2Align) - 1))); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_REQUEST_TDFWMEM_PAGES_FAIL, errorOnValidateParams); + } + + eError = PhysHeapRegionGetSize(psPrivData->psTDFWMemPhysHeap, + eRegion, + &psPrivData->ui64Size); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapRegionGetSize", errorOnValidateParams); + + if (uiSize > psPrivData->ui64Size) + { + PVR_DPF((PVR_DBG_ERROR, "Trusted Device physical heap not big enough! Required %llu, available %llu", + uiSize, psPrivData->ui64Size)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_REQUEST_TDFWMEM_PAGES_FAIL, errorOnValidateParams); + } + + PhysHeapCpuPAddrToDevPAddr(psPrivData->psTDFWMemPhysHeap, + 1, + &psPrivData->sDevPAddr, + &psPrivData->sCpuPAddr); + + /* Check that the FW memory is aligned to a Rogue cache line */ + if (ui32CacheLineSize > 0 && + (psPrivData->sDevPAddr.uiAddr & (ui32CacheLineSize - 1)) != 0) + { + PVR_LOG_GOTO_WITH_ERROR("Trusted Device physical heap not aligned to a Rogue cache line", + eError, PVRSRV_ERROR_REQUEST_TDFWMEM_PAGES_FAIL, errorOnValidateParams); + } + + /* + * uiLog2Align is only used to check the alignment of the secure memory + * region. The page size is still determined by the OS, and we expect the + * number of pages from TDGetTDFWMemParams to have the same granularity. 
+ */ + psPrivData->uiLog2Align = OSGetPageShift(); + + eError = PMRCreatePMR(psDevNode, + psPrivData->psTDFWMemPhysHeap, + psPrivData->ui64Size, + psPrivData->ui64Size, + 1, /* ui32NumPhysChunks */ + 1, /* ui32NumVirtChunks */ + &uiMappingTable, /* pui32MappingTable (not used) */ + uiLog2Align, /* uiLog2ContiguityGuarantee */ + uiPMRFlags, + "TDFWMEM_PMR", + &_sPMRTDFWMemFuncTab, + psPrivData, + PMR_TYPE_TDFWMEM, + &psPMR, + PDUMP_NONE); + PVR_GOTO_IF_ERROR(eError, errorOnCreatePMR); + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + eError = RIWritePMREntryKM(psPMR); + PVR_WARN_IF_ERROR(eError, "RIWritePMREntryKM"); +#endif + + *ppsPMRPtr = psPMR; + return PVRSRV_OK; + +errorOnCreatePMR: +errorOnValidateParams: + PhysHeapRelease(psPrivData->psTDFWMemPhysHeap); + +errorOnAcquireHeap: + OSFreeMem(psPrivData); + +errorOnAllocData: + PVR_ASSERT(eError != PVRSRV_OK); + + return eError; +} + +#else /* !defined(SUPPORT_SECURITY_VALIDATION) */ + +#include "physmem.h" + +/* + * Public functions + */ +PVRSRV_ERROR PhysmemNewTDFWMemPMR(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T uiSize, + PMR_LOG2ALIGN_T uiLog2Align, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + PVRSRV_TD_FW_MEM_REGION eRegion, + PMR **ppsPMRPtr) +{ + PMR *psPMR = NULL; + IMG_UINT32 uiLog2AllocPageSize = OSGetPageShift(); + IMG_UINT32 uiMappingTable = 0; + PMR_FLAGS_T uiPMRFlags; + PVRSRV_ERROR eError; + + /* In this instance, we simply pass flags straight through. + * Generically, uiFlags can include things that control the PMR + * factory, but we don't need any such thing (at the time of + * writing!), and our caller specifies all PMR flags so we don't + * need to meddle with what was given to us. 
+ */ + uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK); + + /* Check no significant bits were lost in cast due to different bit widths for flags */ + PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK)); + + /* Add validation flag */ + if (eRegion == PVRSRV_DEVICE_FW_CODE_REGION || eRegion == PVRSRV_DEVICE_FW_COREMEM_CODE_REGION) + { + uiPMRFlags |= PVRSRV_MEMALLOCFLAG_VAL_SECURE_FW_CODE; + } + else if (eRegion == PVRSRV_DEVICE_FW_PRIVATE_DATA_REGION || eRegion == PVRSRV_DEVICE_FW_COREMEM_DATA_REGION) + { + uiPMRFlags |= PVRSRV_MEMALLOCFLAG_VAL_SECURE_FW_DATA; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid TD FW memory region %u", __func__, eRegion)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto errorOnCreatePMR; + } + + /* Align allocation size to page size */ + uiSize = PVR_ALIGN(uiSize, 1 << uiLog2AllocPageSize); + + eError = PhysmemNewRamBackedPMR(psConnection, + psDevNode, + uiSize, + uiSize, + 1, /* ui32NumPhysChunks */ + 1, /* ui32NumVirtChunks */ + &uiMappingTable, + uiLog2AllocPageSize, + uiPMRFlags, + sizeof("TDFWMEM"), + "TDFWMEM", + OSGetCurrentClientProcessIDKM(), + &psPMR, + PDUMP_NONE); + PVR_GOTO_IF_ERROR(eError, errorOnCreatePMR); + + /* All the PMR callbacks will be redirected to the internal LMA PMR */ + + *ppsPMRPtr = psPMR; + return PVRSRV_OK; + +errorOnCreatePMR: + PVR_ASSERT(eError != PVRSRV_OK); + + return eError; +} + +#endif /* !defined(SUPPORT_SECURITY_VALIDATION) */ diff --git a/drivers/mcst/gpu-imgtec/services/server/common/pmr.c b/drivers/mcst/gpu-imgtec/services/server/common/pmr.c new file mode 100644 index 000000000000..4747520241d8 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/pmr.c @@ -0,0 +1,3500 @@ +/*************************************************************************/ /*! +@File +@Title Physmem (PMR) abstraction +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Part of the memory management. 
This module is responsible for + the "PMR" abstraction. A PMR (Physical Memory Resource) + represents some unit of physical memory which is + allocated/freed/mapped/unmapped as an indivisible unit + (higher software levels provide an abstraction above that + to deal with dividing this down into smaller manageable units). + Importantly, this module knows nothing of virtual memory, or + of MMUs etc., with one excusable exception. We have the + concept of a "page size", which really means nothing in + physical memory, but represents a "contiguity quantum" such + that the higher level modules which map this memory are able + to verify that it matches the needs of the page size for the + virtual realm into which it is being mapped. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ /**************************************************************************/ + +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" + +#include "pdump.h" +#include "devicemem_server_utils.h" + +#include "osfunc.h" +#include "pdump_km.h" +#include "pdump_physmem.h" +#include "pmr_impl.h" +#include "pmr_os.h" +#include "pvrsrv.h" + +#include "allocmem.h" +#include "lock.h" + +#if defined(SUPPORT_SECURE_EXPORT) +#include "secure_export.h" +#include "ossecure_export.h" +#endif + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +#include "ri_server.h" +#endif + +/* ourselves */ +#include "pmr.h" + +#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) +#include "mmap_stats.h" +#endif + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#include "proc_stats.h" +#endif + +/* A "context" for the physical memory block resource allocator. + * + * Context is probably the wrong word. + * + * There is almost certainly only one of these, ever, in the system. + * But, let's keep the notion of a context anyway, "just-in-case". + */ +static struct _PMR_CTX_ +{ + /* For debugging, and PDump, etc., let's issue a forever incrementing + * serial number to each allocation. 
+ */ + IMG_UINT64 uiNextSerialNum; + + /* For security, we only allow a PMR to be mapped if the caller knows + * its key. We can pseudo-randomly generate keys + */ + IMG_UINT64 uiNextKey; + + /* For debugging only, I guess: Number of live PMRs */ + IMG_UINT32 uiNumLivePMRs; + + /* Lock for this structure */ + POS_LOCK hLock; + + /* In order to seed the uiNextKey, we enforce initialisation at driver + * load time. Also, we can debug check at driver unload that the PMR + * count is zero. + */ + IMG_BOOL bModuleInitialised; +} _gsSingletonPMRContext = { 1, 0, 0, NULL, IMG_FALSE }; + + +/* A PMR. One per physical allocation. May be "shared". + * + * "shared" is ambiguous. We need to be careful with terminology. + * There are two ways in which a PMR may be "shared" and we need to be sure + * that we are clear which we mean. + * + * i) multiple small allocations living together inside one PMR. + * + * ii) one single allocation filling a PMR but mapped into multiple memory + * contexts. + * + * This is more important further up the stack - at this level, all we care is + * that the PMR is being referenced multiple times. + */ +struct _PMR_ +{ + /* This object is strictly refcounted. References include: + * - mapping + * - live handles (to this object) + * - live export handles + * (thus it is normal for allocated and exported memory to have a refcount of 3) + * The object is destroyed when and only when the refcount reaches 0 + */ + + /* Device node on which this PMR was created and is valid */ + PVRSRV_DEVICE_NODE *psDevNode; + + /* Physical address translation (device <> cpu) is done on a per device + * basis which means we need the physical heap info + */ + PHYS_HEAP *psPhysHeap; + + ATOMIC_T iRefCount; + + /* Lock count - this is the number of times PMRLockSysPhysAddresses() + * has been called, less the number of PMRUnlockSysPhysAddresses() + * calls. This is arguably here for debug reasons only, as the refcount + * is already incremented as a matter of course. 
+ * Really, this just allows us to trap protocol errors: i.e. calling + * PMRSysPhysAddr(), without a lock, or calling + * PMRUnlockSysPhysAddresses() too many or too few times. + */ + ATOMIC_T iLockCount; + + /* Lock for this structure */ + POS_LOCK hLock; + + /* Incrementing serial number to each allocation. */ + IMG_UINT64 uiSerialNum; + + /* For security, we only allow a PMR to be mapped if the caller knows + * its key. We can pseudo-randomly generate keys + */ + PMR_PASSWORD_T uiKey; + + /* Callbacks for per-flavour functions */ + const PMR_IMPL_FUNCTAB *psFuncTab; + + /* Data associated with the "subtype" */ + PMR_IMPL_PRIVDATA pvFlavourData; + + /* What kind of PMR do we have? */ + PMR_IMPL_TYPE eFlavour; + + /* And for pdump */ + const IMG_CHAR *pszPDumpDefaultMemspaceName; + + /* Allocation annotation */ + IMG_CHAR szAnnotation[DEVMEM_ANNOTATION_MAX_LEN]; + +#if defined(PDUMP) + + IMG_HANDLE hPDumpAllocHandle; + + IMG_UINT32 uiNumPDumpBlocks; +#endif + + /* Logical size of allocation. "logical", because a PMR can represent + * memory that will never physically exist. This is the amount of + * virtual space that the PMR would consume when it's mapped into a + * virtual allocation. + */ + PMR_SIZE_T uiLogicalSize; + + /* Mapping table for the allocation. + * PMR's can be sparse in which case not all the "logic" addresses in + * it are valid. We need to know which addresses are and aren't valid + * when mapping or reading the PMR. + * The mapping table translates "logical" offsets into physical offsets + * which is what we always pass to the PMR factory (so it doesn't have + * to be concerned about sparseness issues) + */ + PMR_MAPPING_TABLE *psMappingTable; + + /* Indicates whether this PMR has been allocated as sparse. + * The condition for this variable to be set at allocation time is: + * (numVirtChunks != numPhysChunks) || (numVirtChunks > 1) + */ + IMG_BOOL bSparseAlloc; + + /* Indicates whether this PMR has been unpinned. 
+ * By default, all PMRs are pinned at creation. + */ + IMG_BOOL bIsUnpinned; + + /* Minimum Physical Contiguity Guarantee. Might be called "page size", + * but that would be incorrect, as page size is something meaningful + * only in virtual realm. This contiguity guarantee provides an + * inequality that can be verified/asserted/whatever to ensure that + * this PMR conforms to the page size requirement of the place the PMR + * gets mapped. (May be used to select an appropriate heap in variable + * page size systems) + * + * The absolutely necessary condition is this: + * + * device MMU page size <= actual physical contiguity. + * + * We go one step further in order to be able to provide an early + * warning / early compatibility check and say this: + * + * device MMU page size <= + * 2**(uiLog2ContiguityGuarantee) <= + * actual physical contiguity. + * + * In this way, it is possible to make the page table reservation + * in the device MMU without even knowing the granularity of the + * physical memory (i.e. useful for being able to allocate virtual + * before physical) + */ + PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee; + + /* Flags. We store a copy of the "PMR flags" (usually a subset of the + * flags given at allocation time) and return them to any caller of + * PMR_Flags(). The intention of these flags is that the ones stored + * here are used to represent permissions, such that no one is able + * to map a PMR in a mode in which they are not allowed, e.g., + * writeable for a read-only PMR, etc. + */ + PMR_FLAGS_T uiFlags; + + /* Do we really need this? + * For now we'll keep it, until we know we don't. + * NB: this is not the "memory context" in client terms - this is + * _purely_ the "PMR" context, of which there is almost certainly only + * ever one per system as a whole, but we'll keep the concept anyway, + * just-in-case. 
+ */ + struct _PMR_CTX_ *psContext; + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + /* Stored handle to PMR RI entry */ + void *hRIHandle; +#endif +}; + +/* Do we need a struct for the export handle? + * I'll use one for now, but if nothing goes in it, we'll lose it + */ +struct _PMR_EXPORT_ +{ + struct _PMR_ *psPMR; +}; + +struct _PMR_PAGELIST_ +{ + struct _PMR_ *psReferencePMR; +}; + +PPVRSRV_DEVICE_NODE PMRGetExportDeviceNode(PMR_EXPORT *psExportPMR) +{ + PPVRSRV_DEVICE_NODE psReturnedDeviceNode = NULL; + + PVR_ASSERT(psExportPMR != NULL); + if (psExportPMR) + { + PVR_ASSERT(psExportPMR->psPMR != NULL); + if (psExportPMR->psPMR) + { + PVR_ASSERT(OSAtomicRead(&psExportPMR->psPMR->iRefCount) > 0); + if (OSAtomicRead(&psExportPMR->psPMR->iRefCount) > 0) + { + psReturnedDeviceNode = PMR_DeviceNode(psExportPMR->psPMR); + } + } + } + + return psReturnedDeviceNode; +} + +static PVRSRV_ERROR +_PMRCreate(PMR_SIZE_T uiLogicalSize, + PMR_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee, + PMR_FLAGS_T uiFlags, + PMR **ppsPMR) +{ + void *pvPMRLinAddr; + PMR *psPMR; + PMR_MAPPING_TABLE *psMappingTable; + struct _PMR_CTX_ *psContext; + IMG_UINT32 i, ui32Temp = 0; + IMG_UINT32 ui32Remainder; + PVRSRV_ERROR eError; + IMG_BOOL bSparse = IMG_FALSE; + + psContext = &_gsSingletonPMRContext; + + /* Do we have a sparse allocation? 
*/ + if ( (ui32NumVirtChunks != ui32NumPhysChunks) || + (ui32NumVirtChunks > 1) ) + { + bSparse = IMG_TRUE; + } + + /* Extra checks required for sparse PMRs */ + if (uiLogicalSize != uiChunkSize) + { + /* Check the logical size and chunk information agree with each other */ + if (uiLogicalSize != (uiChunkSize * ui32NumVirtChunks)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Bad mapping size (uiLogicalSize = 0x%llx, uiChunkSize = 0x%llx, ui32NumVirtChunks = %d)", + __func__, (unsigned long long)uiLogicalSize, (unsigned long long)uiChunkSize, ui32NumVirtChunks)); + return PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE; + } + + /* Check that the chunk size is a multiple of the contiguity */ + OSDivide64(uiChunkSize, (1<< uiLog2ContiguityGuarantee), &ui32Remainder); + if (ui32Remainder) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Bad chunk size, must be a multiple of the contiguity " + "(uiChunkSize = 0x%llx, uiLog2ContiguityGuarantee = %u)", + __func__, + (unsigned long long) uiChunkSize, + uiLog2ContiguityGuarantee)); + return PVRSRV_ERROR_PMR_BAD_CHUNK_SIZE; + } + } + + pvPMRLinAddr = OSAllocMem(sizeof(*psPMR) + sizeof(*psMappingTable) + sizeof(IMG_UINT32) * ui32NumVirtChunks); + PVR_RETURN_IF_NOMEM(pvPMRLinAddr); + + psPMR = (PMR *) pvPMRLinAddr; + psMappingTable = IMG_OFFSET_ADDR(pvPMRLinAddr, sizeof(*psPMR)); + + eError = OSLockCreate(&psPMR->hLock); + if (eError != PVRSRV_OK) + { + OSFreeMem(psPMR); + return eError; + } + + /* Setup the mapping table */ + psMappingTable->uiChunkSize = uiChunkSize; + psMappingTable->ui32NumVirtChunks = ui32NumVirtChunks; + psMappingTable->ui32NumPhysChunks = ui32NumPhysChunks; + OSCachedMemSet(&psMappingTable->aui32Translation[0], 0xFF, sizeof(psMappingTable->aui32Translation[0])* + ui32NumVirtChunks); + for (i=0; iaui32Translation[ui32Temp] = ui32Temp; + } + + /* Setup the PMR */ + OSAtomicWrite(&psPMR->iRefCount, 0); + + /* If allocation is not made on demand, it will be backed now and + * backing will not be removed until the PMR is destroyed, 
therefore + * we can initialise the iLockCount to 1 rather than 0. + */ + OSAtomicWrite(&psPMR->iLockCount, (PVRSRV_CHECK_ON_DEMAND(uiFlags) ? 0 : 1)); + + psPMR->psContext = psContext; + psPMR->uiLogicalSize = uiLogicalSize; + psPMR->uiLog2ContiguityGuarantee = uiLog2ContiguityGuarantee; + psPMR->uiFlags = uiFlags; + psPMR->psMappingTable = psMappingTable; + psPMR->bSparseAlloc = bSparse; + psPMR->bIsUnpinned = IMG_FALSE; + psPMR->szAnnotation[0] = '\0'; + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + psPMR->hRIHandle = NULL; +#endif + + OSLockAcquire(psContext->hLock); + psPMR->uiKey = psContext->uiNextKey; + psPMR->uiSerialNum = psContext->uiNextSerialNum; + psContext->uiNextKey = (0x80200003 * psContext->uiNextKey) + ^ (0xf00f0081 * (uintptr_t)pvPMRLinAddr); + psContext->uiNextSerialNum++; + *ppsPMR = psPMR; + PVR_DPF((PVR_DBG_MESSAGE, "pmr.c: created PMR @0x%p", psPMR)); + /* Increment live PMR count */ + psContext->uiNumLivePMRs++; + OSLockRelease(psContext->hLock); + + return PVRSRV_OK; +} + +/* This function returns true if the PMR is in use and false otherwise. 
+ * This function is not thread safe and hence the caller + * needs to ensure the thread safety by explicitly taking + * the lock on the PMR or through other means */ +IMG_BOOL PMRIsPMRLive(PMR *psPMR) +{ + return (OSAtomicRead(&psPMR->iRefCount) > 0); +} + +static IMG_UINT32 +_Ref(PMR *psPMR) +{ + PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) >= 0); + return OSAtomicIncrement(&psPMR->iRefCount); +} + +static IMG_UINT32 +_Unref(PMR *psPMR) +{ + PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) > 0); + return OSAtomicDecrement(&psPMR->iRefCount); +} + +static void +_UnrefAndMaybeDestroy(PMR *psPMR) +{ + PVRSRV_ERROR eError2; + struct _PMR_CTX_ *psCtx; + IMG_INT iRefCount; + + PVR_ASSERT(psPMR != NULL); + + /* Acquire PMR factory lock if provided */ + if (psPMR->psFuncTab->pfnGetPMRFactoryLock) + { + psPMR->psFuncTab->pfnGetPMRFactoryLock(); + } + + iRefCount = _Unref(psPMR); + + if (iRefCount == 0) + { + if (psPMR->psFuncTab->pfnFinalize != NULL) + { + eError2 = psPMR->psFuncTab->pfnFinalize(psPMR->pvFlavourData); + + /* PMR unref can be called asynchronously by the kernel or other + * third party modules (eg. display) which doesn't go through the + * usual services bridge. The same PMR can be referenced simultaneously + * in a different path that results in a race condition. + * Hence depending on the race condition, a factory may refuse to destroy + * the resource associated with this PMR if a reference on it was taken + * prior to unref. In that case the PMR factory function returns the error. + * + * When such an error is encountered, the factory needs to ensure the state + * associated with PMR is undisturbed. At this point we just bail out from + * freeing the PMR itself. The PMR handle will then be freed at a later point + * when the same PMR is unreferenced. 
+ * */ + if (PVRSRV_ERROR_PMR_STILL_REFERENCED == eError2) + { + if (psPMR->psFuncTab->pfnReleasePMRFactoryLock) + { + psPMR->psFuncTab->pfnReleasePMRFactoryLock(); + } + return; + } + PVR_ASSERT (eError2 == PVRSRV_OK); /* can we do better? */ + } +#if defined(PDUMP) + PDumpPMRFreePMR(psPMR, + psPMR->uiLogicalSize, + (1 << psPMR->uiLog2ContiguityGuarantee), + psPMR->uiLog2ContiguityGuarantee, + psPMR->hPDumpAllocHandle); +#endif + +#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) + /* This PMR is about to be destroyed, update its mmap stats record (if present) + * to avoid dangling pointer. Additionally, this is required because mmap stats + * are identified by PMRs and a new PMR down the line "might" get the same address + * as the one we're about to free and we'd like 2 different entries in mmaps + * stats for such cases */ + MMapStatsRemovePMR(psPMR); +#endif + +#ifdef PVRSRV_NEED_PVR_ASSERT + /* If not backed on demand, iLockCount should be 1 otherwise it should be 0 */ + PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1)); +#endif + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + { + PVRSRV_ERROR eError; + + /* Delete RI entry */ + if (psPMR->hRIHandle) + { + eError = RIDeletePMREntryKM (psPMR->hRIHandle); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: RIDeletePMREntryKM failed: %s", + __func__, + PVRSRVGetErrorString(eError))); + /* continue destroying the PMR */ + } + } + } +#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + psCtx = psPMR->psContext; + + OSLockDestroy(psPMR->hLock); + + /* Release PMR factory lock acquired if any */ + if (psPMR->psFuncTab->pfnReleasePMRFactoryLock) + { + psPMR->psFuncTab->pfnReleasePMRFactoryLock(); + } + + OSFreeMem(psPMR); + + /* Decrement live PMR count. 
Probably only of interest for debugging */ + PVR_ASSERT(psCtx->uiNumLivePMRs > 0); + + OSLockAcquire(psCtx->hLock); + psCtx->uiNumLivePMRs--; + OSLockRelease(psCtx->hLock); + } + else + { + /* Release PMR factory lock acquired if any */ + if (psPMR->psFuncTab->pfnReleasePMRFactoryLock) + { + psPMR->psFuncTab->pfnReleasePMRFactoryLock(); + } + } +} + +static IMG_BOOL _PMRIsSparse(const PMR *psPMR) +{ + return psPMR->bSparseAlloc; +} + +PVRSRV_ERROR +PMRCreatePMR(PVRSRV_DEVICE_NODE *psDevNode, + PHYS_HEAP *psPhysHeap, + PMR_SIZE_T uiLogicalSize, + PMR_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee, + PMR_FLAGS_T uiFlags, + const IMG_CHAR *pszAnnotation, + const PMR_IMPL_FUNCTAB *psFuncTab, + PMR_IMPL_PRIVDATA pvPrivData, + PMR_IMPL_TYPE eType, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags) +{ + PMR *psPMR = NULL; + PVRSRV_ERROR eError; + + eError = _PMRCreate(uiLogicalSize, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + uiLog2ContiguityGuarantee, + uiFlags, + &psPMR); + PVR_GOTO_IF_ERROR(eError, e0); + + psPMR->psDevNode = psDevNode; + psPMR->psPhysHeap = psPhysHeap; + psPMR->psFuncTab = psFuncTab; + psPMR->pszPDumpDefaultMemspaceName = PhysHeapPDumpMemspaceName(psPhysHeap); + psPMR->pvFlavourData = pvPrivData; + psPMR->eFlavour = eType; + OSAtomicWrite(&psPMR->iRefCount, 1); + + OSStringLCopy(psPMR->szAnnotation, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN); + +#if defined(PDUMP) + { + PMR_FLAGS_T uiFlags = psPMR->uiFlags; + IMG_BOOL bInitialise = IMG_FALSE; + IMG_UINT32 ui32InitValue = 0; + + if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags)) + { + bInitialise = IMG_TRUE; + } + else if (PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags)) + { + ui32InitValue = 0xDEADBEEF; + bInitialise = IMG_TRUE; + } + + PDumpPMRMallocPMR(psPMR, + (uiChunkSize * ui32NumVirtChunks), + 1ULL<hPDumpAllocHandle, + ui32PDumpFlags); + } +#endif + + *ppsPMRPtr = psPMR; 
+ + return PVRSRV_OK; + + /* Error exit paths follow */ +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR PMRLockSysPhysAddressesNested(PMR *psPMR, + IMG_UINT32 ui32NestingLevel) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT(psPMR != NULL); + + /* Note: taking this lock is not required to protect the PMR reference + * count, because the PMR reference count is atomic. Rather, taking + * the lock here guarantees that no caller will exit this function + * without the underlying physical addresses being locked. + */ + OSLockAcquireNested(psPMR->hLock, ui32NestingLevel); + /* We also count the locks as references, so that the PMR is not freed + * while someone is using a physical address. + * "lock" here simply means incrementing the refcount. It means the + * refcount is multipurpose, but that's okay. We only have to promise + * that physical addresses are valid after this point, and remain valid + * until the corresponding PMRUnlockSysPhysAddressesOSMem() + */ + _Ref(psPMR); + + /* Also count locks separately from other types of references, to + * allow for debug assertions + */ + + /* Only call callback if lockcount transitions from 0 to 1 (or 1 to 2 if not backed on demand) */ + if (OSAtomicIncrement(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 1 : 2)) + { + if (psPMR->psFuncTab->pfnLockPhysAddresses != NULL) + { + /* must always have lock and unlock in pairs! 
*/ + PVR_ASSERT(psPMR->psFuncTab->pfnUnlockPhysAddresses != NULL); + + eError = psPMR->psFuncTab->pfnLockPhysAddresses(psPMR->pvFlavourData); + + PVR_GOTO_IF_ERROR(eError, e1); + } + } + OSLockRelease(psPMR->hLock); + + return PVRSRV_OK; + +e1: + OSAtomicDecrement(&psPMR->iLockCount); + _Unref(psPMR); + PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) != 0); + OSLockRelease(psPMR->hLock); + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR +PMRLockSysPhysAddresses(PMR *psPMR) +{ + return PMRLockSysPhysAddressesNested(psPMR, 0); +} + +PVRSRV_ERROR +PMRUnlockSysPhysAddresses(PMR *psPMR) +{ + return PMRUnlockSysPhysAddressesNested(psPMR, 2); +} + +PVRSRV_ERROR +PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT(psPMR != NULL); + + /* Acquiring the lock here, as well as during the Lock operation ensures + * the lock count hitting zero and the unlocking of the phys addresses is + * an atomic operation + */ + OSLockAcquireNested(psPMR->hLock, ui32NestingLevel); + PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) > (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1)); + + if (OSAtomicDecrement(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1)) + { + if (psPMR->psFuncTab->pfnUnlockPhysAddresses != NULL) + { + PVR_ASSERT(psPMR->psFuncTab->pfnLockPhysAddresses != NULL); + + eError = psPMR->psFuncTab->pfnUnlockPhysAddresses(psPMR->pvFlavourData); + /* must never fail */ + PVR_ASSERT(eError == PVRSRV_OK); + } + } + + OSLockRelease(psPMR->hLock); + + /* We also count the locks as references, so that the PMR is not + * freed while someone is using a physical address. 
+ */ + _UnrefAndMaybeDestroy(psPMR); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PMRUnpinPMR(PMR *psPMR, IMG_BOOL bDevMapped) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT(psPMR != NULL); + + OSLockAcquire(psPMR->hLock); + /* Stop if we still have references on the PMR */ + if ( ( bDevMapped && (OSAtomicRead(&psPMR->iRefCount) > 2)) + || (!bDevMapped && (OSAtomicRead(&psPMR->iRefCount) > 1)) ) + { + OSLockRelease(psPMR->hLock); + PVR_DPF((PVR_DBG_ERROR, + "%s: PMR is still referenced %u times. " + "That means this PMR is probably exported or used somewhere else. " + "Allowed are 2 references if it is mapped to device, otherwise 1.", + __func__, + OSAtomicRead(&psPMR->iRefCount))); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_STILL_REFERENCED, e_exit); + } + OSLockRelease(psPMR->hLock); + + if (psPMR->psFuncTab->pfnUnpinMem != NULL) + { + eError = psPMR->psFuncTab->pfnUnpinMem(psPMR->pvFlavourData); + if (eError == PVRSRV_OK) + { + psPMR->bIsUnpinned = IMG_TRUE; + } + } + +e_exit: + return eError; +} + +PVRSRV_ERROR +PMRPinPMR(PMR *psPMR) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT(psPMR != NULL); + + if (psPMR->psFuncTab->pfnPinMem != NULL) + { + eError = psPMR->psFuncTab->pfnPinMem(psPMR->pvFlavourData, + psPMR->psMappingTable); + if (eError == PVRSRV_OK) + { + psPMR->bIsUnpinned = IMG_FALSE; + } + } + + return eError; +} + +PVRSRV_ERROR +PMRMakeLocalImportHandle(PMR *psPMR, + PMR **ppsPMR) +{ + PMRRefPMR(psPMR); + *ppsPMR = psPMR; + return PVRSRV_OK; +} + +PVRSRV_ERROR +PMRUnmakeLocalImportHandle(PMR *psPMR) +{ + PMRUnrefPMR(psPMR); + return PVRSRV_OK; +} + +/* + Note: + We pass back the PMR as it was passed in as a different handle type + (DEVMEM_MEM_IMPORT) and it allows us to change the import structure + type if we should need to embed any meta data in it. 
+ */ +PVRSRV_ERROR +PMRLocalImportPMR(PMR *psPMR, + PMR **ppsPMR, + IMG_DEVMEM_SIZE_T *puiSize, + IMG_DEVMEM_ALIGN_T *puiAlign) +{ + _Ref(psPMR); + + /* Return the PMR */ + *ppsPMR = psPMR; + *puiSize = psPMR->uiLogicalSize; + *puiAlign = 1ULL << psPMR->uiLog2ContiguityGuarantee; + return PVRSRV_OK; +} + +PVRSRV_ERROR +PMRGetUID(PMR *psPMR, + IMG_UINT64 *pui64UID) +{ + PVR_ASSERT(psPMR != NULL); + + *pui64UID = psPMR->uiSerialNum; + + return PVRSRV_OK; +} + +#if defined(SUPPORT_INSECURE_EXPORT) +PVRSRV_ERROR +PMRExportPMR(PMR *psPMR, + PMR_EXPORT **ppsPMRExportPtr, + PMR_SIZE_T *puiSize, + PMR_LOG2ALIGN_T *puiLog2Contig, + PMR_PASSWORD_T *puiPassword) +{ + IMG_UINT64 uiPassword; + PMR_EXPORT *psPMRExport; + + uiPassword = psPMR->uiKey; + + psPMRExport = OSAllocMem(sizeof(*psPMRExport)); + PVR_RETURN_IF_NOMEM(psPMRExport); + + psPMRExport->psPMR = psPMR; + _Ref(psPMR); + + *ppsPMRExportPtr = psPMRExport; + *puiSize = psPMR->uiLogicalSize; + *puiLog2Contig = psPMR->uiLog2ContiguityGuarantee; + *puiPassword = uiPassword; + + return PVRSRV_OK; +} + + +PVRSRV_ERROR +PMRUnexportPMR(PMR_EXPORT *psPMRExport) +{ + PVR_ASSERT(psPMRExport != NULL); + PVR_ASSERT(psPMRExport->psPMR != NULL); + PVR_ASSERT(OSAtomicRead(&psPMRExport->psPMR->iRefCount) > 0); + + _UnrefAndMaybeDestroy(psPMRExport->psPMR); + + OSFreeMem(psPMRExport); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR +PMRImportPMR(PMR_EXPORT *psPMRExport, + PMR_PASSWORD_T uiPassword, + PMR_SIZE_T uiSize, + PMR_LOG2ALIGN_T uiLog2Contig, + PMR **ppsPMR) +{ + PMR *psPMR; + + PVR_ASSERT(OSAtomicRead(&psPMRExport->psPMR->iRefCount) > 0); + + psPMR = psPMRExport->psPMR; + + + if (psPMR->uiKey != uiPassword) + { + PVR_DPF((PVR_DBG_ERROR, + "PMRImport: Import failed, password specified does not match the export")); + return PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR; + } + + if (psPMR->uiLogicalSize != uiSize || psPMR->uiLog2ContiguityGuarantee != uiLog2Contig) + { + return PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES; + } + + 
_Ref(psPMR); + + *ppsPMR = psPMR; + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PMRUnimportPMR(PMR *psPMR) +{ + _UnrefAndMaybeDestroy(psPMR); + + return PVRSRV_OK; +} + +#else /* if defined(SUPPORT_INSECURE_EXPORT) */ + +PVRSRV_ERROR +PMRExportPMR(PMR *psPMR, + PMR_EXPORT **ppsPMRExportPtr, + PMR_SIZE_T *puiSize, + PMR_LOG2ALIGN_T *puiLog2Contig, + PMR_PASSWORD_T *puiPassword) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(ppsPMRExportPtr); + PVR_UNREFERENCED_PARAMETER(puiSize); + PVR_UNREFERENCED_PARAMETER(puiLog2Contig); + PVR_UNREFERENCED_PARAMETER(puiPassword); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR +PMRUnexportPMR(PMR_EXPORT *psPMRExport) +{ + PVR_UNREFERENCED_PARAMETER(psPMRExport); + return PVRSRV_OK; +} + + +PVRSRV_ERROR +PMRImportPMR(PMR_EXPORT *psPMRExport, + PMR_PASSWORD_T uiPassword, + PMR_SIZE_T uiSize, + PMR_LOG2ALIGN_T uiLog2Contig, + PMR **ppsPMR) +{ + PVR_UNREFERENCED_PARAMETER(psPMRExport); + PVR_UNREFERENCED_PARAMETER(uiPassword); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(uiLog2Contig); + PVR_UNREFERENCED_PARAMETER(ppsPMR); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PMRUnimportPMR(PMR *psPMR) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + return PVRSRV_OK; +} +#endif /* if defined(SUPPORT_INSECURE_EXPORT) */ + +#if defined(SUPPORT_SECURE_EXPORT) +PVRSRV_ERROR PMRSecureUnexportPMR(PMR *psPMR) +{ + _UnrefAndMaybeDestroy(psPMR); + return PVRSRV_OK; +} + +static PVRSRV_ERROR _ReleaseSecurePMR(void *psExport) +{ + return PMRSecureUnexportPMR(psExport); +} + +PVRSRV_ERROR PMRSecureExportPMR(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDevNode, + PMR *psPMR, + IMG_SECURE_TYPE *phSecure, + PMR **ppsPMR, + CONNECTION_DATA **ppsSecureConnection) +{ + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(ppsSecureConnection); + + /* We are acquiring reference to PMR here because OSSecureExport + * releases bridge lock and PMR lock for a moment and we don't want PMR + 
* to be removed by other thread in the meantime. */ + _Ref(psPMR); + + eError = OSSecureExport("secure_pmr", + _ReleaseSecurePMR, + (void *) psPMR, + phSecure); + PVR_GOTO_IF_ERROR(eError, e0); + + *ppsPMR = psPMR; + + return PVRSRV_OK; +e0: + PVR_ASSERT(eError != PVRSRV_OK); + _UnrefAndMaybeDestroy(psPMR); + return eError; +} + +PVRSRV_ERROR PMRSecureImportPMR(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_SECURE_TYPE hSecure, + PMR **ppsPMR, + IMG_DEVMEM_SIZE_T *puiSize, + IMG_DEVMEM_ALIGN_T *puiAlign) +{ + PVRSRV_ERROR eError; + PMR *psPMR; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + eError = OSSecureImport(hSecure, (void **) &psPMR); + PVR_GOTO_IF_ERROR(eError, e0); + + if (psPMR->psDevNode != psDevNode) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device", __func__)); + return PVRSRV_ERROR_PMR_NOT_PERMITTED; + } + + _Ref(psPMR); + + /* Return the PMR */ + *ppsPMR = psPMR; + *puiSize = psPMR->uiLogicalSize; + *puiAlign = 1 << psPMR->uiLog2ContiguityGuarantee; + return PVRSRV_OK; +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR PMRSecureUnimportPMR(PMR *psPMR) +{ + _UnrefAndMaybeDestroy(psPMR); + return PVRSRV_OK; +} +#endif + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +PVRSRV_ERROR +PMRStoreRIHandle(PMR *psPMR, + void *hRIHandle) +{ + PVR_ASSERT(psPMR != NULL); + + psPMR->hRIHandle = hRIHandle; + return PVRSRV_OK; +} +#endif + +static PVRSRV_ERROR +_PMRAcquireKernelMappingData(PMR *psPMR, + size_t uiLogicalOffset, + size_t uiSize, + void **ppvKernelAddressOut, + size_t *puiLengthOut, + IMG_HANDLE *phPrivOut, + IMG_BOOL bMapSparse) +{ + PVRSRV_ERROR eError; + void *pvKernelAddress; + IMG_HANDLE hPriv; + + PVR_ASSERT(psPMR != NULL); + + if (_PMRIsSparse(psPMR) && !bMapSparse) + { + /* Mapping of sparse allocations must be signalled. 
*/ + return PVRSRV_ERROR_PMR_NOT_PERMITTED; + } + + /* Acquire/Release functions must be overridden in pairs */ + if (psPMR->psFuncTab->pfnAcquireKernelMappingData == NULL) + { + PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData == NULL); + + /* If PMR implementation does not supply this pair of + * functions, it means they do not permit the PMR to be mapped + * into kernel memory at all + */ + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PERMITTED, e0); + } + PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData != NULL); + + eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData, + uiLogicalOffset, + uiSize, + &pvKernelAddress, + &hPriv, + psPMR->uiFlags); + PVR_GOTO_IF_ERROR(eError, e0); + + *ppvKernelAddressOut = pvKernelAddress; + if (uiSize == 0) + { + /* Zero size means map in the whole PMR ... */ + *puiLengthOut = (size_t)psPMR->uiLogicalSize; + } + else if (uiSize > (1 << psPMR->uiLog2ContiguityGuarantee)) + { + /* ... map in the requested pages ... */ + *puiLengthOut = uiSize; + } + else + { + /* ... 
otherwise we just map in one page */ + *puiLengthOut = 1 << psPMR->uiLog2ContiguityGuarantee; + } + *phPrivOut = hPriv; + + return PVRSRV_OK; + +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR +PMRAcquireKernelMappingData(PMR *psPMR, + size_t uiLogicalOffset, + size_t uiSize, + void **ppvKernelAddressOut, + size_t *puiLengthOut, + IMG_HANDLE *phPrivOut) +{ + return _PMRAcquireKernelMappingData(psPMR, + uiLogicalOffset, + uiSize, + ppvKernelAddressOut, + puiLengthOut, + phPrivOut, + IMG_FALSE); +} + +PVRSRV_ERROR +PMRAcquireSparseKernelMappingData(PMR *psPMR, + size_t uiLogicalOffset, + size_t uiSize, + void **ppvKernelAddressOut, + size_t *puiLengthOut, + IMG_HANDLE *phPrivOut) +{ + return _PMRAcquireKernelMappingData(psPMR, + uiLogicalOffset, + uiSize, + ppvKernelAddressOut, + puiLengthOut, + phPrivOut, + IMG_TRUE); +} + +PVRSRV_ERROR +PMRReleaseKernelMappingData(PMR *psPMR, + IMG_HANDLE hPriv) +{ + PVR_ASSERT (psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL); + PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData != NULL); + + psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData, + hPriv); + + return PVRSRV_OK; +} + +#if defined(INTEGRITY_OS) + +PVRSRV_ERROR +PMRMapMemoryObject(PMR *psPMR, + IMG_HANDLE *phMemObj, + void **pvClientAddr, + IMG_HANDLE *phPrivOut) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_HANDLE hPriv = *phPrivOut; + + PVR_ASSERT (psPMR->psFuncTab->pfnMapMemoryObject != NULL); + + eError = psPMR->psFuncTab->pfnMapMemoryObject(hPriv, phMemObj, pvClientAddr, phPrivOut); + + return eError; +} + +PVRSRV_ERROR +PMRUnmapMemoryObject(PMR *psPMR, + IMG_HANDLE hPriv) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT (psPMR->psFuncTab->pfnUnmapMemoryObject != NULL); + + eError = psPMR->psFuncTab->pfnUnmapMemoryObject(hPriv); + + return eError; +} + +#endif /* INTEGRITY_OS */ + +/* + _PMRLogicalOffsetToPhysicalOffset + + Translate between the "logical" offset which the upper levels + provide and the 
physical offset which is what the PMR + factories works on. + + As well as returning the physical offset we return the number of + bytes remaining till the next chunk and if this chunk is valid. + + For multi-page operations, upper layers communicate their + Log2PageSize else argument is redundant (set to zero). + */ + +static void +_PMRLogicalOffsetToPhysicalOffset(const PMR *psPMR, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_DEVMEM_OFFSET_T *puiPhysicalOffset, + IMG_UINT32 *pui32BytesRemain, + IMG_BOOL *bValid) +{ + PMR_MAPPING_TABLE *psMappingTable = psPMR->psMappingTable; + IMG_DEVMEM_OFFSET_T uiPageSize = 1ULL << ui32Log2PageSize; + IMG_DEVMEM_OFFSET_T uiOffset = uiLogicalOffset; + IMG_UINT64 ui64ChunkIndex; + IMG_UINT32 ui32Remain; + IMG_UINT32 idx; + + /* Must be translating at least a page */ + PVR_ASSERT(ui32NumOfPages); + + if (psMappingTable->ui32NumPhysChunks == psMappingTable->ui32NumVirtChunks) + { + /* Fast path the common case, as logical and physical offsets are + equal we assume the ui32NumOfPages span is also valid */ + *pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiOffset); + puiPhysicalOffset[0] = uiOffset; + bValid[0] = IMG_TRUE; + + if (ui32NumOfPages > 1) + { + /* initial offset may not be page aligned, round down */ + uiOffset &= ~(uiPageSize-1); + for (idx=1; idx < ui32NumOfPages; idx++) + { + uiOffset += uiPageSize; + puiPhysicalOffset[idx] = uiOffset; + bValid[idx] = IMG_TRUE; + } + } + } + else + { + for (idx=0; idx < ui32NumOfPages; idx++) + { + ui64ChunkIndex = OSDivide64r64( + uiOffset, + TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize), + &ui32Remain); + + if (psMappingTable->aui32Translation[ui64ChunkIndex] == TRANSLATION_INVALID) + { + bValid[idx] = IMG_FALSE; + } + else + { + bValid[idx] = IMG_TRUE; + } + + if (idx == 0) + { + if (ui32Remain == 0) + { + /* Start of chunk so return the chunk size */ + *pui32BytesRemain = 
TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize); + } + else + { + *pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize - ui32Remain); + } + + puiPhysicalOffset[idx] = (psMappingTable->aui32Translation[ui64ChunkIndex] * psMappingTable->uiChunkSize) + ui32Remain; + + /* initial offset may not be page aligned, round down */ + uiOffset &= ~(uiPageSize-1); + } + else + { + puiPhysicalOffset[idx] = psMappingTable->aui32Translation[ui64ChunkIndex] * psMappingTable->uiChunkSize + ui32Remain; + } + uiOffset += uiPageSize; + } + } +} + +static PVRSRV_ERROR +_PMR_ReadBytesPhysical(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiPhysicalOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes) +{ + PVRSRV_ERROR eError; + + if (psPMR->psFuncTab->pfnReadBytes != NULL) + { + /* defer to callback if present */ + + eError = PMRLockSysPhysAddresses(psPMR); + PVR_GOTO_IF_ERROR(eError, e0); + + eError = psPMR->psFuncTab->pfnReadBytes(psPMR->pvFlavourData, + uiPhysicalOffset, + pcBuffer, + uiBufSz, + puiNumBytes); + PMRUnlockSysPhysAddresses(psPMR); + PVR_GOTO_IF_ERROR(eError, e0); + } + else if (psPMR->psFuncTab->pfnAcquireKernelMappingData) + { + /* "default" handler for reading bytes */ + + IMG_HANDLE hKernelMappingHandle; + IMG_UINT8 *pcKernelAddress; + + eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData, + (size_t) uiPhysicalOffset, + uiBufSz, + (void **)&pcKernelAddress, + &hKernelMappingHandle, + psPMR->uiFlags); + PVR_GOTO_IF_ERROR(eError, e0); + + /* Use the conservative 'DeviceMemCopy' here because we can't + * know if this PMR will be mapped cached. 
+ */ + + OSDeviceMemCopy(&pcBuffer[0], pcKernelAddress, uiBufSz); + *puiNumBytes = uiBufSz; + + psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData, + hKernelMappingHandle); + } + else + { + OSPanic(); + PVR_LOG_GOTO_WITH_ERROR("psPMR->psFuncTab", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); + } + + return PVRSRV_OK; + + /* Error exit paths follow */ +e0: + PVR_ASSERT(eError != PVRSRV_OK); + *puiNumBytes = 0; + return eError; +} + +PVRSRV_ERROR +PMR_ReadBytes(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_DEVMEM_OFFSET_T uiPhysicalOffset; + size_t uiBytesCopied = 0; + + if (uiLogicalOffset + uiBufSz > psPMR->uiLogicalSize) + { + uiBufSz = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiLogicalOffset); + } + PVR_ASSERT(uiBufSz > 0); + PVR_ASSERT(uiBufSz <= psPMR->uiLogicalSize); + + /* PMR implementations can override this. If they don't, a "default" + * handler uses kernel virtual mappings. If the kernel can't + * provide a kernel virtual mapping, this function fails. 
+ */ + PVR_ASSERT(psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL || + psPMR->psFuncTab->pfnReadBytes != NULL); + + while (uiBytesCopied != uiBufSz) + { + IMG_UINT32 ui32Remain; + size_t uiBytesToCopy; + size_t uiRead; + IMG_BOOL bValid; + + _PMRLogicalOffsetToPhysicalOffset(psPMR, + 0, + 1, + uiLogicalOffset, + &uiPhysicalOffset, + &ui32Remain, + &bValid); + /* Copy till either then end of the chunk or end + * of the buffer + */ + uiBytesToCopy = MIN(uiBufSz - uiBytesCopied, ui32Remain); + + if (bValid) + { + /* Read the data from the PMR */ + eError = _PMR_ReadBytesPhysical(psPMR, + uiPhysicalOffset, + &pcBuffer[uiBytesCopied], + uiBytesToCopy, + &uiRead); + if ((eError != PVRSRV_OK) || (uiRead != uiBytesToCopy)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to read chunk (eError = %s, uiRead = " IMG_SIZE_FMTSPEC " uiBytesToCopy = " IMG_SIZE_FMTSPEC ")", + __func__, + PVRSRVGetErrorString(eError), + uiRead, + uiBytesToCopy)); + /* Bail out as soon as we hit an error */ + break; + } + } + else + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Invalid phys offset at logical offset (" IMG_DEVMEM_OFFSET_FMTSPEC ") logical size (" IMG_DEVMEM_OFFSET_FMTSPEC ")", + __func__, + uiLogicalOffset, + psPMR->uiLogicalSize)); + /* Fill invalid chunks with 0 */ + OSCachedMemSet(&pcBuffer[uiBytesCopied], 0, uiBytesToCopy); + uiRead = uiBytesToCopy; + eError = PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR; + } + uiLogicalOffset += uiRead; + uiBytesCopied += uiRead; + } + + *puiNumBytes = uiBytesCopied; + return eError; +} + +static PVRSRV_ERROR +_PMR_WriteBytesPhysical(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiPhysicalOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes) +{ + PVRSRV_ERROR eError; + + if (psPMR->psFuncTab->pfnWriteBytes != NULL) + { + /* defer to callback if present */ + + eError = PMRLockSysPhysAddresses(psPMR); + PVR_GOTO_IF_ERROR(eError, e0); + + eError = psPMR->psFuncTab->pfnWriteBytes(psPMR->pvFlavourData, + uiPhysicalOffset, + pcBuffer, + uiBufSz, + 
puiNumBytes); + PMRUnlockSysPhysAddresses(psPMR); + PVR_GOTO_IF_ERROR(eError, e0); + } + else if (psPMR->psFuncTab->pfnAcquireKernelMappingData) + { + /* "default" handler for reading bytes */ + + IMG_HANDLE hKernelMappingHandle; + IMG_UINT8 *pcKernelAddress; + + eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData, + (size_t) uiPhysicalOffset, + uiBufSz, + (void **)&pcKernelAddress, + &hKernelMappingHandle, + psPMR->uiFlags); + PVR_GOTO_IF_ERROR(eError, e0); + + /* Use the conservative 'DeviceMemCopy' here because we can't know + * if this PMR will be mapped cached. + */ + + OSDeviceMemCopy(pcKernelAddress, &pcBuffer[0], uiBufSz); + *puiNumBytes = uiBufSz; + + psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData, + hKernelMappingHandle); + } + else + { + /* The write callback is optional as it's only required by the + * debug tools + */ + OSPanic(); + PVR_LOG_GOTO_WITH_ERROR("psPMR->psFuncTab", eError, PVRSRV_ERROR_PMR_NOT_PERMITTED, e0); + } + + return PVRSRV_OK; + + /* Error exit paths follow */ +e0: + PVR_ASSERT(eError != PVRSRV_OK); + *puiNumBytes = 0; + return eError; +} + +PVRSRV_ERROR +PMR_WriteBytes(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_DEVMEM_OFFSET_T uiPhysicalOffset; + size_t uiBytesCopied = 0; + + if (uiLogicalOffset + uiBufSz > psPMR->uiLogicalSize) + { + uiBufSz = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiLogicalOffset); + } + PVR_ASSERT(uiBufSz > 0); + PVR_ASSERT(uiBufSz <= psPMR->uiLogicalSize); + + /* PMR implementations can override this. If they don't, a "default" + * handler uses kernel virtual mappings. If the kernel can't provide + * a kernel virtual mapping, this function fails. 
+ */ + PVR_ASSERT(psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL || + psPMR->psFuncTab->pfnWriteBytes != NULL); + + while (uiBytesCopied != uiBufSz) + { + IMG_UINT32 ui32Remain; + size_t uiBytesToCopy; + size_t uiWrite; + IMG_BOOL bValid; + + _PMRLogicalOffsetToPhysicalOffset(psPMR, + 0, + 1, + uiLogicalOffset, + &uiPhysicalOffset, + &ui32Remain, + &bValid); + + /* Copy till either then end of the chunk or end of the buffer + */ + uiBytesToCopy = MIN(uiBufSz - uiBytesCopied, ui32Remain); + + if (bValid) + { + /* Write the data to the PMR */ + eError = _PMR_WriteBytesPhysical(psPMR, + uiPhysicalOffset, + &pcBuffer[uiBytesCopied], + uiBytesToCopy, + &uiWrite); + if ((eError != PVRSRV_OK) || (uiWrite != uiBytesToCopy)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to read chunk (eError = %s, uiWrite = " IMG_SIZE_FMTSPEC " uiBytesToCopy = " IMG_SIZE_FMTSPEC ")", + __func__, + PVRSRVGetErrorString(eError), + uiWrite, + uiBytesToCopy)); + /* Bail out as soon as we hit an error */ + break; + } + } + else + { + /* Ignore writes to invalid pages */ + uiWrite = uiBytesToCopy; + } + uiLogicalOffset += uiWrite; + uiBytesCopied += uiWrite; + } + + *puiNumBytes = uiBytesCopied; + return eError; +} + +PVRSRV_ERROR +PMRMMapPMR(PMR *psPMR, PMR_MMAP_DATA pOSMMapData) +{ + if (psPMR->psFuncTab->pfnMMap) + { + return psPMR->psFuncTab->pfnMMap(psPMR->pvFlavourData, psPMR, pOSMMapData); + } + + return OSMMapPMRGeneric(psPMR, pOSMMapData); +} + +void +PMRRefPMR(PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + _Ref(psPMR); +} + +PVRSRV_ERROR +PMRUnrefPMR(PMR *psPMR) +{ + _UnrefAndMaybeDestroy(psPMR); + return PVRSRV_OK; +} + +PVRSRV_ERROR +PMRUnrefUnlockPMR(PMR *psPMR) +{ + PMRUnlockSysPhysAddresses(psPMR); + + PMRUnrefPMR(psPMR); + + return PVRSRV_OK; +} + +PVRSRV_DEVICE_NODE * +PMR_DeviceNode(const PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + + return psPMR->psDevNode; +} + +PMR_FLAGS_T +PMR_Flags(const PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + + return psPMR->uiFlags; +} + 
+IMG_BOOL +PMR_IsSparse(const PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + + return _PMRIsSparse(psPMR); +} + +IMG_BOOL +PMR_IsUnpinned(const PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + + return psPMR->bIsUnpinned; +} + +PVRSRV_ERROR +PMR_LogicalSize(const PMR *psPMR, + IMG_DEVMEM_SIZE_T *puiLogicalSize) +{ + PVR_ASSERT(psPMR != NULL); + + *puiLogicalSize = psPMR->uiLogicalSize; + return PVRSRV_OK; +} + +PVRSRV_ERROR +PMR_PhysicalSize(const PMR *psPMR, + IMG_DEVMEM_SIZE_T *puiPhysicalSize) +{ + PVR_ASSERT(psPMR != NULL); + + /* iLockCount will be > 0 for any backed PMR (backed on demand or not) */ + if ((OSAtomicRead(&psPMR->iLockCount) > 0) && !psPMR->bIsUnpinned) + { + if (psPMR->bSparseAlloc) + { + *puiPhysicalSize = psPMR->psMappingTable->uiChunkSize * psPMR->psMappingTable->ui32NumPhysChunks; + } + else + { + *puiPhysicalSize = psPMR->uiLogicalSize; + } + } + else + { + *puiPhysicalSize = 0; + } + return PVRSRV_OK; +} + +PHYS_HEAP * +PMR_PhysHeap(const PMR *psPMR) +{ + return psPMR->psPhysHeap; +} + +PVRSRV_ERROR +PMR_IsOffsetValid(const PMR *psPMR, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_BOOL *pbValid) +{ + IMG_DEVMEM_OFFSET_T auiPhysicalOffset[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_UINT32 aui32BytesRemain[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_DEVMEM_OFFSET_T *puiPhysicalOffset = auiPhysicalOffset; + IMG_UINT32 *pui32BytesRemain = aui32BytesRemain; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT(psPMR != NULL); + PVR_ASSERT(psPMR->uiLogicalSize >= uiLogicalOffset); + + if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC) + { + puiPhysicalOffset = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEVMEM_OFFSET_T)); + PVR_GOTO_IF_NOMEM(puiPhysicalOffset, eError, e0); + + pui32BytesRemain = OSAllocMem(ui32NumOfPages * sizeof(IMG_UINT32)); + PVR_GOTO_IF_NOMEM(pui32BytesRemain, eError, e0); + } + + _PMRLogicalOffsetToPhysicalOffset(psPMR, + ui32Log2PageSize, + ui32NumOfPages, + uiLogicalOffset, + 
puiPhysicalOffset, + pui32BytesRemain, + pbValid); + +e0: + if (puiPhysicalOffset != auiPhysicalOffset && puiPhysicalOffset != NULL) + { + OSFreeMem(puiPhysicalOffset); + } + + if (pui32BytesRemain != aui32BytesRemain && pui32BytesRemain != NULL) + { + OSFreeMem(pui32BytesRemain); + } + + return eError; +} + +PMR_MAPPING_TABLE * +PMR_GetMappigTable(const PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + return psPMR->psMappingTable; + +} + +IMG_UINT32 +PMR_GetLog2Contiguity(const PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + return psPMR->uiLog2ContiguityGuarantee; +} + +const IMG_CHAR * +PMR_GetAnnotation(const PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + return psPMR->szAnnotation; +} + +PMR_IMPL_TYPE +PMR_GetType(const PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + return psPMR->eFlavour; +} + +IMG_INT32 +PMR_GetRefCount(const PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + return OSAtomicRead(&psPMR->iRefCount); +} + +/* must have called PMRLockSysPhysAddresses() before calling this! */ +PVRSRV_ERROR +PMR_DevPhysAddr(const PMR *psPMR, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_DEV_PHYADDR *psDevAddrPtr, + IMG_BOOL *pbValid) +{ + IMG_UINT32 ui32Remain; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_DEVMEM_OFFSET_T auiPhysicalOffset[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_DEVMEM_OFFSET_T *puiPhysicalOffset = auiPhysicalOffset; + + PVR_ASSERT(psPMR != NULL); + PVR_ASSERT(ui32NumOfPages > 0); + PVR_ASSERT(psPMR->psFuncTab->pfnDevPhysAddr != NULL); + +#ifdef PVRSRV_NEED_PVR_ASSERT + PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) > (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 
0 : 1)); +#endif + + if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC) + { + puiPhysicalOffset = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEVMEM_OFFSET_T)); + PVR_GOTO_IF_NOMEM(puiPhysicalOffset, eError, e0); + } + + _PMRLogicalOffsetToPhysicalOffset(psPMR, + ui32Log2PageSize, + ui32NumOfPages, + uiLogicalOffset, + puiPhysicalOffset, + &ui32Remain, + pbValid); + if (*pbValid || _PMRIsSparse(psPMR)) + { + /* Sparse PMR may not always have the first page valid */ + eError = psPMR->psFuncTab->pfnDevPhysAddr(psPMR->pvFlavourData, + ui32Log2PageSize, + ui32NumOfPages, + puiPhysicalOffset, + pbValid, + psDevAddrPtr); +#if defined(PVR_PMR_TRANSLATE_UMA_ADDRESSES) + /* Currently excluded from the default build because of performance concerns. + * We do not need this part in all systems because the GPU has the same address view of system RAM as the CPU. + * Alternatively this could be implemented as part of the PMR-factories directly */ + + if (PhysHeapGetType(psPMR->psPhysHeap) == PHYS_HEAP_TYPE_UMA || + PhysHeapGetType(psPMR->psPhysHeap) == PHYS_HEAP_TYPE_DMA) + { + IMG_UINT32 i; + IMG_DEV_PHYADDR sDevPAddrCorrected; + + /* Copy the translated addresses to the correct array */ + for (i = 0; i < ui32NumOfPages; i++) + { + PhysHeapCpuPAddrToDevPAddr(psPMR->psPhysHeap, + 1, + &sDevPAddrCorrected, + (IMG_CPU_PHYADDR *) &psDevAddrPtr[i]); + psDevAddrPtr[i].uiAddr = sDevPAddrCorrected.uiAddr; + } + } +#endif + } + + if (puiPhysicalOffset != auiPhysicalOffset) + { + OSFreeMem(puiPhysicalOffset); + } + + PVR_GOTO_IF_ERROR(eError, e0); + + return PVRSRV_OK; + +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +/* must have called PMRLockSysPhysAddresses() before calling this! 
*/ +PVRSRV_ERROR +PMR_CpuPhysAddr(const PMR *psPMR, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_CPU_PHYADDR *psCpuAddrPtr, + IMG_BOOL *pbValid) +{ + PVRSRV_ERROR eError; + IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_DEV_PHYADDR *psDevPAddr = asDevPAddr; + + if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC) + { + psDevPAddr = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEV_PHYADDR)); + PVR_GOTO_IF_NOMEM(psDevPAddr, eError, e0); + } + + eError = PMR_DevPhysAddr(psPMR, ui32Log2PageSize, ui32NumOfPages, + uiLogicalOffset, psDevPAddr, pbValid); + PVR_GOTO_IF_ERROR(eError, e1); + PhysHeapDevPAddrToCpuPAddr(psPMR->psPhysHeap, ui32NumOfPages, psCpuAddrPtr, psDevPAddr); + + if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC) + { + OSFreeMem(psDevPAddr); + } + + return PVRSRV_OK; +e1: + if (psDevPAddr != asDevPAddr) + { + OSFreeMem(psDevPAddr); + } +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_UINT32 uiSparseFlags) +{ + PVRSRV_ERROR eError; + + if (NULL == psPMR->psFuncTab->pfnChangeSparseMem) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: This type of sparse PMR cannot be changed.", + __func__)); + return PVRSRV_ERROR_NOT_IMPLEMENTED; + } + + eError = psPMR->psFuncTab->pfnChangeSparseMem(psPMR->pvFlavourData, + psPMR, + ui32AllocPageCount, + pai32AllocIndices, + ui32FreePageCount, + pai32FreeIndices, + uiSparseFlags); + if (eError != PVRSRV_OK) + { +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + if (eError == PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES) + { + PVRSRVStatsUpdateOOMStats(PVRSRV_PROCESS_STAT_TYPE_OOM_PHYSMEM_COUNT, + OSGetCurrentClientProcessIDKM()); + } +#endif + goto e0; + } + +#if defined(PDUMP) + { + IMG_BOOL bInitialise = IMG_FALSE; + IMG_UINT32 ui32InitValue = 0; + + if 
(PVRSRV_CHECK_ZERO_ON_ALLOC(PMR_Flags(psPMR))) + { + bInitialise = IMG_TRUE; + } + else if (PVRSRV_CHECK_POISON_ON_ALLOC(PMR_Flags(psPMR))) + { + ui32InitValue = 0xDEADBEEF; + bInitialise = IMG_TRUE; + } + + PDumpPMRChangeSparsePMR(psPMR, + 1 << psPMR->uiLog2ContiguityGuarantee, + ui32AllocPageCount, + pai32AllocIndices, + ui32FreePageCount, + pai32FreeIndices, + bInitialise, + ui32InitValue, + &psPMR->hPDumpAllocHandle); + } + +#endif + +e0: + return eError; +} + + +PVRSRV_ERROR PMR_ChangeSparseMemCPUMap(PMR *psPMR, + IMG_UINT64 sCpuVAddrBase, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices) +{ + PVRSRV_ERROR eError; + + if ((NULL == psPMR->psFuncTab) || + (NULL == psPMR->psFuncTab->pfnChangeSparseMemCPUMap)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: This type of sparse PMR cannot be changed.", + __func__)); + return PVRSRV_ERROR_NOT_IMPLEMENTED; + } + + eError = psPMR->psFuncTab->pfnChangeSparseMemCPUMap(psPMR->pvFlavourData, + psPMR, + sCpuVAddrBase, + ui32AllocPageCount, + pai32AllocIndices, + ui32FreePageCount, + pai32FreeIndices); + + return eError; +} + + +#if defined(PDUMP) + +static PVRSRV_ERROR +_PMR_PDumpSymbolicAddrPhysical(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiPhysicalOffset, + IMG_UINT32 ui32MemspaceNameLen, + IMG_CHAR *pszMemspaceName, + IMG_UINT32 ui32SymbolicAddrLen, + IMG_CHAR *pszSymbolicAddr, + IMG_DEVMEM_OFFSET_T *puiNewOffset, + IMG_DEVMEM_OFFSET_T *puiNextSymName) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + +#if defined(SUPPORT_SECURITY_VALIDATION) + if (PVRSRV_CHECK_SECURE_FW_CODE(psPMR->uiFlags) || + PVRSRV_CHECK_SECURE_FW_DATA(psPMR->uiFlags) || + PVRSRV_CHECK_SECURE_BUFFER(psPMR->uiFlags) ) + { + PVRSRV_DEVICE_NODE *psDevNode = psPMR->psDevNode; + + eError = psDevNode->pfnGetSecurePDumpMemspace(psPMR->psDevNode, + psPMR->uiFlags, + pszMemspaceName, + ui32MemspaceNameLen); + + PVR_RETURN_IF_ERROR(eError); + } + else +#endif + if 
(DevmemCPUCacheCoherency(psPMR->psDevNode, psPMR->uiFlags) && + DevmemDeviceCacheCoherency(psPMR->psDevNode, psPMR->uiFlags)) + { + OSSNPrintf(pszMemspaceName, + ui32MemspaceNameLen, + PMR_MEMSPACE_CACHE_COHERENT_FMTSPEC, + psPMR->pszPDumpDefaultMemspaceName); + } + else + { + OSSNPrintf(pszMemspaceName, ui32MemspaceNameLen, PMR_MEMSPACE_FMTSPEC, + psPMR->pszPDumpDefaultMemspaceName); + } + + OSSNPrintf(pszSymbolicAddr, + ui32SymbolicAddrLen, + PMR_SYMBOLICADDR_FMTSPEC, + PMR_DEFAULT_PREFIX, + psPMR->uiSerialNum, + uiPhysicalOffset >> PMR_GetLog2Contiguity(psPMR), + psPMR->szAnnotation); + + if (pszSymbolicAddr) + { + PDumpMakeStringValid(pszSymbolicAddr, OSStringLength(pszSymbolicAddr)); + } + + + *puiNewOffset = uiPhysicalOffset & ((1 << PMR_GetLog2Contiguity(psPMR))-1); + *puiNextSymName = (IMG_DEVMEM_OFFSET_T) (((uiPhysicalOffset >> PMR_GetLog2Contiguity(psPMR))+1) + << PMR_GetLog2Contiguity(psPMR)); + + return eError; +} + + +PVRSRV_ERROR +PMR_PDumpSymbolicAddr(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32MemspaceNameLen, + IMG_CHAR *pszMemspaceName, + IMG_UINT32 ui32SymbolicAddrLen, + IMG_CHAR *pszSymbolicAddr, + IMG_DEVMEM_OFFSET_T *puiNewOffset, + IMG_DEVMEM_OFFSET_T *puiNextSymName +) +{ + IMG_DEVMEM_OFFSET_T uiPhysicalOffset; + IMG_UINT32 ui32Remain; + IMG_BOOL bValid; + + PVR_ASSERT(uiLogicalOffset < psPMR->uiLogicalSize); + + _PMRLogicalOffsetToPhysicalOffset(psPMR, + 0, + 1, + uiLogicalOffset, + &uiPhysicalOffset, + &ui32Remain, + &bValid); + + if (!bValid) + { + /* For sparse allocations, for a given logical address, there + * may not be a physical memory backing, the virtual range can + * still be valid. + */ + uiPhysicalOffset = uiLogicalOffset; + } + + return _PMR_PDumpSymbolicAddrPhysical(psPMR, + uiPhysicalOffset, + ui32MemspaceNameLen, + pszMemspaceName, + ui32SymbolicAddrLen, + pszSymbolicAddr, + puiNewOffset, + puiNextSymName); +} + +/*! 
+ * @brief Writes a WRW command to the script2 buffer, representing a + * dword write to a physical allocation. Size is always + * sizeof(IMG_UINT32). + * @param psPMR - PMR object representing allocation + * @param uiLogicalOffset - offset + * @param ui32Value - value to write + * @param uiPDumpFlags - pdump flags + * @return PVRSRV_ERROR + */ +PVRSRV_ERROR +PMRPDumpLoadMemValue32(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Value, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee; + + PVR_ASSERT(uiLogicalOffset + sizeof(ui32Value) <= psPMR->uiLogicalSize); + /* Especially make sure to not cross a block boundary */ + PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui32Value)) + <= uiPMRPageSize)); + + eError = PMRLockSysPhysAddresses(psPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + /* Get the symbolic address of the PMR */ + eError = PMR_PDumpSymbolicAddr(psPMR, + uiLogicalOffset, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiPDumpSymbolicOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + + /* Write the WRW script command */ + eError = PDumpPMRWRW32(aszMemspaceName, + aszSymbolicName, + uiPDumpSymbolicOffset, + ui32Value, + uiPDumpFlags); + PVR_ASSERT(eError == PVRSRV_OK); + + eError = PMRUnlockSysPhysAddresses(psPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + return PVRSRV_OK; +} + +/*! + * @brief Writes a RDW followed by a WRW command to the pdump script to perform + * an effective copy from memory to memory. 
Memory copied is of size + * sizeof(IMG_UINT32) + * + * @param psDstPMR - PMR object representing allocation of destination + * @param uiDstLogicalOffset - destination offset + * @param psSrcPMR - PMR object representing allocation of source + * @param uiSrcLogicalOffset - source offset + * @param pszTmpVar - pdump temporary variable used during the copy + * @param uiPDumpFlags - pdump flags + * @return PVRSRV_ERROR + */ +PVRSRV_ERROR +PMRPDumpCopyMem32(PMR *psDstPMR, + IMG_DEVMEM_OFFSET_T uiDstLogicalOffset, + PMR *psSrcPMR, + IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset, + const IMG_CHAR *pszTmpVar, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + const IMG_UINT32 uiDstPMRPageSize = 1 << psDstPMR->uiLog2ContiguityGuarantee; + const IMG_UINT32 uiSrcPMRPageSize = 1 << psSrcPMR->uiLog2ContiguityGuarantee; + + PVR_ASSERT(uiSrcLogicalOffset + sizeof(IMG_UINT32) <= psSrcPMR->uiLogicalSize); + /* Especially make sure to not cross a block boundary */ + PVR_ASSERT(( ((uiSrcLogicalOffset & (uiSrcPMRPageSize-1)) + sizeof(IMG_UINT32)) + <= uiSrcPMRPageSize)); + + PVR_ASSERT(uiDstLogicalOffset + sizeof(IMG_UINT32) <= psDstPMR->uiLogicalSize); + /* Especially make sure to not cross a block boundary */ + PVR_ASSERT(( ((uiDstLogicalOffset & (uiDstPMRPageSize-1)) + sizeof(IMG_UINT32)) + <= uiDstPMRPageSize)); + + + eError = PMRLockSysPhysAddresses(psSrcPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + /* Get the symbolic address of the source PMR */ + eError = PMR_PDumpSymbolicAddr(psSrcPMR, + uiSrcLogicalOffset, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiPDumpSymbolicOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + + /* Issue PDump read command */ + eError = PDumpPMRRDW32MemToInternalVar(pszTmpVar, + 
aszMemspaceName, + aszSymbolicName, + uiPDumpSymbolicOffset, + uiPDumpFlags); + PVR_ASSERT(eError == PVRSRV_OK); + + eError = PMRUnlockSysPhysAddresses(psSrcPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + + + eError = PMRLockSysPhysAddresses(psDstPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + + /* Get the symbolic address of the destination PMR */ + eError = PMR_PDumpSymbolicAddr(psDstPMR, + uiDstLogicalOffset, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiPDumpSymbolicOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + + + /* Write the WRW script command */ + eError = PDumpPMRWRW32InternalVarToMem(aszMemspaceName, + aszSymbolicName, + uiPDumpSymbolicOffset, + pszTmpVar, + uiPDumpFlags); + PVR_ASSERT(eError == PVRSRV_OK); + + + eError = PMRUnlockSysPhysAddresses(psDstPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + return PVRSRV_OK; +} + +/*! + * @brief Writes a WRW64 command to the script2 buffer, representing a + * dword write to a physical allocation. Size is always + * sizeof(IMG_UINT64). 
+ * @param psPMR - PMR object representing allocation + * @param uiLogicalOffset - offset + * @param ui64Value - value to write + * @param uiPDumpFlags - pdump flags + * @return PVRSRV_ERROR + */ +PVRSRV_ERROR +PMRPDumpLoadMemValue64(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT64 ui64Value, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee; + + + PVR_ASSERT(uiLogicalOffset + sizeof(ui64Value) <= psPMR->uiLogicalSize); + /* Especially make sure to not cross a block boundary */ + PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui64Value)) + <= uiPMRPageSize)); + + eError = PMRLockSysPhysAddresses(psPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + /* Get the symbolic address of the PMR */ + eError = PMR_PDumpSymbolicAddr(psPMR, + uiLogicalOffset, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiPDumpSymbolicOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + + /* Write the WRW script command */ + eError = PDumpPMRWRW64(aszMemspaceName, + aszSymbolicName, + uiPDumpSymbolicOffset, + ui64Value, + uiPDumpFlags); + PVR_ASSERT(eError == PVRSRV_OK); + + eError = PMRUnlockSysPhysAddresses(psPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + return PVRSRV_OK; +} + +/*! + * @brief Writes a RDW64 followed by a WRW64 command to the pdump script to + * perform an effective copy from memory to memory. 
Memory copied is of + * size sizeof(IMG_UINT32) + * + * @param psDstPMR - PMR object representing allocation of destination + * @param uiDstLogicalOffset - destination offset + * @param psSrcPMR - PMR object representing allocation of source + * @param uiSrcLogicalOffset - source offset + * @param pszTmpVar - pdump temporary variable used during the copy + * @param uiPDumpFlags - pdump flags + * @return PVRSRV_ERROR + */ +PVRSRV_ERROR +PMRPDumpCopyMem64(PMR *psDstPMR, + IMG_DEVMEM_OFFSET_T uiDstLogicalOffset, + PMR *psSrcPMR, + IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset, + const IMG_CHAR *pszTmpVar, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + const IMG_UINT32 uiDstPMRPageSize = 1 << psDstPMR->uiLog2ContiguityGuarantee; + const IMG_UINT32 uiSrcPMRPageSize = 1 << psSrcPMR->uiLog2ContiguityGuarantee; + + PVR_ASSERT(uiSrcLogicalOffset + sizeof(IMG_UINT32) <= psSrcPMR->uiLogicalSize); + /* Especially make sure to not cross a block boundary */ + PVR_ASSERT(( ((uiSrcLogicalOffset & (uiSrcPMRPageSize-1)) + sizeof(IMG_UINT32)) + <= uiSrcPMRPageSize)); + + PVR_ASSERT(uiDstLogicalOffset + sizeof(IMG_UINT32) <= psDstPMR->uiLogicalSize); + /* Especially make sure to not cross a block boundary */ + PVR_ASSERT(( ((uiDstLogicalOffset & (uiDstPMRPageSize-1)) + sizeof(IMG_UINT32)) + <= uiDstPMRPageSize)); + + + eError = PMRLockSysPhysAddresses(psSrcPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + /* Get the symbolic address of the source PMR */ + eError = PMR_PDumpSymbolicAddr(psSrcPMR, + uiSrcLogicalOffset, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiPDumpSymbolicOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + + /* Issue PDump read command */ + eError = PDumpPMRRDW64MemToInternalVar(pszTmpVar, + 
aszMemspaceName, + aszSymbolicName, + uiPDumpSymbolicOffset, + uiPDumpFlags); + PVR_ASSERT(eError == PVRSRV_OK); + + eError = PMRUnlockSysPhysAddresses(psSrcPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + + + eError = PMRLockSysPhysAddresses(psDstPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + + /* Get the symbolic address of the destination PMR */ + eError = PMR_PDumpSymbolicAddr(psDstPMR, + uiDstLogicalOffset, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiPDumpSymbolicOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + + + /* Write the WRW script command */ + eError = PDumpPMRWRW64InternalVarToMem(aszMemspaceName, + aszSymbolicName, + uiPDumpSymbolicOffset, + pszTmpVar, + uiPDumpFlags); + PVR_ASSERT(eError == PVRSRV_OK); + + + eError = PMRUnlockSysPhysAddresses(psDstPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + return PVRSRV_OK; +} + +/*! + * @brief PDumps the contents of the given allocation. + * If bZero is IMG_TRUE then the zero page in the parameter stream is used + * as the source of data, rather than the allocation's actual backing. 
+ * @param psPMR - PMR object representing allocation + * @param uiLogicalOffset - Offset to write at + * @param uiSize - Number of bytes to write + * @param uiPDumpFlags - PDump flags + * @param bZero - Use the PDump zero page as the source + * @return PVRSRV_ERROR + */ +PVRSRV_ERROR +PMRPDumpLoadMem(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_DEVMEM_SIZE_T uiSize, + PDUMP_FLAGS_T uiPDumpFlags, + IMG_BOOL bZero) +{ + PVRSRV_ERROR eError; + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiOutOffset; + IMG_DEVMEM_OFFSET_T uiCurrentOffset = uiLogicalOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName = 0; + const IMG_CHAR *pszParamStreamFileName; + PDUMP_FILEOFFSET_T uiParamStreamFileOffset; + + /* required when !bZero */ +#define PMR_MAX_PDUMP_BUFSZ (1<<21) + IMG_CHAR aszParamStreamFilename[PDUMP_PARAM_MAX_FILE_NAME]; + IMG_UINT8 *pcBuffer = NULL; + size_t uiBufSz; + IMG_BOOL bValid; + IMG_DEVMEM_SIZE_T uiSizeRemain = uiSize; + + PVR_ASSERT(uiLogicalOffset + uiSize <= psPMR->uiLogicalSize); + + /* Check if pdump client is connected */ + if (!PDumpIsContCaptureOn()) + { + /* Dumping of memory in Pdump buffer will be rejected for no client connected case. + * So return early and save reading of data from PMR. */ + return PVRSRV_OK; + } + + /* Get the correct PDump stream file name */ + if (bZero) + { + PDumpCommentWithFlags(uiPDumpFlags, + "Zeroing allocation (" IMG_DEVMEM_SIZE_FMTSPEC " bytes)", + uiSize); + + /* get the zero page information. 
it is constant for this function */ + PDumpGetParameterZeroPageInfo(&uiParamStreamFileOffset, + &uiBufSz, + &pszParamStreamFileName); + } + else + { + + uiBufSz = 1 << PMR_GetLog2Contiguity(psPMR); + PVR_ASSERT((1 << PMR_GetLog2Contiguity(psPMR)) <= PMR_MAX_PDUMP_BUFSZ); + + pcBuffer = OSAllocMem(uiBufSz); + + PVR_LOG_RETURN_IF_NOMEM(pcBuffer, "OSAllocMem"); + + eError = PMRLockSysPhysAddresses(psPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + pszParamStreamFileName = aszParamStreamFilename; + } + + /* Loop over all touched symbolic addresses of the PMR and + * emit LDBs to load the contents. */ + while (uiCurrentOffset < (uiLogicalOffset + uiSize)) + { + /* Get the correct symbolic name for the current offset */ + eError = PMR_PDumpSymbolicAddr(psPMR, + uiCurrentOffset, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiOutOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + PVR_ASSERT((uiNextSymName - uiCurrentOffset) <= uiBufSz); + + PMR_IsOffsetValid(psPMR, + 0, + 1, + uiCurrentOffset, + &bValid); + + /* Either just LDB the zeros or read from the PMR and store that + * in the pdump stream */ + if (bValid) + { + size_t uiNumBytes; + + if (bZero) + { + uiNumBytes = MIN(uiSizeRemain, uiNextSymName - uiCurrentOffset); + } + else + { + IMG_DEVMEM_OFFSET_T uiReadOffset; + uiReadOffset = ((uiNextSymName > (uiLogicalOffset + uiSize)) ? + uiLogicalOffset + uiSize - uiCurrentOffset : + uiNextSymName - uiCurrentOffset); + + eError = PMR_ReadBytes(psPMR, + uiCurrentOffset, + pcBuffer, + uiReadOffset, + &uiNumBytes); + PVR_ASSERT(eError == PVRSRV_OK); + + eError = PDumpWriteParameterBlob(pcBuffer, + uiNumBytes, + uiPDumpFlags, + &aszParamStreamFilename[0], + sizeof(aszParamStreamFilename), + &uiParamStreamFileOffset); + if (eError == PVRSRV_ERROR_PDUMP_NOT_ALLOWED) + { + /* Write to parameter file prevented under the flags and + * current state of the driver so skip further writes. 
+ */ + eError = PVRSRV_OK; + } + else if (eError != PVRSRV_OK) + { + PDUMP_ERROR(eError, "Failed to write PMR memory to parameter file"); + } + } + + /* Emit the LDB command to the current symbolic address */ + eError = PDumpPMRLDB(aszMemspaceName, + aszSymbolicName, + uiOutOffset, + uiNumBytes, + pszParamStreamFileName, + uiParamStreamFileOffset, + uiPDumpFlags); + uiSizeRemain = uiSizeRemain - uiNumBytes; + } + uiCurrentOffset = uiNextSymName; + } + + if (!bZero) + { + eError = PMRUnlockSysPhysAddresses(psPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + OSFreeMem(pcBuffer); + } + + return PVRSRV_OK; +} + + + +PVRSRV_ERROR +PMRPDumpSaveToFile(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 uiArraySize, + const IMG_CHAR *pszFilename, + IMG_UINT32 uiFileOffset) +{ + PVRSRV_ERROR eError; + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiOutOffset; + IMG_DEVMEM_OFFSET_T uiCurrentOffset = uiLogicalOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName = 0; + IMG_UINT32 uiCurrentFileOffset = uiFileOffset; + + PVR_UNREFERENCED_PARAMETER(uiArraySize); + + PVR_ASSERT(uiLogicalOffset + uiSize <= psPMR->uiLogicalSize); + + while (uiCurrentOffset < (uiLogicalOffset + uiSize)) + { + IMG_DEVMEM_OFFSET_T uiReadOffset; + + eError = PMR_PDumpSymbolicAddr(psPMR, + uiCurrentOffset, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiOutOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + PVR_ASSERT(uiNextSymName <= psPMR->uiLogicalSize); + + uiReadOffset = ((uiNextSymName > (uiLogicalOffset + uiSize)) ? 
+ uiLogicalOffset + uiSize - uiCurrentOffset : + uiNextSymName - uiCurrentOffset); + + eError = PDumpPMRSAB(aszMemspaceName, + aszSymbolicName, + uiOutOffset, + uiReadOffset, + pszFilename, + uiCurrentFileOffset); + PVR_ASSERT(eError == PVRSRV_OK); + + uiCurrentFileOffset += uiNextSymName - uiCurrentOffset; + uiCurrentOffset = uiNextSymName; + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PMRPDumpPol32(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiPDumpOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee; + + /* Make sure to not cross a block boundary */ + PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui32Value)) + <= uiPMRPageSize)); + + eError = PMR_PDumpSymbolicAddr(psPMR, + uiLogicalOffset, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiPDumpOffset, + &uiNextSymName); + PVR_GOTO_IF_ERROR(eError, e0); + +#define _MEMPOLL_DELAY (1000) +#define _MEMPOLL_COUNT (2000000000 / _MEMPOLL_DELAY) + + eError = PDumpPMRPOL(aszMemspaceName, + aszSymbolicName, + uiPDumpOffset, + ui32Value, + ui32Mask, + eOperator, + _MEMPOLL_COUNT, + _MEMPOLL_DELAY, + uiPDumpFlags); + PVR_GOTO_IF_ERROR(eError, e0); + + return PVRSRV_OK; + + /* Error exit paths follow */ +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR +PMRPDumpCheck32(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + 
IMG_DEVMEM_OFFSET_T uiPDumpOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee; + + /* Make sure to not cross a block boundary */ + PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui32Value)) + < uiPMRPageSize)); + + eError = PMR_PDumpSymbolicAddr(psPMR, + uiLogicalOffset, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiPDumpOffset, + &uiNextSymName); + if (eError != PVRSRV_OK) + { + goto e0; + } + + eError = PDumpPMRPOL(aszMemspaceName, + aszSymbolicName, + uiPDumpOffset, + ui32Value, + ui32Mask, + eOperator, + 1, + 1, + uiPDumpFlags); + if (eError != PVRSRV_OK) + { + goto e0; + } + + return PVRSRV_OK; + + /* Error exit paths follow */ +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR +PMRPDumpCBP(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiReadOffset, + IMG_DEVMEM_OFFSET_T uiWriteOffset, + IMG_DEVMEM_SIZE_T uiPacketSize, + IMG_DEVMEM_SIZE_T uiBufferSize) +{ + PVRSRV_ERROR eError; + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiPDumpOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + + eError = PMR_PDumpSymbolicAddr(psPMR, + uiReadOffset, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiPDumpOffset, + &uiNextSymName); + PVR_GOTO_IF_ERROR(eError, e0); + + eError = PDumpPMRCBP(aszMemspaceName, + aszSymbolicName, + uiPDumpOffset, + uiWriteOffset, + uiPacketSize, + uiBufferSize); + PVR_GOTO_IF_ERROR(eError, e0); + + return PVRSRV_OK; + + /* Error exit paths follow */ +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +void +PDumpPMRChangeSparsePMR(PMR *psPMR, + IMG_UINT32 uiBlockSize, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_BOOL bInitialise, + IMG_UINT32 
ui32InitValue, + IMG_HANDLE *phPDumpAllocInfoOut) +{ + PVRSRV_ERROR eError; + IMG_HANDLE *phPDumpAllocInfo = (IMG_HANDLE*) psPMR->hPDumpAllocHandle; + + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + IMG_UINT32 i, uiIndex; + + /* Remove pages from the PMR */ + for (i = 0; i < ui32FreePageCount; i++) + { + uiIndex = pai32FreeIndices[i]; + + eError = PDumpFree(phPDumpAllocInfo[uiIndex]); + PVR_ASSERT(eError == PVRSRV_OK); + phPDumpAllocInfo[uiIndex] = NULL; + } + + /* Add new pages to the PMR */ + for (i = 0; i < ui32AllocPageCount; i++) + { + uiIndex = pai32AllocIndices[i]; + + PVR_ASSERT(phPDumpAllocInfo[uiIndex] == NULL); + + eError = PMR_PDumpSymbolicAddr(psPMR, + uiIndex * uiBlockSize, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + + eError = PDumpMalloc(aszMemspaceName, + aszSymbolicName, + uiBlockSize, + uiBlockSize, + bInitialise, + ui32InitValue, + &phPDumpAllocInfo[uiIndex], + PDUMP_NONE); + PVR_ASSERT(eError == PVRSRV_OK); + } + + /* (IMG_HANDLE) <- (IMG_HANDLE*) */ + *phPDumpAllocInfoOut = (IMG_HANDLE) phPDumpAllocInfo; +} + +void +PDumpPMRFreePMR(PMR *psPMR, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiBlockSize, + IMG_UINT32 uiLog2Contiguity, + IMG_HANDLE hPDumpAllocationInfoHandle) +{ + PVRSRV_ERROR eError; + IMG_UINT32 i; + + /* (IMG_HANDLE*) <- (IMG_HANDLE) */ + IMG_HANDLE *ahPDumpAllocHandleArray = (IMG_HANDLE*) hPDumpAllocationInfoHandle; + + for (i = 0; i < psPMR->uiNumPDumpBlocks; i++) + { + if (ahPDumpAllocHandleArray[i] != NULL) + { + eError = PDumpFree(ahPDumpAllocHandleArray[i]); + PVR_ASSERT(eError == PVRSRV_OK); + ahPDumpAllocHandleArray[i] = NULL; + } + } + + OSFreeMem(ahPDumpAllocHandleArray); +} + + +void +PDumpPMRMallocPMR(PMR *psPMR, + IMG_DEVMEM_SIZE_T uiSize, + 
IMG_DEVMEM_ALIGN_T uiBlockSize, + IMG_UINT32 ui32ChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *puiMappingTable, + IMG_UINT32 uiLog2Contiguity, + IMG_BOOL bInitialise, + IMG_UINT32 ui32InitValue, + IMG_HANDLE *phPDumpAllocInfoOut, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_HANDLE *phPDumpAllocInfo; + + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + IMG_UINT32 uiNumPhysBlocks; + IMG_UINT32 uiNumVirtBlocks; + IMG_UINT32 i, uiIndex; + + if (PMR_IsSparse(psPMR)) + { + uiNumPhysBlocks = (ui32ChunkSize * ui32NumPhysChunks) >> uiLog2Contiguity; + /* Make sure we did not cut off anything */ + PVR_ASSERT(uiNumPhysBlocks << uiLog2Contiguity == (ui32ChunkSize * ui32NumPhysChunks)); + } + else + { + uiNumPhysBlocks = uiSize >> uiLog2Contiguity; + /* Make sure we did not cut off anything */ + PVR_ASSERT(uiNumPhysBlocks << uiLog2Contiguity == uiSize); + } + + uiNumVirtBlocks = uiSize >> uiLog2Contiguity; + PVR_ASSERT(uiNumVirtBlocks << uiLog2Contiguity == uiSize); + + psPMR->uiNumPDumpBlocks = uiNumVirtBlocks; + + phPDumpAllocInfo = (IMG_HANDLE*) OSAllocZMem(uiNumVirtBlocks * sizeof(IMG_HANDLE)); + + + for (i = 0; i < uiNumPhysBlocks; i++) + { + uiIndex = PMR_IsSparse(psPMR) ? 
puiMappingTable[i] : i; + + eError = PMR_PDumpSymbolicAddr(psPMR, + uiIndex * uiBlockSize, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + + eError = PDumpMalloc(aszMemspaceName, + aszSymbolicName, + uiBlockSize, + uiBlockSize, + bInitialise, + ui32InitValue, + &phPDumpAllocInfo[uiIndex], + ui32PDumpFlags); + PVR_ASSERT(eError == PVRSRV_OK); + } + + /* (IMG_HANDLE) <- (IMG_HANDLE*) */ + *phPDumpAllocInfoOut = (IMG_HANDLE) phPDumpAllocInfo; + +} +#endif /* PDUMP */ + + +void *PMRGetPrivateData(const PMR *psPMR, + const PMR_IMPL_FUNCTAB *psFuncTab) +{ + return (psFuncTab == psPMR->psFuncTab) ? psPMR->pvFlavourData : NULL; +} + +#define PMR_PM_WORD_SIZE 4 + +PVRSRV_ERROR +PMRWritePMPageList(/* Target PMR, offset, and length */ + PMR *psPageListPMR, + IMG_DEVMEM_OFFSET_T uiTableOffset, + IMG_DEVMEM_SIZE_T uiTableLength, + /* Referenced PMR, and "page" granularity */ + PMR *psReferencePMR, + IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize, + PMR_PAGELIST **ppsPageList) +{ + PVRSRV_ERROR eError; + IMG_DEVMEM_SIZE_T uiWordSize; + IMG_UINT32 uiNumPages; + IMG_UINT32 uiPageIndex; + PMR_FLAGS_T uiFlags = psPageListPMR->uiFlags; + PMR_PAGELIST *psPageList; +#if defined(PDUMP) + IMG_CHAR aszTableEntryMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszTableEntrySymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiTableEntryPDumpOffset; + IMG_CHAR aszPageMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszPageSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiPagePDumpOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; +#endif +#if !defined(NO_HARDWARE) + IMG_UINT32 uiPageListPageSize = 1 << psPageListPMR->uiLog2ContiguityGuarantee; + IMG_UINT64 uiPageListPMRPage = 0; + IMG_UINT64 uiPrevPageListPMRPage = 0; + IMG_HANDLE hPrivData = NULL; + void *pvKernAddr = NULL; + IMG_UINT32 *pui32DataPtr = NULL; + 
IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_DEV_PHYADDR *pasDevAddrPtr; + IMG_BOOL *pbPageIsValid; +#endif + + uiWordSize = PMR_PM_WORD_SIZE; + + /* check we're being asked to write the same number of 4-byte units as there are pages */ + uiNumPages = (IMG_UINT32)(psReferencePMR->uiLogicalSize >> uiLog2PageSize); + + if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psReferencePMR->uiLogicalSize) + { + /* Strictly speaking, it's possible to provoke this error in two ways: + (i) if it's not a whole multiple of the page size; or + (ii) if there are more than 4 billion pages. + The latter is unlikely. :) but the check is required in order to justify the cast. + */ + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE, return_error); + } + uiWordSize = (IMG_UINT32)uiTableLength / uiNumPages; + if (uiNumPages * uiWordSize != uiTableLength) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE, return_error); + } + + /* Check we're not being asked to write off the end of the PMR */ + PVR_GOTO_IF_INVALID_PARAM(uiTableOffset + uiTableLength <= psPageListPMR->uiLogicalSize, eError, return_error); + + /* the PMR into which we are writing must not be user CPU mappable: */ + if (PVRSRV_CHECK_CPU_READABLE(uiFlags) || PVRSRV_CHECK_CPU_WRITEABLE(uiFlags)) + { + PVR_DPF((PVR_DBG_ERROR, + "Masked flags = 0x%" PVRSRV_MEMALLOCFLAGS_FMTSPEC, + (uiFlags & (PVRSRV_MEMALLOCFLAG_CPU_READABLE | PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE)))); + PVR_DPF((PVR_DBG_ERROR, + "Page list PMR allows CPU mapping (0x%" PVRSRV_MEMALLOCFLAGS_FMTSPEC ")", + uiFlags)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS, return_error); + } + + if (_PMRIsSparse(psPageListPMR)) + { + PVR_LOG_GOTO_WITH_ERROR("psPageListPMR", eError, PVRSRV_ERROR_INVALID_PARAMS, return_error); + } + + if (_PMRIsSparse(psReferencePMR)) + { + PVR_LOG_GOTO_WITH_ERROR("psReferencePMR", eError, PVRSRV_ERROR_INVALID_PARAMS, 
return_error); + } + + psPageList = OSAllocMem(sizeof(PMR_PAGELIST)); + PVR_LOG_GOTO_IF_NOMEM(psPageList, eError, return_error); + + psPageList->psReferencePMR = psReferencePMR; + + /* Need to lock down the physical addresses of the reference PMR */ + /* N.B. This also checks that the requested "contiguity" is achievable */ + eError = PMRLockSysPhysAddresses(psReferencePMR); + PVR_GOTO_IF_ERROR(eError, free_page_list); + +#if !defined(NO_HARDWARE) + if (uiNumPages > PMR_MAX_TRANSLATION_STACK_ALLOC) + { + pasDevAddrPtr = OSAllocMem(uiNumPages * sizeof(IMG_DEV_PHYADDR)); + PVR_LOG_GOTO_IF_NOMEM(pasDevAddrPtr, eError, unlock_phys_addrs); + + pbPageIsValid = OSAllocMem(uiNumPages * sizeof(IMG_BOOL)); + if (pbPageIsValid == NULL) + { + /* Clean-up before exit */ + OSFreeMem(pasDevAddrPtr); + + PVR_LOG_GOTO_WITH_ERROR("pbPageIsValid", eError, PVRSRV_ERROR_OUT_OF_MEMORY, free_devaddr_array); + } + } + else + { + pasDevAddrPtr = asDevPAddr; + pbPageIsValid = abValid; + } + + eError = PMR_DevPhysAddr(psReferencePMR, uiLog2PageSize, uiNumPages, 0, + pasDevAddrPtr, pbPageIsValid); + PVR_LOG_GOTO_IF_ERROR(eError, "PMR_DevPhysAddr", free_valid_array); +#endif + + for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++) + { + IMG_DEVMEM_OFFSET_T uiPMROffset = uiTableOffset + (uiWordSize * uiPageIndex); + +#if defined(PDUMP) + eError = PMR_PDumpSymbolicAddr(psPageListPMR, + uiPMROffset, + sizeof(aszTableEntryMemspaceName), + &aszTableEntryMemspaceName[0], + sizeof(aszTableEntrySymbolicName), + &aszTableEntrySymbolicName[0], + &uiTableEntryPDumpOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + + eError = PMR_PDumpSymbolicAddr(psReferencePMR, + (IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize, + sizeof(aszPageMemspaceName), + &aszPageMemspaceName[0], + sizeof(aszPageSymbolicName), + &aszPageSymbolicName[0], + &uiPagePDumpOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + + eError = PDumpWriteShiftedMaskedValue(/* destination */ + 
aszTableEntryMemspaceName, + aszTableEntrySymbolicName, + uiTableEntryPDumpOffset, + /* source */ + aszPageMemspaceName, + aszPageSymbolicName, + uiPagePDumpOffset, + /* shift right */ + uiLog2PageSize, + /* shift left */ + 0, + /* mask */ + 0xffffffff, + /* word size */ + uiWordSize, + /* flags */ + PDUMP_FLAGS_CONTINUOUS); + PVR_ASSERT(eError == PVRSRV_OK); +#else + PVR_UNREFERENCED_PARAMETER(uiPMROffset); +#endif + +#if !defined(NO_HARDWARE) + + /* + We check for sparse PMR's at function entry, but as we can, + check that every page is valid + */ + PVR_ASSERT(pbPageIsValid[uiPageIndex]); + PVR_ASSERT(pasDevAddrPtr[uiPageIndex].uiAddr != 0); + PVR_ASSERT(((pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize) & 0xFFFFFFFF00000000ll) == 0); + + uiPageListPMRPage = uiPMROffset >> psReferencePMR->uiLog2ContiguityGuarantee; + + if ((pui32DataPtr == NULL) || (uiPageListPMRPage != uiPrevPageListPMRPage)) + { + size_t uiMappingOffset = uiPMROffset & (~(uiPageListPageSize - 1)); + size_t uiMappedSize; + + /* If we already had a page list mapped, we need to unmap it... 
*/ + if (pui32DataPtr != NULL) + { + PMRReleaseKernelMappingData(psPageListPMR, hPrivData); + } + + eError = PMRAcquireKernelMappingData(psPageListPMR, + uiMappingOffset, + uiPageListPageSize, + &pvKernAddr, + &uiMappedSize, + &hPrivData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Error mapping page list PMR page (%" IMG_UINT64_FMTSPEC ") into kernel (%d)", + uiPageListPMRPage, eError)); + goto free_valid_array; + } + + uiPrevPageListPMRPage = uiPageListPMRPage; + PVR_ASSERT(uiMappedSize >= uiPageListPageSize); + PVR_ASSERT(pvKernAddr != NULL); + + pui32DataPtr = IMG_OFFSET_ADDR(pvKernAddr, (uiPMROffset & (uiPageListPageSize - 1))); + } + + PVR_ASSERT(((pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize) & 0xFFFFFFFF00000000ll) == 0); + + /* Write the physical page index into the page list PMR */ + *pui32DataPtr++ = TRUNCATE_64BITS_TO_32BITS(pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize); + + /* Last page so unmap */ + if (uiPageIndex == (uiNumPages - 1)) + { + PMRReleaseKernelMappingData(psPageListPMR, hPrivData); + } +#endif + } + +#if !defined(NO_HARDWARE) + if (pasDevAddrPtr != asDevPAddr) + { + OSFreeMem(pbPageIsValid); + OSFreeMem(pasDevAddrPtr); + } +#endif + *ppsPageList = psPageList; + return PVRSRV_OK; + + /* Error exit paths follow */ +#if !defined(NO_HARDWARE) + +free_valid_array: + if (pbPageIsValid != abValid) + { + OSFreeMem(pbPageIsValid); + } + +free_devaddr_array: + if (pasDevAddrPtr != asDevPAddr) + { + OSFreeMem(pasDevAddrPtr); + } + +unlock_phys_addrs: + PMRUnlockSysPhysAddresses(psReferencePMR); +#endif + +free_page_list: + OSFreeMem(psPageList); + +return_error: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + + +PVRSRV_ERROR +PMRUnwritePMPageList(PMR_PAGELIST *psPageList) +{ + PVRSRV_ERROR eError; + + eError = PMRUnlockSysPhysAddresses(psPageList->psReferencePMR); + PVR_ASSERT(eError == PVRSRV_OK); + OSFreeMem(psPageList); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PMRZeroingPMR(PMR *psPMR, + 
IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize) +{ + IMG_UINT32 uiNumPages; + IMG_UINT32 uiPageIndex; + IMG_UINT32 ui32PageSize = 1 << uiLog2PageSize; + IMG_HANDLE hPrivData = NULL; + void *pvKernAddr = NULL; + PVRSRV_ERROR eError = PVRSRV_OK; + size_t uiMappedSize; + + PVR_ASSERT(psPMR); + + /* Calculate number of pages in this PMR */ + uiNumPages = (IMG_UINT32)(psPMR->uiLogicalSize >> uiLog2PageSize); + + /* Verify the logical Size is a multiple or the physical page size */ + if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psPMR->uiLogicalSize) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: PMR is not a multiple of %u", + __func__, + ui32PageSize)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE, MultiPage_Error); + } + + if (_PMRIsSparse(psPMR)) + { + PVR_LOG_GOTO_WITH_ERROR("psPMR", eError, PVRSRV_ERROR_INVALID_PARAMS, Sparse_Error); + } + + /* Scan through all pages of the PMR */ + for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++) + { + /* map the physical page (for a given PMR offset) into kernel space */ + eError = PMRAcquireKernelMappingData(psPMR, + (size_t)uiPageIndex << uiLog2PageSize, + ui32PageSize, + &pvKernAddr, + &uiMappedSize, + &hPrivData); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", AcquireKernelMapping_Error); + + /* ensure the mapped page size is the same as the physical page size */ + if (uiMappedSize != ui32PageSize) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Physical Page size = 0x%08x, Size of Mapping = 0x%016" IMG_UINT64_FMTSPECx, + __func__, + ui32PageSize, + (IMG_UINT64)uiMappedSize)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, MappingSize_Error); + } + + /* Use the conservative 'DeviceMemSet' here because we can't know + * if this PMR will be mapped cached. 
+ */ + OSDeviceMemSet(pvKernAddr, 0, ui32PageSize); + + /* release mapping */ + PMRReleaseKernelMappingData(psPMR, hPrivData); + + } + + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Zeroing PMR %p done (num pages %u, page size %u)", + __func__, + psPMR, + uiNumPages, + ui32PageSize)); + + return PVRSRV_OK; + + + /* Error handling */ + +MappingSize_Error: + PMRReleaseKernelMappingData(psPMR, hPrivData); + +AcquireKernelMapping_Error: +Sparse_Error: +MultiPage_Error: + + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR +PMRDumpPageList(PMR *psPMR, + IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize) +{ + IMG_DEV_PHYADDR sDevAddrPtr; + IMG_UINT32 uiNumPages; + IMG_UINT32 uiPageIndex; + IMG_BOOL bPageIsValid; + IMG_UINT32 ui32Col = 16; + IMG_UINT32 ui32SizePerCol = 11; + IMG_UINT32 ui32ByteCount = 0; + IMG_CHAR pszBuffer[16 /* ui32Col */ * 11 /* ui32SizePerCol */ + 1]; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Get number of pages */ + uiNumPages = (IMG_UINT32)(psPMR->uiLogicalSize >> uiLog2PageSize); + + /* Verify the logical Size is a multiple or the physical page size */ + if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psPMR->uiLogicalSize) + { + PVR_DPF((PVR_DBG_ERROR, "PMRPrintPageList: PMR is not a multiple of %u", 1 << uiLog2PageSize)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE, MultiPage_Error); + } + + if (_PMRIsSparse(psPMR)) + { + PVR_LOG_GOTO_WITH_ERROR("psPMR", eError, PVRSRV_ERROR_INVALID_PARAMS, Sparse_Error); + } + + PVR_LOG((" PMR %p, Number of pages %u, Log2PageSize %d", psPMR, uiNumPages, uiLog2PageSize)); + + /* Print the address of the physical pages */ + for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++) + { + /* Get Device physical Address */ + eError = PMR_DevPhysAddr(psPMR, + uiLog2PageSize, + 1, + (IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize, + &sDevAddrPtr, + &bPageIsValid); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PMRPrintPageList: PMR %p failed to get DevPhysAddr with error %u", + psPMR, 
+ eError)); + goto DevPhysAddr_Error; + } + + ui32ByteCount += OSSNPrintf(pszBuffer + ui32ByteCount, ui32SizePerCol + 1, "%08x ", (IMG_UINT32)(sDevAddrPtr.uiAddr >> uiLog2PageSize)); + PVR_ASSERT(ui32ByteCount < ui32Col * ui32SizePerCol); + + if (uiPageIndex % ui32Col == ui32Col-1) + { + PVR_LOG((" Phys Page: %s", pszBuffer)); + ui32ByteCount = 0; + } + } + if (ui32ByteCount > 0) + { + PVR_LOG((" Phys Page: %s", pszBuffer)); + } + + return PVRSRV_OK; + + /* Error handling */ +DevPhysAddr_Error: +Sparse_Error: +MultiPage_Error: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR +PMRInit(void) +{ + PVRSRV_ERROR eError; + + /* Singleton PMR context already initialised */ + if (_gsSingletonPMRContext.bModuleInitialised) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR, out); + } + + eError = OSLockCreate(&_gsSingletonPMRContext.hLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", out); + + _gsSingletonPMRContext.uiNextSerialNum = 1; + + _gsSingletonPMRContext.uiNextKey = 0x8300f001 * (uintptr_t)&_gsSingletonPMRContext; + + _gsSingletonPMRContext.bModuleInitialised = IMG_TRUE; + + _gsSingletonPMRContext.uiNumLivePMRs = 0; + +#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) + eError = MMapStatsInit(); + PVR_LOG_GOTO_IF_ERROR(eError, "MMapStatsInit", out); +#endif + +out: + PVR_ASSERT(eError == PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR +PMRDeInit(void) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_ERROR eError = PVRSRV_OK; + + if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + goto out; + } + + /* Singleton PMR context is not initialised */ + if (!_gsSingletonPMRContext.bModuleInitialised) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR, out); + } + +#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) + MMapStatsDeInit(); +#endif + + if (_gsSingletonPMRContext.uiNumLivePMRs != 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Error: %d live PMRs remain", + __func__, + 
_gsSingletonPMRContext.uiNumLivePMRs)); + PVR_DPF((PVR_DBG_ERROR, "%s: This is an unrecoverable error; a subsequent crash is inevitable", + __func__)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR, out); + } + + OSLockDestroy(_gsSingletonPMRContext.hLock); + + _gsSingletonPMRContext.bModuleInitialised = IMG_FALSE; + +out: + PVR_ASSERT(eError == PVRSRV_OK); + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/common/power.c b/drivers/mcst/gpu-imgtec/services/server/common/power.c new file mode 100644 index 000000000000..77d0f0069b53 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/power.c @@ -0,0 +1,1085 @@ +/*************************************************************************/ /*! +@File power.c +@Title Power management functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Main APIs for power management functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "pdump_km.h" +#include "allocmem.h" +#include "osfunc.h" + +#include "lock.h" +#include "pvrsrv.h" +#include "pvr_debug.h" +#include "process_stats.h" + + +struct _PVRSRV_POWER_DEV_TAG_ +{ + PFN_PRE_POWER pfnDevicePrePower; + PFN_POST_POWER pfnDevicePostPower; + PFN_SYS_DEV_PRE_POWER pfnSystemPrePower; + PFN_SYS_DEV_POST_POWER pfnSystemPostPower; + PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange; + PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange; + PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest; + PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest; + PFN_GPU_UNITS_POWER_CHANGE pfnGPUUnitsPowerChange; + IMG_HANDLE hSysData; + IMG_HANDLE hDevCookie; + PVRSRV_DEV_POWER_STATE eDefaultPowerState; + ATOMIC_T eCurrentPowerState; +}; + +/*! 
+ Typedef for a pointer to a function that will be called for re-acquiring + device powerlock after releasing it temporarily for some timeout period + in function PVRSRVDeviceIdleRequestKM + */ +typedef PVRSRV_ERROR (*PFN_POWER_LOCK_ACQUIRE) (PPVRSRV_DEVICE_NODE psDevNode); + +static inline IMG_UINT64 PVRSRVProcessStatsGetTimeNs(void) +{ +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + return OSClockns64(); +#else + return 0; +#endif +} + +static inline IMG_UINT64 PVRSRVProcessStatsGetTimeUs(void) +{ +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + return OSClockus(); +#else + return 0; +#endif +} + +/*! +****************************************************************************** + + @Function _IsSystemStatePowered + + @Description Tests whether a given system state represents powered-up. + + @Input eSystemPowerState : a system power state + + @Return IMG_BOOL + +******************************************************************************/ +static IMG_BOOL _IsSystemStatePowered(PVRSRV_SYS_POWER_STATE eSystemPowerState) +{ + return (eSystemPowerState == PVRSRV_SYS_POWER_STATE_ON); +} + +/* We don't expect PID=0 to acquire device power-lock */ +#define PWR_LOCK_OWNER_PID_CLR_VAL 0 + +PVRSRV_ERROR PVRSRVPowerLockInit(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + PVRSRV_ERROR eError; + + eError = OSLockCreate(&psDeviceNode->hPowerLock); + PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); + + psDeviceNode->uiPwrLockOwnerPID = PWR_LOCK_OWNER_PID_CLR_VAL; + return PVRSRV_OK; +} + +void PVRSRVPowerLockDeInit(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + psDeviceNode->uiPwrLockOwnerPID = PWR_LOCK_OWNER_PID_CLR_VAL; + OSLockDestroy(psDeviceNode->hPowerLock); +} + +/*! 
+****************************************************************************** + + @Function PVRSRVPwrLockIsLockedByMe + + @Description Determine if the calling context is holding the device power-lock + + @Return IMG_BOOL + +******************************************************************************/ +IMG_BOOL PVRSRVPwrLockIsLockedByMe(PCPVRSRV_DEVICE_NODE psDeviceNode) +{ + return OSLockIsLocked(psDeviceNode->hPowerLock) && + OSGetCurrentClientProcessIDKM() == psDeviceNode->uiPwrLockOwnerPID; +} + +/*! +****************************************************************************** + + @Function PVRSRVPowerLock + + @Description Obtain the mutex for power transitions. Only allowed when + system power is on. + + @Return PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + OSLockAcquire(psDeviceNode->hPowerLock); + + /* Only allow to take powerlock when the system power is on */ + if (_IsSystemStatePowered(psDeviceNode->eCurrentSysPowerState)) + { + psDeviceNode->uiPwrLockOwnerPID = OSGetCurrentClientProcessIDKM(); + return PVRSRV_OK; + } + + OSLockRelease(psDeviceNode->hPowerLock); + + return PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF; +} + +/*! +****************************************************************************** + + @Function PVRSRVPowerTryLock + + @Description Try to obtain the mutex for power transitions. Only allowed when + system power is on. 
+ + @Return PVRSRV_ERROR_RETRY or PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF or + PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVPowerTryLock(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + if (!(OSTryLockAcquire(psDeviceNode->hPowerLock))) + { + return PVRSRV_ERROR_RETRY; + } + + /* Only allow to take powerlock when the system power is on */ + if (_IsSystemStatePowered(psDeviceNode->eCurrentSysPowerState)) + { + psDeviceNode->uiPwrLockOwnerPID = OSGetCurrentClientProcessIDKM(); + + /* System is powered ON, return OK */ + return PVRSRV_OK; + } + else + { + /* System is powered OFF, release the lock and return error */ + OSLockRelease(psDeviceNode->hPowerLock); + return PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF; + } +} + +/*! +****************************************************************************** + + @Function _PVRSRVForcedPowerLock + + @Description Obtain the mutex for power transitions regardless of system + power state + + @Return Always returns PVRSRV_OK. Function prototype required same as + PFN_POWER_LOCK_ACQUIRE + +******************************************************************************/ +static PVRSRV_ERROR _PVRSRVForcedPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + OSLockAcquire(psDeviceNode->hPowerLock); + psDeviceNode->uiPwrLockOwnerPID = OSGetCurrentClientProcessIDKM(); + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVPowerUnlock + + @Description Release the mutex for power transitions + + @Return PVRSRV_ERROR + +******************************************************************************/ +void PVRSRVPowerUnlock(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + PVR_ASSERT(PVRSRVPwrLockIsLockedByMe(psDeviceNode)); + + /* Reset uiPwrLockOwnerPID before releasing lock */ + psDeviceNode->uiPwrLockOwnerPID = PWR_LOCK_OWNER_PID_CLR_VAL; + OSLockRelease(psDeviceNode->hPowerLock); +} + +IMG_BOOL PVRSRVDeviceIsDefaultStateOFF(PVRSRV_POWER_DEV *psPowerDevice) +{ + return (psPowerDevice->eDefaultPowerState == PVRSRV_DEV_POWER_STATE_OFF); +} + +/*! +****************************************************************************** + + @Function PVRSRVSetDeviceDefaultPowerState + + @Description Set the default device power state to eNewPowerState + + @Input psDeviceNode : Device node + @Input eNewPowerState : New power state + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVSetDeviceDefaultPowerState(PCPVRSRV_DEVICE_NODE psDeviceNode, + PVRSRV_DEV_POWER_STATE eNewPowerState) +{ + PVRSRV_POWER_DEV *psPowerDevice; + + psPowerDevice = psDeviceNode->psPowerDev; + if (psPowerDevice == NULL) + { + return PVRSRV_ERROR_INVALID_DEVICE; + } + + psPowerDevice->eDefaultPowerState = eNewPowerState; + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function _PVRSRVDeviceIdleRequestKM + + @Description Perform device-specific processing required to force the device + idle. 
The device power-lock might be temporarily released (and + again re-acquired) during the course of this call, hence to + maintain lock-ordering power-lock should be the last acquired + lock before calling this function + + @Input psDeviceNode : Device node + + @Input pfnIsDefaultStateOff : When specified, the idle request is only + processed if this function passes. + + @Input bDeviceOffPermitted : IMG_TRUE if the transition should not fail + if device off + IMG_FALSE if the transition should fail if + device off + + @Input pfnPowerLockAcquire : Function to re-acquire power-lock in-case + it was necessary to release it. + + @Return PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED + When re-acquisition of power-lock failed. + This error NEEDS EXPLICIT HANDLING at call + site as it signifies the caller needs to + AVOID calling PVRSRVPowerUnlock, since + power-lock is no longer "possessed" by + this context. + + PVRSRV_OK When idle request succeeded. + PVRSRV_ERROR Other system errors. + +******************************************************************************/ +static PVRSRV_ERROR _PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode, + PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff, + IMG_BOOL bDeviceOffPermitted, + PFN_POWER_LOCK_ACQUIRE pfnPowerLockAcquire) +{ + PVRSRV_POWER_DEV *psPowerDev = psDeviceNode->psPowerDev; + PVRSRV_ERROR eError; + + if ((psPowerDev && psPowerDev->pfnForcedIdleRequest) && + (!pfnIsDefaultStateOff || pfnIsDefaultStateOff(psPowerDev))) + { + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = psPowerDev->pfnForcedIdleRequest(psPowerDev->hDevCookie, + bDeviceOffPermitted); + if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED) + { + PVRSRV_ERROR eErrPwrLockAcq; + /* FW denied idle request */ + PVRSRVPowerUnlock(psDeviceNode); + + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + + eErrPwrLockAcq = pfnPowerLockAcquire(psDeviceNode); + if (eErrPwrLockAcq != PVRSRV_OK) + { + /* We only understand PVRSRV_ERROR_RETRY, so assert on others. 
+ * Moreover, we've ended-up releasing the power-lock which was + * originally "held" by caller before calling this function - + * since this needs vigilant handling at call-site, we pass + * back an explicit error, for caller(s) to "avoid" calling + * PVRSRVPowerUnlock */ + PVR_ASSERT(eErrPwrLockAcq == PVRSRV_ERROR_RETRY); + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to re-acquire power-lock " + "(%s) after releasing it for a time-out", + __func__, PVRSRVGetErrorString(eErrPwrLockAcq))); + return PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED; + } + } + else + { + /* idle request successful or some other error occurred, return */ + break; + } + } END_LOOP_UNTIL_TIMEOUT(); + } + else + { + return PVRSRV_OK; + } + + return eError; +} + +/* + * Wrapper function helps limiting calling complexity of supplying additional + * PFN_POWER_LOCK_ACQUIRE argument (required by _PVRSRVDeviceIdleRequestKM) + */ +inline PVRSRV_ERROR PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode, + PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff, + IMG_BOOL bDeviceOffPermitted) +{ + return _PVRSRVDeviceIdleRequestKM(psDeviceNode, + pfnIsDefaultStateOff, + bDeviceOffPermitted, + PVRSRVPowerLock); +} + +/*! +****************************************************************************** + + @Function PVRSRVDeviceIdleCancelRequestKM + + @Description + + Perform device-specific processing required to cancel the forced idle state on the device, returning to normal operation. + + @Input psDeviceNode : Device node + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + PVRSRV_POWER_DEV *psPowerDev = psDeviceNode->psPowerDev; + + if (psPowerDev && psPowerDev->pfnForcedIdleCancelRequest) + { + return psPowerDev->pfnForcedIdleCancelRequest(psPowerDev->hDevCookie); + } + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVDevicePrePowerStateKM + + @Description + + Perform device-specific processing required before a power transition + + @Input psPowerDevice : Power device + @Input eNewPowerState : New power state + @Input bForced : TRUE if the transition should not fail (e.g. OS request) + + @Return PVRSRV_ERROR + +******************************************************************************/ +static +PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(PVRSRV_POWER_DEV *psPowerDevice, + PVRSRV_DEV_POWER_STATE eNewPowerState, + IMG_BOOL bForced) +{ + PVRSRV_DEV_POWER_STATE eCurrentPowerState; + IMG_UINT64 ui64SysTimer1 = 0; + IMG_UINT64 ui64SysTimer2 = 0; + IMG_UINT64 ui64DevTimer1 = 0; + IMG_UINT64 ui64DevTimer2 = 0; + PVRSRV_ERROR eError; + + PVR_ASSERT(eNewPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT); + + eCurrentPowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState); + + if (psPowerDevice->pfnDevicePrePower != NULL) + { + ui64DevTimer1 = PVRSRVProcessStatsGetTimeNs(); + + /* Call the device's power callback. */ + eError = psPowerDevice->pfnDevicePrePower(psPowerDevice->hDevCookie, + eNewPowerState, + eCurrentPowerState, + bForced); + + ui64DevTimer2 = PVRSRVProcessStatsGetTimeNs(); + + PVR_RETURN_IF_ERROR(eError); + } + + /* Do any required system-layer processing. */ + if (psPowerDevice->pfnSystemPrePower != NULL) + { + ui64SysTimer1 = PVRSRVProcessStatsGetTimeNs(); + + eError = psPowerDevice->pfnSystemPrePower(psPowerDevice->hSysData, + eNewPowerState, + eCurrentPowerState, + bForced); + + ui64SysTimer2 = PVRSRVProcessStatsGetTimeNs(); + + PVR_RETURN_IF_ERROR(eError); + } + + InsertPowerTimeStatistic(ui64SysTimer1, ui64SysTimer2, + ui64DevTimer1, ui64DevTimer2, + bForced, + eNewPowerState == PVRSRV_DEV_POWER_STATE_ON, + IMG_TRUE); + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVDevicePostPowerStateKM + + @Description + + Perform device-specific processing required after a power transition + + @Input psPowerDevice : Power device + @Input eNewPowerState : New power state + @Input bForced : TRUE if the transition should not fail (e.g. OS request) + + @Return PVRSRV_ERROR + +******************************************************************************/ +static +PVRSRV_ERROR PVRSRVDevicePostPowerStateKM(PVRSRV_POWER_DEV *psPowerDevice, + PVRSRV_DEV_POWER_STATE eNewPowerState, + IMG_BOOL bForced) +{ + PVRSRV_DEV_POWER_STATE eCurrentPowerState; + IMG_UINT64 ui64SysTimer1 = 0; + IMG_UINT64 ui64SysTimer2 = 0; + IMG_UINT64 ui64DevTimer1 = 0; + IMG_UINT64 ui64DevTimer2 = 0; + PVRSRV_ERROR eError; + + PVR_ASSERT(eNewPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT); + + eCurrentPowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState); + + /* Do any required system-layer processing. */ + if (psPowerDevice->pfnSystemPostPower != NULL) + { + ui64SysTimer1 = PVRSRVProcessStatsGetTimeNs(); + + eError = psPowerDevice->pfnSystemPostPower(psPowerDevice->hSysData, + eNewPowerState, + eCurrentPowerState, + bForced); + + ui64SysTimer2 = PVRSRVProcessStatsGetTimeNs(); + + PVR_RETURN_IF_ERROR(eError); + } + + if (psPowerDevice->pfnDevicePostPower != NULL) + { + ui64DevTimer1 = PVRSRVProcessStatsGetTimeNs(); + + /* Call the device's power callback. */ + eError = psPowerDevice->pfnDevicePostPower(psPowerDevice->hDevCookie, + eNewPowerState, + eCurrentPowerState, + bForced); + + ui64DevTimer2 = PVRSRVProcessStatsGetTimeNs(); + + PVR_RETURN_IF_ERROR(eError); + } + + InsertPowerTimeStatistic(ui64SysTimer1, ui64SysTimer2, + ui64DevTimer1, ui64DevTimer2, + bForced, + eNewPowerState == PVRSRV_DEV_POWER_STATE_ON, + IMG_FALSE); + + OSAtomicWrite(&psPowerDevice->eCurrentPowerState, eNewPowerState); + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVSetDevicePowerStateKM + + @Description Set the Device into a new state + + @Input psDeviceNode : Device node + @Input eNewPowerState : New power state + @Input bForced : TRUE if the transition should not fail (e.g. OS request) + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(PPVRSRV_DEVICE_NODE psDeviceNode, + PVRSRV_DEV_POWER_STATE eNewPowerState, + IMG_BOOL bForced) +{ + PVRSRV_ERROR eError; + PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_POWER_DEV *psPowerDevice; + + psPowerDevice = psDeviceNode->psPowerDev; + if (!psPowerDevice) + { + return PVRSRV_OK; + } + + if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) + { + eNewPowerState = psPowerDevice->eDefaultPowerState; + } + + if (OSAtomicRead(&psPowerDevice->eCurrentPowerState) != eNewPowerState) + { + eError = PVRSRVDevicePrePowerStateKM(psPowerDevice, + eNewPowerState, + bForced); + PVR_GOTO_IF_ERROR(eError, ErrorExit); + + eError = PVRSRVDevicePostPowerStateKM(psPowerDevice, + eNewPowerState, + bForced); + PVR_GOTO_IF_ERROR(eError, ErrorExit); + + /* Signal Device Watchdog Thread about power mode change. 
*/ + if (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON) + { + psPVRSRVData->ui32DevicesWatchdogPwrTrans++; +#if !defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) + if (psPVRSRVData->ui32DevicesWatchdogTimeout == DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT) +#endif + { + eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + } + } +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) + else if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF) + { + /* signal watchdog thread and give it a chance to switch to + * longer / infinite wait time */ + eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + } +#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ + } + + return PVRSRV_OK; + +ErrorExit: + + if (eError == PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Transition to %d was denied, Forced=%d", + __func__, eNewPowerState, bForced)); + } + else if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Transition to %d FAILED (%s)", + __func__, eNewPowerState, PVRSRVGetErrorString(eError))); + } + + return eError; +} + +/*************************************************************************/ /*! +@Function PVRSRVSetDeviceSystemPowerState +@Description Set the device into a new power state based on the systems power + state +@Input psDeviceNode Device node +@Input eNewSysPowerState New system power state +@Return PVRSRV_ERROR PVRSRV_OK on success or an error otherwise +*/ /**************************************************************************/ +PVRSRV_ERROR PVRSRVSetDeviceSystemPowerState(PPVRSRV_DEVICE_NODE psDeviceNode, + PVRSRV_SYS_POWER_STATE eNewSysPowerState) +{ + PVRSRV_ERROR eError; + IMG_UINT uiStage = 0; + + PVRSRV_DEV_POWER_STATE eNewDevicePowerState = + _IsSystemStatePowered(eNewSysPowerState)? 
PVRSRV_DEV_POWER_STATE_DEFAULT : PVRSRV_DEV_POWER_STATE_OFF; + + /* If setting devices to default state, force idle all devices whose default state is off */ + PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff = + (eNewDevicePowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ? PVRSRVDeviceIsDefaultStateOFF : NULL; + + /* require a proper power state */ + if (eNewSysPowerState == PVRSRV_SYS_POWER_STATE_Unspecified) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Prevent simultaneous SetPowerStateKM calls */ + _PVRSRVForcedPowerLock(psDeviceNode); + + /* no power transition requested, so do nothing */ + if (eNewSysPowerState == psDeviceNode->eCurrentSysPowerState) + { + PVRSRVPowerUnlock(psDeviceNode); + return PVRSRV_OK; + } + + eError = _PVRSRVDeviceIdleRequestKM(psDeviceNode, pfnIsDefaultStateOff, + IMG_TRUE, _PVRSRVForcedPowerLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Forced idle request failure (%s)", + __func__, PVRSRVGetErrorString(eError))); + uiStage++; + goto ErrorExit; + } + + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, eNewDevicePowerState, + IMG_TRUE); + if (eError != PVRSRV_OK) + { + uiStage++; + goto ErrorExit; + } + + psDeviceNode->eCurrentSysPowerState = eNewSysPowerState; + + PVRSRVPowerUnlock(psDeviceNode); + + return PVRSRV_OK; + +ErrorExit: + PVRSRVPowerUnlock(psDeviceNode); + + PVR_DPF((PVR_DBG_ERROR, + "%s: Transition from %d to %d FAILED (%s) at stage %u. 
Dumping debug info.", + __func__, psDeviceNode->eCurrentSysPowerState, eNewSysPowerState, + PVRSRVGetErrorString(eError), uiStage)); + + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + + return eError; +} + + +PVRSRV_ERROR PVRSRVRegisterPowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode, + PFN_PRE_POWER pfnDevicePrePower, + PFN_POST_POWER pfnDevicePostPower, + PFN_SYS_DEV_PRE_POWER pfnSystemPrePower, + PFN_SYS_DEV_POST_POWER pfnSystemPostPower, + PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange, + PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange, + PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest, + PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest, + PFN_GPU_UNITS_POWER_CHANGE pfnGPUUnitsPowerChange, + IMG_HANDLE hDevCookie, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + PVRSRV_DEV_POWER_STATE eDefaultPowerState) +{ + PVRSRV_POWER_DEV *psPowerDevice; + + PVR_ASSERT(!psDeviceNode->psPowerDev); + + PVR_ASSERT(eCurrentPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT); + PVR_ASSERT(eDefaultPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT); + + psPowerDevice = OSAllocMem(sizeof(PVRSRV_POWER_DEV)); + PVR_LOG_RETURN_IF_NOMEM(psPowerDevice, "psPowerDevice"); + + /* setup device for power manager */ + psPowerDevice->pfnDevicePrePower = pfnDevicePrePower; + psPowerDevice->pfnDevicePostPower = pfnDevicePostPower; + psPowerDevice->pfnSystemPrePower = pfnSystemPrePower; + psPowerDevice->pfnSystemPostPower = pfnSystemPostPower; + psPowerDevice->pfnPreClockSpeedChange = pfnPreClockSpeedChange; + psPowerDevice->pfnPostClockSpeedChange = pfnPostClockSpeedChange; + psPowerDevice->pfnGPUUnitsPowerChange = pfnGPUUnitsPowerChange; + psPowerDevice->hSysData = psDeviceNode->psDevConfig->hSysData; + psPowerDevice->hDevCookie = hDevCookie; + OSAtomicWrite(&psPowerDevice->eCurrentPowerState, eCurrentPowerState); + psPowerDevice->eDefaultPowerState = eDefaultPowerState; + +#if defined(SUPPORT_AUTOVZ) + if (!PVRSRV_VZ_MODE_IS(NATIVE)) + { + 
psPowerDevice->pfnForcedIdleRequest = NULL; + psPowerDevice->pfnForcedIdleCancelRequest = NULL; + } + else +#endif + { + psPowerDevice->pfnForcedIdleRequest = pfnForcedIdleRequest; + psPowerDevice->pfnForcedIdleCancelRequest = pfnForcedIdleCancelRequest; + } + + psDeviceNode->psPowerDev = psPowerDevice; + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function PVRSRVRemovePowerDevice + + @Description + + Removes device from power management register. Device is located by Device Index + + @Input psDeviceNode : Device node + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRemovePowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + if (psDeviceNode->psPowerDev) + { + OSFreeMem(psDeviceNode->psPowerDev); + psDeviceNode->psPowerDev = NULL; + } + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function PVRSRVGetDevicePowerState + + @Description + + Return the device power state + + @Input psDeviceNode : Device node + @Output pePowerState : Current power state + + @Return PVRSRV_ERROR_UNKNOWN_POWER_STATE if device could not be found. PVRSRV_OK otherwise. + +******************************************************************************/ +PVRSRV_ERROR PVRSRVGetDevicePowerState(PCPVRSRV_DEVICE_NODE psDeviceNode, + PPVRSRV_DEV_POWER_STATE pePowerState) +{ + PVRSRV_POWER_DEV *psPowerDevice; + + psPowerDevice = psDeviceNode->psPowerDev; + if (psPowerDevice == NULL) + { + return PVRSRV_ERROR_UNKNOWN_POWER_STATE; + } + + *pePowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState); + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function PVRSRVIsDevicePowered + + @Description + + Whether the device is powered, for the purposes of lockup detection. 
+ + @Input psDeviceNode : Device node + + @Return IMG_BOOL + +******************************************************************************/ +IMG_BOOL PVRSRVIsDevicePowered(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + PVRSRV_DEV_POWER_STATE ePowerState; + + if (PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState) != PVRSRV_OK) + { + return IMG_FALSE; + } + + return (ePowerState == PVRSRV_DEV_POWER_STATE_ON); +} + +/**************************************************************************/ /*! +@Function PVRSRVDevicePreClockSpeedChange + +@Description This function is called before a voltage/frequency change is + made to the GPU HW. It informs the host driver of the intention + to make a DVFS change. If allows the host driver to idle + the GPU and begin a hold off period from starting new work + on the GPU. + When this call succeeds the caller *must* call + PVRSRVDevicePostClockSpeedChange() to end the hold off period + to allow new work to be submitted to the GPU. + + Called form system layer or OS layer implementation that + is responsible for triggering a GPU DVFS transition. + +@Input psDeviceNode pointer to the device affected by DVFS transition. +@Input bIdleDevice when True, the driver will wait for the GPU to + reach an idle state before the call returns. +@Input pvInfo unused + +@Return PVRSRV_OK on success, power lock acquired and held on exit, + GPU idle. + PVRSRV_ERROR on failure, power lock not held on exit, do not + call PVRSRVDevicePostClockSpeedChange(). +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVDevicePreClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode, + IMG_BOOL bIdleDevice, + void* pvInfo) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_POWER_DEV *psPowerDevice; + IMG_UINT64 ui64StartTimer, ui64StopTimer; + + PVR_UNREFERENCED_PARAMETER(pvInfo); + + ui64StartTimer = PVRSRVProcessStatsGetTimeUs(); + + /* This mutex is released in PVRSRVDevicePostClockSpeedChange. 
*/ + eError = PVRSRVPowerLock(psDeviceNode); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock"); + + psPowerDevice = psDeviceNode->psPowerDev; + if (psPowerDevice) + { + PVRSRV_DEV_POWER_STATE eCurrentPowerState = + OSAtomicRead(&psPowerDevice->eCurrentPowerState); + + if ((eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) && bIdleDevice) + { + /* We can change the clock speed if the device is either IDLE or OFF */ + eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_TRUE); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Forced idle request failed (%s)", + __func__, PVRSRVGetErrorString(eError))); + if (eError != PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) + { + PVRSRVPowerUnlock(psDeviceNode); + } + return eError; + } + } + + eError = psPowerDevice->pfnPreClockSpeedChange(psPowerDevice->hDevCookie, + eCurrentPowerState); + } + + ui64StopTimer = PVRSRVProcessStatsGetTimeUs(); + + InsertPowerTimeStatisticExtraPre(ui64StartTimer, ui64StopTimer); + + return eError; +} + +/**************************************************************************/ /*! +@Function PVRSRVDevicePostClockSpeedChange + +@Description This function is called after a voltage/frequency change has + been made to the GPU HW following a call to + PVRSRVDevicePreClockSpeedChange(). + Before calling this function the caller must ensure the system + data RGX_DATA->RGX_TIMING_INFORMATION->ui32CoreClockSpeed has + been updated with the new frequency set, measured in Hz. + The function informs the host driver that the DVFS change has + completed. The driver will end the work hold off period, cancel + the device idle period and update its time data records. + When this call returns work submissions are unblocked and + are submitted to the GPU as normal. + This function *must* not be called if the preceding call to + PVRSRVDevicePreClockSpeedChange() failed. + + Called form system layer or OS layer implementation that + is responsible for triggering a GPU DVFS transition. 
+ +@Input psDeviceNode pointer to the device affected by DVFS transition. +@Input bIdleDevice when True, the driver will cancel the GPU + device idle state before the call returns. Value + given must match that used in the call to + PVRSRVDevicePreClockSpeedChange() otherwise + undefined behaviour will result. +@Input pvInfo unused + +@Return void power lock released, no longer held on exit. +*/ /**************************************************************************/ +void +PVRSRVDevicePostClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode, + IMG_BOOL bIdleDevice, + void* pvInfo) +{ + PVRSRV_ERROR eError; + PVRSRV_POWER_DEV *psPowerDevice; + IMG_UINT64 ui64StartTimer, ui64StopTimer; + + PVR_UNREFERENCED_PARAMETER(pvInfo); + + ui64StartTimer = PVRSRVProcessStatsGetTimeUs(); + + psPowerDevice = psDeviceNode->psPowerDev; + if (psPowerDevice) + { + PVRSRV_DEV_POWER_STATE eCurrentPowerState = + OSAtomicRead(&psPowerDevice->eCurrentPowerState); + + eError = psPowerDevice->pfnPostClockSpeedChange(psPowerDevice->hDevCookie, + eCurrentPowerState); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Device %p failed (%s)", + __func__, psDeviceNode, PVRSRVGetErrorString(eError))); + } + + if ((eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) && bIdleDevice) + { + eError = PVRSRVDeviceIdleCancelRequestKM(psDeviceNode); + PVR_LOG_IF_ERROR(eError, "PVRSRVDeviceIdleCancelRequestKM"); + } + } + + /* This mutex was acquired in PVRSRVDevicePreClockSpeedChange. */ + PVRSRVPowerUnlock(psDeviceNode); + + OSAtomicIncrement(&psDeviceNode->iNumClockSpeedChanges); + + ui64StopTimer = PVRSRVProcessStatsGetTimeUs(); + + InsertPowerTimeStatisticExtraPost(ui64StartTimer, ui64StopTimer); +} + +/*! +****************************************************************************** + +@Function PVRSRVDeviceGPUUnitsPowerChange +@Description Request from system layer for changing power state of GPU + units +@Input psDeviceNode RGX Device Node. 
+@Input ui32NewValue Value indicating the new power state + of GPU units. how this is interpreted + depends upon the device-specific + function subsequently called by the + server via a pfn. +@Return PVRSRV_ERROR. +*/ /**************************************************************************/ +PVRSRV_ERROR PVRSRVDeviceGPUUnitsPowerChange(PPVRSRV_DEVICE_NODE psDeviceNode, + IMG_UINT32 ui32NewValue) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_POWER_DEV *psPowerDevice; + + psPowerDevice = psDeviceNode->psPowerDev; + if (psPowerDevice) + { + PVRSRV_DEV_POWER_STATE eDevicePowerState; + + eError = PVRSRVPowerLock(psDeviceNode); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock"); + + eDevicePowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState); + if (eDevicePowerState == PVRSRV_DEV_POWER_STATE_ON) + { + /* Device must be idle to change GPU unit(s) power state */ + eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_FALSE); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Forced idle request failure (%s)", + __func__, PVRSRVGetErrorString(eError))); + if (eError == PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) + { + goto ErrorExit; + } + goto ErrorUnlockAndExit; + } + } + + if (psPowerDevice->pfnGPUUnitsPowerChange != NULL) + { + PVRSRV_ERROR eError2 = psPowerDevice->pfnGPUUnitsPowerChange(psPowerDevice->hDevCookie, ui32NewValue); + + if (eError2 != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Device %p failed (%s)", + __func__, psDeviceNode, + PVRSRVGetErrorString(eError))); + } + } + + if (eDevicePowerState == PVRSRV_DEV_POWER_STATE_ON) + { + eError = PVRSRVDeviceIdleCancelRequestKM(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVDeviceIdleCancelRequestKM", ErrorUnlockAndExit); + } + + PVRSRVPowerUnlock(psDeviceNode); + } + + return eError; + +ErrorUnlockAndExit: + PVRSRVPowerUnlock(psDeviceNode); +ErrorExit: + return eError; +} + +/****************************************************************************** + End of file 
(power.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/common/process_stats.c b/drivers/mcst/gpu-imgtec/services/server/common/process_stats.c new file mode 100644 index 000000000000..60e02d3251ff --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/process_stats.c @@ -0,0 +1,3452 @@ +/*************************************************************************/ /*! +@File +@Title Process based statistics +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Manages a collection of statistics based around a process + and referenced via OS agnostic methods. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "img_types.h" +#include "pvr_debug.h" +#include "lock.h" +#include "allocmem.h" +#include "osfunc.h" +#include "lists.h" +#include "process_stats.h" +#include "ri_server.h" +#include "hash.h" +#include "connection_server.h" +#include "pvrsrv.h" +#include "proc_stats.h" +#include "htbuffer.h" +#include "pvr_ricommon.h" +#include "di_server.h" +#if defined(__linux__) +#include "trace_events.h" +#endif + +/* Enabled OS Statistics entries: DEBUGFS on Linux, undefined for other OSs */ +#if defined(LINUX) && ( \ + defined(PVRSRV_ENABLE_PERPID_STATS) || \ + defined(PVRSRV_ENABLE_CACHEOP_STATS) || \ + defined(PVRSRV_ENABLE_MEMORY_STATS) || \ + defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ) +#define ENABLE_DEBUGFS_PIDS +#endif + +/* Enable GPU memory accounting tracepoint */ +#if defined(__linux__) && ( \ + defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) ) +#define ENABLE_GPU_MEM_TRACEPOINT +#endif + +/* + * Maximum history of process statistics that will be kept. + */ +#define MAX_DEAD_LIST_PROCESSES (10) + +/* + * Definition of all the strings used to format process based statistics. 
+ */ + +#if defined(PVRSRV_ENABLE_PERPID_STATS) +/* Array of Process stat type defined using the X-Macro */ +#define X(stat_type, stat_str) stat_str, +const IMG_CHAR *const pszProcessStatType[PVRSRV_PROCESS_STAT_TYPE_COUNT] = { PVRSRV_PROCESS_STAT_KEY }; +#undef X +#endif + +/* Array of Driver stat type defined using the X-Macro */ +#define X(stat_type, stat_str) stat_str, +const IMG_CHAR *const pszDriverStatType[PVRSRV_DRIVER_STAT_TYPE_COUNT] = { PVRSRV_DRIVER_STAT_KEY }; +#undef X + +/* structure used in hash table to track statistic entries */ +typedef struct { + size_t uiSizeInBytes; + IMG_PID uiPid; +} _PVR_STATS_TRACKING_HASH_ENTRY; + +/* Function used internally to decrement tracked per-process statistic entries */ +static void _StatsDecrMemTrackedStat(_PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry, + PVRSRV_MEM_ALLOC_TYPE eAllocType); + +#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE) +int RawProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData); +#endif +int PowerStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData); +int GlobalStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData); + +/* Note: all of the accesses to the global stats should be protected + * by the gsGlobalStats.hGlobalStatsLock lock. This means all of the + * invocations of macros *_GLOBAL_STAT_VALUE. 
/* Macros for fetching stat values */
#define GET_STAT_VALUE(ptr,var) (ptr)->i32StatValue[(var)]
#define GET_GLOBAL_STAT_VALUE(idx) gsGlobalStats.ui32StatValue[(idx)]

/* Sum of every driver-level stat that accounts GPU memory; evaluate only
 * while holding gsGlobalStats.hGlobalStatsLock (see note above). */
#define GET_GPUMEM_GLOBAL_STAT_VALUE() \
	GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA) + \
	GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA) + \
	GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA) + \
	GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA) + \
	GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT)

/* Sum of every per-process stat that accounts GPU memory for one process. */
#define GET_GPUMEM_PERPID_STAT_VALUE(ptr) \
	GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA) + \
	GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA) + \
	GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES) + \
	GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES) + \
	GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT)

/*
 * Macros for updating stat values.
 */
#define UPDATE_MAX_VALUE(a,b) do { if ((b) > (a)) {(a) = (b);} } while (0)

/* Add 'val' to a stat and keep the matching <stat>_MAX high-water mark up to
 * date. Relies on the stat enums declaring a companion <stat>_MAX entry
 * (token-pasted via var##_MAX / idx##_MAX, so 'var'/'idx' must be a bare
 * enumerator name, not an expression). */
#define INCREASE_STAT_VALUE(ptr,var,val) do { (ptr)->i32StatValue[(var)] += (val); if ((ptr)->i32StatValue[(var)] > (ptr)->i32StatValue[(var##_MAX)]) {(ptr)->i32StatValue[(var##_MAX)] = (ptr)->i32StatValue[(var)];} } while (0)
#define INCREASE_GLOBAL_STAT_VALUE(var,idx,val) do { (var).ui32StatValue[(idx)] += (val); if ((var).ui32StatValue[(idx)] > (var).ui32StatValue[(idx##_MAX)]) {(var).ui32StatValue[(idx##_MAX)] = (var).ui32StatValue[(idx)];} } while (0)
#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
/* Allow stats to go negative */
#define DECREASE_STAT_VALUE(ptr,var,val) do { (ptr)->i32StatValue[(var)] -= (val); } while (0)
#define DECREASE_GLOBAL_STAT_VALUE(var,idx,val) do { (var).ui32StatValue[(idx)] -= (val); } while (0)
#else
/* Clamp at zero so non-debug builds never report negative usage. */
#define DECREASE_STAT_VALUE(ptr,var,val) do { if ((ptr)->i32StatValue[(var)] >= (val)) { (ptr)->i32StatValue[(var)] -= (val); } else { (ptr)->i32StatValue[(var)] = 0; } } while (0)
#define DECREASE_GLOBAL_STAT_VALUE(var,idx,val) do { if ((var).ui32StatValue[(idx)] >= (val)) { (var).ui32StatValue[(idx)] -= (val); } else { (var).ui32StatValue[(idx)] = 0; } } while (0)
#endif

#define MAX_CACHEOP_STAT 16
/* Circular-buffer index helpers for the per-process cache-op history.
 * The argument is fully parenthesised (fix: the previous definitions
 * expanded a raw "x+1"/"x-1", which mis-evaluates expression arguments
 * such as ternaries due to operator precedence). */
#define INCREMENT_CACHEOP_STAT_IDX_WRAP(x) ((((x) + 1) >= MAX_CACHEOP_STAT) ? 0 : ((x) + 1))
#define DECREMENT_CACHEOP_STAT_IDX_WRAP(x) ((((x) - 1) < 0) ? (MAX_CACHEOP_STAT - 1) : ((x) - 1))

/*
 * Structures for holding statistics...
 */
#if defined(PVRSRV_ENABLE_MEMORY_STATS)
/* One record per tracked memory allocation, kept on a per-process
 * doubly-linked list (psNext / ppsThis managed by the IMPLEMENT_LIST_*
 * helpers instantiated below). */
typedef struct _PVRSRV_MEM_ALLOC_REC_
{
	PVRSRV_MEM_ALLOC_TYPE eAllocType;
	IMG_UINT64 ui64Key;        /* lookup key for this record */
	void* pvCpuVAddr;
	IMG_CPU_PHYADDR sCpuPAddr;
	size_t uiBytes;
	void* pvPrivateData;
#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON)
	void* pvAllocdFromFile;    /* call-site file/line for debug builds */
	IMG_UINT32 ui32AllocdFromLine;
#endif
	IMG_PID pid;
	struct _PVRSRV_MEM_ALLOC_REC_* psNext;
	struct _PVRSRV_MEM_ALLOC_REC_** ppsThis;
} PVRSRV_MEM_ALLOC_REC;
#endif

/* Per-process statistics node, linked into either the live or dead list. */
typedef struct _PVRSRV_PROCESS_STATS_ {

	/* Linked list pointers */
	struct _PVRSRV_PROCESS_STATS_* psNext;
	struct _PVRSRV_PROCESS_STATS_* psPrev;

	/* Create per process lock that need to be held
	 * to edit of its members */
	POS_LOCK hLock;

	/* OS level process ID */
	IMG_PID pid;
	IMG_UINT32 ui32RefCount;

	/* Stats... */
	IMG_INT32 i32StatValue[PVRSRV_PROCESS_STAT_TYPE_COUNT];
	IMG_UINT32 ui32StatAllocFlags;

#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
	/* Fixed-size ring of the most recent cache maintenance operations;
	 * uiCacheOpWriteIndex is advanced with INCREMENT_CACHEOP_STAT_IDX_WRAP. */
	struct _CACHEOP_STRUCT_ {
		PVRSRV_CACHE_OP uiCacheOp;
#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
		IMG_DEV_VIRTADDR sDevVAddr;
		IMG_DEV_PHYADDR sDevPAddr;
		RGXFWIF_DM eFenceOpType;
#endif
		IMG_DEVMEM_SIZE_T uiOffset;
		IMG_DEVMEM_SIZE_T uiSize;
		IMG_UINT64 ui64ExecuteTime;
		IMG_BOOL bUserModeFlush;
		IMG_UINT32 ui32OpSeqNum;
		IMG_BOOL bIsFence;
		IMG_PID ownerPid;
	} asCacheOp[MAX_CACHEOP_STAT];
	IMG_INT32 uiCacheOpWriteIndex;
#endif

	/* Other statistics structures */
#if defined(PVRSRV_ENABLE_MEMORY_STATS)
	PVRSRV_MEM_ALLOC_REC* psMemoryRecords;
#endif
} PVRSRV_PROCESS_STATS;

#if defined(ENABLE_DEBUGFS_PIDS)

/* DebugFS/DI handles for one group of stats files (live or retired). */
typedef struct _PVRSRV_OS_STAT_ENTRY_
{
	DI_GROUP *psStatsDIGroup;
	DI_ENTRY *psProcessStatsDIEntry;
	DI_ENTRY *psMemStatsDIEntry;
	DI_ENTRY *psRIMemStatsDIEntry;
	DI_ENTRY *psCacheOpStatsDIEntry;
} PVRSRV_OS_STAT_ENTRY;

static PVRSRV_OS_STAT_ENTRY gsLiveStatEntries;
static PVRSRV_OS_STAT_ENTRY gsRetiredStatEntries;

int GenericStatsPrintElementsLive(OSDI_IMPL_ENTRY *psEntry, void *pvData);
int GenericStatsPrintElementsRetired(OSDI_IMPL_ENTRY *psEntry, void *pvData);

/*
 * Functions for printing the information stored...
 */
#if defined(PVRSRV_ENABLE_PERPID_STATS)
void ProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry,
                               PVRSRV_PROCESS_STATS *psProcessStats);
#endif

#if defined(PVRSRV_ENABLE_MEMORY_STATS)
void MemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry,
                           PVRSRV_PROCESS_STATS *psProcessStats);
#endif

#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
void RIMemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry,
                             PVRSRV_PROCESS_STATS *psProcessStats);
#endif

#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
void CacheOpStatsPrintElements(OSDI_IMPL_ENTRY *psEntry,
                               PVRSRV_PROCESS_STATS *psProcessStats);
#endif

typedef void (PVRSRV_STATS_PRINT_ELEMENTS)(OSDI_IMPL_ENTRY *psEntry,
                                           PVRSRV_PROCESS_STATS *psProcessStats);

typedef enum
{
	PVRSRV_STAT_TYPE_PROCESS,
	PVRSRV_STAT_TYPE_MEMORY,
	PVRSRV_STAT_TYPE_RIMEMORY,
	PVRSRV_STAT_TYPE_CACHEOP,
	PVRSRV_STAT_TYPE_LAST
} PVRSRV_STAT_TYPE;

#define SEPARATOR_STR_LEN 166

/* Per-stat-type private data handed to the DI show callbacks; the header
 * strings are filled in by _prepareStatsHeaderString() at init time. */
typedef struct _PVRSRV_STAT_PV_DATA_ {

	PVRSRV_STAT_TYPE eStatType;
	PVRSRV_STATS_PRINT_ELEMENTS* pfnStatsPrintElements;
	IMG_CHAR szLiveStatsHeaderStr[SEPARATOR_STR_LEN + 1];
	IMG_CHAR szRetiredStatsHeaderStr[SEPARATOR_STR_LEN + 1];

} PVRSRV_STAT_PV_DATA;

static PVRSRV_STAT_PV_DATA g_StatPvDataArr[] = {
						{ PVRSRV_STAT_TYPE_PROCESS,  NULL, " Process"               , " Process"               },
						{ PVRSRV_STAT_TYPE_MEMORY,   NULL, " Memory Allocation"     , " Memory Allocation"     },
						{ PVRSRV_STAT_TYPE_RIMEMORY, NULL, " Resource Allocation"   , " Resource Allocation"   },
						{ PVRSRV_STAT_TYPE_CACHEOP,  NULL, " Cache Maintenance Ops" , " Cache Maintenance Ops" }
					  };

#define GET_STAT_ENTRY_ID(STAT_TYPE) &g_StatPvDataArr[(STAT_TYPE)]

/* Generic header strings */
static const IMG_CHAR g_szLiveHeaderStr[] = " Statistics for LIVE Processes ";
static const IMG_CHAR g_szRetiredHeaderStr[] = " Statistics for RETIRED Processes ";

/* Separator string used for separating stats for different PIDs */
static IMG_CHAR g_szSeparatorStr[SEPARATOR_STR_LEN + 1] = "";

/* Build a SEPARATOR_STR_LEN-wide banner of '-' characters with the
 * stat-specific text centred inside it, in place in pszStatsSpecificStr. */
static inline void
_prepareStatsHeaderString(IMG_CHAR *pszStatsSpecificStr, const IMG_CHAR* pszGenericHeaderStr)
{
	IMG_UINT32 ui32NumSeparators;
	IMG_CHAR szStatsHeaderFooterStr[75];

	/* Prepare text content of the header in a local string */
	OSStringLCopy(szStatsHeaderFooterStr, pszStatsSpecificStr, ARRAY_SIZE(szStatsHeaderFooterStr));
	OSStringLCat(szStatsHeaderFooterStr, pszGenericHeaderStr, ARRAY_SIZE(szStatsHeaderFooterStr));

	/* Write all '-' characters to the header string */
	memset(pszStatsSpecificStr, '-', SEPARATOR_STR_LEN);
	pszStatsSpecificStr[SEPARATOR_STR_LEN] = '\0';

	/* Find the spot for text content in the header string */
	ui32NumSeparators = (SEPARATOR_STR_LEN - OSStringLength(szStatsHeaderFooterStr)) >> 1;

	/* Finally write the text content.
	 * NOTE(review): the size argument equals strlen(text), so OSSNPrintf
	 * drops the final character in favour of its terminating NUL; the
	 * fix-up below then replaces that NUL with ' '. This only round-trips
	 * because the header text ends in a space — confirm if the text is
	 * ever changed to end in a non-space character. */
	OSSNPrintf(pszStatsSpecificStr + ui32NumSeparators,
	           OSStringLength(szStatsHeaderFooterStr),
	           "%s", szStatsHeaderFooterStr);

	/* Overwrite the '\0' character added by OSSNPrintf() */
	if (OSStringLength(szStatsHeaderFooterStr) > 0)
	{
		pszStatsSpecificStr[ui32NumSeparators + OSStringLength(szStatsHeaderFooterStr) - 1] = ' ';
	}
}

/* Build the per-type header banners and the PID separator line. */
static inline void
_prepareSeparatorStrings(void)
{
	IMG_UINT32 i;

	/* Prepare header strings for each stat type */
	for (i = 0; i < PVRSRV_STAT_TYPE_LAST; ++i)
	{
		_prepareStatsHeaderString(g_StatPvDataArr[i].szLiveStatsHeaderStr, g_szLiveHeaderStr);
		_prepareStatsHeaderString(g_StatPvDataArr[i].szRetiredStatsHeaderStr, g_szRetiredHeaderStr);
	}

	/* Prepare separator string to separate stats for different PIDs */
	memset(g_szSeparatorStr, '-', SEPARATOR_STR_LEN);
	g_szSeparatorStr[SEPARATOR_STR_LEN] = '\0';
}

/* Wire up the print callbacks that are compiled in, then build banners. */
static inline void
_prepareStatsPrivateData(void)
{
#if defined(PVRSRV_ENABLE_PERPID_STATS)
	g_StatPvDataArr[PVRSRV_STAT_TYPE_PROCESS].pfnStatsPrintElements = ProcessStatsPrintElements;
#endif

#if defined(PVRSRV_ENABLE_MEMORY_STATS)
	g_StatPvDataArr[PVRSRV_STAT_TYPE_MEMORY].pfnStatsPrintElements = MemStatsPrintElements;
#endif

#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
	g_StatPvDataArr[PVRSRV_STAT_TYPE_RIMEMORY].pfnStatsPrintElements = RIMemStatsPrintElements;
#endif

#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
	g_StatPvDataArr[PVRSRV_STAT_TYPE_CACHEOP].pfnStatsPrintElements = CacheOpStatsPrintElements;
#endif

	_prepareSeparatorStrings();
}

#endif

#if defined(PVRSRV_ENABLE_MEMORY_STATS)
static IMPLEMENT_LIST_INSERT(PVRSRV_MEM_ALLOC_REC)
static IMPLEMENT_LIST_REMOVE(PVRSRV_MEM_ALLOC_REC)
#endif

/*
 * Global Boolean to flag when the statistics are ready to monitor
 * memory allocations.
 */
static IMG_BOOL bProcessStatsInitialised = IMG_FALSE;

/*
 * Linked lists for process stats. Live stats are for processes which are still running
 * and the dead list holds those that have exited.
 */
static PVRSRV_PROCESS_STATS *g_psLiveList;
static PVRSRV_PROCESS_STATS *g_psDeadList;

static POS_LOCK g_psLinkedListLock;
/* Lockdep feature in the kernel cannot differentiate between different instances of same lock type.
 * This allows it to group all such instances of the same lock type under one class
 * The consequence of this is that, if lock acquisition is nested on different instances, it generates
 * a false warning message about the possible occurrence of deadlock due to recursive lock acquisition.
 * Hence we create the following sub classes to explicitly appraise Lockdep of such safe lock nesting */
#define PROCESS_LOCK_SUBCLASS_CURRENT	1
#define PROCESS_LOCK_SUBCLASS_PREV	2
#define PROCESS_LOCK_SUBCLASS_NEXT	3
#if defined(ENABLE_DEBUGFS_PIDS)
/*
 * Pointer to OS folder to hold PID folders.
 */
static DI_GROUP *psProcStatsDIGroup;
#endif
#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
static DI_ENTRY *psProcStatsDIEntry;
#endif

#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
/* Global driver PID stats registration handle */
static IMG_HANDLE g_hDriverProcessStats;
#endif

/* Global driver-data folders */
typedef struct _GLOBAL_STATS_
{
	IMG_UINT32 ui32StatValue[PVRSRV_DRIVER_STAT_TYPE_COUNT];
	POS_LOCK   hGlobalStatsLock;
} GLOBAL_STATS;

static DI_ENTRY *psGlobalMemDIEntry;
static GLOBAL_STATS gsGlobalStats;

#define HASH_INITIAL_SIZE 5
/* A hash table used to store the size of any vmalloc'd allocation
 * against its address (not needed for kmallocs as we can use ksize()) */
static HASH_TABLE* gpsSizeTrackingHashTable;
static POS_LOCK	 gpsSizeTrackingHashTableLock;

static PVRSRV_ERROR _RegisterProcess(IMG_HANDLE *phProcessStats, IMG_PID ownerPid);

static void _AddProcessStatsToFrontOfDeadList(PVRSRV_PROCESS_STATS* psProcessStats);
static void _AddProcessStatsToFrontOfLiveList(PVRSRV_PROCESS_STATS* psProcessStats);
static void _RemoveProcessStatsFromList(PVRSRV_PROCESS_STATS* psProcessStats);

static void _DestroyProcessStat(PVRSRV_PROCESS_STATS* psProcessStats);

static void _DecreaseProcStatValue(PVRSRV_MEM_ALLOC_TYPE eAllocType,
                                   PVRSRV_PROCESS_STATS* psProcessStats,
                                   IMG_UINT32 uiBytes);
/*
 * Power statistics related definitions
 */

/* For the mean time, use an exponentially weighted moving average with a
 * 1/4 weighting for the new measurement.
 */
#define MEAN_TIME(A, B)     ( ((3*(A))/4) + ((1 * (B))/4) )

/* Seed the average with the first sample; smooth subsequent ones. */
#define UPDATE_TIME(time, newtime) \
	((time) > 0 ? MEAN_TIME((time), (newtime)) : (newtime))

/* Enum to be used as input to GET_POWER_STAT_INDEX */
typedef enum
{
	DEVICE     = 0,
	SYSTEM     = 1,
	POST_POWER = 0,
	PRE_POWER  = 2,
	POWER_OFF  = 0,
	POWER_ON   = 4,
	NOT_FORCED = 0,
	FORCED     = 8,
} PVRSRV_POWER_STAT_TYPE;

/* Macro used to access one of the power timing statistics inside an array.
 * The enum values above are disjoint bit weights, so the sum is a unique
 * index in [0, NUM_POWER_STATS). */
#define GET_POWER_STAT_INDEX(forced,powon,prepow,system) \
	((forced) + (powon) + (prepow) + (system))

/* For the power timing stats we need 16 variables to store all the
 * combinations of forced/not forced, power-on/power-off, pre-power/post-power
 * and device/system statistics
 */
#define NUM_POWER_STATS        (16)
static IMG_UINT32 aui32PowerTimingStats[NUM_POWER_STATS];

static DI_ENTRY *psPowerStatsDIEntry;

/* One clock-speed-change event: durations of the pre step, the gap between
 * pre and post, and the post step. */
typedef struct _EXTRA_POWER_STATS_
{
	IMG_UINT64 ui64PreClockSpeedChangeDuration;
	IMG_UINT64 ui64BetweenPreEndingAndPostStartingDuration;
	IMG_UINT64 ui64PostClockSpeedChangeDuration;
} EXTRA_POWER_STATS;

#define NUM_EXTRA_POWER_STATS	10

/* Ring buffer of the most recent clock speed changes; when full, the start
 * index is advanced so the oldest entry is overwritten. */
static EXTRA_POWER_STATS asClockSpeedChanges[NUM_EXTRA_POWER_STATS];
static IMG_UINT32 ui32ClockSpeedIndexStart, ui32ClockSpeedIndexEnd;


#if defined(PVRSRV_ENABLE_PROCESS_STATS)
/* Fold one power-transition timing sample (device and system durations)
 * into the moving-average table, and log it to the host trace buffer. */
void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime,
                              IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime,
                              IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower)
{
	IMG_UINT32 *pui32Stat;
	IMG_UINT64 ui64DeviceDiff = ui64DevEndTime - ui64DevStartTime;
	IMG_UINT64 ui64SystemDiff = ui64SysEndTime - ui64SysStartTime;
	IMG_UINT32 ui32Index;

	if (bPrePower)
	{
		HTBLOGK(HTB_SF_MAIN_PRE_POWER, bPowerOn, ui64DeviceDiff, ui64SystemDiff);
	}
	else
	{
		HTBLOGK(HTB_SF_MAIN_POST_POWER, bPowerOn, ui64SystemDiff, ui64DeviceDiff);
	}

	/* NOTE(review): the 64-bit duration is narrowed into a 32-bit stat by
	 * UPDATE_TIME's assignment below — confirm durations always fit. */
	ui32Index = GET_POWER_STAT_INDEX(bForced ? FORCED : NOT_FORCED,
	                                 bPowerOn ? POWER_ON : POWER_OFF,
	                                 bPrePower ? PRE_POWER : POST_POWER,
	                                 DEVICE);
	pui32Stat = &aui32PowerTimingStats[ui32Index];
	*pui32Stat = UPDATE_TIME(*pui32Stat, ui64DeviceDiff);

	ui32Index = GET_POWER_STAT_INDEX(bForced ? FORCED : NOT_FORCED,
	                                 bPowerOn ? POWER_ON : POWER_OFF,
	                                 bPrePower ? PRE_POWER : POST_POWER,
	                                 SYSTEM);
	pui32Stat = &aui32PowerTimingStats[ui32Index];
	*pui32Stat = UPDATE_TIME(*pui32Stat, ui64SystemDiff);
}

/* Timestamp of the end of the last "pre" clock-speed-change step, used by
 * the "post" hook to measure the gap between the two. */
static IMG_UINT64 ui64PreClockSpeedChangeMark;

/* Record the duration of the pre-clock-speed-change step and remember when
 * it finished. */
void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64Stoptimer)
{
	asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64PreClockSpeedChangeDuration = ui64Stoptimer - ui64StartTimer;

	ui64PreClockSpeedChangeMark = OSClockus();
}

/* Record the post-change duration plus the pre→post gap, then advance the
 * ring-buffer write index (dropping the oldest entry when full). */
void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer)
{
	IMG_UINT64 ui64Duration = ui64StartTimer - ui64PreClockSpeedChangeMark;

	PVR_ASSERT(ui64PreClockSpeedChangeMark > 0);

	asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64BetweenPreEndingAndPostStartingDuration = ui64Duration;
	asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64PostClockSpeedChangeDuration = ui64StopTimer - ui64StartTimer;

	ui32ClockSpeedIndexEnd = (ui32ClockSpeedIndexEnd + 1) % NUM_EXTRA_POWER_STATS;

	if (ui32ClockSpeedIndexEnd == ui32ClockSpeedIndexStart)
	{
		ui32ClockSpeedIndexStart = (ui32ClockSpeedIndexStart + 1) % NUM_EXTRA_POWER_STATS;
	}

	ui64PreClockSpeedChangeMark = 0;
}
#endif
+*/ /**************************************************************************/ +static PVRSRV_PROCESS_STATS* +_FindProcessStatsInLiveList(IMG_PID pid) +{ + PVRSRV_PROCESS_STATS* psProcessStats = g_psLiveList; + + while (psProcessStats != NULL) + { + if (psProcessStats->pid == pid) + { + return psProcessStats; + } + + psProcessStats = psProcessStats->psNext; + } + + return NULL; +} /* _FindProcessStatsInLiveList */ + +/*************************************************************************/ /*! +@Function _FindProcessStatsInDeadList +@Description Searches the Dead Process List for a statistics structure that + matches the PID given. +@Input pid Process to search for. +@Return Pointer to stats structure for the process. +*/ /**************************************************************************/ +static PVRSRV_PROCESS_STATS* +_FindProcessStatsInDeadList(IMG_PID pid) +{ + PVRSRV_PROCESS_STATS* psProcessStats = g_psDeadList; + + while (psProcessStats != NULL) + { + if (psProcessStats->pid == pid) + { + return psProcessStats; + } + + psProcessStats = psProcessStats->psNext; + } + + return NULL; +} /* _FindProcessStatsInDeadList */ + +/*************************************************************************/ /*! +@Function _FindProcessStats +@Description Searches the Live and Dead Process Lists for a statistics + structure that matches the PID given. +@Input pid Process to search for. +@Return Pointer to stats structure for the process. +*/ /**************************************************************************/ +static PVRSRV_PROCESS_STATS* +_FindProcessStats(IMG_PID pid) +{ + PVRSRV_PROCESS_STATS* psProcessStats = _FindProcessStatsInLiveList(pid); + + if (psProcessStats == NULL) + { + psProcessStats = _FindProcessStatsInDeadList(pid); + } + + return psProcessStats; +} /* _FindProcessStats */ + +/*************************************************************************/ /*! 
+@Function _CompressMemoryUsage +@Description Reduces memory usage by deleting old statistics data. + This function requires that the list lock is not held! +*/ /**************************************************************************/ +static void +_CompressMemoryUsage(void) +{ + PVRSRV_PROCESS_STATS* psProcessStats; + PVRSRV_PROCESS_STATS* psProcessStatsToBeFreed; + IMG_UINT32 ui32ItemsRemaining; + + /* + * We hold the lock whilst checking the list, but we'll release it + * before freeing memory (as that will require the lock too)! + */ + OSLockAcquire(g_psLinkedListLock); + + /* Check that the dead list is not bigger than the max size... */ + psProcessStats = g_psDeadList; + psProcessStatsToBeFreed = NULL; + ui32ItemsRemaining = MAX_DEAD_LIST_PROCESSES; + + while (psProcessStats != NULL && ui32ItemsRemaining > 0) + { + ui32ItemsRemaining--; + if (ui32ItemsRemaining == 0) + { + /* This is the last allowed process, cut the linked list here! */ + psProcessStatsToBeFreed = psProcessStats->psNext; + psProcessStats->psNext = NULL; + } + else + { + psProcessStats = psProcessStats->psNext; + } + } + + OSLockRelease(g_psLinkedListLock); + + /* Any processes stats remaining will need to be destroyed... */ + while (psProcessStatsToBeFreed != NULL) + { + PVRSRV_PROCESS_STATS* psNextProcessStats = psProcessStatsToBeFreed->psNext; + + psProcessStatsToBeFreed->psNext = NULL; + _DestroyProcessStat(psProcessStatsToBeFreed); + psProcessStatsToBeFreed = psNextProcessStats; + } +} /* _CompressMemoryUsage */ + +/* These functions move the process stats from the live to the dead list. + * _MoveProcessToDeadList moves the entry in the global lists and + * it needs to be protected by g_psLinkedListLock. + * _MoveProcessToDeadList performs the OS calls and it + * shouldn't be used under g_psLinkedListLock because this could generate a + * lockdep warning. 
static void
_MoveProcessToDeadList(PVRSRV_PROCESS_STATS* psProcessStats)
{
	/* Take the element out of the live list and append to the dead list... */
	_RemoveProcessStatsFromList(psProcessStats);
	_AddProcessStatsToFrontOfDeadList(psProcessStats);
} /* _MoveProcessToDeadList */

#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
/* These functions move the process stats from the dead to the live list.
 * _MoveProcessToLiveList moves the entry in the global lists and
 * it needs to be protected by g_psLinkedListLock.
 * _MoveProcessToLiveList performs the OS calls and it
 * shouldn't be used under g_psLinkedListLock because this could generate a
 * lockdep warning. */
static void
_MoveProcessToLiveList(PVRSRV_PROCESS_STATS* psProcessStats)
{
	/* Take the element out of the dead list and append to the live list... */
	_RemoveProcessStatsFromList(psProcessStats);
	_AddProcessStatsToFrontOfLiveList(psProcessStats);
} /* _MoveProcessToLiveList */
#endif

/*************************************************************************/ /*!
@Function       _AddProcessStatsToFrontOfLiveList
@Description    Add a statistic to the live list head.
@Input          psProcessStats  Process stats to add.
*/ /**************************************************************************/
static void
_AddProcessStatsToFrontOfLiveList(PVRSRV_PROCESS_STATS* psProcessStats)
{
	/* This function should always be called under global list lock g_psLinkedListLock.
	 */
	PVR_ASSERT(psProcessStats != NULL);

	OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);

	if (g_psLiveList != NULL)
	{
		PVR_ASSERT(psProcessStats != g_psLiveList);
		/* Old head's back-pointer is updated under its own lock, using the
		 * PREV subclass so lockdep accepts the nested acquisition. */
		OSLockAcquireNested(g_psLiveList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
		g_psLiveList->psPrev = psProcessStats;
		OSLockRelease(g_psLiveList->hLock);
		psProcessStats->psNext = g_psLiveList;
	}

	g_psLiveList = psProcessStats;

	OSLockRelease(psProcessStats->hLock);
} /* _AddProcessStatsToFrontOfLiveList */

/*************************************************************************/ /*!
@Function       _AddProcessStatsToFrontOfDeadList
@Description    Add a statistic to the dead list head.
@Input          psProcessStats  Process stats to add.
*/ /**************************************************************************/
static void
_AddProcessStatsToFrontOfDeadList(PVRSRV_PROCESS_STATS* psProcessStats)
{
	PVR_ASSERT(psProcessStats != NULL);
	OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);

	if (g_psDeadList != NULL)
	{
		PVR_ASSERT(psProcessStats != g_psDeadList);
		OSLockAcquireNested(g_psDeadList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
		g_psDeadList->psPrev = psProcessStats;
		OSLockRelease(g_psDeadList->hLock);
		psProcessStats->psNext = g_psDeadList;
	}

	g_psDeadList = psProcessStats;

	OSLockRelease(psProcessStats->hLock);
} /* _AddProcessStatsToFrontOfDeadList */

/*************************************************************************/ /*!
@Function       _RemoveProcessStatsFromList
@Description    Detaches a process from either the live or dead list.
@Input          psProcessStats  Process stats to remove.
*/ /**************************************************************************/
static void
_RemoveProcessStatsFromList(PVRSRV_PROCESS_STATS* psProcessStats)
{
	PVR_ASSERT(psProcessStats != NULL);

	OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);

	/* Remove the item from the linked lists...
	 */
	if (g_psLiveList == psProcessStats)
	{
		g_psLiveList = psProcessStats->psNext;

		if (g_psLiveList != NULL)
		{
			PVR_ASSERT(psProcessStats != g_psLiveList);
			OSLockAcquireNested(g_psLiveList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
			g_psLiveList->psPrev = NULL;
			OSLockRelease(g_psLiveList->hLock);

		}
	}
	else if (g_psDeadList == psProcessStats)
	{
		g_psDeadList = psProcessStats->psNext;

		if (g_psDeadList != NULL)
		{
			PVR_ASSERT(psProcessStats != g_psDeadList);
			OSLockAcquireNested(g_psDeadList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
			g_psDeadList->psPrev = NULL;
			OSLockRelease(g_psDeadList->hLock);
		}
	}
	else
	{
		/* Mid-list: unlink by patching both neighbours, each under its own
		 * lock with a distinct lockdep subclass. */
		PVRSRV_PROCESS_STATS* psNext = psProcessStats->psNext;
		PVRSRV_PROCESS_STATS* psPrev = psProcessStats->psPrev;

		if (psProcessStats->psNext != NULL)
		{
			PVR_ASSERT(psProcessStats != psNext);
			OSLockAcquireNested(psNext->hLock, PROCESS_LOCK_SUBCLASS_NEXT);
			psProcessStats->psNext->psPrev = psPrev;
			OSLockRelease(psNext->hLock);
		}
		if (psProcessStats->psPrev != NULL)
		{
			PVR_ASSERT(psProcessStats != psPrev);
			OSLockAcquireNested(psPrev->hLock, PROCESS_LOCK_SUBCLASS_PREV);
			psProcessStats->psPrev->psNext = psNext;
			OSLockRelease(psPrev->hLock);
		}
	}


	/* Reset the pointers in this cell, as it is not attached to anything */
	psProcessStats->psNext = NULL;
	psProcessStats->psPrev = NULL;

	OSLockRelease(psProcessStats->hLock);

} /* _RemoveProcessStatsFromList */

/*************************************************************************/ /*!
@Function       _AllocateProcessStats
@Description    Allocates and initialises a process stats node (refcount 1,
                one connection recorded) including its per-process lock.
@Output         ppsProcessStats  Receives the new node on success.
@Input          ownerPid         PID the node will account for.
@Return         PVRSRV_OK or an out-of-memory error.
*/ /**************************************************************************/
static PVRSRV_ERROR
_AllocateProcessStats(PVRSRV_PROCESS_STATS **ppsProcessStats, IMG_PID ownerPid)
{
	PVRSRV_ERROR eError;
	PVRSRV_PROCESS_STATS *psProcessStats;

	psProcessStats = OSAllocZMemNoStats(sizeof(PVRSRV_PROCESS_STATS));
	PVR_RETURN_IF_NOMEM(psProcessStats);

	psProcessStats->pid = ownerPid;
	psProcessStats->ui32RefCount = 1;

	psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = 1;
	psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS] = 1;

	eError = OSLockCreateNoStats(&psProcessStats->hLock);
	PVR_GOTO_IF_ERROR(eError, e0);

	*ppsProcessStats = psProcessStats;
	return PVRSRV_OK;

e0:
	OSFreeMemNoStats(psProcessStats);
	/* NOTE(review): the original lock-create error (eError) is discarded and
	 * OUT_OF_MEMORY returned instead — confirm this is intentional. */
	return PVRSRV_ERROR_OUT_OF_MEMORY;
}

/*************************************************************************/ /*!
@Function       _DestroyProcessStat
@Description    Frees memory and resources held by a process statistic.
@Input          psProcessStats  Process stats to destroy.
*/ /**************************************************************************/
static void
_DestroyProcessStat(PVRSRV_PROCESS_STATS* psProcessStats)
{
	PVR_ASSERT(psProcessStats != NULL);

	OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);

	/* Free the memory statistics... */
#if defined(PVRSRV_ENABLE_MEMORY_STATS)
	while (psProcessStats->psMemoryRecords)
	{
		List_PVRSRV_MEM_ALLOC_REC_Remove(psProcessStats->psMemoryRecords);
	}
#endif
	OSLockRelease(psProcessStats->hLock);

	/* Destroy the lock */
	OSLockDestroyNoStats(psProcessStats->hLock);

	/* Free the memory...
	 */
	OSFreeMemNoStats(psProcessStats);
} /* _DestroyProcessStat */

#if defined(ENABLE_DEBUGFS_PIDS)
/* Create the per-group DI entries (process/cacheop/mem/RI-mem) that are
 * compiled in, all sharing the given "show" callback. */
static inline void
_createStatsFiles(PVRSRV_OS_STAT_ENTRY* psStatsEntries,
                  DI_PFN_SHOW pfnStatsShow)
{
	PVRSRV_ERROR eError;
	DI_ITERATOR_CB sIterator = {.pfnShow = pfnStatsShow};

#if defined(PVRSRV_ENABLE_PERPID_STATS)
	eError = DICreateEntry("process_stats", psStatsEntries->psStatsDIGroup,
	                       &sIterator,
	                       GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_PROCESS),
	                       DI_ENTRY_TYPE_GENERIC,
	                       &psStatsEntries->psProcessStatsDIEntry);
	PVR_LOG_IF_ERROR(eError, "DICreateEntry (1)");
#endif

#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
	eError = DICreateEntry("cache_ops_exec", psStatsEntries->psStatsDIGroup,
	                       &sIterator,
	                       GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_CACHEOP),
	                       DI_ENTRY_TYPE_GENERIC,
	                       &psStatsEntries->psCacheOpStatsDIEntry);
	PVR_LOG_IF_ERROR(eError, "DICreateEntry (2)");
#endif

#if defined(PVRSRV_ENABLE_MEMORY_STATS)
	eError = DICreateEntry("mem_area", psStatsEntries->psStatsDIGroup,
	                       &sIterator,
	                       GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_MEMORY),
	                       DI_ENTRY_TYPE_GENERIC,
	                       &psStatsEntries->psMemStatsDIEntry);
	PVR_LOG_IF_ERROR(eError, "DICreateEntry (3)");
#endif

#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
	eError = DICreateEntry("gpu_mem_area", psStatsEntries->psStatsDIGroup,
	                       &sIterator,
	                       GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_RIMEMORY),
	                       DI_ENTRY_TYPE_GENERIC,
	                       &psStatsEntries->psRIMemStatsDIEntry);
	PVR_LOG_IF_ERROR(eError, "DICreateEntry (4)");
#endif
}

/* Build the proc_stats DI folder tree (live + retired groups), populate the
 * stats files, and wire up the print callbacks. */
static inline void
_createStatisticsEntries(void)
{
	PVRSRV_ERROR eError;

	eError = DICreateGroup("proc_stats", NULL, &psProcStatsDIGroup);
	PVR_LOG_IF_ERROR(eError, "DICreateGroup (1)");
	eError = DICreateGroup("live_pids_stats", psProcStatsDIGroup,
	                       &gsLiveStatEntries.psStatsDIGroup);
	PVR_LOG_IF_ERROR(eError, "DICreateGroup (2)");
	eError = DICreateGroup("retired_pids_stats", psProcStatsDIGroup,
	                       &gsRetiredStatEntries.psStatsDIGroup);
	PVR_LOG_IF_ERROR(eError, "DICreateGroup (3)");

	_createStatsFiles(&gsLiveStatEntries, GenericStatsPrintElementsLive);
	_createStatsFiles(&gsRetiredStatEntries, GenericStatsPrintElementsRetired);

	_prepareStatsPrivateData();
}
/* Destroy the per-group DI entries created by _createStatsFiles(). */
static inline void
_removeStatsFiles(PVRSRV_OS_STAT_ENTRY* psStatsEntries)
{
#if defined(PVRSRV_ENABLE_PERPID_STATS)
	DIDestroyEntry(psStatsEntries->psProcessStatsDIEntry);
	psStatsEntries->psProcessStatsDIEntry = NULL;
#endif

#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
	DIDestroyEntry(psStatsEntries->psCacheOpStatsDIEntry);
	psStatsEntries->psCacheOpStatsDIEntry = NULL;
#endif

#if defined(PVRSRV_ENABLE_MEMORY_STATS)
	DIDestroyEntry(psStatsEntries->psMemStatsDIEntry);
	psStatsEntries->psMemStatsDIEntry = NULL;
#endif

#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
	DIDestroyEntry(psStatsEntries->psRIMemStatsDIEntry);
	psStatsEntries->psRIMemStatsDIEntry = NULL;
#endif
}

/* Tear down the whole proc_stats DI folder tree (reverse of
 * _createStatisticsEntries). */
static inline void
_removeStatisticsEntries(void)
{
	_removeStatsFiles(&gsLiveStatEntries);
	_removeStatsFiles(&gsRetiredStatEntries);

	DIDestroyGroup(gsLiveStatEntries.psStatsDIGroup);
	gsLiveStatEntries.psStatsDIGroup = NULL;
	DIDestroyGroup(gsRetiredStatEntries.psStatsDIGroup);
	gsRetiredStatEntries.psStatsDIGroup = NULL;
	DIDestroyGroup(psProcStatsDIGroup);
	psProcStatsDIGroup = NULL;
}
#endif

/*************************************************************************/ /*!
@Function       PVRSRVStatsInitialise
@Description    Entry point for initialising the statistics module.
@Return         Standard PVRSRV_ERROR error code.
*/ /**************************************************************************/
PVRSRV_ERROR
PVRSRVStatsInitialise(void)
{
	PVRSRV_ERROR error;

	PVR_ASSERT(g_psLiveList == NULL);
	PVR_ASSERT(g_psDeadList == NULL);
	PVR_ASSERT(g_psLinkedListLock == NULL);
	PVR_ASSERT(gpsSizeTrackingHashTable == NULL);
	PVR_ASSERT(bProcessStatsInitialised == IMG_FALSE);

	/* We need a lock to protect the linked lists...
	 */
	error = OSLockCreate(&g_psLinkedListLock);
	if (error == PVRSRV_OK)
	{
		/* We also need a lock to protect the hash table used for size tracking.. */
		error = OSLockCreate(&gpsSizeTrackingHashTableLock);

		PVR_GOTO_IF_ERROR(error, e0);

		/* We also need a lock to protect the GlobalStat counters */
		error = OSLockCreate(&gsGlobalStats.hGlobalStatsLock);
		PVR_GOTO_IF_ERROR(error, e1);

		/* Flag that we are ready to start monitoring memory allocations. */

		/* NOTE(review): HASH_Create's result is not checked here — a NULL
		 * table appears to be tolerated later (guards in Destroy); confirm. */
		gpsSizeTrackingHashTable = HASH_Create(HASH_INITIAL_SIZE);

		OSCachedMemSet(asClockSpeedChanges, 0, sizeof(asClockSpeedChanges));

		bProcessStatsInitialised = IMG_TRUE;
#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
		/* Register our 'system' PID to hold driver-wide alloc stats.
		 * NOTE(review): the return value is ignored — confirm intentional. */
		_RegisterProcess(&g_hDriverProcessStats, PVR_SYS_ALLOC_PID);
#endif
	}
	return error;
e1:
	/* Fall through to also destroy the linked-list lock. */
	OSLockDestroy(gpsSizeTrackingHashTableLock);
	gpsSizeTrackingHashTableLock = NULL;
e0:
	OSLockDestroy(g_psLinkedListLock);
	g_psLinkedListLock = NULL;
	return error;

} /* PVRSRVStatsInitialise */

/* Create the module's Debug Info entries (debugfs PID folders, memtrack,
 * power timing and driver-wide stats files). */
PVRSRV_ERROR
PVRSRVStatsInitialiseDI(void)
{
	PVRSRV_ERROR error;

#if defined(ENABLE_DEBUGFS_PIDS)
	_createStatisticsEntries();
#endif

#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
	{
		DI_ITERATOR_CB sIterator = {.pfnShow = RawProcessStatsPrintElements};
		error = DICreateEntry("memtrack_stats", NULL, &sIterator, NULL,
		                      DI_ENTRY_TYPE_GENERIC, &psProcStatsDIEntry);
		PVR_LOG_IF_ERROR(error, "DICreateEntry (1)");
	}
#endif

	{
		DI_ITERATOR_CB sIterator = {.pfnShow = PowerStatsPrintElements};
		/* Create power stats entry...
		 */
		error = DICreateEntry("power_timing_stats", NULL, &sIterator, NULL,
		                      DI_ENTRY_TYPE_GENERIC, &psPowerStatsDIEntry);
		PVR_LOG_IF_ERROR(error, "DICreateEntry (2)");
	}

	{
		DI_ITERATOR_CB sIterator = {.pfnShow = GlobalStatsPrintElements};
		error = DICreateEntry("driver_stats", NULL, &sIterator, NULL,
		                      DI_ENTRY_TYPE_GENERIC, &psGlobalMemDIEntry);
		PVR_LOG_IF_ERROR(error, "DICreateEntry (3)");
	}

	return PVRSRV_OK;
}

/* HASH_Iterate callback: log one leaked vmalloc tracking entry (key is the
 * CPU virtual address, value is the tracking record). */
static PVRSRV_ERROR _DumpAllVMallocEntries (uintptr_t k, uintptr_t v)
{
#if defined(PVRSRV_NEED_PVR_DPF) || defined(DOXYGEN)
	_PVR_STATS_TRACKING_HASH_ENTRY *psNewTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)(uintptr_t)v;
	IMG_UINT64 uiCpuVAddr = (IMG_UINT64)k;

	PVR_DPF((PVR_DBG_ERROR, "%s: " IMG_SIZE_FMTSPEC " bytes @ 0x%" IMG_UINT64_FMTSPECx " (PID %u)", __func__,
	         psNewTrackingHashEntry->uiSizeInBytes,
	         uiCpuVAddr,
	         psNewTrackingHashEntry->uiPid));
#endif
	return PVRSRV_OK;
}

/*************************************************************************/ /*!
@Function       PVRSRVStatsDestroy
@Description    Method for destroying the statistics module data.
*/ /**************************************************************************/
void
PVRSRVStatsDestroy(void)
{
	PVR_ASSERT(bProcessStatsInitialised);

#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
	/* Deregister our 'system' PID which holds driver-wide alloc stats */
	PVRSRVStatsDeregisterProcess(g_hDriverProcessStats);
#endif

	/* Stop monitoring memory allocations... */
	bProcessStatsInitialised = IMG_FALSE;

	/* Destroy the locks... */
	if (g_psLinkedListLock != NULL)
	{
		OSLockDestroy(g_psLinkedListLock);
		g_psLinkedListLock = NULL;
	}

	/* Free the live and dead lists...
	 */
	while (g_psLiveList != NULL)
	{
		PVRSRV_PROCESS_STATS* psProcessStats = g_psLiveList;
		_RemoveProcessStatsFromList(psProcessStats);
		_DestroyProcessStat(psProcessStats);
	}

	while (g_psDeadList != NULL)
	{
		PVRSRV_PROCESS_STATS* psProcessStats = g_psDeadList;
		_RemoveProcessStatsFromList(psProcessStats);
		_DestroyProcessStat(psProcessStats);
	}

	if (gpsSizeTrackingHashTable != NULL)
	{
		/* Dump all remaining entries in HASH table (list any remaining vmallocs) */
		HASH_Iterate(gpsSizeTrackingHashTable, (HASH_pfnCallback)_DumpAllVMallocEntries);
		HASH_Delete(gpsSizeTrackingHashTable);
	}
	if (gpsSizeTrackingHashTableLock != NULL)
	{
		OSLockDestroy(gpsSizeTrackingHashTableLock);
		gpsSizeTrackingHashTableLock = NULL;
	}

	if (NULL != gsGlobalStats.hGlobalStatsLock)
	{
		OSLockDestroy(gsGlobalStats.hGlobalStatsLock);
		gsGlobalStats.hGlobalStatsLock = NULL;
	}

} /* PVRSRVStatsDestroy */

/* Destroy all the Debug Info entries created by PVRSRVStatsInitialiseDI. */
void
PVRSRVStatsDestroyDI(void)
{
#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
	if (psProcStatsDIEntry != NULL)
	{
		DIDestroyEntry(psProcStatsDIEntry);
		psProcStatsDIEntry = NULL;
	}
#endif

	/* Destroy the power stats entry...
	 */
	if (psPowerStatsDIEntry!=NULL)
	{
		DIDestroyEntry(psPowerStatsDIEntry);
		psPowerStatsDIEntry = NULL;
	}

	/* Destroy the global data entry */
	if (psGlobalMemDIEntry!=NULL)
	{
		DIDestroyEntry(psGlobalMemDIEntry);
		psGlobalMemDIEntry = NULL;
	}

#if defined(ENABLE_DEBUGFS_PIDS)
	_removeStatisticsEntries();
#endif
}

/* Subtract uiBytes from the driver-wide counter matching eAllocType, under
 * the global stats lock. (Definition continues beyond this chunk.) */
static void _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
                                  size_t uiBytes)
{
#if defined(ENABLE_GPU_MEM_TRACEPOINT)
	IMG_UINT64 ui64InitialSize;
#endif

	OSLockAcquire(gsGlobalStats.hGlobalStatsLock);

#if defined(ENABLE_GPU_MEM_TRACEPOINT)
	ui64InitialSize = GET_GPUMEM_GLOBAL_STAT_VALUE();
#endif

	switch (eAllocType)
	{
		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_KMALLOC, uiBytes);
			break;

		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMALLOC, uiBytes);
			break;

		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, uiBytes);
			break;

		case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, uiBytes);
			break;

		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, uiBytes);
			break;

		case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, uiBytes);
			break;

		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, uiBytes);
			break;

		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, uiBytes);
			break;

		case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats,
PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES: + DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: + DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT, uiBytes); + break; + + default: + PVR_ASSERT(0); + break; + } + +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + { + IMG_UINT64 ui64Size = GET_GPUMEM_GLOBAL_STAT_VALUE(); + if (ui64Size != ui64InitialSize) + { + TracepointUpdateGPUMemGlobal(0, ui64Size); + } + } +#endif + + OSLockRelease(gsGlobalStats.hGlobalStatsLock); +} + +static void _increase_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType, + size_t uiBytes) +{ +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + IMG_UINT64 ui64InitialSize; +#endif + + OSLockAcquire(gsGlobalStats.hGlobalStatsLock); + +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + ui64InitialSize = GET_GPUMEM_GLOBAL_STAT_VALUE(); +#endif + + switch (eAllocType) + { + case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_KMALLOC, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMALLOC, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, 
PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT, uiBytes); + break; + + default: + PVR_ASSERT(0); + break; + } + +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + { + IMG_UINT64 ui64Size = GET_GPUMEM_GLOBAL_STAT_VALUE(); + if (ui64Size != ui64InitialSize) + { + TracepointUpdateGPUMemGlobal(0, ui64Size); + } + } +#endif + + OSLockRelease(gsGlobalStats.hGlobalStatsLock); +} + +static PVRSRV_ERROR +_RegisterProcess(IMG_HANDLE *phProcessStats, IMG_PID ownerPid) +{ + PVRSRV_PROCESS_STATS* psProcessStats=NULL; + PVRSRV_ERROR eError; + + PVR_ASSERT(phProcessStats != NULL); + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Register process PID %d [%s]", + __func__, ownerPid, (ownerPid == PVR_SYS_ALLOC_PID) + ? "system" : OSGetCurrentClientProcessNameKM())); + + /* Check the PID has not already moved to the dead list... */ + OSLockAcquire(g_psLinkedListLock); + psProcessStats = _FindProcessStatsInDeadList(ownerPid); + if (psProcessStats != NULL) + { + /* Move it back onto the live list! */ + _RemoveProcessStatsFromList(psProcessStats); + _AddProcessStatsToFrontOfLiveList(psProcessStats); + } + else + { + /* Check the PID is not already registered in the live list... */ + psProcessStats = _FindProcessStatsInLiveList(ownerPid); + } + + /* If the PID is on the live list then just increment the ref count and return... 
*/ + if (psProcessStats != NULL) + { + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + + psProcessStats->ui32RefCount++; + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = psProcessStats->ui32RefCount; + UPDATE_MAX_VALUE(psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS], + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS]); + OSLockRelease(psProcessStats->hLock); + OSLockRelease(g_psLinkedListLock); + + *phProcessStats = psProcessStats; + + return PVRSRV_OK; + } + OSLockRelease(g_psLinkedListLock); + + /* Allocate a new node structure and initialise it... */ + eError = _AllocateProcessStats(&psProcessStats, ownerPid); + PVR_GOTO_IF_ERROR(eError, e0); + + /* Add it to the live list... */ + OSLockAcquire(g_psLinkedListLock); + _AddProcessStatsToFrontOfLiveList(psProcessStats); + OSLockRelease(g_psLinkedListLock); + + /* Done */ + *phProcessStats = (IMG_HANDLE) psProcessStats; + + return PVRSRV_OK; + +e0: + *phProcessStats = (IMG_HANDLE) NULL; + return PVRSRV_ERROR_OUT_OF_MEMORY; +} /* _RegisterProcess */ + +/*************************************************************************/ /*! +@Function PVRSRVStatsRegisterProcess +@Description Register a process into the list statistics list. +@Output phProcessStats Handle to the process to be used to deregister. +@Return Standard PVRSRV_ERROR error code. +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVStatsRegisterProcess(IMG_HANDLE* phProcessStats) +{ + return _RegisterProcess(phProcessStats, OSGetCurrentClientProcessIDKM()); +} + +/*************************************************************************/ /*! +@Function PVRSRVStatsDeregisterProcess +@Input hProcessStats Handle to the process returned when registered. +@Description Method for destroying the statistics module data. 
+*/ /**************************************************************************/ +void +PVRSRVStatsDeregisterProcess(IMG_HANDLE hProcessStats) +{ + PVR_DPF((PVR_DBG_MESSAGE, "%s: Deregister process entered PID %d [%s]", + __func__, OSGetCurrentClientProcessIDKM(), + OSGetCurrentProcessName())); + + if (hProcessStats != (IMG_HANDLE) NULL) + { + PVRSRV_PROCESS_STATS* psProcessStats = (PVRSRV_PROCESS_STATS*) hProcessStats; + + /* Lower the reference count, if zero then move it to the dead list */ + OSLockAcquire(g_psLinkedListLock); + if (psProcessStats->ui32RefCount > 0) + { + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + psProcessStats->ui32RefCount--; + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = psProcessStats->ui32RefCount; + +#if !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + if (psProcessStats->ui32RefCount == 0) + { + OSLockRelease(psProcessStats->hLock); + _MoveProcessToDeadList(psProcessStats); + }else +#endif + { + OSLockRelease(psProcessStats->hLock); + } + } + OSLockRelease(g_psLinkedListLock); + + /* Check if the dead list needs to be reduced */ + _CompressMemoryUsage(); + } +} /* PVRSRVStatsDeregisterProcess */ + +void +PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, + void *pvCpuVAddr, + IMG_CPU_PHYADDR sCpuPAddr, + size_t uiBytes, + void *pvPrivateData, + IMG_PID currentPid + DEBUG_MEMSTATS_PARAMS) +{ +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid(); + PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_MEM_ALLOC_REC* psRecord = NULL; + PVRSRV_PROCESS_STATS* psProcessStats; + enum { PVRSRV_PROC_NOTFOUND, + PVRSRV_PROC_FOUND, + PVRSRV_PROC_RESURRECTED + } eProcSearch = PVRSRV_PROC_FOUND; + +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + IMG_UINT64 ui64InitialSize; +#endif + + /* Don't do anything if we are not initialised or we are shutting down! 
*/ + if (!bProcessStatsInitialised) + { +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + PVR_DPF((PVR_DBG_WARNING, + "%s: Called when process statistics module is not initialised", + __func__)); +#endif + return; + } + + /* + * To prevent a recursive loop, we make the memory allocations for our + * memstat records via OSAllocMemNoStats(), which does not try to + * create a memstat record entry. + */ + + /* Allocate the memory record... */ + psRecord = OSAllocZMemNoStats(sizeof(PVRSRV_MEM_ALLOC_REC)); + if (psRecord == NULL) + { + return; + } + + psRecord->eAllocType = eAllocType; + psRecord->pvCpuVAddr = pvCpuVAddr; + psRecord->sCpuPAddr.uiAddr = sCpuPAddr.uiAddr; + psRecord->uiBytes = uiBytes; + psRecord->pvPrivateData = pvPrivateData; + + psRecord->pid = currentPid; + +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON) + psRecord->pvAllocdFromFile = pvAllocFromFile; + psRecord->ui32AllocdFromLine = ui32AllocFromLine; +#endif + + _increase_global_stat(eAllocType, uiBytes); + /* Lock while we find the correct process... 
*/ + OSLockAcquire(g_psLinkedListLock); + + if (psPVRSRVData) + { + if ((currentPid == psPVRSRVData->cleanupThreadPid) && + (currentCleanupPid != 0)) + { + psProcessStats = _FindProcessStats(currentCleanupPid); + } + else + { + psProcessStats = _FindProcessStatsInLiveList(currentPid); + if (!psProcessStats) + { + psProcessStats = _FindProcessStatsInDeadList(currentPid); + eProcSearch = PVRSRV_PROC_RESURRECTED; + } + } + } + else + { + psProcessStats = _FindProcessStatsInLiveList(currentPid); + if (!psProcessStats) + { + psProcessStats = _FindProcessStatsInDeadList(currentPid); + eProcSearch = PVRSRV_PROC_RESURRECTED; + } + } + + if (psProcessStats == NULL) + { + eProcSearch = PVRSRV_PROC_NOTFOUND; + +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + PVR_DPF((PVR_DBG_WARNING, + "%s: Process stat increment called for 'unknown' process PID(%d)", + __func__, currentPid)); + + if (_AllocateProcessStats(&psProcessStats, currentPid) != PVRSRV_OK) + { + OSLockRelease(g_psLinkedListLock); + goto e0; + } + + /* Add it to the live list... 
*/ + _AddProcessStatsToFrontOfLiveList(psProcessStats); + + OSLockRelease(g_psLinkedListLock); + +#else /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */ + OSLockRelease(g_psLinkedListLock); +#endif /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */ + + if (psProcessStats == NULL) + { +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + PVR_DPF((PVR_DBG_ERROR, + "%s UNABLE TO CREATE process_stats entry for pid %d [%s] (" IMG_SIZE_FMTSPEC " bytes)", + __func__, currentPid, OSGetCurrentProcessName(), uiBytes)); +#endif + if (psRecord != NULL) + { + OSFreeMemNoStats(psRecord); + } + return; + } + } + else + { +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + if (eProcSearch == PVRSRV_PROC_RESURRECTED) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Process stat incremented on 'dead' process PID(%d)", + __func__, currentPid)); + /* Move process from dead list to live list */ + _MoveProcessToLiveList(psProcessStats); + } +#endif + OSLockRelease(g_psLinkedListLock); + } + + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + + /* Insert the memory record... */ + if (psRecord != NULL) + { + List_PVRSRV_MEM_ALLOC_REC_Insert(&psProcessStats->psMemoryRecords, psRecord); + } + +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + ui64InitialSize = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); +#endif + + /* Update the memory watermarks... 
*/ + switch (eAllocType) + { + case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: + { + if (psRecord != NULL) + { + if (pvCpuVAddr == NULL) + { + break; + } + psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr; + } + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: + { + if (psRecord != NULL) + { + if (pvCpuVAddr == NULL) + { + break; + } + psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr; + } + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: + { + if (psRecord != NULL) + { + if (pvCpuVAddr == NULL) + { + break; + } + psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr; + } + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: + { + if (psRecord != NULL) + { + if (pvCpuVAddr == NULL) + { + break; + } + psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr; + } + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + 
case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: + { + if (psRecord != NULL) + { + psRecord->ui64Key = sCpuPAddr.uiAddr; + } + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: + { + if (psRecord != NULL) + { + if (pvCpuVAddr == NULL) + { + break; + } + psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr; + } + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: + { + if (psRecord != NULL) + { + psRecord->ui64Key = sCpuPAddr.uiAddr; + } + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: + { + if (psRecord != NULL) + { + psRecord->ui64Key = sCpuPAddr.uiAddr; + } + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: + { + if (psRecord != NULL) + { + if (pvCpuVAddr == NULL) + { + break; + } + psRecord->ui64Key = 
(IMG_UINT64)(uintptr_t)pvCpuVAddr; + } + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + default: + { + PVR_ASSERT(0); + } + break; + } + +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + if (psProcessStats->pid != PVR_SYS_ALLOC_PID) + { + IMG_UINT64 ui64Size = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); + if (ui64Size != ui64InitialSize) + { + TracepointUpdateGPUMemPerProcess(0, psProcessStats->pid, ui64Size); + } + } +#endif + + OSLockRelease(psProcessStats->hLock); + + return; + +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) +e0: + OSFreeMemNoStats(psRecord); + return; +#endif +#endif +} /* PVRSRVStatsAddMemAllocRecord */ + +void +PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, + IMG_UINT64 ui64Key, + IMG_PID currentPid) +{ +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid(); + PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_PROCESS_STATS* psProcessStats = NULL; + PVRSRV_MEM_ALLOC_REC* psRecord = NULL; + IMG_BOOL bFound = IMG_FALSE; + + /* Don't do anything if we are not initialised or we are shutting down! */ + if (!bProcessStatsInitialised) + { +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + PVR_DPF((PVR_DBG_WARNING, + "%s: Called when process statistics module is not initialised", + __func__)); +#endif + return; + } + + /* Lock while we find the correct process and remove this record... 
*/ + OSLockAcquire(g_psLinkedListLock); + + if (psPVRSRVData) + { + if ((currentPid == psPVRSRVData->cleanupThreadPid) && + (currentCleanupPid != 0)) + { + psProcessStats = _FindProcessStats(currentCleanupPid); + } + else + { + psProcessStats = _FindProcessStats(currentPid); + } + } + else + { + psProcessStats = _FindProcessStats(currentPid); + } + if (psProcessStats != NULL) + { + psRecord = psProcessStats->psMemoryRecords; + while (psRecord != NULL) + { + if (psRecord->ui64Key == ui64Key && psRecord->eAllocType == eAllocType) + { + bFound = IMG_TRUE; + break; + } + + psRecord = psRecord->psNext; + } + } + + /* If not found, we need to do a full search in case it was allocated to a different PID... */ + if (!bFound) + { + PVRSRV_PROCESS_STATS* psProcessStatsAlreadyChecked = psProcessStats; + + /* Search all live lists first... */ + psProcessStats = g_psLiveList; + while (psProcessStats != NULL) + { + if (psProcessStats != psProcessStatsAlreadyChecked) + { + psRecord = psProcessStats->psMemoryRecords; + while (psRecord != NULL) + { + if (psRecord->ui64Key == ui64Key && psRecord->eAllocType == eAllocType) + { + bFound = IMG_TRUE; + break; + } + + psRecord = psRecord->psNext; + } + } + + if (bFound) + { + break; + } + + psProcessStats = psProcessStats->psNext; + } + + /* If not found, then search all dead lists next... 
*/ + if (!bFound) + { + psProcessStats = g_psDeadList; + while (psProcessStats != NULL) + { + if (psProcessStats != psProcessStatsAlreadyChecked) + { + psRecord = psProcessStats->psMemoryRecords; + while (psRecord != NULL) + { + if (psRecord->ui64Key == ui64Key && psRecord->eAllocType == eAllocType) + { + bFound = IMG_TRUE; + break; + } + + psRecord = psRecord->psNext; + } + } + + if (bFound) + { + break; + } + + psProcessStats = psProcessStats->psNext; + } + } + } + + /* Update the watermark and remove this record...*/ + if (bFound) + { + _decrease_global_stat(eAllocType, psRecord->uiBytes); + + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + + _DecreaseProcStatValue(eAllocType, + psProcessStats, + psRecord->uiBytes); + + List_PVRSRV_MEM_ALLOC_REC_Remove(psRecord); + OSLockRelease(psProcessStats->hLock); + OSLockRelease(g_psLinkedListLock); + +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + /* If all stats are now zero, remove the entry for this thread */ + if (psProcessStats->ui32StatAllocFlags == 0) + { + OSLockAcquire(g_psLinkedListLock); + _MoveProcessToDeadList(psProcessStats); + OSLockRelease(g_psLinkedListLock); + + /* Check if the dead list needs to be reduced */ + _CompressMemoryUsage(); + } +#endif + /* + * Free the record outside the lock so we don't deadlock and so we + * reduce the time the lock is held. 
+ */ + OSFreeMemNoStats(psRecord); + } + else + { + OSLockRelease(g_psLinkedListLock); + } + +#else +PVR_UNREFERENCED_PARAMETER(eAllocType); +PVR_UNREFERENCED_PARAMETER(ui64Key); +#endif +} /* PVRSRVStatsRemoveMemAllocRecord */ + +void +PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE eAllocType, + size_t uiBytes, + IMG_UINT64 uiCpuVAddr, + IMG_PID uiPid) +{ + IMG_BOOL bRes = IMG_FALSE; + _PVR_STATS_TRACKING_HASH_ENTRY *psNewTrackingHashEntry = NULL; + + if (!bProcessStatsInitialised || (gpsSizeTrackingHashTable == NULL)) + { +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + PVR_DPF((PVR_DBG_WARNING, + "%s: Called when process statistics module is not initialised", + __func__)); +#endif + return; + } + + /* Alloc untracked memory for the new hash table entry */ + psNewTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)OSAllocMemNoStats(sizeof(*psNewTrackingHashEntry)); + if (psNewTrackingHashEntry) + { + /* Fill-in the size of the allocation and PID of the allocating process */ + psNewTrackingHashEntry->uiSizeInBytes = uiBytes; + psNewTrackingHashEntry->uiPid = uiPid; + OSLockAcquire(gpsSizeTrackingHashTableLock); + /* Insert address of the new struct into the hash table */ + bRes = HASH_Insert(gpsSizeTrackingHashTable, uiCpuVAddr, (uintptr_t)psNewTrackingHashEntry); + OSLockRelease(gpsSizeTrackingHashTableLock); + } + + if (psNewTrackingHashEntry) + { + if (bRes) + { + PVRSRVStatsIncrMemAllocStat(eAllocType, uiBytes, uiPid); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "*** %s : @ line %d HASH_Insert() failed!", + __func__, __LINE__)); + } + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "*** %s : @ line %d Failed to alloc memory for psNewTrackingHashEntry!", + __func__, __LINE__)); + } +} + +void +PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType, + size_t uiBytes, + IMG_PID currentPid) + +{ + IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid(); + PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_PROCESS_STATS* psProcessStats = NULL; + 
enum { PVRSRV_PROC_NOTFOUND, + PVRSRV_PROC_FOUND, + PVRSRV_PROC_RESURRECTED + } eProcSearch = PVRSRV_PROC_FOUND; + +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + IMG_UINT64 ui64InitialSize; +#endif + + /* Don't do anything if we are not initialised or we are shutting down! */ + if (!bProcessStatsInitialised) + { +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + PVR_DPF((PVR_DBG_WARNING, + "%s: Called when process statistics module is not initialised", + __func__)); +#endif + return; + } + + _increase_global_stat(eAllocType, uiBytes); + OSLockAcquire(g_psLinkedListLock); + if (psPVRSRVData) + { + if ((currentPid == psPVRSRVData->cleanupThreadPid) && + (currentCleanupPid != 0)) + { + psProcessStats = _FindProcessStats(currentCleanupPid); + } + else + { + psProcessStats = _FindProcessStatsInLiveList(currentPid); + if (!psProcessStats) + { + psProcessStats = _FindProcessStatsInDeadList(currentPid); + eProcSearch = PVRSRV_PROC_RESURRECTED; + } + } + } + else + { + psProcessStats = _FindProcessStatsInLiveList(currentPid); + if (!psProcessStats) + { + psProcessStats = _FindProcessStatsInDeadList(currentPid); + eProcSearch = PVRSRV_PROC_RESURRECTED; + } + } + + if (psProcessStats == NULL) + { + eProcSearch = PVRSRV_PROC_NOTFOUND; + +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + PVR_DPF((PVR_DBG_WARNING, + "%s: Process stat increment called for 'unknown' process PID(%d)", + __func__, currentPid)); + + if (bProcessStatsInitialised) + { + if (_AllocateProcessStats(&psProcessStats, currentPid) != PVRSRV_OK) + { + return; + } + /* Add it to the live list... 
*/ + _AddProcessStatsToFrontOfLiveList(psProcessStats); + } +#else + OSLockRelease(g_psLinkedListLock); +#endif /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */ + + } + + if (psProcessStats != NULL) + { +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + if (eProcSearch == PVRSRV_PROC_RESURRECTED) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Process stat incremented on 'dead' process PID(%d)", + __func__, currentPid)); + + /* Move process from dead list to live list */ + _MoveProcessToLiveList(psProcessStats); + } +#endif + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + /* Release the list lock as soon as we acquire the process lock, + * this ensures if the process is in deadlist the entry cannot be + * deleted or modified + */ + OSLockRelease(g_psLinkedListLock); + +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + ui64InitialSize = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); +#endif + + /* Update the memory watermarks... */ + switch (eAllocType) + { + case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + 
psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: + { + 
INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + default: + { + PVR_ASSERT(0); + } + break; + } + +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + if (psProcessStats->pid != PVR_SYS_ALLOC_PID) + { + IMG_UINT64 ui64Size = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); + if (ui64Size != ui64InitialSize) + { + TracepointUpdateGPUMemPerProcess(0, psProcessStats->pid, + ui64Size); + } + } +#endif + + OSLockRelease(psProcessStats->hLock); + } + +} + +static void +_DecreaseProcStatValue(PVRSRV_MEM_ALLOC_TYPE eAllocType, + PVRSRV_PROCESS_STATS* psProcessStats, + IMG_UINT32 uiBytes) +{ +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + IMG_UINT64 ui64InitialSize = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); +#endif + + switch (eAllocType) + { + case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes); + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes); + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMALLOC] == 0) + { 
+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes); + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes); + if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes); + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes); + if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: + { + 
DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes); + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes); + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes); + if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, (IMG_UINT32)uiBytes); + if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + default: + { + PVR_ASSERT(0); + } + break; + } + +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + if (psProcessStats->pid != PVR_SYS_ALLOC_PID) + { + IMG_UINT64 ui64Size = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); + if (ui64Size != ui64InitialSize) + { + 
TracepointUpdateGPUMemPerProcess(0, psProcessStats->pid, ui64Size); + } + } +#endif +} + +#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE) +int RawProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVRSRV_PROCESS_STATS *psProcessStats; + + DIPrintf(psEntry, + "%s,%s,%s,%s,%s,%s\n", + "PID", + "MemoryUsageKMalloc", // PVRSRV_PROCESS_STAT_TYPE_KMALLOC + "MemoryUsageAllocPTMemoryUMA", // PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA + "MemoryUsageAllocPTMemoryLMA", // PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA + "MemoryUsageAllocGPUMemLMA", // PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES + "MemoryUsageAllocGPUMemUMA"); // PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES + + OSLockAcquire(g_psLinkedListLock); + + psProcessStats = g_psLiveList; + + while (psProcessStats != NULL) + { + if (psProcessStats->pid != PVR_SYS_ALLOC_PID) + { + DIPrintf(psEntry, + "%d,%d,%d,%d,%d,%d\n", + psProcessStats->pid, + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC], + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA], + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA], + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES], + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES]); + } + + psProcessStats = psProcessStats->psNext; + } + + OSLockRelease(g_psLinkedListLock); + + return 0; +} /* RawProcessStatsPrintElements */ +#endif + +void +PVRSRVStatsDecrMemKAllocStat(size_t uiBytes, + IMG_PID decrPID) +{ + PVRSRV_PROCESS_STATS* psProcessStats; + + /* Don't do anything if we are not initialised or we are shutting down! */ + if (!bProcessStatsInitialised) + { + return; + } + + _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, uiBytes); + + OSLockAcquire(g_psLinkedListLock); + + psProcessStats = _FindProcessStats(decrPID); + + if (psProcessStats != NULL) + { + /* Decrement the kmalloc memory stat... 
*/ + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, uiBytes); + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); + } + + OSLockRelease(g_psLinkedListLock); +} + +static void +_StatsDecrMemTrackedStat(_PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry, + PVRSRV_MEM_ALLOC_TYPE eAllocType) +{ + PVRSRV_PROCESS_STATS* psProcessStats; + + /* Don't do anything if we are not initialised or we are shutting down! */ + if (!bProcessStatsInitialised) + { + return; + } + + _decrease_global_stat(eAllocType, psTrackingHashEntry->uiSizeInBytes); + + OSLockAcquire(g_psLinkedListLock); + + psProcessStats = _FindProcessStats(psTrackingHashEntry->uiPid); + + if (psProcessStats != NULL) + { + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + /* Decrement the memory stat... */ + _DecreaseProcStatValue(eAllocType, + psProcessStats, + psTrackingHashEntry->uiSizeInBytes); + OSLockRelease(psProcessStats->hLock); + } + + OSLockRelease(g_psLinkedListLock); +} + +void +PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE eAllocType, + IMG_UINT64 uiCpuVAddr) +{ + _PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry = NULL; + + if (!bProcessStatsInitialised || (gpsSizeTrackingHashTable == NULL)) + { + return; + } + + OSLockAcquire(gpsSizeTrackingHashTableLock); + psTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)HASH_Remove(gpsSizeTrackingHashTable, uiCpuVAddr); + OSLockRelease(gpsSizeTrackingHashTableLock); + if (psTrackingHashEntry) + { + _StatsDecrMemTrackedStat(psTrackingHashEntry, eAllocType); + OSFreeMemNoStats(psTrackingHashEntry); + } +} + +void +PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType, + size_t uiBytes, + IMG_PID currentPid) +{ + IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid(); + PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_PROCESS_STATS* psProcessStats = NULL; + + /* Don't do anything if we are not initialised or we are shutting down! 
*/ + if (!bProcessStatsInitialised) + { + return; + } + + _decrease_global_stat(eAllocType, uiBytes); + + OSLockAcquire(g_psLinkedListLock); + if (psPVRSRVData) + { + if ((currentPid == psPVRSRVData->cleanupThreadPid) && + (currentCleanupPid != 0)) + { + psProcessStats = _FindProcessStats(currentCleanupPid); + } + else + { + psProcessStats = _FindProcessStats(currentPid); + } + } + else + { + psProcessStats = _FindProcessStats(currentPid); + } + + + if (psProcessStats != NULL) + { + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + /* Release the list lock as soon as we acquire the process lock, + * this ensures if the process is in deadlist the entry cannot be + * deleted or modified + */ + OSLockRelease(g_psLinkedListLock); + /* Update the memory watermarks... */ + _DecreaseProcStatValue(eAllocType, + psProcessStats, + uiBytes); + OSLockRelease(psProcessStats->hLock); + +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + /* If all stats are now zero, remove the entry for this thread */ + if (psProcessStats->ui32StatAllocFlags == 0) + { + OSLockAcquire(g_psLinkedListLock); + _MoveProcessToDeadList(psProcessStats); + OSLockRelease(g_psLinkedListLock); + + /* Check if the dead list needs to be reduced */ + _CompressMemoryUsage(); + } +#endif + }else{ + OSLockRelease(g_psLinkedListLock); + } +} + +/* For now we do not want to expose the global stats API + * so we wrap it into this specific function for pooled pages. + * As soon as we need to modify the global stats directly somewhere else + * we want to replace these functions with more general ones. 
+ */ +void +PVRSRVStatsIncrMemAllocPoolStat(size_t uiBytes) +{ + _increase_global_stat(PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, uiBytes); +} + +void +PVRSRVStatsDecrMemAllocPoolStat(size_t uiBytes) +{ + _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, uiBytes); +} + +void +PVRSRVStatsUpdateOOMStats(IMG_UINT32 ui32OOMStatType, + IMG_PID pidOwner) +{ + PVRSRV_PROCESS_STAT_TYPE eOOMStatType = (PVRSRV_PROCESS_STAT_TYPE) ui32OOMStatType; + IMG_PID pidCurrent = pidOwner; + PVRSRV_PROCESS_STATS* psProcessStats; + + /* Don't do anything if we are not initialised or we are shutting down! */ + if (!bProcessStatsInitialised) + { + return; + } + + /* Lock while we find the correct process and update the record... */ + OSLockAcquire(g_psLinkedListLock); + + psProcessStats = _FindProcessStats(pidCurrent); + if (psProcessStats != NULL) + { + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + psProcessStats->i32StatValue[eOOMStatType]++; + OSLockRelease(psProcessStats->hLock); + } + else + { + PVR_DPF((PVR_DBG_WARNING, "PVRSRVStatsUpdateOOMStats: Process not found for Pid=%d", pidCurrent)); + } + + OSLockRelease(g_psLinkedListLock); +} /* PVRSRVStatsUpdateOOMStats */ + +PVRSRV_ERROR +PVRSRVServerUpdateOOMStats(IMG_UINT32 ui32OOMStatType, + IMG_PID pidOwner) +{ + if (ui32OOMStatType >= PVRSRV_PROCESS_STAT_TYPE_COUNT) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PVRSRVStatsUpdateOOMStats(ui32OOMStatType, pidOwner); + + return PVRSRV_OK; +} + +void +PVRSRVStatsUpdateRenderContextStats(IMG_UINT32 ui32TotalNumPartialRenders, + IMG_UINT32 ui32TotalNumOutOfMemory, + IMG_UINT32 ui32NumTAStores, + IMG_UINT32 ui32Num3DStores, + IMG_UINT32 ui32NumCDMStores, + IMG_UINT32 ui32NumTDMStores, + IMG_PID pidOwner) +{ + IMG_PID pidCurrent = pidOwner; + + PVRSRV_PROCESS_STATS* psProcessStats; + + /* Don't do anything if we are not initialised or we are shutting down! 
*/ + if (!bProcessStatsInitialised) + { + return; + } + + /* Lock while we find the correct process and update the record... */ + OSLockAcquire(g_psLinkedListLock); + + psProcessStats = _FindProcessStats(pidCurrent); + if (psProcessStats != NULL) + { + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_PRS] += ui32TotalNumPartialRenders; + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_OOMS] += ui32TotalNumOutOfMemory; + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES] += ui32NumTAStores; + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES] += ui32Num3DStores; + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES]+= ui32NumCDMStores; + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_TDM_STORES]+= ui32NumTDMStores; + OSLockRelease(psProcessStats->hLock); + } + else + { + PVR_DPF((PVR_DBG_WARNING, "PVRSRVStatsUpdateRenderContextStats: Process not found for Pid=%d", pidCurrent)); + } + + OSLockRelease(g_psLinkedListLock); +} /* PVRSRVStatsUpdateRenderContextStats */ + +void +PVRSRVStatsUpdateZSBufferStats(IMG_UINT32 ui32NumReqByApp, + IMG_UINT32 ui32NumReqByFW, + IMG_PID owner) +{ + IMG_PID currentPid = (owner==0)?OSGetCurrentClientProcessIDKM():owner; + PVRSRV_PROCESS_STATS* psProcessStats; + + + /* Don't do anything if we are not initialised or we are shutting down! */ + if (!bProcessStatsInitialised) + { + return; + } + + /* Lock while we find the correct process and update the record... 
*/ + OSLockAcquire(g_psLinkedListLock); + + psProcessStats = _FindProcessStats(currentPid); + if (psProcessStats != NULL) + { + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP] += ui32NumReqByApp; + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW] += ui32NumReqByFW; + OSLockRelease(psProcessStats->hLock); + } + + OSLockRelease(g_psLinkedListLock); +} /* PVRSRVStatsUpdateZSBufferStats */ + +void +PVRSRVStatsUpdateFreelistStats(IMG_UINT32 ui32NumGrowReqByApp, + IMG_UINT32 ui32NumGrowReqByFW, + IMG_UINT32 ui32InitFLPages, + IMG_UINT32 ui32NumHighPages, + IMG_PID ownerPid) +{ + IMG_PID currentPid = (ownerPid!=0)?ownerPid:OSGetCurrentClientProcessIDKM(); + PVRSRV_PROCESS_STATS* psProcessStats; + + /* Don't do anything if we are not initialised or we are shutting down! */ + if (!bProcessStatsInitialised) + { + return; + } + + /* Lock while we find the correct process and update the record... 
*/ + OSLockAcquire(g_psLinkedListLock); + + psProcessStats = _FindProcessStats(currentPid); + + if (psProcessStats != NULL) + { + + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP] += ui32NumGrowReqByApp; + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW] += ui32NumGrowReqByFW; + + UPDATE_MAX_VALUE(psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT], + (IMG_INT32) ui32InitFLPages); + + UPDATE_MAX_VALUE(psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES], + (IMG_INT32) ui32NumHighPages); + + OSLockRelease(psProcessStats->hLock); + + } + + OSLockRelease(g_psLinkedListLock); +} /* PVRSRVStatsUpdateFreelistStats */ + + +#if defined(ENABLE_DEBUGFS_PIDS) + +int +GenericStatsPrintElementsLive(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVRSRV_STAT_PV_DATA *psStatType = DIGetPrivData(psEntry); + PVRSRV_PROCESS_STATS* psProcessStats; + + PVR_UNREFERENCED_PARAMETER(pvData); + + PVR_ASSERT(psStatType->pfnStatsPrintElements != NULL); + + DIPrintf(psEntry, "%s\n", psStatType->szLiveStatsHeaderStr); + + OSLockAcquire(g_psLinkedListLock); + + psProcessStats = g_psLiveList; + + if (psProcessStats == NULL) + { + DIPrintf(psEntry, "No Stats to display\n%s\n", g_szSeparatorStr); + } + else + { + while (psProcessStats != NULL) + { + psStatType->pfnStatsPrintElements(psEntry, psProcessStats); + psProcessStats = psProcessStats->psNext; + DIPrintf(psEntry, "%s\n", g_szSeparatorStr); + } + } + OSLockRelease(g_psLinkedListLock); + + return 0; +} + +int +GenericStatsPrintElementsRetired(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVRSRV_STAT_PV_DATA *psStatType = DIGetPrivData(psEntry); + PVRSRV_PROCESS_STATS* psProcessStats; + + PVR_UNREFERENCED_PARAMETER(pvData); + + PVR_ASSERT(psStatType->pfnStatsPrintElements != NULL); + + DIPrintf(psEntry, "%s\n", psStatType->szRetiredStatsHeaderStr); + + 
OSLockAcquire(g_psLinkedListLock); + + psProcessStats = g_psDeadList; + + if (psProcessStats == NULL) + { + DIPrintf(psEntry, "No Stats to display\n%s\n", g_szSeparatorStr); + } + else + { + while (psProcessStats != NULL) + { + psStatType->pfnStatsPrintElements(psEntry, psProcessStats); + psProcessStats = psProcessStats->psNext; + DIPrintf(psEntry, "%s\n", g_szSeparatorStr); + } + } + OSLockRelease(g_psLinkedListLock); + + return 0; +} + +#if defined(PVRSRV_ENABLE_PERPID_STATS) +/*************************************************************************/ /*! +@Function ProcessStatsPrintElements +@Description Prints all elements for this process statistic record. +@Input pvStatPtr Pointer to statistics structure. +@Input pfnOSStatsPrintf Printf function to use for output. +*/ /**************************************************************************/ +void +ProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, + PVRSRV_PROCESS_STATS *psProcessStats) +{ + IMG_UINT32 ui32StatNumber; + + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + + DIPrintf(psEntry, "PID %u\n", psProcessStats->pid); + + /* Loop through all the values and print them... */ + for (ui32StatNumber = 0; + ui32StatNumber < ARRAY_SIZE(pszProcessStatType); + ui32StatNumber++) + { + if (OSStringNCompare(pszProcessStatType[ui32StatNumber], "", 1) != 0) + { +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + if ((ui32StatNumber == PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES) || + (ui32StatNumber == PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES)) + { + /* get the stat from RI */ + IMG_INT32 ui32Total = RITotalAllocProcessKM(psProcessStats->pid, + (ui32StatNumber == PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES) + ? 
PHYS_HEAP_TYPE_LMA : PHYS_HEAP_TYPE_UMA); + + DIPrintf(psEntry, "%-34s%10d %8dK\n", + pszProcessStatType[ui32StatNumber], ui32Total, ui32Total>>10); + } + else +#endif + { + if (ui32StatNumber >= PVRSRV_PROCESS_STAT_TYPE_KMALLOC && + ui32StatNumber <= PVRSRV_PROCESS_STAT_TYPE_TOTAL_MAX) + { + DIPrintf(psEntry, "%-34s%10d %8dK\n", + pszProcessStatType[ui32StatNumber], + psProcessStats->i32StatValue[ui32StatNumber], + psProcessStats->i32StatValue[ui32StatNumber] >> 10); + } + else + { + DIPrintf(psEntry, "%-34s%10d\n", + pszProcessStatType[ui32StatNumber], + psProcessStats->i32StatValue[ui32StatNumber]); + } + } + } + } + + OSLockRelease(psProcessStats->hLock); +} /* ProcessStatsPrintElements */ +#endif + +#if defined(PVRSRV_ENABLE_CACHEOP_STATS) +void +PVRSRVStatsUpdateCacheOpStats(PVRSRV_CACHE_OP uiCacheOp, + IMG_UINT32 ui32OpSeqNum, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEV_PHYADDR sDevPAddr, + IMG_UINT32 eFenceOpType, +#endif + IMG_DEVMEM_SIZE_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT64 ui64ExecuteTime, + IMG_BOOL bUserModeFlush, + IMG_BOOL bIsFence, + IMG_PID ownerPid) +{ + IMG_PID currentPid = (ownerPid!=0)?ownerPid:OSGetCurrentClientProcessIDKM(); + PVRSRV_PROCESS_STATS* psProcessStats; + + /* Don't do anything if we are not initialised or we are shutting down! */ + if (!bProcessStatsInitialised) + { + return; + } + + /* Lock while we find the correct process and update the record... 
*/ + OSLockAcquire(g_psLinkedListLock); + + psProcessStats = _FindProcessStats(currentPid); + + if (psProcessStats != NULL) + { + IMG_INT32 Idx; + + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + + /* Look-up next buffer write index */ + Idx = psProcessStats->uiCacheOpWriteIndex; + psProcessStats->uiCacheOpWriteIndex = INCREMENT_CACHEOP_STAT_IDX_WRAP(Idx); + + /* Store all CacheOp meta-data */ + psProcessStats->asCacheOp[Idx].uiCacheOp = uiCacheOp; +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + psProcessStats->asCacheOp[Idx].sDevVAddr = sDevVAddr; + psProcessStats->asCacheOp[Idx].sDevPAddr = sDevPAddr; + psProcessStats->asCacheOp[Idx].eFenceOpType = eFenceOpType; +#endif + psProcessStats->asCacheOp[Idx].uiOffset = uiOffset; + psProcessStats->asCacheOp[Idx].uiSize = uiSize; + psProcessStats->asCacheOp[Idx].bUserModeFlush = bUserModeFlush; + psProcessStats->asCacheOp[Idx].ui64ExecuteTime = ui64ExecuteTime; + psProcessStats->asCacheOp[Idx].ui32OpSeqNum = ui32OpSeqNum; + psProcessStats->asCacheOp[Idx].bIsFence = bIsFence; + + OSLockRelease(psProcessStats->hLock); + } + + OSLockRelease(g_psLinkedListLock); +} /* PVRSRVStatsUpdateCacheOpStats */ + +/*************************************************************************/ /*! +@Function CacheOpStatsPrintElements +@Description Prints all elements for this process statistic CacheOp record. +@Input pvStatPtr Pointer to statistics structure. +@Input pfnOSStatsPrintf Printf function to use for output. 
+*/ /**************************************************************************/ +void +CacheOpStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, + PVRSRV_PROCESS_STATS *psProcessStats) +{ + IMG_CHAR *pszCacheOpType, *pszFlushType, *pszFlushMode; + IMG_INT32 i32WriteIdx, i32ReadIdx; + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + #define CACHEOP_RI_PRINTF_HEADER \ + "%-10s %-10s %-5s %-16s %-16s %-10s %-10s %-12s %-12s\n" + #define CACHEOP_RI_PRINTF_FENCE \ + "%-10s %-10s %-5s %-16s %-16s %-10s %-10s %-12llu 0x%-10x\n" + #define CACHEOP_RI_PRINTF \ + "%-10s %-10s %-5s 0x%-14llx 0x%-14llx 0x%-8llx 0x%-8llx %-12llu 0x%-10x\n" +#else + #define CACHEOP_PRINTF_HEADER \ + "%-10s %-10s %-5s %-10s %-10s %-12s %-12s\n" + #define CACHEOP_PRINTF_FENCE \ + "%-10s %-10s %-5s %-10s %-10s %-12llu 0x%-10x\n" + #define CACHEOP_PRINTF \ + "%-10s %-10s %-5s 0x%-8llx 0x%-8llx %-12llu 0x%-10x\n" +#endif + + DIPrintf(psEntry, "PID %u\n", psProcessStats->pid); + + /* File header info */ + DIPrintf(psEntry, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + CACHEOP_RI_PRINTF_HEADER, +#else + CACHEOP_PRINTF_HEADER, +#endif + "CacheOp", + "Type", + "Mode", +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + "DevVAddr", + "DevPAddr", +#endif + "Offset", + "Size", + "Time (us)", + "SeqNo"); + + /* Take a snapshot of write index, read backwards in buffer + and wrap round at boundary */ + i32WriteIdx = psProcessStats->uiCacheOpWriteIndex; + for (i32ReadIdx = DECREMENT_CACHEOP_STAT_IDX_WRAP(i32WriteIdx); + i32ReadIdx != i32WriteIdx; + i32ReadIdx = DECREMENT_CACHEOP_STAT_IDX_WRAP(i32ReadIdx)) + { + IMG_UINT64 ui64ExecuteTime; + + if (! 
psProcessStats->asCacheOp[i32ReadIdx].ui32OpSeqNum) + { + break; + } + + ui64ExecuteTime = psProcessStats->asCacheOp[i32ReadIdx].ui64ExecuteTime; + + if (psProcessStats->asCacheOp[i32ReadIdx].bIsFence) + { + IMG_CHAR *pszFenceType = ""; + pszCacheOpType = "Fence"; + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + switch (psProcessStats->asCacheOp[i32ReadIdx].eFenceOpType) + { + case RGXFWIF_DM_GP: + pszFenceType = "GP"; + break; + + case RGXFWIF_DM_TDM: + /* Also case RGXFWIF_DM_2D: */ + pszFenceType = "TDM/2D"; + break; + + case RGXFWIF_DM_GEOM: + pszFenceType = "GEOM"; + break; + + case RGXFWIF_DM_3D: + pszFenceType = "3D"; + break; + + case RGXFWIF_DM_CDM: + pszFenceType = "CDM"; + break; + + default: + PVR_ASSERT(0); + break; + } +#endif + + DIPrintf(psEntry, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + CACHEOP_RI_PRINTF_FENCE, +#else + CACHEOP_PRINTF_FENCE, +#endif + pszCacheOpType, + pszFenceType, + "", +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + "", + "", +#endif + "", + "", + ui64ExecuteTime, + psProcessStats->asCacheOp[i32ReadIdx].ui32OpSeqNum); + } + else + { + IMG_DEVMEM_SIZE_T ui64NumOfPages; + + ui64NumOfPages = psProcessStats->asCacheOp[i32ReadIdx].uiSize >> OSGetPageShift(); + if (ui64NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC) + { + pszFlushType = "RBF.Fast"; + } + else + { + pszFlushType = "RBF.Slow"; + } + + if (psProcessStats->asCacheOp[i32ReadIdx].bUserModeFlush) + { + pszFlushMode = "UM"; + } + else + { + pszFlushMode = "KM"; + } + + switch (psProcessStats->asCacheOp[i32ReadIdx].uiCacheOp) + { + case PVRSRV_CACHE_OP_NONE: + pszCacheOpType = "None"; + break; + case PVRSRV_CACHE_OP_CLEAN: + pszCacheOpType = "Clean"; + break; + case PVRSRV_CACHE_OP_INVALIDATE: + pszCacheOpType = "Invalidate"; + break; + case PVRSRV_CACHE_OP_FLUSH: + pszCacheOpType = "Flush"; + break; + default: + pszCacheOpType = "Unknown"; + break; + } + + DIPrintf(psEntry, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && 
defined(DEBUG) + CACHEOP_RI_PRINTF, +#else + CACHEOP_PRINTF, +#endif + pszCacheOpType, + pszFlushType, + pszFlushMode, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + psProcessStats->asCacheOp[i32ReadIdx].sDevVAddr.uiAddr, + psProcessStats->asCacheOp[i32ReadIdx].sDevPAddr.uiAddr, +#endif + psProcessStats->asCacheOp[i32ReadIdx].uiOffset, + psProcessStats->asCacheOp[i32ReadIdx].uiSize, + ui64ExecuteTime, + psProcessStats->asCacheOp[i32ReadIdx].ui32OpSeqNum); + } + } +} /* CacheOpStatsPrintElements */ +#endif + +#if defined(PVRSRV_ENABLE_MEMORY_STATS) +/*************************************************************************/ /*! +@Function MemStatsPrintElements +@Description Prints all elements for the memory statistic record. +@Input pvStatPtr Pointer to statistics structure. +@Input pfnOSStatsPrintf Printf function to use for output. +*/ /**************************************************************************/ +void +MemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, + PVRSRV_PROCESS_STATS *psProcessStats) +{ + IMG_UINT32 ui32VAddrFields = sizeof(void*)/sizeof(IMG_UINT32); + IMG_UINT32 ui32PAddrFields = sizeof(IMG_CPU_PHYADDR)/sizeof(IMG_UINT32); + PVRSRV_MEM_ALLOC_REC *psRecord; + IMG_UINT32 ui32ItemNumber; + + /* Write the header... 
*/ + DIPrintf(psEntry, "PID "); + + DIPrintf(psEntry, "Type VAddress"); + for (ui32ItemNumber = 1; ui32ItemNumber < ui32VAddrFields; ui32ItemNumber++) + { + DIPrintf(psEntry, " "); + } + + DIPrintf(psEntry, " PAddress"); + for (ui32ItemNumber = 1; ui32ItemNumber < ui32PAddrFields; ui32ItemNumber++) + { + DIPrintf(psEntry, " "); + } + + DIPrintf(psEntry, " Size(bytes)\n"); + + psRecord = psProcessStats->psMemoryRecords; + if (psRecord == NULL) + { + DIPrintf(psEntry, "%-5d\n", psProcessStats->pid); + } + + while (psRecord != NULL) + { + IMG_BOOL bPrintStat = IMG_TRUE; + + DIPrintf(psEntry, "%-5d ", psProcessStats->pid); + + switch (psRecord->eAllocType) + { + case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: DIPrintf(psEntry, "KMALLOC "); break; + case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: DIPrintf(psEntry, "VMALLOC "); break; + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: DIPrintf(psEntry, "ALLOC_PAGES_PT_LMA "); break; + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: DIPrintf(psEntry, "ALLOC_PAGES_PT_UMA "); break; + case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: DIPrintf(psEntry, "IOREMAP_PT_LMA "); break; + case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: DIPrintf(psEntry, "VMAP_PT_UMA "); break; + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: DIPrintf(psEntry, "ALLOC_LMA_PAGES "); break; + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: DIPrintf(psEntry, "ALLOC_UMA_PAGES "); break; + case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: DIPrintf(psEntry, "MAP_UMA_LMA_PAGES "); break; + case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: DIPrintf(psEntry, "DMA_BUF_IMPORT "); break; + default: DIPrintf(psEntry, "INVALID "); break; + } + + if (bPrintStat) + { + for (ui32ItemNumber = 0; ui32ItemNumber < ui32VAddrFields; ui32ItemNumber++) + { + DIPrintf(psEntry, "%08x", *(((IMG_UINT32*) &psRecord->pvCpuVAddr) + ui32VAddrFields - ui32ItemNumber - 1)); + } + DIPrintf(psEntry, " "); + + for (ui32ItemNumber = 0; ui32ItemNumber < ui32PAddrFields; ui32ItemNumber++) + { + DIPrintf(psEntry, "%08x", *(((IMG_UINT32*) 
&psRecord->sCpuPAddr.uiAddr) + ui32PAddrFields - ui32ItemNumber - 1)); + } + +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON) + DIPrintf(psEntry, " " IMG_SIZE_FMTSPEC, psRecord->uiBytes); + + DIPrintf(psEntry, " %s", (IMG_CHAR*) psRecord->pvAllocdFromFile); + + DIPrintf(psEntry, " %d\n", psRecord->ui32AllocdFromLine); +#else + DIPrintf(psEntry, " " IMG_SIZE_FMTSPEC "\n", psRecord->uiBytes); +#endif + } + /* Move to next record... */ + psRecord = psRecord->psNext; + } +} /* MemStatsPrintElements */ +#endif + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +/*************************************************************************/ /*! +@Function RIMemStatsPrintElements +@Description Prints all elements for the RI Memory record. +@Input pvStatPtr Pointer to statistics structure. +@Input pfnOSStatsPrintf Printf function to use for output. +*/ /**************************************************************************/ +void RIMemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, + PVRSRV_PROCESS_STATS *psProcessStats) +{ + IMG_CHAR *pszStatFmtText = NULL; + IMG_HANDLE *pRIHandle = NULL; + + /* Acquire RI lock */ + RILockAcquireKM(); + + /* + * Loop through the RI system to get each line of text. 
+ */ + while (RIGetListEntryKM(psProcessStats->pid, + &pRIHandle, + &pszStatFmtText)) + { + DIPrintf(psEntry, "%s", pszStatFmtText); + } + + /* Release RI lock */ + RILockReleaseKM(); + +} /* RIMemStatsPrintElements */ +#endif + +#endif + +static IMG_UINT32 ui32FirmwareStartTimestamp; +static IMG_UINT64 ui64FirmwareIdleDuration; + +void SetFirmwareStartTime(IMG_UINT32 ui32Time) +{ + ui32FirmwareStartTimestamp = UPDATE_TIME(ui32FirmwareStartTimestamp, ui32Time); +} + +void SetFirmwareHandshakeIdleTime(IMG_UINT64 ui64Duration) +{ + ui64FirmwareIdleDuration = UPDATE_TIME(ui64FirmwareIdleDuration, ui64Duration); +} + +static INLINE void PowerStatsPrintGroup(IMG_UINT32 *pui32Stats, + OSDI_IMPL_ENTRY *psEntry, + PVRSRV_POWER_STAT_TYPE eForced, + PVRSRV_POWER_STAT_TYPE ePowerOn) +{ + IMG_UINT32 ui32Index; + + ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, PRE_POWER, DEVICE); + DIPrintf(psEntry, " Pre-Device: %9u\n", pui32Stats[ui32Index]); + + ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, PRE_POWER, SYSTEM); + DIPrintf(psEntry, " Pre-System: %9u\n", pui32Stats[ui32Index]); + + ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, POST_POWER, SYSTEM); + DIPrintf(psEntry, " Post-System: %9u\n", pui32Stats[ui32Index]); + + ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, POST_POWER, DEVICE); + DIPrintf(psEntry, " Post-Device: %9u\n", pui32Stats[ui32Index]); +} + +int PowerStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + IMG_UINT32 *pui32Stats = &aui32PowerTimingStats[0]; + IMG_UINT32 ui32Idx; + + PVR_UNREFERENCED_PARAMETER(pvData); + + DIPrintf(psEntry, "Forced Power-on Transition (nanoseconds):\n"); + PowerStatsPrintGroup(pui32Stats, psEntry, FORCED, POWER_ON); + DIPrintf(psEntry, "\n"); + + DIPrintf(psEntry, "Forced Power-off Transition (nanoseconds):\n"); + PowerStatsPrintGroup(pui32Stats, psEntry, FORCED, POWER_OFF); + DIPrintf(psEntry, "\n"); + + DIPrintf(psEntry, "Not Forced Power-on Transition (nanoseconds):\n"); + 
PowerStatsPrintGroup(pui32Stats, psEntry, NOT_FORCED, POWER_ON); + DIPrintf(psEntry, "\n"); + + DIPrintf(psEntry, "Not Forced Power-off Transition (nanoseconds):\n"); + PowerStatsPrintGroup(pui32Stats, psEntry, NOT_FORCED, POWER_OFF); + DIPrintf(psEntry, "\n"); + + + DIPrintf(psEntry, "FW bootup time (timer ticks): %u\n", ui32FirmwareStartTimestamp); + DIPrintf(psEntry, "Host Acknowledge Time for FW Idle Signal (timer ticks): %u\n", (IMG_UINT32)(ui64FirmwareIdleDuration)); + DIPrintf(psEntry, "\n"); + + DIPrintf(psEntry, "Last %d Clock Speed Change Timers (nanoseconds):\n", NUM_EXTRA_POWER_STATS); + DIPrintf(psEntry, "Prepare DVFS\tDVFS Change\tPost DVFS\n"); + + for (ui32Idx = ui32ClockSpeedIndexStart; ui32Idx !=ui32ClockSpeedIndexEnd; ui32Idx = (ui32Idx + 1) % NUM_EXTRA_POWER_STATS) + { + DIPrintf(psEntry, "%12llu\t%11llu\t%9llu\n",asClockSpeedChanges[ui32Idx].ui64PreClockSpeedChangeDuration, + asClockSpeedChanges[ui32Idx].ui64BetweenPreEndingAndPostStartingDuration, + asClockSpeedChanges[ui32Idx].ui64PostClockSpeedChangeDuration); + } + + return 0; +} /* PowerStatsPrintElements */ + +int GlobalStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + IMG_UINT32 ui32StatNumber; + PVR_UNREFERENCED_PARAMETER(pvData); + + OSLockAcquire(gsGlobalStats.hGlobalStatsLock); + + for (ui32StatNumber = 0; + ui32StatNumber < ARRAY_SIZE(pszDriverStatType); + ui32StatNumber++) + { + if (OSStringNCompare(pszDriverStatType[ui32StatNumber], "", 1) != 0) + { + DIPrintf(psEntry, "%-34s%10d\n", + pszDriverStatType[ui32StatNumber], + GET_GLOBAL_STAT_VALUE(ui32StatNumber)); + } + } + + OSLockRelease(gsGlobalStats.hGlobalStatsLock); + + return 0; +} + +/*************************************************************************/ /*! +@Function PVRSRVFindProcessMemStats +@Description Using the provided PID find memory stats for that process. + Memstats will be provided for live/connected processes only. 
+ Memstat values provided by this API relate only to the physical + memory allocated by the process and does not relate to any of + the mapped or imported memory. +@Input pid Process to search for. +@Input ArraySize Size of the array where memstat + records will be stored +@Input bAllProcessStats Flag to denote if stats for + individual process are requested + stats for all processes are + requested +@Input MemoryStats Handle to the memory where memstats + are stored. +@Output Memory statistics records for the requested pid. +*/ /**************************************************************************/ +PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, IMG_UINT32 ui32ArrSize, IMG_BOOL bAllProcessStats, IMG_UINT32 *pui32MemoryStats) +{ + IMG_INT i; + PVRSRV_PROCESS_STATS* psProcessStats; + + PVR_LOG_RETURN_IF_INVALID_PARAM(pui32MemoryStats, "pui32MemoryStats"); + + if (bAllProcessStats) + { + /* expect size of array decreased by 2 to maintain compatibility with + * older clients (ones that do not know about new dmabuf import stats) */ + PVR_LOG_RETURN_IF_FALSE(ui32ArrSize == PVRSRV_DRIVER_STAT_TYPE_COUNT - 2, + "MemStats array size is incorrect", + PVRSRV_ERROR_INVALID_PARAMS); + + OSLockAcquire(gsGlobalStats.hGlobalStatsLock); + + for (i = 0; i < ui32ArrSize; i++) + { + pui32MemoryStats[i] = GET_GLOBAL_STAT_VALUE(i); + } + + OSLockRelease(gsGlobalStats.hGlobalStatsLock); + + return PVRSRV_OK; + } + + /* expect size of array decreased by 2 to maintain compatibility with + * older clients (ones that do not know about new dmabuf import stats) */ + PVR_LOG_RETURN_IF_FALSE(ui32ArrSize == PVRSRV_PROCESS_STAT_TYPE_COUNT - 2, + "MemStats array size is incorrect", + PVRSRV_ERROR_INVALID_PARAMS); + + OSLockAcquire(g_psLinkedListLock); + + /* Search for the given PID in the Live List */ + psProcessStats = _FindProcessStatsInLiveList(pid); + + if (psProcessStats == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "Process %d not found. 
This process may not be live anymore.", (IMG_INT)pid)); + OSLockRelease(g_psLinkedListLock); + + return PVRSRV_ERROR_PROCESS_NOT_FOUND; + } + + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + for (i = 0; i < ui32ArrSize; i++) + { + pui32MemoryStats[i] = psProcessStats->i32StatValue[i]; + } + OSLockRelease(psProcessStats->hLock); + + OSLockRelease(g_psLinkedListLock); + + return PVRSRV_OK; + +} /* PVRSRVFindProcessMemStats */ + +/*************************************************************************/ /*! +@Function PVRSRVGetProcessMemUsage +@Description Calculate allocated kernel and graphics memory for all live or + connected processes. Memstat values provided by this API relate + only to the physical memory allocated by the process and does + not relate to any of the mapped or imported memory. +@Output pui32TotalMem Total memory usage for all live + PIDs connected to the driver. +@Output pui32NumberOfLivePids Number of live pids currently + connected to the server. +@Output ppsPerProcessMemUsageData Handle to an array of + PVRSRV_PER_PROCESS_MEM_USAGE, + number of elements defined by + pui32NumberOfLivePids. +@Return PVRSRV_OK Success + PVRSRV_ERROR_PROCESS_NOT_FOUND No live processes. + PVRSRV_ERROR_OUT_OF_MEMORY Failed to allocate memory for + ppsPerProcessMemUsageData. 
+*/ /**************************************************************************/ +PVRSRV_ERROR PVRSRVGetProcessMemUsage(IMG_UINT32 *pui32TotalMem, + IMG_UINT32 *pui32NumberOfLivePids, + PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsageData) +{ + IMG_UINT32 ui32Counter = 0; + IMG_UINT32 ui32NumberOfLivePids = 0; + PVRSRV_ERROR eError = PVRSRV_ERROR_PROCESS_NOT_FOUND; + PVRSRV_PROCESS_STATS* psProcessStats = NULL; + PVRSRV_PER_PROCESS_MEM_USAGE* psPerProcessMemUsageData = NULL; + + OSLockAcquire(gsGlobalStats.hGlobalStatsLock); + + *pui32TotalMem = GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_KMALLOC) + + GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_VMALLOC) + + GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA) + + GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA) + + GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA) + + GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA); + + OSLockRelease(gsGlobalStats.hGlobalStatsLock); + + OSLockAcquire(g_psLinkedListLock); + psProcessStats = g_psLiveList; + + while (psProcessStats != NULL) + { + psProcessStats = psProcessStats->psNext; + ui32NumberOfLivePids++; + } + + if (ui32NumberOfLivePids > 0) + { + /* Use OSAllocZMemNoStats to prevent deadlock. 
*/ + psPerProcessMemUsageData = OSAllocZMemNoStats(ui32NumberOfLivePids * sizeof(*psPerProcessMemUsageData)); + + if (psPerProcessMemUsageData) + { + psProcessStats = g_psLiveList; + + while (psProcessStats != NULL) + { + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + + psPerProcessMemUsageData[ui32Counter].ui32Pid = (IMG_UINT32)psProcessStats->pid; + + psPerProcessMemUsageData[ui32Counter].ui32KernelMemUsage = psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] + + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMALLOC]; + + psPerProcessMemUsageData[ui32Counter].ui32GraphicsMemUsage = psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA] + + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA] + + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES] + + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES]; + + OSLockRelease(psProcessStats->hLock); + psProcessStats = psProcessStats->psNext; + ui32Counter++; + } + eError = PVRSRV_OK; + } + else + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + } + } + + OSLockRelease(g_psLinkedListLock); + *pui32NumberOfLivePids = ui32NumberOfLivePids; + *ppsPerProcessMemUsageData = psPerProcessMemUsageData; + + return eError; + +} /* PVRSRVGetProcessMemUsage */ diff --git a/drivers/mcst/gpu-imgtec/services/server/common/pvr_notifier.c b/drivers/mcst/gpu-imgtec/services/server/common/pvr_notifier.c new file mode 100644 index 000000000000..ee803a0000c0 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/pvr_notifier.c @@ -0,0 +1,518 @@ +/*************************************************************************/ /*! +@File +@Title PowerVR notifier interface +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "img_defs.h" +#include "allocmem.h" +#include "dllist.h" + +#include "device.h" +#include "pvr_notifier.h" +#include "pvrsrv.h" +#include "pvrversion.h" +#include "connection_server.h" + +#include "osfunc.h" +#include "sofunc_pvr.h" + +#define PVR_DUMP_DRIVER_INFO(x, y) \ + PVR_DUMPDEBUG_LOG("%s info: %d.%d @ %8d (%s) build options: 0x%08x", \ + (x), \ + PVRVERSION_UNPACK_MAJ((y).ui32BuildVersion), \ + PVRVERSION_UNPACK_MIN((y).ui32BuildVersion), \ + (y).ui32BuildRevision, \ + (BUILD_TYPE_DEBUG == (y).ui32BuildType) ? "debug":"release", \ + (y).ui32BuildOptions); + +#if !defined(WINDOW_SYSTEM) +#define WINDOW_SYSTEM "Unknown" +#endif + +#define IS_DECLARED(x) (x[0] != '\0') + +/*************************************************************************/ /*! +Command Complete Notifier Interface +*/ /**************************************************************************/ + +typedef struct PVRSRV_CMDCOMP_NOTIFY_TAG +{ + PVRSRV_CMDCOMP_HANDLE hCmdCompHandle; + PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify; + DLLIST_NODE sListNode; +} PVRSRV_CMDCOMP_NOTIFY; + +/* Head of the list of callbacks called when command complete happens */ +static DLLIST_NODE g_sCmdCompNotifyHead; +static POSWR_LOCK g_hCmdCompNotifyLock; + +PVRSRV_ERROR +PVRSRVCmdCompleteInit(void) +{ + PVRSRV_ERROR eError; + + eError = OSWRLockCreate(&g_hCmdCompNotifyLock); + PVR_RETURN_IF_ERROR(eError); + + dllist_init(&g_sCmdCompNotifyHead); + + return PVRSRV_OK; +} + +void +PVRSRVCmdCompleteDeinit(void) +{ + /* Check that all notify function have been unregistered */ + if (!dllist_is_empty(&g_sCmdCompNotifyHead)) + { + PDLLIST_NODE psNode; + + PVR_DPF((PVR_DBG_ERROR, + "%s: Command complete notify list is not empty!", __func__)); + + /* Clean up any stragglers */ + psNode = dllist_get_next_node(&g_sCmdCompNotifyHead); + while (psNode) + { + PVRSRV_CMDCOMP_NOTIFY *psNotify; + + dllist_remove_node(psNode); + + psNotify = 
IMG_CONTAINER_OF(psNode, PVRSRV_CMDCOMP_NOTIFY, sListNode); + OSFreeMem(psNotify); + + psNode = dllist_get_next_node(&g_sCmdCompNotifyHead); + } + } + + if (g_hCmdCompNotifyLock) + { + OSWRLockDestroy(g_hCmdCompNotifyLock); + } +} + +PVRSRV_ERROR +PVRSRVRegisterCmdCompleteNotify(IMG_HANDLE *phNotify, + PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify, + PVRSRV_CMDCOMP_HANDLE hCmdCompHandle) +{ + PVRSRV_CMDCOMP_NOTIFY *psNotify; + + PVR_LOG_RETURN_IF_INVALID_PARAM(phNotify, "phNotify"); + PVR_LOG_RETURN_IF_INVALID_PARAM(pfnCmdCompleteNotify, "pfnCmdCompleteNotify"); + PVR_LOG_RETURN_IF_INVALID_PARAM(hCmdCompHandle, "hCmdCompHandle"); + + psNotify = OSAllocMem(sizeof(*psNotify)); + PVR_LOG_RETURN_IF_NOMEM(psNotify, "psNotify"); + + /* Set-up the notify data */ + psNotify->hCmdCompHandle = hCmdCompHandle; + psNotify->pfnCmdCompleteNotify = pfnCmdCompleteNotify; + + /* Add it to the list of Notify functions */ + OSWRLockAcquireWrite(g_hCmdCompNotifyLock); + dllist_add_to_tail(&g_sCmdCompNotifyHead, &psNotify->sListNode); + OSWRLockReleaseWrite(g_hCmdCompNotifyLock); + + *phNotify = psNotify; + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PVRSRVUnregisterCmdCompleteNotify(IMG_HANDLE hNotify) +{ + PVRSRV_CMDCOMP_NOTIFY *psNotify; + + psNotify = (PVRSRV_CMDCOMP_NOTIFY *) hNotify; + PVR_LOG_RETURN_IF_INVALID_PARAM(psNotify, "hNotify"); + + OSWRLockAcquireWrite(g_hCmdCompNotifyLock); + dllist_remove_node(&psNotify->sListNode); + OSWRLockReleaseWrite(g_hCmdCompNotifyLock); + + OSFreeMem(psNotify); + + return PVRSRV_OK; +} + +void +PVRSRVCheckStatus(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); +#if !defined(NO_HARDWARE) + DLLIST_NODE *psNode, *psNext; +#endif + + /* Call notify callbacks to check if blocked work items can now proceed */ +#if !defined(NO_HARDWARE) + OSWRLockAcquireRead(g_hCmdCompNotifyLock); + dllist_foreach_node(&g_sCmdCompNotifyHead, psNode, psNext) + { + PVRSRV_CMDCOMP_NOTIFY *psNotify = + IMG_CONTAINER_OF(psNode, 
PVRSRV_CMDCOMP_NOTIFY, sListNode); + + if (hCmdCompCallerHandle != psNotify->hCmdCompHandle) + { + psNotify->pfnCmdCompleteNotify(psNotify->hCmdCompHandle); + } + } + OSWRLockReleaseRead(g_hCmdCompNotifyLock); +#endif + + if (psPVRSRVData->hGlobalEventObject) + { + OSEventObjectSignal(psPVRSRVData->hGlobalEventObject); + } +} + +/*************************************************************************/ /*! +Debug Notifier Interface +*/ /**************************************************************************/ + +typedef struct DEBUG_REQUEST_ENTRY_TAG +{ + IMG_UINT32 ui32RequesterID; + DLLIST_NODE sListHead; +} DEBUG_REQUEST_ENTRY; + +typedef struct DEBUG_REQUEST_TABLE_TAG +{ + POSWR_LOCK hLock; + IMG_UINT32 ui32RequestCount; + DEBUG_REQUEST_ENTRY asEntry[1]; +} DEBUG_REQUEST_TABLE; + +typedef struct DEBUG_REQUEST_NOTIFY_TAG +{ + PVRSRV_DEVICE_NODE *psDevNode; + PVRSRV_DBGREQ_HANDLE hDbgRequestHandle; + PFN_DBGREQ_NOTIFY pfnDbgRequestNotify; + IMG_UINT32 ui32RequesterID; + DLLIST_NODE sListNode; +} DEBUG_REQUEST_NOTIFY; + + +PVRSRV_ERROR +PVRSRVRegisterDbgTable(PVRSRV_DEVICE_NODE *psDevNode, + const IMG_UINT32 *paui32Table, IMG_UINT32 ui32Length) +{ + DEBUG_REQUEST_TABLE *psDebugTable; + IMG_UINT32 i; + PVRSRV_ERROR eError; + + if (psDevNode->hDebugTable) + { + return PVRSRV_ERROR_DBGTABLE_ALREADY_REGISTERED; + } + + psDebugTable = OSAllocMem(sizeof(DEBUG_REQUEST_TABLE) + + (sizeof(DEBUG_REQUEST_ENTRY) * (ui32Length-1))); + PVR_RETURN_IF_NOMEM(psDebugTable); + + eError = OSWRLockCreate(&psDebugTable->hLock); + PVR_GOTO_IF_ERROR(eError, ErrorFreeDebugTable); + + psDebugTable->ui32RequestCount = ui32Length; + + /* Init the list heads */ + for (i = 0; i < ui32Length; i++) + { + psDebugTable->asEntry[i].ui32RequesterID = paui32Table[i]; + dllist_init(&psDebugTable->asEntry[i].sListHead); + } + + psDevNode->hDebugTable = (IMG_HANDLE *) psDebugTable; + + return PVRSRV_OK; + +ErrorFreeDebugTable: + OSFreeMem(psDebugTable); + + return eError; +} + +void 
+PVRSRVUnregisterDbgTable(PVRSRV_DEVICE_NODE *psDevNode) +{ + DEBUG_REQUEST_TABLE *psDebugTable; + IMG_UINT32 i; + + PVR_ASSERT(psDevNode->hDebugTable); + psDebugTable = (DEBUG_REQUEST_TABLE *) psDevNode->hDebugTable; + psDevNode->hDebugTable = NULL; + + for (i = 0; i < psDebugTable->ui32RequestCount; i++) + { + if (!dllist_is_empty(&psDebugTable->asEntry[i].sListHead)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Found registered callback(s) on %d", + __func__, i)); + } + } + + OSWRLockDestroy(psDebugTable->hLock); + psDebugTable->hLock = NULL; + + OSFreeMem(psDebugTable); +} + +PVRSRV_ERROR +PVRSRVRegisterDbgRequestNotify(IMG_HANDLE *phNotify, + PVRSRV_DEVICE_NODE *psDevNode, + PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, + IMG_UINT32 ui32RequesterID, + PVRSRV_DBGREQ_HANDLE hDbgRequestHandle) +{ + DEBUG_REQUEST_TABLE *psDebugTable; + DEBUG_REQUEST_NOTIFY *psNotify; + PDLLIST_NODE psHead = NULL; + IMG_UINT32 i; + PVRSRV_ERROR eError; + + PVR_LOG_RETURN_IF_INVALID_PARAM(phNotify, "phNotify"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode, "psDevNode"); + PVR_LOG_RETURN_IF_INVALID_PARAM(pfnDbgRequestNotify, "pfnDbRequestNotify"); + + psDebugTable = (DEBUG_REQUEST_TABLE *) psDevNode->hDebugTable; + + PVR_ASSERT(psDebugTable); + + /* NoStats used since this may be called outside of the register/de-register + * process calls which track memory use. 
*/ + psNotify = OSAllocMemNoStats(sizeof(*psNotify)); + PVR_LOG_RETURN_IF_NOMEM(psNotify, "psNotify"); + + /* Set-up the notify data */ + psNotify->psDevNode = psDevNode; + psNotify->hDbgRequestHandle = hDbgRequestHandle; + psNotify->pfnDbgRequestNotify = pfnDbgRequestNotify; + psNotify->ui32RequesterID = ui32RequesterID; + + /* Lock down all the lists */ + OSWRLockAcquireWrite(psDebugTable->hLock); + + /* Find which list to add it to */ + for (i = 0; i < psDebugTable->ui32RequestCount; i++) + { + if (psDebugTable->asEntry[i].ui32RequesterID == ui32RequesterID) + { + psHead = &psDebugTable->asEntry[i].sListHead; + } + } + + /* Failed to find debug requester */ + PVR_LOG_GOTO_IF_INVALID_PARAM(psHead, eError, ErrorReleaseLock); + + /* Add it to the list of Notify functions */ + dllist_add_to_tail(psHead, &psNotify->sListNode); + + /* Unlock the lists */ + OSWRLockReleaseWrite(psDebugTable->hLock); + + *phNotify = psNotify; + + return PVRSRV_OK; + +ErrorReleaseLock: + OSWRLockReleaseWrite(psDebugTable->hLock); + OSFreeMem(psNotify); + + return eError; +} + +PVRSRV_ERROR +SOPvrDbgRequestNotifyRegister(IMG_HANDLE *phNotify, + PVRSRV_DEVICE_NODE *psDevNode, + PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, + IMG_UINT32 ui32RequesterID, + PVRSRV_DBGREQ_HANDLE hDbgRequestHandle) +{ + return PVRSRVRegisterDbgRequestNotify(phNotify, + psDevNode, + pfnDbgRequestNotify, + ui32RequesterID, + hDbgRequestHandle); +} + +PVRSRV_ERROR +PVRSRVUnregisterDbgRequestNotify(IMG_HANDLE hNotify) +{ + DEBUG_REQUEST_NOTIFY *psNotify = (DEBUG_REQUEST_NOTIFY *) hNotify; + DEBUG_REQUEST_TABLE *psDebugTable; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psNotify, "psNotify"); + + psDebugTable = (DEBUG_REQUEST_TABLE *) psNotify->psDevNode->hDebugTable; + + OSWRLockAcquireWrite(psDebugTable->hLock); + dllist_remove_node(&psNotify->sListNode); + OSWRLockReleaseWrite(psDebugTable->hLock); + + OSFreeMemNoStats(psNotify); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +SOPvrDbgRequestNotifyUnregister(IMG_HANDLE hNotify) +{ + 
return PVRSRVUnregisterDbgRequestNotify(hNotify); +} + +void +PVRSRVDebugRequest(PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + DEBUG_REQUEST_TABLE *psDebugTable = + (DEBUG_REQUEST_TABLE *) psDevNode->hDebugTable; + static const IMG_CHAR *apszVerbosityTable[] = { "Low", "Medium", "High" }; + const IMG_CHAR *szVerbosityLevel; + const IMG_CHAR *Bit32 = "32 Bit", *Bit64 = "64 Bit"; + IMG_UINT32 i; + + static_assert(ARRAY_SIZE(apszVerbosityTable) == DEBUG_REQUEST_VERBOSITY_MAX+1, + "Incorrect number of verbosity levels"); + + PVR_ASSERT(psDebugTable); + + OSWRLockAcquireRead(psDebugTable->hLock); + + if (ui32VerbLevel < ARRAY_SIZE(apszVerbosityTable)) + { + szVerbosityLevel = apszVerbosityTable[ui32VerbLevel]; + } + else + { + szVerbosityLevel = "unknown"; + PVR_ASSERT(!"Invalid verbosity level received"); + } + + PVR_DUMPDEBUG_LOG("------------[ PVR DBG: START (%s) ]------------", + szVerbosityLevel); + + OSDumpVersionInfo(pfnDumpDebugPrintf, pvDumpDebugFile); + + PVR_DUMPDEBUG_LOG("DDK info: %s (%s) %s", + PVRVERSION_STRING, PVR_BUILD_TYPE, PVR_BUILD_DIR); + + PVR_DUMPDEBUG_LOG("Time now: %" IMG_UINT64_FMTSPEC "us", + OSClockus64()); + + switch (psPVRSRVData->eServicesState) + { + case PVRSRV_SERVICES_STATE_OK: + PVR_DUMPDEBUG_LOG("Services State: OK"); + break; + case PVRSRV_SERVICES_STATE_BAD: + PVR_DUMPDEBUG_LOG("Services State: BAD"); + break; + case PVRSRV_SERVICES_STATE_UNDEFINED: + PVR_DUMPDEBUG_LOG("Services State: UNDEFINED"); + break; + default: + PVR_DUMPDEBUG_LOG("Services State: UNKNOWN (%d)", + psPVRSRVData->eServicesState); + break; + } + + PVRSRVConnectionDebugNotify(pfnDumpDebugPrintf, pvDumpDebugFile); + + PVR_DUMPDEBUG_LOG("------[ Driver Info ]------"); + + PVR_DUMPDEBUG_LOG("Comparison of UM/KM components: %s", + (psPVRSRVData->sDriverInfo.bIsNoMatch) ? 
"MISMATCH" : "MATCHING"); + + PVR_DUMPDEBUG_LOG("KM Arch: %s", + (psPVRSRVData->sDriverInfo.ui8KMBitArch & BUILD_ARCH_64BIT) ? Bit64 : Bit32); + + if (!PVRSRV_VZ_MODE_IS(NATIVE)) + { + PVR_DUMPDEBUG_LOG("Driver Mode: %s", + (PVRSRV_VZ_MODE_IS(HOST)) ? "Host":"Guest"); + } + + if (psPVRSRVData->sDriverInfo.ui8UMSupportedArch) + { + if ((psPVRSRVData->sDriverInfo.ui8UMSupportedArch & BUILD_ARCH_BOTH) == + BUILD_ARCH_BOTH) + { + PVR_DUMPDEBUG_LOG("UM Connected Clients Arch: %s and %s", Bit64, Bit32); + + }else + { + PVR_DUMPDEBUG_LOG("UM Connected Clients: %s", + (psPVRSRVData->sDriverInfo.ui8UMSupportedArch & BUILD_ARCH_64BIT) ? Bit64 : Bit32); + } + } + + PVR_DUMP_DRIVER_INFO("UM", psPVRSRVData->sDriverInfo.sUMBuildInfo); + PVR_DUMP_DRIVER_INFO("KM", psPVRSRVData->sDriverInfo.sKMBuildInfo); + + PVR_DUMPDEBUG_LOG("Window system: %s", (IS_DECLARED(WINDOW_SYSTEM)) ? (WINDOW_SYSTEM) : "Not declared"); + + /* For each requester */ + for (i = 0; i < psDebugTable->ui32RequestCount; i++) + { + DLLIST_NODE *psNode; + DLLIST_NODE *psNext; + + /* For each notifier on this requestor */ + dllist_foreach_node(&psDebugTable->asEntry[i].sListHead, psNode, psNext) + { + DEBUG_REQUEST_NOTIFY *psNotify = + IMG_CONTAINER_OF(psNode, DEBUG_REQUEST_NOTIFY, sListNode); + psNotify->pfnDbgRequestNotify(psNotify->hDbgRequestHandle, ui32VerbLevel, + pfnDumpDebugPrintf, pvDumpDebugFile); + } + } + + PVR_DUMPDEBUG_LOG("------------[ PVR DBG: END ]------------"); + OSWRLockReleaseRead(psDebugTable->hLock); + + if (!pfnDumpDebugPrintf) + { + /* Only notify OS of an issue if the debug dump has gone there */ + OSWarnOn(IMG_TRUE); + } +} diff --git a/drivers/mcst/gpu-imgtec/services/server/common/pvrsrv.c b/drivers/mcst/gpu-imgtec/services/server/common/pvrsrv.c new file mode 100644 index 000000000000..e77850118631 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/pvrsrv.c @@ -0,0 +1,3499 @@ +/*************************************************************************/ /*! 
+@File +@Title core services functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Main APIs for core services functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "rgxdebug.h" +#include "handle.h" +#include "connection_server.h" +#include "osconnection_server.h" +#include "pdump_km.h" +#include "ra.h" +#include "allocmem.h" +#include "pmr.h" +#include "pvrsrv.h" +#include "srvcore.h" +#include "services_km.h" +#include "pvrsrv_device.h" +#include "pvr_debug.h" +#include "pvr_notifier.h" +#include "sync.h" +#include "sync_server.h" +#include "sync_checkpoint.h" +#include "sync_fallback_server.h" +#include "sync_checkpoint_init.h" +#include "devicemem.h" +#include "cache_km.h" +#include "info_page.h" +#include "info_page_defs.h" +#include "pvrsrv_bridge_init.h" +#include "devicemem_server.h" +#include "km_apphint_defs.h" +#include "di_server.h" +#include "htb_debug.h" + +#include "log2.h" + +#include "lists.h" +#include "dllist.h" +#include "syscommon.h" +#include "sysvalidation.h" + +#include "physmem_lma.h" +#include "physmem_osmem.h" +#include "physmem_hostmem.h" + +#include "tlintern.h" +#include "htbserver.h" + +#if defined(SUPPORT_RGX) +#include "rgxinit.h" +#include "rgxhwperf.h" +#include "rgxfwutils.h" +#endif + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +#include "ri_server.h" +#endif + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#endif + +#include "vz_vmm_pvz.h" + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + #if !defined(GPUVIRT_SIZEOF_ARENA0) + 
#define GPUVIRT_SIZEOF_ARENA0 64 * 1024 * 1024 //Giving 64 megs of LMA memory to arena 0 for firmware and other allocations + #endif +#endif + +#include "devicemem_history_server.h" + +#if defined(SUPPORT_LINUX_DVFS) +#include "pvr_dvfs_device.h" +#endif + +#if defined(SUPPORT_DISPLAY_CLASS) +#include "dc_server.h" +#endif + +#include "rgx_options.h" +#include "srvinit.h" +#include "rgxutils.h" + +#include "oskm_apphint.h" +#include "pvrsrv_apphint.h" + +#include "pvrsrv_tlstreams.h" +#include "tlstream.h" + +#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) +#include "physmem_test.h" +#endif + +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) +#define INFINITE_SLEEP_TIMEOUT 0ULL +#endif + +/*! Wait 100ms before retrying deferred clean-up again */ +#define CLEANUP_THREAD_WAIT_RETRY_TIMEOUT 100000ULL + +/*! Wait 8hrs when no deferred clean-up required. Allows a poll several times + * a day to check for any missed clean-up. */ +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) +#define CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT INFINITE_SLEEP_TIMEOUT +#else +#define CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT 28800000000ULL +#endif + +/*! 
When unloading try a few times to free everything remaining on the list */ +#define CLEANUP_THREAD_UNLOAD_RETRY 4 + +#define PVRSRV_PROC_HANDLE_BASE_INIT 10 + +#define PVRSRV_TL_CTLR_STREAM_SIZE 4096 + +static PVRSRV_DATA *gpsPVRSRVData; +static IMG_UINT32 g_ui32InitFlags; + +/* mark which parts of Services were initialised */ +#define INIT_DATA_ENABLE_PDUMPINIT 0x1U + +static IMG_UINT32 g_aui32DebugOrderTable[] = { + DEBUG_REQUEST_SYS, + DEBUG_REQUEST_APPHINT, + DEBUG_REQUEST_HTB, + DEBUG_REQUEST_DC, + DEBUG_REQUEST_SYNCCHECKPOINT, + DEBUG_REQUEST_SYNCTRACKING, + DEBUG_REQUEST_ANDROIDSYNC, + DEBUG_REQUEST_FALLBACKSYNC, + DEBUG_REQUEST_LINUXFENCE +}; + +static void LogRACreation(const char *pszMemType, + IMG_UINT32 ui32RegionId, + IMG_UINT64 ui64CpuPA, + IMG_UINT64 ui64DevPA, + IMG_UINT64 ui64Size) +{ + if ((ui64CpuPA != 0) && (ui64DevPA != 0) && (ui64CpuPA != ui64DevPA)) + { + PVR_DPF((PVR_DBG_MESSAGE, + "Creating RA for %s memory - region %d" + " - Cpu PA 0x%016"IMG_UINT64_FMTSPECx"-0x%016" IMG_UINT64_FMTSPECx + " - Dev PA 0x%016"IMG_UINT64_FMTSPECx"-0x%016" IMG_UINT64_FMTSPECx, + pszMemType, ui32RegionId, + ui64CpuPA, ui64CpuPA + ui64Size, + ui64DevPA, ui64DevPA + ui64Size)); + } + else + { + __maybe_unused IMG_UINT64 ui64PA = + (ui64CpuPA != 0) ? (ui64CpuPA) : (ui64DevPA); + __maybe_unused const IMG_CHAR *pszAddrType = + (ui64CpuPA == ui64DevPA) ? ("Cpu/Dev") : (ui64CpuPA != 0 ? 
"Cpu" : "Dev"); + + PVR_DPF((PVR_DBG_MESSAGE, + "Creating RA for %s memory - region %d - %s PA 0x%016" + IMG_UINT64_FMTSPECx"-0x%016" IMG_UINT64_FMTSPECx, + pszMemType, ui32RegionId, pszAddrType, + ui64PA, ui64PA + ui64Size)); + } +} + +PVRSRV_ERROR PVRSRVCreateRegionRA(PVRSRV_DEVICE_CONFIG *psDevConfig, + RA_ARENA **ppsRegionRA, + IMG_CHAR *pszRAName, + IMG_UINT64 ui64CpuBase, + IMG_UINT64 ui64DevBase, + IMG_UINT64 ui64Size, + IMG_UINT32 ui32RegionId, + IMG_CHAR *pszLabel) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + LogRACreation(pszLabel, ui32RegionId, ui64CpuBase, ui64DevBase, ui64Size); + + if (pszRAName != NULL) + { + OSSNPrintf(pszRAName, + PVRSRV_MAX_RA_NAME_LENGTH, + "%s: %s [%u]", + psDevConfig->pszName, + pszLabel, + ui32RegionId); + } + + *ppsRegionRA = RA_Create((pszRAName != NULL) ? pszRAName : pszLabel, + OSGetPageShift(), /* Use OS page size, keeps things simple */ + RA_LOCKCLASS_0, /* This arena doesn't use any other arenas. */ + NULL, /* No Import */ + NULL, /* No free import */ + NULL, /* No import handle */ + IMG_FALSE); /* No restriction on import splitting */ + PVR_LOG_GOTO_IF_NOMEM(*ppsRegionRA, eError, e0); + + if (!RA_Add(*ppsRegionRA, + (RA_BASE_T) ui64DevBase, + (RA_LENGTH_T) ui64Size, 0, NULL)) + { + RA_Delete(*ppsRegionRA); + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_LOG_ERROR(eError, "RA_Add"); + } + +e0: + return eError; +} +/* Callback to dump info of cleanup thread in debug_dump */ +static void CleanupThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVRSRV_DATA *psPVRSRVData; + psPVRSRVData = PVRSRVGetPVRSRVData(); + + PVR_DUMPDEBUG_LOG(" Number of deferred cleanup items : %u", + OSAtomicRead(&psPVRSRVData->i32NumCleanupItems)); +} + +/* Add work to the cleanup thread work list. 
+ * The work item will be executed by the cleanup thread + */ +void PVRSRVCleanupThreadAddWork(PVRSRV_CLEANUP_THREAD_WORK *psData) +{ + PVRSRV_DATA *psPVRSRVData; + PVRSRV_ERROR eError; + + psPVRSRVData = PVRSRVGetPVRSRVData(); + + PVR_ASSERT(psData != NULL); +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK || psPVRSRVData->bUnload) +#else + if (psPVRSRVData->bUnload) +#endif + { + CLEANUP_THREAD_FN pfnFree = psData->pfnFree; + + PVR_DPF((PVR_DBG_MESSAGE, "Cleanup thread has already quit: doing work immediately")); + + eError = pfnFree(psData->pvData); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to free resource " + "(callback " IMG_PFN_FMTSPEC "). " + "Immediate free will not be retried.", + pfnFree)); + } + } + else + { + OS_SPINLOCK_FLAGS uiFlags; + + /* add this work item to the list */ + OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + dllist_add_to_tail(&psPVRSRVData->sCleanupThreadWorkList, &psData->sNode); + OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + + OSAtomicIncrement(&psPVRSRVData->i32NumCleanupItems); + + /* signal the cleanup thread to ensure this item gets processed */ + eError = OSEventObjectSignal(psPVRSRVData->hCleanupEventObject); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + } +} + +/* Pop an item from the head of the cleanup thread work list */ +static INLINE DLLIST_NODE *_CleanupThreadWorkListPop(PVRSRV_DATA *psPVRSRVData) +{ + DLLIST_NODE *psNode; + OS_SPINLOCK_FLAGS uiFlags; + + OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + psNode = dllist_get_next_node(&psPVRSRVData->sCleanupThreadWorkList); + if (psNode != NULL) + { + dllist_remove_node(psNode); + } + OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + + return psNode; +} + +/* Process the cleanup thread work list */ +static IMG_BOOL _CleanupThreadProcessWorkList(PVRSRV_DATA *psPVRSRVData, + IMG_BOOL 
*pbUseGlobalEO) +{ + DLLIST_NODE *psNodeIter, *psNodeLast; + PVRSRV_ERROR eError; + IMG_BOOL bNeedRetry = IMG_FALSE; + OS_SPINLOCK_FLAGS uiFlags; + + /* any callback functions which return error will be + * moved to the back of the list, and additional items can be added + * to the list at any time so we ensure we only iterate from the + * head of the list to the current tail (since the tail may always + * be changing) + */ + + OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + psNodeLast = dllist_get_prev_node(&psPVRSRVData->sCleanupThreadWorkList); + OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + + if (psNodeLast == NULL) + { + /* no elements to clean up */ + return IMG_FALSE; + } + + do + { + psNodeIter = _CleanupThreadWorkListPop(psPVRSRVData); + + if (psNodeIter != NULL) + { + PVRSRV_CLEANUP_THREAD_WORK *psData = IMG_CONTAINER_OF(psNodeIter, PVRSRV_CLEANUP_THREAD_WORK, sNode); + CLEANUP_THREAD_FN pfnFree; + + /* get the function pointer address here so we have access to it + * in order to report the error in case of failure, without having + * to depend on psData not having been freed + */ + pfnFree = psData->pfnFree; + + *pbUseGlobalEO = psData->bDependsOnHW; + eError = pfnFree(psData->pvData); + + if (eError != PVRSRV_OK) + { + /* move to back of the list, if this item's + * retry count hasn't hit zero. + */ + if (CLEANUP_THREAD_IS_RETRY_TIMEOUT(psData)) + { + if (CLEANUP_THREAD_RETRY_TIMEOUT_REACHED(psData)) + { + bNeedRetry = IMG_TRUE; + } + } + else + { + if (psData->ui32RetryCount-- > 0) + { + bNeedRetry = IMG_TRUE; + } + } + + if (bNeedRetry) + { + OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + dllist_add_to_tail(&psPVRSRVData->sCleanupThreadWorkList, psNodeIter); + OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "Failed to free resource " + "(callback " IMG_PFN_FMTSPEC "). 
" + "Retry limit reached", + pfnFree)); + } + } + else + { + OSAtomicDecrement(&psPVRSRVData->i32NumCleanupItems); + } + } + } while ((psNodeIter != NULL) && (psNodeIter != psNodeLast)); + + return bNeedRetry; +} + +// #define CLEANUP_DPFL PVR_DBG_WARNING +#define CLEANUP_DPFL PVR_DBG_MESSAGE + +/* Create/initialise data required by the cleanup thread, + * before the cleanup thread is started + */ +static PVRSRV_ERROR _CleanupThreadPrepare(PVRSRV_DATA *psPVRSRVData) +{ + PVRSRV_ERROR eError; + + /* Create the clean up event object */ + + eError = OSEventObjectCreate("PVRSRV_CLEANUP_EVENTOBJECT", &gpsPVRSRVData->hCleanupEventObject); + PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", Exit); + + /* initialise the mutex and linked list required for the cleanup thread work list */ + + eError = OSSpinLockCreate(&psPVRSRVData->hCleanupThreadWorkListLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", Exit); + + dllist_init(&psPVRSRVData->sCleanupThreadWorkList); + +Exit: + return eError; +} + +static void CleanupThread(void *pvData) +{ + PVRSRV_DATA *psPVRSRVData = pvData; + IMG_BOOL bRetryWorkList = IMG_FALSE; + IMG_HANDLE hGlobalEvent; + IMG_HANDLE hOSEvent; + PVRSRV_ERROR eRc; + IMG_BOOL bUseGlobalEO = IMG_FALSE; + IMG_UINT32 uiUnloadRetry = 0; + + /* Store the process id (pid) of the clean-up thread */ + psPVRSRVData->cleanupThreadPid = OSGetCurrentProcessID(); + OSAtomicWrite(&psPVRSRVData->i32NumCleanupItems, 0); + + PVR_DPF((CLEANUP_DPFL, "CleanupThread: thread starting... ")); + + /* Open an event on the clean up event object so we can listen on it, + * abort the clean up thread and driver if this fails. 
+ */ + eRc = OSEventObjectOpen(psPVRSRVData->hCleanupEventObject, &hOSEvent); + PVR_ASSERT(eRc == PVRSRV_OK); + + eRc = OSEventObjectOpen(psPVRSRVData->hGlobalEventObject, &hGlobalEvent); + PVR_ASSERT(eRc == PVRSRV_OK); + + /* While the driver is in a good state and is not being unloaded + * try to free any deferred items when signalled + */ + while (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) + { + IMG_HANDLE hEvent; + + if (psPVRSRVData->bUnload) + { + if (dllist_is_empty(&psPVRSRVData->sCleanupThreadWorkList) || + uiUnloadRetry > CLEANUP_THREAD_UNLOAD_RETRY) + { + break; + } + uiUnloadRetry++; + } + + /* Wait until signalled for deferred clean up OR wait for a + * short period if the previous deferred clean up was not able + * to release all the resources before trying again. + * Bridge lock re-acquired on our behalf before the wait call returns. + */ + + if (bRetryWorkList && bUseGlobalEO) + { + hEvent = hGlobalEvent; + } + else + { + hEvent = hOSEvent; + } + + eRc = OSEventObjectWaitKernel(hEvent, + bRetryWorkList ? + CLEANUP_THREAD_WAIT_RETRY_TIMEOUT : + CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT); + if (eRc == PVRSRV_ERROR_TIMEOUT) + { + PVR_DPF((CLEANUP_DPFL, "CleanupThread: wait timeout")); + } + else if (eRc == PVRSRV_OK) + { + PVR_DPF((CLEANUP_DPFL, "CleanupThread: wait OK, signal received")); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "CleanupThread: wait error %d", eRc)); + } + + bRetryWorkList = _CleanupThreadProcessWorkList(psPVRSRVData, &bUseGlobalEO); + } + + OSSpinLockDestroy(psPVRSRVData->hCleanupThreadWorkListLock); + + eRc = OSEventObjectClose(hOSEvent); + PVR_LOG_IF_ERROR(eRc, "OSEventObjectClose"); + + eRc = OSEventObjectClose(hGlobalEvent); + PVR_LOG_IF_ERROR(eRc, "OSEventObjectClose"); + + PVR_DPF((CLEANUP_DPFL, "CleanupThread: thread ending... 
")); +} + +static void DevicesWatchdogThread_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, + va_list va) +{ +#if defined(SUPPORT_RGX) + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; +#endif + PVRSRV_DEVICE_HEALTH_STATUS *pePreviousHealthStatus, eHealthStatus; + PVRSRV_ERROR eError; + PVRSRV_DEVICE_DEBUG_DUMP_STATUS eDebugDumpState; + IMG_BOOL bCheckAfterTimePassed; + + pePreviousHealthStatus = va_arg(va, PVRSRV_DEVICE_HEALTH_STATUS *); + bCheckAfterTimePassed = va_arg(va, IMG_BOOL); + + if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) + { + return; + } + + if (psDeviceNode->pfnUpdateHealthStatus != NULL) + { + eError = psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, bCheckAfterTimePassed); + PVR_WARN_IF_ERROR(eError, "pfnUpdateHealthStatus"); + } + eHealthStatus = OSAtomicRead(&psDeviceNode->eHealthStatus); + + if (eHealthStatus != PVRSRV_DEVICE_HEALTH_STATUS_OK) + { + if (eHealthStatus != *pePreviousHealthStatus) + { +#if defined(SUPPORT_RGX) + if (!(psDevInfo->ui32DeviceFlags & + RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN)) +#else + /* In this case we don't have an RGX device */ + if (eHealthStatus != PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED) +#endif + { + PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: " + "Device status not OK!!!")); + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, + NULL, NULL); + } + } + } + + *pePreviousHealthStatus = eHealthStatus; + + /* Have we received request from FW to capture debug dump(could be due to HWR) */ + eDebugDumpState = (PVRSRV_DEVICE_DEBUG_DUMP_STATUS)OSAtomicCompareExchange( + &psDeviceNode->eDebugDumpRequested, + PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE, + PVRSRV_DEVICE_DEBUG_DUMP_NONE); + if (PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE == eDebugDumpState) + { + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + } + +} + +#if defined(SUPPORT_RGX) +static void HWPerfPeriodicHostEventsThread(void *pvData) +{ + PVRSRV_DATA *psPVRSRVData = pvData; + IMG_HANDLE 
hOSEvent; + PVRSRV_ERROR eError; + IMG_BOOL bHostStreamIsOpenForReading; + PVRSRV_RGXDEV_INFO *psDevInfo; + + eError = OSEventObjectOpen(psPVRSRVData->hHWPerfHostPeriodicEvObj, &hOSEvent); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectOpen"); + +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) && + !psPVRSRVData->bUnload && !psPVRSRVData->bHWPerfHostThreadStop) +#else + while (!psPVRSRVData->bUnload && !psPVRSRVData->bHWPerfHostThreadStop) +#endif + { + eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64)psPVRSRVData->ui32HWPerfHostThreadTimeout * 1000); + if (eError == PVRSRV_OK && (psPVRSRVData->bUnload || psPVRSRVData->bHWPerfHostThreadStop)) + { + PVR_DPF((PVR_DBG_MESSAGE, "HWPerfPeriodicHostEventsThread: Shutdown event received.")); + break; + } + + psDevInfo = (PVRSRV_RGXDEV_INFO*)psPVRSRVData->psDeviceNodeList->pvDevice; + + /* Check if the HWPerf host stream is open for reading before writing a packet, + this covers cases where the event filter is not zeroed before a reader disconnects. */ + bHostStreamIsOpenForReading = TLStreamIsOpenForReading(psDevInfo->hHWPerfHostStream); + + if (bHostStreamIsOpenForReading) + { +#if defined(SUPPORT_RGX) + RGXSRV_HWPERF_HOST_INFO(psPVRSRVData->psDeviceNodeList->pvDevice, RGX_HWPERF_INFO_EV_MEM_USAGE); +#endif + } + else + { +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) + psPVRSRVData->ui32HWPerfHostThreadTimeout = INFINITE_SLEEP_TIMEOUT; +#else + /* This 'long' timeout is temporary until functionality is added to services to put a thread to sleep indefinitely. 
*/ + psPVRSRVData->ui32HWPerfHostThreadTimeout = 60 * 60 * 8 * 1000; // 8 hours +#endif + } + } + + eError = OSEventObjectClose(hOSEvent); + PVR_LOG_IF_ERROR(eError, "OSEventObjectClose"); +} +#endif + +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) + +typedef enum +{ + DWT_ST_INIT, + DWT_ST_SLEEP_POWERON, + DWT_ST_SLEEP_POWEROFF, + DWT_ST_SLEEP_DEFERRED, + DWT_ST_FINAL +} DWT_STATE; + +typedef enum +{ + DWT_SIG_POWERON, + DWT_SIG_POWEROFF, + DWT_SIG_TIMEOUT, + DWT_SIG_UNLOAD, + DWT_SIG_ERROR +} DWT_SIGNAL; + +static inline IMG_BOOL _DwtIsPowerOn(PVRSRV_DATA *psPVRSRVData) +{ + return List_PVRSRV_DEVICE_NODE_IMG_BOOL_Any(psPVRSRVData->psDeviceNodeList, + PVRSRVIsDevicePowered); +} + +static inline void _DwtCheckHealthStatus(PVRSRV_DATA *psPVRSRVData, + PVRSRV_DEVICE_HEALTH_STATUS *peStatus, + IMG_BOOL bTimeOut) +{ + List_PVRSRV_DEVICE_NODE_ForEach_va(psPVRSRVData->psDeviceNodeList, + DevicesWatchdogThread_ForEachVaCb, + peStatus, + bTimeOut); +} + +static DWT_SIGNAL _DwtWait(PVRSRV_DATA *psPVRSRVData, IMG_HANDLE hOSEvent, + IMG_UINT32 ui32Timeout) +{ + PVRSRV_ERROR eError; + + eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64) ui32Timeout * 1000); + +#ifdef PVR_TESTING_UTILS + psPVRSRVData->ui32DevicesWdWakeupCounter++; +#endif + + if (eError == PVRSRV_OK) + { + if (psPVRSRVData->bUnload) + { + PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Shutdown event" + " received.")); + return DWT_SIG_UNLOAD; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power state " + "change event received.")); + + if (_DwtIsPowerOn(psPVRSRVData)) + { + return DWT_SIG_POWERON; + } + else + { + return DWT_SIG_POWEROFF; + } + } + } + else if (eError == PVRSRV_ERROR_TIMEOUT) + { + return DWT_SIG_TIMEOUT; + } + + PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: Error (%d) when" + " waiting for event!", eError)); + return DWT_SIG_ERROR; +} + +#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ + +static void DevicesWatchdogThread(void *pvData) +{ + 
PVRSRV_DATA *psPVRSRVData = pvData; + PVRSRV_DEVICE_HEALTH_STATUS ePreviousHealthStatus = PVRSRV_DEVICE_HEALTH_STATUS_OK; + IMG_HANDLE hOSEvent; + PVRSRV_ERROR eError; +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) + DWT_STATE eState = DWT_ST_INIT; + const IMG_UINT32 ui32OnTimeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT; + const IMG_UINT32 ui32OffTimeout = INFINITE_SLEEP_TIMEOUT; +#else + IMG_UINT32 ui32Timeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT; + /* Flag used to defer the sleep timeout change by 1 loop iteration. + * This helps to ensure at least two health checks are performed before a long sleep. + */ + IMG_BOOL bDoDeferredTimeoutChange = IMG_FALSE; +#endif + + PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power off sleep time: %d.", + DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT)); + + /* Open an event on the devices watchdog event object so we can listen on it + and abort the devices watchdog thread. */ + eError = OSEventObjectOpen(psPVRSRVData->hDevicesWatchdogEvObj, &hOSEvent); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectOpen"); + + /* Loop continuously checking the device status every few seconds. 
*/ +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) && + !psPVRSRVData->bUnload) +#else + while (!psPVRSRVData->bUnload) +#endif + { +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) + + switch (eState) + { + case DWT_ST_INIT: + { + if (_DwtIsPowerOn(psPVRSRVData)) + { + eState = DWT_ST_SLEEP_POWERON; + } + else + { + eState = DWT_ST_SLEEP_POWEROFF; + } + + break; + } + case DWT_ST_SLEEP_POWERON: + { + DWT_SIGNAL eSignal = _DwtWait(psPVRSRVData, hOSEvent, + ui32OnTimeout); + + switch (eSignal) { + case DWT_SIG_POWERON: + /* self-transition, nothing to do */ + break; + case DWT_SIG_POWEROFF: + eState = DWT_ST_SLEEP_DEFERRED; + break; + case DWT_SIG_TIMEOUT: + _DwtCheckHealthStatus(psPVRSRVData, + &ePreviousHealthStatus, + IMG_TRUE); + /* self-transition */ + break; + case DWT_SIG_UNLOAD: + eState = DWT_ST_FINAL; + break; + case DWT_SIG_ERROR: + /* deliberately ignored */ + break; + } + + break; + } + case DWT_ST_SLEEP_POWEROFF: + { + DWT_SIGNAL eSignal = _DwtWait(psPVRSRVData, hOSEvent, + ui32OffTimeout); + + switch (eSignal) { + case DWT_SIG_POWERON: + eState = DWT_ST_SLEEP_POWERON; + _DwtCheckHealthStatus(psPVRSRVData, + &ePreviousHealthStatus, + IMG_FALSE); + break; + case DWT_SIG_POWEROFF: + /* self-transition, nothing to do */ + break; + case DWT_SIG_TIMEOUT: + /* self-transition */ + _DwtCheckHealthStatus(psPVRSRVData, + &ePreviousHealthStatus, + IMG_TRUE); + break; + case DWT_SIG_UNLOAD: + eState = DWT_ST_FINAL; + break; + case DWT_SIG_ERROR: + /* deliberately ignored */ + break; + } + + break; + } + case DWT_ST_SLEEP_DEFERRED: + { + DWT_SIGNAL eSignal =_DwtWait(psPVRSRVData, hOSEvent, + ui32OnTimeout); + + switch (eSignal) { + case DWT_SIG_POWERON: + eState = DWT_ST_SLEEP_POWERON; + _DwtCheckHealthStatus(psPVRSRVData, + &ePreviousHealthStatus, + IMG_FALSE); + break; + case DWT_SIG_POWEROFF: + /* self-transition, nothing to do */ + break; + case DWT_SIG_TIMEOUT: + eState = 
DWT_ST_SLEEP_POWEROFF; + _DwtCheckHealthStatus(psPVRSRVData, + &ePreviousHealthStatus, + IMG_FALSE); + break; + case DWT_SIG_UNLOAD: + eState = DWT_ST_FINAL; + break; + case DWT_SIG_ERROR: + /* deliberately ignored */ + break; + } + + break; + } + case DWT_ST_FINAL: + /* the loop should terminate on next spin if this state is + * reached so nothing to do here. */ + break; + } + +#else /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ + IMG_BOOL bPwrIsOn = IMG_FALSE; + IMG_BOOL bTimeOut = IMG_FALSE; + + /* Wait time between polls (done at the start of the loop to allow devices + to initialise) or for the event signal (shutdown or power on). */ + eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64)ui32Timeout * 1000); + +#ifdef PVR_TESTING_UTILS + psPVRSRVData->ui32DevicesWdWakeupCounter++; +#endif + if (eError == PVRSRV_OK) + { + if (psPVRSRVData->bUnload) + { + PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Shutdown event received.")); + break; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power state change event received.")); + } + } + else if (eError != PVRSRV_ERROR_TIMEOUT) + { + /* If timeout do nothing otherwise print warning message. 
*/ + PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: " + "Error (%d) when waiting for event!", eError)); + } + else + { + bTimeOut = IMG_TRUE; + } + + bPwrIsOn = List_PVRSRV_DEVICE_NODE_IMG_BOOL_Any(psPVRSRVData->psDeviceNodeList, + PVRSRVIsDevicePowered); + + if (bPwrIsOn || psPVRSRVData->ui32DevicesWatchdogPwrTrans) + { + psPVRSRVData->ui32DevicesWatchdogPwrTrans = 0; + ui32Timeout = psPVRSRVData->ui32DevicesWatchdogTimeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT; + bDoDeferredTimeoutChange = IMG_FALSE; + } + else + { + /* First, check if the previous loop iteration signalled a need to change the timeout period */ + if (bDoDeferredTimeoutChange) + { + ui32Timeout = psPVRSRVData->ui32DevicesWatchdogTimeout = DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT; + bDoDeferredTimeoutChange = IMG_FALSE; + } + else + { + /* Signal that we need to change the sleep timeout in the next loop iteration + * to allow the device health check code a further iteration at the current + * sleep timeout in order to determine bad health (e.g. 
stalled cCCB) by + * comparing past and current state snapshots */ + bDoDeferredTimeoutChange = IMG_TRUE; + } + } + + List_PVRSRV_DEVICE_NODE_ForEach_va(psPVRSRVData->psDeviceNodeList, + DevicesWatchdogThread_ForEachVaCb, + &ePreviousHealthStatus, + bTimeOut); + +#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ + } + + eError = OSEventObjectClose(hOSEvent); + PVR_LOG_IF_ERROR(eError, "OSEventObjectClose"); +} + +PVRSRV_DATA *PVRSRVGetPVRSRVData(void) +{ + return gpsPVRSRVData; +} + +static PVRSRV_ERROR _HostMemDeviceCreate(void) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_DEVICE_CONFIG *psDevConfig = HostMemGetDeviceConfig(); + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + /* Assert ensures HostMemory device isn't already created and + * that data is initialised */ + PVR_ASSERT(psPVRSRVData->psHostMemDeviceNode == NULL); + + /* for now, we only know a single heap (UMA) config for host device */ + PVR_ASSERT(psDevConfig->ui32PhysHeapCount == 1 && + psDevConfig->pasPhysHeaps[0].eType == PHYS_HEAP_TYPE_UMA); + + /* N.B.- In case of any failures in this function, we just return error to + the caller, as clean-up is taken care by _HostMemDeviceDestroy function */ + + psDeviceNode = OSAllocZMem(sizeof(*psDeviceNode)); + PVR_LOG_RETURN_IF_NOMEM(psDeviceNode, "OSAllocZMem"); + + /* early save return pointer to aid clean-up */ + psPVRSRVData->psHostMemDeviceNode = psDeviceNode; + + psDeviceNode->psDevConfig = psDevConfig; + psDeviceNode->papsRegisteredPhysHeaps = + OSAllocZMem(sizeof(*psDeviceNode->papsRegisteredPhysHeaps) * + psDevConfig->ui32PhysHeapCount); + PVR_LOG_RETURN_IF_NOMEM(psDeviceNode->papsRegisteredPhysHeaps, "OSAllocZMem"); + + eError = PhysHeapRegister(&psDevConfig->pasPhysHeaps[0], + &psDeviceNode->papsRegisteredPhysHeaps[0]); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapRegister"); + psDeviceNode->ui32RegisteredPhysHeaps = 1; + + /* Only CPU local heap is valid on host-mem DevNode, so enable minimal callbacks 
*/ + eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL], + &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapAcquire"); + + psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = PhysmemNewOSRamBackedPMR; + + return PVRSRV_OK; +} + +static void _HostMemDeviceDestroy(void) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDeviceNode = psPVRSRVData->psHostMemDeviceNode; + + if (!psDeviceNode) + { + return; + } + + psPVRSRVData->psHostMemDeviceNode = NULL; + if (psDeviceNode->papsRegisteredPhysHeaps) + { + if (psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]) + { + PhysHeapRelease(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]); + } + + if (psDeviceNode->papsRegisteredPhysHeaps[0]) + { + /* clean-up function as well is aware of only one heap */ + PVR_ASSERT(psDeviceNode->ui32RegisteredPhysHeaps == 1); + PhysHeapUnregister(psDeviceNode->papsRegisteredPhysHeaps[0]); + } + + OSFreeMem(psDeviceNode->papsRegisteredPhysHeaps); + } + OSFreeMem(psDeviceNode); +} + +static PVRSRV_ERROR InitialiseInfoPageTimeouts(PVRSRV_DATA *psPVRSRVData) +{ + if (NULL == psPVRSRVData) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_VALUE_RETRIES] = WAIT_TRY_COUNT; + psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_VALUE_TIMEOUT_MS] = + ((MAX_HW_TIME_US / 10000) + 1000); + /* TIMEOUT_INFO_VALUE_TIMEOUT_MS resolves to... + vp : 2000 + 1000 + emu : 2000 + 1000 + rgx_nohw : 50 + 1000 + plato : 30000 + 1000 (VIRTUAL_PLATFORM or EMULATOR) + 50 + 1000 (otherwise) + */ + + psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_CONDITION_RETRIES] = 5; + psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_CONDITION_TIMEOUT_MS] = + ((MAX_HW_TIME_US / 10000) + 100); + /* TIMEOUT_INFO_CONDITION_TIMEOUT_MS resolves to... 
+ vp : 2000 + 100 + emu : 2000 + 100 + rgx_nohw : 50 + 100 + plato : 30000 + 100 (VIRTUAL_PLATFORM or EMULATOR) + 50 + 100 (otherwise) + */ + + psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_EVENT_OBJECT_RETRIES] = 5; + psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_EVENT_OBJECT_TIMEOUT_MS] = + ((MAX_HW_TIME_US / 10000) + 100); + /* TIMEOUT_INFO_EVENT_OBJECT_TIMEOUT_MS resolves to... + vp : 2000 + 100 + emu : 2000 + 100 + rgx_nohw : 50 + 100 + plato : 30000 + 100 (VIRTUAL_PLATFORM or EMULATOR) + 50 + 100 (otherwise) + */ + + return PVRSRV_OK; +} + +static PVRSRV_ERROR PopulateInfoPageBridges(PVRSRV_DATA *psPVRSRVData) +{ + PVR_RETURN_IF_INVALID_PARAM(psPVRSRVData); + + psPVRSRVData->pui32InfoPage[BRIDGE_INFO_PVR_BRIDGES] = gui32PVRBridges; + +#if defined(SUPPORT_RGX) + psPVRSRVData->pui32InfoPage[BRIDGE_INFO_RGX_BRIDGES] = gui32RGXBridges; +#else + psPVRSRVData->pui32InfoPage[BRIDGE_INFO_RGX_BRIDGES] = 0; +#endif + + return PVRSRV_OK; +} + +PVRSRV_ERROR IMG_CALLCONV +PVRSRVCommonDriverInit(void) +{ + PVRSRV_ERROR eError; + + PVRSRV_DATA *psPVRSRVData = NULL; + + IMG_UINT32 ui32AppHintCleanupThreadPriority; + IMG_UINT32 ui32AppHintWatchdogThreadPriority; + IMG_BOOL bEnablePageFaultDebug; + IMG_BOOL bEnableFullSyncTracking; + + void *pvAppHintState = NULL; + IMG_UINT32 ui32AppHintDefault; + + /* + * As this function performs one time driver initialisation, use the + * Services global device-independent data to determine whether or not + * this function has already been called. 
+ */ + if (gpsPVRSRVData) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Driver already initialised", __func__)); + return PVRSRV_ERROR_ALREADY_EXISTS; + } + + eError = DIInitImplementations(); + PVR_GOTO_IF_ERROR(eError, Error); + +#ifdef PVRSRV_ENABLE_PROCESS_STATS + eError = PVRSRVStatsInitialiseDI(); + PVR_GOTO_IF_ERROR(eError, Error); +#endif /* PVRSRV_ENABLE_PROCESS_STATS */ + + eError = HTB_CreateDIEntry(); + PVR_GOTO_IF_ERROR(eError, Error); + +#ifdef LINUX + { + int error = pvr_apphint_init(); + if (error != 0) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed AppHint setup(%d)", __func__, + error)); + } + } +#endif /* LINUX */ + + /* + * Initialise the server bridges + */ + eError = ServerBridgeInit(); + PVR_GOTO_IF_ERROR(eError, Error); + + eError = PhysHeapInit(); + PVR_GOTO_IF_ERROR(eError, Error); + + eError = DevmemIntInit(); + PVR_GOTO_IF_ERROR(eError, Error); + + /* + * Allocate the device-independent data + */ + psPVRSRVData = OSAllocZMem(sizeof(*gpsPVRSRVData)); + PVR_GOTO_IF_NOMEM(psPVRSRVData, eError, Error); + + /* Now it is set up, point gpsPVRSRVData to the actual data */ + gpsPVRSRVData = psPVRSRVData; + + eError = BridgeDispatcherInit(); + PVR_GOTO_IF_ERROR(eError, Error); + + /* Init any OS specific's */ + eError = OSInitEnvData(); + PVR_GOTO_IF_ERROR(eError, Error); + + /* Early init. 
server cache maintenance */ + eError = CacheOpInit(); + PVR_GOTO_IF_ERROR(eError, Error); + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + RIInitKM(); +#endif + + ui32AppHintDefault = PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG; + + OSCreateKMAppHintState(&pvAppHintState); + OSGetKMAppHintBOOL(pvAppHintState, EnablePageFaultDebug, + &ui32AppHintDefault, &bEnablePageFaultDebug); + OSFreeKMAppHintState(pvAppHintState); + + if (bEnablePageFaultDebug) + { + eError = DevicememHistoryInitKM(); + PVR_LOG_GOTO_IF_ERROR(eError, "DevicememHistoryInitKM", Error); + } + + eError = PMRInit(); + PVR_GOTO_IF_ERROR(eError, Error); + +#if defined(SUPPORT_DISPLAY_CLASS) + eError = DCInit(); + PVR_GOTO_IF_ERROR(eError, Error); +#endif + + /* Initialise overall system state */ + gpsPVRSRVData->eServicesState = PVRSRV_SERVICES_STATE_OK; + + /* Create an event object */ + eError = OSEventObjectCreate("PVRSRV_GLOBAL_EVENTOBJECT", &gpsPVRSRVData->hGlobalEventObject); + PVR_GOTO_IF_ERROR(eError, Error); + gpsPVRSRVData->ui32GEOConsecutiveTimeouts = 0; + + eError = PVRSRVCmdCompleteInit(); + PVR_GOTO_IF_ERROR(eError, Error); + + eError = PVRSRVHandleInit(); + PVR_GOTO_IF_ERROR(eError, Error); + + OSCreateKMAppHintState(&pvAppHintState); + ui32AppHintDefault = PVRSRV_APPHINT_CLEANUPTHREADPRIORITY; + OSGetKMAppHintUINT32(pvAppHintState, CleanupThreadPriority, + &ui32AppHintDefault, &ui32AppHintCleanupThreadPriority); + + ui32AppHintDefault = PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY; + OSGetKMAppHintUINT32(pvAppHintState, WatchdogThreadPriority, + &ui32AppHintDefault, &ui32AppHintWatchdogThreadPriority); + + ui32AppHintDefault = PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING; + OSGetKMAppHintBOOL(pvAppHintState, EnableFullSyncTracking, + &ui32AppHintDefault, &bEnableFullSyncTracking); + OSFreeKMAppHintState(pvAppHintState); + pvAppHintState = NULL; + + eError = _CleanupThreadPrepare(gpsPVRSRVData); + PVR_LOG_GOTO_IF_ERROR(eError, "_CleanupThreadPrepare", Error); + + /* Create a thread which is used to do the 
deferred cleanup */ + eError = OSThreadCreatePriority(&gpsPVRSRVData->hCleanupThread, + "pvr_defer_free", + CleanupThread, + CleanupThreadDumpInfo, + IMG_TRUE, + gpsPVRSRVData, + ui32AppHintCleanupThreadPriority); + PVR_LOG_GOTO_IF_ERROR(eError, "OSThreadCreatePriority:1", Error); + + /* Create the devices watchdog event object */ + eError = OSEventObjectCreate("PVRSRV_DEVICESWATCHDOG_EVENTOBJECT", &gpsPVRSRVData->hDevicesWatchdogEvObj); + PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", Error); + + /* Create a thread which is used to detect fatal errors */ + eError = OSThreadCreatePriority(&gpsPVRSRVData->hDevicesWatchdogThread, + "pvr_device_wdg", + DevicesWatchdogThread, + NULL, + IMG_TRUE, + gpsPVRSRVData, + ui32AppHintWatchdogThreadPriority); + PVR_LOG_GOTO_IF_ERROR(eError, "OSThreadCreatePriority:2", Error); + + gpsPVRSRVData->psProcessHandleBase_Table = HASH_Create(PVRSRV_PROC_HANDLE_BASE_INIT); + + if (gpsPVRSRVData->psProcessHandleBase_Table == NULL) + { + PVR_LOG_GOTO_WITH_ERROR("psProcessHandleBase_Table", eError, PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE, Error); + } + + eError = OSLockCreate(&gpsPVRSRVData->hProcessHandleBase_Lock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate:1", Error); + +#if defined(SUPPORT_RGX) + eError = OSLockCreate(&gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate:2", Error); +#endif + + eError = _HostMemDeviceCreate(); + PVR_GOTO_IF_ERROR(eError, Error); + + /* Initialise the Transport Layer */ + eError = TLInit(); + PVR_GOTO_IF_ERROR(eError, Error); + + /* Initialise pdump */ + eError = PDUMPINIT(); + PVR_GOTO_IF_ERROR(eError, Error); + + g_ui32InitFlags |= INIT_DATA_ENABLE_PDUMPINIT; + + /* Initialise TL control stream */ + eError = TLStreamCreate(&psPVRSRVData->hTLCtrlStream, + psPVRSRVData->psHostMemDeviceNode, + PVRSRV_TL_CTLR_STREAM, PVRSRV_TL_CTLR_STREAM_SIZE, + TL_OPMODE_DROP_OLDEST, NULL, NULL, NULL, + NULL); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, 
"Failed to create TL control plane stream" + " (%d).", eError)); + psPVRSRVData->hTLCtrlStream = NULL; + } + + eError = InfoPageCreate(psPVRSRVData); + PVR_LOG_GOTO_IF_ERROR(eError, "InfoPageCreate", Error); + + + /* Initialise the Timeout Info */ + eError = InitialiseInfoPageTimeouts(psPVRSRVData); + PVR_GOTO_IF_ERROR(eError, Error); + + eError = PopulateInfoPageBridges(psPVRSRVData); + + PVR_GOTO_IF_ERROR(eError, Error); + + if (bEnableFullSyncTracking) + { + psPVRSRVData->pui32InfoPage[DEBUG_FEATURE_FLAGS] |= DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED; + } + if (bEnablePageFaultDebug) + { + psPVRSRVData->pui32InfoPage[DEBUG_FEATURE_FLAGS] |= DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED; + } + + /* Initialise the Host Trace Buffer */ + eError = HTBInit(); + PVR_GOTO_IF_ERROR(eError, Error); + +#if defined(SUPPORT_RGX) + RGXHWPerfClientInitAppHintCallbacks(); +#endif + + /* Late init. client cache maintenance via info. page */ + eError = CacheOpInit2(); + PVR_LOG_GOTO_IF_ERROR(eError, "CacheOpInit2", Error); + + return 0; + +Error: + PVRSRVCommonDriverDeInit(); + return eError; +} + +void IMG_CALLCONV +PVRSRVCommonDriverDeInit(void) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_BOOL bEnablePageFaultDebug; + + if (gpsPVRSRVData == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: missing device-independent data", + __func__)); + return; + } + + bEnablePageFaultDebug = GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED; + + gpsPVRSRVData->bUnload = IMG_TRUE; + + if (gpsPVRSRVData->hProcessHandleBase_Lock) + { + OSLockDestroy(gpsPVRSRVData->hProcessHandleBase_Lock); + gpsPVRSRVData->hProcessHandleBase_Lock = NULL; + } + +#if defined(SUPPORT_RGX) + PVRSRVDestroyHWPerfHostThread(); + if (gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock) + { + OSLockDestroy(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); + gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock = NULL; + } +#endif + + if (gpsPVRSRVData->psProcessHandleBase_Table) + { + 
HASH_Delete(gpsPVRSRVData->psProcessHandleBase_Table); + gpsPVRSRVData->psProcessHandleBase_Table = NULL; + } + + if (gpsPVRSRVData->hGlobalEventObject) + { + OSEventObjectSignal(gpsPVRSRVData->hGlobalEventObject); + } + + /* Stop and cleanup the devices watchdog thread */ + if (gpsPVRSRVData->hDevicesWatchdogThread) + { + if (gpsPVRSRVData->hDevicesWatchdogEvObj) + { + eError = OSEventObjectSignal(gpsPVRSRVData->hDevicesWatchdogEvObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + } + LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US) + { + eError = OSThreadDestroy(gpsPVRSRVData->hDevicesWatchdogThread); + if (PVRSRV_OK == eError) + { + gpsPVRSRVData->hDevicesWatchdogThread = NULL; + break; + } + OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); + } + + if (gpsPVRSRVData->hDevicesWatchdogEvObj) + { + eError = OSEventObjectDestroy(gpsPVRSRVData->hDevicesWatchdogEvObj); + gpsPVRSRVData->hDevicesWatchdogEvObj = NULL; + PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); + } + + /* Stop and cleanup the deferred clean up thread, event object and + * deferred context list. 
+ */ + if (gpsPVRSRVData->hCleanupThread) + { + if (gpsPVRSRVData->hCleanupEventObject) + { + eError = OSEventObjectSignal(gpsPVRSRVData->hCleanupEventObject); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + } + LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US) + { + eError = OSThreadDestroy(gpsPVRSRVData->hCleanupThread); + if (PVRSRV_OK == eError) + { + gpsPVRSRVData->hCleanupThread = NULL; + break; + } + OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); + } + + if (gpsPVRSRVData->hCleanupEventObject) + { + eError = OSEventObjectDestroy(gpsPVRSRVData->hCleanupEventObject); + gpsPVRSRVData->hCleanupEventObject = NULL; + PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); + } + + /* Tear down the HTB before PVRSRVHandleDeInit() removes its TL handle */ + /* HTB De-init happens in device de-registration currently */ + eError = HTBDeInit(); + PVR_LOG_IF_ERROR(eError, "HTBDeInit"); + + /* Tear down CacheOp framework information page first */ + CacheOpDeInit2(); + + /* Clean up information page */ + InfoPageDestroy(gpsPVRSRVData); + + /* Close the TL control plane stream. 
*/ + TLStreamClose(gpsPVRSRVData->hTLCtrlStream); + + /* deinitialise pdump */ + if ((g_ui32InitFlags & INIT_DATA_ENABLE_PDUMPINIT) > 0) + { + PDUMPDEINIT(); + } + + /* Clean up Transport Layer resources that remain */ + TLDeInit(); + + _HostMemDeviceDestroy(); + + eError = PVRSRVHandleDeInit(); + PVR_LOG_IF_ERROR(eError, "PVRSRVHandleDeInit"); + + /* destroy event object */ + if (gpsPVRSRVData->hGlobalEventObject) + { + OSEventObjectDestroy(gpsPVRSRVData->hGlobalEventObject); + gpsPVRSRVData->hGlobalEventObject = NULL; + } + + PVRSRVCmdCompleteDeinit(); + +#if defined(SUPPORT_DISPLAY_CLASS) + eError = DCDeInit(); + PVR_LOG_IF_ERROR(eError, "DCDeInit"); +#endif + + eError = PMRDeInit(); + PVR_LOG_IF_ERROR(eError, "PMRDeInit"); + + BridgeDispatcherDeinit(); + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + RIDeInitKM(); +#endif + + if (bEnablePageFaultDebug) + { + DevicememHistoryDeInitKM(); + } + + CacheOpDeInit(); + + OSDeInitEnvData(); + + (void) DevmemIntDeInit(); + + eError = ServerBridgeDeInit(); + PVR_LOG_IF_ERROR(eError, "ServerBridgeDeinit"); + + eError = PhysHeapDeinit(); + PVR_LOG_IF_ERROR(eError, "PhysHeapDeinit"); + +#ifdef LINUX + pvr_apphint_deinit(); +#endif + + HTB_DestroyDIEntry(); + +#ifdef PVRSRV_ENABLE_PROCESS_STATS + PVRSRVStatsDestroyDI(); +#endif /* PVRSRV_ENABLE_PROCESS_STATS */ + + DIDeInit(); + + OSFreeMem(gpsPVRSRVData); + gpsPVRSRVData = NULL; +} + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +static PVRSRV_ERROR CreateLMASubArenas(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + IMG_UINT uiCounter=0; + PVRSRV_ERROR eError; + PHYS_HEAP *psLMAHeap; + IMG_DEV_PHYADDR sDevPAddr; /* Heap Physical address starting point */ + RA_BASE_T uBase; /* LMA-0 sub-arena starting point */ + + for (uiCounter = 0; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++) + { + psDeviceNode->psOSidSubArena[uiCounter] = + RA_Create(psDeviceNode->apszRANames[0], + OSGetPageShift(), /* Use host page size, keeps things simple */ + RA_LOCKCLASS_0, /* This arena doesn't use any other 
arenas. */ + NULL, /* No Import */ + NULL, /* No free import */ + NULL, /* No import handle */ + IMG_FALSE); + PVR_RETURN_IF_NOMEM(psDeviceNode->psOSidSubArena[uiCounter]); + } + + /* + * Arena creation takes place earlier than when the client side reads the apphints and transfers them over the bridge. Since we don't + * know how the memory is going to be partitioned and since we already need some memory for all the initial allocations that take place, + * we populate the first sub-arena (0) with a span of 64 megabytes. This has been shown to be enough even for cases where EWS is allocated + * memory in this sub arena and then a multi app example is executed. This pre-allocation also means that consistency must be maintained + * between apphints and reality. That's why in the Apphints, the OSid0 region must start from 0 and end at 3FFFFFF. + * We have to take account of where the GPU Physical Heap is based. We must start the LMA-0 sub-arena at the same point to avoid confusion. + */ + + psLMAHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]; + eError = PhysHeapRegionGetDevPAddr(psLMAHeap, 0, &sDevPAddr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PhysHeapRegionGetDevPAddr FAILED: %s", + __func__, PVRSRVGetErrorString(eError))); + uBase = 0; + } + else + { + uBase = sDevPAddr.uiAddr; + } + + PVR_DPF((PVR_DBG_MESSAGE, + "(GPU Virtualization Validation): Calling RA_Add with base 0x%" + IMG_UINT64_FMTSPECx " and size %u", uBase, GPUVIRT_SIZEOF_ARENA0)); + + if (!RA_Add(psDeviceNode->psOSidSubArena[0], uBase, GPUVIRT_SIZEOF_ARENA0, 0 , NULL)) + { + RA_Delete(psDeviceNode->psOSidSubArena[0]); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* + * Release the about-to-be-overwritten arena reference from + * apsLocalDevMemArenas[0] + */ + RA_Delete(psDeviceNode->apsLocalDevMemArenas[0]); + + psDeviceNode->apsLocalDevMemArenas[0] = psDeviceNode->psOSidSubArena[0]; + + return PVRSRV_OK; +} + +void PopulateLMASubArenas(PVRSRV_DEVICE_NODE 
*psDeviceNode, + IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], + IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]) +{ + IMG_UINT uiCounter; + + /* + * We need to ensure that the LMA sub-arenas correctly reflect the GPU + * physical addresses presented by the card. I.e., if we have the arena + * 0-based but the GPU is 0x80000000 based we need to adjust the ranges + * which we are adding into the sub-arenas to correctly reflect the + * hardware. Otherwise we will fail when trying to ioremap() these + * addresses when accessing the GPU. + */ + PHYS_HEAP *psLMAHeap; + IMG_DEV_PHYADDR sDevPAddr; + PVRSRV_ERROR eError; + RA_BASE_T uBase; + RA_LENGTH_T uSize; + RA_BASE_T uMaxPhysAddr; + + psLMAHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]; + + eError = PhysHeapRegionGetDevPAddr(psLMAHeap, 0, &sDevPAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapRegionGetDevPAddr", error); + + eError = PhysHeapRegionGetSize(psLMAHeap, 0, &uSize); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapRegionGetSize", error); + + uMaxPhysAddr = sDevPAddr.uiAddr + uSize - 1; + + /* Since Sub Arena[0] has been populated already, now we populate the rest starting from 1*/ + + psDeviceNode->ui32NumOSId = GPUVIRT_VALIDATION_NUM_OS; + + for (uiCounter = 1; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++) + { + uSize = aui32OSidMax[0][uiCounter] - aui32OSidMin[0][uiCounter] + 1; + uBase = aui32OSidMin[0][uiCounter] + sDevPAddr.uiAddr; + + if (uBase + uSize > uMaxPhysAddr) + { + PVR_DPF((PVR_DBG_WARNING, + "(GPU Virtualization Validation): Region %d [0x%" + IMG_UINT64_FMTSPECx "..0x%" IMG_UINT64_FMTSPECx + "] exceeds max physical memory 0x%" IMG_UINT64_FMTSPECx, + uiCounter, uBase, uBase+uSize-1, uMaxPhysAddr)); + psDeviceNode->ui32NumOSId = uiCounter; + break; /* Update pfnSysDevVirtInit */ + } + + PVR_DPF((PVR_DBG_MESSAGE, + "(GPU Virtualization Validation): Calling RA_Add with base 0x%" + IMG_UINT64_FMTSPECx " and 
size 0x%" IMG_UINT64_FMTSPECx, + uBase, uSize)); + + if (!RA_Add(psDeviceNode->psOSidSubArena[uiCounter], uBase, uSize, 0, NULL)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: RA_Add(%p, 0x%" IMG_UINT64_FMTSPECx ", 0x%" + IMG_UINT64_FMTSPECx ") *FAILED*", __func__, + psDeviceNode->psOSidSubArena[uiCounter], uBase, uSize)); + + goto error; + } + } + + if (psDeviceNode->psDevConfig->pfnSysDevVirtInit != NULL) + { + psDeviceNode->psDevConfig->pfnSysDevVirtInit(aui32OSidMin, aui32OSidMax); + } + + return; + +error: + for (uiCounter = 0; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++) + { + RA_Delete(psDeviceNode->psOSidSubArena[uiCounter]); + } + + return; +} + +/* + * Counter-part to CreateLMASubArenas. + */ +static void DestroyLMASubArenas(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + IMG_UINT32 uiCounter = 0; + + /* + * NOTE: We overload psOSidSubArena[0] into the psLocalMemArena so we must + * not free it here as it gets cleared later. + */ + for (uiCounter = 1; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++) + { + if (psDeviceNode->psOSidSubArena[uiCounter] == NULL) + { + continue; + } + RA_Delete(psDeviceNode->psOSidSubArena[uiCounter]); + } +} + +#endif + +static void _SysDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + /* Only dump info once */ + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*) hDebugRequestHandle; + + PVR_DUMPDEBUG_LOG("------[ System Summary ]------"); + + switch (psDeviceNode->eCurrentSysPowerState) + { + case PVRSRV_SYS_POWER_STATE_OFF: + PVR_DUMPDEBUG_LOG("Device System Power State: OFF"); + break; + case PVRSRV_SYS_POWER_STATE_ON: + PVR_DUMPDEBUG_LOG("Device System Power State: ON"); + break; + default: + PVR_DUMPDEBUG_LOG("Device System Power State: UNKNOWN (%d)", + psDeviceNode->eCurrentSysPowerState); + break; + } + + PVR_DUMPDEBUG_LOG("MaxHWTOut: %dus, WtTryCt: %d, WDGTOut(on,off): (%dms,%dms)", + MAX_HW_TIME_US, 
WAIT_TRY_COUNT, DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT, DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT); + + SysDebugInfo(psDeviceNode->psDevConfig, pfnDumpDebugPrintf, pvDumpDebugFile); +} + +static void _ThreadsDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgReqestHandle, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVR_UNREFERENCED_PARAMETER(hDbgReqestHandle); + + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) + { + PVR_DUMPDEBUG_LOG("------[ Server Thread Summary ]------"); + OSThreadDumpInfo(pfnDumpDebugPrintf, pvDumpDebugFile); + } +} + +PVRSRV_ERROR PVRSRVPhysMemHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + IMG_UINT32 ui32RegionId = 0; + PVRSRV_ERROR eError; + IMG_UINT32 i; + PHYS_HEAP *psPhysHeap; + PHYS_HEAP_TYPE eHeapType; + IMG_UINT64 uPhysheapSize; + IMG_UINT32 ui32RegionCount; + IMG_CPU_PHYADDR sCpuPAddr; + IMG_DEV_PHYADDR sDevPAddr; + + /* Register the physical memory heaps */ + psDeviceNode->papsRegisteredPhysHeaps = + OSAllocZMem(sizeof(*psDeviceNode->papsRegisteredPhysHeaps) * + psDevConfig->ui32PhysHeapCount); + PVR_RETURN_IF_NOMEM(psDeviceNode->papsRegisteredPhysHeaps); + + for (i = 0; i < psDevConfig->ui32PhysHeapCount; i++) + { + /* No real device should register a heap with ID same as host device's heap ID */ + PVR_ASSERT(psDevConfig->pasPhysHeaps[i].ui32PhysHeapID != PHYS_HEAP_ID_HOSTMEM); + + eError = PhysHeapRegister(&psDevConfig->pasPhysHeaps[i], + &psDeviceNode->papsRegisteredPhysHeaps[i]); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to register physical heap %d (%s)", + __func__, psDevConfig->pasPhysHeaps[i].ui32PhysHeapID, + PVRSRVGetErrorString(eError))); + goto ErrorDeinit; + } + + psDeviceNode->ui32RegisteredPhysHeaps++; + } + + /* + * The physical backing storage for the following physical heaps + * [CPU,GPU,FW] may or may not come from the same underlying source + */ + eError = 
PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL], + &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:GPU", ErrorDeinit); + + eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL], + &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:CPU", ErrorDeinit); + + eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL], + &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW", ErrorDeinit); + + eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL], + &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL]); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:EXTERNAL", ErrorDeinit); + + eHeapType = PhysHeapGetType(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]); + + if (eHeapType == PHYS_HEAP_TYPE_UMA) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: GPU physical heap uses OS System memory (UMA)", __func__)); + + psDeviceNode->sDevMMUPxSetup.pfnDevPxAlloc = OSPhyContigPagesAlloc; + psDeviceNode->sDevMMUPxSetup.pfnDevPxFree = OSPhyContigPagesFree; + psDeviceNode->sDevMMUPxSetup.pfnDevPxMap = OSPhyContigPagesMap; + psDeviceNode->sDevMMUPxSetup.pfnDevPxUnMap = OSPhyContigPagesUnmap; + psDeviceNode->sDevMMUPxSetup.pfnDevPxClean = OSPhyContigPagesClean; + psDeviceNode->sDevMMUPxSetup.psPxRA = NULL; + psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = PhysmemNewOSRamBackedPMR; + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + PVR_DPF((PVR_DBG_ERROR, "%s: Virtualisation Validation builds are currently only" + " supported on systems with local memory (LMA).", __func__)); + eError = PVRSRV_ERROR_NOT_SUPPORTED; + goto ErrorDeinit; +#endif + } + else + { + psPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]; + ui32RegionCount = 
PhysHeapNumberOfRegions(psPhysHeap); + + PVR_DPF((PVR_DBG_MESSAGE, "%s: GPU physical heap uses local memory managed by the driver (LMA)", __func__)); + if (ui32RegionCount == 0) + { + PVR_LOG_GOTO_WITH_ERROR("ui32RegionCount", eError, PVRSRV_ERROR_DEVICEMEM_INVALID_LMA_HEAP, ErrorDeinit); + } + + /* Allocate memory for RA pointers and name strings */ + psDeviceNode->apsLocalDevMemArenas = OSAllocZMem(sizeof(RA_ARENA*) * ui32RegionCount); + PVR_LOG_GOTO_IF_NOMEM(psDeviceNode->apsLocalDevMemArenas, eError, ErrorDeinit); + + psDeviceNode->ui32NumOfLocalMemArenas = ui32RegionCount; + psDeviceNode->apszRANames = OSAllocZMem(ui32RegionCount * sizeof(IMG_PCHAR)); + if (!psDeviceNode->apszRANames) + { + OSFreeMem(psDeviceNode->apsLocalDevMemArenas); + PVR_LOG_GOTO_WITH_ERROR("apszRANames", eError, PVRSRV_ERROR_OUT_OF_MEMORY, ErrorDeinit); + } + + for (; ui32RegionId < ui32RegionCount; ui32RegionId++) + { + eError = PhysHeapRegionGetSize(psPhysHeap, ui32RegionId, &uPhysheapSize); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapRegionGetSize", ErrorDeinit); + + eError = PhysHeapRegionGetCpuPAddr(psPhysHeap, ui32RegionId, &sCpuPAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapRegionGetCpuPAddr", ErrorDeinit); + + eError = PhysHeapRegionGetDevPAddr(psPhysHeap, ui32RegionId, &sDevPAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapRegionGetDevPAddr", ErrorDeinit); + + psDeviceNode->apszRANames[ui32RegionId] = OSAllocMem(PVRSRV_MAX_RA_NAME_LENGTH); + PVR_LOG_GOTO_IF_NOMEM(psDeviceNode->apszRANames[ui32RegionId], eError, ErrorDeinit); + + eError = PVRSRVCreateRegionRA(psDevConfig, + &psDeviceNode->apsLocalDevMemArenas[ui32RegionId], + psDeviceNode->apszRANames[ui32RegionId], + sCpuPAddr.uiAddr, + sDevPAddr.uiAddr, + uPhysheapSize, + ui32RegionId, + "GPU LMA"); + PVR_LOG_GOTO_IF_ERROR(eError, "CreateRegionRA(GPU)", ErrorDeinit); + } + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + eError = CreateLMASubArenas(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "CreateLMASubArenas", ErrorDeinit); 
+#endif + + psDeviceNode->sDevMMUPxSetup.pfnDevPxAlloc = LMA_PhyContigPagesAlloc; + psDeviceNode->sDevMMUPxSetup.pfnDevPxFree = LMA_PhyContigPagesFree; + psDeviceNode->sDevMMUPxSetup.pfnDevPxMap = LMA_PhyContigPagesMap; + psDeviceNode->sDevMMUPxSetup.pfnDevPxUnMap = LMA_PhyContigPagesUnmap; + psDeviceNode->sDevMMUPxSetup.pfnDevPxClean = LMA_PhyContigPagesClean; + psDeviceNode->sDevMMUPxSetup.psPxRA = psDeviceNode->apsLocalDevMemArenas[0]; + psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = PhysmemNewLocalRamBackedPMR; +#if defined(SUPPORT_GPUVIRT_VALIDATION) + psDeviceNode->sDevMMUPxSetup.pfnDevPxAllocGPV = LMA_PhyContigPagesAllocGPV; +#endif + } + + if (PVRSRV_VZ_MODE_IS(HOST)) + { + /* All firmware heap allocations the Host does on behalf of Guest drivers must be handled as LMA memory. + * Host receives the physical base address and size of the Guest heaps and allocates them using the LMA PMR factories. + * When the Host has its firmware heap configured as UMA, we need to create a separate FW_GUEST pseudo-heap with the + * same properties as the FW_LOCAL heap, but using the LMA factory function pointer for obtaining physical memory. 
*/ + psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_FW_GUEST] = PhysmemNewLocalRamBackedPMR; + psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_GUEST] = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]; + } + + if (PhysHeapGetType(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]) == PHYS_HEAP_TYPE_LMA) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: CPU physical heap uses local memory managed by the driver (LMA)", __func__)); + psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = PhysmemNewLocalRamBackedPMR; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: CPU physical heap uses OS System memory (UMA)", __func__)); + psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = PhysmemNewOSRamBackedPMR; + } + + return PVRSRV_OK; + +ErrorDeinit: + PVR_ASSERT(IMG_FALSE); + PVRSRVPhysMemHeapsDeinit(psDeviceNode); + + return eError; +} + +void PVRSRVPhysMemHeapsDeinit(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_DEVICE_PHYS_HEAP ePhysHeapIdx; + IMG_UINT32 i; + IMG_UINT32 ui32RegionIdx; + + psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_GUEST] = NULL; + + if (psDeviceNode->psFwMMUReservedMemArena) + { + RA_Delete(psDeviceNode->psFwMMUReservedMemArena); + psDeviceNode->psFwMMUReservedMemArena = NULL; + } + + if (psDeviceNode->psKernelFwConfigMemArena) + { + RA_Delete(psDeviceNode->psKernelFwConfigMemArena); + psDeviceNode->psKernelFwConfigMemArena = NULL; + } + + if (psDeviceNode->psKernelFwMainMemArena) + { + RA_Delete(psDeviceNode->psKernelFwMainMemArena); + psDeviceNode->psKernelFwMainMemArena = NULL; + } + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + /* Remove local LMA subarenas */ + DestroyLMASubArenas(psDeviceNode); +#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ + + /* Remove RAs and RA names for local card memory */ + for (ui32RegionIdx = 0; + ui32RegionIdx < psDeviceNode->ui32NumOfLocalMemArenas; + ui32RegionIdx++) + { + if (psDeviceNode->apsLocalDevMemArenas[ui32RegionIdx]) + { + 
RA_Delete(psDeviceNode->apsLocalDevMemArenas[ui32RegionIdx]); + psDeviceNode->apsLocalDevMemArenas[ui32RegionIdx] = NULL; + } + + if (psDeviceNode->apszRANames[ui32RegionIdx]) + { + OSFreeMem(psDeviceNode->apszRANames[ui32RegionIdx]); + psDeviceNode->apszRANames[ui32RegionIdx] = NULL; + } + } + + if (psDeviceNode->apsLocalDevMemArenas) + { + OSFreeMem(psDeviceNode->apsLocalDevMemArenas); + psDeviceNode->apsLocalDevMemArenas = NULL; + psDeviceNode->sDevMMUPxSetup.psPxRA = NULL; + } + + if (psDeviceNode->apszRANames) + { + OSFreeMem(psDeviceNode->apszRANames); + psDeviceNode->apszRANames = NULL; + } + + /* Release heaps */ + for (ePhysHeapIdx = 0; + ePhysHeapIdx < ARRAY_SIZE(psDeviceNode->apsPhysHeap); + ePhysHeapIdx++) + { + if (psDeviceNode->apsPhysHeap[ePhysHeapIdx]) + { + PhysHeapRelease(psDeviceNode->apsPhysHeap[ePhysHeapIdx]); + } + } + + /* Unregister heaps */ + for (i = 0; i < psDeviceNode->ui32RegisteredPhysHeaps; i++) + { + PhysHeapUnregister(psDeviceNode->papsRegisteredPhysHeaps[i]); + } + + OSFreeMem(psDeviceNode->papsRegisteredPhysHeaps); +} + +PVRSRV_ERROR IMG_CALLCONV PVRSRVCommonDeviceCreate(void *pvOSDevice, + IMG_INT32 i32UMIdentifier, + PVRSRV_DEVICE_NODE **ppsDeviceNode) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_ERROR eError; + PVRSRV_DEVICE_CONFIG *psDevConfig; + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_UINT32 ui32AppHintDefault; + IMG_UINT32 ui32AppHintDriverMode; +#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) + IMG_UINT32 ui32AppHintPhysMemTestPasses; +#endif + void *pvAppHintState = NULL; +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + IMG_HANDLE hProcessStats; +#endif + + /* Read driver mode (i.e. 
native, host or guest) AppHint early as it is + required by SysDevInit */ + ui32AppHintDefault = PVRSRV_APPHINT_DRIVERMODE; + OSCreateKMAppHintState(&pvAppHintState); + OSGetKMAppHintUINT32(pvAppHintState, DriverMode, + &ui32AppHintDefault, &ui32AppHintDriverMode); + psPVRSRVData->eDriverMode = PVRSRV_VZ_APPHINT_MODE(ui32AppHintDriverMode); + psPVRSRVData->bForceApphintDriverMode = PVRSRV_VZ_APPHINT_MODE_IS_OVERRIDE(ui32AppHintDriverMode); + OSFreeKMAppHintState(pvAppHintState); + pvAppHintState = NULL; + + psDeviceNode = OSAllocZMemNoStats(sizeof(*psDeviceNode)); + PVR_LOG_RETURN_IF_NOMEM(psDeviceNode, "psDeviceNode"); + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + /* Allocate process statistics */ + eError = PVRSRVStatsRegisterProcess(&hProcessStats); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVStatsRegisterProcess", ErrorFreeDeviceNode); +#endif + + psDeviceNode->sDevId.i32UMIdentifier = i32UMIdentifier; + + eError = SysDevInit(pvOSDevice, &psDevConfig); + PVR_LOG_GOTO_IF_ERROR(eError, "SysDevInit", ErrorDeregisterStats); + + PVR_ASSERT(psDevConfig); + PVR_ASSERT(psDevConfig->pvOSDevice == pvOSDevice); + PVR_ASSERT(!psDevConfig->psDevNode); + + psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_INIT; + psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON; + psDeviceNode->psDevConfig = psDevConfig; + psDevConfig->psDevNode = psDeviceNode; + +#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) + if (PVRSRV_VZ_MODE_IS(NATIVE)) + { + /* Read AppHint - Configurable memory test pass count */ + ui32AppHintDefault = 0; + OSCreateKMAppHintState(&pvAppHintState); + OSGetKMAppHintUINT32(pvAppHintState, PhysMemTestPasses, + &ui32AppHintDefault, &ui32AppHintPhysMemTestPasses); + OSFreeKMAppHintState(pvAppHintState); + pvAppHintState = NULL; + + if (ui32AppHintPhysMemTestPasses > 0) + { + eError = PhysMemTest(psDevConfig, ui32AppHintPhysMemTestPasses); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysMemTest", 
ErrorSysDevDeInit); + } + } +#endif + + /* Initialise the paravirtualised connection */ + if (!PVRSRV_VZ_MODE_IS(NATIVE)) + { + PvzConnectionInit(psDevConfig); + PVR_GOTO_IF_ERROR(eError, ErrorSysDevDeInit); + } + + eError = PVRSRVRegisterDbgTable(psDeviceNode, + g_aui32DebugOrderTable, + ARRAY_SIZE(g_aui32DebugOrderTable)); + PVR_GOTO_IF_ERROR(eError, ErrorPvzConnectionDeInit); + + eError = PVRSRVPowerLockInit(psDeviceNode); + PVR_GOTO_IF_ERROR(eError, ErrorUnregisterDbgTable); + + eError = PVRSRVPhysMemHeapsInit(psDeviceNode, psDevConfig); + PVR_GOTO_IF_ERROR(eError, ErrorPowerLockDeInit); + +#if defined(SUPPORT_RGX) + /* Requirements: + * registered GPU and FW local heaps */ + /* debug table */ + eError = RGXRegisterDevice(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to register device", __func__)); + eError = PVRSRV_ERROR_DEVICE_REGISTER_FAILED; + goto ErrorPhysMemHeapsDeinit; + } +#endif + + if (psDeviceNode->pfnPhysMemDeviceHeapsInit != NULL) + { + psDeviceNode->pfnPhysMemDeviceHeapsInit(psDeviceNode); + PVR_GOTO_IF_ERROR(eError, ErrorPhysMemHeapsDeinit); + } + + psDeviceNode->sDevMMUPxSetup.uiMMUPxLog2AllocGran = OSGetPageShift(); + + eError = SyncServerInit(psDeviceNode); + PVR_GOTO_IF_ERROR(eError, ErrorDeInitRgx); + + eError = SyncCheckpointInit(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "SyncCheckpointInit", ErrorSyncCheckpointInit); + +#if defined(SUPPORT_RGX) && defined(SUPPORT_DEDICATED_FW_MEMORY) && !defined(NO_HARDWARE) + eError = PhysmemInitFWDedicatedMem(psDeviceNode, psDevConfig); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemInitFWDedicatedMem", ErrorOnFWMemInit); +#endif + + /* + * This is registered before doing device specific initialisation to ensure + * generic device information is dumped first during a debug request. 
+ */ + eError = PVRSRVRegisterDbgRequestNotify(&psDeviceNode->hDbgReqNotify, + psDeviceNode, + _SysDebugRequestNotify, + DEBUG_REQUEST_SYS, + psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRegisterDbgRequestNotify", ErrorRegDbgReqNotify); + + eError = PVRSRVRegisterDbgRequestNotify(&psDeviceNode->hThreadsDbgReqNotify, + psDeviceNode, + _ThreadsDebugRequestNotify, + DEBUG_REQUEST_SYS, + NULL); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRegisterDbgRequestNotify(threads)", ErrorRegThreadsDbgReqNotify); + + eError = HTBDeviceCreate(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "HTBDeviceCreate", ErrorHTBDeviceCreate); + + psPVRSRVData->ui32RegisteredDevices++; + +#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) + eError = InitDVFS(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "InitDVFS", ErrorDecrementDeviceCount); +#endif + + OSAtomicWrite(&psDeviceNode->iNumClockSpeedChanges, 0); + +#if defined(PVR_TESTING_UTILS) + TUtilsInit(psDeviceNode); +#endif + + OSWRLockCreate(&psDeviceNode->hMemoryContextPageFaultNotifyListLock); + if (psDeviceNode->hMemoryContextPageFaultNotifyListLock == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for PF notify list", + __func__)); + goto ErrorDecrementDeviceCount; + } + + dllist_init(&psDeviceNode->sMemoryContextPageFaultNotifyListHead); + + PVR_DPF((PVR_DBG_MESSAGE, "Registered device %p", psDeviceNode)); + PVR_DPF((PVR_DBG_MESSAGE, "Register bank address = 0x%08lx", + (unsigned long)psDevConfig->sRegsCpuPBase.uiAddr)); + PVR_DPF((PVR_DBG_MESSAGE, "IRQ = %d", psDevConfig->ui32IRQ)); + +/* SUPPORT_ALT_REGBASE is defined for rogue cores only */ +#if defined(SUPPORT_RGX) && defined(SUPPORT_ALT_REGBASE) + { + IMG_DEV_PHYADDR sRegsGpuPBase; + + PhysHeapCpuPAddrToDevPAddr(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL], + 1, + &sRegsGpuPBase, + &(psDeviceNode->psDevConfig->sRegsCpuPBase)); + + PVR_LOG(("%s: Using alternate Register bank GPU address: 0x%08lx (orig: 0x%08lx)", __func__, + (unsigned 
long)psDevConfig->sAltRegsGpuPBase.uiAddr, + (unsigned long)sRegsGpuPBase.uiAddr)); + } +#endif + + /* Finally insert the device into the dev-list and set it as active */ + List_PVRSRV_DEVICE_NODE_InsertTail(&psPVRSRVData->psDeviceNodeList, + psDeviceNode); + + *ppsDeviceNode = psDeviceNode; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + /* Close the process statistics */ + PVRSRVStatsDeregisterProcess(hProcessStats); +#endif + +#if defined(SUPPORT_VALIDATION) + OSLockCreateNoStats(&psDeviceNode->hValidationLock); +#endif + + return PVRSRV_OK; + +ErrorDecrementDeviceCount: + psPVRSRVData->ui32RegisteredDevices--; +#if defined(PVR_TESTING_UTILS) + TUtilsDeinit(psDeviceNode); +#endif + HTBDeviceDestroy(psDeviceNode); + +ErrorHTBDeviceCreate: + if (psDeviceNode->hThreadsDbgReqNotify) + { + PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hThreadsDbgReqNotify); + } + +ErrorRegThreadsDbgReqNotify: + if (psDeviceNode->hDbgReqNotify) + { + PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hDbgReqNotify); + } + +ErrorRegDbgReqNotify: +#if defined(SUPPORT_RGX) && defined(SUPPORT_DEDICATED_FW_MEMORY) && !defined(NO_HARDWARE) + PhysmemDeinitFWDedicatedMem(psDeviceNode); +ErrorOnFWMemInit: +#endif + + SyncCheckpointDeinit(psDeviceNode); + +ErrorSyncCheckpointInit: + SyncServerDeinit(psDeviceNode); + +ErrorDeInitRgx: +#if defined(SUPPORT_RGX) + DevDeInitRGX(psDeviceNode); +#endif +ErrorPhysMemHeapsDeinit: + PVRSRVPhysMemHeapsDeinit(psDeviceNode); +ErrorPowerLockDeInit: + PVRSRVPowerLockDeInit(psDeviceNode); +ErrorUnregisterDbgTable: + PVRSRVUnregisterDbgTable(psDeviceNode); +ErrorPvzConnectionDeInit: + psDevConfig->psDevNode = NULL; + if (!PVRSRV_VZ_MODE_IS(NATIVE)) + { + PvzConnectionDeInit(); + } +ErrorSysDevDeInit: + SysDevDeInit(psDevConfig); +ErrorDeregisterStats: +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + /* Close the process statistics */ + PVRSRVStatsDeregisterProcess(hProcessStats); 
+ErrorFreeDeviceNode: +#endif + OSFreeMemNoStats(psDeviceNode); + + return eError; +} + +#if defined(SUPPORT_RGX) +static PVRSRV_ERROR _SetDeviceFlag(const PVRSRV_DEVICE_NODE *psDevice, + const void *psPrivate, IMG_BOOL bValue) +{ + PVRSRV_ERROR eResult = PVRSRV_OK; + IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate); + + PVR_RETURN_IF_INVALID_PARAM(ui32Flag); + + eResult = RGXSetDeviceFlags((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice, + ui32Flag, bValue); + + return eResult; +} + +static PVRSRV_ERROR _ReadDeviceFlag(const PVRSRV_DEVICE_NODE *psDevice, + const void *psPrivate, IMG_BOOL *pbValue) +{ + PVRSRV_ERROR eResult = PVRSRV_OK; + IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate); + IMG_UINT32 ui32State; + + PVR_RETURN_IF_INVALID_PARAM(ui32Flag); + + eResult = RGXGetDeviceFlags((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice, + &ui32State); + + if (PVRSRV_OK == eResult) + { + *pbValue = (ui32State & ui32Flag)? IMG_TRUE: IMG_FALSE; + } + + return eResult; +} +static PVRSRV_ERROR _SetStateFlag(const PVRSRV_DEVICE_NODE *psDevice, + const void *psPrivate, IMG_BOOL bValue) +{ + PVRSRV_ERROR eResult = PVRSRV_OK; + IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate); + + PVR_RETURN_IF_INVALID_PARAM(ui32Flag); + + /* EnableHWR is a special case + * only possible to disable after FW is running + */ + if (bValue && RGXFWIF_INICFG_HWR_EN == ui32Flag) + { + return PVRSRV_ERROR_NOT_SUPPORTED; + } + + eResult = RGXStateFlagCtrl((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice, + ui32Flag, NULL, bValue); + + return eResult; +} + +static PVRSRV_ERROR _ReadStateFlag(const PVRSRV_DEVICE_NODE *psDevice, + const void *psPrivate, IMG_BOOL *pbValue) +{ + IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate); + IMG_UINT32 ui32State; + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDevice->pvDevice; + + PVR_RETURN_IF_INVALID_PARAM(ui32Flag); + + ui32State = psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags; + + if (pbValue) + { + *pbValue = (ui32State & ui32Flag)? 
IMG_TRUE: IMG_FALSE; + } + + return PVRSRV_OK; +} +#endif + +PVRSRV_ERROR PVRSRVCommonDeviceInitialise(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + IMG_BOOL bInitSuccesful = IMG_FALSE; +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + IMG_HANDLE hProcessStats; +#endif + PVRSRV_ERROR eError; + + if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_INIT) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Device already initialised", __func__)); + return PVRSRV_ERROR_INIT_FAILURE; + } + + /* Initialise Connection_Data access mechanism */ + dllist_init(&psDeviceNode->sConnections); + eError = OSLockCreate(&psDeviceNode->hConnectionsLock); + PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); + + /* Allocate process statistics */ +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + eError = PVRSRVStatsRegisterProcess(&hProcessStats); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVStatsRegisterProcess"); +#endif + +#if defined(SUPPORT_RGX) + eError = RGXInit(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXInit", Exit); +#endif + + bInitSuccesful = IMG_TRUE; + +#if defined(SUPPORT_RGX) +Exit: +#endif + eError = PVRSRVDeviceFinalise(psDeviceNode, bInitSuccesful); + PVR_LOG_IF_ERROR(eError, "PVRSRVDeviceFinalise"); + +#if defined(SUPPORT_RGX) + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableClockGating, + _ReadStateFlag, _SetStateFlag, + psDeviceNode, + (void*)((uintptr_t)RGXFWIF_INICFG_DISABLE_CLKGATING_EN)); + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableDMOverlap, + _ReadStateFlag, _SetStateFlag, + psDeviceNode, + (void*)((uintptr_t)RGXFWIF_INICFG_DISABLE_DM_OVERLAP)); + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_AssertOnHWRTrigger, + _ReadStateFlag, _SetStateFlag, + psDeviceNode, + (void*)((uintptr_t)RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER)); + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_AssertOutOfMemory, + _ReadStateFlag, _SetStateFlag, + psDeviceNode, + 
(void*)((uintptr_t)RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY)); + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_CheckMList, + _ReadStateFlag, _SetStateFlag, + psDeviceNode, + (void*)((uintptr_t)RGXFWIF_INICFG_CHECK_MLIST_EN)); + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableHWR, + _ReadStateFlag, _SetStateFlag, + psDeviceNode, + (void*)((uintptr_t)RGXFWIF_INICFG_HWR_EN)); + } + + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableFEDLogging, + _ReadDeviceFlag, _SetDeviceFlag, + psDeviceNode, + (void*)((uintptr_t)RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN)); + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_ZeroFreelist, + _ReadDeviceFlag, _SetDeviceFlag, + psDeviceNode, + (void*)((uintptr_t)RGXKM_DEVICE_STATE_ZERO_FREELIST)); +#if defined(SUPPORT_VALIDATION) + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_GPUUnitsPowerChange, + _ReadDeviceFlag, _SetDeviceFlag, + psDeviceNode, + (void*)((uintptr_t)RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN)); +#endif + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisablePDumpPanic, + RGXQueryPdumpPanicDisable, RGXSetPdumpPanicDisable, + psDeviceNode, + NULL); +#endif + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + /* Close the process statistics */ + PVRSRVStatsDeregisterProcess(hProcessStats); +#endif + + return eError; +} + +PVRSRV_ERROR IMG_CALLCONV PVRSRVCommonDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_ERROR eError; +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + IMG_BOOL bForceUnload = IMG_FALSE; + + if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + bForceUnload = IMG_TRUE; + } +#endif + + psPVRSRVData->ui32RegisteredDevices--; + + psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_DEINIT; + + if (psDeviceNode->hMemoryContextPageFaultNotifyListLock != NULL) + { + OSWRLockDestroy(psDeviceNode->hMemoryContextPageFaultNotifyListLock); + } + +#if defined(SUPPORT_VALIDATION) + 
OSLockDestroyNoStats(psDeviceNode->hValidationLock); + psDeviceNode->hValidationLock = NULL; +#endif + +#if defined(SUPPORT_FALLBACK_FENCE_SYNC) + SyncFbDeregisterDevice(psDeviceNode); +#endif + /* Counter part to what gets done in PVRSRVDeviceFinalise */ + if (psDeviceNode->hSyncCheckpointContext) + { + SyncCheckpointContextDestroy(psDeviceNode->hSyncCheckpointContext); + psDeviceNode->hSyncCheckpointContext = NULL; + } + if (psDeviceNode->hSyncPrimContext) + { + if (psDeviceNode->psMMUCacheSyncPrim) + { + PVRSRV_CLIENT_SYNC_PRIM *psSync = psDeviceNode->psMMUCacheSyncPrim; + + /* Ensure there are no pending MMU Cache Ops in progress before freeing this sync. */ + eError = PVRSRVPollForValueKM(psDeviceNode, + psSync->pui32LinAddr, + psDeviceNode->ui32NextMMUInvalidateUpdate-1, + 0xFFFFFFFF, + IMG_TRUE); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPollForValueKM"); + + /* Important to set the device node pointer to NULL + * before we free the sync-prim to make sure we don't + * defer the freeing of the sync-prim's page tables itself. + * The sync is used to defer the MMU page table + * freeing. */ + psDeviceNode->psMMUCacheSyncPrim = NULL; + + /* Free general purpose sync primitive */ + SyncPrimFree(psSync); + } + + SyncPrimContextDestroy(psDeviceNode->hSyncPrimContext); + psDeviceNode->hSyncPrimContext = NULL; + } + + eError = PVRSRVPowerLock(psDeviceNode); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock"); + +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + /* + * Firmware probably not responding if bForceUnload is set, but we still want to unload the + * driver. 
+ */ + if (!bForceUnload) +#endif + { + /* Force idle device */ + eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_TRUE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Forced idle request failure (%s)", + __func__, PVRSRVGetErrorString(eError))); + if (eError != PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) + { + PVRSRVPowerUnlock(psDeviceNode); + } + return eError; + } + } + + /* Power down the device if necessary */ + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, + PVRSRV_DEV_POWER_STATE_OFF, + IMG_TRUE); + PVRSRVPowerUnlock(psDeviceNode); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed PVRSRVSetDevicePowerStateKM call (%s). Dump debug.", + __func__, PVRSRVGetErrorString(eError))); + + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + + /* + * If the driver is okay then return the error, otherwise we can ignore + * this error. + */ + if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK) + { + return eError; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Will continue to unregister as driver status is not OK", + __func__)); + } + } + +#if defined(PVR_TESTING_UTILS) + TUtilsDeinit(psDeviceNode); +#endif + +#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) + DeinitDVFS(psDeviceNode); +#endif + + HTBDeviceDestroy(psDeviceNode); + + if (psDeviceNode->hThreadsDbgReqNotify) + { + PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hThreadsDbgReqNotify); + } + + if (psDeviceNode->hDbgReqNotify) + { + PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hDbgReqNotify); + } + +#if defined(SUPPORT_RGX) && defined(SUPPORT_DEDICATED_FW_MEMORY) && !defined(NO_HARDWARE) + PhysmemDeinitFWDedicatedMem(psDeviceNode); +#endif + + SyncCheckpointDeinit(psDeviceNode); + + SyncServerDeinit(psDeviceNode); + +#if defined(SUPPORT_RGX) + DevDeInitRGX(psDeviceNode); +#endif + + List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode); + + PVRSRVPhysMemHeapsDeinit(psDeviceNode); + 
PVRSRVPowerLockDeInit(psDeviceNode); + + PVRSRVUnregisterDbgTable(psDeviceNode); + + /* Release the Connection-Data lock as late as possible. */ + if (psDeviceNode->hConnectionsLock) + { + eError = OSLockDestroy(psDeviceNode->hConnectionsLock); + PVR_LOG_IF_ERROR(eError, "ConnectionLock destruction failed"); + } + + psDeviceNode->psDevConfig->psDevNode = NULL; + + if (!PVRSRV_VZ_MODE_IS(NATIVE)) + { + PvzConnectionDeInit(); + } + SysDevDeInit(psDeviceNode->psDevConfig); + + OSFreeMemNoStats(psDeviceNode); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR _LMA_DoPhyContigPagesAlloc(RA_ARENA *pArena, + size_t uiSize, + PG_HANDLE *psMemHandle, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_PID uiPid) +{ + RA_BASE_T uiCardAddr = 0; + RA_LENGTH_T uiActualSize; + PVRSRV_ERROR eError; +#if defined(DEBUG) + static IMG_UINT32 ui32MaxLog2NumPages = 4; /* 16 pages => 64KB */ +#endif /* defined(DEBUG) */ + + IMG_UINT32 ui32Log2NumPages = 0; + + PVR_ASSERT(uiSize != 0); + ui32Log2NumPages = OSGetOrder(uiSize); + uiSize = (1 << ui32Log2NumPages) * OSGetPageSize(); + + eError = RA_Alloc(pArena, + uiSize, + RA_NO_IMPORT_MULTIPLIER, + 0, /* No flags */ + uiSize, + "LMA_PhyContigPagesAlloc", + &uiCardAddr, + &uiActualSize, + NULL); /* No private handle */ + + PVR_ASSERT(uiSize == uiActualSize); + + psMemHandle->u.ui64Handle = uiCardAddr; + psDevPAddr->uiAddr = (IMG_UINT64) uiCardAddr; + + if (PVRSRV_OK == eError) + { +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, + uiSize, + uiCardAddr, + uiPid); +#else + IMG_CPU_PHYADDR sCpuPAddr; + sCpuPAddr.uiAddr = psDevPAddr->uiAddr; + + PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, + NULL, + sCpuPAddr, + uiSize, + NULL, + uiPid, + DEBUG_MEMSTATS_VALUES); +#endif +#endif +#if defined(SUPPORT_GPUVIRT_VALIDATION) + PVR_DPF((PVR_DBG_MESSAGE, + "%s: (GPU Virtualisation) Allocated 0x" IMG_SIZE_FMTSPECX " at 0x%" 
IMG_UINT64_FMTSPECX ", Arena ID %u", + __func__, uiSize, psDevPAddr->uiAddr, psMemHandle->uiOSid)); +#endif + +#if defined(DEBUG) + PVR_ASSERT((ui32Log2NumPages <= ui32MaxLog2NumPages)); + if (ui32Log2NumPages > ui32MaxLog2NumPages) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: ui32MaxLog2NumPages = %u, increasing to %u", __func__, + ui32MaxLog2NumPages, ui32Log2NumPages )); + ui32MaxLog2NumPages = ui32Log2NumPages; + } +#endif /* defined(DEBUG) */ + psMemHandle->uiOrder = ui32Log2NumPages; + } + + return eError; +} + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +PVRSRV_ERROR LMA_PhyContigPagesAllocGPV(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize, + PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, + IMG_UINT32 ui32OSid, IMG_PID uiPid) +{ + RA_ARENA *pArena; + IMG_UINT32 ui32Log2NumPages = 0; + PVRSRV_ERROR eError; + + PVR_ASSERT(uiSize != 0); + ui32Log2NumPages = OSGetOrder(uiSize); + uiSize = (1 << ui32Log2NumPages) * OSGetPageSize(); + + PVR_ASSERT(ui32OSid < GPUVIRT_VALIDATION_NUM_OS); + if (ui32OSid >= GPUVIRT_VALIDATION_NUM_OS) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Arena index %u defaulting to 0", + __func__, ui32OSid)); + ui32OSid = 0; + } + pArena = psDevNode->psOSidSubArena[ui32OSid]; + + if (psMemHandle->uiOSid != ui32OSid) + { + PVR_LOG(("%s: Unexpected OSid value %u - expecting %u", __func__, + psMemHandle->uiOSid, ui32OSid)); + } + + psMemHandle->uiOSid = ui32OSid; /* For Free() use */ + + eError = _LMA_DoPhyContigPagesAlloc(pArena, uiSize, psMemHandle, + psDevPAddr, uiPid); + PVR_LOG_IF_ERROR(eError, "_LMA_DoPhyContigPagesAlloc"); + + return eError; +} +#endif + +PVRSRV_ERROR LMA_PhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize, + PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, + IMG_PID uiPid) +{ + PVRSRV_ERROR eError; + + RA_ARENA *pArena = psDevNode->sDevMMUPxSetup.psPxRA; + IMG_UINT32 ui32Log2NumPages = 0; + + PVR_ASSERT(uiSize != 0); + ui32Log2NumPages = OSGetOrder(uiSize); + uiSize = (1 << ui32Log2NumPages) * OSGetPageSize(); + + 
eError = _LMA_DoPhyContigPagesAlloc(pArena, uiSize, psMemHandle, + psDevPAddr, uiPid); + PVR_LOG_IF_ERROR(eError, "_LMA_DoPhyContigPagesAlloc"); + + return eError; +} + +void LMA_PhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle) +{ + RA_BASE_T uiCardAddr = (RA_BASE_T) psMemHandle->u.ui64Handle; + RA_ARENA *pArena; +#if defined(SUPPORT_GPUVIRT_VALIDATION) + IMG_UINT32 ui32OSid = psMemHandle->uiOSid; + + /* + * The Arena ID is set by the originating allocation, and maintained via + * the call stacks into this function. We have a limited range of IDs + * and if the passed value falls outside this we simply treat it as a + * 'global' arena ID of 0. This is where all default OS-specific allocations + * are created. + */ + PVR_ASSERT(ui32OSid < GPUVIRT_VALIDATION_NUM_OS); + if (ui32OSid >= GPUVIRT_VALIDATION_NUM_OS) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Arena index %u PhysAddr 0x%" + IMG_UINT64_FMTSPECx " Reverting to Arena 0", __func__, + ui32OSid, uiCardAddr)); + /* + * No way of determining what we're trying to free so default to the + * global default arena index 0. 
+ */ + ui32OSid = 0; + } + + pArena = psDevNode->psOSidSubArena[ui32OSid]; + + PVR_DPF((PVR_DBG_MESSAGE, "%s: (GPU Virtualisation) Freeing 0x%" + IMG_UINT64_FMTSPECx ", Arena %u", __func__, + uiCardAddr, ui32OSid)); + +#else + pArena = psDevNode->sDevMMUPxSetup.psPxRA; +#endif + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, + (IMG_UINT64)uiCardAddr); +#else + PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, + (IMG_UINT64)uiCardAddr, + OSGetCurrentClientProcessIDKM()); +#endif +#endif + + RA_Free(pArena, uiCardAddr); + psMemHandle->uiOrder = 0; +} + +PVRSRV_ERROR LMA_PhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle, + size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr, + void **pvPtr) +{ + IMG_CPU_PHYADDR sCpuPAddr; + IMG_UINT32 ui32NumPages = (1 << psMemHandle->uiOrder); + PVR_UNREFERENCED_PARAMETER(psMemHandle); + PVR_UNREFERENCED_PARAMETER(uiSize); + + PhysHeapDevPAddrToCpuPAddr(psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL], 1, &sCpuPAddr, psDevPAddr); +#ifdef CONFIG_MCST + *pvPtr = OSMapPhysToLin(sCpuPAddr, *psDevPAddr, + ui32NumPages * OSGetPageSize(), + PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE); +#else + *pvPtr = OSMapPhysToLin(sCpuPAddr, + ui32NumPages * OSGetPageSize(), + PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE); +#endif + PVR_RETURN_IF_NOMEM(*pvPtr); + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, + ui32NumPages * OSGetPageSize(), + OSGetCurrentClientProcessIDKM()); +#else + { + PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, + *pvPtr, + sCpuPAddr, + ui32NumPages * OSGetPageSize(), + NULL, + OSGetCurrentClientProcessIDKM() + DEBUG_MEMSTATS_VALUES); + } +#endif +#endif + return PVRSRV_OK; +} + +void LMA_PhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE 
*psMemHandle, + void *pvPtr) +{ + IMG_UINT32 ui32NumPages = (1 << psMemHandle->uiOrder); + PVR_UNREFERENCED_PARAMETER(psMemHandle); + PVR_UNREFERENCED_PARAMETER(psDevNode); + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, + ui32NumPages * OSGetPageSize(), + OSGetCurrentClientProcessIDKM()); +#else + PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, + (IMG_UINT64)(uintptr_t)pvPtr, + OSGetCurrentClientProcessIDKM()); +#endif +#endif + + OSUnMapPhysToLin(pvPtr, ui32NumPages * OSGetPageSize(), + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); +} + +PVRSRV_ERROR LMA_PhyContigPagesClean(PVRSRV_DEVICE_NODE *psDevNode, + PG_HANDLE *psMemHandle, + IMG_UINT32 uiOffset, + IMG_UINT32 uiLength) +{ + /* No need to flush because we map as uncached */ + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(psMemHandle); + PVR_UNREFERENCED_PARAMETER(uiOffset); + PVR_UNREFERENCED_PARAMETER(uiLength); + + return PVRSRV_OK; +} + +IMG_BOOL IsPhysmemNewRamBackedByLMA(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEVICE_PHYS_HEAP ePhysHeapIdx) +{ + return psDeviceNode->pfnCreateRamBackedPMR[ePhysHeapIdx] == PhysmemNewLocalRamBackedPMR; +} + +/**************************************************************************/ /*! +@Function PVRSRVDeviceFinalise +@Description Performs the final parts of device initialisation. 
+@Input psDeviceNode Device node of the device to finish + initialising +@Input bInitSuccessful Whether or not device specific + initialisation was successful +@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise +*/ /***************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV PVRSRVDeviceFinalise(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bInitSuccessful) +{ + PVRSRV_ERROR eError; + __maybe_unused PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice); + + if (bInitSuccessful) + { + eError = SyncCheckpointContextCreate(psDeviceNode, + &psDeviceNode->hSyncCheckpointContext); + PVR_LOG_GOTO_IF_ERROR(eError, "SyncCheckpointContextCreate", ErrorExit); +#if defined(SUPPORT_FALLBACK_FENCE_SYNC) + eError = SyncFbRegisterDevice(psDeviceNode); + PVR_GOTO_IF_ERROR(eError, ErrorExit); +#endif + eError = SyncPrimContextCreate(psDeviceNode, + &psDeviceNode->hSyncPrimContext); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to create sync prim context (%s)", + __func__, PVRSRVGetErrorString(eError))); + SyncCheckpointContextDestroy(psDeviceNode->hSyncCheckpointContext); + goto ErrorExit; + } + + /* Allocate MMU cache invalidate sync */ + eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext, + &psDeviceNode->psMMUCacheSyncPrim, + "pvrsrv dev MMU cache"); + PVR_LOG_GOTO_IF_ERROR(eError, "SyncPrimAlloc", ErrorExit); + + /* Set the sync prim value to a much higher value near the + * wrapping range. This is so any wrapping bugs would be + * seen early in the driver start-up. + */ + SyncPrimSet(psDeviceNode->psMMUCacheSyncPrim, 0xFFFFFFF6UL); + + /* Next update value will be 0xFFFFFFF7 since sync prim starts with 0xFFFFFFF6 */ + psDeviceNode->ui32NextMMUInvalidateUpdate = 0xFFFFFFF7UL; + + eError = PVRSRVPowerLock(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVPowerLock", ErrorExit); + + /* + * Always ensure a single power on command appears in the pdump. 
This + * should be the only power related call outside of PDUMPPOWCMDSTART + * and PDUMPPOWCMDEND. + */ + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, + PVRSRV_DEV_POWER_STATE_ON, IMG_TRUE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to set device %p power state to 'on' (%s)", + __func__, psDeviceNode, PVRSRVGetErrorString(eError))); + PVRSRVPowerUnlock(psDeviceNode); + goto ErrorExit; + } + +#if defined(SUPPORT_EXTRA_METASP_DEBUG) + eError = ValidateFWOnLoad(psDeviceNode->pvDevice); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to verify FW code (%s)", + __func__, PVRSRVGetErrorString(eError))); + PVRSRVPowerUnlock(psDeviceNode); + return eError; + } +#endif + + /* Verify firmware compatibility for device */ + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + /* defer the compatibility checks in case of Guest Mode until after + * the first kick was submitted, as the firmware only fills the + * compatibility data then. */ + eError = PVRSRV_OK; + } + else + { + eError = PVRSRVDevInitCompatCheck(psDeviceNode); + } + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed compatibility check for device %p (%s)", + __func__, psDeviceNode, PVRSRVGetErrorString(eError))); + PVRSRVPowerUnlock(psDeviceNode); + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + goto ErrorExit; + } + + PDUMPPOWCMDSTART(); + + /* Force the device to idle if its default power state is off */ + eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, + &PVRSRVDeviceIsDefaultStateOFF, + IMG_TRUE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Forced idle request failure (%s)", + __func__, PVRSRVGetErrorString(eError))); + if (eError != PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) + { + PVRSRVPowerUnlock(psDeviceNode); + } + goto ErrorExit; + } + + /* Place device into its default power state. 
*/ + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, + PVRSRV_DEV_POWER_STATE_DEFAULT, + IMG_TRUE); + PDUMPPOWCMDEND(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to set device %p into its default power state (%s)", + __func__, psDeviceNode, PVRSRVGetErrorString(eError))); + + PVRSRVPowerUnlock(psDeviceNode); + goto ErrorExit; + } + + PVRSRVPowerUnlock(psDeviceNode); + + /* + * If PDUMP is enabled and RGX device is supported, then initialise the + * performance counters that can be further modified in PDUMP. Then, + * before ending the init phase of the pdump, drain the commands put in + * the kCCB during the init phase. + */ +#if defined(SUPPORT_RGX) +#if defined(PDUMP) + { + eError = RGXInitHWPerfCounters(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitHWPerfCounters", ErrorExit); + + eError = RGXPdumpDrainKCCB(psDevInfo, + psDevInfo->psKernelCCBCtl->ui32WriteOffset); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXPdumpDrainKCCB", ErrorExit); + } +#endif + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { +#if defined(SUPPORT_AUTOVZ) + /* AutoVz Guest drivers expect the firmware to have set its end of the + * connection to Ready state by now. Poll indefinitely otherwise. */ + if (!KM_FW_CONNECTION_IS(READY, psDevInfo)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is not in Ready state. Waiting for Firmware ...", __func__)); + } + while (!KM_FW_CONNECTION_IS(READY, psDevInfo)) + { + OSWaitus(1000000); + } + PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware Connection is Ready. 
Initialisation proceeding.", __func__)); +#if defined(SUPPORT_AUTOVZ_HW_REGS) + /* Guest can only access the register holding the connection states, after the GPU is confirmed to be powered up */ + KM_SET_OS_CONNECTION(READY, psDevInfo); +#endif /* defined(SUPPORT_AUTOVZ_HW_REGS) */ +#endif /* defined(SUPPORT_AUTOVZ) */ + + /* Kick an initial dummy command to make the firmware initialise all + * its internal guest OS data structures and compatibility information */ + if (RGXFWHealthCheckCmd((PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice)) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Cannot kick initial command to the Device (%s)", + __func__, PVRSRVGetErrorString(eError))); + goto ErrorExit; + } + + eError = PVRSRVDevInitCompatCheck(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed compatibility check for device %p (%s)", + __func__, psDeviceNode, PVRSRVGetErrorString(eError))); + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + goto ErrorExit; + } + } + +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1)) + { + if (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo)) + { + KM_SET_OS_CONNECTION(ACTIVE, psDevInfo); +#if defined(SUPPORT_AUTOVZ) + if (PVRSRV_VZ_MODE_IS(HOST)) + { + /* During first-time boot the flag is set here, while subsequent reboots will already + * have set it earlier in RGXInit. Set to true from this point onwards in any case. 
*/ + psDeviceNode->bAutoVzFwIsUp = IMG_TRUE; + } +#endif + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: Connection can't be established, firmware not ready", __func__)); + eError = PVRSRV_ERROR_NOT_INITIALISED; + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + goto ErrorExit; + } + } +#endif +#endif /* defined(SUPPORT_RGX) */ + /* Now that the device(s) are fully initialised set them as active */ + psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_ACTIVE; + eError = PVRSRV_OK; + } + else + { + /* Initialisation failed so set the device(s) into a bad state */ + psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_BAD; + eError = PVRSRV_ERROR_NOT_INITIALISED; + } + + /* Give PDump control a chance to end the init phase, depends on OS */ + PDumpStopInitPhase(); + + return eError; + +ErrorExit: + /* Initialisation failed so set the device(s) into a bad state */ + psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_BAD; + + return eError; +} + +PVRSRV_ERROR IMG_CALLCONV PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + /* Only check devices which specify a compatibility check callback */ + if (psDeviceNode->pfnInitDeviceCompatCheck) + return psDeviceNode->pfnInitDeviceCompatCheck(psDeviceNode); + else + return PVRSRV_OK; +} + +/* + PollForValueKM +*/ +static +PVRSRV_ERROR IMG_CALLCONV PollForValueKM (volatile IMG_UINT32 __iomem * pui32LinMemAddr, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + IMG_UINT32 ui32Timeoutus, + IMG_UINT32 ui32PollPeriodus, + POLL_FLAGS ePollFlags) +{ +#if defined(NO_HARDWARE) + PVR_UNREFERENCED_PARAMETER(pui32LinMemAddr); + PVR_UNREFERENCED_PARAMETER(ui32Value); + PVR_UNREFERENCED_PARAMETER(ui32Mask); + PVR_UNREFERENCED_PARAMETER(ui32Timeoutus); + PVR_UNREFERENCED_PARAMETER(ui32PollPeriodus); + PVR_UNREFERENCED_PARAMETER(ePollFlags); + return PVRSRV_OK; +#else + IMG_UINT32 ui32ActualValue = 0xFFFFFFFFU; /* Initialiser only required to prevent incorrect warning */ + + LOOP_UNTIL_TIMEOUT(ui32Timeoutus) + { + 
ui32ActualValue = OSReadHWReg32((void __iomem *)pui32LinMemAddr, 0) & ui32Mask; + + if (ui32ActualValue == ui32Value) + { + return PVRSRV_OK; + } + + if (gpsPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + return PVRSRV_ERROR_TIMEOUT; + } + + OSWaitus(ui32PollPeriodus); + } END_LOOP_UNTIL_TIMEOUT(); + + if (BITMASK_HAS(ePollFlags, POLL_FLAG_LOG_ERROR)) + { + PVR_DPF((PVR_DBG_ERROR, + "PollForValueKM: Timeout. Expected 0x%x but found 0x%x (mask 0x%x).", + ui32Value, ui32ActualValue, ui32Mask)); + } + + return PVRSRV_ERROR_TIMEOUT; +#endif /* NO_HARDWARE */ +} + + +/* + PVRSRVPollForValueKM +*/ +PVRSRV_ERROR IMG_CALLCONV PVRSRVPollForValueKM (PVRSRV_DEVICE_NODE *psDevNode, + volatile IMG_UINT32 __iomem *pui32LinMemAddr, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + POLL_FLAGS ePollFlags) +{ + PVRSRV_ERROR eError; + + eError = PollForValueKM(pui32LinMemAddr, ui32Value, ui32Mask, + MAX_HW_TIME_US, + MAX_HW_TIME_US/WAIT_TRY_COUNT, + ePollFlags); + if (eError != PVRSRV_OK && BITMASK_HAS(ePollFlags, POLL_FLAG_DEBUG_DUMP)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed! 
Error(%s) CPU linear address(%p) Expected value(%u)", + __func__, PVRSRVGetErrorString(eError), + pui32LinMemAddr, ui32Value)); + PVRSRVDebugRequest(psDevNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + } + + return eError; +} + +PVRSRV_ERROR IMG_CALLCONV +PVRSRVWaitForValueKM(volatile IMG_UINT32 __iomem *pui32LinMemAddr, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask) +{ +#if defined(NO_HARDWARE) + PVR_UNREFERENCED_PARAMETER(pui32LinMemAddr); + PVR_UNREFERENCED_PARAMETER(ui32Value); + PVR_UNREFERENCED_PARAMETER(ui32Mask); + return PVRSRV_OK; +#else + + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + IMG_HANDLE hOSEvent; + PVRSRV_ERROR eError; + PVRSRV_ERROR eErrorWait; + IMG_UINT32 ui32ActualValue; + + eError = OSEventObjectOpen(psPVRSRVData->hGlobalEventObject, &hOSEvent); + PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectOpen", EventObjectOpenError); + + eError = PVRSRV_ERROR_TIMEOUT; /* Initialiser for following loop */ + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + ui32ActualValue = (OSReadDeviceMem32(pui32LinMemAddr) & ui32Mask); + + if (ui32ActualValue == ui32Value) + { + /* Expected value has been found */ + eError = PVRSRV_OK; + break; + } + else if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + /* Services in bad state, don't wait any more */ + eError = PVRSRV_ERROR_NOT_READY; + break; + } + else + { + /* wait for event and retry */ + eErrorWait = OSEventObjectWait(hOSEvent); + if (eErrorWait != PVRSRV_OK && eErrorWait != PVRSRV_ERROR_TIMEOUT) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Failed with error %d. Found value 0x%x but was expected " + "to be 0x%x (Mask 0x%08x). Retrying", + __func__, + eErrorWait, + ui32ActualValue, + ui32Value, + ui32Mask)); + } + } + } END_LOOP_UNTIL_TIMEOUT(); + + OSEventObjectClose(hOSEvent); + + /* One last check in case the object wait ended after the loop timeout... 
*/ + if (eError != PVRSRV_OK && + (OSReadDeviceMem32(pui32LinMemAddr) & ui32Mask) == ui32Value) + { + eError = PVRSRV_OK; + } + + /* Provide event timeout information to aid the Device Watchdog Thread... */ + if (eError == PVRSRV_OK) + { + psPVRSRVData->ui32GEOConsecutiveTimeouts = 0; + } + else if (eError == PVRSRV_ERROR_TIMEOUT) + { + psPVRSRVData->ui32GEOConsecutiveTimeouts++; + } + +EventObjectOpenError: + + return eError; + +#endif /* NO_HARDWARE */ +} + +int PVRSRVGetDriverStatus(void) +{ + return PVRSRVGetPVRSRVData()->eServicesState; +} + +/* + PVRSRVSystemHasCacheSnooping +*/ +IMG_BOOL PVRSRVSystemHasCacheSnooping(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + if ((psDevConfig->eCacheSnoopingMode != PVRSRV_DEVICE_SNOOP_NONE) && + (psDevConfig->eCacheSnoopingMode != PVRSRV_DEVICE_SNOOP_EMULATED)) + { + return IMG_TRUE; + } + return IMG_FALSE; +} + +IMG_BOOL PVRSRVSystemSnoopingIsEmulated(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + if (psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_EMULATED) + { + return IMG_TRUE; + } + return IMG_FALSE; +} + +IMG_BOOL PVRSRVSystemSnoopingOfCPUCache(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + if ((psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CPU_ONLY) || + (psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CROSS)) + { + return IMG_TRUE; + } + return IMG_FALSE; +} + +IMG_BOOL PVRSRVSystemSnoopingOfDeviceCache(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + if ((psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_DEVICE_ONLY) || + (psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CROSS)) + { + return IMG_TRUE; + } + return IMG_FALSE; +} + +IMG_BOOL PVRSRVSystemHasNonMappableLocalMemory(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + return psDevConfig->bHasNonMappableLocalMemory; +} + +/* + PVRSRVSystemWaitCycles +*/ +void PVRSRVSystemWaitCycles(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT32 ui32Cycles) +{ + /* Delay in us */ + IMG_UINT32 ui32Delayus = 1; + + /* obtain the device freq */ + if (psDevConfig->pfnClockFreqGet != 
NULL) + { + IMG_UINT32 ui32DeviceFreq; + + ui32DeviceFreq = psDevConfig->pfnClockFreqGet(psDevConfig->hSysData); + + ui32Delayus = (ui32Cycles*1000000)/ui32DeviceFreq; + + if (ui32Delayus == 0) + { + ui32Delayus = 1; + } + } + + OSWaitus(ui32Delayus); +} + +static void * +PVRSRVSystemInstallDeviceLISR_Match_AnyVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, + va_list va) +{ + void *pvOSDevice = va_arg(va, void *); + + if (psDeviceNode->psDevConfig->pvOSDevice == pvOSDevice) + { + return psDeviceNode; + } + + return NULL; +} + +PVRSRV_ERROR PVRSRVSystemInstallDeviceLISR(void *pvOSDevice, + IMG_UINT32 ui32IRQ, + const IMG_CHAR *pszName, + PFN_LISR pfnLISR, + void *pvData, + IMG_HANDLE *phLISRData) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDeviceNode; + + psDeviceNode = + List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + &PVRSRVSystemInstallDeviceLISR_Match_AnyVaCb, + pvOSDevice); + if (!psDeviceNode) + { + /* Device can't be found in the list so it isn't in the system */ + PVR_DPF((PVR_DBG_ERROR, "%s: device %p with irq %d is not present", + __func__, pvOSDevice, ui32IRQ)); + return PVRSRV_ERROR_INVALID_DEVICE; + } + + return SysInstallDeviceLISR(psDeviceNode->psDevConfig->hSysData, ui32IRQ, + pszName, pfnLISR, pvData, phLISRData); +} + +PVRSRV_ERROR PVRSRVSystemUninstallDeviceLISR(IMG_HANDLE hLISRData) +{ + return SysUninstallDeviceLISR(hLISRData); +} + +#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR) +/* functions only used on rogue, but header defining them is common */ +void SetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState) +{ + SysSetAxiProtOSid(ui32OSid, bState); + return; +} + +void SetTrustedDeviceAceEnabled(void) +{ + SysSetTrustedDeviceAceEnabled(); + return; +} +#endif + +#if defined(SUPPORT_RGX) +PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateHWPerfHostThread(IMG_UINT32 ui32Timeout) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if (!ui32Timeout) + return PVRSRV_ERROR_INVALID_PARAMS; + + 
OSLockAcquire(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); + + /* Create only once */ + if (gpsPVRSRVData->hHWPerfHostPeriodicThread == NULL) + { + /* Create the HWPerf event object */ + eError = OSEventObjectCreate("PVRSRV_HWPERFHOSTPERIODIC_EVENTOBJECT", &gpsPVRSRVData->hHWPerfHostPeriodicEvObj); + + if (eError == PVRSRV_OK) + { + gpsPVRSRVData->bHWPerfHostThreadStop = IMG_FALSE; + gpsPVRSRVData->ui32HWPerfHostThreadTimeout = ui32Timeout; + /* Create a thread which is used to periodically emit host stream packets */ + eError = OSThreadCreate(&gpsPVRSRVData->hHWPerfHostPeriodicThread, + "pvr_hwperf_host", + HWPerfPeriodicHostEventsThread, + NULL, IMG_TRUE, gpsPVRSRVData); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create HWPerf host periodic thread", __func__)); + } + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: OSEventObjectCreate failed", __func__)); + } + } + /* If the thread has already been created then just update the timeout and wake up thread */ + else + { + gpsPVRSRVData->ui32HWPerfHostThreadTimeout = ui32Timeout; + eError = OSEventObjectSignal(gpsPVRSRVData->hHWPerfHostPeriodicEvObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + } + + OSLockRelease(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); + return eError; +} + +PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyHWPerfHostThread(void) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + OSLockAcquire(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); + + /* Stop and cleanup the HWPerf periodic thread */ + if (gpsPVRSRVData->hHWPerfHostPeriodicThread) + { + if (gpsPVRSRVData->hHWPerfHostPeriodicEvObj) + { + gpsPVRSRVData->bHWPerfHostThreadStop = IMG_TRUE; + eError = OSEventObjectSignal(gpsPVRSRVData->hHWPerfHostPeriodicEvObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + } + LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US) + { + eError = OSThreadDestroy(gpsPVRSRVData->hHWPerfHostPeriodicThread); + if (PVRSRV_OK == eError) + { + gpsPVRSRVData->hHWPerfHostPeriodicThread = NULL; 
+ break; + } + OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); + + if (gpsPVRSRVData->hHWPerfHostPeriodicEvObj) + { + eError = OSEventObjectDestroy(gpsPVRSRVData->hHWPerfHostPeriodicEvObj); + gpsPVRSRVData->hHWPerfHostPeriodicEvObj = NULL; + PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); + } + } + + OSLockRelease(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); + return eError; +} +#endif + +/***************************************************************************** + End of file (pvrsrv.c) +*****************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/common/pvrsrv_bridge_init.c b/drivers/mcst/gpu-imgtec/services/server/common/pvrsrv_bridge_init.c new file mode 100644 index 000000000000..ee0774b77171 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/pvrsrv_bridge_init.c @@ -0,0 +1,453 @@ +/*************************************************************************/ /*! +@File +@Title PVR Common Bridge Init/Deinit Module (kernel side) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements common PVR Bridge init/deinit code +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pvrsrv_bridge_init.h" +#include "srvcore.h" + +/* These will go when full bridge gen comes in */ +#if defined(PDUMP) +PVRSRV_ERROR InitPDUMPCTRLBridge(void); +PVRSRV_ERROR DeinitPDUMPCTRLBridge(void); +PVRSRV_ERROR InitPDUMPBridge(void); +PVRSRV_ERROR DeinitPDUMPBridge(void); +PVRSRV_ERROR InitRGXPDUMPBridge(void); +PVRSRV_ERROR DeinitRGXPDUMPBridge(void); +#endif +#if defined(SUPPORT_DISPLAY_CLASS) +PVRSRV_ERROR InitDCBridge(void); +PVRSRV_ERROR DeinitDCBridge(void); +#endif +PVRSRV_ERROR InitMMBridge(void); +PVRSRV_ERROR DeinitMMBridge(void); +#if !defined(EXCLUDE_CMM_BRIDGE) +PVRSRV_ERROR InitCMMBridge(void); +PVRSRV_ERROR DeinitCMMBridge(void); +#endif +PVRSRV_ERROR InitPDUMPMMBridge(void); +PVRSRV_ERROR DeinitPDUMPMMBridge(void); +PVRSRV_ERROR InitSRVCOREBridge(void); +PVRSRV_ERROR DeinitSRVCOREBridge(void); +PVRSRV_ERROR InitSYNCBridge(void); +PVRSRV_ERROR DeinitSYNCBridge(void); + +#if defined(SUPPORT_RGX) +PVRSRV_ERROR InitRGXTA3DBridge(void); +PVRSRV_ERROR DeinitRGXTA3DBridge(void); +#if defined(SUPPORT_RGXTQ_BRIDGE) +PVRSRV_ERROR InitRGXTQBridge(void); +PVRSRV_ERROR DeinitRGXTQBridge(void); +#endif /* defined(SUPPORT_RGXTQ_BRIDGE) */ +PVRSRV_ERROR InitRGXTQ2Bridge(void); +PVRSRV_ERROR DeinitRGXTQ2Bridge(void); +PVRSRV_ERROR InitRGXCMPBridge(void); +PVRSRV_ERROR DeinitRGXCMPBridge(void); +#if defined(SUPPORT_USC_BREAKPOINT) +PVRSRV_ERROR InitRGXBREAKPOINTBridge(void); +PVRSRV_ERROR DeinitRGXBREAKPOINTBridge(void); +#endif +PVRSRV_ERROR InitRGXFWDBGBridge(void); +PVRSRV_ERROR DeinitRGXFWDBGBridge(void); +PVRSRV_ERROR InitRGXHWPERFBridge(void); +PVRSRV_ERROR DeinitRGXHWPERFBridge(void); +#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) +PVRSRV_ERROR InitRGXREGCONFIGBridge(void); +PVRSRV_ERROR DeinitRGXREGCONFIGBridge(void); +#endif +PVRSRV_ERROR InitRGXKICKSYNCBridge(void); +PVRSRV_ERROR DeinitRGXKICKSYNCBridge(void); +PVRSRV_ERROR InitRGXSIGNALSBridge(void); +PVRSRV_ERROR 
DeinitRGXSIGNALSBridge(void); +#endif /* SUPPORT_RGX */ +PVRSRV_ERROR InitCACHEBridge(void); +PVRSRV_ERROR DeinitCACHEBridge(void); +#if defined(SUPPORT_SECURE_EXPORT) +PVRSRV_ERROR InitSMMBridge(void); +PVRSRV_ERROR DeinitSMMBridge(void); +#endif +#if !defined(EXCLUDE_HTBUFFER_BRIDGE) +PVRSRV_ERROR InitHTBUFFERBridge(void); +PVRSRV_ERROR DeinitHTBUFFERBridge(void); +#endif +PVRSRV_ERROR InitPVRTLBridge(void); +PVRSRV_ERROR DeinitPVRTLBridge(void); +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +PVRSRV_ERROR InitRIBridge(void); +PVRSRV_ERROR DeinitRIBridge(void); +#endif +PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void); +PVRSRV_ERROR DeinitDEVICEMEMHISTORYBridge(void); +#if defined(SUPPORT_VALIDATION_BRIDGE) +PVRSRV_ERROR InitVALIDATIONBridge(void); +PVRSRV_ERROR DeinitVALIDATIONBridge(void); +#endif +#if defined(PVR_TESTING_UTILS) +PVRSRV_ERROR InitTUTILSBridge(void); +PVRSRV_ERROR DeinitTUTILSBridge(void); +#endif +PVRSRV_ERROR InitSYNCTRACKINGBridge(void); +PVRSRV_ERROR DeinitSYNCTRACKINGBridge(void); +#if defined(SUPPORT_WRAP_EXTMEM) +PVRSRV_ERROR InitMMEXTMEMBridge(void); +PVRSRV_ERROR DeinitMMEXTMEMBridge(void); +#endif +#if defined(SUPPORT_FALLBACK_FENCE_SYNC) +PVRSRV_ERROR InitSYNCFALLBACKBridge(void); +PVRSRV_ERROR DeinitSYNCFALLBACKBridge(void); +#endif + + +PVRSRV_ERROR +ServerBridgeInit(void) +{ + PVRSRV_ERROR eError; + + BridgeDispatchTableStartOffsetsInit(); + + eError = InitSRVCOREBridge(); + PVR_LOG_IF_ERROR(eError, "InitSRVCOREBridge"); + + eError = InitSYNCBridge(); + PVR_LOG_IF_ERROR(eError, "InitSYNCBridge"); + +#if defined(PDUMP) + eError = InitPDUMPCTRLBridge(); + PVR_LOG_IF_ERROR(eError, "InitPDUMPCTRLBridge"); +#endif + + eError = InitMMBridge(); + PVR_LOG_IF_ERROR(eError, "InitMMBridge"); + +#if !defined(EXCLUDE_CMM_BRIDGE) + eError = InitCMMBridge(); + PVR_LOG_IF_ERROR(eError, "InitCMMBridge"); +#endif + +#if defined(PDUMP) + eError = InitPDUMPMMBridge(); + PVR_LOG_IF_ERROR(eError, "InitPDUMPMMBridge"); + + eError = InitPDUMPBridge(); + 
PVR_LOG_IF_ERROR(eError, "InitPDUMPBridge"); +#endif + +#if defined(SUPPORT_DISPLAY_CLASS) + eError = InitDCBridge(); + PVR_LOG_IF_ERROR(eError, "InitDCBridge"); +#endif + + eError = InitCACHEBridge(); + PVR_LOG_IF_ERROR(eError, "InitCACHEBridge"); + +#if defined(SUPPORT_SECURE_EXPORT) + eError = InitSMMBridge(); + PVR_LOG_IF_ERROR(eError, "InitSMMBridge"); +#endif + +#if !defined(EXCLUDE_HTBUFFER_BRIDGE) + eError = InitHTBUFFERBridge(); + PVR_LOG_IF_ERROR(eError, "InitHTBUFFERBridge"); +#endif + + eError = InitPVRTLBridge(); + PVR_LOG_IF_ERROR(eError, "InitPVRTLBridge"); + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + eError = InitRIBridge(); + PVR_LOG_IF_ERROR(eError, "InitRIBridge"); +#endif + +#if defined(SUPPORT_VALIDATION_BRIDGE) + eError = InitVALIDATIONBridge(); + PVR_LOG_IF_ERROR(eError, "InitVALIDATIONBridge"); +#endif + +#if defined(PVR_TESTING_UTILS) + eError = InitTUTILSBridge(); + PVR_LOG_IF_ERROR(eError, "InitTUTILSBridge"); +#endif + + eError = InitDEVICEMEMHISTORYBridge(); + PVR_LOG_IF_ERROR(eError, "InitDEVICEMEMHISTORYBridge"); + + eError = InitSYNCTRACKINGBridge(); + PVR_LOG_IF_ERROR(eError, "InitSYNCTRACKINGBridge"); + +#if defined(SUPPORT_RGX) + +#if defined(SUPPORT_RGXTQ_BRIDGE) + eError = InitRGXTQBridge(); + PVR_LOG_IF_ERROR(eError, "InitRGXTQBridge"); +#endif /* defined(SUPPORT_RGXTQ_BRIDGE) */ + + eError = InitRGXTA3DBridge(); + PVR_LOG_IF_ERROR(eError, "InitRGXTA3DBridge"); + + #if defined(SUPPORT_USC_BREAKPOINT) + eError = InitRGXBREAKPOINTBridge(); + PVR_LOG_IF_ERROR(eError, "InitRGXBREAKPOINTBridge"); +#endif + + eError = InitRGXFWDBGBridge(); + PVR_LOG_IF_ERROR(eError, "InitRGXFWDBGBridge"); + +#if defined(PDUMP) + eError = InitRGXPDUMPBridge(); + PVR_LOG_IF_ERROR(eError, "InitRGXPDUMPBridge"); +#endif + + eError = InitRGXHWPERFBridge(); + PVR_LOG_IF_ERROR(eError, "InitRGXHWPERFBridge"); + +#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) + eError = InitRGXREGCONFIGBridge(); + PVR_LOG_IF_ERROR(eError, "InitRGXREGCONFIGBridge"); +#endif + + 
eError = InitRGXKICKSYNCBridge(); + PVR_LOG_IF_ERROR(eError, "InitRGXKICKSYNCBridge"); + +#endif /* SUPPORT_RGX */ + +#if defined(SUPPORT_WRAP_EXTMEM) + eError = InitMMEXTMEMBridge(); + PVR_LOG_IF_ERROR(eError, "InitMMEXTMEMBridge"); +#endif + +#if defined(SUPPORT_FALLBACK_FENCE_SYNC) + eError = InitSYNCFALLBACKBridge(); + PVR_LOG_IF_ERROR(eError, "InitSYNCFALLBACKBridge"); +#endif + + eError = OSPlatformBridgeInit(); + PVR_LOG_IF_ERROR(eError, "OSPlatformBridgeInit"); + + return eError; +} + +PVRSRV_ERROR +ServerBridgeDeInit(void) +{ + PVRSRV_ERROR eError; + + eError = OSPlatformBridgeDeInit(); + PVR_LOG_IF_ERROR(eError, "OSPlatformBridgeDeInit"); + +#if defined(SUPPORT_FALLBACK_FENCE_SYNC) + eError = DeinitSYNCFALLBACKBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitSYNCFALLBACKBridge"); +#endif + +#if defined(SUPPORT_WRAP_EXTMEM) + eError = DeinitMMEXTMEMBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitMMEXTMEMBridge"); +#endif + + eError = DeinitSRVCOREBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitSRVCOREBridge"); + + eError = DeinitSYNCBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitSYNCBridge"); + +#if defined(PDUMP) + eError = DeinitPDUMPCTRLBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitPDUMPCTRLBridge"); +#endif + + eError = DeinitMMBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitMMBridge"); + +#if !defined(EXCLUDE_CMM_BRIDGE) + eError = DeinitCMMBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitCMMBridge"); +#endif + +#if defined(PDUMP) + eError = DeinitPDUMPMMBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitPDUMPMMBridge"); + + eError = DeinitPDUMPBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitPDUMPBridge"); +#endif + +#if defined(PVR_TESTING_UTILS) + eError = DeinitTUTILSBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitTUTILSBridge"); +#endif + +#if defined(SUPPORT_DISPLAY_CLASS) + eError = DeinitDCBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitDCBridge"); +#endif + + eError = DeinitCACHEBridge(); + 
PVR_LOG_RETURN_IF_ERROR(eError, "DeinitCACHEBridge"); + +#if defined(SUPPORT_SECURE_EXPORT) + eError = DeinitSMMBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitSMMBridge"); +#endif + +#if !defined(EXCLUDE_HTBUFFER_BRIDGE) + eError = DeinitHTBUFFERBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitHTBUFFERBridge"); +#endif + + eError = DeinitPVRTLBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitPVRTLBridge"); + +#if defined(SUPPORT_VALIDATION_BRIDGE) + eError = DeinitVALIDATIONBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitVALIDATIONBridge"); +#endif + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + eError = DeinitRIBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRIBridge"); +#endif + + eError = DeinitDEVICEMEMHISTORYBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitDEVICEMEMHISTORYBridge"); + + eError = DeinitSYNCTRACKINGBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitSYNCTRACKINGBridge"); + +#if defined(SUPPORT_RGX) + +#if defined(SUPPORT_RGXTQ_BRIDGE) + eError = DeinitRGXTQBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXTQBridge"); +#endif /* defined(SUPPORT_RGXTQ_BRIDGE) */ + + eError = DeinitRGXTA3DBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXTA3DBridge"); + +#if defined(SUPPORT_USC_BREAKPOINT) + eError = DeinitRGXBREAKPOINTBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXBREAKPOINTBridge"); +#endif + + eError = DeinitRGXFWDBGBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXFWDBGBridge"); + +#if defined(PDUMP) + eError = DeinitRGXPDUMPBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXPDUMPBridge"); +#endif + + eError = DeinitRGXHWPERFBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXHWPERFBridge"); + +#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) + eError = DeinitRGXREGCONFIGBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXREGCONFIGBridge"); +#endif + + eError = DeinitRGXKICKSYNCBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXKICKSYNCBridge"); + +#endif /* SUPPORT_RGX */ + + return eError; +} + +#if 
defined(SUPPORT_RGX) +PVRSRV_ERROR +DeviceDepBridgeInit(IMG_UINT64 ui64Features) +{ + PVRSRV_ERROR eError; + +#if defined(RGX_FEATURE_COMPUTE_BIT_MASK) + if (ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK) +#endif + { + eError = InitRGXCMPBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "InitRGXCMPBridge"); + } + + if (ui64Features & RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK) + { + eError = InitRGXSIGNALSBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "InitRGXSIGNALSBridge"); + } + +#if defined(RGX_FEATURE_FASTRENDER_DM_BIT_MASK) + if (ui64Features & RGX_FEATURE_FASTRENDER_DM_BIT_MASK) +#endif + { + eError = InitRGXTQ2Bridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "InitRGXTQ2Bridge"); + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR +DeviceDepBridgeDeInit(IMG_UINT64 ui64Features) +{ + PVRSRV_ERROR eError; + +#if defined(RGX_FEATURE_COMPUTE_BIT_MASK) + if (ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK) +#endif + { + eError = DeinitRGXCMPBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXCMPBridge"); + } + + if (ui64Features & RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK) + { + eError = DeinitRGXSIGNALSBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXSIGNALSBridge"); + } + +#if defined(RGX_FEATURE_FASTRENDER_DM_BIT_MASK) + if (ui64Features & RGX_FEATURE_FASTRENDER_DM_BIT_MASK) +#endif + { + eError = DeinitRGXTQ2Bridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXTQ2Bridge"); + } + + return PVRSRV_OK; +} +#endif /* SUPPORT_RGX */ diff --git a/drivers/mcst/gpu-imgtec/services/server/common/pvrsrv_pool.c b/drivers/mcst/gpu-imgtec/services/server/common/pvrsrv_pool.c new file mode 100644 index 000000000000..d62a062a944c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/pvrsrv_pool.c @@ -0,0 +1,260 @@ +/**************************************************************************/ /*! +@File +@Title Services pool implementation +@Copyright Copyright (c) Imagination Technologies Ltd.
All Rights Reserved +@Description Provides a generic pool implementation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvrsrv.h" +#include "lock.h" +#include "dllist.h" +#include "allocmem.h" + +struct _PVRSRV_POOL_ +{ + POS_LOCK hLock; + /* total max number of permitted entries in the pool */ + IMG_UINT uiMaxEntries; + /* currently number of pool entries created. these may be in the pool + * or in-use + */ + IMG_UINT uiNumBusy; + /* number of not-in-use entries currently free in the pool */ + IMG_UINT uiNumFree; + + DLLIST_NODE sFreeList; + + const IMG_CHAR *pszName; + + PVRSRV_POOL_ALLOC_FUNC *pfnAlloc; + PVRSRV_POOL_FREE_FUNC *pfnFree; + void *pvPrivData; +}; + +typedef struct _PVRSRV_POOL_ENTRY_ +{ + DLLIST_NODE sNode; + void *pvData; +} PVRSRV_POOL_ENTRY; + +PVRSRV_ERROR PVRSRVPoolCreate(PVRSRV_POOL_ALLOC_FUNC *pfnAlloc, + PVRSRV_POOL_FREE_FUNC *pfnFree, + IMG_UINT32 ui32MaxEntries, + const IMG_CHAR *pszName, + void *pvPrivData, + PVRSRV_POOL **ppsPool) +{ + PVRSRV_POOL *psPool; + PVRSRV_ERROR eError; + + psPool = OSAllocMem(sizeof(PVRSRV_POOL)); + PVR_GOTO_IF_NOMEM(psPool, eError, err_alloc); + + eError = OSLockCreate(&psPool->hLock); + + PVR_GOTO_IF_ERROR(eError, err_lock_create); + + psPool->uiMaxEntries = ui32MaxEntries; + psPool->uiNumBusy = 0; + psPool->uiNumFree = 0; + psPool->pfnAlloc = pfnAlloc; + psPool->pfnFree = pfnFree; + psPool->pvPrivData = pvPrivData; + psPool->pszName = pszName; + + 
dllist_init(&psPool->sFreeList); + + *ppsPool = psPool; + + return PVRSRV_OK; + +err_lock_create: + OSFreeMem(psPool); +err_alloc: + return eError; +} + +static PVRSRV_ERROR _DestroyPoolEntry(PVRSRV_POOL *psPool, + PVRSRV_POOL_ENTRY *psEntry) +{ + psPool->pfnFree(psPool->pvPrivData, psEntry->pvData); + OSFreeMem(psEntry); + + return PVRSRV_OK; +} + +void PVRSRVPoolDestroy(PVRSRV_POOL *psPool) +{ + if (psPool->uiNumBusy != 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Attempt to destroy pool %s " + "with %u entries still in use", + __func__, + psPool->pszName, + psPool->uiNumBusy)); + return; + } + + OSLockDestroy(psPool->hLock); + + if (psPool->uiNumFree) + { + PVRSRV_POOL_ENTRY *psEntry; + DLLIST_NODE *psChosenNode; + + psChosenNode = dllist_get_next_node(&psPool->sFreeList); + + while (psChosenNode) + { + dllist_remove_node(psChosenNode); + + psEntry = IMG_CONTAINER_OF(psChosenNode, PVRSRV_POOL_ENTRY, sNode); + _DestroyPoolEntry(psPool, psEntry); + + psPool->uiNumFree--; + + psChosenNode = dllist_get_next_node(&psPool->sFreeList); + } + + PVR_ASSERT(psPool->uiNumFree == 0); + } + + OSFreeMem(psPool); +} + +static PVRSRV_ERROR _CreateNewPoolEntry(PVRSRV_POOL *psPool, + PVRSRV_POOL_ENTRY **ppsEntry) +{ + PVRSRV_POOL_ENTRY *psNewEntry; + PVRSRV_ERROR eError; + + psNewEntry = OSAllocMem(sizeof(PVRSRV_POOL_ENTRY)); + PVR_GOTO_IF_NOMEM(psNewEntry, eError, err_allocmem); + + dllist_init(&psNewEntry->sNode); + + eError = psPool->pfnAlloc(psPool->pvPrivData, &psNewEntry->pvData); + + PVR_GOTO_IF_ERROR(eError, err_pfn_alloc); + + *ppsEntry = psNewEntry; + + return PVRSRV_OK; + +err_pfn_alloc: + OSFreeMem(psNewEntry); +err_allocmem: + return eError; +} + +PVRSRV_ERROR PVRSRVPoolGet(PVRSRV_POOL *psPool, + PVRSRV_POOL_TOKEN *hToken, + void **ppvDataOut) +{ + PVRSRV_POOL_ENTRY *psEntry; + PVRSRV_ERROR eError = PVRSRV_OK; + DLLIST_NODE *psChosenNode; + + OSLockAcquire(psPool->hLock); + + psChosenNode = dllist_get_next_node(&psPool->sFreeList); + if (unlikely(psChosenNode == NULL)) + { 
+ /* no available elements in the pool. try to create one */ + + eError = _CreateNewPoolEntry(psPool, &psEntry); + + PVR_GOTO_IF_ERROR(eError, out_unlock); + } + else + { + dllist_remove_node(psChosenNode); + + psEntry = IMG_CONTAINER_OF(psChosenNode, PVRSRV_POOL_ENTRY, sNode); + + psPool->uiNumFree--; + } + +#if defined(DEBUG) || defined(SUPPORT_VALIDATION) + /* Don't poison the IN buffer as that is copied from client and would be + * waste of cycles. + */ + OSCachedMemSet(((IMG_PBYTE)psEntry->pvData)+PVRSRV_MAX_BRIDGE_IN_SIZE, + PVRSRV_POISON_ON_ALLOC_VALUE, PVRSRV_MAX_BRIDGE_OUT_SIZE); +#endif + + psPool->uiNumBusy++; + *hToken = psEntry; + *ppvDataOut = psEntry->pvData; + +out_unlock: + OSLockRelease(psPool->hLock); + return eError; +} + +PVRSRV_ERROR PVRSRVPoolPut(PVRSRV_POOL *psPool, PVRSRV_POOL_TOKEN hToken) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_POOL_ENTRY *psEntry = hToken; + + PVR_ASSERT(psPool->uiNumBusy > 0); + + OSLockAcquire(psPool->hLock); + + /* put this entry in the pool if the pool has space, + * otherwise free it + */ + if (psPool->uiNumFree < psPool->uiMaxEntries) + { + dllist_add_to_tail(&psPool->sFreeList, &psEntry->sNode); + psPool->uiNumFree++; + } + else + { + eError = _DestroyPoolEntry(psPool, psEntry); + } + + psPool->uiNumBusy--; + + OSLockRelease(psPool->hLock); + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/common/ri_server.c b/drivers/mcst/gpu-imgtec/services/server/common/ri_server.c new file mode 100644 index 000000000000..b6334cd72e2c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/ri_server.c @@ -0,0 +1,2111 @@ +/*************************************************************************/ /*! +@File ri_server.c +@Title Resource Information (RI) server implementation +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Resource Information (RI) server functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include +#include "img_defs.h" +#include "allocmem.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" +#include "osfunc.h" + +#include "srvkm.h" +#include "lock.h" + +/* services/include */ +#include "pvr_ricommon.h" + +/* services/server/include/ */ +#include "ri_server.h" + +/* services/include/shared/ */ +#include "hash.h" +/* services/shared/include/ */ +#include "dllist.h" + +#include "pmr.h" + +/* include/device.h */ +#include "device.h" + +#if !defined(RI_UNIT_TEST) +#include "pvrsrv.h" +#endif + + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + +#define USE_RI_LOCK 1 + +/* + * Initial size use for Hash table. (Used to index the RI list entries). + */ +#define _RI_INITIAL_HASH_TABLE_SIZE 64 + +/* + * Values written to the 'valid' field of RI structures when created and + * cleared prior to being destroyed. The code can then check this value + * before accessing the provided pointer contents as a valid RI structure. + */ +#define _VALID_RI_LIST_ENTRY 0x66bccb66 +#define _VALID_RI_SUBLIST_ENTRY 0x77cddc77 +#define _INVALID 0x00000000 + +/* + * If this define is set to 1, details of the linked lists (addresses, + * prev/next ptrs, etc) are also output when function RIDumpList() is called. 
+ */ +#define _DUMP_LINKEDLIST_INFO 0 + + +typedef IMG_UINT64 _RI_BASE_T; + + +/* No +1 in SIZE macros since sizeof includes \0 byte in size */ + +#define RI_PROC_BUF_SIZE 16 + +#define RI_MEMDESC_SUM_FRMT "PID %d %s MEMDESCs Alloc'd:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K) + "\ + "Imported:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K) = "\ + "Total:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K)\n" +#define RI_MEMDESC_SUM_BUF_SIZE (sizeof(RI_MEMDESC_SUM_FRMT)+5+RI_PROC_BUF_SIZE+60) + + +#define RI_PMR_SUM_FRMT "PID %d %s PMRs Alloc'd:0x%010" IMG_UINT64_FMTSPECx ", %" IMG_UINT64_FMTSPEC "K "\ + "[Physical: 0x%010" IMG_UINT64_FMTSPECx ", %" IMG_UINT64_FMTSPEC "K]\n" +#define RI_PMR_SUM_BUF_SIZE (sizeof(RI_PMR_SUM_FRMT)+(40)) + +#define RI_PMR_ENTRY_FRMT "%%sPID:%%-5d <%%p>\t%%-%ds\t0x%%010" IMG_UINT64_FMTSPECx "\t[0x%%010" IMG_UINT64_FMTSPECx "]\t%%c" +#define RI_PMR_ENTRY_BUF_SIZE (sizeof(RI_PMR_ENTRY_FRMT)+(3+5+16+PVR_ANNOTATION_MAX_LEN+10+10)) +#define RI_PMR_ENTRY_FRMT_SIZE (sizeof(RI_PMR_ENTRY_FRMT)) + +/* Use %5d rather than %d so the output aligns in server/kernel.log, debugFS sees extra spaces */ +#define RI_MEMDESC_ENTRY_PROC_FRMT "[%5d:%s]" +#define RI_MEMDESC_ENTRY_PROC_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_PROC_FRMT)+5+16) + +#define RI_SYS_ALLOC_IMPORT_FRMT "{Import from PID %d}" +#define RI_SYS_ALLOC_IMPORT_FRMT_SIZE (sizeof(RI_SYS_ALLOC_IMPORT_FRMT)+5) +static IMG_CHAR g_szSysAllocImport[RI_SYS_ALLOC_IMPORT_FRMT_SIZE]; + +#define RI_MEMDESC_ENTRY_IMPORT_FRMT "{Import from PID %d}" +#define RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_IMPORT_FRMT)+5) + +#define RI_MEMDESC_ENTRY_UNPINNED_FRMT "{Unpinned}" +#define RI_MEMDESC_ENTRY_UNPINNED_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_UNPINNED_FRMT)) + +#define RI_MEMDESC_ENTRY_FRMT "%%sPID:%%-5d 0x%%010" IMG_UINT64_FMTSPECx "\t%%-%ds %%s\t0x%%010" IMG_UINT64_FMTSPECx "\t<%%p> %%s%%s%%s%%c" +#define RI_MEMDESC_ENTRY_BUF_SIZE 
(sizeof(RI_MEMDESC_ENTRY_FRMT)+(3+5+10+PVR_ANNOTATION_MAX_LEN+RI_MEMDESC_ENTRY_PROC_BUF_SIZE+16+\ + RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE+RI_SYS_ALLOC_IMPORT_FRMT_SIZE+RI_MEMDESC_ENTRY_UNPINNED_BUF_SIZE)) +#define RI_MEMDESC_ENTRY_FRMT_SIZE (sizeof(RI_MEMDESC_ENTRY_FRMT)) + + +#define RI_FRMT_SIZE_MAX (MAX(RI_MEMDESC_ENTRY_BUF_SIZE,\ + MAX(RI_PMR_ENTRY_BUF_SIZE,\ + MAX(RI_MEMDESC_SUM_BUF_SIZE,\ + RI_PMR_SUM_BUF_SIZE)))) + + + + +/* Structure used to make linked sublist of memory allocations (MEMDESC) */ +struct _RI_SUBLIST_ENTRY_ +{ + DLLIST_NODE sListNode; + struct _RI_LIST_ENTRY_ *psRI; + IMG_UINT32 valid; + IMG_BOOL bIsImport; + IMG_BOOL bIsSuballoc; + IMG_PID pid; + IMG_CHAR ai8ProcName[RI_PROC_BUF_SIZE]; + IMG_DEV_VIRTADDR sVAddr; + IMG_UINT64 ui64Offset; + IMG_UINT64 ui64Size; + IMG_CHAR ai8TextB[DEVMEM_ANNOTATION_MAX_LEN+1]; + DLLIST_NODE sProcListNode; +}; + +/* + * Structure used to make linked list of PMRs. Sublists of allocations + * (MEMDESCs) made from these PMRs are chained off these entries. + */ +struct _RI_LIST_ENTRY_ +{ + DLLIST_NODE sListNode; + DLLIST_NODE sSysAllocListNode; + DLLIST_NODE sSubListFirst; + IMG_UINT32 valid; + PMR *psPMR; + IMG_PID pid; + IMG_CHAR ai8ProcName[RI_PROC_BUF_SIZE]; + IMG_UINT16 ui16SubListCount; + IMG_UINT16 ui16MaxSubListCount; + IMG_UINT32 ui32RIPMRFlags; /* Flags used to indicate the type of allocation */ + IMG_UINT32 ui32Flags; /* Flags used to indicate if PMR appears in ri debugfs output */ +}; + +typedef struct _RI_LIST_ENTRY_ RI_LIST_ENTRY; +typedef struct _RI_SUBLIST_ENTRY_ RI_SUBLIST_ENTRY; + +static IMG_UINT16 g_ui16RICount; +static HASH_TABLE *g_pRIHashTable; +static IMG_UINT16 g_ui16ProcCount; +static HASH_TABLE *g_pProcHashTable; + +static POS_LOCK g_hRILock; + +/* Linked list of PMR allocations made against the PVR_SYS_ALLOC_PID and lock + * to prevent concurrent access to it. 
+ */ +static POS_LOCK g_hSysAllocPidListLock; +static DLLIST_NODE g_sSysAllocPidListHead; + +/* + * Flag used to indicate if RILock should be destroyed when final PMR entry is + * deleted, i.e. if RIDeInitKM() has already been called before that point but + * the handle manager has deferred deletion of RI entries. + */ +static IMG_BOOL bRIDeInitDeferred = IMG_FALSE; + +/* + * Used as head of linked-list of PMR RI entries - this is useful when we wish + * to iterate all PMR list entries (when we don't have a PMR ref) + */ +static DLLIST_NODE sListFirst; + +/* Function used to produce string containing info for MEMDESC RI entries (used for both debugfs and kernel log output) */ +static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry, IMG_BOOL bDebugFs, IMG_UINT16 ui16MaxStrLen, IMG_CHAR *pszEntryString); +/* Function used to produce string containing info for PMR RI entries (used for both debugfs and kernel log output) */ +static void _GeneratePMREntryString(RI_LIST_ENTRY *psRIEntry, IMG_BOOL bDebugFs, IMG_UINT16 ui16MaxStrLen, IMG_CHAR *pszEntryString); + +static PVRSRV_ERROR _DumpAllEntries (uintptr_t k, uintptr_t v); +static PVRSRV_ERROR _DeleteAllEntries (uintptr_t k, uintptr_t v); +static PVRSRV_ERROR _DeleteAllProcEntries (uintptr_t k, uintptr_t v); +static PVRSRV_ERROR _DumpList(PMR *psPMR, IMG_PID pid); +#define _RIOutput(x) PVR_LOG(x) + +#define RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS 0x1 +#define RI_FLAG_SYSALLOC_PMR 0x2 + +static IMG_UINT32 +_ProcHashFunc(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen); + +static IMG_UINT32 +_ProcHashFunc(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen) +{ + IMG_UINT32 *p = (IMG_UINT32 *)pKey; + IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINT32); + IMG_UINT32 ui; + IMG_UINT32 uHashKey = 0; + + PVR_UNREFERENCED_PARAMETER(uHashTabLen); + + for (ui = 0; ui < uKeyLen; ui++) + { + IMG_UINT32 uHashPart = *p++; + + uHashPart += (uHashPart << 12); + uHashPart ^= (uHashPart >> 22); + uHashPart += (uHashPart << 
4); + uHashPart ^= (uHashPart >> 9); + uHashPart += (uHashPart << 10); + uHashPart ^= (uHashPart >> 2); + uHashPart += (uHashPart << 7); + uHashPart ^= (uHashPart >> 12); + + uHashKey += uHashPart; + } + + return uHashKey; +} + +static IMG_BOOL +_ProcHashComp(size_t uKeySize, void *pKey1, void *pKey2); + +static IMG_BOOL +_ProcHashComp(size_t uKeySize, void *pKey1, void *pKey2) +{ + IMG_UINT32 *p1 = (IMG_UINT32 *)pKey1; + IMG_UINT32 *p2 = (IMG_UINT32 *)pKey2; + IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINT32); + IMG_UINT32 ui; + + for (ui = 0; ui < uKeyLen; ui++) + { + if (*p1++ != *p2++) + return IMG_FALSE; + } + + return IMG_TRUE; +} + +static void _RILock(void) +{ +#if (USE_RI_LOCK == 1) + OSLockAcquire(g_hRILock); +#endif +} + +static void _RIUnlock(void) +{ +#if (USE_RI_LOCK == 1) + OSLockRelease(g_hRILock); +#endif +} + +/* This value maintains a count of the number of PMRs attributed to the + * PVR_SYS_ALLOC_PID. Access to this value is protected by g_hRILock, so it + * does not need to be an ATOMIC_T. 
+ */ +static IMG_UINT32 g_ui32SysAllocPMRCount; + + +PVRSRV_ERROR RIInitKM(void) +{ + IMG_INT iCharsWritten; + PVRSRV_ERROR eError; + + bRIDeInitDeferred = IMG_FALSE; + + iCharsWritten = OSSNPrintf(g_szSysAllocImport, + RI_SYS_ALLOC_IMPORT_FRMT_SIZE, + RI_SYS_ALLOC_IMPORT_FRMT, + PVR_SYS_ALLOC_PID); + PVR_LOG_IF_FALSE((iCharsWritten>0 && iCharsWritten<(IMG_INT32)RI_SYS_ALLOC_IMPORT_FRMT_SIZE), \ + "OSSNPrintf failed to initialise g_szSysAllocImport"); + + eError = OSLockCreate(&g_hSysAllocPidListLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: OSLockCreate (g_hSysAllocPidListLock) failed (returned %d)", + __func__, + eError)); + } + dllist_init(&(g_sSysAllocPidListHead)); +#if (USE_RI_LOCK == 1) + eError = OSLockCreate(&g_hRILock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: OSLockCreate (g_hRILock) failed (returned %d)", + __func__, + eError)); + } +#endif + return eError; +} +void RIDeInitKM(void) +{ +#if (USE_RI_LOCK == 1) + if (g_ui16RICount > 0) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: called with %d entries remaining - deferring OSLockDestroy()", + __func__, + g_ui16RICount)); + bRIDeInitDeferred = IMG_TRUE; + } + else + { + OSLockDestroy(g_hRILock); + OSLockDestroy(g_hSysAllocPidListLock); + } +#endif +} + +/*! +******************************************************************************* + + @Function RILockAcquireKM + + @Description + Acquires the RI Lock (which protects the integrity of the RI + linked lists). Caller will be suspended until lock is acquired. + + @Return None + +******************************************************************************/ +void RILockAcquireKM(void) +{ + _RILock(); +} + +/*! +******************************************************************************* + + @Function RILockReleaseKM + + @Description + Releases the RI Lock (which protects the integrity of the RI + linked lists). 
+ + @Return None + +******************************************************************************/ +void RILockReleaseKM(void) +{ + _RIUnlock(); +} + +/*! +******************************************************************************* + + @Function RIWritePMREntryWithOwnerKM + + @Description + Writes a new Resource Information list entry. + The new entry will be inserted at the head of the list of + PMR RI entries and assigned the values provided. + + @input psPMR - Reference (handle) to the PMR to which this reference relates + + @input ui32Owner - PID of the process which owns the allocation. This + may not be the current process (e.g. a request to + grow a buffer may happen in the context of a kernel + thread, or we may import further resource for a + suballocation made from the FW heap which can then + also be utilized by other processes) + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIWritePMREntryWithOwnerKM(PMR *psPMR, + IMG_PID ui32Owner) +{ + PMR *pPMRHashKey = psPMR; + RI_LIST_ENTRY *psRIEntry; + uintptr_t hashData; + + /* if Hash table has not been created, create it now */ + if (!g_pRIHashTable) + { + g_pRIHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default); + g_pProcHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(IMG_PID), _ProcHashFunc, _ProcHashComp); + } + PVR_RETURN_IF_NOMEM(g_pRIHashTable); + PVR_RETURN_IF_NOMEM(g_pProcHashTable); + + PVR_RETURN_IF_INVALID_PARAM(psPMR); + + /* Acquire RI Lock */ + _RILock(); + + /* Look-up psPMR in Hash Table */ + hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey); + psRIEntry = (RI_LIST_ENTRY *)hashData; + if (!psRIEntry) + { + /* + * If failed to find a matching existing entry, create a new one + */ + psRIEntry = (RI_LIST_ENTRY *)OSAllocZMemNoStats(sizeof(RI_LIST_ENTRY)); + if (!psRIEntry) + { + /* Release RI Lock */ + 
_RIUnlock(); + /* Error - no memory to allocate for new RI entry */ + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + else + { + IMG_UINT32 ui32PMRFlags = PMR_Flags(psPMR); + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)PMR_DeviceNode(psPMR); + + /* + * Add new RI Entry + */ + if (g_ui16RICount == 0) + { + /* Initialise PMR entry linked-list head */ + dllist_init(&sListFirst); + } + g_ui16RICount++; + + dllist_init (&(psRIEntry->sSysAllocListNode)); + dllist_init (&(psRIEntry->sSubListFirst)); + psRIEntry->ui16SubListCount = 0; + psRIEntry->ui16MaxSubListCount = 0; + psRIEntry->valid = _VALID_RI_LIST_ENTRY; + + /* Check if this PMR should be accounted for under the + * PVR_SYS_ALLOC_PID debugFS entry. This should happen if + * we are in the driver init phase, the flags indicate + * this is a FW local allocation (made from FW heap) + * or the owner PID is PVR_SYS_ALLOC_PID. + * Also record host dev node allocs on the system PID. + */ + if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT || + PVRSRV_CHECK_FW_LOCAL(ui32PMRFlags) || + ui32Owner == PVR_SYS_ALLOC_PID || + psDeviceNode == PVRSRVGetPVRSRVData()->psHostMemDeviceNode) + { + psRIEntry->ui32RIPMRFlags = RI_FLAG_SYSALLOC_PMR; + OSSNPrintf(psRIEntry->ai8ProcName, + RI_PROC_BUF_SIZE, + "SysProc"); + psRIEntry->pid = PVR_SYS_ALLOC_PID; + OSLockAcquire(g_hSysAllocPidListLock); + /* Add this psRIEntry to the list of entries for PVR_SYS_ALLOC_PID */ + dllist_add_to_tail(&g_sSysAllocPidListHead, (PDLLIST_NODE)&(psRIEntry->sSysAllocListNode)); + OSLockRelease(g_hSysAllocPidListLock); + g_ui32SysAllocPMRCount++; + } + else + { + psRIEntry->ui32RIPMRFlags = 0; + psRIEntry->pid = ui32Owner; + } + + OSSNPrintf(psRIEntry->ai8ProcName, + RI_PROC_BUF_SIZE, + "%s", + OSGetCurrentClientProcessNameKM()); + /* Add PMR entry to linked-list of all PMR entries */ + dllist_init (&(psRIEntry->sListNode)); + dllist_add_to_tail(&sListFirst, (PDLLIST_NODE)&(psRIEntry->sListNode)); + } + + psRIEntry->psPMR = psPMR; + 
psRIEntry->ui32Flags = 0; + + /* Create index entry in Hash Table */ + HASH_Insert_Extended (g_pRIHashTable, (void *)&pPMRHashKey, (uintptr_t)psRIEntry); + + /* Store phRIHandle in PMR structure, so it can delete the associated RI entry when it destroys the PMR */ + PMRStoreRIHandle(psPMR, psRIEntry); + } + /* Release RI Lock */ + _RIUnlock(); + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RIWritePMREntryKM + + @Description + Writes a new Resource Information list entry. + The new entry will be inserted at the head of the list of + PMR RI entries and assigned the values provided. + + @input psPMR - Reference (handle) to the PMR to which this reference relates + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIWritePMREntryKM(PMR *psPMR) +{ + return RIWritePMREntryWithOwnerKM(psPMR, + OSGetCurrentClientProcessIDKM()); +} + +/*! +******************************************************************************* + + @Function RIWriteMEMDESCEntryKM + + @Description + Writes a new Resource Information sublist entry. + The new entry will be inserted at the head of the sublist of + the indicated PMR list entry, and assigned the values provided. 
+ + @input psPMR - Reference (handle) to the PMR to which this MEMDESC RI entry relates + @input ui32TextBSize - Length of string provided in psz8TextB parameter + @input psz8TextB - String describing this secondary reference (may be null) + @input ui64Offset - Offset from the start of the PMR at which this allocation begins + @input ui64Size - Size of this allocation + @input bIsImport - Flag indicating if this is an allocation or an import + @input bIsSuballoc - Flag indicating if this is a sub-allocation + @output phRIHandle - Handle to the created RI entry + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR, + IMG_UINT32 ui32TextBSize, + const IMG_CHAR *psz8TextB, + IMG_UINT64 ui64Offset, + IMG_UINT64 ui64Size, + IMG_BOOL bIsImport, + IMG_BOOL bIsSuballoc, + RI_HANDLE *phRIHandle) +{ + RI_SUBLIST_ENTRY *psRISubEntry; + RI_LIST_ENTRY *psRIEntry; + PMR *pPMRHashKey = psPMR; + uintptr_t hashData; + IMG_PID pid; + + /* Check Hash tables have been created (meaning at least one PMR has been defined) */ + PVR_RETURN_IF_INVALID_PARAM(g_pRIHashTable); + PVR_RETURN_IF_INVALID_PARAM(g_pProcHashTable); + + PVR_RETURN_IF_INVALID_PARAM(psPMR); + PVR_RETURN_IF_INVALID_PARAM(phRIHandle); + + /* Acquire RI Lock */ + _RILock(); + + *phRIHandle = NULL; + + /* Look-up psPMR in Hash Table */ + hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey); + psRIEntry = (RI_LIST_ENTRY *)hashData; + if (!psRIEntry) + { + /* Release RI Lock */ + _RIUnlock(); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psRISubEntry = (RI_SUBLIST_ENTRY *)OSAllocZMemNoStats(sizeof(RI_SUBLIST_ENTRY)); + if (!psRISubEntry) + { + /* Release RI Lock */ + _RIUnlock(); + /* Error - no memory to allocate for new RI sublist entry */ + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + else + { + /* + * Insert new entry in sublist + */ + PDLLIST_NODE currentNode = 
dllist_get_next_node(&(psRIEntry->sSubListFirst)); + + /* + * Insert new entry before currentNode + */ + if (!currentNode) + { + currentNode = &(psRIEntry->sSubListFirst); + } + dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sListNode)); + + psRISubEntry->psRI = psRIEntry; + + /* Increment number of entries in sublist */ + psRIEntry->ui16SubListCount++; + if (psRIEntry->ui16SubListCount > psRIEntry->ui16MaxSubListCount) + { + psRIEntry->ui16MaxSubListCount = psRIEntry->ui16SubListCount; + } + psRISubEntry->valid = _VALID_RI_SUBLIST_ENTRY; + } + + /* If allocation is made during device or driver initialisation, + * track the MEMDESC entry under PVR_SYS_ALLOC_PID, otherwise use + * the current PID. + * Record host dev node allocations on the system PID. + */ + { + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)PMR_DeviceNode(psRISubEntry->psRI->psPMR); + + if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT || + psDeviceNode == PVRSRVGetPVRSRVData()->psHostMemDeviceNode) + { + psRISubEntry->pid = psRISubEntry->psRI->pid; + } + else + { + psRISubEntry->pid = OSGetCurrentClientProcessIDKM(); + } + } + + if (ui32TextBSize > sizeof(psRISubEntry->ai8TextB)-1) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: TextBSize too long (%u). 
Text will be truncated " + "to %zu characters", __func__, + ui32TextBSize, sizeof(psRISubEntry->ai8TextB)-1)); + } + + /* copy ai8TextB field data */ + OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8TextB, sizeof(psRISubEntry->ai8TextB), "%s", psz8TextB); + + psRISubEntry->ui64Offset = ui64Offset; + psRISubEntry->ui64Size = ui64Size; + psRISubEntry->bIsImport = bIsImport; + psRISubEntry->bIsSuballoc = bIsSuballoc; + OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE, "%s", OSGetCurrentClientProcessNameKM()); + dllist_init (&(psRISubEntry->sProcListNode)); + + /* + * Now insert this MEMDESC into the proc list + */ + /* look-up pid in Hash Table */ + pid = psRISubEntry->pid; + hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid); + if (!hashData) + { + /* + * No allocations for this pid yet + */ + HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)&(psRISubEntry->sProcListNode)); + /* Increment number of entries in proc hash table */ + g_ui16ProcCount++; + } + else + { + /* + * Insert allocation into pid allocations linked list + */ + PDLLIST_NODE currentNode = (PDLLIST_NODE)hashData; + + /* + * Insert new entry + */ + dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sProcListNode)); + } + *phRIHandle = (RI_HANDLE)psRISubEntry; + /* Release RI Lock */ + _RIUnlock(); + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RIWriteProcListEntryKM + + @Description + Write a new entry in the process list directly. We have to do this + because there might be no, multiple or changing PMR handles. + + In the common case we have a PMR that will be added to the PMR list + and one or several MemDescs that are associated to it in a sub-list. + Additionally these MemDescs will be inserted in the per-process list. + + There might be special descriptors from e.g. new user APIs that + are associated with no or multiple PMRs and not just one. 
+ These can be now added to the per-process list (as RI_SUBLIST_ENTRY) + directly with this function and won't be listed in the PMR list (RIEntry) + because there might be no PMR. + + To remove entries from the per-process list, just use + RIDeleteMEMDESCEntryKM(). + + @input psz8TextB - String describing this secondary reference (may be null) + @input ui64Size - Size of this allocation + @input ui64DevVAddr - Virtual address of this entry + @output phRIHandle - Handle to the created RI entry + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIWriteProcListEntryKM(IMG_UINT32 ui32TextBSize, + const IMG_CHAR *psz8TextB, + IMG_UINT64 ui64Size, + IMG_UINT64 ui64DevVAddr, + RI_HANDLE *phRIHandle) +{ + uintptr_t hashData = 0; + IMG_PID pid; + RI_SUBLIST_ENTRY *psRISubEntry = NULL; + + if (!g_pRIHashTable) + { + g_pRIHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default); + g_pProcHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(IMG_PID), _ProcHashFunc, _ProcHashComp); + + if (!g_pRIHashTable || !g_pProcHashTable) + { + /* Error - no memory to allocate for Hash table(s) */ + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + } + + /* Acquire RI Lock */ + _RILock(); + + *phRIHandle = NULL; + + psRISubEntry = (RI_SUBLIST_ENTRY *)OSAllocZMemNoStats(sizeof(RI_SUBLIST_ENTRY)); + if (!psRISubEntry) + { + /* Release RI Lock */ + _RIUnlock(); + /* Error - no memory to allocate for new RI sublist entry */ + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psRISubEntry->valid = _VALID_RI_SUBLIST_ENTRY; + + psRISubEntry->pid = OSGetCurrentClientProcessIDKM(); + + if (ui32TextBSize > sizeof(psRISubEntry->ai8TextB)-1) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: TextBSize too long (%u). 
Text will be truncated " + "to %zu characters", __func__, + ui32TextBSize, sizeof(psRISubEntry->ai8TextB)-1)); + } + + /* copy ai8TextB field data */ + OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8TextB, sizeof(psRISubEntry->ai8TextB), "%s", psz8TextB); + + psRISubEntry->ui64Offset = 0; + psRISubEntry->ui64Size = ui64Size; + psRISubEntry->sVAddr.uiAddr = ui64DevVAddr; + psRISubEntry->bIsImport = IMG_FALSE; + psRISubEntry->bIsSuballoc = IMG_FALSE; + OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE, "%s", OSGetCurrentClientProcessNameKM()); + dllist_init (&(psRISubEntry->sProcListNode)); + + /* + * Now insert this MEMDESC into the proc list + */ + /* look-up pid in Hash Table */ + pid = psRISubEntry->pid; + hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid); + if (!hashData) + { + /* + * No allocations for this pid yet + */ + HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)&(psRISubEntry->sProcListNode)); + /* Increment number of entries in proc hash table */ + g_ui16ProcCount++; + } + else + { + /* + * Insert allocation into pid allocations linked list + */ + PDLLIST_NODE currentNode = (PDLLIST_NODE)hashData; + + /* + * Insert new entry + */ + dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sProcListNode)); + } + *phRIHandle = (RI_HANDLE)psRISubEntry; + /* Release RI Lock */ + _RIUnlock(); + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RIUpdateMEMDESCAddrKM + + @Description + Update a Resource Information entry. 
+ + @input hRIHandle - Handle of object whose reference info is to be updated + @input sVAddr - New address for the RI entry + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIUpdateMEMDESCAddrKM(RI_HANDLE hRIHandle, + IMG_DEV_VIRTADDR sVAddr) +{ + RI_SUBLIST_ENTRY *psRISubEntry; + + PVR_RETURN_IF_INVALID_PARAM(hRIHandle); + + psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle; + if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY) + { + /* Pointer does not point to valid structure */ + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Acquire RI lock*/ + _RILock(); + + psRISubEntry->sVAddr.uiAddr = sVAddr.uiAddr; + + /* Release RI lock */ + _RIUnlock(); + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RIDeletePMREntryKM + + @Description + Delete a Resource Information entry. + + @input hRIHandle - Handle of object whose reference info is to be deleted + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIDeletePMREntryKM(RI_HANDLE hRIHandle) +{ + RI_LIST_ENTRY *psRIEntry; + PMR *pPMRHashKey; + PVRSRV_ERROR eResult = PVRSRV_OK; + + PVR_RETURN_IF_INVALID_PARAM(hRIHandle); + + psRIEntry = (RI_LIST_ENTRY *)hRIHandle; + + if (psRIEntry->valid != _VALID_RI_LIST_ENTRY) + { + /* Pointer does not point to valid structure */ + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (psRIEntry->ui16SubListCount == 0) + { + /* Acquire RI lock*/ + _RILock(); + + /* Remove the HASH table index entry */ + pPMRHashKey = psRIEntry->psPMR; + HASH_Remove_Extended(g_pRIHashTable, (void *)&pPMRHashKey); + + psRIEntry->valid = _INVALID; + + /* Remove PMR entry from linked-list of PMR entries */ + dllist_remove_node((PDLLIST_NODE)&(psRIEntry->sListNode)); + + if (psRIEntry->ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR) + { + dllist_remove_node((PDLLIST_NODE)&(psRIEntry->sSysAllocListNode)); + 
g_ui32SysAllocPMRCount--; + } + + /* Now, free the memory used to store the RI entry */ + OSFreeMemNoStats(psRIEntry); + psRIEntry = NULL; + + /* + * Decrement number of RI entries - if this is now zero, + * we can delete the RI hash table + */ + if (--g_ui16RICount == 0) + { + HASH_Delete(g_pRIHashTable); + g_pRIHashTable = NULL; + + _RIUnlock(); + + /* If deInit has been deferred, we can now destroy the RI Lock */ + if (bRIDeInitDeferred) + { + OSLockDestroy(g_hRILock); + } + } + else + { + /* Release RI lock*/ + _RIUnlock(); + } + /* + * Make the handle NULL once PMR RI entry is deleted + */ + hRIHandle = NULL; + } + else + { + eResult = PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP; + } + + return eResult; +} + +/*! +******************************************************************************* + + @Function RIDeleteMEMDESCEntryKM + + @Description + Delete a Resource Information entry. + Entry can be from RIEntry list or ProcList. + + @input hRIHandle - Handle of object whose reference info is to be deleted + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle) +{ + RI_LIST_ENTRY *psRIEntry = NULL; + RI_SUBLIST_ENTRY *psRISubEntry; + uintptr_t hashData; + IMG_PID pid; + + PVR_RETURN_IF_INVALID_PARAM(hRIHandle); + + psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle; + if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY) + { + /* Pointer does not point to valid structure */ + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Acquire RI lock*/ + _RILock(); + + /* For entries which do have a parent PMR remove the node from the sublist */ + if (psRISubEntry->psRI) + { + psRIEntry = (RI_LIST_ENTRY *)psRISubEntry->psRI; + + /* Now, remove entry from the sublist */ + dllist_remove_node(&(psRISubEntry->sListNode)); + } + + psRISubEntry->valid = _INVALID; + + /* Remove the entry from the proc allocations linked list */ + pid = psRISubEntry->pid; + /* If this is the 
only allocation for this pid, just remove it from the hash table */ + if (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) + { + HASH_Remove_Extended(g_pProcHashTable, (void *)&pid); + /* Decrement number of entries in proc hash table, and delete the hash table if there are now none */ + if (--g_ui16ProcCount == 0) + { + HASH_Delete(g_pProcHashTable); + g_pProcHashTable = NULL; + } + } + else + { + hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid); + if ((PDLLIST_NODE)hashData == &(psRISubEntry->sProcListNode)) + { + HASH_Remove_Extended(g_pProcHashTable, (void *)&pid); + HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)dllist_get_next_node(&(psRISubEntry->sProcListNode))); + } + } + dllist_remove_node(&(psRISubEntry->sProcListNode)); + + /* Now, free the memory used to store the sublist entry */ + OSFreeMemNoStats(psRISubEntry); + psRISubEntry = NULL; + + /* + * Decrement number of entries in sublist if this MemDesc had a parent entry. + */ + if (psRIEntry) + { + psRIEntry->ui16SubListCount--; + } + + /* Release RI lock*/ + _RIUnlock(); + + /* + * Make the handle NULL once MEMDESC RI entry is deleted + */ + hRIHandle = NULL; + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RIDeleteListKM + + @Description + Delete all Resource Information entries and free associated + memory. + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIDeleteListKM(void) +{ + PVRSRV_ERROR eResult = PVRSRV_OK; + + _RILock(); + + if (g_pRIHashTable) + { + eResult = HASH_Iterate(g_pRIHashTable, (HASH_pfnCallback)_DeleteAllEntries); + if (eResult == PVRSRV_ERROR_RESOURCE_UNAVAILABLE) + { + /* + * PVRSRV_ERROR_RESOURCE_UNAVAILABLE is used to stop the Hash iterator when + * the hash table gets deleted as a result of deleting the final PMR entry, + * so this is not a real error condition... 
+ */ + eResult = PVRSRV_OK; + } + } + + /* After the run through the RIHashTable that holds the PMR entries there might be + * still entries left in the per-process hash table because they were added with + * RIWriteProcListEntryKM() and have no PMR parent associated. + */ + if (g_pProcHashTable) + { + eResult = HASH_Iterate(g_pProcHashTable, (HASH_pfnCallback) _DeleteAllProcEntries); + if (eResult == PVRSRV_ERROR_RESOURCE_UNAVAILABLE) + { + /* + * PVRSRV_ERROR_RESOURCE_UNAVAILABLE is used to stop the Hash iterator when + * the hash table gets deleted as a result of deleting the final PMR entry, + * so this is not a real error condition... + */ + eResult = PVRSRV_OK; + } + } + + _RIUnlock(); + + return eResult; +} + +/*! +******************************************************************************* + + @Function RIDumpListKM + + @Description + Dumps out the contents of the RI List entry for the + specified PMR, and all MEMDESC allocation entries + in the associated sub linked list. + At present, output is directed to Kernel log + via PVR_DPF. + + @input psPMR - PMR for which RI entry details are to be output + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIDumpListKM(PMR *psPMR) +{ + PVRSRV_ERROR eError; + + /* Acquire RI lock*/ + _RILock(); + + eError = _DumpList(psPMR, 0); + + /* Release RI lock*/ + _RIUnlock(); + + return eError; +} + +/*! +******************************************************************************* + + @Function RIGetListEntryKM + + @Description + Returns pointer to a formatted string with details of the specified + list entry. If no entry exists (e.g. it may have been deleted + since the previous call), NULL is returned. + + @input pid - pid for which RI entry details are to be output + @input ppHandle - handle to the entry, if NULL, the first entry will be + returned. 
+ @output pszEntryString - string to be output for the entry + @output hEntry - hEntry will be returned pointing to the next entry + (or NULL if there is no next entry) + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_BOOL RIGetListEntryKM(IMG_PID pid, + IMG_HANDLE **ppHandle, + IMG_CHAR **ppszEntryString) +{ + RI_SUBLIST_ENTRY *psRISubEntry = NULL; + RI_LIST_ENTRY *psRIEntry = NULL; + uintptr_t hashData = 0; + IMG_PID hashKey = pid; + + static IMG_CHAR acStringBuffer[RI_FRMT_SIZE_MAX]; + + static IMG_UINT64 ui64TotalMemdescAlloc; + static IMG_UINT64 ui64TotalImport; + static IMG_UINT64 ui64TotalPMRAlloc; + static IMG_UINT64 ui64TotalPMRBacked; + static enum { + RI_GET_STATE_MEMDESCS_LIST_START, + RI_GET_STATE_MEMDESCS_SUMMARY, + RI_GET_STATE_PMR_LIST, + RI_GET_STATE_PMR_SUMMARY, + RI_GET_STATE_END, + RI_GET_STATE_LAST + } g_bNextGetState = RI_GET_STATE_MEMDESCS_LIST_START; + + static DLLIST_NODE *psNode; + static DLLIST_NODE *psSysAllocNode; + static IMG_CHAR szProcName[RI_PROC_BUF_SIZE]; + static IMG_UINT32 ui32ProcessedSysAllocPMRCount; + + acStringBuffer[0] = '\0'; + + switch (g_bNextGetState) + { + case RI_GET_STATE_MEMDESCS_LIST_START: + /* look-up pid in Hash Table, to obtain first entry for pid */ + hashData = HASH_Retrieve_Extended(g_pProcHashTable, (void *)&hashKey); + if (hashData) + { + if (*ppHandle) + { + psRISubEntry = (RI_SUBLIST_ENTRY *)*ppHandle; + if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY) + { + psRISubEntry = NULL; + } + } + else + { + psRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode); + if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY) + { + psRISubEntry = NULL; + } + } + } + + if (psRISubEntry) + { + PDLLIST_NODE psNextProcListNode = dllist_get_next_node(&psRISubEntry->sProcListNode); + + if (psRISubEntry->bIsImport) + { + ui64TotalImport += psRISubEntry->ui64Size; + } + else + { + ui64TotalMemdescAlloc += psRISubEntry->ui64Size; + 
} + + _GenerateMEMDESCEntryString(psRISubEntry, + IMG_TRUE, + RI_MEMDESC_ENTRY_BUF_SIZE, + acStringBuffer); + + if (szProcName[0] == '\0') + { + OSStringLCopy(szProcName, (pid == PVR_SYS_ALLOC_PID) ? + PVRSRV_MODNAME : psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE); + } + + + *ppszEntryString = acStringBuffer; + *ppHandle = (IMG_HANDLE)IMG_CONTAINER_OF(psNextProcListNode, RI_SUBLIST_ENTRY, sProcListNode); + + if (psNextProcListNode == NULL || + psNextProcListNode == (PDLLIST_NODE)hashData) + { + g_bNextGetState = RI_GET_STATE_MEMDESCS_SUMMARY; + } + /* else continue to list MEMDESCs */ + } + else + { + if (ui64TotalMemdescAlloc == 0) + { + acStringBuffer[0] = '\0'; + *ppszEntryString = acStringBuffer; + g_bNextGetState = RI_GET_STATE_MEMDESCS_SUMMARY; + } + /* else continue to list MEMDESCs */ + } + break; + + case RI_GET_STATE_MEMDESCS_SUMMARY: + OSSNPrintf(acStringBuffer, + RI_MEMDESC_SUM_BUF_SIZE, + RI_MEMDESC_SUM_FRMT, + pid, + szProcName, + ui64TotalMemdescAlloc, + ui64TotalMemdescAlloc >> 10, + ui64TotalImport, + ui64TotalImport >> 10, + (ui64TotalMemdescAlloc + ui64TotalImport), + (ui64TotalMemdescAlloc + ui64TotalImport) >> 10); + + *ppszEntryString = acStringBuffer; + ui64TotalMemdescAlloc = 0; + ui64TotalImport = 0; + szProcName[0] = '\0'; + + g_bNextGetState = RI_GET_STATE_PMR_LIST; + break; + + case RI_GET_STATE_PMR_LIST: + if (pid == PVR_SYS_ALLOC_PID) + { + OSLockAcquire(g_hSysAllocPidListLock); + acStringBuffer[0] = '\0'; + if (!psSysAllocNode) + { + psSysAllocNode = &g_sSysAllocPidListHead; + ui32ProcessedSysAllocPMRCount = 0; + } + psSysAllocNode = dllist_get_next_node(psSysAllocNode); + + if (szProcName[0] == '\0') + { + OSStringLCopy(szProcName, PVRSRV_MODNAME, RI_PROC_BUF_SIZE); + } + if (psSysAllocNode != NULL && psSysAllocNode != &g_sSysAllocPidListHead) + { + IMG_DEVMEM_SIZE_T uiPMRPhysicalBacking, uiPMRLogicalSize = 0; + + psRIEntry = IMG_CONTAINER_OF((PDLLIST_NODE)psSysAllocNode, RI_LIST_ENTRY, sSysAllocListNode); + 
_GeneratePMREntryString(psRIEntry, + IMG_TRUE, + RI_PMR_ENTRY_BUF_SIZE, + acStringBuffer); + PMR_LogicalSize(psRIEntry->psPMR, + &uiPMRLogicalSize); + ui64TotalPMRAlloc += uiPMRLogicalSize; + PMR_PhysicalSize(psRIEntry->psPMR, &uiPMRPhysicalBacking); + ui64TotalPMRBacked += uiPMRPhysicalBacking; + + ui32ProcessedSysAllocPMRCount++; + if (ui32ProcessedSysAllocPMRCount > g_ui32SysAllocPMRCount+1) + { + g_bNextGetState = RI_GET_STATE_PMR_SUMMARY; + } + /* else continue to list PMRs */ + } + else + { + g_bNextGetState = RI_GET_STATE_PMR_SUMMARY; + } + *ppszEntryString = (IMG_CHAR *)acStringBuffer; + OSLockRelease(g_hSysAllocPidListLock); + } + else + { + IMG_BOOL bPMRToDisplay = IMG_FALSE; + + /* Iterate through the 'touched' PMRs and display details */ + if (!psNode) + { + psNode = dllist_get_next_node(&sListFirst); + } + else + { + psNode = dllist_get_next_node(psNode); + } + + while ((psNode != NULL && psNode != &sListFirst) && + !bPMRToDisplay) + { + psRIEntry = IMG_CONTAINER_OF(psNode, RI_LIST_ENTRY, sListNode); + if (psRIEntry->pid == pid) + { + IMG_DEVMEM_SIZE_T uiPMRPhysicalBacking, uiPMRLogicalSize = 0; + + /* This PMR was 'touched', so display details and unflag it*/ + _GeneratePMREntryString(psRIEntry, + IMG_TRUE, + RI_PMR_ENTRY_BUF_SIZE, + acStringBuffer); + PMR_LogicalSize(psRIEntry->psPMR, &uiPMRLogicalSize); + ui64TotalPMRAlloc += uiPMRLogicalSize; + PMR_PhysicalSize(psRIEntry->psPMR, &uiPMRPhysicalBacking); + ui64TotalPMRBacked += uiPMRPhysicalBacking; + + /* Remember the name of the process for 1 PMR for the summary */ + if (szProcName[0] == '\0') + { + OSStringLCopy(szProcName, psRIEntry->ai8ProcName, RI_PROC_BUF_SIZE); + } + bPMRToDisplay = IMG_TRUE; + } + else + { + psNode = dllist_get_next_node(psNode); + } + } + + if (psNode == NULL || (psNode == &sListFirst)) + { + g_bNextGetState = RI_GET_STATE_PMR_SUMMARY; + } + /* else continue listing PMRs */ + } + break; + + case RI_GET_STATE_PMR_SUMMARY: + OSSNPrintf(acStringBuffer, + RI_PMR_SUM_BUF_SIZE, + 
RI_PMR_SUM_FRMT, + pid, + szProcName, + ui64TotalPMRAlloc, + ui64TotalPMRAlloc >> 10, + ui64TotalPMRBacked, + ui64TotalPMRBacked >> 10); + + *ppszEntryString = acStringBuffer; + ui64TotalPMRAlloc = 0; + ui64TotalPMRBacked = 0; + szProcName[0] = '\0'; + psSysAllocNode = NULL; + + g_bNextGetState = RI_GET_STATE_END; + break; + + default: + PVR_DPF((PVR_DBG_ERROR, "%s: Bad %d)",__func__, g_bNextGetState)); + + __fallthrough; + case RI_GET_STATE_END: + /* Reset state ready for the next gpu_mem_area file to display */ + *ppszEntryString = NULL; + *ppHandle = NULL; + psNode = NULL; + szProcName[0] = '\0'; + + g_bNextGetState = RI_GET_STATE_MEMDESCS_LIST_START; + return IMG_FALSE; + break; + } + + return IMG_TRUE; +} + +/* Function used to produce string containing info for MEMDESC RI entries (used for both debugfs and kernel log output) */ +static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry, + IMG_BOOL bDebugFs, + IMG_UINT16 ui16MaxStrLen, + IMG_CHAR *pszEntryString) +{ + IMG_CHAR szProc[RI_MEMDESC_ENTRY_PROC_BUF_SIZE]; + IMG_CHAR szImport[RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE]; + IMG_CHAR szEntryFormat[RI_MEMDESC_ENTRY_FRMT_SIZE]; + const IMG_CHAR *pszAnnotationText; + IMG_PID uiRIPid = 0; + PMR* psRIPMR = NULL; + IMG_UINT32 ui32RIPMRFlags = 0; + + if (psRISubEntry->psRI != NULL) + { + uiRIPid = psRISubEntry->psRI->pid; + psRIPMR = psRISubEntry->psRI->psPMR; + ui32RIPMRFlags = psRISubEntry->psRI->ui32RIPMRFlags; + } + + OSSNPrintf(szEntryFormat, + RI_MEMDESC_ENTRY_FRMT_SIZE, + RI_MEMDESC_ENTRY_FRMT, + DEVMEM_ANNOTATION_MAX_LEN); + + if (!bDebugFs) + { + /* we don't include process ID info for debugfs output */ + OSSNPrintf(szProc, + RI_MEMDESC_ENTRY_PROC_BUF_SIZE, + RI_MEMDESC_ENTRY_PROC_FRMT, + psRISubEntry->pid, + psRISubEntry->ai8ProcName); + } + + if (psRISubEntry->bIsImport && psRIPMR) + { + OSSNPrintf((IMG_CHAR *)&szImport, + RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE, + RI_MEMDESC_ENTRY_IMPORT_FRMT, + uiRIPid); + /* Set pszAnnotationText to that of the 
'parent' PMR RI entry */ + pszAnnotationText = PMR_GetAnnotation(psRIPMR); + } + else if (!psRISubEntry->bIsSuballoc && psRIPMR) + { + /* Set pszAnnotationText to that of the 'parent' PMR RI entry */ + pszAnnotationText = PMR_GetAnnotation(psRIPMR); + } + else + { + /* Set pszAnnotationText to that of the MEMDESC RI entry */ + pszAnnotationText = psRISubEntry->ai8TextB; + } + + /* Don't print memdescs if they are local imports + * (i.e. imported PMRs allocated by this process) + */ + if (bDebugFs && + ((psRISubEntry->sVAddr.uiAddr + psRISubEntry->ui64Offset) == 0) && + (psRISubEntry->bIsImport && ((psRISubEntry->pid == uiRIPid) + || (psRISubEntry->pid == PVR_SYS_ALLOC_PID)))) + { + /* Don't print this entry */ + pszEntryString[0] = '\0'; + } + else + { + OSSNPrintf(pszEntryString, + ui16MaxStrLen, + szEntryFormat, + (bDebugFs ? "" : " "), + psRISubEntry->pid, + (psRISubEntry->sVAddr.uiAddr + psRISubEntry->ui64Offset), + pszAnnotationText, + (bDebugFs ? "" : (char *)szProc), + psRISubEntry->ui64Size, + psRIPMR, + (psRISubEntry->bIsImport ? (char *)&szImport : ""), + (!psRISubEntry->bIsImport && (ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR) && (psRISubEntry->pid != PVR_SYS_ALLOC_PID)) ? g_szSysAllocImport : "", + (psRIPMR && PMR_IsUnpinned(psRIPMR)) ? RI_MEMDESC_ENTRY_UNPINNED_FRMT : "", + (bDebugFs ? 
'\n' : ' ')); + } +} + +/* Function used to produce string containing info for PMR RI entries (used for debugfs and kernel log output) */ +static void _GeneratePMREntryString(RI_LIST_ENTRY *psRIEntry, + IMG_BOOL bDebugFs, + IMG_UINT16 ui16MaxStrLen, + IMG_CHAR *pszEntryString) +{ + const IMG_CHAR* pszAnnotationText; + IMG_DEVMEM_SIZE_T uiLogicalSize = 0; + IMG_DEVMEM_SIZE_T uiPhysicalSize = 0; + IMG_CHAR szEntryFormat[RI_PMR_ENTRY_FRMT_SIZE]; + + PMR_LogicalSize(psRIEntry->psPMR, &uiLogicalSize); + + PMR_PhysicalSize(psRIEntry->psPMR, &uiPhysicalSize); + + OSSNPrintf(szEntryFormat, + RI_PMR_ENTRY_FRMT_SIZE, + RI_PMR_ENTRY_FRMT, + DEVMEM_ANNOTATION_MAX_LEN); + + /* Set pszAnnotationText to that PMR RI entry */ + pszAnnotationText = (IMG_PCHAR) PMR_GetAnnotation(psRIEntry->psPMR); + + OSSNPrintf(pszEntryString, + ui16MaxStrLen, + szEntryFormat, + (bDebugFs ? "" : " "), + psRIEntry->pid, + (void*)psRIEntry->psPMR, + pszAnnotationText, + uiLogicalSize, + uiPhysicalSize, + (bDebugFs ? '\n' : ' ')); +} + +/*! +******************************************************************************* + + @Function _DumpList + + @Description + Dumps out RI List entries according to parameters passed. + + @input psPMR - If not NULL, function will output the RI entries for + the specified PMR only + @input pid - If non-zero, the function will only output MEMDESC RI + entries made by the process with ID pid. + If zero, all MEMDESC RI entries will be output. 
+ + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR _DumpList(PMR *psPMR, IMG_PID pid) +{ + RI_LIST_ENTRY *psRIEntry = NULL; + RI_SUBLIST_ENTRY *psRISubEntry = NULL; + IMG_UINT16 ui16SubEntriesParsed = 0; + uintptr_t hashData = 0; + IMG_PID hashKey; + PMR *pPMRHashKey = psPMR; + IMG_BOOL bDisplayedThisPMR = IMG_FALSE; + IMG_UINT64 ui64LogicalSize = 0; + + PVR_RETURN_IF_INVALID_PARAM(psPMR); + + if (g_pRIHashTable && g_pProcHashTable) + { + if (pid != 0) + { + /* look-up pid in Hash Table */ + hashKey = pid; + hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&hashKey); + if (hashData) + { + psRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode); + if (psRISubEntry) + { + psRIEntry = psRISubEntry->psRI; + } + } + } + else + { + /* Look-up psPMR in Hash Table */ + hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey); + psRIEntry = (RI_LIST_ENTRY *)hashData; + } + if (!psRIEntry) + { + /* No entry found in hash table */ + return PVRSRV_ERROR_NOT_FOUND; + } + while (psRIEntry) + { + bDisplayedThisPMR = IMG_FALSE; + /* Output details for RI entry */ + if (!pid) + { + PMR_LogicalSize(psPMR, (IMG_DEVMEM_SIZE_T*)&ui64LogicalSize); + + _RIOutput (("%s <%p> suballocs:%d size:0x%010" IMG_UINT64_FMTSPECx, + PMR_GetAnnotation(psRIEntry->psPMR), + psRIEntry->psPMR, + (IMG_UINT)psRIEntry->ui16SubListCount, + ui64LogicalSize)); + bDisplayedThisPMR = IMG_TRUE; + } + ui16SubEntriesParsed = 0; + if (psRIEntry->ui16SubListCount) + { +#if _DUMP_LINKEDLIST_INFO + _RIOutput (("RI LIST: {sSubListFirst.psNextNode:0x%p}\n", + psRIEntry->sSubListFirst.psNextNode)); +#endif /* _DUMP_LINKEDLIST_INFO */ + if (!pid) + { + psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)), + RI_SUBLIST_ENTRY, sListNode); + } + /* Traverse RI sublist and output details for each entry */ + while (psRISubEntry) + { + if (psRIEntry) + { + if 
((ui16SubEntriesParsed >= psRIEntry->ui16SubListCount)) + { + break; + } + if (!bDisplayedThisPMR) + { + PMR_LogicalSize(psPMR, (IMG_DEVMEM_SIZE_T*)&ui64LogicalSize); + + _RIOutput (("%s <%p> suballocs:%d size:0x%010" IMG_UINT64_FMTSPECx, + PMR_GetAnnotation(psRIEntry->psPMR), + psRIEntry->psPMR, + (IMG_UINT)psRIEntry->ui16SubListCount, + ui64LogicalSize)); + bDisplayedThisPMR = IMG_TRUE; + } + } +#if _DUMP_LINKEDLIST_INFO + _RIOutput (("RI LIST: [this subentry:0x%p]\n",psRISubEntry)); + _RIOutput (("RI LIST: psRI:0x%p\n",psRISubEntry->psRI)); +#endif /* _DUMP_LINKEDLIST_INFO */ + + { + IMG_CHAR szEntryString[RI_MEMDESC_ENTRY_BUF_SIZE]; + + _GenerateMEMDESCEntryString(psRISubEntry, + IMG_FALSE, + RI_MEMDESC_ENTRY_BUF_SIZE, + szEntryString); + _RIOutput (("%s",szEntryString)); + } + + if (pid) + { + if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) || + (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData)) + { + psRISubEntry = NULL; + } + else + { + psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)), + RI_SUBLIST_ENTRY, sProcListNode); + if (psRISubEntry) + { + if (psRIEntry != psRISubEntry->psRI) + { + /* + * The next MEMDESC in the process linked list is in a different PMR + */ + psRIEntry = psRISubEntry->psRI; + bDisplayedThisPMR = IMG_FALSE; + } + } + } + } + else + { + ui16SubEntriesParsed++; + psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sListNode)), + RI_SUBLIST_ENTRY, sListNode); + } + } + } + if (!pid && psRIEntry) + { + if (ui16SubEntriesParsed != psRIEntry->ui16SubListCount) + { + /* + * Output error message as sublist does not contain the + * number of entries indicated by sublist count + */ + _RIOutput (("RI ERROR: RI sublist contains %d entries, not %d entries\n", + ui16SubEntriesParsed, psRIEntry->ui16SubListCount)); + } + else if (psRIEntry->ui16SubListCount && !dllist_get_next_node(&(psRIEntry->sSubListFirst))) + { + /* + * Output error message as 
sublist is empty but sublist count + * is not zero + */ + _RIOutput (("RI ERROR: ui16SubListCount=%d for empty RI sublist\n", + psRIEntry->ui16SubListCount)); + } + } + psRIEntry = NULL; + } + } + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RIDumpAllKM + + @Description + Dumps out the contents of all RI List entries (i.e. for all + MEMDESC allocations for each PMR). + At present, output is directed to Kernel log + via PVR_DPF. + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIDumpAllKM(void) +{ + if (g_pRIHashTable) + { + return HASH_Iterate(g_pRIHashTable, (HASH_pfnCallback)_DumpAllEntries); + } + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RIDumpProcessKM + + @Description + Dumps out the contents of all MEMDESC RI List entries (for every + PMR) which have been allocate by the specified process only. + At present, output is directed to Kernel log + via PVR_DPF. + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIDumpProcessKM(IMG_PID pid) +{ + PVRSRV_ERROR eError; + IMG_UINT32 dummyPMR; + + if (!g_pProcHashTable) + { + return PVRSRV_OK; + } + + /* Acquire RI lock*/ + _RILock(); + + eError = _DumpList((PMR *)&dummyPMR, pid); + + /* Release RI lock*/ + _RIUnlock(); + + return eError; +} + +/*! +******************************************************************************* + + @Function _TotalAllocsForProcess + + @Description + Totals all PMR physical backing for given process. + + @input pid - ID of process. + + @input ePhysHeapType - type of Physical Heap for which to total allocs + + @Return Size of all physical backing for PID's PMRs allocated from the + specified heap type (in bytes). 
+ +******************************************************************************/ +static IMG_INT32 _TotalAllocsForProcess(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType) +{ + RI_LIST_ENTRY *psRIEntry = NULL; + RI_SUBLIST_ENTRY *psInitialRISubEntry = NULL; + RI_SUBLIST_ENTRY *psRISubEntry = NULL; + uintptr_t hashData = 0; + IMG_PID hashKey; + IMG_INT32 i32TotalPhysical = 0; + + if (g_pRIHashTable && g_pProcHashTable) + { + if (pid == PVR_SYS_ALLOC_PID) + { + IMG_UINT32 ui32ProcessedSysAllocPMRCount = 0; + DLLIST_NODE *psSysAllocNode = NULL; + + OSLockAcquire(g_hSysAllocPidListLock); + psSysAllocNode = dllist_get_next_node(&g_sSysAllocPidListHead); + while (psSysAllocNode && psSysAllocNode != &g_sSysAllocPidListHead) + { + psRIEntry = IMG_CONTAINER_OF((PDLLIST_NODE)psSysAllocNode, RI_LIST_ENTRY, sSysAllocListNode); + ui32ProcessedSysAllocPMRCount++; + if (PhysHeapGetType(PMR_PhysHeap(psRIEntry->psPMR)) == ePhysHeapType) + { + IMG_UINT64 ui64PhysicalSize; + + PMR_PhysicalSize(psRIEntry->psPMR, (IMG_DEVMEM_SIZE_T*)&ui64PhysicalSize); + if (((IMG_UINT64)i32TotalPhysical + ui64PhysicalSize > 0x7fffffff)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: i32TotalPhysical exceeding size for i32",__func__)); + } + i32TotalPhysical += (IMG_INT32)(ui64PhysicalSize & 0x00000000ffffffff); + } + psSysAllocNode = dllist_get_next_node(psSysAllocNode); + } + OSLockRelease(g_hSysAllocPidListLock); + } + else + { + if (pid != 0) + { + /* look-up pid in Hash Table */ + hashKey = pid; + hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&hashKey); + if (hashData) + { + psInitialRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode); + psRISubEntry = psInitialRISubEntry; + if (psRISubEntry) + { + psRIEntry = psRISubEntry->psRI; + } + } + } + + while (psRISubEntry && psRIEntry) + { + if (!psRISubEntry->bIsImport && !(psRIEntry->ui32RIPMRFlags & RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS) && + (pid == PVR_SYS_ALLOC_PID || !(psRIEntry->ui32RIPMRFlags & 
RI_FLAG_SYSALLOC_PMR)) && + (PhysHeapGetType(PMR_PhysHeap(psRIEntry->psPMR)) == ePhysHeapType)) + { + IMG_UINT64 ui64PhysicalSize; + + + PMR_PhysicalSize(psRIEntry->psPMR, (IMG_DEVMEM_SIZE_T*)&ui64PhysicalSize); + if (((IMG_UINT64)i32TotalPhysical + ui64PhysicalSize > 0x7fffffff)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: i32TotalPhysical exceeding size for i32",__func__)); + } + i32TotalPhysical += (IMG_INT32)(ui64PhysicalSize & 0x00000000ffffffff); + psRIEntry->ui32RIPMRFlags |= RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS; + } + if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) || + (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData)) + { + psRISubEntry = NULL; + psRIEntry = NULL; + } + else + { + psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)), + RI_SUBLIST_ENTRY, sProcListNode); + if (psRISubEntry) + { + psRIEntry = psRISubEntry->psRI; + } + } + } + psRISubEntry = psInitialRISubEntry; + if (psRISubEntry) + { + psRIEntry = psRISubEntry->psRI; + } + while (psRISubEntry && psRIEntry) + { + psRIEntry->ui32RIPMRFlags &= ~RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS; + if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) || + (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData)) + { + psRISubEntry = NULL; + psRIEntry = NULL; + } + else + { + psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)), + RI_SUBLIST_ENTRY, sProcListNode); + if (psRISubEntry) + { + psRIEntry = psRISubEntry->psRI; + } + } + } + } + } + return i32TotalPhysical; +} + +/*! +******************************************************************************* + + @Function RITotalAllocProcessKM + + @Description + Returns the total of allocated GPU memory (backing for PMRs) + which has been allocated from the specific heap by the specified + process only. 
+ + @Return Amount of physical backing allocated (in bytes) + +******************************************************************************/ +IMG_INT32 RITotalAllocProcessKM(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType) +{ + IMG_INT32 i32BackingTotal = 0; + + if (g_pProcHashTable) + { + /* Acquire RI lock*/ + _RILock(); + + i32BackingTotal = _TotalAllocsForProcess(pid, ePhysHeapType); + + /* Release RI lock*/ + _RIUnlock(); + } + return i32BackingTotal; +} + +#if defined(DEBUG) +/*! +******************************************************************************* + + @Function _DumpProcessList + + @Description + Dumps out RI List entries according to parameters passed. + + @input psPMR - If not NULL, function will output the RI entries for + the specified PMR only + @input pid - If non-zero, the function will only output MEMDESC RI + entries made by the process with ID pid. + If zero, all MEMDESC RI entries will be output. + + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR _DumpProcessList(PMR *psPMR, + IMG_PID pid, + IMG_UINT64 ui64Offset, + IMG_DEV_VIRTADDR *psDevVAddr) +{ + RI_LIST_ENTRY *psRIEntry = NULL; + RI_SUBLIST_ENTRY *psRISubEntry = NULL; + IMG_UINT16 ui16SubEntriesParsed = 0; + uintptr_t hashData = 0; + PMR *pPMRHashKey = psPMR; + + psDevVAddr->uiAddr = 0; + + PVR_RETURN_IF_INVALID_PARAM(psPMR); + + if (g_pRIHashTable && g_pProcHashTable) + { + PVR_ASSERT(psPMR && pid); + + /* Look-up psPMR in Hash Table */ + hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey); + psRIEntry = (RI_LIST_ENTRY *)hashData; + + if (!psRIEntry) + { + /* No entry found in hash table */ + return PVRSRV_ERROR_NOT_FOUND; + } + + if (psRIEntry->ui16SubListCount) + { + psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)), + RI_SUBLIST_ENTRY, sListNode); + + /* Traverse RI sublist and output details for each entry */ + while (psRISubEntry && 
(ui16SubEntriesParsed < psRIEntry->ui16SubListCount)) + { + if (pid == psRISubEntry->pid) + { + IMG_UINT64 ui64StartOffset = psRISubEntry->ui64Offset; + IMG_UINT64 ui64EndOffset = psRISubEntry->ui64Offset + psRISubEntry->ui64Size; + + if (ui64Offset >= ui64StartOffset && ui64Offset < ui64EndOffset) + { + psDevVAddr->uiAddr = psRISubEntry->sVAddr.uiAddr; + return PVRSRV_OK; + } + } + + ui16SubEntriesParsed++; + psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sListNode)), + RI_SUBLIST_ENTRY, sListNode); + } + } + } + + return PVRSRV_ERROR_INVALID_PARAMS; +} + +/*! +******************************************************************************* + + @Function RIDumpProcessListKM + + @Description + Dumps out selected contents of all MEMDESC RI List entries (for a + PMR) which have been allocate by the specified process only. + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIDumpProcessListKM(PMR *psPMR, + IMG_PID pid, + IMG_UINT64 ui64Offset, + IMG_DEV_VIRTADDR *psDevVAddr) +{ + PVRSRV_ERROR eError; + + if (!g_pProcHashTable) + { + return PVRSRV_OK; + } + + /* Acquire RI lock*/ + _RILock(); + + eError = _DumpProcessList(psPMR, + pid, + ui64Offset, + psDevVAddr); + + /* Release RI lock*/ + _RIUnlock(); + + return eError; +} +#endif + +static PVRSRV_ERROR _DumpAllEntries (uintptr_t k, uintptr_t v) +{ + RI_LIST_ENTRY *psRIEntry = (RI_LIST_ENTRY *)v; + + PVR_UNREFERENCED_PARAMETER (k); + + return RIDumpListKM(psRIEntry->psPMR); +} + +static PVRSRV_ERROR _DeleteAllEntries (uintptr_t k, uintptr_t v) +{ + RI_LIST_ENTRY *psRIEntry = (RI_LIST_ENTRY *)v; + RI_SUBLIST_ENTRY *psRISubEntry; + PVRSRV_ERROR eResult = PVRSRV_OK; + + PVR_UNREFERENCED_PARAMETER (k); + + while ((eResult == PVRSRV_OK) && (psRIEntry->ui16SubListCount > 0)) + { + psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)), RI_SUBLIST_ENTRY, sListNode); + eResult = 
RIDeleteMEMDESCEntryKM((RI_HANDLE)psRISubEntry); + } + if (eResult == PVRSRV_OK) + { + eResult = RIDeletePMREntryKM((RI_HANDLE)psRIEntry); + /* + * If we've deleted the Hash table, return + * an error to stop the iterator... + */ + if (!g_pRIHashTable) + { + eResult = PVRSRV_ERROR_RESOURCE_UNAVAILABLE; + } + } + return eResult; +} + +static PVRSRV_ERROR _DeleteAllProcEntries (uintptr_t k, uintptr_t v) +{ + RI_SUBLIST_ENTRY *psRISubEntry = (RI_SUBLIST_ENTRY *)v; + PVRSRV_ERROR eResult; + + PVR_UNREFERENCED_PARAMETER (k); + + eResult = RIDeleteMEMDESCEntryKM((RI_HANDLE) psRISubEntry); + if (eResult == PVRSRV_OK && !g_pProcHashTable) + { + /* + * If we've deleted the Hash table, return + * an error to stop the iterator... + */ + eResult = PVRSRV_ERROR_RESOURCE_UNAVAILABLE; + } + + return eResult; +} + +#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ diff --git a/drivers/mcst/gpu-imgtec/services/server/common/srvcore.c b/drivers/mcst/gpu-imgtec/services/server/common/srvcore.c new file mode 100644 index 000000000000..9263895e1891 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/srvcore.c @@ -0,0 +1,1397 @@ +/*************************************************************************/ /*! +@File +@Title PVR Common Bridge Module (kernel side) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements core PVRSRV API, server side +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "img_defs.h" +#include "pvr_debug.h" +#include "ra.h" +#include "pvr_bridge.h" +#include "connection_server.h" +#include "device.h" +#include "htbuffer.h" + +#include "pdump_km.h" + +#include "srvkm.h" +#include "allocmem.h" +#include "devicemem.h" +#include "log2.h" + +#include "srvcore.h" +#include "pvrsrv.h" +#include "power.h" + +#if defined(SUPPORT_RGX) +#include "rgxdevice.h" +#include "rgxinit.h" +#include "rgx_compat_bvnc.h" +#endif + +#include "rgx_options.h" +#include "pvrversion.h" +#include "lock.h" +#include "osfunc.h" +#include "device_connection.h" +#include "process_stats.h" +#include "pvrsrv_pool.h" + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#include "physmem_lma.h" +#include "services_km.h" +#endif + +#include "pvrsrv_tlstreams.h" +#include "tlstream.h" + +#if defined(PVRSRV_MISSING_NO_SPEC_IMPL) +#pragma message ("There is no implementation of OSConfineArrayIndexNoSpeculation() - see osfunc.h") +#endif + +/* For the purpose of maintainability, it is intended that this file should not + * contain any OS specific #ifdefs. Please find a way to add e.g. + * an osfunc.c abstraction or override the entire function in question within + * env,*,pvr_bridge_k.c + */ + +PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT] = { {.pfFunction = DummyBW,} ,}; + +#define PVR_DISPATCH_OFFSET_FIRST_FUNC 0 +#define PVR_DISPATCH_OFFSET_LAST_FUNC 1 +#define PVR_DISPATCH_OFFSET_ARRAY_MAX 2 + +#define PVRSRV_CLIENT_TL_STREAM_SIZE_DEFAULT PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE + +static IMG_UINT16 g_BridgeDispatchTableStartOffsets[BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT][PVR_DISPATCH_OFFSET_ARRAY_MAX]; + + +#define PVRSRV_MAX_POOLED_BRIDGE_BUFFERS 8 /*!< Initial number of pooled bridge buffers */ + +static PVRSRV_POOL *g_psBridgeBufferPool; /*! 
Pool of bridge buffers */ + + +#if defined(DEBUG_BRIDGE_KM) +/* a lock used for protecting bridge call timing calculations + * for calls which do not acquire a lock + */ +static POS_LOCK g_hStatsLock; +PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats; + +void BridgeGlobalStatsLock(void) +{ + OSLockAcquire(g_hStatsLock); +} + +void BridgeGlobalStatsUnlock(void) +{ + OSLockRelease(g_hStatsLock); +} +#endif + +void BridgeDispatchTableStartOffsetsInit(void) +{ + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEFAULT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DEFAULT_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEFAULT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DEFAULT_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SRVCORE][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SRVCORE][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNC_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RESERVED1][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RESERVED1_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RESERVED1][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RESERVED1_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RESERVED2][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RESERVED2_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RESERVED2][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RESERVED2_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPCTRL][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPCTRL][PVR_DISPATCH_OFFSET_LAST_FUNC] = 
PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_MM_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_MM_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMPLAT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMPLAT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CMM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_CMM_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CMM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_CMM_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPMM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPMM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMP][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMP][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DMABUF][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DMABUF][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DC_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DC_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CACHE][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_CACHE_DISPATCH_FIRST; + 
g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CACHE][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_CACHE_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SMM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SMM_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SMM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SMM_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PVRTL][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PVRTL][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RI][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RI_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RI][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RI_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_VALIDATION][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_VALIDATION][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_TUTILS][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_TUTILS][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEVICEMEMHISTORY][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEVICEMEMHISTORY][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_HTBUFFER][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_HTBUFFER][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST; + 
g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DCPLAT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DCPLAT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMEXTMEM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMEXTMEM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCTRACKING][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCTRACKING][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCFALLBACK][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCFALLBACK][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST; +#if defined(SUPPORT_RGX) + /* Need a gap here to start next entry at element 128 */ + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXCMP][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXCMP][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTA3D][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTA3D][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST; + 
g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXBREAKPOINT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXBREAKPOINT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXFWDBG][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXFWDBG][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXPDUMP][PVR_DISPATCH_OFFSET_FIRST_FUNC]= PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXPDUMP][PVR_DISPATCH_OFFSET_LAST_FUNC]= PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXHWPERF][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXHWPERF][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXREGCONFIG][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXREGCONFIG][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXKICKSYNC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXKICKSYNC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXSIGNALS][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXSIGNALS][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_LAST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ2][PVR_DISPATCH_OFFSET_FIRST_FUNC] = 
PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST; + g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ2][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST; +#endif +} + +#if defined(DEBUG_BRIDGE_KM) + +#if defined(INTEGRITY_OS) +PVRSRV_ERROR PVRSRVPrintBridgeStats() +{ + IMG_UINT32 ui32Index; + IMG_UINT32 ui32Remainder; + + printf("Total Bridge call count = %u\n" + "Total number of bytes copied via copy_from_user = %u\n" + "Total number of bytes copied via copy_to_user = %u\n" + "Total number of bytes copied via copy_*_user = %u\n\n" + "%3s: %-60s | %-48s | %10s | %20s | %20s | %20s | %20s \n", + g_BridgeGlobalStats.ui32IOCTLCount, + g_BridgeGlobalStats.ui32TotalCopyFromUserBytes, + g_BridgeGlobalStats.ui32TotalCopyToUserBytes, + g_BridgeGlobalStats.ui32TotalCopyFromUserBytes + g_BridgeGlobalStats.ui32TotalCopyToUserBytes, + "#", + "Bridge Name", + "Wrapper Function", + "Call Count", + "copy_from_user (B)", + "copy_to_user (B)", + "Total Time (us)", + "Max Time (us)"); + + /* Is the item asked for (starts at 0) a valid table index? */ + for ( ui32Index=0; ui32Index < BRIDGE_DISPATCH_TABLE_ENTRY_COUNT; ui32Index++ ) + { + PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry = &g_BridgeDispatchTable[ui32Index]; + printf("%3d: %-60s %-48s %-10u %-20u %-20u %-20llu %-20llu\n", + (IMG_UINT32)(((size_t)psEntry-(size_t)g_BridgeDispatchTable)/sizeof(*g_BridgeDispatchTable)), + psEntry->pszIOCName, + (psEntry->pfFunction != NULL) ? 
psEntry->pszFunctionName : "(null)", + psEntry->ui32CallCount, + psEntry->ui32CopyFromUserTotalBytes, + psEntry->ui32CopyToUserTotalBytes, + (unsigned long long) OSDivide64r64(psEntry->ui64TotalTimeNS, 1000, &ui32Remainder), + (unsigned long long) OSDivide64r64(psEntry->ui64MaxTimeNS, 1000, &ui32Remainder)); + + + } +} +#endif + +PVRSRV_ERROR +CopyFromUserWrapper(CONNECTION_DATA *psConnection, + IMG_UINT32 ui32DispatchTableEntry, + void *pvDest, + void __user *pvSrc, + IMG_UINT32 ui32Size) +{ + g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CopyFromUserTotalBytes+=ui32Size; + g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+=ui32Size; + return OSBridgeCopyFromUser(psConnection, pvDest, pvSrc, ui32Size); +} +PVRSRV_ERROR +CopyToUserWrapper(CONNECTION_DATA *psConnection, + IMG_UINT32 ui32DispatchTableEntry, + void __user *pvDest, + void *pvSrc, + IMG_UINT32 ui32Size) +{ + g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CopyToUserTotalBytes+=ui32Size; + g_BridgeGlobalStats.ui32TotalCopyToUserBytes+=ui32Size; + return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size); +} +#else +INLINE PVRSRV_ERROR +CopyFromUserWrapper(CONNECTION_DATA *psConnection, + IMG_UINT32 ui32DispatchTableEntry, + void *pvDest, + void __user *pvSrc, + IMG_UINT32 ui32Size) +{ + PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry); + return OSBridgeCopyFromUser(psConnection, pvDest, pvSrc, ui32Size); +} +INLINE PVRSRV_ERROR +CopyToUserWrapper(CONNECTION_DATA *psConnection, + IMG_UINT32 ui32DispatchTableEntry, + void __user *pvDest, + void *pvSrc, + IMG_UINT32 ui32Size) +{ + PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry); + return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size); +} +#endif + +PVRSRV_ERROR +PVRSRVConnectKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32Flags, + IMG_UINT32 ui32ClientBuildOptions, + IMG_UINT32 ui32ClientDDKVersion, + IMG_UINT32 ui32ClientDDKBuild, + IMG_UINT8 *pui8KernelArch, + IMG_UINT32 
*pui32CapabilityFlags, + IMG_UINT64 *ui64PackedBvnc) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32BuildOptions, ui32BuildOptionsMismatch; + IMG_UINT32 ui32DDKVersion, ui32DDKBuild; + PVRSRV_DATA *psSRVData = NULL; + IMG_UINT64 ui64ProcessVASpaceSize = OSGetCurrentProcessVASpaceSize(); + static IMG_BOOL bIsFirstConnection=IMG_FALSE; + +#if defined(SUPPORT_RGX) + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + /* Gather BVNC information to output to UM */ + + *ui64PackedBvnc = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B, + psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, + psDevInfo->sDevFeatureCfg.ui32C); +#else + *ui64PackedBvnc = 0; +#endif /* defined(SUPPORT_RGX)*/ + + /* Clear the flags */ + *pui32CapabilityFlags = 0; + + psSRVData = PVRSRVGetPVRSRVData(); + + psConnection->ui32ClientFlags = ui32Flags; + + /*Set flags to pass back to the client showing which cache coherency is available.*/ + /* Is the system snooping of caches emulated in software? */ + if (PVRSRVSystemSnoopingIsEmulated(psDeviceNode->psDevConfig)) + { + *pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_EMULATE_FLAG; + } + else + { + /*Set flags to pass back to the client showing which cache coherency is available.*/ + /*Is the system CPU cache coherent?*/ + if (PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig)) + { + *pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_DEVICE_FLAG; + } + /*Is the system device cache coherent?*/ + if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) + { + *pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_CPU_FLAG; + } + } + + /* Has the system device non-mappable local memory?*/ + if (PVRSRVSystemHasNonMappableLocalMemory(psDeviceNode->psDevConfig)) + { + *pui32CapabilityFlags |= PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG; + } + + /* Is system using FBCDC v31? 
*/ + if (psDeviceNode->pfnHasFBCDCVersion31(psDeviceNode)) + { + *pui32CapabilityFlags |= PVRSRV_FBCDC_V3_1_USED; + } + + /* Set flags to indicate shared-virtual-memory (SVM) allocation availability */ + if (! psDeviceNode->ui64GeneralSVMHeapTopVA || ! ui64ProcessVASpaceSize) + { + *pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED; + } + else + { + if (ui64ProcessVASpaceSize <= psDeviceNode->ui64GeneralSVMHeapTopVA) + { + *pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED; + } + else + { + /* This can happen when processor has more virtual address bits + than device (i.e. alloc is not always guaranteed to succeed) */ + *pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL; + } + } + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +{ + IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0; + IMG_BOOL bOSidAxiProtReg = IMG_FALSE; + + ui32OSid = (ui32Flags & SRV_VIRTVAL_FLAG_OSID_MASK) >> (VIRTVAL_FLAG_OSID_SHIFT); + ui32OSidReg = (ui32Flags & SRV_VIRTVAL_FLAG_OSIDREG_MASK) >> (VIRTVAL_FLAG_OSIDREG_SHIFT); + +#if defined(EMULATOR) +/* AXI_ACELITE is only supported on rogue cores - volcanic cores all support full ACE + * and don't want to compile the code below (RGX_FEATURE_AXI_ACELITE_BIT_MASK is not + * defined for volcanic cores). 
+ */ +#if defined(RGX_FEATURE_AXI_ACELITE_BIT_MASK) + if (((PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice)->sDevFeatureCfg.ui64Features & RGX_FEATURE_AXI_ACELITE_BIT_MASK) + { + IMG_UINT32 ui32OSidAxiProtReg = 0, ui32OSidAxiProtTD = 0; + + ui32OSidAxiProtReg = (ui32Flags & SRV_VIRTVAL_FLAG_AXIPREG_MASK) >> (VIRTVAL_FLAG_AXIPREG_SHIFT); + ui32OSidAxiProtTD = (ui32Flags & SRV_VIRTVAL_FLAG_AXIPTD_MASK) >> (VIRTVAL_FLAG_AXIPTD_SHIFT); + + PVR_DPF((PVR_DBG_MESSAGE, + "[AxiProt & Virt]: Setting bOSidAxiProt of Emulator's Trusted Device for Catbase %d to %s", + ui32OSidReg, + (ui32OSidAxiProtTD == 1)?"TRUE":"FALSE")); + + bOSidAxiProtReg = ui32OSidAxiProtReg == 1; + PVR_DPF((PVR_DBG_MESSAGE, + "[AxiProt & Virt]: Setting bOSidAxiProt of FW's Register for Catbase %d to %s", + ui32OSidReg, + bOSidAxiProtReg?"TRUE":"FALSE")); + + SetAxiProtOSid(ui32OSidReg, ui32OSidAxiProtTD); + } +#endif /* defined(RGX_FEATURE_AXI_ACELITE_BIT_MASK)*/ +#endif /* defined(EMULATOR) */ + + /* We now know the OSid, OSidReg and bOSidAxiProtReg setting for this + * connection. We can access these from wherever we have a connection + * reference and do not need to traverse an arbitrary linked-list to + * obtain them. The settings are process-specific. + */ + psConnection->ui32OSid = ui32OSid; + psConnection->ui32OSidReg = ui32OSidReg; + psConnection->bOSidAxiProtReg = bOSidAxiProtReg; + + PVR_DPF((PVR_DBG_MESSAGE, + "[GPU Virtualization Validation]: OSIDs: %d, %d", + ui32OSid, + ui32OSidReg)); +} +#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Only enabled if enabled in the UM */ + if (!(ui32ClientBuildOptions & RGX_BUILD_OPTIONS_KM & OPTIONS_WORKLOAD_ESTIMATION_MASK)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Workload Estimation disabled. 
Not enabled in UM", + __func__)); + } +#endif + +#if defined(SUPPORT_PDVFS) + /* Only enabled if enabled in the UM */ + if (!(ui32ClientBuildOptions & RGX_BUILD_OPTIONS_KM & OPTIONS_PDVFS_MASK)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Proactive DVFS disabled. Not enabled in UM", + __func__)); + } +#endif + + ui32DDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN); + ui32DDKBuild = PVRVERSION_BUILD; + + if (ui32Flags & SRV_FLAGS_CLIENT_64BIT_COMPAT) + { + psSRVData->sDriverInfo.ui8UMSupportedArch |= BUILD_ARCH_64BIT; + } + else + { + psSRVData->sDriverInfo.ui8UMSupportedArch |= BUILD_ARCH_32BIT; + } + + if (IMG_FALSE == bIsFirstConnection) + { + psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildOptions = (RGX_BUILD_OPTIONS_KM); + psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildOptions = ui32ClientBuildOptions; + + psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildVersion = ui32DDKVersion; + psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildVersion = ui32ClientDDKVersion; + + psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildRevision = ui32DDKBuild; + psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildRevision = ui32ClientDDKBuild; + + psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildType = + ((RGX_BUILD_OPTIONS_KM) & OPTIONS_DEBUG_MASK) ? BUILD_TYPE_DEBUG : BUILD_TYPE_RELEASE; + + psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildType = + (ui32ClientBuildOptions & OPTIONS_DEBUG_MASK) ? 
BUILD_TYPE_DEBUG : BUILD_TYPE_RELEASE; + + if (sizeof(void *) == POINTER_SIZE_64BIT) + { + psSRVData->sDriverInfo.ui8KMBitArch |= BUILD_ARCH_64BIT; + } + else + { + psSRVData->sDriverInfo.ui8KMBitArch |= BUILD_ARCH_32BIT; + } + } + + /* Masking out every option that is not kernel specific*/ + ui32ClientBuildOptions &= RGX_BUILD_OPTIONS_MASK_KM; + + /* + * Validate the build options + */ + ui32BuildOptions = (RGX_BUILD_OPTIONS_KM); + if (ui32BuildOptions != ui32ClientBuildOptions) + { + ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32ClientBuildOptions; +#if !defined(PVRSRV_STRICT_COMPAT_CHECK) + /*Mask the debug flag option out as we do support combinations of debug vs release in um & km*/ + ui32BuildOptionsMismatch &= OPTIONS_STRICT; +#endif + if ( (ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) %s: Mismatch in client-side and KM driver build options; " + "extra options present in client-side driver: (0x%x). Please check rgx_options.h", + __func__, + ui32ClientBuildOptions & ui32BuildOptionsMismatch )); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH, chk_exit); + } + + if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) %s: Mismatch in client-side and KM driver build options; " + "extra options present in KM driver: (0x%x). 
Please check rgx_options.h", + __func__, + ui32BuildOptions & ui32BuildOptionsMismatch )); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH, chk_exit); + } + if (IMG_FALSE == bIsFirstConnection) + { + PVR_LOG(("%s: COMPAT_TEST: Client-side (0x%04x) (%s) and KM driver (0x%04x) (%s) build options differ.", + __func__, + ui32ClientBuildOptions, + (psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildType)?"release":"debug", + ui32BuildOptions, + (psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildType)?"release":"debug")); + }else{ + PVR_DPF((PVR_DBG_WARNING, "%s: COMPAT_TEST: Client-side (0x%04x) and KM driver (0x%04x) build options differ.", + __func__, + ui32ClientBuildOptions, + ui32BuildOptions)); + + } + if (!psSRVData->sDriverInfo.bIsNoMatch) + psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: Client-side and KM driver build options match. [ OK ]", __func__)); + } + + /* + * Validate DDK version + */ + if (ui32ClientDDKVersion != ui32DDKVersion) + { + if (!psSRVData->sDriverInfo.bIsNoMatch) + psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE; + PVR_LOG(("(FAIL) %s: Incompatible driver DDK version (%u.%u) / client DDK version (%u.%u).", + __func__, + PVRVERSION_MAJ, PVRVERSION_MIN, + PVRVERSION_UNPACK_MAJ(ui32ClientDDKVersion), + PVRVERSION_UNPACK_MIN(ui32ClientDDKVersion))); + PVR_DBG_BREAK; + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DDK_VERSION_MISMATCH, chk_exit); + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver DDK version (%u.%u) and client DDK version (%u.%u) match. [ OK ]", + __func__, + PVRVERSION_MAJ, PVRVERSION_MIN, PVRVERSION_MAJ, PVRVERSION_MIN)); + } + + /* Create stream for every connection except for the special clients + * that don't need it e.g.: recipients of HWPerf data. 
*/ + if (!(psConnection->ui32ClientFlags & SRV_NO_HWPERF_CLIENT_STREAM)) + { + IMG_CHAR acStreamName[PRVSRVTL_MAX_STREAM_NAME_SIZE]; + OSSNPrintf(acStreamName, PRVSRVTL_MAX_STREAM_NAME_SIZE, + PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC, + psDeviceNode->sDevId.i32UMIdentifier, + psConnection->pid); + + eError = TLStreamCreate(&psConnection->hClientTLStream, psDeviceNode, + acStreamName, + PVRSRV_CLIENT_TL_STREAM_SIZE_DEFAULT, + TL_OPMODE_DROP_NEWER | + TL_FLAG_ALLOCATE_ON_FIRST_OPEN, + NULL, NULL, NULL, NULL); + if (eError != PVRSRV_OK && eError != PVRSRV_ERROR_ALREADY_EXISTS) + { + PVR_DPF((PVR_DBG_ERROR, "Could not create private TL stream (%s)", + PVRSRVGetErrorString(eError))); + psConnection->hClientTLStream = NULL; + } + else if (eError == PVRSRV_OK) + { + /* Set "tlctrl" stream as a notification channel. This channel is + * is used to notify recipients about stream open/close (by writer) + * actions (and possibly other actions in the future). */ + eError = TLStreamSetNotifStream(psConnection->hClientTLStream, + psSRVData->hTLCtrlStream); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to set notification stream")); + TLStreamClose(psConnection->hClientTLStream); + psConnection->hClientTLStream = NULL; + } + } + + /* Reset error status. We don't want to propagate any errors from here. 
*/ + eError = PVRSRV_OK; + PVR_DPF((PVR_DBG_MESSAGE, "Created stream \"%s\".", acStreamName)); + } + + /* + * Validate DDK build + */ + if (ui32ClientDDKBuild != ui32DDKBuild) + { + if (!psSRVData->sDriverInfo.bIsNoMatch) + psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE; + PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch in driver DDK revision (%d) / client DDK revision (%d).", + __func__, ui32DDKBuild, ui32ClientDDKBuild)); +#if defined(PVRSRV_STRICT_COMPAT_CHECK) + PVR_DBG_BREAK; + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DDK_BUILD_MISMATCH, chk_exit); +#endif + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver DDK revision (%d) and client DDK revision (%d) match. [ OK ]", + __func__, ui32DDKBuild, ui32ClientDDKBuild)); + } + + /* Success so far so is it the PDump client that is connecting? */ + if (ui32Flags & SRV_FLAGS_PDUMPCTRL) + { + PDumpConnectionNotify(); + } + + PVR_ASSERT(pui8KernelArch != NULL); + + if (psSRVData->sDriverInfo.ui8KMBitArch & BUILD_ARCH_64BIT) + { + *pui8KernelArch = 64; + } + else + { + *pui8KernelArch = 32; + } + + bIsFirstConnection = IMG_TRUE; + +#if defined(DEBUG_BRIDGE_KM) + { + int ii; + + /* dump dispatch table offset lookup table */ + PVR_DPF((PVR_DBG_MESSAGE, "%s: g_BridgeDispatchTableStartOffsets[0-%lu] entries:", __func__, BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT - 1)); + for (ii=0; ii < BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT; ii++) + { + PVR_DPF((PVR_DBG_MESSAGE, "g_BridgeDispatchTableStartOffsets[%d]: %u", ii, g_BridgeDispatchTableStartOffsets[ii][PVR_DISPATCH_OFFSET_FIRST_FUNC])); + } + } +#endif + +#if defined(PDUMP) + if (!(ui32Flags & SRV_FLAGS_PDUMPCTRL)) + { + IMG_UINT64 ui64PDumpState = 0; + + PDumpGetStateKM(&ui64PDumpState); + if (ui64PDumpState & PDUMP_STATE_CONNECTED) + { + *pui32CapabilityFlags |= PVRSRV_PDUMP_IS_RECORDING; + } + } +#endif + +chk_exit: + return eError; +} + +PVRSRV_ERROR +PVRSRVDisconnectKM(void) +{ +#if defined(INTEGRITY_OS) && defined(DEBUG_BRIDGE_KM) + PVRSRVPrintBridgeStats(); +#endif + 
/* just return OK, per-process data is cleaned up by resmgr */ + + return PVRSRV_OK; +} + +/**************************************************************************/ /*! +@Function PVRSRVAcquireGlobalEventObjectKM +@Description Acquire the global event object. +@Output phGlobalEventObject On success, points to the global event + object handle +@Return PVRSRV_ERROR PVRSRV_OK on success or an error + otherwise +*/ /***************************************************************************/ +PVRSRV_ERROR +PVRSRVAcquireGlobalEventObjectKM(IMG_HANDLE *phGlobalEventObject) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + *phGlobalEventObject = psPVRSRVData->hGlobalEventObject; + + return PVRSRV_OK; +} + +/**************************************************************************/ /*! +@Function PVRSRVReleaseGlobalEventObjectKM +@Description Release the global event object. +@Output hGlobalEventObject Global event object handle +@Return PVRSRV_ERROR PVRSRV_OK on success or an error otherwise +*/ /***************************************************************************/ +PVRSRV_ERROR +PVRSRVReleaseGlobalEventObjectKM(IMG_HANDLE hGlobalEventObject) +{ + PVR_ASSERT(PVRSRVGetPVRSRVData()->hGlobalEventObject == hGlobalEventObject); + + return PVRSRV_OK; +} + +/* + PVRSRVDumpDebugInfoKM +*/ +PVRSRV_ERROR +PVRSRVDumpDebugInfoKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32VerbLevel) +{ + if (ui32VerbLevel > DEBUG_REQUEST_VERBOSITY_MAX) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + PVR_LOG(("User requested PVR debug info")); + + PVRSRVDebugRequest(psDeviceNode, ui32VerbLevel, NULL, NULL); + + return PVRSRV_OK; +} + +/* + PVRSRVGetDevClockSpeedKM +*/ +PVRSRV_ERROR +PVRSRVGetDevClockSpeedKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_PUINT32 pui32RGXClockSpeed) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVR_ASSERT(psDeviceNode->pfnDeviceClockSpeed != NULL); + + 
PVR_UNREFERENCED_PARAMETER(psConnection); + + eError = psDeviceNode->pfnDeviceClockSpeed(psDeviceNode, pui32RGXClockSpeed); + PVR_WARN_IF_ERROR(eError, "pfnDeviceClockSpeed"); + + return eError; +} + + +/* + PVRSRVHWOpTimeoutKM +*/ +PVRSRV_ERROR +PVRSRVHWOpTimeoutKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode) +{ +#if defined(PVRSRV_RESET_ON_HWTIMEOUT) + PVR_LOG(("User requested OS reset")); + OSPanic(); +#endif + PVR_LOG(("HW operation timeout, dump server info")); + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + return PVRSRV_OK; +} + + +IMG_INT +DummyBW(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 *psBridgeIn, + IMG_UINT8 *psBridgeOut, + CONNECTION_DATA *psConnection) +{ + PVR_UNREFERENCED_PARAMETER(psBridgeIn); + PVR_UNREFERENCED_PARAMETER(psBridgeOut); + PVR_UNREFERENCED_PARAMETER(psConnection); + +#if defined(DEBUG_BRIDGE_KM) + PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: ui32DispatchTableEntry %u (%s) mapped to " + "Dummy Wrapper (probably not what you want!)", + __func__, ui32DispatchTableEntry, g_BridgeDispatchTable[ui32DispatchTableEntry].pszIOCName)); +#else + PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: ui32DispatchTableEntry %u mapped to " + "Dummy Wrapper (probably not what you want!)", + __func__, ui32DispatchTableEntry)); +#endif + return PVRSRV_ERROR_BRIDGE_ENOTTY; +} + +PVRSRV_ERROR PVRSRVAlignmentCheckKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32AlignChecksSize, + IMG_UINT32 aui32AlignChecks[]) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + +#if !defined(NO_HARDWARE) + + PVR_ASSERT(psDeviceNode->pfnAlignmentCheck != NULL); + return psDeviceNode->pfnAlignmentCheck(psDeviceNode, ui32AlignChecksSize, + aui32AlignChecks); + +#else + + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(ui32AlignChecksSize); + PVR_UNREFERENCED_PARAMETER(aui32AlignChecks); + + return PVRSRV_OK; + +#endif /* !defined(NO_HARDWARE) */ + +} + +PVRSRV_ERROR 
PVRSRVGetDeviceStatusKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 *pui32DeviceStatus) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + + /* First try to update the status. */ + if (psDeviceNode->pfnUpdateHealthStatus != NULL) + { + PVRSRV_ERROR eError = psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, + IMG_FALSE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetDeviceStatusKM: Failed to " + "check for device status (%d)", eError)); + + /* Return unknown status and error because we don't know what + * happened and if the status is valid. */ + *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_UNKNOWN; + return eError; + } + } + + switch (OSAtomicRead(&psDeviceNode->eHealthStatus)) + { + case PVRSRV_DEVICE_HEALTH_STATUS_OK: + *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_OK; + return PVRSRV_OK; + case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: + *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_NOT_RESPONDING; + return PVRSRV_OK; + case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: + case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: + case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: + *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_DEVICE_ERROR; + return PVRSRV_OK; + default: + *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_UNKNOWN; + return PVRSRV_ERROR_INTERNAL_ERROR; + } +} + +PVRSRV_ERROR PVRSRVGetMultiCoreInfoKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32CapsSize, + IMG_UINT32 *pui32NumCores, + IMG_UINT64 *pui64Caps) +{ + PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_SUPPORTED; + PVR_UNREFERENCED_PARAMETER(psConnection); + + if (psDeviceNode->pfnGetMultiCoreInfo != NULL) + { + eError = psDeviceNode->pfnGetMultiCoreInfo(psDeviceNode, ui32CapsSize, pui32NumCores, pui64Caps); + } + return eError; +} + + +/*! + * ***************************************************************************** + * @brief A wrapper for removing entries in the g_BridgeDispatchTable array. 
+ * All this does is zero the entry to allow for a full table re-population + * later. + * + * @param ui32BridgeGroup + * @param ui32Index + * + * @return + ********************************************************************************/ +void +UnsetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup, IMG_UINT32 ui32Index) +{ + ui32Index += g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC]; + + g_BridgeDispatchTable[ui32Index].pfFunction = NULL; + g_BridgeDispatchTable[ui32Index].hBridgeLock = NULL; +#if defined(DEBUG_BRIDGE_KM) + g_BridgeDispatchTable[ui32Index].pszIOCName = NULL; + g_BridgeDispatchTable[ui32Index].pszFunctionName = NULL; + g_BridgeDispatchTable[ui32Index].pszBridgeLockName = NULL; + g_BridgeDispatchTable[ui32Index].ui32CallCount = 0; + g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0; + g_BridgeDispatchTable[ui32Index].ui64TotalTimeNS = 0; + g_BridgeDispatchTable[ui32Index].ui64MaxTimeNS = 0; +#endif +} + +/*! + * ***************************************************************************** + * @brief A wrapper for filling in the g_BridgeDispatchTable array that does + * error checking. 
+ * + * @param ui32Index + * @param pszIOCName + * @param pfFunction + * @param pszFunctionName + * + * @return + ********************************************************************************/ +void +_SetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup, + IMG_UINT32 ui32Index, + const IMG_CHAR *pszIOCName, + BridgeWrapperFunction pfFunction, + const IMG_CHAR *pszFunctionName, + POS_LOCK hBridgeLock, + const IMG_CHAR *pszBridgeLockName) +{ + static IMG_UINT32 ui32PrevIndex = IMG_UINT32_MAX; /* -1 */ + +#if !defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) && !defined(DEBUG_BRIDGE_KM) + PVR_UNREFERENCED_PARAMETER(pszFunctionName); + PVR_UNREFERENCED_PARAMETER(pszBridgeLockName); +#endif + + ui32Index += g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC]; + +#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) + /* Enable this to dump out the dispatch table entries */ + PVR_DPF((PVR_DBG_WARNING, "%s: g_BridgeDispatchTableStartOffsets[%d]=%d", __func__, ui32BridgeGroup, g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC])); + PVR_DPF((PVR_DBG_WARNING, "%s: %d %s %s %s", __func__, ui32Index, pszIOCName, pszFunctionName, pszBridgeLockName)); +#endif + + /* Any gaps are sub-optimal in-terms of memory usage, but we are mainly + * interested in spotting any large gap of wasted memory that could be + * accidentally introduced. + * + * This will currently flag up any gaps > 5 entries. + * + * NOTE: This shouldn't be debug only since switching from debug->release + * etc is likely to modify the available ioctls and thus be a point where + * mistakes are exposed. This isn't run at a performance critical time. 
+ */ + if ((ui32PrevIndex != IMG_UINT32_MAX) && + ((ui32Index >= ui32PrevIndex + DISPATCH_TABLE_GAP_THRESHOLD) || + (ui32Index <= ui32PrevIndex))) + { +#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) + PVR_DPF((PVR_DBG_WARNING, + "%s: There is a gap in the dispatch table between indices %u (%s) and %u (%s)", + __func__, ui32PrevIndex, g_BridgeDispatchTable[ui32PrevIndex].pszIOCName, + ui32Index, pszIOCName)); +#else + PVR_DPF((PVR_DBG_MESSAGE, + "%s: There is a gap in the dispatch table between indices %u and %u (%s)", + __func__, (IMG_UINT)ui32PrevIndex, (IMG_UINT)ui32Index, pszIOCName)); +#endif + } + + if (ui32Index >= BRIDGE_DISPATCH_TABLE_ENTRY_COUNT) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Index %u (%s) out of range", + __func__, (IMG_UINT)ui32Index, pszIOCName)); + +#if defined(DEBUG_BRIDGE_KM) + PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE_DISPATCH_TABLE_ENTRY_COUNT = %lu", + __func__, BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)); +#if defined(SUPPORT_RGX) + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST)); + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST)); + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST)); + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST)); + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST)); + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST)); + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST)); + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST)); + + 
PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGX_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_RGX_DISPATCH_LAST)); + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGX_LAST = %lu", + __func__, PVRSRV_BRIDGE_RGX_LAST)); +#endif + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_LAST = %lu", + __func__, PVRSRV_BRIDGE_LAST)); + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST)); + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST)); + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST)); +#endif + + OSPanic(); + } + + /* Panic if the previous entry has been overwritten as this is not allowed! + * NOTE: This shouldn't be debug only since switching from debug->release + * etc is likely to modify the available ioctls and thus be a point where + * mistakes are exposed. This isn't run at a performance critical time. + */ + if (g_BridgeDispatchTable[ui32Index].pfFunction) + { + if (g_BridgeDispatchTable[ui32Index].pfFunction != pfFunction) + { +#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) + PVR_DPF((PVR_DBG_ERROR, + "%s: Adding dispatch table entry for %s clobbers an existing entry for %s (current pfn=<%p>, new pfn=<%p>)", + __func__, pszIOCName, g_BridgeDispatchTable[ui32Index].pszIOCName), + (void*)g_BridgeDispatchTable[ui32Index].pfFunction, (void*)pfFunction)); +#else + PVR_DPF((PVR_DBG_ERROR, + "%s: Adding dispatch table entry for %s clobbers an existing entry (index=%u). 
(current pfn=<%p>, new pfn=<%p>)", + __func__, pszIOCName, ui32Index, + (void*)g_BridgeDispatchTable[ui32Index].pfFunction, (void*)pfFunction)); + PVR_DPF((PVR_DBG_WARNING, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue.")); +#endif + OSPanic(); + } + } + else + { + g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction; + g_BridgeDispatchTable[ui32Index].hBridgeLock = hBridgeLock; +#if defined(DEBUG_BRIDGE_KM) + g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName; + g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName; + g_BridgeDispatchTable[ui32Index].pszBridgeLockName = pszBridgeLockName; + g_BridgeDispatchTable[ui32Index].ui32CallCount = 0; + g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0; + g_BridgeDispatchTable[ui32Index].ui64TotalTimeNS = 0; + g_BridgeDispatchTable[ui32Index].ui64MaxTimeNS = 0; +#endif + } + + ui32PrevIndex = ui32Index; +} + +static PVRSRV_ERROR _BridgeBufferAlloc(void *pvPrivData, void **pvOut) +{ + PVR_UNREFERENCED_PARAMETER(pvPrivData); + + *pvOut = OSAllocZMem(PVRSRV_MAX_BRIDGE_IN_SIZE + + PVRSRV_MAX_BRIDGE_OUT_SIZE); + PVR_RETURN_IF_NOMEM(*pvOut); + + return PVRSRV_OK; +} + +static void _BridgeBufferFree(void *pvPrivData, void *pvFreeData) +{ + PVR_UNREFERENCED_PARAMETER(pvPrivData); + + OSFreeMem(pvFreeData); +} + +PVRSRV_ERROR BridgeDispatcherInit(void) +{ + PVRSRV_ERROR eError; + +#if defined(DEBUG_BRIDGE_KM) + eError = OSLockCreate(&g_hStatsLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", errorLockCreateFailed); +#endif + + eError = PVRSRVPoolCreate(_BridgeBufferAlloc, + _BridgeBufferFree, + PVRSRV_MAX_POOLED_BRIDGE_BUFFERS, + "Bridge buffer pool", + NULL, + &g_psBridgeBufferPool); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVPoolCreate", erroPoolCreateFailed); + + return PVRSRV_OK; + +erroPoolCreateFailed: +#if defined(DEBUG_BRIDGE_KM) + OSLockDestroy(g_hStatsLock); + g_hStatsLock = NULL; +errorLockCreateFailed: +#endif + return eError; +} + +void 
BridgeDispatcherDeinit(void) +{ + if (g_psBridgeBufferPool) + { + PVRSRVPoolDestroy(g_psBridgeBufferPool); + g_psBridgeBufferPool = NULL; + } + +#if defined(DEBUG_BRIDGE_KM) + if (g_hStatsLock) + { + OSLockDestroy(g_hStatsLock); + g_hStatsLock = NULL; + } +#endif +} + +PVRSRV_ERROR BridgedDispatchKM(CONNECTION_DATA * psConnection, + PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM) +{ + + void * psBridgeIn=NULL; + void * psBridgeOut=NULL; + BridgeWrapperFunction pfBridgeHandler; + IMG_UINT32 ui32DispatchTableEntry, ui32GroupBoundary; + PVRSRV_ERROR err = PVRSRV_OK; + PVRSRV_POOL_TOKEN hBridgeBufferPoolToken = NULL; + IMG_UINT32 ui32Timestamp = OSClockus(); +#if defined(DEBUG_BRIDGE_KM) + IMG_UINT64 ui64TimeStart; + IMG_UINT64 ui64TimeEnd; + IMG_UINT64 ui64TimeDiff; +#endif + IMG_UINT32 ui32DispatchTableIndex, ui32DispatchTableEntryIndex; + +#if defined(DEBUG_BRIDGE_KM_STOP_AT_DISPATCH) + PVR_DBG_BREAK; +#endif + + if (psBridgePackageKM->ui32BridgeID >= BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Out of range dispatch table group ID: %d", + __func__, psBridgePackageKM->ui32BridgeID)); + PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EINVAL, return_error); + } + + ui32DispatchTableIndex = OSConfineArrayIndexNoSpeculation(psBridgePackageKM->ui32BridgeID, BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT); + + ui32DispatchTableEntry = g_BridgeDispatchTableStartOffsets[ui32DispatchTableIndex][PVR_DISPATCH_OFFSET_FIRST_FUNC]; + ui32GroupBoundary = g_BridgeDispatchTableStartOffsets[ui32DispatchTableIndex][PVR_DISPATCH_OFFSET_LAST_FUNC]; + + /* bridge function is not implemented in this build */ + if (0 == ui32DispatchTableEntry) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Dispatch table entry=%d, boundary = %d, (bridge module %d, function %d)", + __func__, + ui32DispatchTableEntry, + ui32GroupBoundary, + psBridgePackageKM->ui32BridgeID, + psBridgePackageKM->ui32FunctionID)); + /* this points to DummyBW() which returns PVRSRV_ERROR_ENOTTY */ + err = 
g_BridgeDispatchTable[ui32DispatchTableEntry].pfFunction(ui32DispatchTableEntry, + psBridgeIn, + psBridgeOut, + psConnection); + goto return_error; + } + if ((ui32DispatchTableEntry + psBridgePackageKM->ui32FunctionID) > ui32GroupBoundary) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Dispatch table entry=%d, boundary = %d, (bridge module %d, function %d)", + __func__, + ui32DispatchTableEntry, + ui32GroupBoundary, + psBridgePackageKM->ui32BridgeID, + psBridgePackageKM->ui32FunctionID)); + PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EINVAL, return_error); + } + ui32DispatchTableEntry += psBridgePackageKM->ui32FunctionID; + ui32DispatchTableEntryIndex = OSConfineArrayIndexNoSpeculation(ui32DispatchTableEntry, ui32GroupBoundary+1); + if (BRIDGE_DISPATCH_TABLE_ENTRY_COUNT <= ui32DispatchTableEntry) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Dispatch table entry=%d, entry count = %lu," + " (bridge module %d, function %d)", __func__, + ui32DispatchTableEntry, BRIDGE_DISPATCH_TABLE_ENTRY_COUNT, + psBridgePackageKM->ui32BridgeID, + psBridgePackageKM->ui32FunctionID)); + PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EINVAL, return_error); + } +#if defined(DEBUG_BRIDGE_KM) + PVR_DPF((PVR_DBG_MESSAGE, "%s: Dispatch table entry index=%d, (bridge module %d, function %d)", + __func__, + ui32DispatchTableEntryIndex, psBridgePackageKM->ui32BridgeID, psBridgePackageKM->ui32FunctionID)); + PVR_DPF((PVR_DBG_MESSAGE, "%s: %s", + __func__, + g_BridgeDispatchTable[ui32DispatchTableEntryIndex].pszIOCName)); + g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui32CallCount++; + g_BridgeGlobalStats.ui32IOCTLCount++; +#endif + + if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock != NULL) + { + OSLockAcquire(g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock); + } +#if !defined(INTEGRITY_OS) + /* try to acquire a bridge buffer from the pool */ + + err = PVRSRVPoolGet(g_psBridgeBufferPool, + &hBridgeBufferPoolToken, + &psBridgeIn); + PVR_LOG_GOTO_IF_ERROR(err, "PVRSRVPoolGet", 
unlock_and_return_error); + + psBridgeOut = ((IMG_BYTE *) psBridgeIn) + PVRSRV_MAX_BRIDGE_IN_SIZE; +#endif + +#if defined(DEBUG_BRIDGE_KM) + ui64TimeStart = OSClockns64(); +#endif + + if (psBridgePackageKM->ui32InBufferSize > PVRSRV_MAX_BRIDGE_IN_SIZE) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Bridge input buffer too small " + "(data size %u, buffer size %u)!", __func__, + psBridgePackageKM->ui32InBufferSize, PVRSRV_MAX_BRIDGE_IN_SIZE)); + PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_ERANGE, unlock_and_return_error); + } + +#if !defined(INTEGRITY_OS) + if (psBridgePackageKM->ui32OutBufferSize > PVRSRV_MAX_BRIDGE_OUT_SIZE) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Bridge output buffer too small " + "(data size %u, buffer size %u)!", __func__, + psBridgePackageKM->ui32OutBufferSize, PVRSRV_MAX_BRIDGE_OUT_SIZE)); + PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_ERANGE, unlock_and_return_error); + } + + if ((CopyFromUserWrapper (psConnection, + ui32DispatchTableEntryIndex, + psBridgeIn, + psBridgePackageKM->pvParamIn, + psBridgePackageKM->ui32InBufferSize) != PVRSRV_OK) +#if defined(__QNXNTO__) +/* For Neutrino, the output bridge buffer acts as an input as well */ + || (CopyFromUserWrapper(psConnection, + ui32DispatchTableEntryIndex, + psBridgeOut, + (void *)((uintptr_t)psBridgePackageKM->pvParamIn + psBridgePackageKM->ui32InBufferSize), + psBridgePackageKM->ui32OutBufferSize) != PVRSRV_OK) +#endif + ) /* end of if-condition */ + { + PVR_LOG_GOTO_WITH_ERROR("CopyFromUserWrapper", err, PVRSRV_ERROR_BRIDGE_EFAULT, unlock_and_return_error); + } +#else + psBridgeIn = psBridgePackageKM->pvParamIn; + psBridgeOut = psBridgePackageKM->pvParamOut; +#endif + + pfBridgeHandler = + (BridgeWrapperFunction)g_BridgeDispatchTable[ui32DispatchTableEntryIndex].pfFunction; + + if (pfBridgeHandler == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: ui32DispatchTableEntry = %d is not a registered function!", + __func__, ui32DispatchTableEntry)); + PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EFAULT, 
unlock_and_return_error); + } + + /* pfBridgeHandler functions do not fail and return an IMG_INT. + * The value returned is either 0 or PVRSRV_OK (0). + * In the event this changes an error may be +ve or -ve, + * so try to return something consistent here. + */ + if (0 != pfBridgeHandler(ui32DispatchTableEntryIndex, + psBridgeIn, + psBridgeOut, + psConnection) + ) + { + PVR_LOG_GOTO_WITH_ERROR("pfBridgeHandler", err, PVRSRV_ERROR_BRIDGE_EPERM, unlock_and_return_error); + } + + /* + This should always be true as a.t.m. all bridge calls have to + return an error message, but this could change so we do this + check to be safe. + */ + if (psBridgePackageKM->ui32OutBufferSize > 0) + { +#if !defined(INTEGRITY_OS) + if (CopyToUserWrapper (psConnection, + ui32DispatchTableEntryIndex, + psBridgePackageKM->pvParamOut, + psBridgeOut, + psBridgePackageKM->ui32OutBufferSize) != PVRSRV_OK) + { + PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EFAULT, unlock_and_return_error); + } +#endif + } + +#if defined(DEBUG_BRIDGE_KM) + ui64TimeEnd = OSClockns64(); + + ui64TimeDiff = ui64TimeEnd - ui64TimeStart; + + /* if there is no lock held then acquire the stats lock to + * ensure the calculations are done safely + */ + if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock == NULL) + { + BridgeGlobalStatsLock(); + } + + g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui64TotalTimeNS += ui64TimeDiff; + + if (ui64TimeDiff > g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui64MaxTimeNS) + { + g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui64MaxTimeNS = ui64TimeDiff; + } + + if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock == NULL) + { + BridgeGlobalStatsUnlock(); + } +#endif + +unlock_and_return_error: + + if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock != NULL) + { + OSLockRelease(g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock); + } + +#if !defined(INTEGRITY_OS) + if (hBridgeBufferPoolToken != NULL) + { + err = 
PVRSRVPoolPut(g_psBridgeBufferPool, + hBridgeBufferPoolToken); + PVR_LOG_IF_ERROR(err, "PVRSRVPoolPut"); + } +#endif + +return_error: + if (err) + { + PVR_DPF((PVR_DBG_ERROR, "%s: returning (err = %d)", __func__, err)); + } + /* ignore transport layer bridge to avoid HTB flooding */ + if (psBridgePackageKM->ui32BridgeID != PVRSRV_BRIDGE_PVRTL) + { + if (err) + { + HTBLOGK(HTB_SF_BRG_BRIDGE_CALL_ERR, ui32Timestamp, + psBridgePackageKM->ui32BridgeID, + psBridgePackageKM->ui32FunctionID, err); + } + else + { + HTBLOGK(HTB_SF_BRG_BRIDGE_CALL, ui32Timestamp, + psBridgePackageKM->ui32BridgeID, + psBridgePackageKM->ui32FunctionID); + } + } + + return err; +} + +PVRSRV_ERROR PVRSRVFindProcessMemStatsKM(IMG_PID pid, IMG_UINT32 ui32ArrSize, IMG_BOOL bAllProcessStats, IMG_UINT32 *pui32MemStatArray) +{ +#if !defined(__QNXNTO__) + return PVRSRVFindProcessMemStats(pid, + ui32ArrSize, + bAllProcessStats, + pui32MemStatArray); +#else + PVR_DPF((PVR_DBG_ERROR, "This functionality is not yet implemented for this platform")); + + return PVRSRV_ERROR_NOT_SUPPORTED; +#endif + +} diff --git a/drivers/mcst/gpu-imgtec/services/server/common/sync_checkpoint.c b/drivers/mcst/gpu-imgtec/services/server/common/sync_checkpoint.c new file mode 100644 index 000000000000..460b4f749a3c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/sync_checkpoint.c @@ -0,0 +1,3006 @@ +/*************************************************************************/ /*! +@File +@Title Services synchronisation checkpoint interface +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Server side code for services synchronisation interface +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ /**************************************************************************/ + +#include "img_defs.h" +#include "img_types.h" +#include "allocmem.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "pvr_debug.h" +#include "pvr_notifier.h" +#include "osfunc.h" +#include "dllist.h" +#include "sync.h" +#include "sync_checkpoint_external.h" +#include "sync_checkpoint.h" +#include "sync_checkpoint_internal.h" +#include "sync_checkpoint_internal_fw.h" +#include "sync_checkpoint_init.h" +#include "lock.h" +#include "log2.h" +#include "pvrsrv.h" +#include "pdump_km.h" +#include "info_page.h" + +#include "pvrsrv_sync_km.h" +#include "rgxhwperf.h" + +#if defined(PVRSRV_NEED_PVR_DPF) + +/* Enable this to turn on debug relating to the creation and + resolution of contexts */ +#define ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG 0 + +/* Enable this to turn on debug relating to the creation and + resolution of fences */ +#define ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG 0 + +/* Enable this to turn on debug relating to the sync checkpoint + allocation and freeing */ +#define ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG 0 + +/* Enable this to turn on debug relating to the sync checkpoint + enqueuing and signalling */ +#define ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG 0 + +/* Enable this to turn on debug relating to the sync checkpoint pool */ +#define ENABLE_SYNC_CHECKPOINT_POOL_DEBUG 0 + +/* Enable this to turn on debug relating to sync checkpoint UFO + lookup */ +#define ENABLE_SYNC_CHECKPOINT_UFO_DEBUG 0 + +/* Enable this to turn on sync checkpoint deferred cleanup debug + * (for syncs we have been told to free but which have some + * outstanding FW operations remaining (enqueued in CCBs) + */ +#define ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG 0 + +#else + +#define ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG 0 +#define ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG 0 +#define ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG 0 +#define ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG 0 +#define 
ENABLE_SYNC_CHECKPOINT_POOL_DEBUG 0 +#define ENABLE_SYNC_CHECKPOINT_UFO_DEBUG 0 +#define ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG 0 + +#endif + +/* Maximum number of deferred sync checkpoint signal/error received for atomic context */ +#define SYNC_CHECKPOINT_MAX_DEFERRED_SIGNAL 500 + +/* Set the size of the sync checkpoint pool (not used if 0). + * A pool will be maintained for each sync checkpoint context. + */ +#if defined(PDUMP) +#define SYNC_CHECKPOINT_POOL_SIZE 0 +#else +#define SYNC_CHECKPOINT_POOL_SIZE 128 +#define SYNC_CHECKPOINT_POOL_MASK (SYNC_CHECKPOINT_POOL_SIZE - 1) +#endif + +/* The 'sediment' value represents the minimum number of + * sync checkpoints which must be in the pool before one + * will be allocated from the pool rather than from memory. + * This effectively helps avoid re-use of a sync checkpoint + * just after it has been returned to the pool, making + * debugging somewhat easier to understand. + */ +#define SYNC_CHECKPOINT_POOL_SEDIMENT 20 + +#if (SYNC_CHECKPOINT_POOL_SIZE & (SYNC_CHECKPOINT_POOL_SIZE - 1)) != 0 +#error "SYNC_CHECKPOINT_POOL_SIZE must be power of 2." +#endif + +#define SYNC_CHECKPOINT_BLOCK_LIST_CHUNK_SIZE 10 + +/* + This defines the maximum amount of synchronisation memory + that can be allocated per sync checkpoint context. + In reality this number is meaningless as we would run out + of synchronisation memory before we reach this limit, but + we need to provide a size to the span RA. 
+ */ +#define MAX_SYNC_CHECKPOINT_MEM (4 * 1024 * 1024) + + +typedef struct _SYNC_CHECKPOINT_BLOCK_LIST_ +{ + IMG_UINT32 ui32BlockCount; /*!< Number of contexts in the list */ + IMG_UINT32 ui32BlockListSize; /*!< Size of the array contexts */ + SYNC_CHECKPOINT_BLOCK **papsSyncCheckpointBlock; /*!< Array of sync checkpoint blocks */ +} SYNC_CHECKPOINT_BLOCK_LIST; + +typedef struct _SYNC_CHECKPOINT_CONTEXT_CTL_ +{ + SHARED_DEV_CONNECTION psDeviceNode; + PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve; + PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate; + /* + * Used as head of linked-list of sync checkpoints for which + * SyncCheckpointFree() has been called, but have outstanding + * FW operations (enqueued in CCBs) + * This list will be check whenever a SyncCheckpointFree() is + * called, and when SyncCheckpointContextDestroy() is called. + */ + DLLIST_NODE sDeferredCleanupListHead; + /* Lock to protect the deferred cleanup list */ + POS_SPINLOCK hDeferredCleanupListLock; + +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) + _SYNC_CHECKPOINT *psSyncCheckpointPool[SYNC_CHECKPOINT_POOL_SIZE]; + IMG_BOOL bSyncCheckpointPoolFull; + IMG_BOOL bSyncCheckpointPoolValid; + IMG_UINT32 ui32SyncCheckpointPoolCount; + IMG_UINT32 ui32SyncCheckpointPoolWp; + IMG_UINT32 ui32SyncCheckpointPoolRp; + POS_SPINLOCK hSyncCheckpointPoolLock; /*! Protects access to the checkpoint pool control data. */ +#endif +} _SYNC_CHECKPOINT_CONTEXT_CTL; + +/* this is the max number of sync checkpoint records we will search or dump + * at any time. + */ +#define SYNC_CHECKPOINT_RECORD_LIMIT 20000 + +#define DECREMENT_WITH_WRAP(value, sz) ((value) ? 
((value) - 1) : ((sz) - 1)) + +struct SYNC_CHECKPOINT_RECORD +{ + PVRSRV_DEVICE_NODE *psDevNode; + SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock; /*!< handle to SYNC_CHECKPOINT_BLOCK */ + IMG_UINT32 ui32SyncOffset; /*!< offset to sync in block */ + IMG_UINT32 ui32FwBlockAddr; + IMG_PID uiPID; + IMG_UINT32 ui32UID; + IMG_UINT64 ui64OSTime; + DLLIST_NODE sNode; + IMG_CHAR szClassName[PVRSRV_SYNC_NAME_LENGTH]; + PSYNC_CHECKPOINT pSyncCheckpt; +}; + +static IMG_BOOL gbSyncCheckpointInit = IMG_FALSE; +static PFN_SYNC_CHECKPOINT_STRUCT *g_psSyncCheckpointPfnStruct; + +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) +static _SYNC_CHECKPOINT *_GetCheckpointFromPool(_SYNC_CHECKPOINT_CONTEXT *psContext); +static IMG_BOOL _PutCheckpointInPool(_SYNC_CHECKPOINT *psSyncCheckpoint); +static IMG_UINT32 _CleanCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext); +#endif + +#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1) +static IMG_UINT32 gui32NumSyncCheckpointContexts = 0; +#endif + +/* Defined values to indicate status of sync checkpoint, which is + * stored in the memory of the structure */ +#define SYNC_CHECKPOINT_PATTERN_IN_USE 0x1a1aa +#define SYNC_CHECKPOINT_PATTERN_IN_POOL 0x2b2bb +#define SYNC_CHECKPOINT_PATTERN_FREED 0x3c3cc + +#if defined(SUPPORT_RGX) +static inline void RGXSRVHWPerfSyncCheckpointUFOIsSignalled(PVRSRV_RGXDEV_INFO *psDevInfo, + _SYNC_CHECKPOINT *psSyncCheckpointInt, IMG_UINT32 ui32FenceSyncFlags) +{ + if (RGXHWPerfHostIsEventEnabled(psDevInfo, RGX_HWPERF_HOST_UFO) + && !(ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) + { + RGX_HWPERF_UFO_EV eEv; + RGX_HWPERF_UFO_DATA_ELEMENT sSyncData; + + if (psSyncCheckpointInt) + { + if ((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) || + (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED)) + { + sSyncData.sCheckSuccess.ui32FWAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt); + sSyncData.sCheckSuccess.ui32Value = 
psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; + eEv = RGX_HWPERF_UFO_EV_CHECK_SUCCESS; + } + else + { + sSyncData.sCheckFail.ui32FWAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt); + sSyncData.sCheckFail.ui32Value = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; + sSyncData.sCheckFail.ui32Required = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + eEv = RGX_HWPERF_UFO_EV_CHECK_FAIL; + } + RGXHWPerfHostPostUfoEvent(psDevInfo, eEv, &sSyncData, + (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? IMG_FALSE : IMG_TRUE); + } + } +} + +static inline void RGXSRVHWPerfSyncCheckpointUFOUpdate(PVRSRV_RGXDEV_INFO *psDevInfo, + _SYNC_CHECKPOINT *psSyncCheckpointInt, IMG_UINT32 ui32FenceSyncFlags) +{ + if (RGXHWPerfHostIsEventEnabled(psDevInfo, RGX_HWPERF_HOST_UFO) + && !(ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) + { + RGX_HWPERF_UFO_DATA_ELEMENT sSyncData; + + if (psSyncCheckpointInt) + { + sSyncData.sUpdate.ui32FWAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt); + sSyncData.sUpdate.ui32OldValue = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; + sSyncData.sUpdate.ui32NewValue = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + RGXHWPerfHostPostUfoEvent(psDevInfo, RGX_HWPERF_UFO_EV_UPDATE, &sSyncData, + (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? 
IMG_FALSE : IMG_TRUE); + } + } +} +#endif + +static PVRSRV_ERROR +_SyncCheckpointRecordAdd(PSYNC_CHECKPOINT_RECORD_HANDLE *phRecord, + SYNC_CHECKPOINT_BLOCK *hSyncCheckpointBlock, + IMG_UINT32 ui32FwBlockAddr, + IMG_UINT32 ui32SyncOffset, + IMG_UINT32 ui32UID, + IMG_UINT32 ui32ClassNameSize, + const IMG_CHAR *pszClassName, PSYNC_CHECKPOINT pSyncCheckpt); +static PVRSRV_ERROR +_SyncCheckpointRecordRemove(PSYNC_CHECKPOINT_RECORD_HANDLE hRecord); +static void _SyncCheckpointState(PDLLIST_NODE psNode, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); +static void _SyncCheckpointDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); +static PVRSRV_ERROR _SyncCheckpointRecordListInit(PVRSRV_DEVICE_NODE *psDevNode); +static void _SyncCheckpointRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode); + +#if defined(PDUMP) +static void +MISRHandler_PdumpDeferredSyncSignalPoster(void *pvData); +static PVRSRV_ERROR _SyncCheckpointAllocPDump(_SYNC_CHECKPOINT *psSyncCheckpoint); +static PVRSRV_ERROR _SyncCheckpointUpdatePDump(PPVRSRV_DEVICE_NODE psDevNode, _SYNC_CHECKPOINT *psSyncCheckpoint, IMG_UINT32 ui32Status, IMG_UINT32 ui32FenceSyncFlags); +static PVRSRV_ERROR _SyncCheckpointPDumpTransition(void *pvData, PDUMP_TRANSITION_EVENT eEvent); +#endif + +/* Unique incremental ID assigned to sync checkpoints when allocated */ +static IMG_UINT32 g_SyncCheckpointUID; + +static void _CheckDeferredCleanupList(_SYNC_CHECKPOINT_CONTEXT *psContext); + +void SyncCheckpointContextUnref(PSYNC_CHECKPOINT_CONTEXT psContext) +{ + _SYNC_CHECKPOINT_CONTEXT *psContextInt = (_SYNC_CHECKPOINT_CONTEXT *) psContext; + _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContextInt->psContextCtl; + IMG_UINT32 ui32RefCt = OSAtomicRead(&psContextInt->hRefCount); + + if (ui32RefCt == 0) + { + PVR_LOG_ERROR(PVRSRV_ERROR_INVALID_CONTEXT, + "SyncCheckpointContextUnref context already freed"); + } + 
else if (OSAtomicDecrement(&psContextInt->hRefCount) == 0) + { + /* SyncCheckpointContextDestroy only when no longer referenced */ + OSSpinLockDestroy(psCtxCtl->hDeferredCleanupListLock); + psCtxCtl->hDeferredCleanupListLock = NULL; +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) + if (psCtxCtl->ui32SyncCheckpointPoolCount) + { + PVR_DPF((PVR_DBG_WARNING, + "%s called for context<%p> with %d sync checkpoints still" + " in the pool", + __func__, + (void *) psContext, + psCtxCtl->ui32SyncCheckpointPoolCount)); + } + psCtxCtl->bSyncCheckpointPoolValid = IMG_FALSE; + OSSpinLockDestroy(psCtxCtl->hSyncCheckpointPoolLock); + psCtxCtl->hSyncCheckpointPoolLock = NULL; +#endif + OSFreeMem(psContextInt->psContextCtl); + RA_Delete(psContextInt->psSpanRA); + RA_Delete(psContextInt->psSubAllocRA); + OSLockDestroy(psContextInt->hLock); + psContextInt->hLock = NULL; + OSFreeMem(psContext); + } +} + +void SyncCheckpointContextRef(PSYNC_CHECKPOINT_CONTEXT psContext) +{ + _SYNC_CHECKPOINT_CONTEXT *psContextInt = (_SYNC_CHECKPOINT_CONTEXT *)psContext; + IMG_UINT32 ui32RefCt = OSAtomicRead(&psContextInt->hRefCount); + + if (ui32RefCt == 0) + { + PVR_LOG_ERROR(PVRSRV_ERROR_INVALID_CONTEXT, + "SyncCheckpointContextRef context use after free"); + } + else + { + OSAtomicIncrement(&psContextInt->hRefCount); + } +} + +/* + Internal interfaces for management of synchronisation block memory + */ +static PVRSRV_ERROR +_AllocSyncCheckpointBlock(_SYNC_CHECKPOINT_CONTEXT *psContext, + SYNC_CHECKPOINT_BLOCK **ppsSyncBlock) +{ + PVRSRV_DEVICE_NODE *psDevNode; + SYNC_CHECKPOINT_BLOCK *psSyncBlk; + PVRSRV_ERROR eError; + + psSyncBlk = OSAllocMem(sizeof(*psSyncBlk)); + PVR_LOG_GOTO_IF_NOMEM(psSyncBlk, eError, fail_alloc); + + psSyncBlk->psContext = psContext; + + /* Allocate sync checkpoint block */ + psDevNode = psContext->psDevNode; + PVR_LOG_GOTO_IF_INVALID_PARAM(psDevNode, eError, fail_alloc_ufo_block); + + psSyncBlk->psDevNode = psDevNode; + + eError = psDevNode->pfnAllocUFOBlock(psDevNode, + 
&psSyncBlk->hMemDesc, + &psSyncBlk->ui32FirmwareAddr, + &psSyncBlk->ui32SyncBlockSize); + PVR_LOG_GOTO_IF_ERROR(eError, "pfnAllocUFOBlock", fail_alloc_ufo_block); + + eError = DevmemAcquireCpuVirtAddr(psSyncBlk->hMemDesc, + (void **) &psSyncBlk->pui32LinAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail_devmem_acquire); + + OSAtomicWrite(&psSyncBlk->hRefCount, 1); + + OSLockCreate(&psSyncBlk->hLock); + + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Allocated Sync Checkpoint UFO block (FirmwareVAddr = 0x%08x)", + psSyncBlk->ui32FirmwareAddr); +#if defined(PDUMP) + OSLockAcquire(psContext->hSyncCheckpointBlockListLock); + dllist_add_to_tail(&psContext->sSyncCheckpointBlockListHead, &psSyncBlk->sListNode); + OSLockRelease(psContext->hSyncCheckpointBlockListLock); +#endif + + *ppsSyncBlock = psSyncBlk; + return PVRSRV_OK; + +fail_devmem_acquire: + psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->hMemDesc); +fail_alloc_ufo_block: + OSFreeMem(psSyncBlk); +fail_alloc: + return eError; +} + +static void +_FreeSyncCheckpointBlock(SYNC_CHECKPOINT_BLOCK *psSyncBlk) +{ + OSLockAcquire(psSyncBlk->hLock); + if (0 == OSAtomicDecrement(&psSyncBlk->hRefCount)) + { + PVRSRV_DEVICE_NODE *psDevNode = psSyncBlk->psDevNode; + +#if defined(PDUMP) + OSLockAcquire(psSyncBlk->psContext->hSyncCheckpointBlockListLock); + dllist_remove_node(&psSyncBlk->sListNode); + OSLockRelease(psSyncBlk->psContext->hSyncCheckpointBlockListLock); +#endif + DevmemReleaseCpuVirtAddr(psSyncBlk->hMemDesc); + psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->hMemDesc); + OSLockRelease(psSyncBlk->hLock); + OSLockDestroy(psSyncBlk->hLock); + psSyncBlk->hLock = NULL; + OSFreeMem(psSyncBlk); + } + else + { + OSLockRelease(psSyncBlk->hLock); + } +} + +static PVRSRV_ERROR +_SyncCheckpointBlockImport(RA_PERARENA_HANDLE hArena, + RA_LENGTH_T uSize, + RA_FLAGS_T uFlags, + const IMG_CHAR *pszAnnotation, + RA_BASE_T *puiBase, + RA_LENGTH_T *puiActualSize, + RA_PERISPAN_HANDLE *phImport) +{ + 
_SYNC_CHECKPOINT_CONTEXT *psContext = hArena; + SYNC_CHECKPOINT_BLOCK *psSyncBlock = NULL; + RA_LENGTH_T uiSpanSize; + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(uFlags); + + PVR_LOG_RETURN_IF_INVALID_PARAM((hArena != NULL), "hArena"); + + /* Check we've not be called with an unexpected size */ + PVR_LOG_RETURN_IF_INVALID_PARAM((uSize == sizeof(SYNC_CHECKPOINT_FW_OBJ)), "uSize"); + + /* + Ensure the sync checkpoint context doesn't go away while we have + sync blocks attached to it. + */ + SyncCheckpointContextRef((PSYNC_CHECKPOINT_CONTEXT)psContext); + + /* Allocate the block of memory */ + eError = _AllocSyncCheckpointBlock(psContext, &psSyncBlock); + PVR_GOTO_IF_ERROR(eError, fail_syncblockalloc); + + /* Allocate a span for it */ + eError = RA_Alloc(psContext->psSpanRA, + psSyncBlock->ui32SyncBlockSize, + RA_NO_IMPORT_MULTIPLIER, + 0, + psSyncBlock->ui32SyncBlockSize, + pszAnnotation, + &psSyncBlock->uiSpanBase, + &uiSpanSize, + NULL); + PVR_GOTO_IF_ERROR(eError, fail_spanalloc); + + /* + There is no reason the span RA should return an allocation larger + then we request + */ + PVR_LOG_IF_FALSE((uiSpanSize == psSyncBlock->ui32SyncBlockSize), + "uiSpanSize invalid"); + + *puiBase = psSyncBlock->uiSpanBase; + *puiActualSize = psSyncBlock->ui32SyncBlockSize; + *phImport = psSyncBlock; + return PVRSRV_OK; + +fail_spanalloc: + _FreeSyncCheckpointBlock(psSyncBlock); +fail_syncblockalloc: + SyncCheckpointContextUnref((PSYNC_CHECKPOINT_CONTEXT)psContext); + + return eError; +} + +static void +_SyncCheckpointBlockUnimport(RA_PERARENA_HANDLE hArena, + RA_BASE_T uiBase, + RA_PERISPAN_HANDLE hImport) +{ + _SYNC_CHECKPOINT_CONTEXT *psContext = hArena; + SYNC_CHECKPOINT_BLOCK *psSyncBlock = hImport; + + PVR_LOG_RETURN_VOID_IF_FALSE((psContext != NULL), "hArena invalid"); + PVR_LOG_RETURN_VOID_IF_FALSE((psSyncBlock != NULL), "hImport invalid"); + PVR_LOG_RETURN_VOID_IF_FALSE((uiBase == psSyncBlock->uiSpanBase), "uiBase invalid"); + + /* Free the span this import is using 
*/ + RA_Free(psContext->psSpanRA, uiBase); + + /* Free the sync checkpoint block */ + _FreeSyncCheckpointBlock(psSyncBlock); + + /* Drop our reference to the sync checkpoint context */ + SyncCheckpointContextUnref((PSYNC_CHECKPOINT_CONTEXT)psContext); +} + +static INLINE IMG_UINT32 _SyncCheckpointGetOffset(_SYNC_CHECKPOINT *psSyncInt) +{ + IMG_UINT64 ui64Temp; + + ui64Temp = psSyncInt->uiSpanAddr - psSyncInt->psSyncCheckpointBlock->uiSpanBase; + PVR_ASSERT(ui64TemppfnFenceResolve)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", + __func__)); + eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; + PVR_LOG_ERROR(eError, "g_pfnFenceResolve is NULL"); + return eError; + } + + if (papsSyncCheckpoints) + { + eError = g_psSyncCheckpointPfnStruct->pfnFenceResolve( + psSyncCheckpointContext, + hFence, + pui32NumSyncCheckpoints, + papsSyncCheckpoints, + pui64FenceUID); + } + else + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + PVR_LOG_RETURN_IF_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceResolve"); + +#if defined(PDUMP) + if (*papsSyncCheckpoints) + { + for (i = 0; i < *pui32NumSyncCheckpoints; i++) + { + psSyncCheckpoint = (_SYNC_CHECKPOINT *)(*papsSyncCheckpoints)[i]; + psSyncCheckpoint->ui32PDumpFlags = ui32PDumpFlags; + } + } +#endif + + if (*pui32NumSyncCheckpoints > MAX_SYNC_CHECKPOINTS_PER_FENCE) + { + PVR_DPF((PVR_DBG_ERROR, "%s: g_psSyncCheckpointPfnStruct->pfnFenceResolve() returned too many checkpoints (%u > MAX_SYNC_CHECKPOINTS_PER_FENCE=%u)", + __func__, *pui32NumSyncCheckpoints, MAX_SYNC_CHECKPOINTS_PER_FENCE)); + + /* Free resources after error */ + if (*papsSyncCheckpoints) + { + for (i = 0; i < *pui32NumSyncCheckpoints; i++) + { + SyncCheckpointDropRef((*papsSyncCheckpoints)[i]); + } + + SyncCheckpointFreeCheckpointListMem(*papsSyncCheckpoints); + } + + return PVRSRV_ERROR_INVALID_PARAMS; + } + +#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1) + { + IMG_UINT32 ii; + + PVR_DPF((PVR_DBG_WARNING, + 
"%s: g_psSyncCheckpointPfnStruct->pfnFenceResolve() for fence %d returned the following %d checkpoints:", + __func__, + hFence, + *pui32NumSyncCheckpoints)); + + for (ii=0; ii<*pui32NumSyncCheckpoints; ii++) + { + PSYNC_CHECKPOINT psNextCheckpoint = *(*papsSyncCheckpoints + ii); + PVR_DPF((PVR_DBG_WARNING, + "%s: *papsSyncCheckpoints[%d]:<%p>", + __func__, + ii, + (void*)psNextCheckpoint)); + } + } +#endif + + return eError; +} + +PVRSRV_ERROR +SyncCheckpointCreateFence(PVRSRV_DEVICE_NODE *psDevNode, + const IMG_CHAR *pszFenceName, + PVRSRV_TIMELINE hTimeline, + PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, + PVRSRV_FENCE *phNewFence, + IMG_UINT64 *puiUpdateFenceUID, + void **ppvFenceFinaliseData, + PSYNC_CHECKPOINT *psNewSyncCheckpoint, + void **ppvTimelineUpdateSyncPrim, + IMG_UINT32 *pui32TimelineUpdateValue, + PDUMP_FLAGS_T ui32PDumpFlags) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_UNREFERENCED_PARAMETER(psDevNode); + + if (unlikely(!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnFenceCreate)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", + __func__)); + eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; + PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceCreate is NULL"); + } + else + { + eError = g_psSyncCheckpointPfnStruct->pfnFenceCreate( + pszFenceName, + hTimeline, + psSyncCheckpointContext, + phNewFence, + puiUpdateFenceUID, + ppvFenceFinaliseData, + psNewSyncCheckpoint, + ppvTimelineUpdateSyncPrim, + pui32TimelineUpdateValue); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s failed to create new fence<%p> for timeline<%d> using " + "sync checkpoint context<%p>, psNewSyncCheckpoint=<%p>, eError=%s", + __func__, + (void*)phNewFence, + hTimeline, + (void*)psSyncCheckpointContext, + (void*)psNewSyncCheckpoint, + PVRSRVGetErrorString(eError))); + } +#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1) + else + { + PVR_DPF((PVR_DBG_WARNING, + "%s created 
new fence<%d> for timeline<%d> using " + "sync checkpoint context<%p>, new sync_checkpoint=<%p>", + __func__, + *phNewFence, + hTimeline, + (void*)psSyncCheckpointContext, + (void*)*psNewSyncCheckpoint)); + } +#endif + +#if defined(PDUMP) + if (eError == PVRSRV_OK) + { + _SYNC_CHECKPOINT *psSyncCheckpoint = (_SYNC_CHECKPOINT*)(*psNewSyncCheckpoint); + if (psSyncCheckpoint) + { + psSyncCheckpoint->ui32PDumpFlags = ui32PDumpFlags; + } + } +#endif + } + return eError; +} + +PVRSRV_ERROR +SyncCheckpointRollbackFenceData(PVRSRV_FENCE hFence, void *pvFinaliseData) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnFenceDataRollback) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", + __func__)); + eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; + PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceDataRollback is NULL"); + } + else + { +#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s: called to rollback fence data <%p>", + __func__, + pvFinaliseData)); +#endif + eError = g_psSyncCheckpointPfnStruct->pfnFenceDataRollback( + hFence, pvFinaliseData); + PVR_LOG_IF_ERROR(eError, + "g_psSyncCheckpointPfnStruct->pfnFenceDataRollback returned error"); + } + return eError; +} + +PVRSRV_ERROR +SyncCheckpointFinaliseFence(PPVRSRV_DEVICE_NODE psDevNode, + PVRSRV_FENCE hFence, + void *pvFinaliseData, + PSYNC_CHECKPOINT psSyncCheckpoint, + const IMG_CHAR *pszName) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnFenceFinalise) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Warning (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED) (this is permitted)", + __func__)); + eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; + } + else + { +#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s: called to finalise fence <%d>", + __func__, + hFence)); +#endif 
+ eError = g_psSyncCheckpointPfnStruct->pfnFenceFinalise(hFence, pvFinaliseData); + PVR_LOG_IF_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceFinalise returned error"); + + RGXSRV_HWPERF_ALLOC_FENCE(psDevNode, OSGetCurrentClientProcessIDKM(), hFence, + SyncCheckpointGetFirmwareAddr(psSyncCheckpoint), + pszName, OSStringLength(pszName)); + } + return eError; +} + +void +SyncCheckpointFreeCheckpointListMem(void *pvCheckpointListMem) +{ + if (g_psSyncCheckpointPfnStruct->pfnFreeCheckpointListMem) + { + g_psSyncCheckpointPfnStruct->pfnFreeCheckpointListMem(pvCheckpointListMem); + } +} + +PVRSRV_ERROR +SyncCheckpointNoHWUpdateTimelines(void *pvPrivateData) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnNoHWUpdateTimelines) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", + __func__)); + eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; + PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnNoHWUpdateTimelines is NULL"); + } + else + { + g_psSyncCheckpointPfnStruct->pfnNoHWUpdateTimelines(pvPrivateData); + } + return eError; + +} + +PVRSRV_ERROR +SyncCheckpointDumpInfoOnStalledUFOs(IMG_UINT32 ui32NumUFOs, IMG_UINT32 *pui32Vaddrs, IMG_UINT32 *pui32NumSyncOwnedUFOs) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_LOG_RETURN_IF_FALSE((pui32NumSyncOwnedUFOs != NULL), "pui32NumSyncOwnedUFOs invalid", PVRSRV_ERROR_INVALID_PARAMS); + + if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnDumpInfoOnStalledUFOs) + { + *pui32NumSyncOwnedUFOs = 0; + PVR_DPF((PVR_DBG_ERROR, + "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", + __func__)); + eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; + PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnDumpInfoOnStalledUFOs is NULL"); + } + else + { + *pui32NumSyncOwnedUFOs = g_psSyncCheckpointPfnStruct->pfnDumpInfoOnStalledUFOs(ui32NumUFOs, pui32Vaddrs); + PVR_LOG(("%d sync 
checkpoint%s owned by %s in stalled context", + *pui32NumSyncOwnedUFOs, *pui32NumSyncOwnedUFOs==1 ? "" : "s", + g_psSyncCheckpointPfnStruct->pszImplName)); + } + return eError; +} + +PVRSRV_ERROR +SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode, + PSYNC_CHECKPOINT_CONTEXT *ppsSyncCheckpointContext) +{ + _SYNC_CHECKPOINT_CONTEXT *psContext = NULL; + _SYNC_CHECKPOINT_CONTEXT_CTL *psContextCtl = NULL; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_LOG_RETURN_IF_FALSE((ppsSyncCheckpointContext != NULL), + "ppsSyncCheckpointContext invalid", + PVRSRV_ERROR_INVALID_PARAMS); + + psContext = OSAllocMem(sizeof(*psContext)); + PVR_LOG_GOTO_IF_NOMEM(psContext, eError, fail_alloc); /* Sets OOM error code */ + + psContextCtl = OSAllocMem(sizeof(*psContextCtl)); + PVR_LOG_GOTO_IF_NOMEM(psContextCtl, eError, fail_alloc2); /* Sets OOM error code */ + + eError = OSLockCreate(&psContext->hLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate:1", fail_create_context_lock); + + eError = OSSpinLockCreate(&psContextCtl->hDeferredCleanupListLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSSpinLockCreate:1", fail_create_deferred_cleanup_lock); + +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) + eError = OSSpinLockCreate(&psContextCtl->hSyncCheckpointPoolLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSSpinLockCreate:2", fail_create_pool_lock); +#endif + + dllist_init(&psContextCtl->sDeferredCleanupListHead); +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) + psContextCtl->ui32SyncCheckpointPoolCount = 0; + psContextCtl->ui32SyncCheckpointPoolWp = 0; + psContextCtl->ui32SyncCheckpointPoolRp = 0; + psContextCtl->bSyncCheckpointPoolFull = IMG_FALSE; + psContextCtl->bSyncCheckpointPoolValid = IMG_TRUE; +#endif + psContext->psDevNode = psDevNode; + + OSSNPrintf(psContext->azName, PVRSRV_SYNC_NAME_LENGTH, "Sync Prim RA-%p", psContext); + OSSNPrintf(psContext->azSpanName, PVRSRV_SYNC_NAME_LENGTH, "Sync Prim span RA-%p", psContext); + + /* + Create the RA for sub-allocations of the sync checkpoints + + Note: + The import size 
doesn't matter here as the server will pass + back the blocksize when it does the import which overrides + what we specify here. + */ + psContext->psSubAllocRA = RA_Create(psContext->azName, + /* Params for imports */ + _Log2(sizeof(IMG_UINT32)), + RA_LOCKCLASS_2, + _SyncCheckpointBlockImport, + _SyncCheckpointBlockUnimport, + psContext, + IMG_FALSE); + PVR_LOG_GOTO_IF_NOMEM(psContext->psSubAllocRA, eError, fail_suballoc); + + /* + Create the span-management RA + + The RA requires that we work with linear spans. For our use + here we don't require this behaviour as we're always working + within offsets of blocks (imports). However, we need to keep + the RA happy so we create the "span" management RA which + ensures that all are imports are added to the RA in a linear + fashion + */ + psContext->psSpanRA = RA_Create(psContext->azSpanName, + /* Params for imports */ + 0, + RA_LOCKCLASS_1, + NULL, + NULL, + NULL, + IMG_FALSE); + PVR_LOG_GOTO_IF_NOMEM(psContext->psSpanRA, eError, fail_span); + + if (!RA_Add(psContext->psSpanRA, 0, MAX_SYNC_CHECKPOINT_MEM, 0, NULL)) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + PVR_LOG_ERROR(eError, "SyncCheckpointContextCreate call to RA_Add(span) failed"); + goto fail_span_add; + } + + OSAtomicWrite(&psContext->hRefCount, 1); + OSAtomicWrite(&psContext->hCheckpointCount, 0); + + psContext->psContextCtl = psContextCtl; + + *ppsSyncCheckpointContext = (PSYNC_CHECKPOINT_CONTEXT)psContext; +#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s: created psSyncCheckpointContext=<%p> (%d contexts exist)", + __func__, + (void*)*ppsSyncCheckpointContext, + ++gui32NumSyncCheckpointContexts)); +#endif + +#if defined(PDUMP) + dllist_init(&psContext->sSyncCheckpointBlockListHead); + + eError = OSLockCreate(&psContext->hSyncCheckpointBlockListLock); + PVR_GOTO_IF_ERROR(eError, fail_span_add); + + OSLockAcquire(psDevNode->hSyncCheckpointContextListLock); + dllist_add_to_tail(&psDevNode->sSyncCheckpointContextListHead, 
&psContext->sListNode); + OSLockRelease(psDevNode->hSyncCheckpointContextListLock); + +#endif + + return PVRSRV_OK; + +fail_span_add: + RA_Delete(psContext->psSpanRA); +fail_span: + RA_Delete(psContext->psSubAllocRA); +fail_suballoc: +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) + OSSpinLockDestroy(psContextCtl->hSyncCheckpointPoolLock); + psContextCtl->hSyncCheckpointPoolLock = NULL; +fail_create_pool_lock: +#endif + OSSpinLockDestroy(psContextCtl->hDeferredCleanupListLock); + psContextCtl->hDeferredCleanupListLock = NULL; +fail_create_deferred_cleanup_lock: + OSLockDestroy(psContext->hLock); + psContext->hLock = NULL; +fail_create_context_lock: + OSFreeMem(psContextCtl); +fail_alloc2: + OSFreeMem(psContext); +fail_alloc: + return eError; +} + +/* Poisons and frees the checkpoint + * Decrements context refcount. */ +static void _FreeSyncCheckpoint(_SYNC_CHECKPOINT *psSyncCheckpoint) +{ + _SYNC_CHECKPOINT_CONTEXT *psContext = psSyncCheckpoint->psSyncCheckpointBlock->psContext; + + psSyncCheckpoint->sCheckpointUFOAddr.ui32Addr = 0; + psSyncCheckpoint->psSyncCheckpointFwObj = NULL; + psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_FREED; + + RA_Free(psSyncCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA, + psSyncCheckpoint->uiSpanAddr); + psSyncCheckpoint->psSyncCheckpointBlock = NULL; + + OSFreeMem(psSyncCheckpoint); + + OSAtomicDecrement(&psContext->hCheckpointCount); +} + +PVRSRV_ERROR SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointContext; + PVRSRV_DEVICE_NODE *psDevNode; + IMG_INT iRf = 0; + + PVR_LOG_RETURN_IF_FALSE((psSyncCheckpointContext != NULL), + "psSyncCheckpointContext invalid", + PVRSRV_ERROR_INVALID_PARAMS); + + psDevNode = (PVRSRV_DEVICE_NODE *)psContext->psDevNode; + +#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s: destroying 
psSyncCheckpointContext=<%p> (now have %d contexts)", + __func__, + (void*)psSyncCheckpointContext, + --gui32NumSyncCheckpointContexts)); +#endif + + _CheckDeferredCleanupList(psContext); + +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) + if (psContext->psContextCtl->ui32SyncCheckpointPoolCount > 0) + { + IMG_UINT32 ui32NumFreedFromPool = _CleanCheckpointPool(psContext); + +#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s freed %d sync checkpoints that were still in the pool for context<%p>", + __func__, + ui32NumFreedFromPool, + (void*)psContext)); +#else + PVR_UNREFERENCED_PARAMETER(ui32NumFreedFromPool); +#endif + } +#endif + + iRf = OSAtomicRead(&psContext->hCheckpointCount); + + if (iRf != 0) + { + OS_SPINLOCK_FLAGS uiFlags; + + /* Note, this is not a permanent error as the caller may retry later */ + PVR_DPF((PVR_DBG_WARNING, + "%s <%p> attempted with active references (iRf=%d), " + "may be the result of a race", + __func__, + (void*)psContext, + iRf)); + + eError = PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT; + + OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); + { + DLLIST_NODE *psNode, *psNext; + + dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext) + { + _SYNC_CHECKPOINT *psSyncCheckpoint = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode); + bool bDeferredFree = dllist_node_is_in_list(&psSyncCheckpoint->sDeferredFreeListNode); + + /* Line below avoids build error in release builds (where PVR_DPF is not defined) */ + PVR_UNREFERENCED_PARAMETER(bDeferredFree); + PVR_DPF((PVR_DBG_WARNING, + "%s syncCheckpoint<%p> ID=%d, %s, refs=%d, state=%s, fwaddr=%#08x, enqCount:%d, FWCount:%d %s", + __func__, + (void*)psSyncCheckpoint, + psSyncCheckpoint->ui32UID, + psSyncCheckpoint->azName, + OSAtomicRead(&psSyncCheckpoint->hRefCount), + psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED ? 
+ "PVRSRV_SYNC_CHECKPOINT_SIGNALLED" : + psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE ? + "PVRSRV_SYNC_CHECKPOINT_ACTIVE" : "PVRSRV_SYNC_CHECKPOINT_ERRORED", + psSyncCheckpoint->ui32FWAddr, + OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount), + psSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount, + bDeferredFree ? "(deferred free)" : "")); + +#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1) + gui32NumSyncCheckpointContexts++; +#endif + } + } + OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); + } + else + { + IMG_INT iRf2 = 0; + + iRf2 = OSAtomicRead(&psContext->hRefCount); + SyncCheckpointContextUnref(psSyncCheckpointContext); + } + +#if defined(PDUMP) + if (dllist_is_empty(&psContext->sSyncCheckpointBlockListHead)) + { + OSLockDestroy(psContext->hSyncCheckpointBlockListLock); + psContext->hSyncCheckpointBlockListLock = NULL; + + OSLockAcquire(psDevNode->hSyncCheckpointContextListLock); + dllist_remove_node(&psContext->sListNode); + OSLockRelease(psDevNode->hSyncCheckpointContextListLock); + } +#endif + + return eError; +} + +PVRSRV_ERROR +SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext, + PVRSRV_TIMELINE hTimeline, + PVRSRV_FENCE hFence, + const IMG_CHAR *pszCheckpointName, + PSYNC_CHECKPOINT *ppsSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psNewSyncCheckpoint = NULL; + _SYNC_CHECKPOINT_CONTEXT *psSyncContextInt = (_SYNC_CHECKPOINT_CONTEXT*)psSyncContext; + PVRSRV_DEVICE_NODE *psDevNode; + PVRSRV_ERROR eError; + + PVR_LOG_RETURN_IF_FALSE((psSyncContext != NULL), "psSyncContext invalid", PVRSRV_ERROR_INVALID_PARAMS); + PVR_LOG_RETURN_IF_FALSE((ppsSyncCheckpoint != NULL), "ppsSyncCheckpoint invalid", PVRSRV_ERROR_INVALID_PARAMS); + + psDevNode = (PVRSRV_DEVICE_NODE *)psSyncContextInt->psDevNode; + +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) +#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)) + PVR_DPF((PVR_DBG_WARNING, "%s Entry, Getting checkpoint from pool", + 
__func__)); +#endif + psNewSyncCheckpoint = _GetCheckpointFromPool(psSyncContextInt); + if (!psNewSyncCheckpoint) + { +#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)) + PVR_DPF((PVR_DBG_WARNING, + "%s checkpoint pool empty - will have to allocate", + __func__)); +#endif + } +#endif + /* If pool is empty (or not defined) alloc the new sync checkpoint */ + if (!psNewSyncCheckpoint) + { + psNewSyncCheckpoint = OSAllocMem(sizeof(*psNewSyncCheckpoint)); + PVR_LOG_GOTO_IF_NOMEM(psNewSyncCheckpoint, eError, fail_alloc); /* Sets OOM error code */ + + eError = RA_Alloc(psSyncContextInt->psSubAllocRA, + sizeof(*psNewSyncCheckpoint->psSyncCheckpointFwObj), + RA_NO_IMPORT_MULTIPLIER, + 0, + sizeof(IMG_UINT32), + (IMG_CHAR*)pszCheckpointName, + &psNewSyncCheckpoint->uiSpanAddr, + NULL, + (RA_PERISPAN_HANDLE *) &psNewSyncCheckpoint->psSyncCheckpointBlock); + PVR_LOG_GOTO_IF_ERROR(eError, "RA_Alloc", fail_raalloc); + +#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s CALLED RA_Alloc(), psSubAllocRA=<%p>, ui32SpanAddr=0x%llx", + __func__, + (void*)psSyncContextInt->psSubAllocRA, + psNewSyncCheckpoint->uiSpanAddr)); +#endif + psNewSyncCheckpoint->psSyncCheckpointFwObj = + (volatile SYNC_CHECKPOINT_FW_OBJ*)(void *)(psNewSyncCheckpoint->psSyncCheckpointBlock->pui32LinAddr + + (_SyncCheckpointGetOffset(psNewSyncCheckpoint)/sizeof(IMG_UINT32))); + psNewSyncCheckpoint->ui32FWAddr = psNewSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr + + _SyncCheckpointGetOffset(psNewSyncCheckpoint) + 1; + OSAtomicIncrement(&psNewSyncCheckpoint->psSyncCheckpointBlock->psContext->hCheckpointCount); + psNewSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_USE; +#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s called to allocate new sync checkpoint<%p> for context<%p>", + __func__, (void*)psNewSyncCheckpoint, (void*)psSyncContext)); + 
PVR_DPF((PVR_DBG_WARNING, + "%s psSyncCheckpointFwObj<%p>", + __func__, (void*)psNewSyncCheckpoint->psSyncCheckpointFwObj)); + PVR_DPF((PVR_DBG_WARNING, + "%s psSyncCheckpoint FwAddr=0x%x", + __func__, SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psNewSyncCheckpoint))); + PVR_DPF((PVR_DBG_WARNING, + "%s pszCheckpointName = %s", + __func__, pszCheckpointName)); + PVR_DPF((PVR_DBG_WARNING, + "%s psSyncCheckpoint Timeline=%d", + __func__, hTimeline)); +#endif + } + + psNewSyncCheckpoint->hTimeline = hTimeline; + OSAtomicWrite(&psNewSyncCheckpoint->hRefCount, 1); + OSAtomicWrite(&psNewSyncCheckpoint->hEnqueuedCCBCount, 0); + psNewSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount = 0; + psNewSyncCheckpoint->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_ACTIVE; + psNewSyncCheckpoint->uiProcess = OSGetCurrentClientProcessIDKM(); + OSCachedMemSet(&psNewSyncCheckpoint->sDeferredFreeListNode, 0, sizeof(psNewSyncCheckpoint->sDeferredFreeListNode)); + + if (pszCheckpointName) + { + /* Copy over the checkpoint name annotation */ + OSStringLCopy(psNewSyncCheckpoint->azName, pszCheckpointName, PVRSRV_SYNC_NAME_LENGTH); + } + else + { + /* No sync checkpoint name annotation */ + psNewSyncCheckpoint->azName[0] = '\0'; + } + + /* Store sync checkpoint FW address in PRGXFWIF_UFO_ADDR struct */ + psNewSyncCheckpoint->sCheckpointUFOAddr.ui32Addr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psNewSyncCheckpoint); + + /* Assign unique ID to this sync checkpoint */ + psNewSyncCheckpoint->ui32UID = g_SyncCheckpointUID++; + +#if defined(PDUMP) + /* Flushing deferred fence signals to pdump */ + MISRHandler_PdumpDeferredSyncSignalPoster(psDevNode); + + _SyncCheckpointAllocPDump(psNewSyncCheckpoint); +#endif + + RGXSRV_HWPERF_ALLOC_SYNC_CP(psDevNode, psNewSyncCheckpoint->hTimeline, + OSGetCurrentClientProcessIDKM(), + hFence, + psNewSyncCheckpoint->ui32FWAddr, + psNewSyncCheckpoint->azName, + sizeof(psNewSyncCheckpoint->azName)); + + if (GetInfoPageDebugFlagsKM() & 
DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) + { + IMG_CHAR szChkptName[PVRSRV_SYNC_NAME_LENGTH]; + + if (pszCheckpointName) + { + /* Copy the checkpoint name annotation into a fixed-size array */ + OSStringLCopy(szChkptName, pszCheckpointName, PVRSRV_SYNC_NAME_LENGTH); + } + else + { + /* No checkpoint name annotation */ + szChkptName[0] = 0; + } + /* record this sync */ + eError = _SyncCheckpointRecordAdd(&psNewSyncCheckpoint->hRecord, + psNewSyncCheckpoint->psSyncCheckpointBlock, + psNewSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr, + _SyncCheckpointGetOffset(psNewSyncCheckpoint), + psNewSyncCheckpoint->ui32UID, + OSStringNLength(szChkptName, PVRSRV_SYNC_NAME_LENGTH), + szChkptName, (PSYNC_CHECKPOINT)psNewSyncCheckpoint); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync checkpoint record \"%s\" (%s)", + __func__, + szChkptName, + PVRSRVGetErrorString(eError))); + psNewSyncCheckpoint->hRecord = NULL; + /* note the error but continue without affecting driver operation */ + } + } + + { + OS_SPINLOCK_FLAGS uiFlags; + /* Add the sync checkpoint to the device list */ + OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); + dllist_add_to_head(&psDevNode->sSyncCheckpointSyncsList, + &psNewSyncCheckpoint->sListNode); + OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); + } + + *ppsSyncCheckpoint = (PSYNC_CHECKPOINT)psNewSyncCheckpoint; + +#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s Exit(Ok), psNewSyncCheckpoint->ui32UID=%d <%p>", + __func__, + psNewSyncCheckpoint->ui32UID, + (void*)psNewSyncCheckpoint)); +#endif + return PVRSRV_OK; + +fail_raalloc: + OSFreeMem(psNewSyncCheckpoint); +fail_alloc: + return eError; +} + +static void SyncCheckpointUnref(_SYNC_CHECKPOINT *psSyncCheckpointInt) +{ + _SYNC_CHECKPOINT_CONTEXT *psContext; + PVRSRV_DEVICE_NODE *psDevNode; + + psContext = psSyncCheckpointInt->psSyncCheckpointBlock->psContext; + psDevNode = (PVRSRV_DEVICE_NODE 
*)psContext->psDevNode; + + /* + * Without this reference, the context may be destroyed as soon + * as _FreeSyncCheckpoint is called, but the context is still + * needed when _CheckDeferredCleanupList is called at the end + * of this function. + */ + SyncCheckpointContextRef((PSYNC_CHECKPOINT_CONTEXT)psContext); + + PVR_ASSERT(psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE); + if (!OSAtomicRead(&psSyncCheckpointInt->hRefCount)) + { + PVR_DPF((PVR_DBG_ERROR, "SyncCheckpointUnref sync checkpoint already freed")); + } + else if (0 == OSAtomicDecrement(&psSyncCheckpointInt->hRefCount)) + { + /* If the firmware has serviced all enqueued references to the sync checkpoint, free it */ + if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount == + (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount))) + { +#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s No outstanding FW ops and hRef is zero, deleting SyncCheckpoint..", + __func__)); +#endif + if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) + && psSyncCheckpointInt->hRecord) + { + PVRSRV_ERROR eError; + /* remove this sync record */ + eError = _SyncCheckpointRecordRemove(psSyncCheckpointInt->hRecord); + PVR_LOG_IF_ERROR(eError, "_SyncCheckpointRecordRemove"); + } + + { + OS_SPINLOCK_FLAGS uiFlags; + /* Remove the sync checkpoint from the global list */ + OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); + dllist_remove_node(&psSyncCheckpointInt->sListNode); + OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); + } + + RGXSRV_HWPERF_FREE(psDevNode, SYNC_CP, psSyncCheckpointInt->ui32FWAddr); + +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) +#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)) + PVR_DPF((PVR_DBG_WARNING, + "%s attempting to return sync checkpoint to the pool", + __func__)); +#endif + if (!_PutCheckpointInPool(psSyncCheckpointInt)) 
+#endif + { +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) +#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)) + PVR_DPF((PVR_DBG_WARNING, + "%s pool is full, so just free it", + __func__)); +#endif +#endif +#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), psSubAllocRA=<%p>, ui32SpanAddr=0x%llx", + __func__, + psSyncCheckpointInt->ui32UID, + (void*)psSyncCheckpointInt, + (void*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext->psSubAllocRA, + psSyncCheckpointInt->uiSpanAddr)); +#endif + _FreeSyncCheckpoint(psSyncCheckpointInt); + } + } + else + { + OS_SPINLOCK_FLAGS uiFlags; +#if ((ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)) + PVR_DPF((PVR_DBG_WARNING, + "%s Outstanding FW ops hEnqueuedCCBCount=%d != FwObj->ui32FwRefCount=%d " + "- DEFERRING CLEANUP psSyncCheckpoint(ID:%d)<%p>", + __func__, + OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount), + psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount, + psSyncCheckpointInt->ui32UID, + (void*)psSyncCheckpointInt)); +#endif + /* Add the sync checkpoint to the deferred free list */ + OSSpinLockAcquire(psContext->psContextCtl->hDeferredCleanupListLock, uiFlags); + dllist_add_to_tail(&psContext->psContextCtl->sDeferredCleanupListHead, + &psSyncCheckpointInt->sDeferredFreeListNode); + OSSpinLockRelease(psContext->psContextCtl->hDeferredCleanupListLock, uiFlags); + } + } + else + { +#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s psSyncCheckpoint(ID:%d)<%p>, hRefCount decremented to %d", + __func__, + psSyncCheckpointInt->ui32UID, + (void*)psSyncCheckpointInt, + (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hRefCount)))); +#endif + } + + /* See if any sync checkpoints in the deferred cleanup list can be freed */ + _CheckDeferredCleanupList(psContext); + + 
SyncCheckpointContextUnref((PSYNC_CHECKPOINT_CONTEXT)psContext); +} + +void SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + + PVR_LOG_RETURN_VOID_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); + +#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s Entry, psSyncCheckpoint(ID:%d)<%p>, hRefCount=%d, psSyncCheckpoint->ui32ValidationCheck=0x%x", + __func__, + psSyncCheckpointInt->ui32UID, + (void*)psSyncCheckpoint, + (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hRefCount)), + psSyncCheckpointInt->ui32ValidationCheck)); +#endif + SyncCheckpointUnref(psSyncCheckpointInt); +} + +void +SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + + PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); + + if (psSyncCheckpointInt) + { + PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE), + "psSyncCheckpoint already signalled"); + + if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE) + { +#if defined(SUPPORT_RGX) + PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice; + + RGXSRVHWPerfSyncCheckpointUFOUpdate(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags); +#endif + psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + +#if defined(PDUMP) + _SyncCheckpointUpdatePDump(psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode, psSyncCheckpointInt, PVRSRV_SYNC_CHECKPOINT_SIGNALLED, ui32FenceSyncFlags); +#endif + } + else + { + PVR_DPF((PVR_DBG_WARNING, + "%s asked to set PVRSRV_SYNC_CHECKPOINT_SIGNALLED(%d) for (psSyncCheckpointInt->ui32UID=%d), " + "when value is already %d", + __func__, + PVRSRV_SYNC_CHECKPOINT_SIGNALLED, + 
psSyncCheckpointInt->ui32UID, + psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State)); + } + } +} + +void +SyncCheckpointSignalNoHW(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + + PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); + + if (psSyncCheckpointInt) + { + PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE), + "psSyncCheckpoint already signalled"); + + if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE) + { +#if defined(SUPPORT_RGX) + PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice; + + RGXSRVHWPerfSyncCheckpointUFOUpdate(psDevInfo, psSyncCheckpointInt, PVRSRV_FENCE_FLAG_NONE); +#endif + psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + } + else + { +#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s asked to set PVRSRV_SYNC_CHECKPOINT_SIGNALLED(%d) for (psSyncCheckpointInt->ui32UID=%d), " + "when value is already %d", + __func__, + PVRSRV_SYNC_CHECKPOINT_SIGNALLED, + psSyncCheckpointInt->ui32UID, + psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State)); +#endif + } + } +} + +void +SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + + PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); + + if (psSyncCheckpointInt) + { + PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE), + "psSyncCheckpoint already signalled"); + + if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE) + { +#if defined(SUPPORT_RGX) + PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice; + if 
(!(ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) + { + RGX_HWPERF_UFO_DATA_ELEMENT sSyncData; + + sSyncData.sUpdate.ui32FWAddr = SyncCheckpointGetFirmwareAddr(psSyncCheckpoint); + sSyncData.sUpdate.ui32OldValue = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; + sSyncData.sUpdate.ui32NewValue = PVRSRV_SYNC_CHECKPOINT_ERRORED; + + RGXSRV_HWPERF_UFO(psDevInfo, RGX_HWPERF_UFO_EV_UPDATE, &sSyncData, + (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? IMG_FALSE : IMG_TRUE); + } +#endif + + psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_ERRORED; + +#if defined(PDUMP) + _SyncCheckpointUpdatePDump(psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode, psSyncCheckpointInt, PVRSRV_SYNC_CHECKPOINT_ERRORED, ui32FenceSyncFlags); +#endif + } + } +} + +IMG_BOOL SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags) +{ + IMG_BOOL bRet = IMG_FALSE; + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + + PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); + + if (psSyncCheckpointInt) + { +#if defined(SUPPORT_RGX) + PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice; + + RGXSRVHWPerfSyncCheckpointUFOIsSignalled(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags); +#endif + bRet = ((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) || + (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED)); + +#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s called for psSyncCheckpoint<%p>, returning %d", + __func__, + (void*)psSyncCheckpoint, + bRet)); +#endif + } + return bRet; +} + +IMG_BOOL +SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags) +{ + IMG_BOOL bRet = IMG_FALSE; + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + + 
PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); + + if (psSyncCheckpointInt) + { +#if defined(SUPPORT_RGX) + PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice; + + RGXSRVHWPerfSyncCheckpointUFOIsSignalled(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags); +#endif + bRet = (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED); + +#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s called for psSyncCheckpoint<%p>, returning %d", + __func__, + (void*)psSyncCheckpoint, + bRet)); +#endif + } + return bRet; +} + +const IMG_CHAR * +SyncCheckpointGetStateString(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + + PVR_LOG_RETURN_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", "Null"); + + switch (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State) + { + case PVRSRV_SYNC_CHECKPOINT_SIGNALLED: + return "Signalled"; + case PVRSRV_SYNC_CHECKPOINT_ACTIVE: + return "Active"; + case PVRSRV_SYNC_CHECKPOINT_ERRORED: + return "Errored"; + case PVRSRV_SYNC_CHECKPOINT_UNDEF: + return "Undefined"; + default: + return "Unknown"; + } +} + +PVRSRV_ERROR +SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + PVRSRV_ERROR eRet = PVRSRV_OK; + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psSyncCheckpoint, "psSyncCheckpoint"); + +#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)", + __func__, + psSyncCheckpointInt, + OSAtomicRead(&psSyncCheckpointInt->hRefCount), + OSAtomicRead(&psSyncCheckpointInt->hRefCount)+1, + psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount)); +#endif + OSAtomicIncrement(&psSyncCheckpointInt->hRefCount); + + return eRet; +} + +PVRSRV_ERROR 
+SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + PVRSRV_ERROR eRet = PVRSRV_OK; + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psSyncCheckpoint, "psSyncCheckpoint"); + +#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)", + __func__, + psSyncCheckpointInt, + OSAtomicRead(&psSyncCheckpointInt->hRefCount), + OSAtomicRead(&psSyncCheckpointInt->hRefCount)-1, + psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount)); +#endif + SyncCheckpointUnref(psSyncCheckpointInt); + + return eRet; +} + +void +SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + + PVR_LOG_RETURN_VOID_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint"); + + if (psSyncCheckpointInt) + { +#if !defined(NO_HARDWARE) +#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)", + __func__, + (void*)psSyncCheckpoint, + OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount), + OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)+1, + psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount)); +#endif + OSAtomicIncrement(&psSyncCheckpointInt->hEnqueuedCCBCount); +#endif + } +} + +PRGXFWIF_UFO_ADDR* +SyncCheckpointGetRGXFWIFUFOAddr(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + + PVR_LOG_GOTO_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt); + + if (psSyncCheckpointInt) + { + if (psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE) + { + return &psSyncCheckpointInt->sCheckpointUFOAddr; + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s called for psSyncCheckpoint<%p>, but ui32ValidationCheck=0x%x", + __func__, + (void*)psSyncCheckpoint, + 
psSyncCheckpointInt->ui32ValidationCheck)); + } + } + +invalid_chkpt: + return NULL; +} + +IMG_UINT32 +SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + IMG_UINT32 ui32Ret = 0; + + PVR_LOG_GOTO_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt); + + if (psSyncCheckpointInt) + { + if (psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE) + { + ui32Ret = psSyncCheckpointInt->ui32FWAddr; + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s called for psSyncCheckpoint<%p>, but ui32ValidationCheck=0x%x", + __func__, + (void*)psSyncCheckpoint, + psSyncCheckpointInt->ui32ValidationCheck)); + } + } + +invalid_chkpt: + return ui32Ret; +} + +IMG_UINT32 +SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + IMG_UINT32 ui32Ret = 0; + + PVR_LOG_GOTO_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt); + + if (psSyncCheckpointInt) + { +#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s returning ID for sync checkpoint<%p>", + __func__, + (void*)psSyncCheckpointInt)); + PVR_DPF((PVR_DBG_WARNING, + "%s (validationCheck=0x%x)", + __func__, + psSyncCheckpointInt->ui32ValidationCheck)); +#endif + ui32Ret = psSyncCheckpointInt->ui32UID; +#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s (ui32UID=0x%x)", + __func__, + psSyncCheckpointInt->ui32UID)); +#endif + } + return ui32Ret; + +invalid_chkpt: + return 0; +} + +PVRSRV_TIMELINE +SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + PVRSRV_TIMELINE i32Ret = PVRSRV_NO_TIMELINE; + + PVR_LOG_GOTO_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt); + + if (psSyncCheckpointInt) + { + i32Ret = 
psSyncCheckpointInt->hTimeline; + } + return i32Ret; + +invalid_chkpt: + return 0; +} + + +IMG_UINT32 +SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + PVR_LOG_RETURN_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint invalid", 0); + + return OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount); +} + +IMG_UINT32 +SyncCheckpointGetReferenceCount(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + PVR_LOG_RETURN_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint invalid", 0); + + return OSAtomicRead(&psSyncCheckpointInt->hRefCount); +} + +IMG_PID +SyncCheckpointGetCreator(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + PVR_LOG_RETURN_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint invalid", 0); + + return psSyncCheckpointInt->uiProcess; +} + +IMG_UINT32 SyncCheckpointStateFromUFO(PPVRSRV_DEVICE_NODE psDevNode, + IMG_UINT32 ui32FwAddr) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt; + PDLLIST_NODE psNode, psNext; + IMG_UINT32 ui32State = 0; + OS_SPINLOCK_FLAGS uiFlags; + + OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); + dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext) + { + psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode); + if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt)) + { + ui32State = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; + break; + } + } + OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); + return ui32State; +} + +void SyncCheckpointErrorFromUFO(PPVRSRV_DEVICE_NODE psDevNode, + IMG_UINT32 ui32FwAddr) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt; + PDLLIST_NODE psNode, psNext; + OS_SPINLOCK_FLAGS uiFlags; + +#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s 
called to error UFO with ui32FWAddr=%d", + __func__, + ui32FwAddr)); +#endif + + OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); + dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext) + { + psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode); + if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt)) + { +#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s calling SyncCheckpointError for sync checkpoint <%p>", + __func__, + (void*)psSyncCheckpointInt)); +#endif + /* Mark as errored */ + SyncCheckpointError((PSYNC_CHECKPOINT)psSyncCheckpointInt, IMG_TRUE); + break; + } + } + OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); +} + +void SyncCheckpointRollbackFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr) +{ +#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s called to rollback UFO with ui32FWAddr=0x%x", + __func__, + ui32FwAddr)); +#endif +#if !defined(NO_HARDWARE) + { + _SYNC_CHECKPOINT *psSyncCheckpointInt = NULL; + PDLLIST_NODE psNode = NULL, psNext = NULL; + OS_SPINLOCK_FLAGS uiFlags; + + OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); + dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext) + { + psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode); + if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt)) + { +#if ((ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)) || (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s called for psSyncCheckpointInt<%p> %d->%d", + __func__, + (void *) psSyncCheckpointInt, + OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount), + OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount) - 1)); +#endif + OSAtomicDecrement(&psSyncCheckpointInt->hEnqueuedCCBCount); + break; + } + } + OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); + } 
+#endif +} + +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) +IMG_BOOL SyncCheckpointUFOHasSignalled(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr, IMG_UINT32 ui32Value) +{ +#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) + PVR_LOG(("%s called because UFO with ui32FWAddr=%#08x has set to %#04x", + __func__, + ui32FwAddr, + ui32Value)); +#endif + + if (g_psSyncCheckpointPfnStruct && + g_psSyncCheckpointPfnStruct->pfnCheckpointHasSignalled) + { + return g_psSyncCheckpointPfnStruct->pfnCheckpointHasSignalled(ui32FwAddr, ui32Value); + } + else + { + return IMG_FALSE; + } +} + +void +SyncCheckpointCheckState(void) +{ + if (g_psSyncCheckpointPfnStruct && + g_psSyncCheckpointPfnStruct->pfnCheckState) + { + g_psSyncCheckpointPfnStruct->pfnCheckState(); + } +} + +void +SyncCheckpointSignalWaiters(void) +{ + if (g_psSyncCheckpointPfnStruct && + g_psSyncCheckpointPfnStruct->pfnSignalWaiters) + { + PVRSRV_ERROR eError = g_psSyncCheckpointPfnStruct->pfnSignalWaiters(); + PVR_LOG_IF_ERROR(eError, __func__); + } +} +#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */ + +static void _SyncCheckpointState(PDLLIST_NODE psNode, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + _SYNC_CHECKPOINT *psSyncCheckpoint = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode); + + if (psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE) + { + PVR_DUMPDEBUG_LOG("\t- ID = %d, FWAddr = 0x%08x, r%d:e%d:f%d: %s", + psSyncCheckpoint->ui32UID, + psSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr + + _SyncCheckpointGetOffset(psSyncCheckpoint), + OSAtomicRead(&psSyncCheckpoint->hRefCount), + OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount), + psSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount, + psSyncCheckpoint->azName); + } +} + +static void _SyncCheckpointDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVRSRV_DEVICE_NODE 
*psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle; + DLLIST_NODE *psNode, *psNext; + OS_SPINLOCK_FLAGS uiFlags; + + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) + { + PVR_DUMPDEBUG_LOG("------[ Active Sync Checkpoints ]------"); + OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); + dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext) + { + _SyncCheckpointState(psNode, pfnDumpDebugPrintf, pvDumpDebugFile); + } + OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); + } +} + +PVRSRV_ERROR +SyncCheckpointInit(PPVRSRV_DEVICE_NODE psDevNode) +{ + PVRSRV_ERROR eError; +#if defined(PDUMP) + PVRSRV_RGXDEV_INFO *psDevInfo; + + psDevInfo = psDevNode->pvDevice; +#endif + + if (gbSyncCheckpointInit) + { + PVR_DPF((PVR_DBG_ERROR, "%s called but already initialised", __func__)); + return PVRSRV_OK; + } + + eError = OSSpinLockCreate(&psDevNode->hSyncCheckpointListLock); + PVR_RETURN_IF_ERROR(eError); + + dllist_init(&psDevNode->sSyncCheckpointSyncsList); + + eError = PVRSRVRegisterDbgRequestNotify(&psDevNode->hSyncCheckpointNotify, + psDevNode, + _SyncCheckpointDebugRequest, + DEBUG_REQUEST_SYNCCHECKPOINT, + (PVRSRV_DBGREQ_HANDLE)psDevNode); + PVR_GOTO_IF_ERROR(eError, e0); + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) + { + _SyncCheckpointRecordListInit(psDevNode); + } + +#if defined(PDUMP) + eError = OSSpinLockCreate(&psDevInfo->hSyncCheckpointSignalSpinLock); + if (eError != PVRSRV_OK) + { + psDevInfo->hSyncCheckpointSignalSpinLock = NULL; + goto e1; + } + + eError = OSLockCreate(&psDevNode->hSyncCheckpointSignalLock); + if (eError != PVRSRV_OK) + { + psDevNode->hSyncCheckpointSignalLock = NULL; + goto e2; + } + + psDevNode->pui8DeferredSyncCPSignal = OSAllocMem(SYNC_CHECKPOINT_MAX_DEFERRED_SIGNAL + * sizeof(_SYNC_CHECKPOINT_DEFERRED_SIGNAL)); + PVR_GOTO_IF_NOMEM(psDevNode->pui8DeferredSyncCPSignal, eError, e3); + + psDevNode->ui16SyncCPWriteIdx = 0; + 
psDevNode->ui16SyncCPReadIdx = 0; + + eError = OSInstallMISR(&psDevNode->pvSyncCPMISR, + MISRHandler_PdumpDeferredSyncSignalPoster, + psDevNode, + "RGX_PdumpDeferredSyncSignalPoster"); + PVR_GOTO_IF_ERROR(eError, e4); + + eError = OSLockCreate(&psDevNode->hSyncCheckpointContextListLock); + if (eError != PVRSRV_OK) + { + psDevNode->hSyncCheckpointContextListLock = NULL; + goto e5; + } + + + dllist_init(&psDevNode->sSyncCheckpointContextListHead); + + eError = PDumpRegisterTransitionCallbackFenceSync(psDevNode, + _SyncCheckpointPDumpTransition, + &psDevNode->hTransition); + if (eError != PVRSRV_OK) + { + psDevNode->hTransition = NULL; + goto e6; + } +#endif + + gbSyncCheckpointInit = IMG_TRUE; + return PVRSRV_OK; + +#if defined(PDUMP) +e6: + OSLockDestroy(psDevNode->hSyncCheckpointContextListLock); + psDevNode->hSyncCheckpointContextListLock = NULL; +e5: + (void) OSUninstallMISR(psDevNode->pvSyncCPMISR); + psDevNode->pvSyncCPMISR = NULL; +e4: + if (psDevNode->pui8DeferredSyncCPSignal) + { + OSFreeMem(psDevNode->pui8DeferredSyncCPSignal); + psDevNode->pui8DeferredSyncCPSignal = NULL; + } +e3: + OSLockDestroy(psDevNode->hSyncCheckpointSignalLock); + psDevNode->hSyncCheckpointSignalLock = NULL; +e2: + OSSpinLockDestroy(psDevInfo->hSyncCheckpointSignalSpinLock); + psDevInfo->hSyncCheckpointSignalSpinLock = NULL; +e1: + _SyncCheckpointRecordListDeinit(psDevNode); +#endif +e0: + OSSpinLockDestroy(psDevNode->hSyncCheckpointListLock); + psDevNode->hSyncCheckpointListLock = NULL; + + return eError; +} + +void SyncCheckpointDeinit(PPVRSRV_DEVICE_NODE psDevNode) +{ +#if defined(PDUMP) + PVRSRV_RGXDEV_INFO *psDevInfo; + + psDevInfo = psDevNode->pvDevice; + PDumpUnregisterTransitionCallbackFenceSync(psDevNode->hTransition); + psDevNode->hTransition = NULL; + + if (psDevNode->hSyncCheckpointContextListLock) + { + OSLockDestroy(psDevNode->hSyncCheckpointContextListLock); + psDevNode->hSyncCheckpointContextListLock = NULL; + } + + if (psDevNode->pvSyncCPMISR) + { + (void) 
OSUninstallMISR(psDevNode->pvSyncCPMISR); + psDevNode->pvSyncCPMISR = NULL; + } + + if (psDevNode->pui8DeferredSyncCPSignal) + { + OSFreeMem(psDevNode->pui8DeferredSyncCPSignal); + psDevNode->pui8DeferredSyncCPSignal = NULL; + } + if (psDevNode->hSyncCheckpointSignalLock) + { + OSLockDestroy(psDevNode->hSyncCheckpointSignalLock); + psDevNode->hSyncCheckpointSignalLock = NULL; + } + if (psDevInfo->hSyncCheckpointSignalSpinLock) + { + OSSpinLockDestroy(psDevInfo->hSyncCheckpointSignalSpinLock); + psDevInfo->hSyncCheckpointSignalSpinLock = NULL; + } +#endif + + PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncCheckpointNotify); + psDevNode->hSyncCheckpointNotify = NULL; + OSSpinLockDestroy(psDevNode->hSyncCheckpointListLock); + psDevNode->hSyncCheckpointListLock = NULL; + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) + { + _SyncCheckpointRecordListDeinit(psDevNode); + } + gbSyncCheckpointInit = IMG_FALSE; +} + +void SyncCheckpointRecordLookup(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr, + IMG_CHAR * pszSyncInfo, size_t len) +{ + DLLIST_NODE *psNode, *psNext; + IMG_BOOL bFound = IMG_FALSE; + + if (!pszSyncInfo) + { + return; + } + + pszSyncInfo[0] = '\0'; + + OSLockAcquire(psDevNode->hSyncCheckpointRecordLock); + dllist_foreach_node(&psDevNode->sSyncCheckpointRecordList, psNode, psNext) + { + struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec = + IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode); + if ((psSyncCheckpointRec->ui32FwBlockAddr + psSyncCheckpointRec->ui32SyncOffset + 1) == ui32FwAddr) + { + SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock = psSyncCheckpointRec->psSyncCheckpointBlock; + if (psSyncCheckpointBlock && psSyncCheckpointBlock->pui32LinAddr) + { + void *pSyncCheckpointAddr = IMG_OFFSET_ADDR(psSyncCheckpointBlock->pui32LinAddr, + psSyncCheckpointRec->ui32SyncOffset); + OSSNPrintf(pszSyncInfo, len, "%s Checkpoint:%05u (%s)", + (*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ? 
+ "SIGNALLED" : + ((*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_ERRORED) ? + "ERRORED" : "ACTIVE"), + psSyncCheckpointRec->uiPID, + psSyncCheckpointRec->szClassName); + } + else + { + OSSNPrintf(pszSyncInfo, len, "Checkpoint:%05u (%s)", + psSyncCheckpointRec->uiPID, + psSyncCheckpointRec->szClassName); + } + + bFound = IMG_TRUE; + break; + } + } + OSLockRelease(psDevNode->hSyncCheckpointRecordLock); + + if (!bFound && (psDevNode->ui32SyncCheckpointRecordCountHighWatermark == SYNC_CHECKPOINT_RECORD_LIMIT)) + { + OSSNPrintf(pszSyncInfo, len, "(Record may be lost)"); + } +} + +static PVRSRV_ERROR +_SyncCheckpointRecordAdd( + PSYNC_CHECKPOINT_RECORD_HANDLE * phRecord, + SYNC_CHECKPOINT_BLOCK *hSyncCheckpointBlock, + IMG_UINT32 ui32FwBlockAddr, + IMG_UINT32 ui32SyncOffset, + IMG_UINT32 ui32UID, + IMG_UINT32 ui32ClassNameSize, + const IMG_CHAR *pszClassName, PSYNC_CHECKPOINT pSyncCheckpt) +{ + struct SYNC_CHECKPOINT_RECORD * psSyncRec; + _SYNC_CHECKPOINT_CONTEXT *psContext = hSyncCheckpointBlock->psContext; + PVRSRV_DEVICE_NODE *psDevNode = psContext->psDevNode; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_RETURN_IF_INVALID_PARAM(phRecord); + + *phRecord = NULL; + + psSyncRec = OSAllocMem(sizeof(*psSyncRec)); + PVR_LOG_GOTO_IF_NOMEM(psSyncRec, eError, fail_alloc); /* Sets OOM error code */ + + psSyncRec->psDevNode = psDevNode; + psSyncRec->psSyncCheckpointBlock = hSyncCheckpointBlock; + psSyncRec->ui32SyncOffset = ui32SyncOffset; + psSyncRec->ui32FwBlockAddr = ui32FwBlockAddr; + psSyncRec->ui64OSTime = OSClockns64(); + psSyncRec->uiPID = OSGetCurrentProcessID(); + psSyncRec->ui32UID = ui32UID; + psSyncRec->pSyncCheckpt = pSyncCheckpt; + if (pszClassName) + { + if (ui32ClassNameSize >= PVRSRV_SYNC_NAME_LENGTH) + ui32ClassNameSize = PVRSRV_SYNC_NAME_LENGTH; + /* Copy over the class name annotation */ + OSStringLCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize); + } + else + { + /* No class name annotation */ + psSyncRec->szClassName[0] = 0; + } + 
+ OSLockAcquire(psDevNode->hSyncCheckpointRecordLock); + if (psDevNode->ui32SyncCheckpointRecordCount < SYNC_CHECKPOINT_RECORD_LIMIT) + { + dllist_add_to_head(&psDevNode->sSyncCheckpointRecordList, &psSyncRec->sNode); + psDevNode->ui32SyncCheckpointRecordCount++; + + if (psDevNode->ui32SyncCheckpointRecordCount > psDevNode->ui32SyncCheckpointRecordCountHighWatermark) + { + psDevNode->ui32SyncCheckpointRecordCountHighWatermark = psDevNode->ui32SyncCheckpointRecordCount; + } + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync checkpoint record \"%s\". %u records already exist.", + __func__, + pszClassName, + psDevNode->ui32SyncCheckpointRecordCount)); + OSFreeMem(psSyncRec); + psSyncRec = NULL; + eError = PVRSRV_ERROR_TOOMANYBUFFERS; + } + OSLockRelease(psDevNode->hSyncCheckpointRecordLock); + + *phRecord = (PSYNC_CHECKPOINT_RECORD_HANDLE)psSyncRec; + +fail_alloc: + return eError; +} + +static PVRSRV_ERROR +_SyncCheckpointRecordRemove(PSYNC_CHECKPOINT_RECORD_HANDLE hRecord) +{ + struct SYNC_CHECKPOINT_RECORD **ppFreedSync; + struct SYNC_CHECKPOINT_RECORD *pSync = (struct SYNC_CHECKPOINT_RECORD*)hRecord; + PVRSRV_DEVICE_NODE *psDevNode; + + PVR_RETURN_IF_INVALID_PARAM(hRecord); + + psDevNode = pSync->psDevNode; + + OSLockAcquire(psDevNode->hSyncCheckpointRecordLock); + + dllist_remove_node(&pSync->sNode); + + if (psDevNode->uiSyncCheckpointRecordFreeIdx >= PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: psDevNode->uiSyncCheckpointRecordFreeIdx out of range", + __func__)); + psDevNode->uiSyncCheckpointRecordFreeIdx = 0; + } + ppFreedSync = &psDevNode->apsSyncCheckpointRecordsFreed[psDevNode->uiSyncCheckpointRecordFreeIdx]; + psDevNode->uiSyncCheckpointRecordFreeIdx = + (psDevNode->uiSyncCheckpointRecordFreeIdx + 1) % PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; + + if (*ppFreedSync) + { + OSFreeMem(*ppFreedSync); + } + pSync->psSyncCheckpointBlock = NULL; + pSync->ui64OSTime = OSClockns64(); + *ppFreedSync = pSync; + + 
psDevNode->ui32SyncCheckpointRecordCount--; + + OSLockRelease(psDevNode->hSyncCheckpointRecordLock); + + return PVRSRV_OK; +} + +#define NS_IN_S (1000000000UL) +static void _SyncCheckpointRecordPrint(struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec, + IMG_UINT64 ui64TimeNow, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + _SYNC_CHECKPOINT *psSyncCheckpoint = (_SYNC_CHECKPOINT *)psSyncCheckpointRec->pSyncCheckpt; + SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock = psSyncCheckpointRec->psSyncCheckpointBlock; + IMG_UINT64 ui64DeltaS; + IMG_UINT32 ui32DeltaF; + IMG_UINT64 ui64Delta = ui64TimeNow - psSyncCheckpointRec->ui64OSTime; + ui64DeltaS = OSDivide64(ui64Delta, NS_IN_S, &ui32DeltaF); + + if (psSyncCheckpointBlock && psSyncCheckpointBlock->pui32LinAddr) + { + void *pSyncCheckpointAddr; + pSyncCheckpointAddr = IMG_OFFSET_ADDR(psSyncCheckpointBlock->pui32LinAddr, + psSyncCheckpointRec->ui32SyncOffset); + + PVR_DUMPDEBUG_LOG("\t%05u %05" IMG_UINT64_FMTSPEC ".%09u %010u FWAddr=0x%08x (r%d:e%d:f%d) State=%s (%s)", + psSyncCheckpointRec->uiPID, + ui64DeltaS, ui32DeltaF, psSyncCheckpointRec->ui32UID, + (psSyncCheckpointRec->ui32FwBlockAddr+psSyncCheckpointRec->ui32SyncOffset), + OSAtomicRead(&psSyncCheckpoint->hRefCount), + OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount), + psSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount, + (*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ? + "SIGNALLED" : + ((*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_ERRORED) ? 
+ "ERRORED" : "ACTIVE"), + psSyncCheckpointRec->szClassName); + } + else + { + PVR_DUMPDEBUG_LOG("\t%05u %05" IMG_UINT64_FMTSPEC ".%09u %010u FWAddr=0x%08x State= (%s)", + psSyncCheckpointRec->uiPID, + ui64DeltaS, ui32DeltaF, psSyncCheckpointRec->ui32UID, + (psSyncCheckpointRec->ui32FwBlockAddr+psSyncCheckpointRec->ui32SyncOffset), + psSyncCheckpointRec->szClassName + ); + } +} + +static void _SyncCheckpointRecordRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle; + IMG_UINT64 ui64TimeNowS; + IMG_UINT32 ui32TimeNowF; + IMG_UINT64 ui64TimeNow = OSClockns64(); + DLLIST_NODE *psNode, *psNext; + + ui64TimeNowS = OSDivide64(ui64TimeNow, NS_IN_S, &ui32TimeNowF); + + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) + { + IMG_UINT32 i; + + OSLockAcquire(psDevNode->hSyncCheckpointRecordLock); + + PVR_DUMPDEBUG_LOG("Dumping allocated sync checkpoints. Allocated: %u High watermark: %u (time ref %05" IMG_UINT64_FMTSPEC ".%09u)", + psDevNode->ui32SyncCheckpointRecordCount, + psDevNode->ui32SyncCheckpointRecordCountHighWatermark, + ui64TimeNowS, + ui32TimeNowF); + if (psDevNode->ui32SyncCheckpointRecordCountHighWatermark == SYNC_CHECKPOINT_RECORD_LIMIT) + { + PVR_DUMPDEBUG_LOG("Warning: Record limit (%u) was reached. 
Some sync checkpoints may not have been recorded in the debug information.", + SYNC_CHECKPOINT_RECORD_LIMIT); + } + PVR_DUMPDEBUG_LOG("\t%-5s %-15s %-10s %-17s %-14s (%s)", + "PID", "Time Delta (s)", "UID", "Address", "State", "Annotation"); + + dllist_foreach_node(&psDevNode->sSyncCheckpointRecordList, psNode, psNext) + { + struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec = + IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode); + _SyncCheckpointRecordPrint(psSyncCheckpointRec, ui64TimeNow, + pfnDumpDebugPrintf, pvDumpDebugFile); + } + + PVR_DUMPDEBUG_LOG("Dumping all recently freed sync checkpoints @ %05" IMG_UINT64_FMTSPEC ".%09u", + ui64TimeNowS, + ui32TimeNowF); + PVR_DUMPDEBUG_LOG("\t%-5s %-15s %-10s %-17s %-14s (%s)", + "PID", "Time Delta (s)", "UID", "Address", "State", "Annotation"); + for (i = DECREMENT_WITH_WRAP(psDevNode->uiSyncCheckpointRecordFreeIdx, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN); + i != psDevNode->uiSyncCheckpointRecordFreeIdx; + i = DECREMENT_WITH_WRAP(i, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN)) + { + if (psDevNode->apsSyncCheckpointRecordsFreed[i]) + { + _SyncCheckpointRecordPrint(psDevNode->apsSyncCheckpointRecordsFreed[i], + ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile); + } + else + { + break; + } + } + OSLockRelease(psDevNode->hSyncCheckpointRecordLock); + } +} +#undef NS_IN_S +static PVRSRV_ERROR _SyncCheckpointRecordListInit(PVRSRV_DEVICE_NODE *psDevNode) +{ + PVRSRV_ERROR eError; + + eError = OSLockCreate(&psDevNode->hSyncCheckpointRecordLock); + PVR_GOTO_IF_ERROR(eError, fail_lock_create); + dllist_init(&psDevNode->sSyncCheckpointRecordList); + + psDevNode->ui32SyncCheckpointRecordCount = 0; + psDevNode->ui32SyncCheckpointRecordCountHighWatermark = 0; + + eError = PVRSRVRegisterDbgRequestNotify(&psDevNode->hSyncCheckpointRecordNotify, + psDevNode, + _SyncCheckpointRecordRequest, + DEBUG_REQUEST_SYNCCHECKPOINT, + (PVRSRV_DBGREQ_HANDLE)psDevNode); + PVR_GOTO_IF_ERROR(eError, fail_dbg_register); + + return PVRSRV_OK; + 
+fail_dbg_register: + OSLockDestroy(psDevNode->hSyncCheckpointRecordLock); +fail_lock_create: + return eError; +} + +static void _SyncCheckpointRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode) +{ + DLLIST_NODE *psNode, *psNext; + int i; + + OSLockAcquire(psDevNode->hSyncCheckpointRecordLock); + dllist_foreach_node(&psDevNode->sSyncCheckpointRecordList, psNode, psNext) + { + struct SYNC_CHECKPOINT_RECORD *pSyncCheckpointRec = + IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode); + + dllist_remove_node(psNode); + OSFreeMem(pSyncCheckpointRec); + } + + for (i = 0; i < PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; i++) + { + if (psDevNode->apsSyncCheckpointRecordsFreed[i]) + { + OSFreeMem(psDevNode->apsSyncCheckpointRecordsFreed[i]); + psDevNode->apsSyncCheckpointRecordsFreed[i] = NULL; + } + } + OSLockRelease(psDevNode->hSyncCheckpointRecordLock); + + if (psDevNode->hSyncCheckpointRecordNotify) + { + PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncCheckpointRecordNotify); + } + OSLockDestroy(psDevNode->hSyncCheckpointRecordLock); +} + +#if defined(PDUMP) + +static PVRSRV_ERROR +_SyncCheckpointAllocPDump(_SYNC_CHECKPOINT *psSyncCheckpoint) +{ + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Allocated Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)", + psSyncCheckpoint->azName, + psSyncCheckpoint->ui32UID, psSyncCheckpoint->hTimeline, + psSyncCheckpoint->sCheckpointUFOAddr.ui32Addr); + + DevmemPDumpLoadMemValue32(psSyncCheckpoint->psSyncCheckpointBlock->hMemDesc, + _SyncCheckpointGetOffset(psSyncCheckpoint), + PVRSRV_SYNC_CHECKPOINT_ACTIVE, + PDUMP_FLAGS_CONTINUOUS); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +_SyncCheckpointUpdatePDump(PPVRSRV_DEVICE_NODE psDevNode, _SYNC_CHECKPOINT *psSyncCheckpoint, IMG_UINT32 ui32Status, IMG_UINT32 ui32FenceSyncFlags) +{ + IMG_BOOL bSleepAllowed = (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? 
IMG_FALSE : IMG_TRUE; + PVRSRV_RGXDEV_INFO *psDevInfo; + + psDevInfo = psDevNode->pvDevice; + /* + We might be ask to PDump sync state outside of capture range + (e.g. texture uploads) so make this continuous. + */ + if (bSleepAllowed) + { + if (ui32Status == PVRSRV_SYNC_CHECKPOINT_ERRORED) + { + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Errored Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)", + psSyncCheckpoint->azName, + psSyncCheckpoint->ui32UID, psSyncCheckpoint->hTimeline, + (psSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr + + _SyncCheckpointGetOffset(psSyncCheckpoint))); + } + else + { + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Signalled Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)", + psSyncCheckpoint->azName, + psSyncCheckpoint->ui32UID, psSyncCheckpoint->hTimeline, + (psSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr + + _SyncCheckpointGetOffset(psSyncCheckpoint))); + } + + DevmemPDumpLoadMemValue32(psSyncCheckpoint->psSyncCheckpointBlock->hMemDesc, + _SyncCheckpointGetOffset(psSyncCheckpoint), + ui32Status, + PDUMP_FLAGS_CONTINUOUS); + } + else + { + _SYNC_CHECKPOINT_DEFERRED_SIGNAL *psSyncData; + OS_SPINLOCK_FLAGS uiFlags; + IMG_UINT16 ui16NewWriteIdx; + + OSSpinLockAcquire(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); + + ui16NewWriteIdx = GET_CP_CB_NEXT_IDX(psDevNode->ui16SyncCPWriteIdx); + if (ui16NewWriteIdx == psDevNode->ui16SyncCPReadIdx) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: ERROR Deferred SyncCheckpointSignal CB is full)", + __func__)); + } + else + { + psSyncData = GET_CP_CB_BASE(psDevNode->ui16SyncCPWriteIdx); + psSyncData->asSyncCheckpoint = *psSyncCheckpoint; + psSyncData->ui32Status = ui32Status; + psDevNode->ui16SyncCPWriteIdx = ui16NewWriteIdx; + } + + OSSpinLockRelease(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); + + OSScheduleMISR(psDevNode->pvSyncCPMISR); + } + + return PVRSRV_OK; +} + +static void +MISRHandler_PdumpDeferredSyncSignalPoster(void *pvData) +{ + 
PPVRSRV_DEVICE_NODE psDevNode = (PPVRSRV_DEVICE_NODE) pvData; + OS_SPINLOCK_FLAGS uiFlags; + IMG_UINT16 ui16ReadIdx, ui16WriteIdx; + _SYNC_CHECKPOINT_DEFERRED_SIGNAL *psSyncData; + PVRSRV_RGXDEV_INFO *psDevInfo; + + psDevInfo = psDevNode->pvDevice; + + OSLockAcquire(psDevNode->hSyncCheckpointSignalLock); + + OSSpinLockAcquire(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); + /* Snapshot current write and read offset of CB */ + ui16WriteIdx = psDevNode->ui16SyncCPWriteIdx; + ui16ReadIdx = psDevNode->ui16SyncCPReadIdx; + + OSSpinLockRelease(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); + /* CB is empty */ + if (ui16WriteIdx == ui16ReadIdx) + { + OSLockRelease(psDevNode->hSyncCheckpointSignalLock); + return; + } + do + { + /* Read item in the CB and flush it to pdump */ + psSyncData = GET_CP_CB_BASE(ui16ReadIdx); + _SyncCheckpointUpdatePDump(psDevNode, &psSyncData->asSyncCheckpoint, psSyncData->ui32Status, PVRSRV_FENCE_FLAG_NONE); + ui16ReadIdx = GET_CP_CB_NEXT_IDX(psDevNode->ui16SyncCPReadIdx); + /* Increment read offset in CB as one item is flushed to pdump */ + OSSpinLockAcquire(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); + psDevNode->ui16SyncCPReadIdx = ui16ReadIdx; + OSSpinLockRelease(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); + /* Call to this function will flush all the items present in CB + * when this function is called i.e. 
use snapshot of WriteOffset + * taken at the beginning in this function and iterate till Write != Read */ + } while (ui16WriteIdx != ui16ReadIdx); + + OSLockRelease(psDevNode->hSyncCheckpointSignalLock); +} + +PVRSRV_ERROR PVRSRVSyncCheckpointSignalledPDumpPolKM(PVRSRV_FENCE hFence) +{ + PVRSRV_ERROR eError; + PSYNC_CHECKPOINT *apsCheckpoints = NULL; + _SYNC_CHECKPOINT *psSyncCheckpoint = NULL; + IMG_UINT32 i, uiNumCheckpoints = 0; + + if (hFence != PVRSRV_NO_FENCE) + { + eError = g_psSyncCheckpointPfnStruct->pfnSyncFenceGetCheckpoints(hFence, &uiNumCheckpoints, &apsCheckpoints); + } + else + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + PVR_LOG_RETURN_IF_ERROR(eError, "g_pfnFenceGetCheckpoints"); + + if (uiNumCheckpoints) + { + /* Flushing deferred fence signals to pdump */ + psSyncCheckpoint = (_SYNC_CHECKPOINT *)apsCheckpoints[0]; + MISRHandler_PdumpDeferredSyncSignalPoster(psSyncCheckpoint->psSyncCheckpointBlock->psDevNode); + } + + for (i=0; i < uiNumCheckpoints; i++) + { + psSyncCheckpoint = (_SYNC_CHECKPOINT *)apsCheckpoints[i]; + if (psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) + { + PDUMPCOMMENTWITHFLAGS(psSyncCheckpoint->ui32PDumpFlags, + "Wait for Fence %s (ID:%d)", + psSyncCheckpoint->azName, + psSyncCheckpoint->ui32UID); + + eError = DevmemPDumpDevmemPol32(psSyncCheckpoint->psSyncCheckpointBlock->hMemDesc, + _SyncCheckpointGetOffset(psSyncCheckpoint), + PVRSRV_SYNC_CHECKPOINT_SIGNALLED, + 0xFFFFFFFF, + PDUMP_POLL_OPERATOR_EQUAL, + psSyncCheckpoint->ui32PDumpFlags); + PVR_LOG_IF_ERROR(eError, "DevmemPDumpDevmemPol32"); + } + } + + /* Free the memory that was allocated for the sync checkpoint list returned */ + if (apsCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsCheckpoints); + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +_SyncCheckpointPDumpTransition(void *pvData, PDUMP_TRANSITION_EVENT eEvent) +{ + _SYNC_CHECKPOINT_CONTEXT *psContext; + DLLIST_NODE *psNode, *psNext; + DLLIST_NODE 
*psNode1, *psNext1; + PPVRSRV_DEVICE_NODE psDevNode = (PPVRSRV_DEVICE_NODE) pvData; + + if ((eEvent == PDUMP_TRANSITION_EVENT_RANGE_ENTERED) || (eEvent == PDUMP_TRANSITION_EVENT_BLOCK_STARTED)) + { + OSLockAcquire(psDevNode->hSyncCheckpointContextListLock); + dllist_foreach_node(&psDevNode->sSyncCheckpointContextListHead, psNode, psNext) + { + psContext = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT_CONTEXT, sListNode); + + OSLockAcquire(psContext->hSyncCheckpointBlockListLock); + dllist_foreach_node(&psContext->sSyncCheckpointBlockListHead, psNode1, psNext1) + { + SYNC_CHECKPOINT_BLOCK *psSyncBlk = + IMG_CONTAINER_OF(psNode1, SYNC_CHECKPOINT_BLOCK, sListNode); + DevmemPDumpLoadMem(psSyncBlk->hMemDesc, + 0, + psSyncBlk->ui32SyncBlockSize, + PDUMP_FLAGS_CONTINUOUS); + } + OSLockRelease(psContext->hSyncCheckpointBlockListLock); + } + OSLockRelease(psDevNode->hSyncCheckpointContextListLock); + } + + return PVRSRV_OK; +} +#endif + +static void _CheckDeferredCleanupList(_SYNC_CHECKPOINT_CONTEXT *psContext) +{ + _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl; + PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE*)psContext->psDevNode; + DECLARE_DLLIST(sCleanupList); + DLLIST_NODE *psNode, *psNext; + OS_SPINLOCK_FLAGS uiFlags; + +#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, "%s called", __func__)); +#endif + + /* Check the deferred cleanup list and free any sync checkpoints we can */ + OSSpinLockAcquire(psCtxCtl->hDeferredCleanupListLock, uiFlags); + + if (dllist_is_empty(&psCtxCtl->sDeferredCleanupListHead)) + { + OSSpinLockRelease(psCtxCtl->hDeferredCleanupListLock, uiFlags); +#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, "%s: Defer free list is empty", __func__)); +#endif + /* if list is empty then we have nothing to do here */ + return; + } + + dllist_foreach_node(&psCtxCtl->sDeferredCleanupListHead, psNode, psNext) + { + _SYNC_CHECKPOINT *psSyncCheckpointInt = + 
IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sDeferredFreeListNode); + + if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount == + (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount))) + { + if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) + && psSyncCheckpointInt->hRecord) + { + PVRSRV_ERROR eError; + /* remove this sync record */ + eError = _SyncCheckpointRecordRemove(psSyncCheckpointInt->hRecord); + PVR_LOG_IF_ERROR(eError, "_SyncCheckpointRecordRemove"); + } + + /* Move the sync checkpoint from the deferred free list to local list */ + dllist_remove_node(&psSyncCheckpointInt->sDeferredFreeListNode); + /* It's not an ideal solution to traverse list of checkpoints-to-free + * twice but it allows us to avoid holding the lock for too long */ + dllist_add_to_tail(&sCleanupList, &psSyncCheckpointInt->sDeferredFreeListNode); + } +#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) + else + { + PVR_DPF((PVR_DBG_WARNING, "%s psSyncCheckpoint '%s'' (ID:%d)<%p>), " + "still pending (enq=%d,FWRef=%d)", __func__, + psSyncCheckpointInt->azName, psSyncCheckpointInt->ui32UID, + (void*)psSyncCheckpointInt, + (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)), + psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount)); + } +#endif + } + + OSSpinLockRelease(psCtxCtl->hDeferredCleanupListLock, uiFlags); + + dllist_foreach_node(&sCleanupList, psNode, psNext) { + _SYNC_CHECKPOINT *psSyncCheckpointInt = + IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sDeferredFreeListNode); + + /* Remove the sync checkpoint from the global list */ + OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); + dllist_remove_node(&psSyncCheckpointInt->sListNode); + OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); + + RGXSRV_HWPERF_FREE(psDevNode, SYNC_CP, psSyncCheckpointInt->ui32FWAddr); + +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) +#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, 
+ "%s attempting to return sync(ID:%d),%p> to pool", + __func__, + psSyncCheckpointInt->ui32UID, + (void *) psSyncCheckpointInt)); +#endif + if (!_PutCheckpointInPool(psSyncCheckpointInt)) +#endif + { +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) +#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, "%s pool is full, so just free it", + __func__)); +#endif +#endif +#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) + else + { + PVR_DPF((PVR_DBG_WARNING, + "%s psSyncCheckpoint '%s'' (ID:%d)<%p>), still pending (enq=%d,FWRef=%d)", + __func__, + psSyncCheckpointInt->azName, + psSyncCheckpointInt->ui32UID, + (void*)psSyncCheckpointInt, + (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)), + psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount)); +#endif + _FreeSyncCheckpoint(psSyncCheckpointInt); + } + } +} + +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) +static _SYNC_CHECKPOINT *_GetCheckpointFromPool(_SYNC_CHECKPOINT_CONTEXT *psContext) +{ + _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl; + _SYNC_CHECKPOINT *psSyncCheckpoint = NULL; + OS_SPINLOCK_FLAGS uiFlags; + + /* Acquire sync checkpoint pool lock */ + OSSpinLockAcquire(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); + + /* Check if we can allocate from the pool */ + if (psCtxCtl->bSyncCheckpointPoolValid && + (psCtxCtl->ui32SyncCheckpointPoolCount > SYNC_CHECKPOINT_POOL_SEDIMENT) && + (psCtxCtl->ui32SyncCheckpointPoolWp != psCtxCtl->ui32SyncCheckpointPoolRp)) + { + /* Get the next sync checkpoint from the pool */ + psSyncCheckpoint = psCtxCtl->psSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolRp]; + psCtxCtl->ui32SyncCheckpointPoolRp = + (psCtxCtl->ui32SyncCheckpointPoolRp + 1) & SYNC_CHECKPOINT_POOL_MASK; + psCtxCtl->ui32SyncCheckpointPoolCount--; + psCtxCtl->bSyncCheckpointPoolFull = IMG_FALSE; + psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_USE; +#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + 
"%s checkpoint(old ID:%d)<-POOL(%d/%d), psContext=<%p>, " + "poolRp=%d, poolWp=%d", + __func__, + psSyncCheckpoint->ui32UID, + psCtxCtl->ui32SyncCheckpointPoolCount, + SYNC_CHECKPOINT_POOL_SIZE, + (void *) psContext, + psCtxCtl->ui32SyncCheckpointPoolRp, + psCtxCtl->ui32SyncCheckpointPoolWp)); +#endif + } + /* Release sync checkpoint pool lock */ + OSSpinLockRelease(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); + + return psSyncCheckpoint; +} + +static IMG_BOOL _PutCheckpointInPool(_SYNC_CHECKPOINT *psSyncCheckpoint) +{ + _SYNC_CHECKPOINT_CONTEXT *psContext = psSyncCheckpoint->psSyncCheckpointBlock->psContext; + _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl; + IMG_BOOL bReturnedToPool = IMG_FALSE; + OS_SPINLOCK_FLAGS uiFlags; + + /* Acquire sync checkpoint pool lock */ + OSSpinLockAcquire(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); + + /* Check if pool has space */ + if (psCtxCtl->bSyncCheckpointPoolValid && !psCtxCtl->bSyncCheckpointPoolFull) + { + /* Put the sync checkpoint into the next write slot in the pool */ + psCtxCtl->psSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolWp] = psSyncCheckpoint; + psCtxCtl->ui32SyncCheckpointPoolWp = + (psCtxCtl->ui32SyncCheckpointPoolWp + 1) & SYNC_CHECKPOINT_POOL_MASK; + psCtxCtl->ui32SyncCheckpointPoolCount++; + psCtxCtl->bSyncCheckpointPoolFull = + ((psCtxCtl->ui32SyncCheckpointPoolCount > 0) && + (psCtxCtl->ui32SyncCheckpointPoolWp == psCtxCtl->ui32SyncCheckpointPoolRp)); + bReturnedToPool = IMG_TRUE; + psSyncCheckpoint->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_UNDEF; + psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_POOL; +#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s checkpoint(ID:%d)->POOL(%d/%d), poolRp=%d, poolWp=%d", + __func__, + psSyncCheckpoint->ui32UID, + psCtxCtl->ui32SyncCheckpointPoolCount, + SYNC_CHECKPOINT_POOL_SIZE, + psCtxCtl->ui32SyncCheckpointPoolRp, + psCtxCtl->ui32SyncCheckpointPoolWp)); +#endif + } + /* 
Release sync checkpoint pool lock */ + OSSpinLockRelease(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); + + return bReturnedToPool; +} + +static IMG_UINT32 _CleanCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext) +{ + _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl; + _SYNC_CHECKPOINT *psCheckpoint = NULL; + DECLARE_DLLIST(sCleanupList); + DLLIST_NODE *psThis, *psNext; + OS_SPINLOCK_FLAGS uiFlags; + IMG_UINT32 ui32ItemsFreed = 0, ui32NullScpCount = 0, ui32PoolCount; + IMG_BOOL bPoolValid; + + /* Acquire sync checkpoint pool lock */ + OSSpinLockAcquire(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); + + bPoolValid = psCtxCtl->bSyncCheckpointPoolValid; + ui32PoolCount = psCtxCtl->ui32SyncCheckpointPoolCount; + + /* While the pool still contains sync checkpoints, free them */ + while (bPoolValid && psCtxCtl->ui32SyncCheckpointPoolCount > 0) + { + /* Get the sync checkpoint from the next read slot in the pool */ + psCheckpoint = psCtxCtl->psSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolRp]; + psCtxCtl->ui32SyncCheckpointPoolRp = + (psCtxCtl->ui32SyncCheckpointPoolRp + 1) & SYNC_CHECKPOINT_POOL_MASK; + psCtxCtl->ui32SyncCheckpointPoolCount--; + psCtxCtl->bSyncCheckpointPoolFull = + ((psCtxCtl->ui32SyncCheckpointPoolCount > 0) && + (psCtxCtl->ui32SyncCheckpointPoolWp == psCtxCtl->ui32SyncCheckpointPoolRp)); + + if (psCheckpoint) + { + PVR_ASSERT(!dllist_node_is_in_list(&psCheckpoint->sListNode)); + /* before checkpoints are added to the pool they are removed + * from the list so it's safe to use sListNode here */ + dllist_add_to_head(&sCleanupList, &psCheckpoint->sListNode); + } + else + { + ui32NullScpCount++; + } + } + + /* Release sync checkpoint pool lock */ + OSSpinLockRelease(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); + + /* go through the local list and free all of the sync checkpoints */ + +#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, "%s psContext=<%p>, bSyncCheckpointPoolValid=%d, " + 
"uiSyncCheckpointPoolCount=%d", __func__, (void *) psContext, + bPoolValid, ui32PoolCount)); + + if (ui32NullScpCount > 0) + { + PVR_DPF((PVR_DBG_WARNING, "%s pool contained %u NULL entries", __func__, + ui32NullScpCount)); + } +#endif + + dllist_foreach_node(&sCleanupList, psThis, psNext) + { + psCheckpoint = IMG_CONTAINER_OF(psThis, _SYNC_CHECKPOINT, sListNode); + +#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) + if (psCheckpoint->ui32ValidationCheck != SYNC_CHECKPOINT_PATTERN_IN_POOL) + { + PVR_DPF((PVR_DBG_WARNING, "%s pool contains invalid entry " + "(ui32ValidationCheck=0x%x)", __func__, + psCheckpoint->ui32ValidationCheck)); + } + + PVR_DPF((PVR_DBG_WARNING, + "%s psSyncCheckpoint(ID:%d)", + __func__, psCheckpoint->ui32UID)); + PVR_DPF((PVR_DBG_WARNING, + "%s psSyncCheckpoint->ui32ValidationCheck=0x%x", + __func__, psCheckpoint->ui32ValidationCheck)); + PVR_DPF((PVR_DBG_WARNING, + "%s psSyncCheckpoint->uiSpanAddr=0x%llx", + __func__, psCheckpoint->uiSpanAddr)); + PVR_DPF((PVR_DBG_WARNING, + "%s psSyncCheckpoint->psSyncCheckpointBlock=<%p>", + __func__, (void *) psCheckpoint->psSyncCheckpointBlock)); + PVR_DPF((PVR_DBG_WARNING, + "%s psSyncCheckpoint->psSyncCheckpointBlock->psContext=<%p>", + __func__, (void *) psCheckpoint->psSyncCheckpointBlock->psContext)); + PVR_DPF((PVR_DBG_WARNING, + "%s psSyncCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA=<%p>", + __func__, (void *) psCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA)); + + PVR_DPF((PVR_DBG_WARNING, + "%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), " + "psSubAllocRA=<%p>, ui32SpanAddr=0x%llx", + __func__, + psCheckpoint->ui32UID, + (void *) psCheckpoint, + (void *) psCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA, + psCheckpoint->uiSpanAddr)); +#endif + + dllist_remove_node(psThis); + + _FreeSyncCheckpoint(psCheckpoint); + ui32ItemsFreed++; + } + + return ui32ItemsFreed; +} +#endif /* (SYNC_CHECKPOINT_POOL_SIZE > 0) */ diff --git 
a/drivers/mcst/gpu-imgtec/services/server/common/sync_server.c b/drivers/mcst/gpu-imgtec/services/server/common/sync_server.c new file mode 100644 index 000000000000..5d2ca2419069 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/sync_server.c @@ -0,0 +1,1227 @@ +/*************************************************************************/ /*! +@File sync_server.c +@Title Server side synchronisation functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side functions that for synchronisation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#include "img_types.h" +#include "img_defs.h" +#include "sync_server.h" +#include "allocmem.h" +#include "device.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "osfunc.h" +#include "pdump.h" +#include "pvr_debug.h" +#include "pvr_notifier.h" +#include "pdump_km.h" +#include "sync.h" +#include "sync_internal.h" +#include "connection_server.h" +#include "htbuffer.h" +#include "rgxhwperf.h" +#include "info_page.h" + +#include "sync_checkpoint_internal.h" +#include "sync_checkpoint.h" + +/* Include this to obtain MAX_SYNC_CHECKPOINTS_PER_FENCE */ +#include "sync_checkpoint_external.h" + +/* Include this to obtain PVRSRV_MAX_DEV_VARS */ +#include "pvrsrv_devvar.h" + +#if defined(SUPPORT_SECURE_EXPORT) +#include "ossecure_export.h" +#endif + +#if defined(SUPPORT_EXTRA_METASP_DEBUG) +#include "rgxdebug.h" +#endif + +/* Set this to enable debug relating to the construction and maintenance of the sync address list */ +#define SYNC_ADDR_LIST_DEBUG 0 + +/* Set maximum number of FWAddrs that can be accommodated in a SYNC_ADDR_LIST. + * This should allow for PVRSRV_MAX_DEV_VARS dev vars plus + * MAX_SYNC_CHECKPOINTS_PER_FENCE sync checkpoints for check fences. + * The same SYNC_ADDR_LIST is also used to hold UFOs for updates. 
While this + * may need to accommodate the additional sync prim update returned by Native + * sync implementation (used for timeline debug), the size calculated from + * PVRSRV_MAX_DEV_VARS+MAX_SYNC_CHECKPOINTS_PER_FENCE should be ample. + */ +#define PVRSRV_MAX_SYNC_ADDR_LIST_SIZE (PVRSRV_MAX_DEV_VARS+MAX_SYNC_CHECKPOINTS_PER_FENCE) +/* Check that helper functions will not be preparing longer lists of + * UFOs than the FW can handle. + */ +static_assert(PVRSRV_MAX_SYNC_ADDR_LIST_SIZE <= RGXFWIF_CCB_CMD_MAX_UFOS, + "PVRSRV_MAX_SYNC_ADDR_LIST_SIZE > RGXFWIF_CCB_CMD_MAX_UFOS."); + +/* Max number of syncs allowed in a sync prim op */ +#define SYNC_PRIM_OP_MAX_SYNCS 1024 + +struct _SYNC_PRIMITIVE_BLOCK_ +{ + PVRSRV_DEVICE_NODE *psDevNode; + DEVMEM_MEMDESC *psMemDesc; + IMG_UINT32 *pui32LinAddr; + IMG_UINT32 ui32BlockSize; /*!< Size of the Sync Primitive Block */ + ATOMIC_T sRefCount; + DLLIST_NODE sConnectionNode; + SYNC_CONNECTION_DATA *psSyncConnectionData; /*!< Link back to the sync connection data if there is one */ + PRGXFWIF_UFO_ADDR uiFWAddr; /*!< The firmware address of the sync prim block */ +}; + +struct _SYNC_CONNECTION_DATA_ +{ + DLLIST_NODE sListHead; /*!< list of sync block associated with / created against this connection */ + ATOMIC_T sRefCount; /*!< number of references to this object */ + POS_LOCK hLock; /*!< lock protecting the list of sync blocks */ +}; + +#define DECREMENT_WITH_WRAP(value, sz) ((value) ? ((value) - 1) : ((sz) - 1)) + +/* this is the max number of syncs we will search or dump + * at any time. 
+ */ +#define SYNC_RECORD_LIMIT 20000 + +enum SYNC_RECORD_TYPE +{ + SYNC_RECORD_TYPE_UNKNOWN = 0, + SYNC_RECORD_TYPE_CLIENT, + SYNC_RECORD_TYPE_SERVER, +}; + +struct SYNC_RECORD +{ + PVRSRV_DEVICE_NODE *psDevNode; + SYNC_PRIMITIVE_BLOCK *psServerSyncPrimBlock; /*!< handle to _SYNC_PRIMITIVE_BLOCK_ */ + IMG_UINT32 ui32SyncOffset; /*!< offset to sync in block */ + IMG_UINT32 ui32FwBlockAddr; + IMG_PID uiPID; + IMG_UINT64 ui64OSTime; + enum SYNC_RECORD_TYPE eRecordType; + DLLIST_NODE sNode; + IMG_CHAR szClassName[PVRSRV_SYNC_NAME_LENGTH]; +}; + +#if defined(SYNC_DEBUG) || defined(REFCOUNT_DEBUG) +#define SYNC_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__) +#else +#define SYNC_REFCOUNT_PRINT(fmt, ...) +#endif + +#if defined(SYNC_DEBUG) +#define SYNC_UPDATES_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__) +#else +#define SYNC_UPDATES_PRINT(fmt, ...) +#endif + +/*! +***************************************************************************** + @Function : SyncPrimitiveBlockToFWAddr + + @Description : Given a pointer to a sync primitive block and an offset, + returns the firmware address of the sync. + + @Input psSyncPrimBlock : Sync primitive block which contains the sync + @Input ui32Offset : Offset of sync within the sync primitive block + @Output psAddrOut : Absolute FW address of the sync is written out through + this pointer + @Return : PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input + parameters are invalid. 
+*****************************************************************************/ + +PVRSRV_ERROR +SyncPrimitiveBlockToFWAddr(SYNC_PRIMITIVE_BLOCK *psSyncPrimBlock, + IMG_UINT32 ui32Offset, + PRGXFWIF_UFO_ADDR *psAddrOut) +{ + /* check offset is legal */ + if (unlikely((ui32Offset >= psSyncPrimBlock->ui32BlockSize) || + (ui32Offset % sizeof(IMG_UINT32)))) + { + PVR_DPF((PVR_DBG_ERROR, "SyncPrimitiveBlockToFWAddr: parameters check failed")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psAddrOut->ui32Addr = psSyncPrimBlock->uiFWAddr.ui32Addr + ui32Offset; + return PVRSRV_OK; +} + +/*! +***************************************************************************** + @Function : SyncAddrListGrow + + @Description : Grow the SYNC_ADDR_LIST so it can accommodate the given + number of syncs, up to a maximum of PVRSRV_MAX_SYNC_PRIMS. + + @Input psList : The SYNC_ADDR_LIST to grow + @Input ui32NumSyncs : The number of sync addresses to be able to hold + @Return : PVRSRV_OK on success +*****************************************************************************/ + +static PVRSRV_ERROR SyncAddrListGrow(SYNC_ADDR_LIST *psList, IMG_UINT32 ui32NumSyncs) +{ + if (unlikely(ui32NumSyncs > PVRSRV_MAX_SYNC_ADDR_LIST_SIZE)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: ui32NumSyncs=%u > PVRSRV_MAX_SYNC_ADDR_LIST_SIZE=%u", __func__, ui32NumSyncs, PVRSRV_MAX_SYNC_ADDR_LIST_SIZE)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + +#if (SYNC_ADDR_LIST_DEBUG == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs)); +#endif + if (ui32NumSyncs > psList->ui32NumSyncs) + { + if (psList->pasFWAddrs == NULL) + { + psList->pasFWAddrs = OSAllocMem(sizeof(PRGXFWIF_UFO_ADDR) * PVRSRV_MAX_SYNC_ADDR_LIST_SIZE); + PVR_RETURN_IF_NOMEM(psList->pasFWAddrs); + } + + psList->ui32NumSyncs = ui32NumSyncs; + } + +#if (SYNC_ADDR_LIST_DEBUG == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d, 
ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs)); +#endif + return PVRSRV_OK; +} + +/*! +***************************************************************************** + @Function : SyncAddrListInit + + @Description : Initialise a SYNC_ADDR_LIST structure ready for use + + @Input psList : The SYNC_ADDR_LIST structure to initialise + @Return : None +*****************************************************************************/ + +void +SyncAddrListInit(SYNC_ADDR_LIST *psList) +{ + psList->ui32NumSyncs = 0; + psList->pasFWAddrs = NULL; +} + +/*! +***************************************************************************** + @Function : SyncAddrListDeinit + + @Description : Frees any resources associated with the given SYNC_ADDR_LIST + + @Input psList : The SYNC_ADDR_LIST structure to deinitialise + @Return : None +*****************************************************************************/ + +void +SyncAddrListDeinit(SYNC_ADDR_LIST *psList) +{ + if (psList->pasFWAddrs != NULL) + { + OSFreeMem(psList->pasFWAddrs); + } +} + +/*! +***************************************************************************** + @Function : SyncAddrListPopulate + + @Description : Populate the given SYNC_ADDR_LIST with the FW addresses + of the syncs given by the SYNC_PRIMITIVE_BLOCKs and sync offsets + + @Input ui32NumSyncs : The number of syncs being passed in + @Input apsSyncPrimBlock: Array of pointers to SYNC_PRIMITIVE_BLOCK structures + in which the syncs are based + @Input paui32SyncOffset: Array of offsets within each of the sync primitive blocks + where the syncs are located + @Return : PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input + parameters are invalid. 
+*****************************************************************************/ + +PVRSRV_ERROR +SyncAddrListPopulate(SYNC_ADDR_LIST *psList, + IMG_UINT32 ui32NumSyncs, + SYNC_PRIMITIVE_BLOCK **apsSyncPrimBlock, + IMG_UINT32 *paui32SyncOffset) +{ + IMG_UINT32 i; + PVRSRV_ERROR eError; + +#if (SYNC_ADDR_LIST_DEBUG == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs)); +#endif + if (ui32NumSyncs > psList->ui32NumSyncs) + { + eError = SyncAddrListGrow(psList, ui32NumSyncs); + + PVR_RETURN_IF_ERROR(eError); + } + + psList->ui32NumSyncs = ui32NumSyncs; + + for (i = 0; i < ui32NumSyncs; i++) + { + eError = SyncPrimitiveBlockToFWAddr(apsSyncPrimBlock[i], + paui32SyncOffset[i], + &psList->pasFWAddrs[i]); + + PVR_RETURN_IF_ERROR(eError); + } + +#if (SYNC_ADDR_LIST_DEBUG == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs)); +#endif + return PVRSRV_OK; +} + +PVRSRV_ERROR +SyncAddrListAppendSyncPrim(SYNC_ADDR_LIST *psList, + PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32FwAddr = 0; + +#if (SYNC_ADDR_LIST_DEBUG == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs)); +#endif + /* Ensure there's room in psList for the additional sync prim update */ + eError = SyncAddrListGrow(psList, psList->ui32NumSyncs + 1); + PVR_GOTO_IF_ERROR(eError, e0); + + SyncPrimGetFirmwareAddr(psSyncPrim, &ui32FwAddr); +#if (SYNC_ADDR_LIST_DEBUG == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: Appending sync prim <%p> UFO addr (0x%x) to psList[->pasFWAddrss[%d]", __func__, (void*)psSyncPrim, ui32FwAddr, psList->ui32NumSyncs-1)); +#endif + psList->pasFWAddrs[psList->ui32NumSyncs-1].ui32Addr = ui32FwAddr; + +#if (SYNC_ADDR_LIST_DEBUG == 1) + { + IMG_UINT32 iii; + + 
PVR_DPF((PVR_DBG_ERROR, "%s: psList->ui32NumSyncs=%d", __func__, psList->ui32NumSyncs)); + for (iii=0; iiiui32NumSyncs; iii++) + { + PVR_DPF((PVR_DBG_ERROR, "%s: psList->pasFWAddrs[%d].ui32Addr=0x%x", __func__, iii, psList->pasFWAddrs[iii].ui32Addr)); + } + } +#endif +e0: +#if (SYNC_ADDR_LIST_DEBUG == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d", __func__, (void*)psList, psList->ui32NumSyncs)); +#endif + return eError; +} + + +static PVRSRV_ERROR +_AppendCheckpoints(SYNC_ADDR_LIST *psList, + IMG_UINT32 ui32NumCheckpoints, + PSYNC_CHECKPOINT *apsSyncCheckpoint, + IMG_BOOL bDeRefCheckpoints) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32SyncCheckpointIndex; + IMG_UINT32 ui32RollbackSize = psList->ui32NumSyncs; + +#if (SYNC_ADDR_LIST_DEBUG == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints)); +#endif + /* Ensure there's room in psList for the sync checkpoints */ + eError = SyncAddrListGrow(psList, psList->ui32NumSyncs + ui32NumCheckpoints); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: * * * * ERROR * * * * Trying to SyncAddrListGrow(psList=<%p>, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints)); + goto e0; + } + +#if (SYNC_ADDR_LIST_DEBUG == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: (ui32NumCheckpoints=%d) (psList->ui32NumSyncs is now %d) array already contains %d FWAddrs:", __func__, ui32NumCheckpoints, psList->ui32NumSyncs, ui32RollbackSize)); + if (ui32RollbackSize > 0) + { + { + IMG_UINT32 kk; + for (kk=0; kkpsList->pasFWAddrs[%d].ui32Addr = %u(0x%x)", __func__, + (void*)&psList->pasFWAddrs[kk], kk, + psList->pasFWAddrs[kk].ui32Addr, psList->pasFWAddrs[kk].ui32Addr)); + } + } + } + PVR_DPF((PVR_DBG_ERROR, "%s: apsSyncCheckpoint=<%p>, apsSyncCheckpoint[0] = <%p>", __func__, (void*)apsSyncCheckpoint, 
(void*)apsSyncCheckpoint[0])); +#endif + for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndexpasFWAddrs[ui32RollbackSize + ui32SyncCheckpointIndex].ui32Addr = SyncCheckpointGetFirmwareAddr(apsSyncCheckpoint[ui32SyncCheckpointIndex]); +#if (SYNC_ADDR_LIST_DEBUG == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: SyncCheckpointCCBEnqueued(<%p>)", __func__, (void*)apsSyncCheckpoint[ui32SyncCheckpointIndex])); + PVR_DPF((PVR_DBG_ERROR, "%s: ID:%d", __func__, SyncCheckpointGetId((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]))); +#endif + SyncCheckpointCCBEnqueued((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]); + if (bDeRefCheckpoints) + { + /* Drop the reference that was taken internally by the OS implementation of resolve_fence() */ + SyncCheckpointDropRef((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]); + } + } +#if (SYNC_ADDR_LIST_DEBUG == 1) + if (psList->ui32NumSyncs > 0) + { + IMG_UINT32 kk; + for (kk=0; kkui32NumSyncs; kk++) + { + PVR_DPF((PVR_DBG_ERROR, "%s: <%p>psList->pasFWAddrs[%d].ui32Addr = %u(0x%x)", __func__, + (void*)&psList->pasFWAddrs[kk], kk, + psList->pasFWAddrs[kk].ui32Addr, psList->pasFWAddrs[kk].ui32Addr)); + } + } +#endif + return eError; + +e0: + for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndex, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints)); +#endif + return eError; +} + +/*! +***************************************************************************** + @Function : SyncAddrListAppendCheckpoints + + @Description : Append the FW addresses of the sync checkpoints given in + the PSYNC_CHECKPOINTs array to the given SYNC_ADDR_LIST + + @Input ui32NumSyncCheckpoints : The number of sync checkpoints + being passed in + @Input apsSyncCheckpoint : Array of PSYNC_CHECKPOINTs whose details + are to be appended to the SYNC_ADDR_LIST + @Return : PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input + parameters are invalid. 
+*****************************************************************************/ +PVRSRV_ERROR +SyncAddrListAppendCheckpoints(SYNC_ADDR_LIST *psList, + IMG_UINT32 ui32NumCheckpoints, + PSYNC_CHECKPOINT *apsSyncCheckpoint) +{ + return _AppendCheckpoints(psList, ui32NumCheckpoints, apsSyncCheckpoint, IMG_FALSE); +} + +/*! +***************************************************************************** + @Function : SyncAddrListAppendAndDeRefCheckpoints + + @Description : Append the FW addresses of the sync checkpoints given in + the PSYNC_CHECKPOINTs array to the given SYNC_ADDR_LIST. + A reference is dropped for each of the checkpoints. + + @Input ui32NumSyncCheckpoints : The number of sync checkpoints + being passed in + @Input apsSyncCheckpoint : Array of PSYNC_CHECKPOINTs whose details + are to be appended to the SYNC_ADDR_LIST + @Return : PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input + parameters are invalid. +*****************************************************************************/ +PVRSRV_ERROR +SyncAddrListAppendAndDeRefCheckpoints(SYNC_ADDR_LIST *psList, + IMG_UINT32 ui32NumCheckpoints, + PSYNC_CHECKPOINT *apsSyncCheckpoint) +{ + return _AppendCheckpoints(psList, ui32NumCheckpoints, apsSyncCheckpoint, IMG_TRUE); +} + +void +SyncAddrListDeRefCheckpoints(IMG_UINT32 ui32NumCheckpoints, + PSYNC_CHECKPOINT *apsSyncCheckpoint) +{ + IMG_UINT32 ui32SyncCheckpointIndex; + + for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndex)", __func__, (void*)psList)); +#endif + if (psList) + { +#if (SYNC_ADDR_LIST_DEBUG == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: psList->ui32NumSyncs=%d", __func__, psList->ui32NumSyncs)); +#endif + for (ui32SyncIndex=0; ui32SyncIndexui32NumSyncs; ui32SyncIndex++) + { + if (psList->pasFWAddrs[ui32SyncIndex].ui32Addr & 0x1) + { + SyncCheckpointRollbackFromUFO(psDevNode, psList->pasFWAddrs[ui32SyncIndex].ui32Addr); + } + } + } + return eError; +} + +PVRSRV_ERROR +PVRSRVSyncRecordAddKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE 
*psDevNode, + SYNC_RECORD_HANDLE *phRecord, + SYNC_PRIMITIVE_BLOCK *hServerSyncPrimBlock, + IMG_UINT32 ui32FwBlockAddr, + IMG_UINT32 ui32SyncOffset, + IMG_BOOL bServerSync, + IMG_UINT32 ui32ClassNameSize, + const IMG_CHAR *pszClassName) +{ + struct SYNC_RECORD * psSyncRec; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + RGXSRV_HWPERF_ALLOC(psDevNode, SYNC, + ui32FwBlockAddr + ui32SyncOffset, + pszClassName, + ui32ClassNameSize); + + PVR_RETURN_IF_INVALID_PARAM(phRecord); + + *phRecord = NULL; + + psSyncRec = OSAllocMem(sizeof(*psSyncRec)); + PVR_GOTO_IF_NOMEM(psSyncRec, eError, fail_alloc); + + psSyncRec->psDevNode = psDevNode; + psSyncRec->psServerSyncPrimBlock = hServerSyncPrimBlock; + psSyncRec->ui32SyncOffset = ui32SyncOffset; + psSyncRec->ui32FwBlockAddr = ui32FwBlockAddr; + psSyncRec->ui64OSTime = OSClockns64(); + psSyncRec->uiPID = OSGetCurrentProcessID(); + psSyncRec->eRecordType = bServerSync? SYNC_RECORD_TYPE_SERVER: SYNC_RECORD_TYPE_CLIENT; + + if (pszClassName) + { + if (ui32ClassNameSize >= PVRSRV_SYNC_NAME_LENGTH) + ui32ClassNameSize = PVRSRV_SYNC_NAME_LENGTH; + /* Copy over the class name annotation */ + OSStringLCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize); + } + else + { + /* No class name annotation */ + psSyncRec->szClassName[0] = 0; + } + + OSLockAcquire(psDevNode->hSyncServerRecordLock); + if (psDevNode->ui32SyncServerRecordCount < SYNC_RECORD_LIMIT) + { + dllist_add_to_head(&psDevNode->sSyncServerRecordList, &psSyncRec->sNode); + psDevNode->ui32SyncServerRecordCount++; + + if (psDevNode->ui32SyncServerRecordCount > psDevNode->ui32SyncServerRecordCountHighWatermark) + { + psDevNode->ui32SyncServerRecordCountHighWatermark = psDevNode->ui32SyncServerRecordCount; + } + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync record \"%s\". 
%u records already exist.", + __func__, + pszClassName, + psDevNode->ui32SyncServerRecordCount)); + OSFreeMem(psSyncRec); + psSyncRec = NULL; + eError = PVRSRV_ERROR_TOOMANYBUFFERS; + } + OSLockRelease(psDevNode->hSyncServerRecordLock); + + *phRecord = (SYNC_RECORD_HANDLE)psSyncRec; + +fail_alloc: + return eError; +} + +PVRSRV_ERROR +PVRSRVSyncRecordRemoveByHandleKM( + SYNC_RECORD_HANDLE hRecord) +{ + struct SYNC_RECORD **ppFreedSync; + struct SYNC_RECORD *pSync = (struct SYNC_RECORD*)hRecord; + PVRSRV_DEVICE_NODE *psDevNode; + + PVR_RETURN_IF_INVALID_PARAM(hRecord); + + psDevNode = pSync->psDevNode; + + OSLockAcquire(psDevNode->hSyncServerRecordLock); + + RGXSRV_HWPERF_FREE(psDevNode, SYNC, pSync->ui32FwBlockAddr + pSync->ui32SyncOffset); + + dllist_remove_node(&pSync->sNode); + + if (psDevNode->uiSyncServerRecordFreeIdx >= PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN) + { + PVR_DPF((PVR_DBG_ERROR, "%s: freed sync record index out of range", + __func__)); + psDevNode->uiSyncServerRecordFreeIdx = 0; + } + ppFreedSync = &psDevNode->apsSyncServerRecordsFreed[psDevNode->uiSyncServerRecordFreeIdx]; + psDevNode->uiSyncServerRecordFreeIdx = + (psDevNode->uiSyncServerRecordFreeIdx + 1) % PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; + + if (*ppFreedSync) + { + OSFreeMem(*ppFreedSync); + } + pSync->psServerSyncPrimBlock = NULL; + pSync->ui64OSTime = OSClockns64(); + *ppFreedSync = pSync; + + psDevNode->ui32SyncServerRecordCount--; + + OSLockRelease(psDevNode->hSyncServerRecordLock); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PVRSRVSyncAllocEventKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_BOOL bServerSync, + IMG_UINT32 ui32FWAddr, + IMG_UINT32 ui32ClassNameSize, + const IMG_CHAR *pszClassName) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + RGXSRV_HWPERF_ALLOC(psDevNode, SYNC, ui32FWAddr, pszClassName, ui32ClassNameSize); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PVRSRVSyncFreeEventKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + 
IMG_UINT32 ui32FWAddr) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + RGXSRV_HWPERF_FREE(psDevNode, SYNC, ui32FWAddr); + + return PVRSRV_OK; +} + +static +void _SyncConnectionRef(SYNC_CONNECTION_DATA *psSyncConnectionData) +{ + IMG_INT iRefCount = OSAtomicIncrement(&psSyncConnectionData->sRefCount); + + SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d", + __func__, psSyncConnectionData, iRefCount); + PVR_UNREFERENCED_PARAMETER(iRefCount); +} + +static +void _SyncConnectionUnref(SYNC_CONNECTION_DATA *psSyncConnectionData) +{ + IMG_INT iRefCount = OSAtomicDecrement(&psSyncConnectionData->sRefCount); + if (iRefCount == 0) + { + SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d", + __func__, psSyncConnectionData, iRefCount); + + PVR_ASSERT(dllist_is_empty(&psSyncConnectionData->sListHead)); + OSLockDestroy(psSyncConnectionData->hLock); + OSFreeMem(psSyncConnectionData); + } + else + { + SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d", + __func__, psSyncConnectionData, iRefCount); + PVR_ASSERT(iRefCount > 0); + } +} + +static +void _SyncConnectionAddBlock(CONNECTION_DATA *psConnection, SYNC_PRIMITIVE_BLOCK *psBlock) +{ + if (psConnection) + { + SYNC_CONNECTION_DATA *psSyncConnectionData = psConnection->psSyncConnectionData; + + /* + Make sure the connection doesn't go away. It doesn't matter that we will release + the lock between as the refcount and list don't have to be atomic w.r.t. 
to each other + */ + _SyncConnectionRef(psSyncConnectionData); + + OSLockAcquire(psSyncConnectionData->hLock); + if (psConnection != NULL) + { + dllist_add_to_head(&psSyncConnectionData->sListHead, &psBlock->sConnectionNode); + } + OSLockRelease(psSyncConnectionData->hLock); + psBlock->psSyncConnectionData = psSyncConnectionData; + } + else + { + psBlock->psSyncConnectionData = NULL; + } +} + +static +void _SyncConnectionRemoveBlock(SYNC_PRIMITIVE_BLOCK *psBlock) +{ + SYNC_CONNECTION_DATA *psSyncConnectionData = psBlock->psSyncConnectionData; + + if (psBlock->psSyncConnectionData) + { + OSLockAcquire(psSyncConnectionData->hLock); + dllist_remove_node(&psBlock->sConnectionNode); + OSLockRelease(psSyncConnectionData->hLock); + + _SyncConnectionUnref(psBlock->psSyncConnectionData); + } +} + +static inline +void _DoPrimBlockFree(SYNC_PRIMITIVE_BLOCK *psSyncBlk) +{ + PVRSRV_DEVICE_NODE *psDevNode = psSyncBlk->psDevNode; + + SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d (remove)", + __func__, psSyncBlk, OSAtomicRead(&psSyncBlk->sRefCount)); + + PVR_ASSERT(OSAtomicRead(&psSyncBlk->sRefCount) == 1); + + _SyncConnectionRemoveBlock(psSyncBlk); + DevmemReleaseCpuVirtAddr(psSyncBlk->psMemDesc); + psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->psMemDesc); + OSFreeMem(psSyncBlk); +} + +PVRSRV_ERROR +PVRSRVAllocSyncPrimitiveBlockKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDevNode, + SYNC_PRIMITIVE_BLOCK **ppsSyncBlk, + IMG_UINT32 *puiSyncPrimVAddr, + IMG_UINT32 *puiSyncPrimBlockSize, + PMR **ppsSyncPMR) +{ + SYNC_PRIMITIVE_BLOCK *psNewSyncBlk; + PVRSRV_ERROR eError; + + psNewSyncBlk = OSAllocMem(sizeof(SYNC_PRIMITIVE_BLOCK)); + PVR_GOTO_IF_NOMEM(psNewSyncBlk, eError, e0); + + psNewSyncBlk->psDevNode = psDevNode; + + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Allocate UFO block"); + + eError = psDevNode->pfnAllocUFOBlock(psDevNode, + &psNewSyncBlk->psMemDesc, + &psNewSyncBlk->uiFWAddr.ui32Addr, + &psNewSyncBlk->ui32BlockSize); + 
PVR_GOTO_IF_ERROR(eError, e1); + + *puiSyncPrimVAddr = psNewSyncBlk->uiFWAddr.ui32Addr; + + eError = DevmemAcquireCpuVirtAddr(psNewSyncBlk->psMemDesc, + (void **) &psNewSyncBlk->pui32LinAddr); + PVR_GOTO_IF_ERROR(eError, e2); + + eError = DevmemLocalGetImportHandle(psNewSyncBlk->psMemDesc, (void **) ppsSyncPMR); + + PVR_GOTO_IF_ERROR(eError, e3); + + OSAtomicWrite(&psNewSyncBlk->sRefCount, 1); + + /* If there is a connection pointer then add the new block onto it's list */ + _SyncConnectionAddBlock(psConnection, psNewSyncBlk); + + *ppsSyncBlk = psNewSyncBlk; + *puiSyncPrimBlockSize = psNewSyncBlk->ui32BlockSize; + + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Allocated UFO block (FirmwareVAddr = 0x%08x)", + *puiSyncPrimVAddr); + + return PVRSRV_OK; + +e3: + DevmemReleaseCpuVirtAddr(psNewSyncBlk->psMemDesc); +e2: + psDevNode->pfnFreeUFOBlock(psDevNode, psNewSyncBlk->psMemDesc); +e1: + OSFreeMem(psNewSyncBlk); +e0: + return eError; +} + +PVRSRV_ERROR +PVRSRVFreeSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk) +{ + + /* This function is an alternative to the above without reference counting. 
+ * With the removal of sync prim ops for server syncs we no longer have to + * reference count prim blocks as the reference will never be incremented / + * decremented by a prim op */ + _DoPrimBlockFree(psSyncBlk); + return PVRSRV_OK; +} + +static INLINE IMG_BOOL _CheckSyncIndex(SYNC_PRIMITIVE_BLOCK *psSyncBlk, + IMG_UINT32 ui32Index) +{ + return ((ui32Index * sizeof(IMG_UINT32)) < psSyncBlk->ui32BlockSize); +} + +PVRSRV_ERROR +PVRSRVSyncPrimSetKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Index, + IMG_UINT32 ui32Value) +{ + if (_CheckSyncIndex(psSyncBlk, ui32Index)) + { + psSyncBlk->pui32LinAddr[ui32Index] = ui32Value; + return PVRSRV_OK; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncPrimSetKM: Index %u out of range for " + "0x%08X byte sync block (value 0x%08X)", + ui32Index, + psSyncBlk->ui32BlockSize, + ui32Value)); + return PVRSRV_ERROR_INVALID_PARAMS; + } +} + +#if defined(PDUMP) +PVRSRV_ERROR +PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value) +{ + /* + We might be ask to PDump sync state outside of capture range + (e.g. texture uploads) so make this continuous. + */ + DevmemPDumpLoadMemValue32(psSyncBlk->psMemDesc, + ui32Offset, + ui32Value, + PDUMP_FLAGS_CONTINUOUS); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset) +{ + /* + We might be ask to PDump sync state outside of capture range + (e.g. texture uploads) so make this continuous. 
+ */ + DevmemPDumpLoadMem(psSyncBlk->psMemDesc, + ui32Offset, + sizeof(IMG_UINT32), + PDUMP_FLAGS_CONTINUOUS); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T ui32PDumpFlags) +{ + DevmemPDumpDevmemPol32(psSyncBlk->psMemDesc, + ui32Offset, + ui32Value, + ui32Mask, + eOperator, + ui32PDumpFlags); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset, + IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize, + IMG_UINT64 uiBufferSize) +{ + DevmemPDumpCBP(psSyncBlk->psMemDesc, + ui32Offset, + uiWriteOffset, + uiPacketSize, + uiBufferSize); + return PVRSRV_OK; +} +#endif + +/* SyncRegisterConnection */ +PVRSRV_ERROR SyncRegisterConnection(SYNC_CONNECTION_DATA **ppsSyncConnectionData) +{ + SYNC_CONNECTION_DATA *psSyncConnectionData; + PVRSRV_ERROR eError; + + psSyncConnectionData = OSAllocMem(sizeof(SYNC_CONNECTION_DATA)); + if (psSyncConnectionData == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc; + } + + eError = OSLockCreate(&psSyncConnectionData->hLock); + PVR_GOTO_IF_ERROR(eError, fail_lockcreate); + dllist_init(&psSyncConnectionData->sListHead); + OSAtomicWrite(&psSyncConnectionData->sRefCount, 1); + + *ppsSyncConnectionData = psSyncConnectionData; + return PVRSRV_OK; + +fail_lockcreate: + OSFreeMem(psSyncConnectionData); +fail_alloc: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +/* SyncUnregisterConnection */ +void SyncUnregisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData) +{ + _SyncConnectionUnref(psSyncConnectionData); +} + +void SyncConnectionPDumpSyncBlocks(void *hSyncPrivData, PDUMP_TRANSITION_EVENT eEvent) +{ + if ((eEvent == PDUMP_TRANSITION_EVENT_RANGE_ENTERED) || (eEvent == PDUMP_TRANSITION_EVENT_BLOCK_STARTED)) + { + SYNC_CONNECTION_DATA *psSyncConnectionData = hSyncPrivData; + 
DLLIST_NODE *psNode, *psNext; + + OSLockAcquire(psSyncConnectionData->hLock); + + PDUMPCOMMENT("Dump client Sync Prim state"); + dllist_foreach_node(&psSyncConnectionData->sListHead, psNode, psNext) + { + SYNC_PRIMITIVE_BLOCK *psSyncBlock = + IMG_CONTAINER_OF(psNode, SYNC_PRIMITIVE_BLOCK, sConnectionNode); + + DevmemPDumpLoadMem(psSyncBlock->psMemDesc, + 0, + psSyncBlock->ui32BlockSize, + PDUMP_FLAGS_CONTINUOUS); + } + + OSLockRelease(psSyncConnectionData->hLock); + } +} + +void SyncRecordLookup(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 ui32FwAddr, + IMG_CHAR * pszSyncInfo, size_t len) +{ + DLLIST_NODE *psNode, *psNext; + IMG_INT iEnd; + IMG_BOOL bFound = IMG_FALSE; + + if (!pszSyncInfo) + { + return; + } + + OSLockAcquire(psDevNode->hSyncServerRecordLock); + pszSyncInfo[0] = '\0'; + + dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext) + { + struct SYNC_RECORD *psSyncRec = + IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode); + if ((psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset) == ui32FwAddr + && SYNC_RECORD_TYPE_UNKNOWN != psSyncRec->eRecordType + && psSyncRec->psServerSyncPrimBlock + && psSyncRec->psServerSyncPrimBlock->pui32LinAddr + ) + { + IMG_UINT32 *pui32SyncAddr; + pui32SyncAddr = psSyncRec->psServerSyncPrimBlock->pui32LinAddr + + (psSyncRec->ui32SyncOffset/sizeof(IMG_UINT32)); + iEnd = OSSNPrintf(pszSyncInfo, len, "Cur=0x%08x %s:%05u (%s)", + *pui32SyncAddr, + ((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"), + psSyncRec->uiPID, + psSyncRec->szClassName + ); + if (iEnd >= 0 && iEnd < len) + { + pszSyncInfo[iEnd] = '\0'; + } + bFound = IMG_TRUE; + break; + } + } + + OSLockRelease(psDevNode->hSyncServerRecordLock); + + if (!bFound && (psDevNode->ui32SyncServerRecordCountHighWatermark == SYNC_RECORD_LIMIT)) + { + OSSNPrintf(pszSyncInfo, len, "(Record may be lost)"); + } +} + +#define NS_IN_S (1000000000UL) +static void _SyncRecordPrint(struct SYNC_RECORD *psSyncRec, + IMG_UINT64 ui64TimeNow, + 
DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + SYNC_PRIMITIVE_BLOCK *psSyncBlock = psSyncRec->psServerSyncPrimBlock; + + if (SYNC_RECORD_TYPE_UNKNOWN != psSyncRec->eRecordType) + { + IMG_UINT64 ui64DeltaS; + IMG_UINT32 ui32DeltaF; + IMG_UINT64 ui64Delta = ui64TimeNow - psSyncRec->ui64OSTime; + ui64DeltaS = OSDivide64(ui64Delta, NS_IN_S, &ui32DeltaF); + + if (psSyncBlock && psSyncBlock->pui32LinAddr) + { + IMG_UINT32 *pui32SyncAddr; + pui32SyncAddr = psSyncBlock->pui32LinAddr + + (psSyncRec->ui32SyncOffset/sizeof(IMG_UINT32)); + + PVR_DUMPDEBUG_LOG("\t%s %05u %05" IMG_UINT64_FMTSPEC ".%09u FWAddr=0x%08x Val=0x%08x (%s)", + ((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"), + psSyncRec->uiPID, + ui64DeltaS, ui32DeltaF, + (psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset), + *pui32SyncAddr, + psSyncRec->szClassName + ); + } + else + { + PVR_DUMPDEBUG_LOG("\t%s %05u %05" IMG_UINT64_FMTSPEC ".%09u FWAddr=0x%08x Val= (%s)", + ((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"), + psSyncRec->uiPID, + ui64DeltaS, ui32DeltaF, + (psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset), + psSyncRec->szClassName + ); + } + } +} + +static void _SyncRecordRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle; + IMG_UINT64 ui64TimeNowS; + IMG_UINT32 ui32TimeNowF; + IMG_UINT64 ui64TimeNow = OSClockns64(); + DLLIST_NODE *psNode, *psNext; + + ui64TimeNowS = OSDivide64(ui64TimeNow, NS_IN_S, &ui32TimeNowF); + + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) + { + IMG_UINT32 i; + OSLockAcquire(psDevNode->hSyncServerRecordLock); + + PVR_DUMPDEBUG_LOG("Dumping all allocated syncs. 
Allocated: %u High watermark: %u @ %05" IMG_UINT64_FMTSPEC ".%09u", + psDevNode->ui32SyncServerRecordCount, + psDevNode->ui32SyncServerRecordCountHighWatermark, + ui64TimeNowS, + ui32TimeNowF); + if (psDevNode->ui32SyncServerRecordCountHighWatermark == SYNC_RECORD_LIMIT) + { + PVR_DUMPDEBUG_LOG("Warning: Record limit (%u) was reached. Some sync checkpoints may not have been recorded in the debug information.", + SYNC_RECORD_LIMIT); + } + + PVR_DUMPDEBUG_LOG("\t%-6s %-5s %-15s %-17s %-14s (%s)", + "Type", "PID", "Time Delta (s)", "Address", "Value", "Annotation"); + + dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext) + { + struct SYNC_RECORD *psSyncRec = + IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode); + _SyncRecordPrint(psSyncRec, ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile); + } + + PVR_DUMPDEBUG_LOG("Dumping all recently freed syncs @ %05" IMG_UINT64_FMTSPEC ".%09u", + ui64TimeNowS, ui32TimeNowF); + PVR_DUMPDEBUG_LOG("\t%-6s %-5s %-15s %-17s %-14s (%s)", + "Type", "PID", "Time Delta (s)", "Address", "Value", "Annotation"); + for (i = DECREMENT_WITH_WRAP(psDevNode->uiSyncServerRecordFreeIdx, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN); + i != psDevNode->uiSyncServerRecordFreeIdx; + i = DECREMENT_WITH_WRAP(i, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN)) + { + if (psDevNode->apsSyncServerRecordsFreed[i]) + { + _SyncRecordPrint(psDevNode->apsSyncServerRecordsFreed[i], + ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile); + } + else + { + break; + } + } + + OSLockRelease(psDevNode->hSyncServerRecordLock); + } +} +#undef NS_IN_S + +static PVRSRV_ERROR SyncRecordListInit(PVRSRV_DEVICE_NODE *psDevNode) +{ + PVRSRV_ERROR eError; + + psDevNode->ui32SyncServerRecordCount = 0; + psDevNode->ui32SyncServerRecordCountHighWatermark = 0; + + eError = OSLockCreate(&psDevNode->hSyncServerRecordLock); + PVR_GOTO_IF_ERROR(eError, fail_lock_create); + dllist_init(&psDevNode->sSyncServerRecordList); + + eError = 
PVRSRVRegisterDbgRequestNotify(&psDevNode->hSyncServerRecordNotify, + psDevNode, + _SyncRecordRequest, + DEBUG_REQUEST_SYNCTRACKING, + psDevNode); + + PVR_GOTO_IF_ERROR(eError, fail_dbg_register); + + return PVRSRV_OK; + +fail_dbg_register: + OSLockDestroy(psDevNode->hSyncServerRecordLock); +fail_lock_create: + return eError; +} + +static void SyncRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode) +{ + DLLIST_NODE *psNode, *psNext; + int i; + + OSLockAcquire(psDevNode->hSyncServerRecordLock); + dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext) + { + struct SYNC_RECORD *pSyncRec = + IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode); + + dllist_remove_node(psNode); + OSFreeMem(pSyncRec); + } + + for (i = 0; i < PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; i++) + { + if (psDevNode->apsSyncServerRecordsFreed[i]) + { + OSFreeMem(psDevNode->apsSyncServerRecordsFreed[i]); + psDevNode->apsSyncServerRecordsFreed[i] = NULL; + } + } + OSLockRelease(psDevNode->hSyncServerRecordLock); + + if (psDevNode->hSyncServerRecordNotify) + { + PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncServerRecordNotify); + } + OSLockDestroy(psDevNode->hSyncServerRecordLock); +} + +PVRSRV_ERROR SyncServerInit(PVRSRV_DEVICE_NODE *psDevNode) +{ + PVRSRV_ERROR eError; + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) + { + eError = SyncRecordListInit(psDevNode); + PVR_GOTO_IF_ERROR(eError, fail_record_list); + } + + return PVRSRV_OK; + +fail_record_list: + return eError; +} + +void SyncServerDeinit(PVRSRV_DEVICE_NODE *psDevNode) +{ + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) + { + SyncRecordListDeinit(psDevNode); + } +} diff --git a/drivers/mcst/gpu-imgtec/services/server/common/tlintern.c b/drivers/mcst/gpu-imgtec/services/server/common/tlintern.c new file mode 100644 index 000000000000..70d8b09d6e40 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/tlintern.c @@ -0,0 +1,473 @@ 
+/*************************************************************************/ /*! +@File +@Title Transport Layer kernel side API implementation. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Transport Layer functions available to driver components in + the driver. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +//#define PVR_DPF_FUNCTION_TRACE_ON 1 +#undef PVR_DPF_FUNCTION_TRACE_ON +#include "pvr_debug.h" + +#include "allocmem.h" +#include "pvrsrv_error.h" +#include "osfunc.h" +#include "devicemem.h" + +#include "pvrsrv_tlcommon.h" +#include "tlintern.h" + +/* + * Make functions + */ +PTL_STREAM_DESC +TLMakeStreamDesc(PTL_SNODE f1, IMG_UINT32 f2, IMG_HANDLE f3) +{ + PTL_STREAM_DESC ps = OSAllocZMem(sizeof(TL_STREAM_DESC)); + if (ps == NULL) + { + return NULL; + } + ps->psNode = f1; + ps->ui32Flags = f2; + ps->hReadEvent = f3; + ps->uiRefCount = 1; + + if (f2 & PVRSRV_STREAM_FLAG_READ_LIMIT) + { + ps->ui32ReadLimit = f1->psStream->ui32Write; + } + return ps; +} + +PTL_SNODE +TLMakeSNode(IMG_HANDLE f2, TL_STREAM *f3, TL_STREAM_DESC *f4) +{ + PTL_SNODE ps = OSAllocZMem(sizeof(TL_SNODE)); + if (ps == NULL) + { + return NULL; + } + ps->hReadEventObj = f2; + ps->psStream = f3; + ps->psRDesc = f4; + f3->psNode = ps; + return ps; +} + +/* + * Transport Layer Global top variables and functions + */ +static TL_GLOBAL_DATA sTLGlobalData; + +TL_GLOBAL_DATA *TLGGD(void) /* TLGetGlobalData() */ +{ + return &sTLGlobalData; +} + +/* TLInit must only be called once at driver initialisation. + * An assert is provided to check this condition on debug builds. 
+ */ +PVRSRV_ERROR +TLInit(void) +{ + PVRSRV_ERROR eError; + + PVR_DPF_ENTERED; + + PVR_ASSERT(sTLGlobalData.hTLGDLock == NULL && sTLGlobalData.hTLEventObj == NULL); + + /* Allocate a lock for TL global data, to be used while updating the TL data. + * This is for making TL global data multi-thread safe */ + eError = OSLockCreate(&sTLGlobalData.hTLGDLock); + PVR_GOTO_IF_ERROR(eError, e0); + + /* Allocate the event object used to signal global TL events such as + * a new stream created */ + eError = OSEventObjectCreate("TLGlobalEventObj", &sTLGlobalData.hTLEventObj); + PVR_GOTO_IF_ERROR(eError, e1); + + PVR_DPF_RETURN_OK; + +/* Don't allow the driver to start up on error */ +e1: + OSLockDestroy (sTLGlobalData.hTLGDLock); + sTLGlobalData.hTLGDLock = NULL; +e0: + PVR_DPF_RETURN_RC (eError); +} + +static void RemoveAndFreeStreamNode(PTL_SNODE psRemove) +{ + TL_GLOBAL_DATA* psGD = TLGGD(); + PTL_SNODE* last; + PTL_SNODE psn; + PVRSRV_ERROR eError; + + PVR_DPF_ENTERED; + + /* Unlink the stream node from the master list */ + PVR_ASSERT(psGD->psHead); + last = &psGD->psHead; + for (psn = psGD->psHead; psn; psn=psn->psNext) + { + if (psn == psRemove) + { + /* Other calling code may have freed and zeroed the pointers */ + if (psn->psRDesc) + { + OSFreeMem(psn->psRDesc); + psn->psRDesc = NULL; + } + if (psn->psStream) + { + OSFreeMem(psn->psStream); + psn->psStream = NULL; + } + *last = psn->psNext; + break; + } + last = &psn->psNext; + } + + /* Release the event list object owned by the stream node */ + if (psRemove->hReadEventObj) + { + eError = OSEventObjectDestroy(psRemove->hReadEventObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); + + psRemove->hReadEventObj = NULL; + } + + /* Release the memory of the stream node */ + OSFreeMem(psRemove); + + PVR_DPF_RETURN; +} + +static void FreeGlobalData(void) +{ + PTL_SNODE psCurrent = sTLGlobalData.psHead; + PTL_SNODE psNext; + PVRSRV_ERROR eError; + + PVR_DPF_ENTERED; + + /* Clean up the SNODE list */ + if (psCurrent) + { 
+ while (psCurrent) + { + psNext = psCurrent->psNext; + + /* Other calling code may have freed and zeroed the pointers */ + if (psCurrent->psRDesc) + { + OSFreeMem(psCurrent->psRDesc); + psCurrent->psRDesc = NULL; + } + if (psCurrent->psStream) + { + OSFreeMem(psCurrent->psStream); + psCurrent->psStream = NULL; + } + + /* Release the event list object owned by the stream node */ + if (psCurrent->hReadEventObj) + { + eError = OSEventObjectDestroy(psCurrent->hReadEventObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); + + psCurrent->hReadEventObj = NULL; + } + + OSFreeMem(psCurrent); + psCurrent = psNext; + } + + sTLGlobalData.psHead = NULL; + } + + PVR_DPF_RETURN; +} + +void +TLDeInit(void) +{ + PVR_DPF_ENTERED; + + if (sTLGlobalData.uiClientCnt) + { + PVR_DPF((PVR_DBG_ERROR, "TLDeInit transport layer but %d client streams are still connected", sTLGlobalData.uiClientCnt)); + sTLGlobalData.uiClientCnt = 0; + } + + FreeGlobalData(); + + /* Clean up the TL global event object */ + if (sTLGlobalData.hTLEventObj) + { + OSEventObjectDestroy(sTLGlobalData.hTLEventObj); + sTLGlobalData.hTLEventObj = NULL; + } + + /* Destroy the TL global data lock */ + if (sTLGlobalData.hTLGDLock) + { + OSLockDestroy (sTLGlobalData.hTLGDLock); + sTLGlobalData.hTLGDLock = NULL; + } + + PVR_DPF_RETURN; +} + +void TLAddStreamNode(PTL_SNODE psAdd) +{ + PVR_DPF_ENTERED; + + PVR_ASSERT(psAdd); + psAdd->psNext = TLGGD()->psHead; + TLGGD()->psHead = psAdd; + + PVR_DPF_RETURN; +} + +PTL_SNODE TLFindStreamNodeByName(const IMG_CHAR *pszName) +{ + TL_GLOBAL_DATA* psGD = TLGGD(); + PTL_SNODE psn; + + PVR_DPF_ENTERED; + + PVR_ASSERT(pszName); + + for (psn = psGD->psHead; psn; psn=psn->psNext) + { + if (psn->psStream && OSStringNCompare(psn->psStream->szName, pszName, PRVSRVTL_MAX_STREAM_NAME_SIZE)==0) + { + PVR_DPF_RETURN_VAL(psn); + } + } + + PVR_DPF_RETURN_VAL(NULL); +} + +PTL_SNODE TLFindStreamNodeByDesc(PTL_STREAM_DESC psDesc) +{ + TL_GLOBAL_DATA* psGD = TLGGD(); + PTL_SNODE psn; + + 
PVR_DPF_ENTERED; + + PVR_ASSERT(psDesc); + + for (psn = psGD->psHead; psn; psn=psn->psNext) + { + if (psn->psRDesc == psDesc || psn->psWDesc == psDesc) + { + PVR_DPF_RETURN_VAL(psn); + } + } + PVR_DPF_RETURN_VAL(NULL); +} + +static inline IMG_BOOL IsDigit(IMG_CHAR c) +{ + return c >= '0' && c <= '9'; +} + +static inline IMG_BOOL ReadNumber(const IMG_CHAR *pszBuffer, + IMG_UINT32 *pui32Number) +{ + IMG_CHAR acTmp[11] = {0}; /* max 10 digits */ + IMG_UINT32 ui32Result; + IMG_UINT i; + + for (i = 0; i < sizeof(acTmp) - 1; i++) + { + if (!IsDigit(*pszBuffer)) + break; + acTmp[i] = *pszBuffer++; + } + + /* if there are no digits or there is something after the number */ + if (i == 0 || *pszBuffer != '\0') + return IMG_FALSE; + + if (OSStringToUINT32(acTmp, 10, &ui32Result) != PVRSRV_OK) + return IMG_FALSE; + + *pui32Number = ui32Result; + + return IMG_TRUE; +} + +IMG_UINT32 TLDiscoverStreamNodes(const IMG_CHAR *pszNamePattern, + IMG_CHAR aaszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE], + IMG_UINT32 ui32Max) +{ + TL_GLOBAL_DATA *psGD = TLGGD(); + PTL_SNODE psn; + IMG_UINT32 ui32Count = 0; + size_t uiLen; + + PVR_ASSERT(pszNamePattern); + + if ((uiLen = OSStringLength(pszNamePattern)) == 0) + return 0; + + for (psn = psGD->psHead; psn; psn = psn->psNext) + { + if (OSStringNCompare(pszNamePattern, psn->psStream->szName, uiLen) != 0) + continue; + + /* If aaszStreams is NULL we only count how many string match + * the given pattern. If it's a valid pointer we also return + * the names. 
*/ + if (aaszStreams != NULL) + { + if (ui32Count >= ui32Max) + break; + + /* all of names are shorter than MAX and null terminated */ + OSStringLCopy(aaszStreams[ui32Count], psn->psStream->szName, + PRVSRVTL_MAX_STREAM_NAME_SIZE); + } + + ui32Count++; + } + + return ui32Count; +} + +PTL_SNODE TLFindAndGetStreamNodeByDesc(PTL_STREAM_DESC psDesc) +{ + PTL_SNODE psn; + + PVR_DPF_ENTERED; + + psn = TLFindStreamNodeByDesc(psDesc); + if (psn == NULL) + PVR_DPF_RETURN_VAL(NULL); + + PVR_ASSERT(psDesc == psn->psWDesc); + + psn->uiWRefCount++; + psDesc->uiRefCount++; + + PVR_DPF_RETURN_VAL(psn); +} + +void TLReturnStreamNode(PTL_SNODE psNode) +{ + psNode->uiWRefCount--; + psNode->psWDesc->uiRefCount--; + + PVR_ASSERT(psNode->uiWRefCount > 0); + PVR_ASSERT(psNode->psWDesc->uiRefCount > 0); +} + +IMG_BOOL TLTryRemoveStreamAndFreeStreamNode(PTL_SNODE psRemove) +{ + PVR_DPF_ENTERED; + + PVR_ASSERT(psRemove); + + /* If there is a client connected to this stream, defer stream's deletion */ + if (psRemove->psRDesc != NULL || psRemove->psWDesc != NULL) + { + PVR_DPF_RETURN_VAL(IMG_FALSE); + } + + /* Remove stream from TL_GLOBAL_DATA's list and free stream node */ + psRemove->psStream = NULL; + RemoveAndFreeStreamNode(psRemove); + + PVR_DPF_RETURN_VAL(IMG_TRUE); +} + +IMG_BOOL TLUnrefDescAndTryFreeStreamNode(PTL_SNODE psNodeToRemove, + PTL_STREAM_DESC psSD) +{ + PVR_DPF_ENTERED; + + PVR_ASSERT(psNodeToRemove); + PVR_ASSERT(psSD); + + /* Decrement reference count. For descriptor obtained by reader it must + * reach 0 (only single reader allowed) and for descriptors obtained by + * writers it must reach value greater or equal to 0 (multiple writers + * model). */ + psSD->uiRefCount--; + + if (psSD == psNodeToRemove->psRDesc) + { + PVR_ASSERT(0 == psSD->uiRefCount); + /* Remove stream descriptor (i.e. 
stream reader context) */ + psNodeToRemove->psRDesc = NULL; + } + else if (psSD == psNodeToRemove->psWDesc) + { + PVR_ASSERT(0 <= psSD->uiRefCount); + + psNodeToRemove->uiWRefCount--; + + /* Remove stream descriptor if reference == 0 */ + if (0 == psSD->uiRefCount) + { + psNodeToRemove->psWDesc = NULL; + } + } + + /* Do not Free Stream Node if there is a write reference (a producer + * context) to the stream */ + if (NULL != psNodeToRemove->psRDesc || NULL != psNodeToRemove->psWDesc || + 0 != psNodeToRemove->uiWRefCount) + { + PVR_DPF_RETURN_VAL(IMG_FALSE); + } + + /* Make stream pointer NULL to prevent it from being destroyed in + * RemoveAndFreeStreamNode. Cleanup of stream should be done by the + * calling context */ + psNodeToRemove->psStream = NULL; + RemoveAndFreeStreamNode(psNodeToRemove); + + PVR_DPF_RETURN_VAL(IMG_TRUE); +} diff --git a/drivers/mcst/gpu-imgtec/services/server/common/tlserver.c b/drivers/mcst/gpu-imgtec/services/server/common/tlserver.c new file mode 100644 index 000000000000..bff5b0d83184 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/tlserver.c @@ -0,0 +1,751 @@ +/*************************************************************************/ /*! +@File +@Title KM server Transport Layer implementation +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Main bridge APIs for Transport Layer client functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "img_defs.h" + +/*#define PVR_DPF_FUNCTION_TRACE_ON 1*/ +#undef PVR_DPF_FUNCTION_TRACE_ON +#include "pvr_debug.h" + +#include "connection_server.h" +#include "allocmem.h" +#include "devicemem.h" + +#include "tlintern.h" +#include "tlstream.h" +#include "tlserver.h" + +#include "pvrsrv_tlstreams.h" +#define NO_STREAM_WAIT_PERIOD_US 2000000ULL +#define NO_DATA_WAIT_PERIOD_US 500000ULL +#define NO_ACQUIRE 0xffffffffU + + +/* + * Transport Layer Client API Kernel-Mode bridge implementation + */ +PVRSRV_ERROR +TLServerOpenStreamKM(const IMG_CHAR* pszName, + IMG_UINT32 ui32Mode, + PTL_STREAM_DESC* ppsSD, + PMR** ppsTLPMR) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_ERROR eErrorEO = PVRSRV_OK; + PTL_SNODE psNode; + PTL_STREAM psStream; + TL_STREAM_DESC *psNewSD = NULL; + IMG_HANDLE hEvent; + IMG_BOOL bIsWriteOnly = ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ? + IMG_TRUE : IMG_FALSE; + IMG_BOOL bResetOnOpen = ui32Mode & PVRSRV_STREAM_FLAG_RESET_ON_OPEN ? + IMG_TRUE : IMG_FALSE; + IMG_BOOL bNoOpenCB = ui32Mode & PVRSRV_STREAM_FLAG_IGNORE_OPEN_CALLBACK ? + IMG_TRUE : IMG_FALSE; + PTL_GLOBAL_DATA psGD = TLGGD(); + +#if defined(PVR_DPF_FUNCTION_TRACE_ON) + PVR_DPF((PVR_DBG_CALLTRACE, "--> %s:%d entered (%s, %x)", __func__, __LINE__, pszName, ui32Mode)); +#endif + + PVR_ASSERT(pszName); + + /* Acquire TL_GLOBAL_DATA lock here, as if the following TLFindStreamNodeByName + * returns NON NULL PTL_SNODE, we try updating the global data client count and + * PTL_SNODE's psRDesc and we want to make sure the TL_SNODE is valid (eg. 
has + * not been deleted) while we are updating it + */ + OSLockAcquire (psGD->hTLGDLock); + + psNode = TLFindStreamNodeByName(pszName); + if ((psNode == NULL) && (ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WAIT)) + { /* Blocking code to wait for stream to be created if it does not exist */ + eError = OSEventObjectOpen(psGD->hTLEventObj, &hEvent); + PVR_LOG_GOTO_IF_ERROR (eError, "OSEventObjectOpen", e0); + + do + { + if ((psNode = TLFindStreamNodeByName(pszName)) == NULL) + { + PVR_DPF((PVR_DBG_MESSAGE, "Stream %s does not exist, waiting...", pszName)); + + /* Release TL_GLOBAL_DATA lock before sleeping */ + OSLockRelease (psGD->hTLGDLock); + + /* Will exit OK or with timeout, both cases safe to ignore */ + eErrorEO = OSEventObjectWaitTimeout(hEvent, NO_STREAM_WAIT_PERIOD_US); + + /* Acquire lock after waking up */ + OSLockAcquire (psGD->hTLGDLock); + } + } + while ((psNode == NULL) && (eErrorEO == PVRSRV_OK)); + + eError = OSEventObjectClose(hEvent); + PVR_LOG_GOTO_IF_ERROR (eError, "OSEventObjectClose", e0); + } + + /* Make sure we have found a stream node after wait/search */ + if (psNode == NULL) + { + /* Did we exit the wait with timeout, inform caller */ + if (eErrorEO == PVRSRV_ERROR_TIMEOUT) + { + eError = eErrorEO; + } + else + { + eError = PVRSRV_ERROR_NOT_FOUND; + PVR_DPF((PVR_DBG_ERROR, "Stream \"%s\" does not exist", pszName)); + } + goto e0; + } + + psStream = psNode->psStream; + + /* Allocate memory for the stream. The memory will be allocated with the + * first call. */ + eError = TLAllocSharedMemIfNull(psStream); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to allocate memory for stream" + " \"%s\"", pszName)); + goto e0; + } + + if (bIsWriteOnly) + { + + /* If psWDesc == NULL it means that this is the first attempt + * to open stream for write. If yes create the descriptor or increment + * reference count otherwise. 
*/ + if (psNode->psWDesc == NULL) + { + psNewSD = TLMakeStreamDesc(psNode, ui32Mode, NULL); + psNode->psWDesc = psNewSD; + } + else + { + psNewSD = psNode->psWDesc; + psNode->psWDesc->uiRefCount++; + } + + PVR_LOG_GOTO_IF_NOMEM(psNewSD, eError, e0); + + psNode->uiWRefCount++; + } + else + { + /* Only one reader per stream supported */ + if (psNode->psRDesc != NULL) + { + PVR_DPF((PVR_DBG_ERROR, "Cannot open \"%s\" stream, stream already" + " opened", pszName)); + eError = PVRSRV_ERROR_ALREADY_OPEN; + goto e0; + } + + /* Create an event handle for this client to wait on when no data in + * stream buffer. */ + eError = OSEventObjectOpen(psNode->hReadEventObj, &hEvent); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Not possible to open node's event object")); + eError = PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT; + goto e0; + } + + psNewSD = TLMakeStreamDesc(psNode, ui32Mode, hEvent); + psNode->psRDesc = psNewSD; + + if (!psNewSD) + { + PVR_DPF((PVR_DBG_ERROR, "Not possible to make a new stream descriptor")); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e1; + } + + PVR_DPF((PVR_DBG_VERBOSE, + "TLServerOpenStreamKM evList=%p, evObj=%p", + psNode->hReadEventObj, + psNode->psRDesc->hReadEvent)); + } + + /* Copy the import handle back to the user mode API to enable access to + * the stream buffer from user-mode process. */ + eError = DevmemLocalGetImportHandle(TLStreamGetBufferPointer(psStream), + (void**) ppsTLPMR); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemLocalGetImportHandle", e2); + + psGD->uiClientCnt++; + + /* Global data updated. Now release global lock */ + OSLockRelease (psGD->hTLGDLock); + + *ppsSD = psNewSD; + + if (bResetOnOpen) + { + TLStreamReset(psStream); + } + + /* This callback is executed only on reader open. There are some actions + * executed on reader open that don't make much sense for writers e.g. + * injection on time synchronisation packet into the stream. 
*/ + if (!bIsWriteOnly && psStream->pfOnReaderOpenCallback != NULL && !bNoOpenCB) + { + psStream->pfOnReaderOpenCallback(psStream->pvOnReaderOpenUserData); + } + + /* psNode->uiWRefCount is set to '1' on stream create so the first open + * is '2'. */ + if (bIsWriteOnly && psStream->psNotifStream != NULL && + psNode->uiWRefCount == 2) + { + TLStreamMarkStreamOpen(psStream); + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Stream %s opened for %s", __func__, pszName, + ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ? "write" : "read")); + + PVR_DPF_RETURN_OK; + +e2: + OSFreeMem(psNewSD); +e1: + if (!bIsWriteOnly) + OSEventObjectClose(hEvent); +e0: + OSLockRelease (psGD->hTLGDLock); + PVR_DPF_RETURN_RC (eError); +} + +PVRSRV_ERROR +TLServerCloseStreamKM(PTL_STREAM_DESC psSD) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PTL_GLOBAL_DATA psGD = TLGGD(); + PTL_SNODE psNode; + PTL_STREAM psStream; + IMG_BOOL bDestroyStream; + IMG_BOOL bIsWriteOnly = psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO ? + IMG_TRUE : IMG_FALSE; + + PVR_DPF_ENTERED; + + PVR_ASSERT(psSD); + + /* Sanity check, quick exit if there are no streams */ + if (psGD->psHead == NULL) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); + } + + /* Check stream still valid */ + psNode = TLFindStreamNodeByDesc(psSD); + if ((psNode == NULL) || (psNode != psSD->psNode)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); + } + + /* Since the descriptor is valid, the stream should not have been made NULL */ + PVR_ASSERT (psNode->psStream); + + /* Save the stream's reference in-case its destruction is required after this + * client is removed */ + psStream = psNode->psStream; + + /* Acquire TL_GLOBAL_DATA lock as the following TLRemoveDescAndTryFreeStreamNode + * call will update the TL_SNODE's descriptor value */ + OSLockAcquire (psGD->hTLGDLock); + + /* Close event handle because event object list might be destroyed in + * TLUnrefDescAndTryFreeStreamNode(). 
*/ + if (!bIsWriteOnly) + { + /* Reset the read position on close if the stream requires it. */ + TLStreamResetReadPos(psStream); + + /* Close and free the event handle resource used by this descriptor */ + eError = OSEventObjectClose(psSD->hReadEvent); + if (eError != PVRSRV_OK) + { + /* Log error but continue as it seems best */ + PVR_DPF((PVR_DBG_ERROR, "OSEventObjectClose() failed error %d", + eError)); + eError = PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT; + } + } + else if (psNode->uiWRefCount == 2 && psStream->psNotifStream != NULL) + { + /* psNode->uiWRefCount is set to '1' on stream create so the last close + * before destruction is '2'. */ + TLStreamMarkStreamClose(psStream); + } + + /* Remove descriptor from stream object/list */ + bDestroyStream = TLUnrefDescAndTryFreeStreamNode (psNode, psSD); + + /* Assert the counter is sane after input data validated. */ + PVR_ASSERT(psGD->uiClientCnt > 0); + psGD->uiClientCnt--; + + OSLockRelease (psGD->hTLGDLock); + + /* Destroy the stream if its TL_SNODE was removed from TL_GLOBAL_DATA */ + if (bDestroyStream) + { + TLStreamDestroy (psStream); + psStream = NULL; + } + + PVR_DPF((PVR_DBG_VERBOSE, "%s: Stream closed", __func__)); + + /* Free the descriptor if ref count reaches 0. */ + if (psSD->uiRefCount == 0) + { + /* Free the stream descriptor object */ + OSFreeMem(psSD); + } + + PVR_DPF_RETURN_RC(eError); +} + +PVRSRV_ERROR +TLServerReserveStreamKM(PTL_STREAM_DESC psSD, + IMG_UINT32* ui32BufferOffset, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32SizeMin, + IMG_UINT32* pui32Available) +{ + TL_GLOBAL_DATA* psGD = TLGGD(); + PTL_SNODE psNode; + IMG_UINT8* pui8Buffer = NULL; + PVRSRV_ERROR eError; + + PVR_DPF_ENTERED; + + PVR_ASSERT(psSD); + + if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + /* Sanity check, quick exit if there are no streams */ + if (psGD->psHead == NULL) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); + } + + /* Acquire the global lock. 
We have to be sure that no one modifies + * the list while we are looking for our stream. */ + OSLockAcquire(psGD->hTLGDLock); + /* Check stream still valid */ + psNode = TLFindAndGetStreamNodeByDesc(psSD); + OSLockRelease(psGD->hTLGDLock); + + if ((psNode == NULL) || (psNode != psSD->psNode)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); + } + + + /* Since we have a valid stream descriptor, the stream should not have been + * made NULL by any producer context. */ + PVR_ASSERT (psNode->psStream); + + /* The TL writers that currently land here are at a very low to none risk + * to breach max TL packet size constraint (even if there is no reader + * connected to the TL stream and hence eventually will cause the TL stream + * to be full). Hence no need to know the status of TL stream reader + * connection. + */ + eError = TLStreamReserve2(psNode->psStream, &pui8Buffer, ui32Size, + ui32SizeMin, pui32Available, NULL); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "Failed to reserve %u (%u, %u) bytes in the stream, error %s.", + ui32Size, ui32SizeMin, *pui32Available, PVRSRVGETERRORSTRING(eError))); + } + else if (pui8Buffer == NULL) + { + PVR_DPF((PVR_DBG_WARNING, "Not enough space in the stream.")); + eError = PVRSRV_ERROR_STREAM_FULL; + } + else + { + *ui32BufferOffset = pui8Buffer - psNode->psStream->pbyBuffer; + PVR_ASSERT(*ui32BufferOffset < psNode->psStream->ui32Size); + } + + OSLockAcquire(psGD->hTLGDLock); + TLReturnStreamNode(psNode); + OSLockRelease(psGD->hTLGDLock); + + PVR_DPF_RETURN_RC(eError); +} + +PVRSRV_ERROR +TLServerCommitStreamKM(PTL_STREAM_DESC psSD, + IMG_UINT32 ui32Size) +{ + TL_GLOBAL_DATA* psGD = TLGGD(); + PTL_SNODE psNode; + PVRSRV_ERROR eError; + + PVR_DPF_ENTERED; + + PVR_ASSERT(psSD); + + if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + /* Sanity check, quick exit if there are no streams */ + if (psGD->psHead == NULL) + { + 
PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); + } + + /* Acquire the global lock. We have to be sure that no one modifies + * the list while we are looking for our stream. */ + OSLockAcquire(psGD->hTLGDLock); + /* Check stream still valid */ + psNode = TLFindAndGetStreamNodeByDesc(psSD); + OSLockRelease(psGD->hTLGDLock); + + if ((psNode == NULL) || (psNode != psSD->psNode)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); + } + + /* Since we have a valid stream descriptor, the stream should not have been + * made NULL by any producer context. */ + PVR_ASSERT (psNode->psStream); + + eError = TLStreamCommit(psNode->psStream, ui32Size); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to commit data into stream.")); + } + + OSLockAcquire(psGD->hTLGDLock); + TLReturnStreamNode(psNode); + OSLockRelease(psGD->hTLGDLock); + + PVR_DPF_RETURN_RC(eError); +} + +PVRSRV_ERROR +TLServerDiscoverStreamsKM(const IMG_CHAR *pszNamePattern, + IMG_UINT32 ui32Size, + IMG_CHAR *pszStreams, + IMG_UINT32 *pui32NumFound) +{ + PTL_SNODE psNode = NULL; + IMG_CHAR (*paszStreams)[PRVSRVTL_MAX_STREAM_NAME_SIZE] = + (IMG_CHAR (*)[PRVSRVTL_MAX_STREAM_NAME_SIZE]) (void *)pszStreams; + + if (*pszNamePattern == '\0') + return PVRSRV_ERROR_INVALID_PARAMS; + + if (ui32Size % PRVSRVTL_MAX_STREAM_NAME_SIZE != 0) + return PVRSRV_ERROR_INVALID_PARAMS; + + /* Sanity check, quick exit if there are no streams */ + if (TLGGD()->psHead == NULL) + { + *pui32NumFound = 0; + return PVRSRV_OK; + } + + OSLockAcquire(TLGGD()->hTLGDLock); + + *pui32NumFound = TLDiscoverStreamNodes(pszNamePattern, paszStreams, + ui32Size / PRVSRVTL_MAX_STREAM_NAME_SIZE); + + /* Find "tlctrl" stream and reset it */ + psNode = TLFindStreamNodeByName(PVRSRV_TL_CTLR_STREAM); + if (psNode != NULL) + TLStreamReset(psNode->psStream); + + OSLockRelease(TLGGD()->hTLGDLock); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +TLServerAcquireDataKM(PTL_STREAM_DESC psSD, + IMG_UINT32* puiReadOffset, + IMG_UINT32* puiReadLen) +{ + 
PVRSRV_ERROR eError = PVRSRV_OK; + TL_GLOBAL_DATA* psGD = TLGGD(); + IMG_UINT32 uiTmpOffset; + IMG_UINT32 uiTmpLen = 0; + PTL_SNODE psNode; + + PVR_DPF_ENTERED; + + PVR_ASSERT(psSD); + + TL_COUNTER_INC(psSD->ui32AcquireCount); + + /* Sanity check, quick exit if there are no streams */ + if (psGD->psHead == NULL) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); + } + + /* Check stream still valid */ + psNode = TLFindStreamNodeByDesc(psSD); + if ((psNode == NULL) || (psNode != psSD->psNode)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); + } + + /* If we are here, the stream will never be made NULL until this context itself + * calls TLRemoveDescAndTryFreeStreamNode(). This is because the producer will + * fail to make the stream NULL (by calling TLTryRemoveStreamAndFreeStreamNode) + * when a valid stream descriptor is present (i.e. a client is connected). + * Hence, no checks for stream being NON NULL are required after this. */ + PVR_ASSERT (psNode->psStream); + + psSD->ui32ReadLen = 0; /* Handle NULL read returns */ + + do + { + uiTmpLen = TLStreamAcquireReadPos(psNode->psStream, psSD->ui32Flags & PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK, &uiTmpOffset); + + /* Check we have not already exceeded read limit with just offset + * regardless of data length to ensure the client sees the RC */ + if (psSD->ui32Flags & PVRSRV_STREAM_FLAG_READ_LIMIT) + { + /* Check to see if we are reading beyond the read limit */ + if (uiTmpOffset >= psSD->ui32ReadLimit) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_READLIMIT_REACHED); + } + } + + if (uiTmpLen > 0) + { /* Data found */ + + /* Check we have not already exceeded read limit offset+len */ + if (psSD->ui32Flags & PVRSRV_STREAM_FLAG_READ_LIMIT) + { + /* Adjust the read length if it goes beyond the read limit + * limit always guaranteed to be on packet */ + if ((uiTmpOffset + uiTmpLen) >= psSD->ui32ReadLimit) + { + uiTmpLen = psSD->ui32ReadLimit - uiTmpOffset; + } + } + + *puiReadOffset = uiTmpOffset; + *puiReadLen 
= uiTmpLen; + psSD->ui32ReadLen = uiTmpLen; /* Save the original data length in the stream desc */ + PVR_DPF_RETURN_OK; + } + else if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING)) + { /* No data found blocking */ + + /* Instead of doing a complete sleep for `NO_DATA_WAIT_PERIOD_US` us, we sleep in chunks + * of 168 ms. In a "deferred" signal scenario from writer, this gives us a chance to + * wake-up (timeout) early and continue reading in-case some data is available */ + IMG_UINT64 ui64WaitInChunksUs = MIN(NO_DATA_WAIT_PERIOD_US, 168000ULL); + IMG_BOOL bDataFound = IMG_FALSE; + + TL_COUNTER_INC(psSD->ui32NoDataSleep); + + LOOP_UNTIL_TIMEOUT(NO_DATA_WAIT_PERIOD_US) + { + eError = OSEventObjectWaitTimeout(psSD->hReadEvent, ui64WaitInChunksUs); + if (eError == PVRSRV_OK) + { + bDataFound = IMG_TRUE; + TL_COUNTER_INC(psSD->ui32Signalled); + break; + } + else if (eError == PVRSRV_ERROR_TIMEOUT) + { + if (TLStreamOutOfData(psNode->psStream)) + { + /* Return on timeout if stream empty, else let while exit and return data */ + continue; + } + else + { + bDataFound = IMG_TRUE; + TL_COUNTER_INC(psSD->ui32TimeoutData); + PVR_DPF((PVR_DBG_MESSAGE, "%s: Data found at timeout. 
Current BuffUt = %u", + __func__, TLStreamGetUT(psNode->psStream))); + break; + } + } + else + { /* Some other system error with event objects */ + PVR_DPF_RETURN_RC(eError); + } + } END_LOOP_UNTIL_TIMEOUT(); + + if (bDataFound) + { + continue; + } + else + { + TL_COUNTER_INC(psSD->ui32TimeoutEmpty); + return PVRSRV_ERROR_TIMEOUT; + } + } + else + { /* No data non-blocking */ + TL_COUNTER_INC(psSD->ui32NoData); + + /* When no-data in non-blocking mode, uiReadOffset should be set to NO_ACQUIRE + * signifying there's no need of Release call */ + *puiReadOffset = NO_ACQUIRE; + *puiReadLen = 0; + PVR_DPF_RETURN_OK; + } + } + while (1); +} + +PVRSRV_ERROR +TLServerReleaseDataKM(PTL_STREAM_DESC psSD, + IMG_UINT32 uiReadOffset, + IMG_UINT32 uiReadLen) +{ + TL_GLOBAL_DATA* psGD = TLGGD(); + PTL_SNODE psNode; + + PVR_DPF_ENTERED; + + /* Unreferenced in release builds */ + PVR_UNREFERENCED_PARAMETER(uiReadOffset); + + PVR_ASSERT(psSD); + + /* Sanity check, quick exit if there are no streams */ + if (psGD->psHead == NULL) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); + } + + if ((uiReadLen % PVRSRVTL_PACKET_ALIGNMENT != 0)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + /* Check stream still valid */ + psNode = TLFindStreamNodeByDesc(psSD); + if ((psNode == NULL) || (psNode != psSD->psNode)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); + } + + /* Since we have a valid stream descriptor, the stream should not have been + * made NULL by any producer context. 
*/ + PVR_ASSERT (psNode->psStream); + + PVR_DPF((PVR_DBG_VERBOSE, "TLReleaseDataKM uiReadOffset=%d, uiReadLen=%d", uiReadOffset, uiReadLen)); + + /* Move read position on to free up space in stream buffer */ + PVR_DPF_RETURN_RC(TLStreamAdvanceReadPos(psNode->psStream, uiReadLen, psSD->ui32ReadLen)); +} + +PVRSRV_ERROR +TLServerWriteDataKM(PTL_STREAM_DESC psSD, + IMG_UINT32 ui32Size, + IMG_BYTE* pui8Data) +{ + TL_GLOBAL_DATA* psGD = TLGGD(); + PTL_SNODE psNode; + PVRSRV_ERROR eError; + + PVR_DPF_ENTERED; + + PVR_ASSERT(psSD); + + if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + /* Sanity check, quick exit if there are no streams */ + if (psGD->psHead == NULL) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); + } + + OSLockAcquire(psGD->hTLGDLock); + /* Check stream still valid */ + psNode = TLFindAndGetStreamNodeByDesc(psSD); + OSLockRelease(psGD->hTLGDLock); + + if ((psNode == NULL) || (psNode != psSD->psNode)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); + } + + /* Since we have a valid stream descriptor, the stream should not have been + * made NULL by any producer context. 
*/ + PVR_ASSERT (psNode->psStream); + + eError = TLStreamWrite(psNode->psStream, pui8Data, ui32Size); + PVR_LOG_IF_ERROR(eError, "TLStreamWrite"); + + OSLockAcquire(psGD->hTLGDLock); + TLReturnStreamNode(psNode); + OSLockRelease(psGD->hTLGDLock); + + PVR_DPF_RETURN_RC(eError); +} + +/****************************************************************************** + End of file (tlserver.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/common/tlstream.c b/drivers/mcst/gpu-imgtec/services/server/common/tlstream.c new file mode 100644 index 000000000000..bf28bb9ddec3 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/tlstream.c @@ -0,0 +1,1624 @@ +/*************************************************************************/ /*! +@File +@Title Transport Layer kernel side API implementation. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Transport Layer API implementation. + These functions are provided to driver components. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +//#define PVR_DPF_FUNCTION_TRACE_ON 1 +#undef PVR_DPF_FUNCTION_TRACE_ON +#include "pvr_debug.h" + +#include "allocmem.h" +#include "devicemem.h" +#include "pvrsrv_error.h" +#include "osfunc.h" +#include "log2.h" + +#include "tlintern.h" +#include "tlstream.h" + +#include "pvrsrv.h" + +#define EVENT_OBJECT_TIMEOUT_US 1000000ULL +#define READ_PENDING_TIMEOUT_US 100000ULL + +/*! Compute maximum TL packet size for this stream. Max packet size will be + * minimum of PVRSRVTL_MAX_PACKET_SIZE and (BufferSize / 2.5). 
This computation + * is required to avoid a corner case that was observed when TL buffer size is + * smaller than twice of TL max packet size and read, write index are positioned + * in such a way that the TL packet (write packet + padding packet) size is may + * be bigger than the buffer size itself. + */ +#define GET_TL_MAX_PACKET_SIZE( bufSize ) PVRSRVTL_ALIGN( MIN( PVRSRVTL_MAX_PACKET_SIZE, ( 2 * bufSize ) / 5 ) ) + +/* Given the state of the buffer it returns a number of bytes that the client + * can use for a successful allocation. */ +static INLINE IMG_UINT32 suggestAllocSize(IMG_UINT32 ui32LRead, + IMG_UINT32 ui32LWrite, + IMG_UINT32 ui32CBSize, + IMG_UINT32 ui32ReqSizeMin, + IMG_UINT32 ui32MaxPacketSize) +{ + IMG_UINT32 ui32AvSpace = 0; + + /* This could be written in fewer lines using the ? operator but it + would not be kind to potential readers of this source at all. */ + if (ui32LRead > ui32LWrite) /* Buffer WRAPPED */ + { + if ((ui32LRead - ui32LWrite) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE)) + { + ui32AvSpace = ui32LRead - ui32LWrite - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE; + } + } + else /* Normal, no wrap */ + { + if ((ui32CBSize - ui32LWrite) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE)) + { + ui32AvSpace = ui32CBSize - ui32LWrite - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE; + } + else if ((ui32LRead - 0) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE)) + { + ui32AvSpace = ui32LRead - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE; + } + } + /* The max size of a TL packet currently is UINT16. adjust accordingly */ + return MIN(ui32AvSpace, ui32MaxPacketSize); +} + +/* Returns bytes left in the buffer. Negative if there is not any. 
+ * two 8b aligned values are reserved, one for the write failed buffer flag + * and one to be able to distinguish the buffer full state to the buffer + * empty state. + * Always returns free space -8 even when the "write failed" packet may be + * already in the stream before this write. */ +static INLINE IMG_INT +circbufSpaceLeft(IMG_UINT32 ui32Read, IMG_UINT32 ui32Write, IMG_UINT32 ui32size) +{ + /* We need to reserve 8b (one packet) in the buffer to be able to tell empty + * buffers from full buffers and one more for packet write fail packet */ + if (ui32Read > ui32Write) + { + return (IMG_INT)ui32Read - (IMG_INT)ui32Write - (IMG_INT)BUFFER_RESERVED_SPACE; + } + else + { + return (IMG_INT)ui32size - ((IMG_INT)ui32Write - (IMG_INT)ui32Read) - (IMG_INT)BUFFER_RESERVED_SPACE; + } +} + +IMG_UINT32 TLStreamGetUT(IMG_HANDLE hStream) +{ + PTL_STREAM psStream = (PTL_STREAM) hStream; + IMG_UINT32 ui32LRead = psStream->ui32Read, ui32LWrite = psStream->ui32Write; + + if (ui32LWrite >= ui32LRead) + { + return (ui32LWrite-ui32LRead); + } + else + { + return (psStream->ui32Size-ui32LRead+ui32LWrite); + } +} + +PVRSRV_ERROR TLAllocSharedMemIfNull(IMG_HANDLE hStream) +{ + PTL_STREAM psStream = (PTL_STREAM) hStream; + PVRSRV_ERROR eError; + + /* CPU Local memory used as these buffers are not accessed by the device. + * CPU Uncached write combine memory used to improve write performance, + * memory barrier added in TLStreamCommit to ensure data written to memory + * before CB write point is updated before consumption by the reader. + */ + IMG_CHAR pszBufferLabel[PRVSRVTL_MAX_STREAM_NAME_SIZE + 20]; + DEVMEM_FLAGS_T uiMemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_CPU_LOCAL; /* TL for now is only used by host driver, so cpulocal mem suffices */ + + /* Exit if memory has already been allocated. 
*/ + if (psStream->pbyBuffer != NULL) + return PVRSRV_OK; + + OSSNPrintf(pszBufferLabel, sizeof(pszBufferLabel), "TLStreamBuf-%s", + psStream->szName); + + + /* Use HostMemDeviceNode instead of psStream->psDevNode to benefit from faster + * accesses to CPU local memory. When the framework to access CPU_LOCAL device + * memory from GPU is fixed, we'll switch back to use psStream->psDevNode for + * TL buffers */ + eError = DevmemAllocateExportable((IMG_HANDLE)PVRSRVGetPVRSRVData()->psHostMemDeviceNode, + (IMG_DEVMEM_SIZE_T) psStream->ui32Size, + (IMG_DEVMEM_ALIGN_T) OSGetPageSize(), + ExactLog2(OSGetPageSize()), + uiMemFlags, + pszBufferLabel, + &psStream->psStreamMemDesc); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAllocateExportable", e0); + + eError = DevmemAcquireCpuVirtAddr(psStream->psStreamMemDesc, + (void**) &psStream->pbyBuffer); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e1); + + return PVRSRV_OK; + +e1: + DevmemFree(psStream->psStreamMemDesc); +e0: + return eError; +} + +void TLFreeSharedMem(IMG_HANDLE hStream) +{ + PTL_STREAM psStream = (PTL_STREAM) hStream; + + if (psStream->pbyBuffer != NULL) + { + DevmemReleaseCpuVirtAddr(psStream->psStreamMemDesc); + psStream->pbyBuffer = NULL; + } + if (psStream->psStreamMemDesc != NULL) + { + DevmemFree(psStream->psStreamMemDesc); + psStream->psStreamMemDesc = NULL; + } +} + +/* Special space left routine for TL_FLAG_PERMANENT_NO_WRAP streams */ +static INLINE IMG_UINT +bufSpaceLeft(IMG_UINT32 ui32Read, IMG_UINT32 ui32Write, IMG_UINT32 ui32size) +{ + /* buffers from full buffers and one more for packet write fail packet */ + PVR_ASSERT(ui32Read<=ui32Write); + return ui32size - ui32Write; +} + +/******************************************************************************* + * TL Server public API implementation. 
+ ******************************************************************************/ +PVRSRV_ERROR +TLStreamCreate(IMG_HANDLE *phStream, + PVRSRV_DEVICE_NODE *psDevNode, + const IMG_CHAR *szStreamName, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32StreamFlags, + TL_STREAM_ONREADEROPENCB pfOnReaderOpenCB, + void *pvOnReaderOpenUD, + TL_STREAM_SOURCECB pfProducerCB, + void *pvProducerUD) +{ + PTL_STREAM psTmp; + PVRSRV_ERROR eError; + IMG_HANDLE hEventList; + PTL_SNODE psn; + TL_OPMODE eOpMode; + + PVR_DPF_ENTERED; + /* Sanity checks: */ + /* non NULL handler required */ + if (NULL == phStream) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + if (szStreamName == NULL || *szStreamName == '\0' || + OSStringLength(szStreamName) >= PRVSRVTL_MAX_STREAM_NAME_SIZE) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + if (NULL == psDevNode) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + eOpMode = ui32StreamFlags & TL_OPMODE_MASK; + if (( eOpMode <= TL_OPMODE_UNDEF ) || ( eOpMode >= TL_OPMODE_LAST )) + { + PVR_DPF((PVR_DBG_ERROR, "OpMode for TL stream is invalid")); + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + /* Acquire TL_GLOBAL_DATA lock here because, if the following TLFindStreamNodeByName() + * returns NULL, a new TL_SNODE will be added to TL_GLOBAL_DATA's TL_SNODE list */ + OSLockAcquire (TLGGD()->hTLGDLock); + + /* Check if there already exists a stream with this name. 
*/ + psn = TLFindStreamNodeByName( szStreamName ); + if (NULL != psn) + { + eError = PVRSRV_ERROR_ALREADY_EXISTS; + goto e0; + } + + /* Allocate stream structure container (stream struct) for the new stream */ + psTmp = OSAllocZMem(sizeof(TL_STREAM)); + if (NULL == psTmp) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e0; + } + + OSStringLCopy(psTmp->szName, szStreamName, PRVSRVTL_MAX_STREAM_NAME_SIZE); + + if (ui32StreamFlags & TL_FLAG_FORCE_FLUSH) + { + psTmp->bWaitForEmptyOnDestroy = IMG_TRUE; + } + + psTmp->bNoSignalOnCommit = (ui32StreamFlags&TL_FLAG_NO_SIGNAL_ON_COMMIT) ? IMG_TRUE : IMG_FALSE; + psTmp->bNoWrapPermanent = (ui32StreamFlags&TL_FLAG_PERMANENT_NO_WRAP) ? IMG_TRUE : IMG_FALSE; + + psTmp->eOpMode = eOpMode; + if (psTmp->eOpMode == TL_OPMODE_BLOCK) + { + /* Only allow drop properties to be mixed with no-wrap type streams + * since space does not become available when reads take place hence + * no point blocking. + */ + if (psTmp->bNoWrapPermanent) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e1; + } + } + + /* Additional synchronisation object required for some streams e.g. 
blocking */ + eError = OSEventObjectCreate(NULL, &psTmp->hProducerEventObj); + PVR_GOTO_IF_ERROR(eError, e1); + /* Create an event handle for this kind of stream */ + eError = OSEventObjectOpen(psTmp->hProducerEventObj, &psTmp->hProducerEvent); + PVR_GOTO_IF_ERROR(eError, e2); + + psTmp->pfOnReaderOpenCallback = pfOnReaderOpenCB; + psTmp->pvOnReaderOpenUserData = pvOnReaderOpenUD; + /* Remember producer supplied CB and data for later */ + psTmp->pfProducerCallback = (void(*)(void))pfProducerCB; + psTmp->pvProducerUserData = pvProducerUD; + + psTmp->psNotifStream = NULL; + + /* Round the requested bytes to a multiple of array elements' size, eg round 3 to 4 */ + psTmp->ui32Size = PVRSRVTL_ALIGN(ui32Size); + + /* Signalling from TLStreamCommit is deferred until buffer is slightly (~12%) filled */ + psTmp->ui32ThresholdUsageForSignal = psTmp->ui32Size >> 3; + psTmp->ui32MaxPacketSize = GET_TL_MAX_PACKET_SIZE(psTmp->ui32Size); + psTmp->ui32Read = 0; + psTmp->ui32Write = 0; + psTmp->ui32Pending = NOTHING_PENDING; + psTmp->psDevNode = psDevNode; + psTmp->bReadPending = IMG_FALSE; + psTmp->bSignalPending = IMG_FALSE; + +#if defined(TL_BUFFER_STATS) + OSAtomicWrite(&psTmp->bNoReaderSinceFirstReserve, 0); + /* Setting MAX possible value for "minimum" time to full, + * helps in the logic which calculates this time */ + psTmp->ui32MinTimeToFullInUs = IMG_UINT32_MAX; +#endif + + /* Memory will be allocated on first connect to the stream */ + if (!(ui32StreamFlags & TL_FLAG_ALLOCATE_ON_FIRST_OPEN)) + { + /* Allocate memory for the circular buffer and export it to user space. */ + eError = TLAllocSharedMemIfNull(psTmp); + PVR_LOG_GOTO_IF_ERROR(eError, "TLAllocSharedMem", e3); + } + + /* Synchronisation object to synchronise with user side data transfers. 
*/ + eError = OSEventObjectCreate(psTmp->szName, &hEventList); + PVR_GOTO_IF_ERROR(eError, e4); + + eError = OSLockCreate (&psTmp->hStreamWLock); + PVR_GOTO_IF_ERROR(eError, e5); + + eError = OSLockCreate (&psTmp->hReadLock); + PVR_GOTO_IF_ERROR(eError, e6); + + /* Now remember the stream in the global TL structures */ + psn = TLMakeSNode(hEventList, (TL_STREAM *)psTmp, NULL); + PVR_GOTO_IF_NOMEM(psn, eError, e7); + + /* Stream node created, now reset the write reference count to 1 + * (i.e. this context's reference) */ + psn->uiWRefCount = 1; + + TLAddStreamNode(psn); + + /* Release TL_GLOBAL_DATA lock as the new TL_SNODE is now added to the list */ + OSLockRelease (TLGGD()->hTLGDLock); + + /* Best effort signal, client wait timeout will ultimately let it find the + * new stream if this fails, acceptable to avoid clean-up as it is tricky + * at this point */ + (void) OSEventObjectSignal(TLGGD()->hTLEventObj); + + /* Pass the newly created stream handle back to caller */ + *phStream = (IMG_HANDLE)psTmp; + PVR_DPF_RETURN_OK; + +e7: + OSLockDestroy(psTmp->hReadLock); +e6: + OSLockDestroy(psTmp->hStreamWLock); +e5: + OSEventObjectDestroy(hEventList); +e4: + TLFreeSharedMem(psTmp); +e3: + OSEventObjectClose(psTmp->hProducerEvent); +e2: + OSEventObjectDestroy(psTmp->hProducerEventObj); +e1: + OSFreeMem(psTmp); +e0: + OSLockRelease (TLGGD()->hTLGDLock); + + PVR_DPF_RETURN_RC(eError); +} + +void TLStreamReset(IMG_HANDLE hStream) +{ + PTL_STREAM psStream = (PTL_STREAM) hStream; + + PVR_ASSERT(psStream != NULL); + + OSLockAcquire(psStream->hStreamWLock); + + while (psStream->ui32Pending != NOTHING_PENDING) + { + PVRSRV_ERROR eError; + + /* We're in the middle of a write so we cannot reset the stream. + * We are going to wait until the data is committed. Release lock while + * we're here. */ + OSLockRelease(psStream->hStreamWLock); + + /* Event when psStream->bNoSignalOnCommit is set we can still use + * the timeout capability of event object API (time in us). 
*/ + eError = OSEventObjectWaitTimeout(psStream->psNode->hReadEventObj, 100); + if (eError != PVRSRV_ERROR_TIMEOUT && eError != PVRSRV_OK) + { + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectWaitTimeout"); + } + + OSLockAcquire(psStream->hStreamWLock); + + /* Either timeout occurred or the stream has been signalled. + * If former we have to check if the data was committed and if latter + * if the stream hasn't been re-reserved. Either way we have to go + * back to the condition. + * If the stream has been released we'll exit with the lock held so + * we can finally go and reset the stream. */ + } + + psStream->ui32Read = 0; + psStream->ui32Write = 0; + /* we know that ui32Pending already has correct value (no need to set) */ + + OSLockRelease(psStream->hStreamWLock); +} + +PVRSRV_ERROR +TLStreamSetNotifStream(IMG_HANDLE hStream, IMG_HANDLE hNotifStream) +{ + PTL_STREAM psStream = (PTL_STREAM) hStream; + + if (hStream == NULL || hNotifStream == NULL) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + psStream->psNotifStream = (PTL_STREAM) hNotifStream; + + return PVRSRV_OK; +} + +PVRSRV_ERROR +TLStreamReconfigure( + IMG_HANDLE hStream, + IMG_UINT32 ui32StreamFlags) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PTL_STREAM psTmp; + TL_OPMODE eOpMode; + + PVR_DPF_ENTERED; + + if (NULL == hStream) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + eOpMode = ui32StreamFlags & TL_OPMODE_MASK; + if (( eOpMode <= TL_OPMODE_UNDEF ) || ( eOpMode >= TL_OPMODE_LAST )) + { + PVR_DPF((PVR_DBG_ERROR, "OpMode for TL stream is invalid")); + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + psTmp = (PTL_STREAM)hStream; + + /* Prevent the TL Stream buffer from being written to + * while its mode is being reconfigured + */ + OSLockAcquire (psTmp->hStreamWLock); + if (NOTHING_PENDING != psTmp->ui32Pending) + { + OSLockRelease (psTmp->hStreamWLock); + PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_READY); + } + psTmp->ui32Pending = 0; + OSLockRelease (psTmp->hStreamWLock); 
+ + psTmp->eOpMode = eOpMode; + if (psTmp->eOpMode == TL_OPMODE_BLOCK) + { + /* Only allow drop properties to be mixed with no-wrap type streams + * since space does not become available when reads take place hence + * no point blocking. + */ + if (psTmp->bNoWrapPermanent) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e1; + } + } + + OSLockAcquire (psTmp->hStreamWLock); + psTmp->ui32Pending = NOTHING_PENDING; + OSLockRelease (psTmp->hStreamWLock); +e1: + PVR_DPF_RETURN_RC(eError); +} + +PVRSRV_ERROR +TLStreamOpen(IMG_HANDLE *phStream, + const IMG_CHAR *szStreamName) +{ + PTL_SNODE psTmpSNode; + + PVR_DPF_ENTERED; + + if (NULL == phStream || NULL == szStreamName) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + /* Acquire the TL_GLOBAL_DATA lock first to ensure, + * the TL_STREAM while returned and being modified, + * is not deleted by some other context */ + OSLockAcquire (TLGGD()->hTLGDLock); + + /* Search for a stream node with a matching stream name */ + psTmpSNode = TLFindStreamNodeByName(szStreamName); + + if (NULL == psTmpSNode) + { + OSLockRelease (TLGGD()->hTLGDLock); + PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_FOUND); + } + + if (psTmpSNode->psStream->psNotifStream != NULL && + psTmpSNode->uiWRefCount == 1) + { + TLStreamMarkStreamOpen(psTmpSNode->psStream); + } + + /* The TL_SNODE->uiWRefCount governs the presence of this node in the + * TL_GLOBAL_DATA list i.e. when uiWRefCount falls to zero we try removing + * this node from the TL_GLOBAL_DATA list. 
Hence, is protected using the + * TL_GLOBAL_DATA lock and not TL_STREAM lock */ + psTmpSNode->uiWRefCount++; + + OSLockRelease (TLGGD()->hTLGDLock); + + /* Return the stream handle to the caller */ + *phStream = (IMG_HANDLE)psTmpSNode->psStream; + + PVR_DPF_RETURN_VAL(PVRSRV_OK); +} + +void +TLStreamClose(IMG_HANDLE hStream) +{ + PTL_STREAM psTmp; + IMG_BOOL bDestroyStream; + + PVR_DPF_ENTERED; + + if (NULL == hStream) + { + PVR_DPF((PVR_DBG_WARNING, + "TLStreamClose failed as NULL stream handler passed, nothing done.")); + PVR_DPF_RETURN; + } + + psTmp = (PTL_STREAM)hStream; + + /* Acquire TL_GLOBAL_DATA lock for updating the reference count as this will be required + * in-case this TL_STREAM node is to be deleted */ + OSLockAcquire (TLGGD()->hTLGDLock); + + /* Decrement write reference counter of the stream */ + psTmp->psNode->uiWRefCount--; + + if (0 != psTmp->psNode->uiWRefCount) + { + /* The stream is still being used in other context(s) do not destroy + * anything */ + + /* uiWRefCount == 1 means that stream was closed for write. Next + * close is pairing TLStreamCreate(). Send notification to indicate + * that no writer are connected to the stream any more. */ + if (psTmp->psNotifStream != NULL && psTmp->psNode->uiWRefCount == 1) + { + TLStreamMarkStreamClose(psTmp); + } + + OSLockRelease (TLGGD()->hTLGDLock); + PVR_DPF_RETURN; + } + else + { + /* Now we try removing this TL_STREAM from TL_GLOBAL_DATA */ + + if (psTmp->bWaitForEmptyOnDestroy) + { + /* We won't require the TL_STREAM lock to be acquired here for accessing its read + * and write offsets. REASON: We are here because there is no producer context + * referencing this TL_STREAM, hence its ui32Write offset won't be changed now. 
+ * Also, the update of ui32Read offset is not protected by locks */ + while (psTmp->ui32Read != psTmp->ui32Write) + { + /* Release lock before sleeping */ + OSLockRelease (TLGGD()->hTLGDLock); + + OSEventObjectWaitTimeout(psTmp->hProducerEvent, EVENT_OBJECT_TIMEOUT_US); + + OSLockAcquire (TLGGD()->hTLGDLock); + + /* Ensure destruction of stream is still required */ + if (0 != psTmp->psNode->uiWRefCount) + { + OSLockRelease (TLGGD()->hTLGDLock); + PVR_DPF_RETURN; + } + } + } + + /* Try removing the stream from TL_GLOBAL_DATA */ + bDestroyStream = TLTryRemoveStreamAndFreeStreamNode (psTmp->psNode); + + OSLockRelease (TLGGD()->hTLGDLock); + + if (bDestroyStream) + { + /* Destroy the stream if it was removed from TL_GLOBAL_DATA */ + TLStreamDestroy (psTmp); + psTmp = NULL; + } + PVR_DPF_RETURN; + } +} + +/* + * DoTLSetPacketHeader + * + * Ensure that whenever we update a Header we always add the RESERVED field + */ +static inline void DoTLSetPacketHeader(PVRSRVTL_PPACKETHDR, IMG_UINT32); +static inline void +DoTLSetPacketHeader(PVRSRVTL_PPACKETHDR pHdr, + IMG_UINT32 ui32Val) +{ + PVR_ASSERT(((size_t)pHdr & (size_t)(PVRSRVTL_PACKET_ALIGNMENT - 1)) == 0); + + /* Check that this is a correctly aligned packet header. 
*/ + if (((size_t)pHdr & (size_t)(PVRSRVTL_PACKET_ALIGNMENT - 1)) != 0) + { + /* Should return an error because the header is misaligned */ + PVR_DPF((PVR_DBG_ERROR, "%s: Misaligned header @ %p", __func__, pHdr)); + pHdr->uiTypeSize = ui32Val; + } + else + { + pHdr->uiTypeSize = ui32Val; + pHdr->uiReserved = PVRSRVTL_PACKETHDR_RESERVED; + } +} + +static PVRSRV_ERROR +DoTLStreamReserve(IMG_HANDLE hStream, + IMG_UINT8 **ppui8Data, + IMG_UINT32 ui32ReqSize, + IMG_UINT32 ui32ReqSizeMin, + PVRSRVTL_PACKETTYPE ePacketType, + IMG_UINT32* pui32AvSpace, + IMG_UINT32* pui32Flags) +{ + PTL_STREAM psTmp; + IMG_UINT32 *pui32Buf, ui32LRead, ui32LWrite, ui32LPending, lReqSizeAligned, lReqSizeActual, ui32CreateFreeSpace; + IMG_INT pad, iFreeSpace; + IMG_UINT8 *pui8IncrRead = NULL; + PVRSRVTL_PPACKETHDR pHdr; + + PVR_DPF_ENTERED; + if (pui32AvSpace) *pui32AvSpace = 0; + if (pui32Flags) *pui32Flags = 0; + + if (NULL == hStream) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + psTmp = (PTL_STREAM)hStream; + + /* Assert used as the packet type parameter is currently only provided + * by the TL APIs, not the calling client */ + PVR_ASSERT((PVRSRVTL_PACKETTYPE_UNDEF < ePacketType) && (PVRSRVTL_PACKETTYPE_LAST >= ePacketType)); + + /* The buffer is only used in "rounded" (aligned) chunks */ + lReqSizeAligned = PVRSRVTL_ALIGN(ui32ReqSize); + + /* Lock the stream before reading it's pending value, because if pending is set + * to NOTHING_PENDING, we update the pending value such that subsequent calls to + * this function from other context(s) fail with PVRSRV_ERROR_NOT_READY */ + OSLockAcquire (psTmp->hStreamWLock); + +#if defined(TL_BUFFER_STATS) + /* If writing into an empty buffer, start recording time-to-full */ + if (psTmp->ui32Read == psTmp->ui32Write) + { + OSAtomicWrite(&psTmp->bNoReaderSinceFirstReserve, 1); + psTmp->ui32TimeStart = OSClockus(); + } + + if (ui32ReqSize > psTmp->ui32MaxReserveWatermark) + { + psTmp->ui32MaxReserveWatermark = ui32ReqSize; + } +#endif + + 
/* Get a local copy of the stream buffer parameters */ + ui32LRead = psTmp->ui32Read; + ui32LWrite = psTmp->ui32Write; + ui32LPending = psTmp->ui32Pending; + + /* Multiple pending reserves are not supported. */ + if (NOTHING_PENDING != ui32LPending) + { + OSLockRelease (psTmp->hStreamWLock); + PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_READY); + } + + if (psTmp->ui32MaxPacketSize < lReqSizeAligned) + { + PVR_DPF((PVR_DBG_ERROR, "Requested Size: %u > TL Max Packet size: %u", lReqSizeAligned, psTmp->ui32MaxPacketSize)); + psTmp->ui32Pending = NOTHING_PENDING; + if (pui32AvSpace) + { + *pui32AvSpace = suggestAllocSize(ui32LRead, ui32LWrite, psTmp->ui32Size, ui32ReqSizeMin, psTmp->ui32MaxPacketSize); + if (*pui32AvSpace == 0 && psTmp->eOpMode == TL_OPMODE_DROP_OLDEST) + { + *pui32AvSpace = psTmp->ui32MaxPacketSize; + PVR_DPF((PVR_DBG_MESSAGE, "Opmode is Drop_Oldest, so Available Space changed to: %u", *pui32AvSpace)); + } + } + OSLockRelease (psTmp->hStreamWLock); + PVR_DPF_RETURN_RC(PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED); + } + + /* Prevent other threads from entering this region before we are done + * updating the pending value and write offset (in case of padding). 
This + * is not exactly a lock but a signal for other contexts that there is a + * TLStreamCommit operation pending on this stream */ + psTmp->ui32Pending = 0; + + OSLockRelease (psTmp->hStreamWLock); + + /* If there is enough contiguous space following the current Write + * position then no padding is required */ + if ( psTmp->ui32Size + < ui32LWrite + lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR) ) + { + pad = psTmp->ui32Size - ui32LWrite; + } + else + { + pad = 0; + } + + lReqSizeActual = lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR) + pad; + if (psTmp->bNoWrapPermanent) + { + iFreeSpace = bufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size); + } + else + { + iFreeSpace = circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size); + } + + if (iFreeSpace < (IMG_INT) lReqSizeActual) + { + /* If this is a blocking reserve and there is not enough space then wait. */ + if (psTmp->eOpMode == TL_OPMODE_BLOCK) + { + /* Stream create should stop us entering here when + * psTmp->bNoWrapPermanent is true as it does not make sense to + * block on permanent data streams. */ + PVR_ASSERT(psTmp->bNoWrapPermanent == IMG_FALSE); + while ( ( circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size) + <(IMG_INT) lReqSizeActual ) ) + { + /* The TL bridge is lockless now, so changing to OSEventObjectWait() */ + OSEventObjectWait(psTmp->hProducerEvent); + // update local copies. 
+ ui32LRead = psTmp->ui32Read; + ui32LWrite = psTmp->ui32Write; + } + } + /* Data overwriting, also insert PACKETS_DROPPED flag into existing packet */ + else if (psTmp->eOpMode == TL_OPMODE_DROP_OLDEST) + { + OSLockAcquire(psTmp->hReadLock); + + while (psTmp->bReadPending) + { + PVR_DPF((PVR_DBG_MESSAGE, "Waiting for the pending read operation to complete.")); + OSLockRelease(psTmp->hReadLock); +#if defined(TL_BUFFER_STATS) + TL_COUNTER_INC(psTmp->ui32CntWriteWaits); +#endif + (void) OSEventObjectWaitTimeout(psTmp->hProducerEvent, READ_PENDING_TIMEOUT_US); + OSLockAcquire(psTmp->hReadLock); + } + +#if defined(TL_BUFFER_STATS) + TL_COUNTER_INC(psTmp->ui32CntWriteSuccesses); +#endif + ui32LRead = psTmp->ui32Read; + + if ( circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size) + < (IMG_INT) lReqSizeActual ) + { + ui32CreateFreeSpace = 5 * (psTmp->ui32Size / 100); + if (ui32CreateFreeSpace < lReqSizeActual) + { + ui32CreateFreeSpace = lReqSizeActual; + } + + while (ui32CreateFreeSpace > (IMG_UINT32)circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size)) + { + pui8IncrRead = &psTmp->pbyBuffer[ui32LRead]; + ui32LRead += (sizeof(PVRSRVTL_PACKETHDR) + PVRSRVTL_ALIGN( GET_PACKET_DATA_LEN(pui8IncrRead) )); + + /* Check if buffer needs to wrap */ + if (ui32LRead >= psTmp->ui32Size) + { + ui32LRead = 0; + } + } + psTmp->ui32Read = ui32LRead; + pui8IncrRead = &psTmp->pbyBuffer[psTmp->ui32Read]; + + pHdr = GET_PACKET_HDR(pui8IncrRead); + DoTLSetPacketHeader(pHdr, SET_PACKETS_DROPPED(pHdr)); + } + /* else fall through as there is enough space now to write the data */ + + OSLockRelease(psTmp->hReadLock); + /* If we accepted a flag var set the OVERWRITE bit*/ + if (pui32Flags) *pui32Flags |= TL_FLAG_OVERWRITE_DETECTED; + } + /* No data overwriting, insert write_failed flag and return */ + else if (psTmp->eOpMode == TL_OPMODE_DROP_NEWER) + { + /* Caller should not try to use ppui8Data, + * NULLify to give user a chance of avoiding memory corruption */ + *ppui8Data = NULL; + + 
/* This flag should not be inserted two consecutive times, so + * check the last ui32 in case it was a packet drop packet. */ + pui32Buf = ui32LWrite + ? + (void *)&psTmp->pbyBuffer[ui32LWrite - sizeof(PVRSRVTL_PACKETHDR)] + : // Previous four bytes are not guaranteed to be a packet header... + (void *)&psTmp->pbyBuffer[psTmp->ui32Size - PVRSRVTL_PACKET_ALIGNMENT]; + + pHdr = GET_PACKET_HDR(pui32Buf); + if ( PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED + != + GET_PACKET_TYPE( pHdr ) ) + { + /* Insert size-stamped packet header */ + pui32Buf = (void *)&psTmp->pbyBuffer[ui32LWrite]; + pHdr = GET_PACKET_HDR(pui32Buf); + DoTLSetPacketHeader(pHdr, PVRSRVTL_SET_PACKET_WRITE_FAILED); + ui32LWrite += sizeof(PVRSRVTL_PACKETHDR); + ui32LWrite %= psTmp->ui32Size; + iFreeSpace -= sizeof(PVRSRVTL_PACKETHDR); + } + + OSLockAcquire (psTmp->hStreamWLock); + psTmp->ui32Write = ui32LWrite; + psTmp->ui32Pending = NOTHING_PENDING; + OSLockRelease (psTmp->hStreamWLock); + + if (pui32AvSpace) + { + *pui32AvSpace = suggestAllocSize(ui32LRead, ui32LWrite, psTmp->ui32Size, ui32ReqSizeMin, psTmp->ui32MaxPacketSize); + } + + /* Inform call of permanent stream misuse, no space left, + * the size of the stream will need to be increased. */ + if (psTmp->bNoWrapPermanent) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE); + } + + PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_FULL); + } + } + + /* The easy case: buffer has enough space to hold the requested packet (data + header) */ + + /* Should we treat the buffer as non-circular buffer? */ + if (psTmp->bNoWrapPermanent) + { + iFreeSpace = bufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size); + } + else + { + iFreeSpace = circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size); + } + + if (iFreeSpace >= (IMG_INT) lReqSizeActual) + { + if (pad) + { + /* Inserting padding packet. 
*/ + pui32Buf = (void *)&psTmp->pbyBuffer[ui32LWrite]; + pHdr = GET_PACKET_HDR(pui32Buf); + DoTLSetPacketHeader(pHdr, + PVRSRVTL_SET_PACKET_PADDING(pad-sizeof(PVRSRVTL_PACKETHDR))); + + /* CAUTION: the used pad value should always result in a properly + * aligned ui32LWrite pointer, which in this case is 0 */ + ui32LWrite = (ui32LWrite + pad) % psTmp->ui32Size; + /* Detect unaligned pad value */ + PVR_ASSERT(ui32LWrite == 0); + } + /* Insert size-stamped packet header */ + pui32Buf = (void *) &psTmp->pbyBuffer[ui32LWrite]; + + pHdr = GET_PACKET_HDR(pui32Buf); + DoTLSetPacketHeader(pHdr, + PVRSRVTL_SET_PACKET_HDR(ui32ReqSize, ePacketType)); + + /* return the next position in the buffer to the user */ + *ppui8Data = &psTmp->pbyBuffer[ ui32LWrite+sizeof(PVRSRVTL_PACKETHDR) ]; + + /* update pending offset: size stamp + data */ + ui32LPending = lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR); + } + else + { + OSLockAcquire (psTmp->hStreamWLock); + psTmp->ui32Pending = NOTHING_PENDING; + OSLockRelease (psTmp->hStreamWLock); + PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); + } + + /* Acquire stream lock for updating stream parameters */ + OSLockAcquire (psTmp->hStreamWLock); + psTmp->ui32Write = ui32LWrite; + psTmp->ui32Pending = ui32LPending; + OSLockRelease (psTmp->hStreamWLock); + +#if defined(TL_BUFFER_STATS) + TL_COUNTER_INC(psTmp->ui32CntNumWriteSuccess); +#endif + + PVR_DPF_RETURN_OK; +} + +PVRSRV_ERROR +TLStreamReserve(IMG_HANDLE hStream, + IMG_UINT8 **ppui8Data, + IMG_UINT32 ui32Size) +{ + return DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32Size, PVRSRVTL_PACKETTYPE_DATA, NULL, NULL); +} + +PVRSRV_ERROR +TLStreamReserve2(IMG_HANDLE hStream, + IMG_UINT8 **ppui8Data, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32SizeMin, + IMG_UINT32* pui32Available, + IMG_BOOL* pbIsReaderConnected) +{ + PVRSRV_ERROR eError; + + eError = DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32SizeMin, PVRSRVTL_PACKETTYPE_DATA, pui32Available, NULL); + if (eError != PVRSRV_OK && 
pbIsReaderConnected != NULL) + { + *pbIsReaderConnected = TLStreamIsOpenForReading(hStream); + } + + return eError; +} + +PVRSRV_ERROR +TLStreamReserveReturnFlags(IMG_HANDLE hStream, + IMG_UINT8 **ppui8Data, + IMG_UINT32 ui32Size, + IMG_UINT32* pui32Flags) +{ + return DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32Size, PVRSRVTL_PACKETTYPE_DATA, NULL, pui32Flags); +} + +PVRSRV_ERROR +TLStreamCommit(IMG_HANDLE hStream, IMG_UINT32 ui32ReqSize) +{ + PTL_STREAM psTmp; + IMG_UINT32 ui32LRead, ui32OldWrite, ui32LWrite, ui32LPending; + PVRSRV_ERROR eError; + +#if defined(TL_BUFFER_STATS) + IMG_UINT32 ui32UnreadBytes; +#endif + + PVR_DPF_ENTERED; + + if (NULL == hStream) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + psTmp = (PTL_STREAM)hStream; + + /* Get a local copy of the stream buffer parameters */ + ui32LRead = psTmp->ui32Read; + ui32LWrite = psTmp->ui32Write; + ui32LPending = psTmp->ui32Pending; + + ui32OldWrite = ui32LWrite; + + // Space in buffer is aligned + ui32ReqSize = PVRSRVTL_ALIGN(ui32ReqSize) + sizeof(PVRSRVTL_PACKETHDR); + + /* Check pending reserver and ReqSize + packet header size. */ + if ((ui32LPending == NOTHING_PENDING) || (ui32ReqSize > ui32LPending)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_MISUSE); + } + + /* Update pointer to written data. 
*/ + ui32LWrite = (ui32LWrite + ui32ReqSize) % psTmp->ui32Size; + + /* and reset LPending to 0 since data are now submitted */ + ui32LPending = NOTHING_PENDING; + +#if defined(TL_BUFFER_STATS) + /* Calculate new number of bytes unread */ + if (ui32LWrite > ui32LRead) + { + ui32UnreadBytes = (ui32LWrite-ui32LRead); + } + else if (ui32LWrite < ui32LRead) + { + ui32UnreadBytes = (psTmp->ui32Size-ui32LRead+ui32LWrite); + } + else + { /* else equal, ignore */ + ui32UnreadBytes = 0; + } + + /* Calculate high water mark for debug purposes */ + if (ui32UnreadBytes > psTmp->ui32BufferUt) + { + psTmp->ui32BufferUt = ui32UnreadBytes; + } +#endif + + /* Memory barrier required to ensure prior data written by writer is + * flushed from WC buffer to main memory. */ + OSWriteMemoryBarrier(); + + /* Acquire stream lock to ensure other context(s) (if any) + * wait on the lock (in DoTLStreamReserve) for consistent values + * of write offset and pending value */ + OSLockAcquire (psTmp->hStreamWLock); + + /* Update stream buffer parameters to match local copies */ + psTmp->ui32Write = ui32LWrite; + psTmp->ui32Pending = ui32LPending; + + TL_COUNTER_ADD(psTmp->ui32ProducerByteCount, ui32ReqSize); + TL_COUNTER_INC(psTmp->ui32NumCommits); + +#if defined(TL_BUFFER_STATS) + /* IF there has been no-reader since first reserve on an empty-buffer, + * AND current utilisation is considerably high (90%), calculate the + * time taken to fill up the buffer */ + if ((OSAtomicRead(&psTmp->bNoReaderSinceFirstReserve) == 1) && + (TLStreamGetUT(psTmp) >= 90 * psTmp->ui32Size/100)) + { + IMG_UINT32 ui32TimeToFullInUs = OSClockus() - psTmp->ui32TimeStart; + if (psTmp->ui32MinTimeToFullInUs > ui32TimeToFullInUs) + { + psTmp->ui32MinTimeToFullInUs = ui32TimeToFullInUs; + } + /* Following write ensures ui32MinTimeToFullInUs doesn't lose its + * real (expected) value in case there is no reader until next Commit call */ + OSAtomicWrite(&psTmp->bNoReaderSinceFirstReserve, 0); + } +#endif + + if 
(!psTmp->bNoSignalOnCommit) + { + /* If we have transitioned from an empty buffer to a non-empty buffer, we + * must signal possibly waiting consumer. BUT, let the signal be "deferred" + * until buffer is at least 'ui32ThresholdUsageForSignal' bytes full. This + * avoids a race between OSEventObjectSignal and OSEventObjectWaitTimeout + * (in TLServerAcquireDataKM), where a "signal" might happen before "wait", + * resulting into signal being lost and stream-reader waiting even though + * buffer is no-more empty */ + if (ui32OldWrite == ui32LRead) + { + psTmp->bSignalPending = IMG_TRUE; + } + + if (psTmp->bSignalPending && (TLStreamGetUT(psTmp) >= psTmp->ui32ThresholdUsageForSignal)) + { + TL_COUNTER_INC(psTmp->ui32SignalsSent); + psTmp->bSignalPending = IMG_FALSE; + + /* Signal consumers that may be waiting */ + eError = OSEventObjectSignal(psTmp->psNode->hReadEventObj); + if (eError != PVRSRV_OK) + { + OSLockRelease (psTmp->hStreamWLock); + PVR_DPF_RETURN_RC(eError); + } + } + else + { + TL_COUNTER_INC(psTmp->ui32SignalNotSent); + } + } + OSLockRelease (psTmp->hStreamWLock); + + PVR_DPF_RETURN_OK; +} + +PVRSRV_ERROR +TLStreamWrite(IMG_HANDLE hStream, IMG_UINT8 *pui8Src, IMG_UINT32 ui32Size) +{ + IMG_BYTE *pbyDest = NULL; + PVRSRV_ERROR eError; + + PVR_DPF_ENTERED; + + if (NULL == hStream) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + eError = TLStreamReserve(hStream, &pbyDest, ui32Size); + if (PVRSRV_OK != eError) + { + PVR_DPF_RETURN_RC(eError); + } + else + { + OSDeviceMemCopy((void*)pbyDest, (void*)pui8Src, ui32Size); + eError = TLStreamCommit(hStream, ui32Size); + if (PVRSRV_OK != eError) + { + PVR_DPF_RETURN_RC(eError); + } + } + + PVR_DPF_RETURN_OK; +} + +PVRSRV_ERROR +TLStreamWriteRetFlags(IMG_HANDLE hStream, IMG_UINT8 *pui8Src, IMG_UINT32 ui32Size, IMG_UINT32 *pui32Flags){ + IMG_BYTE *pbyDest = NULL; + PVRSRV_ERROR eError; + + PVR_DPF_ENTERED; + + if (NULL == hStream) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + eError = 
TLStreamReserveReturnFlags(hStream, &pbyDest, ui32Size, pui32Flags); + if (PVRSRV_OK != eError) + { + PVR_DPF_RETURN_RC(eError); + } + else + { + OSDeviceMemCopy((void*)pbyDest, (void*)pui8Src, ui32Size); + eError = TLStreamCommit(hStream, ui32Size); + if (PVRSRV_OK != eError) + { + PVR_DPF_RETURN_RC(eError); + } + } + + PVR_DPF_RETURN_OK; +} + +void TLStreamInfo(IMG_HANDLE hStream, PTL_STREAM_INFO psInfo) +{ + IMG_DEVMEM_SIZE_T actual_req_size; + IMG_DEVMEM_ALIGN_T align = 4; /* Low dummy value so the real value can be obtained */ + + actual_req_size = 2; + /* ignore error as OSGetPageShift() should always return correct value */ + (void) DevmemExportalignAdjustSizeAndAlign(OSGetPageShift(), &actual_req_size, &align); + + psInfo->headerSize = sizeof(PVRSRVTL_PACKETHDR); + psInfo->minReservationSize = sizeof(IMG_UINT32); + psInfo->pageSize = (IMG_UINT32)(actual_req_size); + psInfo->pageAlign = (IMG_UINT32)(align); + psInfo->maxTLpacketSize = ((PTL_STREAM)hStream)->ui32MaxPacketSize; +} + +PVRSRV_ERROR +TLStreamMarkEOS(IMG_HANDLE psStream, IMG_BOOL bRemoveOld) +{ + PTL_STREAM psTmp; + PVRSRV_ERROR eError; + IMG_UINT8* pData; + + PVR_DPF_ENTERED; + + if (NULL == psStream) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + psTmp = (PTL_STREAM)psStream; + + /* Do not support EOS packets on permanent stream buffers at present, + * EOS is best used with streams where data is consumed. 
*/ + if (psTmp->bNoWrapPermanent) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_MISUSE); + } + + if (bRemoveOld) + { + eError = DoTLStreamReserve(psStream, &pData, 0, 0, PVRSRVTL_PACKETTYPE_MARKER_EOS_REMOVEOLD, NULL, NULL); + } + else + { + eError = DoTLStreamReserve(psStream, &pData, 0, 0, PVRSRVTL_PACKETTYPE_MARKER_EOS, NULL, NULL); + } + + if (PVRSRV_OK != eError) + { + PVR_DPF_RETURN_RC(eError); + } + + PVR_DPF_RETURN_RC(TLStreamCommit(psStream, 0)); +} + + +static PVRSRV_ERROR +_TLStreamMarkOC(IMG_HANDLE hStream, PVRSRVTL_PACKETTYPE ePacketType) +{ + PVRSRV_ERROR eError; + PTL_STREAM psStream = hStream; + IMG_UINT32 ui32Size; + IMG_UINT8 *pData; + + PVR_DPF_ENTERED; + + if (NULL == psStream) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + if (NULL == psStream->psNotifStream) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_NOTIF_STREAM); + } + + ui32Size = OSStringLength(psStream->szName) + 1; + + eError = DoTLStreamReserve(psStream->psNotifStream, &pData, ui32Size, + ui32Size, ePacketType, NULL, NULL); + if (PVRSRV_OK != eError) + { + PVR_DPF_RETURN_RC(eError); + } + + OSDeviceMemCopy(pData, psStream->szName, ui32Size); + + PVR_DPF_RETURN_RC(TLStreamCommit(psStream->psNotifStream, ui32Size)); +} + +PVRSRV_ERROR +TLStreamMarkStreamOpen(IMG_HANDLE psStream) +{ + return _TLStreamMarkOC(psStream, PVRSRVTL_PACKETTYPE_STREAM_OPEN_FOR_WRITE); +} + +PVRSRV_ERROR +TLStreamMarkStreamClose(IMG_HANDLE psStream) +{ + return _TLStreamMarkOC(psStream, PVRSRVTL_PACKETTYPE_STREAM_CLOSE_FOR_WRITE); +} + +PVRSRV_ERROR +TLStreamSync(IMG_HANDLE psStream) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PTL_STREAM psTmp; + + PVR_DPF_ENTERED; + + if (NULL == psStream) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + psTmp = (PTL_STREAM)psStream; + + /* If read client exists and has opened stream in blocking mode, + * signal when data is available to read. 
*/ + if (psTmp->psNode->psRDesc && + (!(psTmp->psNode->psRDesc->ui32Flags & PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING)) && + psTmp->ui32Read != psTmp->ui32Write) + { + TL_COUNTER_INC(psTmp->ui32ManSyncs); + eError = OSEventObjectSignal(psTmp->psNode->hReadEventObj); + } + + PVR_DPF_RETURN_RC(eError); +} + +IMG_BOOL +TLStreamIsOpenForReading(IMG_HANDLE hStream) +{ + PTL_STREAM psTmp; + + PVR_DPF_ENTERED; + + PVR_ASSERT(hStream); + psTmp = (PTL_STREAM)hStream; + + PVR_DPF_RETURN_VAL(psTmp->psNode->psRDesc != NULL); +} + +IMG_BOOL +TLStreamOutOfData(IMG_HANDLE hStream) +{ + PTL_STREAM psTmp; + + PVR_DPF_ENTERED; + + PVR_ASSERT(hStream); + psTmp = (PTL_STREAM)hStream; + + /* If both pointers are equal then the buffer is empty */ + PVR_DPF_RETURN_VAL(psTmp->ui32Read == psTmp->ui32Write); +} + + +PVRSRV_ERROR +TLStreamResetProducerByteCount(IMG_HANDLE hStream, IMG_UINT32 ui32Value) +{ + PTL_STREAM psTmp; + IMG_UINT32 ui32LRead, ui32LWrite; + PVRSRV_ERROR eErr = PVRSRV_OK; + + PVR_DPF_ENTERED; + + PVR_ASSERT(hStream); + psTmp = (PTL_STREAM)hStream; + ui32LRead = psTmp->ui32Read; + ui32LWrite = psTmp->ui32Write; + + if (ui32LRead != ui32LWrite) + { + eErr = PVRSRV_ERROR_STREAM_MISUSE; + } +#if defined(TL_BUFFER_STATS) + psTmp->ui32ProducerByteCount = ui32Value; +#else + PVR_UNREFERENCED_PARAMETER(ui32Value); +#endif + PVR_DPF_RETURN_RC(eErr); +} +/* + * Internal stream APIs to server part of Transport Layer, declared in + * header tlintern.h. Direct pointers to stream objects are used here as + * these functions are internal. + */ +IMG_UINT32 +TLStreamAcquireReadPos(PTL_STREAM psStream, + IMG_BOOL bDisableCallback, + IMG_UINT32* puiReadOffset) +{ + IMG_UINT32 uiReadLen = 0; + IMG_UINT32 ui32LRead, ui32LWrite; + + PVR_DPF_ENTERED; + + PVR_ASSERT(psStream); + PVR_ASSERT(puiReadOffset); + + if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST) + { + if (!OSTryLockAcquire(psStream->hReadLock)) + { + /* + * This is a normal event when the system is under load. 
+ * An example of how to produce this is to run testrunner / + * regression/ddk_test_seq2_host_fw_mem.conf with HTB / pvrhtbd + * configured as + * + * # pvrdebug -log trace -loggroups main,pow,debug \ + * -hostloggroups main,ctrl,sync,brg -hostlogtype dropoldest + * + * # pvrhtbd -hostloggroups main,ctrl,sync,brg + * + * We will see a small number of these collisions but as this is + * an expected calling path, and an expected return code, we drop + * the severity to just be a debug MESSAGE instead of WARNING + */ + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Read lock on stream '%s' is acquired by some writer, " + "hence reader failed to acquire read lock.", __func__, + psStream->szName)); +#if defined(TL_BUFFER_STATS) + TL_COUNTER_INC(psStream->ui32CntReadFails); +#endif + PVR_DPF_RETURN_VAL(0); + } + } + +#if defined(TL_BUFFER_STATS) + TL_COUNTER_INC(psStream->ui32CntReadSuccesses); +#endif + + /* Grab a local copy */ + ui32LRead = psStream->ui32Read; + ui32LWrite = psStream->ui32Write; + + if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST) + { + psStream->bReadPending = IMG_TRUE; + OSLockRelease(psStream->hReadLock); + } + + /* No data available and CB defined - try and get data */ + if ((ui32LRead == ui32LWrite) && psStream->pfProducerCallback && !bDisableCallback) + { + PVRSRV_ERROR eRc; + IMG_UINT32 ui32Resp = 0; + + eRc = ((TL_STREAM_SOURCECB)psStream->pfProducerCallback)(psStream, TL_SOURCECB_OP_CLIENT_EOS, + &ui32Resp, psStream->pvProducerUserData); + PVR_LOG_IF_ERROR(eRc, "TLStream->pfProducerCallback"); + + ui32LWrite = psStream->ui32Write; + } + + /* No data available... */ + if (ui32LRead == ui32LWrite) + { + if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST) + { + psStream->bReadPending = IMG_FALSE; + } + PVR_DPF_RETURN_VAL(0); + } + +#if defined(TL_BUFFER_STATS) + /* The moment reader knows it will see a non-zero data, it marks its presence in writer's eyes */ + OSAtomicWrite (&psStream->bNoReaderSinceFirstReserve, 0); +#endif + + /* Data is available to read... 
*/ + *puiReadOffset = ui32LRead; + + /*PVR_DPF((PVR_DBG_VERBOSE, + * "TLStreamAcquireReadPos Start before: Write:%d, Read:%d, size:%d", + * ui32LWrite, ui32LRead, psStream->ui32Size)); + */ + + if (ui32LRead > ui32LWrite) + { /* CB has wrapped around. */ + PVR_ASSERT(!psStream->bNoWrapPermanent); + /* Return the first contiguous piece of memory, ie [ReadLen,EndOfBuffer] + * and let a subsequent AcquireReadPos read the rest of the Buffer */ + /*PVR_DPF((PVR_DBG_VERBOSE, "TLStreamAcquireReadPos buffer has wrapped"));*/ + uiReadLen = psStream->ui32Size - ui32LRead; + TL_COUNTER_INC(psStream->ui32AcquireRead2); + } + else + { /* CB has not wrapped */ + uiReadLen = ui32LWrite - ui32LRead; + TL_COUNTER_INC(psStream->ui32AcquireRead1); + } + + PVR_DPF_RETURN_VAL(uiReadLen); +} + +PVRSRV_ERROR +TLStreamAdvanceReadPos(PTL_STREAM psStream, + IMG_UINT32 uiReadLen, + IMG_UINT32 uiOrigReadLen) +{ + IMG_UINT32 uiNewReadPos; + + PVR_DPF_ENTERED; + + PVR_ASSERT(psStream); + + /* + * This API does not use Read lock as 'bReadPending' is sufficient + * to keep Read index safe by preventing a write from updating the + * index and 'bReadPending' itself is safe as it can only be modified + * by readers and there can be only one reader in action at a time. + */ + + /* Update the read offset by the length provided in a circular manner. + * Assuming the update to be atomic hence, avoiding use of locks + */ + uiNewReadPos = (psStream->ui32Read + uiReadLen) % psStream->ui32Size; + + /* Must validate length is on a packet boundary, for + * TLReleaseDataLess calls. 
+ */ + if (uiReadLen != uiOrigReadLen) /* buffer not empty */ + { + PVRSRVTL_PPACKETHDR psHdr = GET_PACKET_HDR(psStream->pbyBuffer+uiNewReadPos); + PVRSRVTL_PACKETTYPE eType = GET_PACKET_TYPE(psHdr); + + if ((psHdr->uiReserved != PVRSRVTL_PACKETHDR_RESERVED) || + (eType == PVRSRVTL_PACKETTYPE_UNDEF) || + (eType >= PVRSRVTL_PACKETTYPE_LAST)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_ALIGNMENT); + } + /* else OK, on a packet boundary */ + } + /* else no check needed */ + + psStream->ui32Read = uiNewReadPos; + + if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST) + { + psStream->bReadPending = IMG_FALSE; + } + + /* notify reserves that may be pending */ + /* The producer event object is used to signal the StreamReserve if the TL + * Buffer is in blocking mode and is full. + * Previously this event was only signalled if the buffer was created in + * blocking mode. Since the buffer mode can now change dynamically the event + * is signalled every time to avoid any potential race where the signal is + * required, but not produced. + */ + { + PVRSRV_ERROR eError; + eError = OSEventObjectSignal(psStream->hProducerEventObj); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, + "Error in TLStreamAdvanceReadPos: OSEventObjectSignal returned:%u", + eError)); + /* We've failed to notify the producer event. This means there may + * be a delay in generating more data to be consumed until the next + * Write() generating action occurs. + */ + } + } + + PVR_DPF((PVR_DBG_VERBOSE, + "TLStreamAdvanceReadPos Read now at: %d", + psStream->ui32Read)); + PVR_DPF_RETURN_OK; +} + +void +TLStreamResetReadPos(PTL_STREAM psStream) +{ + PVR_DPF_ENTERED; + + PVR_ASSERT(psStream); + + if (psStream->bNoWrapPermanent) + { + + /* Update the read offset by the length provided in a circular manner. 
+ * Assuming the update to be atomic hence, avoiding use of locks */ + psStream->ui32Read = 0; + + PVR_DPF((PVR_DBG_VERBOSE, + "TLStreamResetReadPos Read now at: %d", + psStream->ui32Read)); + } + else + { + /* else for other stream types this is a no-op */ + PVR_DPF((PVR_DBG_VERBOSE, + "No need to reset read position of circular tlstream")); + } + + PVR_DPF_RETURN; +} + +void +TLStreamDestroy (PTL_STREAM psStream) +{ + PVR_ASSERT (psStream); + + OSLockDestroy (psStream->hStreamWLock); + OSLockDestroy (psStream->hReadLock); + + OSEventObjectClose(psStream->hProducerEvent); + OSEventObjectDestroy(psStream->hProducerEventObj); + + TLFreeSharedMem(psStream); + OSFreeMem(psStream); +} + +DEVMEM_MEMDESC* +TLStreamGetBufferPointer(PTL_STREAM psStream) +{ + PVR_DPF_ENTERED; + + PVR_ASSERT(psStream); + + PVR_DPF_RETURN_VAL(psStream->psStreamMemDesc); +} diff --git a/drivers/mcst/gpu-imgtec/services/server/common/vmm_pvz_client.c b/drivers/mcst/gpu-imgtec/services/server/common/vmm_pvz_client.c new file mode 100644 index 000000000000..58ca045ddfbd --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/vmm_pvz_client.c @@ -0,0 +1,143 @@ +/*************************************************************************/ /*! +@File vmm_pvz_client.c +@Title VM manager client para-virtualization +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header provides VMM client para-virtualization APIs +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pvrsrv.h" +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" + +#include "vmm_impl.h" +#include "vz_vmm_pvz.h" +#include "vmm_pvz_client.h" + + +static inline void +PvzClientLockAcquire(void) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + OSLockAcquire(psPVRSRVData->hPvzConnectionLock); +} + +static inline void +PvzClientLockRelease(void) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + OSLockRelease(psPVRSRVData->hPvzConnectionLock); +} + +/* + * =========================================================== + * The following client para-virtualization (pvz) functions + * are exclusively called by guests to initiate a pvz call + * to the host via hypervisor (guest -> vm manager -> host) + * =========================================================== + */ + +PVRSRV_ERROR +PvzClientMapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + PVRSRV_ERROR eError; + IMG_DEV_PHYADDR sDevPAddr; + IMG_UINT64 ui64Size; + VMM_PVZ_CONNECTION *psVmmPvz; + IMG_UINT32 uiFuncID = PVZ_BRIDGE_MAPDEVICEPHYSHEAP; + PHYS_HEAP *psFwPhysHeap = psDevConfig->psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]; + + eError = PhysHeapRegionGetDevPAddr(psFwPhysHeap, 0, &sDevPAddr); + +#if defined(PVR_PMR_TRANSLATE_UMA_ADDRESSES) +{ + /* Host expects PA rather than IPA address, so on the platforms where + * IPA<->PA translation is not done by h/w, we ensure it is done by s/w */ + + IMG_DEV_PHYADDR sDevPAddrTranslated; + + PhysHeapCpuPAddrToDevPAddr(psFwPhysHeap, 1, &sDevPAddrTranslated, (IMG_CPU_PHYADDR *)&sDevPAddr); + sDevPAddr.uiAddr = sDevPAddrTranslated.uiAddr; +} +#endif + + PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapRegionGetDevPAddr"); + PVR_LOG_RETURN_IF_FALSE((sDevPAddr.uiAddr != 0), "PhysHeapRegionGetDevPAddr", PVRSRV_ERROR_INVALID_PARAMS); + + eError = PhysHeapRegionGetSize(psFwPhysHeap, 0, &ui64Size); + PVR_LOG_RETURN_IF_ERROR(eError, 
"PhysHeapRegionGetSize"); + PVR_LOG_RETURN_IF_FALSE((ui64Size != 0), "PhysHeapRegionGetSize", PVRSRV_ERROR_INVALID_PARAMS); + + psVmmPvz = PvzConnectionAcquire(); + PvzClientLockAcquire(); + + eError = psVmmPvz->sClientFuncTab.pfnMapDevPhysHeap(uiFuncID, + 0, + ui64Size, + sDevPAddr.uiAddr); + + PvzClientLockRelease(); + PvzConnectionRelease(psVmmPvz); + + return eError; +} + +PVRSRV_ERROR +PvzClientUnmapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + PVRSRV_ERROR eError; + IMG_UINT32 uiFuncID = PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP; + VMM_PVZ_CONNECTION *psVmmPvz = PvzConnectionAcquire(); + PVR_ASSERT(psVmmPvz); + + PvzClientLockAcquire(); + + PVR_ASSERT(psVmmPvz->sClientFuncTab.pfnUnmapDevPhysHeap); + + eError = psVmmPvz->sClientFuncTab.pfnUnmapDevPhysHeap(uiFuncID, 0); + + PvzClientLockRelease(); + PvzConnectionRelease(psVmmPvz); + + return eError; +} + +/****************************************************************************** + End of file (vmm_pvz_client.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/common/vmm_pvz_server.c b/drivers/mcst/gpu-imgtec/services/server/common/vmm_pvz_server.c new file mode 100644 index 000000000000..bd03e29ff4b0 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/vmm_pvz_server.c @@ -0,0 +1,245 @@ +/*************************************************************************/ /*! +@File vmm_pvz_server.c +@Title VM manager server para-virtualization handlers +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header provides VMM server para-virtz handler APIs +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pvrsrv.h" +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "rgxfwutils.h" + +#include "vz_vm.h" +#include "vmm_impl.h" +#include "vz_vmm_pvz.h" +#include "vmm_pvz_server.h" + +static inline void +PvzServerLockAcquire(void) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + OSLockAcquire(psPVRSRVData->hPvzConnectionLock); +} + +static inline void +PvzServerLockRelease(void) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + OSLockRelease(psPVRSRVData->hPvzConnectionLock); +} + + +/* + * =========================================================== + * The following server para-virtualization (pvz) functions + * are exclusively called by the VM manager (hypervisor) on + * behalf of guests to complete guest pvz calls + * (guest -> vm manager -> host) + * =========================================================== + */ + +PVRSRV_ERROR +PvzServerMapDevPhysHeap(IMG_UINT32 ui32OSID, + IMG_UINT32 ui32FuncID, + IMG_UINT32 ui32DevID, + IMG_UINT64 ui64Size, + IMG_UINT64 ui64PAddr) +{ +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + /* + * Reject hypercall if called on a system configured at build time to + * preallocate the Guest's firmware heaps from static carveout memory. 
+ */ + PVR_DPF((PVR_DBG_ERROR, + "%s: Host PVZ config: Does not match with Guest PVZ config\n" + " Host preallocates the Guest's FW physheap from static memory carveouts at startup.\n", __func__)); + return PVRSRV_ERROR_INVALID_PVZ_CONFIG; +#else + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_LOG_RETURN_IF_FALSE((ui32DevID == 0), "Invalid Device ID", PVRSRV_ERROR_INVALID_PARAMS); + + if (ui32FuncID != PVZ_BRIDGE_MAPDEVICEPHYSHEAP) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Host PVZ call: OSID: %d: Invalid function ID: expected %d, got %d", + __func__, + ui32OSID, + (IMG_UINT32)PVZ_BRIDGE_MAPDEVICEPHYSHEAP, + ui32FuncID)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PvzServerLockAcquire(); + +#if defined(SUPPORT_RGX) + if (IsVmOnline(ui32OSID)) + { + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDeviceNode = psPVRSRVData->psDeviceNodeList; + IMG_DEV_PHYADDR sDevPAddr = {ui64PAddr}; + IMG_UINT32 sync; + + eError = RGXFwRawHeapAllocMap(psDeviceNode, ui32OSID, sDevPAddr, ui64Size); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXFwRawHeapAllocMap", e0); + + /* Invalidate MMU cache in preparation for a kick from this Guest */ + eError = psDeviceNode->pfnMMUCacheInvalidateKick(psDeviceNode, &sync, IMG_TRUE); + PVR_LOG_GOTO_IF_ERROR(eError, "MMUCacheInvalidateKick", e0); + + /* Everything is ready for the firmware to start interacting with this OS */ + eError = RGXFWSetFwOsState(psDeviceNode->pvDevice, ui32OSID, RGXFWIF_OS_ONLINE); + } +e0: +#endif /* defined(SUPPORT_RGX) */ + PvzServerLockRelease(); + + return eError; +#endif +} + +PVRSRV_ERROR +PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32OSID, + IMG_UINT32 ui32FuncID, + IMG_UINT32 ui32DevID) +{ +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + /* + * Reject hypercall if called on a system configured at built time to + * preallocate the Guest's firmware heaps from static carveout memory. 
+ */ + PVR_DPF((PVR_DBG_ERROR, + "%s: Host PVZ config: Does not match with Guest PVZ config\n" + " Host preallocates the Guest's FW physheap from static memory carveouts at startup.\n", __func__)); + return PVRSRV_ERROR_INVALID_PVZ_CONFIG; +#else + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_LOG_RETURN_IF_FALSE((ui32DevID == 0), "Invalid Device ID", PVRSRV_ERROR_INVALID_PARAMS); + + if (ui32FuncID != PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Host PVZ call: OSID: %d: Invalid function ID: expected %d, got %d", + __func__, + ui32OSID, + (IMG_UINT32)PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP, + ui32FuncID)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PvzServerLockAcquire(); + +#if defined(SUPPORT_RGX) + if (IsVmOnline(ui32OSID)) + { + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDeviceNode = psPVRSRVData->psDeviceNodeList; + + /* Order firmware to offload this OS' data and stop accepting commands from it */ + eError = RGXFWSetFwOsState(psDeviceNode->pvDevice, ui32OSID, RGXFWIF_OS_OFFLINE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXFWSetFwOsState", e0); + + /* it is now safe to remove the Guest's memory mappings */ + RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID); + } +e0: +#endif + + PvzServerLockRelease(); + + return eError; +#endif +} + +/* + * ============================================================ + * The following server para-virtualization (pvz) functions + * are exclusively called by the VM manager (hypervisor) to + * pass side band information to the host (vm manager -> host) + * ============================================================ + */ + +PVRSRV_ERROR +PvzServerOnVmOnline(IMG_UINT32 ui32OSID, IMG_UINT32 ui32Priority) +{ + PVRSRV_ERROR eError; + + PvzServerLockAcquire(); + + eError = PvzOnVmOnline(ui32OSID, ui32Priority); + + PvzServerLockRelease(); + + return eError; +} + +PVRSRV_ERROR +PvzServerOnVmOffline(IMG_UINT32 ui32OSID) +{ + PVRSRV_ERROR eError; + + PvzServerLockAcquire(); + + eError = 
PvzOnVmOffline(ui32OSID); + + PvzServerLockRelease(); + + return eError; +} + +PVRSRV_ERROR +PvzServerVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue) +{ + PVRSRV_ERROR eError; + + PvzServerLockAcquire(); + + eError = PvzVMMConfigure(eVMMParamType, ui32ParamValue); + + PvzServerLockRelease(); + + return eError; +} + +/****************************************************************************** + End of file (vmm_pvz_server.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/common/vz_vmm_pvz.c b/drivers/mcst/gpu-imgtec/services/server/common/vz_vmm_pvz.c new file mode 100644 index 000000000000..a029f3df6cee --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/vz_vmm_pvz.c @@ -0,0 +1,192 @@ +/*************************************************************************/ /*! +@File vz_vmm_pvz.c +@Title VM manager para-virtualization APIs +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description VM manager para-virtualization management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "pvrsrv.h" +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" +#include "allocmem.h" +#include "pvrsrv.h" +#include "vz_vmm_pvz.h" + +#if (RGX_NUM_OS_SUPPORTED > 1) +static PVRSRV_ERROR +PvzConnectionValidate(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + VMM_PVZ_CONNECTION *psVmmPvz; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* + * Acquire the underlying VM manager PVZ connection & validate it. + */ + psVmmPvz = PvzConnectionAcquire(); + if (psVmmPvz == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s PVZ config: Unable to acquire PVZ connection", + __func__, PVRSRV_VZ_MODE_IS(GUEST) ? 
"Guest" : "Host")); + eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG; + goto e0; + } + + /* Log which PVZ setup type is being used by driver */ +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + /* + * Static PVZ bootstrap setup + * + * This setup uses carve-out memory, has no hypercall mechanism & does not support + * out-of-order initialisation of host/guest VMs/drivers. The host driver has all + * the information needed to initialize all OSIDs firmware state when it's loaded + * and its PVZ layer must mark all guest OSIDs as being online as part of its PVZ + * initialisation. Having no out-of-order initialisation support, the guest driver + * can only submit a workload to the device after the host driver has completely + * initialized the firmware, the VZ hypervisor/VM setup must guarantee this. + */ + PVR_LOG(("Using static PVZ bootstrap setup")); +#else + /* + * Dynamic PVZ bootstrap setup + * + * This setup uses guest memory, has PVZ hypercall mechanism & supports out-of-order + * initialisation of host/guest VMs/drivers. The host driver initializes only its + * own OSID-0 firmware state when its loaded and each guest driver will use its PVZ + * interface to hypercall to the host driver to both synchronise its initialisation + * so it does not submit any workload to the firmware before the host driver has + * had a chance to initialize the firmware and to also initialize its own OSID-x + * firmware state. 
+ */ + PVR_LOG(("Using dynamic PVZ bootstrap setup")); + + if (!PVRSRV_VZ_MODE_IS(GUEST) && + (psVmmPvz->sServerFuncTab.pfnMapDevPhysHeap == NULL || + psVmmPvz->sServerFuncTab.pfnUnmapDevPhysHeap == NULL)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Host PVZ config: Functions for mapping a Guest's heaps not implemented\n", __func__)); + eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG; + } +#endif + + PvzConnectionRelease(psVmmPvz); +e0: + return eError; +} +#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ + +PVRSRV_ERROR PvzConnectionInit(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + PVRSRV_ERROR eError; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + +#if (RGX_NUM_OS_SUPPORTED == 1) + PVR_DPF((PVR_DBG_ERROR, "This kernel driver does not support virtualization. Please rebuild with RGX_NUM_OS_SUPPORTED > 1")); + PVR_DPF((PVR_DBG_ERROR, "Halting initialisation, cannot transition to %s mode", + psPVRSRVData->eDriverMode == DRIVER_MODE_HOST ? "host" : "guest")); + eError = PVRSRV_ERROR_NOT_SUPPORTED; + goto e0; +#else + + /* Create para-virtualization connection lock */ + eError = OSLockCreate(&psPVRSRVData->hPvzConnectionLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: OSLockCreate failed (%s)", + __func__, + PVRSRVGetErrorString(eError))); + + goto e0; + } + + /* Create VM manager para-virtualization connection */ + eError = VMMCreatePvzConnection((VMM_PVZ_CONNECTION **)&psPVRSRVData->hPvzConnection); + if (eError != PVRSRV_OK) + { + OSLockDestroy(psPVRSRVData->hPvzConnectionLock); + psPVRSRVData->hPvzConnectionLock = NULL; + + PVR_DPF((PVR_DBG_ERROR, + "%s: Unable to create PVZ connection (%s)", + __func__, + PVRSRVGetErrorString(eError))); + + goto e0; + } + + /* Ensure pvz connection is configured correctly */ + eError = PvzConnectionValidate(psDevConfig); + PVR_LOG_RETURN_IF_ERROR(eError, "PvzConnectionValidate"); + + psPVRSRVData->abVmOnline[RGXFW_HOST_OS] = IMG_TRUE; +#endif +e0: + return eError; +} + +void PvzConnectionDeInit(void) +{ + PVRSRV_DATA *psPVRSRVData 
= PVRSRVGetPVRSRVData(); + + VMMDestroyPvzConnection(psPVRSRVData->hPvzConnection); + psPVRSRVData->hPvzConnection = NULL; + + OSLockDestroy(psPVRSRVData->hPvzConnectionLock); + psPVRSRVData->hPvzConnectionLock = NULL; +} + +VMM_PVZ_CONNECTION* PvzConnectionAcquire(void) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVR_ASSERT(psPVRSRVData->hPvzConnection != NULL); + return psPVRSRVData->hPvzConnection; +} + +void PvzConnectionRelease(VMM_PVZ_CONNECTION *psParaVz) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + /* Nothing to do, sanity check the pointer passed back */ + PVR_ASSERT(psParaVz == psPVRSRVData->hPvzConnection); +} + +/****************************************************************************** + End of file (vz_vmm_pvz.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/common/vz_vmm_vm.c b/drivers/mcst/gpu-imgtec/services/server/common/vz_vmm_vm.c new file mode 100644 index 000000000000..254b1bba0821 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/common/vz_vmm_vm.c @@ -0,0 +1,225 @@ +/*************************************************************************/ /*! +@File vz_vmm_vm.c +@Title System virtualization VM support APIs +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description System virtualization VM support functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include "osfunc.h" +#include "pvrsrv.h" +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv.h" +#include "pvrsrv_error.h" +#include "vz_vm.h" +#include "rgxfwutils.h" + +bool IsVmOnline(IMG_UINT32 ui32OSID) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + return (ui32OSID >= RGX_NUM_OS_SUPPORTED) ? (false) : (psPVRSRVData->abVmOnline[ui32OSID]); +} + +PVRSRV_ERROR PvzOnVmOnline(IMG_UINT32 ui32OSid, IMG_UINT32 ui32Priority) +{ +#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1) + PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS; +#else + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDevNode; + PVRSRV_RGXDEV_INFO *psDevInfo; + + if (ui32OSid == 0 || ui32OSid >= RGX_NUM_OS_SUPPORTED) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: invalid OSID (%d)", + __func__, ui32OSid)); + + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e0; + } + + if (psPVRSRVData->abVmOnline[ui32OSid]) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: OSID %d is already enabled.", + __func__, ui32OSid)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e0; + } + + /* For now, limit support to single device setups */ + psDevNode = psPVRSRVData->psDeviceNodeList; + psDevInfo = psDevNode->pvDevice; + + if (psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT) + { + + /* Firmware not initialized yet, do it here */ + eError = PVRSRVCommonDeviceInitialise(psDevNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to initialize firmware (%s)", + __func__, PVRSRVGetErrorString(eError))); + goto e0; + } + } + + /* request new priority */ + eError = RGXFWChangeOSidPriority(psDevInfo, ui32OSid, ui32Priority); + if (eError != PVRSRV_OK) + { + goto e0; + } + + psPVRSRVData->abVmOnline[ui32OSid] = IMG_TRUE; + +e0: +#endif + return eError; +} + +PVRSRV_ERROR PvzOnVmOffline(IMG_UINT32 ui32OSid) +{ +#if defined(RGX_NUM_OS_SUPPORTED) && 
(RGX_NUM_OS_SUPPORTED == 1) + PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS; +#else + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDevNode; + PVRSRV_RGXDEV_INFO *psDevInfo; + + if (ui32OSid == 0 || ui32OSid >= RGX_NUM_OS_SUPPORTED) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: invalid OSID (%d)", + __func__, ui32OSid)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e0; + } + + if (!psPVRSRVData->abVmOnline[ui32OSid]) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: OSID %d is already disabled.", + __func__, ui32OSid)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e0; + } + + /* For now, limit support to single device setups */ + psDevNode = psPVRSRVData->psDeviceNodeList; + psDevInfo = psDevNode->pvDevice; + + eError = RGXFWSetFwOsState(psDevInfo, ui32OSid, RGXFWIF_OS_OFFLINE); + if (eError == PVRSRV_OK) + { + psPVRSRVData->abVmOnline[ui32OSid] = IMG_FALSE; + } + +e0: +#endif + return eError; +} + +PVRSRV_ERROR PvzVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_RGXDEV_INFO *psDevInfo; + + psDeviceNode = psPVRSRVData->psDeviceNodeList; + psDevInfo = psDeviceNode->pvDevice; + + switch (eVMMParamType) + { +#if defined(SUPPORT_RGX) + case VMM_CONF_PRIO_OSID0: + case VMM_CONF_PRIO_OSID1: + case VMM_CONF_PRIO_OSID2: + case VMM_CONF_PRIO_OSID3: + case VMM_CONF_PRIO_OSID4: + case VMM_CONF_PRIO_OSID5: + case VMM_CONF_PRIO_OSID6: + case VMM_CONF_PRIO_OSID7: + { + IMG_UINT32 ui32OSid = eVMMParamType; + IMG_UINT32 ui32Prio = ui32ParamValue; + + if (ui32OSid < RGX_NUM_OS_SUPPORTED) + { + eError = RGXFWChangeOSidPriority(psDevInfo, ui32OSid, ui32Prio); + } + else + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + break; + } + case VMM_CONF_ISOL_THRES: + { + IMG_UINT32 ui32Threshold = ui32ParamValue; + eError = RGXFWSetOSIsolationThreshold(psDevInfo, ui32Threshold); + 
break; + } + case VMM_CONF_HCS_DEADLINE: + { + IMG_UINT32 ui32HCSDeadline = ui32ParamValue; + eError = RGXFWSetHCSDeadline(psDevInfo, ui32HCSDeadline); + break; + } +#else + PVR_UNREFERENCED_PARAMETER(ui32ParamValue); +#endif + default: + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + } + + return eError; +} + +/****************************************************************************** + End of file (vz_vmm_vm.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rgxbreakpoint.h b/drivers/mcst/gpu-imgtec/services/server/devices/rgxbreakpoint.h new file mode 100644 index 000000000000..fc6656887009 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rgxbreakpoint.h @@ -0,0 +1,141 @@ +/*************************************************************************/ /*! +@File +@Title RGX breakpoint functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the RGX breakpoint functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__RGXBREAKPOINT_H__) +#define __RGXBREAKPOINT_H__ + +#include "pvr_debug.h" +#include "rgxutils.h" +#include "rgxfwutils.h" +#include "rgx_fwif_km.h" + +/*! 
+******************************************************************************* + @Function PVRSRVRGXSetBreakpointKM + + @Description + Server-side implementation of RGXSetBreakpoint + + @Input psDeviceNode - RGX Device node + @Input eDataMaster - Data Master to schedule command for + @Input hMemCtxPrivData - memory context private data + @Input ui32BPAddr - Address of breakpoint + @Input ui32HandlerAddr - Address of breakpoint handler + @Input ui32BPCtl - Breakpoint controls + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_HANDLE hMemCtxPrivData, + RGXFWIF_DM eFWDataMaster, + IMG_UINT32 ui32BPAddr, + IMG_UINT32 ui32HandlerAddr, + IMG_UINT32 ui32DataMaster); + +/*! +******************************************************************************* + @Function PVRSRVRGXClearBreakpointKM + + @Description + Server-side implementation of RGXClearBreakpoint + + @Input psDeviceNode - RGX Device node + @Input hMemCtxPrivData - memory context private data + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXClearBreakpointKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_HANDLE hMemCtxPrivData); + +/*! +******************************************************************************* + @Function PVRSRVRGXEnableBreakpointKM + + @Description + Server-side implementation of RGXEnableBreakpoint + + @Input psDeviceNode - RGX Device node + @Input hMemCtxPrivData - memory context private data + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXEnableBreakpointKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_HANDLE hMemCtxPrivData); + +/*! 
+******************************************************************************* + @Function PVRSRVRGXDisableBreakpointKM + + @Description + Server-side implementation of RGXDisableBreakpoint + + @Input psDeviceNode - RGX Device node + @Input hMemCtxPrivData - memory context private data + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXDisableBreakpointKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_HANDLE hMemCtxPrivData); + +/*! +******************************************************************************* + @Function PVRSRVRGXOverallocateBPRegistersKM + + @Description + Server-side implementation of RGXOverallocateBPRegisters + + @Input psDeviceNode - RGX Device node + @Input ui32TempRegs - Number of temporary registers to overallocate + @Input ui32SharedRegs - Number of shared registers to overallocate + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXOverallocateBPRegistersKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32TempRegs, + IMG_UINT32 ui32SharedRegs); +#endif /* __RGXBREAKPOINT_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rgxbvnc.h b/drivers/mcst/gpu-imgtec/services/server/devices/rgxbvnc.h new file mode 100644 index 000000000000..bc5bac6a040e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rgxbvnc.h @@ -0,0 +1,90 @@ +/*************************************************************************/ /*! +@File +@Title BVNC handling specific header file +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the BVNC related work + (see hwdefs/km/rgx_bvnc_table_km.h and + hwdefs/km/rgx_bvnc_defs_km.h +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__RGXBVNC_H__) +#define __RGXBVNC_H__ + +#include "pvrsrv_error.h" +#include "img_types.h" +#include "rgxdevice.h" + +/*************************************************************************/ /*! +@brief This function detects the Rogue variant and configures the + essential config info associated with such a device. + The config info includes features, errata, etc +@param psDeviceNode - Device Node pointer +@return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXBvncInitialiseConfiguration(PVRSRV_DEVICE_NODE *psDeviceNode); + +/*************************************************************************/ /*! +@brief This function checks if a particular feature is available on + the given rgx device +@param psDeviceNode - Device Node pointer +@param ui64FeatureMask - feature to be checked +@return true if feature is supported, false otherwise +*/ /**************************************************************************/ +IMG_BOOL RGXBvncCheckFeatureSupported(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64FeatureMask); + +/*************************************************************************/ /*! +@brief This function returns the value of a feature on the given + rgx device +@param psDeviceNode - Device Node pointer +@param ui64FeatureMask - feature for which to return the value +@return the value for the specified feature +*/ /**************************************************************************/ +IMG_INT32 RGXBvncGetSupportedFeatureValue(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_FEATURE_WITH_VALUE_INDEX eFeatureIndex); + +/*************************************************************************/ /*! +@brief This function validates that the BVNC values in CORE_ID regs are + consistent and correct. 
+@param psDeviceNode - Device Node pointer +@param GivenBVNC - BVNC to be verified against as supplied by caller +@param CoreIdMask - mask of components to pull from CORE_ID register +@return success or fail +*/ /**************************************************************************/ +PVRSRV_ERROR RGXVerifyBVNC(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64GivenBVNC, IMG_UINT64 ui64CoreIdMask); + +#endif /* __RGXBVNC_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rgxfwdbg.c b/drivers/mcst/gpu-imgtec/services/server/devices/rgxfwdbg.c new file mode 100644 index 000000000000..13832587671e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rgxfwdbg.c @@ -0,0 +1,267 @@ +/*************************************************************************/ /*! +@File +@Title Debugging and miscellaneous functions server implementation +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Kernel services functions for debugging and other + miscellaneous functionality. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pvrsrv.h" +#include "pvr_debug.h" +#include "rgxfwdbg.h" +#include "rgxfwutils.h" +#include "rgxta3d.h" +#include "pdump_km.h" +#include "mmu_common.h" +#include "devicemem_server.h" +#include "osfunc.h" + +PVRSRV_ERROR +PVRSRVRGXFWDebugQueryFWLogKM( + const CONNECTION_DATA *psConnection, + const PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 *pui32RGXFWLogType) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_UNREFERENCED_PARAMETER(psConnection); + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + if (!psDeviceNode || !pui32RGXFWLogType) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDevInfo = psDeviceNode->pvDevice; + + if (!psDevInfo || !psDevInfo->psRGXFWIfTraceBufCtl) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + *pui32RGXFWLogType = psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType; + return PVRSRV_OK; +} + + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetFWLogKM( + const CONNECTION_DATA * psConnection, + const PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32RGXFWLogType) +{ + RGXFWIF_KCCB_CMD sLogTypeUpdateCmd; + PVRSRV_DEV_POWER_STATE ePowerState; + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; + IMG_UINT32 ui32OldRGXFWLogTpe = psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType; + IMG_UINT32 ui32kCCBCommandSlot; + IMG_BOOL bWaitForFwUpdate = IMG_FALSE; + + PVR_UNREFERENCED_PARAMETER(psConnection); + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* check log type is valid */ + if (ui32RGXFWLogType & ~RGXFWIF_LOG_TYPE_MASK) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + OSLockAcquire(psDevInfo->hRGXFWIfBufInitLock); + + /* set the new log type and ensure the new log type is written to memory + * before requesting the FW to read it + */ + psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType = ui32RGXFWLogType; + OSMemoryBarrier(); + + /* Allocate firmware trace buffer resource(s) if not already done */ + if 
(RGXTraceBufferIsInitRequired(psDevInfo)) + { + eError = RGXTraceBufferInitOnDemandResources(psDevInfo, RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS); + } +#if defined(SUPPORT_TBI_INTERFACE) + /* Check if LogType is TBI then allocate resource on demand and copy + * SFs to it + */ + else if (RGXTBIBufferIsInitRequired(psDevInfo)) + { + eError = RGXTBIBufferInitOnDemandResources(psDevInfo); + } + + /* TBI buffer address will be 0 if not initialised */ + sLogTypeUpdateCmd.uCmdData.sTBIBuffer = psDevInfo->sRGXFWIfTBIBuffer; +#else + sLogTypeUpdateCmd.uCmdData.sTBIBuffer.ui32Addr = 0; +#endif + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate resource on-demand. Reverting to old value", + __func__)); + psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType = ui32OldRGXFWLogTpe; + OSMemoryBarrier(); + + OSLockRelease(psDevInfo->hRGXFWIfBufInitLock); + + return eError; + } + + OSLockRelease(psDevInfo->hRGXFWIfBufInitLock); + + eError = PVRSRVPowerLock((const PPVRSRV_DEVICE_NODE) psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire power lock (%u)", + __func__, + eError)); + return eError; + } + + eError = PVRSRVGetDevicePowerState((const PPVRSRV_DEVICE_NODE) psDeviceNode, &ePowerState); + + if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF)) + { + /* Ask the FW to update its cached version of logType value */ + sLogTypeUpdateCmd.eCmdType = RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE; + + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, + &sLogTypeUpdateCmd, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSendCommandAndGetKCCBSlot", unlock); + bWaitForFwUpdate = IMG_TRUE; + } + +unlock: + PVRSRVPowerUnlock((const PPVRSRV_DEVICE_NODE) psDeviceNode); + if (bWaitForFwUpdate) + { + /* Wait for the LogType value to be updated in FW */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_IF_ERROR(eError, 
"RGXWaitForKCCBSlotUpdate"); + } + return eError; +} + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetHCSDeadlineKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32HCSDeadlineMS) +{ + PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; + PVR_UNREFERENCED_PARAMETER(psConnection); + + return RGXFWSetHCSDeadline(psDevInfo, ui32HCSDeadlineMS); +} + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetOSidPriorityKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32OSid, + IMG_UINT32 ui32OSidPriority) +{ + PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; + PVR_UNREFERENCED_PARAMETER(psConnection); + + return RGXFWChangeOSidPriority(psDevInfo, ui32OSid, ui32OSidPriority); +} + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetOSNewOnlineStateKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32OSid, + IMG_UINT32 ui32OSNewState) +{ + PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_OS_STATE_CHANGE eState; + PVR_UNREFERENCED_PARAMETER(psConnection); + + eState = (ui32OSNewState) ? 
(RGXFWIF_OS_ONLINE) : (RGXFWIF_OS_OFFLINE); + return RGXFWSetFwOsState(psDevInfo, ui32OSid, eState); +} + +PVRSRV_ERROR +PVRSRVRGXFWDebugPHRConfigureKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PHRMode) +{ + PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; + PVR_UNREFERENCED_PARAMETER(psConnection); + + return RGXFWConfigPHR(psDevInfo, + ui32PHRMode); +} + +PVRSRV_ERROR +PVRSRVRGXFWDebugDumpFreelistPageListKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; + DLLIST_NODE *psNode, *psNext; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + if (dllist_is_empty(&psDevInfo->sFreeListHead)) + { + return PVRSRV_OK; + } + + PVR_LOG(("---------------[ Begin Freelist Page List Dump ]------------------")); + + OSLockAcquire(psDevInfo->hLockFreeList); + dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext) + { + RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode); + RGXDumpFreeListPageList(psFreeList); + } + OSLockRelease(psDevInfo->hLockFreeList); + + PVR_LOG(("----------------[ End Freelist Page List Dump ]-------------------")); + + return PVRSRV_OK; + +} diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rgxfwdbg.h b/drivers/mcst/gpu-imgtec/services/server/devices/rgxfwdbg.h new file mode 100644 index 000000000000..3cb47775e5e7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rgxfwdbg.h @@ -0,0 +1,107 @@ +/*************************************************************************/ /*! +@File +@Title Debugging and miscellaneous functions server interface +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Kernel services functions for debugging and other + miscellaneous functionality. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGXFWDBG_H) +#define RGXFWDBG_H + +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "device.h" +#include "pmr.h" + +#include "connection_server.h" + + +PVRSRV_ERROR +PVRSRVRGXFWDebugInitFWImageKM( + PMR *psFWImgDestPMR, + PMR *psFWImgSrcPMR, + IMG_UINT64 ui64FWImgLen, + PMR *psFWImgSigPMR, + IMG_UINT64 ui64FWSigLen); + +PVRSRV_ERROR +PVRSRVRGXFWDebugQueryFWLogKM( + const CONNECTION_DATA *psConnection, + const PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 *pui32RGXFWLogType); + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetFWLogKM( + const CONNECTION_DATA *psConnection, + const PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32RGXFWLogType); + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetHCSDeadlineKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32HCSDeadlineMS); + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetOSidPriorityKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32OSid, + IMG_UINT32 ui32OSidPriority); + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetOSNewOnlineStateKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32OSid, + IMG_UINT32 ui32OSNewState); + +PVRSRV_ERROR +PVRSRVRGXFWDebugPHRConfigureKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PHRMode); + +PVRSRV_ERROR +PVRSRVRGXFWDebugDumpFreelistPageListKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode); + +#endif diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rgxfwtrace_strings.c b/drivers/mcst/gpu-imgtec/services/server/devices/rgxfwtrace_strings.c new file mode 100644 index 000000000000..d95050871994 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rgxfwtrace_strings.c @@ -0,0 +1,56 @@ +/*************************************************************************/ /*! 
+@File rgxfwtrace_strings.c +@Title RGX Firmware trace strings +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "rgx_fwif_sf.h" +#include "fwtrace_string.h" + +/* The tuple pairs that will be generated using XMacros will be stored here. + * This macro definition must match the definition of SFids in rgx_fwif_sf.h + */ +const RGXKM_STID_FMT SFs[]= { +#define X(a, b, c, d, e) { RGXFW_LOG_CREATESFID(a,b,e), d }, + RGXFW_LOG_SFIDLIST +#undef X +}; + +const IMG_UINT32 g_ui32SFsCount = ARRAY_SIZE(SFs); diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rgxkicksync.h b/drivers/mcst/gpu-imgtec/services/server/devices/rgxkicksync.h new file mode 100644 index 000000000000..b968520bbe3c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rgxkicksync.h @@ -0,0 +1,129 @@ +/*************************************************************************/ /*! +@File rgxkicksync.h +@Title Server side of the sync only kick API +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ /**************************************************************************/

#if !defined(RGXKICKSYNC_H)
#define RGXKICKSYNC_H

#include "pvrsrv_error.h"
#include "connection_server.h"
#include "sync_server.h"
#include "rgxdevice.h"


/* Opaque server-side kick-sync context; definition lives in rgxkicksync.c. */
typedef struct _RGX_SERVER_KICKSYNC_CONTEXT_ RGX_SERVER_KICKSYNC_CONTEXT;

/**************************************************************************/ /*!
@Function       DumpKickSyncCtxtsInfo
@Description    Function that dumps debug info of kick sync ctxs on this device
@Return         none
*/ /**************************************************************************/
void
DumpKickSyncCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
                      DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
                      void *pvDumpDebugFile,
                      IMG_UINT32 ui32VerbLevel);

/**************************************************************************/ /*!
@Function       CheckForStalledClientKickSyncCtxt
@Description    Function that checks if a kick sync client is stalled
@Return         RGX_KICK_TYPE_DM_GP on stalled context. Otherwise, 0
*/ /**************************************************************************/
IMG_UINT32 CheckForStalledClientKickSyncCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);

/**************************************************************************/ /*!
@Function       PVRSRVRGXCreateKickSyncContextKM
@Description    Server-side implementation of RGXCreateKicksyncContext
@Return         PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
*/ /**************************************************************************/
PVRSRV_ERROR
PVRSRVRGXCreateKickSyncContextKM(CONNECTION_DATA * psConnection,
                                 PVRSRV_DEVICE_NODE * psDeviceNode,
                                 IMG_HANDLE hMemCtxPrivData,
                                 IMG_UINT32 ui32PackedCCBSizeU88,
                                 IMG_UINT32 ui32ContextFlags,
                                 RGX_SERVER_KICKSYNC_CONTEXT ** ppsKicksyncContext);



/**************************************************************************/ /*!
@Function       PVRSRVRGXDestroyKickSyncContextKM
@Description    Server-side implementation of RGXDestroyKicksyncContext
@Return         PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
*/ /**************************************************************************/
PVRSRV_ERROR
PVRSRVRGXDestroyKickSyncContextKM(RGX_SERVER_KICKSYNC_CONTEXT * psKicksyncContext);

/**************************************************************************/ /*!
@Function       PVRSRVRGXSetKickSyncContextPropertyKM
@Description    Server-side implementation of RGXSetKickSyncContextProperty
@Return         PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
*/ /**************************************************************************/
PVRSRV_ERROR PVRSRVRGXSetKickSyncContextPropertyKM(RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContext,
                                                   RGX_CONTEXT_PROPERTY eContextProperty,
                                                   IMG_UINT64 ui64Input,
                                                   IMG_UINT64 *pui64Output);

/**************************************************************************/ /*!
@Function       PVRSRVRGXKickSyncKM
@Description    Kicks a sync only command
@Return         PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
*/ /**************************************************************************/
PVRSRV_ERROR
PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKicksyncContext,
                    IMG_UINT32 ui32ClientCacheOpSeqNum,
                    IMG_UINT32 ui32ClientUpdateCount,
                    SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFODevVarBlock,
                    IMG_UINT32 * paui32ClientUpdateDevVarOffset,
                    IMG_UINT32 * paui32ClientUpdateValue,
                    PVRSRV_FENCE iCheckFence,
                    PVRSRV_TIMELINE iUpdateTimeline,
                    PVRSRV_FENCE * piUpdateFence,
                    IMG_CHAR szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],

                    IMG_UINT32 ui32ExtJobRef);

#endif /* RGXKICKSYNC_H */

/**************************************************************************//**
 End of file (rgxkicksync.h)
******************************************************************************/
diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rgxmulticore.h b/drivers/mcst/gpu-imgtec/services/server/devices/rgxmulticore.h
new file mode 100644
index 000000000000..b0030f28743c
--- /dev/null
+++ b/drivers/mcst/gpu-imgtec/services/server/devices/rgxmulticore.h
@@ -0,0 +1,66 @@
+/*************************************************************************/ /*!
+@File           rgxmulticore.h
+@Title          Functions related to multicore devices
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Functions related to multicore devices.
+@License        Dual MIT/GPLv2

+The contents of this file are subject to the MIT license as set out below.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXMULTICORE_H +#define RGXMULTICORE_H + +#include "pvrsrv_error.h" +#include "pvrsrv.h" + +/** + * @Function RGXGetMultiCoreInfo + * @Description Returns hardware configuration info about multi-core devices. + * @Input ui32CapsSize is number of entries in Caps array. + * @Output pui32NumCores is filled in with the number of cores in a + * multi-core device. + * @Output pui32Caps is filled in with per core caps info. + * @Return PVRSRV_OK on success or PVRSRV_ERROR_NOT_SUPPORTED if not a + * multi-core device. + */ +PVRSRV_ERROR RGXGetMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32CapsSize, + IMG_UINT32* pui32NumCores, + IMG_UINT64* pui32Caps); + +#endif /* RGXMULTICORE_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rgxpdvfs.c b/drivers/mcst/gpu-imgtec/services/server/devices/rgxpdvfs.c new file mode 100644 index 000000000000..52f17b42c70c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rgxpdvfs.c @@ -0,0 +1,279 @@ +/*************************************************************************/ /*! +@File rgxpdvfs.c +@Title RGX Proactive DVFS Functionality +@Codingstyle IMG +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Kernel mode Proactive DVFS Functionality. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "rgxpdvfs.h" +#include "rgxfwutils.h" +#include "rgx_options.h" +#include "rgxtimecorr.h" + +#define USEC_TO_MSEC 1000 + +static inline IMG_BOOL _PDVFSEnabled(void) +{ + PVRSRV_DATA *psSRVData = PVRSRVGetPVRSRVData(); + + if (psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildOptions & + psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildOptions & + OPTIONS_PDVFS_MASK) + { + return IMG_TRUE; + } + + return IMG_FALSE; +} + +PVRSRV_ERROR PDVFSLimitMaxFrequency(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32MaxOPPPoint) +{ + RGXFWIF_KCCB_CMD sGPCCBCmd; + PVRSRV_ERROR eError; + IMG_UINT32 ui32CmdKCCBSlot; + + if (!_PDVFSEnabled()) + { + /* No error message to avoid excessive messages */ + return PVRSRV_OK; + } + + /* send feedback */ + sGPCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ; + sGPCCBCmd.uCmdData.sPDVFSMaxFreqData.ui32MaxOPPPoint = ui32MaxOPPPoint; + + /* Submit command to the firmware. */ + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, + &sGPCCBCmd, + PDUMP_FLAGS_CONTINUOUS, + &ui32CmdKCCBSlot); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + return PVRSRV_OK; +} + +PVRSRV_ERROR PDVFSLimitMinFrequency(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32MinOPPPoint) +{ + RGXFWIF_KCCB_CMD sGPCCBCmd; + PVRSRV_ERROR eError; + IMG_UINT32 ui32CmdKCCBSlot; + + if (!_PDVFSEnabled()) + { + /* No error message to avoid excessive messages */ + return PVRSRV_OK; + } + + /* send feedback */ + sGPCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ; + sGPCCBCmd.uCmdData.sPDVFSMinFreqData.ui32MinOPPPoint = ui32MinOPPPoint; + + /* Submit command to the firmware. 
*/ + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, + &sGPCCBCmd, + PDUMP_FLAGS_CONTINUOUS, + &ui32CmdKCCBSlot); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + return PVRSRV_OK; +} + + +#if (PDVFS_COM == PDVFS_COM_HOST) +/*************************************************************************/ /*! +@Function PDVFSProcessCoreClkChangeRequest +@Description Processes a core clock rate change request. +@Input psDevInfo A pointer to PVRSRV_RGXDEV_INFO. +@Input ui32CoreClockRate New core clock rate. +@Return PVRSRV_ERROR. +*/ /**************************************************************************/ +PVRSRV_ERROR PDVFSProcessCoreClkChangeRequest(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32CoreClockRate) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = psDevInfo->psDeviceNode->psDevConfig; + IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = &psDevConfig->sDVFS.sDVFSDeviceCfg; + RGX_TIMING_INFORMATION *psRGXTimingInfo = ((RGX_DATA*)(psDevConfig->hDevData))->psRGXTimingInfo; + IMG_UINT32 ui32CoreClockRateCurrent = psRGXTimingInfo->ui32CoreClockSpeed; + const IMG_OPP *psOpp = NULL; + IMG_UINT32 ui32Index; + PVRSRV_ERROR eError; + + if (!_PDVFSEnabled()) + { + /* No error message to avoid excessive messages */ + return PVRSRV_OK; + } + + PVR_DPF((PVR_DBG_MESSAGE, "Core clock rate = %u", ui32CoreClockRate)); + + /* Find the matching OPP (Exact). */ + for (ui32Index = 0; ui32Index < psDVFSDeviceCfg->ui32OPPTableSize; ui32Index++) + { + if (ui32CoreClockRate == psDVFSDeviceCfg->pasOPPTable[ui32Index].ui32Freq) + { + psOpp = &psDVFSDeviceCfg->pasOPPTable[ui32Index]; + break; + } + } + + if (! 
psOpp) + { + PVR_DPF((PVR_DBG_ERROR, "Frequency not present in OPP table - %u", ui32CoreClockRate)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = PVRSRVDevicePreClockSpeedChange(psDevInfo->psDeviceNode, psDVFSDeviceCfg->bIdleReq, NULL); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVDevicePreClockSpeedChange failed")); + return eError; + } + + psRGXTimingInfo->ui32CoreClockSpeed = ui32CoreClockRate; + + /* Increasing frequency, change voltage first */ + if (ui32CoreClockRate > ui32CoreClockRateCurrent) + { + psDVFSDeviceCfg->pfnSetVoltage(psOpp->ui32Volt); + } + + psDVFSDeviceCfg->pfnSetFrequency(ui32CoreClockRate); + + /* Decreasing frequency, change frequency first */ + if (ui32CoreClockRate < ui32CoreClockRateCurrent) + { + psDVFSDeviceCfg->pfnSetVoltage(psOpp->ui32Volt); + } + + PVRSRVDevicePostClockSpeedChange(psDevInfo->psDeviceNode, psDVFSDeviceCfg->bIdleReq, NULL); + + return PVRSRV_OK; +} +#else +/*************************************************************************/ /*! +@Function PDVFSProcessCoreClkChangeNotification +@Description Processes a core clock rate change notification. +@Input psDevInfo A pointer to PVRSRV_RGXDEV_INFO. +@Input ui32CoreClockRate New core clock rate. +@Return PVRSRV_ERROR. 
+*/ /**************************************************************************/ +PVRSRV_ERROR PDVFSProcessCoreClkChangeNotification(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32CoreClockRate) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = psDevInfo->psDeviceNode->psDevConfig; + RGX_TIMING_INFORMATION *psRGXTimingInfo = ((RGX_DATA*)(psDevConfig->hDevData))->psRGXTimingInfo; + PVRSRV_DEV_POWER_STATE ePowerState; + PVRSRV_ERROR eError; + + eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire lock (%s)", + __func__, PVRSRVGetErrorString(eError))); + return eError; + } + + eError = PVRSRVGetDevicePowerState(psDevInfo->psDeviceNode, &ePowerState); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire power state (%s)", + __func__, PVRSRVGetErrorString(eError))); + PVRSRVPowerUnlock(psDevInfo->psDeviceNode); + return eError; + } + + if ((ePowerState != PVRSRV_DEV_POWER_STATE_OFF) + && (psDevInfo->psRGXFWIfFwSysData->ePowState != RGXFWIF_POW_OFF)) + { + /* Update GPU frequency and timer correlation related data */ + RGXTimeCorrEnd(psDevInfo->psDeviceNode, RGXTIMECORR_EVENT_DVFS); + psRGXTimingInfo->ui32CoreClockSpeed = ui32CoreClockRate; + RGXTimeCorrBegin(psDevInfo->psDeviceNode, RGXTIMECORR_EVENT_DVFS); + } + else + { + psRGXTimingInfo->ui32CoreClockSpeed = ui32CoreClockRate; + } + + PVRSRVPowerUnlock(psDevInfo->psDeviceNode); + + return PVRSRV_OK; +} +#endif + + +#if defined(RGXFW_META_SUPPORT_2ND_THREAD) +/*************************************************************************/ /*! +@Function RGXPDVFSCheckCoreClkRateChange +@Description Checks if core clock rate has changed since the last snap-shot. +@Input psDevInfo A pointer to PVRSRV_RGXDEV_INFO. +@Return None. 
+*/ /**************************************************************************/ +void RGXPDVFSCheckCoreClkRateChange(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_UINT32 ui32CoreClkRate = *psDevInfo->pui32RGXFWIFCoreClkRate; + + if (!_PDVFSEnabled()) + { + /* No error message to avoid excessive messages */ + return; + } + + if (ui32CoreClkRate != 0 && psDevInfo->ui32CoreClkRateSnapshot != ui32CoreClkRate) + { + psDevInfo->ui32CoreClkRateSnapshot = ui32CoreClkRate; + PDVFS_PROCESS_CORE_CLK_RATE_CHANGE(psDevInfo, ui32CoreClkRate); + } +} +#endif diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rgxpdvfs.h b/drivers/mcst/gpu-imgtec/services/server/devices/rgxpdvfs.h new file mode 100644 index 000000000000..13a94b5a031b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rgxpdvfs.h @@ -0,0 +1,68 @@ +/*************************************************************************/ /*! +@File rgxpdvfs.h +@Title RGX Proactive DVFS Functionality +@Codingstyle IMG +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the kernel mode Proactive DVFS Functionality. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXPDVFS_H +#define RGXPDVFS_H + +#include "img_types.h" +#include "rgxdevice.h" + + +PVRSRV_ERROR PDVFSLimitMaxFrequency(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32MaxOPPPoint); + +PVRSRV_ERROR PDVFSLimitMinFrequency(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32MinOPPPoint); + +#if (PDVFS_COM == PDVFS_COM_HOST) +PVRSRV_ERROR PDVFSProcessCoreClkChangeRequest(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32CoreClockRate); +#define PDVFS_PROCESS_CORE_CLK_RATE_CHANGE(devinfo, clk) PDVFSProcessCoreClkChangeRequest(devinfo, clk) +#else +PVRSRV_ERROR PDVFSProcessCoreClkChangeNotification(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32CoreClockRate); +#define PDVFS_PROCESS_CORE_CLK_RATE_CHANGE(devinfo, clk) PDVFSProcessCoreClkChangeNotification(devinfo, clk) +#endif + +#if defined(RGXFW_META_SUPPORT_2ND_THREAD) +void RGXPDVFSCheckCoreClkRateChange(PVRSRV_RGXDEV_INFO *psDevInfo); +#endif + +#endif /* RGXPDVFS_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rgxregconfig.h b/drivers/mcst/gpu-imgtec/services/server/devices/rgxregconfig.h new file mode 100644 index 000000000000..5edb2b96e765 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rgxregconfig.h @@ -0,0 +1,130 @@ +/*************************************************************************/ /*! +@File +@Title RGX register configuration functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the RGX register configuration functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__RGXREGCONFIG_H__) +#define __RGXREGCONFIG_H__ + +#include "pvr_debug.h" +#include "rgxutils.h" +#include "rgxfwutils.h" +#include "rgx_fwif_km.h" + +/*! +******************************************************************************* + @Function PVRSRVRGXSetRegConfigTypeKM + + @Description + Server-side implementation of RGXSetRegConfig + + @Input psDeviceNode - RGX Device node + @Input ui8RegPowerIsland - Reg configuration + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXSetRegConfigTypeKM(CONNECTION_DATA * psDevConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT8 ui8RegPowerIsland); +/*! +******************************************************************************* + @Function PVRSRVRGXAddRegConfigKM + + @Description + Server-side implementation of RGXAddRegConfig + + @Input psDeviceNode - RGX Device node + @Input ui64RegAddr - Register address + @Input ui64RegValue - Reg value + @Input ui64RegMask - Reg mask + + @Return PVRSRV_ERROR +******************************************************************************/ + +PVRSRV_ERROR PVRSRVRGXAddRegConfigKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui64RegAddr, + IMG_UINT64 ui64RegValue, + IMG_UINT64 ui64RegMask); + +/*! +******************************************************************************* + @Function PVRSRVRGXClearRegConfigKM + + @Description + Server-side implementation of RGXClearRegConfig + + @Input psDeviceNode - RGX Device node + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXClearRegConfigKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode); + +/*!
+******************************************************************************* + @Function PVRSRVRGXEnableRegConfigKM + + @Description + Server-side implementation of RGXEnableRegConfig + + @Input psDeviceNode - RGX Device node + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXEnableRegConfigKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode); + +/*! +******************************************************************************* + @Function PVRSRVRGXDisableRegConfigKM + + @Description + Server-side implementation of RGXDisableRegConfig + + @Input psDeviceNode - RGX Device node + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXDisableRegConfigKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode); + +#endif /* __RGXREGCONFIG_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rgxshader.c b/drivers/mcst/gpu-imgtec/services/server/devices/rgxshader.c new file mode 100644 index 000000000000..2f05dcac09f6 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rgxshader.c @@ -0,0 +1,304 @@ +/*************************************************************************/ /*! +@File rgxshader.c +@Title TQ Shader Load +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Shader code and info are shared for all context on the device. + If allocation doesn't already exist, read shader data from file + and allocate PMR memory. PMR memory is not deallocated until + device deinit. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "rgxshader.h" +#include "osfunc_common.h" +#include "rgxdevice.h" +#include "pdump_km.h" +#include "physmem.h" +#include "ri_server.h" +#include "pvr_ricommon.h" + +static void +RGXShaderReadHeader(OS_FW_IMAGE *psShaderFW, RGX_SHADER_HEADER *psHeader) +{ + const void * pvData; + + pvData = OSFirmwareData(psShaderFW); + + OSDeviceMemCopy(psHeader, pvData, sizeof(RGX_SHADER_HEADER)); +} + +static size_t +RGXShaderCLIMemSize(OS_FW_IMAGE *psShaderFW) +{ + RGX_SHADER_HEADER sHeader; + + RGXShaderReadHeader(psShaderFW, &sHeader); + + return sHeader.ui32SizeClientMem; +} + +static size_t +RGXShaderUSCMemSize(OS_FW_IMAGE *psShaderFW) +{ + RGX_SHADER_HEADER sHeader; + + RGXShaderReadHeader(psShaderFW, &sHeader); + + return sHeader.ui32SizeFragment; +} + +static void * +RGXShaderCLIMem(OS_FW_IMAGE *psShaderFW) +{ + return (void*)OSFirmwareData(psShaderFW); +} + +static void * +RGXShaderUSCMem(OS_FW_IMAGE *psShaderFW) +{ + IMG_PBYTE pui8Data; + + pui8Data = (IMG_PBYTE)OSFirmwareData(psShaderFW); + + pui8Data += RGXShaderCLIMemSize(psShaderFW); + + return (void*) pui8Data; +} + +#define RGX_SHADER_FILENAME_MAX_SIZE ((sizeof(RGX_SH_FILENAME)+ \ + RGX_BVNC_STR_SIZE_MAX)) + +static void +_GetShaderFileName(PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_CHAR * pszShaderFilenameStr, + IMG_CHAR * pszShaderpFilenameStr) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + OSSNPrintf(pszShaderFilenameStr, RGX_SHADER_FILENAME_MAX_SIZE, + "%s." RGX_BVNC_STR_FMTSPEC, + RGX_SH_FILENAME, + psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C); + + OSSNPrintf(pszShaderpFilenameStr, RGX_SHADER_FILENAME_MAX_SIZE, + "%s." 
RGX_BVNC_STRP_FMTSPEC, + RGX_SH_FILENAME, + psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C); +} + +PVRSRV_ERROR +PVRSRVTQLoadShaders(PVRSRV_DEVICE_NODE * psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + OS_FW_IMAGE *psShaderFW; + RGX_SHADER_HEADER sHeader; + IMG_UINT32 ui32MappingTable = 0; + IMG_UINT32 ui32NumPages; + IMG_CHAR aszShaderFilenameStr[RGX_SHADER_FILENAME_MAX_SIZE]; + IMG_CHAR aszShaderpFilenameStr[RGX_SHADER_FILENAME_MAX_SIZE]; + size_t uiNumBytes; + PVRSRV_ERROR eError; + + _GetShaderFileName(psDeviceNode, aszShaderFilenameStr, aszShaderpFilenameStr); + + psShaderFW = OSLoadFirmware(psDeviceNode, aszShaderFilenameStr, NULL); + + if (psShaderFW == NULL) + { + psShaderFW = OSLoadFirmware(psDeviceNode, aszShaderpFilenameStr, NULL); + if (psShaderFW == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to load shader binary file %s", + __func__, + aszShaderpFilenameStr)); + eError = PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE; + goto failed_init; + } + } + + RGXShaderReadHeader(psShaderFW, &sHeader); + + ui32NumPages = (sHeader.ui32SizeFragment / RGX_BIF_PM_PHYSICAL_PAGE_SIZE) + 1; + + PDUMPCOMMENT("Allocate TDM USC PMR Block (Pages %08X)", ui32NumPages); + + eError = PhysmemNewRamBackedPMR(NULL, + psDeviceNode, + (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE, + (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE, + 1, + 1, + &ui32MappingTable, + RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE + | PVRSRV_MEMALLOCFLAG_GPU_READABLE + | PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT, + sizeof("tquscpmr"), + "tquscpmr", + PVR_SYS_ALLOC_PID, + (PMR**)&psDevInfo->hTQUSCSharedMem, + PDUMP_NONE); + if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from PhysmemNewRamBackedPMR (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto failed_firmware; + } + +#if 
defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + eError = RIWritePMREntryWithOwnerKM(psDevInfo->hTQUSCSharedMem, PVR_SYS_ALLOC_PID); + if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from RIWritePMREntryWithOwnerKM (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto failed_uscpmr; + } +#endif + + eError = PMR_WriteBytes(psDevInfo->hTQUSCSharedMem, 0, RGXShaderUSCMem(psShaderFW), RGXShaderUSCMemSize(psShaderFW), &uiNumBytes); + if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from PMR_WriteBytes (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto failed_uscpmr; + } + + ui32NumPages = (sHeader.ui32SizeClientMem / RGX_BIF_PM_PHYSICAL_PAGE_SIZE) + 1; + + PDUMPCOMMENT("Allocate TDM Client PMR Block (Pages %08X)", ui32NumPages); + + eError = PhysmemNewRamBackedPMR(NULL, + psDeviceNode, + (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE, + (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE, + 1, + 1, + &ui32MappingTable, + RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE + | PVRSRV_MEMALLOCFLAG_CPU_READABLE + | PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT, + sizeof("tqclipmr"), + "tqclipmr", + PVR_SYS_ALLOC_PID, + (PMR**)&psDevInfo->hTQCLISharedMem, + PDUMP_NONE); + if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from PhysmemNewRamBackedPMR (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto failed_uscpmr; + } + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + eError = RIWritePMREntryWithOwnerKM(psDevInfo->hTQCLISharedMem, PVR_SYS_ALLOC_PID); + if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from RIWritePMREntryWithOwnerKM (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto failed_clipmr; + } +#endif + + eError = PMR_WriteBytes(psDevInfo->hTQCLISharedMem, 0, RGXShaderCLIMem(psShaderFW), RGXShaderCLIMemSize(psShaderFW), &uiNumBytes); + if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from PMR_WriteBytes (%s)", + __func__, + 
PVRSRVGetErrorString(eError))); + goto failed_clipmr; + } + + OSUnloadFirmware(psShaderFW); + + PVR_ASSERT(psDevInfo->hTQUSCSharedMem != NULL); + PVR_ASSERT(psDevInfo->hTQCLISharedMem != NULL); + + return PVRSRV_OK; + +failed_clipmr: + PMRUnrefPMR(psDevInfo->hTQCLISharedMem); +failed_uscpmr: + PMRUnrefPMR(psDevInfo->hTQUSCSharedMem); +failed_firmware: + OSUnloadFirmware(psShaderFW); +failed_init: + return eError; +} + +void +PVRSRVTQAcquireShaders(PVRSRV_DEVICE_NODE * psDeviceNode, + PMR ** ppsCLIPMRMem, + PMR ** ppsUSCPMRMem) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + PVR_ASSERT(psDevInfo->hTQUSCSharedMem != NULL); + PVR_ASSERT(psDevInfo->hTQCLISharedMem != NULL); + + *ppsUSCPMRMem = psDevInfo->hTQUSCSharedMem; + *ppsCLIPMRMem = psDevInfo->hTQCLISharedMem; +} + +PVRSRV_ERROR +PVRSRVTQUnloadShaders(PVRSRV_DEVICE_NODE * psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + + eError = PMRUnrefPMR(psDevInfo->hTQUSCSharedMem); + if (eError != PVRSRV_OK) + { + return eError; + } + + eError = PMRUnrefPMR(psDevInfo->hTQCLISharedMem); + if (eError != PVRSRV_OK) + { + return eError; + } + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rgxshader.h b/drivers/mcst/gpu-imgtec/services/server/devices/rgxshader.h new file mode 100644 index 000000000000..20db8ddd8499 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rgxshader.h @@ -0,0 +1,85 @@ +/*************************************************************************/ /*! +@File rgxshader.h +@Title TQ Shader Load +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Shader code and info are shared for all context on the device. + If allocation doesn't already exist, read shader data from file + and allocate PMR memory. PMR memory is not deallocated until + device deinit. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGXSHADER_H) +#define RGXSHADER_H + +#include "fwload.h" +#include "rgxtransfer_shader.h" +#include "connection_server.h" + +/*************************************************************************/ /*! +@Function PVRSRVTQLoadShaders +@Description If PMR is not allocated, reads shader binary data from file + and allocates new PMR memory. +@Input psDeviceNode Device node +@Return PVRSRV_ERROR Returns PVRSRV_OK on success. +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVTQLoadShaders(PVRSRV_DEVICE_NODE *psDeviceNode); + +/*************************************************************************/ /*! +@Function PVRSRVTQAcquireShaders +@Description Get handle to already-allocated shader PMR memory +@Input psDeviceNode Device node +@Output ppsCLIPMRMem Shader data used by CPU client side. +@Output ppsUSCPMRMem Shader usc code used by GPU. +*/ /**************************************************************************/ +void +PVRSRVTQAcquireShaders(PVRSRV_DEVICE_NODE *psDeviceNode, + PMR **ppsCLIPMRMem, + PMR **ppsUSCPMRMem); + +/*************************************************************************/ /*! +@Function PVRSRVTQUnloadShaders +@Description Unref PMR memory. +@Input psDeviceNode Device node +@Return PVRSRV_ERROR Returns PVRSRV_OK on success. +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVTQUnloadShaders(PVRSRV_DEVICE_NODE *psDeviceNode); + +#endif /* RGXSHADER_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rgxsignals.h b/drivers/mcst/gpu-imgtec/services/server/devices/rgxsignals.h new file mode 100644 index 000000000000..509d960c5622 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rgxsignals.h @@ -0,0 +1,71 @@ +/*************************************************************************/ /*!
+@File rgxsignals.h +@Title RGX Signals routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX Signals routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(_RGX_SIGNALS_H) +#define _RGX_SIGNALS_H + +#include "img_types.h" +#include "pvrsrv_error.h" +#include "connection_server.h" +#include "device.h" + +/*! +******************************************************************************* + + @Function PVRSRVRGXNotifySignalUpdateKM + + @Description Server-side implementation of RGXNotifySignalUpdate + + @Input hMemCtxPrivData - memory context private data + @Input sDevSignalAddress - device virtual address of the updated signal + + @Return PVRSRV_ERROR + +******************************************************************************/ + +PVRSRV_ERROR PVRSRVRGXNotifySignalUpdateKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hMemCtxPrivData, + IMG_DEV_VIRTADDR sDevSignalAddress); + +#endif diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rgxstartstop.h b/drivers/mcst/gpu-imgtec/services/server/devices/rgxstartstop.h new file mode 100644 index 000000000000..178afe2849a0 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rgxstartstop.h @@ -0,0 +1,84 @@ +/*************************************************************************/ /*! +@File +@Title RGX start/stop header file +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Header for the RGX start/stop functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGXSTARTSTOP_H) +#define RGXSTARTSTOP_H + +/* The routines declared here are built on top of an abstraction layer to + * hide DDK/OS-specific details in case they are used outside of the DDK + * (e.g. when DRM security is enabled). + * Any new dependency should be added to rgxlayer.h. + * Any new code should be built on top of the existing abstraction layer, + * which should be extended when necessary. + */ +#include "rgxlayer.h" + +/*! +******************************************************************************* + + @Function RGXStart + + @Description Perform GPU reset and initialisation + + @Input hPrivate : Implementation specific data + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXStart(const void *hPrivate); + +/*! 
+******************************************************************************* + + @Function RGXStop + + @Description Stop Rogue in preparation for power down + + @Input hPrivate : Implementation specific data + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXStop(const void *hPrivate); + +#endif /* RGXSTARTSTOP_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rgxsyncutils.h b/drivers/mcst/gpu-imgtec/services/server/devices/rgxsyncutils.h new file mode 100644 index 000000000000..0de72b6d45c4 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rgxsyncutils.h @@ -0,0 +1,86 @@ +/*************************************************************************/ /*! +@File rgxsyncutils.h +@Title RGX Sync Utilities +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX Sync helper functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXSYNCUTILS_H +#define RGXSYNCUTILS_H + +#include "rgxdevice.h" +#include "sync_server.h" +#include "rgxdebug.h" +#include "rgx_fwif_km.h" + +typedef struct _RGX_SYNC_DATA_ +{ + PRGXFWIF_UFO_ADDR *pauiClientUpdateUFOAddress; + IMG_UINT32 *paui32ClientUpdateValue; + IMG_UINT32 ui32ClientUpdateValueCount; + IMG_UINT32 ui32ClientUpdateCount; + + PRGXFWIF_UFO_ADDR *pauiClientPRUpdateUFOAddress; + IMG_UINT32 *paui32ClientPRUpdateValue; + IMG_UINT32 ui32ClientPRUpdateValueCount; + IMG_UINT32 ui32ClientPRUpdateCount; +} RGX_SYNC_DATA; + +//#define TA3D_CHECKPOINT_DEBUG + +#if 0 //defined(TA3D_CHECKPOINT_DEBUG) +void _DebugSyncValues(IMG_UINT32 *pui32UpdateValues, + IMG_UINT32 ui32Count); + +void _DebugSyncCheckpoints(PSYNC_CHECKPOINT *apsSyncCheckpoints, + IMG_UINT32 ui32Count); +#endif + +PVRSRV_ERROR RGXSyncAppendTimelineUpdate(IMG_UINT32 ui32FenceTimelineUpdateValue, + SYNC_ADDR_LIST *psSyncList, + SYNC_ADDR_LIST *psPRSyncList, + PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync, + RGX_SYNC_DATA *psSyncData, + IMG_BOOL bKick3D); + +#endif /* RGXSYNCUTILS_H */ + +/****************************************************************************** + End of file (rgxsyncutils.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rgxtimecorr.c b/drivers/mcst/gpu-imgtec/services/server/devices/rgxtimecorr.c new file mode 100644 index 000000000000..875411d63b07 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rgxtimecorr.c @@ -0,0 +1,645 @@ +/*************************************************************************/ /*! +@File +@Title Device specific time correlation and calibration routines +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Device specific time correlation and calibration routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "rgxtimecorr.h" +#include "rgxfwutils.h" +#include "htbserver.h" +#include "pvrsrv_apphint.h" + +/****************************************************************************** + * + * - A calibration period is started on power-on and after a DVFS transition, + * and it's closed before a power-off and before a DVFS transition + * (so power-on -> dfvs -> dvfs -> power-off , power on -> dvfs -> dvfs..., + * where each arrow is a calibration period). + * + * - The timers on the Host and on the FW are correlated at the beginning of + * each period together with the current GPU frequency. + * + * - Correlation and calibration are also done at regular intervals using + * a best effort approach. 
+ * + *****************************************************************************/ + +static IMG_UINT32 g_ui32ClockSource = PVRSRV_APPHINT_TIMECORRCLOCK; + +/* + AppHint interfaces +*/ + +static PVRSRV_ERROR _SetClock(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 ui32Value) +{ + static __maybe_unused const char* const apszClocks[] = { + "mono", "mono_raw", "sched" + }; + + PVR_UNREFERENCED_PARAMETER(psPrivate); + + if (ui32Value >= RGXTIMECORR_CLOCK_LAST) + { + PVR_DPF((PVR_DBG_ERROR, "Invalid clock source type (%u)", ui32Value)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + RGXTimeCorrEnd((PVRSRV_DEVICE_NODE *) psDeviceNode, + RGXTIMECORR_EVENT_CLOCK_CHANGE); + + PVR_DPF((PVR_DBG_WARNING, "Setting time correlation clock from \"%s\" to \"%s\"", + apszClocks[g_ui32ClockSource], + apszClocks[ui32Value])); + + g_ui32ClockSource = ui32Value; + + RGXTimeCorrBegin((PVRSRV_DEVICE_NODE *) psDeviceNode, + RGXTIMECORR_EVENT_CLOCK_CHANGE); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR _GetClock(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 *pui32Value) +{ + *pui32Value = g_ui32ClockSource; + + PVR_UNREFERENCED_PARAMETER(psPrivate); + + return PVRSRV_OK; +} + +void RGXTimeCorrInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_TimeCorrClock, _GetClock, + _SetClock, psDeviceNode, NULL); +} + +/* + End of AppHint interface +*/ + +IMG_UINT64 RGXTimeCorrGetClockns64(void) +{ + IMG_UINT64 ui64Clock; + + switch (g_ui32ClockSource) { + case RGXTIMECORR_CLOCK_MONO: + return ((void) OSClockMonotonicns64(&ui64Clock), ui64Clock); + case RGXTIMECORR_CLOCK_MONO_RAW: + return OSClockMonotonicRawns64(); + case RGXTIMECORR_CLOCK_SCHED: + return OSClockns64(); + default: + PVR_ASSERT(IMG_FALSE); + return 0; + } +} + +IMG_UINT64 RGXTimeCorrGetClockus64(void) +{ + IMG_UINT32 rem; + return OSDivide64r64(RGXTimeCorrGetClockns64(), 1000, &rem); +} + +void 
RGXGetTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, + RGXFWIF_TIME_CORR *psTimeCorrs, + IMG_UINT32 ui32NumOut) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb; + IMG_UINT32 ui32CurrentIndex = psGpuUtilFWCB->ui32TimeCorrSeqCount; + + while (ui32NumOut--) + { + *(psTimeCorrs++) = psGpuUtilFWCB->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32CurrentIndex)]; + ui32CurrentIndex--; + } +} + +static __maybe_unused const IMG_CHAR* _EventToString(RGXTIMECORR_EVENT eEvent) +{ + switch (eEvent) + { + case RGXTIMECORR_EVENT_POWER: + return "power"; + case RGXTIMECORR_EVENT_DVFS: + return "dvfs"; + case RGXTIMECORR_EVENT_PERIODIC: + return "periodic"; + case RGXTIMECORR_EVENT_CLOCK_CHANGE: + return "clock source"; + default: + return "n/a"; + } +} + +static inline IMG_UINT32 _RGXGetSystemLayerGPUClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData; + + return psRGXData->psRGXTimingInfo->ui32CoreClockSpeed; +} + +static inline IMG_UINT32 _RGXGetEstimatedGPUClockSpeed(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable; + GPU_FREQ_TRACKING_DATA *psTrackingData; + + psTrackingData = &psGpuDVFSTable->asTrackingData[psGpuDVFSTable->ui32FreqIndex]; + + return psTrackingData->ui32EstCoreClockSpeed; +} + +#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) +static inline void _DumpTimerCorrelationHistory(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable; + IMG_UINT32 i = psGpuDVFSTable->ui32HistoryIndex; + + PVR_DPF((PVR_DBG_ERROR, "Dumping history of timer correlation data (latest first):")); + + do + { + PVR_DPF((PVR_DBG_ERROR, + " Begin times: OS %" IMG_UINT64_FMTSPEC ", CR %" IMG_UINT64_FMTSPEC ", " + "End times: OS %" IMG_UINT64_FMTSPEC ", CR %" IMG_UINT64_FMTSPEC ", " + "Core clk %u, Estimated clk %u", + 
psGpuDVFSTable->asTrackingHistory[i].ui64BeginOSTimestamp, + psGpuDVFSTable->asTrackingHistory[i].ui64BeginCRTimestamp, + psGpuDVFSTable->asTrackingHistory[i].ui64EndOSTimestamp, + psGpuDVFSTable->asTrackingHistory[i].ui64EndCRTimestamp, + psGpuDVFSTable->asTrackingHistory[i].ui32CoreClockSpeed, + psGpuDVFSTable->asTrackingHistory[i].ui32EstCoreClockSpeed)); + + i = (i - 1) % RGX_GPU_FREQ_TRACKING_SIZE; + + } while (i != psGpuDVFSTable->ui32HistoryIndex); +} +#endif + +static void _RGXMakeTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, RGXTIMECORR_EVENT eEvent) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb; + IMG_UINT32 ui32NewSeqCount = psGpuUtilFWCB->ui32TimeCorrSeqCount + 1; + RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32NewSeqCount)]; + + /* + * The following reads must be done as close together as possible, because + * they represent the same current time sampled from different clock sources. 
+ */ +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + if (OSClockMonotonicns64(&psTimeCorr->ui64OSMonoTimeStamp) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "_RGXMakeTimeCorrData: System Monotonic Clock not available.")); + PVR_ASSERT(0); + } +#endif + psTimeCorr->ui64CRTimeStamp = RGXReadHWTimerReg(psDevInfo); + psTimeCorr->ui64OSTimeStamp = RGXTimeCorrGetClockns64(); + psTimeCorr->ui32CoreClockSpeed = _RGXGetEstimatedGPUClockSpeed(psDevInfo); + psTimeCorr->ui64CRDeltaToOSDeltaKNs = RGXTimeCorrGetConversionFactor(psTimeCorr->ui32CoreClockSpeed); + + if (psTimeCorr->ui64CRDeltaToOSDeltaKNs == 0) + { +#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) + _DumpTimerCorrelationHistory(psDevInfo); +#endif + + /* Revert to original clock speed (error already printed) */ + psTimeCorr->ui32CoreClockSpeed = _RGXGetSystemLayerGPUClockSpeed(psDeviceNode); + psTimeCorr->ui64CRDeltaToOSDeltaKNs = RGXTimeCorrGetConversionFactor(psTimeCorr->ui32CoreClockSpeed); + } + + /* Make sure the values are written to memory before updating the index of the current entry */ + OSWriteMemoryBarrier(); + + /* Update the index of the current entry in the timer correlation array */ + psGpuUtilFWCB->ui32TimeCorrSeqCount = ui32NewSeqCount; + + PVR_DPF((PVR_DBG_MESSAGE, + "Timer correlation data (post %s event): OS %" IMG_UINT64_FMTSPEC " ns, " + "CR %" IMG_UINT64_FMTSPEC ", GPU freq. %u Hz (given as %u Hz)", + _EventToString(eEvent), + psTimeCorr->ui64OSTimeStamp, + psTimeCorr->ui64CRTimeStamp, + RGXFWIF_ROUND_TO_KHZ(psTimeCorr->ui32CoreClockSpeed), + _RGXGetSystemLayerGPUClockSpeed(psDeviceNode))); + + /* + * Don't log timing data to the HTB log after a power(-on) event. + * Otherwise this will be logged before the HTB partition marker, breaking + * the log sync grammar. This data will be automatically repeated when the + * partition marker is written. 
+ */ + HTBSyncScale(eEvent != RGXTIMECORR_EVENT_POWER, + psTimeCorr->ui64OSTimeStamp, + psTimeCorr->ui64CRTimeStamp, + psTimeCorr->ui32CoreClockSpeed); +} + +static void _RGXCheckTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_GPU_DVFS_TABLE *psGpuDVFSTable) +{ +#if !defined(NO_HARDWARE) && !defined(VIRTUAL_PLATFORM) && defined(DEBUG) +#define SCALING_FACTOR (10) + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb; + IMG_UINT32 ui32Index = RGXFWIF_TIME_CORR_CURR_INDEX(psGpuUtilFWCB->ui32TimeCorrSeqCount); + RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32Index]; + IMG_UINT64 ui64EstimatedTime, ui64CRTimeStamp, ui64OSTimeStamp; + IMG_UINT64 ui64CRTimeDiff, ui64OSTimeDiff; + IMG_INT64 i64Diff; + IMG_UINT32 ui32Ratio, ui32Remainder; + + /* + * The following reads must be done as close together as possible, because + * they represent the same current time sampled from different clock sources. + */ + ui64CRTimeStamp = RGXReadHWTimerReg(psDevInfo); + ui64OSTimeStamp = RGXTimeCorrGetClockns64(); + + if ((ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp) < (1 << SCALING_FACTOR)) + { + /* + * Less than ~1us has passed since the timer correlation data was generated. + * A time frame this short is probably not enough to get an estimate + * of how good the timer correlation data was. + * Skip calculations for the above reason and to avoid a division by 0 below. 
+ */ + return; + } + + + /* Calculate an estimated timestamp based on the latest timer correlation data */ + ui64CRTimeDiff = ui64CRTimeStamp - psTimeCorr->ui64CRTimeStamp; + ui64OSTimeDiff = RGXFWIF_GET_DELTA_OSTIME_NS(ui64CRTimeDiff, + psTimeCorr->ui64CRDeltaToOSDeltaKNs); + ui64EstimatedTime = psTimeCorr->ui64OSTimeStamp + ui64OSTimeDiff; + + /* Get difference between estimated timestamp and current timestamp, in ns */ + i64Diff = ui64EstimatedTime - ui64OSTimeStamp; + + /* + * Calculate ratio between estimated time diff and real time diff: + * ratio% : 100% = (OSestimate - OStimecorr) : (OSreal - OStimecorr) + * + * The operands are scaled down (approximately from ns to us) so at least + * the divisor fits on 32 bit. + */ + ui32Ratio = OSDivide64(((ui64EstimatedTime - psTimeCorr->ui64OSTimeStamp) * 100ULL) >> SCALING_FACTOR, + (ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp) >> SCALING_FACTOR, + &ui32Remainder); + + PVR_DPF((PVR_DBG_MESSAGE, + "Estimated timestamp check: diff %" IMG_INT64_FMTSPECd " ns over " + "period %" IMG_UINT64_FMTSPEC " ns, estimated timer speed %u%%", + i64Diff, + ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp, + ui32Ratio)); + + /* Warn if the estimated timestamp is not within +/- 1% of the current time */ + if (ui32Ratio < 99 || ui32Ratio > 101) + { + PVR_DPF((PVR_DBG_WARNING, + "Estimated timestamps generated in the last %" IMG_UINT64_FMTSPEC " ns " + "were %s the real time (increasing at %u%% speed)", + ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp, + i64Diff > 0 ? "ahead of" : "behind", + ui32Ratio)); + + /* Higher ratio == higher delta OS == higher delta CR == frequency higher than expected (and viceversa) */ + PVR_DPF((PVR_DBG_WARNING, + "Current GPU frequency %u Hz (given as %u Hz) is probably %s than expected", + RGXFWIF_ROUND_TO_KHZ(psTimeCorr->ui32CoreClockSpeed), + _RGXGetSystemLayerGPUClockSpeed(psDeviceNode), + i64Diff > 0 ? 
"lower" : "higher")); + } +#else + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(psGpuDVFSTable); +#endif +} + +static inline IMG_UINT32 _RGXGPUFreqGetIndex(RGX_GPU_DVFS_TABLE *psGpuDVFSTable, IMG_UINT32 ui32CoreClockSpeed) +{ + IMG_UINT32 *paui32GPUFrequencies = psGpuDVFSTable->aui32GPUFrequency; + IMG_UINT32 i; + + for (i = 0; i < RGX_GPU_DVFS_TABLE_SIZE; i++) + { + if (paui32GPUFrequencies[i] == ui32CoreClockSpeed) + { + return i; + } + + if (paui32GPUFrequencies[i] == 0) + { + paui32GPUFrequencies[i] = ui32CoreClockSpeed; + return i; + } + } + + i--; + + PVR_DPF((PVR_DBG_ERROR, "GPU frequency table in the driver is full! " + "Table size should be increased! Overriding last entry (%u) with %u", + paui32GPUFrequencies[i], ui32CoreClockSpeed)); + + paui32GPUFrequencies[i] = ui32CoreClockSpeed; + + return i; +} + +static void _RGXGPUFreqCalibrationPeriodStart(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_GPU_DVFS_TABLE *psGpuDVFSTable) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + GPU_FREQ_TRACKING_DATA *psTrackingData; + IMG_UINT32 ui32CoreClockSpeed, ui32Index; + + IMG_UINT64 ui64CRTimestamp = RGXReadHWTimerReg(psDevInfo); + IMG_UINT64 ui64OSTimestamp = RGXTimeCorrGetClockus64(); + + psGpuDVFSTable->ui64CalibrationCRTimestamp = ui64CRTimestamp; + psGpuDVFSTable->ui64CalibrationOSTimestamp = ui64OSTimestamp; + + ui32CoreClockSpeed = _RGXGetSystemLayerGPUClockSpeed(psDeviceNode); + ui32Index = _RGXGPUFreqGetIndex(psGpuDVFSTable, ui32CoreClockSpeed); + psTrackingData = &psGpuDVFSTable->asTrackingData[ui32Index]; + + /* Set the time needed to (re)calibrate the GPU frequency */ + if (psTrackingData->ui32CalibrationCount == 0) /* We never met this frequency */ + { + psTrackingData->ui32EstCoreClockSpeed = ui32CoreClockSpeed; + psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US; + } + else if (psTrackingData->ui32CalibrationCount == 1) /* We calibrated this frequency only once */ + { + 
psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US; + } + else + { + psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_PERIODIC_CALIBRATION_TIME_US; + } + + /* Update the index to the DVFS table */ + psGpuDVFSTable->ui32FreqIndex = ui32Index; + +#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) + /* Update tracking history */ + { + GPU_FREQ_TRACKING_HISTORY *psTrackingHistory; + + psTrackingHistory = &psGpuDVFSTable->asTrackingHistory[psGpuDVFSTable->ui32HistoryIndex]; + psTrackingHistory->ui32CoreClockSpeed = ui32CoreClockSpeed; + psTrackingHistory->ui32EstCoreClockSpeed = psTrackingData->ui32EstCoreClockSpeed; + psTrackingHistory->ui64BeginCRTimestamp = ui64CRTimestamp; + psTrackingHistory->ui64BeginOSTimestamp = ui64OSTimestamp; + psTrackingHistory->ui64EndCRTimestamp = 0ULL; + psTrackingHistory->ui64EndOSTimestamp = 0ULL; + } +#endif +} + +static void _RGXGPUFreqCalibrationPeriodStop(PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_GPU_DVFS_TABLE *psGpuDVFSTable) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + IMG_UINT64 ui64CRTimestamp = RGXReadHWTimerReg(psDevInfo); + IMG_UINT64 ui64OSTimestamp = RGXTimeCorrGetClockus64(); + + psGpuDVFSTable->ui64CalibrationCRTimediff = + ui64CRTimestamp - psGpuDVFSTable->ui64CalibrationCRTimestamp; + psGpuDVFSTable->ui64CalibrationOSTimediff = + ui64OSTimestamp - psGpuDVFSTable->ui64CalibrationOSTimestamp; + + /* Check if the current timer correlation data is good enough */ + _RGXCheckTimeCorrData(psDeviceNode, psGpuDVFSTable); + +#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) + /* Update tracking history */ + { + GPU_FREQ_TRACKING_HISTORY *psTrackingHistory; + + psTrackingHistory = &psGpuDVFSTable->asTrackingHistory[psGpuDVFSTable->ui32HistoryIndex]; + psTrackingHistory->ui64EndCRTimestamp = ui64CRTimestamp; + psTrackingHistory->ui64EndOSTimestamp = ui64OSTimestamp; + } +#endif +} + +static void _RGXGPUFreqCalibrationCalculate(PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_GPU_DVFS_TABLE 
*psGpuDVFSTable, + RGXTIMECORR_EVENT eEvent) +{ +#if !defined(DISABLE_GPU_FREQUENCY_CALIBRATION) + GPU_FREQ_TRACKING_DATA *psTrackingData; + IMG_UINT32 ui32EstCoreClockSpeed, ui32PrevCoreClockSpeed; + IMG_INT32 i32Diff; + IMG_UINT32 ui32Remainder; + + /* + * Find out what the GPU frequency was in the last period. + * This should return a value very close to the frequency passed by the system layer. + */ + ui32EstCoreClockSpeed = + RGXFWIF_GET_GPU_CLOCK_FREQUENCY_HZ(psGpuDVFSTable->ui64CalibrationCRTimediff, + psGpuDVFSTable->ui64CalibrationOSTimediff, + ui32Remainder); + + /* Update GPU frequency used by the driver for a given system layer frequency */ + psTrackingData = &psGpuDVFSTable->asTrackingData[psGpuDVFSTable->ui32FreqIndex]; + + ui32PrevCoreClockSpeed = psTrackingData->ui32EstCoreClockSpeed; + psTrackingData->ui32EstCoreClockSpeed = ui32EstCoreClockSpeed; + psTrackingData->ui32CalibrationCount++; + + i32Diff = (IMG_INT32) (ui32EstCoreClockSpeed - ui32PrevCoreClockSpeed); + + if ((i32Diff < -1000000) || (i32Diff > 1000000)) + { + /* Warn if the frequency changed by more than 1 MHz between recalculations */ + PVR_DPF((PVR_DBG_WARNING, + "GPU frequency calibration of system layer frequency %u Hz (pre %s event): " + "more than 1 MHz difference between old and new value " + "(%u Hz -> %u Hz over %" IMG_UINT64_FMTSPEC " us)", + _RGXGetSystemLayerGPUClockSpeed(psDeviceNode), + _EventToString(eEvent), + RGXFWIF_ROUND_TO_KHZ(ui32PrevCoreClockSpeed), + RGXFWIF_ROUND_TO_KHZ(ui32EstCoreClockSpeed), + psGpuDVFSTable->ui64CalibrationOSTimediff)); + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, + "GPU frequency calibration of system layer frequency %u Hz (pre %s event): " + "%u Hz -> %u Hz done over %" IMG_UINT64_FMTSPEC " us", + _RGXGetSystemLayerGPUClockSpeed(psDeviceNode), + _EventToString(eEvent), + RGXFWIF_ROUND_TO_KHZ(ui32PrevCoreClockSpeed), + RGXFWIF_ROUND_TO_KHZ(ui32EstCoreClockSpeed), + psGpuDVFSTable->ui64CalibrationOSTimediff)); + } + + /* Reset time deltas to 
avoid recalibrating the same frequency over and over again */ + psGpuDVFSTable->ui64CalibrationCRTimediff = 0; + psGpuDVFSTable->ui64CalibrationOSTimediff = 0; + +#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) + /* Update tracking history */ + { + GPU_FREQ_TRACKING_HISTORY *psTrackingHistory; + + psTrackingHistory = &psGpuDVFSTable->asTrackingHistory[psGpuDVFSTable->ui32HistoryIndex]; + psTrackingHistory->ui32EstCoreClockSpeed = ui32EstCoreClockSpeed; + psGpuDVFSTable->ui32HistoryIndex = + (psGpuDVFSTable->ui32HistoryIndex + 1) % RGX_GPU_FREQ_TRACKING_SIZE; + } +#endif + +#else + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(psGpuDVFSTable); + PVR_UNREFERENCED_PARAMETER(eEvent); +#endif +} + +void RGXTimeCorrBegin(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable; + PVRSRV_VZ_RETN_IF_MODE(GUEST); + + _RGXGPUFreqCalibrationPeriodStart(psDeviceNode, psGpuDVFSTable); + _RGXMakeTimeCorrData(psDeviceNode, eEvent); +} + +void RGXTimeCorrEnd(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable; + PVRSRV_VZ_RETN_IF_MODE(GUEST); + + _RGXGPUFreqCalibrationPeriodStop(psDeviceNode, psGpuDVFSTable); + + if (psGpuDVFSTable->ui64CalibrationOSTimediff >= psGpuDVFSTable->ui32CalibrationPeriod) + { + _RGXGPUFreqCalibrationCalculate(psDeviceNode, psGpuDVFSTable, eEvent); + } +} + +void RGXTimeCorrRestartPeriodic(IMG_HANDLE hDevHandle) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable; + IMG_UINT64 ui64TimeNow = RGXTimeCorrGetClockus64(); + PVRSRV_DEV_POWER_STATE ePowerState = 
PVRSRV_DEV_POWER_STATE_DEFAULT; + PVRSRV_VZ_RETN_IF_MODE(GUEST); + + if (psGpuDVFSTable == NULL) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Required data not initialised yet", __func__)); + return; + } + + /* Check if it's the right time to recalibrate the GPU clock frequency */ + if ((ui64TimeNow - psGpuDVFSTable->ui64CalibrationOSTimestamp) < psGpuDVFSTable->ui32CalibrationPeriod) return; + + /* Try to acquire the powerlock, if not possible then don't wait */ + if (PVRSRVPowerTryLock(psDeviceNode) != PVRSRV_OK) return; + + /* If the GPU is off then we can't do anything */ + PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); + if (ePowerState != PVRSRV_DEV_POWER_STATE_ON) + { + PVRSRVPowerUnlock(psDeviceNode); + return; + } + + /* All checks passed, we can calibrate and correlate */ + RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_PERIODIC); + RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_PERIODIC); + + PVRSRVPowerUnlock(psDeviceNode); +} + +/* + RGXTimeCorrGetClockSource +*/ +RGXTIMECORR_CLOCK_TYPE RGXTimeCorrGetClockSource(void) +{ + return g_ui32ClockSource; +} + +/* + RGXTimeCorrSetClockSource +*/ +PVRSRV_ERROR RGXTimeCorrSetClockSource(PVRSRV_DEVICE_NODE *psDeviceNode, + RGXTIMECORR_CLOCK_TYPE eClockType) +{ + return _SetClock(psDeviceNode, NULL, eClockType); +} + +PVRSRV_ERROR +PVRSRVRGXCurrentTime(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT64 * pui64Time) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + + *pui64Time = RGXTimeCorrGetClockns64(); + + return PVRSRV_OK; +} + +/****************************************************************************** + End of file (rgxtimecorr.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rgxtimecorr.h b/drivers/mcst/gpu-imgtec/services/server/devices/rgxtimecorr.h new file mode 100644 index 000000000000..48dd821998db --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/services/server/devices/rgxtimecorr.h @@ -0,0 +1,269 @@ +/*************************************************************************/ /*! +@File +@Title RGX time correlation and calibration header file +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the RGX time correlation and calibration routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__RGXTIMECORR_H__) +#define __RGXTIMECORR_H__ + +#include "img_types.h" +#include "device.h" +#include "osfunc.h" +#include "connection_server.h" + +typedef enum +{ + RGXTIMECORR_CLOCK_MONO, + RGXTIMECORR_CLOCK_MONO_RAW, + RGXTIMECORR_CLOCK_SCHED, + + RGXTIMECORR_CLOCK_LAST +} RGXTIMECORR_CLOCK_TYPE; + +typedef enum +{ + RGXTIMECORR_EVENT_POWER, + RGXTIMECORR_EVENT_DVFS, + RGXTIMECORR_EVENT_PERIODIC, + RGXTIMECORR_EVENT_CLOCK_CHANGE +} RGXTIMECORR_EVENT; + +/* + * Calibrated GPU frequencies are rounded to the nearest multiple of 1 KHz + * before use, to reduce the noise introduced by calculations done with + * imperfect operands (correlated timers not sampled at exactly the same + * time, GPU CR timer incrementing only once every 256 GPU cycles). + * This also helps reducing the variation between consecutive calculations. 
+ */ +#define RGXFWIF_CONVERT_TO_KHZ(freq) (((freq) + 500) / 1000) +#define RGXFWIF_ROUND_TO_KHZ(freq) ((((freq) + 500) / 1000) * 1000) + +/* Constants used in different calculations */ +#define SECONDS_TO_MICROSECONDS (1000000ULL) +#define CRTIME_TO_CYCLES_WITH_US_SCALE (RGX_CRTIME_TICK_IN_CYCLES * SECONDS_TO_MICROSECONDS) + +/* + * Use this macro to get a more realistic GPU core clock speed than the one + * given by the upper layers (used when doing GPU frequency calibration) + */ +#define RGXFWIF_GET_GPU_CLOCK_FREQUENCY_HZ(deltacr_us, deltaos_us, remainder) \ + OSDivide64((deltacr_us) * CRTIME_TO_CYCLES_WITH_US_SCALE, (deltaos_us), &(remainder)) + + +/*! +****************************************************************************** + + @Function RGXTimeCorrGetConversionFactor + + @Description Generate constant used to convert a GPU time difference into + an OS time difference (for more info see rgx_fwif_km.h). + + @Input ui32ClockSpeed : GPU clock speed + + @Return 0 on failure, conversion factor otherwise + +******************************************************************************/ +static inline IMG_UINT64 RGXTimeCorrGetConversionFactor(IMG_UINT32 ui32ClockSpeed) +{ + IMG_UINT32 ui32Remainder; + + if (RGXFWIF_CONVERT_TO_KHZ(ui32ClockSpeed) == 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: GPU clock frequency %u is too low", + __func__, ui32ClockSpeed)); + + return 0; + } + + return OSDivide64r64(CRTIME_TO_CYCLES_WITH_US_SCALE << RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT, + RGXFWIF_CONVERT_TO_KHZ(ui32ClockSpeed), &ui32Remainder); +} + +/*! +****************************************************************************** + + @Function RGXTimeCorrBegin + + @Description Generate new timer correlation data, and start tracking + the current GPU frequency. 
+ + @Input hDevHandle : RGX Device Node + @Input eEvent : Event associated with the beginning of a timer + correlation period + + @Return void + +******************************************************************************/ +void RGXTimeCorrBegin(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent); + +/*! +****************************************************************************** + + @Function RGXTimeCorrEnd + + @Description Stop tracking the CPU and GPU timers, and if possible + recalculate the GPU frequency to a value which makes the timer + correlation data more accurate. + + @Input hDevHandle : RGX Device Node + @Input eEvent : Event associated with the end of a timer + correlation period + + @Return void + +******************************************************************************/ +void RGXTimeCorrEnd(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent); + +/*! +****************************************************************************** + + @Function RGXTimeCorrRestartPeriodic + + @Description Perform actions from RGXTimeCorrEnd and RGXTimeCorrBegin, + but only if enough time has passed since the last timer + correlation data was generated. + + @Input hDevHandle : RGX Device Node + + @Return void + +******************************************************************************/ +void RGXTimeCorrRestartPeriodic(IMG_HANDLE hDevHandle); + +/*! +****************************************************************************** + + @Function RGXTimeCorrGetClockns64 + + @Description Returns value of currently selected clock (in ns). + + @Return clock value from currently selected clock source + +******************************************************************************/ +IMG_UINT64 RGXTimeCorrGetClockns64(void); + +/*! +****************************************************************************** + + @Function RGXTimeCorrGetClockus64 + + @Description Returns value of currently selected clock (in us). 
+ + @Return clock value from currently selected clock source + +******************************************************************************/ +IMG_UINT64 RGXTimeCorrGetClockus64(void); + +/*! +****************************************************************************** + + @Function RGXTimeCorrGetClockSource + + @Description Returns currently selected clock source + + @Return clock source type + +******************************************************************************/ +RGXTIMECORR_CLOCK_TYPE RGXTimeCorrGetClockSource(void); + +/*! +****************************************************************************** + + @Function RGXTimeCorrSetClockSource + + @Description Sets clock source for correlation data. + + @Input psDeviceNode : RGX Device Node + @Input eClockType : clock source type + + @Return error code + +******************************************************************************/ +PVRSRV_ERROR RGXTimeCorrSetClockSource(PVRSRV_DEVICE_NODE *psDeviceNode, + RGXTIMECORR_CLOCK_TYPE eClockType); + +/*! +****************************************************************************** + + @Function RGXTimeCorrInitAppHintCallbacks + + @Description Initialise apphint callbacks for timer correlation + related apphints. + + @Input psDeviceNode : RGX Device Node + + @Return void + +******************************************************************************/ +void RGXTimeCorrInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode); + +/*! 
+****************************************************************************** + + @Function RGXGetTimeCorrData + + @Description Get a number of the most recent time correlation data points + + @Input psDeviceNode : RGX Device Node + @Output psTimeCorrs : Output array of RGXFWIF_TIME_CORR elements + for data to be written to + @Input ui32NumOut : Number of elements to be written out + + @Return void + +******************************************************************************/ +void RGXGetTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, + RGXFWIF_TIME_CORR *psTimeCorrs, + IMG_UINT32 ui32NumOut); + +/**************************************************************************/ /*! +@Function PVRSRVRGXCurrentTime +@Description Returns the current state of the device timer +@Input psDevData Device data. +@Out pui64Time +@Return PVRSRV_OK on success. +*/ /***************************************************************************/ +PVRSRV_ERROR +PVRSRVRGXCurrentTime(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT64 * pui64Time); + +#endif /* __RGXTIMECORR_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rgxutils.h b/drivers/mcst/gpu-imgtec/services/server/devices/rgxutils.h new file mode 100644 index 000000000000..670986323d2b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rgxutils.h @@ -0,0 +1,185 @@ +/*************************************************************************/ /*! +@File +@Title Device specific utility routines declarations +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Inline functions/structures specific to RGX +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "device.h" +#include "rgxdevice.h" +#include "rgxdebug.h" +#include "pvr_notifier.h" +#include "pvrsrv.h" + +/*! +****************************************************************************** + + @Function RGXQueryAPMState + + @Description Query the state of the APM configuration + + @Input psDeviceNode : The device node + + @Input pvPrivateData: Unused (required for AppHint callback) + + @Output pui32State : The APM configuration state + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXQueryAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *pvPrivateData, + IMG_UINT32 *pui32State); + +/*! +****************************************************************************** + + @Function RGXSetAPMState + + @Description Set the APM configuration state. Currently only 'OFF' is + supported + + @Input psDeviceNode : The device node + + @Input pvPrivateData: Unused (required for AppHint callback) + + @Input ui32State : The requested APM configuration state + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXSetAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *pvPrivateData, + IMG_UINT32 ui32State); + +/*! +****************************************************************************** + + @Function RGXQueryPdumpPanicDisable + + @Description Get the PDump Panic Enable configuration state. + + @Input psDeviceNode : The device node + + @Input pvPrivateData: Unused (required for AppHint callback) + + @Input pbDisabled : IMG_TRUE if PDump Panic is disabled + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXQueryPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *pvPrivateData, + IMG_BOOL *pbDisabled); + +/*! 
+****************************************************************************** + + @Function RGXSetPdumpPanicDisable + + @Description Set the PDump Panic Enable flag + + @Input psDeviceNode : The device node + + @Input pvPrivateData: Unused (required for AppHint callback) + + @Input bDisable : The requested configuration state + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXSetPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *pvPrivateData, + IMG_BOOL bDisable); + +/*! +****************************************************************************** + + @Function RGXGetDeviceFlags + + @Description Get the device flags for a given device + + @Input psDevInfo : The device descriptor query + + @Output pui32DeviceFlags : The current state of the device flags + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXGetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 *pui32DeviceFlags); + +/*! +****************************************************************************** + + @Function RGXSetDeviceFlags + + @Description Set the device flags for a given device + + @Input psDevInfo : The device descriptor to modify + + @Input ui32Config : The device flags to modify + + @Input bSetNotClear : Set or clear the specified flags + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXSetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Config, + IMG_BOOL bSetNotClear); + +/*! 
+****************************************************************************** + + @Function RGXStringifyKickTypeDM + + @Description Gives the kick type DM name stringified + + @Input Kick type DM + + @Return Array containing the kick type DM name + +******************************************************************************/ +const char* RGXStringifyKickTypeDM(RGX_KICK_TYPE_DM eKickTypeDM); + +#define RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(bitmask, eKickTypeDM) bitmask & eKickTypeDM ? RGXStringifyKickTypeDM(eKickTypeDM) : "" +/****************************************************************************** + End of file (rgxutils.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rgxworkest.c b/drivers/mcst/gpu-imgtec/services/server/devices/rgxworkest.c new file mode 100644 index 000000000000..e0bd05552b64 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rgxworkest.c @@ -0,0 +1,618 @@ +/*************************************************************************/ /*! +@File rgxworkest.c +@Title RGX Workload Estimation Functionality +@Codingstyle IMG +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Kernel mode workload estimation functionality. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "rgxdevice.h" +#include "rgxworkest.h" +#include "rgxfwutils.h" +#include "rgxpdvfs.h" +#include "rgx_options.h" +#include "device.h" +#include "hash.h" +#include "pvr_debug.h" + +#define ROUND_DOWN_TO_NEAREST_1024(number) (((number) >> 10) << 10) + +static inline IMG_BOOL _WorkEstEnabled(void) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + if (psPVRSRVData->sDriverInfo.sKMBuildInfo.ui32BuildOptions & + psPVRSRVData->sDriverInfo.sUMBuildInfo.ui32BuildOptions & + OPTIONS_WORKLOAD_ESTIMATION_MASK) + { + return IMG_TRUE; + } + + return IMG_FALSE; +} + +static inline IMG_UINT32 _WorkEstDoHash(IMG_UINT32 ui32Input) +{ + IMG_UINT32 ui32HashPart; + + /* Hash function borrowed from hash.c */ + ui32HashPart = ui32Input; + ui32HashPart += (ui32HashPart << 12); + ui32HashPart ^= (ui32HashPart >> 22); + ui32HashPart += (ui32HashPart << 4); + ui32HashPart ^= (ui32HashPart >> 9); + ui32HashPart += (ui32HashPart << 10); + ui32HashPart ^= (ui32HashPart >> 2); + ui32HashPart += (ui32HashPart << 7); + ui32HashPart ^= (ui32HashPart >> 12); + + return ui32HashPart; +} + +/*! Hash functions for TA/3D workload estimation */ +IMG_BOOL WorkEstHashCompareTA3D(size_t uKeySize, void *pKey1, void *pKey2); +IMG_UINT32 WorkEstHashFuncTA3D(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen); + +/*! Hash functions for compute workload estimation */ +IMG_BOOL WorkEstHashCompareCompute(size_t uKeySize, void *pKey1, void *pKey2); +IMG_UINT32 WorkEstHashFuncCompute(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen); + +/*! 
Hash functions for TDM/transfer workload estimation */ +IMG_BOOL WorkEstHashCompareTDM(size_t uKeySize, void *pKey1, void *pKey2); +IMG_UINT32 WorkEstHashFuncTDM(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen); + + +IMG_BOOL WorkEstHashCompareTA3D(size_t uKeySize, void *pKey1, void *pKey2) +{ + RGX_WORKLOAD *psWorkload1; + RGX_WORKLOAD *psWorkload2; + PVR_UNREFERENCED_PARAMETER(uKeySize); + + if (pKey1 && pKey2) + { + psWorkload1 = *((RGX_WORKLOAD **)pKey1); + psWorkload2 = *((RGX_WORKLOAD **)pKey2); + + PVR_ASSERT(psWorkload1); + PVR_ASSERT(psWorkload2); + + if (psWorkload1->sTA3D.ui32RenderTargetSize == psWorkload2->sTA3D.ui32RenderTargetSize && + psWorkload1->sTA3D.ui32NumberOfDrawCalls == psWorkload2->sTA3D.ui32NumberOfDrawCalls && + psWorkload1->sTA3D.ui32NumberOfIndices == psWorkload2->sTA3D.ui32NumberOfIndices && + psWorkload1->sTA3D.ui32NumberOfMRTs == psWorkload2->sTA3D.ui32NumberOfMRTs) + { + /* This is added to allow this memory to be freed */ + *(uintptr_t*)pKey2 = *(uintptr_t*)pKey1; + return IMG_TRUE; + } + } + + return IMG_FALSE; +} + +IMG_UINT32 WorkEstHashFuncTA3D(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen) +{ + RGX_WORKLOAD *psWorkload = *((RGX_WORKLOAD**)pKey); + IMG_UINT32 ui32HashKey = 0; + PVR_UNREFERENCED_PARAMETER(uHashTabLen); + PVR_UNREFERENCED_PARAMETER(uKeySize); + + /* Hash key predicated on multiple render target attributes */ + ui32HashKey += _WorkEstDoHash(psWorkload->sTA3D.ui32RenderTargetSize); + ui32HashKey += _WorkEstDoHash(psWorkload->sTA3D.ui32NumberOfDrawCalls); + ui32HashKey += _WorkEstDoHash(psWorkload->sTA3D.ui32NumberOfIndices); + ui32HashKey += _WorkEstDoHash(psWorkload->sTA3D.ui32NumberOfMRTs); + + return ui32HashKey; +} + +IMG_BOOL WorkEstHashCompareCompute(size_t uKeySize, void *pKey1, void *pKey2) +{ + RGX_WORKLOAD *psWorkload1; + RGX_WORKLOAD *psWorkload2; + PVR_UNREFERENCED_PARAMETER(uKeySize); + + if (pKey1 && pKey2) + { + psWorkload1 = *((RGX_WORKLOAD **)pKey1); + psWorkload2 = *((RGX_WORKLOAD 
**)pKey2); + + PVR_ASSERT(psWorkload1); + PVR_ASSERT(psWorkload2); + + if (psWorkload1->sCompute.ui32NumberOfWorkgroups == psWorkload2->sCompute.ui32NumberOfWorkgroups && + psWorkload1->sCompute.ui32NumberOfWorkitems == psWorkload2->sCompute.ui32NumberOfWorkitems) + { + /* This is added to allow this memory to be freed */ + *(uintptr_t*)pKey2 = *(uintptr_t*)pKey1; + return IMG_TRUE; + } + } + + return IMG_FALSE; +} + +IMG_UINT32 WorkEstHashFuncCompute(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen) +{ + RGX_WORKLOAD *psWorkload = *((RGX_WORKLOAD**)pKey); + IMG_UINT32 ui32HashKey = 0; + PVR_UNREFERENCED_PARAMETER(uHashTabLen); + PVR_UNREFERENCED_PARAMETER(uKeySize); + + /* Hash key predicated on multiple render target attributes */ + ui32HashKey += _WorkEstDoHash(psWorkload->sCompute.ui32NumberOfWorkgroups); + ui32HashKey += _WorkEstDoHash(psWorkload->sCompute.ui32NumberOfWorkitems); + return ui32HashKey; +} + +IMG_BOOL WorkEstHashCompareTDM(size_t uKeySize, void *pKey1, void *pKey2) +{ + RGX_WORKLOAD *psWorkload1; + RGX_WORKLOAD *psWorkload2; + PVR_UNREFERENCED_PARAMETER(uKeySize); + + if (pKey1 && pKey2) + { + psWorkload1 = *((RGX_WORKLOAD **)pKey1); + psWorkload2 = *((RGX_WORKLOAD **)pKey2); + + PVR_ASSERT(psWorkload1); + PVR_ASSERT(psWorkload2); + + if (psWorkload1->sTransfer.ui32Characteristic1 == psWorkload2->sTransfer.ui32Characteristic1 && + psWorkload1->sTransfer.ui32Characteristic2 == psWorkload2->sTransfer.ui32Characteristic2) + { + /* This is added to allow this memory to be freed */ + *(uintptr_t*)pKey2 = *(uintptr_t*)pKey1; + return IMG_TRUE; + } + } + + return IMG_FALSE; +} + +IMG_UINT32 WorkEstHashFuncTDM(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen) +{ + RGX_WORKLOAD *psWorkload = *((RGX_WORKLOAD**)pKey); + IMG_UINT32 ui32HashKey = 0; + PVR_UNREFERENCED_PARAMETER(uHashTabLen); + PVR_UNREFERENCED_PARAMETER(uKeySize); + + /* Hash key predicated on transfer src/dest attributes */ + ui32HashKey += 
_WorkEstDoHash(psWorkload->sTransfer.ui32Characteristic1); + ui32HashKey += _WorkEstDoHash(psWorkload->sTransfer.ui32Characteristic2); + + return ui32HashKey; +} + +void WorkEstHashLockCreate(POS_LOCK *ppsHashLock) +{ + if (*ppsHashLock == NULL) + { + OSLockCreate(ppsHashLock); + } +} + +void WorkEstHashLockDestroy(POS_LOCK psWorkEstHashLock) +{ + if (psWorkEstHashLock != NULL) + { + OSLockDestroy(psWorkEstHashLock); + psWorkEstHashLock = NULL; + } +} + +void WorkEstCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_WORKEST_FWCCB_CMD *psFwCCBCmd; + IMG_UINT8 *psFWCCB = psDevInfo->psWorkEstFirmwareCCB; + RGXFWIF_CCB_CTL *psFWCCBCtl = psDevInfo->psWorkEstFirmwareCCBCtl; + + while (psFWCCBCtl->ui32ReadOffset != psFWCCBCtl->ui32WriteOffset) + { + PVRSRV_ERROR eError; + + /* Point to the next command */ + psFwCCBCmd = (RGXFWIF_WORKEST_FWCCB_CMD *)((uintptr_t)psFWCCB + psFWCCBCtl->ui32ReadOffset * sizeof(RGXFWIF_WORKEST_FWCCB_CMD)); + + eError = WorkEstRetire(psDevInfo, psFwCCBCmd); + PVR_LOG_IF_ERROR(eError, "WorkEstCheckFirmwareCCB: WorkEstRetire failed"); + + /* Update read offset */ + psFWCCBCtl->ui32ReadOffset = (psFWCCBCtl->ui32ReadOffset + 1) & psFWCCBCtl->ui32WrapMask; + } +} + +PVRSRV_ERROR WorkEstPrepare(PVRSRV_RGXDEV_INFO *psDevInfo, + WORKEST_HOST_DATA *psWorkEstHostData, + WORKLOAD_MATCHING_DATA *psWorkloadMatchingData, + const RGXFWIF_CCB_CMD_TYPE eDMCmdType, + const RGX_WORKLOAD *psWorkloadCharsIn, + IMG_UINT64 ui64DeadlineInus, + RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData) +{ + RGX_WORKLOAD *psWorkloadCharacteristics; + IMG_UINT64 *pui64CyclePrediction; + IMG_UINT64 ui64CurrentTime; + WORKEST_RETURN_DATA *psReturnData; + IMG_UINT32 ui32ReturnDataWO; +#if defined(SUPPORT_SOC_TIMER) + PVRSRV_DEVICE_CONFIG *psDevConfig; + IMG_UINT64 ui64CurrentSoCTime; +#endif + PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS; + + if (!_WorkEstEnabled()) + { + /* No error message to avoid excessive messages */ + return PVRSRV_OK; + } + + if (eDMCmdType == 
RGXFWIF_CCB_CMD_TYPE_NULL) + { + /* No workload, only fence updates */ + return PVRSRV_OK; + } + +#if !defined(PVRSRV_NEED_PVR_DPF) + PVR_UNREFERENCED_PARAMETER(eDMCmdType); +#endif + + /* Validate all required objects required for preparing work estimation */ + PVR_LOG_RETURN_IF_FALSE(psDevInfo, "device info not available", eError); + PVR_LOG_RETURN_IF_FALSE(psWorkEstHostData, "host data not available", eError); + PVR_LOG_RETURN_IF_FALSE(psWorkloadMatchingData, "Workload Matching Data not available", eError); + PVR_LOG_RETURN_IF_FALSE(psWorkloadMatchingData->psHashLock, "hash lock not available", eError); + PVR_LOG_RETURN_IF_FALSE(psWorkloadMatchingData->psHashTable, "hash table not available", eError); + +#if defined(SUPPORT_SOC_TIMER) + psDevConfig = psDevInfo->psDeviceNode->psDevConfig; + PVR_LOG_RETURN_IF_FALSE(psDevConfig->pfnSoCTimerRead, "SoC timer not available", eError); + ui64CurrentSoCTime = psDevConfig->pfnSoCTimerRead(psDevConfig->hSysData); +#endif + + eError = OSClockMonotonicus64(&ui64CurrentTime); + PVR_LOG_RETURN_IF_ERROR(eError, "unable to access System Monotonic clock"); + + OSLockAcquire(psDevInfo->hWorkEstLock); + + /* Select the next index for the return data and update it (is this thread safe?) */ + ui32ReturnDataWO = psDevInfo->ui32ReturnDataWO; + psDevInfo->ui32ReturnDataWO = (ui32ReturnDataWO + 1) & RETURN_DATA_ARRAY_WRAP_MASK; + + /* Index for the return data passed to/from the firmware. */ + psWorkEstKickData->ui64ReturnDataIndex = ui32ReturnDataWO; + if (ui64DeadlineInus > ui64CurrentTime) + { + /* Rounding is done to reduce multiple deadlines with minor spread flooding the fw workload array. 
*/ +#if defined(SUPPORT_SOC_TIMER) + IMG_UINT64 ui64TimeDelta = (ui64DeadlineInus - ui64CurrentTime) * SOC_TIMER_FREQ; + psWorkEstKickData->ui64Deadline = ROUND_DOWN_TO_NEAREST_1024(ui64CurrentSoCTime + ui64TimeDelta); +#else + psWorkEstKickData->ui64Deadline = ROUND_DOWN_TO_NEAREST_1024(ui64DeadlineInus); +#endif + } + else + { + /* If deadline has already passed, assign zero to suggest full frequency */ + psWorkEstKickData->ui64Deadline = 0; + } + + /* Set up data for the return path to process the workload; the matching data is needed + as it holds the hash data, the host data is needed for completion updates */ + psReturnData = &psDevInfo->asReturnData[ui32ReturnDataWO]; + psReturnData->psWorkloadMatchingData = psWorkloadMatchingData; + psReturnData->psWorkEstHostData = psWorkEstHostData; + + /* The workload characteristic is needed in the return data for the matching + of future workloads via the hash. */ + psWorkloadCharacteristics = &psReturnData->sWorkloadCharacteristics; + memcpy(psWorkloadCharacteristics, psWorkloadCharsIn, sizeof(RGX_WORKLOAD)); + + OSLockRelease(psDevInfo->hWorkEstLock); + + /* Acquire the lock to access hash */ + OSLockAcquire(psWorkloadMatchingData->psHashLock); + + /* Check if there is a prediction for this workload */ + pui64CyclePrediction = (IMG_UINT64*) HASH_Retrieve(psWorkloadMatchingData->psHashTable, + (uintptr_t)psWorkloadCharacteristics); + + /* Release lock */ + OSLockRelease(psWorkloadMatchingData->psHashLock); + + if (pui64CyclePrediction != NULL) + { + /* Cycle prediction is available, store this prediction */ + psWorkEstKickData->ui64CyclesPrediction = *pui64CyclePrediction; + +#if defined(PVRSRV_NEED_PVR_DPF) + switch (eDMCmdType) + { + case RGXFWIF_CCB_CMD_TYPE_GEOM: + case RGXFWIF_CCB_CMD_TYPE_3D: + PVR_DPF((PVR_DBG_MESSAGE, "%s: RT size = %u, draw count = %u, indices = %u, prediction = " IMG_DEVMEM_SIZE_FMTSPEC, + __func__, + psWorkloadCharacteristics->sTA3D.ui32RenderTargetSize, + 
psWorkloadCharacteristics->sTA3D.ui32NumberOfDrawCalls, + psWorkloadCharacteristics->sTA3D.ui32NumberOfIndices, + *pui64CyclePrediction)); + break; + case RGXFWIF_CCB_CMD_TYPE_CDM: + PVR_DPF((PVR_DBG_MESSAGE, "%s: Number of workgroups = %u, max workgroup size = %u, prediction = " IMG_DEVMEM_SIZE_FMTSPEC, + __func__, + psWorkloadCharacteristics->sCompute.ui32NumberOfWorkgroups, + psWorkloadCharacteristics->sCompute.ui32NumberOfWorkitems, + *pui64CyclePrediction)); + break; + case RGXFWIF_CCB_CMD_TYPE_TQ_TDM: + PVR_DPF((PVR_DBG_MESSAGE, "%s: Dest size = %u, Pixel format ID = %u, prediction = " IMG_DEVMEM_SIZE_FMTSPEC, + __func__, + psWorkloadCharacteristics->sTransfer.ui32Characteristic1, + psWorkloadCharacteristics->sTransfer.ui32Characteristic2, + *pui64CyclePrediction)); + break; + default: + break; + } +#endif + } + else + { + /* There is no prediction */ + psWorkEstKickData->ui64CyclesPrediction = 0; + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR WorkEstRetire(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_WORKEST_FWCCB_CMD *psReturnCmd) +{ + RGX_WORKLOAD *psWorkloadCharacteristics; + WORKLOAD_MATCHING_DATA *psWorkloadMatchingData; + IMG_UINT64 *paui64WorkloadHashData; + RGX_WORKLOAD *pasWorkloadHashKeys; + IMG_UINT32 ui32HashArrayWO; + IMG_UINT64 *pui64CyclesTaken; + WORKEST_RETURN_DATA *psReturnData; + WORKEST_HOST_DATA *psWorkEstHostData; + + if (!_WorkEstEnabled()) + { + /* No error message to avoid excessive messages */ + return PVRSRV_OK; + } + + PVR_LOG_RETURN_IF_FALSE(psReturnCmd, + "WorkEstRetire: Missing return command", + PVRSRV_ERROR_INVALID_PARAMS); + + if (psReturnCmd->ui64ReturnDataIndex >= RETURN_DATA_ARRAY_SIZE) + { + PVR_DPF((PVR_DBG_ERROR, "WorkEstRetire: Handle reference out-of-bounds:" + " %" IMG_UINT64_FMTSPEC " >= %" IMG_UINT64_FMTSPEC, + psReturnCmd->ui64ReturnDataIndex, + (IMG_UINT64) RETURN_DATA_ARRAY_SIZE)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + OSLockAcquire(psDevInfo->hWorkEstLock); + + /* Retrieve/validate the return data from this 
completed workload */ + psReturnData = &psDevInfo->asReturnData[psReturnCmd->ui64ReturnDataIndex]; + psWorkloadCharacteristics = &psReturnData->sWorkloadCharacteristics; + psWorkEstHostData = psReturnData->psWorkEstHostData; + PVR_LOG_GOTO_IF_FALSE(psWorkEstHostData, + "WorkEstRetire: Missing host data", + unlock_workest); + + /* Retrieve/validate completed workload matching data */ + psWorkloadMatchingData = psReturnData->psWorkloadMatchingData; + PVR_LOG_GOTO_IF_FALSE(psWorkloadMatchingData, + "WorkEstRetire: Missing matching data", + unlock_workest); + PVR_LOG_GOTO_IF_FALSE(psWorkloadMatchingData->psHashTable, + "WorkEstRetire: Missing hash", + unlock_workest); + PVR_LOG_GOTO_IF_FALSE(psWorkloadMatchingData->psHashLock, + "WorkEstRetire: Missing hash/lock", + unlock_workest); + paui64WorkloadHashData = psWorkloadMatchingData->aui64HashData; + pasWorkloadHashKeys = psWorkloadMatchingData->asHashKeys; + ui32HashArrayWO = psWorkloadMatchingData->ui32HashArrayWO; + + OSLockRelease(psDevInfo->hWorkEstLock); + + OSLockAcquire(psWorkloadMatchingData->psHashLock); + + /* Update workload prediction by removing old hash entry (if any) + * & inserting new hash entry */ + pui64CyclesTaken = (IMG_UINT64*) HASH_Remove(psWorkloadMatchingData->psHashTable, + (uintptr_t)psWorkloadCharacteristics); + + if (paui64WorkloadHashData[ui32HashArrayWO] > 0) + { + /* Out-of-space so remove the oldest hash data before it becomes + * overwritten */ + RGX_WORKLOAD *psWorkloadHashKey = &pasWorkloadHashKeys[ui32HashArrayWO]; + (void) HASH_Remove(psWorkloadMatchingData->psHashTable, (uintptr_t)psWorkloadHashKey); + } + + if (pui64CyclesTaken == NULL) + { + /* There is no existing entry for this workload characteristics, + * store it */ + paui64WorkloadHashData[ui32HashArrayWO] = psReturnCmd->ui64CyclesTaken; + pasWorkloadHashKeys[ui32HashArrayWO] = *psWorkloadCharacteristics; + } + else + { + /* Found prior entry for workload characteristics, average with + * completed; also reset the old 
value to 0 so it is known to be + * invalid */ + paui64WorkloadHashData[ui32HashArrayWO] = (*pui64CyclesTaken + psReturnCmd->ui64CyclesTaken)/2; + pasWorkloadHashKeys[ui32HashArrayWO] = *psWorkloadCharacteristics; + *pui64CyclesTaken = 0; + } + + /* Hash insertion should not fail but if it does best we can do is to exit + * gracefully and not update the FW received counter */ + if (IMG_TRUE != HASH_Insert((HASH_TABLE*)psWorkloadMatchingData->psHashTable, + (uintptr_t)&pasWorkloadHashKeys[ui32HashArrayWO], + (uintptr_t)&paui64WorkloadHashData[ui32HashArrayWO])) + { + PVR_ASSERT(0); + PVR_LOG(("WorkEstRetire: HASH_Insert failed")); + } + + psWorkloadMatchingData->ui32HashArrayWO = (ui32HashArrayWO + 1) & WORKLOAD_HASH_WRAP_MASK; + + OSLockRelease(psWorkloadMatchingData->psHashLock); + + /* Update the received counter so that the FW is able to check as to whether + * all the workloads connected to a render context are finished. + * Note: needs to be done also for *unlock_workest* label below. 
*/ + psWorkEstHostData->ui32WorkEstCCBReceived++; + + return PVRSRV_OK; + +unlock_workest: + OSLockRelease(psDevInfo->hWorkEstLock); + psWorkEstHostData->ui32WorkEstCCBReceived++; + + return PVRSRV_ERROR_INVALID_PARAMS; +} + +static void _WorkEstInit(PVRSRV_RGXDEV_INFO *psDevInfo, + WORKLOAD_MATCHING_DATA *psWorkloadMatchingData, + HASH_FUNC *pfnWorkEstHashFunc, + HASH_KEY_COMP *pfnWorkEstHashCompare) +{ + HASH_TABLE *psWorkloadHashTable; + PVR_UNREFERENCED_PARAMETER(psDevInfo); + + /* Create a lock to protect the per-DM hash table */ + WorkEstHashLockCreate(&psWorkloadMatchingData->psHashLock); + + /* Create hash table for the per-DM workload matching */ + psWorkloadHashTable = HASH_Create_Extended(WORKLOAD_HASH_SIZE, + sizeof(RGX_WORKLOAD *), + pfnWorkEstHashFunc, + pfnWorkEstHashCompare); + psWorkloadMatchingData->psHashTable = psWorkloadHashTable; +} + +static void _WorkEstDeInit(PVRSRV_RGXDEV_INFO *psDevInfo, + WORKLOAD_MATCHING_DATA *psWorkloadMatchingData) +{ + HASH_TABLE *psWorkloadHashTable; + RGX_WORKLOAD *pasWorkloadHashKeys; + RGX_WORKLOAD *psWorkloadHashKey; + IMG_UINT64 *paui64WorkloadCycleData; + IMG_UINT32 ui32Itr; + + /* Tear down per-DM hash */ + pasWorkloadHashKeys = psWorkloadMatchingData->asHashKeys; + paui64WorkloadCycleData = psWorkloadMatchingData->aui64HashData; + psWorkloadHashTable = psWorkloadMatchingData->psHashTable; + + if (psWorkloadHashTable) + { + for (ui32Itr = 0; ui32Itr < WORKLOAD_HASH_SIZE; ui32Itr++) + { + if (paui64WorkloadCycleData[ui32Itr] > 0) + { + psWorkloadHashKey = &pasWorkloadHashKeys[ui32Itr]; + HASH_Remove(psWorkloadHashTable, (uintptr_t)psWorkloadHashKey); + } + } + + HASH_Delete(psWorkloadHashTable); + } + + /* Remove the hash lock */ + WorkEstHashLockDestroy(psWorkloadMatchingData->psHashLock); + + return; +} + +void WorkEstInitTA3D(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData) +{ + _WorkEstInit(psDevInfo, + &psWorkEstData->uWorkloadMatchingData.sTA3D.sDataTA, + (HASH_FUNC 
*)WorkEstHashFuncTA3D, + (HASH_KEY_COMP *)WorkEstHashCompareTA3D); + _WorkEstInit(psDevInfo, + &psWorkEstData->uWorkloadMatchingData.sTA3D.sData3D, + (HASH_FUNC *)WorkEstHashFuncTA3D, + (HASH_KEY_COMP *)WorkEstHashCompareTA3D); +} + +void WorkEstDeInitTA3D(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData) +{ + _WorkEstDeInit(psDevInfo, &psWorkEstData->uWorkloadMatchingData.sTA3D.sDataTA); + _WorkEstDeInit(psDevInfo, &psWorkEstData->uWorkloadMatchingData.sTA3D.sData3D); +} + +void WorkEstInitCompute(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData) +{ + _WorkEstInit(psDevInfo, + &psWorkEstData->uWorkloadMatchingData.sCompute.sDataCDM, + (HASH_FUNC *)WorkEstHashFuncCompute, + (HASH_KEY_COMP *)WorkEstHashCompareCompute); +} + +void WorkEstDeInitCompute(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData) +{ + _WorkEstDeInit(psDevInfo, &psWorkEstData->uWorkloadMatchingData.sCompute.sDataCDM); +} + +void WorkEstInitTDM(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData) +{ + _WorkEstInit(psDevInfo, + &psWorkEstData->uWorkloadMatchingData.sTransfer.sDataTDM, + (HASH_FUNC *)WorkEstHashFuncTDM, + (HASH_KEY_COMP *)WorkEstHashCompareTDM); +} + +void WorkEstDeInitTDM(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData) +{ + _WorkEstDeInit(psDevInfo, &psWorkEstData->uWorkloadMatchingData.sTransfer.sDataTDM); +} diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rgxworkest.h b/drivers/mcst/gpu-imgtec/services/server/devices/rgxworkest.h new file mode 100644 index 000000000000..dc01eec1374f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rgxworkest.h @@ -0,0 +1,81 @@ +/*************************************************************************/ /*! +@File rgxworkest.h +@Title RGX Workload Estimation Functionality +@Codingstyle IMG +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the kernel mode workload estimation functionality. 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXWORKEST_H +#define RGXWORKEST_H + +#include "img_types.h" +#include "rgxta3d.h" + + +void WorkEstInitTA3D(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData); + +void WorkEstDeInitTA3D(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData); + +void WorkEstInitCompute(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData); + +void WorkEstDeInitCompute(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData); + +void WorkEstInitTDM(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData); + +void WorkEstDeInitTDM(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData); + +PVRSRV_ERROR WorkEstPrepare(PVRSRV_RGXDEV_INFO *psDevInfo, + WORKEST_HOST_DATA *psWorkEstHostData, + WORKLOAD_MATCHING_DATA *psWorkloadMatchingData, + const RGXFWIF_CCB_CMD_TYPE eDMCmdType, + const RGX_WORKLOAD *psWorkloadCharsIn, + IMG_UINT64 ui64DeadlineInus, + RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData); + +PVRSRV_ERROR WorkEstRetire(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_WORKEST_FWCCB_CMD *psReturnCmd); + +void WorkEstHashLockCreate(POS_LOCK *ppsHashLock); + +void WorkEstHashLockDestroy(POS_LOCK psHashLock); + +void WorkEstCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo); + +#endif /* RGXWORKEST_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxbreakpoint.c 
b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxbreakpoint.c new file mode 100644 index 000000000000..168d070cdafe --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxbreakpoint.c @@ -0,0 +1,295 @@ +/*************************************************************************/ /*! +@File +@Title RGX Breakpoint routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX Breakpoint routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "rgxbreakpoint.h" +#include "pvr_debug.h" +#include "rgxutils.h" +#include "rgxfwutils.h" +#include "rgxmem.h" +#include "device.h" +#include "sync_internal.h" +#include "pdump_km.h" +#include "pvrsrv.h" + +PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_HANDLE hMemCtxPrivData, + RGXFWIF_DM eFWDataMaster, + IMG_UINT32 ui32BPAddr, + IMG_UINT32 ui32HandlerAddr, + IMG_UINT32 ui32DataMaster) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sBPCmd; + IMG_UINT32 ui32kCCBCommandSlot; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + OSLockAcquire(psDevInfo->hBPLock); + + if (psDevInfo->bBPSet) + { + eError = PVRSRV_ERROR_BP_ALREADY_SET; + goto unlock; + } + + sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP; + sBPCmd.uCmdData.sBPData.ui32BPAddr = ui32BPAddr; + sBPCmd.uCmdData.sBPData.ui32HandlerAddr = ui32HandlerAddr; + sBPCmd.uCmdData.sBPData.ui32BPDM = ui32DataMaster; + sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_WRITE | RGXFWIF_BPDATA_FLAGS_ENABLE; + sBPCmd.uCmdData.sBPData.eDM = eFWDataMaster; + + eError = RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext, + psFWMemContextMemDesc, + 0 , + 
RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", unlock); + + eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, + eFWDataMaster, + &sBPCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock); + + /* Wait for FW to complete command execution */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock); + + psDevInfo->eBPDM = eFWDataMaster; + psDevInfo->bBPSet = IMG_TRUE; + +unlock: + OSLockRelease(psDevInfo->hBPLock); + + return eError; +} + +PVRSRV_ERROR PVRSRVRGXClearBreakpointKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_HANDLE hMemCtxPrivData) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sBPCmd; + IMG_UINT32 ui32kCCBCommandSlot; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP; + sBPCmd.uCmdData.sBPData.ui32BPAddr = 0; + sBPCmd.uCmdData.sBPData.ui32HandlerAddr = 0; + sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_WRITE | RGXFWIF_BPDATA_FLAGS_CTL; + sBPCmd.uCmdData.sBPData.eDM = psDevInfo->eBPDM; + + OSLockAcquire(psDevInfo->hBPLock); + + eError = RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext, + psFWMemContextMemDesc, + 0 , + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", unlock); + + eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, + psDevInfo->eBPDM, + &sBPCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock); + + /* Wait for FW to complete command execution */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + 
PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock); + + psDevInfo->bBPSet = IMG_FALSE; + +unlock: + OSLockRelease(psDevInfo->hBPLock); + + return eError; +} + +PVRSRV_ERROR PVRSRVRGXEnableBreakpointKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_HANDLE hMemCtxPrivData) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sBPCmd; + IMG_UINT32 ui32kCCBCommandSlot; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + OSLockAcquire(psDevInfo->hBPLock); + + if (psDevInfo->bBPSet == IMG_FALSE) + { + eError = PVRSRV_ERROR_BP_NOT_SET; + goto unlock; + } + + sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP; + sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_CTL | RGXFWIF_BPDATA_FLAGS_ENABLE; + sBPCmd.uCmdData.sBPData.eDM = psDevInfo->eBPDM; + + eError = RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext, + psFWMemContextMemDesc, + 0 , + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", unlock); + + eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, + psDevInfo->eBPDM, + &sBPCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock); + + /* Wait for FW to complete command execution */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock); + +unlock: + OSLockRelease(psDevInfo->hBPLock); + + return eError; +} + +PVRSRV_ERROR PVRSRVRGXDisableBreakpointKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_HANDLE hMemCtxPrivData) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); + PVRSRV_ERROR eError = PVRSRV_OK; + 
RGXFWIF_KCCB_CMD sBPCmd; + IMG_UINT32 ui32kCCBCommandSlot; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + OSLockAcquire(psDevInfo->hBPLock); + + if (psDevInfo->bBPSet == IMG_FALSE) + { + eError = PVRSRV_ERROR_BP_NOT_SET; + goto unlock; + } + + sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP; + sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_CTL; + sBPCmd.uCmdData.sBPData.eDM = psDevInfo->eBPDM; + + eError = RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext, + psFWMemContextMemDesc, + 0 , + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", unlock); + + eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, + psDevInfo->eBPDM, + &sBPCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock); + + /* Wait for FW to complete command execution */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock); + +unlock: + OSLockRelease(psDevInfo->hBPLock); + + return eError; +} + +PVRSRV_ERROR PVRSRVRGXOverallocateBPRegistersKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32TempRegs, + IMG_UINT32 ui32SharedRegs) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sBPCmd; + IMG_UINT32 ui32kCCBCommandSlot; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP; + sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_REGS; + sBPCmd.uCmdData.sBPData.ui32TempRegs = ui32TempRegs; + sBPCmd.uCmdData.sBPData.ui32SharedRegs = ui32SharedRegs; + sBPCmd.uCmdData.sBPData.psFWMemContext.ui32Addr = 0U; + sBPCmd.uCmdData.sBPData.eDM = RGXFWIF_DM_GP; + + OSLockAcquire(psDevInfo->hBPLock); + + eError = RGXScheduleCommandAndGetKCCBSlot(psDeviceNode->pvDevice, + RGXFWIF_DM_GP, + &sBPCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + 
&ui32kCCBCommandSlot); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock); + + /* Wait for FW to complete command execution */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock); + +unlock: + OSLockRelease(psDevInfo->hBPLock); + + return eError; +} + +/****************************************************************************** + End of file (rgxbreakpoint.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxbvnc.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxbvnc.c new file mode 100644 index 000000000000..a79bdcb6683c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxbvnc.c @@ -0,0 +1,702 @@ +/*************************************************************************/ /*! +@File +@Title BVNC handling specific routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Functions used for BNVC related work +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "img_defs.h" +#include "rgxbvnc.h" +#define _RGXBVNC_C_ +#include "rgx_bvnc_table_km.h" +#undef _RGXBVNC_C_ +#include "oskm_apphint.h" +#include "pvrsrv.h" +#include "pdump_km.h" +#include "rgx_compat_bvnc.h" + +#define RGXBVNC_BUFFER_SIZE (((PVRSRV_MAX_DEVICES)*(RGX_BVNC_STR_SIZE_MAX))+1) + +/* This function searches the given array for a given search value */ +static IMG_UINT64* _RGXSearchBVNCTable( IMG_UINT64 *pui64Array, + IMG_UINT uiEnd, + IMG_UINT64 ui64SearchValue, + IMG_UINT uiRowCount) +{ + IMG_UINT uiStart = 0, index; + IMG_UINT64 value, *pui64Ptr = NULL; + + while (uiStart < uiEnd) + { + index = (uiStart + uiEnd)/2; + pui64Ptr = pui64Array + (index * uiRowCount); + value = *(pui64Ptr); + + if (value == ui64SearchValue) + { + return pui64Ptr; + } + + if (value > ui64SearchValue) + { + uiEnd = index; + }else + { + uiStart = index + 1; + } + } + return NULL; +} +#define RGX_SEARCH_BVNC_TABLE(t, b) (_RGXSearchBVNCTable((IMG_UINT64*)(t), \ + ARRAY_SIZE(t), (b), \ + sizeof((t)[0])/sizeof(IMG_UINT64)) ) + + +#if defined(DEBUG) + +#define PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, szShortName, Feature) \ + if ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] != RGX_FEATURE_VALUE_DISABLED ) \ + { PVR_LOG(("%s %d", szShortName, psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX])); } \ + else \ + { PVR_LOG(("%s N/A", szShortName)); } + +static void _RGXBvncDumpParsedConfig(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + IMG_UINT64 ui64Mask = 0, ui32IdOrNameIdx = 1; + + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NC: ", NUM_CLUSTERS); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "CSF: ", CDM_CONTROL_STREAM_FORMAT); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "FBCDCA: ", FBCDC_ARCHITECTURE); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MCMB: ", META_COREMEM_BANKS); + 
PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MCMS: ", META_COREMEM_SIZE); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MDMACnt: ", META_DMA_CHANNEL_COUNT); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NIIP: ", NUM_ISP_IPP_PIPES); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "PBW: ", PHYS_BUS_WIDTH); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "STEArch: ", SCALABLE_TE_ARCH); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SVCEA: ", SCALABLE_VCE); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SLCBanks: ", SLC_BANKS); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SLCCLS: ", SLC_CACHE_LINE_SIZE_BITS); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SLCSize: ", SLC_SIZE_IN_BYTES); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "VASB: ", VIRTUAL_ADDRESS_SPACE_BITS); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "META: ", META); + +#if defined(FEATURE_NO_VALUES_NAMES_MAX_IDX) + /* Dump the features with no values */ + ui64Mask = psDevInfo->sDevFeatureCfg.ui64Features; + while (ui64Mask) + { + if (ui64Mask & 0x01) + { + if (ui32IdOrNameIdx <= FEATURE_NO_VALUES_NAMES_MAX_IDX) + { + PVR_LOG(("%s", gaszFeaturesNoValuesNames[ui32IdOrNameIdx - 1])); + } + else + { + PVR_DPF((PVR_DBG_WARNING, + "Feature with Mask doesn't exist: 0x%016" IMG_UINT64_FMTSPECx, + ((IMG_UINT64)1 << (ui32IdOrNameIdx - 1)))); + } + } + ui64Mask >>= 1; + ui32IdOrNameIdx++; + } +#endif + +#if defined(ERNSBRNS_IDS_MAX_IDX) + /* Dump the ERN and BRN flags for this core */ + ui64Mask = psDevInfo->sDevFeatureCfg.ui64ErnsBrns; + ui32IdOrNameIdx = 1; + + while (ui64Mask) + { + if (ui64Mask & 0x1) + { + if (ui32IdOrNameIdx <= ERNSBRNS_IDS_MAX_IDX) + { + PVR_LOG(("ERN/BRN : %d", gaui64ErnsBrnsIDs[ui32IdOrNameIdx - 1])); + } + else + { + PVR_LOG(("Unknown ErnBrn bit: 0x%0" IMG_UINT64_FMTSPECx, ((IMG_UINT64)1 << (ui32IdOrNameIdx - 1)))); + } + } + ui64Mask >>= 1; + ui32IdOrNameIdx++; + } +#endif + +} +#endif + +static void _RGXBvncParseFeatureValues(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT64 *pui64Cfg) +{ + IMG_UINT32 ui32Index; + + /* Read the feature values for the runtime 
BVNC */ + for (ui32Index = 0; ui32Index < RGX_FEATURE_WITH_VALUES_MAX_IDX; ui32Index++) + { + IMG_UINT16 bitPosition = aui16FeaturesWithValuesBitPositions[ui32Index]; + IMG_UINT64 ui64PackedValues = pui64Cfg[2 + bitPosition / 64]; + IMG_UINT16 ui16ValueIndex = (ui64PackedValues & aui64FeaturesWithValuesBitMasks[ui32Index]) >> (bitPosition % 64); + + if (ui16ValueIndex < gaFeaturesValuesMaxIndexes[ui32Index]) + { + if (gaFeaturesValues[ui32Index][ui16ValueIndex] == (IMG_UINT16)RGX_FEATURE_VALUE_DISABLED) + { + psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = RGX_FEATURE_VALUE_DISABLED; + } + else + { + psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = gaFeaturesValues[ui32Index][ui16ValueIndex]; + } + } + else + { + /* This case should never be reached */ + psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = RGX_FEATURE_VALUE_INVALID; + PVR_DPF((PVR_DBG_ERROR, "%s: Feature with index (%d) decoded wrong value index (%d)", __func__, ui32Index, ui16ValueIndex)); + PVR_ASSERT(ui16ValueIndex < gaFeaturesValuesMaxIndexes[ui32Index]); + } + } + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_META_IDX] = RGX_FEATURE_VALUE_DISABLED; + } + + /* Get the max number of dusts in the core */ + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS)) + { + psDevInfo->sDevFeatureCfg.ui32MAXDustCount = MAX(1, (RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) / 2)); + } + else + { + /* This case should never be reached as all cores have clusters */ + psDevInfo->sDevFeatureCfg.ui32MAXDustCount = RGX_FEATURE_VALUE_INVALID; + PVR_DPF((PVR_DBG_ERROR, "%s: Number of clusters feature value missing!", __func__)); + PVR_ASSERT(0); + } + + + /* Transform the SLC cacheline size info in bytes */ + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_SIZE_IN_BYTES)) + { + psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_SLC_SIZE_IN_BYTES_IDX] *= 1024; + } + + /* Transform the META coremem size info in 
bytes */ + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE)) + { + psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_META_COREMEM_SIZE_IDX] *= 1024; + } +} + +static void _RGXBvncAcquireAppHint(IMG_CHAR *pszBVNC, const IMG_UINT32 ui32RGXDevCount) +{ + const IMG_CHAR *pszAppHintDefault = PVRSRV_APPHINT_RGXBVNC; + void *pvAppHintState = NULL; + IMG_UINT32 ui32BVNCCount = 0; + IMG_BOOL bRet; + IMG_CHAR szBVNCAppHint[RGXBVNC_BUFFER_SIZE]; + IMG_CHAR *pszCurrentBVNC = szBVNCAppHint; + szBVNCAppHint[0] = '\0'; + + OSCreateKMAppHintState(&pvAppHintState); + + bRet = (IMG_BOOL)OSGetKMAppHintSTRING(pvAppHintState, + RGXBVNC, + pszAppHintDefault, + szBVNCAppHint, + sizeof(szBVNCAppHint)); + + OSFreeKMAppHintState(pvAppHintState); + + if (!bRet || (szBVNCAppHint[0] == '\0')) + { + return; + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC module param list: %s",__func__, szBVNCAppHint)); + + while (*pszCurrentBVNC != '\0') + { + IMG_CHAR *pszNext = pszCurrentBVNC; + + if (ui32BVNCCount >= PVRSRV_MAX_DEVICES) + { + break; + } + + while (1) + { + if (*pszNext == ',') + { + pszNext[0] = '\0'; + pszNext++; + break; + } else if (*pszNext == '\0') + { + break; + } + pszNext++; + } + + if (ui32BVNCCount == ui32RGXDevCount) + { + OSStringLCopy(pszBVNC, pszCurrentBVNC, RGX_BVNC_STR_SIZE_MAX); + return; + } + + ui32BVNCCount++; + pszCurrentBVNC = pszNext; + } + + PVR_DPF((PVR_DBG_ERROR, "%s: Given module parameters list is shorter than " + "number of actual devices", __func__)); + + /* If only one BVNC parameter is specified, the same is applied for all RGX + * devices detected */ + if (1 == ui32BVNCCount) + { + OSStringLCopy(pszBVNC, szBVNCAppHint, RGX_BVNC_STR_SIZE_MAX); + } +} + +/* Function that parses the BVNC List passed as module parameter */ +static PVRSRV_ERROR _RGXBvncParseList(IMG_UINT32 *pB, + IMG_UINT32 *pV, + IMG_UINT32 *pN, + IMG_UINT32 *pC, + const IMG_UINT32 ui32RGXDevCount) +{ + unsigned int ui32ScanCount = 0; + IMG_CHAR 
aszBVNCString[RGX_BVNC_STR_SIZE_MAX]; + + aszBVNCString[0] = '\0'; + + /* 4 components of a BVNC string is B, V, N & C */ +#define RGX_BVNC_INFO_PARAMS (4) + + _RGXBvncAcquireAppHint(aszBVNCString, ui32RGXDevCount); + + if ('\0' == aszBVNCString[0]) + { + return PVRSRV_ERROR_INVALID_BVNC_PARAMS; + } + + /* Parse the given RGX_BVNC string */ + ui32ScanCount = OSVSScanf(aszBVNCString, RGX_BVNC_STR_FMTSPEC, pB, pV, pN, pC); + if (RGX_BVNC_INFO_PARAMS != ui32ScanCount) + { + ui32ScanCount = OSVSScanf(aszBVNCString, RGX_BVNC_STRP_FMTSPEC, pB, pV, pN, pC); + } + if (RGX_BVNC_INFO_PARAMS != ui32ScanCount) + { + return PVRSRV_ERROR_INVALID_BVNC_PARAMS; + } + PVR_LOG(("BVNC module parameter honoured: %s", aszBVNCString)); + + return PVRSRV_OK; +} + +/* This function detects the Rogue variant and configures the essential + * config info associated with such a device. + * The config info includes features, errata, etc + */ +PVRSRV_ERROR RGXBvncInitialiseConfiguration(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + static IMG_UINT32 ui32RGXDevCnt = 0; + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT64 ui64BVNC=0; + IMG_UINT32 B=0, V=0, N=0, C=0; + IMG_UINT64 *pui64Cfg = NULL; + IMG_UINT32 ui32Cores = 1U; + + /* Check for load time RGX BVNC parameter */ + eError = _RGXBvncParseList(&B,&V,&N,&C, ui32RGXDevCnt); + if (PVRSRV_OK == eError) + { + PVR_LOG(("Read BVNC " RGX_BVNC_STR_FMTSPEC + " from driver load parameter", B, V, N, C)); + + /* Extract the BVNC config from the Features table */ + ui64BVNC = BVNC_PACK(B,0,N,C); + pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC); + PVR_LOG_IF_FALSE((pui64Cfg != NULL), "Driver parameter BVNC configuration not found!"); + } + + { + void *pvAppHintState = NULL; + const IMG_BOOL bAppHintDefault = PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC; + + OSCreateKMAppHintState(&pvAppHintState); + OSGetKMAppHintBOOL(pvAppHintState, + IgnoreHWReportedBVNC, + &bAppHintDefault, + &psDevInfo->bIgnoreHWReportedBVNC); + 
OSFreeKMAppHintState(pvAppHintState); + } + +#if !defined(NO_HARDWARE) + + /* Try to detect the RGX BVNC from the HW device */ + if ((NULL == pui64Cfg) && !psDevInfo->bIgnoreHWReportedBVNC) + { + IMG_UINT64 ui32ID; + IMG_HANDLE hSysData; + + hSysData = psDeviceNode->psDevConfig->hSysData; + + /* Power-up the device as required to read the registers */ + if (psDeviceNode->psDevConfig->pfnPrePowerState) + { + eError = psDeviceNode->psDevConfig->pfnPrePowerState(hSysData, PVRSRV_DEV_POWER_STATE_ON, + PVRSRV_DEV_POWER_STATE_OFF, IMG_FALSE); + PVR_LOG_RETURN_IF_ERROR(eError, "pfnPrePowerState ON"); + } + + if (psDeviceNode->psDevConfig->pfnPostPowerState) + { + eError = psDeviceNode->psDevConfig->pfnPostPowerState(hSysData, PVRSRV_DEV_POWER_STATE_ON, + PVRSRV_DEV_POWER_STATE_OFF, IMG_FALSE); + PVR_LOG_RETURN_IF_ERROR(eError, "pfnPostPowerState ON"); + } + + /* Read the BVNC, in to new way first, if B not set, use old scheme */ + ui32ID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID__PBVNC); + + if (GET_B(ui32ID)) + { + B = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK) >> + RGX_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT; + V = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK) >> + RGX_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT; + N = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK) >> + RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT; + C = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK) >> + RGX_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT; + + } + else + { + IMG_UINT64 ui32CoreID, ui32CoreRev; + ui32CoreRev = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_REVISION); + ui32CoreID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID); + B = (ui32CoreRev & ~RGX_CR_CORE_REVISION_MAJOR_CLRMSK) >> + RGX_CR_CORE_REVISION_MAJOR_SHIFT; + V = (ui32CoreRev & ~RGX_CR_CORE_REVISION_MINOR_CLRMSK) >> + RGX_CR_CORE_REVISION_MINOR_SHIFT; + N = (ui32CoreID & ~RGX_CR_CORE_ID_CONFIG_N_CLRMSK) >> + RGX_CR_CORE_ID_CONFIG_N_SHIFT; + C = (ui32CoreID & 
~RGX_CR_CORE_ID_CONFIG_C_CLRMSK) >> + RGX_CR_CORE_ID_CONFIG_C_SHIFT; + } + PVR_LOG(("Read BVNC " RGX_BVNC_STR_FMTSPEC + " from HW device registers", B, V, N, C)); + + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + /* Read the number of cores in the system for newer BVNC (Branch ID > 20) */ + if (B > 20) + { + ui32Cores = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_SYSTEM); + } + } + + /* Power-down the device */ + if (psDeviceNode->psDevConfig->pfnPrePowerState) + { + eError = psDeviceNode->psDevConfig->pfnPrePowerState(hSysData, PVRSRV_DEV_POWER_STATE_OFF, + PVRSRV_DEV_POWER_STATE_ON, IMG_FALSE); + PVR_LOG_RETURN_IF_ERROR(eError, "pfnPrePowerState OFF"); + } + + if (psDeviceNode->psDevConfig->pfnPostPowerState) + { + eError = psDeviceNode->psDevConfig->pfnPostPowerState(hSysData, PVRSRV_DEV_POWER_STATE_OFF, + PVRSRV_DEV_POWER_STATE_ON, IMG_FALSE); + PVR_LOG_RETURN_IF_ERROR(eError, "pfnPostPowerState OFF"); + } + + /* Extract the BVNC config from the Features table */ + ui64BVNC = BVNC_PACK(B,0,N,C); + if (ui64BVNC != 0) + { + pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC); + PVR_LOG_IF_FALSE((pui64Cfg != NULL), "HW device BVNC configuration not found!"); + } + else if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + /* + * On host OS we should not get here as CORE_ID should not be zero, so flag an error. + * On older cores, guest OS only has CORE_ID if defined(RGX_FEATURE_COREID_PER_OS) + */ + PVR_LOG_ERROR(PVRSRV_ERROR_DEVICE_REGISTER_FAILED, "CORE_ID register returns zero. 
Unknown BVNC"); + } + } +#endif + +#if defined(RGX_BVNC_KM_B) && defined(RGX_BVNC_KM_N) && defined(RGX_BVNC_KM_C) + if (NULL == pui64Cfg) + { + /* We reach here if the HW is not present, + * or we are running in a guest OS with no COREID_PER_OS feature, + * or HW is unstable during register read giving invalid values, + * or runtime detection has been disabled - fall back to compile time BVNC + */ + B = RGX_BVNC_KM_B; + N = RGX_BVNC_KM_N; + C = RGX_BVNC_KM_C; + { + IMG_UINT32 ui32ScanCount = 0; + ui32ScanCount = OSVSScanf(RGX_BVNC_KM_V_ST, "%u", &V); + if (1 != ui32ScanCount) + { + ui32ScanCount = OSVSScanf(RGX_BVNC_KM_V_ST, "%up", &V); + if (1 != ui32ScanCount) + { + V = 0; + } + } + } + PVR_LOG(("Reverting to compile time BVNC %s", RGX_BVNC_KM)); + + /* Extract the BVNC config from the Features table */ + ui64BVNC = BVNC_PACK(B,0,N,C); + pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC); + PVR_LOG_IF_FALSE((pui64Cfg != NULL), "Compile time BVNC configuration not found!"); + } +#endif /* defined(RGX_BVNC) */ + + /* Have we failed to identify the BVNC to use? */ + if (NULL == pui64Cfg) + { + PVR_DPF((PVR_DBG_ERROR, "%s: BVNC Detection and feature lookup failed. " + "Unsupported BVNC: 0x%016" IMG_UINT64_FMTSPECx, __func__, ui64BVNC)); + return PVRSRV_ERROR_BVNC_UNSUPPORTED; + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC Feature found config: 0x%016" + IMG_UINT64_FMTSPECx " 0x%016" IMG_UINT64_FMTSPECx " 0x%016" + IMG_UINT64_FMTSPECx " 0x%016" IMG_UINT64_FMTSPECx "\n", __func__, + pui64Cfg[0], pui64Cfg[1], pui64Cfg[2], pui64Cfg[3])); + + /* Parsing feature config depends on available features on the core + * hence this parsing should always follow the above feature assignment */ + psDevInfo->sDevFeatureCfg.ui64Features = pui64Cfg[1]; + _RGXBvncParseFeatureValues(psDevInfo, pui64Cfg); + + /* Add 'V' to the packed BVNC value to get the BVNC ERN and BRN config. 
*/ + ui64BVNC = BVNC_PACK(B,V,N,C); + pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaErnsBrns, ui64BVNC); + if (NULL == pui64Cfg) + { + PVR_DPF((PVR_DBG_ERROR, "%s: BVNC ERN/BRN lookup failed. " + "Unsupported BVNC: 0x%016" IMG_UINT64_FMTSPECx, __func__, ui64BVNC)); + psDevInfo->sDevFeatureCfg.ui64ErnsBrns = 0; + return PVRSRV_ERROR_BVNC_UNSUPPORTED; + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC ERN/BRN Cfg: 0x%016" IMG_UINT64_FMTSPECx + " 0x%016" IMG_UINT64_FMTSPECx, __func__, *pui64Cfg, pui64Cfg[1])); + psDevInfo->sDevFeatureCfg.ui64ErnsBrns = pui64Cfg[1]; + + psDevInfo->sDevFeatureCfg.ui32B = B; + psDevInfo->sDevFeatureCfg.ui32V = V; + psDevInfo->sDevFeatureCfg.ui32N = N; + psDevInfo->sDevFeatureCfg.ui32C = C; + + /* Message to confirm configuration look up was a success */ + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) + { +#if defined(NO_HARDWARE) + { + PVR_UNREFERENCED_PARAMETER(ui32Cores); + PVR_LOG(("RGX Device registered with BVNC " RGX_BVNC_STR_FMTSPEC, + B, V, N, C)); + } +#else + { + PVR_LOG(("RGX Device registered BVNC " RGX_BVNC_STR_FMTSPEC + " with %u %s in the system", B ,V ,N ,C, ui32Cores , + ((ui32Cores == 1U)?"core":"cores"))); + } +#endif + } + else + { + PVR_LOG(("RGX Device registered with BVNC " RGX_BVNC_STR_FMTSPEC, + B, V, N, C)); + } + + ui32RGXDevCnt++; + +#if defined(DEBUG) + _RGXBvncDumpParsedConfig(psDeviceNode); +#endif + return PVRSRV_OK; +} + +/* + * This function checks if a particular feature is available on the given rgx device */ +IMG_BOOL RGXBvncCheckFeatureSupported(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64FeatureMask) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + if (psDevInfo->sDevFeatureCfg.ui64Features & ui64FeatureMask) + { + return IMG_TRUE; + } + return IMG_FALSE; +} + +/* + * This function returns the value of a feature on the given rgx device */ +IMG_INT32 RGXBvncGetSupportedFeatureValue(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_FEATURE_WITH_VALUE_INDEX eFeatureIndex) +{ + 
PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + if (eFeatureIndex >= RGX_FEATURE_WITH_VALUES_MAX_IDX) + { + return -1; + } + + if (psDevInfo->sDevFeatureCfg.ui32FeaturesValues[eFeatureIndex] == RGX_FEATURE_VALUE_DISABLED) + { + return -1; + } + + return psDevInfo->sDevFeatureCfg.ui32FeaturesValues[eFeatureIndex]; +} + +/**************************************************************************/ /*! +@Function RGXVerifyBVNC +@Description Checks that the device's BVNC registers have the correct values. +@Input psDeviceNode Device node +@Return PVRSRV_ERROR +*/ /***************************************************************************/ +#define NUM_RGX_CORE_IDS 8 +PVRSRV_ERROR RGXVerifyBVNC(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64GivenBVNC, IMG_UINT64 ui64CoreIdMask) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT64 ui64MatchBVNC; + IMG_UINT32 i; + + PVR_ASSERT(psDeviceNode != NULL); + PVR_ASSERT(psDeviceNode->pvDevice != NULL); + + /* The device info */ + psDevInfo = psDeviceNode->pvDevice; + + PDUMPCOMMENT("PDUMP VERIFY CORE_ID registers for all OSIDs\n"); + + /* construct the value to match against */ + if ((ui64GivenBVNC | ui64CoreIdMask) == 0) /* both zero means use configured DDK value */ + { + ui64MatchBVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B, + psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, + psDevInfo->sDevFeatureCfg.ui32C); + } + else + { + /* use the value in CORE_ID for any zero elements in the BVNC */ + ui64MatchBVNC = (ui64GivenBVNC & ~ui64CoreIdMask) | (OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID) & ui64CoreIdMask); + } + PVR_LOG(("matchBVNC %d.%d.%d.%d", + (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff), + (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff), + (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff), + (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff))); + + /* read in all the CORE_ID registers */ + for (i = 0; i 
< NUM_RGX_CORE_IDS; ++i) + { +#if !defined(NO_HARDWARE) + IMG_UINT64 ui64BVNC = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID + (i << 16)); + + PVR_LOG(("CORE_ID%d returned %d.%d.%d.%d", i, + (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff), + (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff), + (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff), + (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff))); + + if (ui64BVNC != ui64MatchBVNC) + { + eError = PVRSRV_ERROR_BVNC_MISMATCH; + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid CORE_ID%d %d.%d.%d.%d, Expected %d.%d.%d.%d", __func__, i, + (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff), + (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff), + (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff), + (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff), + (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff), + (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff), + (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff), + (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff))); + break; + } +#endif + +#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP) + /* check upper DWORD */ + eError = PDUMPREGPOL(RGX_PDUMPREG_NAME, + (RGX_CR_CORE_ID + 4) + (i << 16), + (IMG_UINT32)(ui64MatchBVNC >> 32), + 0xFFFFFFFF, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); + if (eError == PVRSRV_OK) + { + /* check lower DWORD */ + eError = PDUMPREGPOL(RGX_PDUMPREG_NAME, + RGX_CR_CORE_ID + (i << 16), + (IMG_UINT32)(ui64MatchBVNC & 0xFFFFFFFF), + 0xFFFFFFFF, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); + } +#endif + } + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxccb.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxccb.c new file mode 100644 index 000000000000..dfe1a0f36ea5 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxccb.c @@ -0,0 +1,2697 @@ 
+/*************************************************************************/ /*! +@File +@Title RGX CCB routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX CCB routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "pvr_debug.h" +#include "rgxdevice.h" +#include "pdump_km.h" +#include "allocmem.h" +#include "devicemem.h" +#include "rgxfwutils.h" +#include "osfunc.h" +#include "rgxccb.h" +#include "rgx_memallocflags.h" +#include "devicemem_pdump.h" +#include "dllist.h" +#if defined(LINUX) +#include "trace_events.h" +#endif +#include "sync_checkpoint_external.h" +#include "sync_checkpoint.h" +#include "rgxutils.h" +#include "info_page.h" + +/* + * Defines the number of fence updates to record so that future fences in the + * CCB. Can be checked to see if they are already known to be satisfied. + */ +#define RGX_CCCB_FENCE_UPDATE_LIST_SIZE (32) + +#define RGX_UFO_PTR_ADDR(ufoptr) \ + (((ufoptr)->puiAddrUFO.ui32Addr) & 0xFFFFFFFC) + +#define GET_CCB_SPACE(WOff, ROff, CCBSize) \ + ((((ROff) - (WOff)) + ((CCBSize) - 1)) & ((CCBSize) - 1)) + +#define UPDATE_CCB_OFFSET(Off, PacketSize, CCBSize) \ + (Off) = (((Off) + (PacketSize)) & ((CCBSize) - 1)) + +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) + +#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_THRESHOLD 0x1 +#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED 0x2 +#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_FULL_CCB 0x4 + +typedef struct _RGX_CLIENT_CCB_UTILISATION_ +{ + /* the threshold in bytes. + * when the CCB utilisation hits the threshold then we will print + * a warning message. 
+ */ + IMG_UINT32 ui32ThresholdBytes; + /* Maximum cCCB usage at some point in time */ + IMG_UINT32 ui32HighWaterMark; + /* keep track of the warnings already printed. + * bit mask of PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_xyz + */ + IMG_UINT32 ui32Warnings; + /* Keep track how many times CCB was full. + * Counters are reset after every grow. + */ + IMG_UINT32 ui32CCBFull; + IMG_UINT32 ui32CCBAcquired; +} RGX_CLIENT_CCB_UTILISATION; + +#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */ + +struct _RGX_CLIENT_CCB_ { + volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl; /*!< CPU mapping of the CCB control structure used by the fw */ + void *pvClientCCB; /*!< CPU mapping of the CCB */ + DEVMEM_MEMDESC *psClientCCBMemDesc; /*!< MemDesc for the CCB */ + DEVMEM_MEMDESC *psClientCCBCtrlMemDesc; /*!< MemDesc for the CCB control */ + IMG_UINT32 ui32HostWriteOffset; /*!< CCB write offset from the driver side */ + IMG_UINT32 ui32LastPDumpWriteOffset; /*!< CCB write offset from the last time we submitted a command in capture range */ + IMG_UINT32 ui32FinishedPDumpWriteOffset; /*!< Trails LastPDumpWriteOffset for last finished command, used for HW CB driven DMs */ + IMG_UINT32 ui32LastROff; /*!< Last CCB Read offset to help detect any CCB wedge */ + IMG_UINT32 ui32LastWOff; /*!< Last CCB Write offset to help detect any CCB wedge */ + IMG_UINT32 ui32ByteCount; /*!< Count of the number of bytes written to CCCB */ + IMG_UINT32 ui32LastByteCount; /*!< Last value of ui32ByteCount to help detect any CCB wedge */ + IMG_UINT32 ui32Size; /*!< Size of the CCB */ +#if defined(PVRSRV_ENABLE_CCCB_GROW) + POS_LOCK hCCBGrowLock; /*!< Prevents CCB Grow while DumpCCB() is called and vice versa */ + IMG_UINT32 ui32VirtualAllocSize; /*!< Virtual size of the CCB */ + IMG_PUINT32 pui32MappingTable; /*!< Mapping table for sparse allocation of the CCB */ +#endif + DLLIST_NODE sNode; /*!< Node used to store this CCB on the per connection list */ + PDUMP_CONNECTION_DATA *psPDumpConnectionData; /*!< Pointer to the 
per connection data in which we reside */ + void *hTransition; /*!< Handle for Transition callback */ + IMG_CHAR szName[MAX_CLIENT_CCB_NAME]; /*!< Name of this client CCB */ + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; /*!< Parent server common context that this CCB belongs to */ +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) + RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor; + RGX_CLIENT_CCB_UTILISATION sUtilisation; /*!< CCB utilisation data */ +#endif +#if defined(DEBUG) + IMG_UINT32 ui32UpdateEntries; /*!< Number of Fence Updates in asFenceUpdateList */ + RGXFWIF_UFO asFenceUpdateList[RGX_CCCB_FENCE_UPDATE_LIST_SIZE]; /*!< List of recent updates written in this CCB */ +#endif + IMG_UINT32 ui32CCBFlags; /*!< Bitmask for various flags relating to CCB. Bit defines in rgxccb.h */ +}; + +/* Forms a table, with array of strings for each requestor type (listed in RGX_CCB_REQUESTORS X macro), to be used for + DevMemAllocation comments and PDump comments. Each tuple in the table consists of 3 strings: + { "FwClientCCB:" , "FwClientCCBControl:" , }, + The first string being used as comment when allocating ClientCCB for the given requestor, the second for CCBControl + structure, and the 3rd one for use in PDUMP comments. The number of tuples in the table must adhere to the following + build assert. */ +const IMG_CHAR *const aszCCBRequestors[][3] = +{ +#define REQUESTOR_STRING(prefix,req) #prefix ":" #req +#define FORM_REQUESTOR_TUPLE(req) { REQUESTOR_STRING(FwClientCCB,req), REQUESTOR_STRING(FwClientCCBControl,req), #req }, + RGX_CCB_REQUESTORS(FORM_REQUESTOR_TUPLE) +#undef FORM_REQUESTOR_TUPLE +}; + +PVRSRV_ERROR RGXCCBPDumpDrainCCB(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32PDumpFlags) +{ + + IMG_UINT32 ui32PollOffset; + + if (BIT_ISSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN)) + { + /* Draining CCB on a command that hasn't finished, and FW isn't expected + * to have updated Roff up to Woff. Only drain to the first + * finished command prior to this. 
The Roff for this + * is stored in ui32FinishedPDumpWriteOffset. + */ + ui32PollOffset = psClientCCB->ui32FinishedPDumpWriteOffset; + + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, + "cCCB(%s@%p): Draining open CCB rgxfw_roff < woff (%d)", + psClientCCB->szName, + psClientCCB, + ui32PollOffset); + } + else + { + /* Command to a finished CCB stream and FW is drained to empty + * out remaining commands until R==W. + */ + ui32PollOffset = psClientCCB->ui32LastPDumpWriteOffset; + + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, + "cCCB(%s@%p): Draining CCB rgxfw_roff == woff (%d)", + psClientCCB->szName, + psClientCCB, + ui32PollOffset); + } + + return DevmemPDumpDevmemPol32(psClientCCB->psClientCCBCtrlMemDesc, + offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), + ui32PollOffset, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags); +} + +/****************************************************************************** + FUNCTION : RGXCCBPDumpSyncCCB + + PURPOSE : Synchronise Client CCBs from both live and playback contexts. + Waits for live-FW to empty live-CCB. + Waits for sim-FW to empty sim-CCB by adding POL + + PARAMETERS : psClientCCB - The client CCB + ui32PDumpFlags - PDump flags + + RETURNS : PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR RGXCCBPDumpSyncCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + + /* Wait for the live FW to catch up/empty CCB. This is done by returning + * retry which will get pushed back out to Services client where it + * waits on the event object and then resubmits the command. + */ + if (psClientCCB->psClientCCBCtrl->ui32ReadOffset != psClientCCB->ui32HostWriteOffset) + { + return PVRSRV_ERROR_RETRY; + } + + /* Wait for the sim FW to catch up/empty sim CCB. + * We drain whenever capture range is entered, even if no commands + * have been issued on this CCB when out of capture range. 
We have to + * wait for commands that might have been issued in the last capture + * range to finish so the connection's sync block snapshot dumped after + * all the PDumpTransition callbacks have been execute doesn't clobber + * syncs which the sim FW is currently working on. + * + * Although this is sub-optimal for play-back - while out of capture + * range for every continuous operation we synchronise the sim + * play-back processing the script and the sim FW, there is no easy + * solution. Not all modules that work with syncs register a + * PDumpTransition callback and thus we have no way of knowing if we + * can skip this sim CCB drain and sync block dump or not. + */ + + eError = RGXCCBPDumpDrainCCB(psClientCCB, ui32PDumpFlags); + PVR_LOG_IF_ERROR(eError, "RGXCCBPDumpDrainCCB"); + PVR_ASSERT(eError == PVRSRV_OK); + + /* Live CCB and simulation CCB now empty, FW idle on CCB in both + * contexts. + */ + return PVRSRV_OK; +} + +/****************************************************************************** + FUNCTION : RGXCCBPDumpFastForwardCCB + + PURPOSE : Fast-forward sim-CCB and live-CCB offsets to live app-thread + values. + This helps to skip any commands submitted when out of capture + range and start with first command in capture range in both + live and playback contexts. In case of Block mode, this helps + to playback any intermediate PDump block directly after first + block. 
+ + + PARAMETERS : psClientCCB - The client CCB + ui32PDumpFlags - PDump flags + + RETURNS : void +******************************************************************************/ +static void RGXCCBPDumpFastForwardCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32PDumpFlags) +{ + volatile RGXFWIF_CCCB_CTL *psCCBCtl = psClientCCB->psClientCCBCtrl; + + /* Make sure that we have synced live-FW and live-App threads */ + PVR_ASSERT(psCCBCtl->ui32ReadOffset == psClientCCB->ui32HostWriteOffset); + + psCCBCtl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset; + psCCBCtl->ui32DepOffset = psClientCCB->ui32HostWriteOffset; + psCCBCtl->ui32WriteOffset = psClientCCB->ui32HostWriteOffset; + + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, + "cCCB(%s@%p): Fast-forward from %d to %d", + psClientCCB->szName, + psClientCCB, + psClientCCB->ui32LastPDumpWriteOffset, + psClientCCB->ui32HostWriteOffset); + + DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc, + 0, + sizeof(RGXFWIF_CCCB_CTL), + ui32PDumpFlags); + + /* Although we've entered capture range for this process connection + * we might not do any work on this CCB so update the + * ui32LastPDumpWriteOffset to reflect where we got to for next + * time so we start the drain from where we got to last time. + */ + psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset; + +} + +static PVRSRV_ERROR _RGXCCBPDumpTransition(void *pvData, void *pvDevice, PDUMP_TRANSITION_EVENT eEvent, IMG_UINT32 ui32PDumpFlags) +{ + RGX_CLIENT_CCB *psClientCCB = (RGX_CLIENT_CCB *) pvData; +#if defined(PDUMP) + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) pvDevice; +#endif + PVRSRV_ERROR eError; + + /* Block mode: + * Here is block structure at transition (ui32BlockLength=N frames): + * + * ... + * ... + * PDUMP_BLOCK_START_0x0000000x{ + * + * + * ... + * ... + * ... (N frames data) + * ... + * ... 
+ * <(1) Drain sim-KCCB> ''| + * <(2) Sync live and sim CCCB> | + * }PDUMP_BLOCK_END_0x0000000x | <- BlockTransition Steps + * <(3) Split MAIN and BLOCK stream script> | + * PDUMP_BLOCK_START_0x0000000y{ | + * <(4) Fast-forward sim-CCCB> | + * <(5) Re-dump SyncBlocks> ,,| + * ... + * ... + * ... (N frames data) + * ... + * ... + * + * + * }PDUMP_BLOCK_END_0x0000000y + * ... + * ... + * + * Steps (3) and (5) are done in pdump_server.c + * */ + switch (eEvent) + { + case PDUMP_TRANSITION_EVENT_RANGE_ENTERED: + { + /* We're about to transition into capture range and we've submitted + * new commands since the last time we entered capture range so drain + * the live CCB and simulation (sim) CCB as required, i.e. leave CCB + * idle in both live and sim contexts. + * This requires the host driver to ensure the live FW & the sim FW + * have both emptied out the remaining commands until R==W (CCB empty). + */ + + eError = RGXCCBPDumpSyncCCB(psClientCCB, ui32PDumpFlags); + PVR_RETURN_IF_ERROR(eError); + + if (psClientCCB->ui32LastPDumpWriteOffset != psClientCCB->ui32HostWriteOffset) + { + /* If new commands have been written when out of capture range in + * the live CCB then we need to fast forward the sim CCBCtl + * offsets past uncaptured commands. This is done by PDUMPing + * the CCBCtl memory to align sim values with the live CCBCtl + * values. Both live and sim FWs can start with the 1st command + * which is in the new capture range. + */ + RGXCCBPDumpFastForwardCCB(psClientCCB, ui32PDumpFlags); + } + break; + } + case PDUMP_TRANSITION_EVENT_RANGE_EXITED: + { + /* Nothing to do */ + break; + } + case PDUMP_TRANSITION_EVENT_BLOCK_FINISHED: + { + /* (1) Drain KCCB from current block before starting new: + * + * At playback, this will ensure that sim-FW drains all commands in KCCB + * belongs to current block before 'jumping' to any future commands (from + * next block). This will synchronise script-thread and sim-FW thread KCCBs + * at end of each pdump block. 
+ * + * This will additionally force redump of KCCBCtl structure at start of next/new block. + * */ + +#if defined(PDUMP) + eError = RGXPdumpDrainKCCB(psDevInfo, psDevInfo->psKernelCCBCtl->ui32WriteOffset); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXPdumpDrainKCCB"); +#endif + + /* (2) Synchronise Client CCBs from live and playback contexts before starting new block: + * + * This operation will, + * a. Force synchronisation between app-thread and live-FW thread (i.e. Wait + * for live-FW to empty live Client CCB). + * + * b. Next, it will dump poll command to drain Client CCB at end of every + * pdump block. At playback time this will synchronise sim-FW and + * script-thread Client CCBs at end of each block. + * + * This is to ensure that all commands in CCB from current block are processed + * before moving on to future commands. + * */ + + eError = RGXCCBPDumpSyncCCB(psClientCCB, ui32PDumpFlags); + PVR_RETURN_IF_ERROR(eError); + break; + } + case PDUMP_TRANSITION_EVENT_BLOCK_STARTED: + { + /* (4) Fast-forward CCB write offsets to current live values: + * + * We have already synchronised live-FW and app-thread above at end of each + * block (in Step 2a above), now fast-forward Client CCBCtl write offsets to that of + * current app-thread values at start of every block. This will allow us to + * skip any intermediate pdump blocks and start with last (or any next) block + * immediately after first pdump block. 
+ * */ + + RGXCCBPDumpFastForwardCCB(psClientCCB, ui32PDumpFlags); + break; + } + case PDUMP_TRANSITION_EVENT_NONE: + /* Invalid event for transition */ + default: + { + /* Unknown Transition event */ + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + return PVRSRV_OK; +} + +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) + +static INLINE void _RGXInitCCBUtilisation(RGX_CLIENT_CCB *psClientCCB) +{ + psClientCCB->sUtilisation.ui32HighWaterMark = 0; /* initialize ui32HighWaterMark level to zero */ + psClientCCB->sUtilisation.ui32ThresholdBytes = (psClientCCB->ui32Size * + PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD) / 100; + psClientCCB->sUtilisation.ui32Warnings = 0; + psClientCCB->sUtilisation.ui32CCBAcquired = 0; + psClientCCB->sUtilisation.ui32CCBFull = 0; +} + +static INLINE void _RGXPrintCCBUtilisationWarning(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32WarningType, + IMG_UINT32 ui32CmdSize) +{ +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE) + if (ui32WarningType == PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED) + { + PVR_LOG(("Failed to acquire CCB space for %u byte command:", ui32CmdSize)); + } + + PVR_LOG(("%s: Client CCB (%s) watermark (%u) hit %d%% of its allocation size (%u)", + __func__, + psClientCCB->szName, + psClientCCB->sUtilisation.ui32HighWaterMark, + psClientCCB->sUtilisation.ui32HighWaterMark * 100 / psClientCCB->ui32Size, + psClientCCB->ui32Size)); +#else + PVR_UNREFERENCED_PARAMETER(ui32WarningType); + PVR_UNREFERENCED_PARAMETER(ui32CmdSize); + + PVR_LOG(("GPU %s command buffer usage high (%u). 
This is not an error but the application may not run optimally.", + aszCCBRequestors[psClientCCB->eRGXCCBRequestor][REQ_PDUMP_COMMENT], + psClientCCB->sUtilisation.ui32HighWaterMark * 100 / psClientCCB->ui32Size)); +#endif +} + +static INLINE void _RGXCCBUtilisationEvent(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32WarningType, + IMG_UINT32 ui32CmdSize) +{ + /* in VERBOSE mode we will print a message for each different + * event type as they happen. + * but by default we will only issue one message + */ +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE) + if (!(psClientCCB->sUtilisation.ui32Warnings & ui32WarningType)) +#else + if (!psClientCCB->sUtilisation.ui32Warnings) +#endif + { + _RGXPrintCCBUtilisationWarning(psClientCCB, + ui32WarningType, + ui32CmdSize); + /* record that we have issued a warning of this type */ + psClientCCB->sUtilisation.ui32Warnings |= ui32WarningType; + } +} + +/* Check the current CCB utilisation. Print a one-time warning message if it is above the + * specified threshold + */ +static INLINE void _RGXCheckCCBUtilisation(RGX_CLIENT_CCB *psClientCCB) +{ + /* Print a warning message if the cCCB watermark is above the threshold value */ + if (psClientCCB->sUtilisation.ui32HighWaterMark >= psClientCCB->sUtilisation.ui32ThresholdBytes) + { + _RGXCCBUtilisationEvent(psClientCCB, + PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_THRESHOLD, + 0); + } +} + +/* Update the cCCB high watermark level if necessary */ +static void _RGXUpdateCCBUtilisation(RGX_CLIENT_CCB *psClientCCB) +{ + IMG_UINT32 ui32FreeSpace, ui32MemCurrentUsage; + + ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, + psClientCCB->psClientCCBCtrl->ui32ReadOffset, + psClientCCB->ui32Size); + ui32MemCurrentUsage = psClientCCB->ui32Size - ui32FreeSpace; + + if (ui32MemCurrentUsage > psClientCCB->sUtilisation.ui32HighWaterMark) + { + psClientCCB->sUtilisation.ui32HighWaterMark = ui32MemCurrentUsage; + + /* The high water mark has increased. 
Check if it is above the + * threshold so we can print a warning if necessary. + */ + _RGXCheckCCBUtilisation(psClientCCB); + } +} + +#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */ + +PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32CCBSizeLog2, + IMG_UINT32 ui32CCBMaxSizeLog2, + IMG_UINT32 ui32ContextFlags, + CONNECTION_DATA *psConnectionData, + RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor, + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + RGX_CLIENT_CCB **ppsClientCCB, + DEVMEM_MEMDESC **ppsClientCCBMemDesc, + DEVMEM_MEMDESC **ppsClientCCBCtrlMemDesc) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + DEVMEM_FLAGS_T uiClientCCBMemAllocFlags, uiClientCCBCtlMemAllocFlags; + IMG_UINT32 ui32AllocSize = (1U << ui32CCBSizeLog2); + IMG_UINT32 ui32MinAllocSize = (1U << MIN_SAFE_CCB_SIZE_LOG2); + RGX_CLIENT_CCB *psClientCCB; +#if defined(PVRSRV_ENABLE_CCCB_GROW) + IMG_UINT32 ui32FWLog2PageSize = DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap); + IMG_UINT32 ui32FWPageSize = (1U << ui32FWLog2PageSize); + IMG_UINT32 ui32NumPages = ui32AllocSize / ui32FWPageSize; + IMG_UINT32 ui32VirtualAllocSize = (1U << ui32CCBMaxSizeLog2); + IMG_UINT32 ui32NumVirtPages = ui32VirtualAllocSize / ui32FWPageSize; + IMG_UINT32 i; + + /* For the allocation request to be valid, at least one page is required. + * This is relevant on systems where the page size is greater than the client CCB size. */ + ui32NumPages = MAX(1, ui32NumPages); + ui32NumVirtPages = MAX(1, ui32NumVirtPages); +#else + PVR_UNREFERENCED_PARAMETER(ui32CCBMaxSizeLog2); +#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */ + + /* All client CCBs should be at-least of the "minimum" size and not to exceed "maximum" */ + if ((ui32CCBSizeLog2 < MIN_SAFE_CCB_SIZE_LOG2) || + (ui32CCBSizeLog2 > MAX_SAFE_CCB_SIZE_LOG2)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s CCB size is invalid (%d). 
Should be from %d to %d", + __func__, + aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + ui32CCBSizeLog2, MIN_SAFE_CCB_SIZE_LOG2, MAX_SAFE_CCB_SIZE_LOG2)); + return PVRSRV_ERROR_INVALID_PARAMS; + } +#if defined(PVRSRV_ENABLE_CCCB_GROW) + if ((ui32CCBMaxSizeLog2 < ui32CCBSizeLog2) || + (ui32CCBMaxSizeLog2 > MAX_SAFE_CCB_SIZE_LOG2)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s CCB maximum size is invalid (%d). Should be from %d to %d", + __func__, + aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + ui32CCBMaxSizeLog2, ui32CCBSizeLog2, MAX_SAFE_CCB_SIZE_LOG2)); + return PVRSRV_ERROR_INVALID_PARAMS; + } +#endif + + psClientCCB = OSAllocMem(sizeof(*psClientCCB)); + if (psClientCCB == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc; + } + psClientCCB->psServerCommonContext = psServerCommonContext; + +#if defined(PVRSRV_ENABLE_CCCB_GROW) + psClientCCB->ui32VirtualAllocSize = 0; + psClientCCB->pui32MappingTable = NULL; +#endif + + uiClientCCBMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE; + + uiClientCCBCtlMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_UNCACHED | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE; + + /* If connection data indicates Sync Lockup Recovery (SLR) should be disabled, + * or if the caller has set ui32ContextFlags to disable SLR for this context, + * indicate this in psClientCCB->ui32CCBFlags. 
+ */ + if ((psConnectionData->ui32ClientFlags & SRV_FLAGS_CLIENT_SLR_DISABLED) || + (ui32ContextFlags & RGX_CONTEXT_FLAG_DISABLESLR)) + { + BIT_SET(psClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED); + } + + PDUMPCOMMENT("Allocate RGXFW cCCB"); +#if defined(PVRSRV_ENABLE_CCCB_GROW) + if (BITMASK_HAS(psDevInfo->ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN)) + { + psClientCCB->ui32VirtualAllocSize = ui32VirtualAllocSize; + + /* + * Growing CCB is doubling the size. Last grow would require only ui32NumVirtPages/2 new pages + * because another ui32NumVirtPages/2 is already allocated. + * Sometimes initial pages count would be higher (when CCB size is equal to CCB maximum size) so MAX is needed. + */ + psClientCCB->pui32MappingTable = OSAllocMem(MAX(ui32NumPages, ui32NumVirtPages/2) * sizeof(IMG_UINT32)); + if (psClientCCB->pui32MappingTable == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc_mtable; + } + for (i = 0; i < ui32NumPages; i++) + { + psClientCCB->pui32MappingTable[i] = i; + } + + if (IsPhysmemNewRamBackedByLMA(psDevInfo->psDeviceNode, PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL)) + { + /* + * On LMA sparse memory can't be mapped to kernel. + * To work around this whole ccb memory is allocated at once as contiguous. 
+ */ + eError = DevmemFwAllocate(psDevInfo, + ui32VirtualAllocSize, + uiClientCCBMemAllocFlags, + aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING], + &psClientCCB->psClientCCBMemDesc); + } + else + { + eError = DevmemFwAllocateSparse(psDevInfo, + ui32VirtualAllocSize, + ui32FWPageSize, + ui32NumPages, + ui32NumVirtPages, + psClientCCB->pui32MappingTable, + uiClientCCBMemAllocFlags, + aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING], + &psClientCCB->psClientCCBMemDesc); + } + } + + if (eError != PVRSRV_OK) + { + OSFreeMem(psClientCCB->pui32MappingTable); + psClientCCB->pui32MappingTable = NULL; + psClientCCB->ui32VirtualAllocSize = 0; + } + + if (!BITMASK_HAS(psDevInfo->ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN) || + (eError != PVRSRV_OK)) +#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */ + { + /* Allocate ui32AllocSize, or the next best POT allocation */ + do + { + eError = DevmemFwAllocate(psDevInfo, + ui32AllocSize, + uiClientCCBMemAllocFlags, + aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING], + &psClientCCB->psClientCCBMemDesc); + if (eError != PVRSRV_OK) + { + /* Failed to allocate - ensure CCB grow is disabled from + * now on for this device. 
+ */ + BITMASK_UNSET(psDevInfo->ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN); + + /* Failed to allocate, try next POT down */ + ui32AllocSize >>= 1; + } + } while ((eError != PVRSRV_OK) && (ui32AllocSize > ui32MinAllocSize)); + } + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate RGX client CCB (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_alloc_ccb; + } + + OSSNPrintf(psClientCCB->szName, MAX_CLIENT_CCB_NAME, "%s-P%lu-T%lu-%s", + aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + (unsigned long) OSGetCurrentClientProcessIDKM(), + (unsigned long) OSGetCurrentClientThreadIDKM(), + OSGetCurrentClientProcessNameKM()); + + if (ui32AllocSize < (1U << ui32CCBSizeLog2)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Unable to allocate %d bytes for RGX client CCB (%s) but allocated %d bytes", + __func__, + (1U << ui32CCBSizeLog2), + psClientCCB->szName, + ui32AllocSize)); + } + + eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc, + &psClientCCB->pvClientCCB); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map RGX client CCB (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_map_ccb; + } + + PDUMPCOMMENT("Allocate RGXFW cCCB control"); + eError = DevmemFwAllocate(psDevInfo, + sizeof(RGXFWIF_CCCB_CTL), + uiClientCCBCtlMemAllocFlags, + aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_CONTROL_STRING], + &psClientCCB->psClientCCBCtrlMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate RGX client CCB control (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_alloc_ccbctrl; + } + + + eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc, + (void **) &psClientCCB->psClientCCBCtrl); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map RGX client CCB control (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_map_ccbctrl; + } + + 
psClientCCB->psClientCCBCtrl->ui32WriteOffset = 0; + psClientCCB->psClientCCBCtrl->ui32ReadOffset = 0; + psClientCCB->psClientCCBCtrl->ui32DepOffset = 0; + psClientCCB->psClientCCBCtrl->ui32WrapMask = ui32AllocSize - 1; + + PDUMPCOMMENT("cCCB control"); + DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc, + 0, + sizeof(RGXFWIF_CCCB_CTL), + PDUMP_FLAGS_CONTINUOUS); + PVR_ASSERT(eError == PVRSRV_OK); + + psClientCCB->ui32HostWriteOffset = 0; + psClientCCB->ui32LastPDumpWriteOffset = 0; + psClientCCB->ui32FinishedPDumpWriteOffset = 0; + psClientCCB->ui32Size = ui32AllocSize; + psClientCCB->ui32LastROff = ui32AllocSize - 1; + psClientCCB->ui32ByteCount = 0; + psClientCCB->ui32LastByteCount = 0; + BIT_UNSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN); + +#if defined(PVRSRV_ENABLE_CCCB_GROW) + eError = OSLockCreate(&psClientCCB->hCCBGrowLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to create hCCBGrowLock (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_create_ccbgrow_lock; + } +#endif +#if defined(DEBUG) + psClientCCB->ui32UpdateEntries = 0; +#endif + +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) + _RGXInitCCBUtilisation(psClientCCB); + psClientCCB->eRGXCCBRequestor = eRGXCCBRequestor; +#endif + eError = PDumpRegisterTransitionCallback(psConnectionData->psPDumpConnectionData, + _RGXCCBPDumpTransition, + psClientCCB, + psDevInfo, + &psClientCCB->hTransition); + if (eError != PVRSRV_OK) + { + goto fail_pdumpreg; + } + + /* + * Note: + * Save the PDump specific structure, which is ref counted unlike + * the connection data, to ensure it's not freed too early + */ + psClientCCB->psPDumpConnectionData = psConnectionData->psPDumpConnectionData; + PDUMPCOMMENT("New RGXFW cCCB(%s@%p) created", + psClientCCB->szName, + psClientCCB); + + *ppsClientCCB = psClientCCB; + *ppsClientCCBMemDesc = psClientCCB->psClientCCBMemDesc; + *ppsClientCCBCtrlMemDesc = psClientCCB->psClientCCBCtrlMemDesc; + return PVRSRV_OK; + 
+fail_pdumpreg: +#if defined(PVRSRV_ENABLE_CCCB_GROW) + OSLockDestroy(psClientCCB->hCCBGrowLock); +fail_create_ccbgrow_lock: +#endif + DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc); +fail_map_ccbctrl: + DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBCtrlMemDesc); +fail_alloc_ccbctrl: + DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc); +fail_map_ccb: + DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBMemDesc); +#if defined(PVRSRV_ENABLE_CCCB_GROW) +fail_alloc_ccb: + if ( psClientCCB->ui32VirtualAllocSize > 0) + { + OSFreeMem(psClientCCB->pui32MappingTable); + } +fail_alloc_mtable: +#else +fail_alloc_ccb: +#endif + OSFreeMem(psClientCCB); +fail_alloc: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +void RGXDestroyCCB(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_CLIENT_CCB *psClientCCB) +{ +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) + if (psClientCCB->sUtilisation.ui32CCBFull) + { + PVR_LOG(("CCBUtilisationInfo: GPU %s command buffer was full %d times out of %d. 
" + "This is not an error but the application may not run optimally.", + aszCCBRequestors[psClientCCB->eRGXCCBRequestor][REQ_PDUMP_COMMENT], + psClientCCB->sUtilisation.ui32CCBFull, + psClientCCB->sUtilisation.ui32CCBAcquired)); + } +#endif +#if defined(PVRSRV_ENABLE_CCCB_GROW) + OSLockDestroy(psClientCCB->hCCBGrowLock); +#endif + PDumpUnregisterTransitionCallback(psClientCCB->hTransition); + DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBCtrlMemDesc); + DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBMemDesc); +#if defined(PVRSRV_ENABLE_CCCB_GROW) + if (psClientCCB->pui32MappingTable) + { + OSFreeMem(psClientCCB->pui32MappingTable); + } +#endif + OSFreeMem(psClientCCB); +} + +#if defined(PVRSRV_ENABLE_CCCB_GROW) +static PVRSRV_ERROR _RGXCCBMemChangeSparse(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32AllocPageCount) +{ + PVRSRV_ERROR eError; + IMG_UINT32 i; + +#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE + DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc); +#endif + + for (i = 0; i < ui32AllocPageCount; i++) + { + psClientCCB->pui32MappingTable[i] = ui32AllocPageCount + i; + } + + /* Double the CCB size (CCB must be POT) by adding ui32AllocPageCount new pages */ + eError = DeviceMemChangeSparse(psClientCCB->psClientCCBMemDesc, + ui32AllocPageCount, + psClientCCB->pui32MappingTable, + 0, + NULL, +#if !defined(PVRSRV_UNMAP_ON_SPARSE_CHANGE) + SPARSE_MAP_CPU_ADDR | +#endif + SPARSE_RESIZE_ALLOC); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXAcquireCCB: Failed to grow RGX client CCB (%s)", + PVRSRVGetErrorString(eError))); + +#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE + if (DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc, + &psClientCCB->pvClientCCB) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXAcquireCCB: Failed to reacquire CCB mapping")); + psClientCCB->pvClientCCB = NULL; + } +#endif + + return eError; 
+ } + +#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE + eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc, + &psClientCCB->pvClientCCB); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXAcquireCCB: Failed to map RGX client CCB (%s)", + PVRSRVGetErrorString(eError))); + return eError; + } +#endif + + return PVRSRV_OK; +} +#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */ + +PVRSRV_ERROR RGXCheckSpaceCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32CmdSize) +{ + IMG_UINT32 ui32FreeSpace; + + /* Check that the CCB can hold this command + padding */ + if ((ui32CmdSize + PADDING_COMMAND_SIZE + 1) > psClientCCB->ui32Size) + { + PVR_DPF((PVR_DBG_ERROR, "Command size (%d bytes) too big for CCB" + " (%d bytes)", ui32CmdSize, psClientCCB->ui32Size)); + return PVRSRV_ERROR_CMD_TOO_BIG; + } + + /* + Check we don't overflow the end of the buffer and make sure we have + enough space for the padding command. If we don't have enough space + (including the minimum amount for the padding command) we need to make + sure we insert a padding command now and wrap before adding the main + command. 
+ */ + if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) <= psClientCCB->ui32Size) + { + ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, + psClientCCB->psClientCCBCtrl->ui32ReadOffset, + psClientCCB->ui32Size); + + /* Don't allow all the space to be used */ + if (ui32FreeSpace > ui32CmdSize) + { + return PVRSRV_OK; + } + + goto e_retry; + } + else + { + IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset; + + ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, + psClientCCB->psClientCCBCtrl->ui32ReadOffset, + psClientCCB->ui32Size); + + /* Check there is space for both the command and the padding command */ + if (ui32FreeSpace > ui32Remain + ui32CmdSize) + { + return PVRSRV_OK; + } + + goto e_retry; + } + +e_retry: +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) + _RGXCCBUtilisationEvent(psClientCCB, + PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_FULL_CCB, + ui32CmdSize); +#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */ + + return PVRSRV_ERROR_RETRY; +} + +/****************************************************************************** + FUNCTION : RGXAcquireCCB + + PURPOSE : Obtains access to write some commands to a CCB + + PARAMETERS : psClientCCB - The client CCB + ui32CmdSize - How much space is required + ppvBufferSpace - Pointer to space in the buffer + ui32PDumpFlags - Should this be PDump continuous? 
+ + RETURNS : PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32CmdSize, + void **ppvBufferSpace, + IMG_UINT32 ui32PDumpFlags) +{ + +#if defined(PVRSRV_ENABLE_CCCB_GROW) + IMG_UINT32 ui32RetryCount = 2; +#endif + +#if defined(PDUMP) + PVRSRV_ERROR eError; + IMG_BOOL bInCaptureRange; + IMG_BOOL bPdumpEnabled; + IMG_UINT64 ui64PDumpState = 0; + + PDumpGetStateKM(&ui64PDumpState); + PDumpIsCaptureFrameKM(&bInCaptureRange); + bPdumpEnabled = (ui64PDumpState & PDUMP_STATE_CONNECTED) != 0 + && (bInCaptureRange || PDUMP_IS_CONTINUOUS(ui32PDumpFlags)); + + /* + PDumpSetFrame will detect as we Transition into capture range for + frame based data but if we are PDumping continuous data then we + need to inform the PDump layer ourselves + */ + if ((ui64PDumpState & PDUMP_STATE_CONNECTED) != 0 + && PDUMP_IS_CONTINUOUS(ui32PDumpFlags) + && !bInCaptureRange) + { + eError = PDumpTransition(psClientCCB->psPDumpConnectionData, PDUMP_TRANSITION_EVENT_RANGE_ENTERED, ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + return eError; + } + } +#endif + + /* Check that the CCB can hold this command + padding */ + if ((ui32CmdSize + PADDING_COMMAND_SIZE + 1) > psClientCCB->ui32Size) + { + PVR_DPF((PVR_DBG_ERROR, "Command size (%d bytes) too big for CCB (%d bytes)", + ui32CmdSize, psClientCCB->ui32Size)); + return PVRSRV_ERROR_CMD_TOO_BIG; + } + +#if defined(PVRSRV_ENABLE_CCCB_GROW) + while (ui32RetryCount--) +#endif + { +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) + psClientCCB->sUtilisation.ui32CCBAcquired++; +#endif + + /* + Check we don't overflow the end of the buffer and make sure we have + enough space for the padding command. We don't have enough space (including the + minimum amount for the padding command) we will need to make sure we insert a + padding command now and wrap before adding the main command. 
+ */ + if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) <= psClientCCB->ui32Size) + { + /* The command can fit without wrapping... */ + IMG_UINT32 ui32FreeSpace; + +#if defined(PDUMP) + /* Wait for sufficient CCB space to become available */ + PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s", + ui32CmdSize, psClientCCB->ui32HostWriteOffset, + psClientCCB->szName); + DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc, + offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), + psClientCCB->ui32HostWriteOffset, + ui32CmdSize, + psClientCCB->ui32Size); +#endif + + ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, + psClientCCB->psClientCCBCtrl->ui32ReadOffset, + psClientCCB->ui32Size); + + /* Can command fit? */ + if (ui32FreeSpace > ui32CmdSize) + { + *ppvBufferSpace = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset); + return PVRSRV_OK; + } + /* There is not enough free space in CCB. */ + goto e_retry; + } + else + { + /* + We're at the end of the buffer without enough contiguous space. + The command cannot fit without wrapping, we need to insert a + padding command and wrap. We need to do this in one go otherwise + we would be leaving unflushed commands and forcing the client to + deal with flushing the padding command but not the command they + wanted to write. Therefore we either do all or nothing. + */ + RGXFWIF_CCB_CMD_HEADER *psHeader; + IMG_UINT32 ui32FreeSpace; + IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset; + +#if defined(PVRSRV_ENABLE_CCCB_GROW) + /* Check this is a growable CCB */ + if (psClientCCB->ui32VirtualAllocSize > 0) + { + PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psClientCCB->psServerCommonContext); + + ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, + psClientCCB->psClientCCBCtrl->ui32ReadOffset, + psClientCCB->ui32Size); + /* + * Check if CCB should grow or be wrapped. 
+ * Wrap CCB if there is no need for grow (CCB is half empty) or CCB can't grow, + * and when is free space for command and padding. + */ + if (((ui32FreeSpace > psClientCCB->ui32Size/2) || (psClientCCB->ui32Size == psClientCCB->ui32VirtualAllocSize)) && + (ui32FreeSpace > ui32Remain + ui32CmdSize)) + { + /* Wrap CCB */ + psHeader = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset); + psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PADDING; + psHeader->ui32CmdSize = ui32Remain - sizeof(RGXFWIF_CCB_CMD_HEADER); + +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "cCCB(%p): Padding cmd %d", psClientCCB, psHeader->ui32CmdSize); + if (bPdumpEnabled) + { + DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc, + psClientCCB->ui32HostWriteOffset, + ui32Remain, + ui32PDumpFlags); + } +#endif + + *ppvBufferSpace = psClientCCB->pvClientCCB; + return PVRSRV_OK; + } + else if ((psClientCCB->ui32Size < psClientCCB->ui32VirtualAllocSize) && + (psClientCCB->ui32HostWriteOffset >= psClientCCB->psClientCCBCtrl->ui32ReadOffset)) + { + /* Grow CCB */ + PVRSRV_ERROR eErr = PVRSRV_OK; + + /* Something went wrong if we are here a second time */ + PVR_ASSERT(ui32RetryCount != 0); + OSLockAcquire(psClientCCB->hCCBGrowLock); + + /* + * On LMA sparse memory can't be mapped to kernel. + * To work around this whole ccb memory was allocated at once as contiguous. + * In such case below sparse change is not needed because memory is already allocated. 
+ */ + if (!IsPhysmemNewRamBackedByLMA(psDevInfo->psDeviceNode, PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL)) + { + IMG_UINT32 ui32FWPageSize = 1U << DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap); + IMG_UINT32 ui32AllocPageCount = psClientCCB->ui32Size / ui32FWPageSize; + + eErr = _RGXCCBMemChangeSparse(psClientCCB, ui32AllocPageCount); + } + + /* Setup new CCB size */ + if (eErr == PVRSRV_OK) + { + psClientCCB->ui32Size += psClientCCB->ui32Size; + } + else + { + PVR_LOG(("%s: Client CCB (%s) grow failed (%s)", __func__, psClientCCB->szName, PVRSRVGetErrorString(eErr))); + OSLockRelease(psClientCCB->hCCBGrowLock); + goto e_retry; + } + +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "cCCB update for grow"); + if (bPdumpEnabled) + { + DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc, + offsetof(RGXFWIF_CCCB_CTL, ui32WrapMask), + sizeof(psClientCCB->psClientCCBCtrl->ui32WrapMask), + ui32PDumpFlags); + DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc, + offsetof(RGX_CLIENT_CCB, ui32Size), + sizeof(psClientCCB->ui32Size), + ui32PDumpFlags); + } +#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */ + +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) + PVR_LOG(("%s: Client CCB (%s) grew to %u", __func__, psClientCCB->szName, psClientCCB->ui32Size)); + /* Reset counters */ + _RGXInitCCBUtilisation(psClientCCB); +#endif + + /* CCB doubled the size so retry now. 
*/ + OSLockRelease(psClientCCB->hCCBGrowLock); + } + else + { + /* CCB can't grow anymore and can't be wrapped */ +#if defined(PDUMP) + /* Wait for sufficient CCB space to become available */ + PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s", + ui32Remain, psClientCCB->ui32HostWriteOffset, + psClientCCB->szName); + DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc, + offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), + psClientCCB->ui32HostWriteOffset, + ui32Remain, + psClientCCB->ui32Size); + PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s", + ui32CmdSize, 0 /*ui32HostWriteOffset after wrap */, + psClientCCB->szName); + DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc, + offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), + 0 /*ui32HostWriteOffset after wrap */, + ui32CmdSize, + psClientCCB->ui32Size); + /* CCB has now space for our command so try wrapping again. Retry now. */ +#else /* defined(PDUMP) */ + goto e_retry; +#endif /* defined(PDUMP) */ + } + } + else +#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */ + { +#if defined(PDUMP) + /* Wait for sufficient CCB space to become available */ + PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s", + ui32Remain, psClientCCB->ui32HostWriteOffset, + psClientCCB->szName); + DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc, + offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), + psClientCCB->ui32HostWriteOffset, + ui32Remain, + psClientCCB->ui32Size); + PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s", + ui32CmdSize, 0 /*ui32HostWriteOffset after wrap */, + psClientCCB->szName); + DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc, + offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), + 0 /*ui32HostWriteOffset after wrap */, + ui32CmdSize, + psClientCCB->ui32Size); +#endif + ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, + 
psClientCCB->psClientCCBCtrl->ui32ReadOffset, + psClientCCB->ui32Size); + + if (ui32FreeSpace > ui32Remain + ui32CmdSize) + { + psHeader = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset); + psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PADDING; + psHeader->ui32CmdSize = ui32Remain - sizeof(RGXFWIF_CCB_CMD_HEADER); +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "cCCB(%p): Padding cmd %d", psClientCCB, psHeader->ui32CmdSize); + if (bPdumpEnabled) + { + DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc, + psClientCCB->ui32HostWriteOffset, + ui32Remain, + ui32PDumpFlags); + } +#endif + + *ppvBufferSpace = psClientCCB->pvClientCCB; + return PVRSRV_OK; + } + + goto e_retry; + } + } + } +e_retry: +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) + psClientCCB->sUtilisation.ui32CCBFull++; + _RGXCCBUtilisationEvent(psClientCCB, + PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED, + ui32CmdSize); +#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */ + return PVRSRV_ERROR_RETRY; +} + +/****************************************************************************** + FUNCTION : RGXReleaseCCB + + PURPOSE : Release a CCB that we have been writing to. + + PARAMETERS : psDevData - device data + psCCB - the CCB + + RETURNS : None +******************************************************************************/ +void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32CmdSize, + IMG_UINT32 ui32PDumpFlags) +{ + IMG_BOOL bInCaptureRange; + IMG_BOOL bPdumpEnabled; + IMG_UINT64 ui64PDumpState = 0; + + PDumpGetStateKM(&ui64PDumpState); + PDumpIsCaptureFrameKM(&bInCaptureRange); + bPdumpEnabled = (ui64PDumpState & PDUMP_STATE_CONNECTED) != 0 + && (bInCaptureRange || PDUMP_IS_CONTINUOUS(ui32PDumpFlags)); + +#if defined(PVRSRV_ENABLE_CCCB_GROW) + OSLockAcquire(psClientCCB->hCCBGrowLock); +#endif + /* + * If a padding command was needed then we should now move ui32HostWriteOffset + * forward. The command has already be dumped (if bPdumpEnabled). 
+ */ + if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) > psClientCCB->ui32Size) + { + IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset; + + UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset, + ui32Remain, + psClientCCB->ui32Size); + psClientCCB->ui32ByteCount += ui32Remain; + } + + /* Dump the CCB data */ + if (bPdumpEnabled) + { + DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc, + psClientCCB->ui32HostWriteOffset, + ui32CmdSize, + ui32PDumpFlags); + } + + /* + * Check if there any fences being written that will already be + * satisfied by the last written update command in this CCB. At the + * same time we can ASSERT that all sync addresses are not NULL. + */ +#if defined(DEBUG) + { + void *pvBufferStart = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset); + void *pvBufferEnd = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset + ui32CmdSize); + IMG_BOOL bMessagePrinted = IMG_FALSE; + + /* Walk through the commands in this section of CCB being released... */ + while (pvBufferStart < pvBufferEnd) + { + RGXFWIF_CCB_CMD_HEADER *psCmdHeader = pvBufferStart; + + if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UPDATE) + { + /* If an UPDATE then record the values incase an adjacent fence uses it. */ + IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO); + RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER)); + + psClientCCB->ui32UpdateEntries = 0; + while (ui32NumUFOs-- > 0) + { + PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0); + if (psClientCCB->ui32UpdateEntries < RGX_CCCB_FENCE_UPDATE_LIST_SIZE) + { + psClientCCB->asFenceUpdateList[psClientCCB->ui32UpdateEntries++] = *psUFOPtr++; + } + } + } + else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE) + { + /* If a FENCE then check the values against the last UPDATE issued. 
*/ + IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO); + RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER)); + + while (ui32NumUFOs-- > 0) + { + PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0); + + if (bMessagePrinted == IMG_FALSE) + { + RGXFWIF_UFO *psUpdatePtr = psClientCCB->asFenceUpdateList; + IMG_UINT32 ui32UpdateIndex; + + for (ui32UpdateIndex = 0; ui32UpdateIndex < psClientCCB->ui32UpdateEntries; ui32UpdateIndex++) + { + if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr)) + { + if (RGX_UFO_PTR_ADDR(psUFOPtr) == RGX_UFO_PTR_ADDR(psUpdatePtr)) + { + PVR_DPF((PVR_DBG_MESSAGE, "Redundant sync checkpoint check found in cCCB(%p) - 0x%x -> 0x%x", + psClientCCB, RGX_UFO_PTR_ADDR(psUFOPtr), psUFOPtr->ui32Value)); + bMessagePrinted = IMG_TRUE; + break; + } + } + else + { + if (psUFOPtr->puiAddrUFO.ui32Addr == psUpdatePtr->puiAddrUFO.ui32Addr && + psUFOPtr->ui32Value == psUpdatePtr->ui32Value) + { + PVR_DPF((PVR_DBG_MESSAGE, "Redundant fence check found in cCCB(%p) - 0x%x -> 0x%x", + psClientCCB, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value)); + bMessagePrinted = IMG_TRUE; + break; + } + } + psUpdatePtr++; + } + } + + psUFOPtr++; + } + } + else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR || + psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE) + { + /* For all other UFO ops check the UFO address is not NULL. */ + IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO); + RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER)); + + while (ui32NumUFOs-- > 0) + { + PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0); + psUFOPtr++; + } + } + + /* Move to the next command in this section of CCB being released... */ + pvBufferStart = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER) + psCmdHeader->ui32CmdSize); + } + } +#endif /* REDUNDANT_SYNCS_DEBUG */ + + /* + * Update the CCB write offset. 
+ */ + UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset, + ui32CmdSize, + psClientCCB->ui32Size); + psClientCCB->ui32ByteCount += ui32CmdSize; + +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) + _RGXUpdateCCBUtilisation(psClientCCB); +#endif + /* + PDumpSetFrame will detect as we Transition out of capture range for + frame based data but if we are PDumping continuous data then we + need to inform the PDump layer ourselves + */ + if ((ui64PDumpState & PDUMP_STATE_CONNECTED) != 0 + && PDUMP_IS_CONTINUOUS(ui32PDumpFlags) + && !bInCaptureRange) + { + PVRSRV_ERROR eError; + + /* Only Transitioning into capture range can cause an error */ + eError = PDumpTransition(psClientCCB->psPDumpConnectionData, PDUMP_TRANSITION_EVENT_RANGE_EXITED, ui32PDumpFlags); + PVR_ASSERT(eError == PVRSRV_OK); + } + + if (bPdumpEnabled) + { + if (!BIT_ISSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN)) + { + /* Store offset to last finished CCB command. This offset can + * be needed when appending commands to a non finished CCB. + */ + psClientCCB->ui32FinishedPDumpWriteOffset = psClientCCB->ui32LastPDumpWriteOffset; + } + + /* Update the PDump write offset to show we PDumped this command */ + psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset; + } + +#if defined(NO_HARDWARE) + /* + The firmware is not running, it cannot update these; we do here instead. 
+ */ + psClientCCB->psClientCCBCtrl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset; + psClientCCB->psClientCCBCtrl->ui32DepOffset = psClientCCB->ui32HostWriteOffset; +#endif + +#if defined(PVRSRV_ENABLE_CCCB_GROW) + OSLockRelease(psClientCCB->hCCBGrowLock); +#endif +} + +IMG_UINT32 RGXGetHostWriteOffsetCCB(RGX_CLIENT_CCB *psClientCCB) +{ + return psClientCCB->ui32HostWriteOffset; +} + +IMG_UINT32 RGXGetWrapMaskCCB(RGX_CLIENT_CCB *psClientCCB) +{ + return psClientCCB->ui32Size-1; +} + +PVRSRV_ERROR RGXSetCCBFlags(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32Flags) +{ + if ((ui32Flags & RGX_CONTEXT_FLAG_DISABLESLR)) + { + BIT_SET(psClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED); + } + else + { + BIT_UNSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED); + } + return PVRSRV_OK; +} + +#define SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL PVR_DBG_ERROR +#define CHECK_COMMAND(cmd, fenceupdate) \ + case RGXFWIF_CCB_CMD_TYPE_##cmd: \ + PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, #cmd " command (%d bytes)", psHeader->ui32CmdSize)); \ + bFenceUpdate = fenceupdate; \ + break + +static void _RGXClientCCBDumpCommands(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32ByteCount) +{ +#if defined(SUPPORT_DUMP_CLIENT_CCB_COMMANDS) + void *pvPtr = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, ui32Offset); + IMG_UINT32 ui32ConsumeSize = ui32ByteCount; + + while (ui32ConsumeSize) + { + RGXFWIF_CCB_CMD_HEADER *psHeader = pvPtr; + IMG_BOOL bFenceUpdate = IMG_FALSE; + + PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "@offset 0x%08lx", IMG_OFFSET_ADDR(pvPtr, -psClientCCB->pvClientCCB))); + switch (psHeader->eCmdType) + { + CHECK_COMMAND(TA, IMG_FALSE); + CHECK_COMMAND(3D, IMG_FALSE); + CHECK_COMMAND(CDM, IMG_FALSE); + CHECK_COMMAND(TQ_3D, IMG_FALSE); + CHECK_COMMAND(TQ_2D, IMG_FALSE); + CHECK_COMMAND(3D_PR, IMG_FALSE); + CHECK_COMMAND(NULL, IMG_FALSE); + CHECK_COMMAND(SHG, IMG_FALSE); + CHECK_COMMAND(RTU, IMG_FALSE); + CHECK_COMMAND(RTU_FC, IMG_FALSE); 
+ CHECK_COMMAND(FENCE, IMG_TRUE); + CHECK_COMMAND(UPDATE, IMG_TRUE); + CHECK_COMMAND(UNFENCED_UPDATE, IMG_FALSE); + CHECK_COMMAND(FENCE_PR, IMG_TRUE); + CHECK_COMMAND(PADDING, IMG_FALSE); + CHECK_COMMAND(TQ_TDM, IMG_FALSE); + default: + PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "Unknown command!")); + break; + } + pvPtr = IMG_OFFSET_ADDR(pvPtr, sizeof(*psHeader)); + if (bFenceUpdate) + { + IMG_UINT32 j; + RGXFWIF_UFO *psUFOPtr = pvPtr; + for (j=0;j<psHeader->ui32CmdSize/sizeof(RGXFWIF_UFO);j++) + { + PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "Addr = 0x%08x, value = 0x%08x", + psUFOPtr[j].puiAddrUFO.ui32Addr, psUFOPtr[j].ui32Value)); + } + } + else + { + IMG_UINT32 *pui32Ptr = pvPtr; + IMG_UINT32 ui32Remain = psHeader->ui32CmdSize/sizeof(IMG_UINT32); + while (ui32Remain) + { + if (ui32Remain >= 4) + { + PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x 0x%08x 0x%08x 0x%08x", + pui32Ptr[0], pui32Ptr[1], pui32Ptr[2], pui32Ptr[3])); + pui32Ptr += 4; + ui32Remain -= 4; + } + if (ui32Remain == 3) + { + PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x 0x%08x 0x%08x", + pui32Ptr[0], pui32Ptr[1], pui32Ptr[2])); + pui32Ptr += 3; + ui32Remain -= 3; + } + if (ui32Remain == 2) + { + PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x 0x%08x", + pui32Ptr[0], pui32Ptr[1])); + pui32Ptr += 2; + ui32Remain -= 2; + } + if (ui32Remain == 1) + { + PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x", + pui32Ptr[0])); + pui32Ptr += 1; + ui32Remain -= 1; + } + } + } + pvPtr = IMG_OFFSET_ADDR(pvPtr, psHeader->ui32CmdSize); + ui32ConsumeSize -= sizeof(*psHeader) + psHeader->ui32CmdSize; + } +#else + PVR_UNREFERENCED_PARAMETER(psClientCCB); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + PVR_UNREFERENCED_PARAMETER(ui32ByteCount); +#endif +} + +void RGXCmdHelperInitCmdCCB_CommandSize(IMG_UINT32 ui32ClientFenceCount, + IMG_UINT32 ui32ClientUpdateCount, + IMG_UINT32 ui32CmdSize, + RGX_CCB_CMD_HELPER_DATA *psCmdHelperData) +{ + IMG_UINT32 
ui32FenceCount, ui32UpdateCount, ui32UnfencedUpdateCount; + + /* Init the generated data members */ + psCmdHelperData->ui32ServerFenceCount = 0; + psCmdHelperData->ui32ServerUpdateCount = 0; + psCmdHelperData->ui32ServerUnfencedUpdateCount = 0; + psCmdHelperData->ui32FenceCmdSize = 0; + psCmdHelperData->ui32UpdateCmdSize = 0; + psCmdHelperData->ui32UnfencedUpdateCmdSize = 0; + + /* Work out how many fences and updates this command will have */ + + /* total DM command size (header plus command data) */ + + psCmdHelperData->ui32DMCmdSize = + RGX_CCB_FWALLOC_ALIGN(ui32CmdSize + sizeof(RGXFWIF_CCB_CMD_HEADER)); + + /* total fence command size (header plus command data) */ + + ui32FenceCount = ui32ClientFenceCount + + psCmdHelperData->ui32ServerFenceCount; + if (ui32FenceCount != 0) + { + psCmdHelperData->ui32FenceCmdSize = + RGX_CCB_FWALLOC_ALIGN(ui32FenceCount * sizeof(RGXFWIF_UFO) + + sizeof(RGXFWIF_CCB_CMD_HEADER)); + } + + /* Total update command size (header plus command data) */ + + ui32UpdateCount = ui32ClientUpdateCount + + psCmdHelperData->ui32ServerUpdateCount; + if (ui32UpdateCount != 0) + { + psCmdHelperData->ui32UpdateCmdSize = + RGX_CCB_FWALLOC_ALIGN(ui32UpdateCount * sizeof(RGXFWIF_UFO) + + sizeof(RGXFWIF_CCB_CMD_HEADER)); + } + + /* total unfenced update command size (header plus command data) */ + + ui32UnfencedUpdateCount = psCmdHelperData->ui32ServerUnfencedUpdateCount; + if (ui32UnfencedUpdateCount != 0) + { + psCmdHelperData->ui32UnfencedUpdateCmdSize = + RGX_CCB_FWALLOC_ALIGN(ui32UnfencedUpdateCount * + sizeof(RGXFWIF_UFO) + + sizeof(RGXFWIF_CCB_CMD_HEADER)); + } +} + +/* + Work out how much space this command will require +*/ +void RGXCmdHelperInitCmdCCB_OtherData(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32ClientFenceCount, + PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress, + IMG_UINT32 *paui32FenceValue, + IMG_UINT32 ui32ClientUpdateCount, + PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress, + IMG_UINT32 *paui32UpdateValue, + IMG_UINT32 ui32CmdSize, + IMG_PBYTE 
pui8DMCmd, + RGXFWIF_CCB_CMD_TYPE eType, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_UINT32 ui32PDumpFlags, + RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData, + IMG_CHAR *pszCommandName, + IMG_BOOL bCCBStateOpen, + RGX_CCB_CMD_HELPER_DATA *psCmdHelperData) +{ + /* Job reference values */ + psCmdHelperData->ui32ExtJobRef = ui32ExtJobRef; + psCmdHelperData->ui32IntJobRef = ui32IntJobRef; + + /* Save the data we require in the submit call */ + psCmdHelperData->psClientCCB = psClientCCB; +#if defined(PDUMP) + psCmdHelperData->ui32PDumpFlags = ui32PDumpFlags; +#endif + psCmdHelperData->pszCommandName = pszCommandName; + if (bCCBStateOpen) + { + BIT_SET(psCmdHelperData->psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN); + } + else + { + BIT_UNSET(psCmdHelperData->psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN); + } + + /* Client sync data */ + psCmdHelperData->ui32ClientFenceCount = ui32ClientFenceCount; + psCmdHelperData->pauiFenceUFOAddress = pauiFenceUFOAddress; + psCmdHelperData->paui32FenceValue = paui32FenceValue; + psCmdHelperData->ui32ClientUpdateCount = ui32ClientUpdateCount; + psCmdHelperData->pauiUpdateUFOAddress = pauiUpdateUFOAddress; + psCmdHelperData->paui32UpdateValue = paui32UpdateValue; + + /* Command data */ + psCmdHelperData->ui32CmdSize = ui32CmdSize; + psCmdHelperData->pui8DMCmd = pui8DMCmd; + psCmdHelperData->eType = eType; + + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, + "%s Command Server Init on FWCtx %08x", pszCommandName, + FWCommonContextGetFWAddress(psClientCCB->psServerCommonContext).ui32Addr); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Workload Data added */ + psCmdHelperData->psWorkEstKickData = psWorkEstKickData; +#endif +} + +/* + Work out how much space this command will require +*/ +void RGXCmdHelperInitCmdCCB(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32ClientFenceCount, + PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress, + IMG_UINT32 *paui32FenceValue, + IMG_UINT32 ui32ClientUpdateCount, + PRGXFWIF_UFO_ADDR 
*pauiUpdateUFOAddress, + IMG_UINT32 *paui32UpdateValue, + IMG_UINT32 ui32CmdSize, + IMG_PBYTE pui8DMCmd, + RGXFWIF_CCB_CMD_TYPE eType, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_UINT32 ui32PDumpFlags, + RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData, + IMG_CHAR *pszCommandName, + IMG_BOOL bCCBStateOpen, + RGX_CCB_CMD_HELPER_DATA *psCmdHelperData) +{ + RGXCmdHelperInitCmdCCB_OtherData(psClientCCB, + ui32ClientFenceCount, + pauiFenceUFOAddress, + paui32FenceValue, + ui32ClientUpdateCount, + pauiUpdateUFOAddress, + paui32UpdateValue, + ui32CmdSize, + pui8DMCmd, + eType, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, + psWorkEstKickData, + pszCommandName, + bCCBStateOpen, + psCmdHelperData); + + RGXCmdHelperInitCmdCCB_CommandSize(ui32ClientFenceCount, + ui32ClientUpdateCount, + ui32CmdSize, + psCmdHelperData); +} + +/* + Reserve space in the CCB and fill in the command and client sync data +*/ +PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount, + RGX_CCB_CMD_HELPER_DATA *asCmdHelperData) +{ + const IMG_UINT32 ui32MaxUFOCmdSize = RGX_CCB_FWALLOC_ALIGN((RGXFWIF_CCB_CMD_MAX_UFOS * sizeof(RGXFWIF_UFO)) + + sizeof(RGXFWIF_CCB_CMD_HEADER)); + IMG_UINT32 ui32AllocSize = 0; + IMG_UINT32 i; + void *pvStartPtr; + PVRSRV_ERROR eError; + + /* + Check the number of fences & updates are valid. 
+ */ + for (i = 0; i < ui32CmdCount; i++) + { + RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = &asCmdHelperData[i]; + + if (psCmdHelperData->ui32FenceCmdSize > ui32MaxUFOCmdSize || + psCmdHelperData->ui32UpdateCmdSize > ui32MaxUFOCmdSize || + psCmdHelperData->ui32UnfencedUpdateCmdSize > ui32MaxUFOCmdSize) + { + return PVRSRV_ERROR_TOO_MANY_SYNCS; + } + } + + /* + Work out how much space we need for all the command(s) + */ + ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData); + +#if defined(PDUMP) + for (i = 0; i < ui32CmdCount; i++) + { + if ((asCmdHelperData[0].ui32PDumpFlags ^ asCmdHelperData[i].ui32PDumpFlags) & PDUMP_FLAGS_CONTINUOUS) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PDump continuous is not consistent (%s != %s) for command %d", + __func__, + PDUMP_IS_CONTINUOUS(asCmdHelperData[0].ui32PDumpFlags)?"IMG_TRUE":"IMG_FALSE", + PDUMP_IS_CONTINUOUS(asCmdHelperData[i].ui32PDumpFlags)?"IMG_TRUE":"IMG_FALSE", + ui32CmdCount)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + } +#endif + + /* + Acquire space in the CCB for all the command(s). + */ + eError = RGXAcquireCCB(asCmdHelperData[0].psClientCCB, + ui32AllocSize, + &pvStartPtr, + asCmdHelperData[0].ui32PDumpFlags); + if (unlikely(eError != PVRSRV_OK)) + { + return eError; + } + + /* + For each command fill in the fence, DM, and update command + + Note: + We only fill in the client fences here, the server fences (and updates) + will be filled in together at the end. 
This is because we might fail the + kernel CCB alloc and would then have to rollback the server syncs if + we took the operation here + */ + for (i = 0; i < ui32CmdCount; i++) + { + RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = & asCmdHelperData[i]; + void *pvCmdPtr; + void *pvServerFenceStart = NULL; + void *pvServerUpdateStart = NULL; +#if defined(PDUMP) + IMG_UINT32 ui32CtxAddr = FWCommonContextGetFWAddress(asCmdHelperData->psClientCCB->psServerCommonContext).ui32Addr; + IMG_UINT32 ui32CcbWoff = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(asCmdHelperData->psClientCCB->psServerCommonContext)); +#endif + + if (psCmdHelperData->ui32ClientFenceCount+psCmdHelperData->ui32ClientUpdateCount != 0) + { + PDUMPCOMMENT("Start of %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes", + psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff); + } + + /* + Create the fence command. + */ + if (psCmdHelperData->ui32FenceCmdSize) + { + RGXFWIF_CCB_CMD_HEADER *psHeader; + IMG_UINT k, uiNextValueIndex; + + /* Fences are at the start of the command */ + pvCmdPtr = pvStartPtr; + + psHeader = pvCmdPtr; + psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_FENCE; + + psHeader->ui32CmdSize = psCmdHelperData->ui32FenceCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER); + psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef; + psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0; + psHeader->sWorkEstKickData.ui64Deadline = 0; + psHeader->sWorkEstKickData.ui64CyclesPrediction = 0; +#endif + + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)); + + /* Fill in the client fences */ + uiNextValueIndex = 0; + for (k = 0; k < psCmdHelperData->ui32ClientFenceCount; k++) + { + RGXFWIF_UFO *psUFOPtr = pvCmdPtr; + + psUFOPtr->puiAddrUFO = psCmdHelperData->pauiFenceUFOAddress[k]; + + if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr)) + { + psUFOPtr->ui32Value = 
PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + } + else + { + /* Only increment uiNextValueIndex for non sync checkpoints + * (as paui32FenceValue only contains values for sync prims) + */ + psUFOPtr->ui32Value = psCmdHelperData->paui32FenceValue[uiNextValueIndex++]; + } + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_UFO)); + +#if defined(SYNC_COMMAND_DEBUG) + PVR_DPF((PVR_DBG_ERROR, "%s client sync fence - 0x%x -> 0x%x", + psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value)); +#endif + PDUMPCOMMENT(".. %s client sync fence - 0x%x -> 0x%x", + psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value); + + + } + pvServerFenceStart = pvCmdPtr; + } + + /* jump over the Server fences */ + pvCmdPtr = IMG_OFFSET_ADDR(pvStartPtr, psCmdHelperData->ui32FenceCmdSize); + + /* + Create the DM command + */ + if (psCmdHelperData->ui32DMCmdSize) + { + RGXFWIF_CCB_CMD_HEADER *psHeader; + + psHeader = pvCmdPtr; + psHeader->eCmdType = psCmdHelperData->eType; + + psHeader->ui32CmdSize = psCmdHelperData->ui32DMCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER); + psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef; + psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef; + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + if (psCmdHelperData->psWorkEstKickData != NULL && + psCmdHelperData->eType != RGXFWIF_CCB_CMD_TYPE_NULL) + { + PVR_ASSERT(psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_GEOM || + psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_3D || + psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_CDM || + psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_TQ_TDM); + psHeader->sWorkEstKickData = *psCmdHelperData->psWorkEstKickData; + } + else + { + psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0; + psHeader->sWorkEstKickData.ui64Deadline = 0; + psHeader->sWorkEstKickData.ui64CyclesPrediction = 0; + } +#endif + + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)); + + /* The buffer is write-combine, so no special 
device memory treatment required. */ + OSCachedMemCopy(pvCmdPtr, psCmdHelperData->pui8DMCmd, psCmdHelperData->ui32CmdSize); + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, psCmdHelperData->ui32CmdSize); + } + + + /* + Create the update command. + + Note: + We only fill in the client updates here, the server updates (and fences) + will be filled in together at the end + */ + if (psCmdHelperData->ui32UpdateCmdSize) + { + RGXFWIF_CCB_CMD_HEADER *psHeader; + IMG_UINT k, uiNextValueIndex; + + psHeader = pvCmdPtr; + psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_UPDATE; + psHeader->ui32CmdSize = psCmdHelperData->ui32UpdateCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER); + psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef; + psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0; + psHeader->sWorkEstKickData.ui64Deadline = 0; + psHeader->sWorkEstKickData.ui64CyclesPrediction = 0; +#endif + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)); + + /* Fill in the client updates */ + uiNextValueIndex = 0; + for (k = 0; k < psCmdHelperData->ui32ClientUpdateCount; k++) + { + RGXFWIF_UFO *psUFOPtr = pvCmdPtr; + + psUFOPtr->puiAddrUFO = psCmdHelperData->pauiUpdateUFOAddress[k]; + if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr)) + { + psUFOPtr->ui32Value = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + } + else + { + /* Only increment uiNextValueIndex for non sync checkpoints + * (as paui32UpdateValue only contains values for sync prims) + */ + psUFOPtr->ui32Value = psCmdHelperData->paui32UpdateValue[uiNextValueIndex++]; + } + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_UFO)); + +#if defined(SYNC_COMMAND_DEBUG) + PVR_DPF((PVR_DBG_ERROR, "%s client sync update - 0x%x -> 0x%x", + psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value)); +#endif + PDUMPCOMMENT(".. 
%s client sync update - 0x%x -> 0x%x", + psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value); + + } + pvServerUpdateStart = pvCmdPtr; + } + + /* Save the server sync fence & update offsets for submit time */ + psCmdHelperData->pui8ServerFenceStart = pvServerFenceStart; + psCmdHelperData->pui8ServerUpdateStart = pvServerUpdateStart; + + /* jump over the fenced update */ + if (psCmdHelperData->ui32UnfencedUpdateCmdSize != 0) + { + RGXFWIF_CCB_CMD_HEADER * const psHeader = IMG_OFFSET_ADDR(psCmdHelperData->pui8ServerUpdateStart, psCmdHelperData->ui32UpdateCmdSize); + /* header should not be zero but check for code analysis */ + if (unlikely(psHeader == NULL)) + { + return PVRSRV_ERROR_MEMORY_ACCESS; + } + /* set up the header for unfenced updates */ + PVR_ASSERT(psHeader); /* Could be zero if ui32UpdateCmdSize is 0 which is never expected */ + psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE; + psHeader->ui32CmdSize = psCmdHelperData->ui32UnfencedUpdateCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER); + psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef; + psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0; + psHeader->sWorkEstKickData.ui64Deadline = 0; + psHeader->sWorkEstKickData.ui64CyclesPrediction = 0; +#endif + + /* jump over the header */ + psCmdHelperData->pui8ServerUnfencedUpdateStart = IMG_OFFSET_ADDR(psHeader, sizeof(RGXFWIF_CCB_CMD_HEADER)); + } + else + { + psCmdHelperData->pui8ServerUnfencedUpdateStart = NULL; + } + + /* Save start for sanity checking at submit time */ + psCmdHelperData->pui8StartPtr = pvStartPtr; + + /* Set the start pointer for the next iteration around the loop */ + pvStartPtr = IMG_OFFSET_ADDR(pvStartPtr, + psCmdHelperData->ui32FenceCmdSize + + psCmdHelperData->ui32DMCmdSize + + psCmdHelperData->ui32UpdateCmdSize + + psCmdHelperData->ui32UnfencedUpdateCmdSize); + + if 
(psCmdHelperData->ui32ClientFenceCount+psCmdHelperData->ui32ClientUpdateCount != 0) + { + PDUMPCOMMENT("End of %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes", + psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff); + } + else + { + PDUMPCOMMENT("No %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes", + psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff); + } + } + + return PVRSRV_OK; +} + +/* + Fill in the server syncs data and release the CCB space +*/ +void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount, + RGX_CCB_CMD_HELPER_DATA *asCmdHelperData, + const IMG_CHAR *pcszDMName, + IMG_UINT32 ui32CtxAddr) +{ + IMG_UINT32 ui32AllocSize = 0; + IMG_UINT32 i; +#if defined(LINUX) + IMG_BOOL bTraceChecks = trace_rogue_are_fence_checks_traced(); + IMG_BOOL bTraceUpdates = trace_rogue_are_fence_updates_traced(); +#endif + + /* + Work out how much space we need for all the command(s) + */ + ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData); + /* + For each command fill in the server sync info + */ + for (i=0;ipszCommandName, + pcszDMName, + ui32CtxAddr, + psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize, + psCmdHelperData->ui32ClientFenceCount, + psCmdHelperData->pauiFenceUFOAddress, + psCmdHelperData->paui32FenceValue); + } + if (bTraceUpdates) + { + trace_rogue_fence_updates(psCmdHelperData->pszCommandName, + pcszDMName, + ui32CtxAddr, + psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize, + psCmdHelperData->ui32ClientUpdateCount, + psCmdHelperData->pauiUpdateUFOAddress, + psCmdHelperData->paui32UpdateValue); + } +#endif + /* + All the commands have been filled in so release the CCB space. 
+ The FW still won't run this command until we kick it + */ + PDUMPCOMMENTWITHFLAGS(psCmdHelperData->ui32PDumpFlags, + "%s Command Server Release on FWCtx %08x", + psCmdHelperData->pszCommandName, ui32CtxAddr); + } + + _RGXClientCCBDumpCommands(asCmdHelperData[0].psClientCCB, + asCmdHelperData[0].psClientCCB->ui32HostWriteOffset, + ui32AllocSize); + + RGXReleaseCCB(asCmdHelperData[0].psClientCCB, + ui32AllocSize, + asCmdHelperData[0].ui32PDumpFlags); + + BIT_UNSET(asCmdHelperData[0].psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN); +} + +IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32 ui32CmdCount, + RGX_CCB_CMD_HELPER_DATA *asCmdHelperData) +{ + IMG_UINT32 ui32AllocSize = 0; + IMG_UINT32 i; + + /* + Work out how much space we need for all the command(s) + */ + for (i = 0; i < ui32CmdCount; i++) + { + ui32AllocSize += + asCmdHelperData[i].ui32FenceCmdSize + + asCmdHelperData[i].ui32DMCmdSize + + asCmdHelperData[i].ui32UpdateCmdSize + + asCmdHelperData[i].ui32UnfencedUpdateCmdSize; + } + + return ui32AllocSize; +} + +/* Work out how much of an offset there is to a specific command. 
*/ +IMG_UINT32 RGXCmdHelperGetCommandOffset(RGX_CCB_CMD_HELPER_DATA *asCmdHelperData, + IMG_UINT32 ui32Cmdindex) +{ + IMG_UINT32 ui32Offset = 0; + IMG_UINT32 i; + + for (i = 0; i < ui32Cmdindex; i++) + { + ui32Offset += + asCmdHelperData[i].ui32FenceCmdSize + + asCmdHelperData[i].ui32DMCmdSize + + asCmdHelperData[i].ui32UpdateCmdSize + + asCmdHelperData[i].ui32UnfencedUpdateCmdSize; + } + + return ui32Offset; +} + +/* Returns the offset of the data master command from a write offset */ +IMG_UINT32 RGXCmdHelperGetDMCommandHeaderOffset(RGX_CCB_CMD_HELPER_DATA *psCmdHelperData) +{ + return psCmdHelperData->ui32FenceCmdSize; +} + +static const char *_CCBCmdTypename(RGXFWIF_CCB_CMD_TYPE cmdType) +{ + switch (cmdType) + { + case RGXFWIF_CCB_CMD_TYPE_GEOM: return "TA"; + case RGXFWIF_CCB_CMD_TYPE_3D: return "3D"; + case RGXFWIF_CCB_CMD_TYPE_CDM: return "CDM"; + case RGXFWIF_CCB_CMD_TYPE_TQ_3D: return "TQ_3D"; + case RGXFWIF_CCB_CMD_TYPE_TQ_2D: return "TQ_2D"; + case RGXFWIF_CCB_CMD_TYPE_3D_PR: return "3D_PR"; + case RGXFWIF_CCB_CMD_TYPE_NULL: return "NULL"; + case RGXFWIF_CCB_CMD_TYPE_TQ_TDM: return "TQ_TDM"; + case RGXFWIF_CCB_CMD_TYPE_FENCE: return "FENCE"; + case RGXFWIF_CCB_CMD_TYPE_UPDATE: return "UPDATE"; + case RGXFWIF_CCB_CMD_TYPE_FENCE_PR: return "FENCE_PR"; + case RGXFWIF_CCB_CMD_TYPE_PRIORITY: return "PRIORITY"; + case RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE: return "UNFENCED_UPDATE"; + case RGXFWIF_CCB_CMD_TYPE_PADDING: return "PADDING"; + + default: + PVR_ASSERT(IMG_FALSE); + break; + } + + return "INVALID"; +} + +PVRSRV_ERROR CheckForStalledCCB(PVRSRV_DEVICE_NODE *psDevNode, RGX_CLIENT_CCB *psCurrentClientCCB, RGX_KICK_TYPE_DM eKickTypeDM) +{ + volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl; + IMG_UINT32 ui32SampledRdOff, ui32SampledDpOff, ui32SampledWrOff, ui32WrapMask; + PVRSRV_ERROR eError = PVRSRV_OK; + + if (psCurrentClientCCB == NULL) + { + PVR_DPF((PVR_DBG_WARNING, "CheckForStalledCCB: CCCB is NULL")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + +#if 
defined(PVRSRV_ENABLE_CCCB_GROW) + /* If CCB grow is enabled, take the lock while sampling offsets + * (to guard against a grow happening mid-sample) + */ + OSLockAcquire(psCurrentClientCCB->hCCBGrowLock); +#endif + /* NB. use psCurrentClientCCB->ui32Size as basis for wrap mask (rather than psClientCCBCtrl->ui32WrapMask) + * as if CCB grow happens, psCurrentClientCCB->ui32Size will have been updated but + * psClientCCBCtrl->ui32WrapMask is only updated once the firmware sees the CCB has grown. + * If we use the wrong value, we might incorrectly determine that the offsets are invalid. + */ + ui32WrapMask = RGXGetWrapMaskCCB(psCurrentClientCCB); + psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl; + ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset; + ui32SampledDpOff = psClientCCBCtrl->ui32DepOffset; + ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset; +#if defined(PVRSRV_ENABLE_CCCB_GROW) + OSLockRelease(psCurrentClientCCB->hCCBGrowLock); +#endif + + if (ui32SampledRdOff > ui32WrapMask || + ui32SampledDpOff > ui32WrapMask || + ui32SampledWrOff > ui32WrapMask) + { + PVR_DPF((PVR_DBG_WARNING, "CheckForStalledCCB: CCCB has invalid offset (ROFF=%d DOFF=%d WOFF=%d)", + ui32SampledRdOff, ui32SampledDpOff, ui32SampledWrOff)); + return PVRSRV_ERROR_INVALID_OFFSET; + } + + if (ui32SampledRdOff != ui32SampledWrOff && + psCurrentClientCCB->ui32LastROff != psCurrentClientCCB->ui32LastWOff && + ui32SampledRdOff == psCurrentClientCCB->ui32LastROff && + (psCurrentClientCCB->ui32ByteCount - psCurrentClientCCB->ui32LastByteCount) < psCurrentClientCCB->ui32Size) + { + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDevNode->pvDevice; + + /* Only log a stalled CCB if GPU is idle (any state other than POW_ON is considered idle) */ + if ((psDevInfo->psRGXFWIfFwSysData->ePowState != RGXFWIF_POW_ON) && + psDevInfo->ui32SLRHoldoffCounter == 0) + { + static __maybe_unused const char *pszStalledAction = +#if defined(PVRSRV_STALLED_CCB_ACTION) + "force"; +#else + "warn"; 
+#endif + /* Don't log this by default unless debugging since a higher up + * function will log the stalled condition. Helps avoid double + * messages in the log. + */ + PVR_DPF((PVR_DBG_ERROR, "%s (%s): CCCB has not progressed (ROFF=%d DOFF=%d WOFF=%d) for \"%s\"", + __func__, pszStalledAction, ui32SampledRdOff, + ui32SampledDpOff, ui32SampledWrOff, + psCurrentClientCCB->szName)); + eError = PVRSRV_ERROR_CCCB_STALLED; + + { + void *pvClientCCBBuff = psCurrentClientCCB->pvClientCCB; + RGXFWIF_CCB_CMD_HEADER *psCommandHeader = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledRdOff); + PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psCurrentClientCCB->psServerCommonContext); + + /* Special case - if readOffset is on a PADDING packet, CCB has wrapped. + * In this case, skip over the PADDING packet. + */ + if (psCommandHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_PADDING) + { + psCommandHeader = IMG_OFFSET_ADDR(pvClientCCBBuff, + ((ui32SampledRdOff + + psCommandHeader->ui32CmdSize + + sizeof(RGXFWIF_CCB_CMD_HEADER)) + & psCurrentClientCCB->psClientCCBCtrl->ui32WrapMask)); + } + + /* Only try to recover a 'stalled' context (ie one waiting on a fence), as some work (eg compute) could + * take a long time to complete, during which time the CCB ptrs would not advance. 
+ */ + if (((psCommandHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE) || + (psCommandHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR)) && + (psCommandHeader != IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledWrOff))) + { + /* Acquire the cCCB recovery lock */ + OSLockAcquire(psDevInfo->hCCBRecoveryLock); + + if (!psDevInfo->pvEarliestStalledClientCCB) + { + psDevInfo->pvEarliestStalledClientCCB = (void*)psCurrentClientCCB; + psDevInfo->ui32OldestSubmissionOrdinal = psCommandHeader->ui32IntJobRef; + } + else + { + /* Check if this fence cmd header has an older submission stamp than the one we are currently considering unblocking + * (account for submission stamp wrap by checking diff is less than 0x80000000) - if it is older, then this becomes + * our preferred fence to be unblocked/ + */ + if ((psCommandHeader->ui32IntJobRef < psDevInfo->ui32OldestSubmissionOrdinal) && + ((psDevInfo->ui32OldestSubmissionOrdinal - psCommandHeader->ui32IntJobRef) < 0x8000000)) + { + psDevInfo->pvEarliestStalledClientCCB = (void*)psCurrentClientCCB; + psDevInfo->ui32OldestSubmissionOrdinal = psCommandHeader->ui32IntJobRef; + } + } + + /* Release the cCCB recovery lock */ + OSLockRelease(psDevInfo->hCCBRecoveryLock); + } + } + } + } + + psCurrentClientCCB->ui32LastROff = ui32SampledRdOff; + psCurrentClientCCB->ui32LastWOff = ui32SampledWrOff; + psCurrentClientCCB->ui32LastByteCount = psCurrentClientCCB->ui32ByteCount; + + return eError; +} + +void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo, + PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext, + RGX_CLIENT_CCB *psCurrentClientCCB, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; + volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl; + void *pvClientCCBBuff; + IMG_UINT32 ui32Offset; + IMG_UINT32 ui32DepOffset; + IMG_UINT32 ui32EndOffset; + IMG_UINT32 ui32WrapMask; + IMG_CHAR * pszState = "Ready"; + + /* Ensure hCCBGrowLock is acquired before reading + * 
psCurrentClientCCB->pvClientCCB as a CCB grow + * could remap the virtual addresses. + */ +#if defined(PVRSRV_ENABLE_CCCB_GROW) + OSLockAcquire(psCurrentClientCCB->hCCBGrowLock); +#endif + psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl; + pvClientCCBBuff = psCurrentClientCCB->pvClientCCB; + ui32EndOffset = psCurrentClientCCB->ui32HostWriteOffset; + OSMemoryBarrier(); + ui32Offset = psClientCCBCtrl->ui32ReadOffset; + ui32DepOffset = psClientCCBCtrl->ui32DepOffset; + /* NB. Use psCurrentClientCCB->ui32Size as basis for wrap mask (rather + * than psClientCCBCtrl->ui32WrapMask) as if CCB grow happened, + * psCurrentClientCCB->ui32Size will have been updated but + * psClientCCBCtrl->ui32WrapMask is only updated once the firmware + * sees the CCB has grown. If we use the wrong value, ui32NextOffset + * can end up being wrapped prematurely and pointing to garbage. + */ + ui32WrapMask = RGXGetWrapMaskCCB(psCurrentClientCCB); + + PVR_DUMPDEBUG_LOG("FWCtx 0x%08X (%s)", sFWCommonContext.ui32Addr, psCurrentClientCCB->szName); + if (ui32Offset == ui32EndOffset) + { + PVR_DUMPDEBUG_LOG(" `--"); + } + + while (ui32Offset != ui32EndOffset) + { + RGXFWIF_CCB_CMD_HEADER *psCmdHeader = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32Offset); + IMG_UINT32 ui32NextOffset = (ui32Offset + psCmdHeader->ui32CmdSize + sizeof(RGXFWIF_CCB_CMD_HEADER)) & ui32WrapMask; + IMG_BOOL bLastCommand = (ui32NextOffset == ui32EndOffset)? IMG_TRUE: IMG_FALSE; + IMG_BOOL bLastUFO; + #define CCB_SYNC_INFO_LEN 80 + IMG_CHAR pszSyncInfo[CCB_SYNC_INFO_LEN]; + IMG_UINT32 ui32NoOfUpdates, i; + RGXFWIF_UFO *psUFOPtr; + + ui32NoOfUpdates = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO); + psUFOPtr = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32Offset + sizeof(RGXFWIF_CCB_CMD_HEADER)); + pszSyncInfo[0] = '\0'; + + if (ui32Offset == ui32DepOffset) + { + pszState = "Waiting"; + } + + PVR_DUMPDEBUG_LOG(" %s--%s %s @ %u Int=%u Ext=%u", + bLastCommand? 
"`": "|", + pszState, _CCBCmdTypename(psCmdHeader->eCmdType), + ui32Offset, psCmdHeader->ui32IntJobRef, psCmdHeader->ui32ExtJobRef + ); + + /* switch on type and write checks and updates */ + switch (psCmdHeader->eCmdType) + { + case RGXFWIF_CCB_CMD_TYPE_UPDATE: + case RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE: + case RGXFWIF_CCB_CMD_TYPE_FENCE: + case RGXFWIF_CCB_CMD_TYPE_FENCE_PR: + { + for (i = 0; i < ui32NoOfUpdates; i++, psUFOPtr++) + { + bLastUFO = (ui32NoOfUpdates-1 == i)? IMG_TRUE: IMG_FALSE; + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) + { + if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr)) + { + SyncCheckpointRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr, + pszSyncInfo, CCB_SYNC_INFO_LEN); + } + else + { + SyncRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr, + pszSyncInfo, CCB_SYNC_INFO_LEN); + } + } + + PVR_DUMPDEBUG_LOG(" %s %s--Addr:0x%08x Val=0x%08x %s", + bLastCommand? " ": "|", + bLastUFO? "`": "|", + psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value, + pszSyncInfo + ); + } + break; + } + + default: + break; + } + ui32Offset = ui32NextOffset; + } + +#if defined(PVRSRV_ENABLE_CCCB_GROW) + OSLockRelease(psCurrentClientCCB->hCCBGrowLock); +#endif +} + +void DumpStalledCCBCommand(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext, + RGX_CLIENT_CCB *psCurrentClientCCB, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl; + void *pvClientCCBBuff = psCurrentClientCCB->pvClientCCB; + volatile void *pvPtr; + IMG_UINT32 ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset; + IMG_UINT32 ui32SampledDepOff = psClientCCBCtrl->ui32DepOffset; + IMG_UINT32 ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset; + + pvPtr = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledRdOff); + + if ((ui32SampledRdOff == ui32SampledDepOff) && + (ui32SampledRdOff != ui32SampledWrOff)) + { + volatile RGXFWIF_CCB_CMD_HEADER *psCommandHeader = 
IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledRdOff); + RGXFWIF_CCB_CMD_TYPE eCommandType = psCommandHeader->eCmdType; + volatile void *pvPtr = psCommandHeader; + + /* CCB is stalled on a fence... */ + if ((eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE) || (eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR)) + { +#if defined(SUPPORT_EXTRA_METASP_DEBUG) + PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psCurrentClientCCB->psServerCommonContext); + IMG_UINT32 ui32Val; +#endif + RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvPtr, sizeof(*psCommandHeader)); + IMG_UINT32 jj; + + /* Display details of the fence object on which the context is pending */ + PVR_DUMPDEBUG_LOG("FWCtx 0x%08X @ %d (%s) pending on %s:", + sFWCommonContext.ui32Addr, + ui32SampledRdOff, + psCurrentClientCCB->szName, + _CCBCmdTypename(eCommandType)); + for (jj=0; jjui32CmdSize/sizeof(RGXFWIF_UFO); jj++) + { +#if !defined(SUPPORT_EXTRA_METASP_DEBUG) + PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value); +#else + ui32Val = 0; + RGXReadWithSP(psDevInfo, psUFOPtr[jj].puiAddrUFO.ui32Addr, &ui32Val); + PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value(Host)=0x%08x Value(FW)=0x%08x", + psUFOPtr[jj].puiAddrUFO.ui32Addr, + psUFOPtr[jj].ui32Value, ui32Val); +#endif + } + + /* Advance psCommandHeader past the FENCE to the next command header (this will be the TA/3D command that is fenced) */ + pvPtr = IMG_OFFSET_ADDR(psUFOPtr, psCommandHeader->ui32CmdSize); + psCommandHeader = pvPtr; + if (psCommandHeader != IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledWrOff)) + { + PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X fenced command is of type %s",sFWCommonContext.ui32Addr, _CCBCmdTypename(psCommandHeader->eCmdType)); + /* Advance psCommandHeader past the TA/3D to the next command header (this will possibly be an UPDATE) */ + pvPtr = IMG_OFFSET_ADDR(pvPtr, sizeof(*psCommandHeader) + psCommandHeader->ui32CmdSize); + psCommandHeader = pvPtr; + /* If the next command is an update, display 
details of that so we can see what would then become unblocked */ + if (psCommandHeader != IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledWrOff)) + { + eCommandType = psCommandHeader->eCmdType; + + if (eCommandType == RGXFWIF_CCB_CMD_TYPE_UPDATE) + { + psUFOPtr = IMG_OFFSET_ADDR(psCommandHeader, sizeof(*psCommandHeader)); + PVR_DUMPDEBUG_LOG(" preventing %s:",_CCBCmdTypename(eCommandType)); + for (jj=0; jjui32CmdSize/sizeof(RGXFWIF_UFO); jj++) + { +#if !defined(SUPPORT_EXTRA_METASP_DEBUG) + PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value); +#else + ui32Val = 0; + RGXReadWithSP(psDevInfo, psUFOPtr[jj].puiAddrUFO.ui32Addr, &ui32Val); + PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value(Host)=0x%08x Value(FW)=0x%08x", + psUFOPtr[jj].puiAddrUFO.ui32Addr, + psUFOPtr[jj].ui32Value, + ui32Val); +#endif + } + } + } + else + { + PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr); + } + } + else + { + PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr); + } + } + } +} + +void DumpStalledContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGX_CLIENT_CCB *psStalledClientCCB; + + PVR_ASSERT(psDevInfo); + + psStalledClientCCB = (RGX_CLIENT_CCB *)psDevInfo->pvEarliestStalledClientCCB; + + if (psStalledClientCCB) + { + volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl = psStalledClientCCB->psClientCCBCtrl; + IMG_UINT32 ui32SampledDepOffset = psClientCCBCtrl->ui32DepOffset; + void *pvPtr = IMG_OFFSET_ADDR(psStalledClientCCB->pvClientCCB, ui32SampledDepOffset); + RGXFWIF_CCB_CMD_HEADER *psCommandHeader = pvPtr; + RGXFWIF_CCB_CMD_TYPE eCommandType = psCommandHeader->eCmdType; + + if ((eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE) || (eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR)) + { + RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvPtr, sizeof(*psCommandHeader)); + IMG_UINT32 jj; + IMG_UINT32 ui32NumUnsignalledUFOs = 0; + IMG_UINT32 ui32UnsignalledUFOVaddrs[PVRSRV_MAX_SYNCS]; + +#if 
defined(PVRSRV_STALLED_CCB_ACTION) + if (!psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.aszCCBName[0]) + { + OSClockMonotonicns64(&psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.ui64Timestamp); + psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.ui32NumUFOs = (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)); + psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.ui32FWCtxAddr = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr; + OSStringLCopy(psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.aszCCBName, + psStalledClientCCB->szName, + MAX_CLIENT_CCB_NAME); + } + else + { + OSClockMonotonicns64(&psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].ui64Timestamp); + psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].ui32NumUFOs = (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)); + psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].ui32FWCtxAddr = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr; + OSStringLCopy(psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].aszCCBName, + psStalledClientCCB->szName, + MAX_CLIENT_CCB_NAME); + psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp = (psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp + 1) % PVR_SLR_LOG_ENTRIES; + } + psDevInfo->psRGXFWIfFwOsData->ui32ForcedUpdatesRequested++; +#endif + PVR_LOG(("Fence found on context 0x%x '%s' @ %d has %d UFOs", + FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr, + psStalledClientCCB->szName, ui32SampledDepOffset, + (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)))); + + for (jj=0; jjui32CmdSize/sizeof(RGXFWIF_UFO); jj++) + { + if (PVRSRV_UFO_IS_SYNC_CHECKPOINT((RGXFWIF_UFO *)&psUFOPtr[jj])) + { + IMG_UINT32 ui32ReadValue = SyncCheckpointStateFromUFO(psDevInfo->psDeviceNode, + psUFOPtr[jj].puiAddrUFO.ui32Addr); + PVR_LOG((" %d/%d FWAddr 0x%x requires 0x%x (currently 0x%x)", jj+1, + 
(IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)), + psUFOPtr[jj].puiAddrUFO.ui32Addr, + psUFOPtr[jj].ui32Value, + ui32ReadValue)); + /* If fence is unmet, dump debug info on it */ + if (ui32ReadValue != psUFOPtr[jj].ui32Value) + { + /* Add to our list to pass to pvr_sync */ + ui32UnsignalledUFOVaddrs[ui32NumUnsignalledUFOs] = psUFOPtr[jj].puiAddrUFO.ui32Addr; + ui32NumUnsignalledUFOs++; + } + } + else + { + PVR_LOG((" %d/%d FWAddr 0x%x requires 0x%x", jj+1, + (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)), + psUFOPtr[jj].puiAddrUFO.ui32Addr, + psUFOPtr[jj].ui32Value)); + } + } +#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) + if (ui32NumUnsignalledUFOs > 0) + { + IMG_UINT32 ui32NumSyncsOwned; + PVRSRV_ERROR eErr = SyncCheckpointDumpInfoOnStalledUFOs(ui32NumUnsignalledUFOs, &ui32UnsignalledUFOVaddrs[0], &ui32NumSyncsOwned); + + PVR_LOG_IF_ERROR(eErr, "SyncCheckpointDumpInfoOnStalledUFOs() call failed."); + } +#endif +#if defined(PVRSRV_STALLED_CCB_ACTION) + if (BIT_ISSET(psStalledClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED)) + { + PRGXFWIF_FWCOMMONCONTEXT psContext = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext); + + PVR_LOG(("SLR disabled for FWCtx 0x%08X", psContext.ui32Addr)); + } + else + { + if (ui32NumUnsignalledUFOs > 0) + { + RGXFWIF_KCCB_CMD sSignalFencesCmd; + + sSignalFencesCmd.eCmdType = RGXFWIF_KCCB_CMD_FORCE_UPDATE; + sSignalFencesCmd.ui32KCCBFlags = 0; + sSignalFencesCmd.uCmdData.sForceUpdateData.psContext = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext); + sSignalFencesCmd.uCmdData.sForceUpdateData.ui32CCBFenceOffset = ui32SampledDepOffset; + + PVR_LOG(("Forced update command issued for FWCtx 0x%08X", sSignalFencesCmd.uCmdData.sForceUpdateData.psContext.ui32Addr)); + + RGXScheduleCommand(FWCommonContextGetRGXDevInfo(psStalledClientCCB->psServerCommonContext), + RGXFWIF_DM_GP, + &sSignalFencesCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + } + } 
+#endif + } + psDevInfo->pvEarliestStalledClientCCB = NULL; + } +} + +/****************************************************************************** + End of file (rgxccb.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxccb.h b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxccb.h new file mode 100644 index 000000000000..fdb9d3bbd0e2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxccb.h @@ -0,0 +1,333 @@ +/*************************************************************************/ /*! +@File +@Title RGX Circular Command Buffer functionality. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the RGX Circular Command Buffer functionality. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__RGXCCB_H__) +#define __RGXCCB_H__ + +#include "devicemem.h" +#include "device.h" +#include "rgxdevice.h" +#include "sync_server.h" +#include "connection_server.h" +#include "rgxdebug.h" +#include "rgxdefs_km.h" +#include "pvr_notifier.h" + +#define MAX_CLIENT_CCB_NAME 30 +#define SYNC_FLAG_MASK_ALL IMG_UINT32_MAX + +/* + * This size is to be used when a client CCB is found to consume very + * negligible space (e.g. a few hundred bytes to few KBs - less than a page). + * In such a case, instead of allocating CCB of size of only a few KBs, we + * allocate at-least this much to be future risk-free. 
 */
#define MIN_SAFE_CCB_SIZE_LOG2 13 /* 8K (2 Pages) */
#define MAX_SAFE_CCB_SIZE_LOG2 18 /* 256K (64 Pages) */

/* Per-DM client CCB sizes, expressed as log2 of the byte size. For each data
 * master there is a default size and a maximum size; both are compile-time
 * checked against the safe bounds above, and each maximum is additionally
 * checked to be no smaller than its corresponding default. */
#define RGX_TQ3D_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D
static_assert(RGX_TQ3D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 &&
	RGX_TQ3D_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ3D CCB size is invalid");
#define RGX_TQ3D_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ3D
static_assert(RGX_TQ3D_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D
	&& RGX_TQ3D_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ3D max CCB size is invalid");

#define RGX_TQ2D_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D
static_assert(RGX_TQ2D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 &&
	RGX_TQ2D_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ2D CCB size is invalid");
#define RGX_TQ2D_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ2D
static_assert(RGX_TQ2D_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D &&
	RGX_TQ2D_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ2D max CCB size is invalid");

#define RGX_CDM_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM
static_assert(RGX_CDM_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 &&
	RGX_CDM_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "CDM CCB size is invalid");
#define RGX_CDM_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_CDM
static_assert(RGX_CDM_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM &&
	RGX_CDM_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "CDM max CCB size is invalid");

#define RGX_TA_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA
static_assert(RGX_TA_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 &&
	RGX_TA_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TA CCB size is invalid");
#define RGX_TA_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TA
static_assert(RGX_TA_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA &&
	RGX_TA_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TA max CCB size is invalid");

#define RGX_3D_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D
static_assert(RGX_3D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 &&
	RGX_3D_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "3D CCB size is invalid");
#define RGX_3D_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_3D
static_assert(RGX_3D_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D &&
	RGX_3D_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "3D max CCB size is invalid");

#define RGX_KICKSYNC_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC
static_assert(RGX_KICKSYNC_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 &&
	RGX_KICKSYNC_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "KickSync CCB size is invalid");
#define RGX_KICKSYNC_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_KICKSYNC
static_assert(RGX_KICKSYNC_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC &&
	RGX_KICKSYNC_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "KickSync max CCB size is invalid");

#define RGX_TDM_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TDM
static_assert(RGX_TDM_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 &&
	RGX_TDM_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TDM CCB size is invalid");
#define RGX_TDM_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TDM
static_assert(RGX_TDM_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TDM &&
	RGX_TDM_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TDM max CCB size is invalid");

/* Opaque client CCB handle; the definition lives in rgxccb.c. */
typedef struct _RGX_CLIENT_CCB_ RGX_CLIENT_CCB;

/*
	This structure is declared here as it's allocated on the heap by
	the callers
*/

typedef struct _RGX_CCB_CMD_HELPER_DATA_ {
	/* Data setup at command init time */
	RGX_CLIENT_CCB			*psClientCCB;    /* CCB this command will be written into */
	IMG_CHAR			*pszCommandName; /* human-readable name, used for debug/PDump */
	IMG_UINT32			ui32PDumpFlags;

	IMG_UINT32			ui32ClientFenceCount;
	PRGXFWIF_UFO_ADDR		*pauiFenceUFOAddress;
	IMG_UINT32			*paui32FenceValue;
	IMG_UINT32			ui32ClientUpdateCount;
	PRGXFWIF_UFO_ADDR		*pauiUpdateUFOAddress;
	IMG_UINT32			*paui32UpdateValue;
	RGXFWIF_CCB_CMD_TYPE		eType;           /* FW CCB command type of the DM command */
	IMG_UINT32			ui32CmdSize;
	IMG_UINT8			*pui8DMCmd;      /* the DM command payload itself */
	/* Derived sizes of the individual command sections — presumably in
	 * bytes; confirm against the computation in rgxccb.c. */
	IMG_UINT32			ui32FenceCmdSize;
	IMG_UINT32			ui32DMCmdSize;
	IMG_UINT32			ui32UpdateCmdSize;
	IMG_UINT32			ui32UnfencedUpdateCmdSize;

	/* Data setup at command acquire time */
	IMG_UINT8			*pui8StartPtr;
	IMG_UINT8			*pui8ServerUpdateStart;
	IMG_UINT8			*pui8ServerUnfencedUpdateStart;
	IMG_UINT8			*pui8ServerFenceStart;
	IMG_UINT32			ui32ServerFenceCount;
	IMG_UINT32			ui32ServerUpdateCount;
	IMG_UINT32			ui32ServerUnfencedUpdateCount;

	/* Job reference fields */
	IMG_UINT32			ui32ExtJobRef;
	IMG_UINT32			ui32IntJobRef;

	/* Workload kick information */
	RGXFWIF_WORKEST_KICK_DATA	*psWorkEstKickData;

} RGX_CCB_CMD_HELPER_DATA;

/* Size of the padding command used to fill the CCB up to a wrap point:
 * a bare command header with no payload. */
#define PADDING_COMMAND_SIZE	(sizeof(RGXFWIF_CCB_CMD_HEADER))


/* X-macro list of all CCB requestor types; expanded below to build the
 * requestor enum and (in rgxccb.c) the aszCCBRequestors string table. */
#define RGX_CCB_REQUESTORS(TYPE) \
	/* for debugging purposes */ TYPE(UNDEF) \
	TYPE(TA)  \
	TYPE(3D)  \
	TYPE(CDM) \
	TYPE(SH)  \
	TYPE(RS)  \
	TYPE(TQ_3D) \
	TYPE(TQ_2D) \
	TYPE(TQ_TDM) \
	TYPE(KICKSYNC) \

/* Forms an enum constant for each type present in RGX_CCB_REQUESTORS list. The enum is mainly used as
   an index to the aszCCBRequestors table defined in rgxccb.c. The total number of enums must adhere
   to the following build assert.
*/
typedef enum _RGX_CCB_REQUESTOR_TYPE_
{
#define CONSTRUCT_ENUM(req) REQ_TYPE_##req,
	RGX_CCB_REQUESTORS (CONSTRUCT_ENUM)
#undef CONSTRUCT_ENUM

	/* should always be at the end */
	REQ_TYPE_TOTAL_COUNT,
} RGX_CCB_REQUESTOR_TYPE;

/* Tuple describing the columns of the following table */
typedef enum _RGX_CCB_REQUESTOR_TUPLE_
{
	REQ_RGX_FW_CLIENT_CCB_STRING,         /* Index to comment to be dumped in DevMemAllocs when allocating FirmwareClientCCB for this requestor */
	REQ_RGX_FW_CLIENT_CCB_CONTROL_STRING, /* Index to comment to be dumped in DevMemAllocs when allocating FirmwareClientCCBControl for this requestor */
	REQ_PDUMP_COMMENT,                    /* Index to comment to be dumped in PDUMPs */

	/* should always be at the end */
	REQ_TUPLE_CARDINALITY,
} RGX_CCB_REQUESTOR_TUPLE;

/* Unpack U8 values from U32.
 * Used e.g. to split a packed CCB-size argument into per-byte log2 sizes.
 */
#define U32toU8_Unpack1(U32Packed) (U32Packed & 0xFF)
#define U32toU8_Unpack2(U32Packed) ((U32Packed>>8) & 0xFF)
#define U32toU8_Unpack3(U32Packed) ((U32Packed>>16) & 0xFF)
#define U32toU8_Unpack4(U32Packed) ((U32Packed>>24) & 0xFF)

/* Defines for bit meanings within the ui32CCBFlags member of struct _RGX_CLIENT_CCB_
 *
 * ( X = taken/in use, - = available/unused )
 *
 *   31                             10
 *    |                              ||
 *    ------------------------------XX
 * Bit   Meaning
 *   0 = If set, CCB is still open and commands will be appended to it
 *   1 = If set, do not perform Sync Lockup Recovery (SLR) for this CCB
 *
 * NOTE: the values below are bit INDICES into ui32CCBFlags, not masks.
 */
#define CCB_FLAGS_CCB_STATE_OPEN (0) /*!< This bit is set to indicate CCB is in the 'Open' state. */
#define CCB_FLAGS_SLR_DISABLED   (1) /*!< This bit is set to disable Sync Lockup Recovery (SLR) for this CCB. */


/* Table containing an array of strings for each requestor type in the list of RGX_CCB_REQUESTORS. In addition to its use in
   this module (rgxccb.c), this table is also used to access string to be dumped in PDUMP comments, hence, marking it extern for
   use in other modules.
*/
extern const IMG_CHAR *const aszCCBRequestors[][REQ_TUPLE_CARDINALITY];

/* Emit a PDump drain of the given client CCB (implemented in rgxccb.c). */
PVRSRV_ERROR RGXCCBPDumpDrainCCB(RGX_CLIENT_CCB *psClientCCB,
					IMG_UINT32 ui32PDumpFlags);

/* Create a client CCB and its control buffer for the given requestor type.
 * ui32CCBSizeLog2/ui32CCBMaxSizeLog2 are the initial and maximum log2 byte
 * sizes. On success returns the CCB handle plus its two memory descriptors. */
PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO	*psDevInfo,
						  IMG_UINT32			ui32CCBSizeLog2,
						  IMG_UINT32			ui32CCBMaxSizeLog2,
						  IMG_UINT32			ui32ContextFlags,
						  CONNECTION_DATA		*psConnectionData,
						  RGX_CCB_REQUESTOR_TYPE	eCCBRequestor,
						  RGX_SERVER_COMMON_CONTEXT	*psServerCommonContext,
						  RGX_CLIENT_CCB		**ppsClientCCB,
						  DEVMEM_MEMDESC		**ppsClientCCBMemDesc,
						  DEVMEM_MEMDESC		**ppsClientCCBCtlMemDesc);

/* Tear down a client CCB created by RGXCreateCCB. */
void RGXDestroyCCB(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_CLIENT_CCB *psClientCCB);

/* Query whether the CCB currently has room for a command of ui32CmdSize bytes. */
PVRSRV_ERROR RGXCheckSpaceCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32CmdSize);

/* Reserve ui32CmdSize bytes of CCB space; on success *ppvBufferSpace points
 * at the reserved region for the caller to fill in. */
PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB,
										IMG_UINT32		ui32CmdSize,
										void			**ppvBufferSpace,
										IMG_UINT32		ui32PDumpFlags);

/* Commit a region previously reserved with RGXAcquireCCB. */
void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB,
								IMG_UINT32		ui32CmdSize,
								IMG_UINT32		ui32PDumpFlags);

/* Accessors for the host-side write offset and the CCB wrap mask. */
IMG_UINT32 RGXGetHostWriteOffsetCCB(RGX_CLIENT_CCB *psClientCCB);
IMG_UINT32 RGXGetWrapMaskCCB(RGX_CLIENT_CCB *psClientCCB);

/* Update the CCB's flags word (see the CCB_FLAGS_* bit indices above). */
PVRSRV_ERROR RGXSetCCBFlags(RGX_CLIENT_CCB *psClientCCB,
							IMG_UINT32	ui32Flags);

/* First stage of command-helper initialisation: record the fence/update
 * counts and DM command size in psCmdHelperData. */
void RGXCmdHelperInitCmdCCB_CommandSize(IMG_UINT32             ui32ClientFenceCount,
                                        IMG_UINT32             ui32ClientUpdateCount,
                                        IMG_UINT32             ui32CmdSize,
                                        RGX_CCB_CMD_HELPER_DATA *psCmdHelperData);

/* Second stage of command-helper initialisation: record UFO addresses and
 * values, the DM command payload and the job metadata. */
void RGXCmdHelperInitCmdCCB_OtherData(RGX_CLIENT_CCB            *psClientCCB,
                                      IMG_UINT32                ui32ClientFenceCount,
                                      PRGXFWIF_UFO_ADDR         *pauiFenceUFOAddress,
                                      IMG_UINT32                *paui32FenceValue,
                                      IMG_UINT32                ui32ClientUpdateCount,
                                      PRGXFWIF_UFO_ADDR         *pauiUpdateUFOAddress,
                                      IMG_UINT32                *paui32UpdateValue,
                                      IMG_UINT32                ui32CmdSize,
                                      IMG_PBYTE                 pui8DMCmd,
                                      RGXFWIF_CCB_CMD_TYPE      eType,
                                      IMG_UINT32                ui32ExtJobRef,
                                      IMG_UINT32                ui32IntJobRef,
                                      IMG_UINT32                ui32PDumpFlags,
                                      RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData,
                                      IMG_CHAR                  *pszCommandName,
                                      IMG_BOOL                  bCCBStateOpen,
                                      RGX_CCB_CMD_HELPER_DATA   *psCmdHelperData);

/* Convenience wrapper performing both initialisation stages in one call. */
void
RGXCmdHelperInitCmdCCB(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32ClientFenceCount, + PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress, + IMG_UINT32 *paui32FenceValue, + IMG_UINT32 ui32ClientUpdateCount, + PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress, + IMG_UINT32 *paui32UpdateValue, + IMG_UINT32 ui32CmdSize, + IMG_PBYTE pui8DMCmd, + RGXFWIF_CCB_CMD_TYPE eType, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_UINT32 ui32PDumpFlags, + RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData, + IMG_CHAR *pszCommandName, + IMG_BOOL bCCBStateOpen, + RGX_CCB_CMD_HELPER_DATA *psCmdHelperData); + +PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount, + RGX_CCB_CMD_HELPER_DATA *asCmdHelperData); + +void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount, + RGX_CCB_CMD_HELPER_DATA *asCmdHelperData, + const IMG_CHAR *pcszDMName, + IMG_UINT32 ui32CtxAddr); + +IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32 ui32CmdCount, + RGX_CCB_CMD_HELPER_DATA *asCmdHelperData); + +IMG_UINT32 RGXCmdHelperGetCommandOffset(RGX_CCB_CMD_HELPER_DATA *asCmdHelperData, + IMG_UINT32 ui32Cmdindex); + +IMG_UINT32 RGXCmdHelperGetDMCommandHeaderOffset(RGX_CCB_CMD_HELPER_DATA *psCmdHelperData); + +void DumpStalledCCBCommand(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext, + RGX_CLIENT_CCB *psCurrentClientCCB, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); + +void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo, + PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext, + RGX_CLIENT_CCB *psCurrentClientCCB, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); + +PVRSRV_ERROR CheckForStalledCCB(PVRSRV_DEVICE_NODE *psDevNode, RGX_CLIENT_CCB *psCurrentClientCCB, RGX_KICK_TYPE_DM eKickTypeDM); + +void DumpStalledContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo); +#endif /* __RGXCCB_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxcompute.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxcompute.c new file mode 100644 index 000000000000..2d90a3c8ff83 --- /dev/null 
+++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxcompute.c @@ -0,0 +1,1125 @@ +/*************************************************************************/ /*! +@File +@Title RGX Compute routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX Compute routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "srvkm.h" +#include "pdump_km.h" +#include "pvr_debug.h" +#include "rgxutils.h" +#include "rgxfwutils.h" +#include "rgxcompute.h" +#include "rgx_bvnc_defs_km.h" +#include "rgxmem.h" +#include "allocmem.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "osfunc.h" +#include "rgxccb.h" +#include "rgxhwperf.h" +#include "ospvr_gputrace.h" +#include "htbuffer.h" + +#include "sync_server.h" +#include "sync_internal.h" +#include "sync.h" +#include "rgx_memallocflags.h" + +#include "sync_checkpoint.h" +#include "sync_checkpoint_internal.h" + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +#include "rgxworkest.h" + +#define HASH_CLEAN_LIMIT 6 +#endif + +/* Enable this to dump the compiled list of UFOs prior to kick call */ +#define ENABLE_CMP_UFO_DUMP 0 + +//#define CMP_CHECKPOINT_DEBUG 1 + +#if defined(CMP_CHECKPOINT_DEBUG) +#define CHKPT_DBG(X) PVR_DPF(X) +#else +#define CHKPT_DBG(X) +#endif + +struct _RGX_SERVER_COMPUTE_CONTEXT_ { + PVRSRV_DEVICE_NODE *psDeviceNode; + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; + DEVMEM_MEMDESC *psFWComputeContextMemDesc; + DEVMEM_MEMDESC *psFWFrameworkMemDesc; + DEVMEM_MEMDESC *psFWComputeContextStateMemDesc; + DLLIST_NODE sListNode; + SYNC_ADDR_LIST sSyncAddrListFence; + SYNC_ADDR_LIST sSyncAddrListUpdate; + POS_LOCK hLock; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + 
WORKEST_HOST_DATA sWorkEstData; +#endif +}; + +PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32FrameworkCommandSize, + IMG_PBYTE pbyFrameworkCommand, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32StaticComputecontextStateSize, + IMG_PBYTE pStaticComputecontextState, + IMG_UINT32 ui32PackedCCBSizeU88, + IMG_UINT32 ui32ContextFlags, + IMG_UINT64 ui64RobustnessAddress, + IMG_UINT32 ui32MaxDeadlineMS, + RGX_SERVER_COMPUTE_CONTEXT **ppsComputeContext) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); + RGX_SERVER_COMPUTE_CONTEXT *psComputeContext; + RGX_COMMON_CONTEXT_INFO sInfo; + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_FWCOMPUTECONTEXT *psFWComputeContext; + IMG_UINT32 ui32CCBAllocSizeLog2, ui32CCBMaxAllocSizeLog2; + + /* Prepare cleanup struct */ + *ppsComputeContext = NULL; + + psComputeContext = OSAllocZMem(sizeof(*psComputeContext)); + if (psComputeContext == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* + Create the FW compute context, this has the CDM common + context embedded within it + */ + eError = DevmemFwAllocate(psDevInfo, + sizeof(RGXFWIF_FWCOMPUTECONTEXT), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwComputeContext", + &psComputeContext->psFWComputeContextMemDesc); + if (eError != PVRSRV_OK) + { + goto fail_fwcomputecontext; + } + + eError = OSLockCreate(&psComputeContext->hLock); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to create lock (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_createlock; + } + + psComputeContext->psDeviceNode = psDeviceNode; + + /* + Allocate device memory for the firmware GPU context suspend state. + Note: the FW reads/writes the state to memory by accessing the GPU register interface. 
+ */ + PDUMPCOMMENT("Allocate RGX firmware compute context suspend state"); + + eError = DevmemFwAllocate(psDevInfo, + sizeof(RGXFWIF_COMPUTECTX_STATE), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwComputeContextState", + &psComputeContext->psFWComputeContextStateMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate firmware GPU context suspend state (%d)", + __func__, + eError)); + goto fail_contextsuspendalloc; + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + WorkEstInitCompute(psDevInfo, &psComputeContext->sWorkEstData); +#endif + + /* + * Create the FW framework buffer + */ + eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode, + &psComputeContext->psFWFrameworkMemDesc, + ui32FrameworkCommandSize); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate firmware GPU framework state (%d)", + __func__, + eError)); + goto fail_frameworkcreate; + } + + /* Copy the Framework client data into the framework buffer */ + eError = PVRSRVRGXFrameworkCopyCommand(psComputeContext->psFWFrameworkMemDesc, + pbyFrameworkCommand, + ui32FrameworkCommandSize); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to populate the framework buffer (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_frameworkcopy; + } + + sInfo.psFWFrameworkMemDesc = psComputeContext->psFWFrameworkMemDesc; + + ui32CCBAllocSizeLog2 = U32toU8_Unpack1(ui32PackedCCBSizeU88); + ui32CCBMaxAllocSizeLog2 = U32toU8_Unpack2(ui32PackedCCBSizeU88); + eError = FWCommonContextAllocate(psConnection, + psDeviceNode, + REQ_TYPE_CDM, + RGXFWIF_DM_CDM, + psComputeContext->psFWComputeContextMemDesc, + offsetof(RGXFWIF_FWCOMPUTECONTEXT, sCDMContext), + psFWMemContextMemDesc, + psComputeContext->psFWComputeContextStateMemDesc, + ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_CDM_CCB_SIZE_LOG2, + ui32CCBMaxAllocSizeLog2 ? 
ui32CCBMaxAllocSizeLog2 : RGX_CDM_CCB_MAX_SIZE_LOG2, + ui32ContextFlags, + ui32Priority, + ui32MaxDeadlineMS, + ui64RobustnessAddress, + &sInfo, + &psComputeContext->psServerCommonContext); + if (eError != PVRSRV_OK) + { + goto fail_contextalloc; + } + + eError = DevmemAcquireCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc, + (void **)&psFWComputeContext); + if (eError != PVRSRV_OK) + { + goto fail_acquire_cpu_mapping; + } + + OSDeviceMemCopy(&psFWComputeContext->sStaticComputeContextState, pStaticComputecontextState, ui32StaticComputecontextStateSize); + DevmemPDumpLoadMem(psComputeContext->psFWComputeContextMemDesc, 0, sizeof(RGXFWIF_FWCOMPUTECONTEXT), PDUMP_FLAGS_CONTINUOUS); + DevmemReleaseCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc); + + SyncAddrListInit(&psComputeContext->sSyncAddrListFence); + SyncAddrListInit(&psComputeContext->sSyncAddrListUpdate); + + { + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock); + dllist_add_to_tail(&(psDevInfo->sComputeCtxtListHead), &(psComputeContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock); + } + + *ppsComputeContext = psComputeContext; + return PVRSRV_OK; + +fail_acquire_cpu_mapping: + FWCommonContextFree(psComputeContext->psServerCommonContext); +fail_contextalloc: +fail_frameworkcopy: + DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc); +fail_frameworkcreate: + DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextStateMemDesc); +fail_contextsuspendalloc: + OSLockDestroy(psComputeContext->hLock); +fail_createlock: + DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextMemDesc); +fail_fwcomputecontext: + OSFreeMem(psComputeContext); + return eError; +} + +PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice; +#if 
defined(SUPPORT_WORKLOAD_ESTIMATION)
	RGXFWIF_FWCOMPUTECONTEXT	*psFWComputeContext;
	IMG_UINT32 ui32WorkEstCCBSubmitted;
#endif

	/* Check if the FW has finished with this resource ... */
	eError = RGXFWRequestCommonContextCleanUp(psComputeContext->psDeviceNode,
											  psComputeContext->psServerCommonContext,
											  RGXFWIF_DM_CDM,
											  PDUMP_FLAGS_NONE);

	/* PVRSRV_ERROR_RETRY means the FW is still using the context; it is
	 * returned as-is so the caller can retry the destroy later. */
	if (eError == PVRSRV_ERROR_RETRY)
	{
		return eError;
	}
	else if (eError != PVRSRV_OK)
	{
		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
				__func__,
				PVRSRVGetErrorString(eError)));
		return eError;
	}

#if defined(SUPPORT_WORKLOAD_ESTIMATION)
	/* Map the FW compute context to read its count of submitted
	 * workload-estimation CCB commands for comparison with the
	 * host-side received count below. */
	eError = DevmemAcquireCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc,
									  (void **)&psFWComputeContext);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
				"%s: Failed to map firmware compute context (%s)",
				__func__,
				PVRSRVGetErrorString(eError)));
		return eError;
	}

	ui32WorkEstCCBSubmitted = psFWComputeContext->ui32WorkEstCCBSubmitted;

	DevmemReleaseCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc);

	/* Check if all of the workload estimation CCB commands for this workload are read */
	if (ui32WorkEstCCBSubmitted != psComputeContext->sWorkEstData.ui32WorkEstCCBReceived)
	{
		PVR_DPF((PVR_DBG_WARNING,
				"%s: WorkEst # cmds submitted (%u) and received (%u) mismatch",
				__func__, ui32WorkEstCCBSubmitted,
				psComputeContext->sWorkEstData.ui32WorkEstCCBReceived));

		/* Not all estimation commands have been consumed yet, so signal
		 * the caller to retry the destroy later. */
		return PVRSRV_ERROR_RETRY;
	}
#endif

	/* ...
it has so we can free its resources */ + + OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock); + dllist_remove_node(&(psComputeContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + WorkEstDeInitCompute(psDevInfo, &psComputeContext->sWorkEstData); +#endif + + FWCommonContextFree(psComputeContext->psServerCommonContext); + DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextStateMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextMemDesc); + + OSLockDestroy(psComputeContext->hLock); + OSFreeMem(psComputeContext); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFODevVarBlock, + IMG_UINT32 *paui32ClientUpdateSyncOffset, + IMG_UINT32 *paui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE iUpdateTimeline, + PVRSRV_FENCE *piUpdateFence, + IMG_CHAR pszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], + IMG_UINT32 ui32CmdSize, + IMG_PBYTE pui8DMCmd, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32NumWorkgroups, + IMG_UINT32 ui32NumWorkitems, + IMG_UINT64 ui64DeadlineInus) +{ + RGXFWIF_KCCB_CMD sCmpKCCBCmd; + RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1]; + PVRSRV_ERROR eError; + PVRSRV_ERROR eError2; + IMG_UINT32 ui32CDMCmdOffset = 0; + PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psComputeContext->psServerCommonContext); + RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext); + IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); + IMG_UINT32 ui32FWCtx; + IMG_BOOL bCCBStateOpen = IMG_FALSE; + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataCompute = {0}; 
+ IMG_UINT32 ui32CDMWorkloadDataRO = 0; + IMG_UINT32 ui32CDMCmdHeaderOffset = 0; + IMG_UINT32 ui32CDMCmdOffsetWrapCheck = 0; + RGX_WORKLOAD sWorkloadCharacteristics = {0}; +#endif + + IMG_UINT32 ui32IntClientFenceCount = 0; + PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL; + IMG_UINT32 ui32IntClientUpdateCount = 0; + PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL; + IMG_UINT32 *paui32IntUpdateValue = NULL; + PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE; + IMG_UINT64 uiCheckFenceUID = 0; + IMG_UINT64 uiUpdateFenceUID = 0; + PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL; + PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; + IMG_UINT32 ui32FenceSyncCheckpointCount = 0; + IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL; + PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL; + IMG_UINT32 ui32FenceTimelineUpdateValue = 0; + void *pvUpdateFenceFinaliseData = NULL; + + if (iUpdateTimeline >= 0 && !piUpdateFence) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Ensure we haven't been given a null ptr to + * update values if we have been told we + * have updates + */ + if (ui32ClientUpdateCount > 0) + { + PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL, + "paui32ClientUpdateValue NULL but " + "ui32ClientUpdateCount > 0", + PVRSRV_ERROR_INVALID_PARAMS); + } + + /* Ensure the string is null-terminated (Required for safety) */ + pszUpdateFenceName[31] = '\0'; + + OSLockAcquire(psComputeContext->hLock); + + eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListFence, + 0, + NULL, + NULL); + if (eError != PVRSRV_OK) + { + goto err_populate_sync_addr_list; + } + + ui32IntClientUpdateCount = ui32ClientUpdateCount; + + eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListUpdate, + ui32ClientUpdateCount, + pauiClientUpdateUFODevVarBlock, + paui32ClientUpdateSyncOffset); + if (eError != PVRSRV_OK) + { + goto err_populate_sync_addr_list; + } + if (ui32IntClientUpdateCount && !pauiIntUpdateUFOAddress) + { + pauiIntUpdateUFOAddress = 
psComputeContext->sSyncAddrListUpdate.pasFWAddrs; + } + paui32IntUpdateValue = paui32ClientUpdateValue; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __func__, iCheckFence, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext)); + /* Resolve the sync checkpoints that make up the input fence */ + eError = SyncCheckpointResolveFence(psComputeContext->psDeviceNode->hSyncCheckpointContext, + iCheckFence, + &ui32FenceSyncCheckpointCount, + &apsFenceSyncCheckpoints, + &uiCheckFenceUID, ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __func__, eError)); + goto fail_resolve_input_fence; + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints)); +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32FenceSyncCheckpointCount > 0) + { + IMG_UINT32 ii; + for (ii=0; ii", __func__, ii, (void*)psNextCheckpoint)); + } + } +#endif + /* Create the output fence (if required) */ + if (iUpdateTimeline != PVRSRV_NO_TIMELINE) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateFence=%d, iUpdateTimeline=%d, psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>)...", __func__, iUpdateFence, iUpdateTimeline, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext)); + eError = SyncCheckpointCreateFence(psComputeContext->psDeviceNode, + pszUpdateFenceName, + iUpdateTimeline, + psComputeContext->psDeviceNode->hSyncCheckpointContext, + &iUpdateFence, + &uiUpdateFenceUID, + &pvUpdateFenceFinaliseData, + &psUpdateSyncCheckpoint, + (void*)&psFenceTimelineUpdateSync, + &ui32FenceTimelineUpdateValue, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%d)", __func__, eError)); + goto 
fail_create_output_fence; + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned from SyncCheckpointCreateFence (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=%u)", __func__, iUpdateFence, psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue)); + + CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u, psFenceTimelineUpdateSync=<%p>", __func__, ui32IntClientUpdateCount, (void*)psFenceTimelineUpdateSync)); + /* Append the sync prim update for the timeline (if required) */ + if (psFenceTimelineUpdateSync) + { + IMG_UINT32 *pui32TimelineUpdateWp = NULL; + + /* Allocate memory to hold the list of update values (including our timeline update) */ + pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); + if (!pui32IntAllocatedUpdateValues) + { + /* Failed to allocate memory */ + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc_update_values_mem; + } + OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); + /* Copy the update values into the new memory, then append our timeline update value */ + OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount); +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount)); + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + /* Now set the additional update value */ + pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount; + *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue; + ui32IntClientUpdateCount++; + /* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */ + 
paui32ClientUpdateValue = pui32IntAllocatedUpdateValues; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: append the timeline sync prim addr <%p> to the compute context update list", __func__, (void*)psFenceTimelineUpdateSync)); + /* Now append the timeline sync prim addr to the compute context update list */ + SyncAddrListAppendSyncPrim(&psComputeContext->sSyncAddrListUpdate, + psFenceTimelineUpdateSync); +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount)); + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */ + paui32IntUpdateValue = pui32IntAllocatedUpdateValues; + } + } + + /* Append the checks (from input fence) */ + if (ui32FenceSyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to Compute CDM Fence (&psComputeContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psComputeContext->sSyncAddrListFence)); +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress; + + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListFence, + ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + if (!pauiIntFenceUFOAddress) + { + pauiIntFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs; + } + ui32IntClientFenceCount += ui32FenceSyncCheckpointCount; + } +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)paui32IntUpdateValue; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: Dumping %d update 
values (paui32IntUpdateValue=<%p>)...", __func__, ui32IntClientUpdateCount, (void*)paui32IntUpdateValue)); + for (iii=0; iii", __func__, iii, (void*)pui32Tmp)); + CHKPT_DBG((PVR_DBG_ERROR, "%s: *paui32IntUpdateValue[%d] = 0x%x", __func__, iii, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + + if (psUpdateSyncCheckpoint) + { + /* Append the update (from output fence) */ + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to Compute CDM Update (&psComputeContext->sSyncAddrListUpdate=<%p>, psUpdateSyncCheckpoint=<%p>)...", __func__, (void*)&psComputeContext->sSyncAddrListUpdate , (void*)psUpdateSyncCheckpoint)); + SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListUpdate, + 1, + &psUpdateSyncCheckpoint); + if (!pauiIntUpdateUFOAddress) + { + pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs; + } + ui32IntClientUpdateCount++; +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress=<%p>, pui32Tmp=<%p>, ui32IntClientUpdateCount=%u", __func__, (void*)pauiIntUpdateUFOAddress, (void*)pui32Tmp, ui32IntClientUpdateCount)); + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount)); + +#if (ENABLE_CMP_UFO_DUMP == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: dumping Compute (CDM) fence/updates syncs...", __func__)); + { + IMG_UINT32 ii; + PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress; + PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress; + IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue; + + /* Dump Fence syncs and Update syncs */ + PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) fence syncs 
(&psComputeContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psComputeContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress)); + for (ii=0; ii. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr)); + psTmpIntFenceUFOAddress++; + } + PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) update syncs (&psComputeContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psComputeContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress)); + for (ii=0; iiui32Addr & 0x1) + { + PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue)); + pui32TmpIntUpdateValue++; + } + psTmpIntUpdateUFOAddress++; + } + } +#endif + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + sWorkloadCharacteristics.sCompute.ui32NumberOfWorkgroups = ui32NumWorkgroups; + sWorkloadCharacteristics.sCompute.ui32NumberOfWorkitems = ui32NumWorkitems; + + /* Prepare workload estimation */ + WorkEstPrepare(psComputeContext->psDeviceNode->pvDevice, + &psComputeContext->sWorkEstData, + &psComputeContext->sWorkEstData.uWorkloadMatchingData.sCompute.sDataCDM, + RGXFWIF_CCB_CMD_TYPE_CDM, + &sWorkloadCharacteristics, + ui64DeadlineInus, + &sWorkloadKickDataCompute); +#endif + + RGXCmdHelperInitCmdCCB(psClientCCB, + ui32IntClientFenceCount, + pauiIntFenceUFOAddress, + NULL, + ui32IntClientUpdateCount, + pauiIntUpdateUFOAddress, + paui32IntUpdateValue, + ui32CmdSize, + pui8DMCmd, + RGXFWIF_CCB_CMD_TYPE_CDM, + 
ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + &sWorkloadKickDataCompute, +#else + NULL, +#endif + "Compute", + bCCBStateOpen, + asCmdHelperData); + + eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData); + if (eError != PVRSRV_OK) + { + goto fail_cmdaquire; + } + + + /* + We should reserve space in the kernel CCB here and fill in the command + directly. + This is so if there isn't space in the kernel CCB we can return with + retry back to services client before we take any operations + */ + + /* + We might only be kicking for flush out a padding packet so only submit + the command if the create was successful + */ + if (eError == PVRSRV_OK) + { + /* + All the required resources are ready at this point, we can't fail so + take the required server sync operations and commit all the resources + */ + + ui32CDMCmdOffset = RGXGetHostWriteOffsetCCB(psClientCCB); + RGXCmdHelperReleaseCmdCCB(1, asCmdHelperData, "CDM", FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr); + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* The following is used to determine the offset of the command header containing + the workload estimation data so that can be accessed when the KCCB is read */ + ui32CDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(asCmdHelperData); + + ui32CDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext)); + + /* This checks if the command would wrap around at the end of the CCB and + * therefore would start at an offset of 0 rather than the current command + * offset */ + if (ui32CDMCmdOffset < ui32CDMCmdOffsetWrapCheck) + { + ui32CDMWorkloadDataRO = ui32CDMCmdOffset; + } + else + { + ui32CDMWorkloadDataRO = 0; + } +#endif + + /* Construct the kernel compute CCB command. 
*/ + sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + sCmpKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext); + sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); + sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); + sCmpKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; + + /* Add the Workload data into the KCCB kick */ +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Store the offset to the CCCB command header so that it can be referenced + * when the KCCB command reaches the FW */ + sCmpKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32CDMWorkloadDataRO + ui32CDMCmdHeaderOffset; +#else + sCmpKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; +#endif + + ui32FWCtx = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr; + + HTBLOGK(HTB_SF_MAIN_KICK_CDM, + sCmpKCCBCmd.uCmdData.sCmdKickData.psContext, + ui32CDMCmdOffset + ); + RGXSRV_HWPERF_ENQ(psComputeContext, + OSGetCurrentClientProcessIDKM(), + ui32FWCtx, + ui32ExtJobRef, + ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_CDM, + iCheckFence, + iUpdateFence, + iUpdateTimeline, + uiCheckFenceUID, + uiUpdateFenceUID, + NO_DEADLINE, + NO_CYCEST); + + /* + * Submit the compute command to the firmware. 
+ */ + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError2 = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice, + RGXFWIF_DM_CDM, + &sCmpKCCBCmd, + ui32ClientCacheOpSeqNum, + ui32PDumpFlags); + if (eError2 != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + if (eError2 != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s failed to schedule kernel CCB command (%s)", + __func__, + PVRSRVGetErrorString(eError2))); + } + else + { + PVRGpuTraceEnqueueEvent(psComputeContext->psDeviceNode->pvDevice, + ui32FWCtx, ui32ExtJobRef, ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_CDM); + } + /* + * Now check eError (which may have returned an error from our earlier call + * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first + * so we check it now... + */ + if (eError != PVRSRV_OK ) + { + goto fail_cmdaquire; + } + +#if defined(NO_HARDWARE) + /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ + if (psUpdateSyncCheckpoint) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint))); + SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint); + } + if (psFenceTimelineUpdateSync) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating NOHW sync prim<%p> to %d", __func__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue)); + SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue); + } + SyncCheckpointNoHWUpdateTimelines(NULL); +#endif /* defined(NO_HARDWARE) */ + + *piUpdateFence = iUpdateFence; + + if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE)) + { + SyncCheckpointFinaliseFence(psComputeContext->psDeviceNode, iUpdateFence, + pvUpdateFenceFinaliseData, + psUpdateSyncCheckpoint, pszUpdateFenceName); + } + /* Drop the references taken on the sync checkpoints in the 
+ * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } + /* Free memory allocated to hold the internal list of update values */ + if (pui32IntAllocatedUpdateValues) + { + OSFreeMem(pui32IntAllocatedUpdateValues); + pui32IntAllocatedUpdateValues = NULL; + } + + OSLockRelease(psComputeContext->hLock); + + return PVRSRV_OK; + +fail_cmdaquire: + SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListFence); + SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListUpdate); +fail_alloc_update_values_mem: + if (iUpdateFence != PVRSRV_NO_FENCE) + { + SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData); + } +fail_create_output_fence: + /* Drop the references taken on the sync checkpoints in the + * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); +fail_resolve_input_fence: + +err_populate_sync_addr_list: + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } + /* Free memory allocated to hold the internal list of update values */ + if (pui32IntAllocatedUpdateValues) + { + OSFreeMem(pui32IntAllocatedUpdateValues); + pui32IntAllocatedUpdateValues = NULL; + } + OSLockRelease(psComputeContext->hLock); + return eError; +} + +PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext) +{ + RGXFWIF_KCCB_CMD sFlushCmd; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32kCCBCommandSlot; + PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice; + +#if defined(PDUMP) + 
PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit Compute flush"); +#endif + sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL; + sFlushCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_FALSE; + sFlushCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_TRUE; + sFlushCmd.uCmdData.sSLCFlushInvalData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext); + + OSLockAcquire(psComputeContext->hLock); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, + RGXFWIF_DM_CDM, + &sFlushCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule SLC flush command (%s)", + __func__, + PVRSRVGetErrorString(eError))); + } + else + { + /* Wait for the SLC flush to complete */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Compute flush aborted (%s)", + __func__, + PVRSRVGetErrorString(eError))); + } + else if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & + RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__)); + } + } + + OSLockRelease(psComputeContext->hLock); + return eError; +} + + +PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice; + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, CDM_CONTROL_STREAM_FORMAT) && + 2 == RGX_GET_FEATURE_VALUE(psDevInfo, CDM_CONTROL_STREAM_FORMAT)) + { + + RGXFWIF_KCCB_CMD sKCCBCmd; + PVRSRV_ERROR eError; + + OSLockAcquire(psComputeContext->hLock); + + /* Schedule the firmware command */ + sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE; + 
sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice, + RGXFWIF_DM_CDM, + &sKCCBCmd, + 0, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule the FW command %d (%s)", + __func__, + eError, + PVRSRVGETERRORSTRING(eError))); + } + + OSLockRelease(psComputeContext->hLock); + return eError; + }else + { + return PVRSRV_ERROR_NOT_SUPPORTED; + } +} + + +PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + IMG_UINT32 ui32Priority) +{ + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + + OSLockAcquire(psComputeContext->hLock); + + eError = ContextSetPriority(psComputeContext->psServerCommonContext, + psConnection, + psComputeContext->psDeviceNode->pvDevice, + ui32Priority, + RGXFWIF_DM_CDM); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the compute context (%s)", __func__, PVRSRVGetErrorString(eError))); + } + + OSLockRelease(psComputeContext->hLock); + return eError; +} + +/* + * PVRSRVRGXGetLastComputeContextResetReasonKM + */ +PVRSRV_ERROR PVRSRVRGXGetLastComputeContextResetReasonKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + IMG_UINT32 *peLastResetReason, + IMG_UINT32 *pui32LastResetJobRef) +{ + PVR_ASSERT(psComputeContext != NULL); + PVR_ASSERT(peLastResetReason != NULL); + PVR_ASSERT(pui32LastResetJobRef != NULL); + + *peLastResetReason = FWCommonContextGetLastResetReason(psComputeContext->psServerCommonContext, + pui32LastResetJobRef); + + return PVRSRV_OK; +} + +/* + * PVRSRVRGXSetComputeContextPropertyKM + */ +PVRSRV_ERROR 
PVRSRVRGXSetComputeContextPropertyKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + RGX_CONTEXT_PROPERTY eContextProperty, + IMG_UINT64 ui64Input, + IMG_UINT64 *pui64Output) +{ + PVRSRV_ERROR eError; + + switch (eContextProperty) + { + case RGX_CONTEXT_PROPERTY_FLAGS: + { + OSLockAcquire(psComputeContext->hLock); + eError = FWCommonContextSetFlags(psComputeContext->psServerCommonContext, + (IMG_UINT32)ui64Input); + OSLockRelease(psComputeContext->hLock); + PVR_LOG_IF_ERROR(eError, "FWCommonContextSetFlags"); + break; + } + + default: + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty)); + eError = PVRSRV_ERROR_NOT_SUPPORTED; + } + } + + return eError; +} + +void DumpComputeCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel) +{ + DLLIST_NODE *psNode, *psNext; + OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock); + dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext) + { + RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx = + IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode); + DumpFWCommonContextInfo(psCurrentServerComputeCtx->psServerCommonContext, + pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + } + OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock); +} + +IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_UINT32 ui32ContextBitMask = 0; + DLLIST_NODE *psNode, *psNext; + OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock); + dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext) + { + RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx = + IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode); + + if (CheckStalledClientCommonContext(psCurrentServerComputeCtx->psServerCommonContext, RGX_KICK_TYPE_DM_CDM) + == PVRSRV_ERROR_CCCB_STALLED) + { + ui32ContextBitMask |= RGX_KICK_TYPE_DM_CDM; + } + } + 
OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock); + return ui32ContextBitMask; +} + +/****************************************************************************** + End of file (rgxcompute.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxcompute.h b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxcompute.h new file mode 100644 index 000000000000..75033ef3e199 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxcompute.h @@ -0,0 +1,171 @@ +/*************************************************************************/ /*! +@File +@Title RGX compute functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the RGX compute functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__RGXCOMPUTE_H__) +#define __RGXCOMPUTE_H__ + +#include "devicemem.h" +#include "device.h" +#include "rgxfwutils.h" +#include "rgx_fwif_resetframework.h" +#include "rgxdebug.h" +#include "pvr_notifier.h" + +#include "sync_server.h" +#include "sync_internal.h" +#include "connection_server.h" + + +typedef struct _RGX_SERVER_COMPUTE_CONTEXT_ RGX_SERVER_COMPUTE_CONTEXT; + +/*! 
+******************************************************************************* + @Function PVRSRVRGXCreateComputeContextKM + + @Description + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32FrameworkRegisterSize, + IMG_PBYTE pbyFrameworkRegisters, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32StaticComputecontextStateSize, + IMG_PBYTE pStaticComputecontextState, + IMG_UINT32 ui32PackedCCBSizeU88, + IMG_UINT32 ui32ContextFlags, + IMG_UINT64 ui64RobustnessAddress, + IMG_UINT32 ui32MaxDeadlineMS, + RGX_SERVER_COMPUTE_CONTEXT **ppsComputeContext); + +/*! +******************************************************************************* + @Function PVRSRVRGXDestroyComputeContextKM + + @Description + Server-side implementation of RGXDestroyComputeContext + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext); + + +/*! 
+******************************************************************************* + @Function PVRSRVRGXKickCDMKM + + @Description + Server-side implementation of RGXKickCDM + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFODevVarBlock, + IMG_UINT32 *paui32ClientUpdateSyncOffset, + IMG_UINT32 *paui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE iUpdateTimeline, + PVRSRV_FENCE *piUpdateFence, + IMG_CHAR pcszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], + IMG_UINT32 ui32CmdSize, + IMG_PBYTE pui8DMCmd, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32NumWorkgroups, + IMG_UINT32 ui32NumWorkitems, + IMG_UINT64 ui64DeadlineInus); + +/*! +******************************************************************************* + @Function PVRSRVRGXFlushComputeDataKM + + @Description + Server-side implementation of RGXFlushComputeData + + @Input psComputeContext - Compute context to flush + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext); + +/*! 
+******************************************************************************* + + @Function PVRSRVRGXNotifyComputeWriteOffsetUpdateKM + @Description Server-side implementation of RGXNotifyComputeWriteOffsetUpdate + + @Input psComputeContext - Compute context to flush + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext); + +PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + IMG_UINT32 ui32Priority); + +PVRSRV_ERROR PVRSRVRGXGetLastComputeContextResetReasonKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + IMG_UINT32 *peLastResetReason, + IMG_UINT32 *pui32LastResetJobRef); + +PVRSRV_ERROR PVRSRVRGXSetComputeContextPropertyKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + RGX_CONTEXT_PROPERTY eContextProperty, + IMG_UINT64 ui64Input, + IMG_UINT64 *pui64Output); + +/* Debug - Dump debug info of compute contexts on this device */ +void DumpComputeCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel); + +/* Debug/Watchdog - check if client compute contexts are stalled */ +IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo); + +#endif /* __RGXCOMPUTE_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxdebug.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxdebug.c new file mode 100644 index 000000000000..30f0207ac37f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxdebug.c @@ -0,0 +1,5497 @@ +/*************************************************************************/ /*! +@File +@Title Rgx debug information +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description RGX debugging functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +//#define PVR_DPF_FUNCTION_TRACE_ON 1 +#undef PVR_DPF_FUNCTION_TRACE_ON + +#include "img_defs.h" +#include "rgxdefs_km.h" +#include "rgxdevice.h" +#include "rgxmem.h" +#include "allocmem.h" +#include "cache_km.h" +#include "osfunc.h" + +#include "rgxdebug.h" +#include "pvrversion.h" +#include "pvr_debug.h" +#include "srvkm.h" +#include "rgxutils.h" +#include "tlstream.h" +#include "rgxfwutils.h" +#include "pvrsrv.h" +#include "services_km.h" + +#include "rgxfwimageutils.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "devicemem_utils.h" +#include "rgx_fwif_km.h" +#include "rgx_fwif_sf.h" +#include "rgxfw_log_helper.h" +#include "fwtrace_string.h" +#include "rgxfwimageutils.h" +#include "fwload.h" + +#include "rgxta3d.h" +#include "rgxkicksync.h" +#include "rgxcompute.h" +#include "rgxtransfer.h" +#include "rgxtdmtransfer.h" +#include "rgxtimecorr.h" +#include "rgx_options.h" +#include "rgxinit.h" +#include "devicemem_history_server.h" +#include "info_page.h" +#include "rgx_bvnc_defs_km.h" + +#if !defined(PVR_ARCH_NAME) +#define PVR_ARCH_NAME "Unknown" +#endif + +#define PVR_DUMP_FIRMWARE_INFO(x) \ + PVR_DUMPDEBUG_LOG("FW info: %d.%d @ %8d (%s) build options: 0x%08x", \ + PVRVERSION_UNPACK_MAJ((x).ui32DDKVersion), \ + PVRVERSION_UNPACK_MIN((x).ui32DDKVersion), \ + (x).ui32DDKBuild, \ + ((x).ui32BuildOptions & OPTIONS_DEBUG_MASK) ? 
"debug":"release",\ + (x).ui32BuildOptions); + +#define DD_SUMMARY_INDENT "" +#define DD_NORMAL_INDENT " " + +#define RGX_DEBUG_STR_SIZE (150U) +#define MAX_FW_DESCRIPTION_LENGTH (500U) + +#define RGX_CR_BIF_CAT_BASE0 (0x1200U) +#define RGX_CR_BIF_CAT_BASE1 (0x1208U) + +#define RGX_CR_BIF_CAT_BASEN(n) \ + RGX_CR_BIF_CAT_BASE0 + \ + ((RGX_CR_BIF_CAT_BASE1 - RGX_CR_BIF_CAT_BASE0) * n) + + +#define RGXDBG_BIF_IDS \ + X(BIF0)\ + X(BIF1)\ + X(TEXAS_BIF)\ + X(DPX_BIF) + +#define RGXDBG_SIDEBAND_TYPES \ + X(META)\ + X(TLA)\ + X(DMA)\ + X(VDMM)\ + X(CDM)\ + X(IPP)\ + X(PM)\ + X(TILING)\ + X(MCU)\ + X(PDS)\ + X(PBE)\ + X(VDMS)\ + X(IPF)\ + X(ISP)\ + X(TPF)\ + X(USCS)\ + X(PPP)\ + X(VCE)\ + X(TPF_CPF)\ + X(IPF_CPF)\ + X(FBCDC) + +typedef enum +{ +#define X(NAME) RGXDBG_##NAME, + RGXDBG_BIF_IDS +#undef X +} RGXDBG_BIF_ID; + +typedef enum +{ +#define X(NAME) RGXDBG_##NAME, + RGXDBG_SIDEBAND_TYPES +#undef X +} RGXDBG_SIDEBAND_TYPE; + +static const IMG_CHAR *const pszPowStateName[] = +{ +#define X(NAME) #NAME, + RGXFWIF_POW_STATES +#undef X +}; + +static const IMG_CHAR *const pszBIFNames[] = +{ +#define X(NAME) #NAME, + RGXDBG_BIF_IDS +#undef X +}; + +typedef struct _IMG_FLAGS2DESC_ +{ + IMG_UINT32 uiFlag; + const IMG_CHAR *pszLabel; +} IMG_FLAGS2DESC; + +static const IMG_FLAGS2DESC asCswOpts2Description[] = +{ + {RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST, " Fast CSW profile;"}, + {RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM, " Medium CSW profile;"}, + {RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW, " Slow CSW profile;"}, + {RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY, " No Delay CSW profile;"}, + {RGXFWIF_INICFG_CTXSWITCH_MODE_RAND, " Random Csw enabled;"}, + {RGXFWIF_INICFG_CTXSWITCH_SRESET_EN, " SoftReset;"}, + {RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX, " VDM CS INDEX mode;"}, + {RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INSTANCE, " VDM CS INSTANCE mode;"}, + {RGXFWIF_INICFG_VDM_CTX_STORE_MODE_LIST, " VDM CS LIST mode;"}, +}; + +static const IMG_FLAGS2DESC asMisc2Description[] = +{ + 
{RGXFWIF_INICFG_POW_RASCALDUST, " Power Rascal/Dust;"}, + {RGXFWIF_INICFG_HWPERF_EN, " HwPerf EN;"}, + {RGXFWIF_INICFG_HWR_EN, " HWR EN;"}, + {RGXFWIF_INICFG_HWR_EN, " FBCDCv3.1;"}, + {RGXFWIF_INICFG_CHECK_MLIST_EN, " Check MList;"}, + {RGXFWIF_INICFG_DISABLE_CLKGATING_EN, " ClockGating Off;"}, + {RGXFWIF_INICFG_POLL_COUNTERS_EN, " Poll Counters;"}, + {RGXFWIF_INICFG_REGCONFIG_EN, " Register Config;"}, + {RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY, " Assert on OOM;"}, + {RGXFWIF_INICFG_HWP_DISABLE_FILTER, " HWP Filter Off;"}, + {RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN, " Custom PerfTimer;"}, + {RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN, " CDM Random kill;"}, + {RGXFWIF_INICFG_DISABLE_DM_OVERLAP, " DM Overlap Off;"}, + {RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER, " Assert on HWR;"}, + {RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED, " Coherent fabric on;"}, + {RGXFWIF_INICFG_VALIDATE_IRQ, " Validate IRQ;"}, + {RGXFWIF_INICFG_DISABLE_PDP_EN, " PDUMP Panic off;"}, + {RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN, " SPU Pow mask change on;"}, + {RGXFWIF_INICFG_WORKEST, " Workload Estim;"}, + {RGXFWIF_INICFG_PDVFS, " PDVFS;"}, + {RGXFWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND, " CDM task demand arbitration;"}, + {RGXFWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN, " CDM round-robin arbitration;"}, + {RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP, " ISP v1 scheduling;"}, + {RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP, " ISP v2 scheduling;"}, + {RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER, " Validate SOC&USC timers;"}, +}; + +static const IMG_FLAGS2DESC asFwOsCfg2Description[] = +{ + {RGXFWIF_INICFG_OS_CTXSWITCH_TA_EN, " TA;"}, + {RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN, " 3D;"}, + {RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN, " CDM;"}, + {RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM, " LowPrio TDM;"}, + {RGXFWIF_INICFG_OS_LOW_PRIO_CS_TA, " LowPrio TA;"}, + {RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D, " LowPrio 3D;"}, + {RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM, " LowPrio CDM;"}, +}; + +static const IMG_FLAGS2DESC asHwrState2Description[] = +{ + {RGXFWIF_HWR_HARDWARE_OK, " HWR 
OK;"}, + {RGXFWIF_HWR_ANALYSIS_DONE, " Analysis done;"}, + {RGXFWIF_HWR_GENERAL_LOCKUP, " General lockup;"}, + {RGXFWIF_HWR_DM_RUNNING_OK, " DM running ok;"}, + {RGXFWIF_HWR_DM_STALLING, " DM stalling;"}, + {RGXFWIF_HWR_FW_FAULT, " FW fault;"}, + {RGXFWIF_HWR_RESTART_REQUESTED, " Restarting;"}, +}; + +/* Per-DM recovery state bits -> description fragments. */ +static const IMG_FLAGS2DESC asDmState2Description[] = +{ + {RGXFWIF_DM_STATE_WORKING, " working;"}, + {RGXFWIF_DM_STATE_READY_FOR_HWR, " ready for hwr;"}, + {RGXFWIF_DM_STATE_NEEDS_SKIP, " needs skip;"}, + {RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP, " needs PR cleanup;"}, + {RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR, " needs trace clear;"}, + {RGXFWIF_DM_STATE_GUILTY_LOCKUP, " guilty lockup;"}, + {RGXFWIF_DM_STATE_INNOCENT_LOCKUP, " innocent lockup;"}, + {RGXFWIF_DM_STATE_GUILTY_OVERRUNING, " guilty overrunning;"}, + {RGXFWIF_DM_STATE_INNOCENT_OVERRUNING, " innocent overrunning;"}, + {RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH, " hard context switching;"}, +}; + +#if !defined(NO_HARDWARE) +/* Translation of MIPS exception encoding */ +typedef struct _MIPS_EXCEPTION_ENCODING_ +{ + const IMG_CHAR *const pszStr; /* Error type */ + const IMG_BOOL bIsFatal; /* Error is fatal or non-fatal */ +} MIPS_EXCEPTION_ENCODING; + +/* Indexed by the MIPS ExcCode; NULL entries are reserved/unnamed codes. */ +static const MIPS_EXCEPTION_ENCODING apsMIPSExcCodes[] = +{ + {"Interrupt", IMG_FALSE}, + {"TLB modified exception", IMG_FALSE}, + {"TLB exception (load/instruction fetch)", IMG_FALSE}, + {"TLB exception (store)", IMG_FALSE}, + {"Address error exception (load/instruction fetch)", IMG_TRUE}, + {"Address error exception (store)", IMG_TRUE}, + {"Bus error exception (instruction fetch)", IMG_TRUE}, + {"Bus error exception (load/store)", IMG_TRUE}, + {"Syscall exception", IMG_FALSE}, + {"Breakpoint exception (FW assert)", IMG_FALSE}, + {"Reserved instruction exception", IMG_TRUE}, + {"Coprocessor Unusable exception", IMG_FALSE}, + {"Arithmetic Overflow exception", IMG_FALSE}, + {"Trap exception", IMG_FALSE}, + {NULL, IMG_FALSE}, + {NULL, IMG_FALSE}, + {"Implementation-Specific 
Exception 1 (COP2)", IMG_FALSE}, + {"CorExtend Unusable", IMG_FALSE}, + {"Coprocessor 2 exceptions", IMG_FALSE}, + {"TLB Read-Inhibit", IMG_TRUE}, + {"TLB Execute-Inhibit", IMG_TRUE}, + {NULL, IMG_FALSE}, + {NULL, IMG_FALSE}, + {"Reference to WatchHi/WatchLo address", IMG_FALSE}, + {"Machine check", IMG_FALSE}, + {NULL, IMG_FALSE}, + {"DSP Module State Disabled exception", IMG_FALSE}, + {NULL, IMG_FALSE}, + {NULL, IMG_FALSE}, + {NULL, IMG_FALSE}, + /* Can only happen in MIPS debug mode */ + {"Parity error", IMG_FALSE}, + {NULL, IMG_FALSE} +}; + +/* Map a MIPS exception code to its description string. Returns NULL both for + * out-of-range codes (after a warning) and for in-range reserved codes whose + * table entry is NULL -- callers must handle a NULL result. */ +static IMG_CHAR const *_GetMIPSExcString(IMG_UINT32 ui32ExcCode) +{ + if (ui32ExcCode >= sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING)) + { + PVR_DPF((PVR_DBG_WARNING, + "Only %lu exceptions available in MIPS, %u is not a valid exception code", + (unsigned long)sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING), ui32ExcCode)); + return NULL; + } + + return apsMIPSExcCodes[ui32ExcCode].pszStr; +} +#endif + +/* One bit of the MIPS CP0 Debug register plus its meaning. */ +typedef struct _RGXMIPSFW_C0_DEBUG_TBL_ENTRY_ +{ + IMG_UINT32 ui32Mask; + const IMG_CHAR * pszExplanation; +} RGXMIPSFW_C0_DEBUG_TBL_ENTRY; + +#if !defined(NO_HARDWARE) +static const RGXMIPSFW_C0_DEBUG_TBL_ENTRY sMIPS_C0_DebugTable[] = +{ + { RGXMIPSFW_C0_DEBUG_DSS, "Debug single-step exception occurred" }, + { RGXMIPSFW_C0_DEBUG_DBP, "Debug software breakpoint exception occurred" }, + { RGXMIPSFW_C0_DEBUG_DDBL, "Debug data break exception occurred on a load" }, + { RGXMIPSFW_C0_DEBUG_DDBS, "Debug data break exception occurred on a store" }, + { RGXMIPSFW_C0_DEBUG_DIB, "Debug instruction break exception occurred" }, + { RGXMIPSFW_C0_DEBUG_DINT, "Debug interrupt exception occurred" }, + { RGXMIPSFW_C0_DEBUG_DIBIMPR, "Imprecise debug instruction break exception occurred" }, + { RGXMIPSFW_C0_DEBUG_DDBLIMPR, "Imprecise debug data break load exception occurred" }, + { RGXMIPSFW_C0_DEBUG_DDBSIMPR, "Imprecise debug data break store exception occurred" }, + { RGXMIPSFW_C0_DEBUG_IEXI, "Imprecise error exception 
inhibit controls exception occurred" }, + { RGXMIPSFW_C0_DEBUG_DBUSEP, "Data access Bus Error exception pending" }, + { RGXMIPSFW_C0_DEBUG_CACHEEP, "Imprecise Cache Error pending" }, + { RGXMIPSFW_C0_DEBUG_MCHECKP, "Imprecise Machine Check exception pending" }, + { RGXMIPSFW_C0_DEBUG_IBUSEP, "Instruction fetch Bus Error exception pending" }, + { RGXMIPSFW_C0_DEBUG_DBD, "Debug exception occurred in branch delay slot" } +}; +#endif + +/* Printable names for RGXFW_CONNECTION_FW_* states, indexed by state value. */ +static const IMG_CHAR * const apszFwOsStateName[RGXFW_CONNECTION_FW_STATE_COUNT] = +{ + "offline", + "ready", + "active", + "offloading" +}; + +#if defined(PVR_ENABLE_PHR) +/* Periodic Hardware Reset mode bits -> description. */ +static const IMG_FLAGS2DESC asPHRConfig2Description[] = +{ + {BIT_ULL(RGXIF_PHR_MODE_OFF), "off"}, + {BIT_ULL(RGXIF_PHR_MODE_RD_RESET), "reset RD hardware"}, + {BIT_ULL(RGXIF_PHR_MODE_FULL_RESET), "full gpu reset "}, +}; +#endif + +/* Repeatedly read a META register through the Slave Port until + * (value & ui32Mask) == ui32PollValue. Gives up after ~1000 reads and + * returns PVRSRV_ERROR_RETRY; propagates any read error immediately. */ +static PVRSRV_ERROR +RGXPollMetaRegThroughSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegOffset, + IMG_UINT32 ui32PollValue, IMG_UINT32 ui32Mask) +{ + IMG_UINT32 ui32RegValue, ui32NumPolls = 0; + PVRSRV_ERROR eError; + + do + { + eError = RGXReadWithSP(psDevInfo, ui32RegOffset, &ui32RegValue); + if (eError != PVRSRV_OK) + { + return eError; + } + } while (((ui32RegValue & ui32Mask) != ui32PollValue) && (ui32NumPolls++ < 1000)); + + return ((ui32RegValue & ui32Mask) == ui32PollValue) ? PVRSRV_OK : PVRSRV_ERROR_RETRY; +} + +/* Read a META core register via the TXUXXRXRQ request / TXUXXRXDT data + * registers: wait for DREADY, post the read request, wait again, fetch. */ +static PVRSRV_ERROR +RGXReadMetaCoreReg(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegAddr, IMG_UINT32 *pui32RegVal) +{ + PVRSRV_ERROR eError; + + /* Core Read Ready? */ + eError = RGXPollMetaRegThroughSP(psDevInfo, + META_CR_TXUXXRXRQ_OFFSET, + META_CR_TXUXXRXRQ_DREADY_BIT, + META_CR_TXUXXRXRQ_DREADY_BIT); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXPollMetaRegThroughSP"); + + /* Set the reg we are interested in reading */ + eError = RGXWriteWithSP(psDevInfo, META_CR_TXUXXRXRQ_OFFSET, + ui32RegAddr | META_CR_TXUXXRXRQ_RDnWR_BIT); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXWriteWithSP"); + + /* Core Read Done? 
*/ + eError = RGXPollMetaRegThroughSP(psDevInfo, + META_CR_TXUXXRXRQ_OFFSET, + META_CR_TXUXXRXRQ_DREADY_BIT, + META_CR_TXUXXRXRQ_DREADY_BIT); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXPollMetaRegThroughSP"); + + /* Read the value */ + return RGXReadWithSP(psDevInfo, META_CR_TXUXXRXDT_OFFSET, pui32RegVal); +} + +/* Read one 32-bit word from a META/FW address; logs (but still returns) any + * failure from the underlying RGXReadMETAAddr. */ +PVRSRV_ERROR +RGXReadWithSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 *pui32Value) +{ + PVRSRV_ERROR eError = RGXReadMETAAddr(psDevInfo, ui32FWAddr, pui32Value); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: %s", __func__, PVRSRVGetErrorString(eError))); + } + + return eError; +} + +/* Write one 32-bit word to a META/FW address; logs (but still returns) any + * failure from the underlying RGXWriteMETAAddr. */ +PVRSRV_ERROR +RGXWriteWithSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 ui32Value) +{ + PVRSRV_ERROR eError = RGXWriteMETAAddr(psDevInfo, ui32FWAddr, ui32Value); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: %s", __func__, PVRSRVGetErrorString(eError))); + } + return eError; +} + +#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) +/* Compare the host-side copy of a FW section, word by word, against what the + * META core sees through the Slave Port, starting ui32StartOffset bytes in. + * Returns PVRSRV_ERROR_FW_IMAGE_MISMATCH on the first differing word. */ +static PVRSRV_ERROR _ValidateWithSP(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_DEV_VIRTADDR *psFWAddr, + void *pvHostCodeAddr, + IMG_UINT32 ui32MaxLen, + const IMG_CHAR *pszDesc, + IMG_UINT32 ui32StartOffset) +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32Value, i; + IMG_UINT32 ui32FWCodeDevVAAddr = psFWAddr->ui32Addr + ui32StartOffset; + IMG_UINT32 *pui32FWCode = (IMG_PUINT32) ((IMG_PBYTE)pvHostCodeAddr + ui32StartOffset); + +#ifdef CONFIG_MCST + /* MCST: the Slave Port is unavailable under the machine simulator. */ + if (NATIVE_IS_MACHINE_SIM) { + PVR_DUMPDEBUG_LOG("Skiped validating between Host and Meta view of the %s (Slave port not supported under simulator)", pszDesc); + return PVRSRV_OK; + } +#endif + +#if 0 + { + PMR *psFWImagePMR; + IMG_CPU_PHYADDR sCPUPAddrStart, sCPUPAddrEnd; + IMG_BOOL bValid; + + PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; + + eError = DevmemServerGetImportHandle(psDevInfo->psRGXFWCodeMemDesc, + (void **)&psFWImagePMR); + + eError = 
PMR_CpuPhysAddr(psFWImagePMR, 12, 1, 0, + &sCPUPAddrStart, &bValid); + sCPUPAddrEnd.uiAddr = sCPUPAddrStart.uiAddr + ui32MaxLen; + + PVR_DPF((PVR_DBG_MESSAGE, "!!! [debug] flushing of FW code: v(%p - %p)), p(0x%010llx - 0x%010llx)", + pui32FWCode, (IMG_UINT8 *)pui32FWCode + ui32MaxLen, + sCPUPAddrStart.uiAddr, sCPUPAddrEnd.uiAddr )); + OSCPUCacheFlushRangeKM(psDeviceNode, pui32FWCode, (IMG_UINT8 *) pui32FWCode + ui32MaxLen, + sCPUPAddrStart, sCPUPAddrEnd); + } +#endif + + ui32MaxLen -= ui32StartOffset; + ui32MaxLen /= sizeof(IMG_UINT32); /* Byte -> 32 bit words */ + + for (i = 0; i < ui32MaxLen; i++) + { + eError = RGXReadMETAAddr(psDevInfo, ui32FWCodeDevVAAddr, &ui32Value); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: %s", __func__, PVRSRVGetErrorString(eError))); + return eError; + } + + PVR_DPF((PVR_DBG_VERBOSE, "0x%x: CPU 0x%08x, FW 0x%08x", i * 4, pui32FWCode[i], ui32Value)); + + if (pui32FWCode[i] != ui32Value) + { + PVR_DUMPDEBUG_LOG("_ValidateWithSP: Mismatch while validating %s at offset 0x%x: CPU 0x%08x (%p), FW 0x%08x (%x)", + pszDesc, + (i * 4) + ui32StartOffset, pui32FWCode[i], pui32FWCode, ui32Value, ui32FWCodeDevVAAddr); + return PVRSRV_ERROR_FW_IMAGE_MISMATCH; + } + + ui32FWCodeDevVAAddr += 4; + } + + PVR_DUMPDEBUG_LOG("Match between Host and Meta view of the %s", pszDesc); + return PVRSRV_OK; +} +#endif + +#if !defined(NO_HARDWARE) +/* Re-load the MIPS FW image from the filesystem and memcmp it against the + * device's FW code memory, reporting the first few mismatching words. */ +static PVRSRV_ERROR _ValidateFWImageForMIPS(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo, + char *pszFormat) +{ +#if !defined(SUPPORT_TRUSTED_DEVICE) + PVRSRV_ERROR eError; + IMG_UINT32 *pui32HostFWCode = NULL; + OS_FW_IMAGE *psRGXFW = NULL; + const IMG_BYTE *pbRGXFirmware = NULL; + IMG_UINT32 *pui32CodeMemoryPointer; + RGX_LAYER_PARAMS sLayerParams; + sLayerParams.psDevInfo = psDevInfo; + + /* Load FW from system for code verification */ + pui32HostFWCode = OSAllocZMem(psDevInfo->ui32FWCodeSizeInBytes); + if (pui32HostFWCode == NULL) + { + 
PVR_DPF((PVR_DBG_ERROR, + "%s: Failed in allocating memory for FW code. " + "So skipping FW code verification", + __func__)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* Load FW image */ + pbRGXFirmware = RGXLoadAndGetFWData(psDevInfo->psDeviceNode, &psRGXFW); + if (!pbRGXFirmware) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to load FW image file.",__func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto cleanup_initfw; + } + + eError = ProcessELFCommandStream(&sLayerParams, pbRGXFirmware, pui32HostFWCode, NULL, NULL, NULL); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed in parsing FW image file.", __func__)); + goto cleanup_initfw; + } + + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pui32CodeMemoryPointer); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error in acquiring MIPS FW code memory area (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto cleanup_initfw; + } + + if (OSMemCmp(pui32HostFWCode, pui32CodeMemoryPointer, psDevInfo->ui32FWCodeSizeInBytes) == 0) + { + PVR_DUMPDEBUG_LOG("%sMatch between Host and MIPS views of the FW code", pszFormat); + } + else + { + IMG_UINT32 ui32Count = 10; /* Show only the first 10 mismatches */ + IMG_UINT32 ui32Offset; + + PVR_DUMPDEBUG_LOG("%sMismatch between Host and MIPS views of the FW code", pszFormat); + for (ui32Offset = 0; (ui32Offset*4 < psDevInfo->ui32FWCodeSizeInBytes) || (ui32Count == 0); ui32Offset++) + { + if (pui32HostFWCode[ui32Offset] != pui32CodeMemoryPointer[ui32Offset]) + { + PVR_DUMPDEBUG_LOG("%s At %d bytes, code should be 0x%x but it is instead 0x%x", + pszFormat, ui32Offset*4, pui32HostFWCode[ui32Offset], pui32CodeMemoryPointer[ui32Offset]); + ui32Count--; + } + } + } + + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); + +cleanup_initfw: + if (psRGXFW) + { + OSUnloadFirmware(psRGXFW); + } + + if (pui32HostFWCode) + { + OSFreeMem(pui32HostFWCode); + } + return eError; +#else + 
PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf); + PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile); + PVR_UNREFERENCED_PARAMETER(psDevInfo); + PVR_UNREFERENCED_PARAMETER(pszFormat); + return PVRSRV_OK; +#endif +} +#endif + +/* Re-load the META FW image from the filesystem and compare it (main code, + * plus coremem code when present) against the device's view through the + * Slave Port via _ValidateWithSP. Resources are released through the + * cleanup_initfw/freeHostFWCode goto chain in reverse acquisition order. */ +static PVRSRV_ERROR _ValidateFWImageForMETA(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ +#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) + IMG_UINT32 *pui32HostFWCode = NULL, *pui32HostFWCoremem = NULL; + OS_FW_IMAGE *psRGXFW = NULL; + const IMG_BYTE *pbRGXFirmware = NULL; + RGXFWIF_DEV_VIRTADDR sFWAddr; + PVRSRV_ERROR eError; + RGX_LAYER_PARAMS sLayerParams; + sLayerParams.psDevInfo = psDevInfo; + + if (psDevInfo->pvRegsBaseKM == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: RGX registers not mapped yet!", __func__)); + return PVRSRV_ERROR_BAD_MAPPING; + } + + /* Load FW from system for code verification */ + pui32HostFWCode = OSAllocZMem(psDevInfo->ui32FWCodeSizeInBytes); + if (pui32HostFWCode == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed in allocating memory for FW code. " + "So skipping FW code verification", + __func__)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + if (psDevInfo->ui32FWCorememCodeSizeInBytes) + { + pui32HostFWCoremem = OSAllocZMem(psDevInfo->ui32FWCorememCodeSizeInBytes); + if (pui32HostFWCoremem == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed in allocating memory for FW core code. 
" + "So skipping FW code verification", + __func__)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto freeHostFWCode; + } + } + + /* Load FW image */ + pbRGXFirmware = RGXLoadAndGetFWData(psDevInfo->psDeviceNode, &psRGXFW); + if (!pbRGXFirmware) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed in loading FW image file.", __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto cleanup_initfw; + } + + eError = ProcessLDRCommandStream(&sLayerParams, pbRGXFirmware, + (void*) pui32HostFWCode, NULL, + (void*) pui32HostFWCoremem, NULL, NULL); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed in parsing FW image file.", __func__)); + goto cleanup_initfw; + } + + /* starting checking after BOOT LOADER config */ + sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR; + eError = _ValidateWithSP(pfnDumpDebugPrintf, pvDumpDebugFile, + psDevInfo, &sFWAddr, + pui32HostFWCode, psDevInfo->ui32FWCodeSizeInBytes, + "FW code", RGXFW_MAX_BOOTLDR_OFFSET); + if (eError != PVRSRV_OK) + { + goto cleanup_initfw; + } + + if (psDevInfo->ui32FWCorememCodeSizeInBytes) + { + sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, META_COREMEM_CODE); + + eError = _ValidateWithSP(pfnDumpDebugPrintf, pvDumpDebugFile, + psDevInfo, &sFWAddr, + pui32HostFWCoremem, psDevInfo->ui32FWCorememCodeSizeInBytes, + "FW coremem code", 0); + } + +cleanup_initfw: + if (psRGXFW) + { + OSUnloadFirmware(psRGXFW); + } + + if (pui32HostFWCoremem) + { + OSFreeMem(pui32HostFWCoremem); + } +freeHostFWCode: + if (pui32HostFWCode) + { + OSFreeMem(pui32HostFWCode); + } + return eError; +#else + PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf); + PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile); + PVR_UNREFERENCED_PARAMETER(psDevInfo); + return PVRSRV_OK; +#endif +} + +#if defined(SUPPORT_EXTRA_METASP_DEBUG) +/* Validate the just-loaded FW code (and optional coremem code) in device + * memory against the device's Slave Port view, at driver load time. */ +PVRSRV_ERROR ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo) +{ +#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) + IMG_PBYTE pbCodeMemoryPointer; + PVRSRV_ERROR eError; + RGXFWIF_DEV_VIRTADDR sFWAddr; + + eError 
= DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pbCodeMemoryPointer); + if (eError != PVRSRV_OK) + { + return eError; + } + + sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR; + eError = _ValidateWithSP(NULL, NULL, psDevInfo, &sFWAddr, pbCodeMemoryPointer, psDevInfo->ui32FWCodeSizeInBytes, "FW code", 0); + if (eError != PVRSRV_OK) + { + goto releaseFWCodeMapping; + } + + if (psDevInfo->ui32FWCorememCodeSizeInBytes) + { + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc, (void **)&pbCodeMemoryPointer); + if (eError != PVRSRV_OK) + { + goto releaseFWCoreCodeMapping; + } + + sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, META_COREMEM_CODE); + + eError = _ValidateWithSP(NULL, NULL, psDevInfo, &sFWAddr, pbCodeMemoryPointer, + psDevInfo->ui32FWCorememCodeSizeInBytes, "FW coremem code", 0); + } + +releaseFWCoreCodeMapping: + if (psDevInfo->ui32FWCorememCodeSizeInBytes) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc); + } +releaseFWCodeMapping: + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(psDevInfo); + return PVRSRV_OK; +#endif +} +#endif + +/*! 
+******************************************************************************* + + @Function _RGXDecodePMPC + + @Description + + Return the name for the PM managed Page Catalogues + + @Input ui32PC - Page Catalogue number + + @Return void + +******************************************************************************/ +/* Only PC numbers 0x8..0xF are PM-managed; anything else decodes to " (-)". */ +static const IMG_CHAR* _RGXDecodePMPC(IMG_UINT32 ui32PC) +{ + const IMG_CHAR* pszPMPC = " (-)"; + + switch (ui32PC) + { + case 0x8: pszPMPC = " (PM-VCE0)"; break; + case 0x9: pszPMPC = " (PM-TE0)"; break; + case 0xA: pszPMPC = " (PM-ZLS0)"; break; + case 0xB: pszPMPC = " (PM-ALIST0)"; break; + case 0xC: pszPMPC = " (PM-VCE1)"; break; + case 0xD: pszPMPC = " (PM-TE1)"; break; + case 0xE: pszPMPC = " (PM-ZLS1)"; break; + case 0xF: pszPMPC = " (PM-ALIST1)"; break; + } + + return pszPMPC; +} + +/*! +******************************************************************************* + + @Function _RGXDecodeBIFReqTags + + @Description + + Decode the BIF Tag ID and sideband data fields from BIF_FAULT_BANK_REQ_STATUS regs + + @Input eBankID - BIF identifier + @Input ui32TagID - Tag ID value + @Input ui32TagSB - Tag Sideband data + @Output ppszTagID - Decoded string from the Tag ID + @Output ppszTagSB - Decoded string from the Tag SB + @Output pszScratchBuf - Buffer provided to the function to generate the debug strings + @Input ui32ScratchBufSize - Size of the provided buffer + + @Return void + +******************************************************************************/ +#include "rgxmhdefs_km.h" + +/* XE-memory-hierarchy variant: decode the MH tag ID / sideband pair into + * human-readable strings using the RGX_MH_TAG_* encodings. Outputs point at + * static strings, except where pszScratchBuf is used for formatted values. */ +static void _RGXDecodeBIFReqTagsXE(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32TagID, + IMG_UINT32 ui32TagSB, + IMG_CHAR **ppszTagID, + IMG_CHAR **ppszTagSB, + IMG_CHAR *pszScratchBuf, + IMG_UINT32 ui32ScratchBufSize) +{ + /* default to unknown */ + IMG_CHAR *pszTagID = "-"; + IMG_CHAR *pszTagSB = "-"; + + PVR_ASSERT(ppszTagID != NULL); + PVR_ASSERT(ppszTagSB != NULL); + + switch (ui32TagID) + { + /* MMU tags */ + case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PT: + 
case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PD: + case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PC: + case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PM: + { + switch (ui32TagID) + { + case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PT: pszTagID = "MMU PT"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PD: pszTagID = "MMU PD"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PC: pszTagID = "MMU PC"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PM: pszTagID = "MMU PM"; break; + } + switch (ui32TagSB) + { + case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PT_REQUEST: pszTagSB = "PT"; break; + case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PD_REQUEST: pszTagSB = "PD"; break; + case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PC_REQUEST: pszTagSB = "PC"; break; + case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PT_REQUEST: pszTagSB = "PM PT"; break; + case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_REQUEST: pszTagSB = "PM PD"; break; + case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_REQUEST: pszTagSB = "PM PC"; break; + case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_WREQUEST: pszTagSB = "PM PD W"; break; + case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_WREQUEST: pszTagSB = "PM PC W"; break; + } + break; + } + + /* MIPS */ + case RGX_MH_TAG_ENCODING_MH_TAG_MIPS: + { + pszTagID = "MIPS"; + switch (ui32TagSB) + { + case RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_OPCODE_FETCH: pszTagSB = "Opcode"; break; + case RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_DATA_ACCESS: pszTagSB = "Data"; break; + } + break; + } + + /* CDM tags */ + case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG0: + case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG1: + case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG2: + case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG3: + { + switch (ui32TagID) + { + case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG0: pszTagID = "CDM Stage 0"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG1: pszTagID = "CDM Stage 1"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG2: pszTagID = "CDM Stage 2"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG3: pszTagID = "CDM Stage 3"; break; + } + switch (ui32TagSB) + { + case 
RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTROL_STREAM: pszTagSB = "Control"; break; + case RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_INDIRECT_DATA: pszTagSB = "Indirect"; break; + case RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_EVENT_DATA: pszTagSB = "Event"; break; + case RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTEXT_STATE: pszTagSB = "Context"; break; + } + break; + } + + /* VDM tags */ + case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG0: + case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG1: + case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG2: + case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG3: + case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG4: + { + switch (ui32TagID) + { + case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG0: pszTagID = "VDM Stage 0"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG1: pszTagID = "VDM Stage 1"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG2: pszTagID = "VDM Stage 2"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG3: pszTagID = "VDM Stage 3"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG4: pszTagID = "VDM Stage 4"; break; + } + switch (ui32TagSB) + { + case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTROL: pszTagSB = "Control"; break; + case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STATE: pszTagSB = "State"; break; + case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_INDEX: pszTagSB = "Index"; break; + case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STACK: pszTagSB = "Stack"; break; + case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTEXT: pszTagSB = "Context"; break; + } + break; + } + + /* PDS */ + case RGX_MH_TAG_ENCODING_MH_TAG_PDS_0: + pszTagID = "PDS req 0"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_PDS_1: + pszTagID = "PDS req 1"; break; + + /* MCU */ + case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCA: + pszTagID = "MCU USCA"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCB: + pszTagID = "MCU USCB"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCC: + pszTagID = "MCU USCC"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCD: + pszTagID = "MCU USCD"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCA: + pszTagID = "MCU PDS USCA"; 
break; + case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCB: + pszTagID = "MCU PDS USCB"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCC: + pszTagID = "MCU PDS USCC"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCD: + pszTagID = "MCU PDSUSCD"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDSRW: + pszTagID = "PDS PDSRW"; break; + + /* TCU */ + case RGX_MH_TAG_ENCODING_MH_TAG_TCU_0: + pszTagID = "TCU req 0"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_TCU_1: + pszTagID = "TCU req 1"; break; + + /* FBCDC */ + case RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_0: + pszTagID = "FBCDC0"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_1: + pszTagID = "FBCDC1"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_2: + pszTagID = "FBCDC2"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_3: + pszTagID = "FBCDC3"; break; + + /* USC Shared */ + case RGX_MH_TAG_ENCODING_MH_TAG_USC: + pszTagID = "USCS"; break; + + /* ISP */ + case RGX_MH_TAG_ENCODING_MH_TAG_ISP_ZLS: + pszTagID = "ISP0 ZLS"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_ISP_DS: + pszTagID = "ISP0 DS"; break; + + /* TPF */ + case RGX_MH_TAG_ENCODING_MH_TAG_TPF: + case RGX_MH_TAG_ENCODING_MH_TAG_TPF_PBCDBIAS: + case RGX_MH_TAG_ENCODING_MH_TAG_TPF_SPF: + { + switch (ui32TagID) + { + case RGX_MH_TAG_ENCODING_MH_TAG_TPF: pszTagID = "TPF0"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_TPF_PBCDBIAS: pszTagID = "TPF0 DBIAS"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_TPF_SPF: pszTagID = "TPF0 SPF"; break; + } + switch (ui32TagSB) + { + case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_PDS_STATE: pszTagSB = "PDS state"; break; + case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DEPTH_BIAS: pszTagSB = "Depth bias"; break; + case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_FLOOR_OFFSET_DATA: pszTagSB = "Floor offset"; break; + case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DELTA_DATA: pszTagSB = "Delta"; break; + } + break; + } + + /* IPF */ + case RGX_MH_TAG_ENCODING_MH_TAG_IPF_CREQ: + case RGX_MH_TAG_ENCODING_MH_TAG_IPF_OTHERS: + { + switch (ui32TagID) + { + case 
RGX_MH_TAG_ENCODING_MH_TAG_IPF_CREQ: pszTagID = "IPF0"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_IPF_OTHERS: pszTagID = "IPF0"; break; + } + + /* Fixed: the pipe/request decode below previously tested and printed + * ui32TagID, but the tag ID is already pinned to the IPF encodings by + * the enclosing switch -- the per-pipe CReq/PReq index lives in the + * sideband (ui32TagSB). */ + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_ISP_IPP_PIPES)) + { + if (ui32TagSB < RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES)) + { + OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, "CReq%d", ui32TagSB); + pszTagSB = pszScratchBuf; + } + else if (ui32TagSB < 2 * RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES)) + { + ui32TagSB -= RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES); + OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, "PReq%d", ui32TagSB); + pszTagSB = pszScratchBuf; + } + else + { + switch (ui32TagSB - 2 * RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES)) + { + case 0: pszTagSB = "RReq"; break; + case 1: pszTagSB = "DBSC"; break; + case 2: pszTagSB = "CPF"; break; + case 3: pszTagSB = "Delta"; break; + } + } + } + break; + } + + /* VDM Stage 5 (temporary) */ + case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG5: + pszTagID = "VDM Stage 5"; break; + + /* TA */ + case RGX_MH_TAG_ENCODING_MH_TAG_TA_PPP: + pszTagID = "PPP"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_TA_TPWRTC: + pszTagID = "TPW RTC"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_TA_TEACRTC: + pszTagID = "TEAC RTC"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGRTC: + pszTagID = "PSG RTC"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGREGION: + pszTagID = "PSG Region"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGSTREAM: + pszTagID = "PSG Stream"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_TA_TPW: + pszTagID = "TPW"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_TA_TPC: + pszTagID = "TPC"; break; + + /* PM */ + case RGX_MH_TAG_ENCODING_MH_TAG_PM_ALLOC: + { + pszTagID = "PMA"; + switch (ui32TagSB) + { + case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAFSTACK: pszTagSB = "TA Fstack"; break; + case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMLIST: pszTagSB = "TA MList"; break; + case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DFSTACK: pszTagSB = "3D Fstack"; break; + 
case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMLIST: pszTagSB = "3D MList"; break; + case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX0: pszTagSB = "Context0"; break; + case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX1: pszTagSB = "Context1"; break; + case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_MAVP: pszTagSB = "MAVP"; break; + case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_UFSTACK: pszTagSB = "UFstack"; break; + case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMMUSTACK: pszTagSB = "TA MMUstack"; break; + case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMMUSTACK: pszTagSB = "3D MMUstack"; break; + case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAUFSTACK: pszTagSB = "TA UFstack"; break; + case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DUFSTACK: pszTagSB = "3D UFstack"; break; + case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAVFP: pszTagSB = "TA VFP"; break; + } + break; + } + case RGX_MH_TAG_ENCODING_MH_TAG_PM_DEALLOC: + { + pszTagID = "PMD"; + switch (ui32TagSB) + { + case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAFSTACK: pszTagSB = "TA Fstack"; break; + case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMLIST: pszTagSB = "TA MList"; break; + case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DFSTACK: pszTagSB = "3D Fstack"; break; + case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMLIST: pszTagSB = "3D MList"; break; + case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX0: pszTagSB = "Context0"; break; + case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX1: pszTagSB = "Context1"; break; + case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_UFSTACK: pszTagSB = "UFstack"; break; + case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMMUSTACK: pszTagSB = "TA MMUstack"; break; + case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMMUSTACK: pszTagSB = "3D MMUstack"; break; + case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAUFSTACK: pszTagSB = "TA UFstack"; break; + case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DUFSTACK: pszTagSB = "3D UFstack"; break; + case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAVFP: pszTagSB = "TA VFP"; break; + case 
RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DVFP: pszTagSB = "3D VFP"; break; + } + break; + } + + /* TDM */ + case RGX_MH_TAG_ENCODING_MH_TAG_TDM_DMA: + { + pszTagID = "TDM DMA"; + switch (ui32TagSB) + { + case RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTL_STREAM: pszTagSB = "Ctl stream"; break; + case RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTX_BUFFER: pszTagSB = "Ctx buffer"; break; + case RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_QUEUE_CTL: pszTagSB = "Queue ctl"; break; + } + break; + } + case RGX_MH_TAG_ENCODING_MH_TAG_TDM_CTL: + { + pszTagID = "TDM CTL"; + switch (ui32TagSB) + { + case RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_FENCE: pszTagSB = "Fence"; break; + case RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_CONTEXT: pszTagSB = "Context"; break; + case RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_QUEUE: pszTagSB = "Queue"; break; + } + break; + } + + /* PBE */ + case RGX_MH_TAG_ENCODING_MH_TAG_PBE0: + pszTagID = "PBE0"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_PBE1: + pszTagID = "PBE1"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_PBE2: + pszTagID = "PBE2"; break; + case RGX_MH_TAG_ENCODING_MH_TAG_PBE3: + pszTagID = "PBE3"; break; + } + + *ppszTagID = pszTagID; + *ppszTagSB = pszTagSB; +} + + +/* Decode the BIF fault bank tag ID / sideband into strings; dispatches to the + * XE variant when the XE memory hierarchy feature is present, otherwise uses + * the legacy numeric encodings below. */ +static void _RGXDecodeBIFReqTags(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXDBG_BIF_ID eBankID, + IMG_UINT32 ui32TagID, + IMG_UINT32 ui32TagSB, + IMG_CHAR **ppszTagID, + IMG_CHAR **ppszTagSB, + IMG_CHAR *pszScratchBuf, + IMG_UINT32 ui32ScratchBufSize) +{ + /* default to unknown */ + IMG_CHAR *pszTagID = "-"; + IMG_CHAR *pszTagSB = "-"; + + PVR_ASSERT(ppszTagID != NULL); + PVR_ASSERT(ppszTagSB != NULL); + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY)) + { + _RGXDecodeBIFReqTagsXE(psDevInfo, ui32TagID, ui32TagSB, ppszTagID, ppszTagSB, pszScratchBuf, ui32ScratchBufSize); + return; + } + + switch (ui32TagID) + { + case 0x0: + { + pszTagID = "MMU"; + switch (ui32TagSB) + { + case 0x0: pszTagSB = "Table"; break; + case 0x1: pszTagSB = "Directory"; break; + case 0x2: 
pszTagSB = "Catalogue"; break; + } + break; + } + case 0x1: + { + pszTagID = "TLA"; + switch (ui32TagSB) + { + case 0x0: pszTagSB = "Pixel data"; break; + case 0x1: pszTagSB = "Command stream data"; break; + case 0x2: pszTagSB = "Fence or flush"; break; + } + break; + } + case 0x2: + { + pszTagID = "HOST"; + break; + } + case 0x3: + { + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + pszTagID = "META"; + switch (ui32TagSB) + { + case 0x0: pszTagSB = "DCache - Thread 0"; break; + case 0x1: pszTagSB = "ICache - Thread 0"; break; + case 0x2: pszTagSB = "JTag - Thread 0"; break; + case 0x3: pszTagSB = "Slave bus - Thread 0"; break; + /* Fixed truncated string: sidebands 0x4-0x7 are the thread-1 group + * (cf. "ICache - Thread 1" etc. below); this read "DCache - Thread ". */ + case 0x4: pszTagSB = "DCache - Thread 1"; break; + case 0x5: pszTagSB = "ICache - Thread 1"; break; + case 0x6: pszTagSB = "JTag - Thread 1"; break; + case 0x7: pszTagSB = "Slave bus - Thread 1"; break; + } + } + else if (RGX_IS_ERN_SUPPORTED(psDevInfo, 57596)) + { + pszTagID="TCU"; + } + else + { + /* Unreachable code */ + PVR_ASSERT(IMG_FALSE); + } + break; + } + case 0x4: + { + pszTagID = "USC"; + OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, + "Cache line %d", (ui32TagSB & 0x3f)); + pszTagSB = pszScratchBuf; + break; + } + case 0x5: + { + pszTagID = "PBE"; + break; + } + case 0x6: + { + pszTagID = "ISP"; + switch (ui32TagSB) + { + case 0x00: pszTagSB = "ZLS"; break; + case 0x20: pszTagSB = "Occlusion Query"; break; + } + break; + } + case 0x7: + { + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING)) + { + if (eBankID == RGXDBG_TEXAS_BIF) + { + pszTagID = "IPF"; + switch (ui32TagSB) + { + case 0x0: pszTagSB = "CPF"; break; + case 0x1: pszTagSB = "DBSC"; break; + case 0x2: + case 0x4: + case 0x6: + case 0x8: pszTagSB = "Control Stream"; break; + case 0x3: + case 0x5: + case 0x7: + case 0x9: pszTagSB = "Primitive Block"; break; + } + } + else + { + pszTagID = "IPP"; + switch (ui32TagSB) + { + case 0x0: pszTagSB = "Macrotile Header"; break; + case 0x1: pszTagSB = "Region Header"; break; + } + } + } + else if 
(RGX_IS_FEATURE_SUPPORTED(psDevInfo, SIMPLE_INTERNAL_PARAMETER_FORMAT)) + { + pszTagID = "IPF"; + switch (ui32TagSB) + { + case 0x0: pszTagSB = "Region Header"; break; + case 0x1: pszTagSB = "DBSC"; break; + case 0x2: pszTagSB = "CPF"; break; + case 0x3: pszTagSB = "Control Stream"; break; + case 0x4: pszTagSB = "Primitive Block"; break; + } + } + else + { + pszTagID = "IPF"; + switch (ui32TagSB) + { + case 0x0: pszTagSB = "Macrotile Header"; break; + case 0x1: pszTagSB = "Region Header"; break; + case 0x2: pszTagSB = "DBSC"; break; + case 0x3: pszTagSB = "CPF"; break; + case 0x4: + case 0x6: + case 0x8: pszTagSB = "Control Stream"; break; + case 0x5: + case 0x7: + case 0x9: pszTagSB = "Primitive Block"; break; + } + } + break; + } + case 0x8: + { + pszTagID = "CDM"; + switch (ui32TagSB) + { + case 0x0: pszTagSB = "Control Stream"; break; + case 0x1: pszTagSB = "Indirect Data"; break; + case 0x2: pszTagSB = "Event Write"; break; + case 0x3: pszTagSB = "Context State"; break; + } + break; + } + case 0x9: + { + pszTagID = "VDM"; + switch (ui32TagSB) + { + case 0x0: pszTagSB = "Control Stream"; break; + case 0x1: pszTagSB = "PPP State"; break; + case 0x2: pszTagSB = "Index Data"; break; + case 0x4: pszTagSB = "Call Stack"; break; + case 0x8: pszTagSB = "Context State"; break; + } + break; + } + case 0xA: + { + pszTagID = "PM"; + /* NOTE(review): sideband 0xE is not decoded (jumps 0xD -> 0xF); + * presumably reserved in the PM encoding -- verify against the TRM. */ + switch (ui32TagSB) + { + case 0x0: pszTagSB = "PMA_TAFSTACK"; break; + case 0x1: pszTagSB = "PMA_TAMLIST"; break; + case 0x2: pszTagSB = "PMA_3DFSTACK"; break; + case 0x3: pszTagSB = "PMA_3DMLIST"; break; + case 0x4: pszTagSB = "PMA_PMCTX0"; break; + case 0x5: pszTagSB = "PMA_PMCTX1"; break; + case 0x6: pszTagSB = "PMA_MAVP"; break; + case 0x7: pszTagSB = "PMA_UFSTACK"; break; + case 0x8: pszTagSB = "PMD_TAFSTACK"; break; + case 0x9: pszTagSB = "PMD_TAMLIST"; break; + case 0xA: pszTagSB = "PMD_3DFSTACK"; break; + case 0xB: pszTagSB = "PMD_3DMLIST"; break; + case 0xC: pszTagSB = "PMD_PMCTX0"; break; + case 0xD: pszTagSB = "PMD_PMCTX1"; break; + 
case 0xF: pszTagSB = "PMD_UFSTACK"; break; + case 0x10: pszTagSB = "PMA_TAMMUSTACK"; break; + case 0x11: pszTagSB = "PMA_3DMMUSTACK"; break; + case 0x12: pszTagSB = "PMD_TAMMUSTACK"; break; + case 0x13: pszTagSB = "PMD_3DMMUSTACK"; break; + case 0x14: pszTagSB = "PMA_TAUFSTACK"; break; + case 0x15: pszTagSB = "PMA_3DUFSTACK"; break; + case 0x16: pszTagSB = "PMD_TAUFSTACK"; break; + case 0x17: pszTagSB = "PMD_3DUFSTACK"; break; + case 0x18: pszTagSB = "PMA_TAVFP"; break; + case 0x19: pszTagSB = "PMD_3DVFP"; break; + case 0x1A: pszTagSB = "PMD_TAVFP"; break; + } + break; + } + case 0xB: + { + pszTagID = "TA"; + switch (ui32TagSB) + { + case 0x1: pszTagSB = "VCE"; break; + case 0x2: pszTagSB = "TPC"; break; + case 0x3: pszTagSB = "TE Control Stream"; break; + case 0x4: pszTagSB = "TE Region Header"; break; + case 0x5: pszTagSB = "TE Render Target Cache"; break; + case 0x6: pszTagSB = "TEAC Render Target Cache"; break; + case 0x7: pszTagSB = "VCE Render Target Cache"; break; + case 0x8: pszTagSB = "PPP Context State"; break; + } + break; + } + case 0xC: + { + pszTagID = "TPF"; + switch (ui32TagSB) + { + case 0x0: pszTagSB = "TPF0: Primitive Block"; break; + case 0x1: pszTagSB = "TPF0: Depth Bias"; break; + case 0x2: pszTagSB = "TPF0: Per Primitive IDs"; break; + case 0x3: pszTagSB = "CPF - Tables"; break; + case 0x4: pszTagSB = "TPF1: Primitive Block"; break; + case 0x5: pszTagSB = "TPF1: Depth Bias"; break; + case 0x6: pszTagSB = "TPF1: Per Primitive IDs"; break; + case 0x7: pszTagSB = "CPF - Data: Pipe 0"; break; + case 0x8: pszTagSB = "TPF2: Primitive Block"; break; + case 0x9: pszTagSB = "TPF2: Depth Bias"; break; + case 0xA: pszTagSB = "TPF2: Per Primitive IDs"; break; + case 0xB: pszTagSB = "CPF - Data: Pipe 1"; break; + case 0xC: pszTagSB = "TPF3: Primitive Block"; break; + case 0xD: pszTagSB = "TPF3: Depth Bias"; break; + case 0xE: pszTagSB = "TPF3: Per Primitive IDs"; break; + case 0xF: pszTagSB = "CPF - Data: Pipe 2"; break; + } + break; + } + case 0xD: + { + 
pszTagID = "PDS"; + break; + } + case 0xE: + { + pszTagID = "MCU"; + { + IMG_UINT32 ui32Burst = (ui32TagSB >> 5) & 0x7; + IMG_UINT32 ui32GroupEnc = (ui32TagSB >> 2) & 0x7; + IMG_UINT32 ui32Group = ui32TagSB & 0x3; + + IMG_CHAR* pszBurst = ""; + IMG_CHAR* pszGroupEnc = ""; + IMG_CHAR* pszGroup = ""; + + switch (ui32Burst) + { + case 0x0: + case 0x1: pszBurst = "128bit word within the Lower 256bits"; break; + case 0x2: + case 0x3: pszBurst = "128bit word within the Upper 256bits"; break; + case 0x4: pszBurst = "Lower 256bits"; break; + case 0x5: pszBurst = "Upper 256bits"; break; + case 0x6: pszBurst = "512 bits"; break; + } + switch (ui32GroupEnc) + { + case 0x0: pszGroupEnc = "TPUA_USC"; break; + case 0x1: pszGroupEnc = "TPUB_USC"; break; + case 0x2: pszGroupEnc = "USCA_USC"; break; + case 0x3: pszGroupEnc = "USCB_USC"; break; + case 0x4: pszGroupEnc = "PDS_USC"; break; + case 0x5: + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) && + 6 > RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS)) + { + pszGroupEnc = "PDSRW"; + } else if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) && + 6 == RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS)) + { + pszGroupEnc = "UPUC_USC"; + } + break; + case 0x6: + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) && + 6 == RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS)) + { + pszGroupEnc = "TPUC_USC"; + } + break; + case 0x7: + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) && + 6 == RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS)) + { + pszGroupEnc = "PDSRW"; + } + break; + } + switch (ui32Group) + { + case 0x0: pszGroup = "Banks 0-3"; break; + case 0x1: pszGroup = "Banks 4-7"; break; + case 0x2: pszGroup = "Banks 8-11"; break; + case 0x3: pszGroup = "Banks 12-15"; break; + } + + OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, + "%s, %s, %s", pszBurst, pszGroupEnc, pszGroup); + pszTagSB = pszScratchBuf; + } + break; + } + case 0xF: + { + pszTagID = "FB_CDC"; + + if 
(RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE)) + { + IMG_UINT32 ui32Req = (ui32TagSB >> 0) & 0xf; + IMG_UINT32 ui32MCUSB = (ui32TagSB >> 4) & 0x3; + IMG_CHAR* pszReqOrig = ""; + + switch (ui32Req) + { + case 0x0: pszReqOrig = "FBC Request, originator ZLS"; break; + case 0x1: pszReqOrig = "FBC Request, originator PBE"; break; + case 0x2: pszReqOrig = "FBC Request, originator Host"; break; + case 0x3: pszReqOrig = "FBC Request, originator TLA"; break; + case 0x4: pszReqOrig = "FBDC Request, originator ZLS"; break; + case 0x5: pszReqOrig = "FBDC Request, originator MCU"; break; + case 0x6: pszReqOrig = "FBDC Request, originator Host"; break; + case 0x7: pszReqOrig = "FBDC Request, originator TLA"; break; + case 0x8: pszReqOrig = "FBC Request, originator ZLS Requester Fence"; break; + case 0x9: pszReqOrig = "FBC Request, originator PBE Requester Fence"; break; + case 0xa: pszReqOrig = "FBC Request, originator Host Requester Fence"; break; + case 0xb: pszReqOrig = "FBC Request, originator TLA Requester Fence"; break; + case 0xc: pszReqOrig = "Reserved"; break; + case 0xd: pszReqOrig = "Reserved"; break; + case 0xe: pszReqOrig = "FBDC Request, originator FBCDC(Host) Memory Fence"; break; + case 0xf: pszReqOrig = "FBDC Request, originator FBCDC(TLA) Memory Fence"; break; + } + OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, + "%s, MCU sideband 0x%X", pszReqOrig, ui32MCUSB); + pszTagSB = pszScratchBuf; + } + else + { + IMG_UINT32 ui32Req = (ui32TagSB >> 2) & 0x7; + IMG_UINT32 ui32MCUSB = (ui32TagSB >> 0) & 0x3; + IMG_CHAR* pszReqOrig = ""; + + switch (ui32Req) + { + case 0x0: pszReqOrig = "FBC Request, originator ZLS"; break; + case 0x1: pszReqOrig = "FBC Request, originator PBE"; break; + case 0x2: pszReqOrig = "FBC Request, originator Host"; break; + case 0x3: pszReqOrig = "FBC Request, originator TLA"; break; + case 0x4: pszReqOrig = "FBDC Request, originator ZLS"; break; + case 0x5: pszReqOrig = "FBDC Request, originator MCU"; break; + case 0x6: pszReqOrig 
= "FBDC Request, originator Host"; break; + case 0x7: pszReqOrig = "FBDC Request, originator TLA"; break; + } + OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, + "%s, MCU sideband 0x%X", pszReqOrig, ui32MCUSB); + pszTagSB = pszScratchBuf; + } + break; + } + } /* switch (TagID) */ + + *ppszTagID = pszTagID; + *ppszTagSB = pszTagSB; +} + + + +/*! +******************************************************************************* + + @Function _RGXDecodeMMULevel + + @Description + + Return the name for the MMU level that faulted. + + @Input ui32MMULevel - MMU level + + @Return IMG_CHAR* to the sting describing the MMU level that faulted. + +******************************************************************************/ +static const IMG_CHAR* _RGXDecodeMMULevel(IMG_UINT32 ui32MMULevel) +{ + const IMG_CHAR* pszMMULevel = ""; + + switch (ui32MMULevel) + { + case 0x0: pszMMULevel = " (Page Table)"; break; + case 0x1: pszMMULevel = " (Page Directory)"; break; + case 0x2: pszMMULevel = " (Page Catalog)"; break; + case 0x3: pszMMULevel = " (Cat Base Reg)"; break; + } + + return pszMMULevel; +} + + +/*! +******************************************************************************* + + @Function _RGXDecodeMMUReqTags + + @Description + + Decodes the MMU Tag ID and Sideband data fields from RGX_CR_MMU_FAULT_META_STATUS and + RGX_CR_MMU_FAULT_STATUS regs. 
+ + @Input ui32TagID - Tag ID value + @Input ui32TagSB - Tag Sideband data + @Input bRead - Read flag + @Output ppszTagID - Decoded string from the Tag ID + @Output ppszTagSB - Decoded string from the Tag SB + @Output pszScratchBuf - Buffer provided to the function to generate the debug strings + @Input ui32ScratchBufSize - Size of the provided buffer + + @Return void + +******************************************************************************/ +static void _RGXDecodeMMUReqTags(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32TagID, + IMG_UINT32 ui32TagSB, + IMG_BOOL bRead, + IMG_CHAR **ppszTagID, + IMG_CHAR **ppszTagSB, + IMG_CHAR *pszScratchBuf, + IMG_UINT32 ui32ScratchBufSize) +{ + IMG_INT32 i32SideBandType = -1; + IMG_CHAR *pszTagID = "-"; + IMG_CHAR *pszTagSB = "-"; + + PVR_ASSERT(ppszTagID != NULL); + PVR_ASSERT(ppszTagSB != NULL); + + + switch (ui32TagID) + { + case 0: pszTagID = "META (Jones)"; i32SideBandType = RGXDBG_META; break; + case 1: pszTagID = "TLA (Jones)"; i32SideBandType = RGXDBG_TLA; break; + case 2: pszTagID = "DMA (Jones)"; i32SideBandType = RGXDBG_DMA; break; + case 3: pszTagID = "VDMM (Jones)"; i32SideBandType = RGXDBG_VDMM; break; + case 4: pszTagID = "CDM (Jones)"; i32SideBandType = RGXDBG_CDM; break; + case 5: pszTagID = "IPP (Jones)"; i32SideBandType = RGXDBG_IPP; break; + case 6: pszTagID = "PM (Jones)"; i32SideBandType = RGXDBG_PM; break; + case 7: pszTagID = "Tiling (Jones)"; i32SideBandType = RGXDBG_TILING; break; + case 8: pszTagID = "MCU (Texas 0)"; i32SideBandType = RGXDBG_MCU; break; + case 12: pszTagID = "VDMS (Black Pearl 0)"; i32SideBandType = RGXDBG_VDMS; break; + case 13: pszTagID = "IPF (Black Pearl 0)"; i32SideBandType = RGXDBG_IPF; break; + case 14: pszTagID = "ISP (Black Pearl 0)"; i32SideBandType = RGXDBG_ISP; break; + case 15: pszTagID = "TPF (Black Pearl 0)"; i32SideBandType = RGXDBG_TPF; break; + case 16: pszTagID = "USCS (Black Pearl 0)"; i32SideBandType = RGXDBG_USCS; break; + case 17: pszTagID = "PPP (Black 
Pearl 0)"; i32SideBandType = RGXDBG_PPP; break; + case 20: pszTagID = "MCU (Texas 1)"; i32SideBandType = RGXDBG_MCU; break; + case 24: pszTagID = "MCU (Texas 2)"; i32SideBandType = RGXDBG_MCU; break; + case 28: pszTagID = "VDMS (Black Pearl 1)"; i32SideBandType = RGXDBG_VDMS; break; + case 29: pszTagID = "IPF (Black Pearl 1)"; i32SideBandType = RGXDBG_IPF; break; + case 30: pszTagID = "ISP (Black Pearl 1)"; i32SideBandType = RGXDBG_ISP; break; + case 31: pszTagID = "TPF (Black Pearl 1)"; i32SideBandType = RGXDBG_TPF; break; + case 32: pszTagID = "USCS (Black Pearl 1)"; i32SideBandType = RGXDBG_USCS; break; + case 33: pszTagID = "PPP (Black Pearl 1)"; i32SideBandType = RGXDBG_PPP; break; + case 36: pszTagID = "MCU (Texas 3)"; i32SideBandType = RGXDBG_MCU; break; + case 40: pszTagID = "MCU (Texas 4)"; i32SideBandType = RGXDBG_MCU; break; + case 44: pszTagID = "VDMS (Black Pearl 2)"; i32SideBandType = RGXDBG_VDMS; break; + case 45: pszTagID = "IPF (Black Pearl 2)"; i32SideBandType = RGXDBG_IPF; break; + case 46: pszTagID = "ISP (Black Pearl 2)"; i32SideBandType = RGXDBG_ISP; break; + case 47: pszTagID = "TPF (Black Pearl 2)"; i32SideBandType = RGXDBG_TPF; break; + case 48: pszTagID = "USCS (Black Pearl 2)"; i32SideBandType = RGXDBG_USCS; break; + case 49: pszTagID = "PPP (Black Pearl 2)"; i32SideBandType = RGXDBG_PPP; break; + case 52: pszTagID = "MCU (Texas 5)"; i32SideBandType = RGXDBG_MCU; break; + case 56: pszTagID = "MCU (Texas 6)"; i32SideBandType = RGXDBG_MCU; break; + case 60: pszTagID = "VDMS (Black Pearl 3)"; i32SideBandType = RGXDBG_VDMS; break; + case 61: pszTagID = "IPF (Black Pearl 3)"; i32SideBandType = RGXDBG_IPF; break; + case 62: pszTagID = "ISP (Black Pearl 3)"; i32SideBandType = RGXDBG_ISP; break; + case 63: pszTagID = "TPF (Black Pearl 3)"; i32SideBandType = RGXDBG_TPF; break; + case 64: pszTagID = "USCS (Black Pearl 3)"; i32SideBandType = RGXDBG_USCS; break; + case 65: pszTagID = "PPP (Black Pearl 3)"; i32SideBandType = RGXDBG_PPP; break; + case 
68: pszTagID = "MCU (Texas 7)"; i32SideBandType = RGXDBG_MCU; break; + } + if (('-' == pszTagID[0]) && '\n' == pszTagID[1]) + { + + if (RGX_IS_ERN_SUPPORTED(psDevInfo, 50539) || + (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, FBCDC_ARCHITECTURE) && RGX_GET_FEATURE_VALUE(psDevInfo, FBCDC_ARCHITECTURE) >= 3)) + { + switch (ui32TagID) + { + case 18: pszTagID = "TPF_CPF (Black Pearl 0)"; i32SideBandType = RGXDBG_TPF_CPF; break; + case 19: pszTagID = "IPF_CPF (Black Pearl 0)"; i32SideBandType = RGXDBG_IPF_CPF; break; + case 34: pszTagID = "TPF_CPF (Black Pearl 1)"; i32SideBandType = RGXDBG_TPF_CPF; break; + case 35: pszTagID = "IPF_CPF (Black Pearl 1)"; i32SideBandType = RGXDBG_IPF_CPF; break; + case 50: pszTagID = "TPF_CPF (Black Pearl 2)"; i32SideBandType = RGXDBG_TPF_CPF; break; + case 51: pszTagID = "IPF_CPF (Black Pearl 2)"; i32SideBandType = RGXDBG_IPF_CPF; break; + case 66: pszTagID = "TPF_CPF (Black Pearl 3)"; i32SideBandType = RGXDBG_TPF_CPF; break; + case 67: pszTagID = "IPF_CPF (Black Pearl 3)"; i32SideBandType = RGXDBG_IPF_CPF; break; + } + + if (RGX_IS_ERN_SUPPORTED(psDevInfo, 50539)) + { + switch (ui32TagID) + { + case 9: pszTagID = "PBE (Texas 0)"; i32SideBandType = RGXDBG_PBE; break; + case 10: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break; + case 11: pszTagID = "FBCDC (Texas 0)"; i32SideBandType = RGXDBG_FBCDC; break; + case 21: pszTagID = "PBE (Texas 1)"; i32SideBandType = RGXDBG_PBE; break; + case 22: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break; + case 23: pszTagID = "FBCDC (Texas 1)"; i32SideBandType = RGXDBG_FBCDC; break; + case 25: pszTagID = "PBE (Texas 2)"; i32SideBandType = RGXDBG_PBE; break; + case 26: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break; + case 27: pszTagID = "FBCDC (Texas 2)"; i32SideBandType = RGXDBG_FBCDC; break; + case 37: pszTagID = "PBE (Texas 3)"; i32SideBandType = RGXDBG_PBE; break; + case 38: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break; + case 39: 
pszTagID = "FBCDC (Texas 3)"; i32SideBandType = RGXDBG_FBCDC; break; + case 41: pszTagID = "PBE (Texas 4)"; i32SideBandType = RGXDBG_PBE; break; + case 42: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break; + case 43: pszTagID = "FBCDC (Texas 4)"; i32SideBandType = RGXDBG_FBCDC; break; + case 53: pszTagID = "PBE (Texas 5)"; i32SideBandType = RGXDBG_PBE; break; + case 54: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break; + case 55: pszTagID = "FBCDC (Texas 5)"; i32SideBandType = RGXDBG_FBCDC; break; + case 57: pszTagID = "PBE (Texas 6)"; i32SideBandType = RGXDBG_PBE; break; + case 58: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break; + case 59: pszTagID = "FBCDC (Texas 6)"; i32SideBandType = RGXDBG_FBCDC; break; + case 69: pszTagID = "PBE (Texas 7)"; i32SideBandType = RGXDBG_PBE; break; + case 70: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break; + case 71: pszTagID = "FBCDC (Texas 7)"; i32SideBandType = RGXDBG_FBCDC; break; + } + }else + { + switch (ui32TagID) + { + case 9: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break; + case 10: pszTagID = "PBE (Texas 0)"; i32SideBandType = RGXDBG_PBE; break; + case 11: pszTagID = "FBCDC (Texas 0)"; i32SideBandType = RGXDBG_FBCDC; break; + case 21: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break; + case 22: pszTagID = "PBE (Texas 1)"; i32SideBandType = RGXDBG_PBE; break; + case 23: pszTagID = "FBCDC (Texas 1)"; i32SideBandType = RGXDBG_FBCDC; break; + case 25: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break; + case 26: pszTagID = "PBE (Texas 2)"; i32SideBandType = RGXDBG_PBE; break; + case 27: pszTagID = "FBCDC (Texas 2)"; i32SideBandType = RGXDBG_FBCDC; break; + case 37: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break; + case 38: pszTagID = "PBE (Texas 3)"; i32SideBandType = RGXDBG_PBE; break; + case 39: pszTagID = "FBCDC (Texas 3)"; i32SideBandType = RGXDBG_FBCDC; break; + case 41: pszTagID = "PDS (Texas 
4)"; i32SideBandType = RGXDBG_PDS; break; + case 42: pszTagID = "PBE (Texas 4)"; i32SideBandType = RGXDBG_PBE; break; + case 43: pszTagID = "FBCDC (Texas 4)"; i32SideBandType = RGXDBG_FBCDC; break; + case 53: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break; + case 54: pszTagID = "PBE (Texas 5)"; i32SideBandType = RGXDBG_PBE; break; + case 55: pszTagID = "FBCDC (Texas 5)"; i32SideBandType = RGXDBG_FBCDC; break; + case 57: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break; + case 58: pszTagID = "PBE (Texas 6)"; i32SideBandType = RGXDBG_PBE; break; + case 59: pszTagID = "FBCDC (Texas 6)"; i32SideBandType = RGXDBG_FBCDC; break; + case 69: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break; + case 70: pszTagID = "PBE (Texas 7)"; i32SideBandType = RGXDBG_PBE; break; + case 71: pszTagID = "FBCDC (Texas 7)"; i32SideBandType = RGXDBG_FBCDC; break; + } + } + }else + { + switch (ui32TagID) + { + case 9: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break; + case 10: pszTagID = "PBE0 (Texas 0)"; i32SideBandType = RGXDBG_PBE; break; + case 11: pszTagID = "PBE1 (Texas 0)"; i32SideBandType = RGXDBG_PBE; break; + case 18: pszTagID = "VCE (Black Pearl 0)"; i32SideBandType = RGXDBG_VCE; break; + case 19: pszTagID = "FBCDC (Black Pearl 0)"; i32SideBandType = RGXDBG_FBCDC; break; + case 21: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break; + case 22: pszTagID = "PBE0 (Texas 1)"; i32SideBandType = RGXDBG_PBE; break; + case 23: pszTagID = "PBE1 (Texas 1)"; i32SideBandType = RGXDBG_PBE; break; + case 25: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break; + case 26: pszTagID = "PBE0 (Texas 2)"; i32SideBandType = RGXDBG_PBE; break; + case 27: pszTagID = "PBE1 (Texas 2)"; i32SideBandType = RGXDBG_PBE; break; + case 34: pszTagID = "VCE (Black Pearl 1)"; i32SideBandType = RGXDBG_VCE; break; + case 35: pszTagID = "FBCDC (Black Pearl 1)"; i32SideBandType = RGXDBG_FBCDC; break; + case 37: pszTagID = "PDS (Texas 
3)"; i32SideBandType = RGXDBG_PDS; break; + case 38: pszTagID = "PBE0 (Texas 3)"; i32SideBandType = RGXDBG_PBE; break; + case 39: pszTagID = "PBE1 (Texas 3)"; i32SideBandType = RGXDBG_PBE; break; + case 41: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break; + case 42: pszTagID = "PBE0 (Texas 4)"; i32SideBandType = RGXDBG_PBE; break; + case 43: pszTagID = "PBE1 (Texas 4)"; i32SideBandType = RGXDBG_PBE; break; + case 50: pszTagID = "VCE (Black Pearl 2)"; i32SideBandType = RGXDBG_VCE; break; + case 51: pszTagID = "FBCDC (Black Pearl 2)"; i32SideBandType = RGXDBG_FBCDC; break; + case 53: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break; + case 54: pszTagID = "PBE0 (Texas 5)"; i32SideBandType = RGXDBG_PBE; break; + case 55: pszTagID = "PBE1 (Texas 5)"; i32SideBandType = RGXDBG_PBE; break; + case 57: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break; + case 58: pszTagID = "PBE0 (Texas 6)"; i32SideBandType = RGXDBG_PBE; break; + case 59: pszTagID = "PBE1 (Texas 6)"; i32SideBandType = RGXDBG_PBE; break; + case 66: pszTagID = "VCE (Black Pearl 3)"; i32SideBandType = RGXDBG_VCE; break; + case 67: pszTagID = "FBCDC (Black Pearl 3)"; i32SideBandType = RGXDBG_FBCDC; break; + case 69: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break; + case 70: pszTagID = "PBE0 (Texas 7)"; i32SideBandType = RGXDBG_PBE; break; + case 71: pszTagID = "PBE1 (Texas 7)"; i32SideBandType = RGXDBG_PBE; break; + } + } + + } + + switch (i32SideBandType) + { + case RGXDBG_META: + { + switch (ui32TagSB) + { + case 0x0: pszTagSB = "DCache - Thread 0"; break; + case 0x1: pszTagSB = "ICache - Thread 0"; break; + case 0x2: pszTagSB = "JTag - Thread 0"; break; + case 0x3: pszTagSB = "Slave bus - Thread 0"; break; + case 0x4: pszTagSB = "DCache - Thread 1"; break; + case 0x5: pszTagSB = "ICache - Thread 1"; break; + case 0x6: pszTagSB = "JTag - Thread 1"; break; + case 0x7: pszTagSB = "Slave bus - Thread 1"; break; + } + break; + } + + case RGXDBG_TLA: + { 
+ switch (ui32TagSB) + { + case 0x0: pszTagSB = "Pixel data"; break; + case 0x1: pszTagSB = "Command stream data"; break; + case 0x2: pszTagSB = "Fence or flush"; break; + } + break; + } + + case RGXDBG_VDMM: + { + switch (ui32TagSB) + { + case 0x0: pszTagSB = "Control Stream - Read Only"; break; + case 0x1: pszTagSB = "PPP State - Read Only"; break; + case 0x2: pszTagSB = "Indices - Read Only"; break; + case 0x4: pszTagSB = "Call Stack - Read/Write"; break; + case 0x6: pszTagSB = "DrawIndirect - Read Only"; break; + case 0xA: pszTagSB = "Context State - Write Only"; break; + } + break; + } + + case RGXDBG_CDM: + { + switch (ui32TagSB) + { + case 0x0: pszTagSB = "Control Stream"; break; + case 0x1: pszTagSB = "Indirect Data"; break; + case 0x2: pszTagSB = "Event Write"; break; + case 0x3: pszTagSB = "Context State"; break; + } + break; + } + + case RGXDBG_IPP: + { + switch (ui32TagSB) + { + case 0x0: pszTagSB = "Macrotile Header"; break; + case 0x1: pszTagSB = "Region Header"; break; + } + break; + } + + case RGXDBG_PM: + { + switch (ui32TagSB) + { + case 0x0: pszTagSB = "PMA_TAFSTACK"; break; + case 0x1: pszTagSB = "PMA_TAMLIST"; break; + case 0x2: pszTagSB = "PMA_3DFSTACK"; break; + case 0x3: pszTagSB = "PMA_3DMLIST"; break; + case 0x4: pszTagSB = "PMA_PMCTX0"; break; + case 0x5: pszTagSB = "PMA_PMCTX1"; break; + case 0x6: pszTagSB = "PMA_MAVP"; break; + case 0x7: pszTagSB = "PMA_UFSTACK"; break; + case 0x8: pszTagSB = "PMD_TAFSTACK"; break; + case 0x9: pszTagSB = "PMD_TAMLIST"; break; + case 0xA: pszTagSB = "PMD_3DFSTACK"; break; + case 0xB: pszTagSB = "PMD_3DMLIST"; break; + case 0xC: pszTagSB = "PMD_PMCTX0"; break; + case 0xD: pszTagSB = "PMD_PMCTX1"; break; + case 0xF: pszTagSB = "PMD_UFSTACK"; break; + case 0x10: pszTagSB = "PMA_TAMMUSTACK"; break; + case 0x11: pszTagSB = "PMA_3DMMUSTACK"; break; + case 0x12: pszTagSB = "PMD_TAMMUSTACK"; break; + case 0x13: pszTagSB = "PMD_3DMMUSTACK"; break; + case 0x14: pszTagSB = "PMA_TAUFSTACK"; break; + case 0x15: 
pszTagSB = "PMA_3DUFSTACK"; break; + case 0x16: pszTagSB = "PMD_TAUFSTACK"; break; + case 0x17: pszTagSB = "PMD_3DUFSTACK"; break; + case 0x18: pszTagSB = "PMA_TAVFP"; break; + case 0x19: pszTagSB = "PMD_3DVFP"; break; + case 0x1A: pszTagSB = "PMD_TAVFP"; break; + } + break; + } + + case RGXDBG_TILING: + { + switch (ui32TagSB) + { + case 0x0: pszTagSB = "PSG Control Stream TP0"; break; + case 0x1: pszTagSB = "TPC TP0"; break; + case 0x2: pszTagSB = "VCE0"; break; + case 0x3: pszTagSB = "VCE1"; break; + case 0x4: pszTagSB = "PSG Control Stream TP1"; break; + case 0x5: pszTagSB = "TPC TP1"; break; + case 0x8: pszTagSB = "PSG Region Header TP0"; break; + case 0xC: pszTagSB = "PSG Region Header TP1"; break; + } + break; + } + + case RGXDBG_VDMS: + { + switch (ui32TagSB) + { + case 0x0: pszTagSB = "Context State - Write Only"; break; + } + break; + } + + case RGXDBG_IPF: + { + switch (ui32TagSB) + { + case 0x00: + case 0x20: pszTagSB = "CPF"; break; + case 0x01: pszTagSB = "DBSC"; break; + case 0x02: + case 0x04: + case 0x06: + case 0x08: + case 0x0A: + case 0x0C: + case 0x0E: + case 0x10: pszTagSB = "Control Stream"; break; + case 0x03: + case 0x05: + case 0x07: + case 0x09: + case 0x0B: + case 0x0D: + case 0x0F: + case 0x11: pszTagSB = "Primitive Block"; break; + } + break; + } + + case RGXDBG_ISP: + { + switch (ui32TagSB) + { + case 0x00: pszTagSB = "ZLS read/write"; break; + case 0x20: pszTagSB = "Occlusion query read/write"; break; + } + break; + } + + case RGXDBG_TPF: + { + switch (ui32TagSB) + { + case 0x0: pszTagSB = "TPF0: Primitive Block"; break; + case 0x1: pszTagSB = "TPF0: Depth Bias"; break; + case 0x2: pszTagSB = "TPF0: Per Primitive IDs"; break; + case 0x3: pszTagSB = "CPF - Tables"; break; + case 0x4: pszTagSB = "TPF1: Primitive Block"; break; + case 0x5: pszTagSB = "TPF1: Depth Bias"; break; + case 0x6: pszTagSB = "TPF1: Per Primitive IDs"; break; + case 0x7: pszTagSB = "CPF - Data: Pipe 0"; break; + case 0x8: pszTagSB = "TPF2: Primitive Block"; break; 
+ case 0x9: pszTagSB = "TPF2: Depth Bias"; break; + case 0xA: pszTagSB = "TPF2: Per Primitive IDs"; break; + case 0xB: pszTagSB = "CPF - Data: Pipe 1"; break; + case 0xC: pszTagSB = "TPF3: Primitive Block"; break; + case 0xD: pszTagSB = "TPF3: Depth Bias"; break; + case 0xE: pszTagSB = "TPF3: Per Primitive IDs"; break; + case 0xF: pszTagSB = "CPF - Data: Pipe 2"; break; + } + break; + } + + case RGXDBG_FBCDC: + { + /* + * FBC faults on a 4-cluster phantom does not always set SB + * bit 5, but since FBC is write-only and FBDC is read-only, + * we can set bit 5 if this is a write fault, before decoding. + */ + if (bRead == IMG_FALSE) + { + ui32TagSB |= 0x20; + } + + switch (ui32TagSB) + { + case 0x00: pszTagSB = "FBDC Request, originator ZLS"; break; + case 0x02: pszTagSB = "FBDC Request, originator MCU Dust 0"; break; + case 0x03: pszTagSB = "FBDC Request, originator MCU Dust 1"; break; + case 0x20: pszTagSB = "FBC Request, originator ZLS"; break; + case 0x22: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 0"; break; + case 0x23: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 1"; break; + case 0x24: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 0"; break; + case 0x25: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 1"; break; + case 0x28: pszTagSB = "FBC Request, originator ZLS Fence"; break; + case 0x2a: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 0, Fence"; break; + case 0x2b: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 1, Fence"; break; + case 0x2c: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 0, Fence"; break; + case 0x2d: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 1, Fence"; break; + } + break; + } + + case RGXDBG_MCU: + { + IMG_UINT32 ui32SetNumber = (ui32TagSB >> 5) & 0x7; + IMG_UINT32 ui32WayNumber = (ui32TagSB >> 2) & 0x7; + IMG_UINT32 ui32Group = ui32TagSB & 0x3; + + IMG_CHAR* pszGroup = ""; + + switch (ui32Group) + { + case 0x0: pszGroup = "Banks 0-1"; break; + case 0x1: 
pszGroup = "Banks 2-3"; break; + case 0x2: pszGroup = "Banks 4-5"; break; + case 0x3: pszGroup = "Banks 6-7"; break; + } + + OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, + "Set=%d, Way=%d, %s", ui32SetNumber, ui32WayNumber, pszGroup); + pszTagSB = pszScratchBuf; + break; + } + + default: + { + OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, "SB=0x%02x", ui32TagSB); + pszTagSB = pszScratchBuf; + break; + } + } + + *ppszTagID = pszTagID; + *ppszTagSB = pszTagSB; +} + +static void ConvertOSTimestampToSAndNS(IMG_UINT64 ui64OSTimer, + IMG_UINT64 *pui64Seconds, + IMG_UINT64 *pui64Nanoseconds) +{ + IMG_UINT32 ui32Remainder; + + *pui64Seconds = OSDivide64r64(ui64OSTimer, 1000000000, &ui32Remainder); + *pui64Nanoseconds = ui64OSTimer - (*pui64Seconds * 1000000000ULL); +} + + +typedef enum _DEVICEMEM_HISTORY_QUERY_INDEX_ +{ + DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING, + DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED, + DEVICEMEM_HISTORY_QUERY_INDEX_NEXT, + DEVICEMEM_HISTORY_QUERY_INDEX_COUNT, +} DEVICEMEM_HISTORY_QUERY_INDEX; + +/*! 
+******************************************************************************* + + @Function _PrintDevicememHistoryQueryResult + + @Description + + Print details of a single result from a DevicememHistory query + + @Input pfnDumpDebugPrintf - Debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psFaultProcessInfo - The process info derived from the page fault + @Input psResult - The DevicememHistory result to be printed + @Input ui32Index - The index of the result + + @Return void + +******************************************************************************/ +static void _PrintDevicememHistoryQueryResult(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXMEM_PROCESS_INFO *psFaultProcessInfo, + DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult, + IMG_UINT32 ui32Index, + const IMG_CHAR* pszIndent) +{ + IMG_UINT32 ui32Remainder; + IMG_UINT64 ui64Seconds, ui64Nanoseconds; + + ConvertOSTimestampToSAndNS(psResult->ui64When, + &ui64Seconds, + &ui64Nanoseconds); + + if (psFaultProcessInfo->uiPID != RGXMEM_SERVER_PID_FIRMWARE) + { + PVR_DUMPDEBUG_LOG("%s [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC + " Size: " IMG_DEVMEM_SIZE_FMTSPEC + " Operation: %s Modified: %" IMG_UINT64_FMTSPEC + " us ago (OS time %" IMG_UINT64_FMTSPEC + ".%09" IMG_UINT64_FMTSPEC " s)", + pszIndent, + ui32Index, + psResult->szString, + psResult->sBaseDevVAddr.uiAddr, + psResult->uiSize, + psResult->bMap ? 
"Map": "Unmap", + OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder), + ui64Seconds, + ui64Nanoseconds); + } + else + { + PVR_DUMPDEBUG_LOG("%s [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC + " Size: " IMG_DEVMEM_SIZE_FMTSPEC + " Operation: %s Modified: %" IMG_UINT64_FMTSPEC + " us ago (OS time %" IMG_UINT64_FMTSPEC + ".%09" IMG_UINT64_FMTSPEC + ") PID: %u (%s)", + pszIndent, + ui32Index, + psResult->szString, + psResult->sBaseDevVAddr.uiAddr, + psResult->uiSize, + psResult->bMap ? "Map": "Unmap", + OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder), + ui64Seconds, + ui64Nanoseconds, + psResult->sProcessInfo.uiPID, + psResult->sProcessInfo.szProcessName); + } + + if (!psResult->bRange) + { + PVR_DUMPDEBUG_LOG("%s Whole allocation was %s", pszIndent, psResult->bMap ? "mapped": "unmapped"); + } + else + { + PVR_DUMPDEBUG_LOG("%s Pages %u to %u (" IMG_DEV_VIRTADDR_FMTSPEC "-" IMG_DEV_VIRTADDR_FMTSPEC ") %s%s", + pszIndent, + psResult->ui32StartPage, + psResult->ui32StartPage + psResult->ui32PageCount - 1, + psResult->sMapStartAddr.uiAddr, + psResult->sMapEndAddr.uiAddr, + psResult->bAll ? "(whole allocation) " : "", + psResult->bMap ? "mapped": "unmapped"); + } +} + +/*! 
+******************************************************************************* + + @Function _PrintDevicememHistoryQueryOut + + @Description + + Print details of all the results from a DevicememHistory query + + @Input pfnDumpDebugPrintf - Debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psFaultProcessInfo - The process info derived from the page fault + @Input psQueryOut - Storage for the query results + + @Return void + +******************************************************************************/ +static void _PrintDevicememHistoryQueryOut(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXMEM_PROCESS_INFO *psFaultProcessInfo, + DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut, + const IMG_CHAR* pszIndent) +{ + IMG_UINT32 i; + + if (psQueryOut->ui32NumResults == 0) + { + PVR_DUMPDEBUG_LOG("%s No results", pszIndent); + } + else + { + for (i = 0; i < psQueryOut->ui32NumResults; i++) + { + _PrintDevicememHistoryQueryResult(pfnDumpDebugPrintf, pvDumpDebugFile, + psFaultProcessInfo, + &psQueryOut->sResults[i], + i, + pszIndent); + } + } +} + +/* table of HW page size values and the equivalent */ +static const unsigned int aui32HWPageSizeTable[][2] = +{ + { 0, PVRSRV_4K_PAGE_SIZE }, + { 1, PVRSRV_16K_PAGE_SIZE }, + { 2, PVRSRV_64K_PAGE_SIZE }, + { 3, PVRSRV_256K_PAGE_SIZE }, + { 4, PVRSRV_1M_PAGE_SIZE }, + { 5, PVRSRV_2M_PAGE_SIZE } +}; + +/*! 
+******************************************************************************* + + @Function _PageSizeHWToBytes + + @Description + + Convert a HW page size value to its size in bytes + + @Input ui32PageSizeHW - The HW page size value + + @Return IMG_UINT32 The page size in bytes + +******************************************************************************/ +static IMG_UINT32 _PageSizeHWToBytes(IMG_UINT32 ui32PageSizeHW) +{ + if (ui32PageSizeHW > 5) + { + /* This is invalid, so return a default value as we cannot ASSERT in this code! */ + return PVRSRV_4K_PAGE_SIZE; + } + + return aui32HWPageSizeTable[ui32PageSizeHW][1]; +} + +/*! +******************************************************************************* + + @Function _GetDevicememHistoryData + + @Description + + Get the DevicememHistory results for the given PID and faulting device virtual address. + The function will query DevicememHistory for information about the faulting page, as well + as the page before and after. + + @Input uiPID - The process ID to search for allocations belonging to + @Input sFaultDevVAddr - The device address to search for allocations at/before/after + @Input asQueryOut - Storage for the query results + @Input ui32PageSizeBytes - Faulted page size in bytes + + @Return IMG_BOOL - IMG_TRUE if any results were found for this page fault + +******************************************************************************/ +static IMG_BOOL _GetDevicememHistoryData(IMG_PID uiPID, IMG_DEV_VIRTADDR sFaultDevVAddr, + DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT], + IMG_UINT32 ui32PageSizeBytes) +{ + IMG_UINT32 i; + DEVICEMEM_HISTORY_QUERY_IN sQueryIn; + IMG_BOOL bAnyHits = IMG_FALSE; + + /* if the page fault originated in the firmware then the allocation may + * appear to belong to any PID, because FW allocations are attributed + * to the client process creating the allocation, so instruct the + * devicemem_history query to search all available PIDs + */ + if 
(uiPID == RGXMEM_SERVER_PID_FIRMWARE) + { + sQueryIn.uiPID = DEVICEMEM_HISTORY_PID_ANY; + } + else + { + sQueryIn.uiPID = uiPID; + } + + /* query the DevicememHistory about the preceding / faulting / next page */ + + for (i = DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++) + { + IMG_BOOL bHits; + + switch (i) + { + case DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING: + sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) - 1; + break; + case DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED: + sQueryIn.sDevVAddr = sFaultDevVAddr; + break; + case DEVICEMEM_HISTORY_QUERY_INDEX_NEXT: + sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) + ui32PageSizeBytes; + break; + } + + /* First try matching any record at the exact address... */ + bHits = DevicememHistoryQuery(&sQueryIn, &asQueryOut[i], ui32PageSizeBytes, IMG_FALSE); + if (!bHits) + { + /* If not matched then try matching any record in the same page... */ + bHits = DevicememHistoryQuery(&sQueryIn, &asQueryOut[i], ui32PageSizeBytes, IMG_TRUE); + } + + if (bHits) + { + bAnyHits = IMG_TRUE; + } + } + + + return bAnyHits; +} + +/* stored data about one page fault */ +typedef struct _FAULT_INFO_ +{ + /* the process info of the memory context that page faulted */ + RGXMEM_PROCESS_INFO sProcessInfo; + IMG_DEV_VIRTADDR sFaultDevVAddr; + MMU_FAULT_DATA sMMUFaultData; + DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT]; + /* the CR timer value at the time of the fault, recorded by the FW. + * used to differentiate different page faults + */ + IMG_UINT64 ui64CRTimer; + /* time when this FAULT_INFO entry was added. used for timing + * reference against the map/unmap information + */ + IMG_UINT64 ui64When; + IMG_UINT32 ui32FaultInfoFlags; +} FAULT_INFO; + +/* history list of page faults. 
+ * Keeps the first `n` page faults and the last `n` page faults, like the FW + * HWR log + */ +typedef struct _FAULT_INFO_LOG_ +{ + IMG_UINT32 ui32Head; + /* the number of faults in this log need not correspond exactly to + * the HWINFO number of the FW, as the FW HWINFO log may contain + * non-page fault HWRs + */ + FAULT_INFO asFaults[RGXFWIF_HWINFO_MAX]; +} FAULT_INFO_LOG; + +#define FAULT_INFO_PROC_INFO (0x1U) +#define FAULT_INFO_DEVMEM_HIST (0x2U) + +static FAULT_INFO_LOG gsFaultInfoLog = { 0 }; + +static void _FillAppForFWFaults(PVRSRV_RGXDEV_INFO *psDevInfo, + FAULT_INFO *psInfo, + RGXMEM_PROCESS_INFO *psProcInfo) +{ + IMG_UINT32 i, j; + + for (i = 0; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++) + { + for (j = 0; j < DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS; j++) + { + IMG_BOOL bFound; + + RGXMEM_PROCESS_INFO *psProcInfo = &psInfo->asQueryOut[i].sResults[j].sProcessInfo; + bFound = RGXPCPIDToProcessInfo(psDevInfo, + psProcInfo->uiPID, + psProcInfo); + if (!bFound) + { + OSStringLCopy(psProcInfo->szProcessName, + "(unknown)", + sizeof(psProcInfo->szProcessName)); + } + } + } +} + +/*! 
+******************************************************************************* + + @Function _PrintFaultInfo + + @Description + + Print all the details of a page fault from a FAULT_INFO structure + + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psInfo - The page fault occurrence to print + + @Return void + +******************************************************************************/ +static void _PrintFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + FAULT_INFO *psInfo, + const IMG_CHAR* pszIndent) +{ + IMG_UINT32 i; + IMG_UINT64 ui64Seconds, ui64Nanoseconds; + + ConvertOSTimestampToSAndNS(psInfo->ui64When, &ui64Seconds, &ui64Nanoseconds); + + if (BITMASK_HAS(psInfo->ui32FaultInfoFlags, FAULT_INFO_PROC_INFO)) + { + IMG_PID uiPID = (psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE || psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_PM) ? 
+ 0 : psInfo->sProcessInfo.uiPID; + + PVR_DUMPDEBUG_LOG("%sDevice memory history for page fault address " IMG_DEV_VIRTADDR_FMTSPEC + ", PID: %u " + "(%s, unregistered: %u) OS time: " + "%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC, + pszIndent, + psInfo->sFaultDevVAddr.uiAddr, + uiPID, + psInfo->sProcessInfo.szProcessName, + psInfo->sProcessInfo.bUnregistered, + ui64Seconds, + ui64Nanoseconds); + } + else + { + PVR_DUMPDEBUG_LOG("%sCould not find PID for device memory history on PC of the fault", pszIndent); + } + + if (BITMASK_HAS(psInfo->ui32FaultInfoFlags, FAULT_INFO_DEVMEM_HIST)) + { + for (i = DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++) + { + const IMG_CHAR *pszWhich; + + switch (i) + { + case DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING: + pszWhich = "Preceding page"; + break; + case DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED: + pszWhich = "Faulted page"; + break; + case DEVICEMEM_HISTORY_QUERY_INDEX_NEXT: + pszWhich = "Next page"; + break; + } + + PVR_DUMPDEBUG_LOG("%s %s:", pszIndent, pszWhich); + _PrintDevicememHistoryQueryOut(pfnDumpDebugPrintf, pvDumpDebugFile, + &psInfo->sProcessInfo, + &psInfo->asQueryOut[i], + pszIndent); + } + } + else + { + PVR_DUMPDEBUG_LOG("%s No matching Devmem History for fault address", pszIndent); + } +} + +static void _RecordFaultInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + FAULT_INFO *psInfo, + IMG_DEV_VIRTADDR sFaultDevVAddr, + IMG_DEV_PHYADDR sPCDevPAddr, + IMG_UINT64 ui64CRTimer, + IMG_UINT32 ui32PageSizeBytes) +{ + IMG_BOOL bFound = IMG_FALSE, bIsPMFault = IMG_FALSE; + RGXMEM_PROCESS_INFO sProcessInfo; + + psInfo->ui32FaultInfoFlags = 0; + psInfo->sFaultDevVAddr = sFaultDevVAddr; + psInfo->ui64CRTimer = ui64CRTimer; + psInfo->ui64When = OSClockns64(); + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + /* Check if this is PM fault */ + if (psInfo->sMMUFaultData.eType == MMU_FAULT_TYPE_PM) + { + bIsPMFault = IMG_TRUE; + bFound = IMG_TRUE; + sProcessInfo.uiPID = 
RGXMEM_SERVER_PID_PM; + OSStringLCopy(sProcessInfo.szProcessName, "PM", sizeof(sProcessInfo.szProcessName)); + sProcessInfo.szProcessName[sizeof(sProcessInfo.szProcessName) - 1] = '\0'; + sProcessInfo.bUnregistered = IMG_FALSE; + } + else + { + /* look up the process details for the faulting page catalogue */ + bFound = RGXPCAddrToProcessInfo(psDevInfo, sPCDevPAddr, &sProcessInfo); + } + + if (bFound) + { + IMG_BOOL bHits; + + psInfo->ui32FaultInfoFlags = FAULT_INFO_PROC_INFO; + psInfo->sProcessInfo = sProcessInfo; + + if (bIsPMFault) + { + bHits = IMG_TRUE; + } + else + { + /* get any DevicememHistory data for the faulting address */ + bHits = _GetDevicememHistoryData(sProcessInfo.uiPID, + sFaultDevVAddr, + psInfo->asQueryOut, + ui32PageSizeBytes); + + if (bHits) + { + psInfo->ui32FaultInfoFlags |= FAULT_INFO_DEVMEM_HIST; + + /* if the page fault was caused by the firmware then get information about + * which client application created the related allocations. + * + * Fill in the process info data for each query result. + */ + + if (sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE) + { + _FillAppForFWFaults(psDevInfo, psInfo, &sProcessInfo); + } + } + } + } + } +} + +/*! +******************************************************************************* + + @Function _DumpFaultAddressHostView + + @Description + + Dump FW HWR fault status in human readable form. 
+ + @Input ui32Index - Index of global Fault info + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Return void + +******************************************************************************/ +static void _DumpFaultAddressHostView(MMU_FAULT_DATA *psFaultData, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + const IMG_CHAR* pszIndent) +{ + MMU_LEVEL eTopLevel; + const IMG_CHAR szPageLevel[][4] = {"", "PTE", "PDE", "PCE" }; + const IMG_CHAR szPageError[][3] = {"", "PT", "PD", "PC" }; + + eTopLevel = psFaultData->eTopLevel; + + if (psFaultData->eType == MMU_FAULT_TYPE_UNKNOWN) + { + PVR_DUMPDEBUG_LOG("%sNo live host MMU data available", pszIndent); + return; + } + else if (psFaultData->eType == MMU_FAULT_TYPE_PM) + { + PVR_DUMPDEBUG_LOG("%sPM faulted at PC address = 0x%016" IMG_UINT64_FMTSPECx, pszIndent, psFaultData->sLevelData[MMU_LEVEL_0].ui64Address); + } + else + { + MMU_LEVEL eCurrLevel; + PVR_ASSERT(eTopLevel < MMU_LEVEL_LAST); + + for (eCurrLevel = eTopLevel; eCurrLevel > MMU_LEVEL_0; eCurrLevel--) + { + MMU_LEVEL_DATA *psMMULevelData = &psFaultData->sLevelData[eCurrLevel]; + if (psMMULevelData->ui64Address) + { + if (psMMULevelData->uiBytesPerEntry == 4) + { + PVR_DUMPDEBUG_LOG("%s%s for index %d = 0x%08x and is %s", + pszIndent, + szPageLevel[eCurrLevel], + psMMULevelData->ui32Index, + (IMG_UINT) psMMULevelData->ui64Address, + psMMULevelData->psDebugStr); + } + else + { + PVR_DUMPDEBUG_LOG("%s%s for index %d = 0x%016" IMG_UINT64_FMTSPECx " and is %s", + pszIndent, + szPageLevel[eCurrLevel], + psMMULevelData->ui32Index, + psMMULevelData->ui64Address, + psMMULevelData->psDebugStr); + } + } + else + { + PVR_DUMPDEBUG_LOG("%s%s index (%d) out of bounds (%d)", + pszIndent, + szPageError[eCurrLevel], + psMMULevelData->ui32Index, + psMMULevelData->ui32NumOfEntries); + break; + } + } + } + +} + +/*! 
+******************************************************************************* + + @Function _RGXDumpRGXBIFBank + + @Description + + Dump BIF Bank state in human readable form. + + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psDevInfo - RGX device info + @Input eBankID - BIF identifier + @Input ui64MMUStatus - MMU Status register value + @Input ui64ReqStatus - BIF request Status register value + @Return void + +******************************************************************************/ +static void _RGXDumpRGXBIFBank(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo, + RGXDBG_BIF_ID eBankID, + IMG_UINT64 ui64MMUStatus, + IMG_UINT64 ui64ReqStatus, + const IMG_CHAR *pszIndent) +{ + if (ui64MMUStatus == 0x0) + { + PVR_DUMPDEBUG_LOG("%s - OK", pszBIFNames[eBankID]); + } + else + { + IMG_UINT32 ui32PageSize; + IMG_UINT32 ui32PC = + (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT; + + /* Bank 0 & 1 share the same fields */ + PVR_DUMPDEBUG_LOG("%s%s - FAULT:", + pszIndent, + pszBIFNames[eBankID]); + + /* MMU Status */ + { + IMG_UINT32 ui32MMUDataType = + (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT; + + IMG_BOOL bROFault = (ui64MMUStatus & RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN) != 0; + IMG_BOOL bProtFault = (ui64MMUStatus & RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN) != 0; + + ui32PageSize = (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT; + + PVR_DUMPDEBUG_LOG("%s * MMU status (0x%016" IMG_UINT64_FMTSPECx "): PC = %d%s, Page Size = %d%s%s%s.", + pszIndent, + ui64MMUStatus, + ui32PC, + (ui32PC < 0x8)?"":_RGXDecodePMPC(ui32PC), + ui32PageSize, + 
(bROFault)?", Read Only fault":"", + (bProtFault)?", PM/META protection fault":"", + _RGXDecodeMMULevel(ui32MMUDataType)); + } + + /* Req Status */ + { + IMG_CHAR *pszTagID; + IMG_CHAR *pszTagSB; + IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE]; + IMG_BOOL bRead; + IMG_UINT32 ui32TagSB, ui32TagID; + IMG_UINT64 ui64Addr; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY)) + { + bRead = (ui64ReqStatus & RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_EN) != 0; + ui32TagSB = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_SHIFT; + ui32TagID = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_SHIFT; + } + else + { + bRead = (ui64ReqStatus & RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN) != 0; + ui32TagSB = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT; + ui32TagID = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT; + } + ui64Addr = ((ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT) << + RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT; + + _RGXDecodeBIFReqTags(psDevInfo, eBankID, ui32TagID, ui32TagSB, &pszTagID, &pszTagSB, &aszScratch[0], RGX_DEBUG_STR_SIZE); + + PVR_DUMPDEBUG_LOG("%s * Request (0x%016" IMG_UINT64_FMTSPECx + "): %s (%s), %s " IMG_DEV_VIRTADDR_FMTSPEC ".", + pszIndent, + ui64ReqStatus, + pszTagID, + pszTagSB, + (bRead)?"Reading from":"Writing to", + ui64Addr); + } + } +} + +/*! +******************************************************************************* + + @Function _RGXDumpRGXMMUFaultStatus + + @Description + + Dump MMU Fault status in human readable form. 
+ + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psDevInfo - RGX device info + @Input ui64MMUStatus - MMU Status register value + @Input pszMetaOrCore - string representing call is for META or MMU core + @Return void + +******************************************************************************/ +static void _RGXDumpRGXMMUFaultStatus(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT64 ui64MMUStatus, + const IMG_PCHAR pszMetaOrCore, + const IMG_CHAR *pszIndent) +{ + if (ui64MMUStatus == 0x0) + { + PVR_DUMPDEBUG_LOG("%sMMU (%s) - OK", pszIndent, pszMetaOrCore); + } + else + { + IMG_UINT32 ui32PC = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT; + IMG_UINT64 ui64Addr = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT) << 4; /* align shift */ + IMG_UINT32 ui32Requester = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT; + IMG_UINT32 ui32SideBand = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT; + IMG_UINT32 ui32MMULevel = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT; + IMG_BOOL bRead = (ui64MMUStatus & RGX_CR_MMU_FAULT_STATUS_RNW_EN) != 0; + IMG_BOOL bFault = (ui64MMUStatus & RGX_CR_MMU_FAULT_STATUS_FAULT_EN) != 0; + IMG_BOOL bROFault = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT) == 0x2; + IMG_BOOL bProtFault = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT) == 0x3; + IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE]; + IMG_CHAR *pszTagID; + IMG_CHAR *pszTagSB; + + _RGXDecodeMMUReqTags(psDevInfo, ui32Requester, ui32SideBand, bRead, 
&pszTagID, &pszTagSB, aszScratch, RGX_DEBUG_STR_SIZE); + + PVR_DUMPDEBUG_LOG("%sMMU (%s) - FAULT:", pszIndent, pszMetaOrCore); + PVR_DUMPDEBUG_LOG("%s * MMU status (0x%016" IMG_UINT64_FMTSPECx "): PC = %d, %s 0x%010" IMG_UINT64_FMTSPECx ", %s (%s)%s%s%s%s.", + pszIndent, + ui64MMUStatus, + ui32PC, + (bRead)?"Reading from":"Writing to", + ui64Addr, + pszTagID, + pszTagSB, + (bFault)?", Fault":"", + (bROFault)?", Read Only fault":"", + (bProtFault)?", PM/META protection fault":"", + _RGXDecodeMMULevel(ui32MMULevel)); + + } +} +static_assert((RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); 
+static_assert((RGX_CR_MMU_FAULT_STATUS_RNW_EN == RGX_CR_MMU_FAULT_STATUS_META_RNW_EN), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_FAULT_EN == RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); + + + +#if !defined(NO_HARDWARE) +static PVRSRV_ERROR _RGXMipsExtraDebug(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_MIPS_STATE *psMIPSState) +{ + void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; + IMG_UINT32 ui32RegRead; + IMG_UINT32 eError = PVRSRV_OK; + IMG_UINT32 *pui32NMIMemoryPointer; + IMG_UINT32 volatile *pui32SyncFlag; + IMG_DEVMEM_OFFSET_T uiNMIMemoryBootOffset; + + /* Map the FW data area to the kernel */ + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc, + (void **)&pui32NMIMemoryPointer); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire NMI shared memory area (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto map_error_fail; + } + + /* Calculate offset to the boot/NMI data page */ + uiNMIMemoryBootOffset = RGXMIPSFW_GET_OFFSET_IN_DWORDS(RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA)); + + /* Jump to the NMI shared data area within the page above */ + pui32NMIMemoryPointer += uiNMIMemoryBootOffset + RGXMIPSFW_GET_OFFSET_IN_DWORDS(RGXMIPSFW_NMI_SHARED_DATA_BASE); + + /* Acquire the NMI operations lock */ + OSLockAcquire(psDevInfo->hNMILock); + + /* Make sure the synchronisation 
flag is set to 0 */ + pui32SyncFlag = &pui32NMIMemoryPointer[RGXMIPSFW_NMI_SYNC_FLAG_OFFSET]; + *pui32SyncFlag = 0; + OSWriteMemoryBarrier(); + (void) *pui32SyncFlag; + + /* Enable NMI issuing in the MIPS wrapper */ + OSWriteHWReg64(pvRegsBaseKM, + RGX_CR_MIPS_WRAPPER_NMI_ENABLE, + RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN); + (void) OSReadHWReg64(pvRegsBaseKM, RGX_CR_MIPS_WRAPPER_NMI_ENABLE); + + /* Check the MIPS is not in error state already (e.g. it is booting or an NMI has already been requested) */ + ui32RegRead = OSReadHWReg32(pvRegsBaseKM, + RGX_CR_MIPS_EXCEPTION_STATUS); + if ((ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN) || (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN)) + { + + eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE; + goto fail; + } + ui32RegRead = 0; + + /* Issue NMI */ + OSWriteHWReg32(pvRegsBaseKM, + RGX_CR_MIPS_WRAPPER_NMI_EVENT, + RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN); + (void) OSReadHWReg64(pvRegsBaseKM, RGX_CR_MIPS_WRAPPER_NMI_EVENT); + + + /* Wait for NMI Taken to be asserted */ + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + ui32RegRead = OSReadHWReg32(pvRegsBaseKM, + RGX_CR_MIPS_EXCEPTION_STATUS); + if (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + if ((ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN) == 0) + { + eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE; + goto fail; + } + ui32RegRead = 0; + + /* Allow the firmware to proceed */ + *pui32SyncFlag = 1; + OSWriteMemoryBarrier(); + (void) *pui32SyncFlag; + + /* Wait for the FW to have finished the NMI routine */ + ui32RegRead = OSReadHWReg32(pvRegsBaseKM, + RGX_CR_MIPS_EXCEPTION_STATUS); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + ui32RegRead = OSReadHWReg32(pvRegsBaseKM, + RGX_CR_MIPS_EXCEPTION_STATUS); + if (!(ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN)) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } 
END_LOOP_UNTIL_TIMEOUT(); + if (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN) + { + eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE; + goto fail; + } + ui32RegRead = 0; + + /* Copy state */ + OSDeviceMemCopy(psMIPSState, pui32NMIMemoryPointer + RGXMIPSFW_NMI_STATE_OFFSET, sizeof(*psMIPSState)); + + --(psMIPSState->ui32ErrorEPC); + --(psMIPSState->ui32EPC); + + /* Disable NMI issuing in the MIPS wrapper */ + OSWriteHWReg32(pvRegsBaseKM, + RGX_CR_MIPS_WRAPPER_NMI_ENABLE, + 0); + (void) OSReadHWReg64(pvRegsBaseKM, RGX_CR_MIPS_WRAPPER_NMI_ENABLE); + +fail: + /* Release the NMI operations lock */ + OSLockRelease(psDevInfo->hNMILock); + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc); +map_error_fail: + return eError; +} + +/* Print decoded information from cause register */ +static void _RGXMipsDumpCauseDecode(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32Cause, + IMG_UINT32 ui32ErrorState) +{ +#define INDENT " " + const IMG_UINT32 ui32ExcCode = RGXMIPSFW_C0_CAUSE_EXCCODE(ui32Cause); + const IMG_CHAR * const pszException = _GetMIPSExcString(ui32ExcCode); + + if (ui32ErrorState == RGXMIPSFW_NMI_ERROR_STATE_SET && + pszException != NULL) + { + PVR_DUMPDEBUG_LOG(INDENT "Cause exception: %s", pszException); + } + + if (ui32Cause & RGXMIPSFW_C0_CAUSE_FDCIPENDING) + { + PVR_DUMPDEBUG_LOG(INDENT "FDC interrupt pending"); + } + + if (!(ui32Cause & RGXMIPSFW_C0_CAUSE_IV)) + { + PVR_DUMPDEBUG_LOG(INDENT "Interrupt uses general interrupt vector"); + } + + if (ui32Cause & RGXMIPSFW_C0_CAUSE_PCIPENDING) + { + PVR_DUMPDEBUG_LOG(INDENT "Performance Counter Interrupt pending"); + } + + /* Unusable Coproc exception */ + if (ui32ExcCode == 11) + { + PVR_DUMPDEBUG_LOG(INDENT "Unusable Coprocessor: %d", RGXMIPSFW_C0_CAUSE_UNUSABLE_UNIT(ui32Cause)); + } + +#undef INDENT +} + +static IMG_BOOL _IsFWCodeException(IMG_UINT32 ui32ExcCode) +{ + if (ui32ExcCode >= sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING)) + { + 
PVR_DPF((PVR_DBG_WARNING, + "Only %lu exceptions available in MIPS, %u is not a valid exception code", + (unsigned long)sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING), ui32ExcCode)); + return IMG_FALSE; + } + + return apsMIPSExcCodes[ui32ExcCode].bIsFatal; +} + +static void _RGXMipsDumpDebugDecode(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32Debug, + IMG_UINT32 ui32DEPC) +{ + const IMG_CHAR *pszDException = NULL; + IMG_UINT32 i; +#define INDENT " " + + if (!(ui32Debug & RGXMIPSFW_C0_DEBUG_DM)) + { + return; + } + + PVR_DUMPDEBUG_LOG("DEBUG :"); + + pszDException = _GetMIPSExcString(RGXMIPSFW_C0_DEBUG_EXCCODE(ui32Debug)); + + if (pszDException != NULL) + { + PVR_DUMPDEBUG_LOG(INDENT "Debug exception: %s", pszDException); + } + + for (i = 0; i < ARRAY_SIZE(sMIPS_C0_DebugTable); ++i) + { + const RGXMIPSFW_C0_DEBUG_TBL_ENTRY * const psDebugEntry = &sMIPS_C0_DebugTable[i]; + + if (ui32Debug & psDebugEntry->ui32Mask) + { + PVR_DUMPDEBUG_LOG(INDENT "%s", psDebugEntry->pszExplanation); + } + } +#undef INDENT + PVR_DUMPDEBUG_LOG("DEPC :0x%08X", ui32DEPC); +} + +static inline void _GetMipsTLBPARanges(const RGX_MIPS_TLB_ENTRY *psTLBEntry, + const RGX_MIPS_REMAP_ENTRY *psRemapEntry0, + const RGX_MIPS_REMAP_ENTRY *psRemapEntry1, + IMG_UINT64 *pui64PA0Start, + IMG_UINT64 *pui64PA0End, + IMG_UINT64 *pui64PA1Start, + IMG_UINT64 *pui64PA1End) +{ + IMG_BOOL bUseRemapOutput = (psRemapEntry0 != NULL && psRemapEntry1 != NULL) ? 
IMG_TRUE : IMG_FALSE; + IMG_UINT64 ui64PageSize = RGXMIPSFW_TLB_GET_PAGE_SIZE(psTLBEntry->ui32TLBPageMask); + + if ((psTLBEntry->ui32TLBLo0 & RGXMIPSFW_TLB_VALID) == 0) + { + /* Dummy values to fail the range checks later */ + *pui64PA0Start = -1ULL; + *pui64PA0End = -1ULL; + } + else if (bUseRemapOutput) + { + *pui64PA0Start = (IMG_UINT64)psRemapEntry0->ui32RemapAddrOut << 12; + *pui64PA0End = *pui64PA0Start + ui64PageSize - 1; + } + else + { + *pui64PA0Start = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo0); + *pui64PA0End = *pui64PA0Start + ui64PageSize - 1; + } + + if ((psTLBEntry->ui32TLBLo1 & RGXMIPSFW_TLB_VALID) == 0) + { + /* Dummy values to fail the range checks later */ + *pui64PA1Start = -1ULL; + *pui64PA1End = -1ULL; + } + else if (bUseRemapOutput) + { + *pui64PA1Start = (IMG_UINT64)psRemapEntry1->ui32RemapAddrOut << 12; + *pui64PA1End = *pui64PA1Start + ui64PageSize - 1; + } + else + { + *pui64PA1Start = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo1); + *pui64PA1End = *pui64PA1Start + ui64PageSize - 1; + } +} + +static void _CheckMipsTLBDuplicatePAs(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + const RGX_MIPS_TLB_ENTRY *psTLB, + const RGX_MIPS_REMAP_ENTRY *psRemap) +{ + IMG_UINT64 ui64PA0StartI, ui64PA1StartI, ui64PA0StartJ, ui64PA1StartJ; + IMG_UINT64 ui64PA0EndI, ui64PA1EndI, ui64PA0EndJ, ui64PA1EndJ; + IMG_UINT32 i, j; + +#define RANGES_OVERLAP(start0,end0,start1,end1) ((start0) < (end1) && (start1) < (end0)) + + for (i = 0; i < RGXMIPSFW_NUMBER_OF_TLB_ENTRIES; i++) + { + _GetMipsTLBPARanges(&psTLB[i], + psRemap ? &psRemap[i] : NULL, + psRemap ? &psRemap[i + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] : NULL, + &ui64PA0StartI, &ui64PA0EndI, + &ui64PA1StartI, &ui64PA1EndI); + + for (j = i + 1; j < RGXMIPSFW_NUMBER_OF_TLB_ENTRIES; j++) + { + _GetMipsTLBPARanges(&psTLB[j], + psRemap ? &psRemap[j] : NULL, + psRemap ? 
&psRemap[j + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] : NULL, + &ui64PA0StartJ, &ui64PA0EndJ, + &ui64PA1StartJ, &ui64PA1EndJ); + + if (RANGES_OVERLAP(ui64PA0StartI, ui64PA0EndI, ui64PA0StartJ, ui64PA0EndJ) || + RANGES_OVERLAP(ui64PA0StartI, ui64PA0EndI, ui64PA1StartJ, ui64PA1EndJ) || + RANGES_OVERLAP(ui64PA1StartI, ui64PA1EndI, ui64PA0StartJ, ui64PA0EndJ) || + RANGES_OVERLAP(ui64PA1StartI, ui64PA1EndI, ui64PA1StartJ, ui64PA1EndJ) ) + { + PVR_DUMPDEBUG_LOG("Overlap between TLB entry %u and %u", i , j); + } + } + } +} + +static inline void _RGXMipsDumpTLBEntry(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + const RGX_MIPS_TLB_ENTRY *psTLBEntry, + const RGX_MIPS_REMAP_ENTRY *psRemapEntry0, + const RGX_MIPS_REMAP_ENTRY *psRemapEntry1, + IMG_UINT32 ui32Index) +{ + IMG_BOOL bDumpRemapEntries = (psRemapEntry0 != NULL && psRemapEntry1 != NULL) ? IMG_TRUE : IMG_FALSE; + IMG_UINT64 ui64PA0 = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo0); + IMG_UINT64 ui64PA1 = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo1); + IMG_UINT64 ui64Remap0AddrOut = 0, ui64Remap1AddrOut = 0; + IMG_UINT32 ui32Remap0AddrIn = 0, ui32Remap1AddrIn = 0; + + static const IMG_CHAR * const apszPermissionInhibit[4] = + { + "", + "XI", + "RI", + "RIXI" + }; + + static const IMG_CHAR * const apszCoherencyTLB[8] = + { + "C", + "C", + " ", + "C", + "C", + "C", + "C", + " " + }; + + static const IMG_CHAR * const apszDirtyGlobalValid[8] = + { + " ", + " G", + " V ", + " VG", + "D ", + "D G", + "DV ", + "DVG" + }; + + if (bDumpRemapEntries) + { + /* RemapAddrIn is always 4k aligned and on 32 bit */ + ui32Remap0AddrIn = psRemapEntry0->ui32RemapAddrIn << 12; + ui32Remap1AddrIn = psRemapEntry1->ui32RemapAddrIn << 12; + + /* RemapAddrOut is always 4k aligned and on 32 or 36 bit */ + ui64Remap0AddrOut = (IMG_UINT64)psRemapEntry0->ui32RemapAddrOut << 12; + ui64Remap1AddrOut = (IMG_UINT64)psRemapEntry1->ui32RemapAddrOut << 12; + + /* If TLB and remap entries match, then merge them else, print them separately */ + 
if ((IMG_UINT32)ui64PA0 == ui32Remap0AddrIn && + (IMG_UINT32)ui64PA1 == ui32Remap1AddrIn) + { + ui64PA0 = ui64Remap0AddrOut; + ui64PA1 = ui64Remap1AddrOut; + bDumpRemapEntries = IMG_FALSE; + } + } + + PVR_DUMPDEBUG_LOG("%2u) VA 0x%08X (%3uk) -> PA0 0x%08" IMG_UINT64_FMTSPECx " %s%s%s, " + "PA1 0x%08" IMG_UINT64_FMTSPECx " %s%s%s", + ui32Index, + psTLBEntry->ui32TLBHi, + RGXMIPSFW_TLB_GET_PAGE_SIZE(psTLBEntry->ui32TLBPageMask), + ui64PA0, + apszPermissionInhibit[RGXMIPSFW_TLB_GET_INHIBIT(psTLBEntry->ui32TLBLo0)], + apszDirtyGlobalValid[RGXMIPSFW_TLB_GET_DGV(psTLBEntry->ui32TLBLo0)], + apszCoherencyTLB[RGXMIPSFW_TLB_GET_COHERENCY(psTLBEntry->ui32TLBLo0)], + ui64PA1, + apszPermissionInhibit[RGXMIPSFW_TLB_GET_INHIBIT(psTLBEntry->ui32TLBLo1)], + apszDirtyGlobalValid[RGXMIPSFW_TLB_GET_DGV(psTLBEntry->ui32TLBLo1)], + apszCoherencyTLB[RGXMIPSFW_TLB_GET_COHERENCY(psTLBEntry->ui32TLBLo1)]); + + if (bDumpRemapEntries) + { + PVR_DUMPDEBUG_LOG(" Remap %2u : IN 0x%08X (%3uk) => OUT 0x%08" IMG_UINT64_FMTSPECx, + ui32Index, + ui32Remap0AddrIn, + RGXMIPSFW_REMAP_GET_REGION_SIZE(psRemapEntry0->ui32RemapRegionSize), + ui64Remap0AddrOut); + + PVR_DUMPDEBUG_LOG(" Remap %2u : IN 0x%08X (%3uk) => OUT 0x%08" IMG_UINT64_FMTSPECx, + ui32Index + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES, + ui32Remap1AddrIn, + RGXMIPSFW_REMAP_GET_REGION_SIZE(psRemapEntry1->ui32RemapRegionSize), + ui64Remap1AddrOut); + } +} + +#endif /* !defined(NO_HARDWARE) */ + +/* + Appends flags strings to a null-terminated string buffer - each flag + description string starts with a space. 
*/
static void _Flags2Description(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, const IMG_FLAGS2DESC *psConvTable,
		IMG_UINT32 ui32TableSize, IMG_UINT32 ui32Flags)
{
	IMG_UINT32 ui32Idx;

	for (ui32Idx = 0; ui32Idx < ui32TableSize; ui32Idx++)
	{
		/* Append the label only when ALL bits of the table entry's flag
		 * are set in ui32Flags (subset match, not any-bit match).
		 * OSStringLCat bounds the result to ui32DescSize, so overlong
		 * descriptions are silently truncated.
		 */
		if ((ui32Flags & psConvTable[ui32Idx].uiFlag) == psConvTable[ui32Idx].uiFlag)
		{
			OSStringLCat(psDesc, psConvTable[ui32Idx].pszLabel, ui32DescSize);
		}
	}
}

/*
	Writes flags strings to an uninitialised buffer.
*/
static void _GetFwSysFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, IMG_UINT32 ui32RawFlags)
{
	const IMG_CHAR *psCswLabel = "Ctx switch options:";
	size_t uLabelLen = OSStringLength(psCswLabel);
	/* Half the remaining space (minus 1) is budgeted for the ctx-switch
	 * descriptions so the misc descriptions appended afterwards also fit.
	 * NOTE(review): (ui32DescSize - uLabelLen) underflows if the buffer is
	 * smaller than the label - assumes callers always pass a buffer larger
	 * than the label; confirm at call sites.
	 */
	const size_t uiBytesPerDesc = (ui32DescSize - uLabelLen) / 2U - 1U;

	OSStringLCopy(psDesc, psCswLabel, ui32DescSize);

	_Flags2Description(psDesc, uiBytesPerDesc + uLabelLen, asCswOpts2Description, ARRAY_SIZE(asCswOpts2Description), ui32RawFlags);
	_Flags2Description(psDesc, ui32DescSize, asMisc2Description, ARRAY_SIZE(asMisc2Description), ui32RawFlags);
}

static void _GetFwOsFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, IMG_UINT32 ui32RawFlags)
{
	const IMG_CHAR *psCswLabel = "Ctx switch:";
	size_t uLabelLen = OSStringLength(psCswLabel);
	/* Same budgeting (and same underflow assumption) as
	 * _GetFwSysFlagsDescription above.
	 */
	const size_t uiBytesPerDesc = (ui32DescSize - uLabelLen) / 2U - 1U;

	OSStringLCopy(psDesc, psCswLabel, ui32DescSize);

	_Flags2Description(psDesc, uiBytesPerDesc + uLabelLen, asFwOsCfg2Description, ARRAY_SIZE(asFwOsCfg2Description), ui32RawFlags);
}


/*!
*******************************************************************************

 @Function	_RGXDumpFWAssert

 @Description

 Dump FW assert strings when a thread asserts.
+ + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psRGXFWIfTraceBufCtl - RGX FW trace buffer + + @Return void + +******************************************************************************/ +static void _RGXDumpFWAssert(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl) +{ + IMG_CHAR *pszTraceAssertPath; + IMG_CHAR *pszTraceAssertInfo; + IMG_INT32 ui32TraceAssertLine; + IMG_UINT32 i; + + for (i = 0; i < RGXFW_THREAD_NUM; i++) + { + pszTraceAssertPath = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szPath; + pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szInfo; + ui32TraceAssertLine = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.ui32LineNum; + + /* print non-null assert strings */ + if (*pszTraceAssertInfo) + { + PVR_DUMPDEBUG_LOG("FW-T%d Assert: %s (%s:%d)", + i, pszTraceAssertInfo, pszTraceAssertPath, ui32TraceAssertLine); + } + } +} + +/*! +******************************************************************************* + + @Function _RGXDumpFWFaults + + @Description + + Dump FW assert strings when a thread asserts. 
+ + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psFwSysData - RGX FW shared system data + + @Return void + +******************************************************************************/ +static void _RGXDumpFWFaults(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXFWIF_SYSDATA *psFwSysData) +{ + if (psFwSysData->ui32FWFaults > 0) + { + IMG_UINT32 ui32StartFault = psFwSysData->ui32FWFaults - RGXFWIF_FWFAULTINFO_MAX; + IMG_UINT32 ui32EndFault = psFwSysData->ui32FWFaults - 1; + IMG_UINT32 ui32Index; + + if (psFwSysData->ui32FWFaults < RGXFWIF_FWFAULTINFO_MAX) + { + ui32StartFault = 0; + } + + for (ui32Index = ui32StartFault; ui32Index <= ui32EndFault; ui32Index++) + { + RGX_FWFAULTINFO *psFaultInfo = &psFwSysData->sFaultInfo[ui32Index % RGXFWIF_FWFAULTINFO_MAX]; + IMG_UINT64 ui64Seconds, ui64Nanoseconds; + + /* Split OS timestamp in seconds and nanoseconds */ + ConvertOSTimestampToSAndNS(psFaultInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds); + + PVR_DUMPDEBUG_LOG("FW Fault %d: %s (%s:%d)", + ui32Index+1, psFaultInfo->sFaultBuf.szInfo, + psFaultInfo->sFaultBuf.szPath, + psFaultInfo->sFaultBuf.ui32LineNum); + PVR_DUMPDEBUG_LOG(" Data = 0x%08x, CRTimer = 0x%012"IMG_UINT64_FMTSPECx", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC, + psFaultInfo->ui32Data, + psFaultInfo->ui64CRTimer, + ui64Seconds, ui64Nanoseconds); + } + } +} + +static void _RGXDumpFWPoll(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXFWIF_SYSDATA *psFwSysData) +{ + IMG_UINT32 i; + for (i = 0; i < RGXFW_THREAD_NUM; i++) + { + if (psFwSysData->aui32CrPollAddr[i]) + { + PVR_DUMPDEBUG_LOG("T%u polling %s (reg:0x%08X mask:0x%08X)", + i, + ((psFwSysData->aui32CrPollAddr[i] & RGXFW_POLL_TYPE_SET)?("set"):("unset")), + psFwSysData->aui32CrPollAddr[i] & ~RGXFW_POLL_TYPE_SET, + psFwSysData->aui32CrPollMask[i]); + } + } + +} + 
+static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, void *pvDumpDebugFile, + RGXFWIF_SYSDATA *psFwSysData, RGXFWIF_HWRINFOBUF *psHWRInfoBuf, PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_BOOL bAnyLocked = IMG_FALSE; + IMG_UINT32 dm, i; + IMG_UINT32 ui32LineSize; + IMG_CHAR *pszLine, *pszTemp; + const IMG_CHAR *apszDmNames[RGXFWIF_DM_MAX] = {"GP", "TDM", "TA", "3D", "CDM"}; + const IMG_CHAR szMsgHeader[] = "Number of HWR: "; + const IMG_CHAR szMsgFalse[] = "FALSE("; + IMG_CHAR *pszLockupType = ""; + RGX_HWRINFO *psHWRInfo; + const IMG_UINT32 ui32MsgHeaderCharCount = ARRAY_SIZE(szMsgHeader) - 1; /* size includes the null */ + const IMG_UINT32 ui32MsgFalseCharCount = ARRAY_SIZE(szMsgFalse) - 1; + IMG_UINT32 ui32HWRRecoveryFlags; + IMG_UINT32 ui32ReadIndex; + + if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM))) + { + apszDmNames[RGXFWIF_DM_TDM] = "2D"; + } + + for (dm = 0; dm < RGXFWIF_DM_MAX; dm++) + { + if (psHWRInfoBuf->aui32HwrDmLockedUpCount[dm] || + psHWRInfoBuf->aui32HwrDmOverranCount[dm]) + { + bAnyLocked = IMG_TRUE; + break; + } + } + + if (!PVRSRV_VZ_MODE_IS(GUEST) && !bAnyLocked && (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_HARDWARE_OK)) + { + /* No HWR situation, print nothing */ + return; + } + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + IMG_BOOL bAnyHWROccured = IMG_FALSE; + + for (dm = 0; dm < RGXFWIF_DM_MAX; dm++) + { + if (psHWRInfoBuf->aui32HwrDmRecoveredCount[dm] != 0 || + psHWRInfoBuf->aui32HwrDmLockedUpCount[dm] != 0 || + psHWRInfoBuf->aui32HwrDmOverranCount[dm] !=0) + { + bAnyHWROccured = IMG_TRUE; + break; + } + } + + if (!bAnyHWROccured) + { + return; + } + } + + ui32LineSize = sizeof(IMG_CHAR) * ( + ui32MsgHeaderCharCount + + (RGXFWIF_DM_MAX * ( 4/*DM name + left parenthesis*/ + + 10/*UINT32 max num of digits*/ + + 1/*slash*/ + + 10/*UINT32 max num of digits*/ + + 3/*right parenthesis + comma + space*/)) + + ui32MsgFalseCharCount + 1 + (RGXFWIF_DM_MAX*6) + 1 + /* 'FALSE(' + ')' + (UINT16 max num + comma) per DM + \0 */ + ); 
+ + pszLine = OSAllocMem(ui32LineSize); + if (pszLine == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Out of mem allocating line string (size: %d)", + __func__, + ui32LineSize)); + return; + } + + OSStringLCopy(pszLine, szMsgHeader, ui32LineSize); + pszTemp = pszLine + ui32MsgHeaderCharCount; + + for (dm = 0; dm < RGXFWIF_DM_MAX; dm++) + { + pszTemp += OSSNPrintf(pszTemp, + 4 + 10 + 1 + 10 + 1 + 10 + 1 + 1 + 1 + 1 + /* (name + left parenthesis) + UINT32 + slash + UINT32 + plus + UINT32 + right parenthesis + comma + space + \0 */, + "%s(%u/%u+%u), ", + apszDmNames[dm], + psHWRInfoBuf->aui32HwrDmRecoveredCount[dm], + psHWRInfoBuf->aui32HwrDmLockedUpCount[dm], + psHWRInfoBuf->aui32HwrDmOverranCount[dm]); + } + + OSStringLCat(pszLine, szMsgFalse, ui32LineSize); + pszTemp += ui32MsgFalseCharCount; + + for (dm = 0; dm < RGXFWIF_DM_MAX; dm++) + { + pszTemp += OSSNPrintf(pszTemp, + 10 + 1 + 1 /* UINT32 max num + comma + \0 */, + (dm < RGXFWIF_DM_MAX-1 ? "%u," : "%u)"), + psHWRInfoBuf->aui32HwrDmFalseDetectCount[dm]); + } + + PVR_DUMPDEBUG_LOG("%s", pszLine); + + OSFreeMem(pszLine); + + /* Print out per HWR info */ + for (dm = 0; dm < RGXFWIF_DM_MAX; dm++) + { + if (dm == RGXFWIF_DM_GP) + { + PVR_DUMPDEBUG_LOG("DM %d (GP)", dm); + } + else + { + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + IMG_CHAR sPerDmHwrDescription[RGX_DEBUG_STR_SIZE]; + sPerDmHwrDescription[0] = '\0'; + + _Flags2Description(sPerDmHwrDescription, RGX_DEBUG_STR_SIZE, + asDmState2Description, ARRAY_SIZE(asDmState2Description), + psFwSysData->aui32HWRRecoveryFlags[dm]); + PVR_DUMPDEBUG_LOG("DM %d (HWRflags 0x%08x:%s)", dm, psFwSysData->aui32HWRRecoveryFlags[dm], sPerDmHwrDescription); + } + else + { + PVR_DUMPDEBUG_LOG("DM %d", dm); + } + } + + ui32ReadIndex = 0; + for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++) + { + IMG_BOOL bPMFault = IMG_FALSE; + IMG_UINT32 ui32PC; + IMG_UINT32 ui32PageSize = 0; + IMG_DEV_PHYADDR sPCDevPAddr = { 0 }; + + psHWRInfo = &psHWRInfoBuf->sHWRInfo[ui32ReadIndex]; + + if ((psHWRInfo->eDM == 
dm) && (psHWRInfo->ui32HWRNumber != 0)) + { + IMG_CHAR aui8RecoveryNum[10+10+1]; + IMG_UINT64 ui64Seconds, ui64Nanoseconds; + IMG_BOOL bPageFault = IMG_FALSE; + IMG_DEV_VIRTADDR sFaultDevVAddr; + + /* Split OS timestamp in seconds and nanoseconds */ + ConvertOSTimestampToSAndNS(psHWRInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds); + + ui32HWRRecoveryFlags = psHWRInfo->ui32HWRRecoveryFlags; + if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_LOCKUP) { pszLockupType = ", Guilty Lockup"; } + else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_LOCKUP) { pszLockupType = ", Innocent Lockup"; } + else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_OVERRUNING) { pszLockupType = ", Guilty Overrun"; } + else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_OVERRUNING) { pszLockupType = ", Innocent Overrun"; } + else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH) { pszLockupType = ", Hard Context Switch"; } + + OSSNPrintf(aui8RecoveryNum, sizeof(aui8RecoveryNum), "Recovery %d:", psHWRInfo->ui32HWRNumber); + PVR_DUMPDEBUG_LOG(" %s PID = %u, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s", + aui8RecoveryNum, + psHWRInfo->ui32PID, + psHWRInfo->ui32FrameNum, + psHWRInfo->ui32ActiveHWRTData, + psHWRInfo->ui32EventStatus, + pszLockupType); + pszTemp = &aui8RecoveryNum[0]; + while (*pszTemp != '\0') + { + *pszTemp++ = ' '; + } + + /* There's currently no time correlation for the Guest OSes on the Firmware so there's no point printing OS Timestamps on Guests */ + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + PVR_DUMPDEBUG_LOG(" %s CRTimer = 0x%012"IMG_UINT64_FMTSPECx", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ", CyclesElapsed = %" IMG_INT64_FMTSPECd, + aui8RecoveryNum, + psHWRInfo->ui64CRTimer, + ui64Seconds, + ui64Nanoseconds, + (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256); + } + else + { + PVR_DUMPDEBUG_LOG(" %s CRTimer = 0x%012"IMG_UINT64_FMTSPECx", CyclesElapsed = %" IMG_INT64_FMTSPECd, + aui8RecoveryNum, + 
psHWRInfo->ui64CRTimer, + (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256); + } + + if (psHWRInfo->ui64CRTimeHWResetFinish != 0) + { + if (psHWRInfo->ui64CRTimeFreelistReady != 0) + { + PVR_DUMPDEBUG_LOG(" %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", FreelistReconTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalRecoveryTimeInCycles = %" IMG_INT64_FMTSPECd, + aui8RecoveryNum, + (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256, + (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256, + (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimeHWResetFinish)*256, + (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimer)*256); + } + else + { + PVR_DUMPDEBUG_LOG(" %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalRecoveryTimeInCycles = %" IMG_INT64_FMTSPECd, + aui8RecoveryNum, + (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256, + (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256, + (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimer)*256); + } + } + + switch (psHWRInfo->eHWRType) + { + case RGX_HWRTYPE_BIF0FAULT: + case RGX_HWRTYPE_BIF1FAULT: + { + if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))) + { + _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXFWIF_HWRTYPE_BIF_BANK_GET(psHWRInfo->eHWRType), + psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus, + psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus, + DD_NORMAL_INDENT); + + bPageFault = IMG_TRUE; + sFaultDevVAddr.uiAddr = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK); + ui32PC = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT; + bPMFault = (ui32PC >= 8); + ui32PageSize = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & 
~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT; + sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress; + } + } + break; + case RGX_HWRTYPE_TEXASBIF0FAULT: + { + if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))) + { + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING)) + { + _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, + psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus, + psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus, + DD_NORMAL_INDENT); + + bPageFault = IMG_TRUE; + sFaultDevVAddr.uiAddr = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK); + ui32PC = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT; + bPMFault = (ui32PC >= 8); + ui32PageSize = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT; + sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress; + } + } + } + break; + case RGX_HWRTYPE_MMUFAULT: + { + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) + { + _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, + psHWRInfo->uHWRData.sMMUInfo.ui64MMUStatus, + "Core", + DD_NORMAL_INDENT); + + bPageFault = IMG_TRUE; + sFaultDevVAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.ui64MMUStatus; + sFaultDevVAddr.uiAddr &= ~RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK; + sFaultDevVAddr.uiAddr >>= RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT; + sFaultDevVAddr.uiAddr <<= 4; /* align shift */ + ui32PC = (psHWRInfo->uHWRData.sMMUInfo.ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT; +#if defined(SUPPORT_TRUSTED_DEVICE) + ui32PC = ui32PC - 1; +#endif + bPMFault = (ui32PC <= 8); + sPCDevPAddr.uiAddr = 
psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress; + } + } + break; + case RGX_HWRTYPE_MMUMETAFAULT: + { + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) + { + _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, + psHWRInfo->uHWRData.sMMUInfo.ui64MMUStatus, + "Meta", + DD_NORMAL_INDENT); + + bPageFault = IMG_TRUE; + sFaultDevVAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.ui64MMUStatus; + sFaultDevVAddr.uiAddr &= ~RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK; + sFaultDevVAddr.uiAddr >>= RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT; + sFaultDevVAddr.uiAddr <<= 4; /* align shift */ + sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress; + } + } + break; + + + case RGX_HWRTYPE_POLLFAILURE: + { + PVR_DUMPDEBUG_LOG(" T%u polling %s (reg:0x%08X mask:0x%08X last:0x%08X)", + psHWRInfo->uHWRData.sPollInfo.ui32ThreadNum, + ((psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & RGXFW_POLL_TYPE_SET)?("set"):("unset")), + psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & ~RGXFW_POLL_TYPE_SET, + psHWRInfo->uHWRData.sPollInfo.ui32CrPollMask, + psHWRInfo->uHWRData.sPollInfo.ui32CrPollLastValue); + } + break; + + case RGX_HWRTYPE_MIPSTLBFAULT: + { + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + IMG_UINT32 ui32EntryLo = psHWRInfo->uHWRData.sTLBInfo.ui32EntryLo; + + /* This is not exactly what the MMU code does, but the result should be the same */ + const IMG_UINT32 ui32UnmappedEntry = + ((IMG_UINT32)(MMU_BAD_PHYS_ADDR & 0xffffffff) & RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT) | RGXMIPSFW_ENTRYLO_UNCACHED; + + PVR_DUMPDEBUG_LOG(" MIPS TLB fault: BadVA = 0x%08X, EntryLo = 0x%08X" + " (page PA 0x%" IMG_UINT64_FMTSPECx", V %u)", + psHWRInfo->uHWRData.sTLBInfo.ui32BadVAddr, + ui32EntryLo, + RGXMIPSFW_TLB_GET_PA(ui32EntryLo), + ui32EntryLo & RGXMIPSFW_TLB_VALID ? 
1 : 0); + + if (ui32EntryLo == ui32UnmappedEntry) + { + PVR_DUMPDEBUG_LOG(" Potential use-after-free detected"); + } + } + } + break; + + case RGX_HWRTYPE_OVERRUN: + case RGX_HWRTYPE_UNKNOWNFAILURE: + { + /* Nothing to dump */ + } + break; + + default: + { + PVR_ASSERT(IMG_FALSE); + } + break; + } + + if (bPageFault) + { + + FAULT_INFO *psInfo; + + OSLockAcquire(psDevInfo->hDebugFaultInfoLock); + + /* Find the matching Fault Info for this HWRInfo */ + psInfo = &gsFaultInfoLog.asFaults[ui32ReadIndex]; + + /* if they do not match, we need to update the psInfo */ + if ((psInfo->ui64CRTimer != psHWRInfo->ui64CRTimer) || + (psInfo->sFaultDevVAddr.uiAddr != sFaultDevVAddr.uiAddr)) + { + MMU_FAULT_DATA *psFaultData = &psInfo->sMMUFaultData; + + psFaultData->eType = MMU_FAULT_TYPE_UNKNOWN; + + if (bPMFault) + { + /* PM fault and we dump PC details only */ + psFaultData->eTopLevel = MMU_LEVEL_0; + psFaultData->eType = MMU_FAULT_TYPE_PM; + psFaultData->sLevelData[MMU_LEVEL_0].ui64Address = sPCDevPAddr.uiAddr; + } + else + { + RGXCheckFaultAddress(psDevInfo, &sFaultDevVAddr, &sPCDevPAddr, psFaultData); + } + + _RecordFaultInfo(psDevInfo, psInfo, + sFaultDevVAddr, sPCDevPAddr, psHWRInfo->ui64CRTimer, + _PageSizeHWToBytes(ui32PageSize)); + + } + + _DumpFaultAddressHostView(&psInfo->sMMUFaultData, pfnDumpDebugPrintf, pvDumpDebugFile, DD_NORMAL_INDENT); + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + _PrintFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psInfo, DD_NORMAL_INDENT); + } + + OSLockRelease(psDevInfo->hDebugFaultInfoLock); + } + + } + + if (ui32ReadIndex == RGXFWIF_HWINFO_MAX_FIRST - 1) + ui32ReadIndex = psHWRInfoBuf->ui32WriteIndex; + else + ui32ReadIndex = (ui32ReadIndex + 1) - (ui32ReadIndex / RGXFWIF_HWINFO_LAST_INDEX) * RGXFWIF_HWINFO_MAX_LAST; + } + } +} + +#if !defined(NO_HARDWARE) + +/*! 
+******************************************************************************* + + @Function _CheckForPendingPage + + @Description + + Check if the MMU indicates it is blocked on a pending page + + @Input psDevInfo - RGX device info + + @Return IMG_BOOL - IMG_TRUE if there is a pending page + +******************************************************************************/ +static INLINE IMG_BOOL _CheckForPendingPage(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_UINT32 ui32BIFMMUEntry; + + ui32BIFMMUEntry = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_MMU_ENTRY); + + if (ui32BIFMMUEntry & RGX_CR_BIF_MMU_ENTRY_PENDING_EN) + { + return IMG_TRUE; + } + else + { + return IMG_FALSE; + } +} + +/*! +******************************************************************************* + + @Function _GetPendingPageInfo + + @Description + + Get information about the pending page from the MMU status registers + + @Input psDevInfo - RGX device info + @Output psDevVAddr - The device virtual address of the pending MMU address translation + @Output pui32CatBase - The page catalog base + @Output pui32DataType - The MMU entry data type + + @Return void + +******************************************************************************/ +static void _GetPendingPageInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_VIRTADDR *psDevVAddr, + IMG_UINT32 *pui32CatBase, + IMG_UINT32 *pui32DataType) +{ + IMG_UINT64 ui64BIFMMUEntryStatus; + + ui64BIFMMUEntryStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_MMU_ENTRY_STATUS); + + psDevVAddr->uiAddr = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK); + + *pui32CatBase = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK) >> + RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT; + + *pui32DataType = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK) >> + RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT; +} + +#endif + +void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void 
*pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_BOOL bRGXPoweredON) +{ + IMG_CHAR *pszState, *pszReason; + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + IMG_UINT32 ui32OSid; + RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; + /* space for the current clock speed and 3 previous */ + RGXFWIF_TIME_CORR asTimeCorrs[4]; + IMG_UINT32 ui32NumClockSpeedChanges; + +#if defined(NO_HARDWARE) + PVR_UNREFERENCED_PARAMETER(bRGXPoweredON); +#else + if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(GUEST)) + { + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) + { + + IMG_UINT64 ui64RegValMMUStatus; + + ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS); + _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui64RegValMMUStatus, "Core", DD_SUMMARY_INDENT); + + ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS_META); + _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui64RegValMMUStatus, "Meta", DD_SUMMARY_INDENT); + }else + { + IMG_UINT64 ui64RegValMMUStatus, ui64RegValREQStatus; + + ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK0_MMU_STATUS); + ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK0_REQ_STATUS); + + _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_BIF0, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); + + if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, SINGLE_BIF))) + { + ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK1_MMU_STATUS); + ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK1_REQ_STATUS); + _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_BIF1, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); + } + + if 
(RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING)) + { + IMG_UINT32 ui32PhantomCnt = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) ? RGX_REQ_NUM_PHANTOMS(RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS)) : 0; + + if (ui32PhantomCnt > 1) + { + IMG_UINT32 ui32Phantom; + for (ui32Phantom = 0; ui32Phantom < ui32PhantomCnt; ui32Phantom++) + { + /* This can't be done as it may interfere with the FW... */ + /*OSWriteHWReg64(RGX_CR_TEXAS_INDIRECT, ui32Phantom);*/ + + ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS); + ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS); + + _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); + } + }else + { + ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS); + ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS); + + _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); + } + } + } + + if (_CheckForPendingPage(psDevInfo)) + { + IMG_UINT32 ui32CatBase; + IMG_UINT32 ui32DataType; + IMG_DEV_VIRTADDR sDevVAddr; + + PVR_DUMPDEBUG_LOG("MMU Pending page: Yes"); + + _GetPendingPageInfo(psDevInfo, &sDevVAddr, &ui32CatBase, &ui32DataType); + + if (ui32CatBase >= 8) + { + PVR_DUMPDEBUG_LOG("Cannot check address on PM cat base %u", ui32CatBase); + } + else + { + IMG_DEV_PHYADDR sPCDevPAddr; + MMU_FAULT_DATA sFaultData; + + sPCDevPAddr.uiAddr = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_CAT_BASEN(ui32CatBase)); + + PVR_DUMPDEBUG_LOG("Checking device virtual address " IMG_DEV_VIRTADDR_FMTSPEC + " on cat base %u. 
PC Addr = 0x%" IMG_UINT64_FMTSPECx, + sDevVAddr.uiAddr, + ui32CatBase, + sPCDevPAddr.uiAddr); + RGXCheckFaultAddress(psDevInfo, &sDevVAddr, &sPCDevPAddr, &sFaultData); + _DumpFaultAddressHostView(&sFaultData, pfnDumpDebugPrintf, pvDumpDebugFile, DD_SUMMARY_INDENT); + } + } + } +#endif /* NO_HARDWARE */ + + /* Firmware state */ + switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthStatus)) + { + case PVRSRV_DEVICE_HEALTH_STATUS_OK: pszState = "OK"; break; + case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: pszState = "NOT RESPONDING"; break; + case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: pszState = "DEAD"; break; + case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: pszState = "FAULT"; break; + case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: pszState = "UNDEFINED"; break; + default: pszState = "UNKNOWN"; break; + } + + switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthReason)) + { + case PVRSRV_DEVICE_HEALTH_REASON_NONE: pszReason = ""; break; + case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: pszReason = " - Asserted"; break; + case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: pszReason = " - Poll failing"; break; + case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: pszReason = " - Global Event Object timeouts rising"; break; + case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: pszReason = " - KCCB offset invalid"; break; + case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: pszReason = " - KCCB stalled"; break; + case PVRSRV_DEVICE_HEALTH_REASON_IDLING: pszReason = " - Idling"; break; + case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: pszReason = " - Restarting"; break; + case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS: pszReason = " - Missing interrupts"; break; + default: pszReason = " - Unknown reason"; break; + } + +#if !defined(NO_HARDWARE) + /* Determine the type virtualisation support used */ +#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) + if (!PVRSRV_VZ_MODE_IS(NATIVE)) + { +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) +#if defined(SUPPORT_AUTOVZ) +#if 
defined(SUPPORT_AUTOVZ_HW_REGS) + PVR_DUMPDEBUG_LOG("RGX Virtualisation type: AutoVz with HW register support"); +#else + PVR_DUMPDEBUG_LOG("RGX Virtualisation type: AutoVz with shared memory"); +#endif /* defined(SUPPORT_AUTOVZ_HW_REGS) */ +#else + PVR_DUMPDEBUG_LOG("RGX Virtualisation type: Hypervisor-assisted with static Fw heap allocation"); +#endif /* defined(SUPPORT_AUTOVZ) */ +#else + PVR_DUMPDEBUG_LOG("RGX Virtualisation type: Hypervisor-assisted with dynamic Fw heap allocation"); +#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ + } +#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ + + +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1)) + { + RGXFWIF_CONNECTION_FW_STATE eFwState = KM_GET_FW_CONNECTION(psDevInfo); + RGXFWIF_CONNECTION_OS_STATE eOsState = KM_GET_OS_CONNECTION(psDevInfo); + + PVR_DUMPDEBUG_LOG("RGX firmware connection state: %s (Fw=%s; OS=%s)", + ((eFwState == RGXFW_CONNECTION_FW_ACTIVE) && (eOsState == RGXFW_CONNECTION_OS_ACTIVE)) ? ("UP") : ("DOWN"), + (eFwState < RGXFW_CONNECTION_FW_STATE_COUNT) ? (apszFwOsStateName[eFwState]) : ("invalid"), + (eOsState < RGXFW_CONNECTION_OS_STATE_COUNT) ? 
(apszFwOsStateName[eOsState]) : ("invalid")); + + } +#endif + +#if defined(SUPPORT_AUTOVZ) && defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) + if (!PVRSRV_VZ_MODE_IS(NATIVE)) + { + IMG_UINT32 ui32FwAliveTS = KM_GET_FW_ALIVE_TOKEN(psDevInfo); + IMG_UINT32 ui32OsAliveTS = KM_GET_OS_ALIVE_TOKEN(psDevInfo); + + PVR_DUMPDEBUG_LOG("RGX virtualisation watchdog timestamps (in GPU timer ticks): Fw=%u; OS=%u; diff(FW, OS) = %u", + ui32FwAliveTS, ui32OsAliveTS, ui32FwAliveTS - ui32OsAliveTS); + } +#endif +#endif /* !defined(NO_HARDWARE) */ + + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + IMG_CHAR sHwrStateDescription[RGX_DEBUG_STR_SIZE]; + + if (psFwSysData == NULL) + { + /* can't dump any more information */ + PVR_DUMPDEBUG_LOG("RGX FW State: %s%s", pszState, pszReason); + return; + } + +#if defined(PVR_ENABLE_PHR) + { + IMG_CHAR sPHRConfigDescription[RGX_DEBUG_STR_SIZE]; + + sPHRConfigDescription[0] = '\0'; + _Flags2Description(sPHRConfigDescription, RGX_DEBUG_STR_SIZE, + asPHRConfig2Description, ARRAY_SIZE(asPHRConfig2Description), + BIT_ULL(psFwSysData->ui32PHRModeMirror)); + PVR_DUMPDEBUG_LOG("RGX PHR configuration: (%d) %s", psFwSysData->ui32PHRModeMirror, sPHRConfigDescription); + } +#endif + + sHwrStateDescription[0] = '\0'; + _Flags2Description(sHwrStateDescription, RGX_DEBUG_STR_SIZE, + asHwrState2Description, ARRAY_SIZE(asHwrState2Description), + psFwSysData->ui32HWRStateFlags); + PVR_DUMPDEBUG_LOG("RGX FW State: %s%s (HWRState 0x%08x:%s)", pszState, pszReason, psFwSysData->ui32HWRStateFlags, sHwrStateDescription); + PVR_DUMPDEBUG_LOG("RGX FW Power State: %s (APM %s: %d ok, %d denied, %d non-idle, %d retry, %d other, %d total. 
Latency: %u ms)", + pszPowStateName[psFwSysData->ePowState], + (psDevInfo->pvAPMISRData)?"enabled":"disabled", + psDevInfo->ui32ActivePMReqOk - psDevInfo->ui32ActivePMReqNonIdle, + psDevInfo->ui32ActivePMReqDenied, + psDevInfo->ui32ActivePMReqNonIdle, + psDevInfo->ui32ActivePMReqRetry, + psDevInfo->ui32ActivePMReqTotal - + psDevInfo->ui32ActivePMReqOk - + psDevInfo->ui32ActivePMReqDenied - + psDevInfo->ui32ActivePMReqRetry - + psDevInfo->ui32ActivePMReqNonIdle, + psDevInfo->ui32ActivePMReqTotal, + psRuntimeCfg->ui32ActivePMLatencyms); + + ui32NumClockSpeedChanges = (IMG_UINT32) OSAtomicRead(&psDevInfo->psDeviceNode->iNumClockSpeedChanges); + RGXGetTimeCorrData(psDevInfo->psDeviceNode, asTimeCorrs, ARRAY_SIZE(asTimeCorrs)); + + PVR_DUMPDEBUG_LOG("RGX DVFS: %u frequency changes. " + "Current frequency: %u.%03u MHz (sampled at %" IMG_UINT64_FMTSPEC " ns). " + "FW frequency: %u.%03u MHz.", + ui32NumClockSpeedChanges, + asTimeCorrs[0].ui32CoreClockSpeed / 1000000, + (asTimeCorrs[0].ui32CoreClockSpeed / 1000) % 1000, + asTimeCorrs[0].ui64OSTimeStamp, + psRuntimeCfg->ui32CoreClockSpeed / 1000000, + (psRuntimeCfg->ui32CoreClockSpeed / 1000) % 1000); + if (ui32NumClockSpeedChanges > 0) + { + PVR_DUMPDEBUG_LOG(" Previous frequencies: %u.%03u, %u.%03u, %u.%03u MHz (Sampled at " + "%" IMG_UINT64_FMTSPEC ", %" IMG_UINT64_FMTSPEC ", %" IMG_UINT64_FMTSPEC ")", + asTimeCorrs[1].ui32CoreClockSpeed / 1000000, + (asTimeCorrs[1].ui32CoreClockSpeed / 1000) % 1000, + asTimeCorrs[2].ui32CoreClockSpeed / 1000000, + (asTimeCorrs[2].ui32CoreClockSpeed / 1000) % 1000, + asTimeCorrs[3].ui32CoreClockSpeed / 1000000, + (asTimeCorrs[3].ui32CoreClockSpeed / 1000) % 1000, + asTimeCorrs[1].ui64OSTimeStamp, + asTimeCorrs[2].ui64OSTimeStamp, + asTimeCorrs[3].ui64OSTimeStamp); + } + + for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + { + RGXFWIF_OS_RUNTIME_FLAGS sFwRunFlags = psFwSysData->asOsRuntimeFlagsMirror[ui32OSid]; + + PVR_DUMPDEBUG_LOG("RGX FW OS %u - State: %s; Freelists: 
%s%s; Priority: %d; %s", ui32OSid, + apszFwOsStateName[sFwRunFlags.bfOsState], + (sFwRunFlags.bfFLOk) ? "Ok" : "Not Ok", + (sFwRunFlags.bfFLGrowPending) ? "; Grow Request Pending" : "", + psFwSysData->aui32OSidPrioMirror[ui32OSid], + (sFwRunFlags.bfIsolatedOS) ? "; Isolated;" : "" + ); + } + + _RGXDumpFWAssert(pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBufCtl); + _RGXDumpFWFaults(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData); + _RGXDumpFWPoll(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData); + } + else + { + PVR_DUMPDEBUG_LOG("RGX FW State: Unavailable under Guest Mode of operation"); + PVR_DUMPDEBUG_LOG("RGX FW Power State: Unavailable under Guest Mode of operation"); + } + + _RGXDumpFWHWRInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData, psDevInfo->psRGXFWIfHWRInfoBufCtl, psDevInfo); + +#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK) + /* Dump all non-zero values in lines of 8... */ + { + IMG_CHAR pszLine[(9*RGXFWIF_STATS_FRAMEWORK_LINESIZE)+1]; + IMG_UINT32 *pui32FWStatsBuf = psFwSysData->aui32FWStatsBuf; + IMG_UINT32 ui32Index1, ui32Index2; + + PVR_DUMPDEBUG_LOG("STATS[START]: RGXFWIF_STATS_FRAMEWORK_MAX=%d", RGXFWIF_STATS_FRAMEWORK_MAX); + for (ui32Index1 = 0; ui32Index1 < RGXFWIF_STATS_FRAMEWORK_MAX; ui32Index1 += RGXFWIF_STATS_FRAMEWORK_LINESIZE) + { + IMG_UINT32 ui32OrOfValues = 0; + IMG_CHAR *pszBuf = pszLine; + + /* Print all values in this line and skip if all zero... 
*/ + for (ui32Index2 = 0; ui32Index2 < RGXFWIF_STATS_FRAMEWORK_LINESIZE; ui32Index2++) + { + ui32OrOfValues |= pui32FWStatsBuf[ui32Index1+ui32Index2]; + OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32FWStatsBuf[ui32Index1+ui32Index2]); + pszBuf += 9; /* write over the '\0' */ + } + + if (ui32OrOfValues != 0) + { + PVR_DUMPDEBUG_LOG("STATS[%08x]:%s", ui32Index1, pszLine); + } + } + PVR_DUMPDEBUG_LOG("STATS[END]"); + } +#endif +} + +static void _RGXDumpMetaSPExtraDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ +/* List of extra META Slave Port debug registers */ +#define RGX_META_SP_EXTRA_DEBUG \ + X(RGX_CR_META_SP_MSLVCTRL0) \ + X(RGX_CR_META_SP_MSLVCTRL1) \ + X(RGX_CR_META_SP_MSLVDATAX) \ + X(RGX_CR_META_SP_MSLVIRQSTATUS) \ + X(RGX_CR_META_SP_MSLVIRQENABLE) \ + X(RGX_CR_META_SP_MSLVIRQLEVEL) + + IMG_UINT32 ui32Idx, ui32RegIdx; + IMG_UINT32 ui32RegVal; + IMG_UINT32 ui32RegAddr; + + const IMG_UINT32 aui32DebugRegAddr [] = { +#define X(A) A, + RGX_META_SP_EXTRA_DEBUG +#undef X + }; + + const IMG_CHAR* apszDebugRegName [] = { +#define X(A) #A, + RGX_META_SP_EXTRA_DEBUG +#undef X + }; + + const IMG_UINT32 aui32Debug2RegAddr [] = {0xA28, 0x0A30, 0x0A38}; + + PVR_DUMPDEBUG_LOG("META Slave Port extra debug:"); + + /* dump first set of Slave Port debug registers */ + for (ui32Idx = 0; ui32Idx < sizeof(aui32DebugRegAddr)/sizeof(IMG_UINT32); ui32Idx++) + { + const IMG_CHAR* pszRegName = apszDebugRegName[ui32Idx]; + + ui32RegAddr = aui32DebugRegAddr[ui32Idx]; + ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr); + PVR_DUMPDEBUG_LOG(" * %s: 0x%8.8X", pszRegName, ui32RegVal); + } + + /* dump second set of Slave Port debug registers */ + for (ui32Idx = 0; ui32Idx < 4; ui32Idx++) + { + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, 0xA20, ui32Idx); + ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, 0xA20); + PVR_DUMPDEBUG_LOG(" * 0xA20[%d]: 0x%8.8X", ui32Idx, ui32RegVal); + + } + + for (ui32RegIdx = 0; ui32RegIdx 
< sizeof(aui32Debug2RegAddr)/sizeof(IMG_UINT32); ui32RegIdx++) + { + ui32RegAddr = aui32Debug2RegAddr[ui32RegIdx]; + for (ui32Idx = 0; ui32Idx < 2; ui32Idx++) + { + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr, ui32Idx); + ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr); + PVR_DUMPDEBUG_LOG(" * 0x%X[%d]: 0x%8.8X", ui32RegAddr, ui32Idx, ui32RegVal); + } + } + +} + +/* + * Array of all the Firmware Trace log IDs used to convert the trace data. + */ +typedef struct _TRACEBUF_LOG_ { + RGXFW_LOG_SFids eSFId; + const IMG_CHAR *pszName; + const IMG_CHAR *pszFmt; + IMG_UINT32 ui32ArgNum; +} TRACEBUF_LOG; + +static const TRACEBUF_LOG aLogDefinitions[] = +{ +#define X(a, b, c, d, e) {RGXFW_LOG_CREATESFID(a,b,e), #c, d, e}, + RGXFW_LOG_SFIDLIST +#undef X +}; + +#define NARGS_MASK ~(0xF<<16) +static IMG_BOOL _FirmwareTraceIntegrityCheck(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + const TRACEBUF_LOG *psLogDef = &aLogDefinitions[0]; + IMG_BOOL bIntegrityOk = IMG_TRUE; + + /* + * For every log ID, check the format string and number of arguments is valid. + */ + while (psLogDef->eSFId != RGXFW_SF_LAST) + { + const TRACEBUF_LOG *psLogDef2; + const IMG_CHAR *pszString; + IMG_UINT32 ui32Count; + + /* + * Check the number of arguments matches the number of '%' in the string and + * check that no string uses %s which is not supported as it requires a + * pointer to memory that is not going to be valid. + */ + pszString = psLogDef->pszFmt; + ui32Count = 0; + + while (*pszString != '\0') + { + if (*pszString++ == '%') + { + ui32Count++; + if (*pszString == 's') + { + bIntegrityOk = IMG_FALSE; + PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has an unsupported type not recognized (fmt: %%%c). Please fix.", + psLogDef->pszName, *pszString); + } + else if (*pszString == '%') + { + /* Double % is a printable % sign and not a format string... 
*/ + ui32Count--; + } + } + } + + if (ui32Count != psLogDef->ui32ArgNum) + { + bIntegrityOk = IMG_FALSE; + PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but only %d are specified. Please fix.", + psLogDef->pszName, ui32Count, psLogDef->ui32ArgNum); + } + + /* RGXDumpFirmwareTrace() has a hardcoded limit of supporting up to 20 arguments... */ + if (ui32Count > 20) + { + bIntegrityOk = IMG_FALSE; + PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but a maximum of 20 are supported. Please fix.", + psLogDef->pszName, ui32Count); + } + + /* Check the id number is unique (don't take into account the number of arguments) */ + ui32Count = 0; + psLogDef2 = &aLogDefinitions[0]; + + while (psLogDef2->eSFId != RGXFW_SF_LAST) + { + if ((psLogDef->eSFId & NARGS_MASK) == (psLogDef2->eSFId & NARGS_MASK)) + { + ui32Count++; + } + psLogDef2++; + } + + if (ui32Count != 1) + { + bIntegrityOk = IMG_FALSE; + PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s id %x is not unique, there are %d more. Please fix.", + psLogDef->pszName, psLogDef->eSFId, ui32Count - 1); + } + + /* Move to the next log ID... */ + psLogDef++; + } + + return bIntegrityOk; +} + +typedef struct { + IMG_UINT16 ui16Mask; + const IMG_CHAR *pszStr; +} RGXFWT_DEBUG_INFO_MSKSTR; /* pair of bit mask and debug info message string */ + + +/*! 
+******************************************************************************* + + @Function RGXPrepareExtraDebugInfo + + @Description + + Prepares debug info string by decoding ui16DebugInfo value passed + + @Input pszBuffer - pointer to debug info string buffer + + @Return void + +******************************************************************************/ +static void RGXPrepareExtraDebugInfo(IMG_CHAR *pszBuffer, IMG_UINT32 ui32BufferSize, IMG_UINT16 ui16DebugInfo) +{ + const RGXFWT_DEBUG_INFO_MSKSTR aDebugInfoMskStr[] = + { +#define X(a, b) {a, b}, + RGXFWT_DEBUG_INFO_MSKSTRLIST +#undef X + }; + + IMG_UINT32 ui32NumFields = sizeof(aDebugInfoMskStr)/sizeof(RGXFWT_DEBUG_INFO_MSKSTR); + IMG_UINT32 i; + IMG_BOOL bHasExtraDebugInfo = IMG_FALSE; + + /* Add prepend string */ + OSStringLCopy(pszBuffer, RGXFWT_DEBUG_INFO_STR_PREPEND, ui32BufferSize); + + /* Add debug info strings */ + for (i = 0; i < ui32NumFields; i++) + { + if (ui16DebugInfo & aDebugInfoMskStr[i].ui16Mask) + { + if (bHasExtraDebugInfo) + { + OSStringLCat(pszBuffer, ", ", ui32BufferSize); /* Add comma separator */ + } + OSStringLCat(pszBuffer, aDebugInfoMskStr[i].pszStr, ui32BufferSize); + bHasExtraDebugInfo = IMG_TRUE; + } + } + + /* Add append string */ + OSStringLCat(pszBuffer, RGXFWT_DEBUG_INFO_STR_APPEND, ui32BufferSize); +} + +void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + static IMG_BOOL bIntegrityCheckPassed = IMG_FALSE; + + /* Check that the firmware trace is correctly defined... */ + if (!bIntegrityCheckPassed) + { + bIntegrityCheckPassed = _FirmwareTraceIntegrityCheck(pfnDumpDebugPrintf, pvDumpDebugFile); + if (!bIntegrityCheckPassed) + { + return; + } + } + + /* Dump FW trace information... 
*/ + if (psRGXFWIfTraceBufCtl != NULL) + { + IMG_UINT32 tid; + IMG_UINT32 ui32TraceBufSizeInDWords = psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords; + + /* Print the log type settings... */ + if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK) + { + PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")", + ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")), + RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType) + ); + } + else + { + PVR_DUMPDEBUG_LOG("Debug log type: none"); + } + + /* Print the decoded log for each thread... */ + for (tid = 0; tid < RGXFW_THREAD_NUM; tid++) + { + IMG_UINT32 *pui32TraceBuf = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer; + IMG_UINT32 ui32TracePtr = psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer; + IMG_UINT32 ui32Count = 0; + + if (pui32TraceBuf == NULL) + { + /* trace buffer not yet allocated */ + continue; + } + + while (ui32Count < ui32TraceBufSizeInDWords) + { + IMG_UINT32 ui32Data, ui32DataToId; + + /* Find the first valid log ID, skipping whitespace... */ + do + { + ui32Data = pui32TraceBuf[ui32TracePtr]; + ui32DataToId = idToStringID(ui32Data, SFs); + + /* If an unrecognized id is found it may be inconsistent data or a firmware trace error. */ + if (ui32DataToId == RGXFW_SF_LAST && RGXFW_LOG_VALIDID(ui32Data)) + { + PVR_DUMPDEBUG_LOG("WARNING: Unrecognized id (%x). From here on the trace might be wrong!", ui32Data); + } + + /* Update the trace pointer... 
*/ + ui32TracePtr = (ui32TracePtr + 1) % ui32TraceBufSizeInDWords; + ui32Count++; + } while ((RGXFW_SF_LAST == ui32DataToId || ui32DataToId >= RGXFW_SF_FIRST) && + ui32Count < ui32TraceBufSizeInDWords); + + if (ui32Count < ui32TraceBufSizeInDWords) + { + IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN] = "%" IMG_UINT64_FMTSPEC ":T%u-%s> "; + IMG_CHAR szDebugInfoBuffer[RGXFWT_DEBUG_INFO_STR_MAXLEN] = ""; + IMG_UINT64 ui64Timestamp; + IMG_UINT16 ui16DebugInfo; + + /* If we hit the ASSERT message then this is the end of the log... */ + if (ui32Data == RGXFW_SF_MAIN_ASSERT_FAILED) + { + PVR_DUMPDEBUG_LOG("ASSERTION %s failed at %s:%u", + psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szInfo, + psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szPath, + psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.ui32LineNum); + break; + } + + ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 0) % ui32TraceBufSizeInDWords]) << 32 | + (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 1) % ui32TraceBufSizeInDWords]); + + ui16DebugInfo = (IMG_UINT16) ((ui64Timestamp & ~RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK) >> RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT); + ui64Timestamp = (ui64Timestamp & ~RGXFWT_TIMESTAMP_TIME_CLRMSK) >> RGXFWT_TIMESTAMP_TIME_SHIFT; + + /* + * Print the trace string and provide up to 20 arguments which + * printf function will be able to use. We have already checked + * that no string uses more than this. 
+ */ + OSStringLCat(szBuffer, SFs[ui32DataToId].psName, PVR_MAX_DEBUG_MESSAGE_LEN); + + /* Check and append any extra debug info available */ + if (ui16DebugInfo) + { + /* Prepare debug info string */ + RGXPrepareExtraDebugInfo(szDebugInfoBuffer, RGXFWT_DEBUG_INFO_STR_MAXLEN, ui16DebugInfo); + + /* Append debug info string */ + OSStringLCat(szBuffer, szDebugInfoBuffer, PVR_MAX_DEBUG_MESSAGE_LEN); + } + + PVR_DUMPDEBUG_LOG(szBuffer, ui64Timestamp, tid, groups[RGXFW_SF_GID(ui32Data)], + pui32TraceBuf[(ui32TracePtr + 2) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 3) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 4) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 5) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 6) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 7) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 8) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 9) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 10) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 11) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 12) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 13) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 14) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 15) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 16) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 17) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 18) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 19) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 20) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 21) % ui32TraceBufSizeInDWords]); + + /* Update the trace pointer... 
*/ + ui32TracePtr = (ui32TracePtr + 2 + RGXFW_SF_PARAMNUM(ui32Data)) % ui32TraceBufSizeInDWords; + ui32Count = (ui32Count + 2 + RGXFW_SF_PARAMNUM(ui32Data)); + } + } + } + } +} + +#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) +void RGXDumpPowerMonitoring(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + + /* Print the power monitoring counters... */ + if (psFwSysData != NULL) + { + IMG_UINT32 *pui32TraceBuf = psFwSysData->sPowerMonBuf.pui32TraceBuffer; + IMG_UINT32 ui32TracePtr = 0; //psFwSysData->sPowerMonBuf.ui32TracePointer; + IMG_UINT32 ui32PowerMonBufSizeInDWords = psFwSysData->ui32PowerMonBufSizeInDWords; + IMG_UINT32 ui32Count = 0; + IMG_UINT64 ui64Timestamp; + + if (pui32TraceBuf == NULL) + { + /* power monitoring buffer not yet allocated */ + return; + } + + if (pui32TraceBuf[ui32TracePtr] != RGX_CR_TIMER) + { + PVR_DPF((PVR_DBG_WARNING, "Power monitoring data not available.")); + return; + } + ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 1) % ui32PowerMonBufSizeInDWords]) << 32 | + (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords]); + + /* Update the trace pointer... 
*/ + ui32TracePtr = (ui32TracePtr + 3) % ui32PowerMonBufSizeInDWords; + ui32Count = (ui32Count + 3); + + PVR_DPF((PVR_DBG_WARNING, "Dumping power monitoring buffer: CPUVAddr = %p, pointer = 0x%x, size = 0x%x", + pui32TraceBuf, + ui32TracePtr, + ui32PowerMonBufSizeInDWords)); + + while (ui32Count < ui32PowerMonBufSizeInDWords) + { + /* power monitoring data is (register, value) dword pairs */ + PVR_DUMPDEBUG_LOG("%" IMG_UINT64_FMTSPEC ":POWMON 0x%08x 0x%08x 0x%08x 0x%08x", + ui64Timestamp, + pui32TraceBuf[(ui32TracePtr + 0) % ui32PowerMonBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 1) % ui32PowerMonBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 3) % ui32PowerMonBufSizeInDWords]); + + if (pui32TraceBuf[(ui32TracePtr + 0) % ui32PowerMonBufSizeInDWords] == RGXFWIF_TIMEDIFF_ID || + pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords] == RGXFWIF_TIMEDIFF_ID) + { + /* end of buffer */ + break; + } + + /* Update the trace pointer... 
*/ + ui32TracePtr = (ui32TracePtr + 4) % ui32PowerMonBufSizeInDWords; + ui32Count = (ui32Count + 4); + } + } +} +#endif + +static const IMG_CHAR *_RGXGetDebugDevStateString(PVRSRV_DEVICE_STATE eDevState) +{ + switch (eDevState) + { + case PVRSRV_DEVICE_STATE_INIT: + return "Initialising"; + case PVRSRV_DEVICE_STATE_ACTIVE: + return "Active"; + case PVRSRV_DEVICE_STATE_DEINIT: + return "De-initialising"; + case PVRSRV_DEVICE_STATE_BAD: + return "Bad"; + case PVRSRV_DEVICE_STATE_UNDEFINED: + PVR_ASSERT(!"Device has undefined state"); + __fallthrough; + default: + return "Unknown"; + } +} + +static const IMG_CHAR* _RGXGetDebugDevPowerStateString(PVRSRV_DEV_POWER_STATE ePowerState) +{ + switch (ePowerState) + { + case PVRSRV_DEV_POWER_STATE_DEFAULT: return "DEFAULT"; + case PVRSRV_DEV_POWER_STATE_OFF: return "OFF"; + case PVRSRV_DEV_POWER_STATE_ON: return "ON"; + default: return "UNKNOWN"; + } +} + +PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_UINT32 ui32Meta = (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) ? 
RGX_GET_FEATURE_VALUE(psDevInfo, META) : 0; + IMG_UINT32 ui32TACycles, ui323DCycles, ui32TAOr3DCycles, ui32TAAnd3DCycles; + IMG_UINT32 ui32RegVal; + IMG_BOOL bFirmwarePerf; + IMG_BOOL bS7Infra = RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE); + void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; + PVRSRV_ERROR eError; + + PVR_DUMPDEBUG_LOG("------[ RGX registers ]------"); + PVR_DUMPDEBUG_LOG("RGX Register Base Address (Linear): 0x%p", psDevInfo->pvRegsBaseKM); + PVR_DUMPDEBUG_LOG("RGX Register Base Address (Physical): 0x%08lX", (unsigned long)psDevInfo->sRegsPhysBase.uiAddr); + + /* Check if firmware perf was set at Init time */ + bFirmwarePerf = (psDevInfo->psRGXFWIfSysInit->eFirmwarePerf != FW_PERF_CONF_NONE); + +/* Helper macros to emit data */ +#define REG32_FMTSPEC "%-30s: 0x%08X" +#define REG64_FMTSPEC "%-30s: 0x%016" IMG_UINT64_FMTSPECx +#define DDLOG32(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, RGX_CR_##R)); +#define DDLOG64(R) PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, RGX_CR_##R)); +#define DDLOG32_DPX(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, DPX_CR_##R)); +#define DDLOG64_DPX(R) PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, DPX_CR_##R)); +#define DDLOGVAL32(S,V) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, S, V); + +#if defined(NO_HARDWARE) + /* OSReadHWReg variants don't use params passed in NoHW builds */ + PVR_UNREFERENCED_PARAMETER(pvRegsBaseKM); +#endif + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBVNC_COREID_REG)) + { + DDLOG64(CORE_ID); + } + else + { + DDLOG32(CORE_ID); + } + DDLOG32(CORE_REVISION); + DDLOG32(DESIGNER_REV_FIELD1); + DDLOG32(DESIGNER_REV_FIELD2); + DDLOG64(CHANGESET_NUMBER); + if (ui32Meta) + { + DDLOG32(META_SP_MSLVIRQSTATUS); + } + + DDLOG64(CLK_CTRL); + DDLOG64(CLK_STATUS); + DDLOG64(CLK_CTRL2); + DDLOG64(CLK_STATUS2); + + if (bS7Infra) + { + DDLOG64(CLK_XTPLUS_CTRL); + DDLOG64(CLK_XTPLUS_STATUS); + } + DDLOG32(EVENT_STATUS); + 
DDLOG64(TIMER); + if (bS7Infra) + { + DDLOG64(MMU_FAULT_STATUS); + DDLOG64(MMU_FAULT_STATUS_META); + } + else + { + DDLOG32(BIF_FAULT_BANK0_MMU_STATUS); + DDLOG64(BIF_FAULT_BANK0_REQ_STATUS); + DDLOG32(BIF_FAULT_BANK1_MMU_STATUS); + DDLOG64(BIF_FAULT_BANK1_REQ_STATUS); + } + DDLOG32(BIF_MMU_STATUS); + DDLOG32(BIF_MMU_ENTRY); + DDLOG64(BIF_MMU_ENTRY_STATUS); + + if (bS7Infra) + { + DDLOG32(BIF_JONES_OUTSTANDING_READ); + DDLOG32(BIF_BLACKPEARL_OUTSTANDING_READ); + DDLOG32(BIF_DUST_OUTSTANDING_READ); + } + else + { + if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE))) + { + DDLOG32(BIF_STATUS_MMU); + DDLOG32(BIF_READS_EXT_STATUS); + DDLOG32(BIF_READS_INT_STATUS); + } + DDLOG32(BIFPM_STATUS_MMU); + DDLOG32(BIFPM_READS_EXT_STATUS); + DDLOG32(BIFPM_READS_INT_STATUS); + } + + if (RGX_IS_BRN_SUPPORTED(psDevInfo, 44871)) + { + PVR_DUMPDEBUG_LOG("Warning: BRN44871 is present"); + } + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT)) + { + DDLOG64(CONTEXT_MAPPING0); + DDLOG64(CONTEXT_MAPPING1); + DDLOG64(CONTEXT_MAPPING2); + DDLOG64(CONTEXT_MAPPING3); + DDLOG64(CONTEXT_MAPPING4); + } + else + { + DDLOG64(BIF_CAT_BASE_INDEX); + DDLOG64(BIF_CAT_BASE0); + DDLOG64(BIF_CAT_BASE1); + DDLOG64(BIF_CAT_BASE2); + DDLOG64(BIF_CAT_BASE3); + DDLOG64(BIF_CAT_BASE4); + DDLOG64(BIF_CAT_BASE5); + DDLOG64(BIF_CAT_BASE6); + DDLOG64(BIF_CAT_BASE7); + } + + DDLOG32(BIF_CTRL_INVAL); + DDLOG32(BIF_CTRL); + + DDLOG64(BIF_PM_CAT_BASE_VCE0); + DDLOG64(BIF_PM_CAT_BASE_TE0); + DDLOG64(BIF_PM_CAT_BASE_ALIST0); + DDLOG64(BIF_PM_CAT_BASE_VCE1); + DDLOG64(BIF_PM_CAT_BASE_TE1); + DDLOG64(BIF_PM_CAT_BASE_ALIST1); + + DDLOG32(PERF_TA_PHASE); + DDLOG32(PERF_TA_CYCLE); + DDLOG32(PERF_3D_PHASE); + DDLOG32(PERF_3D_CYCLE); + + ui32TACycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_TA_CYCLE); + ui323DCycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_3D_CYCLE); + ui32TAOr3DCycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_TA_OR_3D_CYCLE); + ui32TAAnd3DCycles = ((ui32TACycles + ui323DCycles) > 
ui32TAOr3DCycles) ? (ui32TACycles + ui323DCycles - ui32TAOr3DCycles) : 0; + DDLOGVAL32("PERF_TA_OR_3D_CYCLE", ui32TAOr3DCycles); + DDLOGVAL32("PERF_TA_AND_3D_CYCLE", ui32TAAnd3DCycles); + + DDLOG32(PERF_COMPUTE_PHASE); + DDLOG32(PERF_COMPUTE_CYCLE); + + DDLOG32(PM_PARTIAL_RENDER_ENABLE); + + DDLOG32(ISP_RENDER); + DDLOG64(TLA_STATUS); + DDLOG64(MCU_FENCE); + + DDLOG32(VDM_CONTEXT_STORE_STATUS); + DDLOG64(VDM_CONTEXT_STORE_TASK0); + DDLOG64(VDM_CONTEXT_STORE_TASK1); + DDLOG64(VDM_CONTEXT_STORE_TASK2); + DDLOG64(VDM_CONTEXT_RESUME_TASK0); + DDLOG64(VDM_CONTEXT_RESUME_TASK1); + DDLOG64(VDM_CONTEXT_RESUME_TASK2); + + DDLOG32(ISP_CTL); + DDLOG32(ISP_STATUS); + DDLOG32(MTS_INTCTX); + DDLOG32(MTS_BGCTX); + DDLOG32(MTS_BGCTX_COUNTED_SCHEDULE); + DDLOG32(MTS_SCHEDULE); + DDLOG32(MTS_GPU_INT_STATUS); + + DDLOG32(CDM_CONTEXT_STORE_STATUS); + DDLOG64(CDM_CONTEXT_PDS0); + DDLOG64(CDM_CONTEXT_PDS1); + DDLOG64(CDM_TERMINATE_PDS); + DDLOG64(CDM_TERMINATE_PDS1); + + if (RGX_IS_ERN_SUPPORTED(psDevInfo, 47025)) + { + DDLOG64(CDM_CONTEXT_LOAD_PDS0); + DDLOG64(CDM_CONTEXT_LOAD_PDS1); + } + + if (bS7Infra) + { + DDLOG32(JONES_IDLE); + } + + DDLOG32(SIDEKICK_IDLE); + + if (!bS7Infra) + { + DDLOG32(SLC_IDLE); + DDLOG32(SLC_STATUS0); + DDLOG64(SLC_STATUS1); + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_BANKS) && RGX_GET_FEATURE_VALUE(psDevInfo, SLC_BANKS)) + { + DDLOG64(SLC_STATUS2); + } + + DDLOG32(SLC_CTRL_BYPASS); + DDLOG64(SLC_CTRL_MISC); + } + else + { + DDLOG32(SLC3_IDLE); + DDLOG64(SLC3_STATUS); + DDLOG32(SLC3_FAULT_STOP_STATUS); + } + + if (PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0) + { + DDLOG32(SCRATCH0); + DDLOG32(SCRATCH1); + DDLOG32(SCRATCH2); + DDLOG32(SCRATCH3); + DDLOG32(SCRATCH4); + DDLOG32(SCRATCH5); + DDLOG32(SCRATCH6); + DDLOG32(SCRATCH7); + DDLOG32(SCRATCH8); + DDLOG32(SCRATCH9); + DDLOG32(SCRATCH10); + DDLOG32(SCRATCH11); + DDLOG32(SCRATCH12); + DDLOG32(SCRATCH13); + DDLOG32(SCRATCH14); + DDLOG32(SCRATCH15); + } + + if 
(ui32Meta) + { + IMG_BOOL bIsT0Enabled = IMG_FALSE, bIsFWFaulted = IMG_FALSE; + + /* Forcing bit 6 of MslvCtrl1 to 0 to avoid internal reg read going through the core */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL1, 0x0); + + eError = RGXReadWithSP(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadWithSP", _METASPError); + DDLOGVAL32("T0 TXENABLE", ui32RegVal); + if (ui32RegVal & META_CR_TXENABLE_ENABLE_BIT) + { + bIsT0Enabled = IMG_TRUE; + } + + eError = RGXReadWithSP(psDevInfo, META_CR_T0STATUS_OFFSET, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadWithSP", _METASPError); + DDLOGVAL32("T0 TXSTATUS", ui32RegVal); + + /* check for FW fault */ + if (((ui32RegVal >> 20) & 0x3) == 0x2) + { + bIsFWFaulted = IMG_TRUE; + } + + eError = RGXReadWithSP(psDevInfo, META_CR_T0DEFR_OFFSET, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadWithSP", _METASPError); + DDLOGVAL32("T0 TXDEFR", ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PC, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); + DDLOGVAL32("T0 PC", ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PCX, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); + DDLOGVAL32("T0 PCX", ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_SP, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); + DDLOGVAL32("T0 SP", ui32RegVal); + + + if ((ui32Meta == MTP218) || (ui32Meta == MTP219)) + { + eError = RGXReadWithSP(psDevInfo, META_CR_T1ENABLE_OFFSET, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadWithSP", _METASPError); + DDLOGVAL32("T1 TXENABLE", ui32RegVal); + + eError = RGXReadWithSP(psDevInfo, META_CR_T1STATUS_OFFSET, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadWithSP", _METASPError); + DDLOGVAL32("T1 TXSTATUS", ui32RegVal); + + eError = RGXReadWithSP(psDevInfo, META_CR_T1DEFR_OFFSET, 
&ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadWithSP", _METASPError); + DDLOGVAL32("T1 TXDEFR", ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PC, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); + DDLOGVAL32("T1 PC", ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PCX, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); + DDLOGVAL32("T1 PCX", ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_SP, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); + DDLOGVAL32("T1 SP", ui32RegVal); + } + + if (bFirmwarePerf) + { + eError = RGXReadWithSP(psDevInfo, META_CR_PERF_COUNT0, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadWithSP", _METASPError); + DDLOGVAL32("PERF_COUNT0", ui32RegVal); + + eError = RGXReadWithSP(psDevInfo, META_CR_PERF_COUNT1, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadWithSP", _METASPError); + DDLOGVAL32("PERF_COUNT1", ui32RegVal); + } + + if (bIsT0Enabled & bIsFWFaulted) + { + PVRSRV_ERROR eError; + eError = _ValidateFWImageForMETA(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + if (eError != PVRSRV_OK) + { + PVR_DUMPDEBUG_LOG("Failed to validate any FW code corruption"); + } + } + else if (bIsFWFaulted) + { + PVR_DUMPDEBUG_LOG("Skipping FW code memory corruption checking as META is disabled"); + } + } + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + DDLOG32(MIPS_ADDR_REMAP1_CONFIG1); + DDLOG64(MIPS_ADDR_REMAP1_CONFIG2); + DDLOG32(MIPS_ADDR_REMAP2_CONFIG1); + DDLOG64(MIPS_ADDR_REMAP2_CONFIG2); + DDLOG32(MIPS_ADDR_REMAP3_CONFIG1); + DDLOG64(MIPS_ADDR_REMAP3_CONFIG2); + DDLOG32(MIPS_ADDR_REMAP4_CONFIG1); + DDLOG64(MIPS_ADDR_REMAP4_CONFIG2); + DDLOG32(MIPS_ADDR_REMAP5_CONFIG1); + DDLOG64(MIPS_ADDR_REMAP5_CONFIG2); + DDLOG64(MIPS_WRAPPER_CONFIG); + DDLOG32(MIPS_EXCEPTION_STATUS); + +#if !defined(NO_HARDWARE) + { + RGX_MIPS_STATE sMIPSState = {0}; + + eError = 
_RGXMipsExtraDebug(psDevInfo, &sMIPSState); + PVR_DUMPDEBUG_LOG("---- [ MIPS internal state ] ----"); + if (eError != PVRSRV_OK) + { + PVR_DUMPDEBUG_LOG("MIPS extra debug not available"); + } + else + { + DDLOGVAL32("PC", sMIPSState.ui32ErrorEPC); + DDLOGVAL32("STATUS_REGISTER", sMIPSState.ui32StatusRegister); + DDLOGVAL32("CAUSE_REGISTER", sMIPSState.ui32CauseRegister); + _RGXMipsDumpCauseDecode(pfnDumpDebugPrintf, pvDumpDebugFile, + sMIPSState.ui32CauseRegister, sMIPSState.ui32ErrorState); + DDLOGVAL32("BAD_REGISTER", sMIPSState.ui32BadRegister); + DDLOGVAL32("EPC", sMIPSState.ui32EPC); + DDLOGVAL32("SP", sMIPSState.ui32SP); + DDLOGVAL32("BAD_INSTRUCTION", sMIPSState.ui32BadInstr); + _RGXMipsDumpDebugDecode(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, + sMIPSState.ui32Debug, sMIPSState.ui32DEPC); + + { + IMG_UINT32 ui32Idx; + + IMG_BOOL bCheckBRN63553WA = + RGX_IS_BRN_SUPPORTED(psDevInfo, 63553) && + (OSReadHWReg32(pvRegsBaseKM, RGX_CR_MIPS_ADDR_REMAP5_CONFIG1) == (0x0 | RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN)); + + IMG_BOOL bUseRemapRanges = RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32; + + PVR_DUMPDEBUG_LOG("TLB :"); + + for (ui32Idx = 0; ui32Idx < ARRAY_SIZE(sMIPSState.asTLB); ui32Idx++) + { + RGX_MIPS_REMAP_ENTRY *psRemapEntry0 = NULL; + RGX_MIPS_REMAP_ENTRY *psRemapEntry1 = NULL; + + if (bUseRemapRanges) + { + psRemapEntry0 = &sMIPSState.asRemap[ui32Idx]; + psRemapEntry1 = &sMIPSState.asRemap[ui32Idx+16]; + } + + + _RGXMipsDumpTLBEntry(pfnDumpDebugPrintf, + pvDumpDebugFile, + &sMIPSState.asTLB[ui32Idx], + psRemapEntry0, + psRemapEntry1, + ui32Idx); + + if (bCheckBRN63553WA) + { + const RGX_MIPS_TLB_ENTRY *psTLBEntry = &sMIPSState.asTLB[ui32Idx]; + + #define BRN63553_TLB_IS_NUL(X) (((X) & RGXMIPSFW_TLB_VALID) && (RGXMIPSFW_TLB_GET_PA(X) == 0x0)) + + if (BRN63553_TLB_IS_NUL(psTLBEntry->ui32TLBLo0) || BRN63553_TLB_IS_NUL(psTLBEntry->ui32TLBLo1)) + { + PVR_DUMPDEBUG_LOG("BRN63553 WA present with a valid TLB entry mapping address 0x0."); + } + 
} + } + + /* This implicitly also checks for overlaps between memory and regbank addresses */ + _CheckMipsTLBDuplicatePAs(pfnDumpDebugPrintf, + pvDumpDebugFile, + sMIPSState.asTLB, + bUseRemapRanges ? sMIPSState.asRemap : NULL); + + if (bUseRemapRanges) + { + /* Dump unmapped address if it was dumped in FW, otherwise it will be 0 */ + if (sMIPSState.ui32UnmappedAddress) + { + PVR_DUMPDEBUG_LOG("Remap unmapped address => 0x%08X", + sMIPSState.ui32UnmappedAddress); + } + } + } + + /* Check FW code corruption in case of known errors */ + if (_IsFWCodeException(RGXMIPSFW_C0_CAUSE_EXCCODE(sMIPSState.ui32CauseRegister))) + { + PVRSRV_ERROR eError; + eError = _ValidateFWImageForMIPS(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ""); + if (eError != PVRSRV_OK) + { + PVR_DUMPDEBUG_LOG("Failed to validate any FW code corruption"); + } + } + } + PVR_DUMPDEBUG_LOG("--------------------------------"); + } +#endif + } + + return PVRSRV_OK; + +_METASPError: + PVR_DPF((PVR_DBG_ERROR, "Dump Slave Port debug information")); + _RGXDumpMetaSPExtraDebugInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + + return eError; +} + +/*! 
+******************************************************************************* + + @Function RGXDebugRequestProcess + + @Description + + This function will print out the debug for the specified level of verbosity + + @Input pfnDumpDebugPrintf - Optional replacement print function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psDevInfo - RGX device info + @Input ui32VerbLevel - Verbosity level + + @Return void + +******************************************************************************/ +static +void RGXDebugRequestProcess(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32VerbLevel) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; + PVRSRV_DEV_POWER_STATE ePowerState; + IMG_BOOL bRGXPoweredON; + IMG_UINT8 ui8FwOsCount; + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + RGXFWIF_OSDATA *psFwOsData = psDevInfo->psRGXFWIfFwOsData; + IMG_BOOL bPwrLockAlreadyHeld; + + bPwrLockAlreadyHeld = PVRSRVPwrLockIsLockedByMe(psDeviceNode); + if (!bPwrLockAlreadyHeld) + { + /* Only acquire the power-lock if not already held by the calling context */ + eError = PVRSRVPowerLock(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire lock (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return; + } + } + + ui8FwOsCount = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.sInitOptions.ui8OsCountSupport; + + eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error retrieving RGX power state. 
No debug info dumped.", + __func__)); + goto Exit; + } + + if (psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated) + { + PVR_DUMP_FIRMWARE_INFO(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks); + } + else + { + PVR_DUMPDEBUG_LOG("FW info: UNINITIALIZED"); + } + + if ((PVRSRV_VZ_MODE_IS(NATIVE) && (ui8FwOsCount > 1)) || + (PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_OS_SUPPORTED))) + { + PVR_DUMPDEBUG_LOG("Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)", + (PVRSRV_VZ_MODE_IS(NATIVE)) ? (1) : (RGX_NUM_OS_SUPPORTED), ui8FwOsCount); + } + + PVR_DUMPDEBUG_LOG("------[ RGX Device: Start ]------"); + + bRGXPoweredON = (ePowerState == PVRSRV_DEV_POWER_STATE_ON); + + PVR_DUMPDEBUG_LOG("------[ RGX Info ]------"); + PVR_DUMPDEBUG_LOG("RGX BVNC: %d.%d.%d.%d (%s)", psDevInfo->sDevFeatureCfg.ui32B, \ + psDevInfo->sDevFeatureCfg.ui32V, \ + psDevInfo->sDevFeatureCfg.ui32N, \ + psDevInfo->sDevFeatureCfg.ui32C, + PVR_ARCH_NAME); + PVR_DUMPDEBUG_LOG("RGX Device State: %s", _RGXGetDebugDevStateString(psDeviceNode->eDevState)); + PVR_DUMPDEBUG_LOG("RGX Power State: %s", _RGXGetDebugDevPowerStateString(ePowerState)); + + RGXDumpRGXDebugSummary(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, bRGXPoweredON); + + /* Dump out the kernel CCB. */ + { + RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl; + + if (psKCCBCtl != NULL) + { + PVR_DUMPDEBUG_LOG("RGX Kernel CCB WO:0x%X RO:0x%X", + psKCCBCtl->ui32WriteOffset, + psKCCBCtl->ui32ReadOffset); + } + } + + /* Dump out the firmware CCB. */ + { + RGXFWIF_CCB_CTL *psFCCBCtl = psDevInfo->psFirmwareCCBCtl; + + if (psFCCBCtl != NULL) + { + PVR_DUMPDEBUG_LOG("RGX Firmware CCB WO:0x%X RO:0x%X", + psFCCBCtl->ui32WriteOffset, + psFCCBCtl->ui32ReadOffset); + } + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Dump out the Workload estimation CCB. 
*/ + { + RGXFWIF_CCB_CTL *psWorkEstCCBCtl = psDevInfo->psWorkEstFirmwareCCBCtl; + + if (psWorkEstCCBCtl != NULL) + { + PVR_DUMPDEBUG_LOG("RGX WorkEst CCB WO:0x%X RO:0x%X", + psWorkEstCCBCtl->ui32WriteOffset, + psWorkEstCCBCtl->ui32ReadOffset); + } + } +#endif + + + if (psFwOsData != NULL) + { +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) + /* Dump out the checkpoint CCB offsets. */ + { + RGXFWIF_CCB_CTL *psCheckpointCCBCtl = psDevInfo->psCheckpointCCBCtl; + + if (psCheckpointCCBCtl != NULL) + { + PVR_DUMPDEBUG_LOG("RGX Checkpoint CCB WO:0x%X RO:0x%X (Check State: FW=%#X, HOST=%#X)", + psCheckpointCCBCtl->ui32WriteOffset, + psCheckpointCCBCtl->ui32ReadOffset, + psFwOsData->ui32FWSyncCheckMark, + psFwOsData->ui32HostSyncCheckMark); + } + } +#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */ + + /* Dump the KCCB commands executed */ + PVR_DUMPDEBUG_LOG("RGX Kernel CCB commands executed = %d", + psFwOsData->ui32KCCBCmdsExecuted); + +#if defined(PVRSRV_STALLED_CCB_ACTION) + /* Dump the number of times we have performed a forced UFO update, + * and (if non-zero) the timestamp of the most recent occurrence/ + */ + PVR_DUMPDEBUG_LOG("RGX SLR: Forced UFO updates requested = %d", + psFwOsData->ui32ForcedUpdatesRequested); + if (psFwOsData->ui32ForcedUpdatesRequested > 0) + { + IMG_UINT8 ui8Idx; + IMG_UINT64 ui64Seconds, ui64Nanoseconds; + + if (psFwOsData->ui64LastForcedUpdateTime > 0ULL) + { + ConvertOSTimestampToSAndNS(psFwOsData->ui64LastForcedUpdateTime, &ui64Seconds, &ui64Nanoseconds); + PVR_DUMPDEBUG_LOG("RGX SLR: (most recent forced update was around %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ")", + ui64Seconds, ui64Nanoseconds); + } + else + { + PVR_DUMPDEBUG_LOG("RGX SLR: (unable to force update as fence contained no sync checkpoints)"); + } + /* Dump SLR log */ + if (psFwOsData->sSLRLogFirst.aszCCBName[0]) + { + ConvertOSTimestampToSAndNS(psFwOsData->sSLRLogFirst.ui64Timestamp, &ui64Seconds, &ui64Nanoseconds); + PVR_DUMPDEBUG_LOG("RGX SLR:{%" IMG_UINT64_FMTSPEC 
".%09" IMG_UINT64_FMTSPEC + "} Fence found on context 0x%x '%s' has %d UFOs", + ui64Seconds, ui64Nanoseconds, + psFwOsData->sSLRLogFirst.ui32FWCtxAddr, + psFwOsData->sSLRLogFirst.aszCCBName, + psFwOsData->sSLRLogFirst.ui32NumUFOs); + } + for (ui8Idx=0; ui8IdxsSLRLog[ui8Idx].aszCCBName[0]) + { + ConvertOSTimestampToSAndNS(psFwOsData->sSLRLog[ui8Idx].ui64Timestamp, &ui64Seconds, &ui64Nanoseconds); + PVR_DUMPDEBUG_LOG("RGX SLR:[%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC + "] Fence found on context 0x%x '%s' has %d UFOs", + ui64Seconds, ui64Nanoseconds, + psFwOsData->sSLRLog[ui8Idx].ui32FWCtxAddr, + psFwOsData->sSLRLog[ui8Idx].aszCCBName, + psFwOsData->sSLRLog[ui8Idx].ui32NumUFOs); + } + } + } +#else + PVR_DUMPDEBUG_LOG("RGX SLR: Disabled"); +#endif + + /* Dump the IRQ info for threads or OS IDs */ + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + IMG_UINT32 ui32idx; + + for_each_irq_cnt(ui32idx) + { + IMG_UINT32 ui32IrqCnt; + + get_irq_cnt_val(ui32IrqCnt, ui32idx, psDevInfo); + if (ui32IrqCnt) + { + PVR_DUMPDEBUG_LOG(MSG_IRQ_CNT_TYPE "%u: FW IRQ count = %u", ui32idx, ui32IrqCnt); +#if defined(RGX_FW_IRQ_OS_COUNTERS) + if (ui32idx == RGXFW_HOST_OS) +#endif + { + PVR_DUMPDEBUG_LOG("Last sampled IRQ count in LISR = %u", psDevInfo->aui32SampleIRQCount[ui32idx]); + } + } + } + } + } + + /* Dump the FW Sys config flags on the Host */ + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + IMG_CHAR sFwSysFlagsDescription[MAX_FW_DESCRIPTION_LENGTH]; + + if (!psFwSysData) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Fw Sys Data is not mapped into CPU space", __func__)); + goto Exit; + } + + _GetFwSysFlagsDescription(sFwSysFlagsDescription, MAX_FW_DESCRIPTION_LENGTH, psFwSysData->ui32ConfigFlags); + PVR_DUMPDEBUG_LOG("FW System config flags = 0x%08X (%s)", psFwSysData->ui32ConfigFlags, sFwSysFlagsDescription); + } + + /* Dump the FW OS config flags */ + { + RGXFWIF_OSDATA *psFwOsData = psDevInfo->psRGXFWIfFwOsData; + IMG_CHAR 
sFwOsFlagsDescription[MAX_FW_DESCRIPTION_LENGTH]; + + if (!psFwOsData) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Fw Os Data is not mapped into CPU space", __func__)); + goto Exit; + } + + _GetFwOsFlagsDescription(sFwOsFlagsDescription, MAX_FW_DESCRIPTION_LENGTH, psFwOsData->ui32FwOsConfigFlags); + PVR_DUMPDEBUG_LOG("FW OS config flags = 0x%08X (%s)", psFwOsData->ui32FwOsConfigFlags, sFwOsFlagsDescription); + } + + if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(GUEST)) + { + + eError = RGXDumpRGXRegisters(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: RGXDumpRGXRegisters failed (%s)", + __func__, + PVRSRVGetErrorString(eError))); + } + } + else + { + PVR_DUMPDEBUG_LOG(" (!) %s. No registers dumped", PVRSRV_VZ_MODE_IS(GUEST) ? "Guest Mode of operation" : "RGX power is down"); + } + + PVR_DUMPDEBUG_LOG("------[ RGX FW Trace Info ]------"); + + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) + { + IMG_INT tid; + /* Dump FW trace information */ + if (psRGXFWIfTraceBufCtl != NULL) + { + for (tid = 0 ; tid < RGXFW_THREAD_NUM ; tid++) + { + IMG_UINT32 i; + IMG_BOOL bPrevLineWasZero = IMG_FALSE; + IMG_BOOL bLineIsAllZeros = IMG_FALSE; + IMG_UINT32 ui32CountLines = 0; + IMG_UINT32 *pui32TraceBuffer; + IMG_CHAR *pszLine; + + if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK) + { + PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")", + ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")), + RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType) + ); + } + else + { + PVR_DUMPDEBUG_LOG("Debug log type: none"); + } + + pui32TraceBuffer = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer; + + /* Skip if trace buffer is not allocated */ + if (pui32TraceBuffer == NULL) + { + PVR_DUMPDEBUG_LOG("RGX FW thread %d: Trace buffer not yet allocated",tid); + continue; + } + +/* Max number of DWords to be printed per 
line, in debug dump output */ +#define PVR_DD_FW_TRACEBUF_LINESIZE 30U + /* each element in the line is 8 characters plus a space. The '+ 1' is because of the final trailing '\0'. */ + pszLine = OSAllocMem(9 * PVR_DD_FW_TRACEBUF_LINESIZE + 1); + if (pszLine == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Out of mem allocating line string (size: %d)", + __func__, + 9 * PVR_DD_FW_TRACEBUF_LINESIZE + 1)); + goto Exit; + } + + PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace START ]------", tid); + PVR_DUMPDEBUG_LOG("FWT[traceptr]: %X", psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer); + PVR_DUMPDEBUG_LOG("FWT[tracebufsize]: %X", psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords); + + for (i = 0; i < psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords; i += PVR_DD_FW_TRACEBUF_LINESIZE) + { + IMG_UINT32 k = 0; + IMG_UINT32 ui32Line = 0x0; + IMG_UINT32 ui32LineOffset = i*sizeof(IMG_UINT32); + IMG_CHAR *pszBuf = pszLine; + + for (k = 0; k < PVR_DD_FW_TRACEBUF_LINESIZE; k++) + { + if ((i + k) >= psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords) + { + /* Stop reading when the index goes beyond trace buffer size. This condition is + * hit during printing the last line in DD when ui32TraceBufSizeInDWords is not + * a multiple of PVR_DD_FW_TRACEBUF_LINESIZE */ + break; + } + + ui32Line |= pui32TraceBuffer[i + k]; + + /* prepare the line to print it. The '+1' is because of the trailing '\0' added */ + OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32TraceBuffer[i + k]); + pszBuf += 9; /* write over the '\0' */ + } + + bLineIsAllZeros = (ui32Line == 0x0); + + if (bLineIsAllZeros) + { + if (bPrevLineWasZero) + { + ui32CountLines++; + } + else + { + bPrevLineWasZero = IMG_TRUE; + ui32CountLines = 1; + PVR_DUMPDEBUG_LOG("FWT[%08x]: 00000000 ... 
00000000", ui32LineOffset); + } + } + else + { + if (bPrevLineWasZero && ui32CountLines > 1) + { + PVR_DUMPDEBUG_LOG("FWT[...]: %d lines were all zero", ui32CountLines); + } + bPrevLineWasZero = IMG_FALSE; + + PVR_DUMPDEBUG_LOG("FWT[%08x]:%s", ui32LineOffset, pszLine); + } + + } + if (bPrevLineWasZero) + { + PVR_DUMPDEBUG_LOG("FWT[END]: %d lines were all zero", ui32CountLines); + } + + PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace END ]------", tid); + + OSFreeMem(pszLine); + } + } + + { + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) + { + PVR_DUMPDEBUG_LOG("------[ Full CCB Status ]------"); + } + else + { + PVR_DUMPDEBUG_LOG("------[ Stalled FWCtxs ]------"); + } + + DumpTransferCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + + DumpRenderCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + + DumpKickSyncCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE)) + { + DumpComputeCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + } + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)) + { + DumpTDMTransferCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + } + } + } + + PVR_DUMPDEBUG_LOG("------[ RGX Device: End ]------"); + +Exit: + if (!bPwrLockAlreadyHeld) + { + PVRSRVPowerUnlock(psDeviceNode); + } +} + +/*! 
+ ****************************************************************************** + + @Function RGXDebugRequestNotify + + @Description Dump the debug data for RGX + + ******************************************************************************/ +static void RGXDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgReqestHandle, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = hDbgReqestHandle; + + /* Only action the request if we've fully init'ed */ + if (psDevInfo->bDevInit2Done) + { + RGXDebugRequestProcess(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui32VerbLevel); + } +} + +PVRSRV_ERROR RGXDebugInit(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + return PVRSRVRegisterDbgRequestNotify(&psDevInfo->hDbgReqNotify, + psDevInfo->psDeviceNode, + RGXDebugRequestNotify, + DEBUG_REQUEST_SYS, + psDevInfo); +} + +PVRSRV_ERROR RGXDebugDeinit(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + if (psDevInfo->hDbgReqNotify) + { + return PVRSRVUnregisterDbgRequestNotify(psDevInfo->hDbgReqNotify); + } + + /* No notifier registered */ + return PVRSRV_OK; +} + +/****************************************************************************** + End of file (rgxdebug.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxdebug.h b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxdebug.h new file mode 100644 index 000000000000..e0cb1353bfbd --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxdebug.h @@ -0,0 +1,260 @@ +/*************************************************************************/ /*! +@File +@Title RGX debug header file +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the RGX debugging functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__RGXDEBUG_H__) +#define __RGXDEBUG_H__ + +#include "pvrsrv_error.h" +#include "img_types.h" +#include "device.h" +#include "pvr_notifier.h" +#include "pvrsrv.h" +#include "rgxdevice.h" + +/** + * Debug utility macro for printing FW IRQ count and Last sampled IRQ count in + * LISR for each RGX FW thread. + * Macro takes pointer to PVRSRV_RGXDEV_INFO as input. + */ + +#if defined(RGX_FW_IRQ_OS_COUNTERS) +#define for_each_irq_cnt(ui32idx) \ + for (ui32idx = 0; ui32idx < RGX_NUM_OS_SUPPORTED; ui32idx++) + +#define get_irq_cnt_val(ui32Dest, ui32idx, psRgxDevInfo) \ + extern const IMG_UINT32 gaui32FwOsIrqCntRegAddr[RGXFW_MAX_NUM_OS]; \ + ui32Dest = OSReadHWReg32((psRgxDevInfo)->pvRegsBaseKM, gaui32FwOsIrqCntRegAddr[ui32idx]); + +#define MSG_IRQ_CNT_TYPE "OS" + +#else + +#define for_each_irq_cnt(ui32idx) \ + for (ui32idx = 0; ui32idx < RGXFW_THREAD_NUM; ui32idx++) + +#define get_irq_cnt_val(ui32Dest, ui32idx, psRgxDevInfo) \ + ui32Dest = (psRgxDevInfo)->psRGXFWIfFwSysData->aui32InterruptCount[ui32idx] + +#define MSG_IRQ_CNT_TYPE "Thread" +#endif /* RGX_FW_IRQ_OS_COUNTERS */ + +static inline void RGXDEBUG_PRINT_IRQ_COUNT(PVRSRV_RGXDEV_INFO* psRgxDevInfo) +{ +#if defined(PVRSRV_NEED_PVR_DPF) && defined(DEBUG) + IMG_UINT32 ui32idx; + + for_each_irq_cnt(ui32idx) + { + IMG_UINT32 ui32IrqCnt; + + get_irq_cnt_val(ui32IrqCnt, ui32idx, psRgxDevInfo); + + PVR_DPF((DBGPRIV_VERBOSE, MSG_IRQ_CNT_TYPE + " %u FW IRQ count = %u", ui32idx, ui32IrqCnt)); + +#if defined(RGX_FW_IRQ_OS_COUNTERS) + if (ui32idx == RGXFW_HOST_OS) +#endif + { + PVR_DPF((DBGPRIV_VERBOSE, "Last sampled IRQ count in LISR = %u", + (psRgxDevInfo)->aui32SampleIRQCount[ui32idx])); + } + } +#endif /* PVRSRV_NEED_PVR_DPF */ +} + +/*! 
+******************************************************************************* + + @Function RGXDumpRGXRegisters + + @Description + + Dumps an extensive list of RGX registers required for debugging + + @Input pfnDumpDebugPrintf - Optional replacement print function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psDevInfo - RGX device info + + @Return PVRSRV_ERROR PVRSRV_OK on success, error code otherwise + +******************************************************************************/ +PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! +******************************************************************************* + + @Function RGXDumpFirmwareTrace + + @Description Dumps the decoded version of the firmware trace buffer. + + Dump useful debugging info + + @Input pfnDumpDebugPrintf - Optional replacement print function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psDevInfo - RGX device info + + @Return void + +******************************************************************************/ +void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo); + +#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) +void RGXDumpPowerMonitoring(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo); +#endif + +/*! 
+******************************************************************************* + + @Function RGXReadWithSP + + @Description + + Reads data from a memory location (FW memory map) using the META Slave Port + + @Input psDevInfo - Pointer to RGX DevInfo to be used while reading + @Input ui32FWAddr - 32 bit FW address + @Input pui32Value - When the read is successful, value at above FW address + is returned at this location + + @Return PVRSRV_ERROR PVRSRV_OK if read success, error code otherwise. +******************************************************************************/ +PVRSRV_ERROR RGXReadWithSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 *pui32Value); + +/*! +******************************************************************************* + + @Function RGXWriteWithSP + + @Description + + Writes data to a memory location (FW memory map) using the META Slave Port + + @Input psDevInfo - Pointer to RGX DevInfo to be used while writing + @Input ui32FWAddr - 32 bit FW address + + @Input ui32Value - 32 bit Value to write + + @Return PVRSRV_ERROR PVRSRV_OK if write success, error code otherwise. +******************************************************************************/ +PVRSRV_ERROR RGXWriteWithSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 ui32Value); + +#if defined(SUPPORT_EXTRA_METASP_DEBUG) +/*! +******************************************************************************* + + @Function ValidateFWOnLoad + + @Description Compare the Firmware image as seen from the CPU point of view + against the same memory area as seen from the META point of view + after first power up. + + @Input psDevInfo - Device Info + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo); +#endif + +/*! 
+******************************************************************************* + + @Function RGXDumpRGXDebugSummary + + @Description + + Dump a summary in human readable form with the RGX state + + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psDevInfo - RGX device info + @Input bRGXPoweredON - IMG_TRUE if RGX device is on + + @Return void + +******************************************************************************/ +void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_BOOL bRGXPoweredON); + +/*! +******************************************************************************* + + @Function RGXDebugInit + + @Description + + Setup debug requests, calls into PVRSRVRegisterDbgRequestNotify + + @Input psDevInfo RGX device info + @Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error + +******************************************************************************/ +PVRSRV_ERROR RGXDebugInit(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! 
+******************************************************************************* + + @Function RGXDebugDeinit + + @Description + + Remove debug requests, calls into PVRSRVUnregisterDbgRequestNotify + + @Output phNotify Points to debug notifier handle + @Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error + +******************************************************************************/ +PVRSRV_ERROR RGXDebugDeinit(PVRSRV_RGXDEV_INFO *psDevInfo); + +#endif /* __RGXDEBUG_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxdevice.h b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxdevice.h new file mode 100644 index 000000000000..4736d92346ed --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxdevice.h @@ -0,0 +1,750 @@ +/*************************************************************************/ /*! +@File +@Title RGX device node header file +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the RGX device node +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__RGXDEVICE_H__) +#define __RGXDEVICE_H__ + +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_device_types.h" +#include "mmu_common.h" +#include "rgx_fwif_km.h" +#include "cache_ops.h" +#include "device.h" +#include "osfunc.h" +#include "rgxlayer_impl.h" +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +#include "hash.h" +#endif +typedef struct _RGX_SERVER_COMMON_CONTEXT_ RGX_SERVER_COMMON_CONTEXT; + +typedef struct { + DEVMEM_MEMDESC *psFWFrameworkMemDesc; +} RGX_COMMON_CONTEXT_INFO; + + +/*! 
+ ****************************************************************************** + * Device state flags + *****************************************************************************/ +#define RGXKM_DEVICE_STATE_ZERO_FREELIST (0x1) /*!< Zeroing the physical pages of reconstructed free lists */ +#define RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN (0x2) /*!< Used to disable the Devices Watchdog logging */ +#define RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN (0x4) /*!< Used for validation to inject dust requests every TA/3D kick */ +#define RGXKM_DEVICE_STATE_CCB_GROW_EN (0x8) /*!< Used to indicate CCB grow is permitted */ +#define RGXKM_DEVICE_STATE_MASK (0xF) + +/*! + ****************************************************************************** + * GPU DVFS Table + *****************************************************************************/ + +#define RGX_GPU_DVFS_TABLE_SIZE 32 +#define RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US 25000 /* Time required to calibrate a clock frequency the first time */ +#define RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US 150000 /* Time required for a recalibration after a DVFS transition */ +#define RGX_GPU_DVFS_PERIODIC_CALIBRATION_TIME_US 10000000 /* Time before the next periodic calibration and correlation */ + +typedef struct _GPU_FREQ_TRACKING_DATA_ +{ + /* Core clock speed estimated by the driver */ + IMG_UINT32 ui32EstCoreClockSpeed; + + /* Amount of successful calculations of the estimated core clock speed */ + IMG_UINT32 ui32CalibrationCount; +} GPU_FREQ_TRACKING_DATA; + +#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) +#define RGX_GPU_FREQ_TRACKING_SIZE 16 + +typedef struct +{ + IMG_UINT64 ui64BeginCRTimestamp; + IMG_UINT64 ui64BeginOSTimestamp; + + IMG_UINT64 ui64EndCRTimestamp; + IMG_UINT64 ui64EndOSTimestamp; + + IMG_UINT32 ui32EstCoreClockSpeed; + IMG_UINT32 ui32CoreClockSpeed; +} GPU_FREQ_TRACKING_HISTORY; +#endif + +typedef struct _RGX_GPU_DVFS_TABLE_ +{ + /* Beginning of current calibration period (in us) */ + IMG_UINT64 
ui64CalibrationCRTimestamp; + IMG_UINT64 ui64CalibrationOSTimestamp; + + /* Calculated calibration period (in us) */ + IMG_UINT64 ui64CalibrationCRTimediff; + IMG_UINT64 ui64CalibrationOSTimediff; + + /* Current calibration period (in us) */ + IMG_UINT32 ui32CalibrationPeriod; + + /* System layer frequency table and frequency tracking data */ + IMG_UINT32 ui32FreqIndex; + IMG_UINT32 aui32GPUFrequency[RGX_GPU_DVFS_TABLE_SIZE]; + GPU_FREQ_TRACKING_DATA asTrackingData[RGX_GPU_DVFS_TABLE_SIZE]; + +#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) + IMG_UINT32 ui32HistoryIndex; + GPU_FREQ_TRACKING_HISTORY asTrackingHistory[RGX_GPU_FREQ_TRACKING_SIZE]; +#endif +} RGX_GPU_DVFS_TABLE; + + +/*! + ****************************************************************************** + * GPU utilisation statistics + *****************************************************************************/ + +typedef struct _RGXFWIF_GPU_UTIL_STATS_ +{ + IMG_BOOL bValid; /* If TRUE, statistics are valid. + FALSE if the driver couldn't get reliable stats. 
*/ + IMG_UINT64 ui64GpuStatActive; /* GPU active statistic */ + IMG_UINT64 ui64GpuStatBlocked; /* GPU blocked statistic */ + IMG_UINT64 ui64GpuStatIdle; /* GPU idle statistic */ + IMG_UINT64 ui64GpuStatCumulative; /* Sum of active/blocked/idle stats */ + IMG_UINT64 ui64TimeStamp; /* Timestamp of the most recent sample of the GPU stats */ +} RGXFWIF_GPU_UTIL_STATS; + + +typedef struct _RGX_REG_CONFIG_ +{ + IMG_BOOL bEnabled; + RGXFWIF_REG_CFG_TYPE eRegCfgTypeToPush; + IMG_UINT32 ui32NumRegRecords; + POS_LOCK hLock; +} RGX_REG_CONFIG; + +typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC; + +typedef struct +{ + IMG_UINT32 ui32DustCount1; + IMG_UINT32 ui32DustCount2; + IMG_BOOL bToggle; +} RGX_DUST_STATE; + +typedef struct _PVRSRV_DEVICE_FEATURE_CONFIG_ +{ + IMG_UINT64 ui64ErnsBrns; + IMG_UINT64 ui64Features; + IMG_UINT32 ui32B; + IMG_UINT32 ui32V; + IMG_UINT32 ui32N; + IMG_UINT32 ui32C; + IMG_UINT32 ui32FeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX]; + IMG_UINT32 ui32MAXDustCount; + IMG_PCHAR pszBVNCString; +}PVRSRV_DEVICE_FEATURE_CONFIG; + +/* This is used to get the value of a specific feature. + * Note that it will assert if the feature is disabled or value is invalid. */ +#define RGX_GET_FEATURE_VALUE(psDevInfo, Feature) \ + ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] ) + +/* This is used to check if the feature value (e.g. with an integer value) is available for the currently running BVNC or not */ +#define RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, Feature) \ + ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] < RGX_FEATURE_VALUE_DISABLED ) + +/* This is used to check if the Boolean feature (e.g. 
WITHOUT an integer value) is available for the currently running BVNC or not */ +#define RGX_IS_FEATURE_SUPPORTED(psDevInfo, Feature) \ + BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64Features, RGX_FEATURE_##Feature##_BIT_MASK) + +/* This is used to check if the ERN is available for the currently running BVNC or not */ +#define RGX_IS_ERN_SUPPORTED(psDevInfo, ERN) \ + BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64ErnsBrns, HW_ERN_##ERN##_BIT_MASK) + +/* This is used to check if the BRN is available for the currently running BVNC or not */ +#define RGX_IS_BRN_SUPPORTED(psDevInfo, BRN) \ + BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64ErnsBrns, FIX_HW_BRN_##BRN##_BIT_MASK) + +/* there is a corresponding define in rgxapi.h */ +#define RGX_MAX_TIMER_QUERIES 16U + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +/*! + * The host maintains a 512-deep cache of submitted workloads per device, + * i.e. a global look-up table for TA, 3D and compute (depending on the RGX + * hardware support present) + */ + +/* + * For the workload estimation return data array, the max amount of commands the + * MTS can have is 255, therefore 512 (LOG2 = 9) is large enough to account for + * all corner cases + */ +#define RETURN_DATA_ARRAY_SIZE_LOG2 (9) +#define RETURN_DATA_ARRAY_SIZE ((1UL) << RETURN_DATA_ARRAY_SIZE_LOG2) +#define RETURN_DATA_ARRAY_WRAP_MASK (RETURN_DATA_ARRAY_SIZE - 1) + +#define WORKLOAD_HASH_SIZE_LOG2 6 +#define WORKLOAD_HASH_SIZE ((1UL) << WORKLOAD_HASH_SIZE_LOG2) +#define WORKLOAD_HASH_WRAP_MASK (WORKLOAD_HASH_SIZE - 1) + +/*! + * Workload characteristics for supported data masters. + * All characteristics must match for the workload estimate to be used/updated. 
+ */ +typedef union _RGX_WORKLOAD_ +{ + struct + { + IMG_UINT32 ui32RenderTargetSize; + IMG_UINT32 ui32NumberOfDrawCalls; + IMG_UINT32 ui32NumberOfIndices; + IMG_UINT32 ui32NumberOfMRTs; + } sTA3D; + + struct + { + IMG_UINT32 ui32NumberOfWorkgroups; + IMG_UINT32 ui32NumberOfWorkitems; + } sCompute; + + struct + { + IMG_UINT32 ui32Characteristic1; + IMG_UINT32 ui32Characteristic2; + } sTransfer; +} RGX_WORKLOAD; + +/*! + * Host data used to match the return data (actual cycles count) to the + * submitted command packet. + * The hash table is a per-DM circular buffer containing a key based on the + * workload characteristics. On job completion, the oldest workload data + * is evicted if the CB is full and the driver matches the characteristics + * to the matching data. + * + * o If the driver finds a match the existing cycle estimate is averaged with + * the actual cycles used. + * o Otherwise a new hash entry is created with the actual cycles for this + * workload. + * + * Subsequently if a match is found during command submission, the estimate + * is passed to the scheduler, e.g. adjust the GPU frequency if PDVFS is enabled. + */ +typedef struct _WORKLOAD_MATCHING_DATA_ +{ + POS_LOCK psHashLock; + HASH_TABLE *psHashTable; /*! existing workload cycle estimates for this DM */ + RGX_WORKLOAD asHashKeys[WORKLOAD_HASH_SIZE]; + IMG_UINT64 aui64HashData[WORKLOAD_HASH_SIZE]; + IMG_UINT32 ui32HashArrayWO; /*! track the most recent workload estimates */ +} WORKLOAD_MATCHING_DATA; + +/*! + * A generic container for the workload matching data for GPU contexts: + * rendering (TA, 3D), compute, etc. 
+ */ +typedef struct _WORKEST_HOST_DATA_ +{ + union + { + struct + { + WORKLOAD_MATCHING_DATA sDataTA; /*!< matching data for TA commands */ + WORKLOAD_MATCHING_DATA sData3D; /*!< matching data for 3D commands */ + } sTA3D; + + struct + { + WORKLOAD_MATCHING_DATA sDataCDM; /*!< matching data for CDM commands */ + } sCompute; + + struct + { + WORKLOAD_MATCHING_DATA sDataTDM; /*!< matching data for TDM-TQ commands */ + } sTransfer; + } uWorkloadMatchingData; + + /* + * This is a per-context property, hence the TA and 3D share the same + * per render context counter. + */ + IMG_UINT32 ui32WorkEstCCBReceived; /*!< Used to ensure all submitted work + estimation commands are received + by the host before clean up. */ +} WORKEST_HOST_DATA; + +/*! + * Entries in the list of submitted workloads, used when the completed command + * returns data to the host. + * + * - the matching data is needed as it holds the hash data + * - the host data is needed for completion updates, ensuring memory is not + * freed while workload estimates are in-flight. + * - the workload characteristic is used in the hash table look-up. + */ +typedef struct _WORKEST_RETURN_DATA_ +{ + WORKEST_HOST_DATA *psWorkEstHostData; + WORKLOAD_MATCHING_DATA *psWorkloadMatchingData; + RGX_WORKLOAD sWorkloadCharacteristics; +} WORKEST_RETURN_DATA; +#endif + + +typedef struct +{ +#if defined(PDUMP) + IMG_HANDLE hPdumpPages; +#endif + PG_HANDLE sPages; + IMG_DEV_PHYADDR sPhysAddr; +} RGX_MIPS_ADDRESS_TRAMPOLINE; + + +/*! 
+ ****************************************************************************** + * RGX Device info + *****************************************************************************/ + +typedef struct _PVRSRV_RGXDEV_INFO_ +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + + PVRSRV_DEVICE_FEATURE_CONFIG sDevFeatureCfg; + + IMG_BOOL bDevInit2Done; + + IMG_BOOL bFirmwareInitialised; + IMG_BOOL bPDPEnabled; + + IMG_HANDLE hDbgReqNotify; + + /* Kernel mode linear address of device registers */ + void __iomem *pvRegsBaseKM; + + IMG_HANDLE hRegMapping; + + /* System physical address of device registers */ + IMG_CPU_PHYADDR sRegsPhysBase; + /* Register region size in bytes */ + IMG_UINT32 ui32RegSize; + + PVRSRV_STUB_PBDESC *psStubPBDescListKM; + + /* Firmware memory context info */ + DEVMEM_CONTEXT *psKernelDevmemCtx; + DEVMEM_HEAP *psFirmwareMainHeap; + DEVMEM_HEAP *psFirmwareConfigHeap; + MMU_CONTEXT *psKernelMMUCtx; + + void *pvDeviceMemoryHeap; + + /* Kernel CCB */ + DEVMEM_MEMDESC *psKernelCCBCtlMemDesc; /*!< memdesc for Kernel CCB control */ + RGXFWIF_CCB_CTL *psKernelCCBCtl; /*!< kernel mapping for Kernel CCB control */ + DEVMEM_MEMDESC *psKernelCCBMemDesc; /*!< memdesc for Kernel CCB */ + IMG_UINT8 *psKernelCCB; /*!< kernel mapping for Kernel CCB */ + DEVMEM_MEMDESC *psKernelCCBRtnSlotsMemDesc; /*!< Return slot array for Kernel CCB commands */ + IMG_UINT32 *pui32KernelCCBRtnSlots; /*!< kernel mapping for return slot array */ + + /* Firmware CCB */ + DEVMEM_MEMDESC *psFirmwareCCBCtlMemDesc; /*!< memdesc for Firmware CCB control */ + RGXFWIF_CCB_CTL *psFirmwareCCBCtl; /*!< kernel mapping for Firmware CCB control */ + DEVMEM_MEMDESC *psFirmwareCCBMemDesc; /*!< memdesc for Firmware CCB */ + IMG_UINT8 *psFirmwareCCB; /*!< kernel mapping for Firmware CCB */ + +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) + /* Checkpoint CCB */ + DEVMEM_MEMDESC *psCheckpointCCBCtlMemDesc; /*!< memdesc for Checkpoint CCB control */ + RGXFWIF_CCB_CTL *psCheckpointCCBCtl; /*!< kernel mapping for Checkpoint 
CCB control */ + DEVMEM_MEMDESC *psCheckpointCCBMemDesc; /*!< memdesc for Checkpoint CCB */ + IMG_UINT8 *psCheckpointCCB; /*!< kernel mapping for Checkpoint CCB */ +#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */ + + /* Workload Estimation Firmware CCB */ + DEVMEM_MEMDESC *psWorkEstFirmwareCCBCtlMemDesc; /*!< memdesc for Workload Estimation Firmware CCB control */ + RGXFWIF_CCB_CTL *psWorkEstFirmwareCCBCtl; /*!< kernel mapping for Workload Estimation Firmware CCB control */ + DEVMEM_MEMDESC *psWorkEstFirmwareCCBMemDesc; /*!< memdesc for Workload Estimation Firmware CCB */ + IMG_UINT8 *psWorkEstFirmwareCCB; /*!< kernel mapping for Workload Estimation Firmware CCB */ + +#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) + /* Counter dumping */ + DEVMEM_MEMDESC *psCounterBufferMemDesc; /*!< mem desc for counter dumping buffer */ + POS_LOCK hCounterDumpingLock; /*!< Lock for guarding access to counter dumping buffer */ +#endif + + IMG_UINT32 ui32FWPoisonOnFreeFlag; /*!< Flag for poisoning FW allocations when freed */ + + IMG_BOOL bIgnoreHWReportedBVNC; /*!< Ignore BVNC reported by HW */ + + /* + if we don't preallocate the pagetables we must + insert newly allocated page tables dynamically + */ + void *pvMMUContextList; + + IMG_UINT32 ui32ClkGateStatusReg; + IMG_UINT32 ui32ClkGateStatusMask; + + DEVMEM_MEMDESC *psRGXFWCodeMemDesc; + IMG_DEV_VIRTADDR sFWCodeDevVAddrBase; + IMG_UINT32 ui32FWCodeSizeInBytes; + DEVMEM_MEMDESC *psRGXFWDataMemDesc; + IMG_DEV_VIRTADDR sFWDataDevVAddrBase; + RGX_MIPS_ADDRESS_TRAMPOLINE *psTrampoline; + + DEVMEM_MEMDESC *psRGXFWCorememCodeMemDesc; + IMG_DEV_VIRTADDR sFWCorememCodeDevVAddrBase; + RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr; + IMG_UINT32 ui32FWCorememCodeSizeInBytes; + + DEVMEM_MEMDESC *psRGXFWIfCorememDataStoreMemDesc; + IMG_DEV_VIRTADDR sFWCorememDataStoreDevVAddrBase; + RGXFWIF_DEV_VIRTADDR sFWCorememDataStoreFWAddr; + + DEVMEM_MEMDESC *psRGXFWAlignChecksMemDesc; + +#if defined(PDUMP) + DEVMEM_MEMDESC *psRGXFWSigTAChecksMemDesc; + 
IMG_UINT32 ui32SigTAChecksSize; + + DEVMEM_MEMDESC *psRGXFWSig3DChecksMemDesc; + IMG_UINT32 ui32Sig3DChecksSize; + + DEVMEM_MEMDESC *psRGXFWSigTDM2DChecksMemDesc; + IMG_UINT32 ui32SigTDM2DChecksSize; + + IMG_BOOL bDumpedKCCBCtlAlready; + + POS_SPINLOCK hSyncCheckpointSignalSpinLock; /*!< Guards data shared between an atomic & sleepable-context */ +#endif + + POS_LOCK hRGXFWIfBufInitLock; /*!< trace buffer lock for initialisation phase */ + + DEVMEM_MEMDESC *psRGXFWIfTraceBufCtlMemDesc; /*!< memdesc of trace buffer control structure */ + DEVMEM_MEMDESC *psRGXFWIfTraceBufferMemDesc[RGXFW_THREAD_NUM]; /*!< memdesc of actual FW trace (log) buffer(s) */ + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl; /*!< structure containing trace control data and actual trace buffer */ + + DEVMEM_MEMDESC *psRGXFWIfFwSysDataMemDesc; /*!< memdesc of the firmware-shared system data structure */ + RGXFWIF_SYSDATA *psRGXFWIfFwSysData; /*!< structure containing trace control data and actual trace buffer */ + + DEVMEM_MEMDESC *psRGXFWIfFwOsDataMemDesc; /*!< memdesc of the firmware-shared os structure */ + RGXFWIF_OSDATA *psRGXFWIfFwOsData; /*!< structure containing trace control data and actual trace buffer */ + +#if defined(SUPPORT_TBI_INTERFACE) + DEVMEM_MEMDESC *psRGXFWIfTBIBufferMemDesc; /*!< memdesc of actual FW TBI buffer */ + RGXFWIF_DEV_VIRTADDR sRGXFWIfTBIBuffer; /*!< TBI buffer data */ + IMG_UINT32 ui32FWIfTBIBufferSize; +#endif + + DEVMEM_MEMDESC *psRGXFWIfHWRInfoBufCtlMemDesc; + RGXFWIF_HWRINFOBUF *psRGXFWIfHWRInfoBufCtl; + + DEVMEM_MEMDESC *psRGXFWIfGpuUtilFWCbCtlMemDesc; + RGXFWIF_GPU_UTIL_FWCB *psRGXFWIfGpuUtilFWCb; + + DEVMEM_MEMDESC *psRGXFWIfHWPerfBufMemDesc; + IMG_BYTE *psRGXFWIfHWPerfBuf; + IMG_UINT32 ui32RGXFWIfHWPerfBufSize; /* in bytes */ + + DEVMEM_MEMDESC *psRGXFWIfRegCfgMemDesc; + + DEVMEM_MEMDESC *psRGXFWIfHWPerfCountersMemDesc; + + DEVMEM_MEMDESC *psRGXFWIfConnectionCtlMemDesc; + RGXFWIF_CONNECTION_CTL *psRGXFWIfConnectionCtl; + + DEVMEM_MEMDESC 
*psRGXFWIfSysInitMemDesc; + RGXFWIF_SYSINIT *psRGXFWIfSysInit; + + DEVMEM_MEMDESC *psRGXFWIfOsInitMemDesc; + RGXFWIF_OSINIT *psRGXFWIfOsInit; + + DEVMEM_MEMDESC *psRGXFWIfRuntimeCfgMemDesc; + RGXFWIF_RUNTIME_CFG *psRGXFWIfRuntimeCfg; + + /* Additional guest firmware memory context info */ + DEVMEM_HEAP *psGuestFirmwareRawHeap[RGX_NUM_OS_SUPPORTED]; + DEVMEM_MEMDESC *psGuestFirmwareRawMemDesc[RGX_NUM_OS_SUPPORTED]; + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Array to store data needed for workload estimation when a workload + has finished and its cycle time is returned to the host. */ + WORKEST_RETURN_DATA asReturnData[RETURN_DATA_ARRAY_SIZE]; + IMG_UINT32 ui32ReturnDataWO; + POS_LOCK hWorkEstLock; +#endif + +#if defined(SUPPORT_PDVFS) + /** + * Host memdesc and pointer to memory containing core clock rate in Hz. + * Firmware updates the memory on changing the core clock rate over GPIO. + * Note: Shared memory needs atomic access from Host driver and firmware, + * hence size should not be greater than memory transaction granularity. + * Currently it is chosen to be 32 bits. + */ + DEVMEM_MEMDESC *psRGXFWIFCoreClkRateMemDesc; + volatile IMG_UINT32 *pui32RGXFWIFCoreClkRate; + /** + * Last sampled core clk rate. + */ + volatile IMG_UINT32 ui32CoreClkRateSnapshot; +#endif + + /* + HWPerf data for the RGX device + */ + + POS_LOCK hHWPerfLock; /*! Critical section lock that protects HWPerf code + * from multiple thread duplicate init/deinit + * and loss/freeing of FW & Host resources while in + * use in another thread e.g. MSIR. */ + + IMG_UINT64 ui64HWPerfFilter; /*! Event filter for FW events (settable by AppHint) */ + IMG_HANDLE hHWPerfStream; /*! TL Stream buffer (L2) for firmware event stream */ + IMG_UINT32 ui32L2BufMaxPacketSize;/*!< Max allowed packet size in FW HWPerf TL (L2) buffer */ + IMG_BOOL bSuspendHWPerfL2DataCopy; /*! Flag to indicate if copying HWPerf data is suspended */ + + IMG_UINT32 ui32HWPerfHostFilter; /*! 
Event filter for HWPerfHost stream (settable by AppHint) */ + POS_LOCK hLockHWPerfHostStream; /*! Lock guarding access to HWPerfHost stream from multiple threads */ + IMG_HANDLE hHWPerfHostStream; /*! TL Stream buffer for host only event stream */ + IMG_UINT32 ui32HWPerfHostBufSize; /*! Host side buffer size in bytes */ + IMG_UINT32 ui32HWPerfHostLastOrdinal; /*! Ordinal of the last packet emitted in HWPerfHost TL stream. + * Guarded by hLockHWPerfHostStream */ + IMG_UINT32 ui32HWPerfHostNextOrdinal; /*! Ordinal number for HWPerfHost events. Guarded by hHWPerfHostSpinLock */ + IMG_UINT8 *pui8DeferredEvents; /*! List of HWPerfHost events yet to be emitted in the TL stream. + * Events generated from atomic context are deferred "emitted" + * as the "emission" code can sleep */ + IMG_UINT16 ui16DEReadIdx; /*! Read index in the above deferred events buffer */ + IMG_UINT16 ui16DEWriteIdx; /*! Write index in the above deferred events buffer */ + void *pvHostHWPerfMISR; /*! MISR to emit pending/deferred events in HWPerfHost TL stream */ + POS_SPINLOCK hHWPerfHostSpinLock; /*! Guards data shared between an atomic & sleepable-context */ +#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) + IMG_UINT32 ui32DEHighWatermark; /*! High watermark of deferred events buffer usage. Protected by + *! hHWPerfHostSpinLock */ + /* Max number of times DeferredEmission waited for an atomic-context to "finish" packet write */ + IMG_UINT32 ui32WaitForAtomicCtxPktHighWatermark; /*! Protected by hLockHWPerfHostStream */ + /* Whether warning has been logged about an atomic-context packet loss (due to too long wait for "write" finish) */ + IMG_BOOL bWarnedAtomicCtxPktLost; + /* Max number of times DeferredEmission scheduled-out to give a chance to the right-ordinal packet to be emitted */ + IMG_UINT32 ui32WaitForRightOrdPktHighWatermark; /*! 
Protected by hLockHWPerfHostStream */ + /* Whether warning has been logged about an packet loss (due to too long wait for right ordinal to emit) */ + IMG_BOOL bWarnedPktOrdinalBroke; +#endif + + void *pvGpuFtraceData; + + /* Poll data for detecting firmware fatal errors */ + IMG_UINT32 aui32CrLastPollCount[RGXFW_THREAD_NUM]; + IMG_UINT32 ui32KCCBCmdsExecutedLastTime; + IMG_BOOL bKCCBCmdsWaitingLastTime; + IMG_UINT32 ui32GEOTimeoutsLastTime; + IMG_UINT32 ui32InterruptCountLastTime; + IMG_UINT32 ui32MissingInterruptsLastTime; + + /* Client stall detection */ + IMG_UINT32 ui32StalledClientMask; + + IMG_BOOL bWorkEstEnabled; + IMG_BOOL bPDVFSEnabled; + + void *pvLISRData; + void *pvMISRData; + void *pvAPMISRData; + RGX_ACTIVEPM_CONF eActivePMConf; + + volatile IMG_UINT32 aui32SampleIRQCount[RGXFW_THREAD_NUM]; + + DEVMEM_MEMDESC *psRGXFaultAddressMemDesc; + + DEVMEM_MEMDESC *psSLC3FenceMemDesc; + + /* If we do 10 deferred memory allocations per second, then the ID would wrap around after 13 years */ + IMG_UINT32 ui32ZSBufferCurrID; /*!< ID assigned to the next deferred devmem allocation */ + IMG_UINT32 ui32FreelistCurrID; /*!< ID assigned to the next freelist */ + + POS_LOCK hLockZSBuffer; /*!< Lock to protect simultaneous access to ZSBuffers */ + DLLIST_NODE sZSBufferHead; /*!< List of on-demand ZSBuffers */ + POS_LOCK hLockFreeList; /*!< Lock to protect simultaneous access to Freelists */ + DLLIST_NODE sFreeListHead; /*!< List of growable Freelists */ + PSYNC_PRIM_CONTEXT hSyncPrimContext; + PVRSRV_CLIENT_SYNC_PRIM *psPowSyncPrim; + + IMG_UINT32 ui32ActivePMReqOk; + IMG_UINT32 ui32ActivePMReqDenied; + IMG_UINT32 ui32ActivePMReqNonIdle; + IMG_UINT32 ui32ActivePMReqRetry; + IMG_UINT32 ui32ActivePMReqTotal; + + IMG_HANDLE hProcessQueuesMISR; + + IMG_UINT32 ui32DeviceFlags; /*!< Flags to track general device state */ + + /* GPU DVFS Table */ + RGX_GPU_DVFS_TABLE *psGpuDVFSTable; + + /* Pointer to function returning the GPU utilisation statistics since the last + * time 
the function was called. Supports different users at the same time. + * + * psReturnStats [out]: GPU utilisation statistics (active high/active low/idle/blocked) + * in microseconds since the last time the function was called + * by a specific user (identified by hGpuUtilUser) + * + * Returns PVRSRV_OK in case the call completed without errors, + * some other value otherwise. + */ + PVRSRV_ERROR (*pfnGetGpuUtilStats) (PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hGpuUtilUser, + RGXFWIF_GPU_UTIL_STATS *psReturnStats); + + POS_LOCK hGPUUtilLock; + + /* Register configuration */ + RGX_REG_CONFIG sRegCongfig; + + IMG_BOOL bRGXPowered; + DLLIST_NODE sMemoryContextList; + + POSWR_LOCK hRenderCtxListLock; + POSWR_LOCK hComputeCtxListLock; + POSWR_LOCK hTransferCtxListLock; + POSWR_LOCK hTDMCtxListLock; + POSWR_LOCK hMemoryCtxListLock; + POSWR_LOCK hKickSyncCtxListLock; + + /* Linked list of deferred KCCB commands due to a full KCCB. + * Access to members sKCCBDeferredCommandsListHead and ui32KCCBDeferredCommandsCount + * are protected by the hLockKCCBDeferredCommandsList spin lock. 
*/ + POS_SPINLOCK hLockKCCBDeferredCommandsList; /*!< Protects deferred KCCB commands list */ + DLLIST_NODE sKCCBDeferredCommandsListHead; + IMG_UINT32 ui32KCCBDeferredCommandsCount; /*!< No of commands in the deferred list */ + + /* Linked lists of contexts on this device */ + DLLIST_NODE sRenderCtxtListHead; + DLLIST_NODE sComputeCtxtListHead; + DLLIST_NODE sTransferCtxtListHead; + DLLIST_NODE sTDMCtxtListHead; + DLLIST_NODE sKickSyncCtxtListHead; + + DLLIST_NODE sCommonCtxtListHead; + POSWR_LOCK hCommonCtxtListLock; + IMG_UINT32 ui32CommonCtxtCurrentID; /*!< ID assigned to the next common context */ + + POS_LOCK hDebugFaultInfoLock; /*!< Lock to protect the debug fault info list */ + POS_LOCK hMMUCtxUnregLock; /*!< Lock to protect list of unregistered MMU contexts */ + + POS_LOCK hNMILock; /*!< Lock to protect NMI operations */ + + RGX_DUST_STATE sDustReqState; + + RGX_LAYER_PARAMS sLayerParams; + + RGXFWIF_DM eBPDM; /*!< Current breakpoint data master */ + IMG_BOOL bBPSet; /*!< A Breakpoint has been set */ + POS_LOCK hBPLock; /*!< Lock for break point operations */ + + IMG_UINT32 ui32CoherencyTestsDone; + + ATOMIC_T iCCBSubmissionOrdinal; /* Rolling count used to indicate CCB submission order (all CCBs) */ + POS_LOCK hCCBRecoveryLock; /* Lock to protect pvEarliestStalledClientCCB and ui32OldestSubmissionOrdinal variables */ + void *pvEarliestStalledClientCCB; /* Will point to cCCB command to unblock in the event of a stall */ + IMG_UINT32 ui32OldestSubmissionOrdinal; /* Earliest submission ordinal of CCB entry found so far */ + IMG_UINT32 ui32SLRHoldoffCounter; /* Decremented each time health check is called until zero. SLR only happen when zero. 
*/ + + POS_LOCK hCCBStallCheckLock; /* Lock used to guard against multiple threads simultaneously checking for stalled CCBs */ + +#if defined(SUPPORT_FIRMWARE_GCOV) + /* Firmware gcov buffer */ + DEVMEM_MEMDESC *psFirmwareGcovBufferMemDesc; /*!< mem desc for Firmware gcov dumping buffer */ + IMG_UINT32 ui32FirmwareGcovSize; +#endif + + IMG_HANDLE hTQCLISharedMem; /*!< TQ Client Shared Mem PMR */ + IMG_HANDLE hTQUSCSharedMem; /*!< TQ USC Shared Mem PMR */ + +#if defined(SUPPORT_VALIDATION) + IMG_UINT32 ui32TestSLRInterval; /* Don't enqueue an update sync checkpoint every nth kick */ + IMG_UINT32 ui32TestSLRCount; /* (used to test SLR operation) */ + IMG_UINT32 ui32SLRSkipFWAddr; +#endif + +#if defined(SUPPORT_SECURITY_VALIDATION) + DEVMEM_MEMDESC *psRGXFWIfSecureBufMemDesc; + DEVMEM_MEMDESC *psRGXFWIfNonSecureBufMemDesc; +#endif + +} PVRSRV_RGXDEV_INFO; + + + +typedef struct _RGX_TIMING_INFORMATION_ +{ + /*! GPU default core clock speed in Hz */ + IMG_UINT32 ui32CoreClockSpeed; + + /*! Active Power Management: GPU actively requests the host driver to be powered off */ + IMG_BOOL bEnableActivePM; + + /*! Enable the GPU to power off internal Power Islands independently from the host driver */ + IMG_BOOL bEnableRDPowIsland; + + /*! Active Power Management: Delay between the GPU idle and the request to the host */ + IMG_UINT32 ui32ActivePMLatencyms; + +} RGX_TIMING_INFORMATION; + +typedef struct _RGX_DATA_ +{ + /*! 
Timing information */ + RGX_TIMING_INFORMATION *psRGXTimingInfo; + IMG_BOOL bHasTDFWMemPhysHeap; + IMG_UINT32 uiTDFWMemPhysHeapID; + IMG_BOOL bHasFWMemPhysHeap; + IMG_UINT32 uiFWMemPhysHeapID; +} RGX_DATA; + + +/* + RGX PDUMP register bank name (prefix) +*/ +#define RGX_PDUMPREG_NAME "RGXREG" +#define RGX_TB_PDUMPREG_NAME "EMUREG" + +#endif /* __RGXDEVICE_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxfwimageutils.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxfwimageutils.c new file mode 100644 index 000000000000..0c09230a1251 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxfwimageutils.c @@ -0,0 +1,1059 @@ +/*************************************************************************/ /*! +@File +@Title Services Firmware image utilities used at init time +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Services Firmware image utilities used at init time +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* The routines implemented here are built on top of an abstraction layer to + * hide DDK/OS-specific details in case they are used outside of the DDK + * (e.g. when trusted device is enabled). + * Any new dependency should be added to rgxlayer.h. + * Any new code should be built on top of the existing abstraction layer, + * which should be extended when necessary. 
*/ +#include "rgxfwimageutils.h" +#include "pvrsrv.h" + + +/************************************************************************ +* FW layout information +************************************************************************/ +#define MAX_NUM_ENTRIES (8) +static RGX_FW_LAYOUT_ENTRY asRGXFWLayoutTable[MAX_NUM_ENTRIES]; +static IMG_UINT32 ui32LayoutEntryNum; + + +static RGX_FW_LAYOUT_ENTRY* GetTableEntry(const void *hPrivate, RGX_FW_SECTION_ID eId) +{ + IMG_UINT32 i; + + for (i = 0; i < ui32LayoutEntryNum; i++) + { + if (asRGXFWLayoutTable[i].eId == eId) + { + return &asRGXFWLayoutTable[i]; + } + } + + RGXErrorLog(hPrivate, "%s: id %u not found, returning entry 0\n", + __func__, eId); + + return &asRGXFWLayoutTable[0]; +} + +/*! +******************************************************************************* + + @Function FindMMUSegment + + @Description Given a 32 bit FW address attempt to find the corresponding + pointer to FW allocation + + @Input ui32OffsetIn : 32 bit FW address + @Input pvHostFWCodeAddr : Pointer to FW code + @Input pvHostFWDataAddr : Pointer to FW data + @Input pvHostFWCorememCodeAddr : Pointer to FW coremem code + @Input pvHostFWCorememDataAddr : Pointer to FW coremem code + @Input uiHostAddrOut : CPU pointer equivalent to ui32OffsetIn + + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR FindMMUSegment(IMG_UINT32 ui32OffsetIn, + void *pvHostFWCodeAddr, + void *pvHostFWDataAddr, + void *pvHostFWCorememCodeAddr, + void *pvHostFWCorememDataAddr, + void **uiHostAddrOut) +{ + IMG_UINT32 i; + + for (i = 0; i < ui32LayoutEntryNum; i++) + { + if ((ui32OffsetIn >= asRGXFWLayoutTable[i].ui32BaseAddr) && + (ui32OffsetIn < (asRGXFWLayoutTable[i].ui32BaseAddr + asRGXFWLayoutTable[i].ui32AllocSize))) + { + switch (asRGXFWLayoutTable[i].eType) + { + case FW_CODE: + *uiHostAddrOut = pvHostFWCodeAddr; + break; + + case FW_DATA: + *uiHostAddrOut = pvHostFWDataAddr; + break; + + 
case FW_COREMEM_CODE: + *uiHostAddrOut = pvHostFWCorememCodeAddr; + break; + + case FW_COREMEM_DATA: + *uiHostAddrOut = pvHostFWCorememDataAddr; + break; + + default: + return PVRSRV_ERROR_INIT_FAILURE; + } + + goto found; + } + } + + return PVRSRV_ERROR_INIT_FAILURE; + +found: + if (*uiHostAddrOut == NULL) + { + return PVRSRV_OK; + } + + /* Direct Mem write to mapped memory */ + ui32OffsetIn -= asRGXFWLayoutTable[i].ui32BaseAddr; + ui32OffsetIn += asRGXFWLayoutTable[i].ui32AllocOffset; + + /* Add offset to pointer to FW allocation only if + * that allocation is available + */ + if (*uiHostAddrOut) + { + *(IMG_UINT8 **)uiHostAddrOut += ui32OffsetIn; + } + + return PVRSRV_OK; +} + + +/*! +******************************************************************************* + + @Function RGXFWConfigureSegID + + @Description Configures a single segment of the Segment MMU + (base, limit and out_addr) + + @Input hPrivate : Implementation specific data + @Input ui64SegOutAddr : Segment output base address (40 bit devVaddr) + @Input ui32SegBase : Segment input base address (32 bit FW address) + @Input ui32SegLimit : Segment size + @Input ui32SegID : Segment ID + @Input pszName : Segment name + @Input ppui32BootConf : Pointer to bootloader data + + @Return void + +******************************************************************************/ +static void RGXFWConfigureSegID(const void *hPrivate, + IMG_UINT64 ui64SegOutAddr, + IMG_UINT32 ui32SegBase, + IMG_UINT32 ui32SegLimit, + IMG_UINT32 ui32SegID, + IMG_UINT32 **ppui32BootConf) +{ + IMG_UINT32 *pui32BootConf = *ppui32BootConf; + IMG_UINT32 ui32SegOutAddr0 = ui64SegOutAddr & 0x00000000FFFFFFFFUL; + IMG_UINT32 ui32SegOutAddr1 = (ui64SegOutAddr >> 32) & 0x00000000FFFFFFFFUL; + + /* META segments have a minimum size */ + IMG_UINT32 ui32LimitOff = (ui32SegLimit < RGXFW_SEGMMU_ALIGN) ? 
+ RGXFW_SEGMMU_ALIGN : ui32SegLimit; + /* the limit is an offset, therefore off = size - 1 */ + ui32LimitOff -= 1; + + RGXCommentLog(hPrivate, + "* Seg%d: meta_addr = 0x%08x, devv_addr = 0x%" IMG_UINT64_FMTSPECx ", limit = 0x%x", + ui32SegID, + ui32SegBase, + ui64SegOutAddr, + ui32LimitOff); + + ui32SegBase |= RGXFW_SEGMMU_ALLTHRS_WRITEABLE; + + *pui32BootConf++ = META_CR_MMCU_SEGMENTn_BASE(ui32SegID); + *pui32BootConf++ = ui32SegBase; + + *pui32BootConf++ = META_CR_MMCU_SEGMENTn_LIMIT(ui32SegID); + *pui32BootConf++ = ui32LimitOff; + + *pui32BootConf++ = META_CR_MMCU_SEGMENTn_OUTA0(ui32SegID); + *pui32BootConf++ = ui32SegOutAddr0; + + *pui32BootConf++ = META_CR_MMCU_SEGMENTn_OUTA1(ui32SegID); + *pui32BootConf++ = ui32SegOutAddr1; + + *ppui32BootConf = pui32BootConf; +} + +/*! +******************************************************************************* + + @Function RGXFWConfigureSegMMU + + @Description Configures META's Segment MMU + + @Input hPrivate : Implementation specific data + @Input psFWCodeDevVAddrBase : FW code base device virtual address + @Input psFWDataDevVAddrBase : FW data base device virtual address + @Input ppui32BootConf : Pointer to bootloader data + + @Return void + +******************************************************************************/ +static void RGXFWConfigureSegMMU(const void *hPrivate, + IMG_DEV_VIRTADDR *psFWCodeDevVAddrBase, + IMG_DEV_VIRTADDR *psFWDataDevVAddrBase, + IMG_UINT32 **ppui32BootConf) +{ + IMG_UINT64 ui64SegOutAddrTop; + IMG_UINT32 i; + + PVR_UNREFERENCED_PARAMETER(psFWCodeDevVAddrBase); + + /* Configure Segment MMU */ + RGXCommentLog(hPrivate, "********** FW configure Segment MMU **********"); + + if (RGX_DEVICE_HAS_FEATURE(hPrivate, SLC_VIVT)) + { + ui64SegOutAddrTop = RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(MMU_CONTEXT_MAPPING_FWPRIV); + } + else + { + ui64SegOutAddrTop = RGXFW_SEGMMU_OUTADDR_TOP_SLC(MMU_CONTEXT_MAPPING_FWPRIV, RGXFW_SEGMMU_META_BIFDM_ID); + } + + for (i = 0; i < ui32LayoutEntryNum; i++) + { 
+ /* + * FW code is using the bootloader segment which is already configured on boot. + * FW coremem code and data don't use the segment MMU. + * Only the FW data segment needs to be configured. + */ + + if (asRGXFWLayoutTable[i].eType == FW_DATA) + { + IMG_UINT64 ui64SegOutAddr; + IMG_UINT32 ui32SegId = RGXFW_SEGMMU_DATA_ID; + + ui64SegOutAddr = (psFWDataDevVAddrBase->uiAddr | ui64SegOutAddrTop) + + asRGXFWLayoutTable[i].ui32AllocOffset; + + RGXFWConfigureSegID(hPrivate, + ui64SegOutAddr, + asRGXFWLayoutTable[i].ui32BaseAddr, + asRGXFWLayoutTable[i].ui32AllocSize, + ui32SegId, + ppui32BootConf); /*write the sequence to the bootldr */ + + break; + } + } +} + +/*! +******************************************************************************* + + @Function RGXFWConfigureMetaCaches + + @Description Configure and enable the Meta instruction and data caches + + @Input hPrivate : Implementation specific data + @Input ui32NumThreads : Number of FW threads in use + @Input ppui32BootConf : Pointer to bootloader data + + @Return void + +******************************************************************************/ +static void RGXFWConfigureMetaCaches(const void *hPrivate, + IMG_UINT32 ui32NumThreads, + IMG_UINT32 **ppui32BootConf) +{ + IMG_UINT32 *pui32BootConf = *ppui32BootConf; + IMG_UINT32 ui32DCacheT0, ui32ICacheT0; + IMG_UINT32 ui32DCacheT1, ui32ICacheT1; + IMG_UINT32 ui32DCacheT2, ui32ICacheT2; + IMG_UINT32 ui32DCacheT3, ui32ICacheT3; + +#define META_CR_MMCU_LOCAL_EBCTRL (0x04830600) +#define META_CR_MMCU_LOCAL_EBCTRL_ICWIN (0x3 << 14) +#define META_CR_MMCU_LOCAL_EBCTRL_DCWIN (0x3 << 6) +#define META_CR_SYSC_DCPART(n) (0x04830200 + (n)*0x8) +#define META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE (0x1 << 31) +#define META_CR_SYSC_ICPART(n) (0x04830220 + (n)*0x8) +#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF (0x8 << 16) +#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE (0xF) +#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE (0x7) +#define 
META_CR_MMCU_DCACHE_CTRL (0x04830018) +#define META_CR_MMCU_ICACHE_CTRL (0x04830020) +#define META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN (0x1) + + RGXCommentLog(hPrivate, "********** Meta caches configuration *********"); + + /* Initialise I/Dcache settings */ + ui32DCacheT0 = ui32DCacheT1 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE; + ui32DCacheT2 = ui32DCacheT3 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE; + ui32ICacheT0 = ui32ICacheT1 = ui32ICacheT2 = ui32ICacheT3 = 0; + + if (ui32NumThreads == 1) + { + ui32DCacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE; + ui32ICacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE; + } + else + { + ui32DCacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE; + ui32ICacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE; + + ui32DCacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE | + META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF; + ui32ICacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE | + META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF; + } + + /* Local region MMU enhanced bypass: WIN-3 mode for code and data caches */ + *pui32BootConf++ = META_CR_MMCU_LOCAL_EBCTRL; + *pui32BootConf++ = META_CR_MMCU_LOCAL_EBCTRL_ICWIN | + META_CR_MMCU_LOCAL_EBCTRL_DCWIN; + + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", + META_CR_MMCU_LOCAL_EBCTRL, + META_CR_MMCU_LOCAL_EBCTRL_ICWIN | META_CR_MMCU_LOCAL_EBCTRL_DCWIN); + + /* Data cache partitioning thread 0 to 3 */ + *pui32BootConf++ = META_CR_SYSC_DCPART(0); + *pui32BootConf++ = ui32DCacheT0; + *pui32BootConf++ = META_CR_SYSC_DCPART(1); + *pui32BootConf++ = ui32DCacheT1; + *pui32BootConf++ = META_CR_SYSC_DCPART(2); + *pui32BootConf++ = ui32DCacheT2; + *pui32BootConf++ = META_CR_SYSC_DCPART(3); + *pui32BootConf++ = ui32DCacheT3; + + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", + META_CR_SYSC_DCPART(0), ui32DCacheT0); + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", + META_CR_SYSC_DCPART(1), ui32DCacheT1); + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", + 
META_CR_SYSC_DCPART(2), ui32DCacheT2); + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", + META_CR_SYSC_DCPART(3), ui32DCacheT3); + + /* Enable data cache hits */ + *pui32BootConf++ = META_CR_MMCU_DCACHE_CTRL; + *pui32BootConf++ = META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN; + + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", + META_CR_MMCU_DCACHE_CTRL, + META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN); + + /* Instruction cache partitioning thread 0 to 3 */ + *pui32BootConf++ = META_CR_SYSC_ICPART(0); + *pui32BootConf++ = ui32ICacheT0; + *pui32BootConf++ = META_CR_SYSC_ICPART(1); + *pui32BootConf++ = ui32ICacheT1; + *pui32BootConf++ = META_CR_SYSC_ICPART(2); + *pui32BootConf++ = ui32ICacheT2; + *pui32BootConf++ = META_CR_SYSC_ICPART(3); + *pui32BootConf++ = ui32ICacheT3; + + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", + META_CR_SYSC_ICPART(0), ui32ICacheT0); + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", + META_CR_SYSC_ICPART(1), ui32ICacheT1); + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", + META_CR_SYSC_ICPART(2), ui32ICacheT2); + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", + META_CR_SYSC_ICPART(3), ui32ICacheT3); + + /* Enable instruction cache hits */ + *pui32BootConf++ = META_CR_MMCU_ICACHE_CTRL; + *pui32BootConf++ = META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN; + + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", + META_CR_MMCU_ICACHE_CTRL, + META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN); + + *pui32BootConf++ = 0x040000C0; + *pui32BootConf++ = 0; + + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", 0x040000C0, 0); + + *ppui32BootConf = pui32BootConf; +} + +/*! 
*******************************************************************************

 @Function      ProcessLDRCommandStream

 @Description   Process the output of the Meta toolchain in the .LDR format
                copying code and data sections into their final location and
                passing some information to the Meta bootloader

 @Input         hPrivate : Implementation specific data
 @Input         pbLDR : Pointer to FW blob
 @Input         pvHostFWCodeAddr : Pointer to FW code
 @Input         pvHostFWDataAddr : Pointer to FW data
 @Input         pvHostFWCorememCodeAddr : Pointer to FW coremem code
 @Input         pvHostFWCorememDataAddr : Pointer to FW coremem data
 @Input         ppui32BootConf : Pointer to bootloader data

 @Return        PVRSRV_ERROR

******************************************************************************/
PVRSRV_ERROR ProcessLDRCommandStream(const void *hPrivate,
                                     const IMG_BYTE* pbLDR,
                                     void* pvHostFWCodeAddr,
                                     void* pvHostFWDataAddr,
                                     void* pvHostFWCorememCodeAddr,
                                     void* pvHostFWCorememDataAddr,
                                     IMG_UINT32 **ppui32BootConf)
{
	RGX_META_LDR_BLOCK_HDR *psHeader = (RGX_META_LDR_BLOCK_HDR *) pbLDR;
	/* L1 blocks form a linked list within the blob, starting at ui32SLData */
	RGX_META_LDR_L1_DATA_BLK *psL1Data =
	    (RGX_META_LDR_L1_DATA_BLK*) ((IMG_UINT8 *) pbLDR + psHeader->ui32SLData);

	/* ppui32BootConf may be NULL; register writes are then logged only */
	IMG_UINT32 *pui32BootConf = ppui32BootConf ? *ppui32BootConf : NULL;
	IMG_UINT32 ui32CorememSize = RGXGetFWCorememSize(hPrivate);

	RGXCommentLog(hPrivate, "**********************************************");
	RGXCommentLog(hPrivate, "************** Begin LDR Parsing *************");
	RGXCommentLog(hPrivate, "**********************************************");

	while (psL1Data != NULL)
	{
		if (RGX_META_LDR_BLK_IS_COMMENT(psL1Data->ui16Cmd))
		{
			/* Don't process comment blocks */
			goto NextBlock;
		}

		switch (psL1Data->ui16Cmd & RGX_META_LDR_CMD_MASK)
		{
			case RGX_META_LDR_CMD_LOADMEM:
			{
				RGX_META_LDR_L2_DATA_BLK *psL2Block =
				    (RGX_META_LDR_L2_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->aui32CmdData[1]);
				IMG_UINT32 ui32Offset = psL1Data->aui32CmdData[0];
				IMG_UINT32 ui32DataSize = psL2Block->ui16Length - 6 /* L2 Tag length and checksum */;
				void *pvWriteAddr;
				PVRSRV_ERROR eError;

				if (!RGX_META_IS_COREMEM_CODE(ui32Offset, ui32CorememSize) &&
				    !RGX_META_IS_COREMEM_DATA(ui32Offset, ui32CorememSize))
				{
					/* Global range is aliased to local range */
					ui32Offset &= ~META_MEM_GLOBAL_RANGE_BIT;
				}

				eError = FindMMUSegment(ui32Offset,
				                        pvHostFWCodeAddr,
				                        pvHostFWDataAddr,
				                        pvHostFWCorememCodeAddr,
				                        pvHostFWCorememDataAddr,
				                        &pvWriteAddr);

				if (eError != PVRSRV_OK)
				{
					RGXErrorLog(hPrivate,
					            "ProcessLDRCommandStream: Addr 0x%x (size: %d) not found in any segment",
					            ui32Offset, ui32DataSize);
					return eError;
				}

				/* Write to FW allocation only if available */
				if (pvWriteAddr)
				{
					RGXMemCopy(hPrivate,
					           pvWriteAddr,
					           psL2Block->aui32BlockData,
					           ui32DataSize);
				}

				break;
			}
			case RGX_META_LDR_CMD_LOADCORE:
			case RGX_META_LDR_CMD_LOADMMREG:
			{
				/* Unsupported LDR commands: fail the whole stream */
				return PVRSRV_ERROR_INIT_FAILURE;
			}
			case RGX_META_LDR_CMD_START_THREADS:
			{
				/* Don't process this block */
				break;
			}
			case RGX_META_LDR_CMD_ZEROMEM:
			{
				IMG_UINT32 ui32Offset = psL1Data->aui32CmdData[0];
				IMG_UINT32 ui32ByteCount = psL1Data->aui32CmdData[1];
				void *pvWriteAddr;
				PVRSRV_ERROR eError;

				if (RGX_META_IS_COREMEM_DATA(ui32Offset, ui32CorememSize))
				{
					/* cannot zero coremem directly */
					break;
				}

				/* Global range is aliased to local range */
				ui32Offset &= ~META_MEM_GLOBAL_RANGE_BIT;

				eError = FindMMUSegment(ui32Offset,
				                        pvHostFWCodeAddr,
				                        pvHostFWDataAddr,
				                        pvHostFWCorememCodeAddr,
				                        pvHostFWCorememDataAddr,
				                        &pvWriteAddr);

				if (eError != PVRSRV_OK)
				{
					RGXErrorLog(hPrivate,
					            "ProcessLDRCommandStream: Addr 0x%x (size: %d) not found in any segment",
					            ui32Offset, ui32ByteCount);
					return eError;
				}

				/* Write to FW allocation only if available */
				if (pvWriteAddr)
				{
					RGXMemSet(hPrivate, pvWriteAddr, 0, ui32ByteCount);
				}

				break;
			}
			case RGX_META_LDR_CMD_CONFIG:
			{
				RGX_META_LDR_L2_DATA_BLK *psL2Block =
				    (RGX_META_LDR_L2_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->aui32CmdData[0]);
				RGX_META_LDR_CFG_BLK *psConfigCommand = (RGX_META_LDR_CFG_BLK*) psL2Block->aui32BlockData;
				IMG_UINT32 ui32L2BlockSize = psL2Block->ui16Length - 6 /* L2 Tag length and checksum */;
				IMG_UINT32 ui32CurrBlockSize = 0;

				/* Walk the variable-length config sub-commands packed in
				 * this L2 block; ui32CurrBlockSize is the size of the
				 * sub-command just consumed. */
				while (ui32L2BlockSize)
				{
					switch (psConfigCommand->ui32Type)
					{
						case RGX_META_LDR_CFG_PAUSE:
						case RGX_META_LDR_CFG_READ:
						{
							/* NOTE(review): this assignment is dead — the
							 * return below exits immediately. Unsupported
							 * config command. */
							ui32CurrBlockSize = 8;
							return PVRSRV_ERROR_INIT_FAILURE;
						}
						case RGX_META_LDR_CFG_WRITE:
						{
							IMG_UINT32 ui32RegisterOffset = psConfigCommand->aui32BlockData[0];
							IMG_UINT32 ui32RegisterValue = psConfigCommand->aui32BlockData[1];

							/* Only write to bootloader if we got a valid
							 * pointer to the FW code allocation
							 */
							if (pui32BootConf)
							{
								/* Do register write */
								*pui32BootConf++ = ui32RegisterOffset;
								*pui32BootConf++ = ui32RegisterValue;
							}

							RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
							              ui32RegisterOffset, ui32RegisterValue);

							ui32CurrBlockSize = 12;
							break;
						}
						case RGX_META_LDR_CFG_MEMSET:
						case RGX_META_LDR_CFG_MEMCHECK:
						{
							/* NOTE(review): dead assignment, as above */
							ui32CurrBlockSize = 20;
							return PVRSRV_ERROR_INIT_FAILURE;
						}
						default:
						{
							return PVRSRV_ERROR_INIT_FAILURE;
						}
					}
					ui32L2BlockSize -= ui32CurrBlockSize;
					psConfigCommand = (RGX_META_LDR_CFG_BLK*) (((IMG_UINT8*) psConfigCommand) + ui32CurrBlockSize);
				}

				break;
			}
			default:
			{
				return PVRSRV_ERROR_INIT_FAILURE;
			}
		}

NextBlock:

		/* 0xFFFFFFFF is the list terminator */
		if (psL1Data->ui32Next == 0xFFFFFFFF)
		{
			psL1Data = NULL;
		}
		else
		{
			psL1Data = (RGX_META_LDR_L1_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->ui32Next);
		}
	}

	if (pui32BootConf)
	{
		*ppui32BootConf = pui32BootConf;
	}

	RGXCommentLog(hPrivate, "**********************************************");
	RGXCommentLog(hPrivate, "************** End Loader Parsing ************");
	RGXCommentLog(hPrivate, "**********************************************");

	return PVRSRV_OK;
}

/*!
*******************************************************************************

 @Function      ProcessELFCommandStream

 @Description   Process a file in .ELF format copying code and data sections
                into their final location

 @Input         hPrivate : Implementation specific data
 @Input         pbELF : Pointer to FW blob
 @Input         pvHostFWCodeAddr : Pointer to FW code
 @Input         pvHostFWDataAddr : Pointer to FW data
 @Input         pvHostFWCorememCodeAddr : Pointer to FW coremem code
 @Input         pvHostFWCorememDataAddr : Pointer to FW coremem data

 @Return        PVRSRV_ERROR

******************************************************************************/
PVRSRV_ERROR ProcessELFCommandStream(const void *hPrivate,
                                     const IMG_BYTE *pbELF,
                                     void *pvHostFWCodeAddr,
                                     void *pvHostFWDataAddr,
                                     void* pvHostFWCorememCodeAddr,
                                     void* pvHostFWCorememDataAddr)
{
	IMG_UINT32 ui32Entry;
	IMG_ELF_HDR *psHeader = (IMG_ELF_HDR *)pbELF;
	IMG_ELF_PROGRAM_HDR *psProgramHeader =
	    (IMG_ELF_PROGRAM_HDR *)(pbELF + psHeader->ui32Ephoff);
	PVRSRV_ERROR eError;

	for (ui32Entry = 0; ui32Entry < psHeader->ui32Ephnum; ui32Entry++, psProgramHeader++)
	{
		void *pvWriteAddr;

		/* Only consider loadable entries in the ELF segment table */
		if (psProgramHeader->ui32Ptype != ELF_PT_LOAD) continue;

		eError = FindMMUSegment(psProgramHeader->ui32Pvaddr,
		                        pvHostFWCodeAddr,
		                        pvHostFWDataAddr,
		                        pvHostFWCorememCodeAddr,
		                        pvHostFWCorememDataAddr,
		                        &pvWriteAddr);

		if (eError != PVRSRV_OK)
		{
			RGXErrorLog(hPrivate,
			            "%s: Addr 0x%x (size: %d) not found in any segment",__func__,
			            psProgramHeader->ui32Pvaddr,
			            psProgramHeader->ui32Pfilesz);
			return eError;
		}

		/* Write to FW allocation only if available */
		if (pvWriteAddr)
		{
			/* Copy the file-backed part of the segment... */
			RGXMemCopy(hPrivate,
			           pvWriteAddr,
			           (IMG_PBYTE)(pbELF + psProgramHeader->ui32Poffset),
			           psProgramHeader->ui32Pfilesz);

			/* ...and zero-fill the remainder (p_memsz - p_filesz), i.e.
			 * the BSS portion of the segment. */
			RGXMemSet(hPrivate,
			          (IMG_PBYTE)pvWriteAddr + psProgramHeader->ui32Pfilesz,
			          0,
			          psProgramHeader->ui32Pmemsz - psProgramHeader->ui32Pfilesz);
		}
	}

	return PVRSRV_OK;
}

/* Offset of the given section within its FW allocation */
IMG_UINT32 RGXGetFWImageSectionOffset(const void *hPrivate, RGX_FW_SECTION_ID eId)
{
	RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId);

	return psEntry->ui32AllocOffset;
}

/* Maximum size permitted for the given section */
IMG_UINT32 RGXGetFWImageSectionMaxSize(const void *hPrivate, RGX_FW_SECTION_ID eId)
{
	RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId);

	return psEntry->ui32MaxSize;
}

/* Allocated size of the given section */
IMG_UINT32 RGXGetFWImageSectionAllocSize(const void *hPrivate, RGX_FW_SECTION_ID eId)
{
	RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId);

	return psEntry->ui32AllocSize;
}

/* FW (device-side) base address of the given section */
IMG_UINT32 RGXGetFWImageSectionAddress(const void *hPrivate, RGX_FW_SECTION_ID eId)
{
	RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId);

	return psEntry->ui32BaseAddr;
}

PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate,
                                    const IMG_BYTE *pbRGXFirmware,
                                    const IMG_UINT32 ui32RGXFirmwareSize,
                                    IMG_DEVMEM_SIZE_T *puiFWCodeAllocSize,
                                    IMG_DEVMEM_SIZE_T *puiFWDataAllocSize,
                                    IMG_DEVMEM_SIZE_T *puiFWCorememCodeAllocSize,
                                    IMG_DEVMEM_SIZE_T *puiFWCorememDataAllocSize)
{
	RGX_FW_INFO_HEADER *psInfoHeader;
	const IMG_BYTE *pbRGXFirmwareInfo;
	const IMG_BYTE *pbRGXFirmwareLayout;
+ IMG_UINT32 i; + + if (pbRGXFirmware == NULL || ui32RGXFirmwareSize == 0 || ui32RGXFirmwareSize <= FW_BLOCK_SIZE) + { + RGXErrorLog(hPrivate, "%s: Invalid FW binary at %p, size %u", + __func__, pbRGXFirmware, ui32RGXFirmwareSize); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + + /* + * Acquire pointer to the FW info header within the FW image. + * The format of the header in the FW image might not be the one expected + * by the driver, but the driver should still be able to correctly read + * the information below, as long as new/incompatible elements are added + * at the end of the header (they will be ignored by the driver). + */ + + pbRGXFirmwareInfo = pbRGXFirmware + ui32RGXFirmwareSize - FW_BLOCK_SIZE; + psInfoHeader = (RGX_FW_INFO_HEADER*)pbRGXFirmwareInfo; + + /* If any of the following checks fails, the FW will likely not work properly */ + + if (psInfoHeader->ui32InfoVersion != FW_INFO_VERSION) + { + RGXErrorLog(hPrivate, "%s: FW info version mismatch (expected: %u, found: %u)", + __func__, + (IMG_UINT32) FW_INFO_VERSION, + psInfoHeader->ui32InfoVersion); + } + + if (psInfoHeader->ui32HeaderLen != sizeof(RGX_FW_INFO_HEADER)) + { + RGXErrorLog(hPrivate, "%s: FW info header sizes mismatch (expected: %u, found: %u)", + __func__, + (IMG_UINT32) sizeof(RGX_FW_INFO_HEADER), + psInfoHeader->ui32HeaderLen); + } + + if (psInfoHeader->ui32LayoutEntrySize != sizeof(RGX_FW_LAYOUT_ENTRY)) + { + RGXErrorLog(hPrivate, "%s: FW layout entry sizes mismatch (expected: %u, found: %u)", + __func__, + (IMG_UINT32) sizeof(RGX_FW_LAYOUT_ENTRY), + psInfoHeader->ui32LayoutEntrySize); + } + + if (psInfoHeader->ui32LayoutEntryNum > MAX_NUM_ENTRIES) + { + RGXErrorLog(hPrivate, "%s: Not enough storage for the FW layout table (max: %u entries, found: %u)", + __func__, + MAX_NUM_ENTRIES, + psInfoHeader->ui32LayoutEntryNum); + } + + if (RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS)) + { + if (psInfoHeader->ui32FwPageSize != RGXGetOSPageSize(hPrivate)) + { + RGXErrorLog(hPrivate, "%s: FW page 
size mismatch (expected: %u, found: %u)", + __func__, + (IMG_UINT32) RGXGetOSPageSize(hPrivate), + psInfoHeader->ui32FwPageSize); + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + + ui32LayoutEntryNum = psInfoHeader->ui32LayoutEntryNum; + + + /* + * Copy FW layout table from FW image to local array. + * One entry is copied at a time and the copy is limited to what the driver + * expects to find in it. Assuming that new/incompatible elements + * are added at the end of each entry, the loop below adapts the table + * in the FW image into the format expected by the driver. + */ + + pbRGXFirmwareLayout = pbRGXFirmwareInfo + psInfoHeader->ui32HeaderLen; + + for (i = 0; i < ui32LayoutEntryNum; i++) + { + RGX_FW_LAYOUT_ENTRY *psOutEntry = &asRGXFWLayoutTable[i]; + + RGX_FW_LAYOUT_ENTRY *psInEntry = (RGX_FW_LAYOUT_ENTRY*) + (pbRGXFirmwareLayout + i * psInfoHeader->ui32LayoutEntrySize); + + RGXMemCopy(hPrivate, + (void*)psOutEntry, + (void*)psInEntry, + sizeof(RGX_FW_LAYOUT_ENTRY)); + } + + + /* Calculate how much memory the FW needs for its code and data segments */ + + *puiFWCodeAllocSize = 0; + *puiFWDataAllocSize = 0; + *puiFWCorememCodeAllocSize = 0; + *puiFWCorememDataAllocSize = 0; + + for (i = 0; i < ui32LayoutEntryNum; i++) + { + switch (asRGXFWLayoutTable[i].eType) + { + case FW_CODE: + *puiFWCodeAllocSize += asRGXFWLayoutTable[i].ui32AllocSize; + break; + + case FW_DATA: + *puiFWDataAllocSize += asRGXFWLayoutTable[i].ui32AllocSize; + break; + + case FW_COREMEM_CODE: + *puiFWCorememCodeAllocSize += asRGXFWLayoutTable[i].ui32AllocSize; + break; + + case FW_COREMEM_DATA: + *puiFWCorememDataAllocSize += asRGXFWLayoutTable[i].ui32AllocSize; + break; + + default: + RGXErrorLog(hPrivate, "%s: Unknown FW section type %u\n", + __func__, asRGXFWLayoutTable[i].eType); + break; + } + } + + return PVRSRV_OK; +} + + +PVRSRV_ERROR RGXProcessFWImage(const void *hPrivate, + const IMG_BYTE *pbRGXFirmware, + void *pvFWCode, + void *pvFWData, + void *pvFWCorememCode, + void 
                               *pvFWCorememData,
                               RGX_FW_BOOT_PARAMS *puFWParams)
{
	PVRSRV_ERROR eError = PVRSRV_OK;
	IMG_BOOL bMIPS = RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS);
	IMG_BOOL bRISCV = RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR);
	/* Meta is the default FW processor when neither MIPS nor RISC-V is present */
	IMG_BOOL bMETA = !bMIPS && !bRISCV;

	if (bMETA)
	{
		IMG_UINT32 *pui32BootConf = NULL;
		/* Skip bootloader configuration if a pointer to the FW code
		 * allocation is not available
		 */
		if (pvFWCode)
		{
			/* This variable points to the bootloader code which is mostly
			 * a sequence of <register address, register value> pairs
			 */
			pui32BootConf = ((IMG_UINT32*) pvFWCode) + RGXFW_BOOTLDR_CONF_OFFSET;

			/* Slave port and JTAG accesses are privileged */
			*pui32BootConf++ = META_CR_SYSC_JTAG_THREAD;
			*pui32BootConf++ = META_CR_SYSC_JTAG_THREAD_PRIV_EN;

			RGXFWConfigureSegMMU(hPrivate,
			                     &puFWParams->sMeta.sFWCodeDevVAddr,
			                     &puFWParams->sMeta.sFWDataDevVAddr,
			                     &pui32BootConf);
		}

		/* Process FW image data stream (Meta uses the LDR format);
		 * this also advances pui32BootConf past any config writes found
		 * in the stream */
		eError = ProcessLDRCommandStream(hPrivate,
		                                 pbRGXFirmware,
		                                 pvFWCode,
		                                 pvFWData,
		                                 pvFWCorememCode,
		                                 pvFWCorememData,
		                                 &pui32BootConf);
		if (eError != PVRSRV_OK)
		{
			RGXErrorLog(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError);
			return eError;
		}

		/* Skip bootloader configuration if a pointer to the FW code
		 * allocation is not available
		 */
		if (pvFWCode)
		{
			IMG_UINT32 ui32NumThreads = puFWParams->sMeta.ui32NumThreads;

			/* Meta supports 1 or 2 threads only; fall back to 1 on a bad value */
			if ((ui32NumThreads == 0) || (ui32NumThreads > 2))
			{
				RGXErrorLog(hPrivate,
				            "ProcessFWImage: Wrong Meta threads configuration, using one thread only");

				ui32NumThreads = 1;
			}

			RGXFWConfigureMetaCaches(hPrivate,
			                         ui32NumThreads,
			                         &pui32BootConf);

			/* Signal the end of the conf sequence */
			*pui32BootConf++ = 0x0;
			*pui32BootConf++ = 0x0;

			/* Coremem code FW address and size follow the terminator; zeros
			 * mean "no coremem code" */
			if (puFWParams->sMeta.uiFWCorememCodeSize && (puFWParams->sMeta.sFWCorememCodeFWAddr.ui32Addr != 0))
			{
				*pui32BootConf++ = puFWParams->sMeta.sFWCorememCodeFWAddr.ui32Addr;
				*pui32BootConf++ = puFWParams->sMeta.uiFWCorememCodeSize;
			}
			else
			{
				*pui32BootConf++ = 0;
				*pui32BootConf++ = 0;
			}

			/* With META_DMA the 64-bit device VA is emitted high word first */
			if (RGX_DEVICE_HAS_FEATURE(hPrivate, META_DMA))
			{
				*pui32BootConf++ = (IMG_UINT32) (puFWParams->sMeta.sFWCorememCodeDevVAddr.uiAddr >> 32);
				*pui32BootConf++ = (IMG_UINT32) puFWParams->sMeta.sFWCorememCodeDevVAddr.uiAddr;
			}
			else
			{
				*pui32BootConf++ = 0;
				*pui32BootConf++ = 0;
			}

		}
	}
	else if (bMIPS)
	{
		/* Process FW image data stream (MIPS uses the ELF format; MIPS has
		 * no coremem, hence the NULL coremem pointers) */
		eError = ProcessELFCommandStream(hPrivate,
		                                 pbRGXFirmware,
		                                 pvFWCode,
		                                 pvFWData,
		                                 NULL,
		                                 NULL);
		if (eError != PVRSRV_OK)
		{
			RGXErrorLog(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError);
			return eError;
		}

		if (pvFWData)
		{
			RGXMIPSFW_BOOT_DATA *psBootData = (RGXMIPSFW_BOOT_DATA*)
				/* To get a pointer to the bootloader configuration data start from a pointer to the FW image... */
				IMG_OFFSET_ADDR(pvFWData,
				                /* ... jump to the boot/NMI data page...
				                 * (NULL hPrivate is acceptable here as the
				                 * section table lookup does not use it) */
				                (RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA)
				                /* ... and then jump to the bootloader data offset within the page */
				                + RGXMIPSFW_BOOTLDR_CONF_OFFSET));

			/* Rogue Registers physical address */
			psBootData->ui64RegBase = puFWParams->sMips.sGPURegAddr.uiAddr;

			/* MIPS Page Table physical address */
			psBootData->ui32PTLog2PageSize = puFWParams->sMips.ui32FWPageTableLog2PageSize;
			psBootData->ui32PTNumPages = puFWParams->sMips.ui32FWPageTableNumPages;
			psBootData->aui64PTPhyAddr[0U] = puFWParams->sMips.asFWPageTableAddr[0U].uiAddr;
			psBootData->aui64PTPhyAddr[1U] = puFWParams->sMips.asFWPageTableAddr[1U].uiAddr;
			psBootData->aui64PTPhyAddr[2U] = puFWParams->sMips.asFWPageTableAddr[2U].uiAddr;
			psBootData->aui64PTPhyAddr[3U] = puFWParams->sMips.asFWPageTableAddr[3U].uiAddr;

			/* MIPS Stack Pointer Physical Address */
			psBootData->ui64StackPhyAddr = puFWParams->sMips.sFWStackAddr.uiAddr;

			/* Reserved for future use */
			psBootData->ui32Reserved1 = 0;
			psBootData->ui32Reserved2 = 0;
		}
	}
	else
	{
		/* RISC-V: process FW image data stream (ELF format, with coremem) */
		eError = ProcessELFCommandStream(hPrivate,
		                                 pbRGXFirmware,
		                                 pvFWCode,
		                                 pvFWData,
		                                 pvFWCorememCode,
		                                 pvFWCorememData);
		if (eError != PVRSRV_OK)
		{
			RGXErrorLog(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError);
			return eError;
		}

		if (pvFWData)
		{
			/* Fill in the RISC-V bootloader configuration block inside the
			 * FW data allocation */
			RGXRISCVFW_BOOT_DATA *psBootData = (RGXRISCVFW_BOOT_DATA*)
				IMG_OFFSET_ADDR(pvFWData, RGXRISCVFW_BOOTLDR_CONF_OFFSET);

			psBootData->ui64CorememCodeDevVAddr = puFWParams->sRISCV.sFWCorememCodeDevVAddr.uiAddr;
			psBootData->ui32CorememCodeFWAddr = puFWParams->sRISCV.sFWCorememCodeFWAddr.ui32Addr;
			psBootData->ui32CorememCodeSize = puFWParams->sRISCV.uiFWCorememCodeSize;

			psBootData->ui64CorememDataDevVAddr = puFWParams->sRISCV.sFWCorememDataDevVAddr.uiAddr;
			psBootData->ui32CorememDataFWAddr = puFWParams->sRISCV.sFWCorememDataFWAddr.ui32Addr;
			psBootData->ui32CorememDataSize = puFWParams->sRISCV.uiFWCorememDataSize;
		}
	}

	return eError;
}
diff --git 
a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxfwimageutils.h b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxfwimageutils.h new file mode 100644 index 000000000000..71b3d9bcf38d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxfwimageutils.h @@ -0,0 +1,260 @@ +/*************************************************************************/ /*! +@File +@Title Header for Services Firmware image utilities used at init time +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for Services Firmware image utilities used at init time +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXFWIMAGEUTILS_H +#define RGXFWIMAGEUTILS_H + +/* The routines declared here are built on top of an abstraction layer to + * hide DDK/OS-specific details in case they are used outside of the DDK + * (e.g. when DRM security is enabled). + * Any new dependency should be added to rgxlayer.h. + * Any new code should be built on top of the existing abstraction layer, + * which should be extended when necessary. 
+ */ +#include "rgxlayer.h" + + +typedef union _RGX_FW_BOOT_PARAMS_ +{ + struct + { + IMG_DEV_VIRTADDR sFWCodeDevVAddr; + IMG_DEV_VIRTADDR sFWDataDevVAddr; + IMG_DEV_VIRTADDR sFWCorememCodeDevVAddr; + RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr; + IMG_DEVMEM_SIZE_T uiFWCorememCodeSize; + IMG_DEV_VIRTADDR sFWCorememDataDevVAddr; + RGXFWIF_DEV_VIRTADDR sFWCorememDataFWAddr; + IMG_UINT32 ui32NumThreads; + } sMeta; + + struct + { + IMG_DEV_PHYADDR sGPURegAddr; + IMG_DEV_PHYADDR asFWPageTableAddr[RGXMIPSFW_MAX_NUM_PAGETABLE_PAGES]; + IMG_DEV_PHYADDR sFWStackAddr; + IMG_UINT32 ui32FWPageTableLog2PageSize; + IMG_UINT32 ui32FWPageTableNumPages; + } sMips; + + struct + { + IMG_DEV_VIRTADDR sFWCorememCodeDevVAddr; + RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr; + IMG_DEVMEM_SIZE_T uiFWCorememCodeSize; + + IMG_DEV_VIRTADDR sFWCorememDataDevVAddr; + RGXFWIF_DEV_VIRTADDR sFWCorememDataFWAddr; + IMG_DEVMEM_SIZE_T uiFWCorememDataSize; + } sRISCV; + +} RGX_FW_BOOT_PARAMS; + +/*! +******************************************************************************* + + @Function RGXGetFWImageSectionOffset + + @Input hPrivate : Implementation specific data + @Input eId : Section id + + @Description Return offset of a Firmware section, relative to the beginning + of the code or data allocation (depending on the section id) + +******************************************************************************/ +IMG_UINT32 RGXGetFWImageSectionOffset(const void *hPrivate, + RGX_FW_SECTION_ID eId); + +/*! +******************************************************************************* + + @Function RGXGetFWImageSectionMaxSize + + @Input hPrivate : Implementation specific data + @Input eId : Section id + + @Description Return maximum size (not allocation size) of a Firmware section + +******************************************************************************/ +IMG_UINT32 RGXGetFWImageSectionMaxSize(const void *hPrivate, + RGX_FW_SECTION_ID eId); + +/*! 
+******************************************************************************* + + @Function RGXGetFWImageSectionAllocSize + + @Input hPrivate : Implementation specific data + @Input eId : Section id + + @Description Return allocation size of a Firmware section + +******************************************************************************/ +IMG_UINT32 RGXGetFWImageSectionAllocSize(const void *hPrivate, + RGX_FW_SECTION_ID eId); + +/*! +******************************************************************************* + + @Function RGXGetFWImageSectionAddress + + @Input hPrivate : Implementation specific data + @Input eId : Section id + + @Description Return base address of a Firmware section + +******************************************************************************/ +IMG_UINT32 RGXGetFWImageSectionAddress(const void *hPrivate, + RGX_FW_SECTION_ID eId); + +/*! +******************************************************************************* + + @Function RGXGetFWImageAllocSize + + @Description Return size of Firmware code/data/coremem code allocations + + @Input hPrivate : Implementation specific data + @Input pbRGXFirmware : Pointer to FW binary + @Input ui32RGXFirmwareSize : FW binary size + @Output puiFWCodeAllocSize : Code size + @Output puiFWDataAllocSize : Data size + @Output puiFWCorememCodeAllocSize : Coremem code size (0 if N/A) + @Output puiFWCorememDataAllocSize : Coremem data size (0 if N/A) + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate, + const IMG_BYTE *pbRGXFirmware, + const IMG_UINT32 ui32RGXFirmwareSize, + IMG_DEVMEM_SIZE_T *puiFWCodeAllocSize, + IMG_DEVMEM_SIZE_T *puiFWDataAllocSize, + IMG_DEVMEM_SIZE_T *puiFWCorememCodeAllocSize, + IMG_DEVMEM_SIZE_T *puiFWCorememDataAllocSize); + +/*! 
+******************************************************************************* + + @Function ProcessLDRCommandStream + + @Description Process the output of the Meta toolchain in the .LDR format + copying code and data sections into their final location and + passing some information to the Meta bootloader + + @Input hPrivate : Implementation specific data + @Input pbLDR : Pointer to FW blob + @Input pvHostFWCodeAddr : Pointer to FW code + @Input pvHostFWDataAddr : Pointer to FW data + @Input pvHostFWCorememCodeAddr : Pointer to FW coremem code + @Input pvHostFWCorememDataAddr : Pointer to FW coremem data + @Input ppui32BootConf : Pointer to bootloader data + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR ProcessLDRCommandStream(const void *hPrivate, + const IMG_BYTE* pbLDR, + void* pvHostFWCodeAddr, + void* pvHostFWDataAddr, + void* pvHostFWCorememCodeAddr, + void* pvHostFWCorememDataAddr, + IMG_UINT32 **ppui32BootConf); + +/*! +******************************************************************************* + + @Function ProcessELFCommandStream + + @Description Process a file in .ELF format copying code and data sections + into their final location + + @Input hPrivate : Implementation specific data + @Input pbELF : Pointer to FW blob + @Input pvHostFWCodeAddr : Pointer to FW code + @Input pvHostFWDataAddr : Pointer to FW data + @Input pvHostFWCorememCodeAddr : Pointer to FW coremem code + @Input pvHostFWCorememDataAddr : Pointer to FW coremem data + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR ProcessELFCommandStream(const void *hPrivate, + const IMG_BYTE *pbELF, + void *pvHostFWCodeAddr, + void *pvHostFWDataAddr, + void* pvHostFWCorememCodeAddr, + void* pvHostFWCorememDataAddr); + +/*! 
+******************************************************************************* + + @Function RGXProcessFWImage + + @Description Process the Firmware binary blob copying code and data + sections into their final location and passing some + information to the Firmware bootloader. + If a pointer to the final memory location for FW code or data + is not valid (NULL) then the relative section will not be + processed. + + @Input hPrivate : Implementation specific data + @Input pbRGXFirmware : Pointer to FW blob + @Input pvFWCode : Pointer to FW code + @Input pvFWData : Pointer to FW data + @Input pvFWCorememCode : Pointer to FW coremem code + @Input pvFWCorememData : Pointer to FW coremem data + @Input puFWParams : Parameters used by the FW at boot time + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXProcessFWImage(const void *hPrivate, + const IMG_BYTE *pbRGXFirmware, + void *pvFWCode, + void *pvFWData, + void *pvFWCorememCode, + void *pvFWCorememData, + RGX_FW_BOOT_PARAMS *puFWParams); + +#endif /* RGXFWIMAGEUTILS_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxfwutils.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxfwutils.c new file mode 100644 index 000000000000..b436cee82219 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxfwutils.c @@ -0,0 +1,5932 @@ +/*************************************************************************/ /*! +@File +@Title Rogue firmware utility routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Rogue firmware utility routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if defined(LINUX) +#include +#else +#include +#endif + +#include "img_defs.h" + +#include "rgxdefs_km.h" +#include "rgx_fwif_km.h" +#include "pdump_km.h" +#include "osfunc.h" +#include "oskm_apphint.h" +#include "cache_km.h" +#include "allocmem.h" +#include "physheap.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "devicemem_server.h" + +#include "pvr_debug.h" +#include "pvr_notifier.h" +#include "rgxfwutils.h" +#include "rgx_options.h" +#include "rgx_fwif_alignchecks.h" +#include "rgx_fwif_resetframework.h" +#include "rgx_pdump_panics.h" +#include "fwtrace_string.h" +#include "rgxheapconfig.h" +#include "pvrsrv.h" +#include "rgxdebug.h" +#include "rgxhwperf.h" +#include "rgxccb.h" +#include "rgxcompute.h" +#include "rgxtransfer.h" +#include "rgxpower.h" +#include "rgxtdmtransfer.h" +#if defined(SUPPORT_DISPLAY_CLASS) +#include "dc_server.h" +#endif +#include "rgxmem.h" +#include "rgxta3d.h" +#include "rgxkicksync.h" +#include "rgxutils.h" +#include "rgxtimecorr.h" +#include "sync_internal.h" +#include "sync.h" +#include "sync_checkpoint.h" +#include "sync_checkpoint_external.h" +#include "tlstream.h" +#include "devicemem_server_utils.h" +#include "htbuffer.h" +#include "rgx_bvnc_defs_km.h" +#include "info_page.h" + +#include "physmem_lma.h" +#include "physmem_osmem.h" + +#ifdef __linux__ +#include /* sprintf */ +#include "rogue_trace_events.h" +#else +#include +#include +#endif +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#endif + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +#include "rgxworkest.h" +#endif + +#if defined(SUPPORT_PDVFS) +#include "rgxpdvfs.h" +#endif + +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) +#if defined(SUPPORT_BUFFER_SYNC) +#include "pvr_buffer_sync.h" +#endif +#endif + +#include "vz_vmm_pvz.h" +#include "rgx_heaps.h" + +/*! 
 ******************************************************************************
 * HWPERF
 *****************************************************************************/
/* Size of the Firmware L1 HWPERF buffer in bytes (2MB). Accessed by the
 * Firmware and host driver. */
#define RGXFW_HWPERF_L1_SIZE_MIN        (16U)
#define RGXFW_HWPERF_L1_SIZE_DEFAULT    PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB
#define RGXFW_HWPERF_L1_SIZE_MAX        (12288U)

/* Kernel CCB length */
/* Reducing the size of the KCCB in an attempt to avoid flooding and overflowing the FW kick queue
 * in the case of multiple OSes */
#define RGXFWIF_KCCB_NUMCMDS_LOG2_GPUVIRT_WITHOUT_FEATURE (6)
#define RGXFWIF_KCCB_NUMCMDS_LOG2_DEFAULT                 (7)


/* Firmware CCB length */
#if defined(NO_HARDWARE) && defined(PDUMP)
#define RGXFWIF_FWCCB_NUMCMDS_LOG2   (10)
#elif defined(SUPPORT_PDVFS)
#define RGXFWIF_FWCCB_NUMCMDS_LOG2   (8)
#else
#define RGXFWIF_FWCCB_NUMCMDS_LOG2   (5)
#endif

#if defined(RGX_FW_IRQ_OS_COUNTERS)
/* Per-OS IRQ counter register addresses, indexed by OS id */
const IMG_UINT32 gaui32FwOsIrqCntRegAddr[RGXFW_MAX_NUM_OS] = {IRQ_COUNTER_STORAGE_REGS};
#endif

#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
/* Checkpoint CCB length */
#define RGXFWIF_CHECKPOINTCCB_NUMCMDS_LOG2 (10)
#endif

/*
 * Maximum length of time a DM can run for before the DM will be marked
 * as out-of-time. CDM has an increased value due to longer running kernels.
 *
 * These deadlines are increased on FPGA, EMU and VP due to the slower
 * execution time of these platforms.
 */
#if defined(FPGA) || defined(EMULATOR) || defined(VIRTUAL_PLATFORM)
#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS     (60000)
#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (250000)
#else
#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS     (30000)
#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (90000)
#endif

/* Workload Estimation Firmware CCB length */
#define RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2 (7)

/* Size of memory buffer for firmware gcov data
 * The actual data size is several hundred kilobytes.
 * The buffer is an order of magnitude larger. */
#define RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE (4*1024*1024)

#if defined(CONFIG_L4) && !defined(RGX_FEATURE_GPU_VIRTUALISATION) && defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)
/* NOTE(review): '+' binds tighter than '?:', so this expands to
 * ((RGXFWIF_DM_GP + guest) ? 1 : 0) rather than the presumably intended
 * (RGXFWIF_DM_GP + (guest ? 1 : 0)). Harmless only if RGXFWIF_DM_GP == 0 —
 * TODO confirm intent and add parentheses. */
#define MTS_SCHEDULE_DM_VAL (RGXFWIF_DM_GP + PVRSRV_VZ_MODE_IS(GUEST) ? (1) : (0))
#else
#define MTS_SCHEDULE_DM_VAL (RGXFWIF_DM_GP)
#endif

/* A KCCB command whose submission has been deferred, queued on a list
 * together with the pdump flags it was submitted with */
typedef struct
{
	RGXFWIF_KCCB_CMD sKCCBcmd;
	DLLIST_NODE sListNode;
	PDUMP_FLAGS_T uiPdumpFlags;
	PVRSRV_RGXDEV_INFO *psDevInfo;
} RGX_DEFERRED_KCCB_CMD;

#if defined(PDUMP)
/* ensure PIDs are 32-bit because a 32-bit PDump load is generated for the
 * PID filter example entries
 */
static_assert(sizeof(IMG_PID) == sizeof(IMG_UINT32),
              "FW PID filtering assumes the IMG_PID type is 32-bits wide as it "
              "generates WRW commands for loading the PID values");
#endif

static void RGXFreeFwOsData(PVRSRV_RGXDEV_INFO *psDevInfo);
static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo);

/* Allocate and map the single-cache-line fence buffer used as a workaround
 * on SLC3 devices; stores the device VA in psFwSysInit and the memdesc in
 * psDevInfo->psSLC3FenceMemDesc (NULL on failure). */
static PVRSRV_ERROR _AllocateSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo, RGXFWIF_SYSINIT* psFwSysInit)
{
	PVRSRV_ERROR eError;
	DEVMEM_MEMDESC** ppsSLC3FenceMemDesc = &psDevInfo->psSLC3FenceMemDesc;
	IMG_UINT32 ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE(
		RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS));

	PVR_DPF_ENTERED;

	eError = DevmemAllocate(psDevInfo->psFirmwareMainHeap,
	                        1,
	                        ui32CacheLineSize,
	                        PVRSRV_MEMALLOCFLAG_GPU_READABLE |
	                        PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
	                        PVRSRV_MEMALLOCFLAG_UNCACHED |
	                        PVRSRV_MEMALLOCFLAG_FW_ALLOC_MAIN,
	                        "FwSLC3FenceWA",
	                        ppsSLC3FenceMemDesc);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF_RETURN_RC(eError);
	}

	/* We need to map it so the heap for this allocation is set */
	eError = DevmemMapToDevice(*ppsSLC3FenceMemDesc,
	                           psDevInfo->psFirmwareMainHeap,
	                           &psFwSysInit->sSLC3FenceDevVAddr);
	if (eError != PVRSRV_OK)
	{
		/* Undo the allocation so the caller sees a clean failure */
		DevmemFree(*ppsSLC3FenceMemDesc);
		*ppsSLC3FenceMemDesc = NULL;
	}

	PVR_DPF_RETURN_RC1(eError, *ppsSLC3FenceMemDesc);
}

/* Release the SLC3 fence allocation created by _AllocateSLC3Fence (no-op if
 * it was never allocated). */
static void _FreeSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo)
{
	DEVMEM_MEMDESC* psSLC3FenceMemDesc = psDevInfo->psSLC3FenceMemDesc;

	if (psSLC3FenceMemDesc)
	{
		DevmemReleaseDevVirtAddr(psSLC3FenceMemDesc);
		DevmemFree(psSLC3FenceMemDesc);
	}
}

/* Kick the MTS by writing to its schedule register, with barriers on both
 * sides so prior memory writes are visible to the FW before the kick.
 * NOTE(review): the double-underscore prefix is a reserved identifier in C;
 * a single underscore would match the other helpers here. */
static void __MTSScheduleWrite(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Value)
{
	/* ensure memory is flushed before kicking MTS */
	OSWriteMemoryBarrier();

	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE, ui32Value);

	/* ensure the MTS kick goes through before continuing */
	OSMemoryBarrier();
}

/*************************************************************************/ /*!
@Function       RGXSetupFwAllocation

@Description    Sets a pointer in a firmware data structure.

@Input          psDevInfo       Device Info struct
@Input          uiAllocFlags    Flags determining type of memory allocation
@Input          ui32Size        Size of memory allocation
@Input          pszName         Allocation label
@Input          ppsMemDesc      pointer to the allocation's memory descriptor
@Input          psFwPtr         Address of the firmware pointer to set
@Input          ppvCpuPtr       Address of the cpu pointer to set
@Input          ui32DevVAFlags  Any combination of RFW_FWADDR_*_FLAG

@Return         PVRSRV_ERROR
*/ /**************************************************************************/
PVRSRV_ERROR RGXSetupFwAllocation(PVRSRV_RGXDEV_INFO* psDevInfo,
                                  DEVMEM_FLAGS_T uiAllocFlags,
                                  IMG_UINT32 ui32Size,
                                  const IMG_CHAR *pszName,
                                  DEVMEM_MEMDESC **ppsMemDesc,
                                  RGXFWIF_DEV_VIRTADDR *psFwPtr,
                                  void **ppvCpuPtr,
                                  IMG_UINT32 ui32DevVAFlags)
{
	PVRSRV_ERROR eError;
#if defined(SUPPORT_AUTOVZ)
	IMG_BOOL bClearByMemset = ((uiAllocFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) != 0);

	/* Under AutoVz the ZERO_ON_ALLOC flag is avoided as it causes the memory to
	 * be allocated from a different PMR than an allocation without the flag.
	 * When the content of an allocation needs to be recovered from physical memory
	 * on a later driver reboot, the memory then cannot be zeroed but the allocation
	 * addresses must still match.
	 * If the memory requires clearing, perform a memset after the allocation. */
	uiAllocFlags &= ~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
#endif

	PDUMPCOMMENT("Allocate %s", pszName);
	eError = DevmemFwAllocate(psDevInfo,
	                          ui32Size,
	                          uiAllocFlags,
	                          pszName,
	                          ppsMemDesc);

	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: Failed to allocate %u bytes for %s (%u)",
		         __func__,
		         ui32Size,
		         pszName,
		         eError));
		goto fail_alloc;
	}

	/* Optionally publish a firmware virtual address for the allocation */
	if (psFwPtr)
	{
		eError = RGXSetFirmwareAddress(psFwPtr, *ppsMemDesc, 0, ui32DevVAFlags);

		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: Failed to acquire firmware virtual address for %s (%u)",
			         __func__,
			         pszName,
			         eError));
			goto fail_fwaddr;
		}
	}

	/* Map a CPU VA if the caller asked for one, or (AutoVz only) if the
	 * allocation must be cleared by an explicit memset */
#if defined(SUPPORT_AUTOVZ)
	if ((bClearByMemset) || (ppvCpuPtr))
#else
	if (ppvCpuPtr)
#endif
	{
		void *pvTempCpuPtr;

		eError = DevmemAcquireCpuVirtAddr(*ppsMemDesc, &pvTempCpuPtr);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: Failed to acquire CPU virtual address for %s (%u)",
			         __func__,
			         pszName,
			         eError));
			goto fail_cpuva;
		}

#if defined(SUPPORT_AUTOVZ)
		if (bClearByMemset)
		{
			OSDeviceMemSet(pvTempCpuPtr, 0, ui32Size);
		}
		if (ppvCpuPtr)
#endif
		{
			*ppvCpuPtr = pvTempCpuPtr;
		}
#if defined(SUPPORT_AUTOVZ)
		else
		{
			/* CPU mapping was only needed for the memset — drop it again */
			DevmemReleaseCpuVirtAddr(*ppsMemDesc);
			pvTempCpuPtr = NULL;
		}
#endif
	}

	PVR_DPF((PVR_DBG_MESSAGE, "%s: %s set up at Fw VA 0x%x and CPU VA 0x%p",
	         __func__, pszName,
	         (psFwPtr)   ? (psFwPtr->ui32Addr) : (0),
	         (ppvCpuPtr) ? (*ppvCpuPtr)       : (NULL)));

	return eError;

	/* Unwind in reverse order of acquisition */
fail_cpuva:
	if (psFwPtr)
	{
		RGXUnsetFirmwareAddress(*ppsMemDesc);
	}
fail_fwaddr:
	DevmemFree(*ppsMemDesc);
fail_alloc:
	return eError;
}

/*************************************************************************/ /*!
@Function       GetHwPerfBufferSize

@Description    Computes the effective size of the HW Perf Buffer
@Input          ui32HWPerfFWBufSizeKB    Device Info struct
@Return         HwPerfBufferSize
*/ /**************************************************************************/
static IMG_UINT32 GetHwPerfBufferSize(IMG_UINT32 ui32HWPerfFWBufSizeKB)
{
	IMG_UINT32 HwPerfBufferSize;

	/* HWPerf: Determine the size of the FW buffer */
	if (ui32HWPerfFWBufSizeKB == 0 ||
	    ui32HWPerfFWBufSizeKB == RGXFW_HWPERF_L1_SIZE_DEFAULT)
	{
		/* Under pvrsrvctl 0 size implies AppHint not set or is set to zero,
		 * use default size from driver constant. Set it to the default
		 * size, no logging.
		 */
		HwPerfBufferSize = RGXFW_HWPERF_L1_SIZE_DEFAULT<<10;
	}
	else if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MAX))
	{
		/* Size specified as a AppHint but it is too big */
		PVR_DPF((PVR_DBG_WARNING,
		         "%s: HWPerfFWBufSizeInKB value (%u) too big, using maximum (%u)",
		         __func__,
		         ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MAX));
		HwPerfBufferSize = RGXFW_HWPERF_L1_SIZE_MAX<<10;
	}
	else if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MIN))
	{
		/* Size specified as in AppHint HWPerfFWBufSizeInKB */
		PVR_DPF((PVR_DBG_WARNING,
		         "%s: Using HWPerf FW buffer size of %u KB",
		         __func__,
		         ui32HWPerfFWBufSizeKB));
		HwPerfBufferSize = ui32HWPerfFWBufSizeKB<<10;
	}
	else
	{
		/* Size specified as a AppHint but it is too small.
		 * NOTE(review): a value exactly equal to RGXFW_HWPERF_L1_SIZE_MIN
		 * falls into this branch and is logged as "too small" although the
		 * minimum is then used anyway — the resulting size is the same. */
		PVR_DPF((PVR_DBG_WARNING,
		         "%s: HWPerfFWBufSizeInKB value (%u) too small, using minimum (%u)",
		         __func__,
		         ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MIN));
		HwPerfBufferSize = RGXFW_HWPERF_L1_SIZE_MIN<<10;
	}

	return HwPerfBufferSize;
}

#if defined(PDUMP)
/*!
+******************************************************************************* + @Function RGXFWSetupSignatureChecks + @Description + @Input psDevInfo + + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR RGXFWSetupSignatureChecks(PVRSRV_RGXDEV_INFO* psDevInfo, + DEVMEM_MEMDESC** ppsSigChecksMemDesc, + IMG_UINT32 ui32SigChecksBufSize, + RGXFWIF_SIGBUF_CTL* psSigBufCtl, + const IMG_CHAR* pszBufferName) +{ + PVRSRV_ERROR eError; + + /* Allocate memory for the checks */ + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS, + ui32SigChecksBufSize, + "FwSignatureChecks", + ppsSigChecksMemDesc, + &psSigBufCtl->sBuffer, + NULL, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); + + DevmemPDumpLoadMem( *ppsSigChecksMemDesc, + 0, + ui32SigChecksBufSize, + PDUMP_FLAGS_CONTINUOUS); + + psSigBufCtl->ui32LeftSizeInRegs = ui32SigChecksBufSize / sizeof(IMG_UINT32); +fail: + return eError; +} +#endif + + +#if defined(SUPPORT_FIRMWARE_GCOV) +/*! 
+******************************************************************************* + @Function RGXFWSetupFirmwareGcovBuffer + @Description + @Input psDevInfo + + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR RGXFWSetupFirmwareGcovBuffer(PVRSRV_RGXDEV_INFO* psDevInfo, + DEVMEM_MEMDESC** ppsBufferMemDesc, + IMG_UINT32 ui32FirmwareGcovBufferSize, + RGXFWIF_FIRMWARE_GCOV_CTL* psFirmwareGcovCtl, + const IMG_CHAR* pszBufferName) +{ + PVRSRV_ERROR eError; + + /* Allocate memory for gcov */ + eError = RGXSetupFwAllocation(psDevInfo, + (RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)), + ui32FirmwareGcovBufferSize, + pszBufferName, + ppsBufferMemDesc, + &psFirmwareGcovCtl->sBuffer, + NULL, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); + + psFirmwareGcovCtl->ui32Size = ui32FirmwareGcovBufferSize; + + return PVRSRV_OK; +} +#endif + +#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) +/*! 
+ ****************************************************************************** + @Function RGXFWSetupCounterBuffer + @Description + @Input psDevInfo + + @Return PVRSRV_ERROR + *****************************************************************************/ +static PVRSRV_ERROR RGXFWSetupCounterBuffer(PVRSRV_RGXDEV_INFO* psDevInfo, + DEVMEM_MEMDESC** ppsBufferMemDesc, + IMG_UINT32 ui32CounterDataBufferSize, + RGXFWIF_COUNTER_DUMP_CTL* psCounterDumpCtl, + const IMG_CHAR* pszBufferName) +{ + PVRSRV_ERROR eError; + + eError = RGXSetupFwAllocation(psDevInfo, + (RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)), + ui32CounterDataBufferSize, + "FwCounterBuffer", + ppsBufferMemDesc, + &psCounterDumpCtl->sBuffer, + NULL, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXSetupFwAllocation"); + + psCounterDumpCtl->ui32SizeInDwords = ui32CounterDataBufferSize >> 2; + + return PVRSRV_OK; +} +#endif + +/*! + ****************************************************************************** + @Function RGXFWSetupAlignChecks + @Description This functions allocates and fills memory needed for the + aligns checks of the UM and KM structures shared with the + firmware. The format of the data in the memory is as follows: + + + + + The UM array is passed from the user side. Now the firmware is + is responsible for filling this part of the memory. If that + happens the check of the UM structures will be performed + by the host driver on client's connect. + If the macro is not defined the client driver fills the memory + and the firmware checks for the alignment of all structures. 
 @Input			psDeviceNode

 @Return		PVRSRV_ERROR
******************************************************************************/
static PVRSRV_ERROR RGXFWSetupAlignChecks(PVRSRV_DEVICE_NODE *psDeviceNode,
								RGXFWIF_DEV_VIRTADDR	*psAlignChecksDevFW,
								IMG_UINT32				*pui32RGXFWAlignChecks,
								IMG_UINT32				ui32RGXFWAlignChecksArrLength)
{
	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
	IMG_UINT32			aui32RGXFWAlignChecksKM[] = { RGXFW_ALIGN_CHECKS_INIT_KM };
	IMG_UINT32			ui32RGXFWAlingChecksTotal;
	IMG_UINT32*			paui32AlignChecks;
	PVRSRV_ERROR		eError;

	/* In this case we don't know the number of elements in UM array.
	 * We have to assume something so we assume RGXFW_ALIGN_CHECKS_UM_MAX.
	 */
	PVR_ASSERT(ui32RGXFWAlignChecksArrLength == 0);
	/* KM array + worst-case UM array + two length/terminator words. */
	ui32RGXFWAlingChecksTotal = sizeof(aui32RGXFWAlignChecksKM)
	                            + RGXFW_ALIGN_CHECKS_UM_MAX * sizeof(IMG_UINT32)
	                            + 2 * sizeof(IMG_UINT32);

	/* Allocate memory for the checks */
	eError = RGXSetupFwAllocation(psDevInfo,
								  RGX_FWSHAREDMEM_ALLOCFLAGS &
								  RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
								  ui32RGXFWAlingChecksTotal,
								  "FwAlignmentChecks",
								  &psDevInfo->psRGXFWAlignChecksMemDesc,
								  psAlignChecksDevFW,
								  (void**) &paui32AlignChecks,
								  RFW_FWADDR_NOREF_FLAG);
	PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);

	/* When the firmware survived a driver reboot (AutoVz), its copy of the
	 * data is authoritative and must not be overwritten. */
	if (!psDeviceNode->bAutoVzFwIsUp)
	{
		/* Copy the values */
		*paui32AlignChecks++ = ARRAY_SIZE(aui32RGXFWAlignChecksKM);
		OSDeviceMemCopy(paui32AlignChecks, &aui32RGXFWAlignChecksKM[0],
		                sizeof(aui32RGXFWAlignChecksKM));
		paui32AlignChecks += ARRAY_SIZE(aui32RGXFWAlignChecksKM);

		*paui32AlignChecks = 0;
	}

	DevmemPDumpLoadMem(	psDevInfo->psRGXFWAlignChecksMemDesc,
						0,
						ui32RGXFWAlingChecksTotal,
						PDUMP_FLAGS_CONTINUOUS);

	return PVRSRV_OK;

fail:
	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}

/* Release the CPU mapping and free the alignment-checks buffer (idempotent). */
static void RGXFWFreeAlignChecks(PVRSRV_RGXDEV_INFO* psDevInfo)
{
	if (psDevInfo->psRGXFWAlignChecksMemDesc != NULL)
	{
		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc);
		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWAlignChecksMemDesc);
		psDevInfo->psRGXFWAlignChecksMemDesc = NULL;
	}
}

/* Compute the firmware-view address of psSrc (plus uiExtraOffset) and store
 * it in ppDest->ui32Addr. The translation depends on the FW processor:
 * META (segmented memmap with cache attribute bits), MIPS (truncated device
 * VA), otherwise RISCV (shared cached/uncached data windows).
 * With RFW_FWADDR_NOREF_FLAG the device VA reference taken here is dropped
 * again before returning; otherwise the caller must balance it via
 * RGXUnsetFirmwareAddress(). */
PVRSRV_ERROR RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR	*ppDest,
						   DEVMEM_MEMDESC		*psSrc,
						   IMG_UINT32			uiExtraOffset,
						   IMG_UINT32			ui32Flags)
{
	PVRSRV_ERROR		eError;
	IMG_DEV_VIRTADDR	psDevVirtAddr;
	PVRSRV_DEVICE_NODE	*psDeviceNode;
	PVRSRV_RGXDEV_INFO	*psDevInfo;

	psDeviceNode = (PVRSRV_DEVICE_NODE *) DevmemGetConnection(psSrc);
	psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;

	if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
	{
		IMG_UINT32		ui32Offset;
		IMG_BOOL		bCachedInMETA;
		DEVMEM_FLAGS_T	uiDevFlags;
		IMG_UINT32		uiGPUCacheMode;

		eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr);
		PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireDevVirtAddr", failDevVAAcquire);

		/* Convert to an address in META memmap */
		ui32Offset = psDevVirtAddr.uiAddr + uiExtraOffset - RGX_FIRMWARE_RAW_HEAP_BASE;

		/* Check in the devmem flags whether this memory is cached/uncached */
		DevmemGetFlags(psSrc, &uiDevFlags);

		/* Honour the META cache flags */
		bCachedInMETA = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) & uiDevFlags) != 0;

		/* Honour the SLC cache flags */
		eError = DevmemDeviceCacheMode(psDeviceNode, uiDevFlags, &uiGPUCacheMode);
		PVR_LOG_GOTO_IF_ERROR(eError, "DevmemDeviceCacheMode", failDevCacheMode);

		ui32Offset += RGXFW_SEGMMU_DATA_BASE_ADDRESS;

		if (bCachedInMETA)
		{
			ui32Offset |= RGXFW_SEGMMU_DATA_META_CACHED;
		}
		else
		{
			ui32Offset |= RGXFW_SEGMMU_DATA_META_UNCACHED;
		}

		if (PVRSRV_CHECK_GPU_CACHED(uiGPUCacheMode))
		{
			ui32Offset |= RGXFW_SEGMMU_DATA_VIVT_SLC_CACHED;
		}
		else
		{
			ui32Offset |= RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED;
		}
		ppDest->ui32Addr = ui32Offset;
	}
	else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
	{
		eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr);
		PVR_GOTO_IF_ERROR(eError, failDevVAAcquire);

		ppDest->ui32Addr = (IMG_UINT32)((psDevVirtAddr.uiAddr + uiExtraOffset) & 0xFFFFFFFF);
	}
	else
	{
		IMG_UINT32		ui32Offset;
		IMG_BOOL		bCachedInRISCV;
		DEVMEM_FLAGS_T	uiDevFlags;

		eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr);
		PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireDevVirtAddr", failDevVAAcquire);

		/* Convert to an address in RISCV memmap */
		ui32Offset = psDevVirtAddr.uiAddr + uiExtraOffset - RGX_FIRMWARE_RAW_HEAP_BASE;

		/* Check in the devmem flags whether this memory is cached/uncached */
		DevmemGetFlags(psSrc, &uiDevFlags);

		/* Honour the RISCV cache flags */
		bCachedInRISCV = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) & uiDevFlags) != 0;

		if (bCachedInRISCV)
		{
			ui32Offset |= RGXRISCVFW_SHARED_CACHED_DATA_BASE;
		}
		else
		{
			ui32Offset |= RGXRISCVFW_SHARED_UNCACHED_DATA_BASE;
		}

		ppDest->ui32Addr = ui32Offset;
	}

	if ((ppDest->ui32Addr & 0x3U) != 0)
	{
		IMG_CHAR *pszAnnotation;
		/* It is expected that the annotation returned by DevmemGetAnnotation() is always valid */
		DevmemGetAnnotation(psSrc, &pszAnnotation);

		PVR_DPF((PVR_DBG_ERROR, "%s: %s @ 0x%x is not aligned to 32 bit",
				 __func__, pszAnnotation, ppDest->ui32Addr));

		/* NOTE(review): this early return keeps the device VA reference
		 * acquired above even when RFW_FWADDR_NOREF_FLAG was passed —
		 * looks like a reference leak on this error path; confirm. */
		return PVRSRV_ERROR_INVALID_ALIGNMENT;
	}

	if (ui32Flags & RFW_FWADDR_NOREF_FLAG)
	{
		DevmemReleaseDevVirtAddr(psSrc);
	}

	return PVRSRV_OK;

failDevCacheMode:
	DevmemReleaseDevVirtAddr(psSrc);
failDevVAAcquire:
	return eError;
}

/* Fill psDest with the device virtual address (plus uiOffset) and firmware
 * address of a buffer, for use by the META DMA engine. */
void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR		*psDest,
						  DEVMEM_MEMDESC		*psSrcMemDesc,
						  RGXFWIF_DEV_VIRTADDR	*psSrcFWDevVAddr,
						  IMG_UINT32			uiOffset)
{
	PVRSRV_ERROR		eError;
	IMG_DEV_VIRTADDR	sDevVirtAddr;

	eError = DevmemAcquireDevVirtAddr(psSrcMemDesc, &sDevVirtAddr);
	PVR_ASSERT(eError == PVRSRV_OK);

	psDest->psDevVirtAddr.uiAddr = sDevVirtAddr.uiAddr;
	psDest->psDevVirtAddr.uiAddr += uiOffset;
	psDest->pbyFWAddr.ui32Addr = psSrcFWDevVAddr->ui32Addr;

	DevmemReleaseDevVirtAddr(psSrcMemDesc);
}


/* Balance a previous RGXSetFirmwareAddress() that did NOT pass
 * RFW_FWADDR_NOREF_FLAG, by dropping the device VA reference. */
void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc)
{
	DevmemReleaseDevVirtAddr(psSrc);
}

/* Host-side bookkeeping for one firmware common context. */
struct _RGX_SERVER_COMMON_CONTEXT_ {
	PVRSRV_RGXDEV_INFO *psDevInfo;
	DEVMEM_MEMDESC *psFWCommonContextMemDesc;        /* FW context memory (may be caller-provided) */
	PRGXFWIF_FWCOMMONCONTEXT sFWCommonContextFWAddr; /* FW address of the context */
	DEVMEM_MEMDESC *psFWMemContextMemDesc;
	DEVMEM_MEMDESC *psFWFrameworkMemDesc;            /* optional framework registers buffer */
	DEVMEM_MEMDESC *psContextStateMemDesc;           /* optional GPU context-state buffer */
	RGX_CLIENT_CCB *psClientCCB;
	DEVMEM_MEMDESC *psClientCCBMemDesc;
	DEVMEM_MEMDESC *psClientCCBCtrlMemDesc;
	IMG_BOOL bCommonContextMemProvided;              /* IMG_TRUE if psFWCommonContextMemDesc is external */
	IMG_UINT32 ui32ContextID;
	DLLIST_NODE sListNode;                           /* entry in psDevInfo->sCommonCtxtListHead */
	RGXFWIF_CONTEXT_RESET_REASON eLastResetReason;
	IMG_UINT32 ui32LastResetJobRef;
};

/* Allocate and initialise a firmware common context: the FW context memory
 * (or an offset into caller-provided memory), its client CCB, and the FW
 * addresses of all associated buffers. On success the context is linked
 * into the device's common-context list and returned via
 * ppsServerCommonContext; on failure everything is unwound. */
PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection,
									 PVRSRV_DEVICE_NODE *psDeviceNode,
									 RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor,
									 RGXFWIF_DM eDM,
									 DEVMEM_MEMDESC *psAllocatedMemDesc,
									 IMG_UINT32 ui32AllocatedOffset,
									 DEVMEM_MEMDESC *psFWMemContextMemDesc,
									 DEVMEM_MEMDESC *psContextStateMemDesc,
									 IMG_UINT32 ui32CCBAllocSizeLog2,
									 IMG_UINT32 ui32CCBMaxAllocSizeLog2,
									 IMG_UINT32 ui32ContextFlags,
									 IMG_UINT32 ui32Priority,
									 IMG_UINT32 ui32MaxDeadlineMS,
									 IMG_UINT64 ui64RobustnessAddress,
									 RGX_COMMON_CONTEXT_INFO *psInfo,
									 RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext)
{
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
	RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
	RGXFWIF_FWCOMMONCONTEXT *psFWCommonContext;
	IMG_UINT32 ui32FWCommonContextOffset;
	IMG_UINT8 *pui8Ptr;
	PVRSRV_ERROR eError;

	/*
	 * Allocate all the resources that are required
	 */
	psServerCommonContext = OSAllocMem(sizeof(*psServerCommonContext));
	if (psServerCommonContext == NULL)
	{
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto fail_alloc;
	}

	psServerCommonContext->psDevInfo = psDevInfo;

	if (psAllocatedMemDesc)
	{
		/* The FW context lives at an offset inside caller-provided memory. */
		PDUMPCOMMENT("Using existing MemDesc for Rogue firmware %s context (offset = %d)",
					 aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
					 ui32AllocatedOffset);
		ui32FWCommonContextOffset = ui32AllocatedOffset;
		psServerCommonContext->psFWCommonContextMemDesc = psAllocatedMemDesc;
		psServerCommonContext->bCommonContextMemProvided = IMG_TRUE;
	}
	else
	{
		/* Allocate device memory for the firmware context */
		PDUMPCOMMENT("Allocate Rogue firmware %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]);
		eError = DevmemFwAllocate(psDevInfo,
								  sizeof(*psFWCommonContext),
								  RGX_FWCOMCTX_ALLOCFLAGS,
								  "FwContext",
								  &psServerCommonContext->psFWCommonContextMemDesc);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,
					 "%s: Failed to allocate firmware %s context (%s)",
					 __func__,
					 aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
					 PVRSRVGetErrorString(eError)));
			goto fail_contextalloc;
		}
		ui32FWCommonContextOffset = 0;
		psServerCommonContext->bCommonContextMemProvided = IMG_FALSE;
	}

	/* Record this context so we can refer to it if the FW needs to tell us it was reset. */
	psServerCommonContext->eLastResetReason    = RGXFWIF_CONTEXT_RESET_REASON_NONE;
	psServerCommonContext->ui32LastResetJobRef = 0;
	psServerCommonContext->ui32ContextID       = psDevInfo->ui32CommonCtxtCurrentID++;

	/*
	 * Temporarily map the firmware context to the kernel and initialise it
	 */
	eError = DevmemAcquireCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc,
	                                  (void **)&pui8Ptr);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
				 "%s: Failed to map firmware %s context to CPU (%s)",
				 __func__,
				 aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
				 PVRSRVGetErrorString(eError)));
		goto fail_cpuvirtacquire;
	}

	/* Allocate the client CCB */
	eError = RGXCreateCCB(psDevInfo,
						  ui32CCBAllocSizeLog2,
						  ui32CCBMaxAllocSizeLog2,
						  ui32ContextFlags,
						  psConnection,
						  eRGXCCBRequestor,
						  psServerCommonContext,
						  &psServerCommonContext->psClientCCB,
						  &psServerCommonContext->psClientCCBMemDesc,
						  &psServerCommonContext->psClientCCBCtrlMemDesc);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
				 "%s: failed to create CCB for %s context (%s)",
				 __func__,
				 aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
				 PVRSRVGetErrorString(eError)));
		goto fail_allocateccb;
	}

	psFWCommonContext = (RGXFWIF_FWCOMMONCONTEXT *) (pui8Ptr + ui32FWCommonContextOffset);
	psFWCommonContext->eDM = eDM;

	/* Set the firmware CCB device addresses in the firmware common context */
	eError = RGXSetFirmwareAddress(&psFWCommonContext->psCCB,
								   psServerCommonContext->psClientCCBMemDesc,
								   0, RFW_FWADDR_FLAG_NONE);
	PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", fail_cccbfwaddr);

	eError = RGXSetFirmwareAddress(&psFWCommonContext->psCCBCtl,
								   psServerCommonContext->psClientCCBCtrlMemDesc,
								   0, RFW_FWADDR_FLAG_NONE);
	PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", fail_cccbctrlfwaddr);

	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA))
	{
		RGXSetMetaDMAAddress(&psFWCommonContext->sCCBMetaDMAAddr,
							 psServerCommonContext->psClientCCBMemDesc,
							 &psFWCommonContext->psCCB,
							 0);
	}

	/* Set the memory context device address */
	psServerCommonContext->psFWMemContextMemDesc = psFWMemContextMemDesc;
	eError = RGXSetFirmwareAddress(&psFWCommonContext->psFWMemContext,
								   psFWMemContextMemDesc,
								   0, RFW_FWADDR_FLAG_NONE);
	PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:3", fail_fwmemctxfwaddr);

	/* Set the framework register updates address */
	psServerCommonContext->psFWFrameworkMemDesc = psInfo->psFWFrameworkMemDesc;
	if (psInfo->psFWFrameworkMemDesc != NULL)
	{
		eError = RGXSetFirmwareAddress(&psFWCommonContext->psRFCmd,
									   psInfo->psFWFrameworkMemDesc,
									   0, RFW_FWADDR_FLAG_NONE);
		PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:4", fail_fwframeworkfwaddr);
	}
	else
	{
		/* This should never be touched in this contexts without a framework
		 * memdesc, but ensure it is zero so we see crashes if it is.
		 */
		psFWCommonContext->psRFCmd.ui32Addr = 0;
	}

	psFWCommonContext->ui32Priority = ui32Priority;
	psFWCommonContext->ui32PrioritySeqNum = 0;
	/* Compute DMs get a larger deadline ceiling than the other DMs. */
	psFWCommonContext->ui32MaxDeadlineMS = MIN(ui32MaxDeadlineMS,
											   (eDM == RGXFWIF_DM_CDM ?
												RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS :
												RGXFWIF_MAX_WORKLOAD_DEADLINE_MS));
	psFWCommonContext->ui64RobustnessAddress = ui64RobustnessAddress;

	/* Store a references to Server Common Context and PID for notifications back from the FW. */
	psFWCommonContext->ui32ServerCommonContextID = psServerCommonContext->ui32ContextID;
	psFWCommonContext->ui32PID                   = OSGetCurrentClientProcessIDKM();

	/* Set the firmware GPU context state buffer */
	psServerCommonContext->psContextStateMemDesc = psContextStateMemDesc;
	if (psContextStateMemDesc)
	{
		eError = RGXSetFirmwareAddress(&psFWCommonContext->psContextState,
									   psContextStateMemDesc,
									   0,
									   RFW_FWADDR_FLAG_NONE);
		PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:5", fail_ctxstatefwaddr);
	}

	/*
	 * Dump the created context
	 */
	PDUMPCOMMENT("Dump %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]);
	DevmemPDumpLoadMem(psServerCommonContext->psFWCommonContextMemDesc,
					   ui32FWCommonContextOffset,
					   sizeof(*psFWCommonContext),
					   PDUMP_FLAGS_CONTINUOUS);

	/* We've finished the setup so release the CPU mapping */
	DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc);

	/* Map this allocation into the FW */
	eError = RGXSetFirmwareAddress(&psServerCommonContext->sFWCommonContextFWAddr,
								   psServerCommonContext->psFWCommonContextMemDesc,
								   ui32FWCommonContextOffset,
								   RFW_FWADDR_FLAG_NONE);
	PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:6", fail_fwcommonctxfwaddr);

#if defined(LINUX)
	{
		IMG_UINT32 ui32FWAddr;
		/* For render contexts, report the address of the enclosing
		 * RGXFWIF_FWRENDERCONTEXT rather than the embedded TA/3D context. */
		switch (eDM) {
			case RGXFWIF_DM_GEOM:
				ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t)
						psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, sTAContext));
				break;
			case RGXFWIF_DM_3D:
				ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t)
						psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, s3DContext));
				break;
			default:
				ui32FWAddr = psServerCommonContext->sFWCommonContextFWAddr.ui32Addr;
				break;
		}

		trace_rogue_create_fw_context(OSGetCurrentClientProcessNameKM(),
									  aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
									  ui32FWAddr);
	}
#endif

	/*Add the node to the list when finalised */
	OSWRLockAcquireWrite(psDevInfo->hCommonCtxtListLock);
	dllist_add_to_tail(&(psDevInfo->sCommonCtxtListHead), &(psServerCommonContext->sListNode));
	OSWRLockReleaseWrite(psDevInfo->hCommonCtxtListLock);

	*ppsServerCommonContext = psServerCommonContext;
	return PVRSRV_OK;

/* Error unwind: release FW addresses, CCB, mapping and memory in strict
 * reverse order of acquisition. */
fail_fwcommonctxfwaddr:
	if (psContextStateMemDesc)
	{
		RGXUnsetFirmwareAddress(psContextStateMemDesc);
	}
fail_ctxstatefwaddr:
	if (psInfo->psFWFrameworkMemDesc != NULL)
	{
		RGXUnsetFirmwareAddress(psInfo->psFWFrameworkMemDesc);
	}
fail_fwframeworkfwaddr:
	RGXUnsetFirmwareAddress(psFWMemContextMemDesc);
fail_fwmemctxfwaddr:
	RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc);
fail_cccbctrlfwaddr:
	RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc);
fail_cccbfwaddr:
	RGXDestroyCCB(psDevInfo, psServerCommonContext->psClientCCB);
fail_allocateccb:
	DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc);
fail_cpuvirtacquire:
	if (!psServerCommonContext->bCommonContextMemProvided)
	{
		DevmemFwUnmapAndFree(psDevInfo, psServerCommonContext->psFWCommonContextMemDesc);
		psServerCommonContext->psFWCommonContextMemDesc = NULL;
	}
fail_contextalloc:
	OSFreeMem(psServerCommonContext);
fail_alloc:
	return eError;
}

/* Tear down a context created by FWCommonContextAllocate: unlink it, unmap
 * all FW addresses, destroy the client CCB and free the context memory
 * (unless it was caller-provided). */
void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
{

	OSWRLockAcquireWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock);
	/* Remove the context from the list of all contexts. */
	dllist_remove_node(&psServerCommonContext->sListNode);
	OSWRLockReleaseWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock);

	/*
		Unmap the context itself and then all its resources
	*/

	/* Unmap the FW common context */
	RGXUnsetFirmwareAddress(psServerCommonContext->psFWCommonContextMemDesc);
	/* Umap context state buffer (if there was one) */
	if (psServerCommonContext->psContextStateMemDesc)
	{
		RGXUnsetFirmwareAddress(psServerCommonContext->psContextStateMemDesc);
	}
	/* Unmap the framework buffer */
	if (psServerCommonContext->psFWFrameworkMemDesc)
	{
		RGXUnsetFirmwareAddress(psServerCommonContext->psFWFrameworkMemDesc);
	}
	/* Unmap client CCB and CCB control */
	RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc);
	RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc);
	/* Unmap the memory context */
	RGXUnsetFirmwareAddress(psServerCommonContext->psFWMemContextMemDesc);

	/* Destroy the client CCB */
	RGXDestroyCCB(psServerCommonContext->psDevInfo, psServerCommonContext->psClientCCB);


	/* Free the FW common context (if there was one) */
	if (!psServerCommonContext->bCommonContextMemProvided)
	{
		DevmemFwUnmapAndFree(psServerCommonContext->psDevInfo,
							 psServerCommonContext->psFWCommonContextMemDesc);
		psServerCommonContext->psFWCommonContextMemDesc = NULL;
	}
	/* Free the hosts representation of the common context */
	OSFreeMem(psServerCommonContext);
}

/* Accessor: firmware address of the common context. */
PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
{
	return psServerCommonContext->sFWCommonContextFWAddr;
}

/* Accessor: client CCB owned by the common context. */
RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
{
	return psServerCommonContext->psClientCCB;
}

/* Return (and clear) the last reset reason and job reference recorded for
 * this context — i.e. a read-and-reset accessor, not a pure getter. */
RGXFWIF_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
                                                               IMG_UINT32 *pui32LastResetJobRef)
{
	RGXFWIF_CONTEXT_RESET_REASON eLastResetReason;

	PVR_ASSERT(psServerCommonContext != NULL);
	PVR_ASSERT(pui32LastResetJobRef != NULL);

	/* Take the most recent reason & job ref and reset for next time... */
	eLastResetReason = psServerCommonContext->eLastResetReason;
	*pui32LastResetJobRef = psServerCommonContext->ui32LastResetJobRef;
	psServerCommonContext->eLastResetReason = RGXFWIF_CONTEXT_RESET_REASON_NONE;
	psServerCommonContext->ui32LastResetJobRef = 0;

	if (eLastResetReason == RGXFWIF_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH)
	{
		PVR_DPF((PVR_DBG_WARNING,
				 "A Hard Context Switch was triggered on the GPU to ensure Quality of Service."));
	}

	return eLastResetReason;
}

/* Accessor: device info the common context belongs to. */
PVRSRV_RGXDEV_INFO* FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
{
	return psServerCommonContext->psDevInfo;
}

/* Forward the context flags to the client CCB. */
PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
                                     IMG_UINT32 ui32ContextFlags)
{
	return RGXSetCCBFlags(psServerCommonContext->psClientCCB,
	                      ui32ContextFlags);
}

/*!
*******************************************************************************
 @Function		RGXFreeCCB
 @Description	Free the kernel or firmware CCB
 @Input			psDevInfo
 @Input			ppsCCBCtl
 @Input			ppsCCBCtlMemDesc
 @Input			ppsCCBMemDesc
 @Input			psCCBCtlFWAddr
******************************************************************************/
static void RGXFreeCCB(PVRSRV_RGXDEV_INFO	*psDevInfo,
					   RGXFWIF_CCB_CTL		**ppsCCBCtl,
					   DEVMEM_MEMDESC		**ppsCCBCtlMemDesc,
					   IMG_UINT8			**ppui8CCB,
					   DEVMEM_MEMDESC		**ppsCCBMemDesc)
{
	if (*ppsCCBMemDesc != NULL)
	{
		if (*ppui8CCB != NULL)
		{
			DevmemReleaseCpuVirtAddr(*ppsCCBMemDesc);
			*ppui8CCB = NULL;
		}
		DevmemFwUnmapAndFree(psDevInfo, *ppsCCBMemDesc);
		*ppsCCBMemDesc = NULL;
	}
	if (*ppsCCBCtlMemDesc != NULL)
	{
		if (*ppsCCBCtl != NULL)
		{
			DevmemReleaseCpuVirtAddr(*ppsCCBCtlMemDesc);
			*ppsCCBCtl = NULL;
		}
		DevmemFwUnmapAndFree(psDevInfo, *ppsCCBCtlMemDesc);
		*ppsCCBCtlMemDesc = NULL;
	}
}

/*!
*******************************************************************************
 @Function		RGXFreeCCBReturnSlots
 @Description	Free the kernel CCB's return slot array and associated mappings
 @Input			psDevInfo				Device Info struct
 @Input			ppui32CCBRtnSlots		CPU mapping of slot array
 @Input			ppsCCBRtnSlotsMemDesc	Slot array's device memdesc
******************************************************************************/
static void RGXFreeCCBReturnSlots(PVRSRV_RGXDEV_INFO *psDevInfo,
								  IMG_UINT32 **ppui32CCBRtnSlots,
								  DEVMEM_MEMDESC **ppsCCBRtnSlotsMemDesc)
{
	/* Free the return slot array if allocated */
	if (*ppsCCBRtnSlotsMemDesc != NULL)
	{
		/* Before freeing, ensure the CPU mapping as well is released */
		if (*ppui32CCBRtnSlots != NULL)
		{
			DevmemReleaseCpuVirtAddr(*ppsCCBRtnSlotsMemDesc);
			*ppui32CCBRtnSlots = NULL;
		}
		DevmemFwUnmapAndFree(psDevInfo, *ppsCCBRtnSlotsMemDesc);
		*ppsCCBRtnSlotsMemDesc = NULL;
	}
}

/*!
*******************************************************************************
 @Function		RGXSetupCCB
 @Description	Allocate and initialise a circular command buffer
 @Input			psDevInfo
 @Input			ppsCCBCtl
 @Input			ppsCCBCtlMemDesc
 @Input			ppui8CCB
 @Input			ppsCCBMemDesc
 @Input			psCCBCtlFWAddr
 @Input			ui32NumCmdsLog2
 @Input			ui32CmdSize
 @Input			uiCCBMemAllocFlags
 @Input			pszName

 @Return		PVRSRV_ERROR
******************************************************************************/
static PVRSRV_ERROR RGXSetupCCB(PVRSRV_RGXDEV_INFO	*psDevInfo,
								RGXFWIF_CCB_CTL		**ppsCCBCtl,
								DEVMEM_MEMDESC		**ppsCCBCtlMemDesc,
								IMG_UINT8			**ppui8CCB,
								DEVMEM_MEMDESC		**ppsCCBMemDesc,
								PRGXFWIF_CCB_CTL	*psCCBCtlFWAddr,
								PRGXFWIF_CCB		*psCCBFWAddr,
								IMG_UINT32			ui32NumCmdsLog2,
								IMG_UINT32			ui32CmdSize,
								DEVMEM_FLAGS_T		uiCCBMemAllocFlags,
								const IMG_CHAR		*pszName)
{
	PVRSRV_ERROR		eError;
	RGXFWIF_CCB_CTL		*psCCBCtl;
	/* Command count is a power of two so the wrap mask below is just size-1. */
	IMG_UINT32			ui32CCBSize = (1U << ui32NumCmdsLog2);
	IMG_CHAR			szCCBCtlName[DEVMEM_ANNOTATION_MAX_LEN];
	IMG_INT32			iStrLen;

	/* Append "Control" to the name for the control struct. */
	iStrLen = OSSNPrintf(szCCBCtlName, sizeof(szCCBCtlName), "%sControl", pszName);
	/* NOTE(review): signed/unsigned comparison — iStrLen is IMG_INT32 and
	 * sizeof() is size_t; relies on iStrLen >= 0 here, which the check
	 * below handles separately. */
	PVR_ASSERT(iStrLen < sizeof(szCCBCtlName));

	if (unlikely(iStrLen < 0))
	{
		szCCBCtlName[0] = '\0';
	}

	/* Allocate memory for the CCB control.*/
	eError = RGXSetupFwAllocation(psDevInfo,
								  RGX_FWSHAREDMEM_ALLOCFLAGS,
								  sizeof(RGXFWIF_CCB_CTL),
								  szCCBCtlName,
								  ppsCCBCtlMemDesc,
								  psCCBCtlFWAddr,
								  (void**) ppsCCBCtl,
								  RFW_FWADDR_NOREF_FLAG);
	PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);

	/*
	 * Allocate memory for the CCB.
	 * (this will reference further command data in non-shared CCBs)
	 */
	eError = RGXSetupFwAllocation(psDevInfo,
								  uiCCBMemAllocFlags,
								  ui32CCBSize * ui32CmdSize,
								  pszName,
								  ppsCCBMemDesc,
								  psCCBFWAddr,
								  (void**) ppui8CCB,
								  RFW_FWADDR_NOREF_FLAG);
	PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);

	/*
	 * Initialise the CCB control.
	 */
	psCCBCtl = *ppsCCBCtl;
	psCCBCtl->ui32WriteOffset = 0;
	psCCBCtl->ui32ReadOffset = 0;
	psCCBCtl->ui32WrapMask = ui32CCBSize - 1;
	psCCBCtl->ui32CmdSize = ui32CmdSize;

	/* Pdump the CCB control */
	PDUMPCOMMENT("Initialise %s", szCCBCtlName);
	DevmemPDumpLoadMem(*ppsCCBCtlMemDesc,
					   0,
					   sizeof(RGXFWIF_CCB_CTL),
					   0);

	return PVRSRV_OK;

fail:
	/* RGXFreeCCB copes with either or both allocations being absent. */
	RGXFreeCCB(psDevInfo,
			   ppsCCBCtl,
			   ppsCCBCtlMemDesc,
			   ppui8CCB,
			   ppsCCBMemDesc);

	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}

/* Undo RGXSetupFaultReadRegister: unlock the PMR physical addresses and
 * free the fault page (idempotent). */
static void RGXSetupFaultReadRegisterRollback(PVRSRV_RGXDEV_INFO *psDevInfo)
{
	PMR *psPMR;

	if (psDevInfo->psRGXFaultAddressMemDesc)
	{
		if (DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc, (void **)&psPMR) == PVRSRV_OK)
		{
			PMRUnlockSysPhysAddresses(psPMR);
		}
		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc);
		psDevInfo->psRGXFaultAddressMemDesc = NULL;
	}
}

/* Allocate a page used as the target for GPU page faults on non-blocking
 * memory transactions, fill it with a recognisable pattern, and record its
 * physical address in the firmware's sys-init structure. */
static PVRSRV_ERROR RGXSetupFaultReadRegister(PVRSRV_DEVICE_NODE *psDeviceNode, RGXFWIF_SYSINIT *psFwSysInit)
{
	PVRSRV_ERROR		eError = PVRSRV_OK;
	IMG_UINT32			*pui32MemoryVirtAddr;
	IMG_UINT32			i;
	size_t				ui32PageSize = OSGetPageSize();
	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
	PMR					*psPMR;

	/* Allocate page of memory to use for page faults on non-blocking memory
	 * transactions.
	 * Doesn't need to be cleared as it is initialised with the 0xDEADBEE0
	 * pattern below. */
	psDevInfo->psRGXFaultAddressMemDesc = NULL;
	eError = DevmemFwAllocateExportable(psDeviceNode,
										ui32PageSize,
										ui32PageSize,
										RGX_FWSHAREDMEM_ALLOCFLAGS & (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC),
										"FwExFaultAddress",
										&psDevInfo->psRGXFaultAddressMemDesc);

	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
				 "%s: Failed to allocate mem for fault address (%u)",
				 __func__, eError));
		goto failFaultAddressDescAlloc;
	}

	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc,
									  (void **)&pui32MemoryVirtAddr);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
				 "%s: Failed to acquire mem for fault address (%u)",
				 __func__, eError));
		goto failFaultAddressDescAqCpuVirt;
	}

	if (!psDeviceNode->bAutoVzFwIsUp)
	{
		/* fill the page with a known pattern when booting the firmware */
		for (i = 0; i < ui32PageSize/sizeof(IMG_UINT32); i++)
		{
			*(pui32MemoryVirtAddr + i) = 0xDEADBEE0;
		}
	}

	eError = DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc, (void **)&psPMR);

	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
				 "%s: Error getting PMR for fault address (%u)",
				 __func__, eError));

		goto failFaultAddressDescGetPMR;
	}
	else
	{
		IMG_BOOL bValid;
		IMG_UINT32 ui32Log2PageSize = OSGetPageShift();

		/* The physical backing must stay pinned for the firmware's lifetime;
		 * released again by RGXSetupFaultReadRegisterRollback. */
		eError = PMRLockSysPhysAddresses(psPMR);

		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,
					 "%s: Error locking physical address for fault address MemDesc (%u)",
					 __func__, eError));

			goto failFaultAddressDescLockPhys;
		}

		eError = PMR_DevPhysAddr(psPMR,ui32Log2PageSize, 1, 0,
								 &(psFwSysInit->sFaultPhysAddr), &bValid);

		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,
					 "%s: Error getting physical address for fault address MemDesc (%u)",
					 __func__, eError));

			goto failFaultAddressDescGetPhys;
		}

		if (!bValid)
		{
			psFwSysInit->sFaultPhysAddr.uiAddr = 0;
			PVR_DPF((PVR_DBG_ERROR,
					 "%s: Failed getting physical address for fault address MemDesc - invalid page (0x%" IMG_UINT64_FMTSPECX ")",
					 __func__, psFwSysInit->sFaultPhysAddr.uiAddr));

			goto failFaultAddressDescGetPhys;
		}
	}

	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc);

	return PVRSRV_OK;

failFaultAddressDescGetPhys:
	PMRUnlockSysPhysAddresses(psPMR);

failFaultAddressDescLockPhys:

failFaultAddressDescGetPMR:
	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc);

failFaultAddressDescAqCpuVirt:
	DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc);
	psDevInfo->psRGXFaultAddressMemDesc = NULL;

failFaultAddressDescAlloc:

	return eError;
}

#if defined(PDUMP)
/* Replace the DevPhy address with the one Pdump allocates at pdump_player run time */
static PVRSRV_ERROR RGXPDumpFaultReadRegister(PVRSRV_RGXDEV_INFO *psDevInfo)
{
	PVRSRV_ERROR eError;
	PMR *psFWInitPMR, *psFaultAddrPMR;
	IMG_UINT32 ui32Dstoffset;

	psFWInitPMR = (PMR *)(psDevInfo->psRGXFWIfSysInitMemDesc->psImport->hPMR);
	ui32Dstoffset = psDevInfo->psRGXFWIfSysInitMemDesc->uiOffset + offsetof(RGXFWIF_SYSINIT, sFaultPhysAddr.uiAddr);

	psFaultAddrPMR = (PMR *)(psDevInfo->psRGXFaultAddressMemDesc->psImport->hPMR);

	eError = PDumpMemLabelToMem64(psFaultAddrPMR,
								  psFWInitPMR,
								  0,
								  ui32Dstoffset,
								  PDUMP_FLAGS_CONTINUOUS);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Dump of Fault Page Phys address failed(%u)", __func__, eError));
	}
	return eError;
}
#endif

#if defined(SUPPORT_TBI_INTERFACE)
/*************************************************************************/ /*!
+@Function RGXTBIBufferIsInitRequired + +@Description Returns true if the firmware tbi buffer is not allocated and + might be required by the firmware soon. TBI buffer allocated + on-demand to reduce RAM footprint on systems not needing + tbi. + +@Input psDevInfo RGX device info + +@Return IMG_BOOL Whether on-demand allocation(s) is/are needed + or not +*/ /**************************************************************************/ +INLINE IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + + /* The firmware expects a tbi buffer only when: + * - Logtype is "tbi" + */ + if ((psDevInfo->psRGXFWIfTBIBufferMemDesc == NULL) + && (psTraceBufCtl->ui32LogType & ~RGXFWIF_LOG_TYPE_TRACE) + && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)) + { + return IMG_TRUE; + } + + return IMG_FALSE; +} + +/*************************************************************************/ /*! +@Function RGXTBIBufferDeinit + +@Description Deinitialises all the allocations and references that are made + for the FW tbi buffer + +@Input ppsDevInfo RGX device info +@Return void +*/ /**************************************************************************/ +static void RGXTBIBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTBIBufferMemDesc); + psDevInfo->psRGXFWIfTBIBufferMemDesc = NULL; + psDevInfo->ui32RGXFWIfHWPerfBufSize = 0; +} + +/*************************************************************************/ /*! +@Function RGXTBIBufferInitOnDemandResources + +@Description Allocates the firmware TBI buffer required for reading SFs + strings and initialize it with SFs. + +@Input psDevInfo RGX device info + +@Return PVRSRV_OK If all went good, PVRSRV_ERROR otherwise. 
+*/ /**************************************************************************/ +PVRSRV_ERROR RGXTBIBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 i, ui32Len; + const IMG_UINT32 ui32FWTBIBufsize = g_ui32SFsCount * sizeof(RGXFW_STID_FMT); + RGXFW_STID_FMT *psFW_SFs = NULL; + + /* Firmware address should not be already set */ + if (psDevInfo->sRGXFWIfTBIBuffer.ui32Addr) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: FW address for FWTBI is already set. Resetting it with newly allocated one", + __func__)); + } + + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS, + ui32FWTBIBufsize, + "FwTBIBuffer", + &psDevInfo->psRGXFWIfTBIBufferMemDesc, + &psDevInfo->sRGXFWIfTBIBuffer, + (void**)&psFW_SFs, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); + + /* Copy SFs entries to FW buffer */ + for (i = 0; i < g_ui32SFsCount; i++) + { + OSDeviceMemCopy(&psFW_SFs[i].ui32Id, &SFs[i].ui32Id, sizeof(SFs[i].ui32Id)); + ui32Len = OSStringLength(SFs[i].psName); + OSDeviceMemCopy(psFW_SFs[i].sName, SFs[i].psName, MIN(ui32Len, IMG_SF_STRING_MAX_SIZE - 1)); + } + + /* Set size of TBI buffer */ + psDevInfo->ui32FWIfTBIBufferSize = ui32FWTBIBufsize; + + /* release CPU mapping */ + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTBIBufferMemDesc); + + return PVRSRV_OK; +fail: + RGXTBIBufferDeinit(psDevInfo); + return eError; +} +#endif + +/*************************************************************************/ /*! +@Function RGXTraceBufferIsInitRequired + +@Description Returns true if the firmware trace buffer is not allocated and + might be required by the firmware soon. Trace buffer allocated + on-demand to reduce RAM footprint on systems not needing + firmware trace. 
+ +@Input psDevInfo RGX device info + +@Return IMG_BOOL Whether on-demand allocation(s) is/are needed + or not +*/ /**************************************************************************/ +INLINE IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + + /* The firmware expects a trace buffer only when: + * - Logtype is "trace" AND + * - at least one LogGroup is configured + * - the Driver Mode is not Guest + */ + if ((psDevInfo->psRGXFWIfTraceBufferMemDesc[0] == NULL) + && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE) + && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK) + && !PVRSRV_VZ_MODE_IS(GUEST)) + { + return IMG_TRUE; + } + + return IMG_FALSE; +} + +/*************************************************************************/ /*! +@Function RGXTraceBufferDeinit + +@Description Deinitialises all the allocations and references that are made + for the FW trace buffer(s) + +@Input ppsDevInfo RGX device info +@Return void +*/ /**************************************************************************/ +static void RGXTraceBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + IMG_UINT32 i; + + for (i = 0; i < RGXFW_THREAD_NUM; i++) + { + if (psDevInfo->psRGXFWIfTraceBufferMemDesc[i]) + { + if (psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer != NULL) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufferMemDesc[i]); + psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer = NULL; + } + + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufferMemDesc[i]); + psDevInfo->psRGXFWIfTraceBufferMemDesc[i] = NULL; + } + } +} + +/*************************************************************************/ /*! +@Function RGXTraceBufferInitOnDemandResources + +@Description Allocates the firmware trace buffer required for dumping trace + info from the firmware. 
+ +@Input psDevInfo RGX device info + +@Return PVRSRV_OK If all went good, PVRSRV_ERROR otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO* psDevInfo, + DEVMEM_FLAGS_T uiAllocFlags) +{ + RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32FwThreadNum; + IMG_UINT32 ui32DefaultTraceBufSize; + IMG_DEVMEM_SIZE_T uiTraceBufSizeInBytes; + void *pvAppHintState = NULL; + IMG_CHAR pszBufferName[] = "FwTraceBuffer_Thread0"; + + /* Check AppHint value for module-param FWTraceBufSizeInDWords */ + OSCreateKMAppHintState(&pvAppHintState); + ui32DefaultTraceBufSize = RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS; + OSGetKMAppHintUINT32(pvAppHintState, + FWTraceBufSizeInDWords, + &ui32DefaultTraceBufSize, + &psTraceBufCtl->ui32TraceBufSizeInDWords); + OSFreeKMAppHintState(pvAppHintState); + pvAppHintState = NULL; + + uiTraceBufSizeInBytes = psTraceBufCtl->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32); + + for (ui32FwThreadNum = 0; ui32FwThreadNum < RGXFW_THREAD_NUM; ui32FwThreadNum++) + { +#if !defined(SUPPORT_AUTOVZ) + /* Ensure allocation API is only called when not already allocated */ + PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum] == NULL); + /* Firmware address should not be already set */ + PVR_ASSERT(psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer.ui32Addr == 0x0); +#endif + + /* update the firmware thread number in the Trace Buffer's name */ + pszBufferName[sizeof(pszBufferName) - 2] += ui32FwThreadNum; + + eError = RGXSetupFwAllocation(psDevInfo, + uiAllocFlags, + uiTraceBufSizeInBytes, + pszBufferName, + &psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum], + &psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer, + (void**)&psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32TraceBuffer, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, 
"RGXSetupFwAllocation", fail); + } + + return PVRSRV_OK; + +fail: + RGXTraceBufferDeinit(psDevInfo); + return eError; +} + +#if defined(PDUMP) +/*************************************************************************/ /*! +@Function RGXPDumpLoadFWInitData + +@Description Allocates the firmware trace buffer required for dumping trace + info from the firmware. + +@Input psDevInfo RGX device info + */ /*************************************************************************/ +static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32HWPerfCountersDataSize, + IMG_BOOL bEnableSignatureChecks) +{ + IMG_UINT32 ui32ConfigFlags = psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags; + IMG_UINT32 ui32FwOsCfgFlags = psDevInfo->psRGXFWIfFwOsData->ui32FwOsConfigFlags; + + PDUMPCOMMENT("Dump RGXFW Init data"); + if (!bEnableSignatureChecks) + { + PDUMPCOMMENT("(to enable rgxfw signatures place the following line after the RTCONF line)"); + DevmemPDumpLoadMem(psDevInfo->psRGXFWIfSysInitMemDesc, + offsetof(RGXFWIF_SYSINIT, asSigBufCtl), + sizeof(RGXFWIF_SIGBUF_CTL)*(RGXFWIF_DM_MAX), + PDUMP_FLAGS_CONTINUOUS); + } + + PDUMPCOMMENT("Dump initial state of FW runtime configuration"); + DevmemPDumpLoadMem(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, + 0, + sizeof(RGXFWIF_RUNTIME_CFG), + PDUMP_FLAGS_CONTINUOUS); + + PDUMPCOMMENT("Dump rgxfw hwperfctl structure"); + DevmemPDumpLoadZeroMem(psDevInfo->psRGXFWIfHWPerfCountersMemDesc, + 0, + ui32HWPerfCountersDataSize, + PDUMP_FLAGS_CONTINUOUS); + + PDUMPCOMMENT("Dump rgxfw trace control structure"); + DevmemPDumpLoadMem(psDevInfo->psRGXFWIfTraceBufCtlMemDesc, + 0, + sizeof(RGXFWIF_TRACEBUF), + PDUMP_FLAGS_CONTINUOUS); + + PDUMPCOMMENT("Dump firmware system data structure"); + DevmemPDumpLoadMem(psDevInfo->psRGXFWIfFwSysDataMemDesc, + 0, + sizeof(RGXFWIF_SYSDATA), + PDUMP_FLAGS_CONTINUOUS); + + PDUMPCOMMENT("Dump firmware OS data structure"); + DevmemPDumpLoadMem(psDevInfo->psRGXFWIfFwOsDataMemDesc, + 0, + 
	                   sizeof(RGXFWIF_OSDATA),
	                   PDUMP_FLAGS_CONTINUOUS);

#if defined(SUPPORT_TBI_INTERFACE)
	PDUMPCOMMENT("Dump rgx TBI buffer");
	DevmemPDumpLoadMem(psDevInfo->psRGXFWIfTBIBufferMemDesc,
	                   0,
	                   psDevInfo->ui32FWIfTBIBufferSize,
	                   PDUMP_FLAGS_CONTINUOUS);
#endif /* defined(SUPPORT_TBI_INTERFACE) */

#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
	PDUMPCOMMENT("Dump rgxfw register configuration buffer");
	DevmemPDumpLoadMem(psDevInfo->psRGXFWIfRegCfgMemDesc,
	                   0,
	                   sizeof(RGXFWIF_REG_CFG),
	                   PDUMP_FLAGS_CONTINUOUS);
#endif /* defined(SUPPORT_USER_REGISTER_CONFIGURATION) */
	PDUMPCOMMENT("Dump rgxfw system init structure");
	DevmemPDumpLoadMem(psDevInfo->psRGXFWIfSysInitMemDesc,
	                   0,
	                   sizeof(RGXFWIF_SYSINIT),
	                   PDUMP_FLAGS_CONTINUOUS);

	PDUMPCOMMENT("Dump rgxfw os init structure");
	DevmemPDumpLoadMem(psDevInfo->psRGXFWIfOsInitMemDesc,
	                   0,
	                   sizeof(RGXFWIF_OSINIT),
	                   PDUMP_FLAGS_CONTINUOUS);

	/* RGXFW Init structure needs to be loaded before we overwrite FaultPhysAddr, else this address patching won't have any effect */
	PDUMPCOMMENT("Overwrite FaultPhysAddr of FwSysInit in pdump with actual physical address");
	RGXPDumpFaultReadRegister(psDevInfo);

	PDUMPCOMMENT("RTCONF: run-time configuration");


	/* Dump the config options so they can be edited.
	 *
	 */
	PDUMPCOMMENT("(Set the FW system config options here)");
	PDUMPCOMMENT("( Ctx Switch Rand mode: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_MODE_RAND);
	PDUMPCOMMENT("( Ctx Switch Soft Reset Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_SRESET_EN);
	PDUMPCOMMENT("( Enable HWPerf: 0x%08x)", RGXFWIF_INICFG_HWPERF_EN);
	PDUMPCOMMENT("( Enable generic DM Killing Rand mode: 0x%08x)", RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN);
	PDUMPCOMMENT("( Rascal+Dust Power Island: 0x%08x)", RGXFWIF_INICFG_POW_RASCALDUST);
	PDUMPCOMMENT("( Enable HWR: 0x%08x)", RGXFWIF_INICFG_HWR_EN);
	PDUMPCOMMENT("( FBCDC Version 3.1 Enable: 0x%08x)", RGXFWIF_INICFG_FBCDC_V3_1_EN);
	PDUMPCOMMENT("( Check MList: 0x%08x)", RGXFWIF_INICFG_CHECK_MLIST_EN);
	PDUMPCOMMENT("( Disable Auto Clock Gating: 0x%08x)", RGXFWIF_INICFG_DISABLE_CLKGATING_EN);
	PDUMPCOMMENT("( Enable HWPerf Polling Perf Counter: 0x%08x)", RGXFWIF_INICFG_POLL_COUNTERS_EN);

	/* VDM context store mode bits only exist on cores with object-level LLS */
	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, VDM_OBJECT_LEVEL_LLS))
	{
		PDUMPCOMMENT("( Ctx Switch Object mode Index: 0x%08x)", RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX);
		PDUMPCOMMENT("( Ctx Switch Object mode Instance: 0x%08x)", RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INSTANCE);
		PDUMPCOMMENT("( Ctx Switch Object mode List: 0x%08x)", RGXFWIF_INICFG_VDM_CTX_STORE_MODE_LIST);
	}

	PDUMPCOMMENT("( Enable register configuration: 0x%08x)", RGXFWIF_INICFG_REGCONFIG_EN);
	PDUMPCOMMENT("( Assert on TA Out-of-Memory: 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY);
	PDUMPCOMMENT("( Disable HWPerf custom counter filter: 0x%08x)", RGXFWIF_INICFG_HWP_DISABLE_FILTER);
	PDUMPCOMMENT("( Enable HWPerf custom performance timer: 0x%08x)", RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN);
	PDUMPCOMMENT("( Enable Ctx Switch profile mode: 0x%08x (none=b'000, fast=b'001, medium=b'010, slow=b'011, nodelay=b'100))", RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK);
	PDUMPCOMMENT("( Disable DM overlap (except TA during SPM): 0x%08x)", RGXFWIF_INICFG_DISABLE_DM_OVERLAP);
	PDUMPCOMMENT("( Assert on HWR trigger (page fault, lockup, overrun or poll failure): 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER);
	PDUMPCOMMENT("( Enable coherent memory accesses: 0x%08x)", RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED);
	PDUMPCOMMENT("( Enable IRQ validation: 0x%08x)", RGXFWIF_INICFG_VALIDATE_IRQ);
	PDUMPCOMMENT("( SPU power state mask change Enable: 0x%08x)", RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN);
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
	PDUMPCOMMENT("( Enable Workload Estimation: 0x%08x)", RGXFWIF_INICFG_WORKEST);
#if defined(SUPPORT_PDVFS)
	PDUMPCOMMENT("( Enable Proactive DVFS: 0x%08x)", RGXFWIF_INICFG_PDVFS);
#endif /* defined(SUPPORT_PDVFS) */
#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */
	PDUMPCOMMENT("( CDM Arbitration Mode (task demand=b'01, round robin=b'10): 0x%08x)", RGXFWIF_INICFG_CDM_ARBITRATION_MASK);
	PDUMPCOMMENT("( ISP Scheduling Mode (v1=b'01, v2=b'10): 0x%08x)", RGXFWIF_INICFG_ISPSCHEDMODE_MASK);
	PDUMPCOMMENT("( Validate SOC & USC timers: 0x%08x)", RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER);

	/* Write the current system config flags so the RTCONF comments above can
	 * be applied by editing this value in the pdump script. */
	DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfFwSysDataMemDesc,
	                          offsetof(RGXFWIF_SYSDATA, ui32ConfigFlags),
	                          ui32ConfigFlags,
	                          PDUMP_FLAGS_CONTINUOUS);

	PDUMPCOMMENT("( Extended FW system config options not used.)");

	PDUMPCOMMENT("(Set the FW OS config options here)");
	PDUMPCOMMENT("( Ctx Switch TA Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_TA_EN);
	PDUMPCOMMENT("( Ctx Switch 3D Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN);
	PDUMPCOMMENT("( Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN);
	PDUMPCOMMENT("( Lower Priority Ctx Switch 2D Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM);
	PDUMPCOMMENT("( Lower Priority Ctx Switch TA Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_TA);
	PDUMPCOMMENT("( Lower Priority Ctx Switch 3D Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D);
	PDUMPCOMMENT("( Lower Priority Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM);

	DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfFwOsDataMemDesc,
	                          offsetof(RGXFWIF_OSDATA, ui32FwOsConfigFlags),
	                          ui32FwOsCfgFlags,
	                          PDUMP_FLAGS_CONTINUOUS);


#if defined(SUPPORT_SECURITY_VALIDATION)
	PDUMPCOMMENT("(Select one or more security tests here)");
	PDUMPCOMMENT("( Read/write FW private data from non-FW contexts: 0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_DATA);
	PDUMPCOMMENT("( Read/write FW code from non-FW contexts: 0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_CODE);
	PDUMPCOMMENT("( Execute FW code from non-secure memory: 0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_NONSECURE);
	PDUMPCOMMENT("( Execute FW code from secure (non-FW) memory: 0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_SECURE);

	DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc,
	                          offsetof(RGXFWIF_SYSINIT, ui32SecurityTestFlags),
	                          psDevInfo->psRGXFWIfSysInit->ui32SecurityTestFlags,
	                          PDUMP_FLAGS_CONTINUOUS);
#endif

	PDUMPCOMMENT("( PID filter type: %X=INCLUDE_ALL_EXCEPT, %X=EXCLUDE_ALL_EXCEPT)",
	             RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT,
	             RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT);

	DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc,
	                          offsetof(RGXFWIF_SYSINIT, sPIDFilter.eMode),
	                          psDevInfo->psRGXFWIfSysInit->sPIDFilter.eMode,
	                          PDUMP_FLAGS_CONTINUOUS);

	PDUMPCOMMENT("( PID filter PID/OSID list (Up to %u entries. Terminate with a zero PID))",
	             RGXFWIF_PID_FILTER_MAX_NUM_PIDS);
	{
		IMG_UINT32 i;

		/* generate a few WRWs in the pdump stream as an example */
		for (i = 0; i < MIN(RGXFWIF_PID_FILTER_MAX_NUM_PIDS, 8); i++)
		{
			/*
			 * Some compilers cannot cope with the uses of offsetof() below - the specific problem being the use of
			 * a non-const variable in the expression, which it needs to be const. Typical compiler output is
			 * "expression must have a constant value".
			 */
			const IMG_DEVMEM_OFFSET_T uiPIDOff
			= (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].uiPID);

			const IMG_DEVMEM_OFFSET_T uiOSIDOff
			= (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].ui32OSID);

			PDUMPCOMMENT("(PID and OSID pair %u)", i);

			PDUMPCOMMENT("(PID)");
			DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc,
			                          uiPIDOff,
			                          0,
			                          PDUMP_FLAGS_CONTINUOUS);

			PDUMPCOMMENT("(OSID)");
			DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc,
			                          uiOSIDOff,
			                          0,
			                          PDUMP_FLAGS_CONTINUOUS);
		}
	}

	/*
	 * Dump the log config so it can be edited.
	 */
	PDUMPCOMMENT("(Set the log config here)");
	PDUMPCOMMENT("( Log Type: set bit 0 for TRACE, reset for TBI)");
	PDUMPCOMMENT("( MAIN Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MAIN);
	PDUMPCOMMENT("( MTS Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MTS);
	PDUMPCOMMENT("( CLEANUP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CLEANUP);
	PDUMPCOMMENT("( CSW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CSW);
	PDUMPCOMMENT("( BIF Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_BIF);
	PDUMPCOMMENT("( PM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_PM);
	PDUMPCOMMENT("( RTD Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_RTD);
	PDUMPCOMMENT("( SPM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_SPM);
	PDUMPCOMMENT("( POW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_POW);
	PDUMPCOMMENT("( HWR Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWR);
	PDUMPCOMMENT("( HWP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWP);

	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA))
	{
		PDUMPCOMMENT("( DMA Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DMA);
	}

	PDUMPCOMMENT("( MISC Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MISC);
	PDUMPCOMMENT("( DEBUG Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DEBUG);
	DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
	                          offsetof(RGXFWIF_TRACEBUF, ui32LogType),
	                          psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType,
	                          PDUMP_FLAGS_CONTINUOUS);

	PDUMPCOMMENT("Set the HWPerf Filter config here");
	DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfSysInitMemDesc,
	                          offsetof(RGXFWIF_SYSINIT, ui64HWPerfFilter),
	                          psDevInfo->psRGXFWIfSysInit->ui64HWPerfFilter,
	                          PDUMP_FLAGS_CONTINUOUS);

#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
	PDUMPCOMMENT("(Number of registers configurations for types(byte index): pow on(%d), dust change(%d), ta(%d), 3d(%d), cdm(%d), tla(%d), TDM(%d))",
	             RGXFWIF_REG_CFG_TYPE_PWR_ON,
	             RGXFWIF_REG_CFG_TYPE_DUST_CHANGE,
	             RGXFWIF_REG_CFG_TYPE_TA,
	             RGXFWIF_REG_CFG_TYPE_3D,
	             RGXFWIF_REG_CFG_TYPE_CDM,
	             RGXFWIF_REG_CFG_TYPE_TLA,
	             RGXFWIF_REG_CFG_TYPE_TDM);

	{
		IMG_UINT32 i;

		/* Write 32 bits in each iteration as required by PDUMP WRW command */
		for (i = 0; i < RGXFWIF_REG_CFG_TYPE_ALL; i += sizeof(IMG_UINT32))
		{
			DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRegCfgMemDesc,
			                          offsetof(RGXFWIF_REG_CFG, aui8NumRegsType[i]),
			                          0,
			                          PDUMP_FLAGS_CONTINUOUS);
		}
	}

	PDUMPCOMMENT("(Set registers here: address, mask, value)");
	DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
	                          offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Addr),
	                          0,
	                          PDUMP_FLAGS_CONTINUOUS);
	DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
	                          offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Mask),
	                          0,
	                          PDUMP_FLAGS_CONTINUOUS);
	DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
	                          offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Value),
	                          0,
	                          PDUMP_FLAGS_CONTINUOUS);
#endif /* SUPPORT_USER_REGISTER_CONFIGURATION */
}
#endif /* defined(PDUMP) */

/*!
+******************************************************************************* + @Function RGXSetupFwSysData + + @Description Setups all system-wide firmware related data + + @Input psDevInfo + + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bEnableSignatureChecks, + IMG_UINT32 ui32SignatureChecksBufSize, + IMG_UINT32 ui32HWPerfFWBufSizeKB, + IMG_UINT64 ui64HWPerfFilter, + IMG_UINT32 ui32RGXFWAlignChecksArrLength, + IMG_UINT32 *pui32RGXFWAlignChecks, + IMG_UINT32 ui32ConfigFlags, + IMG_UINT32 ui32ConfigFlagsExt, + IMG_UINT32 ui32LogType, + IMG_UINT32 ui32FilterFlags, + IMG_UINT32 ui32JonesDisableMask, + IMG_UINT32 ui32HWPerfCountersDataSize, + IMG_UINT32 *pui32TPUTrilinearFracMask, + RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf, + FW_PERF_CONF eFirmwarePerf) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_SYSINIT *psFwSysInitScratch = NULL; + + psFwSysInitScratch = OSAllocZMem(sizeof(*psFwSysInitScratch)); + PVR_LOG_GOTO_IF_NOMEM(psFwSysInitScratch, eError, fail); + + /* Sys Fw init data */ + eError = RGXSetupFwAllocation(psDevInfo, + (RGX_FWSHAREDMEM_ALLOCFLAGS | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_FW_ALLOC_CONFIG) & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + sizeof(RGXFWIF_SYSINIT), + "FwSysInitStructure", + &psDevInfo->psRGXFWIfSysInitMemDesc, + NULL, + (void**) &psDevInfo->psRGXFWIfSysInit, + RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "Firmware Sys Init structure allocation", fail); + + /* Setup Fault read register */ + eError = RGXSetupFaultReadRegister(psDeviceNode, psFwSysInitScratch); + PVR_LOG_GOTO_IF_ERROR(eError, "Fault read register setup", fail); + + /* RD Power Island */ + { + RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; + IMG_BOOL bSysEnableRDPowIsland = 
psRGXData->psRGXTimingInfo->bEnableRDPowIsland; + IMG_BOOL bEnableRDPowIsland = ((eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_DEFAULT) && bSysEnableRDPowIsland) || + (eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_FORCE_ON); + + ui32ConfigFlags |= bEnableRDPowIsland? RGXFWIF_INICFG_POW_RASCALDUST : 0; + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + ui32ConfigFlags |= RGXFWIF_INICFG_WORKEST; +#if defined(SUPPORT_PDVFS) + { + RGXFWIF_PDVFS_OPP *psPDVFSOPPInfo; + IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg; + + /* Pro-active DVFS depends on Workload Estimation */ + psPDVFSOPPInfo = &psFwSysInitScratch->sPDVFSOPPInfo; + psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; + PVR_LOG_IF_FALSE(psDVFSDeviceCfg->pasOPPTable, "RGXSetupFwSysData: Missing OPP Table"); + + if (psDVFSDeviceCfg->pasOPPTable != NULL) + { + if (psDVFSDeviceCfg->ui32OPPTableSize > ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: OPP Table too large: Size = %u, Maximum size = %lu", + __func__, + psDVFSDeviceCfg->ui32OPPTableSize, + (unsigned long)(ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues)))); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto fail; + } + + OSDeviceMemCopy(psPDVFSOPPInfo->asOPPValues, + psDVFSDeviceCfg->pasOPPTable, + sizeof(psPDVFSOPPInfo->asOPPValues)); + + psPDVFSOPPInfo->ui32MaxOPPPoint = psDVFSDeviceCfg->ui32OPPTableSize - 1; + + ui32ConfigFlags |= RGXFWIF_INICFG_PDVFS; + } + } +#endif /* defined(SUPPORT_PDVFS) */ +#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */ + + /* FW trace control structure */ + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWINITDATA_WC_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + sizeof(RGXFWIF_TRACEBUF), + "FwTraceCtlStruct", + &psDevInfo->psRGXFWIfTraceBufCtlMemDesc, + &psFwSysInitScratch->sTraceBufCtl, + (void**) &psDevInfo->psRGXFWIfTraceBufCtl, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); + + if (!psDeviceNode->bAutoVzFwIsUp) + { + /* Set 
initial firmware log type/group(s) */ + if (ui32LogType & ~RGXFWIF_LOG_TYPE_MASK) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid initial log type (0x%X)", + __func__, ui32LogType)); + goto fail; + } + psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType = ui32LogType; + } + + /* When PDUMP is enabled, ALWAYS allocate on-demand trace buffer resource + * (irrespective of loggroup(s) enabled), given that logtype/loggroups can + * be set during PDump playback in logconfig, at any point of time, + * Otherwise, allocate only if required. */ +#if !defined(PDUMP) +#if defined(SUPPORT_AUTOVZ) + /* always allocate trace buffer for AutoVz Host drivers to allow + * deterministic addresses of all SysData structures */ + if ((PVRSRV_VZ_MODE_IS(HOST)) || (RGXTraceBufferIsInitRequired(psDevInfo))) +#else + if (RGXTraceBufferIsInitRequired(psDevInfo)) +#endif +#endif + { + eError = RGXTraceBufferInitOnDemandResources(psDevInfo, + RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp)); + } + PVR_LOG_GOTO_IF_ERROR(eError, "RGXTraceBufferInitOnDemandResources", fail); + + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + sizeof(RGXFWIF_SYSDATA), + "FwSysData", + &psDevInfo->psRGXFWIfFwSysDataMemDesc, + &psFwSysInitScratch->sFwSysData, + (void**) &psDevInfo->psRGXFWIfFwSysData, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); + + /* GPIO validation setup */ + psFwSysInitScratch->eGPIOValidationMode = RGXFWIF_GPIO_VAL_OFF; +#if defined(SUPPORT_VALIDATION) + { + IMG_INT32 ui32AppHintDefault; + IMG_INT32 ui32GPIOValidationMode; + void *pvAppHintState = NULL; + + /* Check AppHint for GPIO validation mode */ + OSCreateKMAppHintState(&pvAppHintState); + ui32AppHintDefault = PVRSRV_APPHINT_GPIOVALIDATIONMODE; + OSGetKMAppHintUINT32(pvAppHintState, + GPIOValidationMode, + &ui32AppHintDefault, + 
&ui32GPIOValidationMode); + OSFreeKMAppHintState(pvAppHintState); + pvAppHintState = NULL; + + if (ui32GPIOValidationMode >= RGXFWIF_GPIO_VAL_LAST) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid GPIO validation mode: %d, only valid if smaller than %d. Disabling GPIO validation.", + __func__, + ui32GPIOValidationMode, + RGXFWIF_GPIO_VAL_LAST)); + } + else + { + psFwSysInitScratch->eGPIOValidationMode = (RGXFWIF_GPIO_VAL_MODE) ui32GPIOValidationMode; + } + + psFwSysInitScratch->eGPIOValidationMode = ui32GPIOValidationMode; + } +#endif + +#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) + eError = RGXFWSetupCounterBuffer(psDevInfo, + &psDevInfo->psCounterBufferMemDesc, + PAGE_SIZE, + &psFwSysInitScratch->sCounterDumpCtl, + "CounterBuffer"); + PVR_LOG_GOTO_IF_ERROR(eError, "Counter Buffer allocation", fail); +#endif /* defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) */ + +#if defined(SUPPORT_FIRMWARE_GCOV) + eError = RGXFWSetupFirmwareGcovBuffer(psDevInfo, + &psDevInfo->psFirmwareGcovBufferMemDesc, + RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE, + &psFwSysInitScratch->sFirmwareGcovCtl, + "FirmwareGcovBuffer"); + PVR_LOG_GOTO_IF_ERROR(eError, "Firmware GCOV buffer allocation", fail); + psDevInfo->ui32FirmwareGcovSize = RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE; +#endif /* defined(SUPPORT_FIRMWARE_GCOV) */ + +#if defined(PDUMP) + /* Require a minimum amount of memory for the signature buffers */ + if (ui32SignatureChecksBufSize < RGXFW_SIG_BUFFER_SIZE_MIN) + { + ui32SignatureChecksBufSize = RGXFW_SIG_BUFFER_SIZE_MIN; + } + + /* Setup Signature and Checksum Buffers for TA and 3D */ + eError = RGXFWSetupSignatureChecks(psDevInfo, + &psDevInfo->psRGXFWSigTAChecksMemDesc, + ui32SignatureChecksBufSize, + &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_GEOM], + "TA"); + PVR_LOG_GOTO_IF_ERROR(eError, "TA Signature check setup", fail); + psDevInfo->ui32SigTAChecksSize = ui32SignatureChecksBufSize; + + eError = RGXFWSetupSignatureChecks(psDevInfo, + &psDevInfo->psRGXFWSig3DChecksMemDesc, + 
ui32SignatureChecksBufSize, + &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_3D], + "3D"); + PVR_LOG_GOTO_IF_ERROR(eError, "3D Signature check setup", fail); + psDevInfo->ui32Sig3DChecksSize = ui32SignatureChecksBufSize; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM)) + { + /* Buffer allocated only when feature present because, all known TDM + * signature registers are dependent on this feature being present */ + eError = RGXFWSetupSignatureChecks(psDevInfo, + &psDevInfo->psRGXFWSigTDM2DChecksMemDesc, + ui32SignatureChecksBufSize, + &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_TDM], + "TDM"); + PVR_LOG_GOTO_IF_ERROR(eError, "TDM Signature check setup", fail); + psDevInfo->ui32SigTDM2DChecksSize = ui32SignatureChecksBufSize; + } + else + { + psDevInfo->psRGXFWSigTDM2DChecksMemDesc = NULL; + psDevInfo->ui32SigTDM2DChecksSize = 0; + } +#endif + + if (!bEnableSignatureChecks) + { + psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_TDM].sBuffer.ui32Addr = 0x0; + psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_GEOM].sBuffer.ui32Addr = 0x0; + psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_3D].sBuffer.ui32Addr = 0x0; + } + + eError = RGXFWSetupAlignChecks(psDeviceNode, + &psFwSysInitScratch->sAlignChecks, + pui32RGXFWAlignChecks, + ui32RGXFWAlignChecksArrLength); + PVR_LOG_GOTO_IF_ERROR(eError, "Alignment checks setup", fail); + + psFwSysInitScratch->ui32FilterFlags = ui32FilterFlags; + + if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273)) + { + /* Fill the remaining bits of fw the init data */ + psFwSysInitScratch->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_BRN_65273_HEAP_BASE; + psFwSysInitScratch->sUSCExecBase.uiAddr = RGX_USCCODE_BRN_65273_HEAP_BASE; + } + else + { + /* Fill the remaining bits of fw the init data */ + psFwSysInitScratch->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_HEAP_BASE; + psFwSysInitScratch->sUSCExecBase.uiAddr = RGX_USCCODE_HEAP_BASE; + } + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) + { + psFwSysInitScratch->ui32JonesDisableMask = 
ui32JonesDisableMask; + } + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT)) + { + eError = _AllocateSLC3Fence(psDevInfo, psFwSysInitScratch); + PVR_LOG_GOTO_IF_ERROR(eError, "SLC3Fence memory allocation", fail); + } + +#if defined(SUPPORT_PDVFS) + /* Core clock rate */ + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + sizeof(IMG_UINT32), + "FwPDVFSCoreClkRate", + &psDevInfo->psRGXFWIFCoreClkRateMemDesc, + &psFwSysInitScratch->sCoreClockRate, + (void**) &psDevInfo->pui32RGXFWIFCoreClkRate, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "PDVFS core clock rate memory setup", fail); +#endif + +#if defined(SUPPORT_TBI_INTERFACE) +#if !defined(PDUMP) + /* allocate only if required */ + if (RGXTBIBufferIsInitRequired(psDevInfo)) +#endif /* !defined(PDUMP) */ + { + /* When PDUMP is enabled, ALWAYS allocate on-demand TBI buffer resource + * (irrespective of loggroup(s) enabled), given that logtype/loggroups + * can be set during PDump playback in logconfig, at any point of time + */ + eError = RGXTBIBufferInitOnDemandResources(psDevInfo); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXTBIBufferInitOnDemandResources", fail); + } + + psFwSysInitScratch->sTBIBuf = psDevInfo->sRGXFWIfTBIBuffer; +#endif /* defined(SUPPORT_TBI_INTERFACE) */ + + /* Allocate shared buffer for GPU utilisation */ + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + sizeof(RGXFWIF_GPU_UTIL_FWCB), + "FwGPUUtilisationBuffer", + &psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc, + &psFwSysInitScratch->sGpuUtilFWCbCtl, + (void**) &psDevInfo->psRGXFWIfGpuUtilFWCb, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "GPU Utilisation Buffer ctl allocation", fail); + + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + 
sizeof(RGXFWIF_RUNTIME_CFG), + "FwRuntimeCfg", + &psDevInfo->psRGXFWIfRuntimeCfgMemDesc, + &psFwSysInitScratch->sRuntimeCfg, + (void**) &psDevInfo->psRGXFWIfRuntimeCfg, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "Firmware runtime configuration memory allocation", fail); + +#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + sizeof(RGXFWIF_REG_CFG), + "FwRegisterConfigStructure", + &psDevInfo->psRGXFWIfRegCfgMemDesc, + &psFwSysInitScratch->sRegCfg, + NULL, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "Firmware register user configuration structure allocation", fail); +#endif + + psDevInfo->ui32RGXFWIfHWPerfBufSize = GetHwPerfBufferSize(ui32HWPerfFWBufSizeKB); + /* Second stage initialisation or HWPerf, hHWPerfLock created in first + * stage. See RGXRegisterDevice() call to RGXHWPerfInit(). */ + if (psDevInfo->ui64HWPerfFilter == 0) + { + psDevInfo->ui64HWPerfFilter = ui64HWPerfFilter; + psFwSysInitScratch->ui64HWPerfFilter = ui64HWPerfFilter; + } + else + { + /* The filter has already been modified. This can happen if + * pvr/apphint/EnableFTraceGPU was enabled. */ + psFwSysInitScratch->ui64HWPerfFilter = psDevInfo->ui64HWPerfFilter; + } + +#if !defined(PDUMP) + /* Allocate if HWPerf filter has already been set. This is possible either + * by setting a proper AppHint or enabling GPU ftrace events. */ + if (psDevInfo->ui64HWPerfFilter != 0) +#endif + { + /* When PDUMP is enabled, ALWAYS allocate on-demand HWPerf resources + * (irrespective of HWPerf enabled or not), given that HWPerf can be + * enabled during PDump playback via RTCONF at any point of time. 
*/ + eError = RGXHWPerfInitOnDemandResources(psDevInfo); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInitOnDemandResources", fail); + } + + RGXHWPerfInitAppHintCallbacks(psDeviceNode); + + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWINITDATA_WC_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + ui32HWPerfCountersDataSize, + "FwHWPerfControlStructure", + &psDevInfo->psRGXFWIfHWPerfCountersMemDesc, + &psFwSysInitScratch->sHWPerfCtl, + NULL, + RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "Firmware HW Perf control struct allocation", fail); + + psDevInfo->bPDPEnabled = (ui32ConfigFlags & RGXFWIF_INICFG_DISABLE_PDP_EN) + ? IMG_FALSE : IMG_TRUE; + + psFwSysInitScratch->eFirmwarePerf = eFirmwarePerf; + +#if defined(PDUMP) + /* default: no filter */ + psFwSysInitScratch->sPIDFilter.eMode = RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT; + psFwSysInitScratch->sPIDFilter.asItems[0].uiPID = 0; +#endif + +#if defined(SUPPORT_VALIDATION) + { + IMG_UINT32 dm; + + /* TPU trilinear rounding mask override */ + for (dm = 0; dm < RGXFWIF_TPU_DM_LAST; dm++) + { + psFwSysInitScratch->aui32TPUTrilinearFracMask[dm] = pui32TPUTrilinearFracMask[dm]; + } + } +#endif + +#if defined(SUPPORT_SECURITY_VALIDATION) + PDUMPCOMMENT("Allocate non-secure buffer for security validation test"); + eError = DevmemFwAllocateExportable(psDeviceNode, + OSGetPageSize(), + OSGetPageSize(), + RGX_FWSHAREDMEM_ALLOCFLAGS, + "FwExNonSecureBuffer", + &psDevInfo->psRGXFWIfNonSecureBufMemDesc); + PVR_LOG_GOTO_IF_ERROR(eError, "Non-secure buffer allocation", fail); + + eError = RGXSetFirmwareAddress(&psFwSysInitScratch->pbNonSecureBuffer, + psDevInfo->psRGXFWIfNonSecureBufMemDesc, + 0, RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", fail); + + PDUMPCOMMENT("Allocate secure buffer for security validation test"); + eError = DevmemFwAllocateExportable(psDeviceNode, + OSGetPageSize(), + OSGetPageSize(), + RGX_FWSHAREDMEM_ALLOCFLAGS | + 
PVRSRV_MEMALLOCFLAG_VAL_SECURE_BUFFER, + "FwExSecureBuffer", + &psDevInfo->psRGXFWIfSecureBufMemDesc); + PVR_LOG_GOTO_IF_ERROR(eError, "Secure buffer allocation", fail); + + eError = RGXSetFirmwareAddress(&psFwSysInitScratch->pbSecureBuffer, + psDevInfo->psRGXFWIfSecureBufMemDesc, + 0, RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", fail); +#endif /* SUPPORT_SECURITY_VALIDATION */ + + /* Initialize FW started flag */ + psFwSysInitScratch->bFirmwareStarted = IMG_FALSE; + psFwSysInitScratch->ui32MarkerVal = 1; + + if (!psDeviceNode->bAutoVzFwIsUp) + { + RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; + RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; + + /* Required info by FW to calculate the ActivePM idle timer latency */ + psFwSysInitScratch->ui32InitialCoreClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed; + psFwSysInitScratch->ui32ActivePMLatencyms = psRGXData->psRGXTimingInfo->ui32ActivePMLatencyms; + + /* Initialise variable runtime configuration to the system defaults */ + psRuntimeCfg->ui32CoreClockSpeed = psFwSysInitScratch->ui32InitialCoreClockSpeed; + psRuntimeCfg->ui32ActivePMLatencyms = psFwSysInitScratch->ui32ActivePMLatencyms; + psRuntimeCfg->bActivePMLatencyPersistant = IMG_TRUE; + + /* Initialize the DefaultDustsNumInit Field to Max Dusts */ + psRuntimeCfg->ui32DefaultDustsNumInit = psDevInfo->sDevFeatureCfg.ui32MAXDustCount; + + /* Setup FW coremem data */ + if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc) + { + IMG_BOOL bMetaDMA = RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA); + + psFwSysInitScratch->sCorememDataStore.pbyFWAddr = psDevInfo->sFWCorememDataStoreFWAddr; + + if (bMetaDMA) + { + RGXSetMetaDMAAddress(&psFwSysInitScratch->sCorememDataStore, + psDevInfo->psRGXFWIfCorememDataStoreMemDesc, + &psFwSysInitScratch->sCorememDataStore.pbyFWAddr, + 0); + } + } + + psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags = ui32ConfigFlags & RGXFWIF_INICFG_ALL; + 
psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlagsExt = ui32ConfigFlagsExt & RGXFWIF_INICFG_EXT_ALL; + + /* Initialise GPU utilisation buffer */ + psDevInfo->psRGXFWIfGpuUtilFWCb->ui64LastWord = + RGXFWIF_GPU_UTIL_MAKE_WORD(OSClockns64(),RGXFWIF_GPU_UTIL_STATE_IDLE); + + /* init HWPERF data */ + psDevInfo->psRGXFWIfFwSysData->ui32HWPerfRIdx = 0; + psDevInfo->psRGXFWIfFwSysData->ui32HWPerfWIdx = 0; + psDevInfo->psRGXFWIfFwSysData->ui32HWPerfWrapCount = 0; + psDevInfo->psRGXFWIfFwSysData->ui32HWPerfSize = psDevInfo->ui32RGXFWIfHWPerfBufSize; + psDevInfo->psRGXFWIfFwSysData->ui32HWPerfUt = 0; + psDevInfo->psRGXFWIfFwSysData->ui32HWPerfDropCount = 0; + psDevInfo->psRGXFWIfFwSysData->ui32FirstDropOrdinal = 0; + psDevInfo->psRGXFWIfFwSysData->ui32LastDropOrdinal = 0; +#if defined(SUPPORT_POWMON_COMPONENT) + psDevInfo->psRGXFWIfFwSysData->ui32PowMonEstimate = 0; +#endif + + /*Send through the BVNC Feature Flags*/ + eError = RGXServerFeatureFlagsToHWPerfFlags(psDevInfo, &psFwSysInitScratch->sBvncKmFeatureFlags); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXServerFeatureFlagsToHWPerfFlags", fail); + + /* populate the real FwOsInit structure with the values stored in the scratch copy */ + OSDeviceMemCopy(psDevInfo->psRGXFWIfSysInit, psFwSysInitScratch, sizeof(RGXFWIF_SYSINIT)); + } + + OSFreeMem(psFwSysInitScratch); + + return PVRSRV_OK; + +fail: + if (psFwSysInitScratch) + { + OSFreeMem(psFwSysInitScratch); + } + + RGXFreeFwSysData(psDevInfo); + + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +/*! 
*******************************************************************************
 @Function      RGXSetupFwOsData

 @Description   Sets up all os-specific firmware related data: the
                firmware/driver connection control block, the per-OS init
                structure, the HWR info buffer, the power-management sync
                primitive and the kernel/firmware/checkpoint CCBs. All
                values are staged in a stack scratch copy (sFwOsInitScratch)
                and copied into the real FW-visible structure at the end.

 @Input         psDevInfo

 @Return        PVRSRV_ERROR
******************************************************************************/
static PVRSRV_ERROR RGXSetupFwOsData(PVRSRV_DEVICE_NODE *psDeviceNode,
                                     IMG_UINT32 ui32HWRDebugDumpLimit,
                                     IMG_UINT32 ui32FwOsCfgFlags)
{
	PVRSRV_ERROR eError;
	RGXFWIF_OSINIT sFwOsInitScratch;
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
	/* Kernel CCB depth (log2): use the larger "without GPU virtualisation
	 * feature" size when running virtualised on HW without the feature. */
	IMG_UINT32 ui32kCCBSize = (!PVRSRV_VZ_MODE_IS(NATIVE) &&
		!(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK)) ?
		(RGXFWIF_KCCB_NUMCMDS_LOG2_GPUVIRT_WITHOUT_FEATURE) : (RGXFWIF_KCCB_NUMCMDS_LOG2_DEFAULT);

	OSCachedMemSet(&sFwOsInitScratch, 0, sizeof(RGXFWIF_OSINIT));

	/* Memory tracking the connection state should be non-volatile and
	 * is not cleared on allocation to prevent loss of pre-reset information */
	eError = RGXSetupFwAllocation(psDevInfo,
	                              (RGX_FWSHAREDMEM_ALLOCFLAGS |
	                               PVRSRV_MEMALLOCFLAG_FW_ALLOC_CONFIG)
	                              & (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC),
	                              sizeof(RGXFWIF_CONNECTION_CTL),
	                              "FwConnectionCtl",
	                              &psDevInfo->psRGXFWIfConnectionCtlMemDesc,
	                              NULL,
	                              (void**) &psDevInfo->psRGXFWIfConnectionCtl,
	                              RFW_FWADDR_FLAG_NONE);
	PVR_LOG_GOTO_IF_ERROR(eError, "Firmware Connection Control structure allocation", fail);

	eError = RGXSetupFwAllocation(psDevInfo,
	                              (RGX_FWSHAREDMEM_ALLOCFLAGS |
	                               PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
	                               PVRSRV_MEMALLOCFLAG_FW_ALLOC_CONFIG),
	                              sizeof(RGXFWIF_OSINIT),
	                              "FwOsInitStructure",
	                              &psDevInfo->psRGXFWIfOsInitMemDesc,
	                              NULL,
	                              (void**) &psDevInfo->psRGXFWIfOsInit,
	                              RFW_FWADDR_FLAG_NONE);
	PVR_LOG_GOTO_IF_ERROR(eError, "Firmware Os Init structure allocation", fail);

	/* init HWR frame info */
	eError = RGXSetupFwAllocation(psDevInfo,
	                              RGX_FWSHAREDMEM_ALLOCFLAGS,
	                              sizeof(RGXFWIF_HWRINFOBUF),
	                              "FwHWRInfoBuffer",
	                              &psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc,
	                              &sFwOsInitScratch.sRGXFWIfHWRInfoBufCtl,
	                              (void**) &psDevInfo->psRGXFWIfHWRInfoBufCtl,
	                              RFW_FWADDR_NOREF_FLAG);
	PVR_LOG_GOTO_IF_ERROR(eError, "HWR Info Buffer allocation", fail);

	/* Might be uncached. Be conservative and use a DeviceMemSet */
	OSDeviceMemSet(psDevInfo->psRGXFWIfHWRInfoBufCtl, 0, sizeof(RGXFWIF_HWRINFOBUF));

	/* Allocate a sync for power management */
	eError = SyncPrimContextCreate(psDevInfo->psDeviceNode,
	                               &psDevInfo->hSyncPrimContext);
	PVR_LOG_GOTO_IF_ERROR(eError, "Sync primitive context allocation", fail);

	eError = SyncPrimAlloc(psDevInfo->hSyncPrimContext, &psDevInfo->psPowSyncPrim, "fw power ack");
	PVR_LOG_GOTO_IF_ERROR(eError, "Sync primitive allocation", fail);

	/* Set up kernel CCB */
	eError = RGXSetupCCB(psDevInfo,
	                     &psDevInfo->psKernelCCBCtl,
	                     &psDevInfo->psKernelCCBCtlMemDesc,
	                     &psDevInfo->psKernelCCB,
	                     &psDevInfo->psKernelCCBMemDesc,
	                     &sFwOsInitScratch.psKernelCCBCtl,
	                     &sFwOsInitScratch.psKernelCCB,
	                     ui32kCCBSize,
	                     sizeof(RGXFWIF_KCCB_CMD),
	                     (RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS |
	                      PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)),
	                     "FwKernelCCB");
	PVR_LOG_GOTO_IF_ERROR(eError, "Kernel CCB allocation", fail);

	/* KCCB additionally uses a return slot array for FW to be able to send back
	 * return codes for each required command
	 */
	eError = RGXSetupFwAllocation(psDevInfo,
	                              RGX_FWSHAREDMEM_ALLOCFLAGS,
	                              (1U << ui32kCCBSize) * sizeof(IMG_UINT32),
	                              "FwKernelCCBRtnSlots",
	                              &psDevInfo->psKernelCCBRtnSlotsMemDesc,
	                              &sFwOsInitScratch.psKernelCCBRtnSlots,
	                              (void**) &psDevInfo->pui32KernelCCBRtnSlots,
	                              RFW_FWADDR_NOREF_FLAG);
	PVR_LOG_GOTO_IF_ERROR(eError, "Kernel CCB return slot array allocation", fail);

	/* Set up firmware CCB */
	eError = RGXSetupCCB(psDevInfo,
	                     &psDevInfo->psFirmwareCCBCtl,
	                     &psDevInfo->psFirmwareCCBCtlMemDesc,
	                     &psDevInfo->psFirmwareCCB,
	                     &psDevInfo->psFirmwareCCBMemDesc,
	                     &sFwOsInitScratch.psFirmwareCCBCtl,
	                     &sFwOsInitScratch.psFirmwareCCB,
	                     RGXFWIF_FWCCB_NUMCMDS_LOG2,
	                     sizeof(RGXFWIF_FWCCB_CMD),
	                     RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS,
	                     "FwCCB");
	PVR_LOG_GOTO_IF_ERROR(eError, "Firmware CCB allocation", fail);

#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
	/* Set up checkpoint CCB */
	eError = RGXSetupCCB(psDevInfo,
	                     &psDevInfo->psCheckpointCCBCtl,
	                     &psDevInfo->psCheckpointCCBCtlMemDesc,
	                     &psDevInfo->psCheckpointCCB,
	                     &psDevInfo->psCheckpointCCBMemDesc,
	                     &sFwOsInitScratch.psCheckpointCCBCtl,
	                     &sFwOsInitScratch.psCheckpointCCB,
	                     RGXFWIF_CHECKPOINTCCB_NUMCMDS_LOG2,
	                     sizeof(PRGXFWIF_UFO_ADDR),
	                     RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS,
	                     "FwChkptCCB");
	PVR_LOG_GOTO_IF_ERROR(eError, "Checkpoint CCB allocation", fail);
#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */

	eError = RGXSetupFwAllocation(psDevInfo,
	                              RGX_FWSHAREDMEM_ALLOCFLAGS,
	                              sizeof(RGXFWIF_OSDATA),
	                              "FwOsData",
	                              &psDevInfo->psRGXFWIfFwOsDataMemDesc,
	                              &sFwOsInitScratch.sFwOsData,
	                              (void**) &psDevInfo->psRGXFWIfFwOsData,
	                              RFW_FWADDR_NOREF_FLAG);
	PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);

	psDevInfo->psRGXFWIfFwOsData->ui32FwOsConfigFlags = ui32FwOsCfgFlags & RGXFWIF_INICFG_OS_ALL;

	eError = SyncPrimGetFirmwareAddr(psDevInfo->psPowSyncPrim, &psDevInfo->psRGXFWIfFwOsData->sPowerSync.ui32Addr);
	PVR_LOG_GOTO_IF_ERROR(eError, "Get Sync Prim FW address", fail);

	sFwOsInitScratch.ui32HWRDebugDumpLimit = ui32HWRDebugDumpLimit;

#if defined(SUPPORT_WORKLOAD_ESTIMATION)
	/* Set up Workload Estimation firmware CCB */
	eError = RGXSetupCCB(psDevInfo,
	                     &psDevInfo->psWorkEstFirmwareCCBCtl,
	                     &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
	                     &psDevInfo->psWorkEstFirmwareCCB,
	                     &psDevInfo->psWorkEstFirmwareCCBMemDesc,
	                     &sFwOsInitScratch.psWorkEstFirmwareCCBCtl,
	                     &sFwOsInitScratch.psWorkEstFirmwareCCB,
	                     RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2,
	                     sizeof(RGXFWIF_WORKEST_FWCCB_CMD),
	                     RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS,
	                     "FwWEstCCB");
	PVR_LOG_GOTO_IF_ERROR(eError, "Workload Estimation Firmware CCB allocation", fail);
#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */

	/* Initialise the compatibility check data */
	RGXFWIF_COMPCHECKS_BVNC_INIT(sFwOsInitScratch.sRGXCompChecks.sFWBVNC);
	RGXFWIF_COMPCHECKS_BVNC_INIT(sFwOsInitScratch.sRGXCompChecks.sHWBVNC);

	/* populate the real FwOsInit structure with the values stored in the scratch copy */
	OSDeviceMemCopy(psDevInfo->psRGXFWIfOsInit, &sFwOsInitScratch, sizeof(RGXFWIF_OSINIT));

#if defined(SUPPORT_AUTOVZ) && defined(SUPPORT_AUTOVZ_HW_REGS)
	/* if hardware registers are used to store connection states,
	 * these can only be accessed if the GPU is powered up */
	if (PVRSRV_VZ_MODE_IS(HOST) && (psDeviceNode->bAutoVzFwIsUp))
#endif /* defined(SUPPORT_AUTOVZ) && defined(SUPPORT_AUTOVZ_HW_REGS)*/
	{
		KM_SET_OS_CONNECTION(READY, psDevInfo);
	}

	return PVRSRV_OK;

fail:
	/* Partial-failure path: tear down everything allocated above */
	RGXFreeFwOsData(psDevInfo);

	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}

/*!
*******************************************************************************
 @Function      RGXSetupFirmware

 @Description   Setups all firmware related data. Always sets up the
                per-OS data; system-wide data is set up only for Native
                and Host drivers (Guests skip it and leave
                psRGXFWIfSysInit NULL).

 @Input         psDevInfo

 @Return        PVRSRV_ERROR
******************************************************************************/
PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode,
                              IMG_BOOL bEnableSignatureChecks,
                              IMG_UINT32 ui32SignatureChecksBufSize,
                              IMG_UINT32 ui32HWPerfFWBufSizeKB,
                              IMG_UINT64 ui64HWPerfFilter,
                              IMG_UINT32 ui32RGXFWAlignChecksArrLength,
                              IMG_UINT32 *pui32RGXFWAlignChecks,
                              IMG_UINT32 ui32ConfigFlags,
                              IMG_UINT32 ui32ConfigFlagsExt,
                              IMG_UINT32 ui32FwOsCfgFlags,
                              IMG_UINT32 ui32LogType,
                              IMG_UINT32 ui32FilterFlags,
                              IMG_UINT32 ui32JonesDisableMask,
                              IMG_UINT32 ui32HWRDebugDumpLimit,
                              IMG_UINT32 ui32HWPerfCountersDataSize,
                              IMG_UINT32 *pui32TPUTrilinearFracMask,
                              RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
                              FW_PERF_CONF eFirmwarePerf)
{
	PVRSRV_ERROR eError;
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;

	eError = RGXSetupFwOsData(psDeviceNode, ui32HWRDebugDumpLimit, ui32FwOsCfgFlags);
	PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware os data", fail);

	if (PVRSRV_VZ_MODE_IS(GUEST))
	{
		/* Guest drivers do not configure system-wide firmware data */
		psDevInfo->psRGXFWIfSysInit = NULL;
	}
	else
	{
		/* Native and Host drivers must initialise the firmware's system data */
		eError = RGXSetupFwSysData(psDeviceNode,
		                           bEnableSignatureChecks,
		                           ui32SignatureChecksBufSize,
		                           ui32HWPerfFWBufSizeKB,
		                           ui64HWPerfFilter,
		                           ui32RGXFWAlignChecksArrLength,
		                           pui32RGXFWAlignChecks,
		                           ui32ConfigFlags,
		                           ui32ConfigFlagsExt,
		                           ui32LogType,
		                           ui32FilterFlags,
		                           ui32JonesDisableMask,
		                           ui32HWPerfCountersDataSize,
		                           pui32TPUTrilinearFracMask,
		                           eRGXRDPowerIslandConf,
		                           eFirmwarePerf);
		PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware system data", fail);
	}

	psDevInfo->bFirmwareInitialised = IMG_TRUE;

#if defined(PDUMP)
	RGXPDumpLoadFWInitData(psDevInfo,
	                       ui32HWPerfCountersDataSize,
	                       bEnableSignatureChecks);
#endif /* PDUMP */

	/* NOTE: the success path intentionally falls through to the label below
	 * with eError == PVRSRV_OK; no cleanup is performed here. */
fail:
	return eError;
}

/*!
*******************************************************************************
 @Function      RGXFreeFwSysData

 @Description   Frees all system-wide firmware related data

 @Input         psDevInfo
******************************************************************************/
static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo)
{
	/* Mark the FW as uninitialised before releasing its data */
	psDevInfo->bFirmwareInitialised = IMG_FALSE;

	if (psDevInfo->psRGXFWAlignChecksMemDesc)
	{
		RGXFWFreeAlignChecks(psDevInfo);
	}

#if defined(PDUMP)
	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM) &&
	    psDevInfo->psRGXFWSigTDM2DChecksMemDesc)
	{
		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigTDM2DChecksMemDesc);
		psDevInfo->psRGXFWSigTDM2DChecksMemDesc = NULL;
	}

	if (psDevInfo->psRGXFWSigTAChecksMemDesc)
	{
		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigTAChecksMemDesc);
		psDevInfo->psRGXFWSigTAChecksMemDesc = NULL;
	}

	if (psDevInfo->psRGXFWSig3DChecksMemDesc)
	{
		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSig3DChecksMemDesc);
		psDevInfo->psRGXFWSig3DChecksMemDesc = NULL;
	}
#endif

#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
	if (psDevInfo->psCounterBufferMemDesc)
	{
		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psCounterBufferMemDesc);
		psDevInfo->psCounterBufferMemDesc = NULL;
	}
#endif

#if defined(SUPPORT_FIRMWARE_GCOV)
	if (psDevInfo->psFirmwareGcovBufferMemDesc)
	{
		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psFirmwareGcovBufferMemDesc);
		psDevInfo->psFirmwareGcovBufferMemDesc = NULL;
	}
#endif

	RGXSetupFaultReadRegisterRollback(psDevInfo);

	if (psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc)
	{
		/* Release the CPU mapping before unmapping/freeing the FW allocation */
		if (psDevInfo->psRGXFWIfGpuUtilFWCb != NULL)
		{
			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc);
			psDevInfo->psRGXFWIfGpuUtilFWCb = NULL;
		}
		DevmemFwUnmapAndFree(psDevInfo,
		                     psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc);
		psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc = NULL;
	}

	RGXHWPerfDeinit(psDevInfo);

	if (psDevInfo->psRGXFWIfRuntimeCfgMemDesc)
	{
		if (psDevInfo->psRGXFWIfRuntimeCfg != NULL)
		{
			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfRuntimeCfgMemDesc);
			psDevInfo->psRGXFWIfRuntimeCfg = NULL;
		}
		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfRuntimeCfgMemDesc);
		psDevInfo->psRGXFWIfRuntimeCfgMemDesc = NULL;
	}

	if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc)
	{
		/* NOTE(review): only the reference is cleared here, with no
		 * DevmemFwUnmapAndFree — presumably this descriptor is owned and
		 * freed elsewhere (with the FW image/coremem allocations); confirm. */
		psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL;
	}

	if (psDevInfo->psRGXFWIfTraceBufCtlMemDesc)
	{
		if (psDevInfo->psRGXFWIfTraceBufCtl != NULL)
		{
			/* first deinit/free the tracebuffer allocation */
			RGXTraceBufferDeinit(psDevInfo);

			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
			psDevInfo->psRGXFWIfTraceBufCtl = NULL;
		}
		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
		psDevInfo->psRGXFWIfTraceBufCtlMemDesc = NULL;
	}

	if (psDevInfo->psRGXFWIfFwSysDataMemDesc)
	{
		if (psDevInfo->psRGXFWIfFwSysData != NULL)
		{
			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfFwSysDataMemDesc);
			psDevInfo->psRGXFWIfFwSysData = NULL;
		}
		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfFwSysDataMemDesc);
		psDevInfo->psRGXFWIfFwSysDataMemDesc = NULL;
	}

#if defined(SUPPORT_TBI_INTERFACE)
	if (psDevInfo->psRGXFWIfTBIBufferMemDesc)
	{
		RGXTBIBufferDeinit(psDevInfo);
	}
#endif

#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
	if (psDevInfo->psRGXFWIfRegCfgMemDesc)
	{
		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfRegCfgMemDesc);
		psDevInfo->psRGXFWIfRegCfgMemDesc = NULL;
	}
#endif
	if (psDevInfo->psRGXFWIfHWPerfCountersMemDesc)
	{
		RGXUnsetFirmwareAddress(psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
		psDevInfo->psRGXFWIfHWPerfCountersMemDesc = NULL;
	}

#if defined(SUPPORT_SECURITY_VALIDATION)
	if (psDevInfo->psRGXFWIfNonSecureBufMemDesc)
	{
		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfNonSecureBufMemDesc);
		psDevInfo->psRGXFWIfNonSecureBufMemDesc = NULL;
	}

	if (psDevInfo->psRGXFWIfSecureBufMemDesc)
	{
		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfSecureBufMemDesc);
		psDevInfo->psRGXFWIfSecureBufMemDesc = NULL;
	}
#endif

	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT))
	{
		_FreeSLC3Fence(psDevInfo);
	}

#if defined(SUPPORT_PDVFS)
	if (psDevInfo->psRGXFWIFCoreClkRateMemDesc)
	{
		if (psDevInfo->pui32RGXFWIFCoreClkRate != NULL)
		{
			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIFCoreClkRateMemDesc);
			psDevInfo->pui32RGXFWIFCoreClkRate = NULL;
		}

		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIFCoreClkRateMemDesc);
		psDevInfo->psRGXFWIFCoreClkRateMemDesc = NULL;
	}
#endif
}

/*!
*******************************************************************************
 @Function      RGXFreeFwOsData

 @Description   Frees all os-specific firmware related data: the CCBs and
                their return slots, the power sync primitive and context,
                the HWR info buffer and the per-OS FW data structure.
                Safe to call on a partially-initialised psDevInfo (used as
                the failure path of RGXSetupFwOsData).

 @Input         psDevInfo
******************************************************************************/
static void RGXFreeFwOsData(PVRSRV_RGXDEV_INFO *psDevInfo)
{
	RGXFreeCCBReturnSlots(psDevInfo,
	                      &psDevInfo->pui32KernelCCBRtnSlots,
	                      &psDevInfo->psKernelCCBRtnSlotsMemDesc);
	RGXFreeCCB(psDevInfo,
	           &psDevInfo->psKernelCCBCtl,
	           &psDevInfo->psKernelCCBCtlMemDesc,
	           &psDevInfo->psKernelCCB,
	           &psDevInfo->psKernelCCBMemDesc);

	RGXFreeCCB(psDevInfo,
	           &psDevInfo->psFirmwareCCBCtl,
	           &psDevInfo->psFirmwareCCBCtlMemDesc,
	           &psDevInfo->psFirmwareCCB,
	           &psDevInfo->psFirmwareCCBMemDesc);

#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
	RGXFreeCCB(psDevInfo,
	           &psDevInfo->psCheckpointCCBCtl,
	           &psDevInfo->psCheckpointCCBCtlMemDesc,
	           &psDevInfo->psCheckpointCCB,
	           &psDevInfo->psCheckpointCCBMemDesc);
#endif

#if defined(SUPPORT_WORKLOAD_ESTIMATION)
	RGXFreeCCB(psDevInfo,
	           &psDevInfo->psWorkEstFirmwareCCBCtl,
	           &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
	           &psDevInfo->psWorkEstFirmwareCCB,
	           &psDevInfo->psWorkEstFirmwareCCBMemDesc);
#endif

	if (psDevInfo->psPowSyncPrim != NULL)
	{
		SyncPrimFree(psDevInfo->psPowSyncPrim);
		psDevInfo->psPowSyncPrim = NULL;
	}

	/* Destroy the sync context only after the sync prim allocated from it */
	if (psDevInfo->hSyncPrimContext != (IMG_HANDLE) NULL)
	{
		SyncPrimContextDestroy(psDevInfo->hSyncPrimContext);
		psDevInfo->hSyncPrimContext = (IMG_HANDLE) NULL;
	}

	if (psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc)
	{
		if (psDevInfo->psRGXFWIfHWRInfoBufCtl != NULL)
		{
			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc);
			psDevInfo->psRGXFWIfHWRInfoBufCtl = NULL;
		}
		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc);
		psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc = NULL;
	}

	if (psDevInfo->psRGXFWIfFwOsDataMemDesc)
	{
		if (psDevInfo->psRGXFWIfFwOsData != NULL)
		{
			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfFwOsDataMemDesc);
			psDevInfo->psRGXFWIfFwOsData = NULL;
		}
		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfFwOsDataMemDesc);
		psDevInfo->psRGXFWIfFwOsDataMemDesc = NULL;
	}
}

/*!
*******************************************************************************
 @Function      RGXFreeFirmware

 @Description   Frees all the firmware-related allocations: per-OS data
                first, then the connection control and OS-init structures,
                then the system-wide data and SysInit structure.

 @Input         psDevInfo
******************************************************************************/
void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo)
{
	RGXFreeFwOsData(psDevInfo);

	if (psDevInfo->psRGXFWIfConnectionCtl)
	{
		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfConnectionCtlMemDesc);
		psDevInfo->psRGXFWIfConnectionCtl = NULL;
	}

	if (psDevInfo->psRGXFWIfConnectionCtlMemDesc)
	{
		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfConnectionCtlMemDesc);
		psDevInfo->psRGXFWIfConnectionCtlMemDesc = NULL;
	}

	if (psDevInfo->psRGXFWIfOsInit)
	{
		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfOsInitMemDesc);
		psDevInfo->psRGXFWIfOsInit = NULL;
	}

	if (psDevInfo->psRGXFWIfOsInitMemDesc)
	{
		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfOsInitMemDesc);
		psDevInfo->psRGXFWIfOsInitMemDesc = NULL;
	}

	RGXFreeFwSysData(psDevInfo);
	if (psDevInfo->psRGXFWIfSysInit)
	{
		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfSysInitMemDesc);
		psDevInfo->psRGXFWIfSysInit = NULL;
	}

	if (psDevInfo->psRGXFWIfSysInitMemDesc)
	{
		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfSysInitMemDesc);
		psDevInfo->psRGXFWIfSysInitMemDesc = NULL;
	}
}

/******************************************************************************
 FUNCTION	: RGXAcquireKernelCCBSlot

 PURPOSE	: Attempts to obtain a slot in the Kernel CCB

 PARAMETERS	: psCCB - the CCB
		: Address of space if available, NULL otherwise

 RETURNS	: PVRSRV_ERROR
******************************************************************************/
static PVRSRV_ERROR RGXAcquireKernelCCBSlot(DEVMEM_MEMDESC *psKCCBCtrlMemDesc,
                                            RGXFWIF_CCB_CTL *psKCCBCtl,
                                            IMG_UINT32 *pui32Offset)
{
	IMG_UINT32 ui32OldWriteOffset, ui32NextWriteOffset;

	ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset;
	/* Ring-buffer advance: wrap mask is (size - 1), size is a power of two */
	ui32NextWriteOffset = (ui32OldWriteOffset + 1) & psKCCBCtl->ui32WrapMask;

	/*
	 * Note: The MTS can queue up to 255 kicks (254 pending kicks and 1
	 * executing kick), hence the kernel CCB should not queue more than
	 * 254 commands.
	 */
	PVR_ASSERT(psKCCBCtl->ui32WrapMask < 255);

#if defined(PDUMP)
	/* Wait for sufficient CCB space to become available */
	PDUMPCOMMENTWITHFLAGS(0, "Wait for kCCB woff=%u", ui32NextWriteOffset);
	DevmemPDumpCBP(psKCCBCtrlMemDesc,
	               offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset),
	               ui32NextWriteOffset,
	               1,
	               (psKCCBCtl->ui32WrapMask + 1));
#endif

	/* Write catching up to read means the CCB is full */
	if (ui32NextWriteOffset == psKCCBCtl->ui32ReadOffset)
	{
		return PVRSRV_ERROR_KERNEL_CCB_FULL;
	}
	*pui32Offset = ui32NextWriteOffset;
	return PVRSRV_OK;
}

/******************************************************************************
 FUNCTION	: RGXPollKernelCCBSlot

 PURPOSE	: Poll for space in Kernel CCB

 PARAMETERS	: psCCB - the CCB
		: Address of space if available, NULL otherwise

 RETURNS	: PVRSRV_ERROR
******************************************************************************/
static PVRSRV_ERROR RGXPollKernelCCBSlot(DEVMEM_MEMDESC *psKCCBCtrlMemDesc,
                                         RGXFWIF_CCB_CTL *psKCCBCtl)
{
	/* NOTE(review): psKCCBCtrlMemDesc appears unreferenced in this body —
	 * confirm whether it is kept for interface symmetry with
	 * RGXAcquireKernelCCBSlot or used by a build variant. */
	IMG_UINT32 ui32OldWriteOffset, ui32NextWriteOffset;

	ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset;
	ui32NextWriteOffset = (ui32OldWriteOffset + 1) & psKCCBCtl->ui32WrapMask;

	/*
	 * Note: The MTS can queue up to 255 kicks (254 pending kicks and 1
	 * executing kick), hence the kernel CCB should not queue more than
	 * 254 commands.
	 */
	PVR_ASSERT(psKCCBCtl->ui32WrapMask < 255);

	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
	{

		if (ui32NextWriteOffset != psKCCBCtl->ui32ReadOffset)
		{
			return PVRSRV_OK;
		}
		{
			/*
			 * The following sanity check doesn't impact performance,
			 * since the CPU has to wait for the GPU anyway (full kernel CCB).
			 */
			if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
			{
				return PVRSRV_ERROR_KERNEL_CCB_FULL;
			}
		}

		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
	} END_LOOP_UNTIL_TIMEOUT();

	return PVRSRV_ERROR_KERNEL_CCB_FULL;
}

/******************************************************************************
 FUNCTION	: RGXGetCmdMemCopySize

 PURPOSE	: Calculates actual size of KCCB command getting used

 PARAMETERS	: eCmdType	Type of KCCB command

 RETURNS	: Returns actual size of KCCB command on success else zero
******************************************************************************/
static IMG_UINT32 RGXGetCmdMemCopySize(RGXFWIF_KCCB_CMD_TYPE eCmdType)
{
	/* First get offset of uCmdData inside the struct RGXFWIF_KCCB_CMD
	 * This will account alignment requirement of uCmdData union
	 *
	 * Then add command-data size depending on command type to calculate actual
	 * command size required to do mem copy
	 *
	 * NOTE: Make sure that uCmdData is the last member of RGXFWIF_KCCB_CMD struct.
	 */
	/* Each case returns header offset + size of that command's payload type */
	switch (eCmdType)
	{
		case RGXFWIF_KCCB_CMD_KICK:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_KICK_DATA);
		}
		case RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA);
		}
		case RGXFWIF_KCCB_CMD_MMUCACHE:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_MMUCACHEDATA);
		}
#if defined(SUPPORT_USC_BREAKPOINT)
		case RGXFWIF_KCCB_CMD_BP:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_BPDATA);
		}
#endif
		case RGXFWIF_KCCB_CMD_SLCFLUSHINVAL:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_SLCFLUSHINVALDATA);
		}
		case RGXFWIF_KCCB_CMD_CLEANUP:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_CLEANUP_REQUEST);
		}
		case RGXFWIF_KCCB_CMD_POW:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_POWER_REQUEST);
		}
		/* Backing and unbacking updates share the same payload type */
		case RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE:
		case RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_ZSBUFFER_BACKING_DATA);
		}
		case RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_FREELIST_GS_DATA);
		}
		case RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_FREELISTS_RECONSTRUCTION_DATA);
		}
		case RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_SIGNAL_UPDATE_DATA);
		}
		case RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_WRITE_OFFSET_UPDATE_DATA);
		}
		case RGXFWIF_KCCB_CMD_FORCE_UPDATE:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA);
		}
#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
		case RGXFWIF_KCCB_CMD_REGCONFIG:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_REGCONFIG_DATA);
		}
#endif
		case RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS);
		}
#if defined(SUPPORT_PDVFS)
		case RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_PDVFS_MAX_FREQ_DATA);
		}
#endif
		case RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_OSID_PRIORITY_DATA);
		}
		case RGXFWIF_KCCB_CMD_HCS_SET_DEADLINE:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HCS_CTL);
		}
		case RGXFWIF_KCCB_CMD_OS_ISOLATION_GROUP_CHANGE:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_OSID_ISOLATION_GROUP_DATA);
		}
		case RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_OS_STATE_CHANGE_DATA);
		}
		case RGXFWIF_KCCB_CMD_COUNTER_DUMP:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_COUNTER_DUMP_DATA);
		}
		case RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CTRL);
		}
		case RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS);
		}
		case RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CTRL_BLKS);
		}
		case RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_CORECLKSPEEDCHANGE_DATA);
		}
		case RGXFWIF_KCCB_CMD_HEALTH_CHECK:
		case RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT:
		case RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE:
		case RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL:
		{
			/* No command specific data */
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData);
		}
		case RGXFWIF_KCCB_CMD_PHR_CFG:
		{
			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_PHR_CFG_DATA);
+ } + default: + { + /* Invalid (OR) Unused (OR) Newly added command type */ + return 0; /* Error */ + } + } +} + +PVRSRV_ERROR RGXWaitForKCCBSlotUpdate(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32SlotNum, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + + eError = PVRSRVWaitForValueKM( + (IMG_UINT32 __iomem *)&psDevInfo->pui32KernelCCBRtnSlots[ui32SlotNum], + RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, + RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVWaitForValueKM"); + +#if defined(PDUMP) + /* PDumping conditions same as RGXSendCommandRaw for the actual command and poll command to go in harmony */ + if (PDumpIsContCaptureOn()) + { + IMG_BOOL bIsInCaptureRange; + + PDumpIsCaptureFrameKM(&bIsInCaptureRange); + + if ((bIsInCaptureRange || PDUMP_IS_CONTINUOUS(ui32PDumpFlags)) && !PDUMPPOWCMDINTRANS()) + { + PDUMPCOMMENT("Poll on KCCB slot %u for value %u (mask: 0x%x)", ui32SlotNum, + RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED); + + eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBRtnSlotsMemDesc, + ui32SlotNum * sizeof(IMG_UINT32), + RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, + RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags); + PVR_LOG_IF_ERROR(eError, "DevmemPDumpDevmemPol32"); + } + } +#else + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); +#endif + + return eError; +} + +static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_KCCB_CMD *psKCCBCmd, + IMG_UINT32 uiPdumpFlags, + IMG_UINT32 *pui32CmdKCCBSlot) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; + RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl; + IMG_UINT8 *pui8KCCB = psDevInfo->psKernelCCB; + IMG_UINT32 ui32NewWriteOffset; + IMG_UINT32 ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset; + IMG_UINT32 ui32CmdMemCopySize; + +#if !defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(uiPdumpFlags); +#else + IMG_BOOL bPdumpEnabled = IMG_FALSE; + IMG_BOOL 
bPDumpPowTrans = PDUMPPOWCMDINTRANS(); + IMG_BOOL bContCaptureOn = PDumpIsContCaptureOn(); /* client connected or in pdump init phase */ + + if (bContCaptureOn) + { + IMG_BOOL bIsInCaptureRange; + + PDumpIsCaptureFrameKM(&bIsInCaptureRange); + bPdumpEnabled = (bIsInCaptureRange || PDUMP_IS_CONTINUOUS(uiPdumpFlags)) && !bPDumpPowTrans; + + /* in capture range */ + if (bPdumpEnabled) + { + if (!psDevInfo->bDumpedKCCBCtlAlready) + { + /* entering capture range */ + psDevInfo->bDumpedKCCBCtlAlready = IMG_TRUE; + + /* Wait for the live FW to catch up */ + PVR_DPF((PVR_DBG_MESSAGE, "%s: waiting on fw to catch-up, roff: %d, woff: %d", + __func__, + psKCCBCtl->ui32ReadOffset, ui32OldWriteOffset)); + PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)&psKCCBCtl->ui32ReadOffset, + ui32OldWriteOffset, 0xFFFFFFFF, + POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP); + + /* Dump Init state of Kernel CCB control (read and write offset) */ + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Initial state of kernel CCB Control, roff: %d, woff: %d", + psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset); + + DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc, + 0, + sizeof(RGXFWIF_CCB_CTL), + PDUMP_FLAGS_CONTINUOUS); + } + } + } +#endif + +#if defined(SUPPORT_AUTOVZ) + if (!((KM_FW_CONNECTION_IS(READY, psDevInfo) && KM_OS_CONNECTION_IS(READY, psDevInfo)) || + (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && KM_OS_CONNECTION_IS(ACTIVE, psDevInfo)))) + { + PVR_DPF((PVR_DBG_ERROR, "%s: The firmware-driver connection is invalid:" + "driver state = %u / firmware state = %u;" + "expected READY (%u/%u) or ACTIVE (%u/%u);", + __func__, KM_GET_OS_CONNECTION(psDevInfo), KM_GET_FW_CONNECTION(psDevInfo), + RGXFW_CONNECTION_OS_READY, RGXFW_CONNECTION_FW_READY, + RGXFW_CONNECTION_OS_ACTIVE, RGXFW_CONNECTION_FW_ACTIVE)); + eError = PVRSRV_ERROR_PVZ_OSID_IS_OFFLINE; + goto _RGXSendCommandRaw_Exit; + } +#endif + + PVR_ASSERT(sizeof(RGXFWIF_KCCB_CMD) == psKCCBCtl->ui32CmdSize); + if 
(!OSLockIsLocked(psDeviceNode->hPowerLock)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s called without power lock held!", + __func__)); + PVR_ASSERT(OSLockIsLocked(psDeviceNode->hPowerLock)); + } + + /* Acquire a slot in the CCB */ + eError = RGXAcquireKernelCCBSlot(psDevInfo->psKernelCCBCtlMemDesc, psKCCBCtl, &ui32NewWriteOffset); + if (eError != PVRSRV_OK) + { + goto _RGXSendCommandRaw_Exit; + } + + /* Calculate actual size of command to optimize device mem copy */ + ui32CmdMemCopySize = RGXGetCmdMemCopySize(psKCCBCmd->eCmdType); + PVR_LOG_RETURN_IF_FALSE(ui32CmdMemCopySize !=0, "RGXGetCmdMemCopySize failed", PVRSRV_ERROR_INVALID_CCB_COMMAND); + + /* Copy the command into the CCB */ + OSDeviceMemCopy(&pui8KCCB[ui32OldWriteOffset * psKCCBCtl->ui32CmdSize], + psKCCBCmd, ui32CmdMemCopySize); + + /* If non-NULL pui32CmdKCCBSlot passed-in, return the kCCB slot in which the command was enqueued */ + if (pui32CmdKCCBSlot) + { + *pui32CmdKCCBSlot = ui32OldWriteOffset; + + /* Each such command enqueue needs to reset the slot value first. 
This is so that a caller + * doesn't get to see stale/false value in allotted slot */ + psDevInfo->pui32KernelCCBRtnSlots[ui32OldWriteOffset] = RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE; +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(uiPdumpFlags, "Reset kCCB slot number %u", ui32OldWriteOffset); + DevmemPDumpLoadMem(psDevInfo->psKernelCCBRtnSlotsMemDesc, + ui32OldWriteOffset * sizeof(IMG_UINT32), + sizeof(IMG_UINT32), + uiPdumpFlags); +#endif + PVR_DPF((PVR_DBG_MESSAGE, "%s: Device (%p) KCCB slot %u reset with value %u for command type %u", + __func__, psDevInfo, ui32OldWriteOffset, RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE, psKCCBCmd->eCmdType)); + } + + /* ensure kCCB data is written before the offsets */ + OSWriteMemoryBarrier(); + + /* Move past the current command */ + psKCCBCtl->ui32WriteOffset = ui32NewWriteOffset; + /* Force a read-back to memory to avoid posted writes on certain buses */ + (void) psKCCBCtl->ui32WriteOffset; + + +#if defined(PDUMP) + if (bContCaptureOn) + { + /* in capture range */ + if (bPdumpEnabled) + { + /* Dump new Kernel CCB content */ + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump kCCB cmd woff = %d", + ui32OldWriteOffset); + DevmemPDumpLoadMem(psDevInfo->psKernelCCBMemDesc, + ui32OldWriteOffset * psKCCBCtl->ui32CmdSize, + ui32CmdMemCopySize, + PDUMP_FLAGS_CONTINUOUS); + + /* Dump new kernel CCB write offset */ + PDUMPCOMMENTWITHFLAGS(uiPdumpFlags, "Dump kCCBCtl woff: %d", + ui32NewWriteOffset); + DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc, + offsetof(RGXFWIF_CCB_CTL, ui32WriteOffset), + sizeof(IMG_UINT32), + uiPdumpFlags); + + /* mimic the read-back of the write from above */ + DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc, + offsetof(RGXFWIF_CCB_CTL, ui32WriteOffset), + ui32NewWriteOffset, + 0xFFFFFFFF, + PDUMP_POLL_OPERATOR_EQUAL, + uiPdumpFlags); + + } + /* out of capture range */ + else + { + eError = RGXPdumpDrainKCCB(psDevInfo, ui32OldWriteOffset); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXPdumpDrainKCCB", 
_RGXSendCommandRaw_Exit); + } + } +#endif + + + PDUMPCOMMENTWITHFLAGS(uiPdumpFlags, "MTS kick for kernel CCB"); + /* + * Kick the MTS to schedule the firmware. + */ + __MTSScheduleWrite(psDevInfo, MTS_SCHEDULE_DM_VAL & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK); + + PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_MTS_SCHEDULE, MTS_SCHEDULE_DM_VAL & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK, uiPdumpFlags); + +#if defined(NO_HARDWARE) + /* keep the roff updated because fw isn't there to update it */ + psKCCBCtl->ui32ReadOffset = psKCCBCtl->ui32WriteOffset; +#endif + +_RGXSendCommandRaw_Exit: + return eError; +} + +/****************************************************************************** + FUNCTION : _AllocDeferredCommand + + PURPOSE : Allocate a KCCB command and add it to KCCB deferred list + + PARAMETERS : psDevInfo RGX device info + : eKCCBType Firmware Command type + : psKCCBCmd Firmware Command + : uiPdumpFlags Pdump flags + + RETURNS : PVRSRV_OK If all went good, PVRSRV_ERROR_RETRY otherwise. +******************************************************************************/ +static PVRSRV_ERROR _AllocDeferredCommand(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_KCCB_CMD *psKCCBCmd, + IMG_UINT32 uiPdumpFlags) +{ + RGX_DEFERRED_KCCB_CMD *psDeferredCommand; + OS_SPINLOCK_FLAGS uiFlags; + + psDeferredCommand = OSAllocMem(sizeof(*psDeferredCommand)); + + if (!psDeferredCommand) + { + PVR_DPF((PVR_DBG_ERROR, + "Deferring a KCCB command failed: allocation failure: requesting retry")); + return PVRSRV_ERROR_RETRY; + } + + psDeferredCommand->sKCCBcmd = *psKCCBCmd; + psDeferredCommand->uiPdumpFlags = uiPdumpFlags; + psDeferredCommand->psDevInfo = psDevInfo; + + OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + dllist_add_to_tail(&(psDevInfo->sKCCBDeferredCommandsListHead), &(psDeferredCommand->sListNode)); + psDevInfo->ui32KCCBDeferredCommandsCount++; + OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + + return PVRSRV_OK; +} + 
+/****************************************************************************** + FUNCTION : _FreeDeferredCommand + + PURPOSE : Remove from the deferred list the sent deferred KCCB command + + PARAMETERS : psNode Node in deferred list + : psDeferredKCCBCmd KCCB Command to free + + RETURNS : None +******************************************************************************/ +static void _FreeDeferredCommand(DLLIST_NODE *psNode, RGX_DEFERRED_KCCB_CMD *psDeferredKCCBCmd) +{ + dllist_remove_node(psNode); + psDeferredKCCBCmd->psDevInfo->ui32KCCBDeferredCommandsCount--; + OSFreeMem(psDeferredKCCBCmd); +} + +/****************************************************************************** + FUNCTION : RGXSendCommandsFromDeferredList + + PURPOSE : Try send KCCB commands in deferred list to KCCB + Should be called by holding PowerLock + + PARAMETERS : psDevInfo RGX device info + : bPoll Poll for space in KCCB + + RETURNS : PVRSRV_OK If all commands in deferred list are sent to KCCB, + PVRSRV_ERROR_KERNEL_CCB_FULL otherwise. +******************************************************************************/ +PVRSRV_ERROR RGXSendCommandsFromDeferredList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bPoll) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + DLLIST_NODE *psNode, *psNext; + RGX_DEFERRED_KCCB_CMD *psTempDeferredKCCBCmd; + DLLIST_NODE sCommandList; + OS_SPINLOCK_FLAGS uiFlags; + + PVR_ASSERT(PVRSRVPwrLockIsLockedByMe(psDevInfo->psDeviceNode)); + + /* !!! Important !!! + * + * The idea of moving the whole list hLockKCCBDeferredCommandsList below + * to the temporary list is only valid under the principle that all of the + * operations are also protected by the power lock. It must be held + * so that the order of the commands doesn't get messed up while we're + * performing the operations on the local list. 
+ * + * The necessity of releasing the hLockKCCBDeferredCommandsList comes from + * the fact that _FreeDeferredCommand() is allocating memory and it can't + * be done in atomic context (inside section protected by a spin lock). + * + * We're using spin lock here instead of mutex to quickly perform a check + * if the list is empty in MISR without a risk that the MISR is going + * to sleep due to a lock. + */ + + /* move the whole list to a local list so it can be processed without lock */ + OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + dllist_replace_head(&psDevInfo->sKCCBDeferredCommandsListHead, &sCommandList); + OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + if (dllist_is_empty(&sCommandList)) + { + return PVRSRV_OK; + } + + /* For every deferred KCCB command, try to send it*/ + dllist_foreach_node(&sCommandList, psNode, psNext) + { + psTempDeferredKCCBCmd = IMG_CONTAINER_OF(psNode, RGX_DEFERRED_KCCB_CMD, sListNode); + eError = RGXSendCommandRaw(psTempDeferredKCCBCmd->psDevInfo, + &psTempDeferredKCCBCmd->sKCCBcmd, + psTempDeferredKCCBCmd->uiPdumpFlags, + NULL /* We surely aren't interested in kCCB slot number of deferred command */); + if (eError != PVRSRV_OK) + { + if (!bPoll) + { + eError = PVRSRV_ERROR_KERNEL_CCB_FULL; + goto cleanup_; + } + break; + } + + _FreeDeferredCommand(psNode, psTempDeferredKCCBCmd); + } + + if (bPoll) + { + PVRSRV_ERROR eErrPollForKCCBSlot; + + /* Don't overwrite eError because if RGXPollKernelCCBSlot returns OK and the + * outer loop times-out, we'll still want to return KCCB_FULL to caller + */ + eErrPollForKCCBSlot = RGXPollKernelCCBSlot(psDevInfo->psKernelCCBCtlMemDesc, + psDevInfo->psKernelCCBCtl); + if (eErrPollForKCCBSlot == PVRSRV_ERROR_KERNEL_CCB_FULL) + { + eError = PVRSRV_ERROR_KERNEL_CCB_FULL; + goto cleanup_; + } + } + } END_LOOP_UNTIL_TIMEOUT(); + +cleanup_: + /* if the local list is not empty put it back to the deferred list 
head + * so that the old order of commands is retained */ + OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + dllist_insert_list_at_head(&psDevInfo->sKCCBDeferredCommandsListHead, &sCommandList); + OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + + return eError; +} + +PVRSRV_ERROR RGXSendCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_KCCB_CMD *psKCCBCmd, + IMG_UINT32 uiPdumpFlags, + IMG_UINT32 *pui32CmdKCCBSlot) +{ + IMG_BOOL bPoll = (pui32CmdKCCBSlot != NULL); + PVRSRV_ERROR eError; + + /* + * First try to Flush all the cmds in deferred list. + * + * We cannot defer an incoming command if the caller is interested in + * knowing the command's kCCB slot: it plans to poll/wait for a + * response from the FW just after the command is enqueued, so we must + * poll for space to be available. + */ + eError = RGXSendCommandsFromDeferredList(psDevInfo, bPoll); + if (eError == PVRSRV_OK) + { + eError = RGXSendCommandRaw(psDevInfo, + psKCCBCmd, + uiPdumpFlags, + pui32CmdKCCBSlot); + } + + /* + * If we don't manage to enqueue one of the deferred commands or the command + * passed as argument because the KCCB is full, insert the latter into the deferred commands list. + * The deferred commands will also be flushed eventually by: + * - one more KCCB command sent for any DM + * - RGX_MISRHandler_CheckFWActivePowerState + */ + if (eError == PVRSRV_ERROR_KERNEL_CCB_FULL) + { + if (pui32CmdKCCBSlot == NULL) + { + eError = _AllocDeferredCommand(psDevInfo, psKCCBCmd, uiPdumpFlags); + } + else + { + /* Let the caller retry. Otherwise if we deferred the command and returned OK, + * the caller can end up looking in a stale CCB slot. + */ + PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't flush the deferred queue for a command (Type:%d) " + "that needed the kCCB command slot number! 
Returning kCCB FULL", + __func__, psKCCBCmd->eCmdType)); + } + } + + return eError; +} + +PVRSRV_ERROR RGXSendCommandWithPowLockAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_KCCB_CMD *psKCCBCmd, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 *pui32CmdKCCBSlot) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; + + /* Ensure Rogue is powered up before kicking MTS */ + eError = PVRSRVPowerLock(psDeviceNode); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: failed to acquire powerlock (%s)", + __func__, + PVRSRVGetErrorString(eError))); + + goto _PVRSRVPowerLock_Exit; + } + + PDUMPPOWCMDSTART(); + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, + PVRSRV_DEV_POWER_STATE_ON, + IMG_FALSE); + PDUMPPOWCMDEND(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition Rogue to ON (%s)", + __func__, + PVRSRVGetErrorString(eError))); + + goto _PVRSRVSetDevicePowerStateKM_Exit; + } + + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, + psKCCBCmd, + ui32PDumpFlags, + pui32CmdKCCBSlot); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to schedule command (%s)", + __func__, + PVRSRVGetErrorString(eError))); +#if defined(DEBUG) + /* PVRSRVDebugRequest must be called without powerlock */ + PVRSRVPowerUnlock(psDeviceNode); + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + goto _PVRSRVPowerLock_Exit; +#endif + } + +_PVRSRVSetDevicePowerStateKM_Exit: + PVRSRVPowerUnlock(psDeviceNode); + +_PVRSRVPowerLock_Exit: + return eError; +} + +void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*) hCmdCompHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + OSScheduleMISR(psDevInfo->hProcessQueuesMISR); +} + +/*! 
+******************************************************************************* + + @Function RGX_MISRHandler_ScheduleProcessQueues + + @Description - Sends uncounted kick to all the DMs (the FW will process all + the queue for all the DMs) +******************************************************************************/ +static void RGX_MISRHandler_ScheduleProcessQueues(void *pvData) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = pvData; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + PVRSRV_DEV_POWER_STATE ePowerState; + + eError = PVRSRVPowerLock(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", + __func__, PVRSRVGetErrorString(eError))); + return; + } + + /* Check whether it's worth waking up the GPU */ + eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); + + if (!PVRSRV_VZ_MODE_IS(GUEST) && + (eError == PVRSRV_OK) && (ePowerState == PVRSRV_DEV_POWER_STATE_OFF)) + { + /* For now, guest drivers will always wake-up the GPU */ + RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb; + IMG_BOOL bGPUHasWorkWaiting; + + bGPUHasWorkWaiting = + (RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord) == RGXFWIF_GPU_UTIL_STATE_BLOCKED); + + if (!bGPUHasWorkWaiting) + { + /* all queues are empty, don't wake up the GPU */ + PVRSRVPowerUnlock(psDeviceNode); + return; + } + } + + PDUMPPOWCMDSTART(); + /* wake up the GPU */ + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, + PVRSRV_DEV_POWER_STATE_ON, + IMG_FALSE); + PDUMPPOWCMDEND(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition Rogue to ON (%s)", + __func__, PVRSRVGetErrorString(eError))); + + PVRSRVPowerUnlock(psDeviceNode); + return; + } + + /* uncounted kick to the FW */ + HTBLOGK(HTB_SF_MAIN_KICK_UNCOUNTED); + __MTSScheduleWrite(psDevInfo, (MTS_SCHEDULE_DM_VAL & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED); + + 
PVRSRVPowerUnlock(psDeviceNode); +} + +PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode) +{ + return OSInstallMISR(phMISR, + RGX_MISRHandler_ScheduleProcessQueues, + psDeviceNode, + "RGX_ScheduleProcessQueues"); +} + +PVRSRV_ERROR RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_DM eKCCBType, + RGXFWIF_KCCB_CMD *psKCCBCmd, + IMG_UINT32 ui32CacheOpFence, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 *pui32CmdKCCBSlot) +{ + PVRSRV_ERROR eError; + IMG_UINT32 uiMMUSyncUpdate; + + /* Don't send the command/power up request if the device is de-initialising. + * The de-init thread could destroy the device whilst the power up + * sequence below is accessing the HW registers. + */ + if (unlikely((psDevInfo == NULL) || + (psDevInfo->psDeviceNode == NULL) || + (psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT))) + { + return PVRSRV_ERROR_INVALID_DEVICE; + } + + eError = CacheOpFence(eKCCBType, ui32CacheOpFence); + if (unlikely(eError != PVRSRV_OK)) goto RGXScheduleCommand_exit; + +#if defined(SUPPORT_VALIDATION) + /* For validation, force the core to different dust count states with each kick */ + if ((eKCCBType == RGXFWIF_DM_GEOM) || (eKCCBType == RGXFWIF_DM_CDM)) + { + if (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN) + { + IMG_UINT32 ui32NumDusts = RGXGetNextDustCount(&psDevInfo->sDustReqState, psDevInfo->sDevFeatureCfg.ui32MAXDustCount); + PVRSRVDeviceGPUUnitsPowerChange(psDevInfo->psDeviceNode, ui32NumDusts); + } + } +#endif + + /* PVRSRVPowerLock guarantees atomicity between commands. This is helpful + in a scenario with several applications allocating resources. 
*/ + eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", + __func__, PVRSRVGetErrorString(eError))); + + /* If system is found powered OFF, Retry scheduling the command */ + if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)) + { + eError = PVRSRV_ERROR_RETRY; + } + + goto RGXScheduleCommand_exit; + } + + if (unlikely(psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT)) + { + /* If we have the power lock the device is valid but the deinit + * thread could be waiting for the lock. */ + PVRSRVPowerUnlock(psDevInfo->psDeviceNode); + return PVRSRV_ERROR_INVALID_DEVICE; + } + + /* Ensure device is powered up before sending any commands */ + PDUMPPOWCMDSTART(); + eError = PVRSRVSetDevicePowerStateKM(psDevInfo->psDeviceNode, + PVRSRV_DEV_POWER_STATE_ON, + IMG_FALSE); + PDUMPPOWCMDEND(); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition RGX to ON (%s)", + __func__, PVRSRVGetErrorString(eError))); + goto _PVRSRVSetDevicePowerStateKM_Exit; + } + + eError = RGXPreKickCacheCommand(psDevInfo, NULL, eKCCBType, &uiMMUSyncUpdate, IMG_FALSE); + if (unlikely(eError != PVRSRV_OK)) goto _PVRSRVSetDevicePowerStateKM_Exit; + + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, pui32CmdKCCBSlot); + if (unlikely(eError != PVRSRV_OK)) goto _PVRSRVSetDevicePowerStateKM_Exit; + +_PVRSRVSetDevicePowerStateKM_Exit: + PVRSRVPowerUnlock(psDevInfo->psDeviceNode); + +RGXScheduleCommand_exit: + return eError; +} + +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) +/* + * RGXCheckCheckpointCCB + */ +void RGXCheckCheckpointCCB(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_BOOL bSignal = IMG_FALSE; + + PRGXFWIF_UFO_ADDR *psFwUFOAddr; + RGXFWIF_CCB_CTL *psChptCCBCtl = psDevInfo->psCheckpointCCBCtl; + IMG_UINT8 *psChptCCB = psDevInfo->psCheckpointCCB; + 
IMG_UINT32 ui32WriteOffset, ui32ReadOffset, ui32WrapMask = psChptCCBCtl->ui32WrapMask; + IMG_UINT32 uiFwAddr; + PVRSRV_SYNC_CHECKPOINT_STATE uiChptState; + + /* + * Check if the firmware has signalled a full sync state check. + */ + if (psDevInfo->psRGXFWIfFwOsData->ui32FWSyncCheckMark != psDevInfo->psRGXFWIfFwOsData->ui32HostSyncCheckMark) + { + /* + * Update the offsets first so that if the firmware tries to write + * another checkpoint it is not missed by the check state. + */ + psDevInfo->psRGXFWIfFwOsData->ui32HostSyncCheckMark = psDevInfo->psRGXFWIfFwOsData->ui32FWSyncCheckMark; + psChptCCBCtl->ui32ReadOffset = psChptCCBCtl->ui32WriteOffset; + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Checkpoint CCB full, performing full sync checkpoint state check", __func__)); + + SyncCheckpointCheckState(); + bSignal = IMG_TRUE; + +#if defined(SUPPORT_BUFFER_SYNC) + pvr_buffer_sync_check_state(); +#endif + + goto exit_signal; + } + + /* + * Take a snapshot of the current CCB ctl pointers at the start of + * processing. + */ + ui32WriteOffset = psChptCCBCtl->ui32WriteOffset; + ui32ReadOffset = psChptCCBCtl->ui32ReadOffset; + ui32WrapMask = psChptCCBCtl->ui32WrapMask; + + while (ui32ReadOffset != ui32WriteOffset) + { + /* Point to the next checkpoint address */ + psFwUFOAddr = ((PRGXFWIF_UFO_ADDR *)psChptCCB) + ui32ReadOffset; + + /* + * State is encoded in bit 1 of ufo address + * 1 = signalled, 0 = errored + */ + uiChptState = PVRSRV_SYNC_CHECKPOINT_ERRORED; + uiFwAddr = psFwUFOAddr->ui32Addr; + + if (uiFwAddr & 0x1U) + { + uiChptState = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + } + uiFwAddr |= 0x1U; + + if (SyncCheckpointUFOHasSignalled(psDeviceNode, uiFwAddr, uiChptState)) + { + bSignal = IMG_TRUE; + } + else +#if defined(SUPPORT_BUFFER_SYNC) + if (pvr_buffer_sync_checkpoint_ufo_has_signalled(uiFwAddr, uiChptState)) + { + /* Buffer sync does not need a signal call. 
*/ + } + else +#endif + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware signalled checkpoint (%#08X) with no host backing", __func__, uiFwAddr)); + } + + /* Update read offset */ + ui32ReadOffset = (ui32ReadOffset + 1) & ui32WrapMask; + } + + psChptCCBCtl->ui32ReadOffset = ui32ReadOffset; + +exit_signal: + if (bSignal) + { + SyncCheckpointSignalWaiters(); + } +} +#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */ + +/* + * RGXCheckFirmwareCCB + */ +void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_FWCCB_CMD *psFwCCBCmd; + + RGXFWIF_CCB_CTL *psFWCCBCtl = psDevInfo->psFirmwareCCBCtl; + IMG_UINT8 *psFWCCB = psDevInfo->psFirmwareCCB; + + while (psFWCCBCtl->ui32ReadOffset != psFWCCBCtl->ui32WriteOffset) + { + /* Point to the next command */ + psFwCCBCmd = ((RGXFWIF_FWCCB_CMD *)psFWCCB) + psFWCCBCtl->ui32ReadOffset; + + HTBLOGK(HTB_SF_MAIN_FWCCB_CMD, psFwCCBCmd->eCmdType); + switch (psFwCCBCmd->eCmdType) + { + case RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING: + { + if (psDevInfo->bPDPEnabled) + { + PDUMP_PANIC(ZSBUFFER_BACKING, "Request to add backing to ZSBuffer"); + } + RGXProcessRequestZSBufferBacking(psDevInfo, + psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID); + break; + } + + case RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING: + { + if (psDevInfo->bPDPEnabled) + { + PDUMP_PANIC(ZSBUFFER_UNBACKING, "Request to remove backing from ZSBuffer"); + } + RGXProcessRequestZSBufferUnbacking(psDevInfo, + psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID); + break; + } + + case RGXFWIF_FWCCB_CMD_FREELIST_GROW: + { + if (psDevInfo->bPDPEnabled) + { + PDUMP_PANIC(FREELIST_GROW, "Request to grow the free list"); + } + RGXProcessRequestGrow(psDevInfo, + psFwCCBCmd->uCmdData.sCmdFreeListGS.ui32FreelistID); + break; + } + + case RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION: + { + if (psDevInfo->bPDPEnabled) + { + PDUMP_PANIC(FREELISTS_RECONSTRUCTION, "Request to reconstruct free lists"); + } + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Freelist 
reconstruction request (%d) for %d freelists", + __func__, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount)); + } + else + { + PVR_ASSERT(psDevInfo->psRGXFWIfHWRInfoBufCtl); + PVR_DPF((PVR_DBG_MESSAGE, "%s: Freelist reconstruction request (%d/%d) for %d freelists", + __func__, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1, + psDevInfo->psRGXFWIfHWRInfoBufCtl->ui32HwrCounter+1, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount)); + } + + RGXProcessRequestFreelistsReconstruction(psDevInfo, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.aui32FreelistIDs); + break; + } + + case RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION: + { + DLLIST_NODE *psNode, *psNext; + RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA *psCmdContextResetNotification = + &psFwCCBCmd->uCmdData.sCmdContextResetNotification; + IMG_UINT32 ui32ServerCommonContextID = + psCmdContextResetNotification->ui32ServerCommonContextID; + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext = NULL; + + OSWRLockAcquireRead(psDevInfo->hCommonCtxtListLock); + dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext) + { + RGX_SERVER_COMMON_CONTEXT *psThisContext = + IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode); + + if (psThisContext->ui32ContextID == ui32ServerCommonContextID) + { + psServerCommonContext = psThisContext; + break; + } + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Context 0x%p reset (ID=0x%08x, Reason=%d, JobRef=0x%08x)", + __func__, + psServerCommonContext, + psCmdContextResetNotification->ui32ServerCommonContextID, + (IMG_UINT32)(psCmdContextResetNotification->eResetReason), + psCmdContextResetNotification->ui32ResetJobRef)); + + if (psServerCommonContext != NULL) + { + psServerCommonContext->eLastResetReason = psCmdContextResetNotification->eResetReason; + 
psServerCommonContext->ui32LastResetJobRef = psCmdContextResetNotification->ui32ResetJobRef; + } + OSWRLockReleaseRead(psDevInfo->hCommonCtxtListLock); + + if (psCmdContextResetNotification->bPageFault) + { + DevmemIntPFNotify(psDevInfo->psDeviceNode, + psCmdContextResetNotification->ui64PCAddress, + psCmdContextResetNotification->sFaultAddress); + } + break; + } + + case RGXFWIF_FWCCB_CMD_DEBUG_DUMP: + { + PVRSRV_ERROR eError; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + OSAtomicWrite(&psDevInfo->psDeviceNode->eDebugDumpRequested, PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE); + eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to signal FW Cmd debug dump event, dumping now instead", __func__)); + PVRSRVDebugRequest(psDevInfo->psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + } + break; + } + + case RGXFWIF_FWCCB_CMD_UPDATE_STATS: + { +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + IMG_PID pidTmp = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.pidOwner; + IMG_INT32 i32AdjustmentValue = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.i32AdjustmentValue; + + switch (psFwCCBCmd->uCmdData.sCmdUpdateStatsData.eElementToUpdate) + { + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS: + { + PVRSRVStatsUpdateRenderContextStats(i32AdjustmentValue,0,0,0,0,0,pidTmp); + break; + } + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY: + { + PVRSRVStatsUpdateRenderContextStats(0,i32AdjustmentValue,0,0,0,0,pidTmp); + break; + } + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES: + { + PVRSRVStatsUpdateRenderContextStats(0,0,i32AdjustmentValue,0,0,0,pidTmp); + break; + } + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES: + { + PVRSRVStatsUpdateRenderContextStats(0,0,0,i32AdjustmentValue,0,0,pidTmp); + break; + } + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES: + { + PVRSRVStatsUpdateRenderContextStats(0,0,0,0,i32AdjustmentValue,0,pidTmp); + break; + } + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES: + { + 
PVRSRVStatsUpdateRenderContextStats(0,0,0,0,0,i32AdjustmentValue,pidTmp); + break; + } + } +#endif + break; + } + case RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE: + { +#if defined(SUPPORT_PDVFS) + PDVFS_PROCESS_CORE_CLK_RATE_CHANGE(psDevInfo, + psFwCCBCmd->uCmdData.sCmdCoreClkRateChange.ui32CoreClkRate); +#endif + break; + } + + case RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART: + { + if (psDevInfo->psRGXFWIfFwSysData != NULL && + psDevInfo->psRGXFWIfFwSysData->ePowState != RGXFWIF_POW_OFF) + { + PVRSRV_ERROR eError; + + /* Power down... */ + eError = PVRSRVSetDeviceSystemPowerState(psDevInfo->psDeviceNode, + PVRSRV_SYS_POWER_STATE_OFF); + if (eError == PVRSRV_OK) + { + /* Clear the FW faulted flags... */ + psDevInfo->psRGXFWIfFwSysData->ui32HWRStateFlags &= ~(RGXFWIF_HWR_FW_FAULT|RGXFWIF_HWR_RESTART_REQUESTED); + + /* Power back up again... */ + eError = PVRSRVSetDeviceSystemPowerState(psDevInfo->psDeviceNode, + PVRSRV_SYS_POWER_STATE_ON); + + /* Send a dummy KCCB command to ensure the FW wakes up and checks the queues... 
*/ + if (eError == PVRSRV_OK) + { + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXFWHealthCheckCmd(psDevInfo); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + } + } + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed firmware restart (%s)", + __func__, PVRSRVGetErrorString(eError))); + } + } + break; + } + + default: + { + /* unknown command */ + PVR_DPF((PVR_DBG_WARNING, "%s: Unknown Command (eCmdType=0x%08x)", + __func__, psFwCCBCmd->eCmdType)); + /* Assert on magic value corruption */ + PVR_ASSERT((((IMG_UINT32)psFwCCBCmd->eCmdType & RGX_CMD_MAGIC_DWORD_MASK) >> RGX_CMD_MAGIC_DWORD_SHIFT) == RGX_CMD_MAGIC_DWORD); + } + } + + /* Update read offset */ + psFWCCBCtl->ui32ReadOffset = (psFWCCBCtl->ui32ReadOffset + 1) & psFWCCBCtl->ui32WrapMask; + } +} + +/* + * PVRSRVRGXFrameworkCopyCommand +*/ +PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(DEVMEM_MEMDESC *psFWFrameworkMemDesc, + IMG_PBYTE pbyGPUFRegisterList, + IMG_UINT32 ui32FrameworkRegisterSize) +{ + PVRSRV_ERROR eError; + RGXFWIF_RF_REGISTERS *psRFReg; + + eError = DevmemAcquireCpuVirtAddr(psFWFrameworkMemDesc, + (void **)&psRFReg); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map firmware render context state (%u)", + __func__, eError)); + return eError; + } + + OSDeviceMemCopy(psRFReg, pbyGPUFRegisterList, ui32FrameworkRegisterSize); + + /* Release the CPU mapping */ + DevmemReleaseCpuVirtAddr(psFWFrameworkMemDesc); + + /* + * Dump the FW framework buffer + */ +#if defined(PDUMP) + PDUMPCOMMENT("Dump FWFramework buffer"); + DevmemPDumpLoadMem(psFWFrameworkMemDesc, 0, ui32FrameworkRegisterSize, PDUMP_FLAGS_CONTINUOUS); +#endif + + return PVRSRV_OK; +} + +/* + * PVRSRVRGXFrameworkCreateKM +*/ +PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC **ppsFWFrameworkMemDesc, + IMG_UINT32 ui32FrameworkCommandSize) +{ + PVRSRV_ERROR eError; + 
PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + /* + Allocate device memory for the firmware GPU framework state. + Sufficient info to kick one or more DMs should be contained in this buffer + */ + PDUMPCOMMENT("Allocate Rogue firmware framework state"); + + eError = DevmemFwAllocate(psDevInfo, + ui32FrameworkCommandSize, + RGX_FWCOMCTX_ALLOCFLAGS, + "FwGPUFrameworkState", + ppsFWFrameworkMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate firmware framework state (%u)", + __func__, eError)); + return eError; + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR IMG_CALLCONV RGXPollForGPCommandCompletion(PVRSRV_DEVICE_NODE *psDevNode, + volatile IMG_UINT32 __iomem *pui32LinMemAddr, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_CCB_CTL *psKCCBCtl; + IMG_UINT32 ui32CurrentQueueLength, ui32MaxRetries; + PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; + + psKCCBCtl = psDevInfo->psKernelCCBCtl; + ui32CurrentQueueLength = (psKCCBCtl->ui32WrapMask+1 + + psKCCBCtl->ui32WriteOffset - + psKCCBCtl->ui32ReadOffset) & psKCCBCtl->ui32WrapMask; + ui32CurrentQueueLength += psDevInfo->ui32KCCBDeferredCommandsCount; + + for (ui32MaxRetries = ui32CurrentQueueLength + 1; + ui32MaxRetries > 0; + ui32MaxRetries--) + { + + /* + * PVRSRVPollForValueKM flags are set to POLL_FLAG_NONE in this case so that the function + * does not generate an error message. In this case, the PollForValueKM is expected to + * timeout as there is work ongoing on the GPU which may take longer than the timeout period. + */ + eError = PVRSRVPollForValueKM(psDevNode, pui32LinMemAddr, ui32Value, ui32Mask, POLL_FLAG_NONE); + if (eError != PVRSRV_ERROR_TIMEOUT) + { + break; + } + + RGXSendCommandsFromDeferredList(psDevInfo, IMG_FALSE); + } + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed! 
Error(%s) CPU linear address(%p) Expected value(%u)", + __func__, PVRSRVGetErrorString(eError), + pui32LinMemAddr, ui32Value)); + } + + return eError; +} + +PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Config, + IMG_UINT32 *pui32ConfigState, + IMG_BOOL bSetNotClear) +{ + PVRSRV_ERROR eError; + PVRSRV_DEV_POWER_STATE ePowerState; + RGXFWIF_KCCB_CMD sStateFlagCmd = { 0 }; + PVRSRV_DEVICE_NODE *psDeviceNode; + RGXFWIF_SYSDATA *psSysData; + IMG_UINT32 ui32kCCBCommandSlot; + IMG_BOOL bWaitForFwUpdate = IMG_FALSE; + + if (!psDevInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + psDeviceNode = psDevInfo->psDeviceNode; + psSysData = psDevInfo->psRGXFWIfFwSysData; + + if (NULL == psSysData) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Fw Sys Config is not mapped into CPU space", __func__)); + return PVRSRV_ERROR_INVALID_CPU_ADDR; + } + + /* apply change and ensure the new data is written to memory + * before requesting the FW to read it + */ + ui32Config = ui32Config & RGXFWIF_INICFG_ALL; + if (bSetNotClear) + { + psSysData->ui32ConfigFlags |= ui32Config; + } + else + { + psSysData->ui32ConfigFlags &= ~ui32Config; + } + + /* return current/new value to caller */ + if (pui32ConfigState) + { + *pui32ConfigState = psSysData->ui32ConfigFlags; + } + + OSMemoryBarrier(); + + eError = PVRSRVPowerLock(psDeviceNode); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock"); + + /* notify FW to update setting */ + eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); + + if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF)) + { + /* Ask the FW to update its cached version of the value */ + sStateFlagCmd.eCmdType = RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL; + + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, + &sStateFlagCmd, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSendCommandAndGetKCCBSlot", unlock); + bWaitForFwUpdate = IMG_TRUE; + } + +unlock: + PVRSRVPowerUnlock(psDeviceNode); + if 
(bWaitForFwUpdate) + { + /* Wait for the value to be updated as the FW validates + * the parameters and modifies the ui32ConfigFlags + * accordingly + * (for completeness as registered callbacks should also + * not permit invalid transitions) + */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); + } + return eError; +} + +static +PVRSRV_ERROR RGXScheduleCleanupCommand(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_DM eDM, + RGXFWIF_KCCB_CMD *psKCCBCmd, + RGXFWIF_CLEANUP_TYPE eCleanupType, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32kCCBCommandSlot; + + psKCCBCmd->eCmdType = RGXFWIF_KCCB_CMD_CLEANUP; + psKCCBCmd->uCmdData.sCleanupData.eCleanupType = eCleanupType; + + /* + Send the cleanup request to the firmware. If the resource is still busy + the firmware will tell us and we'll drop out with a retry. + */ + eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, + eDM, + psKCCBCmd, + 0, + ui32PDumpFlags, + &ui32kCCBCommandSlot); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", fail_command); + + /* Wait for command kCCB slot to be updated by FW */ + PDUMPCOMMENT("Wait for the firmware to reply to the cleanup command"); + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, + ui32PDumpFlags); + /* + If the firmware hasn't got back to us in a timely manner + then bail and let the caller retry the command. + */ + if (eError == PVRSRV_ERROR_TIMEOUT) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: RGXWaitForKCCBSlotUpdate timed out. Dump debug information.", + __func__)); + + eError = PVRSRV_ERROR_RETRY; +#if defined(DEBUG) + PVRSRVDebugRequest(psDevInfo->psDeviceNode, + DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); +#endif + goto fail_poll; + } + else if (eError != PVRSRV_OK) + { + goto fail_poll; + } + +#if defined(PDUMP) + /* + * The cleanup request to the firmware will tell us if a given resource is busy or not. 
+ * If the RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY flag is set, this means that the resource is + * still in use. In this case we return a PVRSRV_ERROR_RETRY error to the client drivers + * and they will re-issue the cleanup request until it succeed. + * + * Since this retry mechanism doesn't work for pdumps, client drivers should ensure + * that cleanup requests are only submitted if the resource is unused. + * If this is not the case, the following poll will block infinitely, making sure + * the issue doesn't go unnoticed. + */ + PDUMPCOMMENT("Cleanup: If this poll fails, the following resource is still in use (DM=%u, type=%u, address=0x%08x), which is incorrect in pdumps", + eDM, + psKCCBCmd->uCmdData.sCleanupData.eCleanupType, + psKCCBCmd->uCmdData.sCleanupData.uCleanupData.psContext.ui32Addr); + eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBRtnSlotsMemDesc, + ui32kCCBCommandSlot * sizeof(IMG_UINT32), + 0, + RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY, + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags); + PVR_LOG_IF_ERROR(eError, "DevmemPDumpDevmemPol32"); +#endif + + /* + If the command has was run but a resource was busy, then the request + will need to be retried. 
+ */ + if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY)) + { + if (psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE) + { + PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__)); + } + eError = PVRSRV_ERROR_RETRY; + goto fail_requestbusy; + } + + return PVRSRV_OK; + +fail_requestbusy: +fail_poll: +fail_command: + PVR_ASSERT(eError != PVRSRV_OK); + + return eError; +} + +/* + RGXRequestCommonContextCleanUp +*/ +PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + RGXFWIF_DM eDM, + IMG_UINT32 ui32PDumpFlags) +{ + RGXFWIF_KCCB_CMD sRCCleanUpCmd = {0}; + PVRSRV_ERROR eError; + PRGXFWIF_FWCOMMONCONTEXT psFWCommonContextFWAddr; + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice; + + /* Force retry if this context's CCB is currently being dumped + * as part of the stalled CCB debug */ + if (psDevInfo->pvEarliestStalledClientCCB == (void*)psServerCommonContext->psClientCCB) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Forcing retry as psDevInfo->pvEarliestStalledClientCCB = psServerCommonContext->psClientCCB <%p>", + __func__, + (void*)psServerCommonContext->psClientCCB)); + return PVRSRV_ERROR_RETRY; + } + + psFWCommonContextFWAddr = FWCommonContextGetFWAddress(psServerCommonContext); +#if defined(PDUMP) + PDUMPCOMMENT("Common ctx cleanup Request DM%d [context = 0x%08x]", + eDM, psFWCommonContextFWAddr.ui32Addr); + PDUMPCOMMENT("Wait for CCB to be empty before common ctx cleanup"); + + RGXCCBPDumpDrainCCB(FWCommonContextGetClientCCB(psServerCommonContext), ui32PDumpFlags); +#endif + + /* Setup our command data, the cleanup call will fill in the rest */ + sRCCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psContext = psFWCommonContextFWAddr; + + /* Request cleanup of the firmware resource */ + eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice, + 
eDM, + &sRCCleanUpCmd, + RGXFWIF_CLEANUP_FWCOMMONCONTEXT, + ui32PDumpFlags); + + if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule a memory context cleanup with error (%u)", + __func__, eError)); + } + + return eError; +} + +/* + * RGXFWRequestHWRTDataCleanUp + */ + +PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, + PRGXFWIF_HWRTDATA psHWRTData) +{ + RGXFWIF_KCCB_CMD sHWRTDataCleanUpCmd = {0}; + PVRSRV_ERROR eError; + + PDUMPCOMMENT("HW RTData cleanup Request [HWRTData = 0x%08x]", psHWRTData.ui32Addr); + + sHWRTDataCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psHWRTData = psHWRTData; + + eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice, + RGXFWIF_DM_GP, + &sHWRTDataCleanUpCmd, + RGXFWIF_CLEANUP_HWRTDATA, + PDUMP_FLAGS_NONE); + + if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule a HWRTData cleanup with error (%u)", + __func__, eError)); + } + + return eError; +} + +/* + RGXFWRequestFreeListCleanUp +*/ +PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo, + PRGXFWIF_FREELIST psFWFreeList) +{ + RGXFWIF_KCCB_CMD sFLCleanUpCmd = {0}; + PVRSRV_ERROR eError; + + PDUMPCOMMENT("Free list cleanup Request [FreeList = 0x%08x]", psFWFreeList.ui32Addr); + + /* Setup our command data, the cleanup call will fill in the rest */ + sFLCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psFreelist = psFWFreeList; + + /* Request cleanup of the firmware resource */ + eError = RGXScheduleCleanupCommand(psDevInfo, + RGXFWIF_DM_GP, + &sFLCleanUpCmd, + RGXFWIF_CLEANUP_FREELIST, + PDUMP_FLAGS_NONE); + + if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule a memory context cleanup with error (%u)", + __func__, eError)); + } + + return eError; +} + +/* + RGXFWRequestZSBufferCleanUp +*/ +PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO 
*psDevInfo, + PRGXFWIF_ZSBUFFER psFWZSBuffer) +{ + RGXFWIF_KCCB_CMD sZSBufferCleanUpCmd = {0}; + PVRSRV_ERROR eError; + + PDUMPCOMMENT("ZS Buffer cleanup Request [ZS Buffer = 0x%08x]", psFWZSBuffer.ui32Addr); + + /* Setup our command data, the cleanup call will fill in the rest */ + sZSBufferCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psZSBuffer = psFWZSBuffer; + + /* Request cleanup of the firmware resource */ + eError = RGXScheduleCleanupCommand(psDevInfo, + RGXFWIF_DM_3D, + &sZSBufferCleanUpCmd, + RGXFWIF_CLEANUP_ZSBUFFER, + PDUMP_FLAGS_NONE); + + if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule a memory context cleanup with error (%u)", + __func__, eError)); + } + + return eError; +} + +PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32HCSDeadlineMs) +{ + PVRSRV_ERROR eError; + RGXFWIF_KCCB_CMD sSetHCSDeadline = { 0 }; + + sSetHCSDeadline.eCmdType = RGXFWIF_KCCB_CMD_HCS_SET_DEADLINE; + sSetHCSDeadline.uCmdData.sHCSCtrl.ui32HCSDeadlineMS = ui32HCSDeadlineMs; + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GP, + &sSetHCSDeadline, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + return eError; +} + +PVRSRV_ERROR RGXFWHealthCheckCmd(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_KCCB_CMD sCmpKCCBCmd = { 0 }; + + sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK; + + return RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GP, + &sCmpKCCBCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); +} + +PVRSRV_ERROR RGXFWSetOSIsolationThreshold(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32IsolationPriorityThreshold) +{ + PVRSRV_ERROR eError; + RGXFWIF_KCCB_CMD sOSidIsoConfCmd = { 0 }; + + sOSidIsoConfCmd.eCmdType = RGXFWIF_KCCB_CMD_OS_ISOLATION_GROUP_CHANGE; + 
sOSidIsoConfCmd.uCmdData.sCmdOSidIsolationData.ui32IsolationPriorityThreshold = ui32IsolationPriorityThreshold; + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GP, + &sOSidIsoConfCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + return eError; +} + +PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32OSid, + RGXFWIF_OS_STATE_CHANGE eOSOnlineState) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sOSOnlineStateCmd = { 0 }; + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + sOSOnlineStateCmd.eCmdType = RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE; + sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.ui32OSid = ui32OSid; + sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.eNewOSState = eOSOnlineState; + + if (eOSOnlineState == RGXFWIF_OS_ONLINE) + { + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GP, + &sOSOnlineStateCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) break; + + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + } + else if (psFwSysData) + { + IMG_UINT32 ui32kCCBCommandSlot; + volatile RGXFWIF_OS_RUNTIME_FLAGS *psFwRunFlags; + + psFwRunFlags = (volatile RGXFWIF_OS_RUNTIME_FLAGS*) &psFwSysData->asOsRuntimeFlagsMirror[ui32OSid]; + /* Attempt several times until the FW manages to offload the OS */ + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + /* Send request */ + eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, + RGXFWIF_DM_GP, + &sOSOnlineStateCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + if (unlikely(eError == PVRSRV_ERROR_RETRY)) continue; + PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommand", return_); + + /* Wait for FW to process the cmd */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, 
ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", return_); + + /* read the OS state */ + OSMemoryBarrier(); + /* check if FW finished offloading the OSID and is stopped */ + if (psFwRunFlags->bfOsState == RGXFW_CONNECTION_FW_OFFLINE) + { + eError = PVRSRV_OK; + break; + } + else + { + eError = PVRSRV_ERROR_TIMEOUT; + } + + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + } + else + { + eError = PVRSRV_ERROR_NOT_INITIALISED; + } + +return_ : + return eError; +} + +PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32OSid, + IMG_UINT32 ui32Priority) +{ + PVRSRV_ERROR eError; + RGXFWIF_KCCB_CMD sOSidPriorityCmd = { 0 }; + + sOSidPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE; + sOSidPriorityCmd.uCmdData.sCmdOSidPriorityData.ui32OSidNum = ui32OSid; + sOSidPriorityCmd.uCmdData.sCmdOSidPriorityData.ui32Priority = ui32Priority; + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GP, + &sOSidPriorityCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + return eError; +} + +PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext, + CONNECTION_DATA *psConnection, + PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Priority, + RGXFWIF_DM eDM) +{ + IMG_UINT32 ui32CmdSize; + IMG_UINT8 *pui8CmdPtr; + RGXFWIF_KCCB_CMD sPriorityCmd = { 0 }; + RGXFWIF_CCB_CMD_HEADER *psCmdHeader; + RGXFWIF_CMD_PRIORITY *psCmd; + PVRSRV_ERROR eError; + RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psContext); + + /* + Get space for command + */ + ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_CMD_PRIORITY)); + + eError = RGXAcquireCCB(psClientCCB, + ui32CmdSize, + (void **) &pui8CmdPtr, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + if (eError != 
PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire space for client CCB", __func__)); + } + goto fail_ccbacquire; + } + + /* + Write the command header and command + */ + psCmdHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr; + psCmdHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PRIORITY; + psCmdHeader->ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CMD_PRIORITY)); + pui8CmdPtr += sizeof(*psCmdHeader); + + psCmd = (RGXFWIF_CMD_PRIORITY *) pui8CmdPtr; + psCmd->ui32Priority = ui32Priority; + pui8CmdPtr += sizeof(*psCmd); + + /* + We should reserved space in the kernel CCB here and fill in the command + directly. + This is so if there isn't space in the kernel CCB we can return with + retry back to services client before we take any operations + */ + + /* + Submit the command + */ + RGXReleaseCCB(psClientCCB, + ui32CmdSize, + PDUMP_FLAGS_CONTINUOUS); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to release space in client CCB", __func__)); + return eError; + } + + /* Construct the priority command. 
*/ + sPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + sPriorityCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psContext); + sPriorityCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); + sPriorityCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); + sPriorityCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; + sPriorityCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + eDM, + &sPriorityCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to submit set priority command with error (%u)", + __func__, + eError)); + } + + return PVRSRV_OK; + +fail_ccbacquire: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR RGXFWConfigPHR(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32PHRMode) +{ + PVRSRV_ERROR eError; + RGXFWIF_KCCB_CMD sCfgPHRCmd = { 0 }; + + sCfgPHRCmd.eCmdType = RGXFWIF_KCCB_CMD_PHR_CFG; + sCfgPHRCmd.uCmdData.sPeriodicHwResetCfg.ui32PHRMode = ui32PHRMode; + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GP, + &sCfgPHRCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + return eError; +} + +/* + RGXReadMETAAddr +*/ +PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 *pui32Value) +{ + IMG_UINT8 __iomem *pui8RegBase = psDevInfo->pvRegsBaseKM; + IMG_UINT32 ui32Value; + + /* Wait for Slave Port to be Ready */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *) (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1), + 
RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + return PVRSRV_ERROR_TIMEOUT; + } + + /* Issue the Read */ + OSWriteHWReg32( + psDevInfo->pvRegsBaseKM, + RGX_CR_META_SP_MSLVCTRL0, + ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN); + (void) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0); + + /* Wait for Slave Port to be Ready: read complete */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *) (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1), + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + return PVRSRV_ERROR_TIMEOUT; + } + + /* Read the value */ + ui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAX); + + *pui32Value = ui32Value; + + return PVRSRV_OK; +} + +/* + RGXWriteMETAAddr +*/ +PVRSRV_ERROR RGXWriteMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 ui32Value) +{ + IMG_UINT8 __iomem *pui8RegBase = psDevInfo->pvRegsBaseKM; + + /* Wait for Slave Port to be Ready */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1), + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + return PVRSRV_ERROR_TIMEOUT; + } + + /* Issue the Write */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0, ui32METAAddr); + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAT, ui32Value); + + return PVRSRV_OK; +} + +void RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bIgnorePrevious) +{ + /* Attempt to detect and deal with any stalled client contexts. 
+ * bIgnorePrevious may be set by the caller if they know a context to be + * stalled, as otherwise this function will only identify stalled + * contexts which have not been previously reported. + */ + + IMG_UINT32 ui32StalledClientMask = 0; + + if (!(OSTryLockAcquire(psDevInfo->hCCBStallCheckLock))) + { + PVR_LOG(("RGXCheckForStalledClientContexts: Failed to acquire hCCBStallCheckLock, returning...")); + return; + } + + ui32StalledClientMask |= CheckForStalledClientTransferCtxt(psDevInfo); + + ui32StalledClientMask |= CheckForStalledClientRenderCtxt(psDevInfo); + + ui32StalledClientMask |= CheckForStalledClientKickSyncCtxt(psDevInfo); + + if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK) + { + ui32StalledClientMask |= CheckForStalledClientComputeCtxt(psDevInfo); + } + + /* If at least one DM stalled bit is different than before */ + if (bIgnorePrevious || (psDevInfo->ui32StalledClientMask != ui32StalledClientMask))//(psDevInfo->ui32StalledClientMask ^ ui32StalledClientMask)) + { + if (ui32StalledClientMask > 0) + { + static __maybe_unused const char *pszStalledAction = +#if defined(PVRSRV_STALLED_CCB_ACTION) + "force"; +#else + "warn"; +#endif + /* Print all the stalled DMs */ + PVR_LOG(("Possible stalled client RGX contexts detected: %s%s%s%s%s%s%s%s%s", + RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_GP), + RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TDM_2D), + RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TA), + RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_3D), + RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_CDM), + RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_RTU), + RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_SHG), + RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ2D), + 
RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ3D))); + + PVR_LOG(("Trying to identify stalled context...(%s) [%d]", + pszStalledAction, bIgnorePrevious)); + + DumpStalledContextInfo(psDevInfo); + } + else + { + if (psDevInfo->ui32StalledClientMask> 0) + { + /* Indicate there are no stalled DMs */ + PVR_LOG(("No further stalled client contexts exist")); + } + } + psDevInfo->ui32StalledClientMask = ui32StalledClientMask; + psDevInfo->pvEarliestStalledClientCCB = NULL; + } + OSLockRelease(psDevInfo->hCCBStallCheckLock); +} + +/* + RGXUpdateHealthStatus +*/ +PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, + IMG_BOOL bCheckAfterTimePassed) +{ + PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_HEALTH_STATUS eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_OK; + PVRSRV_DEVICE_HEALTH_REASON eNewReason = PVRSRV_DEVICE_HEALTH_REASON_NONE; + PVRSRV_RGXDEV_INFO* psDevInfo; + RGXFWIF_TRACEBUF* psRGXFWIfTraceBufCtl; + RGXFWIF_SYSDATA* psFwSysData; + RGXFWIF_CCB_CTL *psKCCBCtl; + IMG_UINT32 ui32ThreadCount; + IMG_BOOL bKCCBCmdsWaiting; + + PVR_ASSERT(psDevNode != NULL); + psDevInfo = psDevNode->pvDevice; + + /* If the firmware is not yet initialised or has already deinitialised, stop here */ + if (psDevInfo == NULL || !psDevInfo->bFirmwareInitialised || psDevInfo->pvRegsBaseKM == NULL || + psDevInfo->psDeviceNode == NULL || psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT) + { + return PVRSRV_OK; + } + + psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + psFwSysData = psDevInfo->psRGXFWIfFwSysData; + +#if defined(SUPPORT_AUTOVZ) + if (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && KM_OS_CONNECTION_IS(ACTIVE, psDevInfo)) + { + /* read and write back the alive token value to confirm to the + * virtualisation watchdog that this connection is healthy */ + KM_SET_OS_ALIVE_TOKEN(KM_GET_FW_ALIVE_TOKEN(psDevInfo), psDevInfo); + } +#endif + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + /* If this is a quick 
update, then include the last current value... */ + if (!bCheckAfterTimePassed) + { + eNewStatus = OSAtomicRead(&psDevNode->eHealthStatus); + eNewReason = OSAtomicRead(&psDevNode->eHealthReason); + } + + /* Decrement the SLR holdoff counter (if non-zero) */ + if (psDevInfo->ui32SLRHoldoffCounter > 0) + { + psDevInfo->ui32SLRHoldoffCounter--; + } + + /* If Rogue is not powered on, just skip ahead and check for stalled client CCBs */ + if (PVRSRVIsDevicePowered(psDevNode)) + { + if (psRGXFWIfTraceBufCtl != NULL) + { + /* + Firmware thread checks... + */ + for (ui32ThreadCount = 0; ui32ThreadCount < RGXFW_THREAD_NUM; ui32ThreadCount++) + { + IMG_CHAR* pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szInfo; + + /* + Check if the FW has hit an assert... + */ + if (*pszTraceAssertInfo != '\0') + { + PVR_DPF((PVR_DBG_WARNING, "%s: Firmware thread %d has asserted: %s (%s:%d)", + __func__, ui32ThreadCount, pszTraceAssertInfo, + psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szPath, + psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.ui32LineNum)); + eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD; + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_ASSERTED; + goto _RGXUpdateHealthStatus_Exit; + } + + /* + Check the threads to see if they are in the same poll locations as last time... 
+ */ + if (bCheckAfterTimePassed) + { + if (psFwSysData->aui32CrPollAddr[ui32ThreadCount] != 0 && + psFwSysData->aui32CrPollCount[ui32ThreadCount] == psDevInfo->aui32CrLastPollCount[ui32ThreadCount]) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Firmware stuck on CR poll: T%u polling %s (reg:0x%08X mask:0x%08X)", + __func__, ui32ThreadCount, + ((psFwSysData->aui32CrPollAddr[ui32ThreadCount] & RGXFW_POLL_TYPE_SET)?("set"):("unset")), + psFwSysData->aui32CrPollAddr[ui32ThreadCount] & ~RGXFW_POLL_TYPE_SET, + psFwSysData->aui32CrPollMask[ui32ThreadCount])); + eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING; + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING; + goto _RGXUpdateHealthStatus_Exit; + } + psDevInfo->aui32CrLastPollCount[ui32ThreadCount] = psFwSysData->aui32CrPollCount[ui32ThreadCount]; + } + } + + /* + Check if the FW has faulted... + */ + if (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_FW_FAULT) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Firmware has faulted and needs to restart", + __func__)); + eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_FAULT; + if (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_RESTART_REQUESTED) + { + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_RESTARTING; + } + else + { + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_IDLING; + } + goto _RGXUpdateHealthStatus_Exit; + } + } + + /* + Event Object Timeouts check... + */ + if (!bCheckAfterTimePassed) + { + if (psDevInfo->ui32GEOTimeoutsLastTime > 1 && psPVRSRVData->ui32GEOConsecutiveTimeouts > psDevInfo->ui32GEOTimeoutsLastTime) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Global Event Object Timeouts have risen (from %d to %d)", + __func__, + psDevInfo->ui32GEOTimeoutsLastTime, psPVRSRVData->ui32GEOConsecutiveTimeouts)); + eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING; + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS; + } + psDevInfo->ui32GEOTimeoutsLastTime = psPVRSRVData->ui32GEOConsecutiveTimeouts; + } + + /* + Check the Kernel CCB pointer is valid. 
If any commands were waiting last time, then check + that some have executed since then. + */ + bKCCBCmdsWaiting = IMG_FALSE; + psKCCBCtl = psDevInfo->psKernelCCBCtl; + + if (psKCCBCtl != NULL) + { + if (psKCCBCtl->ui32ReadOffset > psKCCBCtl->ui32WrapMask || + psKCCBCtl->ui32WriteOffset > psKCCBCtl->ui32WrapMask) + { + PVR_DPF((PVR_DBG_WARNING, "%s: KCCB has invalid offset (ROFF=%d WOFF=%d)", + __func__, psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset)); + eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD; + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT; + } + + if (psKCCBCtl->ui32ReadOffset != psKCCBCtl->ui32WriteOffset) + { + bKCCBCmdsWaiting = IMG_TRUE; + } + } + + if (bCheckAfterTimePassed && psDevInfo->psRGXFWIfFwOsData != NULL) + { + IMG_UINT32 ui32KCCBCmdsExecuted = psDevInfo->psRGXFWIfFwOsData->ui32KCCBCmdsExecuted; + + if (psDevInfo->ui32KCCBCmdsExecutedLastTime == ui32KCCBCmdsExecuted) + { + /* + If something was waiting last time then the Firmware has stopped processing commands. + */ + if (psDevInfo->bKCCBCmdsWaitingLastTime) + { + PVR_DPF((PVR_DBG_WARNING, "%s: No KCCB commands executed since check!", + __func__)); + eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING; + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED; + } + + /* + If no commands are currently pending and nothing happened since the last poll, then + schedule a dummy command to ping the firmware so we know it is alive and processing. + */ + if (!bKCCBCmdsWaiting) + { + /* Protect the PDumpLoadMem. RGXScheduleCommand() cannot take the + * PMR lock itself, because some bridge functions will take the PMR lock + * before calling RGXScheduleCommand + */ + PVRSRV_ERROR eError = RGXFWHealthCheckCmd(psDevNode->pvDevice); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Cannot schedule Health Check command! 
(0x%x)", + __func__, eError)); + } + else + { + bKCCBCmdsWaiting = IMG_TRUE; + } + } + } + + psDevInfo->bKCCBCmdsWaitingLastTime = bKCCBCmdsWaiting; + psDevInfo->ui32KCCBCmdsExecutedLastTime = ui32KCCBCmdsExecuted; + } + } + + /* + Interrupt counts check... + */ + if (bCheckAfterTimePassed && psDevInfo->psRGXFWIfFwSysData != NULL) + { + IMG_UINT32 ui32LISRCount = 0; + IMG_UINT32 ui32FWCount = 0; + IMG_UINT32 ui32MissingInts = 0; + + /* Add up the total number of interrupts issued, sampled/received and missed... */ +#if defined(RGX_FW_IRQ_OS_COUNTERS) + /* Only the Host OS has a sample count, so only one counter to check. */ + ui32LISRCount += psDevInfo->aui32SampleIRQCount[RGXFW_HOST_OS]; + ui32FWCount += OSReadHWReg32(psDevInfo->pvRegsBaseKM, gaui32FwOsIrqCntRegAddr[RGXFW_HOST_OS]); +#else + IMG_UINT32 ui32Index; + + for (ui32Index = 0; ui32Index < RGXFW_THREAD_NUM; ui32Index++) + { + ui32LISRCount += psDevInfo->aui32SampleIRQCount[ui32Index]; + ui32FWCount += psDevInfo->psRGXFWIfFwSysData->aui32InterruptCount[ui32Index]; + } +#endif /* RGX_FW_IRQ_OS_COUNTERS */ + + if (ui32LISRCount < ui32FWCount) + { + ui32MissingInts = (ui32FWCount-ui32LISRCount); + } + + if (ui32LISRCount == psDevInfo->ui32InterruptCountLastTime && + ui32MissingInts >= psDevInfo->ui32MissingInterruptsLastTime && + psDevInfo->ui32MissingInterruptsLastTime > 1) + { + PVR_DPF((PVR_DBG_ERROR, "%s: LISR has not received the last %d interrupts", + __func__, ui32MissingInts)); + eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING; + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS; + + /* Schedule the MISRs to help mitigate the problems of missing interrupts. */ + OSScheduleMISR(psDevInfo->pvMISRData); + if (psDevInfo->pvAPMISRData != NULL) + { + OSScheduleMISR(psDevInfo->pvAPMISRData); + } + } + psDevInfo->ui32InterruptCountLastTime = ui32LISRCount; + psDevInfo->ui32MissingInterruptsLastTime = ui32MissingInts; + } + + /* + Stalled CCB check... 
+ */ + if (bCheckAfterTimePassed && (PVRSRV_DEVICE_HEALTH_STATUS_OK==eNewStatus)) + { + RGXCheckForStalledClientContexts(psDevInfo, IMG_FALSE); + } + + /* + Finished, save the new status... + */ +_RGXUpdateHealthStatus_Exit: + OSAtomicWrite(&psDevNode->eHealthStatus, eNewStatus); + OSAtomicWrite(&psDevNode->eHealthReason, eNewReason); + RGXSRV_HWPERF_DEVICE_INFO(psDevInfo, RGX_HWPERF_DEV_INFO_EV_HEALTH, eNewStatus, eNewReason); + + /* + * Attempt to service the HWPerf buffer to regularly transport idle/periodic + * packets to host buffer. + */ + if (psDevNode->pfnServiceHWPerf != NULL) + { + PVRSRV_ERROR eError = psDevNode->pfnServiceHWPerf(psDevNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: " + "Error occurred when servicing HWPerf buffer (%d)", + __func__, eError)); + } + } + + /* Attempt to refresh timer correlation data */ + RGXTimeCorrRestartPeriodic(psDevNode); + + return PVRSRV_OK; +} /* RGXUpdateHealthStatus */ + +PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM) +{ + if (psCurrentServerCommonContext == NULL) + { + /* the context has already been freed so there is nothing to do here */ + return PVRSRV_OK; + } + + return CheckForStalledCCB(psCurrentServerCommonContext->psDevInfo->psDeviceNode, + psCurrentServerCommonContext->psClientCCB, + eKickTypeDM); +} + +void DumpFWCommonContextInfo(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel) +{ + if (psCurrentServerCommonContext == NULL) + { + /* the context has already been freed so there is nothing to do here */ + return; + } + + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) + { + /* If high verbosity requested, dump whole CCB */ + DumpCCB(psCurrentServerCommonContext->psDevInfo, + psCurrentServerCommonContext->sFWCommonContextFWAddr, + psCurrentServerCommonContext->psClientCCB, + 
pfnDumpDebugPrintf, + pvDumpDebugFile); + } + else + { + /* Otherwise, only dump first stalled command in the CCB */ + DumpStalledCCBCommand(psCurrentServerCommonContext->sFWCommonContextFWAddr, + psCurrentServerCommonContext->psClientCCB, + pfnDumpDebugPrintf, + pvDumpDebugFile); + } +} + +PVRSRV_ERROR AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl, + IMG_UINT32 *pui32NumCleanupCtl, + RGXFWIF_DM eDM, + IMG_BOOL bKick, + RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, + RGX_ZSBUFFER_DATA *psZSBuffer, + RGX_ZSBUFFER_DATA *psMSAAScratchBuffer) +{ + PVRSRV_ERROR eError; + PRGXFWIF_CLEANUP_CTL *psCleanupCtlWrite = apsCleanupCtl; + + PVR_ASSERT((eDM == RGXFWIF_DM_GEOM) || (eDM == RGXFWIF_DM_3D)); + PVR_RETURN_IF_INVALID_PARAM((eDM == RGXFWIF_DM_GEOM) || (eDM == RGXFWIF_DM_3D)); + + if (bKick) + { + if (psKMHWRTDataSet) + { + PRGXFWIF_CLEANUP_CTL psCleanupCtl; + + eError = RGXSetFirmwareAddress(&psCleanupCtl, psKMHWRTDataSet->psHWRTDataFwMemDesc, + offsetof(RGXFWIF_HWRTDATA, sCleanupState), + RFW_FWADDR_NOREF_FLAG); + PVR_RETURN_IF_ERROR(eError); + + *(psCleanupCtlWrite++) = psCleanupCtl; + } + + if (eDM == RGXFWIF_DM_3D) + { + RGXFWIF_PRBUFFER_TYPE eBufferType; + RGX_ZSBUFFER_DATA *psBuffer = NULL; + + for (eBufferType = RGXFWIF_PRBUFFER_START; eBufferType < RGXFWIF_PRBUFFER_MAXSUPPORTED; eBufferType++) + { + switch (eBufferType) + { + case RGXFWIF_PRBUFFER_ZSBUFFER: + psBuffer = psZSBuffer; + break; + case RGXFWIF_PRBUFFER_MSAABUFFER: + psBuffer = psMSAAScratchBuffer; + break; + case RGXFWIF_PRBUFFER_MAXSUPPORTED: + psBuffer = NULL; + break; + } + if (psBuffer) + { + (psCleanupCtlWrite++)->ui32Addr = psBuffer->sZSBufferFWDevVAddr.ui32Addr + + offsetof(RGXFWIF_PRBUFFER, sCleanupState); + psBuffer = NULL; + } + } + } + } + + *pui32NumCleanupCtl = psCleanupCtlWrite - apsCleanupCtl; + PVR_ASSERT(*pui32NumCleanupCtl <= RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS); + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode) +{ + 
PVRSRV_RGXDEV_INFO *psDevInfo; + RGXFWIF_HWRINFOBUF *psHWRInfoBuf; + IMG_UINT32 i; + + if (psDevNode->pvDevice == NULL) + { + return PVRSRV_ERROR_INVALID_DEVINFO; + } + psDevInfo = psDevNode->pvDevice; + + psHWRInfoBuf = psDevInfo->psRGXFWIfHWRInfoBufCtl; + + for (i = 0 ; i < RGXFWIF_DM_MAX ; i++) + { + /* Reset the HWR numbers */ + psHWRInfoBuf->aui32HwrDmLockedUpCount[i] = 0; + psHWRInfoBuf->aui32HwrDmFalseDetectCount[i] = 0; + psHWRInfoBuf->aui32HwrDmRecoveredCount[i] = 0; + psHWRInfoBuf->aui32HwrDmOverranCount[i] = 0; + } + + for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++) + { + psHWRInfoBuf->sHWRInfo[i].ui32HWRNumber = 0; + } + + psHWRInfoBuf->ui32WriteIndex = 0; + psHWRInfoBuf->ui32DDReqCount = 0; + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR, + IMG_DEV_PHYADDR *psPhyAddr, + IMG_UINT32 ui32LogicalOffset, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_BOOL *bValid) +{ + + PVRSRV_ERROR eError; + + eError = PMRLockSysPhysAddresses(psPMR); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: PMRLockSysPhysAddresses failed (%u)", + __func__, + eError)); + return eError; + } + + eError = PMR_DevPhysAddr(psPMR, + ui32Log2PageSize, + ui32NumOfPages, + ui32LogicalOffset, + psPhyAddr, + bValid); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: PMR_DevPhysAddr failed (%u)", + __func__, + eError)); + return eError; + } + + + eError = PMRUnlockSysPhysAddresses(psPMR); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: PMRUnLockSysPhysAddresses failed (%u)", + __func__, + eError)); + return eError; + } + + return eError; +} + +#if defined(PDUMP) +PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32WriteOffset) +{ + RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl; + PVRSRV_ERROR eError = PVRSRV_OK; + + if (psDevInfo->bDumpedKCCBCtlAlready) + { + /* exiting capture range or pdump block */ + psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE; + + /* make sure previous cmd is 
drained in pdump in case we will 'jump' over some future cmds */ + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER, + "kCCB(%p): Draining rgxfw_roff (0x%x) == woff (0x%x)", + psKCCBCtl, + ui32WriteOffset, + ui32WriteOffset); + eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc, + offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset), + ui32WriteOffset, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: problem pdumping POL for kCCBCtl (%d)", __func__, eError)); + } + } + + return eError; + +} +#endif + +/*! +******************************************************************************* + + @Function RGXClientConnectCompatCheck_ClientAgainstFW + + @Description + + Check compatibility of client and firmware (build options) + at the connection time. + + @Input psDeviceNode - device node + @Input ui32ClientBuildOptions - build options for the client + + @Return PVRSRV_ERROR - depending on mismatch found + +******************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32ClientBuildOptions) +{ +#if !defined(NO_HARDWARE) || defined(PDUMP) +#if !defined(NO_HARDWARE) + IMG_UINT32 ui32BuildOptionsMismatch; + IMG_UINT32 ui32BuildOptionsFW; +#endif + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; +#endif + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + +#if !defined(NO_HARDWARE) + if (psDevInfo == NULL || psDevInfo->psRGXFWIfOsInitMemDesc == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Cannot acquire kernel fw compatibility check info, RGXFWIF_OSINIT structure not allocated.", + __func__)); + return PVRSRV_ERROR_NOT_INITIALISED; + } + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + if (*((volatile IMG_BOOL *) &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) + { + /* No need to wait if the FW has already updated the 
values */ + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); +#endif + +#if defined(PDUMP) + { + PVRSRV_ERROR eError; + + PDUMPCOMMENT("Compatibility check: client and FW build options"); + eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, ui32BuildOptions), + ui32ClientBuildOptions, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", + __func__, + eError)); + return eError; + } + } +#endif + +#if !defined(NO_HARDWARE) + ui32BuildOptionsFW = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.ui32BuildOptions; + ui32BuildOptionsMismatch = ui32ClientBuildOptions ^ ui32BuildOptionsFW; + + if (ui32BuildOptionsMismatch != 0) + { + if ((ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; " + "extra options present in client: (0x%x). Please check rgx_options.h", + ui32ClientBuildOptions & ui32BuildOptionsMismatch )); + } + + if ((ui32BuildOptionsFW & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; " + "extra options present in Firmware: (0x%x). Please check rgx_options.h", + ui32BuildOptionsFW & ui32BuildOptionsMismatch )); + } + + return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware and client build options match. [ OK ]", __func__)); + } +#endif + + return PVRSRV_OK; +} + +/*! 
+******************************************************************************* + + @Function RGXFwRawHeapAllocMap + + @Description Register firmware heap for the specified guest OSID + + @Input psDeviceNode - device node + @Input ui32OSID - Guest OSID + @Input sDevPAddr - Heap address + @Input ui64DevPSize - Heap size + + @Return PVRSRV_ERROR - PVRSRV_OK if heap setup was successful. + +******************************************************************************/ +PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32OSID, + IMG_DEV_PHYADDR sDevPAddr, + IMG_UINT64 ui64DevPSize) +{ + PVRSRV_ERROR eError; + IMG_CHAR szRegionRAName[PVRSRV_MAX_RA_NAME_LENGTH]; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT32 ui32RawFwHeapAllocFlags = (RGX_FWSHAREDMEM_ALLOCFLAGS | + PVRSRV_MEMALLOCFLAG_FW_ALLOC_RAW | + PVRSRV_MEMALLOCFLAG_FW_RAW_ALLOC_OSID(ui32OSID)); + + PVRSRV_VZ_RET_IF_NOT_MODE(HOST, PVRSRV_OK); + + OSSNPrintf(szRegionRAName, sizeof(szRegionRAName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSID); + + if (!ui64DevPSize || + !sDevPAddr.uiAddr || + ui32OSID >= RGX_NUM_OS_SUPPORTED || + ui64DevPSize != RGX_FIRMWARE_RAW_HEAP_SIZE) + { + PVR_DPF((PVR_DBG_ERROR, "Invalid parameters for %s", szRegionRAName)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = PVRSRVCreateRegionRA(psDeviceNode->psDevConfig, + &psDeviceNode->psKernelFwRawMemArena[ui32OSID], + psDeviceNode->szKernelFwRawRAName[ui32OSID], + 0, + sDevPAddr.uiAddr, + RGX_FIRMWARE_RAW_HEAP_SIZE, + 0, + szRegionRAName); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVCreateRegionRA"); + + PDUMPCOMMENT("Allocate and map raw firmware heap for OSID: [%d]", ui32OSID); + +#if (RGX_NUM_OS_SUPPORTED > 1) + /* don't clear the heap of other guests on allocation */ + ui32RawFwHeapAllocFlags &= (ui32OSID > RGXFW_HOST_OS) ? (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0); +#endif + + /* if the firmware is already powered up, consider the firmware heaps are pre-mapped. 
*/ + if (psDeviceNode->bAutoVzFwIsUp) + { + ui32RawFwHeapAllocFlags &= RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp); + DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_TRUE); + } + + eError = DevmemFwAllocate(psDevInfo, + RGX_FIRMWARE_RAW_HEAP_SIZE, + ui32RawFwHeapAllocFlags, + psDevInfo->psGuestFirmwareRawHeap[ui32OSID]->pszName, + &psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]); + PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate"); + + /* Mark this devmem heap as premapped so allocations will not require device mapping. */ + DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_TRUE); + + if (ui32OSID == RGXFW_HOST_OS) + { + /* if the Host's raw fw heap is premapped, mark its main & config sub-heaps accordingly + * No memory allocated from these sub-heaps will be individually mapped into the device's + * address space so they can remain marked permanently as premapped. */ + DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_TRUE); + DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_TRUE); + } + + return eError; +} + +/*! 
+******************************************************************************* + + @Function RGXFwRawHeapUnmapFree + + @Description Unregister firmware heap for the specified guest OSID + + @Input psDeviceNode - device node + @Input ui32OSID - Guest OSID + +******************************************************************************/ +void RGXFwRawHeapUnmapFree(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32OSID) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + /* remove the premap status, so the heap can be unmapped and freed */ + if (psDevInfo->psGuestFirmwareRawHeap[ui32OSID]) + { + DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_FALSE); + } + + if (psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]) + { + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]); + psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID] = NULL; + } + + if (psDeviceNode->psKernelFwRawMemArena[ui32OSID]) + { + RA_Delete(psDeviceNode->psKernelFwRawMemArena[ui32OSID]); + } + + psDeviceNode->psKernelFwRawMemArena[ui32OSID] = NULL; +} + +/****************************************************************************** + End of file (rgxfwutils.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxfwutils.h b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxfwutils.h new file mode 100644 index 000000000000..79066477ea61 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxfwutils.h @@ -0,0 +1,1298 @@ +/*************************************************************************/ /*! +@File +@Title RGX firmware utility routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX firmware utility routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXFWUTILS_H +#define RGXFWUTILS_H + +#include "log2.h" +#include "rgxdevice.h" +#include "rgxccb.h" +#include "devicemem.h" +#include "device.h" +#include "pvr_notifier.h" +#include "pvrsrv.h" +#include "connection_server.h" +#include "rgxta3d.h" +#include "devicemem_utils.h" + +#if defined(SUPPORT_TRUSTED_DEVICE) +#include "physmem_tdfwmem.h" +#endif + +#if defined(SUPPORT_DEDICATED_FW_MEMORY) +#include "physmem_fwdedicatedmem.h" +#endif + +static INLINE PVRSRV_ERROR _SelectDevMemHeap(PVRSRV_RGXDEV_INFO *psDevInfo, + DEVMEM_FLAGS_T *puiFlags, + DEVMEM_HEAP **ppsFwHeap) +{ + switch (PVRSRV_FW_ALLOC_TYPE(*puiFlags)) + { + case FW_ALLOC_MAIN: + { + *ppsFwHeap = psDevInfo->psFirmwareMainHeap; + break; + } + case FW_ALLOC_CONFIG: + { + *ppsFwHeap = psDevInfo->psFirmwareConfigHeap; + break; + } + case FW_ALLOC_RAW: + { + IMG_UINT32 ui32OSID = PVRSRV_FW_RAW_ALLOC_OSID(*puiFlags); + + PVR_LOG_RETURN_IF_INVALID_PARAM(ui32OSID < RGX_NUM_OS_SUPPORTED, "ui32OSID"); + *ppsFwHeap = psDevInfo->psGuestFirmwareRawHeap[ui32OSID]; + break; + } + default: + { + /* Firmware local allocations are by default from the fw main heap */ + *puiFlags |= PVRSRV_MEMALLOCFLAG_FW_ALLOC_MAIN; + *ppsFwHeap = psDevInfo->psFirmwareMainHeap; + break; + } + } + + return PVRSRV_OK; +} + +/* + * Firmware-only allocation (which are initialised by the host) must be aligned to the SLC cache line size. + * This is because firmware-only allocations are GPU_CACHE_INCOHERENT and this causes problems + * if two allocations share the same cache line; e.g. the initialisation of the second allocation won't + * make it into the SLC cache because it has been already loaded when accessing the content of the first allocation. 
+ */ +static INLINE PVRSRV_ERROR DevmemFwAllocate(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_DEVMEM_SIZE_T uiSize, + DEVMEM_FLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr) +{ + IMG_DEV_VIRTADDR sTmpDevVAddr; + PVRSRV_ERROR eError; + DEVMEM_HEAP *psFwHeap; + + PVR_DPF_ENTERED; + + /* Enforce the standard pre-fix naming scheme callers must follow */ + PVR_ASSERT((pszText != NULL) && (pszText[0] == 'F') && (pszText[1] == 'w')); + + /* Imported from AppHint , flag to poison allocations when freed */ + uiFlags |= psDevInfo->ui32FWPoisonOnFreeFlag; + + eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap); + if (eError != PVRSRV_OK) + { + PVR_DPF_RETURN_RC(eError); + } + + eError = DevmemAllocateAndMap(psFwHeap, + uiSize, + GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)), + uiFlags, + pszText, + ppsMemDescPtr, + &sTmpDevVAddr); + + PVR_DPF_RETURN_RC(eError); +} + +static INLINE PVRSRV_ERROR DevmemFwAllocateExportable(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiAlign, + DEVMEM_FLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; + IMG_DEV_VIRTADDR sTmpDevVAddr; + PVRSRV_ERROR eError; + DEVMEM_HEAP *psFwHeap; + + PVR_DPF_ENTERED; + + /* Enforce the standard pre-fix naming scheme callers must follow */ + PVR_ASSERT((pszText != NULL) && + (pszText[0] == 'F') && (pszText[1] == 'w') && + (pszText[2] == 'E') && (pszText[3] == 'x')); + + /* Imported from AppHint , flag to poison allocations when freed */ + uiFlags |= psDevInfo->ui32FWPoisonOnFreeFlag; + + eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap); + if (eError != PVRSRV_OK) + { + PVR_DPF_RETURN_RC(eError); + } + + eError = DevmemAllocateExportable(psDeviceNode, + uiSize, + uiAlign, + RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) ? 
+ ExactLog2(uiAlign) : + DevmemGetHeapLog2PageSize(psFwHeap), + uiFlags, + pszText, + ppsMemDescPtr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "FW DevmemAllocateExportable failed (%u)", eError)); + PVR_DPF_RETURN_RC(eError); + } + + /* + We need to map it so the heap for this allocation + is set + */ + eError = DevmemMapToDevice(*ppsMemDescPtr, + psDevInfo->psFirmwareMainHeap, + &sTmpDevVAddr); + if (eError != PVRSRV_OK) + { + DevmemFree(*ppsMemDescPtr); + PVR_DPF((PVR_DBG_ERROR, "FW DevmemMapToDevice failed (%u)", eError)); + } + + PVR_DPF_RETURN_RC1(eError, *ppsMemDescPtr); +} + +static INLINE PVRSRV_ERROR DevmemFwAllocateSparse(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + DEVMEM_FLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr) +{ + IMG_DEV_VIRTADDR sTmpDevVAddr; + PVRSRV_ERROR eError; + DEVMEM_HEAP *psFwHeap; + IMG_UINT32 ui32Align; + + PVR_DPF_ENTERED; + + /* Enforce the standard pre-fix naming scheme callers must follow */ + PVR_ASSERT((pszText != NULL) && (pszText[0] == 'F') && (pszText[1] == 'w')); + ui32Align = GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)); + + /* Imported from AppHint , flag to poison allocations when freed */ + uiFlags |= psDevInfo->ui32FWPoisonOnFreeFlag; + + eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap); + if (eError != PVRSRV_OK) + { + PVR_DPF_RETURN_RC(eError); + } + + eError = DevmemAllocateSparse(psDevInfo->psDeviceNode, + uiSize, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + ui32Align, + DevmemGetHeapLog2PageSize(psFwHeap), + uiFlags | PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING, + pszText, + ppsMemDescPtr); + if (eError != PVRSRV_OK) + { + PVR_DPF_RETURN_RC(eError); + } + /* + We need to map it so the heap for this allocation + is set + */ + eError = 
DevmemMapToDevice(*ppsMemDescPtr, + psFwHeap, + &sTmpDevVAddr); + if (eError != PVRSRV_OK) + { + DevmemFree(*ppsMemDescPtr); + PVR_DPF_RETURN_RC(eError); + } + + PVR_DPF_RETURN_RC(eError); +} + + +static INLINE void DevmemFwUnmapAndFree(PVRSRV_RGXDEV_INFO *psDevInfo, + DEVMEM_MEMDESC *psMemDesc) +{ + PVR_DPF_ENTERED1(psMemDesc); + + DevmemReleaseDevVirtAddr(psMemDesc); + DevmemFree(psMemDesc); + + PVR_DPF_RETURN; +} + +#if defined(SUPPORT_TRUSTED_DEVICE) +static INLINE +PVRSRV_ERROR DevmemImportTDFWMem(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEVMEM_SIZE_T uiSize, + PMR_LOG2ALIGN_T uiLog2Align, + IMG_UINT32 uiMemAllocFlags, + PVRSRV_TD_FW_MEM_REGION eRegion, + DEVMEM_MEMDESC **ppsMemDescPtr) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; + PMR *psTDFWMemPMR; + IMG_DEV_VIRTADDR sTmpDevVAddr; + IMG_DEVMEM_SIZE_T uiMemDescSize; + IMG_DEVMEM_ALIGN_T uiAlign = 1 << uiLog2Align; + PVRSRV_ERROR eError; + + if (ppsMemDescPtr == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: memdesc pointer is null", __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap), + &uiSize, + &uiAlign); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "DevmemExportalignAdjustSizeAndAlign failed (%u)", eError)); + goto PMRCreateError; + } + + eError = PhysmemNewTDFWMemPMR(NULL, + psDeviceNode, + uiSize, + uiLog2Align, + uiMemAllocFlags, + eRegion, + &psTDFWMemPMR); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDFWMemPMR failed (%u)", eError)); + goto PMRCreateError; + } + + /* NB: TDFWMemPMR refcount: 1 -> 2 */ + eError = DevmemLocalImport(psDeviceNode, + psTDFWMemPMR, + uiMemAllocFlags, + ppsMemDescPtr, + &uiMemDescSize, + "TDFWMem"); + if (eError != PVRSRV_OK) + { + goto ImportError; + } + + eError = DevmemMapToDevice(*ppsMemDescPtr, + psDevInfo->psFirmwareMainHeap, + &sTmpDevVAddr); + if (eError != PVRSRV_OK) + { + 
PVR_DPF((PVR_DBG_ERROR, "Failed to map TD META code PMR (%u)", eError)); + goto MapError; + } + + /* NB: TDFWMemPMR refcount: 2 -> 1 + * The PMR will be unreferenced again (and destroyed) when + * the memdesc tracking it is cleaned up + */ + PMRUnrefPMR(psTDFWMemPMR); + + return PVRSRV_OK; + +MapError: + DevmemFree(*ppsMemDescPtr); + *ppsMemDescPtr = NULL; +ImportError: + /* Unref and destroy the PMR */ + PMRUnrefPMR(psTDFWMemPMR); +PMRCreateError: + + return eError; +} +#endif + + +#if defined(SUPPORT_DEDICATED_FW_MEMORY) +static INLINE +PVRSRV_ERROR DevmemAllocateDedicatedFWMem(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEVMEM_SIZE_T uiSize, + PMR_LOG2ALIGN_T uiLog2Align, + IMG_UINT32 uiMemAllocFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; + PMR *psPMR; + IMG_DEV_VIRTADDR sTmpDevVAddr; + IMG_DEVMEM_SIZE_T uiMemDescSize; + IMG_DEVMEM_ALIGN_T uiAlign = 1 << uiLog2Align; + PVRSRV_ERROR eError; + + PVR_ASSERT(ppsMemDescPtr); + + eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap), + &uiSize, + &uiAlign); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "DevmemExportalignAdjustSizeAndAlign failed (%u)", eError)); + goto PMRCreateError; + } + + eError = PhysmemNewFWDedicatedMemPMR(NULL, + psDeviceNode, + uiSize, + uiLog2Align, + uiMemAllocFlags, + &psPMR); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PhysmemNewFWDedicatedMemPMR failed (%u)", eError)); + goto PMRCreateError; + } + + /* NB: FWDedicatedMemPMR refcount: 1 -> 2 */ + eError = DevmemLocalImport(psDeviceNode, + psPMR, + uiMemAllocFlags, + ppsMemDescPtr, + &uiMemDescSize, + pszText); + if (eError != PVRSRV_OK) + { + goto ImportError; + } + + eError = DevmemMapToDevice(*ppsMemDescPtr, + psDevInfo->psFirmwareMainHeap, + &sTmpDevVAddr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to map dedicated FW memory (%u)", eError)); 
+ goto MapError; + } + + /* NB: FWDedicatedMemPMR refcount: 2 -> 1 + * The PMR will be unreferenced again (and destroyed) when + * the memdesc tracking it is cleaned up + */ + PMRUnrefPMR(psPMR); + + return PVRSRV_OK; + +MapError: + DevmemFree(*ppsMemDescPtr); + *ppsMemDescPtr = NULL; +ImportError: + /* Unref and destroy the PMR */ + PMRUnrefPMR(psPMR); +PMRCreateError: + + return eError; +} +#endif + + +/* + * This function returns the value of the hardware register RGX_CR_TIMER + * which is a timer counting in ticks. + */ + +static INLINE IMG_UINT64 RGXReadHWTimerReg(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_UINT64 ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER); + + /* + * In order to avoid having to issue three 32-bit reads to detect the + * lower 32-bits wrapping, the MSB of the low 32-bit word is duplicated + * in the MSB of the high 32-bit word. If the wrap happens, we just read + * the register again (it will not wrap again so soon). + */ + if ((ui64Time ^ (ui64Time << 32)) & ~RGX_CR_TIMER_BIT31_CLRMSK) + { + ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER); + } + + return (ui64Time & ~RGX_CR_TIMER_VALUE_CLRMSK) >> RGX_CR_TIMER_VALUE_SHIFT; +} + +/* + * This FW Common Context is only mapped into kernel for initialisation and cleanup purposes. + * Otherwise this allocation is only used by the FW. 
+ * Therefore the GPU cache doesn't need coherency, + * and write-combine is suffice on the CPU side (WC buffer will be flushed at the first kick) + */ +#define RGX_FWCOMCTX_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)| \ + PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | \ + PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE | \ + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) + +#define RGX_FWSHAREDMEM_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ + PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ + PVRSRV_MEMALLOCFLAG_UNCACHED | \ + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) + +/* + * This FW Init Data is initialised on the CPU and then passed to the FW. We need + * to make the CPU mapping write-combined to avoid CPU-specific alignment issues + * for device memory. + */ +#define RGX_FWINITDATA_WC_ALLOCFLAGS ((RGX_FWSHAREDMEM_ALLOCFLAGS & (~PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK)) | PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE) +#define RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS (RGX_FWSHAREDMEM_ALLOCFLAGS & (~PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE)) +#define RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS (RGX_FWSHAREDMEM_ALLOCFLAGS & (~PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE)) + +/* data content being kept from previous boot cycles from physical memory must not be cleared during allocation */ +#define RGX_AUTOVZ_KEEP_FW_DATA_MASK(bKeepMem) ((bKeepMem) ? 
(~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0U)) + +/****************************************************************************** + * RGXSetFirmwareAddress Flags + *****************************************************************************/ +#define RFW_FWADDR_FLAG_NONE (0) /*!< Void flag */ +#define RFW_FWADDR_NOREF_FLAG (1U << 0) /*!< It is safe to immediately release the reference to the pointer, + otherwise RGXUnsetFirmwareAddress() must be call when finished. */ + +IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo); +PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO* psDevInfo, DEVMEM_FLAGS_T uiAllocFlags); + +#if defined(SUPPORT_TBI_INTERFACE) +IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo); +PVRSRV_ERROR RGXTBIBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo); +#endif + +PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bEnableSignatureChecks, + IMG_UINT32 ui32SignatureChecksBufSize, + IMG_UINT32 ui32HWPerfFWBufSizeKB, + IMG_UINT64 ui64HWPerfFilter, + IMG_UINT32 ui32RGXFWAlignChecksArrLength, + IMG_UINT32 *pui32RGXFWAlignChecks, + IMG_UINT32 ui32ConfigFlags, + IMG_UINT32 ui32ConfigFlagsExt, + IMG_UINT32 ui32FwOsCfgFlags, + IMG_UINT32 ui32LogType, + IMG_UINT32 ui32FilterFlags, + IMG_UINT32 ui32JonesDisableMask, + IMG_UINT32 ui32HWRDebugDumpLimit, + IMG_UINT32 ui32HWPerfCountersDataSize, + IMG_UINT32 *pui32TPUTrilinearFracMask, + RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf, + FW_PERF_CONF eFirmwarePerf); + + + +void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*************************************************************************/ /*! +@Function RGXSetupFwAllocation + +@Description Sets a pointer in a firmware data structure. 
+ +@Input psDevInfo Device Info struct +@Input uiAllocFlags Flags determining type of memory allocation +@Input ui32Size Size of memory allocation +@Input pszName Allocation label +@Input psFwPtr Address of the firmware pointer to set +@Input ppvCpuPtr Address of the cpu pointer to set +@Input ui32DevVAFlags Any combination of RFW_FWADDR_*_FLAG + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXSetupFwAllocation(PVRSRV_RGXDEV_INFO *psDevInfo, + DEVMEM_FLAGS_T uiAllocFlags, + IMG_UINT32 ui32Size, + const IMG_CHAR *pszName, + DEVMEM_MEMDESC **ppsMemDesc, + RGXFWIF_DEV_VIRTADDR *psFwPtr, + void **ppvCpuPtr, + IMG_UINT32 ui32DevVAFlags); + +/*************************************************************************/ /*! +@Function RGXSetFirmwareAddress + +@Description Sets a pointer in a firmware data structure. + +@Input ppDest Address of the pointer to set +@Input psSrc MemDesc describing the pointer +@Input ui32Flags Any combination of RFW_FWADDR_*_FLAG + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR *ppDest, + DEVMEM_MEMDESC *psSrc, + IMG_UINT32 uiOffset, + IMG_UINT32 ui32Flags); + + +/*************************************************************************/ /*! +@Function RGXSetMetaDMAAddress + +@Description Fills a Firmware structure used to setup the Meta DMA with two + pointers to the same data, one on 40 bit and one on 32 bit + (pointer in the FW memory space). 
+ +@Input ppDest Address of the structure to set +@Input psSrcMemDesc MemDesc describing the pointer +@Input psSrcFWDevVAddr Firmware memory space pointer + +@Return void +*/ /**************************************************************************/ +void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR *psDest, + DEVMEM_MEMDESC *psSrcMemDesc, + RGXFWIF_DEV_VIRTADDR *psSrcFWDevVAddr, + IMG_UINT32 uiOffset); + + +/*************************************************************************/ /*! +@Function RGXUnsetFirmwareAddress + +@Description Unsets a pointer in a firmware data structure + +@Input psSrc MemDesc describing the pointer + +@Return void +*/ /**************************************************************************/ +void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc); + +/*************************************************************************/ /*! +@Function FWCommonContextAllocate + +@Description Allocate a FW common context. This allocates the HW memory + for the context, the CCB and wires it all together. + +@Input psConnection Connection this context is being created on +@Input psDeviceNode Device node to create the FW context on + (must be RGX device node) +@Input eRGXCCBRequestor RGX_CCB_REQUESTOR_TYPE enum constant + which represents the requestor of this FWCC +@Input eDM Data Master type +@Input psAllocatedMemDesc Pointer to pre-allocated MemDesc to use + as the FW context or NULL if this function + should allocate it +@Input ui32AllocatedOffset Offset into pre-allocated MemDesc to use + as the FW context. 
If psAllocatedMemDesc + is NULL then this parameter is ignored +@Input psFWMemContextMemDesc MemDesc of the FW memory context this + common context resides on +@Input psContextStateMemDesc FW context state (context switch) MemDesc +@Input ui32CCBAllocSizeLog2 Size of the CCB for this context +@Input ui32CCBMaxAllocSizeLog2 Maximum size to which CCB can grow for this context +@Input ui32ContextFlags Flags which specify properties of the context +@Input ui32Priority Priority of the context +@Input ui32MaxDeadlineMS Max deadline limit in MS that the workload can run +@Input ui64RobustnessAddress Address for FW to signal a context reset +@Input psInfo Structure that contains extra info + required for the creation of the context + (elements might change from core to core) +@Return PVRSRV_OK if the context was successfully created +*/ /**************************************************************************/ +PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor, + RGXFWIF_DM eDM, + DEVMEM_MEMDESC *psAllocatedMemDesc, + IMG_UINT32 ui32AllocatedOffset, + DEVMEM_MEMDESC *psFWMemContextMemDesc, + DEVMEM_MEMDESC *psContextStateMemDesc, + IMG_UINT32 ui32CCBAllocSizeLog2, + IMG_UINT32 ui32CCBMaxAllocSizeLog2, + IMG_UINT32 ui32ContextFlags, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32MaxDeadlineMS, + IMG_UINT64 ui64RobustnessAddress, + RGX_COMMON_CONTEXT_INFO *psInfo, + RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext); + +void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); + +PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); + +RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); + +RGXFWIF_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + IMG_UINT32 *pui32LastResetJobRef); + +PVRSRV_RGXDEV_INFO* 
FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); + +PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + IMG_UINT32 ui32ContextFlags); +/*! +******************************************************************************* +@Function RGXScheduleProcessQueuesKM + +@Description Software command complete handler + (sends uncounted kicks for all the DMs through the MISR) + +@Input hCmdCompHandle RGX device node + +@Return None +******************************************************************************/ +void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle); + +/*! +******************************************************************************* + +@Function RGXInstallProcessQueuesMISR + +@Description Installs the MISR to handle Process Queues operations + +@Input phMISR Pointer to the MISR handler +@Input psDeviceNode RGX Device node + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode); + +PVRSRV_ERROR RGXSendCommandsFromDeferredList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bPoll); + +/*************************************************************************/ /*! +@Function RGXSendCommandWithPowLockAndGetKCCBSlot + +@Description Sends a command to a particular DM without honouring + pending cache operations but taking the power lock. + +@Input psDevInfo Device Info +@Input psKCCBCmd The cmd to send. +@Input ui32PDumpFlags Pdump flags +@Output pui32CmdKCCBSlot When non-NULL: + - Pointer on return contains the kCCB slot + number in which the command was enqueued. 
+ - Resets the value of the allotted slot to + RGXFWIF_KCCB_RTN_SLOT_RST +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXSendCommandWithPowLockAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_KCCB_CMD *psKCCBCmd, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 *pui32CmdKCCBSlot); + +#define RGXSendCommandWithPowLock(psDevInfo, psKCCBCmd, ui32PDumpFlags) \ + RGXSendCommandWithPowLockAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, NULL) + +/*************************************************************************/ /*! +@Function RGXSendCommandAndGetKCCBSlot + +@Description Sends a command to a particular DM without honouring + pending cache operations or the power lock. + The function flushes any deferred KCCB commands first. + +@Input psDevInfo Device Info +@Input psKCCBCmd The cmd to send. +@Input uiPdumpFlags PDump flags. +@Output pui32CmdKCCBSlot When non-NULL: + - Pointer on return contains the kCCB slot + number in which the command was enqueued. + - Resets the value of the allotted slot to + RGXFWIF_KCCB_RTN_SLOT_RST +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXSendCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_KCCB_CMD *psKCCBCmd, + PDUMP_FLAGS_T uiPdumpFlags, + IMG_UINT32 *pui32CmdKCCBSlot); + +#define RGXSendCommand(psDevInfo, psKCCBCmd, ui32PDumpFlags) \ + RGXSendCommandAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, NULL) + +/*************************************************************************/ /*! +@Function RGXScheduleCommand + +@Description Sends a command to a particular DM and kicks the firmware but + first schedules any commands which have to happen before + handle + +@Input psDevInfo Device Info +@Input eDM To which DM the cmd is sent. +@Input psKCCBCmd The cmd to send. +@Input ui32CacheOpFence Pending cache op. fence value. 
+@Input ui32PDumpFlags PDump flags +@Output pui32CmdKCCBSlot When non-NULL: + - Pointer on return contains the kCCB slot + number in which the command was enqueued. + - Resets the value of the allotted slot to + RGXFWIF_KCCB_RTN_SLOT_RST +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_DM eKCCBType, + RGXFWIF_KCCB_CMD *psKCCBCmd, + IMG_UINT32 ui32CacheOpFence, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 *pui32CmdKCCBSlot); +#define RGXScheduleCommand(psDevInfo, eKCCBType, psKCCBCmd, ui32CacheOpFence, ui32PDumpFlags) \ + RGXScheduleCommandAndGetKCCBSlot(psDevInfo, eKCCBType, psKCCBCmd, ui32CacheOpFence, ui32PDumpFlags, NULL) + +/*************************************************************************/ /*! +@Function RGXWaitForKCCBSlotUpdate + +@Description Waits until the required kCCB slot value is updated by the FW + (signifies command completion). Additionally, dumps a relevant + PDump poll command. + +@Input psDevInfo Device Info +@Input ui32SlotNum The kCCB slot number to wait for an update on +@Input ui32PDumpFlags + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXWaitForKCCBSlotUpdate(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32SlotNum, + IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR RGXFirmwareUnittests(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*************************************************************************/ /*! 
+@Function PVRSRVRGXFrameworkCopyCommand + +@Description Copy framework command into FW addressable buffer + +@param psFWFrameworkMemDesc +@param pbyGPUFRegisterList +@param ui32FrameworkRegisterSize + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(DEVMEM_MEMDESC *psFWFrameworkMemDesc, + IMG_PBYTE pbyGPUFRegisterList, + IMG_UINT32 ui32FrameworkRegisterSize); + + +/*************************************************************************/ /*! +@Function PVRSRVRGXFrameworkCreateKM + +@Description Create FW addressable buffer for framework + +@param psDeviceNode +@param ppsFWFrameworkMemDesc +@param ui32FrameworkRegisterSize + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE * psDeviceNode, + DEVMEM_MEMDESC ** ppsFWFrameworkMemDesc, + IMG_UINT32 ui32FrameworkRegisterSize); + +/*************************************************************************/ /*! +@Function RGXPollForGPCommandCompletion + +@Description Polls for completion of a submitted GP command. Poll is done + on a value matching a masked read from the address. + +@Input psDevNode Pointer to device node struct +@Input pui32LinMemAddr CPU linear address to poll +@Input ui32Value Required value +@Input ui32Mask Mask + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV RGXPollForGPCommandCompletion(PVRSRV_DEVICE_NODE *psDevNode, + volatile IMG_UINT32 __iomem *pui32LinMemAddr, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask); + +/*************************************************************************/ /*! +@Function RGXStateFlagCtrl + +@Description Set and return FW internal state flags. 
+ +@Input psDevInfo Device Info +@Input ui32Config AppHint config flags +@Output pui32State Current AppHint state flag configuration +@Input bSetNotClear Set or clear the provided config flags + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Config, + IMG_UINT32 *pui32State, + IMG_BOOL bSetNotClear); + +/*! +******************************************************************************* +@Function RGXFWRequestCommonContextCleanUp + +@Description Schedules a FW common context cleanup. The firmware doesn't + block waiting for the resource to become idle but rather + notifies the host that the resource is busy. + +@Input psDeviceNode pointer to device node +@Input psServerCommonContext context to be cleaned up +@Input eDM Data master, to which the cleanup command should + be sent +@Input ui32PDumpFlags PDump continuous flag + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + RGXFWIF_DM eDM, + IMG_UINT32 ui32PDumpFlags); + +/*! +******************************************************************************* +@Function RGXFWRequestHWRTDataCleanUp + +@Description Schedules a FW HWRTData memory cleanup. The firmware doesn't + block waiting for the resource to become idle but rather + notifies the host that the resource is busy. + +@Input psDeviceNode pointer to device node +@Input psHWRTData firmware address of the HWRTData for clean-up + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, + PRGXFWIF_HWRTDATA psHWRTData); + +/*! 
+******************************************************************************* +@Function RGXFWRequestFreeListCleanUp + +@Description Schedules a FW FreeList cleanup. The firmware doesn't block + waiting for the resource to become idle but rather notifies the + host that the resource is busy. + +@Input psDeviceNode pointer to device node +@Input psFWFreeList firmware address of the FreeList for clean-up + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDeviceNode, + PRGXFWIF_FREELIST psFWFreeList); + +/*! +******************************************************************************* +@Function RGXFWRequestZSBufferCleanUp + +@Description Schedules a FW ZS Buffer cleanup. The firmware doesn't block + waiting for the resource to become idle but rather notifies the + host that the resource is busy. + +@Input psDevInfo pointer to device node +@Input psFWZSBuffer firmware address of the ZS Buffer for clean-up + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo, + PRGXFWIF_ZSBUFFER psFWZSBuffer); + +PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext, + CONNECTION_DATA *psConnection, + PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Priority, + RGXFWIF_DM eDM); + +/*! +******************************************************************************* +@Function RGXFWSetHCSDeadline + +@Description Requests the Firmware to set a new Hard Context Switch timeout + deadline. Context switches that surpass that deadline cause the + system to kill the currently running workloads. + +@Input psDeviceNode pointer to device node +@Input ui32HCSDeadlineMs The deadline in milliseconds. 
+ +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32HCSDeadlineMs); + +/*! +******************************************************************************* +@Function RGXFWChangeOSidPriority + +@Description Requests the Firmware to change the priority of an operating + system. Higher priority number equals higher priority on the + scheduling system. + +@Input psDevInfo pointer to device info +@Input ui32OSid The OSid whose priority is to be altered +@Input ui32Priority The new priority number for the specified OSid + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32OSid, + IMG_UINT32 ui32Priority); + +/*! +******************************************************************************* +@Function RGXFWSetOSIsolationThreshold + +@Description Requests the Firmware to change the priority threshold of the + OS Isolation group. Any OS with a priority higher or equal than + the threshold is considered to be belonging to the isolation + group. + +@Input psDevInfo pointer to device info +@Input ui32IsolationPriorityThreshold The new priority threshold + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWSetOSIsolationThreshold(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32IsolationPriorityThreshold); + +/*! +******************************************************************************* +@Function RGXFWHealthCheckCmd + +@Description Ping the firmware to check if it is responsive. + +@Input psDevInfo pointer to device info + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWHealthCheckCmd(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! 
+******************************************************************************* +@Function RGXFWSetFwOsState + +@Description Requests the Firmware to change the guest OS Online states. + This should be initiated by the VMM when a guest VM comes + online or goes offline. If offline, the FW offloads any current + resource from that OSID. The request is repeated until the FW + has had time to free all the resources or has waited for + workloads to finish. + +@Input psDevInfo pointer to device info +@Input ui32OSid The Guest OSid whose state is being altered +@Input eOSOnlineState The new state (Online or Offline) + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32OSid, + RGXFWIF_OS_STATE_CHANGE eOSOnlineState); + +/*! +******************************************************************************* +@Function RGXFWConfigPHR + +@Description Configure the Periodic Hardware Reset functionality + +@Input psDevInfo pointer to device info +@Input ui32PHRMode desired PHR mode + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWConfigPHR(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32PHRMode); + +/*! +******************************************************************************* +@Function RGXReadMETAAddr + +@Description Reads a value at given address in META memory space + (it can be either a memory location or a META register) + +@Input psDevInfo pointer to device info +@Input ui32METAAddr address in META memory space + +@Output pui32Value value + +@Return PVRSRV_ERROR +******************************************************************************/ + +PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32METAAddr, + IMG_UINT32 *pui32Value); + +/*! 
+******************************************************************************* +@Function RGXWriteMETAAddr + +@Description Write a value to the given address in META memory space + (it can be either a memory location or a META register) + +@Input psDevInfo pointer to device info +@Input ui32METAAddr address in META memory space +@Input ui32Value Value to write to address in META memory space + +@Return PVRSRV_ERROR +******************************************************************************/ + +PVRSRV_ERROR RGXWriteMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32METAAddr, + IMG_UINT32 ui32Value); + +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) +/*! +******************************************************************************* +@Function RGXCheckCheckpointCCB + +@Description Processes all signalled checkpoints which are found in the + checkpoint CCB. + +@Input psDevInfo pointer to device node + +@Return None +******************************************************************************/ +void RGXCheckCheckpointCCB(PVRSRV_DEVICE_NODE *psDevInfo); +#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */ + +/*! +******************************************************************************* +@Function RGXCheckFirmwareCCB + +@Description Processes all commands that are found in the Firmware CCB. + +@Input psDevInfo pointer to device + +@Return None +******************************************************************************/ +void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! +******************************************************************************* +@Function RGXCheckForStalledClientContexts + +@Description Checks all client contexts, for the device with device info + provided, to see if any are waiting for a fence to signal and + optionally force signalling of the fence for the context which + has been waiting the longest. + This function is called by RGXUpdateHealthStatus() and also + may be invoked from other trigger points. 
+ +@Input psDevInfo pointer to device info +@Input bIgnorePrevious If IMG_TRUE, any stalled contexts will be + indicated immediately, rather than only + checking against any previous stalled contexts + +@Return None +******************************************************************************/ +void RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bIgnorePrevious); + +/*! +******************************************************************************* +@Function RGXUpdateHealthStatus + +@Description Tests a number of conditions which might indicate a fatal error + has occurred in the firmware. The result is stored in the + device node eHealthStatus. + +@Input psDevNode Pointer to device node structure. +@Input bCheckAfterTimePassed When TRUE, the function will also test + for firmware queues and polls not changing + since the previous test. + + Note: if not enough time has passed since the + last call, false positives may occur. + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, + IMG_BOOL bCheckAfterTimePassed); + + +PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM); + +void DumpFWCommonContextInfo(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel); + +/*! +******************************************************************************* +@Function AttachKickResourcesCleanupCtls + +@Description Attaches the cleanup structures to a kick command so that + submission reference counting can be performed when the + firmware processes the command + +@Output apsCleanupCtl Array of CleanupCtl structure pointers to populate. +@Output pui32NumCleanupCtl Number of CleanupCtl structure pointers written out. 
+@Input eDM Which data master is the subject of the command. +@Input bKick TRUE if the client originally wanted to kick this DM. +@Input psRTDataCleanup Optional RTData cleanup associated with the command. +@Input psZBuffer Optional ZSBuffer associated with the command. + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl, + IMG_UINT32 *pui32NumCleanupCtl, + RGXFWIF_DM eDM, + IMG_BOOL bKick, + RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, + RGX_ZSBUFFER_DATA *psZSBuffer, + RGX_ZSBUFFER_DATA *psMSAAScratchBuffer); + +/*! +******************************************************************************* +@Function RGXResetHWRLogs + +@Description Resets the HWR Logs buffer + (the hardware recovery count is not reset) + +@Input psDevNode Pointer to the device + +@Return PVRSRV_ERROR PVRSRV_OK on success. + Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode); + +/*! +******************************************************************************* +@Function RGXGetPhyAddr + +@Description Get the physical address of a PMR at an offset within it + +@Input psPMR PMR of the allocation +@Input ui32LogicalOffset Logical offset + +@Output psPhyAddr Physical address of the allocation + +@Return PVRSRV_ERROR PVRSRV_OK on success. + Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR, + IMG_DEV_PHYADDR *psPhyAddr, + IMG_UINT32 ui32LogicalOffset, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_BOOL *bValid); + +#if defined(PDUMP) +/*! 
+******************************************************************************* +@Function RGXPdumpDrainKCCB + +@Description Wait for the firmware to execute all the commands in the kCCB + +@Input psDevInfo Pointer to the device +@Input ui32WriteOffset Woff we have to POL for the Roff to be equal to + +@Return PVRSRV_ERROR PVRSRV_OK on success. + Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32WriteOffset); +#endif /* PDUMP */ + +/*! +******************************************************************************* +@Function RGXFwRawHeapAllocMap + +@Description Register and maps to device, a raw firmware physheap + +@Return PVRSRV_ERROR PVRSRV_OK on success. + Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32OSID, + IMG_DEV_PHYADDR sDevPAddr, + IMG_UINT64 ui64DevPSize); + +/*! +******************************************************************************* +@Function RGXFwRawHeapUnmapFree + +@Description Unregister and unmap from device, a raw firmware physheap +******************************************************************************/ +void RGXFwRawHeapUnmapFree(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32OSID); + +#if defined(SUPPORT_AUTOVZ_HW_REGS) && !defined(SUPPORT_AUTOVZ) +#error "VZ build configuration error: use of OS scratch registers supported only in AutoVz drivers." 
+#endif + +#if defined(SUPPORT_AUTOVZ_HW_REGS) +/* AutoVz with hw support */ +#define KM_GET_FW_CONNECTION(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH3) +#define KM_GET_OS_CONNECTION(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH2) +#define KM_SET_OS_CONNECTION(val, psDevInfo) OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH2, RGXFW_CONNECTION_OS_##val) + +#define KM_GET_FW_ALIVE_TOKEN(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH1) +#define KM_GET_OS_ALIVE_TOKEN(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH0) +#define KM_SET_OS_ALIVE_TOKEN(val, psDevInfo) OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH0, val) + +#else + +#if defined(SUPPORT_AUTOVZ) +#define KM_GET_FW_ALIVE_TOKEN(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->ui32AliveFwToken) +#define KM_GET_OS_ALIVE_TOKEN(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->ui32AliveOsToken) +#define KM_SET_OS_ALIVE_TOKEN(val, psDevInfo) psDevInfo->psRGXFWIfConnectionCtl->ui32AliveOsToken = val +#endif /* defined(SUPPORT_AUTOVZ) */ + +#if !defined(NO_HARDWARE) && (defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1))) +/* native, static-vz and AutoVz using shared memory */ +#define KM_GET_FW_CONNECTION(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->eConnectionFwState) +#define KM_GET_OS_CONNECTION(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->eConnectionOsState) +#define KM_SET_OS_CONNECTION(val, psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->eConnectionOsState = RGXFW_CONNECTION_OS_##val) +#else +/* dynamic-vz & nohw */ +#define KM_GET_FW_CONNECTION(psDevInfo) (RGXFW_CONNECTION_FW_ACTIVE) +#define KM_GET_OS_CONNECTION(psDevInfo) (RGXFW_CONNECTION_OS_ACTIVE) +#define KM_SET_OS_CONNECTION(val, psDevInfo) +#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (RGX_NUM_OS_SUPPORTED == 1) */ +#endif /* defined(SUPPORT_AUTOVZ_HW_REGS) */ + +#if 
defined(SUPPORT_AUTOVZ) +#define RGX_FIRST_RAW_HEAP_OSID RGXFW_HOST_OS +#else +#define RGX_FIRST_RAW_HEAP_OSID RGXFW_GUEST_OSID_START +#endif + +#define KM_OS_CONNECTION_IS(val, psDevInfo) (KM_GET_OS_CONNECTION(psDevInfo) == RGXFW_CONNECTION_OS_##val) +#define KM_FW_CONNECTION_IS(val, psDevInfo) (KM_GET_FW_CONNECTION(psDevInfo) == RGXFW_CONNECTION_FW_##val) + +#endif /* RGXFWUTILS_H */ +/****************************************************************************** + End of file (rgxfwutils.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxhwperf.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxhwperf.c new file mode 100644 index 000000000000..68b455da6588 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxhwperf.c @@ -0,0 +1,3920 @@ +/*************************************************************************/ /*! +@File +@Title RGX HW Performance implementation +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX HW Performance implementation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ /**************************************************************************/ + +//#define PVR_DPF_FUNCTION_TRACE_ON 1 +#undef PVR_DPF_FUNCTION_TRACE_ON + +#include "img_defs.h" +#include "pvr_debug.h" +#include "rgxdevice.h" +#include "pvrsrv_error.h" +#include "pvr_notifier.h" +#include "osfunc.h" +#include "allocmem.h" + +#include "pvrsrv.h" +#include "pvrsrv_tlstreams.h" +#include "pvrsrv_tlcommon.h" +#include "tlclient.h" +#include "tlstream.h" + +#include "rgxhwperf.h" +#include "rgxapi_km.h" +#include "rgxfwutils.h" +#include "rgxtimecorr.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "pdump_km.h" +#include "pvrsrv_apphint.h" +#include "process_stats.h" +#include "rgx_hwperf_table.h" +#include "rgxinit.h" + +#include "info_page_defs.h" + +/* This is defined by default to enable producer callbacks. + * Clients of the TL interface can disable the use of the callback + * with PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK. */ +#define SUPPORT_TL_PRODUCER_CALLBACK 1 + +/* Maximum enum value to prevent access to RGX_HWPERF_STREAM_ID2_CLIENT stream */ +#define RGX_HWPERF_MAX_STREAM_ID (RGX_HWPERF_STREAM_ID2_CLIENT) + +/* Defines size of buffers returned from acquire/release calls */ +#define FW_STREAM_BUFFER_SIZE (0x80000) +#define HOST_STREAM_BUFFER_SIZE (0x20000) + +/* Must be at least as large as two tl packets of maximum size */ +static_assert(HOST_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1), + "HOST_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)"); +static_assert(FW_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1), + "FW_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)"); + +static inline IMG_UINT32 +RGXHWPerfGetPackets(IMG_UINT32 ui32BytesExp, + IMG_UINT32 ui32AllowedSize, + RGX_PHWPERF_V2_PACKET_HDR psCurPkt ) +{ + IMG_UINT32 sizeSum = 0; + + /* Traverse the array to find how many packets will fit in the available space. 
*/ + while ( sizeSum < ui32BytesExp && + sizeSum + RGX_HWPERF_GET_SIZE(psCurPkt) < ui32AllowedSize ) + { + sizeSum += RGX_HWPERF_GET_SIZE(psCurPkt); + psCurPkt = RGX_HWPERF_GET_NEXT_PACKET(psCurPkt); + } + + return sizeSum; +} + +static inline void +RGXSuspendHWPerfL2DataCopy(PVRSRV_RGXDEV_INFO* psDeviceInfo, + IMG_BOOL bIsReaderConnected) +{ + if (!bIsReaderConnected) + { + PVR_DPF((PVR_DBG_ERROR, "%s : HWPerf FW events enabled but host buffer for FW events is full " + "and no reader is currently connected, suspending event collection. " + "Restart driver and connect a reader to avoid event loss.", __func__)); + psDeviceInfo->bSuspendHWPerfL2DataCopy = IMG_TRUE; + } +} + +/* + RGXHWPerfCopyDataL1toL2 + */ +static IMG_UINT32 RGXHWPerfCopyDataL1toL2(PVRSRV_RGXDEV_INFO* psDeviceInfo, + IMG_BYTE *pbFwBuffer, + IMG_UINT32 ui32BytesExp) +{ + IMG_HANDLE hHWPerfStream = psDeviceInfo->hHWPerfStream; + IMG_BYTE * pbL2Buffer; + IMG_UINT32 ui32L2BufFree; + IMG_UINT32 ui32BytesCopied = 0; + IMG_UINT32 ui32BytesExpMin = RGX_HWPERF_GET_SIZE(RGX_HWPERF_GET_PACKET(pbFwBuffer)); + PVRSRV_ERROR eError; + IMG_BOOL bIsReaderConnected; + + /* HWPERF_MISR_FUNC_DEBUG enables debug code for investigating HWPerf issues */ +#ifdef HWPERF_MISR_FUNC_DEBUG + static IMG_UINT32 gui32Ordinal = IMG_UINT32_MAX; +#endif + + PVR_DPF_ENTERED; + +#ifdef HWPERF_MISR_FUNC_DEBUG + PVR_DPF((PVR_DBG_VERBOSE, "EVENTS to copy from 0x%p length:%05d", + pbFwBuffer, ui32BytesExp)); +#endif + +#ifdef HWPERF_MISR_FUNC_DEBUG + { + /* Check the incoming buffer of data has not lost any packets */ + IMG_BYTE *pbFwBufferIter = pbFwBuffer; + IMG_BYTE *pbFwBufferEnd = pbFwBuffer+ui32BytesExp; + do + { + RGX_HWPERF_V2_PACKET_HDR *asCurPos = RGX_HWPERF_GET_PACKET(pbFwBufferIter); + IMG_UINT32 ui32CurOrdinal = asCurPos->ui32Ordinal; + if (gui32Ordinal != IMG_UINT32_MAX) + { + if ((gui32Ordinal+1) != ui32CurOrdinal) + { + if (gui32Ordinal < ui32CurOrdinal) + { + PVR_DPF((PVR_DBG_WARNING, + "HWPerf [%p] packets lost (%u 
packets) between ordinal %u...%u", + pbFwBufferIter, + ui32CurOrdinal - gui32Ordinal - 1, + gui32Ordinal, + ui32CurOrdinal)); + } + else + { + PVR_DPF((PVR_DBG_WARNING, + "HWPerf [%p] packet ordinal out of sequence last: %u, current: %u", + pbFwBufferIter, + gui32Ordinal, + ui32CurOrdinal)); + } + } + } + gui32Ordinal = asCurPos->ui32Ordinal; + pbFwBufferIter += RGX_HWPERF_GET_SIZE(asCurPos); + } while (pbFwBufferIter < pbFwBufferEnd); + } +#endif + + if (ui32BytesExp > psDeviceInfo->ui32L2BufMaxPacketSize) + { + IMG_UINT32 sizeSum = RGXHWPerfGetPackets(ui32BytesExp, + psDeviceInfo->ui32L2BufMaxPacketSize, + RGX_HWPERF_GET_PACKET(pbFwBuffer)); + + if (0 != sizeSum) + { + ui32BytesExp = sizeSum; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "Failed to write data into host buffer as " + "packet is too big and hence it breaches TL " + "packet size limit (TLBufferSize / 2.5)")); + goto e0; + } + } + + /* Try submitting all data in one TL packet. */ + eError = TLStreamReserve2(hHWPerfStream, + &pbL2Buffer, + (size_t)ui32BytesExp, ui32BytesExpMin, + &ui32L2BufFree, &bIsReaderConnected); + if ( eError == PVRSRV_OK ) + { + OSDeviceMemCopy( pbL2Buffer, pbFwBuffer, (size_t)ui32BytesExp ); + eError = TLStreamCommit(hHWPerfStream, (size_t)ui32BytesExp); + if ( eError != PVRSRV_OK ) + { + PVR_DPF((PVR_DBG_ERROR, + "TLStreamCommit() failed (%d) in %s(), unable to copy packet from L1 to L2 buffer", + eError, __func__)); + goto e0; + } + /* Data were successfully written */ + ui32BytesCopied = ui32BytesExp; + } + else if (eError == PVRSRV_ERROR_STREAM_FULL) + { + /* There was not enough space for all data, copy as much as possible */ + IMG_UINT32 sizeSum = RGXHWPerfGetPackets(ui32BytesExp, ui32L2BufFree, RGX_HWPERF_GET_PACKET(pbFwBuffer)); + + PVR_DPF((PVR_DBG_MESSAGE, "Unable to reserve space (%d) in host buffer on first attempt, remaining free space: %d", ui32BytesExp, ui32L2BufFree)); + + if ( 0 != sizeSum ) + { + eError = TLStreamReserve( hHWPerfStream, &pbL2Buffer, 
(size_t)sizeSum); + + if ( eError == PVRSRV_OK ) + { + OSDeviceMemCopy( pbL2Buffer, pbFwBuffer, (size_t)sizeSum ); + eError = TLStreamCommit(hHWPerfStream, (size_t)sizeSum); + if ( eError != PVRSRV_OK ) + { + PVR_DPF((PVR_DBG_ERROR, + "TLStreamCommit() failed (%d) in %s(), unable to copy packet from L1 to L2 buffer", + eError, __func__)); + goto e0; + } + /* sizeSum bytes of hwperf packets have been successfully written */ + ui32BytesCopied = sizeSum; + } + else if ( PVRSRV_ERROR_STREAM_FULL == eError ) + { + PVR_DPF((PVR_DBG_WARNING, "Cannot write HWPerf packet into host buffer, check data in case of packet loss, remaining free space: %d", ui32L2BufFree)); + RGXSuspendHWPerfL2DataCopy(psDeviceInfo, bIsReaderConnected); + } + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "Cannot find space in host buffer, check data in case of packet loss, remaining free space: %d", ui32L2BufFree)); + RGXSuspendHWPerfL2DataCopy(psDeviceInfo, bIsReaderConnected); + } + } + if ( PVRSRV_OK != eError && /* Some other error occurred */ + PVRSRV_ERROR_STREAM_FULL != eError ) /* Full error handled by caller, we returning the copied bytes count to caller */ + { + PVR_DPF((PVR_DBG_ERROR, + "HWPerf enabled: Unexpected Error ( %d ) while copying FW buffer to TL buffer.", + eError)); + } + +e0: + /* Return the remaining packets left to be transported. */ + PVR_DPF_RETURN_VAL(ui32BytesCopied); +} + + +static INLINE IMG_UINT32 RGXHWPerfAdvanceRIdx( + const IMG_UINT32 ui32BufSize, + const IMG_UINT32 ui32Pos, + const IMG_UINT32 ui32Size) +{ + return ( ui32Pos + ui32Size < ui32BufSize ? 
ui32Pos + ui32Size : 0 ); +} + + +/* + RGXHWPerfDataStore + */ +static IMG_UINT32 RGXHWPerfDataStore(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + IMG_BYTE* psHwPerfInfo = psDevInfo->psRGXFWIfHWPerfBuf; + IMG_UINT32 ui32SrcRIdx, ui32SrcWIdx, ui32SrcWrapCount; + IMG_UINT32 ui32BytesExp = 0, ui32BytesCopied = 0, ui32BytesCopiedSum = 0; +#ifdef HWPERF_MISR_FUNC_DEBUG + IMG_UINT32 ui32BytesExpSum = 0; +#endif + + PVR_DPF_ENTERED; + + /* Caller should check this member is valid before calling */ + PVR_ASSERT(psDevInfo->hHWPerfStream); + + if (psDevInfo->bSuspendHWPerfL2DataCopy) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s : Copying data to host buffer for FW events is " + "suspended. Restart driver if HWPerf FW events are " + "needed", __func__)); + + PVR_DPF_RETURN_VAL(ui32BytesCopiedSum); + } + + /* Get a copy of the current + * read (first packet to read) + * write (empty location for the next write to be inserted) + * WrapCount (size in bytes of the buffer at or past end) + * indexes of the FW buffer */ + ui32SrcRIdx = psFwSysData->ui32HWPerfRIdx; + ui32SrcWIdx = psFwSysData->ui32HWPerfWIdx; + OSMemoryBarrier(); + ui32SrcWrapCount = psFwSysData->ui32HWPerfWrapCount; + + /* Is there any data in the buffer not yet retrieved? */ + if ( ui32SrcRIdx != ui32SrcWIdx ) + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfDataStore EVENTS found srcRIdx:%d srcWIdx: %d", ui32SrcRIdx, ui32SrcWIdx)); + + /* Is the write position higher than the read position? */ + if ( ui32SrcWIdx > ui32SrcRIdx ) + { + /* Yes, buffer has not wrapped */ + ui32BytesExp = ui32SrcWIdx - ui32SrcRIdx; +#ifdef HWPERF_MISR_FUNC_DEBUG + ui32BytesExpSum += ui32BytesExp; +#endif + ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo, + psHwPerfInfo + ui32SrcRIdx, + ui32BytesExp); + + /* Advance the read index and the free bytes counter by the number + * of bytes transported. Items will be left in buffer if not all data + * could be transported. 
Exit to allow buffer to drain. */ + psFwSysData->ui32HWPerfRIdx = RGXHWPerfAdvanceRIdx( + psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx, + ui32BytesCopied); + + ui32BytesCopiedSum += ui32BytesCopied; + } + /* No, buffer has wrapped and write position is behind read position */ + else + { + /* Byte count equal to + * number of bytes from read position to the end of the buffer, + * + data in the extra space in the end of the buffer. */ + ui32BytesExp = ui32SrcWrapCount - ui32SrcRIdx; + +#ifdef HWPERF_MISR_FUNC_DEBUG + ui32BytesExpSum += ui32BytesExp; +#endif + /* Attempt to transfer the packets to the TL stream buffer */ + ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo, + psHwPerfInfo + ui32SrcRIdx, + ui32BytesExp); + + /* Advance read index as before and Update the local copy of the + * read index as it might be used in the last if branch*/ + ui32SrcRIdx = RGXHWPerfAdvanceRIdx( + psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx, + ui32BytesCopied); + + /* Update Wrap Count */ + if ( ui32SrcRIdx == 0) + { + psFwSysData->ui32HWPerfWrapCount = psDevInfo->ui32RGXFWIfHWPerfBufSize; + } + psFwSysData->ui32HWPerfRIdx = ui32SrcRIdx; + + ui32BytesCopiedSum += ui32BytesCopied; + + /* If all the data in the end of the array was copied, try copying + * wrapped data in the beginning of the array, assuming there is + * any and the RIdx was wrapped. */ + if ( (ui32BytesCopied == ui32BytesExp) + && (ui32SrcWIdx > 0) + && (ui32SrcRIdx == 0) ) + { + ui32BytesExp = ui32SrcWIdx; +#ifdef HWPERF_MISR_FUNC_DEBUG + ui32BytesExpSum += ui32BytesExp; +#endif + ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo, + psHwPerfInfo, + ui32BytesExp); + /* Advance the FW buffer read position. 
*/ + psFwSysData->ui32HWPerfRIdx = RGXHWPerfAdvanceRIdx( + psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx, + ui32BytesCopied); + + ui32BytesCopiedSum += ui32BytesCopied; + } + } +#ifdef HWPERF_MISR_FUNC_DEBUG + if (ui32BytesCopiedSum != ui32BytesExpSum) + { + PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfDataStore: FW L1 RIdx:%u. Not all bytes copied to L2: %u bytes out of %u expected", psFwSysData->ui32HWPerfRIdx, ui32BytesCopiedSum, ui32BytesExpSum)); + } +#endif + + } + else + { + PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfDataStore NO EVENTS to transport")); + } + + PVR_DPF_RETURN_VAL(ui32BytesCopiedSum); +} + + +PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE *psDevInfo) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO* psRgxDevInfo; + IMG_UINT32 ui32BytesCopied; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + PVR_DPF_ENTERED; + + PVR_ASSERT(psDevInfo); + psRgxDevInfo = psDevInfo->pvDevice; + + /* Store FW event data if the destination buffer exists.*/ + if (psRgxDevInfo->hHWPerfStream != (IMG_HANDLE) NULL) + { + OSLockAcquire(psRgxDevInfo->hHWPerfLock); + ui32BytesCopied = RGXHWPerfDataStore(psRgxDevInfo); + if ( ui32BytesCopied ) + { /* Signal consumers that packets may be available to read when + * running from a HW kick, not when called by client APP thread + * via the transport layer CB as this can lead to stream + * corruption.*/ + eError = TLStreamSync(psRgxDevInfo->hHWPerfStream); + PVR_ASSERT(eError == PVRSRV_OK); + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfDataStoreCB: Zero bytes copied")); + RGXDEBUG_PRINT_IRQ_COUNT(psRgxDevInfo); + } + OSLockRelease(psRgxDevInfo->hHWPerfLock); + } + + + PVR_DPF_RETURN_OK; +} + + +/* Currently supported by default */ +#if defined(SUPPORT_TL_PRODUCER_CALLBACK) +static PVRSRV_ERROR RGXHWPerfTLCB(IMG_HANDLE hStream, + IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO* psRgxDevInfo = (PVRSRV_RGXDEV_INFO*)pvUser; + + 
PVR_UNREFERENCED_PARAMETER(hStream); + PVR_UNREFERENCED_PARAMETER(ui32Resp); + + PVR_ASSERT(psRgxDevInfo); + + switch (ui32ReqOp) + { + case TL_SOURCECB_OP_CLIENT_EOS: + /* Keep HWPerf resource init check and use of + * resources atomic, they may not be freed during use + */ + + /* This solution is for avoiding a deadlock situation where - + * in DoTLStreamReserve(), writer has acquired HWPerfLock and + * ReadLock and is waiting on ReadPending (which will be reset + * by reader), And + * the reader after setting ReadPending in TLStreamAcquireReadPos(), + * is waiting for HWPerfLock in RGXHWPerfTLCB(). + * So here in RGXHWPerfTLCB(), if HWPerfLock is already acquired we + * will return to the reader without waiting to acquire HWPerfLock. + */ + if (!OSTryLockAcquire(psRgxDevInfo->hHWPerfLock)) + { + PVR_DPF((PVR_DBG_MESSAGE, "hHWPerfLock is already acquired, a write " + "operation might already be in process")); + return PVRSRV_OK; + } + + if (psRgxDevInfo->hHWPerfStream != (IMG_HANDLE) NULL) + { + (void) RGXHWPerfDataStore(psRgxDevInfo); + } + OSLockRelease(psRgxDevInfo->hHWPerfLock); + break; + + default: + break; + } + + return eError; +} +#endif + + +static void RGXHWPerfL1BufferDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + if (psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc) + { + if (psRgxDevInfo->psRGXFWIfHWPerfBuf != NULL) + { + DevmemReleaseCpuVirtAddr(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc); + psRgxDevInfo->psRGXFWIfHWPerfBuf = NULL; + } + DevmemFwUnmapAndFree(psRgxDevInfo, psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc); + psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc = NULL; + } +} + +/*************************************************************************/ /*! +@Function RGXHWPerfInit + +@Description Called during driver init for initialization of HWPerf module + in the Rogue device driver. This function keeps allocated + only the minimal necessary resources, which are required for + functioning of HWPerf server module. 
+ +@Input psRgxDevInfo RGX Device Info + +@Return PVRSRV_ERROR + */ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + PVRSRV_ERROR eError; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + PVR_DPF_ENTERED; + + /* expecting a valid device info */ + PVR_ASSERT(psRgxDevInfo); + + /* Create a lock for HWPerf server module used for serializing, L1 to L2 + * copy calls (e.g. in case of TL producer callback) and L1, L2 resource + * allocation */ + eError = OSLockCreate(&psRgxDevInfo->hHWPerfLock); + PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); + + /* avoid uninitialised data */ + psRgxDevInfo->hHWPerfStream = (IMG_HANDLE) NULL; + psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc = NULL; + + PVR_DPF_RETURN_OK; +} + +/*************************************************************************/ /*! +@Function RGXHWPerfIsInitRequired + +@Description Returns true if the HWperf firmware buffer (L1 buffer) and host + driver TL buffer (L2 buffer) are not already allocated. Caller + must possess hHWPerfLock lock before calling this + function so the state tested is not inconsistent. + +@Input psRgxDevInfo RGX Device Info, on which init requirement is + checked. + +@Return IMG_BOOL Whether initialization (allocation) is required + */ /**************************************************************************/ +static INLINE IMG_BOOL RGXHWPerfIsInitRequired(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hHWPerfLock)); + +#if !defined(NO_HARDWARE) + /* Both L1 and L2 buffers are required (for HWPerf functioning) on driver + * built for actual hardware (TC, EMU, etc.) + */ + if (psRgxDevInfo->hHWPerfStream == (IMG_HANDLE) NULL) + { + /* The allocation API (RGXHWPerfInitOnDemandResources) allocates + * device memory for both L1 and L2 without any checks. Hence, + * either both should be allocated or both be NULL. + * + * In-case this changes in future (for e.g. 
a situation where one + * of the 2 buffers is already allocated and other is required), + * add required checks before allocation calls to avoid memory leaks. + */ + PVR_ASSERT(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL); + return IMG_TRUE; + } + PVR_ASSERT(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc != NULL); +#else + /* On a NO-HW driver L2 is not allocated. So, no point in checking its + * allocation */ + if (psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL) + { + return IMG_TRUE; + } +#endif + return IMG_FALSE; +} +#if !defined(NO_HARDWARE) +static void _HWPerfFWOnReaderOpenCB(void *pvArg) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO* psRgxDevInfo = (PVRSRV_RGXDEV_INFO*) pvArg; + PVRSRV_DEVICE_NODE* psDevNode = (PVRSRV_DEVICE_NODE*) psRgxDevInfo->psDeviceNode; + RGXFWIF_KCCB_CMD sKccbCmd; + IMG_UINT32 ui32kCCBCommandSlot; + + sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG; + sKccbCmd.uCmdData.sHWPerfCtrl.eOpCode = RGXFWIF_HWPERF_CTRL_EMIT_FEATURES_EV; + sKccbCmd.uCmdData.sHWPerfCtrl.ui64Mask = 0; + + eError = RGXScheduleCommandAndGetKCCBSlot(psDevNode->pvDevice, + RGXFWIF_DM_GP, + &sKccbCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to generate feature packet in " + "firmware (error = %d)", __func__, eError)); + return; + } + + eError = RGXWaitForKCCBSlotUpdate(psRgxDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); +} +#endif +/*************************************************************************/ /*! +@Function RGXHWPerfInitOnDemandResources + +@Description This function allocates the HWperf firmware buffer (L1 buffer) + and host driver TL buffer (L2 buffer) if HWPerf is enabled at + driver load time. Otherwise, these buffers are allocated + on-demand as and when required. 
Caller + must possess hHWPerfLock lock before calling this + function so the state tested is not inconsistent if called + outside of driver initialisation. + +@Input psRgxDevInfo RGX Device Info, on which init is done + +@Return PVRSRV_ERROR + */ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo) +{ + IMG_HANDLE hStream = NULL; /* Init required for noHW */ + PVRSRV_ERROR eError; + IMG_UINT32 ui32L2BufferSize = 0; + DEVMEM_FLAGS_T uiMemAllocFlags; + IMG_CHAR pszHWPerfStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5]; /* 5 seems reasonable as it can hold + names up to "hwperf_9999", which is enough */ + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + PVR_DPF_ENTERED; + + /* Create the L1 HWPerf buffer on demand */ + uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) + | PVRSRV_MEMALLOCFLAG_GPU_READABLE + | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE + | PVRSRV_MEMALLOCFLAG_CPU_READABLE + | PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE + | PVRSRV_MEMALLOCFLAG_UNCACHED +#if defined(PDUMP) /* Helps show where the packet data ends */ + | PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC +#else /* Helps show corruption issues in driver-live */ + | PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC +#endif + ; + + /* Allocate HWPerf FW L1 buffer */ + eError = DevmemFwAllocate(psRgxDevInfo, + /* Pad it enough to hold the biggest variable sized packet. */ + psRgxDevInfo->ui32RGXFWIfHWPerfBufSize+RGX_HWPERF_MAX_PACKET_SIZE, + uiMemAllocFlags, + "FwHWPerfBuffer", + &psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate kernel fw hwperf buffer (%u)", + __func__, eError)); + goto e0; + } + + /* Expecting the RuntimeCfg structure is mapped into CPU virtual memory. 
+ * Also, make sure the FW address is not already set */ + PVR_ASSERT(psRgxDevInfo->psRGXFWIfRuntimeCfg && psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf.ui32Addr == 0x0); + + /* Meta cached flag removed from this allocation as it was found + * FW performance was better without it. */ + eError = RGXSetFirmwareAddress(&psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf, + psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc, + 0, RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", e0); + + eError = DevmemAcquireCpuVirtAddr(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc, + (void**)&psRgxDevInfo->psRGXFWIfHWPerfBuf); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire kernel hwperf buffer (%u)", + __func__, eError)); + goto e0; + } + + /* On NO-HW driver, there is no MISR installed to copy data from L1 to L2. Hence, + * L2 buffer is not allocated */ +#if !defined(NO_HARDWARE) + /* Host L2 HWPERF buffer size in bytes must be bigger than the L1 buffer + * accessed by the FW. The MISR may try to write one packet the size of the L1 + * buffer in some scenarios. When logging is enabled in the MISR, it can be seen + * if the L2 buffer hits a full condition. The closer in size the L2 and L1 buffers + * are the more chance of this happening. + * Size chosen to allow MISR to write an L1 sized packet and for the client + * application/daemon to drain a L1 sized packet e.g. ~ 1.5*L1. 
+ */ + ui32L2BufferSize = psRgxDevInfo->ui32RGXFWIfHWPerfBufSize + + (psRgxDevInfo->ui32RGXFWIfHWPerfBufSize>>1); + + /* form the HWPerf stream name, corresponding to this DevNode; which can make sense in the UM */ + if (OSSNPrintf(pszHWPerfStreamName, sizeof(pszHWPerfStreamName), "%s%d", + PVRSRV_TL_HWPERF_RGX_FW_STREAM, + psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier) < 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to form HWPerf stream name for device %d", + __func__, + psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = TLStreamCreate(&hStream, + psRgxDevInfo->psDeviceNode, + pszHWPerfStreamName, + ui32L2BufferSize, + TL_OPMODE_DROP_NEWER | TL_FLAG_NO_SIGNAL_ON_COMMIT, + _HWPerfFWOnReaderOpenCB, psRgxDevInfo, +#if !defined(SUPPORT_TL_PRODUCER_CALLBACK) + NULL, NULL +#else + /* Not enabled by default */ + RGXHWPerfTLCB, psRgxDevInfo +#endif + ); + PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate", e1); + + eError = TLStreamSetNotifStream(hStream, + PVRSRVGetPVRSRVData()->hTLCtrlStream); + /* we can still discover host stream so leave it as is and just log error */ + PVR_LOG_IF_ERROR(eError, "TLStreamSetNotifStream"); + + /* send the event here because host stream is implicitly opened for write + * in TLStreamCreate and TLStreamOpen is never called (so the event is + * never emitted) */ + TLStreamMarkStreamOpen(hStream); + + { + TL_STREAM_INFO sTLStreamInfo; + + TLStreamInfo(hStream, &sTLStreamInfo); + psRgxDevInfo->ui32L2BufMaxPacketSize = sTLStreamInfo.maxTLpacketSize; + + psRgxDevInfo->bSuspendHWPerfL2DataCopy = IMG_FALSE; + } + + PVR_DPF((PVR_DBG_MESSAGE, "HWPerf buffer size in bytes: L1: %d L2: %d", + psRgxDevInfo->ui32RGXFWIfHWPerfBufSize, ui32L2BufferSize)); + +#else /* defined(NO_HARDWARE) */ + PVR_UNREFERENCED_PARAMETER(ui32L2BufferSize); + PVR_UNREFERENCED_PARAMETER(RGXHWPerfTLCB); + PVR_UNREFERENCED_PARAMETER(pszHWPerfStreamName); + ui32L2BufferSize = 0; +#endif + + psRgxDevInfo->hHWPerfStream 
= hStream; + PVR_DPF_RETURN_OK; + +#if !defined(NO_HARDWARE) + e1: /* L2 buffer initialisation failures */ + psRgxDevInfo->hHWPerfStream = NULL; +#endif + e0: /* L1 buffer initialisation failures */ + RGXHWPerfL1BufferDeinit(psRgxDevInfo); + + PVR_DPF_RETURN_RC(eError); +} + + +void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + IMG_HANDLE hStream = psRgxDevInfo->hHWPerfStream; + + PVRSRV_VZ_RETN_IF_MODE(GUEST); + + PVR_DPF_ENTERED; + + PVR_ASSERT(psRgxDevInfo); + psRgxDevInfo->hHWPerfStream = NULL; + + /* Clean up the L2 buffer stream object if allocated */ + if (hStream) + { + /* send the event here because host stream is implicitly opened for + * write in TLStreamCreate and TLStreamClose is never called (so the + * event is never emitted) */ + TLStreamMarkStreamClose(hStream); + TLStreamClose(hStream); + } + + /* Cleanup L1 buffer resources */ + RGXHWPerfL1BufferDeinit(psRgxDevInfo); + + /* Cleanup the HWPerf server module lock resource */ + if (psRgxDevInfo->hHWPerfLock) + { + OSLockDestroy(psRgxDevInfo->hHWPerfLock); + psRgxDevInfo->hHWPerfLock = NULL; + } + + PVR_DPF_RETURN; +} + + +/****************************************************************************** + * RGX HW Performance Profiling Server API(s) + *****************************************************************************/ + +static PVRSRV_ERROR RGXHWPerfCtrlFwBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bToggle, + IMG_UINT64 ui64Mask) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO* psDevice = psDeviceNode->pvDevice; + RGXFWIF_KCCB_CMD sKccbCmd; + IMG_UINT32 ui32kCCBCommandSlot; + + /* If this method is being used whether to enable or disable + * then the hwperf buffers (host and FW) are likely to be needed + * eventually so create them, also helps unit testing. Buffers + * allocated on demand to reduce RAM foot print on systems not + * needing HWPerf resources. + * Obtain lock first, test and init if required. 
*/ + OSLockAcquire(psDevice->hHWPerfLock); + + if (!psDevice->bFirmwareInitialised) + { + psDevice->ui64HWPerfFilter = ui64Mask; // at least set filter + eError = PVRSRV_ERROR_NOT_INITIALISED; + + PVR_DPF((PVR_DBG_ERROR, + "HWPerf has NOT been initialised yet. Mask has been SET to " + "(%" IMG_UINT64_FMTSPECx ")", + ui64Mask)); + + goto unlock_and_return; + } + + if (RGXHWPerfIsInitRequired(psDevice)) + { + eError = RGXHWPerfInitOnDemandResources(psDevice); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation of on-demand HWPerfFW " + "resources failed", __func__)); + goto unlock_and_return; + } + } + + /* Unlock here as no further HWPerf resources are used below that would be + * affected if freed by another thread */ + OSLockRelease(psDevice->hHWPerfLock); + + /* Return if the filter is the same */ + if (!bToggle && psDevice->ui64HWPerfFilter == ui64Mask) + goto return_; + + /* Prepare command parameters ... */ + sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG; + sKccbCmd.uCmdData.sHWPerfCtrl.eOpCode = bToggle ? RGXFWIF_HWPERF_CTRL_TOGGLE : RGXFWIF_HWPERF_CTRL_SET; + sKccbCmd.uCmdData.sHWPerfCtrl.ui64Mask = ui64Mask; + + /* Ask the FW to carry out the HWPerf configuration command */ + eError = RGXScheduleCommandAndGetKCCBSlot(psDevice, + RGXFWIF_DM_GP, + &sKccbCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set new HWPerfFW filter in " + "firmware (error = %d)", __func__, eError)); + goto return_; + } + + psDevice->ui64HWPerfFilter = bToggle ? 
+ psDevice->ui64HWPerfFilter ^ ui64Mask : ui64Mask; + + /* Wait for FW to complete */ + eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", return_); + +#if defined(DEBUG) + if (bToggle) + { + PVR_DPF((PVR_DBG_WARNING, "HWPerfFW events (%" IMG_UINT64_FMTSPECx ") have been TOGGLED", + ui64Mask)); + } + else + { + PVR_DPF((PVR_DBG_WARNING, "HWPerfFW mask has been SET to (%" IMG_UINT64_FMTSPECx ")", + ui64Mask)); + } +#endif + + return PVRSRV_OK; + +unlock_and_return: + OSLockRelease(psDevice->hHWPerfLock); + +return_: + return eError; +} + +#define HWPERF_HOST_MAX_DEFERRED_PACKETS 800 + +static PVRSRV_ERROR RGXHWPerfCtrlHostBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bToggle, + IMG_UINT32 ui32Mask) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO* psDevice = psDeviceNode->pvDevice; +#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) + IMG_UINT32 ui32OldFilter = psDevice->ui32HWPerfHostFilter; +#endif + + OSLockAcquire(psDevice->hLockHWPerfHostStream); + if (psDevice->hHWPerfHostStream == NULL) + { + eError = RGXHWPerfHostInitOnDemandResources(psDevice); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Initialisation of on-demand HWPerfHost resources failed", + __func__)); + OSLockRelease(psDevice->hLockHWPerfHostStream); + return eError; + } + } + + psDevice->ui32HWPerfHostFilter = bToggle ? 
+ psDevice->ui32HWPerfHostFilter ^ ui32Mask : ui32Mask; + + // Deferred creation of host periodic events thread + if (psDevice->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HOST_INFO)) + { + eError = PVRSRVCreateHWPerfHostThread(PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS); + PVR_LOG_IF_ERROR(eError, "PVRSRVCreateHWPerfHostThread"); + } + else if (!(psDevice->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HOST_INFO))) + { + eError = PVRSRVDestroyHWPerfHostThread(); + PVR_LOG_IF_ERROR(eError, "PVRSRVDestroyHWPerfHostThread"); + } + +#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) + // Log deferred events stats if filter changed from non-zero to zero + if ((ui32OldFilter != 0) && (psDevice->ui32HWPerfHostFilter == 0)) + { + PVR_LOG(("HWPerfHost deferred events buffer high-watermark / size: (%u / %u)", + psDevice->ui32DEHighWatermark, HWPERF_HOST_MAX_DEFERRED_PACKETS)); + + PVR_LOG(("HWPerfHost deferred event retries: WaitForAtomicCtxPktHighWatermark(%u) " + "WaitForRightOrdPktHighWatermark(%u)", + psDevice->ui32WaitForAtomicCtxPktHighWatermark, + psDevice->ui32WaitForRightOrdPktHighWatermark)); + } +#endif + + OSLockRelease(psDevice->hLockHWPerfHostStream); + +#if defined(DEBUG) + if (bToggle) + { + PVR_DPF((PVR_DBG_WARNING, "HWPerfHost events (%x) have been TOGGLED", + ui32Mask)); + } + else + { + PVR_DPF((PVR_DBG_WARNING, "HWPerfHost mask has been SET to (%x)", + ui32Mask)); + } +#endif + + return PVRSRV_OK; +} + +static PVRSRV_ERROR RGXHWPerfCtrlClientBuffer(IMG_BOOL bToggle, + IMG_UINT32 ui32InfoPageIdx, + IMG_UINT32 ui32Mask) +{ + PVRSRV_DATA *psData = PVRSRVGetPVRSRVData(); + + PVR_LOG_RETURN_IF_FALSE(ui32InfoPageIdx >= HWPERF_INFO_IDX_START && + ui32InfoPageIdx < HWPERF_INFO_IDX_END, "invalid info" + " page index", PVRSRV_ERROR_INVALID_PARAMS); + + OSLockAcquire(psData->hInfoPageLock); + psData->pui32InfoPage[ui32InfoPageIdx] = bToggle ? 
	       psData->pui32InfoPage[ui32InfoPageIdx] ^ ui32Mask : ui32Mask;
	OSLockRelease(psData->hInfoPageLock);

#if defined(DEBUG)
	if (bToggle)
	{
		PVR_DPF((PVR_DBG_WARNING, "HWPerfClient (%u) events (%x) have been TOGGLED",
				ui32InfoPageIdx, ui32Mask));
	}
	else
	{
		PVR_DPF((PVR_DBG_WARNING, "HWPerfClient (%u) mask has been SET to (%x)",
				ui32InfoPageIdx, ui32Mask));
	}
#endif

	return PVRSRV_OK;
}

/* Append one counter-block descriptor (ID, counter count, unit count) to
 * psBlocks, advancing *pui16Count. Returns IMG_FALSE when the fixed-size
 * array (RGX_HWPERF_MAX_BVNC_BLOCK_LEN entries) is already full. */
static IMG_BOOL RGXServerFeatureFlagsToHWPerfFlagsAddBlock(
	RGX_HWPERF_BVNC_BLOCK * const psBlocks,
	IMG_UINT16 * const pui16Count,
	const IMG_UINT16 ui16BlockID, /* see RGX_HWPERF_CNTBLK_ID */
	const IMG_UINT16 ui16NumCounters,
	const IMG_UINT16 ui16NumBlocks)
{
	const IMG_UINT16 ui16Count = *pui16Count;

	if (ui16Count < RGX_HWPERF_MAX_BVNC_BLOCK_LEN)
	{
		RGX_HWPERF_BVNC_BLOCK * const psBlock = &psBlocks[ui16Count];

		/* If the GROUP is non-zero, convert from e.g. RGX_CNTBLK_ID_USC0 to RGX_CNTBLK_ID_USC_ALL. The table stores the former (plus the
		   number of blocks and counters) but PVRScopeServices expects the latter (plus the number of blocks and counters). The conversion
		   could always be moved to PVRScopeServices, but it's less code this way. */
		psBlock->ui16BlockID = (ui16BlockID & RGX_CNTBLK_ID_GROUP_MASK) ?
		                       (ui16BlockID | RGX_CNTBLK_ID_UNIT_ALL_MASK) : ui16BlockID;
		psBlock->ui16NumCounters = ui16NumCounters;
		psBlock->ui16NumBlocks = ui16NumBlocks;

		*pui16Count = ui16Count + 1;
		return IMG_TRUE;
	}
	return IMG_FALSE;
}

/* Fill psBVNC with the device's BVNC string, the KM feature flags derived
 * from the device feature bits, and the per-BVNC counter-block table built
 * from the firmware counter-block model. */
PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_HWPERF_BVNC *psBVNC)
{
	IMG_PCHAR pszBVNC;
	PVR_LOG_RETURN_IF_FALSE((NULL != psDevInfo), "psDevInfo invalid", PVRSRV_ERROR_INVALID_PARAMS);

	if ((pszBVNC = RGXDevBVNCString(psDevInfo)))
	{
		size_t uiStringLength = OSStringNLength(pszBVNC, RGX_HWPERF_MAX_BVNC_LEN - 1);
		OSStringLCopy(psBVNC->aszBvncString, pszBVNC, uiStringLength + 1);
		/* Zero the remainder of the fixed-size string buffer */
		memset(&psBVNC->aszBvncString[uiStringLength], 0, RGX_HWPERF_MAX_BVNC_LEN - uiStringLength);
	}
	else
	{
		*psBVNC->aszBvncString = 0;
	}

	psBVNC->ui32BvncKmFeatureFlags = 0x0;

	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS))
	{
		psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PERFBUS_FLAG;
	}
	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
	{
		psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_S7_TOP_INFRASTRUCTURE_FLAG;
	}
	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE))
	{
		psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_XT_TOP_INFRASTRUCTURE_FLAG;
	}
	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERF_COUNTER_BATCH))
	{
		psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PERF_COUNTER_BATCH_FLAG;
	}
	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ROGUEXE))
	{
		psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_ROGUEXE_FLAG;
	}
	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, DUST_POWER_ISLAND_S7))
	{
		psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_DUST_POWER_ISLAND_S7_FLAG;
	}
	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBE2_IN_XE))
	{
		psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PBE2_IN_XE_FLAG;
	}

#ifdef SUPPORT_WORKLOAD_ESTIMATION
	/* Not a part of BVNC feature line and so doesn't need the feature supported check */
	psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION;
#endif

	/* Define the HW counter block counts. */
	{
		RGX_HWPERF_BVNC_BLOCK * const psBlocks = psBVNC->aBvncBlocks;
		IMG_UINT16 * const pui16Count = &psBVNC->ui16BvncBlocks;
		const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *asCntBlkTypeModel;
		const IMG_UINT32 ui32CntBlkModelLen = RGXGetHWPerfBlockConfig(&asCntBlkTypeModel);
		IMG_UINT32 ui32BlkCfgIdx;
		size_t uiCount;
		IMG_BOOL bOk = IMG_TRUE;

		// Initialise to zero blocks
		*pui16Count = 0;

		// Add all the blocks
		for (ui32BlkCfgIdx = 0; ui32BlkCfgIdx < ui32CntBlkModelLen; ui32BlkCfgIdx++)
		{
			const RGXFW_HWPERF_CNTBLK_TYPE_MODEL * const psCntBlkInfo = &asCntBlkTypeModel[ui32BlkCfgIdx];
			RGX_HWPERF_CNTBLK_RT_INFO sCntBlkRtInfo;
			/* psCntBlkInfo->ui8NumUnits gives compile-time info. For BVNC agnosticism, we use this: */
			if (psCntBlkInfo->pfnIsBlkPresent(psCntBlkInfo, psDevInfo, &sCntBlkRtInfo))
			{
				bOk &= RGXServerFeatureFlagsToHWPerfFlagsAddBlock(psBlocks, pui16Count, psCntBlkInfo->ui32CntBlkIdBase, psCntBlkInfo->ui8NumCounters, sCntBlkRtInfo.ui32NumUnits);
			}
		}

		/* If this fails, consider why the static_assert didn't fail, and consider increasing RGX_HWPERF_MAX_BVNC_BLOCK_LEN */
		PVR_ASSERT(bOk);

		// Zero the remaining entries
		uiCount = *pui16Count;
		OSDeviceMemSet(&psBlocks[uiCount], 0, (RGX_HWPERF_MAX_BVNC_BLOCK_LEN - uiCount) * sizeof(*psBlocks));
	}

	return PVRSRV_OK;
}

/* Bridge entry point: report the HWPerf BVNC feature flags for the device. */
PVRSRV_ERROR PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(CONNECTION_DATA *psConnection,
					PVRSRV_DEVICE_NODE *psDeviceNode,
					RGX_HWPERF_BVNC *psBVNC)
{
	PVRSRV_RGXDEV_INFO *psDevInfo;
	PVRSRV_ERROR eError;

	/* NOTE(review): the check is on psDeviceNode but the log text says
	 * "psConnection invalid" - message looks wrong; confirm intent. */
	PVR_LOG_RETURN_IF_FALSE((NULL != psDeviceNode), "psConnection invalid", PVRSRV_ERROR_INVALID_PARAMS);

	psDevInfo = psDeviceNode->pvDevice;
	eError = RGXServerFeatureFlagsToHWPerfFlags(psDevInfo, psBVNC);

	return eError;
}

/*
	PVRSRVRGXCtrlHWPerfKM
 */
/* Dispatch a HWPerf filter-control request to the FW, Host or Client stream
 * handler according to eStreamId (signature continues on the next line). */
PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM(
	CONNECTION_DATA
	                *psConnection,
	PVRSRV_DEVICE_NODE  *psDeviceNode,
	RGX_HWPERF_STREAM_ID eStreamId,
	IMG_BOOL             bToggle,
	IMG_UINT64           ui64Mask)
{
	PVR_UNREFERENCED_PARAMETER(psConnection);

	PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);

	PVR_DPF_ENTERED;
	PVR_ASSERT(psDeviceNode);

	if (eStreamId == RGX_HWPERF_STREAM_ID0_FW)
	{
		return RGXHWPerfCtrlFwBuffer(psDeviceNode, bToggle, ui64Mask);
	}
	else if (eStreamId == RGX_HWPERF_STREAM_ID1_HOST)
	{
		return RGXHWPerfCtrlHostBuffer(psDeviceNode, bToggle, (IMG_UINT32) ui64Mask);
	}
	else if (eStreamId == RGX_HWPERF_STREAM_ID2_CLIENT)
	{
		/* For the client stream the 64-bit argument is packed:
		 * high 32 bits = info-page index, low 32 bits = event mask */
		IMG_UINT32 ui32Index = (IMG_UINT32) (ui64Mask >> 32);
		IMG_UINT32 ui32Mask = (IMG_UINT32) ui64Mask;

		return RGXHWPerfCtrlClientBuffer(bToggle, ui32Index, ui32Mask);
	}
	else
	{
		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCtrlHWPerfKM: Unknown stream id."));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	/* NOTE(review): unreachable - every branch above returns */
	PVR_DPF_RETURN_OK;
}

/*
	AppHint interfaces
 */
/* AppHint setter: apply the FW event filter to every device in the list. */
static
PVRSRV_ERROR RGXHWPerfSetFwFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
                                  const void *psPrivate,
                                  IMG_UINT64 ui64Value)
{
	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
	PVRSRV_DEVICE_NODE *psDevNode;
	PVRSRV_ERROR eError;

	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
	PVR_UNREFERENCED_PARAMETER(psPrivate);

	psDevNode = psPVRSRVData->psDeviceNodeList;
	/* Control HWPerf on all the devices */
	while (psDevNode)
	{
		eError = RGXHWPerfCtrlFwBuffer(psDevNode, IMG_FALSE, ui64Value);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "Failed to set HWPerf firmware filter for device (%d)", psDevNode->sDevId.i32UMIdentifier));
			return eError;
		}
		psDevNode = psDevNode->psNext;
	}
	return PVRSRV_OK;
}

/* AppHint getter: read back the FW event filter from the given device. */
static
PVRSRV_ERROR RGXHWPerfReadFwFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
                                   const void *psPrivate,
                                   IMG_UINT64 *pui64Value)
{
	PVRSRV_RGXDEV_INFO *psDevice;

	PVR_UNREFERENCED_PARAMETER(psPrivate);

	if (!psDeviceNode || !psDeviceNode->pvDevice)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	/* Configuration command is applied for all devices, so filter value should
	 * be same for all */
	psDevice = psDeviceNode->pvDevice;
	*pui64Value = psDevice->ui64HWPerfFilter;
	return PVRSRV_OK;
}

/* AppHint setter: apply the Host event filter to every device in the list. */
static
PVRSRV_ERROR RGXHWPerfSetHostFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
                                    const void *psPrivate,
                                    IMG_UINT32 ui32Value)
{
	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
	PVRSRV_DEVICE_NODE *psDevNode;
	PVRSRV_ERROR eError;

	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
	PVR_UNREFERENCED_PARAMETER(psPrivate);

	psDevNode = psPVRSRVData->psDeviceNodeList;
	/* Control HWPerf on all the devices */
	while (psDevNode)
	{
		eError = RGXHWPerfCtrlHostBuffer(psDevNode, IMG_FALSE, ui32Value);
		if (eError != PVRSRV_OK)
		{
			/* NOTE(review): this sets the HOST filter but the message says
			 * "firmware filter" - looks copy-pasted from the FW variant. */
			PVR_DPF((PVR_DBG_ERROR, "Failed to set HWPerf firmware filter for device (%d)", psDevNode->sDevId.i32UMIdentifier));
			return eError;
		}
		psDevNode = psDevNode->psNext;
	}
	return PVRSRV_OK;
}

/* AppHint getter: read back the Host event filter from the given device. */
static
PVRSRV_ERROR RGXHWPerfReadHostFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
                                     const void *psPrivate,
                                     IMG_UINT32 *pui32Value)
{
	PVRSRV_RGXDEV_INFO *psDevice;

	PVR_UNREFERENCED_PARAMETER(psPrivate);

	if (!psDeviceNode || !psDeviceNode->pvDevice)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	psDevice = psDeviceNode->pvDevice;
	*pui32Value = psDevice->ui32HWPerfHostFilter;
	return PVRSRV_OK;
}

/* AppHint getter for a client filter slot; psPrivData carries the info-page
 * index, read under the info-page lock. */
static PVRSRV_ERROR _ReadClientFilter(const PVRSRV_DEVICE_NODE *psDevice,
                                      const void *psPrivData,
                                      IMG_UINT32 *pui32Value)
{
	PVRSRV_DATA *psData = PVRSRVGetPVRSRVData();
	IMG_UINT32 ui32Idx = (IMG_UINT32) (uintptr_t) psPrivData;
	PVR_UNREFERENCED_PARAMETER(psDevice);

	OSLockAcquire(psData->hInfoPageLock);
	*pui32Value = psData->pui32InfoPage[ui32Idx];
	OSLockRelease(psData->hInfoPageLock);

	return PVRSRV_OK;
}

/* AppHint setter for a client filter slot; delegates to the client-buffer
 * control path (non-toggle set). */
static PVRSRV_ERROR _WriteClientFilter(const PVRSRV_DEVICE_NODE *psDevice,
                                       const void *psPrivData,
                                       IMG_UINT32 ui32Value)
{
	IMG_UINT32 ui32Idx = (IMG_UINT32) (uintptr_t) psPrivData;
	PVR_UNREFERENCED_PARAMETER(psDevice);

	return RGXHWPerfCtrlClientBuffer(IMG_FALSE, ui32Idx, ui32Value);
}

/* Register the per-device FW and Host filter AppHint handlers. */
void RGXHWPerfInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode)
{
	PVRSRVAppHintRegisterHandlersUINT64(APPHINT_ID_HWPerfFWFilter,
	                                    RGXHWPerfReadFwFilter,
	                                    RGXHWPerfSetFwFilter,
	                                    psDeviceNode,
	                                    NULL);
	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfHostFilter,
	                                    RGXHWPerfReadHostFilter,
	                                    RGXHWPerfSetHostFilter,
	                                    psDeviceNode,
	                                    NULL);
}

/* Register the driver-wide (no-device) client filter AppHint handlers; the
 * private data for each is the matching info-page slot index. */
void RGXHWPerfClientInitAppHintCallbacks(void)
{
	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_Services,
	                                    _ReadClientFilter,
	                                    _WriteClientFilter,
	                                    APPHINT_OF_DRIVER_NO_DEVICE,
	                                    (void *) HWPERF_FILTER_SERVICES_IDX);
	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_EGL,
	                                    _ReadClientFilter,
	                                    _WriteClientFilter,
	                                    APPHINT_OF_DRIVER_NO_DEVICE,
	                                    (void *) HWPERF_FILTER_EGL_IDX);
	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenGLES,
	                                    _ReadClientFilter,
	                                    _WriteClientFilter,
	                                    APPHINT_OF_DRIVER_NO_DEVICE,
	                                    (void *) HWPERF_FILTER_OPENGLES_IDX);
	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenCL,
	                                    _ReadClientFilter,
	                                    _WriteClientFilter,
	                                    APPHINT_OF_DRIVER_NO_DEVICE,
	                                    (void *) HWPERF_FILTER_OPENCL_IDX);
	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_Vulkan,
	                                    _ReadClientFilter,
	                                    _WriteClientFilter,
	                                    APPHINT_OF_DRIVER_NO_DEVICE,
	                                    (void *) HWPERF_FILTER_VULKAN_IDX);
}

/*
	PVRSRVRGXEnableHWPerfCountersKM
 */
/* Configure and enable ui32ArrayLen HW counter blocks: copies the block
 * configs into FW-visible memory and schedules a CONFIG_ENABLE_BLKS KCCB
 * command (body continues below). */
PVRSRV_ERROR PVRSRVRGXConfigEnableHWPerfCountersKM(
	CONNECTION_DATA          * psConnection,
	PVRSRV_DEVICE_NODE       * psDeviceNode,
	IMG_UINT32                 ui32ArrayLen,
	RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs)
{
	PVRSRV_ERROR               eError = PVRSRV_OK;
	RGXFWIF_KCCB_CMD           sKccbCmd;
	DEVMEM_MEMDESC*            psFwBlkConfigsMemDesc;
	RGX_HWPERF_CONFIG_CNTBLK*  psFwArray;
	IMG_UINT32                 ui32kCCBCommandSlot;
	PVRSRV_RGXDEV_INFO         *psDevice = psDeviceNode->pvDevice;

	PVR_UNREFERENCED_PARAMETER(psConnection);

	PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);

	PVR_LOG_RETURN_IF_FALSE(ui32ArrayLen > 0, "ui32ArrayLen is 0",
	                        PVRSRV_ERROR_INVALID_PARAMS);
	PVR_LOG_RETURN_IF_FALSE(psBlockConfigs != NULL, "psBlockConfigs is NULL",
	                        PVRSRV_ERROR_INVALID_PARAMS);

	PVR_DPF_ENTERED;

	PVR_ASSERT(psDeviceNode);

	/* Fill in the command structure with the parameters needed
	 */
	sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS;
	sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.ui32NumBlocks = ui32ArrayLen;

	/* Temporary FW-visible buffer holding the caller's block configs */
	eError = DevmemFwAllocate(psDevice,
	                          sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen,
	                          PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
	                          PVRSRV_MEMALLOCFLAG_GPU_READABLE |
	                          PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
	                          PVRSRV_MEMALLOCFLAG_CPU_READABLE |
	                          PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
	                          PVRSRV_MEMALLOCFLAG_UNCACHED |
	                          PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
	                          "FwHWPerfCountersConfigBlock",
	                          &psFwBlkConfigsMemDesc);
	PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate");

	eError = RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.sBlockConfigs,
	                               psFwBlkConfigsMemDesc, 0, RFW_FWADDR_FLAG_NONE);
	PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", fail1);

	eError = DevmemAcquireCpuVirtAddr(psFwBlkConfigsMemDesc, (void **)&psFwArray);
	PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail2);

	OSDeviceMemCopy(psFwArray, psBlockConfigs, sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen);
	DevmemPDumpLoadMem(psFwBlkConfigsMemDesc,
	                   0,
	                   sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen,
	                   PDUMP_FLAGS_CONTINUOUS);

	/*PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigEnableHWPerfCountersKM parameters set, calling FW"));*/

	/* Ask the FW to carry out the HWPerf configuration command
	 */
	eError = RGXScheduleCommandAndGetKCCBSlot(psDevice,
	                                          RGXFWIF_DM_GP,
	                                          &sKccbCmd,
	                                          0,
	                                          PDUMP_FLAGS_CONTINUOUS,
	                                          &ui32kCCBCommandSlot);
	/* NOTE(review): failure here jumps to fail2, which skips
	 * DevmemReleaseCpuVirtAddr even though the CPU mapping was acquired
	 * above - fail3 looks like the intended target; confirm. */
	PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", fail2);

	/*PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigEnableHWPerfCountersKM command scheduled for FW"));*/

	/* Wait for FW to complete */
	eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
	PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", fail3);

	/* Release temporary memory used for block configuration
	 */
	RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc);
	DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc);
	DevmemFwUnmapAndFree(psDevice, psFwBlkConfigsMemDesc);

	/*PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigEnableHWPerfCountersKM firmware completed"));*/

	PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks configured and ENABLED", ui32ArrayLen));

	PVR_DPF_RETURN_OK;

fail3:
	DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc);
fail2:
	RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc);
fail1:
	DevmemFwUnmapAndFree(psDevice, psFwBlkConfigsMemDesc);

	PVR_DPF_RETURN_RC(eError);
}


/*
	PVRSRVRGXConfigCustomCountersReadingHWPerfKM
 */
/* Select which custom counters of block ui16CustomBlockID the FW should
 * read: optionally stages the counter ID list in FW-visible memory, then
 * schedules a SELECT_CUSTOM_CNTRS KCCB command and waits for completion. */
PVRSRV_ERROR PVRSRVRGXConfigCustomCountersKM(
	CONNECTION_DATA    * psConnection,
	PVRSRV_DEVICE_NODE * psDeviceNode,
	IMG_UINT16           ui16CustomBlockID,
	IMG_UINT16           ui16NumCustomCounters,
	IMG_UINT32         * pui32CustomCounterIDs)
{
	PVRSRV_ERROR        eError = PVRSRV_OK;
	RGXFWIF_KCCB_CMD    sKccbCmd;
	DEVMEM_MEMDESC*     psFwSelectCntrsMemDesc = NULL;
	IMG_UINT32*         psFwArray;
	IMG_UINT32          ui32kCCBCommandSlot;
	PVRSRV_RGXDEV_INFO  *psDevice = psDeviceNode->pvDevice;

	PVR_UNREFERENCED_PARAMETER(psConnection);

	PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);

	PVR_DPF_ENTERED;

	PVR_ASSERT(psDeviceNode);

	PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVRGXSelectCustomCountersKM: configure block %u to read %u counters", ui16CustomBlockID, ui16NumCustomCounters));

	/* Fill in the command structure with the parameters needed */
	sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS;
	sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.ui16NumCounters = ui16NumCustomCounters;
	sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.ui16CustomBlock = ui16CustomBlockID;

	/* A zero counter count sends the command without a staging buffer */
	if (ui16NumCustomCounters > 0)
	{
		PVR_ASSERT(pui32CustomCounterIDs);

		eError = DevmemFwAllocate(psDevice,
		                          sizeof(IMG_UINT32) * ui16NumCustomCounters,
		                          PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
		                          PVRSRV_MEMALLOCFLAG_GPU_READABLE |
		                          PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
		                          PVRSRV_MEMALLOCFLAG_CPU_READABLE |
		                          PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
		                          PVRSRV_MEMALLOCFLAG_UNCACHED |
		                          PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
		                          "FwHWPerfConfigCustomCounters",
		                          &psFwSelectCntrsMemDesc);
		PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate");

		eError = RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.sCustomCounterIDs,
		                               psFwSelectCntrsMemDesc, 0, RFW_FWADDR_FLAG_NONE);
		PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", fail1);

		eError = DevmemAcquireCpuVirtAddr(psFwSelectCntrsMemDesc, (void **)&psFwArray);
		PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail2);

		OSDeviceMemCopy(psFwArray, pui32CustomCounterIDs, sizeof(IMG_UINT32) * ui16NumCustomCounters);
		DevmemPDumpLoadMem(psFwSelectCntrsMemDesc,
		                   0,
		                   sizeof(IMG_UINT32) * ui16NumCustomCounters,
		                   PDUMP_FLAGS_CONTINUOUS);
	}

	/* Push in the KCCB the command to configure the custom counters block */
	eError = RGXScheduleCommandAndGetKCCBSlot(psDevice,
	                                          RGXFWIF_DM_GP,
	                                          &sKccbCmd,
	                                          0,
	                                          PDUMP_FLAGS_CONTINUOUS,
	                                          &ui32kCCBCommandSlot);
	PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", fail3);

	PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXSelectCustomCountersKM: Command scheduled"));

	/* Wait for FW to complete */
	eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
	PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", fail3);

	PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXSelectCustomCountersKM: FW operation completed"));

	if (ui16NumCustomCounters > 0)
	{
		/* Release temporary memory used for block configuration */
		RGXUnsetFirmwareAddress(psFwSelectCntrsMemDesc);
		DevmemReleaseCpuVirtAddr(psFwSelectCntrsMemDesc);
		DevmemFwUnmapAndFree(psDevice, psFwSelectCntrsMemDesc);
	}

	PVR_DPF((PVR_DBG_MESSAGE, "HWPerf custom counters %u reading will be sent with the next HW events", ui16NumCustomCounters));

	PVR_DPF_RETURN_OK;

	/* Error paths guard on the memdesc since it is only allocated when
	 * ui16NumCustomCounters > 0 */
fail3:
	if (psFwSelectCntrsMemDesc)
	{
		DevmemReleaseCpuVirtAddr(psFwSelectCntrsMemDesc);
	}
fail2:
	if (psFwSelectCntrsMemDesc)
	{
		RGXUnsetFirmwareAddress(psFwSelectCntrsMemDesc);
	}
fail1:
	if (psFwSelectCntrsMemDesc)
	{
		DevmemFwUnmapAndFree(psDevice, psFwSelectCntrsMemDesc);
	}

	PVR_DPF_RETURN_RC(eError);
}
/*
	PVRSRVRGXDisableHWPerfcountersKM
 */
/* Enable or disable (bEnable) the given list of counter block IDs via a
 * HWPERF_CTRL_BLKS KCCB command; ui32ArrayLen is bounded by
 * RGXFWIF_HWPERF_CTRL_BLKS_MAX since the IDs travel inside the command. */
PVRSRV_ERROR PVRSRVRGXCtrlHWPerfCountersKM(
	CONNECTION_DATA    * psConnection,
	PVRSRV_DEVICE_NODE * psDeviceNode,
	IMG_BOOL             bEnable,
	IMG_UINT32           ui32ArrayLen,
	IMG_UINT16         * psBlockIDs)
{
	PVRSRV_ERROR        eError = PVRSRV_OK;
	RGXFWIF_KCCB_CMD    sKccbCmd;
	IMG_UINT32          ui32kCCBCommandSlot;
	PVRSRV_RGXDEV_INFO  *psDevice;

	PVR_UNREFERENCED_PARAMETER(psConnection);

	PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);

	PVR_DPF_ENTERED;

	PVR_LOG_RETURN_IF_INVALID_PARAM(psBlockIDs != NULL, "psBlockIDs");
	PVR_LOG_RETURN_IF_INVALID_PARAM((ui32ArrayLen>0) && (ui32ArrayLen <= RGXFWIF_HWPERF_CTRL_BLKS_MAX), "ui32ArrayLen");

	PVR_ASSERT(psDeviceNode);
	psDevice = psDeviceNode->pvDevice;

	/* Fill in the command structure with the parameters needed
	 */
	sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS;
	sKccbCmd.uCmdData.sHWPerfCtrlBlks.bEnable = bEnable;
	sKccbCmd.uCmdData.sHWPerfCtrlBlks.ui32NumBlocks = ui32ArrayLen;

	OSDeviceMemCopy(sKccbCmd.uCmdData.sHWPerfCtrlBlks.aeBlockIDs, psBlockIDs, sizeof(IMG_UINT16) * ui32ArrayLen);

	/* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXCtrlHWPerfCountersKM parameters set, calling FW")); */

	/* Ask the FW to carry out the HWPerf configuration command
	 */
	eError = RGXScheduleCommandAndGetKCCBSlot(psDevice,
	                                          RGXFWIF_DM_GP,
	                                          &sKccbCmd,
	                                          0,
	                                          PDUMP_FLAGS_CONTINUOUS,
	                                          &ui32kCCBCommandSlot);
	PVR_LOG_RETURN_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot");

	/* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXCtrlHWPerfCountersKM command scheduled for FW")); */

	/* Wait for FW to complete */
	eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
	PVR_LOG_RETURN_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate");

	/* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXCtrlHWPerfCountersKM firmware completed")); */

#if defined(DEBUG)
	if (bEnable)
		PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks have been ENABLED", ui32ArrayLen));
	else
		PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks have been DISABLED", ui32ArrayLen));
#endif

	PVR_DPF_RETURN_OK;
}

/* Clamp a Host stream AppHint size (in KB) to [MIN, MAX] and convert it to
 * bytes (<<10); 0 selects the driver default. */
static INLINE IMG_UINT32 _RGXHWPerfFixBufferSize(IMG_UINT32 ui32BufSizeKB)
{
	if (ui32BufSizeKB > HWPERF_HOST_TL_STREAM_SIZE_MAX)
	{
		/* Size specified as a AppHint but it is too big */
		PVR_DPF((PVR_DBG_WARNING,
		         "RGXHWPerfHostInit: HWPerf Host buffer size "
		         "value (%u) too big, using maximum (%u)",
		         ui32BufSizeKB, HWPERF_HOST_TL_STREAM_SIZE_MAX));
		return HWPERF_HOST_TL_STREAM_SIZE_MAX<<10;
	}
	else if (ui32BufSizeKB >= HWPERF_HOST_TL_STREAM_SIZE_MIN)
	{
		return ui32BufSizeKB<<10;
	}
	else if (ui32BufSizeKB > 0)
	{
		/* Size specified as a AppHint but it is too small */
		PVR_DPF((PVR_DBG_WARNING,
		         "RGXHWPerfHostInit: HWPerf Host buffer size "
		         "value (%u) too small, using minimum (%u)",
		         ui32BufSizeKB, HWPERF_HOST_TL_STREAM_SIZE_MIN));
		return HWPERF_HOST_TL_STREAM_SIZE_MIN<<10;
	}
	else
	{
		/* 0 size implies AppHint not set or is set to zero,
		 * use default size from driver constant.
*/ + return HWPERF_HOST_TL_STREAM_SIZE_DEFAULT<<10; + } +} + +/****************************************************************************** + * RGX HW Performance Host Stream API + *****************************************************************************/ + +/*************************************************************************/ /*! +@Function RGXHWPerfHostInit + +@Description Called during driver init for initialisation of HWPerfHost + stream in the Rogue device driver. This function keeps allocated + only the minimal necessary resources, which are required for + functioning of HWPerf server module. + +@Return PVRSRV_ERROR + */ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfHostInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32BufSizeKB) +{ + PVRSRV_ERROR eError; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + PVR_ASSERT(psRgxDevInfo != NULL); + + eError = OSLockCreate(&psRgxDevInfo->hLockHWPerfHostStream); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", error); + + psRgxDevInfo->hHWPerfHostStream = NULL; + psRgxDevInfo->ui32HWPerfHostFilter = 0; /* disable all events */ + psRgxDevInfo->ui32HWPerfHostNextOrdinal = 1; + psRgxDevInfo->ui32HWPerfHostBufSize = _RGXHWPerfFixBufferSize(ui32BufSizeKB); + psRgxDevInfo->pvHostHWPerfMISR = NULL; + psRgxDevInfo->pui8DeferredEvents = NULL; + /* First packet has ordinal=1, so LastOrdinal=0 will ensure ordering logic + * is maintained */ + psRgxDevInfo->ui32HWPerfHostLastOrdinal = 0; + psRgxDevInfo->hHWPerfHostSpinLock = NULL; + +error: + return eError; +} + +static void _HWPerfHostOnConnectCB(void *pvArg) +{ + PVRSRV_RGXDEV_INFO* psDevice; + PVRSRV_ERROR eError; + + RGXSRV_HWPERF_CLK_SYNC(pvArg); + + psDevice = (PVRSRV_RGXDEV_INFO*) pvArg; + + /* Handle the case where the RGX_HWPERF_HOST_INFO bit is set in the event filter + * before the host stream is opened for reading by a HWPerf client. 
+ * Which can result in the host periodic thread sleeping for a long duration as TLStreamIsOpenForReading may return false. */ + if (psDevice->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HOST_INFO)) + { + eError = PVRSRVCreateHWPerfHostThread(PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS); + PVR_LOG_IF_ERROR(eError, "PVRSRVCreateHWPerfHostThread"); + } +} + +/* Avoiding a holder struct using fields below, as a struct gets along padding, + * packing, and other compiler dependencies, and we want a continuous stream of + * bytes for (header+data) for use in TLStreamWrite. See + * _HWPerfHostDeferredEventsEmitter(). + * + * A deferred (UFO) packet is represented in memory as: + * - IMG_BOOL --> Indicates whether a packet write is + * "complete" by atomic context or not. + * - RGX_HWPERF_V2_PACKET_HDR --. + * |--> Fed together to TLStreamWrite for + * | deferred packet to be written to + * | HWPerfHost buffer + * - RGX_HWPERF_HOST_UFO_DATA---` + * + * PS: Currently only UFO events are supported in deferred list */ +#define HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE (sizeof(IMG_BOOL) +\ + sizeof(RGX_HWPERF_V2_PACKET_HDR) +\ + sizeof(RGX_HWPERF_HOST_UFO_DATA)) + +static void RGX_MISRHandler_HWPerfPostDeferredHostEvents(void *pvData); +static void _HWPerfHostDeferredEventsEmitter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_UINT32 ui32MaxOrdinal); + +/*************************************************************************/ /*! +@Function RGXHWPerfHostInitOnDemandResources + +@Description This function allocates the HWPerfHost buffer if HWPerf is + enabled at driver load time. Otherwise, these buffers are + allocated on-demand as and when required. 
+ +@Return PVRSRV_ERROR + */ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + PVRSRV_ERROR eError; + IMG_CHAR pszHWPerfHostStreamName[sizeof(PVRSRV_TL_HWPERF_HOST_SERVER_STREAM) + 5]; /* 5 makes space up to "hwperf_host_9999" streams */ + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* form the HWPerf host stream name, corresponding to this DevNode; which can make sense in the UM */ + if (OSSNPrintf(pszHWPerfHostStreamName, sizeof(pszHWPerfHostStreamName), "%s%d", + PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, + psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier) < 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to form HWPerf host stream name for device %d", + __func__, + psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = TLStreamCreate(&psRgxDevInfo->hHWPerfHostStream, + psRgxDevInfo->psDeviceNode, + pszHWPerfHostStreamName, psRgxDevInfo->ui32HWPerfHostBufSize, + TL_OPMODE_DROP_NEWER, + _HWPerfHostOnConnectCB, psRgxDevInfo, + NULL, NULL); + PVR_LOG_RETURN_IF_ERROR(eError, "TLStreamCreate"); + + eError = TLStreamSetNotifStream(psRgxDevInfo->hHWPerfHostStream, + PVRSRVGetPVRSRVData()->hTLCtrlStream); + /* we can still discover host stream so leave it as is and just log error */ + PVR_LOG_IF_ERROR(eError, "TLStreamSetNotifStream"); + + /* send the event here because host stream is implicitly opened for write + * in TLStreamCreate and TLStreamOpen is never called (so the event is + * never emitted) */ + eError = TLStreamMarkStreamOpen(psRgxDevInfo->hHWPerfHostStream); + PVR_LOG_IF_ERROR(eError, "TLStreamMarkStreamOpen"); + + /* HWPerfHost deferred events specific initialization */ + eError = OSInstallMISR(&psRgxDevInfo->pvHostHWPerfMISR, + RGX_MISRHandler_HWPerfPostDeferredHostEvents, + psRgxDevInfo, + "RGX_HWPerfDeferredEventPoster"); + PVR_LOG_GOTO_IF_ERROR(eError, "OSInstallMISR", 
err_install_misr); + + eError = OSSpinLockCreate(&psRgxDevInfo->hHWPerfHostSpinLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSSpinLockCreate", err_spinlock_create); + + psRgxDevInfo->pui8DeferredEvents = OSAllocMem(HWPERF_HOST_MAX_DEFERRED_PACKETS + * HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE); + if (NULL == psRgxDevInfo->pui8DeferredEvents) + { + PVR_DPF((PVR_DBG_ERROR, "%s: OUT OF MEMORY. Could not allocate memory for " + "HWPerfHost deferred events array", __func__)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_alloc_deferred_events; + } + psRgxDevInfo->ui16DEReadIdx = 0; + psRgxDevInfo->ui16DEWriteIdx = 0; +#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) + psRgxDevInfo->ui32DEHighWatermark = 0; + psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark = 0; + psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark = 0; +#endif + + PVR_DPF((DBGPRIV_MESSAGE, "HWPerf Host buffer size is %uKB", + psRgxDevInfo->ui32HWPerfHostBufSize)); + + return PVRSRV_OK; + +err_alloc_deferred_events: + OSSpinLockDestroy(psRgxDevInfo->hHWPerfHostSpinLock); + psRgxDevInfo->hHWPerfHostSpinLock = NULL; + +err_spinlock_create: + (void) OSUninstallMISR(psRgxDevInfo->pvHostHWPerfMISR); + psRgxDevInfo->pvHostHWPerfMISR = NULL; + +err_install_misr: + TLStreamMarkStreamClose(psRgxDevInfo->hHWPerfHostStream); + TLStreamClose(psRgxDevInfo->hHWPerfHostStream); + psRgxDevInfo->hHWPerfHostStream = NULL; + + return eError; +} + +void RGXHWPerfHostDeInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + PVRSRV_VZ_RETN_IF_MODE(GUEST); + + PVR_ASSERT (psRgxDevInfo); + + if (psRgxDevInfo->pui8DeferredEvents) + { + OSFreeMem(psRgxDevInfo->pui8DeferredEvents); + psRgxDevInfo->pui8DeferredEvents = NULL; + } + + if (psRgxDevInfo->hHWPerfHostSpinLock) + { + OSSpinLockDestroy(psRgxDevInfo->hHWPerfHostSpinLock); + psRgxDevInfo->hHWPerfHostSpinLock = NULL; + } + + if (psRgxDevInfo->pvHostHWPerfMISR) + { + (void) OSUninstallMISR(psRgxDevInfo->pvHostHWPerfMISR); + psRgxDevInfo->pvHostHWPerfMISR = NULL; + } + + if 
(psRgxDevInfo->hHWPerfHostStream) + { + /* send the event here because host stream is implicitly opened for + * write in TLStreamCreate and TLStreamClose is never called (so the + * event is never emitted) */ + TLStreamMarkStreamClose(psRgxDevInfo->hHWPerfHostStream); + TLStreamClose(psRgxDevInfo->hHWPerfHostStream); + psRgxDevInfo->hHWPerfHostStream = NULL; + } + + if (psRgxDevInfo->hLockHWPerfHostStream) + { + OSLockDestroy(psRgxDevInfo->hLockHWPerfHostStream); + psRgxDevInfo->hLockHWPerfHostStream = NULL; + } +} + +inline void RGXHWPerfHostSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Filter) +{ + PVRSRV_VZ_RETN_IF_MODE(GUEST); + psRgxDevInfo->ui32HWPerfHostFilter = ui32Filter; +} + +inline IMG_BOOL RGXHWPerfHostIsEventEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, RGX_HWPERF_HOST_EVENT_TYPE eEvent) +{ + PVR_ASSERT(psRgxDevInfo); + return (psRgxDevInfo->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(eEvent)) ? IMG_TRUE : IMG_FALSE; +} + +#define MAX_RETRY_COUNT 80 +static inline void _PostFunctionPrologue(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_UINT32 ui32CurrentOrdinal) +{ + IMG_UINT32 ui32Retry = MAX_RETRY_COUNT; + + PVR_ASSERT(psRgxDevInfo->hLockHWPerfHostStream != NULL); + PVR_ASSERT(psRgxDevInfo->hHWPerfHostStream != NULL); + + OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream); + + /* First, flush pending events (if any) */ + _HWPerfHostDeferredEventsEmitter(psRgxDevInfo, ui32CurrentOrdinal); + + while ((ui32CurrentOrdinal != psRgxDevInfo->ui32HWPerfHostLastOrdinal + 1) + && (--ui32Retry != 0)) + { + /* Release lock and give a chance to a waiting context to emit the + * expected packet */ + OSLockRelease (psRgxDevInfo->hLockHWPerfHostStream); + OSSleepms(100); + OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream); + } + +#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) + if ((ui32Retry == 0) && !(psRgxDevInfo->bWarnedPktOrdinalBroke)) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Will warn only once! 
 Potential packet(s) lost after ordinal"
		         " %u (Current ordinal = %u)",
		         __func__,
		         psRgxDevInfo->ui32HWPerfHostLastOrdinal, ui32CurrentOrdinal));
		psRgxDevInfo->bWarnedPktOrdinalBroke = IMG_TRUE;
	}

	if (psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark < (MAX_RETRY_COUNT - ui32Retry))
	{
		psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark = MAX_RETRY_COUNT - ui32Retry;
	}
#endif
}

/* Record the ordinal just emitted and release the Host stream lock taken by
 * _PostFunctionPrologue. */
static inline void _PostFunctionEpilogue(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
                                         IMG_UINT32 ui32CurrentOrdinal)
{
	/* update last ordinal emitted */
	psRgxDevInfo->ui32HWPerfHostLastOrdinal = ui32CurrentOrdinal;

	PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hLockHWPerfHostStream));
	OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream);
}

/* Reserve ui32Size bytes in the Host TL stream; returns NULL (packet is
 * dropped) when the stream has no space. */
static inline IMG_UINT8 *_ReserveHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Size)
{
	IMG_UINT8 *pui8Dest;

	PVRSRV_ERROR eError = TLStreamReserve(psRgxDevInfo->hHWPerfHostStream,
	                                      &pui8Dest, ui32Size);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not reserve space in %s buffer"
		        " (%d). Dropping packet.",
		        __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError));
		return NULL;
	}
	PVR_ASSERT(pui8Dest != NULL);

	return pui8Dest;
}

/* Commit ui32Size previously reserved bytes to the Host TL stream; failure
 * is only logged. */
static inline void _CommitHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Size)
{
	PVRSRV_ERROR eError = TLStreamCommit(psRgxDevInfo->hHWPerfHostStream,
	                                     ui32Size);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not commit data to %s"
		        " (%d)", __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError));
	}
}

/* Returns IMG_TRUE if packet write passes, IMG_FALSE otherwise */
static inline IMG_BOOL _WriteHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
                                          RGX_HWPERF_V2_PACKET_HDR *psHeader)
{
	PVRSRV_ERROR eError = TLStreamWrite(psRgxDevInfo->hHWPerfHostStream,
	                                    IMG_OFFSET_ADDR(psHeader, 0), psHeader->ui32Size);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not write packet in %s buffer"
		        " (%d). Dropping packet.",
		        __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError));
	}

	/* Regardless of whether write passed/failed, we consider it "written" */
	psRgxDevInfo->ui32HWPerfHostLastOrdinal = psHeader->ui32Ordinal;

	return (eError == PVRSRV_OK);
}

/* Helper macros for deferred events operations */
#define GET_DE_NEXT_IDX(_curridx) ((_curridx + 1) % HWPERF_HOST_MAX_DEFERRED_PACKETS)
#define GET_DE_EVENT_BASE(_idx)   (IMG_OFFSET_ADDR(psRgxDevInfo->pui8DeferredEvents, \
		(_idx) * HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE))

#define GET_DE_EVENT_WRITE_STATUS(_base) ((IMG_BOOL*)((void *) (_base)))
#define GET_DE_EVENT_DATA(_base)         (IMG_OFFSET_ADDR((_base), sizeof(IMG_BOOL)))

/* Emits HWPerfHost event packets present in the deferred list stopping when one
 * of the following cases is hit:
 * case 1: Packet ordering breaks i.e. a packet found doesn't meet ordering
 *         criteria (ordinal == last_ordinal + 1)
 *
 * case 2: A packet with ordinal > ui32MaxOrdinal is found
 *
 * case 3: Deferred list's (read == write) i.e. no more deferred packets.
 *
 * NOTE: Caller must possess the hLockHWPerfHostStream lock before calling
 * this function.*/
static void _HWPerfHostDeferredEventsEmitter(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
                                             IMG_UINT32 ui32MaxOrdinal)
{
	RGX_HWPERF_V2_PACKET_HDR *psHeader;
	IMG_UINT32 ui32Retry;
	IMG_UINT8  *pui8DeferredEvent;
	IMG_BOOL   *pbPacketWritten;
	IMG_BOOL   bWritePassed;

	PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hLockHWPerfHostStream));

	while (psRgxDevInfo->ui16DEReadIdx != psRgxDevInfo->ui16DEWriteIdx)
	{
		pui8DeferredEvent = GET_DE_EVENT_BASE(psRgxDevInfo->ui16DEReadIdx);
		pbPacketWritten = GET_DE_EVENT_WRITE_STATUS(pui8DeferredEvent);
		psHeader = (RGX_HWPERF_V2_PACKET_HDR*) GET_DE_EVENT_DATA(pui8DeferredEvent);

		for (ui32Retry = MAX_RETRY_COUNT; !(*pbPacketWritten) && (ui32Retry != 0); ui32Retry--)
		{
			/* Packet not yet written, re-check after a while. Wait for a short period as
			 * atomic contexts are generally expected to finish fast */
			OSWaitus(10);
		}

#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
		if ((ui32Retry == 0) && !(psRgxDevInfo->bWarnedAtomicCtxPktLost))
		{
			PVR_DPF((PVR_DBG_WARNING,
			         "%s: Will warn only once. Dropping a deferred packet as atomic context"
			         " took too long to write it",
			         __func__));
			psRgxDevInfo->bWarnedAtomicCtxPktLost = IMG_TRUE;
		}

		if (psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark < (MAX_RETRY_COUNT - ui32Retry))
		{
			psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark = MAX_RETRY_COUNT - ui32Retry;
		}
#endif

		if (*pbPacketWritten)
		{
			if ((psHeader->ui32Ordinal > ui32MaxOrdinal) ||
			    (psHeader->ui32Ordinal != (psRgxDevInfo->ui32HWPerfHostLastOrdinal + 1)))
			{
				/* Leave remaining events to be emitted by next call to this function */
				break;
			}
			bWritePassed = _WriteHWPerfStream(psRgxDevInfo, psHeader);
		}
		else
		{
			PVR_DPF((PVR_DBG_MESSAGE, "%s: Atomic context packet lost!", __func__));
			bWritePassed = IMG_FALSE;
		}

		/* Move on to next packet */
		psRgxDevInfo->ui16DEReadIdx = GET_DE_NEXT_IDX(psRgxDevInfo->ui16DEReadIdx);

		if (!bWritePassed // if write failed
		    && ui32MaxOrdinal == IMG_UINT32_MAX // and we are from MISR
		    && psRgxDevInfo->ui16DEReadIdx != psRgxDevInfo->ui16DEWriteIdx) // and there are more events
		{
			/* Stop emitting here and re-schedule MISR */
			OSScheduleMISR(psRgxDevInfo->pvHostHWPerfMISR);
			break;
		}
	}
}

/* MISR entry point: drain the whole deferred list under the stream lock. */
static void RGX_MISRHandler_HWPerfPostDeferredHostEvents(void *pvData)
{
	PVRSRV_RGXDEV_INFO *psRgxDevInfo = pvData;

	OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream);

	/* Since we're called from MISR, there is no upper cap of ordinal to be emitted.
	 * Send IMG_UINT32_MAX to signify all possible packets. */
	_HWPerfHostDeferredEventsEmitter(psRgxDevInfo, IMG_UINT32_MAX);

	OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream);
}

#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
/* Track the maximum occupancy of the deferred-events ring (debug only). */
static inline void _UpdateDEBufferHighWatermark(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
{
	IMG_UINT32 ui32DEWatermark;
	IMG_UINT16 ui16LRead = psRgxDevInfo->ui16DEReadIdx;
	IMG_UINT16 ui16LWrite = psRgxDevInfo->ui16DEWriteIdx;

	if (ui16LWrite >= ui16LRead)
	{
		ui32DEWatermark = ui16LWrite - ui16LRead;
	}
	else
	{
		/* Write pointer has wrapped around the ring */
		ui32DEWatermark = (HWPERF_HOST_MAX_DEFERRED_PACKETS - ui16LRead) + (ui16LWrite);
	}

	if (ui32DEWatermark > psRgxDevInfo->ui32DEHighWatermark)
	{
		psRgxDevInfo->ui32DEHighWatermark = ui32DEWatermark;
	}
}
#endif

/* @Description Gets the data/members that concerns the accuracy of a packet in HWPerfHost
                buffer. Since the data returned by this function is required in both, an
                atomic as well as a process/sleepable context, it is protected under spinlock

   @Output      pui32Ordinal Pointer to ordinal number assigned to this packet
   @Output      pui64Timestamp Timestamp value for this packet
   @Output      ppui8Dest If the current context cannot sleep, pointer to a place in
                          deferred events buffer where the packet data should be written.
                          Don't care, otherwise.
 */
static void _GetHWPerfHostPacketSpecifics(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
                                          IMG_UINT32 *pui32Ordinal,
                                          IMG_UINT64 *pui64Timestamp,
                                          IMG_UINT8 **ppui8Dest,
                                          IMG_BOOL bSleepAllowed)
{
	OS_SPINLOCK_FLAGS uiFlags;

	/* Spin lock is required to avoid getting scheduled out by a higher priority
	 * context while we're getting header specific details and packet place in
	 * HWPerf buffer (when in atomic context) for ourselves */
	OSSpinLockAcquire(psRgxDevInfo->hHWPerfHostSpinLock, uiFlags);

	*pui32Ordinal = psRgxDevInfo->ui32HWPerfHostNextOrdinal++;
	*pui64Timestamp = RGXTimeCorrGetClockus64();

	if (!bSleepAllowed)
	{
		/* We're in an atomic context. So return the next position available in
		 * deferred events buffer */
		IMG_UINT16 ui16NewWriteIdx;
		IMG_BOOL *pbPacketWritten;

		PVR_ASSERT(ppui8Dest != NULL);

		ui16NewWriteIdx = GET_DE_NEXT_IDX(psRgxDevInfo->ui16DEWriteIdx);
		if (ui16NewWriteIdx == psRgxDevInfo->ui16DEReadIdx)
		{
			/* This shouldn't happen. HWPERF_HOST_MAX_DEFERRED_PACKETS should be
			 * big enough to avoid any such scenario */
#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
			/* PVR_LOG/printk isn't recommended in atomic context. Perhaps we'll do
			 * this debug output here when trace_printk support is added to DDK */
			// PVR_LOG(("%s: No more space in deferred events buffer (%u/%u) W=%u,R=%u",
			//         __func__, psRgxDevInfo->ui32DEHighWatermark,
			//         HWPERF_HOST_MAX_DEFERRED_PACKETS, psRgxDevInfo->ui16DEWriteIdx,
			//         psRgxDevInfo->ui16DEReadIdx));
#endif
			*ppui8Dest = NULL;
		}
		else
		{
			/* Return the position where deferred event would be written */
			*ppui8Dest = GET_DE_EVENT_BASE(psRgxDevInfo->ui16DEWriteIdx);

			/* Make sure packet write "state" is "write-pending" _before_ moving write
			 * pointer forward */
			pbPacketWritten = GET_DE_EVENT_WRITE_STATUS(*ppui8Dest);
			*pbPacketWritten = IMG_FALSE;

			psRgxDevInfo->ui16DEWriteIdx = ui16NewWriteIdx;

#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
			_UpdateDEBufferHighWatermark(psRgxDevInfo);
#endif
		}
	}

	OSSpinLockRelease(psRgxDevInfo->hHWPerfHostSpinLock, uiFlags);
}

/* Populate a V2 packet header in place at pui8Dest (ordinal, timestamp,
 * signature and type id); continues beyond this chunk. */
static inline void _SetupHostPacketHeader(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
                                          IMG_UINT8 *pui8Dest,
                                          RGX_HWPERF_HOST_EVENT_TYPE eEvType,
                                          IMG_UINT32 ui32Size,
                                          IMG_UINT32 ui32Ordinal,
                                          IMG_UINT64 ui64Timestamp)
{
	RGX_HWPERF_V2_PACKET_HDR *psHeader = IMG_OFFSET_ADDR(pui8Dest, 0);

	PVR_ASSERT(ui32Size<=RGX_HWPERF_MAX_PACKET_SIZE);

	psHeader->ui32Ordinal = ui32Ordinal;
	psHeader->ui64Timestamp = ui64Timestamp;
	psHeader->ui32Sig = HWPERF_PACKET_V2B_SIG;
	psHeader->eTypeId = RGX_HWPERF_MAKE_TYPEID(RGX_HWPERF_STREAM_ID1_HOST,
eEvType, 0, 0, 0); + psHeader->ui32Size = ui32Size; +} + +static inline void _SetupHostEnqPacketData(IMG_UINT8 *pui8Dest, + RGX_HWPERF_KICK_TYPE eEnqType, + IMG_UINT32 ui32Pid, + IMG_UINT32 ui32FWDMContext, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + PVRSRV_FENCE hCheckFence, + PVRSRV_FENCE hUpdateFence, + PVRSRV_TIMELINE hUpdateTimeline, + IMG_UINT64 ui64CheckFenceUID, + IMG_UINT64 ui64UpdateFenceUID, + IMG_UINT64 ui64DeadlineInus, + IMG_UINT64 ui64CycleEstimate) +{ + RGX_HWPERF_HOST_ENQ_DATA *psData = IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + psData->ui32EnqType = eEnqType; + psData->ui32PID = ui32Pid; + psData->ui32ExtJobRef = ui32ExtJobRef; + psData->ui32IntJobRef = ui32IntJobRef; + psData->ui32DMContext = ui32FWDMContext; + psData->hCheckFence = hCheckFence; + psData->hUpdateFence = hUpdateFence; + psData->hUpdateTimeline = hUpdateTimeline; + psData->ui64CheckFence_UID = ui64CheckFenceUID; + psData->ui64UpdateFence_UID = ui64UpdateFenceUID; + psData->ui64DeadlineInus = ui64DeadlineInus; + psData->ui64CycleEstimate = ui64CycleEstimate; +} + +void RGXHWPerfHostPostEnqEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_KICK_TYPE eEnqType, + IMG_UINT32 ui32Pid, + IMG_UINT32 ui32FWDMContext, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + PVRSRV_FENCE hCheckFence, + PVRSRV_FENCE hUpdateFence, + PVRSRV_TIMELINE hUpdateTimeline, + IMG_UINT64 ui64CheckFenceUID, + IMG_UINT64 ui64UpdateFenceUID, + IMG_UINT64 ui64DeadlineInus, + IMG_UINT64 ui64CycleEstimate ) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT32 ui32Size = RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_ENQ_DATA); + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + NULL, IMG_TRUE); + + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + goto cleanup; + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, 
RGX_HWPERF_HOST_ENQ, ui32Size, + ui32Ordinal, ui64Timestamp); + _SetupHostEnqPacketData(pui8Dest, + eEnqType, + ui32Pid, + ui32FWDMContext, + ui32ExtJobRef, + ui32IntJobRef, + hCheckFence, + hUpdateFence, + hUpdateTimeline, + ui64CheckFenceUID, + ui64UpdateFenceUID, + ui64DeadlineInus, + ui64CycleEstimate); + + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + +cleanup: + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); +} + +static inline IMG_UINT32 _CalculateHostUfoPacketSize(RGX_HWPERF_UFO_EV eUfoType) +{ + IMG_UINT32 ui32Size = + (IMG_UINT32) offsetof(RGX_HWPERF_HOST_UFO_DATA, aui32StreamData); + RGX_HWPERF_UFO_DATA_ELEMENT *puData; + + switch (eUfoType) + { + case RGX_HWPERF_UFO_EV_CHECK_SUCCESS: + case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS: + ui32Size += sizeof(puData->sCheckSuccess); + break; + case RGX_HWPERF_UFO_EV_CHECK_FAIL: + case RGX_HWPERF_UFO_EV_PRCHECK_FAIL: + ui32Size += sizeof(puData->sCheckFail); + break; + case RGX_HWPERF_UFO_EV_UPDATE: + ui32Size += sizeof(puData->sUpdate); + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostUfoEvent: Invalid UFO" + " event type")); + PVR_ASSERT(IMG_FALSE); + break; + } + + return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); +} + +static inline void _SetupHostUfoPacketData(IMG_UINT8 *pui8Dest, + RGX_HWPERF_UFO_EV eUfoType, + RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData) +{ + RGX_HWPERF_HOST_UFO_DATA *psData = IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + RGX_HWPERF_UFO_DATA_ELEMENT *puData = (RGX_HWPERF_UFO_DATA_ELEMENT*)psData->aui32StreamData; + + psData->eEvType = eUfoType; + /* HWPerfHost always emits 1 UFO at a time, since each UFO has 1-to-1 mapping + * with an underlying DevNode, and each DevNode has a dedicated HWPerf buffer */ + psData->ui32StreamInfo = RGX_HWPERF_MAKE_UFOPKTINFO(1, + offsetof(RGX_HWPERF_HOST_UFO_DATA, aui32StreamData)); + + switch (eUfoType) + { + case RGX_HWPERF_UFO_EV_CHECK_SUCCESS: + case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS: + 
puData->sCheckSuccess.ui32FWAddr = + psUFOData->sCheckSuccess.ui32FWAddr; + puData->sCheckSuccess.ui32Value = + psUFOData->sCheckSuccess.ui32Value; + + puData = IMG_OFFSET_ADDR(puData, sizeof(puData->sCheckSuccess)); + break; + case RGX_HWPERF_UFO_EV_CHECK_FAIL: + case RGX_HWPERF_UFO_EV_PRCHECK_FAIL: + puData->sCheckFail.ui32FWAddr = + psUFOData->sCheckFail.ui32FWAddr; + puData->sCheckFail.ui32Value = + psUFOData->sCheckFail.ui32Value; + puData->sCheckFail.ui32Required = + psUFOData->sCheckFail.ui32Required; + + puData = IMG_OFFSET_ADDR(puData, sizeof(puData->sCheckFail)); + break; + case RGX_HWPERF_UFO_EV_UPDATE: + puData->sUpdate.ui32FWAddr = + psUFOData->sUpdate.ui32FWAddr; + puData->sUpdate.ui32OldValue = + psUFOData->sUpdate.ui32OldValue; + puData->sUpdate.ui32NewValue = + psUFOData->sUpdate.ui32NewValue; + + puData = IMG_OFFSET_ADDR(puData, sizeof(puData->sUpdate)); + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostUfoEvent: Invalid UFO" + " event type")); + PVR_ASSERT(IMG_FALSE); + break; + } +} + +void RGXHWPerfHostPostUfoEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_UFO_EV eUfoType, + RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData, + const IMG_BOOL bSleepAllowed) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT32 ui32Size = _CalculateHostUfoPacketSize(eUfoType); + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + IMG_BOOL *pbPacketWritten = NULL; + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + &pui8Dest, bSleepAllowed); + + if (bSleepAllowed) + { + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + goto cleanup; + } + } + else + { + if (pui8Dest == NULL) + { + // Give-up if we couldn't get a place in deferred events buffer + goto cleanup; + } + pbPacketWritten = GET_DE_EVENT_WRITE_STATUS(pui8Dest); + pui8Dest = GET_DE_EVENT_DATA(pui8Dest); + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, 
RGX_HWPERF_HOST_UFO, ui32Size, + ui32Ordinal, ui64Timestamp); + _SetupHostUfoPacketData(pui8Dest, eUfoType, psUFOData); + + if (bSleepAllowed) + { + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + } + else + { + *pbPacketWritten = IMG_TRUE; + OSScheduleMISR(psRgxDevInfo->pvHostHWPerfMISR); + } + +cleanup: + if (bSleepAllowed) + { + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); + } +} + +#define UNKNOWN_SYNC_NAME "UnknownSync" + +static_assert(PVRSRV_SYNC_NAME_LENGTH==PVRSRV_SYNC_NAME_LENGTH, "Sync class name max does not match Fence Sync name max"); + +static inline IMG_UINT32 _FixNameAndCalculateHostAllocPacketSize( + RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType, + const IMG_CHAR **ppsName, + IMG_UINT32 *ui32NameSize) +{ + RGX_HWPERF_HOST_ALLOC_DATA *psData; + IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_ALLOC_DATA, uAllocDetail); + + if (*ppsName != NULL && *ui32NameSize > 0) + { + /* if string longer than maximum cut it (leave space for '\0') */ + if (*ui32NameSize >= PVRSRV_SYNC_NAME_LENGTH) + *ui32NameSize = PVRSRV_SYNC_NAME_LENGTH; + } + else + { + PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfHostPostAllocEvent: Invalid" + " resource name given.")); + *ppsName = UNKNOWN_SYNC_NAME; + *ui32NameSize = sizeof(UNKNOWN_SYNC_NAME); + } + + switch (eAllocType) + { + case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC: + ui32Size += sizeof(psData->uAllocDetail.sSyncAlloc) - PVRSRV_SYNC_NAME_LENGTH + + *ui32NameSize; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: + ui32Size += sizeof(psData->uAllocDetail.sFenceAlloc) - PVRSRV_SYNC_NAME_LENGTH + + *ui32NameSize; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW: + ui32Size += sizeof(psData->uAllocDetail.sSWFenceAlloc) - PVRSRV_SYNC_NAME_LENGTH + + *ui32NameSize; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP: + ui32Size += sizeof(psData->uAllocDetail.sSyncCheckPointAlloc) - PVRSRV_SYNC_NAME_LENGTH + + *ui32NameSize; + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, + 
"RGXHWPerfHostPostAllocEvent: Invalid alloc event type")); + PVR_ASSERT(IMG_FALSE); + break; + } + + return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); +} + +static inline void _SetupHostAllocPacketData(IMG_UINT8 *pui8Dest, + RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType, + RGX_HWPERF_HOST_ALLOC_DETAIL *puAllocDetail, + const IMG_CHAR *psName, + IMG_UINT32 ui32NameSize) +{ + RGX_HWPERF_HOST_ALLOC_DATA *psData = IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + + IMG_CHAR *acName = NULL; + + psData->ui32AllocType = eAllocType; + + switch (eAllocType) + { + case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC: + psData->uAllocDetail.sSyncAlloc = puAllocDetail->sSyncAlloc; + acName = psData->uAllocDetail.sSyncAlloc.acName; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: + psData->uAllocDetail.sFenceAlloc = puAllocDetail->sFenceAlloc; + acName = psData->uAllocDetail.sFenceAlloc.acName; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW: + psData->uAllocDetail.sSWFenceAlloc = puAllocDetail->sSWFenceAlloc; + acName = psData->uAllocDetail.sSWFenceAlloc.acName; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP: + psData->uAllocDetail.sSyncCheckPointAlloc = puAllocDetail->sSyncCheckPointAlloc; + acName = psData->uAllocDetail.sSyncCheckPointAlloc.acName; + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, + "RGXHWPerfHostPostAllocEvent: Invalid alloc event type")); + PVR_ASSERT(IMG_FALSE); + } + + + if (acName != NULL) + { + if (ui32NameSize) + { + OSStringLCopy(acName, psName, ui32NameSize); + } + else + { + /* In case no name was given make sure we don't access random + * memory */ + acName[0] = '\0'; + } + } +} + +void RGXHWPerfHostPostAllocEvent(PVRSRV_RGXDEV_INFO* psRgxDevInfo, + RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType, + const IMG_CHAR *psName, + IMG_UINT32 ui32NameSize, + RGX_HWPERF_HOST_ALLOC_DETAIL *puAllocDetail) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT64 ui64Timestamp; + IMG_UINT32 ui32Ordinal; + IMG_UINT32 ui32Size = 
_FixNameAndCalculateHostAllocPacketSize(eAllocType, + &psName, + &ui32NameSize); + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + NULL, IMG_TRUE); + + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + goto cleanup; + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_ALLOC, ui32Size, + ui32Ordinal, ui64Timestamp); + + _SetupHostAllocPacketData(pui8Dest, + eAllocType, + puAllocDetail, + psName, + ui32NameSize); + + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + +cleanup: + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); +} + +static inline void _SetupHostFreePacketData(IMG_UINT8 *pui8Dest, + RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType, + IMG_UINT64 ui64UID, + IMG_UINT32 ui32PID, + IMG_UINT32 ui32FWAddr) +{ + RGX_HWPERF_HOST_FREE_DATA *psData = IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + + psData->ui32FreeType = eFreeType; + + switch (eFreeType) + { + case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC: + psData->uFreeDetail.sSyncFree.ui32FWAddr = ui32FWAddr; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: + psData->uFreeDetail.sFenceDestroy.ui64Fence_UID = ui64UID; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP: + psData->uFreeDetail.sSyncCheckPointFree.ui32CheckPt_FWAddr = ui32FWAddr; + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, + "RGXHWPerfHostPostFreeEvent: Invalid free event type")); + PVR_ASSERT(IMG_FALSE); + } +} + +void RGXHWPerfHostPostFreeEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType, + IMG_UINT64 ui64UID, + IMG_UINT32 ui32PID, + IMG_UINT32 ui32FWAddr) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT32 ui32Size = RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_FREE_DATA); + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + NULL, IMG_TRUE); + 
_PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + goto cleanup; + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_FREE, ui32Size, + ui32Ordinal, ui64Timestamp); + _SetupHostFreePacketData(pui8Dest, + eFreeType, + ui64UID, + ui32PID, + ui32FWAddr); + + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + +cleanup: + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); +} + +static inline IMG_UINT32 _FixNameAndCalculateHostModifyPacketSize( + RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType, + const IMG_CHAR **ppsName, + IMG_UINT32 *ui32NameSize) +{ + RGX_HWPERF_HOST_MODIFY_DATA *psData; + RGX_HWPERF_HOST_MODIFY_DETAIL *puData; + IMG_UINT32 ui32Size = sizeof(psData->ui32ModifyType); + + if (*ppsName != NULL && *ui32NameSize > 0) + { + /* first strip the terminator */ + if ((*ppsName)[*ui32NameSize - 1] == '\0') + *ui32NameSize -= 1; + /* if string longer than maximum cut it (leave space for '\0') */ + if (*ui32NameSize >= PVRSRV_SYNC_NAME_LENGTH) + *ui32NameSize = PVRSRV_SYNC_NAME_LENGTH - 1; + } + else + { + PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfHostPostModifyEvent: Invalid" + " resource name given.")); + *ppsName = UNKNOWN_SYNC_NAME; + *ui32NameSize = sizeof(UNKNOWN_SYNC_NAME) - 1; + } + + switch (eModifyType) + { + case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: + ui32Size += sizeof(puData->sFenceMerge) - PVRSRV_SYNC_NAME_LENGTH + + *ui32NameSize + 1; /* +1 for '\0' */ + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, + "RGXHWPerfHostPostModifyEvent: Invalid modify event type")); + PVR_ASSERT(IMG_FALSE); + break; + } + + return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); +} + +static inline void _SetupHostModifyPacketData(IMG_UINT8 *pui8Dest, + RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType, + IMG_UINT64 ui64NewUID, + IMG_UINT64 ui64UID1, + IMG_UINT64 ui64UID2, + const IMG_CHAR *psName, + IMG_UINT32 ui32NameSize) +{ + RGX_HWPERF_HOST_MODIFY_DATA 
*psData = (RGX_HWPERF_HOST_MODIFY_DATA *)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + + IMG_CHAR *acName = NULL; + + psData->ui32ModifyType = eModifyType; + + switch (eModifyType) + { + case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: + psData->uModifyDetail.sFenceMerge.ui64NewFence_UID = ui64NewUID; + psData->uModifyDetail.sFenceMerge.ui64InFence1_UID = ui64UID1; + psData->uModifyDetail.sFenceMerge.ui64InFence2_UID = ui64UID2; + acName = psData->uModifyDetail.sFenceMerge.acName; + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, + "RGXHWPerfHostPostModifyEvent: Invalid modify event type")); + PVR_ASSERT(IMG_FALSE); + } + + if (acName != NULL) + { + if (ui32NameSize) + { + OSStringLCopy(acName, psName, ui32NameSize); + } + else + { + /* In case no name was given make sure we don't access random + * memory */ + acName[0] = '\0'; + } + } +} + +void RGXHWPerfHostPostModifyEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType, + IMG_UINT64 ui64NewUID, + IMG_UINT64 ui64UID1, + IMG_UINT64 ui64UID2, + const IMG_CHAR *psName, + IMG_UINT32 ui32NameSize) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT64 ui64Timestamp; + IMG_UINT32 ui32Ordinal; + IMG_UINT32 ui32Size = _FixNameAndCalculateHostModifyPacketSize(eModifyType, + &psName, + &ui32NameSize); + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + NULL, IMG_TRUE); + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + goto cleanup; + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_MODIFY, ui32Size, + ui32Ordinal, ui64Timestamp); + _SetupHostModifyPacketData(pui8Dest, + eModifyType, + ui64NewUID, + ui64UID1, + ui64UID2, + psName, + ui32NameSize); + + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + +cleanup: + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); +} + +static inline void 
_SetupHostClkSyncPacketData(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT8 *pui8Dest) +{ + RGX_HWPERF_HOST_CLK_SYNC_DATA *psData = IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psRgxDevInfo->psRGXFWIfGpuUtilFWCb; + IMG_UINT32 ui32CurrIdx = + RGXFWIF_TIME_CORR_CURR_INDEX(psGpuUtilFWCB->ui32TimeCorrSeqCount); + RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32CurrIdx]; + + psData->ui64CRTimestamp = psTimeCorr->ui64CRTimeStamp; + psData->ui64OSTimestamp = psTimeCorr->ui64OSTimeStamp; + psData->ui32ClockSpeed = psTimeCorr->ui32CoreClockSpeed; +} + +void RGXHWPerfHostPostClkSyncEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT32 ui32Size = + RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_CLK_SYNC_DATA); + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + NULL, IMG_TRUE); + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + goto cleanup; + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_CLK_SYNC, ui32Size, + ui32Ordinal, ui64Timestamp); + _SetupHostClkSyncPacketData(psRgxDevInfo, pui8Dest); + + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + +cleanup: + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); +} + +static inline RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS _ConvDeviceHealthStatus(PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus) +{ + switch (eDeviceHealthStatus) + { + case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED; + case PVRSRV_DEVICE_HEALTH_STATUS_OK: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK; + case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_RESPONDING; + case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD; + case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: return 
RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT; + default: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED; + } +} + +static inline RGX_HWPERF_HOST_DEVICE_HEALTH_REASON _ConvDeviceHealthReason(PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason) +{ + switch (eDeviceHealthReason) + { + case PVRSRV_DEVICE_HEALTH_REASON_NONE: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE; + case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED; + case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING; + case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS; + case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT; + case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED; + case PVRSRV_DEVICE_HEALTH_REASON_IDLING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING; + case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING; + case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS:return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS; + default: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED; + } +} + +static inline void _SetupHostDeviceInfoPacketData(RGX_HWPERF_DEV_INFO_EV eEvType, + PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus, + PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason, + IMG_UINT8 *pui8Dest) +{ + RGX_HWPERF_HOST_DEV_INFO_DATA *psData = IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + psData->eEvType = eEvType; + + switch (eEvType) + { + case RGX_HWPERF_DEV_INFO_EV_HEALTH: + psData->uDevInfoDetail.sDeviceStatus.eDeviceHealthStatus = _ConvDeviceHealthStatus(eDeviceHealthStatus); + psData->uDevInfoDetail.sDeviceStatus.eDeviceHealthReason = _ConvDeviceHealthReason(eDeviceHealthReason); + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, 
"RGXHWPerfHostPostDeviceInfo: Invalid event type")); + PVR_ASSERT(IMG_FALSE); + break; + } +} + +static inline IMG_UINT32 _CalculateHostDeviceInfoPacketSize(RGX_HWPERF_DEV_INFO_EV eEvType) +{ + IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_DEV_INFO_DATA, uDevInfoDetail); + + switch (eEvType) + { + case RGX_HWPERF_DEV_INFO_EV_HEALTH: + ui32Size += sizeof(((RGX_HWPERF_HOST_DEV_INFO_DATA*)0)->uDevInfoDetail.sDeviceStatus); + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostDeviceInfo: Invalid event type")); + PVR_ASSERT(IMG_FALSE); + break; + } + return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); +} + +void RGXHWPerfHostPostDeviceInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_DEV_INFO_EV eEvType, + PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus, + PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + IMG_UINT32 ui32Size; + + OSLockAcquire(psRgxDevInfo->hHWPerfLock); + + if (psRgxDevInfo->hHWPerfHostStream != (IMG_HANDLE) NULL) + { + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, NULL, IMG_TRUE); + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + ui32Size = _CalculateHostDeviceInfoPacketSize(eEvType); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) != NULL) + { + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_DEV_INFO, ui32Size, ui32Ordinal, ui64Timestamp); + _SetupHostDeviceInfoPacketData(eEvType, eDeviceHealthStatus, eDeviceHealthReason, pui8Dest); + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + } + + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); + } + + OSLockRelease(psRgxDevInfo->hHWPerfLock); +} + +static inline void _SetupHostInfoPacketData(RGX_HWPERF_INFO_EV eEvType, + IMG_UINT32 ui32TotalMemoryUsage, + IMG_UINT32 ui32LivePids, + PVRSRV_PER_PROCESS_MEM_USAGE *psPerProcessMemUsage, + IMG_UINT8 *pui8Dest) +{ + IMG_INT i; + RGX_HWPERF_HOST_INFO_DATA *psData = 
IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + psData->eEvType = eEvType; + + switch (eEvType) + { + case RGX_HWPERF_INFO_EV_MEM_USAGE: + psData->uInfoDetail.sMemUsageStats.ui32TotalMemoryUsage = ui32TotalMemoryUsage; + + if (psPerProcessMemUsage) + { + for (i = 0; i < ui32LivePids; ++i) + { + psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32Pid = psPerProcessMemUsage[i].ui32Pid; + psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32KernelMemUsage = psPerProcessMemUsage[i].ui32KernelMemUsage; + psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32GraphicsMemUsage = psPerProcessMemUsage[i].ui32GraphicsMemUsage; + } + } + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostInfo: Invalid event type")); + PVR_ASSERT(IMG_FALSE); + break; + } +} + +static inline IMG_UINT32 _CalculateHostInfoPacketSize(RGX_HWPERF_INFO_EV eEvType, + IMG_UINT32 *pui32TotalMemoryUsage, + IMG_UINT32 *pui32LivePids, + PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsage) +{ + IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_INFO_DATA, uInfoDetail); + + switch (eEvType) + { + case RGX_HWPERF_INFO_EV_MEM_USAGE: +#if !defined(__QNXNTO__) + if (PVRSRVGetProcessMemUsage(pui32TotalMemoryUsage, pui32LivePids, ppsPerProcessMemUsage) == PVRSRV_OK) + { + ui32Size += ((offsetof(RGX_HWPERF_HOST_INFO_DATA, uInfoDetail.sMemUsageStats.ui32TotalMemoryUsage) - ui32Size) + + ((*pui32LivePids) * sizeof(((RGX_HWPERF_HOST_INFO_DATA*)0)->uInfoDetail.sMemUsageStats.sPerProcessUsage))); + } +#else + PVR_DPF((PVR_DBG_ERROR, "This functionality is not yet implemented for this platform")); +#endif + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostInfo: Invalid event type")); + PVR_ASSERT(IMG_FALSE); + break; + } + return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); +} + +void RGXHWPerfHostPostInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_INFO_EV eEvType) +{ + IMG_UINT8 
*pui8Dest; + IMG_UINT32 ui32Size; + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + IMG_UINT32 ui32TotalMemoryUsage = 0; + PVRSRV_PER_PROCESS_MEM_USAGE *psPerProcessMemUsage = NULL; + IMG_UINT32 ui32LivePids = 0; + + OSLockAcquire(psRgxDevInfo->hHWPerfLock); + + if (psRgxDevInfo->hHWPerfHostStream != (IMG_HANDLE) NULL) + { + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, NULL, IMG_TRUE); + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + ui32Size = _CalculateHostInfoPacketSize(eEvType, &ui32TotalMemoryUsage, &ui32LivePids, &psPerProcessMemUsage); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) != NULL) + { + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_INFO, ui32Size, ui32Ordinal, ui64Timestamp); + _SetupHostInfoPacketData(eEvType, ui32TotalMemoryUsage, ui32LivePids, psPerProcessMemUsage, pui8Dest); + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + } + + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); + + if (psPerProcessMemUsage) + OSFreeMemNoStats(psPerProcessMemUsage); // psPerProcessMemUsage was allocated with OSAllocZMemNoStats + } + + OSLockRelease(psRgxDevInfo->hHWPerfLock); +} + +static inline IMG_UINT32 +_CalculateHostFenceWaitPacketSize(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eWaitType) +{ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA *psSizeCalculator; + IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA, uDetail); + + switch (eWaitType) + { + case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN: + ui32Size += sizeof(psSizeCalculator->uDetail.sBegin); + break; + case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END: + ui32Size += sizeof(psSizeCalculator->uDetail.sEnd); + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid wait event type (%u)", __func__, + eWaitType)); + PVR_ASSERT(IMG_FALSE); + break; + } + return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); +} + +static inline void +_SetupHostFenceWaitPacketData(IMG_UINT8 *pui8Dest, + 
RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eWaitType, + IMG_PID uiPID, + PVRSRV_FENCE hFence, + IMG_UINT32 ui32Data) +{ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA *psData = IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + + psData->eType = eWaitType; + psData->uiPID = uiPID; + psData->hFence = hFence; + + switch (eWaitType) + { + case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN: + psData->uDetail.sBegin.ui32TimeoutInMs = ui32Data; + break; + case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END: + psData->uDetail.sEnd.eResult = + (RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT) ui32Data; + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid fence-wait event type", __func__)); + PVR_ASSERT(IMG_FALSE); + } +} + +void RGXHWPerfHostPostFenceWait(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType, + IMG_PID uiPID, + PVRSRV_FENCE hFence, + IMG_UINT32 ui32Data) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT32 ui32Size; + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + NULL, IMG_TRUE); + + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + ui32Size = _CalculateHostFenceWaitPacketSize(eType); + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + goto cleanup; + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_SYNC_FENCE_WAIT, + ui32Size, ui32Ordinal, ui64Timestamp); + _SetupHostFenceWaitPacketData(pui8Dest, eType, uiPID, hFence, ui32Data); + + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + +cleanup: + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); +} + +static inline IMG_UINT32 _CalculateHostSWTimelineAdvPacketSize(void) +{ + IMG_UINT32 ui32Size = sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA); + return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); +} + +static inline void +_SetupHostSWTimelineAdvPacketData(IMG_UINT8 *pui8Dest, + IMG_PID uiPID, + PVRSRV_TIMELINE hSWTimeline, + IMG_UINT64 
ui64SyncPtIndex) + +{ + RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA *psData = IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + + psData->uiPID = uiPID; + psData->hTimeline = hSWTimeline; + psData->ui64SyncPtIndex = ui64SyncPtIndex; +} + +void RGXHWPerfHostPostSWTimelineAdv(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_PID uiPID, + PVRSRV_TIMELINE hSWTimeline, + IMG_UINT64 ui64SyncPtIndex) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT32 ui32Size; + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + NULL, IMG_TRUE); + + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + ui32Size = _CalculateHostSWTimelineAdvPacketSize(); + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + goto cleanup; + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE, + ui32Size, ui32Ordinal, ui64Timestamp); + _SetupHostSWTimelineAdvPacketData(pui8Dest, uiPID, hSWTimeline, ui64SyncPtIndex); + + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + +cleanup: + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); + +} + +/****************************************************************************** + * Currently only implemented on Linux. Feature can be enabled to provide + * an interface to 3rd-party kernel modules that wish to access the + * HWPerf data. The API is documented in the rgxapi_km.h header and + * the rgx_hwperf* headers. + *****************************************************************************/ + +/* Internal HWPerf kernel connection/device data object to track the state + * of a client session. 
+ */ +typedef struct +{ + PVRSRV_DEVICE_NODE* psRgxDevNode; + PVRSRV_RGXDEV_INFO* psRgxDevInfo; + + /* TL Open/close state */ + IMG_HANDLE hSD[RGX_HWPERF_MAX_STREAM_ID]; + + /* TL Acquire/release state */ + IMG_PBYTE pHwpBuf[RGX_HWPERF_MAX_STREAM_ID]; /*!< buffer returned to user in acquire call */ + IMG_PBYTE pHwpBufEnd[RGX_HWPERF_MAX_STREAM_ID]; /*!< pointer to end of HwpBuf */ + IMG_PBYTE pTlBuf[RGX_HWPERF_MAX_STREAM_ID]; /*!< buffer obtained via TlAcquireData */ + IMG_PBYTE pTlBufPos[RGX_HWPERF_MAX_STREAM_ID]; /*!< initial position in TlBuf to acquire packets */ + IMG_PBYTE pTlBufRead[RGX_HWPERF_MAX_STREAM_ID]; /*!< pointer to the last packet read */ + IMG_UINT32 ui32AcqDataLen[RGX_HWPERF_MAX_STREAM_ID]; /*!< length of acquired TlBuf */ + IMG_BOOL bRelease[RGX_HWPERF_MAX_STREAM_ID]; /*!< used to determine whether or not to release currently held TlBuf */ + + +} RGX_KM_HWPERF_DEVDATA; + +PVRSRV_ERROR RGXHWPerfLazyConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDeviceNode; + RGX_KM_HWPERF_DEVDATA *psDevData; + RGX_HWPERF_DEVICE *psNewHWPerfDevice; + RGX_HWPERF_CONNECTION* psHWPerfConnection; + IMG_BOOL bFWActive = IMG_FALSE; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* avoid uninitialised data */ + PVR_ASSERT(*ppsHWPerfConnection == NULL); + PVR_ASSERT(psPVRSRVData); + + /* Allocate connection object */ + psHWPerfConnection = OSAllocZMem(sizeof(*psHWPerfConnection)); + if (!psHWPerfConnection) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + /* early save the return pointer to aid clean-up if failure occurs */ + *ppsHWPerfConnection = psHWPerfConnection; + + psDeviceNode = psPVRSRVData->psDeviceNodeList; + while (psDeviceNode) + { + if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: HWPerf: Device not currently active. 
ID:%u", + __func__, + psDeviceNode->sDevId.i32UMIdentifier)); + psDeviceNode = psDeviceNode->psNext; + continue; + } + /* Create a list node to be attached to connection object's list */ + psNewHWPerfDevice = OSAllocMem(sizeof(*psNewHWPerfDevice)); + if (!psNewHWPerfDevice) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + /* Insert node at head of the list */ + psNewHWPerfDevice->psNext = psHWPerfConnection->psHWPerfDevList; + psHWPerfConnection->psHWPerfDevList = psNewHWPerfDevice; + + /* create a device data object for kernel server */ + psDevData = OSAllocZMem(sizeof(*psDevData)); + psNewHWPerfDevice->hDevData = (IMG_HANDLE)psDevData; + if (!psDevData) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + if (OSSNPrintf(psNewHWPerfDevice->pszName, sizeof(psNewHWPerfDevice->pszName), + "hwperf_device_%d", psDeviceNode->sDevId.i32UMIdentifier) < 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to form HWPerf device name for device %d", + __func__, + psDeviceNode->sDevId.i32UMIdentifier)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDevData->psRgxDevNode = psDeviceNode; + psDevData->psRgxDevInfo = psDeviceNode->pvDevice; + + psDeviceNode = psDeviceNode->psNext; + + /* At least one device is active */ + bFWActive = IMG_TRUE; + } + + if (!bFWActive) + { + return PVRSRV_ERROR_NOT_READY; + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXHWPerfOpen(RGX_HWPERF_CONNECTION *psHWPerfConnection) +{ + RGX_KM_HWPERF_DEVDATA *psDevData; + RGX_HWPERF_DEVICE *psHWPerfDev; + PVRSRV_RGXDEV_INFO *psRgxDevInfo; + PVRSRV_ERROR eError; + IMG_CHAR pszHWPerfFwStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5]; + IMG_CHAR pszHWPerfHostStreamName[sizeof(PVRSRV_TL_HWPERF_HOST_SERVER_STREAM) + 5]; + IMG_UINT32 ui32BufSize; + + /* Disable producer callback by default for the Kernel API. 
*/ + IMG_UINT32 ui32StreamFlags = PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING | + PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* Validate input argument values supplied by the caller */ + if (!psHWPerfConnection) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psHWPerfDev = psHWPerfConnection->psHWPerfDevList; + while (psHWPerfDev) + { + psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; + psRgxDevInfo = psDevData->psRgxDevInfo; + + /* In the case where the AppHint has not been set we need to + * initialise the HWPerf resources here. Allocated on-demand + * to reduce RAM foot print on systems not needing HWPerf. + */ + OSLockAcquire(psRgxDevInfo->hHWPerfLock); + if (RGXHWPerfIsInitRequired(psRgxDevInfo)) + { + eError = RGXHWPerfInitOnDemandResources(psRgxDevInfo); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Initialisation of on-demand HWPerfFW resources failed", + __func__)); + OSLockRelease(psRgxDevInfo->hHWPerfLock); + return eError; + } + } + OSLockRelease(psRgxDevInfo->hHWPerfLock); + + OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream); + if (psRgxDevInfo->hHWPerfHostStream == NULL) + { + eError = RGXHWPerfHostInitOnDemandResources(psRgxDevInfo); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Initialisation of on-demand HWPerfHost resources failed", + __func__)); + OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream); + return eError; + } + } + OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream); + + /* form the HWPerf stream name, corresponding to this DevNode; which can make sense in the UM */ + if (OSSNPrintf(pszHWPerfFwStreamName, sizeof(pszHWPerfFwStreamName), "%s%d", + PVRSRV_TL_HWPERF_RGX_FW_STREAM, + psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier) < 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to form HWPerf stream name for device %d", + __func__, + psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier)); + return 
PVRSRV_ERROR_INVALID_PARAMS; + } + /* Open the RGX TL stream for reading in this session */ + eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE, + pszHWPerfFwStreamName, + ui32StreamFlags, + &psDevData->hSD[RGX_HWPERF_STREAM_ID0_FW]); + PVR_LOG_RETURN_IF_ERROR(eError, "TLClientOpenStream(RGX_HWPerf)"); + + /* form the HWPerf host stream name, corresponding to this DevNode; which can make sense in the UM */ + if (OSSNPrintf(pszHWPerfHostStreamName, sizeof(pszHWPerfHostStreamName), "%s%d", + PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, + psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier) < 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to form HWPerf host stream name for device %d", + __func__, + psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Open the host TL stream for reading in this session */ + eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE, + pszHWPerfHostStreamName, + PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING, + &psDevData->hSD[RGX_HWPERF_STREAM_ID1_HOST]); + PVR_LOG_RETURN_IF_ERROR(eError, "TLClientOpenStream(Host_HWPerf)"); + + /* Allocate a large enough buffer for use during the entire session to + * avoid the need to resize in the Acquire call as this might be in an ISR + * Choose size that can contain at least one packet. 
+ */ + /* Allocate buffer for FW Stream */ + ui32BufSize = FW_STREAM_BUFFER_SIZE; + psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW] = OSAllocMem(ui32BufSize); + if (psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW] == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + psDevData->pHwpBufEnd[RGX_HWPERF_STREAM_ID0_FW] = psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW]+ui32BufSize; + + /* Allocate buffer for Host Stream */ + ui32BufSize = HOST_STREAM_BUFFER_SIZE; + psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST] = OSAllocMem(ui32BufSize); + if (psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST] == NULL) + { + OSFreeMem(psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW]); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + psDevData->pHwpBufEnd[RGX_HWPERF_STREAM_ID1_HOST] = psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST]+ui32BufSize; + + psHWPerfDev = psHWPerfDev->psNext; + } + + return PVRSRV_OK; +} + + +PVRSRV_ERROR RGXHWPerfConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) +{ + PVRSRV_ERROR eError; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + eError = RGXHWPerfLazyConnect(ppsHWPerfConnection); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfLazyConnect", e0); + + eError = RGXHWPerfOpen(*ppsHWPerfConnection); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfOpen", e1); + + return PVRSRV_OK; + + e1: /* HWPerfOpen might have opened some, and then failed */ + RGXHWPerfClose(*ppsHWPerfConnection); + e0: /* LazyConnect might have allocated some resources and then failed, + * make sure they are cleaned up */ + RGXHWPerfFreeConnection(ppsHWPerfConnection); + return eError; +} + + +PVRSRV_ERROR RGXHWPerfControl( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + RGX_HWPERF_STREAM_ID eStreamId, + IMG_BOOL bToggle, + IMG_UINT64 ui64Mask) +{ + PVRSRV_ERROR eError; + RGX_KM_HWPERF_DEVDATA* psDevData; + RGX_HWPERF_DEVICE* psHWPerfDev; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* Validate input argument values supplied by the caller */ + if (!psHWPerfConnection) + { + return 
PVRSRV_ERROR_INVALID_PARAMS; + } + + psHWPerfDev = psHWPerfConnection->psHWPerfDevList; + + while (psHWPerfDev) + { + psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; + + /* Call the internal server API */ + eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDevData->psRgxDevNode, eStreamId, bToggle, ui64Mask); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM"); + + psHWPerfDev = psHWPerfDev->psNext; + } + + return PVRSRV_OK; +} + + +PVRSRV_ERROR RGXHWPerfConfigureAndEnableCounters( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + IMG_UINT32 ui32NumBlocks, + RGX_HWPERF_CONFIG_CNTBLK* asBlockConfigs) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + RGX_KM_HWPERF_DEVDATA* psDevData; + RGX_HWPERF_DEVICE *psHWPerfDev; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* Validate input argument values supplied by the caller */ + if (!psHWPerfConnection || ui32NumBlocks==0 || !asBlockConfigs) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (ui32NumBlocks > RGXFWIF_HWPERF_CTRL_BLKS_MAX) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psHWPerfDev = psHWPerfConnection->psHWPerfDevList; + + while (psHWPerfDev) + { + psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; + + /* Call the internal server API */ + eError = PVRSRVRGXConfigEnableHWPerfCountersKM(NULL, + psDevData->psRgxDevNode, ui32NumBlocks, asBlockConfigs); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM"); + + psHWPerfDev = psHWPerfDev->psNext; + } + + return eError; +} + + +PVRSRV_ERROR RGXHWPerfConfigureAndEnableCustomCounters( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + IMG_UINT16 ui16CustomBlockID, + IMG_UINT16 ui16NumCustomCounters, + IMG_UINT32 *pui32CustomCounterIDs) +{ + PVRSRV_ERROR eError; + RGX_HWPERF_DEVICE *psHWPerfDev; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* Validate input arguments supplied by the caller */ + PVR_LOG_RETURN_IF_FALSE((NULL != psHWPerfConnection), "psHWPerfConnection invalid", + PVRSRV_ERROR_INVALID_PARAMS); + 
PVR_LOG_RETURN_IF_FALSE((0 != ui16NumCustomCounters), "uiNumBlocks invalid", + PVRSRV_ERROR_INVALID_PARAMS); + PVR_LOG_RETURN_IF_FALSE((NULL != pui32CustomCounterIDs),"asBlockConfigs invalid", + PVRSRV_ERROR_INVALID_PARAMS); + + /* Check # of blocks */ + PVR_LOG_RETURN_IF_FALSE((!(ui16CustomBlockID > RGX_HWPERF_MAX_CUSTOM_BLKS)),"ui16CustomBlockID invalid", + PVRSRV_ERROR_INVALID_PARAMS); + + /* Check # of counters */ + PVR_LOG_RETURN_IF_FALSE((!(ui16NumCustomCounters > RGX_HWPERF_MAX_CUSTOM_CNTRS)),"ui16NumCustomCounters invalid", + PVRSRV_ERROR_INVALID_PARAMS); + + psHWPerfDev = psHWPerfConnection->psHWPerfDevList; + + while (psHWPerfDev) + { + RGX_KM_HWPERF_DEVDATA *psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; + + eError = PVRSRVRGXConfigCustomCountersKM(NULL, + psDevData->psRgxDevNode, + ui16CustomBlockID, ui16NumCustomCounters, pui32CustomCounterIDs); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRGXCtrlCustHWPerfKM"); + + psHWPerfDev = psHWPerfDev->psNext; + } + + return PVRSRV_OK; +} + + +static PVRSRV_ERROR RGXHWPerfToggleCounters( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + IMG_UINT32 ui32NumBlocks, + IMG_UINT16* aeBlockIDs, + IMG_BOOL bToggle, + const char* szFunctionString) +{ + PVRSRV_ERROR eError; + RGX_KM_HWPERF_DEVDATA* psDevData; + RGX_HWPERF_DEVICE* psHWPerfDev; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + if (!psHWPerfConnection || ui32NumBlocks==0 || !aeBlockIDs) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (ui32NumBlocks > RGXFWIF_HWPERF_CTRL_BLKS_MAX) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psHWPerfDev = psHWPerfConnection->psHWPerfDevList; + + while (psHWPerfDev) + { + psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; + + /* Call the internal server API */ + eError = PVRSRVRGXCtrlHWPerfCountersKM(NULL, + psDevData->psRgxDevNode, + bToggle, + ui32NumBlocks, + aeBlockIDs); + + PVR_LOG_RETURN_IF_ERROR(eError, szFunctionString); + + psHWPerfDev = psHWPerfDev->psNext; + } + return 
PVRSRV_OK; +} + +PVRSRV_ERROR RGXHWPerfDisableCounters( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + IMG_UINT32 ui32NumBlocks, + IMG_UINT16* aeBlockIDs) +{ + return RGXHWPerfToggleCounters(psHWPerfConnection, + ui32NumBlocks, + aeBlockIDs, + IMG_FALSE, + __func__); +} + + +PVRSRV_ERROR RGXHWPerfEnableCounters( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + IMG_UINT32 ui32NumBlocks, + IMG_UINT16* aeBlockIDs) +{ + return RGXHWPerfToggleCounters(psHWPerfConnection, + ui32NumBlocks, + aeBlockIDs, + IMG_TRUE, + __func__); +} + + +PVRSRV_ERROR RGXHWPerfAcquireEvents( + IMG_HANDLE hDevData, + RGX_HWPERF_STREAM_ID eStreamId, + IMG_PBYTE* ppBuf, + IMG_UINT32* pui32BufLen) +{ + PVRSRV_ERROR eError; + RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData; + IMG_PBYTE pDataDest; + IMG_UINT32 ui32TlPackets = 0; + PVRSRVTL_PPACKETHDR psHDRptr; + PVRSRVTL_PPACKETHDR pBufferEnd; + PVRSRVTL_PACKETTYPE ui16TlType; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* Reset the output arguments in case we discover an error */ + *ppBuf = NULL; + *pui32BufLen = 0; + + /* Valid input argument values supplied by the caller */ + if (!psDevData || eStreamId >= RGX_HWPERF_MAX_STREAM_ID) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (psDevData->pTlBuf[eStreamId] == NULL) + { + /* Acquire some data to read from the HWPerf TL stream */ + eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, + psDevData->hSD[eStreamId], + &psDevData->pTlBuf[eStreamId], + &psDevData->ui32AcqDataLen[eStreamId]); + PVR_LOG_RETURN_IF_ERROR(eError, "TLClientAcquireData"); + + psDevData->pTlBufPos[eStreamId] = psDevData->pTlBuf[eStreamId]; + } + + /* TL indicates no data exists so return OK and zero. 
*/ + if ((psDevData->pTlBufPos[eStreamId] == NULL) || (psDevData->ui32AcqDataLen[eStreamId] == 0)) + { + return PVRSRV_OK; + } + + /* Process each TL packet in the data buffer we have acquired */ + pBufferEnd = IMG_OFFSET_ADDR(psDevData->pTlBuf[eStreamId], psDevData->ui32AcqDataLen[eStreamId]); + pDataDest = psDevData->pHwpBuf[eStreamId]; + psHDRptr = GET_PACKET_HDR(psDevData->pTlBufPos[eStreamId]); + psDevData->pTlBufRead[eStreamId] = psDevData->pTlBufPos[eStreamId]; + while (psHDRptr < pBufferEnd) + { + ui16TlType = GET_PACKET_TYPE(psHDRptr); + if (ui16TlType == PVRSRVTL_PACKETTYPE_DATA) + { + IMG_UINT16 ui16DataLen = GET_PACKET_DATA_LEN(psHDRptr); + if (0 == ui16DataLen) + { + PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfAcquireEvents: ZERO Data in TL data packet: %p", psHDRptr)); + } + else + { + /* Check next packet does not fill buffer */ + if (pDataDest + ui16DataLen > psDevData->pHwpBufEnd[eStreamId]) + { + break; + } + + /* For valid data copy it into the client buffer and move + * the write position on */ + OSDeviceMemCopy(pDataDest, GET_PACKET_DATA_PTR(psHDRptr), ui16DataLen); + pDataDest += ui16DataLen; + } + } + else if (ui16TlType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED) + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfAcquireEvents: Indication that the transport buffer was full")); + } + else + { + /* else Ignore padding packet type and others */ + PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfAcquireEvents: Ignoring TL packet, type %d", ui16TlType )); + } + + /* Update loop variable to the next packet and increment counts */ + psHDRptr = GET_NEXT_PACKET_ADDR(psHDRptr); + /* Updated to keep track of the next packet to be read. 
*/ + psDevData->pTlBufRead[eStreamId] = IMG_OFFSET_ADDR(psHDRptr, 0); + ui32TlPackets++; + } + + PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfAcquireEvents: TL Packets processed %03d", ui32TlPackets)); + + psDevData->bRelease[eStreamId] = IMG_FALSE; + if (psHDRptr >= pBufferEnd) + { + psDevData->bRelease[eStreamId] = IMG_TRUE; + } + + /* Update output arguments with client buffer details and true length */ + *ppBuf = psDevData->pHwpBuf[eStreamId]; + *pui32BufLen = pDataDest - psDevData->pHwpBuf[eStreamId]; + + return PVRSRV_OK; +} + + +PVRSRV_ERROR RGXHWPerfReleaseEvents( + IMG_HANDLE hDevData, + RGX_HWPERF_STREAM_ID eStreamId) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* Valid input argument values supplied by the caller */ + if (!psDevData || eStreamId >= RGX_HWPERF_MAX_STREAM_ID) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (psDevData->bRelease[eStreamId]) + { + /* Inform the TL that we are done with reading the data. */ + eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psDevData->hSD[eStreamId]); + psDevData->ui32AcqDataLen[eStreamId] = 0; + psDevData->pTlBuf[eStreamId] = NULL; + } + else + { + psDevData->pTlBufPos[eStreamId] = psDevData->pTlBufRead[eStreamId]; + } + return eError; +} + + +PVRSRV_ERROR RGXHWPerfGetFilter( + IMG_HANDLE hDevData, + RGX_HWPERF_STREAM_ID eStreamId, + IMG_UINT64 *ui64Filter) +{ + PVRSRV_RGXDEV_INFO* psRgxDevInfo; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* Valid input argument values supplied by the caller */ + psRgxDevInfo = hDevData ? 
((RGX_KM_HWPERF_DEVDATA*) hDevData)->psRgxDevInfo : NULL; + if (!psRgxDevInfo) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid pointer to the RGX device", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* No need to take hHWPerfLock here since we are only reading data + * from always existing integers to return to debugfs which is an + * atomic operation. + */ + switch (eStreamId) { + case RGX_HWPERF_STREAM_ID0_FW: + *ui64Filter = psRgxDevInfo->ui64HWPerfFilter; + break; + case RGX_HWPERF_STREAM_ID1_HOST: + *ui64Filter = psRgxDevInfo->ui32HWPerfHostFilter; + break; + default: + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid stream ID", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + return PVRSRV_OK; +} + + +PVRSRV_ERROR RGXHWPerfFreeConnection(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) +{ + RGX_HWPERF_DEVICE *psHWPerfDev, *psHWPerfNextDev; + RGX_HWPERF_CONNECTION *psHWPerfConnection = *ppsHWPerfConnection; + + /* if connection object itself is NULL, nothing to free */ + if (psHWPerfConnection == NULL) + { + return PVRSRV_OK; + } + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + psHWPerfNextDev = psHWPerfConnection->psHWPerfDevList; + while (psHWPerfNextDev) + { + psHWPerfDev = psHWPerfNextDev; + psHWPerfNextDev = psHWPerfNextDev->psNext; + + /* Free the session memory */ + if (psHWPerfDev->hDevData) + OSFreeMem(psHWPerfDev->hDevData); + OSFreeMem(psHWPerfDev); + } + OSFreeMem(psHWPerfConnection); + *ppsHWPerfConnection = NULL; + + return PVRSRV_OK; +} + + +PVRSRV_ERROR RGXHWPerfClose(RGX_HWPERF_CONNECTION *psHWPerfConnection) +{ + RGX_HWPERF_DEVICE *psHWPerfDev; + RGX_KM_HWPERF_DEVDATA* psDevData; + IMG_UINT uiStreamId; + PVRSRV_ERROR eError; + + /* Check session connection is not zero */ + if (!psHWPerfConnection) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + psHWPerfDev = psHWPerfConnection->psHWPerfDevList; + while (psHWPerfDev) + { + psDevData = 
(RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; + for (uiStreamId = 0; uiStreamId < RGX_HWPERF_MAX_STREAM_ID; uiStreamId++) + { + /* If the TL buffer exists they have not called ReleaseData + * before disconnecting so clean it up */ + if (psDevData->pTlBuf[uiStreamId]) + { + /* TLClientReleaseData call and null out the buffer fields + * and length */ + eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psDevData->hSD[uiStreamId]); + psDevData->ui32AcqDataLen[uiStreamId] = 0; + psDevData->pTlBuf[uiStreamId] = NULL; + PVR_LOG_IF_ERROR(eError, "TLClientReleaseData"); + /* Packets may be lost if release was not required */ + if (!psDevData->bRelease[uiStreamId]) + { + PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfClose: Events in buffer waiting to be read, remaining events may be lost.")); + } + } + + /* Close the TL stream, ignore the error if it occurs as we + * are disconnecting */ + if (psDevData->hSD[uiStreamId]) + { + eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE, + psDevData->hSD[uiStreamId]); + PVR_LOG_IF_ERROR(eError, "TLClientCloseStream"); + psDevData->hSD[uiStreamId] = NULL; + } + + /* Free the client buffer used in session */ + if (psDevData->pHwpBuf[uiStreamId]) + { + OSFreeMem(psDevData->pHwpBuf[uiStreamId]); + psDevData->pHwpBuf[uiStreamId] = NULL; + } + } + psHWPerfDev = psHWPerfDev->psNext; + } + + return PVRSRV_OK; +} + + +PVRSRV_ERROR RGXHWPerfDisconnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + eError = RGXHWPerfClose(*ppsHWPerfConnection); + PVR_LOG_IF_ERROR(eError, "RGXHWPerfClose"); + + eError = RGXHWPerfFreeConnection(ppsHWPerfConnection); + PVR_LOG_IF_ERROR(eError, "RGXHWPerfFreeConnection"); + + return eError; +} + +IMG_UINT64 RGXHWPerfConvertCRTimeStamp( + IMG_UINT32 ui32ClkSpeed, + IMG_UINT64 ui64CorrCRTimeStamp, + IMG_UINT64 ui64CorrOSTimeStamp, + IMG_UINT64 ui64CRTimeStamp) +{ + IMG_UINT64 ui64CRDeltaToOSDeltaKNs; + IMG_UINT64 
ui64EventOSTimestamp, deltaRgxTimer, delta_ns; + + if (!(ui64CRTimeStamp) || !(ui32ClkSpeed) || !(ui64CorrCRTimeStamp) || !(ui64CorrOSTimeStamp)) + { + return 0; + } + + ui64CRDeltaToOSDeltaKNs = RGXTimeCorrGetConversionFactor(ui32ClkSpeed); + + /* RGX CR timer ticks delta */ + deltaRgxTimer = ui64CRTimeStamp - ui64CorrCRTimeStamp; + /* RGX time delta in nanoseconds */ + delta_ns = RGXFWIF_GET_DELTA_OSTIME_NS(deltaRgxTimer, ui64CRDeltaToOSDeltaKNs); + /* Calculate OS time of HWPerf event */ + ui64EventOSTimestamp = ui64CorrOSTimeStamp + delta_ns; + + return ui64EventOSTimestamp; +} + +/****************************************************************************** + End of file (rgxhwperf.c) + ******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxhwperf.h b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxhwperf.h new file mode 100644 index 000000000000..b35edcfd3a56 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxhwperf.h @@ -0,0 +1,502 @@ +/*************************************************************************/ /*! +@File +@Title RGX HW Performance header file +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the RGX HWPerf functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXHWPERF_H_ +#define RGXHWPERF_H_ + +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" + +#include "device.h" +#include "connection_server.h" +#include "rgxdevice.h" +#include "rgx_hwperf.h" + +/* HWPerf host buffer size constraints in KBs */ +#define HWPERF_HOST_TL_STREAM_SIZE_DEFAULT PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB +#define HWPERF_HOST_TL_STREAM_SIZE_MIN (32U) +#define HWPERF_HOST_TL_STREAM_SIZE_MAX (3072U) + +/****************************************************************************** + * RGX HW Performance decode Bvnc Features for HWPerf + *****************************************************************************/ +PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo, + RGX_HWPERF_BVNC *psBVNC); + +PVRSRV_ERROR PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_HWPERF_BVNC *psBVNC); + +/****************************************************************************** + * RGX HW Performance Data Transport Routines + *****************************************************************************/ + +PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE* psDevInfo); + +PVRSRV_ERROR RGXHWPerfInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo); +PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo); +void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo); +void RGXHWPerfInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode); +void RGXHWPerfClientInitAppHintCallbacks(void); + +/****************************************************************************** + * RGX HW Performance Profiling API(s) + *****************************************************************************/ + +PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + RGX_HWPERF_STREAM_ID eStreamId, + IMG_BOOL bToggle, + IMG_UINT64 
ui64Mask); + + +PVRSRV_ERROR PVRSRVRGXConfigEnableHWPerfCountersKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32ArrayLen, + RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs); + +PVRSRV_ERROR PVRSRVRGXCtrlHWPerfCountersKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_BOOL bEnable, + IMG_UINT32 ui32ArrayLen, + IMG_UINT16 * psBlockIDs); + +PVRSRV_ERROR PVRSRVRGXConfigCustomCountersKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT16 ui16CustomBlockID, + IMG_UINT16 ui16NumCustomCounters, + IMG_UINT32 * pui32CustomCounterIDs); + +/****************************************************************************** + * RGX HW Performance Host Stream API + *****************************************************************************/ + +PVRSRV_ERROR RGXHWPerfHostInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32BufSizeKB); +PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo); +void RGXHWPerfHostDeInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo); + +void RGXHWPerfHostSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_UINT32 ui32Filter); + +void RGXHWPerfHostPostEnqEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_KICK_TYPE eEnqType, + IMG_UINT32 ui32Pid, + IMG_UINT32 ui32FWDMContext, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + PVRSRV_FENCE hCheckFence, + PVRSRV_FENCE hUpdateFence, + PVRSRV_TIMELINE hUpdateTimeline, + IMG_UINT64 ui64CheckFenceUID, + IMG_UINT64 ui64UpdateFenceUID, + IMG_UINT64 ui64DeadlineInus, + IMG_UINT64 ui64CycleEstimate); + +void RGXHWPerfHostPostAllocEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType, + const IMG_CHAR *psName, + IMG_UINT32 ui32NameSize, + RGX_HWPERF_HOST_ALLOC_DETAIL *puAllocDetail); + +void RGXHWPerfHostPostFreeEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType, + IMG_UINT64 ui64UID, + IMG_UINT32 ui32PID, + IMG_UINT32 ui32FWAddr); + 
+void RGXHWPerfHostPostModifyEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType, + IMG_UINT64 ui64NewUID, + IMG_UINT64 ui64UID1, + IMG_UINT64 ui64UID2, + const IMG_CHAR *psName, + IMG_UINT32 ui32NameSize); + +void RGXHWPerfHostPostUfoEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_UFO_EV eUfoType, + RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData, + const IMG_BOOL bSleepAllowed); + +void RGXHWPerfHostPostClkSyncEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo); + +void RGXHWPerfHostPostDeviceInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_DEV_INFO_EV eEvType, + PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus, + PVRSRV_DEVICE_HEALTH_REASON eDeviceHeathReason); + +void RGXHWPerfHostPostInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_INFO_EV eEvType); + +void RGXHWPerfHostPostFenceWait(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType, + IMG_PID uiPID, + PVRSRV_FENCE hFence, + IMG_UINT32 ui32Data); + +void RGXHWPerfHostPostSWTimelineAdv(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_PID uiPID, + PVRSRV_TIMELINE hSWTimeline, + IMG_UINT64 ui64SyncPtIndex); + +IMG_BOOL RGXHWPerfHostIsEventEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, RGX_HWPERF_HOST_EVENT_TYPE eEvent); + +#define _RGX_HWPERF_HOST_FILTER(CTX, EV) \ + (((PVRSRV_RGXDEV_INFO *)CTX->psDeviceNode->pvDevice)->ui32HWPerfHostFilter \ + & RGX_HWPERF_EVENT_MASK_VALUE(EV)) + +#define _RGX_DEVICE_INFO_FROM_CTX(CTX) \ + ((PVRSRV_RGXDEV_INFO *)CTX->psDeviceNode->pvDevice) + +#define _RGX_DEVICE_INFO_FROM_NODE(DEVNODE) \ + ((PVRSRV_RGXDEV_INFO *)DEVNODE->pvDevice) + +/* Deadline and cycle estimate is not supported for all ENQ events */ +#define NO_DEADLINE 0 +#define NO_CYCEST 0 + + +#if defined(SUPPORT_RGX) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. 
+ * + * @param C Kick context + * @param P Pid of kicking process + * @param X Related FW context + * @param E External job reference + * @param I Job ID + * @param K Kick type + * @param CF Check fence handle + * @param UF Update fence handle + * @param UT Update timeline (on which above UF was created) handle + * @param CHKUID Check fence UID + * @param UPDUID Update fence UID + * @param D Deadline + * @param CE Cycle estimate + */ +#define RGXSRV_HWPERF_ENQ(C, P, X, E, I, K, CF, UF, UT, CHKUID, UPDUID, D, CE) \ + do { \ + if (_RGX_HWPERF_HOST_FILTER(C, RGX_HWPERF_HOST_ENQ)) \ + { \ + RGXHWPerfHostPostEnqEvent(_RGX_DEVICE_INFO_FROM_CTX(C), \ + (K), (P), (X), (E), (I), \ + (CF), (UF), (UT), \ + (CHKUID), (UPDUID), (D), (CE)); \ + } \ + } while (0) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. + * + * @param I Device Info pointer + * @param T Host UFO event type + * @param D Pointer to UFO data + * @param S Is sleeping allowed? + */ +#define RGXSRV_HWPERF_UFO(I, T, D, S) \ + do { \ + if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_UFO)) \ + { \ + RGXHWPerfHostPostUfoEvent((I), (T), (D), (S)); \ + } \ + } while (0) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. 
+ * + * @param D Device node pointer + * @param T Host ALLOC event type + * @param FWADDR sync firmware address + * @param N string containing sync name + * @param Z string size including null terminating character + */ +#define RGXSRV_HWPERF_ALLOC(D, T, FWADDR, N, Z) \ + do { \ + if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \ + { \ + RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \ + uAllocDetail.sSyncAlloc.ui32FWAddr = (FWADDR); \ + RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ + RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \ + (N), (Z), &uAllocDetail); \ + } \ + } while (0) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. + * + * @param D Device Node pointer + * @param PID ID of allocating process + * @param FENCE PVRSRV_FENCE object + * @param FWADDR sync firmware address + * @param N string containing sync name + * @param Z string size including null terminating character + */ +#define RGXSRV_HWPERF_ALLOC_FENCE(D, PID, FENCE, FWADDR, N, Z) \ + do { \ + if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \ + { \ + RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \ + uAllocDetail.sFenceAlloc.uiPID = (PID); \ + uAllocDetail.sFenceAlloc.hFence = (FENCE); \ + uAllocDetail.sFenceAlloc.ui32CheckPt_FWAddr = (FWADDR); \ + RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ + RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR, \ + N, Z, &uAllocDetail); \ + } \ + } while (0) + +/** + * @param D Device Node pointer + * @param TL PVRSRV_TIMELINE on which CP is allocated + * @param PID Allocating process ID of this TL/FENCE + * @param FENCE PVRSRV_FENCE as passed to SyncCheckpointResolveFence OR PVRSRV_NO_FENCE + * @param FWADDR sync firmware address + * @param N string containing sync name + * @param Z string size including null terminating character + */ +#define RGXSRV_HWPERF_ALLOC_SYNC_CP(D, TL, PID, FENCE, FWADDR, N, Z) 
\ + do { \ + if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \ + { \ + RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \ + uAllocDetail.sSyncCheckPointAlloc.ui32CheckPt_FWAddr = (FWADDR); \ + uAllocDetail.sSyncCheckPointAlloc.hTimeline = (TL); \ + uAllocDetail.sSyncCheckPointAlloc.uiPID = (PID); \ + uAllocDetail.sSyncCheckPointAlloc.hFence = (FENCE); \ + RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ + RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP, \ + N, Z, &uAllocDetail); \ + } \ + } while (0) + +/** + * @param D Device Node pointer + * @param PID ID of allocating process + * @param SW_FENCE PVRSRV_FENCE object + * @param SW_TL PVRSRV_TIMELINE on which SW_FENCE is allocated + * @param SPI Sync point index on the SW_TL on which this SW_FENCE is allocated + * @param N string containing sync name + * @param Z string size including null terminating character + */ +#define RGXSRV_HWPERF_ALLOC_SW_FENCE(D, PID, SW_FENCE, SW_TL, SPI, N, Z) \ + do { \ + if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \ + { \ + RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \ + uAllocDetail.sSWFenceAlloc.uiPID = (PID); \ + uAllocDetail.sSWFenceAlloc.hSWFence = (SW_FENCE); \ + uAllocDetail.sSWFenceAlloc.hSWTimeline = (SW_TL); \ + uAllocDetail.sSWFenceAlloc.ui64SyncPtIndex = (SPI); \ + RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ + RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW, \ + N, Z, &uAllocDetail); \ + } \ + } while (0) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. 
+ * + * @param D Device Node pointer + * @param T Host ALLOC event type + * @param FWADDR sync firmware address + */ +#define RGXSRV_HWPERF_FREE(D, T, FWADDR) \ + do { \ + if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_FREE)) \ + { \ + RGXHWPerfHostPostFreeEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ + RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \ + (0), (0), (FWADDR)); \ + } \ + } while (0) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. + * + * @param D Device Node pointer + * @param T Host ALLOC event type + * @param UID ID of input object + * @param PID ID of allocating process + * @param FWADDR sync firmware address + */ +#define RGXSRV_HWPERF_FREE_FENCE_SYNC(D, T, UID, PID, FWADDR) \ + do { \ + if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_FREE)) \ + { \ + RGXHWPerfHostPostFreeEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ + RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \ + (UID), (PID), (FWADDR)); \ + } \ + } while (0) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. + * + * @param D Device Node pointer + * @param T Host ALLOC event type + * @param NEWUID ID of output object + * @param UID1 ID of first input object + * @param UID2 ID of second input object + * @param N string containing new object's name + * @param Z string size including null terminating character + */ +#define RGXSRV_HWPERF_MODIFY_FENCE_SYNC(D, T, NEWUID, UID1, UID2, N, Z) \ + do { \ + if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_MODIFY)) \ + { \ + RGXHWPerfHostPostModifyEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ + RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \ + (NEWUID), (UID1), (UID2), N, Z); \ + } \ + } while (0) + + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. 
+ * + * @param I Device info pointer + */ +#define RGXSRV_HWPERF_CLK_SYNC(I) \ + do { \ + if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_CLK_SYNC)) \ + { \ + RGXHWPerfHostPostClkSyncEvent((I)); \ + } \ + } while (0) + + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts a device info event to the HWPerfHost stream. + * + * @param I Device info pointer + * @param T Event type + * @param H Health status enum + * @param R Health reason enum + */ +#define RGXSRV_HWPERF_DEVICE_INFO(I, T, H, R) \ + do { \ + if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_DEV_INFO)) \ + { \ + RGXHWPerfHostPostDeviceInfo((I), (T), (H), (R)); \ + } \ + } while (0) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. + * + * @param I Device info pointer + * @param T Event type + */ +#define RGXSRV_HWPERF_HOST_INFO(I, T) \ +do { \ + if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_INFO)) \ + { \ + RGXHWPerfHostPostInfo((I), (T)); \ + } \ +} while (0) + +/** + * @param I Device info pointer + * @param T Wait Event type + * @param PID Process ID that the following fence belongs to + * @param F Fence handle + * @param D Data for this wait event type + */ +#define RGXSRV_HWPERF_SYNC_FENCE_WAIT(I, T, PID, F, D) \ +do { \ + if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_SYNC_FENCE_WAIT)) \ + { \ + RGXHWPerfHostPostFenceWait(I, RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_##T, \ + (PID), (F), (D)); \ + } \ +} while (0) + +/** + * @param I Device info pointer + * @param PID Process ID that the following timeline belongs to + * @param F SW-timeline handle + * @param SPI Sync-pt index where this SW-timeline has reached + */ +#define RGXSRV_HWPERF_SYNC_SW_TL_ADV(I, PID, SW_TL, SPI)\ +do { \ + if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE)) \ + { \ + RGXHWPerfHostPostSWTimelineAdv((I), (PID), (SW_TL), (SPI)); \ + } \ +} while (0) +#else + 
+#define RGXSRV_HWPERF_ENQ(C, P, X, E, I, K, CF, UF, UT, CHKUID, UPDUID, D, CE) +#define RGXSRV_HWPERF_UFO(I, T, D, S) +#define RGXSRV_HWPERF_ALLOC(D, T, FWADDR, N, Z) +#define RGXSRV_HWPERF_ALLOC_FENCE(D, PID, FENCE, FWADDR, N, Z) +#define RGXSRV_HWPERF_ALLOC_SYNC_CP(D, TL, PID, FENCE, FWADDR, N, Z) +#define RGXSRV_HWPERF_ALLOC_SW_FENCE(D, PID, SW_FENCE, SW_TL, SPI, N, Z) +#define RGXSRV_HWPERF_FREE(D, T, FWADDR) +#define RGXSRV_HWPERF_FREE_FENCE_SYNC(D, T, UID, PID, FWADDR) +#define RGXSRV_HWPERF_MODIFY_FENCE_SYNC(D, T, NEWUID, UID1, UID2, N, Z) +#define RGXSRV_HWPERF_CLK_SYNC(I) +#define RGXSRV_HWPERF_DEVICE_INFO(I, T, H, R) +#define RGXSRV_HWPERF_HOST_INFO(I, T) +#define RGXSRV_HWPERF_SYNC_FENCE_WAIT(I, T, PID, F, D) +#define RGXSRV_HWPERF_SYNC_SW_TL_ADV(I, PID, SW_TL, SPI) + +#endif + +#endif /* RGXHWPERF_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxinit.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxinit.c new file mode 100644 index 000000000000..15fc7523cb31 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxinit.c @@ -0,0 +1,4945 @@ +/*************************************************************************/ /*! +@File +@Title Device specific initialisation routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if defined(LINUX) +#include +#else +#include +#endif + +#include "img_defs.h" +#include "pvr_notifier.h" +#include "pvrsrv.h" +#include "pvrsrv_bridge_init.h" +#include "syscommon.h" +#include "rgx_heaps.h" +#include "rgxheapconfig.h" +#include "rgxpower.h" +#include "tlstream.h" +#include "pvrsrv_tlstreams.h" + +#include "rgxinit.h" +#include "rgxbvnc.h" +#include "rgxmulticore.h" + +#include "pdump_km.h" +#include "handle.h" +#include "allocmem.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "rgxmem.h" +#include "sync_internal.h" +#include "pvrsrv_apphint.h" +#include "oskm_apphint.h" +#include "rgxfwdbg.h" +#include "info_page.h" + +#include "rgxfwimageutils.h" +#include "rgxutils.h" +#include "rgxfwutils.h" +#include "rgx_fwif_km.h" + +#include "rgxmmuinit.h" +#include "rgxmipsmmuinit.h" +#include "physmem.h" +#include "devicemem_utils.h" +#include "devicemem_server.h" +#include "physmem_osmem.h" +#include "physmem_lma.h" + +#include "rgxdebug.h" +#include "rgxhwperf.h" +#include "htbserver.h" + +#include "rgx_options.h" +#include "pvrversion.h" + +#include "rgx_compat_bvnc.h" + +#include "rgx_heaps.h" + +#include "rgxta3d.h" +#include "rgxtimecorr.h" +#include "rgxshader.h" + +#include "rgx_bvnc_defs_km.h" +#if defined(PDUMP) +#include "rgxstartstop.h" +#endif + +#include "rgx_fwif_alignchecks.h" +#include "vmm_pvz_client.h" + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +#include "rgxworkest.h" +#endif + +#if defined(SUPPORT_PDVFS) +#include "rgxpdvfs.h" +#endif + +#if defined(PDUMP) && defined(SUPPORT_SECURITY_VALIDATION) +#include "pdump_physmem.h" +#endif + +static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode); +static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_CHAR **ppszVersionString); +static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_PUINT32 pui32RGXClockSpeed); +static 
PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64ResetValue1, IMG_UINT64 ui64ResetValue2); +static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode); + +#if (RGX_NUM_OS_SUPPORTED > 1) +static PVRSRV_ERROR RGXInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32OSid); +static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap); +#endif + +#if defined(SUPPORT_AUTOVZ) +#define RGX_FW_MMU_RESERVED_MEM_SETUP(devnode) (MMU_PX_SETUP) { \ + LMA_PhyContigPagesAlloc, \ + LMA_PhyContigPagesFree, \ + LMA_PhyContigPagesMap, \ + LMA_PhyContigPagesUnmap, \ + LMA_PhyContigPagesClean, \ + OSGetPageShift(), \ + (devnode)->psFwMMUReservedMemArena \ + } +#endif + + +#define RGX_MMU_LOG2_PAGE_SIZE_4KB (12) +#define RGX_MMU_LOG2_PAGE_SIZE_16KB (14) +#define RGX_MMU_LOG2_PAGE_SIZE_64KB (16) +#define RGX_MMU_LOG2_PAGE_SIZE_256KB (18) +#define RGX_MMU_LOG2_PAGE_SIZE_1MB (20) +#define RGX_MMU_LOG2_PAGE_SIZE_2MB (21) + +#define RGX_MMU_PAGE_SIZE_4KB ( 4 * 1024) +#define RGX_MMU_PAGE_SIZE_16KB ( 16 * 1024) +#define RGX_MMU_PAGE_SIZE_64KB ( 64 * 1024) +#define RGX_MMU_PAGE_SIZE_256KB ( 256 * 1024) +#define RGX_MMU_PAGE_SIZE_1MB (1024 * 1024) +#define RGX_MMU_PAGE_SIZE_2MB (2048 * 1024) +#define RGX_MMU_PAGE_SIZE_MIN RGX_MMU_PAGE_SIZE_4KB +#define RGX_MMU_PAGE_SIZE_MAX RGX_MMU_PAGE_SIZE_2MB + +#define VAR(x) #x + +static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo); + +#if defined(PVRSRV_DEBUG_LISR_EXECUTION) + +/* bits used by the LISR to provide a trace of its last execution */ +#define RGX_LISR_DEVICE_NOT_POWERED (1 << 0) +#define RGX_LISR_FWIF_POW_OFF (1 << 1) +#define RGX_LISR_EVENT_EN (1 << 2) +#define RGX_LISR_COUNTS_EQUAL (1 << 3) +#define RGX_LISR_PROCESSED (1 << 4) + +typedef struct _LISR_EXECUTION_INFO_ +{ + /* bit mask showing execution flow of last LISR invocation */ + IMG_UINT32 ui32State; + /* snapshot from the last LISR invocation, regardless of + * whether an interrupt was handled + */ +#if 
defined(RGX_FW_IRQ_OS_COUNTERS) + IMG_UINT32 aui32InterruptCountSnapshot[RGX_NUM_OS_SUPPORTED]; +#else + IMG_UINT32 aui32InterruptCountSnapshot[RGXFW_THREAD_NUM]; +#endif + /* time of the last LISR invocation */ + IMG_UINT64 ui64Clockns; +} LISR_EXECUTION_INFO; + +/* information about the last execution of the LISR */ +static LISR_EXECUTION_INFO g_sLISRExecutionInfo; + +#endif + +IMG_BOOL RGXFwIrqEventRx(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_BOOL bIrqRx = IMG_TRUE; + +#if defined(RGX_IRQ_HYPERV_HANDLER) + /* The hypervisor reads and clears the fw status register. + * Then it injects an irq only in the recipient OS. + * The KM driver should only execute the handler.*/ + PVR_UNREFERENCED_PARAMETER(psDevInfo); +#else + + IMG_UINT32 ui32IRQStatus, ui32IRQStatusReg, ui32IRQStatusEventMsk, ui32IRQClearReg, ui32IRQClearMask; + + if ((RGX_IS_FEATURE_SUPPORTED(psDevInfo, IRQ_PER_OS)) && (!PVRSRV_VZ_MODE_IS(NATIVE))) + { + /* status & clearing registers are available on both Host and Guests + * and are agnostic of the Fw CPU type. Due to the remappings done + * by the 2nd stage device MMU, all drivers assume they are accessing + * register bank 0 */ + ui32IRQStatusReg = RGX_CR_IRQ_OS0_EVENT_STATUS; + ui32IRQStatusEventMsk = RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN; + ui32IRQClearReg = RGX_CR_IRQ_OS0_EVENT_CLEAR; + ui32IRQClearMask = RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN; + } + else if (PVRSRV_VZ_MODE_IS(GUEST)) + { + /* Guest drivers on cores sharing a single interrupt don't need to + * clear it, as the Host driver or Hypervisor is responsible for it. 
*/ + return bIrqRx; + } + else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + ui32IRQStatusReg = RGX_CR_MIPS_WRAPPER_IRQ_STATUS; + ui32IRQStatusEventMsk = RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN; + ui32IRQClearReg = RGX_CR_MIPS_WRAPPER_IRQ_CLEAR; + ui32IRQClearMask = RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN; + } + else if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + ui32IRQStatusReg = RGX_CR_META_SP_MSLVIRQSTATUS; + ui32IRQStatusEventMsk = RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN; + ui32IRQClearReg = RGX_CR_META_SP_MSLVIRQSTATUS; + ui32IRQClearMask = RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK; + } + else + { + /* unhandled case */ + PVR_DPF((PVR_DBG_ERROR, "%s: GPU IRQ clearing mechanism not implemented " + "for the this architecture.", __func__)); + PVR_ASSERT(IMG_FALSE); + return IMG_FALSE; + } + + ui32IRQStatus = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32IRQStatusReg); + + if (ui32IRQStatus & ui32IRQStatusEventMsk) + { + /* acknowledge and clear the interrupt */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32IRQClearReg, ui32IRQClearMask); + } + else + { + /* spurious interrupt */ + bIrqRx = IMG_FALSE; + } +#endif + + return bIrqRx; +} + +#if !defined(NO_HARDWARE) +/*************************************************************************/ /*! +@Function SampleIRQCount +@Description Utility function taking snapshots of RGX FW interrupt count. +@Input psDevInfo Device Info structure + +@Return IMG_BOOL Returns IMG_TRUE, if RGX FW IRQ is not equal to + sampled RGX FW IRQ count for any RGX FW thread. 
+ */ /**************************************************************************/ +static INLINE IMG_BOOL SampleIRQCount(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_BOOL bReturnVal = IMG_FALSE; + volatile IMG_UINT32 *aui32SampleIrqCount = psDevInfo->aui32SampleIRQCount; + IMG_UINT32 ui32IrqCnt; + +#if defined(RGX_FW_IRQ_OS_COUNTERS) + get_irq_cnt_val(ui32IrqCnt, RGXFW_HOST_OS, psDevInfo); + + if (ui32IrqCnt != aui32SampleIrqCount[RGXFW_THREAD_0]) + { + aui32SampleIrqCount[RGXFW_THREAD_0] = ui32IrqCnt; + bReturnVal = IMG_TRUE; + } +#else + IMG_UINT32 ui32TID; + + for_each_irq_cnt(ui32TID) + { + get_irq_cnt_val(ui32IrqCnt, ui32TID, psDevInfo); + if (aui32SampleIrqCount[ui32TID] != ui32IrqCnt) + { + /** + * we are handling any unhandled interrupts here so align the host + * count with the FW count + */ + + /* Sample the current count from the FW _after_ we've cleared the interrupt. */ + aui32SampleIrqCount[ui32TID] = ui32IrqCnt; + bReturnVal = IMG_TRUE; + } + } +#endif /* defined(RGX_FW_IRQ_OS_COUNTERS) */ + + return bReturnVal; +} + +static IMG_BOOL _WaitForInterruptsTimeoutCheck(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; +#if defined(PVRSRV_DEBUG_LISR_EXECUTION) + IMG_UINT32 ui32idx; +#endif + + RGXDEBUG_PRINT_IRQ_COUNT(psDevInfo); + +#if defined(PVRSRV_DEBUG_LISR_EXECUTION) + PVR_DPF((PVR_DBG_ERROR, "Last RGX_LISRHandler State: 0x%08X Clock: %llu", + g_sLISRExecutionInfo.ui32State, + g_sLISRExecutionInfo.ui64Clockns)); + + for_each_irq_cnt(ui32idx) + { + PVR_DPF((PVR_DBG_ERROR, + MSG_IRQ_CNT_TYPE " %u: InterruptCountSnapshot: 0x%X", + ui32idx, g_sLISRExecutionInfo.aui32InterruptCountSnapshot[ui32idx])); + } +#else + PVR_DPF((PVR_DBG_ERROR, "No further information available. 
Please enable PVRSRV_DEBUG_LISR_EXECUTION")); +#endif + + + if (psFwSysData->ePowState != RGXFWIF_POW_OFF) + { + PVR_DPF((PVR_DBG_ERROR, "_WaitForInterruptsTimeout: FW pow state is not OFF (is %u)", + (unsigned int) psFwSysData->ePowState)); + } + + return SampleIRQCount(psDevInfo); +} + +void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_BOOL bScheduleMISR; + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + bScheduleMISR = IMG_TRUE; + } + else + { + bScheduleMISR = _WaitForInterruptsTimeoutCheck(psDevInfo); + } + + if (bScheduleMISR) + { + OSScheduleMISR(psDevInfo->pvMISRData); + + if (psDevInfo->pvAPMISRData != NULL) + { + OSScheduleMISR(psDevInfo->pvAPMISRData); + } + } +} + +/* + RGX LISR Handler + */ +static IMG_BOOL RGX_LISRHandler (void *pvData) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = pvData; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_BOOL bInterruptProcessed = IMG_FALSE; + RGXFWIF_SYSDATA *psFwSysData; + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + if (psDevInfo->bRGXPowered && RGXFwIrqEventRx(psDevInfo)) + { + bInterruptProcessed = IMG_TRUE; + OSScheduleMISR(psDevInfo->pvMISRData); + } + + return bInterruptProcessed; + } + + psFwSysData = psDevInfo->psRGXFWIfFwSysData; + +#if defined(PVRSRV_DEBUG_LISR_EXECUTION) + { + IMG_UINT32 ui32idx; + IMG_UINT32 ui32IrqCnt; + + for_each_irq_cnt(ui32idx) + { + get_irq_cnt_val(ui32IrqCnt, ui32idx, psDevInfo); + g_sLISRExecutionInfo.aui32InterruptCountSnapshot[ui32idx] = ui32IrqCnt; + } + + g_sLISRExecutionInfo.ui32State = 0; + g_sLISRExecutionInfo.ui64Clockns = OSClockns64(); + } +#endif + + if (psDevInfo->bRGXPowered == IMG_FALSE) + { +#if defined(PVRSRV_DEBUG_LISR_EXECUTION) + g_sLISRExecutionInfo.ui32State |= RGX_LISR_DEVICE_NOT_POWERED; +#endif + if (psFwSysData->ePowState == RGXFWIF_POW_OFF) + { +#if defined(PVRSRV_DEBUG_LISR_EXECUTION) + g_sLISRExecutionInfo.ui32State |= RGX_LISR_FWIF_POW_OFF; +#endif + return bInterruptProcessed; + } + } + + if (RGXFwIrqEventRx(psDevInfo)) + { +#if 
defined(PVRSRV_DEBUG_LISR_EXECUTION) + g_sLISRExecutionInfo.ui32State |= RGX_LISR_EVENT_EN; + psDeviceNode->ui64nLISR++; +#endif + + bInterruptProcessed = SampleIRQCount(psDevInfo); + + if (!bInterruptProcessed) + { +#if defined(PVRSRV_DEBUG_LISR_EXECUTION) + g_sLISRExecutionInfo.ui32State |= RGX_LISR_COUNTS_EQUAL; +#endif + +#if defined(RGX_FW_IRQ_OS_COUNTERS) && !defined(RGX_IRQ_HYPERV_HANDLER) + /* if per-OS GPU IRQ counters are used, but the Host OS is still the + * one that handles and clears the HW CPU IRQ, this IRQ request must be + * marked as processed. Consider an interrupt aimed at a Guest OS that + * doesn't require the MISR to run on the Host, only clearing the IRQ. + * + * This prevents the HW CPU IRQ bit being left set and marking this as + * a spurious interrupt, which in time, could lead the OS to assume + * a hardware failure occurred and disable the interrupt line. + */ + return IMG_TRUE; +#else + return bInterruptProcessed; +#endif + } + + bInterruptProcessed = IMG_TRUE; +#if defined(PVRSRV_DEBUG_LISR_EXECUTION) + g_sLISRExecutionInfo.ui32State |= RGX_LISR_PROCESSED; + psDeviceNode->ui64nMISR++; +#endif + + OSScheduleMISR(psDevInfo->pvMISRData); + + if (psDevInfo->pvAPMISRData != NULL) + { + OSScheduleMISR(psDevInfo->pvAPMISRData); + } + } + + return bInterruptProcessed; +} + +static void RGX_MISR_ProcessKCCBDeferredList(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + OS_SPINLOCK_FLAGS uiFlags; + + /* First check whether there are pending commands in Deferred KCCB List */ + OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + if (dllist_is_empty(&psDevInfo->sKCCBDeferredCommandsListHead)) + { + OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + return; + } + OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + + /* Powerlock to avoid further Power transition requests + while KCCB deferred list is being 
processed */ + eError = PVRSRVPowerLock(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire PowerLock (device: %p, error: %s)", + __func__, psDeviceNode, PVRSRVGetErrorString(eError))); + goto _RGX_MISR_ProcessKCCBDeferredList_PowerLock_failed; + } + + /* Try to send deferred KCCB commands Do not Poll from here*/ + eError = RGXSendCommandsFromDeferredList(psDevInfo, IMG_FALSE); + + PVRSRVPowerUnlock(psDeviceNode); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s could not flush Deferred KCCB list, KCCB is full.", + __func__)); + } + +_RGX_MISR_ProcessKCCBDeferredList_PowerLock_failed: + + return; +} + +static void RGX_MISRHandler_CheckFWActivePowerState(void *psDevice) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = psDevice; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + PVRSRV_ERROR eError = PVRSRV_OK; + + if (psFwSysData->ePowState == RGXFWIF_POW_ON || psFwSysData->ePowState == RGXFWIF_POW_IDLE) + { + RGX_MISR_ProcessKCCBDeferredList(psDeviceNode); + } + + if (psFwSysData->ePowState == RGXFWIF_POW_IDLE) + { + /* The FW is IDLE and therefore could be shut down */ + eError = RGXActivePowerRequest(psDeviceNode); + + if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)) + { + if (eError != PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Failed RGXActivePowerRequest call (device: %p) with %s", + __func__, psDeviceNode, PVRSRVGetErrorString(eError))); + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + } + else + { + /* Re-schedule the power down request as it was deferred. 
*/ + OSScheduleMISR(psDevInfo->pvAPMISRData); + } + } + } + +} + +/* Shorter defines to keep the code a bit shorter */ +#define GPU_IDLE RGXFWIF_GPU_UTIL_STATE_IDLE +#define GPU_ACTIVE RGXFWIF_GPU_UTIL_STATE_ACTIVE +#define GPU_BLOCKED RGXFWIF_GPU_UTIL_STATE_BLOCKED +#define MAX_ITERATIONS 64 + +static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hGpuUtilUser, + RGXFWIF_GPU_UTIL_STATS *psReturnStats) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + volatile RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb; + RGXFWIF_GPU_UTIL_STATS *psAggregateStats; + IMG_UINT64 ui64TimeNow; + IMG_UINT32 ui32Attempts; + IMG_UINT32 ui32Remainder; + + + /***** (1) Initialise return stats *****/ + + psReturnStats->bValid = IMG_FALSE; + psReturnStats->ui64GpuStatIdle = 0; + psReturnStats->ui64GpuStatActive = 0; + psReturnStats->ui64GpuStatBlocked = 0; + psReturnStats->ui64GpuStatCumulative = 0; + + if (hGpuUtilUser == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + psAggregateStats = hGpuUtilUser; + + + /* Try to acquire GPU utilisation counters and repeat if the FW is in the middle of an update */ + for (ui32Attempts = 0; ui32Attempts < 4; ui32Attempts++) + { + IMG_UINT64 aui64TmpCounters[RGXFWIF_GPU_UTIL_STATE_NUM] = {0}; + IMG_UINT64 ui64LastPeriod = 0, ui64LastWord = 0, ui64LastState = 0, ui64LastTime = 0; + IMG_UINT32 i = 0; + + + /***** (2) Get latest data from shared area *****/ + + OSLockAcquire(psDevInfo->hGPUUtilLock); + + /* + * First attempt at detecting if the FW is in the middle of an update. + * This should also help if the FW is in the middle of a 64 bit variable update. 
+ */ + while (((ui64LastWord != psUtilFWCb->ui64LastWord) || + (aui64TmpCounters[ui64LastState] != + psUtilFWCb->aui64StatsCounters[ui64LastState])) && + (i < MAX_ITERATIONS)) + { + ui64LastWord = psUtilFWCb->ui64LastWord; + ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(ui64LastWord); + aui64TmpCounters[GPU_IDLE] = psUtilFWCb->aui64StatsCounters[GPU_IDLE]; + aui64TmpCounters[GPU_ACTIVE] = psUtilFWCb->aui64StatsCounters[GPU_ACTIVE]; + aui64TmpCounters[GPU_BLOCKED] = psUtilFWCb->aui64StatsCounters[GPU_BLOCKED]; + i++; + } + + OSLockRelease(psDevInfo->hGPUUtilLock); + + if (i == MAX_ITERATIONS) + { + PVR_DPF((PVR_DBG_WARNING, + "RGXGetGpuUtilStats could not get reliable data after trying %u times", i)); + return PVRSRV_ERROR_TIMEOUT; + } + + + /***** (3) Compute return stats *****/ + + /* Update temp counters to account for the time since the last update to the shared ones */ + OSMemoryBarrier(); /* Ensure the current time is read after the loop above */ + ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64()); + ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(ui64LastWord); + ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime); + aui64TmpCounters[ui64LastState] += ui64LastPeriod; + + /* Get statistics for a user since its last request */ + psReturnStats->ui64GpuStatIdle = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_IDLE], + psAggregateStats->ui64GpuStatIdle); + psReturnStats->ui64GpuStatActive = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_ACTIVE], + psAggregateStats->ui64GpuStatActive); + psReturnStats->ui64GpuStatBlocked = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_BLOCKED], + psAggregateStats->ui64GpuStatBlocked); + psReturnStats->ui64GpuStatCumulative = psReturnStats->ui64GpuStatIdle + + psReturnStats->ui64GpuStatActive + + psReturnStats->ui64GpuStatBlocked; + + if (psAggregateStats->ui64TimeStamp != 0) + { + IMG_UINT64 ui64TimeSinceLastCall = ui64TimeNow - psAggregateStats->ui64TimeStamp; + /* We expect to return at least 75% 
of the time since the last call in GPU stats */ + IMG_UINT64 ui64MinReturnedStats = ui64TimeSinceLastCall - (ui64TimeSinceLastCall / 4); + + /* + * If the returned stats are substantially lower than the time since + * the last call, then the Host might have read a partial update from the FW. + * If this happens, try sampling the shared counters again. + */ + if (psReturnStats->ui64GpuStatCumulative < ui64MinReturnedStats) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Return stats (%" IMG_UINT64_FMTSPEC ") too low " + "(call period %" IMG_UINT64_FMTSPEC ")", + __func__, psReturnStats->ui64GpuStatCumulative, ui64TimeSinceLastCall)); + PVR_DPF((PVR_DBG_MESSAGE, "%s: Attempt #%u has failed, trying again", + __func__, ui32Attempts)); + continue; + } + } + + break; + } + + + /***** (4) Update aggregate stats for the current user *****/ + + psAggregateStats->ui64GpuStatIdle += psReturnStats->ui64GpuStatIdle; + psAggregateStats->ui64GpuStatActive += psReturnStats->ui64GpuStatActive; + psAggregateStats->ui64GpuStatBlocked += psReturnStats->ui64GpuStatBlocked; + psAggregateStats->ui64TimeStamp = ui64TimeNow; + + + /***** (5) Convert return stats to microseconds *****/ + + psReturnStats->ui64GpuStatIdle = OSDivide64(psReturnStats->ui64GpuStatIdle, 1000, &ui32Remainder); + psReturnStats->ui64GpuStatActive = OSDivide64(psReturnStats->ui64GpuStatActive, 1000, &ui32Remainder); + psReturnStats->ui64GpuStatBlocked = OSDivide64(psReturnStats->ui64GpuStatBlocked, 1000, &ui32Remainder); + psReturnStats->ui64GpuStatCumulative = OSDivide64(psReturnStats->ui64GpuStatCumulative, 1000, &ui32Remainder); + + /* Check that the return stats make sense */ + if (psReturnStats->ui64GpuStatCumulative == 0) + { + /* We can enter here only if all the RGXFWIF_GPU_UTIL_GET_PERIOD + * returned 0. This could happen if the GPU frequency value + * is not well calibrated and the FW is updating the GPU state + * while the Host is reading it. 
+ * When such an event happens frequently, timers or the aggregate + * stats might not be accurate... + */ + PVR_DPF((PVR_DBG_WARNING, "RGXGetGpuUtilStats could not get reliable data.")); + return PVRSRV_ERROR_RESOURCE_UNAVAILABLE; + } + + psReturnStats->bValid = IMG_TRUE; + + return PVRSRV_OK; +} + +PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser) +{ + RGXFWIF_GPU_UTIL_STATS *psAggregateStats; + + /* NoStats used since this may be called outside of the register/de-register + * process calls which track memory use. */ + psAggregateStats = OSAllocMemNoStats(sizeof(RGXFWIF_GPU_UTIL_STATS)); + if (psAggregateStats == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psAggregateStats->ui64GpuStatIdle = 0; + psAggregateStats->ui64GpuStatActive = 0; + psAggregateStats->ui64GpuStatBlocked = 0; + psAggregateStats->ui64TimeStamp = 0; + + /* Not used */ + psAggregateStats->bValid = IMG_FALSE; + psAggregateStats->ui64GpuStatCumulative = 0; + + *phGpuUtilUser = psAggregateStats; + + return PVRSRV_OK; +} + +PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser) +{ + RGXFWIF_GPU_UTIL_STATS *psAggregateStats; + + if (hGpuUtilUser == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psAggregateStats = hGpuUtilUser; + OSFreeMemNoStats(psAggregateStats); + + return PVRSRV_OK; +} + +/* + RGX MISR Handler +*/ +static void RGX_MISRHandler_Main (void *pvData) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = pvData; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + /* Give the HWPerf service a chance to transfer some data from the FW + * buffer to the host driver transport layer buffer. + */ + RGXHWPerfDataStoreCB(psDeviceNode); + +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) + /* Process the signalled checkpoints in the checkpoint CCB, before + * handling all other notifiers. 
*/ + RGXCheckCheckpointCCB(psDeviceNode); +#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */ + + /* Inform other services devices that we have finished an operation */ + PVRSRVCheckStatus(psDeviceNode); + +#if defined(SUPPORT_PDVFS) && defined(RGXFW_META_SUPPORT_2ND_THREAD) + /* Normally, firmware CCB only exists for the primary FW thread unless PDVFS + is running on the second[ary] FW thread, here we process said CCB */ + RGXPDVFSCheckCoreClkRateChange(psDeviceNode->pvDevice); +#endif + + /* Process the Firmware CCB for pending commands */ + RGXCheckFirmwareCCB(psDeviceNode->pvDevice); + + /* Calibrate the GPU frequency and recorrelate Host and GPU timers (done every few seconds) */ + RGXTimeCorrRestartPeriodic(psDeviceNode); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Process Workload Estimation Specific commands from the FW */ + WorkEstCheckFirmwareCCB(psDeviceNode->pvDevice); +#endif + + if (psDevInfo->pvAPMISRData == NULL) + { + RGX_MISR_ProcessKCCBDeferredList(psDeviceNode); + } +} +#endif /* !defined(NO_HARDWARE) */ + + +#if defined(PDUMP) +static PVRSRV_ERROR RGXPDumpBootldrData(PVRSRV_DEVICE_NODE *psDeviceNode, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + PMR *psFWDataPMR; + RGXMIPSFW_BOOT_DATA *psBootData; + IMG_DEV_PHYADDR sTmpAddr; + IMG_UINT32 ui32BootConfOffset, ui32ParamOffset, i; + PVRSRV_ERROR eError; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + psFWDataPMR = (PMR *)(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR); + ui32BootConfOffset = RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA); + ui32BootConfOffset += RGXMIPSFW_BOOTLDR_CONF_OFFSET; + + /* The physical addresses used by a pdump player will be different + * than the ones we have put in the MIPS bootloader configuration data. + * We have to tell the pdump player to replace the original values with the real ones. 
+ */ + PDUMPCOMMENT("Pass new boot parameters to the FW"); + + /* Rogue Registers physical address */ + ui32ParamOffset = ui32BootConfOffset + offsetof(RGXMIPSFW_BOOT_DATA, ui64RegBase); + + eError = PDumpRegLabelToMem64(RGX_PDUMPREG_NAME, + 0x0, + psFWDataPMR, + ui32ParamOffset, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of Rogue registers phy address failed (%u)", eError)); + return eError; + } + + /* Page Table physical Address */ + eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sTmpAddr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXBootldrDataInit: MMU_AcquireBaseAddr failed (%u)", + eError)); + return eError; + } + + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc, + (void **)&psBootData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire pointer to FW data (%s)", + __func__, PVRSRVGetErrorString(eError))); + return eError; + } + + psBootData = IMG_OFFSET_ADDR(psBootData, ui32BootConfOffset); + + for (i = 0; i < psBootData->ui32PTNumPages; i++) + { + ui32ParamOffset = ui32BootConfOffset + + offsetof(RGXMIPSFW_BOOT_DATA, aui64PTPhyAddr[0]) + + i * sizeof(psBootData->aui64PTPhyAddr[0]); + + eError = PDumpPTBaseObjectToMem64(psDeviceNode->psFirmwareMMUDevAttrs->pszMMUPxPDumpMemSpaceName, + psFWDataPMR, + 0, + ui32ParamOffset, + PDUMP_FLAGS_CONTINUOUS, + MMU_LEVEL_1, + sTmpAddr.uiAddr, + i << psBootData->ui32PTLog2PageSize); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of page tables phy address failed (%u)", eError)); + return eError; + } + } + + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc); + + /* Stack physical address */ + ui32ParamOffset = ui32BootConfOffset + offsetof(RGXMIPSFW_BOOT_DATA, ui64StackPhyAddr); + + eError = PDumpMemLabelToMem64(psFWDataPMR, + psFWDataPMR, + RGXGetFWImageSectionOffset(NULL, MIPS_STACK), + ui32ParamOffset, + PDUMP_FLAGS_CONTINUOUS); + if 
(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of stack phy address failed (%u)", eError)); + return eError; + } + + return eError; +} +#endif /* PDUMP */ + + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +PVRSRV_ERROR RGXVirtPopulateLMASubArenas(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], + IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], + IMG_BOOL bEnableTrustedDeviceAceConfig) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_UINT32 ui32OS, ui32Region; + + psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; + + for (ui32OS = 0; ui32OS < GPUVIRT_VALIDATION_NUM_OS; ui32OS++) + { + for (ui32Region = 0; ui32Region < GPUVIRT_VALIDATION_NUM_REGIONS; ui32Region++) + { + PVR_DPF((PVR_DBG_MESSAGE, + "OS=%u, Region=%u, Min=0x%x, Max=0x%x", + ui32OS, + ui32Region, + aui32OSidMin[ui32OS][ui32Region], + aui32OSidMax[ui32OS][ui32Region])); + } + } + + PopulateLMASubArenas(psDeviceNode, aui32OSidMin, aui32OSidMax); + +#if defined(EMULATOR) + if ((bEnableTrustedDeviceAceConfig) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACELITE))) + { + SetTrustedDeviceAceEnabled(); + } +#else + { + PVR_UNREFERENCED_PARAMETER(bEnableTrustedDeviceAceConfig); + } +#endif + + return PVRSRV_OK; +} +#endif + +static PVRSRV_ERROR RGXSetPowerParams(PVRSRV_RGXDEV_INFO *psDevInfo, + PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + PVRSRV_ERROR eError; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + /* Save information used on power transitions for later + * (when RGXStart and RGXStop are executed) + */ + psDevInfo->sLayerParams.psDevInfo = psDevInfo; + psDevInfo->sLayerParams.psDevConfig = psDevConfig; +#if defined(PDUMP) + psDevInfo->sLayerParams.ui32PdumpFlags = PDUMP_FLAGS_CONTINUOUS; +#endif + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) || + RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + { + IMG_DEV_PHYADDR sKernelMMUCtxPCAddr; + + eError = 
MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, + &sKernelMMUCtxPCAddr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire Kernel MMU Ctx page catalog")); + return eError; + } + + psDevInfo->sLayerParams.sPCAddr = sKernelMMUCtxPCAddr; + } + else + { + PMR *psFWCodePMR = (PMR *)(psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR); + PMR *psFWDataPMR = (PMR *)(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR); + IMG_DEV_PHYADDR sPhyAddr; + IMG_BOOL bValid; + +#if defined(SUPPORT_ALT_REGBASE) + psDevInfo->sLayerParams.sGPURegAddr = psDevConfig->sAltRegsGpuPBase; +#else + /* The physical address of the GPU registers needs to be translated + * in case we are in a LMA scenario + */ + PhysHeapCpuPAddrToDevPAddr(psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL], + 1, + &sPhyAddr, + &(psDevConfig->sRegsCpuPBase)); + + psDevInfo->sLayerParams.sGPURegAddr = sPhyAddr; +#endif + + /* Register bank must be aligned to 512KB (as per the core integration) to + * prevent the FW accessing incorrect registers */ + if ((psDevInfo->sLayerParams.sGPURegAddr.uiAddr & 0x7FFFFU) != 0U) + { + PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Register bank must be aligned to 512KB, but current address (0x%016"IMG_UINT64_FMTSPECX") is not", + psDevInfo->sLayerParams.sGPURegAddr.uiAddr)); + return PVRSRV_ERROR_INIT_FAILURE; + } + + eError = RGXGetPhyAddr(psFWCodePMR, + &sPhyAddr, + RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_CODE), + OSGetPageShift(), /* FW will be using the same page size as the OS */ + 1, + &bValid); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW boot/NMI code address")); + return eError; + } + + psDevInfo->sLayerParams.sBootRemapAddr = sPhyAddr; + + eError = RGXGetPhyAddr(psFWDataPMR, + &sPhyAddr, + RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA), + OSGetPageShift(), + 1, + &bValid); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to 
acquire FW boot/NMI data address")); + return eError; + } + + psDevInfo->sLayerParams.sDataRemapAddr = sPhyAddr; + + eError = RGXGetPhyAddr(psFWCodePMR, + &sPhyAddr, + RGXGetFWImageSectionOffset(NULL, MIPS_EXCEPTIONS_CODE), + OSGetPageShift(), + 1, + &bValid); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW exceptions address")); + return eError; + } + + psDevInfo->sLayerParams.sCodeRemapAddr = sPhyAddr; + + psDevInfo->sLayerParams.sTrampolineRemapAddr.uiAddr = psDevInfo->psTrampoline->sPhysAddr.uiAddr; + + psDevInfo->sLayerParams.bDevicePA0IsValid = psDevConfig->bDevicePA0IsValid; + } + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + /* Send information used on power transitions to the trusted device as + * in this setup the driver cannot start/stop the GPU and perform resets + */ + if (psDevConfig->pfnTDSetPowerParams) + { + PVRSRV_TD_POWER_PARAMS sTDPowerParams; + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) || + RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + { + sTDPowerParams.sPCAddr = psDevInfo->sLayerParams.sPCAddr; + } + else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + sTDPowerParams.sGPURegAddr = psDevInfo->sLayerParams.sGPURegAddr; + sTDPowerParams.sBootRemapAddr = psDevInfo->sLayerParams.sBootRemapAddr; + sTDPowerParams.sCodeRemapAddr = psDevInfo->sLayerParams.sCodeRemapAddr; + sTDPowerParams.sDataRemapAddr = psDevInfo->sLayerParams.sDataRemapAddr; + } + + eError = psDevConfig->pfnTDSetPowerParams(psDevConfig->hSysData, + &sTDPowerParams); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: TDSetPowerParams not implemented!")); + eError = PVRSRV_ERROR_NOT_IMPLEMENTED; + } +#endif + + return eError; +} + +/* + RGXSystemHasFBCDCVersion31 +*/ +static IMG_BOOL RGXSystemHasFBCDCVersion31(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; +#if defined(SUPPORT_VALIDATION) + 
IMG_UINT32 ui32FBCDCVersionOverride = 0; +#endif + + if (RGX_IS_ERN_SUPPORTED(psDevInfo, 66622)) + { +#if defined(SUPPORT_VALIDATION) + void *pvAppHintState = NULL; + + IMG_UINT32 ui32AppHintDefault; + + OSCreateKMAppHintState(&pvAppHintState); + ui32AppHintDefault = PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE; + OSGetKMAppHintUINT32(pvAppHintState, FBCDCVersionOverride, + &ui32AppHintDefault, &ui32FBCDCVersionOverride); + + if (ui32FBCDCVersionOverride > 0) + { + if (ui32FBCDCVersionOverride == 2) + { + return IMG_TRUE; + } + } + else +#endif + { + if (psDeviceNode->psDevConfig->bHasFBCDCVersion31) + { + return IMG_TRUE; + } + } + } + else + { + +#if defined(SUPPORT_VALIDATION) + if (ui32FBCDCVersionOverride == 2) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: FBCDCVersionOverride forces FBC3.1 but this core doesn't support it!", + __func__)); + } +#endif + +#if !defined(NO_HARDWARE) + if (psDeviceNode->psDevConfig->bHasFBCDCVersion31) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: System uses FBCDC3.1 but GPU doesn't support it!", + __func__)); + } +#endif + } + + return IMG_FALSE; +} + +/* + RGXDevMMUAttributes +*/ +static MMU_DEVICEATTRIBS *RGXDevMMUAttributes(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bKernelMemoryCtx) +{ + MMU_DEVICEATTRIBS *psMMUDevAttrs; + + if ((psDeviceNode->pfnCheckDeviceFeature) && + PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, MIPS)) + { + psMMUDevAttrs = bKernelMemoryCtx ? 
+ psDeviceNode->psFirmwareMMUDevAttrs : + psDeviceNode->psMMUDevAttrs; + } + else + { + psMMUDevAttrs = psDeviceNode->psMMUDevAttrs; + } + return psMMUDevAttrs; +} + +#if defined(PDUMP) && defined(SUPPORT_SECURITY_VALIDATION) +/* + RGXGetSecurePDumpMemspace +*/ +static PVRSRV_ERROR RGXGetSecurePDumpMemspace(PVRSRV_DEVICE_NODE *psDeviceNode, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_CHAR *pszMemspaceName, + IMG_UINT32 ui32MemspaceNameLen) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError = PVRSRV_OK; + + if (PVRSRV_CHECK_SECURE_FW_CODE(uiFlags) || + (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && + PVRSRV_CHECK_SECURE_FW_DATA(uiFlags))) + { + OSSNPrintf(pszMemspaceName, + ui32MemspaceNameLen, + PMR_MEMSPACE_FMTSPEC, + "TDFWMEM"); + } + else if (PVRSRV_CHECK_SECURE_BUFFER(uiFlags) || + (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && + PVRSRV_CHECK_SECURE_FW_DATA(uiFlags))) + { + OSSNPrintf(pszMemspaceName, + ui32MemspaceNameLen, + PMR_MEMSPACE_FMTSPEC, + "TDSECBUFMEM"); + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Not a secure allocation, flags 0x%" PVRSRV_MEMALLOCFLAGS_FMTSPEC, + __func__, uiFlags)); + eError = PVRSRV_ERROR_INVALID_REQUEST; + } + + return eError; +} +#endif + +/* + * RGXInitDevPart2 + */ +PVRSRV_ERROR RGXInitDevPart2(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32DeviceFlags, + IMG_UINT32 ui32HWPerfHostBufSizeKB, + IMG_UINT32 ui32HWPerfHostFilter, + RGX_ACTIVEPM_CONF eActivePMConf) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_DEV_POWER_STATE eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON; + PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; + +#if defined(PDUMP) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + RGXPDumpBootldrData(psDeviceNode, psDevInfo); + } +#endif +#if defined(TIMING) || defined(DEBUG) + OSUserModeAccessToPerfCountersEn(); +#endif + + PDUMPCOMMENT("RGX Initialisation Part 2"); + + /* Initialise Device Flags */ + 
psDevInfo->ui32DeviceFlags = 0; + RGXSetDeviceFlags(psDevInfo, ui32DeviceFlags, IMG_TRUE); + + /* Allocate DVFS Table (needs to be allocated before GPU trace events + * component is initialised because there is a dependency between them) */ + psDevInfo->psGpuDVFSTable = OSAllocZMem(sizeof(*(psDevInfo->psGpuDVFSTable))); + if (psDevInfo->psGpuDVFSTable == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVRGXInitDevPart2KM: failed to allocate gpu dvfs table storage")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* Initialise HWPerfHost buffer. */ + if (RGXHWPerfHostInit(psDevInfo, ui32HWPerfHostBufSizeKB) == PVRSRV_OK) + { + if (psDevInfo->ui32HWPerfHostFilter == 0) + { + RGXHWPerfHostSetEventFilter(psDevInfo, ui32HWPerfHostFilter); + } + + /* If HWPerf enabled allocate all resources for the host side buffer. */ + if (psDevInfo->ui32HWPerfHostFilter != 0) + { + if (RGXHWPerfHostInitOnDemandResources(psDevInfo) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "HWPerfHost buffer on demand" + " initialisation failed.")); + } + } + } + else + { + PVR_DPF((PVR_DBG_WARNING, "HWPerfHost buffer initialisation failed.")); + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Initialise work estimation lock */ + eError = OSLockCreate(&psDevInfo->hWorkEstLock); + PVR_ASSERT(eError == PVRSRV_OK); +#endif + + /* Initialise lists of ZSBuffers */ + eError = OSLockCreate(&psDevInfo->hLockZSBuffer); + PVR_ASSERT(eError == PVRSRV_OK); + dllist_init(&psDevInfo->sZSBufferHead); + psDevInfo->ui32ZSBufferCurrID = 1; + + /* Initialise lists of growable Freelists */ + eError = OSLockCreate(&psDevInfo->hLockFreeList); + PVR_ASSERT(eError == PVRSRV_OK); + dllist_init(&psDevInfo->sFreeListHead); + psDevInfo->ui32FreelistCurrID = 1; + + eError = OSLockCreate(&psDevInfo->hDebugFaultInfoLock); + + if (eError != PVRSRV_OK) + { + return eError; + } + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + + eError = OSLockCreate(&psDevInfo->hMMUCtxUnregLock); + + if (eError != 
PVRSRV_OK) + { + return eError; + } + } + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + eError = OSLockCreate(&psDevInfo->hNMILock); + + if (eError != PVRSRV_OK) + { + return eError; + } + } + + /* Setup GPU utilisation stats update callback */ + eError = OSLockCreate(&psDevInfo->hGPUUtilLock); + PVR_ASSERT(eError == PVRSRV_OK); +#if !defined(NO_HARDWARE) + psDevInfo->pfnGetGpuUtilStats = RGXGetGpuUtilStats; +#endif + + eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON; + psDevInfo->eActivePMConf = eActivePMConf; + + /* set-up the Active Power Mgmt callback */ +#if !defined(NO_HARDWARE) + { + RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; + IMG_BOOL bSysEnableAPM = psRGXData->psRGXTimingInfo->bEnableActivePM; + IMG_BOOL bEnableAPM = ((eActivePMConf == RGX_ACTIVEPM_DEFAULT) && bSysEnableAPM) || + (eActivePMConf == RGX_ACTIVEPM_FORCE_ON); + + if (bEnableAPM && (!PVRSRV_VZ_MODE_IS(NATIVE))) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Active Power Management disabled in virtualization mode", __func__)); + bEnableAPM = false; + } + +#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) && defined(SUPPORT_AUTOVZ) + /* The AutoVz driver enable a virtualisation watchdog not compatible with APM */ + PVR_ASSERT(bEnableAPM == IMG_FALSE); +#endif + + if (bEnableAPM) + { + eError = OSInstallMISR(&psDevInfo->pvAPMISRData, + RGX_MISRHandler_CheckFWActivePowerState, + psDeviceNode, + "RGX_CheckFWActivePower"); + if (eError != PVRSRV_OK) + { + return eError; + } + + /* Prevent the device being woken up before there is something to do. */ + eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF; + } + } +#endif + + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableAPM, + RGXQueryAPMState, + RGXSetAPMState, + psDeviceNode, + NULL); + + RGXTimeCorrInitAppHintCallbacks(psDeviceNode); + + /* + Register the device with the power manager. 
+ Normal/Hyperv Drivers: Supports power management + Guest Drivers: Do not currently support power management + */ + eError = PVRSRVRegisterPowerDevice(psDeviceNode, + &RGXPrePowerState, &RGXPostPowerState, + psDevConfig->pfnPrePowerState, psDevConfig->pfnPostPowerState, + &RGXPreClockSpeedChange, &RGXPostClockSpeedChange, + &RGXForcedIdleRequest, &RGXCancelForcedIdleRequest, + &RGXDustCountChange, + (IMG_HANDLE)psDeviceNode, + PVRSRV_DEV_POWER_STATE_OFF, + eDefaultPowerState); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVRGXInitDevPart2KM: failed to register device with power manager")); + return eError; + } + + eError = RGXSetPowerParams(psDevInfo, psDevConfig); + if (eError != PVRSRV_OK) return eError; + +#if defined(SUPPORT_VALIDATION) + { + void *pvAppHintState = NULL; + + IMG_UINT32 ui32AppHintDefault; + + OSCreateKMAppHintState(&pvAppHintState); + ui32AppHintDefault = PVRSRV_APPHINT_TESTSLRINTERVAL; + OSGetKMAppHintUINT32(pvAppHintState, TestSLRInterval, + &ui32AppHintDefault, &psDevInfo->ui32TestSLRInterval); + PVR_LOG(("OSGetKMAppHintUINT32(TestSLRInterval) ui32AppHintDefault=%d, psDevInfo->ui32TestSLRInterval=%d", + ui32AppHintDefault, psDevInfo->ui32TestSLRInterval)); + OSFreeKMAppHintState(pvAppHintState); + psDevInfo->ui32TestSLRCount = psDevInfo->ui32TestSLRInterval; + psDevInfo->ui32SLRSkipFWAddr = 0; + } +#endif + +#if defined(PDUMP) +#if defined(NO_HARDWARE) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_DEINIT, "Wait for the FW to signal idle"); + + /* Kick the FW once, in case it still needs to detect and set the idle state */ + PDUMPREG32(RGX_PDUMPREG_NAME, + RGX_CR_MTS_SCHEDULE, + RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK, + PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_DEINIT); + + eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfFwSysDataMemDesc, + offsetof(RGXFWIF_SYSDATA, ePowState), + RGXFWIF_POW_IDLE, + 0xFFFFFFFFU, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_DEINIT); + if (eError != PVRSRV_OK) return 
eError; +#endif + + /* Run RGXStop with the correct PDump flags to feed the last-frame deinit buffer */ + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_DEINIT, "RGX deinitialisation commands"); + + psDevInfo->sLayerParams.ui32PdumpFlags |= PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW; + + if (! PVRSRV_VZ_MODE_IS(GUEST)) + { + eError = RGXStop(&psDevInfo->sLayerParams); + if (eError != PVRSRV_OK) return eError; + } + + psDevInfo->sLayerParams.ui32PdumpFlags &= ~(PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW); +#endif + +#if !defined(NO_HARDWARE) + eError = RGXInstallProcessQueuesMISR(&psDevInfo->hProcessQueuesMISR, psDeviceNode); + if (eError != PVRSRV_OK) + { + if (psDevInfo->pvAPMISRData != NULL) + { + (void) OSUninstallMISR(psDevInfo->pvAPMISRData); + } + return eError; + } + + /* Register the interrupt handlers */ + eError = OSInstallMISR(&psDevInfo->pvMISRData, + RGX_MISRHandler_Main, + psDeviceNode, + "RGX_Main"); + if (eError != PVRSRV_OK) + { + if (psDevInfo->pvAPMISRData != NULL) + { + (void) OSUninstallMISR(psDevInfo->pvAPMISRData); + } + (void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR); + return eError; + } + + eError = SysInstallDeviceLISR(psDevConfig->hSysData, + psDevConfig->ui32IRQ, + PVRSRV_MODNAME, + RGX_LISRHandler, + psDeviceNode, + &psDevInfo->pvLISRData); + if (eError != PVRSRV_OK) + { + if (psDevInfo->pvAPMISRData != NULL) + { + (void) OSUninstallMISR(psDevInfo->pvAPMISRData); + } + (void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR); + (void) OSUninstallMISR(psDevInfo->pvMISRData); + return eError; + } +#endif + +#if defined(PDUMP) +/* We need to wrap the check for S7_CACHE_HIERARCHY being supported inside + * #if defined(RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK)...#endif, as the + * RGX_IS_FEATURE_SUPPORTED macro references a bitmask define derived from its + * last parameter which will not exist on architectures which do not have this + * feature. 
+ * Note we check for RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK rather than for + * RGX_FEATURE_S7_CACHE_HIERARCHY (which might seem a better choice) as this + * means we can build the kernel driver without having to worry about the BVNC + * (the BIT_MASK is defined in rgx_bvnc_defs_km.h for all BVNCs for a given + * architecture, whereas the FEATURE is only defined for those BVNCs that + * support it). + */ +#if defined(RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK) + if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_CACHE_HIERARCHY))) +#endif + { + if (!PVRSRVSystemSnoopingOfCPUCache(psDevConfig) && + !PVRSRVSystemSnoopingOfDeviceCache(psDevConfig)) + { + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has NO cache snooping"); + } + else + { + if (PVRSRVSystemSnoopingOfCPUCache(psDevConfig)) + { + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has CPU cache snooping"); + } + if (PVRSRVSystemSnoopingOfDeviceCache(psDevConfig)) + { + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has DEVICE cache snooping"); + } + } + } +#endif + + if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE_ONLY)) + { + eError = PVRSRVTQLoadShaders(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to load TQ shaders", __func__)); + return eError; + } + } + + psDevInfo->bDevInit2Done = IMG_TRUE; + + return PVRSRV_OK; +} + +#define VZ_RGX_FW_FILENAME_SUFFIX ".vz" +#define RGX_FW_FILENAME_MAX_SIZE ((sizeof(RGX_FW_FILENAME)+ \ + RGX_BVNC_STR_SIZE_MAX+sizeof(VZ_RGX_FW_FILENAME_SUFFIX))) + +static void _GetFWFileName(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_CHAR *pszFWFilenameStr, + IMG_CHAR *pszFWpFilenameStr) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + const IMG_CHAR * const pszFWFilenameSuffix = + PVRSRV_VZ_MODE_IS(NATIVE) ? "" : VZ_RGX_FW_FILENAME_SUFFIX; + + OSSNPrintf(pszFWFilenameStr, RGX_FW_FILENAME_MAX_SIZE, + "%s." 
RGX_BVNC_STR_FMTSPEC "%s", + RGX_FW_FILENAME, + psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C, + pszFWFilenameSuffix); + + OSSNPrintf(pszFWpFilenameStr, RGX_FW_FILENAME_MAX_SIZE, + "%s." RGX_BVNC_STRP_FMTSPEC "%s", + RGX_FW_FILENAME, + psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C, + pszFWFilenameSuffix); +} + +const void * RGXLoadAndGetFWData(PVRSRV_DEVICE_NODE *psDeviceNode, + OS_FW_IMAGE **ppsRGXFW) +{ + IMG_CHAR aszFWFilenameStr[RGX_FW_FILENAME_MAX_SIZE]; + IMG_CHAR aszFWpFilenameStr[RGX_FW_FILENAME_MAX_SIZE]; + IMG_CHAR *pszLoadedFwStr; + + /* Prepare the image filenames to use in the following code */ + _GetFWFileName(psDeviceNode, aszFWFilenameStr, aszFWpFilenameStr); + + /* Get pointer to Firmware image */ + pszLoadedFwStr = aszFWFilenameStr; + *ppsRGXFW = OSLoadFirmware(psDeviceNode, pszLoadedFwStr, OS_FW_VERIFY_FUNCTION); + if (*ppsRGXFW == NULL) + { + pszLoadedFwStr = aszFWpFilenameStr; + *ppsRGXFW = OSLoadFirmware(psDeviceNode, pszLoadedFwStr, OS_FW_VERIFY_FUNCTION); + if (*ppsRGXFW == NULL) + { + pszLoadedFwStr = RGX_FW_FILENAME; + *ppsRGXFW = OSLoadFirmware(psDeviceNode, pszLoadedFwStr, OS_FW_VERIFY_FUNCTION); + if (*ppsRGXFW == NULL) + { + PVR_DPF((PVR_DBG_FATAL, "All RGX Firmware image loads failed for '%s'", + aszFWFilenameStr)); + return NULL; + } + } + } + + PVR_LOG(("RGX Firmware image '%s' loaded", pszLoadedFwStr)); + + return OSFirmwareData(*ppsRGXFW); +} + +#if defined(PDUMP) +PVRSRV_ERROR RGXInitHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + RGXFWIF_KCCB_CMD sKccbCmd; + PVRSRV_ERROR eError; + + /* Fill in the command structure with the parameters needed */ + sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT; + + eError = RGXSendCommandWithPowLock(psDeviceNode->pvDevice, + &sKccbCmd, + PDUMP_FLAGS_CONTINUOUS); + + return eError; +} +#endif + 
+PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + /* set up fw memory contexts */ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + +#if defined(SUPPORT_AUTOVZ) + MMU_PX_SETUP sDefaultPxSetup = psDeviceNode->sDevMMUPxSetup; + + if (PVRSRV_VZ_MODE_IS(HOST) && (!psDeviceNode->bAutoVzFwIsUp)) + { + /* Temporarily swap the MMU Px methods and default LMA region of GPU physheap to + * allow the page tables of all memory mapped by the FwKernel context to be placed + * in a dedicated memory carveout. This should allow the firmware mappings to + * persist after a Host kernel crash or driver reset. */ + + psDeviceNode->sDevMMUPxSetup = RGX_FW_MMU_RESERVED_MEM_SETUP(psDeviceNode); + } +#endif + + /* Register callbacks for creation of device memory contexts */ + psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext; + psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext; + + /* Create the memory context for the firmware. 
*/ + eError = DevmemCreateContext(psDeviceNode, DEVMEM_HEAPCFG_META, + &psDevInfo->psKernelDevmemCtx); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed DevmemCreateContext (%u)", + __func__, + eError)); + goto failed_to_create_ctx; + } + + eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, RGX_FIRMWARE_MAIN_HEAP_IDENT, + &psDevInfo->psFirmwareMainHeap); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed DevmemFindHeapByName (%u)", + __func__, + eError)); + goto failed_to_find_heap; + } + + eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, RGX_FIRMWARE_CONFIG_HEAP_IDENT, + &psDevInfo->psFirmwareConfigHeap); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed DevmemFindHeapByName (%u)", + __func__, + eError)); + goto failed_to_find_heap; + } + +#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) + if (PVRSRV_VZ_MODE_IS(HOST)) + { + IMG_UINT32 ui32OSID; + for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++) + { + IMG_CHAR szHeapName[PVRSRV_MAX_RA_NAME_LENGTH]; + + OSSNPrintf(szHeapName, sizeof(szHeapName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSID); + eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, szHeapName, + &psDevInfo->psGuestFirmwareRawHeap[ui32OSID]); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemFindHeapByName", failed_to_find_heap); + } + } +#endif + +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + if (PVRSRV_VZ_MODE_IS(HOST)) + { + IMG_DEV_PHYADDR sPhysHeapBase; + IMG_UINT32 ui32OSID; + + eError = PhysHeapRegionGetDevPAddr(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL], 0, &sPhysHeapBase); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapRegionGetDevPAddr", failed_to_find_heap); + + for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++) + { + IMG_DEV_PHYADDR sRawFwHeapBase = {sPhysHeapBase.uiAddr + (ui32OSID * RGX_FIRMWARE_RAW_HEAP_SIZE)}; + + eError = RGXFwRawHeapAllocMap(psDeviceNode, + 
ui32OSID, + sRawFwHeapBase, + RGX_FIRMWARE_RAW_HEAP_SIZE); + if (eError != PVRSRV_OK) + { + for (; ui32OSID > RGX_FIRST_RAW_HEAP_OSID; ui32OSID--) + { + RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID); + } + PVR_LOG_GOTO_IF_ERROR(eError, "RGXFwRawHeapAllocMap", failed_to_find_heap); + } + } + +#if defined(SUPPORT_AUTOVZ) + /* restore default Px setup */ + psDeviceNode->sDevMMUPxSetup = sDefaultPxSetup; +#endif + } +#else + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + eError = PvzClientMapDevPhysHeap(psDeviceNode->psDevConfig); + PVR_LOG_GOTO_IF_ERROR(eError, "PvzClientMapDevPhysHeap", failed_to_find_heap); + DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_TRUE); + DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_TRUE); + } +#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ + + return eError; + +failed_to_find_heap: + /* + * Clear the mem context create callbacks before destroying the RGX firmware + * context to avoid a spurious callback. + */ + psDeviceNode->pfnRegisterMemoryContext = NULL; + psDeviceNode->pfnUnregisterMemoryContext = NULL; + DevmemDestroyContext(psDevInfo->psKernelDevmemCtx); + psDevInfo->psKernelDevmemCtx = NULL; +failed_to_create_ctx: + return eError; +} + +void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + if (PVRSRV_VZ_MODE_IS(HOST)) + { +#if defined(SUPPORT_AUTOVZ) + MMU_PX_SETUP sDefaultPxSetup = psDeviceNode->sDevMMUPxSetup; + + psDeviceNode->sDevMMUPxSetup = RGX_FW_MMU_RESERVED_MEM_SETUP(psDeviceNode); + + if (!psDeviceNode->bAutoVzFwIsUp) +#endif + { + IMG_UINT32 ui32OSID; + + for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++) + { + RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID); + } + } +#if defined(SUPPORT_AUTOVZ) + psDeviceNode->sDevMMUPxSetup = sDefaultPxSetup; +#endif + } +#else + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + (void) 
PvzClientUnmapDevPhysHeap(psDeviceNode->psDevConfig); + + if (psDevInfo->psFirmwareMainHeap) + { + DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_FALSE); + } + if (psDevInfo->psFirmwareConfigHeap) + { + DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_FALSE); + } + } +#endif + + /* + * Clear the mem context create callbacks before destroying the RGX firmware + * context to avoid a spurious callback. + */ + psDeviceNode->pfnRegisterMemoryContext = NULL; + psDeviceNode->pfnUnregisterMemoryContext = NULL; + + if (psDevInfo->psKernelDevmemCtx) + { + eError = DevmemDestroyContext(psDevInfo->psKernelDevmemCtx); + PVR_ASSERT(eError == PVRSRV_OK); + } +} + +static PVRSRV_ERROR RGXAlignmentCheck(PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32AlignChecksSizeUM, + IMG_UINT32 aui32AlignChecksUM[]) +{ + static const IMG_UINT32 aui32AlignChecksKM[] = {RGXFW_ALIGN_CHECKS_INIT_KM}; + IMG_UINT32 ui32UMChecksOffset = ARRAY_SIZE(aui32AlignChecksKM) + 1; + PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; + IMG_UINT32 i, *paui32FWAlignChecks; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Skip the alignment check if the driver is guest + since there is no firmware to check against */ + PVRSRV_VZ_RET_IF_MODE(GUEST, eError); + + if (psDevInfo->psRGXFWAlignChecksMemDesc == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: FW Alignment Check" + " Mem Descriptor is NULL", __func__)); + return PVRSRV_ERROR_ALIGNMENT_ARRAY_NOT_AVAILABLE; + } + + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc, + (void **) &paui32FWAlignChecks); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire kernel address for alignment checks (%u)", + __func__, + eError)); + return eError; + } + + paui32FWAlignChecks += ui32UMChecksOffset; + if (*paui32FWAlignChecks++ != ui32AlignChecksSizeUM) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Mismatching sizes of RGXFW_ALIGN_CHECKS_INIT" + " array between UM(%d) and FW(%d)", + __func__, + ui32AlignChecksSizeUM, + 
*paui32FWAlignChecks)); + eError = PVRSRV_ERROR_INVALID_ALIGNMENT; + goto return_; + } + + for (i = 0; i < ui32AlignChecksSizeUM; i++) + { + if (aui32AlignChecksUM[i] != paui32FWAlignChecks[i]) + { + PVR_DPF((PVR_DBG_ERROR, "%s: size/offset mismatch in RGXFW_ALIGN_CHECKS_INIT[%d]" + " between UM(%d) and FW(%d)", + __func__, i, aui32AlignChecksUM[i], paui32FWAlignChecks[i])); + eError = PVRSRV_ERROR_INVALID_ALIGNMENT; + } + } + + if (eError == PVRSRV_ERROR_INVALID_ALIGNMENT) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Check for FW/KM structure" + " alignment failed.", __func__)); + } + +return_: + + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc); + + return eError; +} + +static +PVRSRV_ERROR RGXAllocateFWMemoryRegion(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEVMEM_SIZE_T ui32Size, + IMG_UINT32 uiMemAllocFlags, + PVRSRV_TD_FW_MEM_REGION eRegion, + const IMG_PCHAR pszText, + DEVMEM_MEMDESC **ppsMemDescPtr) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_DEVMEM_LOG2ALIGN_T uiLog2Align = OSGetPageShift(); +#if defined(SUPPORT_MIPS_CONTIGUOUS_FW_MEMORY) || defined(SUPPORT_TRUSTED_DEVICE) + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; +#endif + +#if defined(SUPPORT_MIPS_CONTIGUOUS_FW_MEMORY) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + uiLog2Align = RGXMIPSFW_LOG2_PAGE_SIZE_64K; + } +#endif + +#if defined(SUPPORT_DEDICATED_FW_MEMORY) + PVR_UNREFERENCED_PARAMETER(eRegion); + + PDUMPCOMMENT("Allocate dedicated FW %s memory", pszText); + + eError = DevmemAllocateDedicatedFWMem(psDeviceNode, + ui32Size, + uiLog2Align, + uiMemAllocFlags, + pszText, + ppsMemDescPtr); +#else /* defined(SUPPORT_DEDICATED_FW_MEMORY) */ + +#if defined(SUPPORT_TRUSTED_DEVICE) + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) || + RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR) || + (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && + (eRegion == PVRSRV_DEVICE_FW_CODE_REGION || + RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32))) + { + PDUMPCOMMENT("Import secure 
FW %s memory", pszText); + + eError = DevmemImportTDFWMem(psDeviceNode, + ui32Size, + uiLog2Align, + uiMemAllocFlags, + eRegion, + ppsMemDescPtr); + } + else +#endif + { + uiMemAllocFlags = (uiMemAllocFlags | + PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp); + + PVR_UNREFERENCED_PARAMETER(eRegion); + + PDUMPCOMMENT("Allocate FW %s memory", pszText); + + eError = DevmemFwAllocateExportable(psDeviceNode, + ui32Size, + 1 << uiLog2Align, + uiMemAllocFlags, + pszText, + ppsMemDescPtr); + } +#endif /* defined(SUPPORT_DEDICATED_FW_MEMORY) */ + + return eError; +} + +/*! + ******************************************************************************* + + @Function RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver + + @Description + + Validate the FW build options against KM driver build options (KM build options only) + + Following check is redundant, because next check checks the same bits. + Redundancy occurs because if client-server are build-compatible and client-firmware are + build-compatible then server-firmware are build-compatible as well. + + This check is left for clarity in error messages if any incompatibility occurs. 
+ + @Input psFwOsInit - FW init data + + @Return PVRSRV_ERROR - depending on mismatch found + + ******************************************************************************/ +static PVRSRV_ERROR RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(RGXFWIF_OSINIT *psFwOsInit) +{ +#if !defined(NO_HARDWARE) + IMG_UINT32 ui32BuildOptions, ui32BuildOptionsFWKMPart, ui32BuildOptionsMismatch; + + if (psFwOsInit == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + ui32BuildOptions = (RGX_BUILD_OPTIONS_KM & RGX_BUILD_OPTIONS_MASK_FW); + + ui32BuildOptionsFWKMPart = psFwOsInit->sRGXCompChecks.ui32BuildOptions & RGX_BUILD_OPTIONS_MASK_FW; + + if (ui32BuildOptions != ui32BuildOptionsFWKMPart) + { + ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32BuildOptionsFWKMPart; +#if !defined(PVRSRV_STRICT_COMPAT_CHECK) + /*Mask the debug flag option out as we do support combinations of debug vs release in um & km*/ + ui32BuildOptionsMismatch &= OPTIONS_STRICT; +#endif + if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and KM driver build options; " + "extra options present in the KM driver: (0x%x). Please check rgx_options.h", + ui32BuildOptions & ui32BuildOptionsMismatch )); + return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; + } + + if ( (ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware-side and KM driver build options; " + "extra options present in Firmware: (0x%x). Please check rgx_options.h", + ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch )); + return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; + } + PVR_DPF((PVR_DBG_WARNING, "RGXDevInitCompatCheck: Firmware and KM driver build options differ.")); + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware and KM driver build options match. [ OK ]")); + } +#endif + + return PVRSRV_OK; +} + +/*! 
+ ******************************************************************************* + + @Function RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver + + @Description + + Validate FW DDK version against driver DDK version + + @Input psDevInfo - device info + @Input psFwOsInit - FW init data + + @Return PVRSRV_ERROR - depending on mismatch found + + ******************************************************************************/ +static PVRSRV_ERROR RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_OSINIT *psFwOsInit) +{ +#if defined(PDUMP)||(!defined(NO_HARDWARE)) + IMG_UINT32 ui32DDKVersion; + PVRSRV_ERROR eError; + + ui32DDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN); +#endif + +#if defined(PDUMP) + PDUMPCOMMENT("Compatibility check: KM driver and FW DDK version"); + eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, ui32DDKVersion), + ui32DDKVersion, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + return eError; + } +#endif + +#if !defined(NO_HARDWARE) + if (psFwOsInit == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + if (psFwOsInit->sRGXCompChecks.ui32DDKVersion != ui32DDKVersion) + { + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible driver DDK version (%u.%u) / Firmware DDK version (%u.%u).", + PVRVERSION_MAJ, PVRVERSION_MIN, + PVRVERSION_UNPACK_MAJ(psFwOsInit->sRGXCompChecks.ui32DDKVersion), + PVRVERSION_UNPACK_MIN(psFwOsInit->sRGXCompChecks.ui32DDKVersion))); + eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH; + PVR_DBG_BREAK; + return eError; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK version (%u.%u) and Firmware DDK version (%u.%u) match. 
[ OK ]", + PVRVERSION_MAJ, PVRVERSION_MIN, + PVRVERSION_MAJ, PVRVERSION_MIN)); + } +#endif + + return PVRSRV_OK; +} + +/*! + ******************************************************************************* + + @Function RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver + + @Description + + Validate FW DDK build against driver DDK build + + @Input psDevInfo - device info + @Input psFwOsInit - FW init data + + @Return PVRSRV_ERROR - depending on mismatch found + + ******************************************************************************/ +static PVRSRV_ERROR RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_OSINIT *psFwOsInit) +{ + PVRSRV_ERROR eError=PVRSRV_OK; +#if defined(PDUMP)||(!defined(NO_HARDWARE)) + IMG_UINT32 ui32DDKBuild; + + ui32DDKBuild = PVRVERSION_BUILD; +#endif + +#if defined(PDUMP) && defined(PVRSRV_STRICT_COMPAT_CHECK) + PDUMPCOMMENT("Compatibility check: KM driver and FW DDK build"); + eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, ui32DDKBuild), + ui32DDKBuild, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + return eError; + } +#endif + +#if !defined(NO_HARDWARE) + if (psFwOsInit == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + if (psFwOsInit->sRGXCompChecks.ui32DDKBuild != ui32DDKBuild) + { + PVR_LOG(("(WARN) RGXDevInitCompatCheck: Different driver DDK build version (%d) / Firmware DDK build version (%d).", + ui32DDKBuild, psFwOsInit->sRGXCompChecks.ui32DDKBuild)); +#if defined(PVRSRV_STRICT_COMPAT_CHECK) + eError = PVRSRV_ERROR_DDK_BUILD_MISMATCH; + PVR_DBG_BREAK; + return eError; +#endif + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK build version (%d) and Firmware DDK build version (%d) match. 
[ OK ]", + ui32DDKBuild, psFwOsInit->sRGXCompChecks.ui32DDKBuild)); + } +#endif + return eError; +} + +/*! + ******************************************************************************* + + @Function RGXDevInitCompatCheck_BVNC_FWAgainstDriver + + @Description + + Validate FW BVNC against driver BVNC + + @Input psDevInfo - device info + @Input psFwOsInit - FW init data + + @Return PVRSRV_ERROR - depending on mismatch found + + ******************************************************************************/ +static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_OSINIT *psFwOsInit) +{ +#if !defined(NO_HARDWARE) + IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleBVNC; +#endif +#if defined(PDUMP)||(!defined(NO_HARDWARE)) + RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sBVNC); + PVRSRV_ERROR eError; + + sBVNC.ui64BVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B, + psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, + psDevInfo->sDevFeatureCfg.ui32C); +#endif + +#if defined(PDUMP) + PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (struct version)"); + eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) + + offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion), + sBVNC.ui32LayoutVersion, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + } + + PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (BVNC part - Lower 32 bits)"); + eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) + + offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC), + (IMG_UINT32)sBVNC.ui64BVNC, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS); + 
if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + } + + PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (BVNC part - Higher 32 bits)"); + eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) + + offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) + + sizeof(IMG_UINT32), + (IMG_UINT32)(sBVNC.ui64BVNC >> 32), + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + } +#endif + +#if !defined(NO_HARDWARE) + if (psFwOsInit == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + RGX_BVNC_EQUAL(sBVNC, psFwOsInit->sRGXCompChecks.sFWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleBVNC); + + if (!bCompatibleAll) + { + if (!bCompatibleVersion) + { + PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of driver (%u) and firmware (%u).", + __func__, + sBVNC.ui32LayoutVersion, + psFwOsInit->sRGXCompChecks.sFWBVNC.ui32LayoutVersion)); + eError = PVRSRV_ERROR_BVNC_MISMATCH; + return eError; + } + + if (!bCompatibleBVNC) + { + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in KM driver BVNC (%u.%u.%u.%u) and Firmware BVNC (%u.%u.%u.%u)", + RGX_BVNC_PACKED_EXTR_B(sBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_V(sBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_N(sBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_C(sBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_B(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_V(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_N(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_C(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC))); + eError = PVRSRV_ERROR_BVNC_MISMATCH; + return eError; + } + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware 
BVNC and KM driver BNVC match. [ OK ]")); + } +#endif + return PVRSRV_OK; +} + +/*! + ******************************************************************************* + + @Function RGXDevInitCompatCheck_BVNC_HWAgainstDriver + + @Description + + Validate HW BVNC against driver BVNC + + @Input psDevInfo - device info + @Input psFwOsInit - FW init data + + @Return PVRSRV_ERROR - depending on mismatch found + + ******************************************************************************/ +static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_HWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_OSINIT *psFwOsInit) +{ +#if defined(PDUMP) || !defined(NO_HARDWARE) + IMG_UINT64 ui64MaskBVNC = RGX_BVNC_PACK_MASK_B | + RGX_BVNC_PACK_MASK_V | + RGX_BVNC_PACK_MASK_N | + RGX_BVNC_PACK_MASK_C; + + PVRSRV_ERROR eError; + RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sSWBVNC); +#endif +#if defined(PDUMP) + PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; +#endif + +#if !defined(NO_HARDWARE) + RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sHWBVNC); + IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleBVNC; +#endif + + if (psDevInfo->bIgnoreHWReportedBVNC) + { + PVR_LOG(("BVNC compatibility checks between driver and HW are disabled (AppHint override)")); + return PVRSRV_OK; + } + +#if defined(PDUMP) || !defined(NO_HARDWARE) +#if defined(COMPAT_BVNC_MASK_V) + ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_V; +#endif +#if defined(COMPAT_BVNC_MASK_N) + ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_N; +#endif +#if defined(COMPAT_BVNC_MASK_C) + ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_C; +#endif + + sSWBVNC.ui64BVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B, + psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, + psDevInfo->sDevFeatureCfg.ui32C); + + if (RGX_IS_BRN_SUPPORTED(psDevInfo, 38344) && (psDevInfo->sDevFeatureCfg.ui32C >= 10)) + { + ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_C; + } + + if (ui64MaskBVNC != (RGX_BVNC_PACK_MASK_B | RGX_BVNC_PACK_MASK_V | RGX_BVNC_PACK_MASK_N | 
RGX_BVNC_PACK_MASK_C)) + { + PVR_LOG(("Compatibility checks: Ignoring fields: '%s%s%s%s' of HW BVNC.", + ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_B))?("B"):("")), + ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_V))?("V"):("")), + ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_N))?("N"):("")), + ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_C))?("C"):("")))); + } +#endif + +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Compatibility check: Layout version of compchecks struct"); + eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) + + offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion), + sSWBVNC.ui32LayoutVersion, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + return eError; + } + + PDUMPCOM(ui32PDumpFlags, "BVNC compatibility check started"); + if (ui64MaskBVNC & (RGX_BVNC_PACK_MASK_B | RGX_BVNC_PACK_MASK_N | RGX_BVNC_PACK_MASK_C)) + { + PDUMPIF("DISABLE_HWBNC_CHECK", ui32PDumpFlags); + PDUMPELSE("DISABLE_HWBNC_CHECK", ui32PDumpFlags); + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Compatibility check: HW BNC and FW BNC (Lower 32 bits)"); + eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) + + offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC), + (IMG_UINT32)sSWBVNC.ui64BVNC , + (IMG_UINT32)(ui64MaskBVNC & ~RGX_BVNC_PACK_MASK_V), + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + return eError; + } + + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Compatibility check: HW BNC and FW BNC (Higher 32 bits)"); + eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, + 
offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) + + offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) + + sizeof(IMG_UINT32), + (IMG_UINT32)(sSWBVNC.ui64BVNC >> 32), + (IMG_UINT32)((ui64MaskBVNC & ~RGX_BVNC_PACK_MASK_V) >> 32), + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + return eError; + } + + PDUMPFI("DISABLE_HWBNC_CHECK", ui32PDumpFlags); + } + if (ui64MaskBVNC & RGX_BVNC_PACK_MASK_V) + { + PDUMPIF("DISABLE_HWV_CHECK", ui32PDumpFlags); + PDUMPELSE("DISABLE_HWV_CHECK", ui32PDumpFlags); + + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Compatibility check: HW V and FW V"); + eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) + + offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) + + ((RGX_BVNC_PACK_SHIFT_V >= 32) ? sizeof(IMG_UINT32) : 0), + (IMG_UINT32)(sSWBVNC.ui64BVNC >> ((RGX_BVNC_PACK_SHIFT_V >= 32) ? 32 : 0)), + RGX_BVNC_PACK_MASK_V >> ((RGX_BVNC_PACK_SHIFT_V >= 32) ? 
32 : 0), + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + return eError; + } + PDUMPFI("DISABLE_HWV_CHECK", ui32PDumpFlags); + } + PDUMPCOM(ui32PDumpFlags, "BVNC compatibility check finished"); +#endif + +#if !defined(NO_HARDWARE) + if (psFwOsInit == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + sHWBVNC = psFwOsInit->sRGXCompChecks.sHWBVNC; + + sHWBVNC.ui64BVNC &= ui64MaskBVNC; + sSWBVNC.ui64BVNC &= ui64MaskBVNC; + + RGX_BVNC_EQUAL(sSWBVNC, sHWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleBVNC); + + if (!bCompatibleAll) + { + if (!bCompatibleVersion) + { + PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of HW (%d) and FW (%d).", + __func__, + sHWBVNC.ui32LayoutVersion, + sSWBVNC.ui32LayoutVersion)); + eError = PVRSRV_ERROR_BVNC_MISMATCH; + return eError; + } + + if (!bCompatibleBVNC) + { + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible HW BVNC (%d.%d.%d.%d) and FW BVNC (%d.%d.%d.%d).", + RGX_BVNC_PACKED_EXTR_B(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_V(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_N(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_C(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_B(sSWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_V(sSWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_N(sSWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_C(sSWBVNC.ui64BVNC))); + eError = PVRSRV_ERROR_BVNC_MISMATCH; + return eError; + } + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: HW BVNC (%d.%d.%d.%d) and FW BVNC (%d.%d.%d.%d) match. 
[ OK ]", + RGX_BVNC_PACKED_EXTR_B(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_V(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_N(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_C(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_B(sSWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_V(sSWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_N(sSWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_C(sSWBVNC.ui64BVNC))); + } +#endif + + return PVRSRV_OK; +} + +/*! + ******************************************************************************* + + @Function RGXDevInitCompatCheck_METACoreVersion_AgainstDriver + + @Description + + Validate HW META version against driver META version + + @Input psDevInfo - device info + @Input psFwOsInit - FW init data + + @Return PVRSRV_ERROR - depending on mismatch found + + ******************************************************************************/ +static PVRSRV_ERROR RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_OSINIT *psFwOsInit) +{ +#if defined(PDUMP)||(!defined(NO_HARDWARE)) + PVRSRV_ERROR eError; +#endif +#if defined(PDUMP) + PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; +#endif + IMG_UINT32 ui32FWCoreIDValue = 0; + IMG_CHAR *pcRGXFW_PROCESSOR = NULL; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + ui32FWCoreIDValue = RGXMIPSFW_CORE_ID_VALUE; + pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_MIPS; + } + else if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + switch (RGX_GET_FEATURE_VALUE(psDevInfo, META)) + { + case MTP218: ui32FWCoreIDValue = RGX_CR_META_MTP218_CORE_ID_VALUE; break; + case MTP219: ui32FWCoreIDValue = RGX_CR_META_MTP219_CORE_ID_VALUE; break; + case LTP218: ui32FWCoreIDValue = RGX_CR_META_LTP218_CORE_ID_VALUE; break; + case LTP217: ui32FWCoreIDValue = RGX_CR_META_LTP217_CORE_ID_VALUE; break; + default: + PVR_DPF((PVR_DBG_ERROR, "%s: Undefined FW_CORE_ID_VALUE", __func__)); + PVR_ASSERT(0); + } + pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META; + } + else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, 
RISCV_FW_PROCESSOR)) + { + ui32FWCoreIDValue = RGXRISCVFW_CORE_ID_VALUE; + pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_RISCV; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: Undefined FW_CORE_ID_VALUE", __func__)); + PVR_ASSERT(0); + } + +#if defined(PDUMP) + PDUMPIF("DISABLE_HWMETA_CHECK", ui32PDumpFlags); + PDUMPELSE("DISABLE_HWMETA_CHECK", ui32PDumpFlags); + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Compatibility check: KM driver and HW FW Processor version"); + eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, ui32FWProcessorVersion), + ui32FWCoreIDValue, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + return eError; + } + PDUMPFI("DISABLE_HWMETA_CHECK", ui32PDumpFlags); +#endif + +#if !defined(NO_HARDWARE) + if (psFwOsInit == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + if (psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion != ui32FWCoreIDValue) + { + PVR_LOG(("RGXDevInitCompatCheck: Incompatible driver %s version (%d) / HW %s version (%d).", + pcRGXFW_PROCESSOR, + ui32FWCoreIDValue, + pcRGXFW_PROCESSOR, + psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion)); + eError = PVRSRV_ERROR_FWPROCESSOR_MISMATCH; + PVR_DBG_BREAK; + return eError; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Compatible driver %s version (%d) / HW %s version (%d) [OK].", + pcRGXFW_PROCESSOR, + ui32FWCoreIDValue, + pcRGXFW_PROCESSOR, + psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion)); + } +#endif + return PVRSRV_OK; +} + +/*! 
+******************************************************************************* + + @Function RGXDevInitCompatCheck + + @Description + + Check compatibility of host driver and firmware (DDK and build options) + for RGX devices at services/device initialisation + + @Input psDeviceNode - device node + + @Return PVRSRV_ERROR - depending on mismatch found + + ******************************************************************************/ +static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; +#if !defined(NO_HARDWARE) + IMG_UINT32 ui32RegValue; + IMG_UINT8 ui8FwOsCount; + IMG_UINT32 ui32FwTimeout = MAX_HW_TIME_US; + +#if defined(SUPPORT_AUTOVZ) + /* AutoVz drivers booting while the firmware is running might have to wait + * longer to have their compatibility data filled if the firmware is busy */ + ui32FwTimeout = (psDeviceNode->bAutoVzFwIsUp) ? + (PVR_AUTOVZ_WDG_PERIOD_MS * 1000 * 3) : (MAX_HW_TIME_US); +#endif + + LOOP_UNTIL_TIMEOUT(ui32FwTimeout) + { + if (*((volatile IMG_BOOL *)&psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) + { + /* No need to wait if the FW has already updated the values */ + break; + } + OSWaitus(ui32FwTimeout/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + ui32RegValue = 0; + + if ((!PVRSRV_VZ_MODE_IS(GUEST)) && + RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + eError = RGXReadMETAAddr(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegValue); + + if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Reading RGX META register failed. Is the GPU correctly powered up? (%u)", + __func__, eError)); + goto chk_exit; + } + + if (!(ui32RegValue & META_CR_TXENABLE_ENABLE_BIT)) + { + eError = PVRSRV_ERROR_META_THREAD0_NOT_ENABLED; + PVR_DPF((PVR_DBG_ERROR, + "%s: RGX META is not running. Is the GPU correctly powered up? 
%d (%u)", + __func__, psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated, eError)); + goto chk_exit; + } + } + + if (!*((volatile IMG_BOOL *)&psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) + { + eError = PVRSRV_ERROR_TIMEOUT; + PVR_DPF((PVR_DBG_ERROR, "%s: GPU Firmware not responding: failed to supply compatibility info (%u)", + __func__, eError)); + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Potential causes: firmware not initialised or the current Guest driver's " + "OsConfig initialisation data was not accepted by the firmware", __func__)); + } + goto chk_exit; + } + + ui8FwOsCount = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.sInitOptions.ui8OsCountSupport; + if ((PVRSRV_VZ_MODE_IS(NATIVE) && (ui8FwOsCount > 1)) || + (PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_OS_SUPPORTED))) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)", + __func__, (PVRSRV_VZ_MODE_IS(NATIVE)) ? (1) : (RGX_NUM_OS_SUPPORTED), ui8FwOsCount)); + } +#endif /* defined(NO_HARDWARE) */ + + eError = RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(psDevInfo->psRGXFWIfOsInit); + if (eError != PVRSRV_OK) + { + goto chk_exit; + } + + eError = RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit); + if (eError != PVRSRV_OK) + { + goto chk_exit; + } + + eError = RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit); + if (eError != PVRSRV_OK) + { + goto chk_exit; + } + + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + eError = RGXDevInitCompatCheck_BVNC_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit); + if (eError != PVRSRV_OK) + { + goto chk_exit; + } + + eError = RGXDevInitCompatCheck_BVNC_HWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit); + if (eError != PVRSRV_OK) + { + goto chk_exit; + } + } + eError = RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit); + if (eError != 
PVRSRV_OK) + { + goto chk_exit; + } + + eError = PVRSRV_OK; +chk_exit: + + return eError; +} + +/**************************************************************************/ /*! +@Function RGXSoftReset +@Description Resets some modules of the RGX device +@Input psDeviceNode Device node +@Input ui64ResetValue1 A mask for which each bit set corresponds + to a module to reset (via the SOFT_RESET + register). +@Input ui64ResetValue2 A mask for which each bit set corresponds + to a module to reset (via the SOFT_RESET2 + register). +@Return PVRSRV_ERROR + */ /***************************************************************************/ +static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT64 ui64ResetValue1, + IMG_UINT64 ui64ResetValue2) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_BOOL bSoftReset = IMG_FALSE; + IMG_UINT64 ui64SoftResetMask = 0; + + PVR_ASSERT(psDeviceNode != NULL); + PVR_ASSERT(psDeviceNode->pvDevice != NULL); + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + /* the device info */ + psDevInfo = psDeviceNode->pvDevice; + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBE2_IN_XE)) + { + ui64SoftResetMask = RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL; + }else + { + ui64SoftResetMask = RGX_CR_SOFT_RESET_MASKFULL; + } + + if ((RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) && + ((ui64ResetValue2 & RGX_CR_SOFT_RESET2_MASKFULL) != ui64ResetValue2)) + { + bSoftReset = IMG_TRUE; + } + + if (((ui64ResetValue1 & ui64SoftResetMask) != ui64ResetValue1) || bSoftReset) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Set in soft-reset */ + OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, ui64ResetValue1); + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) + { + OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2, ui64ResetValue2); + } + + + /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */ + (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET); + if 
(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) + { + (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2); + } + + /* Take the modules out of reset... */ + OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, 0); + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) + { + OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2, 0); + } + + /* ...and fence again */ + (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET); + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) + { + (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2); + } + + return PVRSRV_OK; +} + +static const RGX_MIPS_ADDRESS_TRAMPOLINE sNullTrampoline; + +static void RGXFreeTrampoline(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + DevPhysMemFree(psDeviceNode, +#if defined(PDUMP) + psDevInfo->psTrampoline->hPdumpPages, +#endif + &psDevInfo->psTrampoline->sPages); + + if (psDevInfo->psTrampoline != &sNullTrampoline) + { + OSFreeMem(psDevInfo->psTrampoline); + } + psDevInfo->psTrampoline = (RGX_MIPS_ADDRESS_TRAMPOLINE *)&sNullTrampoline; +} + +#define RANGES_OVERLAP(x,y,size) (x < (y+size) && y < (x+size)) +#define TRAMPOLINE_ALLOC_MAX_RETIRES (3) + +static PVRSRV_ERROR RGXAllocTrampoline(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + IMG_INT32 i, j; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_MIPS_ADDRESS_TRAMPOLINE *pasTrampoline[TRAMPOLINE_ALLOC_MAX_RETIRES]; + + PDUMPCOMMENT("Allocate pages for trampoline"); + + /* Retry the allocation of the trampoline block (16KB), retaining any + * previous allocations overlapping with the target range until we get an + * allocation that doesn't overlap with the target range. + * Any allocation like this will require a maximum of 3 tries as we are + * allocating a physical contiguous block of memory, not individual pages. 
+ * Free the unused allocations at the end only after the desired range + * is obtained to prevent the alloc function from returning the same bad + * range repeatedly. + */ + for (i = 0; i < TRAMPOLINE_ALLOC_MAX_RETIRES; i++) + { + pasTrampoline[i] = OSAllocMem(sizeof(RGX_MIPS_ADDRESS_TRAMPOLINE)); + eError = DevPhysMemAlloc(psDeviceNode, + RGXMIPSFW_TRAMPOLINE_SIZE, + RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE, + 0, // (init) u8Value + IMG_FALSE, // bInitPage, +#if defined(PDUMP) + psDeviceNode->psFirmwareMMUDevAttrs->pszMMUPxPDumpMemSpaceName, + "TrampolineRegion", + &pasTrampoline[i]->hPdumpPages, +#endif + &pasTrampoline[i]->sPages, + &pasTrampoline[i]->sPhysAddr); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_ERROR, + "%s failed (%u)", + __func__, + eError)); + goto fail; + } + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + /* Set the persistent uiOSid value so that we free from the correct + * base arena when unloading the driver and freeing the trampoline. + */ + pasTrampoline[i]->sPages.uiOSid = 0; /* Firmware global arena */ +#endif + + if (!RANGES_OVERLAP(pasTrampoline[i]->sPhysAddr.uiAddr, + RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR, + RGXMIPSFW_TRAMPOLINE_SIZE)) + { + break; + } + } + if (TRAMPOLINE_ALLOC_MAX_RETIRES == i) + { + /* Failed to find a physical allocation after 3 attempts */ + eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES; + PVR_DPF((PVR_DBG_ERROR, + "%s failed to allocate non-overlapping pages (%u)", + __func__, eError)); + /* Fall through, clean up and return error. 
*/ + } + else + { + /* Remember the last physical block allocated, it will not be freed */ + psDevInfo->psTrampoline = pasTrampoline[i]; + } + +fail: + /* free all unused allocations */ + for (j = 0; j < i; j++) + { + DevPhysMemFree(psDeviceNode, +#if defined(PDUMP) + pasTrampoline[j]->hPdumpPages, +#endif + &pasTrampoline[j]->sPages); + OSFreeMem(pasTrampoline[j]); + } + + return eError; +} + +#undef RANGES_OVERLAP + + +PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEVMEM_SIZE_T uiFWCodeLen, + IMG_DEVMEM_SIZE_T uiFWDataLen, + IMG_DEVMEM_SIZE_T uiFWCorememCodeLen, + IMG_DEVMEM_SIZE_T uiFWCorememDataLen) +{ + DEVMEM_FLAGS_T uiMemAllocFlags; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + IMG_DEVMEM_SIZE_T uiDummyLen; + DEVMEM_MEMDESC *psDummyMemDesc = NULL; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + eError = RGXAllocTrampoline(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to allocate trampoline region (%u)", + eError)); + goto failTrampolineMemDescAlloc; + } + } + + /* + * Set up Allocation for FW code section + */ + uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE; + + eError = RGXAllocateFWMemoryRegion(psDeviceNode, + uiFWCodeLen, + uiMemAllocFlags, + PVRSRV_DEVICE_FW_CODE_REGION, + "FwExCodeRegion", + &psDevInfo->psRGXFWCodeMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to allocate fw code mem (%u)", + eError)); + goto failFWCodeMemDescAlloc; + } + + eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc, + &psDevInfo->sFWCodeDevVAddrBase); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to 
acquire devVAddr for fw code mem (%u)", + eError)); + goto failFWCodeMemDescAqDevVirt; + } + + if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) || (PVRSRV_VZ_MODE_IS(GUEST)))) + { + /* + * The FW code must be the first allocation in the firmware heap, otherwise + * the bootloader will not work (the FW will not be able to find the bootloader). + */ + PVR_ASSERT(psDevInfo->sFWCodeDevVAddrBase.uiAddr == RGX_FIRMWARE_RAW_HEAP_BASE); + } + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + /* + * Allocate Dummy Pages so that Data segment allocation gets the same + * device virtual address as specified in MIPS firmware linker script + */ + uiDummyLen = RGXGetFWImageSectionMaxSize(NULL, MIPS_CODE) + + RGXGetFWImageSectionMaxSize(NULL, MIPS_EXCEPTIONS_CODE) + + RGXGetFWImageSectionMaxSize(NULL, MIPS_BOOT_CODE) - + uiFWCodeLen; /* code actual size */ + + if (uiDummyLen > 0) + { + eError = DevmemFwAllocateExportable(psDeviceNode, + uiDummyLen, + OSGetPageSize(), + uiMemAllocFlags, + "FwExDummyPages", + &psDummyMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to allocate fw dummy mem (%u)", + eError)); + goto failDummyMemDescAlloc; + } + } + } + + /* + * Set up Allocation for FW data section + */ + uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE; + + eError = RGXAllocateFWMemoryRegion(psDeviceNode, + uiFWDataLen, + uiMemAllocFlags, + PVRSRV_DEVICE_FW_PRIVATE_DATA_REGION, + "FwExDataRegion", + &psDevInfo->psRGXFWDataMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to allocate fw data mem (%u)", + eError)); + goto failFWDataMemDescAlloc; + } + + eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWDataMemDesc, + 
&psDevInfo->sFWDataDevVAddrBase); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to acquire devVAddr for fw data mem (%u)", + eError)); + goto failFWDataMemDescAqDevVirt; + } + + if (uiFWCorememCodeLen != 0) + { + /* + * Set up Allocation for FW coremem code section + */ + uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE; + + eError = RGXAllocateFWMemoryRegion(psDeviceNode, + uiFWCorememCodeLen, + uiMemAllocFlags, + PVRSRV_DEVICE_FW_COREMEM_CODE_REGION, + "FwExCorememCodeRegion", + &psDevInfo->psRGXFWCorememCodeMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to allocate fw coremem code mem, size: %" IMG_INT64_FMTSPECd ", flags: %" PVRSRV_MEMALLOCFLAGS_FMTSPEC " (%u)", + uiFWCorememCodeLen, uiMemAllocFlags, eError)); + goto failFWCorememCodeMemDescAlloc; + } + + eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc, + &psDevInfo->sFWCorememCodeDevVAddrBase); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to acquire devVAddr for fw coremem mem code (%u)", + eError)); + goto failFWCorememCodeMemDescAqDevVirt; + } + + eError = RGXSetFirmwareAddress(&psDevInfo->sFWCorememCodeFWAddr, + psDevInfo->psRGXFWCorememCodeMemDesc, + 0, RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", failFWCorememCodeMemDescFwAddr); + } + else + { + psDevInfo->sFWCorememCodeDevVAddrBase.uiAddr = 0; + psDevInfo->sFWCorememCodeFWAddr.ui32Addr = 0; + } + + if (uiFWCorememDataLen != 0) + { + /* + * Set up Allocation for FW coremem data section + */ + uiMemAllocFlags = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + 
PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT) + & RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp); + + eError = RGXAllocateFWMemoryRegion(psDeviceNode, + uiFWCorememDataLen, + uiMemAllocFlags, + PVRSRV_DEVICE_FW_COREMEM_DATA_REGION, + "FwExCorememDataRegion", + &psDevInfo->psRGXFWIfCorememDataStoreMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to allocate fw coremem data mem, " + "size: %" IMG_INT64_FMTSPECd ", flags: %" PVRSRV_MEMALLOCFLAGS_FMTSPEC " (%u)", + uiFWCorememDataLen, + uiMemAllocFlags, + eError)); + goto failFWCorememDataMemDescAlloc; + } + + eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc, + &psDevInfo->sFWCorememDataStoreDevVAddrBase); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to acquire devVAddr for fw coremem mem data (%u)", + eError)); + goto failFWCorememDataMemDescAqDevVirt; + } + + eError = RGXSetFirmwareAddress(&psDevInfo->sFWCorememDataStoreFWAddr, + psDevInfo->psRGXFWIfCorememDataStoreMemDesc, + 0, RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", failFWCorememDataMemDescFwAddr); + } + else + { + psDevInfo->sFWCorememDataStoreDevVAddrBase.uiAddr = 0; + psDevInfo->sFWCorememDataStoreFWAddr.ui32Addr = 0; + } + + /* Free Dummy Pages */ + if (psDummyMemDesc) + { + DevmemFwUnmapAndFree(psDevInfo, psDummyMemDesc); + } + + return PVRSRV_OK; + +failFWCorememDataMemDescFwAddr: +failFWCorememDataMemDescAqDevVirt: + if (uiFWCorememDataLen != 0) + { + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfCorememDataStoreMemDesc); + psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL; + } +failFWCorememDataMemDescAlloc: +failFWCorememCodeMemDescFwAddr: +failFWCorememCodeMemDescAqDevVirt: + if (uiFWCorememCodeLen != 0) + { + DevmemFwUnmapAndFree(psDevInfo, 
psDevInfo->psRGXFWCorememCodeMemDesc); + psDevInfo->psRGXFWCorememCodeMemDesc = NULL; + } +failFWCorememCodeMemDescAlloc: +failFWDataMemDescAqDevVirt: + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc); + psDevInfo->psRGXFWDataMemDesc = NULL; +failFWDataMemDescAlloc: + if (psDummyMemDesc) + { + DevmemFwUnmapAndFree(psDevInfo, psDummyMemDesc); + } +failDummyMemDescAlloc: +failFWCodeMemDescAqDevVirt: + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc); + psDevInfo->psRGXFWCodeMemDesc = NULL; +failFWCodeMemDescAlloc: + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + RGXFreeTrampoline(psDeviceNode); + } +failTrampolineMemDescAlloc: + return eError; +} + +/* + AppHint parameter interface + */ +static +PVRSRV_ERROR RGXFWTraceQueryFilter(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 *pui32Value) +{ + PVRSRV_ERROR eResult; + + eResult = PVRSRVRGXFWDebugQueryFWLogKM(NULL, psDeviceNode, pui32Value); + *pui32Value &= RGXFWIF_LOG_TYPE_GROUP_MASK; + return eResult; +} + +static +PVRSRV_ERROR RGXFWTraceQueryLogType(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 *pui32Value) +{ + PVRSRV_ERROR eResult; + + eResult = PVRSRVRGXFWDebugQueryFWLogKM(NULL, psDeviceNode, pui32Value); + if (PVRSRV_OK == eResult) + { + if (*pui32Value & RGXFWIF_LOG_TYPE_TRACE) + { + *pui32Value = 0; /* Trace */ + } + else + { + *pui32Value = 1; /* TBI */ + } + } + return eResult; +} + +static +PVRSRV_ERROR RGXFWTraceSetFilter(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 ui32Value) +{ + PVRSRV_ERROR eResult; + IMG_UINT32 ui32RGXFWLogType; + + eResult = RGXFWTraceQueryLogType(psDeviceNode, NULL, &ui32RGXFWLogType); + if (PVRSRV_OK == eResult) + { + if (0 == ui32RGXFWLogType) + { + BITMASK_SET(ui32Value, RGXFWIF_LOG_TYPE_TRACE); + } + eResult = PVRSRVRGXFWDebugSetFWLogKM(NULL, psDeviceNode, ui32Value); + } + return eResult; +} + +static +PVRSRV_ERROR RGXFWTraceSetLogType(const 
PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 ui32Value) +{ + PVRSRV_ERROR eResult; + IMG_UINT32 ui32RGXFWLogType = ui32Value; + + eResult = RGXFWTraceQueryFilter(psDeviceNode, NULL, &ui32RGXFWLogType); + if (PVRSRV_OK != eResult) + { + return eResult; + } + + /* 0 - trace, 1 - tbi */ + if (0 == ui32Value) + { + BITMASK_SET(ui32RGXFWLogType, RGXFWIF_LOG_TYPE_TRACE); + } +#if defined(SUPPORT_TBI_INTERFACE) + else if (1 == ui32Value) + { + BITMASK_UNSET(ui32RGXFWLogType, RGXFWIF_LOG_TYPE_TRACE); + } +#endif + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid parameter %u specified to set FW log type AppHint.", + __func__, ui32Value)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eResult = PVRSRVRGXFWDebugSetFWLogKM(NULL, psDeviceNode, ui32RGXFWLogType); + return eResult; +} + +static +PVRSRV_ERROR RGXQueryFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_BOOL *pbValue) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; + + *pbValue = (PVRSRV_MEMALLOCFLAG_POISON_ON_FREE == psDevInfo->ui32FWPoisonOnFreeFlag) + ? IMG_TRUE + : IMG_FALSE; + return PVRSRV_OK; +} + +static +PVRSRV_ERROR RGXSetFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_BOOL bValue) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; + psDevInfo->ui32FWPoisonOnFreeFlag = bValue + ? 
PVRSRV_MEMALLOCFLAG_POISON_ON_FREE + : 0UL; + + return PVRSRV_OK; +} + + +/* + * RGXInitFirmware + */ +PVRSRV_ERROR +RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bEnableSignatureChecks, + IMG_UINT32 ui32SignatureChecksBufSize, + IMG_UINT32 ui32HWPerfFWBufSizeKB, + IMG_UINT64 ui64HWPerfFilter, + IMG_UINT32 ui32RGXFWAlignChecksArrLength, + IMG_UINT32 *pui32RGXFWAlignChecks, + IMG_UINT32 ui32ConfigFlags, + IMG_UINT32 ui32LogType, + IMG_UINT32 ui32FilterFlags, + IMG_UINT32 ui32JonesDisableMask, + IMG_UINT32 ui32HWRDebugDumpLimit, + IMG_UINT32 ui32HWPerfCountersDataSize, + IMG_UINT32 *pui32TPUTrilinearFracMask, + RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf, + FW_PERF_CONF eFirmwarePerf, + IMG_UINT32 ui32ConfigFlagsExt, + IMG_UINT32 ui32FwOsCfgFlags) +{ + PVRSRV_ERROR eError; + void *pvAppHintState = NULL; + IMG_UINT32 ui32AppHintDefault; + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + IMG_BOOL bEnableFWPoisonOnFree = IMG_FALSE; + + eError = RGXSetupFirmware(psDeviceNode, + bEnableSignatureChecks, + ui32SignatureChecksBufSize, + ui32HWPerfFWBufSizeKB, + ui64HWPerfFilter, + ui32RGXFWAlignChecksArrLength, + pui32RGXFWAlignChecks, + ui32ConfigFlags, + ui32ConfigFlagsExt, + ui32FwOsCfgFlags, + ui32LogType, + ui32FilterFlags, + ui32JonesDisableMask, + ui32HWRDebugDumpLimit, + ui32HWPerfCountersDataSize, + pui32TPUTrilinearFracMask, + eRGXRDPowerIslandingConf, + eFirmwarePerf); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVRGXInitFirmwareKM: RGXSetupFirmware failed (%u)", + eError)); + goto failed_init_firmware; + } + + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableLogGroup, + RGXFWTraceQueryFilter, + RGXFWTraceSetFilter, + psDeviceNode, + NULL); + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_FirmwareLogType, + RGXFWTraceQueryLogType, + RGXFWTraceSetLogType, + psDeviceNode, + NULL); + } + + OSCreateKMAppHintState(&pvAppHintState); + + ui32AppHintDefault = 
PVRSRV_APPHINT_ENABLEFWPOISONONFREE; + OSGetKMAppHintBOOL(pvAppHintState, + EnableFWPoisonOnFree, + &ui32AppHintDefault, + &bEnableFWPoisonOnFree); + + OSFreeKMAppHintState(pvAppHintState); + + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableFWPoisonOnFree, + RGXQueryFWPoisonOnFree, + RGXSetFWPoisonOnFree, + psDeviceNode, + NULL); + + psDevInfo->ui32FWPoisonOnFreeFlag = bEnableFWPoisonOnFree + ? PVRSRV_MEMALLOCFLAG_POISON_ON_FREE + : 0UL; + + return PVRSRV_OK; + +failed_init_firmware: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +/* See device.h for function declaration */ +static PVRSRV_ERROR RGXAllocUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC **psMemDesc, + IMG_UINT32 *puiSyncPrimVAddr, + IMG_UINT32 *puiSyncPrimBlockSize) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + PVRSRV_ERROR eError; + RGXFWIF_DEV_VIRTADDR pFirmwareAddr; + IMG_DEVMEM_SIZE_T uiUFOBlockSize = sizeof(IMG_UINT32); + IMG_DEVMEM_ALIGN_T ui32UFOBlockAlign = sizeof(IMG_UINT32); + + psDevInfo = psDeviceNode->pvDevice; + + /* Size and align are 'expanded' because we request an Exportalign allocation */ + eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap), + &uiUFOBlockSize, + &ui32UFOBlockAlign); + if (eError != PVRSRV_OK) + { + goto e0; + } + + eError = DevmemFwAllocateExportable(psDeviceNode, + uiUFOBlockSize, + ui32UFOBlockAlign, + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE, + "FwExUFOBlock", + psMemDesc); + if (eError != PVRSRV_OK) + { + goto e0; + } + + eError = RGXSetFirmwareAddress(&pFirmwareAddr, *psMemDesc, 0, RFW_FWADDR_FLAG_NONE); + PVR_GOTO_IF_ERROR(eError, e1); + + *puiSyncPrimVAddr = 
pFirmwareAddr.ui32Addr; + *puiSyncPrimBlockSize = TRUNCATE_64BITS_TO_32BITS(uiUFOBlockSize); + + return PVRSRV_OK; +e1: + DevmemFwUnmapAndFree(psDevInfo, *psMemDesc); +e0: + return eError; +} + +/* See device.h for function declaration */ +static void RGXFreeUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC *psMemDesc) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + /* + If the system has snooping of the device cache then the UFO block + might be in the cache so we need to flush it out before freeing + the memory + + When the device is being shutdown/destroyed we don't care anymore. + Several necessary data structures to issue a flush were destroyed + already. + */ + if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig) && + psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_DEINIT) + { + RGXFWIF_KCCB_CMD sFlushInvalCmd; + PVRSRV_ERROR eError; + IMG_UINT32 ui32kCCBCommandSlot; + + /* Schedule the SLC flush command ... */ +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit SLC flush and invalidate"); +#endif + sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL; + sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_TRUE; + sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE; + sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext.ui32Addr = 0; + + eError = RGXSendCommandWithPowLockAndGetKCCBSlot(psDevInfo, + &sFlushInvalCmd, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule SLC flush command with error (%u)", + __func__, + eError)); + } + else + { + /* Wait for the SLC flush to complete */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: SLC flush and invalidate aborted with error (%u)", + __func__, + eError)); + } + else if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & + 
RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__)); + } + } + } + + RGXUnsetFirmwareAddress(psMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psMemDesc); +} + +/* + DevDeInitRGX + */ +PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + DEVICE_MEMORY_INFO *psDevMemoryInfo; + IMG_UINT32 ui32Temp=0; + + if (!psDevInfo) + { + /* Can happen if DevInitRGX failed */ + PVR_DPF((PVR_DBG_ERROR, "DevDeInitRGX: Null DevInfo")); + return PVRSRV_OK; + } + + if (psDevInfo->psRGXFWIfOsInit) + { + KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); + } + + eError = DeviceDepBridgeDeInit(psDevInfo->sDevFeatureCfg.ui64Features); + PVR_LOG_IF_ERROR(eError, "DeviceDepBridgeDeInit"); + +#if defined(PDUMP) + DevmemIntFreeDefBackingPage(psDeviceNode, + &psDeviceNode->sDummyPage, + DUMMY_PAGE); + + DevmemIntFreeDefBackingPage(psDeviceNode, + &psDeviceNode->sDevZeroPage, + DEV_ZERO_PAGE); +#endif + +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + OSAtomicWrite(&psDeviceNode->sDummyPage.atRefCounter, 0); + PVR_UNREFERENCED_PARAMETER(ui32Temp); + } + else +#else + { + /*Delete the Dummy page related info */ + ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDummyPage.atRefCounter); + if (0 != ui32Temp) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Dummy page reference counter is non zero (%u)", + __func__, + ui32Temp)); + PVR_ASSERT(0); + } + } +#endif + + /*Delete the Dummy page related info */ + ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDevZeroPage.atRefCounter); + if (0 != ui32Temp) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Zero page reference counter is non zero (%u)", + __func__, + ui32Temp)); + } + +#if defined(PDUMP) + if (NULL != psDeviceNode->sDummyPage.hPdumpPg) + { + PDUMPCOMMENT("Error dummy page handle is still active"); + } + + if (NULL 
!= psDeviceNode->sDevZeroPage.hPdumpPg) + { + PDUMPCOMMENT("Error Zero page handle is still active"); + } +#endif + + /*The lock type need to be dispatch type here because it can be acquired from MISR (Z-buffer) path */ + OSLockDestroy(psDeviceNode->sDummyPage.psPgLock); + + /* Destroy the zero page lock */ + OSLockDestroy(psDeviceNode->sDevZeroPage.psPgLock); + +#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) + OSLockDestroy(psDevInfo->hCounterDumpingLock); +#endif + + /* Unregister debug request notifiers first as they could depend on anything. */ + + RGXDebugDeinit(psDevInfo); + + + /* Cancel notifications to this device */ + PVRSRVUnregisterCmdCompleteNotify(psDeviceNode->hCmdCompNotify); + psDeviceNode->hCmdCompNotify = NULL; + + /* + * De-initialise in reverse order, so stage 2 init is undone first. + */ + if (psDevInfo->bDevInit2Done) + { + psDevInfo->bDevInit2Done = IMG_FALSE; + + if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE_ONLY)) + { + eError = PVRSRVTQUnloadShaders(psDeviceNode); + if (eError != PVRSRV_OK) + { + return eError; + } + } + +#if !defined(NO_HARDWARE) + (void) SysUninstallDeviceLISR(psDevInfo->pvLISRData); + (void) OSUninstallMISR(psDevInfo->pvMISRData); + (void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR); + if (psDevInfo->pvAPMISRData != NULL) + { + (void) OSUninstallMISR(psDevInfo->pvAPMISRData); + } +#endif /* !NO_HARDWARE */ + + /* Remove the device from the power manager */ + eError = PVRSRVRemovePowerDevice(psDeviceNode); + if (eError != PVRSRV_OK) + { + return eError; + } + + psDevInfo->pfnGetGpuUtilStats = NULL; + OSLockDestroy(psDevInfo->hGPUUtilLock); + + /* Free DVFS Table */ + if (psDevInfo->psGpuDVFSTable != NULL) + { + OSFreeMem(psDevInfo->psGpuDVFSTable); + psDevInfo->psGpuDVFSTable = NULL; + } + + /* De-init Freelists/ZBuffers... 
*/ + OSLockDestroy(psDevInfo->hLockFreeList); + OSLockDestroy(psDevInfo->hLockZSBuffer); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* De-init work estimation lock */ + OSLockDestroy(psDevInfo->hWorkEstLock); +#endif + + /* Unregister MMU related stuff */ + eError = RGXMMUInit_Unregister(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "DevDeInitRGX: Failed RGXMMUInit_Unregister (0x%x)", + eError)); + return eError; + } + + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + /* Unregister MMU related stuff */ + eError = RGXMipsMMUInit_Unregister(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "DevDeInitRGX: Failed RGXMipsMMUInit_Unregister (0x%x)", + eError)); + return eError; + } + } + } + + /* UnMap Regs */ + if (psDevInfo->pvRegsBaseKM != NULL) + { +#if !defined(NO_HARDWARE) + OSUnMapPhysToLin((void __force *) psDevInfo->pvRegsBaseKM, + psDevInfo->ui32RegSize, + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); +#endif /* !NO_HARDWARE */ + psDevInfo->pvRegsBaseKM = NULL; + } + +#if 0 /* not required at this time */ + if (psDevInfo->hTimer) + { + eError = OSRemoveTimer(psDevInfo->hTimer); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "DevDeInitRGX: Failed to remove timer")); + return eError; + } + psDevInfo->hTimer = NULL; + } +#endif + + psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; + + RGXDeInitHeaps(psDevMemoryInfo); + + if (psDevInfo->psRGXFWCodeMemDesc) + { + /* Free fw code */ + PDUMPCOMMENT("Freeing FW code memory"); + DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc); + psDevInfo->psRGXFWCodeMemDesc = NULL; + } + else if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + PVR_DPF((PVR_DBG_WARNING, "No firmware code memory to free")); + } + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + if (psDevInfo->psTrampoline->sPages.u.pvHandle) + { + /* Free trampoline region */ + PDUMPCOMMENT("Freeing trampoline memory"); + 
RGXFreeTrampoline(psDeviceNode); + } + } + + if (psDevInfo->psRGXFWDataMemDesc) + { + /* Free fw data */ + PDUMPCOMMENT("Freeing FW data memory"); + DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWDataMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc); + psDevInfo->psRGXFWDataMemDesc = NULL; + } + else if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + PVR_DPF((PVR_DBG_WARNING, "No firmware data memory to free")); + } + + if (psDevInfo->psRGXFWCorememCodeMemDesc) + { + /* Free fw core mem code */ + PDUMPCOMMENT("Freeing FW coremem code memory"); + DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCorememCodeMemDesc); + psDevInfo->psRGXFWCorememCodeMemDesc = NULL; + } + + if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc) + { + /* Free fw core mem data */ + PDUMPCOMMENT("Freeing FW coremem data store memory"); + DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfCorememDataStoreMemDesc); + psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL; + } + + /* + Free the firmware allocations. 
+ */ + RGXFreeFirmware(psDevInfo); + RGXDeInitDestroyFWKernelMemoryContext(psDeviceNode); + + /* De-initialise non-device specific (TL) users of RGX device memory */ + RGXHWPerfHostDeInit(psDevInfo); + eError = HTBDeInit(); + PVR_LOG_IF_ERROR(eError, "HTBDeInit"); + + /* destroy the stalled CCB locks */ + OSLockDestroy(psDevInfo->hCCBRecoveryLock); + OSLockDestroy(psDevInfo->hCCBStallCheckLock); + + /* destroy the context list locks */ + OSLockDestroy(psDevInfo->sRegCongfig.hLock); + OSLockDestroy(psDevInfo->hBPLock); + OSLockDestroy(psDevInfo->hRGXFWIfBufInitLock); + OSWRLockDestroy(psDevInfo->hRenderCtxListLock); + OSWRLockDestroy(psDevInfo->hComputeCtxListLock); + OSWRLockDestroy(psDevInfo->hTransferCtxListLock); + OSWRLockDestroy(psDevInfo->hTDMCtxListLock); + OSWRLockDestroy(psDevInfo->hKickSyncCtxListLock); + OSWRLockDestroy(psDevInfo->hMemoryCtxListLock); + OSSpinLockDestroy(psDevInfo->hLockKCCBDeferredCommandsList); + OSWRLockDestroy(psDevInfo->hCommonCtxtListLock); + + + if ((psDevInfo->hNMILock != NULL) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))) + { + OSLockDestroy(psDevInfo->hNMILock); + } + + if (psDevInfo->hDebugFaultInfoLock != NULL) + { + OSLockDestroy(psDevInfo->hDebugFaultInfoLock); + } + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + if (psDevInfo->hMMUCtxUnregLock != NULL) + { + OSLockDestroy(psDevInfo->hMMUCtxUnregLock); + } + } + + /* Free device BVNC string */ + if (NULL != psDevInfo->sDevFeatureCfg.pszBVNCString) + { + OSFreeMem(psDevInfo->sDevFeatureCfg.pszBVNCString); + } + + /* DeAllocate devinfo */ + OSFreeMem(psDevInfo); + + psDeviceNode->pvDevice = NULL; + + return PVRSRV_OK; +} + +#if defined(PDUMP) +static +PVRSRV_ERROR RGXResetPDump(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice); + + psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE; + + return PVRSRV_OK; +} +#endif /* PDUMP */ + +static INLINE DEVMEM_HEAP_BLUEPRINT 
_blueprint_init(IMG_CHAR *name, + IMG_UINT64 heap_base, + IMG_DEVMEM_SIZE_T heap_length, + IMG_DEVMEM_SIZE_T heap_reserved_region_length, + IMG_UINT32 log2_import_alignment) +{ + void *pvAppHintState = NULL; + IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE; + IMG_UINT32 ui32GeneralNon4KHeapPageSize; + IMG_UINT32 ui32OSLog2PageShift = OSGetPageShift(); + IMG_UINT32 ui32OSPageSize; + + DEVMEM_HEAP_BLUEPRINT b = { + .pszName = name, + .sHeapBaseAddr.uiAddr = heap_base, + .uiHeapLength = heap_length, + .uiReservedRegionLength = heap_reserved_region_length, + .uiLog2DataPageSize = RGXHeapDerivePageSize(ui32OSLog2PageShift), + .uiLog2ImportAlignment = log2_import_alignment, + }; + + ui32OSPageSize = (1 << ui32OSLog2PageShift); + + /* Any heap length should at least match OS page size at the minimum or + * a multiple of OS page size */ + if ((b.uiHeapLength == 0) || (b.uiHeapLength & (ui32OSPageSize - 1))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid Heap \"%s\" Size: " + "%"IMG_UINT64_FMTSPEC + "("IMG_DEVMEM_SIZE_FMTSPEC")", + __func__, + b.pszName, b.uiHeapLength, b.uiHeapLength)); + PVR_DPF((PVR_DBG_ERROR, + "Heap Size should always be a non-zero value and a " + "multiple of OS Page Size:%u(0x%x)", + ui32OSPageSize, ui32OSPageSize)); + PVR_ASSERT(b.uiHeapLength >= ui32OSPageSize); + } + + + PVR_ASSERT(b.uiReservedRegionLength % RGX_HEAP_RESERVED_SIZE_GRANULARITY == 0); + + if (!OSStringNCompare(name, RGX_GENERAL_NON4K_HEAP_IDENT, sizeof(RGX_GENERAL_NON4K_HEAP_IDENT))) + { + OSCreateKMAppHintState(&pvAppHintState); + OSGetKMAppHintUINT32(pvAppHintState, GeneralNon4KHeapPageSize, + &ui32AppHintDefault, &ui32GeneralNon4KHeapPageSize); + switch (ui32GeneralNon4KHeapPageSize) + { + case (1 << RGX_HEAP_4KB_PAGE_SHIFT): + b.uiLog2DataPageSize = RGX_HEAP_4KB_PAGE_SHIFT; + break; + case (1 << RGX_HEAP_16KB_PAGE_SHIFT): + b.uiLog2DataPageSize = RGX_HEAP_16KB_PAGE_SHIFT; + break; + case (1 << RGX_HEAP_64KB_PAGE_SHIFT): + b.uiLog2DataPageSize = 
RGX_HEAP_64KB_PAGE_SHIFT; + break; + case (1 << RGX_HEAP_256KB_PAGE_SHIFT): + b.uiLog2DataPageSize = RGX_HEAP_256KB_PAGE_SHIFT; + break; + case (1 << RGX_HEAP_1MB_PAGE_SHIFT): + b.uiLog2DataPageSize = RGX_HEAP_1MB_PAGE_SHIFT; + break; + case (1 << RGX_HEAP_2MB_PAGE_SHIFT): + b.uiLog2DataPageSize = RGX_HEAP_2MB_PAGE_SHIFT; + break; + default: + b.uiLog2DataPageSize = RGX_HEAP_16KB_PAGE_SHIFT; + + PVR_DPF((PVR_DBG_ERROR, + "Invalid AppHint GeneralAltHeapPageSize [%d] value, using 16KB", + ui32AppHintDefault)); + break; + } + OSFreeKMAppHintState(pvAppHintState); + } + + return b; +} + +#define INIT_HEAP(NAME) \ + do { \ + *psDeviceMemoryHeapCursor = _blueprint_init( \ + RGX_ ## NAME ## _HEAP_IDENT, \ + RGX_ ## NAME ## _HEAP_BASE, \ + RGX_ ## NAME ## _HEAP_SIZE, \ + RGX_ ## NAME ## _HEAP_RESERVED_SIZE, \ + 0); \ + psDeviceMemoryHeapCursor++; \ + } while (0) + +#define INIT_FW_MAIN_HEAP(MODE, HEAP_SIZE) \ + do { \ + *psDeviceMemoryHeapCursor = _blueprint_init( \ + RGX_FIRMWARE_MAIN_HEAP_IDENT, \ + RGX_FIRMWARE_ ## MODE ## _MAIN_HEAP_BASE, \ + HEAP_SIZE, \ + 0, /* No reserved space in any FW heaps */ \ + 0); \ + psDeviceMemoryHeapCursor++; \ + } while (0) + +#define INIT_FW_CONFIG_HEAP(MODE) \ + do { \ + *psDeviceMemoryHeapCursor = _blueprint_init( \ + RGX_FIRMWARE_CONFIG_HEAP_IDENT, \ + RGX_FIRMWARE_ ## MODE ## _CONFIG_HEAP_BASE, \ + RGX_FIRMWARE_CONFIG_HEAP_SIZE, \ + 0, /* No reserved space in any FW heaps */ \ + 0); \ + psDeviceMemoryHeapCursor++; \ + } while (0) + +#define INIT_HEAP_NAME(STR, NAME) \ + do { \ + *psDeviceMemoryHeapCursor = _blueprint_init( \ + RGX_ ## STR ## _HEAP_IDENT, \ + RGX_ ## NAME ## _HEAP_BASE, \ + RGX_ ## NAME ## _HEAP_SIZE, \ + RGX_ ## STR ## _HEAP_RESERVED_SIZE, \ + 0); \ + psDeviceMemoryHeapCursor++; \ + } while (0) + +static PVRSRV_ERROR RGXInitHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, + DEVICE_MEMORY_INFO *psNewMemoryInfo, + IMG_UINT32 *pui32Log2DummyPgSize) +{ + DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor; + void *pvAppHintState = NULL; 
+ IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE; + IMG_UINT32 ui32GeneralNon4KHeapPageSize; + IMG_DEVMEM_SIZE_T uFWMainHeapSize; + + psNewMemoryInfo->psDeviceMemoryHeap = OSAllocMem(sizeof(DEVMEM_HEAP_BLUEPRINT) * RGX_MAX_HEAP_ID); + if (psNewMemoryInfo->psDeviceMemoryHeap == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXRegisterDevice : Failed to alloc memory for DEVMEM_HEAP_BLUEPRINT")); + goto e0; + } + + /* Get the page size for the dummy page from the NON4K heap apphint */ + OSCreateKMAppHintState(&pvAppHintState); + OSGetKMAppHintUINT32(pvAppHintState, GeneralNon4KHeapPageSize, + &ui32AppHintDefault, &ui32GeneralNon4KHeapPageSize); + *pui32Log2DummyPgSize = ExactLog2(ui32GeneralNon4KHeapPageSize); + OSFreeKMAppHintState(pvAppHintState); + + /* Initialise the heaps */ + psDeviceMemoryHeapCursor = psNewMemoryInfo->psDeviceMemoryHeap; + + INIT_HEAP(GENERAL_SVM); + + /* vulkan capture replay buffer heap */ + INIT_HEAP_NAME(VK_CAPT_REPLAY_BUF, VK_CAPT_REPLAY_BUF); + + if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273)) + { + INIT_HEAP_NAME(GENERAL, GENERAL_BRN_65273); + } + else + { + INIT_HEAP(GENERAL); + } + + if (RGX_IS_BRN_SUPPORTED(psDevInfo, 63142)) + { + /* BRN63142 heap must be at the top of an aligned 16GB range. */ + INIT_HEAP(RGNHDR_BRN_63142); + PVR_ASSERT((RGX_RGNHDR_BRN_63142_HEAP_BASE & IMG_UINT64_C(0x3FFFFFFFF)) + + RGX_RGNHDR_BRN_63142_HEAP_SIZE == IMG_UINT64_C(0x400000000)); + } + + if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273)) + { + INIT_HEAP_NAME(GENERAL_NON4K, GENERAL_NON4K_BRN_65273); + INIT_HEAP_NAME(VISTEST, VISTEST_BRN_65273); + + /* HWBRN65273 workaround also requires two Region Header buffers 4GB apart. 
*/ + INIT_HEAP(MMU_INIA_BRN_65273); + INIT_HEAP(MMU_INIB_BRN_65273); + } + else + { + INIT_HEAP(GENERAL_NON4K); + INIT_HEAP(VISTEST); + } + + if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273)) + { + INIT_HEAP_NAME(PDSCODEDATA, PDSCODEDATA_BRN_65273); + INIT_HEAP_NAME(USCCODE, USCCODE_BRN_65273); + } + else + { + INIT_HEAP(PDSCODEDATA); + INIT_HEAP(USCCODE); + } + + if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273)) + { + INIT_HEAP_NAME(TQ3DPARAMETERS, TQ3DPARAMETERS_BRN_65273); + } + else + { + INIT_HEAP(TQ3DPARAMETERS); + } + + INIT_HEAP(TDM_TPU_YUV_COEFFS); + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SIGNAL_SNOOPING)) + { + INIT_HEAP(SIGNALS); + } + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65101)) + { + uFWMainHeapSize = RGX_FIRMWARE_MIPS_MAIN_HEAP_SIZE_BRN65101; + } + else + { + uFWMainHeapSize = RGX_FIRMWARE_MIPS_MAIN_HEAP_SIZE_NORMAL; + } + } + else + { + uFWMainHeapSize = RGX_FIRMWARE_META_MAIN_HEAP_SIZE; + } + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + INIT_FW_CONFIG_HEAP(GUEST); + INIT_FW_MAIN_HEAP(GUEST, uFWMainHeapSize); + } + else + { + INIT_FW_MAIN_HEAP(HOST, uFWMainHeapSize); + INIT_FW_CONFIG_HEAP(HOST); + } + + /* set the heap count */ + psNewMemoryInfo->ui32HeapCount = (IMG_UINT32)(psDeviceMemoryHeapCursor - psNewMemoryInfo->psDeviceMemoryHeap); + + PVR_ASSERT(psNewMemoryInfo->ui32HeapCount <= RGX_MAX_HEAP_ID); + + /* + In the new heap setup, we initialise 2 configurations: + 1 - One will be for the firmware only (index 1 in array) + a. This primarily has the firmware heap in it. + b. It also has additional guest OSID firmware heap(s) + - Only if the number of support firmware OSID > 1 + 2 - Others shall be for clients only (index 0 in array) + a. This has all the other client heaps in it. 
+ */ + psNewMemoryInfo->uiNumHeapConfigs = 2; + psNewMemoryInfo->psDeviceMemoryHeapConfigArray = OSAllocMem(sizeof(DEVMEM_HEAP_CONFIG) * psNewMemoryInfo->uiNumHeapConfigs); + if (psNewMemoryInfo->psDeviceMemoryHeapConfigArray == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXRegisterDevice : Failed to alloc memory for DEVMEM_HEAP_CONFIG")); + goto e1; + } + + psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].pszName = "Default Heap Configuration"; + psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].uiNumHeaps = psNewMemoryInfo->ui32HeapCount - RGX_FIRMWARE_NUMBER_OF_FW_HEAPS; + psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].psHeapBlueprintArray = psNewMemoryInfo->psDeviceMemoryHeap; + + psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].pszName = "Firmware Heap Configuration"; + psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps = RGX_FIRMWARE_NUMBER_OF_FW_HEAPS; + psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].psHeapBlueprintArray = psDeviceMemoryHeapCursor - RGX_FIRMWARE_NUMBER_OF_FW_HEAPS; + +#if (RGX_NUM_OS_SUPPORTED > 1) + if (PVRSRV_VZ_MODE_IS(HOST)) + { + IMG_UINT32 ui32OSid; + + /* Create additional raw firmware heaps */ + for (ui32OSid = RGX_FIRST_RAW_HEAP_OSID; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + { + if (RGXInitFwRawHeap(psDeviceMemoryHeapCursor, ui32OSid) != PVRSRV_OK) + { + /* if any allocation fails, free previously allocated heaps and abandon initialisation */ + for (; ui32OSid > RGX_FIRST_RAW_HEAP_OSID; ui32OSid--) + { + RGXDeInitFwRawHeap(psDeviceMemoryHeapCursor); + psDeviceMemoryHeapCursor--; + } + goto e1; + } + + /* Append additional firmware heaps to host driver firmware context heap configuration */ + psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps += 1; + + /* advance to the next heap */ + psDeviceMemoryHeapCursor++; + } + } +#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ + + return PVRSRV_OK; +e1: + OSFreeMem(psNewMemoryInfo->psDeviceMemoryHeap); +e0: + return PVRSRV_ERROR_OUT_OF_MEMORY; +} + +#undef INIT_HEAP 
+#undef INIT_HEAP_NAME + + +static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo) +{ +#if (RGX_NUM_OS_SUPPORTED > 1) + if (PVRSRV_VZ_MODE_IS(HOST)) + { + IMG_UINT32 ui32OSid; + DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor = psDevMemoryInfo->psDeviceMemoryHeap; + + /* Delete all guest firmware heaps */ + for (ui32OSid = RGX_FIRST_RAW_HEAP_OSID; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + { + RGXDeInitFwRawHeap(psDeviceMemoryHeapCursor); + psDeviceMemoryHeapCursor++; + } + } +#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ + + OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeapConfigArray); + OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeap); +} + + +static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PHYS_HEAP *psPhysHeap; + PHYS_HEAP_TYPE eHeapType; + IMG_UINT64 uPhysheapSize; + IMG_UINT32 ui32RegionCount; + IMG_CPU_PHYADDR sCpuPAddr; + IMG_DEV_PHYADDR sDevPAddr; + PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + + /* Initialise the objects used to manage the physical firmware heap */ + psPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]; + eHeapType = PhysHeapGetType(psPhysHeap); + + if (eHeapType == PHYS_HEAP_TYPE_UMA) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap uses OS System memory (UMA)", __func__)); + psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = PhysmemNewOSRamBackedPMR; + } + else + { + IMG_UINT64 uRawHeapBase; + RA_BASE_T uFwCfgSubHeapBase, uFwMainSubHeapBase; + const IMG_UINT64 ui64ExpectedHeapSize = RGX_FIRMWARE_RAW_HEAP_SIZE; + const RA_LENGTH_T uFwCfgSubHeapSize = RGX_FIRMWARE_CONFIG_HEAP_SIZE; + RA_LENGTH_T uFwMainSubHeapSize; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65101)) + { + uFwMainSubHeapSize = RGX_FIRMWARE_MIPS_MAIN_HEAP_SIZE_BRN65101; + } + else + { + uFwMainSubHeapSize 
= RGX_FIRMWARE_MIPS_MAIN_HEAP_SIZE_NORMAL; + } + } + else + { + uFwMainSubHeapSize = RGX_FIRMWARE_META_MAIN_HEAP_SIZE; + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap uses local memory managed by the driver (LMA)", __func__)); + ui32RegionCount = PhysHeapNumberOfRegions(psPhysHeap); + if (ui32RegionCount > 1) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Firmware heap currently supports 1 region only. " + "PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL contains %u regions. Only the 1st will be used.", __func__, ui32RegionCount)); + } + + eError = PhysHeapRegionGetDevPAddr(psPhysHeap, 0, &sDevPAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapRegionGetDevPAddr", ErrorDeinit); + + eError = PhysHeapRegionGetCpuPAddr(psPhysHeap, 0, &sCpuPAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapRegionGetCpuPAddr", ErrorDeinit); + + eError = PhysHeapRegionGetSize(psPhysHeap, 0, &uPhysheapSize); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapRegionGetSize", ErrorDeinit); + PVR_LOG_GOTO_IF_FALSE(uPhysheapSize >= ui64ExpectedHeapSize, + "Invalid firmware physical heap size.", ErrorDeinit); + + /* Now we construct RAs to manage the FW heaps */ + uRawHeapBase = sDevPAddr.uiAddr; + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + /* Guest subheap layout: Config + Main */ + uFwCfgSubHeapBase = uRawHeapBase; + uFwMainSubHeapBase = uFwCfgSubHeapBase + uFwCfgSubHeapSize; + } + else + { + /* Native/Host subheap layout: Main + (optional MIPS reserved range) + Config */ + uFwMainSubHeapBase = uRawHeapBase; + uFwCfgSubHeapBase = uRawHeapBase + RGX_FIRMWARE_RAW_HEAP_SIZE - uFwCfgSubHeapSize; + } + + eError = PVRSRVCreateRegionRA(psDevConfig, + &psDeviceNode->psKernelFwMainMemArena, + psDeviceNode->szKernelFwMainRAName, + sCpuPAddr.uiAddr + (uFwMainSubHeapBase - uRawHeapBase), + uFwMainSubHeapBase, + uFwMainSubHeapSize, + 0, + "Fw Main subheap"); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVCreateRegionRA(FwMain)", ErrorDeinit); + + eError = PVRSRVCreateRegionRA(psDevConfig, + &psDeviceNode->psKernelFwConfigMemArena, + 
psDeviceNode->szKernelFwConfigRAName, + sCpuPAddr.uiAddr + (uFwCfgSubHeapBase - uRawHeapBase), + uFwCfgSubHeapBase, + uFwCfgSubHeapSize, + 0, + "Fw Cfg subheap"); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVCreateRegionRA(FwCfg)", ErrorDeinit); + + psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = PhysmemNewLocalRamBackedPMR; + +#if defined(SUPPORT_AUTOVZ) + if (PVRSRV_VZ_MODE_IS(HOST)) + { + /* 1 Mb can hold the maximum amount of page tables for the memory shared between the firmware and all KM drivers: + * MAX(RAW_HEAP_SIZE) = 32 Mb; MAX(NUMBER_OS) = 8; Total shared memory = 256 Mb; + * MMU objects required: 65536 PTEs; 16 PDEs; 1 PCE; */ + RA_LENGTH_T uMaxFwMmuPageTableSize = 1 * 1024 * 1024; + + /* By default the firmware MMU's page tables are allocated from the same carveout memory as the firmware heap. + * If a different base address is specified for this reserved range, use the overriding define instead. */ +#if defined(PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR) + RA_BASE_T uFwMmuReservedMemStart = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR; +#else + RA_BASE_T uFwMmuReservedMemStart = uRawHeapBase + (RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED); +#endif + + eError = PVRSRVCreateRegionRA(psDevConfig, + &psDeviceNode->psFwMMUReservedMemArena, + NULL, + 0, + uFwMmuReservedMemStart, + uMaxFwMmuPageTableSize, + 0, + "Fw MMU Mem "); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVCreateRegionRA(FwMMU)", ErrorDeinit); + } +#endif + } + + return eError; + +ErrorDeinit: + PVR_ASSERT(IMG_FALSE); + PVRSRVPhysMemHeapsDeinit(psDeviceNode); + + return eError; +} + +/* + RGXRegisterDevice + */ +PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) +{ +#ifdef CONFIG_MCST + IMG_DEV_PHYADDR RegsCpuDevBase = { .uiAddr = 0 }; +#endif + PVRSRV_ERROR eError; + DEVICE_MEMORY_INFO *psDevMemoryInfo; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PDUMPCOMMENT("Device Name: %s", psDeviceNode->psDevConfig->pszName); + + if (psDeviceNode->psDevConfig->pszVersion) + 
{ + PDUMPCOMMENT("Device Version: %s", psDeviceNode->psDevConfig->pszVersion); + } + + PDUMPCOMMENT("RGX Initialisation (Part 1)"); + + /********************* + * Device node setup * + *********************/ + /* Setup static data and callbacks on the device agnostic device node */ +#if defined(PDUMP) + psDeviceNode->sDevId.pszPDumpRegName = RGX_PDUMPREG_NAME; + psDeviceNode->sDevId.pszPDumpDevName = PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]); + psDeviceNode->pfnPDumpInitDevice = &RGXResetPDump; +#endif /* PDUMP */ + + OSAtomicWrite(&psDeviceNode->eHealthStatus, PVRSRV_DEVICE_HEALTH_STATUS_OK); + OSAtomicWrite(&psDeviceNode->eHealthReason, PVRSRV_DEVICE_HEALTH_REASON_NONE); + + /* Configure MMU specific stuff */ + RGXMMUInit_Register(psDeviceNode); + + psDeviceNode->pfnDevSLCFlushRange = NULL; + psDeviceNode->pfnInvalFBSCTable = NULL; + + psDeviceNode->pfnValidateOrTweakPhysAddrs = NULL; + + psDeviceNode->pfnMMUCacheInvalidate = RGXMMUCacheInvalidate; + + psDeviceNode->pfnMMUCacheInvalidateKick = RGXMMUCacheInvalidateKick; + + /* Register RGX to receive notifies when other devices complete some work */ + PVRSRVRegisterCmdCompleteNotify(&psDeviceNode->hCmdCompNotify, &RGXScheduleProcessQueuesKM, psDeviceNode); + + psDeviceNode->pfnInitDeviceCompatCheck = &RGXDevInitCompatCheck; + + /* Register callbacks for creation of device memory contexts */ + psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext; + psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext; + + /* Register callbacks for Unified Fence Objects */ + psDeviceNode->pfnAllocUFOBlock = RGXAllocUFOBlock; + psDeviceNode->pfnFreeUFOBlock = RGXFreeUFOBlock; + + /* Register callback for checking the device's health */ + psDeviceNode->pfnUpdateHealthStatus = RGXUpdateHealthStatus; + + /* Register method to service the FW HWPerf buffer */ + psDeviceNode->pfnServiceHWPerf = RGXHWPerfDataStoreCB; + + /* Register callback for getting the device 
version information string */ + psDeviceNode->pfnDeviceVersionString = RGXDevVersionString; + + /* Register callback for getting the device clock speed */ + psDeviceNode->pfnDeviceClockSpeed = RGXDevClockSpeed; + + /* Register callback for soft resetting some device modules */ + psDeviceNode->pfnSoftReset = RGXSoftReset; + + /* Register callback for resetting the HWR logs */ + psDeviceNode->pfnResetHWRLogs = RGXResetHWRLogs; + + /* Register callback for resetting the HWR logs */ + psDeviceNode->pfnVerifyBVNC = RGXVerifyBVNC; + + /* Register callback for checking alignment of UM structures */ + psDeviceNode->pfnAlignmentCheck = RGXAlignmentCheck; + + /*Register callback for checking the supported features and getting the + * corresponding values */ + psDeviceNode->pfnCheckDeviceFeature = RGXBvncCheckFeatureSupported; + psDeviceNode->pfnGetDeviceFeatureValue = RGXBvncGetSupportedFeatureValue; + + /* Callback for checking if system layer supports FBC 3.1 */ + psDeviceNode->pfnHasFBCDCVersion31 = RGXSystemHasFBCDCVersion31; + + /* Callback for getting the MMU device attributes */ + psDeviceNode->pfnGetMMUDeviceAttributes = RGXDevMMUAttributes; + +#if defined(PDUMP) && defined(SUPPORT_SECURITY_VALIDATION) + /* Callback for getting a secure PDump memory space name */ + psDeviceNode->pfnGetSecurePDumpMemspace = RGXGetSecurePDumpMemspace; +#endif + + /* Register callback for initialising device-specific physical memory heaps */ + psDeviceNode->pfnPhysMemDeviceHeapsInit = RGXPhysMemDeviceHeapsInit; + + /* Set up required support for dummy page */ + OSAtomicWrite(&(psDeviceNode->sDummyPage.atRefCounter), 0); + OSAtomicWrite(&(psDeviceNode->sDevZeroPage.atRefCounter), 0); + + /* Set the order to 0 */ + psDeviceNode->sDummyPage.sPageHandle.uiOrder = 0; + psDeviceNode->sDevZeroPage.sPageHandle.uiOrder = 0; + + /* Set the size of the Dummy page to zero */ + psDeviceNode->sDummyPage.ui32Log2PgSize = 0; + + /* Set the size of the Zero page to zero */ + 
psDeviceNode->sDevZeroPage.ui32Log2PgSize = 0; + + /* Set the Dummy page phys addr */ + psDeviceNode->sDummyPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR; + + /* Set the Zero page phys addr */ + psDeviceNode->sDevZeroPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR; + + /* The lock can be acquired from MISR (Z-buffer) path */ + eError = OSLockCreate(&psDeviceNode->sDummyPage.psPgLock); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create dummy page lock", __func__)); + return eError; + } + + /* Create the lock for zero page */ + eError = OSLockCreate(&psDeviceNode->sDevZeroPage.psPgLock); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create Zero page lock", __func__)); + goto free_dummy_page; + } +#if defined(PDUMP) + psDeviceNode->sDummyPage.hPdumpPg = NULL; + psDeviceNode->sDevZeroPage.hPdumpPg = NULL; +#endif + + /********************* + * Device info setup * + *********************/ + /* Allocate device control block */ + psDevInfo = OSAllocZMem(sizeof(*psDevInfo)); + if (psDevInfo == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "DevInitRGXPart1 : Failed to alloc memory for DevInfo")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + /* Default psTrampoline to point to null struct */ + psDevInfo->psTrampoline = (RGX_MIPS_ADDRESS_TRAMPOLINE *)&sNullTrampoline; + + /* create locks for the context lists stored in the DevInfo structure. 
+ * these lists are modified on context create/destroy and read by the + * watchdog thread + */ + + eError = OSWRLockCreate(&(psDevInfo->hRenderCtxListLock)); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create render context list lock", __func__)); + goto e0; + } + + eError = OSWRLockCreate(&(psDevInfo->hComputeCtxListLock)); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create compute context list lock", __func__)); + goto e1; + } + + eError = OSWRLockCreate(&(psDevInfo->hTransferCtxListLock)); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create transfer context list lock", __func__)); + goto e2; + } + + eError = OSWRLockCreate(&(psDevInfo->hTDMCtxListLock)); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create TDM context list lock", __func__)); + goto e3; + } + + eError = OSWRLockCreate(&(psDevInfo->hKickSyncCtxListLock)); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create kick sync context list lock", __func__)); + goto e4; + } + + eError = OSWRLockCreate(&(psDevInfo->hMemoryCtxListLock)); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create memory context list lock", __func__)); + goto e5; + } + + eError = OSSpinLockCreate(&psDevInfo->hLockKCCBDeferredCommandsList); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to KCCB deferred commands list lock", __func__)); + goto e6; + } + dllist_init(&(psDevInfo->sKCCBDeferredCommandsListHead)); + + dllist_init(&(psDevInfo->sRenderCtxtListHead)); + dllist_init(&(psDevInfo->sComputeCtxtListHead)); + dllist_init(&(psDevInfo->sTransferCtxtListHead)); + dllist_init(&(psDevInfo->sTDMCtxtListHead)); + dllist_init(&(psDevInfo->sKickSyncCtxtListHead)); + + dllist_init(&(psDevInfo->sCommonCtxtListHead)); + psDevInfo->ui32CommonCtxtCurrentID = 1; + + + eError = OSWRLockCreate(&psDevInfo->hCommonCtxtListLock); + + if (eError != PVRSRV_OK) + { + 
PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create common context list lock", __func__)); + goto e7; + } + + eError = OSLockCreate(&psDevInfo->sRegCongfig.hLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create register configuration lock", __func__)); + goto e8; + } + + eError = OSLockCreate(&psDevInfo->hBPLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for break points", __func__)); + goto e9; + } + + eError = OSLockCreate(&psDevInfo->hRGXFWIfBufInitLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for trace buffers", __func__)); + goto e10; + } + + eError = OSLockCreate(&psDevInfo->hCCBStallCheckLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create stalled CCB checking lock", __func__)); + goto e11; + } + eError = OSLockCreate(&psDevInfo->hCCBRecoveryLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create stalled CCB recovery lock", __func__)); + goto e12; + } + + dllist_init(&psDevInfo->sMemoryContextList); + + /* initialise ui32SLRHoldoffCounter */ + if (RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS > DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT) + { + psDevInfo->ui32SLRHoldoffCounter = RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS / DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT; + } + else + { + psDevInfo->ui32SLRHoldoffCounter = 0; + } + + /* Setup static data and callbacks on the device specific device info */ + psDevInfo->psDeviceNode = psDeviceNode; + + psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; + psDevInfo->pvDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap; + + /* + * Map RGX Registers + */ + psDevInfo->ui32RegSize = psDeviceNode->psDevConfig->ui32RegsSize; + psDevInfo->sRegsPhysBase = psDeviceNode->psDevConfig->sRegsCpuPBase; + +#if !defined(NO_HARDWARE) +#ifdef CONFIG_MCST + psDevInfo->pvRegsBaseKM = (void __iomem *) OSMapPhysToLin(psDeviceNode->psDevConfig->sRegsCpuPBase, RegsCpuDevBase, + 
psDeviceNode->psDevConfig->ui32RegsSize, + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); +#else + psDevInfo->pvRegsBaseKM = (void __iomem *) OSMapPhysToLin(psDeviceNode->psDevConfig->sRegsCpuPBase, + psDeviceNode->psDevConfig->ui32RegsSize, + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); +#endif + if (psDevInfo->pvRegsBaseKM == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to create RGX register mapping", + __func__)); + eError = PVRSRV_ERROR_BAD_MAPPING; + goto e13; + } +#endif + + psDeviceNode->pvDevice = psDevInfo; + + eError = RGXBvncInitialiseConfiguration(psDeviceNode); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Unsupported HW device detected by driver", + __func__)); + goto e14; + } + + /* pdump info about the core */ + PDUMPCOMMENT("RGX Version Information (KM): %d.%d.%d.%d", + psDevInfo->sDevFeatureCfg.ui32B, + psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, + psDevInfo->sDevFeatureCfg.ui32C); + + eError = RGXInitHeaps(psDevInfo, psDevMemoryInfo, + &psDeviceNode->sDummyPage.ui32Log2PgSize); + if (eError != PVRSRV_OK) + { + goto e14; + } + + /*Set the zero page size as needed for the heap with biggest page size */ + psDeviceNode->sDevZeroPage.ui32Log2PgSize = psDeviceNode->sDummyPage.ui32Log2PgSize; + + eError = RGXHWPerfInit(psDevInfo); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInit", e14); + + /* Register callback for dumping debug info */ + eError = RGXDebugInit(psDevInfo); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXDebugInit", e15); + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + RGXMipsMMUInit_Register(psDeviceNode); + } + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) + { + /* Callback to return info about multicore setup */ + psDeviceNode->pfnGetMultiCoreInfo = RGXGetMultiCoreInfo; + } + + /* The device shared-virtual-memory heap address-space size is stored here for faster + look-up without having to walk the device heap configuration structures during + client device connection (i.e. 
this size is relative to a zero-based offset) */ + if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273)) + { + psDeviceNode->ui64GeneralSVMHeapTopVA = 0; + }else + { + psDeviceNode->ui64GeneralSVMHeapTopVA = RGX_GENERAL_SVM_HEAP_BASE + RGX_GENERAL_SVM_HEAP_SIZE; + } + + if (NULL != psDeviceNode->psDevConfig->pfnSysDevFeatureDepInit) + { + psDeviceNode->psDevConfig->pfnSysDevFeatureDepInit(psDeviceNode->psDevConfig, + psDevInfo->sDevFeatureCfg.ui64Features); + } + + /* Initialise the device dependent bridges */ + eError = DeviceDepBridgeInit(psDevInfo->sDevFeatureCfg.ui64Features); + PVR_LOG_IF_ERROR(eError, "DeviceDepBridgeInit"); + +#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) + eError = OSLockCreate(&psDevInfo->hCounterDumpingLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for counter sampling.", __func__)); + goto e15; + } +#endif + +#if defined(PDUMP) + eError = DevmemIntAllocDefBackingPage(psDeviceNode, + &psDeviceNode->sDummyPage, + PVR_DUMMY_PAGE_INIT_VALUE, + DUMMY_PAGE, + IMG_TRUE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate dummy page.", __func__)); + goto e16; + } + + eError = DevmemIntAllocDefBackingPage(psDeviceNode, + &psDeviceNode->sDevZeroPage, + PVR_ZERO_PAGE_INIT_VALUE, + DEV_ZERO_PAGE, + IMG_TRUE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate Zero page.", __func__)); + goto e17; + } +#endif + + return PVRSRV_OK; + +#if defined(PDUMP) +e17: + DevmemIntFreeDefBackingPage(psDeviceNode, + &psDeviceNode->sDummyPage, + DUMMY_PAGE); +e16: +#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) + OSLockDestroy(psDevInfo->hCounterDumpingLock); +#endif +#endif + +e15: + RGXHWPerfDeinit(psDevInfo); +e14: +#if !defined(NO_HARDWARE) + OSUnMapPhysToLin((void __force *) psDevInfo->pvRegsBaseKM, + psDevInfo->ui32RegSize, + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); + +e13: +#endif /* !NO_HARDWARE */ + OSLockDestroy(psDevInfo->hCCBRecoveryLock); +e12: + 
OSLockDestroy(psDevInfo->hCCBStallCheckLock); +e11: + OSLockDestroy(psDevInfo->hRGXFWIfBufInitLock); +e10: + OSLockDestroy(psDevInfo->hBPLock); +e9: + OSLockDestroy(psDevInfo->sRegCongfig.hLock); +e8: + OSWRLockDestroy(psDevInfo->hCommonCtxtListLock); +e7: + OSSpinLockDestroy(psDevInfo->hLockKCCBDeferredCommandsList); +e6: + OSWRLockDestroy(psDevInfo->hMemoryCtxListLock); +e5: + OSWRLockDestroy(psDevInfo->hKickSyncCtxListLock); +e4: + OSWRLockDestroy(psDevInfo->hTDMCtxListLock); +e3: + OSWRLockDestroy(psDevInfo->hTransferCtxListLock); +e2: + OSWRLockDestroy(psDevInfo->hComputeCtxListLock); +e1: + OSWRLockDestroy(psDevInfo->hRenderCtxListLock); +e0: + OSFreeMem(psDevInfo); + + /* Destroy the zero page lock created above */ + OSLockDestroy(psDeviceNode->sDevZeroPage.psPgLock); + +free_dummy_page: + /* Destroy the dummy page lock created above */ + OSLockDestroy(psDeviceNode->sDummyPage.psPgLock); + + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +IMG_PCHAR RGXDevBVNCString(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_PCHAR psz = psDevInfo->sDevFeatureCfg.pszBVNCString; + if (NULL == psz) + { + IMG_CHAR pszBVNCInfo[RGX_HWPERF_MAX_BVNC_LEN]; + size_t uiBVNCStringSize; + size_t uiStringLength; + + uiStringLength = OSSNPrintf(pszBVNCInfo, RGX_HWPERF_MAX_BVNC_LEN, "%d.%d.%d.%d", + psDevInfo->sDevFeatureCfg.ui32B, + psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, + psDevInfo->sDevFeatureCfg.ui32C); + PVR_ASSERT(uiStringLength < RGX_HWPERF_MAX_BVNC_LEN); + + uiBVNCStringSize = (uiStringLength + 1) * sizeof(IMG_CHAR); + psz = OSAllocMem(uiBVNCStringSize); + if (NULL != psz) + { + OSCachedMemCopy(psz, pszBVNCInfo, uiBVNCStringSize); + psDevInfo->sDevFeatureCfg.pszBVNCString = psz; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Allocating memory for BVNC Info string failed", + __func__)); + } + } + + return psz; +} + +/*************************************************************************/ /*! 
+@Function RGXDevVersionString +@Description Gets the version string for the given device node and returns + a pointer to it in ppszVersionString. It is then the + responsibility of the caller to free this memory. +@Input psDeviceNode Device node from which to obtain the + version string +@Output ppszVersionString Contains the version string upon return +@Return PVRSRV_ERROR + */ /**************************************************************************/ +static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_CHAR **ppszVersionString) +{ +#if defined(NO_HARDWARE) || defined(EMULATOR) + const IMG_CHAR szFormatString[] = "GPU variant BVNC: %s (SW)"; +#else + const IMG_CHAR szFormatString[] = "GPU variant BVNC: %s (HW)"; +#endif + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_PCHAR pszBVNC; + size_t uiStringLength; + + if (psDeviceNode == NULL || ppszVersionString == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + pszBVNC = RGXDevBVNCString(psDevInfo); + + if (NULL == pszBVNC) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + uiStringLength = OSStringLength(pszBVNC); + uiStringLength += (sizeof(szFormatString) - 2); /* sizeof includes the null, -2 for "%s" */ + *ppszVersionString = OSAllocMem(uiStringLength * sizeof(IMG_CHAR)); + if (*ppszVersionString == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + OSSNPrintf(*ppszVersionString, uiStringLength, szFormatString, + pszBVNC); + + return PVRSRV_OK; +} + +/**************************************************************************/ /*! +@Function RGXDevClockSpeed +@Description Gets the clock speed for the given device node and returns + it in pui32RGXClockSpeed. 
+@Input psDeviceNode Device node +@Output pui32RGXClockSpeed Variable for storing the clock speed +@Return PVRSRV_ERROR + */ /***************************************************************************/ +static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_PUINT32 pui32RGXClockSpeed) +{ + RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; + + /* get clock speed */ + *pui32RGXClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed; + + return PVRSRV_OK; +} + +#if (RGX_NUM_OS_SUPPORTED > 1) +/*! + ******************************************************************************* + + @Function RGXInitFwRawHeap + + @Description Called to perform additional initialisation + ******************************************************************************/ +static PVRSRV_ERROR RGXInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32OSid) +{ + IMG_UINT32 uiStringLength; + IMG_UINT32 uiStringLengthMax = 32; + + uiStringLength = MIN(sizeof(RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT), uiStringLengthMax + 1); + + /* Start by allocating memory for this OSID heap identification string */ + psDevMemHeap->pszName = OSAllocMem(uiStringLength * sizeof(IMG_CHAR)); + if (psDevMemHeap->pszName == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* Append the OSID number to the RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT string */ + OSSNPrintf((IMG_CHAR *)psDevMemHeap->pszName, uiStringLength, RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSid); + + /* Use the common blueprint template support function to initialise the heap */ + *psDevMemHeap = _blueprint_init((IMG_CHAR *)psDevMemHeap->pszName, + RGX_FIRMWARE_RAW_HEAP_BASE + (ui32OSid * RGX_FIRMWARE_RAW_HEAP_SIZE), + RGX_FIRMWARE_RAW_HEAP_SIZE, + 0, + 0); + + return PVRSRV_OK; +} + +/*! 
+ ******************************************************************************* + + @Function RGXDeInitFwRawHeap + + @Description Called to perform additional deinitialisation + ******************************************************************************/ +static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap) +{ + IMG_UINT64 uiBase = RGX_FIRMWARE_RAW_HEAP_BASE + RGX_FIRMWARE_RAW_HEAP_SIZE; + IMG_UINT64 uiSpan = uiBase + ((RGX_NUM_OS_SUPPORTED - 1) * RGX_FIRMWARE_RAW_HEAP_SIZE); + + /* Safe to do as the guest firmware heaps are last in the list */ + if (psDevMemHeap->sHeapBaseAddr.uiAddr >= uiBase && + psDevMemHeap->sHeapBaseAddr.uiAddr < uiSpan) + { + void *pszName = (void*)psDevMemHeap->pszName; + OSFreeMem(pszName); + } +} +#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ + +/****************************************************************************** + End of file (rgxinit.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxinit.h b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxinit.h new file mode 100644 index 000000000000..12add8d2e5f9 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxinit.h @@ -0,0 +1,316 @@ +/*************************************************************************/ /*! +@File +@Title RGX initialisation header file +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the RGX initialisation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__RGXINIT_H__) +#define __RGXINIT_H__ + +#include "connection_server.h" +#include "pvrsrv_error.h" +#include "img_types.h" +#include "device.h" +#include "rgxdevice.h" +#include "rgx_bridge.h" +#include "fwload.h" + +#if defined(LINUX) +#define OS_FW_VERIFY_FUNCTION OSVerifyFirmware +#else +#define OS_FW_VERIFY_FUNCTION NULL +#endif + +/*! +******************************************************************************* + + @Function RGXInitDevPart2 + + @Description + + Second part of server-side RGX initialisation + + @Input psDeviceNode - device node + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32DeviceFlags, + IMG_UINT32 ui32HWPerfHostBufSizeKB, + IMG_UINT32 ui32HWPerfHostFilter, + RGX_ACTIVEPM_CONF eActivePMConf); + +PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEVMEM_SIZE_T ui32FWCodeLen, + IMG_DEVMEM_SIZE_T ui32FWDataLen, + IMG_DEVMEM_SIZE_T uiFWCorememCodeLen, + IMG_DEVMEM_SIZE_T uiFWCorememDataLen); + + +/*! 
+******************************************************************************* + + @Function RGXInitFirmware + + @Description + + Server-side RGX firmware initialisation + + @Input psDeviceNode - device node + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR +RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bEnableSignatureChecks, + IMG_UINT32 ui32SignatureChecksBufSize, + IMG_UINT32 ui32HWPerfFWBufSizeKB, + IMG_UINT64 ui64HWPerfFilter, + IMG_UINT32 ui32RGXFWAlignChecksArrLength, + IMG_UINT32 *pui32RGXFWAlignChecks, + IMG_UINT32 ui32ConfigFlags, + IMG_UINT32 ui32LogType, + IMG_UINT32 ui32FilterFlags, + IMG_UINT32 ui32JonesDisableMask, + IMG_UINT32 ui32HWRDebugDumpLimit, + IMG_UINT32 ui32HWPerfCountersDataSize, + IMG_UINT32 *pui32TPUTrilinearFracMask, + RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf, + FW_PERF_CONF eFirmwarePerf, + IMG_UINT32 ui32ConfigFlagsExt, + IMG_UINT32 ui32FwOsCfgFlags); + + +/*! +******************************************************************************* + + @Function RGXLoadAndGetFWData + + @Description + + Load FW and return pointer to FW data. + + @Input psDeviceNode - device node + + @Input ppsRGXFW - fw pointer + + @Return void * - pointer to FW data + +******************************************************************************/ +const void *RGXLoadAndGetFWData(PVRSRV_DEVICE_NODE *psDeviceNode, OS_FW_IMAGE **ppsRGXFW); + +#if defined(PDUMP) +/*! +******************************************************************************* + + @Function RGXInitHWPerfCounters + + @Description + + Initialisation of the performance counters + + @Input psDeviceNode - device node + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXInitHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode); +#endif + +/*! 
+******************************************************************************* + + @Function RGXRegisterDevice + + @Description + + Registers the device with the system + + @Input: psDeviceNode - device node + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode); + +/*! +******************************************************************************* + + @Function RGXDevBVNCString + + @Description + + Returns the Device BVNC string. It will allocate and fill it first, if necessary. + + @Input: psDevInfo - device info (must not be null) + + @Return IMG_PCHAR - pointer to BVNC string + +******************************************************************************/ +IMG_PCHAR RGXDevBVNCString(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! +******************************************************************************* + + @Function DevDeInitRGX + + @Description + + Reset and deinitialise Chip + + @Input psDeviceNode - device info. structure + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode); + + +#if !defined(NO_HARDWARE) + +void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! +******************************************************************************* + + @Function SORgxGpuUtilStatsRegister + + @Description SO Interface function called from the OS layer implementation. + Initialise data used to compute GPU utilisation statistics + for a particular user (identified by the handle passed as + argument). This function must be called only once for each + different user/handle. 
+ + @Input phGpuUtilUser - Pointer to handle used to identify a user of + RGXGetGpuUtilStats + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser); + + +/*! +******************************************************************************* + + @Function SORgxGpuUtilStatsUnregister + + @Description SO Interface function called from the OS layer implementation. + Free data previously used to compute GPU utilisation statistics + for a particular user (identified by the handle passed as + argument). + + @Input hGpuUtilUser - Handle used to identify a user of + RGXGetGpuUtilStats + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser); +#endif /* !defined(NO_HARDWARE) */ + + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +/*! +******************************************************************************* + + @Function RGXVirtPopulateLMASubArenas + + @Description Populates the LMA arenas based on the min max values passed by + the client during initialization. GPU Virtualisation Validation + only. + + @Input psDeviceNode : Pointer to a device info structure. + ui32NumElements : Total number of min / max values passed by + the client + pui32Elements : The array containing all the min / max values + passed by the client, all bundled together + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXVirtPopulateLMASubArenas(PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], + IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], + IMG_BOOL bEnableTrustedDeviceAceConfig); +#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ + +/*! 
+ ******************************************************************************* + + @Function RGXInitCreateFWKernelMemoryContext + + @Description Called to perform initialisation during firmware kernel context + creation. + + @Input psDeviceNode device node + ******************************************************************************/ +PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode); + +/*! + ******************************************************************************* + + @Function RGXDeInitDestroyFWKernelMemoryContext + + @Description Called to perform deinitialisation during firmware kernel + context destruction. + + @Input psDeviceNode device node + ******************************************************************************/ +void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode); + +/*! + ******************************************************************************* + + @Function RGXFwIrqEventRx + + @Description Checks the implementation specific IRQ status register, + clearing it if necessary and returning the IRQ status. + + @Input: psDevInfo - device info + + @Return: IRQ status + + ******************************************************************************/ +IMG_BOOL RGXFwIrqEventRx(PVRSRV_RGXDEV_INFO *psDevInfo); + +#endif /* __RGXINIT_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxkicksync.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxkicksync.c new file mode 100644 index 000000000000..b65d22e03810 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxkicksync.c @@ -0,0 +1,789 @@ +/*************************************************************************/ /*! +@File rgxkicksync.c +@Title Server side of the sync only kick API +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "img_defs.h" +#include "rgxkicksync.h" + +#include "rgxdevice.h" +#include "rgxmem.h" +#include "rgxfwutils.h" +#include "allocmem.h" +#include "sync.h" +#include "rgxhwperf.h" +#include "ospvr_gputrace.h" + +#include "sync_checkpoint.h" +#include "sync_checkpoint_internal.h" + +/* Enable this to dump the compiled list of UFOs prior to kick call */ +#define ENABLE_KICKSYNC_UFO_DUMP 0 + +//#define KICKSYNC_CHECKPOINT_DEBUG 1 + +#if defined(KICKSYNC_CHECKPOINT_DEBUG) +#define CHKPT_DBG(X) PVR_DPF(X) +#else +#define CHKPT_DBG(X) +#endif + +struct _RGX_SERVER_KICKSYNC_CONTEXT_ +{ + PVRSRV_DEVICE_NODE * psDeviceNode; + RGX_SERVER_COMMON_CONTEXT * psServerCommonContext; + DLLIST_NODE sListNode; + SYNC_ADDR_LIST sSyncAddrListFence; + SYNC_ADDR_LIST sSyncAddrListUpdate; + POS_LOCK hLock; +}; + + +PVRSRV_ERROR PVRSRVRGXCreateKickSyncContextKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32PackedCCBSizeU88, + IMG_UINT32 ui32ContextFlags, + RGX_SERVER_KICKSYNC_CONTEXT **ppsKickSyncContext) +{ + PVRSRV_RGXDEV_INFO * psDevInfo = psDeviceNode->pvDevice; + DEVMEM_MEMDESC * psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); + RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext; + RGX_COMMON_CONTEXT_INFO sInfo; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32CCBAllocSizeLog2, ui32CCBMaxAllocSizeLog2; + + /* Prepare cleanup struct */ + * ppsKickSyncContext = NULL; + psKickSyncContext = OSAllocZMem(sizeof(*psKickSyncContext)); + if (psKickSyncContext == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + eError = OSLockCreate(&psKickSyncContext->hLock); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto err_lockcreate; + } + + psKickSyncContext->psDeviceNode = psDeviceNode; + + 
sInfo.psFWFrameworkMemDesc = NULL; + + ui32CCBAllocSizeLog2 = U32toU8_Unpack1(ui32PackedCCBSizeU88); + ui32CCBMaxAllocSizeLog2 = U32toU8_Unpack2(ui32PackedCCBSizeU88); + eError = FWCommonContextAllocate(psConnection, + psDeviceNode, + REQ_TYPE_KICKSYNC, + RGXFWIF_DM_GP, + NULL, + 0, + psFWMemContextMemDesc, + NULL, + ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_KICKSYNC_CCB_SIZE_LOG2, + ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_KICKSYNC_CCB_MAX_SIZE_LOG2, + ui32ContextFlags, + 0, /* priority */ + 0, /* max deadline MS */ + 0, /* robustness address */ + & sInfo, + & psKickSyncContext->psServerCommonContext); + if (eError != PVRSRV_OK) + { + goto fail_contextalloc; + } + + OSWRLockAcquireWrite(psDevInfo->hKickSyncCtxListLock); + dllist_add_to_tail(&(psDevInfo->sKickSyncCtxtListHead), &(psKickSyncContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hKickSyncCtxListLock); + + SyncAddrListInit(&psKickSyncContext->sSyncAddrListFence); + SyncAddrListInit(&psKickSyncContext->sSyncAddrListUpdate); + + * ppsKickSyncContext = psKickSyncContext; + return PVRSRV_OK; + +fail_contextalloc: + OSLockDestroy(psKickSyncContext->hLock); +err_lockcreate: + OSFreeMem(psKickSyncContext); + return eError; +} + + +PVRSRV_ERROR PVRSRVRGXDestroyKickSyncContextKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO * psDevInfo = psKickSyncContext->psDeviceNode->pvDevice; + + /* Check if the FW has finished with this resource ... */ + eError = RGXFWRequestCommonContextCleanUp(psKickSyncContext->psDeviceNode, + psKickSyncContext->psServerCommonContext, + RGXFWIF_DM_GP, + PDUMP_FLAGS_NONE); + + if (eError == PVRSRV_ERROR_RETRY) + { + return eError; + } + else if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + + /* ... 
it has so we can free its resources */ + + OSWRLockAcquireWrite(psDevInfo->hKickSyncCtxListLock); + dllist_remove_node(&(psKickSyncContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hKickSyncCtxListLock); + + FWCommonContextFree(psKickSyncContext->psServerCommonContext); + + SyncAddrListDeinit(&psKickSyncContext->sSyncAddrListFence); + SyncAddrListDeinit(&psKickSyncContext->sSyncAddrListUpdate); + + OSLockDestroy(psKickSyncContext->hLock); + + OSFreeMem(psKickSyncContext); + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVRGXSetKickSyncContextPropertyKM(RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContext, + RGX_CONTEXT_PROPERTY eContextProperty, + IMG_UINT64 ui64Input, + IMG_UINT64 *pui64Output) +{ + PVRSRV_ERROR eError; + + switch (eContextProperty) + { + case RGX_CONTEXT_PROPERTY_FLAGS: + { + OSLockAcquire(psKickSyncContext->hLock); + eError = FWCommonContextSetFlags(psKickSyncContext->psServerCommonContext, + (IMG_UINT32)ui64Input); + + OSLockRelease(psKickSyncContext->hLock); + PVR_LOG_IF_ERROR(eError, "FWCommonContextSetFlags"); + break; + } + + default: + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty)); + eError = PVRSRV_ERROR_NOT_SUPPORTED; + } + } + + return eError; +} + +void DumpKickSyncCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel) +{ + DLLIST_NODE *psNode, *psNext; + OSWRLockAcquireRead(psDevInfo->hKickSyncCtxListLock); + dllist_foreach_node(&psDevInfo->sKickSyncCtxtListHead, psNode, psNext) + { + RGX_SERVER_KICKSYNC_CONTEXT *psCurrentServerKickSyncCtx = + IMG_CONTAINER_OF(psNode, RGX_SERVER_KICKSYNC_CONTEXT, sListNode); + + if (NULL != psCurrentServerKickSyncCtx->psServerCommonContext) + { + DumpFWCommonContextInfo(psCurrentServerKickSyncCtx->psServerCommonContext, + pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + } + } + OSWRLockReleaseRead(psDevInfo->hKickSyncCtxListLock); +} + 
+IMG_UINT32 CheckForStalledClientKickSyncCtxt(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + DLLIST_NODE *psNode, *psNext; + IMG_UINT32 ui32ContextBitMask = 0; + + OSWRLockAcquireRead(psDevInfo->hKickSyncCtxListLock); + + dllist_foreach_node(&psDevInfo->sKickSyncCtxtListHead, psNode, psNext) + { + RGX_SERVER_KICKSYNC_CONTEXT *psCurrentServerKickSyncCtx = + IMG_CONTAINER_OF(psNode, RGX_SERVER_KICKSYNC_CONTEXT, sListNode); + + if (NULL != psCurrentServerKickSyncCtx->psServerCommonContext) + { + if (CheckStalledClientCommonContext(psCurrentServerKickSyncCtx->psServerCommonContext, RGX_KICK_TYPE_DM_GP) == PVRSRV_ERROR_CCCB_STALLED) + { + ui32ContextBitMask |= RGX_KICK_TYPE_DM_GP; + } + } + } + + OSWRLockReleaseRead(psDevInfo->hKickSyncCtxListLock); + return ui32ContextBitMask; +} + +PVRSRV_ERROR PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFODevVarBlock, + IMG_UINT32 * paui32ClientUpdateOffset, + IMG_UINT32 * paui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE iUpdateTimeline, + PVRSRV_FENCE * piUpdateFence, + IMG_CHAR szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], + + IMG_UINT32 ui32ExtJobRef) +{ + RGXFWIF_KCCB_CMD sKickSyncKCCBCmd; + RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1]; + PVRSRV_ERROR eError; + PVRSRV_ERROR eError2; + IMG_BOOL bCCBStateOpen = IMG_FALSE; + PRGXFWIF_UFO_ADDR *pauiClientFenceUFOAddress = NULL; + PRGXFWIF_UFO_ADDR *pauiClientUpdateUFOAddress = NULL; + IMG_UINT32 ui32ClientFenceCount = 0; + IMG_UINT32 *paui32ClientFenceValue = NULL; + PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE; + IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext).ui32Addr; + PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psKickSyncContext->psServerCommonContext); + RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psKickSyncContext->psServerCommonContext); + IMG_UINT32 
ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); + IMG_UINT64 uiCheckFenceUID = 0; + IMG_UINT64 uiUpdateFenceUID = 0; + PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL; + PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; + IMG_UINT32 ui32FenceSyncCheckpointCount = 0; + IMG_UINT32 ui32FenceTimelineUpdateValue = 0; + IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL; + PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL; + void *pvUpdateFenceFinaliseData = NULL; + + /* Ensure we haven't been given a null ptr to + * update values if we have been told we + * have dev var updates + */ + if (ui32ClientUpdateCount > 0) + { + PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL, + "paui32ClientUpdateValue NULL but " + "ui32ClientUpdateCount > 0", + PVRSRV_ERROR_INVALID_PARAMS); + } + + OSLockAcquire(psKickSyncContext->hLock); + eError = SyncAddrListPopulate(&psKickSyncContext->sSyncAddrListUpdate, + ui32ClientUpdateCount, + pauiClientUpdateUFODevVarBlock, + paui32ClientUpdateOffset); + + if (eError != PVRSRV_OK) + { + goto fail_syncaddrlist; + } + + if (ui32ClientUpdateCount > 0) + { + pauiClientUpdateUFOAddress = psKickSyncContext->sSyncAddrListUpdate.pasFWAddrs; + } + /* Ensure the string is null-terminated (Required for safety) */ + szUpdateFenceName[31] = '\0'; + + /* This will never be true if called from the bridge since piUpdateFence will always be valid */ + if (iUpdateTimeline >= 0 && !piUpdateFence) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto out_unlock; + } + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), " + "psKickSyncContext->psDeviceNode->hSyncCheckpointContext=<%p>...", + __func__, iCheckFence, + (void*)psKickSyncContext->psDeviceNode->hSyncCheckpointContext)); + /* Resolve the sync checkpoints that make up the input fence */ + eError = SyncCheckpointResolveFence(psKickSyncContext->psDeviceNode->hSyncCheckpointContext, + iCheckFence, + &ui32FenceSyncCheckpointCount, + 
&apsFenceSyncCheckpoints, + &uiCheckFenceUID, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_OK) + { + goto fail_resolve_fence; + } + + /* Create the output fence (if required) */ + if (iUpdateTimeline != PVRSRV_NO_TIMELINE) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: calling SyncCheckpointCreateFence (iUpdateTimeline=%d)...", + __func__, iUpdateTimeline)); + eError = SyncCheckpointCreateFence(psKickSyncContext->psDeviceNode, + szUpdateFenceName, + iUpdateTimeline, + psKickSyncContext->psDeviceNode->hSyncCheckpointContext, + &iUpdateFence, + &uiUpdateFenceUID, + &pvUpdateFenceFinaliseData, + &psUpdateSyncCheckpoint, + (void*)&psFenceTimelineUpdateSync, + &ui32FenceTimelineUpdateValue, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_OK) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%d)", + __func__, eError)); + goto fail_create_output_fence; + } + CHKPT_DBG((PVR_DBG_ERROR, + "%s: ...returned from SyncCheckpointCreateFence " + "(iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, " + "ui32FenceTimelineUpdateValue=%u)", + __func__, iUpdateFence, psFenceTimelineUpdateSync, + ui32FenceTimelineUpdateValue)); + + /* Append the sync prim update for the timeline (if required) */ + if (psFenceTimelineUpdateSync) + { + IMG_UINT32 *pui32TimelineUpdateWp = NULL; + + /* Allocate memory to hold the list of update values (including our timeline update) */ + pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*paui32ClientUpdateValue) * (ui32ClientUpdateCount+1)); + if (!pui32IntAllocatedUpdateValues) + { + /* Failed to allocate memory */ + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc_update_values_mem; + } + OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32ClientUpdateCount+1)); + /* Copy the update values into the new memory, then append our timeline update value */ + OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32ClientUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32ClientUpdateCount); + /* Now set the 
additional update value */ + pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32ClientUpdateCount; + *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue; + ui32ClientUpdateCount++; + /* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */ + paui32ClientUpdateValue = pui32IntAllocatedUpdateValues; +#if defined(KICKSYNC_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; + + for (iii=0; iii<ui32ClientUpdateCount; iii++) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", + __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + /* Now append the timeline sync prim addr to the kicksync context update list */ + SyncAddrListAppendSyncPrim(&psKickSyncContext->sSyncAddrListUpdate, + psFenceTimelineUpdateSync); + } + } + + /* Reset number of fence syncs in kicksync context fence list to 0 */ + SyncAddrListPopulate(&psKickSyncContext->sSyncAddrListFence, + 0, NULL, NULL); + + if (ui32FenceSyncCheckpointCount > 0) + { + /* Append the checks (from input fence) */ + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append %d sync checkpoints to KickSync Fence " + "(&psKickSyncContext->sSyncAddrListFence=<%p>)...", + __func__, ui32FenceSyncCheckpointCount, + (void*)&psKickSyncContext->sSyncAddrListFence)); + SyncAddrListAppendCheckpoints(&psKickSyncContext->sSyncAddrListFence, + ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + if (!pauiClientFenceUFOAddress) + { + pauiClientFenceUFOAddress = psKickSyncContext->sSyncAddrListFence.pasFWAddrs; + } + ui32ClientFenceCount += ui32FenceSyncCheckpointCount; +#if defined(KICKSYNC_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiClientFenceUFOAddress; + + for (iii=0; iii<ui32ClientFenceCount; iii++) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClientFenceUFOAddress[%d](<%p>) = 0x%x", + __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + } + + if (psUpdateSyncCheckpoint) + { + PVRSRV_ERROR eErr; + + /* Append the update (from output fence) */ + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append 1 sync checkpoint to KickSync Update " + 
"(&psKickSyncContext->sSyncAddrListUpdate=<%p>)...", + __func__, (void*)&psKickSyncContext->sSyncAddrListUpdate)); + eErr = SyncAddrListAppendCheckpoints(&psKickSyncContext->sSyncAddrListUpdate, + 1, + &psUpdateSyncCheckpoint); + if (eErr != PVRSRV_OK) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: ...done. SyncAddrListAppendCheckpoints() returned error (%d)", + __func__, eErr)); + } + else + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done.", __func__)); + } + if (!pauiClientUpdateUFOAddress) + { + pauiClientUpdateUFOAddress = psKickSyncContext->sSyncAddrListUpdate.pasFWAddrs; + } + ui32ClientUpdateCount++; +#if defined(KICKSYNC_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiClientUpdateUFOAddress; + + for (iii=0; iii<ui32ClientUpdateCount; iii++) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClientUpdateUFOAddress[%d](<%p>) = 0x%x", + __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + } + +#if (ENABLE_KICKSYNC_UFO_DUMP == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: dumping KICKSYNC fence/updates syncs...", + __func__)); + { + IMG_UINT32 ii; + PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiClientFenceUFOAddress; + IMG_UINT32 *pui32TmpIntFenceValue = paui32ClientFenceValue; + PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiClientUpdateUFOAddress; + IMG_UINT32 *pui32TmpIntUpdateValue = paui32ClientUpdateValue; + + /* Dump Fence syncs and Update syncs */ + PVR_DPF((PVR_DBG_ERROR, + "%s: Prepared %d KickSync fence syncs " + "(&psKickSyncContext->sSyncAddrListFence=<%p>, " + "pauiClientFenceUFOAddress=<%p>):", + __func__, ui32ClientFenceCount, + (void*)&psKickSyncContext->sSyncAddrListFence, + (void*)pauiClientFenceUFOAddress)); + for (ii=0; ii<ui32ClientFenceCount; ii++) + { + if (psTmpIntFenceUFOAddress->ui32Addr & 0x1) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %d/%d<%p>. FWAddr=0x%x, " + "CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", + __func__, ii + 1, ui32ClientFenceCount, + (void*)psTmpIntFenceUFOAddress, + psTmpIntFenceUFOAddress->ui32Addr)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %d/%d<%p>. 
 FWAddr=0x%x, CheckValue=%d(0x%x)", + __func__, ii + 1, ui32ClientFenceCount, + (void*)psTmpIntFenceUFOAddress, + psTmpIntFenceUFOAddress->ui32Addr, + *pui32TmpIntFenceValue, + *pui32TmpIntFenceValue)); + pui32TmpIntFenceValue++; + } + psTmpIntFenceUFOAddress++; + } + PVR_DPF((PVR_DBG_ERROR, + "%s: Prepared %d KickSync update syncs " + "(&psKickSyncContext->sSyncAddrListUpdate=<%p>, " + "pauiClientUpdateUFOAddress=<%p>):", + __func__, ui32ClientUpdateCount, + (void*)&psKickSyncContext->sSyncAddrListUpdate, + (void*)pauiClientUpdateUFOAddress)); + for (ii=0; ii<ui32ClientUpdateCount; ii++) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Line %d, psTmpIntUpdateUFOAddress=<%p>", + __func__, __LINE__, + (void*)psTmpIntUpdateUFOAddress)); + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Line %d, pui32TmpIntUpdateValue=<%p>", + __func__, __LINE__, + (void*)pui32TmpIntUpdateValue)); + if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %d/%d<%p>. FWAddr=0x%x, " + "UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", + __func__, ii + 1, ui32ClientUpdateCount, + (void*)psTmpIntUpdateUFOAddress, + psTmpIntUpdateUFOAddress->ui32Addr)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", + __func__, ii + 1, ui32ClientUpdateCount, + (void*)psTmpIntUpdateUFOAddress, + psTmpIntUpdateUFOAddress->ui32Addr, + *pui32TmpIntUpdateValue)); + pui32TmpIntUpdateValue++; + } + psTmpIntUpdateUFOAddress++; + } + } +#endif + + RGXCmdHelperInitCmdCCB(psClientCCB, + ui32ClientFenceCount, + pauiClientFenceUFOAddress, + paui32ClientFenceValue, + ui32ClientUpdateCount, + pauiClientUpdateUFOAddress, + paui32ClientUpdateValue, + 0, + NULL, + RGXFWIF_CCB_CMD_TYPE_NULL, + ui32ExtJobRef, + ui32IntJobRef, + PDUMP_FLAGS_NONE, + NULL, + "KickSync", + bCCBStateOpen, + asCmdHelperData); + + eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData); + if (eError != PVRSRV_OK) + { + goto fail_cmdaquire; + } + + /* + * We should reserve space in the kernel CCB here and fill in the command + * directly. 
+ * This is so if there isn't space in the kernel CCB we can return with + * retry back to services client before we take any operations + */ + + /* + * We might only be kicking for flush out a padding packet so only submit + * the command if the create was successful + */ + if (eError == PVRSRV_OK) + { + /* + * All the required resources are ready at this point, we can't fail so + * take the required server sync operations and commit all the resources + */ + RGXCmdHelperReleaseCmdCCB(1, + asCmdHelperData, + "KickSync", + FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext).ui32Addr); + } + + /* Construct the kernel kicksync CCB command. */ + sKickSyncKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + sKickSyncKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext); + sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); + sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); + + sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; + sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; + + /* + * Submit the kicksync command to the firmware. 
+ */ + RGXSRV_HWPERF_ENQ(psKickSyncContext, + OSGetCurrentClientProcessIDKM(), + ui32FWCtx, + ui32ExtJobRef, + ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_SYNC, + iCheckFence, + iUpdateFence, + iUpdateTimeline, + uiCheckFenceUID, + uiUpdateFenceUID, + NO_DEADLINE, + NO_CYCEST); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError2 = RGXScheduleCommand(psKickSyncContext->psDeviceNode->pvDevice, + RGXFWIF_DM_GP, + & sKickSyncKCCBCmd, + ui32ClientCacheOpSeqNum, + PDUMP_FLAGS_NONE); + if (eError2 != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + PVRGpuTraceEnqueueEvent(psKickSyncContext->psDeviceNode->pvDevice, + ui32FWCtx, ui32ExtJobRef, ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_SYNC); + + if (eError2 != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVRGXKickSync failed to schedule kernel CCB command. (0x%x)", + eError)); + } + + /* + * Now check eError (which may have returned an error from our earlier call + * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first + * so we check it now... 
+ */ + if (eError != PVRSRV_OK ) + { + goto fail_cmdaquire; + } + +#if defined(NO_HARDWARE) + /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ + if (psUpdateSyncCheckpoint) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", + __func__, (void*)psUpdateSyncCheckpoint, + SyncCheckpointGetId(psUpdateSyncCheckpoint), + SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint))); + SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint); + } + if (psFenceTimelineUpdateSync) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Updating NOHW sync prim<%p> to %d", + __func__, (void*)psFenceTimelineUpdateSync, + ui32FenceTimelineUpdateValue)); + SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue); + } + SyncCheckpointNoHWUpdateTimelines(NULL); +#endif + /* Drop the references taken on the sync checkpoints in the + * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } + /* Free memory allocated to hold the internal list of update values */ + if (pui32IntAllocatedUpdateValues) + { + OSFreeMem(pui32IntAllocatedUpdateValues); + pui32IntAllocatedUpdateValues = NULL; + } + + *piUpdateFence = iUpdateFence; + if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE)) + { + SyncCheckpointFinaliseFence(psKickSyncContext->psDeviceNode, iUpdateFence, + pvUpdateFenceFinaliseData, + psUpdateSyncCheckpoint, szUpdateFenceName); + } + + OSLockRelease(psKickSyncContext->hLock); + return PVRSRV_OK; + +fail_cmdaquire: + SyncAddrListRollbackCheckpoints(psKickSyncContext->psDeviceNode, &psKickSyncContext->sSyncAddrListFence); + SyncAddrListRollbackCheckpoints(psKickSyncContext->psDeviceNode, &psKickSyncContext->sSyncAddrListUpdate); + if (iUpdateFence != 
PVRSRV_NO_FENCE) + { + SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData); + } + + /* Free memory allocated to hold update values */ + if (pui32IntAllocatedUpdateValues) + { + OSFreeMem(pui32IntAllocatedUpdateValues); + } +fail_alloc_update_values_mem: +fail_create_output_fence: + /* Drop the references taken on the sync checkpoints in the + * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + /* Free memory allocated to hold the resolved fence's checkpoints */ + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } +fail_resolve_fence: +fail_syncaddrlist: +out_unlock: + OSLockRelease(psKickSyncContext->hLock); + return eError; +} + + +/**************************************************************************//** + End of file (rgxkicksync.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxlayer.h b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxlayer.h new file mode 100644 index 000000000000..34deb4ef1bf1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxlayer.h @@ -0,0 +1,817 @@ +/*************************************************************************/ /*! +@File +@Title Header for Services abstraction layer +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declaration of an interface layer used to abstract code that + can be compiled outside of the DDK, potentially in a + completely different OS. + All the headers included by this file must also be copied to + the alternative source tree. + All the functions declared here must have a DDK implementation + inside the DDK source tree (e.g. rgxlayer_impl.h/.c) and + another different implementation in case they are used outside + of the DDK. 
+ All of the functions accept as a first parameter a + "const void *hPrivate" argument. It should be used to pass + around any implementation specific data required. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__RGXLAYER_H__) +#define __RGXLAYER_H__ + +#if defined(__cplusplus) +extern "C" { +#endif + + +#include "img_defs.h" +#include "img_types.h" +#include "img_elf.h" +#include "pvrsrv_error.h" /* includes pvrsrv_errors.h */ +#include "rgx_bvnc_defs_km.h" +#include "rgx_fw_info.h" +#include "rgx_fwif_shared.h" /* includes rgx_common.h and mem_types.h */ +#include "rgx_meta.h" +#include "rgx_mips.h" +#include "rgx_riscv.h" + +#include "rgxdefs_km.h" +/* includes: + * rgx_cr_defs_km.h, + * RGX_BVNC_CORE_KM_HEADER (rgxcore_km_B.V.N.C.h), + * RGX_BNC_CONFIG_KM_HEADER (rgxconfig_km_B.V.N.C.h) + */ + + +/*! +******************************************************************************* + + @Function RGXMemCopy + + @Description MemCopy implementation + + @Input hPrivate : Implementation specific data + @Input pvDst : Pointer to the destination + @Input pvSrc : Pointer to the source location + @Input uiSize : The amount of memory to copy in bytes + + @Return void + +******************************************************************************/ +void RGXMemCopy(const void *hPrivate, + void *pvDst, + void *pvSrc, + size_t uiSize); + +/*! 
+******************************************************************************* + + @Function RGXMemSet + + @Description MemSet implementation + + @Input hPrivate : Implementation specific data + @Input pvDst : Pointer to the start of the memory region + @Input ui8Value : The value to be written + @Input uiSize : The number of bytes to be set to ui8Value + + @Return void + +******************************************************************************/ +void RGXMemSet(const void *hPrivate, + void *pvDst, + IMG_UINT8 ui8Value, + size_t uiSize); + +/*! +******************************************************************************* + + @Function RGXCommentLog + + @Description Generic log function used for debugging or other purposes + + @Input hPrivate : Implementation specific data + @Input pszString : Message to be printed + @Input ... : Variadic arguments + + @Return void + +******************************************************************************/ +__printf(2, 3) +void RGXCommentLog(const void *hPrivate, + const IMG_CHAR *pszString, + ...); + +/*! +******************************************************************************* + + @Function RGXErrorLog + + @Description Generic error log function used for debugging or other purposes + + @Input hPrivate : Implementation specific data + @Input pszString : Message to be printed + @Input ... : Variadic arguments + + @Return void + +******************************************************************************/ +__printf(2, 3) +void RGXErrorLog(const void *hPrivate, + const IMG_CHAR *pszString, + ...); + +/*! 
+******************************************************************************* + + @Function RGXGetOSPageSize + + @Description Return Page size used on OS + + @Input hPrivate : Implementation specific data + + @Return IMG_UINT32 + +******************************************************************************/ + +IMG_UINT32 RGXGetOSPageSize(const void *hPrivate); + +/* This is used to get the value of a specific feature from hprivate. + * Should be used instead of calling RGXDeviceHasFeature. */ +#define RGX_DEVICE_HAS_FEATURE(hPrivate, Feature) \ + RGXDeviceHasFeature(hPrivate, RGX_FEATURE_##Feature##_BIT_MASK) + +/*! +******************************************************************************* + + @Function RGXDeviceHasFeature + + @Description Checks if a device has a particular feature + + @Input hPrivate : Implementation specific data + @Input ui64Feature : Feature to check + + @Return IMG_TRUE if the given feature is available, IMG_FALSE otherwise + +******************************************************************************/ +IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature); + +/*! +******************************************************************************* + + @Function RGXGetFWCorememSize + + @Description Get the FW coremem size + + @Input hPrivate : Implementation specific data + + @Return FW coremem size + +******************************************************************************/ +IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate); + +/*! 
+*******************************************************************************
+
+ @Function RGXWriteReg32/64
+
+ @Description Write a value to a 32/64 bit RGX register
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32RegAddr : Register offset inside the register bank
+ @Input ui32/64RegValue : New register value
+
+ @Return void
+
+******************************************************************************/
+void __RGXWriteReg32(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue);
+
+void __RGXWriteReg64(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT64 ui64RegValue);
+
+#define RGXWriteReg32(hPrivate, __off, __val) \
+({ \
+ u32 __val2 = __val; \
+ pr_debug("w: %x:%x\t%pf %s:%d\n", \
+ __off, __val2, \
+ __builtin_return_address(0), __func__, __LINE__);\
+ __RGXWriteReg32(hPrivate, __off, __val2); \
+})
+#define RGXWriteReg64(hPrivate, __off, __val) \
+({ \
+ u64 __val2 = __val; \
+ pr_debug("w: %x:%llx\t%pf %s:%d\n", \
+ __off, __val2, \
+ __builtin_return_address(0), __func__, __LINE__);\
+ __RGXWriteReg64(hPrivate, __off, __val2); \
+})
+
+/*!
+*******************************************************************************
+
+ @Function RGXReadReg32/64
+
+ @Description Read a 32/64 bit RGX register
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32RegAddr : Register offset inside the register bank
+
+ @Return Register value
+
+******************************************************************************/
+IMG_UINT32 RGXReadReg32(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr);
+
+IMG_UINT64 RGXReadReg64(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr);
+
+/*!
+*******************************************************************************
+
+ @Function RGXReadModifyWriteReg64
+
+ @Description Read-modify-write a 64 bit RGX register
+
+ @Input hPrivate : Implementation specific data.
+ @Input ui32RegAddr : Register offset inside the register bank.
+ @Input ui64RegValue : New register value. 
+ @Input ui64RegKeepMask : Keep the bits set in the mask.
+
+ @Return Always returns PVRSRV_OK
+
+******************************************************************************/
+IMG_UINT32 RGXReadModifyWriteReg64(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT64 ui64RegValue,
+ IMG_UINT64 ui64RegKeepMask);
+
+/*!
+*******************************************************************************
+
+ @Function RGXPollReg32/64
+
+ @Description Poll on a 32/64 bit RGX register until some bits are set/unset
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32RegAddr : Register offset inside the register bank
+ @Input ui32/64RegValue : Value expected from the register
+ @Input ui32/64RegMask : Only the bits set in this mask will be
+ checked against uiRegValue
+
+ @Return PVRSRV_OK if the poll succeeds,
+ PVRSRV_ERROR_TIMEOUT if the poll takes too long
+
+******************************************************************************/
+PVRSRV_ERROR RGXPollReg32(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue,
+ IMG_UINT32 ui32RegMask);
+
+PVRSRV_ERROR RGXPollReg64(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT64 ui64RegValue,
+ IMG_UINT64 ui64RegMask);
+
+/*!
+*******************************************************************************
+
+ @Function RGXWaitCycles
+
+ @Description Wait for a number of GPU cycles and/or microseconds
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32Cycles : Number of GPU cycles to wait for in pdumps,
+ it can also be used when running driver-live
+ if desired (ignoring the next parameter)
+ @Input ui32WaitUs : Number of microseconds to wait for when running
+ driver-live
+
+ @Return void
+
+******************************************************************************/
+void RGXWaitCycles(const void *hPrivate,
+ IMG_UINT32 ui32Cycles,
+ IMG_UINT32 ui32WaitUs);
+
+/*! 
+******************************************************************************* + + @Function RGXAcquireKernelMMUPC + + @Description Acquire the Kernel MMU Page Catalogue device physical address + + @Input hPrivate : Implementation specific data + @Input psPCAddr : Returned page catalog address + + @Return void + +******************************************************************************/ +void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr); + +/*! +******************************************************************************* + + @Function RGXWriteKernelMMUPC32/64 + + @Description Write the Kernel MMU Page Catalogue to the 32/64 bit + RGX register passed as argument. + In a driver-live scenario without PDump these functions + are the same as RGXWriteReg32/64 and they don't need + to be reimplemented. + + @Input hPrivate : Implementation specific data + @Input ui32PCReg : Register offset inside the register bank + @Input ui32AlignShift : PC register alignshift + @Input ui32Shift : PC register shift + @Input ui32/64PCVal : Page catalog value (aligned and shifted) + + @Return void + +******************************************************************************/ +#if defined(PDUMP) +void RGXWriteKernelMMUPC64(const void *hPrivate, + IMG_UINT32 ui32PCReg, + IMG_UINT32 ui32PCRegAlignShift, + IMG_UINT32 ui32PCRegShift, + IMG_UINT64 ui64PCVal); + +void RGXWriteKernelMMUPC32(const void *hPrivate, + IMG_UINT32 ui32PCReg, + IMG_UINT32 ui32PCRegAlignShift, + IMG_UINT32 ui32PCRegShift, + IMG_UINT32 ui32PCVal); +#else /* defined(PDUMP) */ + +#define RGXWriteKernelMMUPC64(priv, pcreg, alignshift, shift, pcval) \ + RGXWriteReg64(priv, pcreg, pcval) + +#define RGXWriteKernelMMUPC32(priv, pcreg, alignshift, shift, pcval) \ + RGXWriteReg32(priv, pcreg, pcval) + +#endif /* defined(PDUMP) */ + + + +/*! 
+******************************************************************************* + + @Function RGXAcquireGPURegsAddr + + @Description Acquire the GPU registers base device physical address + + @Input hPrivate : Implementation specific data + @Input psGPURegsAddr : Returned GPU registers base address + + @Return void + +******************************************************************************/ +void RGXAcquireGPURegsAddr(const void *hPrivate, IMG_DEV_PHYADDR *psGPURegsAddr); + +/*! +******************************************************************************* + + @Function RGXMIPSWrapperConfig + + @Description Write GPU register bank transaction ID and MIPS boot mode + to the MIPS wrapper config register (passed as argument). + In a driver-live scenario without PDump this is the same as + RGXWriteReg64 and it doesn't need to be reimplemented. + + @Input hPrivate : Implementation specific data + @Input ui32RegAddr : Register offset inside the register bank + @Input ui64GPURegsAddr : GPU registers base address + @Input ui32GPURegsAlign : Register bank transactions alignment + @Input ui32BootMode : Mips BOOT ISA mode + + @Return void + +******************************************************************************/ +#if defined(PDUMP) +void RGXMIPSWrapperConfig(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT64 ui64GPURegsAddr, + IMG_UINT32 ui32GPURegsAlign, + IMG_UINT32 ui32BootMode); +#else +#define RGXMIPSWrapperConfig(priv, regaddr, gpuregsaddr, gpuregsalign, bootmode) \ + RGXWriteReg64(priv, regaddr, ((gpuregsaddr) >> (gpuregsalign)) | (bootmode)) +#endif + +/*! 
+******************************************************************************* + + @Function RGXAcquireBootRemapAddr + + @Description Acquire the device physical address of the MIPS bootloader + accessed through remap region + + @Input hPrivate : Implementation specific data + @Output psBootRemapAddr : Base address of the remapped bootloader + + @Return void + +******************************************************************************/ +void RGXAcquireBootRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psBootRemapAddr); + +/*! +******************************************************************************* + + @Function RGXBootRemapConfig + + @Description Configure the bootloader remap registers passed as arguments. + In a driver-live scenario without PDump this is the same as + two RGXWriteReg64 and it doesn't need to be reimplemented. + + @Input hPrivate : Implementation specific data + @Input ui32Config1RegAddr : Remap config1 register offset + @Input ui64Config1RegValue : Remap config1 register value + @Input ui32Config2RegAddr : Remap config2 register offset + @Input ui64Config2PhyAddr : Output remapped aligned physical address + @Input ui64Config2PhyMask : Mask for the output physical address + @Input ui64Config2Settings : Extra settings for this remap region + + @Return void + +******************************************************************************/ +#if defined(PDUMP) +void RGXBootRemapConfig(const void *hPrivate, + IMG_UINT32 ui32Config1RegAddr, + IMG_UINT64 ui64Config1RegValue, + IMG_UINT32 ui32Config2RegAddr, + IMG_UINT64 ui64Config2PhyAddr, + IMG_UINT64 ui64Config2PhyMask, + IMG_UINT64 ui64Config2Settings); +#else +#define RGXBootRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \ + RGXWriteReg64(priv, c1reg, (c1val)); \ + RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \ + } while (0) +#endif + +/*! 
+******************************************************************************* + + @Function RGXAcquireCodeRemapAddr + + @Description Acquire the device physical address of the MIPS code + accessed through remap region + + @Input hPrivate : Implementation specific data + @Output psCodeRemapAddr : Base address of the remapped code + + @Return void + +******************************************************************************/ +void RGXAcquireCodeRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psCodeRemapAddr); + +/*! +******************************************************************************* + + @Function RGXCodeRemapConfig + + @Description Configure the code remap registers passed as arguments. + In a driver-live scenario without PDump this is the same as + two RGXWriteReg64 and it doesn't need to be reimplemented. + + @Input hPrivate : Implementation specific data + @Input ui32Config1RegAddr : Remap config1 register offset + @Input ui64Config1RegValue : Remap config1 register value + @Input ui32Config2RegAddr : Remap config2 register offset + @Input ui64Config2PhyAddr : Output remapped aligned physical address + @Input ui64Config2PhyMask : Mask for the output physical address + @Input ui64Config2Settings : Extra settings for this remap region + + @Return void + +******************************************************************************/ +#if defined(PDUMP) +void RGXCodeRemapConfig(const void *hPrivate, + IMG_UINT32 ui32Config1RegAddr, + IMG_UINT64 ui64Config1RegValue, + IMG_UINT32 ui32Config2RegAddr, + IMG_UINT64 ui64Config2PhyAddr, + IMG_UINT64 ui64Config2PhyMask, + IMG_UINT64 ui64Config2Settings); +#else +#define RGXCodeRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \ + RGXWriteReg64(priv, c1reg, (c1val)); \ + RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \ + } while (0) +#endif + +/*! 
+******************************************************************************* + + @Function RGXAcquireDataRemapAddr + + @Description Acquire the device physical address of the MIPS data + accessed through remap region + + @Input hPrivate : Implementation specific data + @Output psDataRemapAddr : Base address of the remapped data + + @Return void + +******************************************************************************/ +void RGXAcquireDataRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psDataRemapAddr); + +/*! +******************************************************************************* + + @Function RGXDataRemapConfig + + @Description Configure the data remap registers passed as arguments. + In a driver-live scenario without PDump this is the same as + two RGXWriteReg64 and it doesn't need to be reimplemented. + + @Input hPrivate : Implementation specific data + @Input ui32Config1RegAddr : Remap config1 register offset + @Input ui64Config1RegValue : Remap config1 register value + @Input ui32Config2RegAddr : Remap config2 register offset + @Input ui64Config2PhyAddr : Output remapped aligned physical address + @Input ui64Config2PhyMask : Mask for the output physical address + @Input ui64Config2Settings : Extra settings for this remap region + + @Return void + +******************************************************************************/ +#if defined(PDUMP) +void RGXDataRemapConfig(const void *hPrivate, + IMG_UINT32 ui32Config1RegAddr, + IMG_UINT64 ui64Config1RegValue, + IMG_UINT32 ui32Config2RegAddr, + IMG_UINT64 ui64Config2PhyAddr, + IMG_UINT64 ui64Config2PhyMask, + IMG_UINT64 ui64Config2Settings); +#else +#define RGXDataRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \ + RGXWriteReg64(priv, c1reg, (c1val)); \ + RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \ + } while (0) +#endif + +/*! 
+******************************************************************************* + + @Function RGXAcquireTrampolineRemapAddr + + @Description Acquire the device physical address of the MIPS data + accessed through remap region + + @Input hPrivate : Implementation specific data + @Output psTrampolineRemapAddr: Base address of the remapped data + + @Return void + +******************************************************************************/ +void RGXAcquireTrampolineRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psTrampolineRemapAddr); + +/*! +******************************************************************************* + + @Function RGXTrampolineRemapConfig + + @Description Configure the trampoline remap registers passed as arguments. + In a driver-live scenario without PDump this is the same as + two RGXWriteReg64 and it doesn't need to be reimplemented. + + @Input hPrivate : Implementation specific data + @Input ui32Config1RegAddr : Remap config1 register offset + @Input ui64Config1RegValue : Remap config1 register value + @Input ui32Config2RegAddr : Remap config2 register offset + @Input ui64Config2PhyAddr : Output remapped aligned physical address + @Input ui64Config2PhyMask : Mask for the output physical address + @Input ui64Config2Settings : Extra settings for this remap region + + @Return void + +******************************************************************************/ +#if defined(PDUMP) +void RGXTrampolineRemapConfig(const void *hPrivate, + IMG_UINT32 ui32Config1RegAddr, + IMG_UINT64 ui64Config1RegValue, + IMG_UINT32 ui32Config2RegAddr, + IMG_UINT64 ui64Config2PhyAddr, + IMG_UINT64 ui64Config2PhyMask, + IMG_UINT64 ui64Config2Settings); +#else +#define RGXTrampolineRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \ + RGXWriteReg64(priv, c1reg, (c1val)); \ + RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \ + } while (0) +#endif + +/*! 
+******************************************************************************* + + @Function RGXDoFWSlaveBoot + + @Description Returns whether or not a FW Slave Boot is required + while powering on + + @Input hPrivate : Implementation specific data + + @Return IMG_BOOL + +******************************************************************************/ +IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate); + +/*! +******************************************************************************* + + @Function RGXFabricCoherencyTest + + @Description Performs a coherency test + + @Input hPrivate : Implementation specific data + + @Return PVRSRV_OK if the test succeeds, + PVRSRV_ERROR_INIT_FAILURE if the test fails at some point + +******************************************************************************/ +PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate); + +/* This is used to check if a specific ERN/BRN is enabled from hprivate. + * Should be used instead of calling RGXDeviceHasErnBrn. */ +#define RGX_DEVICE_HAS_ERN(hPrivate, ERN) \ + RGXDeviceHasErnBrn(hPrivate, HW_ERN_##ERN##_BIT_MASK) + +#define RGX_DEVICE_HAS_BRN(hPrivate, BRN) \ + RGXDeviceHasErnBrn(hPrivate, FIX_HW_BRN_##BRN##_BIT_MASK) + +/*! +******************************************************************************* + + @Function RGXDeviceHasErnBrn + + @Description Checks if a device has a particular errata + + @Input hPrivate : Implementation specific data + @Input ui64ErnsBrns : Flags to check + + @Return IMG_TRUE if the given errata is available, IMG_FALSE otherwise + +******************************************************************************/ +IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns); + +/*! 
+******************************************************************************* + + @Function RGXGetDeviceSLCBanks + + @Description Returns the number of SLC banks used by the device + + @Input hPrivate : Implementation specific data + + @Return Number of SLC banks + +******************************************************************************/ +IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate); + +/*! +******************************************************************************* + + @Function RGXGetDeviceSLCSize + + @Description Returns the device SLC size + + @Input hPrivate : Implementation specific data + + @Return SLC size + +******************************************************************************/ +IMG_UINT32 RGXGetDeviceSLCSize(const void *hPrivate); + +/*! +******************************************************************************* + + @Function RGXGetDeviceCacheLineSize + + @Description Returns the device cache line size + + @Input hPrivate : Implementation specific data + + @Return Cache line size + +******************************************************************************/ +IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate); + +/*! +******************************************************************************* + + @Function RGXGetDevicePhysBusWidth + + @Description Returns the device physical bus width + + @Input hPrivate : Implementation specific data + + @Return Physical bus width + +******************************************************************************/ +IMG_UINT32 RGXGetDevicePhysBusWidth(const void *hPrivate); + +/*! +******************************************************************************* + + @Function RGXDevicePA0IsValid + + @Description Returns true if the device physical address 0x0 is a valid + address and can be accessed by the GPU. 
+ + @Input hPrivate : Implementation specific data + + @Return IMG_TRUE if device physical address 0x0 is a valid address, + IMG_FALSE otherwise + +******************************************************************************/ +IMG_BOOL RGXDevicePA0IsValid(const void *hPrivate); + +/*! +******************************************************************************* + + @Function RGXAcquireBootCodeAddr + + @Description Acquire the device virtual address of the RISCV boot code + + @Input hPrivate : Implementation specific data + @Output psBootCodeAddr : Boot code base address + + @Return void + +******************************************************************************/ +void RGXAcquireBootCodeAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootCodeAddr); + +/*! +******************************************************************************* + + @Function RGXAcquireBootDataAddr + + @Description Acquire the device virtual address of the RISCV boot data + + @Input hPrivate : Implementation specific data + @Output psBootDataAddr : Boot data base address + + @Return void + +******************************************************************************/ +void RGXAcquireBootDataAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootDataAddr); + +/*! + ******************************************************************************* + + @Function RGXDeviceIrqEventRx + + @Description Checks the implementation specific IRQ status register, + clearing it if necessary and returning the IRQ status. 
+ + @Input hPrivate : Implementation specific data + + @Return: IRQ status + + ******************************************************************************/ +IMG_BOOL RGXDeviceIrqEventRx(const void *hPrivate); + +#if defined(__cplusplus) +} +#endif + +#endif /* __RGXLAYER_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxlayer_impl.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxlayer_impl.c new file mode 100644 index 000000000000..ef086ad94df1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxlayer_impl.c @@ -0,0 +1,1265 @@ +/*************************************************************************/ /*! +@File +@Title DDK implementation of the Services abstraction layer +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description DDK implementation of the Services abstraction layer +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxlayer_impl.h"
+#include "osfunc.h"
+#include "pdump_km.h"
+#include "rgxfwutils.h"
+#include "rgxinit.h"
+#include "rgxfwimageutils.h"
+#include "devicemem.h"
+#include "cache_km.h"
+#include "pmr.h"
+
+#if defined(PDUMP)
+#include <stdarg.h>
+#endif
+
+void RGXMemCopy(const void *hPrivate,
+ void *pvDst,
+ void *pvSrc,
+ size_t uiSize)
+{
+ PVR_UNREFERENCED_PARAMETER(hPrivate);
+ OSDeviceMemCopy(pvDst, pvSrc, uiSize);
+}
+
+void RGXMemSet(const void *hPrivate,
+ void *pvDst,
+ IMG_UINT8 ui8Value,
+ size_t uiSize)
+{
+ PVR_UNREFERENCED_PARAMETER(hPrivate);
+ OSDeviceMemSet(pvDst, ui8Value, uiSize);
+}
+
+void RGXCommentLog(const void *hPrivate,
+ const IMG_CHAR *pszString,
+ ...) 
+{ +#if defined(PDUMP) + va_list argList; + va_start(argList, pszString); + PDumpCommentWithFlagsVA(PDUMP_FLAGS_CONTINUOUS, pszString, argList); + va_end(argList); + PVR_UNREFERENCED_PARAMETER(hPrivate); +#else + PVR_UNREFERENCED_PARAMETER(hPrivate); + PVR_UNREFERENCED_PARAMETER(pszString); +#endif +} + +void RGXErrorLog(const void *hPrivate, + const IMG_CHAR *pszString, + ...) +{ + IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; + va_list argList; + + PVR_UNREFERENCED_PARAMETER(hPrivate); + + va_start(argList, pszString); + vsnprintf(szBuffer, sizeof(szBuffer), pszString, argList); + va_end(argList); + + PVR_DPF((PVR_DBG_ERROR, "%s", szBuffer)); +} + +IMG_UINT32 RGXGetOSPageSize(const void *hPrivate) +{ + PVR_UNREFERENCED_PARAMETER(hPrivate); + return OSGetPageSize(); +} + +IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_UINT32 ui32CorememSize = 0; + + PVR_ASSERT(hPrivate != NULL); + + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE)) + { + ui32CorememSize = RGX_GET_FEATURE_VALUE(psDevInfo, META_COREMEM_SIZE); + } + + return ui32CorememSize; +} + +void __RGXWriteReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = psDevInfo->pvRegsBaseKM; + +#if defined(PDUMP) + if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) +#endif + { + OSWriteHWReg32(pvRegsBase, ui32RegAddr, ui32RegValue); + } + + PDUMPREG32(RGX_PDUMPREG_NAME, ui32RegAddr, ui32RegValue, psParams->ui32PdumpFlags); +} + +void __RGXWriteReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT64 ui64RegValue) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem 
*pvRegsBase; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = psDevInfo->pvRegsBaseKM; + +#if defined(PDUMP) + if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) +#endif + { + OSWriteHWReg64(pvRegsBase, ui32RegAddr, ui64RegValue); + } + + PDUMPREG64(RGX_PDUMPREG_NAME, ui32RegAddr, ui64RegValue, psParams->ui32PdumpFlags); +} + +IMG_UINT32 RGXReadReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + IMG_UINT32 ui32RegValue; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = psDevInfo->pvRegsBaseKM; + +#if defined(PDUMP) + if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW) + { + ui32RegValue = IMG_UINT32_MAX; + } + else +#endif + { + ui32RegValue = OSReadHWReg32(pvRegsBase, ui32RegAddr); + } + + PDUMPREGREAD32(RGX_PDUMPREG_NAME, ui32RegAddr, psParams->ui32PdumpFlags); + + return ui32RegValue; +} + +IMG_UINT64 RGXReadReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + IMG_UINT64 ui64RegValue; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = psDevInfo->pvRegsBaseKM; + +#if defined(PDUMP) + if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW) + { + ui64RegValue = IMG_UINT64_MAX; + } + else +#endif + { + ui64RegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr); + } + + PDUMPREGREAD64(RGX_PDUMPREG_NAME, ui32RegAddr, PDUMP_FLAGS_CONTINUOUS); + + return ui64RegValue; +} + +IMG_UINT32 RGXReadModifyWriteReg64(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT64 uiRegValueNew, + IMG_UINT64 uiRegKeepMask) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; +#if defined(PDUMP) + PDUMP_FLAGS_T ui32PDumpFlags = 
PDUMP_FLAGS_CONTINUOUS; +#endif + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = psDevInfo->pvRegsBaseKM; + + /* only use the new values for bits we update according to the keep mask */ + uiRegValueNew &= ~uiRegKeepMask; + +#if defined(PDUMP) + + PDUMP_BLKSTART(ui32PDumpFlags); + + /* Store register offset to temp PDump variable */ + PDumpRegRead64ToInternalVar(RGX_PDUMPREG_NAME, ":SYSMEM:$1", ui32RegAddr, ui32PDumpFlags); + + /* Keep the bits set in the mask */ + PDumpWriteVarANDValueOp(":SYSMEM:$1", uiRegKeepMask, ui32PDumpFlags); + + /* OR the new values */ + PDumpWriteVarORValueOp(":SYSMEM:$1", uiRegValueNew, ui32PDumpFlags); + + /* Do the actual register write */ + PDumpInternalVarToReg64(RGX_PDUMPREG_NAME, ui32RegAddr, ":SYSMEM:$1", ui32PDumpFlags); + + PDUMP_BLKEND(ui32PDumpFlags); + + if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) +#endif + + { + IMG_UINT64 uiRegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr); + uiRegValue &= uiRegKeepMask; + OSWriteHWReg64(pvRegsBase, ui32RegAddr, uiRegValue | uiRegValueNew); + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXPollReg32(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32RegMask) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = psDevInfo->pvRegsBaseKM; + +#if defined(PDUMP) + if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) +#endif + { + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr), + ui32RegValue, + ui32RegMask, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPollReg32: Poll for Reg (0x%x) failed", ui32RegAddr)); + return PVRSRV_ERROR_TIMEOUT; + } + } + + PDUMPREGPOL(RGX_PDUMPREG_NAME, + ui32RegAddr, + ui32RegValue, + 
ui32RegMask, + psParams->ui32PdumpFlags, + PDUMP_POLL_OPERATOR_EQUAL); + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXPollReg64(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT64 ui64RegValue, + IMG_UINT64 ui64RegMask) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + + /* Split lower and upper words */ + IMG_UINT32 ui32UpperValue = (IMG_UINT32) (ui64RegValue >> 32); + IMG_UINT32 ui32LowerValue = (IMG_UINT32) (ui64RegValue); + IMG_UINT32 ui32UpperMask = (IMG_UINT32) (ui64RegMask >> 32); + IMG_UINT32 ui32LowerMask = (IMG_UINT32) (ui64RegMask); + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = psDevInfo->pvRegsBaseKM; + +#if defined(PDUMP) + if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) +#endif + { + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr + 4), + ui32UpperValue, + ui32UpperMask, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for upper part of Reg (0x%x) failed", ui32RegAddr)); + return PVRSRV_ERROR_TIMEOUT; + } + + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr), + ui32LowerValue, + ui32LowerMask, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for upper part of Reg (0x%x) failed", ui32RegAddr)); + return PVRSRV_ERROR_TIMEOUT; + } + } + + PDUMPREGPOL(RGX_PDUMPREG_NAME, + ui32RegAddr + 4, + ui32UpperValue, + ui32UpperMask, + psParams->ui32PdumpFlags, + PDUMP_POLL_OPERATOR_EQUAL); + + + PDUMPREGPOL(RGX_PDUMPREG_NAME, + ui32RegAddr, + ui32LowerValue, + ui32LowerMask, + psParams->ui32PdumpFlags, + PDUMP_POLL_OPERATOR_EQUAL); + + return PVRSRV_OK; +} + +void RGXWaitCycles(const void *hPrivate, IMG_UINT32 ui32Cycles, IMG_UINT32 ui32TimeUs) +{ + PVR_UNREFERENCED_PARAMETER(hPrivate); + OSWaitus(ui32TimeUs); + 
PDUMPIDLWITHFLAGS(ui32Cycles, PDUMP_FLAGS_CONTINUOUS); +} + +void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr) +{ + PVR_ASSERT(hPrivate != NULL); + *psPCAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sPCAddr; +} + +#if defined(PDUMP) +void RGXWriteKernelMMUPC64(const void *hPrivate, + IMG_UINT32 ui32PCReg, + IMG_UINT32 ui32PCRegAlignShift, + IMG_UINT32 ui32PCRegShift, + IMG_UINT64 ui64PCVal) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + /* Write the cat-base address */ + OSWriteHWReg64(psDevInfo->pvRegsBaseKM, ui32PCReg, ui64PCVal); + + /* Pdump catbase address */ + MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx, + RGX_PDUMPREG_NAME, + ui32PCReg, + 8, + ui32PCRegAlignShift, + ui32PCRegShift, + PDUMP_FLAGS_CONTINUOUS); +} + +void RGXWriteKernelMMUPC32(const void *hPrivate, + IMG_UINT32 ui32PCReg, + IMG_UINT32 ui32PCRegAlignShift, + IMG_UINT32 ui32PCRegShift, + IMG_UINT32 ui32PCVal) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + /* Write the cat-base address */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32PCReg, ui32PCVal); + + /* Pdump catbase address */ + MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx, + RGX_PDUMPREG_NAME, + ui32PCReg, + 4, + ui32PCRegAlignShift, + ui32PCRegShift, + PDUMP_FLAGS_CONTINUOUS); +} +#endif /* defined(PDUMP) */ + +void RGXAcquireGPURegsAddr(const void *hPrivate, IMG_DEV_PHYADDR *psGPURegsAddr) +{ + PVR_ASSERT(hPrivate != NULL); + *psGPURegsAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sGPURegAddr; +} + +#if defined(PDUMP) +void RGXMIPSWrapperConfig(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT64 ui64GPURegsAddr, + IMG_UINT32 ui32GPURegsAlign, + IMG_UINT32 ui32BootMode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = 
((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + OSWriteHWReg64(psDevInfo->pvRegsBaseKM, + ui32RegAddr, + (ui64GPURegsAddr >> ui32GPURegsAlign) | ui32BootMode); + + PDUMP_BLKSTART(ui32PDumpFlags); + + /* Store register offset to temp PDump variable */ + PDumpRegLabelToInternalVar(RGX_PDUMPREG_NAME, ui32RegAddr, ":SYSMEM:$1", ui32PDumpFlags); + + /* Align register transactions identifier */ + PDumpWriteVarSHRValueOp(":SYSMEM:$1", ui32GPURegsAlign, ui32PDumpFlags); + + /* Enable micromips instruction encoding */ + PDumpWriteVarORValueOp(":SYSMEM:$1", ui32BootMode, ui32PDumpFlags); + + /* Do the actual register write */ + PDumpInternalVarToReg64(RGX_PDUMPREG_NAME, ui32RegAddr, ":SYSMEM:$1", ui32PDumpFlags); + + PDUMP_BLKEND(ui32PDumpFlags); +} +#endif + +void RGXAcquireBootRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psBootRemapAddr) +{ + PVR_ASSERT(hPrivate != NULL); + *psBootRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sBootRemapAddr; +} + +void RGXAcquireCodeRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psCodeRemapAddr) +{ + PVR_ASSERT(hPrivate != NULL); + *psCodeRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sCodeRemapAddr; +} + +void RGXAcquireDataRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psDataRemapAddr) +{ + PVR_ASSERT(hPrivate != NULL); + *psDataRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sDataRemapAddr; +} + +void RGXAcquireTrampolineRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psTrampolineRemapAddr) +{ + PVR_ASSERT(hPrivate != NULL); + *psTrampolineRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sTrampolineRemapAddr; +} + +#if defined(PDUMP) +static inline +void RGXWriteRemapConfig2Reg(void __iomem *pvRegs, + PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32RegAddr, + IMG_UINT64 ui64PhyAddr, + IMG_UINT64 ui64PhyMask, + IMG_UINT64 ui64Settings) +{ + PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; + + OSWriteHWReg64(pvRegs, ui32RegAddr, (ui64PhyAddr & ui64PhyMask) | ui64Settings); + + PDUMP_BLKSTART(ui32PDumpFlags); + + /* Store 
memory offset to temp PDump variable */ + PDumpMemLabelToInternalVar64(":SYSMEM:$1", psPMR, uiLogicalOffset, ui32PDumpFlags); + + /* Keep only the relevant bits of the output physical address */ + PDumpWriteVarANDValueOp(":SYSMEM:$1", ui64PhyMask, ui32PDumpFlags); + + /* Extra settings for this remapped region */ + PDumpWriteVarORValueOp(":SYSMEM:$1", ui64Settings, ui32PDumpFlags); + + /* Do the actual register write */ + PDumpInternalVarToReg64(RGX_PDUMPREG_NAME, ui32RegAddr, ":SYSMEM:$1", ui32PDumpFlags); + + PDUMP_BLKEND(ui32PDumpFlags); +} + +void RGXBootRemapConfig(const void *hPrivate, + IMG_UINT32 ui32Config1RegAddr, + IMG_UINT64 ui64Config1RegValue, + IMG_UINT32 ui32Config2RegAddr, + IMG_UINT64 ui64Config2PhyAddr, + IMG_UINT64 ui64Config2PhyMask, + IMG_UINT64 ui64Config2Settings) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_UINT32 ui32BootRemapMemOffset = RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_CODE); + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + /* Write remap config1 register */ + RGXWriteReg64(hPrivate, + ui32Config1RegAddr, + ui64Config1RegValue); + + /* Write remap config2 register */ + RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM, + psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR, + psDevInfo->psRGXFWCodeMemDesc->uiOffset + ui32BootRemapMemOffset, + ui32Config2RegAddr, + ui64Config2PhyAddr, + ui64Config2PhyMask, + ui64Config2Settings); +} + +void RGXCodeRemapConfig(const void *hPrivate, + IMG_UINT32 ui32Config1RegAddr, + IMG_UINT64 ui64Config1RegValue, + IMG_UINT32 ui32Config2RegAddr, + IMG_UINT64 ui64Config2PhyAddr, + IMG_UINT64 ui64Config2PhyMask, + IMG_UINT64 ui64Config2Settings) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_UINT32 ui32CodeRemapMemOffset = RGXGetFWImageSectionOffset(NULL, MIPS_EXCEPTIONS_CODE); + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + /* Write remap config1 register */ + RGXWriteReg64(hPrivate, + ui32Config1RegAddr, + 
ui64Config1RegValue); + + /* Write remap config2 register */ + RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM, + psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR, + psDevInfo->psRGXFWCodeMemDesc->uiOffset + ui32CodeRemapMemOffset, + ui32Config2RegAddr, + ui64Config2PhyAddr, + ui64Config2PhyMask, + ui64Config2Settings); +} + +void RGXDataRemapConfig(const void *hPrivate, + IMG_UINT32 ui32Config1RegAddr, + IMG_UINT64 ui64Config1RegValue, + IMG_UINT32 ui32Config2RegAddr, + IMG_UINT64 ui64Config2PhyAddr, + IMG_UINT64 ui64Config2PhyMask, + IMG_UINT64 ui64Config2Settings) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_UINT32 ui32DataRemapMemOffset = RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA); + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + /* Write remap config1 register */ + RGXWriteReg64(hPrivate, + ui32Config1RegAddr, + ui64Config1RegValue); + + /* Write remap config2 register */ + RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM, + psDevInfo->psRGXFWDataMemDesc->psImport->hPMR, + psDevInfo->psRGXFWDataMemDesc->uiOffset + ui32DataRemapMemOffset, + ui32Config2RegAddr, + ui64Config2PhyAddr, + ui64Config2PhyMask, + ui64Config2Settings); +} + +void RGXTrampolineRemapConfig(const void *hPrivate, + IMG_UINT32 ui32Config1RegAddr, + IMG_UINT64 ui64Config1RegValue, + IMG_UINT32 ui32Config2RegAddr, + IMG_UINT64 ui64Config2PhyAddr, + IMG_UINT64 ui64Config2PhyMask, + IMG_UINT64 ui64Config2Settings) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + /* write the register for real, without PDump */ + OSWriteHWReg64(psDevInfo->pvRegsBaseKM, + ui32Config1RegAddr, + ui64Config1RegValue); + + PDUMP_BLKSTART(ui32PDumpFlags); + + /* Store the memory address in a PDump variable */ + PDumpPhysHandleToInternalVar64(":SYSMEM:$1", + psDevInfo->psTrampoline->hPdumpPages, + ui32PDumpFlags); + + /* Keep only the 
relevant bits of the input physical address */ + PDumpWriteVarANDValueOp(":SYSMEM:$1", + ~RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_CLRMSK, + ui32PDumpFlags); + + /* Enable bit */ + PDumpWriteVarORValueOp(":SYSMEM:$1", + RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN, + ui32PDumpFlags); + + /* Do the PDump register write */ + PDumpInternalVarToReg64(RGX_PDUMPREG_NAME, + ui32Config1RegAddr, + ":SYSMEM:$1", + ui32PDumpFlags); + + PDUMP_BLKEND(ui32PDumpFlags); + + /* this can be written directly */ + RGXWriteReg64(hPrivate, + ui32Config2RegAddr, + (ui64Config2PhyAddr & ui64Config2PhyMask) | ui64Config2Settings); +} +#endif + +#define MAX_NUM_COHERENCY_TESTS (10) +IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + PVRSRV_DEVICE_CONFIG *psDevConfig; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + if (psDevInfo->ui32CoherencyTestsDone >= MAX_NUM_COHERENCY_TESTS) + { + return IMG_FALSE; + } + + psDevConfig = ((RGX_LAYER_PARAMS*)hPrivate)->psDevConfig; + + return PVRSRVSystemSnoopingOfCPUCache(psDevConfig); +} + +static PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Wait for Slave Port to be Ready */ + eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); + if (eError != PVRSRV_OK) return eError; + + /* Issue a Write */ + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr); + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT, ui32RegValue); + + return eError; +} + +/* + * The fabric coherency test is performed when platform supports fabric coherency + * either in the form of ACE-lite or Full-ACE. 
This test is done quite early + * with the firmware processor quiescent and makes exclusive use of the slave + * port interface for reading/writing through the device memory hierarchy. The + * rationale for the test is to ensure that what the CPU writes to its dcache + * is visible to the GPU via coherency snoop miss/hit and vice-versa without + * any intervening cache maintenance by the writing agent. + */ +PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_UINT32 *pui32FabricCohTestBufferCpuVA; + DEVMEM_MEMDESC *psFabricCohTestBufferMemDesc; + RGXFWIF_DEV_VIRTADDR sFabricCohTestBufferDevVA; + IMG_DEVMEM_SIZE_T uiFabricCohTestBlockSize = sizeof(IMG_UINT64); + IMG_DEVMEM_ALIGN_T uiFabricCohTestBlockAlign = sizeof(IMG_UINT64); + IMG_UINT64 ui64SegOutAddrTopCached = 0; + IMG_UINT64 ui64SegOutAddrTopUncached = 0; + IMG_UINT32 ui32SLCCTRL = 0; + IMG_UINT32 ui32OddEven; + IMG_BOOL bFeatureS7; + IMG_UINT32 ui32TestType; + IMG_UINT32 ui32OddEvenSeed = 1; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_BOOL bFullTestPassed = IMG_TRUE; + IMG_BOOL bSubTestPassed = IMG_FALSE; + IMG_BOOL bExit = IMG_FALSE; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + PVR_LOG(("Starting fabric coherency test .....")); + + bFeatureS7 = RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE); + + if (bFeatureS7) + { + ui64SegOutAddrTopCached = RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(MMU_CONTEXT_MAPPING_FWIF); + ui64SegOutAddrTopUncached = RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED(MMU_CONTEXT_MAPPING_FWIF); + + /* Configure META to use SLC force-linefill for the bootloader segment */ + RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6), + (ui64SegOutAddrTopUncached | RGXFW_BOOTLDR_DEVV_ADDR) >> 32); + } + else + { + /* Bypass the SLC when IO coherency is enabled */ + ui32SLCCTRL = RGXReadReg32(hPrivate, RGX_CR_SLC_CTRL_BYPASS); + RGXWriteReg32(hPrivate, + RGX_CR_SLC_CTRL_BYPASS, + 
ui32SLCCTRL | RGX_CR_SLC_CTRL_BYPASS_BYP_CC_EN); + } + + /* Size and align are 'expanded' because we request an export align allocation */ + eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap), + &uiFabricCohTestBlockSize, + &uiFabricCohTestBlockAlign); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "DevmemExportalignAdjustSizeAndAlign() error: %s, exiting", + PVRSRVGetErrorString(eError))); + goto e0; + } + + /* Allocate, acquire cpu address and set firmware address */ + eError = DevmemFwAllocateExportable(psDevInfo->psDeviceNode, + uiFabricCohTestBlockSize, + uiFabricCohTestBlockAlign, + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT | + PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE, + "FwExFabricCoherencyTestBuffer", + &psFabricCohTestBufferMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "DevmemFwAllocateExportable() error: %s, exiting", + PVRSRVGetErrorString(eError))); + goto e0; + } + + eError = DevmemAcquireCpuVirtAddr(psFabricCohTestBufferMemDesc, (void **) &pui32FabricCohTestBufferCpuVA); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "DevmemAcquireCpuVirtAddr() error: %s, exiting", + PVRSRVGetErrorString(eError))); + goto e1; + } + + /* Create a FW address which is uncached in the Meta DCache and in the SLC + * using the Meta bootloader segment. + * This segment is the only one configured correctly out of reset + * (when this test is meant to be executed). 
+ */ + eError = RGXSetFirmwareAddress(&sFabricCohTestBufferDevVA, + psFabricCohTestBufferMemDesc, + 0, + RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", e2); + + /* Undo most of the FW mappings done by RGXSetFirmwareAddress */ + sFabricCohTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_META_CACHE_MASK; + sFabricCohTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK; + sFabricCohTestBufferDevVA.ui32Addr -= RGXFW_SEGMMU_DATA_BASE_ADDRESS; + + /* Map the buffer in the bootloader segment as uncached */ + sFabricCohTestBufferDevVA.ui32Addr |= RGXFW_BOOTLDR_META_ADDR; + sFabricCohTestBufferDevVA.ui32Addr |= RGXFW_SEGMMU_DATA_META_UNCACHED; + + for (ui32TestType = 0; ui32TestType < 4 && bExit == IMG_FALSE; ui32TestType++) + { + IMG_CPU_PHYADDR sCpuPhyAddr; + IMG_BOOL bValid; + PMR *psPMR; + + /* Acquire underlying PMR CpuPA in preparation for cache maintenance */ + (void) DevmemLocalGetImportHandle(psFabricCohTestBufferMemDesc, (void**)&psPMR); + eError = PMR_CpuPhysAddr(psPMR, OSGetPageShift(), 1, 0, &sCpuPhyAddr, &bValid); + if (eError != PVRSRV_OK || bValid == IMG_FALSE) + { + PVR_DPF((PVR_DBG_ERROR, + "PMR_CpuPhysAddr error: %s, exiting", + PVRSRVGetErrorString(eError))); + bExit = IMG_TRUE; + continue; + } + + /* Here we do two passes [runs] mostly to account for the effects of using + the different seed (i.e. 
ui32OddEvenSeed) value to read and write */ + for (ui32OddEven = 1; ui32OddEven < 3 && bExit == IMG_FALSE; ui32OddEven++) + { + IMG_UINT32 i; + +#if defined(DEBUG) + switch (ui32TestType) + { + case 0: + PVR_LOG(("CPU:Write/GPU:Read Snoop Miss Test: starting [run #%u]", ui32OddEven)); + break; + case 1: + PVR_LOG(("GPU:Write/CPU:Read Snoop Miss Test: starting [run #%u]", ui32OddEven)); + break; + case 2: + PVR_LOG(("CPU:Write/GPU:Read Snoop Hit Test: starting [run #%u]", ui32OddEven)); + break; + case 3: + PVR_LOG(("GPU:Write/CPU:Read Snoop Hit Test: starting [run #%u]", ui32OddEven)); + break; + default: + PVR_LOG(("Internal error, exiting test")); + eError = PVRSRV_ERROR_INIT_FAILURE; + bExit = IMG_TRUE; + continue; + } +#endif + + for (i = 0; i < 2 && bExit == IMG_FALSE; i++) + { + IMG_UINT32 ui32FWAddr; + IMG_UINT32 ui32FWValue; + IMG_UINT32 ui32FWValue2; + IMG_CPU_PHYADDR sCpuPhyAddrStart; + IMG_CPU_PHYADDR sCpuPhyAddrEnd; + IMG_UINT32 ui32LastFWValue = ~0; + IMG_UINT32 ui32Offset = i * sizeof(IMG_UINT32); + + /* Calculate next address and seed value to write/read from slave-port */ + ui32FWAddr = sFabricCohTestBufferDevVA.ui32Addr + ui32Offset; + sCpuPhyAddrStart.uiAddr = sCpuPhyAddr.uiAddr + ui32Offset; + sCpuPhyAddrEnd.uiAddr = sCpuPhyAddrStart.uiAddr; + ui32OddEvenSeed += 1; + + if (ui32TestType & 0x1) + { + ui32FWValue = i + ui32OddEvenSeed; + + switch (ui32TestType) + { + case 1: + case 3: + /* Clean dcache to ensure there is no stale data in dcache that might over-write + what we are about to write via slave-port here because if it drains from the CPU + dcache before we read it, it would corrupt what we are going to read back via + the CPU */ + sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32); + CacheOpExec(psDevInfo->psDeviceNode, + (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset, + (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32), + sCpuPhyAddrStart, + sCpuPhyAddrEnd, + PVRSRV_CACHE_OP_CLEAN); + break; + } + + /* Write the 
value using the RGX slave-port interface */ + eError = RGXWriteMETAAddr(psDevInfo, ui32FWAddr, ui32FWValue); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXWriteMETAAddr error: %s, exiting", + PVRSRVGetErrorString(eError))); + bExit = IMG_TRUE; + continue; + } + + /* Read back value using RGX slave-port interface, this is used + as a sort of memory barrier for the above write */ + eError = RGXReadMETAAddr(psDevInfo, ui32FWAddr, &ui32FWValue2); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXReadMETAAddr error: %s, exiting", + PVRSRVGetErrorString(eError))); + bExit = IMG_TRUE; + continue; + } + else if (ui32FWValue != ui32FWValue2) + { + /* Fatal error, we should abort */ + PVR_DPF((PVR_DBG_ERROR, + "At Offset: %d, RAW via SlavePort failed: expected: %x, got: %x", + i, + ui32FWValue, + ui32FWValue2)); + eError = PVRSRV_ERROR_INIT_FAILURE; + bExit = IMG_TRUE; + continue; + } + + if (! PVRSRVSystemSnoopingOfDeviceCache(psDevInfo->psDeviceNode->psDevConfig)) + { + /* Invalidate dcache to ensure that any prefetched data by the CPU from this memory + region is discarded before we read (i.e. next read must trigger a cache miss). 
+ If there is snooping of device cache, then any prefetching done by the CPU + will reflect the most up to date datum writing by GPU into said location, + that is to say prefetching must be coherent so CPU d-flush is not needed */ + sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32); + CacheOpExec(psDevInfo->psDeviceNode, + (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset, + (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32), + sCpuPhyAddrStart, + sCpuPhyAddrEnd, + PVRSRV_CACHE_OP_INVALIDATE); + } + } + else + { + IMG_UINT32 ui32RAWCpuValue; + + /* Ensures line is in dcache */ + ui32FWValue = IMG_UINT32_MAX; + + /* Dirty allocation in dcache */ + ui32RAWCpuValue = i + ui32OddEvenSeed; + pui32FabricCohTestBufferCpuVA[i] = i + ui32OddEvenSeed; + + /* Flush possible cpu store-buffer(ing) on LMA */ + OSWriteMemoryBarrier(); + + switch (ui32TestType) + { + case 0: + /* Flush dcache to force subsequent incoming CPU-bound snoop to miss so + memory is coherent before the SlavePort reads */ + sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32); + CacheOpExec(psDevInfo->psDeviceNode, + (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset, + (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32), + sCpuPhyAddrStart, + sCpuPhyAddrEnd, + PVRSRV_CACHE_OP_FLUSH); + break; + } + + /* Read back value using RGX slave-port interface */ + eError = RGXReadMETAAddr(psDevInfo, ui32FWAddr, &ui32FWValue); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXReadWithSP error: %s, exiting", + PVRSRVGetErrorString(eError))); + bExit = IMG_TRUE; + continue; + } + + /* We are being mostly paranoid here, just to account for CPU RAW operations */ + sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32); + CacheOpExec(psDevInfo->psDeviceNode, + (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset, + (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32), + sCpuPhyAddrStart, + sCpuPhyAddrEnd, + PVRSRV_CACHE_OP_FLUSH); + if 
(pui32FabricCohTestBufferCpuVA[i] != ui32RAWCpuValue) + { + /* Fatal error, we should abort */ + PVR_DPF((PVR_DBG_ERROR, + "At Offset: %d, RAW by CPU failed: expected: %x, got: %x", + i, + ui32RAWCpuValue, + pui32FabricCohTestBufferCpuVA[i])); + eError = PVRSRV_ERROR_INIT_FAILURE; + bExit = IMG_TRUE; + continue; + } + } + + /* Compare to see if sub-test passed */ + if (pui32FabricCohTestBufferCpuVA[i] == ui32FWValue) + { + bSubTestPassed = IMG_TRUE; + } + else + { + bSubTestPassed = IMG_FALSE; + bFullTestPassed = IMG_FALSE; + eError = PVRSRV_ERROR_INIT_FAILURE; + if (ui32LastFWValue != ui32FWValue) + { +#if defined(DEBUG) + PVR_LOG(("At Offset: %d, Expected: %x, Got: %x", + i, + (ui32TestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i], + (ui32TestType & 0x1) ? pui32FabricCohTestBufferCpuVA[i] : ui32FWValue)); +#endif + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "test encountered unexpected error, exiting")); + eError = PVRSRV_ERROR_INIT_FAILURE; + bExit = IMG_TRUE; + continue; + } + } + + ui32LastFWValue = (ui32TestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i]; + } + +#if defined(DEBUG) + if (bExit) + { + continue; + } + + switch (ui32TestType) + { + case 0: + PVR_LOG(("CPU:Write/GPU:Read Snoop Miss Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); + break; + case 1: + PVR_LOG(("GPU:Write/CPU:Read Snoop Miss Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); + break; + case 2: + PVR_LOG(("CPU:Write/GPU:Read Snoop Hit Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); + break; + case 3: + PVR_LOG(("GPU:Write/CPU:Read Snoop Hit Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? 
"PASSED" : "FAILED")); + break; + default: + PVR_LOG(("Internal error, exiting test")); + bExit = IMG_TRUE; + continue; + } +#endif + } + } + + RGXUnsetFirmwareAddress(psFabricCohTestBufferMemDesc); +e2: + DevmemReleaseCpuVirtAddr(psFabricCohTestBufferMemDesc); +e1: + DevmemFwUnmapAndFree(psDevInfo, psFabricCohTestBufferMemDesc); + +e0: + if (bFeatureS7) + { + /* Restore bootloader segment settings */ + RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6), + (ui64SegOutAddrTopCached | RGXFW_BOOTLDR_DEVV_ADDR) >> 32); + } + else + { + /* Restore SLC bypass settings */ + RGXWriteReg32(hPrivate, RGX_CR_SLC_CTRL_BYPASS, ui32SLCCTRL); + } + + bFullTestPassed = bExit ? IMG_FALSE: bFullTestPassed; + if (bFullTestPassed) + { + PVR_LOG(("fabric coherency test: PASSED")); + psDevInfo->ui32CoherencyTestsDone = MAX_NUM_COHERENCY_TESTS + 1; + } + else + { + PVR_LOG(("fabric coherency test: FAILED")); + psDevInfo->ui32CoherencyTestsDone++; + } + + return eError; +} + +IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + return (psDevInfo->sDevFeatureCfg.ui64Features & ui64Feature) != 0; +} + +IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + return (psDevInfo->sDevFeatureCfg.ui64ErnsBrns & ui64ErnsBrns) != 0; +} + +IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_BANKS)) + { + return 0; + } + return 
RGX_GET_FEATURE_VALUE(psDevInfo, SLC_BANKS); +} + +IMG_UINT32 RGXGetDeviceSLCSize(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_SIZE_IN_BYTES)) + { + return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_SIZE_IN_BYTES); + } + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_SIZE_IN_KILOBYTES)) + { + return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_SIZE_IN_KILOBYTES) * 1024; + } + + return 0; +} + +IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)) + { + return 0; + } + return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS); +} + +IMG_UINT32 RGXGetDevicePhysBusWidth(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, PHYS_BUS_WIDTH)) + { + return 0; + } + return RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH); +} + +IMG_BOOL RGXDevicePA0IsValid(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + return psDevInfo->sLayerParams.bDevicePA0IsValid; +} + +void RGXAcquireBootCodeAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootCodeAddr) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + *psBootCodeAddr = psDevInfo->sFWCodeDevVAddrBase; +} + +void RGXAcquireBootDataAddr(const void 
*hPrivate, IMG_DEV_VIRTADDR *psBootDataAddr) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + *psBootDataAddr = psDevInfo->sFWDataDevVAddrBase; +} + +IMG_BOOL RGXDeviceIrqEventRx(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + return RGXFwIrqEventRx(psDevInfo); +} diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxlayer_impl.h b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxlayer_impl.h new file mode 100644 index 000000000000..4d7c0f0c7798 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxlayer_impl.h @@ -0,0 +1,67 @@ +/*************************************************************************/ /*! +@File +@Title Header for DDK implementation of the Services abstraction layer +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for DDK implementation of the Services abstraction layer +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGXLAYER_IMPL_H) +#define RGXLAYER_IMPL_H + +#include "rgxlayer.h" +#include "device_connection.h" + +typedef struct _RGX_LAYER_PARAMS_ +{ + void *psDevInfo; + void *psDevConfig; +#if defined(PDUMP) + IMG_UINT32 ui32PdumpFlags; +#endif + + IMG_DEV_PHYADDR sPCAddr; + IMG_DEV_PHYADDR sGPURegAddr; + IMG_DEV_PHYADDR sBootRemapAddr; + IMG_DEV_PHYADDR sCodeRemapAddr; + IMG_DEV_PHYADDR sDataRemapAddr; + IMG_DEV_PHYADDR sTrampolineRemapAddr; + IMG_BOOL bDevicePA0IsValid; +} RGX_LAYER_PARAMS; + +#endif /* RGXLAYER_IMPL_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmem.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmem.c new file mode 100644 index 000000000000..a31b9fbb121b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmem.c @@ -0,0 +1,764 @@ +/*************************************************************************/ /*! +@File +@Title RGX memory context management +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX memory context management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pvr_debug.h" +#include "rgxmem.h" +#include "allocmem.h" +#include "devicemem.h" +#include "devicemem_server_utils.h" +#include "devicemem_pdump.h" +#include "rgxdevice.h" +#include "rgx_fwif_km.h" +#include "rgxfwutils.h" +#include "pdump_km.h" +#include "pdump_physmem.h" +#include "pvr_notifier.h" +#include "pvrsrv.h" +#include "sync_internal.h" +#include "rgx_memallocflags.h" +#include "rgx_bvnc_defs_km.h" +#include "info_page.h" + +/* + * TestAndReset of gui32CacheOps is protected by the device power-lock, + * in the following way: + * + * LOCK(Power-Lock); + * ui32CacheOps = _GetCacheOpsPending(); // Gets gui32CacheOpps + * if (ui32CacheOps) + * { + * _PrepareAndSubmitCacheCommand(ui32CacheOps); + * _CacheOpsCompleted(ui32CacheOps); // Resets gui32CacheOpps + * } + * UNLOCK(Power-lock); + */ +static IMG_UINT32 gui32CacheOpps; + + +typedef struct _SERVER_MMU_CONTEXT_ { + DEVMEM_MEMDESC *psFWMemContextMemDesc; + MMU_CONTEXT *psMMUContext; + IMG_PID uiPID; + IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME]; + DLLIST_NODE sNode; + PVRSRV_RGXDEV_INFO *psDevInfo; +} SERVER_MMU_CONTEXT; + + + +void RGXMMUCacheInvalidate(PVRSRV_DEVICE_NODE *psDeviceNode, + MMU_CONTEXT *psMMUContext, + MMU_LEVEL eMMULevel, + IMG_BOOL bUnmap) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + PVR_UNREFERENCED_PARAMETER(psMMUContext); /* Not used on Rogue, but needed by MMU common */ + PVR_UNREFERENCED_PARAMETER(bUnmap); + + switch (eMMULevel) + { + case MMU_LEVEL_3: gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_PC; + break; + case MMU_LEVEL_2: gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_PD; + break; + case MMU_LEVEL_1: gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_PT; + if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT))) + { + gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_TLB; + } + break; + default: + PVR_ASSERT(0); + break; + } +} + +static inline 
IMG_UINT32 _GetCacheOpsPending(void) +{ + return gui32CacheOpps; +} + +static inline void _CacheOpsCompleted(IMG_UINT32 ui32CacheOpsServiced) +{ + /* Mark in the global cache-ops that ui32CacheOpsServiced were submitted */ + gui32CacheOpps ^= ui32CacheOpsServiced; +} + +static +PVRSRV_ERROR _PrepareAndSubmitCacheCommand(PVRSRV_DEVICE_NODE *psDeviceNode, + RGXFWIF_DM eDM, IMG_UINT32 ui32CacheOps, + IMG_BOOL bInterrupt, + IMG_UINT32 *pui32MMUInvalidateUpdate) +{ + PVRSRV_ERROR eError; + RGXFWIF_KCCB_CMD sFlushCmd; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + *pui32MMUInvalidateUpdate = psDeviceNode->ui32NextMMUInvalidateUpdate++; + + /* Setup cmd and add the device nodes sync object */ + sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_MMUCACHE; + sFlushCmd.uCmdData.sMMUCacheData.ui32MMUCacheSyncUpdateValue = *pui32MMUInvalidateUpdate; + SyncPrimGetFirmwareAddr(psDeviceNode->psMMUCacheSyncPrim, + &sFlushCmd.uCmdData.sMMUCacheData.sMMUCacheSync.ui32Addr); + sFlushCmd.uCmdData.sMMUCacheData.ui32Flags = + ui32CacheOps | + /* Set which memory context this command is for (all ctxs for now) */ + (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT) ? RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL : 0) | + (bInterrupt ? 
RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT : 0); + +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Submit MMU flush and invalidate (flags = 0x%08x)", + sFlushCmd.uCmdData.sMMUCacheData.ui32Flags); +#endif + + /* Schedule MMU cache command */ + eError = RGXSendCommand(psDevInfo, &sFlushCmd, PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to schedule MMU cache command to " + "DM=%d with error (%u)", __func__, eDM, eError)); + psDeviceNode->ui32NextMMUInvalidateUpdate--; + } + + return eError; +} + +PVRSRV_ERROR RGXMMUCacheInvalidateKick(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 *pui32MMUInvalidateUpdate, + IMG_BOOL bInterrupt) +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32CacheOps; + + eError = PVRSRVPowerLock(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", + __func__, PVRSRVGetErrorString(eError))); + goto RGXMMUCacheInvalidateKick_exit; + } + + ui32CacheOps = _GetCacheOpsPending(); + if (ui32CacheOps == 0) + { + eError = PVRSRV_OK; + goto _PowerUnlockAndReturnErr; + } + + /* Ensure device is powered up before sending cache command */ + PDUMPPOWCMDSTART(); + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, + PVRSRV_DEV_POWER_STATE_ON, + IMG_FALSE); + PDUMPPOWCMDEND(); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition RGX to ON (%s)", + __func__, PVRSRVGetErrorString(eError))); + goto _PowerUnlockAndReturnErr; + } + + eError = _PrepareAndSubmitCacheCommand(psDeviceNode, RGXFWIF_DM_GP, + ui32CacheOps, bInterrupt, + pui32MMUInvalidateUpdate); + if (eError != PVRSRV_OK) + { + /* failed to submit cache operations, return failure */ + goto _PowerUnlockAndReturnErr; + } + + /* Mark the cache ops we serviced */ + _CacheOpsCompleted(ui32CacheOps); + +_PowerUnlockAndReturnErr: + PVRSRVPowerUnlock(psDeviceNode); + +RGXMMUCacheInvalidateKick_exit: + return eError; +} + +PVRSRV_ERROR 
RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO *psDevInfo, + SERVER_MMU_CONTEXT *psServerMMUContext, + RGXFWIF_DM eDM, + IMG_UINT32 *pui32MMUInvalidateUpdate, + IMG_BOOL bInterrupt) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; + PVRSRV_ERROR eError; + IMG_UINT32 ui32CacheOps; + PVR_UNREFERENCED_PARAMETER(psServerMMUContext); + + /* Caller should ensure that power lock is held before calling this function */ + PVR_ASSERT(OSLockIsLocked(psDeviceNode->hPowerLock)); + + ui32CacheOps = _GetCacheOpsPending(); + if (ui32CacheOps == 0) + { + return PVRSRV_OK; + } + + eError = _PrepareAndSubmitCacheCommand(psDeviceNode, eDM, ui32CacheOps, + bInterrupt, pui32MMUInvalidateUpdate); + if (eError != PVRSRV_OK) + { + /* failed to submit cache operations, return failure */ + return eError; + } + + _CacheOpsCompleted(ui32CacheOps); + + return eError; +} + +/* page fault debug is the only current use case for needing to find process info + * after that process device memory context has been destroyed + */ + +typedef struct _UNREGISTERED_MEMORY_CONTEXT_ +{ + IMG_PID uiPID; + IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME]; + IMG_DEV_PHYADDR sPCDevPAddr; +} UNREGISTERED_MEMORY_CONTEXT; + +/* must be a power of two */ +#define UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE (1 << 3) + +static UNREGISTERED_MEMORY_CONTEXT gasUnregisteredMemCtxs[UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE]; +static IMG_UINT32 gui32UnregisteredMemCtxsHead; + +/* record a device memory context being unregistered. 
+ * the list of unregistered contexts can be used to find the PID and process name + * belonging to a memory context which has been destroyed + */ +static void _RecordUnregisteredMemoryContext(PVRSRV_RGXDEV_INFO *psDevInfo, SERVER_MMU_CONTEXT *psServerMMUContext) +{ + UNREGISTERED_MEMORY_CONTEXT *psRecord; + + OSLockAcquire(psDevInfo->hMMUCtxUnregLock); + + psRecord = &gasUnregisteredMemCtxs[gui32UnregisteredMemCtxsHead]; + + gui32UnregisteredMemCtxsHead = (gui32UnregisteredMemCtxsHead + 1) + & (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1); + + OSLockRelease(psDevInfo->hMMUCtxUnregLock); + + psRecord->uiPID = psServerMMUContext->uiPID; + if (MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &psRecord->sPCDevPAddr) != PVRSRV_OK) + { + PVR_LOG(("_RecordUnregisteredMemoryContext: Failed to get PC address for memory context")); + } + OSStringLCopy(psRecord->szProcessName, psServerMMUContext->szProcessName, sizeof(psRecord->szProcessName)); +} + + +void RGXUnregisterMemoryContext(IMG_HANDLE hPrivData) +{ + SERVER_MMU_CONTEXT *psServerMMUContext = hPrivData; + PVRSRV_RGXDEV_INFO *psDevInfo = psServerMMUContext->psDevInfo; + + OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock); + dllist_remove_node(&psServerMMUContext->sNode); + OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock); + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + _RecordUnregisteredMemoryContext(psDevInfo, psServerMMUContext); + } + + /* + * Release the page catalogue address acquired in RGXRegisterMemoryContext(). + */ + MMU_ReleaseBaseAddr(NULL); + + /* + * Free the firmware memory context. 
+ */ + DevmemFwUnmapAndFree(psDevInfo, psServerMMUContext->psFWMemContextMemDesc); + + OSFreeMem(psServerMMUContext); +} + + +/* + * RGXRegisterMemoryContext + */ +PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode, + MMU_CONTEXT *psMMUContext, + IMG_HANDLE *hPrivData) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + DEVMEM_FLAGS_T uiFWMemContextMemAllocFlags; + RGXFWIF_FWMEMCONTEXT *psFWMemContext; + DEVMEM_MEMDESC *psFWMemContextMemDesc; + SERVER_MMU_CONTEXT *psServerMMUContext; + + if (psDevInfo->psKernelMMUCtx == NULL) + { + /* + * This must be the creation of the Kernel memory context. Take a copy + * of the MMU context for use when programming the BIF. + */ + psDevInfo->psKernelMMUCtx = psMMUContext; + } + else + { + psServerMMUContext = OSAllocMem(sizeof(*psServerMMUContext)); + if (psServerMMUContext == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc_server_ctx; + } + + psServerMMUContext->psDevInfo = psDevInfo; + + /* + * This FW MemContext is only mapped into kernel for initialisation purposes. + * Otherwise this allocation is only used by the FW. + * Therefore the GPU cache doesn't need coherency, + * and write-combine is suffice on the CPU side (WC buffer will be flushed at any kick) + */ + uiFWMemContextMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE; + + /* + Allocate device memory for the firmware memory context for the new + application. 
+ */ + PDUMPCOMMENT("Allocate RGX firmware memory context"); + eError = DevmemFwAllocate(psDevInfo, + sizeof(*psFWMemContext), + uiFWMemContextMemAllocFlags, + "FwMemoryContext", + &psFWMemContextMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate firmware memory context (%u)", + __func__, + eError)); + goto fail_alloc_fw_ctx; + } + + /* + Temporarily map the firmware memory context to the kernel. + */ + eError = DevmemAcquireCpuVirtAddr(psFWMemContextMemDesc, + (void **)&psFWMemContext); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map firmware memory context (%u)", + __func__, + eError)); + goto fail_acquire_cpu_addr; + } + + /* + * Write the new memory context's page catalogue into the firmware memory + * context for the client. + */ + eError = MMU_AcquireBaseAddr(psMMUContext, &psFWMemContext->sPCDevPAddr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire Page Catalogue address (%u)", + __func__, + eError)); + DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc); + goto fail_acquire_base_addr; + } + + /* + * Set default values for the rest of the structure. 
+ */ + psFWMemContext->uiPageCatBaseRegID = RGXFW_BIF_INVALID_PCREG; + psFWMemContext->uiBreakpointAddr = 0; + psFWMemContext->uiBPHandlerAddr = 0; + psFWMemContext->uiBreakpointCtl = 0; + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +{ + IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0; + IMG_BOOL bOSidAxiProt; + + MMU_GetOSids(psMMUContext, &ui32OSid, &ui32OSidReg, &bOSidAxiProt); + + psFWMemContext->ui32OSid = ui32OSidReg; + psFWMemContext->bOSidAxiProt = bOSidAxiProt; +} +#endif + +#if defined(PDUMP) + { + IMG_CHAR aszName[PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiOffset = 0; + + /* + * Dump the Mem context allocation + */ + DevmemPDumpLoadMem(psFWMemContextMemDesc, 0, sizeof(*psFWMemContext), PDUMP_FLAGS_CONTINUOUS); + + + /* + * Obtain a symbolic addr of the mem context structure + */ + eError = DevmemPDumpPageCatBaseToSAddr(psFWMemContextMemDesc, + &uiOffset, + aszName, + PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to generate a Dump Page Catalogue address (%u)", + __func__, + eError)); + DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc); + goto fail_pdump_cat_base_addr; + } + + /* + * Dump the Page Cat tag in the mem context (symbolic address) + */ + eError = MMU_PDumpWritePageCatBase(psMMUContext, + aszName, + uiOffset, + 8, /* 64-bit register write */ + 0, + 0, + 0); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire Page Catalogue address (%u)", + __func__, + eError)); + DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc); + goto fail_pdump_cat_base; + } + } +#endif + + /* + * Release kernel address acquired above. + */ + DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc); + + /* + * Store the process information for this device memory context + * for use with the host page-fault analysis. 
+ */ + psServerMMUContext->uiPID = OSGetCurrentClientProcessIDKM(); + psServerMMUContext->psMMUContext = psMMUContext; + psServerMMUContext->psFWMemContextMemDesc = psFWMemContextMemDesc; + OSStringLCopy(psServerMMUContext->szProcessName, + OSGetCurrentClientProcessNameKM(), + sizeof(psServerMMUContext->szProcessName)); + + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "New memory context: Process Name: %s PID: %u (0x%08X)", + psServerMMUContext->szProcessName, + psServerMMUContext->uiPID, + psServerMMUContext->uiPID); + + OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock); + dllist_add_to_tail(&psDevInfo->sMemoryContextList, &psServerMMUContext->sNode); + OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock); + + *hPrivData = psServerMMUContext; + } + + return PVRSRV_OK; + +#if defined(PDUMP) +fail_pdump_cat_base: +fail_pdump_cat_base_addr: + MMU_ReleaseBaseAddr(NULL); +#endif +fail_acquire_base_addr: + /* Done before jumping to the fail point as the release is done before exit */ +fail_acquire_cpu_addr: + DevmemFwUnmapAndFree(psDevInfo, psServerMMUContext->psFWMemContextMemDesc); +fail_alloc_fw_ctx: + OSFreeMem(psServerMMUContext); +fail_alloc_server_ctx: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +DEVMEM_MEMDESC *RGXGetFWMemDescFromMemoryContextHandle(IMG_HANDLE hPriv) +{ + SERVER_MMU_CONTEXT *psMMUContext = (SERVER_MMU_CONTEXT *) hPriv; + + return psMMUContext->psFWMemContextMemDesc; +} + +void RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_DEV_VIRTADDR *psDevVAddr, + IMG_DEV_PHYADDR *psDevPAddr, + MMU_FAULT_DATA *psOutFaultData) +{ + IMG_DEV_PHYADDR sPCDevPAddr; + DLLIST_NODE *psNode, *psNext; + + OSWRLockAcquireRead(psDevInfo->hMemoryCtxListLock); + + dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext) + { + SERVER_MMU_CONTEXT *psServerMMUContext = + IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode); + + if (MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &sPCDevPAddr) != PVRSRV_OK) + { + PVR_LOG(("Failed to get 
PC address for memory context")); + continue; + } + + if (psDevPAddr->uiAddr == sPCDevPAddr.uiAddr) + { + MMU_CheckFaultAddress(psServerMMUContext->psMMUContext, psDevVAddr, psOutFaultData); + goto out_unlock; + } + } + + /* Lastly check for fault in the kernel allocated memory */ + if (MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sPCDevPAddr) != PVRSRV_OK) + { + PVR_LOG(("Failed to get PC address for kernel memory context")); + } + + if (psDevPAddr->uiAddr == sPCDevPAddr.uiAddr) + { + MMU_CheckFaultAddress(psDevInfo->psKernelMMUCtx, psDevVAddr, psOutFaultData); + } + +out_unlock: + OSWRLockReleaseRead(psDevInfo->hMemoryCtxListLock); +} + +/* given the physical address of a page catalogue, searches for a corresponding + * MMU context and if found, provides the caller details of the process. + * Returns IMG_TRUE if a process is found. + */ +IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR sPCAddress, + RGXMEM_PROCESS_INFO *psInfo) +{ + IMG_BOOL bRet = IMG_FALSE; + DLLIST_NODE *psNode, *psNext; + SERVER_MMU_CONTEXT *psServerMMUContext = NULL; + + /* check if the input PC addr corresponds to an active memory context */ + dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext) + { + SERVER_MMU_CONTEXT *psThisMMUContext = + IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode); + IMG_DEV_PHYADDR sPCDevPAddr; + + if (MMU_AcquireBaseAddr(psThisMMUContext->psMMUContext, &sPCDevPAddr) != PVRSRV_OK) + { + PVR_LOG(("Failed to get PC address for memory context")); + continue; + } + + if (sPCAddress.uiAddr == sPCDevPAddr.uiAddr) + { + psServerMMUContext = psThisMMUContext; + break; + } + } + + if (psServerMMUContext != NULL) + { + psInfo->uiPID = psServerMMUContext->uiPID; + OSStringLCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName)); + psInfo->bUnregistered = IMG_FALSE; + bRet = IMG_TRUE; + } + /* else check if the input PC addr corresponds to the firmware */ + else + { + IMG_DEV_PHYADDR 
sKernelPCDevPAddr; + PVRSRV_ERROR eError; + + eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sKernelPCDevPAddr); + + if (eError != PVRSRV_OK) + { + PVR_LOG(("Failed to get PC address for kernel memory context")); + } + else + { + if (sPCAddress.uiAddr == sKernelPCDevPAddr.uiAddr) + { + psInfo->uiPID = RGXMEM_SERVER_PID_FIRMWARE; + OSStringLCopy(psInfo->szProcessName, "Firmware", sizeof(psInfo->szProcessName)); + psInfo->bUnregistered = IMG_FALSE; + bRet = IMG_TRUE; + } + } + } + + if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) && + (bRet == IMG_FALSE)) + { + /* no active memory context found with the given PC address. + * Check the list of most recently freed memory contexts. + */ + IMG_UINT32 i; + + OSLockAcquire(psDevInfo->hMMUCtxUnregLock); + + /* iterate through the list of unregistered memory contexts + * from newest (one before the head) to the oldest (the current head) + */ + i = gui32UnregisteredMemCtxsHead; + + do + { + UNREGISTERED_MEMORY_CONTEXT *psRecord; + + i ? 
i-- : (i = (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1)); + + psRecord = &gasUnregisteredMemCtxs[i]; + + if (psRecord->sPCDevPAddr.uiAddr == sPCAddress.uiAddr) + { + psInfo->uiPID = psRecord->uiPID; + OSStringLCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName)); + psInfo->bUnregistered = IMG_TRUE; + bRet = IMG_TRUE; + break; + } + } while (i != gui32UnregisteredMemCtxsHead); + + OSLockRelease(psDevInfo->hMMUCtxUnregLock); + + } + + return bRet; +} + +IMG_BOOL RGXPCPIDToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_PID uiPID, + RGXMEM_PROCESS_INFO *psInfo) +{ + IMG_BOOL bRet = IMG_FALSE; + DLLIST_NODE *psNode, *psNext; + SERVER_MMU_CONTEXT *psServerMMUContext = NULL; + + /* check if the input PID corresponds to an active memory context */ + dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext) + { + SERVER_MMU_CONTEXT *psThisMMUContext = + IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode); + + if (psThisMMUContext->uiPID == uiPID) + { + psServerMMUContext = psThisMMUContext; + break; + } + } + + if (psServerMMUContext != NULL) + { + psInfo->uiPID = psServerMMUContext->uiPID; + OSStringLCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName)); + psInfo->bUnregistered = IMG_FALSE; + bRet = IMG_TRUE; + } + /* else check if the input PID corresponds to the firmware */ + else if (uiPID == RGXMEM_SERVER_PID_FIRMWARE) + { + psInfo->uiPID = RGXMEM_SERVER_PID_FIRMWARE; + OSStringLCopy(psInfo->szProcessName, "Firmware", sizeof(psInfo->szProcessName)); + psInfo->bUnregistered = IMG_FALSE; + bRet = IMG_TRUE; + } + + if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) && + (bRet == IMG_FALSE)) + { + /* if the PID didn't correspond to an active context or the + * FW address then see if it matches a recently unregistered context + */ + const IMG_UINT32 ui32Mask = UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1; + IMG_UINT32 i, j; + + 
OSLockAcquire(psDevInfo->hMMUCtxUnregLock); + + for (i = (gui32UnregisteredMemCtxsHead - 1) & ui32Mask, j = 0; + j < UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE; + i = (gui32UnregisteredMemCtxsHead - 1) & ui32Mask, j++) + { + UNREGISTERED_MEMORY_CONTEXT *psRecord = &gasUnregisteredMemCtxs[i]; + + if (psRecord->uiPID == uiPID) + { + psInfo->uiPID = psRecord->uiPID; + OSStringLCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName)); + psInfo->bUnregistered = IMG_TRUE; + bRet = IMG_TRUE; + break; + } + } + + OSLockRelease(psDevInfo->hMMUCtxUnregLock); + } + + return bRet; +} + +/****************************************************************************** + End of file (rgxmem.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmem.h b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmem.h new file mode 100644 index 000000000000..de457d918ec5 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmem.h @@ -0,0 +1,135 @@ +/*************************************************************************/ /*! +@File +@Title RGX memory context management +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for RGX memory context management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__RGXMEM_H__) +#define __RGXMEM_H__ + +#include "pvrsrv_error.h" +#include "device.h" +#include "mmu_common.h" +#include "rgxdevice.h" + +#define RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME 16 + +/* this PID denotes the firmware */ +#define RGXMEM_SERVER_PID_FIRMWARE 0xFFFFFFFF + +/* this PID denotes the PM */ +#define RGXMEM_SERVER_PID_PM 0xEFFFFFFF + +typedef struct _RGXMEM_PROCESS_INFO_ +{ + IMG_PID uiPID; + IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME]; + IMG_BOOL bUnregistered; +} RGXMEM_PROCESS_INFO; + +typedef struct _SERVER_MMU_CONTEXT_ SERVER_MMU_CONTEXT; + +IMG_DEV_PHYADDR GetPC(MMU_CONTEXT * psContext); + +void RGXMMUSyncPrimAlloc(PVRSRV_DEVICE_NODE *psDeviceNode); +void RGXMMUSyncPrimFree(void); + +void RGXMMUCacheInvalidate(PVRSRV_DEVICE_NODE *psDeviceNode, + MMU_CONTEXT *psMMUContext, + MMU_LEVEL eMMULevel, + IMG_BOOL bUnmap); + +/*************************************************************************/ /*! +@Function RGXMMUCacheInvalidateKick + +@Description Sends a flush command to a particular DM but first takes + the power lock. + +@Input psDevInfo Device Info +@Input pui32NextMMUInvalidateUpdate +@Input bInterrupt Should the firmware signal command completion to + the host + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXMMUCacheInvalidateKick(PVRSRV_DEVICE_NODE *psDevInfo, + IMG_UINT32 *pui32NextMMUInvalidateUpdate, + IMG_BOOL bInterrupt); + +/*************************************************************************/ /*! +@Function RGXPreKickCacheCommand + +@Description Sends a cache flush command to a particular DM without + honouring the power lock. It's the caller's responsibility + to ensure power lock is held before calling this function. + +@Input psDevInfo Device Info +@Input eDM To which DM the cmd is sent. 
+@Input pui32MMUInvalidateUpdate +@Input bInterrupt Should the firmware signal command completion to + the host + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO *psDevInfo, + SERVER_MMU_CONTEXT *psServerMMUContext, + RGXFWIF_DM eDM, + IMG_UINT32 *pui32MMUInvalidateUpdate, + IMG_BOOL bInterrupt); + +void RGXUnregisterMemoryContext(IMG_HANDLE hPrivData); +PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode, + MMU_CONTEXT *psMMUContext, + IMG_HANDLE *hPrivData); + +DEVMEM_MEMDESC *RGXGetFWMemDescFromMemoryContextHandle(IMG_HANDLE hPriv); + +void RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_DEV_VIRTADDR *psDevVAddr, + IMG_DEV_PHYADDR *psDevPAddr, + MMU_FAULT_DATA *psOutFaultData); + +IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR sPCAddress, + RGXMEM_PROCESS_INFO *psInfo); + +IMG_BOOL RGXPCPIDToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_PID uiPID, + RGXMEM_PROCESS_INFO *psInfo); + +#endif /* __RGXMEM_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmipsmmuinit.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmipsmmuinit.c new file mode 100644 index 000000000000..5c9ba46c98e0 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmipsmmuinit.c @@ -0,0 +1,991 @@ +/*************************************************************************/ /*! +@File +@Title Device specific initialisation routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific MMU initialisation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include "rgxmipsmmuinit.h" + +#include "device.h" +#include "img_types.h" +#include "img_defs.h" +#include "mmu_common.h" +#include "pdump_mmu.h" +#include "rgxheapconfig.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" +#include "rgx_memallocflags.h" +#include "pdump_km.h" +#include "rgxdevice.h" + +/* + * Bits of PT, PD and PC not involving addresses + */ + +/* Currently there is no page directory for MIPS MMU */ +#define RGX_MIPS_MMUCTRL_PDE_PROTMASK 0 +/* Currently there is no page catalog for MIPS MMU */ +#define RGX_MIPS_MMUCTRL_PCE_PROTMASK 0 + + +static MMU_PxE_CONFIG sRGXMMUPCEConfig; +static MMU_DEVVADDR_CONFIG sRGXMMUTopLevelDevVAddrConfig; + + +/* + * + * Configuration for heaps with 4kB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_4KBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_4KBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_4KBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig4KB; + + +/* + * + * Configuration for heaps with 16kB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_16KBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_16KBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_16KBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig16KB; + + +/* + * + * Configuration for heaps with 64kB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_64KBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_64KBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_64KBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig64KB; + + +/* + * + * Configuration for heaps with 256kB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_256KBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_256KBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig256KB; + + +/* + * + * Configuration for heaps with 1MB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_1MBDP; +static 
MMU_PxE_CONFIG sRGXMMUPTEConfig_1MBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_1MBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig1MB; + + +/* + * + * Configuration for heaps with 2MB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_2MBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_2MBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_2MBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig2MB; + + +/* Forward declaration of protection bits derivation functions, for + the following structure */ +static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); +static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags); +static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); +static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags); +static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); +static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags); + +static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize, + const MMU_PxE_CONFIG **ppsMMUPDEConfig, + const MMU_PxE_CONFIG **ppsMMUPTEConfig, + const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig, + IMG_HANDLE *phPriv); + +static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv); + +static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize); +static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize); + +static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes; + +/* Cached policy */ +static IMG_UINT32 gui32CachedPolicy; + +static PVRSRV_ERROR RGXCheckTrampolineAddrs(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + MMU_DEVICEATTRIBS *psDevAttrs, + IMG_UINT64 *pui64Addr); + +PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_BOOL bPhysBusAbove32Bit = 0; + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, PHYS_BUS_WIDTH)) + { + 
bPhysBusAbove32Bit = RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32; + } + + sRGXMMUDeviceAttributes.pszMMUPxPDumpMemSpaceName = + PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]); + + /* + * Setup sRGXMMUPCEConfig, no PC in MIPS MMU currently + */ + sRGXMMUPCEConfig.uiBytesPerEntry = 0; /* 32 bit entries */ + sRGXMMUPCEConfig.uiAddrMask = 0; /* Mask to get significant address bits of PC entry */ + + sRGXMMUPCEConfig.uiAddrShift = 0; /* Shift this many bits to get PD address in PC entry */ + sRGXMMUPCEConfig.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; /* Alignment of PD AND PC */ + + sRGXMMUPCEConfig.uiProtMask = RGX_MIPS_MMUCTRL_PCE_PROTMASK; /* Mask to get the status bits of the PC */ + sRGXMMUPCEConfig.uiProtShift = 0; /* Shift this many bits to have status bits starting with bit 0 */ + + sRGXMMUPCEConfig.uiValidEnMask = RGX_MIPS_MMUCTRL_PC_DATA_VALID_EN; /* Mask to get entry valid bit of the PC */ + sRGXMMUPCEConfig.uiValidEnShift = RGX_MIPS_MMUCTRL_PC_DATA_VALID_SHIFT; /* Shift this many bits to have entry valid bit starting with bit 0 */ + + /* + * Setup sRGXMMUTopLevelDevVAddrConfig + */ + sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask = 0; /* Get the PC address bits from a 40 bit virt. address (in a 64bit UINT) */ + sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift = 0; + sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPC = 0; + + sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask = 0; /* Get the PD address bits from a 40 bit virt. address (in a 64bit UINT) */ + sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift = 0; + sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPD = 0; + + sRGXMMUTopLevelDevVAddrConfig.uiPTIndexMask = IMG_UINT64_C(0xfffffff000); /* Get the PT address bits from a 40 bit virt. 
address (in a 64bit UINT) */ + sRGXMMUTopLevelDevVAddrConfig.uiPTIndexShift = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; + sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPT = (RGX_NUM_OS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; + +/* + * + * Configuration for heaps with 4kB Data-Page size + * + */ + + /* + * Setup sRGXMMUPDEConfig_4KBDP. No PD in MIPS MMU currently + */ + sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 0; + + /* No PD used for MIPS */ + sRGXMMUPDEConfig_4KBDP.uiAddrMask = 0; + sRGXMMUPDEConfig_4KBDP.uiAddrShift = 0; + sRGXMMUPDEConfig_4KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; + + sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x0); + sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 0; + + sRGXMMUPDEConfig_4KBDP.uiProtMask = RGX_MIPS_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_4KBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MIPS_MMUCTRL_PD_DATA_VALID_EN; + sRGXMMUPDEConfig_4KBDP.uiValidEnShift = RGX_MIPS_MMUCTRL_PD_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUPTEConfig_4KBDP. 
+ */ + sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 1 << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; + + + if (bPhysBusAbove32Bit) + { + sRGXMMUPTEConfig_4KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT; + gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY_ABOVE_32BIT; + } + else + { + sRGXMMUPTEConfig_4KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK; + gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY; + } + + sRGXMMUPTEConfig_4KBDP.uiAddrShift = RGXMIPSFW_ENTRYLO_PFN_SHIFT; + sRGXMMUPTEConfig_4KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; + + sRGXMMUPTEConfig_4KBDP.uiProtMask = RGXMIPSFW_ENTRYLO_DVG | ~RGXMIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK | + RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN | RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_EN; + sRGXMMUPTEConfig_4KBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGXMIPSFW_ENTRYLO_VALID_EN; + sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGXMIPSFW_ENTRYLO_VALID_SHIFT; + + /* + * Setup sRGXMMUDevVAddrConfig_4KBDP + */ + sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask = 0; + sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift = 0; + sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPC = 0; + + + sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask = 0; + sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift = 0; + sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPD = 0; + + sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = (RGX_NUM_OS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; + + + sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff); + sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_4KBDP.uiOffsetInBytes = RGX_FIRMWARE_RAW_HEAP_BASE & IMG_UINT64_C(0x00ffffffff); + + /* + * Setup gsPageSizeConfig4KB + */ + gsPageSizeConfig4KB.psPDEConfig = &sRGXMMUPDEConfig_4KBDP; + gsPageSizeConfig4KB.psPTEConfig = &sRGXMMUPTEConfig_4KBDP; + 
gsPageSizeConfig4KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_4KBDP; + gsPageSizeConfig4KB.uiRefCount = 0; + gsPageSizeConfig4KB.uiMaxRefCount = 0; + + +/* + * + * Configuration for heaps with 16kB Data-Page size + * + */ + + /* + * Setup sRGXMMUPDEConfig_16KBDP + */ + sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 0; + + sRGXMMUPDEConfig_16KBDP.uiAddrMask = 0; + sRGXMMUPDEConfig_16KBDP.uiAddrShift = 0; /* These are for a page directory ENTRY, meaning the address of a PT cropped to suit the PD */ + sRGXMMUPDEConfig_16KBDP.uiAddrLog2Align = 0; /* Alignment of the page tables NOT directories */ + + sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = 0; + sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 0; + + sRGXMMUPDEConfig_16KBDP.uiProtMask = 0; + sRGXMMUPDEConfig_16KBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_16KBDP.uiValidEnMask = 0; + sRGXMMUPDEConfig_16KBDP.uiValidEnShift = 0; + + /* + * Setup sRGXMMUPTEConfig_16KBDP. Not supported yet + */ + sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 0; + + sRGXMMUPTEConfig_16KBDP.uiAddrMask = 0; + sRGXMMUPTEConfig_16KBDP.uiAddrShift = 0; /* These are for a page table ENTRY, meaning the address of a PAGE cropped to suit the PD */ + sRGXMMUPTEConfig_16KBDP.uiAddrLog2Align = 0; /* Alignment of the pages NOT tables */ + + sRGXMMUPTEConfig_16KBDP.uiProtMask = 0; + sRGXMMUPTEConfig_16KBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_16KBDP.uiValidEnMask = 0; + sRGXMMUPTEConfig_16KBDP.uiValidEnShift = 0; + + /* + * Setup sRGXMMUDevVAddrConfig_16KBDP + */ + sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask = 0; + sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift = 0; + sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPC = 0; + + sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask = 0; + sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift = 0; + sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPD = 0; + + sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask = 0; + sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift = 0; + sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPT = 0; + + sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetMask = 0; + 
sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_16KBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig16KB + */ + gsPageSizeConfig16KB.psPDEConfig = &sRGXMMUPDEConfig_16KBDP; + gsPageSizeConfig16KB.psPTEConfig = &sRGXMMUPTEConfig_16KBDP; + gsPageSizeConfig16KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_16KBDP; + gsPageSizeConfig16KB.uiRefCount = 0; + gsPageSizeConfig16KB.uiMaxRefCount = 0; + + +/* + * + * Configuration for heaps with 64kB Data-Page size. Not supported yet + * + */ + + /* + * Setup sRGXMMUPDEConfig_64KBDP + */ + sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 0; + + sRGXMMUPDEConfig_64KBDP.uiAddrMask = 0; + sRGXMMUPDEConfig_64KBDP.uiAddrShift = 0; + sRGXMMUPDEConfig_64KBDP.uiAddrLog2Align = 0; + + sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = 0; + sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 0; + + sRGXMMUPDEConfig_64KBDP.uiProtMask = 0; + sRGXMMUPDEConfig_64KBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_64KBDP.uiValidEnMask = 0; + sRGXMMUPDEConfig_64KBDP.uiValidEnShift = 0; + + /* + * Setup sRGXMMUPTEConfig_64KBDP. 
+ * + */ + sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 1 << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; + + if (bPhysBusAbove32Bit) + { + sRGXMMUPTEConfig_64KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT; + gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY_ABOVE_32BIT; + } + else + { + sRGXMMUPTEConfig_64KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK; + gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY; + } + + /* Even while using 64K pages, MIPS still aligns addresses to 4K */ + sRGXMMUPTEConfig_64KBDP.uiAddrShift = RGXMIPSFW_ENTRYLO_PFN_SHIFT; + sRGXMMUPTEConfig_64KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; + + sRGXMMUPTEConfig_64KBDP.uiProtMask = RGXMIPSFW_ENTRYLO_DVG | ~RGXMIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK | + RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN | RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_EN; + sRGXMMUPTEConfig_64KBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_64KBDP.uiValidEnMask = RGXMIPSFW_ENTRYLO_VALID_EN; + sRGXMMUPTEConfig_64KBDP.uiValidEnShift = RGXMIPSFW_ENTRYLO_VALID_SHIFT; + + /* + * Setup sRGXMMUDevVAddrConfig_64KBDP. + */ + sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask = 0; + sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift = 0; + sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPC = 0; + + sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask = 0; + sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift = 0; + sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPD = 0; + + sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = IMG_UINT64_C(0x00ffff0000); + sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_64K; + sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = (RGX_NUM_OS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_64K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; + + sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000000ffff); + sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_64KBDP.uiOffsetInBytes = RGX_FIRMWARE_RAW_HEAP_BASE & IMG_UINT64_C(0x00ffffffff); + + /* + * Setup gsPageSizeConfig64KB. 
+ */ + gsPageSizeConfig64KB.psPDEConfig = &sRGXMMUPDEConfig_64KBDP; + gsPageSizeConfig64KB.psPTEConfig = &sRGXMMUPTEConfig_64KBDP; + gsPageSizeConfig64KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_64KBDP; + gsPageSizeConfig64KB.uiRefCount = 0; + gsPageSizeConfig64KB.uiMaxRefCount = 0; + + +/* + * + * Configuration for heaps with 256kB Data-Page size. Not supported yet + * + */ + + /* + * Setup sRGXMMUPDEConfig_256KBDP + */ + sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 0; + + sRGXMMUPDEConfig_256KBDP.uiAddrMask = 0; + sRGXMMUPDEConfig_256KBDP.uiAddrShift = 0; + sRGXMMUPDEConfig_256KBDP.uiAddrLog2Align = 0; + + sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = 0; + sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 0; + + sRGXMMUPDEConfig_256KBDP.uiProtMask = 0; + sRGXMMUPDEConfig_256KBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_256KBDP.uiValidEnMask = 0; + sRGXMMUPDEConfig_256KBDP.uiValidEnShift = 0; + + /* + * Setup MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP + */ + sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 0; + + sRGXMMUPTEConfig_256KBDP.uiAddrMask = 0; + sRGXMMUPTEConfig_256KBDP.uiAddrShift = 0; + sRGXMMUPTEConfig_256KBDP.uiAddrLog2Align = 0; + + sRGXMMUPTEConfig_256KBDP.uiProtMask = 0; + sRGXMMUPTEConfig_256KBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_256KBDP.uiValidEnMask = 0; + sRGXMMUPTEConfig_256KBDP.uiValidEnShift = 0; + + /* + * Setup sRGXMMUDevVAddrConfig_256KBDP + */ + sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask = 0; + sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift = 0; + sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPC = 0; + + sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask = 0; + sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift = 0; + sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPD = 0; + + sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask = 0; + sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift = 0; + sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPT = 0; + + sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetMask = 0; + sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetShift = 0; + 
sRGXMMUDevVAddrConfig_256KBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig256KB + */ + gsPageSizeConfig256KB.psPDEConfig = &sRGXMMUPDEConfig_256KBDP; + gsPageSizeConfig256KB.psPTEConfig = &sRGXMMUPTEConfig_256KBDP; + gsPageSizeConfig256KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_256KBDP; + gsPageSizeConfig256KB.uiRefCount = 0; + gsPageSizeConfig256KB.uiMaxRefCount = 0; + + /* + * Setup sRGXMMUPDEConfig_1MBDP. Not supported yet + */ + sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 0; + + sRGXMMUPDEConfig_1MBDP.uiAddrMask = 0; + sRGXMMUPDEConfig_1MBDP.uiAddrShift = 0; + sRGXMMUPDEConfig_1MBDP.uiAddrLog2Align = 0; + + sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = 0; + sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 0; + + sRGXMMUPDEConfig_1MBDP.uiProtMask = 0; + sRGXMMUPDEConfig_1MBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_1MBDP.uiValidEnMask = 0; + sRGXMMUPDEConfig_1MBDP.uiValidEnShift = 0; + + /* + * Setup sRGXMMUPTEConfig_1MBDP + */ + sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8; + + sRGXMMUPTEConfig_1MBDP.uiAddrMask = 0; + sRGXMMUPTEConfig_1MBDP.uiAddrShift = 0; + sRGXMMUPTEConfig_1MBDP.uiAddrLog2Align = 0; + + sRGXMMUPTEConfig_1MBDP.uiProtMask = 0; + sRGXMMUPTEConfig_1MBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_1MBDP.uiValidEnMask = 0; + sRGXMMUPTEConfig_1MBDP.uiValidEnShift = 0; + + /* + * Setup sRGXMMUDevVAddrConfig_1MBDP + */ + sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask = 0; + sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift = 0; + sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPC = 0; + + sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask = 0; + sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift = 0; + sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPD = 0; + + sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask = 0; + sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift = 0; + sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPT = 0; + + sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetMask = 0; + sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_1MBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig1MB + */ + 
gsPageSizeConfig1MB.psPDEConfig = &sRGXMMUPDEConfig_1MBDP; + gsPageSizeConfig1MB.psPTEConfig = &sRGXMMUPTEConfig_1MBDP; + gsPageSizeConfig1MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_1MBDP; + gsPageSizeConfig1MB.uiRefCount = 0; + gsPageSizeConfig1MB.uiMaxRefCount = 0; + + /* + * Setup sRGXMMUPDEConfig_2MBDP. Not supported yet + */ + sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 0; + + sRGXMMUPDEConfig_2MBDP.uiAddrMask = 0; + sRGXMMUPDEConfig_2MBDP.uiAddrShift = 0; + sRGXMMUPDEConfig_2MBDP.uiAddrLog2Align = 0; + + sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = 0; + sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 0; + + sRGXMMUPDEConfig_2MBDP.uiProtMask = 0; + sRGXMMUPDEConfig_2MBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_2MBDP.uiValidEnMask = 0; + sRGXMMUPDEConfig_2MBDP.uiValidEnShift = 0; + + /* + * Setup sRGXMMUPTEConfig_2MBDP + */ + sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 0; + + sRGXMMUPTEConfig_2MBDP.uiAddrMask = 0; + sRGXMMUPTEConfig_2MBDP.uiAddrShift = 0; + sRGXMMUPTEConfig_2MBDP.uiAddrLog2Align = 0; + + sRGXMMUPTEConfig_2MBDP.uiProtMask = 0; + sRGXMMUPTEConfig_2MBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_2MBDP.uiValidEnMask = 0; + sRGXMMUPTEConfig_2MBDP.uiValidEnShift = 0; + + /* + * Setup sRGXMMUDevVAddrConfig_2MBDP + */ + sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask = 0; + sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift = 0; + sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPC = 0; + + sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask = 0; + sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift = 0; + sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPD = 0; + + sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask = 0; + sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift = 0; + sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPT = 0; + + sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetMask = 0; + sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_2MBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig2MB + */ + gsPageSizeConfig2MB.psPDEConfig = &sRGXMMUPDEConfig_2MBDP; + gsPageSizeConfig2MB.psPTEConfig = &sRGXMMUPTEConfig_2MBDP; 
+ gsPageSizeConfig2MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_2MBDP; + gsPageSizeConfig2MB.uiRefCount = 0; + gsPageSizeConfig2MB.uiMaxRefCount = 0; + + /* + * Setup sRGXMMUDeviceAttributes + */ + sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_MIPS_MICROAPTIV; + sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_1; + /* The page table fits in one big physical page as big as the page table itself */ + sRGXMMUDeviceAttributes.ui32BaseAlign = RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K; + /* The base configuration is set to 4kB pages*/ + sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPTEConfig_4KBDP; + sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig; + + /* Functions for deriving page table/dir/cat protection bits */ + sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8; + sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4; + sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8; + sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4; + sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8; + sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4; + + /* Functions for establishing configurations for PDE/PTE/DEVVADDR + on per-heap basis */ + sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB; + sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB; + + sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE4 = RGXGetPageSizeFromPDE4; + sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXGetPageSizeFromPDE8; + + psDeviceNode->psFirmwareMMUDevAttrs = &sRGXMMUDeviceAttributes; + + psDeviceNode->pfnValidateOrTweakPhysAddrs = RGXCheckTrampolineAddrs; + + return PVRSRV_OK; +} + +static PVRSRV_ERROR RGXCheckTrampolineAddrs(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + MMU_DEVICEATTRIBS *psDevAttrs, + IMG_UINT64 *pui64Addr) +{ + if (PVRSRV_IS_FEATURE_SUPPORTED(psDevNode, MIPS)) + { + /* + * If mapping for the MIPS FW context, check for sensitive PAs + */ + if 
(psDevAttrs == psDevNode->psFirmwareMMUDevAttrs) + { + PVRSRV_RGXDEV_INFO *psDevice = (PVRSRV_RGXDEV_INFO *)psDevNode->pvDevice; + + if (RGXMIPSFW_SENSITIVE_ADDR(*pui64Addr)) + { + *pui64Addr = psDevice->psTrampoline->sPhysAddr.uiAddr + RGXMIPSFW_TRAMPOLINE_OFFSET(*pui64Addr); + } + /* FIX_HW_BRN_63553 is mainlined for all MIPS cores */ + else if (*pui64Addr == 0x0 && !psDevice->sLayerParams.bDevicePA0IsValid) + { + PVR_DPF((PVR_DBG_ERROR, "%s attempt to map addr 0x0 in the FW but 0x0 is not considered valid.", __func__)); + return PVRSRV_ERROR_MMU_FAILED_TO_MAP_PAGE_TABLE; + } + } + } + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXMipsMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + + eError = PVRSRV_OK; + +#if defined(PDUMP) + psDeviceNode->pfnMMUGetContextID = NULL; +#endif + + psDeviceNode->psFirmwareMMUDevAttrs = NULL; + +#if defined(DEBUG) + PVR_DPF((PVR_DBG_MESSAGE, "Variable Page Size Heap Stats:")); + PVR_DPF((PVR_DBG_MESSAGE, "Max 4K page heaps: %d", + gsPageSizeConfig4KB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, "Current 4K page heaps (should be 0): %d", + gsPageSizeConfig4KB.uiRefCount)); + PVR_DPF((PVR_DBG_MESSAGE, "Max 16K page heaps: %d", + gsPageSizeConfig16KB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, "Current 16K page heaps (should be 0): %d", + gsPageSizeConfig16KB.uiRefCount)); + PVR_DPF((PVR_DBG_MESSAGE, "Max 64K page heaps: %d", + gsPageSizeConfig64KB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, "Current 64K page heaps (should be 0): %d", + gsPageSizeConfig64KB.uiRefCount)); + PVR_DPF((PVR_DBG_MESSAGE, "Max 256K page heaps: %d", + gsPageSizeConfig256KB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, "Current 256K page heaps (should be 0): %d", + gsPageSizeConfig256KB.uiRefCount)); + PVR_DPF((PVR_DBG_MESSAGE, "Max 1M page heaps: %d", + gsPageSizeConfig1MB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, "Current 1M page heaps (should be 0): %d", + gsPageSizeConfig1MB.uiRefCount)); + PVR_DPF((PVR_DBG_MESSAGE, "Max 2M 
page heaps: %d", + gsPageSizeConfig2MB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, "Current 2M page heaps (should be 0): %d", + gsPageSizeConfig2MB.uiRefCount)); +#endif + if (gsPageSizeConfig4KB.uiRefCount > 0 || + gsPageSizeConfig16KB.uiRefCount > 0 || + gsPageSizeConfig64KB.uiRefCount > 0 || + gsPageSizeConfig256KB.uiRefCount > 0 || + gsPageSizeConfig1MB.uiRefCount > 0 || + gsPageSizeConfig2MB.uiRefCount > 0 + ) + { + PVR_DPF((PVR_DBG_ERROR, "RGXMMUInit_Unregister: Unbalanced MMU API Usage (Internal error)")); + } + + return eError; +} + +/*************************************************************************/ /*! +@Function RGXDerivePCEProt4 +@Description calculate the PCE protection flags based on a 4 byte entry +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags) +{ + PVR_DPF((PVR_DBG_ERROR, "Page Catalog not supported on MIPS MMU")); + return 0; +} + + +/*************************************************************************/ /*! +@Function RGXDerivePCEProt8 +@Description calculate the PCE protection flags based on an 8 byte entry +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize) +{ + PVR_UNREFERENCED_PARAMETER(uiProtFlags); + PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize); + + PVR_DPF((PVR_DBG_ERROR, "Page Catalog not supported on MIPS MMU")); + return 0; +} + + +/*************************************************************************/ /*! 
+@Function RGXDerivePDEProt4 +@Description derive the PDE protection flags based on a 4 byte entry +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags) +{ + PVR_UNREFERENCED_PARAMETER(uiProtFlags); + PVR_DPF((PVR_DBG_ERROR, "Page Directory not supported on MIPS MMU")); + return 0; +} + + +/*************************************************************************/ /*! +@Function RGXDerivePDEProt8 +@Description derive the PDE protection flags based on an 8 byte entry + +@Input uiLog2DataPageSize The log2 of the required page size. + E.g, for 4KiB pages, this parameter must be 12. + For 2MiB pages, it must be set to 21. + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize) +{ + PVR_UNREFERENCED_PARAMETER(uiProtFlags); + PVR_DPF((PVR_DBG_ERROR, "Page Directory not supported on MIPS MMU")); + return 0; +} + + +/*************************************************************************/ /*! 
+@Function RGXDerivePTEProt4 +@Description calculate the PTE protection flags based on a 4 byte entry +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags) +{ + IMG_UINT32 ui32MMUFlags = 0; + + if (((MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE) & uiProtFlags) == (MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE)) + { + /* read/write */ + ui32MMUFlags |= RGXMIPSFW_ENTRYLO_DIRTY_EN; + } + else if (MMU_PROTFLAGS_READABLE & uiProtFlags) + { + /* read only */ + } + else if (MMU_PROTFLAGS_WRITEABLE & uiProtFlags) + { + /* write only */ + ui32MMUFlags |= RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN; + } + else if ((MMU_PROTFLAGS_INVALID & uiProtFlags) == 0) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt4: neither read nor write specified...")); + } + + /* cache coherency */ + if (MMU_PROTFLAGS_CACHE_COHERENT & uiProtFlags) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt4: cache coherency not supported for MIPS caches")); + } + + /* cache setup */ + if ((MMU_PROTFLAGS_CACHED & uiProtFlags) == 0) + { + ui32MMUFlags |= RGXMIPSFW_ENTRYLO_UNCACHED; + } + else + { + ui32MMUFlags |= gui32CachedPolicy << + RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT; + } + + if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0) + { + ui32MMUFlags |= RGXMIPSFW_ENTRYLO_VALID_EN; + ui32MMUFlags |= RGXMIPSFW_ENTRYLO_GLOBAL_EN; + } + + if (MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT) & uiProtFlags) + { + /* PVR_DPF((PVR_DBG_WARNING, "RGXDerivePTEProt4: PMMETA Protect not existent for MIPS, option discarded")); */ + } + + return ui32MMUFlags; +} + +/*************************************************************************/ /*! 
@Function       RGXDerivePTEProt8
@Description    calculate the PTE protection flags based on an 8 byte entry
@Return         PVRSRV_ERROR
*/ /**************************************************************************/
static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
{
	PVR_UNREFERENCED_PARAMETER(uiProtFlags);
	PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);

	/* Only the 4-byte PTE path (RGXDerivePTEProt4) is implemented for this
	   device; reaching here indicates a configuration error */
	PVR_DPF((PVR_DBG_ERROR, "8-byte PTE not supported on this device"));

	return 0;
}


/*************************************************************************/ /*!
@Function       RGXGetPageSizeConfig
@Description    Set up configuration for variable sized data pages.
                RGXPutPageSizeConfigCB has to be called to ensure correct
                refcounting.
@Return         PVRSRV_ERROR
*/ /**************************************************************************/
static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
                                           const MMU_PxE_CONFIG **ppsMMUPDEConfig,
                                           const MMU_PxE_CONFIG **ppsMMUPTEConfig,
                                           const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
                                           IMG_HANDLE *phPriv)
{
	MMU_PAGESIZECONFIG *psPageSizeConfig;

	/* Only 4K and 64K data pages have populated configs in this file;
	   every other size is rejected */
	switch (uiLog2DataPageSize)
	{
	case RGXMIPSFW_LOG2_PAGE_SIZE_64K:
		psPageSizeConfig = &gsPageSizeConfig64KB;
		break;
	case RGXMIPSFW_LOG2_PAGE_SIZE_4K:
		psPageSizeConfig = &gsPageSizeConfig4KB;
		break;
	default:
		PVR_DPF((PVR_DBG_ERROR,
		         "RGXGetPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
		         uiLog2DataPageSize));
		*phPriv = NULL;
		return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
	}

	/* Refer caller's pointers to the data */
	*ppsMMUPDEConfig = psPageSizeConfig->psPDEConfig;
	*ppsMMUPTEConfig = psPageSizeConfig->psPTEConfig;
	*ppsMMUDevVAddrConfig = psPageSizeConfig->psDevVAddrConfig;

#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
	/* Increment ref-count - not that we're allocating anything here
	   (I'm using static structs), but one day we might, so we want
	   the Get/Put code to be balanced properly */
	psPageSizeConfig->uiRefCount++;

	/* This is purely for debug statistics */
	psPageSizeConfig->uiMaxRefCount = MAX(psPageSizeConfig->uiMaxRefCount,
	                                      psPageSizeConfig->uiRefCount);
#endif

	/* The log2 page size itself is carried in the opaque handle so that
	   RGXPutPageSizeConfigCB can locate the matching config again */
	*phPriv = (IMG_HANDLE)(uintptr_t)uiLog2DataPageSize;
	PVR_ASSERT (uiLog2DataPageSize == (IMG_UINT32)(uintptr_t)*phPriv);

	return PVRSRV_OK;
}

/*************************************************************************/ /*!
@Function       RGXPutPageSizeConfig
@Description    Tells this code that the mmu module is done with the
                configurations set in RGXGetPageSizeConfig. This can
                be a no-op.
                Called after RGXGetPageSizeConfigCB.
@Return         PVRSRV_ERROR
*/ /**************************************************************************/
static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv)
{
#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
	MMU_PAGESIZECONFIG *psPageSizeConfig;
	IMG_UINT32 uiLog2DataPageSize;

	/* Recover the log2 page size stashed in the handle by
	   RGXGetPageSizeConfigCB */
	uiLog2DataPageSize = (IMG_UINT32)(uintptr_t) hPriv;

	switch (uiLog2DataPageSize)
	{
	case RGXMIPSFW_LOG2_PAGE_SIZE_64K:
		psPageSizeConfig = &gsPageSizeConfig64KB;
		break;
	case RGXMIPSFW_LOG2_PAGE_SIZE_4K:
		psPageSizeConfig = &gsPageSizeConfig4KB;
		break;
	default:
		PVR_DPF((PVR_DBG_ERROR,
		         "RGXPutPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
		         uiLog2DataPageSize));
		return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
	}

	/* Ref-count here is not especially useful, but it's an extra
	   check that the API is being used correctly */
	psPageSizeConfig->uiRefCount--;
#else
	PVR_UNREFERENCED_PARAMETER(hPriv);
#endif
	return PVRSRV_OK;
}

/* PDE-based page-size lookup is meaningless on MIPS (no page directory);
   always reports an invalid-page-size error */
static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize)
{
	PVR_UNREFERENCED_PARAMETER(ui32PDE);
	PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize);
	PVR_DPF((PVR_DBG_ERROR, "PDE not supported on MIPS"));
	return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
}

/* 8-byte variant of the above; same rationale (body continues on the
   following lines) */
static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize)
+{ + PVR_UNREFERENCED_PARAMETER(ui64PDE); + PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize); + PVR_DPF((PVR_DBG_ERROR, "PDE not supported on MIPS")); + return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmipsmmuinit.h b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmipsmmuinit.h new file mode 100644 index 000000000000..8d59c189b3e8 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmipsmmuinit.h @@ -0,0 +1,94 @@ +/*************************************************************************/ /*! +@File +@Title Device specific initialisation routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific MMU initialisation for the MIPS firmware +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* NB: this file is not to be included arbitrarily. It exists solely + for the linkage between rgxinit.c and rgxmmuinit.c, the former + being otherwise cluttered by the contents of the latter */ + +#ifndef _SRVKM_RGXMIPSMMUINIT_H_ +#define _SRVKM_RGXMIPSMMUINIT_H_ + +#include "device.h" +#include "img_types.h" +#include "mmu_common.h" +#include "img_defs.h" +#include "rgx_mips.h" + +/* + + Labelling of fields within virtual address. 
No PD and PC are used currently for + the MIPS MMU +*/ +/* +Page Table entry # +*/ +#define RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_SHIFT (12U) +#define RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF)) + + +/* PC entries related definitions */ +/* No PC is currently used for MIPS MMU */ +#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_EN (0U) +#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_SHIFT (0U) +#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_CLRMSK (0U) + +#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_SHIFT (0U) +#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_CLRMSK (0U) +#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_EN (0U) + +/* PD entries related definitions */ +/* No PD is currently used for MIPS MMU */ +#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_EN (0U) +#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_SHIFT (0U) +#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_CLRMSK (0U) + +#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_SHIFT (0U) +#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_CLRMSK (0U) +#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_EN (0U) + + +PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode); +PVRSRV_ERROR RGXMipsMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode); + + +#endif /* #ifndef _SRVKM_RGXMIPSMMUINIT_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmmuinit.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmmuinit.c new file mode 100644 index 000000000000..1b18943c42d1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmmuinit.c @@ -0,0 +1,1079 @@ +/*************************************************************************/ /*! +@File +@Title Device specific initialisation routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific MMU initialisation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ /**************************************************************************/ +#include "rgxmmuinit.h" +#include "rgxmmudefs_km.h" + +#include "device.h" +#include "img_types.h" +#include "img_defs.h" +#include "mmu_common.h" +#include "pdump_mmu.h" + +#include "pvr_debug.h" +#include "pvrsrv_error.h" +#include "rgx_memallocflags.h" +#include "rgx_heaps.h" +#include "pdump_km.h" + + +/* useful macros */ +/* units represented in a bitfield */ +#define UNITS_IN_BITFIELD(Mask, Shift) ((Mask >> Shift) + 1) + + +/* + * Bits of PT, PD and PC not involving addresses + */ + +#define RGX_MMUCTRL_PTE_PROTMASK (RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN | \ + RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN | \ + RGX_MMUCTRL_PT_DATA_PM_SRC_EN | \ + RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN | \ + RGX_MMUCTRL_PT_DATA_CC_EN | \ + RGX_MMUCTRL_PT_DATA_READ_ONLY_EN | \ + RGX_MMUCTRL_PT_DATA_VALID_EN) + +#define RGX_MMUCTRL_PDE_PROTMASK (RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN | \ + ~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK | \ + RGX_MMUCTRL_PD_DATA_VALID_EN) + +#define RGX_MMUCTRL_PCE_PROTMASK (RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN | \ + RGX_MMUCTRL_PC_DATA_VALID_EN) + + + +static MMU_PxE_CONFIG sRGXMMUPCEConfig; +static MMU_DEVVADDR_CONFIG sRGXMMUTopLevelDevVAddrConfig; + + +/* + * + * Configuration for heaps with 4kB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_4KBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_4KBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_4KBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig4KB; + + +/* + * + * Configuration for heaps with 16kB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_16KBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_16KBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_16KBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig16KB; + + +/* + * + * Configuration for heaps with 64kB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_64KBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_64KBDP; +static 
MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_64KBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig64KB; + + +/* + * + * Configuration for heaps with 256kB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_256KBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_256KBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig256KB; + + +/* + * + * Configuration for heaps with 1MB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_1MBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_1MBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_1MBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig1MB; + + +/* + * + * Configuration for heaps with 2MB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_2MBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_2MBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_2MBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig2MB; + + +/* Forward declaration of protection bits derivation functions, for + the following structure */ +static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); +static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags); +static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); +static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags); +static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); +static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags); + +static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize, + const MMU_PxE_CONFIG **ppsMMUPDEConfig, + const MMU_PxE_CONFIG **ppsMMUPTEConfig, + const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig, + IMG_HANDLE *phPriv); + +static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv); + +static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize); +static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize); + 
+static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes; + +PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + /* Setup of Px Entries: + * + * + * PAGE TABLE (8 Byte): + * + * | 62 | 61...40 | 39...12 (varies) | 11...6 | 5 | 4 | 3 | 2 | 1 | 0 | + * | PM/Meta protect | VP Page (39:18) | Physical Page | VP Page (17:12) | Entry Pending | PM src | SLC Bypass Ctrl | Cache Coherency | Read Only | Valid | + * + * + * PAGE DIRECTORY (8 Byte): + * + * | 40 | 39...5 (varies) | 4 | 3...1 | 0 | + * | Entry Pending | Page Table base address | (reserved) | Page Size | Valid | + * + * + * PAGE CATALOGUE (4 Byte): + * + * | 31...4 | 3...2 | 1 | 0 | + * | Page Directory base address | (reserved) | Entry Pending | Valid | + * + */ + + + /* Example how to get the PD address from a PC entry. + * The procedure is the same for PD and PT entries to retrieve PT and Page addresses: + * + * 1) sRGXMMUPCEConfig.uiAddrMask applied to PC entry with '&': + * | 31...4 | 3...2 | 1 | 0 | + * | PD Addr | 0 | 0 | 0 | + * + * 2) sRGXMMUPCEConfig.uiAddrShift applied with '>>': + * | 27...0 | + * | PD Addr | + * + * 3) sRGXMMUPCEConfig.uiAddrLog2Align applied with '<<': + * | 39...0 | + * | PD Addr | + * + */ + + + sRGXMMUDeviceAttributes.pszMMUPxPDumpMemSpaceName = + PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]); + + /* + * Setup sRGXMMUPCEConfig + */ + sRGXMMUPCEConfig.uiBytesPerEntry = 4; /* 32 bit entries */ + sRGXMMUPCEConfig.uiAddrMask = 0xfffffff0; /* Mask to get significant address bits of PC entry i.e. the address of the PD */ + + sRGXMMUPCEConfig.uiAddrShift = 4; /* Shift this many bits to get PD address */ + sRGXMMUPCEConfig.uiAddrLog2Align = 12; /* Alignment of PD physical addresses. 
*/ + + sRGXMMUPCEConfig.uiProtMask = RGX_MMUCTRL_PCE_PROTMASK; /* Mask to get the status bits (pending | valid)*/ + sRGXMMUPCEConfig.uiProtShift = 0; /* Shift this many bits to get the status bits */ + + sRGXMMUPCEConfig.uiValidEnMask = RGX_MMUCTRL_PC_DATA_VALID_EN; /* Mask to get entry valid bit of the PC */ + sRGXMMUPCEConfig.uiValidEnShift = RGX_MMUCTRL_PC_DATA_VALID_SHIFT; /* Shift this many bits to get entry valid bit */ + + /* + * Setup sRGXMMUTopLevelDevVAddrConfig + */ + sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; /* Mask to get PC index applied to a 40 bit virt. device address */ + sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; /* Shift a 40 bit virt. device address by this amount to get the PC index */ + sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask, + sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift)); + + sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; /* Mask to get PD index applied to a 40 bit virt. device address */ + sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; /* Shift a 40 bit virt. 
device address by this amount to get the PD index */ + sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask, + sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift)); + + /* + * + * Configuration for heaps with 4kB Data-Page size + * + */ + + /* + * Setup sRGXMMUPDEConfig_4KBDP + */ + sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 8; + + sRGXMMUPDEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); + sRGXMMUPDEConfig_4KBDP.uiAddrShift = 12; + sRGXMMUPDEConfig_4KBDP.uiAddrLog2Align = 12; + + sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); + sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 1; + + sRGXMMUPDEConfig_4KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_4KBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; + sRGXMMUPDEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUPTEConfig_4KBDP + */ + sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 8; + + sRGXMMUPTEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffff000); + sRGXMMUPTEConfig_4KBDP.uiAddrShift = 12; + sRGXMMUPTEConfig_4KBDP.uiAddrLog2Align = 12; /* Alignment of the physical addresses of the pages NOT PTs */ + + sRGXMMUPTEConfig_4KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_4KBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; + sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUDevVAddrConfig_4KBDP + */ + sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask, + sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift)); + + sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; + 
sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask, + sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift)); + + sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask, + sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift)); + + sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff); + sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_4KBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig4KB + */ + gsPageSizeConfig4KB.psPDEConfig = &sRGXMMUPDEConfig_4KBDP; + gsPageSizeConfig4KB.psPTEConfig = &sRGXMMUPTEConfig_4KBDP; + gsPageSizeConfig4KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_4KBDP; + gsPageSizeConfig4KB.uiRefCount = 0; + gsPageSizeConfig4KB.uiMaxRefCount = 0; + + + /* + * + * Configuration for heaps with 16kB Data-Page size + * + */ + + /* + * Setup sRGXMMUPDEConfig_16KBDP + */ + sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 8; + + sRGXMMUPDEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); + sRGXMMUPDEConfig_16KBDP.uiAddrShift = 10; + sRGXMMUPDEConfig_16KBDP.uiAddrLog2Align = 10; + + sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); + sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 1; + + sRGXMMUPDEConfig_16KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_16KBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; + sRGXMMUPDEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUPTEConfig_16KBDP + */ + sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 8; + + sRGXMMUPTEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xffffffc000); + 
sRGXMMUPTEConfig_16KBDP.uiAddrShift = 14; + sRGXMMUPTEConfig_16KBDP.uiAddrLog2Align = 14; + + sRGXMMUPTEConfig_16KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_16KBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; + sRGXMMUPTEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUDevVAddrConfig_16KBDP + */ + sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask, + sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift)); + + + sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask, + sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift)); + + + sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001fc000); + sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift = 14; + sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask, + sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift)); + + sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000003fff); + sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_16KBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig16KB + */ + gsPageSizeConfig16KB.psPDEConfig = &sRGXMMUPDEConfig_16KBDP; + gsPageSizeConfig16KB.psPTEConfig = &sRGXMMUPTEConfig_16KBDP; + gsPageSizeConfig16KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_16KBDP; + gsPageSizeConfig16KB.uiRefCount = 0; + gsPageSizeConfig16KB.uiMaxRefCount = 0; + + + /* + * + * Configuration for heaps with 64kB Data-Page size + * 
+ */ + + /* + * Setup sRGXMMUPDEConfig_64KBDP + */ + sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 8; + + sRGXMMUPDEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); + sRGXMMUPDEConfig_64KBDP.uiAddrShift = 8; + sRGXMMUPDEConfig_64KBDP.uiAddrLog2Align = 8; + + sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); + sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 1; + + sRGXMMUPDEConfig_64KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_64KBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; + sRGXMMUPDEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUPTEConfig_64KBDP + */ + sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 8; + + sRGXMMUPTEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xffffff0000); + sRGXMMUPTEConfig_64KBDP.uiAddrShift = 16; + sRGXMMUPTEConfig_64KBDP.uiAddrLog2Align = 16; + + sRGXMMUPTEConfig_64KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_64KBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; + sRGXMMUPTEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUDevVAddrConfig_64KBDP + */ + sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask, + sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift)); + + + sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask, + sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift)); + + + sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001f0000); + 
sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = 16; + sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask, + sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift)); + + + sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000000ffff); + sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_64KBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig64KB + */ + gsPageSizeConfig64KB.psPDEConfig = &sRGXMMUPDEConfig_64KBDP; + gsPageSizeConfig64KB.psPTEConfig = &sRGXMMUPTEConfig_64KBDP; + gsPageSizeConfig64KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_64KBDP; + gsPageSizeConfig64KB.uiRefCount = 0; + gsPageSizeConfig64KB.uiMaxRefCount = 0; + + + /* + * + * Configuration for heaps with 256kB Data-Page size + * + */ + + /* + * Setup sRGXMMUPDEConfig_256KBDP + */ + sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 8; + + sRGXMMUPDEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); + sRGXMMUPDEConfig_256KBDP.uiAddrShift = 6; + sRGXMMUPDEConfig_256KBDP.uiAddrLog2Align = 6; + + sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); + sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 1; + + sRGXMMUPDEConfig_256KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_256KBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; + sRGXMMUPDEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; + + /* + * Setup MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP + */ + sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 8; + + sRGXMMUPTEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffc0000); + sRGXMMUPTEConfig_256KBDP.uiAddrShift = 18; + sRGXMMUPTEConfig_256KBDP.uiAddrLog2Align = 18; + + sRGXMMUPTEConfig_256KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_256KBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; + sRGXMMUPTEConfig_256KBDP.uiValidEnShift = 
RGX_MMUCTRL_PT_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUDevVAddrConfig_256KBDP + */ + sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask, + sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift)); + + + sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask, + sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift)); + + + sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001c0000); + sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift = 18; + sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask, + sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift)); + + + sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000003ffff); + sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_256KBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig256KB + */ + gsPageSizeConfig256KB.psPDEConfig = &sRGXMMUPDEConfig_256KBDP; + gsPageSizeConfig256KB.psPTEConfig = &sRGXMMUPTEConfig_256KBDP; + gsPageSizeConfig256KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_256KBDP; + gsPageSizeConfig256KB.uiRefCount = 0; + gsPageSizeConfig256KB.uiMaxRefCount = 0; + + /* + * Setup sRGXMMUPDEConfig_1MBDP + */ + sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 8; + + sRGXMMUPDEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); + /* + * The hardware requires that PT tables need be 1<<6 = 64 byte aligned even + * if they contain fewer entries. 
+ */ + sRGXMMUPDEConfig_1MBDP.uiAddrShift = 6; + sRGXMMUPDEConfig_1MBDP.uiAddrLog2Align = 6; + + sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); + sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 1; + + sRGXMMUPDEConfig_1MBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_1MBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; + sRGXMMUPDEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUPTEConfig_1MBDP + */ + sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8; + + sRGXMMUPTEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffff00000); + sRGXMMUPTEConfig_1MBDP.uiAddrShift = 20; + sRGXMMUPTEConfig_1MBDP.uiAddrLog2Align = 20; + + sRGXMMUPTEConfig_1MBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_1MBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; + sRGXMMUPTEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUDevVAddrConfig_1MBDP + */ + sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask, + sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift)); + + + sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask, + sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift)); + + + sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000100000); + sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift = 20; + sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask, + 
sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift)); + + + sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00000fffff); + sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_1MBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig1MB + */ + gsPageSizeConfig1MB.psPDEConfig = &sRGXMMUPDEConfig_1MBDP; + gsPageSizeConfig1MB.psPTEConfig = &sRGXMMUPTEConfig_1MBDP; + gsPageSizeConfig1MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_1MBDP; + gsPageSizeConfig1MB.uiRefCount = 0; + gsPageSizeConfig1MB.uiMaxRefCount = 0; + + /* + * Setup sRGXMMUPDEConfig_2MBDP + */ + sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 8; + + sRGXMMUPDEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); + /* + * The hardware requires that PT tables need be 1<<6 = 64 byte aligned even + * if they contain fewer entries. + */ + sRGXMMUPDEConfig_2MBDP.uiAddrShift = 6; + sRGXMMUPDEConfig_2MBDP.uiAddrLog2Align = 6; + + sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); + sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 1; + + sRGXMMUPDEConfig_2MBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_2MBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; + sRGXMMUPDEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUPTEConfig_2MBDP + */ + sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 8; + + sRGXMMUPTEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xffffe00000); + sRGXMMUPTEConfig_2MBDP.uiAddrShift = 21; + sRGXMMUPTEConfig_2MBDP.uiAddrLog2Align = 21; + + sRGXMMUPTEConfig_2MBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_2MBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; + sRGXMMUPTEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUDevVAddrConfig_2MBDP + */ + sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift = 
RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask, + sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift)); + + + sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask, + sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift)); + + + sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000000000); + sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift = 21; + sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask, + sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift)); + + + sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00001fffff); + sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_2MBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig2MB + */ + gsPageSizeConfig2MB.psPDEConfig = &sRGXMMUPDEConfig_2MBDP; + gsPageSizeConfig2MB.psPTEConfig = &sRGXMMUPTEConfig_2MBDP; + gsPageSizeConfig2MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_2MBDP; + gsPageSizeConfig2MB.uiRefCount = 0; + gsPageSizeConfig2MB.uiMaxRefCount = 0; + + /* + * Setup sRGXMMUDeviceAttributes + */ + sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_VARPAGE_40BIT; + sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_3; + sRGXMMUDeviceAttributes.ui32BaseAlign = RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT; + sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPCEConfig; + sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig; + + /* Functions for deriving page table/dir/cat protection bits */ + sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8; + sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4; + 
sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8; + sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4; + sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8; + sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4; + + /* Functions for establishing configurations for PDE/PTE/DEVVADDR + on per-heap basis */ + sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB; + sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB; + + sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE4 = RGXGetPageSizeFromPDE4; + sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXGetPageSizeFromPDE8; + sRGXMMUDeviceAttributes.pfnGetPageSizeFromVirtAddr = NULL; + + psDeviceNode->psMMUDevAttrs = &sRGXMMUDeviceAttributes; + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + + eError = PVRSRV_OK; + +#if defined(PDUMP) + psDeviceNode->pfnMMUGetContextID = NULL; +#endif + + psDeviceNode->psMMUDevAttrs = NULL; + +#if defined(DEBUG) + PVR_DPF((PVR_DBG_MESSAGE, "Variable Page Size Heap Stats:")); + PVR_DPF((PVR_DBG_MESSAGE, "Max 4K page heaps: %d", + gsPageSizeConfig4KB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, "Current 4K page heaps (should be 0): %d", + gsPageSizeConfig4KB.uiRefCount)); + PVR_DPF((PVR_DBG_MESSAGE, "Max 16K page heaps: %d", + gsPageSizeConfig16KB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, "Current 16K page heaps (should be 0): %d", + gsPageSizeConfig16KB.uiRefCount)); + PVR_DPF((PVR_DBG_MESSAGE, "Max 64K page heaps: %d", + gsPageSizeConfig64KB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, "Current 64K page heaps (should be 0): %d", + gsPageSizeConfig64KB.uiRefCount)); + PVR_DPF((PVR_DBG_MESSAGE, "Max 256K page heaps: %d", + gsPageSizeConfig256KB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, "Current 256K page heaps (should be 0): %d", + gsPageSizeConfig256KB.uiRefCount)); + PVR_DPF((PVR_DBG_MESSAGE, "Max 1M page heaps: %d", + 
gsPageSizeConfig1MB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, "Current 1M page heaps (should be 0): %d", + gsPageSizeConfig1MB.uiRefCount)); + PVR_DPF((PVR_DBG_MESSAGE, "Max 2M page heaps: %d", + gsPageSizeConfig2MB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, "Current 2M page heaps (should be 0): %d", + gsPageSizeConfig2MB.uiRefCount)); +#endif + if (gsPageSizeConfig4KB.uiRefCount > 0 || + gsPageSizeConfig16KB.uiRefCount > 0 || + gsPageSizeConfig64KB.uiRefCount > 0 || + gsPageSizeConfig256KB.uiRefCount > 0 || + gsPageSizeConfig1MB.uiRefCount > 0 || + gsPageSizeConfig2MB.uiRefCount > 0 + ) + { + PVR_DPF((PVR_DBG_ERROR, "RGXMMUInit_Unregister: Unbalanced MMU API Usage (Internal error)")); + } + + return eError; +} + +/*************************************************************************/ /*! +@Function RGXDerivePCEProt4 +@Description calculate the PCE protection flags based on a 4 byte entry +@Return PVRSRV_ERROR + */ /**************************************************************************/ +static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags) +{ + return (uiProtFlags & MMU_PROTFLAGS_INVALID)?0:RGX_MMUCTRL_PC_DATA_VALID_EN; +} + + +/*************************************************************************/ /*! +@Function RGXDerivePCEProt8 +@Description calculate the PCE protection flags based on an 8 byte entry +@Return PVRSRV_ERROR + */ /**************************************************************************/ +static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize) +{ + PVR_UNREFERENCED_PARAMETER(uiProtFlags); + PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize); + + PVR_DPF((PVR_DBG_ERROR, "8-byte PCE not supported on this device")); + return 0; +} + + +/*************************************************************************/ /*! 
+@Function RGXDerivePDEProt4 +@Description derive the PDE protection flags based on a 4 byte entry +@Return PVRSRV_ERROR + */ /**************************************************************************/ +static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags) +{ + PVR_UNREFERENCED_PARAMETER(uiProtFlags); + PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device")); + return 0; +} + + +/*************************************************************************/ /*! +@Function RGXDerivePDEProt8 +@Description derive the PDE protection flags based on an 8 byte entry + +@Input uiLog2DataPageSize The log2 of the required page size. + E.g, for 4KiB pages, this parameter must be 12. + For 2MiB pages, it must be set to 21. + +@Return PVRSRV_ERROR + */ /**************************************************************************/ +static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize) +{ + IMG_UINT64 ret_value = 0; /* 0 means invalid */ + + if (!(uiProtFlags & MMU_PROTFLAGS_INVALID)) /* if not invalid */ + { + switch (uiLog2DataPageSize) + { + case RGX_HEAP_4KB_PAGE_SHIFT: + ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB; + break; + case RGX_HEAP_16KB_PAGE_SHIFT: + ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB; + break; + case RGX_HEAP_64KB_PAGE_SHIFT: + ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB; + break; + case RGX_HEAP_256KB_PAGE_SHIFT: + ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB; + break; + case RGX_HEAP_1MB_PAGE_SHIFT: + ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB; + break; + case RGX_HEAP_2MB_PAGE_SHIFT: + ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB; + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "%s:%d: in function<%s>: Invalid parameter log2_page_size. Expected {12, 14, 16, 18, 20, 21}. 
Got [%u]", + __FILE__, __LINE__, __func__, uiLog2DataPageSize)); + } + } + return ret_value; +} + + +/*************************************************************************/ /*! +@Function RGXDerivePTEProt4 +@Description calculate the PTE protection flags based on a 4 byte entry +@Return PVRSRV_ERROR + */ /**************************************************************************/ +static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags) +{ + PVR_UNREFERENCED_PARAMETER(uiProtFlags); + PVR_DPF((PVR_DBG_ERROR, "4-byte PTE not supported on this device")); + + return 0; +} + +/*************************************************************************/ /*! +@Function RGXDerivePTEProt8 +@Description calculate the PTE protection flags based on an 8 byte entry +@Return PVRSRV_ERROR + */ /**************************************************************************/ +static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize) +{ + IMG_UINT64 ui64MMUFlags=0; + + PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize); + + if (((MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE) & uiProtFlags) == (MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE)) + { + /* read/write */ + } + else if (MMU_PROTFLAGS_READABLE & uiProtFlags) + { + /* read only */ + ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_READ_ONLY_EN; + } + else if (MMU_PROTFLAGS_WRITEABLE & uiProtFlags) + { + /* write only */ + PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt8: write-only is not possible on this device")); + } + else if ((MMU_PROTFLAGS_INVALID & uiProtFlags) == 0) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt8: neither read nor write specified...")); + } + + /* cache coherency */ + if (MMU_PROTFLAGS_CACHE_COHERENT & uiProtFlags) + { + ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_CC_EN; + } + + /* cache setup */ + if ((MMU_PROTFLAGS_CACHED & uiProtFlags) == 0) + { + ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN; + } + + if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0) + { + ui64MMUFlags |= 
RGX_MMUCTRL_PT_DATA_VALID_EN; + } + + if (MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT) & uiProtFlags) + { + ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN; + } + + return ui64MMUFlags; +} + + +/*************************************************************************/ /*! +@Function RGXGetPageSizeConfig +@Description Set up configuration for variable sized data pages. + RGXPutPageSizeConfigCB has to be called to ensure correct + refcounting. +@Return PVRSRV_ERROR + */ /**************************************************************************/ +static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize, + const MMU_PxE_CONFIG **ppsMMUPDEConfig, + const MMU_PxE_CONFIG **ppsMMUPTEConfig, + const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig, + IMG_HANDLE *phPriv) +{ + MMU_PAGESIZECONFIG *psPageSizeConfig; + + switch (uiLog2DataPageSize) + { + case RGX_HEAP_4KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig4KB; + break; + case RGX_HEAP_16KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig16KB; + break; + case RGX_HEAP_64KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig64KB; + break; + case RGX_HEAP_256KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig256KB; + break; + case RGX_HEAP_1MB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig1MB; + break; + case RGX_HEAP_2MB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig2MB; + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "RGXGetPageSizeConfigCB: Invalid Data Page Size 1<<0x%x", + uiLog2DataPageSize)); + *phPriv = NULL; + return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; + } + + /* Refer caller's pointers to the data */ + *ppsMMUPDEConfig = psPageSizeConfig->psPDEConfig; + *ppsMMUPTEConfig = psPageSizeConfig->psPTEConfig; + *ppsMMUDevVAddrConfig = psPageSizeConfig->psDevVAddrConfig; + +#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT) + /* Increment ref-count - not that we're allocating anything here + (I'm using static structs), but one day we might, so we want + the Get/Put code to be 
balanced properly */ + psPageSizeConfig->uiRefCount++; + + /* This is purely for debug statistics */ + psPageSizeConfig->uiMaxRefCount = MAX(psPageSizeConfig->uiMaxRefCount, + psPageSizeConfig->uiRefCount); +#endif + + *phPriv = (IMG_HANDLE)(uintptr_t)uiLog2DataPageSize; + PVR_ASSERT (uiLog2DataPageSize == (IMG_UINT32)(uintptr_t)*phPriv); + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! +@Function RGXPutPageSizeConfig +@Description Tells this code that the mmu module is done with the + configurations set in RGXGetPageSizeConfig. This can + be a no-op. + Called after RGXGetPageSizeConfigCB. +@Return PVRSRV_ERROR + */ /**************************************************************************/ +static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv) +{ +#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT) + MMU_PAGESIZECONFIG *psPageSizeConfig; + IMG_UINT32 uiLog2DataPageSize; + + uiLog2DataPageSize = (IMG_UINT32)(uintptr_t) hPriv; + + switch (uiLog2DataPageSize) + { + case RGX_HEAP_4KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig4KB; + break; + case RGX_HEAP_16KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig16KB; + break; + case RGX_HEAP_64KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig64KB; + break; + case RGX_HEAP_256KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig256KB; + break; + case RGX_HEAP_1MB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig1MB; + break; + case RGX_HEAP_2MB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig2MB; + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "RGXPutPageSizeConfigCB: Invalid Data Page Size 1<<0x%x", + uiLog2DataPageSize)); + return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; + } + + /* Ref-count here is not especially useful, but it's an extra + check that the API is being used correctly */ + psPageSizeConfig->uiRefCount--; +#else + PVR_UNREFERENCED_PARAMETER(hPriv); +#endif + return PVRSRV_OK; +} + +static PVRSRV_ERROR 
RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize) +{ + PVR_UNREFERENCED_PARAMETER(ui32PDE); + PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize); + PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device")); + return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; +} + +static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize) +{ + switch (ui64PDE & (~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK)) + { + case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB: + *pui32Log2PageSize = RGX_HEAP_4KB_PAGE_SHIFT; + break; + case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB: + *pui32Log2PageSize = RGX_HEAP_16KB_PAGE_SHIFT; + break; + case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB: + *pui32Log2PageSize = RGX_HEAP_64KB_PAGE_SHIFT; + break; + case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB: + *pui32Log2PageSize = RGX_HEAP_256KB_PAGE_SHIFT; + break; + case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB: + *pui32Log2PageSize = RGX_HEAP_1MB_PAGE_SHIFT; + break; + case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB: + *pui32Log2PageSize = RGX_HEAP_2MB_PAGE_SHIFT; + break; + default: + return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; + } + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmmuinit.h b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmmuinit.h new file mode 100644 index 000000000000..48fd722eaaf6 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmmuinit.h @@ -0,0 +1,60 @@ +/*************************************************************************/ /*! +@File +@Title Device specific initialisation routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific MMU initialisation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +/* NB: this file is not to be included arbitrarily. It exists solely + for the linkage between rgxinit.c and rgxmmuinit.c, the former + being otherwise cluttered by the contents of the latter */ + +#ifndef _SRVKM_RGXMMUINIT_H_ +#define _SRVKM_RGXMMUINIT_H_ + +#include "device.h" +#include "img_types.h" +#include "mmu_common.h" +#include "img_defs.h" + +PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode); +PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode); + + +#endif /* #ifndef _SRVKM_RGXMMUINIT_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmulticore.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmulticore.c new file mode 100644 index 000000000000..8adc683676d7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxmulticore.c @@ -0,0 +1,126 @@ +/*************************************************************************/ /*! +@File rgxmulticore.c +@Title Functions related to multicore devices +@Codingstyle IMG +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Kernel mode workload estimation functionality. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "rgxdevice.h" +#include "rgxdefs_km.h" +#include "pdump_km.h" +#include "rgxmulticore.h" +#include "pvr_debug.h" + +/* + * RGXGetMultiCoreInfo: + * Read multicore HW registers and fill in data structure for clients. + * Return not supported on cores without multicore. 
+ */ +PVRSRV_ERROR RGXGetMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32CapsSize, + IMG_UINT32 *pui32NumCores, + IMG_UINT64 *pui64Caps) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + void *hPrivate = (void*)&psDevInfo->sLayerParams; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) + { + IMG_UINT32 ui32MulticoreRegBankOffset = (1 << RGX_GET_FEATURE_VALUE(psDevInfo, XPU_MAX_REGBANKS_ADDR_WIDTH)); + IMG_UINT32 ui32MulticoreGPUReg = RGX_CR_MULTICORE_GPU; + IMG_UINT32 ui32NumCores; + IMG_UINT32 i; + + ui32NumCores = RGXReadReg32(hPrivate, RGX_CR_MULTICORE_SYSTEM); +#if !defined(NO_HARDWARE) + PVR_LOG(("Multicore system has %u cores", ui32NumCores)); + /* check that the number of cores reported is in-bounds */ + if (ui32NumCores > (RGX_CR_MULTICORE_SYSTEM_MASKFULL >> RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT)) + { + PVR_DPF((PVR_DBG_ERROR, "invalid return (%u) read from MULTICORE_SYSTEM", ui32NumCores)); + return PVRSRV_ERROR_DEVICE_REGISTER_FAILED; + } +#else + /* simulation: currently we support one primary and one secondary */ + ui32NumCores = 2; +#endif + + *pui32NumCores = ui32NumCores; + /* CapsSize of zero is allowed to just return number of cores */ + if (ui32CapsSize > 0) + { +#if !defined(NO_HARDWARE) + PVR_LOG(("Configured for %u multicores", ui32NumCores)); +#endif + if (ui32CapsSize < ui32NumCores) + { + eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + } + else + { + for (i = 0; i < ui32NumCores; ++i) + { + *pui64Caps = RGXReadReg64(hPrivate, ui32MulticoreGPUReg) & ~0xFFFFFFFF; +#if !defined(NO_HARDWARE) + PVR_LOG(("Core %d has capabilities value 0x%x", i, (IMG_UINT32)(*pui64Caps) )); +#else + /* emulation for what we think caps are */ + *pui64Caps = i | ((i == 0) ? 
(RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_EN + | RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_EN) : 0) + | RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_EN + | RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_EN; +#endif + + ++pui64Caps; + ui32MulticoreGPUReg += ui32MulticoreRegBankOffset; + } + } + } + } + else + { + /* MULTICORE not supported on this device */ + PVR_DPF((PVR_DBG_ERROR, "Multicore not supported on this device")); + eError = PVRSRV_ERROR_NOT_SUPPORTED; + } + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxpdump.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxpdump.c new file mode 100644 index 000000000000..1d819d8d4076 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxpdump.c @@ -0,0 +1,542 @@ +/*************************************************************************/ /*! +@File rgxpdump.c +@Title Device specific pdump routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific pdump functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if defined(PDUMP) +#include "pvrsrv.h" +#include "devicemem_pdump.h" +#include "rgxpdump.h" +#include "rgx_bvnc_defs_km.h" +#include "pdumpdesc.h" + +/* + * There are two different set of functions one for META/RISCV and one for MIPS + * because the Pdump player does not implement the support for + * the MIPS MMU yet. So for MIPS builds we cannot use DevmemPDumpSaveToFileVirtual, + * we have to use DevmemPDumpSaveToFile instead. 
+ */ +static PVRSRV_ERROR _FWDumpSignatureBufferKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + PDUMPIF("DISABLE_SIGNATURE_BUFFER_DUMP", ui32PDumpFlags); + PDUMPELSE("DISABLE_SIGNATURE_BUFFER_DUMP", ui32PDumpFlags); + +#if defined(SUPPORT_FIRMWARE_GCOV) + /* Gcov */ + PDumpCommentWithFlags(ui32PDumpFlags, "** Gcov Buffer"); + DevmemPDumpSaveToFileVirtual(psDevInfo->psFirmwareGcovBufferMemDesc, + 0, + psDevInfo->ui32FirmwareGcovSize, + "firmware_gcov.img", + 0, + ui32PDumpFlags); +#endif + /* TA signatures */ + PDumpCommentWithFlags(ui32PDumpFlags, "** Dump TA signatures and checksums Buffer"); + DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigTAChecksMemDesc, + 0, + psDevInfo->ui32SigTAChecksSize, + "out.tasig", + 0, + ui32PDumpFlags); + + /* 3D signatures */ + PDumpCommentWithFlags(ui32PDumpFlags, "** Dump 3D signatures and checksums Buffer"); + DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSig3DChecksMemDesc, + 0, + psDevInfo->ui32Sig3DChecksSize, + "out.3dsig", + 0, + ui32PDumpFlags); + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM)) + { + /* TDM signatures */ + PDumpCommentWithFlags(ui32PDumpFlags, "** Dump TDM signatures and checksums Buffer"); + DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigTDM2DChecksMemDesc, + 0, + psDevInfo->ui32SigTDM2DChecksSize, + "out.tdmsig", + 0, + ui32PDumpFlags); + } + + PDUMPFI("DISABLE_SIGNATURE_BUFFER_DUMP", ui32PDumpFlags); + + return PVRSRV_OK; +} +static PVRSRV_ERROR _FWDumpTraceBufferKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT32 ui32ThreadNum, ui32Size, ui32OutFileOffset; + + PVR_UNREFERENCED_PARAMETER(psConnection); + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + /* Dump trace buffers */ + 
PDumpCommentWithFlags(ui32PDumpFlags, "** Dump trace buffers"); + for (ui32ThreadNum = 0, ui32OutFileOffset = 0; ui32ThreadNum < RGXFW_THREAD_NUM; ui32ThreadNum++) + { + /* + * Some compilers cannot cope with the use of offsetof() below - the specific problem being the use of + * a non-const variable in the expression, which it needs to be const. Typical compiler error produced is + * "expression must have a constant value". + */ + const IMG_DEVMEM_OFFSET_T uiTraceBufThreadNumOff + = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_TRACEBUF *)0)->sTraceBuf[ui32ThreadNum]); + + /* ui32TracePointer tracepointer */ + ui32Size = sizeof(IMG_UINT32); + DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufCtlMemDesc, + uiTraceBufThreadNumOff, + ui32Size, + "out.trace", + ui32OutFileOffset, + ui32PDumpFlags); + ui32OutFileOffset += ui32Size; + + /* next, dump size of trace buffer in DWords */ + ui32Size = sizeof(IMG_UINT32); + DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufCtlMemDesc, + offsetof(RGXFWIF_TRACEBUF, ui32TraceBufSizeInDWords), + ui32Size, + "out.trace", + ui32OutFileOffset, + ui32PDumpFlags); + ui32OutFileOffset += ui32Size; + + /* trace buffer */ + ui32Size = psDevInfo->psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32); + PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum]); + DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum], + 0, /* 0 offset in the trace buffer mem desc */ + ui32Size, + "out.trace", + ui32OutFileOffset, + ui32PDumpFlags); + ui32OutFileOffset += ui32Size; + + /* assert info buffer */ + ui32Size = RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR) + + RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR) + + sizeof(IMG_UINT32); + DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufCtlMemDesc, + offsetof(RGXFWIF_TRACEBUF, sTraceBuf) /* move to first element of sTraceBuf */ + + ui32ThreadNum * sizeof(RGXFWIF_TRACEBUF_SPACE) /* skip required number of sTraceBuf elements */ 
+ + offsetof(RGXFWIF_TRACEBUF_SPACE, sAssertBuf), /* offset into its sAssertBuf, to be pdumped */ + ui32Size, + "out.trace", + ui32OutFileOffset, + ui32PDumpFlags); + ui32OutFileOffset += ui32Size; + } + + /* FW HWPerf buffer is always allocated when PDUMP is defined, irrespective of HWPerf events being enabled/disabled */ + PVR_ASSERT(psDevInfo->psRGXFWIfHWPerfBufMemDesc); + + /* Dump hwperf buffer */ + PDumpCommentWithFlags(ui32PDumpFlags, "** Dump HWPerf Buffer"); + DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfHWPerfBufMemDesc, + 0, + psDevInfo->ui32RGXFWIfHWPerfBufSize, + "out.hwperf", + 0, + ui32PDumpFlags); + + return PVRSRV_OK; + +} + + +static PVRSRV_ERROR _MipsDumpSignatureBufferKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + /* TA signatures */ + PDumpCommentWithFlags(ui32PDumpFlags, "** Dump TA signatures and checksums Buffer"); + + DevmemPDumpSaveToFile(psDevInfo->psRGXFWSigTAChecksMemDesc, + 0, + psDevInfo->ui32SigTAChecksSize, + "out.tasig", + 0); + + /* 3D signatures */ + PDumpCommentWithFlags(ui32PDumpFlags, "** Dump 3D signatures and checksums Buffer"); + DevmemPDumpSaveToFile(psDevInfo->psRGXFWSig3DChecksMemDesc, + 0, + psDevInfo->ui32Sig3DChecksSize, + "out.3dsig", + 0); + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM)) + { + /* TDM signatures */ + PDumpCommentWithFlags(ui32PDumpFlags, "** Dump TDM signatures and checksums Buffer"); + DevmemPDumpSaveToFile(psDevInfo->psRGXFWSigTDM2DChecksMemDesc, + 0, + psDevInfo->ui32SigTDM2DChecksSize, + "out.tdmsig", + 0); + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR _MipsDumpTraceBufferKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags) +{ + IMG_UINT32 ui32ThreadNum, ui32Size, ui32OutFileOffset; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + 
PVR_UNREFERENCED_PARAMETER(psConnection); + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + /* Dump trace buffers */ + PDumpCommentWithFlags(ui32PDumpFlags, "** Dump trace buffers"); + for (ui32ThreadNum = 0, ui32OutFileOffset = 0; ui32ThreadNum < RGXFW_THREAD_NUM; ui32ThreadNum++) + { + /* + * Some compilers cannot cope with the use of offsetof() below - the specific problem being the use of + * a non-const variable in the expression, which it needs to be const. Typical compiler error produced is + * "expression must have a constant value". + */ + const IMG_DEVMEM_OFFSET_T uiTraceBufOff + = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_TRACEBUF *)0)->sTraceBuf[ui32ThreadNum]); + + /* Same again... */ + const IMG_DEVMEM_OFFSET_T uiTraceBufSpaceAssertBufOff + = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_TRACEBUF_SPACE *)0)->sAssertBuf); + + /* ui32TracePointer tracepointer */ + ui32Size = sizeof(IMG_UINT32); + DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfTraceBufCtlMemDesc, + uiTraceBufOff, + ui32Size, + "out.trace", + ui32OutFileOffset); + ui32OutFileOffset += ui32Size; + + /* next, dump size of trace buffer in DWords */ + ui32Size = sizeof(IMG_UINT32); + DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfTraceBufCtlMemDesc, + offsetof(RGXFWIF_TRACEBUF, ui32TraceBufSizeInDWords), + ui32Size, + "out.trace", + ui32OutFileOffset); + ui32OutFileOffset += ui32Size; + + /* trace buffer */ + ui32Size = psDevInfo->psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32); + PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum]); + DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum], + 0, /* 0 offset in the trace buffer mem desc */ + ui32Size, + "out.trace", + ui32OutFileOffset); + ui32OutFileOffset += ui32Size; + + /* assert info buffer */ + ui32Size = RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR) + + RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR) + + sizeof(IMG_UINT32); + DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfTraceBufCtlMemDesc, + 
uiTraceBufOff + uiTraceBufSpaceAssertBufOff, + ui32Size, + "out.trace", + ui32OutFileOffset); + ui32OutFileOffset += ui32Size; + } + + /* Dump hwperf buffer */ + PDumpCommentWithFlags(ui32PDumpFlags, "** Dump HWPerf Buffer"); + DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfHWPerfBufMemDesc, + 0, + psDevInfo->ui32RGXFWIfHWPerfBufSize, + "out.hwperf", + 0); + + return PVRSRV_OK; + +} + + +/* + * PVRSRVPDumpSignatureBufferKM + */ +PVRSRV_ERROR PVRSRVPDumpSignatureBufferKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags) +{ + if ((psDeviceNode->pfnCheckDeviceFeature) && + PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, MIPS)) + { + return _MipsDumpSignatureBufferKM(psConnection, + psDeviceNode, + ui32PDumpFlags); + } + else + { + return _FWDumpSignatureBufferKM(psConnection, + psDeviceNode, + ui32PDumpFlags); + } +} + + +PVRSRV_ERROR PVRSRVPDumpCRCSignatureCheckKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR PVRSRVPDumpTraceBufferKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags) +{ + if ((psDeviceNode->pfnCheckDeviceFeature) && + PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, MIPS)) + { + return _MipsDumpTraceBufferKM(psConnection, psDeviceNode, ui32PDumpFlags); + } + else + { + return _FWDumpTraceBufferKM(psConnection, psDeviceNode, ui32PDumpFlags); + } +} + +PVRSRV_ERROR RGXPDumpPrepareOutputImageDescriptorHdr(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32HeaderSize, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32LogicalWidth, + IMG_UINT32 ui32LogicalHeight, + IMG_UINT32 ui32PhysicalWidth, + IMG_UINT32 ui32PhysicalHeight, + PDUMP_PIXEL_FORMAT ePixFmt, + IMG_MEMLAYOUT eMemLayout, + IMG_FB_COMPRESSION eFBCompression, + const IMG_UINT32 *paui32FBCClearColour, 
+ PDUMP_FBC_SWIZZLE eFBCSwizzle, + IMG_PBYTE pbyPDumpImageHdr) +{ + IMG_PUINT32 pui32Word; + IMG_UINT32 ui32HeaderDataSize; + + /* Validate parameters */ + if (((IMAGE_HEADER_SIZE & ~(HEADER_WORD1_SIZE_CLRMSK >> HEADER_WORD1_SIZE_SHIFT)) != 0) || + ((IMAGE_HEADER_VERSION & ~(HEADER_WORD1_VERSION_CLRMSK >> HEADER_WORD1_VERSION_SHIFT)) != 0)) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + pui32Word = IMG_OFFSET_ADDR(pbyPDumpImageHdr, 0); + pui32Word[0] = (IMAGE_HEADER_TYPE << HEADER_WORD0_TYPE_SHIFT); + pui32Word[1] = (IMAGE_HEADER_SIZE << HEADER_WORD1_SIZE_SHIFT) | + (IMAGE_HEADER_VERSION << HEADER_WORD1_VERSION_SHIFT); + + ui32HeaderDataSize = ui32DataSize; + if (eFBCompression != IMG_FB_COMPRESSION_NONE) + { + ui32HeaderDataSize += ui32HeaderSize; + } + pui32Word[2] = ui32HeaderDataSize << HEADER_WORD2_DATA_SIZE_SHIFT; + + pui32Word[3] = ui32LogicalWidth << IMAGE_HEADER_WORD3_LOGICAL_WIDTH_SHIFT; + pui32Word[4] = ui32LogicalHeight << IMAGE_HEADER_WORD4_LOGICAL_HEIGHT_SHIFT; + + pui32Word[5] = ePixFmt << IMAGE_HEADER_WORD5_FORMAT_SHIFT; + + pui32Word[6] = ui32PhysicalWidth << IMAGE_HEADER_WORD6_PHYSICAL_WIDTH_SHIFT; + pui32Word[7] = ui32PhysicalHeight << IMAGE_HEADER_WORD7_PHYSICAL_HEIGHT_SHIFT; + + pui32Word[8] = IMAGE_HEADER_WORD8_STRIDE_POSITIVE | IMAGE_HEADER_WORD8_BIFTYPE_NONE; + + switch (eMemLayout) + { + case IMG_MEMLAYOUT_STRIDED: + pui32Word[8] |= IMAGE_HEADER_WORD8_TWIDDLING_STRIDED; + break; + case IMG_MEMLAYOUT_TWIDDLED: + pui32Word[8] |= IMAGE_HEADER_WORD8_TWIDDLING_NTWIDDLE; + break; + default: + PVR_DPF((PVR_DBG_ERROR, "Unsupported memory layout - %d", eMemLayout)); + return PVRSRV_ERROR_UNSUPPORTED_MEMORY_LAYOUT; + } + + pui32Word[9] = 0; + if (eFBCompression != IMG_FB_COMPRESSION_NONE) + { + switch (PVRSRV_GET_DEVICE_FEATURE_VALUE(psDeviceNode, FBCDC_ALGORITHM)) + { + case 1: + pui32Word[9] |= IMAGE_HEADER_WORD9_FBCCOMPAT_BASE; + break; + case 2: + pui32Word[9] |= IMAGE_HEADER_WORD9_FBCCOMPAT_V2; + break; + case 3: + pui32Word[9] |= 
IMAGE_HEADER_WORD9_FBCCOMPAT_V3_0_LAYOUT2; + break; + case 4: + pui32Word[9] |= IMAGE_HEADER_WORD9_FBCCOMPAT_V4; + + if (eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8 || + eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4 || + eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2) + { + pui32Word[9] |= IMAGE_HEADER_WORD9_LOSSY_ON; + } + + pui32Word[9] |= (eFBCSwizzle << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) & IMAGE_HEADER_WORD9_SWIZZLE_CLRMSK; + + break; + case 50: + pui32Word[9] |= IMAGE_HEADER_WORD9_FBCCOMPAT_TFBC; + + if (eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8 || + eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4 || + eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2) + { + pui32Word[9] |= IMAGE_HEADER_WORD9_LOSSY_ON; + } + + break; + default: + PVR_DPF((PVR_DBG_ERROR, "Unsupported algorithm - %d", + PVRSRV_GET_DEVICE_FEATURE_VALUE(psDeviceNode, FBCDC_ALGORITHM))); + return PVRSRV_ERROR_NOT_ENABLED; + } + } + + switch (eFBCompression) + { + case IMG_FB_COMPRESSION_NONE: + break; + case IMG_FB_COMPRESSION_DIRECT_8x8: + case IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8: + pui32Word[8] |= IMAGE_HEADER_WORD8_FBCTYPE_8X8; + pui32Word[9] |= IMAGE_HEADER_WORD9_FBCDECOR_ENABLE; + break; + case IMG_FB_COMPRESSION_DIRECT_16x4: + case IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4: + pui32Word[8] |= IMAGE_HEADER_WORD8_FBCTYPE_16x4; + pui32Word[9] |= IMAGE_HEADER_WORD9_FBCDECOR_ENABLE; + break; + case IMG_FB_COMPRESSION_DIRECT_32x2: + case IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2: + pui32Word[9] |= IMAGE_HEADER_WORD9_FBCDECOR_ENABLE; + break; + default: + PVR_DPF((PVR_DBG_ERROR, "Unsupported compression mode - %d", eFBCompression)); + return PVRSRV_ERROR_UNSUPPORTED_FB_COMPRESSION_MODE; + } + + pui32Word[10] = paui32FBCClearColour[0]; + pui32Word[11] = paui32FBCClearColour[1]; + pui32Word[12] = paui32FBCClearColour[2]; + pui32Word[13] = paui32FBCClearColour[3]; + + return PVRSRV_OK; +} + +PVRSRV_ERROR 
RGXPDumpPrepareOutputDataDescriptorHdr(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32HeaderType, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32ElementType, + IMG_UINT32 ui32ElementCount, + IMG_PBYTE pbyPDumpDataHdr) +{ + IMG_PUINT32 pui32Word; + + /* Validate parameters */ + if (((DATA_HEADER_SIZE & ~(HEADER_WORD1_SIZE_CLRMSK >> HEADER_WORD1_SIZE_SHIFT)) != 0) || + ((DATA_HEADER_VERSION & ~(HEADER_WORD1_VERSION_CLRMSK >> HEADER_WORD1_VERSION_SHIFT)) != 0)) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + pui32Word = IMG_OFFSET_ADDR(pbyPDumpDataHdr, 0); + + if (ui32HeaderType == DATA_HEADER_TYPE) + { + pui32Word[0] = (ui32HeaderType << HEADER_WORD0_TYPE_SHIFT); + pui32Word[1] = (DATA_HEADER_SIZE << HEADER_WORD1_SIZE_SHIFT) | + (DATA_HEADER_VERSION << HEADER_WORD1_VERSION_SHIFT); + pui32Word[2] = ui32DataSize << HEADER_WORD2_DATA_SIZE_SHIFT; + + pui32Word[3] = ui32ElementType << DATA_HEADER_WORD3_ELEMENT_TYPE_SHIFT; + pui32Word[4] = ui32ElementCount << DATA_HEADER_WORD4_ELEMENT_COUNT_SHIFT; + } + + if (ui32HeaderType == IBIN_HEADER_TYPE) + { + pui32Word[0] = (ui32HeaderType << HEADER_WORD0_TYPE_SHIFT); + pui32Word[1] = (IBIN_HEADER_SIZE << HEADER_WORD1_SIZE_SHIFT) | + (IBIN_HEADER_VERSION << HEADER_WORD1_VERSION_SHIFT); + pui32Word[2] = ui32DataSize << HEADER_WORD2_DATA_SIZE_SHIFT; + } + + return PVRSRV_OK; +} +#endif /* PDUMP */ + +/****************************************************************************** + End of file (rgxpdump.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxpdump.h b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxpdump.h new file mode 100644 index 000000000000..ddb74f321f66 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxpdump.h @@ -0,0 +1,178 @@ +/*************************************************************************/ /*! 
+@File +@Title RGX pdump Functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX pdump functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "rgxdevice.h" +#include "device.h" +#include "devicemem.h" +#include "pdump_km.h" +#include "pvr_debug.h" + +#if defined(PDUMP) +/*! +******************************************************************************* + + @Function PVRSRVPDumpSignatureBufferKM + + @Description + + Dumps TA and 3D signature and checksum buffers + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVPDumpSignatureBufferKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32PDumpFlags); + +/*! +******************************************************************************* + + @Function PVRSRVPDumpIntegritySignatureCheckKM + + @Description + + Poll on FBC/FBDC end-to-end signature status + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVPDumpCRCSignatureCheckKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32PDumpFlags); + +/*! 
+******************************************************************************* + + @Function PVRSRVPDumpTraceBufferKM + + @Description + + Dumps TA and 3D signature and checksum buffers + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVPDumpTraceBufferKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags); + +/*! +******************************************************************************* + + @Function RGXPDumpPrepareOutputImageDescriptorHdr + + @Description + + Dumps the header for an OutputImage command + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXPDumpPrepareOutputImageDescriptorHdr(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32HeaderSize, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32LogicalWidth, + IMG_UINT32 ui32LogicalHeight, + IMG_UINT32 ui32PhysicalWidth, + IMG_UINT32 ui32PhysicalHeight, + PDUMP_PIXEL_FORMAT ePixFmt, + IMG_MEMLAYOUT eMemLayout, + IMG_FB_COMPRESSION eFBCompression, + const IMG_UINT32 *paui32FBCClearColour, + PDUMP_FBC_SWIZZLE eFBCSwizzle, + IMG_PBYTE pbyPDumpImageHdr); + +/*! 
+******************************************************************************* + + @Function RGXPDumpPrepareOutputDataDescriptorHdr + + @Description + + Dumps the header for an OutputData command + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXPDumpPrepareOutputDataDescriptorHdr(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32HeaderType, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32ElementType, + IMG_UINT32 ui32ElementCount, + IMG_PBYTE pbyPDumpDataHdr); + +#else /* PDUMP */ + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVPDumpSignatureBufferKM) +#endif +static INLINE PVRSRV_ERROR +PVRSRVPDumpSignatureBufferKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVPDumpTraceBufferKM) +#endif +static INLINE PVRSRV_ERROR +PVRSRVPDumpTraceBufferKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + return PVRSRV_OK; +} +#endif /* PDUMP */ +/****************************************************************************** + End of file (rgxpdump.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxpower.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxpower.c new file mode 100644 index 000000000000..80d1b88d00ee --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxpower.c @@ -0,0 +1,1252 @@ +/*************************************************************************/ /*! 
+@File +@Title Device specific power routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if defined(LINUX) +#include +#else +#include +#endif + +#include "rgxpower.h" +#include "rgxinit.h" +#include "rgx_fwif_km.h" +#include "rgxfwutils.h" +#include "pdump_km.h" +#include "pvr_debug.h" +#include "osfunc.h" +#include "rgxdebug.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "rgxtimecorr.h" +#include "devicemem_utils.h" +#include "htbserver.h" +#include "rgxstartstop.h" +#include "rgxfwimageutils.h" +#include "sync.h" +#include "rgxdefs_km.h" + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#endif +#if defined(SUPPORT_LINUX_DVFS) +#include "pvr_dvfs_device.h" +#endif + +static PVRSRV_ERROR RGXFWNotifyHostTimeout(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_KCCB_CMD sCmd; + PVRSRV_ERROR eError; + IMG_UINT32 ui32CmdKCCBSlot; + + /* Send the Timeout notification to the FW */ + sCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; + sCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ; + sCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_HOST_TIMEOUT; + + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, + &sCmd, + PDUMP_FLAGS_NONE, + &ui32CmdKCCBSlot); + + return eError; +} + +static void _RGXUpdateGPUUtilStats(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb; + IMG_UINT64 *paui64StatsCounters; + IMG_UINT64 ui64LastPeriod; + IMG_UINT64 ui64LastState; + IMG_UINT64 ui64LastTime; 
+ IMG_UINT64 ui64TimeNow; + + psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb; + paui64StatsCounters = &psUtilFWCb->aui64StatsCounters[0]; + + OSLockAcquire(psDevInfo->hGPUUtilLock); + + ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64()); + + /* Update counters to account for the time since the last update */ + ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord); + ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFWCb->ui64LastWord); + ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime); + paui64StatsCounters[ui64LastState] += ui64LastPeriod; + + /* Update state and time of the latest update */ + psUtilFWCb->ui64LastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState); + + OSLockRelease(psDevInfo->hGPUUtilLock); +} + +static INLINE PVRSRV_ERROR RGXDoStop(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + if (psDevConfig->pfnTDRGXStop == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPrePowerState: TDRGXStop not implemented!")); + return PVRSRV_ERROR_NOT_IMPLEMENTED; + } + + eError = psDevConfig->pfnTDRGXStop(psDevConfig->hSysData); +#else + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + eError = RGXStop(&psDevInfo->sLayerParams); +#endif + + return eError; +} + +/* + RGXPrePowerState +*/ +PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + IMG_BOOL bForced) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + + if (PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp)) + { + return PVRSRV_OK; + } + + if ((eNewPowerState != eCurrentPowerState) && + (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON)) + { + 
PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_KCCB_CMD sPowCmd; + IMG_UINT32 ui32CmdKCCBSlot; + + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + + /* Send the Power off request to the FW */ + sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; + sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_OFF_REQ; + sPowCmd.uCmdData.sPowData.uPowerReqData.bForced = bForced; + + eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", + __func__)); + return eError; + } + + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, + &sPowCmd, + PDUMP_FLAGS_NONE, + &ui32CmdKCCBSlot); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to send Power off request", + __func__)); + return eError; + } + + /* Wait for the firmware to complete processing. It cannot use PVRSRVWaitForValueKM as it relies + on the EventObject which is signalled in this MISR */ + eError = RGXPollForGPCommandCompletion(psDeviceNode, + psDevInfo->psPowSyncPrim->pui32LinAddr, + 0x1, 0xFFFFFFFF); + + /* Check the Power state after the answer */ + if (eError == PVRSRV_OK) + { + /* Finally, de-initialise some registers. */ + if (psFwSysData->ePowState == RGXFWIF_POW_OFF) + { +#if !defined(NO_HARDWARE) +#if defined(RGX_FW_IRQ_OS_COUNTERS) + IMG_UINT32 ui32idx = RGXFW_HOST_OS; +#else + IMG_UINT32 ui32idx; + for_each_irq_cnt(ui32idx) +#endif /* RGX_FW_IRQ_OS_COUNTERS */ + { + IMG_UINT32 ui32IrqCnt; + + get_irq_cnt_val(ui32IrqCnt, ui32idx, psDevInfo); + + /* Wait for the pending FW processor to host interrupts to come back. */ + eError = PVRSRVPollForValueKM(psDeviceNode, + (IMG_UINT32 __iomem *)&psDevInfo->aui32SampleIRQCount[ui32idx], + ui32IrqCnt, + 0xffffffff, + POLL_FLAG_LOG_ERROR); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Wait for pending interrupts failed." 
MSG_IRQ_CNT_TYPE " %u Host: %u, FW: %u", + __func__, + ui32idx, + psDevInfo->aui32SampleIRQCount[ui32idx], + ui32IrqCnt)); + + RGX_WaitForInterruptsTimeout(psDevInfo); + } + } +#endif /* NO_HARDWARE */ + + /* Update GPU frequency and timer correlation related data */ + RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_POWER); + + /* Update GPU state counters */ + _RGXUpdateGPUUtilStats(psDevInfo); + +#if defined(SUPPORT_LINUX_DVFS) + eError = SuspendDVFS(); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to suspend DVFS", __func__)); + return eError; + } +#endif + + psDevInfo->bRGXPowered = IMG_FALSE; + + eError = RGXDoStop(psDeviceNode); + if (eError != PVRSRV_OK) + { + /* Power down failures are treated as successful since the power was removed but logged. */ + PVR_DPF((PVR_DBG_WARNING, "%s: RGXDoStop failed (%s)", + __func__, PVRSRVGetErrorString(eError))); + psDevInfo->ui32ActivePMReqNonIdle++; + eError = PVRSRV_OK; + } + } + else + { + /* the sync was updated but the pow state isn't off -> the FW denied the transition */ + eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED; + + if (bForced) + { /* It is an error for a forced request to be denied */ + PVR_DPF((PVR_DBG_ERROR, + "%s: Failure to power off during a forced power off. 
FW: %d", + __func__, psFwSysData->ePowState)); + } + } + } + else if (eError == PVRSRV_ERROR_TIMEOUT) + { + /* timeout waiting for the FW to ack the request: return timeout */ + PVR_DPF((PVR_DBG_WARNING, + "%s: Timeout waiting for powoff ack from the FW", + __func__)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error waiting for powoff ack from the FW (%s)", + __func__, PVRSRVGetErrorString(eError))); + eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE; + } + } + + return eError; +} + +#if defined(TRACK_FW_BOOT) +static INLINE void RGXCheckFWBootStage(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + FW_BOOT_STAGE eStage; + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + /* Boot stage temporarily stored to the register below */ + eStage = OSReadHWReg32(psDevInfo->pvRegsBaseKM, + RGX_FW_BOOT_STAGE_REGISTER); + } + else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + { + eStage = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_SCRATCH14); + } + else + { + IMG_BYTE *pbBootData; + + if (PVRSRV_OK != DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc, + (void**)&pbBootData)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Could not acquire pointer to FW boot stage", __func__)); + eStage = FW_BOOT_STAGE_NOT_AVAILABLE; + } + else + { + pbBootData += RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA); + + eStage = *(FW_BOOT_STAGE*)&pbBootData[RGXMIPSFW_BOOT_STAGE_OFFSET]; + + if (eStage == FW_BOOT_STAGE_TLB_INIT_FAILURE) + { + RGXMIPSFW_BOOT_DATA *psBootData = + (RGXMIPSFW_BOOT_DATA*) (pbBootData + RGXMIPSFW_BOOTLDR_CONF_OFFSET); + + PVR_LOG(("MIPS TLB could not be initialised. 
Boot data info:" + " num PT pages %u, log2 PT page size %u, PT page addresses" + " %"IMG_UINT64_FMTSPECx " %"IMG_UINT64_FMTSPECx + " %"IMG_UINT64_FMTSPECx " %"IMG_UINT64_FMTSPECx, + psBootData->ui32PTNumPages, + psBootData->ui32PTLog2PageSize, + psBootData->aui64PTPhyAddr[0U], + psBootData->aui64PTPhyAddr[1U], + psBootData->aui64PTPhyAddr[2U], + psBootData->aui64PTPhyAddr[3U])); + } + + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc); + } + } + + PVR_LOG(("%s: FW reached boot stage %i/%i.", + __func__, eStage, FW_BOOT_INIT_DONE)); +} +#endif + +static INLINE PVRSRV_ERROR RGXDoStart(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; + + if (psDevConfig->pfnTDRGXStart == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: TDRGXStart not implemented!")); + return PVRSRV_ERROR_NOT_IMPLEMENTED; + } + + eError = psDevConfig->pfnTDRGXStart(psDevConfig->hSysData); +#else + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + eError = RGXStart(&psDevInfo->sLayerParams); +#endif + + return eError; +} + + +#if defined(NO_HARDWARE) && defined(PDUMP) + +#if 0 +#include "rgxtbdefs.h" +#else + +/* + Register RGX_TB_SYSTEM_STATUS +*/ +#define RGX_TB_SYSTEM_STATUS (0x00E0U) +#define RGX_TB_SYSTEM_STATUS_MASKFULL (IMG_UINT64_C(0x00000000030100FF)) +/* +directly indicates the status of power_abort flag from the power management controller (RGX_PRCM) +*/ +#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_ABORT_SHIFT (25U) +#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_ABORT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF)) +#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_ABORT_EN (IMG_UINT64_C(0X0000000002000000)) +/* +directly indicates the status of power_complete flag from the power management controller (RGX_PRCM) +*/ +#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_COMPLETE_SHIFT (24U) +#define 
RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_COMPLETE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF)) +#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_COMPLETE_EN (IMG_UINT64_C(0X0000000001000000)) +/* +directly indicates the status of GPU's hmmu_irq +*/ +#define RGX_TB_SYSTEM_STATUS_HMMU_IRQ_SHIFT (16U) +#define RGX_TB_SYSTEM_STATUS_HMMU_IRQ_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF)) +#define RGX_TB_SYSTEM_STATUS_HMMU_IRQ_EN (IMG_UINT64_C(0X0000000000010000)) +/* +directly indicates the status of GPU's irq per OS_ID +*/ +#define RGX_TB_SYSTEM_STATUS_IRQ_SHIFT (1U) +#define RGX_TB_SYSTEM_STATUS_IRQ_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFE01)) +/* +old deprecated single irq +*/ +#define RGX_TB_SYSTEM_STATUS_OLD_IRQ_SHIFT (0U) +#define RGX_TB_SYSTEM_STATUS_OLD_IRQ_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE)) +#endif + +static PVRSRV_ERROR +_ValidateIrqs(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_UINT32 ui32OSid; + IMG_UINT32 ui32ConfigFlags; + PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; + + { + PVRSRV_ERROR eError; + RGXFWIF_SYSDATA *psFwSysData; + + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfFwSysDataMemDesc, (void **)&psFwSysData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire OS Config (%u)", + __func__, + eError)); + return eError; + } + + ui32ConfigFlags = psFwSysData->ui32ConfigFlags; + + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfFwSysDataMemDesc); + } + + /* Check if the Validation IRQ flag is set */ + if ((ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_IRQ) == 0) + { + return PVRSRV_OK; + } + + PDUMPIF("IMG_PVR_TESTBENCH", ui32PDumpFlags); + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Poll for TB irq status to be set (irqs signalled)..."); + PDUMPREGPOL(RGX_TB_PDUMPREG_NAME, + RGX_TB_SYSTEM_STATUS, + ~RGX_TB_SYSTEM_STATUS_IRQ_CLRMSK, + ~RGX_TB_SYSTEM_STATUS_IRQ_CLRMSK, + ui32PDumpFlags, + PDUMP_POLL_OPERATOR_EQUAL); + + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "... 
and then clear them"); + for (ui32OSid = 0; ui32OSid < RGXFW_MAX_NUM_OS; ui32OSid++) + { + PDUMPREG32(RGX_PDUMPREG_NAME, + RGX_CR_IRQ_OS0_EVENT_CLEAR + ui32OSid * 0x10000, + RGX_CR_IRQ_OS0_EVENT_CLEAR_MASKFULL, + ui32PDumpFlags); + } + + PDUMPFI("IMG_PVR_TESTBENCH", ui32PDumpFlags); + + /* Poll on all the interrupt status registers for all OSes */ + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Validate Interrupt lines."); + + for (ui32OSid = 0; ui32OSid < RGXFW_MAX_NUM_OS; ui32OSid++) + { + PDUMPREGPOL(RGX_PDUMPREG_NAME, + RGX_CR_IRQ_OS0_EVENT_STATUS + ui32OSid * 0x10000, + 0x0, + 0xFFFFFFFF, + ui32PDumpFlags, + PDUMP_POLL_OPERATOR_EQUAL); + } + + return PVRSRV_OK; +} +#endif /* defined(NO_HARDWARE) && defined(PDUMP) */ + +#if defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE) +/* + * To validate the MTS unit we do the following: + * - Immediately after firmware loading for each OSID + * - Write the OSid to a memory location shared with FW + * - Kick the register of that OSid + * (Uncounted, DM 0) + * - FW clears the memory location if OSid matches + * - Host checks that memory location is cleared + * + * See firmware/devices/rgx/rgxfw_bg.c + */ +static PVRSRV_ERROR RGXVirtualisationPowerupSidebandTest(PVRSRV_DEVICE_NODE *psDeviceNode, + RGXFWIF_SYSINIT *psFwSysInit, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_UINT32 ui32ScheduleRegister; + IMG_UINT32 ui32OSid; + IMG_UINT32 ui32KickType; + IMG_UINT32 ui32OsRegBanksMapped = (psDeviceNode->psDevConfig->ui32RegsSize / RGX_VIRTUALISATION_REG_SIZE_PER_OS); + + /* Nothing to do if the device does not support GPU_VIRTUALISATION */ + if (!PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, GPU_VIRTUALISATION)) + { + return PVRSRV_OK; + } + + PVR_DPF((PVR_DBG_MESSAGE, "Testing per-os kick registers:")); + + /* Need to get the maximum supported OSid value from the per-device info. 
+ * This can change according to how much memory is physically present and + * what the carve-out mapping looks like (provided by the module load-time + * parameters). + */ + ui32OsRegBanksMapped = MIN(ui32OsRegBanksMapped, psDeviceNode->ui32NumOSId); + + if (ui32OsRegBanksMapped != RGXFW_MAX_NUM_OS) + { + PVR_DPF((PVR_DBG_WARNING, "The register bank mapped into kernel VA does not cover all OS' registers:")); + PVR_DPF((PVR_DBG_WARNING, "Maximum OS count = %d / Per-os register banks mapped = %d", RGXFW_MAX_NUM_OS, ui32OsRegBanksMapped)); + PVR_DPF((PVR_DBG_WARNING, "Only first %d MTS registers will be tested", ui32OsRegBanksMapped)); + } + + ui32KickType = RGX_CR_MTS_SCHEDULE_DM_DM0 | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED; + + for (ui32OSid = 0; ui32OSid < ui32OsRegBanksMapped; ui32OSid++) + { + /* set Test field */ + psFwSysInit->ui32OSKickTest = (ui32OSid << RGXFWIF_KICK_TEST_OSID_SHIFT) | RGXFWIF_KICK_TEST_ENABLED_BIT; + /* Force a read-back to memory to avoid posted writes on certain buses */ + (void) psFwSysInit->ui32OSKickTest; + OSWriteMemoryBarrier(); + + /* kick register */ + ui32ScheduleRegister = RGX_CR_MTS_SCHEDULE + (ui32OSid * RGX_VIRTUALISATION_REG_SIZE_PER_OS); + PVR_DPF((PVR_DBG_MESSAGE, " Testing OS: %u, Kick Reg: %X", + ui32OSid, + ui32ScheduleRegister)); + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32ScheduleRegister, ui32KickType); + OSMemoryBarrier(); + + /* Wait test enable bit to be unset */ + if (PVRSRVPollForValueKM(psDeviceNode, + (IMG_UINT32 *)&psFwSysInit->ui32OSKickTest, + 0, + RGXFWIF_KICK_TEST_ENABLED_BIT, + POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Testing OS %u kick register failed: firmware did not clear test location (contents: 0x%X)", + ui32OSid, + psFwSysInit->ui32OSKickTest)); + + return PVRSRV_ERROR_TIMEOUT; + } + + /* sanity check that the value is what we expect */ + if (psFwSysInit->ui32OSKickTest != 0) + { + PVR_DPF((PVR_DBG_ERROR, "Testing OS %u kick register failed: firmware 
wrote 0x%X to test location", + ui32OSid, + psFwSysInit->ui32OSKickTest)); + return PVRSRV_ERROR_INIT_FAILURE; + } + + PVR_DPF((PVR_DBG_MESSAGE, " PASS")); + } + + PVR_LOG(("MTS passed sideband tests")); + return PVRSRV_OK; +} +#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE) */ + +/* + RGXPostPowerState +*/ +PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + IMG_BOOL bForced) +{ + if ((eNewPowerState != eCurrentPowerState) && + (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON)) + { + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + if (PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp)) + { + psDevInfo->bRGXPowered = IMG_TRUE; + return PVRSRV_OK; + } + + if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF) + { + /* Update timer correlation related data */ + RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_POWER); + + /* Update GPU state counters */ + _RGXUpdateGPUUtilStats(psDevInfo); + + eError = RGXDoStart(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: RGXDoStart failed")); + return eError; + } + + OSMemoryBarrier(); + + /* + * Check whether the FW has started by polling on bFirmwareStarted flag + */ + if (PVRSRVPollForValueKM(psDeviceNode, + (IMG_UINT32 __iomem *)&psDevInfo->psRGXFWIfSysInit->bFirmwareStarted, + IMG_TRUE, + 0xFFFFFFFF, + POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: Polling for 'FW started' flag failed.")); + eError = PVRSRV_ERROR_TIMEOUT; + +#if defined(TRACK_FW_BOOT) + RGXCheckFWBootStage(psDevInfo); +#endif + + /* + * When bFirmwareStarted fails some info may be gained by doing the following + * debug dump but unfortunately it could be potentially dangerous if the reason + * for not booting is the GPU power is not ON. 
However, if we have reached this + * point the System Layer has returned without errors, we assume the GPU power + * is indeed ON. + */ + RGXDumpRGXDebugSummary(NULL, NULL, psDeviceNode->pvDevice, IMG_TRUE); + RGXDumpRGXRegisters(NULL, NULL, psDeviceNode->pvDevice); + + return eError; + } + +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Wait for the Firmware to start."); + eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfSysInitMemDesc, + offsetof(RGXFWIF_SYSINIT, bFirmwareStarted), + IMG_TRUE, + 0xFFFFFFFFU, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXPostPowerState: problem pdumping POL for psRGXFWIfSysInitMemDesc (%d)", + eError)); + return eError; + } + +#if defined(NO_HARDWARE) && defined(PDUMP) + eError = _ValidateIrqs(psDevInfo); + if (eError != PVRSRV_OK) + { + return eError; + } +#endif +#endif + +#if defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE) + eError = RGXVirtualisationPowerupSidebandTest(psDeviceNode, psDevInfo->psRGXFWIfSysInit, psDevInfo); + if (eError != PVRSRV_OK) + { + return eError; + } +#endif + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + SetFirmwareStartTime(psDevInfo->psRGXFWIfSysInit->ui32FirmwareStartedTimeStamp); +#endif + + HTBSyncPartitionMarker(psDevInfo->psRGXFWIfSysInit->ui32MarkerVal); + + psDevInfo->bRGXPowered = IMG_TRUE; + +#if defined(SUPPORT_LINUX_DVFS) + eError = ResumeDVFS(); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: Failed to resume DVFS")); + return eError; + } +#endif + } + } + + PDUMPCOMMENT("RGXPostPowerState: Current state: %d, New state: %d", eCurrentPowerState, eNewPowerState); + + return PVRSRV_OK; +} + +/* + RGXPreClockSpeedChange +*/ +PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eCurrentPowerState) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = 
psDeviceNode->pvDevice; + RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData; + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + PVR_UNREFERENCED_PARAMETER(psRGXData); + + PVR_DPF((PVR_DBG_MESSAGE, "RGXPreClockSpeedChange: RGX clock speed was %uHz", + psRGXData->psRGXTimingInfo->ui32CoreClockSpeed)); + + if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF) && + (psFwSysData->ePowState != RGXFWIF_POW_OFF)) + { + /* Update GPU frequency and timer correlation related data */ + RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_DVFS); + } + + return eError; +} + +/* + RGXPostClockSpeedChange +*/ +PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eCurrentPowerState) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData; + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + IMG_UINT32 ui32NewClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + /* Update runtime configuration with the new value */ + psDevInfo->psRGXFWIfRuntimeCfg->ui32CoreClockSpeed = ui32NewClockSpeed; + + if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF) && + (psFwSysData->ePowState != RGXFWIF_POW_OFF)) + { + RGXFWIF_KCCB_CMD sCOREClkSpeedChangeCmd; + IMG_UINT32 ui32CmdKCCBSlot; + + RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_DVFS); + + sCOREClkSpeedChangeCmd.eCmdType = RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE; + sCOREClkSpeedChangeCmd.uCmdData.sCoreClkSpeedChangeData.ui32NewClockSpeed = ui32NewClockSpeed; + + /* Ensure the new clock speed is written to memory before requesting the FW to read it */ + OSMemoryBarrier(); + + PDUMPCOMMENT("Scheduling CORE clock speed change command"); + + PDUMPPOWCMDSTART(); + eError = 
RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice, + &sCOREClkSpeedChangeCmd, + PDUMP_FLAGS_NONE, + &ui32CmdKCCBSlot); + PDUMPPOWCMDEND(); + + if (eError != PVRSRV_OK) + { + PDUMPCOMMENT("Scheduling CORE clock speed change command failed"); + PVR_DPF((PVR_DBG_ERROR, "RGXPostClockSpeedChange: Scheduling KCCB command failed. Error:%u", eError)); + return eError; + } + + PVR_DPF((PVR_DBG_MESSAGE, "RGXPostClockSpeedChange: RGX clock speed changed to %uHz", + psRGXData->psRGXTimingInfo->ui32CoreClockSpeed)); + } + + return eError; +} + +/*! + ****************************************************************************** + + @Function RGXDustCountChange + + @Description + + Does change of number of DUSTs + + @Input hDevHandle : RGX Device Node + @Input ui32NumberOfDusts : Number of DUSTs to make transition to + + @Return PVRSRV_ERROR : + + ******************************************************************************/ +PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE hDevHandle, + IMG_UINT32 ui32NumberOfDusts) +{ + + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + RGXFWIF_KCCB_CMD sDustCountChange; + IMG_UINT32 ui32MaxAvailableDusts = psDevInfo->sDevFeatureCfg.ui32MAXDustCount; + IMG_UINT32 ui32CmdKCCBSlot; + RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + if (ui32NumberOfDusts > ui32MaxAvailableDusts) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid number of DUSTs (%u) while expecting value within <0,%u>. 
Error:%u", + __func__, + ui32NumberOfDusts, + ui32MaxAvailableDusts, + eError)); + return eError; + } + + psRuntimeCfg->ui32DefaultDustsNumInit = ui32NumberOfDusts; + +#if !defined(NO_HARDWARE) + { + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + + if (psFwSysData->ePowState == RGXFWIF_POW_OFF) + { + return PVRSRV_OK; + } + + if (psFwSysData->ePowState != RGXFWIF_POW_FORCED_IDLE) + { + eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED; + PVR_DPF((PVR_DBG_ERROR, + "%s: Attempt to change dust count when not IDLE", + __func__)); + return eError; + } + } +#endif + + eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", + __func__)); + return eError; + } + + sDustCountChange.eCmdType = RGXFWIF_KCCB_CMD_POW; + sDustCountChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_NUM_UNITS_CHANGE; + sDustCountChange.uCmdData.sPowData.uPowerReqData.ui32NumOfDusts = ui32NumberOfDusts; + + PDUMPCOMMENT("Scheduling command to change Dust Count to %u", ui32NumberOfDusts); + eError = RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice, + &sDustCountChange, + PDUMP_FLAGS_NONE, + &ui32CmdKCCBSlot); + + if (eError != PVRSRV_OK) + { + PDUMPCOMMENT("Scheduling command to change Dust Count failed. Error:%u", eError); + PVR_DPF((PVR_DBG_ERROR, + "%s: Scheduling KCCB to change Dust Count failed. Error:%u", + __func__, eError)); + return eError; + } + + /* Wait for the firmware to answer. 
*/ + eError = RGXPollForGPCommandCompletion(psDeviceNode, + psDevInfo->psPowSyncPrim->pui32LinAddr, + 0x1, 0xFFFFFFFF); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Timeout waiting for idle request", __func__)); + return eError; + } + +#if defined(PDUMP) + PDUMPCOMMENT("RGXDustCountChange: Poll for Kernel SyncPrim [0x%p] on DM %d", psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP); + + SyncPrimPDumpPol(psDevInfo->psPowSyncPrim, + 1, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + 0); +#endif + + return PVRSRV_OK; +} + +/* + @Function RGXAPMLatencyChange +*/ +PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE hDevHandle, + IMG_UINT32 ui32ActivePMLatencyms, + IMG_BOOL bActivePMLatencyPersistant) +{ + + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; + IMG_UINT32 ui32CmdKCCBSlot; + PVRSRV_DEV_POWER_STATE ePowerState; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + eError = PVRSRVPowerLock(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXAPMLatencyChange: Failed to acquire power lock")); + return eError; + } + + /* Update runtime configuration with the new values and ensure the + * new APM latency is written to memory before requesting the FW to + * read it + */ + psRuntimeCfg->ui32ActivePMLatencyms = ui32ActivePMLatencyms; + psRuntimeCfg->bActivePMLatencyPersistant = bActivePMLatencyPersistant; + OSMemoryBarrier(); + + eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); + + if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF)) + { + RGXFWIF_KCCB_CMD sActivePMLatencyChange; + sActivePMLatencyChange.eCmdType = RGXFWIF_KCCB_CMD_POW; + sActivePMLatencyChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_APM_LATENCY_CHANGE; + sActivePMLatencyChange.uCmdData.sPowData.uPowerReqData.ui32ActivePMLatencyms = ui32ActivePMLatencyms; + + PDUMPCOMMENT("Scheduling 
command to change APM latency to %u", ui32ActivePMLatencyms); + eError = RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice, + &sActivePMLatencyChange, + PDUMP_FLAGS_NONE, + &ui32CmdKCCBSlot); + + if (eError != PVRSRV_OK) + { + PDUMPCOMMENT("Scheduling command to change APM latency failed. Error:%u", eError); + PVR_DPF((PVR_DBG_ERROR, "RGXAPMLatencyChange: Scheduling KCCB to change APM latency failed. Error:%u", eError)); + goto ErrorExit; + } + } + +ErrorExit: + PVRSRVPowerUnlock(psDeviceNode); + + return eError; +} + +/* + RGXActivePowerRequest +*/ +PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + + psDevInfo->ui32ActivePMReqTotal++; + + /* Powerlock to avoid further requests from racing with the FW hand-shake + * from now on (previous kicks to this point are detected by the FW) + * PVRSRVPowerLock is replaced with PVRSRVPowerTryLock to avoid + * potential dead lock between PDumpWriteLock and PowerLock + * during 'DriverLive + PDUMP=1 + EnableAPM=1'. 
+ */ + eError = PVRSRVPowerTryLock(psDeviceNode); + if (eError != PVRSRV_OK) + { + if (eError != PVRSRV_ERROR_RETRY) + { + PVR_LOG_ERROR(eError, "PVRSRVPowerTryLock"); + } + else + { + psDevInfo->ui32ActivePMReqRetry++; + } + goto _RGXActivePowerRequest_PowerLock_failed; + } + + /* Check again for IDLE once we have the power lock */ + if (psFwSysData->ePowState == RGXFWIF_POW_IDLE) + { +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + SetFirmwareHandshakeIdleTime(RGXReadHWTimerReg(psDevInfo)-psFwSysData->ui64StartIdleTime); +#endif + + PDUMPPOWCMDSTART(); + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, + PVRSRV_DEV_POWER_STATE_OFF, + IMG_FALSE); /* forced */ + PDUMPPOWCMDEND(); + + if (eError == PVRSRV_OK) + { + psDevInfo->ui32ActivePMReqOk++; + } + else if (eError == PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED) + { + psDevInfo->ui32ActivePMReqDenied++; + } + } + else + { + psDevInfo->ui32ActivePMReqNonIdle++; + } + + PVRSRVPowerUnlock(psDeviceNode); + +_RGXActivePowerRequest_PowerLock_failed: + + return eError; +} +/* + RGXForcedIdleRequest +*/ + +#define RGX_FORCED_IDLE_RETRY_COUNT 10 + +PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_KCCB_CMD sPowCmd; + PVRSRV_ERROR eError; + IMG_UINT32 ui32RetryCount = 0; + IMG_UINT32 ui32CmdKCCBSlot; +#if !defined(NO_HARDWARE) + RGXFWIF_SYSDATA *psFwSysData; +#endif + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + +#if !defined(NO_HARDWARE) + psFwSysData = psDevInfo->psRGXFWIfFwSysData; + + /* Firmware already forced idle */ + if (psFwSysData->ePowState == RGXFWIF_POW_FORCED_IDLE) + { + return PVRSRV_OK; + } + + /* Firmware is not powered. Sometimes this is permitted, for instance we were forcing idle to power down. */ + if (psFwSysData->ePowState == RGXFWIF_POW_OFF) + { + return (bDeviceOffPermitted) ? 
PVRSRV_OK : PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED; + } +#endif + + eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", + __func__)); + return eError; + } + sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; + sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ; + sPowCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_FORCE_IDLE; + + PDUMPCOMMENT("RGXForcedIdleRequest: Sending forced idle command"); + + /* Send one forced IDLE command to GP */ + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, + &sPowCmd, + PDUMP_FLAGS_NONE, + &ui32CmdKCCBSlot); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to send idle request", __func__)); + return eError; + } + + /* Wait for GPU to finish current workload */ + do { + eError = RGXPollForGPCommandCompletion(psDeviceNode, + psDevInfo->psPowSyncPrim->pui32LinAddr, + 0x1, 0xFFFFFFFF); + if ((eError == PVRSRV_OK) || (ui32RetryCount == RGX_FORCED_IDLE_RETRY_COUNT)) + { + break; + } + ui32RetryCount++; + PVR_DPF((PVR_DBG_WARNING, + "%s: Request timeout. Retry %d of %d", + __func__, ui32RetryCount, RGX_FORCED_IDLE_RETRY_COUNT)); + } while (IMG_TRUE); + + if (eError != PVRSRV_OK) + { + RGXFWNotifyHostTimeout(psDevInfo); + PVR_DPF((PVR_DBG_ERROR, + "%s: Idle request failed. 
Firmware potentially left in forced idle state", + __func__)); + return eError; + } + +#if defined(PDUMP) + PDUMPCOMMENT("RGXForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d", psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP); + + SyncPrimPDumpPol(psDevInfo->psPowSyncPrim, + 1, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + 0); +#endif + +#if !defined(NO_HARDWARE) + /* Check the firmware state for idleness */ + if (psFwSysData->ePowState != RGXFWIF_POW_FORCED_IDLE) + { + return PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED; + } +#endif + + return PVRSRV_OK; +} + +/* + RGXCancelForcedIdleRequest +*/ +PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_KCCB_CMD sPowCmd; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32CmdKCCBSlot; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", + __func__)); + goto ErrorExit; + } + + /* Send the IDLE request to the FW */ + sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; + sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ; + sPowCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_CANCEL_FORCED_IDLE; + + PDUMPCOMMENT("RGXForcedIdleRequest: Sending cancel forced idle command"); + + /* Send cancel forced IDLE command to GP */ + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, + &sPowCmd, + PDUMP_FLAGS_NONE, + &ui32CmdKCCBSlot); + + if (eError != PVRSRV_OK) + { + PDUMPCOMMENT("RGXCancelForcedIdleRequest: Failed to send cancel IDLE request for DM%d", RGXFWIF_DM_GP); + goto ErrorExit; + } + + /* Wait for the firmware to answer. 
*/ + eError = RGXPollForGPCommandCompletion(psDeviceNode, + psDevInfo->psPowSyncPrim->pui32LinAddr, + 1, 0xFFFFFFFF); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Timeout waiting for cancel idle request", __func__)); + goto ErrorExit; + } + +#if defined(PDUMP) + PDUMPCOMMENT("RGXCancelForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d", psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP); + + SyncPrimPDumpPol(psDevInfo->psPowSyncPrim, + 1, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + 0); +#endif + + return eError; + +ErrorExit: + PVR_DPF((PVR_DBG_ERROR, "%s: Firmware potentially left in forced idle state", __func__)); + return eError; +} + +/*! + ****************************************************************************** + + @Function PVRSRVGetNextDustCount + + @Description + + Calculate a sequence of dust counts to achieve full transition coverage. + We increment two counts of dusts and switch up and down between them. + It does contain a few redundant transitions. If two dust exist, the + output transitions should be as follows. + + 0->1, 0<-1, 0->2, 0<-2, (0->1) + 1->1, 1->2, 1<-2, (1->2) + 2->2, (2->0), + 0->0. Repeat. + + Redundant transitions in brackets. 
+ + @Input psDustReqState : Counter state used to calculate next dust count + @Input ui32DustCount : Number of dusts in the core + + @Return PVRSRV_ERROR + + ******************************************************************************/ +IMG_UINT32 RGXGetNextDustCount(RGX_DUST_STATE *psDustReqState, IMG_UINT32 ui32DustCount) +{ + if (psDustReqState->bToggle) + { + psDustReqState->ui32DustCount2++; + } + + if (psDustReqState->ui32DustCount2 > ui32DustCount) + { + psDustReqState->ui32DustCount1++; + psDustReqState->ui32DustCount2 = psDustReqState->ui32DustCount1; + } + + if (psDustReqState->ui32DustCount1 > ui32DustCount) + { + psDustReqState->ui32DustCount1 = 0; + psDustReqState->ui32DustCount2 = 0; + } + + psDustReqState->bToggle = !psDustReqState->bToggle; + + return (psDustReqState->bToggle) ? psDustReqState->ui32DustCount1 : psDustReqState->ui32DustCount2; +} + +/****************************************************************************** + End of file (rgxpower.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxpower.h b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxpower.h new file mode 100644 index 000000000000..6b522bdd3807 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxpower.h @@ -0,0 +1,245 @@ +/*************************************************************************/ /*! +@File +@Title RGX power header file +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the RGX power +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__RGXPOWER_H__) +#define __RGXPOWER_H__ + +#include "pvrsrv_error.h" +#include "img_types.h" +#include "servicesext.h" +#include "rgxdevice.h" + + +/*! +****************************************************************************** + + @Function RGXPrePowerState + + @Description + + does necessary preparation before power state transition + + @Input hDevHandle : RGX Device Node + @Input eNewPowerState : New power state + @Input eCurrentPowerState : Current power state + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + IMG_BOOL bForced); + +/*! +****************************************************************************** + + @Function RGXPostPowerState + + @Description + + does necessary preparation after power state transition + + @Input hDevHandle : RGX Device Node + @Input eNewPowerState : New power state + @Input eCurrentPowerState : Current power state + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + IMG_BOOL bForced); + + +/*! +****************************************************************************** + + @Function RGXPreClockSpeedChange + + @Description + + Does processing required before an RGX clock speed change. 
+ + @Input hDevHandle : RGX Device Node + @Input bIdleDevice : Whether the firmware needs to be idled + @Input eCurrentPowerState : Power state of the device + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); + +/*! +****************************************************************************** + + @Function RGXPostClockSpeedChange + + @Description + + Does processing required after an RGX clock speed change. + + @Input hDevHandle : RGX Device Node + @Input bIdleDevice : Whether the firmware had been idled previously + @Input eCurrentPowerState : Power state of the device + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); + + +/*! +****************************************************************************** + + @Function RGXDustCountChange + + @Description Change of number of DUSTs + + @Input hDevHandle : RGX Device Node + @Input ui32NumberOfDusts : Number of DUSTs to make transition to + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE hDevHandle, + IMG_UINT32 ui32NumberOfDusts); + +/*! +****************************************************************************** + + @Function RGXAPMLatencyChange + + @Description + + Changes the wait duration used before firmware indicates IDLE. + Reducing this value will cause the firmware to shut off faster and + more often but may increase bubbles in GPU scheduling due to the added + power management activity. If bPersistent is NOT set, APM latency will + return back to system default on power up. 
+ + @Input hDevHandle : RGX Device Node + @Input ui32ActivePMLatencyms : Number of milliseconds to wait + @Input bPersistent : Set to ensure new value is not reset + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE hDevHandle, + IMG_UINT32 ui32ActivePMLatencyms, + IMG_BOOL bActivePMLatencyPersistant); + +/*! +****************************************************************************** + + @Function RGXActivePowerRequest + + @Description Initiate a handshake with the FW to power off the GPU + + @Input hDevHandle : RGX Device Node + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle); + +/*! +****************************************************************************** + + @Function RGXForcedIdleRequest + + @Description Initiate a handshake with the FW to idle the GPU + + @Input hDevHandle : RGX Device Node + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted); + +/*! +****************************************************************************** + + @Function RGXCancelForcedIdleRequest + + @Description Send a request to cancel idle to the firmware. + + @Input hDevHandle : RGX Device Node + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle); + +/*! +****************************************************************************** + + @Function PVRSRVGetNextDustCount + + @Description + + Calculate a sequence of dust counts to achieve full transition coverage. + We increment two counts of dusts and switch up and down between them. + It does contain a few redundant transitions. 
If two dust exist, the + output transitions should be as follows. + + 0->1, 0<-1, 0->2, 0<-2, (0->1) + 1->1, 1->2, 1<-2, (1->2) + 2->2, (2->0), + 0->0. Repeat. + + Redundant transitions in brackets. + + @Input psDustReqState : Counter state used to calculate next dust count + @Input ui32DustCount : Number of dusts in the core + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_UINT32 RGXGetNextDustCount(RGX_DUST_STATE *psDustState, IMG_UINT32 ui32DustCount); + + +#endif /* __RGXPOWER_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxregconfig.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxregconfig.c new file mode 100644 index 000000000000..0a96f81e3bfa --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxregconfig.c @@ -0,0 +1,287 @@ +/*************************************************************************/ /*! +@File +@Title RGX Register configuration +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX Regconfig routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "rgxregconfig.h" +#include "pvr_debug.h" +#include "rgxutils.h" +#include "rgxfwutils.h" +#include "device.h" +#include "sync_internal.h" +#include "pdump_km.h" +#include "pvrsrv.h" + +PVRSRV_ERROR PVRSRVRGXSetRegConfigTypeKM(CONNECTION_DATA * psDevConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT8 ui8RegCfgType) +{ +#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; + RGXFWIF_REG_CFG_TYPE eRegCfgType = (RGXFWIF_REG_CFG_TYPE) ui8RegCfgType; + + PVR_UNREFERENCED_PARAMETER(psDevConnection); + + OSLockAcquire(psRegCfg->hLock); + + if (eRegCfgType < psRegCfg->eRegCfgTypeToPush) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVRGXSetRegConfigTypeKM: Register configuration requested (%d) is not valid since it has to be at least %d." + " Configurations of different types need to go in order", + eRegCfgType, + psRegCfg->eRegCfgTypeToPush)); + OSLockRelease(psRegCfg->hLock); + return PVRSRV_ERROR_REG_CONFIG_INVALID_TYPE; + } + + psRegCfg->eRegCfgTypeToPush = eRegCfgType; + + OSLockRelease(psRegCfg->hLock); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(psDevConnection); + + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSetRegConfigTypeKM: Feature disabled. 
Compile with SUPPORT_USER_REGISTER_CONFIGURATION")); + return PVRSRV_ERROR_FEATURE_DISABLED; +#endif +} + +PVRSRV_ERROR PVRSRVRGXAddRegConfigKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32RegAddr, + IMG_UINT64 ui64RegValue, + IMG_UINT64 ui64RegMask) +{ +#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sRegCfgCmd; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + OSLockAcquire(psRegCfg->hLock); + + if (psRegCfg->bEnabled) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXAddRegConfigKM: Cannot add record whilst register configuration active.")); + OSLockRelease(psRegCfg->hLock); + return PVRSRV_ERROR_REG_CONFIG_ENABLED; + } + if (psRegCfg->ui32NumRegRecords == RGXFWIF_REG_CFG_MAX_SIZE) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXAddRegConfigKM: Register configuration full.")); + OSLockRelease(psRegCfg->hLock); + return PVRSRV_ERROR_REG_CONFIG_FULL; + } + + sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG; + sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Addr = (IMG_UINT64) ui32RegAddr; + sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Value = ui64RegValue; + sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Mask = ui64RegMask; + sRegCfgCmd.uCmdData.sRegConfigData.eRegConfigType = psRegCfg->eRegCfgTypeToPush; + sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_ADD; + + eError = RGXScheduleCommand(psDeviceNode->pvDevice, + RGXFWIF_DM_GP, + &sRegCfgCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXAddRegConfigKM: RGXScheduleCommand failed. 
Error:%u", eError)); + OSLockRelease(psRegCfg->hLock); + return eError; + } + + psRegCfg->ui32NumRegRecords++; + + OSLockRelease(psRegCfg->hLock); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(psConnection); + + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSetRegConfigPIKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION")); + return PVRSRV_ERROR_FEATURE_DISABLED; +#endif +} + +PVRSRV_ERROR PVRSRVRGXClearRegConfigKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode) +{ +#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sRegCfgCmd; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + OSLockAcquire(psRegCfg->hLock); + + if (psRegCfg->bEnabled) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearRegConfigKM: Attempt to clear register configuration whilst active.")); + OSLockRelease(psRegCfg->hLock); + return PVRSRV_ERROR_REG_CONFIG_ENABLED; + } + + sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG; + sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_CLEAR; + + eError = RGXScheduleCommand(psDeviceNode->pvDevice, + RGXFWIF_DM_GP, + &sRegCfgCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearRegConfigKM: RGXScheduleCommand failed. Error:%u", eError)); + OSLockRelease(psRegCfg->hLock); + return eError; + } + + psRegCfg->ui32NumRegRecords = 0; + psRegCfg->eRegCfgTypeToPush = RGXFWIF_REG_CFG_TYPE_PWR_ON; + + OSLockRelease(psRegCfg->hLock); + + return eError; +#else + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearRegConfigKM: Feature disabled. 
Compile with SUPPORT_USER_REGISTER_CONFIGURATION")); + + PVR_UNREFERENCED_PARAMETER(psConnection); + + return PVRSRV_ERROR_FEATURE_DISABLED; +#endif +} + +PVRSRV_ERROR PVRSRVRGXEnableRegConfigKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode) +{ +#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sRegCfgCmd; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + OSLockAcquire(psRegCfg->hLock); + + sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG; + sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_ENABLE; + + eError = RGXScheduleCommand(psDeviceNode->pvDevice, + RGXFWIF_DM_GP, + &sRegCfgCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXEnableRegConfigKM: RGXScheduleCommand failed. Error:%u", eError)); + OSLockRelease(psRegCfg->hLock); + return eError; + } + + psRegCfg->bEnabled = IMG_TRUE; + + OSLockRelease(psRegCfg->hLock); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(psConnection); + + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXEnableRegConfigKM: Feature disabled. 
Compile with SUPPORT_USER_REGISTER_CONFIGURATION")); + return PVRSRV_ERROR_FEATURE_DISABLED; +#endif +} + +PVRSRV_ERROR PVRSRVRGXDisableRegConfigKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode) +{ +#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sRegCfgCmd; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + OSLockAcquire(psRegCfg->hLock); + + sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG; + sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_DISABLE; + + eError = RGXScheduleCommand(psDeviceNode->pvDevice, + RGXFWIF_DM_GP, + &sRegCfgCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXDisableRegConfigKM: RGXScheduleCommand failed. Error:%u", eError)); + OSLockRelease(psRegCfg->hLock); + return eError; + } + + psRegCfg->bEnabled = IMG_FALSE; + + OSLockRelease(psRegCfg->hLock); + + return eError; +#else + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXDisableRegConfigKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION")); + PVR_UNREFERENCED_PARAMETER(psConnection); + + return PVRSRV_ERROR_FEATURE_DISABLED; +#endif +} + + +/****************************************************************************** + End of file (rgxregconfig.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxsignals.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxsignals.c new file mode 100644 index 000000000000..466f41ead97e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxsignals.c @@ -0,0 +1,99 @@ +/*************************************************************************/ /*! +@File rgxsignals.c +@Title RGX Signals routines +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description RGX Signals routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "rgxsignals.h" + +#include "rgxmem.h" +#include "rgx_fwif_km.h" +#include "mmu_common.h" +#include "devicemem.h" +#include "rgxfwutils.h" + + +PVRSRV_ERROR +PVRSRVRGXNotifySignalUpdateKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hMemCtxPrivData, + IMG_DEV_VIRTADDR sDevSignalAddress) +{ + DEVMEM_MEMDESC *psFWMemContextMemDesc; + RGXFWIF_KCCB_CMD sKCCBCmd; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); + + /* Schedule the firmware command */ + sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE; + sKCCBCmd.uCmdData.sSignalUpdateData.sDevSignalAddress = sDevSignalAddress; + eError = RGXSetFirmwareAddress(&sKCCBCmd.uCmdData.sSignalUpdateData.psFWMemContext, + psFWMemContextMemDesc, + 0, RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", fail_fwaddr); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand((PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice, + RGXFWIF_DM_GP, + &sKCCBCmd, + 0, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule the FW command %d (%s)", + __func__, + eError, 
PVRSRVGETERRORSTRING(eError))); + } + +fail_fwaddr: + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxsrvinit.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxsrvinit.c new file mode 100644 index 000000000000..48f42913a569 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxsrvinit.c @@ -0,0 +1,1455 @@ +/*************************************************************************/ /*! +@File +@Title Services initialisation routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "srvinit.h" +#include "pvr_debug.h" +#include "osfunc.h" +#include "km_apphint_defs.h" +#include "htbuffer_types.h" +#include "htbuffer_init.h" + +#include "devicemem.h" +#include "devicemem_pdump.h" + +#include "rgx_fwif_km.h" +#include "pdump_km.h" + +#include "rgxinit.h" + +#include "rgx_compat_bvnc.h" + +#include "osfunc.h" + +#include "rgxdefs_km.h" + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#include "virt_validation_defs.h" +#endif + +#include "rgx_fwif_hwperf.h" +#include "rgx_hwperf_table.h" + +#include "fwload.h" +#include "rgxlayer_impl.h" +#include "rgxfwimageutils.h" +#include "rgxfwutils.h" + +#include "rgx_hwperf.h" +#include "rgx_bvnc_defs_km.h" + +#include "rgxdevice.h" + +#include "pvrsrv.h" + +#if defined(SUPPORT_TRUSTED_DEVICE) +#include "rgxdevice.h" +#include "pvrsrv_device.h" +#endif + +#define DRIVER_MODE_HOST 0 /* AppHint value for host driver mode */ + +#define HW_PERF_FILTER_DEFAULT 0x00000000 /* Default to no HWPerf */ +#define HW_PERF_FILTER_DEFAULT_ALL_ON 0xFFFFFFFF /* All events */ + +#if defined(SUPPORT_VALIDATION) +#include "pvrsrv_apphint.h" +#endif + +#include 
"os_srvinit_param.h" +#if !defined(LINUX) +/*! +******************************************************************************* + * AppHint mnemonic data type helper tables +******************************************************************************/ +/* apphint map of name vs. enable flag */ +static SRV_INIT_PARAM_UINT32_LOOKUP htb_loggroup_tbl[] = { +#define X(a, b) { #b, HTB_LOG_GROUP_FLAG(a) }, + HTB_LOG_SFGROUPLIST +#undef X +}; +/* apphint map of arg vs. OpMode */ +static SRV_INIT_PARAM_UINT32_LOOKUP htb_opmode_tbl[] = { + { "droplatest", HTB_OPMODE_DROPLATEST}, + { "dropoldest", HTB_OPMODE_DROPOLDEST}, + /* HTB should never be started in HTB_OPMODE_BLOCK + * as this can lead to deadlocks + */ +}; + +static SRV_INIT_PARAM_UINT32_LOOKUP fwt_logtype_tbl[] = { + { "trace", 0}, + { "none", 0} +#if defined(SUPPORT_TBI_INTERFACE) + , { "tbi", 1} +#endif +}; + +static SRV_INIT_PARAM_UINT32_LOOKUP timecorr_clk_tbl[] = { + { "mono", 0 }, + { "mono_raw", 1 }, + { "sched", 2 } +}; + +static SRV_INIT_PARAM_UINT32_LOOKUP fwt_loggroup_tbl[] = { RGXFWIF_LOG_GROUP_NAME_VALUE_MAP }; + +/* + * Services AppHints initialisation + */ +#define X(a, b, c, d, e) SrvInitParamInit ## b(a, d, e) +APPHINT_LIST_ALL +#undef X +#endif /* !defined(LINUX) */ + +/* + * Container for all the apphints used by this module + */ +typedef struct _RGX_SRVINIT_APPHINTS_ +{ + IMG_UINT32 ui32DriverMode; + IMG_BOOL bGPUUnitsPowerChange; + IMG_BOOL bEnableSignatureChecks; + IMG_UINT32 ui32SignatureChecksBufSize; + +#if defined(DEBUG) + IMG_BOOL bAssertOnOutOfMem; +#endif +#if defined(SUPPORT_VALIDATION) + IMG_BOOL bValidateIrq; +#endif + IMG_BOOL bAssertOnHWRTrigger; +#if defined(SUPPORT_VALIDATION) + IMG_UINT32 aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_LAST]; + IMG_UINT32 ui32FBCDCVersionOverride; +#endif + IMG_BOOL bCheckMlist; + IMG_BOOL bDisableClockGating; + IMG_BOOL bDisableDMOverlap; + IMG_BOOL bDisableFEDLogging; + IMG_BOOL bDisablePDP; + IMG_BOOL bEnableCDMKillRand; + IMG_BOOL 
bEnableRandomCsw; + IMG_BOOL bEnableSoftResetCsw; + IMG_BOOL bEnableHWR; + IMG_BOOL bFilteringMode; + IMG_BOOL bHWPerfDisableCustomCounterFilter; + IMG_BOOL bZeroFreelist; + IMG_UINT32 ui32EnableFWContextSwitch; + IMG_UINT32 ui32FWContextSwitchProfile; + IMG_UINT32 ui32VDMContextSwitchMode; + IMG_UINT32 ui32HWPerfFWBufSize; + IMG_UINT32 ui32HWPerfHostBufSize; + IMG_UINT32 ui32HWPerfFilter0; + IMG_UINT32 ui32HWPerfFilter1; + IMG_UINT32 ui32HWPerfHostFilter; + IMG_UINT32 ui32TimeCorrClock; + IMG_UINT32 ui32HWRDebugDumpLimit; + IMG_UINT32 ui32JonesDisableMask; + IMG_UINT32 ui32LogType; + IMG_UINT32 ui32TruncateMode; + FW_PERF_CONF eFirmwarePerf; + RGX_ACTIVEPM_CONF eRGXActivePMConf; + RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf; + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]; + IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]; +#endif + IMG_BOOL bEnableTrustedDeviceAceConfig; + IMG_UINT32 ui32FWContextSwitchCrossDM; +#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) + IMG_UINT32 ui32PhysMemTestPasses; +#endif +} RGX_SRVINIT_APPHINTS; + + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +/* + * Parses the dot('.') separated OSID regions on a string and stores the integer results + * in an array. Numbers can be decimal or hex (starting with 0x) and there must be a . between each + * (example: 1.2.3.4.5.6.7.8) + */ +static void _ParseOSidRegionString(IMG_CHAR *apszBuffer, IMG_UINT32 *pui32ApphintArray) +{ + IMG_UINT32 ui32OSid; + IMG_CHAR *pui8StringParsingBase=apszBuffer; + IMG_UINT32 ui32StringLength = OSStringLength(apszBuffer); + + /* Initialize all apphints to 0 */ + for (ui32OSid = 0; ui32OSid < GPUVIRT_VALIDATION_NUM_OS; ui32OSid++) + { + pui32ApphintArray[ui32OSid] = 0; + } + + /* Parse the string. 
Even if it fails, apphints will have been initialized */ + for (ui32OSid = 0; ui32OSid < GPUVIRT_VALIDATION_NUM_OS; ui32OSid++) + { + IMG_UINT32 ui32Base=10; + IMG_CHAR *pui8StringParsingNextDelimiter; + + /* Find the next character in the string that's not a ',' '.' or ' ' */ + while ((*pui8StringParsingBase == '.' || + *pui8StringParsingBase == ',' || + *pui8StringParsingBase == ' ') && + pui8StringParsingBase - apszBuffer <= ui32StringLength) + { + pui8StringParsingBase++; + } + + if (pui8StringParsingBase - apszBuffer > ui32StringLength) + { + PVR_DPF((PVR_DBG_ERROR, "Reached the end of the apphint string while trying to parse it.\nBuffer: %s, OSid: %d", pui8StringParsingBase, ui32OSid)); + return; + } + + /* If the substring begins with "0x" move the pointer 2 bytes forward and set the base to 16 */ + if (*pui8StringParsingBase == '0' && *(pui8StringParsingBase+1) =='x') + { + ui32Base=16; + pui8StringParsingBase+=2; + } + + /* Find the next delimiter in the string or the end of the string itself if we're parsing the final number */ + pui8StringParsingNextDelimiter = pui8StringParsingBase; + + while (*pui8StringParsingNextDelimiter!='.' && + *pui8StringParsingNextDelimiter!=',' && + *pui8StringParsingNextDelimiter!=' ' && + *pui8StringParsingNextDelimiter!='\0' && + (pui8StringParsingNextDelimiter - apszBuffer <= ui32StringLength)) + { + pui8StringParsingNextDelimiter++; + } + + /* + * Each number is followed by a '.' except for the last one. If a string termination is found + * when not expected the functions returns + */ + + if (*pui8StringParsingNextDelimiter=='\0' && ui32OSid < GPUVIRT_VALIDATION_NUM_OS - 1) + { + PVR_DPF((PVR_DBG_ERROR, "There was an error parsing the OSid Region Apphint Strings")); + return; + } + + /*replace the . with a string termination so that it can be properly parsed to an integer */ + *pui8StringParsingNextDelimiter = '\0'; + + /* Parse the number. 
The fact that it is followed by '\0' means that the string parsing utility + * will finish there and not try to parse the rest */ + + OSStringToUINT32(pui8StringParsingBase, ui32Base, &pui32ApphintArray[ui32OSid]); + + pui8StringParsingBase = pui8StringParsingNextDelimiter + 1; + } +} + +#endif +/*! +******************************************************************************* + + @Function GetApphints + + @Description Read init time apphints and initialise internal variables + + @Input psHints : Pointer to apphints container + + @Return void + +******************************************************************************/ +static INLINE void GetApphints(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_SRVINIT_APPHINTS *psHints) +{ + void *pvParamState = SrvInitParamOpen(); + IMG_UINT32 ui32ParamTemp; + IMG_BOOL bS7TopInfra = IMG_FALSE, bE42290 = IMG_FALSE, bTPUFiltermodeCtrl = IMG_FALSE; + IMG_BOOL bE42606 = IMG_FALSE, bAXIACELite = IMG_FALSE; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) + { + bS7TopInfra = IMG_TRUE; + } + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TPU_FILTERING_MODE_CONTROL)) + { + bTPUFiltermodeCtrl = IMG_TRUE; + } + + if (RGX_IS_ERN_SUPPORTED(psDevInfo, 42290)) + { + bE42290 = IMG_TRUE; + } + + if (RGX_IS_ERN_SUPPORTED(psDevInfo, 42606)) + { + bE42606 = IMG_TRUE; + } + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACELITE)) + { + bAXIACELite = IMG_TRUE; + } + + /* + * NB AppHints initialised to a default value via SrvInitParamInit* macros above + */ + SrvInitParamGetUINT32(pvParamState, DriverMode, psHints->ui32DriverMode); + SrvInitParamGetBOOL(pvParamState, GPUUnitsPowerChange, psHints->bGPUUnitsPowerChange); + SrvInitParamGetBOOL(pvParamState, EnableSignatureChecks, psHints->bEnableSignatureChecks); + SrvInitParamGetUINT32(pvParamState, SignatureChecksBufSize, psHints->ui32SignatureChecksBufSize); + +#if defined(DEBUG) + SrvInitParamGetBOOL(pvParamState, AssertOutOfMemory, psHints->bAssertOnOutOfMem); +#endif + 
SrvInitParamGetBOOL(pvParamState, AssertOnHWRTrigger, psHints->bAssertOnHWRTrigger); + SrvInitParamGetBOOL(pvParamState, CheckMList, psHints->bCheckMlist); + SrvInitParamGetBOOL(pvParamState, DisableClockGating, psHints->bDisableClockGating); + SrvInitParamGetBOOL(pvParamState, DisableDMOverlap, psHints->bDisableDMOverlap); + SrvInitParamGetBOOL(pvParamState, DisableFEDLogging, psHints->bDisableFEDLogging); + SrvInitParamGetUINT32(pvParamState, EnableAPM, ui32ParamTemp); + psHints->eRGXActivePMConf = ui32ParamTemp; + SrvInitParamGetBOOL(pvParamState, EnableCDMKillingRandMode, psHints->bEnableCDMKillRand); + SrvInitParamGetBOOL(pvParamState, EnableRandomContextSwitch, psHints->bEnableRandomCsw); + SrvInitParamGetBOOL(pvParamState, EnableSoftResetContextSwitch, psHints->bEnableSoftResetCsw); + SrvInitParamGetUINT32(pvParamState, EnableFWContextSwitch, psHints->ui32EnableFWContextSwitch); + SrvInitParamGetUINT32(pvParamState, VDMContextSwitchMode, psHints->ui32VDMContextSwitchMode); + SrvInitParamGetBOOL(pvParamState, EnableHWR, psHints->bEnableHWR); + SrvInitParamGetUINT32(pvParamState, EnableRDPowerIsland, ui32ParamTemp); + psHints->eRGXRDPowerIslandConf = ui32ParamTemp; + SrvInitParamGetUINT32(pvParamState, FirmwarePerf, ui32ParamTemp); + psHints->eFirmwarePerf = ui32ParamTemp; + SrvInitParamGetUINT32(pvParamState, FWContextSwitchProfile, psHints->ui32FWContextSwitchProfile); + SrvInitParamGetBOOL(pvParamState, HWPerfDisableCustomCounterFilter, psHints->bHWPerfDisableCustomCounterFilter); + SrvInitParamGetUINT32(pvParamState, HWPerfHostBufSizeInKB, psHints->ui32HWPerfHostBufSize); + SrvInitParamGetUINT32(pvParamState, HWPerfFWBufSizeInKB, psHints->ui32HWPerfFWBufSize); +#if defined(LINUX) + /* name changes */ + { + IMG_UINT64 ui64Tmp; + SrvInitParamGetBOOL(pvParamState, DisablePDumpPanic, psHints->bDisablePDP); + SrvInitParamGetUINT64(pvParamState, HWPerfFWFilter, ui64Tmp); + psHints->ui32HWPerfFilter0 = (IMG_UINT32)(ui64Tmp & 0xffffffffllu); + 
psHints->ui32HWPerfFilter1 = (IMG_UINT32)((ui64Tmp >> 32) & 0xffffffffllu); + } +#else + SrvInitParamUnreferenced(DisablePDumpPanic); + SrvInitParamUnreferenced(HWPerfFWFilter); + SrvInitParamUnreferenced(RGXBVNC); +#endif + SrvInitParamGetUINT32(pvParamState, HWPerfHostFilter, psHints->ui32HWPerfHostFilter); + SrvInitParamGetUINT32List(pvParamState, TimeCorrClock, psHints->ui32TimeCorrClock); + SrvInitParamGetUINT32(pvParamState, HWRDebugDumpLimit, ui32ParamTemp); + psHints->ui32HWRDebugDumpLimit = MIN(ui32ParamTemp, RGXFWIF_HWR_DEBUG_DUMP_ALL); + + if (bS7TopInfra) + { + #define RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK (0XFFFFFFCFU) + #define RGX_CR_JONES_FIX_MT_ORDER_ISP_EN (0X00000020U) + #define RGX_CR_JONES_FIX_MT_ORDER_TE_EN (0X00000010U) + + SrvInitParamGetUINT32(pvParamState, JonesDisableMask, ui32ParamTemp); + if (((ui32ParamTemp & ~RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK) == RGX_CR_JONES_FIX_MT_ORDER_ISP_EN) || + ((ui32ParamTemp & ~RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK) == RGX_CR_JONES_FIX_MT_ORDER_TE_EN)) + { + ui32ParamTemp |= (RGX_CR_JONES_FIX_MT_ORDER_TE_EN | + RGX_CR_JONES_FIX_MT_ORDER_ISP_EN); + PVR_DPF((PVR_DBG_WARNING, "Tile reordering mode requires both TE and ISP enabled. 
Forcing JonesDisableMask = %d", + ui32ParamTemp)); + } + psHints->ui32JonesDisableMask = ui32ParamTemp; + } + + if ((bE42290) && (bTPUFiltermodeCtrl)) + { + SrvInitParamGetBOOL(pvParamState, NewFilteringMode, psHints->bFilteringMode); + } + + if (bE42606) + { + SrvInitParamGetUINT32(pvParamState, TruncateMode, psHints->ui32TruncateMode); + } +#if defined(EMULATOR) + if (bAXIACELite) + { + SrvInitParamGetBOOL(pvParamState, EnableTrustedDeviceAceConfig, psHints->bEnableTrustedDeviceAceConfig); + } +#endif + + SrvInitParamGetBOOL(pvParamState, ZeroFreelist, psHints->bZeroFreelist); + +#if defined(LINUX) + SrvInitParamGetUINT32(pvParamState, FWContextSwitchCrossDM, psHints->ui32FWContextSwitchCrossDM); +#else + SrvInitParamUnreferenced(FWContextSwitchCrossDM); +#endif + +#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) + SrvInitParamGetUINT32(pvParamState, PhysMemTestPasses, psHints->ui32PhysMemTestPasses); +#endif + +#if defined(SUPPORT_VALIDATION) + /* Apphints for TPU trilinear frac masking */ + SrvInitParamGetUINT32(pvParamState, TPUTrilinearFracMaskPDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_PDM]); + SrvInitParamGetUINT32(pvParamState, TPUTrilinearFracMaskVDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_VDM]); + SrvInitParamGetUINT32(pvParamState, TPUTrilinearFracMaskCDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_CDM]); + SrvInitParamGetUINT32(pvParamState, TPUTrilinearFracMaskTDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_TDM]); + SrvInitParamGetBOOL(pvParamState, ValidateIrq, psHints->bValidateIrq); + SrvInitParamGetUINT32(pvParamState, FBCDCVersionOverride, psHints->ui32FBCDCVersionOverride); +#endif + + /* + * FW logs apphints + */ + { + IMG_UINT32 ui32LogGroup, ui32TraceOrTBI; + + SrvInitParamGetUINT32BitField(pvParamState, EnableLogGroup, ui32LogGroup); + SrvInitParamGetUINT32List(pvParamState, FirmwareLogType, ui32TraceOrTBI); + + /* Defaulting to TRACE */ + BITMASK_SET(ui32LogGroup, 
RGXFWIF_LOG_TYPE_TRACE); + +#if defined(SUPPORT_TBI_INTERFACE) + if (ui32TraceOrTBI == 1 /* TBI */) + { + if ((ui32LogGroup & RGXFWIF_LOG_TYPE_GROUP_MASK) == 0) + { + /* No groups configured - defaulting to MAIN group */ + BITMASK_SET(ui32LogGroup, RGXFWIF_LOG_TYPE_GROUP_MAIN); + } + BITMASK_UNSET(ui32LogGroup, RGXFWIF_LOG_TYPE_TRACE); + } +#endif + psHints->ui32LogType = ui32LogGroup; + } + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + /* + * GPU virtualisation validation apphints + */ + { + IMG_CHAR pszOSidRegionBuffer[GPUVIRT_VALIDATION_MAX_STRING_LENGTH]; + + SrvInitParamGetSTRING(pvParamState, OSidRegion0Min, pszOSidRegionBuffer, GPUVIRT_VALIDATION_MAX_STRING_LENGTH); + _ParseOSidRegionString(pszOSidRegionBuffer, psHints->aui32OSidMin[0]); + + SrvInitParamGetSTRING(pvParamState, OSidRegion0Max, pszOSidRegionBuffer, GPUVIRT_VALIDATION_MAX_STRING_LENGTH); + _ParseOSidRegionString(pszOSidRegionBuffer, psHints->aui32OSidMax[0]); + + SrvInitParamGetSTRING(pvParamState, OSidRegion1Min, pszOSidRegionBuffer, GPUVIRT_VALIDATION_MAX_STRING_LENGTH); + _ParseOSidRegionString(pszOSidRegionBuffer, psHints->aui32OSidMin[1]); + + SrvInitParamGetSTRING(pvParamState, OSidRegion1Max, pszOSidRegionBuffer, GPUVIRT_VALIDATION_MAX_STRING_LENGTH); + _ParseOSidRegionString(pszOSidRegionBuffer, psHints->aui32OSidMax[1]); + } +#else +#if !defined(LINUX) + SrvInitParamUnreferenced(OSidRegion0Min); + SrvInitParamUnreferenced(OSidRegion0Max); + SrvInitParamUnreferenced(OSidRegion1Min); + SrvInitParamUnreferenced(OSidRegion1Max); +#endif /* !defined(LINUX) */ +#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ + + + SrvInitParamClose(pvParamState); +} + + +/*! 
+******************************************************************************* + + @Function GetFWConfigFlags + + @Description Initialise and return FW config flags + + @Input psHints : Apphints container + @Input pui32FWConfigFlags : Pointer to config flags + + @Return void + +******************************************************************************/ +static INLINE void GetFWConfigFlags(PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_SRVINIT_APPHINTS *psHints, + IMG_UINT32 *pui32FWConfigFlags, + IMG_UINT32 *pui32FWConfigFlagsExt, + IMG_UINT32 *pui32FwOsCfgFlags) +{ + IMG_UINT32 ui32FWConfigFlags = 0; + IMG_UINT32 ui32FWConfigFlagsExt = 0; + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + ui32FWConfigFlags = 0; + ui32FWConfigFlagsExt = 0; + } + else + { +#if defined(DEBUG) + ui32FWConfigFlags |= psHints->bAssertOnOutOfMem ? RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY : 0; +#endif + ui32FWConfigFlags |= psHints->bAssertOnHWRTrigger ? RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER : 0; + ui32FWConfigFlags |= psHints->bCheckMlist ? RGXFWIF_INICFG_CHECK_MLIST_EN : 0; + ui32FWConfigFlags |= psHints->bDisableClockGating ? RGXFWIF_INICFG_DISABLE_CLKGATING_EN : 0; + ui32FWConfigFlags |= psHints->bDisableDMOverlap ? RGXFWIF_INICFG_DISABLE_DM_OVERLAP : 0; + ui32FWConfigFlags |= psHints->bDisablePDP ? RGXFWIF_INICFG_DISABLE_PDP_EN : 0; + ui32FWConfigFlags |= psHints->bEnableCDMKillRand ? RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN : 0; + ui32FWConfigFlags |= psHints->bEnableRandomCsw ? RGXFWIF_INICFG_CTXSWITCH_MODE_RAND : 0; + ui32FWConfigFlags |= psHints->bEnableSoftResetCsw ? RGXFWIF_INICFG_CTXSWITCH_SRESET_EN : 0; + ui32FWConfigFlags |= (psHints->ui32HWPerfFilter0 != 0 || psHints->ui32HWPerfFilter1 != 0) ? RGXFWIF_INICFG_HWPERF_EN : 0; + ui32FWConfigFlags |= psHints->bEnableHWR ? RGXFWIF_INICFG_HWR_EN : 0; + ui32FWConfigFlags |= psHints->bHWPerfDisableCustomCounterFilter ? RGXFWIF_INICFG_HWP_DISABLE_FILTER : 0; + ui32FWConfigFlags |= (psHints->eFirmwarePerf == FW_PERF_CONF_CUSTOM_TIMER) ? 
RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN : 0; + ui32FWConfigFlags |= (psHints->eFirmwarePerf == FW_PERF_CONF_POLLS) ? RGXFWIF_INICFG_POLL_COUNTERS_EN : 0; + ui32FWConfigFlags |= (psHints->ui32VDMContextSwitchMode << RGXFWIF_INICFG_VDM_CTX_STORE_MODE_SHIFT) & RGXFWIF_INICFG_VDM_CTX_STORE_MODE_MASK; + ui32FWConfigFlags |= (psHints->ui32FWContextSwitchProfile << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) & RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK; + +#if defined(SUPPORT_VALIDATION) +#if defined(NO_HARDWARE) && defined(PDUMP) + ui32FWConfigFlags |= psHints->bValidateIrq ? RGXFWIF_INICFG_VALIDATE_IRQ : 0; +#endif + + if (psHints->ui32FBCDCVersionOverride > 0) + { + ui32FWConfigFlags |= (psHints->ui32FBCDCVersionOverride == 2) ? RGXFWIF_INICFG_FBCDC_V3_1_EN : 0; + } + else +#endif /* defined(SUPPORT_VALIDATION) */ + { + ui32FWConfigFlags |= psDeviceNode->pfnHasFBCDCVersion31(psDeviceNode) ? RGXFWIF_INICFG_FBCDC_V3_1_EN : 0; + } + } + + *pui32FWConfigFlags = ui32FWConfigFlags; + *pui32FWConfigFlagsExt = ui32FWConfigFlagsExt; + *pui32FwOsCfgFlags = psHints->ui32FWContextSwitchCrossDM | + (psHints->ui32EnableFWContextSwitch & ~RGXFWIF_INICFG_OS_CTXSWITCH_CLRMSK); +} + + +/*! +******************************************************************************* + + @Function GetFilterFlags + + @Description Initialise and return filter flags + + @Input psHints : Apphints container + + @Return IMG_UINT32 : Filter flags + +******************************************************************************/ +static INLINE IMG_UINT32 GetFilterFlags(RGX_SRVINIT_APPHINTS *psHints) +{ + IMG_UINT32 ui32FilterFlags = 0; + + ui32FilterFlags |= psHints->bFilteringMode ? RGXFWIF_FILTCFG_NEW_FILTER_MODE : 0; + if (psHints->ui32TruncateMode == 2) + { + ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_INT; + } + else if (psHints->ui32TruncateMode == 3) + { + ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_HALF; + } + + return ui32FilterFlags; +} + + +/*! 
+******************************************************************************* + + @Function InittDeviceFlags + + @Description Initialise and return device flags + + @Input psHints : Apphints container + @Input pui32DeviceFlags : Pointer to device flags + + @Return void + +******************************************************************************/ +static INLINE void InitDeviceFlags(RGX_SRVINIT_APPHINTS *psHints, + IMG_UINT32 *pui32DeviceFlags) +{ + IMG_UINT32 ui32DeviceFlags = 0; + + ui32DeviceFlags |= psHints->bGPUUnitsPowerChange ? RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN : 0; + ui32DeviceFlags |= psHints->bZeroFreelist ? RGXKM_DEVICE_STATE_ZERO_FREELIST : 0; + ui32DeviceFlags |= psHints->bDisableFEDLogging ? RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN : 0; +#if defined(PVRSRV_ENABLE_CCCB_GROW) + BITMASK_SET(ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN); +#endif + + *pui32DeviceFlags = ui32DeviceFlags; +} + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) +/*! 
+******************************************************************************* + + @Function RGXTDProcessFWImage + + @Description Fetch and send data used by the trusted device to complete + the FW image setup + + @Input psDeviceNode : Device node + @Input psRGXFW : Firmware blob + @Input puFWParams : Parameters used by the FW at boot time + + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR RGXTDProcessFWImage(PVRSRV_DEVICE_NODE *psDeviceNode, + OS_FW_IMAGE *psRGXFW, + RGX_FW_BOOT_PARAMS *puFWParams) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_TD_FW_PARAMS sTDFWParams; + RGX_LAYER_PARAMS sLayerParams; + PVRSRV_ERROR eError; + + if (psDevConfig->pfnTDSendFWImage == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: TDSendFWImage not implemented!", __func__)); + return PVRSRV_ERROR_NOT_IMPLEMENTED; + } + + sLayerParams.psDevInfo = psDevInfo; + + sTDFWParams.pvFirmware = OSFirmwareData(psRGXFW); + sTDFWParams.ui32FirmwareSize = OSFirmwareSize(psRGXFW); + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + sTDFWParams.uFWP.sMeta.sFWCodeDevVAddr = puFWParams->sMeta.sFWCodeDevVAddr; + sTDFWParams.uFWP.sMeta.sFWDataDevVAddr = puFWParams->sMeta.sFWDataDevVAddr; + sTDFWParams.uFWP.sMeta.sFWCorememCodeDevVAddr = puFWParams->sMeta.sFWCorememCodeDevVAddr; + sTDFWParams.uFWP.sMeta.sFWCorememCodeFWAddr = puFWParams->sMeta.sFWCorememCodeFWAddr; + sTDFWParams.uFWP.sMeta.uiFWCorememCodeSize = puFWParams->sMeta.uiFWCorememCodeSize; + sTDFWParams.uFWP.sMeta.sFWCorememDataDevVAddr = puFWParams->sMeta.sFWCorememDataDevVAddr; + sTDFWParams.uFWP.sMeta.sFWCorememDataFWAddr = puFWParams->sMeta.sFWCorememDataFWAddr; + sTDFWParams.uFWP.sMeta.ui32NumThreads = puFWParams->sMeta.ui32NumThreads; + } + else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + IMG_UINT32 i; + + sTDFWParams.uFWP.sMips.sGPURegAddr = 
puFWParams->sMips.sGPURegAddr; + sTDFWParams.uFWP.sMips.sFWStackAddr = puFWParams->sMips.sFWStackAddr; + sTDFWParams.uFWP.sMips.ui32FWPageTableLog2PageSize = puFWParams->sMips.ui32FWPageTableLog2PageSize; + sTDFWParams.uFWP.sMips.ui32FWPageTableNumPages = puFWParams->sMips.ui32FWPageTableNumPages; + + if (puFWParams->sMips.ui32FWPageTableNumPages > TD_MAX_NUM_MIPS_PAGETABLE_PAGES) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Number of page table pages %u greater " + "than what is allowed by the TD interface (%u), FW might " + "not work properly!", __func__, + puFWParams->sMips.ui32FWPageTableNumPages, + TD_MAX_NUM_MIPS_PAGETABLE_PAGES)); + } + + for (i = 0; i < MIN(RGXMIPSFW_MAX_NUM_PAGETABLE_PAGES, TD_MAX_NUM_MIPS_PAGETABLE_PAGES); i++) + { + sTDFWParams.uFWP.sMips.asFWPageTableAddr[i] = puFWParams->sMips.asFWPageTableAddr[i]; + } + } + + eError = psDevConfig->pfnTDSendFWImage(psDevConfig->hSysData, &sTDFWParams); + + return eError; +} +#endif + +/*! +******************************************************************************* + + @Function RGXAcquireMipsBootldrData + + @Description Acquire MIPS bootloader data parameters + + @Input psDeviceNode : Device node + @Input puFWParams : FW boot parameters + + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR RGXAcquireMipsBootldrData(PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_FW_BOOT_PARAMS *puFWParams) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*) psDeviceNode->pvDevice; + IMG_DEV_PHYADDR sAddr; + PVRSRV_ERROR eError; + IMG_BOOL bValid; + + /* Rogue Registers physical address */ +#if defined(SUPPORT_ALT_REGBASE) + puFWParams->sMips.sGPURegAddr = psDeviceNode->psDevConfig->sAltRegsGpuPBase; +#else + PhysHeapCpuPAddrToDevPAddr(psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL], + 1, + &puFWParams->sMips.sGPURegAddr, + &(psDeviceNode->psDevConfig->sRegsCpuPBase)); +#endif + + /* MIPS Page Table physical address */ + 
MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sAddr); + + /* MIPS Page Table allocation is contiguous, only one address needs to be passed to the FW */ + puFWParams->sMips.ui32FWPageTableNumPages = 1U; + puFWParams->sMips.ui32FWPageTableLog2PageSize = + psDevInfo->psDeviceNode->psFirmwareMMUDevAttrs->ui32BaseAlign; + + puFWParams->sMips.asFWPageTableAddr[0U].uiAddr = sAddr.uiAddr; + puFWParams->sMips.asFWPageTableAddr[1U].uiAddr = 0ULL; + puFWParams->sMips.asFWPageTableAddr[2U].uiAddr = 0ULL; + puFWParams->sMips.asFWPageTableAddr[3U].uiAddr = 0ULL; + + /* MIPS Stack Pointer Physical Address */ + eError = RGXGetPhyAddr(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR, + &puFWParams->sMips.sFWStackAddr, + RGXGetFWImageSectionOffset(NULL, MIPS_STACK), + OSGetPageShift(), + 1, + &bValid); + + return eError; +} + +/*! +******************************************************************************* + + @Function InitFirmware + + @Description Allocate, initialise and pdump Firmware code and data memory + + @Input psDeviceNode : Device Node + @Input psHints : Apphints + + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_SRVINIT_APPHINTS *psHints) +{ + OS_FW_IMAGE *psRGXFW = NULL; + const IMG_BYTE *pbRGXFirmware = NULL; + + /* FW code memory */ + IMG_DEVMEM_SIZE_T uiFWCodeAllocSize; + void *pvFWCodeHostAddr; + + /* FW data memory */ + IMG_DEVMEM_SIZE_T uiFWDataAllocSize; + void *pvFWDataHostAddr; + + /* FW coremem code memory */ + IMG_DEVMEM_SIZE_T uiFWCorememCodeAllocSize; + void *pvFWCorememCodeHostAddr = NULL; + + /* FW coremem data memory */ + IMG_DEVMEM_SIZE_T uiFWCorememDataAllocSize; + void *pvFWCorememDataHostAddr = NULL; + + RGX_FW_BOOT_PARAMS uFWParams; + RGX_LAYER_PARAMS sLayerParams; + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + +#if defined(SUPPORT_TRUSTED_DEVICE) && 
!defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + IMG_BOOL bUseSecureFWData = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) || + RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR) || + (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && + RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32); +#endif + + /* + * Get pointer to Firmware image + */ + pbRGXFirmware = RGXLoadAndGetFWData(psDeviceNode, &psRGXFW); + if (!pbRGXFirmware) + { + /* Error or confirmation message generated in RGXLoadAndGetFWData */ + eError = PVRSRV_ERROR_INIT_FAILURE; + goto cleanup_initfw; + } + + sLayerParams.psDevInfo = psDevInfo; + + /* + * Allocate Firmware memory + */ + + eError = RGXGetFWImageAllocSize(&sLayerParams, + pbRGXFirmware, + OSFirmwareSize(psRGXFW), + &uiFWCodeAllocSize, + &uiFWDataAllocSize, + &uiFWCorememCodeAllocSize, + &uiFWCorememDataAllocSize); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: RGXGetFWImageAllocSize failed", + __func__)); + goto cleanup_initfw; + } + + psDevInfo->ui32FWCodeSizeInBytes = uiFWCodeAllocSize; + +#if defined(SUPPORT_TRUSTED_DEVICE) + /* Disable META core memory allocation unless the META DMA is available */ + if (!RGX_DEVICE_HAS_FEATURE(&sLayerParams, META_DMA)) + { + uiFWCorememCodeAllocSize = 0; + uiFWCorememDataAllocSize = 0; + } +#endif + + psDevInfo->ui32FWCorememCodeSizeInBytes = uiFWCorememCodeAllocSize; + + eError = RGXInitAllocFWImgMem(psDeviceNode, + uiFWCodeAllocSize, + uiFWDataAllocSize, + uiFWCorememCodeAllocSize, + uiFWCorememDataAllocSize); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: RGXInitAllocFWImgMem failed (%d)", + __func__, + eError)); + goto cleanup_initfw; + } + + /* + * Acquire pointers to Firmware allocations + */ + +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, &pvFWCodeHostAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", 
cleanup_initfw); + +#else + /* We can't get a pointer to a secure FW allocation from within the DDK */ + pvFWCodeHostAddr = NULL; +#endif + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + if (bUseSecureFWData) + { + /* We can't get a pointer to a secure FW allocation from within the DDK */ + pvFWDataHostAddr = NULL; + } + else +#endif + { + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc, &pvFWDataHostAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_code); + } + +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) + if (uiFWCorememCodeAllocSize) + { + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc, &pvFWCorememCodeHostAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_data); + } +#else + /* We can't get a pointer to a secure FW allocation from within the DDK */ + pvFWCorememCodeHostAddr = NULL; +#endif + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + if (bUseSecureFWData) + { + pvFWCorememDataHostAddr = NULL; + } + else +#endif + if (uiFWCorememDataAllocSize) + { + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc, &pvFWCorememDataHostAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_corememcode); + } + + /* + * Prepare FW boot parameters + */ + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + eError = RGXAcquireMipsBootldrData(psDeviceNode, &uFWParams); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: RGXAcquireMipsBootldrData failed (%d)", + __func__, eError)); + goto release_fw_allocations; + } + } + else if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + uFWParams.sMeta.sFWCodeDevVAddr = psDevInfo->sFWCodeDevVAddrBase; + uFWParams.sMeta.sFWDataDevVAddr = psDevInfo->sFWDataDevVAddrBase; + uFWParams.sMeta.sFWCorememCodeDevVAddr = 
psDevInfo->sFWCorememCodeDevVAddrBase; + uFWParams.sMeta.sFWCorememCodeFWAddr = psDevInfo->sFWCorememCodeFWAddr; + uFWParams.sMeta.uiFWCorememCodeSize = uiFWCorememCodeAllocSize; + uFWParams.sMeta.sFWCorememDataDevVAddr = psDevInfo->sFWCorememDataStoreDevVAddrBase; + uFWParams.sMeta.sFWCorememDataFWAddr = psDevInfo->sFWCorememDataStoreFWAddr; +#if defined(RGXFW_META_SUPPORT_2ND_THREAD) + uFWParams.sMeta.ui32NumThreads = 2; +#else + uFWParams.sMeta.ui32NumThreads = 1; +#endif + } + else + { + uFWParams.sRISCV.sFWCorememCodeDevVAddr = psDevInfo->sFWCorememCodeDevVAddrBase; + uFWParams.sRISCV.sFWCorememCodeFWAddr = psDevInfo->sFWCorememCodeFWAddr; + uFWParams.sRISCV.uiFWCorememCodeSize = uiFWCorememCodeAllocSize; + + uFWParams.sRISCV.sFWCorememDataDevVAddr = psDevInfo->sFWCorememDataStoreDevVAddrBase; + uFWParams.sRISCV.sFWCorememDataFWAddr = psDevInfo->sFWCorememDataStoreFWAddr; + uFWParams.sRISCV.uiFWCorememDataSize = uiFWCorememDataAllocSize; + } + + + /* + * Process the Firmware image and setup code and data segments. + * + * When the trusted device is enabled and the FW code lives + * in secure memory we will only setup the data segments here, + * while the code segments will be loaded to secure memory + * by the trusted device. 
+ */ + if (!psDeviceNode->bAutoVzFwIsUp) + { + eError = RGXProcessFWImage(&sLayerParams, + pbRGXFirmware, + pvFWCodeHostAddr, + pvFWDataHostAddr, + pvFWCorememCodeHostAddr, + pvFWCorememDataHostAddr, + &uFWParams); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: RGXProcessFWImage failed (%d)", + __func__, eError)); + goto release_fw_allocations; + } + } + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + RGXTDProcessFWImage(psDeviceNode, psRGXFW, &uFWParams); +#endif + + + /* + * PDump Firmware allocations + */ + +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump firmware code image"); + DevmemPDumpLoadMem(psDevInfo->psRGXFWCodeMemDesc, + 0, + uiFWCodeAllocSize, + PDUMP_FLAGS_CONTINUOUS); +#endif + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + if (!bUseSecureFWData) +#endif + { + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump firmware data image"); + DevmemPDumpLoadMem(psDevInfo->psRGXFWDataMemDesc, + 0, + uiFWDataAllocSize, + PDUMP_FLAGS_CONTINUOUS); + } + +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) + if (uiFWCorememCodeAllocSize) + { + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump firmware coremem code image"); + DevmemPDumpLoadMem(psDevInfo->psRGXFWCorememCodeMemDesc, + 0, + uiFWCorememCodeAllocSize, + PDUMP_FLAGS_CONTINUOUS); + } +#endif + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + if (!bUseSecureFWData && uiFWCorememDataAllocSize) +#else + if (uiFWCorememDataAllocSize) +#endif + { + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump firmware coremem data store image"); + DevmemPDumpLoadMem(psDevInfo->psRGXFWIfCorememDataStoreMemDesc, + 0, + uiFWCorememDataAllocSize, + PDUMP_FLAGS_CONTINUOUS); + } + + 
/* + * Release Firmware allocations and clean up + */ + +release_fw_allocations: +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + if (!bUseSecureFWData && uiFWCorememDataAllocSize) +#else + if (uiFWCorememDataAllocSize) +#endif + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc); + } +release_corememcode: +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) + if (uiFWCorememCodeAllocSize) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc); + } +#endif + +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) +release_data: +#endif +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + if (!bUseSecureFWData) +#endif + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc); + } + +release_code: +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); +#endif +cleanup_initfw: + if (psRGXFW != NULL) + { + OSUnloadFirmware(psRGXFW); + } + + return eError; +} + + +#if defined(PDUMP) +/*! +******************************************************************************* + + @Function InitialiseHWPerfCounters + + @Description Initialisation of hardware performance counters and dumping + them out to pdump, so that they can be modified at a later + point. 
 @Input         pvDevice
 @Input         psHWPerfDataMemDesc
 @Input         psHWPerfInitDataInt

 @Return        void

******************************************************************************/

static void InitialiseHWPerfCounters(void *pvDevice, DEVMEM_MEMDESC *psHWPerfDataMemDesc, RGXFWIF_HWPERF_CTL *psHWPerfInitDataInt)
{
	RGXFWIF_HWPERF_CTL_BLK *psHWPerfInitBlkData;
	IMG_UINT32 ui32CntBlkModelLen;
	const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *asCntBlkTypeModel;
	const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc;
	IMG_UINT32 ui32BlockID, ui32BlkCfgIdx, ui32CounterIdx;
	RGX_HWPERF_CNTBLK_RT_INFO sCntBlkRtInfo;

	/* Walk the static table of counter-block models; each model may map to
	 * several runtime block instances (units) on this core. */
	ui32CntBlkModelLen = RGXGetHWPerfBlockConfig(&asCntBlkTypeModel);
	for (ui32BlkCfgIdx = 0; ui32BlkCfgIdx < ui32CntBlkModelLen; ui32BlkCfgIdx++)
	{
		/* Exit early if this core does not have any of these counter blocks
		 * due to core type/BVNC features.... */
		psBlkTypeDesc = &asCntBlkTypeModel[ui32BlkCfgIdx];
		if (psBlkTypeDesc->pfnIsBlkPresent(psBlkTypeDesc, pvDevice, &sCntBlkRtInfo) == IMG_FALSE)
		{
			continue;
		}

		/* Program all counters in one block so those already on may
		 * be configured off and vice-a-versa. */
		for (ui32BlockID = psBlkTypeDesc->ui32CntBlkIdBase;
		     ui32BlockID < psBlkTypeDesc->ui32CntBlkIdBase+sCntBlkRtInfo.ui32NumUnits;
		     ui32BlockID++)
		{

			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Unit %d Block : %s",
			                      ui32BlockID-psBlkTypeDesc->ui32CntBlkIdBase, psBlkTypeDesc->pszBlockNameComment);
			/* Get the block configure store to update from the global store of
			 * block configuration. This is used to remember the configuration
			 * between configurations and core power on in APM */
			psHWPerfInitBlkData = rgxfw_hwperf_get_block_ctl(ui32BlockID, psHWPerfInitDataInt);
			/* Assert to check for HWPerf block mis-configuration */
			PVR_ASSERT(psHWPerfInitBlkData);

			/* Each field below is written to the host copy, then the same value
			 * is recorded in the pdump script at the field's byte offset inside
			 * the HWPerf control structure; the offset is computed as
			 * (address of field) - (base address of structure). */
			psHWPerfInitBlkData->bValid = IMG_TRUE;
			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "bValid: This specifies if the layout block is valid for the given BVNC.");
			DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
			                          (size_t)&(psHWPerfInitBlkData->bValid) - (size_t)(psHWPerfInitDataInt),
			                          psHWPerfInitBlkData->bValid,
			                          PDUMP_FLAGS_CONTINUOUS);

			psHWPerfInitBlkData->bEnabled = IMG_FALSE;
			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "bEnabled: Set to 0x1 if the block needs to be enabled during playback.");
			DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
			                          (size_t)&(psHWPerfInitBlkData->bEnabled) - (size_t)(psHWPerfInitDataInt),
			                          psHWPerfInitBlkData->bEnabled,
			                          PDUMP_FLAGS_CONTINUOUS);

			psHWPerfInitBlkData->eBlockID = ui32BlockID;
			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "eBlockID: The Block ID for the layout block. See RGX_HWPERF_CNTBLK_ID for further information.");
			DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
			                          (size_t)&(psHWPerfInitBlkData->eBlockID) - (size_t)(psHWPerfInitDataInt),
			                          psHWPerfInitBlkData->eBlockID,
			                          PDUMP_FLAGS_CONTINUOUS);

			psHWPerfInitBlkData->uiCounterMask = 0x00;
			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "uiCounterMask: Bitmask for selecting the counters that need to be configured. (Bit 0 - counter0, bit 1 - counter1 and so on.)");
			DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
			                          (size_t)&(psHWPerfInitBlkData->uiCounterMask) - (size_t)(psHWPerfInitDataInt),
			                          psHWPerfInitBlkData->uiCounterMask,
			                          PDUMP_FLAGS_CONTINUOUS);

			/* Zero each counter configuration word for this block type */
			for (ui32CounterIdx = RGX_CNTBLK_COUNTER0_ID; ui32CounterIdx < psBlkTypeDesc->ui8NumCounters; ui32CounterIdx++)
			{
				psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx] = IMG_UINT64_C(0x0000000000000000);

				PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "%s_COUNTER_%d", psBlkTypeDesc->pszBlockNameComment, ui32CounterIdx);
				DevmemPDumpLoadMemValue64(psHWPerfDataMemDesc,
				                          (size_t)&(psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx]) - (size_t)(psHWPerfInitDataInt),
				                          psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx],
				                          PDUMP_FLAGS_CONTINUOUS);

			}
		}
	}
}
/*!
*******************************************************************************

 @Function      InitialiseCustomCounters

 @Description   Initialisation of custom counters and dumping them out to
                pdump, so that they can be modified at a later point.

 @Input         psHWPerfDataMemDesc

 @Return        void

******************************************************************************/

static void InitialiseCustomCounters(DEVMEM_MEMDESC *psHWPerfDataMemDesc)
{
	IMG_UINT32 ui32CustomBlock, ui32CounterID;

	/* Record a zero custom-block selection mask in the pdump script */
	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "ui32SelectedCountersBlockMask - The Bitmask of the custom counters that are to be selected");
	DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
	                          offsetof(RGXFWIF_HWPERF_CTL, ui32SelectedCountersBlockMask),
	                          0,
	                          PDUMP_FLAGS_CONTINUOUS);

	for (ui32CustomBlock = 0; ui32CustomBlock < RGX_HWPERF_MAX_CUSTOM_BLKS; ui32CustomBlock++)
	{
		/*
		 * Some compilers cannot cope with the use of offsetof() below - the specific problem being the use of
		 * a non-const variable in the expression, which it needs to be const. Typical compiler error produced is
		 * "expression must have a constant value".
		 */
		/* Null-pointer member-address trick: equivalent to offsetof() with a
		 * variable array index. */
		const IMG_DEVMEM_OFFSET_T uiOffsetOfCustomBlockSelectedCounters
		= (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_HWPERF_CTL *)0)->SelCntr[ui32CustomBlock].ui32NumSelectedCounters);

		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "ui32NumSelectedCounters - The Number of counters selected for this Custom Block: %d",ui32CustomBlock );
		DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
		                          uiOffsetOfCustomBlockSelectedCounters,
		                          0,
		                          PDUMP_FLAGS_CONTINUOUS);

		for (ui32CounterID = 0; ui32CounterID < RGX_HWPERF_MAX_CUSTOM_CNTRS; ui32CounterID++ )
		{
			const IMG_DEVMEM_OFFSET_T uiOffsetOfCustomBlockSelectedCounterIDs
			= (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_HWPERF_CTL *)0)->SelCntr[ui32CustomBlock].aui32SelectedCountersIDs[ui32CounterID]);

			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "CUSTOMBLK_%d_COUNTERID_%d",ui32CustomBlock, ui32CounterID);
			DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
			                          uiOffsetOfCustomBlockSelectedCounterIDs,
			                          0,
			                          PDUMP_FLAGS_CONTINUOUS);
		}
	}
}

/*!
+******************************************************************************* + + @Function InitialiseAllCounters + + @Description Initialise HWPerf and custom counters + + @Input psDeviceNode : Device Node + + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR InitialiseAllCounters(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + RGXFWIF_HWPERF_CTL *psHWPerfInitData; + PVRSRV_ERROR eError; + + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfCountersMemDesc, (void **)&psHWPerfInitData); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", failHWPerfCountersMemDescAqCpuVirt); + + InitialiseHWPerfCounters(psDevInfo, psDevInfo->psRGXFWIfHWPerfCountersMemDesc, psHWPerfInitData); + InitialiseCustomCounters(psDevInfo->psRGXFWIfHWPerfCountersMemDesc); + +failHWPerfCountersMemDescAqCpuVirt: + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfCountersMemDesc); + + return eError; +} +#endif /* PDUMP */ + +/* + * _ParseHTBAppHints: + * + * Generate necessary references to the globally visible AppHints which are + * declared in the above #include "km_apphint_defs.h" + * Without these local references some compiler tool-chains will treat + * unreferenced declarations as fatal errors. This function duplicates the + * HTB_specific apphint references which are made in htbserver.c:HTBInit() + * However, it makes absolutely *NO* use of these hints. + */ +static void +_ParseHTBAppHints(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + void *pvParamState = NULL; + IMG_UINT32 ui32LogType; + IMG_BOOL bAnyLogGroupConfigured; + IMG_UINT32 ui32BufferSize; + IMG_UINT32 ui32OpMode; + + /* Services initialisation parameters */ + pvParamState = SrvInitParamOpen(); + if (pvParamState == NULL) + return; + + SrvInitParamGetUINT32BitField(pvParamState, EnableHTBLogGroup, ui32LogType); + bAnyLogGroupConfigured = ui32LogType ? 
IMG_TRUE : IMG_FALSE; + SrvInitParamGetUINT32List(pvParamState, HTBOperationMode, ui32OpMode); + SrvInitParamGetUINT32(pvParamState, HTBufferSizeInKB, ui32BufferSize); + + SrvInitParamClose(pvParamState); +} + +/*! +******************************************************************************* + + @Function RGXInit + + @Description RGX Initialisation + + @Input psDeviceNode + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + + /* Services initialisation parameters */ + RGX_SRVINIT_APPHINTS sApphints = {0}; + IMG_UINT32 ui32FWConfigFlags, ui32FWConfigFlagsExt, ui32FwOsCfgFlags; + IMG_UINT32 ui32DeviceFlags; + + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + +#if defined(SUPPORT_AUTOVZ) + if (PVRSRV_VZ_MODE_IS(HOST)) + { + const IMG_UINT32 ui32MtsDm0IntEnableReg = 0xB58; + + /* The RGX_CR_MTS_DM0_INTERRUPT_ENABLE register is always set by the firmware during initialisation + * and it provides a good method of determining if the firmware has been booted previously */ + psDeviceNode->bAutoVzFwIsUp = (OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32MtsDm0IntEnableReg) != 0); + + PVR_LOG(("AutoVz startup check: firmware is %s;", + (psDeviceNode->bAutoVzFwIsUp) ? 
"already running" : "powered down")); + } + else if (PVRSRV_VZ_MODE_IS(GUEST)) + { + /* Guest assumes the firmware is always available */ + psDeviceNode->bAutoVzFwIsUp = IMG_TRUE; + } + else +#endif + { + /* Firmware does not follow the AutoVz life-cycle */ + psDeviceNode->bAutoVzFwIsUp = IMG_FALSE; + } + + /* Services initialisation parameters */ + _ParseHTBAppHints(psDeviceNode); + GetApphints(psDevInfo, &sApphints); + InitDeviceFlags(&sApphints, &ui32DeviceFlags); + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + RGXVirtPopulateLMASubArenas(psDeviceNode, sApphints.aui32OSidMin, sApphints.aui32OSidMax, sApphints.bEnableTrustedDeviceAceConfig); +#endif + + eError = RGXInitCreateFWKernelMemoryContext(psDeviceNode); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create FW kernel memory context (%u)", + __func__, eError)); + goto cleanup; + } + + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + eError = InitFirmware(psDeviceNode, &sApphints); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: InitFirmware failed (%d)", + __func__, eError)); + goto cleanup; + } + } + + /* + * Setup Firmware initialisation data + */ + + GetFWConfigFlags(psDeviceNode, &sApphints, &ui32FWConfigFlags, &ui32FWConfigFlagsExt, &ui32FwOsCfgFlags); + + eError = RGXInitFirmware(psDeviceNode, + sApphints.bEnableSignatureChecks, + sApphints.ui32SignatureChecksBufSize, + sApphints.ui32HWPerfFWBufSize, + (IMG_UINT64)sApphints.ui32HWPerfFilter0 | + ((IMG_UINT64)sApphints.ui32HWPerfFilter1 << 32), + 0, + NULL, + ui32FWConfigFlags, + sApphints.ui32LogType, + GetFilterFlags(&sApphints), + sApphints.ui32JonesDisableMask, + sApphints.ui32HWRDebugDumpLimit, + sizeof(RGXFWIF_HWPERF_CTL), +#if defined(SUPPORT_VALIDATION) + &sApphints.aui32TPUTrilinearFracMask[0], +#else + NULL, +#endif + sApphints.eRGXRDPowerIslandConf, + sApphints.eFirmwarePerf, + ui32FWConfigFlagsExt, + ui32FwOsCfgFlags); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: RGXInitFirmware failed 
(%d)", + __func__, + eError)); + goto cleanup; + } + +#if defined(PDUMP) + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + eError = InitialiseAllCounters(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: InitialiseAllCounters failed (%d)", + __func__, eError)); + goto cleanup; + } + } +#endif + + /* + * Perform second stage of RGX initialisation + */ + eError = RGXInitDevPart2(psDeviceNode, + ui32DeviceFlags, + sApphints.ui32HWPerfHostBufSize, + sApphints.ui32HWPerfHostFilter, + sApphints.eRGXActivePMConf); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: RGXInitDevPart2 failed (%d)", + __func__, eError)); + goto cleanup; + } + +#if defined(SUPPORT_VALIDATION) + PVRSRVAppHintDumpState(); +#endif + + eError = PVRSRV_OK; + +cleanup: + return eError; +} + +/****************************************************************************** + End of file (rgxsrvinit.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxstartstop.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxstartstop.c new file mode 100644 index 000000000000..dbbb9259e5d5 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxstartstop.c @@ -0,0 +1,1248 @@ +/*************************************************************************/ /*! +@File +@Title Device specific start/stop routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific start/stop routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +/* The routines implemented here are built on top of an abstraction layer to + * hide DDK/OS-specific details in case they are used outside of the DDK + * (e.g. when trusted device is enabled). + * Any new dependency should be added to rgxlayer.h. + * Any new code should be built on top of the existing abstraction layer, + * which should be extended when necessary. */ +#include "rgxstartstop.h" + +#if defined(SUPPORT_SHARED_SLC) +#include "rgxapi_km.h" +#endif + +#include "rgxdevice.h" +#include "km/rgxdefs_km.h" + +#define SOC_FEATURE_STRICT_SAME_ADDRESS_WRITE_ORDERING + + +/*! +******************************************************************************* + + @Function RGXEnableClocks + + @Description Enable RGX Clocks + + @Input hPrivate : Implementation specific data + + @Return void + +******************************************************************************/ +static void RGXEnableClocks(const void *hPrivate) +{ + RGXCommentLog(hPrivate, "RGX clock: use default (automatic clock gating)"); +} + +static PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Wait for Slave Port to be Ready */ + eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); + if (eError != PVRSRV_OK) return eError; + + /* Issue a Write */ + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr); + (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */ + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT, ui32RegValue); + (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT); /* Fence write */ + + return eError; +} + +static PVRSRV_ERROR RGXReadMetaRegThroughSP(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + 
IMG_UINT32* ui32RegValue) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Wait for Slave Port to be Ready */ + eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); + if (eError != PVRSRV_OK) return eError; + + /* Issue a Read */ + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN); + (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */ + + /* Wait for Slave Port to be Ready */ + eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); + if (eError != PVRSRV_OK) return eError; + +#if !defined(NO_HARDWARE) + *ui32RegValue = RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAX); +#else + *ui32RegValue = 0xFFFFFFFF; +#endif + + return eError; +} + +static PVRSRV_ERROR RGXWriteMetaCoreRegThoughSP(const void *hPrivate, + IMG_UINT32 ui32CoreReg, + IMG_UINT32 ui32Value) +{ + IMG_UINT32 i = 0; + + RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXUXXRXDT_OFFSET, ui32Value); + RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXUXXRXRQ_OFFSET, ui32CoreReg & ~META_CR_TXUXXRXRQ_RDnWR_BIT); + + do + { + RGXReadMetaRegThroughSP(hPrivate, META_CR_TXUXXRXRQ_OFFSET, &ui32Value); + } while (((ui32Value & META_CR_TXUXXRXRQ_DREADY_BIT) != META_CR_TXUXXRXRQ_DREADY_BIT) && (i++ < 1000)); + + if (i == 1000) + { + RGXCommentLog(hPrivate, "RGXWriteMetaCoreRegThoughSP: Timeout"); + return PVRSRV_ERROR_TIMEOUT; + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR RGXStartFirmware(const void *hPrivate) +{ + PVRSRV_ERROR eError; + + /* Give privilege to debug and slave port */ + RGXWriteMetaRegThroughSP(hPrivate, META_CR_SYSC_JTAG_THREAD, META_CR_SYSC_JTAG_THREAD_PRIV_EN); + + /* Point Meta to the bootloader address, global 
(uncached) range */ + eError = RGXWriteMetaCoreRegThoughSP(hPrivate, + PC_ACCESS(0), + RGXFW_BOOTLDR_META_ADDR | META_MEM_GLOBAL_RANGE_BIT); + + if (eError != PVRSRV_OK) + { + RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Slave boot Start failed!"); + return eError; + } + + /* Enable minim encoding */ + RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXPRIVEXT, META_CR_TXPRIVEXT_MINIM_EN); + + /* Enable Meta thread */ + RGXWriteMetaRegThroughSP(hPrivate, META_CR_T0ENABLE_OFFSET, META_CR_TXENABLE_ENABLE_BIT); + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RGXInitMetaProcWrapper + + @Description Configures the hardware wrapper of the META processor + + @Input hPrivate : Implementation specific data + + @Return void + +******************************************************************************/ +static void RGXInitMetaProcWrapper(const void *hPrivate) +{ + IMG_UINT64 ui64GartenConfig; + + /* Set Garten IDLE to META idle and Set the Garten Wrapper BIF Fence address */ + + /* Garten IDLE bit controlled by META */ + ui64GartenConfig = RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META; + + /* The fence addr is set at the fw init sequence */ + + if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) + { + /* Set PC = 0 for fences */ + ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_CLRMSK; + ui64GartenConfig |= (IMG_UINT64)MMU_CONTEXT_MAPPING_FWPRIV + << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_SHIFT; + + } + else + { + /* Set PC = 0 for fences */ + ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK; + ui64GartenConfig |= (IMG_UINT64)MMU_CONTEXT_MAPPING_FWPRIV + << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT; + + /* Set SLC DM=META */ + ui64GartenConfig |= ((IMG_UINT64) RGXFW_SEGMMU_META_BIFDM_ID) << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT; + } + + RGXCommentLog(hPrivate, "RGXStart: Configure META wrapper"); 
+ RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, ui64GartenConfig); +} + + +/*! +******************************************************************************* + + @Function RGXInitMipsProcWrapper + + @Description Configures the hardware wrapper of the MIPS processor + + @Input hPrivate : Implementation specific data + + @Return void + +******************************************************************************/ +static void RGXInitMipsProcWrapper(const void *hPrivate) +{ + IMG_DEV_PHYADDR sPhyAddr; + IMG_UINT64 ui64RemapSettings = RGXMIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE; /* Same for all remap registers */ + + RGXCommentLog(hPrivate, "RGXStart: Configure MIPS wrapper"); + + /* + * MIPS wrapper (registers transaction ID and ISA mode) setup + */ + + RGXCommentLog(hPrivate, "RGXStart: Write wrapper config register"); + + if (RGXGetDevicePhysBusWidth(hPrivate) > 32) + { + RGXWriteReg32(hPrivate, + RGX_CR_MIPS_WRAPPER_CONFIG, + (RGXMIPSFW_REGISTERS_VIRTUAL_BASE >> + RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN) | + RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS); + } + else + { + RGXAcquireGPURegsAddr(hPrivate, &sPhyAddr); + + RGXMIPSWrapperConfig(hPrivate, + RGX_CR_MIPS_WRAPPER_CONFIG, + sPhyAddr.uiAddr, + RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN, + RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS); + } + + /* + * Boot remap setup + */ + + RGXAcquireBootRemapAddr(hPrivate, &sPhyAddr); + +#if defined(SUPPORT_TRUSTED_DEVICE) + /* Do not mark accesses to a FW code remap region as DRM accesses */ + ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK; +#endif + +#if defined(MIPS_FW_CODE_OSID) + ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK; + ui64RemapSettings |= MIPS_FW_CODE_OSID << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; +#endif + + RGXCommentLog(hPrivate, "RGXStart: Write boot remap registers"); + RGXBootRemapConfig(hPrivate, + RGX_CR_MIPS_ADDR_REMAP1_CONFIG1, + RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN | 
RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN, + RGX_CR_MIPS_ADDR_REMAP1_CONFIG2, + sPhyAddr.uiAddr, + ~RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK, + ui64RemapSettings); + + if (RGX_DEVICE_HAS_BRN(hPrivate, 63553)) + { + IMG_BOOL bPhysBusAbove32Bit = RGXGetDevicePhysBusWidth(hPrivate) > 32; + IMG_BOOL bDevicePA0IsValid = RGXDevicePA0IsValid(hPrivate); + + /* WA always required on 36 bit cores, to avoid continuous unmapped memory accesses to address 0x0 */ + if (bPhysBusAbove32Bit || !bDevicePA0IsValid) + { + RGXCodeRemapConfig(hPrivate, + RGX_CR_MIPS_ADDR_REMAP5_CONFIG1, + 0x0 | RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN, + RGX_CR_MIPS_ADDR_REMAP5_CONFIG2, + sPhyAddr.uiAddr, + ~RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK, + ui64RemapSettings); + } + } + + /* + * Data remap setup + */ + + RGXAcquireDataRemapAddr(hPrivate, &sPhyAddr); + +#if defined(SUPPORT_TRUSTED_DEVICE) + if (RGXGetDevicePhysBusWidth(hPrivate) > 32) + { + /* Remapped private data in secure memory */ + ui64RemapSettings |= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_EN; + } + else + { + /* Remapped data in non-secure memory */ + ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK; + } +#endif + +#if defined(MIPS_FW_CODE_OSID) + ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK; +#endif + + RGXCommentLog(hPrivate, "RGXStart: Write data remap registers"); + RGXDataRemapConfig(hPrivate, + RGX_CR_MIPS_ADDR_REMAP2_CONFIG1, + RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN, + RGX_CR_MIPS_ADDR_REMAP2_CONFIG2, + sPhyAddr.uiAddr, + ~RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK, + ui64RemapSettings); + + /* + * Code remap setup + */ + + RGXAcquireCodeRemapAddr(hPrivate, &sPhyAddr); + +#if defined(SUPPORT_TRUSTED_DEVICE) + /* Do not mark accesses to a FW code remap region as DRM accesses */ + ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK; +#endif + +#if defined(MIPS_FW_CODE_OSID) + ui64RemapSettings &= 
RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK; + ui64RemapSettings |= MIPS_FW_CODE_OSID << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; +#endif + + RGXCommentLog(hPrivate, "RGXStart: Write exceptions remap registers"); + RGXCodeRemapConfig(hPrivate, + RGX_CR_MIPS_ADDR_REMAP3_CONFIG1, + RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN, + RGX_CR_MIPS_ADDR_REMAP3_CONFIG2, + sPhyAddr.uiAddr, + ~RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK, + ui64RemapSettings); + + if (RGXGetDevicePhysBusWidth(hPrivate) == 32) + { + /* + * Trampoline remap setup + */ + + RGXAcquireTrampolineRemapAddr(hPrivate, &sPhyAddr); + ui64RemapSettings = RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE; + +#if defined(SUPPORT_TRUSTED_DEVICE) + /* Remapped data in non-secure memory */ + ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK; +#endif + +#if defined(MIPS_FW_CODE_OSID) + ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK; +#endif + + RGXCommentLog(hPrivate, "RGXStart: Write trampoline remap registers"); + RGXTrampolineRemapConfig(hPrivate, + RGX_CR_MIPS_ADDR_REMAP4_CONFIG1, + sPhyAddr.uiAddr | RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN, + RGX_CR_MIPS_ADDR_REMAP4_CONFIG2, + RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR, + ~RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_CLRMSK, + ui64RemapSettings); + } + + /* Garten IDLE bit controlled by MIPS */ + RGXCommentLog(hPrivate, "RGXStart: Set GARTEN_IDLE type to MIPS"); + RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META); + + /* Turn on the EJTAG probe (only useful driver live) */ + RGXWriteReg32(hPrivate, RGX_CR_MIPS_DEBUG_CONFIG, 0); +} + + +/*! 
+*******************************************************************************
+
+ @Function       RGXInitRiscvProcWrapper
+
+ @Description    Configures the hardware wrapper of the RISCV processor:
+                 programs the bootloader code and data remap windows and
+                 routes the Garten idle control to the firmware core.
+
+ @Input          hPrivate : Implementation specific data
+
+ @Return         void
+
+******************************************************************************/
+static void RGXInitRiscvProcWrapper(const void *hPrivate)
+{
+	IMG_DEV_VIRTADDR sTmp;
+
+	RGXCommentLog(hPrivate, "RGXStart: Configure RISCV wrapper");
+
+	/* Boot code remap window: base = FW boot code device-virtual address,
+	 * region size field encodes RGX_FIRMWARE_RAW_HEAP_SIZE (pre-shifted by
+	 * the REGION_SIZE alignment), MMU context = MMU_CONTEXT_MAPPING_FWPRIV,
+	 * and FETCH_EN so the core may execute (instruction-fetch) from it. */
+	RGXCommentLog(hPrivate, "RGXStart: Write boot code remap");
+	RGXAcquireBootCodeAddr(hPrivate, &sTmp);
+	RGXWriteReg64(hPrivate,
+	              RGXRISCVFW_BOOTLDR_CODE_REMAP,
+	              sTmp.uiAddr |
+	              (IMG_UINT64) (RGX_FIRMWARE_RAW_HEAP_SIZE >> TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_REGION_SIZE_ALIGN)
+	                  << TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_REGION_SIZE_SHIFT |
+	              (IMG_UINT64) MMU_CONTEXT_MAPPING_FWPRIV << TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_MMU_CONTEXT_SHIFT |
+	              TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_FETCH_EN);
+
+	/* Boot data remap window: same base/size/MMU-context encoding as the
+	 * code window above, but LOAD_STORE_EN instead of FETCH_EN, i.e. data
+	 * (load/store) accesses rather than instruction fetches. */
+	RGXCommentLog(hPrivate, "RGXStart: Write boot data remap");
+	RGXAcquireBootDataAddr(hPrivate, &sTmp);
+	RGXWriteReg64(hPrivate,
+	              RGXRISCVFW_BOOTLDR_DATA_REMAP,
+	              sTmp.uiAddr |
+	              (IMG_UINT64) (RGX_FIRMWARE_RAW_HEAP_SIZE >> TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_REGION_SIZE_ALIGN)
+	                  << TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_REGION_SIZE_SHIFT |
+	              (IMG_UINT64) MMU_CONTEXT_MAPPING_FWPRIV << TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_MMU_CONTEXT_SHIFT |
+	              TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_LOAD_STORE_EN);
+
+	/* Garten IDLE bit controlled by RISCV.
+	 * NOTE(review): the _META-suffixed IDLE_CTRL field value is written here
+	 * for the RISCV core as well (the MIPS path in this file does the same)
+	 * — presumably the encoding means "FW processor controls idle" for any
+	 * core; confirm against the register definition. */
+	RGXCommentLog(hPrivate, "RGXStart: Set GARTEN_IDLE type to RISCV");
+	RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META);
+}
+
+
+/*!
+******************************************************************************* + + @Function __RGXInitSLC + + @Description Initialise RGX SLC + + @Input hPrivate : Implementation specific data + + @Return void + +******************************************************************************/ +static void __RGXInitSLC(const void *hPrivate) +{ + if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_CACHE_HIERARCHY)) + { + IMG_UINT32 ui32Reg; + IMG_UINT32 ui32RegVal; + + /* + * SLC control + */ + ui32Reg = RGX_CR_SLC3_CTRL_MISC; + ui32RegVal = RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH | + RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN; + RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal); + + /* + * SLC scramble bits + */ + { + IMG_UINT32 i; + IMG_UINT32 ui32Count=0; + IMG_UINT32 ui32SLCBanks = RGXGetDeviceSLCBanks(hPrivate); + IMG_UINT64 aui64ScrambleValues[4]; + IMG_UINT32 aui32ScrambleRegs[] = { + RGX_CR_SLC3_SCRAMBLE, + RGX_CR_SLC3_SCRAMBLE2, + RGX_CR_SLC3_SCRAMBLE3, + RGX_CR_SLC3_SCRAMBLE4 + }; + + if (2 == ui32SLCBanks) + { + aui64ScrambleValues[0] = IMG_UINT64_C(0x6965a99a55696a6a); + aui64ScrambleValues[1] = IMG_UINT64_C(0x6aa9aa66959aaa9a); + aui64ScrambleValues[2] = IMG_UINT64_C(0x9a5665965a99a566); + aui64ScrambleValues[3] = IMG_UINT64_C(0x5aa69596aa66669a); + ui32Count = 4; + } + else if (4 == ui32SLCBanks) + { + aui64ScrambleValues[0] = IMG_UINT64_C(0xc6788d722dd29ce4); + aui64ScrambleValues[1] = IMG_UINT64_C(0x7272e4e11b279372); + aui64ScrambleValues[2] = IMG_UINT64_C(0x87d872d26c6c4be1); + aui64ScrambleValues[3] = IMG_UINT64_C(0xe1b4878d4b36e478); + ui32Count = 4; + + } + else if (8 == ui32SLCBanks) + { + aui64ScrambleValues[0] = IMG_UINT64_C(0x859d6569e8fac688); + aui64ScrambleValues[1] = IMG_UINT64_C(0xf285e1eae4299d33); + aui64ScrambleValues[2] = IMG_UINT64_C(0x1e1af2be3c0aa447); + ui32Count = 3; + } + + for (i = 0; i < ui32Count; i++) + { + IMG_UINT32 ui32Reg = aui32ScrambleRegs[i]; + IMG_UINT64 ui64Value = aui64ScrambleValues[i]; + RGXWriteReg64(hPrivate, 
ui32Reg, ui64Value); + } + } + + { + /* Disable the forced SLC coherency which the hardware enables for compatibility with older pdumps */ + RGXCommentLog(hPrivate, "Disable forced SLC coherency"); + RGXWriteReg64(hPrivate, RGX_CR_GARTEN_SLC, 0); + } + } + else + { + IMG_UINT32 ui32Reg; + IMG_UINT32 ui32RegVal; + IMG_UINT64 ui64RegVal; + IMG_UINT32 ui32SLCSize = RGXGetDeviceSLCSize(hPrivate); + + /* + * SLC Bypass control + */ + ui32Reg = RGX_CR_SLC_CTRL_BYPASS; + ui64RegVal = 0; + + if (RGX_DEVICE_HAS_FEATURE(hPrivate, SLCSIZE8) || + RGX_DEVICE_HAS_BRN(hPrivate, 61450)) + { + RGXCommentLog(hPrivate, "Bypass SLC for IPF_OBJ and IPF_CPF"); + ui64RegVal |= (IMG_UINT64) RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN | + (IMG_UINT64) RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_EN; + } + + if (ui32SLCSize < RGX_TPU_CACHED_SLC_SIZE_THRESHOLD) + { + /* Bypass SLC for textures if the SLC size is less than the threshold. */ + RGXCommentLog(hPrivate, "Bypass SLC for TPU"); + ui64RegVal |= (IMG_UINT64) RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_EN; + } + + if (ui64RegVal != 0) + { + RGXReadModifyWriteReg64(hPrivate, ui32Reg, ui64RegVal, ~ui64RegVal); + } + + if (RGX_DEVICE_HAS_FEATURE(hPrivate, XE_MEMORY_HIERARCHY) && (ui32SLCSize > RGX_TCU_CACHED_SLC_SIZE_THRESHOLD)) + { + /* The default for the TCU bit is 1 and we want to knock it down, + * which is not possible as part of the rmw above. So, repeat + * the rmw here OR'ing a value of 0. + */ + RGXReadModifyWriteReg64(hPrivate, RGX_CR_SLC_CTRL_BYPASS, 0, RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_CLRMSK); + + } + + + + /* + * SLC Misc control. + * + * Note: This is a 64bit register and we set only the lower 32bits leaving the top + * 32bits (RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS) unchanged from the HW default. 
+ */ + ui32Reg = RGX_CR_SLC_CTRL_MISC; + ui32RegVal = (RGXReadReg32(hPrivate, ui32Reg) & RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN) | + RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1; + + if (RGX_DEVICE_HAS_BRN(hPrivate, 60084)) + { +#if !defined(SOC_FEATURE_STRICT_SAME_ADDRESS_WRITE_ORDERING) + ui32RegVal |= RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN; +#else + if (RGX_DEVICE_HAS_ERN(hPrivate, 61389)) + { + ui32RegVal |= RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN; + } +#endif + } + /* Bypass burst combiner if SLC line size is smaller than 1024 bits */ + if (RGXGetDeviceCacheLineSize(hPrivate) < 1024) + { + ui32RegVal |= RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN; + } + + RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal); + } +} + + +/*! +******************************************************************************* + + @Function RGXInitBIF + + @Description Initialise RGX BIF + + @Input hPrivate : Implementation specific data + + @Return void + +******************************************************************************/ +static void RGXInitBIF(const void *hPrivate) +{ + if (!RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS)) + { + IMG_DEV_PHYADDR sPCAddr; + + /* + * Acquire the address of the Kernel Page Catalogue. + */ + RGXAcquireKernelMMUPC(hPrivate, &sPCAddr); + + /* + * Write the kernel catalogue base. 
+ */ + RGXCommentLog(hPrivate, "RGX firmware MMU Page Catalogue"); + + if (!RGX_DEVICE_HAS_FEATURE(hPrivate, SLC_VIVT)) + { + /* Write the cat-base address */ + RGXWriteKernelMMUPC64(hPrivate, + RGX_CR_BIF_CAT_BASE0, + RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT, + RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT, + ((sPCAddr.uiAddr + >> RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT) + << RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT) + & ~RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK); + + if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) + { + /* Keep catbase registers in sync */ + RGXWriteKernelMMUPC64(hPrivate, + TMP_RGX_CR_FWCORE_MEM_CAT_BASE0, + TMP_RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT, + TMP_RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT, + ((sPCAddr.uiAddr + >> TMP_RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT) + << TMP_RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT) + & ~TMP_RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK); + } + + /* + * Trusted Firmware boot + */ +#if defined(SUPPORT_TRUSTED_DEVICE) + RGXCommentLog(hPrivate, "RGXInitBIF: Trusted Device enabled"); + RGXWriteReg32(hPrivate, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN); +#endif + } + else + { + IMG_UINT32 uiPCAddr; + uiPCAddr = (((sPCAddr.uiAddr >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT) + << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT) + & ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK); + + /* Set the mapping context */ + RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, MMU_CONTEXT_MAPPING_FWPRIV); + (void)RGXReadReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT); /* Fence write */ + + /* Write the cat-base address */ + RGXWriteKernelMMUPC32(hPrivate, + RGX_CR_MMU_CBASE_MAPPING, + RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT, + RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT, + uiPCAddr); + +#if (MMU_CONTEXT_MAPPING_FWIF != MMU_CONTEXT_MAPPING_FWPRIV) + /* Set-up different MMU ID mapping to the same PC used above */ + RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, MMU_CONTEXT_MAPPING_FWIF); + (void)RGXReadReg32(hPrivate, 
RGX_CR_MMU_CBASE_MAPPING_CONTEXT); /* Fence write */ + + RGXWriteKernelMMUPC32(hPrivate, + RGX_CR_MMU_CBASE_MAPPING, + RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT, + RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT, + uiPCAddr); +#endif + } + } + else + { + /* + * Trusted Firmware boot + */ +#if defined(SUPPORT_TRUSTED_DEVICE) + RGXCommentLog(hPrivate, "RGXInitBIF: Trusted Device enabled"); + RGXWriteReg32(hPrivate, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN); +#endif + } +} + + +/*! +******************************************************************************* + + @Function RGXAXIACELiteInit + + @Description Initialise AXI-ACE Lite interface + + @Input hPrivate : Implementation specific data + + @Return void + +******************************************************************************/ +static void RGXAXIACELiteInit(const void *hPrivate) +{ + IMG_UINT32 ui32RegAddr; + IMG_UINT64 ui64RegVal; + + ui32RegAddr = RGX_CR_AXI_ACE_LITE_CONFIGURATION; + + /* Setup AXI-ACE config. Set everything to outer cache */ + ui64RegVal = (3U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT) | + (3U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT) | + (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT) | + (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT) | + (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT) | + (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT) | + (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT) | + (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT); + + if (RGX_DEVICE_HAS_BRN(hPrivate, 42321)) + { + ui64RegVal |= (((IMG_UINT64) 1) << RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT); + } + + if (RGX_DEVICE_HAS_BRN(hPrivate, 68186)) + { + /* default value for reg_enable_fence_out is zero. 
Force to 1 to allow core_clk < mem_clk */ + ui64RegVal |= (IMG_UINT64)1 << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_SHIFT; + } + +#if defined(SUPPORT_TRUSTED_DEVICE) + if (RGX_DEVICE_HAS_FEATURE(hPrivate, SLC_VIVT)) + { + RGXCommentLog(hPrivate, "OSID 0 and 1 are trusted"); + ui64RegVal |= IMG_UINT64_C(0xFC) + << RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT; + } +#endif + + RGXCommentLog(hPrivate, "Init AXI-ACE interface"); + RGXWriteReg64(hPrivate, ui32RegAddr, ui64RegVal); +} + + +PVRSRV_ERROR RGXStart(const void *hPrivate) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_BOOL bDoFWSlaveBoot; + IMG_CHAR *pcRGXFW_PROCESSOR; + IMG_BOOL bMetaFW; + + if (RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS)) + { + pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_MIPS; + bMetaFW = IMG_FALSE; + bDoFWSlaveBoot = IMG_FALSE; + } + else if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) + { + pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_RISCV; + bMetaFW = IMG_FALSE; + bDoFWSlaveBoot = IMG_FALSE; + } + else + { + pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META; + bMetaFW = IMG_TRUE; + bDoFWSlaveBoot = RGXDoFWSlaveBoot(hPrivate); + } + + if (RGX_DEVICE_HAS_FEATURE(hPrivate, SYS_BUS_SECURE_RESET)) + { + /* Disable the default sys_bus_secure protection to perform minimal setup */ + RGXCommentLog(hPrivate, "RGXStart: Disable sys_bus_secure"); + RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, 0); + (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */ + } + +#if defined(SUPPORT_SHARED_SLC) + /* When the SLC is shared, the SLC reset is performed by the System layer when calling + * RGXInitSLC (before any device uses it), therefore mask out the SLC bit to avoid + * soft_resetting it here. 
+ */ +#define RGX_CR_SOFT_RESET_ALL (RGX_CR_SOFT_RESET_MASKFULL ^ RGX_CR_SOFT_RESET_SLC_EN) + RGXCommentLog(hPrivate, "RGXStart: Shared SLC (don't reset SLC as part of RGX reset)"); +#else +#define RGX_CR_SOFT_RESET_ALL (RGX_CR_SOFT_RESET_MASKFULL) +#endif + + if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) + { + /* Set RGX in soft-reset */ + RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 1"); + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_DUSTS); + + /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */ + (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); + (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET2); + + RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 2"); + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_JONES_ALL | RGX_S7_SOFT_RESET_DUSTS); + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET2, RGX_S7_SOFT_RESET2); + + (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); + (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET2); + + /* Take everything out of reset but the FW processor */ + RGXCommentLog(hPrivate, "RGXStart: soft reset de-assert step 1 excluding %s", pcRGXFW_PROCESSOR); + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_DUSTS | RGX_CR_SOFT_RESET_GARTEN_EN); + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET2, 0x0); + + (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); + (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET2); + + RGXCommentLog(hPrivate, "RGXStart: soft reset de-assert step 2 excluding %s", pcRGXFW_PROCESSOR); + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN); + + (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); + } + else + { + /* Set RGX in soft-reset */ + RGXCommentLog(hPrivate, "RGXStart: soft reset everything"); + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_ALL); + + /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */ + (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); + + /* 
Take Rascal and Dust out of reset */ + RGXCommentLog(hPrivate, "RGXStart: Rascal and Dust out of reset"); + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_ALL ^ RGX_CR_SOFT_RESET_RASCALDUSTS_EN); + + (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); + + /* Take everything out of reset but the FW processor */ + RGXCommentLog(hPrivate, "RGXStart: Take everything out of reset but %s", pcRGXFW_PROCESSOR); + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN); + + (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); + } + + /* Enable clocks */ + RGXEnableClocks(hPrivate); + + /* + * Initialise SLC. + */ +#if !defined(SUPPORT_SHARED_SLC) + __RGXInitSLC(hPrivate); +#endif + + if (bMetaFW) + { + if (bDoFWSlaveBoot) + { + /* Configure META to Slave boot */ + RGXCommentLog(hPrivate, "RGXStart: META Slave boot"); + RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, 0); + + } + else + { + /* Configure META to Master boot */ + RGXCommentLog(hPrivate, "RGXStart: META Master boot"); + RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, RGX_CR_META_BOOT_MODE_EN); + } + } + + /* + * Initialise Firmware wrapper + */ + if (bMetaFW) + { + RGXInitMetaProcWrapper(hPrivate); + } + else if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) + { + RGXInitRiscvProcWrapper(hPrivate); + } + else + { + RGXInitMipsProcWrapper(hPrivate); + } + + if (RGX_DEVICE_HAS_FEATURE(hPrivate, AXI_ACELITE)) + { + /* We must init the AXI-ACE interface before 1st BIF transaction */ + RGXAXIACELiteInit(hPrivate); + } + + /* + * Initialise BIF. + */ + RGXInitBIF(hPrivate); + + RGXCommentLog(hPrivate, "RGXStart: Take %s out of reset", pcRGXFW_PROCESSOR); + + /* Need to wait for at least 16 cycles before taking the FW processor out of reset ... */ + RGXWaitCycles(hPrivate, 32, 3); + + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, 0x0); + (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); + + /* ... 
and afterwards */ + RGXWaitCycles(hPrivate, 32, 3); + + if (bMetaFW && bDoFWSlaveBoot) + { + eError = RGXFabricCoherencyTest(hPrivate); + if (eError != PVRSRV_OK) return eError; + + RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Slave boot Start"); + eError = RGXStartFirmware(hPrivate); + if (eError != PVRSRV_OK) return eError; + } + else + { + RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Master boot Start"); + + if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) + { + RGXWriteReg32(hPrivate, TMP_RGX_CR_FWCORE_BOOT, 1); + RGXWaitCycles(hPrivate, 32, 3); + } + } + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(SUPPORT_SECURITY_VALIDATION) + RGXCommentLog(hPrivate, "RGXStart: Enable sys_bus_secure"); + RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, RGX_CR_SYS_BUS_SECURE_ENABLE_EN); + (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */ +#endif + + return eError; +} + +PVRSRV_ERROR RGXStop(const void *hPrivate) +{ + IMG_BOOL bMipsFW = RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS); + IMG_BOOL bRiscvFW = RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR); + IMG_BOOL bMetaFW = !bMipsFW && !bRiscvFW; + PVRSRV_ERROR eError; + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + RGXDeviceIrqEventRx(hPrivate); + + /* Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper */ + if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) + { + eError = RGXPollReg32(hPrivate, + RGX_CR_SIDEKICK_IDLE, + RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN), + RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN)); + } + else + { + eError = RGXPollReg32(hPrivate, + RGX_CR_JONES_IDLE, + 
RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN), + RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN)); + } + + if (eError != PVRSRV_OK) return eError; + + +#if !defined(SUPPORT_SHARED_SLC) + /* Wait for SLC to signal IDLE */ + if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) + { + eError = RGXPollReg32(hPrivate, + RGX_CR_SLC_IDLE, + RGX_CR_SLC_IDLE_MASKFULL, + RGX_CR_SLC_IDLE_MASKFULL); + } + else + { + eError = RGXPollReg32(hPrivate, + RGX_CR_SLC3_IDLE, + RGX_CR_SLC3_IDLE_MASKFULL, + RGX_CR_SLC3_IDLE_MASKFULL); + } +#endif /* SUPPORT_SHARED_SLC */ + if (eError != PVRSRV_OK) return eError; + + + /* Unset MTS DM association with threads */ + RGXWriteReg32(hPrivate, + RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC, + RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK + & RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL); + RGXWriteReg32(hPrivate, + RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC, + RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK + & RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL); + RGXWriteReg32(hPrivate, + RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC, + RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK + & RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL); + RGXWriteReg32(hPrivate, + RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC, + RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK + & RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL); + + +#if defined(PDUMP) + if (bMetaFW) + { + /* Disabling threads is only required for pdumps to stop the fw gracefully */ + + /* Disable thread 0 */ + eError = RGXWriteMetaRegThroughSP(hPrivate, + META_CR_T0ENABLE_OFFSET, + ~META_CR_TXENABLE_ENABLE_BIT); + if (eError != PVRSRV_OK) return eError; + + /* Disable thread 1 */ + eError = RGXWriteMetaRegThroughSP(hPrivate, + META_CR_T1ENABLE_OFFSET, + ~META_CR_TXENABLE_ENABLE_BIT); + if (eError != PVRSRV_OK) return eError; + + /* Clear down any irq raised by META (done after disabling the FW + * 
threads to avoid a race condition). + * This is only really needed for PDumps but we do it anyway driver-live. + */ + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS, 0x0); + (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS); /* Fence write */ + + /* Wait for the Slave Port to finish all the transactions */ + eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1, + RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); + if (eError != PVRSRV_OK) return eError; + } +#endif + + + /* Extra Idle checks */ + eError = RGXPollReg32(hPrivate, + RGX_CR_BIF_STATUS_MMU, + 0, + RGX_CR_BIF_STATUS_MMU_MASKFULL); + if (eError != PVRSRV_OK) return eError; + + eError = RGXPollReg32(hPrivate, + RGX_CR_BIFPM_STATUS_MMU, + 0, + RGX_CR_BIFPM_STATUS_MMU_MASKFULL); + if (eError != PVRSRV_OK) return eError; + + if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE) && + !RGX_DEVICE_HAS_FEATURE(hPrivate, XT_TOP_INFRASTRUCTURE)) + { + eError = RGXPollReg32(hPrivate, + RGX_CR_BIF_READS_EXT_STATUS, + 0, + RGX_CR_BIF_READS_EXT_STATUS_MASKFULL); + if (eError != PVRSRV_OK) return eError; + } + + + eError = RGXPollReg32(hPrivate, + RGX_CR_BIFPM_READS_EXT_STATUS, + 0, + RGX_CR_BIFPM_READS_EXT_STATUS_MASKFULL); + if (eError != PVRSRV_OK) return eError; + + { + IMG_UINT64 ui64SLCMask = RGX_CR_SLC_STATUS1_MASKFULL; + eError = RGXPollReg64(hPrivate, + RGX_CR_SLC_STATUS1, + 0, + ui64SLCMask); + if (eError != PVRSRV_OK) return eError; + } + + if (4 == RGXGetDeviceSLCBanks(hPrivate)) + { + eError = RGXPollReg64(hPrivate, + RGX_CR_SLC_STATUS2, + 0, + RGX_CR_SLC_STATUS2_MASKFULL); + if (eError != PVRSRV_OK) return eError; + } + +#if !defined(SUPPORT_SHARED_SLC) + /* Wait for SLC to signal IDLE */ + if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) + { + eError = RGXPollReg32(hPrivate, + RGX_CR_SLC_IDLE, + RGX_CR_SLC_IDLE_MASKFULL, + RGX_CR_SLC_IDLE_MASKFULL); + } + 
else + { + eError = RGXPollReg32(hPrivate, + RGX_CR_SLC3_IDLE, + RGX_CR_SLC3_IDLE_MASKFULL, + RGX_CR_SLC3_IDLE_MASKFULL); + } +#endif /* SUPPORT_SHARED_SLC */ + if (eError != PVRSRV_OK) return eError; + + + /* Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper */ + if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) + { + eError = RGXPollReg32(hPrivate, + RGX_CR_SIDEKICK_IDLE, + RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN), + RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN)); + } + else + { + if (!RGX_DEVICE_HAS_FEATURE(hPrivate, FASTRENDER_DM)) + { + eError = RGXPollReg32(hPrivate, + RGX_CR_JONES_IDLE, + RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN), + RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN)); + } + } + + if (eError != PVRSRV_OK) return eError; + + + if (bMetaFW) + { + IMG_UINT32 ui32RegValue; + + eError = RGXReadMetaRegThroughSP(hPrivate, + META_CR_TxVECINT_BHALT, + &ui32RegValue); + if (eError != PVRSRV_OK) return eError; + + if ((ui32RegValue & 0xFFFFFFFFU) == 0x0) + { + /* Wait for Sidekick/Jones to signal IDLE including + * the Garten Wrapper if there is no debugger attached + * (TxVECINT_BHALT = 0x0) */ + if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) + { + eError = RGXPollReg32(hPrivate, + RGX_CR_SIDEKICK_IDLE, + RGX_CR_SIDEKICK_IDLE_GARTEN_EN, + RGX_CR_SIDEKICK_IDLE_GARTEN_EN); + if (eError != PVRSRV_OK) return eError; + } + else + { + eError = RGXPollReg32(hPrivate, + RGX_CR_JONES_IDLE, + RGX_CR_JONES_IDLE_GARTEN_EN, + RGX_CR_JONES_IDLE_GARTEN_EN); + if (eError != PVRSRV_OK) return eError; + } + } + } + else + { + if (PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0) + { + /* As FW has been separated 
from SIDEKICK to the new MARS domain, + * checking for CPU & System Arbiter idle bits( SOCIF will never be Idle if + * Host polling on this register) */ + eError = RGXPollReg32(hPrivate, + RGX_CR_MARS_IDLE, + RGX_CR_MARS_IDLE_CPU_EN | RGX_CR_MARS_IDLE_MH_SYSARB0_EN, + RGX_CR_MARS_IDLE_CPU_EN | RGX_CR_MARS_IDLE_MH_SYSARB0_EN); + if (eError != PVRSRV_OK) return eError; + } + else + { + eError = RGXPollReg32(hPrivate, + RGX_CR_SIDEKICK_IDLE, + RGX_CR_SIDEKICK_IDLE_GARTEN_EN, + RGX_CR_SIDEKICK_IDLE_GARTEN_EN); + if (eError != PVRSRV_OK) return eError; + } + } + + return eError; +} + + +/* + * RGXInitSLC + */ +#if defined(SUPPORT_SHARED_SLC) +PVRSRV_ERROR RGXInitSLC(IMG_HANDLE hDevHandle) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo; + void *pvPowerParams; + + if (psDeviceNode == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + psDevInfo = psDeviceNode->pvDevice; + pvPowerParams = &psDevInfo->sLayerParams; + + /* reset the SLC */ + RGXCommentLog(pvPowerParams, "RGXInitSLC: soft reset SLC"); + RGXWriteReg64(pvPowerParams, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_SLC_EN); + + /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */ + (void) RGXReadReg64(pvPowerParams, RGX_CR_SOFT_RESET); + + /* Take everything out of reset */ + RGXWriteReg64(pvPowerParams, RGX_CR_SOFT_RESET, 0x0); + + __RGXInitSLC(pvPowerParams); + + return PVRSRV_OK; +} +#endif diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxsyncutils.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxsyncutils.c new file mode 100644 index 000000000000..a2f066ad8352 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxsyncutils.c @@ -0,0 +1,175 @@ +/*************************************************************************/ /*! +@File rgxsyncutils.c +@Title RGX Sync Utilities +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description RGX Sync helper functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#include "rgxsyncutils.h" + +#include "sync_server.h" +#include "sync_internal.h" +#include "sync.h" +#include "allocmem.h" + +#if defined(SUPPORT_BUFFER_SYNC) +#include "pvr_buffer_sync.h" +#endif + +#include "sync_checkpoint.h" +#include "sync_checkpoint_internal.h" + +//#define TA3D_CHECKPOINT_DEBUG + +#if defined(TA3D_CHECKPOINT_DEBUG) +#define CHKPT_DBG(X) PVR_DPF(X) +static +void _DebugSyncValues(IMG_UINT32 *pui32UpdateValues, + IMG_UINT32 ui32Count) +{ + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32UpdateValues; + + for (iii = 0; iii < ui32Count; iii++) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } +} +#else +#define CHKPT_DBG(X) +#endif + + +PVRSRV_ERROR RGXSyncAppendTimelineUpdate(IMG_UINT32 ui32FenceTimelineUpdateValue, + SYNC_ADDR_LIST *psSyncList, + SYNC_ADDR_LIST *psPRSyncList, + PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync, + RGX_SYNC_DATA *psSyncData, + IMG_BOOL bKick3D) +{ + IMG_UINT32 *pui32TimelineUpdateWOff = NULL; + IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL; + + IMG_UINT32 ui32ClientUpdateValueCount = psSyncData->ui32ClientUpdateValueCount; + + /* Space for original client updates, and the one new update */ + size_t uiUpdateSize = sizeof(*pui32IntAllocatedUpdateValues) * (ui32ClientUpdateValueCount + 1); + + if 
(!bKick3D) + { + /* Additional space for one PR update, only the newest one */ + uiUpdateSize += sizeof(*pui32IntAllocatedUpdateValues) * 1; + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: About to allocate memory to hold updates in pui32IntAllocatedUpdateValues(<%p>)", __func__, \ + (void*)pui32IntAllocatedUpdateValues)); + + /* Allocate memory to hold the list of update values (including our timeline update) */ + pui32IntAllocatedUpdateValues = OSAllocMem(uiUpdateSize); + if (!pui32IntAllocatedUpdateValues) + { + /* Failed to allocate memory */ + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xcc, uiUpdateSize); + pui32TimelineUpdateWOff = pui32IntAllocatedUpdateValues; + + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Copying %d %s update values into pui32IntAllocatedUpdateValues(<%p>)", __func__, \ + ui32ClientUpdateValueCount, bKick3D ? "TA/3D" : "TA/PR", (void*)pui32IntAllocatedUpdateValues)); + /* Copy the update values into the new memory, then append our timeline update value */ + OSCachedMemCopy(pui32TimelineUpdateWOff, psSyncData->paui32ClientUpdateValue, ui32ClientUpdateValueCount * sizeof(*psSyncData->paui32ClientUpdateValue)); + +#if defined(TA3D_CHECKPOINT_DEBUG) + _DebugSyncValues(pui32TimelineUpdateWOff, ui32ClientUpdateValueCount); +#endif + + pui32TimelineUpdateWOff += ui32ClientUpdateValueCount; + } + + /* Now set the additional update value and append the timeline sync prim addr to either the + * render context 3D (or TA) update list + */ + CHKPT_DBG((PVR_DBG_ERROR, "%s: Appending the additional update value (0x%x) to psRenderContext->sSyncAddrList%sUpdate...", __func__, \ + ui32FenceTimelineUpdateValue, bKick3D ? 
"TA/3D" : "TA/PR")); + + /* Append the TA/3D update */ + { + *pui32TimelineUpdateWOff++ = ui32FenceTimelineUpdateValue; + psSyncData->ui32ClientUpdateValueCount++; + psSyncData->ui32ClientUpdateCount++; + SyncAddrListAppendSyncPrim(psSyncList, psFenceTimelineUpdateSync); + + if (!psSyncData->pauiClientUpdateUFOAddress) + { + psSyncData->pauiClientUpdateUFOAddress = psSyncList->pasFWAddrs; + } + /* Update paui32ClientUpdateValue to point to our new list of update values */ + psSyncData->paui32ClientUpdateValue = pui32IntAllocatedUpdateValues; + +#if defined(TA3D_CHECKPOINT_DEBUG) + _DebugSyncValues(pui32IntAllocatedUpdateValues, psSyncData->ui32ClientUpdateValueCount); +#endif + } + + if (!bKick3D) + { + /* Use the sSyncAddrList3DUpdate for PR (as it doesn't have one of its own) */ + *pui32TimelineUpdateWOff++ = ui32FenceTimelineUpdateValue; + psSyncData->ui32ClientPRUpdateValueCount = 1; + psSyncData->ui32ClientPRUpdateCount = 1; + SyncAddrListAppendSyncPrim(psPRSyncList, psFenceTimelineUpdateSync); + + if (!psSyncData->pauiClientPRUpdateUFOAddress) + { + psSyncData->pauiClientPRUpdateUFOAddress = psPRSyncList->pasFWAddrs; + } + /* Update paui32ClientPRUpdateValue to point to our new list of update values */ + psSyncData->paui32ClientPRUpdateValue = &pui32IntAllocatedUpdateValues[psSyncData->ui32ClientUpdateValueCount]; + +#if defined(TA3D_CHECKPOINT_DEBUG) + _DebugSyncValues(psSyncData->paui32ClientPRUpdateValue, psSyncData->ui32ClientPRUpdateValueCount); +#endif + } + + /* Do not free the old psSyncData->ui32ClientUpdateValueCount, + * as it was constant data passed through the bridge down to PVRSRVRGXKickTA3DKM() */ + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxta3d.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxta3d.c new file mode 100644 index 000000000000..31ba5c12a9dd --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxta3d.c @@ -0,0 +1,5420 @@ 
+/*************************************************************************/ /*! +@File +@Title RGX TA/3D routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX TA/3D routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +/* for the offsetof macro */ +#if defined(LINUX) +#include +#else +#include +#endif + +#include "pdump_km.h" +#include "pvr_debug.h" +#include "rgxutils.h" +#include "rgxfwutils.h" +#include "rgxta3d.h" +#include "rgxmem.h" +#include "allocmem.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "ri_server.h" +#include "osfunc.h" +#include "pvrsrv.h" +#include "rgx_memallocflags.h" +#include "rgxccb.h" +#include "rgxhwperf.h" +#include "ospvr_gputrace.h" +#include "rgxsyncutils.h" +#include "htbuffer.h" + +#include "rgxdefs_km.h" +#include "rgx_fwif_km.h" +#include "physmem.h" +#include "sync_server.h" +#include "sync_internal.h" +#include "sync.h" +#include "process_stats.h" + +#if defined(SUPPORT_BUFFER_SYNC) +#include "pvr_buffer_sync.h" +#endif + +#include "sync_checkpoint.h" +#include "sync_checkpoint_internal.h" + +#if defined(SUPPORT_PDVFS) +#include "rgxpdvfs.h" +#endif + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +#include "rgxworkest.h" + +#define HASH_CLEAN_LIMIT 6 +#endif + +/* Enable this to dump the compiled list of UFOs prior to kick call */ +#define ENABLE_TA3D_UFO_DUMP 0 + +//#define TA3D_CHECKPOINT_DEBUG + +#if defined(TA3D_CHECKPOINT_DEBUG) +#define CHKPT_DBG(X) PVR_DPF(X) +static INLINE +void _DebugSyncValues(const IMG_CHAR *pszFunction, + const IMG_UINT32 *pui32UpdateValues, + const IMG_UINT32 ui32Count) +{ + IMG_UINT32 i; + 
IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32UpdateValues; + + for (i = 0; i < ui32Count; i++) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", pszFunction, i, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } +} + +static INLINE +void _DebugSyncCheckpoints(const IMG_CHAR *pszFunction, + const IMG_CHAR *pszDMName, + const PSYNC_CHECKPOINT *apsSyncCheckpoints, + const IMG_UINT32 ui32Count) +{ + IMG_UINT32 i; + + for (i = 0; i < ui32Count; i++) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFence%sSyncCheckpoints[%d]=<%p>", pszFunction, pszDMName, i, *(apsSyncCheckpoints + i))); + } +} + +#else +#define CHKPT_DBG(X) +#endif + +/* define the number of commands required to be set up by the CCB helper */ +/* 1 command for the TA */ +#define CCB_CMD_HELPER_NUM_TA_COMMANDS 1 +/* Up to 3 commands for the 3D (partial render fence, partial render, and render) */ +#define CCB_CMD_HELPER_NUM_3D_COMMANDS 3 + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +#define WORKEST_CYCLES_PREDICTION_GET(x) ((x).ui64CyclesPrediction) +#else +#define WORKEST_CYCLES_PREDICTION_GET(x) (NO_CYCEST) +#endif + +typedef struct { + DEVMEM_MEMDESC *psContextStateMemDesc; + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; + IMG_UINT32 ui32Priority; +} RGX_SERVER_RC_TA_DATA; + +typedef struct { + DEVMEM_MEMDESC *psContextStateMemDesc; + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; + IMG_UINT32 ui32Priority; +} RGX_SERVER_RC_3D_DATA; + +struct _RGX_SERVER_RENDER_CONTEXT_ { + /* this lock protects usage of the render context. 
+ * it ensures only one kick is being prepared and/or submitted on + * this render context at any time + */ + POS_LOCK hLock; + RGX_CCB_CMD_HELPER_DATA asTACmdHelperData[CCB_CMD_HELPER_NUM_TA_COMMANDS]; + RGX_CCB_CMD_HELPER_DATA as3DCmdHelperData[CCB_CMD_HELPER_NUM_3D_COMMANDS]; + PVRSRV_DEVICE_NODE *psDeviceNode; + DEVMEM_MEMDESC *psFWRenderContextMemDesc; + DEVMEM_MEMDESC *psFWFrameworkMemDesc; + RGX_SERVER_RC_TA_DATA sTAData; + RGX_SERVER_RC_3D_DATA s3DData; + IMG_UINT32 ui32CleanupStatus; +#define RC_CLEANUP_TA_COMPLETE (1 << 0) +#define RC_CLEANUP_3D_COMPLETE (1 << 1) + DLLIST_NODE sListNode; + SYNC_ADDR_LIST sSyncAddrListTAFence; + SYNC_ADDR_LIST sSyncAddrListTAUpdate; + SYNC_ADDR_LIST sSyncAddrList3DFence; + SYNC_ADDR_LIST sSyncAddrList3DUpdate; + ATOMIC_T hIntJobRef; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + WORKEST_HOST_DATA sWorkEstData; +#endif +#if defined(SUPPORT_BUFFER_SYNC) + struct pvr_buffer_sync_context *psBufferSyncContext; +#endif +}; + + +/* + Static functions used by render context code +*/ + +static +PVRSRV_ERROR _DestroyTAContext(RGX_SERVER_RC_TA_DATA *psTAData, + PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + + /* Check if the FW has finished with this resource ... */ + eError = RGXFWRequestCommonContextCleanUp(psDeviceNode, + psTAData->psServerCommonContext, + RGXFWIF_DM_GEOM, + PDUMP_FLAGS_NONE); + if (eError == PVRSRV_ERROR_RETRY) + { + return eError; + } + else if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + + /* ... 
it has so we can free its resources */ +#if defined(DEBUG) + /* Log the number of TA context stores which occurred */ + { + RGXFWIF_TACTX_STATE *psFWTAState; + + eError = DevmemAcquireCpuVirtAddr(psTAData->psContextStateMemDesc, + (void**)&psFWTAState); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map firmware render context state (%s)", + __func__, PVRSRVGetErrorString(eError))); + } + else + { + /* Release the CPU virt addr */ + DevmemReleaseCpuVirtAddr(psTAData->psContextStateMemDesc); + } + } +#endif + FWCommonContextFree(psTAData->psServerCommonContext); + DevmemFwUnmapAndFree(psDeviceNode->pvDevice, psTAData->psContextStateMemDesc); + psTAData->psServerCommonContext = NULL; + return PVRSRV_OK; +} + +static +PVRSRV_ERROR _Destroy3DContext(RGX_SERVER_RC_3D_DATA *ps3DData, + PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + + /* Check if the FW has finished with this resource ... */ + eError = RGXFWRequestCommonContextCleanUp(psDeviceNode, + ps3DData->psServerCommonContext, + RGXFWIF_DM_3D, + PDUMP_FLAGS_NONE); + if (eError == PVRSRV_ERROR_RETRY) + { + return eError; + } + else if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + + /* ... 
it has so we can free its resources */ +#if defined(DEBUG) + /* Log the number of 3D context stores which occurred */ + { + RGXFWIF_3DCTX_STATE *psFW3DState; + + eError = DevmemAcquireCpuVirtAddr(ps3DData->psContextStateMemDesc, + (void**)&psFW3DState); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map firmware render context state (%s)", + __func__, PVRSRVGetErrorString(eError))); + } + else + { + /* Release the CPU virt addr */ + DevmemReleaseCpuVirtAddr(ps3DData->psContextStateMemDesc); + } + } +#endif + + FWCommonContextFree(ps3DData->psServerCommonContext); + DevmemFwUnmapAndFree(psDeviceNode->pvDevice, ps3DData->psContextStateMemDesc); + ps3DData->psServerCommonContext = NULL; + return PVRSRV_OK; +} + +static void _RGXDumpPMRPageList(DLLIST_NODE *psNode) +{ + RGX_PMR_NODE *psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock); + PVRSRV_ERROR eError; + + eError = PMRDumpPageList(psPMRNode->psPMR, + RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Error (%s) printing pmr %p", + PVRSRVGetErrorString(eError), + psPMRNode->psPMR)); + } +} + +IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList) +{ + DLLIST_NODE *psNode, *psNext; + + PVR_LOG(("Freelist FWAddr 0x%08x, ID = %d, CheckSum 0x%016" IMG_UINT64_FMTSPECx, + psFreeList->sFreeListFWDevVAddr.ui32Addr, + psFreeList->ui32FreelistID, + psFreeList->ui64FreelistChecksum)); + + /* Dump Init FreeList page list */ + PVR_LOG((" Initial Memory block")); + dllist_foreach_node(&psFreeList->sMemoryBlockInitHead, psNode, psNext) + { + _RGXDumpPMRPageList(psNode); + } + + /* Dump Grow FreeList page list */ + PVR_LOG((" Grow Memory blocks")); + dllist_foreach_node(&psFreeList->sMemoryBlockHead, psNode, psNext) + { + _RGXDumpPMRPageList(psNode); + } + + return IMG_TRUE; +} + +static void _CheckFreelist(RGX_FREELIST *psFreeList, + IMG_UINT32 ui32NumOfPagesToCheck, + IMG_UINT64 ui64ExpectedCheckSum, + IMG_UINT64 *pui64CalculatedCheckSum) 
+{ +#if defined(NO_HARDWARE) + /* No checksum needed as we have all information in the pdumps */ + PVR_UNREFERENCED_PARAMETER(psFreeList); + PVR_UNREFERENCED_PARAMETER(ui32NumOfPagesToCheck); + PVR_UNREFERENCED_PARAMETER(ui64ExpectedCheckSum); + *pui64CalculatedCheckSum = 0; +#else + PVRSRV_ERROR eError; + size_t uiNumBytes; + IMG_UINT8* pui8Buffer; + IMG_UINT32* pui32Buffer; + IMG_UINT32 ui32CheckSumAdd = 0; + IMG_UINT32 ui32CheckSumXor = 0; + IMG_UINT32 ui32Entry; + IMG_UINT32 ui32Entry2; + IMG_BOOL bFreelistBad = IMG_FALSE; + + *pui64CalculatedCheckSum = 0; + + PVR_ASSERT(ui32NumOfPagesToCheck <= (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages)); + + /* Allocate Buffer of the size of the freelist */ + pui8Buffer = OSAllocMem(ui32NumOfPagesToCheck * sizeof(IMG_UINT32)); + if (pui8Buffer == NULL) + { + PVR_LOG(("%s: Failed to allocate buffer to check freelist %p!", + __func__, psFreeList)); + PVR_ASSERT(0); + return; + } + + /* Copy freelist content into Buffer */ + eError = PMR_ReadBytes(psFreeList->psFreeListPMR, + psFreeList->uiFreeListPMROffset + + (((psFreeList->ui32MaxFLPages - + psFreeList->ui32CurrentFLPages - + psFreeList->ui32ReadyFLPages) * sizeof(IMG_UINT32)) & + ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1)), + pui8Buffer, + ui32NumOfPagesToCheck * sizeof(IMG_UINT32), + &uiNumBytes); + if (eError != PVRSRV_OK) + { + OSFreeMem(pui8Buffer); + PVR_LOG(("%s: Failed to get freelist data for freelist %p!", + __func__, psFreeList)); + PVR_ASSERT(0); + return; + } + + PVR_ASSERT(uiNumBytes == ui32NumOfPagesToCheck * sizeof(IMG_UINT32)); + + /* Generate checksum (skipping the first page if not allocated) */ + pui32Buffer = (IMG_UINT32 *)pui8Buffer; + ui32Entry = ((psFreeList->ui32GrowFLPages == 0 && psFreeList->ui32CurrentFLPages > 1) ? 
1 : 0); + for (/*ui32Entry*/ ; ui32Entry < ui32NumOfPagesToCheck; ui32Entry++) + { + ui32CheckSumAdd += pui32Buffer[ui32Entry]; + ui32CheckSumXor ^= pui32Buffer[ui32Entry]; + + /* Check for double entries */ + for (ui32Entry2 = ui32Entry+1; ui32Entry2 < ui32NumOfPagesToCheck; ui32Entry2++) + { + if (pui32Buffer[ui32Entry] == pui32Buffer[ui32Entry2]) + { + PVR_LOG(("%s: Freelist consistency failure: FW addr: 0x%08X, Double entry found 0x%08x on idx: %d and %d of %d", + __func__, + psFreeList->sFreeListFWDevVAddr.ui32Addr, + pui32Buffer[ui32Entry2], + ui32Entry, + ui32Entry2, + psFreeList->ui32CurrentFLPages)); + bFreelistBad = IMG_TRUE; + break; + } + } + } + + OSFreeMem(pui8Buffer); + + /* Check the calculated checksum against the expected checksum... */ + *pui64CalculatedCheckSum = ((IMG_UINT64)ui32CheckSumXor << 32) | ui32CheckSumAdd; + + if (ui64ExpectedCheckSum != 0 && ui64ExpectedCheckSum != *pui64CalculatedCheckSum) + { + PVR_LOG(("%s: Checksum mismatch for freelist %p! Expected 0x%016" IMG_UINT64_FMTSPECx " calculated 0x%016" IMG_UINT64_FMTSPECx, + __func__, psFreeList, + ui64ExpectedCheckSum, *pui64CalculatedCheckSum)); + bFreelistBad = IMG_TRUE; + } + + if (bFreelistBad) + { + PVR_LOG(("%s: Sleeping for ever!", __func__)); + PVR_ASSERT(!bFreelistBad); + } +#endif +} + + +/* + * Function to work out the number of freelist pages to reserve for growing + * within the FW without having to wait for the host to progress a grow + * request. + * + * The number of pages must be a multiple of 4 to align the PM addresses + * for the initial freelist allocation and also be less than the grow size. + * + * If the threshold or grow size means less than 4 pages, then the feature + * is not used. 
+ */ +static IMG_UINT32 _CalculateFreelistReadyPages(RGX_FREELIST *psFreeList, + IMG_UINT32 ui32FLPages) +{ + IMG_UINT32 ui32ReadyFLPages = ((ui32FLPages * psFreeList->ui32GrowThreshold) / 100) & + ~((RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE/sizeof(IMG_UINT32))-1); + + if (ui32ReadyFLPages > psFreeList->ui32GrowFLPages) + { + ui32ReadyFLPages = psFreeList->ui32GrowFLPages; + } + + return ui32ReadyFLPages; +} + + +PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, + IMG_UINT32 ui32NumPages, + PDLLIST_NODE pListHeader) +{ + RGX_PMR_NODE *psPMRNode; + IMG_DEVMEM_SIZE_T uiSize; + IMG_UINT32 ui32MappingTable = 0; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_DEVMEM_SIZE_T uiLength; + IMG_DEVMEM_SIZE_T uistartPage; + PVRSRV_ERROR eError; + static const IMG_CHAR szAllocName[] = "Free List"; + + /* Are we allowed to grow ? */ + if (psFreeList->ui32MaxFLPages - (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) < ui32NumPages) + { + PVR_DPF((PVR_DBG_WARNING, + "Freelist [0x%p]: grow by %u pages denied. " + "Max PB size reached (current pages %u+%u/%u)", + psFreeList, + ui32NumPages, + psFreeList->ui32CurrentFLPages, + psFreeList->ui32ReadyFLPages, + psFreeList->ui32MaxFLPages)); + return PVRSRV_ERROR_PBSIZE_ALREADY_MAX; + } + + /* Allocate kernel memory block structure */ + psPMRNode = OSAllocMem(sizeof(*psPMRNode)); + if (psPMRNode == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to allocate host data structure", + __func__)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto ErrorAllocHost; + } + + /* + * Lock protects simultaneous manipulation of: + * - the memory block list + * - the freelist's ui32CurrentFLPages + */ + OSLockAcquire(psFreeList->psDevInfo->hLockFreeList); + + + /* + * The PM never takes the last page in a freelist, so if this block + * of pages is the first one and there is no ability to grow, then + * we can skip allocating one 4K page for the lowest entry. 
+ */ + if (OSGetPageSize() > RGX_BIF_PM_PHYSICAL_PAGE_SIZE) + { + /* + * Allocation size will be rounded up to the OS page size, + * any attempt to change it a bit now will be invalidated later. + */ + psPMRNode->bFirstPageMissing = IMG_FALSE; + } + else + { + psPMRNode->bFirstPageMissing = (psFreeList->ui32GrowFLPages == 0 && ui32NumPages > 1); + } + + psPMRNode->ui32NumPages = ui32NumPages; + psPMRNode->psFreeList = psFreeList; + + /* Allocate Memory Block */ + PDUMPCOMMENT("Allocate PB Block (Pages %08X)", ui32NumPages); + uiSize = (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE; + if (psPMRNode->bFirstPageMissing) + { + uiSize -= RGX_BIF_PM_PHYSICAL_PAGE_SIZE; + } + eError = PhysmemNewRamBackedPMR(NULL, + psFreeList->psDevInfo->psDeviceNode, + uiSize, + uiSize, + 1, + 1, + &ui32MappingTable, + RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, + PVRSRV_MEMALLOCFLAG_GPU_READABLE, + sizeof(szAllocName), + szAllocName, + psFreeList->ownerPid, + &psPMRNode->psPMR, + PDUMP_NONE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate PB block of size: 0x%016" IMG_UINT64_FMTSPECX, + __func__, + (IMG_UINT64)uiSize)); + goto ErrorBlockAlloc; + } + + /* Zeroing physical pages pointed by the PMR */ + if (psFreeList->psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST) + { + eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to zero PMR %p of freelist %p (%s)", + __func__, + psPMRNode->psPMR, + psFreeList, + PVRSRVGetErrorString(eError))); + PVR_ASSERT(0); + } + } + + uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32); + uistartPage = (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psPMRNode->ui32NumPages); + uiOffset = psFreeList->uiFreeListPMROffset + ((uistartPage * sizeof(IMG_UINT32)) & ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1)); + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + + eError = 
RIWritePMREntryWithOwnerKM(psPMRNode->psPMR, + psFreeList->ownerPid); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: call to RIWritePMREntryWithOwnerKM failed (%s)", + __func__, + PVRSRVGetErrorString(eError))); + } + + /* Attach RI information */ + eError = RIWriteMEMDESCEntryKM(psPMRNode->psPMR, + OSStringNLength(szAllocName, DEVMEM_ANNOTATION_MAX_LEN), + szAllocName, + 0, + uiSize, + IMG_FALSE, + IMG_FALSE, + &psPMRNode->hRIHandle); + PVR_LOG_IF_ERROR(eError, "RIWriteMEMDESCEntryKM"); + +#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + + /* write Freelist with Memory Block physical addresses */ + eError = PMRWritePMPageList( + /* Target PMR, offset, and length */ + psFreeList->psFreeListPMR, + (psPMRNode->bFirstPageMissing ? uiOffset + sizeof(IMG_UINT32) : uiOffset), + (psPMRNode->bFirstPageMissing ? uiLength - sizeof(IMG_UINT32) : uiLength), + /* Referenced PMR, and "page" granularity */ + psPMRNode->psPMR, + RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, + &psPMRNode->psPageList); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to write pages of Node %p", + __func__, + psPMRNode)); + goto ErrorPopulateFreelist; + } + +#if defined(SUPPORT_SHADOW_FREELISTS) + /* Copy freelist memory to shadow freelist */ + { + const IMG_UINT32 ui32FLMaxSize = psFreeList->ui32MaxFLPages * sizeof(IMG_UINT32); + const IMG_UINT32 ui32MapSize = ui32FLMaxSize * 2; + const IMG_UINT32 ui32CopyOffset = uiOffset - psFreeList->uiFreeListPMROffset; + IMG_BYTE *pFLMapAddr; + size_t uiNumBytes; + PVRSRV_ERROR res; + IMG_HANDLE hMapHandle; + + /* Map both the FL and the shadow FL */ + res = PMRAcquireKernelMappingData(psFreeList->psFreeListPMR, psFreeList->uiFreeListPMROffset, ui32MapSize, + (void**) &pFLMapAddr, &uiNumBytes, &hMapHandle); + if (res != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map freelist (ID=%d)", + __func__, + psFreeList->ui32FreelistID)); + goto ErrorPopulateFreelist; + } + + /* Copy only the newly added memory */ + 
memcpy(pFLMapAddr + ui32FLMaxSize + ui32CopyOffset, pFLMapAddr + ui32CopyOffset , uiLength); + +#if defined(PDUMP) + PDUMPCOMMENT("Initialize shadow freelist"); + + /* Translate memcpy to pdump */ + { + IMG_DEVMEM_OFFSET_T uiCurrOffset; + + for (uiCurrOffset = uiOffset; (uiCurrOffset - uiOffset) < uiLength; uiCurrOffset += sizeof(IMG_UINT32)) + { + PMRPDumpCopyMem32(psFreeList->psFreeListPMR, + uiCurrOffset + ui32FLMaxSize, + psFreeList->psFreeListPMR, + uiCurrOffset, + ":SYSMEM:$1", + PDUMP_FLAGS_CONTINUOUS); + } + } +#endif + + + res = PMRReleaseKernelMappingData(psFreeList->psFreeListPMR, hMapHandle); + + if (res != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to release freelist mapping (ID=%d)", + __func__, + psFreeList->ui32FreelistID)); + goto ErrorPopulateFreelist; + } + } +#endif + + /* We add It must be added to the tail, otherwise the freelist population won't work */ + dllist_add_to_head(pListHeader, &psPMRNode->sMemoryBlock); + + /* Update number of available pages */ + psFreeList->ui32CurrentFLPages += ui32NumPages; + + /* Update statistics (needs to happen before the ReadyFL calculation to also count those pages) */ + if (psFreeList->ui32NumHighPages < psFreeList->ui32CurrentFLPages) + { + psFreeList->ui32NumHighPages = psFreeList->ui32CurrentFLPages; + } + + /* Reserve a number ready pages to allow the FW to process OOM quickly and asynchronously request a grow. */ + psFreeList->ui32ReadyFLPages = _CalculateFreelistReadyPages(psFreeList, psFreeList->ui32CurrentFLPages); + psFreeList->ui32CurrentFLPages -= psFreeList->ui32ReadyFLPages; + + if (psFreeList->bCheckFreelist) + { + /* + * We can only calculate the freelist checksum when the list is full + * (e.g. at initial creation time). At other times the checksum cannot + * be calculated and has to be disabled for this freelist. 
+ */ + if ((psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) == ui32NumPages) + { + _CheckFreelist(psFreeList, ui32NumPages, 0, &psFreeList->ui64FreelistChecksum); + } + else + { + psFreeList->ui64FreelistChecksum = 0; + } + } + OSLockRelease(psFreeList->psDevInfo->hLockFreeList); + + PVR_DPF((PVR_DBG_MESSAGE, + "Freelist [%p]: %s %u pages (pages=%u+%u/%u checksum=0x%016" IMG_UINT64_FMTSPECx "%s)", + psFreeList, + ((psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) == ui32NumPages ? "Create initial" : "Grow by"), + ui32NumPages, + psFreeList->ui32CurrentFLPages, + psFreeList->ui32ReadyFLPages, + psFreeList->ui32MaxFLPages, + psFreeList->ui64FreelistChecksum, + (psPMRNode->bFirstPageMissing ? " - lowest page not allocated" : ""))); + + return PVRSRV_OK; + + /* Error handling */ +ErrorPopulateFreelist: + PMRUnrefPMR(psPMRNode->psPMR); + +ErrorBlockAlloc: + OSFreeMem(psPMRNode); + OSLockRelease(psFreeList->psDevInfo->hLockFreeList); + +ErrorAllocHost: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; + +} + +static PVRSRV_ERROR RGXShrinkFreeList(PDLLIST_NODE pListHeader, + RGX_FREELIST *psFreeList) +{ + DLLIST_NODE *psNode; + RGX_PMR_NODE *psPMRNode; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32OldValue; + + /* + * Lock protects simultaneous manipulation of: + * - the memory block list + * - the freelist's ui32CurrentFLPages value + */ + PVR_ASSERT(pListHeader); + PVR_ASSERT(psFreeList); + PVR_ASSERT(psFreeList->psDevInfo); + PVR_ASSERT(psFreeList->psDevInfo->hLockFreeList); + + OSLockAcquire(psFreeList->psDevInfo->hLockFreeList); + + /* Get node from head of list and remove it */ + psNode = dllist_get_next_node(pListHeader); + if (psNode) + { + dllist_remove_node(psNode); + + psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock); + PVR_ASSERT(psPMRNode); + PVR_ASSERT(psPMRNode->psPMR); + PVR_ASSERT(psPMRNode->psFreeList); + + /* remove block from freelist list */ + + /* Unwrite Freelist with Memory Block physical 
addresses */ + eError = PMRUnwritePMPageList(psPMRNode->psPageList); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to unwrite pages of Node %p", + __func__, + psPMRNode)); + PVR_ASSERT(IMG_FALSE); + } + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + + if (psPMRNode->hRIHandle) + { + PVRSRV_ERROR eError; + + eError = RIDeleteMEMDESCEntryKM(psPMRNode->hRIHandle); + PVR_LOG_IF_ERROR(eError, "RIDeleteMEMDESCEntryKM"); + } + +#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + + /* Free PMR (We should be the only one that holds a ref on the PMR) */ + eError = PMRUnrefPMR(psPMRNode->psPMR); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to free PB block %p (%s)", + __func__, + psPMRNode->psPMR, + PVRSRVGetErrorString(eError))); + PVR_ASSERT(IMG_FALSE); + } + + /* update available pages in freelist */ + ui32OldValue = psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages; + + /* + * Deallocated pages should first be deducted from ReadyPages bank, once + * there are no more left, start deducting them from CurrentPage bank. + */ + if (psPMRNode->ui32NumPages > psFreeList->ui32ReadyFLPages) + { + psFreeList->ui32CurrentFLPages -= psPMRNode->ui32NumPages - psFreeList->ui32ReadyFLPages; + psFreeList->ui32ReadyFLPages = 0; + } + else + { + psFreeList->ui32ReadyFLPages -= psPMRNode->ui32NumPages; + } + + /* check underflow */ + PVR_ASSERT(ui32OldValue > (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages)); + + PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p]: shrink by %u pages (current pages %u/%u)", + psFreeList, + psPMRNode->ui32NumPages, + psFreeList->ui32CurrentFLPages, + psFreeList->ui32MaxFLPages)); + + OSFreeMem(psPMRNode); + } + else + { + PVR_DPF((PVR_DBG_WARNING, + "Freelist [0x%p]: shrink denied. 
PB already at initial PB size (%u pages)", + psFreeList, + psFreeList->ui32InitFLPages)); + eError = PVRSRV_ERROR_PBSIZE_ALREADY_MIN; + } + + OSLockRelease(psFreeList->psDevInfo->hLockFreeList); + + return eError; +} + +static RGX_FREELIST *FindFreeList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FreelistID) +{ + DLLIST_NODE *psNode, *psNext; + RGX_FREELIST *psFreeList = NULL; + + OSLockAcquire(psDevInfo->hLockFreeList); + + dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext) + { + RGX_FREELIST *psThisFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode); + + if (psThisFreeList->ui32FreelistID == ui32FreelistID) + { + psFreeList = psThisFreeList; + break; + } + } + + OSLockRelease(psDevInfo->hLockFreeList); + return psFreeList; +} + +void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32FreelistID) +{ + RGX_FREELIST *psFreeList = NULL; + RGXFWIF_KCCB_CMD s3DCCBCmd; + IMG_UINT32 ui32GrowValue; + PVRSRV_ERROR eError; + + PVR_ASSERT(psDevInfo); + + psFreeList = FindFreeList(psDevInfo, ui32FreelistID); + + if (psFreeList) + { + /* Since the FW made the request, it has already consumed the ready pages, update the host struct */ + psFreeList->ui32CurrentFLPages += psFreeList->ui32ReadyFLPages; + psFreeList->ui32ReadyFLPages = 0; + + /* Try to grow the freelist */ + eError = RGXGrowFreeList(psFreeList, + psFreeList->ui32GrowFLPages, + &psFreeList->sMemoryBlockHead); + + if (eError == PVRSRV_OK) + { + /* Grow successful, return size of grow size */ + ui32GrowValue = psFreeList->ui32GrowFLPages; + + psFreeList->ui32NumGrowReqByFW++; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + /* Update Stats */ + PVRSRVStatsUpdateFreelistStats(0, + 1, /* Add 1 to the appropriate counter (Requests by FW) */ + psFreeList->ui32InitFLPages, + psFreeList->ui32NumHighPages, + psFreeList->ownerPid); + +#endif + + } + else + { + /* Grow failed */ + ui32GrowValue = 0; + PVR_DPF((PVR_DBG_ERROR, + "Grow for FreeList %p failed (%s)", + psFreeList, + 
PVRSRVGetErrorString(eError))); + } + + /* send feedback */ + s3DCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE; + s3DCCBCmd.uCmdData.sFreeListGSData.sFreeListFWDevVAddr.ui32Addr = psFreeList->sFreeListFWDevVAddr.ui32Addr; + s3DCCBCmd.uCmdData.sFreeListGSData.ui32DeltaPages = ui32GrowValue; + s3DCCBCmd.uCmdData.sFreeListGSData.ui32NewPages = psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages; + s3DCCBCmd.uCmdData.sFreeListGSData.ui32ReadyPages = psFreeList->ui32ReadyFLPages; + + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_3D, + &s3DCCBCmd, + 0, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + /* Kernel CCB should never fill up, as the FW is processing them right away */ + + PVR_ASSERT(eError == PVRSRV_OK); + } + else + { + /* Should never happen */ + PVR_DPF((PVR_DBG_ERROR, + "FreeList Lookup for FreeList ID 0x%08x failed (Populate)", + ui32FreelistID)); + PVR_ASSERT(IMG_FALSE); + } +} + +static void _RGXFreeListReconstruction(PDLLIST_NODE psNode) +{ + + PVRSRV_RGXDEV_INFO *psDevInfo; + RGX_FREELIST *psFreeList; + RGX_PMR_NODE *psPMRNode; + PVRSRV_ERROR eError; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_DEVMEM_SIZE_T uiLength; + IMG_UINT32 ui32StartPage; + + psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock); + psFreeList = psPMRNode->psFreeList; + PVR_ASSERT(psFreeList); + psDevInfo = psFreeList->psDevInfo; + PVR_ASSERT(psDevInfo); + + uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32); + ui32StartPage = (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psPMRNode->ui32NumPages); + uiOffset = psFreeList->uiFreeListPMROffset + ((ui32StartPage * sizeof(IMG_UINT32)) & ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1)); + + PMRUnwritePMPageList(psPMRNode->psPageList); + psPMRNode->psPageList = NULL; + eError = PMRWritePMPageList( + /* Target PMR, offset, and length */ 
+ psFreeList->psFreeListPMR, + (psPMRNode->bFirstPageMissing ? uiOffset + sizeof(IMG_UINT32) : uiOffset), + (psPMRNode->bFirstPageMissing ? uiLength - sizeof(IMG_UINT32) : uiLength), + /* Referenced PMR, and "page" granularity */ + psPMRNode->psPMR, + RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, + &psPMRNode->psPageList); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error (%s) writing FL 0x%08x", + __func__, + PVRSRVGetErrorString(eError), + (IMG_UINT32)psFreeList->ui32FreelistID)); + } + + /* Zeroing physical pages pointed by the reconstructed freelist */ + if (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST) + { + eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to zero PMR %p of freelist %p (%s)", + __func__, + psPMRNode->psPMR, + psFreeList, + PVRSRVGetErrorString(eError))); + PVR_ASSERT(0); + } + } + + + psFreeList->ui32CurrentFLPages += psPMRNode->ui32NumPages; +} + + +static PVRSRV_ERROR RGXReconstructFreeList(RGX_FREELIST *psFreeList) +{ + IMG_UINT32 ui32OriginalFLPages; + DLLIST_NODE *psNode, *psNext; + RGXFWIF_FREELIST *psFWFreeList; + PVRSRV_ERROR eError; + + //PVR_DPF((PVR_DBG_ERROR, "FreeList RECONSTRUCTION: Reconstructing freelist %p (ID=%u)", psFreeList, psFreeList->ui32FreelistID)); + + /* Do the FreeList Reconstruction */ + ui32OriginalFLPages = psFreeList->ui32CurrentFLPages; + psFreeList->ui32CurrentFLPages = 0; + + /* Reconstructing Init FreeList pages */ + dllist_foreach_node(&psFreeList->sMemoryBlockInitHead, psNode, psNext) + { + _RGXFreeListReconstruction(psNode); + } + + /* Reconstructing Grow FreeList pages */ + dllist_foreach_node(&psFreeList->sMemoryBlockHead, psNode, psNext) + { + _RGXFreeListReconstruction(psNode); + } + + /* Ready pages are allocated but kept hidden until OOM occurs. 
*/ + psFreeList->ui32CurrentFLPages -= psFreeList->ui32ReadyFLPages; + if (psFreeList->ui32CurrentFLPages != ui32OriginalFLPages) + { + PVR_ASSERT(psFreeList->ui32CurrentFLPages == ui32OriginalFLPages); + return PVRSRV_ERROR_FREELIST_RECONSTRUCTION_FAILED; + } + + /* Reset the firmware freelist structure */ + eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList); + if (eError != PVRSRV_OK) + { + return eError; + } + + psFWFreeList->ui32CurrentStackTop = psFWFreeList->ui32CurrentPages - 1; + psFWFreeList->ui32AllocatedPageCount = 0; + psFWFreeList->ui32AllocatedMMUPageCount = 0; + + DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); + + /* Check the Freelist checksum if required (as the list is fully populated) */ + if (psFreeList->bCheckFreelist) + { + IMG_UINT64 ui64CheckSum; + + _CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum); + } + + return eError; +} + + +void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32FreelistsCount, + IMG_UINT32 *paui32Freelists) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + DLLIST_NODE *psNode, *psNext; + IMG_UINT32 ui32Loop; + RGXFWIF_KCCB_CMD sTACCBCmd; +#if !defined(SUPPORT_SHADOW_FREELISTS) + DLLIST_NODE *psNodeHWRTData, *psNextHWRTData; + RGX_KM_HW_RT_DATASET *psKMHWRTDataSet; + RGXFWIF_HWRTDATA *psHWRTData; +#endif + IMG_UINT32 ui32FinalFreelistsCount = 0; + IMG_UINT32 aui32FinalFreelists[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT * 2]; /* Worst-case is double what we are sent */ + + PVR_ASSERT(psDevInfo != NULL); + PVR_ASSERT(ui32FreelistsCount <= RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT); + if (ui32FreelistsCount > RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT) + { + ui32FreelistsCount = RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT; + } + + //PVR_DPF((PVR_DBG_ERROR, "FreeList RECONSTRUCTION: %u freelist(s) requested for reconstruction", ui32FreelistsCount)); + + /* + * Initialise the 
response command (in case we don't find a freelist ID). + * Also copy the list to the 'final' freelist array. + */ + sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE; + sTACCBCmd.uCmdData.sFreeListsReconstructionData.ui32FreelistsCount = ui32FreelistsCount; + + for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++) + { + sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] = paui32Freelists[ui32Loop] | + RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG; + aui32FinalFreelists[ui32Loop] = paui32Freelists[ui32Loop]; + } + + ui32FinalFreelistsCount = ui32FreelistsCount; + + /* + * The list of freelists we have been given for reconstruction will + * consist of local and global freelists (maybe MMU as well). Any + * local freelists should have their global list specified as well. + * There may be cases where the global freelist is not given (in + * cases of partial setups before a poll failure for example). To + * handle that we must first ensure every local freelist has a global + * freelist specified, otherwise we add that to the 'final' list. + * This final list of freelists is created in a first pass. + * + * Even with the global freelists listed, there may be other local + * freelists not listed, which are going to have their global freelist + * reconstructed. Therefore we have to find those freelists as well + * meaning we will have to iterate the entire list of freelists to + * find which must be reconstructed. This is the second pass. + */ + OSLockAcquire(psDevInfo->hLockFreeList); + dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext) + { + RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode); + IMG_BOOL bInList = IMG_FALSE; + IMG_BOOL bGlobalInList = IMG_FALSE; + + /* Check if this local freelist is in the list and ensure its global is too. 
*/ + if (psFreeList->ui32FreelistGlobalID != 0) + { + for (ui32Loop = 0; ui32Loop < ui32FinalFreelistsCount; ui32Loop++) + { + if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistID) + { + bInList = IMG_TRUE; + } + if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistGlobalID) + { + bGlobalInList = IMG_TRUE; + } + } + + if (bInList && !bGlobalInList) + { + aui32FinalFreelists[ui32FinalFreelistsCount] = psFreeList->ui32FreelistGlobalID; + ui32FinalFreelistsCount++; + } + } + } + dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext) + { + RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode); + IMG_BOOL bReconstruct = IMG_FALSE; + + /* + * Check if this freelist needs to be reconstructed (was it requested + * or is its global freelist going to be reconstructed)... + */ + for (ui32Loop = 0; ui32Loop < ui32FinalFreelistsCount; ui32Loop++) + { + if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistID || + aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistGlobalID) + { + bReconstruct = IMG_TRUE; + break; + } + } + + if (bReconstruct) + { + eError = RGXReconstructFreeList(psFreeList); + if (eError == PVRSRV_OK) + { +#if !defined(SUPPORT_SHADOW_FREELISTS) + /* Mark all HWRTData's of reconstructing local freelists as HWR (applies to TA/3D's not finished yet) */ + dllist_foreach_node(&psFreeList->sNodeHWRTDataHead, psNodeHWRTData, psNextHWRTData) + { + psKMHWRTDataSet = IMG_CONTAINER_OF(psNodeHWRTData, RGX_KM_HW_RT_DATASET, sNodeHWRTData); + eError = DevmemAcquireCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc, (void **)&psHWRTData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Devmem AcquireCpuVirtAddr Failed during Reconstructing of FreeList, FwMemDesc(%p),psHWRTData(%p)", + psKMHWRTDataSet->psHWRTDataFwMemDesc, + psHWRTData)); + continue; + } + + psHWRTData->eState = RGXFWIF_RTDATA_STATE_HWR; + psHWRTData->ui32HWRTDataFlags &= ~HWRTDATA_HAS_LAST_TA; + + 
DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc); + } +#endif + + /* Update the response for this freelist if it was specifically requested for reconstruction. */ + for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++) + { + if (paui32Freelists[ui32Loop] == psFreeList->ui32FreelistID) + { + /* Reconstruction of this requested freelist was successful... */ + sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] &= ~RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG; + break; + } + } + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "Reconstructing of FreeList %p failed (%s)", + psFreeList, + PVRSRVGetErrorString(eError))); + } + } + } + OSLockRelease(psDevInfo->hLockFreeList); + + /* Check that all freelists were found and reconstructed... */ + for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++) + { + PVR_ASSERT((sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] & + RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG) == 0); + } + + /* send feedback */ + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GEOM, + &sTACCBCmd, + 0, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + /* Kernel CCB should never fill up, as the FW is processing them right away */ + PVR_ASSERT(eError == PVRSRV_OK); +} + +/* Create a single HWRTData instance */ +static PVRSRV_ERROR RGXCreateHWRTData_aux( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEV_VIRTADDR psVHeapTableDevVAddr, + IMG_DEV_VIRTADDR psPMMListDevVAddr, /* per-HWRTData */ + RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS], + IMG_UINT32 ui32ScreenPixelMax, + IMG_UINT64 ui64MultiSampleCtl, + IMG_UINT64 ui64FlippedMultiSampleCtl, + IMG_UINT32 ui32TPCStride, + IMG_DEV_VIRTADDR sTailPtrsDevVAddr, + IMG_UINT32 ui32TPCSize, + IMG_UINT32 ui32TEScreen, + IMG_UINT32 ui32TEAA, + IMG_UINT32 ui32TEMTILE1, + 
IMG_UINT32 ui32TEMTILE2, + IMG_UINT32 ui32MTileStride, + IMG_UINT32 ui32ISPMergeLowerX, + IMG_UINT32 ui32ISPMergeLowerY, + IMG_UINT32 ui32ISPMergeUpperX, + IMG_UINT32 ui32ISPMergeUpperY, + IMG_UINT32 ui32ISPMergeScaleX, + IMG_UINT32 ui32ISPMergeScaleY, + IMG_DEV_VIRTADDR sMacrotileArrayDevVAddr, /* per-HWRTData */ + IMG_DEV_VIRTADDR sRgnHeaderDevVAddr, /* per-HWRTData */ + IMG_DEV_VIRTADDR sRTCDevVAddr, + IMG_UINT64 uiRgnHeaderSize, + IMG_UINT32 ui32ISPMtileSize, + IMG_UINT16 ui16MaxRTs, + RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie, + RGX_KM_HW_RT_DATASET **ppsKMHWRTDataSet) /* per-HWRTData */ +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_UINT32 ui32Loop; + + /* KM cookie storing all the FW/HW data */ + RGX_KM_HW_RT_DATASET *psKMHWRTDataSet; + + /* local pointers for memory descriptors of FW allocations */ + DEVMEM_MEMDESC *psHWRTDataFwMemDesc = NULL; + DEVMEM_MEMDESC *psRTArrayFwMemDesc = NULL; + DEVMEM_MEMDESC *psRendersAccArrayFwMemDesc = NULL; + + /* local pointer for CPU-mapped [FW]HWRTData */ + RGXFWIF_HWRTDATA *psHWRTData = NULL; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + /* Prepare the HW RT DataSet struct */ + psKMHWRTDataSet = OSAllocZMem(sizeof(*psKMHWRTDataSet)); + if (psKMHWRTDataSet == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto AllocError; + } + + *ppsKMHWRTDataSet = psKMHWRTDataSet; + psKMHWRTDataSet->psDeviceNode = psDeviceNode; + + psKMHWRTDataSet->psHWRTDataCommonCookie = psHWRTDataCommonCookie; + + psDevInfo = psDeviceNode->pvDevice; + + /* + * This FW RT-Data is only mapped into kernel for initialisation. + * Otherwise this allocation is only used by the FW. 
+ * Therefore the GPU cache doesn't need coherency, + * and write-combine is suffice on the CPU side (WC buffer will be flushed at the first TA-kick) + */ + eError = DevmemFwAllocate(psDevInfo, + sizeof(RGXFWIF_HWRTDATA), + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE, + "FwHWRTData", + &psHWRTDataFwMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: DevmemAllocate for RGX_FWIF_HWRTDATA failed", + __func__)); + goto FWRTDataAllocateError; + } + + psKMHWRTDataSet->psHWRTDataFwMemDesc = psHWRTDataFwMemDesc; + eError = RGXSetFirmwareAddress( &psKMHWRTDataSet->sHWRTDataFwAddr, + psHWRTDataFwMemDesc, + 0, + RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", FWRTDataFwAddrError); + + eError = DevmemAcquireCpuVirtAddr(psHWRTDataFwMemDesc, + (void **)&psHWRTData); + PVR_LOG_GOTO_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWRTDataCpuMapError); + + psHWRTData->psVHeapTableDevVAddr = psVHeapTableDevVAddr; + + psHWRTData->sHWRTDataCommonFwAddr = psHWRTDataCommonCookie->sHWRTDataCommonFwAddr; + + psHWRTData->psPMMListDevVAddr = psPMMListDevVAddr; + + psHWRTData->ui32ScreenPixelMax = ui32ScreenPixelMax; + psHWRTData->ui64MultiSampleCtl = ui64MultiSampleCtl; + psHWRTData->ui64FlippedMultiSampleCtl = ui64FlippedMultiSampleCtl; + psHWRTData->ui32TPCStride = ui32TPCStride; + psHWRTData->sTailPtrsDevVAddr = sTailPtrsDevVAddr; + psHWRTData->ui32TPCSize = ui32TPCSize; + psHWRTData->ui32TEScreen = ui32TEScreen; + psHWRTData->ui32TEAA = ui32TEAA; + psHWRTData->ui32TEMTILE1 = ui32TEMTILE1; + psHWRTData->ui32TEMTILE2 = ui32TEMTILE2; + psHWRTData->ui32MTileStride = 
ui32MTileStride; + psHWRTData->ui32ISPMergeLowerX = ui32ISPMergeLowerX; + psHWRTData->ui32ISPMergeLowerY = ui32ISPMergeLowerY; + psHWRTData->ui32ISPMergeUpperX = ui32ISPMergeUpperX; + psHWRTData->ui32ISPMergeUpperY = ui32ISPMergeUpperY; + psHWRTData->ui32ISPMergeScaleX = ui32ISPMergeScaleX; + psHWRTData->ui32ISPMergeScaleY = ui32ISPMergeScaleY; + psHWRTData->sMacrotileArrayDevVAddr = sMacrotileArrayDevVAddr; + psHWRTData->sRgnHeaderDevVAddr = sRgnHeaderDevVAddr; + psHWRTData->sRTCDevVAddr = sRTCDevVAddr; + psHWRTData->uiRgnHeaderSize = uiRgnHeaderSize; + psHWRTData->ui32ISPMtileSize = ui32ISPMtileSize; + + OSLockAcquire(psDevInfo->hLockFreeList); + for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++) + { + psKMHWRTDataSet->apsFreeLists[ui32Loop] = apsFreeLists[ui32Loop]; + psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount++; + psHWRTData->apsFreeLists[ui32Loop].ui32Addr = psKMHWRTDataSet->apsFreeLists[ui32Loop]->sFreeListFWDevVAddr.ui32Addr; + /* invalid initial snapshot value, the snapshot is always taken during first kick + * and hence the value get replaced during the first kick anyway. So it's safe to set it 0. 
+ */ + psHWRTData->aui32FreeListHWRSnapshot[ui32Loop] = 0; + } +#if !defined(SUPPORT_SHADOW_FREELISTS) + dllist_add_to_tail(&apsFreeLists[RGXFW_LOCAL_FREELIST]->sNodeHWRTDataHead, &(psKMHWRTDataSet->sNodeHWRTData)); +#endif + OSLockRelease(psDevInfo->hLockFreeList); + + { + RGXFWIF_RTA_CTL *psRTACtl = &psHWRTData->sRTACtl; + + psRTACtl->ui32RenderTargetIndex = 0; + psRTACtl->ui32ActiveRenderTargets = 0; + psRTACtl->sValidRenderTargets.ui32Addr = 0; + psRTACtl->sRTANumPartialRenders.ui32Addr = 0; + psRTACtl->ui32MaxRTs = (IMG_UINT32) ui16MaxRTs; + + if (ui16MaxRTs > 1) + { + PDUMPCOMMENT("Allocate memory for shadow render target cache"); + eError = DevmemFwAllocate(psDevInfo, + ui16MaxRTs * sizeof(IMG_UINT32), + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_UNCACHED| + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC, + "FwShadowRTCache", + &psRTArrayFwMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate %u bytes for render target array (%s)", + __func__, + ui16MaxRTs, PVRSRVGetErrorString(eError))); + goto FWAllocateRTArryError; + } + + psKMHWRTDataSet->psRTArrayFwMemDesc = psRTArrayFwMemDesc; + eError = RGXSetFirmwareAddress(&psRTACtl->sValidRenderTargets, + psRTArrayFwMemDesc, + 0, + RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", FWAllocateRTArryFwAddrError); + + PDUMPCOMMENT("Allocate memory for tracking renders accumulation"); + eError = DevmemFwAllocate(psDevInfo, + ui16MaxRTs * sizeof(IMG_UINT32), + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_UNCACHED| + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC, + "FwRendersAccumulation", + 
&psRendersAccArrayFwMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate %u bytes for render target array (%s) (renders accumulation)", + __func__, + ui16MaxRTs, PVRSRVGetErrorString(eError))); + goto FWAllocateRTAccArryError; + } + psKMHWRTDataSet->psRendersAccArrayFwMemDesc = psRendersAccArrayFwMemDesc; + eError = RGXSetFirmwareAddress(&psRTACtl->sRTANumPartialRenders, + psRendersAccArrayFwMemDesc, + 0, + RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:3", FWAllocRTAccArryFwAddrError); + } + } + +#if defined(PDUMP) + PDUMPCOMMENT("Dump HWRTData 0x%08X", psKMHWRTDataSet->sHWRTDataFwAddr.ui32Addr); + DevmemPDumpLoadMem(psKMHWRTDataSet->psHWRTDataFwMemDesc, 0, sizeof(*psHWRTData), PDUMP_FLAGS_CONTINUOUS); +#endif + + DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc); + return PVRSRV_OK; + +FWAllocRTAccArryFwAddrError: + DevmemFwUnmapAndFree(psDevInfo, psRendersAccArrayFwMemDesc); +FWAllocateRTAccArryError: + RGXUnsetFirmwareAddress(psKMHWRTDataSet->psRTArrayFwMemDesc); +FWAllocateRTArryFwAddrError: + DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psRTArrayFwMemDesc); +FWAllocateRTArryError: + OSLockAcquire(psDevInfo->hLockFreeList); + for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++) + { + PVR_ASSERT(psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount > 0); + psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount--; + } + OSLockRelease(psDevInfo->hLockFreeList); + DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc); +FWRTDataCpuMapError: + RGXUnsetFirmwareAddress(psKMHWRTDataSet->psHWRTDataFwMemDesc); +FWRTDataFwAddrError: + DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psHWRTDataFwMemDesc); +FWRTDataAllocateError: + *ppsKMHWRTDataSet = NULL; + OSFreeMem(psKMHWRTDataSet); + +AllocError: + return eError; +} + +/* Create set of HWRTData(s) and bind it with a shared FW HWRTDataCommon */ +PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA *psConnection, + 
PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEV_VIRTADDR psVHeapTableDevVAddr, + IMG_DEV_VIRTADDR psPMMListDevVAddr_0, + IMG_DEV_VIRTADDR psPMMListDevVAddr_1, + RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS], + IMG_UINT32 ui32ScreenPixelMax, + IMG_UINT64 ui64MultiSampleCtl, + IMG_UINT64 ui64FlippedMultiSampleCtl, + IMG_UINT32 ui32TPCStride, + IMG_DEV_VIRTADDR sTailPtrsDevVAddr, + IMG_UINT32 ui32TPCSize, + IMG_UINT32 ui32TEScreen, + IMG_UINT32 ui32TEAA, + IMG_UINT32 ui32TEMTILE1, + IMG_UINT32 ui32TEMTILE2, + IMG_UINT32 ui32MTileStride, + IMG_UINT32 ui32ISPMergeLowerX, + IMG_UINT32 ui32ISPMergeLowerY, + IMG_UINT32 ui32ISPMergeUpperX, + IMG_UINT32 ui32ISPMergeUpperY, + IMG_UINT32 ui32ISPMergeScaleX, + IMG_UINT32 ui32ISPMergeScaleY, + IMG_DEV_VIRTADDR sMacrotileArrayDevVAddr_0, + IMG_DEV_VIRTADDR sMacrotileArrayDevVAddr_1, + IMG_DEV_VIRTADDR sRgnHeaderDevVAddr_0, + IMG_DEV_VIRTADDR sRgnHeaderDevVAddr_1, + IMG_DEV_VIRTADDR sRTCDevVAddr, + IMG_UINT64 uiRgnHeaderSize, + IMG_UINT32 ui32ISPMtileSize, + IMG_UINT16 ui16MaxRTs, + RGX_KM_HW_RT_DATASET **ppsKMHWRTDataSet_0, + RGX_KM_HW_RT_DATASET **ppsKMHWRTDataSet_1) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie; + RGXFWIF_HWRTDATA_COMMON *psHWRTDataCommon; + DEVMEM_MEMDESC *psHWRTDataCommonFwMemDesc; + RGXFWIF_DEV_VIRTADDR sHWRTDataCommonFwAddr; + + /* Prepare KM cleanup object for HWRTDataCommon FW object */ + psHWRTDataCommonCookie = OSAllocZMem(sizeof(*psHWRTDataCommonCookie)); + if (psHWRTDataCommonCookie == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_HWRTDataCommonCookieAlloc; + } + + /* + * This FW common context is only mapped into kernel for initialisation. + * Otherwise this allocation is only used by the FW. 
+ * Therefore the GPU cache doesn't need coherency, + * and write-combine is suffice on the CPU side + * (WC buffer will be flushed at the first TA-kick) + */ + eError = DevmemFwAllocate(psDevInfo, + sizeof(RGXFWIF_HWRTDATA_COMMON), + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE, + "FwHWRTDataCommon", + &psHWRTDataCommonFwMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: DevmemAllocate for FwHWRTDataCommon failed", __func__)); + goto err_HWRTDataCommonAlloc; + } + eError = RGXSetFirmwareAddress(&sHWRTDataCommonFwAddr, psHWRTDataCommonFwMemDesc, 0, RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", err_HWRTDataCommonFwAddr); + + eError = DevmemAcquireCpuVirtAddr(psHWRTDataCommonFwMemDesc, (void **)&psHWRTDataCommon); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", err_HWRTDataCommonVA); + + psHWRTDataCommon->bTACachesNeedZeroing = IMG_FALSE; +#if defined(PDUMP) + PDUMPCOMMENT("Dump HWRTDataCommon"); + DevmemPDumpLoadMem(psHWRTDataCommonFwMemDesc, 0, sizeof(*psHWRTDataCommon), PDUMP_FLAGS_CONTINUOUS); +#endif + DevmemReleaseCpuVirtAddr(psHWRTDataCommonFwMemDesc); + + psHWRTDataCommonCookie->ui32RefCount = 0; + psHWRTDataCommonCookie->psHWRTDataCommonFwMemDesc = psHWRTDataCommonFwMemDesc; + psHWRTDataCommonCookie->sHWRTDataCommonFwAddr = sHWRTDataCommonFwAddr; + + /* Here we are creating a set of HWRTData(s) + the number of elements in the set equals RGXMKIF_NUM_RTDATAS. 
+ */ + + eError = RGXCreateHWRTData_aux( + psConnection, + psDeviceNode, + psVHeapTableDevVAddr, + psPMMListDevVAddr_0, + apsFreeLists, + ui32ScreenPixelMax, + ui64MultiSampleCtl, + ui64FlippedMultiSampleCtl, + ui32TPCStride, + sTailPtrsDevVAddr, + ui32TPCSize, + ui32TEScreen, + ui32TEAA, + ui32TEMTILE1, + ui32TEMTILE2, + ui32MTileStride, + ui32ISPMergeLowerX, + ui32ISPMergeLowerY, + ui32ISPMergeUpperX, + ui32ISPMergeUpperY, + ui32ISPMergeScaleX, + ui32ISPMergeScaleY, + sMacrotileArrayDevVAddr_0, + sRgnHeaderDevVAddr_0, + sRTCDevVAddr, + uiRgnHeaderSize, + ui32ISPMtileSize, + ui16MaxRTs, + psHWRTDataCommonCookie, + ppsKMHWRTDataSet_0); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to create HWRTData [slot 0] (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto err_HWRTDataAlloc_0; + } + psHWRTDataCommonCookie->ui32RefCount += 1; + + eError = RGXCreateHWRTData_aux( + psConnection, + psDeviceNode, + psVHeapTableDevVAddr, + psPMMListDevVAddr_1, + apsFreeLists, + ui32ScreenPixelMax, + ui64MultiSampleCtl, + ui64FlippedMultiSampleCtl, + ui32TPCStride, + sTailPtrsDevVAddr, + ui32TPCSize, + ui32TEScreen, + ui32TEAA, + ui32TEMTILE1, + ui32TEMTILE2, + ui32MTileStride, + ui32ISPMergeLowerX, + ui32ISPMergeLowerY, + ui32ISPMergeUpperX, + ui32ISPMergeUpperY, + ui32ISPMergeScaleX, + ui32ISPMergeScaleY, + sMacrotileArrayDevVAddr_1, + sRgnHeaderDevVAddr_1, + sRTCDevVAddr, + uiRgnHeaderSize, + ui32ISPMtileSize, + ui16MaxRTs, + psHWRTDataCommonCookie, + ppsKMHWRTDataSet_1); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to create HWRTData [slot 1] (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto err_HWRTDataAlloc_1; + } + psHWRTDataCommonCookie->ui32RefCount += 1; + + + return PVRSRV_OK; + + +err_HWRTDataAlloc_1: +err_HWRTDataAlloc_0: +err_HWRTDataCommonVA: + RGXUnsetFirmwareAddress(psHWRTDataCommonFwMemDesc); +err_HWRTDataCommonFwAddr: + DevmemFwUnmapAndFree(psDevInfo, psHWRTDataCommonFwMemDesc); + 
+err_HWRTDataCommonAlloc: + OSFreeMem(psHWRTDataCommonCookie); + +err_HWRTDataCommonCookieAlloc: + + return eError; +} + +/* Destroy a single instance of HWRTData. + Additionally, destroy the HWRTDataCommon{Cookie} objects + when it is the last HWRTData within a corresponding set of HWRTDatas. +*/ +PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + PVRSRV_ERROR eError; + PRGXFWIF_HWRTDATA psHWRTData; + IMG_UINT32 ui32Loop; + + PVR_ASSERT(psKMHWRTDataSet); + + psDevInfo = psKMHWRTDataSet->psDeviceNode->pvDevice; + + eError = RGXSetFirmwareAddress(&psHWRTData, psKMHWRTDataSet->psHWRTDataFwMemDesc, 0, RFW_FWADDR_NOREF_FLAG); + PVR_RETURN_IF_ERROR(eError); + + /* Cleanup HWRTData */ + eError = RGXFWRequestHWRTDataCleanUp(psKMHWRTDataSet->psDeviceNode, psHWRTData); + + if (eError == PVRSRV_ERROR_RETRY) + { + return eError; + } + + if (psKMHWRTDataSet->psRTArrayFwMemDesc) + { + RGXUnsetFirmwareAddress(psKMHWRTDataSet->psRTArrayFwMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psRTArrayFwMemDesc); + } + + if (psKMHWRTDataSet->psRendersAccArrayFwMemDesc) + { + RGXUnsetFirmwareAddress(psKMHWRTDataSet->psRendersAccArrayFwMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psRendersAccArrayFwMemDesc); + } + + /* Decrease freelist refcount */ + OSLockAcquire(psDevInfo->hLockFreeList); + for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++) + { + PVR_ASSERT(psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount > 0); + psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount--; + } +#if !defined(SUPPORT_SHADOW_FREELISTS) + dllist_remove_node(&psKMHWRTDataSet->sNodeHWRTData); +#endif + OSLockRelease(psDevInfo->hLockFreeList); + + /* Freeing the memory has to happen _after_ removing the HWRTData from the freelist + * otherwise we risk traversing the freelist to find a pointer from a freed data structure */ + RGXUnsetFirmwareAddress(psKMHWRTDataSet->psHWRTDataFwMemDesc); + 
DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psHWRTDataFwMemDesc); + + /* We punched through potential PVRSRV_ERROR_RETRY events, so we are + sure that the HWRTDATA instance will be destroyed during this call. + Consequently, we decrease the ref count for HWRTDataCommonCookie. + + NOTE: This ref count does not require locks or atomics. + ------------------------------------------------------- + HWRTDatas bound into one pair are always destroyed sequentially, + within a single loop on the Client side. + The Common/Cookie objects always belong to only one pair of + HWRTDatas, and ref count is used to ensure that the Common/Cookie + objects will be destroyed after destruction of all HWRTDatas + within a single pair. + */ + psKMHWRTDataSet->psHWRTDataCommonCookie->ui32RefCount--; + + /* When ref count for HWRTDataCommonCookie hits ZERO + we have to destroy the HWRTDataCommon [FW object] and the cookie [KM object] afterwards. + */ + if (psKMHWRTDataSet->psHWRTDataCommonCookie->ui32RefCount == 0) + { + PVRSRV_DEVICE_NODE *psDeviceNode = psKMHWRTDataSet->psDeviceNode; + + RGXUnsetFirmwareAddress(psKMHWRTDataSet->psHWRTDataCommonCookie->psHWRTDataCommonFwMemDesc); + + /* We don't need to flush the SLC before freeing. + FW RequestCleanUp has already done that for HWRTData, so we're fine now. 
+ */ + + DevmemFwUnmapAndFree(psDeviceNode->pvDevice, psKMHWRTDataSet->psHWRTDataCommonCookie->psHWRTDataCommonFwMemDesc); + OSFreeMem(psKMHWRTDataSet->psHWRTDataCommonCookie); + } + + OSFreeMem(psKMHWRTDataSet); + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32MaxFLPages, + IMG_UINT32 ui32InitFLPages, + IMG_UINT32 ui32GrowFLPages, + IMG_UINT32 ui32GrowParamThreshold, + RGX_FREELIST *psGlobalFreeList, + IMG_BOOL bCheckFreelist, + IMG_DEV_VIRTADDR sFreeListDevVAddr, + PMR *psFreeListPMR, + IMG_DEVMEM_OFFSET_T uiFreeListPMROffset, + RGX_FREELIST **ppsFreeList) +{ + PVRSRV_ERROR eError; + RGXFWIF_FREELIST *psFWFreeList; + DEVMEM_MEMDESC *psFWFreelistMemDesc; + RGX_FREELIST *psFreeList; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + if (OSGetPageShift() > RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT) + { + IMG_UINT32 ui32Size, ui32NewInitFLPages, ui32NewMaxFLPages, ui32NewGrowFLPages; + + /* Round up number of FL pages to the next multiple of the OS page size */ + + ui32Size = ui32InitFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; + ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); + ui32NewInitFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; + + ui32Size = ui32GrowFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; + ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); + ui32NewGrowFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; + + ui32Size = ui32MaxFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; + ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); + ui32NewMaxFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; + + PVR_DPF((PVR_DBG_WARNING, "%s: Increased number of PB pages: Init %u -> %u, Grow %u -> %u, Max %u -> %u", + __func__, ui32InitFLPages, ui32NewInitFLPages, ui32GrowFLPages, ui32NewGrowFLPages, ui32MaxFLPages, ui32NewMaxFLPages)); + + ui32InitFLPages = 
ui32NewInitFLPages; + ui32GrowFLPages = ui32NewGrowFLPages; + ui32MaxFLPages = ui32NewMaxFLPages; + } + + /* Allocate kernel freelist struct */ + psFreeList = OSAllocZMem(sizeof(*psFreeList)); + if (psFreeList == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to allocate host data structure", + __func__)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto ErrorAllocHost; + } + + /* + * This FW FreeList context is only mapped into kernel for initialisation + * and reconstruction (at other times it is not mapped and only used by + * the FW. Therefore the GPU cache doesn't need coherency, and write-combine + * is suffice on the CPU side (WC buffer will be flushed at the first TA-kick) + */ + eError = DevmemFwAllocate(psDevInfo, + sizeof(*psFWFreeList), + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE, + "FwFreeList", + &psFWFreelistMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: DevmemAllocate for RGXFWIF_FREELIST failed", + __func__)); + goto FWFreeListAlloc; + } + + /* Initialise host data structures */ + psFreeList->psDevInfo = psDevInfo; + psFreeList->psFreeListPMR = psFreeListPMR; + psFreeList->uiFreeListPMROffset = uiFreeListPMROffset; + psFreeList->psFWFreelistMemDesc = psFWFreelistMemDesc; + eError = RGXSetFirmwareAddress(&psFreeList->sFreeListFWDevVAddr, psFWFreelistMemDesc, 0, RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", ErrorSetFwAddr); + + /* psFreeList->ui32FreelistID set below with lock... */ + psFreeList->ui32FreelistGlobalID = (psGlobalFreeList ? 
psGlobalFreeList->ui32FreelistID : 0); + psFreeList->ui32MaxFLPages = ui32MaxFLPages; + psFreeList->ui32InitFLPages = ui32InitFLPages; + psFreeList->ui32GrowFLPages = ui32GrowFLPages; + psFreeList->ui32CurrentFLPages = 0; + psFreeList->ui32ReadyFLPages = 0; + psFreeList->ui32GrowThreshold = ui32GrowParamThreshold; + psFreeList->ui64FreelistChecksum = 0; + psFreeList->ui32RefCount = 0; + psFreeList->bCheckFreelist = bCheckFreelist; + dllist_init(&psFreeList->sMemoryBlockHead); + dllist_init(&psFreeList->sMemoryBlockInitHead); +#if !defined(SUPPORT_SHADOW_FREELISTS) + dllist_init(&psFreeList->sNodeHWRTDataHead); +#endif + psFreeList->ownerPid = OSGetCurrentClientProcessIDKM(); + + + /* Add to list of freelists */ + OSLockAcquire(psDevInfo->hLockFreeList); + psFreeList->ui32FreelistID = psDevInfo->ui32FreelistCurrID++; + dllist_add_to_tail(&psDevInfo->sFreeListHead, &psFreeList->sNode); + OSLockRelease(psDevInfo->hLockFreeList); + + + /* Initialise FW data structure */ + eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList); + PVR_LOG_GOTO_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWFreeListCpuMap); + + { + const IMG_UINT32 ui32ReadyPages = _CalculateFreelistReadyPages(psFreeList, ui32InitFLPages); + + psFWFreeList->ui32MaxPages = ui32MaxFLPages; + psFWFreeList->ui32CurrentPages = ui32InitFLPages - ui32ReadyPages; + psFWFreeList->ui32GrowPages = ui32GrowFLPages; + psFWFreeList->ui32CurrentStackTop = psFWFreeList->ui32CurrentPages - 1; + psFWFreeList->psFreeListDevVAddr = sFreeListDevVAddr; + psFWFreeList->ui64CurrentDevVAddr = (sFreeListDevVAddr.uiAddr + + ((ui32MaxFLPages - psFWFreeList->ui32CurrentPages) * sizeof(IMG_UINT32))) & + ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1); + psFWFreeList->ui32FreeListID = psFreeList->ui32FreelistID; + psFWFreeList->bGrowPending = IMG_FALSE; + psFWFreeList->ui32ReadyPages = ui32ReadyPages; + +#if defined(SUPPORT_SHADOW_FREELISTS) + /* Get the FW Memory Context address... 
*/ + eError = RGXSetFirmwareAddress(&psFWFreeList->psFWMemContext, + RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData), + 0, RFW_FWADDR_NOREF_FLAG); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: RGXSetFirmwareAddress for RGXFWIF_FWMEMCONTEXT failed", + __func__)); + DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); + goto FWFreeListCpuMap; + } +#else + PVR_UNREFERENCED_PARAMETER(hMemCtxPrivData); +#endif + } + + PVR_DPF((PVR_DBG_MESSAGE, + "Freelist %p created: Max pages 0x%08x, Init pages 0x%08x, " + "Max FL base address 0x%016" IMG_UINT64_FMTSPECx ", " + "Init FL base address 0x%016" IMG_UINT64_FMTSPECx, + psFreeList, + ui32MaxFLPages, + ui32InitFLPages, + sFreeListDevVAddr.uiAddr, + psFWFreeList->ui64CurrentDevVAddr)); +#if defined(PDUMP) + PDUMPCOMMENT("Dump FW FreeList"); + DevmemPDumpLoadMem(psFreeList->psFWFreelistMemDesc, 0, sizeof(*psFWFreeList), PDUMP_FLAGS_CONTINUOUS); + + /* + * Separate dump of the Freelist's number of Pages and stack pointer. + * This allows to easily modify the PB size in the out2.txt files. 
+ */ + PDUMPCOMMENT("FreeList TotalPages"); + DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc, + offsetof(RGXFWIF_FREELIST, ui32CurrentPages), + psFWFreeList->ui32CurrentPages, + PDUMP_FLAGS_CONTINUOUS); + PDUMPCOMMENT("FreeList StackPointer"); + DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc, + offsetof(RGXFWIF_FREELIST, ui32CurrentStackTop), + psFWFreeList->ui32CurrentStackTop, + PDUMP_FLAGS_CONTINUOUS); +#endif + + DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); + + + /* Add initial PB block */ + eError = RGXGrowFreeList(psFreeList, + ui32InitFLPages, + &psFreeList->sMemoryBlockInitHead); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to allocate initial memory block for free list 0x%016" IMG_UINT64_FMTSPECx " (%d)", + __func__, + sFreeListDevVAddr.uiAddr, + eError)); + goto FWFreeListCpuMap; + } +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + /* Update Stats */ + PVRSRVStatsUpdateFreelistStats(1, /* Add 1 to the appropriate counter (Requests by App)*/ + 0, + psFreeList->ui32InitFLPages, + psFreeList->ui32NumHighPages, + psFreeList->ownerPid); + +#endif + + /* return values */ + *ppsFreeList = psFreeList; + + return PVRSRV_OK; + + /* Error handling */ + +FWFreeListCpuMap: + /* Remove freelists from list */ + OSLockAcquire(psDevInfo->hLockFreeList); + dllist_remove_node(&psFreeList->sNode); + OSLockRelease(psDevInfo->hLockFreeList); + RGXUnsetFirmwareAddress(psFWFreelistMemDesc); + +ErrorSetFwAddr: + DevmemFwUnmapAndFree(psDevInfo, psFWFreelistMemDesc); + +FWFreeListAlloc: + OSFreeMem(psFreeList); + +ErrorAllocHost: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + + +/* + RGXDestroyFreeList + */ +PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList) +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32RefCount; + + PVR_ASSERT(psFreeList); + + OSLockAcquire(psFreeList->psDevInfo->hLockFreeList); + ui32RefCount = psFreeList->ui32RefCount; + OSLockRelease(psFreeList->psDevInfo->hLockFreeList); + + if 
(ui32RefCount != 0) + { + /* Freelist still busy */ + return PVRSRV_ERROR_RETRY; + } + + /* Freelist is not in use => start firmware cleanup */ + eError = RGXFWRequestFreeListCleanUp(psFreeList->psDevInfo, + psFreeList->sFreeListFWDevVAddr); + if (eError != PVRSRV_OK) + { + /* Can happen if the firmware took too long to handle the cleanup request, + * or if SLC-flushes didn't went through (due to some GPU lockup) */ + return eError; + } + + /* Remove FreeList from linked list before we destroy it... */ + OSLockAcquire(psFreeList->psDevInfo->hLockFreeList); + dllist_remove_node(&psFreeList->sNode); +#if !defined(SUPPORT_SHADOW_FREELISTS) + /* Confirm all HWRTData nodes are freed before releasing freelist */ + PVR_ASSERT(dllist_is_empty(&psFreeList->sNodeHWRTDataHead)); +#endif + OSLockRelease(psFreeList->psDevInfo->hLockFreeList); + + if (psFreeList->bCheckFreelist) + { + RGXFWIF_FREELIST *psFWFreeList; + IMG_UINT64 ui32CurrentStackTop; + IMG_UINT64 ui64CheckSum; + + /* Get the current stack pointer for this free list */ + DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList); + ui32CurrentStackTop = psFWFreeList->ui32CurrentStackTop; + DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); + + if (ui32CurrentStackTop == psFreeList->ui32CurrentFLPages-1) + { + /* Do consistency tests (as the list is fully populated) */ + _CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum); + } + else + { + /* Check for duplicate pages, but don't check the checksum as the list is not fully populated */ + _CheckFreelist(psFreeList, ui32CurrentStackTop+1, 0, &ui64CheckSum); + } + } + + /* Destroy FW structures */ + RGXUnsetFirmwareAddress(psFreeList->psFWFreelistMemDesc); + DevmemFwUnmapAndFree(psFreeList->psDevInfo, psFreeList->psFWFreelistMemDesc); + + /* Remove grow shrink blocks */ + while (!dllist_is_empty(&psFreeList->sMemoryBlockHead)) + { + eError = 
RGXShrinkFreeList(&psFreeList->sMemoryBlockHead, psFreeList); + PVR_ASSERT(eError == PVRSRV_OK); + } + + /* Remove initial PB block */ + eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockInitHead, psFreeList); + PVR_ASSERT(eError == PVRSRV_OK); + + /* consistency checks */ + PVR_ASSERT(dllist_is_empty(&psFreeList->sMemoryBlockInitHead)); + PVR_ASSERT(psFreeList->ui32CurrentFLPages == 0); + + /* free Freelist */ + OSFreeMem(psFreeList); + + return eError; +} + + +/* + RGXCreateZSBuffer + */ +PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_RESERVATION *psReservation, + PMR *psPMR, + PVRSRV_MEMALLOCFLAGS_T uiMapFlags, + RGX_ZSBUFFER_DATA **ppsZSBuffer) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_PRBUFFER *psFWZSBuffer; + RGX_ZSBUFFER_DATA *psZSBuffer; + DEVMEM_MEMDESC *psFWZSBufferMemDesc; + IMG_BOOL bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiMapFlags) ? IMG_TRUE : IMG_FALSE; + + /* Allocate host data structure */ + psZSBuffer = OSAllocZMem(sizeof(*psZSBuffer)); + if (psZSBuffer == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate cleanup data structure for ZS-Buffer", + __func__)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto ErrorAllocCleanup; + } + + /* Populate Host data */ + psZSBuffer->psDevInfo = psDevInfo; + psZSBuffer->psReservation = psReservation; + psZSBuffer->psPMR = psPMR; + psZSBuffer->uiMapFlags = uiMapFlags; + psZSBuffer->ui32RefCount = 0; + psZSBuffer->bOnDemand = bOnDemand; + if (bOnDemand) + { + /* psZSBuffer->ui32ZSBufferID set below with lock... */ + psZSBuffer->psMapping = NULL; + + OSLockAcquire(psDevInfo->hLockZSBuffer); + psZSBuffer->ui32ZSBufferID = psDevInfo->ui32ZSBufferCurrID++; + dllist_add_to_tail(&psDevInfo->sZSBufferHead, &psZSBuffer->sNode); + OSLockRelease(psDevInfo->hLockZSBuffer); + } + + /* Allocate firmware memory for ZS-Buffer. 
*/ + PDUMPCOMMENT("Allocate firmware ZS-Buffer data structure"); + eError = DevmemFwAllocate(psDevInfo, + sizeof(*psFWZSBuffer), + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE, + "FwZSBuffer", + &psFWZSBufferMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate firmware ZS-Buffer (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto ErrorAllocFWZSBuffer; + } + psZSBuffer->psFWZSBufferMemDesc = psFWZSBufferMemDesc; + + /* Temporarily map the firmware render context to the kernel. */ + eError = DevmemAcquireCpuVirtAddr(psFWZSBufferMemDesc, + (void **)&psFWZSBuffer); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map firmware ZS-Buffer (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto ErrorAcquireFWZSBuffer; + } + + /* Populate FW ZS-Buffer data structure */ + psFWZSBuffer->bOnDemand = bOnDemand; + psFWZSBuffer->eState = (bOnDemand) ? RGXFWIF_PRBUFFER_UNBACKED : RGXFWIF_PRBUFFER_BACKED; + psFWZSBuffer->ui32BufferID = psZSBuffer->ui32ZSBufferID; + + /* Get firmware address of ZS-Buffer. */ + eError = RGXSetFirmwareAddress(&psZSBuffer->sZSBufferFWDevVAddr, psFWZSBufferMemDesc, 0, RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", ErrorSetFwAddr); + + /* Dump the ZS-Buffer and the memory content */ +#if defined(PDUMP) + PDUMPCOMMENT("Dump firmware ZS-Buffer"); + DevmemPDumpLoadMem(psFWZSBufferMemDesc, 0, sizeof(*psFWZSBuffer), PDUMP_FLAGS_CONTINUOUS); +#endif + + /* Release address acquired above. 
*/ + DevmemReleaseCpuVirtAddr(psFWZSBufferMemDesc); + + + /* define return value */ + *ppsZSBuffer = psZSBuffer; + + PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] created (%s)", + psZSBuffer, + (bOnDemand) ? "On-Demand": "Up-front")); + + psZSBuffer->owner=OSGetCurrentClientProcessIDKM(); + + return PVRSRV_OK; + + /* error handling */ + +ErrorSetFwAddr: + DevmemReleaseCpuVirtAddr(psFWZSBufferMemDesc); +ErrorAcquireFWZSBuffer: + DevmemFwUnmapAndFree(psDevInfo, psFWZSBufferMemDesc); + +ErrorAllocFWZSBuffer: + OSFreeMem(psZSBuffer); + +ErrorAllocCleanup: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + + +/* + RGXDestroyZSBuffer + */ +PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer) +{ + POS_LOCK hLockZSBuffer; + PVRSRV_ERROR eError; + + PVR_ASSERT(psZSBuffer); + hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer; + + /* Request ZS Buffer cleanup */ + eError = RGXFWRequestZSBufferCleanUp(psZSBuffer->psDevInfo, + psZSBuffer->sZSBufferFWDevVAddr); + if (eError != PVRSRV_ERROR_RETRY) + { + /* Free the firmware render context. 
*/ + RGXUnsetFirmwareAddress(psZSBuffer->psFWZSBufferMemDesc); + DevmemFwUnmapAndFree(psZSBuffer->psDevInfo, psZSBuffer->psFWZSBufferMemDesc); + + /* Remove Deferred Allocation from list */ + if (psZSBuffer->bOnDemand) + { + OSLockAcquire(hLockZSBuffer); + PVR_ASSERT(dllist_node_is_in_list(&psZSBuffer->sNode)); + dllist_remove_node(&psZSBuffer->sNode); + OSLockRelease(hLockZSBuffer); + } + + PVR_ASSERT(psZSBuffer->ui32RefCount == 0); + + PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] destroyed", psZSBuffer)); + + /* Free ZS-Buffer host data structure */ + OSFreeMem(psZSBuffer); + + } + + return eError; +} + +PVRSRV_ERROR +RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer) +{ + POS_LOCK hLockZSBuffer; + PVRSRV_ERROR eError; + + if (!psZSBuffer) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (!psZSBuffer->bOnDemand) + { + /* Only deferred allocations can be populated */ + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PVR_DPF((PVR_DBG_MESSAGE, + "ZS Buffer [%p, ID=0x%08x]: Physical backing requested", + psZSBuffer, + psZSBuffer->ui32ZSBufferID)); + hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer; + + OSLockAcquire(hLockZSBuffer); + + if (psZSBuffer->ui32RefCount == 0) + { + if (psZSBuffer->bOnDemand) + { + IMG_HANDLE hDevmemHeap; + + PVR_ASSERT(psZSBuffer->psMapping == NULL); + + /* Get Heap */ + eError = DevmemServerGetHeapHandle(psZSBuffer->psReservation, &hDevmemHeap); + PVR_ASSERT(psZSBuffer->psMapping == NULL); + if (unlikely(hDevmemHeap == (IMG_HANDLE)NULL)) + { + OSLockRelease(hLockZSBuffer); + return PVRSRV_ERROR_INVALID_HEAP; + } + + eError = DevmemIntMapPMR(hDevmemHeap, + psZSBuffer->psReservation, + psZSBuffer->psPMR, + psZSBuffer->uiMapFlags, + &psZSBuffer->psMapping); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Unable populate ZS Buffer [%p, ID=0x%08x] (%s)", + psZSBuffer, + psZSBuffer->ui32ZSBufferID, + PVRSRVGetErrorString(eError))); + OSLockRelease(hLockZSBuffer); + return eError; + + } + PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, 
ID=0x%08x]: Physical backing acquired", + psZSBuffer, + psZSBuffer->ui32ZSBufferID)); + } + } + + /* Increase refcount*/ + psZSBuffer->ui32RefCount++; + + OSLockRelease(hLockZSBuffer); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR +RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer, + RGX_POPULATION **ppsPopulation) +{ + RGX_POPULATION *psPopulation; + PVRSRV_ERROR eError; + + psZSBuffer->ui32NumReqByApp++; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + PVRSRVStatsUpdateZSBufferStats(1, 0, psZSBuffer->owner); +#endif + + /* Do the backing */ + eError = RGXBackingZSBuffer(psZSBuffer); + if (eError != PVRSRV_OK) + { + goto OnErrorBacking; + } + + /* Create the handle to the backing */ + psPopulation = OSAllocMem(sizeof(*psPopulation)); + if (psPopulation == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto OnErrorAlloc; + } + + psPopulation->psZSBuffer = psZSBuffer; + + /* return value */ + *ppsPopulation = psPopulation; + + return PVRSRV_OK; + +OnErrorAlloc: + RGXUnbackingZSBuffer(psZSBuffer); + +OnErrorBacking: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR +RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer) +{ + POS_LOCK hLockZSBuffer; + PVRSRV_ERROR eError; + + if (!psZSBuffer) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PVR_ASSERT(psZSBuffer->ui32RefCount); + + PVR_DPF((PVR_DBG_MESSAGE, + "ZS Buffer [%p, ID=0x%08x]: Physical backing removal requested", + psZSBuffer, + psZSBuffer->ui32ZSBufferID)); + + hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer; + + OSLockAcquire(hLockZSBuffer); + + if (psZSBuffer->bOnDemand) + { + if (psZSBuffer->ui32RefCount == 1) + { + PVR_ASSERT(psZSBuffer->psMapping); + + eError = DevmemIntUnmapPMR(psZSBuffer->psMapping); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Unable to unpopulate ZS Buffer [%p, ID=0x%08x] (%s)", + psZSBuffer, + psZSBuffer->ui32ZSBufferID, + PVRSRVGetErrorString(eError))); + OSLockRelease(hLockZSBuffer); + return eError; + } + + PVR_DPF((PVR_DBG_MESSAGE, "ZS 
Buffer [%p, ID=0x%08x]: Physical backing removed", + psZSBuffer, + psZSBuffer->ui32ZSBufferID)); + } + } + + /* Decrease refcount*/ + psZSBuffer->ui32RefCount--; + + OSLockRelease(hLockZSBuffer); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation) +{ + PVRSRV_ERROR eError; + + if (!psPopulation) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = RGXUnbackingZSBuffer(psPopulation->psZSBuffer); + if (eError != PVRSRV_OK) + { + return eError; + } + + OSFreeMem(psPopulation); + + return PVRSRV_OK; +} + +static RGX_ZSBUFFER_DATA *FindZSBuffer(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32ZSBufferID) +{ + DLLIST_NODE *psNode, *psNext; + RGX_ZSBUFFER_DATA *psZSBuffer = NULL; + + OSLockAcquire(psDevInfo->hLockZSBuffer); + + dllist_foreach_node(&psDevInfo->sZSBufferHead, psNode, psNext) + { + RGX_ZSBUFFER_DATA *psThisZSBuffer = IMG_CONTAINER_OF(psNode, RGX_ZSBUFFER_DATA, sNode); + + if (psThisZSBuffer->ui32ZSBufferID == ui32ZSBufferID) + { + psZSBuffer = psThisZSBuffer; + break; + } + } + + OSLockRelease(psDevInfo->hLockZSBuffer); + return psZSBuffer; +} + +void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32ZSBufferID) +{ + RGX_ZSBUFFER_DATA *psZSBuffer; + RGXFWIF_KCCB_CMD sTACCBCmd; + PVRSRV_ERROR eError; + + PVR_ASSERT(psDevInfo); + + /* scan all deferred allocations */ + psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID); + + if (psZSBuffer) + { + IMG_BOOL bBackingDone = IMG_TRUE; + + /* Populate ZLS */ + eError = RGXBackingZSBuffer(psZSBuffer); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Populating ZS-Buffer (ID = 0x%08x) failed (%s)", + ui32ZSBufferID, + PVRSRVGetErrorString(eError))); + bBackingDone = IMG_FALSE; + } + + /* send confirmation */ + sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE; + sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr; + 
sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = bBackingDone; + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GEOM, + &sTACCBCmd, + 0, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + /* Kernel CCB should never fill up, as the FW is processing them right away */ + PVR_ASSERT(eError == PVRSRV_OK); + + psZSBuffer->ui32NumReqByFW++; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + PVRSRVStatsUpdateZSBufferStats(0, 1, psZSBuffer->owner); +#endif + + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (Populate)", + ui32ZSBufferID)); + } +} + +void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32ZSBufferID) +{ + RGX_ZSBUFFER_DATA *psZSBuffer; + RGXFWIF_KCCB_CMD sTACCBCmd; + PVRSRV_ERROR eError; + + PVR_ASSERT(psDevInfo); + + /* scan all deferred allocations */ + psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID); + + if (psZSBuffer) + { + /* Unpopulate ZLS */ + eError = RGXUnbackingZSBuffer(psZSBuffer); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "UnPopulating ZS-Buffer (ID = 0x%08x) failed (%s)", + ui32ZSBufferID, + PVRSRVGetErrorString(eError))); + PVR_ASSERT(IMG_FALSE); + } + + /* send confirmation */ + sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE; + sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr; + sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = IMG_TRUE; + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GEOM, + &sTACCBCmd, + 0, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + /* Kernel CCB should never fill up, as the FW is processing them right away */ + PVR_ASSERT(eError == PVRSRV_OK); + + } + else + 
{ + PVR_DPF((PVR_DBG_ERROR, + "ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (UnPopulate)", + ui32ZSBufferID)); + } +} + +static +PVRSRV_ERROR _CreateTAContext(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC *psAllocatedMemDesc, + IMG_UINT32 ui32AllocatedOffset, + DEVMEM_MEMDESC *psFWMemContextMemDesc, + IMG_DEV_VIRTADDR sVDMCallStackAddr, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32MaxDeadlineMS, + IMG_UINT64 ui64RobustnessAddress, + RGX_COMMON_CONTEXT_INFO *psInfo, + RGX_SERVER_RC_TA_DATA *psTAData, + IMG_UINT32 ui32CCBAllocSizeLog2, + IMG_UINT32 ui32CCBMaxAllocSizeLog2, + IMG_UINT32 ui32ContextFlags) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_TACTX_STATE *psContextState; + PVRSRV_ERROR eError; + /* + Allocate device memory for the firmware GPU context suspend state. + Note: the FW reads/writes the state to memory by accessing the GPU register interface. + */ + PDUMPCOMMENT("Allocate RGX firmware TA context suspend state"); + + eError = DevmemFwAllocate(psDevInfo, + sizeof(RGXFWIF_TACTX_STATE), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwTAContextState", + &psTAData->psContextStateMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate firmware GPU context suspend state (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_tacontextsuspendalloc; + } + + eError = DevmemAcquireCpuVirtAddr(psTAData->psContextStateMemDesc, + (void **)&psContextState); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map firmware render context state (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_suspendcpuvirtacquire; + } + psContextState->uTAReg_VDM_CALL_STACK_POINTER_Init = sVDMCallStackAddr.uiAddr; + DevmemReleaseCpuVirtAddr(psTAData->psContextStateMemDesc); + + eError = FWCommonContextAllocate(psConnection, + psDeviceNode, + REQ_TYPE_TA, + RGXFWIF_DM_GEOM, + psAllocatedMemDesc, + ui32AllocatedOffset, + psFWMemContextMemDesc, + 
psTAData->psContextStateMemDesc, + ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TA_CCB_SIZE_LOG2, + ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TA_CCB_MAX_SIZE_LOG2, + ui32ContextFlags, + ui32Priority, + ui32MaxDeadlineMS, + ui64RobustnessAddress, + psInfo, + &psTAData->psServerCommonContext); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to init TA fw common context (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_tacommoncontext; + } + + /* + * Dump the FW 3D context suspend state buffer + */ +#if defined(PDUMP) + PDUMPCOMMENT("Dump the TA context suspend state buffer"); + DevmemPDumpLoadMem(psTAData->psContextStateMemDesc, + 0, + sizeof(RGXFWIF_TACTX_STATE), + PDUMP_FLAGS_CONTINUOUS); +#endif + + psTAData->ui32Priority = ui32Priority; + return PVRSRV_OK; + +fail_tacommoncontext: +fail_suspendcpuvirtacquire: + DevmemFwUnmapAndFree(psDevInfo, psTAData->psContextStateMemDesc); +fail_tacontextsuspendalloc: + PVR_ASSERT(eError != PVRSRV_OK); + + return eError; +} + +static +PVRSRV_ERROR _Create3DContext(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC *psAllocatedMemDesc, + IMG_UINT32 ui32AllocatedOffset, + DEVMEM_MEMDESC *psFWMemContextMemDesc, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32MaxDeadlineMS, + IMG_UINT64 ui64RobustnessAddress, + RGX_COMMON_CONTEXT_INFO *psInfo, + RGX_SERVER_RC_3D_DATA *ps3DData, + IMG_UINT32 ui32CCBAllocSizeLog2, + IMG_UINT32 ui32CCBMaxAllocSizeLog2, + IMG_UINT32 ui32ContextFlags) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + IMG_UINT uiNumISPStoreRegs; + IMG_UINT ui3DRegISPStateStoreSize = 0; + + /* + Allocate device memory for the firmware GPU context suspend state. + Note: the FW reads/writes the state to memory by accessing the GPU register interface. 
+ */ + PDUMPCOMMENT("Allocate RGX firmware 3D context suspend state"); + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY)) + { + uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, + RGX_FEATURE_NUM_RASTER_PIPES_IDX); + } + else + { + uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, + RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX); + } + + /* Size of the CS buffer */ + /* Calculate the size of the 3DCTX ISP state */ + ui3DRegISPStateStoreSize = sizeof(RGXFWIF_3DCTX_STATE) + + uiNumISPStoreRegs * sizeof(((RGXFWIF_3DCTX_STATE *)0)->au3DReg_ISP_STORE[0]); + + eError = DevmemFwAllocate(psDevInfo, + ui3DRegISPStateStoreSize, + RGX_FWCOMCTX_ALLOCFLAGS, + "Fw3DContextState", + &ps3DData->psContextStateMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate firmware GPU context suspend state (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_3dcontextsuspendalloc; + } + + eError = FWCommonContextAllocate(psConnection, + psDeviceNode, + REQ_TYPE_3D, + RGXFWIF_DM_3D, + psAllocatedMemDesc, + ui32AllocatedOffset, + psFWMemContextMemDesc, + ps3DData->psContextStateMemDesc, + ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_3D_CCB_SIZE_LOG2, + ui32CCBMaxAllocSizeLog2 ? 
                          ui32CCBMaxAllocSizeLog2 : RGX_3D_CCB_MAX_SIZE_LOG2,
                          ui32ContextFlags,
                          ui32Priority,
                          ui32MaxDeadlineMS,
                          ui64RobustnessAddress,
                          psInfo,
                          &ps3DData->psServerCommonContext);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: Failed to init 3D fw common context (%s)",
		         __func__,
		         PVRSRVGetErrorString(eError)));
		goto fail_3dcommoncontext;
	}

	/*
	 * Dump the FW 3D context suspend state buffer
	 */
	PDUMPCOMMENT("Dump the 3D context suspend state buffer");
	DevmemPDumpLoadMem(ps3DData->psContextStateMemDesc,
	                   0,
	                   sizeof(RGXFWIF_3DCTX_STATE),
	                   PDUMP_FLAGS_CONTINUOUS);

	ps3DData->ui32Priority = ui32Priority;
	return PVRSRV_OK;

	/* Unwind in reverse order of acquisition; both labels fall through to
	 * the failure return below. */
fail_3dcommoncontext:
	DevmemFwUnmapAndFree(psDevInfo, ps3DData->psContextStateMemDesc);
fail_3dcontextsuspendalloc:
	PVR_ASSERT(eError != PVRSRV_OK);

	return eError;
}


/*
 * PVRSRVRGXCreateRenderContextKM
 *
 * Creates a server-side render context: allocates the FW render context
 * (which embeds the TA and 3D FW common contexts), the FW framework
 * buffer, copies in the client's static render context state, and links
 * the new context into the device's render context list.
 *
 * On success *ppsRenderContext holds the new context and PVRSRV_OK is
 * returned; on failure every resource acquired so far is released via the
 * goto-unwind chain at the bottom and an error code is returned.
 */
PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection,
                                            PVRSRV_DEVICE_NODE *psDeviceNode,
                                            IMG_UINT32 ui32Priority,
                                            IMG_DEV_VIRTADDR sVDMCallStackAddr,
                                            IMG_UINT32 ui32FrameworkRegisterSize,
                                            IMG_PBYTE pabyFrameworkRegisters,
                                            IMG_HANDLE hMemCtxPrivData,
                                            IMG_UINT32 ui32StaticRenderContextStateSize,
                                            IMG_PBYTE pStaticRenderContextState,
                                            IMG_UINT32 ui32PackedCCBSizeU8888,
                                            IMG_UINT32 ui32ContextFlags,
                                            IMG_UINT64 ui64RobustnessAddress,
                                            IMG_UINT32 ui32MaxTADeadlineMS,
                                            IMG_UINT32 ui32Max3DDeadlineMS,
                                            RGX_SERVER_RENDER_CONTEXT **ppsRenderContext)
{
	PVRSRV_ERROR eError;
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
	RGX_SERVER_RENDER_CONTEXT *psRenderContext;
	DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
	RGX_COMMON_CONTEXT_INFO sInfo;
	RGXFWIF_FWRENDERCONTEXT *psFWRenderContext;

	*ppsRenderContext = NULL;

	/* Reject oversized client state before allocating anything. */
	if (ui32StaticRenderContextStateSize > RGXFWIF_STATIC_RENDERCONTEXT_SIZE)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	psRenderContext = OSAllocZMem(sizeof(*psRenderContext));
	if (psRenderContext == NULL)
	{
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	eError = OSLockCreate(&psRenderContext->hLock);

	if (eError != PVRSRV_OK)
	{
		goto fail_lock;
	}

	psRenderContext->psDeviceNode = psDeviceNode;

	/*
	   Create the FW render context, this has the TA and 3D FW common
	   contexts embedded within it
	 */
	eError = DevmemFwAllocate(psDevInfo,
	                          sizeof(RGXFWIF_FWRENDERCONTEXT),
	                          RGX_FWCOMCTX_ALLOCFLAGS,
	                          "FwRenderContext",
	                          &psRenderContext->psFWRenderContextMemDesc);
	if (eError != PVRSRV_OK)
	{
		goto fail_fwrendercontext;
	}

#if defined(SUPPORT_WORKLOAD_ESTIMATION)
	WorkEstInitTA3D(psDevInfo, &psRenderContext->sWorkEstData);
#endif

	/*
	 * Create the FW framework buffer
	 */
	eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
	                                    &psRenderContext->psFWFrameworkMemDesc,
	                                    ui32FrameworkRegisterSize);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: Failed to allocate firmware GPU framework state (%s)",
		         __func__,
		         PVRSRVGetErrorString(eError)));
		goto fail_frameworkcreate;
	}

	/* Copy the Framework client data into the framework buffer */
	eError = PVRSRVRGXFrameworkCopyCommand(psRenderContext->psFWFrameworkMemDesc,
	                                       pabyFrameworkRegisters,
	                                       ui32FrameworkRegisterSize);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: Failed to populate the framework buffer (%s)",
		         __func__,
		         PVRSRVGetErrorString(eError)));
		goto fail_frameworkcopy;
	}

	sInfo.psFWFrameworkMemDesc = psRenderContext->psFWFrameworkMemDesc;

	/* The 3D context is created first, then the TA context; each is placed
	 * at its offset inside the embedding RGXFWIF_FWRENDERCONTEXT.
	 * NOTE(review): CCB size pairs are unpacked from ui32PackedCCBSizeU8888
	 * (bytes 3/4 for 3D, bytes 1/2 for TA) — confirm against the unpack
	 * macro definitions. */
	eError = _Create3DContext(psConnection,
	                          psDeviceNode,
	                          psRenderContext->psFWRenderContextMemDesc,
	                          offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext),
	                          psFWMemContextMemDesc,
	                          ui32Priority,
	                          ui32Max3DDeadlineMS,
	                          ui64RobustnessAddress,
	                          &sInfo,
	                          &psRenderContext->s3DData,
	                          U32toU8_Unpack3(ui32PackedCCBSizeU8888),
	                          U32toU8_Unpack4(ui32PackedCCBSizeU8888),
	                          ui32ContextFlags);
	if (eError != PVRSRV_OK)
	{
		goto fail_3dcontext;
	}

	eError = _CreateTAContext(psConnection,
	                          psDeviceNode,
	                          psRenderContext->psFWRenderContextMemDesc,
	                          offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext),
	                          psFWMemContextMemDesc,
	                          sVDMCallStackAddr,
	                          ui32Priority,
	                          ui32MaxTADeadlineMS,
	                          ui64RobustnessAddress,
	                          &sInfo,
	                          &psRenderContext->sTAData,
	                          U32toU8_Unpack1(ui32PackedCCBSizeU8888),
	                          U32toU8_Unpack2(ui32PackedCCBSizeU8888),
	                          ui32ContextFlags);
	if (eError != PVRSRV_OK)
	{
		goto fail_tacontext;
	}

	eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc,
	                                  (void **)&psFWRenderContext);
	if (eError != PVRSRV_OK)
	{
		goto fail_acquire_cpu_mapping;
	}

	/* Copy the static render context data */
	OSDeviceMemCopy(&psFWRenderContext->sStaticRenderContextState, pStaticRenderContextState, ui32StaticRenderContextStateSize);
	DevmemPDumpLoadMem(psRenderContext->psFWRenderContextMemDesc, 0, sizeof(RGXFWIF_FWRENDERCONTEXT), PDUMP_FLAGS_CONTINUOUS);
	DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);

#if defined(SUPPORT_BUFFER_SYNC)
	psRenderContext->psBufferSyncContext =
		pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice,
		                               "rogue-ta3d");
	if (IS_ERR(psRenderContext->psBufferSyncContext))
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: failed to create buffer_sync context (err=%ld)",
		         __func__, PTR_ERR(psRenderContext->psBufferSyncContext)));

		eError = PVRSRV_ERROR_INVALID_PARAMS;
		goto fail_buffer_sync_context_create;
	}
#endif

	SyncAddrListInit(&psRenderContext->sSyncAddrListTAFence);
	SyncAddrListInit(&psRenderContext->sSyncAddrListTAUpdate);
	SyncAddrListInit(&psRenderContext->sSyncAddrList3DFence);
	SyncAddrListInit(&psRenderContext->sSyncAddrList3DUpdate);

	/* Publish the fully-constructed context on the device list under the
	 * writer lock; this is the last step so a partially built context is
	 * never visible to list walkers. */
	{
		PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;

		OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
		dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode));
		OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
	}

	*ppsRenderContext = psRenderContext;
	return PVRSRV_OK;

	/* Error unwind: strictly the reverse of the acquisition order above. */
#if defined(SUPPORT_BUFFER_SYNC)
fail_buffer_sync_context_create:
#endif
fail_acquire_cpu_mapping:
	_DestroyTAContext(&psRenderContext->sTAData,
	                  psDeviceNode);
fail_tacontext:
	_Destroy3DContext(&psRenderContext->s3DData,
	                  psRenderContext->psDeviceNode);
fail_3dcontext:
fail_frameworkcopy:
	DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWFrameworkMemDesc);
fail_frameworkcreate:
	DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWRenderContextMemDesc);
fail_fwrendercontext:
	OSLockDestroy(psRenderContext->hLock);
fail_lock:
	OSFreeMem(psRenderContext);
	PVR_ASSERT(eError != PVRSRV_OK);

	return eError;
}

/*
 * PVRSRVRGXDestroyRenderContextKM
 *
 * Tears down a render context created by PVRSRVRGXCreateRenderContextKM.
 * Destruction is retryable: the TA and 3D common contexts are destroyed
 * independently and tracked via ui32CleanupStatus bits, and on any failure
 * (including a workload-estimation CCB mismatch, which returns
 * PVRSRV_ERROR_RETRY) the context is re-inserted into the device list so a
 * later call can finish the job. Shared resources are only freed once both
 * TA and 3D cleanup bits are set.
 */
PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext)
{
	PVRSRV_ERROR eError;
	PVRSRV_RGXDEV_INFO *psDevInfo = psRenderContext->psDeviceNode->pvDevice;
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
	RGXFWIF_FWRENDERCONTEXT *psFWRenderContext;
	IMG_UINT32 ui32WorkEstCCBSubmitted;
#endif

	/* remove node from list before calling destroy - as destroy, if successful
	 * will invalidate the node
	 * must be re-added if destroy fails
	 */
	OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
	dllist_remove_node(&(psRenderContext->sListNode));
	OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);

#if defined(SUPPORT_BUFFER_SYNC)
	pvr_buffer_sync_context_destroy(psRenderContext->psBufferSyncContext);
#endif

	/* Cleanup the TA if we haven't already */
	if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_TA_COMPLETE) == 0)
	{
		eError = _DestroyTAContext(&psRenderContext->sTAData,
		                           psRenderContext->psDeviceNode);
		if (eError == PVRSRV_OK)
		{
			psRenderContext->ui32CleanupStatus |= RC_CLEANUP_TA_COMPLETE;
		}
		else
		{
			goto e0;
		}
	}

	/* Cleanup the 3D if we haven't already */
	if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_3D_COMPLETE) == 0)
	{
		eError = _Destroy3DContext(&psRenderContext->s3DData,
		                           psRenderContext->psDeviceNode);
		if (eError == PVRSRV_OK)
		{
			psRenderContext->ui32CleanupStatus |= RC_CLEANUP_3D_COMPLETE;
		}
		else
		{
			goto e0;
		}
	}

#if defined(SUPPORT_WORKLOAD_ESTIMATION)
	eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc,
	                                  (void **)&psFWRenderContext);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: Failed to map firmware render context (%s)",
		         __func__,
		         PVRSRVGetErrorString(eError)));
		goto e0;
	}

	ui32WorkEstCCBSubmitted = psFWRenderContext->ui32WorkEstCCBSubmitted;

	DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);

	/* Check if all of the workload estimation CCB commands for this workload are read */
	if (ui32WorkEstCCBSubmitted != psRenderContext->sWorkEstData.ui32WorkEstCCBReceived)
	{

		PVR_DPF((PVR_DBG_WARNING,
		         "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch",
		         __func__, ui32WorkEstCCBSubmitted,
		         psRenderContext->sWorkEstData.ui32WorkEstCCBReceived));

		eError = PVRSRV_ERROR_RETRY;
		goto e0;
	}
#endif

	/*
	   Only if both TA and 3D contexts have been cleaned up can we
	   free the shared resources
	 */
	if (psRenderContext->ui32CleanupStatus == (RC_CLEANUP_3D_COMPLETE | RC_CLEANUP_TA_COMPLETE))
	{
		/* Free the framework buffer */
		DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWFrameworkMemDesc);

		/* Free the firmware render context */
		DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWRenderContextMemDesc);

		SyncAddrListDeinit(&psRenderContext->sSyncAddrListTAFence);
		SyncAddrListDeinit(&psRenderContext->sSyncAddrListTAUpdate);
		SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DFence);
		SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DUpdate);

#if defined(SUPPORT_WORKLOAD_ESTIMATION)
		WorkEstDeInitTA3D(psDevInfo, &psRenderContext->sWorkEstData);
#endif

		OSLockDestroy(psRenderContext->hLock);

		OSFreeMem(psRenderContext);
	}

	return PVRSRV_OK;

e0:
	/* Destroy failed part-way: put the context back on the device list so
	 * the caller can retry the destroy later. */
	OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
	dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode));
	OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
	return eError;
}



#if (ENABLE_TA3D_UFO_DUMP == 1)
/*
 * DumpUfoList
 *
 * Debug-only helper: prints every prepared TA/3D fence and update sync to
 * the error log. A UFO address with bit 0 set denotes a sync checkpoint
 * (no explicit value), otherwise the paired value array entry is consumed.
 */
static void DumpUfoList(IMG_UINT32 ui32ClientTAFenceCount,
                        IMG_UINT32 ui32ClientTAUpdateCount,
                        IMG_UINT32 ui32Client3DFenceCount,
                        IMG_UINT32 ui32Client3DUpdateCount,
                        PRGXFWIF_UFO_ADDR *pauiClientTAFenceUFOAddress,
                        IMG_UINT32 *paui32ClientTAFenceValue,
                        PRGXFWIF_UFO_ADDR *pauiClientTAUpdateUFOAddress,
                        IMG_UINT32 *paui32ClientTAUpdateValue,
                        PRGXFWIF_UFO_ADDR *pauiClient3DFenceUFOAddress,
                        IMG_UINT32 *paui32Client3DFenceValue,
                        PRGXFWIF_UFO_ADDR *pauiClient3DUpdateUFOAddress,
                        IMG_UINT32 *paui32Client3DUpdateValue)
{
	IMG_UINT32 i;

	PVR_DPF((PVR_DBG_ERROR, "%s: ~~~ After populating sync prims ~~~",
	         __func__));

	/* Dump Fence syncs, Update syncs and PR Update syncs */
	PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA fence syncs:",
	         __func__, ui32ClientTAFenceCount));
	for (i = 0; i < ui32ClientTAFenceCount; i++)
	{
		if (BITMASK_HAS(pauiClientTAFenceUFOAddress->ui32Addr, 1))
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: %d/%d<%p>. FWAddr=0x%x,"
			         " CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
			         __func__, i + 1, ui32ClientTAFenceCount,
			         (void *) pauiClientTAFenceUFOAddress,
			         pauiClientTAFenceUFOAddress->ui32Addr));
		}
		else
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)",
			         __func__, i + 1, ui32ClientTAFenceCount,
			         (void *) pauiClientTAFenceUFOAddress,
			         pauiClientTAFenceUFOAddress->ui32Addr,
			         *paui32ClientTAFenceValue,
			         *paui32ClientTAFenceValue));
			paui32ClientTAFenceValue++;
		}
		pauiClientTAFenceUFOAddress++;
	}

	PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA update syncs:",
	         __func__, ui32ClientTAUpdateCount));
	for (i = 0; i < ui32ClientTAUpdateCount; i++)
	{
		if (BITMASK_HAS(pauiClientTAUpdateUFOAddress->ui32Addr, 1))
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: %d/%d<%p>. FWAddr=0x%x,"
			         " UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
			         __func__, i + 1, ui32ClientTAUpdateCount,
			         (void *) pauiClientTAUpdateUFOAddress,
			         pauiClientTAUpdateUFOAddress->ui32Addr));
		}
		else
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d(0x%x)",
			         __func__, i + 1, ui32ClientTAUpdateCount,
			         (void *) pauiClientTAUpdateUFOAddress,
			         pauiClientTAUpdateUFOAddress->ui32Addr,
			         *paui32ClientTAUpdateValue,
			         *paui32ClientTAUpdateValue));
			paui32ClientTAUpdateValue++;
		}
		pauiClientTAUpdateUFOAddress++;
	}

	PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D fence syncs:",
	         __func__, ui32Client3DFenceCount));
	for (i = 0; i < ui32Client3DFenceCount; i++)
	{
		if (BITMASK_HAS(pauiClient3DFenceUFOAddress->ui32Addr, 1))
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: %d/%d<%p>. FWAddr=0x%x,"
			         " CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
			         __func__, i + 1, ui32Client3DFenceCount,
			         (void *) pauiClient3DFenceUFOAddress,
			         pauiClient3DFenceUFOAddress->ui32Addr));
		}
		else
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)",
			         __func__, i + 1, ui32Client3DFenceCount,
			         (void *) pauiClient3DFenceUFOAddress,
			         pauiClient3DFenceUFOAddress->ui32Addr,
			         *paui32Client3DFenceValue,
			         *paui32Client3DFenceValue));
			paui32Client3DFenceValue++;
		}
		pauiClient3DFenceUFOAddress++;
	}

	PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D update syncs:",
	         __func__, ui32Client3DUpdateCount));
	for (i = 0; i < ui32Client3DUpdateCount; i++)
	{
		if (BITMASK_HAS(pauiClient3DUpdateUFOAddress->ui32Addr, 1))
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: %d/%d<%p>. FWAddr=0x%x,"
			         " UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
			         __func__, i + 1, ui32Client3DUpdateCount,
			         (void *) pauiClient3DUpdateUFOAddress,
			         pauiClient3DUpdateUFOAddress->ui32Addr));
		}
		else
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d(0x%x)",
			         __func__, i + 1, ui32Client3DUpdateCount,
			         (void *) pauiClient3DUpdateUFOAddress,
			         pauiClient3DUpdateUFOAddress->ui32Addr,
			         *paui32Client3DUpdateValue,
			         *paui32Client3DUpdateValue));
			paui32Client3DUpdateValue++;
		}
		pauiClient3DUpdateUFOAddress++;
	}
}
#endif /* (ENABLE_TA3D_UFO_DUMP == 1) */

/*
 * PVRSRVRGXKickTA3DKM
 *
 * (Definition continues beyond this chunk; only the opening of the
 * parameter list is visible here.)
 */
PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext,
                                 IMG_UINT32 ui32ClientCacheOpSeqNum,
                                 IMG_UINT32 ui32ClientTAFenceCount,
                                 SYNC_PRIMITIVE_BLOCK **apsClientTAFenceSyncPrimBlock,
                                 IMG_UINT32 *paui32ClientTAFenceSyncOffset,
                                 IMG_UINT32 *paui32ClientTAFenceValue,
                                 IMG_UINT32 ui32ClientTAUpdateCount,
                                 SYNC_PRIMITIVE_BLOCK **apsClientTAUpdateSyncPrimBlock,
                                 IMG_UINT32 *paui32ClientTAUpdateSyncOffset,
                                 IMG_UINT32 *paui32ClientTAUpdateValue,
                                 IMG_UINT32 ui32Client3DUpdateCount,
                                 SYNC_PRIMITIVE_BLOCK **apsClient3DUpdateSyncPrimBlock,
                                 IMG_UINT32 *paui32Client3DUpdateSyncOffset,
                                 IMG_UINT32 *paui32Client3DUpdateValue,
                                 SYNC_PRIMITIVE_BLOCK *psPRFenceSyncPrimBlock,
                                 IMG_UINT32 ui32PRFenceSyncOffset,
                                 IMG_UINT32 ui32PRFenceValue,
                                 PVRSRV_FENCE iCheckTAFence,
PVRSRV_TIMELINE iUpdateTATimeline, + PVRSRV_FENCE *piUpdateTAFence, + IMG_CHAR szFenceNameTA[PVRSRV_SYNC_NAME_LENGTH], + PVRSRV_FENCE iCheck3DFence, + PVRSRV_TIMELINE iUpdate3DTimeline, + PVRSRV_FENCE *piUpdate3DFence, + IMG_CHAR szFenceName3D[PVRSRV_SYNC_NAME_LENGTH], + IMG_UINT32 ui32TACmdSize, + IMG_PBYTE pui8TADMCmd, + IMG_UINT32 ui323DPRCmdSize, + IMG_PBYTE pui83DPRDMCmd, + IMG_UINT32 ui323DCmdSize, + IMG_PBYTE pui83DDMCmd, + IMG_UINT32 ui32ExtJobRef, + IMG_BOOL bKickTA, + IMG_BOOL bKickPR, + IMG_BOOL bKick3D, + IMG_BOOL bAbort, + IMG_UINT32 ui32PDumpFlags, + RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, + RGX_ZSBUFFER_DATA *psZSBuffer, + RGX_ZSBUFFER_DATA *psMSAAScratchBuffer, + IMG_UINT32 ui32SyncPMRCount, + IMG_UINT32 *paui32SyncPMRFlags, + PMR **ppsSyncPMRs, + IMG_UINT32 ui32RenderTargetSize, + IMG_UINT32 ui32NumberOfDrawCalls, + IMG_UINT32 ui32NumberOfIndices, + IMG_UINT32 ui32NumberOfMRTs, + IMG_UINT64 ui64DeadlineInus) +{ + /* per-context helper structures */ + RGX_CCB_CMD_HELPER_DATA *pasTACmdHelperData = psRenderContext->asTACmdHelperData; + RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelperData = psRenderContext->as3DCmdHelperData; + + IMG_UINT32 ui32TACmdCount=0; + IMG_UINT32 ui323DCmdCount=0; + IMG_UINT32 ui32TACmdOffset=0; + IMG_UINT32 ui323DCmdOffset=0; + RGXFWIF_UFO sPRUFO; + IMG_UINT32 i; + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_ERROR eError2; + + PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psRenderContext->s3DData.psServerCommonContext); + IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); + IMG_BOOL bCCBStateOpen = IMG_FALSE; + + IMG_UINT32 ui32ClientPRUpdateCount = 0; + PRGXFWIF_UFO_ADDR *pauiClientPRUpdateUFOAddress = NULL; + IMG_UINT32 *paui32ClientPRUpdateValue = NULL; + + PRGXFWIF_UFO_ADDR *pauiClientTAFenceUFOAddress = NULL; + PRGXFWIF_UFO_ADDR *pauiClientTAUpdateUFOAddress = NULL; + PRGXFWIF_UFO_ADDR *pauiClient3DFenceUFOAddress = NULL; + PRGXFWIF_UFO_ADDR *pauiClient3DUpdateUFOAddress = NULL; + 
PRGXFWIF_UFO_ADDR uiPRFenceUFOAddress; + + IMG_UINT64 uiCheckTAFenceUID = 0; + IMG_UINT64 uiCheck3DFenceUID = 0; + IMG_UINT64 uiUpdateTAFenceUID = 0; + IMG_UINT64 uiUpdate3DFenceUID = 0; + + IMG_BOOL bUseCombined3DAnd3DPR = bKickPR && bKick3D && !pui83DPRDMCmd; + + RGXFWIF_KCCB_CMD_KICK_DATA sTACmdKickData; + RGXFWIF_KCCB_CMD_KICK_DATA s3DCmdKickData; + IMG_BOOL bUseSingleFWCommand = bKickTA && (bKickPR || bKick3D); + + IMG_UINT32 ui32TACmdSizeTmp = 0, ui323DCmdSizeTmp = 0; + + IMG_BOOL bTAFenceOnSyncCheckpointsOnly = IMG_FALSE; + + PVRSRV_FENCE iUpdateTAFence = PVRSRV_NO_FENCE; + PVRSRV_FENCE iUpdate3DFence = PVRSRV_NO_FENCE; + + IMG_BOOL b3DFenceOnSyncCheckpointsOnly = IMG_FALSE; + IMG_UINT32 ui32TAFenceTimelineUpdateValue = 0; + IMG_UINT32 ui323DFenceTimelineUpdateValue = 0; + + /* + * Count of the number of TA and 3D update values (may differ from number of + * TA and 3D updates later, as sync checkpoints do not need to specify a value) + */ + IMG_UINT32 ui32ClientPRUpdateValueCount = 0; + IMG_UINT32 ui32ClientTAUpdateValueCount = ui32ClientTAUpdateCount; + IMG_UINT32 ui32Client3DUpdateValueCount = ui32Client3DUpdateCount; + PSYNC_CHECKPOINT *apsFenceTASyncCheckpoints = NULL; /*!< TA fence checkpoints */ + PSYNC_CHECKPOINT *apsFence3DSyncCheckpoints = NULL; /*!< 3D fence checkpoints */ + IMG_UINT32 ui32FenceTASyncCheckpointCount = 0; + IMG_UINT32 ui32Fence3DSyncCheckpointCount = 0; + PSYNC_CHECKPOINT psUpdateTASyncCheckpoint = NULL; /*!< TA update checkpoint (output) */ + PSYNC_CHECKPOINT psUpdate3DSyncCheckpoint = NULL; /*!< 3D update checkpoint (output) */ + PVRSRV_CLIENT_SYNC_PRIM *psTAFenceTimelineUpdateSync = NULL; + PVRSRV_CLIENT_SYNC_PRIM *ps3DFenceTimelineUpdateSync = NULL; + void *pvTAUpdateFenceFinaliseData = NULL; + void *pv3DUpdateFenceFinaliseData = NULL; + + RGX_SYNC_DATA sTASyncData = {NULL}; /*!< Contains internal update syncs for TA */ + RGX_SYNC_DATA s3DSyncData = {NULL}; /*!< Contains internal update syncs for 3D */ + + IMG_BOOL 
bTestSLRAdd3DCheck = IMG_FALSE; +#if defined(SUPPORT_VALIDATION) + PVRSRV_FENCE hTestSLRTmpFence = PVRSRV_NO_FENCE; + PSYNC_CHECKPOINT psDummySyncCheckpoint; +#endif + +#if defined(SUPPORT_BUFFER_SYNC) + PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL; + IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0; + PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL; + struct pvr_buffer_sync_append_data *psBufferSyncData = NULL; +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataTA = {0}; + RGXFWIF_WORKEST_KICK_DATA sWorkloadKickData3D = {0}; + IMG_UINT32 ui32TACommandOffset = 0; + IMG_UINT32 ui323DCommandOffset = 0; + IMG_UINT32 ui32TACmdHeaderOffset = 0; + IMG_UINT32 ui323DCmdHeaderOffset = 0; + IMG_UINT32 ui323DFullRenderCommandOffset = 0; + IMG_UINT32 ui32TACmdOffsetWrapCheck = 0; + IMG_UINT32 ui323DCmdOffsetWrapCheck = 0; + RGX_WORKLOAD sWorkloadCharacteristics = {0}; +#endif + + IMG_UINT32 ui32TAFenceCount, ui323DFenceCount; + IMG_UINT32 ui32TAUpdateCount, ui323DUpdateCount; + IMG_UINT32 ui32PRUpdateCount; + + IMG_PID uiCurrentProcess = OSGetCurrentClientProcessIDKM(); + + IMG_UINT32 ui32Client3DFenceCount = 0; + + /* Ensure we haven't been given a null ptr to + * TA fence values if we have been told we + * have TA sync prim fences + */ + if (ui32ClientTAFenceCount > 0) + { + PVR_LOG_RETURN_IF_FALSE(paui32ClientTAFenceValue != NULL, + "paui32ClientTAFenceValue NULL but " + "ui32ClientTAFenceCount > 0", + PVRSRV_ERROR_INVALID_PARAMS); + } + /* Ensure we haven't been given a null ptr to + * TA update values if we have been told we + * have TA updates + */ + if (ui32ClientTAUpdateCount > 0) + { + PVR_LOG_RETURN_IF_FALSE(paui32ClientTAUpdateValue != NULL, + "paui32ClientTAUpdateValue NULL but " + "ui32ClientTAUpdateCount > 0", + PVRSRV_ERROR_INVALID_PARAMS); + } + /* Ensure we haven't been given a null ptr to + * 3D update values if we have been told we + * have 3D updates + */ + if 
(ui32Client3DUpdateCount > 0) + { + PVR_LOG_RETURN_IF_FALSE(paui32Client3DUpdateValue != NULL, + "paui32Client3DUpdateValue NULL but " + "ui32Client3DUpdateCount > 0", + PVRSRV_ERROR_INVALID_PARAMS); + } + + /* Write FW addresses into CMD SHARED BLOCKs */ + { + CMDTA3D_SHARED *psGeomCmdShared = (CMDTA3D_SHARED *)pui8TADMCmd; + CMDTA3D_SHARED *ps3DCmdShared = (CMDTA3D_SHARED *)pui83DDMCmd; + CMDTA3D_SHARED *psPR3DCmdShared = (CMDTA3D_SHARED *)pui83DPRDMCmd; + + if (psKMHWRTDataSet == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "KMHWRTDataSet is a null-pointer")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Write FW address for TA CMD + */ + if (psGeomCmdShared != NULL) + { + psGeomCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; + + if (psZSBuffer != NULL) + { + psGeomCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr; + } + if (psMSAAScratchBuffer != NULL) + { + psGeomCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr; + } + } + + /* Write FW address for 3D CMD + */ + if (ps3DCmdShared != NULL) + { + ps3DCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; + + if (psZSBuffer != NULL) + { + ps3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr; + } + if (psMSAAScratchBuffer != NULL) + { + ps3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr; + } + } + + /* Write FW address for PR3D CMD + */ + if (psPR3DCmdShared != NULL) + { + psPR3DCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; + + if (psZSBuffer != NULL) + { + psPR3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr; + } + if (psMSAAScratchBuffer != NULL) + { + psPR3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr; + } + } + } + + if (unlikely(iUpdateTATimeline >= 0 && !piUpdateTAFence)) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + if (unlikely(iUpdate3DTimeline >= 
0 && !piUpdate3DFence)) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d, " + "ui32Client3DFenceCount=%d, ui32Client3DUpdateCount=%d", + __func__, + ui32ClientTAFenceCount, ui32ClientTAUpdateCount, + ui32Client3DFenceCount, ui32Client3DUpdateCount)); + /* Sanity check we have a PR kick if there are client fences */ + if (unlikely(!bKickPR && ui32Client3DFenceCount != 0)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: 3D fence passed without a PR kick", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Ensure the string is null-terminated (Required for safety) */ + szFenceNameTA[PVRSRV_SYNC_NAME_LENGTH-1] = '\0'; + szFenceName3D[PVRSRV_SYNC_NAME_LENGTH-1] = '\0'; + + OSLockAcquire(psRenderContext->hLock); + + ui32TAFenceCount = ui32ClientTAFenceCount; + ui323DFenceCount = ui32Client3DFenceCount; + ui32TAUpdateCount = ui32ClientTAUpdateCount; + ui323DUpdateCount = ui32Client3DUpdateCount; + ui32PRUpdateCount = ui32ClientPRUpdateCount; + +#if defined(SUPPORT_BUFFER_SYNC) + if (ui32SyncPMRCount) + { + int err; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling" + " pvr_buffer_sync_resolve_and_create_fences", __func__)); + + err = pvr_buffer_sync_resolve_and_create_fences( + psRenderContext->psBufferSyncContext, + psRenderContext->psDeviceNode->hSyncCheckpointContext, + ui32SyncPMRCount, + ppsSyncPMRs, + paui32SyncPMRFlags, + &ui32BufferFenceSyncCheckpointCount, + &apsBufferFenceSyncCheckpoints, + &psBufferUpdateSyncCheckpoint, + &psBufferSyncData + ); + + if (unlikely(err)) + { + switch (err) + { + case -EINTR: + eError = PVRSRV_ERROR_RETRY; + break; + case -ENOMEM: + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + break; + default: + eError = PVRSRV_ERROR_INVALID_PARAMS; + break; + } + + if (eError != PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_ERROR, "%s: " + "pvr_buffer_sync_resolve_and_create_fences failed (%d)", + __func__, eError)); + } + OSLockRelease(psRenderContext->hLock); + + return 
eError; + } + +#if !defined(SUPPORT_STRIP_RENDERING) + if (bKickTA) + { + ui32TAFenceCount += ui32BufferFenceSyncCheckpointCount; + } + else + { + ui323DFenceCount += ui32BufferFenceSyncCheckpointCount; + } +#else /* !defined(SUPPORT_STRIP_RENDERING) */ + ui323DFenceCount += ui32BufferFenceSyncCheckpointCount; + + PVR_UNREFERENCED_PARAMETER(bTAFenceOnSyncCheckpointsOnly); +#endif /* !defined(SUPPORT_STRIP_RENDERING) */ + + if (psBufferUpdateSyncCheckpoint != NULL) + { + if (bKick3D) + { + ui323DUpdateCount++; + } + else + { + ui32PRUpdateCount++; + } + } + } +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + +#if !defined(UPDATE_FENCE_CHECKPOINT_COUNT) || UPDATE_FENCE_CHECKPOINT_COUNT != 1 && UPDATE_FENCE_CHECKPOINT_COUNT != 2 +#error "Invalid value for UPDATE_FENCE_CHECKPOINT_COUNT. Must be either 1 or 2." +#endif /* !defined(UPDATE_FENCE_CHECKPOINT_COUNT) || UPDATE_FENCE_CHECKPOINT_COUNT != 1 && UPDATE_FENCE_CHECKPOINT_COUNT != 2 */ + + if (iCheckTAFence != PVRSRV_NO_FENCE) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence[TA]" + " (iCheckFence=%d)," + " psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>...", + __func__, iCheckTAFence, + (void *) psRenderContext->psDeviceNode->hSyncCheckpointContext)); + + /* Resolve the sync checkpoints that make up the input fence */ + eError = SyncCheckpointResolveFence( + psRenderContext->psDeviceNode->hSyncCheckpointContext, + iCheckTAFence, + &ui32FenceTASyncCheckpointCount, + &apsFenceTASyncCheckpoints, + &uiCheckTAFenceUID, + ui32PDumpFlags); + if (unlikely(eError != PVRSRV_OK)) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", + __func__, eError)); + goto fail_resolve_input_fence; + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d " + "checkpoints (apsFenceSyncCheckpoints=<%p>)", + __func__, iCheckTAFence, ui32FenceTASyncCheckpointCount, + (void *) apsFenceTASyncCheckpoints)); + +#if defined(TA3D_CHECKPOINT_DEBUG) + if (apsFenceTASyncCheckpoints) + { + 
_DebugSyncCheckpoints(__func__, "TA", apsFenceTASyncCheckpoints, + ui32FenceTASyncCheckpointCount); + } +#endif /* defined(TA3D_CHECKPOINT_DEBUG) */ + } + + if (iCheck3DFence != PVRSRV_NO_FENCE) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence[3D]" + " (iCheckFence=%d), " + "psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>...", + __func__, iCheck3DFence, + (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext)); + + /* Resolve the sync checkpoints that make up the input fence */ + eError = SyncCheckpointResolveFence( + psRenderContext->psDeviceNode->hSyncCheckpointContext, + iCheck3DFence, + &ui32Fence3DSyncCheckpointCount, + &apsFence3DSyncCheckpoints, + &uiCheck3DFenceUID, + ui32PDumpFlags); + if (unlikely(eError != PVRSRV_OK)) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", + __func__, eError)); + goto fail_resolve_input_fence; + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d " + "checkpoints (apsFenceSyncCheckpoints=<%p>)", + __func__, iCheck3DFence, ui32Fence3DSyncCheckpointCount, + (void*)apsFence3DSyncCheckpoints)); + +#if defined(TA3D_CHECKPOINT_DEBUG) + if (apsFence3DSyncCheckpoints) + { + _DebugSyncCheckpoints(__func__, "3D", apsFence3DSyncCheckpoints, + ui32Fence3DSyncCheckpointCount); + } +#endif /* defined(TA3D_CHECKPOINT_DEBUG) */ + } + + if (iCheckTAFence >= 0 || iUpdateTATimeline >= 0 || + iCheck3DFence >= 0 || iUpdate3DTimeline >= 0) + { + IMG_UINT32 i; + + if (bKickTA) + { + ui32TAFenceCount += ui32FenceTASyncCheckpointCount; + + for (i = 0; i < ui32Fence3DSyncCheckpointCount; i++) + { + if (SyncCheckpointGetCreator(apsFence3DSyncCheckpoints[i]) != + uiCurrentProcess) + { + ui32TAFenceCount++; + } + } + } + + if (bKick3D) + { + ui323DFenceCount += ui32Fence3DSyncCheckpointCount; + } + + ui32TAUpdateCount += iUpdateTATimeline != PVRSRV_NO_TIMELINE ? + UPDATE_FENCE_CHECKPOINT_COUNT : 0; + ui323DUpdateCount += iUpdate3DTimeline != PVRSRV_NO_TIMELINE ? 
+ UPDATE_FENCE_CHECKPOINT_COUNT : 0; + ui32PRUpdateCount += iUpdate3DTimeline != PVRSRV_NO_TIMELINE && !bKick3D ? + UPDATE_FENCE_CHECKPOINT_COUNT : 0; + } + +#if defined(SUPPORT_VALIDATION) + /* Check if TestingSLR is adding an extra sync checkpoint to the + * 3D fence check (which we won't signal) + */ + if ((psDevInfo->ui32TestSLRInterval > 0) && + (--psDevInfo->ui32TestSLRCount == 0)) + { + bTestSLRAdd3DCheck = IMG_TRUE; + psDevInfo->ui32TestSLRCount = psDevInfo->ui32TestSLRInterval; + } + + if ((bTestSLRAdd3DCheck) && (iUpdate3DTimeline != PVRSRV_NO_TIMELINE)) + { + if (iUpdate3DTimeline == PVRSRV_NO_TIMELINE) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Would append additional SLR checkpoint " + "to 3D fence but no update 3D timeline provided", __func__)); + } + else + { + SyncCheckpointAlloc(psRenderContext->psDeviceNode->hSyncCheckpointContext, + iUpdate3DTimeline, + hTestSLRTmpFence, + "TestSLRCheck", + &psDummySyncCheckpoint); + PVR_DPF((PVR_DBG_WARNING, "%s: Appending additional SLR checkpoint to 3D fence " + "checkpoints (psDummySyncCheckpoint=<%p>)", + __func__, (void*)psDummySyncCheckpoint)); + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DFence, + 1, + &psDummySyncCheckpoint); + if (!pauiClient3DFenceUFOAddress) + { + pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs; + } + + if (ui32Client3DFenceCount == 0) + { + b3DFenceOnSyncCheckpointsOnly = IMG_TRUE; + } + ui323DFenceCount++; + } + } +#endif /* defined(SUPPORT_VALIDATION) */ + + if (bKickTA) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB()," + " ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d", + __func__, ui32TAFenceCount, ui32TAUpdateCount)); + + RGXCmdHelperInitCmdCCB_CommandSize( + ui32TAFenceCount, + ui32TAUpdateCount, + ui32TACmdSize, + pasTACmdHelperData + ); + } + + if (bKickPR) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB()," + " ui32Client3DFenceCount=%d", __func__, + ui323DFenceCount)); + + 
RGXCmdHelperInitCmdCCB_CommandSize( + ui323DFenceCount, + 0, + sizeof(sPRUFO), + &pas3DCmdHelperData[ui323DCmdCount++] + ); + } + + if (bKickPR && !bUseCombined3DAnd3DPR) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB()," + " ui32PRUpdateCount=%d", __func__, + ui32PRUpdateCount)); + + RGXCmdHelperInitCmdCCB_CommandSize( + 0, + ui32PRUpdateCount, + /* if the client has not provided a 3DPR command, the regular 3D + * command should be used instead */ + pui83DPRDMCmd ? ui323DPRCmdSize : ui323DCmdSize, + &pas3DCmdHelperData[ui323DCmdCount++] + ); + } + + if (bKick3D || bAbort) + { + if (!bKickTA) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB()," + " ui32Client3DFenceCount=%d", __func__, + ui323DFenceCount)); + } + + RGXCmdHelperInitCmdCCB_CommandSize( + bKickTA ? 0 : ui323DFenceCount, + ui323DUpdateCount, + ui323DCmdSize, + &pas3DCmdHelperData[ui323DCmdCount++] + ); + } + + if (bKickTA) + { + ui32TACmdSizeTmp = RGXCmdHelperGetCommandSize(1, pasTACmdHelperData); + + eError = RGXCheckSpaceCCB( + FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext), + ui32TACmdSizeTmp + ); + if (eError != PVRSRV_OK) + { + goto err_not_enough_space; + } + } + + if (ui323DCmdCount > 0) + { + ui323DCmdSizeTmp = RGXCmdHelperGetCommandSize(ui323DCmdCount, pas3DCmdHelperData); + + eError = RGXCheckSpaceCCB( + FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext), + ui323DCmdSizeTmp + ); + if (eError != PVRSRV_OK) + { + goto err_not_enough_space; + } + } + + /* need to reset the counter here */ + + ui323DCmdCount = 0; + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrListTAFence, %d fences)...", + __func__, ui32ClientTAFenceCount)); + eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAFence, + ui32ClientTAFenceCount, + apsClientTAFenceSyncPrimBlock, + paui32ClientTAFenceSyncOffset); + if (unlikely(eError != PVRSRV_OK)) + { + goto 
err_populate_sync_addr_list_ta_fence; + } + + if (ui32ClientTAFenceCount) + { + pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs; + } + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: pauiClientTAFenceUFOAddress=<%p> ", + __func__, (void*)pauiClientTAFenceUFOAddress)); + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrListTAUpdate, %d updates)...", + __func__, ui32ClientTAUpdateCount)); + eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAUpdate, + ui32ClientTAUpdateCount, + apsClientTAUpdateSyncPrimBlock, + paui32ClientTAUpdateSyncOffset); + if (unlikely(eError != PVRSRV_OK)) + { + goto err_populate_sync_addr_list_ta_update; + } + + if (ui32ClientTAUpdateCount) + { + pauiClientTAUpdateUFOAddress = psRenderContext->sSyncAddrListTAUpdate.pasFWAddrs; + } + CHKPT_DBG((PVR_DBG_ERROR, + "%s: pauiClientTAUpdateUFOAddress=<%p> ", + __func__, (void*)pauiClientTAUpdateUFOAddress)); + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrList3DFence, %d fences)...", + __func__, ui32Client3DFenceCount)); + eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DFence, + ui32Client3DFenceCount, + NULL, + NULL); + if (unlikely(eError != PVRSRV_OK)) + { + goto err_populate_sync_addr_list_3d_fence; + } + + if (ui32Client3DFenceCount) + { + pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs; + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClient3DFenceUFOAddress=<%p> ", + __func__, (void*)pauiClient3DFenceUFOAddress)); + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrList3DUpdate, %d updates)...", + __func__, ui32Client3DUpdateCount)); + eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DUpdate, + ui32Client3DUpdateCount, + apsClient3DUpdateSyncPrimBlock, + paui32Client3DUpdateSyncOffset); + if (unlikely(eError != PVRSRV_OK)) + { + goto err_populate_sync_addr_list_3d_update; + } + + if (ui32Client3DUpdateCount || 
(iUpdate3DTimeline != PVRSRV_NO_TIMELINE && piUpdate3DFence && bKick3D)) + { + pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClient3DUpdateUFOAddress=<%p> ", + __func__, (void*)pauiClient3DUpdateUFOAddress)); + + eError = SyncPrimitiveBlockToFWAddr(psPRFenceSyncPrimBlock, ui32PRFenceSyncOffset, &uiPRFenceUFOAddress); + + if (unlikely(eError != PVRSRV_OK)) + { + goto err_pr_fence_address; + } + +#if (ENABLE_TA3D_UFO_DUMP == 1) + DumpUfoList(ui32ClientTAFenceCount, ui32ClientTAUpdateCount, + ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 1 : 0), + ui32Client3DUpdateCount, + pauiClientTAFenceUFOAddress, paui32ClientTAFenceValue, + pauiClientTAUpdateUFOAddress, paui32ClientTAUpdateValue, + pauiClient3DFenceUFOAddress, NULL, + pauiClient3DUpdateUFOAddress, paui32Client3DUpdateValue); +#endif /* (ENABLE_TA3D_UFO_DUMP == 1) */ + + if (ui32SyncPMRCount) + { +#if defined(SUPPORT_BUFFER_SYNC) +#if !defined(SUPPORT_STRIP_RENDERING) + /* Append buffer sync fences to TA fences */ + if (ui32BufferFenceSyncCheckpointCount > 0 && bKickTA) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append %d buffer sync checkpoints to TA Fence " + "(&psRenderContext->sSyncAddrListTAFence=<%p>, " + "pauiClientTAFenceUFOAddress=<%p>)...", + __func__, + ui32BufferFenceSyncCheckpointCount, + (void*)&psRenderContext->sSyncAddrListTAFence , + (void*)pauiClientTAFenceUFOAddress)); + SyncAddrListAppendAndDeRefCheckpoints(&psRenderContext->sSyncAddrListTAFence, + ui32BufferFenceSyncCheckpointCount, + apsBufferFenceSyncCheckpoints); + if (!pauiClientTAFenceUFOAddress) + { + pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs; + } + if (ui32ClientTAFenceCount == 0) + { + bTAFenceOnSyncCheckpointsOnly = IMG_TRUE; + } + ui32ClientTAFenceCount += ui32BufferFenceSyncCheckpointCount; + } + else +#endif + /* Append buffer sync fences to 3D fences */ + if (ui32BufferFenceSyncCheckpointCount > 0) + { + 
CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append %d buffer sync checkpoints to 3D Fence " + "(&psRenderContext->sSyncAddrList3DFence=<%p>, " + "pauiClient3DFenceUFOAddress=<%p>)...", + __func__, + ui32BufferFenceSyncCheckpointCount, + (void*)&psRenderContext->sSyncAddrList3DFence, + (void*)pauiClient3DFenceUFOAddress)); + SyncAddrListAppendAndDeRefCheckpoints(&psRenderContext->sSyncAddrList3DFence, + ui32BufferFenceSyncCheckpointCount, + apsBufferFenceSyncCheckpoints); + if (!pauiClient3DFenceUFOAddress) + { + pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs; + } + if (ui32Client3DFenceCount == 0) + { + b3DFenceOnSyncCheckpointsOnly = IMG_TRUE; + } + ui32Client3DFenceCount += ui32BufferFenceSyncCheckpointCount; + } + + if (psBufferUpdateSyncCheckpoint) + { + /* If we have a 3D kick append update to the 3D updates else append to the PR update */ + if (bKick3D) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append 1 buffer sync checkpoint<%p> to 3D Update" + " (&psRenderContext->sSyncAddrList3DUpdate=<%p>," + " pauiClient3DUpdateUFOAddress=<%p>)...", + __func__, + (void*)psBufferUpdateSyncCheckpoint, + (void*)&psRenderContext->sSyncAddrList3DUpdate, + (void*)pauiClient3DUpdateUFOAddress)); + /* Append buffer sync update to 3D updates */ + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, + 1, + &psBufferUpdateSyncCheckpoint); + if (!pauiClient3DUpdateUFOAddress) + { + pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; + } + ui32Client3DUpdateCount++; + } + else + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append 1 buffer sync checkpoint<%p> to PR Update" + " (&psRenderContext->sSyncAddrList3DUpdate=<%p>," + " pauiClientPRUpdateUFOAddress=<%p>)...", + __func__, + (void*)psBufferUpdateSyncCheckpoint, + (void*)&psRenderContext->sSyncAddrList3DUpdate, + (void*)pauiClientPRUpdateUFOAddress)); + /* Attach update to the 3D (used for PR) Updates */ + 
SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, + 1, + &psBufferUpdateSyncCheckpoint); + if (!pauiClientPRUpdateUFOAddress) + { + pauiClientPRUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; + } + ui32ClientPRUpdateCount++; + } + } + CHKPT_DBG((PVR_DBG_ERROR, + "%s: (after buffer_sync) ui32ClientTAFenceCount=%d, " + "ui32ClientTAUpdateCount=%d, ui32Client3DFenceCount=%d, " + "ui32Client3DUpdateCount=%d, ui32ClientPRUpdateCount=%d,", + __func__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount, + ui32Client3DFenceCount, ui32Client3DUpdateCount, + ui32ClientPRUpdateCount)); + +#else /* defined(SUPPORT_BUFFER_SYNC) */ + PVR_DPF((PVR_DBG_ERROR, + "%s: Buffer sync not supported but got %u buffers", + __func__, ui32SyncPMRCount)); + OSLockRelease(psRenderContext->hLock); + return PVRSRV_ERROR_INVALID_PARAMS; +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + } + + /* + * The hardware requires a PR to be submitted if there is a TA (otherwise + * it can wedge if we run out of PB space with no PR to run) + * + * If we only have a TA, attach native checks to the TA and updates to the PR + * If we have a TA and 3D, attach checks to TA, updates to 3D + * If we only have a 3D, attach checks and updates to the 3D + * + * Note that 'updates' includes the cleanup syncs for 'check' fence FDs, in + * addition to the update fence FD (if supplied) + * + * Currently, the client driver never kicks only the 3D, so we only support + * that for the time being. 
+ */ + if (iCheckTAFence >= 0 || iUpdateTATimeline >= 0 || + iCheck3DFence >= 0 || iUpdate3DTimeline >= 0) + { + PRGXFWIF_UFO_ADDR *pauiClientTAIntUpdateUFOAddress = NULL; + PRGXFWIF_UFO_ADDR *pauiClient3DIntUpdateUFOAddress = NULL; + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: [TA] iCheckFence = %d, iUpdateTimeline = %d", + __func__, iCheckTAFence, iUpdateTATimeline)); + CHKPT_DBG((PVR_DBG_ERROR, + "%s: [3D] iCheckFence = %d, iUpdateTimeline = %d", + __func__, iCheck3DFence, iUpdate3DTimeline)); + + { + /* Create the output fence for TA (if required) */ + if (iUpdateTATimeline != PVRSRV_NO_TIMELINE) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: calling SyncCheckpointCreateFence[TA] " + "(iUpdateFence=%d, iUpdateTimeline=%d, " + "psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>)", + __func__, iUpdateTAFence, iUpdateTATimeline, + (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext)); + eError = SyncCheckpointCreateFence(psRenderContext->psDeviceNode, + szFenceNameTA, + iUpdateTATimeline, + psRenderContext->psDeviceNode->hSyncCheckpointContext, + &iUpdateTAFence, + &uiUpdateTAFenceUID, + &pvTAUpdateFenceFinaliseData, + &psUpdateTASyncCheckpoint, + (void*)&psTAFenceTimelineUpdateSync, + &ui32TAFenceTimelineUpdateValue, + ui32PDumpFlags); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: SyncCheckpointCreateFence[TA] failed (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_create_output_fence; + } + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: returned from SyncCheckpointCreateFence[TA] " + "(iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, " + "ui32FenceTimelineUpdateValue=0x%x)", + __func__, iUpdateTAFence, + (void*)psTAFenceTimelineUpdateSync, + ui32TAFenceTimelineUpdateValue)); + + /* Store the FW address of the update sync checkpoint in pauiClientTAIntUpdateUFOAddress */ + pauiClientTAIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdateTASyncCheckpoint); + CHKPT_DBG((PVR_DBG_ERROR, + "%s: 
pauiClientIntUpdateUFOAddress[TA]->ui32Addr=0x%x", + __func__, pauiClientTAIntUpdateUFOAddress->ui32Addr)); + } + + /* Append the sync prim update for the TA timeline (if required) */ + if (psTAFenceTimelineUpdateSync) + { + sTASyncData.ui32ClientUpdateCount = ui32ClientTAUpdateCount; + sTASyncData.ui32ClientUpdateValueCount = ui32ClientTAUpdateValueCount; + sTASyncData.ui32ClientPRUpdateValueCount = (bKick3D) ? 0 : ui32ClientPRUpdateValueCount; + sTASyncData.paui32ClientUpdateValue = paui32ClientTAUpdateValue; + + eError = RGXSyncAppendTimelineUpdate(ui32TAFenceTimelineUpdateValue, + &psRenderContext->sSyncAddrListTAUpdate, + (bKick3D) ? NULL : &psRenderContext->sSyncAddrList3DUpdate, + psTAFenceTimelineUpdateSync, + &sTASyncData, + bKick3D); + if (unlikely(eError != PVRSRV_OK)) + { + goto fail_alloc_update_values_mem_TA; + } + + paui32ClientTAUpdateValue = sTASyncData.paui32ClientUpdateValue; + ui32ClientTAUpdateValueCount = sTASyncData.ui32ClientUpdateValueCount; + pauiClientTAUpdateUFOAddress = sTASyncData.pauiClientUpdateUFOAddress; + ui32ClientTAUpdateCount = sTASyncData.ui32ClientUpdateCount; + } + + /* Create the output fence for 3D (if required) */ + if (iUpdate3DTimeline != PVRSRV_NO_TIMELINE) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: calling SyncCheckpointCreateFence[3D] " + "(iUpdateFence=%d, iUpdateTimeline=%d, " + "psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>)", + __func__, iUpdate3DFence, iUpdate3DTimeline, + (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext)); + eError = SyncCheckpointCreateFence(psRenderContext->psDeviceNode, + szFenceName3D, + iUpdate3DTimeline, + psRenderContext->psDeviceNode->hSyncCheckpointContext, + &iUpdate3DFence, + &uiUpdate3DFenceUID, + &pv3DUpdateFenceFinaliseData, + &psUpdate3DSyncCheckpoint, + (void*)&ps3DFenceTimelineUpdateSync, + &ui323DFenceTimelineUpdateValue, + ui32PDumpFlags); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: SyncCheckpointCreateFence[3D] failed (%s)", 
+ __func__, + PVRSRVGetErrorString(eError))); + goto fail_create_output_fence; + } + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: returned from SyncCheckpointCreateFence[3D] " + "(iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, " + "ui32FenceTimelineUpdateValue=0x%x)", + __func__, iUpdate3DFence, + (void*)ps3DFenceTimelineUpdateSync, + ui323DFenceTimelineUpdateValue)); + + /* Store the FW address of the update sync checkpoint in pauiClient3DIntUpdateUFOAddress */ + pauiClient3DIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdate3DSyncCheckpoint); + CHKPT_DBG((PVR_DBG_ERROR, + "%s: pauiClientIntUpdateUFOAddress[3D]->ui32Addr=0x%x", + __func__, pauiClient3DIntUpdateUFOAddress->ui32Addr)); + } + + /* Append the sync prim update for the 3D timeline (if required) */ + if (ps3DFenceTimelineUpdateSync) + { + s3DSyncData.ui32ClientUpdateCount = ui32Client3DUpdateCount; + s3DSyncData.ui32ClientUpdateValueCount = ui32Client3DUpdateValueCount; + s3DSyncData.ui32ClientPRUpdateValueCount = ui32ClientPRUpdateValueCount; + s3DSyncData.paui32ClientUpdateValue = paui32Client3DUpdateValue; + + eError = RGXSyncAppendTimelineUpdate(ui323DFenceTimelineUpdateValue, + &psRenderContext->sSyncAddrList3DUpdate, + &psRenderContext->sSyncAddrList3DUpdate, /*!< PR update: is this required? 
*/ + ps3DFenceTimelineUpdateSync, + &s3DSyncData, + bKick3D); + if (unlikely(eError != PVRSRV_OK)) + { + goto fail_alloc_update_values_mem_3D; + } + + paui32Client3DUpdateValue = s3DSyncData.paui32ClientUpdateValue; + ui32Client3DUpdateValueCount = s3DSyncData.ui32ClientUpdateValueCount; + pauiClient3DUpdateUFOAddress = s3DSyncData.pauiClientUpdateUFOAddress; + ui32Client3DUpdateCount = s3DSyncData.ui32ClientUpdateCount; + + if (!bKick3D) + { + paui32ClientPRUpdateValue = s3DSyncData.paui32ClientPRUpdateValue; + ui32ClientPRUpdateValueCount = s3DSyncData.ui32ClientPRUpdateValueCount; + pauiClientPRUpdateUFOAddress = s3DSyncData.pauiClientPRUpdateUFOAddress; + ui32ClientPRUpdateCount = s3DSyncData.ui32ClientPRUpdateCount; + } + } + + /* + * The hardware requires a PR to be submitted if there is a TA OOM. + * If we only have a TA, attach native checks and updates to the TA + * and 3D updates to the PR. + * If we have a TA and 3D, attach the native TA checks and updates + * to the TA and similarly for the 3D. + * Note that 'updates' includes the cleanup syncs for 'check' fence + * FDs, in addition to the update fence FD (if supplied). + * Currently, the client driver never kicks only the 3D, so we don't + * support that for the time being. 
+ */ + + { + if (bKickTA) + { + /* Attach checks and updates to TA */ + + /* Checks (from input fence) */ + if (ui32FenceTASyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append %d sync checkpoints to TA Fence (apsFenceSyncCheckpoints=<%p>)...", + __func__, + ui32FenceTASyncCheckpointCount, + (void*)apsFenceTASyncCheckpoints)); + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAFence, + ui32FenceTASyncCheckpointCount, + apsFenceTASyncCheckpoints); + if (!pauiClientTAFenceUFOAddress) + { + pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs; + } + CHKPT_DBG((PVR_DBG_ERROR, + "%s: {ui32ClientTAFenceCount was %d, now %d}", + __func__, ui32ClientTAFenceCount, + ui32ClientTAFenceCount + ui32FenceTASyncCheckpointCount)); + if (ui32ClientTAFenceCount == 0) + { + bTAFenceOnSyncCheckpointsOnly = IMG_TRUE; + } + ui32ClientTAFenceCount += ui32FenceTASyncCheckpointCount; + } + CHKPT_DBG((PVR_DBG_ERROR, + "%s: {ui32ClientTAFenceCount now %d}", + __func__, ui32ClientTAFenceCount)); + + if (psUpdateTASyncCheckpoint) + { + /* Update (from output fence) */ + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append 1 sync checkpoint<%p> (ID=%d) to TA Update...", + __func__, (void*)psUpdateTASyncCheckpoint, + SyncCheckpointGetId(psUpdateTASyncCheckpoint))); + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAUpdate, + 1, + &psUpdateTASyncCheckpoint); + if (!pauiClientTAUpdateUFOAddress) + { + pauiClientTAUpdateUFOAddress = psRenderContext->sSyncAddrListTAUpdate.pasFWAddrs; + } + ui32ClientTAUpdateCount++; + } + + if (!bKick3D && psUpdate3DSyncCheckpoint) + { + /* Attach update to the 3D (used for PR) Updates */ + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append 1 sync checkpoint<%p> (ID=%d) to 3D(PR) Update...", + __func__, (void*)psUpdate3DSyncCheckpoint, + SyncCheckpointGetId(psUpdate3DSyncCheckpoint))); + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, + 1, + &psUpdate3DSyncCheckpoint); + if 
(!pauiClientPRUpdateUFOAddress) + { + pauiClientPRUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; + } + ui32ClientPRUpdateCount++; + } + } + + if (bKick3D) + { + /* Attach checks and updates to the 3D */ + + /* Checks (from input fence) */ + if (ui32Fence3DSyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append %d sync checkpoints to 3D Fence...", + __func__, ui32Fence3DSyncCheckpointCount)); + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DFence, + ui32Fence3DSyncCheckpointCount, + apsFence3DSyncCheckpoints); + if (!pauiClient3DFenceUFOAddress) + { + pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs; + } + CHKPT_DBG((PVR_DBG_ERROR, + "%s: {ui32Client3DFenceCount was %d, now %d}", + __func__, ui32Client3DFenceCount, + ui32Client3DFenceCount + ui32Fence3DSyncCheckpointCount)); + if (ui32Client3DFenceCount == 0) + { + b3DFenceOnSyncCheckpointsOnly = IMG_TRUE; + } + ui32Client3DFenceCount += ui32Fence3DSyncCheckpointCount; + } + CHKPT_DBG((PVR_DBG_ERROR, + "%s: {ui32Client3DFenceCount was %d}", + __func__, ui32Client3DFenceCount)); + + if (psUpdate3DSyncCheckpoint) + { + /* Update (from output fence) */ + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append 1 sync checkpoint<%p> (ID=%d) to 3D Update...", + __func__, (void*)psUpdate3DSyncCheckpoint, + SyncCheckpointGetId(psUpdate3DSyncCheckpoint))); + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, + 1, + &psUpdate3DSyncCheckpoint); + if (!pauiClient3DUpdateUFOAddress) + { + pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; + } + ui32Client3DUpdateCount++; + } + } + + /* + * Relocate sync check points from the 3D fence that are + * external to the current process, to the TA fence. + * This avoids a sync lockup when dependent renders are + * submitted out-of-order and a PR must be scheduled. 
+ */ + if (bKickTA) + { + /* Search for external timeline dependencies */ + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Checking 3D fence for external sync points (%d)...", + __func__, ui32Fence3DSyncCheckpointCount)); + + for (i=0; i (ID=%d) to TA Fence...", + __func__, (void*)apsFence3DSyncCheckpoints[i], + SyncCheckpointGetId(apsFence3DSyncCheckpoints[i]))); + + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAFence, + 1, + &apsFence3DSyncCheckpoints[i]); + + if (!pauiClientTAFenceUFOAddress) + { + pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs; + } + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: {ui32ClientTAFenceCount was %d, now %d}", + __func__, + ui32ClientTAFenceCount, + ui32ClientTAFenceCount + 1)); + + if (ui32ClientTAFenceCount == 0) + { + bTAFenceOnSyncCheckpointsOnly = IMG_TRUE; + } + + ui32ClientTAFenceCount++; + } + } + } + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: (after pvr_sync) ui32ClientTAFenceCount=%d, " + "ui32ClientTAUpdateCount=%d, ui32Client3DFenceCount=%d, " + "ui32Client3DUpdateCount=%d, ui32ClientPRUpdateCount=%d,", + __func__, + ui32ClientTAFenceCount, ui32ClientTAUpdateCount, + ui32Client3DFenceCount, ui32Client3DUpdateCount, + ui32ClientPRUpdateCount)); + } + } + + if (ui32ClientTAFenceCount) + { + PVR_ASSERT(pauiClientTAFenceUFOAddress); + if (!bTAFenceOnSyncCheckpointsOnly) + { + PVR_ASSERT(paui32ClientTAFenceValue); + } + } + if (ui32ClientTAUpdateCount) + { + PVR_ASSERT(pauiClientTAUpdateUFOAddress); + if (ui32ClientTAUpdateValueCount>0) + { + PVR_ASSERT(paui32ClientTAUpdateValue); + } + } + if (ui32Client3DFenceCount) + { + PVR_ASSERT(pauiClient3DFenceUFOAddress); + PVR_ASSERT(b3DFenceOnSyncCheckpointsOnly); + } + if (ui32Client3DUpdateCount) + { + PVR_ASSERT(pauiClient3DUpdateUFOAddress); + if (ui32Client3DUpdateValueCount>0) + { + PVR_ASSERT(paui32Client3DUpdateValue); + } + } + if (ui32ClientPRUpdateCount) + { + PVR_ASSERT(pauiClientPRUpdateUFOAddress); + if (ui32ClientPRUpdateValueCount>0) + { + 
PVR_ASSERT(paui32ClientPRUpdateValue); + } + } + + } + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: ui32ClientTAFenceCount=%d, pauiClientTAFenceUFOAddress=<%p> Line ", + __func__, + ui32ClientTAFenceCount, + (void*)paui32ClientTAFenceValue)); + CHKPT_DBG((PVR_DBG_ERROR, + "%s: ui32ClientTAUpdateCount=%d, pauiClientTAUpdateUFOAddress=<%p> Line ", + __func__, + ui32ClientTAUpdateCount, + (void*)pauiClientTAUpdateUFOAddress)); +#if (ENABLE_TA3D_UFO_DUMP == 1) + DumpUfoList(ui32ClientTAFenceCount, ui32ClientTAUpdateCount, + ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 1 : 0), + ui32Client3DUpdateCount, + pauiClientTAFenceUFOAddress, paui32ClientTAFenceValue, + pauiClientTAUpdateUFOAddress, paui32ClientTAUpdateValue, + pauiClient3DFenceUFOAddress, NULL, + pauiClient3DUpdateUFOAddress, paui32Client3DUpdateValue); +#endif /* (ENABLE_TA3D_UFO_DUMP == 1) */ + + /* command size sanity check */ + + if (ui32TAFenceCount != ui32ClientTAFenceCount) + { + PVR_DPF((PVR_DBG_ERROR, "TA pre-calculated number of fences" + " is different than the actual number (%u != %u)", + ui32TAFenceCount, ui32ClientTAFenceCount)); + } + if (ui32TAUpdateCount != ui32ClientTAUpdateCount) + { + PVR_DPF((PVR_DBG_ERROR, "TA pre-calculated number of updates" + " is different than the actual number (%u != %u)", + ui32TAUpdateCount, ui32ClientTAUpdateCount)); + } + if (!bTestSLRAdd3DCheck && (ui323DFenceCount != ui32Client3DFenceCount)) + { + PVR_DPF((PVR_DBG_ERROR, "3D pre-calculated number of fences" + " is different than the actual number (%u != %u)", + ui323DFenceCount, ui32Client3DFenceCount)); + } + if (ui323DUpdateCount != ui32Client3DUpdateCount) + { + PVR_DPF((PVR_DBG_ERROR, "3D pre-calculated number of updates" + " is different than the actual number (%u != %u)", + ui323DUpdateCount, ui32Client3DUpdateCount)); + } + if (ui32PRUpdateCount != ui32ClientPRUpdateCount) + { + PVR_DPF((PVR_DBG_ERROR, "PR pre-calculated number of updates" + " is different than the actual number (%u != %u)", + ui32PRUpdateCount, 
ui32ClientPRUpdateCount)); + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + if (bKickTA || bKick3D || bAbort) + { + sWorkloadCharacteristics.sTA3D.ui32RenderTargetSize = ui32RenderTargetSize; + sWorkloadCharacteristics.sTA3D.ui32NumberOfDrawCalls = ui32NumberOfDrawCalls; + sWorkloadCharacteristics.sTA3D.ui32NumberOfIndices = ui32NumberOfIndices; + sWorkloadCharacteristics.sTA3D.ui32NumberOfMRTs = ui32NumberOfMRTs; + } +#endif + + /* Init and acquire to TA command if required */ + if (bKickTA) + { + RGX_SERVER_RC_TA_DATA *psTAData = &psRenderContext->sTAData; + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Prepare workload estimation */ + WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice, + &psRenderContext->sWorkEstData, + &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sDataTA, + RGXFWIF_CCB_CMD_TYPE_GEOM, + &sWorkloadCharacteristics, + ui64DeadlineInus, + &sWorkloadKickDataTA); +#endif + + /* Init the TA command helper */ + CHKPT_DBG((PVR_DBG_ERROR, + "%s: calling RGXCmdHelperInitCmdCCB(), ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d", + __func__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount)); + RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(psTAData->psServerCommonContext), + ui32ClientTAFenceCount, + pauiClientTAFenceUFOAddress, + paui32ClientTAFenceValue, + ui32ClientTAUpdateCount, + pauiClientTAUpdateUFOAddress, + paui32ClientTAUpdateValue, + ui32TACmdSize, + pui8TADMCmd, + RGXFWIF_CCB_CMD_TYPE_GEOM, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + &sWorkloadKickDataTA, +#else + NULL, +#endif + "TA", + bCCBStateOpen, + pasTACmdHelperData); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* The following is used to determine the offset of the command header containing + the workload estimation data so that can be accessed when the KCCB is read */ + ui32TACmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(pasTACmdHelperData); +#endif + + eError = 
RGXCmdHelperAcquireCmdCCB(CCB_CMD_HELPER_NUM_TA_COMMANDS, pasTACmdHelperData); + if (unlikely(eError != PVRSRV_OK)) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", + __func__, eError)); + goto fail_taacquirecmd; + } + else + { + ui32TACmdCount++; + } + } + + /* Only kick the 3D if required */ + if (bKickPR) + { + RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData; + + /* + The command helper doesn't know about the PR fence so create + the command with all the fences against it and later create + the PR command itself which _must_ come after the PR fence. + */ + sPRUFO.puiAddrUFO = uiPRFenceUFOAddress; + sPRUFO.ui32Value = ui32PRFenceValue; + + /* Init the PR fence command helper */ + CHKPT_DBG((PVR_DBG_ERROR, + "%s: calling RGXCmdHelperInitCmdCCB(), ui32Client3DFenceCount=%d", + __func__, ui32Client3DFenceCount)); + RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), + ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 1 : 0), + pauiClient3DFenceUFOAddress, + NULL, + 0, + NULL, + NULL, + sizeof(sPRUFO), + (IMG_UINT8*) &sPRUFO, + RGXFWIF_CCB_CMD_TYPE_FENCE_PR, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, + NULL, + "3D-PR-Fence", + bCCBStateOpen, + &pas3DCmdHelperData[ui323DCmdCount++]); + + /* Init the 3D PR command helper */ + /* + Updates for Android (fence sync and Timeline sync prim) are provided in the PR-update + if no 3D is present. This is so the timeline update cannot happen out of order with any + other 3D already in flight for the same timeline (PR-updates are done in the 3D cCCB). + This out of order timeline sync prim update could happen if we attach it to the TA update. 
+ */ + if (ui32ClientPRUpdateCount) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Line %d, ui32ClientPRUpdateCount=%d, " + "pauiClientPRUpdateUFOAddress=0x%x, " + "ui32ClientPRUpdateValueCount=%d, " + "paui32ClientPRUpdateValue=0x%x", + __func__, __LINE__, ui32ClientPRUpdateCount, + pauiClientPRUpdateUFOAddress->ui32Addr, + ui32ClientPRUpdateValueCount, + (ui32ClientPRUpdateValueCount == 0) ? PVRSRV_SYNC_CHECKPOINT_SIGNALLED : *paui32ClientPRUpdateValue)); + } + + if (!bUseCombined3DAnd3DPR) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: calling RGXCmdHelperInitCmdCCB(), ui32ClientPRUpdateCount=%d", + __func__, ui32ClientPRUpdateCount)); + RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), + 0, + NULL, + NULL, + ui32ClientPRUpdateCount, + pauiClientPRUpdateUFOAddress, + paui32ClientPRUpdateValue, + pui83DPRDMCmd ? ui323DPRCmdSize : ui323DCmdSize, // If the client has not provided a 3DPR command, the regular 3D command should be used instead + pui83DPRDMCmd ? pui83DPRDMCmd : pui83DDMCmd, + RGXFWIF_CCB_CMD_TYPE_3D_PR, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, + NULL, + "3D-PR", + bCCBStateOpen, + &pas3DCmdHelperData[ui323DCmdCount++]); + } + } + + if (bKick3D || bAbort) + { + RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData; + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Prepare workload estimation */ + WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice, + &psRenderContext->sWorkEstData, + &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sData3D, + RGXFWIF_CCB_CMD_TYPE_3D, + &sWorkloadCharacteristics, + ui64DeadlineInus, + &sWorkloadKickData3D); +#endif + + /* Init the 3D command helper */ + RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), + bKickTA ? 0 : ui32Client3DFenceCount, /* For a kick with a TA, the 3D fences are added before the PR command instead */ + bKickTA ? 
NULL : pauiClient3DFenceUFOAddress, + NULL, + ui32Client3DUpdateCount, + pauiClient3DUpdateUFOAddress, + paui32Client3DUpdateValue, + ui323DCmdSize, + pui83DDMCmd, + RGXFWIF_CCB_CMD_TYPE_3D, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + &sWorkloadKickData3D, +#else + NULL, +#endif + "3D", + bCCBStateOpen, + &pas3DCmdHelperData[ui323DCmdCount++]); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* The following are used to determine the offset of the command header containing the workload estimation + data so that can be accessed when the KCCB is read */ + ui323DCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(&pas3DCmdHelperData[ui323DCmdCount - 1]); + ui323DFullRenderCommandOffset = RGXCmdHelperGetCommandOffset(pas3DCmdHelperData, ui323DCmdCount - 1); +#endif + } + + /* Protect against array overflow in RGXCmdHelperAcquireCmdCCB() */ + if (unlikely(ui323DCmdCount > CCB_CMD_HELPER_NUM_3D_COMMANDS)) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", __func__, eError)); + goto fail_3dcmdinit; + } + + if (ui323DCmdCount) + { + PVR_ASSERT(bKickPR || bKick3D); + + /* Acquire space for all the 3D command(s) */ + eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount, pas3DCmdHelperData); + if (unlikely(eError != PVRSRV_OK)) + { + /* If RGXCmdHelperAcquireCmdCCB fails we skip the scheduling + * of a new TA command with the same Write offset in Kernel CCB. 
+ */ + CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", __func__, eError)); + goto fail_3dacquirecmd; + } + } + + /* + We should acquire the space in the kernel CCB here as after this point + we release the commands which will take operations on server syncs + which can't be undone + */ + + /* + Everything is ready to go now, release the commands + */ + if (ui32TACmdCount) + { + ui32TACmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext)); + RGXCmdHelperReleaseCmdCCB(ui32TACmdCount, + pasTACmdHelperData, + "TA", + FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + ui32TACmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext)); + + /* This checks if the command would wrap around at the end of the CCB and therefore would start at an + offset of 0 rather than the current command offset */ + if (ui32TACmdOffset < ui32TACmdOffsetWrapCheck) + { + ui32TACommandOffset = ui32TACmdOffset; + } + else + { + ui32TACommandOffset = 0; + } +#endif + } + + if (ui323DCmdCount) + { + ui323DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext)); + RGXCmdHelperReleaseCmdCCB(ui323DCmdCount, + pas3DCmdHelperData, + "3D", + FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + ui323DCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext)); + + if (ui323DCmdOffset < ui323DCmdOffsetWrapCheck) + { + ui323DCommandOffset = ui323DCmdOffset; + } + else + { + ui323DCommandOffset = 0; + } +#endif + } + + if (ui32TACmdCount) + { + IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr; + RGX_CLIENT_CCB *psClientCCB = 
FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext); + + sTACmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext); + sTACmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); + sTACmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); + + /* Add the Workload data into the KCCB kick */ +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Store the offset to the CCCB command header so that it can be referenced when the KCCB command reaches the FW */ + sTACmdKickData.ui32WorkEstCmdHeaderOffset = ui32TACommandOffset + ui32TACmdHeaderOffset; +#else + sTACmdKickData.ui32WorkEstCmdHeaderOffset = 0; +#endif + + eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &sTACmdKickData.apsCleanupCtl, + &sTACmdKickData.ui32NumCleanupCtl, + RGXFWIF_DM_GEOM, + bKickTA, + psKMHWRTDataSet, + psZSBuffer, + psMSAAScratchBuffer); + if (unlikely(eError != PVRSRV_OK)) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", + __func__, eError)); + goto fail_taattachcleanupctls; + } + + + HTBLOGK(HTB_SF_MAIN_KICK_TA, + sTACmdKickData.psContext, + ui32TACmdOffset + ); + + RGXSRV_HWPERF_ENQ(psRenderContext, + OSGetCurrentClientProcessIDKM(), + ui32FWCtx, + ui32ExtJobRef, + ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_TA, + iCheckTAFence, + iUpdateTAFence, + iUpdateTATimeline, + uiCheckTAFenceUID, + uiUpdateTAFenceUID, + ui64DeadlineInus, + WORKEST_CYCLES_PREDICTION_GET(sWorkloadKickDataTA)); + + if (!bUseSingleFWCommand) + { + /* Construct the kernel TA CCB command. 
*/ + RGXFWIF_KCCB_CMD sTAKCCBCmd; + sTAKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + sTAKCCBCmd.uCmdData.sCmdKickData = sTACmdKickData; + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice, + RGXFWIF_DM_GEOM, + &sTAKCCBCmd, + ui32ClientCacheOpSeqNum, + ui32PDumpFlags); + if (eError2 != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + } + + PVRGpuTraceEnqueueEvent(psRenderContext->psDeviceNode->pvDevice, + ui32FWCtx, ui32ExtJobRef, ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_TA3D); + } + + if (ui323DCmdCount) + { + RGXFWIF_KCCB_CMD s3DKCCBCmd = { 0 }; + IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr; + RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext); + + s3DCmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext); + s3DCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); + s3DCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); + + /* Add the Workload data into the KCCB kick */ +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Store the offset to the CCCB command header so that it can be referenced when the KCCB command reaches the FW */ + s3DCmdKickData.ui32WorkEstCmdHeaderOffset = ui323DCommandOffset + ui323DCmdHeaderOffset + ui323DFullRenderCommandOffset; +#else + s3DCmdKickData.ui32WorkEstCmdHeaderOffset = 0; +#endif + + eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &s3DCmdKickData.apsCleanupCtl, + &s3DCmdKickData.ui32NumCleanupCtl, + RGXFWIF_DM_3D, + bKick3D, + psKMHWRTDataSet, + psZSBuffer, + psMSAAScratchBuffer); + if (unlikely(eError != PVRSRV_OK)) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", + __func__, eError)); + goto fail_3dattachcleanupctls; + } + + HTBLOGK(HTB_SF_MAIN_KICK_3D, + s3DCmdKickData.psContext, + 
ui323DCmdOffset); + + RGXSRV_HWPERF_ENQ(psRenderContext, + OSGetCurrentClientProcessIDKM(), + ui32FWCtx, + ui32ExtJobRef, + ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_3D, + iCheck3DFence, + iUpdate3DFence, + iUpdate3DTimeline, + uiCheck3DFenceUID, + uiUpdate3DFenceUID, + ui64DeadlineInus, + WORKEST_CYCLES_PREDICTION_GET(sWorkloadKickData3D)); + + if (bUseSingleFWCommand) + { + /* Construct the kernel TA/3D CCB command. */ + s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK; + s3DKCCBCmd.uCmdData.sCombinedTA3DCmdKickData.sTACmdKickData = sTACmdKickData; + s3DKCCBCmd.uCmdData.sCombinedTA3DCmdKickData.s3DCmdKickData = s3DCmdKickData; + } + else + { + /* Construct the kernel 3D CCB command. */ + s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + s3DKCCBCmd.uCmdData.sCmdKickData = s3DCmdKickData; + } + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice, + RGXFWIF_DM_3D, + &s3DKCCBCmd, + ui32ClientCacheOpSeqNum, + ui32PDumpFlags); + if (eError2 != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + } + + /* + * Now check eError (which may have returned an error from our earlier calls + * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first + * so we check it now... 
+ */ + if (unlikely(eError != PVRSRV_OK )) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", + __func__, eError)); + goto fail_3dacquirecmd; + } + +#if defined(NO_HARDWARE) + /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ + if (psUpdateTASyncCheckpoint) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Signalling NOHW sync checkpoint [TA] <%p>, ID:%d, FwAddr=0x%x", + __func__, (void*)psUpdateTASyncCheckpoint, + SyncCheckpointGetId(psUpdateTASyncCheckpoint), + SyncCheckpointGetFirmwareAddr(psUpdateTASyncCheckpoint))); + SyncCheckpointSignalNoHW(psUpdateTASyncCheckpoint); + } + if (psTAFenceTimelineUpdateSync) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Updating NOHW sync prim [TA] <%p> to %d", + __func__, (void*)psTAFenceTimelineUpdateSync, + ui32TAFenceTimelineUpdateValue)); + SyncPrimNoHwUpdate(psTAFenceTimelineUpdateSync, ui32TAFenceTimelineUpdateValue); + } + + if (psUpdate3DSyncCheckpoint) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Signalling NOHW sync checkpoint [3D] <%p>, ID:%d, FwAddr=0x%x", + __func__, (void*)psUpdate3DSyncCheckpoint, + SyncCheckpointGetId(psUpdate3DSyncCheckpoint), + SyncCheckpointGetFirmwareAddr(psUpdate3DSyncCheckpoint))); + SyncCheckpointSignalNoHW(psUpdate3DSyncCheckpoint); + } + if (ps3DFenceTimelineUpdateSync) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Updating NOHW sync prim [3D] <%p> to %d", + __func__, (void*)ps3DFenceTimelineUpdateSync, + ui323DFenceTimelineUpdateValue)); + SyncPrimNoHwUpdate(ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue); + } + SyncCheckpointNoHWUpdateTimelines(NULL); + +#endif /* defined(NO_HARDWARE) */ + +#if defined(SUPPORT_BUFFER_SYNC) + if (psBufferSyncData) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: calling pvr_buffer_sync_kick_succeeded(psBufferSyncData=<%p>)...", + __func__, (void*)psBufferSyncData)); + pvr_buffer_sync_kick_succeeded(psBufferSyncData); + } + if (apsBufferFenceSyncCheckpoints) + { + kfree(apsBufferFenceSyncCheckpoints); + } +#endif /* 
defined(SUPPORT_BUFFER_SYNC) */ + + if (piUpdateTAFence) + { + *piUpdateTAFence = iUpdateTAFence; + } + if (piUpdate3DFence) + { + *piUpdate3DFence = iUpdate3DFence; + } + + /* Drop the references taken on the sync checkpoints in the + * resolved input fence. + * NOTE: 3D fence is always submitted, either via 3D or TA(PR). + */ + if (bKickTA) + { + SyncAddrListDeRefCheckpoints(ui32FenceTASyncCheckpointCount, apsFenceTASyncCheckpoints); + } + SyncAddrListDeRefCheckpoints(ui32Fence3DSyncCheckpointCount, apsFence3DSyncCheckpoints); + + if (pvTAUpdateFenceFinaliseData && (iUpdateTAFence != PVRSRV_NO_FENCE)) + { + SyncCheckpointFinaliseFence(psRenderContext->psDeviceNode, iUpdateTAFence, + pvTAUpdateFenceFinaliseData, + psUpdateTASyncCheckpoint, szFenceNameTA); + } + if (pv3DUpdateFenceFinaliseData && (iUpdate3DFence != PVRSRV_NO_FENCE)) + { + SyncCheckpointFinaliseFence(psRenderContext->psDeviceNode, iUpdate3DFence, + pv3DUpdateFenceFinaliseData, + psUpdate3DSyncCheckpoint, szFenceName3D); + } + + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceTASyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceTASyncCheckpoints); + } + if (apsFence3DSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFence3DSyncCheckpoints); + } + + if (sTASyncData.paui32ClientUpdateValue) + { + OSFreeMem(sTASyncData.paui32ClientUpdateValue); + } + if (s3DSyncData.paui32ClientUpdateValue) + { + OSFreeMem(s3DSyncData.paui32ClientUpdateValue); + } + +#if defined(SUPPORT_VALIDATION) + if (bTestSLRAdd3DCheck) + { + SyncCheckpointFree(psDummySyncCheckpoint); + } +#endif + OSLockRelease(psRenderContext->hLock); + + return PVRSRV_OK; + +fail_3dattachcleanupctls: +fail_taattachcleanupctls: +fail_3dacquirecmd: +fail_3dcmdinit: +fail_taacquirecmd: + SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrListTAFence); + SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, 
&psRenderContext->sSyncAddrListTAUpdate); + SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrList3DFence); + SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrList3DUpdate); + /* Where a TA-only kick (ie no 3D) is submitted, the PR update will make use of the unused 3DUpdate list. + * If this has happened, performing a rollback on pauiClientPRUpdateUFOAddress will simply repeat what + * has already been done for the sSyncAddrList3DUpdate above and result in a double decrement of the + * sync checkpoint's hEnqueuedCCBCount, so we need to check before rolling back the PRUpdate. + */ + if (pauiClientPRUpdateUFOAddress && (pauiClientPRUpdateUFOAddress != psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs)) + { + SyncCheckpointRollbackFromUFO(psRenderContext->psDeviceNode, pauiClientPRUpdateUFOAddress->ui32Addr); + } + +fail_alloc_update_values_mem_3D: +fail_alloc_update_values_mem_TA: + if (iUpdateTAFence != PVRSRV_NO_FENCE) + { + SyncCheckpointRollbackFenceData(iUpdateTAFence, pvTAUpdateFenceFinaliseData); + } + if (iUpdate3DFence != PVRSRV_NO_FENCE) + { + SyncCheckpointRollbackFenceData(iUpdate3DFence, pv3DUpdateFenceFinaliseData); + } +fail_create_output_fence: + /* Drop the references taken on the sync checkpoints in the + * resolved input fence. + * NOTE: 3D fence is always submitted, either via 3D or TA(PR). 
+ */ + if (bKickTA) + { + SyncAddrListDeRefCheckpoints(ui32FenceTASyncCheckpointCount, apsFenceTASyncCheckpoints); + } + SyncAddrListDeRefCheckpoints(ui32Fence3DSyncCheckpointCount, apsFence3DSyncCheckpoints); + +err_pr_fence_address: +err_populate_sync_addr_list_3d_update: +err_populate_sync_addr_list_3d_fence: +err_populate_sync_addr_list_ta_update: +err_populate_sync_addr_list_ta_fence: +err_not_enough_space: +fail_resolve_input_fence: + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceTASyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceTASyncCheckpoints); + } + if (apsFence3DSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFence3DSyncCheckpoints); + } + if (sTASyncData.paui32ClientUpdateValue) + { + OSFreeMem(sTASyncData.paui32ClientUpdateValue); + } + if (s3DSyncData.paui32ClientUpdateValue) + { + OSFreeMem(s3DSyncData.paui32ClientUpdateValue); + } +#if defined(SUPPORT_VALIDATION) + if (bTestSLRAdd3DCheck) + { + SyncCheckpointFree(psDummySyncCheckpoint); + } +#endif +#if defined(SUPPORT_BUFFER_SYNC) + if (psBufferSyncData) + { + pvr_buffer_sync_kick_failed(psBufferSyncData); + } + if (apsBufferFenceSyncCheckpoints) + { + kfree(apsBufferFenceSyncCheckpoints); + } +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + PVR_ASSERT(eError != PVRSRV_OK); + OSLockRelease(psRenderContext->hLock); + return eError; +} + +PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + RGX_SERVER_RENDER_CONTEXT *psRenderContext, + IMG_UINT32 ui32Priority) +{ + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + + OSLockAcquire(psRenderContext->hLock); + + if (psRenderContext->sTAData.ui32Priority != ui32Priority) + { + eError = ContextSetPriority(psRenderContext->sTAData.psServerCommonContext, + psConnection, + psRenderContext->psDeviceNode->pvDevice, + ui32Priority, + RGXFWIF_DM_GEOM); + if (eError != PVRSRV_OK) + { + 
PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to set the priority of the TA part of the rendercontext (%s)", + __func__, PVRSRVGetErrorString(eError))); + goto fail_tacontext; + } + psRenderContext->sTAData.ui32Priority = ui32Priority; + } + + if (psRenderContext->s3DData.ui32Priority != ui32Priority) + { + eError = ContextSetPriority(psRenderContext->s3DData.psServerCommonContext, + psConnection, + psRenderContext->psDeviceNode->pvDevice, + ui32Priority, + RGXFWIF_DM_3D); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to set the priority of the 3D part of the rendercontext (%s)", + __func__, PVRSRVGetErrorString(eError))); + goto fail_3dcontext; + } + psRenderContext->s3DData.ui32Priority = ui32Priority; + } + + OSLockRelease(psRenderContext->hLock); + return PVRSRV_OK; + +fail_3dcontext: +fail_tacontext: + OSLockRelease(psRenderContext->hLock); + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + + +PVRSRV_ERROR PVRSRVRGXSetRenderContextPropertyKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, + RGX_CONTEXT_PROPERTY eContextProperty, + IMG_UINT64 ui64Input, + IMG_UINT64 *pui64Output) +{ + PVRSRV_ERROR eError; + PVRSRV_ERROR eError2 = PVRSRV_OK; + + switch (eContextProperty) + { + case RGX_CONTEXT_PROPERTY_FLAGS: + { + OSLockAcquire(psRenderContext->hLock); + eError = FWCommonContextSetFlags(psRenderContext->sTAData.psServerCommonContext, + (IMG_UINT32)ui64Input); + if (eError == PVRSRV_OK) + { + eError2 = FWCommonContextSetFlags(psRenderContext->s3DData.psServerCommonContext, + (IMG_UINT32)ui64Input); + } + OSLockRelease(psRenderContext->hLock); + PVR_LOG_IF_ERROR(eError, "FWCommonContextSetFlags eError"); + PVR_LOG_IF_ERROR(eError2, "FWCommonContextSetFlags eError2"); + break; + } + + default: + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty)); + eError = PVRSRV_ERROR_NOT_SUPPORTED; + } + } + + return eError; +} + + +/* + * PVRSRVRGXGetLastRenderContextResetReasonKM + 
*/ +PVRSRV_ERROR PVRSRVRGXGetLastRenderContextResetReasonKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, + IMG_UINT32 *peLastResetReason, + IMG_UINT32 *pui32LastResetJobRef) +{ + RGX_SERVER_RC_TA_DATA *psRenderCtxTAData; + RGX_SERVER_RC_3D_DATA *psRenderCtx3DData; + RGX_SERVER_COMMON_CONTEXT *psCurrentServerTACommonCtx, *psCurrentServer3DCommonCtx; + RGXFWIF_CONTEXT_RESET_REASON eLastTAResetReason, eLast3DResetReason; + IMG_UINT32 ui32LastTAResetJobRef, ui32Last3DResetJobRef; + + PVR_ASSERT(psRenderContext != NULL); + PVR_ASSERT(peLastResetReason != NULL); + PVR_ASSERT(pui32LastResetJobRef != NULL); + + psRenderCtxTAData = &(psRenderContext->sTAData); + psCurrentServerTACommonCtx = psRenderCtxTAData->psServerCommonContext; + psRenderCtx3DData = &(psRenderContext->s3DData); + psCurrentServer3DCommonCtx = psRenderCtx3DData->psServerCommonContext; + + /* Get the last reset reasons from both the TA and 3D so they are reset... */ + eLastTAResetReason = FWCommonContextGetLastResetReason(psCurrentServerTACommonCtx, &ui32LastTAResetJobRef); + eLast3DResetReason = FWCommonContextGetLastResetReason(psCurrentServer3DCommonCtx, &ui32Last3DResetJobRef); + + /* Combine the reset reason from TA and 3D into one... 
*/ + *peLastResetReason = (IMG_UINT32) eLast3DResetReason; + *pui32LastResetJobRef = ui32Last3DResetJobRef; + if (eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_NONE || + ((eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_LOCKUP || + eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING) && + (eLastTAResetReason == RGXFWIF_CONTEXT_RESET_REASON_GUILTY_LOCKUP || + eLastTAResetReason == RGXFWIF_CONTEXT_RESET_REASON_GUILTY_OVERRUNING)) || + ((eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_LOCKUP || + eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING) && + (eLastTAResetReason == RGXFWIF_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH))) + { + *peLastResetReason = eLastTAResetReason; + *pui32LastResetJobRef = ui32LastTAResetJobRef; + } + + return PVRSRV_OK; +} + +void DumpRenderCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel) +{ + DLLIST_NODE *psNode, *psNext; + OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock); + dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psNode, psNext) + { + RGX_SERVER_RENDER_CONTEXT *psCurrentServerRenderCtx = + IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode); + + DumpFWCommonContextInfo(psCurrentServerRenderCtx->sTAData.psServerCommonContext, + pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + DumpFWCommonContextInfo(psCurrentServerRenderCtx->s3DData.psServerCommonContext, + pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + } + OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock); +} + +IMG_UINT32 CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + DLLIST_NODE *psNode, *psNext; + IMG_UINT32 ui32ContextBitMask = 0; + + OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock); + + dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psNode, psNext) + { + RGX_SERVER_RENDER_CONTEXT *psCurrentServerRenderCtx = + IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, 
sListNode); + if (NULL != psCurrentServerRenderCtx->sTAData.psServerCommonContext) + { + if (CheckStalledClientCommonContext(psCurrentServerRenderCtx->sTAData.psServerCommonContext, RGX_KICK_TYPE_DM_TA) == PVRSRV_ERROR_CCCB_STALLED) + { + ui32ContextBitMask |= RGX_KICK_TYPE_DM_TA; + } + } + + if (NULL != psCurrentServerRenderCtx->s3DData.psServerCommonContext) + { + if (CheckStalledClientCommonContext(psCurrentServerRenderCtx->s3DData.psServerCommonContext, RGX_KICK_TYPE_DM_3D) == PVRSRV_ERROR_CCCB_STALLED) + { + ui32ContextBitMask |= RGX_KICK_TYPE_DM_3D; + } + } + } + + OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock); + return ui32ContextBitMask; +} + +/* + * RGXRenderContextStalledKM + */ +PVRSRV_ERROR RGXRenderContextStalledKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext) +{ + RGXCheckForStalledClientContexts((PVRSRV_RGXDEV_INFO *) psRenderContext->psDeviceNode->pvDevice, IMG_TRUE); + return PVRSRV_OK; +} + +/****************************************************************************** + End of file (rgxta3d.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxta3d.h b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxta3d.h new file mode 100644 index 000000000000..6f56a22719fb --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxta3d.h @@ -0,0 +1,508 @@ +/*************************************************************************/ /*! +@File +@Title RGX TA and 3D Functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the RGX TA and 3D Functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXTA3D_H +#define RGXTA3D_H + +#include "devicemem.h" +#include "devicemem_server.h" +#include "device.h" +#include "rgxdevice.h" +#include "rgx_fwif_shared.h" +#include "rgx_fwif_resetframework.h" +#include "sync_server.h" +#include "connection_server.h" +#include "rgxdebug.h" +#include "pvr_notifier.h" + +typedef struct _RGX_SERVER_RENDER_CONTEXT_ RGX_SERVER_RENDER_CONTEXT; +typedef struct _RGX_FREELIST_ RGX_FREELIST; +typedef struct _RGX_PMR_NODE_ RGX_PMR_NODE; + +/***************************************************************************** + * The Design of Data Storage System for Render Targets * + * ==================================================== * + * Relevant for * + * understanding RGXCreateHWRTDataSet & RGXDestroyHWRTDataSet * + * * + * * + * +=========================================+ * + * | RenderTargetDataSet | * + * +---------------|---------|---------------+ * + * | | * + * V V * + * +- - - - - - - - - - - - + +- - - - - - - - - - - - + * + * | KM_HW_RT_DATA_HANDLE_0 | | KM_HW_RT_DATA_HANDLE_1 | * + * +- - -|- - - - - - - - - + +- - - - - - - - - | - - + * + * | | * + * | | [UM]Client * + * ------|-----------------------------------------|----------------------- * + * | | Bridge * + * ------|-----------------------------------------|----------------------- * + * | | [KM]Server * + * | | * + * | KM-ptr | KM-ptr * + * V V * + * +====================+ +====================+ * + * | KM_HW_RT_DATA_0 | | KM_HW_RT_DATA_1 | * + * +-----|------------|-+ +-|------------|-----+ * + * | | | | * + * | | | | * + * | | | | * + * | | | | * + * | | KM-ptr | KM-ptr | * + * | V V | * + * | +==========================+ | * + * | | HW_RT_DATA_COMMON_COOKIE | | * + * | +--------------------------+ | * + * | | | * + * | | | * + * ------|-------------------|---------------------|----------------------- * + * | | | [FW]Firmware * + * | | | * + * | FW-addr | | FW-addr * + * V | 
V * + * +===============+ | +===============+ * + * | HW_RT_DATA_0 | | | HW_RT_DATA_1 | * + * +------------|--+ | +--|------------+ * + * | | | * + * | FW-addr | FW-addr | FW-addr * + * V V V * + * +=========================================+ * + * | HW_RT_DATA_COMMON | * + * +-----------------------------------------+ * + * * + *****************************************************************************/ + +typedef struct _RGX_HWRTDATA_COMMON_COOKIE_ +{ + DEVMEM_MEMDESC *psHWRTDataCommonFwMemDesc; + RGXFWIF_DEV_VIRTADDR sHWRTDataCommonFwAddr; + IMG_UINT32 ui32RefCount; + +} RGX_HWRTDATA_COMMON_COOKIE; + +typedef struct _RGX_KM_HW_RT_DATASET_ +{ + RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie; + + PVRSRV_DEVICE_NODE *psDeviceNode; + RGXFWIF_DEV_VIRTADDR sHWRTDataFwAddr; + + DEVMEM_MEMDESC *psHWRTDataFwMemDesc; + DEVMEM_MEMDESC *psRTArrayFwMemDesc; + DEVMEM_MEMDESC *psRendersAccArrayFwMemDesc; + + RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS]; +#if !defined(SUPPORT_SHADOW_FREELISTS) + DLLIST_NODE sNodeHWRTData; +#endif + +} RGX_KM_HW_RT_DATASET; + +struct _RGX_FREELIST_ { + PVRSRV_RGXDEV_INFO *psDevInfo; + + /* Free list PMR */ + PMR *psFreeListPMR; + IMG_DEVMEM_OFFSET_T uiFreeListPMROffset; + + /* Freelist config */ + IMG_UINT32 ui32MaxFLPages; + IMG_UINT32 ui32InitFLPages; + IMG_UINT32 ui32CurrentFLPages; + IMG_UINT32 ui32GrowFLPages; + IMG_UINT32 ui32ReadyFLPages; + IMG_UINT32 ui32GrowThreshold; /* Percentage of FL memory used that should trigger a new grow request */ + IMG_UINT32 ui32FreelistID; + IMG_UINT32 ui32FreelistGlobalID; /* related global freelist for this freelist */ + IMG_UINT64 ui64FreelistChecksum; /* checksum over freelist content */ + IMG_BOOL bCheckFreelist; /* freelist check enabled */ + IMG_UINT32 ui32RefCount; /* freelist reference counting */ + + IMG_UINT32 ui32NumGrowReqByApp; /* Total number of grow requests by Application */ + IMG_UINT32 ui32NumGrowReqByFW; /* Total Number of grow requests by Firmware */ + IMG_UINT32 
ui32NumHighPages; /* High Mark of pages in the freelist */ + + IMG_PID ownerPid; /* Pid of the owner of the list */ + + /* Memory Blocks */ + DLLIST_NODE sMemoryBlockHead; + DLLIST_NODE sMemoryBlockInitHead; + DLLIST_NODE sNode; +#if !defined(SUPPORT_SHADOW_FREELISTS) + /* HWRTData nodes linked to local freelist */ + DLLIST_NODE sNodeHWRTDataHead; +#endif + + /* FW data structures */ + DEVMEM_MEMDESC *psFWFreelistMemDesc; + RGXFWIF_DEV_VIRTADDR sFreeListFWDevVAddr; +}; + +struct _RGX_PMR_NODE_ { + RGX_FREELIST *psFreeList; + PMR *psPMR; + PMR_PAGELIST *psPageList; + DLLIST_NODE sMemoryBlock; + IMG_UINT32 ui32NumPages; + IMG_BOOL bFirstPageMissing; +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + RI_HANDLE hRIHandle; +#endif +}; + +typedef struct { + PVRSRV_RGXDEV_INFO *psDevInfo; + DEVMEM_MEMDESC *psFWZSBufferMemDesc; + RGXFWIF_DEV_VIRTADDR sZSBufferFWDevVAddr; + + DEVMEMINT_RESERVATION *psReservation; + PMR *psPMR; + DEVMEMINT_MAPPING *psMapping; + PVRSRV_MEMALLOCFLAGS_T uiMapFlags; + IMG_UINT32 ui32ZSBufferID; + IMG_UINT32 ui32RefCount; + IMG_BOOL bOnDemand; + + IMG_BOOL ui32NumReqByApp; /* Number of Backing Requests from Application */ + IMG_BOOL ui32NumReqByFW; /* Number of Backing Requests from Firmware */ + + IMG_PID owner; + + DLLIST_NODE sNode; +}RGX_ZSBUFFER_DATA; + +typedef struct { + RGX_ZSBUFFER_DATA *psZSBuffer; +} RGX_POPULATION; + +/* Dump the physical pages of a freelist */ +IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList); + + +/* Create set of HWRTData(s) */ +PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEV_VIRTADDR psVHeapTableDevVAddr, + IMG_DEV_VIRTADDR psPMMListDevVAddr_0, + IMG_DEV_VIRTADDR psPMMListDevVAddr_1, + RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS], + IMG_UINT32 ui32ScreenPixelMax, + IMG_UINT64 ui64MultiSampleCtl, + IMG_UINT64 ui64FlippedMultiSampleCtl, + IMG_UINT32 ui32TPCStride, + IMG_DEV_VIRTADDR sTailPtrsDevVAddr, + IMG_UINT32 ui32TPCSize, + IMG_UINT32 
ui32TEScreen, + IMG_UINT32 ui32TEAA, + IMG_UINT32 ui32TEMTILE1, + IMG_UINT32 ui32TEMTILE2, + IMG_UINT32 ui32MTileStride, + IMG_UINT32 ui32ISPMergeLowerX, + IMG_UINT32 ui32ISPMergeLowerY, + IMG_UINT32 ui32ISPMergeUpperX, + IMG_UINT32 ui32ISPMergeUpperY, + IMG_UINT32 ui32ISPMergeScaleX, + IMG_UINT32 ui32ISPMergeScaleY, + IMG_DEV_VIRTADDR sMacrotileArrayDevVAddr_0, + IMG_DEV_VIRTADDR sMacrotileArrayDevVAddr_1, + IMG_DEV_VIRTADDR sRgnHeaderDevVAddr_0, + IMG_DEV_VIRTADDR sRgnHeaderDevVAddr_1, + IMG_DEV_VIRTADDR sRTCDevVAddr, + IMG_UINT64 uiRgnHeaderSize, + IMG_UINT32 ui32ISPMtileSize, + IMG_UINT16 ui16MaxRTs, + RGX_KM_HW_RT_DATASET **ppsKMHWRTDataSet_0, + RGX_KM_HW_RT_DATASET **ppsKMHWRTDataSet_1); + +/* Destroy HWRTDataSet */ +PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet); + +/* + RGXCreateZSBufferKM +*/ +PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_RESERVATION *psReservation, + PMR *psPMR, + PVRSRV_MEMALLOCFLAGS_T uiMapFlags, + RGX_ZSBUFFER_DATA **ppsZSBuffer); + +/* + RGXDestroyZSBufferKM +*/ +PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer); + + +/* + * RGXBackingZSBuffer() + * + * Backs ZS-Buffer with physical pages + */ +PVRSRV_ERROR +RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer); + +/* + * RGXPopulateZSBufferKM() + * + * Backs ZS-Buffer with physical pages (called by Bridge calls) + */ +PVRSRV_ERROR RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer, + RGX_POPULATION **ppsPopulation); + +/* + * RGXUnbackingZSBuffer() + * + * Frees ZS-Buffer's physical pages + */ +PVRSRV_ERROR RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer); + +/* + * RGXUnpopulateZSBufferKM() + * + * Frees ZS-Buffer's physical pages (called by Bridge calls) + */ +PVRSRV_ERROR RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation); + +/* + RGXProcessRequestZSBufferBacking +*/ +void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32ZSBufferID); + +/* 
+ RGXProcessRequestZSBufferUnbacking +*/ +void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32ZSBufferID); + +/* + RGXGrowFreeList +*/ +PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, + IMG_UINT32 ui32NumPages, + PDLLIST_NODE pListHeader); + +/* Create free list */ +PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32MaxFLPages, + IMG_UINT32 ui32InitFLPages, + IMG_UINT32 ui32GrowFLPages, + IMG_UINT32 ui32GrowParamThreshold, + RGX_FREELIST *psGlobalFreeList, + IMG_BOOL bCheckFreelist, + IMG_DEV_VIRTADDR sFreeListDevVAddr, + PMR *psFreeListPMR, + IMG_DEVMEM_OFFSET_T uiFreeListPMROffset, + RGX_FREELIST **ppsFreeList); + +/* Destroy free list */ +PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList); + +/* + RGXProcessRequestGrow +*/ +void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32FreelistID); + + +/* Reconstruct free list after Hardware Recovery */ +void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32FreelistsCount, + IMG_UINT32 *paui32Freelists); + +/*! 
+******************************************************************************* + + @Function PVRSRVRGXCreateRenderContextKM + + @Description + Server-side implementation of RGXCreateRenderContext + + @Input psConnection - + @Input psDeviceNode - device node + @Input ui32Priority - context priority + @Input sVDMCallStackAddr - VDM call stack device virtual address + @Input ui32FrameworkCommandSize - framework command size + @Input pabyFrameworkCommand - ptr to framework command + @Input hMemCtxPrivData - memory context private data + @Input ui32StaticRenderContextStateSize - size of fixed render state + @Input pStaticRenderContextState - ptr to fixed render state buffer + @Input ui32PackedCCBSizeU8888 : + ui8TACCBAllocSizeLog2 - TA CCB size + ui8TACCBMaxAllocSizeLog2 - maximum size to which TA CCB can grow + ui83DCCBAllocSizeLog2 - 3D CCB size + ui83DCCBMaxAllocSizeLog2 - maximum size to which 3D CCB can grow + @Input ui32ContextFlags - flags which specify properties of the context + @Output ppsRenderContext - + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32Priority, + IMG_DEV_VIRTADDR sVDMCallStackAddr, + IMG_UINT32 ui32FrameworkCommandSize, + IMG_PBYTE pabyFrameworkCommand, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32StaticRenderContextStateSize, + IMG_PBYTE pStaticRenderContextState, + IMG_UINT32 ui32PackedCCBSizeU8888, + IMG_UINT32 ui32ContextFlags, + IMG_UINT64 ui64RobustnessAddress, + IMG_UINT32 ui32MaxTADeadlineMS, + IMG_UINT32 ui32Max3DDeadlineMS, + RGX_SERVER_RENDER_CONTEXT **ppsRenderContext); + + +/*! 
+******************************************************************************* + + @Function PVRSRVRGXDestroyRenderContextKM + + @Description + Server-side implementation of RGXDestroyRenderContext + + @Input psRenderContext - + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext); + + +/*! +******************************************************************************* + + @Function PVRSRVRGXKickTA3DKM + + @Description + Server-side implementation of RGXKickTA3D + + @Input psRTDataCleanup - RT data associated with the kick (or NULL) + @Input psZBuffer - Z-buffer associated with the kick (or NULL) + @Input psSBuffer - S-buffer associated with the kick (or NULL) + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32ClientTAFenceCount, + SYNC_PRIMITIVE_BLOCK **apsClientTAFenceSyncPrimBlock, + IMG_UINT32 *paui32ClientTAFenceSyncOffset, + IMG_UINT32 *paui32ClientTAFenceValue, + IMG_UINT32 ui32ClientTAUpdateCount, + SYNC_PRIMITIVE_BLOCK **apsClientUpdateSyncPrimBlock, + IMG_UINT32 *paui32ClientUpdateSyncOffset, + IMG_UINT32 *paui32ClientTAUpdateValue, + IMG_UINT32 ui32Client3DUpdateCount, + SYNC_PRIMITIVE_BLOCK **apsClient3DUpdateSyncPrimBlock, + IMG_UINT32 *paui32Client3DUpdateSyncOffset, + IMG_UINT32 *paui32Client3DUpdateValue, + SYNC_PRIMITIVE_BLOCK *psPRSyncPrimBlock, + IMG_UINT32 ui32PRSyncOffset, + IMG_UINT32 ui32PRFenceValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE iUpdateTimeline, + PVRSRV_FENCE *piUpdateFence, + IMG_CHAR szFenceName[PVRSRV_SYNC_NAME_LENGTH], + PVRSRV_FENCE iCheckFence3D, + PVRSRV_TIMELINE iUpdateTimeline3D, + PVRSRV_FENCE *piUpdateFence3D, + IMG_CHAR szFenceName3D[PVRSRV_SYNC_NAME_LENGTH], + IMG_UINT32 
ui32TACmdSize, + IMG_PBYTE pui8TADMCmd, + IMG_UINT32 ui323DPRCmdSize, + IMG_PBYTE pui83DPRDMCmd, + IMG_UINT32 ui323DCmdSize, + IMG_PBYTE pui83DDMCmd, + IMG_UINT32 ui32ExtJobRef, + IMG_BOOL bKickTA, + IMG_BOOL bKickPR, + IMG_BOOL bKick3D, + IMG_BOOL bAbort, + IMG_UINT32 ui32PDumpFlags, + RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, + RGX_ZSBUFFER_DATA *psZSBuffer, + RGX_ZSBUFFER_DATA *psMSAAScratchBuffer, + IMG_UINT32 ui32SyncPMRCount, + IMG_UINT32 *paui32SyncPMRFlags, + PMR **ppsSyncPMRs, + IMG_UINT32 ui32RenderTargetSize, + IMG_UINT32 ui32NumberOfDrawCalls, + IMG_UINT32 ui32NumberOfIndices, + IMG_UINT32 ui32NumberOfMRTs, + IMG_UINT64 ui64DeadlineInus); + + +PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDevNode, + RGX_SERVER_RENDER_CONTEXT *psRenderContext, + IMG_UINT32 ui32Priority); + +PVRSRV_ERROR PVRSRVRGXSetRenderContextPropertyKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, + RGX_CONTEXT_PROPERTY eContextProperty, + IMG_UINT64 ui64Input, + IMG_UINT64 *pui64Output); + +PVRSRV_ERROR PVRSRVRGXGetLastRenderContextResetReasonKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, + IMG_UINT32 *peLastResetReason, + IMG_UINT32 *pui32LastResetJobRef); + +/* Debug - Dump debug info of render contexts on this device */ +void DumpRenderCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel); + +/* Debug/Watchdog - check if client contexts are stalled */ +IMG_UINT32 CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo); + +PVRSRV_ERROR RGXRenderContextStalledKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext); + +#endif /* RGXTA3D_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxtdmtransfer.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxtdmtransfer.c new file mode 100644 index 000000000000..88aa1afcf898 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxtdmtransfer.c @@ -0,0 
+1,1297 @@ +/*************************************************************************/ /*! +@File rgxtdmtransfer.c +@Title Device specific TDM transfer queue routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ /**************************************************************************/

#include "pdump_km.h"
#include "rgxdevice.h"
#include "rgxccb.h"
#include "rgxutils.h"
#include "rgxfwutils.h"
#include "rgxtdmtransfer.h"
#include "rgx_tq_shared.h"
#include "rgxmem.h"
#include "allocmem.h"
#include "devicemem.h"
#include "devicemem_pdump.h"
#include "osfunc.h"
#include "pvr_debug.h"
#include "pvrsrv.h"
#include "rgx_fwif_resetframework.h"
#include "rgx_memallocflags.h"
#include "rgxhwperf.h"
#include "ospvr_gputrace.h"
#include "htbuffer.h"
#include "rgxshader.h"

/* NOTE(review): "pdump_km.h" is included a second time here; harmless
 * (header is guarded) but could be removed. */
#include "pdump_km.h"

#include "sync_server.h"
#include "sync_internal.h"
#include "sync.h"

#if defined(SUPPORT_BUFFER_SYNC)
#include "pvr_buffer_sync.h"
#endif

#if defined(SUPPORT_WORKLOAD_ESTIMATION)
#include "rgxworkest.h"
#endif

/* Enable this to dump the compiled list of UFOs prior to kick call */
#define ENABLE_TDM_UFO_DUMP 0

//#define TDM_CHECKPOINT_DEBUG 1

/* CHKPT_DBG compiles to nothing unless TDM_CHECKPOINT_DEBUG is defined. */
#if defined(TDM_CHECKPOINT_DEBUG)
#define CHKPT_DBG(X) PVR_DPF(X)
#else
#define CHKPT_DBG(X)
#endif

/* Per-context TDM data: the FW common context plus the priority it was
 * created with and (optionally) the buffer-sync context used to resolve
 * PMR-attached fences. */
typedef struct {
	RGX_SERVER_COMMON_CONTEXT * psServerCommonContext;
	IMG_UINT32 ui32Priority;
#if defined(SUPPORT_BUFFER_SYNC)
	struct pvr_buffer_sync_context *psBufferSyncContext;
#endif
} RGX_SERVER_TQ_TDM_DATA;

/* Server-side TDM transfer context. Linked into the device's
 * sTDMCtxtListHead list (guarded by hTDMCtxListLock); hLock serialises
 * submission/priority/property operations on this context. */
struct _RGX_SERVER_TQ_TDM_CONTEXT_ {
	PVRSRV_DEVICE_NODE *psDeviceNode;
	DEVMEM_MEMDESC *psFWFrameworkMemDesc;
	DEVMEM_MEMDESC *psFWTransferContextMemDesc;
	IMG_UINT32 ui32Flags;
	RGX_SERVER_TQ_TDM_DATA sTDMData;
	DLLIST_NODE sListNode;
	SYNC_ADDR_LIST sSyncAddrListFence;
	SYNC_ADDR_LIST sSyncAddrListUpdate;
	POS_LOCK hLock;
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
	WORKEST_HOST_DATA sWorkEstData;
#endif
};

/*
 * Allocate the FW common context (and, when enabled, the buffer-sync
 * context) backing a TDM transfer context.
 *
 * On success psTDMData owns psServerCommonContext (and
 * psBufferSyncContext). On failure everything allocated here is released
 * via the goto-cleanup chain and an error is returned.
 */
static PVRSRV_ERROR _CreateTDMTransferContext(
	CONNECTION_DATA      *psConnection,
	PVRSRV_DEVICE_NODE   *psDeviceNode,
	DEVMEM_MEMDESC       *psAllocatedMemDesc,
	IMG_UINT32            ui32AllocatedOffset,
	DEVMEM_MEMDESC       *psFWMemContextMemDesc,
	IMG_UINT32            ui32Priority,
	RGX_COMMON_CONTEXT_INFO *psInfo,
	RGX_SERVER_TQ_TDM_DATA  *psTDMData,
	IMG_UINT32            ui32CCBAllocSizeLog2,
	IMG_UINT32            ui32CCBMaxAllocSizeLog2,
	IMG_UINT32            ui32ContextFlags)
{
	PVRSRV_ERROR eError;

#if defined(SUPPORT_BUFFER_SYNC)
	psTDMData->psBufferSyncContext =
		pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice,
		                               "rogue-tdm");
	if (IS_ERR(psTDMData->psBufferSyncContext))
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: failed to create buffer_sync context (err=%ld)",
		         __func__, PTR_ERR(psTDMData->psBufferSyncContext)));

		eError = PVRSRV_ERROR_INVALID_PARAMS;
		goto fail_buffer_sync_context_create;
	}
#endif

	/* Zero CCB size arguments mean "use the build-time defaults". */
	eError = FWCommonContextAllocate(
		psConnection,
		psDeviceNode,
		REQ_TYPE_TQ_TDM,
		RGXFWIF_DM_TDM,
		psAllocatedMemDesc,
		ui32AllocatedOffset,
		psFWMemContextMemDesc,
		NULL,
		ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TDM_CCB_SIZE_LOG2,
		ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TDM_CCB_MAX_SIZE_LOG2,
		ui32ContextFlags,
		ui32Priority,
		UINT_MAX, /* max deadline MS */
		0, /* robustness address */
		psInfo,
		&psTDMData->psServerCommonContext);
	if (eError != PVRSRV_OK)
	{
		goto fail_contextalloc;
	}

	psTDMData->ui32Priority = ui32Priority;
	return PVRSRV_OK;

fail_contextalloc:
#if defined(SUPPORT_BUFFER_SYNC)
	pvr_buffer_sync_context_destroy(psTDMData->psBufferSyncContext);
	psTDMData->psBufferSyncContext = NULL;
fail_buffer_sync_context_create:
#endif
	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}


/*
 * Tear down a TDM transfer context's FW resources. Asks the FW to clean
 * up first; PVRSRV_ERROR_RETRY is propagated so the caller can try again
 * later (the context must stay alive in that case).
 */
static PVRSRV_ERROR _DestroyTDMTransferContext(
	RGX_SERVER_TQ_TDM_DATA *psTDMData,
	PVRSRV_DEVICE_NODE     *psDeviceNode)
{
	PVRSRV_ERROR eError;

	/* Check if the FW has finished with this resource ... */
	eError = RGXFWRequestCommonContextCleanUp(
		psDeviceNode,
		psTDMData->psServerCommonContext,
		RGXFWIF_DM_TDM,
		PDUMP_FLAGS_NONE);
	if (eError == PVRSRV_ERROR_RETRY)
	{
		return eError;
	}
	else if (eError != PVRSRV_OK)
	{
		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
		         __func__,
		         PVRSRVGetErrorString(eError)));
		return eError;
	}

	/* ...
it has so we can free it's resources */ + FWCommonContextFree(psTDMData->psServerCommonContext); + +#if defined(SUPPORT_BUFFER_SYNC) + pvr_buffer_sync_context_destroy(psTDMData->psBufferSyncContext); + psTDMData->psBufferSyncContext = NULL; +#endif + + return PVRSRV_OK; +} + +/* + * PVRSRVCreateTransferContextKM + */ +PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32FrameworkCommandSize, + IMG_PBYTE pabyFrameworkCommand, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32PackedCCBSizeU88, + IMG_UINT32 ui32ContextFlags, + RGX_SERVER_TQ_TDM_CONTEXT ** ppsTransferContext) +{ + RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext; + + DEVMEM_MEMDESC * psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); + PVRSRV_RGXDEV_INFO * psDevInfo = psDeviceNode->pvDevice; + RGX_COMMON_CONTEXT_INFO sInfo; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Allocate the server side structure */ + *ppsTransferContext = NULL; + psTransferContext = OSAllocZMem(sizeof(*psTransferContext)); + if (psTransferContext == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* + Create the FW transfer context, this has the TDM common + context embedded within it + */ + eError = DevmemFwAllocate(psDevInfo, + sizeof(RGXFWIF_FWTDMCONTEXT), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwTransferContext", + &psTransferContext->psFWTransferContextMemDesc); + if (eError != PVRSRV_OK) + { + goto fail_fwtransfercontext; + } + + eError = OSLockCreate(&psTransferContext->hLock); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_lockcreate; + } + + psTransferContext->psDeviceNode = psDeviceNode; + + /* + * Create the FW framework buffer + */ + eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode, + &psTransferContext->psFWFrameworkMemDesc, + ui32FrameworkCommandSize); + if (eError != PVRSRV_OK) + { + 
PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate firmware GPU framework state (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_frameworkcreate; + } + + /* Copy the Framework client data into the framework buffer */ + eError = PVRSRVRGXFrameworkCopyCommand(psTransferContext->psFWFrameworkMemDesc, + pabyFrameworkCommand, + ui32FrameworkCommandSize); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to populate the framework buffer (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_frameworkcopy; + } + + sInfo.psFWFrameworkMemDesc = psTransferContext->psFWFrameworkMemDesc; + + eError = _CreateTDMTransferContext(psConnection, + psDeviceNode, + psTransferContext->psFWTransferContextMemDesc, + offsetof(RGXFWIF_FWTDMCONTEXT, sTDMContext), + psFWMemContextMemDesc, + ui32Priority, + &sInfo, + &psTransferContext->sTDMData, + U32toU8_Unpack1(ui32PackedCCBSizeU88), + U32toU8_Unpack2(ui32PackedCCBSizeU88), + ui32ContextFlags); + if (eError != PVRSRV_OK) + { + goto fail_tdmtransfercontext; + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)) + { + WorkEstInitTDM(psDevInfo, &psTransferContext->sWorkEstData); + } +#endif + + SyncAddrListInit(&psTransferContext->sSyncAddrListFence); + SyncAddrListInit(&psTransferContext->sSyncAddrListUpdate); + + OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock); + dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock); + *ppsTransferContext = psTransferContext; + + return PVRSRV_OK; + +fail_tdmtransfercontext: +fail_frameworkcopy: + DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc); +fail_frameworkcreate: + OSLockDestroy(psTransferContext->hLock); +fail_lockcreate: + DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc); +fail_fwtransfercontext: + OSFreeMem(psTransferContext); + PVR_ASSERT(eError != PVRSRV_OK); + 
*ppsTransferContext = NULL; + return eError; +} + +PVRSRV_ERROR PVRSRVRGXTDMGetSharedMemoryKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + PMR ** ppsCLIPMRMem, + PMR ** ppsUSCPMRMem) +{ + PVRSRVTQAcquireShaders(psDeviceNode, ppsCLIPMRMem, ppsUSCPMRMem); + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVRGXTDMReleaseSharedMemoryKM(PMR * psPMRMem) +{ + PVR_UNREFERENCED_PARAMETER(psPMRMem); + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + RGXFWIF_FWTDMCONTEXT *psFWTransferContext; + IMG_UINT32 ui32WorkEstCCBSubmitted; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)) + { + eError = DevmemAcquireCpuVirtAddr(psTransferContext->psFWTransferContextMemDesc, + (void **)&psFWTransferContext); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map firmware transfer context (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + + ui32WorkEstCCBSubmitted = psFWTransferContext->ui32WorkEstCCBSubmitted; + + DevmemReleaseCpuVirtAddr(psTransferContext->psFWTransferContextMemDesc); + + /* Check if all of the workload estimation CCB commands for this workload are read */ + if (ui32WorkEstCCBSubmitted != psTransferContext->sWorkEstData.ui32WorkEstCCBReceived) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch", + __func__, ui32WorkEstCCBSubmitted, + psTransferContext->sWorkEstData.ui32WorkEstCCBReceived)); + + return PVRSRV_ERROR_RETRY; + } + } +#endif + + + /* remove node from list before calling destroy - as destroy, if successful + * will invalidate the node + * must be re-added if destroy fails + */ + OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock); + dllist_remove_node(&(psTransferContext->sListNode)); + 
	OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);


	eError = _DestroyTDMTransferContext(&psTransferContext->sTDMData,
	                                    psTransferContext->psDeviceNode);
	if (eError != PVRSRV_OK)
	{
		goto fail_destroyTDM;
	}

#if defined(SUPPORT_WORKLOAD_ESTIMATION)
	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM))
	{
		WorkEstDeInitTDM(psDevInfo, &psTransferContext->sWorkEstData);
	}
#endif

	DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);

	SyncAddrListDeinit(&psTransferContext->sSyncAddrListFence);
	SyncAddrListDeinit(&psTransferContext->sSyncAddrListUpdate);

	DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc);

	OSLockDestroy(psTransferContext->hLock);

	OSFreeMem(psTransferContext);

	return PVRSRV_OK;

fail_destroyTDM:

	/* Destroy was refused (e.g. FW still busy): re-publish the context on
	 * the device list so it can be found and destroyed later. */
	OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
	dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode));
	OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}


/*
 * PVRSRVSubmitTQ3DKickKM
 *
 * Submit a TDM transfer command: resolves input fences (sync file and,
 * optionally, buffer-sync PMRs) into FW UFO checks, creates the output
 * fence and its updates, writes the command into the client CCB and
 * kicks the FW. Returns the output fence through piUpdateFence when an
 * update timeline is supplied.
 */
PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM(
	RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
	IMG_UINT32                 ui32PDumpFlags,
	IMG_UINT32                 ui32ClientCacheOpSeqNum,
	IMG_UINT32                 ui32ClientUpdateCount,
	SYNC_PRIMITIVE_BLOCK     **pauiClientUpdateUFODevVarBlock,
	IMG_UINT32                *paui32ClientUpdateSyncOffset,
	IMG_UINT32                *paui32ClientUpdateValue,
	PVRSRV_FENCE               iCheckFence,
	PVRSRV_TIMELINE            iUpdateTimeline,
	PVRSRV_FENCE              *piUpdateFence,
	IMG_CHAR                   szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
	IMG_UINT32                 ui32FWCommandSize,
	IMG_UINT8                 *pui8FWCommand,
	IMG_UINT32                 ui32ExtJobRef,
	IMG_UINT32                 ui32SyncPMRCount,
	IMG_UINT32                *paui32SyncPMRFlags,
	PMR                      **ppsSyncPMRs,
	IMG_UINT32                 ui32TDMCharacteristic1,
	IMG_UINT32                 ui32TDMCharacteristic2,
	IMG_UINT64                 ui64DeadlineInus)
{
	PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode;
	RGX_CCB_CMD_HELPER_DATA *psCmdHelper;
	PRGXFWIF_UFO_ADDR * pauiIntFenceUFOAddress = NULL;
	PRGXFWIF_UFO_ADDR * pauiIntUpdateUFOAddress = NULL;
	IMG_UINT32 ui32IntClientFenceCount = 0;
	IMG_UINT32 * paui32IntUpdateValue = paui32ClientUpdateValue;
	IMG_UINT32 ui32IntClientUpdateCount = ui32ClientUpdateCount;
	PVRSRV_ERROR eError;
	PVRSRV_ERROR eError2;
	PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE;
	PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psTransferContext->sTDMData.psServerCommonContext);
	RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext);
	/* Device-unique internal job reference for HWPerf/tracing. */
	IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal);

	IMG_UINT32 ui32CmdOffset = 0;
	IMG_BOOL bCCBStateOpen;

	IMG_UINT64 uiCheckFenceUID = 0;
	IMG_UINT64 uiUpdateFenceUID = 0;
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
	RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataTransfer = {0};
	IMG_UINT32 ui32TDMWorkloadDataRO = 0;
	IMG_UINT32 ui32TDMCmdHeaderOffset = 0;
	IMG_UINT32 ui32TDMCmdOffsetWrapCheck = 0;
	RGX_WORKLOAD sWorkloadCharacteristics = {0};
#endif

#if defined(SUPPORT_BUFFER_SYNC)
	struct pvr_buffer_sync_append_data *psBufferSyncData = NULL;
	PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL;
	IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0;
	PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL;
#endif

	PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
	PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
	IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
	IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
	PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
	IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
	void *pvUpdateFenceFinaliseData = NULL;

	/* An update timeline without somewhere to return the fence is invalid. */
	if (iUpdateTimeline >= 0 && !piUpdateFence)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

#if !defined(SUPPORT_WORKLOAD_ESTIMATION)
	PVR_UNREFERENCED_PARAMETER(ui32TDMCharacteristic1);
	PVR_UNREFERENCED_PARAMETER(ui32TDMCharacteristic2);
PVR_UNREFERENCED_PARAMETER(ui64DeadlineInus); +#endif + + /* Ensure we haven't been given a null ptr to + * update values if we have been told we + * have updates + */ + if (ui32ClientUpdateCount > 0) + { + PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL, + "paui32ClientUpdateValue NULL but " + "ui32ClientUpdateCount > 0", + PVRSRV_ERROR_INVALID_PARAMS); + } + + /* Ensure the string is null-terminated (Required for safety) */ + szUpdateFenceName[31] = '\0'; + + if (ui32SyncPMRCount != 0) + { + if (!ppsSyncPMRs) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + + OSLockAcquire(psTransferContext->hLock); + + /* We can't allocate the required amount of stack space on all consumer architectures */ + psCmdHelper = OSAllocMem(sizeof(RGX_CCB_CMD_HELPER_DATA)); + if (psCmdHelper == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_allochelper; + } + + + /* + Init the command helper commands for all the prepares + */ + { + IMG_CHAR *pszCommandName; + RGXFWIF_CCB_CMD_TYPE eType; +#if defined(SUPPORT_BUFFER_SYNC) + struct pvr_buffer_sync_context *psBufferSyncContext; +#endif + + pszCommandName = "TQ-TDM"; + + if (ui32FWCommandSize == 0) + { + /* A NULL CMD for TDM is used to append updates to a non finished + * FW command. bCCBStateOpen is used in case capture range is + * entered on this command, to not drain CCB up to the Roff for this + * command, but the finished command prior to this. 
+ */ + bCCBStateOpen = IMG_TRUE; + eType = RGXFWIF_CCB_CMD_TYPE_NULL; + } + else + { + bCCBStateOpen = IMG_FALSE; + eType = RGXFWIF_CCB_CMD_TYPE_TQ_TDM; + } +#if defined(SUPPORT_BUFFER_SYNC) + psBufferSyncContext = psTransferContext->sTDMData.psBufferSyncContext; +#endif + + eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListFence, + 0, + NULL, + NULL); + if (eError != PVRSRV_OK) + { + goto fail_populate_sync_addr_list; + } + + eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListUpdate, + ui32ClientUpdateCount, + pauiClientUpdateUFODevVarBlock, + paui32ClientUpdateSyncOffset); + if (eError != PVRSRV_OK) + { + goto fail_populate_sync_addr_list; + } + paui32IntUpdateValue = paui32ClientUpdateValue; + pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs; + + + if (ui32SyncPMRCount) + { +#if defined(SUPPORT_BUFFER_SYNC) + int err; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling pvr_buffer_sync_resolve_and_create_fences", __func__)); + err = pvr_buffer_sync_resolve_and_create_fences(psBufferSyncContext, + psTransferContext->psDeviceNode->hSyncCheckpointContext, + ui32SyncPMRCount, + ppsSyncPMRs, + paui32SyncPMRFlags, + &ui32BufferFenceSyncCheckpointCount, + &apsBufferFenceSyncCheckpoints, + &psBufferUpdateSyncCheckpoint, + &psBufferSyncData); + if (err) + { + switch (err) + { + case -EINTR: + eError = PVRSRV_ERROR_RETRY; + break; + case -ENOMEM: + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + break; + default: + eError = PVRSRV_ERROR_INVALID_PARAMS; + break; + } + + if (eError != PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_ERROR, "%s: pvr_buffer_sync_resolve_and_create_fences failed (%s)", __func__, PVRSRVGetErrorString(eError))); + } + goto fail_resolve_input_fence; + } + + /* Append buffer sync fences */ + if (ui32BufferFenceSyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d buffer sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>)...", __func__, 
ui32BufferFenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence , (void*)pauiIntFenceUFOAddress)); + SyncAddrListAppendAndDeRefCheckpoints(&psTransferContext->sSyncAddrListFence, + ui32BufferFenceSyncCheckpointCount, + apsBufferFenceSyncCheckpoints); + if (!pauiIntFenceUFOAddress) + { + pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs; + } + ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount; + } + + if (psBufferUpdateSyncCheckpoint) + { + /* Append the update (from output fence) */ + SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate, + 1, + &psBufferUpdateSyncCheckpoint); + if (!pauiIntUpdateUFOAddress) + { + pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs; + } + ui32IntClientUpdateCount++; + } +#else /* defined(SUPPORT_BUFFER_SYNC) */ + PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __func__, ui32SyncPMRCount)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto fail_populate_sync_addr_list; +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + } + + /* Resolve the sync checkpoints that make up the input fence */ + eError = SyncCheckpointResolveFence(psTransferContext->psDeviceNode->hSyncCheckpointContext, + iCheckFence, + &ui32FenceSyncCheckpointCount, + &apsFenceSyncCheckpoints, + &uiCheckFenceUID, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + goto fail_resolve_input_fence; + } +#if defined(TDM_CHECKPOINT_DEBUG) + { + IMG_UINT32 ii; + for (ii=0; ii<32; ii++) + { + PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii); + CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint)); //psFenceSyncCheckpoints[ii])); + } + } +#endif + /* Create the output fence (if required) */ + if (iUpdateTimeline != PVRSRV_NO_TIMELINE) + { + eError = SyncCheckpointCreateFence(psTransferContext->psDeviceNode, + szUpdateFenceName, + iUpdateTimeline, + 
psTransferContext->psDeviceNode->hSyncCheckpointContext, + &iUpdateFence, + &uiUpdateFenceUID, + &pvUpdateFenceFinaliseData, + &psUpdateSyncCheckpoint, + (void*)&psFenceTimelineUpdateSync, + &ui32FenceTimelineUpdateValue, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + goto fail_create_output_fence; + } + + /* Append the sync prim update for the timeline (if required) */ + if (psFenceTimelineUpdateSync) + { + IMG_UINT32 *pui32TimelineUpdateWp = NULL; + + /* Allocate memory to hold the list of update values (including our timeline update) */ + pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); + if (!pui32IntAllocatedUpdateValues) + { + /* Failed to allocate memory */ + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc_update_values_mem; + } + OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); + /* Copy the update values into the new memory, then append our timeline update value */ + if (paui32IntUpdateValue) + { + OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount); + } + /* Now set the additional update value */ + pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount; + *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue; + ui32IntClientUpdateCount++; +#if defined(TDM_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; + + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + /* Now append the timeline sync prim addr to the transfer context update list */ + SyncAddrListAppendSyncPrim(&psTransferContext->sSyncAddrListUpdate, + psFenceTimelineUpdateSync); +#if defined(TDM_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; + + for (iii=0; iii) = 0x%x", 
__func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */ + paui32IntUpdateValue = pui32IntAllocatedUpdateValues; + } + } + + if (ui32FenceSyncCheckpointCount) + { + /* Append the checks (from input fence) */ + if (ui32FenceSyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence)); +#if defined(TDM_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress; + + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListFence, + ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + if (!pauiIntFenceUFOAddress) + { + pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs; + } + ui32IntClientFenceCount += ui32FenceSyncCheckpointCount; + } +#if defined(TDM_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; + + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + } + if (psUpdateSyncCheckpoint) + { + /* Append the update (from output fence) */ + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to TQ Update (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)&psTransferContext->sSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress)); + SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate, + 1, + &psUpdateSyncCheckpoint); + if (!pauiIntUpdateUFOAddress) + { + pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs; + } + ui32IntClientUpdateCount++; +#if defined(TDM_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp 
= (IMG_UINT32*)pui32IntAllocatedUpdateValues; + + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + } + +#if (ENABLE_TDM_UFO_DUMP == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: dumping TDM fence/updates syncs...", __func__)); + { + IMG_UINT32 ii; + PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress; + PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress; + IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue; + + /* Dump Fence syncs and Update syncs */ + PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM fence syncs (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psTransferContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress)); + for (ii=0; ii. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr)); + psTmpIntFenceUFOAddress++; + } + PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM update syncs (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psTransferContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress)); + for (ii=0; iiui32Addr & 0x1) + { + PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. 
FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue)); + pui32TmpIntUpdateValue++; + } + psTmpIntUpdateUFOAddress++; + } + } +#endif + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)) + { + sWorkloadCharacteristics.sTransfer.ui32Characteristic1 = ui32TDMCharacteristic1; + sWorkloadCharacteristics.sTransfer.ui32Characteristic2 = ui32TDMCharacteristic2; + + /* Prepare workload estimation */ + WorkEstPrepare(psDeviceNode->pvDevice, + &psTransferContext->sWorkEstData, + &psTransferContext->sWorkEstData.uWorkloadMatchingData.sTransfer.sDataTDM, + eType, + &sWorkloadCharacteristics, + ui64DeadlineInus, + &sWorkloadKickDataTransfer); + } +#endif + + /* + Create the command helper data for this command + */ + RGXCmdHelperInitCmdCCB(psClientCCB, + ui32IntClientFenceCount, + pauiIntFenceUFOAddress, + NULL, + ui32IntClientUpdateCount, + pauiIntUpdateUFOAddress, + paui32IntUpdateValue, + ui32FWCommandSize, + pui8FWCommand, + eType, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + &sWorkloadKickDataTransfer, +#else /* SUPPORT_WORKLOAD_ESTIMATION */ + NULL, +#endif /* SUPPORT_WORKLOAD_ESTIMATION */ + pszCommandName, + bCCBStateOpen, + psCmdHelper); + } + + /* + Acquire space for all the commands in one go + */ + + eError = RGXCmdHelperAcquireCmdCCB(1, psCmdHelper); + if (eError != PVRSRV_OK) + { + goto fail_3dcmdacquire; + } + + + /* + We should acquire the kernel CCB(s) space here as the schedule could fail + and we would have to roll back all the syncs + */ + + /* + Only do the command helper release (which takes the server sync + operations if the acquire succeeded + */ + ui32CmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext)); + RGXCmdHelperReleaseCmdCCB(1, + psCmdHelper, + "TQ_TDM", + 
	                          FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr);


#if defined(SUPPORT_WORKLOAD_ESTIMATION)
	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM))
	{
		/* The following is used to determine the offset of the command header containing
		   the workload estimation data so that can be accessed when the KCCB is read */
		ui32TDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(psCmdHelper);

		ui32TDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext));

		/* This checks if the command would wrap around at the end of the CCB and
		 * therefore would start at an offset of 0 rather than the current command
		 * offset */
		if (ui32CmdOffset < ui32TDMCmdOffsetWrapCheck)
		{
			ui32TDMWorkloadDataRO = ui32CmdOffset;
		}
		else
		{
			ui32TDMWorkloadDataRO = 0;
		}
	}
#endif

	/*
	   Even if we failed to acquire the client CCB space we might still need
	   to kick the HW to process a padding packet to release space for us next
	   time round
	 */
	{
		RGXFWIF_KCCB_CMD sTDMKCCBCmd;
		IMG_UINT32 ui32FWAddr = FWCommonContextGetFWAddress(
				psTransferContext->sTDMData.psServerCommonContext).ui32Addr;

		/* Construct the kernel 3D CCB command. */
		sTDMKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
		sTDMKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext);
		sTDMKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB);
		sTDMKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
		sTDMKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;

		/* Add the Workload data into the KCCB kick */
		sTDMKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
		if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM))
		{
			/* Store the offset to the CCCB command header so that it can be referenced
			 * when the KCCB command reaches the FW */
			sTDMKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32TDMWorkloadDataRO + ui32TDMCmdHeaderOffset;
		}
#endif

		/* HTBLOGK(HTB_SF_MAIN_KICK_TDM, */
		/*         s3DKCCBCmd.uCmdData.sCmdKickData.psContext, */
		/*         ui323DCmdOffset); */
		RGXSRV_HWPERF_ENQ(psTransferContext,
		                  OSGetCurrentClientProcessIDKM(),
		                  FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr,
		                  ui32ExtJobRef,
		                  ui32IntJobRef,
		                  RGX_HWPERF_KICK_TYPE_TQTDM,
		                  iCheckFence,
		                  iUpdateFence,
		                  iUpdateTimeline,
		                  uiCheckFenceUID,
		                  uiUpdateFenceUID,
		                  NO_DEADLINE,
		                  NO_CYCEST);

		/* Retry the kick while the KCCB is full, up to the HW timeout.
		 * NOTE(review): a persistent non-RETRY failure from
		 * RGXScheduleCommand (eError2) is not propagated to the caller --
		 * only the earlier acquire error (eError) is checked below. */
		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
		{
			eError2 = RGXScheduleCommand(psDeviceNode->pvDevice,
			                             RGXFWIF_DM_TDM,
			                             &sTDMKCCBCmd,
			                             ui32ClientCacheOpSeqNum,
			                             ui32PDumpFlags);
			if (eError2 != PVRSRV_ERROR_RETRY)
			{
				break;
			}
			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
		} END_LOOP_UNTIL_TIMEOUT();

		PVRGpuTraceEnqueueEvent(psDeviceNode->pvDevice, ui32FWAddr, ui32ExtJobRef,
		                        ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TQTDM);
	}

	/*
	 * Now check eError (which may have returned an error from our earlier calls
	 * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first
	 * so we check it now...
	 */
	if (eError != PVRSRV_OK )
	{
		goto fail_2dcmdacquire;
	}

#if defined(NO_HARDWARE)
	/* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
	if (psUpdateSyncCheckpoint)
	{
		SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
	}
	if (psFenceTimelineUpdateSync)
	{
		SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
	}
	SyncCheckpointNoHWUpdateTimelines(NULL);
#endif /* defined(NO_HARDWARE) */

#if defined(SUPPORT_BUFFER_SYNC)
	if (psBufferSyncData)
	{
		pvr_buffer_sync_kick_succeeded(psBufferSyncData);
	}
	if (apsBufferFenceSyncCheckpoints)
	{
		kfree(apsBufferFenceSyncCheckpoints);
	}
#endif /* defined(SUPPORT_BUFFER_SYNC) */

	/* Hand the output fence back to the caller (NO_FENCE when no timeline). */
	*piUpdateFence = iUpdateFence;
	if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE))
	{
		SyncCheckpointFinaliseFence(psDeviceNode, iUpdateFence, pvUpdateFenceFinaliseData,
		                            psUpdateSyncCheckpoint, szUpdateFenceName);
	}

	OSFreeMem(psCmdHelper);

	/* Drop the references taken on the sync checkpoints in the
	 * resolved input fence */
	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
	                             apsFenceSyncCheckpoints);
	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
	if (apsFenceSyncCheckpoints)
	{
		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
	}
	/* Free memory allocated to hold the internal list of update values */
	if (pui32IntAllocatedUpdateValues)
	{
		OSFreeMem(pui32IntAllocatedUpdateValues);
		pui32IntAllocatedUpdateValues = NULL;
	}

	OSLockRelease(psTransferContext->hLock);
	return PVRSRV_OK;

/*
   No resources are created in this function so there is nothing to free
   unless we had to merge syncs.
   If we fail after the client CCB acquire there is still nothing to do
   as only the client CCB release will modify the client CCB
*/
fail_2dcmdacquire:
fail_3dcmdacquire:

	SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListFence);
	SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListUpdate);
fail_alloc_update_values_mem:

/* fail_pdumpcheck: */
/* fail_cmdtype: */

	if (iUpdateFence != PVRSRV_NO_FENCE)
	{
		SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
	}
fail_create_output_fence:
	/* Drop the references taken on the sync checkpoints in the
	 * resolved input fence */
	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
	                             apsFenceSyncCheckpoints);

fail_resolve_input_fence:

#if defined(SUPPORT_BUFFER_SYNC)
	if (psBufferSyncData)
	{
		pvr_buffer_sync_kick_failed(psBufferSyncData);
	}
	if (apsBufferFenceSyncCheckpoints)
	{
		kfree(apsBufferFenceSyncCheckpoints);
	}
#endif /* defined(SUPPORT_BUFFER_SYNC) */

fail_populate_sync_addr_list:
	PVR_ASSERT(eError != PVRSRV_OK);
	OSFreeMem(psCmdHelper);
fail_allochelper:

	if (apsFenceSyncCheckpoints)
	{
		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
	}
	OSLockRelease(psTransferContext->hLock);
	return eError;
}


/*
 * Tell the FW that the client CCB write offset of this context has been
 * advanced externally (serialised on the context lock).
 */
PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(
	RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
	IMG_UINT32                 ui32PDumpFlags)
{
	RGXFWIF_KCCB_CMD sKCCBCmd;
	PVRSRV_ERROR eError;

	OSLockAcquire(psTransferContext->hLock);

	/* Schedule the firmware command */
	sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE;
	sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext);

	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
	{
		eError = RGXScheduleCommand(psTransferContext->psDeviceNode->pvDevice,
		                            RGXFWIF_DM_TDM,
		                            &sKCCBCmd,
		                            0,
ui32PDumpFlags); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule the FW command %d (%s)", + __func__, eError, PVRSRVGETERRORSTRING(eError))); + } + + OSLockRelease(psTransferContext->hLock); + return eError; +} + +PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, + IMG_UINT32 ui32Priority) +{ + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + + OSLockAcquire(psTransferContext->hLock); + + if (psTransferContext->sTDMData.ui32Priority != ui32Priority) + { + eError = ContextSetPriority(psTransferContext->sTDMData.psServerCommonContext, + psConnection, + psTransferContext->psDeviceNode->pvDevice, + ui32Priority, + RGXFWIF_DM_TDM); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority (%s)", __func__, PVRSRVGetErrorString(eError))); + + OSLockRelease(psTransferContext->hLock); + return eError; + } + } + + OSLockRelease(psTransferContext->hLock); + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPropertyKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, + RGX_CONTEXT_PROPERTY eContextProperty, + IMG_UINT64 ui64Input, + IMG_UINT64 *pui64Output) +{ + PVRSRV_ERROR eError; + + switch (eContextProperty) + { + case RGX_CONTEXT_PROPERTY_FLAGS: + { + OSLockAcquire(psTransferContext->hLock); + eError = FWCommonContextSetFlags(psTransferContext->sTDMData.psServerCommonContext, + (IMG_UINT32)ui64Input); + if (eError == PVRSRV_OK) + { + psTransferContext->ui32Flags = (IMG_UINT32)ui64Input; + } + OSLockRelease(psTransferContext->hLock); + PVR_LOG_IF_ERROR(eError, "FWCommonContextSetFlags"); + break; + } + + default: + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, 
eContextProperty)); + eError = PVRSRV_ERROR_NOT_SUPPORTED; + } + } + + return eError; +} + +void DumpTDMTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel) +{ + DLLIST_NODE *psNode, *psNext; + + OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock); + + dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext) + { + RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx = + IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode); + + DumpFWCommonContextInfo(psCurrentServerTransferCtx->sTDMData.psServerCommonContext, + pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + } + + OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock); +} + + +IMG_UINT32 CheckForStalledClientTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + DLLIST_NODE *psNode, *psNext; + IMG_UINT32 ui32ContextBitMask = 0; + + OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock); + + dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext) + { + RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx = + IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode); + + if (CheckStalledClientCommonContext( + psCurrentServerTransferCtx->sTDMData.psServerCommonContext, RGX_KICK_TYPE_DM_TDM_2D) + == PVRSRV_ERROR_CCCB_STALLED) { + ui32ContextBitMask = RGX_KICK_TYPE_DM_TDM_2D; + } + } + + OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock); + return ui32ContextBitMask; +} + +/**************************************************************************//** + End of file (rgxtdmtransfer.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxtdmtransfer.h b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxtdmtransfer.h new file mode 100644 index 000000000000..6bd1413e408b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxtdmtransfer.h @@ -0,0 +1,132 @@ 
+/*************************************************************************/ /*! +@File rgxtdmtransfer.h +@Title RGX Transfer queue 2 Functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the RGX Transfer queue Functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__RGXTDMTRANSFER_H__) +#define __RGXTDMTRANSFER_H__ + +#include "devicemem.h" +#include "device.h" +#include "rgxdevice.h" +#include "rgxfwutils.h" +#include "rgx_fwif_resetframework.h" +#include "rgxdebug.h" +#include "pvr_notifier.h" + +#include "sync_server.h" +#include "connection_server.h" + +typedef struct _RGX_SERVER_TQ_TDM_CONTEXT_ RGX_SERVER_TQ_TDM_CONTEXT; + + +PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32FrameworkCommandSize, + IMG_PBYTE pabyFrameworkCommand, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32PackedCCBSizeU88, + IMG_UINT32 ui32ContextFlags, + RGX_SERVER_TQ_TDM_CONTEXT **ppsTransferContext); + + +PVRSRV_ERROR PVRSRVRGXTDMGetSharedMemoryKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + PMR ** ppsCLIPMRMem, + PMR ** ppsUSCPMRMem); + + +PVRSRV_ERROR PVRSRVRGXTDMReleaseSharedMemoryKM(PMR * psUSCPMRMem); + + +PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext); + + +PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( + RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFODevVarBlock, + IMG_UINT32 * 
paui32ClientUpdateSyncOffset, + IMG_UINT32 * paui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE iUpdateTimeline, + PVRSRV_FENCE * piUpdateFence, + IMG_CHAR szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], + IMG_UINT32 ui32FWCommandSize, + IMG_UINT8 * pui8FWCommand, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32SyncPMRCount, + IMG_UINT32 * pui32SyncPMRFlags, + PMR ** ppsSyncPMRs, + IMG_UINT32 ui32TDMCharacteristic1, + IMG_UINT32 ui32TDMCharacteristic2, + IMG_UINT64 ui64DeadlineInus); + +PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM( + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, + IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, + IMG_UINT32 ui32Priority); + +PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPropertyKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, + RGX_CONTEXT_PROPERTY eContextProperty, + IMG_UINT64 ui64Input, + IMG_UINT64 *pui64Output); + +/* Debug - Dump debug info of TDM transfer contexts on this device */ +void DumpTDMTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel); + +/* Debug/Watchdog - check if client transfer contexts are stalled */ +IMG_UINT32 CheckForStalledClientTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo); + + +#endif /* __RGXTDMTRANSFER_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxtransfer.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxtransfer.c new file mode 100644 index 000000000000..0e91f6c4daa1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxtransfer.c @@ -0,0 +1,1647 @@ +/*************************************************************************/ /*! +@File +@Title Device specific transfer queue routines +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "pdump_km.h" +#include "rgxdevice.h" +#include "rgxccb.h" +#include "rgxutils.h" +#include "rgxfwutils.h" +#include "rgxtransfer.h" +#include "rgx_tq_shared.h" +#include "rgxmem.h" +#include "allocmem.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "osfunc.h" +#include "pvr_debug.h" +#include "pvrsrv.h" +#include "rgx_fwif_resetframework.h" +#include "rgx_memallocflags.h" +#include "rgxhwperf.h" +#include "ospvr_gputrace.h" +#include "htbuffer.h" +#include "rgxshader.h" + +#include "pdump_km.h" + +#include "sync_server.h" +#include "sync_internal.h" +#include "sync.h" +#include "rgx_bvnc_defs_km.h" + +#if defined(SUPPORT_BUFFER_SYNC) +#include "pvr_buffer_sync.h" +#endif + +#include "sync_checkpoint.h" +#include "sync_checkpoint_internal.h" + +/* Enable this to dump the compiled list of UFOs prior to kick call */ +#define ENABLE_TQ_UFO_DUMP 0 + +//#define TRANSFER_CHECKPOINT_DEBUG 1 + +#if defined(TRANSFER_CHECKPOINT_DEBUG) +#define CHKPT_DBG(X) PVR_DPF(X) +#else +#define CHKPT_DBG(X) +#endif + +typedef struct { + DEVMEM_MEMDESC *psFWContextStateMemDesc; + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; + IMG_UINT32 ui32Priority; +#if defined(SUPPORT_BUFFER_SYNC) + struct pvr_buffer_sync_context *psBufferSyncContext; +#endif +} RGX_SERVER_TQ_3D_DATA; + +typedef struct { + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; + 
IMG_UINT32 ui32Priority; +#if defined(SUPPORT_BUFFER_SYNC) + struct pvr_buffer_sync_context *psBufferSyncContext; +#endif +} RGX_SERVER_TQ_2D_DATA; + +struct _RGX_SERVER_TQ_CONTEXT_ { + PVRSRV_DEVICE_NODE *psDeviceNode; + DEVMEM_MEMDESC *psFWFrameworkMemDesc; + IMG_UINT32 ui32Flags; +#define RGX_SERVER_TQ_CONTEXT_FLAGS_2D (1<<0) +#define RGX_SERVER_TQ_CONTEXT_FLAGS_3D (1<<1) + RGX_SERVER_TQ_3D_DATA s3DData; + RGX_SERVER_TQ_2D_DATA s2DData; + DLLIST_NODE sListNode; + ATOMIC_T hIntJobRef; + IMG_UINT32 ui32PDumpFlags; + /* per-prepare sync address lists */ + SYNC_ADDR_LIST asSyncAddrListFence[TQ_MAX_PREPARES_PER_SUBMIT]; + SYNC_ADDR_LIST asSyncAddrListUpdate[TQ_MAX_PREPARES_PER_SUBMIT]; + POS_LOCK hLock; +}; + +/* + Static functions used by transfer context code +*/ +static PVRSRV_ERROR _Create3DTransferContext(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC *psFWMemContextMemDesc, + IMG_UINT32 ui32Priority, + RGX_COMMON_CONTEXT_INFO *psInfo, + RGX_SERVER_TQ_3D_DATA *ps3DData, + IMG_UINT32 ui32CCBAllocSizeLog2, + IMG_UINT32 ui32CCBMaxAllocSizeLog2, + IMG_UINT32 ui32ContextFlags) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + IMG_UINT ui3DRegISPStateStoreSize = 0; + IMG_UINT uiNumISPStoreRegs = 1; /* default value 1 expected */ + /* + Allocate device memory for the firmware GPU context suspend state. + Note: the FW reads/writes the state to memory by accessing the GPU register interface. 
+ */ + PDUMPCOMMENT("Allocate RGX firmware TQ/3D context suspend state"); + + if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY)) + { + uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, + RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX); + } + + /* Calculate the size of the 3DCTX ISP state */ + ui3DRegISPStateStoreSize = sizeof(RGXFWIF_3DCTX_STATE) + + uiNumISPStoreRegs * sizeof(((RGXFWIF_3DCTX_STATE *)0)->au3DReg_ISP_STORE[0]); + +#if defined(SUPPORT_BUFFER_SYNC) + ps3DData->psBufferSyncContext = + pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice, + "rogue-tq3d"); + if (IS_ERR(ps3DData->psBufferSyncContext)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to create buffer_sync context (err=%ld)", + __func__, PTR_ERR(ps3DData->psBufferSyncContext))); + + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto fail_buffer_sync_context_create; + } +#endif + + eError = DevmemFwAllocate(psDevInfo, + ui3DRegISPStateStoreSize, + RGX_FWCOMCTX_ALLOCFLAGS, + "FwTQ3DContext", + &ps3DData->psFWContextStateMemDesc); + if (eError != PVRSRV_OK) + { + goto fail_contextswitchstate; + } + + eError = FWCommonContextAllocate(psConnection, + psDeviceNode, + REQ_TYPE_TQ_3D, + RGXFWIF_DM_3D, + NULL, + 0, + psFWMemContextMemDesc, + ps3DData->psFWContextStateMemDesc, + ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TQ3D_CCB_SIZE_LOG2, + ui32CCBMaxAllocSizeLog2 ? 
ui32CCBMaxAllocSizeLog2 : RGX_TQ3D_CCB_MAX_SIZE_LOG2, + ui32ContextFlags, + ui32Priority, + UINT_MAX, /* max deadline MS */ + 0, /* robustness address */ + psInfo, + &ps3DData->psServerCommonContext); + if (eError != PVRSRV_OK) + { + goto fail_contextalloc; + } + + + PDUMPCOMMENT("Dump 3D context suspend state buffer"); + DevmemPDumpLoadMem(ps3DData->psFWContextStateMemDesc, 0, sizeof(RGXFWIF_3DCTX_STATE), PDUMP_FLAGS_CONTINUOUS); + + ps3DData->ui32Priority = ui32Priority; + return PVRSRV_OK; + +fail_contextalloc: + DevmemFwUnmapAndFree(psDevInfo, ps3DData->psFWContextStateMemDesc); +fail_contextswitchstate: +#if defined(SUPPORT_BUFFER_SYNC) + pvr_buffer_sync_context_destroy(ps3DData->psBufferSyncContext); + ps3DData->psBufferSyncContext = NULL; +fail_buffer_sync_context_create: +#endif + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + + +static PVRSRV_ERROR _Create2DTransferContext(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC *psFWMemContextMemDesc, + IMG_UINT32 ui32Priority, + RGX_COMMON_CONTEXT_INFO *psInfo, + RGX_SERVER_TQ_2D_DATA *ps2DData, + IMG_UINT32 ui32CCBAllocSizeLog2, + IMG_UINT32 ui32CCBMaxAllocSizeLog2, + IMG_UINT32 ui32ContextFlags) +{ + PVRSRV_ERROR eError; + +#if defined(SUPPORT_BUFFER_SYNC) + ps2DData->psBufferSyncContext = + pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice, + "rogue-tqtla"); + if (IS_ERR(ps2DData->psBufferSyncContext)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to create buffer_sync context (err=%ld)", + __func__, PTR_ERR(ps2DData->psBufferSyncContext))); + + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto fail_buffer_sync_context_create; + } +#endif + + eError = FWCommonContextAllocate(psConnection, + psDeviceNode, + REQ_TYPE_TQ_2D, + RGXFWIF_DM_2D, + NULL, + 0, + psFWMemContextMemDesc, + NULL, + ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TQ2D_CCB_SIZE_LOG2, + ui32CCBMaxAllocSizeLog2 ? 
ui32CCBMaxAllocSizeLog2 : RGX_TQ2D_CCB_MAX_SIZE_LOG2, + ui32ContextFlags, + ui32Priority, + UINT_MAX, /* max deadline MS */ + 0, /* robustness address */ + psInfo, + &ps2DData->psServerCommonContext); + if (eError != PVRSRV_OK) + { + goto fail_contextalloc; + } + + ps2DData->ui32Priority = ui32Priority; + return PVRSRV_OK; + +fail_contextalloc: +#if defined(SUPPORT_BUFFER_SYNC) + pvr_buffer_sync_context_destroy(ps2DData->psBufferSyncContext); + ps2DData->psBufferSyncContext = NULL; +fail_buffer_sync_context_create: +#endif + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + + +static PVRSRV_ERROR _Destroy2DTransferContext(RGX_SERVER_TQ_2D_DATA *ps2DData, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + + /* Check if the FW has finished with this resource ... */ + eError = RGXFWRequestCommonContextCleanUp(psDeviceNode, + ps2DData->psServerCommonContext, + RGXFWIF_DM_2D, + ui32PDumpFlags); + if (eError == PVRSRV_ERROR_RETRY) + { + return eError; + } + else if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + + /* ... it has so we can free it's resources */ + FWCommonContextFree(ps2DData->psServerCommonContext); + ps2DData->psServerCommonContext = NULL; + +#if defined(SUPPORT_BUFFER_SYNC) + pvr_buffer_sync_context_destroy(ps2DData->psBufferSyncContext); + ps2DData->psBufferSyncContext = NULL; +#endif + + return PVRSRV_OK; +} + +static PVRSRV_ERROR _Destroy3DTransferContext(RGX_SERVER_TQ_3D_DATA *ps3DData, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + + /* Check if the FW has finished with this resource ... 
*/ + eError = RGXFWRequestCommonContextCleanUp(psDeviceNode, + ps3DData->psServerCommonContext, + RGXFWIF_DM_3D, + ui32PDumpFlags); + if (eError == PVRSRV_ERROR_RETRY) + { + return eError; + } + else if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + + /* ... it has so we can free it's resources */ + DevmemFwUnmapAndFree(psDeviceNode->pvDevice, ps3DData->psFWContextStateMemDesc); + FWCommonContextFree(ps3DData->psServerCommonContext); + ps3DData->psServerCommonContext = NULL; + +#if defined(SUPPORT_BUFFER_SYNC) + pvr_buffer_sync_context_destroy(ps3DData->psBufferSyncContext); + ps3DData->psBufferSyncContext = NULL; +#endif + + return PVRSRV_OK; +} + + +/* + * PVRSRVCreateTransferContextKM + */ +PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32FrameworkCommandSize, + IMG_PBYTE pabyFrameworkCommand, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32PackedCCBSizeU8888, + IMG_UINT32 ui32ContextFlags, + RGX_SERVER_TQ_CONTEXT **ppsTransferContext, + PMR **ppsCLIPMRMem, + PMR **ppsUSCPMRMem) +{ + RGX_SERVER_TQ_CONTEXT *psTransferContext; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); + RGX_COMMON_CONTEXT_INFO sInfo; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Allocate the server side structure */ + *ppsTransferContext = NULL; + psTransferContext = OSAllocZMem(sizeof(*psTransferContext)); + if (psTransferContext == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + eError = OSLockCreate(&psTransferContext->hLock); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_createlock; + } + + psTransferContext->psDeviceNode = psDeviceNode; + + /* + 
* Create the FW framework buffer + */ + eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode, + &psTransferContext->psFWFrameworkMemDesc, + ui32FrameworkCommandSize); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate firmware GPU framework state (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_frameworkcreate; + } + + /* Copy the Framework client data into the framework buffer */ + eError = PVRSRVRGXFrameworkCopyCommand(psTransferContext->psFWFrameworkMemDesc, + pabyFrameworkCommand, + ui32FrameworkCommandSize); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to populate the framework buffer (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_frameworkcopy; + } + + sInfo.psFWFrameworkMemDesc = psTransferContext->psFWFrameworkMemDesc; + + eError = _Create3DTransferContext(psConnection, + psDeviceNode, + psFWMemContextMemDesc, + ui32Priority, + &sInfo, + &psTransferContext->s3DData, + U32toU8_Unpack3(ui32PackedCCBSizeU8888), + U32toU8_Unpack4(ui32PackedCCBSizeU8888), + ui32ContextFlags); + if (eError != PVRSRV_OK) + { + goto fail_3dtransfercontext; + } + psTransferContext->ui32Flags |= RGX_SERVER_TQ_CONTEXT_FLAGS_3D; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)) + { + eError = _Create2DTransferContext(psConnection, + psDeviceNode, + psFWMemContextMemDesc, + ui32Priority, + &sInfo, + &psTransferContext->s2DData, + U32toU8_Unpack1(ui32PackedCCBSizeU8888), + U32toU8_Unpack2(ui32PackedCCBSizeU8888), + ui32ContextFlags); + if (eError != PVRSRV_OK) + { + goto fail_2dtransfercontext; + } + psTransferContext->ui32Flags |= RGX_SERVER_TQ_CONTEXT_FLAGS_2D; + } + + PVRSRVTQAcquireShaders(psDeviceNode, ppsCLIPMRMem, ppsUSCPMRMem); + + { + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock); + dllist_add_to_tail(&(psDevInfo->sTransferCtxtListHead), &(psTransferContext->sListNode)); + 
OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock); + *ppsTransferContext = psTransferContext; + } + + return PVRSRV_OK; + +fail_2dtransfercontext: + _Destroy3DTransferContext(&psTransferContext->s3DData, + psTransferContext->psDeviceNode, + psTransferContext->ui32PDumpFlags); +fail_3dtransfercontext: +fail_frameworkcopy: + DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc); +fail_frameworkcreate: + OSLockDestroy(psTransferContext->hLock); +fail_createlock: + OSFreeMem(psTransferContext); + PVR_ASSERT(eError != PVRSRV_OK); + *ppsTransferContext = NULL; + return eError; +} + +PVRSRV_ERROR PVRSRVRGXDestroyTransferContextKM(RGX_SERVER_TQ_CONTEXT *psTransferContext) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice; + IMG_UINT32 i; + + /* remove node from list before calling destroy - as destroy, if successful + * will invalidate the node + * must be re-added if destroy fails + */ + OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock); + dllist_remove_node(&(psTransferContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock); + + if ((psTransferContext->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) && + (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) + { + eError = _Destroy2DTransferContext(&psTransferContext->s2DData, + psTransferContext->psDeviceNode, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + goto fail_destroy2d; + } + /* We've freed the 2D context, don't try to free it again */ + psTransferContext->ui32Flags &= ~RGX_SERVER_TQ_CONTEXT_FLAGS_2D; + } + + if (psTransferContext->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D) + { + eError = _Destroy3DTransferContext(&psTransferContext->s3DData, + psTransferContext->psDeviceNode, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + goto fail_destroy3d; + } + /* We've freed the 3D context, don't try to free it again */ + psTransferContext->ui32Flags &= ~RGX_SERVER_TQ_CONTEXT_FLAGS_3D; + } + + /* free any 
resources within the per-prepare UFO address stores */ + for (i = 0; i < TQ_MAX_PREPARES_PER_SUBMIT; i++) + { + SyncAddrListDeinit(&psTransferContext->asSyncAddrListFence[i]); + SyncAddrListDeinit(&psTransferContext->asSyncAddrListUpdate[i]); + } + + DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc); + + OSLockDestroy(psTransferContext->hLock); + + OSFreeMem(psTransferContext); + + return PVRSRV_OK; + +fail_destroy3d: + +fail_destroy2d: + OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock); + dllist_add_to_tail(&(psDevInfo->sTransferCtxtListHead), &(psTransferContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock); + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +/* + * PVRSRVSubmitTQ3DKickKM + */ +PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32PrepareCount, + IMG_UINT32 *paui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK ***papauiClientUpdateUFODevVarBlock, + IMG_UINT32 **papaui32ClientUpdateSyncOffset, + IMG_UINT32 **papaui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE i2DUpdateTimeline, + PVRSRV_FENCE *pi2DUpdateFence, + PVRSRV_TIMELINE i3DUpdateTimeline, + PVRSRV_FENCE *pi3DUpdateFence, + IMG_CHAR szFenceName[32], + IMG_UINT32 *paui32FWCommandSize, + IMG_UINT8 **papaui8FWCommand, + IMG_UINT32 *pui32TQPrepareFlags, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32SyncPMRCount, + IMG_UINT32 *paui32SyncPMRFlags, + PMR **ppsSyncPMRs) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelper; + RGX_CCB_CMD_HELPER_DATA *pas2DCmdHelper; + IMG_UINT32 ui323DCmdCount = 0; + IMG_UINT32 ui322DCmdCount = 0; + IMG_UINT32 ui323DCmdLast = 0; + IMG_UINT32 ui322DCmdLast = 0; + IMG_UINT32 ui323DCmdOffset = 0; + IMG_UINT32 ui322DCmdOffset = 0; + IMG_UINT32 ui32PDumpFlags = PDUMP_FLAGS_NONE; + IMG_UINT32 i; + 
IMG_UINT64 uiCheckFenceUID = 0; + IMG_UINT64 ui2DUpdateFenceUID = 0; + IMG_UINT64 ui3DUpdateFenceUID = 0; + + PSYNC_CHECKPOINT ps2DUpdateSyncCheckpoint = NULL; + PSYNC_CHECKPOINT ps3DUpdateSyncCheckpoint = NULL; + PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; + IMG_UINT32 ui32FenceSyncCheckpointCount = 0; + IMG_UINT32 *pui322DIntAllocatedUpdateValues = NULL; + IMG_UINT32 *pui323DIntAllocatedUpdateValues = NULL; + PVRSRV_CLIENT_SYNC_PRIM *ps2DFenceTimelineUpdateSync = NULL; + PVRSRV_CLIENT_SYNC_PRIM *ps3DFenceTimelineUpdateSync = NULL; + IMG_UINT32 ui322DFenceTimelineUpdateValue = 0; + IMG_UINT32 ui323DFenceTimelineUpdateValue = 0; + void *pv2DUpdateFenceFinaliseData = NULL; + void *pv3DUpdateFenceFinaliseData = NULL; +#if defined(SUPPORT_BUFFER_SYNC) + PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL; + struct pvr_buffer_sync_append_data *psBufferSyncData = NULL; + PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL; + IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0; +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_ERROR eError2; + PVRSRV_FENCE i2DUpdateFence = PVRSRV_NO_FENCE; + PVRSRV_FENCE i3DUpdateFence = PVRSRV_NO_FENCE; + IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); + IMG_UINT32 ui32PreparesDone = 0; + + if (i2DUpdateTimeline != PVRSRV_NO_TIMELINE && !pi2DUpdateFence) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + if (i3DUpdateTimeline != PVRSRV_NO_TIMELINE && !pi3DUpdateFence) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Validate sync prim fence/update value ptrs + * for each prepare. + */ + { + IMG_UINT32 ui32Prepare; + IMG_UINT32 *pui32UpdateCount = paui32ClientUpdateCount; + IMG_UINT32 **papui32UpdateValue = papaui32ClientUpdateValue; + + /* Check that we have not been given a null ptr for + * update count parameters. 
+ */ + PVR_LOG_RETURN_IF_FALSE((paui32ClientUpdateCount != NULL), + "paui32ClientUpdateCount NULL", + PVRSRV_ERROR_INVALID_PARAMS); + + for (ui32Prepare=0; ui32Prepare 0) + { + PVR_LOG_RETURN_IF_FALSE(*papui32UpdateValue != NULL, + "paui32ClientUpdateValue NULL but " + "ui32ClientUpdateCount > 0", + PVRSRV_ERROR_INVALID_PARAMS); + } + /* Advance local ptr to update values ptr for next prepare. */ + papui32UpdateValue++; + /* Advance local ptr to update count for next prepare. */ + pui32UpdateCount++; + } + } + + /* Ensure the string is null-terminated (Required for safety) */ + szFenceName[31] = '\0'; + + if ((ui32PrepareCount == 0) || (ui32PrepareCount > TQ_MAX_PREPARES_PER_SUBMIT)) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (ui32SyncPMRCount != 0) + { + if (!ppsSyncPMRs) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + +#if defined(SUPPORT_BUFFER_SYNC) + /* PMR sync is valid only when there is no batching */ + if ((ui32PrepareCount != 1)) +#endif + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + + OSLockAcquire(psTransferContext->hLock); + + /* We can't allocate the required amount of stack space on all consumer architectures */ + pas3DCmdHelper = OSAllocMem(sizeof(*pas3DCmdHelper) * ui32PrepareCount); + if (pas3DCmdHelper == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc3dhelper; + } + pas2DCmdHelper = OSAllocMem(sizeof(*pas2DCmdHelper) * ui32PrepareCount); + if (pas2DCmdHelper == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc2dhelper; + } + + if (iCheckFence != PVRSRV_NO_FENCE) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psDeviceNode->hSyncCheckpointContext=<%p>...", __func__, iCheckFence, (void*)psDeviceNode->hSyncCheckpointContext)); + /* Resolve the sync checkpoints that make up the input fence */ + eError = SyncCheckpointResolveFence(psDeviceNode->hSyncCheckpointContext, + iCheckFence, + &ui32FenceSyncCheckpointCount, + &apsFenceSyncCheckpoints, + 
&uiCheckFenceUID, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __func__, eError)); + goto fail_resolve_fencesync_input_fence; + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints)); +#if defined(TRANSFER_CHECKPOINT_DEBUG) + if (ui32FenceSyncCheckpointCount > 0) + { + IMG_UINT32 ii; + for (ii=0; ii", __func__, ii, (void*)psNextCheckpoint)); + } + } +#endif + } + /* + Ensure we do the right thing for server syncs which cross call boundaries + */ + for (i=0;iasSyncAddrListFence[i]; + SYNC_ADDR_LIST *psSyncAddrListUpdate = &psTransferContext->asSyncAddrListUpdate[i]; + IMG_UINT32 ui32IntClientFenceCount = 0U; + IMG_UINT32 ui32IntClientUpdateCount = paui32ClientUpdateCount[i]; + IMG_UINT32 *paui32IntUpdateValue = papaui32ClientUpdateValue[i]; +#if defined(SUPPORT_BUFFER_SYNC) + struct pvr_buffer_sync_context *psBufferSyncContext; +#endif + + PVRSRV_FENCE *piUpdateFence = NULL; + PVRSRV_TIMELINE iUpdateTimeline = PVRSRV_NO_TIMELINE; + void **ppvUpdateFenceFinaliseData = NULL; + PSYNC_CHECKPOINT * ppsUpdateSyncCheckpoint = NULL; + PVRSRV_CLIENT_SYNC_PRIM **ppsFenceTimelineUpdateSync = NULL; + IMG_UINT32 *pui32FenceTimelineUpdateValue = NULL; + IMG_UINT32 **ppui32IntAllocatedUpdateValues = NULL; + IMG_BOOL bCheckFence = IMG_FALSE; + IMG_BOOL bUpdateFence = IMG_FALSE; + IMG_UINT64 *puiUpdateFenceUID = NULL; + + IMG_BOOL bCCBStateOpen = IMG_FALSE; + + if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 3D)) + { + psServerCommonCtx = psTransferContext->s3DData.psServerCommonContext; + psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx); + pszCommandName = "TQ-3D"; + psCmdHelper = &pas3DCmdHelper[ui323DCmdCount++]; + eType = RGXFWIF_CCB_CMD_TYPE_TQ_3D; +#if defined(SUPPORT_BUFFER_SYNC) + psBufferSyncContext = 
psTransferContext->s3DData.psBufferSyncContext; +#endif + bCheckFence = ui323DCmdCount == 1; + bUpdateFence = ui323DCmdCount == ui323DCmdLast + && i3DUpdateTimeline != PVRSRV_NO_TIMELINE; + + if (bUpdateFence) + { + piUpdateFence = &i3DUpdateFence; + iUpdateTimeline = i3DUpdateTimeline; + ppvUpdateFenceFinaliseData = &pv3DUpdateFenceFinaliseData; + ppsUpdateSyncCheckpoint = &ps3DUpdateSyncCheckpoint; + ppsFenceTimelineUpdateSync = &ps3DFenceTimelineUpdateSync; + pui32FenceTimelineUpdateValue = &ui323DFenceTimelineUpdateValue; + ppui32IntAllocatedUpdateValues = &pui323DIntAllocatedUpdateValues; + puiUpdateFenceUID = &ui3DUpdateFenceUID; + } + } + else if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 2D) && + (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) + { + psServerCommonCtx = psTransferContext->s2DData.psServerCommonContext; + psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx); + pszCommandName = "TQ-2D"; + psCmdHelper = &pas2DCmdHelper[ui322DCmdCount++]; + eType = RGXFWIF_CCB_CMD_TYPE_TQ_2D; +#if defined(SUPPORT_BUFFER_SYNC) + psBufferSyncContext = psTransferContext->s2DData.psBufferSyncContext; +#endif + bCheckFence = ui322DCmdCount == 1; + bUpdateFence = ui322DCmdCount == ui322DCmdLast + && i2DUpdateTimeline != PVRSRV_NO_TIMELINE; + + if (bUpdateFence) + { + piUpdateFence = &i2DUpdateFence; + iUpdateTimeline = i2DUpdateTimeline; + ppvUpdateFenceFinaliseData = &pv2DUpdateFenceFinaliseData; + ppsUpdateSyncCheckpoint = &ps2DUpdateSyncCheckpoint; + ppsFenceTimelineUpdateSync = &ps2DFenceTimelineUpdateSync; + pui32FenceTimelineUpdateValue = &ui322DFenceTimelineUpdateValue; + ppui32IntAllocatedUpdateValues = &pui322DIntAllocatedUpdateValues; + puiUpdateFenceUID = &ui2DUpdateFenceUID; + } + } + else + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto fail_prepare_loop; + } + + if (i == 0) + { + ui32PDumpFlags = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) != 0) ? 
PDUMP_FLAGS_CONTINUOUS : PDUMP_FLAGS_NONE; + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, + "%s Command Server Submit on FWCtx %08x", pszCommandName, FWCommonContextGetFWAddress(psServerCommonCtx).ui32Addr); + psTransferContext->ui32PDumpFlags |= ui32PDumpFlags; + } + else + { + IMG_UINT32 ui32NewPDumpFlags = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) != 0) ? PDUMP_FLAGS_CONTINUOUS : PDUMP_FLAGS_NONE; + if (ui32NewPDumpFlags != ui32PDumpFlags) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_DPF((PVR_DBG_ERROR, "%s: Mixing of continuous and non-continuous command in a batch is not permitted", __func__)); + goto fail_prepare_loop; + } + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psTransferContext->sSyncAddrListFence, %d fences)", __func__, ui32IntClientFenceCount)); + eError = SyncAddrListPopulate(psSyncAddrListFence, + 0, + NULL, + NULL); + if (eError != PVRSRV_OK) + { + goto fail_prepare_loop; + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psTransferContext->asSyncAddrListUpdate[], %d updates)", __func__, ui32IntClientUpdateCount)); + eError = SyncAddrListPopulate(psSyncAddrListUpdate, + ui32IntClientUpdateCount, + papauiClientUpdateUFODevVarBlock[i], + papaui32ClientUpdateSyncOffset[i]); + if (eError != PVRSRV_OK) + { + goto fail_prepare_loop; + } + if (!pauiIntUpdateUFOAddress) + { + pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs; + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: (after sync prims) ui32IntClientUpdateCount=%d", __func__, ui32IntClientUpdateCount)); + if (ui32SyncPMRCount) + { +#if defined(SUPPORT_BUFFER_SYNC) + int err; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling pvr_buffer_sync_resolve_and_create_fences", __func__)); + err = pvr_buffer_sync_resolve_and_create_fences(psBufferSyncContext, + psTransferContext->psDeviceNode->hSyncCheckpointContext, + ui32SyncPMRCount, + ppsSyncPMRs, + paui32SyncPMRFlags, + &ui32BufferFenceSyncCheckpointCount, + &apsBufferFenceSyncCheckpoints, + &psBufferUpdateSyncCheckpoint, + 
&psBufferSyncData); + if (err) + { + switch (err) + { + case -EINTR: + eError = PVRSRV_ERROR_RETRY; + break; + case -ENOMEM: + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + break; + default: + eError = PVRSRV_ERROR_INVALID_PARAMS; + break; + } + + if (eError != PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_ERROR, "%s: pvr_buffer_sync_resolve_and_create_fences failed (%s)", __func__, PVRSRVGetErrorString(eError))); + } + goto fail_resolve_buffersync_input_fence; + } + + /* Append buffer sync fences */ + if (ui32BufferFenceSyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d buffer sync checkpoints to TQ Fence (psSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>)...", __func__, ui32BufferFenceSyncCheckpointCount, (void*)psSyncAddrListFence , (void*)pauiIntFenceUFOAddress)); + SyncAddrListAppendAndDeRefCheckpoints(psSyncAddrListFence, + ui32BufferFenceSyncCheckpointCount, + apsBufferFenceSyncCheckpoints); + if (!pauiIntFenceUFOAddress) + { + pauiIntFenceUFOAddress = psSyncAddrListFence->pasFWAddrs; + } + ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount; + } + + if (psBufferUpdateSyncCheckpoint) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 buffer sync checkpoint<%p> to TQ Update (&psTransferContext->asSyncAddrListUpdate[i]=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)psBufferUpdateSyncCheckpoint, (void*)psSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress)); + /* Append the update (from output fence) */ + SyncAddrListAppendCheckpoints(psSyncAddrListUpdate, + 1, + &psBufferUpdateSyncCheckpoint); + if (!pauiIntUpdateUFOAddress) + { + pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs; + } + ui32IntClientUpdateCount++; + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: (after buffer_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount)); +#else /* defined(SUPPORT_BUFFER_SYNC) */ + PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __func__, 
ui32SyncPMRCount)); + PVR_DPF((PVR_DBG_ERROR, "%s: <--EXIT(%d)", __func__, PVRSRV_ERROR_INVALID_PARAMS)); + OSLockRelease(psTransferContext->hLock); + return PVRSRV_ERROR_INVALID_PARAMS; +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + } + + /* Create the output fence (if required) */ + if (bUpdateFence) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (piUpdateFence=%p, iUpdateTimeline=%d, psTranserContext->psDeviceNode->hSyncCheckpointContext=<%p>)", __func__, piUpdateFence, iUpdateTimeline, (void*)psDeviceNode->hSyncCheckpointContext)); + eError = SyncCheckpointCreateFence(psDeviceNode, + szFenceName, + iUpdateTimeline, + psDeviceNode->hSyncCheckpointContext, + piUpdateFence, + puiUpdateFenceUID, + ppvUpdateFenceFinaliseData, + ppsUpdateSyncCheckpoint, + (void*)ppsFenceTimelineUpdateSync, + pui32FenceTimelineUpdateValue, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: SyncCheckpointCreateFence failed (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_prepare_loop; + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: returned from SyncCheckpointCreateFence (piUpdateFence=%p)", __func__, piUpdateFence)); + + /* Append the sync prim update for the timeline (if required) */ + if (*ppsFenceTimelineUpdateSync) + { + IMG_UINT32 *pui32TimelineUpdateWp = NULL; + + /* Allocate memory to hold the list of update values (including our timeline update) */ + *ppui32IntAllocatedUpdateValues = OSAllocMem(sizeof(**ppui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); + if (!*ppui32IntAllocatedUpdateValues) + { + /* Failed to allocate memory */ + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_prepare_loop; + } + OSCachedMemSet(*ppui32IntAllocatedUpdateValues, 0xbb, sizeof(**ppui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); +#if defined(SUPPORT_BUFFER_SYNC) + if (psBufferUpdateSyncCheckpoint) + { + /* Copy the update values into the new memory, then append our timeline update value */ + 
OSCachedMemCopy(*ppui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(**ppui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount-1)); + pui32TimelineUpdateWp = *ppui32IntAllocatedUpdateValues + (ui32IntClientUpdateCount-1); + } + else +#endif + { + /* Copy the update values into the new memory, then append our timeline update value */ + OSCachedMemCopy(*ppui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(**ppui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount); + pui32TimelineUpdateWp = *ppui32IntAllocatedUpdateValues + ui32IntClientUpdateCount; + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: Appending the additional update value 0x%x)", __func__, *pui32FenceTimelineUpdateValue)); + /* Now set the additional update value */ + *pui32TimelineUpdateWp = *pui32FenceTimelineUpdateValue; +#if defined(TRANSFER_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)*ppui32IntAllocatedUpdateValues; + + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + /* Now append the timeline sync prim addr to the transfer context update list */ + SyncAddrListAppendSyncPrim(psSyncAddrListUpdate, + *ppsFenceTimelineUpdateSync); + ui32IntClientUpdateCount++; +#if defined(TRANSFER_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)*ppui32IntAllocatedUpdateValues; + + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */ + CHKPT_DBG((PVR_DBG_ERROR, "%s: set paui32IntUpdateValue<%p> to point to *ppui32IntAllocatedUpdateValues<%p>", __func__, (void*)paui32IntUpdateValue, (void*)*ppui32IntAllocatedUpdateValues)); + paui32IntUpdateValue = *ppui32IntAllocatedUpdateValues; + } + } + + if (bCheckFence && ui32FenceSyncCheckpointCount) + { + /* Append the checks (from input fence) */ + 
if (ui32FenceSyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to TQ Fence (psSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)psSyncAddrListFence)); + SyncAddrListAppendCheckpoints(psSyncAddrListFence, + ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + if (!pauiIntFenceUFOAddress) + { + pauiIntFenceUFOAddress = psSyncAddrListFence->pasFWAddrs; + } + ui32IntClientFenceCount += ui32FenceSyncCheckpointCount; + } +#if defined(TRANSFER_CHECKPOINT_DEBUG) + if (ui32IntClientFenceCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress; + + for (iii=0; iiipasFWAddrs[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + } + if (bUpdateFence && *ppsUpdateSyncCheckpoint) + { + /* Append the update (from output fence) */ + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to TQ Update (psSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)&psTransferContext->asSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress)); + SyncAddrListAppendCheckpoints(psSyncAddrListUpdate, + 1, + ppsUpdateSyncCheckpoint); + if (!pauiIntUpdateUFOAddress) + { + pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs; + } + ui32IntClientUpdateCount++; +#if defined(TRANSFER_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress; + + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount)); + +#if (ENABLE_TQ_UFO_DUMP == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: dumping TQ fence/updates syncs...", __func__)); + { + IMG_UINT32 ii; + PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress; + PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = 
pauiIntUpdateUFOAddress; + IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue; + + /* Dump Fence syncs and Update syncs */ + PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TQ fence syncs (&psTransferContext->asSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psTransferContext->asSyncAddrListFence, (void*)pauiIntFenceUFOAddress)); + for (ii=0; iiui32Addr & 0x1); + PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr)); + + psTmpIntFenceUFOAddress++; + } + PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TQ update syncs (&psTransferContext->asSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psTransferContext->asSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress)); + for (ii=0; iiui32Addr & 0x1) + { + PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. 
FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue)); + pui32TmpIntUpdateValue++; + } + psTmpIntUpdateUFOAddress++; + } + } +#endif + + ui32PreparesDone++; + + /* + Create the command helper data for this command + */ + RGXCmdHelperInitCmdCCB(psClientCCB, + ui32IntClientFenceCount, + pauiIntFenceUFOAddress, + NULL, /* fence value */ + ui32IntClientUpdateCount, + pauiIntUpdateUFOAddress, + paui32IntUpdateValue, + paui32FWCommandSize[i], + papaui8FWCommand[i], + eType, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, + NULL, + pszCommandName, + bCCBStateOpen, + psCmdHelper); + } + + /* + Acquire space for all the commands in one go + */ + if (ui323DCmdCount) + { + eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount, + &pas3DCmdHelper[0]); + if (eError != PVRSRV_OK) + { + goto fail_cmdacquire; + } + } + + if (ui322DCmdCount) + { + eError = RGXCmdHelperAcquireCmdCCB(ui322DCmdCount, + &pas2DCmdHelper[0]); + if (eError != PVRSRV_OK) + { + goto fail_cmdacquire; + } + } + + /* + We should acquire the kernel CCB(s) space here as the schedule could fail + and we would have to roll back all the syncs + */ + + if (ui323DCmdCount) + { + ui323DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s3DData.psServerCommonContext)); + RGXCmdHelperReleaseCmdCCB(ui323DCmdCount, + &pas3DCmdHelper[0], + "TQ_3D", + FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext).ui32Addr); + } + + if ((ui322DCmdCount) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) + { + ui322DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s2DData.psServerCommonContext)); + RGXCmdHelperReleaseCmdCCB(ui322DCmdCount, + &pas2DCmdHelper[0], + "TQ_2D", + FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext).ui32Addr); + } + + if (ui323DCmdCount) + { + RGXFWIF_KCCB_CMD s3DKCCBCmd; + IMG_UINT32 
ui32FWCtx = FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext).ui32Addr; + RGX_CLIENT_CCB *ps3DTQCCB = FWCommonContextGetClientCCB(psTransferContext->s3DData.psServerCommonContext); + + /* Construct the kernel 3D CCB command. */ + s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + s3DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext); + s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(ps3DTQCCB); + s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(ps3DTQCCB); + s3DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; + s3DKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; + HTBLOGK(HTB_SF_MAIN_KICK_3D, + s3DKCCBCmd.uCmdData.sCmdKickData.psContext, + ui323DCmdOffset); + RGXSRV_HWPERF_ENQ(psTransferContext, + OSGetCurrentClientProcessIDKM(), + ui32FWCtx, + ui32ExtJobRef, + ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_TQ3D, + iCheckFence, + i3DUpdateFence, + i3DUpdateTimeline, + uiCheckFenceUID, + ui3DUpdateFenceUID, + NO_DEADLINE, + NO_CYCEST); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError2 = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_3D, + &s3DKCCBCmd, + ui32ClientCacheOpSeqNum, + ui32PDumpFlags); + if (eError2 != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + PVRGpuTraceEnqueueEvent(psDeviceNode, ui32FWCtx, ui32ExtJobRef, + ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TQ3D); + } + + if ((ui322DCmdCount) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) + { + RGXFWIF_KCCB_CMD s2DKCCBCmd; + IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext).ui32Addr; + RGX_CLIENT_CCB *ps2DTQCCB = FWCommonContextGetClientCCB(psTransferContext->s2DData.psServerCommonContext); + + /* Construct the kernel 2D CCB command. 
*/ + s2DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + s2DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext); + s2DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(ps2DTQCCB); + s2DKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(ps2DTQCCB); + s2DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; + + HTBLOGK(HTB_SF_MAIN_KICK_2D, + s2DKCCBCmd.uCmdData.sCmdKickData.psContext, + ui322DCmdOffset); + RGXSRV_HWPERF_ENQ(psTransferContext, + OSGetCurrentClientProcessIDKM(), + ui32FWCtx, + ui32ExtJobRef, + ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_TQ2D, + iCheckFence, + i2DUpdateFence, + i2DUpdateTimeline, + uiCheckFenceUID, + ui2DUpdateFenceUID, + NO_DEADLINE, + NO_CYCEST); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError2 = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_2D, + &s2DKCCBCmd, + ui32ClientCacheOpSeqNum, + ui32PDumpFlags); + if (eError2 != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + PVRGpuTraceEnqueueEvent(psDeviceNode, ui32FWCtx, ui32ExtJobRef, + ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TQ2D); + } + + /* + * Now check eError (which may have returned an error from our earlier calls + * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first + * so we check it now... 
+ */ + if (eError != PVRSRV_OK ) + { + goto fail_cmdacquire; + } + +#if defined(NO_HARDWARE) + /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ + if (ps2DUpdateSyncCheckpoint) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Signalling TLA NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)ps2DUpdateSyncCheckpoint, SyncCheckpointGetId(ps2DUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(ps2DUpdateSyncCheckpoint))); + SyncCheckpointSignalNoHW(ps2DUpdateSyncCheckpoint); + } + if (ps2DFenceTimelineUpdateSync) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Updating TLA NOHW sync prim<%p> to %d", __func__, (void*)ps2DFenceTimelineUpdateSync, ui322DFenceTimelineUpdateValue)); + SyncPrimNoHwUpdate(ps2DFenceTimelineUpdateSync, ui322DFenceTimelineUpdateValue); + } + if (ps3DUpdateSyncCheckpoint) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Signalling TQ3D NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)ps3DUpdateSyncCheckpoint, SyncCheckpointGetId(ps3DUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(ps3DUpdateSyncCheckpoint))); + SyncCheckpointSignalNoHW(ps3DUpdateSyncCheckpoint); + } + if (ps3DFenceTimelineUpdateSync) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Updating TQ3D NOHW sync prim<%p> to %d", __func__, (void*)ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue)); + SyncPrimNoHwUpdate(ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue); + } + SyncCheckpointNoHWUpdateTimelines(NULL); +#endif /* defined(NO_HARDWARE) */ + +#if defined(SUPPORT_BUFFER_SYNC) + if (psBufferSyncData) + { + pvr_buffer_sync_kick_succeeded(psBufferSyncData); + } + if (apsBufferFenceSyncCheckpoints) + { + kfree(apsBufferFenceSyncCheckpoints); + } +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + + if (pi2DUpdateFence) + { + *pi2DUpdateFence = i2DUpdateFence; + } + if (pi3DUpdateFence) + { + *pi3DUpdateFence = i3DUpdateFence; + } + if (pv2DUpdateFenceFinaliseData && (i2DUpdateFence != PVRSRV_NO_FENCE)) + { + SyncCheckpointFinaliseFence(psDeviceNode, 
i2DUpdateFence, pv2DUpdateFenceFinaliseData, + ps2DUpdateSyncCheckpoint, szFenceName); + } + if (pv3DUpdateFenceFinaliseData && (i3DUpdateFence != PVRSRV_NO_FENCE)) + { + SyncCheckpointFinaliseFence(psDeviceNode, i3DUpdateFence, pv3DUpdateFenceFinaliseData, + ps3DUpdateSyncCheckpoint, szFenceName); + } + + OSFreeMem(pas2DCmdHelper); + OSFreeMem(pas3DCmdHelper); + + /* Drop the references taken on the sync checkpoints in the + * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } + /* Free memory allocated to hold the internal list of update values */ + if (pui322DIntAllocatedUpdateValues) + { + OSFreeMem(pui322DIntAllocatedUpdateValues); + pui322DIntAllocatedUpdateValues = NULL; + } + if (pui323DIntAllocatedUpdateValues) + { + OSFreeMem(pui323DIntAllocatedUpdateValues); + pui323DIntAllocatedUpdateValues = NULL; + } + + OSLockRelease(psTransferContext->hLock); + return PVRSRV_OK; + +/* + No resources are created in this function so there is nothing to free + unless we had to merge syncs. + If we fail after the client CCB acquire there is still nothing to do + as only the client CCB release will modify the client CCB +*/ +fail_cmdacquire: +fail_prepare_loop: + + PVR_ASSERT(eError != PVRSRV_OK); + + for (i=0;iasSyncAddrListFence[i]); + SyncAddrListRollbackCheckpoints(psDeviceNode, &psTransferContext->asSyncAddrListUpdate[i]); + } +#if defined(SUPPORT_BUFFER_SYNC) + if (ui32PreparesDone > 0) + { + /* Prevent duplicate rollback in case of buffer sync. 
*/ + psBufferUpdateSyncCheckpoint = NULL; + } +#endif + + /* Free memory allocated to hold the internal list of update values */ + if (pui322DIntAllocatedUpdateValues) + { + OSFreeMem(pui322DIntAllocatedUpdateValues); + pui322DIntAllocatedUpdateValues = NULL; + } + if (pui323DIntAllocatedUpdateValues) + { + OSFreeMem(pui323DIntAllocatedUpdateValues); + pui323DIntAllocatedUpdateValues = NULL; + } + + if (i2DUpdateFence != PVRSRV_NO_FENCE) + { + SyncCheckpointRollbackFenceData(i2DUpdateFence, pv2DUpdateFenceFinaliseData); + } + if (i3DUpdateFence != PVRSRV_NO_FENCE) + { + SyncCheckpointRollbackFenceData(i3DUpdateFence, pv3DUpdateFenceFinaliseData); + } +#if defined(SUPPORT_BUFFER_SYNC) + if (psBufferUpdateSyncCheckpoint) + { + SyncAddrListRollbackCheckpoints(psDeviceNode, &psTransferContext->asSyncAddrListUpdate[0]); + } + if (psBufferSyncData) + { + pvr_buffer_sync_kick_failed(psBufferSyncData); + } + if (apsBufferFenceSyncCheckpoints) + { + kfree(apsBufferFenceSyncCheckpoints); + } +fail_resolve_buffersync_input_fence: +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + + /* Drop the references taken on the sync checkpoints in the + * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } +fail_resolve_fencesync_input_fence: + OSFreeMem(pas2DCmdHelper); +fail_alloc2dhelper: + OSFreeMem(pas3DCmdHelper); +fail_alloc3dhelper: + + OSLockRelease(psTransferContext->hLock); + return eError; +} + + +PVRSRV_ERROR PVRSRVRGXSetTransferContextPriorityKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDevNode, + RGX_SERVER_TQ_CONTEXT *psTransferContext, + IMG_UINT32 ui32Priority) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; + + PVR_UNREFERENCED_PARAMETER(psDevNode); + + 
OSLockAcquire(psTransferContext->hLock); + + if ((psTransferContext->s2DData.ui32Priority != ui32Priority) && + (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) + { + eError = ContextSetPriority(psTransferContext->s2DData.psServerCommonContext, + psConnection, + psTransferContext->psDeviceNode->pvDevice, + ui32Priority, + RGXFWIF_DM_2D); + if (eError != PVRSRV_OK) + { + if (eError != PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 2D part of the transfercontext (%s)", __func__, PVRSRVGetErrorString(eError))); + } + goto fail_2dcontext; + } + psTransferContext->s2DData.ui32Priority = ui32Priority; + } + + if (psTransferContext->s3DData.ui32Priority != ui32Priority) + { + eError = ContextSetPriority(psTransferContext->s3DData.psServerCommonContext, + psConnection, + psTransferContext->psDeviceNode->pvDevice, + ui32Priority, + RGXFWIF_DM_3D); + if (eError != PVRSRV_OK) + { + if (eError != PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 3D part of the transfercontext (%s)", __func__, PVRSRVGetErrorString(eError))); + } + goto fail_3dcontext; + } + psTransferContext->s3DData.ui32Priority = ui32Priority; + } + + OSLockRelease(psTransferContext->hLock); + return PVRSRV_OK; + +fail_3dcontext: + +fail_2dcontext: + OSLockRelease(psTransferContext->hLock); + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR PVRSRVRGXSetTransferContextPropertyKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, + RGX_CONTEXT_PROPERTY eContextProperty, + IMG_UINT64 ui64Input, + IMG_UINT64 *pui64Output) +{ + PVRSRV_ERROR eError; + PVRSRV_ERROR eError2 = PVRSRV_OK; + + switch (eContextProperty) + { + case RGX_CONTEXT_PROPERTY_FLAGS: + { + OSLockAcquire(psTransferContext->hLock); + eError = FWCommonContextSetFlags(psTransferContext->s2DData.psServerCommonContext, + (IMG_UINT32)ui64Input); + if (eError == PVRSRV_OK) + { + eError2 = FWCommonContextSetFlags(psTransferContext->s3DData.psServerCommonContext, + 
(IMG_UINT32)ui64Input); + } + if ((eError == PVRSRV_OK) && (eError2 == PVRSRV_OK)) + { + psTransferContext->ui32Flags = (IMG_UINT32)ui64Input; + } + OSLockRelease(psTransferContext->hLock); + PVR_LOG_IF_ERROR(eError, "FWCommonContextSetFlags eError"); + PVR_LOG_IF_ERROR(eError2, "FWCommonContextSetFlags eError2"); + break; + } + + default: + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty)); + eError = PVRSRV_ERROR_NOT_SUPPORTED; + } + } + + return eError; +} + +void DumpTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel) +{ + DLLIST_NODE *psNode, *psNext; + + OSWRLockAcquireRead(psDevInfo->hTransferCtxListLock); + + dllist_foreach_node(&psDevInfo->sTransferCtxtListHead, psNode, psNext) + { + RGX_SERVER_TQ_CONTEXT *psCurrentServerTransferCtx = + IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_CONTEXT, sListNode); + + if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) && + (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) + { + DumpFWCommonContextInfo(psCurrentServerTransferCtx->s2DData.psServerCommonContext, + pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + } + + if (psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D) + { + DumpFWCommonContextInfo(psCurrentServerTransferCtx->s3DData.psServerCommonContext, + pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + } + } + + OSWRLockReleaseRead(psDevInfo->hTransferCtxListLock); +} + +IMG_UINT32 CheckForStalledClientTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + DLLIST_NODE *psNode, *psNext; + IMG_UINT32 ui32ContextBitMask = 0; + + OSWRLockAcquireRead(psDevInfo->hTransferCtxListLock); + + dllist_foreach_node(&psDevInfo->sTransferCtxtListHead, psNode, psNext) + { + RGX_SERVER_TQ_CONTEXT *psCurrentServerTransferCtx = + IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_CONTEXT, sListNode); + + if 
((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) && + (NULL != psCurrentServerTransferCtx->s2DData.psServerCommonContext) && + (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) + { + if (CheckStalledClientCommonContext(psCurrentServerTransferCtx->s2DData.psServerCommonContext, RGX_KICK_TYPE_DM_TQ2D) == PVRSRV_ERROR_CCCB_STALLED) + { + ui32ContextBitMask |= RGX_KICK_TYPE_DM_TQ2D; + } + } + + if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D) && (NULL != psCurrentServerTransferCtx->s3DData.psServerCommonContext)) + { + if ((CheckStalledClientCommonContext(psCurrentServerTransferCtx->s3DData.psServerCommonContext, RGX_KICK_TYPE_DM_TQ3D) == PVRSRV_ERROR_CCCB_STALLED)) + { + ui32ContextBitMask |= RGX_KICK_TYPE_DM_TQ3D; + } + } + } + + OSWRLockReleaseRead(psDevInfo->hTransferCtxListLock); + return ui32ContextBitMask; +} + +/**************************************************************************//** + End of file (rgxtransfer.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxtransfer.h b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxtransfer.h new file mode 100644 index 000000000000..6a8cc3e364eb --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxtransfer.h @@ -0,0 +1,153 @@ +/*************************************************************************/ /*! +@File +@Title RGX Transfer queue Functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the RGX Transfer queue Functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__RGXTRANSFER_H__) +#define __RGXTRANSFER_H__ + +#include "devicemem.h" +#include "device.h" +#include "rgxdevice.h" +#include "rgxfwutils.h" +#include "rgx_fwif_resetframework.h" +#include "rgxdebug.h" +#include "pvr_notifier.h" + +#include "sync_server.h" +#include "connection_server.h" + +typedef struct _RGX_SERVER_TQ_CONTEXT_ RGX_SERVER_TQ_CONTEXT; + +/*! +******************************************************************************* + + @Function PVRSRVRGXCreateTransferContextKM + + @Description + Server-side implementation of RGXCreateTransferContext + + @Input pvDeviceNode - device node + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32FrameworkCommandSize, + IMG_PBYTE pabyFrameworkCommand, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32PackedCCBSizeU8888, + IMG_UINT32 ui32ContextFlags, + RGX_SERVER_TQ_CONTEXT **ppsTransferContext, + PMR **ppsCLIPMRMem, + PMR **ppsUSCPMRMem); + + +/*! +******************************************************************************* + + @Function PVRSRVRGXDestroyTransferContextKM + + @Description + Server-side implementation of RGXDestroyTransferContext + + @Input psTransferContext - Transfer context + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXDestroyTransferContextKM(RGX_SERVER_TQ_CONTEXT *psTransferContext); + +/*! 
+******************************************************************************* + + @Function PVRSRVSubmitTransferKM + + @Description + Schedules one or more 2D or 3D HW commands on the firmware + + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32PrepareCount, + IMG_UINT32 *paui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK ***papauiClientUpdateUFODevVarBlock, + IMG_UINT32 **papaui32ClientUpdateSyncOffset, + IMG_UINT32 **papaui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE i2DUpdateTimeline, + PVRSRV_FENCE *pi2DUpdateFence, + PVRSRV_TIMELINE i3DUpdateTimeline, + PVRSRV_FENCE *pi3DUpdateFence, + IMG_CHAR szFenceName[32], + IMG_UINT32 *paui32FWCommandSize, + IMG_UINT8 **papaui8FWCommand, + IMG_UINT32 *pui32TQPrepareFlags, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32SyncPMRCount, + IMG_UINT32 *paui32SyncPMRFlags, + PMR **ppsSyncPMRs); + +PVRSRV_ERROR PVRSRVRGXSetTransferContextPriorityKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDevNode, + RGX_SERVER_TQ_CONTEXT *psTransferContext, + IMG_UINT32 ui32Priority); + +PVRSRV_ERROR PVRSRVRGXSetTransferContextPropertyKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, + RGX_CONTEXT_PROPERTY eContextProperty, + IMG_UINT64 ui64Input, + IMG_UINT64 *pui64Output); + +/* Debug - Dump debug info of transfer contexts on this device */ +void DumpTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel); + +/* Debug/Watchdog - check if client transfer contexts are stalled */ +IMG_UINT32 CheckForStalledClientTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo); + +#endif /* __RGXTRANSFER_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxutils.c b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxutils.c new 
file mode 100644 index 000000000000..f27babc8de9a --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/rogue/rgxutils.c @@ -0,0 +1,221 @@ +/*************************************************************************/ /*! +@File +@Title Device specific utility routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "rgx_fwif_km.h" +#include "pdump_km.h" +#include "osfunc.h" +#include "allocmem.h" +#include "pvr_debug.h" +#include "rgxutils.h" +#include "power.h" +#include "pvrsrv.h" +#include "sync_internal.h" +#include "rgxfwutils.h" + + +PVRSRV_ERROR RGXQueryAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *pvPrivateData, + IMG_UINT32 *pui32State) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_UNREFERENCED_PARAMETER(pvPrivateData); + + if (!psDeviceNode) + return PVRSRV_ERROR_INVALID_PARAMS; + + psDevInfo = psDeviceNode->pvDevice; + *pui32State = psDevInfo->eActivePMConf; + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXSetAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *pvPrivateData, + IMG_UINT32 ui32State) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_UNREFERENCED_PARAMETER(pvPrivateData); + + if (!psDeviceNode || !psDeviceNode->pvDevice) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDevInfo = psDeviceNode->pvDevice; + + if (RGX_ACTIVEPM_FORCE_OFF != ui32State + || !psDevInfo->pvAPMISRData) + { + return PVRSRV_ERROR_NOT_SUPPORTED; + } + +#if !defined(NO_HARDWARE) + eError = OSUninstallMISR(psDevInfo->pvAPMISRData); + if (PVRSRV_OK == eError) + { + psDevInfo->eActivePMConf = RGX_ACTIVEPM_FORCE_OFF; + psDevInfo->pvAPMISRData = NULL; + eError = PVRSRVSetDeviceDefaultPowerState((const 
PPVRSRV_DEVICE_NODE)psDeviceNode, + PVRSRV_DEV_POWER_STATE_ON); + } +#endif + + return eError; +} + +PVRSRV_ERROR RGXQueryPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *pvPrivateData, + IMG_BOOL *pbDisabled) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_UNREFERENCED_PARAMETER(pvPrivateData); + + if (!psDeviceNode || !psDeviceNode->pvDevice) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDevInfo = psDeviceNode->pvDevice; + + *pbDisabled = !psDevInfo->bPDPEnabled; + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXSetPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *pvPrivateData, + IMG_BOOL bDisable) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_UNREFERENCED_PARAMETER(pvPrivateData); + + if (!psDeviceNode || !psDeviceNode->pvDevice) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDevInfo = psDeviceNode->pvDevice; + + psDevInfo->bPDPEnabled = !bDisable; + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXGetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 *pui32DeviceFlags) +{ + if (!pui32DeviceFlags || !psDevInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + *pui32DeviceFlags = psDevInfo->ui32DeviceFlags; + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXSetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Config, + IMG_BOOL bSetNotClear) +{ + if (!psDevInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if ((ui32Config & ~RGXKM_DEVICE_STATE_MASK) != 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Bits outside of device state mask set (input: 0x%x, mask: 0x%x)", + __func__, ui32Config, RGXKM_DEVICE_STATE_MASK)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (bSetNotClear) + { + psDevInfo->ui32DeviceFlags |= ui32Config; + } + else + { + psDevInfo->ui32DeviceFlags &= ~ui32Config; + } + + return PVRSRV_OK; +} + +inline const char * RGXStringifyKickTypeDM(RGX_KICK_TYPE_DM eKickTypeDM) +{ + PVR_ASSERT(eKickTypeDM < RGX_KICK_TYPE_DM_LAST); + + switch (eKickTypeDM) { + case RGX_KICK_TYPE_DM_GP: + return "GP "; + case 
RGX_KICK_TYPE_DM_TDM_2D: + return "TDM/2D "; + case RGX_KICK_TYPE_DM_TA: + return "TA "; + case RGX_KICK_TYPE_DM_3D: + return "3D "; + case RGX_KICK_TYPE_DM_CDM: + return "CDM "; + case RGX_KICK_TYPE_DM_RTU: + return "RTU "; + case RGX_KICK_TYPE_DM_SHG: + return "SHG "; + case RGX_KICK_TYPE_DM_TQ2D: + return "TQ2D "; + case RGX_KICK_TYPE_DM_TQ3D: + return "TQ3D "; + default: + return "Invalid DM "; + } +} + +/****************************************************************************** + End of file (rgxutils.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxbvnc.c b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxbvnc.c new file mode 100644 index 000000000000..a4ed7984db51 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxbvnc.c @@ -0,0 +1,658 @@ +/*************************************************************************/ /*! +@File +@Title BVNC handling specific routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Functions used for BNVC related work +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "img_defs.h" +#include "rgxbvnc.h" +#define _RGXBVNC_C_ +#include "rgx_bvnc_table_km.h" +#undef _RGXBVNC_C_ +#include "oskm_apphint.h" +#include "pvrsrv.h" +#include "pdump_km.h" +#include "rgx_compat_bvnc.h" + +#define RGXBVNC_BUFFER_SIZE (((PVRSRV_MAX_DEVICES)*(RGX_BVNC_STR_SIZE_MAX))+1) + +/* This function searches the given array for a given search value */ +static IMG_UINT64* _RGXSearchBVNCTable( IMG_UINT64 *pui64Array, + IMG_UINT uiEnd, + IMG_UINT64 ui64SearchValue, + IMG_UINT uiRowCount) +{ + IMG_UINT uiStart = 0, index; + IMG_UINT64 value, *pui64Ptr = NULL; + + while (uiStart < uiEnd) + { + index = (uiStart + uiEnd)/2; + pui64Ptr = pui64Array + (index * uiRowCount); + value = *(pui64Ptr); + + if (value == ui64SearchValue) + { + return pui64Ptr; + } + + if (value > ui64SearchValue) + { + uiEnd = index; + }else + { + uiStart = index + 1; + } + } + return NULL; +} +#define RGX_SEARCH_BVNC_TABLE(t, b) (_RGXSearchBVNCTable((IMG_UINT64*)(t), \ + ARRAY_SIZE(t), (b), \ + sizeof((t)[0])/sizeof(IMG_UINT64)) ) + + +#if defined(DEBUG) + +#define PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, szShortName, Feature) \ + if ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] != RGX_FEATURE_VALUE_DISABLED ) \ + { PVR_LOG(("%s %d", szShortName, psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX])); } \ + else \ + { PVR_LOG(("%s N/A", szShortName)); } + +static void _RGXBvncDumpParsedConfig(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + IMG_UINT64 ui64Mask = 0, ui32IdOrNameIdx = 1; + + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NC: ", NUM_CLUSTERS); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "FBCDCA: ", FBCDC_ARCHITECTURE); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MCMB: ", META_COREMEM_BANKS); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MCMS: ", META_COREMEM_SIZE); + 
PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MDMACnt: ", META_DMA_CHANNEL_COUNT); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NIIP: ", NUM_ISP_IPP_PIPES); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NIPS: ", NUM_ISP_PER_SPU); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "PPS: ", PBE_PER_SPU); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NSPU: ", NUM_SPU); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "PBW: ", PHYS_BUS_WIDTH); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "STEArch: ", SCALABLE_TE_ARCH); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SVCEA: ", SCALABLE_VCE); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SLCBanks: ", SLC_BANKS); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SLCCLS: ", SLC_CACHE_LINE_SIZE_BITS); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SLCSize: ", SLC_SIZE_IN_KILOBYTES); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "VASB: ", VIRTUAL_ADDRESS_SPACE_BITS); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "META: ", META); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NOSIDS: ", NUM_OSIDS); + +#if defined(FEATURE_NO_VALUES_NAMES_MAX_IDX) + /* Dump the features with no values */ + ui64Mask = psDevInfo->sDevFeatureCfg.ui64Features; + while (ui64Mask) + { + if (ui64Mask & 0x01) + { + if (ui32IdOrNameIdx <= FEATURE_NO_VALUES_NAMES_MAX_IDX) + { + PVR_LOG(("%s", gaszFeaturesNoValuesNames[ui32IdOrNameIdx - 1])); + } + else + { + PVR_DPF((PVR_DBG_WARNING, + "Feature with Mask doesn't exist: 0x%016" IMG_UINT64_FMTSPECx, + ((IMG_UINT64)1 << (ui32IdOrNameIdx - 1)))); + } + } + ui64Mask >>= 1; + ui32IdOrNameIdx++; + } +#endif + +#if defined(ERNSBRNS_IDS_MAX_IDX) + /* Dump the ERN and BRN flags for this core */ + ui64Mask = psDevInfo->sDevFeatureCfg.ui64ErnsBrns; + ui32IdOrNameIdx = 1; + + while (ui64Mask) + { + if (ui64Mask & 0x1) + { + if (ui32IdOrNameIdx <= ERNSBRNS_IDS_MAX_IDX) + { + PVR_LOG(("ERN/BRN : %d", gaui64ErnsBrnsIDs[ui32IdOrNameIdx - 1])); + } + else + { + PVR_LOG(("Unknown ErnBrn bit: 0x%0" IMG_UINT64_FMTSPECx, ((IMG_UINT64)1 << (ui32IdOrNameIdx - 1)))); + } + } + ui64Mask >>= 1; + ui32IdOrNameIdx++; + } 
+#endif + +} +#endif + +static void _RGXBvncParseFeatureValues(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT64 *pui64Cfg) +{ + IMG_UINT32 ui32Index; + + /* Read the feature values for the runtime BVNC */ + for (ui32Index = 0; ui32Index < RGX_FEATURE_WITH_VALUES_MAX_IDX; ui32Index++) + { + IMG_UINT16 bitPosition = aui16FeaturesWithValuesBitPositions[ui32Index]; + IMG_UINT64 ui64PackedValues = pui64Cfg[2 + bitPosition / 64]; + IMG_UINT16 ui16ValueIndex = (ui64PackedValues & aui64FeaturesWithValuesBitMasks[ui32Index]) >> (bitPosition % 64); + + if (ui16ValueIndex < gaFeaturesValuesMaxIndexes[ui32Index]) + { + if (gaFeaturesValues[ui32Index][ui16ValueIndex] == (IMG_UINT16)RGX_FEATURE_VALUE_DISABLED) + { + psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = RGX_FEATURE_VALUE_DISABLED; + } + else + { + psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = gaFeaturesValues[ui32Index][ui16ValueIndex]; + } + } + else + { + /* This case should never be reached */ + psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = RGX_FEATURE_VALUE_INVALID; + PVR_DPF((PVR_DBG_ERROR, "%s: Feature with index (%d) decoded wrong value index (%d)", __func__, ui32Index, ui16ValueIndex)); + PVR_ASSERT(ui16ValueIndex < gaFeaturesValuesMaxIndexes[ui32Index]); + } + } + + + psDevInfo->sDevFeatureCfg.ui32MAXDMCount = RGXFWIF_DM_MIN_CNT; + psDevInfo->sDevFeatureCfg.ui32MAXDMMTSCount = RGXFWIF_DM_MIN_MTS_CNT; + + /* Get the max number of dusts in the core */ + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS)) + { + RGX_LAYER_PARAMS sParams; + + OSCachedMemSet(&sParams, 0, sizeof(RGX_LAYER_PARAMS)); + sParams.psDevInfo = psDevInfo; + + if (RGX_DEVICE_GET_FEATURE_VALUE(&sParams, POWER_ISLAND_VERSION) == 1) + { + psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = MAX(1, (RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) / 2)); + } + else if (RGX_DEVICE_GET_FEATURE_VALUE(&sParams, POWER_ISLAND_VERSION) == 2) + { + psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = 
RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS); + } + else + { + /* All volcanic cores support power islanding */ + psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = RGX_FEATURE_VALUE_INVALID; + PVR_DPF((PVR_DBG_ERROR, "%s: Power island feature version not found!", __func__)); + PVR_ASSERT(0); + } + } + else + { + /* This case should never be reached as all cores have clusters */ + psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = RGX_FEATURE_VALUE_INVALID; + PVR_DPF((PVR_DBG_ERROR, "%s: Number of clusters feature value missing!", __func__)); + PVR_ASSERT(0); + } + + + /* Transform the META coremem size info in bytes */ + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE)) + { + psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_META_COREMEM_SIZE_IDX] *= 1024; + } +} + +static void _RGXBvncAcquireAppHint(IMG_CHAR *pszBVNC, const IMG_UINT32 ui32RGXDevCount) +{ + const IMG_CHAR *pszAppHintDefault = PVRSRV_APPHINT_RGXBVNC; + void *pvAppHintState = NULL; + IMG_UINT32 ui32BVNCCount = 0; + IMG_BOOL bRet; + IMG_CHAR szBVNCAppHint[RGXBVNC_BUFFER_SIZE]; + IMG_CHAR *pszCurrentBVNC = szBVNCAppHint; + szBVNCAppHint[0] = '\0'; + + OSCreateKMAppHintState(&pvAppHintState); + + bRet = (IMG_BOOL)OSGetKMAppHintSTRING(pvAppHintState, + RGXBVNC, + pszAppHintDefault, + szBVNCAppHint, + sizeof(szBVNCAppHint)); + + OSFreeKMAppHintState(pvAppHintState); + + if (!bRet || (szBVNCAppHint[0] == '\0')) + { + return; + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC module param list: %s",__func__, szBVNCAppHint)); + + while (*pszCurrentBVNC != '\0') + { + IMG_CHAR *pszNext = pszCurrentBVNC; + + if (ui32BVNCCount >= PVRSRV_MAX_DEVICES) + { + break; + } + + while (1) + { + if (*pszNext == ',') + { + pszNext[0] = '\0'; + pszNext++; + break; + } else if (*pszNext == '\0') + { + break; + } + pszNext++; + } + + if (ui32BVNCCount == ui32RGXDevCount) + { + OSStringLCopy(pszBVNC, pszCurrentBVNC, RGX_BVNC_STR_SIZE_MAX); + return; + } + + ui32BVNCCount++; + pszCurrentBVNC = pszNext; + } + + 
PVR_DPF((PVR_DBG_ERROR, "%s: Given module parameters list is shorter than " + "number of actual devices", __func__)); + + /* If only one BVNC parameter is specified, the same is applied for all RGX + * devices detected */ + if (1 == ui32BVNCCount) + { + OSStringLCopy(pszBVNC, szBVNCAppHint, RGX_BVNC_STR_SIZE_MAX); + } +} + +/* Function that parses the BVNC List passed as module parameter */ +static PVRSRV_ERROR _RGXBvncParseList(IMG_UINT32 *pB, + IMG_UINT32 *pV, + IMG_UINT32 *pN, + IMG_UINT32 *pC, + const IMG_UINT32 ui32RGXDevCount) +{ + unsigned int ui32ScanCount = 0; + IMG_CHAR aszBVNCString[RGX_BVNC_STR_SIZE_MAX]; + + aszBVNCString[0] = '\0'; + + /* 4 components of a BVNC string is B, V, N & C */ +#define RGX_BVNC_INFO_PARAMS (4) + + _RGXBvncAcquireAppHint(aszBVNCString, ui32RGXDevCount); + + if ('\0' == aszBVNCString[0]) + { + return PVRSRV_ERROR_INVALID_BVNC_PARAMS; + } + + /* Parse the given RGX_BVNC string */ + ui32ScanCount = OSVSScanf(aszBVNCString, RGX_BVNC_STR_FMTSPEC, pB, pV, pN, pC); + if (RGX_BVNC_INFO_PARAMS != ui32ScanCount) + { + ui32ScanCount = OSVSScanf(aszBVNCString, RGX_BVNC_STRP_FMTSPEC, pB, pV, pN, pC); + } + if (RGX_BVNC_INFO_PARAMS != ui32ScanCount) + { + return PVRSRV_ERROR_INVALID_BVNC_PARAMS; + } + PVR_LOG(("BVNC module parameter honoured: %s", aszBVNCString)); + + return PVRSRV_OK; +} + +/* This function detects the Rogue variant and configures the essential + * config info associated with such a device. 
+ * The config info includes features, errata, etc + */ +PVRSRV_ERROR RGXBvncInitialiseConfiguration(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + static IMG_UINT32 ui32RGXDevCnt = 0; + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT64 ui64BVNC=0; + IMG_UINT32 B=0, V=0, N=0, C=0; + IMG_UINT64 *pui64Cfg = NULL; + + /* Check for load time RGX BVNC parameter */ + eError = _RGXBvncParseList(&B,&V,&N,&C, ui32RGXDevCnt); + if (PVRSRV_OK == eError) + { + PVR_LOG(("Read BVNC " RGX_BVNC_STR_FMTSPEC + " from driver load parameter", B, V, N, C)); + + /* Extract the BVNC config from the Features table */ + ui64BVNC = BVNC_PACK(B,0,N,C); + pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC); + PVR_LOG_IF_FALSE((pui64Cfg != NULL), "Driver parameter BVNC configuration not found!"); + } + + { + void *pvAppHintState = NULL; + const IMG_BOOL bAppHintDefault = PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC; + + OSCreateKMAppHintState(&pvAppHintState); + OSGetKMAppHintBOOL(pvAppHintState, + IgnoreHWReportedBVNC, + &bAppHintDefault, + &psDevInfo->bIgnoreHWReportedBVNC); + OSFreeKMAppHintState(pvAppHintState); + } + +#if !defined(NO_HARDWARE) + + /* Try to detect the RGX BVNC from the HW device */ + if ((NULL == pui64Cfg) && !psDevInfo->bIgnoreHWReportedBVNC) + { + IMG_UINT64 ui32ID; + IMG_HANDLE hSysData; + + hSysData = psDeviceNode->psDevConfig->hSysData; + + /* Power-up the device as required to read the registers */ + if (psDeviceNode->psDevConfig->pfnPrePowerState) + { + eError = psDeviceNode->psDevConfig->pfnPrePowerState(hSysData, PVRSRV_DEV_POWER_STATE_ON, + PVRSRV_DEV_POWER_STATE_OFF, IMG_FALSE); + PVR_LOG_RETURN_IF_ERROR(eError, "pfnPrePowerState ON"); + } + + if (psDeviceNode->psDevConfig->pfnPostPowerState) + { + eError = psDeviceNode->psDevConfig->pfnPostPowerState(hSysData, PVRSRV_DEV_POWER_STATE_ON, + PVRSRV_DEV_POWER_STATE_OFF, IMG_FALSE); + PVR_LOG_RETURN_IF_ERROR(eError, "pfnPostPowerState ON"); + } + + ui32ID = 
OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID); + + B = (ui32ID & ~RGX_CR_CORE_ID_BRANCH_ID_CLRMSK) >> + RGX_CR_CORE_ID_BRANCH_ID_SHIFT; + V = (ui32ID & ~RGX_CR_CORE_ID_VERSION_ID_CLRMSK) >> + RGX_CR_CORE_ID_VERSION_ID_SHIFT; + N = (ui32ID & ~RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_CLRMSK) >> + RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_SHIFT; + C = (ui32ID & ~RGX_CR_CORE_ID_CONFIG_ID_CLRMSK) >> + RGX_CR_CORE_ID_CONFIG_ID_SHIFT; + + PVR_LOG(("Read BVNC " RGX_BVNC_STR_FMTSPEC + " from HW device registers", B, V, N, C)); + + /* Power-down the device */ + if (psDeviceNode->psDevConfig->pfnPrePowerState) + { + eError = psDeviceNode->psDevConfig->pfnPrePowerState(hSysData, PVRSRV_DEV_POWER_STATE_OFF, + PVRSRV_DEV_POWER_STATE_ON, IMG_FALSE); + PVR_LOG_RETURN_IF_ERROR(eError, "pfnPrePowerState OFF"); + } + + if (psDeviceNode->psDevConfig->pfnPostPowerState) + { + eError = psDeviceNode->psDevConfig->pfnPostPowerState(hSysData, PVRSRV_DEV_POWER_STATE_OFF, + PVRSRV_DEV_POWER_STATE_ON, IMG_FALSE); + PVR_LOG_RETURN_IF_ERROR(eError, "pfnPostPowerState OFF"); + } + + /* Extract the BVNC config from the Features table */ + ui64BVNC = BVNC_PACK(B,0,N,C); + pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC); + PVR_LOG_IF_FALSE((pui64Cfg != NULL), "HW device BVNC configuration not found!"); + } +#endif + +#if defined(RGX_BVNC_KM_B) && defined(RGX_BVNC_KM_N) && defined(RGX_BVNC_KM_C) + if (NULL == pui64Cfg) + { + /* We reach here if the HW is not present, or we are running in a guest OS, + * or HW is unstable during register read giving invalid values, or + * runtime detection has been disabled - fall back to compile time BVNC + */ + B = RGX_BVNC_KM_B; + N = RGX_BVNC_KM_N; + C = RGX_BVNC_KM_C; + { + IMG_UINT32 ui32ScanCount = 0; + ui32ScanCount = OSVSScanf(RGX_BVNC_KM_V_ST, "%u", &V); + if (1 != ui32ScanCount) + { + ui32ScanCount = OSVSScanf(RGX_BVNC_KM_V_ST, "%up", &V); + if (1 != ui32ScanCount) + { + V = 0; + } + } + } + PVR_LOG(("Reverting to compile time BVNC %s", 
RGX_BVNC_KM)); + + /* Extract the BVNC config from the Features table */ + ui64BVNC = BVNC_PACK(B,0,N,C); + pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC); + PVR_LOG_IF_FALSE((pui64Cfg != NULL), "Compile time BVNC configuration not found!"); + } +#endif /* defined(RGX_BVNC) */ + + /* Have we failed to identify the BVNC to use? */ + if (NULL == pui64Cfg) + { + PVR_DPF((PVR_DBG_ERROR, "%s: BVNC Detection and feature lookup failed. " + "Unsupported BVNC: 0x%016" IMG_UINT64_FMTSPECx, __func__, ui64BVNC)); + return PVRSRV_ERROR_BVNC_UNSUPPORTED; + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC Feature found config: 0x%016" + IMG_UINT64_FMTSPECx " 0x%016" IMG_UINT64_FMTSPECx " 0x%016" + IMG_UINT64_FMTSPECx " 0x%016" IMG_UINT64_FMTSPECx "\n", __func__, + pui64Cfg[0], pui64Cfg[1], pui64Cfg[2], pui64Cfg[3])); + + /* Parsing feature config depends on available features on the core + * hence this parsing should always follow the above feature assignment */ + psDevInfo->sDevFeatureCfg.ui64Features = pui64Cfg[1]; + _RGXBvncParseFeatureValues(psDevInfo, pui64Cfg); + + /* Add 'V' to the packed BVNC value to get the BVNC ERN and BRN config. */ + ui64BVNC = BVNC_PACK(B,V,N,C); + pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaErnsBrns, ui64BVNC); + if (NULL == pui64Cfg) + { + PVR_DPF((PVR_DBG_ERROR, "%s: BVNC ERN/BRN lookup failed. 
" + "Unsupported BVNC: 0x%016" IMG_UINT64_FMTSPECx, __func__, ui64BVNC)); + psDevInfo->sDevFeatureCfg.ui64ErnsBrns = 0; + return PVRSRV_ERROR_BVNC_UNSUPPORTED; + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC ERN/BRN Cfg: 0x%016" IMG_UINT64_FMTSPECx + " 0x%016" IMG_UINT64_FMTSPECx, __func__, *pui64Cfg, pui64Cfg[1])); + psDevInfo->sDevFeatureCfg.ui64ErnsBrns = pui64Cfg[1]; + + psDevInfo->sDevFeatureCfg.ui32B = B; + psDevInfo->sDevFeatureCfg.ui32V = V; + psDevInfo->sDevFeatureCfg.ui32N = N; + psDevInfo->sDevFeatureCfg.ui32C = C; + + /* Message to confirm configuration look up was a success */ + PVR_LOG(("RGX Device registered with BVNC " RGX_BVNC_STR_FMTSPEC, + B, V, N, C)); + + ui32RGXDevCnt++; + +#if defined(DEBUG) + _RGXBvncDumpParsedConfig(psDeviceNode); +#endif + return PVRSRV_OK; +} + +/* + * This function checks if a particular feature is available on the given rgx device */ +IMG_BOOL RGXBvncCheckFeatureSupported(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64FeatureMask) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + if (psDevInfo->sDevFeatureCfg.ui64Features & ui64FeatureMask) + { + return IMG_TRUE; + } + return IMG_FALSE; +} + +/* + * This function returns the value of a feature on the given rgx device */ +IMG_INT32 RGXBvncGetSupportedFeatureValue(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_FEATURE_WITH_VALUE_INDEX eFeatureIndex) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + if (eFeatureIndex >= RGX_FEATURE_WITH_VALUES_MAX_IDX) + { + return -1; + } + + if (psDevInfo->sDevFeatureCfg.ui32FeaturesValues[eFeatureIndex] == RGX_FEATURE_VALUE_DISABLED) + { + return -1; + } + + return psDevInfo->sDevFeatureCfg.ui32FeaturesValues[eFeatureIndex]; +} + +/**************************************************************************/ /*! +@Function RGXVerifyBVNC +@Description Checks that the device's BVNC registers have the correct values. 
+@Input psDeviceNode Device node +@Return PVRSRV_ERROR +*/ /***************************************************************************/ +#define NUM_RGX_CORE_IDS 8 +PVRSRV_ERROR RGXVerifyBVNC(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64GivenBVNC, IMG_UINT64 ui64CoreIdMask) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT64 ui64MatchBVNC; + IMG_UINT32 i; + + PVR_ASSERT(psDeviceNode != NULL); + PVR_ASSERT(psDeviceNode->pvDevice != NULL); + + /* The device info */ + psDevInfo = psDeviceNode->pvDevice; + + PDUMPCOMMENT("PDUMP VERIFY CORE_ID registers for all OSIDs\n"); + + /* construct the value to match against */ + if ((ui64GivenBVNC | ui64CoreIdMask) == 0) /* both zero means use configured DDK value */ + { + ui64MatchBVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B, + psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, + psDevInfo->sDevFeatureCfg.ui32C); + } + else + { + /* use the value in CORE_ID for any zero elements in the BVNC */ + ui64MatchBVNC = (ui64GivenBVNC & ~ui64CoreIdMask) | (OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID) & ui64CoreIdMask); + } + PVR_LOG(("matchBVNC %d.%d.%d.%d", + (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff), + (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff), + (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff), + (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff))); + + /* read in all the CORE_ID registers */ + for (i = 0; i < NUM_RGX_CORE_IDS; ++i) + { +#if !defined(NO_HARDWARE) + IMG_UINT64 ui64BVNC = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID + (i << 16)); + + PVR_LOG(("CORE_ID%d returned %d.%d.%d.%d", i, + (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff), + (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff), + (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff), + (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff))); + + if (ui64BVNC != ui64MatchBVNC) + { + eError = PVRSRV_ERROR_BVNC_MISMATCH; + 
PVR_DPF((PVR_DBG_ERROR, "%s: Invalid CORE_ID%d %d.%d.%d.%d, Expected %d.%d.%d.%d", __func__, i, + (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff), + (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff), + (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff), + (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff), + (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff), + (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff), + (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff), + (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff))); + break; + } +#endif + +#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP) + /* check upper DWORD */ + eError = PDUMPREGPOL(RGX_PDUMPREG_NAME, + (RGX_CR_CORE_ID + 4) + (i << 16), + (IMG_UINT32)(ui64MatchBVNC >> 32), + 0xFFFFFFFF, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); + if (eError == PVRSRV_OK) + { + /* check lower DWORD */ + eError = PDUMPREGPOL(RGX_PDUMPREG_NAME, + RGX_CR_CORE_ID + (i << 16), + (IMG_UINT32)(ui64MatchBVNC & 0xFFFFFFFF), + 0xFFFFFFFF, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); + } +#endif + } + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxccb.c b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxccb.c new file mode 100644 index 000000000000..5ddbb582bf12 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxccb.c @@ -0,0 +1,2738 @@ +/*************************************************************************/ /*! +@File +@Title RGX CCB routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX CCB routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pvr_debug.h" +#include "rgxdevice.h" +#include "pdump_km.h" +#include "allocmem.h" +#include "devicemem.h" +#include "rgxfwutils.h" +#include "osfunc.h" +#include "rgxccb.h" +#include "rgx_memallocflags.h" +#include "devicemem_pdump.h" +#include "dllist.h" +#if defined(LINUX) +#include "trace_events.h" +#endif +#include "sync_checkpoint_external.h" +#include "sync_checkpoint.h" +#include "rgxutils.h" +#include "info_page.h" + +/* + * Defines the number of fence updates to record so that future fences in the + * CCB. Can be checked to see if they are already known to be satisfied. + */ +#define RGX_CCCB_FENCE_UPDATE_LIST_SIZE (32) + +#define RGX_UFO_PTR_ADDR(ufoptr) \ + (((ufoptr)->puiAddrUFO.ui32Addr) & 0xFFFFFFFC) + +#define GET_CCB_SPACE(WOff, ROff, CCBSize) \ + ((((ROff) - (WOff)) + ((CCBSize) - 1)) & ((CCBSize) - 1)) + +#define UPDATE_CCB_OFFSET(Off, PacketSize, CCBSize) \ + (Off) = (((Off) + (PacketSize)) & ((CCBSize) - 1)) + +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) + +#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_THRESHOLD 0x1 +#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED 0x2 +#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_FULL_CCB 0x4 + +typedef struct _RGX_CLIENT_CCB_UTILISATION_ +{ + /* the threshold in bytes. + * when the CCB utilisation hits the threshold then we will print + * a warning message. + */ + IMG_UINT32 ui32ThresholdBytes; + /* Maximum cCCB usage at some point in time */ + IMG_UINT32 ui32HighWaterMark; + /* keep track of the warnings already printed. + * bit mask of PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_xyz + */ + IMG_UINT32 ui32Warnings; + /* Keep track how many times CCB was full. + * Counters are reset after every grow. 
+ */ + IMG_UINT32 ui32CCBFull; + IMG_UINT32 ui32CCBAcquired; +} RGX_CLIENT_CCB_UTILISATION; + +#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */ + +struct _RGX_CLIENT_CCB_ { + volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl; /*!< CPU mapping of the CCB control structure used by the fw */ + void *pvClientCCB; /*!< CPU mapping of the CCB */ + DEVMEM_MEMDESC *psClientCCBMemDesc; /*!< MemDesc for the CCB */ + DEVMEM_MEMDESC *psClientCCBCtrlMemDesc; /*!< MemDesc for the CCB control */ + IMG_UINT32 ui32HostWriteOffset; /*!< CCB write offset from the driver side */ + IMG_UINT32 ui32LastPDumpWriteOffset; /*!< CCB write offset from the last time we submitted a command in capture range */ + IMG_UINT32 ui32FinishedPDumpWriteOffset; /*!< Trails LastPDumpWriteOffset for last finished command, used for HW CB driven DMs */ + IMG_UINT32 ui32LastROff; /*!< Last CCB Read offset to help detect any CCB wedge */ + IMG_UINT32 ui32LastWOff; /*!< Last CCB Write offset to help detect any CCB wedge */ + IMG_UINT32 ui32ByteCount; /*!< Count of the number of bytes written to CCCB */ + IMG_UINT32 ui32LastByteCount; /*!< Last value of ui32ByteCount to help detect any CCB wedge */ + IMG_UINT32 ui32Size; /*!< Size of the CCB */ +#if defined(PVRSRV_ENABLE_CCCB_GROW) + POS_LOCK hCCBGrowLock; /*!< Prevents CCB Grow while DumpCCB() is called and vice versa */ + IMG_UINT32 ui32VirtualAllocSize; /*!< Virtual size of the CCB */ + IMG_PUINT32 pui32MappingTable; /*!< Mapping table for sparse allocation of the CCB */ +#endif + DLLIST_NODE sNode; /*!< Node used to store this CCB on the per connection list */ + PDUMP_CONNECTION_DATA *psPDumpConnectionData; /*!< Pointer to the per connection data in which we reside */ + void *hTransition; /*!< Handle for Transition callback */ + IMG_CHAR szName[MAX_CLIENT_CCB_NAME]; /*!< Name of this client CCB */ + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; /*!< Parent server common context that this CCB belongs to */ +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) + 
RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor; + RGX_CLIENT_CCB_UTILISATION sUtilisation; /*!< CCB utilisation data */ +#endif +#if defined(DEBUG) + IMG_UINT32 ui32UpdateEntries; /*!< Number of Fence Updates in asFenceUpdateList */ + RGXFWIF_UFO asFenceUpdateList[RGX_CCCB_FENCE_UPDATE_LIST_SIZE]; /*!< List of recent updates written in this CCB */ +#endif + IMG_UINT32 ui32CCBFlags; /*!< Bitmask for various flags relating to CCB. Bit defines in rgxccb.h */ +}; + +/* Forms a table, with array of strings for each requestor type (listed in RGX_CCB_REQUESTORS X macro), to be used for + DevMemAllocation comments and PDump comments. Each tuple in the table consists of 3 strings: + { "FwClientCCB:" , "FwClientCCBControl:" , }, + The first string being used as comment when allocating ClientCCB for the given requestor, the second for CCBControl + structure, and the 3rd one for use in PDUMP comments. The number of tuples in the table must adhere to the following + build assert. */ +const IMG_CHAR *const aszCCBRequestors[][3] = +{ +#define REQUESTOR_STRING(prefix,req) #prefix ":" #req +#define FORM_REQUESTOR_TUPLE(req) { REQUESTOR_STRING(FwClientCCB,req), REQUESTOR_STRING(FwClientCCBControl,req), #req }, + RGX_CCB_REQUESTORS(FORM_REQUESTOR_TUPLE) +#undef FORM_REQUESTOR_TUPLE +}; + +PVRSRV_ERROR RGXCCBPDumpDrainCCB(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32PDumpFlags) +{ + + IMG_UINT32 ui32PollOffset; + + if (BIT_ISSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN)) + { + /* Draining CCB on a command that hasn't finished, and FW isn't expected + * to have updated Roff up to Woff. Only drain to the first + * finished command prior to this. The Roff for this + * is stored in ui32FinishedPDumpWriteOffset. 
+ */ + ui32PollOffset = psClientCCB->ui32FinishedPDumpWriteOffset; + + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, + "cCCB(%s@%p): Draining open CCB rgxfw_roff < woff (%d)", + psClientCCB->szName, + psClientCCB, + ui32PollOffset); + } + else + { + /* Command to a finished CCB stream and FW is drained to empty + * out remaining commands until R==W. + */ + ui32PollOffset = psClientCCB->ui32LastPDumpWriteOffset; + + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, + "cCCB(%s@%p): Draining CCB rgxfw_roff == woff (%d)", + psClientCCB->szName, + psClientCCB, + ui32PollOffset); + } + + return DevmemPDumpDevmemPol32(psClientCCB->psClientCCBCtrlMemDesc, + offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), + ui32PollOffset, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags); +} + +/****************************************************************************** + FUNCTION : RGXCCBPDumpSyncCCB + + PURPOSE : Synchronise Client CCBs from both live and playback contexts. + Waits for live-FW to empty live-CCB. + Waits for sim-FW to empty sim-CCB by adding POL + + PARAMETERS : psClientCCB - The client CCB + ui32PDumpFlags - PDump flags + + RETURNS : PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR RGXCCBPDumpSyncCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + + /* Wait for the live FW to catch up/empty CCB. This is done by returning + * retry which will get pushed back out to Services client where it + * waits on the event object and then resubmits the command. + */ + if (psClientCCB->psClientCCBCtrl->ui32ReadOffset != psClientCCB->ui32HostWriteOffset) + { + return PVRSRV_ERROR_RETRY; + } + + /* Wait for the sim FW to catch up/empty sim CCB. + * We drain whenever capture range is entered, even if no commands + * have been issued on this CCB when out of capture range. 
We have to + * wait for commands that might have been issued in the last capture + * range to finish so the connection's sync block snapshot dumped after + * all the PDumpTransition callbacks have been execute doesn't clobber + * syncs which the sim FW is currently working on. + * + * Although this is sub-optimal for play-back - while out of capture + * range for every continuous operation we synchronise the sim + * play-back processing the script and the sim FW, there is no easy + * solution. Not all modules that work with syncs register a + * PDumpTransition callback and thus we have no way of knowing if we + * can skip this sim CCB drain and sync block dump or not. + */ + + eError = RGXCCBPDumpDrainCCB(psClientCCB, ui32PDumpFlags); + PVR_LOG_IF_ERROR(eError, "RGXCCBPDumpDrainCCB"); + PVR_ASSERT(eError == PVRSRV_OK); + + /* Live CCB and simulation CCB now empty, FW idle on CCB in both + * contexts. + */ + return PVRSRV_OK; +} + +/****************************************************************************** + FUNCTION : RGXCCBPDumpFastForwardCCB + + PURPOSE : Fast-forward sim-CCB and live-CCB offsets to live app-thread + values. + This helps to skip any commands submitted when out of capture + range and start with first command in capture range in both + live and playback contexts. In case of Block mode, this helps + to playback any intermediate PDump block directly after first + block. 
+ + + PARAMETERS : psClientCCB - The client CCB + ui32PDumpFlags - PDump flags + + RETURNS : void +******************************************************************************/ +static void RGXCCBPDumpFastForwardCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32PDumpFlags) +{ + volatile RGXFWIF_CCCB_CTL *psCCBCtl = psClientCCB->psClientCCBCtrl; + + /* Make sure that we have synced live-FW and live-App threads */ + PVR_ASSERT(psCCBCtl->ui32ReadOffset == psClientCCB->ui32HostWriteOffset); + + psCCBCtl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset; + psCCBCtl->ui32DepOffset = psClientCCB->ui32HostWriteOffset; + psCCBCtl->ui32WriteOffset = psClientCCB->ui32HostWriteOffset; + + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, + "cCCB(%s@%p): Fast-forward from %d to %d", + psClientCCB->szName, + psClientCCB, + psClientCCB->ui32LastPDumpWriteOffset, + psClientCCB->ui32HostWriteOffset); + + DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc, + 0, + sizeof(RGXFWIF_CCCB_CTL), + ui32PDumpFlags); + + /* Although we've entered capture range for this process connection + * we might not do any work on this CCB so update the + * ui32LastPDumpWriteOffset to reflect where we got to for next + * time so we start the drain from where we got to last time. + */ + psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset; + +} + +static PVRSRV_ERROR _RGXCCBPDumpTransition(void *pvData, void *pvDevice, PDUMP_TRANSITION_EVENT eEvent, IMG_UINT32 ui32PDumpFlags) +{ + RGX_CLIENT_CCB *psClientCCB = (RGX_CLIENT_CCB *) pvData; +#if defined(PDUMP) + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) pvDevice; +#endif + PVRSRV_ERROR eError; + + /* Block mode: + * Here is block structure at transition (ui32BlockLength=N frames): + * + * ... + * ... + * PDUMP_BLOCK_START_0x0000000x{ + * + * + * ... + * ... + * ... (N frames data) + * ... + * ... 
+ * <(1) Drain sim-KCCB> ''| + * <(2) Sync live and sim CCCB> | + * }PDUMP_BLOCK_END_0x0000000x | <- BlockTransition Steps + * <(3) Split MAIN and BLOCK stream script> | + * PDUMP_BLOCK_START_0x0000000y{ | + * <(4) Fast-forward sim-CCCB> | + * <(5) Re-dump SyncBlocks> ,,| + * ... + * ... + * ... (N frames data) + * ... + * ... + * + * + * }PDUMP_BLOCK_END_0x0000000y + * ... + * ... + * + * Steps (3) and (5) are done in pdump_server.c + * */ + switch (eEvent) + { + case PDUMP_TRANSITION_EVENT_RANGE_ENTERED: + { + /* We're about to transition into capture range and we've submitted + * new commands since the last time we entered capture range so drain + * the live CCB and simulation (sim) CCB as required, i.e. leave CCB + * idle in both live and sim contexts. + * This requires the host driver to ensure the live FW & the sim FW + * have both emptied out the remaining commands until R==W (CCB empty). + */ + + eError = RGXCCBPDumpSyncCCB(psClientCCB, ui32PDumpFlags); + PVR_RETURN_IF_ERROR(eError); + + if (psClientCCB->ui32LastPDumpWriteOffset != psClientCCB->ui32HostWriteOffset) + { + /* If new commands have been written when out of capture range in + * the live CCB then we need to fast forward the sim CCBCtl + * offsets past uncaptured commands. This is done by PDUMPing + * the CCBCtl memory to align sim values with the live CCBCtl + * values. Both live and sim FWs can start with the 1st command + * which is in the new capture range. + */ + RGXCCBPDumpFastForwardCCB(psClientCCB, ui32PDumpFlags); + } + break; + } + case PDUMP_TRANSITION_EVENT_RANGE_EXITED: + { + /* Nothing to do */ + break; + } + case PDUMP_TRANSITION_EVENT_BLOCK_FINISHED: + { + /* (1) Drain KCCB from current block before starting new: + * + * At playback, this will ensure that sim-FW drains all commands in KCCB + * belongs to current block before 'jumping' to any future commands (from + * next block). This will synchronise script-thread and sim-FW thread KCCBs + * at end of each pdump block. 
+ * + * This will additionally force redump of KCCBCtl structure at start of next/new block. + * */ + +#if defined(PDUMP) + eError = RGXPdumpDrainKCCB(psDevInfo, psDevInfo->psKernelCCBCtl->ui32WriteOffset); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXPdumpDrainKCCB"); +#endif + + /* (2) Synchronise Client CCBs from live and playback contexts before starting new block: + * + * This operation will, + * a. Force synchronisation between app-thread and live-FW thread (i.e. Wait + * for live-FW to empty live Client CCB). + * + * b. Next, it will dump poll command to drain Client CCB at end of every + * pdump block. At playback time this will synchronise sim-FW and + * script-thread Client CCBs at end of each block. + * + * This is to ensure that all commands in CCB from current block are processed + * before moving on to future commands. + * */ + + eError = RGXCCBPDumpSyncCCB(psClientCCB, ui32PDumpFlags); + PVR_RETURN_IF_ERROR(eError); + break; + } + case PDUMP_TRANSITION_EVENT_BLOCK_STARTED: + { + /* (4) Fast-forward CCB write offsets to current live values: + * + * We have already synchronised live-FW and app-thread above at end of each + * block (in Step 2a above), now fast-forward Client CCBCtl write offsets to that of + * current app-thread values at start of every block. This will allow us to + * skip any intermediate pdump blocks and start with last (or any next) block + * immediately after first pdump block. 
+ * */ + + RGXCCBPDumpFastForwardCCB(psClientCCB, ui32PDumpFlags); + break; + } + case PDUMP_TRANSITION_EVENT_NONE: + /* Invalid event for transition */ + default: + { + /* Unknown Transition event */ + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + return PVRSRV_OK; +} + +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) + +static INLINE void _RGXInitCCBUtilisation(RGX_CLIENT_CCB *psClientCCB) +{ + psClientCCB->sUtilisation.ui32HighWaterMark = 0; /* initialize ui32HighWaterMark level to zero */ + psClientCCB->sUtilisation.ui32ThresholdBytes = (psClientCCB->ui32Size * + PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD) / 100; + psClientCCB->sUtilisation.ui32Warnings = 0; + psClientCCB->sUtilisation.ui32CCBAcquired = 0; + psClientCCB->sUtilisation.ui32CCBFull = 0; +} + +static INLINE void _RGXPrintCCBUtilisationWarning(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32WarningType, + IMG_UINT32 ui32CmdSize) +{ +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE) + if (ui32WarningType == PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED) + { + PVR_LOG(("Failed to acquire CCB space for %u byte command:", ui32CmdSize)); + } + + PVR_LOG(("%s: Client CCB (%s) watermark (%u) hit %d%% of its allocation size (%u)", + __func__, + psClientCCB->szName, + psClientCCB->sUtilisation.ui32HighWaterMark, + psClientCCB->sUtilisation.ui32HighWaterMark * 100 / psClientCCB->ui32Size, + psClientCCB->ui32Size)); +#else + PVR_UNREFERENCED_PARAMETER(ui32WarningType); + PVR_UNREFERENCED_PARAMETER(ui32CmdSize); + + PVR_LOG(("GPU %s command buffer usage high (%u). 
This is not an error but the application may not run optimally.", + aszCCBRequestors[psClientCCB->eRGXCCBRequestor][REQ_PDUMP_COMMENT], + psClientCCB->sUtilisation.ui32HighWaterMark * 100 / psClientCCB->ui32Size)); +#endif +} + +static INLINE void _RGXCCBUtilisationEvent(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32WarningType, + IMG_UINT32 ui32CmdSize) +{ + /* in VERBOSE mode we will print a message for each different + * event type as they happen. + * but by default we will only issue one message + */ +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE) + if (!(psClientCCB->sUtilisation.ui32Warnings & ui32WarningType)) +#else + if (!psClientCCB->sUtilisation.ui32Warnings) +#endif + { + _RGXPrintCCBUtilisationWarning(psClientCCB, + ui32WarningType, + ui32CmdSize); + /* record that we have issued a warning of this type */ + psClientCCB->sUtilisation.ui32Warnings |= ui32WarningType; + } +} + +/* Check the current CCB utilisation. Print a one-time warning message if it is above the + * specified threshold + */ +static INLINE void _RGXCheckCCBUtilisation(RGX_CLIENT_CCB *psClientCCB) +{ + /* Print a warning message if the cCCB watermark is above the threshold value */ + if (psClientCCB->sUtilisation.ui32HighWaterMark >= psClientCCB->sUtilisation.ui32ThresholdBytes) + { + _RGXCCBUtilisationEvent(psClientCCB, + PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_THRESHOLD, + 0); + } +} + +/* Update the cCCB high watermark level if necessary */ +static void _RGXUpdateCCBUtilisation(RGX_CLIENT_CCB *psClientCCB) +{ + IMG_UINT32 ui32FreeSpace, ui32MemCurrentUsage; + + ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, + psClientCCB->psClientCCBCtrl->ui32ReadOffset, + psClientCCB->ui32Size); + ui32MemCurrentUsage = psClientCCB->ui32Size - ui32FreeSpace; + + if (ui32MemCurrentUsage > psClientCCB->sUtilisation.ui32HighWaterMark) + { + psClientCCB->sUtilisation.ui32HighWaterMark = ui32MemCurrentUsage; + + /* The high water mark has increased. 
Check if it is above the + * threshold so we can print a warning if necessary. + */ + _RGXCheckCCBUtilisation(psClientCCB); + } +} + +#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */ + +PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32CCBSizeLog2, + IMG_UINT32 ui32CCBMaxSizeLog2, + IMG_UINT32 ui32ContextFlags, + CONNECTION_DATA *psConnectionData, + RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor, + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + RGX_CLIENT_CCB **ppsClientCCB, + DEVMEM_MEMDESC **ppsClientCCBMemDesc, + DEVMEM_MEMDESC **ppsClientCCBCtrlMemDesc) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + DEVMEM_FLAGS_T uiClientCCBMemAllocFlags, uiClientCCBCtlMemAllocFlags; + IMG_UINT32 ui32AllocSize = (1U << ui32CCBSizeLog2); + IMG_UINT32 ui32MinAllocSize = (1U << MIN_SAFE_CCB_SIZE_LOG2); + RGX_CLIENT_CCB *psClientCCB; +#if defined(PVRSRV_ENABLE_CCCB_GROW) + IMG_UINT32 ui32FWLog2PageSize = DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap); + IMG_UINT32 ui32FWPageSize = (1U << ui32FWLog2PageSize); + IMG_UINT32 ui32NumPages = ui32AllocSize / ui32FWPageSize; + IMG_UINT32 ui32VirtualAllocSize = (1U << ui32CCBMaxSizeLog2); + IMG_UINT32 ui32NumVirtPages = ui32VirtualAllocSize / ui32FWPageSize; + IMG_UINT32 i; + + /* For the allocation request to be valid, at least one page is required. + * This is relevant on systems where the page size is greater than the client CCB size. */ + ui32NumPages = MAX(1, ui32NumPages); + ui32NumVirtPages = MAX(1, ui32NumVirtPages); +#else + PVR_UNREFERENCED_PARAMETER(ui32CCBMaxSizeLog2); +#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */ + + /* All client CCBs should be at-least of the "minimum" size and not to exceed "maximum" */ + if ((ui32CCBSizeLog2 < MIN_SAFE_CCB_SIZE_LOG2) || + (ui32CCBSizeLog2 > MAX_SAFE_CCB_SIZE_LOG2)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s CCB size is invalid (%d). 
Should be from %d to %d", + __func__, + aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + ui32CCBSizeLog2, MIN_SAFE_CCB_SIZE_LOG2, MAX_SAFE_CCB_SIZE_LOG2)); + return PVRSRV_ERROR_INVALID_PARAMS; + } +#if defined(PVRSRV_ENABLE_CCCB_GROW) + if ((ui32CCBMaxSizeLog2 < ui32CCBSizeLog2) || + (ui32CCBMaxSizeLog2 > MAX_SAFE_CCB_SIZE_LOG2)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s CCB maximum size is invalid (%d). Should be from %d to %d", + __func__, + aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + ui32CCBMaxSizeLog2, ui32CCBSizeLog2, MAX_SAFE_CCB_SIZE_LOG2)); + return PVRSRV_ERROR_INVALID_PARAMS; + } +#endif + + psClientCCB = OSAllocMem(sizeof(*psClientCCB)); + if (psClientCCB == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc; + } + psClientCCB->psServerCommonContext = psServerCommonContext; + +#if defined(PVRSRV_ENABLE_CCCB_GROW) + psClientCCB->ui32VirtualAllocSize = 0; + psClientCCB->pui32MappingTable = NULL; +#endif + + uiClientCCBMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE; + + uiClientCCBCtlMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_UNCACHED | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE; + + /* If connection data indicates Sync Lockup Recovery (SLR) should be disabled, + * or if the caller has set ui32ContextFlags to disable SLR for this context, + * indicate this in psClientCCB->ui32CCBFlags. 
+ */ + if ((psConnectionData->ui32ClientFlags & SRV_FLAGS_CLIENT_SLR_DISABLED) || + (ui32ContextFlags & RGX_CONTEXT_FLAG_DISABLESLR)) + { + BIT_SET(psClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED); + } + + PDUMPCOMMENT("Allocate RGXFW cCCB"); +#if defined(PVRSRV_ENABLE_CCCB_GROW) + if (BITMASK_HAS(psDevInfo->ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN)) + { + psClientCCB->ui32VirtualAllocSize = ui32VirtualAllocSize; + + /* + * Growing CCB is doubling the size. Last grow would require only ui32NumVirtPages/2 new pages + * because another ui32NumVirtPages/2 is already allocated. + * Sometimes initial pages count would be higher (when CCB size is equal to CCB maximum size) so MAX is needed. + */ + psClientCCB->pui32MappingTable = OSAllocMem(MAX(ui32NumPages, ui32NumVirtPages/2) * sizeof(IMG_UINT32)); + if (psClientCCB->pui32MappingTable == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc_mtable; + } + for (i = 0; i < ui32NumPages; i++) + { + psClientCCB->pui32MappingTable[i] = i; + } + + if (IsPhysmemNewRamBackedByLMA(psDevInfo->psDeviceNode, PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL)) + { + /* + * On LMA sparse memory can't be mapped to kernel. + * To work around this whole ccb memory is allocated at once as contiguous. 
+ */ + eError = DevmemFwAllocate(psDevInfo, + ui32VirtualAllocSize, + uiClientCCBMemAllocFlags, + aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING], + &psClientCCB->psClientCCBMemDesc); + } + else + { + eError = DevmemFwAllocateSparse(psDevInfo, + ui32VirtualAllocSize, + ui32FWPageSize, + ui32NumPages, + ui32NumVirtPages, + psClientCCB->pui32MappingTable, + uiClientCCBMemAllocFlags, + aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING], + &psClientCCB->psClientCCBMemDesc); + } + } + + if (eError != PVRSRV_OK) + { + OSFreeMem(psClientCCB->pui32MappingTable); + psClientCCB->pui32MappingTable = NULL; + psClientCCB->ui32VirtualAllocSize = 0; + } + + if (!BITMASK_HAS(psDevInfo->ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN) || + (eError != PVRSRV_OK)) +#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */ + { + /* Allocate ui32AllocSize, or the next best POT allocation */ + do + { + eError = DevmemFwAllocate(psDevInfo, + ui32AllocSize, + uiClientCCBMemAllocFlags, + aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING], + &psClientCCB->psClientCCBMemDesc); + if (eError != PVRSRV_OK) + { + /* Failed to allocate - ensure CCB grow is disabled from + * now on for this device. 
+ */ + BITMASK_UNSET(psDevInfo->ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN); + + /* Failed to allocate, try next POT down */ + ui32AllocSize >>= 1; + } + } while ((eError != PVRSRV_OK) && (ui32AllocSize > ui32MinAllocSize)); + } + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate RGX client CCB (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_alloc_ccb; + } + + OSSNPrintf(psClientCCB->szName, MAX_CLIENT_CCB_NAME, "%s-P%lu-T%lu-%s", + aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + (unsigned long) OSGetCurrentClientProcessIDKM(), + (unsigned long) OSGetCurrentClientThreadIDKM(), + OSGetCurrentClientProcessNameKM()); + + if (ui32AllocSize < (1U << ui32CCBSizeLog2)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Unable to allocate %d bytes for RGX client CCB (%s) but allocated %d bytes", + __func__, + (1U << ui32CCBSizeLog2), + psClientCCB->szName, + ui32AllocSize)); + } + + eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc, + &psClientCCB->pvClientCCB); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map RGX client CCB (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_map_ccb; + } + + PDUMPCOMMENT("Allocate RGXFW cCCB control"); + eError = DevmemFwAllocate(psDevInfo, + sizeof(RGXFWIF_CCCB_CTL), + uiClientCCBCtlMemAllocFlags, + aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_CONTROL_STRING], + &psClientCCB->psClientCCBCtrlMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate RGX client CCB control (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_alloc_ccbctrl; + } + + + eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc, + (void **) &psClientCCB->psClientCCBCtrl); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map RGX client CCB control (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_map_ccbctrl; + } + + 
psClientCCB->psClientCCBCtrl->ui32WriteOffset = 0; + psClientCCB->psClientCCBCtrl->ui32ReadOffset = 0; + psClientCCB->psClientCCBCtrl->ui32DepOffset = 0; + psClientCCB->psClientCCBCtrl->ui32WrapMask = ui32AllocSize - 1; + + PDUMPCOMMENT("cCCB control"); + DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc, + 0, + sizeof(RGXFWIF_CCCB_CTL), + PDUMP_FLAGS_CONTINUOUS); + PVR_ASSERT(eError == PVRSRV_OK); + + psClientCCB->ui32HostWriteOffset = 0; + psClientCCB->ui32LastPDumpWriteOffset = 0; + psClientCCB->ui32FinishedPDumpWriteOffset = 0; + psClientCCB->ui32Size = ui32AllocSize; + psClientCCB->ui32LastROff = ui32AllocSize - 1; + psClientCCB->ui32ByteCount = 0; + psClientCCB->ui32LastByteCount = 0; + BIT_UNSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN); + +#if defined(PVRSRV_ENABLE_CCCB_GROW) + eError = OSLockCreate(&psClientCCB->hCCBGrowLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to create hCCBGrowLock (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_create_ccbgrow_lock; + } +#endif +#if defined(DEBUG) + psClientCCB->ui32UpdateEntries = 0; +#endif + +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) + _RGXInitCCBUtilisation(psClientCCB); + psClientCCB->eRGXCCBRequestor = eRGXCCBRequestor; +#endif + eError = PDumpRegisterTransitionCallback(psConnectionData->psPDumpConnectionData, + _RGXCCBPDumpTransition, + psClientCCB, + psDevInfo, + &psClientCCB->hTransition); + if (eError != PVRSRV_OK) + { + goto fail_pdumpreg; + } + + /* + * Note: + * Save the PDump specific structure, which is ref counted unlike + * the connection data, to ensure it's not freed too early + */ + psClientCCB->psPDumpConnectionData = psConnectionData->psPDumpConnectionData; + PDUMPCOMMENT("New RGXFW cCCB(%s@%p) created", + psClientCCB->szName, + psClientCCB); + + *ppsClientCCB = psClientCCB; + *ppsClientCCBMemDesc = psClientCCB->psClientCCBMemDesc; + *ppsClientCCBCtrlMemDesc = psClientCCB->psClientCCBCtrlMemDesc; + return PVRSRV_OK; + 
+fail_pdumpreg: +#if defined(PVRSRV_ENABLE_CCCB_GROW) + OSLockDestroy(psClientCCB->hCCBGrowLock); +fail_create_ccbgrow_lock: +#endif + DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc); +fail_map_ccbctrl: + DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBCtrlMemDesc); +fail_alloc_ccbctrl: + DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc); +fail_map_ccb: + DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBMemDesc); +#if defined(PVRSRV_ENABLE_CCCB_GROW) +fail_alloc_ccb: + if ( psClientCCB->ui32VirtualAllocSize > 0) + { + OSFreeMem(psClientCCB->pui32MappingTable); + } +fail_alloc_mtable: +#else +fail_alloc_ccb: +#endif + OSFreeMem(psClientCCB); +fail_alloc: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +void RGXDestroyCCB(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_CLIENT_CCB *psClientCCB) +{ +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) + if (psClientCCB->sUtilisation.ui32CCBFull) + { + PVR_LOG(("CCBUtilisationInfo: GPU %s command buffer was full %d times out of %d. 
" + "This is not an error but the application may not run optimally.", + aszCCBRequestors[psClientCCB->eRGXCCBRequestor][REQ_PDUMP_COMMENT], + psClientCCB->sUtilisation.ui32CCBFull, + psClientCCB->sUtilisation.ui32CCBAcquired)); + } +#endif +#if defined(PVRSRV_ENABLE_CCCB_GROW) + OSLockDestroy(psClientCCB->hCCBGrowLock); +#endif + PDumpUnregisterTransitionCallback(psClientCCB->hTransition); + DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBCtrlMemDesc); + DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBMemDesc); +#if defined(PVRSRV_ENABLE_CCCB_GROW) + if (psClientCCB->pui32MappingTable) + { + OSFreeMem(psClientCCB->pui32MappingTable); + } +#endif + OSFreeMem(psClientCCB); +} + +#if defined(PVRSRV_ENABLE_CCCB_GROW) +static PVRSRV_ERROR _RGXCCBMemChangeSparse(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32AllocPageCount) +{ + PVRSRV_ERROR eError; + IMG_UINT32 i; + +#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE + DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc); +#endif + + for (i = 0; i < ui32AllocPageCount; i++) + { + psClientCCB->pui32MappingTable[i] = ui32AllocPageCount + i; + } + + /* Double the CCB size (CCB must be POT) by adding ui32AllocPageCount new pages */ + eError = DeviceMemChangeSparse(psClientCCB->psClientCCBMemDesc, + ui32AllocPageCount, + psClientCCB->pui32MappingTable, + 0, + NULL, +#if !defined(PVRSRV_UNMAP_ON_SPARSE_CHANGE) + SPARSE_MAP_CPU_ADDR | +#endif + SPARSE_RESIZE_ALLOC); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXAcquireCCB: Failed to grow RGX client CCB (%s)", + PVRSRVGetErrorString(eError))); + +#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE + if (DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc, + &psClientCCB->pvClientCCB) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXAcquireCCB: Failed to reacquire CCB mapping")); + psClientCCB->pvClientCCB = NULL; + } +#endif + + return eError; 
+ } + +#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE + eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc, + &psClientCCB->pvClientCCB); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXAcquireCCB: Failed to map RGX client CCB (%s)", + PVRSRVGetErrorString(eError))); + return eError; + } +#endif + + return PVRSRV_OK; +} +#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */ + +PVRSRV_ERROR RGXCheckSpaceCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32CmdSize) +{ + IMG_UINT32 ui32FreeSpace; + + /* Check that the CCB can hold this command + padding */ + if ((ui32CmdSize + PADDING_COMMAND_SIZE + 1) > psClientCCB->ui32Size) + { + PVR_DPF((PVR_DBG_ERROR, "Command size (%d bytes) too big for CCB" + " (%d bytes)", ui32CmdSize, psClientCCB->ui32Size)); + return PVRSRV_ERROR_CMD_TOO_BIG; + } + + /* + Check we don't overflow the end of the buffer and make sure we have + enough space for the padding command. If we don't have enough space + (including the minimum amount for the padding command) we need to make + sure we insert a padding command now and wrap before adding the main + command. 
+ */ + if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) <= psClientCCB->ui32Size) + { + ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, + psClientCCB->psClientCCBCtrl->ui32ReadOffset, + psClientCCB->ui32Size); + + /* Don't allow all the space to be used */ + if (ui32FreeSpace > ui32CmdSize) + { + return PVRSRV_OK; + } + + goto e_retry; + } + else + { + IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset; + + ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, + psClientCCB->psClientCCBCtrl->ui32ReadOffset, + psClientCCB->ui32Size); + + /* Check there is space for both the command and the padding command */ + if (ui32FreeSpace > ui32Remain + ui32CmdSize) + { + return PVRSRV_OK; + } + + goto e_retry; + } + +e_retry: +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) + _RGXCCBUtilisationEvent(psClientCCB, + PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_FULL_CCB, + ui32CmdSize); +#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */ + + return PVRSRV_ERROR_RETRY; +} + +/****************************************************************************** + FUNCTION : RGXAcquireCCB + + PURPOSE : Obtains access to write some commands to a CCB + + PARAMETERS : psClientCCB - The client CCB + ui32CmdSize - How much space is required + ppvBufferSpace - Pointer to space in the buffer + ui32PDumpFlags - Should this be PDump continuous? 

 RETURNS	: PVRSRV_ERROR
******************************************************************************/
PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB,
                           IMG_UINT32 ui32CmdSize,
                           void **ppvBufferSpace,
                           IMG_UINT32 ui32PDumpFlags)
{
#if defined(PVRSRV_ENABLE_CCCB_GROW)
	/* Two passes: if the first placement attempt fails on a growable CCB the
	 * buffer is grown and the placement retried once. */
	IMG_UINT32 ui32RetryCount = 2;
#endif

#if defined(PDUMP)
	PVRSRV_ERROR eError;
	IMG_BOOL bInCaptureRange;
	IMG_BOOL bPdumpEnabled;

	PDumpIsCaptureFrameKM(&bInCaptureRange);
	bPdumpEnabled = (bInCaptureRange || PDUMP_IS_CONTINUOUS(ui32PDumpFlags));

	/*
		PDumpSetFrame will detect as we transition into capture range for
		frame based data, but if we are PDumping continuous data then we
		need to inform the PDump layer ourselves.
	*/
	if (PDUMP_IS_CONTINUOUS(ui32PDumpFlags) && !bInCaptureRange)
	{
		eError = PDumpTransition(psClientCCB->psPDumpConnectionData, PDUMP_TRANSITION_EVENT_RANGE_ENTERED, ui32PDumpFlags);
		if (eError != PVRSRV_OK)
		{
			return eError;
		}
	}
#endif

	/* Check that the CCB can hold this command + padding at all */
	if ((ui32CmdSize + PADDING_COMMAND_SIZE + 1) > psClientCCB->ui32Size)
	{
		PVR_DPF((PVR_DBG_ERROR, "Command size (%d bytes) too big for CCB (%d bytes)",
		        ui32CmdSize, psClientCCB->ui32Size));
		return PVRSRV_ERROR_CMD_TOO_BIG;
	}

#if defined(PVRSRV_ENABLE_CCCB_GROW)
	while (ui32RetryCount--)
#endif
	{
#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
		psClientCCB->sUtilisation.ui32CCBAcquired++;
#endif

		/*
			Check we don't overflow the end of the buffer and make sure we
			have enough space for the padding command. If we don't have
			enough space (including the minimum amount for the padding
			command) we will need to insert a padding command now and wrap
			before adding the main command.
		*/
		if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) <= psClientCCB->ui32Size)
		{
			/* The command can fit without wrapping... */
			IMG_UINT32 ui32FreeSpace;

#if defined(PDUMP)
			/* Wait for sufficient CCB space to become available */
			PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s",
			                      ui32CmdSize, psClientCCB->ui32HostWriteOffset,
			                      psClientCCB->szName);
			DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
			               offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
			               psClientCCB->ui32HostWriteOffset,
			               ui32CmdSize,
			               psClientCCB->ui32Size);
#endif

			ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
			                              psClientCCB->psClientCCBCtrl->ui32ReadOffset,
			                              psClientCCB->ui32Size);

			/* Can command fit? ('>' so the CCB never becomes completely full) */
			if (ui32FreeSpace > ui32CmdSize)
			{
				*ppvBufferSpace = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset);
				return PVRSRV_OK;
			}
			/* There is not enough free space in CCB. */
			goto e_retry;
		}
		else
		{
			/*
				We're at the end of the buffer without enough contiguous space.
				The command cannot fit without wrapping; we need to insert a
				padding command and wrap. We need to do this in one go,
				otherwise we would be leaving unflushed commands and forcing
				the client to deal with flushing the padding command but not
				the command they wanted to write. Therefore we either do all
				or nothing.
			*/
			RGXFWIF_CCB_CMD_HEADER *psHeader;
			IMG_UINT32 ui32FreeSpace;
			IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset;

#if defined(PVRSRV_ENABLE_CCCB_GROW)
			/* Check this is a growable CCB */
			if (psClientCCB->ui32VirtualAllocSize > 0)
			{
				PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psClientCCB->psServerCommonContext);

				ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
				                              psClientCCB->psClientCCBCtrl->ui32ReadOffset,
				                              psClientCCB->ui32Size);
				/*
				 * Check if CCB should grow or be wrapped.
				 * Wrap the CCB if there is no need to grow (CCB is at least
				 * half empty) or the CCB cannot grow any further, and there
				 * is free space for the command plus the padding command.
				 */
				if (((ui32FreeSpace > psClientCCB->ui32Size/2) || (psClientCCB->ui32Size == psClientCCB->ui32VirtualAllocSize)) &&
				    (ui32FreeSpace > ui32Remain + ui32CmdSize))
				{
					/* Wrap CCB */
					psHeader = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset);
					psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PADDING;
					psHeader->ui32CmdSize = ui32Remain - sizeof(RGXFWIF_CCB_CMD_HEADER);

#if defined(PDUMP)
					PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "cCCB(%p): Padding cmd %d", psClientCCB, psHeader->ui32CmdSize);
					if (bPdumpEnabled)
					{
						DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
						                   psClientCCB->ui32HostWriteOffset,
						                   ui32Remain,
						                   ui32PDumpFlags);
					}
#endif

					/* The command is placed at the start of the buffer; the
					 * write offset is advanced past the padding at release. */
					*ppvBufferSpace = psClientCCB->pvClientCCB;
					return PVRSRV_OK;
				}
				else if ((psClientCCB->ui32Size < psClientCCB->ui32VirtualAllocSize) &&
				         (psClientCCB->ui32HostWriteOffset >= psClientCCB->psClientCCBCtrl->ui32ReadOffset))
				{
					/* Grow CCB */
					PVRSRV_ERROR eErr = PVRSRV_OK;

					/* Something went wrong if we are here a second time */
					PVR_ASSERT(ui32RetryCount != 0);
					OSLockAcquire(psClientCCB->hCCBGrowLock);

					/*
					 * On LMA sparse memory can't be mapped to kernel.
					 * To work around this the whole CCB memory was allocated
					 * at once as contiguous. In such case the sparse change
					 * below is not needed because memory is already allocated.
					 */
					if (!IsPhysmemNewRamBackedByLMA(psDevInfo->psDeviceNode, PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL))
					{
						IMG_UINT32 ui32FWPageSize = 1U << DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap);
						IMG_UINT32 ui32AllocPageCount = psClientCCB->ui32Size / ui32FWPageSize;

						eErr = _RGXCCBMemChangeSparse(psClientCCB, ui32AllocPageCount);
					}

					/* Setup new CCB size (doubles the current size) */
					if (eErr == PVRSRV_OK)
					{
						psClientCCB->ui32Size += psClientCCB->ui32Size;
					}
					else
					{
						PVR_LOG(("%s: Client CCB (%s) grow failed (%s)", __func__, psClientCCB->szName, PVRSRVGetErrorString(eErr)));
						OSLockRelease(psClientCCB->hCCBGrowLock);
						goto e_retry;
					}

#if defined(PDUMP)
					PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "cCCB update for grow");
					if (bPdumpEnabled)
					{
						DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc,
						                   offsetof(RGXFWIF_CCCB_CTL, ui32WrapMask),
						                   sizeof(psClientCCB->psClientCCBCtrl->ui32WrapMask),
						                   ui32PDumpFlags);
						DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
						                   offsetof(RGX_CLIENT_CCB, ui32Size),
						                   sizeof(psClientCCB->ui32Size),
						                   ui32PDumpFlags);
					}
#endif /* defined(PDUMP) */

#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
					PVR_LOG(("%s: Client CCB (%s) grew to %u", __func__, psClientCCB->szName, psClientCCB->ui32Size));
					/* Reset counters */
					_RGXInitCCBUtilisation(psClientCCB);
#endif

					/* CCB doubled the size so retry now. */
					OSLockRelease(psClientCCB->hCCBGrowLock);
				}
				else
				{
					/* CCB can't grow anymore and can't be wrapped */
#if defined(PDUMP)
					/* Wait for sufficient CCB space to become available */
					PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s",
					                      ui32Remain, psClientCCB->ui32HostWriteOffset,
					                      psClientCCB->szName);
					DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
					               offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
					               psClientCCB->ui32HostWriteOffset,
					               ui32Remain,
					               psClientCCB->ui32Size);
					PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s",
					                      ui32CmdSize, 0 /*ui32HostWriteOffset after wrap */,
					                      psClientCCB->szName);
					DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
					               offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
					               0 /*ui32HostWriteOffset after wrap */,
					               ui32CmdSize,
					               psClientCCB->ui32Size);
					/* CCB has now space for our command so try wrapping again. Retry now. */
#else /* defined(PDUMP) */
					goto e_retry;
#endif /* defined(PDUMP) */
				}
			}
			else
#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */
			{
#if defined(PDUMP)
				/* Wait for sufficient CCB space to become available */
				PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s",
				                      ui32Remain, psClientCCB->ui32HostWriteOffset,
				                      psClientCCB->szName);
				DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
				               offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
				               psClientCCB->ui32HostWriteOffset,
				               ui32Remain,
				               psClientCCB->ui32Size);
				PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s",
				                      ui32CmdSize, 0 /*ui32HostWriteOffset after wrap */,
				                      psClientCCB->szName);
				DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
				               offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
				               0 /*ui32HostWriteOffset after wrap */,
				               ui32CmdSize,
				               psClientCCB->ui32Size);
#endif
				ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
				                              psClientCCB->psClientCCBCtrl->ui32ReadOffset,
				                              psClientCCB->ui32Size);

				/* Room for the padding command plus the real command? */
				if (ui32FreeSpace > ui32Remain + ui32CmdSize)
				{
					psHeader = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset);
					psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PADDING;
					psHeader->ui32CmdSize = ui32Remain - sizeof(RGXFWIF_CCB_CMD_HEADER);
#if defined(PDUMP)
					PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "cCCB(%p): Padding cmd %d", psClientCCB, psHeader->ui32CmdSize);
					if (bPdumpEnabled)
					{
						DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
						                   psClientCCB->ui32HostWriteOffset,
						                   ui32Remain,
						                   ui32PDumpFlags);
					}
#endif

					*ppvBufferSpace = psClientCCB->pvClientCCB;
					return PVRSRV_OK;
				}

				goto e_retry;
			}
		}
	}
e_retry:
#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
	psClientCCB->sUtilisation.ui32CCBFull++;
	_RGXCCBUtilisationEvent(psClientCCB,
	                        PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED,
	                        ui32CmdSize);
#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */
	return PVRSRV_ERROR_RETRY;
}

/******************************************************************************
 FUNCTION	: RGXReleaseCCB

 PURPOSE	: Release a CCB that we have been writing to.

 PARAMETERS	: psDevData			- device data
			  psCCB				- the CCB

 RETURNS	: None
******************************************************************************/
void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB,
                   IMG_UINT32 ui32CmdSize,
                   IMG_UINT32 ui32PDumpFlags)
{
	IMG_BOOL bInCaptureRange;
	IMG_BOOL bPdumpEnabled;

	PDumpIsCaptureFrameKM(&bInCaptureRange);
	bPdumpEnabled = (bInCaptureRange || PDUMP_IS_CONTINUOUS(ui32PDumpFlags));

#if defined(PVRSRV_ENABLE_CCCB_GROW)
	OSLockAcquire(psClientCCB->hCCBGrowLock);
#endif
	/*
	 * If a padding command was needed then we should now move
	 * ui32HostWriteOffset forward. The command has already been dumped
	 * (if bPdumpEnabled).
	 */
	if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) > psClientCCB->ui32Size)
	{
		/* The acquire wrapped: account for the padding command that was
		 * written over the remainder of the buffer. */
		IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset;

		UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset,
		                  ui32Remain,
		                  psClientCCB->ui32Size);
		psClientCCB->ui32ByteCount += ui32Remain;
	}

	/* Dump the CCB data */
	if (bPdumpEnabled)
	{
		DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
		                   psClientCCB->ui32HostWriteOffset,
		                   ui32CmdSize,
		                   ui32PDumpFlags);
	}

	/*
	 * Check if there are any fences being written that will already be
	 * satisfied by the last written update command in this CCB. At the
	 * same time we can ASSERT that all sync addresses are not NULL.
	 */
#if defined(DEBUG)
	{
		void *pvBufferStart = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset);
		void *pvBufferEnd = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset + ui32CmdSize);
		IMG_BOOL bMessagePrinted = IMG_FALSE;

		/* Walk through the commands in this section of CCB being released... */
		while (pvBufferStart < pvBufferEnd)
		{
			RGXFWIF_CCB_CMD_HEADER *psCmdHeader = pvBufferStart;

			if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UPDATE)
			{
				/* If an UPDATE then record the values in case an adjacent fence uses it. */
				IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
				RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER));

				/* NOTE(review): once the list saturates at
				 * RGX_CCCB_FENCE_UPDATE_LIST_SIZE, psUFOPtr stops advancing so
				 * the remaining iterations re-assert the same UFO -- looks
				 * benign (debug-only), but confirm this is intended. */
				psClientCCB->ui32UpdateEntries = 0;
				while (ui32NumUFOs-- > 0)
				{
					PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0);
					if (psClientCCB->ui32UpdateEntries < RGX_CCCB_FENCE_UPDATE_LIST_SIZE)
					{
						psClientCCB->asFenceUpdateList[psClientCCB->ui32UpdateEntries++] = *psUFOPtr++;
					}
				}
			}
			else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE)
			{
				/* If a FENCE then check the values against the last UPDATE issued. */
				IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
				RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER));

				while (ui32NumUFOs-- > 0)
				{
					PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0);

					/* Report at most one redundant check per release. */
					if (bMessagePrinted == IMG_FALSE)
					{
						RGXFWIF_UFO *psUpdatePtr = psClientCCB->asFenceUpdateList;
						IMG_UINT32 ui32UpdateIndex;

						for (ui32UpdateIndex = 0; ui32UpdateIndex < psClientCCB->ui32UpdateEntries; ui32UpdateIndex++)
						{
							if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr))
							{
								if (RGX_UFO_PTR_ADDR(psUFOPtr) == RGX_UFO_PTR_ADDR(psUpdatePtr))
								{
									PVR_DPF((PVR_DBG_MESSAGE, "Redundant sync checkpoint check found in cCCB(%p) - 0x%x -> 0x%x",
									        psClientCCB, RGX_UFO_PTR_ADDR(psUFOPtr), psUFOPtr->ui32Value));
									bMessagePrinted = IMG_TRUE;
									break;
								}
							}
							else
							{
								if (psUFOPtr->puiAddrUFO.ui32Addr == psUpdatePtr->puiAddrUFO.ui32Addr &&
								    psUFOPtr->ui32Value == psUpdatePtr->ui32Value)
								{
									PVR_DPF((PVR_DBG_MESSAGE, "Redundant fence check found in cCCB(%p) - 0x%x -> 0x%x",
									        psClientCCB, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value));
									bMessagePrinted = IMG_TRUE;
									break;
								}
							}
							psUpdatePtr++;
						}
					}

					psUFOPtr++;
				}
			}
			else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR ||
			         psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE)
			{
				/* For all other UFO ops check the UFO address is not NULL. */
				IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
				RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER));

				while (ui32NumUFOs-- > 0)
				{
					PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0);
					psUFOPtr++;
				}
			}

			/* Move to the next command in this section of CCB being released... */
			pvBufferStart = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER) + psCmdHeader->ui32CmdSize);
		}
	}
#endif /* defined(DEBUG) */

	/*
	 * Update the CCB write offset.
	 */
	UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset,
	                  ui32CmdSize,
	                  psClientCCB->ui32Size);
	psClientCCB->ui32ByteCount += ui32CmdSize;

#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
	_RGXUpdateCCBUtilisation(psClientCCB);
#endif
	/*
		PDumpSetFrame will detect as we transition out of capture range for
		frame based data, but if we are PDumping continuous data then we
		need to inform the PDump layer ourselves.
	*/
	if (PDUMP_IS_CONTINUOUS(ui32PDumpFlags) && !bInCaptureRange)
	{
		PVRSRV_ERROR eError;

		/* Only transitioning into capture range can cause an error */
		eError = PDumpTransition(psClientCCB->psPDumpConnectionData, PDUMP_TRANSITION_EVENT_RANGE_EXITED, ui32PDumpFlags);
		PVR_ASSERT(eError == PVRSRV_OK);
	}

	if (bPdumpEnabled)
	{
		if (!BIT_ISSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN))
		{
			/* Store offset to last finished CCB command. This offset can
			 * be needed when appending commands to a non finished CCB.
			 */
			psClientCCB->ui32FinishedPDumpWriteOffset = psClientCCB->ui32LastPDumpWriteOffset;
		}

		/* Update the PDump write offset to show we PDumped this command */
		psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset;
	}

#if defined(NO_HARDWARE)
	/*
		The firmware is not running; it cannot update these, so we do it here instead.
+ */ + psClientCCB->psClientCCBCtrl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset; + psClientCCB->psClientCCBCtrl->ui32DepOffset = psClientCCB->ui32HostWriteOffset; +#endif + +#if defined(PVRSRV_ENABLE_CCCB_GROW) + OSLockRelease(psClientCCB->hCCBGrowLock); +#endif +} + +IMG_UINT32 RGXGetHostWriteOffsetCCB(RGX_CLIENT_CCB *psClientCCB) +{ + return psClientCCB->ui32HostWriteOffset; +} + +IMG_UINT32 RGXGetWrapMaskCCB(RGX_CLIENT_CCB *psClientCCB) +{ + return psClientCCB->ui32Size-1; +} + +PVRSRV_ERROR RGXSetCCBFlags(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32Flags) +{ + if ((ui32Flags & RGX_CONTEXT_FLAG_DISABLESLR)) + { + BIT_SET(psClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED); + } + else + { + BIT_UNSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED); + } + return PVRSRV_OK; +} + +#define SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL PVR_DBG_ERROR +#define CHECK_COMMAND(cmd, fenceupdate) \ + case RGXFWIF_CCB_CMD_TYPE_##cmd: \ + PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, #cmd " command (%d bytes)", psHeader->ui32CmdSize)); \ + bFenceUpdate = fenceupdate; \ + break + +static void _RGXClientCCBDumpCommands(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32ByteCount) +{ +#if defined(SUPPORT_DUMP_CLIENT_CCB_COMMANDS) + void *pvPtr = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, ui32Offset); + IMG_UINT32 ui32ConsumeSize = ui32ByteCount; + + while (ui32ConsumeSize) + { + RGXFWIF_CCB_CMD_HEADER *psHeader = pvPtr; + IMG_BOOL bFenceUpdate = IMG_FALSE; + + PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "@offset 0x%08x", IMG_OFFSET_ADDR(pvPtr, -psClientCCB->pvClientCCB))); + switch (psHeader->eCmdType) + { + CHECK_COMMAND(GEOM_PPP, IMG_FALSE); + CHECK_COMMAND(GEOM_SHG, IMG_FALSE); + CHECK_COMMAND(3D, IMG_FALSE); + CHECK_COMMAND(3D_PR, IMG_FALSE); + CHECK_COMMAND(CDM, IMG_FALSE); + CHECK_COMMAND(TQ_TDM, IMG_FALSE); + CHECK_COMMAND(RTU, IMG_FALSE); + CHECK_COMMAND(RTU_FC, IMG_FALSE); + CHECK_COMMAND(NULL, IMG_FALSE); + + 
CHECK_COMMAND(FENCE, IMG_TRUE); + CHECK_COMMAND(UPDATE, IMG_TRUE); + CHECK_COMMAND(FENCE_PR, IMG_TRUE); + CHECK_COMMAND(PRIORITY, IMG_TRUE); + + + CHECK_COMMAND(UNFENCED_UPDATE, IMG_FALSE); + + CHECK_COMMAND(PADDING, IMG_FALSE); + default: + PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "Unknown command!")); + break; + } + pvPtr = IMG_OFFSET_ADDR(pvPtr, sizeof(*psHeader)); + if (bFenceUpdate) + { + IMG_UINT32 j; + RGXFWIF_UFO *psUFOPtr = pvPtr; + for (j=0;jui32CmdSize/sizeof(RGXFWIF_UFO);j++) + { + PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "Addr = 0x%08x, value = 0x%08x", + psUFOPtr[j].puiAddrUFO.ui32Addr, psUFOPtr[j].ui32Value)); + } + } + else + { + IMG_UINT32 *pui32Ptr = pvPtr; + IMG_UINT32 ui32Remain = psHeader->ui32CmdSize/sizeof(IMG_UINT32); + while (ui32Remain) + { + if (ui32Remain >= 4) + { + PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x 0x%08x 0x%08x 0x%08x", + pui32Ptr[0], pui32Ptr[1], pui32Ptr[2], pui32Ptr[3])); + pui32Ptr += 4; + ui32Remain -= 4; + } + if (ui32Remain == 3) + { + PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x 0x%08x 0x%08x", + pui32Ptr[0], pui32Ptr[1], pui32Ptr[2])); + pui32Ptr += 3; + ui32Remain -= 3; + } + if (ui32Remain == 2) + { + PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x 0x%08x", + pui32Ptr[0], pui32Ptr[1])); + pui32Ptr += 2; + ui32Remain -= 2; + } + if (ui32Remain == 1) + { + PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x", + pui32Ptr[0])); + pui32Ptr += 1; + ui32Remain -= 1; + } + } + } + pvPtr = IMG_OFFSET_ADDR(pvPtr, psHeader->ui32CmdSize); + ui32ConsumeSize -= sizeof(*psHeader) + psHeader->ui32CmdSize; + } +#else + PVR_UNREFERENCED_PARAMETER(psClientCCB); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + PVR_UNREFERENCED_PARAMETER(ui32ByteCount); +#endif +} + +void RGXCmdHelperInitCmdCCB_CommandSize(IMG_UINT64 ui64FBSCEntryMask, + IMG_UINT32 ui32ClientFenceCount, + IMG_UINT32 ui32ClientUpdateCount, + IMG_UINT32 ui32CmdSize, + RGX_CCB_CMD_HELPER_DATA 
                                        *psCmdHelperData)
{
	IMG_UINT32 ui32FenceCount, ui32UpdateCount, ui32UnfencedUpdateCount;

	/* Init the generated data members */
	psCmdHelperData->ui32FBSCInvalCmdSize = 0;
	psCmdHelperData->ui64FBSCEntryMask = 0;
	psCmdHelperData->ui32ServerFenceCount = 0;
	psCmdHelperData->ui32ServerUpdateCount = 0;
	psCmdHelperData->ui32ServerUnfencedUpdateCount = 0;
	psCmdHelperData->ui32FenceCmdSize = 0;
	psCmdHelperData->ui32UpdateCmdSize = 0;
	psCmdHelperData->ui32UnfencedUpdateCmdSize = 0;

	/* Work out how many fences and updates this command will have */

	/* Total FBSC invalidate command size (header plus command data).
	 * A size of zero (mask == 0) means "no FBSC invalidate command". */
	if (ui64FBSCEntryMask != 0)
	{
		psCmdHelperData->ui32FBSCInvalCmdSize =
			RGX_CCB_FWALLOC_ALIGN(sizeof(psCmdHelperData->ui64FBSCEntryMask) +
			                      sizeof(RGXFWIF_CCB_CMD_HEADER));
		psCmdHelperData->ui64FBSCEntryMask = ui64FBSCEntryMask;
	}

	/* Total DM command size (header plus command data) */
	psCmdHelperData->ui32DMCmdSize =
		RGX_CCB_FWALLOC_ALIGN(ui32CmdSize + sizeof(RGXFWIF_CCB_CMD_HEADER));

	/* Total fence command size (header plus command data).
	 * The server counts were zeroed above, so these currently reduce to the
	 * client counts. */
	ui32FenceCount = ui32ClientFenceCount +
	                 psCmdHelperData->ui32ServerFenceCount;
	if (ui32FenceCount != 0)
	{
		psCmdHelperData->ui32FenceCmdSize =
			RGX_CCB_FWALLOC_ALIGN(ui32FenceCount * sizeof(RGXFWIF_UFO) +
			                      sizeof(RGXFWIF_CCB_CMD_HEADER));
	}

	/* Total update command size (header plus command data) */
	ui32UpdateCount = ui32ClientUpdateCount +
	                  psCmdHelperData->ui32ServerUpdateCount;
	if (ui32UpdateCount != 0)
	{
		psCmdHelperData->ui32UpdateCmdSize =
			RGX_CCB_FWALLOC_ALIGN(ui32UpdateCount * sizeof(RGXFWIF_UFO) +
			                      sizeof(RGXFWIF_CCB_CMD_HEADER));
	}

	/* Total unfenced update command size (header plus command data) */
	ui32UnfencedUpdateCount = psCmdHelperData->ui32ServerUnfencedUpdateCount;
	if (ui32UnfencedUpdateCount != 0)
	{
		psCmdHelperData->ui32UnfencedUpdateCmdSize =
			RGX_CCB_FWALLOC_ALIGN(ui32UnfencedUpdateCount *
			                      sizeof(RGXFWIF_UFO) +
			                      sizeof(RGXFWIF_CCB_CMD_HEADER));
	}
}

/*
	Record the caller-supplied command, sync and job-reference data in the
	helper structure for use later at acquire/release time.
*/
void RGXCmdHelperInitCmdCCB_OtherData(RGX_CLIENT_CCB *psClientCCB,
                                      IMG_UINT32 ui32ClientFenceCount,
                                      PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress,
                                      IMG_UINT32 *paui32FenceValue,
                                      IMG_UINT32 ui32ClientUpdateCount,
                                      PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress,
                                      IMG_UINT32 *paui32UpdateValue,
                                      IMG_UINT32 ui32CmdSize,
                                      IMG_PBYTE pui8DMCmd,
                                      RGXFWIF_CCB_CMD_TYPE eType,
                                      IMG_UINT32 ui32ExtJobRef,
                                      IMG_UINT32 ui32IntJobRef,
                                      IMG_UINT32 ui32PDumpFlags,
                                      RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData,
                                      IMG_CHAR *pszCommandName,
                                      IMG_BOOL bCCBStateOpen,
                                      RGX_CCB_CMD_HELPER_DATA *psCmdHelperData)
{
	/* Job reference values */
	psCmdHelperData->ui32ExtJobRef = ui32ExtJobRef;
	psCmdHelperData->ui32IntJobRef = ui32IntJobRef;

	/* Save the data we require in the submit call */
	psCmdHelperData->psClientCCB = psClientCCB;
#if defined(PDUMP)
	psCmdHelperData->ui32PDumpFlags = ui32PDumpFlags;
#endif
	psCmdHelperData->pszCommandName = pszCommandName;
	if (bCCBStateOpen)
	{
		BIT_SET(psCmdHelperData->psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN);
	}
	else
	{
		BIT_UNSET(psCmdHelperData->psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN);
	}

	/* Client sync data */
	psCmdHelperData->ui32ClientFenceCount = ui32ClientFenceCount;
	psCmdHelperData->pauiFenceUFOAddress = pauiFenceUFOAddress;
	psCmdHelperData->paui32FenceValue = paui32FenceValue;
	psCmdHelperData->ui32ClientUpdateCount = ui32ClientUpdateCount;
	psCmdHelperData->pauiUpdateUFOAddress = pauiUpdateUFOAddress;
	psCmdHelperData->paui32UpdateValue = paui32UpdateValue;

	/* Command data */
	psCmdHelperData->ui32CmdSize = ui32CmdSize;
	psCmdHelperData->pui8DMCmd = pui8DMCmd;
	psCmdHelperData->eType = eType;

	PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
			"%s Command Server Init on FWCtx %08x", pszCommandName,
			FWCommonContextGetFWAddress(psClientCCB->psServerCommonContext).ui32Addr);

#if defined(SUPPORT_WORKLOAD_ESTIMATION)
	/* Workload Data added */
	psCmdHelperData->psWorkEstKickData = psWorkEstKickData;
#endif
}

/*
	Initialise the command helper data and work out how much CCB space this
	command will require (delegates to the _OtherData and _CommandSize
	helpers above).
*/
void RGXCmdHelperInitCmdCCB(RGX_CLIENT_CCB *psClientCCB,
                            IMG_UINT64 ui64FBSCEntryMask,
                            IMG_UINT32 ui32ClientFenceCount,
                            PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress,
                            IMG_UINT32 *paui32FenceValue,
                            IMG_UINT32 ui32ClientUpdateCount,
                            PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress,
                            IMG_UINT32 *paui32UpdateValue,
                            IMG_UINT32 ui32CmdSize,
                            IMG_PBYTE pui8DMCmd,
                            RGXFWIF_CCB_CMD_TYPE eType,
                            IMG_UINT32 ui32ExtJobRef,
                            IMG_UINT32 ui32IntJobRef,
                            IMG_UINT32 ui32PDumpFlags,
                            RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData,
                            IMG_CHAR *pszCommandName,
                            IMG_BOOL bCCBStateOpen,
                            RGX_CCB_CMD_HELPER_DATA *psCmdHelperData)
{
	RGXCmdHelperInitCmdCCB_OtherData(psClientCCB,
	                                 ui32ClientFenceCount,
	                                 pauiFenceUFOAddress,
	                                 paui32FenceValue,
	                                 ui32ClientUpdateCount,
	                                 pauiUpdateUFOAddress,
	                                 paui32UpdateValue,
	                                 ui32CmdSize,
	                                 pui8DMCmd,
	                                 eType,
	                                 ui32ExtJobRef,
	                                 ui32IntJobRef,
	                                 ui32PDumpFlags,
	                                 psWorkEstKickData,
	                                 pszCommandName,
	                                 bCCBStateOpen,
	                                 psCmdHelperData);

	RGXCmdHelperInitCmdCCB_CommandSize(ui64FBSCEntryMask,
	                                   ui32ClientFenceCount,
	                                   ui32ClientUpdateCount,
	                                   ui32CmdSize,
	                                   psCmdHelperData);
}

/*
	Reserve space in the CCB and fill in the command and client sync data
*/
PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount,
                                       RGX_CCB_CMD_HELPER_DATA *asCmdHelperData)
{
	const IMG_UINT32 ui32MaxUFOCmdSize = RGX_CCB_FWALLOC_ALIGN((RGXFWIF_CCB_CMD_MAX_UFOS * sizeof(RGXFWIF_UFO)) +
	                                                           sizeof(RGXFWIF_CCB_CMD_HEADER));
	IMG_UINT32 ui32AllocSize = 0;
	IMG_UINT32 i;
	void *pvStartPtr;
	PVRSRV_ERROR eError;

	/*
		Check the number of fences & updates are valid.
+ */ + for (i = 0; i < ui32CmdCount; i++) + { + RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = &asCmdHelperData[i]; + + if (psCmdHelperData->ui32FenceCmdSize > ui32MaxUFOCmdSize || + psCmdHelperData->ui32UpdateCmdSize > ui32MaxUFOCmdSize || + psCmdHelperData->ui32UnfencedUpdateCmdSize > ui32MaxUFOCmdSize) + { + return PVRSRV_ERROR_TOO_MANY_SYNCS; + } + } + + /* + Work out how much space we need for all the command(s) + */ + ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData); + +#if defined(PDUMP) + for (i = 0; i < ui32CmdCount; i++) + { + if ((asCmdHelperData[0].ui32PDumpFlags ^ asCmdHelperData[i].ui32PDumpFlags) & PDUMP_FLAGS_CONTINUOUS) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PDump continuous is not consistent (%s != %s) for command %d", + __func__, + PDUMP_IS_CONTINUOUS(asCmdHelperData[0].ui32PDumpFlags)?"IMG_TRUE":"IMG_FALSE", + PDUMP_IS_CONTINUOUS(asCmdHelperData[i].ui32PDumpFlags)?"IMG_TRUE":"IMG_FALSE", + ui32CmdCount)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + } +#endif + + /* + Acquire space in the CCB for all the command(s). + */ + eError = RGXAcquireCCB(asCmdHelperData[0].psClientCCB, + ui32AllocSize, + &pvStartPtr, + asCmdHelperData[0].ui32PDumpFlags); + if (unlikely(eError != PVRSRV_OK)) + { + return eError; + } + + /* + For each command fill in the fence, DM, and update command + + Note: + We only fill in the client fences here, the server fences (and updates) + will be filled in together at the end. 
This is because we might fail the + kernel CCB alloc and would then have to rollback the server syncs if + we took the operation here + */ + for (i = 0; i < ui32CmdCount; i++) + { + RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = & asCmdHelperData[i]; + void *pvCmdPtr; + void *pvServerFenceStart = NULL; + void *pvServerUpdateStart = NULL; +#if defined(PDUMP) + IMG_UINT32 ui32CtxAddr = FWCommonContextGetFWAddress(asCmdHelperData->psClientCCB->psServerCommonContext).ui32Addr; + IMG_UINT32 ui32CcbWoff = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(asCmdHelperData->psClientCCB->psServerCommonContext)); +#endif + + if (psCmdHelperData->ui32ClientFenceCount+psCmdHelperData->ui32ClientUpdateCount != 0) + { + PDUMPCOMMENT("Start of %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes", + psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff); + } + + /* + Create the fence command. + */ + if (psCmdHelperData->ui32FenceCmdSize) + { + RGXFWIF_CCB_CMD_HEADER *psHeader; + IMG_UINT k, uiNextValueIndex; + + /* Fences are at the start of the command */ + pvCmdPtr = pvStartPtr; + + psHeader = pvCmdPtr; + psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_FENCE; + + psHeader->ui32CmdSize = psCmdHelperData->ui32FenceCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER); + psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef; + psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0; + psHeader->sWorkEstKickData.ui64Deadline = 0; + psHeader->sWorkEstKickData.ui64CyclesPrediction = 0; +#endif + + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)); + + /* Fill in the client fences */ + uiNextValueIndex = 0; + for (k = 0; k < psCmdHelperData->ui32ClientFenceCount; k++) + { + RGXFWIF_UFO *psUFOPtr = pvCmdPtr; + + psUFOPtr->puiAddrUFO = psCmdHelperData->pauiFenceUFOAddress[k]; + + if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr)) + { + psUFOPtr->ui32Value = 
PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + } + else + { + /* Only increment uiNextValueIndex for non sync checkpoints + * (as paui32FenceValue only contains values for sync prims) + */ + psUFOPtr->ui32Value = psCmdHelperData->paui32FenceValue[uiNextValueIndex++]; + } + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_UFO)); + +#if defined(SYNC_COMMAND_DEBUG) + PVR_DPF((PVR_DBG_ERROR, "%s client sync fence - 0x%x -> 0x%x", + psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value)); +#endif + PDUMPCOMMENT(".. %s client sync fence - 0x%x -> 0x%x", + psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value); + + + } + pvServerFenceStart = pvCmdPtr; + } + + /* jump over the Server fences */ + pvCmdPtr = IMG_OFFSET_ADDR(pvStartPtr, psCmdHelperData->ui32FenceCmdSize); + + /* + Create the FBSC invalidate command. + */ + if (psCmdHelperData->ui32FBSCInvalCmdSize) + { + RGXFWIF_CCB_CMD_HEADER *psHeader; + IMG_UINT64 *pui64FBSCInvalCmdData; + + /* pui8CmdPtr */ + + psHeader = pvCmdPtr; + psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_FBSC_INVALIDATE; + + psHeader->ui32CmdSize = psCmdHelperData->ui32FBSCInvalCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER); + psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef; + psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0; + psHeader->sWorkEstKickData.ui64Deadline = 0; + psHeader->sWorkEstKickData.ui64CyclesPrediction = 0; +#endif + pui64FBSCInvalCmdData = (IMG_UINT64 *)(psHeader + 1); + *pui64FBSCInvalCmdData = psCmdHelperData->ui64FBSCEntryMask; + /* leap over the FBSC invalidate command */ + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, psCmdHelperData->ui32FBSCInvalCmdSize); + + } + + /* + Create the DM command + */ + if (psCmdHelperData->ui32DMCmdSize) + { + RGXFWIF_CCB_CMD_HEADER *psHeader; + + psHeader = pvCmdPtr; + psHeader->eCmdType = psCmdHelperData->eType; + + 
psHeader->ui32CmdSize = psCmdHelperData->ui32DMCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER); + psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef; + psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef; + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + if (psCmdHelperData->psWorkEstKickData != NULL && + psCmdHelperData->eType != RGXFWIF_CCB_CMD_TYPE_NULL) + { + PVR_ASSERT(psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_GEOM || + psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_3D || + psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_CDM || + psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_TQ_TDM); + psHeader->sWorkEstKickData = *psCmdHelperData->psWorkEstKickData; + } + else + { + psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0; + psHeader->sWorkEstKickData.ui64Deadline = 0; + psHeader->sWorkEstKickData.ui64CyclesPrediction = 0; + } +#endif + + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)); + + /* The buffer is write-combine, so no special device memory treatment required. */ + OSDeviceMemCopy(pvCmdPtr, psCmdHelperData->pui8DMCmd, psCmdHelperData->ui32CmdSize); + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, psCmdHelperData->ui32CmdSize); + } + + + /* + Create the update command. 
+ + Note: + We only fill in the client updates here, the server updates (and fences) + will be filled in together at the end + */ + if (psCmdHelperData->ui32UpdateCmdSize) + { + RGXFWIF_CCB_CMD_HEADER *psHeader; + IMG_UINT k, uiNextValueIndex; + + psHeader = pvCmdPtr; + psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_UPDATE; + psHeader->ui32CmdSize = psCmdHelperData->ui32UpdateCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER); + psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef; + psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0; + psHeader->sWorkEstKickData.ui64Deadline = 0; + psHeader->sWorkEstKickData.ui64CyclesPrediction = 0; +#endif + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)); + + /* Fill in the client updates */ + uiNextValueIndex = 0; + for (k = 0; k < psCmdHelperData->ui32ClientUpdateCount; k++) + { + RGXFWIF_UFO *psUFOPtr = pvCmdPtr; + + psUFOPtr->puiAddrUFO = psCmdHelperData->pauiUpdateUFOAddress[k]; + if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr)) + { + psUFOPtr->ui32Value = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + } + else + { + /* Only increment uiNextValueIndex for non sync checkpoints + * (as paui32UpdateValue only contains values for sync prims) + */ + psUFOPtr->ui32Value = psCmdHelperData->paui32UpdateValue[uiNextValueIndex++]; + } + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_UFO)); + +#if defined(SYNC_COMMAND_DEBUG) + PVR_DPF((PVR_DBG_ERROR, "%s client sync update - 0x%x -> 0x%x", + psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value)); +#endif + PDUMPCOMMENT(".. 
%s client sync update - 0x%x -> 0x%x", + psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value); + + } + pvServerUpdateStart = pvCmdPtr; + } + + /* Save the server sync fence & update offsets for submit time */ + psCmdHelperData->pui8ServerFenceStart = pvServerFenceStart; + psCmdHelperData->pui8ServerUpdateStart = pvServerUpdateStart; + + /* jump over the fenced update */ + if (psCmdHelperData->ui32UnfencedUpdateCmdSize != 0) + { + RGXFWIF_CCB_CMD_HEADER * const psHeader = IMG_OFFSET_ADDR(psCmdHelperData->pui8ServerUpdateStart, psCmdHelperData->ui32UpdateCmdSize); + /* header should not be zero but check for code analysis */ + if (unlikely(psHeader == NULL)) + { + return PVRSRV_ERROR_MEMORY_ACCESS; + } + /* set up the header for unfenced updates */ + PVR_ASSERT(psHeader); /* Could be zero if ui32UpdateCmdSize is 0 which is never expected */ + psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE; + psHeader->ui32CmdSize = psCmdHelperData->ui32UnfencedUpdateCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER); + psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef; + psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0; + psHeader->sWorkEstKickData.ui64Deadline = 0; + psHeader->sWorkEstKickData.ui64CyclesPrediction = 0; +#endif + + /* jump over the header */ + psCmdHelperData->pui8ServerUnfencedUpdateStart = IMG_OFFSET_ADDR(psHeader, sizeof(RGXFWIF_CCB_CMD_HEADER)); + } + else + { + psCmdHelperData->pui8ServerUnfencedUpdateStart = NULL; + } + + /* Save start for sanity checking at submit time */ + psCmdHelperData->pui8StartPtr = pvStartPtr; + + /* Set the start pointer for the next iteration around the loop */ + pvStartPtr = IMG_OFFSET_ADDR(pvStartPtr, + psCmdHelperData->ui32FenceCmdSize + + psCmdHelperData->ui32FBSCInvalCmdSize + + psCmdHelperData->ui32DMCmdSize + + psCmdHelperData->ui32UpdateCmdSize + + 
psCmdHelperData->ui32UnfencedUpdateCmdSize); + + if (psCmdHelperData->ui32ClientFenceCount+psCmdHelperData->ui32ClientUpdateCount != 0) + { + PDUMPCOMMENT("End of %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes", + psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff); + } + else + { + PDUMPCOMMENT("No %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes", + psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff); + } + } + + return PVRSRV_OK; +} + +/* + Fill in the server syncs data and release the CCB space +*/ +void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount, + RGX_CCB_CMD_HELPER_DATA *asCmdHelperData, + const IMG_CHAR *pcszDMName, + IMG_UINT32 ui32CtxAddr) +{ + IMG_UINT32 ui32AllocSize = 0; + IMG_UINT32 i; +#if defined(LINUX) + IMG_BOOL bTraceChecks = trace_rogue_are_fence_checks_traced(); + IMG_BOOL bTraceUpdates = trace_rogue_are_fence_updates_traced(); +#endif + + /* + Work out how much space we need for all the command(s) + */ + ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData); + /* + For each command fill in the server sync info + */ + for (i=0;ipszCommandName, + pcszDMName, + ui32CtxAddr, + psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize, + psCmdHelperData->ui32ClientFenceCount, + psCmdHelperData->pauiFenceUFOAddress, + psCmdHelperData->paui32FenceValue); + } + if (bTraceUpdates) + { + trace_rogue_fence_updates(psCmdHelperData->pszCommandName, + pcszDMName, + ui32CtxAddr, + psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize, + psCmdHelperData->ui32ClientUpdateCount, + psCmdHelperData->pauiUpdateUFOAddress, + psCmdHelperData->paui32UpdateValue); + } +#endif + /* + All the commands have been filled in so release the CCB space. 
+ The FW still won't run this command until we kick it + */ + PDUMPCOMMENTWITHFLAGS(psCmdHelperData->ui32PDumpFlags, + "%s Command Server Release on FWCtx %08x", + psCmdHelperData->pszCommandName, ui32CtxAddr); + } + + _RGXClientCCBDumpCommands(asCmdHelperData[0].psClientCCB, + asCmdHelperData[0].psClientCCB->ui32HostWriteOffset, + ui32AllocSize); + + RGXReleaseCCB(asCmdHelperData[0].psClientCCB, + ui32AllocSize, + asCmdHelperData[0].ui32PDumpFlags); + + BIT_UNSET(asCmdHelperData[0].psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN); +} + +IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32 ui32CmdCount, + RGX_CCB_CMD_HELPER_DATA *asCmdHelperData) +{ + IMG_UINT32 ui32AllocSize = 0; + IMG_UINT32 i; + + /* + Work out how much space we need for all the command(s) + */ + for (i = 0; i < ui32CmdCount; i++) + { + ui32AllocSize += + asCmdHelperData[i].ui32FenceCmdSize + + asCmdHelperData[i].ui32FBSCInvalCmdSize + + asCmdHelperData[i].ui32DMCmdSize + + asCmdHelperData[i].ui32UpdateCmdSize + + asCmdHelperData[i].ui32UnfencedUpdateCmdSize; + } + + return ui32AllocSize; +} + +/* Work out how much of an offset there is to a specific command. 
*/ +IMG_UINT32 RGXCmdHelperGetCommandOffset(RGX_CCB_CMD_HELPER_DATA *asCmdHelperData, + IMG_UINT32 ui32Cmdindex) +{ + IMG_UINT32 ui32Offset = 0; + IMG_UINT32 i; + + for (i = 0; i < ui32Cmdindex; i++) + { + ui32Offset += + asCmdHelperData[i].ui32FenceCmdSize + + asCmdHelperData[i].ui32FBSCInvalCmdSize + + asCmdHelperData[i].ui32DMCmdSize + + asCmdHelperData[i].ui32UpdateCmdSize + + asCmdHelperData[i].ui32UnfencedUpdateCmdSize; + } + + return ui32Offset; +} + +/* Returns the offset of the data master command from a write offset */ +IMG_UINT32 RGXCmdHelperGetDMCommandHeaderOffset(RGX_CCB_CMD_HELPER_DATA *psCmdHelperData) +{ + return psCmdHelperData->ui32FenceCmdSize + + psCmdHelperData->ui32FBSCInvalCmdSize; +} + +static const char *_CCBCmdTypename(RGXFWIF_CCB_CMD_TYPE cmdType) +{ + switch (cmdType) + { + case RGXFWIF_CCB_CMD_TYPE_GEOM: return "GEOM_PPP"; + case RGXFWIF_CCB_CMD_TYPE_3D: return "3D"; + case RGXFWIF_CCB_CMD_TYPE_3D_PR: return "3D_PR"; + case RGXFWIF_CCB_CMD_TYPE_CDM: return "CDM"; + case RGXFWIF_CCB_CMD_TYPE_TQ_TDM: return "TQ_TDM"; + case RGXFWIF_CCB_CMD_TYPE_FBSC_INVALIDATE: return "FBSC_INVALIDATE"; + case RGXFWIF_CCB_CMD_TYPE_NULL: return "NULL"; + case RGXFWIF_CCB_CMD_TYPE_FENCE: return "FENCE"; + case RGXFWIF_CCB_CMD_TYPE_UPDATE: return "UPDATE"; + case RGXFWIF_CCB_CMD_TYPE_FENCE_PR: return "FENCE_PR"; + case RGXFWIF_CCB_CMD_TYPE_PRIORITY: return "PRIORITY"; + case RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE: return "UNFENCED_UPDATE"; + case RGXFWIF_CCB_CMD_TYPE_PADDING: return "PADDING"; + + default: + PVR_ASSERT(IMG_FALSE); + break; + } + + return "INVALID"; +} + +PVRSRV_ERROR CheckForStalledCCB(PVRSRV_DEVICE_NODE *psDevNode, RGX_CLIENT_CCB *psCurrentClientCCB, RGX_KICK_TYPE_DM eKickTypeDM) +{ + volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl; + IMG_UINT32 ui32SampledRdOff, ui32SampledDpOff, ui32SampledWrOff, ui32WrapMask; + PVRSRV_ERROR eError = PVRSRV_OK; + + if (psCurrentClientCCB == NULL) + { + PVR_DPF((PVR_DBG_WARNING, "CheckForStalledCCB: CCCB is 
NULL")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + +#if defined(PVRSRV_ENABLE_CCCB_GROW) + /* If CCB grow is enabled, take the lock while sampling offsets + * (to guard against a grow happening mid-sample) + */ + OSLockAcquire(psCurrentClientCCB->hCCBGrowLock); +#endif + /* NB. use psCurrentClientCCB->ui32Size as basis for wrap mask (rather than psClientCCBCtrl->ui32WrapMask) + * as if CCB grow happens, psCurrentClientCCB->ui32Size will have been updated but + * psClientCCBCtrl->ui32WrapMask is only updated once the firmware sees the CCB has grown. + * If we use the wrong value, we might incorrectly determine that the offsets are invalid. + */ + ui32WrapMask = RGXGetWrapMaskCCB(psCurrentClientCCB); + psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl; + ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset; + ui32SampledDpOff = psClientCCBCtrl->ui32DepOffset; + ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset; +#if defined(PVRSRV_ENABLE_CCCB_GROW) + OSLockRelease(psCurrentClientCCB->hCCBGrowLock); +#endif + + if (ui32SampledRdOff > ui32WrapMask || + ui32SampledDpOff > ui32WrapMask || + ui32SampledWrOff > ui32WrapMask) + { + PVR_DPF((PVR_DBG_WARNING, "CheckForStalledCCB: CCCB has invalid offset (ROFF=%d DOFF=%d WOFF=%d)", + ui32SampledRdOff, ui32SampledDpOff, ui32SampledWrOff)); + return PVRSRV_ERROR_INVALID_OFFSET; + } + + if (ui32SampledRdOff != ui32SampledWrOff && + psCurrentClientCCB->ui32LastROff != psCurrentClientCCB->ui32LastWOff && + ui32SampledRdOff == psCurrentClientCCB->ui32LastROff && + (psCurrentClientCCB->ui32ByteCount - psCurrentClientCCB->ui32LastByteCount) < psCurrentClientCCB->ui32Size) + { + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDevNode->pvDevice; + + /* Only log a stalled CCB if GPU is idle (any state other than POW_ON is considered idle) */ + if ((psDevInfo->psRGXFWIfFwSysData->ePowState != RGXFWIF_POW_ON) && + psDevInfo->ui32SLRHoldoffCounter == 0) + { + static __maybe_unused const char *pszStalledAction = +#if 
defined(PVRSRV_STALLED_CCB_ACTION) + "force"; +#else + "warn"; +#endif + /* Don't log this by default unless debugging since a higher up + * function will log the stalled condition. Helps avoid double + * messages in the log. + */ + PVR_DPF((PVR_DBG_ERROR, "%s (%s): CCCB has not progressed (ROFF=%d DOFF=%d WOFF=%d) for \"%s\"", + __func__, pszStalledAction, ui32SampledRdOff, + ui32SampledDpOff, ui32SampledWrOff, + (IMG_PCHAR)&psCurrentClientCCB->szName)); + eError = PVRSRV_ERROR_CCCB_STALLED; + + { + void *pvClientCCBBuff = psCurrentClientCCB->pvClientCCB; + RGXFWIF_CCB_CMD_HEADER *psCommandHeader = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledRdOff); + PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psCurrentClientCCB->psServerCommonContext); + + /* Special case - if readOffset is on a PADDING packet, CCB has wrapped. + * In this case, skip over the PADDING packet. + */ + if (psCommandHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_PADDING) + { + psCommandHeader = IMG_OFFSET_ADDR(pvClientCCBBuff, + ((ui32SampledRdOff + + psCommandHeader->ui32CmdSize + + sizeof(RGXFWIF_CCB_CMD_HEADER)) + & psCurrentClientCCB->psClientCCBCtrl->ui32WrapMask)); + } + + /* Only try to recover a 'stalled' context (ie one waiting on a fence), as some work (eg compute) could + * take a long time to complete, during which time the CCB ptrs would not advance. 
+ */ + if (((psCommandHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE) || + (psCommandHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR)) && + (psCommandHeader != IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledWrOff))) + { + /* Acquire the cCCB recovery lock */ + OSLockAcquire(psDevInfo->hCCBRecoveryLock); + + if (!psDevInfo->pvEarliestStalledClientCCB) + { + psDevInfo->pvEarliestStalledClientCCB = (void*)psCurrentClientCCB; + psDevInfo->ui32OldestSubmissionOrdinal = psCommandHeader->ui32IntJobRef; + } + else + { + /* Check if this fence cmd header has an older submission stamp than the one we are currently considering unblocking + * (account for submission stamp wrap by checking diff is less than 0x80000000) - if it is older, then this becomes + * our preferred fence to be unblocked/ + */ + if ((psCommandHeader->ui32IntJobRef < psDevInfo->ui32OldestSubmissionOrdinal) && + ((psDevInfo->ui32OldestSubmissionOrdinal - psCommandHeader->ui32IntJobRef) < 0x8000000)) + { + psDevInfo->pvEarliestStalledClientCCB = (void*)psCurrentClientCCB; + psDevInfo->ui32OldestSubmissionOrdinal = psCommandHeader->ui32IntJobRef; + } + } + + /* Release the cCCB recovery lock */ + OSLockRelease(psDevInfo->hCCBRecoveryLock); + } + } + } + } + + psCurrentClientCCB->ui32LastROff = ui32SampledRdOff; + psCurrentClientCCB->ui32LastWOff = ui32SampledWrOff; + psCurrentClientCCB->ui32LastByteCount = psCurrentClientCCB->ui32ByteCount; + + return eError; +} + +void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo, + PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext, + RGX_CLIENT_CCB *psCurrentClientCCB, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; + volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl; + void *pvClientCCBBuff; + IMG_UINT32 ui32Offset; + IMG_UINT32 ui32DepOffset; + IMG_UINT32 ui32EndOffset; + IMG_UINT32 ui32WrapMask; + IMG_CHAR * pszState = "Ready"; + + /* Ensure hCCBGrowLock is acquired before reading + * 
psCurrentClientCCB->pvClientCCB as a CCB grow + * could remap the virtual addresses. + */ +#if defined(PVRSRV_ENABLE_CCCB_GROW) + OSLockAcquire(psCurrentClientCCB->hCCBGrowLock); +#endif + psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl; + pvClientCCBBuff = psCurrentClientCCB->pvClientCCB; + ui32EndOffset = psCurrentClientCCB->ui32HostWriteOffset; + OSMemoryBarrier(); + ui32Offset = psClientCCBCtrl->ui32ReadOffset; + ui32DepOffset = psClientCCBCtrl->ui32DepOffset; + /* NB. Use psCurrentClientCCB->ui32Size as basis for wrap mask (rather + * than psClientCCBCtrl->ui32WrapMask) as if CCB grow happened, + * psCurrentClientCCB->ui32Size will have been updated but + * psClientCCBCtrl->ui32WrapMask is only updated once the firmware + * sees the CCB has grown. If we use the wrong value, ui32NextOffset + * can end up being wrapped prematurely and pointing to garbage. + */ + ui32WrapMask = RGXGetWrapMaskCCB(psCurrentClientCCB); + + PVR_DUMPDEBUG_LOG("FWCtx 0x%08X (%s)", sFWCommonContext.ui32Addr, + (IMG_PCHAR)&psCurrentClientCCB->szName); + if (ui32Offset == ui32EndOffset) + { + PVR_DUMPDEBUG_LOG(" `--"); + } + + while (ui32Offset != ui32EndOffset) + { + RGXFWIF_CCB_CMD_HEADER *psCmdHeader = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32Offset); + IMG_UINT32 ui32NextOffset = (ui32Offset + psCmdHeader->ui32CmdSize + sizeof(RGXFWIF_CCB_CMD_HEADER)) & ui32WrapMask; + IMG_BOOL bLastCommand = (ui32NextOffset == ui32EndOffset)? IMG_TRUE: IMG_FALSE; + IMG_BOOL bLastUFO; + #define CCB_SYNC_INFO_LEN 80 + IMG_CHAR pszSyncInfo[CCB_SYNC_INFO_LEN]; + IMG_UINT32 ui32NoOfUpdates, i; + RGXFWIF_UFO *psUFOPtr; + + ui32NoOfUpdates = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO); + psUFOPtr = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32Offset + sizeof(RGXFWIF_CCB_CMD_HEADER)); + pszSyncInfo[0] = '\0'; + + if (ui32Offset == ui32DepOffset) + { + pszState = "Waiting"; + } + + PVR_DUMPDEBUG_LOG(" %s--%s %s @ %u Int=%u Ext=%u", + bLastCommand? 
"`": "|", + pszState, _CCBCmdTypename(psCmdHeader->eCmdType), + ui32Offset, psCmdHeader->ui32IntJobRef, psCmdHeader->ui32ExtJobRef + ); + + /* switch on type and write checks and updates */ + switch (psCmdHeader->eCmdType) + { + case RGXFWIF_CCB_CMD_TYPE_UPDATE: + case RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE: + case RGXFWIF_CCB_CMD_TYPE_FENCE: + case RGXFWIF_CCB_CMD_TYPE_FENCE_PR: + { + for (i = 0; i < ui32NoOfUpdates; i++, psUFOPtr++) + { + bLastUFO = (ui32NoOfUpdates-1 == i)? IMG_TRUE: IMG_FALSE; + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) + { + if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr)) + { + SyncCheckpointRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr, + pszSyncInfo, CCB_SYNC_INFO_LEN); + } + else + { + SyncRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr, + pszSyncInfo, CCB_SYNC_INFO_LEN); + } + } + + PVR_DUMPDEBUG_LOG(" %s %s--Addr:0x%08x Val=0x%08x %s", + bLastCommand? " ": "|", + bLastUFO? "`": "|", + psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value, + pszSyncInfo + ); + } + break; + } + + default: + break; + } + ui32Offset = ui32NextOffset; + } + +#if defined(PVRSRV_ENABLE_CCCB_GROW) + OSLockRelease(psCurrentClientCCB->hCCBGrowLock); +#endif +} + +void DumpStalledCCBCommand(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext, + RGX_CLIENT_CCB *psCurrentClientCCB, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl; + void *pvClientCCBBuff = psCurrentClientCCB->pvClientCCB; + volatile void *pvPtr; + IMG_UINT32 ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset; + IMG_UINT32 ui32SampledDepOff = psClientCCBCtrl->ui32DepOffset; + IMG_UINT32 ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset; + + pvPtr = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledRdOff); + + if ((ui32SampledRdOff == ui32SampledDepOff) && + (ui32SampledRdOff != ui32SampledWrOff)) + { + volatile RGXFWIF_CCB_CMD_HEADER *psCommandHeader = 
IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledRdOff); + RGXFWIF_CCB_CMD_TYPE eCommandType = psCommandHeader->eCmdType; + volatile void *pvPtr = psCommandHeader; + + /* CCB is stalled on a fence... */ + if ((eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE) || (eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR)) + { +#if defined(SUPPORT_EXTRA_METASP_DEBUG) + PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psCurrentClientCCB->psServerCommonContext); + IMG_UINT32 ui32Val; +#endif + RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvPtr, sizeof(*psCommandHeader)); + IMG_UINT32 jj; + + /* Display details of the fence object on which the context is pending */ + PVR_DUMPDEBUG_LOG("FWCtx 0x%08X @ %d (%s) pending on %s:", + sFWCommonContext.ui32Addr, + ui32SampledRdOff, + (IMG_PCHAR)&psCurrentClientCCB->szName, + _CCBCmdTypename(eCommandType)); + for (jj=0; jjui32CmdSize/sizeof(RGXFWIF_UFO); jj++) + { +#if !defined(SUPPORT_EXTRA_METASP_DEBUG) + PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value); +#else + ui32Val = 0; + RGXReadWithSP(psDevInfo, psUFOPtr[jj].puiAddrUFO.ui32Addr, &ui32Val); + PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value(Host)=0x%08x Value(FW)=0x%08x", + psUFOPtr[jj].puiAddrUFO.ui32Addr, + psUFOPtr[jj].ui32Value, ui32Val); +#endif + } + + /* Advance psCommandHeader past the FENCE to the next command header (this will be the TA/3D command that is fenced) */ + pvPtr = IMG_OFFSET_ADDR(psUFOPtr, psCommandHeader->ui32CmdSize); + psCommandHeader = pvPtr; + if (psCommandHeader != IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledWrOff)) + { + PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X fenced command is of type %s",sFWCommonContext.ui32Addr, _CCBCmdTypename(psCommandHeader->eCmdType)); + /* Advance psCommandHeader past the TA/3D to the next command header (this will possibly be an UPDATE) */ + pvPtr = IMG_OFFSET_ADDR(pvPtr, sizeof(*psCommandHeader) + psCommandHeader->ui32CmdSize); + psCommandHeader = pvPtr; + /* If the next command is an update, 
display details of that so we can see what would then become unblocked */ + if (psCommandHeader != IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledWrOff)) + { + eCommandType = psCommandHeader->eCmdType; + + if (eCommandType == RGXFWIF_CCB_CMD_TYPE_UPDATE) + { + psUFOPtr = IMG_OFFSET_ADDR(psCommandHeader, sizeof(*psCommandHeader)); + PVR_DUMPDEBUG_LOG(" preventing %s:",_CCBCmdTypename(eCommandType)); + for (jj=0; jjui32CmdSize/sizeof(RGXFWIF_UFO); jj++) + { +#if !defined(SUPPORT_EXTRA_METASP_DEBUG) + PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value); +#else + ui32Val = 0; + RGXReadWithSP(psDevInfo, psUFOPtr[jj].puiAddrUFO.ui32Addr, &ui32Val); + PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value(Host)=0x%08x Value(FW)=0x%08x", + psUFOPtr[jj].puiAddrUFO.ui32Addr, + psUFOPtr[jj].ui32Value, + ui32Val); +#endif + } + } + } + else + { + PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr); + } + } + else + { + PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr); + } + } + } +} + +void DumpStalledContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGX_CLIENT_CCB *psStalledClientCCB; + + PVR_ASSERT(psDevInfo); + + psStalledClientCCB = (RGX_CLIENT_CCB *)psDevInfo->pvEarliestStalledClientCCB; + + if (psStalledClientCCB) + { + volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl = psStalledClientCCB->psClientCCBCtrl; + IMG_UINT32 ui32SampledDepOffset = psClientCCBCtrl->ui32DepOffset; + void *pvPtr = IMG_OFFSET_ADDR(psStalledClientCCB->pvClientCCB, ui32SampledDepOffset); + RGXFWIF_CCB_CMD_HEADER *psCommandHeader = pvPtr; + RGXFWIF_CCB_CMD_TYPE eCommandType = psCommandHeader->eCmdType; + + if ((eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE) || (eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR)) + { + RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvPtr, sizeof(*psCommandHeader)); + IMG_UINT32 jj; + IMG_UINT32 ui32NumUnsignalledUFOs = 0; + IMG_UINT32 ui32UnsignalledUFOVaddrs[PVRSRV_MAX_SYNCS]; + +#if 
defined(PVRSRV_STALLED_CCB_ACTION) + if (!psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.aszCCBName[0]) + { + OSClockMonotonicns64(&psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.ui64Timestamp); + psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.ui32NumUFOs = (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)); + psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.ui32FWCtxAddr = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr; + OSStringLCopy(psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.aszCCBName, + psStalledClientCCB->szName, + MAX_CLIENT_CCB_NAME); + } + else + { + OSClockMonotonicns64(&psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].ui64Timestamp); + psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].ui32NumUFOs = (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)); + psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].ui32FWCtxAddr = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr; + OSStringLCopy(psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].aszCCBName, + psStalledClientCCB->szName, + MAX_CLIENT_CCB_NAME); + psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp = (psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp + 1) % PVR_SLR_LOG_ENTRIES; + } + psDevInfo->psRGXFWIfFwOsData->ui32ForcedUpdatesRequested++; +#endif + PVR_LOG(("Fence found on context 0x%x '%s' @ %d has %d UFOs", + FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr, + psStalledClientCCB->szName, ui32SampledDepOffset, + (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)))); + + for (jj=0; jjui32CmdSize/sizeof(RGXFWIF_UFO); jj++) + { + if (PVRSRV_UFO_IS_SYNC_CHECKPOINT((RGXFWIF_UFO *)&psUFOPtr[jj])) + { + IMG_UINT32 ui32ReadValue = SyncCheckpointStateFromUFO(psDevInfo->psDeviceNode, + psUFOPtr[jj].puiAddrUFO.ui32Addr); + PVR_LOG((" %d/%d FWAddr 0x%x requires 0x%x (currently 0x%x)", jj+1, + 
(IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)), + psUFOPtr[jj].puiAddrUFO.ui32Addr, + psUFOPtr[jj].ui32Value, + ui32ReadValue)); + /* If fence is unmet, dump debug info on it */ + if (ui32ReadValue != psUFOPtr[jj].ui32Value) + { + /* Add to our list to pass to pvr_sync */ + ui32UnsignalledUFOVaddrs[ui32NumUnsignalledUFOs] = psUFOPtr[jj].puiAddrUFO.ui32Addr; + ui32NumUnsignalledUFOs++; + } + } + else + { + PVR_LOG((" %d/%d FWAddr 0x%x requires 0x%x", jj+1, + (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)), + psUFOPtr[jj].puiAddrUFO.ui32Addr, + psUFOPtr[jj].ui32Value)); + } + } +#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) + if (ui32NumUnsignalledUFOs > 0) + { + IMG_UINT32 ui32NumSyncsOwned; + PVRSRV_ERROR eErr = SyncCheckpointDumpInfoOnStalledUFOs(ui32NumUnsignalledUFOs, &ui32UnsignalledUFOVaddrs[0], &ui32NumSyncsOwned); + + PVR_LOG_IF_ERROR(eErr, "SyncCheckpointDumpInfoOnStalledUFOs() call failed."); + } +#endif +#if defined(PVRSRV_STALLED_CCB_ACTION) + if (BIT_ISSET(psStalledClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED)) + { + PRGXFWIF_FWCOMMONCONTEXT psContext = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext); + + PVR_LOG(("SLR disabled for FWCtx 0x%08X", psContext.ui32Addr)); + } + else + { + if (ui32NumUnsignalledUFOs > 0) + { + RGXFWIF_KCCB_CMD sSignalFencesCmd; + + sSignalFencesCmd.eCmdType = RGXFWIF_KCCB_CMD_FORCE_UPDATE; + sSignalFencesCmd.ui32KCCBFlags = 0; + sSignalFencesCmd.uCmdData.sForceUpdateData.psContext = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext); + sSignalFencesCmd.uCmdData.sForceUpdateData.ui32CCBFenceOffset = ui32SampledDepOffset; + + PVR_LOG(("Forced update command issued for FWCtx 0x%08X", sSignalFencesCmd.uCmdData.sForceUpdateData.psContext.ui32Addr)); + + RGXScheduleCommand(FWCommonContextGetRGXDevInfo(psStalledClientCCB->psServerCommonContext), + FWCommonContextGetServerMMUCtx(psStalledClientCCB->psServerCommonContext), + 
RGXFWIF_DM_GP, + &sSignalFencesCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + } + } +#endif + } + psDevInfo->pvEarliestStalledClientCCB = NULL; + } +} + +/****************************************************************************** + End of file (rgxccb.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxccb.h b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxccb.h new file mode 100644 index 000000000000..b1f13e1e2788 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxccb.h @@ -0,0 +1,334 @@ +/*************************************************************************/ /*! +@File +@Title RGX Circular Command Buffer functionality. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the RGX Circular Command Buffer functionality. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__RGXCCB_H__) +#define __RGXCCB_H__ + +#include "devicemem.h" +#include "device.h" +#include "rgxdevice.h" +#include "sync_server.h" +#include "connection_server.h" +#include "rgxdebug.h" +#include "rgxdefs_km.h" +#include "pvr_notifier.h" + +#define MAX_CLIENT_CCB_NAME 30 +#define SYNC_FLAG_MASK_ALL IMG_UINT32_MAX + +/* + * This size is to be used when a client CCB is found to consume very + * negligible space (e.g. a few hundred bytes to few KBs - less than a page). + * In such a case, instead of allocating CCB of size of only a few KBs, we + * allocate at-least this much to be future risk-free. 
+ */ +#define MIN_SAFE_CCB_SIZE_LOG2 13 /* 8K (2 Pages) */ +#define MAX_SAFE_CCB_SIZE_LOG2 18 /* 256K (64 Pages) */ + +#define RGX_TQ_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D +static_assert(RGX_TQ_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && + RGX_TQ_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ CCB size is invalid"); +#define RGX_TQ_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ2D +static_assert(RGX_TQ_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D && + RGX_TQ_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ max CCB size is invalid"); + +#define RGX_CDM_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM +static_assert(RGX_CDM_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && + RGX_CDM_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "CDM CCB size is invalid"); +#define RGX_CDM_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_CDM +static_assert(RGX_CDM_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM && + RGX_CDM_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "CDM max CCB size is invalid"); + +#define RGX_TA_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA +static_assert(RGX_TA_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && + RGX_TA_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TA CCB size is invalid"); +#define RGX_TA_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TA +static_assert(RGX_TA_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA && + RGX_TA_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TA max CCB size is invalid"); + +#define RGX_3D_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D +static_assert(RGX_3D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && + RGX_3D_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "3D CCB size is invalid"); +#define RGX_3D_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_3D +static_assert(RGX_3D_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D && + RGX_3D_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "3D max CCB size is invalid"); + +#define RGX_KICKSYNC_CCB_SIZE_LOG2 
PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC +static_assert(RGX_KICKSYNC_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && + RGX_KICKSYNC_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "KickSync CCB size is invalid"); +#define RGX_KICKSYNC_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_KICKSYNC +static_assert(RGX_KICKSYNC_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC && + RGX_KICKSYNC_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "KickSync max CCB size is invalid"); + +#define RGX_TDM_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TDM +static_assert(RGX_TDM_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && + RGX_TDM_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TDM CCB size is invalid"); +#define RGX_TDM_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TDM +static_assert(RGX_TDM_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TDM && + RGX_TDM_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TDM max CCB size is invalid"); + +typedef struct _RGX_CLIENT_CCB_ RGX_CLIENT_CCB; + +/* + This structure is declared here as it's allocated on the heap by + the callers +*/ + +typedef struct _RGX_CCB_CMD_HELPER_DATA_ { + /* Data setup at command init time */ + RGX_CLIENT_CCB *psClientCCB; + IMG_CHAR *pszCommandName; + IMG_UINT32 ui32PDumpFlags; + + IMG_UINT32 ui32ClientFenceCount; + PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress; + IMG_UINT32 *paui32FenceValue; + IMG_UINT32 ui32ClientUpdateCount; + PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress; + IMG_UINT32 *paui32UpdateValue; + RGXFWIF_CCB_CMD_TYPE eType; + IMG_UINT32 ui32CmdSize; + IMG_UINT8 *pui8DMCmd; + IMG_UINT32 ui32FenceCmdSize; + IMG_UINT64 ui32FBSCInvalCmdSize; + IMG_UINT32 ui32DMCmdSize; + IMG_UINT32 ui32UpdateCmdSize; + IMG_UINT32 ui32UnfencedUpdateCmdSize; + + /* data for FBSC invalidate command */ + IMG_UINT64 ui64FBSCEntryMask; + + + + /* Data setup at command acquire time */ + IMG_UINT8 *pui8StartPtr; + IMG_UINT8 *pui8ServerUpdateStart; + IMG_UINT8 *pui8ServerUnfencedUpdateStart; + IMG_UINT8 *pui8ServerFenceStart; + IMG_UINT32 
ui32ServerFenceCount; + IMG_UINT32 ui32ServerUpdateCount; + IMG_UINT32 ui32ServerUnfencedUpdateCount; + + /* Job reference fields */ + IMG_UINT32 ui32ExtJobRef; + IMG_UINT32 ui32IntJobRef; + + /* FW Memdesc for Workload information */ + RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData; + +} RGX_CCB_CMD_HELPER_DATA; + +#define PADDING_COMMAND_SIZE (sizeof(RGXFWIF_CCB_CMD_HEADER)) + + +#define RGX_CCB_REQUESTORS(TYPE) \ + /* for debugging purposes */ TYPE(UNDEF) \ + TYPE(GEOM_PPP) \ + TYPE(GEOM_SHG) \ + TYPE(3D) \ + TYPE(CDM) \ + TYPE(RS) \ + TYPE(TQ_3D) \ + TYPE(TQ_2D) \ + TYPE(TQ_TDM) \ + TYPE(KICKSYNC) \ + +/* Forms an enum constant for each type present in RGX_CCB_REQUESTORS list. The enum is mainly used as + an index to the aszCCBRequestors table defined in rgxccb.c. The total number of enums must adhere + to the following build assert. +*/ +typedef enum _RGX_CCB_REQUESTOR_TYPE_ +{ +#define CONSTRUCT_ENUM(req) REQ_TYPE_##req, + RGX_CCB_REQUESTORS (CONSTRUCT_ENUM) +#undef CONSTRUCT_ENUM + + /* should always be at the end */ + REQ_TYPE_TOTAL_COUNT, +} RGX_CCB_REQUESTOR_TYPE; + +/* Tuple describing the columns of the following table */ +typedef enum _RGX_CCB_REQUESTOR_TUPLE_ +{ + REQ_RGX_FW_CLIENT_CCB_STRING, /* Index to comment to be dumped in DevMemAllocs when allocating FirmwareClientCCB for this requestor */ + REQ_RGX_FW_CLIENT_CCB_CONTROL_STRING, /* Index to comment to be dumped in DevMemAllocs when allocating FirmwareClientCCBControl for this requestor */ + REQ_PDUMP_COMMENT, /* Index to comment to be dumped in PDUMPs */ + + /* should always be at the end */ + REQ_TUPLE_CARDINALITY, +} RGX_CCB_REQUESTOR_TUPLE; + +/* Unpack U8 values from U32. 
*/ +#define U32toU8_Unpack1(U32Packed) (U32Packed & 0xFF) +#define U32toU8_Unpack2(U32Packed) ((U32Packed>>8) & 0xFF) +#define U32toU8_Unpack3(U32Packed) ((U32Packed>>16) & 0xFF) +#define U32toU8_Unpack4(U32Packed) ((U32Packed>>24) & 0xFF) + +/* Defines for bit meanings within the ui32CCBFlags member of struct _RGX_CLIENT_CCB_ + * + * ( X = taken/in use, - = available/unused ) + * + * 31 10 + * | || + * ------------------------------XX + * Bit Meaning + * 0 = If set, CCB is still open and commands will be appended to it + * 1 = If set, do not perform Sync Lockup Recovery (SLR) for this CCB + */ +#define CCB_FLAGS_CCB_STATE_OPEN (0) /*!< This bit is set to indicate CCB is in the 'Open' state. */ +#define CCB_FLAGS_SLR_DISABLED (1) /*!< This bit is set to disable Sync Lockup Recovery (SLR) for this CCB. */ + + +/* Table containing an array of strings for each requestor type in the list of RGX_CCB_REQUESTORS. In addition to its use in + this module (rgxccb.c), this table is also used to access string to be dumped in PDUMP comments, hence, marking it extern for + use in other modules. 
+*/ +extern const IMG_CHAR *const aszCCBRequestors[][REQ_TUPLE_CARDINALITY]; + +PVRSRV_ERROR RGXCCBPDumpDrainCCB(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32CCBSizeLog2, + IMG_UINT32 ui32CCBMaxSizeLog2, + IMG_UINT32 ui32ContextFlags, + CONNECTION_DATA *psConnectionData, + RGX_CCB_REQUESTOR_TYPE eCCBRequestor, + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + RGX_CLIENT_CCB **ppsClientCCB, + DEVMEM_MEMDESC **ppsClientCCBMemDesc, + DEVMEM_MEMDESC **ppsClientCCBCtlMemDesc); + +void RGXDestroyCCB(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_CLIENT_CCB *psClientCCB); + +PVRSRV_ERROR RGXCheckSpaceCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32CmdSize); + +PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32CmdSize, + void **ppvBufferSpace, + IMG_UINT32 ui32PDumpFlags); + +void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32CmdSize, + IMG_UINT32 ui32PDumpFlags); + +IMG_UINT32 RGXGetHostWriteOffsetCCB(RGX_CLIENT_CCB *psClientCCB); +IMG_UINT32 RGXGetWrapMaskCCB(RGX_CLIENT_CCB *psClientCCB); + +PVRSRV_ERROR RGXSetCCBFlags(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32Flags); + +void RGXCmdHelperInitCmdCCB_CommandSize(IMG_UINT64 ui64FBSCEntryMask, + IMG_UINT32 ui32ClientFenceCount, + IMG_UINT32 ui32ClientUpdateCount, + IMG_UINT32 ui32CmdSize, + RGX_CCB_CMD_HELPER_DATA *psCmdHelperData); + +void RGXCmdHelperInitCmdCCB_OtherData(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32ClientFenceCount, + PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress, + IMG_UINT32 *paui32FenceValue, + IMG_UINT32 ui32ClientUpdateCount, + PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress, + IMG_UINT32 *paui32UpdateValue, + IMG_UINT32 ui32CmdSize, + IMG_PBYTE pui8DMCmd, + RGXFWIF_CCB_CMD_TYPE eType, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_UINT32 ui32PDumpFlags, + RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData, + IMG_CHAR *pszCommandName, + IMG_BOOL bCCBStateOpen, + 
RGX_CCB_CMD_HELPER_DATA *psCmdHelperData); + +void RGXCmdHelperInitCmdCCB(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT64 ui64FBSCEntryMask, + IMG_UINT32 ui32ClientFenceCount, + PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress, + IMG_UINT32 *paui32FenceValue, + IMG_UINT32 ui32ClientUpdateCount, + PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress, + IMG_UINT32 *paui32UpdateValue, + IMG_UINT32 ui32CmdSize, + IMG_UINT8 *pui8DMCmd, + RGXFWIF_CCB_CMD_TYPE eType, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_UINT32 ui32PDumpFlags, + RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData, + IMG_CHAR *pszCommandName, + IMG_BOOL bCCBStateOpen, + RGX_CCB_CMD_HELPER_DATA *psCmdHelperData); + +PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount, + RGX_CCB_CMD_HELPER_DATA *asCmdHelperData); + +void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount, + RGX_CCB_CMD_HELPER_DATA *asCmdHelperData, + const IMG_CHAR *pcszDMName, + IMG_UINT32 ui32CtxAddr); + +IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32 ui32CmdCount, + RGX_CCB_CMD_HELPER_DATA *asCmdHelperData); + +IMG_UINT32 RGXCmdHelperGetCommandOffset(RGX_CCB_CMD_HELPER_DATA *asCmdHelperData, + IMG_UINT32 ui32Cmdindex); + +IMG_UINT32 RGXCmdHelperGetDMCommandHeaderOffset(RGX_CCB_CMD_HELPER_DATA *psCmdHelperData); + +void DumpStalledCCBCommand(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext, + RGX_CLIENT_CCB *psCurrentClientCCB, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); + +void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo, + PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext, + RGX_CLIENT_CCB *psCurrentClientCCB, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); + +PVRSRV_ERROR CheckForStalledCCB(PVRSRV_DEVICE_NODE *psDevNode, RGX_CLIENT_CCB *psCurrentClientCCB, RGX_KICK_TYPE_DM eKickTypeDM); + +void DumpStalledContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo); +#endif /* __RGXCCB_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxcompute.c 
b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxcompute.c new file mode 100644 index 000000000000..fc28153afbd7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxcompute.c @@ -0,0 +1,1182 @@ +/*************************************************************************/ /*! +@File +@Title RGX Compute routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX Compute routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "srvkm.h" +#include "pdump_km.h" +#include "pvr_debug.h" +#include "rgxutils.h" +#include "rgxfwutils.h" +#include "rgxcompute.h" +#include "rgxmem.h" +#include "allocmem.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "osfunc.h" +#include "rgxccb.h" +#include "rgxhwperf.h" +#include "ospvr_gputrace.h" +#include "htbuffer.h" + +#include "sync_server.h" +#include "sync_internal.h" +#include "sync.h" +#include "rgx_memallocflags.h" + +#include "sync_checkpoint.h" +#include "sync_checkpoint_internal.h" + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +#include "rgxworkest.h" + +#define HASH_CLEAN_LIMIT 6 +#endif + +/* Enable this to dump the compiled list of UFOs prior to kick call */ +#define ENABLE_CMP_UFO_DUMP 0 + +//#define CMP_CHECKPOINT_DEBUG 1 + +#if defined(CMP_CHECKPOINT_DEBUG) +#define CHKPT_DBG(X) PVR_DPF(X) +#else +#define CHKPT_DBG(X) +#endif + +typedef struct { + DEVMEM_MEMDESC *psContextStateMemDesc; + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; + IMG_UINT32 ui32Priority; +} RGX_SERVER_CC_CMP_DATA; + +struct _RGX_SERVER_COMPUTE_CONTEXT_ { + PVRSRV_DEVICE_NODE *psDeviceNode; + //RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; + //DEVMEM_MEMDESC *psFWComputeContextStateMemDesc; + DEVMEM_MEMDESC *psFWComputeContextMemDesc; + RGX_SERVER_CC_CMP_DATA sComputeData; + DLLIST_NODE sListNode; + 
SYNC_ADDR_LIST sSyncAddrListFence; + SYNC_ADDR_LIST sSyncAddrListUpdate; + POS_LOCK hLock; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + WORKEST_HOST_DATA sWorkEstData; +#endif +}; + +static +PVRSRV_ERROR _CreateComputeContext(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC *psAllocatedMemDesc, + IMG_UINT32 ui32AllocatedOffset, + SERVER_MMU_CONTEXT *psServerMMUContext, + DEVMEM_MEMDESC *psFWMemContextMemDesc, + IMG_UINT32 ui32PackedCCBSizeU88, + IMG_UINT32 ui32ContextFlags, + IMG_UINT32 ui32Priority, + IMG_UINT64 ui64RobustnessAddress, + IMG_UINT32 ui32MaxDeadlineMS, + RGX_COMMON_CONTEXT_INFO *psInfo, + RGX_SERVER_CC_CMP_DATA *psComputeData) +{ + IMG_UINT32 ui32CCBAllocSizeLog2, ui32CCBMaxAllocSizeLog2; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + + /* + Allocate device memory for the firmware GPU context suspend state. + Note: the FW reads/writes the state to memory by accessing the GPU register interface. + */ + PDUMPCOMMENT("Allocate RGX firmware compute context suspend state"); + + eError = DevmemFwAllocate(psDevInfo, + sizeof(RGXFWIF_COMPUTECTX_STATE), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwComputeContextState", + &psComputeData->psContextStateMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate firmware GPU context suspend state (%d)", + __func__, + eError)); + goto fail_contextsuspendalloc; + } + + ui32CCBAllocSizeLog2 = U32toU8_Unpack1(ui32PackedCCBSizeU88); + ui32CCBMaxAllocSizeLog2 = U32toU8_Unpack2(ui32PackedCCBSizeU88); + eError = FWCommonContextAllocate(psConnection, + psDeviceNode, + REQ_TYPE_CDM, + RGXFWIF_DM_CDM, + psServerMMUContext, + psAllocatedMemDesc, + ui32AllocatedOffset, + psFWMemContextMemDesc, + psComputeData->psContextStateMemDesc, + ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_CDM_CCB_SIZE_LOG2, + ui32CCBMaxAllocSizeLog2 ? 
ui32CCBMaxAllocSizeLog2 : RGX_CDM_CCB_MAX_SIZE_LOG2, + ui32ContextFlags, + ui32Priority, + ui32MaxDeadlineMS, + ui64RobustnessAddress, + psInfo, + &psComputeData->psServerCommonContext); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to init Compute fw common context (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_computecommoncontext; + } + + /* + * Dump the FW compute context suspend state buffer + */ + PDUMPCOMMENT("Dump the compute context suspend state buffer"); + DevmemPDumpLoadMem(psComputeData->psContextStateMemDesc, + 0, + sizeof(RGXFWIF_COMPUTECTX_STATE), + PDUMP_FLAGS_CONTINUOUS); + + psComputeData->ui32Priority = ui32Priority; + return PVRSRV_OK; + +fail_computecommoncontext: + DevmemFree(psComputeData->psContextStateMemDesc); +fail_contextsuspendalloc: + PVR_ASSERT(eError != PVRSRV_OK); + + return eError; +} + +static +PVRSRV_ERROR _DestroyComputeContext(RGX_SERVER_CC_CMP_DATA *psComputeData, + PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + + /* Check if the FW has finished with this resource ... */ + eError = RGXFWRequestCommonContextCleanUp(psDeviceNode, + psComputeData->psServerCommonContext, + RGXFWIF_DM_CDM, + PDUMP_FLAGS_NONE); + if (eError == PVRSRV_ERROR_RETRY) + { + return eError; + } + else if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + + /* ... 
it has so we can free its resources */ + FWCommonContextFree(psComputeData->psServerCommonContext); + DevmemFwUnmapAndFree(psDeviceNode->pvDevice, psComputeData->psContextStateMemDesc); + psComputeData->psServerCommonContext = NULL; + return PVRSRV_OK; + } + +PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32Priority, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32StaticComputeContextStateSize, + IMG_PBYTE pStaticComputeContextState, + IMG_UINT32 ui32PackedCCBSizeU88, + IMG_UINT32 ui32ContextFlags, + IMG_UINT64 ui64RobustnessAddress, + IMG_UINT32 ui32MaxDeadlineMS, + RGX_SERVER_COMPUTE_CONTEXT **ppsComputeContext) +{ + DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContext; + RGX_COMMON_CONTEXT_INFO sInfo; + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_FWCOMPUTECONTEXT *psFWComputeContext; + + /* Prepare cleanup struct */ + *ppsComputeContext = NULL; + + if (ui32StaticComputeContextStateSize > RGXFWIF_STATIC_COMPUTECONTEXT_SIZE) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psComputeContext = OSAllocZMem(sizeof(*psComputeContext)); + if (psComputeContext == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* + Create the FW compute context, this has the CDM common + context embedded within it + */ + eError = DevmemFwAllocate(psDevInfo, + sizeof(RGXFWIF_FWCOMPUTECONTEXT), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwComputeContext", + &psComputeContext->psFWComputeContextMemDesc); + if (eError != PVRSRV_OK) + { + goto fail_fwcomputecontext; + } + + eError = OSLockCreate(&psComputeContext->hLock); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to create lock (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_createlock; + } + + psComputeContext->psDeviceNode = psDeviceNode; + + eError = 
_CreateComputeContext(psConnection, + psDeviceNode, + psComputeContext->psFWComputeContextMemDesc, + offsetof(RGXFWIF_FWCOMPUTECONTEXT, sCDMContext), + hMemCtxPrivData, + psFWMemContextMemDesc, + ui32PackedCCBSizeU88, + ui32ContextFlags, + ui32Priority, + ui64RobustnessAddress, + ui32MaxDeadlineMS, + &sInfo, + &psComputeContext->sComputeData); + if (eError != PVRSRV_OK) + { + goto fail_computecontext; + } + + eError = DevmemAcquireCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc, + (void **)&psFWComputeContext); + if (eError != PVRSRV_OK) + { + goto fail_acquire_cpu_mapping; + } + + OSDeviceMemCopy(&psFWComputeContext->sStaticComputeContextState, pStaticComputeContextState, ui32StaticComputeContextStateSize); + DevmemPDumpLoadMem(psComputeContext->psFWComputeContextMemDesc, 0, sizeof(RGXFWIF_FWCOMPUTECONTEXT), PDUMP_FLAGS_CONTINUOUS); + DevmemReleaseCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + WorkEstInitCompute(psDevInfo, &psComputeContext->sWorkEstData); +#endif + + SyncAddrListInit(&psComputeContext->sSyncAddrListFence); + SyncAddrListInit(&psComputeContext->sSyncAddrListUpdate); + + { + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock); + dllist_add_to_tail(&(psDevInfo->sComputeCtxtListHead), &(psComputeContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock); + } + + *ppsComputeContext = psComputeContext; + return PVRSRV_OK; + +fail_acquire_cpu_mapping: + FWCommonContextFree(psComputeContext->sComputeData.psServerCommonContext); +fail_computecontext: + OSLockDestroy(psComputeContext->hLock); +fail_createlock: + DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextMemDesc); +fail_fwcomputecontext: + OSFreeMem(psComputeContext); + return eError; +} + +PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO 
*psDevInfo = psComputeContext->psDeviceNode->pvDevice; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + RGXFWIF_FWCOMPUTECONTEXT *psFWComputeContext; + IMG_UINT32 ui32WorkEstCCBSubmitted; + + eError = DevmemAcquireCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc, + (void **)&psFWComputeContext); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map firmware compute context (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + + ui32WorkEstCCBSubmitted = psFWComputeContext->ui32WorkEstCCBSubmitted; + + DevmemReleaseCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc); + + /* Check if all of the workload estimation CCB commands for this workload are read */ + if (ui32WorkEstCCBSubmitted != psComputeContext->sWorkEstData.ui32WorkEstCCBReceived) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch", + __func__, ui32WorkEstCCBSubmitted, + psComputeContext->sWorkEstData.ui32WorkEstCCBReceived)); + + return PVRSRV_ERROR_RETRY; + } +#endif + + eError = _DestroyComputeContext(&psComputeContext->sComputeData, + psComputeContext->psDeviceNode); + if (eError != PVRSRV_OK) + { + return eError; + } + + OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock); + dllist_remove_node(&(psComputeContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + WorkEstDeInitCompute(psDevInfo, &psComputeContext->sWorkEstData); +#endif + + DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextMemDesc); + + OSLockDestroy(psComputeContext->hLock); + OSFreeMem(psComputeContext); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFODevVarBlock, + IMG_UINT32 *paui32ClientUpdateSyncOffset, + IMG_UINT32 *paui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + 
PVRSRV_TIMELINE iUpdateTimeline, + PVRSRV_FENCE *piUpdateFence, + IMG_CHAR pszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], + IMG_UINT32 ui32CmdSize, + IMG_PBYTE pui8DMCmd, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32NumWorkgroups, + IMG_UINT32 ui32NumWorkitems, + IMG_UINT64 ui64DeadlineInus) +{ + RGXFWIF_KCCB_CMD sCmpKCCBCmd; + RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1]; + PVRSRV_ERROR eError; + PVRSRV_ERROR eError2; + IMG_UINT32 ui32CDMCmdOffset = 0; + PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psComputeContext->sComputeData.psServerCommonContext); + RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psComputeContext->sComputeData.psServerCommonContext); + IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); + IMG_UINT32 ui32FWCtx; + IMG_BOOL bCCBStateOpen = IMG_FALSE; + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataCompute = {0}; + IMG_UINT32 ui32CDMWorkloadDataRO = 0; + IMG_UINT32 ui32CDMCmdHeaderOffset = 0; + IMG_UINT32 ui32CDMCmdOffsetWrapCheck = 0; + RGX_WORKLOAD sWorkloadCharacteristics = {0}; +#endif + + IMG_UINT64 ui64FBSCEntryMask; + IMG_UINT32 ui32IntClientFenceCount = 0; + PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL; + IMG_UINT32 ui32IntClientUpdateCount = 0; + PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL; + IMG_UINT32 *paui32IntUpdateValue = NULL; + PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE; + IMG_UINT64 uiCheckFenceUID = 0; + IMG_UINT64 uiUpdateFenceUID = 0; + PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL; + PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; + IMG_UINT32 ui32FenceSyncCheckpointCount = 0; + IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL; + PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL; + IMG_UINT32 ui32FenceTimelineUpdateValue = 0; + void *pvUpdateFenceFinaliseData = NULL; + + if (iUpdateTimeline >= 0 && !piUpdateFence) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Ensure we haven't 
been given a null ptr to + * update values if we have been told we + * have updates + */ + if (ui32ClientUpdateCount > 0) + { + PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL, + "paui32ClientUpdateValue NULL but " + "ui32ClientUpdateCount > 0", + PVRSRV_ERROR_INVALID_PARAMS); + } + + /* Ensure the string is null-terminated (Required for safety) */ + pszUpdateFenceName[31] = '\0'; + + OSLockAcquire(psComputeContext->hLock); + + eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListFence, + 0, + NULL, + NULL); + if (eError != PVRSRV_OK) + { + goto err_populate_sync_addr_list; + } + + ui32IntClientUpdateCount = ui32ClientUpdateCount; + + eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListUpdate, + ui32ClientUpdateCount, + pauiClientUpdateUFODevVarBlock, + paui32ClientUpdateSyncOffset); + if (eError != PVRSRV_OK) + { + goto err_populate_sync_addr_list; + } + if (ui32IntClientUpdateCount && !pauiIntUpdateUFOAddress) + { + pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs; + } + paui32IntUpdateValue = paui32ClientUpdateValue; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __func__, iCheckFence, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext)); + /* Resolve the sync checkpoints that make up the input fence */ + eError = SyncCheckpointResolveFence(psComputeContext->psDeviceNode->hSyncCheckpointContext, + iCheckFence, + &ui32FenceSyncCheckpointCount, + &apsFenceSyncCheckpoints, + &uiCheckFenceUID, ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __func__, eError)); + goto fail_resolve_input_fence; + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints)); +#if defined(CMP_CHECKPOINT_DEBUG) + if 
(ui32FenceSyncCheckpointCount > 0) + { + IMG_UINT32 ii; + for (ii=0; ii", __func__, ii, (void*)psNextCheckpoint)); + } + } +#endif + /* Create the output fence (if required) */ + if (iUpdateTimeline != PVRSRV_NO_TIMELINE) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateFence=%d, iUpdateTimeline=%d, psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>)...", __func__, iUpdateFence, iUpdateTimeline, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext)); + eError = SyncCheckpointCreateFence(psComputeContext->psDeviceNode, + pszUpdateFenceName, + iUpdateTimeline, + psComputeContext->psDeviceNode->hSyncCheckpointContext, + &iUpdateFence, + &uiUpdateFenceUID, + &pvUpdateFenceFinaliseData, + &psUpdateSyncCheckpoint, + (void*)&psFenceTimelineUpdateSync, + &ui32FenceTimelineUpdateValue, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%d)", __func__, eError)); + goto fail_create_output_fence; + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned from SyncCheckpointCreateFence (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=%u)", __func__, iUpdateFence, psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue)); + + CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u, psFenceTimelineUpdateSync=<%p>", __func__, ui32IntClientUpdateCount, (void*)psFenceTimelineUpdateSync)); + /* Append the sync prim update for the timeline (if required) */ + if (psFenceTimelineUpdateSync) + { + IMG_UINT32 *pui32TimelineUpdateWp = NULL; + + /* Allocate memory to hold the list of update values (including our timeline update) */ + pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); + if (!pui32IntAllocatedUpdateValues) + { + /* Failed to allocate memory */ + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc_update_values_mem; + } + OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, 
sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); + /* Copy the update values into the new memory, then append our timeline update value */ + OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount); +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount)); + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + /* Now set the additional update value */ + pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount; + *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue; + ui32IntClientUpdateCount++; + /* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */ + paui32ClientUpdateValue = pui32IntAllocatedUpdateValues; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: append the timeline sync prim addr <%p> to the compute context update list", __func__, (void*)psFenceTimelineUpdateSync)); + /* Now append the timeline sync prim addr to the compute context update list */ + SyncAddrListAppendSyncPrim(&psComputeContext->sSyncAddrListUpdate, + psFenceTimelineUpdateSync); +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount)); + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */ + paui32IntUpdateValue = pui32IntAllocatedUpdateValues; + } + } + + /* Append the checks (from input fence) */ + if (ui32FenceSyncCheckpointCount > 0) + { + 
CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to Compute CDM Fence (&psComputeContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psComputeContext->sSyncAddrListFence)); +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress; + + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListFence, + ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + if (!pauiIntFenceUFOAddress) + { + pauiIntFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs; + } + ui32IntClientFenceCount += ui32FenceSyncCheckpointCount; + } +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)paui32IntUpdateValue; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: Dumping %d update values (paui32IntUpdateValue=<%p>)...", __func__, ui32IntClientUpdateCount, (void*)paui32IntUpdateValue)); + for (iii=0; iii", __func__, iii, (void*)pui32Tmp)); + CHKPT_DBG((PVR_DBG_ERROR, "%s: *paui32IntUpdateValue[%d] = 0x%x", __func__, iii, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + + if (psUpdateSyncCheckpoint) + { + /* Append the update (from output fence) */ + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to Compute CDM Update (&psComputeContext->sSyncAddrListUpdate=<%p>, psUpdateSyncCheckpoint=<%p>)...", __func__, (void*)&psComputeContext->sSyncAddrListUpdate , (void*)psUpdateSyncCheckpoint)); + SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListUpdate, + 1, + &psUpdateSyncCheckpoint); + if (!pauiIntUpdateUFOAddress) + { + pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs; + } + ui32IntClientUpdateCount++; +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = 
(IMG_UINT32*)pauiIntUpdateUFOAddress; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress=<%p>, pui32Tmp=<%p>, ui32IntClientUpdateCount=%u", __func__, (void*)pauiIntUpdateUFOAddress, (void*)pui32Tmp, ui32IntClientUpdateCount)); + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount)); + +#if (ENABLE_CMP_UFO_DUMP == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: dumping Compute (CDM) fence/updates syncs...", __func__)); + { + IMG_UINT32 ii; + PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress; + PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress; + IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue; + + /* Dump Fence syncs and Update syncs */ + PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) fence syncs (&psComputeContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psComputeContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress)); + for (ii=0; ii. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr)); + psTmpIntFenceUFOAddress++; + } + PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) update syncs (&psComputeContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psComputeContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress)); + for (ii=0; iiui32Addr & 0x1) + { + PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. 
FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue)); + pui32TmpIntUpdateValue++; + } + psTmpIntUpdateUFOAddress++; + } + } +#endif + /* + * Extract the FBSC entries from MMU Context for the deferred FBSC invalidate command, + * in other words, take the value and set it to zero afterwards. + * FBSC Entry Mask must be extracted from MMU ctx and updated just before the kick starts + * as it must be ready at the time of context activation. + */ + { + eError = RGXExtractFBSCEntryMaskFromMMUContext(psComputeContext->psDeviceNode, + FWCommonContextGetServerMMUCtx(psComputeContext->sComputeData.psServerCommonContext), + &ui64FBSCEntryMask); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to extract FBSC Entry Mask (%d)", eError)); + goto fail_cmdinvalfbsc; + } + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + sWorkloadCharacteristics.sCompute.ui32NumberOfWorkgroups = ui32NumWorkgroups; + sWorkloadCharacteristics.sCompute.ui32NumberOfWorkitems = ui32NumWorkitems; + + /* Prepare workload estimation */ + WorkEstPrepare(psComputeContext->psDeviceNode->pvDevice, + &psComputeContext->sWorkEstData, + &psComputeContext->sWorkEstData.uWorkloadMatchingData.sCompute.sDataCDM, + RGXFWIF_CCB_CMD_TYPE_CDM, + &sWorkloadCharacteristics, + ui64DeadlineInus, + &sWorkloadKickDataCompute); +#endif + + RGXCmdHelperInitCmdCCB(psClientCCB, + ui64FBSCEntryMask, + ui32IntClientFenceCount, + pauiIntFenceUFOAddress, + NULL, + ui32IntClientUpdateCount, + pauiIntUpdateUFOAddress, + paui32IntUpdateValue, + ui32CmdSize, + pui8DMCmd, + RGXFWIF_CCB_CMD_TYPE_CDM, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + &sWorkloadKickDataCompute, +#else + NULL, +#endif + "Compute", + bCCBStateOpen, + asCmdHelperData); + + eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData); + if (eError != PVRSRV_OK) + { + goto 
fail_cmdaquire; + } + + + /* + We should reserve space in the kernel CCB here and fill in the command + directly. + This is so if there isn't space in the kernel CCB we can return with + retry back to services client before we take any operations + */ + + /* + We might only be kicking for flush out a padding packet so only submit + the command if the create was successful + */ + if (eError == PVRSRV_OK) + { + /* + All the required resources are ready at this point, we can't fail so + take the required server sync operations and commit all the resources + */ + + ui32CDMCmdOffset = RGXGetHostWriteOffsetCCB(psClientCCB); + RGXCmdHelperReleaseCmdCCB(1, asCmdHelperData, "CDM", FWCommonContextGetFWAddress(psComputeContext->sComputeData.psServerCommonContext).ui32Addr); + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* The following is used to determine the offset of the command header containing + the workload estimation data so that can be accessed when the KCCB is read */ + ui32CDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(asCmdHelperData); + + ui32CDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psComputeContext->sComputeData.psServerCommonContext)); + + /* This checks if the command would wrap around at the end of the CCB and + * therefore would start at an offset of 0 rather than the current command + * offset */ + if (ui32CDMCmdOffset < ui32CDMCmdOffsetWrapCheck) + { + ui32CDMWorkloadDataRO = ui32CDMCmdOffset; + } + else + { + ui32CDMWorkloadDataRO = 0; + } +#endif + + /* Construct the kernel compute CCB command. 
*/ + sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + sCmpKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psComputeContext->sComputeData.psServerCommonContext); + sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); + sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); + sCmpKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; + + /* Add the Workload data into the KCCB kick */ +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Store the offset to the CCCB command header so that it can be referenced + * when the KCCB command reaches the FW */ + sCmpKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32CDMWorkloadDataRO + ui32CDMCmdHeaderOffset; +#else + sCmpKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; +#endif + + ui32FWCtx = FWCommonContextGetFWAddress(psComputeContext->sComputeData.psServerCommonContext).ui32Addr; + + HTBLOGK(HTB_SF_MAIN_KICK_CDM, + sCmpKCCBCmd.uCmdData.sCmdKickData.psContext, + ui32CDMCmdOffset + ); + RGXSRV_HWPERF_ENQ(psComputeContext, OSGetCurrentClientProcessIDKM(), + ui32FWCtx, ui32ExtJobRef, ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_CDM, + iCheckFence, + iUpdateFence, + iUpdateTimeline, + uiCheckFenceUID, + uiUpdateFenceUID); + + /* + * Submit the compute command to the firmware. 
+ */ + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError2 = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice, + FWCommonContextGetServerMMUCtx(psComputeContext->sComputeData.psServerCommonContext), + RGXFWIF_DM_CDM, + &sCmpKCCBCmd, + ui32ClientCacheOpSeqNum, + ui32PDumpFlags); + if (eError2 != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + if (eError2 != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s failed to schedule kernel CCB command (%s)", + __func__, + PVRSRVGetErrorString(eError2))); + } + else + { + PVRGpuTraceEnqueueEvent(psComputeContext->psDeviceNode->pvDevice, + ui32FWCtx, ui32ExtJobRef, ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_CDM); + } + /* + * Now check eError (which may have returned an error from our earlier call + * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first + * so we check it now... + */ + if (eError != PVRSRV_OK ) + { + goto fail_cmdaquire; + } + +#if defined(NO_HARDWARE) + /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ + if (psUpdateSyncCheckpoint) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint))); + SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint); + } + if (psFenceTimelineUpdateSync) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating NOHW sync prim<%p> to %d", __func__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue)); + SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue); + } + SyncCheckpointNoHWUpdateTimelines(NULL); +#endif /* defined(NO_HARDWARE) */ + + *piUpdateFence = iUpdateFence; + + if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE)) + { + SyncCheckpointFinaliseFence(psComputeContext->psDeviceNode, iUpdateFence, + pvUpdateFenceFinaliseData, + 
psUpdateSyncCheckpoint, pszUpdateFenceName); + } + /* Drop the references taken on the sync checkpoints in the + * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } + /* Free memory allocated to hold the internal list of update values */ + if (pui32IntAllocatedUpdateValues) + { + OSFreeMem(pui32IntAllocatedUpdateValues); + pui32IntAllocatedUpdateValues = NULL; + } + + OSLockRelease(psComputeContext->hLock); + + return PVRSRV_OK; + +fail_cmdaquire: +fail_cmdinvalfbsc: + SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListFence); + SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListUpdate); +fail_alloc_update_values_mem: + if (iUpdateFence != PVRSRV_NO_FENCE) + { + SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData); + } +fail_create_output_fence: + /* Drop the references taken on the sync checkpoints in the + * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); +fail_resolve_input_fence: + +err_populate_sync_addr_list: + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } + /* Free memory allocated to hold the internal list of update values */ + if (pui32IntAllocatedUpdateValues) + { + OSFreeMem(pui32IntAllocatedUpdateValues); + pui32IntAllocatedUpdateValues = NULL; + } + OSLockRelease(psComputeContext->hLock); + return eError; +} + +PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext) +{ + RGXFWIF_KCCB_CMD sFlushCmd; + PVRSRV_ERROR eError = PVRSRV_OK; + 
IMG_UINT32 ui32kCCBCommandSlot; + PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice; + +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit Compute flush"); +#endif + sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL; + sFlushCmd.uCmdData.sSLCFlushInvalData.ui64Size = 0; + sFlushCmd.uCmdData.sSLCFlushInvalData.ui64Address = 0; + sFlushCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_FALSE; + sFlushCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_TRUE; + sFlushCmd.uCmdData.sSLCFlushInvalData.psContext = FWCommonContextGetFWAddress(psComputeContext->sComputeData.psServerCommonContext); + + OSLockAcquire(psComputeContext->hLock); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, + FWCommonContextGetServerMMUCtx(psComputeContext->sComputeData.psServerCommonContext), + RGXFWIF_DM_CDM, + &sFlushCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule SLC flush command (%s)", + __func__, + PVRSRVGetErrorString(eError))); + } + else + { + /* Wait for the SLC flush to complete */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Compute flush aborted (%s)", + __func__, + PVRSRVGetErrorString(eError))); + } + else if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & + RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__)); + } + } + + OSLockRelease(psComputeContext->hLock); + return eError; +} + + +PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext) +{ + RGXFWIF_KCCB_CMD sKCCBCmd; + PVRSRV_ERROR eError; + + 
OSLockAcquire(psComputeContext->hLock); + + /* Schedule the firmware command */ + sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE; + sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psComputeContext->sComputeData.psServerCommonContext); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice, + FWCommonContextGetServerMMUCtx(psComputeContext->sComputeData.psServerCommonContext), + RGXFWIF_DM_CDM, + &sKCCBCmd, + 0, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule the FW command %d (%s)", + __func__, + eError, + PVRSRVGetErrorString(eError))); + } + + OSLockRelease(psComputeContext->hLock); + + return eError; +} + + +PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + IMG_UINT32 ui32Priority) +{ + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + + OSLockAcquire(psComputeContext->hLock); + + eError = ContextSetPriority(psComputeContext->sComputeData.psServerCommonContext, + psConnection, + psComputeContext->psDeviceNode->pvDevice, + ui32Priority, + RGXFWIF_DM_CDM); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the compute context (%s)", __func__, PVRSRVGetErrorString(eError))); + } + + OSLockRelease(psComputeContext->hLock); + return eError; +} + +/* + * PVRSRVRGXGetLastComputeContextResetReasonKM + */ +PVRSRV_ERROR PVRSRVRGXGetLastComputeContextResetReasonKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + IMG_UINT32 *peLastResetReason, + IMG_UINT32 *pui32LastResetJobRef) +{ + PVR_ASSERT(psComputeContext != NULL); + PVR_ASSERT(peLastResetReason != NULL); + PVR_ASSERT(pui32LastResetJobRef != NULL); + + 
*peLastResetReason = FWCommonContextGetLastResetReason(psComputeContext->sComputeData.psServerCommonContext, + pui32LastResetJobRef); + + return PVRSRV_OK; +} + +/* + * PVRSRVRGXSetComputeContextPropertyKM + */ +PVRSRV_ERROR PVRSRVRGXSetComputeContextPropertyKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + RGX_CONTEXT_PROPERTY eContextProperty, + IMG_UINT64 ui64Input, + IMG_UINT64 *pui64Output) +{ + PVRSRV_ERROR eError; + + switch (eContextProperty) + { + case RGX_CONTEXT_PROPERTY_FLAGS: + { + OSLockAcquire(psComputeContext->hLock); + eError = FWCommonContextSetFlags(psComputeContext->sComputeData.psServerCommonContext, + (IMG_UINT32)ui64Input); + OSLockRelease(psComputeContext->hLock); + PVR_LOG_IF_ERROR(eError, "FWCommonContextSetFlags"); + break; + } + + default: + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty)); + eError = PVRSRV_ERROR_NOT_SUPPORTED; + } + } + + return eError; +} + +void DumpComputeCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel) +{ + DLLIST_NODE *psNode, *psNext; + OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock); + dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext) + { + RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx = + IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode); + DumpFWCommonContextInfo(psCurrentServerComputeCtx->sComputeData.psServerCommonContext, + pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + } + OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock); +} + +IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_UINT32 ui32ContextBitMask = 0; + DLLIST_NODE *psNode, *psNext; + OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock); + dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext) + { + RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx = + IMG_CONTAINER_OF(psNode, 
RGX_SERVER_COMPUTE_CONTEXT, sListNode); + + if (CheckStalledClientCommonContext(psCurrentServerComputeCtx->sComputeData.psServerCommonContext, RGX_KICK_TYPE_DM_CDM) + == PVRSRV_ERROR_CCCB_STALLED) + { + ui32ContextBitMask |= RGX_KICK_TYPE_DM_CDM; + } + } + OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock); + return ui32ContextBitMask; +} + +/****************************************************************************** + End of file (rgxcompute.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxcompute.h b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxcompute.h new file mode 100644 index 000000000000..273251d30133 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxcompute.h @@ -0,0 +1,177 @@ +/*************************************************************************/ /*! +@File +@Title RGX compute functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the RGX compute functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__RGXCOMPUTE_H__) +#define __RGXCOMPUTE_H__ + +#include "devicemem.h" +#include "device.h" +#include "rgxfwutils.h" +#include "rgxdebug.h" +#include "pvr_notifier.h" + +#include "sync_server.h" +#include "sync_internal.h" +#include "connection_server.h" + + +typedef struct _RGX_SERVER_COMPUTE_CONTEXT_ RGX_SERVER_COMPUTE_CONTEXT; + +/*! +******************************************************************************* + @Function PVRSRVRGXCreateComputeContextKM + + @Description + Creates a RGX device context for submitting commands to CDM. 
+ + @Input pvDeviceNode - Services-managed device + @Input ui32Priority - Scheduling priority for commands on this context + @Input hMemCtxPrivData - private data + @Input ui32PackedCCBSizeU88 - packed CCB size. The first byte contains the + log2 CCB size and the second byte the log2 maximum CCB size. + @Input ui32ComputeCtxSwitchSize - Context control size + @Input pComputeCtxSwitch_Regs - Context control registers + @Output ppsComputeContext - cleanup data + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32Priority, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32StaticComputeContextStateSize, + IMG_PBYTE pStaticComputeContextState, + IMG_UINT32 ui32PackedCCBSizeU88, + IMG_UINT32 ui32ContextFlags, + IMG_UINT64 ui64RobustnessAddress, + IMG_UINT32 ui32MaxDeadlineMS, + RGX_SERVER_COMPUTE_CONTEXT **ppsComputeContext); + +/*! +******************************************************************************* + @Function PVRSRVRGXDestroyComputeContextKM + + @Description + Server-side implementation of RGXDestroyComputeContext + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext); + + +/*! 
+******************************************************************************* + @Function PVRSRVRGXKickCDMKM + + @Description + Server-side implementation of RGXKickCDM + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFODevVarBlock, + IMG_UINT32 *paui32ClientUpdateSyncOffset, + IMG_UINT32 *paui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE iUpdateTimeline, + PVRSRV_FENCE *piUpdateFence, + IMG_CHAR pcszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], + IMG_UINT32 ui32CmdSize, + IMG_PBYTE pui8DMCmd, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32NumWorkgroups, + IMG_UINT32 ui32NumWorkitems, + IMG_UINT64 ui64DeadlineInus); + +/*! +******************************************************************************* + @Function PVRSRVRGXFlushComputeDataKM + + @Description + Server-side implementation of RGXFlushComputeData + + @Input psComputeContext - Compute context to flush + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext); + +/*! 
+******************************************************************************* + + @Function PVRSRVRGXNotifyComputeWriteOffsetUpdateKM + @Description Server-side implementation of RGXNotifyComputeWriteOffsetUpdate + + @Input psComputeContext - Compute context to flush + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext); + +PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + IMG_UINT32 ui32Priority); + +PVRSRV_ERROR PVRSRVRGXGetLastComputeContextResetReasonKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + IMG_UINT32 *peLastResetReason, + IMG_UINT32 *pui32LastResetJobRef); + +PVRSRV_ERROR PVRSRVRGXSetComputeContextPropertyKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + RGX_CONTEXT_PROPERTY eContextProperty, + IMG_UINT64 ui64Input, + IMG_UINT64 *pui64Output); + +/* Debug - Dump debug info of compute contexts on this device */ +void DumpComputeCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel); + +/* Debug/Watchdog - check if client compute contexts are stalled */ +IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo); + +#endif /* __RGXCOMPUTE_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxdebug.c b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxdebug.c new file mode 100644 index 000000000000..ccf40471f97b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxdebug.c @@ -0,0 +1,3527 @@ +/*************************************************************************/ /*! +@File +@Title Rgx debug information +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description RGX debugging functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +//#define PVR_DPF_FUNCTION_TRACE_ON 1 +#undef PVR_DPF_FUNCTION_TRACE_ON + +#include "img_defs.h" +#include "rgxdefs_km.h" +#include "rgxdevice.h" +#include "rgxmem.h" +#include "allocmem.h" +#include "cache_km.h" +#include "osfunc.h" + +#include "rgxdebug.h" +#include "pvrversion.h" +#include "pvr_debug.h" +#include "srvkm.h" +#include "rgxutils.h" +#include "tlstream.h" +#include "rgxfwutils.h" +#include "pvrsrv.h" +#include "services_km.h" + +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "devicemem_utils.h" +#include "rgx_fwif_km.h" +#include "rgx_fwif_sf.h" +#include "rgxfw_log_helper.h" +#include "fwtrace_string.h" +#include "rgxfwimageutils.h" +#include "fwload.h" + +#include "rgxta3d.h" +#include "rgxkicksync.h" +#include "rgxcompute.h" +#include "rgxtdmtransfer.h" +#include "rgxtimecorr.h" +#include "rgx_options.h" +#include "rgxinit.h" +#include "rgxlayer_impl.h" +#include "devicemem_history_server.h" +#include "info_page.h" +#include "rgx_hw_errors.h" + +#if !defined(PVR_ARCH_NAME) +#define PVR_ARCH_NAME "Unknown" +#endif + +#define PVR_DUMP_FIRMWARE_INFO(x) \ + PVR_DUMPDEBUG_LOG("FW info: %d.%d @ %8d (%s) build options: 0x%08x", \ + PVRVERSION_UNPACK_MAJ((x).ui32DDKVersion), \ + PVRVERSION_UNPACK_MIN((x).ui32DDKVersion), \ + (x).ui32DDKBuild, \ + ((x).ui32BuildOptions & OPTIONS_DEBUG_MASK) ? 
"debug":"release",\ + (x).ui32BuildOptions); + +#define DD_SUMMARY_INDENT "" +#define DD_NORMAL_INDENT " " + +#define RGX_DEBUG_STR_SIZE (150U) +#define MAX_FW_DESCRIPTION_LENGTH (600U) + + +#define RGX_TEXAS_BIF0_ID (0) +#define RGX_TEXAS_BIF1_ID (1) + +/* + * The first 7 or 8 cat bases are memory contexts used for PM + * or firmware. The rest are application contexts. The numbering + * is zero-based. + */ +#if defined(SUPPORT_TRUSTED_DEVICE) +#define MAX_RESERVED_FW_MMU_CONTEXT (7) +#else +#define MAX_RESERVED_FW_MMU_CONTEXT (6) +#endif + +static const IMG_CHAR *const pszPowStateName[] = +{ +#define X(NAME) #NAME, + RGXFWIF_POW_STATES +#undef X +}; + +typedef struct _IMG_FLAGS2DESC_ +{ + IMG_UINT32 uiFlag; + const IMG_CHAR *pszLabel; +} IMG_FLAGS2DESC; + +static const IMG_CHAR * const apszFwOsStateName[RGXFW_CONNECTION_FW_STATE_COUNT] = +{ + "offline", + "ready", + "active", + "offloading" +}; + +#if defined(PVR_ENABLE_PHR) +static const IMG_FLAGS2DESC asPHRConfig2Description[] = +{ + {BIT_ULL(RGXIF_PHR_MODE_OFF), "off"}, + {BIT_ULL(RGXIF_PHR_MODE_RD_RESET), "reset RD hardware"}, + {BIT_ULL(RGXIF_PHR_MODE_FULL_RESET), "full gpu reset "}, +}; +#endif + +#if !defined(NO_HARDWARE) +static PVRSRV_ERROR +RGXPollMetaRegThroughSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegOffset, + IMG_UINT32 ui32PollValue, IMG_UINT32 ui32Mask) +{ + IMG_UINT32 ui32RegValue, ui32NumPolls = 0; + PVRSRV_ERROR eError; + + do + { + eError = RGXReadWithSP(psDevInfo, ui32RegOffset, &ui32RegValue); + if (eError != PVRSRV_OK) + { + return eError; + } + } while (((ui32RegValue & ui32Mask) != ui32PollValue) && (ui32NumPolls++ < 1000)); + + return ((ui32RegValue & ui32Mask) == ui32PollValue) ? PVRSRV_OK : PVRSRV_ERROR_RETRY; +} + +static PVRSRV_ERROR +RGXReadMetaCoreReg(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegAddr, IMG_UINT32 *pui32RegVal) +{ + PVRSRV_ERROR eError; + + /* Core Read Ready? 
*/ + eError = RGXPollMetaRegThroughSP(psDevInfo, + META_CR_TXUXXRXRQ_OFFSET, + META_CR_TXUXXRXRQ_DREADY_BIT, + META_CR_TXUXXRXRQ_DREADY_BIT); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXPollMetaRegThroughSP"); + + /* Set the reg we are interested in reading */ + eError = RGXWriteWithSP(psDevInfo, META_CR_TXUXXRXRQ_OFFSET, + ui32RegAddr | META_CR_TXUXXRXRQ_RDnWR_BIT); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXWriteWithSP"); + + /* Core Read Done? */ + eError = RGXPollMetaRegThroughSP(psDevInfo, + META_CR_TXUXXRXRQ_OFFSET, + META_CR_TXUXXRXRQ_DREADY_BIT, + META_CR_TXUXXRXRQ_DREADY_BIT); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXPollMetaRegThroughSP"); + + /* Read the value */ + return RGXReadWithSP(psDevInfo, META_CR_TXUXXRXDT_OFFSET, pui32RegVal); +} +#endif + +PVRSRV_ERROR +RGXReadWithSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 *pui32Value) +{ + PVRSRV_ERROR eError = RGXReadMETAAddr(psDevInfo, ui32FWAddr, pui32Value); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: %s", __func__, PVRSRVGetErrorString(eError))); + } + + return eError; +} + +PVRSRV_ERROR +RGXWriteWithSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 ui32Value) +{ + PVRSRV_ERROR eError = RGXWriteMETAAddr(psDevInfo, ui32FWAddr, ui32Value); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: %s", __func__, PVRSRVGetErrorString(eError))); + } + return eError; +} + +#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) +static PVRSRV_ERROR _ValidateWithSP(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_DEV_VIRTADDR *psFWAddr, + void *pvHostCodeAddr, + IMG_UINT32 ui32MaxLen, + const IMG_CHAR *pszDesc, + IMG_UINT32 ui32StartOffset) +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32Value, i; + IMG_UINT32 ui32FWCodeDevVAAddr = psFWAddr->ui32Addr + ui32StartOffset; + IMG_UINT32 *pui32FWCode = (IMG_PUINT32) ((IMG_PBYTE)pvHostCodeAddr + ui32StartOffset); + + ui32MaxLen -= ui32StartOffset; + 
ui32MaxLen /= sizeof(IMG_UINT32); /* Byte -> 32 bit words */ + + for (i = 0; i < ui32MaxLen; i++) + { + eError = RGXReadMETAAddr(psDevInfo, ui32FWCodeDevVAAddr, &ui32Value); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: %s", __func__, PVRSRVGetErrorString(eError))); + return eError; + } + + PVR_DPF((PVR_DBG_VERBOSE, "0x%x: CPU 0x%08x, FW 0x%08x", i * 4, pui32FWCode[i], ui32Value)); + + if (pui32FWCode[i] != ui32Value) + { + PVR_DUMPDEBUG_LOG("_ValidateWithSP: Mismatch while validating %s at offset 0x%x: CPU 0x%08x (%p), FW 0x%08x (%x)", + pszDesc, + (i * 4) + ui32StartOffset, pui32FWCode[i], pui32FWCode, ui32Value, ui32FWCodeDevVAAddr); + return PVRSRV_ERROR_FW_IMAGE_MISMATCH; + } + + ui32FWCodeDevVAAddr += 4; + } + + PVR_DUMPDEBUG_LOG("Match between Host and Meta view of the %s", pszDesc); + return PVRSRV_OK; +} +#endif /* !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) */ + + +#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) +static PVRSRV_ERROR _ValidateFWImageForMETA(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_UINT32 *pui32HostFWCode = NULL, *pui32HostFWCoremem = NULL; + OS_FW_IMAGE *psRGXFW = NULL; + const IMG_BYTE *pbRGXFirmware = NULL; + RGXFWIF_DEV_VIRTADDR sFWAddr; + PVRSRV_ERROR eError; + + if (psDevInfo->pvRegsBaseKM == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: RGX registers not mapped yet!", __func__)); + return PVRSRV_ERROR_BAD_MAPPING; + } + + /* Load FW from system for code verification */ + pui32HostFWCode = OSAllocZMem(psDevInfo->ui32FWCodeSizeInBytes); + if (pui32HostFWCode == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed in allocating memory for FW code. 
" + "So skipping FW code verification", + __func__)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + if (psDevInfo->ui32FWCorememCodeSizeInBytes) + { + pui32HostFWCoremem = OSAllocZMem(psDevInfo->ui32FWCorememCodeSizeInBytes); + if (pui32HostFWCoremem == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed in allocating memory for FW core code. " + "So skipping FW code verification", + __func__)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto freeHostFWCode; + } + } + + /* Load FW image */ + pbRGXFirmware = RGXLoadAndGetFWData(psDevInfo->psDeviceNode, &psRGXFW); + if (!pbRGXFirmware) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed in loading FW image file.", __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto cleanup_initfw; + } + + eError = ProcessLDRCommandStream(&psDevInfo->sLayerParams, pbRGXFirmware, + (IMG_PBYTE) pui32HostFWCode, NULL, + (IMG_PBYTE) pui32HostFWCoremem, NULL, NULL); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed in parsing FW image file.", __func__)); + goto cleanup_initfw; + } + + /* starting checking after BOOT LOADER config */ + sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR; + eError = _ValidateWithSP(pfnDumpDebugPrintf, pvDumpDebugFile, + psDevInfo, &sFWAddr, + pui32HostFWCode, psDevInfo->ui32FWCodeSizeInBytes, + "FW code", RGXFW_MAX_BOOTLDR_OFFSET); + if (eError != PVRSRV_OK) + { + goto cleanup_initfw; + } + + if (psDevInfo->ui32FWCorememCodeSizeInBytes) + { + sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, META_COREMEM_CODE); + + eError = _ValidateWithSP(pfnDumpDebugPrintf, pvDumpDebugFile, + psDevInfo, &sFWAddr, + pui32HostFWCoremem, psDevInfo->ui32FWCorememCodeSizeInBytes, + "FW coremem code", 0); + } + +cleanup_initfw: + if (psRGXFW) + { + OSUnloadFirmware(psRGXFW); + } + + if (pui32HostFWCoremem) + { + OSFreeMem(pui32HostFWCoremem); + } +freeHostFWCode: + if (pui32HostFWCode) + { + OSFreeMem(pui32HostFWCode); + } + return eError; +} +#endif + +#if defined(SUPPORT_EXTRA_METASP_DEBUG) +PVRSRV_ERROR 
ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo) +{ +#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) + IMG_PBYTE pbCodeMemoryPointer; + PVRSRV_ERROR eError; + RGXFWIF_DEV_VIRTADDR sFWAddr; + + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pbCodeMemoryPointer); + if (eError != PVRSRV_OK) + { + return eError; + } + + sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR; + eError = _ValidateWithSP(NULL, NULL, psDevInfo, &sFWAddr, pbCodeMemoryPointer, psDevInfo->ui32FWCodeSizeInBytes, "FW code", 0); + if (eError != PVRSRV_OK) + { + goto releaseFWCodeMapping; + } + + if (psDevInfo->ui32FWCorememCodeSizeInBytes) + { + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCorememMemDesc, (void **)&pbCodeMemoryPointer); + if (eError != PVRSRV_OK) + { + goto releaseFWCoreCodeMapping; + } + + sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, META_COREMEM_CODE); + + eError = _ValidateWithSP(NULL, NULL, psDevInfo, &sFWAddr, pbCodeMemoryPointer, + psDevInfo->ui32FWCorememCodeSizeInBytes, "FW coremem code", 0); + } + +releaseFWCoreCodeMapping: + if (psDevInfo->ui32FWCorememCodeSizeInBytes) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCorememMemDesc); + } +releaseFWCodeMapping: + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(psDevInfo); + return PVRSRV_OK; +#endif +} +#endif + + +/*! +******************************************************************************* + + @Function _RGXDecodeMMULevel + + @Description + + Return the name for the MMU level that faulted. + + @Input ui32MMULevel - MMU level + + @Return IMG_CHAR* to the sting describing the MMU level that faulted. 
+ +******************************************************************************/ +static const IMG_CHAR* _RGXDecodeMMULevel(IMG_UINT32 ui32MMULevel) +{ + const IMG_CHAR* pszMMULevel = ""; + + switch (ui32MMULevel) + { + case 0x0: pszMMULevel = " (Page Table)"; break; + case 0x1: pszMMULevel = " (Page Directory)"; break; + case 0x2: pszMMULevel = " (Page Catalog)"; break; + case 0x3: pszMMULevel = " (Cat Base Reg)"; break; + } + + return pszMMULevel; +} + + +/*! +******************************************************************************* + + @Function _RGXDecodeMMUReqTags + + @Description + + Decodes the MMU Tag ID and Sideband data fields from RGX_CR_MMU_FAULT_META_STATUS and + RGX_CR_MMU_FAULT_STATUS regs. + + @Input ui32TagID - Tag ID value + @Input ui32BIFModule - BIF module + @Input bRead - Read flag + @Input bWriteBack - Write Back flag + @Output ppszTagID - Decoded string from the Tag ID + @Output ppszTagSB - Decoded string from the Tag SB + @Output pszScratchBuf - Buffer provided to the function to generate the debug strings + @Input ui32ScratchBufSize - Size of the provided buffer + + @Return void + +******************************************************************************/ +#define RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__SERIES8 (12) +#define RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST__SERIES8 (15) +#define RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__ALBIORIX (6) +#define RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST__ALBIORIX (9) +#define RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_LAST (33) +#define RGX_TEXAS_BIF0_TAG_RTU_RAC_FIRST (41) +#define RGX_TEXAS_BIF0_TAG_RTU_RAC_LAST (48) +#define RGX_TEXAS_BIF0_TAG_LAST (51) + +#define RGX_TEXAS_BIF1_TAG_LAST (26) + +#define RGX_JONES_BIF_IPP_TAG (0) +#define RGX_JONES_BIF_DCE_TAG_FIRST (1) +#define RGX_JONES_BIF_DCE_TAG_LAST (14) +#define RGX_JONES_BIF_TDM_TAG_FIRST (15) +#define RGX_JONES_BIF_TDM_TAG_LAST (19) +#define RGX_JONES_BIF_PM_TAG (20) +#define RGX_JONES_BIF_CDM_TAG_FIRST (21) +#define RGX_JONES_BIF_CDM_TAG_LAST (31) +#define 
RGX_JONES_BIF_META_TAG (32) +#define RGX_JONES_BIF_META_DMA_TAG (33) +#define RGX_JONES_BIF_TE_TAG_FIRST (34) +#define RGX_JONES_BIF_TE_TAG_LAST (47) +#define RGX_JONES_BIF_RTU_TAG_FIRST (48) +#define RGX_JONES_BIF_RTU_TAG_LAST (53) +#define RGX_JONES_BIF_RPM_TAG (54) +#define RGX_JONES_BIF_TAG_LAST (54) + + +/* The MCU L1 requestors are common to all Texas BIFs so put them + * in their own function. */ +static INLINE void _RGXDecodeMMUReqMCULevel1(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32TagID, + IMG_CHAR **ppszTagSB) +{ + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE)) + { + switch (ui32TagID) + { + case 0: *ppszTagSB = "IP0 PDS"; break; + case 1: *ppszTagSB = "IP0 Global"; break; + case 2: *ppszTagSB = "IP1 PDS"; break; + case 3: *ppszTagSB = "IP1 Global"; break; + case 4: *ppszTagSB = "IP2 PDS"; break; + case 5: *ppszTagSB = "IP2 Global"; break; + } + } + else + { + switch (ui32TagID) + { + case 0: *ppszTagSB = "IP0 PDS"; break; + case 1: *ppszTagSB = "IP0 Global"; break; + case 2: *ppszTagSB = "IP0 BSC"; break; + case 3: *ppszTagSB = "IP0 Constants"; break; + + case 4: *ppszTagSB = "IP1 PDS"; break; + case 5: *ppszTagSB = "IP1 Global"; break; + case 6: *ppszTagSB = "IP1 BSC"; break; + case 7: *ppszTagSB = "IP1 Constants"; break; + + case 8: *ppszTagSB = "IP2 PDS"; break; + case 9: *ppszTagSB = "IP2 Global"; break; + case 10: *ppszTagSB = "IP2 BSC"; break; + case 11: *ppszTagSB = "IP2 Constants"; break; + } + } +} + +static void _RGXDecodeMMUReqTags(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32TagID, + IMG_UINT32 ui32BIFModule, + IMG_BOOL bRead, + IMG_BOOL bWriteBack, + IMG_BOOL bFBMFault, + IMG_CHAR **ppszTagID, + IMG_CHAR **ppszTagSB, + IMG_CHAR *pszScratchBuf, + IMG_UINT32 ui32ScratchBufSize) +{ + IMG_UINT32 ui32BIFsPerSPU = 2; + IMG_CHAR *pszTagID = "-"; + IMG_CHAR *pszTagSB = "-"; + + PVR_ASSERT(ppszTagID != NULL); + PVR_ASSERT(ppszTagSB != NULL); + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE)) + { 
+ ui32BIFsPerSPU = 4; + } + + if (bFBMFault) + { + pszTagID = "FBM"; + if (bWriteBack) + { + pszTagSB = "Header/state cache request"; + } + } + else if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_SPU) && + ui32BIFModule < RGX_GET_FEATURE_VALUE(psDevInfo, NUM_SPU)*ui32BIFsPerSPU) + { + if ((ui32BIFModule % ui32BIFsPerSPU) == 0) + { + IMG_UINT32 ui32Tag_RGX_TEXAS_BIF0_MCU_L1_TAG_LAST = + (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE)) + ? RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__ALBIORIX + : RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__SERIES8; + IMG_UINT32 ui32Tag_RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST = + (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE)) + ? RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST__ALBIORIX + : RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST__SERIES8; + + /* Texas 0 BIF */ + if (ui32TagID < ui32Tag_RGX_TEXAS_BIF0_MCU_L1_TAG_LAST) + { + pszTagID = "MCU L1"; + _RGXDecodeMMUReqMCULevel1(psDevInfo, ui32TagID, &pszTagSB); + } + else if (ui32TagID < ui32Tag_RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST) + { + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE)) + { + switch (ui32TagID) + { + case 6: pszTagID = "TCU L1"; break; + case 7: + case 8: pszTagID = "PBE0"; break; + } + } + else + { + switch (ui32TagID) + { + case 12: pszTagID = "TCU L1"; break; + case 13: + case 14: pszTagID = "PBE0"; break; + } + } + } + else if (ui32TagID <= RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_LAST) + { + pszTagID = "IPF ID Array"; + } + else if (ui32TagID < RGX_TEXAS_BIF0_TAG_RTU_RAC_FIRST) + { + switch (ui32TagID) + { + case 34: pszTagID = "IPF_CPF"; break; + case 35: pszTagID = "PPP"; break; + case 36: + case 37: pszTagID = "ISP0 ID Array"; break; + case 38: + case 39: pszTagID = "ISP2 ID Array"; break; + case 40: pszTagID = "VCE RTC"; break; + } + } + else if (ui32TagID <= RGX_TEXAS_BIF0_TAG_RTU_RAC_LAST) + { + pszTagID = "RTU RAC"; + } + else if (ui32TagID <= RGX_TEXAS_BIF0_TAG_LAST) + { + switch (ui32TagID) + { + case 49: pszTagID = "VCE AMC"; break; + case 50: + 
case 51: pszTagID = "SHF"; break; + } + } + else + { + PVR_DPF((PVR_DBG_WARNING, "%s: Unidentified Texas BIF Tag ID: %d", __func__, ui32TagID)); + } + } + else if ((ui32BIFModule % ui32BIFsPerSPU) == 1) + { + IMG_UINT32 ui32Tag_RGX_TEXAS_BIF0_MCU_L1_TAG_LAST = + (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE)) + ? RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__ALBIORIX + : RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__SERIES8; + + /* Texas 1 BIF */ + if (ui32TagID < ui32Tag_RGX_TEXAS_BIF0_MCU_L1_TAG_LAST) + { + pszTagID = "MCU L1"; + _RGXDecodeMMUReqMCULevel1(psDevInfo, ui32TagID, &pszTagSB); + } + else if (ui32TagID <= RGX_TEXAS_BIF1_TAG_LAST) + { + switch (ui32TagID) + { + /** Albiorix/NUM_TPU_PER_SPU > 1 **/ + case 6: + case 7: pszTagID = "BSC"; break; + /** All cores **/ + case 12: pszTagID = "TCU L1"; break; + case 13: pszTagID = "TPF"; break; + case 14: pszTagID = "TPF CPF"; break; + case 15: + case 16: pszTagID = "PBE1"; break; + case 17: pszTagID = "PDSRW cache"; break; + case 18: pszTagID = "PDS"; break; + case 19: + case 20: pszTagID = "ISP1 ID Array"; break; + case 21: pszTagID = "USC L2"; break; + case 22: pszTagID = "VDM L2"; break; + case 23: pszTagID = "RTU FBA L2"; break; + case 24: pszTagID = "RTU SHR L2"; break; + case 25: pszTagID = "RTU SHG L2"; break; + case 26: pszTagID = "RTU TUL L2"; break; + } + } + else + { + PVR_DPF((PVR_DBG_WARNING, "%s: Unidentified Texas BIF Tag ID: %d", __func__, ui32TagID)); + } + } + } + else if (ui32BIFModule == RGX_GET_FEATURE_VALUE(psDevInfo, NUM_SPU)*ui32BIFsPerSPU) + { + /* Jones BIF */ + + if ((ui32TagID >= RGX_JONES_BIF_DCE_TAG_FIRST) && (ui32TagID <= RGX_JONES_BIF_DCE_TAG_LAST)) + { + pszTagID = "DCE"; + } + else if ((ui32TagID >= RGX_JONES_BIF_TDM_TAG_FIRST) && (ui32TagID <= RGX_JONES_BIF_TDM_TAG_LAST)) + { + pszTagID = "TDM"; + } + else if ((ui32TagID >= RGX_JONES_BIF_CDM_TAG_FIRST) && (ui32TagID <= RGX_JONES_BIF_CDM_TAG_LAST)) + { + pszTagID = "CDM"; + } + else if ((ui32TagID >= RGX_JONES_BIF_TE_TAG_FIRST) && 
(ui32TagID <= RGX_JONES_BIF_TE_TAG_LAST)) + { + pszTagID = "Tiling Engine (TE3)"; + } + else if ((ui32TagID >= RGX_JONES_BIF_RTU_TAG_FIRST) && (ui32TagID <= RGX_JONES_BIF_RTU_TAG_LAST)) + { + pszTagID = "RTU"; + } + else if (ui32TagID <= RGX_JONES_BIF_TAG_LAST) + { + switch (ui32TagID) + { + case RGX_JONES_BIF_IPP_TAG: pszTagID = "IPP"; break; + case RGX_JONES_BIF_PM_TAG: pszTagID = "PM"; break; + case RGX_JONES_BIF_META_TAG: pszTagID = "META"; break; + case RGX_JONES_BIF_META_DMA_TAG:pszTagID = "META DMA"; break; + case RGX_JONES_BIF_RPM_TAG: pszTagID = "RPM"; break; + } + } + else + { + PVR_DPF((PVR_DBG_WARNING, "%s: Unidentified Jones BIF Tag ID: %d", __func__, ui32TagID)); + } + } + else if (bWriteBack) + { + pszTagID = ""; + pszTagSB = "Writeback of dirty cacheline"; + } + else + { + PVR_DPF((PVR_DBG_WARNING, "%s: Unidentified BIF Module: %d", __func__, ui32BIFModule)); + } + + *ppszTagID = pszTagID; + *ppszTagSB = pszTagSB; +} + + +static void ConvertOSTimestampToSAndNS(IMG_UINT64 ui64OSTimer, + IMG_UINT64 *pui64Seconds, + IMG_UINT64 *pui64Nanoseconds) +{ + IMG_UINT32 ui32Remainder; + + *pui64Seconds = OSDivide64r64(ui64OSTimer, 1000000000, &ui32Remainder); + *pui64Nanoseconds = ui64OSTimer - (*pui64Seconds * 1000000000ULL); +} + + +typedef enum _DEVICEMEM_HISTORY_QUERY_INDEX_ +{ + DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING, + DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED, + DEVICEMEM_HISTORY_QUERY_INDEX_NEXT, + DEVICEMEM_HISTORY_QUERY_INDEX_COUNT, +} DEVICEMEM_HISTORY_QUERY_INDEX; + + +/*! 
+******************************************************************************* + + @Function _PrintDevicememHistoryQueryResult + + @Description + + Print details of a single result from a DevicememHistory query + + @Input pfnDumpDebugPrintf - Debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psFaultProcessInfo - The process info derived from the page fault + @Input psResult - The DevicememHistory result to be printed + @Input ui32Index - The index of the result + + @Return void + +******************************************************************************/ +static void _PrintDevicememHistoryQueryResult(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXMEM_PROCESS_INFO *psFaultProcessInfo, + DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult, + IMG_UINT32 ui32Index, + const IMG_CHAR* pszIndent) +{ + IMG_UINT32 ui32Remainder; + IMG_UINT64 ui64Seconds, ui64Nanoseconds; + + ConvertOSTimestampToSAndNS(psResult->ui64When, + &ui64Seconds, + &ui64Nanoseconds); + + if (psFaultProcessInfo->uiPID != RGXMEM_SERVER_PID_FIRMWARE) + { + PVR_DUMPDEBUG_LOG("%s [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC + " Size: " IMG_DEVMEM_SIZE_FMTSPEC + " Operation: %s Modified: %" IMG_UINT64_FMTSPEC + " us ago (OS time %" IMG_UINT64_FMTSPEC + ".%09" IMG_UINT64_FMTSPEC " s)", + pszIndent, + ui32Index, + psResult->szString, + psResult->sBaseDevVAddr.uiAddr, + psResult->uiSize, + psResult->bMap ? 
"Map": "Unmap", + OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder), + ui64Seconds, + ui64Nanoseconds); + } + else + { + PVR_DUMPDEBUG_LOG("%s [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC + " Size: " IMG_DEVMEM_SIZE_FMTSPEC + " Operation: %s Modified: %" IMG_UINT64_FMTSPEC + " us ago (OS time %" IMG_UINT64_FMTSPEC + ".%09" IMG_UINT64_FMTSPEC + ") PID: %u (%s)", + pszIndent, + ui32Index, + psResult->szString, + psResult->sBaseDevVAddr.uiAddr, + psResult->uiSize, + psResult->bMap ? "Map": "Unmap", + OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder), + ui64Seconds, + ui64Nanoseconds, + psResult->sProcessInfo.uiPID, + psResult->sProcessInfo.szProcessName); + } + + if (!psResult->bRange) + { + PVR_DUMPDEBUG_LOG("%s Whole allocation was %s", pszIndent, psResult->bMap ? "mapped": "unmapped"); + } + else + { + PVR_DUMPDEBUG_LOG("%s Pages %u to %u (" IMG_DEV_VIRTADDR_FMTSPEC "-" IMG_DEV_VIRTADDR_FMTSPEC ") %s%s", + pszIndent, + psResult->ui32StartPage, + psResult->ui32StartPage + psResult->ui32PageCount - 1, + psResult->sMapStartAddr.uiAddr, + psResult->sMapEndAddr.uiAddr, + psResult->bAll ? "(whole allocation) " : "", + psResult->bMap ? "mapped": "unmapped"); + } +} + +/*! 
+******************************************************************************* + + @Function _PrintDevicememHistoryQueryOut + + @Description + + Print details of all the results from a DevicememHistory query + + @Input pfnDumpDebugPrintf - Debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psFaultProcessInfo - The process info derived from the page fault + @Input psQueryOut - Storage for the query results + + @Return void + +******************************************************************************/ +static void _PrintDevicememHistoryQueryOut(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXMEM_PROCESS_INFO *psFaultProcessInfo, + DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut, + const IMG_CHAR* pszIndent) +{ + IMG_UINT32 i; + + if (psQueryOut->ui32NumResults == 0) + { + PVR_DUMPDEBUG_LOG("%s No results", pszIndent); + } + else + { + for (i = 0; i < psQueryOut->ui32NumResults; i++) + { + _PrintDevicememHistoryQueryResult(pfnDumpDebugPrintf, pvDumpDebugFile, + psFaultProcessInfo, + &psQueryOut->sResults[i], + i, + pszIndent); + } + } +} + +/* table of HW page size values and the equivalent */ +static const unsigned int aui32HWPageSizeTable[][2] = +{ + { 0, PVRSRV_4K_PAGE_SIZE }, + { 1, PVRSRV_16K_PAGE_SIZE }, + { 2, PVRSRV_64K_PAGE_SIZE }, + { 3, PVRSRV_256K_PAGE_SIZE }, + { 4, PVRSRV_1M_PAGE_SIZE }, + { 5, PVRSRV_2M_PAGE_SIZE } +}; + +/*! 
+******************************************************************************* + + @Function _PageSizeHWToBytes + + @Description + + Convert a HW page size value to its size in bytes + + @Input ui32PageSizeHW - The HW page size value + + @Return IMG_UINT32 The page size in bytes + +******************************************************************************/ +static IMG_UINT32 _PageSizeHWToBytes(IMG_UINT32 ui32PageSizeHW) +{ + if (ui32PageSizeHW > 5) + { + /* This is invalid, so return a default value as we cannot ASSERT in this code! */ + return PVRSRV_4K_PAGE_SIZE; + } + + return aui32HWPageSizeTable[ui32PageSizeHW][1]; +} + +/*! +******************************************************************************* + + @Function _GetDevicememHistoryData + + @Description + + Get the DevicememHistory results for the given PID and faulting device virtual address. + The function will query DevicememHistory for information about the faulting page, as well + as the page before and after. + + @Input uiPID - The process ID to search for allocations belonging to + @Input sFaultDevVAddr - The device address to search for allocations at/before/after + @Input asQueryOut - Storage for the query results + @Input ui32PageSizeBytes - Faulted page size in bytes + + @Return IMG_BOOL - IMG_TRUE if any results were found for this page fault + +******************************************************************************/ +static IMG_BOOL _GetDevicememHistoryData(IMG_PID uiPID, IMG_DEV_VIRTADDR sFaultDevVAddr, + DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT], + IMG_UINT32 ui32PageSizeBytes) +{ + IMG_UINT32 i; + DEVICEMEM_HISTORY_QUERY_IN sQueryIn; + IMG_BOOL bAnyHits = IMG_FALSE; + + /* if the page fault originated in the firmware then the allocation may + * appear to belong to any PID, because FW allocations are attributed + * to the client process creating the allocation, so instruct the + * devicemem_history query to search all available PIDs + */ + if 
(uiPID == RGXMEM_SERVER_PID_FIRMWARE) + { + sQueryIn.uiPID = DEVICEMEM_HISTORY_PID_ANY; + } + else + { + sQueryIn.uiPID = uiPID; + } + + /* query the DevicememHistory about the preceding / faulting / next page */ + + for (i = DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++) + { + IMG_BOOL bHits; + + switch (i) + { + case DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING: + sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) - 1; + break; + case DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED: + sQueryIn.sDevVAddr = sFaultDevVAddr; + break; + case DEVICEMEM_HISTORY_QUERY_INDEX_NEXT: + sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) + ui32PageSizeBytes; + break; + } + + /* First try matching any record at the exact address... */ + bHits = DevicememHistoryQuery(&sQueryIn, &asQueryOut[i], ui32PageSizeBytes, IMG_FALSE); + if (!bHits) + { + /* If not matched then try matching any record in the same page... */ + bHits = DevicememHistoryQuery(&sQueryIn, &asQueryOut[i], ui32PageSizeBytes, IMG_TRUE); + } + + if (bHits) + { + bAnyHits = IMG_TRUE; + } + } + + + return bAnyHits; +} + +/* stored data about one page fault */ +typedef struct _FAULT_INFO_ +{ + /* the process info of the memory context that page faulted */ + RGXMEM_PROCESS_INFO sProcessInfo; + IMG_DEV_VIRTADDR sFaultDevVAddr; + MMU_FAULT_DATA sMMUFaultData; + DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT]; + /* the CR timer value at the time of the fault, recorded by the FW. + * used to differentiate different page faults + */ + IMG_UINT64 ui64CRTimer; + /* time when this FAULT_INFO entry was added. used for timing + * reference against the map/unmap information + */ + IMG_UINT64 ui64When; + IMG_UINT32 ui32FaultInfoFlags; +} FAULT_INFO; + +/* history list of page faults. 
+ * Keeps the first `n` page faults and the last `n` page faults, like the FW + * HWR log + */ +typedef struct _FAULT_INFO_LOG_ +{ + IMG_UINT32 ui32Head; + /* the number of faults in this log need not correspond exactly to + * the HWINFO number of the FW, as the FW HWINFO log may contain + * non-page fault HWRs + */ + FAULT_INFO asFaults[RGXFWIF_HWINFO_MAX]; +} FAULT_INFO_LOG; + +#define FAULT_INFO_PROC_INFO (0x1U) +#define FAULT_INFO_DEVMEM_HIST (0x2U) + +static FAULT_INFO_LOG gsFaultInfoLog = { 0 }; + +static void _FillAppForFWFaults(PVRSRV_RGXDEV_INFO *psDevInfo, + FAULT_INFO *psInfo, + RGXMEM_PROCESS_INFO *psProcInfo) +{ + IMG_UINT32 i, j; + + for (i = 0; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++) + { + for (j = 0; j < DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS; j++) + { + IMG_BOOL bFound; + + RGXMEM_PROCESS_INFO *psProcInfo = &psInfo->asQueryOut[i].sResults[j].sProcessInfo; + bFound = RGXPCPIDToProcessInfo(psDevInfo, + psProcInfo->uiPID, + psProcInfo); + if (!bFound) + { + OSStringLCopy(psProcInfo->szProcessName, + "(unknown)", + sizeof(psProcInfo->szProcessName)); + } + } + } +} + +/*! 
+******************************************************************************* + + @Function _PrintFaultInfo + + @Description + + Print all the details of a page fault from a FAULT_INFO structure + + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psInfo - The page fault occurrence to print + + @Return void + +******************************************************************************/ +static void _PrintFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + FAULT_INFO *psInfo, + const IMG_CHAR* pszIndent) +{ + IMG_UINT32 i; + IMG_UINT64 ui64Seconds, ui64Nanoseconds; + + ConvertOSTimestampToSAndNS(psInfo->ui64When, &ui64Seconds, &ui64Nanoseconds); + + if (BITMASK_HAS(psInfo->ui32FaultInfoFlags, FAULT_INFO_PROC_INFO)) + { + IMG_PID uiPID = (psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE || psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_PM) ? 
+ 0 : psInfo->sProcessInfo.uiPID; + + PVR_DUMPDEBUG_LOG("%sDevice memory history for page fault address " IMG_DEV_VIRTADDR_FMTSPEC + ", CRTimer: 0x%016" IMG_UINT64_FMTSPECX + ", PID: %u (%s, unregistered: %u) OS time: " + "%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC, + pszIndent, + psInfo->sFaultDevVAddr.uiAddr, + psInfo->ui64CRTimer, + uiPID, + psInfo->sProcessInfo.szProcessName, + psInfo->sProcessInfo.bUnregistered, + ui64Seconds, + ui64Nanoseconds); + } + else + { + PVR_DUMPDEBUG_LOG("%sCould not find PID for device memory history on PC of the fault", pszIndent); + } + + if (BITMASK_HAS(psInfo->ui32FaultInfoFlags, FAULT_INFO_DEVMEM_HIST)) + { + for (i = DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++) + { + const IMG_CHAR *pszWhich = NULL; + + switch (i) + { + case DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING: + pszWhich = "Preceding page"; + break; + case DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED: + pszWhich = "Faulted page"; + break; + case DEVICEMEM_HISTORY_QUERY_INDEX_NEXT: + pszWhich = "Next page"; + break; + } + + PVR_DUMPDEBUG_LOG("%s %s:", pszIndent, pszWhich); + _PrintDevicememHistoryQueryOut(pfnDumpDebugPrintf, pvDumpDebugFile, + &psInfo->sProcessInfo, + &psInfo->asQueryOut[i], + pszIndent); + } + } + else + { + PVR_DUMPDEBUG_LOG("%s No matching Devmem History for fault address", pszIndent); + } +} + +static void _RecordFaultInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + FAULT_INFO *psInfo, + IMG_DEV_VIRTADDR sFaultDevVAddr, + IMG_DEV_PHYADDR sPCDevPAddr, + IMG_UINT64 ui64CRTimer, + IMG_UINT32 ui32PageSizeBytes) +{ + IMG_BOOL bFound = IMG_FALSE, bIsPMFault = IMG_FALSE; + RGXMEM_PROCESS_INFO sProcessInfo; + + psInfo->ui32FaultInfoFlags = 0; + psInfo->sFaultDevVAddr = sFaultDevVAddr; + psInfo->ui64CRTimer = ui64CRTimer; + psInfo->ui64When = OSClockns64(); + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + /* Check if this is PM fault */ + if (psInfo->sMMUFaultData.eType == MMU_FAULT_TYPE_PM) + { + 
bIsPMFault = IMG_TRUE; + bFound = IMG_TRUE; + sProcessInfo.uiPID = RGXMEM_SERVER_PID_PM; + OSStringLCopy(sProcessInfo.szProcessName, "PM", sizeof(sProcessInfo.szProcessName)); + sProcessInfo.szProcessName[sizeof(sProcessInfo.szProcessName) - 1] = '\0'; + sProcessInfo.bUnregistered = IMG_FALSE; + } + else + { + /* look up the process details for the faulting page catalogue */ + bFound = RGXPCAddrToProcessInfo(psDevInfo, sPCDevPAddr, &sProcessInfo); + } + + if (bFound) + { + IMG_BOOL bHits; + + psInfo->ui32FaultInfoFlags = FAULT_INFO_PROC_INFO; + psInfo->sProcessInfo = sProcessInfo; + + if (bIsPMFault) + { + bHits = IMG_TRUE; + } + else + { + /* get any DevicememHistory data for the faulting address */ + bHits = _GetDevicememHistoryData(sProcessInfo.uiPID, + sFaultDevVAddr, + psInfo->asQueryOut, + ui32PageSizeBytes); + + if (bHits) + { + psInfo->ui32FaultInfoFlags |= FAULT_INFO_DEVMEM_HIST; + + /* if the page fault was caused by the firmware then get information about + * which client application created the related allocations. + * + * Fill in the process info data for each query result. + */ + + if (sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE) + { + _FillAppForFWFaults(psDevInfo, psInfo, &sProcessInfo); + } + } + } + } + } +} + +/*! +******************************************************************************* + + @Function _DumpFaultAddressHostView + + @Description + + Dump FW HWR fault status in human readable form. 
+ + @Input ui32Index - Index of global Fault info + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Return void + +******************************************************************************/ +static void _DumpFaultAddressHostView(MMU_FAULT_DATA *psFaultData, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + const IMG_CHAR* pszIndent) +{ + MMU_LEVEL eTopLevel; + const IMG_CHAR szPageLevel[][4] = {"", "PTE", "PDE", "PCE" }; + const IMG_CHAR szPageError[][3] = {"", "PT", "PD", "PC" }; + + eTopLevel = psFaultData->eTopLevel; + + if (psFaultData->eType == MMU_FAULT_TYPE_UNKNOWN) + { + PVR_DUMPDEBUG_LOG("%sNo live host MMU data available", pszIndent); + return; + } + else if (psFaultData->eType == MMU_FAULT_TYPE_PM) + { + PVR_DUMPDEBUG_LOG("%sPM faulted at PC address = 0x%016" IMG_UINT64_FMTSPECx, pszIndent, psFaultData->sLevelData[MMU_LEVEL_0].ui64Address); + } + else + { + MMU_LEVEL eCurrLevel; + PVR_ASSERT(eTopLevel < MMU_LEVEL_LAST); + + for (eCurrLevel = eTopLevel; eCurrLevel > MMU_LEVEL_0; eCurrLevel--) + { + MMU_LEVEL_DATA *psMMULevelData = &psFaultData->sLevelData[eCurrLevel]; + if (psMMULevelData->ui64Address) + { + if (psMMULevelData->uiBytesPerEntry == 4) + { + PVR_DUMPDEBUG_LOG("%s%s for index %d = 0x%08x and is %s", + pszIndent, + szPageLevel[eCurrLevel], + psMMULevelData->ui32Index, + (IMG_UINT) psMMULevelData->ui64Address, + psMMULevelData->psDebugStr); + } + else + { + PVR_DUMPDEBUG_LOG("%s%s for index %d = 0x%016" IMG_UINT64_FMTSPECx " and is %s", + pszIndent, + szPageLevel[eCurrLevel], + psMMULevelData->ui32Index, + psMMULevelData->ui64Address, + psMMULevelData->psDebugStr); + } + } + else + { + PVR_DUMPDEBUG_LOG("%s%s index (%d) out of bounds (%d)", + pszIndent, + szPageError[eCurrLevel], + psMMULevelData->ui32Index, + psMMULevelData->ui32NumOfEntries); + break; + } + } + } + +} + +/*! 
+******************************************************************************* + + @Function _RGXDumpRGXMMUFaultStatus + + @Description + + Dump MMU Fault status in human readable form. + + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psDevInfo - RGX device info + @Input ui64MMUStatus - MMU Status register value + @Input pszMetaOrCore - string representing call is for META or MMU core + @Return void + +******************************************************************************/ +static void _RGXDumpRGXMMUFaultStatus(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT64 aui64MMUStatus[], + const IMG_PCHAR pszMetaOrCore, + const IMG_CHAR *pszIndent) +{ + if (aui64MMUStatus[0] == 0x0) + { + PVR_DUMPDEBUG_LOG("%sMMU (%s) - OK", pszIndent, pszMetaOrCore); + } + else + { + IMG_UINT32 ui32PC = (aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_CONTEXT_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS1_CONTEXT_SHIFT; + IMG_UINT64 ui64Addr = ((aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_ADDRESS_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS1_ADDRESS_SHIFT) << 4; /* align shift */ + IMG_UINT32 ui32Requester = (aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_REQ_ID_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS1_REQ_ID_SHIFT; + IMG_UINT32 ui32MMULevel = (aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_LEVEL_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS1_LEVEL_SHIFT; + IMG_BOOL bRead = (aui64MMUStatus[0] & RGX_CR_MMU_FAULT_STATUS1_RNW_EN) != 0; + IMG_BOOL bFault = (aui64MMUStatus[0] & RGX_CR_MMU_FAULT_STATUS1_FAULT_EN) != 0; + IMG_BOOL bROFault = ((aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_TYPE_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS1_TYPE_SHIFT) == 0x2; + IMG_BOOL bProtFault = ((aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_TYPE_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS1_TYPE_SHIFT) == 0x3; + IMG_UINT32 ui32BIFModule; + IMG_BOOL bWriteBack, 
bFBMFault; + IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE]; + IMG_CHAR *pszTagID = NULL; + IMG_CHAR *pszTagSB = NULL; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE)) + { + ui32BIFModule = (aui64MMUStatus[1] & ~RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BIF_ID_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BIF_ID_SHIFT; + bWriteBack = (aui64MMUStatus[1] & RGX_CR_MMU_FAULT_STATUS2__ALBTOP__WRITEBACK_EN) != 0; + bFBMFault = (aui64MMUStatus[1] & RGX_CR_MMU_FAULT_STATUS2__ALBTOP__FBM_FAULT_EN) != 0; + } + else + { + ui32BIFModule = (aui64MMUStatus[1] & ~RGX_CR_MMU_FAULT_STATUS2_BIF_ID_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS2_BIF_ID_SHIFT; + bWriteBack = (aui64MMUStatus[1] & RGX_CR_MMU_FAULT_STATUS2_WRITEBACK_EN) != 0; + bFBMFault = (aui64MMUStatus[1] & RGX_CR_MMU_FAULT_STATUS2_FBM_FAULT_EN) != 0; + } + + if (strcmp(pszMetaOrCore, "Meta") == 0) + { + ui32PC = (aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT; + ui64Addr = ((aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT) << 4; /* align shift */ + ui32Requester = (aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT; + ui32MMULevel = (aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT; + bRead = (aui64MMUStatus[0] & RGX_CR_MMU_FAULT_STATUS_META_RNW_EN) != 0; + bFault = (aui64MMUStatus[0] & RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN) != 0; + bROFault = ((aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT) == 0x2; + bProtFault = ((aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT) == 0x3; + } + else + { + _RGXDecodeMMUReqTags(psDevInfo, ui32Requester, ui32BIFModule, bRead, bWriteBack, bFBMFault, &pszTagID, &pszTagSB, aszScratch, RGX_DEBUG_STR_SIZE); + } + + 
PVR_DUMPDEBUG_LOG("%sMMU (%s) - FAULT:", pszIndent, pszMetaOrCore); + PVR_DUMPDEBUG_LOG("%s * MMU status (0x%016" IMG_UINT64_FMTSPECX " | 0x%08" IMG_UINT64_FMTSPECX "): PC = %d, %s 0x%010" IMG_UINT64_FMTSPECX ", %s(%s)%s%s%s%s.", + pszIndent, + aui64MMUStatus[0], + aui64MMUStatus[1], + ui32PC, + (bRead)?"Reading from":"Writing to", + ui64Addr, + (pszTagID)? pszTagID : "META", + (pszTagSB)? pszTagSB : "-", + (bFault)?", Fault":"", + (bROFault)?", Read Only fault":"", + (bProtFault)?", PM/META protection fault":"", + _RGXDecodeMMULevel(ui32MMULevel)); + + } +} + +static_assert((RGX_CR_MMU_FAULT_STATUS1_CONTEXT_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS1_CONTEXT_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS1_ADDRESS_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS1_ADDRESS_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS1_REQ_ID_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS1_REQ_ID_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS1_LEVEL_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS1_LEVEL_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS1_RNW_EN == RGX_CR_MMU_FAULT_STATUS_META_RNW_EN), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS1_FAULT_EN == RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN), + 
"RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS1_TYPE_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS1_TYPE_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); + + +static const IMG_FLAGS2DESC asCswOpts2Description[] = +{ + {RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST, " Fast CSW profile;"}, + {RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM, " Medium CSW profile;"}, + {RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW, " Slow CSW profile;"}, + {RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY, " No Delay CSW profile;"}, + {RGXFWIF_INICFG_CTXSWITCH_MODE_RAND, " Random Csw enabled;"}, + {RGXFWIF_INICFG_CTXSWITCH_SRESET_EN, " SoftReset;"}, +}; + +static const IMG_FLAGS2DESC asMisc2Description[] = +{ + {RGXFWIF_INICFG_POW_RASCALDUST, " Power Rascal/Dust;"}, + {RGXFWIF_INICFG_HWPERF_EN, " HwPerf EN;"}, + {RGXFWIF_INICFG_HWR_EN, " HWR EN;"}, + {RGXFWIF_INICFG_HWR_EN, " FBCDCv3.1;"}, + {RGXFWIF_INICFG_CHECK_MLIST_EN, " Check MList;"}, + {RGXFWIF_INICFG_DISABLE_CLKGATING_EN, " ClockGating Off;"}, + {RGXFWIF_INICFG_POLL_COUNTERS_EN, " Poll Counters;"}, + {RGXFWIF_INICFG_REGCONFIG_EN, " Register Config;"}, + {RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY, " Assert on OOM;"}, + {RGXFWIF_INICFG_HWP_DISABLE_FILTER, " HWP Filter Off;"}, + {RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN, " Custom PerfTimer;"}, + {RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN, " CDM Random kill;"}, + {RGXFWIF_INICFG_DISABLE_DM_OVERLAP, " DM Overlap Off;"}, + {RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER, " Assert on HWR;"}, + {RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED, " Coherent fabric on;"}, + {RGXFWIF_INICFG_VALIDATE_IRQ, " Validate IRQ;"}, + {RGXFWIF_INICFG_DISABLE_PDP_EN, " PDUMP Panic off;"}, + {RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN, " SPU Pow mask change on;"}, + {RGXFWIF_INICFG_WORKEST, " Workload Estim;"}, + {RGXFWIF_INICFG_PDVFS, " PDVFS;"}, + 
{RGXFWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND, " CDM task demand arbitration;"}, + {RGXFWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN, " CDM round-robin arbitration;"}, + {RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP, " ISP v1 scheduling;"}, + {RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP, " ISP v2 scheduling;"}, + {RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER, " Validate SOC&USC timers;"} +}; + +static const IMG_FLAGS2DESC asFwOsCfg2Description[] = +{ + {RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN, " TDM;"}, + {RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN, " GEOM;"}, + {RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN, " 3D;"}, + {RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN, " CDM;"}, + {RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM, " LowPrio TDM;"}, + {RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM, " LowPrio GEOM;"}, + {RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D, " LowPrio 3D;"}, + {RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM, " LowPrio CDM;"}, +}; + +static const IMG_FLAGS2DESC asHwrState2Description[] = +{ + {RGXFWIF_HWR_HARDWARE_OK, " HWR OK;"}, + {RGXFWIF_HWR_RESET_IN_PROGRESS, " Reset ongoing;"}, + {RGXFWIF_HWR_ANALYSIS_DONE, " Analysis done;"}, + {RGXFWIF_HWR_GENERAL_LOCKUP, " General lockup;"}, + {RGXFWIF_HWR_DM_RUNNING_OK, " DM running ok;"}, + {RGXFWIF_HWR_DM_STALLING, " DM stalling;"}, + {RGXFWIF_HWR_FW_FAULT, " FW Fault;"}, + {RGXFWIF_HWR_RESTART_REQUESTED, " Restart requested;"}, +}; + +static const IMG_FLAGS2DESC asDmState2Description[] = +{ + {RGXFWIF_DM_STATE_WORKING, " working;"}, + {RGXFWIF_DM_STATE_READY_FOR_HWR, " ready for hwr;"}, + {RGXFWIF_DM_STATE_NEEDS_SKIP, " needs skip;"}, + {RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP, " needs PR cleanup;"}, + {RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR, " needs trace clear;"}, + {RGXFWIF_DM_STATE_GUILTY_LOCKUP, " guilty lockup;"}, + {RGXFWIF_DM_STATE_INNOCENT_LOCKUP, " innocent lockup;"}, + {RGXFWIF_DM_STATE_GUILTY_OVERRUNING, " guilty overrunning;"}, + {RGXFWIF_DM_STATE_INNOCENT_OVERRUNING, " innocent overrunning;"}, +}; + +static const IMG_FLAGS2DESC asHWErrorState[] = +{ + {RGX_HW_ERR_NA, "N/A"}, + 
{RGX_HW_ERR_PRIMID_FAILURE_DURING_DMKILL, "Primitive ID failure during DM kill."}, +}; + +/* + Appends flags strings to a null-terminated string buffer - each flag + description string starts with a space. +*/ +static void _Flags2Description(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, const IMG_FLAGS2DESC *psConvTable, IMG_UINT32 ui32TableSize, IMG_UINT32 ui32Flags) +{ + IMG_UINT32 ui32Idx; + + for (ui32Idx = 0; ui32Idx < ui32TableSize; ui32Idx++) + { + if ((ui32Flags & psConvTable[ui32Idx].uiFlag) == psConvTable[ui32Idx].uiFlag) + { + OSStringLCat(psDesc, psConvTable[ui32Idx].pszLabel, ui32DescSize); + } + } +} + +/* + * Translate ID code to descriptive string. + * Returns on the first match. + */ +static void _ID2Description(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, const IMG_FLAGS2DESC *psConvTable, IMG_UINT32 ui32TableSize, IMG_UINT32 ui32ID) +{ + IMG_UINT32 ui32Idx; + + for (ui32Idx = 0; ui32Idx < ui32TableSize; ui32Idx++) + { + if (ui32ID == psConvTable[ui32Idx].uiFlag) + { + OSStringLCopy(psDesc, psConvTable[ui32Idx].pszLabel, ui32DescSize); + return; + } + } +} + +/* + Writes flags strings to an uninitialised buffer. 
+*/ +static void _GetFwSysFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, IMG_UINT32 ui32RawFlags) +{ + const IMG_CHAR *psCswLabel = "Ctx switch options:"; + size_t uLabelLen = OSStringLength(psCswLabel); + const size_t uiBytesPerDesc = (ui32DescSize - uLabelLen) / 2U - 1U; + + OSStringLCopy(psDesc, psCswLabel, ui32DescSize); + + _Flags2Description(psDesc, uiBytesPerDesc + uLabelLen, asCswOpts2Description, ARRAY_SIZE(asCswOpts2Description), ui32RawFlags); + _Flags2Description(psDesc, ui32DescSize, asMisc2Description, ARRAY_SIZE(asMisc2Description), ui32RawFlags); +} + +static void _GetFwOsFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, IMG_UINT32 ui32RawFlags) +{ + const IMG_CHAR *psCswLabel = "Ctx switch:"; + size_t uLabelLen = OSStringLength(psCswLabel); + const size_t uiBytesPerDesc = (ui32DescSize - uLabelLen) / 2U - 1U; + + OSStringLCopy(psDesc, psCswLabel, ui32DescSize); + + _Flags2Description(psDesc, uiBytesPerDesc + uLabelLen, asFwOsCfg2Description, ARRAY_SIZE(asFwOsCfg2Description), ui32RawFlags); +} + +/*! +******************************************************************************* + + @Function _RGXDumpFWAssert + + @Description + + Dump FW assert strings when a thread asserts. 
+ + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psRGXFWIfTraceBufCtl - RGX FW trace buffer + + @Return void + +******************************************************************************/ +static void _RGXDumpFWAssert(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl) +{ + IMG_CHAR *pszTraceAssertPath; + IMG_CHAR *pszTraceAssertInfo; + IMG_INT32 ui32TraceAssertLine; + IMG_UINT32 i; + + for (i = 0; i < RGXFW_THREAD_NUM; i++) + { + pszTraceAssertPath = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szPath; + pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szInfo; + ui32TraceAssertLine = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.ui32LineNum; + + /* print non-null assert strings */ + if (*pszTraceAssertInfo) + { + PVR_DUMPDEBUG_LOG("FW-T%d Assert: %s (%s:%d)", + i, pszTraceAssertInfo, pszTraceAssertPath, ui32TraceAssertLine); + } + } +} + +/*! +******************************************************************************* + + @Function _RGXDumpFWFaults + + @Description + + Dump FW assert strings when a thread asserts. 
+ + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psFwSysData - RGX FW shared system data + + @Return void + +******************************************************************************/ +static void _RGXDumpFWFaults(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXFWIF_SYSDATA *psFwSysData) +{ + if (psFwSysData->ui32FWFaults > 0) + { + IMG_UINT32 ui32StartFault = psFwSysData->ui32FWFaults - RGXFWIF_FWFAULTINFO_MAX; + IMG_UINT32 ui32EndFault = psFwSysData->ui32FWFaults - 1; + IMG_UINT32 ui32Index; + + if (psFwSysData->ui32FWFaults < RGXFWIF_FWFAULTINFO_MAX) + { + ui32StartFault = 0; + } + + for (ui32Index = ui32StartFault; ui32Index <= ui32EndFault; ui32Index++) + { + RGX_FWFAULTINFO *psFaultInfo = &psFwSysData->sFaultInfo[ui32Index % RGXFWIF_FWFAULTINFO_MAX]; + IMG_UINT64 ui64Seconds, ui64Nanoseconds; + + /* Split OS timestamp in seconds and nanoseconds */ + ConvertOSTimestampToSAndNS(psFaultInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds); + + PVR_DUMPDEBUG_LOG("FW Fault %d: %s (%s:%d)", + ui32Index+1, psFaultInfo->sFaultBuf.szInfo, + psFaultInfo->sFaultBuf.szPath, + psFaultInfo->sFaultBuf.ui32LineNum); + PVR_DUMPDEBUG_LOG(" Data = 0x%08x, CRTimer = 0x%012"IMG_UINT64_FMTSPECX", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC, + psFaultInfo->ui32Data, + psFaultInfo->ui64CRTimer, + ui64Seconds, ui64Nanoseconds); + } + } +} + +static void _RGXDumpFWPoll(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXFWIF_SYSDATA *psFwSysData) +{ + IMG_UINT32 i; + for (i = 0; i < RGXFW_THREAD_NUM; i++) + { + if (psFwSysData->aui32CrPollAddr[i]) + { + PVR_DUMPDEBUG_LOG("T%u polling %s (reg:0x%08X mask:0x%08X)", + i, + ((psFwSysData->aui32CrPollAddr[i] & RGXFW_POLL_TYPE_SET)?("set"):("unset")), + psFwSysData->aui32CrPollAddr[i] & ~RGXFW_POLL_TYPE_SET, + psFwSysData->aui32CrPollMask[i]); + } + } + +} + 
+static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXFWIF_SYSDATA *psFwSysData, + RGXFWIF_HWRINFOBUF *psHWRInfoBuf, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_BOOL bAnyLocked = IMG_FALSE; + IMG_UINT32 dm, i; + IMG_UINT32 ui32LineSize; + IMG_CHAR *pszLine, *pszTemp; + const IMG_CHAR *apszDmNames[] = {"GP", "2D", "GEOM", "3D", "CDM"}; + + const IMG_CHAR szMsgHeader[] = "Number of HWR: "; + const IMG_CHAR szMsgFalse[] = "FALSE("; + IMG_CHAR *pszLockupType = ""; + RGX_HWRINFO *psHWRInfo; + const IMG_UINT32 ui32MsgHeaderCharCount = ARRAY_SIZE(szMsgHeader) - 1; /* size includes the null */ + const IMG_UINT32 ui32MsgFalseCharCount = ARRAY_SIZE(szMsgFalse) - 1; + IMG_UINT32 ui32HWRRecoveryFlags; + IMG_UINT32 ui32ReadIndex; + + for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) + { + if (psHWRInfoBuf->aui32HwrDmLockedUpCount[dm] || + psHWRInfoBuf->aui32HwrDmOverranCount[dm]) + { + bAnyLocked = IMG_TRUE; + break; + } + } + + if (!PVRSRV_VZ_MODE_IS(GUEST) && !bAnyLocked && (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_HARDWARE_OK)) + { + /* No HWR situation, print nothing */ + return; + } + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + IMG_BOOL bAnyHWROccured = IMG_FALSE; + + for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) + { + if (psHWRInfoBuf->aui32HwrDmRecoveredCount[dm] != 0 || + psHWRInfoBuf->aui32HwrDmLockedUpCount[dm] != 0 || + psHWRInfoBuf->aui32HwrDmOverranCount[dm] !=0) + { + bAnyHWROccured = IMG_TRUE; + break; + } + } + + if (!bAnyHWROccured) + { + return; + } + } + + ui32LineSize = sizeof(IMG_CHAR) * ( + ui32MsgHeaderCharCount + + (psDevInfo->sDevFeatureCfg.ui32MAXDMCount*( 4/*DM name + left parenthesis*/ + + 10/*UINT32 max num of digits*/ + + 1/*slash*/ + + 10/*UINT32 max num of digits*/ + + 3/*right parenthesis + comma + space*/)) + + ui32MsgFalseCharCount + 1 + (psDevInfo->sDevFeatureCfg.ui32MAXDMCount*6) + 1 + /* 'FALSE(' + ')' + (UINT16 max num + comma) per DM + \0 */ + ); + + pszLine 
= OSAllocMem(ui32LineSize); + if (pszLine == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Out of mem allocating line string (size: %d)", + __func__, + ui32LineSize)); + return; + } + + OSStringLCopy(pszLine, szMsgHeader, ui32LineSize); + pszTemp = pszLine + ui32MsgHeaderCharCount; + + for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) + { + pszTemp += OSSNPrintf(pszTemp, + 4 + 10 + 1 + 10 + 1 + 10 + 1 + 1 + 1 + 1 + /* (name + left parenthesis) + UINT32 + slash + UINT32 + plus + UINT32 + right parenthesis + comma + space + \0 */, + "%s(%u/%u+%u), ", + apszDmNames[dm], + psHWRInfoBuf->aui32HwrDmRecoveredCount[dm], + psHWRInfoBuf->aui32HwrDmLockedUpCount[dm], + psHWRInfoBuf->aui32HwrDmOverranCount[dm]); + } + + OSStringLCat(pszLine, szMsgFalse, ui32LineSize); + pszTemp += ui32MsgFalseCharCount; + + for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) + { + pszTemp += OSSNPrintf(pszTemp, + 10 + 1 + 1 /* UINT32 max num + comma + \0 */, + (dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount-1 ? 
"%u," : "%u)"), + psHWRInfoBuf->aui32HwrDmFalseDetectCount[dm]); + } + + PVR_DUMPDEBUG_LOG("%s", pszLine); + + OSFreeMem(pszLine); + + /* Print out per HWR info */ + for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) + { + if (dm == RGXFWIF_DM_GP) + { + PVR_DUMPDEBUG_LOG("DM %d (GP)", dm); + } + else + { + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + IMG_CHAR sPerDmHwrDescription[RGX_DEBUG_STR_SIZE]; + sPerDmHwrDescription[0] = '\0'; + + _Flags2Description(sPerDmHwrDescription, RGX_DEBUG_STR_SIZE, + asDmState2Description, ARRAY_SIZE(asDmState2Description), + psFwSysData->aui32HWRRecoveryFlags[dm]); + PVR_DUMPDEBUG_LOG("DM %d (HWRflags 0x%08x:%s)", dm, psFwSysData->aui32HWRRecoveryFlags[dm], sPerDmHwrDescription); + } + else + { + PVR_DUMPDEBUG_LOG("DM %d", dm); + } + } + + ui32ReadIndex = 0; + for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++) + { + IMG_BOOL bPMFault = IMG_FALSE; + IMG_UINT32 ui32PC; + IMG_UINT32 ui32PageSize = 0; + IMG_DEV_PHYADDR sPCDevPAddr = { 0 }; + + psHWRInfo = &psHWRInfoBuf->sHWRInfo[ui32ReadIndex]; + + if ((psHWRInfo->eDM == dm) && (psHWRInfo->ui32HWRNumber != 0)) + { + IMG_CHAR aui8RecoveryNum[10+10+1]; + IMG_UINT64 ui64Seconds, ui64Nanoseconds; + IMG_BOOL bPageFault = IMG_FALSE; + IMG_DEV_VIRTADDR sFaultDevVAddr; + + /* Split OS timestamp in seconds and nanoseconds */ + ConvertOSTimestampToSAndNS(psHWRInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds); + + ui32HWRRecoveryFlags = psHWRInfo->ui32HWRRecoveryFlags; + if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_LOCKUP) { pszLockupType = ", Guilty Lockup"; } + else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_LOCKUP) { pszLockupType = ", Innocent Lockup"; } + else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_OVERRUNING) { pszLockupType = ", Guilty Overrun"; } + else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_OVERRUNING) { pszLockupType = ", Innocent Overrun"; } + else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH) { pszLockupType = ", Hard 
Context Switch"; } + + OSSNPrintf(aui8RecoveryNum, sizeof(aui8RecoveryNum), "Recovery %d:", psHWRInfo->ui32HWRNumber); + PVR_DUMPDEBUG_LOG(" %s PID = %u, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s", + aui8RecoveryNum, + psHWRInfo->ui32PID, + psHWRInfo->ui32FrameNum, + psHWRInfo->ui32ActiveHWRTData, + psHWRInfo->ui32EventStatus, + pszLockupType); + if (psHWRInfo->eHWErrorCode != RGX_HW_ERR_NA) + { + IMG_CHAR sHWDebugInfo[RGX_DEBUG_STR_SIZE] = ""; + + _ID2Description(sHWDebugInfo, RGX_DEBUG_STR_SIZE, asHWErrorState, ARRAY_SIZE(asHWErrorState), + psHWRInfo->eHWErrorCode); + PVR_DUMPDEBUG_LOG(" HW error code = 0x%X: %s", + psHWRInfo->eHWErrorCode, sHWDebugInfo); + } + + pszTemp = &aui8RecoveryNum[0]; + while (*pszTemp != '\0') + { + *pszTemp++ = ' '; + } + + /* There's currently no time correlation for the Guest OSes on the Firmware so there's no point printing OS Timestamps on Guests */ + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + PVR_DUMPDEBUG_LOG(" %s CRTimer = 0x%012"IMG_UINT64_FMTSPECX", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ", CyclesElapsed = %" IMG_INT64_FMTSPECd, + aui8RecoveryNum, + psHWRInfo->ui64CRTimer, + ui64Seconds, + ui64Nanoseconds, + (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256); + } + else + { + PVR_DUMPDEBUG_LOG(" %s CRTimer = 0x%012"IMG_UINT64_FMTSPECX", CyclesElapsed = %" IMG_INT64_FMTSPECd, + aui8RecoveryNum, + psHWRInfo->ui64CRTimer, + (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256); + } + + if (psHWRInfo->ui64CRTimeHWResetFinish != 0) + { + if (psHWRInfo->ui64CRTimeFreelistReady != 0) + { + PVR_DUMPDEBUG_LOG(" %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", FreelistReconTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalRecoveryTimeInCycles = %" IMG_INT64_FMTSPECd, + aui8RecoveryNum, + (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256, + (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256, + 
(psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimeHWResetFinish)*256, + (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimer)*256); + } + else + { + PVR_DUMPDEBUG_LOG(" %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalRecoveryTimeInCycles = %" IMG_INT64_FMTSPECd "", + aui8RecoveryNum, + (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256, + (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256, + (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimer)*256); + } + } + + switch (psHWRInfo->eHWRType) + { + case RGX_HWRTYPE_ECCFAULT: + { + PVR_DUMPDEBUG_LOG(" ECC fault GPU=0x%08x FW=0x%08x", + psHWRInfo->uHWRData.sECCInfo.ui32FaultGPU, + psHWRInfo->uHWRData.sECCInfo.ui32FaultFW); + } + break; + + case RGX_HWRTYPE_MMUFAULT: + { + _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, + &psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0], + "Core", + DD_NORMAL_INDENT); + + bPageFault = IMG_TRUE; + sFaultDevVAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0]; + sFaultDevVAddr.uiAddr &= ~RGX_CR_MMU_FAULT_STATUS1_ADDRESS_CLRMSK; + sFaultDevVAddr.uiAddr >>= RGX_CR_MMU_FAULT_STATUS1_ADDRESS_SHIFT; + sFaultDevVAddr.uiAddr <<= 4; /* align shift */ + ui32PC = (psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_CONTEXT_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS1_CONTEXT_SHIFT; +#if defined(SUPPORT_TRUSTED_DEVICE) + ui32PC = ui32PC - 1; +#endif + bPMFault = (ui32PC <= 8); + sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress; + + } + break; + case RGX_HWRTYPE_MMUMETAFAULT: + { + _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, + &psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0], + "Meta", + DD_NORMAL_INDENT); + + bPageFault = IMG_TRUE; + sFaultDevVAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0]; + sFaultDevVAddr.uiAddr &= ~RGX_CR_MMU_FAULT_STATUS1_ADDRESS_CLRMSK; + sFaultDevVAddr.uiAddr >>= 
RGX_CR_MMU_FAULT_STATUS1_ADDRESS_SHIFT; + sFaultDevVAddr.uiAddr <<= 4; /* align shift */ + sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress; + } + break; + case RGX_HWRTYPE_POLLFAILURE: + { + PVR_DUMPDEBUG_LOG(" T%u polling %s (reg:0x%08X mask:0x%08X last:0x%08X)", + psHWRInfo->uHWRData.sPollInfo.ui32ThreadNum, + ((psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & RGXFW_POLL_TYPE_SET)?("set"):("unset")), + psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & ~RGXFW_POLL_TYPE_SET, + psHWRInfo->uHWRData.sPollInfo.ui32CrPollMask, + psHWRInfo->uHWRData.sPollInfo.ui32CrPollLastValue); + } + break; + + case RGX_HWRTYPE_OVERRUN: + case RGX_HWRTYPE_UNKNOWNFAILURE: + { + /* Nothing to dump */ + } + break; + + default: + { + PVR_ASSERT(IMG_FALSE); + } + break; + } + + if (bPageFault) + { + + FAULT_INFO *psInfo; + + OSLockAcquire(psDevInfo->hDebugFaultInfoLock); + + /* Find the matching Fault Info for this HWRInfo */ + psInfo = &gsFaultInfoLog.asFaults[ui32ReadIndex]; + + /* if they do not match, we need to update the psInfo */ + if ((psInfo->ui64CRTimer != psHWRInfo->ui64CRTimer) || + (psInfo->sFaultDevVAddr.uiAddr != sFaultDevVAddr.uiAddr)) + { + MMU_FAULT_DATA *psFaultData = &psInfo->sMMUFaultData; + + psFaultData->eType = MMU_FAULT_TYPE_UNKNOWN; + + if (bPMFault) + { + /* PM fault and we dump PC details only */ + psFaultData->eTopLevel = MMU_LEVEL_0; + psFaultData->eType = MMU_FAULT_TYPE_PM; + psFaultData->sLevelData[MMU_LEVEL_0].ui64Address = sPCDevPAddr.uiAddr; + } + else + { + RGXCheckFaultAddress(psDevInfo, &sFaultDevVAddr, &sPCDevPAddr, psFaultData); + } + + _RecordFaultInfo(psDevInfo, psInfo, + sFaultDevVAddr, sPCDevPAddr, psHWRInfo->ui64CRTimer, + _PageSizeHWToBytes(ui32PageSize)); + + } + + _DumpFaultAddressHostView(&psInfo->sMMUFaultData, pfnDumpDebugPrintf, pvDumpDebugFile, DD_NORMAL_INDENT); + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + _PrintFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psInfo, DD_NORMAL_INDENT); + } 
+ + OSLockRelease(psDevInfo->hDebugFaultInfoLock); + } + + } + + if (ui32ReadIndex == RGXFWIF_HWINFO_MAX_FIRST - 1) + ui32ReadIndex = psHWRInfoBuf->ui32WriteIndex; + else + ui32ReadIndex = (ui32ReadIndex + 1) - (ui32ReadIndex / RGXFWIF_HWINFO_LAST_INDEX) * RGXFWIF_HWINFO_MAX_LAST; + } + } +} + +#if !defined(NO_HARDWARE) + +/*! +******************************************************************************* + + @Function _CheckForPendingPage + + @Description + + Check if the MMU indicates it is blocked on a pending page + MMU4 does not support pending pages, so return false. + + @Input psDevInfo - RGX device info + + @Return IMG_BOOL - IMG_TRUE if there is a pending page + +******************************************************************************/ +static INLINE IMG_BOOL _CheckForPendingPage(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + /* MMU4 doesn't support pending pages */ + return (RGX_GET_FEATURE_VALUE(psDevInfo, MMU_VERSION) < 4) && + (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_ENTRY) & RGX_CR_MMU_ENTRY_PENDING_EN); +} + +/*! 
+******************************************************************************* + + @Function _GetPendingPageInfo + + @Description + + Get information about the pending page from the MMU status registers + + @Input psDevInfo - RGX device info + @Output psDevVAddr - The device virtual address of the pending MMU address translation + @Output pui32CatBase - The page catalog base + + @Return void + +******************************************************************************/ +static void _GetPendingPageInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_VIRTADDR *psDevVAddr, + IMG_UINT32 *pui32CatBase) +{ + IMG_UINT64 ui64BIFMMUEntryStatus; + + ui64BIFMMUEntryStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_ENTRY_STATUS); + + psDevVAddr->uiAddr = (ui64BIFMMUEntryStatus & ~RGX_CR_MMU_ENTRY_STATUS_ADDRESS_CLRMSK); + + *pui32CatBase = (ui64BIFMMUEntryStatus & ~RGX_CR_MMU_ENTRY_STATUS_CONTEXT_ID_CLRMSK) >> + RGX_CR_MMU_ENTRY_STATUS_CONTEXT_ID_SHIFT; +} + +#endif + +void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_BOOL bRGXPoweredON) +{ + IMG_CHAR *pszState, *pszReason; + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + IMG_UINT32 ui32OSid; + RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; + /* space for the current clock speed and 3 previous */ + RGXFWIF_TIME_CORR asTimeCorrs[4]; + IMG_UINT32 ui32NumClockSpeedChanges; + +#if defined(NO_HARDWARE) + PVR_UNREFERENCED_PARAMETER(bRGXPoweredON); +#else + if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(GUEST)) + { + IMG_UINT64 aui64RegValMMUStatus[2]; + + aui64RegValMMUStatus[0] = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS1); + aui64RegValMMUStatus[1] = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS2); + _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, 
&aui64RegValMMUStatus[0], "Core", DD_SUMMARY_INDENT); + + aui64RegValMMUStatus[0] = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS_META); + _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, &aui64RegValMMUStatus[0], "Meta", DD_SUMMARY_INDENT); + + if (_CheckForPendingPage(psDevInfo)) + { + IMG_UINT32 ui32CatBase; + IMG_DEV_VIRTADDR sDevVAddr; + + PVR_DUMPDEBUG_LOG("MMU Pending page: Yes"); + + _GetPendingPageInfo(psDevInfo, &sDevVAddr, &ui32CatBase); + + if (ui32CatBase <= MAX_RESERVED_FW_MMU_CONTEXT) + { + PVR_DUMPDEBUG_LOG("Cannot check address on PM cat base %u", ui32CatBase); + } + else + { + IMG_UINT64 ui64CBaseMapping; + IMG_DEV_PHYADDR sPCDevPAddr; + MMU_FAULT_DATA sFaultData; + IMG_BOOL bIsValid; + + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, ui32CatBase); + + ui64CBaseMapping = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_CBASE_MAPPING); + sPCDevPAddr.uiAddr = (((ui64CBaseMapping & ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK) + >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT) + << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT); + bIsValid = !(ui64CBaseMapping & RGX_CR_MMU_CBASE_MAPPING_INVALID_EN); + + PVR_DUMPDEBUG_LOG("Checking device virtual address " IMG_DEV_VIRTADDR_FMTSPEC + " on cat base %u. PC Addr = 0x%llX is %s", + (unsigned long long) sDevVAddr.uiAddr, + ui32CatBase, + (unsigned long long) sPCDevPAddr.uiAddr, + bIsValid ? 
"valid":"invalid"); + RGXCheckFaultAddress(psDevInfo, &sDevVAddr, &sPCDevPAddr, &sFaultData); + _DumpFaultAddressHostView(&sFaultData, pfnDumpDebugPrintf, pvDumpDebugFile, DD_SUMMARY_INDENT); + } + } + } +#endif /* NO_HARDWARE */ + + /* Firmware state */ + switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthStatus)) + { + case PVRSRV_DEVICE_HEALTH_STATUS_OK: pszState = "OK"; break; + case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: pszState = "NOT RESPONDING"; break; + case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: pszState = "DEAD"; break; + case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: pszState = "FAULT"; break; + case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: pszState = "UNDEFINED"; break; + default: pszState = "UNKNOWN"; break; + } + + switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthReason)) + { + case PVRSRV_DEVICE_HEALTH_REASON_NONE: pszReason = ""; break; + case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: pszReason = " - Asserted"; break; + case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: pszReason = " - Poll failing"; break; + case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: pszReason = " - Global Event Object timeouts rising"; break; + case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: pszReason = " - KCCB offset invalid"; break; + case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: pszReason = " - KCCB stalled"; break; + case PVRSRV_DEVICE_HEALTH_REASON_IDLING: pszReason = " - Idling"; break; + case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: pszReason = " - Restarting"; break; + case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS: pszReason = " - Missing interrupts"; break; + default: pszReason = " - Unknown reason"; break; + } + +#if !defined(NO_HARDWARE) + /* Determine the type virtualisation support used */ +#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) + if (!PVRSRV_VZ_MODE_IS(NATIVE)) + { +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) +#if defined(SUPPORT_AUTOVZ) +#if defined(SUPPORT_AUTOVZ_HW_REGS) + PVR_DUMPDEBUG_LOG("RGX Virtualisation type: AutoVz with HW 
register support"); +#else + PVR_DUMPDEBUG_LOG("RGX Virtualisation type: AutoVz with shared memory"); +#endif /* defined(SUPPORT_AUTOVZ_HW_REGS) */ +#else + PVR_DUMPDEBUG_LOG("RGX Virtualisation type: Hypervisor-assisted with static Fw heap allocation"); +#endif /* defined(SUPPORT_AUTOVZ) */ +#else + PVR_DUMPDEBUG_LOG("RGX Virtualisation type: Hypervisor-assisted with dynamic Fw heap allocation"); +#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ + } +#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ + +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1)) + { + RGXFWIF_CONNECTION_FW_STATE eFwState = KM_GET_FW_CONNECTION(psDevInfo); + RGXFWIF_CONNECTION_OS_STATE eOsState = KM_GET_OS_CONNECTION(psDevInfo); + + PVR_DUMPDEBUG_LOG("RGX firmware connection state: %s (Fw=%s; OS=%s)", + ((eFwState == RGXFW_CONNECTION_FW_ACTIVE) && (eOsState == RGXFW_CONNECTION_OS_ACTIVE)) ? ("UP") : ("DOWN"), + (eFwState < RGXFW_CONNECTION_FW_STATE_COUNT) ? (apszFwOsStateName[eFwState]) : ("invalid"), + (eOsState < RGXFW_CONNECTION_OS_STATE_COUNT) ? 
(apszFwOsStateName[eOsState]) : ("invalid")); + + } +#endif + +#if defined(SUPPORT_AUTOVZ) && defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) + if (!PVRSRV_VZ_MODE_IS(NATIVE)) + { + IMG_UINT32 ui32FwAliveTS = KM_GET_FW_ALIVE_TOKEN(psDevInfo); + IMG_UINT32 ui32OsAliveTS = KM_GET_OS_ALIVE_TOKEN(psDevInfo); + + PVR_DUMPDEBUG_LOG("RGX virtualisation watchdog timestamps (in GPU timer ticks): Fw=%u; OS=%u; diff(FW, OS) = %u", + ui32FwAliveTS, ui32OsAliveTS, ui32FwAliveTS - ui32OsAliveTS); + } +#endif +#endif /* !defined(NO_HARDWARE) */ + + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + IMG_CHAR sHwrStateDescription[RGX_DEBUG_STR_SIZE]; + + if (psFwSysData == NULL) + { + /* can't dump any more information */ + PVR_DUMPDEBUG_LOG("RGX FW State: %s%s", pszState, pszReason); + return; + } + +#if defined(PVR_ENABLE_PHR) + { + IMG_CHAR sPHRConfigDescription[RGX_DEBUG_STR_SIZE]; + + sPHRConfigDescription[0] = '\0'; + _Flags2Description(sPHRConfigDescription, RGX_DEBUG_STR_SIZE, + asPHRConfig2Description, ARRAY_SIZE(asPHRConfig2Description), + BIT_ULL(psFwSysData->ui32PHRModeMirror)); + PVR_DUMPDEBUG_LOG("RGX PHR configuration: (%d) %s", psFwSysData->ui32PHRModeMirror, sPHRConfigDescription); + } +#endif + + sHwrStateDescription[0] = '\0'; + + _Flags2Description(sHwrStateDescription, RGX_DEBUG_STR_SIZE, + asHwrState2Description, ARRAY_SIZE(asHwrState2Description), + psFwSysData->ui32HWRStateFlags); + PVR_DUMPDEBUG_LOG("RGX FW State: %s%s (HWRState 0x%08x:%s)", pszState, pszReason, psFwSysData->ui32HWRStateFlags, sHwrStateDescription); + PVR_DUMPDEBUG_LOG("RGX FW Power State: %s (APM %s: %d ok, %d denied, %d non-idle, %d retry, %d other, %d total. 
Latency: %u ms)", + pszPowStateName[psFwSysData->ePowState], + (psDevInfo->pvAPMISRData)?"enabled":"disabled", + psDevInfo->ui32ActivePMReqOk - psDevInfo->ui32ActivePMReqNonIdle, + psDevInfo->ui32ActivePMReqDenied, + psDevInfo->ui32ActivePMReqNonIdle, + psDevInfo->ui32ActivePMReqRetry, + psDevInfo->ui32ActivePMReqTotal - + psDevInfo->ui32ActivePMReqOk - + psDevInfo->ui32ActivePMReqDenied - + psDevInfo->ui32ActivePMReqRetry - + psDevInfo->ui32ActivePMReqNonIdle, + psDevInfo->ui32ActivePMReqTotal, + psRuntimeCfg->ui32ActivePMLatencyms); + + ui32NumClockSpeedChanges = (IMG_UINT32) OSAtomicRead(&psDevInfo->psDeviceNode->iNumClockSpeedChanges); + RGXGetTimeCorrData(psDevInfo->psDeviceNode, asTimeCorrs, ARRAY_SIZE(asTimeCorrs)); + + PVR_DUMPDEBUG_LOG("RGX DVFS: %u frequency changes. " + "Current frequency: %u.%03u MHz (sampled at %" IMG_UINT64_FMTSPEC " ns). " + "FW frequency: %u.%03u MHz.", + ui32NumClockSpeedChanges, + asTimeCorrs[0].ui32CoreClockSpeed / 1000000, + (asTimeCorrs[0].ui32CoreClockSpeed / 1000) % 1000, + asTimeCorrs[0].ui64OSTimeStamp, + psRuntimeCfg->ui32CoreClockSpeed / 1000000, + (psRuntimeCfg->ui32CoreClockSpeed / 1000) % 1000); + if (ui32NumClockSpeedChanges > 0) + { + PVR_DUMPDEBUG_LOG(" Previous frequencies: %u.%03u, %u.%03u, %u.%03u MHz (Sampled at " + "%" IMG_UINT64_FMTSPEC ", %" IMG_UINT64_FMTSPEC ", %" IMG_UINT64_FMTSPEC ")", + asTimeCorrs[1].ui32CoreClockSpeed / 1000000, + (asTimeCorrs[1].ui32CoreClockSpeed / 1000) % 1000, + asTimeCorrs[2].ui32CoreClockSpeed / 1000000, + (asTimeCorrs[2].ui32CoreClockSpeed / 1000) % 1000, + asTimeCorrs[3].ui32CoreClockSpeed / 1000000, + (asTimeCorrs[3].ui32CoreClockSpeed / 1000) % 1000, + asTimeCorrs[1].ui64OSTimeStamp, + asTimeCorrs[2].ui64OSTimeStamp, + asTimeCorrs[3].ui64OSTimeStamp); + } + + for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + { + RGXFWIF_OS_RUNTIME_FLAGS sFwRunFlags = psFwSysData->asOsRuntimeFlagsMirror[ui32OSid]; + + PVR_DUMPDEBUG_LOG("RGX FW OS %u - State: %s; Freelists: 
%s%s; Priority: %d; %s", ui32OSid, + apszFwOsStateName[sFwRunFlags.bfOsState], + (sFwRunFlags.bfFLOk) ? "Ok" : "Not Ok", + (sFwRunFlags.bfFLGrowPending) ? "; Grow Request Pending" : "", + psFwSysData->aui32OSidPrioMirror[ui32OSid], + (sFwRunFlags.bfIsolatedOS) ? "; Isolated;" : "" + ); + } + + _RGXDumpFWAssert(pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBufCtl); + _RGXDumpFWFaults(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData); + _RGXDumpFWPoll(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData); + } + else + { + PVR_DUMPDEBUG_LOG("RGX FW State: Unavailable under Guest Mode of operation"); + PVR_DUMPDEBUG_LOG("RGX FW Power State: Unavailable under Guest Mode of operation"); + } + + _RGXDumpFWHWRInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData, psDevInfo->psRGXFWIfHWRInfoBufCtl, psDevInfo); + +#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK) + /* Dump all non-zero values in lines of 8... */ + { + IMG_CHAR pszLine[(9*RGXFWIF_STATS_FRAMEWORK_LINESIZE)+1]; + IMG_UINT32 *pui32FWStatsBuf = psFwSysData->aui32FWStatsBuf; + IMG_UINT32 ui32Index1, ui32Index2; + + PVR_DUMPDEBUG_LOG("STATS[START]: RGXFWIF_STATS_FRAMEWORK_MAX=%d", RGXFWIF_STATS_FRAMEWORK_MAX); + for (ui32Index1 = 0; ui32Index1 < RGXFWIF_STATS_FRAMEWORK_MAX; ui32Index1 += RGXFWIF_STATS_FRAMEWORK_LINESIZE) + { + IMG_UINT32 ui32OrOfValues = 0; + IMG_CHAR *pszBuf = pszLine; + + /* Print all values in this line and skip if all zero... 
*/ + for (ui32Index2 = 0; ui32Index2 < RGXFWIF_STATS_FRAMEWORK_LINESIZE; ui32Index2++) + { + ui32OrOfValues |= pui32FWStatsBuf[ui32Index1+ui32Index2]; + OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32FWStatsBuf[ui32Index1+ui32Index2]); + pszBuf += 9; /* write over the '\0' */ + } + + if (ui32OrOfValues != 0) + { + PVR_DUMPDEBUG_LOG("STATS[%08x]:%s", ui32Index1, pszLine); + } + } + PVR_DUMPDEBUG_LOG("STATS[END]"); + } +#endif +} + +#if defined(SUPPORT_EXTRA_METASP_DEBUG) +static void _RGXDumpMetaSPExtraDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ +/* List of extra META Slave Port debug registers */ +/* Order in these two initialisers must match */ +#define RGX_META_SP_EXTRA_DEBUG \ + X(RGX_CR_META_SP_MSLVCTRL0) \ + X(RGX_CR_META_SP_MSLVCTRL1) \ + X(RGX_CR_META_SP_MSLVDATAX) \ + X(RGX_CR_META_SP_MSLVIRQSTATUS) \ + X(RGX_CR_META_SP_MSLVIRQENABLE) \ + X(RGX_CR_META_SP_MSLVIRQLEVEL) + +#define RGX_META_SP_EXTRA_DEBUG__UNPACKED_ACCESSES \ + X(RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES) \ + X(RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES) \ + X(RGX_CR_META_SP_MSLVDATAX__META_REGISTER_UNPACKED_ACCESSES) \ + X(RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES) \ + X(RGX_CR_META_SP_MSLVIRQENABLE__META_REGISTER_UNPACKED_ACCESSES) \ + X(RGX_CR_META_SP_MSLVIRQLEVEL__META_REGISTER_UNPACKED_ACCESSES) + + IMG_UINT32 ui32Idx, ui32RegIdx; + IMG_UINT32 ui32RegVal; + IMG_UINT32 ui32RegAddr; + + const IMG_UINT32* pui32DebugRegAddr; + const IMG_UINT32 aui32DebugRegAddr [] = { +#define X(A) A, + RGX_META_SP_EXTRA_DEBUG +#undef X + }; + const IMG_UINT32 aui32DebugRegAddrUA [] = { +#define X(A) A, + RGX_META_SP_EXTRA_DEBUG__UNPACKED_ACCESSES +#undef X + }; + + const IMG_CHAR* apszDebugRegName [] = { +#define X(A) #A, + RGX_META_SP_EXTRA_DEBUG +#undef X + }; + + const IMG_UINT32 aui32Debug2RegAddr [] = {0xA28, 0x0A30, 0x0A38}; + + PVR_DUMPDEBUG_LOG("META Slave Port extra debug:"); + + /* 
array of register offset values depends on feature. But don't augment names in apszDebugRegName */ + PVR_ASSERT(sizeof(aui32DebugRegAddrUA) == sizeof(aui32DebugRegAddr)); + pui32DebugRegAddr = RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES) ? + aui32DebugRegAddrUA : aui32DebugRegAddr; + + /* dump first set of Slave Port debug registers */ + for (ui32Idx = 0; ui32Idx < sizeof(aui32DebugRegAddr)/sizeof(IMG_UINT32); ui32Idx++) + { + const IMG_CHAR* pszRegName = apszDebugRegName[ui32Idx]; + + ui32RegAddr = pui32DebugRegAddr[ui32Idx]; + ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr); + PVR_DUMPDEBUG_LOG(" * %s: 0x%8.8X", pszRegName, ui32RegVal); + } + + /* dump second set of Slave Port debug registers */ + for (ui32Idx = 0; ui32Idx < 4; ui32Idx++) + { + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, 0xA20, ui32Idx); + ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, 0xA20); + PVR_DUMPDEBUG_LOG(" * 0xA20[%d]: 0x%8.8X", ui32Idx, ui32RegVal); + + } + + for (ui32RegIdx = 0; ui32RegIdx < sizeof(aui32Debug2RegAddr)/sizeof(IMG_UINT32); ui32RegIdx++) + { + ui32RegAddr = aui32Debug2RegAddr[ui32RegIdx]; + for (ui32Idx = 0; ui32Idx < 2; ui32Idx++) + { + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr, ui32Idx); + ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr); + PVR_DUMPDEBUG_LOG(" * 0x%X[%d]: 0x%8.8X", ui32RegAddr, ui32Idx, ui32RegVal); + } + } + +} +#endif /* SUPPORT_EXTRA_METASP_DEBUG */ + +/* + * Array of all the Firmware Trace log IDs used to convert the trace data. 
+ */ +typedef struct _TRACEBUF_LOG_ { + RGXFW_LOG_SFids eSFId; + const IMG_CHAR *pszName; + const IMG_CHAR *pszFmt; + IMG_UINT32 ui32ArgNum; +} TRACEBUF_LOG; + +static const TRACEBUF_LOG aLogDefinitions[] = +{ +#define X(a, b, c, d, e) {RGXFW_LOG_CREATESFID(a,b,e), #c, d, e}, + RGXFW_LOG_SFIDLIST +#undef X +}; + +#define NARGS_MASK ~(0xF<<16) +static IMG_BOOL _FirmwareTraceIntegrityCheck(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + const TRACEBUF_LOG *psLogDef = &aLogDefinitions[0]; + IMG_BOOL bIntegrityOk = IMG_TRUE; + + /* + * For every log ID, check the format string and number of arguments is valid. + */ + while (psLogDef->eSFId != RGXFW_SF_LAST) + { + const TRACEBUF_LOG *psLogDef2; + const IMG_CHAR *pszString; + IMG_UINT32 ui32Count; + + /* + * Check the number of arguments matches the number of '%' in the string and + * check that no string uses %s which is not supported as it requires a + * pointer to memory that is not going to be valid. + */ + pszString = psLogDef->pszFmt; + ui32Count = 0; + + while (*pszString != '\0') + { + if (*pszString++ == '%') + { + ui32Count++; + if (*pszString == 's') + { + bIntegrityOk = IMG_FALSE; + PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has an unsupported type not recognized (fmt: %%%c). Please fix.", + psLogDef->pszName, *pszString); + } + else if (*pszString == '%') + { + /* Double % is a printable % sign and not a format string... */ + ui32Count--; + } + } + } + + if (ui32Count != psLogDef->ui32ArgNum) + { + bIntegrityOk = IMG_FALSE; + PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but only %d are specified. Please fix.", + psLogDef->pszName, ui32Count, psLogDef->ui32ArgNum); + } + + /* RGXDumpFirmwareTrace() has a hardcoded limit of supporting up to 20 arguments... */ + if (ui32Count > 20) + { + bIntegrityOk = IMG_FALSE; + PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but a maximum of 20 are supported. 
Please fix.", + psLogDef->pszName, ui32Count); + } + + /* Check the id number is unique (don't take into account the number of arguments) */ + ui32Count = 0; + psLogDef2 = &aLogDefinitions[0]; + + while (psLogDef2->eSFId != RGXFW_SF_LAST) + { + if ((psLogDef->eSFId & NARGS_MASK) == (psLogDef2->eSFId & NARGS_MASK)) + { + ui32Count++; + } + psLogDef2++; + } + + if (ui32Count != 1) + { + bIntegrityOk = IMG_FALSE; + PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s id %x is not unique, there are %d more. Please fix.", + psLogDef->pszName, psLogDef->eSFId, ui32Count - 1); + } + + /* Move to the next log ID... */ + psLogDef++; + } + + return bIntegrityOk; +} + +typedef struct { + IMG_UINT16 ui16Mask; + const IMG_CHAR *pszStr; +} RGXFWT_DEBUG_INFO_MSKSTR; /* pair of bit mask and debug info message string */ + + +/*! +******************************************************************************* + + @Function RGXPrepareExtraDebugInfo + + @Description + + Prepares debug info string by decoding ui16DebugInfo value passed + + @Input pszBuffer - pointer to debug info string buffer + + @Return void + +******************************************************************************/ +static void RGXPrepareExtraDebugInfo(IMG_CHAR *pszBuffer, IMG_UINT32 ui32BufferSize, IMG_UINT16 ui16DebugInfo) +{ + const RGXFWT_DEBUG_INFO_MSKSTR aDebugInfoMskStr[] = + { +#define X(a, b) {a, b}, + RGXFWT_DEBUG_INFO_MSKSTRLIST +#undef X + }; + + IMG_UINT32 ui32NumFields = sizeof(aDebugInfoMskStr)/sizeof(RGXFWT_DEBUG_INFO_MSKSTR); + IMG_UINT32 i; + IMG_BOOL bHasExtraDebugInfo = IMG_FALSE; + + /* Add prepend string */ + OSStringLCopy(pszBuffer, RGXFWT_DEBUG_INFO_STR_PREPEND, ui32BufferSize); + + /* Add debug info strings */ + for (i = 0; i < ui32NumFields; i++) + { + if (ui16DebugInfo & aDebugInfoMskStr[i].ui16Mask) + { + if (bHasExtraDebugInfo) + { + OSStringLCat(pszBuffer, ", ", ui32BufferSize); /* Add comma separator */ + } + OSStringLCat(pszBuffer, aDebugInfoMskStr[i].pszStr, ui32BufferSize); + 
bHasExtraDebugInfo = IMG_TRUE; + } + } + + /* Add append string */ + OSStringLCat(pszBuffer, RGXFWT_DEBUG_INFO_STR_APPEND, ui32BufferSize); +} + +void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + static IMG_BOOL bIntegrityCheckPassed = IMG_FALSE; + + /* Check that the firmware trace is correctly defined... */ + if (!bIntegrityCheckPassed) + { + bIntegrityCheckPassed = _FirmwareTraceIntegrityCheck(pfnDumpDebugPrintf, pvDumpDebugFile); + if (!bIntegrityCheckPassed) + { + return; + } + } + + /* Dump FW trace information... */ + if (psRGXFWIfTraceBufCtl != NULL) + { + IMG_UINT32 tid; + IMG_UINT32 ui32TraceBufSizeInDWords = psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords; + + /* Print the log type settings... */ + if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK) + { + PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")", + ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")), + RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType) + ); + } + else + { + PVR_DUMPDEBUG_LOG("Debug log type: none"); + } + + /* Print the decoded log for each thread... */ + for (tid = 0; tid < RGXFW_THREAD_NUM; tid++) + { + IMG_UINT32 *pui32TraceBuf = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer; + IMG_UINT32 ui32TracePtr = psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer; + IMG_UINT32 ui32Count = 0; + + if (pui32TraceBuf == NULL) + { + /* trace buffer not yet allocated */ + continue; + } + + while (ui32Count < ui32TraceBufSizeInDWords) + { + IMG_UINT32 ui32Data, ui32DataToId; + + /* Find the first valid log ID, skipping whitespace... */ + do + { + ui32Data = pui32TraceBuf[ui32TracePtr]; + ui32DataToId = idToStringID(ui32Data, SFs); + + /* If an unrecognized id is found it may be inconsistent data or a firmware trace error. 
*/ + if (ui32DataToId == RGXFW_SF_LAST && RGXFW_LOG_VALIDID(ui32Data)) + { + PVR_DUMPDEBUG_LOG("WARNING: Unrecognized id (%x). From here on the trace might be wrong!", ui32Data); + } + + /* Update the trace pointer... */ + ui32TracePtr = (ui32TracePtr + 1) % ui32TraceBufSizeInDWords; + ui32Count++; + } while ((RGXFW_SF_LAST == ui32DataToId || ui32DataToId >= RGXFW_SF_FIRST) && + ui32Count < ui32TraceBufSizeInDWords); + + if (ui32Count < ui32TraceBufSizeInDWords) + { + IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN] = "%" IMG_UINT64_FMTSPEC ":T%u-%s> "; + IMG_CHAR szDebugInfoBuffer[RGXFWT_DEBUG_INFO_STR_MAXLEN] = ""; + IMG_UINT64 ui64Timestamp; + IMG_UINT16 ui16DebugInfo; + + /* If we hit the ASSERT message then this is the end of the log... */ + if (ui32Data == RGXFW_SF_MAIN_ASSERT_FAILED) + { + PVR_DUMPDEBUG_LOG("ASSERTION %s failed at %s:%u", + psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szInfo, + psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szPath, + psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.ui32LineNum); + break; + } + + ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 0) % ui32TraceBufSizeInDWords]) << 32 | + (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 1) % ui32TraceBufSizeInDWords]); + + ui16DebugInfo = (IMG_UINT16) ((ui64Timestamp & ~RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK) >> RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT); + ui64Timestamp = (ui64Timestamp & ~RGXFWT_TIMESTAMP_TIME_CLRMSK) >> RGXFWT_TIMESTAMP_TIME_SHIFT; + + /* + * Print the trace string and provide up to 20 arguments which + * printf function will be able to use. We have already checked + * that no string uses more than this. 
+ */ + OSStringLCat(szBuffer, SFs[ui32DataToId].psName, PVR_MAX_DEBUG_MESSAGE_LEN); + + /* Check and append any extra debug info available */ + if (ui16DebugInfo) + { + /* Prepare debug info string */ + RGXPrepareExtraDebugInfo(szDebugInfoBuffer, RGXFWT_DEBUG_INFO_STR_MAXLEN, ui16DebugInfo); + + /* Append debug info string */ + OSStringLCat(szBuffer, szDebugInfoBuffer, PVR_MAX_DEBUG_MESSAGE_LEN); + } + + PVR_DUMPDEBUG_LOG(szBuffer, ui64Timestamp, tid, groups[RGXFW_SF_GID(ui32Data)], + pui32TraceBuf[(ui32TracePtr + 2) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 3) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 4) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 5) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 6) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 7) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 8) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 9) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 10) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 11) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 12) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 13) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 14) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 15) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 16) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 17) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 18) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 19) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 20) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 21) % ui32TraceBufSizeInDWords]); + + /* Update the trace pointer... 
*/ + ui32TracePtr = (ui32TracePtr + 2 + RGXFW_SF_PARAMNUM(ui32Data)) % ui32TraceBufSizeInDWords; + ui32Count = (ui32Count + 2 + RGXFW_SF_PARAMNUM(ui32Data)); + } + } + } + } +} + +#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) +void RGXDumpPowerMonitoring(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + + /* Print the power monitoring counters... */ + if (psFwSysData != NULL) + { + IMG_UINT32 *pui32TraceBuf = psFwSysData->sPowerMonBuf.pui32TraceBuffer; + IMG_UINT32 ui32TracePtr = 0; //psFwSysData->sPowerMonBuf.ui32TracePointer; + IMG_UINT32 ui32PowerMonBufSizeInDWords = psFwSysData->ui32PowerMonBufSizeInDWords; + IMG_UINT32 ui32Count = 0; + IMG_UINT64 ui64Timestamp; + + if (pui32TraceBuf == NULL) + { + /* power monitoring buffer not yet allocated */ + return; + } + + if (pui32TraceBuf[ui32TracePtr] != RGX_CR_TIMER) + { + PVR_DPF((PVR_DBG_WARNING, "Power monitoring data not available.")); + return; + } + ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 1) % ui32PowerMonBufSizeInDWords]) << 32 | + (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords]); + + /* Update the trace pointer... 
*/ + ui32TracePtr = (ui32TracePtr + 3) % ui32PowerMonBufSizeInDWords; + ui32Count = (ui32Count + 3); + + PVR_DPF((PVR_DBG_WARNING, "Dumping power monitoring buffer: CPUVAddr = %p, pointer = 0x%x, size = 0x%x", + pui32TraceBuf, + ui32TracePtr, + ui32PowerMonBufSizeInDWords)); + + while (ui32Count < ui32PowerMonBufSizeInDWords) + { + /* power monitoring data is (register, value) dword pairs */ + PVR_DUMPDEBUG_LOG("%" IMG_UINT64_FMTSPEC ":POWMON 0x%08x 0x%08x 0x%08x 0x%08x", + ui64Timestamp, + pui32TraceBuf[(ui32TracePtr + 0) % ui32PowerMonBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 1) % ui32PowerMonBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 3) % ui32PowerMonBufSizeInDWords]); + + if (pui32TraceBuf[(ui32TracePtr + 0) % ui32PowerMonBufSizeInDWords] == RGXFWIF_TIMEDIFF_ID || + pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords] == RGXFWIF_TIMEDIFF_ID) + { + /* end of buffer */ + break; + } + + /* Update the trace pointer... 
*/ + ui32TracePtr = (ui32TracePtr + 4) % ui32PowerMonBufSizeInDWords; + ui32Count = (ui32Count + 4); + } + } +} +#endif + +static const IMG_CHAR *_RGXGetDebugDevStateString(PVRSRV_DEVICE_STATE eDevState) +{ + switch (eDevState) + { + case PVRSRV_DEVICE_STATE_INIT: + return "Initialising"; + case PVRSRV_DEVICE_STATE_ACTIVE: + return "Active"; + case PVRSRV_DEVICE_STATE_DEINIT: + return "De-initialising"; + case PVRSRV_DEVICE_STATE_BAD: + return "Bad"; + case PVRSRV_DEVICE_STATE_UNDEFINED: + PVR_ASSERT(!"Device has undefined state"); + __fallthrough; + default: + return "Unknown"; + } +} + +static const IMG_CHAR* _RGXGetDebugDevPowerStateString(PVRSRV_DEV_POWER_STATE ePowerState) +{ + switch (ePowerState) + { + case PVRSRV_DEV_POWER_STATE_DEFAULT: return "DEFAULT"; + case PVRSRV_DEV_POWER_STATE_OFF: return "OFF"; + case PVRSRV_DEV_POWER_STATE_ON: return "ON"; + default: return "UNKNOWN"; + } +} + +PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ +#if !defined(NO_HARDWARE) + IMG_UINT32 ui32Meta = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) ? 
RGX_GET_FEATURE_VALUE(psDevInfo, META) : 0; + IMG_UINT32 ui32RegVal; +#endif + IMG_BOOL bFirmwarePerf; + void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; + + /* Check if firmware perf was set at Init time */ + bFirmwarePerf = (psDevInfo->psRGXFWIfSysInit->eFirmwarePerf != FW_PERF_CONF_NONE); + +/* Helper macros to emit data */ +#define REG32_FMTSPEC "%-30s: 0x%08X" +#define REG64_FMTSPEC "%-30s: 0x%016" IMG_UINT64_FMTSPECX +#define DDLOG32(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, RGX_CR_##R)); +#define DDLOG64(R) PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, RGX_CR_##R)); +#define DDLOG32_DPX(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, DPX_CR_##R)); +#define DDLOG64_DPX(R) PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, DPX_CR_##R)); +#define DDLOGVAL32(S,V) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, S, V); +#define DDLOG32UNPACKED(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, RGX_CR_##R##__META_REGISTER_UNPACKED_ACCESSES)); + +#if defined(NO_HARDWARE) + /* OSReadHWReg variants don't use params passed in NoHW builds */ + PVR_UNREFERENCED_PARAMETER(pvRegsBaseKM); +#endif + + DDLOG64(CORE_ID); + DDLOG32(EVENT_STATUS); + DDLOG64(TIMER); + DDLOG64(MMU_FAULT_STATUS1); + DDLOG64(MMU_FAULT_STATUS2); + DDLOG64(MMU_FAULT_STATUS_PM); + DDLOG64(MMU_FAULT_STATUS_META); + DDLOG64(SLC_STATUS1); + DDLOG64(SLC_STATUS2); + DDLOG64(SLC_STATUS_DEBUG); + DDLOG32(SPU_ENABLE); + + DDLOG64(CONTEXT_MAPPING0); + DDLOG64(CONTEXT_MAPPING2); + DDLOG64(CONTEXT_MAPPING3); + DDLOG64(CONTEXT_MAPPING4); + + DDLOG32(PERF_PHASE_2D); + DDLOG32(PERF_CYCLE_2D_TOTAL); + DDLOG32(PERF_PHASE_GEOM); + DDLOG32(PERF_CYCLE_GEOM_TOTAL); + DDLOG32(PERF_PHASE_FRAG); + DDLOG32(PERF_CYCLE_FRAG_TOTAL); + DDLOG32(PERF_CYCLE_GEOM_OR_FRAG_TOTAL); + DDLOG32(PERF_CYCLE_GEOM_AND_FRAG_TOTAL); + DDLOG32(PERF_PHASE_COMP); + DDLOG32(PERF_CYCLE_COMP_TOTAL); + DDLOG32(PM_PARTIAL_RENDER_ENABLE); + + DDLOG32(ISP_RENDER); + 
DDLOG32(ISP_CTL); + + DDLOG32(MTS_INTCTX); + DDLOG32(MTS_BGCTX); + DDLOG32(MTS_BGCTX_COUNTED_SCHEDULE); + DDLOG32(MTS_SCHEDULE); + DDLOG32(MTS_GPU_INT_STATUS); + + DDLOG32(CDM_CONTEXT_STORE_STATUS); + DDLOG64(CDM_CONTEXT_PDS0); + DDLOG64(CDM_CONTEXT_PDS1); + DDLOG64(CDM_TERMINATE_PDS); + DDLOG64(CDM_TERMINATE_PDS1); + DDLOG64(CDM_CONTEXT_LOAD_PDS0); + DDLOG64(CDM_CONTEXT_LOAD_PDS1); + + DDLOG32(JONES_IDLE); + DDLOG32(SLC_IDLE); + DDLOG32(SLC_FAULT_STOP_STATUS); + + DDLOG64(SCRATCH0); + DDLOG64(SCRATCH1); + DDLOG64(SCRATCH2); + DDLOG64(SCRATCH3); + DDLOG64(SCRATCH4); + DDLOG64(SCRATCH5); + DDLOG64(SCRATCH6); + DDLOG64(SCRATCH7); + DDLOG64(SCRATCH8); + DDLOG64(SCRATCH9); + DDLOG64(SCRATCH10); + DDLOG64(SCRATCH11); + DDLOG64(SCRATCH12); + DDLOG64(SCRATCH13); + DDLOG64(SCRATCH14); + DDLOG64(SCRATCH15); + DDLOG32(IRQ_OS0_EVENT_STATUS); + +#if !defined(NO_HARDWARE) + if (ui32Meta) + { + PVRSRV_ERROR eError; + IMG_BOOL bIsT0Enabled = IMG_FALSE, bIsFWFaulted = IMG_FALSE; + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) + { + DDLOG32UNPACKED(META_SP_MSLVIRQSTATUS); + } + else + { + DDLOG32(META_SP_MSLVIRQSTATUS); + } + + eError = RGXReadWithSP(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegVal); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXReadWithSP"); + DDLOGVAL32("T0 TXENABLE", ui32RegVal); + if (ui32RegVal & META_CR_TXENABLE_ENABLE_BIT) + { + bIsT0Enabled = IMG_TRUE; + } + + eError = RGXReadWithSP(psDevInfo, META_CR_T0STATUS_OFFSET, &ui32RegVal); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXReadWithSP"); + DDLOGVAL32("T0 TXSTATUS", ui32RegVal); + + /* check for FW fault */ + if (((ui32RegVal >> 20) & 0x3) == 0x2) + { + bIsFWFaulted = IMG_TRUE; + } + + eError = RGXReadWithSP(psDevInfo, META_CR_T0DEFR_OFFSET, &ui32RegVal); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXReadWithSP"); + DDLOGVAL32("T0 TXDEFR", ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PC, &ui32RegVal); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXReadWithSP"); + DDLOGVAL32("T0 PC", 
ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PCX, &ui32RegVal); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXReadWithSP"); + DDLOGVAL32("T0 PCX", ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_SP, &ui32RegVal); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXReadWithSP"); + DDLOGVAL32("T0 SP", ui32RegVal); + + if ((ui32Meta == MTP218) || (ui32Meta == MTP219)) + { + eError = RGXReadWithSP(psDevInfo, META_CR_T1ENABLE_OFFSET, &ui32RegVal); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXReadWithSP"); + DDLOGVAL32("T1 TXENABLE", ui32RegVal); + + eError = RGXReadWithSP(psDevInfo, META_CR_T1STATUS_OFFSET, &ui32RegVal); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXReadWithSP"); + DDLOGVAL32("T1 TXSTATUS", ui32RegVal); + + eError = RGXReadWithSP(psDevInfo, META_CR_T1DEFR_OFFSET, &ui32RegVal); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXReadWithSP"); + DDLOGVAL32("T1 TXDEFR", ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PC, &ui32RegVal); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXReadWithSP"); + DDLOGVAL32("T1 PC", ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PCX, &ui32RegVal); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXReadWithSP"); + DDLOGVAL32("T1 PCX", ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_SP, &ui32RegVal); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXReadWithSP"); + DDLOGVAL32("T1 SP", ui32RegVal); + } + + if (bFirmwarePerf) + { + eError = RGXReadWithSP(psDevInfo, META_CR_PERF_COUNT0, &ui32RegVal); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXReadWithSP"); + DDLOGVAL32("META_CR_PERF_COUNT0", ui32RegVal); + + eError = RGXReadWithSP(psDevInfo, META_CR_PERF_COUNT1, &ui32RegVal); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXReadWithSP"); + DDLOGVAL32("META_CR_PERF_COUNT1", ui32RegVal); + } + +#if !defined(SUPPORT_TRUSTED_DEVICE) + if (bIsT0Enabled & bIsFWFaulted) + { + PVRSRV_ERROR eError; + eError = _ValidateFWImageForMETA(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + if (eError != PVRSRV_OK) + { + 
PVR_DUMPDEBUG_LOG("Failed to validate any FW code corruption"); + } + } + else if (bIsFWFaulted) + { + PVR_DUMPDEBUG_LOG("Skipping FW code memory corruption checking as META is disabled"); + } +#endif + } +#endif + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RGXDebugRequestProcess + + @Description + + This function will print out the debug for the specified level of verbosity + + @Input pfnDumpDebugPrintf - Optional replacement print function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psDevInfo - RGX device info + @Input ui32VerbLevel - Verbosity level + + @Return void + +******************************************************************************/ +static +void RGXDebugRequestProcess(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32VerbLevel) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; + PVRSRV_DEV_POWER_STATE ePowerState; + IMG_BOOL bRGXPoweredON; + IMG_UINT8 ui8FwOsCount; + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + RGXFWIF_OSDATA *psFwOsData = psDevInfo->psRGXFWIfFwOsData; + IMG_BOOL bPwrLockAlreadyHeld; + + bPwrLockAlreadyHeld = PVRSRVPwrLockIsLockedByMe(psDeviceNode); + if (!bPwrLockAlreadyHeld) + { + /* Only acquire the power-lock if not already held by the calling context */ + eError = PVRSRVPowerLock(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire lock (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return; + } + } + + ui8FwOsCount = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.sInitOptions.ui8OsCountSupport; + + eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error retrieving RGX power state. 
No debug info dumped.", + __func__)); + goto Exit; + } + + if (psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated) + { + PVR_DUMP_FIRMWARE_INFO(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks); + } + else + { + PVR_DUMPDEBUG_LOG("FW info: UNINITIALIZED"); + } + + if ((PVRSRV_VZ_MODE_IS(NATIVE) && (ui8FwOsCount > 1)) || + (PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_OS_SUPPORTED))) + { + PVR_DUMPDEBUG_LOG("Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)", + (PVRSRV_VZ_MODE_IS(NATIVE)) ? (1) : (RGX_NUM_OS_SUPPORTED), ui8FwOsCount); + } + + PVR_DUMPDEBUG_LOG("------[ RGX Device: Start ]------"); + + bRGXPoweredON = (ePowerState == PVRSRV_DEV_POWER_STATE_ON); + + PVR_DUMPDEBUG_LOG("------[ RGX Info ]------"); + PVR_DUMPDEBUG_LOG("RGX BVNC: %d.%d.%d.%d (%s)", psDevInfo->sDevFeatureCfg.ui32B, + psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, + psDevInfo->sDevFeatureCfg.ui32C, + PVR_ARCH_NAME); + PVR_DUMPDEBUG_LOG("RGX Device State: %s", _RGXGetDebugDevStateString(psDeviceNode->eDevState)); + PVR_DUMPDEBUG_LOG("RGX Power State: %s", _RGXGetDebugDevPowerStateString(ePowerState)); + + RGXDumpRGXDebugSummary(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, bRGXPoweredON); + + /* Dump out the kernel CCB. */ + { + RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl; + + if (psKCCBCtl != NULL) + { + PVR_DUMPDEBUG_LOG("RGX Kernel CCB WO:0x%X RO:0x%X", + psKCCBCtl->ui32WriteOffset, + psKCCBCtl->ui32ReadOffset); + } + } + + /* Dump out the firmware CCB. */ + { + RGXFWIF_CCB_CTL *psFCCBCtl = psDevInfo->psFirmwareCCBCtl; + + if (psFCCBCtl != NULL) + { + PVR_DUMPDEBUG_LOG("RGX Firmware CCB WO:0x%X RO:0x%X", + psFCCBCtl->ui32WriteOffset, + psFCCBCtl->ui32ReadOffset); + } + } + + if (psFwOsData != NULL) + { +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) + /* Dump out the checkpoint CCB offsets. 
*/ + { + RGXFWIF_CCB_CTL *psCheckpointCCBCtl = psDevInfo->psCheckpointCCBCtl; + + if (psCheckpointCCBCtl != NULL) + { + PVR_DUMPDEBUG_LOG("RGX Checkpoint CCB WO:0x%X RO:0x%X (Check State: FW=%#X, HOST=%#X)", + psCheckpointCCBCtl->ui32WriteOffset, + psCheckpointCCBCtl->ui32ReadOffset, + psFwOsData->ui32FWSyncCheckMark, + psFwOsData->ui32HostSyncCheckMark); + } + } +#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */ + + /* Dump the KCCB commands executed */ + PVR_DUMPDEBUG_LOG("RGX Kernel CCB commands executed = %d", + psFwOsData->ui32KCCBCmdsExecuted); + +#if defined(PVRSRV_STALLED_CCB_ACTION) + /* Dump the number of times we have performed a forced UFO update, + * and (if non-zero) the timestamp of the most recent occurrence/ + */ + PVR_DUMPDEBUG_LOG("RGX SLR: Forced UFO updates requested = %d", + psFwOsData->ui32ForcedUpdatesRequested); + if (psFwOsData->ui32ForcedUpdatesRequested > 0) + { + IMG_UINT8 ui8Idx; + IMG_UINT64 ui64Seconds, ui64Nanoseconds; + + if (psFwOsData->ui64LastForcedUpdateTime > 0ULL) + { + ConvertOSTimestampToSAndNS(psFwOsData->ui64LastForcedUpdateTime, &ui64Seconds, &ui64Nanoseconds); + PVR_DUMPDEBUG_LOG("RGX SLR: (most recent forced update was around %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ")", + ui64Seconds, ui64Nanoseconds); + } + else + { + PVR_DUMPDEBUG_LOG("RGX SLR: (unable to force update as fence contained no sync checkpoints)"); + } + /* Dump SLR log */ + if (psFwOsData->sSLRLogFirst.aszCCBName[0]) + { + ConvertOSTimestampToSAndNS(psFwOsData->sSLRLogFirst.ui64Timestamp, &ui64Seconds, &ui64Nanoseconds); + PVR_DUMPDEBUG_LOG("RGX SLR:{%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC + "} Fence found on context 0x%x '%s' has %d UFOs", + ui64Seconds, ui64Nanoseconds, + psFwOsData->sSLRLogFirst.ui32FWCtxAddr, + psFwOsData->sSLRLogFirst.aszCCBName, + psFwOsData->sSLRLogFirst.ui32NumUFOs); + } + for (ui8Idx=0; ui8IdxsSLRLog[ui8Idx].aszCCBName[0]) + { + ConvertOSTimestampToSAndNS(psFwOsData->sSLRLog[ui8Idx].ui64Timestamp, &ui64Seconds, 
&ui64Nanoseconds); + PVR_DUMPDEBUG_LOG("RGX SLR:[%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC + "] Fence found on context 0x%x '%s' has %d UFOs", + ui64Seconds, ui64Nanoseconds, + psFwOsData->sSLRLog[ui8Idx].ui32FWCtxAddr, + psFwOsData->sSLRLog[ui8Idx].aszCCBName, + psFwOsData->sSLRLog[ui8Idx].ui32NumUFOs); + } + } + } +#else + PVR_DUMPDEBUG_LOG("RGX SLR: Disabled"); +#endif + + /* Dump the IRQ info for threads */ + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + IMG_UINT32 ui32TID; + + for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++) + { + PVR_DUMPDEBUG_LOG("RGX FW thread %u: FW IRQ count = %u, Last sampled IRQ count in LISR = %u", + ui32TID, + psDevInfo->psRGXFWIfFwSysData->aui32InterruptCount[ui32TID], + psDevInfo->aui32SampleIRQCount[ui32TID]); + } + } + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Dump out the Workload estimation CCB. */ + { + RGXFWIF_CCB_CTL *psWorkEstCCBCtl = psDevInfo->psWorkEstFirmwareCCBCtl; + + if (psWorkEstCCBCtl != NULL) + { + PVR_DUMPDEBUG_LOG("RGX WorkEst CCB WO:0x%X RO:0x%X", + psWorkEstCCBCtl->ui32WriteOffset, + psWorkEstCCBCtl->ui32ReadOffset); + } + } +#endif + + /* Dump the FW Sys config flags on the Host */ + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + IMG_CHAR sFwSysFlagsDescription[MAX_FW_DESCRIPTION_LENGTH]; + + if (!psFwSysData) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Fw Sys Data is not mapped into CPU space", __func__)); + goto Exit; + } + + _GetFwSysFlagsDescription(sFwSysFlagsDescription, MAX_FW_DESCRIPTION_LENGTH, psFwSysData->ui32ConfigFlags); + PVR_DUMPDEBUG_LOG("FW System config flags = 0x%08X (%s)", psFwSysData->ui32ConfigFlags, sFwSysFlagsDescription); + } + + /* Dump the FW OS config flags */ + { + RGXFWIF_OSDATA *psFwOsData = psDevInfo->psRGXFWIfFwOsData; + IMG_CHAR sFwOsFlagsDescription[MAX_FW_DESCRIPTION_LENGTH]; + + if (!psFwOsData) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Fw Os Data is not mapped into CPU space", __func__)); + goto Exit; + } + + 
_GetFwOsFlagsDescription(sFwOsFlagsDescription, MAX_FW_DESCRIPTION_LENGTH, psFwOsData->ui32FwOsConfigFlags); + PVR_DUMPDEBUG_LOG("FW OS config flags = 0x%08X (%s)", psFwOsData->ui32FwOsConfigFlags, sFwOsFlagsDescription); + } + + if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(GUEST)) + { + + PVR_DUMPDEBUG_LOG("------[ RGX registers ]------"); + PVR_DUMPDEBUG_LOG("RGX Register Base Address (Linear): 0x%p", psDevInfo->pvRegsBaseKM); + PVR_DUMPDEBUG_LOG("RGX Register Base Address (Physical): 0x%08lX", (unsigned long)psDevInfo->sRegsPhysBase.uiAddr); + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + /* Forcing bit 6 of MslvCtrl1 to 0 to avoid internal reg read going through the core */ + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) + { + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES, 0x0); + } + else + { + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL1, 0x0); + } + } + + eError = RGXDumpRGXRegisters(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: RGXDumpRegisters failed (%s)", + __func__, + PVRSRVGetErrorString(eError))); +#if defined(SUPPORT_EXTRA_METASP_DEBUG) + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + PVR_DPF((PVR_DBG_ERROR, "Dump Slave Port debug information")); + _RGXDumpMetaSPExtraDebugInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + } +#endif + } + } + else + { + PVR_DUMPDEBUG_LOG(" (!) %s. No registers dumped", PVRSRV_VZ_MODE_IS(GUEST) ? 
"Guest Mode of operation" : "RGX power is down"); + } + + PVR_DUMPDEBUG_LOG("------[ RGX FW Trace Info ]------"); + + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) + { + IMG_INT tid; + /* Dump FW trace information */ + if (psRGXFWIfTraceBufCtl != NULL) + { + for (tid = 0 ; tid < RGXFW_THREAD_NUM ; tid++) + { + IMG_UINT32 i; + IMG_BOOL bPrevLineWasZero = IMG_FALSE; + IMG_BOOL bLineIsAllZeros = IMG_FALSE; + IMG_UINT32 ui32CountLines = 0; + IMG_UINT32 *pui32TraceBuffer; + IMG_CHAR *pszLine; + + if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK) + { + PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")", + ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")), + RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType) + ); + } + else + { + PVR_DUMPDEBUG_LOG("Debug log type: none"); + } + + pui32TraceBuffer = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer; + + /* Skip if trace buffer is not allocated */ + if (pui32TraceBuffer == NULL) + { + PVR_DUMPDEBUG_LOG("RGX FW thread %d: Trace buffer not yet allocated",tid); + continue; + } + +/* Max number of DWords to be printed per line, in debug dump output */ +#define PVR_DD_FW_TRACEBUF_LINESIZE 30U + /* each element in the line is 8 characters plus a space. The '+ 1' is because of the final trailing '\0'. 
*/ + pszLine = OSAllocMem(9 * PVR_DD_FW_TRACEBUF_LINESIZE + 1); + if (pszLine == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Out of mem allocating line string (size: %d)", + __func__, + 9 * PVR_DD_FW_TRACEBUF_LINESIZE + 1)); + goto Exit; + } + + PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace START ]------", tid); + PVR_DUMPDEBUG_LOG("FWT[traceptr]: %X", psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer); + PVR_DUMPDEBUG_LOG("FWT[tracebufsize]: %X", psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords); + + for (i = 0; i < psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords; i += PVR_DD_FW_TRACEBUF_LINESIZE) + { + IMG_UINT32 k = 0; + IMG_UINT32 ui32Line = 0x0; + IMG_UINT32 ui32LineOffset = i*sizeof(IMG_UINT32); + IMG_CHAR *pszBuf = pszLine; + + for (k = 0; k < PVR_DD_FW_TRACEBUF_LINESIZE; k++) + { + if ((i + k) >= psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords) + { + /* Stop reading when the index goes beyond trace buffer size. This condition is + * hit during printing the last line in DD when ui32TraceBufSizeInDWords is not + * a multiple of PVR_DD_FW_TRACEBUF_LINESIZE */ + break; + } + + ui32Line |= pui32TraceBuffer[i + k]; + + /* prepare the line to print it. The '+1' is because of the trailing '\0' added */ + OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32TraceBuffer[i + k]); + pszBuf += 9; /* write over the '\0' */ + } + + bLineIsAllZeros = (ui32Line == 0x0); + + if (bLineIsAllZeros) + { + if (bPrevLineWasZero) + { + ui32CountLines++; + } + else + { + bPrevLineWasZero = IMG_TRUE; + ui32CountLines = 1; + PVR_DUMPDEBUG_LOG("FWT[%08x]: 00000000 ... 
00000000", ui32LineOffset); + } + } + else + { + if (bPrevLineWasZero && ui32CountLines > 1) + { + PVR_DUMPDEBUG_LOG("FWT[...]: %d lines were all zero", ui32CountLines); + } + bPrevLineWasZero = IMG_FALSE; + + PVR_DUMPDEBUG_LOG("FWT[%08x]:%s", ui32LineOffset, pszLine); + } + + } + if (bPrevLineWasZero) + { + PVR_DUMPDEBUG_LOG("FWT[END]: %d lines were all zero", ui32CountLines); + } + + PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace END ]------", tid); + + OSFreeMem(pszLine); + } + } + + { + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) + { + PVR_DUMPDEBUG_LOG("------[ Full CCB Status ]------"); + } + else + { + PVR_DUMPDEBUG_LOG("------[ Stalled FWCtxs ]------"); + } + + DumpRenderCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + DumpComputeCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + + DumpTDMTransferCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + + DumpKickSyncCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + } + } + + PVR_DUMPDEBUG_LOG("------[ RGX Device: End ]------"); + +Exit: + if (!bPwrLockAlreadyHeld) + { + PVRSRVPowerUnlock(psDeviceNode); + } +} + +/*! 
+ ****************************************************************************** + + @Function RGXDebugRequestNotify + + @Description Dump the debug data for RGX + + ******************************************************************************/ +static void RGXDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgReqestHandle, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = hDbgReqestHandle; + + /* Only action the request if we've fully init'ed */ + if (psDevInfo->bDevInit2Done) + { + RGXDebugRequestProcess(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui32VerbLevel); + } +} + +PVRSRV_ERROR RGXDebugInit(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + return PVRSRVRegisterDbgRequestNotify(&psDevInfo->hDbgReqNotify, + psDevInfo->psDeviceNode, + RGXDebugRequestNotify, + DEBUG_REQUEST_SYS, + psDevInfo); +} + +PVRSRV_ERROR RGXDebugDeinit(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + if (psDevInfo->hDbgReqNotify) + { + return PVRSRVUnregisterDbgRequestNotify(psDevInfo->hDbgReqNotify); + } + + /* No notifier registered */ + return PVRSRV_OK; +} + +/****************************************************************************** + End of file (rgxdebug.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxdebug.h b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxdebug.h new file mode 100644 index 000000000000..4eb91876cd5a --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxdebug.h @@ -0,0 +1,227 @@ +/*************************************************************************/ /*! +@File +@Title RGX debug header file +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the RGX debugging functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__RGXDEBUG_H__) +#define __RGXDEBUG_H__ + +#include "pvrsrv_error.h" +#include "img_types.h" +#include "device.h" +#include "pvr_notifier.h" +#include "pvrsrv.h" +#include "rgxdevice.h" + +/** + * Debug utility macro for printing FW IRQ count and Last sampled IRQ count in + * LISR for each RGX FW thread. + * Macro takes pointer to PVRSRV_RGXDEV_INFO as input. + */ +#define RGXDEBUG_PRINT_IRQ_COUNT(psRgxDevInfo) \ + do \ + { \ + IMG_UINT32 ui32TID; \ + for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++) \ + { \ + PVR_DPF((DBGPRIV_VERBOSE, \ + "RGX FW thread %u: FW IRQ count = %u, Last sampled IRQ count in LISR = %u)", \ + ui32TID, \ + (psRgxDevInfo)->psRGXFWIfFwSysData->aui32InterruptCount[ui32TID], \ + (psRgxDevInfo)->aui32SampleIRQCount[ui32TID])); \ + } \ + } while (0) + +/*! +******************************************************************************* + + @Function RGXDumpRGXRegisters + + @Description + + Dumps an extensive list of RGX registers required for debugging + + @Input pfnDumpDebugPrintf - Optional replacement print function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psDevInfo - RGX device info + + @Return PVRSRV_ERROR PVRSRV_OK on success, error code otherwise + +******************************************************************************/ +PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! +******************************************************************************* + + @Function RGXDumpFirmwareTrace + + @Description Dumps the decoded version of the firmware trace buffer. 
+ + Dump useful debugging info + + @Input pfnDumpDebugPrintf - Optional replacement print function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psDevInfo - RGX device info + + @Return void + +******************************************************************************/ +void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo); + +#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) +void RGXDumpPowerMonitoring(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo); +#endif + +/*! +******************************************************************************* + + @Function RGXReadWithSP + + @Description + + Reads data from a memory location (FW memory map) using the META Slave Port + + @Input psDevInfo - Pointer to RGX DevInfo to be used while reading + @Input ui32FWAddr - 32 bit FW address + @Input pui32Value - When the read is successful, value at above FW address + is returned at this location + + @Return PVRSRV_ERROR PVRSRV_OK if read success, error code otherwise. +******************************************************************************/ +PVRSRV_ERROR RGXReadWithSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 *pui32Value); + +/*! +******************************************************************************* + + @Function RGXWriteWithSP + + @Description + + Writes data to a memory location (FW memory map) using the META Slave Port + + @Input psDevInfo - Pointer to RGX DevInfo to be used while writing + @Input ui32FWAddr - 32 bit FW address + + @Input ui32Value - 32 bit Value to write + + @Return PVRSRV_ERROR PVRSRV_OK if write success, error code otherwise. 
+******************************************************************************/ +PVRSRV_ERROR RGXWriteWithSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 ui32Value); + +#if defined(SUPPORT_EXTRA_METASP_DEBUG) +/*! +******************************************************************************* + + @Function ValidateFWOnLoad + + @Description Compare the Firmware image as seen from the CPU point of view + against the same memory area as seen from the META point of view + after first power up. + + @Input psDevInfo - Device Info + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo); +#endif + +/*! +******************************************************************************* + + @Function RGXDumpRGXDebugSummary + + @Description + + Dump a summary in human readable form with the RGX state + + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psDevInfo - RGX device info + @Input bRGXPoweredON - IMG_TRUE if RGX device is on + + @Return void + +******************************************************************************/ +void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_BOOL bRGXPoweredON); + +/*! +******************************************************************************* + + @Function RGXDebugInit + + @Description + + Setup debug requests, calls into PVRSRVRegisterDbgRequestNotify + + @Input psDevInfo RGX device info + @Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error + +******************************************************************************/ +PVRSRV_ERROR RGXDebugInit(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! 
+*******************************************************************************
+
+ @Function RGXDebugDeinit
+
+ @Description
+
+ Remove debug requests, calls into PVRSRVUnregisterDbgRequestNotify
+
+ @Input psDevInfo RGX device info (its hDbgReqNotify handle is unregistered)
+ @Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error
+
+******************************************************************************/
+PVRSRV_ERROR RGXDebugDeinit(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+#endif /* __RGXDEBUG_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxdevice.h b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxdevice.h new file mode 100644 index 000000000000..4698b2ea1acc --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxdevice.h @@ -0,0 +1,784 @@ +/*************************************************************************/ /*! +@File +@Title RGX device node header file +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the RGX device node +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__RGXDEVICE_H__) +#define __RGXDEVICE_H__ + +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_device_types.h" +#include "mmu_common.h" +#include "rgx_fwif_km.h" +#include "cache_ops.h" +#include "device.h" +#include "osfunc.h" +#include "rgxlayer_impl.h" +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +#include "hash.h" +#endif +typedef struct _RGX_SERVER_COMMON_CONTEXT_ RGX_SERVER_COMMON_CONTEXT; + +typedef struct { + IMG_UINT32 uiPadding; +} RGX_COMMON_CONTEXT_INFO; + + +/*! 
+ ****************************************************************************** + * Device state flags + *****************************************************************************/ +#define RGXKM_DEVICE_STATE_ZERO_FREELIST (0x1) /*!< Zeroing the physical pages of reconstructed free lists */ +#define RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN (0x2) /*!< Used to disable the Devices Watchdog logging */ +#if defined(SUPPORT_VALIDATION) +#define RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN (0x4) /*!< Used for HW validation to inject SPU power state mask change every DM kick */ +#endif +#define RGXKM_DEVICE_STATE_CCB_GROW_EN (0x8) /*!< Used to indicate CCB grow is permitted */ +#define RGXKM_DEVICE_STATE_MASK (0xF) + +/*! + ****************************************************************************** + * GPU DVFS Table + *****************************************************************************/ + +#define RGX_GPU_DVFS_TABLE_SIZE 32 +#define RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US 25000 /* Time required to calibrate a clock frequency the first time */ +#define RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US 150000 /* Time required for a recalibration after a DVFS transition */ +#define RGX_GPU_DVFS_PERIODIC_CALIBRATION_TIME_US 10000000 /* Time before the next periodic calibration and correlation */ + + +/*! + ****************************************************************************** + * Global flags for driver validation + *****************************************************************************/ +#define RGX_VAL_LS_EN (0x1U) /*!< Enable dual lockstep firmware */ +#define RGX_VAL_SIG_CHECK_NOERR_EN (0x2U) /*!< Enable signature check. Signatures must match */ +#define RGX_VAL_SIG_CHECK_ERR_EN (0x4U) /*!< Enable signature check. 
Signatures must not match */ + +typedef struct _GPU_FREQ_TRACKING_DATA_ +{ + /* Core clock speed estimated by the driver */ + IMG_UINT32 ui32EstCoreClockSpeed; + + /* Amount of successful calculations of the estimated core clock speed */ + IMG_UINT32 ui32CalibrationCount; +} GPU_FREQ_TRACKING_DATA; + +#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) +#define RGX_GPU_FREQ_TRACKING_SIZE 16 + +typedef struct +{ + IMG_UINT64 ui64BeginCRTimestamp; + IMG_UINT64 ui64BeginOSTimestamp; + + IMG_UINT64 ui64EndCRTimestamp; + IMG_UINT64 ui64EndOSTimestamp; + + IMG_UINT32 ui32EstCoreClockSpeed; + IMG_UINT32 ui32CoreClockSpeed; +} GPU_FREQ_TRACKING_HISTORY; +#endif + +typedef struct _RGX_GPU_DVFS_TABLE_ +{ + /* Beginning of current calibration period (in us) */ + IMG_UINT64 ui64CalibrationCRTimestamp; + IMG_UINT64 ui64CalibrationOSTimestamp; + + /* Calculated calibration period (in us) */ + IMG_UINT64 ui64CalibrationCRTimediff; + IMG_UINT64 ui64CalibrationOSTimediff; + + /* Current calibration period (in us) */ + IMG_UINT32 ui32CalibrationPeriod; + + /* System layer frequency table and frequency tracking data */ + IMG_UINT32 ui32FreqIndex; + IMG_UINT32 aui32GPUFrequency[RGX_GPU_DVFS_TABLE_SIZE]; + GPU_FREQ_TRACKING_DATA asTrackingData[RGX_GPU_DVFS_TABLE_SIZE]; + +#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) + IMG_UINT32 ui32HistoryIndex; + GPU_FREQ_TRACKING_HISTORY asTrackingHistory[RGX_GPU_FREQ_TRACKING_SIZE]; +#endif +} RGX_GPU_DVFS_TABLE; + + +/*! + ****************************************************************************** + * GPU utilisation statistics + *****************************************************************************/ + +typedef struct _RGXFWIF_GPU_UTIL_STATS_ +{ + IMG_BOOL bValid; /* If TRUE, statistics are valid. + FALSE if the driver couldn't get reliable stats. 
*/ + IMG_UINT64 ui64GpuStatActive; /* GPU active statistic */ + IMG_UINT64 ui64GpuStatBlocked; /* GPU blocked statistic */ + IMG_UINT64 ui64GpuStatIdle; /* GPU idle statistic */ + IMG_UINT64 ui64GpuStatCumulative; /* Sum of active/blocked/idle stats */ + IMG_UINT64 ui64TimeStamp; /* Timestamp of the most recent sample of the GPU stats */ +} RGXFWIF_GPU_UTIL_STATS; + + +typedef struct _RGX_REG_CONFIG_ +{ + IMG_BOOL bEnabled; + RGXFWIF_REG_CFG_TYPE eRegCfgTypeToPush; + IMG_UINT32 ui32NumRegRecords; + POS_LOCK hLock; +} RGX_REG_CONFIG; + +typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC; + +#if defined(SUPPORT_VALIDATION) +/** + * Structure containing information for calculating next SPU power domain state. + */ +typedef struct _RGX_POWER_DOMAIN_STATE_ +{ + /** + * Total number of power units in the core. + */ + IMG_UINT32 ui32PowUnitsCount; + /** + * Current power domain state + */ + IMG_UINT32 ui32CurrentState; + /** + * Stores last transition that happened for each power domain state. + */ + IMG_UINT32 *paui32LastTransition; +} RGX_POWER_DOMAIN_STATE; +#endif + +typedef struct _PVRSRV_DEVICE_FEATURE_CONFIG_ +{ + IMG_UINT64 ui64ErnsBrns; + IMG_UINT64 ui64Features; + IMG_UINT32 ui32B; + IMG_UINT32 ui32V; + IMG_UINT32 ui32N; + IMG_UINT32 ui32C; + IMG_UINT32 ui32FeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX]; + IMG_UINT32 ui32MAXDMCount; + IMG_UINT32 ui32MAXDMMTSCount; + IMG_UINT32 ui32MAXPowUnitCount; + IMG_PCHAR pszBVNCString; +}PVRSRV_DEVICE_FEATURE_CONFIG; + +/* This is used to get the value of a specific feature. + * Note that it will assert if the feature is disabled or value is invalid. */ +#define RGX_GET_FEATURE_VALUE(psDevInfo, Feature) \ + ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] ) + +/* This is used to check if the feature value (e.g. 
with an integer value) is available for the currently running BVNC or not */ +#define RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, Feature) \ + ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] < RGX_FEATURE_VALUE_DISABLED ) + +/* This is used to check if the Boolean feature (e.g. WITHOUT an integer value) is available for the currently running BVNC or not */ +#define RGX_IS_FEATURE_SUPPORTED(psDevInfo, Feature) \ + BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64Features, RGX_FEATURE_##Feature##_BIT_MASK) + +/* This is used to check if the ERN is available for the currently running BVNC or not */ +#define RGX_IS_ERN_SUPPORTED(psDevInfo, ERN) \ + BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64ErnsBrns, HW_ERN_##ERN##_BIT_MASK) + +/* This is used to check if the BRN is available for the currently running BVNC or not */ +#define RGX_IS_BRN_SUPPORTED(psDevInfo, BRN) \ + BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64ErnsBrns, FIX_HW_BRN_##BRN##_BIT_MASK) + +/* there is a corresponding define in rgxapi.h */ +#define RGX_MAX_TIMER_QUERIES 16U + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +/*! + * The host maintains a 512-deep cache of submitted workloads per device, + * i.e. a global look-up table for TA, 3D and compute (depending on the RGX + * hardware support present) + */ + +/* + * For the workload estimation return data array, the max amount of commands the + * MTS can have is 255, therefore 512 (LOG2 = 9) is large enough to account for + * all corner cases + */ +#define RETURN_DATA_ARRAY_SIZE_LOG2 (9) +#define RETURN_DATA_ARRAY_SIZE ((1UL) << RETURN_DATA_ARRAY_SIZE_LOG2) +#define RETURN_DATA_ARRAY_WRAP_MASK (RETURN_DATA_ARRAY_SIZE - 1) + +#define WORKLOAD_HASH_SIZE_LOG2 6 +#define WORKLOAD_HASH_SIZE ((1UL) << WORKLOAD_HASH_SIZE_LOG2) +#define WORKLOAD_HASH_WRAP_MASK (WORKLOAD_HASH_SIZE - 1) + +/*! + * Workload characteristics for supported data masters. + * All characteristics must match for the workload estimate to be used/updated. 
+ */ +typedef union _RGX_WORKLOAD_ +{ + struct + { + IMG_UINT32 ui32RenderTargetSize; + IMG_UINT32 ui32NumberOfDrawCalls; + IMG_UINT32 ui32NumberOfIndices; + IMG_UINT32 ui32NumberOfMRTs; + } sTA3D; + + struct + { + IMG_UINT32 ui32NumberOfWorkgroups; + IMG_UINT32 ui32NumberOfWorkitems; + } sCompute; + + struct + { + IMG_UINT32 ui32Characteristic1; + IMG_UINT32 ui32Characteristic2; + } sTransfer; +} RGX_WORKLOAD; + +/*! + * Host data used to match the return data (actual cycles count) to the + * submitted command packet. + * The hash table is a per-DM circular buffer containing a key based on the + * workload characteristics. On job completion, the oldest workload data + * is evicted if the CB is full and the driver matches the characteristics + * to the matching data. + * + * o If the driver finds a match the existing cycle estimate is averaged with + * the actual cycles used. + * o Otherwise a new hash entry is created with the actual cycles for this + * workload. + * + * Subsequently if a match is found during command submission, the estimate + * is passed to the scheduler, e.g. adjust the GPU frequency if PDVFS is enabled. + */ +typedef struct _WORKLOAD_MATCHING_DATA_ +{ + POS_LOCK psHashLock; + HASH_TABLE *psHashTable; /*! existing workload cycle estimates for this DM */ + RGX_WORKLOAD asHashKeys[WORKLOAD_HASH_SIZE]; + IMG_UINT64 aui64HashData[WORKLOAD_HASH_SIZE]; + IMG_UINT32 ui32HashArrayWO; /*! track the most recent workload estimates */ +} WORKLOAD_MATCHING_DATA; + +/*! + * A generic container for the workload matching data for GPU contexts: + * rendering (TA, 3D), compute, etc. 
+ */ +typedef struct _WORKEST_HOST_DATA_ +{ + union + { + struct + { + WORKLOAD_MATCHING_DATA sDataTA; /*!< matching data for TA commands */ + WORKLOAD_MATCHING_DATA sData3D; /*!< matching data for 3D commands */ + } sTA3D; + + struct + { + WORKLOAD_MATCHING_DATA sDataCDM; /*!< matching data for CDM commands */ + } sCompute; + + struct + { + WORKLOAD_MATCHING_DATA sDataTDM; /*!< matching data for TDM-TQ commands */ + } sTransfer; + } uWorkloadMatchingData; + + /* + * This is a per-context property, hence the TA and 3D share the same + * per render context counter. + */ + IMG_UINT32 ui32WorkEstCCBReceived; /*!< Used to ensure all submitted work + estimation commands are received + by the host before clean up. */ +} WORKEST_HOST_DATA; + +/*! + * Entries in the list of submitted workloads, used when the completed command + * returns data to the host. + * + * - the matching data is needed as it holds the hash data + * - the host data is needed for completion updates, ensuring memory is not + * freed while workload estimates are in-flight. + * - the workload characteristic is used in the hash table look-up. + */ +typedef struct _WORKEST_RETURN_DATA_ +{ + WORKEST_HOST_DATA *psWorkEstHostData; + WORKLOAD_MATCHING_DATA *psWorkloadMatchingData; + RGX_WORKLOAD sWorkloadCharacteristics; +} WORKEST_RETURN_DATA; +#endif + + +#define RGX_MAX_NUM_MMU_PAGE_SIZE_RANGES 4 + + +/*! 
+ ****************************************************************************** + * RGX Device info + *****************************************************************************/ + +typedef struct _PVRSRV_RGXDEV_INFO_ +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + + PVRSRV_DEVICE_FEATURE_CONFIG sDevFeatureCfg; + + IMG_BOOL bDevInit2Done; + + IMG_BOOL bFirmwareInitialised; + IMG_BOOL bPDPEnabled; + + IMG_HANDLE hDbgReqNotify; + + /* Kernel mode linear address of device registers */ + void __iomem *pvRegsBaseKM; + + IMG_HANDLE hRegMapping; + + /* System physical address of device registers */ + IMG_CPU_PHYADDR sRegsPhysBase; + /* Register region size in bytes */ + IMG_UINT32 ui32RegSize; + + PVRSRV_STUB_PBDESC *psStubPBDescListKM; + + /* Firmware memory context info */ + DEVMEM_CONTEXT *psKernelDevmemCtx; + DEVMEM_HEAP *psFirmwareMainHeap; + DEVMEM_HEAP *psFirmwareConfigHeap; + MMU_CONTEXT *psKernelMMUCtx; + + void *pvDeviceMemoryHeap; + + /* Kernel CCB */ + DEVMEM_MEMDESC *psKernelCCBCtlMemDesc; /*!< memdesc for Kernel CCB control */ + RGXFWIF_CCB_CTL *psKernelCCBCtl; /*!< kernel mapping for Kernel CCB control */ + DEVMEM_MEMDESC *psKernelCCBMemDesc; /*!< memdesc for Kernel CCB */ + IMG_UINT8 *psKernelCCB; /*!< kernel mapping for Kernel CCB */ + DEVMEM_MEMDESC *psKernelCCBRtnSlotsMemDesc; /*!< Return slot array for Kernel CCB commands */ + IMG_UINT32 *pui32KernelCCBRtnSlots; /*!< kernel mapping for return slot array */ + + /* Firmware CCB */ + DEVMEM_MEMDESC *psFirmwareCCBCtlMemDesc; /*!< memdesc for Firmware CCB control */ + RGXFWIF_CCB_CTL *psFirmwareCCBCtl; /*!< kernel mapping for Firmware CCB control */ + DEVMEM_MEMDESC *psFirmwareCCBMemDesc; /*!< memdesc for Firmware CCB */ + IMG_UINT8 *psFirmwareCCB; /*!< kernel mapping for Firmware CCB */ + +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) + /* Checkpoint CCB */ + DEVMEM_MEMDESC *psCheckpointCCBCtlMemDesc; /*!< memdesc for Checkpoint CCB control */ + RGXFWIF_CCB_CTL *psCheckpointCCBCtl; /*!< kernel mapping for Checkpoint 
CCB control */ + DEVMEM_MEMDESC *psCheckpointCCBMemDesc; /*!< memdesc for Checkpoint CCB */ + IMG_UINT8 *psCheckpointCCB; /*!< kernel mapping for Checkpoint CCB */ +#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */ + + /* Workload Estimation Firmware CCB */ + DEVMEM_MEMDESC *psWorkEstFirmwareCCBCtlMemDesc; /*!< memdesc for Workload Estimation Firmware CCB control */ + RGXFWIF_CCB_CTL *psWorkEstFirmwareCCBCtl; /*!< kernel mapping for Workload Estimation Firmware CCB control */ + DEVMEM_MEMDESC *psWorkEstFirmwareCCBMemDesc; /*!< memdesc for Workload Estimation Firmware CCB */ + IMG_UINT8 *psWorkEstFirmwareCCB; /*!< kernel mapping for Workload Estimation Firmware CCB */ + + IMG_UINT32 ui32FWPoisonOnFreeFlag; /*!< Flag for poisoning FW allocations when freed */ + + IMG_BOOL bIgnoreHWReportedBVNC; /*!< Ignore BVNC reported by HW */ + + /* + if we don't preallocate the pagetables we must + insert newly allocated page tables dynamically + */ + void *pvMMUContextList; + + IMG_UINT32 ui32ClkGateStatusReg; + IMG_UINT32 ui32ClkGateStatusMask; + + DEVMEM_MEMDESC *psRGXFWCodeMemDesc; + IMG_DEV_VIRTADDR sFWCodeDevVAddrBase; + IMG_UINT32 ui32FWCodeSizeInBytes; + DEVMEM_MEMDESC *psRGXFWDataMemDesc; + IMG_DEV_VIRTADDR sFWDataDevVAddrBase; + + DEVMEM_MEMDESC *psRGXFWCorememCodeMemDesc; + IMG_DEV_VIRTADDR sFWCorememCodeDevVAddrBase; + RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr; + IMG_UINT32 ui32FWCorememCodeSizeInBytes; + + DEVMEM_MEMDESC *psRGXFWIfCorememDataStoreMemDesc; + IMG_DEV_VIRTADDR sFWCorememDataStoreDevVAddrBase; + RGXFWIF_DEV_VIRTADDR sFWCorememDataStoreFWAddr; + + DEVMEM_MEMDESC *psRGXFWAlignChecksMemDesc; + +#if defined(PDUMP) + DEVMEM_MEMDESC *psRGXFWSigTAChecksMemDesc; + IMG_UINT32 ui32SigTAChecksSize; + + DEVMEM_MEMDESC *psRGXFWSig3DChecksMemDesc; + IMG_UINT32 ui32Sig3DChecksSize; + + DEVMEM_MEMDESC *psRGXFWSigCDMChecksMemDesc; + IMG_UINT32 ui32SigCDMChecksSize; + + DEVMEM_MEMDESC *psRGXFWSigTDMChecksMemDesc; + IMG_UINT32 ui32SigTDMChecksSize; + +#if 
defined(SUPPORT_TRP) || defined(SUPPORT_FBCDC_SIGNATURE_CHECK) + DEVMEM_MEMDESC *psRGXFWSigTRP_FBCDCMemDesc; + IMG_UINT32 ui32SigTRP_FBCDCSize; +#endif + + IMG_BOOL bDumpedKCCBCtlAlready; + + POS_SPINLOCK hSyncCheckpointSignalSpinLock; /*!< Guards data shared between an atomic & sleepable-context */ +#endif + + POS_LOCK hRGXFWIfBufInitLock; /*!< trace buffer lock for initialisation phase */ + + DEVMEM_MEMDESC *psRGXFWIfTraceBufCtlMemDesc; /*!< memdesc of trace buffer control structure */ + DEVMEM_MEMDESC *psRGXFWIfTraceBufferMemDesc[RGXFW_THREAD_NUM]; /*!< memdesc of actual FW trace (log) buffer(s) */ + DEVMEM_MEMDESC *psRGXFWIfPowMonBufferMemDesc; /*!< memdesc of FW power monitoring data */ + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl; /*!< structure containing trace control data and actual trace buffer */ + + DEVMEM_MEMDESC *psRGXFWIfFwSysDataMemDesc; /*!< memdesc of the firmware-shared system data structure */ + RGXFWIF_SYSDATA *psRGXFWIfFwSysData; /*!< structure containing trace control data and actual trace buffer */ + + DEVMEM_MEMDESC *psRGXFWIfFwOsDataMemDesc; /*!< memdesc of the firmware-shared os structure */ + RGXFWIF_OSDATA *psRGXFWIfFwOsData; /*!< structure containing trace control data and actual trace buffer */ + +#if defined(SUPPORT_TBI_INTERFACE) + DEVMEM_MEMDESC *psRGXFWIfTBIBufferMemDesc; /*!< memdesc of actual FW TBI buffer */ + RGXFWIF_DEV_VIRTADDR sRGXFWIfTBIBuffer; /*!< TBI buffer data */ + IMG_UINT32 ui32FWIfTBIBufferSize; +#endif + + DEVMEM_MEMDESC *psRGXFWIfHWRInfoBufCtlMemDesc; + RGXFWIF_HWRINFOBUF *psRGXFWIfHWRInfoBufCtl; + + DEVMEM_MEMDESC *psRGXFWIfGpuUtilFWCbCtlMemDesc; + RGXFWIF_GPU_UTIL_FWCB *psRGXFWIfGpuUtilFWCb; + + DEVMEM_MEMDESC *psRGXFWIfHWPerfBufMemDesc; + IMG_BYTE *psRGXFWIfHWPerfBuf; + IMG_UINT32 ui32RGXFWIfHWPerfBufSize; /* in bytes */ + + DEVMEM_MEMDESC *psRGXFWIfRegCfgMemDesc; + + DEVMEM_MEMDESC *psRGXFWIfHWPerfCountersMemDesc; + + DEVMEM_MEMDESC *psRGXFWIfConnectionCtlMemDesc; + RGXFWIF_CONNECTION_CTL *psRGXFWIfConnectionCtl; 
+ + DEVMEM_MEMDESC *psRGXFWIfSysInitMemDesc; + RGXFWIF_SYSINIT *psRGXFWIfSysInit; + + DEVMEM_MEMDESC *psRGXFWIfOsInitMemDesc; + RGXFWIF_OSINIT *psRGXFWIfOsInit; + + DEVMEM_MEMDESC *psRGXFWIfRuntimeCfgMemDesc; + RGXFWIF_RUNTIME_CFG *psRGXFWIfRuntimeCfg; + + /* Additional guest firmware memory context info */ + DEVMEM_HEAP *psGuestFirmwareRawHeap[RGX_NUM_OS_SUPPORTED]; + DEVMEM_MEMDESC *psGuestFirmwareRawMemDesc[RGX_NUM_OS_SUPPORTED]; + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Array to store data needed for workload estimation when a workload + has finished and its cycle time is returned to the host. */ + WORKEST_RETURN_DATA asReturnData[RETURN_DATA_ARRAY_SIZE]; + IMG_UINT32 ui32ReturnDataWO; + POS_LOCK hWorkEstLock; +#endif + +#if defined(SUPPORT_PDVFS) + /** + * Host memdesc and pointer to memory containing core clock rate in Hz. + * Firmware (PDVFS) updates the memory on changing the core clock rate over + * GPIO. + * Note: Shared memory needs atomic access from Host driver and firmware, + * hence size should not be greater than memory transaction granularity. + * Currently it is chosen to be 32 bits. + */ + DEVMEM_MEMDESC *psRGXFWIFCoreClkRateMemDesc; + volatile IMG_UINT32 *pui32RGXFWIFCoreClkRate; + /** + * Last sampled core clk rate. + */ + volatile IMG_UINT32 ui32CoreClkRateSnapshot; +#endif + + /* + HWPerf data for the RGX device + */ + + POS_LOCK hHWPerfLock; /*! Critical section lock that protects HWPerf code + * from multiple thread duplicate init/deinit + * and loss/freeing of FW & Host resources while in + * use in another thread e.g. MSIR. */ + + IMG_UINT64 ui64HWPerfFilter; /*! Event filter for FW events (settable by AppHint) */ + IMG_HANDLE hHWPerfStream; /*! TL Stream buffer (L2) for firmware event stream */ + IMG_UINT32 ui32L2BufMaxPacketSize;/*!< Max allowed packet size in FW HWPerf TL (L2) buffer */ + IMG_BOOL bSuspendHWPerfL2DataCopy; /*! Flag to indicate if copying HWPerf data is suspended */ + + IMG_UINT32 ui32HWPerfHostFilter; /*! 
Event filter for HWPerfHost stream (settable by AppHint) */ + POS_LOCK hLockHWPerfHostStream; /*! Lock guarding access to HWPerfHost stream from multiple threads */ + IMG_HANDLE hHWPerfHostStream; /*! TL Stream buffer for host only event stream */ + IMG_UINT32 ui32HWPerfHostBufSize; /*! Host side buffer size in bytes */ + IMG_UINT32 ui32HWPerfHostLastOrdinal; /*! Ordinal of the last packet emitted in HWPerfHost TL stream. + * Guarded by hLockHWPerfHostStream */ + IMG_UINT32 ui32HWPerfHostNextOrdinal; /*! Ordinal number for HWPerfHost events. Guarded by hHWPerfHostSpinLock */ + IMG_UINT8 *pui8DeferredEvents; /*! List of HWPerfHost events yet to be emitted in the TL stream. + * Events generated from atomic context are deferred "emitted" + * as the "emission" code can sleep */ + IMG_UINT16 ui16DEReadIdx; /*! Read index in the above deferred events buffer */ + IMG_UINT16 ui16DEWriteIdx; /*! Write index in the above deferred events buffer */ + void *pvHostHWPerfMISR; /*! MISR to emit pending/deferred events in HWPerfHost TL stream */ + POS_SPINLOCK hHWPerfHostSpinLock; /*! Guards data shared between an atomic & sleepable-context */ +#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) + IMG_UINT32 ui32DEHighWatermark; /*! High watermark of deferred events buffer usage. Protected by + *! hHWPerfHostSpinLock */ + /* Max number of times DeferredEmission waited for an atomic-context to "finish" packet write */ + IMG_UINT32 ui32WaitForAtomicCtxPktHighWatermark; /*! Protected by hLockHWPerfHostStream */ + /* Whether warning has been logged about an atomic-context packet loss (due to too long wait for "write" finish) */ + IMG_BOOL bWarnedAtomicCtxPktLost; + /* Max number of times DeferredEmission scheduled-out to give a chance to the right-ordinal packet to be emitted */ + IMG_UINT32 ui32WaitForRightOrdPktHighWatermark; /*! 
Protected by hLockHWPerfHostStream */ + /* Whether warning has been logged about an packet loss (due to too long wait for right ordinal to emit) */ + IMG_BOOL bWarnedPktOrdinalBroke; +#endif + + void *pvGpuFtraceData; + + /* Poll data for detecting firmware fatal errors */ + IMG_UINT32 aui32CrLastPollCount[RGXFW_THREAD_NUM]; + IMG_UINT32 ui32KCCBCmdsExecutedLastTime; + IMG_BOOL bKCCBCmdsWaitingLastTime; + IMG_UINT32 ui32GEOTimeoutsLastTime; + IMG_UINT32 ui32InterruptCountLastTime; + IMG_UINT32 ui32MissingInterruptsLastTime; + + /* Client stall detection */ + IMG_UINT32 ui32StalledClientMask; + + IMG_BOOL bWorkEstEnabled; + IMG_BOOL bPDVFSEnabled; + + void *pvLISRData; + void *pvMISRData; + void *pvAPMISRData; + RGX_ACTIVEPM_CONF eActivePMConf; + + volatile IMG_UINT32 aui32SampleIRQCount[RGXFW_THREAD_NUM]; + + DEVMEM_MEMDESC *psRGXFaultAddressMemDesc; + + DEVMEM_MEMDESC *psSLC3FenceMemDesc; + + /* If we do 10 deferred memory allocations per second, then the ID would wrap around after 13 years */ + IMG_UINT32 ui32ZSBufferCurrID; /*!< ID assigned to the next deferred devmem allocation */ + IMG_UINT32 ui32FreelistCurrID; /*!< ID assigned to the next freelist */ + + POS_LOCK hLockZSBuffer; /*!< Lock to protect simultaneous access to ZSBuffers */ + DLLIST_NODE sZSBufferHead; /*!< List of on-demand ZSBuffers */ + POS_LOCK hLockFreeList; /*!< Lock to protect simultaneous access to Freelists */ + DLLIST_NODE sFreeListHead; /*!< List of growable Freelists */ + PSYNC_PRIM_CONTEXT hSyncPrimContext; + PVRSRV_CLIENT_SYNC_PRIM *psPowSyncPrim; + + IMG_UINT32 ui32ActivePMReqOk; + IMG_UINT32 ui32ActivePMReqDenied; + IMG_UINT32 ui32ActivePMReqNonIdle; + IMG_UINT32 ui32ActivePMReqRetry; + IMG_UINT32 ui32ActivePMReqTotal; + + IMG_HANDLE hProcessQueuesMISR; + + IMG_UINT32 ui32DeviceFlags; /*!< Flags to track general device state */ + + /* GPU DVFS Table */ + RGX_GPU_DVFS_TABLE *psGpuDVFSTable; + + /* Pointer to function returning the GPU utilisation statistics since the last + * time 
the function was called. Supports different users at the same time. + * + * psReturnStats [out]: GPU utilisation statistics (active high/active low/idle/blocked) + * in microseconds since the last time the function was called + * by a specific user (identified by hGpuUtilUser) + * + * Returns PVRSRV_OK in case the call completed without errors, + * some other value otherwise. + */ + PVRSRV_ERROR (*pfnGetGpuUtilStats) (PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hGpuUtilUser, + RGXFWIF_GPU_UTIL_STATS *psReturnStats); + + POS_LOCK hGPUUtilLock; + + /* Register configuration */ + RGX_REG_CONFIG sRegCongfig; + + IMG_BOOL bRGXPowered; + DLLIST_NODE sMemoryContextList; + + POSWR_LOCK hRenderCtxListLock; + POSWR_LOCK hComputeCtxListLock; + POSWR_LOCK hTransferCtxListLock; + POSWR_LOCK hTDMCtxListLock; + POSWR_LOCK hMemoryCtxListLock; + POSWR_LOCK hKickSyncCtxListLock; + + /* Linked list of deferred KCCB commands due to a full KCCB. + * Access to members sKCCBDeferredCommandsListHead and ui32KCCBDeferredCommandsCount + * are protected by the hLockKCCBDeferredCommandsList spin lock. 
*/ + POS_SPINLOCK hLockKCCBDeferredCommandsList; /*!< Protects deferred KCCB commands list */ + DLLIST_NODE sKCCBDeferredCommandsListHead; + IMG_UINT32 ui32KCCBDeferredCommandsCount; /*!< No of commands in the deferred list */ + + /* Linked lists of contexts on this device */ + DLLIST_NODE sRenderCtxtListHead; + DLLIST_NODE sComputeCtxtListHead; + DLLIST_NODE sTDMCtxtListHead; + DLLIST_NODE sKickSyncCtxtListHead; + + DLLIST_NODE sCommonCtxtListHead; + POSWR_LOCK hCommonCtxtListLock; + IMG_UINT32 ui32CommonCtxtCurrentID; /*!< ID assigned to the next common context */ + + POS_LOCK hDebugFaultInfoLock; /*!< Lock to protect the debug fault info list */ + POS_LOCK hMMUCtxUnregLock; /*!< Lock to protect list of unregistered MMU contexts */ + +#if defined(SUPPORT_VALIDATION) + RGX_POWER_DOMAIN_STATE sPowerDomainState; /*!< Power island sequence */ + IMG_UINT32 ui32ValidationFlags; /*!< Validation flags for host driver */ +#endif + IMG_UINT32 ui32AvailablePowUnitsMask; + + RGX_LAYER_PARAMS sLayerParams; + + RGXFWIF_DM eBPDM; /*!< Current breakpoint data master */ + IMG_BOOL bBPSet; /*!< A Breakpoint has been set */ + POS_LOCK hBPLock; /*!< Lock for break point operations */ + + IMG_UINT32 ui32CoherencyTestsDone; + + ATOMIC_T iCCBSubmissionOrdinal; /* Rolling count used to indicate CCB submission order (all CCBs) */ + POS_LOCK hCCBRecoveryLock; /* Lock to protect pvEarliestStalledClientCCB and ui32OldestSubmissionOrdinal variables */ + void *pvEarliestStalledClientCCB; /* Will point to cCCB command to unblock in the event of a stall */ + IMG_UINT32 ui32OldestSubmissionOrdinal; /* Earliest submission ordinal of CCB entry found so far */ + IMG_UINT32 ui32SLRHoldoffCounter; /* Decremented each time health check is called until zero. SLR only happen when zero. 
*/ + + POS_LOCK hCCBStallCheckLock; /* Lock used to guard against multiple threads simultaneously checking for stalled CCBs */ + +#if defined(SUPPORT_FIRMWARE_GCOV) + /* Firmware gcov buffer */ + DEVMEM_MEMDESC *psFirmwareGcovBufferMemDesc; /*!< mem desc for Firmware gcov dumping buffer */ + IMG_UINT32 ui32FirmwareGcovSize; +#endif + /* Value to store for each page size range config register in MMU4 */ + IMG_UINT64 aui64MMUPageSizeRangeValue[RGX_MAX_NUM_MMU_PAGE_SIZE_RANGES]; + +#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) + struct + { + IMG_UINT64 ui64timerGray; + IMG_UINT64 ui64timerBinary; + IMG_UINT64 aui64uscTimers[RGX_FEATURE_NUM_CLUSTERS]; + } sRGXTimerValues; +#endif + + IMG_HANDLE hTQCLISharedMem; /*!< TQ Client Shared Mem PMR */ + IMG_HANDLE hTQUSCSharedMem; /*!< TQ USC Shared Mem PMR */ + +#if defined(SUPPORT_VALIDATION) + IMG_UINT32 ui32TestSLRInterval; /* Don't enqueue an update sync checkpoint every nth kick */ + IMG_UINT32 ui32TestSLRCount; /* (used to test SLR operation) */ + IMG_UINT32 ui32SLRSkipFWAddr; +#endif + +#if defined(SUPPORT_SECURITY_VALIDATION) + DEVMEM_MEMDESC *psRGXFWIfSecureBufMemDesc; + DEVMEM_MEMDESC *psRGXFWIfNonSecureBufMemDesc; +#endif + +} PVRSRV_RGXDEV_INFO; + + + +typedef struct _RGX_TIMING_INFORMATION_ +{ + /*! GPU default core clock speed in Hz */ + IMG_UINT32 ui32CoreClockSpeed; + + /*! Active Power Management: GPU actively requests the host driver to be powered off */ + IMG_BOOL bEnableActivePM; + + /*! Enable the GPU to power off internal Power Islands independently from the host driver */ + IMG_BOOL bEnableRDPowIsland; + + /*! Active Power Management: Delay between the GPU idle and the request to the host */ + IMG_UINT32 ui32ActivePMLatencyms; + +} RGX_TIMING_INFORMATION; + +typedef struct _RGX_DATA_ +{ + /*! 
Timing information */ + RGX_TIMING_INFORMATION *psRGXTimingInfo; + IMG_BOOL bHasTDFWMemPhysHeap; + IMG_UINT32 uiTDFWMemPhysHeapID; + IMG_BOOL bHasFWMemPhysHeap; + IMG_UINT32 uiFWMemPhysHeapID; +} RGX_DATA; + + +/* + RGX PDUMP register bank name (prefix) +*/ +#define RGX_PDUMPREG_NAME "RGXREG" + +#endif /* __RGXDEVICE_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxfwimageutils.c b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxfwimageutils.c new file mode 100644 index 000000000000..d82e96f5ccae --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxfwimageutils.c @@ -0,0 +1,993 @@ +/*************************************************************************/ /*! +@File +@Title Services Firmware image utilities used at init time +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Services Firmware image utilities used at init time +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* The routines implemented here are built on top of an abstraction layer to + * hide DDK/OS-specific details in case they are used outside of the DDK + * (e.g. when trusted device is enabled). + * Any new dependency should be added to rgxlayer.h. + * Any new code should be built on top of the existing abstraction layer, + * which should be extended when necessary. 
 */
+#include "rgxfwimageutils.h"
+#include "pvrsrv.h"
+
+
+/************************************************************************
+* FW layout information
+************************************************************************/
+#define MAX_NUM_ENTRIES (8)
+static RGX_FW_LAYOUT_ENTRY asRGXFWLayoutTable[MAX_NUM_ENTRIES];
+static IMG_UINT32 ui32LayoutEntryNum;
+
+
+static RGX_FW_LAYOUT_ENTRY* GetTableEntry(const void *hPrivate, RGX_FW_SECTION_ID eId)
+{
+	IMG_UINT32 i;
+
+	for (i = 0; i < ui32LayoutEntryNum; i++)
+	{
+		if (asRGXFWLayoutTable[i].eId == eId)
+		{
+			return &asRGXFWLayoutTable[i];
+		}
+	}
+
+	RGXErrorLog(hPrivate, "%s: id %u not found, returning entry 0\n",
+	            __func__, eId);
+
+	return &asRGXFWLayoutTable[0];
+}
+
+/*!
+*******************************************************************************
+
+ @Function      FindMMUSegment
+
+ @Description   Given a 32 bit FW address attempt to find the corresponding
+                pointer to FW allocation
+
+ @Input         ui32OffsetIn : 32 bit FW address
+ @Input         pvHostFWCodeAddr : Pointer to FW code
+ @Input         pvHostFWDataAddr : Pointer to FW data
+ @Input         pvHostFWCorememCodeAddr : Pointer to FW coremem code
+ @Input         pvHostFWCorememDataAddr : Pointer to FW coremem data
+ @Output        uiHostAddrOut : CPU pointer equivalent to ui32OffsetIn
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR FindMMUSegment(IMG_UINT32 ui32OffsetIn,
+                                   void *pvHostFWCodeAddr,
+                                   void *pvHostFWDataAddr,
+                                   void *pvHostFWCorememCodeAddr,
+                                   void *pvHostFWCorememDataAddr,
+                                   void **uiHostAddrOut)
+{
+	IMG_UINT32 i;
+
+	for (i = 0; i < ui32LayoutEntryNum; i++)
+	{
+		if ((ui32OffsetIn >= asRGXFWLayoutTable[i].ui32BaseAddr) &&
+		    (ui32OffsetIn < (asRGXFWLayoutTable[i].ui32BaseAddr + asRGXFWLayoutTable[i].ui32AllocSize)))
+		{
+			switch (asRGXFWLayoutTable[i].eType)
+			{
+				case FW_CODE:
+					*uiHostAddrOut = pvHostFWCodeAddr;
+					break;
+
+				case FW_DATA:
+					*uiHostAddrOut = pvHostFWDataAddr;
+					break;
+
+				case FW_COREMEM_CODE:
+					*uiHostAddrOut = pvHostFWCorememCodeAddr;
+					break;
+
+				case FW_COREMEM_DATA:
+					*uiHostAddrOut = pvHostFWCorememDataAddr;
+					break;
+
+				default:
+					return PVRSRV_ERROR_INIT_FAILURE;
+			}
+
+			goto found;
+		}
+	}
+
+	return PVRSRV_ERROR_INIT_FAILURE;
+
+found:
+	if (*uiHostAddrOut == NULL)
+	{
+		return PVRSRV_OK;
+	}
+
+	/* Direct Mem write to mapped memory */
+	ui32OffsetIn -= asRGXFWLayoutTable[i].ui32BaseAddr;
+	ui32OffsetIn += asRGXFWLayoutTable[i].ui32AllocOffset;
+
+	/* Add offset to pointer to FW allocation only if
+	 * that allocation is available
+	 */
+	if (*uiHostAddrOut)
+	{
+		*(IMG_UINT8 **)uiHostAddrOut += ui32OffsetIn;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function      RGXFWConfigureSegID
+
+ @Description   Configures a single segment of the Segment MMU
+                (base, limit and out_addr)
+
+ @Input         hPrivate : Implementation specific data
+ @Input         ui64SegOutAddr : Segment output base address (40 bit devVaddr)
+ @Input         ui32SegBase : Segment input base address (32 bit FW address)
+ @Input         ui32SegLimit : Segment size
+ @Input         ui32SegID : Segment ID
+ @Input         pszName : Segment name
+ @Input         ppui32BootConf : Pointer to bootloader data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXFWConfigureSegID(const void *hPrivate,
+                                IMG_UINT64 ui64SegOutAddr,
+                                IMG_UINT32 ui32SegBase,
+                                IMG_UINT32 ui32SegLimit,
+                                IMG_UINT32 ui32SegID,
+                                IMG_UINT32 **ppui32BootConf)
+{
+	IMG_UINT32 *pui32BootConf = *ppui32BootConf;
+	IMG_UINT32 ui32SegOutAddr0 = ui64SegOutAddr & 0x00000000FFFFFFFFUL;
+	IMG_UINT32 ui32SegOutAddr1 = (ui64SegOutAddr >> 32) & 0x00000000FFFFFFFFUL;
+
+	/* META segments have a minimum size */
+	IMG_UINT32 ui32LimitOff = (ui32SegLimit < RGXFW_SEGMMU_ALIGN) ?
+	                          RGXFW_SEGMMU_ALIGN : ui32SegLimit;
+	/* the limit is an offset, therefore off = size - 1 */
+	ui32LimitOff -= 1;
+
+	RGXCommentLog(hPrivate,
+	              "* Seg%d: meta_addr = 0x%08x, devv_addr = 0x%" IMG_UINT64_FMTSPECx ", limit = 0x%x",
+	              ui32SegID,
+	              ui32SegBase,
+	              ui64SegOutAddr,
+	              ui32LimitOff);
+
+	ui32SegBase |= RGXFW_SEGMMU_ALLTHRS_WRITEABLE;
+
+	*pui32BootConf++ = META_CR_MMCU_SEGMENTn_BASE(ui32SegID);
+	*pui32BootConf++ = ui32SegBase;
+
+	*pui32BootConf++ = META_CR_MMCU_SEGMENTn_LIMIT(ui32SegID);
+	*pui32BootConf++ = ui32LimitOff;
+
+	*pui32BootConf++ = META_CR_MMCU_SEGMENTn_OUTA0(ui32SegID);
+	*pui32BootConf++ = ui32SegOutAddr0;
+
+	*pui32BootConf++ = META_CR_MMCU_SEGMENTn_OUTA1(ui32SegID);
+	*pui32BootConf++ = ui32SegOutAddr1;
+
+	*ppui32BootConf = pui32BootConf;
+}
+
+/*!
+*******************************************************************************
+
+ @Function      RGXFWConfigureSegMMU
+
+ @Description   Configures META's Segment MMU
+
+ @Input         hPrivate : Implementation specific data
+ @Input         psFWCodeDevVAddrBase : FW code base device virtual address
+ @Input         psFWDataDevVAddrBase : FW data base device virtual address
+ @Input         ppui32BootConf : Pointer to bootloader data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXFWConfigureSegMMU(const void *hPrivate,
+                                 IMG_DEV_VIRTADDR *psFWCodeDevVAddrBase,
+                                 IMG_DEV_VIRTADDR *psFWDataDevVAddrBase,
+                                 IMG_UINT32 **ppui32BootConf)
+{
+	IMG_UINT32 i;
+
+	PVR_UNREFERENCED_PARAMETER(psFWCodeDevVAddrBase);
+
+	/* Configure Segment MMU */
+	RGXCommentLog(hPrivate, "********** FW configure Segment MMU **********");
+
+	for (i = 0; i < ui32LayoutEntryNum; i++)
+	{
+		/*
+		 * FW code is using the bootloader segment which is already configured on boot.
+		 * FW coremem code and data don't use the segment MMU.
+		 * Only the FW data segment needs to be configured.
+		 */
+
+		if (asRGXFWLayoutTable[i].eType == FW_DATA)
+		{
+			IMG_UINT64 ui64SegOutAddr;
+			IMG_UINT32 ui32SegId = RGXFW_SEGMMU_DATA_ID;
+
+			ui64SegOutAddr = (psFWDataDevVAddrBase->uiAddr |
+			                  RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(MMU_CONTEXT_MAPPING_FWPRIV)) +
+			                 asRGXFWLayoutTable[i].ui32AllocOffset;
+
+			RGXFWConfigureSegID(hPrivate,
+			                    ui64SegOutAddr,
+			                    asRGXFWLayoutTable[i].ui32BaseAddr,
+			                    asRGXFWLayoutTable[i].ui32AllocSize,
+			                    ui32SegId,
+			                    ppui32BootConf); /*write the sequence to the bootldr */
+
+			break;
+		}
+	}
+}
+
+/*!
+*******************************************************************************
+
+ @Function      RGXFWConfigureMetaCaches
+
+ @Description   Configure and enable the Meta instruction and data caches
+
+ @Input         hPrivate : Implementation specific data
+ @Input         ui32NumThreads : Number of FW threads in use
+ @Input         ppui32BootConf : Pointer to bootloader data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXFWConfigureMetaCaches(const void *hPrivate,
+                                     IMG_UINT32 ui32NumThreads,
+                                     IMG_UINT32 **ppui32BootConf)
+{
+	IMG_UINT32 *pui32BootConf = *ppui32BootConf;
+	IMG_UINT32 ui32DCacheT0, ui32ICacheT0;
+	IMG_UINT32 ui32DCacheT1, ui32ICacheT1;
+	IMG_UINT32 ui32DCacheT2, ui32ICacheT2;
+	IMG_UINT32 ui32DCacheT3, ui32ICacheT3;
+
+#define META_CR_MMCU_LOCAL_EBCTRL                        (0x04830600)
+#define META_CR_MMCU_LOCAL_EBCTRL_ICWIN                  (0x3 << 14)
+#define META_CR_MMCU_LOCAL_EBCTRL_DCWIN                  (0x3 << 6)
+#define META_CR_SYSC_DCPART(n)                           (0x04830200 + (n)*0x8)
+#define META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE         (0x1 << 31)
+#define META_CR_SYSC_ICPART(n)                           (0x04830220 + (n)*0x8)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF  (0x8 << 16)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE       (0xF)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE       (0x7)
+#define META_CR_MMCU_DCACHE_CTRL                         (0x04830018)
+#define META_CR_MMCU_ICACHE_CTRL                         (0x04830020)
+#define META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN           (0x1)
+
+	RGXCommentLog(hPrivate, "********** Meta caches configuration *********");
+
+	/* Initialise I/Dcache settings */
+	ui32DCacheT0 = ui32DCacheT1 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+	ui32DCacheT2 = ui32DCacheT3 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+	ui32ICacheT0 = ui32ICacheT1 = ui32ICacheT2 = ui32ICacheT3 = 0;
+
+	if (ui32NumThreads == 1)
+	{
+		ui32DCacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+		ui32ICacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+	}
+	else
+	{
+		ui32DCacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE;
+		ui32ICacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE;
+
+		ui32DCacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE |
+		                META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF;
+		ui32ICacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE |
+		                META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF;
+	}
+
+	/* Local region MMU enhanced bypass: WIN-3 mode for code and data caches */
+	*pui32BootConf++ = META_CR_MMCU_LOCAL_EBCTRL;
+	*pui32BootConf++ = META_CR_MMCU_LOCAL_EBCTRL_ICWIN |
+	                   META_CR_MMCU_LOCAL_EBCTRL_DCWIN;
+
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_MMCU_LOCAL_EBCTRL,
+	              META_CR_MMCU_LOCAL_EBCTRL_ICWIN | META_CR_MMCU_LOCAL_EBCTRL_DCWIN);
+
+	/* Data cache partitioning thread 0 to 3 */
+	*pui32BootConf++ = META_CR_SYSC_DCPART(0);
+	*pui32BootConf++ = ui32DCacheT0;
+	*pui32BootConf++ = META_CR_SYSC_DCPART(1);
+	*pui32BootConf++ = ui32DCacheT1;
+	*pui32BootConf++ = META_CR_SYSC_DCPART(2);
+	*pui32BootConf++ = ui32DCacheT2;
+	*pui32BootConf++ = META_CR_SYSC_DCPART(3);
+	*pui32BootConf++ = ui32DCacheT3;
+
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_DCPART(0), ui32DCacheT0);
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_DCPART(1), ui32DCacheT1);
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_DCPART(2), ui32DCacheT2);
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_DCPART(3), ui32DCacheT3);
+
+	/* Enable data cache hits */
+	*pui32BootConf++ = META_CR_MMCU_DCACHE_CTRL;
+	*pui32BootConf++ = META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN;
+
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_MMCU_DCACHE_CTRL,
+	              META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN);
+
+	/* Instruction cache partitioning thread 0 to 3 */
+	*pui32BootConf++ = META_CR_SYSC_ICPART(0);
+	*pui32BootConf++ = ui32ICacheT0;
+	*pui32BootConf++ = META_CR_SYSC_ICPART(1);
+	*pui32BootConf++ = ui32ICacheT1;
+	*pui32BootConf++ = META_CR_SYSC_ICPART(2);
+	*pui32BootConf++ = ui32ICacheT2;
+	*pui32BootConf++ = META_CR_SYSC_ICPART(3);
+	*pui32BootConf++ = ui32ICacheT3;
+
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_ICPART(0), ui32ICacheT0);
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_ICPART(1), ui32ICacheT1);
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_ICPART(2), ui32ICacheT2);
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_ICPART(3), ui32ICacheT3);
+
+	/* Enable instruction cache hits */
+	*pui32BootConf++ = META_CR_MMCU_ICACHE_CTRL;
+	*pui32BootConf++ = META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN;
+
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_MMCU_ICACHE_CTRL,
+	              META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN);
+
+	*pui32BootConf++ = 0x040000C0;
+	*pui32BootConf++ = 0;
+
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", 0x040000C0, 0);
+
+	*ppui32BootConf = pui32BootConf;
+}
+
+/*!
+*******************************************************************************
+
+ @Function      ProcessLDRCommandStream
+
+ @Description   Process the output of the Meta toolchain in the .LDR format
+                copying code and data sections into their final location and
+                passing some information to the Meta bootloader
+
+ @Input         hPrivate : Implementation specific data
+ @Input         pbLDR : Pointer to FW blob
+ @Input         pvHostFWCodeAddr : Pointer to FW code
+ @Input         pvHostFWDataAddr : Pointer to FW data
+ @Input         pvHostFWCorememCodeAddr : Pointer to FW coremem code
+ @Input         pvHostFWCorememDataAddr : Pointer to FW coremem data
+ @Input         ppui32BootConf : Pointer to bootloader data
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR ProcessLDRCommandStream(const void *hPrivate,
+                                     const IMG_BYTE* pbLDR,
+                                     void* pvHostFWCodeAddr,
+                                     void* pvHostFWDataAddr,
+                                     void* pvHostFWCorememCodeAddr,
+                                     void* pvHostFWCorememDataAddr,
+                                     IMG_UINT32 **ppui32BootConf)
+{
+	RGX_META_LDR_BLOCK_HDR *psHeader = (RGX_META_LDR_BLOCK_HDR *) pbLDR;
+	RGX_META_LDR_L1_DATA_BLK *psL1Data =
+	    (RGX_META_LDR_L1_DATA_BLK*) ((IMG_UINT8 *) pbLDR + psHeader->ui32SLData);
+
+	IMG_UINT32 *pui32BootConf = ppui32BootConf ? *ppui32BootConf : NULL;
+	IMG_UINT32 ui32CorememSize = RGXGetFWCorememSize(hPrivate);
+
+	RGXCommentLog(hPrivate, "**********************************************");
+	RGXCommentLog(hPrivate, "************** Begin LDR Parsing *************");
+	RGXCommentLog(hPrivate, "**********************************************");
+
+	while (psL1Data != NULL)
+	{
+		if (RGX_META_LDR_BLK_IS_COMMENT(psL1Data->ui16Cmd))
+		{
+			/* Don't process comment blocks */
+			goto NextBlock;
+		}
+
+		switch (psL1Data->ui16Cmd & RGX_META_LDR_CMD_MASK)
+		{
+			case RGX_META_LDR_CMD_LOADMEM:
+			{
+				RGX_META_LDR_L2_DATA_BLK *psL2Block =
+				    (RGX_META_LDR_L2_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->aui32CmdData[1]);
+				IMG_UINT32 ui32Offset = psL1Data->aui32CmdData[0];
+				IMG_UINT32 ui32DataSize = psL2Block->ui16Length - 6 /* L2 Tag length and checksum */;
+				void *pvWriteAddr;
+				PVRSRV_ERROR eError;
+
+				if (!RGX_META_IS_COREMEM_CODE(ui32Offset, ui32CorememSize) &&
+				    !RGX_META_IS_COREMEM_DATA(ui32Offset, ui32CorememSize))
+				{
+					/* Global range is aliased to local range */
+					ui32Offset &= ~META_MEM_GLOBAL_RANGE_BIT;
+				}
+
+				eError = FindMMUSegment(ui32Offset,
+				                        pvHostFWCodeAddr,
+				                        pvHostFWDataAddr,
+				                        pvHostFWCorememCodeAddr,
+				                        pvHostFWCorememDataAddr,
+				                        &pvWriteAddr);
+
+				if (eError != PVRSRV_OK)
+				{
+					RGXErrorLog(hPrivate,
+					            "ProcessLDRCommandStream: Addr 0x%x (size: %d) not found in any segment",
+					            ui32Offset, ui32DataSize);
+					return eError;
+				}
+
+				/* Write to FW allocation only if available */
+				if (pvWriteAddr)
+				{
+					RGXMemCopy(hPrivate,
+					           pvWriteAddr,
+					           psL2Block->aui32BlockData,
+					           ui32DataSize);
+				}
+
+				break;
+			}
+			case RGX_META_LDR_CMD_LOADCORE:
+			case RGX_META_LDR_CMD_LOADMMREG:
+			{
+				return PVRSRV_ERROR_INIT_FAILURE;
+			}
+			case RGX_META_LDR_CMD_START_THREADS:
+			{
+				/* Don't process this block */
+				break;
+			}
+			case RGX_META_LDR_CMD_ZEROMEM:
+			{
+				IMG_UINT32 ui32Offset = psL1Data->aui32CmdData[0];
+				IMG_UINT32 ui32ByteCount = psL1Data->aui32CmdData[1];
+				void *pvWriteAddr;
+				PVRSRV_ERROR eError;
+
+				if (RGX_META_IS_COREMEM_DATA(ui32Offset, ui32CorememSize))
+				{
+					/* cannot zero coremem directly */
+					break;
+				}
+
+				/* Global range is aliased to local range */
+				ui32Offset &= ~META_MEM_GLOBAL_RANGE_BIT;
+
+				eError = FindMMUSegment(ui32Offset,
+				                        pvHostFWCodeAddr,
+				                        pvHostFWDataAddr,
+				                        pvHostFWCorememCodeAddr,
+				                        pvHostFWCorememDataAddr,
+				                        &pvWriteAddr);
+
+				if (eError != PVRSRV_OK)
+				{
+					RGXErrorLog(hPrivate,
+					            "ProcessLDRCommandStream: Addr 0x%x (size: %d) not found in any segment",
+					            ui32Offset, ui32ByteCount);
+					return eError;
+				}
+
+				/* Write to FW allocation only if available */
+				if (pvWriteAddr)
+				{
+					RGXMemSet(hPrivate, pvWriteAddr, 0, ui32ByteCount);
+				}
+
+				break;
+			}
+			case RGX_META_LDR_CMD_CONFIG:
+			{
+				RGX_META_LDR_L2_DATA_BLK *psL2Block =
+				    (RGX_META_LDR_L2_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->aui32CmdData[0]);
+				RGX_META_LDR_CFG_BLK *psConfigCommand = (RGX_META_LDR_CFG_BLK*) psL2Block->aui32BlockData;
+				IMG_UINT32 ui32L2BlockSize = psL2Block->ui16Length - 6 /* L2 Tag length and checksum */;
+				IMG_UINT32 ui32CurrBlockSize = 0;
+
+				while (ui32L2BlockSize)
+				{
+					switch (psConfigCommand->ui32Type)
+					{
+						case RGX_META_LDR_CFG_PAUSE:
+						case RGX_META_LDR_CFG_READ:
+						{
+							ui32CurrBlockSize = 8;
+							return PVRSRV_ERROR_INIT_FAILURE;
+						}
+						case RGX_META_LDR_CFG_WRITE:
+						{
+							IMG_UINT32 ui32RegisterOffset = psConfigCommand->aui32BlockData[0];
+							IMG_UINT32 ui32RegisterValue = psConfigCommand->aui32BlockData[1];
+
+							/* Only write to bootloader if we got a valid
+							 * pointer to the FW code allocation
+							 */
+							if (pui32BootConf)
+							{
+								/* Do register write */
+								*pui32BootConf++ = ui32RegisterOffset;
+								*pui32BootConf++ = ui32RegisterValue;
+							}
+
+							RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+							              ui32RegisterOffset, ui32RegisterValue);
+
+							ui32CurrBlockSize = 12;
+							break;
+						}
+						case RGX_META_LDR_CFG_MEMSET:
+						case RGX_META_LDR_CFG_MEMCHECK:
+						{
+							ui32CurrBlockSize = 20;
+							return PVRSRV_ERROR_INIT_FAILURE;
+						}
+						default:
+						{
+							return PVRSRV_ERROR_INIT_FAILURE;
+						}
+					}
+					ui32L2BlockSize -= ui32CurrBlockSize;
+					psConfigCommand = (RGX_META_LDR_CFG_BLK*) (((IMG_UINT8*) psConfigCommand) + ui32CurrBlockSize);
+				}
+
+				break;
+			}
+			default:
+			{
+				return PVRSRV_ERROR_INIT_FAILURE;
+			}
+		}
+
+NextBlock:
+
+		if (psL1Data->ui32Next == 0xFFFFFFFF)
+		{
+			psL1Data = NULL;
+		}
+		else
+		{
+			psL1Data = (RGX_META_LDR_L1_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->ui32Next);
+		}
+	}
+
+	if (pui32BootConf)
+	{
+		*ppui32BootConf = pui32BootConf;
+	}
+
+	RGXCommentLog(hPrivate, "**********************************************");
+	RGXCommentLog(hPrivate, "************** End Loader Parsing ************");
+	RGXCommentLog(hPrivate, "**********************************************");
+
+	return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function      ProcessELFCommandStream
+
+ @Description   Process a file in .ELF format copying code and data sections
+                into their final location
+
+ @Input         hPrivate : Implementation specific data
+ @Input         pbELF : Pointer to FW blob
+ @Input         pvHostFWCodeAddr : Pointer to FW code
+ @Input         pvHostFWDataAddr : Pointer to FW data
+ @Input         pvHostFWCorememCodeAddr : Pointer to FW coremem code
+ @Input         pvHostFWCorememDataAddr : Pointer to FW coremem data
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR ProcessELFCommandStream(const void *hPrivate,
+                                     const IMG_BYTE *pbELF,
+                                     void *pvHostFWCodeAddr,
+                                     void *pvHostFWDataAddr,
+                                     void* pvHostFWCorememCodeAddr,
+                                     void* pvHostFWCorememDataAddr)
+{
+	IMG_UINT32 ui32Entry;
+	IMG_ELF_HDR *psHeader = (IMG_ELF_HDR *)pbELF;
+	IMG_ELF_PROGRAM_HDR *psProgramHeader =
+	    (IMG_ELF_PROGRAM_HDR *)(pbELF + psHeader->ui32Ephoff);
+	PVRSRV_ERROR eError;
+
+	for (ui32Entry = 0; ui32Entry < psHeader->ui32Ephnum; ui32Entry++, psProgramHeader++)
+	{
+		void *pvWriteAddr;
+
+		/* Only consider loadable entries in the ELF segment table */
+		if
(psProgramHeader->ui32Ptype != ELF_PT_LOAD) continue; + + eError = FindMMUSegment(psProgramHeader->ui32Pvaddr, + pvHostFWCodeAddr, + pvHostFWDataAddr, + pvHostFWCorememCodeAddr, + pvHostFWCorememDataAddr, + &pvWriteAddr); + + if (eError != PVRSRV_OK) + { + RGXErrorLog(hPrivate, + "%s: Addr 0x%x (size: %d) not found in any segment",__func__, + psProgramHeader->ui32Pvaddr, + psProgramHeader->ui32Pfilesz); + return eError; + } + + /* Write to FW allocation only if available */ + if (pvWriteAddr) + { + RGXMemCopy(hPrivate, + pvWriteAddr, + (IMG_PBYTE)(pbELF + psProgramHeader->ui32Poffset), + psProgramHeader->ui32Pfilesz); + + RGXMemSet(hPrivate, + (IMG_PBYTE)pvWriteAddr + psProgramHeader->ui32Pfilesz, + 0, + psProgramHeader->ui32Pmemsz - psProgramHeader->ui32Pfilesz); + } + } + + return PVRSRV_OK; +} + + +IMG_UINT32 RGXGetFWImageSectionOffset(const void *hPrivate, RGX_FW_SECTION_ID eId) +{ + RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId); + + return psEntry->ui32AllocOffset; +} + +IMG_UINT32 RGXGetFWImageSectionMaxSize(const void *hPrivate, RGX_FW_SECTION_ID eId) +{ + RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId); + + return psEntry->ui32MaxSize; +} + +IMG_UINT32 RGXGetFWImageSectionAllocSize(const void *hPrivate, RGX_FW_SECTION_ID eId) +{ + RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId); + + return psEntry->ui32AllocSize; +} + +IMG_UINT32 RGXGetFWImageSectionAddress(const void *hPrivate, RGX_FW_SECTION_ID eId) +{ + RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId); + + return psEntry->ui32BaseAddr; +} + +PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate, + const IMG_BYTE *pbRGXFirmware, + const IMG_UINT32 ui32RGXFirmwareSize, + IMG_DEVMEM_SIZE_T *puiFWCodeAllocSize, + IMG_DEVMEM_SIZE_T *puiFWDataAllocSize, + IMG_DEVMEM_SIZE_T *puiFWCorememCodeAllocSize, + IMG_DEVMEM_SIZE_T *puiFWCorememDataAllocSize) +{ + RGX_FW_INFO_HEADER *psInfoHeader; + const IMG_BYTE *pbRGXFirmwareInfo; + const IMG_BYTE 
*pbRGXFirmwareLayout; + IMG_UINT32 i; + + if (pbRGXFirmware == NULL || ui32RGXFirmwareSize == 0 || ui32RGXFirmwareSize <= FW_BLOCK_SIZE) + { + RGXErrorLog(hPrivate, "%s: Invalid FW binary at %p, size %u", + __func__, pbRGXFirmware, ui32RGXFirmwareSize); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + + /* + * Acquire pointer to the FW info header within the FW image. + * The format of the header in the FW image might not be the one expected + * by the driver, but the driver should still be able to correctly read + * the information below, as long as new/incompatible elements are added + * at the end of the header (they will be ignored by the driver). + */ + + pbRGXFirmwareInfo = pbRGXFirmware + ui32RGXFirmwareSize - FW_BLOCK_SIZE; + psInfoHeader = (RGX_FW_INFO_HEADER*)pbRGXFirmwareInfo; + + /* If any of the following checks fails, the FW will likely not work properly */ + + if (psInfoHeader->ui32InfoVersion != FW_INFO_VERSION) + { + RGXErrorLog(hPrivate, "%s: FW info version mismatch (expected: %u, found: %u)", + __func__, + (IMG_UINT32) FW_INFO_VERSION, + psInfoHeader->ui32InfoVersion); + } + + if (psInfoHeader->ui32HeaderLen != sizeof(RGX_FW_INFO_HEADER)) + { + RGXErrorLog(hPrivate, "%s: FW info header sizes mismatch (expected: %u, found: %u)", + __func__, + (IMG_UINT32) sizeof(RGX_FW_INFO_HEADER), + psInfoHeader->ui32HeaderLen); + } + + if (psInfoHeader->ui32LayoutEntrySize != sizeof(RGX_FW_LAYOUT_ENTRY)) + { + RGXErrorLog(hPrivate, "%s: FW layout entry sizes mismatch (expected: %u, found: %u)", + __func__, + (IMG_UINT32) sizeof(RGX_FW_LAYOUT_ENTRY), + psInfoHeader->ui32LayoutEntrySize); + } + + if (psInfoHeader->ui32LayoutEntryNum > MAX_NUM_ENTRIES) + { + RGXErrorLog(hPrivate, "%s: Not enough storage for the FW layout table (max: %u entries, found: %u)", + __func__, + MAX_NUM_ENTRIES, + psInfoHeader->ui32LayoutEntryNum); + } + + ui32LayoutEntryNum = psInfoHeader->ui32LayoutEntryNum; + + + /* + * Copy FW layout table from FW image to local array. 
+ * One entry is copied at a time and the copy is limited to what the driver + * expects to find in it. Assuming that new/incompatible elements + * are added at the end of each entry, the loop below adapts the table + * in the FW image into the format expected by the driver. + */ + + pbRGXFirmwareLayout = pbRGXFirmwareInfo + psInfoHeader->ui32HeaderLen; + + for (i = 0; i < ui32LayoutEntryNum; i++) + { + RGX_FW_LAYOUT_ENTRY *psOutEntry = &asRGXFWLayoutTable[i]; + + RGX_FW_LAYOUT_ENTRY *psInEntry = (RGX_FW_LAYOUT_ENTRY*) + (pbRGXFirmwareLayout + i * psInfoHeader->ui32LayoutEntrySize); + + RGXMemCopy(hPrivate, + (void*)psOutEntry, + (void*)psInEntry, + sizeof(RGX_FW_LAYOUT_ENTRY)); + } + + + /* Calculate how much memory the FW needs for its code and data segments */ + + *puiFWCodeAllocSize = 0; + *puiFWDataAllocSize = 0; + *puiFWCorememCodeAllocSize = 0; + *puiFWCorememDataAllocSize = 0; + + for (i = 0; i < ui32LayoutEntryNum; i++) + { + switch (asRGXFWLayoutTable[i].eType) + { + case FW_CODE: + *puiFWCodeAllocSize += asRGXFWLayoutTable[i].ui32AllocSize; + break; + + case FW_DATA: + *puiFWDataAllocSize += asRGXFWLayoutTable[i].ui32AllocSize; + break; + + case FW_COREMEM_CODE: + *puiFWCorememCodeAllocSize += asRGXFWLayoutTable[i].ui32AllocSize; + break; + + case FW_COREMEM_DATA: + *puiFWCorememDataAllocSize += asRGXFWLayoutTable[i].ui32AllocSize; + break; + + default: + RGXErrorLog(hPrivate, "%s: Unknown FW section type %u\n", + __func__, asRGXFWLayoutTable[i].eType); + break; + } + } + + return PVRSRV_OK; +} + + +PVRSRV_ERROR RGXProcessFWImage(const void *hPrivate, + const IMG_BYTE *pbRGXFirmware, + void *pvFWCode, + void *pvFWData, + void *pvFWCorememCode, + void *pvFWCorememData, + RGX_FW_BOOT_PARAMS *puFWParams) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 *pui32BootConf = NULL; + IMG_BOOL bRISCV = RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR); + IMG_BOOL bMETA = !bRISCV; + + if (bMETA) + { + /* Skip bootloader configuration if a pointer to the FW code 
+ * allocation is not available + */ + if (pvFWCode) + { + /* This variable points to the bootloader code which is mostly + * a sequence of pairs + */ + pui32BootConf = ((IMG_UINT32*) pvFWCode) + RGXFW_BOOTLDR_CONF_OFFSET; + + /* Slave port and JTAG accesses are privileged */ + *pui32BootConf++ = META_CR_SYSC_JTAG_THREAD; + *pui32BootConf++ = META_CR_SYSC_JTAG_THREAD_PRIV_EN; + + RGXFWConfigureSegMMU(hPrivate, + &puFWParams->sMeta.sFWCodeDevVAddr, + &puFWParams->sMeta.sFWDataDevVAddr, + &pui32BootConf); + } + + /* Process FW image data stream */ + eError = ProcessLDRCommandStream(hPrivate, + pbRGXFirmware, + pvFWCode, + pvFWData, + pvFWCorememCode, + pvFWCorememData, + &pui32BootConf); + if (eError != PVRSRV_OK) + { + RGXErrorLog(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError); + return eError; + } + + + /* Skip bootloader configuration if a pointer to the FW code + * allocation is not available + */ + if (pvFWCode) + { + IMG_UINT32 ui32NumThreads = puFWParams->sMeta.ui32NumThreads; + + if ((ui32NumThreads == 0) || (ui32NumThreads > 2)) + { + RGXErrorLog(hPrivate, + "ProcessFWImage: Wrong Meta threads configuration, using one thread only"); + + ui32NumThreads = 1; + } + + RGXFWConfigureMetaCaches(hPrivate, + ui32NumThreads, + &pui32BootConf); + + /* Signal the end of the conf sequence */ + *pui32BootConf++ = 0x0; + *pui32BootConf++ = 0x0; + + if (puFWParams->sMeta.uiFWCorememCodeSize && (puFWParams->sMeta.sFWCorememCodeFWAddr.ui32Addr != 0)) + { + *pui32BootConf++ = puFWParams->sMeta.sFWCorememCodeFWAddr.ui32Addr; + *pui32BootConf++ = puFWParams->sMeta.uiFWCorememCodeSize; + } + else + { + *pui32BootConf++ = 0; + *pui32BootConf++ = 0; + } + + if (RGX_DEVICE_HAS_FEATURE(hPrivate, META_DMA)) + { + *pui32BootConf++ = (IMG_UINT32) (puFWParams->sMeta.sFWCorememCodeDevVAddr.uiAddr >> 32); + *pui32BootConf++ = (IMG_UINT32) puFWParams->sMeta.sFWCorememCodeDevVAddr.uiAddr; + } + else + { + *pui32BootConf++ = 0; + *pui32BootConf++ = 0; + } + } + } + 
else + { + /* Process FW image data stream */ + eError = ProcessELFCommandStream(hPrivate, + pbRGXFirmware, + pvFWCode, + pvFWData, + pvFWCorememCode, + pvFWCorememData); + if (eError != PVRSRV_OK) + { + RGXErrorLog(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError); + return eError; + } + + if (pvFWData) + { + RGXRISCVFW_BOOT_DATA *psBootData = (RGXRISCVFW_BOOT_DATA*) + IMG_OFFSET_ADDR(pvFWData, RGXRISCVFW_BOOTLDR_CONF_OFFSET); + + psBootData->ui64CorememCodeDevVAddr = puFWParams->sRISCV.sFWCorememCodeDevVAddr.uiAddr; + psBootData->ui32CorememCodeFWAddr = puFWParams->sRISCV.sFWCorememCodeFWAddr.ui32Addr; + psBootData->ui32CorememCodeSize = puFWParams->sRISCV.uiFWCorememCodeSize; + + psBootData->ui64CorememDataDevVAddr = puFWParams->sRISCV.sFWCorememDataDevVAddr.uiAddr; + psBootData->ui32CorememDataFWAddr = puFWParams->sRISCV.sFWCorememDataFWAddr.ui32Addr; + psBootData->ui32CorememDataSize = puFWParams->sRISCV.uiFWCorememDataSize; + } + } + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxfwimageutils.h b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxfwimageutils.h new file mode 100644 index 000000000000..e066184ce5a5 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxfwimageutils.h @@ -0,0 +1,251 @@ +/*************************************************************************/ /*! +@File +@Title Header for Services Firmware image utilities used at init time +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for Services Firmware image utilities used at init time +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXFWIMAGEUTILS_H +#define RGXFWIMAGEUTILS_H + +/* The routines declared here are built on top of an abstraction layer to + * hide DDK/OS-specific details in case they are used outside of the DDK + * (e.g. when DRM security is enabled). + * Any new dependency should be added to rgxlayer.h. + * Any new code should be built on top of the existing abstraction layer, + * which should be extended when necessary. + */ +#include "rgxlayer.h" + + +typedef union _RGX_FW_BOOT_PARAMS_ +{ + struct + { + IMG_DEV_VIRTADDR sFWCodeDevVAddr; + IMG_DEV_VIRTADDR sFWDataDevVAddr; + IMG_DEV_VIRTADDR sFWCorememCodeDevVAddr; + RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr; + IMG_DEVMEM_SIZE_T uiFWCorememCodeSize; + IMG_DEV_VIRTADDR sFWCorememDataDevVAddr; + RGXFWIF_DEV_VIRTADDR sFWCorememDataFWAddr; + IMG_UINT32 ui32NumThreads; + } sMeta; + + struct + { + IMG_DEV_VIRTADDR sFWCorememCodeDevVAddr; + RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr; + IMG_DEVMEM_SIZE_T uiFWCorememCodeSize; + + IMG_DEV_VIRTADDR sFWCorememDataDevVAddr; + RGXFWIF_DEV_VIRTADDR sFWCorememDataFWAddr; + IMG_DEVMEM_SIZE_T uiFWCorememDataSize; + } sRISCV; + +} RGX_FW_BOOT_PARAMS; + +/*! +******************************************************************************* + + @Function RGXGetFWImageSectionOffset + + @Input hPrivate : Implementation specific data + @Input eId : Section id + + @Description Return offset of a Firmware section, relative to the beginning + of the code or data allocation (depending on the section id) + +******************************************************************************/ +IMG_UINT32 RGXGetFWImageSectionOffset(const void *hPrivate, + RGX_FW_SECTION_ID eId); + +/*! 
+******************************************************************************* + + @Function RGXGetFWImageSectionMaxSize + + @Input hPrivate : Implementation specific data + @Input eId : Section id + + @Description Return maximum size (not allocation size) of a Firmware section + +******************************************************************************/ +IMG_UINT32 RGXGetFWImageSectionMaxSize(const void *hPrivate, + RGX_FW_SECTION_ID eId); + +/*! +******************************************************************************* + + @Function RGXGetFWImageSectionAllocSize + + @Input hPrivate : Implementation specific data + @Input eId : Section id + + @Description Return allocation size of a Firmware section + +******************************************************************************/ +IMG_UINT32 RGXGetFWImageSectionAllocSize(const void *hPrivate, + RGX_FW_SECTION_ID eId); + +/*! +******************************************************************************* + + @Function RGXGetFWImageSectionAddress + + @Input hPrivate : Implementation specific data + @Input eId : Section id + + @Description Return base address of a Firmware section + +******************************************************************************/ +IMG_UINT32 RGXGetFWImageSectionAddress(const void *hPrivate, + RGX_FW_SECTION_ID eId); + +/*! 
+******************************************************************************* + + @Function RGXGetFWImageAllocSize + + @Description Return size of Firmware code/data/coremem code allocations + + @Input hPrivate : Implementation specific data + @Input pbRGXFirmware : Pointer to FW binary + @Input ui32RGXFirmwareSize : FW binary size + @Output puiFWCodeAllocSize : Code size + @Output puiFWDataAllocSize : Data size + @Output puiFWCorememCodeAllocSize : Coremem code size (0 if N/A) + @Output puiFWCorememDataAllocSize : Coremem data size (0 if N/A) + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate, + const IMG_BYTE *pbRGXFirmware, + const IMG_UINT32 ui32RGXFirmwareSize, + IMG_DEVMEM_SIZE_T *puiFWCodeAllocSize, + IMG_DEVMEM_SIZE_T *puiFWDataAllocSize, + IMG_DEVMEM_SIZE_T *puiFWCorememCodeAllocSize, + IMG_DEVMEM_SIZE_T *puiFWCorememDataAllocSize); + +/*! +******************************************************************************* + + @Function ProcessLDRCommandStream + + @Description Process the output of the Meta toolchain in the .LDR format + copying code and data sections into their final location and + passing some information to the Meta bootloader + + @Input hPrivate : Implementation specific data + @Input pbLDR : Pointer to FW blob + @Input pvHostFWCodeAddr : Pointer to FW code + @Input pvHostFWDataAddr : Pointer to FW data + @Input pvHostFWCorememCodeAddr : Pointer to FW coremem code + @Input pvHostFWCorememDataAddr : Pointer to FW coremem data + @Input ppui32BootConf : Pointer to bootloader data + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR ProcessLDRCommandStream(const void *hPrivate, + const IMG_BYTE* pbLDR, + void* pvHostFWCodeAddr, + void* pvHostFWDataAddr, + void* pvHostFWCorememCodeAddr, + void* pvHostFWCorememDataAddr, + IMG_UINT32 **ppui32BootConf); + +/*! 
+******************************************************************************* + + @Function ProcessELFCommandStream + + @Description Process a file in .ELF format copying code and data sections + into their final location + + @Input hPrivate : Implementation specific data + @Input pbELF : Pointer to FW blob + @Input pvHostFWCodeAddr : Pointer to FW code + @Input pvHostFWDataAddr : Pointer to FW data + @Input pvHostFWCorememCodeAddr : Pointer to FW coremem code + @Input pvHostFWCorememDataAddr : Pointer to FW coremem data + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR ProcessELFCommandStream(const void *hPrivate, + const IMG_BYTE *pbELF, + void *pvHostFWCodeAddr, + void *pvHostFWDataAddr, + void* pvHostFWCorememCodeAddr, + void* pvHostFWCorememDataAddr); + +/*! +******************************************************************************* + + @Function RGXProcessFWImage + + @Description Process the Firmware binary blob copying code and data + sections into their final location and passing some + information to the Firmware bootloader. + If a pointer to the final memory location for FW code or data + is not valid (NULL) then the relative section will not be + processed. 
+ + @Input hPrivate : Implementation specific data + @Input pbRGXFirmware : Pointer to FW blob + @Input pvFWCode : Pointer to FW code + @Input pvFWData : Pointer to FW data + @Input pvFWCorememCode : Pointer to FW coremem code + @Input pvFWCorememData : Pointer to FW coremem data + @Input puFWParams : Parameters used by the FW at boot time + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXProcessFWImage(const void *hPrivate, + const IMG_BYTE *pbRGXFirmware, + void *pvFWCode, + void *pvFWData, + void *pvFWCorememCode, + void *pvFWCorememData, + RGX_FW_BOOT_PARAMS *puFWParams); + +#endif /* RGXFWIMAGEUTILS_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxfwutils.c b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxfwutils.c new file mode 100644 index 000000000000..7ca8c4de1199 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxfwutils.c @@ -0,0 +1,6240 @@ +/*************************************************************************/ /*! +@File +@Title Rogue firmware utility routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Rogue firmware utility routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if defined(LINUX) +#include +#else +#include +#endif + +#include "img_defs.h" + +#include "rgxdefs_km.h" +#include "rgx_fwif_km.h" +#include "pdump_km.h" +#include "osfunc.h" +#if defined(LINUX) +#include "km_apphint.h" +#endif +#include "cache_km.h" +#include "allocmem.h" +#include "physheap.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "devicemem_server.h" + +#include "pvr_debug.h" +#include "pvr_notifier.h" +#include "rgxfwutils.h" +#include "rgx_options.h" +#include "rgx_fwif_alignchecks.h" +#include "rgx_pdump_panics.h" +#include "fwtrace_string.h" +#include "rgxheapconfig.h" +#include "pvrsrv.h" +#include "rgxdebug.h" +#include "rgxhwperf.h" +#include "rgxccb.h" +#include "rgxcompute.h" +#include "rgxtdmtransfer.h" +#include "rgxpower.h" +#if defined(SUPPORT_DISPLAY_CLASS) +#include "dc_server.h" +#endif +#include "rgxmem.h" +#include "rgxta3d.h" +#include "rgxkicksync.h" +#include "rgxutils.h" +#include "rgxtimecorr.h" +#include "rgxfwimageutils.h" +#include "sync_internal.h" +#include "sync.h" +#include "sync_checkpoint.h" +#include "sync_checkpoint_external.h" +#include "tlstream.h" +#include "devicemem_server_utils.h" +#include "htbuffer.h" +#include "info_page.h" + +#include "physmem_lma.h" +#include "physmem_osmem.h" +#include "oskm_apphint.h" + +#ifdef __linux__ +#include /* sprintf */ +#include "rogue_trace_events.h" +#else +#include +#endif +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#endif + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +#include "rgxworkest.h" +#endif + +#if defined(SUPPORT_PDVFS) +#include "rgxpdvfs.h" +#endif + +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) +#if defined(SUPPORT_BUFFER_SYNC) +#include "pvr_buffer_sync.h" +#endif +#endif + +#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) +#include "validation_soc.h" +#endif + +#include "vz_vmm_pvz.h" +#include "rgx_heaps.h" + +#if defined(DEBUG) 
+/* Catch the use of auto-increment when meta_registers_unpacked_accesses feature is + * present in case we ever use it. No WA exists so it must not be used */ +#define CHECK_HWBRN_68777(v) \ + do { \ + PVR_ASSERT(((v) & RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN) == 0); \ + } while (0) +#else +#define CHECK_HWBRN_68777(v) +#endif + +/* Kernel CCB length */ +#define RGXFWIF_KCCB_NUMCMDS_LOG2 (7) + +/* Firmware CCB length */ +#if defined(NO_HARDWARE) && defined(PDUMP) +#define RGXFWIF_FWCCB_NUMCMDS_LOG2 (10) +#elif defined(SUPPORT_PDVFS) || defined(SUPPORT_WORKLOAD_ESTIMATION) +#define RGXFWIF_FWCCB_NUMCMDS_LOG2 (8) +#else +#define RGXFWIF_FWCCB_NUMCMDS_LOG2 (5) +#endif + +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) +/* Checkpoint CCB length */ +#define RGXFWIF_CHECKPOINTCCB_NUMCMDS_LOG2 (10) +#endif + +/* + * Maximum length of time a DM can run for before the DM will be marked + * as out-of-time. CDM has an increased value due to longer running kernels. + * + * These deadlines are increased on FPGA, EMU and VP due to the slower + * execution time of these platforms. + */ +#if defined(FPGA) || defined(EMULATOR) || defined(VIRTUAL_PLATFORM) +#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS (60000) +#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (250000) +#else +#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS (30000) +#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (90000) +#endif + +/* Workload Estimation Firmware CCB length */ +#define RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2 (7) + +/* Size of memory buffer for firmware gcov data + * The actual data size is several hundred kilobytes. The buffer is an order of magnitude larger. */ +#define RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE (4*1024*1024) + +#if defined(CONFIG_L4) && !defined(RGX_FEATURE_GPU_VIRTUALISATION) && defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) +#define MTS_SCHEDULE_DM_VAL (RGXFWIF_DM_GP + PVRSRV_VZ_MODE_IS(GUEST) ? 
(1) : (0)) +#else +#define MTS_SCHEDULE_DM_VAL (RGXFWIF_DM_GP) +#endif + +typedef struct +{ + RGXFWIF_KCCB_CMD sKCCBcmd; + DLLIST_NODE sListNode; + PDUMP_FLAGS_T uiPdumpFlags; + PVRSRV_RGXDEV_INFO *psDevInfo; +} RGX_DEFERRED_KCCB_CMD; + +#if defined(PDUMP) +/* ensure PIDs are 32-bit because a 32-bit PDump load is generated for the + * PID filter example entries + */ +static_assert(sizeof(IMG_PID) == sizeof(IMG_UINT32), + "FW PID filtering assumes the IMG_PID type is 32-bits wide as it " + "generates WRW commands for loading the PID values"); +#endif + +static void RGXFreeFwOsData(PVRSRV_RGXDEV_INFO *psDevInfo); +static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo); + +static PVRSRV_ERROR _AllocateSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo, RGXFWIF_SYSINIT* psFwSysInit) +{ + PVRSRV_ERROR eError; + DEVMEM_MEMDESC** ppsSLC3FenceMemDesc = &psDevInfo->psSLC3FenceMemDesc; + IMG_UINT32 ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE( + RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)); + + PVR_DPF_ENTERED; + + eError = DevmemAllocate(psDevInfo->psFirmwareMainHeap, + 1, + ui32CacheLineSize, + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_UNCACHED | + PVRSRV_MEMALLOCFLAG_FW_ALLOC_MAIN, + "FwSLC3FenceWA", + ppsSLC3FenceMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF_RETURN_RC(eError); + } + + /* We need to map it so the heap for this allocation is set */ + eError = DevmemMapToDevice(*ppsSLC3FenceMemDesc, + psDevInfo->psFirmwareMainHeap, + &psFwSysInit->sSLC3FenceDevVAddr); + if (eError != PVRSRV_OK) + { + DevmemFree(*ppsSLC3FenceMemDesc); + *ppsSLC3FenceMemDesc = NULL; + } + + PVR_DPF_RETURN_RC1(eError, *ppsSLC3FenceMemDesc); +} + +static void _FreeSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo) +{ + DEVMEM_MEMDESC* psSLC3FenceMemDesc = psDevInfo->psSLC3FenceMemDesc; + + if (psSLC3FenceMemDesc) + { + DevmemReleaseDevVirtAddr(psSLC3FenceMemDesc); + DevmemFree(psSLC3FenceMemDesc); + } +} + +static void 
__MTSScheduleWrite(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Value) +{ + /* ensure memory is flushed before kicking MTS */ + OSWriteMemoryBarrier(); + + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE, ui32Value); + + /* ensure the MTS kick goes through before continuing */ + OSMemoryBarrier(); +} + +/*************************************************************************/ /*! +@Function RGXSetupFwAllocation + +@Description Sets a pointer in a firmware data structure. + +@Input psDevInfo Device Info struct +@Input uiAllocFlags Flags determining type of memory allocation +@Input ui32Size Size of memory allocation +@Input pszName Allocation label +@Input ppsMemDesc pointer to the allocation's memory descriptor +@Input psFwPtr Address of the firmware pointer to set +@Input ppvCpuPtr Address of the cpu pointer to set +@Input ui32DevVAFlags Any combination of RFW_FWADDR_*_FLAG + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXSetupFwAllocation(PVRSRV_RGXDEV_INFO* psDevInfo, + DEVMEM_FLAGS_T uiAllocFlags, + IMG_UINT32 ui32Size, + const IMG_CHAR *pszName, + DEVMEM_MEMDESC **ppsMemDesc, + RGXFWIF_DEV_VIRTADDR *psFwPtr, + void **ppvCpuPtr, + IMG_UINT32 ui32DevVAFlags) +{ + PVRSRV_ERROR eError; +#if defined(SUPPORT_AUTOVZ) + IMG_BOOL bClearByMemset = ((uiAllocFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) != 0); + + /* Under AutoVz the ZERO_ON_ALLOC flag is avoided as it causes the memory to + * be allocated from a different PMR than an allocation without the flag. + * When the content of an allocation needs to be recovered from physical memory + * on a later driver reboot, the memory then cannot be zeroed but the allocation + * addresses must still match. + * If the memory requires clearing, perform a memset after the allocation. 
*/ + uiAllocFlags &= ~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC; +#endif + + PDUMPCOMMENT("Allocate %s", pszName); + eError = DevmemFwAllocate(psDevInfo, + ui32Size, + uiAllocFlags, + pszName, + ppsMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate %u bytes for %s (%u)", + __func__, + ui32Size, + pszName, + eError)); + goto fail_alloc; + } + + if (psFwPtr) + { + eError = RGXSetFirmwareAddress(psFwPtr, *ppsMemDesc, 0, ui32DevVAFlags); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire firmware virtual address for %s (%u)", + __func__, + pszName, + eError)); + goto fail_fwaddr; + } + } + +#if defined(SUPPORT_AUTOVZ) + if ((bClearByMemset) || (ppvCpuPtr)) +#else + if (ppvCpuPtr) +#endif + { + void *pvTempCpuPtr; + + eError = DevmemAcquireCpuVirtAddr(*ppsMemDesc, &pvTempCpuPtr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire CPU virtual address for %s (%u)", + __func__, + pszName, + eError)); + goto fail_cpuva; + } + +#if defined(SUPPORT_AUTOVZ) + if (bClearByMemset) + { + OSDeviceMemSet(pvTempCpuPtr, 0, ui32Size); + } + if (ppvCpuPtr) +#endif + { + *ppvCpuPtr = pvTempCpuPtr; + } +#if defined(SUPPORT_AUTOVZ) + else + { + DevmemReleaseCpuVirtAddr(*ppsMemDesc); + pvTempCpuPtr = NULL; + } +#endif + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: %s set up at Fw VA 0x%x and CPU VA 0x%p", + __func__, pszName, + (psFwPtr) ? (psFwPtr->ui32Addr) : (0), + (ppvCpuPtr) ? (*ppvCpuPtr) : (NULL))); + + return eError; + +fail_cpuva: + if (psFwPtr) + { + RGXUnsetFirmwareAddress(*ppsMemDesc); + } +fail_fwaddr: + DevmemFree(*ppsMemDesc); +fail_alloc: + return eError; +} + +/*************************************************************************/ /*! 
+@Function GetHwPerfBufferSize + +@Description Computes the effective size of the HW Perf Buffer +@Input ui32HWPerfFWBufSizeKB Device Info struct +@Return HwPerfBufferSize +*/ /**************************************************************************/ +static IMG_UINT32 GetHwPerfBufferSize(IMG_UINT32 ui32HWPerfFWBufSizeKB) +{ + IMG_UINT32 HwPerfBufferSize; + + /* HWPerf: Determine the size of the FW buffer */ + if (ui32HWPerfFWBufSizeKB == 0 || + ui32HWPerfFWBufSizeKB == RGXFW_HWPERF_L1_SIZE_DEFAULT) + { + /* Under pvrsrvctl 0 size implies AppHint not set or is set to zero, + * use default size from driver constant. Set it to the default + * size, no logging. + */ + HwPerfBufferSize = RGXFW_HWPERF_L1_SIZE_DEFAULT<<10; + } + else if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MAX)) + { + /* Size specified as a AppHint but it is too big */ + PVR_DPF((PVR_DBG_WARNING, + "%s: HWPerfFWBufSizeInKB value (%u) too big, using maximum (%u)", + __func__, + ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MAX)); + HwPerfBufferSize = RGXFW_HWPERF_L1_SIZE_MAX<<10; + } + else if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MIN)) + { + /* Size specified as in AppHint HWPerfFWBufSizeInKB */ + PVR_DPF((PVR_DBG_WARNING, + "%s: Using HWPerf FW buffer size of %u KB", + __func__, + ui32HWPerfFWBufSizeKB)); + HwPerfBufferSize = ui32HWPerfFWBufSizeKB<<10; + } + else + { + /* Size specified as a AppHint but it is too small */ + PVR_DPF((PVR_DBG_WARNING, + "%s: HWPerfFWBufSizeInKB value (%u) too small, using minimum (%u)", + __func__, + ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MIN)); + HwPerfBufferSize = RGXFW_HWPERF_L1_SIZE_MIN<<10; + } + + return HwPerfBufferSize; +} + +#if defined(PDUMP) +/*! 
+******************************************************************************* + @Function RGXFWSetupSignatureChecks + @Description + @Input psDevInfo + + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR RGXFWSetupSignatureChecks(PVRSRV_RGXDEV_INFO* psDevInfo, + DEVMEM_MEMDESC** ppsSigChecksMemDesc, + IMG_UINT32 ui32SigChecksBufSize, + RGXFWIF_SIGBUF_CTL* psSigBufCtl, + const IMG_CHAR* pszBufferName) +{ + PVRSRV_ERROR eError; + + /* Allocate memory for the checks */ + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS, + ui32SigChecksBufSize, + "FwSignatureChecks", + ppsSigChecksMemDesc, + &psSigBufCtl->sBuffer, + NULL, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); + + DevmemPDumpLoadMem( *ppsSigChecksMemDesc, + 0, + ui32SigChecksBufSize, + PDUMP_FLAGS_CONTINUOUS); + + psSigBufCtl->ui32LeftSizeInRegs = ui32SigChecksBufSize / sizeof(IMG_UINT32); +fail: + return eError; +} +#endif + + +#if defined(SUPPORT_FIRMWARE_GCOV) +/*! 
+******************************************************************************* + @Function RGXFWSetupFirmwareGcovBuffer + @Description + @Input psDevInfo + + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR RGXFWSetupFirmwareGcovBuffer(PVRSRV_RGXDEV_INFO* psDevInfo, + DEVMEM_MEMDESC** ppsBufferMemDesc, + IMG_UINT32 ui32FirmwareGcovBufferSize, + RGXFWIF_FIRMWARE_GCOV_CTL* psFirmwareGcovCtl, + const IMG_CHAR* pszBufferName) +{ + PVRSRV_ERROR eError; + + /* Allocate memory for gcov */ + eError = RGXSetupFwAllocation(psDevInfo, + (RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)), + ui32FirmwareGcovBufferSize, + pszBufferName, + ppsBufferMemDesc, + &psFirmwareGcovCtl->sBuffer, + NULL, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); + + psFirmwareGcovCtl->ui32Size = ui32FirmwareGcovBufferSize; + + return PVRSRV_OK; +} +#endif + +/*! +******************************************************************************* + @Function RGXFWSetupAlignChecks + @Description This functions allocates and fills memory needed for the + aligns checks of the UM and KM structures shared with the + firmware. The format of the data in the memory is as follows: + + + + + The UM array is passed from the user side. Now the firmware is + is responsible for filling this part of the memory. If that + happens the check of the UM structures will be performed + by the host driver on client's connect. + If the macro is not defined the client driver fills the memory + and the firmware checks for the alignment of all structures. 
+ @Input psDeviceNode + + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR RGXFWSetupAlignChecks(PVRSRV_DEVICE_NODE *psDeviceNode, + RGXFWIF_DEV_VIRTADDR *psAlignChecksDevFW, + IMG_UINT32 *pui32RGXFWAlignChecks, + IMG_UINT32 ui32RGXFWAlignChecksArrLength) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT32 aui32RGXFWAlignChecksKM[] = { RGXFW_ALIGN_CHECKS_INIT_KM }; + IMG_UINT32 ui32RGXFWAlingChecksTotal; + IMG_UINT32* paui32AlignChecks; + PVRSRV_ERROR eError; + + /* In this case we don't know the number of elements in UM array. + * We have to assume something so we assume RGXFW_ALIGN_CHECKS_UM_MAX. + */ + PVR_ASSERT(ui32RGXFWAlignChecksArrLength == 0); + ui32RGXFWAlingChecksTotal = sizeof(aui32RGXFWAlignChecksKM) + + RGXFW_ALIGN_CHECKS_UM_MAX * sizeof(IMG_UINT32) + + 2 * sizeof(IMG_UINT32); + + /* Allocate memory for the checks */ + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + ui32RGXFWAlingChecksTotal, + "FwAlignmentChecks", + &psDevInfo->psRGXFWAlignChecksMemDesc, + psAlignChecksDevFW, + (void**) &paui32AlignChecks, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); + + if (!psDeviceNode->bAutoVzFwIsUp) + { + /* Copy the values */ + *paui32AlignChecks++ = ARRAY_SIZE(aui32RGXFWAlignChecksKM); + OSDeviceMemCopy(paui32AlignChecks, &aui32RGXFWAlignChecksKM[0], sizeof(aui32RGXFWAlignChecksKM)); + paui32AlignChecks += ARRAY_SIZE(aui32RGXFWAlignChecksKM); + + *paui32AlignChecks = 0; + } + + DevmemPDumpLoadMem( psDevInfo->psRGXFWAlignChecksMemDesc, + 0, + ui32RGXFWAlingChecksTotal, + PDUMP_FLAGS_CONTINUOUS); + + return PVRSRV_OK; + +fail: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +static void RGXFWFreeAlignChecks(PVRSRV_RGXDEV_INFO* psDevInfo) +{ + if (psDevInfo->psRGXFWAlignChecksMemDesc != NULL) + { + 
DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWAlignChecksMemDesc); + psDevInfo->psRGXFWAlignChecksMemDesc = NULL; + } +} + +PVRSRV_ERROR RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR *ppDest, + DEVMEM_MEMDESC *psSrc, + IMG_UINT32 uiExtraOffset, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eError; + IMG_DEV_VIRTADDR psDevVirtAddr; + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_RGXDEV_INFO *psDevInfo; + + psDeviceNode = (PVRSRV_DEVICE_NODE *) DevmemGetConnection(psSrc); + psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + IMG_UINT32 ui32Offset; + IMG_BOOL bCachedInMETA; + DEVMEM_FLAGS_T uiDevFlags; + IMG_UINT32 uiGPUCacheMode; + + eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireDevVirtAddr", failDevVAAcquire); + + /* Convert to an address in META memmap */ + ui32Offset = psDevVirtAddr.uiAddr + uiExtraOffset - RGX_FIRMWARE_RAW_HEAP_BASE; + + /* Check in the devmem flags whether this memory is cached/uncached */ + DevmemGetFlags(psSrc, &uiDevFlags); + + /* Honour the META cache flags */ + bCachedInMETA = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) & uiDevFlags) != 0; + + /* Honour the SLC cache flags */ + eError = DevmemDeviceCacheMode(psDeviceNode, uiDevFlags, &uiGPUCacheMode); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemDeviceCacheMode", failDevCacheMode); + + /* + * Choose Meta virtual address based on Meta and SLC cacheability. 
+ */ + ui32Offset += RGXFW_SEGMMU_DATA_BASE_ADDRESS; + + if (bCachedInMETA) + { + ui32Offset |= RGXFW_SEGMMU_DATA_META_CACHED; + } + else + { + ui32Offset |= RGXFW_SEGMMU_DATA_META_UNCACHED; + } + + if (PVRSRV_CHECK_GPU_CACHED(uiGPUCacheMode)) + { + ui32Offset |= RGXFW_SEGMMU_DATA_VIVT_SLC_CACHED; + } + else + { + ui32Offset |= RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED; + } + + ppDest->ui32Addr = ui32Offset; + } + else + { + IMG_UINT32 ui32Offset; + IMG_BOOL bCachedInRISCV; + DEVMEM_FLAGS_T uiDevFlags; + + eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireDevVirtAddr", failDevVAAcquire); + + /* Convert to an address in RISCV memmap */ + ui32Offset = psDevVirtAddr.uiAddr + uiExtraOffset - RGX_FIRMWARE_RAW_HEAP_BASE; + + /* Check in the devmem flags whether this memory is cached/uncached */ + DevmemGetFlags(psSrc, &uiDevFlags); + + /* Honour the RISCV cache flags */ + bCachedInRISCV = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) & uiDevFlags) != 0; + + if (bCachedInRISCV) + { + ui32Offset |= RGXRISCVFW_SHARED_CACHED_DATA_BASE; + } + else + { + ui32Offset |= RGXRISCVFW_SHARED_UNCACHED_DATA_BASE; + } + + ppDest->ui32Addr = ui32Offset; + } + + if ((ppDest->ui32Addr & 0x3U) != 0) + { + IMG_CHAR *pszAnnotation; + /* It is expected that the annotation returned by DevmemGetAnnotation() is always valid */ + DevmemGetAnnotation(psSrc, &pszAnnotation); + + PVR_DPF((PVR_DBG_ERROR, "%s: %s @ 0x%x is not aligned to 32 bit", + __func__, pszAnnotation, ppDest->ui32Addr)); + + return PVRSRV_ERROR_INVALID_ALIGNMENT; + } + + if (ui32Flags & RFW_FWADDR_NOREF_FLAG) + { + DevmemReleaseDevVirtAddr(psSrc); + } + + return PVRSRV_OK; + +failDevCacheMode: + DevmemReleaseDevVirtAddr(psSrc); +failDevVAAcquire: + return eError; +} + +void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR *psDest, + DEVMEM_MEMDESC *psSrcMemDesc, + RGXFWIF_DEV_VIRTADDR *psSrcFWDevVAddr, + IMG_UINT32 uiOffset) +{ + PVRSRV_ERROR eError; + IMG_DEV_VIRTADDR sDevVirtAddr; + + 
eError = DevmemAcquireDevVirtAddr(psSrcMemDesc, &sDevVirtAddr); + PVR_ASSERT(eError == PVRSRV_OK); + + psDest->psDevVirtAddr.uiAddr = sDevVirtAddr.uiAddr; + psDest->psDevVirtAddr.uiAddr += uiOffset; + psDest->pbyFWAddr.ui32Addr = psSrcFWDevVAddr->ui32Addr; + + DevmemReleaseDevVirtAddr(psSrcMemDesc); +} + + +void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc) +{ + DevmemReleaseDevVirtAddr(psSrc); +} + +PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Wait for Slave Port to be Ready */ + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) + { + eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN); + if (eError == PVRSRV_OK) + { + /* Issue a Write */ + CHECK_HWBRN_68777(ui32RegAddr); + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES, ui32RegAddr); + (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES); /* Fence write */ + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT__META_REGISTER_UNPACKED_ACCESSES, ui32RegValue); + (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT__META_REGISTER_UNPACKED_ACCESSES); /* Fence write */ + } + } + else + { + eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); + if (eError == PVRSRV_OK) + { + /* Issue a Write */ + RGXWriteReg32(hPrivate, 
RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr); + (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */ + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT, ui32RegValue); + (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT); /* Fence write */ + } + } + + return eError; +} + +PVRSRV_ERROR RGXReadMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32* ui32RegValue) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Wait for Slave Port to be Ready */ + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) + { + eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN); + if (eError == PVRSRV_OK) + { + /* Issue a Read */ + CHECK_HWBRN_68777(ui32RegAddr); + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES, + ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__RD_EN); + (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES); /* Fence write */ + + /* Wait for Slave Port to be Ready */ + eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN); + if (eError != PVRSRV_OK) return eError; + } +#if !defined(NO_HARDWARE) + *ui32RegValue = RGXReadReg32(hPrivate, 
RGX_CR_META_SP_MSLVDATAX__META_REGISTER_UNPACKED_ACCESSES); +#else + *ui32RegValue = 0xFFFFFFFF; +#endif + } + else + { + eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); + if (eError == PVRSRV_OK) + { + /* Issue a Read */ + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN); + (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */ + + /* Wait for Slave Port to be Ready */ + eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); + if (eError != PVRSRV_OK) return eError; + } +#if !defined(NO_HARDWARE) + *ui32RegValue = RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAX); +#else + *ui32RegValue = 0xFFFFFFFF; +#endif + } + + return eError; +} + + +struct _RGX_SERVER_COMMON_CONTEXT_ { + PVRSRV_RGXDEV_INFO *psDevInfo; + DEVMEM_MEMDESC *psFWCommonContextMemDesc; + PRGXFWIF_FWCOMMONCONTEXT sFWCommonContextFWAddr; + SERVER_MMU_CONTEXT *psServerMMUContext; + DEVMEM_MEMDESC *psFWMemContextMemDesc; + DEVMEM_MEMDESC *psContextStateMemDesc; + RGX_CLIENT_CCB *psClientCCB; + DEVMEM_MEMDESC *psClientCCBMemDesc; + DEVMEM_MEMDESC *psClientCCBCtrlMemDesc; + IMG_BOOL bCommonContextMemProvided; + IMG_UINT32 ui32ContextID; + DLLIST_NODE sListNode; + RGXFWIF_CONTEXT_RESET_REASON eLastResetReason; + IMG_UINT32 ui32LastResetJobRef; +}; + +PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor, + RGXFWIF_DM eDM, + SERVER_MMU_CONTEXT *psServerMMUContext, + DEVMEM_MEMDESC *psAllocatedMemDesc, + IMG_UINT32 ui32AllocatedOffset, + DEVMEM_MEMDESC *psFWMemContextMemDesc, + DEVMEM_MEMDESC *psContextStateMemDesc, + IMG_UINT32 
ui32CCBAllocSize, + IMG_UINT32 ui32CCBMaxAllocSize, + IMG_UINT32 ui32ContextFlags, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32MaxDeadlineMS, + IMG_UINT64 ui64RobustnessAddress, + RGX_COMMON_CONTEXT_INFO *psInfo, + RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; + RGXFWIF_FWCOMMONCONTEXT *psFWCommonContext; + IMG_UINT32 ui32FWCommonContextOffset; + IMG_UINT8 *pui8Ptr; + PVRSRV_ERROR eError; + + /* + * Allocate all the resources that are required + */ + psServerCommonContext = OSAllocMem(sizeof(*psServerCommonContext)); + if (psServerCommonContext == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc; + } + + psServerCommonContext->psDevInfo = psDevInfo; + psServerCommonContext->psServerMMUContext = psServerMMUContext; + + if (psAllocatedMemDesc) + { + PDUMPCOMMENT("Using existing MemDesc for Rogue firmware %s context (offset = %d)", + aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + ui32AllocatedOffset); + ui32FWCommonContextOffset = ui32AllocatedOffset; + psServerCommonContext->psFWCommonContextMemDesc = psAllocatedMemDesc; + psServerCommonContext->bCommonContextMemProvided = IMG_TRUE; + } + else + { + /* Allocate device memory for the firmware context */ + PDUMPCOMMENT("Allocate Rogue firmware %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]); + eError = DevmemFwAllocate(psDevInfo, + sizeof(*psFWCommonContext), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwContext", + &psServerCommonContext->psFWCommonContextMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate firmware %s context (%s)", + __func__, + aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + PVRSRVGetErrorString(eError))); + goto fail_contextalloc; + } + ui32FWCommonContextOffset = 0; + psServerCommonContext->bCommonContextMemProvided = IMG_FALSE; + } + + /* Record this context so we can refer to it if the FW needs 
to tell us it was reset. */ + psServerCommonContext->eLastResetReason = RGXFWIF_CONTEXT_RESET_REASON_NONE; + psServerCommonContext->ui32LastResetJobRef = 0; + psServerCommonContext->ui32ContextID = psDevInfo->ui32CommonCtxtCurrentID++; + + /* + * Temporarily map the firmware context to the kernel and initialise it + */ + eError = DevmemAcquireCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc, + (void **)&pui8Ptr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map firmware %s context to CPU (%s)", + __func__, + aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + PVRSRVGetErrorString(eError))); + goto fail_cpuvirtacquire; + } + + /* Allocate the client CCB */ + eError = RGXCreateCCB(psDevInfo, + ui32CCBAllocSize, + ui32CCBMaxAllocSize, + ui32ContextFlags, + psConnection, + eRGXCCBRequestor, + psServerCommonContext, + &psServerCommonContext->psClientCCB, + &psServerCommonContext->psClientCCBMemDesc, + &psServerCommonContext->psClientCCBCtrlMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to create CCB for %s context (%s)", + __func__, + aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + PVRSRVGetErrorString(eError))); + goto fail_allocateccb; + } + + psFWCommonContext = (RGXFWIF_FWCOMMONCONTEXT *) (pui8Ptr + ui32FWCommonContextOffset); + psFWCommonContext->eDM = eDM; + + /* Set the firmware CCB device addresses in the firmware common context */ + eError = RGXSetFirmwareAddress(&psFWCommonContext->psCCB, + psServerCommonContext->psClientCCBMemDesc, + 0, RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", fail_cccbfwaddr); + + eError = RGXSetFirmwareAddress(&psFWCommonContext->psCCBCtl, + psServerCommonContext->psClientCCBCtrlMemDesc, + 0, RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", fail_cccbctrlfwaddr); + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA)) + { + RGXSetMetaDMAAddress(&psFWCommonContext->sCCBMetaDMAAddr, + 
psServerCommonContext->psClientCCBMemDesc, + &psFWCommonContext->psCCB, + 0); + } + + /* Set the memory context device address */ + psServerCommonContext->psFWMemContextMemDesc = psFWMemContextMemDesc; + eError = RGXSetFirmwareAddress(&psFWCommonContext->psFWMemContext, + psFWMemContextMemDesc, + 0, RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:3", fail_fwmemctxfwaddr); + + /* Store the FWMemContext device virtual address in server mmu context + * to be used in schedule command path */ + RGXSetFWMemContextDevVirtAddr(psServerMMUContext, psFWCommonContext->psFWMemContext); + + psFWCommonContext->ui32Priority = ui32Priority; + psFWCommonContext->ui32PrioritySeqNum = 0; + psFWCommonContext->ui32MaxDeadlineMS = MIN(ui32MaxDeadlineMS, + (eDM == RGXFWIF_DM_CDM ? + RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS : + RGXFWIF_MAX_WORKLOAD_DEADLINE_MS)); + psFWCommonContext->ui64RobustnessAddress = ui64RobustnessAddress; + + /* Store a references to Server Common Context and PID for notifications back from the FW. 
*/ + psFWCommonContext->ui32ServerCommonContextID = psServerCommonContext->ui32ContextID; + psFWCommonContext->ui32PID = OSGetCurrentClientProcessIDKM(); + + /* Set the firmware GPU context state buffer */ + psServerCommonContext->psContextStateMemDesc = psContextStateMemDesc; + if (psContextStateMemDesc) + { + eError = RGXSetFirmwareAddress(&psFWCommonContext->psContextState, + psContextStateMemDesc, + 0, + RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:4", fail_ctxstatefwaddr); + } + + /* + * Dump the created context + */ + PDUMPCOMMENT("Dump %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]); + DevmemPDumpLoadMem(psServerCommonContext->psFWCommonContextMemDesc, + ui32FWCommonContextOffset, + sizeof(*psFWCommonContext), + PDUMP_FLAGS_CONTINUOUS); + + /* We've finished the setup so release the CPU mapping */ + DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc); + + /* Map this allocation into the FW */ + eError = RGXSetFirmwareAddress(&psServerCommonContext->sFWCommonContextFWAddr, + psServerCommonContext->psFWCommonContextMemDesc, + ui32FWCommonContextOffset, + RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:5", fail_fwcommonctxfwaddr); + +#if defined(LINUX) + { + IMG_UINT32 ui32FWAddr; + switch (eDM) { + case RGXFWIF_DM_GEOM: + ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t) + psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, sTAContext)); + break; + case RGXFWIF_DM_3D: + ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t) + psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, s3DContext)); + break; + default: + ui32FWAddr = psServerCommonContext->sFWCommonContextFWAddr.ui32Addr; + break; + } + + trace_rogue_create_fw_context(OSGetCurrentClientProcessNameKM(), + aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + ui32FWAddr); + } +#endif + 
/*Add the node to the list when finalised */ + OSWRLockAcquireWrite(psDevInfo->hCommonCtxtListLock); + dllist_add_to_tail(&(psDevInfo->sCommonCtxtListHead), &(psServerCommonContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hCommonCtxtListLock); + + *ppsServerCommonContext = psServerCommonContext; + return PVRSRV_OK; + +fail_fwcommonctxfwaddr: + if (psContextStateMemDesc) + { + RGXUnsetFirmwareAddress(psContextStateMemDesc); + } +fail_ctxstatefwaddr: + RGXUnsetFirmwareAddress(psFWMemContextMemDesc); +fail_fwmemctxfwaddr: + RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc); +fail_cccbctrlfwaddr: + RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc); +fail_cccbfwaddr: + RGXDestroyCCB(psDevInfo, psServerCommonContext->psClientCCB); +fail_allocateccb: + DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc); +fail_cpuvirtacquire: + RGXUnsetFirmwareAddress(psServerCommonContext->psFWCommonContextMemDesc); + if (!psServerCommonContext->bCommonContextMemProvided) + { + DevmemFwUnmapAndFree(psDevInfo, psServerCommonContext->psFWCommonContextMemDesc); + psServerCommonContext->psFWCommonContextMemDesc = NULL; + } +fail_contextalloc: + OSFreeMem(psServerCommonContext); +fail_alloc: + return eError; +} + +void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) +{ + + OSWRLockAcquireWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock); + /* Remove the context from the list of all contexts. 
*/ + dllist_remove_node(&psServerCommonContext->sListNode); + OSWRLockReleaseWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock); + + /* + Unmap the context itself and then all its resources + */ + + /* Unmap the FW common context */ + RGXUnsetFirmwareAddress(psServerCommonContext->psFWCommonContextMemDesc); + /* Umap context state buffer (if there was one) */ + if (psServerCommonContext->psContextStateMemDesc) + { + RGXUnsetFirmwareAddress(psServerCommonContext->psContextStateMemDesc); + } + /* Unmap client CCB and CCB control */ + RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc); + RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc); + /* Unmap the memory context */ + RGXUnsetFirmwareAddress(psServerCommonContext->psFWMemContextMemDesc); + + /* Destroy the client CCB */ + RGXDestroyCCB(psServerCommonContext->psDevInfo, psServerCommonContext->psClientCCB); + + + /* Free the FW common context (if there was one) */ + if (!psServerCommonContext->bCommonContextMemProvided) + { + DevmemFwUnmapAndFree(psServerCommonContext->psDevInfo, + psServerCommonContext->psFWCommonContextMemDesc); + psServerCommonContext->psFWCommonContextMemDesc = NULL; + } + /* Free the hosts representation of the common context */ + OSFreeMem(psServerCommonContext); +} + +PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) +{ + return psServerCommonContext->sFWCommonContextFWAddr; +} + +RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) +{ + return psServerCommonContext->psClientCCB; +} + +SERVER_MMU_CONTEXT *FWCommonContextGetServerMMUCtx(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) +{ + return psServerCommonContext->psServerMMUContext; +} + +RGXFWIF_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + IMG_UINT32 *pui32LastResetJobRef) +{ + RGXFWIF_CONTEXT_RESET_REASON eLastResetReason; + + 
PVR_ASSERT(psServerCommonContext != NULL); + PVR_ASSERT(pui32LastResetJobRef != NULL); + + /* Take the most recent reason & job ref and reset for next time... */ + eLastResetReason = psServerCommonContext->eLastResetReason; + *pui32LastResetJobRef = psServerCommonContext->ui32LastResetJobRef; + psServerCommonContext->eLastResetReason = RGXFWIF_CONTEXT_RESET_REASON_NONE; + psServerCommonContext->ui32LastResetJobRef = 0; + + if (eLastResetReason == RGXFWIF_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH) + { + PVR_DPF((PVR_DBG_WARNING, + "A Hard Context Switch was triggered on the GPU to ensure Quality of Service.")); + } + + return eLastResetReason; +} + +PVRSRV_RGXDEV_INFO* FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) +{ + return psServerCommonContext->psDevInfo; +} + +PVRSRV_ERROR RGXGetFWCommonContextAddrFromServerMMUCtx(PVRSRV_RGXDEV_INFO *psDevInfo, + SERVER_MMU_CONTEXT *psServerMMUContext, + PRGXFWIF_FWCOMMONCONTEXT *psFWCommonContextFWAddr) +{ + DLLIST_NODE *psNode, *psNext; + dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext) + { + RGX_SERVER_COMMON_CONTEXT *psThisContext = + IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode); + + if (psThisContext->psServerMMUContext == psServerMMUContext) + { + psFWCommonContextFWAddr->ui32Addr = psThisContext->sFWCommonContextFWAddr.ui32Addr; + return PVRSRV_OK; + } + } + return PVRSRV_ERROR_INVALID_PARAMS; +} + +PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + IMG_UINT32 ui32ContextFlags) +{ + return RGXSetCCBFlags(psServerCommonContext->psClientCCB, + ui32ContextFlags); +} + +/*! 
+******************************************************************************* + @Function RGXFreeCCB + @Description Free the kernel or firmware CCB + @Input psDevInfo + @Input ppsCCBCtl + @Input ppsCCBCtlMemDesc + @Input ppsCCBMemDesc + @Input psCCBCtlFWAddr +******************************************************************************/ +static void RGXFreeCCB(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_CCB_CTL **ppsCCBCtl, + DEVMEM_MEMDESC **ppsCCBCtlMemDesc, + IMG_UINT8 **ppui8CCB, + DEVMEM_MEMDESC **ppsCCBMemDesc) +{ + if (*ppsCCBMemDesc != NULL) + { + if (*ppui8CCB != NULL) + { + DevmemReleaseCpuVirtAddr(*ppsCCBMemDesc); + *ppui8CCB = NULL; + } + DevmemFwUnmapAndFree(psDevInfo, *ppsCCBMemDesc); + *ppsCCBMemDesc = NULL; + } + if (*ppsCCBCtlMemDesc != NULL) + { + if (*ppsCCBCtl != NULL) + { + DevmemReleaseCpuVirtAddr(*ppsCCBCtlMemDesc); + *ppsCCBCtl = NULL; + } + DevmemFwUnmapAndFree(psDevInfo, *ppsCCBCtlMemDesc); + *ppsCCBCtlMemDesc = NULL; + } +} + +/*! +******************************************************************************* + @Function RGXFreeCCBReturnSlots + @Description Free the kernel CCB's return slot array and associated mappings + @Input psDevInfo Device Info struct + @Input ppui32CCBRtnSlots CPU mapping of slot array + @Input ppsCCBRtnSlotsMemDesc Slot array's device memdesc +******************************************************************************/ +static void RGXFreeCCBReturnSlots(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 **ppui32CCBRtnSlots, + DEVMEM_MEMDESC **ppsCCBRtnSlotsMemDesc) +{ + /* Free the return slot array if allocated */ + if (*ppsCCBRtnSlotsMemDesc != NULL) + { + /* Before freeing, ensure the CPU mapping as well is released */ + if (*ppui32CCBRtnSlots != NULL) + { + DevmemReleaseCpuVirtAddr(*ppsCCBRtnSlotsMemDesc); + *ppui32CCBRtnSlots = NULL; + } + DevmemFwUnmapAndFree(psDevInfo, *ppsCCBRtnSlotsMemDesc); + *ppsCCBRtnSlotsMemDesc = NULL; + } +} + +/*! 
+******************************************************************************* + @Function RGXSetupCCB + @Description Allocate and initialise a circular command buffer + @Input psDevInfo + @Input ppsCCBCtl + @Input ppsCCBCtlMemDesc + @Input ppui8CCB + @Input ppsCCBMemDesc + @Input psCCBCtlFWAddr + @Input ui32NumCmdsLog2 + @Input ui32CmdSize + @Input uiCCBMemAllocFlags + @Input pszName + + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR RGXSetupCCB(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_CCB_CTL **ppsCCBCtl, + DEVMEM_MEMDESC **ppsCCBCtlMemDesc, + IMG_UINT8 **ppui8CCB, + DEVMEM_MEMDESC **ppsCCBMemDesc, + PRGXFWIF_CCB_CTL *psCCBCtlFWAddr, + PRGXFWIF_CCB *psCCBFWAddr, + IMG_UINT32 ui32NumCmdsLog2, + IMG_UINT32 ui32CmdSize, + DEVMEM_FLAGS_T uiCCBMemAllocFlags, + const IMG_CHAR *pszName) +{ + PVRSRV_ERROR eError; + RGXFWIF_CCB_CTL *psCCBCtl; + IMG_UINT32 ui32CCBSize = (1U << ui32NumCmdsLog2); + IMG_CHAR szCCBCtlName[DEVMEM_ANNOTATION_MAX_LEN]; + IMG_INT32 iStrLen; + + /* Append "Control" to the name for the control struct. */ + iStrLen = OSSNPrintf(szCCBCtlName, sizeof(szCCBCtlName), "%sControl", pszName); + PVR_ASSERT(iStrLen < sizeof(szCCBCtlName)); + + if (unlikely(iStrLen < 0)) + { + szCCBCtlName[0] = '\0'; + } + + /* Allocate memory for the CCB control.*/ + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_ALLOCFLAGS, + sizeof(RGXFWIF_CCB_CTL), + szCCBCtlName, + ppsCCBCtlMemDesc, + psCCBCtlFWAddr, + (void**) ppsCCBCtl, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); + + /* + * Allocate memory for the CCB. 
+ * (this will reference further command data in non-shared CCBs) + */ + eError = RGXSetupFwAllocation(psDevInfo, + uiCCBMemAllocFlags, + ui32CCBSize * ui32CmdSize, + pszName, + ppsCCBMemDesc, + psCCBFWAddr, + (void**) ppui8CCB, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); + + /* + * Initialise the CCB control. + */ + psCCBCtl = *ppsCCBCtl; + psCCBCtl->ui32WriteOffset = 0; + psCCBCtl->ui32ReadOffset = 0; + psCCBCtl->ui32WrapMask = ui32CCBSize - 1; + psCCBCtl->ui32CmdSize = ui32CmdSize; + + /* Pdump the CCB control */ + PDUMPCOMMENT("Initialise %s", szCCBCtlName); + DevmemPDumpLoadMem(*ppsCCBCtlMemDesc, + 0, + sizeof(RGXFWIF_CCB_CTL), + 0); + + return PVRSRV_OK; + +fail: + RGXFreeCCB(psDevInfo, + ppsCCBCtl, + ppsCCBCtlMemDesc, + ppui8CCB, + ppsCCBMemDesc); + + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +#if defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) +static void RGXSetupFaultReadRegisterRollback(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + PMR *psPMR; + + /* Run-time check feature support */ + if (PVRSRV_IS_FEATURE_SUPPORTED(psDevInfo->psDeviceNode, SLC_FAULT_ACCESS_ADDR_PHYS)) + { + if (psDevInfo->psRGXFaultAddressMemDesc) + { + if (DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc, (void **)&psPMR) == PVRSRV_OK) + { + PMRUnlockSysPhysAddresses(psPMR); + } + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc); + psDevInfo->psRGXFaultAddressMemDesc = NULL; + } + } +} + +static PVRSRV_ERROR RGXSetupFaultReadRegister(PVRSRV_DEVICE_NODE *psDeviceNode, RGXFWIF_SYSINIT *psFwSysInit) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 *pui32MemoryVirtAddr; + IMG_UINT32 i; + size_t ui32PageSize = OSGetPageSize(); + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PMR *psPMR; + + /* Run-time check feature support */ + if (!PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, SLC_FAULT_ACCESS_ADDR_PHYS)) + { + return PVRSRV_OK; + } + + /* Allocate page of memory to use for page faults on 
non-blocking memory transactions. + * Doesn't need to be cleared as it is initialised with the 0xDEADBEEF pattern below. */ + psDevInfo->psRGXFaultAddressMemDesc = NULL; + eError = DevmemFwAllocateExportable(psDeviceNode, + ui32PageSize, + ui32PageSize, + RGX_FWSHAREDMEM_ALLOCFLAGS & (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC), + "FwExFaultAddress", + &psDevInfo->psRGXFaultAddressMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate mem for fault address (%u)", + __func__, eError)); + goto failFaultAddressDescAlloc; + } + + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc, + (void **)&pui32MemoryVirtAddr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire mem for fault address (%u)", + __func__, eError)); + goto failFaultAddressDescAqCpuVirt; + } + + if (!psDeviceNode->bAutoVzFwIsUp) + { + /* fill the page with a known pattern when booting the firmware */ + for (i = 0; i < ui32PageSize/sizeof(IMG_UINT32); i++) + { + *(pui32MemoryVirtAddr + i) = 0xDEADBEEF; + } + } + + eError = DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc, (void **)&psPMR); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error getting PMR for fault address (%u)", + __func__, eError)); + + goto failFaultAddressDescGetPMR; + } + else + { + IMG_BOOL bValid; + IMG_UINT32 ui32Log2PageSize = OSGetPageShift(); + + eError = PMRLockSysPhysAddresses(psPMR); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error locking physical address for fault address MemDesc (%u)", + __func__, eError)); + + goto failFaultAddressDescLockPhys; + } + + eError = PMR_DevPhysAddr(psPMR,ui32Log2PageSize, 1, 0, &(psFwSysInit->sFaultPhysAddr), &bValid); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error getting physical address for fault address MemDesc (%u)", + __func__, eError)); + + goto failFaultAddressDescGetPhys; + } + + if (!bValid) + { + 
psFwSysInit->sFaultPhysAddr.uiAddr = 0; + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed getting physical address for fault address MemDesc - invalid page (0x%" IMG_UINT64_FMTSPECX ")", + __func__, psFwSysInit->sFaultPhysAddr.uiAddr)); + + goto failFaultAddressDescGetPhys; + } + } + + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc); + + return PVRSRV_OK; + +failFaultAddressDescGetPhys: + PMRUnlockSysPhysAddresses(psPMR); + +failFaultAddressDescLockPhys: + +failFaultAddressDescGetPMR: + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc); + +failFaultAddressDescAqCpuVirt: + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc); + psDevInfo->psRGXFaultAddressMemDesc = NULL; + +failFaultAddressDescAlloc: + + return eError; +} + +#if defined(PDUMP) +/* Replace the DevPhy address with the one Pdump allocates at pdump_player run time */ +static PVRSRV_ERROR RGXPDumpFaultReadRegister(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + PVRSRV_ERROR eError; + PMR *psFWInitPMR, *psFaultAddrPMR; + IMG_UINT32 ui32Dstoffset; + + /* Run-time check feature support */ + if (!PVRSRV_IS_FEATURE_SUPPORTED(psDevInfo->psDeviceNode, SLC_FAULT_ACCESS_ADDR_PHYS)) + { + return PVRSRV_OK; + } + + psFWInitPMR = (PMR *)(psDevInfo->psRGXFWIfSysInitMemDesc->psImport->hPMR); + ui32Dstoffset = psDevInfo->psRGXFWIfSysInitMemDesc->uiOffset + offsetof(RGXFWIF_SYSINIT, sFaultPhysAddr.uiAddr); + + psFaultAddrPMR = (PMR *)(psDevInfo->psRGXFaultAddressMemDesc->psImport->hPMR); + + eError = PDumpMemLabelToMem64(psFaultAddrPMR, + psFWInitPMR, + 0, + ui32Dstoffset, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Dump of Fault Page Phys address failed(%u)", __func__, eError)); + } + return eError; +} +#endif +#endif /* #if defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) */ + +#if defined(SUPPORT_TBI_INTERFACE) +/*************************************************************************/ /*! 
+@Function RGXTBIBufferIsInitRequired + +@Description Returns true if the firmware tbi buffer is not allocated and + might be required by the firmware soon. TBI buffer allocated + on-demand to reduce RAM footprint on systems not needing + tbi. + +@Input psDevInfo RGX device info + +@Return IMG_BOOL Whether on-demand allocation(s) is/are needed + or not +*/ /**************************************************************************/ +INLINE IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + + /* The firmware expects a tbi buffer only when: + * - Logtype is "tbi" + */ + if ((psDevInfo->psRGXFWIfTBIBufferMemDesc == NULL) + && (psTraceBufCtl->ui32LogType & ~RGXFWIF_LOG_TYPE_TRACE) + && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)) + { + return IMG_TRUE; + } + + return IMG_FALSE; +} + +/*************************************************************************/ /*! +@Function RGXTBIBufferDeinit + +@Description Deinitialises all the allocations and references that are made + for the FW tbi buffer + +@Input ppsDevInfo RGX device info +@Return void +*/ /**************************************************************************/ +static void RGXTBIBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTBIBufferMemDesc); + psDevInfo->psRGXFWIfTBIBufferMemDesc = NULL; + psDevInfo->ui32RGXFWIfHWPerfBufSize = 0; +} + +/*************************************************************************/ /*! +@Function RGXTBIBufferInitOnDemandResources + +@Description Allocates the firmware TBI buffer required for reading SFs + strings and initialize it with SFs. + +@Input psDevInfo RGX device info + +@Return PVRSRV_OK If all went good, PVRSRV_ERROR otherwise. 
+*/ /**************************************************************************/ +PVRSRV_ERROR RGXTBIBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 i, ui32Len; + const IMG_UINT32 ui32FWTBIBufsize = g_ui32SFsCount * sizeof(RGXFW_STID_FMT); + RGXFW_STID_FMT *psFW_SFs = NULL; + + /* Firmware address should not be already set */ + if (psDevInfo->sRGXFWIfTBIBuffer.ui32Addr) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: FW address for FWTBI is already set. Resetting it with newly allocated one", + __func__)); + } + + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS, + ui32FWTBIBufsize, + "FwTBIBuffer", + &psDevInfo->psRGXFWIfTBIBufferMemDesc, + &psDevInfo->sRGXFWIfTBIBuffer, + (void**)&psFW_SFs, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); + + /* Copy SFs entries to FW buffer */ + for (i = 0; i < g_ui32SFsCount; i++) + { + OSDeviceMemCopy(&psFW_SFs[i].ui32Id, &SFs[i].ui32Id, sizeof(SFs[i].ui32Id)); + ui32Len = OSStringLength(SFs[i].psName); + OSDeviceMemCopy(psFW_SFs[i].sName, SFs[i].psName, MIN(ui32Len, IMG_SF_STRING_MAX_SIZE - 1)); + } + + /* Set size of TBI buffer */ + psDevInfo->ui32FWIfTBIBufferSize = ui32FWTBIBufsize; + + /* release CPU mapping */ + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTBIBufferMemDesc); + + return PVRSRV_OK; +fail: + RGXTBIBufferDeinit(psDevInfo); + return eError; +} +#endif + +/*************************************************************************/ /*! +@Function RGXTraceBufferIsInitRequired + +@Description Returns true if the firmware trace buffer is not allocated and + might be required by the firmware soon. Trace buffer allocated + on-demand to reduce RAM footprint on systems not needing + firmware trace. 
+ +@Input psDevInfo RGX device info + +@Return IMG_BOOL Whether on-demand allocation(s) is/are needed + or not +*/ /**************************************************************************/ +INLINE IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + + /* The firmware expects a trace buffer only when: + * - Logtype is "trace" AND + * - at least one LogGroup is configured + * - the Driver Mode is not Guest + */ + if ((psDevInfo->psRGXFWIfTraceBufferMemDesc[0] == NULL) + && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE) + && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK) + && !PVRSRV_VZ_MODE_IS(GUEST)) + { + return IMG_TRUE; + } + + return IMG_FALSE; +} + +/*************************************************************************/ /*! +@Function RGXTraceBufferDeinit + +@Description Deinitialises all the allocations and references that are made + for the FW trace buffer(s) + +@Input ppsDevInfo RGX device info +@Return void +*/ /**************************************************************************/ +static void RGXTraceBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + IMG_UINT32 i; + + for (i = 0; i < RGXFW_THREAD_NUM; i++) + { + if (psDevInfo->psRGXFWIfTraceBufferMemDesc[i]) + { + if (psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer != NULL) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufferMemDesc[i]); + psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer = NULL; + } + + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufferMemDesc[i]); + psDevInfo->psRGXFWIfTraceBufferMemDesc[i] = NULL; + } + } +} + +/*************************************************************************/ /*! +@Function RGXTraceBufferInitOnDemandResources + +@Description Allocates the firmware trace buffer required for dumping trace + info from the firmware. 
+ +@Input psDevInfo RGX device info + +@Return PVRSRV_OK If all went good, PVRSRV_ERROR otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO* psDevInfo, + DEVMEM_FLAGS_T uiAllocFlags) +{ + RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32FwThreadNum; + IMG_UINT32 ui32DefaultTraceBufSize; + IMG_DEVMEM_SIZE_T uiTraceBufSizeInBytes; + void *pvAppHintState = NULL; + IMG_CHAR pszBufferName[] = "FwTraceBuffer_Thread0"; + + /* Check AppHint value for module-param FWTraceBufSizeInDWords */ + OSCreateKMAppHintState(&pvAppHintState); + ui32DefaultTraceBufSize = RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS; + OSGetKMAppHintUINT32(pvAppHintState, + FWTraceBufSizeInDWords, + &ui32DefaultTraceBufSize, + &psTraceBufCtl->ui32TraceBufSizeInDWords); + OSFreeKMAppHintState(pvAppHintState); + pvAppHintState = NULL; + + uiTraceBufSizeInBytes = psTraceBufCtl->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32); + + for (ui32FwThreadNum = 0; ui32FwThreadNum < RGXFW_THREAD_NUM; ui32FwThreadNum++) + { +#if !defined(SUPPORT_AUTOVZ) + /* Ensure allocation API is only called when not already allocated */ + PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum] == NULL); + /* Firmware address should not be already set */ + PVR_ASSERT(psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer.ui32Addr == 0x0); +#endif + + /* update the firmware thread number in the Trace Buffer's name */ + pszBufferName[sizeof(pszBufferName) - 2] += ui32FwThreadNum; + + eError = RGXSetupFwAllocation(psDevInfo, + uiAllocFlags, + uiTraceBufSizeInBytes, + pszBufferName, + &psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum], + &psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer, + (void**)&psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32TraceBuffer, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, 
"RGXSetupFwAllocation", fail); + } + + return PVRSRV_OK; + +fail: + RGXTraceBufferDeinit(psDevInfo); + return eError; +} + +#if defined(SUPPORT_POWMON_COMPONENT) +/*************************************************************************/ /*! +@Function RGXPowmonBufferIsInitRequired + +@Description Returns true if the power monitoring buffer is not allocated and + might be required by the firmware soon. Powmon buffer allocated + on-demand to reduce RAM footprint on systems not needing + power monitoring. + +@Input psDevInfo RGX device info + +@Return IMG_BOOL Whether on-demand allocation(s) is/are needed + or not +*/ /**************************************************************************/ +INLINE IMG_BOOL RGXPowmonBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + /* The firmware expects a power monitoring buffer only when: + * - Single-shot power counters are enabled with RGX_HWPERF_PWR_EST_REQUEST + * - the Driver Mode is not Guest + */ + if ((psDevInfo->psRGXFWIfPowMonBufferMemDesc == NULL) + && (psDevInfo->ui64HWPerfFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_EST_REQUEST)) + && !PVRSRV_VZ_MODE_IS(GUEST)) + { + return IMG_TRUE; + } + + return IMG_FALSE; +} + +/*************************************************************************/ /*! 
+@Function RGXPowmonBufferDeinit + +@Description Deinitialises all the allocations and references that are made + for the FW power monitoring buffer + +@Input ppsDevInfo RGX device info +@Return void +*/ /**************************************************************************/ +static void RGXPowmonBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + + if (psDevInfo->psRGXFWIfPowMonBufferMemDesc) + { + if (psFwSysData->sPowerMonBuf.pui32TraceBuffer != NULL) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfPowMonBufferMemDesc); + psFwSysData->sPowerMonBuf.pui32TraceBuffer = NULL; + } + + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfPowMonBufferMemDesc); + psDevInfo->psRGXFWIfPowMonBufferMemDesc = NULL; + } +} + +/*************************************************************************/ /*! +@Function RGXPowmonBufferInitOnDemandResources + +@Description Allocates the power monitoring buffer. + +@Input psDevInfo RGX device info + +@Return PVRSRV_OK If all went good, PVRSRV_ERROR otherwise. 
+*/ /**************************************************************************/ +PVRSRV_ERROR RGXPowmonBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + PVRSRV_ERROR eError = PVRSRV_OK; + +#define POWER_MON_BUF_SIZE (512UL) + /* Ensure allocation API is only called when not already allocated */ + PVR_ASSERT(psDevInfo->psRGXFWIfPowMonBufferMemDesc == NULL); + + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS, + POWER_MON_BUF_SIZE, + "FwPowMonBuffer", + &psDevInfo->psRGXFWIfPowMonBufferMemDesc, + &psFwSysData->sPowerMonBuf.pui32RGXFWIfTraceBuffer, + (void **)&psFwSysData->sPowerMonBuf.pui32TraceBuffer, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "Power Monitoring Buffer allocation", fail); + + psFwSysData->ui32PowerMonBufSizeInDWords = POWER_MON_BUF_SIZE >> 2; + + return PVRSRV_OK; +fail: + RGXPowmonBufferDeinit(psDevInfo); + return eError; +} +#endif + +#if defined(PDUMP) +/*************************************************************************/ /*! +@Function RGXPDumpLoadFWInitData + +@Description Allocates the firmware trace buffer required for dumping trace + info from the firmware. 
+ +@Input psDevInfo RGX device info + */ /*************************************************************************/ +static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32HWPerfCountersDataSize, + IMG_UINT32 ui32KillingCtl, + IMG_BOOL bEnableSignatureChecks) +{ + IMG_UINT32 ui32ConfigFlags = psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags; + IMG_UINT32 ui32FwOsCfgFlags = psDevInfo->psRGXFWIfFwOsData->ui32FwOsConfigFlags; + + PDUMPCOMMENT("Dump RGXFW Init data"); + if (!bEnableSignatureChecks) + { + PDUMPCOMMENT("(to enable rgxfw signatures place the following line after the RTCONF line)"); + DevmemPDumpLoadMem(psDevInfo->psRGXFWIfSysInitMemDesc, + offsetof(RGXFWIF_SYSINIT, asSigBufCtl), + sizeof(RGXFWIF_SIGBUF_CTL)*(RGXFWIF_DM_MAX), + PDUMP_FLAGS_CONTINUOUS); + } + + PDUMPCOMMENT("Dump initial state of FW runtime configuration"); + DevmemPDumpLoadMem(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, + 0, + sizeof(RGXFWIF_RUNTIME_CFG), + PDUMP_FLAGS_CONTINUOUS); + + PDUMPCOMMENT("Dump rgxfw hwperfctl structure"); + DevmemPDumpLoadZeroMem (psDevInfo->psRGXFWIfHWPerfCountersMemDesc, + 0, + ui32HWPerfCountersDataSize, + PDUMP_FLAGS_CONTINUOUS); + + PDUMPCOMMENT("Dump rgxfw trace control structure"); + DevmemPDumpLoadMem(psDevInfo->psRGXFWIfTraceBufCtlMemDesc, + 0, + sizeof(RGXFWIF_TRACEBUF), + PDUMP_FLAGS_CONTINUOUS); + + PDUMPCOMMENT("Dump firmware system data structure"); + DevmemPDumpLoadMem(psDevInfo->psRGXFWIfFwSysDataMemDesc, + 0, + sizeof(RGXFWIF_SYSDATA), + PDUMP_FLAGS_CONTINUOUS); + + PDUMPCOMMENT("Dump firmware OS data structure"); + DevmemPDumpLoadMem(psDevInfo->psRGXFWIfFwOsDataMemDesc, + 0, + sizeof(RGXFWIF_OSDATA), + PDUMP_FLAGS_CONTINUOUS); + +#if defined(SUPPORT_TBI_INTERFACE) + PDUMPCOMMENT("Dump rgx TBI buffer"); + DevmemPDumpLoadMem(psDevInfo->psRGXFWIfTBIBufferMemDesc, + 0, + psDevInfo->ui32FWIfTBIBufferSize, + PDUMP_FLAGS_CONTINUOUS); +#endif /* defined(SUPPORT_TBI_INTERFACE) */ + +#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) 
+ PDUMPCOMMENT("Dump rgxfw register configuration buffer"); + DevmemPDumpLoadMem(psDevInfo->psRGXFWIfRegCfgMemDesc, + 0, + sizeof(RGXFWIF_REG_CFG), + PDUMP_FLAGS_CONTINUOUS); +#endif /* defined(SUPPORT_USER_REGISTER_CONFIGURATION) */ + PDUMPCOMMENT("Dump rgxfw system init structure"); + DevmemPDumpLoadMem(psDevInfo->psRGXFWIfSysInitMemDesc, + 0, + sizeof(RGXFWIF_SYSINIT), + PDUMP_FLAGS_CONTINUOUS); + + PDUMPCOMMENT("Dump rgxfw os init structure"); + DevmemPDumpLoadMem(psDevInfo->psRGXFWIfOsInitMemDesc, + 0, + sizeof(RGXFWIF_OSINIT), + PDUMP_FLAGS_CONTINUOUS); + +#if defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) + /* RGXFW Init structure needs to be loaded before we overwrite FaultPhysAddr, else this address patching won't have any effect */ + PDUMPCOMMENT("Overwrite FaultPhysAddr of FwSysInit in pdump with actual physical address"); + RGXPDumpFaultReadRegister(psDevInfo); +#endif /* defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) */ + + PDUMPCOMMENT("RTCONF: run-time configuration"); + + /* Dump the config options so they can be edited. 
*/ + + PDUMPCOMMENT("(Set the FW system config options here)"); + PDUMPCOMMENT("( Ctx Switch Rand mode: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_MODE_RAND); + PDUMPCOMMENT("( Ctx Switch Soft Reset Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_SRESET_EN); + PDUMPCOMMENT("( Enable HWPerf: 0x%08x)", RGXFWIF_INICFG_HWPERF_EN); +#if defined(SUPPORT_VALIDATION) + PDUMPCOMMENT("( Enable generic DM Killing Rand mode: 0x%08x)", RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN); +#endif /* defined(SUPPORT_VALIDATION) */ + PDUMPCOMMENT("( Rascal+Dust Power Island: 0x%08x)", RGXFWIF_INICFG_POW_RASCALDUST); + PDUMPCOMMENT("( Enable HWR: 0x%08x)", RGXFWIF_INICFG_HWR_EN); + PDUMPCOMMENT("( FBCDC Version 3.1 Enable: 0x%08x)", RGXFWIF_INICFG_FBCDC_V3_1_EN); + PDUMPCOMMENT("( Check MList: 0x%08x)", RGXFWIF_INICFG_CHECK_MLIST_EN); + PDUMPCOMMENT("( Disable Auto Clock Gating: 0x%08x)", RGXFWIF_INICFG_DISABLE_CLKGATING_EN); + PDUMPCOMMENT("( Enable HWPerf Polling Perf Counter: 0x%08x)", RGXFWIF_INICFG_POLL_COUNTERS_EN); + PDUMPCOMMENT("( Enable register configuration: 0x%08x)", RGXFWIF_INICFG_REGCONFIG_EN); + PDUMPCOMMENT("( Assert on TA Out-of-Memory: 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY); + PDUMPCOMMENT("( Disable HWPerf counter filter: 0x%08x)", RGXFWIF_INICFG_HWP_DISABLE_FILTER); + PDUMPCOMMENT("( Enable HWPerf custom performance timer: 0x%08x)", RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN); + PDUMPCOMMENT("( Enable Ctx Switch profile mode: 0x%08x (none=d'0, fast=d'1, medium=d'2, slow=d'3, nodelay=d'4))", RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK); + PDUMPCOMMENT("( Disable DM overlap (except TA during SPM): 0x%08x)", RGXFWIF_INICFG_DISABLE_DM_OVERLAP); + PDUMPCOMMENT("( Assert on HWR trigger (page fault, lockup, overrun or poll failure): 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER); + PDUMPCOMMENT("( Enable coherent memory accesses: 0x%08x)", RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED); + PDUMPCOMMENT("( Enable IRQ validation: 0x%08x)", RGXFWIF_INICFG_VALIDATE_IRQ); + PDUMPCOMMENT("( SPU power state mask 
change Enable: 0x%08x)", RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN); +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + PDUMPCOMMENT("( Enable Workload Estimation: 0x%08x)", RGXFWIF_INICFG_WORKEST); +#if defined(SUPPORT_PDVFS) + PDUMPCOMMENT("( Enable Proactive DVFS: 0x%08x)", RGXFWIF_INICFG_PDVFS); +#endif /* defined(SUPPORT_PDVFS) */ +#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */ + PDUMPCOMMENT("( CDM Arbitration Mode (task demand=b'01, round robin=b'10): 0x%08x)", RGXFWIF_INICFG_CDM_ARBITRATION_MASK); + PDUMPCOMMENT("( ISP Scheduling Mode (v1=b'01, v2=b'10): 0x%08x)", RGXFWIF_INICFG_ISPSCHEDMODE_MASK); + PDUMPCOMMENT("( Validate SOC & USC timers: 0x%08x)", RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER); + + DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfFwSysDataMemDesc, + offsetof(RGXFWIF_SYSDATA, ui32ConfigFlags), + ui32ConfigFlags, + PDUMP_FLAGS_CONTINUOUS); + + PDUMPCOMMENT("( Extended FW system config options not used.)"); + + PDUMPCOMMENT("(Set the FW OS config options here)"); + PDUMPCOMMENT("( Ctx Switch TDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN); + PDUMPCOMMENT("( Ctx Switch GEOM Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN); + PDUMPCOMMENT("( Ctx Switch 3D Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN); + PDUMPCOMMENT("( Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN); + PDUMPCOMMENT("( Lower Priority Ctx Switch 2D Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM); + PDUMPCOMMENT("( Lower Priority Ctx Switch TA Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM); + PDUMPCOMMENT("( Lower Priority Ctx Switch 3D Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D); + PDUMPCOMMENT("( Lower Priority Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM); + + DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfFwOsDataMemDesc, + offsetof(RGXFWIF_OSDATA, ui32FwOsConfigFlags), + ui32FwOsCfgFlags, + PDUMP_FLAGS_CONTINUOUS); + + { + PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; + IMG_UINT32 
ui32AllPowUnitsMask = (1 << psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount) - 1; +#if defined(SUPPORT_VALIDATION) + IMG_BOOL bRunTimeUpdate = (RGX_GET_FEATURE_VALUE(psDevInfo, POWER_ISLAND_VERSION) == 1); +#else + IMG_BOOL bRunTimeUpdate = IMG_FALSE; +#endif + IMG_UINT32 ui32DstOffset = psDevInfo->psRGXFWIfRuntimeCfgMemDesc->uiOffset + offsetof(RGXFWIF_RUNTIME_CFG, ui32PowUnitsStateMask); + IMG_CHAR aszPowUnitsMaskRegVar[] = ":SYSMEM:$1"; + IMG_CHAR aszPowUnitsEnable[] = "RUNTIME_POW_UNITS_MASK"; + PMR *psPMR = (PMR *)(psDevInfo->psRGXFWIfRuntimeCfgMemDesc->psImport->hPMR); + + + if (bRunTimeUpdate) + { + PDUMPIF(aszPowUnitsEnable, ui32PDumpFlags); + } + + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Load initial value power units mask in FW runtime configuration"); + DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, + ui32DstOffset, + psDevInfo->psRGXFWIfRuntimeCfg->ui32PowUnitsStateMask, + ui32PDumpFlags); + + if (bRunTimeUpdate) + { + PDUMPELSE(aszPowUnitsEnable, ui32PDumpFlags); + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Read initial SPU mask value from HW registers"); + PDumpRegRead32ToInternalVar(RGX_PDUMPREG_NAME, RGX_CR_SPU_ENABLE, aszPowUnitsMaskRegVar, ui32PDumpFlags); + PDumpWriteVarANDValueOp(aszPowUnitsMaskRegVar, ui32AllPowUnitsMask, ui32PDumpFlags); + PDumpInternalVarToMemLabel(psPMR, ui32DstOffset, aszPowUnitsMaskRegVar, ui32PDumpFlags); + PDUMPFI(aszPowUnitsEnable, ui32PDumpFlags); + } + } + +#if defined(SUPPORT_SECURITY_VALIDATION) + PDUMPCOMMENT("(Select one or more security tests here)"); + PDUMPCOMMENT("( Read/write FW private data from non-FW contexts: 0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_DATA); + PDUMPCOMMENT("( Read/write FW code from non-FW contexts: 0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_CODE); + PDUMPCOMMENT("( Execute FW code from non-secure memory: 0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_NONSECURE); + PDUMPCOMMENT("( Execute FW code from secure (non-FW) memory: 0x%08x)", 
RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_SECURE); + + DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc, + offsetof(RGXFWIF_SYSINIT, ui32SecurityTestFlags), + psDevInfo->psRGXFWIfSysInit->ui32SecurityTestFlags, + PDUMP_FLAGS_CONTINUOUS); +#endif /* defined(SUPPORT_SECURITY_VALIDATION) */ + + PDUMPCOMMENT("( PID filter type: %X=INCLUDE_ALL_EXCEPT, %X=EXCLUDE_ALL_EXCEPT)", + RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT, + RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT); + + DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc, + offsetof(RGXFWIF_SYSINIT, sPIDFilter.eMode), + psDevInfo->psRGXFWIfSysInit->sPIDFilter.eMode, + PDUMP_FLAGS_CONTINUOUS); + + PDUMPCOMMENT("( PID filter PID/OSID list (Up to %u entries. Terminate with a zero PID))", + RGXFWIF_PID_FILTER_MAX_NUM_PIDS); + { + IMG_UINT32 i; + + /* generate a few WRWs in the pdump stream as an example */ + for (i = 0; i < MIN(RGXFWIF_PID_FILTER_MAX_NUM_PIDS, 8); i++) + { + /* + * Some compilers cannot cope with the uses of offsetof() below - the specific problem being the use of + * a non-const variable in the expression, which it needs to be const. Typical compiler output is + * "expression must have a constant value". 
+ */ + const IMG_DEVMEM_OFFSET_T uiPIDOff + = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].uiPID); + + const IMG_DEVMEM_OFFSET_T uiOSIDOff + = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].ui32OSID); + + PDUMPCOMMENT("(PID and OSID pair %u)", i); + + PDUMPCOMMENT("(PID)"); + DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc, + uiPIDOff, + 0, + PDUMP_FLAGS_CONTINUOUS); + + PDUMPCOMMENT("(OSID)"); + DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc, + uiOSIDOff, + 0, + PDUMP_FLAGS_CONTINUOUS); + } + } +#if defined(SUPPORT_VALIDATION) + PDUMPCOMMENT("(Set the FW Killing Control.)"); + DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfFwSysDataMemDesc, + offsetof(RGXFWIF_SYSDATA, ui32KillingCtl), + ui32KillingCtl, + PDUMP_FLAGS_CONTINUOUS); +#endif /* defined(SUPPORT_VALIDATION) */ + /* + * Dump the log config so it can be edited. + */ + PDUMPCOMMENT("(Set the log config here)"); + PDUMPCOMMENT("( Log Type: set bit 0 for TRACE, reset for TBI)"); + PDUMPCOMMENT("( MAIN Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MAIN); + PDUMPCOMMENT("( MTS Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MTS); + PDUMPCOMMENT("( CLEANUP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CLEANUP); + PDUMPCOMMENT("( CSW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CSW); + PDUMPCOMMENT("( BIF Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_BIF); + PDUMPCOMMENT("( PM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_PM); + PDUMPCOMMENT("( RTD Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_RTD); + PDUMPCOMMENT("( SPM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_SPM); + PDUMPCOMMENT("( POW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_POW); + PDUMPCOMMENT("( HWR Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWR); + PDUMPCOMMENT("( HWP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWP); + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA)) + { + PDUMPCOMMENT("( DMA Group Enable: 0x%08x)", 
RGXFWIF_LOG_TYPE_GROUP_DMA); + } + + PDUMPCOMMENT("( MISC Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MISC); + PDUMPCOMMENT("( DEBUG Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DEBUG); + DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfTraceBufCtlMemDesc, + offsetof(RGXFWIF_TRACEBUF, ui32LogType), + psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType, + PDUMP_FLAGS_CONTINUOUS); + + PDUMPCOMMENT("Set the HWPerf Filter config here"); + DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfSysInitMemDesc, + offsetof(RGXFWIF_SYSINIT, ui64HWPerfFilter), + psDevInfo->psRGXFWIfSysInit->ui64HWPerfFilter, + PDUMP_FLAGS_CONTINUOUS); + +#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) + PDUMPCOMMENT("(Number of registers configurations for types(byte index): pow on(%d), dust change(%d), ta(%d), 3d(%d), cdm(%d), TDM(%d))", + RGXFWIF_REG_CFG_TYPE_PWR_ON, + RGXFWIF_REG_CFG_TYPE_DUST_CHANGE, + RGXFWIF_REG_CFG_TYPE_TA, + RGXFWIF_REG_CFG_TYPE_3D, + RGXFWIF_REG_CFG_TYPE_CDM, + RGXFWIF_REG_CFG_TYPE_TDM); + + { + IMG_UINT32 i; + + /* Write 32 bits in each iteration as required by PDUMP WRW command */ + for (i = 0; i < RGXFWIF_REG_CFG_TYPE_ALL; i += sizeof(IMG_UINT32)) + { + DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRegCfgMemDesc, + offsetof(RGXFWIF_REG_CFG, aui8NumRegsType[i]), + 0, + PDUMP_FLAGS_CONTINUOUS); + } + } + + PDUMPCOMMENT("(Set registers here: address, mask, value)"); + DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc, + offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Addr), + 0, + PDUMP_FLAGS_CONTINUOUS); + DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc, + offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Mask), + 0, + PDUMP_FLAGS_CONTINUOUS); + DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc, + offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Value), + 0, + PDUMP_FLAGS_CONTINUOUS); +#endif /* SUPPORT_USER_REGISTER_CONFIGURATION */ +} +#endif /* defined(PDUMP) */ + +/*! 
+******************************************************************************* + @Function RGXSetupFwSysData + + @Description Setups all system-wide firmware related data + + @Input psDevInfo + + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bEnableSignatureChecks, + IMG_UINT32 ui32SignatureChecksBufSize, + IMG_UINT32 ui32HWPerfFWBufSizeKB, + IMG_UINT64 ui64HWPerfFilter, + IMG_UINT32 ui32RGXFWAlignChecksArrLength, + IMG_UINT32 *pui32RGXFWAlignChecks, + IMG_UINT32 ui32ConfigFlags, + IMG_UINT32 ui32ConfigFlagsExt, + IMG_UINT32 ui32LogType, + IMG_UINT32 ui32FilterFlags, + IMG_UINT32 ui32JonesDisableMask, + IMG_UINT32 ui32HWPerfCountersDataSize, + IMG_UINT32 ui32KillingCtl, + IMG_UINT32 *pui32TPUTrilinearFracMask, + IMG_UINT32 *pui32USRMNumRegions, + IMG_UINT64 *pui64UVBRMNumRegions, + RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf, + FW_PERF_CONF eFirmwarePerf, + IMG_UINT32 ui32AvailablePowUnitsMask) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT32 ui32AllPowUnitsMask = (1 << psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount) - 1; + RGXFWIF_SYSINIT *psFwSysInitScratch = NULL; + + psFwSysInitScratch = OSAllocZMem(sizeof(*psFwSysInitScratch)); + PVR_LOG_GOTO_IF_NOMEM(psFwSysInitScratch, eError, fail); + + /* Sys Fw init data */ + eError = RGXSetupFwAllocation(psDevInfo, + (RGX_FWSHAREDMEM_ALLOCFLAGS | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_FW_ALLOC_CONFIG) & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + sizeof(RGXFWIF_SYSINIT), + "FwSysInitStructure", + &psDevInfo->psRGXFWIfSysInitMemDesc, + NULL, + (void**) &psDevInfo->psRGXFWIfSysInit, + RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "Firmware Sys Init structure allocation", fail); + +#if defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) + /* Setup Fault read register 
*/ + eError = RGXSetupFaultReadRegister(psDeviceNode, psFwSysInitScratch); + PVR_LOG_GOTO_IF_ERROR(eError, "Fault read register setup", fail); +#endif + + /* RD Power Island */ + { + RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; + IMG_BOOL bSysEnableRDPowIsland = psRGXData->psRGXTimingInfo->bEnableRDPowIsland; + IMG_BOOL bEnableRDPowIsland = ((eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_DEFAULT) && bSysEnableRDPowIsland) || + (eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_FORCE_ON); + + ui32ConfigFlags |= bEnableRDPowIsland? RGXFWIF_INICFG_POW_RASCALDUST : 0; + } + + /* Make sure to inform firmware if the device supports fullace fabric coherency */ + ui32ConfigFlags |= (PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && + PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) ? + RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED : 0; + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + ui32ConfigFlags |= RGXFWIF_INICFG_WORKEST; +#if defined(SUPPORT_PDVFS) + { + RGXFWIF_PDVFS_OPP *psPDVFSOPPInfo; + IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg; + + /* Pro-active DVFS depends on Workload Estimation */ + psPDVFSOPPInfo = &psFwSysInitScratch->sPDVFSOPPInfo; + psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; + PVR_LOG_IF_FALSE(psDVFSDeviceCfg->pasOPPTable, "RGXSetupFwSysData: Missing OPP Table"); + + if (psDVFSDeviceCfg->pasOPPTable != NULL) + { + if (psDVFSDeviceCfg->ui32OPPTableSize > ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: OPP Table too large: Size = %u, Maximum size = %lu", + __func__, + psDVFSDeviceCfg->ui32OPPTableSize, + (unsigned long)(ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues)))); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto fail; + } + + OSDeviceMemCopy(psPDVFSOPPInfo->asOPPValues, + psDVFSDeviceCfg->pasOPPTable, + sizeof(psPDVFSOPPInfo->asOPPValues)); + + psPDVFSOPPInfo->ui32MaxOPPPoint = psDVFSDeviceCfg->ui32OPPTableSize - 1; + + ui32ConfigFlags |= RGXFWIF_INICFG_PDVFS; + } + } +#endif 
/* defined(SUPPORT_PDVFS) */ +#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */ + + /* FW trace control structure */ + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWINITDATA_WC_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + sizeof(RGXFWIF_TRACEBUF), + "FwTraceCtlStruct", + &psDevInfo->psRGXFWIfTraceBufCtlMemDesc, + &psFwSysInitScratch->sTraceBufCtl, + (void**) &psDevInfo->psRGXFWIfTraceBufCtl, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); + + if (!psDeviceNode->bAutoVzFwIsUp) + { + /* Set initial firmware log type/group(s) */ + if (ui32LogType & ~RGXFWIF_LOG_TYPE_MASK) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid initial log type (0x%X)", + __func__, ui32LogType)); + goto fail; + } + psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType = ui32LogType; + } + + /* When PDUMP is enabled, ALWAYS allocate on-demand trace buffer resource + * (irrespective of loggroup(s) enabled), given that logtype/loggroups can + * be set during PDump playback in logconfig, at any point of time, + * Otherwise, allocate only if required. 
*/ +#if !defined(PDUMP) +#if defined(SUPPORT_AUTOVZ) + /* always allocate trace buffer for AutoVz Host drivers to allow + * deterministic addresses of all SysData structures */ + if ((PVRSRV_VZ_MODE_IS(HOST)) || (RGXTraceBufferIsInitRequired(psDevInfo))) +#else + if (RGXTraceBufferIsInitRequired(psDevInfo)) +#endif +#endif + { + eError = RGXTraceBufferInitOnDemandResources(psDevInfo, + RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp)); + } + PVR_LOG_GOTO_IF_ERROR(eError, "RGXTraceBufferInitOnDemandResources", fail); + + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + sizeof(RGXFWIF_SYSDATA), + "FwSysData", + &psDevInfo->psRGXFWIfFwSysDataMemDesc, + &psFwSysInitScratch->sFwSysData, + (void**) &psDevInfo->psRGXFWIfFwSysData, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); + + /* GPIO validation setup */ + psFwSysInitScratch->eGPIOValidationMode = RGXFWIF_GPIO_VAL_OFF; +#if defined(SUPPORT_VALIDATION) + { + IMG_INT32 ui32GPIOValidationMode; + + /* Check AppHint for GPIO validation mode */ + pvr_apphint_get_uint32(APPHINT_ID_GPIOValidationMode, &ui32GPIOValidationMode); + + if (ui32GPIOValidationMode >= RGXFWIF_GPIO_VAL_LAST) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid GPIO validation mode: %d, only valid if smaller than %d. 
Disabling GPIO validation.",
+			         __func__,
+			         ui32GPIOValidationMode,
+			         RGXFWIF_GPIO_VAL_LAST));
+		}
+		else
+		{
+			psFwSysInitScratch->eGPIOValidationMode = (RGXFWIF_GPIO_VAL_MODE) ui32GPIOValidationMode;
+		}
+
+		/* BUG FIX: removed the stray unconditional assignment
+		 *     psFwSysInitScratch->eGPIOValidationMode = ui32GPIOValidationMode;
+		 * that followed the if/else above. It overwrote the validated value and
+		 * defeated the >= RGXFWIF_GPIO_VAL_LAST range check, so an out-of-range
+		 * AppHint was applied anyway. eGPIOValidationMode now keeps either the
+		 * default (RGXFWIF_GPIO_VAL_OFF, set before this section) or the
+		 * validated AppHint value. */
+	}
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FW_DUAL_LOCKSTEP))
+	{
+		IMG_BOOL bDualLockstepFWProcessor;
+
+		/* Check AppHint for Dual Lockstep firmware processor */
+		pvr_apphint_get_bool(APPHINT_ID_DualLockstepFWProcessor, &bDualLockstepFWProcessor);
+
+		psDevInfo->ui32ValidationFlags |= (bDualLockstepFWProcessor) ? RGX_VAL_LS_EN : 0;
+	}
+
+#if (defined(SUPPORT_FBCDC_SIGNATURE_CHECK) || defined(SUPPORT_TRP))
+	{
+		IMG_UINT32 ui32EnablePollOnChecksumErrorStatus;
+
+		/* Check AppHint for poll-on-checksum-error behaviour
+		 * (comment was previously copy-pasted from the Dual Lockstep block) */
+		pvr_apphint_get_uint32(APPHINT_ID_EnablePollOnChecksumErrorStatus, &ui32EnablePollOnChecksumErrorStatus);
+
+		switch (ui32EnablePollOnChecksumErrorStatus)
+		{
+			case 1: psDevInfo->ui32ValidationFlags |= RGX_VAL_SIG_CHECK_NOERR_EN; break;
+			case 2: psDevInfo->ui32ValidationFlags |= RGX_VAL_SIG_CHECK_ERR_EN; break;
+			default: break;
+		}
+	}
+#endif
+#endif /* defined(SUPPORT_VALIDATION) */
+
+#if defined(SUPPORT_FIRMWARE_GCOV)
+	eError = RGXFWSetupFirmwareGcovBuffer(psDevInfo,
+	                                      &psDevInfo->psFirmwareGcovBufferMemDesc,
+	                                      RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE,
+	                                      &psFwSysInitScratch->sFirmwareGcovCtl,
+	                                      "FirmwareGcovBuffer");
+	PVR_LOG_GOTO_IF_ERROR(eError, "Firmware GCOV buffer allocation", fail);
+	psDevInfo->ui32FirmwareGcovSize = RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE;
+#endif /* defined(SUPPORT_FIRMWARE_GCOV) */
+
+#if defined(PDUMP)
+	/* Require a minimum amount of memory for the signature buffers */
+	if (ui32SignatureChecksBufSize < RGXFW_SIG_BUFFER_SIZE_MIN)
+	{
+		ui32SignatureChecksBufSize = RGXFW_SIG_BUFFER_SIZE_MIN;
+	}
+
+	/* Setup Signature and Checksum Buffers for TDM, GEOM, 3D and CDM */
+	eError = RGXFWSetupSignatureChecks(psDevInfo,
+
&psDevInfo->psRGXFWSigTDMChecksMemDesc, + ui32SignatureChecksBufSize, + &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_TDM], + "TDM"); + PVR_LOG_GOTO_IF_ERROR(eError, "TDM Signature check setup", fail); + psDevInfo->ui32SigTDMChecksSize = ui32SignatureChecksBufSize; + + eError = RGXFWSetupSignatureChecks(psDevInfo, + &psDevInfo->psRGXFWSigTAChecksMemDesc, + ui32SignatureChecksBufSize, + &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_GEOM], + "GEOM"); + PVR_LOG_GOTO_IF_ERROR(eError, "GEOM Signature check setup", fail); + psDevInfo->ui32SigTAChecksSize = ui32SignatureChecksBufSize; + + eError = RGXFWSetupSignatureChecks(psDevInfo, + &psDevInfo->psRGXFWSig3DChecksMemDesc, + ui32SignatureChecksBufSize, + &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_3D], + "3D"); + PVR_LOG_GOTO_IF_ERROR(eError, "3D Signature check setup", fail); + psDevInfo->ui32Sig3DChecksSize = ui32SignatureChecksBufSize; + + eError = RGXFWSetupSignatureChecks(psDevInfo, + &psDevInfo->psRGXFWSigCDMChecksMemDesc, + ui32SignatureChecksBufSize, + &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_CDM], + "CDM"); + PVR_LOG_GOTO_IF_ERROR(eError, "CDM Signature check setup", fail); + psDevInfo->ui32SigCDMChecksSize = ui32SignatureChecksBufSize; + +#if defined(SUPPORT_VALIDATION) && (defined(SUPPORT_TRP) || defined(SUPPORT_FBCDC_SIGNATURE_CHECK)) + eError = RGXFWSetupSignatureChecks(psDevInfo, + &psDevInfo->psRGXFWSigTRP_FBCDCMemDesc, + ui32SignatureChecksBufSize, + &psFwSysInitScratch->asValidationSigBufCtl[RGXFWIF_DM_3D], + "3D_TRP_FBCDC"); + PVR_LOG_GOTO_IF_ERROR(eError, "FBCDC/TRP Signature check setup", fail); + psDevInfo->ui32SigTRP_FBCDCSize = ui32SignatureChecksBufSize; +#endif +#endif + + if (!bEnableSignatureChecks) + { + psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_TDM].sBuffer.ui32Addr = 0x0; + psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_GEOM].sBuffer.ui32Addr = 0x0; + psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_3D].sBuffer.ui32Addr = 0x0; + psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_CDM].sBuffer.ui32Addr = 0x0; + 
} + + eError = RGXFWSetupAlignChecks(psDeviceNode, + &psFwSysInitScratch->sAlignChecks, + pui32RGXFWAlignChecks, + ui32RGXFWAlignChecksArrLength); + PVR_LOG_GOTO_IF_ERROR(eError, "Alignment checks setup", fail); + + psFwSysInitScratch->ui32FilterFlags = ui32FilterFlags; + + /* Fill the remaining bits of fw the init data */ + psFwSysInitScratch->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_HEAP_BASE; + psFwSysInitScratch->sUSCExecBase.uiAddr = RGX_USCCODE_HEAP_BASE; + psFwSysInitScratch->sFBCDCStateTableBase.uiAddr = RGX_FBCDC_HEAP_BASE; + psFwSysInitScratch->sFBCDCLargeStateTableBase.uiAddr = RGX_FBCDC_LARGE_HEAP_BASE; + psFwSysInitScratch->sTextureHeapBase.uiAddr = RGX_TEXTURE_STATE_HEAP_BASE; + psFwSysInitScratch->sPDSIndirectHeapBase.uiAddr = RGX_PDS_INDIRECT_STATE_HEAP_BASE; + + psFwSysInitScratch->ui32JonesDisableMask = ui32JonesDisableMask; + + eError = _AllocateSLC3Fence(psDevInfo, psFwSysInitScratch); + PVR_LOG_GOTO_IF_ERROR(eError, "SLC3Fence memory allocation", fail); + +#if defined(SUPPORT_PDVFS) + /* Core clock rate */ + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + sizeof(IMG_UINT32), + "FwPDVFSCoreClkRate", + &psDevInfo->psRGXFWIFCoreClkRateMemDesc, + &psFwSysInitScratch->sCoreClockRate, + (void**) &psDevInfo->pui32RGXFWIFCoreClkRate, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "PDVFS core clock rate memory setup", fail); +#endif + +#if defined(SUPPORT_TBI_INTERFACE) +#if !defined(PDUMP) + /* allocate only if required */ + if (RGXTBIBufferIsInitRequired(psDevInfo)) +#endif /* !defined(PDUMP) */ + { + /* When PDUMP is enabled, ALWAYS allocate on-demand TBI buffer resource + * (irrespective of loggroup(s) enabled), given that logtype/loggroups + * can be set during PDump playback in logconfig, at any point of time + */ + eError = RGXTBIBufferInitOnDemandResources(psDevInfo); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXTBIBufferInitOnDemandResources", fail); + } + + 
psFwSysInitScratch->sTBIBuf = psDevInfo->sRGXFWIfTBIBuffer; +#endif /* defined(SUPPORT_TBI_INTERFACE) */ + + /* Allocate shared buffer for GPU utilisation */ + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + sizeof(RGXFWIF_GPU_UTIL_FWCB), + "FwGPUUtilisationBuffer", + &psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc, + &psFwSysInitScratch->sGpuUtilFWCbCtl, + (void**) &psDevInfo->psRGXFWIfGpuUtilFWCb, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "GPU Utilisation Buffer ctl allocation", fail); + + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + sizeof(RGXFWIF_RUNTIME_CFG), + "FwRuntimeCfg", + &psDevInfo->psRGXFWIfRuntimeCfgMemDesc, + &psFwSysInitScratch->sRuntimeCfg, + (void**) &psDevInfo->psRGXFWIfRuntimeCfg, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "Firmware runtime configuration memory allocation", fail); + +#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + sizeof(RGXFWIF_REG_CFG), + "FwRegisterConfigStructure", + &psDevInfo->psRGXFWIfRegCfgMemDesc, + &psFwSysInitScratch->sRegCfg, + NULL, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "Firmware register user configuration structure allocation", fail); +#endif + + psDevInfo->ui32RGXFWIfHWPerfBufSize = GetHwPerfBufferSize(ui32HWPerfFWBufSizeKB); + /* Second stage initialisation or HWPerf, hHWPerfLock created in first + * stage. See RGXRegisterDevice() call to RGXHWPerfInit(). */ + if (psDevInfo->ui64HWPerfFilter == 0) + { + psDevInfo->ui64HWPerfFilter = ui64HWPerfFilter; + psFwSysInitScratch->ui64HWPerfFilter = ui64HWPerfFilter; + } + else + { + /* The filter has already been modified. This can happen if + * pvr/apphint/EnableFTraceGPU was enabled. 
*/ + psFwSysInitScratch->ui64HWPerfFilter = psDevInfo->ui64HWPerfFilter; + } + +#if !defined(PDUMP) + /* Allocate if HWPerf filter has already been set. This is possible either + * by setting a proper AppHint or enabling GPU ftrace events. */ + if (psDevInfo->ui64HWPerfFilter != 0) +#endif + { + /* When PDUMP is enabled, ALWAYS allocate on-demand HWPerf resources + * (irrespective of HWPerf enabled or not), given that HWPerf can be + * enabled during PDump playback via RTCONF at any point of time. */ + eError = RGXHWPerfInitOnDemandResources(psDevInfo); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInitOnDemandResources", fail); +#if defined(SUPPORT_POWMON_COMPONENT) + if (RGXPowmonBufferIsInitRequired(psDevInfo)) + { + /* Allocate power monitoring log buffer if enabled */ + eError = RGXPowmonBufferInitOnDemandResources(psDevInfo); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXPowmonBufferInitOnDemandResources", fail); + } +#endif + } + + RGXHWPerfInitAppHintCallbacks(psDeviceNode); + + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWINITDATA_WC_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + ui32HWPerfCountersDataSize, + "FwHWPerfControlStructure", + &psDevInfo->psRGXFWIfHWPerfCountersMemDesc, + &psFwSysInitScratch->sHWPerfCtl, + NULL, + RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "Firmware HW Perf control struct allocation", fail); + + psDevInfo->bPDPEnabled = (ui32ConfigFlags & RGXFWIF_INICFG_DISABLE_PDP_EN) + ? 
IMG_FALSE : IMG_TRUE; + + psFwSysInitScratch->eFirmwarePerf = eFirmwarePerf; + +#if defined(PDUMP) + /* default: no filter */ + psFwSysInitScratch->sPIDFilter.eMode = RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT; + psFwSysInitScratch->sPIDFilter.asItems[0].uiPID = 0; +#endif + +#if defined(SUPPORT_VALIDATION) + { + IMG_UINT32 dm; + + /* TPU trilinear rounding mask override */ + for (dm = 0; dm < RGXFWIF_TPU_DM_LAST; dm++) + { + psFwSysInitScratch->aui32TPUTrilinearFracMask[dm] = pui32TPUTrilinearFracMask[dm]; + } + + /* USRM Config override */ + for (dm = 0; dm < RGXFWIF_USRM_DM_LAST; dm++) + { + psFwSysInitScratch->aui32USRMNumRegions[dm] = pui32USRMNumRegions[dm]; + } + + /* UVBRM Config override */ + for (dm = 0; dm < RGXFWIF_UVBRM_DM_LAST; dm++) + { + psFwSysInitScratch->aui64UVBRMNumRegions[dm] = pui64UVBRMNumRegions[dm]; + } + } +#endif + +#if defined(SUPPORT_SECURITY_VALIDATION) + PDUMPCOMMENT("Allocate non-secure buffer for security validation test"); + eError = DevmemFwAllocateExportable(psDeviceNode, + OSGetPageSize(), + OSGetPageSize(), + RGX_FWSHAREDMEM_ALLOCFLAGS, + "FwExNonSecureBuffer", + &psDevInfo->psRGXFWIfNonSecureBufMemDesc); + PVR_LOG_GOTO_IF_ERROR(eError, "Non-secure buffer allocation", fail); + + eError = RGXSetFirmwareAddress(&psFwSysInitScratch->pbNonSecureBuffer, + psDevInfo->psRGXFWIfNonSecureBufMemDesc, + 0, RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", fail); + + PDUMPCOMMENT("Allocate secure buffer for security validation test"); + eError = DevmemFwAllocateExportable(psDeviceNode, + OSGetPageSize(), + OSGetPageSize(), + RGX_FWSHAREDMEM_ALLOCFLAGS | + PVRSRV_MEMALLOCFLAG_VAL_SECURE_BUFFER, + "FwExSecureBuffer", + &psDevInfo->psRGXFWIfSecureBufMemDesc); + PVR_LOG_GOTO_IF_ERROR(eError, "Secure buffer allocation", fail); + + eError = RGXSetFirmwareAddress(&psFwSysInitScratch->pbSecureBuffer, + psDevInfo->psRGXFWIfSecureBufMemDesc, + 0, RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, 
"RGXSetFirmwareAddress:2", fail); +#endif /* SUPPORT_SECURITY_VALIDATION */ + + /* Initialize FW started flag */ + psFwSysInitScratch->bFirmwareStarted = IMG_FALSE; + psFwSysInitScratch->ui32MarkerVal = 1; + + if (!psDeviceNode->bAutoVzFwIsUp) + { + RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; + RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; + + /* Required info by FW to calculate the ActivePM idle timer latency */ + psFwSysInitScratch->ui32InitialCoreClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed; + psFwSysInitScratch->ui32ActivePMLatencyms = psRGXData->psRGXTimingInfo->ui32ActivePMLatencyms; + + /* Initialise variable runtime configuration to the system defaults */ + psRuntimeCfg->ui32CoreClockSpeed = psFwSysInitScratch->ui32InitialCoreClockSpeed; + psRuntimeCfg->ui32ActivePMLatencyms = psFwSysInitScratch->ui32ActivePMLatencyms; + psRuntimeCfg->bActivePMLatencyPersistant = IMG_TRUE; + + /* Validate the power units mask and initialize to number of units to power up */ + if ((ui32AvailablePowUnitsMask & ui32AllPowUnitsMask) == 0) + { + eError = PVRSRV_ERROR_INVALID_SPU_MASK; + PVR_DPF((PVR_DBG_ERROR, + "%s:Invalid power units mask (All=0x%X, Non Fused=0x%X). 
At-least one power unit must to be powered up.", + __func__, + ui32AllPowUnitsMask, + ui32AvailablePowUnitsMask)); + goto fail; + } + psRuntimeCfg->ui32PowUnitsStateMask = ui32AvailablePowUnitsMask & ui32AllPowUnitsMask; + + /* Setup FW coremem data */ + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE)) + { + psFwSysInitScratch->sCorememDataStore.pbyFWAddr = psDevInfo->sFWCorememDataStoreFWAddr; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA)) + { + RGXSetMetaDMAAddress(&psFwSysInitScratch->sCorememDataStore, + psDevInfo->psRGXFWIfCorememDataStoreMemDesc, + &psFwSysInitScratch->sCorememDataStore.pbyFWAddr, + 0); + } + } + + psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags = ui32ConfigFlags & RGXFWIF_INICFG_ALL; + psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlagsExt = ui32ConfigFlagsExt & RGXFWIF_INICFG_EXT_ALL; +#if defined(SUPPORT_VALIDATION) + psDevInfo->psRGXFWIfFwSysData->ui32KillingCtl = ui32KillingCtl; +#else + PVR_UNREFERENCED_PARAMETER(ui32KillingCtl); +#endif + + /* Initialise GPU utilisation buffer */ + psDevInfo->psRGXFWIfGpuUtilFWCb->ui64LastWord = + RGXFWIF_GPU_UTIL_MAKE_WORD(OSClockns64(),RGXFWIF_GPU_UTIL_STATE_IDLE); + + /* init HWPERF data */ + psDevInfo->psRGXFWIfFwSysData->ui32HWPerfRIdx = 0; + psDevInfo->psRGXFWIfFwSysData->ui32HWPerfWIdx = 0; + psDevInfo->psRGXFWIfFwSysData->ui32HWPerfWrapCount = 0; + psDevInfo->psRGXFWIfFwSysData->ui32HWPerfSize = psDevInfo->ui32RGXFWIfHWPerfBufSize; + psDevInfo->psRGXFWIfFwSysData->ui32HWPerfUt = 0; + psDevInfo->psRGXFWIfFwSysData->ui32HWPerfDropCount = 0; + psDevInfo->psRGXFWIfFwSysData->ui32FirstDropOrdinal = 0; + psDevInfo->psRGXFWIfFwSysData->ui32LastDropOrdinal = 0; +#if defined(SUPPORT_POWMON_COMPONENT) + psDevInfo->psRGXFWIfFwSysData->ui32PowMonEnergy = 0; +#endif + + /*Send through the BVNC Feature Flags*/ + eError = RGXServerFeatureFlagsToHWPerfFlags(psDevInfo, &psFwSysInitScratch->sBvncKmFeatureFlags); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXServerFeatureFlagsToHWPerfFlags", fail); + + 
/* populate the real FwOsInit structure with the values stored in the scratch copy */
+		OSDeviceMemCopy(psDevInfo->psRGXFWIfSysInit, psFwSysInitScratch, sizeof(RGXFWIF_SYSINIT));
+	}
+
+	OSFreeMem(psFwSysInitScratch);
+
+	return PVRSRV_OK;
+
+fail:
+	if (psFwSysInitScratch)
+	{
+		OSFreeMem(psFwSysInitScratch);
+	}
+
+	/* Error-path cleanup: releases every system-wide FW allocation made so
+	 * far (RGXFreeFwSysData guards each release with a NULL check). */
+	RGXFreeFwSysData(psDevInfo);
+
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function      RGXSetupFwOsData
+
+ @Description   Sets up all os-specific firmware related data
+
+ @Input         psDeviceNode          Device node
+ @Input         ui32HWRDebugDumpLimit HWR debug dump limit, copied into the
+                                      FW OsInit structure
+ @Input         ui32FwOsCfgFlags      Per-OS FW config flags (masked with
+                                      RGXFWIF_INICFG_OS_ALL before use)
+
+ @Return        PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXSetupFwOsData(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     IMG_UINT32 ui32HWRDebugDumpLimit,
+                                     IMG_UINT32 ui32FwOsCfgFlags)
+{
+	PVRSRV_ERROR eError;
+	RGXFWIF_OSINIT sFwOsInitScratch;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	/* Non-native (virtualized) drivers use a half-sized kernel CCB
+	 * (log2 count reduced by one). */
+	IMG_UINT32 ui32kCCBSize = (!PVRSRV_VZ_MODE_IS(NATIVE) ?
+	                           (RGXFWIF_KCCB_NUMCMDS_LOG2 - 1) : (RGXFWIF_KCCB_NUMCMDS_LOG2));
+
+	OSCachedMemSet(&sFwOsInitScratch, 0, sizeof(RGXFWIF_OSINIT));
+
+	/* Memory tracking the connection state should be non-volatile and
+	 * is not cleared on allocation to prevent loss of pre-reset information */
+	eError = RGXSetupFwAllocation(psDevInfo,
+	                              (RGX_FWSHAREDMEM_ALLOCFLAGS |
+	                               PVRSRV_MEMALLOCFLAG_FW_ALLOC_CONFIG)
+	                               & (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC),
+	                              sizeof(RGXFWIF_CONNECTION_CTL),
+	                              "FwConnectionCtl",
+	                              &psDevInfo->psRGXFWIfConnectionCtlMemDesc,
+	                              NULL,
+	                              (void**) &psDevInfo->psRGXFWIfConnectionCtl,
+	                              RFW_FWADDR_FLAG_NONE);
+	PVR_LOG_GOTO_IF_ERROR(eError, "Firmware Connection Control structure allocation", fail);
+
+	eError = RGXSetupFwAllocation(psDevInfo,
+	                              (RGX_FWSHAREDMEM_ALLOCFLAGS |
+	                               PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+	                               PVRSRV_MEMALLOCFLAG_FW_ALLOC_CONFIG),
+	                              sizeof(RGXFWIF_OSINIT),
+	                              "FwOsInitStructure",
+	                              &psDevInfo->psRGXFWIfOsInitMemDesc,
+	                              NULL,
+	                              (void**) &psDevInfo->psRGXFWIfOsInit,
+	                              RFW_FWADDR_FLAG_NONE);
+	PVR_LOG_GOTO_IF_ERROR(eError, "Firmware Os Init structure allocation", fail);
+
+	/* init HWR frame info */
+	eError = RGXSetupFwAllocation(psDevInfo,
+	                              RGX_FWSHAREDMEM_ALLOCFLAGS,
+	                              sizeof(RGXFWIF_HWRINFOBUF),
+	                              "FwHWRInfoBuffer",
+	                              &psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc,
+	                              &sFwOsInitScratch.sRGXFWIfHWRInfoBufCtl,
+	                              (void**) &psDevInfo->psRGXFWIfHWRInfoBufCtl,
+	                              RFW_FWADDR_NOREF_FLAG);
+	PVR_LOG_GOTO_IF_ERROR(eError, "HWR Info Buffer allocation", fail);
+
+	/* Might be uncached. Be conservative and use a DeviceMemSet */
+	OSDeviceMemSet(psDevInfo->psRGXFWIfHWRInfoBufCtl, 0, sizeof(RGXFWIF_HWRINFOBUF));
+
+	/* Allocate a sync for power management */
+	eError = SyncPrimContextCreate(psDevInfo->psDeviceNode,
+	                               &psDevInfo->hSyncPrimContext);
+	PVR_LOG_GOTO_IF_ERROR(eError, "Sync primitive context allocation", fail);
+
+	eError = SyncPrimAlloc(psDevInfo->hSyncPrimContext, &psDevInfo->psPowSyncPrim, "fw power ack");
+	PVR_LOG_GOTO_IF_ERROR(eError, "Sync primitive allocation", fail);
+
+	/* Set up kernel CCB */
+	eError = RGXSetupCCB(psDevInfo,
+	                     &psDevInfo->psKernelCCBCtl,
+	                     &psDevInfo->psKernelCCBCtlMemDesc,
+	                     &psDevInfo->psKernelCCB,
+	                     &psDevInfo->psKernelCCBMemDesc,
+	                     &sFwOsInitScratch.psKernelCCBCtl,
+	                     &sFwOsInitScratch.psKernelCCB,
+	                     ui32kCCBSize,
+	                     sizeof(RGXFWIF_KCCB_CMD),
+	                     (RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS |
+	                      PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)),
+	                     "FwKernelCCB");
+	PVR_LOG_GOTO_IF_ERROR(eError, "Kernel CCB allocation", fail);
+
+	/* KCCB additionally uses a return slot array for FW to be able to send back
+	 * return codes for each required command
+	 */
+	eError = RGXSetupFwAllocation(psDevInfo,
+	                              RGX_FWSHAREDMEM_ALLOCFLAGS,
+	                              (1U << ui32kCCBSize) * sizeof(IMG_UINT32),
+	                              "FwKernelCCBRtnSlots",
+	                              &psDevInfo->psKernelCCBRtnSlotsMemDesc,
+	                              &sFwOsInitScratch.psKernelCCBRtnSlots,
+	                              (void**) &psDevInfo->pui32KernelCCBRtnSlots,
+	                              RFW_FWADDR_NOREF_FLAG);
+	PVR_LOG_GOTO_IF_ERROR(eError, "Kernel CCB return slot array allocation", fail);
+
+	/* Set up firmware CCB */
+	eError = RGXSetupCCB(psDevInfo,
+	                     &psDevInfo->psFirmwareCCBCtl,
+	                     &psDevInfo->psFirmwareCCBCtlMemDesc,
+	                     &psDevInfo->psFirmwareCCB,
+	                     &psDevInfo->psFirmwareCCBMemDesc,
+	                     &sFwOsInitScratch.psFirmwareCCBCtl,
+	                     &sFwOsInitScratch.psFirmwareCCB,
+	                     RGXFWIF_FWCCB_NUMCMDS_LOG2,
+	                     sizeof(RGXFWIF_FWCCB_CMD),
+	                     RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS,
+	                     "FwCCB");
+	PVR_LOG_GOTO_IF_ERROR(eError, "Firmware CCB allocation", fail);
+
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+	/* Set up checkpoint CCB */
+	eError = RGXSetupCCB(psDevInfo,
+	                     &psDevInfo->psCheckpointCCBCtl,
+	                     &psDevInfo->psCheckpointCCBCtlMemDesc,
+	                     &psDevInfo->psCheckpointCCB,
+	                     &psDevInfo->psCheckpointCCBMemDesc,
+	                     &sFwOsInitScratch.psCheckpointCCBCtl,
+	                     &sFwOsInitScratch.psCheckpointCCB,
+	                     RGXFWIF_CHECKPOINTCCB_NUMCMDS_LOG2,
+	                     sizeof(PRGXFWIF_UFO_ADDR),
+	                     RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS,
+	                     "FwChkptCCB");
+	PVR_LOG_GOTO_IF_ERROR(eError, "Checkpoint CCB allocation", fail);
+#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */
+
+	eError = RGXSetupFwAllocation(psDevInfo,
+	                              RGX_FWSHAREDMEM_ALLOCFLAGS,
+	                              sizeof(RGXFWIF_OSDATA),
+	                              "FwOsData",
+	                              &psDevInfo->psRGXFWIfFwOsDataMemDesc,
+	                              &sFwOsInitScratch.sFwOsData,
+	                              (void**) &psDevInfo->psRGXFWIfFwOsData,
+	                              RFW_FWADDR_NOREF_FLAG);
+	PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);
+
+	psDevInfo->psRGXFWIfFwOsData->ui32FwOsConfigFlags = ui32FwOsCfgFlags & RGXFWIF_INICFG_OS_ALL;
+
+	eError = SyncPrimGetFirmwareAddr(psDevInfo->psPowSyncPrim, &psDevInfo->psRGXFWIfFwOsData->sPowerSync.ui32Addr);
+	PVR_LOG_GOTO_IF_ERROR(eError, "Get Sync Prim FW address", fail);
+
+	sFwOsInitScratch.ui32HWRDebugDumpLimit = ui32HWRDebugDumpLimit;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	/* Set up Workload Estimation firmware CCB */
+	eError = RGXSetupCCB(psDevInfo,
+	                     &psDevInfo->psWorkEstFirmwareCCBCtl,
+	                     &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
+	                     &psDevInfo->psWorkEstFirmwareCCB,
+	                     &psDevInfo->psWorkEstFirmwareCCBMemDesc,
+	                     &sFwOsInitScratch.psWorkEstFirmwareCCBCtl,
+	                     &sFwOsInitScratch.psWorkEstFirmwareCCB,
+	                     RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2,
+	                     sizeof(RGXFWIF_WORKEST_FWCCB_CMD),
+	                     RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS,
+	                     "FwWEstCCB");
+	PVR_LOG_GOTO_IF_ERROR(eError, "Workload Estimation Firmware CCB allocation", fail);
+#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */
+
+	/* Initialise the compatibility check data */
+	RGXFWIF_COMPCHECKS_BVNC_INIT(sFwOsInitScratch.sRGXCompChecks.sFWBVNC);
+	RGXFWIF_COMPCHECKS_BVNC_INIT(sFwOsInitScratch.sRGXCompChecks.sHWBVNC);
+
+	/* populate the real FwOsInit structure with the values stored in the scratch copy */
+	OSDeviceMemCopy(psDevInfo->psRGXFWIfOsInit, &sFwOsInitScratch, sizeof(RGXFWIF_OSINIT));
+
+#if defined(SUPPORT_AUTOVZ) && defined(SUPPORT_AUTOVZ_HW_REGS)
+	/* if hardware registers are used to store connection states,
+	 * these can only be accessed if the GPU is powered up */
+	if (PVRSRV_VZ_MODE_IS(HOST) && (psDeviceNode->bAutoVzFwIsUp))
+#endif /* defined(SUPPORT_AUTOVZ) && defined(SUPPORT_AUTOVZ_HW_REGS)*/
+	{
+		KM_SET_OS_CONNECTION(READY, psDevInfo);
+	}
+
+	return PVRSRV_OK;
+
+fail:
+	/* Error-path cleanup: each release in RGXFreeFwOsData is NULL-guarded,
+	 * so partially-initialised state is handled. */
+	RGXFreeFwOsData(psDevInfo);
+
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function      RGXSetupFirmware
+
+ @Description   Sets up all firmware related data: OS-specific data always,
+                system-wide data only for Native/Host drivers (Guest drivers
+                skip it and leave psRGXFWIfSysInit NULL)
+
+ @Input         psDeviceNode   Device node; the remaining inputs are forwarded
+                               to RGXSetupFwOsData / RGXSetupFwSysData
+
+ @Return        PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode,
+                              IMG_BOOL bEnableSignatureChecks,
+                              IMG_UINT32 ui32SignatureChecksBufSize,
+                              IMG_UINT32 ui32HWPerfFWBufSizeKB,
+                              IMG_UINT64 ui64HWPerfFilter,
+                              IMG_UINT32 ui32RGXFWAlignChecksArrLength,
+                              IMG_UINT32 *pui32RGXFWAlignChecks,
+                              IMG_UINT32 ui32ConfigFlags,
+                              IMG_UINT32 ui32ConfigFlagsExt,
+                              IMG_UINT32 ui32FwOsCfgFlags,
+                              IMG_UINT32 ui32LogType,
+                              IMG_UINT32 ui32FilterFlags,
+                              IMG_UINT32 ui32JonesDisableMask,
+                              IMG_UINT32 ui32HWRDebugDumpLimit,
+                              IMG_UINT32 ui32HWPerfCountersDataSize,
+                              IMG_UINT32 ui32KillingCtl,
+                              IMG_UINT32 *pui32TPUTrilinearFracMask,
+                              IMG_UINT32 *pui32USRMNumRegions,
+                              IMG_UINT64 *pui64UVBRMNumRegions,
+                              RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
+                              FW_PERF_CONF eFirmwarePerf,
+                              IMG_UINT32 ui32AvailablePowUnitsMask)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+	eError = RGXSetupFwOsData(psDeviceNode,
+	                          ui32HWRDebugDumpLimit, ui32FwOsCfgFlags);
+	PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware os data", fail);
+
+	if (PVRSRV_VZ_MODE_IS(GUEST))
+	{
+		/* Guest drivers do not configure system-wide firmware data */
+		psDevInfo->psRGXFWIfSysInit = NULL;
+	}
+	else
+	{
+		/* Native and Host drivers must initialise the firmware's system data */
+		eError = RGXSetupFwSysData(psDeviceNode,
+		                           bEnableSignatureChecks,
+		                           ui32SignatureChecksBufSize,
+		                           ui32HWPerfFWBufSizeKB,
+		                           ui64HWPerfFilter,
+		                           ui32RGXFWAlignChecksArrLength,
+		                           pui32RGXFWAlignChecks,
+		                           ui32ConfigFlags,
+		                           ui32ConfigFlagsExt,
+		                           ui32LogType,
+		                           ui32FilterFlags,
+		                           ui32JonesDisableMask,
+		                           ui32HWPerfCountersDataSize,
+		                           ui32KillingCtl,
+		                           pui32TPUTrilinearFracMask,
+		                           pui32USRMNumRegions,
+		                           pui64UVBRMNumRegions,
+		                           eRGXRDPowerIslandConf,
+		                           eFirmwarePerf,
+		                           ui32AvailablePowUnitsMask);
+		PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware system data", fail);
+	}
+
+	psDevInfo->bFirmwareInitialised = IMG_TRUE;
+
+#if defined(PDUMP)
+	RGXPDumpLoadFWInitData(psDevInfo,
+	                       ui32HWPerfCountersDataSize,
+	                       ui32KillingCtl,
+	                       bEnableSignatureChecks);
+#endif /* PDUMP */
+
+	/* NOTE: the success path intentionally falls through the 'fail' label;
+	 * eError is PVRSRV_OK here, so this returns success. */
+fail:
+	return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function      RGXFreeFwSysData
+
+ @Description   Frees all system-wide firmware related data
+
+ @Input         psDevInfo
+
+ @Note          Every release below is guarded by a NULL/feature check, so
+                this is safe to call on a partially-initialised device; it is
+                used as the error-path cleanup of RGXSetupFwSysData.
+******************************************************************************/
+static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	/* Mark the firmware as no longer initialised before tearing down state */
+	psDevInfo->bFirmwareInitialised = IMG_FALSE;
+
+	if (psDevInfo->psRGXFWAlignChecksMemDesc)
+	{
+		RGXFWFreeAlignChecks(psDevInfo);
+	}
+
+#if defined(PDUMP)
+	if (psDevInfo->psRGXFWSigTDMChecksMemDesc)
+	{
+		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigTDMChecksMemDesc);
+		psDevInfo->psRGXFWSigTDMChecksMemDesc = NULL;
+	}
+
+	if (psDevInfo->psRGXFWSigTAChecksMemDesc)
+	{
+		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigTAChecksMemDesc);
+		psDevInfo->psRGXFWSigTAChecksMemDesc = NULL;
+	}
+
+	if (psDevInfo->psRGXFWSig3DChecksMemDesc)
+	{
+		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSig3DChecksMemDesc);
+		psDevInfo->psRGXFWSig3DChecksMemDesc = NULL;
+	}
+
+	if (psDevInfo->psRGXFWSigCDMChecksMemDesc)
+	{
+		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigCDMChecksMemDesc);
+		psDevInfo->psRGXFWSigCDMChecksMemDesc = NULL;
+	}
+
+#if defined(SUPPORT_VALIDATION) && (defined(SUPPORT_TRP) || defined(SUPPORT_FBCDC_SIGNATURE_CHECK))
+	if (psDevInfo->psRGXFWSigTRP_FBCDCMemDesc)
+	{
+		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigTRP_FBCDCMemDesc);
+		psDevInfo->psRGXFWSigTRP_FBCDCMemDesc = NULL;
+	}
+#endif
+#endif
+
+#if defined(SUPPORT_FIRMWARE_GCOV)
+	if (psDevInfo->psFirmwareGcovBufferMemDesc)
+	{
+		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psFirmwareGcovBufferMemDesc);
+		psDevInfo->psFirmwareGcovBufferMemDesc = NULL;
+	}
+#endif
+
+#if defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK)
+	RGXSetupFaultReadRegisterRollback(psDevInfo);
+#endif
+
+	if (psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc)
+	{
+		if (psDevInfo->psRGXFWIfGpuUtilFWCb != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc);
+			psDevInfo->psRGXFWIfGpuUtilFWCb = NULL;
+		}
+		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc);
+		psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc = NULL;
+	}
+
+	RGXHWPerfDeinit(psDevInfo);
+
+	if (psDevInfo->psRGXFWIfRuntimeCfgMemDesc)
+	{
+		if (psDevInfo->psRGXFWIfRuntimeCfg != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfRuntimeCfgMemDesc);
+			psDevInfo->psRGXFWIfRuntimeCfg = NULL;
+		}
+		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfRuntimeCfgMemDesc);
+		psDevInfo->psRGXFWIfRuntimeCfgMemDesc = NULL;
+	}
+
+	if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE))
+	{
+		if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc)
+		{
+			DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+			psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL;
+		}
+	}
+
+	if (psDevInfo->psRGXFWIfTraceBufCtlMemDesc)
+	{
+		if (psDevInfo->psRGXFWIfTraceBufCtl != NULL)
+		{
+			/* first deinit/free the tracebuffer allocation */
+			RGXTraceBufferDeinit(psDevInfo);
+
+#if defined(SUPPORT_POWMON_COMPONENT)
+			/* second free the powmon log buffer if used */
+			RGXPowmonBufferDeinit(psDevInfo);
+#endif
+
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
+			psDevInfo->psRGXFWIfTraceBufCtl = NULL;
+		}
+		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
+		psDevInfo->psRGXFWIfTraceBufCtlMemDesc = NULL;
+	}
+
+	if (psDevInfo->psRGXFWIfFwSysDataMemDesc)
+	{
+		if (psDevInfo->psRGXFWIfFwSysData != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfFwSysDataMemDesc);
+			psDevInfo->psRGXFWIfFwSysData = NULL;
+		}
+		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfFwSysDataMemDesc);
+		psDevInfo->psRGXFWIfFwSysDataMemDesc = NULL;
+	}
+
+#if defined(SUPPORT_TBI_INTERFACE)
+	if (psDevInfo->psRGXFWIfTBIBufferMemDesc)
+	{
+		RGXTBIBufferDeinit(psDevInfo);
+	}
+#endif
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	if (psDevInfo->psRGXFWIfRegCfgMemDesc)
+	{
+		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfRegCfgMemDesc);
+		psDevInfo->psRGXFWIfRegCfgMemDesc = NULL;
+	}
+#endif
+	if (psDevInfo->psRGXFWIfHWPerfCountersMemDesc)
+	{
+		RGXUnsetFirmwareAddress(psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+		psDevInfo->psRGXFWIfHWPerfCountersMemDesc = NULL;
+	}
+
+#if defined(SUPPORT_SECURITY_VALIDATION)
+	if (psDevInfo->psRGXFWIfNonSecureBufMemDesc)
+	{
+		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfNonSecureBufMemDesc);
+		psDevInfo->psRGXFWIfNonSecureBufMemDesc = NULL;
+	}
+
+	if (psDevInfo->psRGXFWIfSecureBufMemDesc)
+	{
+		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfSecureBufMemDesc);
+		psDevInfo->psRGXFWIfSecureBufMemDesc = NULL;
+	}
+#endif
+
+	/* Free the SLC3 fence object */
+	_FreeSLC3Fence(psDevInfo);
+
+#if defined(SUPPORT_PDVFS)
+	if (psDevInfo->psRGXFWIFCoreClkRateMemDesc)
+	{
+		if (psDevInfo->pui32RGXFWIFCoreClkRate != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIFCoreClkRateMemDesc);
+			psDevInfo->pui32RGXFWIFCoreClkRate = NULL;
+		}
+
+		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIFCoreClkRateMemDesc);
+		psDevInfo->psRGXFWIFCoreClkRateMemDesc = NULL;
+	}
+#endif
+}
+
+/*!
+*******************************************************************************
+ @Function      RGXFreeFwOsData
+
+ @Description   Frees all os-specific firmware related data
+
+ @Input         psDevInfo
+
+ @Note          All releases are NULL-guarded, so this is safe on partially
+                initialised state; it is used as the error-path cleanup of
+                RGXSetupFwOsData.
+******************************************************************************/
+static void RGXFreeFwOsData(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGXFreeCCBReturnSlots(psDevInfo,
+	                      &psDevInfo->pui32KernelCCBRtnSlots,
+	                      &psDevInfo->psKernelCCBRtnSlotsMemDesc);
+	RGXFreeCCB(psDevInfo,
+	           &psDevInfo->psKernelCCBCtl,
+	           &psDevInfo->psKernelCCBCtlMemDesc,
+	           &psDevInfo->psKernelCCB,
+	           &psDevInfo->psKernelCCBMemDesc);
+
+	RGXFreeCCB(psDevInfo,
+	           &psDevInfo->psFirmwareCCBCtl,
+	           &psDevInfo->psFirmwareCCBCtlMemDesc,
+	           &psDevInfo->psFirmwareCCB,
+	           &psDevInfo->psFirmwareCCBMemDesc);
+
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+	RGXFreeCCB(psDevInfo,
+	           &psDevInfo->psCheckpointCCBCtl,
+	           &psDevInfo->psCheckpointCCBCtlMemDesc,
+	           &psDevInfo->psCheckpointCCB,
+	           &psDevInfo->psCheckpointCCBMemDesc);
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	RGXFreeCCB(psDevInfo,
+	           &psDevInfo->psWorkEstFirmwareCCBCtl,
+	           &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
+	           &psDevInfo->psWorkEstFirmwareCCB,
+	           &psDevInfo->psWorkEstFirmwareCCBMemDesc);
+#endif
+
+	if (psDevInfo->psPowSyncPrim != NULL)
+	{
+		SyncPrimFree(psDevInfo->psPowSyncPrim);
+		psDevInfo->psPowSyncPrim = NULL;
+	}
+
+	if (psDevInfo->hSyncPrimContext != (IMG_HANDLE) NULL)
+	{
+		SyncPrimContextDestroy(psDevInfo->hSyncPrimContext);
+		psDevInfo->hSyncPrimContext = (IMG_HANDLE) NULL;
+	}
+
+	if (psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc)
+	{
+		if (psDevInfo->psRGXFWIfHWRInfoBufCtl != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc);
+			psDevInfo->psRGXFWIfHWRInfoBufCtl = NULL;
+		}
+		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc);
+		psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc = NULL;
+	}
+
+	if (psDevInfo->psRGXFWIfFwOsDataMemDesc)
+	{
+		if (psDevInfo->psRGXFWIfFwOsData != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfFwOsDataMemDesc);
+			psDevInfo->psRGXFWIfFwOsData = NULL;
+		}
+		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfFwOsDataMemDesc);
+		psDevInfo->psRGXFWIfFwOsDataMemDesc = NULL;
+	}
+}
+
+/*!
+*******************************************************************************
+ @Function      RGXFreeFirmware
+
+ @Description   Frees all the firmware-related allocations: OS data first,
+                then the connection/init control structures, then the
+                system-wide firmware data.
+
+ @Input         psDevInfo
+******************************************************************************/
+void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGXFreeFwOsData(psDevInfo);
+
+	if (psDevInfo->psRGXFWIfConnectionCtl)
+	{
+		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfConnectionCtlMemDesc);
+		psDevInfo->psRGXFWIfConnectionCtl = NULL;
+	}
+
+	if (psDevInfo->psRGXFWIfConnectionCtlMemDesc)
+	{
+		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfConnectionCtlMemDesc);
+		psDevInfo->psRGXFWIfConnectionCtlMemDesc = NULL;
+	}
+
+	if (psDevInfo->psRGXFWIfOsInit)
+	{
+		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfOsInitMemDesc);
+		psDevInfo->psRGXFWIfOsInit = NULL;
+	}
+
+	if (psDevInfo->psRGXFWIfOsInitMemDesc)
+	{
+		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfOsInitMemDesc);
+		psDevInfo->psRGXFWIfOsInitMemDesc = NULL;
+	}
+
+	RGXFreeFwSysData(psDevInfo);
+	if (psDevInfo->psRGXFWIfSysInit)
+	{
+		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfSysInitMemDesc);
+		psDevInfo->psRGXFWIfSysInit = NULL;
+	}
+
+	if (psDevInfo->psRGXFWIfSysInitMemDesc)
+	{
+		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfSysInitMemDesc);
+		psDevInfo->psRGXFWIfSysInitMemDesc = NULL;
+	}
+}
+
+/******************************************************************************
+ FUNCTION	: RGXAcquireKernelCCBSlot
+
+ PURPOSE	: Attempts to obtain a slot in the Kernel CCB
+
+ PARAMETERS	: psKCCBCtrlMemDesc - memdesc of the KCCB control structure
+			: psKCCBCtl - the KCCB control structure
+			: pui32Offset - receives the next write offset on success
+
+ RETURNS	: PVRSRV_ERROR (PVRSRV_ERROR_KERNEL_CCB_FULL if no slot is free)
+******************************************************************************/
+static PVRSRV_ERROR
RGXAcquireKernelCCBSlot(DEVMEM_MEMDESC *psKCCBCtrlMemDesc, + RGXFWIF_CCB_CTL *psKCCBCtl, + IMG_UINT32 *pui32Offset) +{ + IMG_UINT32 ui32OldWriteOffset, ui32NextWriteOffset; + + ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset; + ui32NextWriteOffset = (ui32OldWriteOffset + 1) & psKCCBCtl->ui32WrapMask; + + /* + * Note: The MTS can queue up to 255 kicks (254 pending kicks and 1 + * executing kick), hence the kernel CCB should not queue more than + * 254 commands. + */ + PVR_ASSERT(psKCCBCtl->ui32WrapMask < 255); + +#if defined(PDUMP) + /* Wait for sufficient CCB space to become available */ + PDUMPCOMMENTWITHFLAGS(0, "Wait for kCCB woff=%u", ui32NextWriteOffset); + DevmemPDumpCBP(psKCCBCtrlMemDesc, + offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset), + ui32NextWriteOffset, + 1, + (psKCCBCtl->ui32WrapMask + 1)); +#endif + + if (ui32NextWriteOffset == psKCCBCtl->ui32ReadOffset) + { + return PVRSRV_ERROR_KERNEL_CCB_FULL; + } + *pui32Offset = ui32NextWriteOffset; + return PVRSRV_OK; +} + +/****************************************************************************** + FUNCTION : RGXPollKernelCCBSlot + + PURPOSE : Poll for space in Kernel CCB + + PARAMETERS : psCCB - the CCB + : Address of space if available, NULL otherwise + + RETURNS : PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR RGXPollKernelCCBSlot(DEVMEM_MEMDESC *psKCCBCtrlMemDesc, + RGXFWIF_CCB_CTL *psKCCBCtl) +{ + IMG_UINT32 ui32OldWriteOffset, ui32NextWriteOffset; + + ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset; + ui32NextWriteOffset = (ui32OldWriteOffset + 1) & psKCCBCtl->ui32WrapMask; + + /* + * Note: The MTS can queue up to 255 kicks (254 pending kicks and 1 + * executing kick), hence the kernel CCB should not queue more than + * 254 commands. 
+ */ + PVR_ASSERT(psKCCBCtl->ui32WrapMask < 255); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + + if (ui32NextWriteOffset != psKCCBCtl->ui32ReadOffset) + { + return PVRSRV_OK; + } + { + /* + * The following sanity check doesn't impact performance, + * since the CPU has to wait for the GPU anyway (full kernel CCB). + */ + if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + return PVRSRV_ERROR_KERNEL_CCB_FULL; + } + } + + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + return PVRSRV_ERROR_KERNEL_CCB_FULL; +} + +/****************************************************************************** + FUNCTION : RGXGetCmdMemCopySize + + PURPOSE : Calculates actual size of KCCB command getting used + + PARAMETERS : eCmdType Type of KCCB command + + RETURNS : Returns actual size of KCCB command on success else zero +******************************************************************************/ +static IMG_UINT32 RGXGetCmdMemCopySize(RGXFWIF_KCCB_CMD_TYPE eCmdType) +{ + /* First get offset of uCmdData inside the struct RGXFWIF_KCCB_CMD + * This will account alignment requirement of uCmdData union + * + * Then add command-data size depending on command type to calculate actual + * command size required to do mem copy + * + * NOTE: Make sure that uCmdData is the last member of RGXFWIF_KCCB_CMD struct. 
+ */ + switch (eCmdType) + { + case RGXFWIF_KCCB_CMD_KICK: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_KICK_DATA); + } + case RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA); + } + case RGXFWIF_KCCB_CMD_MMUCACHE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_MMUCACHEDATA); + } +#if defined(SUPPORT_USC_BREAKPOINT) + case RGXFWIF_KCCB_CMD_BP: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_BPDATA); + } +#endif + case RGXFWIF_KCCB_CMD_SLCFLUSHINVAL: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_SLCFLUSHINVALDATA); + } + case RGXFWIF_KCCB_CMD_CLEANUP: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_CLEANUP_REQUEST); + } + case RGXFWIF_KCCB_CMD_POW: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_POWER_REQUEST); + } + case RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE: + case RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_ZSBUFFER_BACKING_DATA); + } + case RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_FREELIST_GS_DATA); + } + case RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_FREELISTS_RECONSTRUCTION_DATA); + } + case RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_SIGNAL_UPDATE_DATA); + } + case RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_WRITE_OFFSET_UPDATE_DATA); + } + case RGXFWIF_KCCB_CMD_FORCE_UPDATE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA); + } +#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) + case RGXFWIF_KCCB_CMD_REGCONFIG: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + 
sizeof(RGXFWIF_REGCONFIG_DATA); + } +#endif +#if defined(SUPPORT_PDVFS) + case RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_PDVFS_MAX_FREQ_DATA); + } +#endif + case RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_OSID_PRIORITY_DATA); + } + case RGXFWIF_KCCB_CMD_HCS_SET_DEADLINE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HCS_CTL); + } + case RGXFWIF_KCCB_CMD_OS_ISOLATION_GROUP_CHANGE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_OSID_ISOLATION_GROUP_DATA); + } + case RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_OS_STATE_CHANGE_DATA); + } + case RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CTRL); + } + case RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS); + } + case RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CTRL_BLKS); + } + case RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_CORECLKSPEEDCHANGE_DATA); + } + case RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_DEV_VIRTADDR); + } + case RGXFWIF_KCCB_CMD_PHR_CFG: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_PHR_CFG_DATA); + } + case RGXFWIF_KCCB_CMD_HEALTH_CHECK: + case RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL: + case RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT: + { + /* No command specific data */ + return offsetof(RGXFWIF_KCCB_CMD, uCmdData); + } + default: + { + /* Invalid (OR) Unused (OR) Newly added command type */ + return 0; /* Error */ + } + } +} + +PVRSRV_ERROR RGXWaitForKCCBSlotUpdate(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 
ui32SlotNum, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + + eError = PVRSRVWaitForValueKM( + (IMG_UINT32 __iomem *)&psDevInfo->pui32KernelCCBRtnSlots[ui32SlotNum], + RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, + RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVWaitForValueKM"); + +#if defined(PDUMP) + /* PDumping conditions same as RGXSendCommandRaw for the actual command and poll command to go in harmony */ + if (PDumpIsContCaptureOn()) + { + IMG_BOOL bIsInCaptureRange; + + PDumpIsCaptureFrameKM(&bIsInCaptureRange); + + if ((bIsInCaptureRange || PDUMP_IS_CONTINUOUS(ui32PDumpFlags)) && !PDUMPPOWCMDINTRANS()) + { + PDUMPCOMMENT("Poll on KCCB slot %u for value %u (mask: 0x%x)", ui32SlotNum, + RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED); + + eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBRtnSlotsMemDesc, + ui32SlotNum * sizeof(IMG_UINT32), + RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, + RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags); + PVR_LOG_IF_ERROR(eError, "DevmemPDumpDevmemPol32"); + } + } +#else + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); +#endif + + return eError; +} + +static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_KCCB_CMD *psKCCBCmd, + IMG_UINT32 uiPdumpFlags, + IMG_UINT32 *pui32CmdKCCBSlot) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; + RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl; + IMG_UINT8 *pui8KCCB = psDevInfo->psKernelCCB; + IMG_UINT32 ui32NewWriteOffset; + IMG_UINT32 ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset; + IMG_UINT32 ui32CmdMemCopySize; + +#if !defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(uiPdumpFlags); +#else + IMG_BOOL bPdumpEnabled = IMG_FALSE; + IMG_BOOL bPDumpPowTrans = PDUMPPOWCMDINTRANS(); + IMG_BOOL bContCaptureOn = PDumpIsContCaptureOn(); /* client connected or in pdump init phase */ + + if (bContCaptureOn) + { + IMG_BOOL bIsInCaptureRange; + + 
PDumpIsCaptureFrameKM(&bIsInCaptureRange); + bPdumpEnabled = (bIsInCaptureRange || PDUMP_IS_CONTINUOUS(uiPdumpFlags)) && !bPDumpPowTrans; + + /* in capture range */ + if (!PVRSRV_VZ_MODE_IS(GUEST) && bPdumpEnabled) + { + if (!psDevInfo->bDumpedKCCBCtlAlready) + { + /* entering capture range */ + psDevInfo->bDumpedKCCBCtlAlready = IMG_TRUE; + + /* Wait for the live FW to catch up */ + PVR_DPF((PVR_DBG_MESSAGE, "%s: waiting on fw to catch-up, roff: %d, woff: %d", + __func__, + psKCCBCtl->ui32ReadOffset, ui32OldWriteOffset)); + PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)&psKCCBCtl->ui32ReadOffset, + ui32OldWriteOffset, 0xFFFFFFFF, + POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP); + + /* Dump Init state of Kernel CCB control (read and write offset) */ + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Initial state of kernel CCB Control, roff: %d, woff: %d", + psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset); + + DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc, + 0, + sizeof(RGXFWIF_CCB_CTL), + PDUMP_FLAGS_CONTINUOUS); + } + } + } +#endif + +#if defined(SUPPORT_AUTOVZ) + if (!((KM_FW_CONNECTION_IS(READY, psDevInfo) && KM_OS_CONNECTION_IS(READY, psDevInfo)) || + (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && KM_OS_CONNECTION_IS(ACTIVE, psDevInfo)))) + { + PVR_DPF((PVR_DBG_ERROR, "%s: The firmware-driver connection is invalid:" + "driver state = %u / firmware state = %u;" + "expected READY (%u/%u) or ACTIVE (%u/%u);", + __func__, KM_GET_OS_CONNECTION(psDevInfo), KM_GET_FW_CONNECTION(psDevInfo), + RGXFW_CONNECTION_OS_READY, RGXFW_CONNECTION_FW_READY, + RGXFW_CONNECTION_OS_ACTIVE, RGXFW_CONNECTION_FW_ACTIVE)); + eError = PVRSRV_ERROR_PVZ_OSID_IS_OFFLINE; + goto _RGXSendCommandRaw_Exit; + } +#endif + + PVR_ASSERT(sizeof(RGXFWIF_KCCB_CMD) == psKCCBCtl->ui32CmdSize); + if (!OSLockIsLocked(psDeviceNode->hPowerLock)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s called without power lock held!", + __func__)); + 
PVR_ASSERT(OSLockIsLocked(psDeviceNode->hPowerLock)); + } + + /* Acquire a slot in the CCB */ + eError = RGXAcquireKernelCCBSlot(psDevInfo->psKernelCCBCtlMemDesc, psKCCBCtl, &ui32NewWriteOffset); + if (eError != PVRSRV_OK) + { + goto _RGXSendCommandRaw_Exit; + } + + /* Calculate actual size of command to optimize device mem copy */ + ui32CmdMemCopySize = RGXGetCmdMemCopySize(psKCCBCmd->eCmdType); + PVR_LOG_RETURN_IF_FALSE(ui32CmdMemCopySize !=0, "RGXGetCmdMemCopySize failed", PVRSRV_ERROR_INVALID_CCB_COMMAND); + + /* Copy the command into the CCB */ + OSDeviceMemCopy(&pui8KCCB[ui32OldWriteOffset * psKCCBCtl->ui32CmdSize], + psKCCBCmd, ui32CmdMemCopySize); + + /* If non-NULL pui32CmdKCCBSlot passed-in, return the kCCB slot in which the command was enqueued */ + if (pui32CmdKCCBSlot) + { + *pui32CmdKCCBSlot = ui32OldWriteOffset; + + /* Each such command enqueue needs to reset the slot value first. This is so that a caller + * doesn't get to see stale/false value in allotted slot */ + psDevInfo->pui32KernelCCBRtnSlots[ui32OldWriteOffset] = RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE; +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(uiPdumpFlags, "Reset kCCB slot number %u", ui32OldWriteOffset); + DevmemPDumpLoadMem(psDevInfo->psKernelCCBRtnSlotsMemDesc, + ui32OldWriteOffset * sizeof(IMG_UINT32), + sizeof(IMG_UINT32), + uiPdumpFlags); +#endif + PVR_DPF((PVR_DBG_MESSAGE, "%s: Device (%p) KCCB slot %u reset with value %u for command type %u", + __func__, psDevInfo, ui32OldWriteOffset, RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE, psKCCBCmd->eCmdType)); + } + + /* ensure kCCB data is written before the offsets */ + OSWriteMemoryBarrier(); + + /* Move past the current command */ + psKCCBCtl->ui32WriteOffset = ui32NewWriteOffset; + /* Force a read-back to memory to avoid posted writes on certain buses */ + (void) psKCCBCtl->ui32WriteOffset; + + +#if defined(PDUMP) + if (bContCaptureOn) + { + /* in capture range */ + if (bPdumpEnabled) + { + /* Dump new Kernel CCB content */ + 
PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump kCCB cmd woff = %d", + ui32OldWriteOffset); + DevmemPDumpLoadMem(psDevInfo->psKernelCCBMemDesc, + ui32OldWriteOffset * psKCCBCtl->ui32CmdSize, + ui32CmdMemCopySize, + PDUMP_FLAGS_CONTINUOUS); + + /* Dump new kernel CCB write offset */ + PDUMPCOMMENTWITHFLAGS(uiPdumpFlags, "Dump kCCBCtl woff: %d", + ui32NewWriteOffset); + DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc, + offsetof(RGXFWIF_CCB_CTL, ui32WriteOffset), + sizeof(IMG_UINT32), + uiPdumpFlags); + + /* mimic the read-back of the write from above */ + DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc, + offsetof(RGXFWIF_CCB_CTL, ui32WriteOffset), + ui32NewWriteOffset, + 0xFFFFFFFF, + PDUMP_POLL_OPERATOR_EQUAL, + uiPdumpFlags); + + } + /* out of capture range */ + else + { + eError = RGXPdumpDrainKCCB(psDevInfo, ui32OldWriteOffset); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXPdumpDrainKCCB", _RGXSendCommandRaw_Exit); + } + } +#endif + + + PDUMPCOMMENTWITHFLAGS(uiPdumpFlags, "MTS kick for kernel CCB"); + /* + * Kick the MTS to schedule the firmware. + */ + __MTSScheduleWrite(psDevInfo, MTS_SCHEDULE_DM_VAL & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK); + + PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_MTS_SCHEDULE, MTS_SCHEDULE_DM_VAL & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK, uiPdumpFlags); + +#if defined(NO_HARDWARE) + /* keep the roff updated because fw isn't there to update it */ + psKCCBCtl->ui32ReadOffset = psKCCBCtl->ui32WriteOffset; +#endif + +_RGXSendCommandRaw_Exit: + return eError; +} + +/****************************************************************************** + FUNCTION : _AllocDeferredCommand + + PURPOSE : Allocate a KCCB command and add it to KCCB deferred list + + PARAMETERS : psDevInfo RGX device info + : eKCCBType Firmware Command type + : psKCCBCmd Firmware Command + : uiPdumpFlags Pdump flags + + RETURNS : PVRSRV_OK If all went good, PVRSRV_ERROR_RETRY otherwise. 
+******************************************************************************/ +static PVRSRV_ERROR _AllocDeferredCommand(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_KCCB_CMD *psKCCBCmd, + IMG_UINT32 uiPdumpFlags) +{ + RGX_DEFERRED_KCCB_CMD *psDeferredCommand; + OS_SPINLOCK_FLAGS uiFlags; + + psDeferredCommand = OSAllocMem(sizeof(*psDeferredCommand)); + + if (!psDeferredCommand) + { + PVR_DPF((PVR_DBG_ERROR, + "Deferring a KCCB command failed: allocation failure: requesting retry")); + return PVRSRV_ERROR_RETRY; + } + + psDeferredCommand->sKCCBcmd = *psKCCBCmd; + psDeferredCommand->uiPdumpFlags = uiPdumpFlags; + psDeferredCommand->psDevInfo = psDevInfo; + + OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + dllist_add_to_tail(&(psDevInfo->sKCCBDeferredCommandsListHead), &(psDeferredCommand->sListNode)); + psDevInfo->ui32KCCBDeferredCommandsCount++; + OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + + return PVRSRV_OK; +} + +/****************************************************************************** + FUNCTION : _FreeDeferredCommand + + PURPOSE : Remove from the deferred list the sent deferred KCCB command + + PARAMETERS : psNode Node in deferred list + : psDeferredKCCBCmd KCCB Command to free + + RETURNS : None +******************************************************************************/ +static void _FreeDeferredCommand(DLLIST_NODE *psNode, RGX_DEFERRED_KCCB_CMD *psDeferredKCCBCmd) +{ + dllist_remove_node(psNode); + psDeferredKCCBCmd->psDevInfo->ui32KCCBDeferredCommandsCount--; + OSFreeMem(psDeferredKCCBCmd); +} + +/****************************************************************************** + FUNCTION : RGXSendCommandsFromDeferredList + + PURPOSE : Try send KCCB commands in deferred list to KCCB + Should be called by holding PowerLock + + PARAMETERS : psDevInfo RGX device info + : bPoll Poll for space in KCCB + + RETURNS : PVRSRV_OK If all commands in deferred list are sent to KCCB, + 
PVRSRV_ERROR_KERNEL_CCB_FULL otherwise. +******************************************************************************/ +PVRSRV_ERROR RGXSendCommandsFromDeferredList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bPoll) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + DLLIST_NODE *psNode, *psNext; + RGX_DEFERRED_KCCB_CMD *psTempDeferredKCCBCmd; + DLLIST_NODE sCommandList; + OS_SPINLOCK_FLAGS uiFlags; + + PVR_ASSERT(PVRSRVPwrLockIsLockedByMe(psDevInfo->psDeviceNode)); + + /* !!! Important !!! + * + * The idea of moving the whole list hLockKCCBDeferredCommandsList below + * to the temporary list is only valid under the principle that all of the + * operations are also protected by the power lock. It must be held + * so that the order of the commands doesn't get messed up while we're + * performing the operations on the local list. + * + * The necessity of releasing the hLockKCCBDeferredCommandsList comes from + * the fact that _FreeDeferredCommand() is allocating memory and it can't + * be done in atomic context (inside section protected by a spin lock). + * + * We're using spin lock here instead of mutex to quickly perform a check + * if the list is empty in MISR without a risk that the MISR is going + * to sleep due to a lock. 
+ */ + + /* move the whole list to a local list so it can be processed without lock */ + OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + dllist_replace_head(&psDevInfo->sKCCBDeferredCommandsListHead, &sCommandList); + OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + if (dllist_is_empty(&sCommandList)) + { + return PVRSRV_OK; + } + + /* For every deferred KCCB command, try to send it*/ + dllist_foreach_node(&sCommandList, psNode, psNext) + { + psTempDeferredKCCBCmd = IMG_CONTAINER_OF(psNode, RGX_DEFERRED_KCCB_CMD, sListNode); + eError = RGXSendCommandRaw(psTempDeferredKCCBCmd->psDevInfo, + &psTempDeferredKCCBCmd->sKCCBcmd, + psTempDeferredKCCBCmd->uiPdumpFlags, + NULL /* We surely aren't interested in kCCB slot number of deferred command */); + if (eError != PVRSRV_OK) + { + if (!bPoll) + { + eError = PVRSRV_ERROR_KERNEL_CCB_FULL; + goto cleanup_; + } + break; + } + + _FreeDeferredCommand(psNode, psTempDeferredKCCBCmd); + } + + if (bPoll) + { + PVRSRV_ERROR eErrPollForKCCBSlot; + + /* Don't overwrite eError because if RGXPollKernelCCBSlot returns OK and the + * outer loop times-out, we'll still want to return KCCB_FULL to caller + */ + eErrPollForKCCBSlot = RGXPollKernelCCBSlot(psDevInfo->psKernelCCBCtlMemDesc, + psDevInfo->psKernelCCBCtl); + if (eErrPollForKCCBSlot == PVRSRV_ERROR_KERNEL_CCB_FULL) + { + eError = PVRSRV_ERROR_KERNEL_CCB_FULL; + goto cleanup_; + } + } + } END_LOOP_UNTIL_TIMEOUT(); + +cleanup_: + /* if the local list is not empty put it back to the deferred list head + * so that the old order of commands is retained */ + OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + dllist_insert_list_at_head(&psDevInfo->sKCCBDeferredCommandsListHead, &sCommandList); + OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + + return eError; +} + +PVRSRV_ERROR RGXSendCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_KCCB_CMD 
*psKCCBCmd, + IMG_UINT32 uiPdumpFlags, + IMG_UINT32 *pui32CmdKCCBSlot) +{ + IMG_BOOL bPoll = (pui32CmdKCCBSlot != NULL); + PVRSRV_ERROR eError; + + /* + * First try to Flush all the cmds in deferred list. + * + * We cannot defer an incoming command if the caller is interested in + * knowing the command's kCCB slot: it plans to poll/wait for a + * response from the FW just after the command is enqueued, so we must + * poll for space to be available. + */ + eError = RGXSendCommandsFromDeferredList(psDevInfo, bPoll); + if (eError == PVRSRV_OK) + { + eError = RGXSendCommandRaw(psDevInfo, + psKCCBCmd, + uiPdumpFlags, + pui32CmdKCCBSlot); + } + + /* + * If we don't manage to enqueue one of the deferred commands or the command + * passed as argument because the KCCB is full, insert the latter into the deferred commands list. + * The deferred commands will also be flushed eventually by: + * - one more KCCB command sent for any DM + * - RGX_MISRHandler_CheckFWActivePowerState + */ + if (eError == PVRSRV_ERROR_KERNEL_CCB_FULL) + { + if (pui32CmdKCCBSlot == NULL) + { + eError = _AllocDeferredCommand(psDevInfo, psKCCBCmd, uiPdumpFlags); + } + else + { + /* Let the caller retry. Otherwise if we deferred the command and returned OK, + * the caller can end up looking in a stale CCB slot. + */ + PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't flush the deferred queue for a command (Type:%d) " + "that needed the kCCB command slot number! 
Returning kCCB FULL", + __func__, psKCCBCmd->eCmdType)); + } + } + + return eError; +} + +PVRSRV_ERROR RGXSendCommandWithPowLockAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_KCCB_CMD *psKCCBCmd, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 *pui32CmdKCCBSlot) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; + + /* Ensure Rogue is powered up before kicking MTS */ + eError = PVRSRVPowerLock(psDeviceNode); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: failed to acquire powerlock (%s)", + __func__, + PVRSRVGetErrorString(eError))); + + goto _PVRSRVPowerLock_Exit; + } + + PDUMPPOWCMDSTART(); + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, + PVRSRV_DEV_POWER_STATE_ON, + IMG_FALSE); + PDUMPPOWCMDEND(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition Rogue to ON (%s)", + __func__, + PVRSRVGetErrorString(eError))); + + goto _PVRSRVSetDevicePowerStateKM_Exit; + } + + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, + psKCCBCmd, + ui32PDumpFlags, + pui32CmdKCCBSlot); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to schedule command (%s)", + __func__, + PVRSRVGetErrorString(eError))); +#if defined(DEBUG) + /* PVRSRVDebugRequest must be called without powerlock */ + PVRSRVPowerUnlock(psDeviceNode); + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + goto _PVRSRVPowerLock_Exit; +#endif + } + +_PVRSRVSetDevicePowerStateKM_Exit: + PVRSRVPowerUnlock(psDeviceNode); + +_PVRSRVPowerLock_Exit: + return eError; +} + +void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*) hCmdCompHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + OSScheduleMISR(psDevInfo->hProcessQueuesMISR); +} + +/*! 
+******************************************************************************* + + @Function RGX_MISRHandler_ScheduleProcessQueues + + @Description - Sends uncounted kick to all the DMs (the FW will process all + the queue for all the DMs) +******************************************************************************/ +static void RGX_MISRHandler_ScheduleProcessQueues(void *pvData) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = pvData; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + PVRSRV_DEV_POWER_STATE ePowerState; + + eError = PVRSRVPowerLock(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", + __func__, PVRSRVGetErrorString(eError))); + return; + } + + /* Check whether it's worth waking up the GPU */ + eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); + + if (!PVRSRV_VZ_MODE_IS(GUEST) && + (eError == PVRSRV_OK) && (ePowerState == PVRSRV_DEV_POWER_STATE_OFF)) + { + /* For now, guest drivers will always wake-up the GPU */ + RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb; + IMG_BOOL bGPUHasWorkWaiting; + + bGPUHasWorkWaiting = + (RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord) == RGXFWIF_GPU_UTIL_STATE_BLOCKED); + + if (!bGPUHasWorkWaiting) + { + /* all queues are empty, don't wake up the GPU */ + PVRSRVPowerUnlock(psDeviceNode); + return; + } + } + + PDUMPPOWCMDSTART(); + /* wake up the GPU */ + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, + PVRSRV_DEV_POWER_STATE_ON, + IMG_FALSE); + PDUMPPOWCMDEND(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition Rogue to ON (%s)", + __func__, PVRSRVGetErrorString(eError))); + + PVRSRVPowerUnlock(psDeviceNode); + return; + } + + /* uncounted kick to the FW */ + HTBLOGK(HTB_SF_MAIN_KICK_UNCOUNTED); + __MTSScheduleWrite(psDevInfo, (MTS_SCHEDULE_DM_VAL & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED); + + 
PVRSRVPowerUnlock(psDeviceNode); +} + +PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode) +{ + return OSInstallMISR(phMISR, + RGX_MISRHandler_ScheduleProcessQueues, + psDeviceNode, + "RGX_ScheduleProcessQueues"); +} + +PVRSRV_ERROR RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, + SERVER_MMU_CONTEXT *psServerMMUContext, + RGXFWIF_DM eKCCBType, + RGXFWIF_KCCB_CMD *psKCCBCmd, + IMG_UINT32 ui32CacheOpFence, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 *pui32CmdKCCBSlot) +{ + PVRSRV_ERROR eError; + IMG_UINT32 uiMMUSyncUpdate; + + /* Don't send the command/power up request if the device is de-initialising. + * The de-init thread could destroy the device whilst the power up + * sequence below is accessing the HW registers. + */ + if (unlikely((psDevInfo == NULL) || + (psDevInfo->psDeviceNode == NULL) || + (psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT))) + { + return PVRSRV_ERROR_INVALID_DEVICE; + } + + eError = CacheOpFence(eKCCBType, ui32CacheOpFence); + if (unlikely(eError != PVRSRV_OK)) goto RGXScheduleCommand_exit; + + /* PVRSRVPowerLock guarantees atomicity between commands. This is helpful + in a scenario with several applications allocating resources. */ + eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", + __func__, PVRSRVGetErrorString(eError))); + + /* If system is found powered OFF, Retry scheduling the command */ + if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)) + { + eError = PVRSRV_ERROR_RETRY; + } + + goto RGXScheduleCommand_exit; + } + + if (unlikely(psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT)) + { + /* If we have the power lock the device is valid but the deinit + * thread could be waiting for the lock. 
*/ + PVRSRVPowerUnlock(psDevInfo->psDeviceNode); + return PVRSRV_ERROR_INVALID_DEVICE; + } + + /* Ensure device is powered up before sending any commands */ + PDUMPPOWCMDSTART(); + eError = PVRSRVSetDevicePowerStateKM(psDevInfo->psDeviceNode, + PVRSRV_DEV_POWER_STATE_ON, + IMG_FALSE); + PDUMPPOWCMDEND(); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition RGX to ON (%s)", + __func__, PVRSRVGetErrorString(eError))); + goto _PVRSRVSetDevicePowerStateKM_Exit; + } + + eError = RGXPreKickCacheCommand(psDevInfo, psServerMMUContext, eKCCBType, &uiMMUSyncUpdate, IMG_FALSE); + if (unlikely(eError != PVRSRV_OK)) goto _PVRSRVSetDevicePowerStateKM_Exit; + + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, pui32CmdKCCBSlot); + if (unlikely(eError != PVRSRV_OK)) goto _PVRSRVSetDevicePowerStateKM_Exit; + +_PVRSRVSetDevicePowerStateKM_Exit: + PVRSRVPowerUnlock(psDevInfo->psDeviceNode); + +#if defined(SUPPORT_VALIDATION) + /** + * For validation, force the core to different powered units between + * DM kicks. PVRSRVDeviceGPUUnitsPowerChange acquires the power lock, hence + * ensure that this is done after the power lock is released. 
+ */ + if ((eError == PVRSRV_OK) && (eKCCBType != RGXFWIF_DM_GP)) + { + if (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN) + { + IMG_UINT32 ui32PowerDomainState; + IMG_BOOL bIsValid; + do { + ui32PowerDomainState = RGXPowerDomainGetNextState(&psDevInfo->sPowerDomainState); + bIsValid = ui32PowerDomainState && + ((ui32PowerDomainState & ~(psDevInfo->ui32AvailablePowUnitsMask)) == 0); + } while (!bIsValid); + + eError = PVRSRVDeviceGPUUnitsPowerChange(psDevInfo->psDeviceNode, ui32PowerDomainState); + if (eError != PVRSRV_OK) + goto RGXScheduleCommand_exit; + } + } +#endif + +RGXScheduleCommand_exit: + return eError; +} + +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) +/* + * RGXCheckCheckpointCCB + */ +void RGXCheckCheckpointCCB(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_BOOL bSignal = IMG_FALSE; + + PRGXFWIF_UFO_ADDR *psFwUFOAddr; + RGXFWIF_CCB_CTL *psChptCCBCtl = psDevInfo->psCheckpointCCBCtl; + IMG_UINT8 *psChptCCB = psDevInfo->psCheckpointCCB; + IMG_UINT32 ui32WriteOffset, ui32ReadOffset, ui32WrapMask = psChptCCBCtl->ui32WrapMask; + IMG_UINT32 uiFwAddr; + PVRSRV_SYNC_CHECKPOINT_STATE uiChptState; + + /* + * Check if the firmware has signalled a full sync state check. + */ + if (psDevInfo->psRGXFWIfFwOsData->ui32FWSyncCheckMark != psDevInfo->psRGXFWIfFwOsData->ui32HostSyncCheckMark) + { + /* + * Update the offsets first so that if the firmware tries to write + * another checkpoint it is not missed by the check state. 
+ */ + psDevInfo->psRGXFWIfFwOsData->ui32HostSyncCheckMark = psDevInfo->psRGXFWIfFwOsData->ui32FWSyncCheckMark; + psChptCCBCtl->ui32ReadOffset = psChptCCBCtl->ui32WriteOffset; + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Checkpoint CCB full, performing full sync checkpoint state check", __func__)); + + SyncCheckpointCheckState(); + bSignal = IMG_TRUE; + +#if defined(SUPPORT_BUFFER_SYNC) + pvr_buffer_sync_check_state(); +#endif + + goto exit_signal; + } + + /* + * Take a snapshot of the current CCB ctl pointers at the start of + * processing. + */ + ui32WriteOffset = psChptCCBCtl->ui32WriteOffset; + ui32ReadOffset = psChptCCBCtl->ui32ReadOffset; + ui32WrapMask = psChptCCBCtl->ui32WrapMask; + + while (ui32ReadOffset != ui32WriteOffset) + { + /* Point to the next checkpoint address */ + psFwUFOAddr = ((PRGXFWIF_UFO_ADDR *)psChptCCB) + ui32ReadOffset; + + /* + * State is encoded in bit 1 of ufo address + * 1 = signalled, 0 = errored + */ + uiChptState = PVRSRV_SYNC_CHECKPOINT_ERRORED; + uiFwAddr = psFwUFOAddr->ui32Addr; + + if (uiFwAddr & 0x1U) + { + uiChptState = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + } + uiFwAddr |= 0x1U; + + if (SyncCheckpointUFOHasSignalled(psDeviceNode, uiFwAddr, uiChptState)) + { + bSignal = IMG_TRUE; + } + else +#if defined(SUPPORT_BUFFER_SYNC) + if (pvr_buffer_sync_checkpoint_ufo_has_signalled(uiFwAddr, uiChptState)) + { + /* Buffer sync does not need a signal call. 
*/ + } + else +#endif + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware signalled checkpoint (%#08X) with no host backing", __func__, uiFwAddr)); + } + + /* Update read offset */ + ui32ReadOffset = (ui32ReadOffset + 1) & ui32WrapMask; + } + + psChptCCBCtl->ui32ReadOffset = ui32ReadOffset; + +exit_signal: + if (bSignal) + { + SyncCheckpointSignalWaiters(); + } +} +#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */ + +/* + * RGXCheckFirmwareCCB + */ +void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_FWCCB_CMD *psFwCCBCmd; + + RGXFWIF_CCB_CTL *psFWCCBCtl = psDevInfo->psFirmwareCCBCtl; + IMG_UINT8 *psFWCCB = psDevInfo->psFirmwareCCB; + + while (psFWCCBCtl->ui32ReadOffset != psFWCCBCtl->ui32WriteOffset) + { + /* Point to the next command */ + psFwCCBCmd = ((RGXFWIF_FWCCB_CMD *)psFWCCB) + psFWCCBCtl->ui32ReadOffset; + + HTBLOGK(HTB_SF_MAIN_FWCCB_CMD, psFwCCBCmd->eCmdType); + switch (psFwCCBCmd->eCmdType) + { + case RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING: + { + if (psDevInfo->bPDPEnabled) + { + PDUMP_PANIC(ZSBUFFER_BACKING, "Request to add backing to ZSBuffer"); + } + RGXProcessRequestZSBufferBacking(psDevInfo, + psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID); + break; + } + + case RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING: + { + if (psDevInfo->bPDPEnabled) + { + PDUMP_PANIC(ZSBUFFER_UNBACKING, "Request to remove backing from ZSBuffer"); + } + RGXProcessRequestZSBufferUnbacking(psDevInfo, + psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID); + break; + } + + case RGXFWIF_FWCCB_CMD_FREELIST_GROW: + { + if (psDevInfo->bPDPEnabled) + { + PDUMP_PANIC(FREELIST_GROW, "Request to grow the free list"); + } + RGXProcessRequestGrow(psDevInfo, + psFwCCBCmd->uCmdData.sCmdFreeListGS.ui32FreelistID); + break; + } + + case RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION: + { + if (psDevInfo->bPDPEnabled) + { + PDUMP_PANIC(FREELISTS_RECONSTRUCTION, "Request to reconstruct free lists"); + } + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Freelist 
reconstruction request (%d) for %d freelists", + __func__, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount)); + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Freelist reconstruction request (%d/%d) for %d freelists", + __func__, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1, + psDevInfo->psRGXFWIfHWRInfoBufCtl->ui32HwrCounter+1, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount)); + } + + RGXProcessRequestFreelistsReconstruction(psDevInfo, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.aui32FreelistIDs); + break; + } + + case RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION: + { + DLLIST_NODE *psNode, *psNext; + RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA *psCmdContextResetNotification = + &psFwCCBCmd->uCmdData.sCmdContextResetNotification; + IMG_UINT32 ui32ServerCommonContextID = + psCmdContextResetNotification->ui32ServerCommonContextID; + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext = NULL; + + OSWRLockAcquireRead(psDevInfo->hCommonCtxtListLock); + dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext) + { + RGX_SERVER_COMMON_CONTEXT *psThisContext = + IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode); + + if (psThisContext->ui32ContextID == ui32ServerCommonContextID) + { + psServerCommonContext = psThisContext; + break; + } + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Context 0x%p reset (ID=0x%08x, Reason=%d, JobRef=0x%08x)", + __func__, + psServerCommonContext, + psCmdContextResetNotification->ui32ServerCommonContextID, + (IMG_UINT32)(psCmdContextResetNotification->eResetReason), + psCmdContextResetNotification->ui32ResetJobRef)); + + if (psServerCommonContext != NULL) + { + psServerCommonContext->eLastResetReason = psCmdContextResetNotification->eResetReason; + psServerCommonContext->ui32LastResetJobRef = 
psCmdContextResetNotification->ui32ResetJobRef; + } + OSWRLockReleaseRead(psDevInfo->hCommonCtxtListLock); + + if (psCmdContextResetNotification->bPageFault) + { + DevmemIntPFNotify(psDevInfo->psDeviceNode, + psCmdContextResetNotification->ui64PCAddress, + psCmdContextResetNotification->sFaultAddress); + } + break; + } + + case RGXFWIF_FWCCB_CMD_DEBUG_DUMP: + { + PVRSRV_ERROR eError; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + OSAtomicWrite(&psDevInfo->psDeviceNode->eDebugDumpRequested, PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE); + eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to signal FW Cmd debug dump event, dumping now instead", __func__)); + PVRSRVDebugRequest(psDevInfo->psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + } + break; + } + + case RGXFWIF_FWCCB_CMD_UPDATE_STATS: + { +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + IMG_PID pidTmp = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.pidOwner; + IMG_INT32 i32AdjustmentValue = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.i32AdjustmentValue; + + switch (psFwCCBCmd->uCmdData.sCmdUpdateStatsData.eElementToUpdate) + { + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS: + { + PVRSRVStatsUpdateRenderContextStats(i32AdjustmentValue,0,0,0,0,0,pidTmp); + break; + } + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY: + { + PVRSRVStatsUpdateRenderContextStats(0,i32AdjustmentValue,0,0,0,0,pidTmp); + break; + } + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES: + { + PVRSRVStatsUpdateRenderContextStats(0,0,i32AdjustmentValue,0,0,0,pidTmp); + break; + } + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES: + { + PVRSRVStatsUpdateRenderContextStats(0,0,0,i32AdjustmentValue,0,0,pidTmp); + break; + } + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES: + { + PVRSRVStatsUpdateRenderContextStats(0,0,0,0,i32AdjustmentValue,0,pidTmp); + break; + } + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES: + { + 
PVRSRVStatsUpdateRenderContextStats(0,0,0,0,0,i32AdjustmentValue,pidTmp); + break; + } + } +#endif + break; + } +#if defined(SUPPORT_PDVFS) + case RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE: + { + PDVFS_PROCESS_CORE_CLK_RATE_CHANGE(psDevInfo, + psFwCCBCmd->uCmdData.sCmdCoreClkRateChange.ui32CoreClkRate); + break; + } +#endif + case RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART: + { + if (psDevInfo->psRGXFWIfFwSysData != NULL && + psDevInfo->psRGXFWIfFwSysData->ePowState != RGXFWIF_POW_OFF) + { + PVRSRV_ERROR eError; + + /* Power down... */ + eError = PVRSRVSetDeviceSystemPowerState(psDevInfo->psDeviceNode, + PVRSRV_SYS_POWER_STATE_OFF); + if (eError == PVRSRV_OK) + { + /* Clear the FW faulted flags... */ + psDevInfo->psRGXFWIfFwSysData->ui32HWRStateFlags &= ~(RGXFWIF_HWR_FW_FAULT|RGXFWIF_HWR_RESTART_REQUESTED); + + /* Power back up again... */ + eError = PVRSRVSetDeviceSystemPowerState(psDevInfo->psDeviceNode, + PVRSRV_SYS_POWER_STATE_ON); + + /* Send a dummy KCCB command to ensure the FW wakes up and checks the queues... 
*/ + if (eError == PVRSRV_OK) + { + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXFWHealthCheckCmd(psDevInfo); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + } + } + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed firmware restart (%s)", + __func__, PVRSRVGetErrorString(eError))); + } + } + break; + } +#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) + case RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS: + { + if (psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER) + { + PVRSRV_ERROR eSOCtimerErr = PVRSRVValidateSOCUSCTimer(psDevInfo, + PDUMP_NONE, + psFwCCBCmd->uCmdData.sCmdTimers.ui64timerGray, + psFwCCBCmd->uCmdData.sCmdTimers.ui64timerBinary, + psFwCCBCmd->uCmdData.sCmdTimers.aui64uscTimers); + if (PVRSRV_OK == eSOCtimerErr) + { + PVR_DPF((PVR_DBG_WARNING, "SoC or USC Timers have increased over time")); + } + else + { + PVR_DPF((PVR_DBG_WARNING, "SoC or USC Timers have NOT increased over time")); + } + } + break; + } +#endif + default: + { + /* unknown command */ + PVR_DPF((PVR_DBG_WARNING, "%s: Unknown Command (eCmdType=0x%08x)", + __func__, psFwCCBCmd->eCmdType)); + /* Assert on magic value corruption */ + PVR_ASSERT((((IMG_UINT32)psFwCCBCmd->eCmdType & RGX_CMD_MAGIC_DWORD_MASK) >> RGX_CMD_MAGIC_DWORD_SHIFT) == RGX_CMD_MAGIC_DWORD); + } + } + + /* Update read offset */ + psFWCCBCtl->ui32ReadOffset = (psFWCCBCtl->ui32ReadOffset + 1) & psFWCCBCtl->ui32WrapMask; + } +} + +PVRSRV_ERROR IMG_CALLCONV RGXPollForGPCommandCompletion(PVRSRV_DEVICE_NODE *psDevNode, + volatile IMG_UINT32 __iomem *pui32LinMemAddr, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_CCB_CTL *psKCCBCtl; + IMG_UINT32 ui32CurrentQueueLength, ui32MaxRetries; + PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; + + psKCCBCtl = psDevInfo->psKernelCCBCtl; + ui32CurrentQueueLength = (psKCCBCtl->ui32WrapMask+1 + + 
psKCCBCtl->ui32WriteOffset - + psKCCBCtl->ui32ReadOffset) & psKCCBCtl->ui32WrapMask; + ui32CurrentQueueLength += psDevInfo->ui32KCCBDeferredCommandsCount; + + for (ui32MaxRetries = ui32CurrentQueueLength + 1; + ui32MaxRetries > 0; + ui32MaxRetries--) + { + + /* + * PVRSRVPollForValueKM flags are set to POLL_FLAG_NONE in this case so that the function + * does not generate an error message. In this case, the PollForValueKM is expected to + * timeout as there is work ongoing on the GPU which may take longer than the timeout period. + */ + eError = PVRSRVPollForValueKM(psDevNode, pui32LinMemAddr, ui32Value, ui32Mask, POLL_FLAG_NONE); + if (eError != PVRSRV_ERROR_TIMEOUT) + { + break; + } + + RGXSendCommandsFromDeferredList(psDevInfo, IMG_FALSE); + } + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed! Error(%s) CPU linear address(%p) Expected value(%u)", + __func__, PVRSRVGetErrorString(eError), + pui32LinMemAddr, ui32Value)); + } + + return eError; +} + +PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Config, + IMG_UINT32 *pui32ConfigState, + IMG_BOOL bSetNotClear) +{ + PVRSRV_ERROR eError; + PVRSRV_DEV_POWER_STATE ePowerState; + RGXFWIF_KCCB_CMD sStateFlagCmd = { 0 }; + PVRSRV_DEVICE_NODE *psDeviceNode; + RGXFWIF_SYSDATA *psFwSysData; + IMG_UINT32 ui32kCCBCommandSlot; + IMG_BOOL bWaitForFwUpdate = IMG_FALSE; + + if (!psDevInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + psDeviceNode = psDevInfo->psDeviceNode; + psFwSysData = psDevInfo->psRGXFWIfFwSysData; + + if (NULL == psFwSysData) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Fw Sys Config is not mapped into CPU space", + __func__)); + return PVRSRV_ERROR_INVALID_CPU_ADDR; + } + + /* apply change and ensure the new data is written to memory + * before requesting the FW to read it + */ + ui32Config = ui32Config & RGXFWIF_INICFG_ALL; + if (bSetNotClear) + { + psFwSysData->ui32ConfigFlags |= ui32Config; + } + else + { + psFwSysData->ui32ConfigFlags &= ~ui32Config; + } + + 
/* return current/new value to caller */ + if (pui32ConfigState) + { + *pui32ConfigState = psFwSysData->ui32ConfigFlags; + } + + OSMemoryBarrier(); + + eError = PVRSRVPowerLock(psDeviceNode); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock"); + + /* notify FW to update setting */ + eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); + + if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF)) + { + /* Ask the FW to update its cached version of the value */ + sStateFlagCmd.eCmdType = RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL; + + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, + &sStateFlagCmd, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSendCommandAndGetKCCBSlot", unlock); + bWaitForFwUpdate = IMG_TRUE; + } + +unlock: + PVRSRVPowerUnlock(psDeviceNode); + if (bWaitForFwUpdate) + { + /* Wait for the value to be updated as the FW validates + * the parameters and modifies the ui32ConfigFlags + * accordingly + * (for completeness as registered callbacks should also + * not permit invalid transitions) + */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); + } + return eError; +} + +static +PVRSRV_ERROR RGXScheduleCleanupCommand(PVRSRV_RGXDEV_INFO *psDevInfo, + SERVER_MMU_CONTEXT *psServerMMUContext, + RGXFWIF_DM eDM, + RGXFWIF_KCCB_CMD *psKCCBCmd, + RGXFWIF_CLEANUP_TYPE eCleanupType, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32kCCBCommandSlot; + + psKCCBCmd->eCmdType = RGXFWIF_KCCB_CMD_CLEANUP; + psKCCBCmd->uCmdData.sCleanupData.eCleanupType = eCleanupType; + + /* + Send the cleanup request to the firmware. If the resource is still busy + the firmware will tell us and we'll drop out with a retry. 
+ */ + eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, + psServerMMUContext, + eDM, + psKCCBCmd, + 0, + ui32PDumpFlags, + &ui32kCCBCommandSlot); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", fail_command); + + /* Wait for command kCCB slot to be updated by FW */ + PDUMPCOMMENT("Wait for the firmware to reply to the cleanup command"); + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, + ui32PDumpFlags); + /* + If the firmware hasn't got back to us in a timely manner + then bail and let the caller retry the command. + */ + if (eError == PVRSRV_ERROR_TIMEOUT) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: RGXWaitForKCCBSlotUpdate timed out. Dump debug information.", + __func__)); + + eError = PVRSRV_ERROR_RETRY; +#if defined(DEBUG) + PVRSRVDebugRequest(psDevInfo->psDeviceNode, + DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); +#endif + goto fail_poll; + } + else if (eError != PVRSRV_OK) + { + goto fail_poll; + } + +#if defined(PDUMP) + /* + * The cleanup request to the firmware will tell us if a given resource is busy or not. + * If the RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY flag is set, this means that the resource is + * still in use. In this case we return a PVRSRV_ERROR_RETRY error to the client drivers + * and they will re-issue the cleanup request until it succeed. + * + * Since this retry mechanism doesn't work for pdumps, client drivers should ensure + * that cleanup requests are only submitted if the resource is unused. + * If this is not the case, the following poll will block infinitely, making sure + * the issue doesn't go unnoticed. 
+ */ + PDUMPCOMMENT("Cleanup: If this poll fails, the following resource is still in use (DM=%u, type=%u, address=0x%08x), which is incorrect in pdumps", + eDM, + psKCCBCmd->uCmdData.sCleanupData.eCleanupType, + psKCCBCmd->uCmdData.sCleanupData.uCleanupData.psContext.ui32Addr); + eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBRtnSlotsMemDesc, + ui32kCCBCommandSlot * sizeof(IMG_UINT32), + 0, + RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY, + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags); + PVR_LOG_IF_ERROR(eError, "DevmemPDumpDevmemPol32"); +#endif + + /* + If the command has was run but a resource was busy, then the request + will need to be retried. + */ + if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY)) + { + if (psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE) + { + PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__)); + } + eError = PVRSRV_ERROR_RETRY; + goto fail_requestbusy; + } + + return PVRSRV_OK; + +fail_requestbusy: +fail_poll: +fail_command: + PVR_ASSERT(eError != PVRSRV_OK); + + return eError; +} + +/* + RGXRequestCommonContextCleanUp +*/ +PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + RGXFWIF_DM eDM, + IMG_UINT32 ui32PDumpFlags) +{ + RGXFWIF_KCCB_CMD sRCCleanUpCmd = {0}; + PVRSRV_ERROR eError; + PRGXFWIF_FWCOMMONCONTEXT psFWCommonContextFWAddr; + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice; + + /* Force retry if this context's CCB is currently being dumped + * as part of the stalled CCB debug */ + if (psDevInfo->pvEarliestStalledClientCCB == (void*)psServerCommonContext->psClientCCB) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Forcing retry as psDevInfo->pvEarliestStalledClientCCB = psServerCommonContext->psClientCCB <%p>", + __func__, + (void*)psServerCommonContext->psClientCCB)); + return PVRSRV_ERROR_RETRY; + } + + 
psFWCommonContextFWAddr = FWCommonContextGetFWAddress(psServerCommonContext); +#if defined(PDUMP) + PDUMPCOMMENT("Common ctx cleanup Request DM%d [context = 0x%08x]", + eDM, psFWCommonContextFWAddr.ui32Addr); + PDUMPCOMMENT("Wait for CCB to be empty before common ctx cleanup"); + + RGXCCBPDumpDrainCCB(FWCommonContextGetClientCCB(psServerCommonContext), ui32PDumpFlags); +#endif + + /* Setup our command data, the cleanup call will fill in the rest */ + sRCCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psContext = psFWCommonContextFWAddr; + + /* Request cleanup of the firmware resource */ + eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice, + psServerCommonContext->psServerMMUContext, + eDM, + &sRCCleanUpCmd, + RGXFWIF_CLEANUP_FWCOMMONCONTEXT, + ui32PDumpFlags); + + if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule a memory context cleanup with error (%u)", + __func__, eError)); + } + + return eError; +} + +/* + * RGXFWRequestHWRTDataCleanUp + */ + +PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, + PRGXFWIF_HWRTDATA psHWRTData) +{ + RGXFWIF_KCCB_CMD sHWRTDataCleanUpCmd = {0}; + PVRSRV_ERROR eError; + + PDUMPCOMMENT("HW RTData cleanup Request [HWRTData = 0x%08x]", psHWRTData.ui32Addr); + + sHWRTDataCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psHWRTData = psHWRTData; + + eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice, + NULL, + RGXFWIF_DM_GP, + &sHWRTDataCleanUpCmd, + RGXFWIF_CLEANUP_HWRTDATA, + PDUMP_FLAGS_NONE); + + if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule a HWRTData cleanup with error (%u)", + __func__, eError)); + } + + return eError; +} + +/* + RGXFWRequestFreeListCleanUp +*/ +PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo, + PRGXFWIF_FREELIST psFWFreeList) +{ + RGXFWIF_KCCB_CMD sFLCleanUpCmd = {0}; + PVRSRV_ERROR eError; + + PDUMPCOMMENT("Free list 
cleanup Request [FreeList = 0x%08x]", psFWFreeList.ui32Addr); + + /* Setup our command data, the cleanup call will fill in the rest */ + sFLCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psFreelist = psFWFreeList; + + /* Request cleanup of the firmware resource */ + eError = RGXScheduleCleanupCommand(psDevInfo, + NULL, + RGXFWIF_DM_GP, + &sFLCleanUpCmd, + RGXFWIF_CLEANUP_FREELIST, + PDUMP_FLAGS_NONE); + + if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule a memory context cleanup with error (%u)", + __func__, eError)); + } + + return eError; +} + +/* + RGXFWRequestZSBufferCleanUp +*/ +PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo, + PRGXFWIF_ZSBUFFER psFWZSBuffer) +{ + RGXFWIF_KCCB_CMD sZSBufferCleanUpCmd = {0}; + PVRSRV_ERROR eError; + + PDUMPCOMMENT("ZS Buffer cleanup Request [ZS Buffer = 0x%08x]", psFWZSBuffer.ui32Addr); + + /* Setup our command data, the cleanup call will fill in the rest */ + sZSBufferCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psZSBuffer = psFWZSBuffer; + + /* Request cleanup of the firmware resource */ + eError = RGXScheduleCleanupCommand(psDevInfo, + NULL, + RGXFWIF_DM_3D, + &sZSBufferCleanUpCmd, + RGXFWIF_CLEANUP_ZSBUFFER, + PDUMP_FLAGS_NONE); + + if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule a memory context cleanup with error (%u)", + __func__, eError)); + } + + return eError; +} + +PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32HCSDeadlineMs) +{ + PVRSRV_ERROR eError; + RGXFWIF_KCCB_CMD sSetHCSDeadline; + + sSetHCSDeadline.eCmdType = RGXFWIF_KCCB_CMD_HCS_SET_DEADLINE; + sSetHCSDeadline.uCmdData.sHCSCtrl.ui32HCSDeadlineMS = ui32HCSDeadlineMs; + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + NULL, + RGXFWIF_DM_GP, + &sSetHCSDeadline, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) + { + 
break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + return eError; +} + +PVRSRV_ERROR RGXFWHealthCheckCmd(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_KCCB_CMD sCmpKCCBCmd; + + sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK; + + return RGXScheduleCommand(psDevInfo, + NULL, + RGXFWIF_DM_GP, + &sCmpKCCBCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); +} + +PVRSRV_ERROR RGXFWSetOSIsolationThreshold(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32IsolationPriorityThreshold) +{ + PVRSRV_ERROR eError; + RGXFWIF_KCCB_CMD sOSidIsoConfCmd = { 0 }; + + sOSidIsoConfCmd.eCmdType = RGXFWIF_KCCB_CMD_OS_ISOLATION_GROUP_CHANGE; + sOSidIsoConfCmd.uCmdData.sCmdOSidIsolationData.ui32IsolationPriorityThreshold = ui32IsolationPriorityThreshold; + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + NULL, + RGXFWIF_DM_GP, + &sOSidIsoConfCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + return eError; +} + +PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32OSid, + RGXFWIF_OS_STATE_CHANGE eOSOnlineState) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sOSOnlineStateCmd = { 0 }; + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + sOSOnlineStateCmd.eCmdType = RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE; + sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.ui32OSid = ui32OSid; + sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.eNewOSState = eOSOnlineState; + + if (eOSOnlineState == RGXFWIF_OS_ONLINE) + { + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + NULL, + RGXFWIF_DM_GP, + &sOSOnlineStateCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) break; + + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + } + else if (psFwSysData) + { + IMG_UINT32 
ui32kCCBCommandSlot; + volatile RGXFWIF_OS_RUNTIME_FLAGS *psFwRunFlags; + + psFwRunFlags = (volatile RGXFWIF_OS_RUNTIME_FLAGS*) &psFwSysData->asOsRuntimeFlagsMirror[ui32OSid]; + /* Attempt several times until the FW manages to offload the OS */ + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + /* Send request */ + eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, + NULL, + RGXFWIF_DM_GP, + &sOSOnlineStateCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + if (unlikely(eError == PVRSRV_ERROR_RETRY)) continue; + PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommand", return_); + + /* Wait for FW to process the cmd */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", return_); + + /* read the OS state */ + OSMemoryBarrier(); + /* check if FW finished offloading the OSID and is stopped */ + if (psFwRunFlags->bfOsState == RGXFW_CONNECTION_FW_OFFLINE) + { + eError = PVRSRV_OK; + break; + } + else + { + eError = PVRSRV_ERROR_TIMEOUT; + } + + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + } + else + { + eError = PVRSRV_ERROR_NOT_INITIALISED; + } + +return_ : + return eError; +} + +PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32OSid, + IMG_UINT32 ui32Priority) +{ + PVRSRV_ERROR eError; + RGXFWIF_KCCB_CMD sOSidPriorityCmd = { 0 }; + + sOSidPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE; + sOSidPriorityCmd.uCmdData.sCmdOSidPriorityData.ui32OSidNum = ui32OSid; + sOSidPriorityCmd.uCmdData.sCmdOSidPriorityData.ui32Priority = ui32Priority; + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + NULL, + RGXFWIF_DM_GP, + &sOSidPriorityCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + return eError; +} + +PVRSRV_ERROR 
ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext, + CONNECTION_DATA *psConnection, + PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Priority, + RGXFWIF_DM eDM) +{ + IMG_UINT32 ui32CmdSize; + IMG_UINT8 *pui8CmdPtr; + RGXFWIF_KCCB_CMD sPriorityCmd = { 0 }; + RGXFWIF_CCB_CMD_HEADER *psCmdHeader; + RGXFWIF_CMD_PRIORITY *psCmd; + PVRSRV_ERROR eError; + RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psContext); + + /* + Get space for command + */ + ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_CMD_PRIORITY)); + + eError = RGXAcquireCCB(psClientCCB, + ui32CmdSize, + (void **) &pui8CmdPtr, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + if (eError != PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire space for client CCB", __func__)); + } + goto fail_ccbacquire; + } + + /* + Write the command header and command + */ + psCmdHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr; + psCmdHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PRIORITY; + psCmdHeader->ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CMD_PRIORITY)); + pui8CmdPtr += sizeof(*psCmdHeader); + + psCmd = (RGXFWIF_CMD_PRIORITY *) pui8CmdPtr; + psCmd->ui32Priority = ui32Priority; + pui8CmdPtr += sizeof(*psCmd); + + /* + We should reserved space in the kernel CCB here and fill in the command + directly. + This is so if there isn't space in the kernel CCB we can return with + retry back to services client before we take any operations + */ + + /* + Submit the command + */ + RGXReleaseCCB(psClientCCB, + ui32CmdSize, + PDUMP_FLAGS_CONTINUOUS); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to release space in client CCB", __func__)); + return eError; + } + + /* Construct the priority command. 
*/ + sPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + sPriorityCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psContext); + sPriorityCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); + sPriorityCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); + sPriorityCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; + sPriorityCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + psContext->psServerMMUContext, + eDM, + &sPriorityCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to submit set priority command with error (%u)", + __func__, + eError)); + } + + return PVRSRV_OK; + +fail_ccbacquire: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR RGXFWConfigPHR(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32PHRMode) +{ + PVRSRV_ERROR eError; + RGXFWIF_KCCB_CMD sCfgPHRCmd = { 0 }; + + sCfgPHRCmd.eCmdType = RGXFWIF_KCCB_CMD_PHR_CFG; + sCfgPHRCmd.uCmdData.sPeriodicHwResetCfg.ui32PHRMode = ui32PHRMode; + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + NULL, + RGXFWIF_DM_GP, + &sCfgPHRCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + return eError; +} + +/* + RGXReadMETAAddr +*/ +PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 *pui32Value) +{ + IMG_UINT8 __iomem *pui8RegBase = psDevInfo->pvRegsBaseKM; + IMG_UINT32 ui32PollValue; + IMG_UINT32 ui32PollMask; + IMG_UINT32 ui32PollRegOffset; + IMG_UINT32 ui32ReadOffset; + IMG_UINT32 ui32WriteOffset; + IMG_UINT32 ui32WriteValue; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, 
META_REGISTER_UNPACKED_ACCESSES)) + { + ui32PollValue = RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN; + ui32PollMask = RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN; + ui32PollRegOffset = RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES; + ui32WriteOffset = RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES; + ui32WriteValue = ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__RD_EN; + CHECK_HWBRN_68777(ui32WriteValue); + ui32ReadOffset = RGX_CR_META_SP_MSLVDATAX__META_REGISTER_UNPACKED_ACCESSES; + } + else + { + ui32PollValue = RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN; + ui32PollMask = RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN; + ui32PollRegOffset = RGX_CR_META_SP_MSLVCTRL1; + ui32WriteOffset = RGX_CR_META_SP_MSLVCTRL0; + ui32WriteValue = ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN; + ui32ReadOffset = RGX_CR_META_SP_MSLVDATAX; + } + + /* Wait for Slave Port to be Ready */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *) (pui8RegBase + ui32PollRegOffset), + ui32PollValue, + ui32PollMask, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + return PVRSRV_ERROR_TIMEOUT; + } + + /* Issue the Read */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32WriteOffset, ui32WriteValue); + (void)OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32WriteOffset); + + /* Wait for Slave Port to be Ready: read complete */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *) (pui8RegBase + ui32PollRegOffset), + ui32PollValue, + ui32PollMask, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + return PVRSRV_ERROR_TIMEOUT; + } + + /* Read the value */ + *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32ReadOffset); + + return PVRSRV_OK; +} + +/* + 
RGXWriteMETAAddr +*/ +PVRSRV_ERROR RGXWriteMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 ui32Value) +{ + IMG_UINT8 __iomem *pui8RegBase = psDevInfo->pvRegsBaseKM; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) + { + /* Wait for Slave Port to be Ready */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES), + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + return PVRSRV_ERROR_TIMEOUT; + } + + /* Issue the Write */ + CHECK_HWBRN_68777(ui32METAAddr); + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES, ui32METAAddr); + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAT__META_REGISTER_UNPACKED_ACCESSES, ui32Value); + } + else + { + /* Wait for Slave Port to be Ready */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1), + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + return PVRSRV_ERROR_TIMEOUT; + } + + /* Issue the Write */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0, ui32METAAddr); + (void) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAT, ui32Value); + (void) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAT); /* Fence write */ + } + + return PVRSRV_OK; +} + +void 
RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bIgnorePrevious)
{
	/* Attempt to detect and deal with any stalled client contexts.
	 * bIgnorePrevious may be set by the caller if they know a context to be
	 * stalled, as otherwise this function will only identify stalled
	 * contexts which have not been previously reported.
	 *
	 * Aggregates per-DM stall bits from the TDM/render/compute/kicksync
	 * checkers and logs transitions; the last reported mask is cached in
	 * psDevInfo->ui32StalledClientMask.
	 */

	IMG_UINT32 ui32StalledClientMask = 0;

	/* Non-blocking lock: if another stall check is in flight, skip this one
	 * rather than stacking up. */
	if (!(OSTryLockAcquire(psDevInfo->hCCBStallCheckLock)))
	{
		PVR_LOG(("RGXCheckForStalledClientContexts: Failed to acquire hCCBStallCheckLock, returning..."));
		return;
	}

	ui32StalledClientMask |= CheckForStalledClientTDMTransferCtxt(psDevInfo);

	ui32StalledClientMask |= CheckForStalledClientRenderCtxt(psDevInfo);

	ui32StalledClientMask |= CheckForStalledClientComputeCtxt(psDevInfo);

	ui32StalledClientMask |= CheckForStalledClientKickSyncCtxt(psDevInfo);

	/* If at least one DM stalled bit is different than before */
	if (bIgnorePrevious || (psDevInfo->ui32StalledClientMask != ui32StalledClientMask))
	{
		if (ui32StalledClientMask > 0)
		{
			static __maybe_unused const char *pszStalledAction =
#if defined(PVRSRV_STALLED_CCB_ACTION)
				"force";
#else
				"warn";
#endif
			/* Print all the stalled DMs */
			PVR_LOG(("Possible stalled client RGX contexts detected: %s%s%s%s%s%s%s",
			         RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_GP),
			         RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TDM_2D),
			         RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_GEOM),
			         RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_3D),
			         RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_CDM),
			         RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ2D),
			         RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ3D)));

			PVR_LOG(("Trying to identify stalled context...(%s) [%d]",
			         pszStalledAction, bIgnorePrevious));

			DumpStalledContextInfo(psDevInfo);
		}
		else
		{
			if (psDevInfo->ui32StalledClientMask > 0)
			{
				/* Indicate there are no stalled DMs */
				PVR_LOG(("No further stalled client contexts exist"));
			}
		}
		psDevInfo->ui32StalledClientMask = ui32StalledClientMask;
		psDevInfo->pvEarliestStalledClientCCB = NULL;
	}
	OSLockRelease(psDevInfo->hCCBStallCheckLock);
}

/*
	RGXUpdateHealthStatus
*/
/*
 * Periodic/on-demand device health evaluation.
 *
 * psDevNode             - device to check (asserted non-NULL)
 * bCheckAfterTimePassed - IMG_TRUE for the slow periodic check that compares
 *                         counters against the values sampled last time;
 *                         IMG_FALSE for a quick check that starts from the
 *                         currently published status.
 *
 * Checks performed (HW-touching ones only while the device is powered):
 * FW assert buffers, FW CR-poll progress, FW fault flags, global event
 * object timeouts, KCCB offset sanity and command progress (scheduling a
 * health-check command if the queue is idle), LISR-vs-FW interrupt counts,
 * and finally stalled client CCBs. The outcome is written atomically to
 * psDevNode->eHealthStatus/eHealthReason. Always returns PVRSRV_OK.
 */
PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode,
                                   IMG_BOOL bCheckAfterTimePassed)
{
	PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData();
	PVRSRV_DEVICE_HEALTH_STATUS eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_OK;
	PVRSRV_DEVICE_HEALTH_REASON eNewReason = PVRSRV_DEVICE_HEALTH_REASON_NONE;
	PVRSRV_RGXDEV_INFO* psDevInfo;
	RGXFWIF_TRACEBUF* psRGXFWIfTraceBufCtl;
	RGXFWIF_SYSDATA* psFwSysData;
	RGXFWIF_CCB_CTL *psKCCBCtl;
	IMG_UINT32 ui32ThreadCount;
	IMG_BOOL bKCCBCmdsWaiting;

	PVR_ASSERT(psDevNode != NULL);
	psDevInfo = psDevNode->pvDevice;

	/* If the firmware is not yet initialised or has already deinitialised, stop here */
	if (psDevInfo == NULL || !psDevInfo->bFirmwareInitialised || psDevInfo->pvRegsBaseKM == NULL ||
		psDevInfo->psDeviceNode == NULL || psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT)
	{
		return PVRSRV_OK;
	}

	psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
	psFwSysData = psDevInfo->psRGXFWIfFwSysData;

#if defined(SUPPORT_AUTOVZ)
	if (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && KM_OS_CONNECTION_IS(ACTIVE, psDevInfo))
	{
		/* read and write back the alive token value to confirm to the
		 * virtualisation watchdog that this connection is healthy */
		KM_SET_OS_ALIVE_TOKEN(KM_GET_FW_ALIVE_TOKEN(psDevInfo), psDevInfo);
	}
#endif

	/* Guest OSes do not own the firmware; nothing further to check. */
	PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);

	/* If this is a quick update, then include the last current value... */
	if (!bCheckAfterTimePassed)
	{
		eNewStatus = OSAtomicRead(&psDevNode->eHealthStatus);
		eNewReason = OSAtomicRead(&psDevNode->eHealthReason);
	}

	/* Decrement the SLR holdoff counter (if non-zero) */
	if (psDevInfo->ui32SLRHoldoffCounter > 0)
	{
		psDevInfo->ui32SLRHoldoffCounter--;
	}

	/* If Rogue is not powered on, just skip ahead and check for stalled client CCBs
	   (there is a race condition where PVRSRVIsDevicePowered returns TRUE when the GPU is actually powering down.
	   That's not a problem as this function does not touch the HW except for the RGXScheduleCommand function,
	   which is already powerlock safe. The worst thing that could happen is that Rogue might power back up
	   but the chances of that are very low */
	if (OSLockIsLocked(psDevNode->hPowerLock) && PVRSRVIsDevicePowered(psDevNode))
	{
		/*
		   Firmware thread checks...
		*/
		if (psRGXFWIfTraceBufCtl != NULL)
		{
			for (ui32ThreadCount = 0; ui32ThreadCount < RGXFW_THREAD_NUM; ui32ThreadCount++)
			{
				IMG_CHAR* pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szInfo;

				/*
				   Check if the FW has hit an assert...
				*/
				if (*pszTraceAssertInfo != '\0')
				{
					PVR_DPF((PVR_DBG_WARNING, "%s: Firmware thread %d has asserted: %s (%s:%d)",
					         __func__, ui32ThreadCount, pszTraceAssertInfo,
					         psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szPath,
					         psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.ui32LineNum));
					eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD;
					eNewReason = PVRSRV_DEVICE_HEALTH_REASON_ASSERTED;
					goto _RGXUpdateHealthStatus_Exit;
				}

				/*
				   Check the threads to see if they are in the same poll locations as last time...
				*/
				if (bCheckAfterTimePassed)
				{
					if (psFwSysData->aui32CrPollAddr[ui32ThreadCount] != 0 &&
						psFwSysData->aui32CrPollCount[ui32ThreadCount] == psDevInfo->aui32CrLastPollCount[ui32ThreadCount])
					{
						PVR_DPF((PVR_DBG_WARNING, "%s: Firmware stuck on CR poll: T%u polling %s (reg:0x%08X mask:0x%08X)",
						         __func__, ui32ThreadCount,
						         ((psFwSysData->aui32CrPollAddr[ui32ThreadCount] & RGXFW_POLL_TYPE_SET)?("set"):("unset")),
						         psFwSysData->aui32CrPollAddr[ui32ThreadCount] & ~RGXFW_POLL_TYPE_SET,
						         psFwSysData->aui32CrPollMask[ui32ThreadCount]));
						eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
						eNewReason = PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING;
						goto _RGXUpdateHealthStatus_Exit;
					}
					psDevInfo->aui32CrLastPollCount[ui32ThreadCount] = psFwSysData->aui32CrPollCount[ui32ThreadCount];
				}
			}

			/*
			   Check if the FW has faulted...
			*/
			if (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_FW_FAULT)
			{
				PVR_DPF((PVR_DBG_WARNING,
				         "%s: Firmware has faulted and needs to restart",
				         __func__));
				eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_FAULT;
				if (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_RESTART_REQUESTED)
				{
					eNewReason = PVRSRV_DEVICE_HEALTH_REASON_RESTARTING;
				}
				else
				{
					eNewReason = PVRSRV_DEVICE_HEALTH_REASON_IDLING;
				}
				goto _RGXUpdateHealthStatus_Exit;
			}
		}

		/*
		   Event Object Timeouts check...
		*/
		if (!bCheckAfterTimePassed)
		{
			/* ">1" gives a one-sample grace period before flagging rising
			 * global event object timeouts. */
			if (psDevInfo->ui32GEOTimeoutsLastTime > 1 && psPVRSRVData->ui32GEOConsecutiveTimeouts > psDevInfo->ui32GEOTimeoutsLastTime)
			{
				PVR_DPF((PVR_DBG_WARNING, "%s: Global Event Object Timeouts have risen (from %d to %d)",
				         __func__,
				         psDevInfo->ui32GEOTimeoutsLastTime, psPVRSRVData->ui32GEOConsecutiveTimeouts));
				eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
				eNewReason = PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS;
			}
			psDevInfo->ui32GEOTimeoutsLastTime = psPVRSRVData->ui32GEOConsecutiveTimeouts;
		}

		/*
		   Check the Kernel CCB pointer is valid. If any commands were waiting last time, then check
		   that some have executed since then.
		*/
		bKCCBCmdsWaiting = IMG_FALSE;
		psKCCBCtl = psDevInfo->psKernelCCBCtl;

		if (psKCCBCtl != NULL)
		{
			if (psKCCBCtl->ui32ReadOffset > psKCCBCtl->ui32WrapMask ||
				psKCCBCtl->ui32WriteOffset > psKCCBCtl->ui32WrapMask)
			{
				PVR_DPF((PVR_DBG_WARNING, "%s: KCCB has invalid offset (ROFF=%d WOFF=%d)",
				         __func__, psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset));
				eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD;
				eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT;
			}

			if (psKCCBCtl->ui32ReadOffset != psKCCBCtl->ui32WriteOffset)
			{
				bKCCBCmdsWaiting = IMG_TRUE;
			}
		}

		if (bCheckAfterTimePassed && psDevInfo->psRGXFWIfFwOsData != NULL)
		{
			IMG_UINT32 ui32KCCBCmdsExecuted = psDevInfo->psRGXFWIfFwOsData->ui32KCCBCmdsExecuted;

			if (psDevInfo->ui32KCCBCmdsExecutedLastTime == ui32KCCBCmdsExecuted)
			{
				/*
				   If something was waiting last time then the Firmware has stopped processing commands.
				*/
				if (psDevInfo->bKCCBCmdsWaitingLastTime)
				{
					PVR_DPF((PVR_DBG_WARNING, "%s: No KCCB commands executed since check!",
					         __func__));
					eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
					eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED;
				}

				/*
				   If no commands are currently pending and nothing happened since the last poll, then
				   schedule a dummy command to ping the firmware so we know it is alive and processing.
				*/
				if (!bKCCBCmdsWaiting)
				{
					PVRSRV_ERROR eError = RGXFWHealthCheckCmd(psDevNode->pvDevice);

					if (eError != PVRSRV_OK)
					{
						PVR_DPF((PVR_DBG_WARNING, "%s: Cannot schedule Health Check command! (0x%x)",
						         __func__, eError));
					}
					else
					{
						bKCCBCmdsWaiting = IMG_TRUE;
					}
				}
			}

			psDevInfo->bKCCBCmdsWaitingLastTime = bKCCBCmdsWaiting;
			psDevInfo->ui32KCCBCmdsExecutedLastTime = ui32KCCBCmdsExecuted;
		}
	}

	/*
	   Interrupt counts check...
	*/
	if (bCheckAfterTimePassed && psDevInfo->psRGXFWIfFwSysData != NULL)
	{
		IMG_UINT32 ui32LISRCount = 0;
		IMG_UINT32 ui32FWCount = 0;
		IMG_UINT32 ui32MissingInts = 0;
		IMG_UINT32 ui32Index;

		/* Add up the total number of interrupts issued, sampled/received and missed... */
		for (ui32Index = 0; ui32Index < RGXFW_THREAD_NUM; ui32Index++)
		{
			ui32LISRCount += psDevInfo->aui32SampleIRQCount[ui32Index];
			ui32FWCount += psDevInfo->psRGXFWIfFwSysData->aui32InterruptCount[ui32Index];
		}

		if (ui32LISRCount < ui32FWCount)
		{
			ui32MissingInts = (ui32FWCount-ui32LISRCount);
		}

		/* Flag only if no LISR progress AND the missing count is not
		 * shrinking AND was already above the grace threshold last time. */
		if (ui32LISRCount == psDevInfo->ui32InterruptCountLastTime &&
			ui32MissingInts >= psDevInfo->ui32MissingInterruptsLastTime &&
			psDevInfo->ui32MissingInterruptsLastTime > 1)
		{
			PVR_DPF((PVR_DBG_ERROR, "%s: LISR has not received the last %d interrupts",
			         __func__, ui32MissingInts));
			eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
			eNewReason = PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS;

			/* Schedule the MISRs to help mitigate the problems of missing interrupts. */
			OSScheduleMISR(psDevInfo->pvMISRData);
			if (psDevInfo->pvAPMISRData != NULL)
			{
				OSScheduleMISR(psDevInfo->pvAPMISRData);
			}
		}
		psDevInfo->ui32InterruptCountLastTime = ui32LISRCount;
		psDevInfo->ui32MissingInterruptsLastTime = ui32MissingInts;
	}

	/*
	   Stalled CCB check...
	*/
	if (bCheckAfterTimePassed && (PVRSRV_DEVICE_HEALTH_STATUS_OK==eNewStatus))
	{
		RGXCheckForStalledClientContexts(psDevInfo, IMG_FALSE);
	}

	/*
	   Finished, save the new status...
	*/
_RGXUpdateHealthStatus_Exit:
	OSAtomicWrite(&psDevNode->eHealthStatus, eNewStatus);
	OSAtomicWrite(&psDevNode->eHealthReason, eNewReason);
	RGXSRV_HWPERF_DEVICE_INFO(psDevInfo, RGX_HWPERF_DEV_INFO_EV_HEALTH, eNewStatus, eNewReason);

	/*
	 * Attempt to service the HWPerf buffer to regularly transport idle/periodic
	 * packets to host buffer.
	 */
	if (psDevNode->pfnServiceHWPerf != NULL)
	{
		PVRSRV_ERROR eError = psDevNode->pfnServiceHWPerf(psDevNode);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_WARNING, "%s: "
			         "Error occurred when servicing HWPerf buffer (%d)",
			         __func__, eError));
		}
	}

	/* Attempt to refresh timer correlation data */
	RGXTimeCorrRestartPeriodic(psDevNode);

	return PVRSRV_OK;
} /* RGXUpdateHealthStatus */

/*
 * Check a single server common context's client CCB for a stall.
 * A NULL context (already freed) is treated as "nothing to do" -> PVRSRV_OK.
 */
PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM)
{
	if (psCurrentServerCommonContext == NULL)
	{
		/* the context has already been freed so there is nothing to do here */
		return PVRSRV_OK;
	}

	return CheckForStalledCCB(psCurrentServerCommonContext->psDevInfo->psDeviceNode,
	                          psCurrentServerCommonContext->psClientCCB,
	                          eKickTypeDM);
}

/*
 * Debug-dump helper for a server common context. At high verbosity the whole
 * client CCB is dumped; otherwise only the first stalled command is printed.
 * A NULL (already freed) context is silently ignored.
 */
void DumpFWCommonContextInfo(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext,
                             DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
                             void *pvDumpDebugFile,
                             IMG_UINT32 ui32VerbLevel)
{
	if (psCurrentServerCommonContext == NULL)
	{
		/* the context has already been freed so there is nothing to do here */
		return;
	}

	if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH))
	{
		/* If high verbosity requested, dump whole CCB */
		DumpCCB(psCurrentServerCommonContext->psDevInfo,
		        psCurrentServerCommonContext->sFWCommonContextFWAddr,
		        psCurrentServerCommonContext->psClientCCB,
		        pfnDumpDebugPrintf,
		        pvDumpDebugFile);
	}
	else
	{
		/* Otherwise, only dump first stalled command in the CCB */
		DumpStalledCCBCommand(psCurrentServerCommonContext->sFWCommonContextFWAddr,
		                      psCurrentServerCommonContext->psClientCCB,
		                      pfnDumpDebugPrintf,
		                      pvDumpDebugFile);
	}
}

PVRSRV_ERROR AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl,
                                            IMG_UINT32 *pui32NumCleanupCtl,
                                            RGXFWIF_DM eDM,
                                            IMG_BOOL bKick,
                                            RGX_KM_HW_RT_DATASET *psKMHWRTDataSet,
                                            RGX_ZSBUFFER_DATA *psZSBuffer,
                                            RGX_ZSBUFFER_DATA
*psMSAAScratchBuffer) +{ + PVRSRV_ERROR eError; + PRGXFWIF_CLEANUP_CTL *psCleanupCtlWrite = apsCleanupCtl; + + PVR_ASSERT((eDM == RGXFWIF_DM_GEOM) || (eDM == RGXFWIF_DM_3D)); + PVR_RETURN_IF_INVALID_PARAM((eDM == RGXFWIF_DM_GEOM) || (eDM == RGXFWIF_DM_3D)); + + if (bKick) + { + if (psKMHWRTDataSet) + { + PRGXFWIF_CLEANUP_CTL psCleanupCtl; + + eError = RGXSetFirmwareAddress(&psCleanupCtl, psKMHWRTDataSet->psHWRTDataFwMemDesc, + offsetof(RGXFWIF_HWRTDATA, sCleanupState), + RFW_FWADDR_NOREF_FLAG); + PVR_RETURN_IF_ERROR(eError); + + *(psCleanupCtlWrite++) = psCleanupCtl; + } + + if (eDM == RGXFWIF_DM_3D) + { + RGXFWIF_PRBUFFER_TYPE eBufferType; + RGX_ZSBUFFER_DATA *psBuffer = NULL; + + for (eBufferType = RGXFWIF_PRBUFFER_START; eBufferType < RGXFWIF_PRBUFFER_MAXSUPPORTED; eBufferType++) + { + switch (eBufferType) + { + case RGXFWIF_PRBUFFER_ZSBUFFER: + psBuffer = psZSBuffer; + break; + case RGXFWIF_PRBUFFER_MSAABUFFER: + psBuffer = psMSAAScratchBuffer; + break; + case RGXFWIF_PRBUFFER_MAXSUPPORTED: + psBuffer = NULL; + break; + } + if (psBuffer) + { + (psCleanupCtlWrite++)->ui32Addr = psBuffer->sZSBufferFWDevVAddr.ui32Addr + + offsetof(RGXFWIF_PRBUFFER, sCleanupState); + psBuffer = NULL; + } + } + } + } + + *pui32NumCleanupCtl = psCleanupCtlWrite - apsCleanupCtl; + PVR_ASSERT(*pui32NumCleanupCtl <= RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS); + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + RGXFWIF_HWRINFOBUF *psHWRInfoBuf; + IMG_UINT32 i; + + if (psDevNode->pvDevice == NULL) + { + return PVRSRV_ERROR_INVALID_DEVINFO; + } + psDevInfo = psDevNode->pvDevice; + + psHWRInfoBuf = psDevInfo->psRGXFWIfHWRInfoBufCtl; + + for (i = 0 ; i < psDevInfo->sDevFeatureCfg.ui32MAXDMCount ; i++) + { + /* Reset the HWR numbers */ + psHWRInfoBuf->aui32HwrDmLockedUpCount[i] = 0; + psHWRInfoBuf->aui32HwrDmFalseDetectCount[i] = 0; + psHWRInfoBuf->aui32HwrDmRecoveredCount[i] = 0; + 
psHWRInfoBuf->aui32HwrDmOverranCount[i] = 0; + } + + for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++) + { + psHWRInfoBuf->sHWRInfo[i].ui32HWRNumber = 0; + } + + psHWRInfoBuf->ui32WriteIndex = 0; + psHWRInfoBuf->ui32DDReqCount = 0; + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR, + IMG_DEV_PHYADDR *psPhyAddr, + IMG_UINT32 ui32LogicalOffset, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_BOOL *bValid) +{ + + PVRSRV_ERROR eError; + + eError = PMRLockSysPhysAddresses(psPMR); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: PMRLockSysPhysAddresses failed (%u)", + __func__, + eError)); + return eError; + } + + eError = PMR_DevPhysAddr(psPMR, + ui32Log2PageSize, + ui32NumOfPages, + ui32LogicalOffset, + psPhyAddr, + bValid); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: PMR_DevPhysAddr failed (%u)", + __func__, + eError)); + return eError; + } + + + eError = PMRUnlockSysPhysAddresses(psPMR); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: PMRUnLockSysPhysAddresses failed (%u)", + __func__, + eError)); + return eError; + } + + return eError; +} + +#if defined(PDUMP) +PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32WriteOffset) +{ + RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl; + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + if (psDevInfo->bDumpedKCCBCtlAlready) + { + /* exiting capture range or pdump block */ + psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE; + + /* make sure previous cmd is drained in pdump in case we will 'jump' over some future cmds */ + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER, + "kCCB(%p): Draining rgxfw_roff (0x%x) == woff (0x%x)", + psKCCBCtl, + ui32WriteOffset, + ui32WriteOffset); + eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc, + offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset), + ui32WriteOffset, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS 
| PDUMP_FLAGS_POWER); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: problem pdumping POL for kCCBCtl (%d)", __func__, eError)); + } + } + + return eError; + +} +#endif + +/*! +******************************************************************************* + + @Function RGXClientConnectCompatCheck_ClientAgainstFW + + @Description + + Check compatibility of client and firmware (build options) + at the connection time. + + @Input psDeviceNode - device node + @Input ui32ClientBuildOptions - build options for the client + + @Return PVRSRV_ERROR - depending on mismatch found + +******************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32ClientBuildOptions) +{ +#if !defined(NO_HARDWARE) || defined(PDUMP) +#if !defined(NO_HARDWARE) + IMG_UINT32 ui32BuildOptionsMismatch; + IMG_UINT32 ui32BuildOptionsFW; +#endif + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; +#endif + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + +#if !defined(NO_HARDWARE) + if (psDevInfo == NULL || psDevInfo->psRGXFWIfOsInitMemDesc == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Cannot acquire kernel fw compatibility check info, RGXFWIF_OSINIT structure not allocated.", + __func__)); + return PVRSRV_ERROR_NOT_INITIALISED; + } + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + if (*((volatile IMG_BOOL *) &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) + { + /* No need to wait if the FW has already updated the values */ + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); +#endif + +#if defined(PDUMP) + { + PVRSRV_ERROR eError; + + PDUMPCOMMENT("Compatibility check: client and FW build options"); + eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, ui32BuildOptions), + ui32ClientBuildOptions, + 0xffffffff, + 
PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", + __func__, + eError)); + return eError; + } + } +#endif + +#if !defined(NO_HARDWARE) + ui32BuildOptionsFW = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.ui32BuildOptions; + ui32BuildOptionsMismatch = ui32ClientBuildOptions ^ ui32BuildOptionsFW; + + if (ui32BuildOptionsMismatch != 0) + { + if ((ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; " + "extra options present in client: (0x%x). Please check rgx_options.h", + ui32ClientBuildOptions & ui32BuildOptionsMismatch )); + } + + if ((ui32BuildOptionsFW & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; " + "extra options present in Firmware: (0x%x). Please check rgx_options.h", + ui32BuildOptionsFW & ui32BuildOptionsMismatch )); + } + + return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware and client build options match. [ OK ]", __func__)); + } +#endif + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RGXFwRawHeapAllocMap + + @Description Register firmware heap for the specified guest OSID + + @Input psDeviceNode - device node + @Input ui32OSID - Guest OSID + @Input sDevPAddr - Heap address + @Input ui64DevPSize - Heap size + + @Return PVRSRV_ERROR - PVRSRV_OK if heap setup was successful. 

******************************************************************************/
/*
 * Host-only (VZ HOST mode) setup of a guest firmware raw heap:
 * validates the address/size, creates a region RA over the fixed
 * RGX_FIRMWARE_RAW_HEAP_SIZE window, then allocates and (pre)maps the
 * backing firmware memory for the given OSID.
 */
PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode,
                                  IMG_UINT32 ui32OSID,
                                  IMG_DEV_PHYADDR sDevPAddr,
                                  IMG_UINT64 ui64DevPSize)
{
	PVRSRV_ERROR eError;
	IMG_CHAR szRegionRAName[PVRSRV_MAX_RA_NAME_LENGTH];
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
	IMG_UINT32 ui32RawFwHeapAllocFlags = (RGX_FWSHAREDMEM_ALLOCFLAGS |
	                                      PVRSRV_MEMALLOCFLAG_FW_ALLOC_RAW |
	                                      PVRSRV_MEMALLOCFLAG_FW_RAW_ALLOC_OSID(ui32OSID));

	/* Only the VZ host performs guest heap registration */
	PVRSRV_VZ_RET_IF_NOT_MODE(HOST, PVRSRV_OK);

	OSSNPrintf(szRegionRAName, sizeof(szRegionRAName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSID);

	/* The heap must be non-empty, at a non-zero address, belong to a valid
	 * OSID and be exactly the fixed raw-heap size. */
	if (!ui64DevPSize ||
	    !sDevPAddr.uiAddr ||
	    ui32OSID >= RGX_NUM_OS_SUPPORTED ||
	    ui64DevPSize != RGX_FIRMWARE_RAW_HEAP_SIZE)
	{
		PVR_DPF((PVR_DBG_ERROR, "Invalid parameters for %s", szRegionRAName));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	eError = PVRSRVCreateRegionRA(psDeviceNode->psDevConfig,
	                              &psDeviceNode->psKernelFwRawMemArena[ui32OSID],
	                              psDeviceNode->szKernelFwRawRAName[ui32OSID],
	                              0,
	                              sDevPAddr.uiAddr,
	                              RGX_FIRMWARE_RAW_HEAP_SIZE,
	                              0,
	                              szRegionRAName);
	PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVCreateRegionRA");

	PDUMPCOMMENT("Allocate and map raw firmware heap for OSID: [%d]", ui32OSID);

#if (RGX_NUM_OS_SUPPORTED > 1)
	/* don't clear the heap of other guests on allocation */
	ui32RawFwHeapAllocFlags &= (ui32OSID > RGXFW_HOST_OS) ? (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0);
#endif

	/* if the firmware is already powered up, consider the firmware heaps are pre-mapped. */
	if (psDeviceNode->bAutoVzFwIsUp)
	{
		ui32RawFwHeapAllocFlags &= RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp);
		DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_TRUE);
	}

	eError = DevmemFwAllocate(psDevInfo,
	                          RGX_FIRMWARE_RAW_HEAP_SIZE,
	                          ui32RawFwHeapAllocFlags,
	                          psDevInfo->psGuestFirmwareRawHeap[ui32OSID]->pszName,
	                          &psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]);
	PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate");

	/* Mark this devmem heap as premapped so allocations will not require device mapping. */
	DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_TRUE);

	if (ui32OSID == RGXFW_HOST_OS)
	{
		/* if the Host's raw fw heap is premapped, mark its main & config sub-heaps accordingly
		 * No memory allocated from these sub-heaps will be individually mapped into the device's
		 * address space so they can remain marked permanently as premapped. */
		DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_TRUE);
		DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_TRUE);
	}

	return eError;
}

/*!
*******************************************************************************

 @Function	RGXFwRawHeapUnmapFree

 @Description Unregister firmware heap for the specified guest OSID

 @Input psDeviceNode - device node
 @Input ui32OSID     - Guest OSID

******************************************************************************/
/*
 * Teardown counterpart of RGXFwRawHeapAllocMap: drops the premap flag so the
 * heap can actually be unmapped, frees the backing fw memory and deletes the
 * region RA. All steps are individually guarded, so partial setups are safe.
 */
void RGXFwRawHeapUnmapFree(PVRSRV_DEVICE_NODE *psDeviceNode,
                           IMG_UINT32 ui32OSID)
{
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;

	/* remove the premap status, so the heap can be unmapped and freed */
	if (psDevInfo->psGuestFirmwareRawHeap[ui32OSID])
	{
		DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_FALSE);
	}

	if (psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID])
	{
		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]);
		psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID] = NULL;
	}

	if (psDeviceNode->psKernelFwRawMemArena[ui32OSID])
	{
		RA_Delete(psDeviceNode->psKernelFwRawMemArena[ui32OSID]);
	}

	psDeviceNode->psKernelFwRawMemArena[ui32OSID] = NULL;
}

/******************************************************************************
 End of file (rgxfwutils.c)
******************************************************************************/
diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxfwutils.h b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxfwutils.h
new file mode 100644
index 000000000000..f33dc9f13ede
--- /dev/null
+++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxfwutils.h
@@ -0,0 +1,1280 @@
/*************************************************************************/ /*!
@File
@Title          RGX firmware utility routines
@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
@Description    RGX firmware utility routines
@License        Dual MIT/GPLv2

The contents of this file are subject to the MIT license as set out below.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ /**************************************************************************/

#ifndef RGXFWUTILS_H
#define RGXFWUTILS_H

#include "rgxdevice.h"
#include "rgxccb.h"
#include "devicemem.h"
#include "device.h"
#include "pvr_notifier.h"
#include "pvrsrv.h"
#include "connection_server.h"
#include "rgxta3d.h"
#include "devicemem_utils.h"
#include "rgxmem.h"

#if defined(SUPPORT_TRUSTED_DEVICE)
#include "physmem_tdfwmem.h"
#endif

#if defined(SUPPORT_DEDICATED_FW_MEMORY)
#include "physmem_fwdedicatedmem.h"
#endif

/*
 * Map the FW-allocation type encoded in *puiFlags to the corresponding
 * device memory heap (main / config / per-OSID raw). Unknown types default
 * to the main heap and have the MAIN flag OR'ed into *puiFlags. The
 * device-wide poison-on-free flag is also merged into *puiFlags.
 *
 * Returns PVRSRV_OK, or an invalid-parameter error if a RAW allocation
 * names an OSID outside RGX_NUM_OS_SUPPORTED.
 */
static INLINE PVRSRV_ERROR _SelectDevMemHeap(PVRSRV_RGXDEV_INFO *psDevInfo,
                                             DEVMEM_FLAGS_T *puiFlags,
                                             DEVMEM_HEAP **ppsFwHeap)
{
	switch (PVRSRV_FW_ALLOC_TYPE(*puiFlags))
	{
		case FW_ALLOC_MAIN:
		{
			*ppsFwHeap = psDevInfo->psFirmwareMainHeap;
			break;
		}
		case FW_ALLOC_CONFIG:
		{
			*ppsFwHeap = psDevInfo->psFirmwareConfigHeap;
			break;
		}
		case FW_ALLOC_RAW:
		{
			IMG_UINT32 ui32OSID = PVRSRV_FW_RAW_ALLOC_OSID(*puiFlags);

			PVR_LOG_RETURN_IF_INVALID_PARAM(ui32OSID < RGX_NUM_OS_SUPPORTED, "ui32OSID");
			*ppsFwHeap = psDevInfo->psGuestFirmwareRawHeap[ui32OSID];
			break;
		}
		default:
		{
			/* Firmware local allocations are by default from the fw main heap */
			*puiFlags |= PVRSRV_MEMALLOCFLAG_FW_ALLOC_MAIN;
			*ppsFwHeap = psDevInfo->psFirmwareMainHeap;
			break;
		}
	}

	/* Imported from AppHint , flag to poison allocations when freed */
	*puiFlags |= psDevInfo->ui32FWPoisonOnFreeFlag;

	return PVRSRV_OK;
}

/*
 * Firmware-only allocation (which are initialised by the host) must be aligned to the SLC cache line size.
 * This is because firmware-only allocations are GPU_CACHE_INCOHERENT and this causes problems
 * if two allocations share the same cache line; e.g. the initialisation of the second allocation won't
 * make it into the SLC cache because it has been already loaded when accessing the content of the first allocation.
 */
/*
 * Allocate-and-map firmware memory on the heap selected from uiFlags.
 * Allocation names must carry the "Fw" prefix (asserted). Alignment is the
 * SLC cache-line size (see comment above). Returns the DevmemAllocateAndMap
 * result; the device virtual address is discarded here.
 */
static INLINE PVRSRV_ERROR DevmemFwAllocate(PVRSRV_RGXDEV_INFO *psDevInfo,
                                            IMG_DEVMEM_SIZE_T uiSize,
                                            DEVMEM_FLAGS_T uiFlags,
                                            const IMG_CHAR *pszText,
                                            DEVMEM_MEMDESC **ppsMemDescPtr)
{
	IMG_DEV_VIRTADDR sTmpDevVAddr;
	PVRSRV_ERROR eError;
	DEVMEM_HEAP *psFwHeap;

	PVR_DPF_ENTERED;

	/* Enforce the standard pre-fix naming scheme callers must follow */
	PVR_ASSERT((pszText != NULL) && (pszText[0] == 'F') && (pszText[1] == 'w'));

	eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF_RETURN_RC(eError);
	}

	eError = DevmemAllocateAndMap(psFwHeap,
	                              uiSize,
	                              GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)),
	                              uiFlags,
	                              pszText,
	                              ppsMemDescPtr,
	                              &sTmpDevVAddr);

	PVR_DPF_RETURN_RC(eError);
}

/*
 * Allocate exportable firmware memory ("FwEx" name prefix enforced) and map
 * it into the firmware address space. On mapping failure the allocation is
 * freed before returning the error.
 * NOTE(review): unlike DevmemFwAllocateSparse below, this maps into
 * psDevInfo->psFirmwareMainHeap even though _SelectDevMemHeap may have
 * selected a different heap — confirm this is intentional.
 */
static INLINE PVRSRV_ERROR DevmemFwAllocateExportable(PVRSRV_DEVICE_NODE *psDeviceNode,
                                                      IMG_DEVMEM_SIZE_T uiSize,
                                                      IMG_DEVMEM_ALIGN_T uiAlign,
                                                      DEVMEM_FLAGS_T uiFlags,
                                                      const IMG_CHAR *pszText,
                                                      DEVMEM_MEMDESC **ppsMemDescPtr)
{
	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
	IMG_DEV_VIRTADDR sTmpDevVAddr;
	PVRSRV_ERROR eError;
	DEVMEM_HEAP *psFwHeap;

	PVR_DPF_ENTERED;

	/* Enforce the standard pre-fix naming scheme callers must follow */
	PVR_ASSERT((pszText != NULL) &&
	           (pszText[0] == 'F') && (pszText[1] == 'w') &&
	           (pszText[2] == 'E') && (pszText[3] == 'x'));

	eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF_RETURN_RC(eError);
	}

	eError = DevmemAllocateExportable(psDeviceNode,
	                                  uiSize,
	                                  uiAlign,
	                                  DevmemGetHeapLog2PageSize(psFwHeap),
	                                  uiFlags,
	                                  pszText,
	                                  ppsMemDescPtr);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "FW DevmemAllocateExportable failed (%u)", eError));
		PVR_DPF_RETURN_RC(eError);
	}

	/*
		We need to map it so the heap for this allocation
		is set
	*/
	eError = DevmemMapToDevice(*ppsMemDescPtr,
	                           psDevInfo->psFirmwareMainHeap,
	                           &sTmpDevVAddr);
	if (eError != PVRSRV_OK)
	{
		DevmemFree(*ppsMemDescPtr);
		PVR_DPF((PVR_DBG_ERROR, "FW DevmemMapToDevice failed (%u)", eError));
	}

	PVR_DPF_RETURN_RC1(eError, *ppsMemDescPtr);
}

/*
 * Allocate sparse (partially-backed) firmware memory on the selected heap
 * and map it into the firmware address space. "Fw" name prefix enforced.
 * PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING is forced on, so unbacked
 * pages have no dummy backing. On mapping failure the allocation is freed.
 */
static INLINE PVRSRV_ERROR DevmemFwAllocateSparse(PVRSRV_RGXDEV_INFO *psDevInfo,
                                                  IMG_DEVMEM_SIZE_T uiSize,
                                                  IMG_DEVMEM_SIZE_T uiChunkSize,
                                                  IMG_UINT32 ui32NumPhysChunks,
                                                  IMG_UINT32 ui32NumVirtChunks,
                                                  IMG_UINT32 *pui32MappingTable,
                                                  DEVMEM_FLAGS_T uiFlags,
                                                  const IMG_CHAR *pszText,
                                                  DEVMEM_MEMDESC **ppsMemDescPtr)
{
	IMG_DEV_VIRTADDR sTmpDevVAddr;
	PVRSRV_ERROR eError;
	DEVMEM_HEAP *psFwHeap;
	IMG_UINT32 ui32Align;

	PVR_DPF_ENTERED;

	/* Enforce the standard pre-fix naming scheme callers must follow */
	PVR_ASSERT((pszText != NULL) && (pszText[0] == 'F') && (pszText[1] == 'w'));
	ui32Align = GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS));

	eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF_RETURN_RC(eError);
	}

	eError = DevmemAllocateSparse(psDevInfo->psDeviceNode,
	                              uiSize,
	                              uiChunkSize,
	                              ui32NumPhysChunks,
	                              ui32NumVirtChunks,
	                              pui32MappingTable,
	                              ui32Align,
	                              DevmemGetHeapLog2PageSize(psFwHeap),
	                              uiFlags | PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING,
	                              pszText,
	                              ppsMemDescPtr);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF_RETURN_RC(eError);
	}
	/*
		We need to map it so the heap for this allocation
		is set
	*/
	eError = DevmemMapToDevice(*ppsMemDescPtr,
	                           psFwHeap,
	                           &sTmpDevVAddr);
	if (eError != PVRSRV_OK)
	{
		DevmemFree(*ppsMemDescPtr);
		PVR_DPF_RETURN_RC(eError);
	}

	PVR_DPF_RETURN_RC(eError);
}


/*
 * Release the device virtual mapping of a firmware allocation and free it.
 * Counterpart to the DevmemFwAllocate* helpers above.
 */
static INLINE void DevmemFwUnmapAndFree(PVRSRV_RGXDEV_INFO *psDevInfo,
                                        DEVMEM_MEMDESC *psMemDesc)
{
	PVR_DPF_ENTERED1(psMemDesc);

	DevmemReleaseDevVirtAddr(psMemDesc);
	DevmemFree(psMemDesc);

	PVR_DPF_RETURN;
}

#if defined(SUPPORT_TRUSTED_DEVICE)
static INLINE
PVRSRV_ERROR
DevmemImportTDFWMem(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEVMEM_SIZE_T uiSize, + PMR_LOG2ALIGN_T uiLog2Align, + IMG_UINT32 uiMemAllocFlags, + PVRSRV_TD_FW_MEM_REGION eRegion, + DEVMEM_MEMDESC **ppsMemDescPtr) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; + PMR *psTDFWMemPMR; + IMG_DEV_VIRTADDR sTmpDevVAddr; + IMG_DEVMEM_SIZE_T uiMemDescSize; + PVRSRV_ERROR eError; + + if (ppsMemDescPtr == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: memdesc pointer is null", __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + uiMemAllocFlags |= PVRSRV_MEMALLOCFLAG_FW_ALLOC_MAIN; + + eError = PhysmemNewTDFWMemPMR(NULL, + psDeviceNode, + uiSize, + uiLog2Align, + uiMemAllocFlags, + eRegion, + &psTDFWMemPMR); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDFWMemPMR failed (%u)", eError)); + goto PMRCreateError; + } + + /* NB: TDFWMemPMR refcount: 1 -> 2 */ + eError = DevmemLocalImport(psDeviceNode, + psTDFWMemPMR, + uiMemAllocFlags, + ppsMemDescPtr, + &uiMemDescSize, + "TDFWMem"); + if (eError != PVRSRV_OK) + { + goto ImportError; + } + + eError = DevmemMapToDevice(*ppsMemDescPtr, + psDevInfo->psFirmwareMainHeap, + &sTmpDevVAddr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to map TD META code PMR (%u)", eError)); + goto MapError; + } + + /* NB: TDFWMemPMR refcount: 2 -> 1 + * The PMR will be unreferenced again (and destroyed) when + * the memdesc tracking it is cleaned up + */ + PMRUnrefPMR(psTDFWMemPMR); + + return PVRSRV_OK; + +MapError: + DevmemFree(*ppsMemDescPtr); + *ppsMemDescPtr = NULL; +ImportError: + /* Unref and destroy the PMR */ + PMRUnrefPMR(psTDFWMemPMR); +PMRCreateError: + + return eError; +} +#endif + + +#if defined(SUPPORT_DEDICATED_FW_MEMORY) +static INLINE +PVRSRV_ERROR DevmemAllocateDedicatedFWMem(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEVMEM_SIZE_T uiSize, + PMR_LOG2ALIGN_T uiLog2Align, + IMG_UINT32 uiMemAllocFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr) +{ 
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; + PMR *psPMR; + IMG_DEV_VIRTADDR sTmpDevVAddr; + IMG_DEVMEM_SIZE_T uiMemDescSize; + IMG_DEVMEM_ALIGN_T uiAlign = 1 << uiLog2Align; + PVRSRV_ERROR eError; + + PVR_ASSERT(ppsMemDescPtr); + + eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap), + &uiSize, + &uiAlign); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "DevmemExportalignAdjustSizeAndAlign failed (%u)", eError)); + goto PMRCreateError; + } + + eError = PhysmemNewFWDedicatedMemPMR(NULL, + psDeviceNode, + uiSize, + uiLog2Align, + uiMemAllocFlags, + &psPMR); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PhysmemNewFWDedicatedMemPMR failed (%u)", eError)); + goto PMRCreateError; + } + + /* NB: FWDedicatedMemPMR refcount: 1 -> 2 */ + eError = DevmemLocalImport(psDeviceNode, + psPMR, + uiMemAllocFlags, + ppsMemDescPtr, + &uiMemDescSize, + pszText); + if (eError != PVRSRV_OK) + { + goto ImportError; + } + + eError = DevmemMapToDevice(*ppsMemDescPtr, + psDevInfo->psFirmwareMainHeap, + &sTmpDevVAddr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to map dedicated FW memory (%u)", eError)); + goto MapError; + } + + /* NB: FWDedicatedMemPMR refcount: 2 -> 1 + * The PMR will be unreferenced again (and destroyed) when + * the memdesc tracking it is cleaned up + */ + PMRUnrefPMR(psPMR); + + return PVRSRV_OK; + +MapError: + DevmemFree(*ppsMemDescPtr); + *ppsMemDescPtr = NULL; +ImportError: + /* Unref and destroy the PMR */ + PMRUnrefPMR(psPMR); +PMRCreateError: + + return eError; +} +#endif + + +/* + * This function returns the value of the hardware register RGX_CR_TIMER + * which is a timer counting in ticks. 
+ */ + +static INLINE IMG_UINT64 RGXReadHWTimerReg(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_UINT64 ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER); + + /* + * In order to avoid having to issue three 32-bit reads to detect the + * lower 32-bits wrapping, the MSB of the low 32-bit word is duplicated + * in the MSB of the high 32-bit word. If the wrap happens, we just read + * the register again (it will not wrap again so soon). + */ + if ((ui64Time ^ (ui64Time << 32)) & ~RGX_CR_TIMER_BIT31_CLRMSK) + { + ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER); + } + + return (ui64Time & ~RGX_CR_TIMER_VALUE_CLRMSK) >> RGX_CR_TIMER_VALUE_SHIFT; +} + +/* + * This FW Common Context is only mapped into kernel for initialisation and cleanup purposes. + * Otherwise this allocation is only used by the FW. + * Therefore the GPU cache doesn't need coherency, + * and write-combine is suffice on the CPU side (WC buffer will be flushed at the first kick) + */ +#define RGX_FWCOMCTX_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)| \ + PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | \ + PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE | \ + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) + +#define RGX_FWSHAREDMEM_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ + PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ + PVRSRV_MEMALLOCFLAG_UNCACHED | \ + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) + +/* + * This FW Init Data is initialised on the CPU and then passed to the FW. 
We need + * to make the CPU mapping write-combined to avoid CPU-specific alignment issues + * for device memory. + */ +#define RGX_FWINITDATA_WC_ALLOCFLAGS ((RGX_FWSHAREDMEM_ALLOCFLAGS & (~PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK)) | PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE) + +#define RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS (RGX_FWSHAREDMEM_ALLOCFLAGS & (~PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE)) +#define RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS (RGX_FWSHAREDMEM_ALLOCFLAGS & (~PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE)) + +/* data content kept in physical memory from previous boot cycles must not be cleared during allocation */ +#define RGX_AUTOVZ_KEEP_FW_DATA_MASK(bKeepMem) ((bKeepMem) ? (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0U)) + +/****************************************************************************** + * RGXSetFirmwareAddress Flags + *****************************************************************************/ +#define RFW_FWADDR_FLAG_NONE (0) /*!< Void flag */ +#define RFW_FWADDR_NOREF_FLAG (1U << 0) /*!< It is safe to immediately release the reference to the pointer, + otherwise RGXUnsetFirmwareAddress() must be called when finished. 
*/ + +IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo); +PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO* psDevInfo, DEVMEM_FLAGS_T uiAllocFlags); + +#if defined(SUPPORT_POWMON_COMPONENT) +IMG_BOOL RGXPowmonBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo); +PVRSRV_ERROR RGXPowmonBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo); +#endif + +#if defined(SUPPORT_TBI_INTERFACE) +IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo); +PVRSRV_ERROR RGXTBIBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo); +#endif + +PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bEnableSignatureChecks, + IMG_UINT32 ui32SignatureChecksBufSize, + IMG_UINT32 ui32HWPerfFWBufSizeKB, + IMG_UINT64 ui64HWPerfFilter, + IMG_UINT32 ui32RGXFWAlignChecksArrLength, + IMG_UINT32 *pui32RGXFWAlignChecks, + IMG_UINT32 ui32ConfigFlags, + IMG_UINT32 ui32ConfigFlagsExt, + IMG_UINT32 ui32FwOsCfgFlags, + IMG_UINT32 ui32LogType, + IMG_UINT32 ui32FilterFlags, + IMG_UINT32 ui32JonesDisableMask, + IMG_UINT32 ui32HWRDebugDumpLimit, + IMG_UINT32 ui32HWPerfCountersDataSize, + IMG_UINT32 ui32KillingCtl, + IMG_UINT32 *pui32TPUTrilinearFracMask, + IMG_UINT32 *pui32USRMNumRegions, + IMG_UINT64 *pui64UVBRMNumRegions, + RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf, + FW_PERF_CONF eFirmwarePerf, + IMG_UINT32 ui32AvailableSPUMask); + + + +void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*************************************************************************/ /*! +@Function RGXSetupFwAllocation + +@Description Allocates firmware memory and optionally returns its firmware virtual address and/or a CPU pointer to it. 
+ +@Input psDevInfo Device Info struct +@Input uiAllocFlags Flags determining type of memory allocation +@Input ui32Size Size of memory allocation +@Input pszName Allocation label +@Input psFwPtr Address of the firmware pointer to set +@Input ppvCpuPtr Address of the cpu pointer to set +@Input ui32DevVAFlags Any combination of RFW_FWADDR_*_FLAG + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXSetupFwAllocation(PVRSRV_RGXDEV_INFO *psDevInfo, + DEVMEM_FLAGS_T uiAllocFlags, + IMG_UINT32 ui32Size, + const IMG_CHAR *pszName, + DEVMEM_MEMDESC **ppsMemDesc, + RGXFWIF_DEV_VIRTADDR *psFwPtr, + void **ppvCpuPtr, + IMG_UINT32 ui32DevVAFlags); + +/*************************************************************************/ /*! +@Function RGXSetFirmwareAddress + +@Description Sets a pointer in a firmware data structure. + +@Input ppDest Address of the pointer to set +@Input psSrc MemDesc describing the pointer +@Input ui32Flags Any combination of RFW_FWADDR_*_FLAG + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR *ppDest, + DEVMEM_MEMDESC *psSrc, + IMG_UINT32 uiOffset, + IMG_UINT32 ui32Flags); + + +/*************************************************************************/ /*! +@Function RGXSetMetaDMAAddress + +@Description Fills a Firmware structure used to setup the Meta DMA with two + pointers to the same data, one on 40 bit and one on 32 bit + (pointer in the FW memory space). 
+@Input psDest Address of the structure to set +@Input psSrcMemDesc MemDesc describing the pointer +@Input psSrcFWDevVAddr Firmware memory space pointer + +@Return void +*/ /**************************************************************************/ +void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR *psDest, + DEVMEM_MEMDESC *psSrcMemDesc, + RGXFWIF_DEV_VIRTADDR *psSrcFWDevVAddr, + IMG_UINT32 uiOffset); + + +/*************************************************************************/ /*! +@Function RGXUnsetFirmwareAddress + +@Description Unsets a pointer in a firmware data structure + +@Input psSrc MemDesc describing the pointer + +@Return void +*/ /**************************************************************************/ +void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc); + +PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue); +PVRSRV_ERROR RGXReadMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32* ui32RegValue); + +/*************************************************************************/ /*! +@Function FWCommonContextAllocate + +@Description Allocate a FW common context. This allocates the HW memory + for the context, the CCB and wires it all together. + +@Input psConnection Connection this context is being created on +@Input psDeviceNode Device node to create the FW context on + (must be RGX device node) +@Input eRGXCCBRequestor RGX_CCB_REQUESTOR_TYPE enum constant + which represents the requestor of this FWCC +@Input eDM Data Master type +@Input psServerMMUContext Server MMU memory context. +@Input psAllocatedMemDesc Pointer to pre-allocated MemDesc to use + as the FW context or NULL if this function + should allocate it +@Input ui32AllocatedOffset Offset into pre-allocated MemDesc to use + as the FW context. 
If psAllocatedMemDesc + is NULL then this parameter is ignored +@Input psFWMemContextMemDesc MemDesc of the FW memory context this + common context resides on +@Input psContextStateMemDesc FW context state (context switch) MemDesc +@Input ui32CCBAllocSizeLog2 Size of the CCB for this context +@Input ui32CCBMaxAllocSizeLog2 Maximum size to which CCB can grow for this context +@Input ui32ContextFlags Flags which specify properties of the context +@Input ui32Priority Priority of the context +@Input ui32MaxDeadlineMS Max deadline limit in MS that the workload can run +@Input ui64RobustnessAddress Address for FW to signal a context reset +@Input psInfo Structure that contains extra info + required for the creation of the context + (elements might change from core to core) +@Return PVRSRV_OK if the context was successfully created +*/ /**************************************************************************/ +PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor, + RGXFWIF_DM eDM, + SERVER_MMU_CONTEXT *psServerMMUContext, + DEVMEM_MEMDESC *psAllocatedMemDesc, + IMG_UINT32 ui32AllocatedOffset, + DEVMEM_MEMDESC *psFWMemContextMemDesc, + DEVMEM_MEMDESC *psContextStateMemDesc, + IMG_UINT32 ui32CCBAllocSizeLog2, + IMG_UINT32 ui32CCBMaxAllocSizeLog2, + IMG_UINT32 ui32ContextFlags, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32MaxDeadlineMS, + IMG_UINT64 ui64RobustnessAddress, + RGX_COMMON_CONTEXT_INFO *psInfo, + RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext); + +void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); + +PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); + +RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); + +SERVER_MMU_CONTEXT *FWCommonContextGetServerMMUCtx(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); + +RGXFWIF_CONTEXT_RESET_REASON 
FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + IMG_UINT32 *pui32LastResetJobRef); + +PVRSRV_RGXDEV_INFO* FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); + +PVRSRV_ERROR RGXGetFWCommonContextAddrFromServerMMUCtx(PVRSRV_RGXDEV_INFO *psDevInfo, + SERVER_MMU_CONTEXT *psServerMMUContext, + PRGXFWIF_FWCOMMONCONTEXT *psFWCommonContextFWAddr); + +PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + IMG_UINT32 ui32ContextFlags); + +/*! +******************************************************************************* +@Function RGXScheduleProcessQueuesKM + +@Description Software command complete handler + (sends uncounted kicks for all the DMs through the MISR) + +@Input hCmdCompHandle RGX device node + +@Return None +******************************************************************************/ +void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle); + +/*! +******************************************************************************* + +@Function RGXInstallProcessQueuesMISR + +@Description Installs the MISR to handle Process Queues operations + +@Input phMISR Pointer to the MISR handler +@Input psDeviceNode RGX Device node + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode); + +PVRSRV_ERROR RGXSendCommandsFromDeferredList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bPoll); + +/*************************************************************************/ /*! +@Function RGXSendCommandWithPowLockAndGetKCCBSlot + +@Description Sends a command to a particular DM without honouring + pending cache operations but taking the power lock. + +@Input psDevInfo Device Info +@Input psKCCBCmd The cmd to send. 
+@Input ui32PDumpFlags Pdump flags +@Output pui32CmdKCCBSlot When non-NULL: + - Pointer on return contains the kCCB slot + number in which the command was enqueued. + - Resets the value of the allotted slot to + RGXFWIF_KCCB_RTN_SLOT_RST +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXSendCommandWithPowLockAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_KCCB_CMD *psKCCBCmd, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 *pui32CmdKCCBSlot); + +#define RGXSendCommandWithPowLock(psDevInfo, psKCCBCmd, ui32PDumpFlags) \ + RGXSendCommandWithPowLockAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, NULL) + +/*************************************************************************/ /*! +@Function RGXSendCommandAndGetKCCBSlot + +@Description Sends a command to a particular DM without honouring + pending cache operations or the power lock. + The function flushes any deferred KCCB commands first. + +@Input psDevInfo Device Info +@Input psKCCBCmd The cmd to send. +@Input uiPdumpFlags PDump flags. +@Output pui32CmdKCCBSlot When non-NULL: + - Pointer on return contains the kCCB slot + number in which the command was enqueued. + - Resets the value of the allotted slot to + RGXFWIF_KCCB_RTN_SLOT_RST +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXSendCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_KCCB_CMD *psKCCBCmd, + PDUMP_FLAGS_T uiPdumpFlags, + IMG_UINT32 *pui32CmdKCCBSlot); + +#define RGXSendCommand(psDevInfo, psKCCBCmd, ui32PDumpFlags) \ + RGXSendCommandAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, NULL) + +/*************************************************************************/ /*! 
+@Function RGXScheduleCommandAndGetKCCBSlot + +@Description Sends a command to a particular DM and kicks the firmware but + first schedules any commands which have to happen before + handle + +@Input psDevInfo Device Info +@Input psServerMMUContext Device server MMU context or NULL if current + app context does not require its MMU caches + to be invalidated (firmware context caches + will still be invalidated if required.) +@Input eDM To which DM the cmd is sent. +@Input psKCCBCmd The cmd to send. +@Input ui32CacheOpFence Pending cache op. fence value. +@Input ui32PDumpFlags PDump flags +@Output pui32CmdKCCBSlot When non-NULL: + - Pointer on return contains the kCCB slot + number in which the command was enqueued. + - Resets the value of the allotted slot to + RGXFWIF_KCCB_RTN_SLOT_RST + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, + SERVER_MMU_CONTEXT *psServerMMUContext, + RGXFWIF_DM eKCCBType, + RGXFWIF_KCCB_CMD *psKCCBCmd, + IMG_UINT32 ui32CacheOpFence, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 *pui32CmdKCCBSlot); +#define RGXScheduleCommand(psDevInfo, psServerMMUContext, eKCCBType, psKCCBCmd, ui32CacheOpFence, ui32PDumpFlags) \ + RGXScheduleCommandAndGetKCCBSlot(psDevInfo, psServerMMUContext, eKCCBType, psKCCBCmd, ui32CacheOpFence, ui32PDumpFlags, NULL) + +/*************************************************************************/ /*! +@Function RGXWaitForKCCBSlotUpdate + +@Description Waits until the required kCCB slot value is updated by the FW + (signifies command completion). Additionally, dumps a relevant + PDump poll command. 
+ +@Input psDevInfo Device Info +@Input ui32SlotNum The kCCB slot number to wait for an update on +@Input ui32PDumpFlags + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXWaitForKCCBSlotUpdate(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32SlotNum, + IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR RGXFirmwareUnittests(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*************************************************************************/ /*! +@Function RGXPollForGPCommandCompletion + +@Description Polls for completion of a submitted GP command. Poll is done + on a value matching a masked read from the address. + +@Input psDevNode Pointer to device node struct +@Input pui32LinMemAddr CPU linear address to poll +@Input ui32Value Required value +@Input ui32Mask Mask + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV RGXPollForGPCommandCompletion(PVRSRV_DEVICE_NODE *psDevNode, + volatile IMG_UINT32 __iomem *pui32LinMemAddr, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask); + +/*************************************************************************/ /*! +@Function RGXStateFlagCtrl + +@Description Set and return FW internal state flags. + +@Input psDevInfo Device Info +@Input ui32Config AppHint config flags +@Output pui32State Current AppHint state flag configuration +@Input bSetNotClear Set or clear the provided config flags + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Config, + IMG_UINT32 *pui32State, + IMG_BOOL bSetNotClear); + +/*! +******************************************************************************* +@Function RGXFWRequestCommonContextCleanUp + +@Description Schedules a FW common context cleanup. 
The firmware doesn't + block waiting for the resource to become idle but rather + notifies the host that the resource is busy. + +@Input psDeviceNode pointer to device node +@Input psServerCommonContext context to be cleaned up +@Input eDM Data master, to which the cleanup command should + be sent +@Input ui32PDumpFlags PDump continuous flag + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + RGXFWIF_DM eDM, + IMG_UINT32 ui32PDumpFlags); + +/*! +******************************************************************************* +@Function RGXFWRequestHWRTDataCleanUp + +@Description Schedules a FW HWRTData memory cleanup. The firmware doesn't + block waiting for the resource to become idle but rather + notifies the host that the resource is busy. + +@Input psDeviceNode pointer to device node +@Input psHWRTData firmware address of the HWRTData for clean-up + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, + PRGXFWIF_HWRTDATA psHWRTData); + +/*! +******************************************************************************* +@Function RGXFWRequestFreeListCleanUp + +@Description Schedules a FW FreeList cleanup. The firmware doesn't block + waiting for the resource to become idle but rather notifies the + host that the resource is busy. + +@Input psDeviceNode pointer to device node +@Input psFWFreeList firmware address of the FreeList for clean-up + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDeviceNode, + PRGXFWIF_FREELIST psFWFreeList); + +/*! 
+******************************************************************************* +@Function RGXFWRequestZSBufferCleanUp + +@Description Schedules a FW ZS Buffer cleanup. The firmware doesn't block + waiting for the resource to become idle but rather notifies the + host that the resource is busy. + +@Input psDevInfo pointer to device info +@Input psFWZSBuffer firmware address of the ZS Buffer for clean-up + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo, + PRGXFWIF_ZSBUFFER psFWZSBuffer); + +PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext, + CONNECTION_DATA *psConnection, + PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Priority, + RGXFWIF_DM eDM); + +/*! +******************************************************************************* +@Function RGXFWSetHCSDeadline + +@Description Requests the Firmware to set a new Hard Context Switch timeout + deadline. Context switches that surpass that deadline cause the + system to kill the currently running workloads. + +@Input psDevInfo pointer to device info +@Input ui32HCSDeadlineMs The deadline in milliseconds. + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32HCSDeadlineMs); + +/*! +******************************************************************************* +@Function RGXFWChangeOSidPriority + +@Description Requests the Firmware to change the priority of an operating + system. Higher priority number equals higher priority on the + scheduling system. 
+ +@Input psDevInfo pointer to device info +@Input ui32OSid The OSid whose priority is to be altered +@Input ui32Priority The new priority number for the specified OSid + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32OSid, + IMG_UINT32 ui32Priority); + +/*! +******************************************************************************* +@Function RGXFWSetOSIsolationThreshold + +@Description Requests the Firmware to change the priority threshold of the + OS Isolation group. Any OS with a priority higher or equal than + the threshold is considered to be belonging to the isolation + group. + +@Input psDevInfo pointer to device info +@Input ui32IsolationPriorityThreshold The new priority threshold + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWSetOSIsolationThreshold(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32IsolationPriorityThreshold); + +/*! +******************************************************************************* +@Function RGXFWHealthCheckCmd + +@Description Ping the firmware to check if it is responsive. + +@Input psDevInfo pointer to device info + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWHealthCheckCmd(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! +******************************************************************************* +@Function RGXFWSetFwOsState + +@Description Requests the Firmware to change the guest OS Online states. + This should be initiated by the VMM when a guest VM comes + online or goes offline. If offline, the FW offloads any current + resource from that OSID. The request is repeated until the FW + has had time to free all the resources or has waited for + workloads to finish. 
+ +@Input psDevInfo pointer to device info +@Input ui32OSid The Guest OSid whose state is being altered +@Input eOSOnlineState The new state (Online or Offline) + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32OSid, + RGXFWIF_OS_STATE_CHANGE eOSOnlineState); + +/*! +******************************************************************************* +@Function RGXFWConfigPHR + +@Description Configure the Periodic Hardware Reset functionality + +@Input psDevInfo pointer to device info +@Input ui32PHRMode desired PHR mode + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWConfigPHR(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32PHRMode); + +/*! +******************************************************************************* +@Function RGXReadMETAAddr + +@Description Reads a value at given address in META memory space + (it can be either a memory location or a META register) + +@Input psDevInfo pointer to device info +@Input ui32METAAddr address in META memory space + +@Output pui32Value value + +@Return PVRSRV_ERROR +******************************************************************************/ + +PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32METAAddr, + IMG_UINT32 *pui32Value); + +/*! 
+******************************************************************************* +@Function RGXWriteMETAAddr + +@Description Write a value to the given address in META memory space + (it can be either a memory location or a META register) + +@Input psDevInfo pointer to device info +@Input ui32METAAddr address in META memory space +@Input ui32Value Value to write to address in META memory space + +@Return PVRSRV_ERROR +******************************************************************************/ + +PVRSRV_ERROR RGXWriteMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32METAAddr, + IMG_UINT32 ui32Value); + +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) +/*! +******************************************************************************* +@Function RGXCheckCheckpointCCB + +@Description Processes all signalled checkpoints which are found in the + checkpoint CCB. + +@Input psDevInfo pointer to device node + +@Return None +******************************************************************************/ +void RGXCheckCheckpointCCB(PVRSRV_DEVICE_NODE *psDevInfo); +#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */ + +/*! +******************************************************************************* +@Function RGXCheckFirmwareCCB + +@Description Processes all commands that are found in the Firmware CCB. + +@Input psDevInfo pointer to device + +@Return None +******************************************************************************/ +void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! +******************************************************************************* +@Function RGXCheckForStalledClientContexts + +@Description Checks all client contexts, for the device with device info + provided, to see if any are waiting for a fence to signal and + optionally force signalling of the fence for the context which + has been waiting the longest. + This function is called by RGXUpdateHealthStatus() and also + may be invoked from other trigger points. 
+ +@Input psDevInfo pointer to device info +@Input bIgnorePrevious If IMG_TRUE, any stalled contexts will be + indicated immediately, rather than only + checking against any previous stalled contexts + +@Return None +******************************************************************************/ +void RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bIgnorePrevious); + +/*! +******************************************************************************* +@Function RGXUpdateHealthStatus + +@Description Tests a number of conditions which might indicate a fatal error + has occurred in the firmware. The result is stored in the + device node eHealthStatus. + +@Input psDevNode Pointer to device node structure. +@Input bCheckAfterTimePassed When TRUE, the function will also test + for firmware queues and polls not changing + since the previous test. + + Note: if not enough time has passed since the + last call, false positives may occur. + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, + IMG_BOOL bCheckAfterTimePassed); + + +PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM); + +void DumpFWCommonContextInfo(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel); + +/*! +******************************************************************************* +@Function AttachKickResourcesCleanupCtls + +@Description Attaches the cleanup structures to a kick command so that + submission reference counting can be performed when the + firmware processes the command + +@Output apsCleanupCtl Array of CleanupCtl structure pointers to populate. +@Output pui32NumCleanupCtl Number of CleanupCtl structure pointers written out. 
+@Input eDM Which data master is the subject of the command.
+@Input bKick TRUE if the client originally wanted to kick this DM.
+@Input psKMHWRTDataSet Optional RTData cleanup associated with the command.
+@Input psZSBuffer Optional ZSBuffer associated with the command.
+
+@Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl,
+					IMG_UINT32 *pui32NumCleanupCtl,
+					RGXFWIF_DM eDM,
+					IMG_BOOL bKick,
+					RGX_KM_HW_RT_DATASET *psKMHWRTDataSet,
+					RGX_ZSBUFFER_DATA *psZSBuffer,
+					RGX_ZSBUFFER_DATA *psMSAAScratchBuffer);
+
+/*!
+*******************************************************************************
+@Function RGXResetHWRLogs
+
+@Description Resets the HWR Logs buffer
+ (the hardware recovery count is not reset)
+
+@Input psDevNode Pointer to the device
+
+@Return PVRSRV_ERROR PVRSRV_OK on success.
+ Otherwise, a PVRSRV error code
+******************************************************************************/
+PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode);
+
+/*!
+*******************************************************************************
+@Function RGXGetPhyAddr
+
+@Description Get the physical address of a PMR at an offset within it
+
+@Input psPMR PMR of the allocation
+@Input ui32LogicalOffset Logical offset
+
+@Output psPhyAddr Physical address of the allocation
+
+@Return PVRSRV_ERROR PVRSRV_OK on success.
+ Otherwise, a PVRSRV error code
+******************************************************************************/
+PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR,
+			IMG_DEV_PHYADDR *psPhyAddr,
+			IMG_UINT32 ui32LogicalOffset,
+			IMG_UINT32 ui32Log2PageSize,
+			IMG_UINT32 ui32NumOfPages,
+			IMG_BOOL *bValid);
+
+#if defined(PDUMP)
+/*!
+******************************************************************************* +@Function RGXPdumpDrainKCCB + +@Description Wait for the firmware to execute all the commands in the kCCB + +@Input psDevInfo Pointer to the device +@Input ui32WriteOffset Woff we have to POL for the Roff to be equal to + +@Return PVRSRV_ERROR PVRSRV_OK on success. + Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32WriteOffset); +#endif /* PDUMP */ + +/*! +******************************************************************************* +@Function RGXFwRawHeapAllocMap + +@Description Register and maps to device, a raw firmware physheap + +@Return PVRSRV_ERROR PVRSRV_OK on success. + Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32OSID, + IMG_DEV_PHYADDR sDevPAddr, + IMG_UINT64 ui64DevPSize); + +/*! +******************************************************************************* +@Function RGXFwRawHeapUnmapFree + +@Description Unregister and unmap from device, a raw firmware physheap + +******************************************************************************/ +void RGXFwRawHeapUnmapFree(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32OSID); + +#if defined(SUPPORT_AUTOVZ_HW_REGS) && !defined(SUPPORT_AUTOVZ) +#error "VZ build configuration error: use of OS scratch registers supported only in AutoVz drivers." 
+#endif + +#if defined(SUPPORT_AUTOVZ_HW_REGS) +/* AutoVz with hw support */ +#define KM_GET_FW_CONNECTION(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH3) +#define KM_GET_OS_CONNECTION(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH2) +#define KM_SET_OS_CONNECTION(val, psDevInfo) OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH2, RGXFW_CONNECTION_OS_##val) + +#define KM_GET_FW_ALIVE_TOKEN(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH1) +#define KM_GET_OS_ALIVE_TOKEN(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH0) +#define KM_SET_OS_ALIVE_TOKEN(val, psDevInfo) OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH0, val) + +#else + +#if defined(SUPPORT_AUTOVZ) +#define KM_GET_FW_ALIVE_TOKEN(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->ui32AliveFwToken) +#define KM_GET_OS_ALIVE_TOKEN(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->ui32AliveOsToken) +#define KM_SET_OS_ALIVE_TOKEN(val, psDevInfo) psDevInfo->psRGXFWIfConnectionCtl->ui32AliveOsToken = val +#endif /* defined(SUPPORT_AUTOVZ) */ + +#if !defined(NO_HARDWARE) && (defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1))) +/* native, static-vz and AutoVz using shared memory */ +#define KM_GET_FW_CONNECTION(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->eConnectionFwState) +#define KM_GET_OS_CONNECTION(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->eConnectionOsState) +#define KM_SET_OS_CONNECTION(val, psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->eConnectionOsState = RGXFW_CONNECTION_OS_##val) +#else +/* dynamic-vz & nohw */ +#define KM_GET_FW_CONNECTION(psDevInfo) (RGXFW_CONNECTION_FW_ACTIVE) +#define KM_GET_OS_CONNECTION(psDevInfo) (RGXFW_CONNECTION_OS_ACTIVE) +#define KM_SET_OS_CONNECTION(val, psDevInfo) +#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (RGX_NUM_OS_SUPPORTED == 1) */ +#endif /* defined(SUPPORT_AUTOVZ_HW_REGS) */ + +#if 
defined(SUPPORT_AUTOVZ) +#define RGX_FIRST_RAW_HEAP_OSID RGXFW_HOST_OS +#else +#define RGX_FIRST_RAW_HEAP_OSID RGXFW_GUEST_OSID_START +#endif + +#define KM_OS_CONNECTION_IS(val, psDevInfo) (KM_GET_OS_CONNECTION(psDevInfo) == RGXFW_CONNECTION_OS_##val) +#define KM_FW_CONNECTION_IS(val, psDevInfo) (KM_GET_FW_CONNECTION(psDevInfo) == RGXFW_CONNECTION_FW_##val) + +#endif /* RGXFWUTILS_H */ +/****************************************************************************** + End of file (rgxfwutils.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxhwperf.c b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxhwperf.c new file mode 100644 index 000000000000..74e9eb04cbb1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxhwperf.c @@ -0,0 +1,3769 @@ +/*************************************************************************/ /*! +@File +@Title RGX HW Performance implementation +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX HW Performance implementation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ /**************************************************************************/ + +//#define PVR_DPF_FUNCTION_TRACE_ON 1 +#undef PVR_DPF_FUNCTION_TRACE_ON + +#include "img_defs.h" +#include "pvr_debug.h" +#include "rgxdevice.h" +#include "pvrsrv_error.h" +#include "pvr_notifier.h" +#include "osfunc.h" +#include "allocmem.h" + +#include "pvrsrv.h" +#include "pvrsrv_tlstreams.h" +#include "pvrsrv_tlcommon.h" +#include "tlclient.h" +#include "tlstream.h" + +#include "rgxhwperf.h" +#include "rgxapi_km.h" +#include "rgxfwutils.h" +#include "rgxtimecorr.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "pdump_km.h" +#include "pvrsrv_apphint.h" +#include "process_stats.h" +#include "rgx_hwperf_table.h" +#include "rgxinit.h" + +#include "info_page_defs.h" + +/* This is defined by default to enable producer callbacks. + * Clients of the TL interface can disable the use of the callback + * with PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK. */ +#define SUPPORT_TL_PRODUCER_CALLBACK 1 + +/* Maximum enum value to prevent access to RGX_HWPERF_STREAM_ID2_CLIENT stream */ +#define RGX_HWPERF_MAX_STREAM_ID (RGX_HWPERF_STREAM_ID2_CLIENT) + +/* Defines size of buffers returned from acquire/release calls */ +#define FW_STREAM_BUFFER_SIZE (0x80000) +#define HOST_STREAM_BUFFER_SIZE (0x20000) + +/* Must be at least as large as two tl packets of maximum size */ +static_assert(HOST_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1), + "HOST_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)"); +static_assert(FW_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1), + "FW_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)"); + +IMG_INTERNAL /*static inline*/ IMG_UINT32 RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **); + +static inline IMG_UINT32 +RGXHWPerfGetPackets(IMG_UINT32 ui32BytesExp, + IMG_UINT32 ui32AllowedSize, + RGX_PHWPERF_V2_PACKET_HDR psCurPkt ) +{ + IMG_UINT32 sizeSum = 0; + + /* Traverse the array to find how many packets 
will fit in the available space. */ + while ( sizeSum < ui32BytesExp && + sizeSum + RGX_HWPERF_GET_SIZE(psCurPkt) < ui32AllowedSize ) + { + sizeSum += RGX_HWPERF_GET_SIZE(psCurPkt); + psCurPkt = RGX_HWPERF_GET_NEXT_PACKET(psCurPkt); + } + + return sizeSum; +} + +static inline void +RGXSuspendHWPerfL2DataCopy(PVRSRV_RGXDEV_INFO* psDeviceInfo, + IMG_BOOL bIsReaderConnected) +{ + if (!bIsReaderConnected) + { + PVR_DPF((PVR_DBG_ERROR, "%s : HWPerf FW events enabled but host buffer for FW events is full " + "and no reader is currently connected, suspending event collection. " + "Restart driver and connect a reader to avoid event loss.", __func__)); + psDeviceInfo->bSuspendHWPerfL2DataCopy = IMG_TRUE; + } +} + +/* + RGXHWPerfCopyDataL1toL2 + */ +static IMG_UINT32 RGXHWPerfCopyDataL1toL2(PVRSRV_RGXDEV_INFO* psDeviceInfo, + IMG_BYTE *pbFwBuffer, + IMG_UINT32 ui32BytesExp) +{ + IMG_HANDLE hHWPerfStream = psDeviceInfo->hHWPerfStream; + IMG_BYTE * pbL2Buffer; + IMG_UINT32 ui32L2BufFree; + IMG_UINT32 ui32BytesCopied = 0; + IMG_UINT32 ui32BytesExpMin = RGX_HWPERF_GET_SIZE(RGX_HWPERF_GET_PACKET(pbFwBuffer)); + PVRSRV_ERROR eError; + IMG_BOOL bIsReaderConnected; + + /* HWPERF_MISR_FUNC_DEBUG enables debug code for investigating HWPerf issues */ +#ifdef HWPERF_MISR_FUNC_DEBUG + static IMG_UINT32 gui32Ordinal = IMG_UINT32_MAX; +#endif + + PVR_DPF_ENTERED; + +#ifdef HWPERF_MISR_FUNC_DEBUG + PVR_DPF((PVR_DBG_VERBOSE, "EVENTS to copy from 0x%p length:%05d", + pbFwBuffer, ui32BytesExp)); +#endif + +#ifdef HWPERF_MISR_FUNC_DEBUG + { + /* Check the incoming buffer of data has not lost any packets */ + IMG_BYTE *pbFwBufferIter = pbFwBuffer; + IMG_BYTE *pbFwBufferEnd = pbFwBuffer+ui32BytesExp; + do + { + RGX_HWPERF_V2_PACKET_HDR *asCurPos = RGX_HWPERF_GET_PACKET(pbFwBufferIter); + IMG_UINT32 ui32CurOrdinal = asCurPos->ui32Ordinal; + if (gui32Ordinal != IMG_UINT32_MAX) + { + if ((gui32Ordinal+1) != ui32CurOrdinal) + { + if (gui32Ordinal < ui32CurOrdinal) + { + PVR_DPF((PVR_DBG_WARNING, + 
"HWPerf [%p] packets lost (%u packets) between ordinal %u...%u", + pbFwBufferIter, + ui32CurOrdinal - gui32Ordinal - 1, + gui32Ordinal, + ui32CurOrdinal)); + } + else + { + PVR_DPF((PVR_DBG_WARNING, + "HWPerf [%p] packet ordinal out of sequence last: %u, current: %u", + pbFwBufferIter, + gui32Ordinal, + ui32CurOrdinal)); + } + } + } + gui32Ordinal = asCurPos->ui32Ordinal; + pbFwBufferIter += RGX_HWPERF_GET_SIZE(asCurPos); + } while (pbFwBufferIter < pbFwBufferEnd); + } +#endif + + if (ui32BytesExp > psDeviceInfo->ui32L2BufMaxPacketSize) + { + IMG_UINT32 sizeSum = RGXHWPerfGetPackets(ui32BytesExp, + psDeviceInfo->ui32L2BufMaxPacketSize, + RGX_HWPERF_GET_PACKET(pbFwBuffer)); + + if (0 != sizeSum) + { + ui32BytesExp = sizeSum; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "Failed to write data into host buffer as " + "packet is too big and hence it breaches TL " + "packet size limit (TLBufferSize / 2.5)")); + goto e0; + } + } + + /* Try submitting all data in one TL packet. */ + eError = TLStreamReserve2(hHWPerfStream, + &pbL2Buffer, + (size_t)ui32BytesExp, ui32BytesExpMin, + &ui32L2BufFree, &bIsReaderConnected); + if ( eError == PVRSRV_OK ) + { + OSDeviceMemCopy( pbL2Buffer, pbFwBuffer, (size_t)ui32BytesExp ); + eError = TLStreamCommit(hHWPerfStream, (size_t)ui32BytesExp); + if ( eError != PVRSRV_OK ) + { + PVR_DPF((PVR_DBG_ERROR, + "TLStreamCommit() failed (%d) in %s(), unable to copy packet from L1 to L2 buffer", + eError, __func__)); + goto e0; + } + /* Data were successfully written */ + ui32BytesCopied = ui32BytesExp; + } + else if (eError == PVRSRV_ERROR_STREAM_FULL) + { + /* There was not enough space for all data, copy as much as possible */ + IMG_UINT32 sizeSum = RGXHWPerfGetPackets(ui32BytesExp, ui32L2BufFree, RGX_HWPERF_GET_PACKET(pbFwBuffer)); + + PVR_DPF((PVR_DBG_MESSAGE, "Unable to reserve space (%d) in host buffer on first attempt, remaining free space: %d", ui32BytesExp, ui32L2BufFree)); + + if ( 0 != sizeSum ) + { + eError = TLStreamReserve( 
hHWPerfStream, &pbL2Buffer, (size_t)sizeSum); + + if ( eError == PVRSRV_OK ) + { + OSDeviceMemCopy( pbL2Buffer, pbFwBuffer, (size_t)sizeSum ); + eError = TLStreamCommit(hHWPerfStream, (size_t)sizeSum); + if ( eError != PVRSRV_OK ) + { + PVR_DPF((PVR_DBG_ERROR, + "TLStreamCommit() failed (%d) in %s(), unable to copy packet from L1 to L2 buffer", + eError, __func__)); + goto e0; + } + /* sizeSum bytes of hwperf packets have been successfully written */ + ui32BytesCopied = sizeSum; + } + else if ( PVRSRV_ERROR_STREAM_FULL == eError ) + { + PVR_DPF((PVR_DBG_WARNING, "Cannot write HWPerf packet into host buffer, check data in case of packet loss, remaining free space: %d", ui32L2BufFree)); + RGXSuspendHWPerfL2DataCopy(psDeviceInfo, bIsReaderConnected); + } + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "Cannot find space in host buffer, check data in case of packet loss, remaining free space: %d", ui32L2BufFree)); + RGXSuspendHWPerfL2DataCopy(psDeviceInfo, bIsReaderConnected); + } + } + if ( PVRSRV_OK != eError && /* Some other error occurred */ + PVRSRV_ERROR_STREAM_FULL != eError ) /* Full error handled by caller, we returning the copied bytes count to caller */ + { + PVR_DPF((PVR_DBG_ERROR, + "HWPerf enabled: Unexpected Error ( %d ) while copying FW buffer to TL buffer.", + eError)); + } + +e0: + /* Return the remaining packets left to be transported. */ + PVR_DPF_RETURN_VAL(ui32BytesCopied); +} + + +static INLINE IMG_UINT32 RGXHWPerfAdvanceRIdx( + const IMG_UINT32 ui32BufSize, + const IMG_UINT32 ui32Pos, + const IMG_UINT32 ui32Size) +{ + return ( ui32Pos + ui32Size < ui32BufSize ? 
ui32Pos + ui32Size : 0 ); +} + + +/* + RGXHWPerfDataStore + */ +static IMG_UINT32 RGXHWPerfDataStore(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + IMG_BYTE* psHwPerfInfo = psDevInfo->psRGXFWIfHWPerfBuf; + IMG_UINT32 ui32SrcRIdx, ui32SrcWIdx, ui32SrcWrapCount; + IMG_UINT32 ui32BytesExp = 0, ui32BytesCopied = 0, ui32BytesCopiedSum = 0; +#ifdef HWPERF_MISR_FUNC_DEBUG + IMG_UINT32 ui32BytesExpSum = 0; +#endif + + PVR_DPF_ENTERED; + + /* Caller should check this member is valid before calling */ + PVR_ASSERT(psDevInfo->hHWPerfStream); + + if (psDevInfo->bSuspendHWPerfL2DataCopy) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s : Copying data to host buffer for FW events is " + "suspended. Restart driver if HWPerf FW events are " + "needed", __func__)); + + PVR_DPF_RETURN_VAL(ui32BytesCopiedSum); + } + + /* Get a copy of the current + * read (first packet to read) + * write (empty location for the next write to be inserted) + * WrapCount (size in bytes of the buffer at or past end) + * indexes of the FW buffer */ + ui32SrcRIdx = psFwSysData->ui32HWPerfRIdx; + ui32SrcWIdx = psFwSysData->ui32HWPerfWIdx; + OSMemoryBarrier(); + ui32SrcWrapCount = psFwSysData->ui32HWPerfWrapCount; + + /* Is there any data in the buffer not yet retrieved? */ + if ( ui32SrcRIdx != ui32SrcWIdx ) + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfDataStore EVENTS found srcRIdx:%d srcWIdx: %d", ui32SrcRIdx, ui32SrcWIdx)); + + /* Is the write position higher than the read position? */ + if ( ui32SrcWIdx > ui32SrcRIdx ) + { + /* Yes, buffer has not wrapped */ + ui32BytesExp = ui32SrcWIdx - ui32SrcRIdx; +#ifdef HWPERF_MISR_FUNC_DEBUG + ui32BytesExpSum += ui32BytesExp; +#endif + ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo, + psHwPerfInfo + ui32SrcRIdx, + ui32BytesExp); + + /* Advance the read index and the free bytes counter by the number + * of bytes transported. Items will be left in buffer if not all data + * could be transported. 
Exit to allow buffer to drain. */ + psFwSysData->ui32HWPerfRIdx = RGXHWPerfAdvanceRIdx( + psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx, + ui32BytesCopied); + + ui32BytesCopiedSum += ui32BytesCopied; + } + /* No, buffer has wrapped and write position is behind read position */ + else + { + /* Byte count equal to + * number of bytes from read position to the end of the buffer, + * + data in the extra space in the end of the buffer. */ + ui32BytesExp = ui32SrcWrapCount - ui32SrcRIdx; + +#ifdef HWPERF_MISR_FUNC_DEBUG + ui32BytesExpSum += ui32BytesExp; +#endif + /* Attempt to transfer the packets to the TL stream buffer */ + ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo, + psHwPerfInfo + ui32SrcRIdx, + ui32BytesExp); + + /* Advance read index as before and Update the local copy of the + * read index as it might be used in the last if branch*/ + ui32SrcRIdx = RGXHWPerfAdvanceRIdx( + psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx, + ui32BytesCopied); + + /* Update Wrap Count */ + if ( ui32SrcRIdx == 0) + { + psFwSysData->ui32HWPerfWrapCount = psDevInfo->ui32RGXFWIfHWPerfBufSize; + } + psFwSysData->ui32HWPerfRIdx = ui32SrcRIdx; + + ui32BytesCopiedSum += ui32BytesCopied; + + /* If all the data in the end of the array was copied, try copying + * wrapped data in the beginning of the array, assuming there is + * any and the RIdx was wrapped. */ + if ( (ui32BytesCopied == ui32BytesExp) + && (ui32SrcWIdx > 0) + && (ui32SrcRIdx == 0) ) + { + ui32BytesExp = ui32SrcWIdx; +#ifdef HWPERF_MISR_FUNC_DEBUG + ui32BytesExpSum += ui32BytesExp; +#endif + ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo, + psHwPerfInfo, + ui32BytesExp); + /* Advance the FW buffer read position. 
*/ + psFwSysData->ui32HWPerfRIdx = RGXHWPerfAdvanceRIdx( + psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx, + ui32BytesCopied); + + ui32BytesCopiedSum += ui32BytesCopied; + } + } +#ifdef HWPERF_MISR_FUNC_DEBUG + if (ui32BytesCopiedSum != ui32BytesExpSum) + { + PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfDataStore: FW L1 RIdx:%u. Not all bytes copied to L2: %u bytes out of %u expected", psFwSysData->ui32HWPerfRIdx, ui32BytesCopiedSum, ui32BytesExpSum)); + } +#endif + + } + else + { + PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfDataStore NO EVENTS to transport")); + } + + PVR_DPF_RETURN_VAL(ui32BytesCopiedSum); +} + + +PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE *psDevInfo) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO* psRgxDevInfo; + IMG_UINT32 ui32BytesCopied; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + PVR_DPF_ENTERED; + + PVR_ASSERT(psDevInfo); + psRgxDevInfo = psDevInfo->pvDevice; + + /* Store FW event data if the destination buffer exists.*/ + if (psRgxDevInfo->hHWPerfStream != (IMG_HANDLE) NULL) + { + OSLockAcquire(psRgxDevInfo->hHWPerfLock); + ui32BytesCopied = RGXHWPerfDataStore(psRgxDevInfo); + if ( ui32BytesCopied ) + { /* Signal consumers that packets may be available to read when + * running from a HW kick, not when called by client APP thread + * via the transport layer CB as this can lead to stream + * corruption.*/ + eError = TLStreamSync(psRgxDevInfo->hHWPerfStream); + PVR_ASSERT(eError == PVRSRV_OK); + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfDataStoreCB: Zero bytes copied")); + RGXDEBUG_PRINT_IRQ_COUNT(psRgxDevInfo); + } + OSLockRelease(psRgxDevInfo->hHWPerfLock); + } + + + PVR_DPF_RETURN_OK; +} + + +/* Currently supported by default */ +#if defined(SUPPORT_TL_PRODUCER_CALLBACK) +static PVRSRV_ERROR RGXHWPerfTLCB(IMG_HANDLE hStream, + IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO* psRgxDevInfo = (PVRSRV_RGXDEV_INFO*)pvUser; + + 
PVR_UNREFERENCED_PARAMETER(hStream); + PVR_UNREFERENCED_PARAMETER(ui32Resp); + + PVR_ASSERT(psRgxDevInfo); + + switch (ui32ReqOp) + { + case TL_SOURCECB_OP_CLIENT_EOS: + /* Keep HWPerf resource init check and use of + * resources atomic, they may not be freed during use + */ + + /* This solution is for avoiding a deadlock situation where - + * in DoTLStreamReserve(), writer has acquired HWPerfLock and + * ReadLock and is waiting on ReadPending (which will be reset + * by reader), And + * the reader after setting ReadPending in TLStreamAcquireReadPos(), + * is waiting for HWPerfLock in RGXHWPerfTLCB(). + * So here in RGXHWPerfTLCB(), if HWPerfLock is already acquired we + * will return to the reader without waiting to acquire HWPerfLock. + */ + if (!OSTryLockAcquire(psRgxDevInfo->hHWPerfLock)) + { + PVR_DPF((PVR_DBG_MESSAGE, "hHWPerfLock is already acquired, a write " + "operation might already be in process")); + return PVRSRV_OK; + } + + if (psRgxDevInfo->hHWPerfStream != (IMG_HANDLE) NULL) + { + (void) RGXHWPerfDataStore(psRgxDevInfo); + } + OSLockRelease(psRgxDevInfo->hHWPerfLock); + break; + + default: + break; + } + + return eError; +} +#endif + + +static void RGXHWPerfL1BufferDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + if (psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc) + { + if (psRgxDevInfo->psRGXFWIfHWPerfBuf != NULL) + { + DevmemReleaseCpuVirtAddr(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc); + psRgxDevInfo->psRGXFWIfHWPerfBuf = NULL; + } + DevmemFwUnmapAndFree(psRgxDevInfo, psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc); + psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc = NULL; + } +} + +/*************************************************************************/ /*! +@Function RGXHWPerfInit + +@Description Called during driver init for initialization of HWPerf module + in the Rogue device driver. This function keeps allocated + only the minimal necessary resources, which are required for + functioning of HWPerf server module. 
+ +@Input psRgxDevInfo RGX Device Info + +@Return PVRSRV_ERROR + */ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + PVRSRV_ERROR eError; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + PVR_DPF_ENTERED; + + /* expecting a valid device info */ + PVR_ASSERT(psRgxDevInfo); + + /* Create a lock for HWPerf server module used for serializing, L1 to L2 + * copy calls (e.g. in case of TL producer callback) and L1, L2 resource + * allocation */ + eError = OSLockCreate(&psRgxDevInfo->hHWPerfLock); + PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); + + /* avoid uninitialised data */ + psRgxDevInfo->hHWPerfStream = (IMG_HANDLE) NULL; + psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc = NULL; + + PVR_DPF_RETURN_OK; +} + +/*************************************************************************/ /*! +@Function RGXHWPerfIsInitRequired + +@Description Returns true if the HWperf firmware buffer (L1 buffer) and host + driver TL buffer (L2 buffer) are not already allocated. Caller + must possess hHWPerfLock lock before calling this + function so the state tested is not inconsistent. + +@Input psRgxDevInfo RGX Device Info, on which init requirement is + checked. + +@Return IMG_BOOL Whether initialization (allocation) is required + */ /**************************************************************************/ +static INLINE IMG_BOOL RGXHWPerfIsInitRequired(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hHWPerfLock)); + +#if !defined(NO_HARDWARE) + /* Both L1 and L2 buffers are required (for HWPerf functioning) on driver + * built for actual hardware (TC, EMU, etc.) + */ + if (psRgxDevInfo->hHWPerfStream == (IMG_HANDLE) NULL) + { + /* The allocation API (RGXHWPerfInitOnDemandResources) allocates + * device memory for both L1 and L2 without any checks. Hence, + * either both should be allocated or both be NULL. + * + * In-case this changes in future (for e.g. 
a situation where one + * of the 2 buffers is already allocated and other is required), + * add required checks before allocation calls to avoid memory leaks. + */ + PVR_ASSERT(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL); + return IMG_TRUE; + } + PVR_ASSERT(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc != NULL); +#else + /* On a NO-HW driver L2 is not allocated. So, no point in checking its + * allocation */ + if (psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL) + { + return IMG_TRUE; + } +#endif + return IMG_FALSE; +} +#if !defined(NO_HARDWARE) +static void _HWPerfFWOnReaderOpenCB(void *pvArg) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO* psRgxDevInfo = (PVRSRV_RGXDEV_INFO*) pvArg; + PVRSRV_DEVICE_NODE* psDevNode = (PVRSRV_DEVICE_NODE*) psRgxDevInfo->psDeviceNode; + RGXFWIF_KCCB_CMD sKccbCmd; + IMG_UINT32 ui32kCCBCommandSlot; + + sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG; + sKccbCmd.uCmdData.sHWPerfCtrl.eOpCode = RGXFWIF_HWPERF_CTRL_EMIT_FEATURES_EV; + sKccbCmd.uCmdData.sHWPerfCtrl.ui64Mask = 0; + + eError = RGXScheduleCommandAndGetKCCBSlot(psDevNode->pvDevice, + NULL, + RGXFWIF_DM_GP, + &sKccbCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to generate feature packet in " + "firmware (error = %d)", __func__, eError)); + return; + } + + eError = RGXWaitForKCCBSlotUpdate(psRgxDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); +} +#endif +/*************************************************************************/ /*! +@Function RGXHWPerfInitOnDemandResources + +@Description This function allocates the HWperf firmware buffer (L1 buffer) + and host driver TL buffer (L2 buffer) if HWPerf is enabled at + driver load time. Otherwise, these buffers are allocated + on-demand as and when required. 
Caller + must possess hHWPerfLock lock before calling this + function so the state tested is not inconsistent if called + outside of driver initialisation. + +@Input psRgxDevInfo RGX Device Info, on which init is done + +@Return PVRSRV_ERROR + */ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo) +{ + IMG_HANDLE hStream = NULL; /* Init required for noHW */ + PVRSRV_ERROR eError; + IMG_UINT32 ui32L2BufferSize = 0; + DEVMEM_FLAGS_T uiMemAllocFlags; + IMG_CHAR pszHWPerfStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5]; /* 5 seems reasonable as it can hold + names up to "hwperf_9999", which is enough */ + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + PVR_DPF_ENTERED; + + /* Create the L1 HWPerf buffer on demand */ + uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) + | PVRSRV_MEMALLOCFLAG_GPU_READABLE + | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE + | PVRSRV_MEMALLOCFLAG_CPU_READABLE + | PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE + | PVRSRV_MEMALLOCFLAG_UNCACHED +#if defined(PDUMP) /* Helps show where the packet data ends */ + | PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC +#else /* Helps show corruption issues in driver-live */ + | PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC +#endif + ; + + /* Allocate HWPerf FW L1 buffer */ + eError = DevmemFwAllocate(psRgxDevInfo, + psRgxDevInfo->ui32RGXFWIfHWPerfBufSize+RGXFW_HWPERF_L1_PADDING_DEFAULT, + uiMemAllocFlags, + "FwHWPerfBuffer", + &psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate kernel fw hwperf buffer (%u)", + __func__, eError)); + goto e0; + } + + /* Expecting the RuntimeCfg structure is mapped into CPU virtual memory. 
+ * Also, make sure the FW address is not already set */ + PVR_ASSERT(psRgxDevInfo->psRGXFWIfRuntimeCfg && psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf.ui32Addr == 0x0); + + /* Meta cached flag removed from this allocation as it was found + * FW performance was better without it. */ + eError = RGXSetFirmwareAddress(&psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf, + psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc, + 0, RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", e0); + + RGXSetMetaDMAAddress(&psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfDMABuf, + psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc, + &psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf, + 0); + + eError = DevmemAcquireCpuVirtAddr(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc, + (void**)&psRgxDevInfo->psRGXFWIfHWPerfBuf); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire kernel hwperf buffer (%u)", + __func__, eError)); + goto e0; + } + + /* On NO-HW driver, there is no MISR installed to copy data from L1 to L2. Hence, + * L2 buffer is not allocated */ +#if !defined(NO_HARDWARE) + /* Host L2 HWPERF buffer size in bytes must be bigger than the L1 buffer + * accessed by the FW. The MISR may try to write one packet the size of the L1 + * buffer in some scenarios. When logging is enabled in the MISR, it can be seen + * if the L2 buffer hits a full condition. The closer in size the L2 and L1 buffers + * are the more chance of this happening. + * Size chosen to allow MISR to write an L1 sized packet and for the client + * application/daemon to drain a L1 sized packet e.g. ~ 1.5*L1. 
+ */
+	ui32L2BufferSize = psRgxDevInfo->ui32RGXFWIfHWPerfBufSize +
+			(psRgxDevInfo->ui32RGXFWIfHWPerfBufSize>>1);
+
+	/* form the HWPerf stream name for this DevNode, so the stream can be identified from user mode */
+	if (OSSNPrintf(pszHWPerfStreamName, sizeof(pszHWPerfStreamName), "%s%d",
+	               PVRSRV_TL_HWPERF_RGX_FW_STREAM,
+	               psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier) < 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Failed to form HWPerf stream name for device %d",
+		         __func__,
+		         psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eError = TLStreamCreate(&hStream,
+	                        psRgxDevInfo->psDeviceNode,
+	                        pszHWPerfStreamName,
+	                        ui32L2BufferSize,
+	                        TL_OPMODE_DROP_NEWER | TL_FLAG_NO_SIGNAL_ON_COMMIT,
+	                        _HWPerfFWOnReaderOpenCB, psRgxDevInfo,
+#if !defined(SUPPORT_TL_PRODUCER_CALLBACK)
+	                        NULL, NULL
+#else
+	                        /* Producer callback, enabled by default via SUPPORT_TL_PRODUCER_CALLBACK */
+	                        RGXHWPerfTLCB, psRgxDevInfo
+#endif
+	                        );
+	PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate", e1);
+
+	eError = TLStreamSetNotifStream(hStream,
+	                                PVRSRVGetPVRSRVData()->hTLCtrlStream);
+	/* we can still discover host stream so leave it as is and just log error */
+	PVR_LOG_IF_ERROR(eError, "TLStreamSetNotifStream");
+
+	/* send the event here because host stream is implicitly opened for write
+	 * in TLStreamCreate and TLStreamOpen is never called (so the event is
+	 * never emitted) */
+	TLStreamMarkStreamOpen(hStream);
+
+	{
+		TL_STREAM_INFO sTLStreamInfo;
+
+		TLStreamInfo(hStream, &sTLStreamInfo);
+		psRgxDevInfo->ui32L2BufMaxPacketSize = sTLStreamInfo.maxTLpacketSize;
+
+		psRgxDevInfo->bSuspendHWPerfL2DataCopy = IMG_FALSE;
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE, "HWPerf buffer size in bytes: L1: %d L2: %d",
+	        psRgxDevInfo->ui32RGXFWIfHWPerfBufSize, ui32L2BufferSize));
+
+#else /* defined(NO_HARDWARE) */
+	PVR_UNREFERENCED_PARAMETER(ui32L2BufferSize);
+	PVR_UNREFERENCED_PARAMETER(RGXHWPerfTLCB);
+	PVR_UNREFERENCED_PARAMETER(pszHWPerfStreamName);
+	ui32L2BufferSize = 0;
+#endif
+
+	psRgxDevInfo->hHWPerfStream
= hStream; + PVR_DPF_RETURN_OK; + +#if !defined(NO_HARDWARE) + e1: /* L2 buffer initialisation failures */ + psRgxDevInfo->hHWPerfStream = NULL; +#endif + e0: /* L1 buffer initialisation failures */ + RGXHWPerfL1BufferDeinit(psRgxDevInfo); + + PVR_DPF_RETURN_RC(eError); +} + + +void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + IMG_HANDLE hStream = psRgxDevInfo->hHWPerfStream; + + PVRSRV_VZ_RETN_IF_MODE(GUEST); + + PVR_DPF_ENTERED; + + PVR_ASSERT(psRgxDevInfo); + psRgxDevInfo->hHWPerfStream = NULL; + + /* Clean up the L2 buffer stream object if allocated */ + if (hStream) + { + /* send the event here because host stream is implicitly opened for + * write in TLStreamCreate and TLStreamClose is never called (so the + * event is never emitted) */ + TLStreamMarkStreamClose(hStream); + TLStreamClose(hStream); + } + + /* Cleanup L1 buffer resources */ + RGXHWPerfL1BufferDeinit(psRgxDevInfo); + + /* Cleanup the HWPerf server module lock resource */ + if (psRgxDevInfo->hHWPerfLock) + { + OSLockDestroy(psRgxDevInfo->hHWPerfLock); + psRgxDevInfo->hHWPerfLock = NULL; + } + + PVR_DPF_RETURN; +} + + +/****************************************************************************** + * RGX HW Performance Profiling Server API(s) + *****************************************************************************/ + +static PVRSRV_ERROR RGXHWPerfCtrlFwBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bToggle, + IMG_UINT64 ui64Mask) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO* psDevice = psDeviceNode->pvDevice; + RGXFWIF_KCCB_CMD sKccbCmd; + IMG_UINT32 ui32kCCBCommandSlot; + + /* If this method is being used whether to enable or disable + * then the hwperf buffers (host and FW) are likely to be needed + * eventually so create them, also helps unit testing. Buffers + * allocated on demand to reduce RAM foot print on systems not + * needing HWPerf resources. + * Obtain lock first, test and init if required. 
*/ + OSLockAcquire(psDevice->hHWPerfLock); + + if (!psDevice->bFirmwareInitialised) + { + psDevice->ui64HWPerfFilter = ui64Mask; // at least set filter + eError = PVRSRV_ERROR_NOT_INITIALISED; + + PVR_DPF((PVR_DBG_ERROR, + "HWPerf has NOT been initialised yet. Mask has been SET to " + "(%" IMG_UINT64_FMTSPECx ")", + ui64Mask)); + + goto unlock_and_return; + } + + if (RGXHWPerfIsInitRequired(psDevice)) + { + eError = RGXHWPerfInitOnDemandResources(psDevice); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation of on-demand HWPerfFW " + "resources failed", __func__)); + goto unlock_and_return; + } + } + +#if defined(SUPPORT_POWMON_COMPONENT) + if (RGXPowmonBufferIsInitRequired(psDeviceNode->pvDevice)) + { + /* Allocate power monitoring log buffer if enabled */ + eError = RGXPowmonBufferInitOnDemandResources(psDeviceNode->pvDevice); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation of on-demand power monitoring " + "resources failed", __func__)); + goto unlock_and_return; + } + } +#endif + + /* Unlock here as no further HWPerf resources are used below that would be + * affected if freed by another thread */ + OSLockRelease(psDevice->hHWPerfLock); + + /* Return if the filter is the same */ + if (!bToggle && psDevice->ui64HWPerfFilter == ui64Mask) + goto return_; + + /* Prepare command parameters ... */ + sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG; + sKccbCmd.uCmdData.sHWPerfCtrl.eOpCode = bToggle ? 
RGXFWIF_HWPERF_CTRL_TOGGLE : RGXFWIF_HWPERF_CTRL_SET; + sKccbCmd.uCmdData.sHWPerfCtrl.ui64Mask = ui64Mask; + + /* Ask the FW to carry out the HWPerf configuration command */ + eError = RGXScheduleCommandAndGetKCCBSlot(psDevice, + NULL, + RGXFWIF_DM_GP, + &sKccbCmd, + 0, + IMG_TRUE, + &ui32kCCBCommandSlot); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set new HWPerfFW filter in " + "firmware (error = %d)", __func__, eError)); + goto return_; + } + + psDevice->ui64HWPerfFilter = bToggle ? + psDevice->ui64HWPerfFilter ^ ui64Mask : ui64Mask; + + /* Wait for FW to complete */ + eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", return_); + +#if defined(DEBUG) + if (bToggle) + { + PVR_DPF((PVR_DBG_WARNING, "HWPerfFW events (%" IMG_UINT64_FMTSPECx ") have been TOGGLED", + ui64Mask)); + } + else + { + PVR_DPF((PVR_DBG_WARNING, "HWPerfFW mask has been SET to (%" IMG_UINT64_FMTSPECx ")", + ui64Mask)); + } +#endif + + return PVRSRV_OK; + +unlock_and_return: + OSLockRelease(psDevice->hHWPerfLock); + +return_: + return eError; +} + +#define HWPERF_HOST_MAX_DEFERRED_PACKETS 800 + +static PVRSRV_ERROR RGXHWPerfCtrlHostBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bToggle, + IMG_UINT32 ui32Mask) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO* psDevice = psDeviceNode->pvDevice; +#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) + IMG_UINT32 ui32OldFilter = psDevice->ui32HWPerfHostFilter; +#endif + + OSLockAcquire(psDevice->hLockHWPerfHostStream); + if (psDevice->hHWPerfHostStream == NULL) + { + eError = RGXHWPerfHostInitOnDemandResources(psDevice); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Initialisation of on-demand HWPerfHost resources failed", + __func__)); + OSLockRelease(psDevice->hLockHWPerfHostStream); + return eError; + } + } + + psDevice->ui32HWPerfHostFilter = bToggle ? 
+ psDevice->ui32HWPerfHostFilter ^ ui32Mask : ui32Mask; + + // Deferred creation of host periodic events thread + if (psDevice->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HOST_INFO)) + { + eError = PVRSRVCreateHWPerfHostThread(PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS); + PVR_LOG_IF_ERROR(eError, "PVRSRVCreateHWPerfHostThread"); + } + else if (!(psDevice->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HOST_INFO))) + { + eError = PVRSRVDestroyHWPerfHostThread(); + PVR_LOG_IF_ERROR(eError, "PVRSRVDestroyHWPerfHostThread"); + } + +#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) + // Log deferred events stats if filter changed from non-zero to zero + if ((ui32OldFilter != 0) && (psDevice->ui32HWPerfHostFilter == 0)) + { + PVR_LOG(("HWPerfHost deferred events buffer high-watermark / size: (%u / %u)", + psDevice->ui32DEHighWatermark, HWPERF_HOST_MAX_DEFERRED_PACKETS)); + + PVR_LOG(("HWPerfHost deferred event retries: WaitForAtomicCtxPktHighWatermark(%u) " + "WaitForRightOrdPktHighWatermark(%u)", + psDevice->ui32WaitForAtomicCtxPktHighWatermark, + psDevice->ui32WaitForRightOrdPktHighWatermark)); + } +#endif + + OSLockRelease(psDevice->hLockHWPerfHostStream); + +#if defined(DEBUG) + if (bToggle) + { + PVR_DPF((PVR_DBG_WARNING, "HWPerfHost events (%x) have been TOGGLED", + ui32Mask)); + } + else + { + PVR_DPF((PVR_DBG_WARNING, "HWPerfHost mask has been SET to (%x)", + ui32Mask)); + } +#endif + + return PVRSRV_OK; +} + +static PVRSRV_ERROR RGXHWPerfCtrlClientBuffer(IMG_BOOL bToggle, + IMG_UINT32 ui32InfoPageIdx, + IMG_UINT32 ui32Mask) +{ + PVRSRV_DATA *psData = PVRSRVGetPVRSRVData(); + + PVR_LOG_RETURN_IF_FALSE(ui32InfoPageIdx >= HWPERF_INFO_IDX_START && + ui32InfoPageIdx < HWPERF_INFO_IDX_END, "invalid info" + " page index", PVRSRV_ERROR_INVALID_PARAMS); + + OSLockAcquire(psData->hInfoPageLock); + psData->pui32InfoPage[ui32InfoPageIdx] = bToggle ? 
+ psData->pui32InfoPage[ui32InfoPageIdx] ^ ui32Mask : ui32Mask; + OSLockRelease(psData->hInfoPageLock); + +#if defined(DEBUG) + if (bToggle) + { + PVR_DPF((PVR_DBG_WARNING, "HWPerfClient (%u) events (%x) have been TOGGLED", + ui32InfoPageIdx, ui32Mask)); + } + else + { + PVR_DPF((PVR_DBG_WARNING, "HWPerfClient (%u) mask has been SET to (%x)", + ui32InfoPageIdx, ui32Mask)); + } +#endif + + return PVRSRV_OK; +} + +static IMG_BOOL RGXServerFeatureFlagsToHWPerfFlagsAddBlock( + RGX_HWPERF_BVNC_BLOCK * const psBlocks, + IMG_UINT16 * const pui16Count, + const IMG_UINT16 ui16BlockID, /* see RGX_HWPERF_CNTBLK_ID */ + const IMG_UINT16 ui16NumCounters, + const IMG_UINT16 ui16NumBlocks) +{ + const IMG_UINT16 ui16Count = *pui16Count; + + if (ui16Count < RGX_HWPERF_MAX_BVNC_BLOCK_LEN) + { + RGX_HWPERF_BVNC_BLOCK * const psBlock = &psBlocks[ui16Count]; + + /* If the GROUP is non-zero, convert from e.g. RGX_CNTBLK_ID_USC0 to RGX_CNTBLK_ID_USC_ALL. The table stores the former (plus the + number of blocks and counters) but PVRScopeServices expects the latter (plus the number of blocks and counters). The conversion + could always be moved to PVRScopeServices, but it's less code this way. */ + psBlock->ui16BlockID = (ui16BlockID & RGX_CNTBLK_ID_GROUP_MASK) ? 
(ui16BlockID | RGX_CNTBLK_ID_UNIT_ALL_MASK) : ui16BlockID; + psBlock->ui16NumCounters = ui16NumCounters; + psBlock->ui16NumBlocks = ui16NumBlocks; + + *pui16Count = ui16Count + 1; + return IMG_TRUE; + } + return IMG_FALSE; +} + +PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_HWPERF_BVNC *psBVNC) +{ + IMG_PCHAR pszBVNC; + PVR_LOG_RETURN_IF_FALSE((NULL != psDevInfo), "psDevInfo invalid", PVRSRV_ERROR_INVALID_PARAMS); + + if ((pszBVNC = RGXDevBVNCString(psDevInfo))) + { + size_t uiStringLength = OSStringNLength(pszBVNC, RGX_HWPERF_MAX_BVNC_LEN - 1); + OSStringLCopy(psBVNC->aszBvncString, pszBVNC, uiStringLength + 1); + memset(&psBVNC->aszBvncString[uiStringLength], 0, RGX_HWPERF_MAX_BVNC_LEN - uiStringLength); + } + else + { + *psBVNC->aszBvncString = 0; + } + + psBVNC->ui32BvncKmFeatureFlags = 0x0; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS)) + { + psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PERFBUS_FLAG; + } + +#ifdef SUPPORT_WORKLOAD_ESTIMATION + /* Not a part of BVNC feature line and so doesn't need the feature supported check */ + psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION; +#endif + + /* Define the HW counter block counts. */ + { + RGX_HWPERF_BVNC_BLOCK * const psBlocks = psBVNC->aBvncBlocks; + IMG_UINT16 * const pui16Count = &psBVNC->ui16BvncBlocks; + const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *asCntBlkTypeModel; + const IMG_UINT32 ui32CntBlkModelLen = RGXGetHWPerfBlockConfig(&asCntBlkTypeModel); + IMG_UINT32 ui32BlkCfgIdx; + size_t uiCount; + IMG_BOOL bOk = IMG_TRUE; + + // Initialise to zero blocks + *pui16Count = 0; + + // Add all the blocks + for (ui32BlkCfgIdx = 0; ui32BlkCfgIdx < ui32CntBlkModelLen; ui32BlkCfgIdx++) + { + const RGXFW_HWPERF_CNTBLK_TYPE_MODEL * const psCntBlkInfo = &asCntBlkTypeModel[ui32BlkCfgIdx]; + RGX_HWPERF_CNTBLK_RT_INFO sCntBlkRtInfo; + /* psCntBlkInfo->uiNumUnits gives compile-time info. 
For BVNC agnosticism, we use this: */ + if (psCntBlkInfo->pfnIsBlkPresent(psCntBlkInfo, psDevInfo, &sCntBlkRtInfo)) + { + bOk &= RGXServerFeatureFlagsToHWPerfFlagsAddBlock(psBlocks, pui16Count, psCntBlkInfo->uiCntBlkIdBase, RGX_CNTBLK_COUNTERS_MAX, sCntBlkRtInfo.uiNumUnits); + } + } + + /* If this fails, consider why the static_assert didn't fail, and consider increasing RGX_HWPERF_MAX_BVNC_BLOCK_LEN */ + PVR_ASSERT(bOk); + + // Zero the remaining entries + uiCount = *pui16Count; + OSDeviceMemSet(&psBlocks[uiCount], 0, (RGX_HWPERF_MAX_BVNC_BLOCK_LEN - uiCount) * sizeof(*psBlocks)); + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_HWPERF_BVNC *psBVNC) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + PVRSRV_ERROR eError; + + PVR_LOG_RETURN_IF_FALSE((NULL != psDeviceNode), "psConnection invalid", PVRSRV_ERROR_INVALID_PARAMS); + + psDevInfo = psDeviceNode->pvDevice; + eError = RGXServerFeatureFlagsToHWPerfFlags(psDevInfo, psBVNC); + + return eError; +} + +/* + PVRSRVRGXCtrlHWPerfKM + */ +PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_HWPERF_STREAM_ID eStreamId, + IMG_BOOL bToggle, + IMG_UINT64 ui64Mask) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + PVR_DPF_ENTERED; + PVR_ASSERT(psDeviceNode); + + if (eStreamId == RGX_HWPERF_STREAM_ID0_FW) + { + return RGXHWPerfCtrlFwBuffer(psDeviceNode, bToggle, ui64Mask); + } + else if (eStreamId == RGX_HWPERF_STREAM_ID1_HOST) + { + return RGXHWPerfCtrlHostBuffer(psDeviceNode, bToggle, (IMG_UINT32) ui64Mask); + } + else if (eStreamId == RGX_HWPERF_STREAM_ID2_CLIENT) + { + IMG_UINT32 ui32Index = (IMG_UINT32) (ui64Mask >> 32); + IMG_UINT32 ui32Mask = (IMG_UINT32) ui64Mask; + + return RGXHWPerfCtrlClientBuffer(bToggle, ui32Index, ui32Mask); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCtrlHWPerfKM: Unknown 
stream id.")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PVR_DPF_RETURN_OK; +} + +/* + AppHint interfaces + */ +static +PVRSRV_ERROR RGXHWPerfSetFwFilter(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT64 ui64Value) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDevNode; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(psPrivate); + + psDevNode = psPVRSRVData->psDeviceNodeList; + /* Control HWPerf on all the devices */ + while (psDevNode) + { + eError = RGXHWPerfCtrlFwBuffer(psDevNode, IMG_FALSE, ui64Value); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to set HWPerf firmware filter for device (%d)", psDevNode->sDevId.i32UMIdentifier)); + return eError; + } + psDevNode = psDevNode->psNext; + } + return PVRSRV_OK; +} + +static +PVRSRV_ERROR RGXHWPerfReadFwFilter(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT64 *pui64Value) +{ + PVRSRV_RGXDEV_INFO *psDevice; + + PVR_UNREFERENCED_PARAMETER(psPrivate); + + if (!psDeviceNode || !psDeviceNode->pvDevice) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Configuration command is applied for all devices, so filter value should + * be same for all */ + psDevice = psDeviceNode->pvDevice; + *pui64Value = psDevice->ui64HWPerfFilter; + return PVRSRV_OK; +} + +static +PVRSRV_ERROR RGXHWPerfSetHostFilter(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 ui32Value) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDevNode; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(psPrivate); + + psDevNode = psPVRSRVData->psDeviceNodeList; + /* Control HWPerf on all the devices */ + while (psDevNode) + { + eError = RGXHWPerfCtrlHostBuffer(psDevNode, IMG_FALSE, ui32Value); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to set HWPerf firmware filter for device 
(%d)", psDevNode->sDevId.i32UMIdentifier)); + return eError; + } + psDevNode = psDevNode->psNext; + } + return PVRSRV_OK; +} + +static +PVRSRV_ERROR RGXHWPerfReadHostFilter(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 *pui32Value) +{ + PVRSRV_RGXDEV_INFO *psDevice; + + PVR_UNREFERENCED_PARAMETER(psPrivate); + + if (!psDeviceNode || !psDeviceNode->pvDevice) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDevice = psDeviceNode->pvDevice; + *pui32Value = psDevice->ui32HWPerfHostFilter; + return PVRSRV_OK; +} + +static PVRSRV_ERROR _ReadClientFilter(const PVRSRV_DEVICE_NODE *psDevice, + const void *psPrivData, + IMG_UINT32 *pui32Value) +{ + PVRSRV_DATA *psData = PVRSRVGetPVRSRVData(); + IMG_UINT32 ui32Idx = (IMG_UINT32) (uintptr_t) psPrivData; + PVR_UNREFERENCED_PARAMETER(psDevice); + + OSLockAcquire(psData->hInfoPageLock); + *pui32Value = psData->pui32InfoPage[ui32Idx]; + OSLockRelease(psData->hInfoPageLock); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR _WriteClientFilter(const PVRSRV_DEVICE_NODE *psDevice, + const void *psPrivData, + IMG_UINT32 ui32Value) +{ + IMG_UINT32 ui32Idx = (IMG_UINT32) (uintptr_t) psPrivData; + PVR_UNREFERENCED_PARAMETER(psDevice); + + return RGXHWPerfCtrlClientBuffer(IMG_FALSE, ui32Idx, ui32Value); +} + +void RGXHWPerfInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRVAppHintRegisterHandlersUINT64(APPHINT_ID_HWPerfFWFilter, + RGXHWPerfReadFwFilter, + RGXHWPerfSetFwFilter, + psDeviceNode, + NULL); + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfHostFilter, + RGXHWPerfReadHostFilter, + RGXHWPerfSetHostFilter, + psDeviceNode, + NULL); +} + +void RGXHWPerfClientInitAppHintCallbacks(void) +{ + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_Services, + _ReadClientFilter, + _WriteClientFilter, + APPHINT_OF_DRIVER_NO_DEVICE, + (void *) HWPERF_FILTER_SERVICES_IDX); + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_EGL, + _ReadClientFilter, + 
_WriteClientFilter, + APPHINT_OF_DRIVER_NO_DEVICE, + (void *) HWPERF_FILTER_EGL_IDX); + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenGLES, + _ReadClientFilter, + _WriteClientFilter, + APPHINT_OF_DRIVER_NO_DEVICE, + (void *) HWPERF_FILTER_OPENGLES_IDX); + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenCL, + _ReadClientFilter, + _WriteClientFilter, + APPHINT_OF_DRIVER_NO_DEVICE, + (void *) HWPERF_FILTER_OPENCL_IDX); + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_Vulkan, + _ReadClientFilter, + _WriteClientFilter, + APPHINT_OF_DRIVER_NO_DEVICE, + (void *) HWPERF_FILTER_VULKAN_IDX); +} + +/* + PVRSRVRGXConfigureHWPerfBlocksKM + */ +PVRSRV_ERROR PVRSRVRGXConfigureHWPerfBlocksKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32CtrlWord, + IMG_UINT32 ui32ArrayLen, + RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sKccbCmd; + DEVMEM_MEMDESC* psFwBlkConfigsMemDesc; + RGX_HWPERF_CONFIG_CNTBLK* psFwArray; + IMG_UINT32 ui32kCCBCommandSlot; + PVRSRV_RGXDEV_INFO *psDevice = psDeviceNode->pvDevice; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + PVR_DPF_ENTERED; + + PVR_ASSERT(psDeviceNode); + PVR_ASSERT(ui32ArrayLen>0); + PVR_ASSERT(psBlockConfigs); + + /* Fill in the command structure with the parameters needed + */ + sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS; + sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.ui32CtrlWord = ui32CtrlWord; + sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.ui32NumBlocks = ui32ArrayLen; + + eError = DevmemFwAllocate(psDevice, + sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen, + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_UNCACHED, + 
"FwHWPerfCountersConfigBlock", + &psFwBlkConfigsMemDesc); + PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate"); + + eError = RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.sBlockConfigs, + psFwBlkConfigsMemDesc, 0, RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", fail1); + + eError = DevmemAcquireCpuVirtAddr(psFwBlkConfigsMemDesc, (void **)&psFwArray); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail2); + + OSDeviceMemCopy(psFwArray, psBlockConfigs, sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen); + DevmemPDumpLoadMem(psFwBlkConfigsMemDesc, + 0, + sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen, + PDUMP_FLAGS_CONTINUOUS); + + /* Ask the FW to carry out the HWPerf configuration command + */ + eError = RGXScheduleCommandAndGetKCCBSlot(psDevice, + NULL, + RGXFWIF_DM_GP, + &sKccbCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", fail2); + + /* Wait for FW to complete */ + eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", fail3); + + /* Release temporary memory used for block configuration + */ + RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc); + DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc); + DevmemFwUnmapAndFree(psDevice, psFwBlkConfigsMemDesc); + + PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks configured and ENABLED", ui32ArrayLen)); + + PVR_DPF_RETURN_OK; + +fail3: + DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc); +fail2: + RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc); +fail1: + DevmemFwUnmapAndFree(psDevice, psFwBlkConfigsMemDesc); + + PVR_DPF_RETURN_RC(eError); +} + + +/* + PVRSRVRGXControlHWPerfBlocksKM + */ +PVRSRV_ERROR PVRSRVRGXControlHWPerfBlocksKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_BOOL bEnable, + IMG_UINT32 ui32ArrayLen, + IMG_UINT16 * psBlockIDs) +{ + PVRSRV_ERROR 
eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sKccbCmd; + IMG_UINT32 ui32kCCBCommandSlot; + PVRSRV_RGXDEV_INFO *psDevice; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + PVR_DPF_ENTERED; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psBlockIDs != NULL, "psBlockIDs"); + PVR_LOG_RETURN_IF_INVALID_PARAM((ui32ArrayLen>0) && (ui32ArrayLen <= RGXFWIF_HWPERF_CTRL_BLKS_MAX), "ui32ArrayLen"); + + PVR_ASSERT(psDeviceNode); + psDevice = psDeviceNode->pvDevice; + + /* Fill in the command structure with the parameters needed + */ + sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS; + sKccbCmd.uCmdData.sHWPerfCtrlBlks.bEnable = bEnable; + sKccbCmd.uCmdData.sHWPerfCtrlBlks.ui32NumBlocks = ui32ArrayLen; + + OSDeviceMemCopy(sKccbCmd.uCmdData.sHWPerfCtrlBlks.aeBlockIDs, psBlockIDs, sizeof(IMG_UINT16) * ui32ArrayLen); + + + /* Ask the FW to carry out the HWPerf configuration command + */ + eError = RGXScheduleCommandAndGetKCCBSlot(psDevice, + NULL, + RGXFWIF_DM_GP, + &sKccbCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot"); + + /* Wait for FW to complete */ + eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); + + +#if defined(DEBUG) + if (bEnable) + PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks have been ENABLED", ui32ArrayLen)); + else + PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks have been DISABLED", ui32ArrayLen)); +#endif + + PVR_DPF_RETURN_OK; +} + +static INLINE IMG_UINT32 _RGXHWPerfFixBufferSize(IMG_UINT32 ui32BufSizeKB) +{ + if (ui32BufSizeKB > HWPERF_HOST_TL_STREAM_SIZE_MAX) + { + /* Size specified as a AppHint but it is too big */ + PVR_DPF((PVR_DBG_WARNING, + "RGXHWPerfHostInit: HWPerf Host buffer size " + "value (%u) too big, using maximum (%u)", + ui32BufSizeKB, HWPERF_HOST_TL_STREAM_SIZE_MAX)); + return 
HWPERF_HOST_TL_STREAM_SIZE_MAX<<10; + } + else if (ui32BufSizeKB >= HWPERF_HOST_TL_STREAM_SIZE_MIN) + { + return ui32BufSizeKB<<10; + } + else if (ui32BufSizeKB > 0) + { + /* Size specified as a AppHint but it is too small */ + PVR_DPF((PVR_DBG_WARNING, + "RGXHWPerfHostInit: HWPerf Host buffer size " + "value (%u) too small, using minimum (%u)", + ui32BufSizeKB, HWPERF_HOST_TL_STREAM_SIZE_MIN)); + return HWPERF_HOST_TL_STREAM_SIZE_MIN<<10; + } + else + { + /* 0 size implies AppHint not set or is set to zero, + * use default size from driver constant. */ + return HWPERF_HOST_TL_STREAM_SIZE_DEFAULT<<10; + } +} + +/****************************************************************************** + * RGX HW Performance Host Stream API + *****************************************************************************/ + +/*************************************************************************/ /*! +@Function RGXHWPerfHostInit + +@Description Called during driver init for initialisation of HWPerfHost + stream in the Rogue device driver. This function keeps allocated + only the minimal necessary resources, which are required for + functioning of HWPerf server module. 
+ +@Return PVRSRV_ERROR + */ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfHostInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32BufSizeKB) +{ + PVRSRV_ERROR eError; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + PVR_ASSERT(psRgxDevInfo != NULL); + + eError = OSLockCreate(&psRgxDevInfo->hLockHWPerfHostStream); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", error); + + psRgxDevInfo->hHWPerfHostStream = NULL; + psRgxDevInfo->ui32HWPerfHostFilter = 0; /* disable all events */ + psRgxDevInfo->ui32HWPerfHostNextOrdinal = 1; + psRgxDevInfo->ui32HWPerfHostBufSize = _RGXHWPerfFixBufferSize(ui32BufSizeKB); + psRgxDevInfo->pvHostHWPerfMISR = NULL; + psRgxDevInfo->pui8DeferredEvents = NULL; + /* First packet has ordinal=1, so LastOrdinal=0 will ensure ordering logic + * is maintained */ + psRgxDevInfo->ui32HWPerfHostLastOrdinal = 0; + psRgxDevInfo->hHWPerfHostSpinLock = NULL; + +error: + return eError; +} + +static void _HWPerfHostOnConnectCB(void *pvArg) +{ + PVRSRV_RGXDEV_INFO* psDevice; + PVRSRV_ERROR eError; + + RGXSRV_HWPERF_CLK_SYNC(pvArg); + + psDevice = (PVRSRV_RGXDEV_INFO*) pvArg; + + /* Handle the case where the RGX_HWPERF_HOST_INFO bit is set in the event filter + * before the host stream is opened for reading by a HWPerf client. + * Which can result in the host periodic thread sleeping for a long duration as TLStreamIsOpenForReading may return false. */ + if (psDevice->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HOST_INFO)) + { + eError = PVRSRVCreateHWPerfHostThread(PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS); + PVR_LOG_IF_ERROR(eError, "PVRSRVCreateHWPerfHostThread"); + } +} + +/* Avoiding a holder struct using fields below, as a struct gets along padding, + * packing, and other compiler dependencies, and we want a continuous stream of + * bytes for (header+data) for use in TLStreamWrite. See + * _HWPerfHostDeferredEventsEmitter(). 
+ * + * A deferred (UFO) packet is represented in memory as: + * - IMG_BOOL --> Indicates whether a packet write is + * "complete" by atomic context or not. + * - RGX_HWPERF_V2_PACKET_HDR --. + * |--> Fed together to TLStreamWrite for + * | deferred packet to be written to + * | HWPerfHost buffer + * - RGX_HWPERF_HOST_UFO_DATA---` + * + * PS: Currently only UFO events are supported in deferred list */ +#define HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE (sizeof(IMG_BOOL) +\ + sizeof(RGX_HWPERF_V2_PACKET_HDR) +\ + sizeof(RGX_HWPERF_HOST_UFO_DATA)) + +static void RGX_MISRHandler_HWPerfPostDeferredHostEvents(void *pvData); +static void _HWPerfHostDeferredEventsEmitter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_UINT32 ui32MaxOrdinal); + +/*************************************************************************/ /*! +@Function RGXHWPerfHostInitOnDemandResources + +@Description This function allocates the HWPerfHost buffer if HWPerf is + enabled at driver load time. Otherwise, these buffers are + allocated on-demand as and when required. 
+ +@Return PVRSRV_ERROR + */ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + PVRSRV_ERROR eError; + IMG_CHAR pszHWPerfHostStreamName[sizeof(PVRSRV_TL_HWPERF_HOST_SERVER_STREAM) + 5]; /* 5 makes space up to "hwperf_host_9999" streams */ + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* form the HWPerf host stream name, corresponding to this DevNode; which can make sense in the UM */ + if (OSSNPrintf(pszHWPerfHostStreamName, sizeof(pszHWPerfHostStreamName), "%s%d", + PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, + psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier) < 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to form HWPerf host stream name for device %d", + __func__, + psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = TLStreamCreate(&psRgxDevInfo->hHWPerfHostStream, + psRgxDevInfo->psDeviceNode, + pszHWPerfHostStreamName, psRgxDevInfo->ui32HWPerfHostBufSize, + TL_OPMODE_DROP_NEWER, + _HWPerfHostOnConnectCB, psRgxDevInfo, + NULL, NULL); + PVR_LOG_RETURN_IF_ERROR(eError, "TLStreamCreate"); + + eError = TLStreamSetNotifStream(psRgxDevInfo->hHWPerfHostStream, + PVRSRVGetPVRSRVData()->hTLCtrlStream); + /* we can still discover host stream so leave it as is and just log error */ + PVR_LOG_IF_ERROR(eError, "TLStreamSetNotifStream"); + + /* send the event here because host stream is implicitly opened for write + * in TLStreamCreate and TLStreamOpen is never called (so the event is + * never emitted) */ + eError = TLStreamMarkStreamOpen(psRgxDevInfo->hHWPerfHostStream); + PVR_LOG_IF_ERROR(eError, "TLStreamMarkStreamOpen"); + + /* HWPerfHost deferred events specific initialization */ + eError = OSInstallMISR(&psRgxDevInfo->pvHostHWPerfMISR, + RGX_MISRHandler_HWPerfPostDeferredHostEvents, + psRgxDevInfo, + "RGX_HWPerfDeferredEventPoster"); + PVR_LOG_GOTO_IF_ERROR(eError, "OSInstallMISR", 
err_install_misr); + + eError = OSSpinLockCreate(&psRgxDevInfo->hHWPerfHostSpinLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSSpinLockCreate", err_spinlock_create); + + psRgxDevInfo->pui8DeferredEvents = OSAllocMem(HWPERF_HOST_MAX_DEFERRED_PACKETS + * HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE); + if (NULL == psRgxDevInfo->pui8DeferredEvents) + { + PVR_DPF((PVR_DBG_ERROR, "%s: OUT OF MEMORY. Could not allocate memory for " + "HWPerfHost deferred events array", __func__)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_alloc_deferred_events; + } + psRgxDevInfo->ui16DEReadIdx = 0; + psRgxDevInfo->ui16DEWriteIdx = 0; +#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) + psRgxDevInfo->ui32DEHighWatermark = 0; + psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark = 0; + psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark = 0; +#endif + + PVR_DPF((DBGPRIV_MESSAGE, "HWPerf Host buffer size is %uKB", + psRgxDevInfo->ui32HWPerfHostBufSize)); + + return PVRSRV_OK; + +err_alloc_deferred_events: + OSSpinLockDestroy(psRgxDevInfo->hHWPerfHostSpinLock); + psRgxDevInfo->hHWPerfHostSpinLock = NULL; + +err_spinlock_create: + (void) OSUninstallMISR(psRgxDevInfo->pvHostHWPerfMISR); + psRgxDevInfo->pvHostHWPerfMISR = NULL; + +err_install_misr: + TLStreamMarkStreamClose(psRgxDevInfo->hHWPerfHostStream); + TLStreamClose(psRgxDevInfo->hHWPerfHostStream); + psRgxDevInfo->hHWPerfHostStream = NULL; + + return eError; +} + +void RGXHWPerfHostDeInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + PVRSRV_VZ_RETN_IF_MODE(GUEST); + + PVR_ASSERT (psRgxDevInfo); + + if (psRgxDevInfo->pui8DeferredEvents) + { + OSFreeMem(psRgxDevInfo->pui8DeferredEvents); + psRgxDevInfo->pui8DeferredEvents = NULL; + } + + if (psRgxDevInfo->hHWPerfHostSpinLock) + { + OSSpinLockDestroy(psRgxDevInfo->hHWPerfHostSpinLock); + psRgxDevInfo->hHWPerfHostSpinLock = NULL; + } + + if (psRgxDevInfo->pvHostHWPerfMISR) + { + (void) OSUninstallMISR(psRgxDevInfo->pvHostHWPerfMISR); + psRgxDevInfo->pvHostHWPerfMISR = NULL; + } + + if 
(psRgxDevInfo->hHWPerfHostStream) + { + /* send the event here because host stream is implicitly opened for + * write in TLStreamCreate and TLStreamClose is never called (so the + * event is never emitted) */ + TLStreamMarkStreamClose(psRgxDevInfo->hHWPerfHostStream); + TLStreamClose(psRgxDevInfo->hHWPerfHostStream); + psRgxDevInfo->hHWPerfHostStream = NULL; + } + + if (psRgxDevInfo->hLockHWPerfHostStream) + { + OSLockDestroy(psRgxDevInfo->hLockHWPerfHostStream); + psRgxDevInfo->hLockHWPerfHostStream = NULL; + } +} + +inline void RGXHWPerfHostSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Filter) +{ + PVRSRV_VZ_RETN_IF_MODE(GUEST); + psRgxDevInfo->ui32HWPerfHostFilter = ui32Filter; +} + +inline IMG_BOOL RGXHWPerfHostIsEventEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, RGX_HWPERF_HOST_EVENT_TYPE eEvent) +{ + PVR_ASSERT(psRgxDevInfo); + return (psRgxDevInfo->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(eEvent)) ? IMG_TRUE : IMG_FALSE; +} + +#define MAX_RETRY_COUNT 80 +static inline void _PostFunctionPrologue(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_UINT32 ui32CurrentOrdinal) +{ + IMG_UINT32 ui32Retry = MAX_RETRY_COUNT; + + PVR_ASSERT(psRgxDevInfo->hLockHWPerfHostStream != NULL); + PVR_ASSERT(psRgxDevInfo->hHWPerfHostStream != NULL); + + OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream); + + /* First, flush pending events (if any) */ + _HWPerfHostDeferredEventsEmitter(psRgxDevInfo, ui32CurrentOrdinal); + + while ((ui32CurrentOrdinal != psRgxDevInfo->ui32HWPerfHostLastOrdinal + 1) + && (--ui32Retry != 0)) + { + /* Release lock and give a chance to a waiting context to emit the + * expected packet */ + OSLockRelease (psRgxDevInfo->hLockHWPerfHostStream); + OSSleepms(100); + OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream); + } + +#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) + if ((ui32Retry == 0) && !(psRgxDevInfo->bWarnedPktOrdinalBroke)) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Will warn only once! 
Potential packet(s) lost after ordinal" + " %u (Current ordinal = %u)", + __func__, + psRgxDevInfo->ui32HWPerfHostLastOrdinal, ui32CurrentOrdinal)); + psRgxDevInfo->bWarnedPktOrdinalBroke = IMG_TRUE; + } + + if (psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark < (MAX_RETRY_COUNT - ui32Retry)) + { + psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark = MAX_RETRY_COUNT - ui32Retry; + } +#endif +} + +static inline void _PostFunctionEpilogue(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_UINT32 ui32CurrentOrdinal) +{ + /* update last ordinal emitted */ + psRgxDevInfo->ui32HWPerfHostLastOrdinal = ui32CurrentOrdinal; + + PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hLockHWPerfHostStream)); + OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream); +} + +static inline IMG_UINT8 *_ReserveHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Size) +{ + IMG_UINT8 *pui8Dest; + + PVRSRV_ERROR eError = TLStreamReserve(psRgxDevInfo->hHWPerfHostStream, + &pui8Dest, ui32Size); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not reserve space in %s buffer" + " (%d). 
Dropping packet.", + __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError)); + return NULL; + } + PVR_ASSERT(pui8Dest != NULL); + + return pui8Dest; +} + +static inline void _CommitHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Size) +{ + PVRSRV_ERROR eError = TLStreamCommit(psRgxDevInfo->hHWPerfHostStream, + ui32Size); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not commit data to %s" + " (%d)", __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError)); + } +} + +/* Returns IMG_TRUE if packet write passes, IMG_FALSE otherwise */ +static inline IMG_BOOL _WriteHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_V2_PACKET_HDR *psHeader) +{ + PVRSRV_ERROR eError = TLStreamWrite(psRgxDevInfo->hHWPerfHostStream, + IMG_OFFSET_ADDR(psHeader, 0), psHeader->ui32Size); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not write packet in %s buffer" + " (%d). Dropping packet.", + __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError)); + } + + /* Regardless of whether write passed/failed, we consider it "written" */ + psRgxDevInfo->ui32HWPerfHostLastOrdinal = psHeader->ui32Ordinal; + + return (eError == PVRSRV_OK); +} + +/* Helper macros for deferred events operations */ +#define GET_DE_NEXT_IDX(_curridx) ((_curridx + 1) % HWPERF_HOST_MAX_DEFERRED_PACKETS) +#define GET_DE_EVENT_BASE(_idx) (IMG_OFFSET_ADDR(psRgxDevInfo->pui8DeferredEvents, \ + (_idx) * HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE)) + +#define GET_DE_EVENT_WRITE_STATUS(_base) ((IMG_BOOL*)((void *)(_base))) +#define GET_DE_EVENT_DATA(_base) (IMG_OFFSET_ADDR((_base), sizeof(IMG_BOOL))) + +/* Emits HWPerfHost event packets present in the deferred list stopping when one + * of the following cases is hit: + * case 1: Packet ordering breaks i.e. a packet found doesn't meet ordering + * criteria (ordinal == last_ordinal + 1) + * + * case 2: A packet with ordinal > ui32MaxOrdinal is found + * + * case 3: Deferred list's (read == write) i.e. 
no more deferred packets. + * + * NOTE: Caller must possess the hLockHWPerfHostStream lock before calling + * this function.*/ +static void _HWPerfHostDeferredEventsEmitter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_UINT32 ui32MaxOrdinal) +{ + RGX_HWPERF_V2_PACKET_HDR *psHeader; + IMG_UINT32 ui32Retry; + IMG_UINT8 *pui8DeferredEvent; + IMG_BOOL *pbPacketWritten; + IMG_BOOL bWritePassed; + + PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hLockHWPerfHostStream)); + + while (psRgxDevInfo->ui16DEReadIdx != psRgxDevInfo->ui16DEWriteIdx) + { + pui8DeferredEvent = GET_DE_EVENT_BASE(psRgxDevInfo->ui16DEReadIdx); + pbPacketWritten = GET_DE_EVENT_WRITE_STATUS(pui8DeferredEvent); + psHeader = (RGX_HWPERF_V2_PACKET_HDR*) GET_DE_EVENT_DATA(pui8DeferredEvent); + + for (ui32Retry = MAX_RETRY_COUNT; !(*pbPacketWritten) && (ui32Retry != 0); ui32Retry--) + { + /* Packet not yet written, re-check after a while. Wait for a short period as + * atomic contexts are generally expected to finish fast */ + OSWaitus(10); + } + +#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) + if ((ui32Retry == 0) && !(psRgxDevInfo->bWarnedAtomicCtxPktLost)) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Will warn only once. 
Dropping a deferred packet as atomic context" + " took too long to write it", + __func__)); + psRgxDevInfo->bWarnedAtomicCtxPktLost = IMG_TRUE; + } + + if (psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark < (MAX_RETRY_COUNT - ui32Retry)) + { + psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark = MAX_RETRY_COUNT - ui32Retry; + } +#endif + + if (*pbPacketWritten) + { + if ((psHeader->ui32Ordinal > ui32MaxOrdinal) || + (psHeader->ui32Ordinal != (psRgxDevInfo->ui32HWPerfHostLastOrdinal + 1))) + { + /* Leave remaining events to be emitted by next call to this function */ + break; + } + bWritePassed = _WriteHWPerfStream(psRgxDevInfo, psHeader); + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Atomic context packet lost!", __func__)); + bWritePassed = IMG_FALSE; + } + + /* Move on to next packet */ + psRgxDevInfo->ui16DEReadIdx = GET_DE_NEXT_IDX(psRgxDevInfo->ui16DEReadIdx); + + if (!bWritePassed // if write failed + && ui32MaxOrdinal == IMG_UINT32_MAX // and we are from MISR + && psRgxDevInfo->ui16DEReadIdx != psRgxDevInfo->ui16DEWriteIdx) // and there are more events + { + /* Stop emitting here and re-schedule MISR */ + OSScheduleMISR(psRgxDevInfo->pvHostHWPerfMISR); + break; + } + } +} + +static void RGX_MISRHandler_HWPerfPostDeferredHostEvents(void *pvData) +{ + PVRSRV_RGXDEV_INFO *psRgxDevInfo = pvData; + + OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream); + + /* Since we're called from MISR, there is no upper cap of ordinal to be emitted. + * Send IMG_UINT32_MAX to signify all possible packets. 
*/ + _HWPerfHostDeferredEventsEmitter(psRgxDevInfo, IMG_UINT32_MAX); + + OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream); +} + +#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) +static inline void _UpdateDEBufferHighWatermark(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + IMG_UINT32 ui32DEWatermark; + IMG_UINT16 ui16LRead = psRgxDevInfo->ui16DEReadIdx; + IMG_UINT16 ui16LWrite = psRgxDevInfo->ui16DEWriteIdx; + + if (ui16LWrite >= ui16LRead) + { + ui32DEWatermark = ui16LWrite - ui16LRead; + } + else + { + ui32DEWatermark = (HWPERF_HOST_MAX_DEFERRED_PACKETS - ui16LRead) + (ui16LWrite); + } + + if (ui32DEWatermark > psRgxDevInfo->ui32DEHighWatermark) + { + psRgxDevInfo->ui32DEHighWatermark = ui32DEWatermark; + } +} +#endif + +/* @Description Gets the data/members that concerns the accuracy of a packet in HWPerfHost + buffer. Since the data returned by this function is required in both, an + atomic as well as a process/sleepable context, it is protected under spinlock + + @Output pui32Ordinal Pointer to ordinal number assigned to this packet + @Output pui64Timestamp Timestamp value for this packet + @Output ppui8Dest If the current context cannot sleep, pointer to a place in + deferred events buffer where the packet data should be written. + Don't care, otherwise. + */ +static void _GetHWPerfHostPacketSpecifics(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_UINT32 *pui32Ordinal, + IMG_UINT64 *pui64Timestamp, + IMG_UINT8 **ppui8Dest, + IMG_BOOL bSleepAllowed) +{ + OS_SPINLOCK_FLAGS uiFlags; + + /* Spin lock is required to avoid getting scheduled out by a higher priority + * context while we're getting header specific details and packet place in + * HWPerf buffer (when in atomic context) for ourselves */ + OSSpinLockAcquire(psRgxDevInfo->hHWPerfHostSpinLock, uiFlags); + + *pui32Ordinal = psRgxDevInfo->ui32HWPerfHostNextOrdinal++; + *pui64Timestamp = RGXTimeCorrGetClockus64(); + + if (!bSleepAllowed) + { + /* We're in an atomic context. 
So return the next position available in + * deferred events buffer */ + IMG_UINT16 ui16NewWriteIdx; + IMG_BOOL *pbPacketWritten; + + PVR_ASSERT(ppui8Dest != NULL); + + ui16NewWriteIdx = GET_DE_NEXT_IDX(psRgxDevInfo->ui16DEWriteIdx); + if (ui16NewWriteIdx == psRgxDevInfo->ui16DEReadIdx) + { + /* This shouldn't happen. HWPERF_HOST_MAX_DEFERRED_PACKETS should be + * big enough to avoid any such scenario */ +#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) + /* PVR_LOG/printk isn't recommended in atomic context. Perhaps we'll do + * this debug output here when trace_printk support is added to DDK */ +// PVR_LOG(("%s: No more space in deferred events buffer (%u/%u) W=%u,R=%u", +// __func__, psRgxDevInfo->ui32DEHighWatermark, +// HWPERF_HOST_MAX_DEFERRED_PACKETS, psRgxDevInfo->ui16DEWriteIdx, +// psRgxDevInfo->ui16DEReadIdx)); +#endif + *ppui8Dest = NULL; + } + else + { + /* Return the position where deferred event would be written */ + *ppui8Dest = GET_DE_EVENT_BASE(psRgxDevInfo->ui16DEWriteIdx); + + /* Make sure packet write "state" is "write-pending" _before_ moving write + * pointer forward */ + pbPacketWritten = GET_DE_EVENT_WRITE_STATUS(*ppui8Dest); + *pbPacketWritten = IMG_FALSE; + + psRgxDevInfo->ui16DEWriteIdx = ui16NewWriteIdx; + +#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) + _UpdateDEBufferHighWatermark(psRgxDevInfo); +#endif + } + } + + OSSpinLockRelease(psRgxDevInfo->hHWPerfHostSpinLock, uiFlags); +} + +static inline void _SetupHostPacketHeader(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_UINT8 *pui8Dest, + RGX_HWPERF_HOST_EVENT_TYPE eEvType, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32Ordinal, + IMG_UINT64 ui64Timestamp) +{ + RGX_HWPERF_V2_PACKET_HDR *psHeader = (RGX_HWPERF_V2_PACKET_HDR *) ((void *)pui8Dest); + + PVR_ASSERT(ui32Size<=RGX_HWPERF_MAX_PACKET_SIZE); + + psHeader->ui32Ordinal = ui32Ordinal; + psHeader->ui64Timestamp = ui64Timestamp; + psHeader->ui32Sig = HWPERF_PACKET_V2B_SIG; + psHeader->eTypeId = 
RGX_HWPERF_MAKE_TYPEID(RGX_HWPERF_STREAM_ID1_HOST, + eEvType, 0, 0, 0); + psHeader->ui32Size = ui32Size; +} + +static inline void _SetupHostEnqPacketData(IMG_UINT8 *pui8Dest, + RGX_HWPERF_KICK_TYPE eEnqType, + IMG_UINT32 ui32Pid, + IMG_UINT32 ui32FWDMContext, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + PVRSRV_FENCE hCheckFence, + PVRSRV_FENCE hUpdateFence, + PVRSRV_TIMELINE hUpdateTimeline, + IMG_UINT64 ui64CheckFenceUID, + IMG_UINT64 ui64UpdateFenceUID, + IMG_UINT64 ui64DeadlineInus, + IMG_UINT64 ui64CycleEstimate) +{ + RGX_HWPERF_HOST_ENQ_DATA *psData = (RGX_HWPERF_HOST_ENQ_DATA *) + IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + psData->ui32EnqType = eEnqType; + psData->ui32PID = ui32Pid; + psData->ui32ExtJobRef = ui32ExtJobRef; + psData->ui32IntJobRef = ui32IntJobRef; + psData->ui32DMContext = ui32FWDMContext; + psData->hCheckFence = hCheckFence; + psData->hUpdateFence = hUpdateFence; + psData->hUpdateTimeline = hUpdateTimeline; + psData->ui64CheckFence_UID = ui64CheckFenceUID; + psData->ui64UpdateFence_UID = ui64UpdateFenceUID; + psData->ui64DeadlineInus = ui64DeadlineInus; + psData->ui64CycleEstimate = ui64CycleEstimate; +} + +void RGXHWPerfHostPostEnqEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_KICK_TYPE eEnqType, + IMG_UINT32 ui32Pid, + IMG_UINT32 ui32FWDMContext, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + PVRSRV_FENCE hCheckFence, + PVRSRV_FENCE hUpdateFence, + PVRSRV_TIMELINE hUpdateTimeline, + IMG_UINT64 ui64CheckFenceUID, + IMG_UINT64 ui64UpdateFenceUID, + IMG_UINT64 ui64DeadlineInus, + IMG_UINT64 ui64CycleEstimate ) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT32 ui32Size = RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_ENQ_DATA); + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + NULL, IMG_TRUE); + + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + 
goto cleanup; + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_ENQ, ui32Size, + ui32Ordinal, ui64Timestamp); + _SetupHostEnqPacketData(pui8Dest, + eEnqType, + ui32Pid, + ui32FWDMContext, + ui32ExtJobRef, + ui32IntJobRef, + hCheckFence, + hUpdateFence, + hUpdateTimeline, + ui64CheckFenceUID, + ui64UpdateFenceUID, + ui64DeadlineInus, + ui64CycleEstimate); + + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + +cleanup: + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); +} + +static inline IMG_UINT32 _CalculateHostUfoPacketSize(RGX_HWPERF_UFO_EV eUfoType) +{ + IMG_UINT32 ui32Size = + (IMG_UINT32) offsetof(RGX_HWPERF_HOST_UFO_DATA, aui32StreamData); + RGX_HWPERF_UFO_DATA_ELEMENT *puData; + + switch (eUfoType) + { + case RGX_HWPERF_UFO_EV_CHECK_SUCCESS: + case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS: + ui32Size += sizeof(puData->sCheckSuccess); + break; + case RGX_HWPERF_UFO_EV_CHECK_FAIL: + case RGX_HWPERF_UFO_EV_PRCHECK_FAIL: + ui32Size += sizeof(puData->sCheckFail); + break; + case RGX_HWPERF_UFO_EV_UPDATE: + ui32Size += sizeof(puData->sUpdate); + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostUfoEvent: Invalid UFO" + " event type")); + PVR_ASSERT(IMG_FALSE); + break; + } + + return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); +} + +static inline void _SetupHostUfoPacketData(IMG_UINT8 *pui8Dest, + RGX_HWPERF_UFO_EV eUfoType, + RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData) +{ + RGX_HWPERF_HOST_UFO_DATA *psData = (RGX_HWPERF_HOST_UFO_DATA *) + IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + RGX_HWPERF_UFO_DATA_ELEMENT *puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) + psData->aui32StreamData; + + psData->eEvType = eUfoType; + /* HWPerfHost always emits 1 UFO at a time, since each UFO has 1-to-1 mapping + * with an underlying DevNode, and each DevNode has a dedicated HWPerf buffer */ + psData->ui32StreamInfo = RGX_HWPERF_MAKE_UFOPKTINFO(1, + offsetof(RGX_HWPERF_HOST_UFO_DATA, aui32StreamData)); + + 
switch (eUfoType) + { + case RGX_HWPERF_UFO_EV_CHECK_SUCCESS: + case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS: + puData->sCheckSuccess.ui32FWAddr = + psUFOData->sCheckSuccess.ui32FWAddr; + puData->sCheckSuccess.ui32Value = + psUFOData->sCheckSuccess.ui32Value; + + puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) + IMG_OFFSET_ADDR(puData, sizeof(puData->sCheckSuccess)); + break; + case RGX_HWPERF_UFO_EV_CHECK_FAIL: + case RGX_HWPERF_UFO_EV_PRCHECK_FAIL: + puData->sCheckFail.ui32FWAddr = + psUFOData->sCheckFail.ui32FWAddr; + puData->sCheckFail.ui32Value = + psUFOData->sCheckFail.ui32Value; + puData->sCheckFail.ui32Required = + psUFOData->sCheckFail.ui32Required; + + puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) + IMG_OFFSET_ADDR(puData, sizeof(puData->sCheckFail)); + break; + case RGX_HWPERF_UFO_EV_UPDATE: + puData->sUpdate.ui32FWAddr = + psUFOData->sUpdate.ui32FWAddr; + puData->sUpdate.ui32OldValue = + psUFOData->sUpdate.ui32OldValue; + puData->sUpdate.ui32NewValue = + psUFOData->sUpdate.ui32NewValue; + + puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) + IMG_OFFSET_ADDR(puData, sizeof(puData->sUpdate)); + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostUfoEvent: Invalid UFO" + " event type")); + PVR_ASSERT(IMG_FALSE); + break; + } +} + +void RGXHWPerfHostPostUfoEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_UFO_EV eUfoType, + RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData, + const IMG_BOOL bSleepAllowed) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT32 ui32Size = _CalculateHostUfoPacketSize(eUfoType); + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + IMG_BOOL *pbPacketWritten = NULL; + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + &pui8Dest, bSleepAllowed); + + if (bSleepAllowed) + { + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + goto cleanup; + } + } + else + { + if (pui8Dest == NULL) + { + // Give-up if we couldn't get a 
place in deferred events buffer + goto cleanup; + } + pbPacketWritten = GET_DE_EVENT_WRITE_STATUS(pui8Dest); + pui8Dest = GET_DE_EVENT_DATA(pui8Dest); + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_UFO, ui32Size, + ui32Ordinal, ui64Timestamp); + _SetupHostUfoPacketData(pui8Dest, eUfoType, psUFOData); + + if (bSleepAllowed) + { + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + } + else + { + *pbPacketWritten = IMG_TRUE; + OSScheduleMISR(psRgxDevInfo->pvHostHWPerfMISR); + } + +cleanup: + if (bSleepAllowed) + { + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); + } +} + +#define UNKNOWN_SYNC_NAME "UnknownSync" + +static_assert(PVRSRV_SYNC_NAME_LENGTH==PVRSRV_SYNC_NAME_LENGTH, "Sync class name max does not match Fence Sync name max"); + +static inline IMG_UINT32 _FixNameAndCalculateHostAllocPacketSize( + RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType, + const IMG_CHAR **ppsName, + IMG_UINT32 *ui32NameSize) +{ + RGX_HWPERF_HOST_ALLOC_DATA *psData; + IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_ALLOC_DATA, uAllocDetail); + + if (*ppsName != NULL && *ui32NameSize > 0) + { + /* if string longer than maximum cut it (leave space for '\0') */ + if (*ui32NameSize >= PVRSRV_SYNC_NAME_LENGTH) + *ui32NameSize = PVRSRV_SYNC_NAME_LENGTH; + } + else + { + PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfHostPostAllocEvent: Invalid" + " resource name given.")); + *ppsName = UNKNOWN_SYNC_NAME; + *ui32NameSize = sizeof(UNKNOWN_SYNC_NAME); + } + + switch (eAllocType) + { + case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC: + ui32Size += sizeof(psData->uAllocDetail.sSyncAlloc) - PVRSRV_SYNC_NAME_LENGTH + + *ui32NameSize; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: + ui32Size += sizeof(psData->uAllocDetail.sFenceAlloc) - PVRSRV_SYNC_NAME_LENGTH + + *ui32NameSize; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW: + ui32Size += sizeof(psData->uAllocDetail.sSWFenceAlloc) - PVRSRV_SYNC_NAME_LENGTH + + *ui32NameSize; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP: + 
ui32Size += sizeof(psData->uAllocDetail.sSyncCheckPointAlloc) - PVRSRV_SYNC_NAME_LENGTH + + *ui32NameSize; + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, + "RGXHWPerfHostPostAllocEvent: Invalid alloc event type")); + PVR_ASSERT(IMG_FALSE); + break; + } + + return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); +} + +static inline void _SetupHostAllocPacketData(IMG_UINT8 *pui8Dest, + RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType, + RGX_HWPERF_HOST_ALLOC_DETAIL *puAllocDetail, + const IMG_CHAR *psName, + IMG_UINT32 ui32NameSize) +{ + RGX_HWPERF_HOST_ALLOC_DATA *psData = (RGX_HWPERF_HOST_ALLOC_DATA *) + IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + + IMG_CHAR *acName = NULL; + + psData->ui32AllocType = eAllocType; + + switch (eAllocType) + { + case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC: + psData->uAllocDetail.sSyncAlloc = puAllocDetail->sSyncAlloc; + acName = psData->uAllocDetail.sSyncAlloc.acName; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: + psData->uAllocDetail.sFenceAlloc = puAllocDetail->sFenceAlloc; + acName = psData->uAllocDetail.sFenceAlloc.acName; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW: + psData->uAllocDetail.sSWFenceAlloc = puAllocDetail->sSWFenceAlloc; + acName = psData->uAllocDetail.sSWFenceAlloc.acName; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP: + psData->uAllocDetail.sSyncCheckPointAlloc = puAllocDetail->sSyncCheckPointAlloc; + acName = psData->uAllocDetail.sSyncCheckPointAlloc.acName; + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, + "RGXHWPerfHostPostAllocEvent: Invalid alloc event type")); + PVR_ASSERT(IMG_FALSE); + } + + + if (acName != NULL) + { + if (ui32NameSize) + { + OSStringLCopy(acName, psName, ui32NameSize); + } + else + { + /* In case no name was given make sure we don't access random + * memory */ + acName[0] = '\0'; + } + } +} + +void RGXHWPerfHostPostAllocEvent(PVRSRV_RGXDEV_INFO* psRgxDevInfo, + 
RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType, + const IMG_CHAR *psName, + IMG_UINT32 ui32NameSize, + RGX_HWPERF_HOST_ALLOC_DETAIL *puAllocDetail) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT64 ui64Timestamp; + IMG_UINT32 ui32Ordinal; + IMG_UINT32 ui32Size = _FixNameAndCalculateHostAllocPacketSize(eAllocType, + &psName, + &ui32NameSize); + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + NULL, IMG_TRUE); + + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + goto cleanup; + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_ALLOC, ui32Size, + ui32Ordinal, ui64Timestamp); + + _SetupHostAllocPacketData(pui8Dest, + eAllocType, + puAllocDetail, + psName, + ui32NameSize); + + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + +cleanup: + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); +} + +static inline void _SetupHostFreePacketData(IMG_UINT8 *pui8Dest, + RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType, + IMG_UINT64 ui64UID, + IMG_UINT32 ui32PID, + IMG_UINT32 ui32FWAddr) +{ + RGX_HWPERF_HOST_FREE_DATA *psData = (RGX_HWPERF_HOST_FREE_DATA *) + IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + + psData->ui32FreeType = eFreeType; + + switch (eFreeType) + { + case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC: + psData->uFreeDetail.sSyncFree.ui32FWAddr = ui32FWAddr; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: + psData->uFreeDetail.sFenceDestroy.ui64Fence_UID = ui64UID; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP: + psData->uFreeDetail.sSyncCheckPointFree.ui32CheckPt_FWAddr = ui32FWAddr; + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, + "RGXHWPerfHostPostFreeEvent: Invalid free event type")); + PVR_ASSERT(IMG_FALSE); + } +} + +void RGXHWPerfHostPostFreeEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType, + IMG_UINT64 ui64UID, + IMG_UINT32 ui32PID, + IMG_UINT32 ui32FWAddr) +{ 
+ IMG_UINT8 *pui8Dest; + IMG_UINT32 ui32Size = RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_FREE_DATA); + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + NULL, IMG_TRUE); + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + goto cleanup; + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_FREE, ui32Size, + ui32Ordinal, ui64Timestamp); + _SetupHostFreePacketData(pui8Dest, + eFreeType, + ui64UID, + ui32PID, + ui32FWAddr); + + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + +cleanup: + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); +} + +static inline IMG_UINT32 _FixNameAndCalculateHostModifyPacketSize( + RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType, + const IMG_CHAR **ppsName, + IMG_UINT32 *ui32NameSize) +{ + RGX_HWPERF_HOST_MODIFY_DATA *psData; + RGX_HWPERF_HOST_MODIFY_DETAIL *puData; + IMG_UINT32 ui32Size = sizeof(psData->ui32ModifyType); + + if (*ppsName != NULL && *ui32NameSize > 0) + { + /* first strip the terminator */ + if ((*ppsName)[*ui32NameSize - 1] == '\0') + *ui32NameSize -= 1; + /* if string longer than maximum cut it (leave space for '\0') */ + if (*ui32NameSize >= PVRSRV_SYNC_NAME_LENGTH) + *ui32NameSize = PVRSRV_SYNC_NAME_LENGTH - 1; + } + else + { + PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfHostPostModifyEvent: Invalid" + " resource name given.")); + *ppsName = UNKNOWN_SYNC_NAME; + *ui32NameSize = sizeof(UNKNOWN_SYNC_NAME) - 1; + } + + switch (eModifyType) + { + case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: + ui32Size += sizeof(puData->sFenceMerge) - PVRSRV_SYNC_NAME_LENGTH + + *ui32NameSize + 1; /* +1 for '\0' */ + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, + "RGXHWPerfHostPostModifyEvent: Invalid modify event type")); + PVR_ASSERT(IMG_FALSE); + break; + } + + return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); +} + +static inline void 
_SetupHostModifyPacketData(IMG_UINT8 *pui8Dest, + RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType, + IMG_UINT64 ui64NewUID, + IMG_UINT64 ui64UID1, + IMG_UINT64 ui64UID2, + const IMG_CHAR *psName, + IMG_UINT32 ui32NameSize) +{ + RGX_HWPERF_HOST_MODIFY_DATA *psData = (RGX_HWPERF_HOST_MODIFY_DATA *)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + + IMG_CHAR *acName = NULL; + + psData->ui32ModifyType = eModifyType; + + switch (eModifyType) + { + case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: + psData->uModifyDetail.sFenceMerge.ui64NewFence_UID = ui64NewUID; + psData->uModifyDetail.sFenceMerge.ui64InFence1_UID = ui64UID1; + psData->uModifyDetail.sFenceMerge.ui64InFence2_UID = ui64UID2; + acName = psData->uModifyDetail.sFenceMerge.acName; + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, + "RGXHWPerfHostPostModifyEvent: Invalid modify event type")); + PVR_ASSERT(IMG_FALSE); + } + + if (acName != NULL) + { + if (ui32NameSize) + { + OSStringLCopy(acName, psName, ui32NameSize); + } + else + { + /* In case no name was given make sure we don't access random + * memory */ + acName[0] = '\0'; + } + } +} + +void RGXHWPerfHostPostModifyEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType, + IMG_UINT64 ui64NewUID, + IMG_UINT64 ui64UID1, + IMG_UINT64 ui64UID2, + const IMG_CHAR *psName, + IMG_UINT32 ui32NameSize) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT64 ui64Timestamp; + IMG_UINT32 ui32Ordinal; + IMG_UINT32 ui32Size = _FixNameAndCalculateHostModifyPacketSize(eModifyType, + &psName, + &ui32NameSize); + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + NULL, IMG_TRUE); + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + goto cleanup; + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_MODIFY, ui32Size, + ui32Ordinal, ui64Timestamp); + _SetupHostModifyPacketData(pui8Dest, + 
eModifyType, + ui64NewUID, + ui64UID1, + ui64UID2, + psName, + ui32NameSize); + + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + +cleanup: + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); +} + +static inline void _SetupHostClkSyncPacketData(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT8 *pui8Dest) +{ + RGX_HWPERF_HOST_CLK_SYNC_DATA *psData = (RGX_HWPERF_HOST_CLK_SYNC_DATA *) + IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psRgxDevInfo->psRGXFWIfGpuUtilFWCb; + IMG_UINT32 ui32CurrIdx = + RGXFWIF_TIME_CORR_CURR_INDEX(psGpuUtilFWCB->ui32TimeCorrSeqCount); + RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32CurrIdx]; + + psData->ui64CRTimestamp = psTimeCorr->ui64CRTimeStamp; + psData->ui64OSTimestamp = psTimeCorr->ui64OSTimeStamp; + psData->ui32ClockSpeed = psTimeCorr->ui32CoreClockSpeed; +} + +void RGXHWPerfHostPostClkSyncEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT32 ui32Size = + RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_CLK_SYNC_DATA); + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + NULL, IMG_TRUE); + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + goto cleanup; + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_CLK_SYNC, ui32Size, + ui32Ordinal, ui64Timestamp); + _SetupHostClkSyncPacketData(psRgxDevInfo, pui8Dest); + + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + +cleanup: + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); +} + +static inline RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS _ConvDeviceHealthStatus(PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus) +{ + switch (eDeviceHealthStatus) + { + case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED; + case PVRSRV_DEVICE_HEALTH_STATUS_OK: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK; + case 
PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_RESPONDING; + case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD; + case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT; + default: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED; + } +} + +static inline RGX_HWPERF_HOST_DEVICE_HEALTH_REASON _ConvDeviceHealthReason(PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason) +{ + switch (eDeviceHealthReason) + { + case PVRSRV_DEVICE_HEALTH_REASON_NONE: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE; + case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED; + case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING; + case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS; + case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT; + case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED; + case PVRSRV_DEVICE_HEALTH_REASON_IDLING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING; + case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING; + case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS:return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS; + default: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED; + } +} + +static inline void _SetupHostDeviceInfoPacketData(RGX_HWPERF_DEV_INFO_EV eEvType, + PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus, + PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason, + IMG_UINT8 *pui8Dest) +{ + RGX_HWPERF_HOST_DEV_INFO_DATA *psData = (RGX_HWPERF_HOST_DEV_INFO_DATA *)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + psData->eEvType = eEvType; + + switch (eEvType) + { + case RGX_HWPERF_DEV_INFO_EV_HEALTH: + 
psData->uDevInfoDetail.sDeviceStatus.eDeviceHealthStatus = _ConvDeviceHealthStatus(eDeviceHealthStatus); + psData->uDevInfoDetail.sDeviceStatus.eDeviceHealthReason = _ConvDeviceHealthReason(eDeviceHealthReason); + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostDeviceInfo: Invalid event type")); + PVR_ASSERT(IMG_FALSE); + break; + } +} + +static inline IMG_UINT32 _CalculateHostDeviceInfoPacketSize(RGX_HWPERF_DEV_INFO_EV eEvType) +{ + IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_DEV_INFO_DATA, uDevInfoDetail); + + switch (eEvType) + { + case RGX_HWPERF_DEV_INFO_EV_HEALTH: + ui32Size += sizeof(((RGX_HWPERF_HOST_DEV_INFO_DATA*)0)->uDevInfoDetail.sDeviceStatus); + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostDeviceInfo: Invalid event type")); + PVR_ASSERT(IMG_FALSE); + break; + } + return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); +} + +void RGXHWPerfHostPostDeviceInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_DEV_INFO_EV eEvType, + PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus, + PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + IMG_UINT32 ui32Size; + + OSLockAcquire(psRgxDevInfo->hHWPerfLock); + + if (psRgxDevInfo->hHWPerfHostStream != (IMG_HANDLE) NULL) + { + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, NULL, IMG_TRUE); + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + ui32Size = _CalculateHostDeviceInfoPacketSize(eEvType); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) != NULL) + { + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_DEV_INFO, ui32Size, ui32Ordinal, ui64Timestamp); + _SetupHostDeviceInfoPacketData(eEvType, eDeviceHealthStatus, eDeviceHealthReason, pui8Dest); + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + } + + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); + } + + 
OSLockRelease(psRgxDevInfo->hHWPerfLock); +} + +static inline void _SetupHostInfoPacketData(RGX_HWPERF_INFO_EV eEvType, + IMG_UINT32 ui32TotalMemoryUsage, + IMG_UINT32 ui32LivePids, + PVRSRV_PER_PROCESS_MEM_USAGE *psPerProcessMemUsage, + IMG_UINT8 *pui8Dest) +{ + IMG_INT i; + RGX_HWPERF_HOST_INFO_DATA *psData = (RGX_HWPERF_HOST_INFO_DATA *)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + psData->eEvType = eEvType; + + switch (eEvType) + { + case RGX_HWPERF_INFO_EV_MEM_USAGE: + psData->uInfoDetail.sMemUsageStats.ui32TotalMemoryUsage = ui32TotalMemoryUsage; + + if (psPerProcessMemUsage) + { + for (i = 0; i < ui32LivePids; ++i) + { + psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32Pid = psPerProcessMemUsage[i].ui32Pid; + psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32KernelMemUsage = psPerProcessMemUsage[i].ui32KernelMemUsage; + psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32GraphicsMemUsage = psPerProcessMemUsage[i].ui32GraphicsMemUsage; + } + } + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostInfo: Invalid event type")); + PVR_ASSERT(IMG_FALSE); + break; + } +} + +static inline IMG_UINT32 _CalculateHostInfoPacketSize(RGX_HWPERF_INFO_EV eEvType, + IMG_UINT32 *pui32TotalMemoryUsage, + IMG_UINT32 *pui32LivePids, + PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsage) +{ + IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_INFO_DATA, uInfoDetail); + + switch (eEvType) + { + case RGX_HWPERF_INFO_EV_MEM_USAGE: +#if !defined(__QNXNTO__) + if (PVRSRVGetProcessMemUsage(pui32TotalMemoryUsage, pui32LivePids, ppsPerProcessMemUsage) == PVRSRV_OK) + { + ui32Size += ((offsetof(RGX_HWPERF_HOST_INFO_DATA, uInfoDetail.sMemUsageStats.ui32TotalMemoryUsage) - ui32Size) + + ((*pui32LivePids) * sizeof(((RGX_HWPERF_HOST_INFO_DATA*)0)->uInfoDetail.sMemUsageStats.sPerProcessUsage))); + } +#else + PVR_DPF((PVR_DBG_ERROR, "This functionality is not yet implemented for this platform")); 
+#endif + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostInfo: Invalid event type")); + PVR_ASSERT(IMG_FALSE); + break; + } + return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); +} + +void RGXHWPerfHostPostInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_INFO_EV eEvType) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT32 ui32Size; + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + IMG_UINT32 ui32TotalMemoryUsage = 0; + PVRSRV_PER_PROCESS_MEM_USAGE *psPerProcessMemUsage = NULL; + IMG_UINT32 ui32LivePids = 0; + + OSLockAcquire(psRgxDevInfo->hHWPerfLock); + + if (psRgxDevInfo->hHWPerfHostStream != (IMG_HANDLE) NULL) + { + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, NULL, IMG_TRUE); + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + ui32Size = _CalculateHostInfoPacketSize(eEvType, &ui32TotalMemoryUsage, &ui32LivePids, &psPerProcessMemUsage); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) != NULL) + { + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_INFO, ui32Size, ui32Ordinal, ui64Timestamp); + _SetupHostInfoPacketData(eEvType, ui32TotalMemoryUsage, ui32LivePids, psPerProcessMemUsage, pui8Dest); + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + } + + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); + + if (psPerProcessMemUsage) + OSFreeMemNoStats(psPerProcessMemUsage); // psPerProcessMemUsage was allocated with OSAllocZMemNoStats + } + + OSLockRelease(psRgxDevInfo->hHWPerfLock); +} + +static inline IMG_UINT32 +_CalculateHostFenceWaitPacketSize(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eWaitType) +{ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA *psSizeCalculator; + IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA, uDetail); + + switch (eWaitType) + { + case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN: + ui32Size += sizeof(psSizeCalculator->uDetail.sBegin); + break; + case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END: + ui32Size += 
sizeof(psSizeCalculator->uDetail.sEnd); + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid wait event type (%u)", __func__, + eWaitType)); + PVR_ASSERT(IMG_FALSE); + break; + } + return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); +} + +static inline void +_SetupHostFenceWaitPacketData(IMG_UINT8 *pui8Dest, + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eWaitType, + IMG_PID uiPID, + PVRSRV_FENCE hFence, + IMG_UINT32 ui32Data) +{ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA *psData = (RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA *) + IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + + psData->eType = eWaitType; + psData->uiPID = uiPID; + psData->hFence = hFence; + + switch (eWaitType) + { + case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN: + psData->uDetail.sBegin.ui32TimeoutInMs = ui32Data; + break; + case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END: + psData->uDetail.sEnd.eResult = + (RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT) ui32Data; + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid fence-wait event type", __func__)); + PVR_ASSERT(IMG_FALSE); + } +} + +void RGXHWPerfHostPostFenceWait(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType, + IMG_PID uiPID, + PVRSRV_FENCE hFence, + IMG_UINT32 ui32Data) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT32 ui32Size; + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + NULL, IMG_TRUE); + + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + ui32Size = _CalculateHostFenceWaitPacketSize(eType); + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + goto cleanup; + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_SYNC_FENCE_WAIT, + ui32Size, ui32Ordinal, ui64Timestamp); + _SetupHostFenceWaitPacketData(pui8Dest, eType, uiPID, hFence, ui32Data); + + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + 
+cleanup: + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); +} + +static inline IMG_UINT32 _CalculateHostSWTimelineAdvPacketSize(void) +{ + IMG_UINT32 ui32Size = sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA); + return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); +} + +static inline void +_SetupHostSWTimelineAdvPacketData(IMG_UINT8 *pui8Dest, + IMG_PID uiPID, + PVRSRV_TIMELINE hSWTimeline, + IMG_UINT64 ui64SyncPtIndex) + +{ + RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA *psData = (RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA *) + IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + + psData->uiPID = uiPID; + psData->hTimeline = hSWTimeline; + psData->ui64SyncPtIndex = ui64SyncPtIndex; +} + +void RGXHWPerfHostPostSWTimelineAdv(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_PID uiPID, + PVRSRV_TIMELINE hSWTimeline, + IMG_UINT64 ui64SyncPtIndex) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT32 ui32Size; + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + NULL, IMG_TRUE); + + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + ui32Size = _CalculateHostSWTimelineAdvPacketSize(); + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + goto cleanup; + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE, + ui32Size, ui32Ordinal, ui64Timestamp); + _SetupHostSWTimelineAdvPacketData(pui8Dest, uiPID, hSWTimeline, ui64SyncPtIndex); + + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + +cleanup: + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); + +} + +/****************************************************************************** + * Currently only implemented on Linux. Feature can be enabled to provide + * an interface to 3rd-party kernel modules that wish to access the + * HWPerf data. The API is documented in the rgxapi_km.h header and + * the rgx_hwperf* headers. 
+ *****************************************************************************/ + +/* Internal HWPerf kernel connection/device data object to track the state + * of a client session. + */ +typedef struct +{ + PVRSRV_DEVICE_NODE* psRgxDevNode; + PVRSRV_RGXDEV_INFO* psRgxDevInfo; + + /* TL Open/close state */ + IMG_HANDLE hSD[RGX_HWPERF_MAX_STREAM_ID]; + + /* TL Acquire/release state */ + IMG_PBYTE pHwpBuf[RGX_HWPERF_MAX_STREAM_ID]; /*!< buffer returned to user in acquire call */ + IMG_PBYTE pHwpBufEnd[RGX_HWPERF_MAX_STREAM_ID]; /*!< pointer to end of HwpBuf */ + IMG_PBYTE pTlBuf[RGX_HWPERF_MAX_STREAM_ID]; /*!< buffer obtained via TlAcquireData */ + IMG_PBYTE pTlBufPos[RGX_HWPERF_MAX_STREAM_ID]; /*!< initial position in TlBuf to acquire packets */ + IMG_PBYTE pTlBufRead[RGX_HWPERF_MAX_STREAM_ID]; /*!< pointer to the last packet read */ + IMG_UINT32 ui32AcqDataLen[RGX_HWPERF_MAX_STREAM_ID]; /*!< length of acquired TlBuf */ + IMG_BOOL bRelease[RGX_HWPERF_MAX_STREAM_ID]; /*!< used to determine whether or not to release currently held TlBuf */ + + +} RGX_KM_HWPERF_DEVDATA; + +PVRSRV_ERROR RGXHWPerfLazyConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDeviceNode; + RGX_KM_HWPERF_DEVDATA *psDevData; + RGX_HWPERF_DEVICE *psNewHWPerfDevice; + RGX_HWPERF_CONNECTION* psHWPerfConnection; + IMG_BOOL bFWActive = IMG_FALSE; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* avoid uninitialised data */ + PVR_ASSERT(*ppsHWPerfConnection == NULL); + PVR_ASSERT(psPVRSRVData); + + /* Allocate connection object */ + psHWPerfConnection = OSAllocZMem(sizeof(*psHWPerfConnection)); + if (!psHWPerfConnection) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + /* early save the return pointer to aid clean-up if failure occurs */ + *ppsHWPerfConnection = psHWPerfConnection; + + psDeviceNode = psPVRSRVData->psDeviceNodeList; + while (psDeviceNode) + { + if (psDeviceNode->eDevState != 
PVRSRV_DEVICE_STATE_ACTIVE) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: HWPerf: Device not currently active. ID:%u", + __func__, + psDeviceNode->sDevId.i32UMIdentifier)); + psDeviceNode = psDeviceNode->psNext; + continue; + } + /* Create a list node to be attached to connection object's list */ + psNewHWPerfDevice = OSAllocMem(sizeof(*psNewHWPerfDevice)); + if (!psNewHWPerfDevice) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + /* Insert node at head of the list */ + psNewHWPerfDevice->psNext = psHWPerfConnection->psHWPerfDevList; + psHWPerfConnection->psHWPerfDevList = psNewHWPerfDevice; + + /* create a device data object for kernel server */ + psDevData = OSAllocZMem(sizeof(*psDevData)); + psNewHWPerfDevice->hDevData = (IMG_HANDLE)psDevData; + if (!psDevData) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + if (OSSNPrintf(psNewHWPerfDevice->pszName, sizeof(psNewHWPerfDevice->pszName), + "hwperf_device_%d", psDeviceNode->sDevId.i32UMIdentifier) < 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to form HWPerf device name for device %d", + __func__, + psDeviceNode->sDevId.i32UMIdentifier)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDevData->psRgxDevNode = psDeviceNode; + psDevData->psRgxDevInfo = psDeviceNode->pvDevice; + + psDeviceNode = psDeviceNode->psNext; + + /* At least one device is active */ + bFWActive = IMG_TRUE; + } + + if (!bFWActive) + { + return PVRSRV_ERROR_NOT_READY; + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXHWPerfOpen(RGX_HWPERF_CONNECTION *psHWPerfConnection) +{ + RGX_KM_HWPERF_DEVDATA *psDevData; + RGX_HWPERF_DEVICE *psHWPerfDev; + PVRSRV_RGXDEV_INFO *psRgxDevInfo; + PVRSRV_ERROR eError; + IMG_CHAR pszHWPerfFwStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5]; + IMG_CHAR pszHWPerfHostStreamName[sizeof(PVRSRV_TL_HWPERF_HOST_SERVER_STREAM) + 5]; + IMG_UINT32 ui32BufSize; + + /* Disable producer callback by default for the Kernel API. 
*/ + IMG_UINT32 ui32StreamFlags = PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING | + PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* Validate input argument values supplied by the caller */ + if (!psHWPerfConnection) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psHWPerfDev = psHWPerfConnection->psHWPerfDevList; + while (psHWPerfDev) + { + psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; + psRgxDevInfo = psDevData->psRgxDevInfo; + + /* In the case where the AppHint has not been set we need to + * initialise the HWPerf resources here. Allocated on-demand + * to reduce RAM foot print on systems not needing HWPerf. + */ + OSLockAcquire(psRgxDevInfo->hHWPerfLock); + if (RGXHWPerfIsInitRequired(psRgxDevInfo)) + { + eError = RGXHWPerfInitOnDemandResources(psRgxDevInfo); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Initialisation of on-demand HWPerfFW resources failed", + __func__)); + OSLockRelease(psRgxDevInfo->hHWPerfLock); + return eError; + } + } + OSLockRelease(psRgxDevInfo->hHWPerfLock); + + OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream); + if (psRgxDevInfo->hHWPerfHostStream == NULL) + { + eError = RGXHWPerfHostInitOnDemandResources(psRgxDevInfo); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Initialisation of on-demand HWPerfHost resources failed", + __func__)); + OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream); + return eError; + } + } + OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream); + + /* form the HWPerf stream name, corresponding to this DevNode; which can make sense in the UM */ + if (OSSNPrintf(pszHWPerfFwStreamName, sizeof(pszHWPerfFwStreamName), "%s%d", + PVRSRV_TL_HWPERF_RGX_FW_STREAM, + psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier) < 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to form HWPerf stream name for device %d", + __func__, + psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier)); + return 
PVRSRV_ERROR_INVALID_PARAMS; + } + /* Open the RGX TL stream for reading in this session */ + eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE, + pszHWPerfFwStreamName, + ui32StreamFlags, + &psDevData->hSD[RGX_HWPERF_STREAM_ID0_FW]); + PVR_LOG_RETURN_IF_ERROR(eError, "TLClientOpenStream(RGX_HWPerf)"); + + /* form the HWPerf host stream name, corresponding to this DevNode; which can make sense in the UM */ + if (OSSNPrintf(pszHWPerfHostStreamName, sizeof(pszHWPerfHostStreamName), "%s%d", + PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, + psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier) < 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to form HWPerf host stream name for device %d", + __func__, + psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Open the host TL stream for reading in this session */ + eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE, + pszHWPerfHostStreamName, + PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING, + &psDevData->hSD[RGX_HWPERF_STREAM_ID1_HOST]); + PVR_LOG_RETURN_IF_ERROR(eError, "TLClientOpenStream(Host_HWPerf)"); + + /* Allocate a large enough buffer for use during the entire session to + * avoid the need to resize in the Acquire call as this might be in an ISR + * Choose size that can contain at least one packet. 
+ */ + /* Allocate buffer for FW Stream */ + ui32BufSize = FW_STREAM_BUFFER_SIZE; + psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW] = OSAllocMem(ui32BufSize); + if (psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW] == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + psDevData->pHwpBufEnd[RGX_HWPERF_STREAM_ID0_FW] = psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW]+ui32BufSize; + + /* Allocate buffer for Host Stream */ + ui32BufSize = HOST_STREAM_BUFFER_SIZE; + psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST] = OSAllocMem(ui32BufSize); + if (psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST] == NULL) + { + OSFreeMem(psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW]); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + psDevData->pHwpBufEnd[RGX_HWPERF_STREAM_ID1_HOST] = psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST]+ui32BufSize; + + psHWPerfDev = psHWPerfDev->psNext; + } + + return PVRSRV_OK; +} + + +PVRSRV_ERROR RGXHWPerfConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) +{ + PVRSRV_ERROR eError; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + eError = RGXHWPerfLazyConnect(ppsHWPerfConnection); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfLazyConnect", e0); + + eError = RGXHWPerfOpen(*ppsHWPerfConnection); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfOpen", e1); + + return PVRSRV_OK; + + e1: /* HWPerfOpen might have opened some, and then failed */ + RGXHWPerfClose(*ppsHWPerfConnection); + e0: /* LazyConnect might have allocated some resources and then failed, + * make sure they are cleaned up */ + RGXHWPerfFreeConnection(ppsHWPerfConnection); + return eError; +} + + +PVRSRV_ERROR RGXHWPerfControl( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + RGX_HWPERF_STREAM_ID eStreamId, + IMG_BOOL bToggle, + IMG_UINT64 ui64Mask) +{ + PVRSRV_ERROR eError; + RGX_KM_HWPERF_DEVDATA* psDevData; + RGX_HWPERF_DEVICE* psHWPerfDev; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* Validate input argument values supplied by the caller */ + if (!psHWPerfConnection) + { + return 
PVRSRV_ERROR_INVALID_PARAMS; + } + + psHWPerfDev = psHWPerfConnection->psHWPerfDevList; + + while (psHWPerfDev) + { + psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; + + /* Call the internal server API */ + eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDevData->psRgxDevNode, eStreamId, bToggle, ui64Mask); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM"); + + psHWPerfDev = psHWPerfDev->psNext; + } + + return PVRSRV_OK; +} + + +PVRSRV_ERROR RGXHWPerfConfigureCounters( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + IMG_UINT32 ui32CtrlWord, + IMG_UINT32 ui32NumBlocks, + RGX_HWPERF_CONFIG_CNTBLK* asBlockConfigs) +{ + PVRSRV_ERROR eError; + RGX_KM_HWPERF_DEVDATA* psDevData; + RGX_HWPERF_DEVICE *psHWPerfDev; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* Validate input argument values supplied by the caller */ + if (!psHWPerfConnection || ui32NumBlocks==0 || !asBlockConfigs) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (ui32NumBlocks > RGXFWIF_HWPERF_CTRL_BLKS_MAX) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psHWPerfDev = psHWPerfConnection->psHWPerfDevList; + + while (psHWPerfDev) + { + psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; + + /* Call the internal server API */ + eError = PVRSRVRGXConfigureHWPerfBlocksKM(NULL, + psDevData->psRgxDevNode, + ui32CtrlWord, + ui32NumBlocks, + asBlockConfigs); + + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRGXConfigureHWPerfBlocksKM"); + + psHWPerfDev = psHWPerfDev->psNext; + } + + return PVRSRV_OK; +} + + +static PVRSRV_ERROR RGXHWPerfToggleCounters( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + IMG_UINT32 ui32NumBlocks, + IMG_UINT16* aeBlockIDs, + IMG_BOOL bToggle, + const char* szFunctionString) +{ + PVRSRV_ERROR eError; + RGX_KM_HWPERF_DEVDATA* psDevData; + RGX_HWPERF_DEVICE* psHWPerfDev; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + if (!psHWPerfConnection || ui32NumBlocks==0 || !aeBlockIDs) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if 
(ui32NumBlocks > RGXFWIF_HWPERF_CTRL_BLKS_MAX) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psHWPerfDev = psHWPerfConnection->psHWPerfDevList; + + while (psHWPerfDev) + { + psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; + + /* Call the internal server API */ + eError = PVRSRVRGXControlHWPerfBlocksKM(NULL, + psDevData->psRgxDevNode, + bToggle, + ui32NumBlocks, + aeBlockIDs); + + PVR_LOG_RETURN_IF_ERROR(eError, szFunctionString); + + psHWPerfDev = psHWPerfDev->psNext; + } + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXHWPerfDisableCounters( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + IMG_UINT32 ui32NumBlocks, + IMG_UINT16* aeBlockIDs) +{ + return RGXHWPerfToggleCounters(psHWPerfConnection, + ui32NumBlocks, + aeBlockIDs, + IMG_FALSE, + __func__); +} + + +PVRSRV_ERROR RGXHWPerfEnableCounters( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + IMG_UINT32 ui32NumBlocks, + IMG_UINT16* aeBlockIDs) +{ + return RGXHWPerfToggleCounters(psHWPerfConnection, + ui32NumBlocks, + aeBlockIDs, + IMG_TRUE, + __func__); +} + + +PVRSRV_ERROR RGXHWPerfAcquireEvents( + IMG_HANDLE hDevData, + RGX_HWPERF_STREAM_ID eStreamId, + IMG_PBYTE* ppBuf, + IMG_UINT32* pui32BufLen) +{ + PVRSRV_ERROR eError; + RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData; + IMG_PBYTE pDataDest; + IMG_UINT32 ui32TlPackets = 0; + IMG_PBYTE pBufferEnd; + PVRSRVTL_PPACKETHDR psHDRptr; + PVRSRVTL_PACKETTYPE ui16TlType; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* Reset the output arguments in case we discover an error */ + *ppBuf = NULL; + *pui32BufLen = 0; + + /* Valid input argument values supplied by the caller */ + if (!psDevData || eStreamId >= RGX_HWPERF_MAX_STREAM_ID) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (psDevData->pTlBuf[eStreamId] == NULL) + { + /* Acquire some data to read from the HWPerf TL stream */ + eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, + psDevData->hSD[eStreamId], + &psDevData->pTlBuf[eStreamId], + 
&psDevData->ui32AcqDataLen[eStreamId]); + PVR_LOG_RETURN_IF_ERROR(eError, "TLClientAcquireData"); + + psDevData->pTlBufPos[eStreamId] = psDevData->pTlBuf[eStreamId]; + } + + /* TL indicates no data exists so return OK and zero. */ + if ((psDevData->pTlBufPos[eStreamId] == NULL) || (psDevData->ui32AcqDataLen[eStreamId] == 0)) + { + return PVRSRV_OK; + } + + /* Process each TL packet in the data buffer we have acquired */ + pBufferEnd = psDevData->pTlBuf[eStreamId]+psDevData->ui32AcqDataLen[eStreamId]; + pDataDest = psDevData->pHwpBuf[eStreamId]; + psHDRptr = GET_PACKET_HDR(psDevData->pTlBufPos[eStreamId]); + psDevData->pTlBufRead[eStreamId] = psDevData->pTlBufPos[eStreamId]; + while (psHDRptr < (PVRSRVTL_PPACKETHDR)((void *)pBufferEnd)) + { + ui16TlType = GET_PACKET_TYPE(psHDRptr); + if (ui16TlType == PVRSRVTL_PACKETTYPE_DATA) + { + IMG_UINT16 ui16DataLen = GET_PACKET_DATA_LEN(psHDRptr); + if (0 == ui16DataLen) + { + PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfAcquireEvents: ZERO Data in TL data packet: %p", psHDRptr)); + } + else + { + /* Check next packet does not fill buffer */ + if (pDataDest + ui16DataLen > psDevData->pHwpBufEnd[eStreamId]) + { + break; + } + + /* For valid data copy it into the client buffer and move + * the write position on */ + OSDeviceMemCopy(pDataDest, GET_PACKET_DATA_PTR(psHDRptr), ui16DataLen); + pDataDest += ui16DataLen; + } + } + else if (ui16TlType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED) + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfAcquireEvents: Indication that the transport buffer was full")); + } + else + { + /* else Ignore padding packet type and others */ + PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfAcquireEvents: Ignoring TL packet, type %d", ui16TlType )); + } + + /* Update loop variable to the next packet and increment counts */ + psHDRptr = GET_NEXT_PACKET_ADDR(psHDRptr); + /* Updated to keep track of the next packet to be read. 
*/ + psDevData->pTlBufRead[eStreamId] = (IMG_PBYTE) ((void *)psHDRptr); + ui32TlPackets++; + } + + PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfAcquireEvents: TL Packets processed %03d", ui32TlPackets)); + + psDevData->bRelease[eStreamId] = IMG_FALSE; + if (psHDRptr >= (PVRSRVTL_PPACKETHDR)((void *)pBufferEnd)) + { + psDevData->bRelease[eStreamId] = IMG_TRUE; + } + + /* Update output arguments with client buffer details and true length */ + *ppBuf = psDevData->pHwpBuf[eStreamId]; + *pui32BufLen = pDataDest - psDevData->pHwpBuf[eStreamId]; + + return PVRSRV_OK; +} + + +PVRSRV_ERROR RGXHWPerfReleaseEvents( + IMG_HANDLE hDevData, + RGX_HWPERF_STREAM_ID eStreamId) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* Valid input argument values supplied by the caller */ + if (!psDevData || eStreamId >= RGX_HWPERF_MAX_STREAM_ID) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (psDevData->bRelease[eStreamId]) + { + /* Inform the TL that we are done with reading the data. */ + eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psDevData->hSD[eStreamId]); + psDevData->ui32AcqDataLen[eStreamId] = 0; + psDevData->pTlBuf[eStreamId] = NULL; + } + else + { + psDevData->pTlBufPos[eStreamId] = psDevData->pTlBufRead[eStreamId]; + } + return eError; +} + + +PVRSRV_ERROR RGXHWPerfGetFilter( + IMG_HANDLE hDevData, + RGX_HWPERF_STREAM_ID eStreamId, + IMG_UINT64 *ui64Filter) +{ + PVRSRV_RGXDEV_INFO* psRgxDevInfo = + hDevData ? 
((RGX_KM_HWPERF_DEVDATA*) hDevData)->psRgxDevInfo : NULL; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* Valid input argument values supplied by the caller */ + if (!psRgxDevInfo) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid pointer to the RGX device", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* No need to take hHWPerfLock here since we are only reading data + * from always existing integers to return to debugfs which is an + * atomic operation. + */ + switch (eStreamId) { + case RGX_HWPERF_STREAM_ID0_FW: + *ui64Filter = psRgxDevInfo->ui64HWPerfFilter; + break; + case RGX_HWPERF_STREAM_ID1_HOST: + *ui64Filter = psRgxDevInfo->ui32HWPerfHostFilter; + break; + default: + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid stream ID", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + return PVRSRV_OK; +} + + +PVRSRV_ERROR RGXHWPerfFreeConnection(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) +{ + RGX_HWPERF_DEVICE *psHWPerfDev, *psHWPerfNextDev; + RGX_HWPERF_CONNECTION *psHWPerfConnection = *ppsHWPerfConnection; + + /* if connection object itself is NULL, nothing to free */ + if (psHWPerfConnection == NULL) + { + return PVRSRV_OK; + } + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + psHWPerfNextDev = psHWPerfConnection->psHWPerfDevList; + while (psHWPerfNextDev) + { + psHWPerfDev = psHWPerfNextDev; + psHWPerfNextDev = psHWPerfNextDev->psNext; + + /* Free the session memory */ + if (psHWPerfDev->hDevData) + OSFreeMem(psHWPerfDev->hDevData); + OSFreeMem(psHWPerfDev); + } + OSFreeMem(psHWPerfConnection); + *ppsHWPerfConnection = NULL; + + return PVRSRV_OK; +} + + +PVRSRV_ERROR RGXHWPerfClose(RGX_HWPERF_CONNECTION *psHWPerfConnection) +{ + RGX_HWPERF_DEVICE *psHWPerfDev; + RGX_KM_HWPERF_DEVDATA* psDevData; + IMG_UINT uiStreamId; + PVRSRV_ERROR eError; + + /* Check session connection is not zero */ + if (!psHWPerfConnection) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PVRSRV_VZ_RET_IF_MODE(GUEST, 
PVRSRV_ERROR_NOT_IMPLEMENTED); + + psHWPerfDev = psHWPerfConnection->psHWPerfDevList; + while (psHWPerfDev) + { + psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; + for (uiStreamId = 0; uiStreamId < RGX_HWPERF_MAX_STREAM_ID; uiStreamId++) + { + /* If the TL buffer exists they have not called ReleaseData + * before disconnecting so clean it up */ + if (psDevData->pTlBuf[uiStreamId]) + { + /* TLClientReleaseData call and null out the buffer fields + * and length */ + eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psDevData->hSD[uiStreamId]); + psDevData->ui32AcqDataLen[uiStreamId] = 0; + psDevData->pTlBuf[uiStreamId] = NULL; + PVR_LOG_IF_ERROR(eError, "TLClientReleaseData"); + /* Packets may be lost if release was not required */ + if (!psDevData->bRelease[uiStreamId]) + { + PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfClose: Events in buffer waiting to be read, remaining events may be lost.")); + } + } + + /* Close the TL stream, ignore the error if it occurs as we + * are disconnecting */ + if (psDevData->hSD[uiStreamId]) + { + eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE, + psDevData->hSD[uiStreamId]); + PVR_LOG_IF_ERROR(eError, "TLClientCloseStream"); + psDevData->hSD[uiStreamId] = NULL; + } + + /* Free the client buffer used in session */ + if (psDevData->pHwpBuf[uiStreamId]) + { + OSFreeMem(psDevData->pHwpBuf[uiStreamId]); + psDevData->pHwpBuf[uiStreamId] = NULL; + } + } + psHWPerfDev = psHWPerfDev->psNext; + } + + return PVRSRV_OK; +} + + +PVRSRV_ERROR RGXHWPerfDisconnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + eError = RGXHWPerfClose(*ppsHWPerfConnection); + PVR_LOG_IF_ERROR(eError, "RGXHWPerfClose"); + + eError = RGXHWPerfFreeConnection(ppsHWPerfConnection); + PVR_LOG_IF_ERROR(eError, "RGXHWPerfFreeConnection"); + + return eError; +} + +IMG_UINT64 RGXHWPerfConvertCRTimeStamp( + IMG_UINT32 ui32ClkSpeed, + IMG_UINT64 ui64CorrCRTimeStamp, + 
IMG_UINT64 ui64CorrOSTimeStamp, + IMG_UINT64 ui64CRTimeStamp) +{ + IMG_UINT64 ui64CRDeltaToOSDeltaKNs; + IMG_UINT64 ui64EventOSTimestamp, deltaRgxTimer, delta_ns; + + if (!(ui64CRTimeStamp) || !(ui32ClkSpeed) || !(ui64CorrCRTimeStamp) || !(ui64CorrOSTimeStamp)) + { + return 0; + } + + ui64CRDeltaToOSDeltaKNs = RGXTimeCorrGetConversionFactor(ui32ClkSpeed); + + /* RGX CR timer ticks delta */ + deltaRgxTimer = ui64CRTimeStamp - ui64CorrCRTimeStamp; + /* RGX time delta in nanoseconds */ + delta_ns = RGXFWIF_GET_DELTA_OSTIME_NS(deltaRgxTimer, ui64CRDeltaToOSDeltaKNs); + /* Calculate OS time of HWPerf event */ + ui64EventOSTimestamp = ui64CorrOSTimeStamp + delta_ns; + + return ui64EventOSTimestamp; +} + +/****************************************************************************** + End of file (rgxhwperf.c) + ******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxhwperf.h b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxhwperf.h new file mode 100644 index 000000000000..74dd262d06dc --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxhwperf.h @@ -0,0 +1,495 @@ +/*************************************************************************/ /*! +@File +@Title RGX HW Performance header file +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the RGX HWPerf functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ /**************************************************************************/

#ifndef RGXHWPERF_H_
#define RGXHWPERF_H_

#include "img_types.h"
#include "img_defs.h"
#include "pvrsrv_error.h"

#include "device.h"
#include "connection_server.h"
#include "rgxdevice.h"
#include "rgx_hwperf.h"

/* HWPerf host buffer size constraints in KBs */
#define HWPERF_HOST_TL_STREAM_SIZE_DEFAULT PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB
#define HWPERF_HOST_TL_STREAM_SIZE_MIN (32U)
#define HWPERF_HOST_TL_STREAM_SIZE_MAX (3072U)

/******************************************************************************
 * RGX HW Performance decode Bvnc Features for HWPerf
 *****************************************************************************/
PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
                                                RGX_HWPERF_BVNC *psBVNC);

PVRSRV_ERROR PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(CONNECTION_DATA *psConnection,
                                                  PVRSRV_DEVICE_NODE *psDeviceNode,
                                                  RGX_HWPERF_BVNC *psBVNC);

/******************************************************************************
 * RGX HW Performance Data Transport Routines
 *****************************************************************************/

/* NOTE(review): parameter is a PVRSRV_DEVICE_NODE* but is named psDevInfo --
 * misleading; callers (e.g. the MISR) pass the device node. */
PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE* psDevInfo);

PVRSRV_ERROR RGXHWPerfInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo);
PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo);
void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo);
void RGXHWPerfInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode);
void RGXHWPerfClientInitAppHintCallbacks(void);

/******************************************************************************
 * RGX HW Performance Profiling API(s)
 *****************************************************************************/

PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM(
		CONNECTION_DATA      * psConnection,
		PVRSRV_DEVICE_NODE   * psDeviceNode,
		RGX_HWPERF_STREAM_ID   eStreamId,
		IMG_BOOL               bToggle,
		IMG_UINT64             ui64Mask);

PVRSRV_ERROR PVRSRVRGXConfigureHWPerfBlocksKM(
		CONNECTION_DATA          * psConnection,
		PVRSRV_DEVICE_NODE       * psDeviceNode,
		IMG_UINT32                 ui32CtrlWord,
		IMG_UINT32                 ui32ArrayLen,
		RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs);

PVRSRV_ERROR PVRSRVRGXControlHWPerfBlocksKM(
		CONNECTION_DATA    * psConnection,
		PVRSRV_DEVICE_NODE * psDeviceNode,
		IMG_BOOL             bEnable,
		IMG_UINT32           ui32ArrayLen,
		IMG_UINT16         * psBlockIDs);

/******************************************************************************
 * RGX HW Performance Host Stream API
 *****************************************************************************/

PVRSRV_ERROR RGXHWPerfHostInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32BufSizeKB);
PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo);
void RGXHWPerfHostDeInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo);

void RGXHWPerfHostSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
                                 IMG_UINT32 ui32Filter);

void RGXHWPerfHostPostEnqEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
                               RGX_HWPERF_KICK_TYPE eEnqType,
                               IMG_UINT32 ui32Pid,
                               IMG_UINT32 ui32FWDMContext,
                               IMG_UINT32 ui32ExtJobRef,
                               IMG_UINT32 ui32IntJobRef,
                               PVRSRV_FENCE hCheckFence,
                               PVRSRV_FENCE hUpdateFence,
                               PVRSRV_TIMELINE hUpdateTimeline,
                               IMG_UINT64 ui64CheckFenceUID,
                               IMG_UINT64 ui64UpdateFenceUID,
                               IMG_UINT64 ui64DeadlineInus,
                               IMG_UINT64 ui64CycleEstimate);

void RGXHWPerfHostPostAllocEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
                                 RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType,
                                 const IMG_CHAR *psName,
                                 IMG_UINT32 ui32NameSize,
                                 RGX_HWPERF_HOST_ALLOC_DETAIL *puAllocDetail);

void RGXHWPerfHostPostFreeEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
                                RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType,
                                IMG_UINT64 ui64UID,
                                IMG_UINT32 ui32PID,
                                IMG_UINT32 ui32FWAddr);

void RGXHWPerfHostPostModifyEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
                                  RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType,
                                  IMG_UINT64 ui64NewUID,
                                  IMG_UINT64 ui64UID1,
                                  IMG_UINT64 ui64UID2,
                                  const IMG_CHAR *psName,
                                  IMG_UINT32 ui32NameSize);

void RGXHWPerfHostPostUfoEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
                               RGX_HWPERF_UFO_EV eUfoType,
                               RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData,
                               const IMG_BOOL bSleepAllowed);

void RGXHWPerfHostPostClkSyncEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo);

/* NOTE(review): "eDeviceHeathReason" (sic, missing 'l') matches the upstream
 * DDK sources; parameter names in prototypes are non-binding, left as-is. */
void RGXHWPerfHostPostDeviceInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
                                 RGX_HWPERF_DEV_INFO_EV eEvType,
                                 PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus,
                                 PVRSRV_DEVICE_HEALTH_REASON eDeviceHeathReason);

void RGXHWPerfHostPostInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
                           RGX_HWPERF_INFO_EV eEvType);

void RGXHWPerfHostPostFenceWait(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
                                RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType,
                                IMG_PID uiPID,
                                PVRSRV_FENCE hFence,
                                IMG_UINT32 ui32Data);

void RGXHWPerfHostPostSWTimelineAdv(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
                                    IMG_PID uiPID,
                                    PVRSRV_TIMELINE hSWTimeline,
                                    IMG_UINT64 ui64SyncPtIndex);

IMG_BOOL RGXHWPerfHostIsEventEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, RGX_HWPERF_HOST_EVENT_TYPE eEvent);

/* True when event EV passes the per-device HWPerf host filter; CTX is a
 * kick context holding a device-node pointer. */
#define _RGX_HWPERF_HOST_FILTER(CTX, EV) \
		(((PVRSRV_RGXDEV_INFO *)CTX->psDeviceNode->pvDevice)->ui32HWPerfHostFilter \
		& RGX_HWPERF_EVENT_MASK_VALUE(EV))

/* Device-info lookup helpers for the RGXSRV_HWPERF_* macros below. */
#define _RGX_DEVICE_INFO_FROM_CTX(CTX) \
		((PVRSRV_RGXDEV_INFO *)CTX->psDeviceNode->pvDevice)

#define _RGX_DEVICE_INFO_FROM_NODE(DEVNODE) \
		((PVRSRV_RGXDEV_INFO *)DEVNODE->pvDevice)

/* Deadline and cycle estimate is not supported for all ENQ events */
#define NO_DEADLINE 0
#define NO_CYCEST 0


#if defined(SUPPORT_RGX)

/**
 * This macro checks if HWPerfHost and the event are enabled and if they are
 * it posts event to the HWPerfHost stream.
+ * + * @param C Kick context + * @param P Pid of kicking process + * @param X Related FW context + * @param E External job reference + * @param I Job ID + * @param K Kick type + * @param CF Check fence handle + * @param UF Update fence handle + * @param UT Update timeline (on which above UF was created) handle + * @param CHKUID Check fence UID + * @param UPDUID Update fence UID + * @param D Deadline + * @param CE Cycle estimate + */ +#define RGXSRV_HWPERF_ENQ(C, P, X, E, I, K, CF, UF, UT, CHKUID, UPDUID) \ + do { \ + if (_RGX_HWPERF_HOST_FILTER(C, RGX_HWPERF_HOST_ENQ)) \ + { \ + RGXHWPerfHostPostEnqEvent(_RGX_DEVICE_INFO_FROM_CTX(C), \ + (K), (P), (X), (E), (I), \ + (CF), (UF), (UT), \ + (CHKUID), (UPDUID), 0, 0); \ + } \ + } while (0) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. + * + * @param I Device Info pointer + * @param T Host UFO event type + * @param D Pointer to UFO data + * @param S Is sleeping allowed? + */ +#define RGXSRV_HWPERF_UFO(I, T, D, S) \ + do { \ + if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_UFO)) \ + { \ + RGXHWPerfHostPostUfoEvent((I), (T), (D), (S)); \ + } \ + } while (0) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. 
+ * + * @param D Device node pointer + * @param T Host ALLOC event type + * @param FWADDR sync firmware address + * @param N string containing sync name + * @param Z string size including null terminating character + */ +#define RGXSRV_HWPERF_ALLOC(D, T, FWADDR, N, Z) \ + do { \ + if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \ + { \ + RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \ + uAllocDetail.sSyncAlloc.ui32FWAddr = (FWADDR); \ + RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ + RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \ + (N), (Z), &uAllocDetail); \ + } \ + } while (0) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. + * + * @param D Device Node pointer + * @param PID ID of allocating process + * @param FENCE PVRSRV_FENCE object + * @param FWADDR sync firmware address + * @param N string containing sync name + * @param Z string size including null terminating character + */ +#define RGXSRV_HWPERF_ALLOC_FENCE(D, PID, FENCE, FWADDR, N, Z) \ + do { \ + if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \ + { \ + RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \ + uAllocDetail.sFenceAlloc.uiPID = (PID); \ + uAllocDetail.sFenceAlloc.hFence = (FENCE); \ + uAllocDetail.sFenceAlloc.ui32CheckPt_FWAddr = (FWADDR); \ + RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ + RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR, \ + N, Z, &uAllocDetail); \ + } \ + } while (0) + +/** + * @param D Device Node pointer + * @param TL PVRSRV_TIMELINE on which CP is allocated + * @param PID Allocating process ID of this TL/FENCE + * @param FENCE PVRSRV_FENCE as passed to SyncCheckpointResolveFence OR PVRSRV_NO_FENCE + * @param FWADDR sync firmware address + * @param N string containing sync name + * @param Z string size including null terminating character + */ +#define RGXSRV_HWPERF_ALLOC_SYNC_CP(D, TL, PID, FENCE, FWADDR, N, Z) 
\ + do { \ + if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \ + { \ + RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \ + uAllocDetail.sSyncCheckPointAlloc.ui32CheckPt_FWAddr = (FWADDR); \ + uAllocDetail.sSyncCheckPointAlloc.hTimeline = (TL); \ + uAllocDetail.sSyncCheckPointAlloc.uiPID = (PID); \ + uAllocDetail.sSyncCheckPointAlloc.hFence = (FENCE); \ + RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ + RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP, \ + N, Z, &uAllocDetail); \ + } \ + } while (0) + +/** + * @param D Device Node pointer + * @param PID ID of allocating process + * @param SW_FENCE PVRSRV_FENCE object + * @param SW_TL PVRSRV_TIMELINE on which SW_FENCE is allocated + * @param SPI Sync point index on the SW_TL on which this SW_FENCE is allocated + * @param N string containing sync name + * @param Z string size including null terminating character + */ +#define RGXSRV_HWPERF_ALLOC_SW_FENCE(D, PID, SW_FENCE, SW_TL, SPI, N, Z) \ + do { \ + if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \ + { \ + RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \ + uAllocDetail.sSWFenceAlloc.uiPID = (PID); \ + uAllocDetail.sSWFenceAlloc.hSWFence = (SW_FENCE); \ + uAllocDetail.sSWFenceAlloc.hSWTimeline = (SW_TL); \ + uAllocDetail.sSWFenceAlloc.ui64SyncPtIndex = (SPI); \ + RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ + RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW, \ + N, Z, &uAllocDetail); \ + } \ + } while (0) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. 
+ * + * @param D Device Node pointer + * @param T Host ALLOC event type + * @param FWADDR sync firmware address + */ +#define RGXSRV_HWPERF_FREE(D, T, FWADDR) \ + do { \ + if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_FREE)) \ + { \ + RGXHWPerfHostPostFreeEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ + RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \ + (0), (0), (FWADDR)); \ + } \ + } while (0) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. + * + * @param D Device Node pointer + * @param T Host ALLOC event type + * @param UID ID of input object + * @param PID ID of allocating process + * @param FWADDR sync firmware address + */ +#define RGXSRV_HWPERF_FREE_FENCE_SYNC(D, T, UID, PID, FWADDR) \ + do { \ + if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_FREE)) \ + { \ + RGXHWPerfHostPostFreeEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ + RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \ + (UID), (PID), (FWADDR)); \ + } \ + } while (0) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. + * + * @param D Device Node pointer + * @param T Host ALLOC event type + * @param NEWUID ID of output object + * @param UID1 ID of first input object + * @param UID2 ID of second input object + * @param N string containing new object's name + * @param Z string size including null terminating character + */ +#define RGXSRV_HWPERF_MODIFY_FENCE_SYNC(D, T, NEWUID, UID1, UID2, N, Z) \ + do { \ + if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_MODIFY)) \ + { \ + RGXHWPerfHostPostModifyEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ + RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \ + (NEWUID), (UID1), (UID2), N, Z); \ + } \ + } while (0) + + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. 
+ * + * @param I Device info pointer + */ +#define RGXSRV_HWPERF_CLK_SYNC(I) \ + do { \ + if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_CLK_SYNC)) \ + { \ + RGXHWPerfHostPostClkSyncEvent((I)); \ + } \ + } while (0) + + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts a device info event to the HWPerfHost stream. + * + * @param I Device info pointer + * @param T Event type + * @param H Health status enum + * @param R Health reason enum + */ +#define RGXSRV_HWPERF_DEVICE_INFO(I, T, H, R) \ + do { \ + if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_DEV_INFO)) \ + { \ + RGXHWPerfHostPostDeviceInfo((I), (T), (H), (R)); \ + } \ + } while (0) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. + * + * @param I Device info pointer + * @param T Event type + */ +#define RGXSRV_HWPERF_HOST_INFO(I, T) \ +do { \ + if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_INFO)) \ + { \ + RGXHWPerfHostPostInfo((I), (T)); \ + } \ +} while (0) + +/** + * @param I Device info pointer + * @param T Wait Event type + * @param PID Process ID that the following fence belongs to + * @param F Fence handle + * @param D Data for this wait event type + */ +#define RGXSRV_HWPERF_SYNC_FENCE_WAIT(I, T, PID, F, D) \ +do { \ + if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_SYNC_FENCE_WAIT)) \ + { \ + RGXHWPerfHostPostFenceWait(I, RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_##T, \ + (PID), (F), (D)); \ + } \ +} while (0) + +/** + * @param I Device info pointer + * @param PID Process ID that the following timeline belongs to + * @param F SW-timeline handle + * @param SPI Sync-pt index where this SW-timeline has reached + */ +#define RGXSRV_HWPERF_SYNC_SW_TL_ADV(I, PID, SW_TL, SPI)\ +do { \ + if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE)) \ + { \ + RGXHWPerfHostPostSWTimelineAdv((I), (PID), (SW_TL), (SPI)); \ + } \ +} while (0) +#else + 
/* !SUPPORT_RGX: all HWPerf host event posting macros compile away to nothing.
 * Arities here are the authoritative interface and must stay in step with the
 * SUPPORT_RGX definitions above (ENQ takes 13 arguments incl. deadline and
 * cycle estimate). */
#define RGXSRV_HWPERF_ENQ(C, P, X, E, I, K, CF, UF, UT, CHKUID, UPDUID, D, CE)
#define RGXSRV_HWPERF_UFO(I, T, D, S)
#define RGXSRV_HWPERF_ALLOC(D, T, FWADDR, N, Z)
#define RGXSRV_HWPERF_ALLOC_FENCE(D, PID, FENCE, FWADDR, N, Z)
#define RGXSRV_HWPERF_ALLOC_SYNC_CP(D, TL, PID, FENCE, FWADDR, N, Z)
#define RGXSRV_HWPERF_ALLOC_SW_FENCE(D, PID, SW_FENCE, SW_TL, SPI, N, Z)
#define RGXSRV_HWPERF_FREE(D, T, FWADDR)
#define RGXSRV_HWPERF_FREE_FENCE_SYNC(D, T, UID, PID, FWADDR)
#define RGXSRV_HWPERF_MODIFY_FENCE_SYNC(D, T, NEWUID, UID1, UID2, N, Z)
#define RGXSRV_HWPERF_CLK_SYNC(I)
#define RGXSRV_HWPERF_DEVICE_INFO(I, T, H, R)
#define RGXSRV_HWPERF_HOST_INFO(I, T)
#define RGXSRV_HWPERF_SYNC_FENCE_WAIT(I, T, PID, F, D)
#define RGXSRV_HWPERF_SYNC_SW_TL_ADV(I, PID, SW_TL, SPI)

#endif

#endif /* RGXHWPERF_H_ */
diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxinit.c b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxinit.c
new file mode 100644
index 000000000000..023764f20c34
--- /dev/null
+++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxinit.c
@@ -0,0 +1,4502 @@
/*************************************************************************/ /*!
@File
@Title          Device specific initialisation routines
@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
@Description    Device specific functions
@License        Dual MIT/GPLv2

The contents of this file are subject to the MIT license as set out below.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ /**************************************************************************/

/* NOTE(review): the targets of the two conditional #include lines below were
 * lost when this patch was extracted (empty #include directives). Restore
 * them from the upstream DDK sources before building. */
#if defined(LINUX)
#include
#else
#include
#endif

#include "img_defs.h"
#include "pvr_notifier.h"
#include "pvrsrv.h"
#include "pvrsrv_bridge_init.h"
#include "syscommon.h"
#include "rgx_heaps.h"
#include "rgxheapconfig.h"
#include "rgxdefs_km.h"
#include "rgxpower.h"
#include "tlstream.h"
#include "pvrsrv_tlstreams.h"

#include "rgxinit.h"
#include "rgxbvnc.h"

#include "pdump_km.h"
#include "handle.h"
#include "allocmem.h"
#include "devicemem.h"
#include "devicemem_pdump.h"
#include "rgxmem.h"
#include "sync_internal.h"
#include "pvrsrv_apphint.h"
#include "oskm_apphint.h"
#include "rgxfwdbg.h"
#include "info_page.h"

#include "rgxutils.h"
#include "rgxfwutils.h"
#include "rgx_fwif_km.h"

#include "rgxmmuinit.h"
#include "devicemem_utils.h"
#include "devicemem_server.h"
#include "physmem_osmem.h"
#include "physmem_lma.h"

#include "rgxdebug.h"
#include "rgxhwperf.h"
#include "htbserver.h"

#include "rgx_options.h"
#include "pvrversion.h"

#include "rgx_compat_bvnc.h"

#include "rgx_heaps.h"

#include "rgxta3d.h"
#include "rgxtimecorr.h"
#include "rgxshader.h"

#if defined(PDUMP)
#include "rgxstartstop.h"
#endif

#include "rgx_fwif_alignchecks.h"
#include "vmm_pvz_client.h"

#if defined(SUPPORT_WORKLOAD_ESTIMATION)
#include "rgxworkest.h"
#endif
#if defined(SUPPORT_PDVFS)
#include "rgxpdvfs.h"
#endif

#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) && defined(PDUMP) && defined(NO_HARDWARE)
#include "validation_soc.h"
#endif

#if defined(PDUMP) && defined(SUPPORT_SECURITY_VALIDATION)
#include "pdump_physmem.h"
#endif

/* Forward declarations of device-callback implementations defined later in
 * this file. */
static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_CHAR **ppszVersionString);
static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_PUINT32 pui32RGXClockSpeed);
static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64ResetValue, IMG_UINT64 ui64SPUResetValue);
static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode);

#if (RGX_NUM_OS_SUPPORTED > 1)
static PVRSRV_ERROR RGXInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32OSid);
static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap);
#endif

#if defined(SUPPORT_AUTOVZ)
/* Compound-literal MMU page-table allocator setup backed by the FW MMU
 * reserved-memory arena (LMA page-table callbacks). */
#define RGX_FW_MMU_RESERVED_MEM_SETUP(devnode)	(MMU_PX_SETUP) { \
	LMA_PhyContigPagesAlloc, \
	LMA_PhyContigPagesFree, \
	LMA_PhyContigPagesMap, \
	LMA_PhyContigPagesUnmap, \
	LMA_PhyContigPagesClean, \
	OSGetPageShift(), \
	(devnode)->psFwMMUReservedMemArena \
}
#endif


#define RGX_MMU_LOG2_PAGE_SIZE_4KB   (12)
#define RGX_MMU_LOG2_PAGE_SIZE_16KB  (14)
#define RGX_MMU_LOG2_PAGE_SIZE_64KB  (16)
#define RGX_MMU_LOG2_PAGE_SIZE_256KB (18)
#define RGX_MMU_LOG2_PAGE_SIZE_1MB   (20)
#define RGX_MMU_LOG2_PAGE_SIZE_2MB   (21)

#define RGX_MMU_PAGE_SIZE_4KB   (   4 * 1024)
#define RGX_MMU_PAGE_SIZE_16KB  (  16 * 1024)
#define RGX_MMU_PAGE_SIZE_64KB  (  64 * 1024)
#define RGX_MMU_PAGE_SIZE_256KB ( 256 * 1024)
#define RGX_MMU_PAGE_SIZE_1MB   (1024 * 1024)
#define RGX_MMU_PAGE_SIZE_2MB   (2048 * 1024)
#define RGX_MMU_PAGE_SIZE_MIN RGX_MMU_PAGE_SIZE_4KB
#define RGX_MMU_PAGE_SIZE_MAX RGX_MMU_PAGE_SIZE_2MB

#define VAR(x) #x

#define MAX_BVNC_LEN (12)
#define RGXBVNC_BUFFER_SIZE (((PVRSRV_MAX_DEVICES)*(MAX_BVNC_LEN))+1)

static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo);

#if defined(PVRSRV_DEBUG_LISR_EXECUTION)

/* bits used by the LISR to provide a trace of its last execution */
#define RGX_LISR_DEVICE_NOT_POWERED (1 << 0)
#define RGX_LISR_FWIF_POW_OFF       (1 << 1)
#define RGX_LISR_EVENT_EN           (1 << 2)
#define RGX_LISR_COUNTS_EQUAL       (1 << 3)
#define RGX_LISR_PROCESSED          (1 << 4)

typedef struct _LISR_EXECUTION_INFO_
{
	/* bit mask showing execution flow of last LISR invocation */
	IMG_UINT32 ui32State;
	/* snapshot from the last LISR invocation, regardless of
	 * whether an interrupt was handled
	 */
	IMG_UINT32 aui32InterruptCountSnapshot[RGXFW_THREAD_NUM];
	/* time of the last LISR invocation */
	IMG_UINT64 ui64Clockns;
} LISR_EXECUTION_INFO;

/* information about the last execution of the LISR */
static LISR_EXECUTION_INFO g_sLISRExecutionInfo;

#endif

/* Read the OS0 IRQ event status register and, if any FW thread raised an
 * event, acknowledge it by writing the thread mask back to the clear
 * register. Returns IMG_FALSE for a spurious interrupt (no event bits set). */
IMG_BOOL RGXFwIrqEventRx(PVRSRV_RGXDEV_INFO *psDevInfo)
{
	IMG_BOOL bIrqRx = IMG_TRUE;
	IMG_UINT32 ui32IRQStatus, ui32IRQThreadMask;

	/* virtualisation note:
	 * status & clearing registers are available on both Host and Guests.
	 * Due to the remappings done by the 2nd stage device MMU, all drivers
	 * assume they are accessing register bank 0 */
	ui32IRQStatus = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_IRQ_OS0_EVENT_STATUS);
	ui32IRQThreadMask = (ui32IRQStatus & ~RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_CLRMSK);
	if (ui32IRQThreadMask != 0)
	{
		/* acknowledge and clear the interrupt */
		OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_IRQ_OS0_EVENT_CLEAR, ui32IRQThreadMask);
	}
	else
	{
		/* spurious interrupt */
		bIrqRx = IMG_FALSE;
	}

	return bIrqRx;
}

#if !defined(NO_HARDWARE)
/*************************************************************************/ /*!
@Function       SampleIRQCount
@Description    Utility function taking snapshots of RGX FW interrupt count.
@Input          paui32Input    A pointer to RGX FW IRQ count array.
                               Size of the array should be equal to RGX FW thread
                               count.
@Input          paui32Output   A pointer to array containing sampled RGX FW
                               IRQ counts
@Return         IMG_BOOL       Returns IMG_TRUE, if RGX FW IRQ is not equal to
                               sampled RGX FW IRQ count for any RGX FW thread.
*/ /**************************************************************************/
static INLINE IMG_BOOL SampleIRQCount(volatile IMG_UINT32 *paui32Input,
                                      volatile IMG_UINT32 *paui32Output)
{
	IMG_UINT32 ui32TID;
	IMG_BOOL bReturnVal = IMG_FALSE;

	for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
	{
		if (paui32Output[ui32TID] != paui32Input[ui32TID])
		{
			/**
			 * we are handling any unhandled interrupts here so align the host
			 * count with the FW count
			 */

			/* Sample the current count from the FW _after_ we've cleared the interrupt. */
			paui32Output[ui32TID] = paui32Input[ui32TID];
			bReturnVal = IMG_TRUE;
		}
	}

	return bReturnVal;
}

/* Called when a wait for FW interrupts has timed out: dumps diagnostic state
 * (LISR trace if PVRSRV_DEBUG_LISR_EXECUTION is on) and reports whether the
 * MISR should be kicked because unhandled IRQ counts were found. */
static IMG_BOOL _WaitForInterruptsTimeoutCheck(PVRSRV_RGXDEV_INFO *psDevInfo)
{
	RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
	IMG_BOOL bScheduleMISR = IMG_FALSE;
#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
	IMG_UINT32 ui32TID;
#endif

	RGXDEBUG_PRINT_IRQ_COUNT(psDevInfo);

#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
	PVR_DPF((PVR_DBG_ERROR,
			 "Last RGX_LISRHandler State: 0x%08X Clock: %llu",
			 g_sLISRExecutionInfo.ui32State,
			 g_sLISRExecutionInfo.ui64Clockns));

	for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
	{
		PVR_DPF((PVR_DBG_ERROR,
				 "RGX FW thread %u: InterruptCountSnapshot: 0x%X",
				 ui32TID, g_sLISRExecutionInfo.aui32InterruptCountSnapshot[ui32TID]));
	}
#else
	PVR_DPF((PVR_DBG_ERROR, "No further information available. Please enable PVRSRV_DEBUG_LISR_EXECUTION"));
#endif


	if (psFwSysData->ePowState != RGXFWIF_POW_OFF)
	{
		PVR_DPF((PVR_DBG_ERROR, "_WaitForInterruptsTimeout: FW pow state is not OFF (is %u)",
				(unsigned int) psFwSysData->ePowState))
		;
	}

	bScheduleMISR = SampleIRQCount(psFwSysData->aui32InterruptCount,
								   psDevInfo->aui32SampleIRQCount);
	return bScheduleMISR;
}

/* Timeout path entry point: on GUEST always kick the MISR; otherwise only if
 * the timeout check found unconsumed FW interrupts. Also kicks the Active
 * Power Management MISR when present. */
void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo)
{
	IMG_BOOL bScheduleMISR;

	if (PVRSRV_VZ_MODE_IS(GUEST))
	{
		bScheduleMISR = IMG_TRUE;
	}
	else
	{
		bScheduleMISR = _WaitForInterruptsTimeoutCheck(psDevInfo);
	}

	if (bScheduleMISR)
	{
		OSScheduleMISR(psDevInfo->pvMISRData);

		if (psDevInfo->pvAPMISRData != NULL)
		{
			OSScheduleMISR(psDevInfo->pvAPMISRData);
		}
	}
}

/*
	RGX LISR Handler
*/
static IMG_BOOL RGX_LISRHandler (void *pvData)
{
	PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
	IMG_BOOL bInterruptProcessed = IMG_FALSE;
	RGXFWIF_SYSDATA *psFwSysData;

	/* GUEST: no FW interrupt-count bookkeeping, just ack and kick the MISR. */
	if (PVRSRV_VZ_MODE_IS(GUEST))
	{
		if (psDevInfo->bRGXPowered && RGXFwIrqEventRx(psDevInfo))
		{
			bInterruptProcessed = IMG_TRUE;
			OSScheduleMISR(psDevInfo->pvMISRData);
		}

		return bInterruptProcessed;
	}

	psFwSysData = psDevInfo->psRGXFWIfFwSysData;

#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
	{
		IMG_UINT32 ui32TID;

		for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
		{
			g_sLISRExecutionInfo.aui32InterruptCountSnapshot[ui32TID] = psFwSysData->aui32InterruptCount[ui32TID];
		}

		g_sLISRExecutionInfo.ui32State = 0;
		g_sLISRExecutionInfo.ui64Clockns = OSClockns64();
	}
#endif

	if (psDevInfo->bRGXPowered == IMG_FALSE)
	{
#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
		g_sLISRExecutionInfo.ui32State |= RGX_LISR_DEVICE_NOT_POWERED;
#endif
		if (psFwSysData->ePowState == RGXFWIF_POW_OFF)
		{
#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
			g_sLISRExecutionInfo.ui32State |= RGX_LISR_FWIF_POW_OFF;
#endif
			return
bInterruptProcessed;
		}
	}

	if (RGXFwIrqEventRx(psDevInfo))
	{
#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
		g_sLISRExecutionInfo.ui32State |= RGX_LISR_EVENT_EN;
		psDeviceNode->ui64nLISR++;
#endif

		/* Only schedule the MISR if the FW's per-thread IRQ counters moved
		 * since the last snapshot (i.e. there is new work to consume). */
		bInterruptProcessed = SampleIRQCount(psFwSysData->aui32InterruptCount,
											 psDevInfo->aui32SampleIRQCount);

		if (!bInterruptProcessed)
		{
#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
			g_sLISRExecutionInfo.ui32State |= RGX_LISR_COUNTS_EQUAL;
#endif
			return bInterruptProcessed;
		}

		bInterruptProcessed = IMG_TRUE;
#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
		g_sLISRExecutionInfo.ui32State |= RGX_LISR_PROCESSED;
		psDeviceNode->ui64nMISR++;
#endif

		OSScheduleMISR(psDevInfo->pvMISRData);

		if (psDevInfo->pvAPMISRData != NULL)
		{
			OSScheduleMISR(psDevInfo->pvAPMISRData);
		}
	}

	return bInterruptProcessed;
}

/* Attempt to flush commands that were queued on the deferred KCCB list (the
 * kernel CCB was full when they were submitted). Takes the power lock to
 * block power transitions while flushing; a full KCCB is not an error here. */
static void RGX_MISR_ProcessKCCBDeferredList(PVRSRV_DEVICE_NODE *psDeviceNode)
{
	PVRSRV_ERROR eError = PVRSRV_OK;
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
	OS_SPINLOCK_FLAGS uiFlags;

	/* First check whether there are pending commands in Deferred KCCB List */
	OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
	if (dllist_is_empty(&psDevInfo->sKCCBDeferredCommandsListHead))
	{
		OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
		return;
	}
	OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);

	/* Powerlock to avoid further Power transition requests
	   while KCCB deferred list is being processed */
	eError = PVRSRVPowerLock(psDeviceNode);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
				 "%s: Failed to acquire PowerLock (device: %p, error: %s)",
				 __func__, psDeviceNode, PVRSRVGetErrorString(eError)));
		goto _RGX_MISR_ProcessKCCBDeferredList_PowerLock_failed;
	}

	/* Try to send deferred KCCB commands Do not Poll from here*/
	eError = RGXSendCommandsFromDeferredList(psDevInfo, IMG_FALSE);

	PVRSRVPowerUnlock(psDeviceNode);

	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_MESSAGE,
				 "%s could not flush Deferred KCCB list, KCCB is full.",
				 __func__));
	}

_RGX_MISR_ProcessKCCBDeferredList_PowerLock_failed:

	return;
}

/* Active Power Management MISR: flushes the deferred KCCB list while the FW
 * is ON/IDLE, and requests a power-off when the FW reports IDLE. A RETRY
 * result re-schedules this MISR; other failures trigger a debug dump. */
static void RGX_MISRHandler_CheckFWActivePowerState(void *psDevice)
{
	PVRSRV_DEVICE_NODE *psDeviceNode = psDevice;
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
	RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
	PVRSRV_ERROR eError = PVRSRV_OK;

	if (psFwSysData->ePowState == RGXFWIF_POW_ON || psFwSysData->ePowState == RGXFWIF_POW_IDLE)
	{
		RGX_MISR_ProcessKCCBDeferredList(psDeviceNode);
	}

	if (psFwSysData->ePowState == RGXFWIF_POW_IDLE)
	{
		/* The FW is IDLE and therefore could be shut down */
		eError = RGXActivePowerRequest(psDeviceNode);

		if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED))
		{
			if (eError != PVRSRV_ERROR_RETRY)
			{
				PVR_DPF((PVR_DBG_WARNING,
						 "%s: Failed RGXActivePowerRequest call (device: %p) with %s",
						 __func__, psDeviceNode, PVRSRVGetErrorString(eError)));
				PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
			}
			else
			{
				/* Re-schedule the power down request as it was deferred.
				 */
				OSScheduleMISR(psDevInfo->pvAPMISRData);
			}
		}
	}

}

/* Shorter defines to keep the code a bit shorter */
#define GPU_IDLE       RGXFWIF_GPU_UTIL_STATE_IDLE
#define GPU_ACTIVE     RGXFWIF_GPU_UTIL_STATE_ACTIVE
#define GPU_BLOCKED    RGXFWIF_GPU_UTIL_STATE_BLOCKED
#define MAX_ITERATIONS 64

/* Compute GPU utilisation (idle/active/blocked, in microseconds) accumulated
 * since this user's previous call, by sampling the FW-maintained counters in
 * the shared GPU_UTIL_FWCB without stopping the FW. The sampling loop re-reads
 * until two consecutive reads agree (the FW may be mid-update), and the whole
 * attempt is retried if the result looks like a torn read. */
static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode,
                                       IMG_HANDLE hGpuUtilUser,
                                       RGXFWIF_GPU_UTIL_STATS *psReturnStats)
{
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
	volatile RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
	RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
	IMG_UINT64 ui64TimeNow;
	IMG_UINT32 ui32Attempts;
	IMG_UINT32 ui32Remainder;


	/***** (1) Initialise return stats *****/

	psReturnStats->bValid = IMG_FALSE;
	psReturnStats->ui64GpuStatIdle = 0;
	psReturnStats->ui64GpuStatActive = 0;
	psReturnStats->ui64GpuStatBlocked = 0;
	psReturnStats->ui64GpuStatCumulative = 0;

	if (hGpuUtilUser == NULL)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}
	psAggregateStats = hGpuUtilUser;


	/* Try to acquire GPU utilisation counters and repeat if the FW is in the middle of an update */
	for (ui32Attempts = 0; ui32Attempts < 4; ui32Attempts++)
	{
		IMG_UINT64 aui64TmpCounters[RGXFWIF_GPU_UTIL_STATE_NUM] = {0};
		IMG_UINT64 ui64LastPeriod = 0, ui64LastWord = 0, ui64LastState = 0, ui64LastTime = 0;
		IMG_UINT32 i = 0;


		/***** (2) Get latest data from shared area *****/

		OSLockAcquire(psDevInfo->hGPUUtilLock);

		/*
		 * First attempt at detecting if the FW is in the middle of an update.
		 * This should also help if the FW is in the middle of a 64 bit variable update.
		 */
		while (((ui64LastWord != psUtilFWCb->ui64LastWord) ||
				(aui64TmpCounters[ui64LastState] !=
				 psUtilFWCb->aui64StatsCounters[ui64LastState])) &&
			   (i < MAX_ITERATIONS))
		{
			ui64LastWord = psUtilFWCb->ui64LastWord;
			ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(ui64LastWord);
			aui64TmpCounters[GPU_IDLE] = psUtilFWCb->aui64StatsCounters[GPU_IDLE];
			aui64TmpCounters[GPU_ACTIVE] = psUtilFWCb->aui64StatsCounters[GPU_ACTIVE];
			aui64TmpCounters[GPU_BLOCKED] = psUtilFWCb->aui64StatsCounters[GPU_BLOCKED];
			i++;
		}

		OSLockRelease(psDevInfo->hGPUUtilLock);

		if (i == MAX_ITERATIONS)
		{
			PVR_DPF((PVR_DBG_WARNING,
					 "RGXGetGpuUtilStats could not get reliable data after trying %u times", i));
			return PVRSRV_ERROR_TIMEOUT;
		}


		/***** (3) Compute return stats *****/

		/* Update temp counters to account for the time since the last update to the shared ones */
		OSMemoryBarrier(); /* Ensure the current time is read after the loop above */
		ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64());
		ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(ui64LastWord);
		ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime);
		aui64TmpCounters[ui64LastState] += ui64LastPeriod;

		/* Get statistics for a user since its last request */
		psReturnStats->ui64GpuStatIdle = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_IDLE],
																	 psAggregateStats->ui64GpuStatIdle);
		psReturnStats->ui64GpuStatActive = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_ACTIVE],
																	   psAggregateStats->ui64GpuStatActive);
		psReturnStats->ui64GpuStatBlocked = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_BLOCKED],
																	    psAggregateStats->ui64GpuStatBlocked);
		psReturnStats->ui64GpuStatCumulative = psReturnStats->ui64GpuStatIdle +
											   psReturnStats->ui64GpuStatActive +
											   psReturnStats->ui64GpuStatBlocked;

		if (psAggregateStats->ui64TimeStamp != 0)
		{
			IMG_UINT64 ui64TimeSinceLastCall = ui64TimeNow - psAggregateStats->ui64TimeStamp;
			/* We expect to return at least 75% of the time since the last call in GPU stats */
			IMG_UINT64 ui64MinReturnedStats = ui64TimeSinceLastCall - (ui64TimeSinceLastCall / 4);

			/*
			 * If the returned stats are substantially lower than the time since
			 * the last call, then the Host might have read a partial update from the FW.
			 * If this happens, try sampling the shared counters again.
			 */
			if (psReturnStats->ui64GpuStatCumulative < ui64MinReturnedStats)
			{
				PVR_DPF((PVR_DBG_MESSAGE,
						 "%s: Return stats (%" IMG_UINT64_FMTSPEC ") too low "
						 "(call period %" IMG_UINT64_FMTSPEC ")",
						 __func__, psReturnStats->ui64GpuStatCumulative, ui64TimeSinceLastCall));
				PVR_DPF((PVR_DBG_MESSAGE, "%s: Attempt #%u has failed, trying again",
						 __func__, ui32Attempts));
				continue;
			}
		}

		break;
	}


	/***** (4) Update aggregate stats for the current user *****/

	psAggregateStats->ui64GpuStatIdle += psReturnStats->ui64GpuStatIdle;
	psAggregateStats->ui64GpuStatActive += psReturnStats->ui64GpuStatActive;
	psAggregateStats->ui64GpuStatBlocked += psReturnStats->ui64GpuStatBlocked;
	psAggregateStats->ui64TimeStamp = ui64TimeNow;


	/***** (5) Convert return stats to microseconds *****/

	psReturnStats->ui64GpuStatIdle = OSDivide64(psReturnStats->ui64GpuStatIdle, 1000, &ui32Remainder);
	psReturnStats->ui64GpuStatActive = OSDivide64(psReturnStats->ui64GpuStatActive, 1000, &ui32Remainder);
	psReturnStats->ui64GpuStatBlocked = OSDivide64(psReturnStats->ui64GpuStatBlocked, 1000, &ui32Remainder);
	psReturnStats->ui64GpuStatCumulative = OSDivide64(psReturnStats->ui64GpuStatCumulative, 1000, &ui32Remainder);

	/* Check that the return stats make sense */
	if (psReturnStats->ui64GpuStatCumulative == 0)
	{
		/* We can enter here only if all the RGXFWIF_GPU_UTIL_GET_PERIOD
		 * returned 0. This could happen if the GPU frequency value
		 * is not well calibrated and the FW is updating the GPU state
		 * while the Host is reading it.
		 * When such an event happens frequently, timers or the aggregate
		 * stats might not be accurate...
		 */
		PVR_DPF((PVR_DBG_WARNING, "RGXGetGpuUtilStats could not get reliable data."));
		return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
	}

	psReturnStats->bValid = IMG_TRUE;

	return PVRSRV_OK;
}

/* Allocate and zero a per-user aggregate-stats record; the returned handle is
 * passed back into RGXGetGpuUtilStats and must be released with
 * SORgxGpuUtilStatsUnregister. */
PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser)
{
	RGXFWIF_GPU_UTIL_STATS *psAggregateStats;

	/* NoStats used since this may be called outside of the register/de-register
	 * process calls which track memory use. */
	psAggregateStats = OSAllocMemNoStats(sizeof(RGXFWIF_GPU_UTIL_STATS));
	if (psAggregateStats == NULL)
	{
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	psAggregateStats->ui64GpuStatIdle = 0;
	psAggregateStats->ui64GpuStatActive = 0;
	psAggregateStats->ui64GpuStatBlocked = 0;
	psAggregateStats->ui64TimeStamp = 0;

	/* Not used */
	psAggregateStats->bValid = IMG_FALSE;
	psAggregateStats->ui64GpuStatCumulative = 0;

	*phGpuUtilUser = psAggregateStats;

	return PVRSRV_OK;
}

/* Free a stats record previously created by SORgxGpuUtilStatsRegister. */
PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser)
{
	RGXFWIF_GPU_UTIL_STATS *psAggregateStats;

	if (hGpuUtilUser == NULL)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	psAggregateStats = hGpuUtilUser;
	OSFreeMemNoStats(psAggregateStats);

	return PVRSRV_OK;
}

/*
	RGX MISR Handler
*/
static void RGX_MISRHandler_Main (void *pvData)
{
	PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;

	/* Give the HWPerf service a chance to transfer some data from the FW
	 * buffer to the host driver transport layer buffer.
	 */
	RGXHWPerfDataStoreCB(psDeviceNode);

#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
	/* Process the signalled checkpoints in the checkpoint CCB, before
	 * handling all other notifiers.
 */ + RGXCheckCheckpointCCB(psDeviceNode); +#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */ + + /* Inform other services devices that we have finished an operation */ + PVRSRVCheckStatus(psDeviceNode); + +#if defined(SUPPORT_PDVFS) && defined(RGXFW_META_SUPPORT_2ND_THREAD) + /* + * Firmware CCB only exists for primary FW thread. Only requirement for + * non primary FW thread(s) to communicate with host driver is in the case + * of PDVFS running on non primary FW thread. + * This requirement is directly handled by the below + */ + RGXPDVFSCheckCoreClkRateChange(psDeviceNode->pvDevice); +#endif + + /* Process the Firmware CCB for pending commands */ + RGXCheckFirmwareCCB(psDeviceNode->pvDevice); + + /* Calibrate the GPU frequency and recorrelate Host and GPU timers (done every few seconds) */ + RGXTimeCorrRestartPeriodic(psDeviceNode); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Process Workload Estimation Specific commands from the FW */ + WorkEstCheckFirmwareCCB(psDeviceNode->pvDevice); +#endif + + /* No APM MISR installed: drain the deferred KCCB list here instead. */ + if (psDevInfo->pvAPMISRData == NULL) + { + RGX_MISR_ProcessKCCBDeferredList(psDeviceNode); + } +} +#endif /* !defined(NO_HARDWARE) */ + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + /* Validation-only: log each OSID's LMA region limits, then carve the device's LMA arena into per-OSID sub-arenas. */ +PVRSRV_ERROR PVRSRVGPUVIRTPopulateLMASubArenasKM(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], + IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], + IMG_BOOL bEnableTrustedDeviceAceConfig) +{ + IMG_UINT32 ui32OS, ui32Region; + + for (ui32OS = 0; ui32OS < GPUVIRT_VALIDATION_NUM_OS; ui32OS++) + { + for (ui32Region = 0; ui32Region < GPUVIRT_VALIDATION_NUM_REGIONS; ui32Region++) + { + PVR_DPF((PVR_DBG_MESSAGE, + "OS=%u, Region=%u, Min=0x%x, Max=0x%x", + ui32OS, + ui32Region, + aui32OSidMin[ui32Region][ui32OS], + aui32OSidMax[ui32Region][ui32OS])); + } + } + + PopulateLMASubArenas(psDeviceNode, aui32OSidMin, aui32OSidMax); + + PVR_UNREFERENCED_PARAMETER(bEnableTrustedDeviceAceConfig); + + 
return PVRSRV_OK; +} +#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ + +static PVRSRV_ERROR RGXSetPowerParams(PVRSRV_RGXDEV_INFO *psDevInfo, + PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Save information used on power transitions for later + * (when RGXStart and RGXStop are executed) + */ + psDevInfo->sLayerParams.psDevInfo = psDevInfo; + psDevInfo->sLayerParams.psDevConfig = psDevConfig; +#if defined(PDUMP) + psDevInfo->sLayerParams.ui32PdumpFlags = PDUMP_FLAGS_CONTINUOUS; +#endif + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) || + RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + { + IMG_DEV_PHYADDR sKernelMMUCtxPCAddr; + + eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, + &sKernelMMUCtxPCAddr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire Kernel MMU Ctx page catalog")); + return eError; + } + + psDevInfo->sLayerParams.sPCAddr = sKernelMMUCtxPCAddr; + } + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + /* Send information used on power transitions to the trusted device as + * in this setup the driver cannot start/stop the GPU and perform resets + */ + if (psDevConfig->pfnTDSetPowerParams) + { + PVRSRV_TD_POWER_PARAMS sTDPowerParams; + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) || + RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + { + sTDPowerParams.sPCAddr = psDevInfo->sLayerParams.sPCAddr; + } + + eError = psDevConfig->pfnTDSetPowerParams(psDevConfig->hSysData, + &sTDPowerParams); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: TDSetPowerParams not implemented!")); + eError = PVRSRV_ERROR_NOT_IMPLEMENTED; + } +#endif + + return eError; +} + +/* + RGXSystemGetFabricCoherency +*/ +PVRSRV_ERROR RGXSystemGetFabricCoherency(IMG_CPU_PHYADDR sRegsCpuPBase, + IMG_UINT32 ui32RegsSize, + PVRSRV_DEVICE_FABRIC_TYPE *peDevFabricType, + PVRSRV_DEVICE_SNOOP_MODE 
*peCacheSnoopingMode) +{ + IMG_CHAR *aszLabels[] = {"none", "acelite", "fullace", "unknown"}; + PVRSRV_DEVICE_SNOOP_MODE eAppHintCacheSnoopingMode; + PVRSRV_DEVICE_SNOOP_MODE eDeviceCacheSnoopingMode; + IMG_UINT32 ui32AppHintFabricCoherency; + IMG_UINT32 ui32DeviceFabricCoherency; + void *pvAppHintState = NULL; + IMG_UINT32 ui32AppHintDefault; +#if !defined(NO_HARDWARE) + void *pvRegsBaseKM; +#endif + + if (!sRegsCpuPBase.uiAddr || !ui32RegsSize) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXSystemGetFabricCoherency: Invalid RGX register base/size parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + +#if !defined(NO_HARDWARE) + pvRegsBaseKM = OSMapPhysToLin(sRegsCpuPBase, ui32RegsSize, PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); + if (! pvRegsBaseKM) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXSystemGetFabricCoherency: Failed to create RGX register mapping")); + return PVRSRV_ERROR_BAD_MAPPING; + } + + /* AXI support within the SoC, bitfield COHERENCY_SUPPORT [1 .. 0] + value NO_COHERENCY 0x0 {SoC does not support any form of Coherency} + value ACE_LITE_COHERENCY 0x1 {SoC supports ACE-Lite or I/O Coherency} + value FULL_ACE_COHERENCY 0x2 {SoC supports full ACE or 2-Way Coherency} */ + ui32DeviceFabricCoherency = OSReadHWReg32(pvRegsBaseKM, RGX_CR_SOC_AXI); + PVR_LOG(("AXI fabric coherency (RGX_CR_SOC_AXI): 0x%x", ui32DeviceFabricCoherency)); +#if defined(DEBUG) + if (ui32DeviceFabricCoherency & ~((IMG_UINT32)RGX_CR_SOC_AXI_MASKFULL)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid RGX_CR_SOC_AXI value.", __func__)); + return PVRSRV_ERROR_INVALID_DEVICE; + } +#endif + ui32DeviceFabricCoherency &= ~((IMG_UINT32)RGX_CR_SOC_AXI_COHERENCY_SUPPORT_CLRMSK); + ui32DeviceFabricCoherency >>= RGX_CR_SOC_AXI_COHERENCY_SUPPORT_SHIFT; + + /* UnMap Regs */ + OSUnMapPhysToLin(pvRegsBaseKM, ui32RegsSize, PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); + + switch (ui32DeviceFabricCoherency) + { + case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY: + eDeviceCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CROSS; + 
*peDevFabricType = PVRSRV_DEVICE_FABRIC_FULLACE; + break; + + case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_ACE_LITE_COHERENCY: + eDeviceCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CPU_ONLY; + *peDevFabricType = PVRSRV_DEVICE_FABRIC_ACELITE; + break; + + case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_NO_COHERENCY: + default: + eDeviceCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE; + *peDevFabricType = PVRSRV_DEVICE_FABRIC_NONE; + break; + } +#else /* !defined(NO_HARDWARE) */ +#if defined(RGX_FEATURE_GPU_CPU_COHERENCY) + *peDevFabricType = PVRSRV_DEVICE_FABRIC_FULLACE; + eDeviceCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CROSS; + ui32DeviceFabricCoherency = RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY; +#else + *peDevFabricType = PVRSRV_DEVICE_FABRIC_ACELITE; + eDeviceCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CPU_ONLY; + ui32DeviceFabricCoherency = RGX_CR_SOC_AXI_COHERENCY_SUPPORT_ACE_LITE_COHERENCY; +#endif +#endif /* !defined(NO_HARDWARE) */ + + OSCreateKMAppHintState(&pvAppHintState); + ui32AppHintDefault = RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY; + OSGetKMAppHintUINT32(pvAppHintState, FabricCoherencyOverride, + &ui32AppHintDefault, &ui32AppHintFabricCoherency); + OSFreeKMAppHintState(pvAppHintState); + +#if defined(SUPPORT_SECURITY_VALIDATION) + /* Temporarily disable coherency */ + ui32AppHintFabricCoherency = RGX_CR_SOC_AXI_COHERENCY_SUPPORT_NO_COHERENCY; +#endif + + /* Suppress invalid AppHint value */ + switch (ui32AppHintFabricCoherency) + { + case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_NO_COHERENCY: + eAppHintCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE; + break; + + case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_ACE_LITE_COHERENCY: + eAppHintCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CPU_ONLY; + break; + + case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY: + eAppHintCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CROSS; + break; + + default: + PVR_DPF((PVR_DBG_ERROR, + "Invalid FabricCoherencyOverride AppHint %d, ignoring", + ui32AppHintFabricCoherency)); + eAppHintCacheSnoopingMode 
= PVRSRV_DEVICE_SNOOP_CROSS; + ui32AppHintFabricCoherency = RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY; + break; + } + + if (ui32AppHintFabricCoherency < ui32DeviceFabricCoherency) + { + PVR_LOG(("Downgrading device fabric coherency from %s to %s", + aszLabels[ui32DeviceFabricCoherency], + aszLabels[ui32AppHintFabricCoherency])); + eDeviceCacheSnoopingMode = eAppHintCacheSnoopingMode; + } + else if (ui32AppHintFabricCoherency > ui32DeviceFabricCoherency) + { + PVR_DPF((PVR_DBG_ERROR, + "Cannot upgrade device fabric coherency from %s to %s, ignoring", + aszLabels[ui32DeviceFabricCoherency], + aszLabels[ui32AppHintFabricCoherency])); + + /* Override requested-for app-hint with actual app-hint value being used */ + ui32AppHintFabricCoherency = ui32DeviceFabricCoherency; + } + + *peCacheSnoopingMode = eDeviceCacheSnoopingMode; + return PVRSRV_OK; +} + +/* + RGXSystemHasFBCDCVersion31 +*/ +static IMG_BOOL RGXSystemHasFBCDCVersion31(PVRSRV_DEVICE_NODE *psDeviceNode) +{ +#if defined(SUPPORT_VALIDATION) + IMG_UINT32 ui32FBCDCVersionOverride = 0; +#endif + + { + +#if defined(SUPPORT_VALIDATION) + if (ui32FBCDCVersionOverride == 2) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: FBCDCVersionOverride forces FBC3.1 but this core doesn't support it!", + __func__)); + } +#endif + +#if !defined(NO_HARDWARE) + if (psDeviceNode->psDevConfig->bHasFBCDCVersion31) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: System uses FBCDC3.1 but GPU doesn't support it!", + __func__)); + } +#endif + } + + return IMG_FALSE; +} + +/* + RGXDevMMUAttributes +*/ +static MMU_DEVICEATTRIBS *RGXDevMMUAttributes(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bKernelMemoryCtx) +{ + MMU_DEVICEATTRIBS *psMMUDevAttrs = NULL; + + /* bKernelMemoryCtx is only used for rogue cores */ + PVR_UNREFERENCED_PARAMETER(bKernelMemoryCtx); + + if (psDeviceNode->pfnCheckDeviceFeature) + { + psMMUDevAttrs = psDeviceNode->psMMUDevAttrs; + } + + return psMMUDevAttrs; +} + +#if defined(PDUMP) && defined(SUPPORT_SECURITY_VALIDATION) +/* + 
RGXGetSecurePDumpMemspace: map secure allocation flags to the PDump memspace name (TDFWMEM for FW code/data, TDSECBUFMEM for secure buffers). +*/ +static PVRSRV_ERROR RGXGetSecurePDumpMemspace(PVRSRV_DEVICE_NODE *psDeviceNode, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_CHAR *pszMemspaceName, + IMG_UINT32 ui32MemspaceNameLen) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + + if (PVRSRV_CHECK_SECURE_FW_CODE(uiFlags) || + PVRSRV_CHECK_SECURE_FW_DATA(uiFlags) ) + { + OSSNPrintf(pszMemspaceName, + ui32MemspaceNameLen, + PMR_MEMSPACE_FMTSPEC, + "TDFWMEM"); + } + else if (PVRSRV_CHECK_SECURE_BUFFER(uiFlags)) + { + OSSNPrintf(pszMemspaceName, + ui32MemspaceNameLen, + PMR_MEMSPACE_FMTSPEC, + "TDSECBUFMEM"); + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Not a secure allocation, flags 0x%" PVRSRV_MEMALLOCFLAGS_FMTSPEC, + __func__, uiFlags)); + eError = PVRSRV_ERROR_INVALID_REQUEST; + } + + return eError; +} +#endif + +/* + * RGXInitDevPart2: second-stage device init — device flags, HWPerf host buffer, locks/lists, APM MISR, power-manager registration, LISR/MISR install, TQ shaders. + */ +PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32DeviceFlags, + IMG_UINT32 ui32HWPerfHostBufSizeKB, + IMG_UINT32 ui32HWPerfHostFilter, + RGX_ACTIVEPM_CONF eActivePMConf, + IMG_UINT32 ui32AvailablePowUnitsMask) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_DEV_POWER_STATE eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON; + PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; + IMG_UINT32 ui32AllPowUnitsMask = (1 << psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount) - 1; + +#if defined(TIMING) || defined(DEBUG) + OSUserModeAccessToPerfCountersEn(); +#endif + + PDUMPCOMMENT("RGX Initialisation Part 2"); + + /* Initialise Device Flags */ + psDevInfo->ui32DeviceFlags = 0; + RGXSetDeviceFlags(psDevInfo, ui32DeviceFlags, IMG_TRUE); + + /* Allocate DVFS Table (needs to be allocated before GPU trace events + * component is initialised because there is a dependency between them) */ + psDevInfo->psGpuDVFSTable = OSAllocZMem(sizeof(*(psDevInfo->psGpuDVFSTable))); + if (psDevInfo->psGpuDVFSTable == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + 
 "PVRSRVRGXInitDevPart2KM: failed to allocate gpu dvfs table storage")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* Initialise HWPerfHost buffer. */ + if (RGXHWPerfHostInit(psDevInfo, ui32HWPerfHostBufSizeKB) == PVRSRV_OK) + { + if (psDevInfo->ui32HWPerfHostFilter == 0) + { + RGXHWPerfHostSetEventFilter(psDevInfo, ui32HWPerfHostFilter); + } + + /* If HWPerf enabled allocate all resources for the host side buffer. */ + if (psDevInfo->ui32HWPerfHostFilter != 0) + { + if (RGXHWPerfHostInitOnDemandResources(psDevInfo) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "HWPerfHost buffer on demand" + " initialisation failed.")); + } + } + } + else + { + PVR_DPF((PVR_DBG_WARNING, "HWPerfHost buffer initialisation failed.")); + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Initialise work estimation lock */ + eError = OSLockCreate(&psDevInfo->hWorkEstLock); + PVR_ASSERT(eError == PVRSRV_OK); +#endif + + /* Initialise lists of ZSBuffers */ + eError = OSLockCreate(&psDevInfo->hLockZSBuffer); + PVR_ASSERT(eError == PVRSRV_OK); + dllist_init(&psDevInfo->sZSBufferHead); + psDevInfo->ui32ZSBufferCurrID = 1; + + /* Initialise lists of growable Freelists */ + eError = OSLockCreate(&psDevInfo->hLockFreeList); + PVR_ASSERT(eError == PVRSRV_OK); + dllist_init(&psDevInfo->sFreeListHead); + psDevInfo->ui32FreelistCurrID = 1; + + eError = OSLockCreate(&psDevInfo->hDebugFaultInfoLock); + + if (eError != PVRSRV_OK) + { + return eError; + } + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + /* Page-fault debug enabled: extra lock serialising MMU ctx unregistration. */ + eError = OSLockCreate(&psDevInfo->hMMUCtxUnregLock); + + if (eError != PVRSRV_OK) + { + return eError; + } + } + + /* Setup GPU utilisation stats update callback */ + eError = OSLockCreate(&psDevInfo->hGPUUtilLock); + PVR_ASSERT(eError == PVRSRV_OK); +#if !defined(NO_HARDWARE) + psDevInfo->pfnGetGpuUtilStats = RGXGetGpuUtilStats; +#endif + + eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON; + psDevInfo->eActivePMConf = eActivePMConf; + + /* Validate the SPU mask and 
initialize to number of SPUs to power up */ + if ((ui32AvailablePowUnitsMask & ui32AllPowUnitsMask) == 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s:Invalid SPU mask (All=0x%X, Non Fused=0x%X). At-least one SPU must to be powered up.", + __func__, + ui32AllPowUnitsMask, + ui32AvailablePowUnitsMask)); + return PVRSRV_ERROR_INVALID_SPU_MASK; + } + + psDevInfo->ui32AvailablePowUnitsMask = ui32AvailablePowUnitsMask & ui32AllPowUnitsMask; + +#if !defined(NO_HARDWARE) + /* set-up the Active Power Mgmt callback */ + { + RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; + IMG_BOOL bSysEnableAPM = psRGXData->psRGXTimingInfo->bEnableActivePM; + IMG_BOOL bEnableAPM = ((eActivePMConf == RGX_ACTIVEPM_DEFAULT) && bSysEnableAPM) || + (eActivePMConf == RGX_ACTIVEPM_FORCE_ON); + + if (bEnableAPM && (!PVRSRV_VZ_MODE_IS(NATIVE))) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Active Power Management disabled in virtualization mode", __func__)); + bEnableAPM = false; + } + +#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) && defined(SUPPORT_AUTOVZ) + /* The AutoVz driver enable a virtualisation watchdog not compatible with APM */ + PVR_ASSERT(bEnableAPM == IMG_FALSE); +#endif + + if (bEnableAPM) + { + eError = OSInstallMISR(&psDevInfo->pvAPMISRData, + RGX_MISRHandler_CheckFWActivePowerState, + psDeviceNode, + "RGX_CheckFWActivePower"); + if (eError != PVRSRV_OK) + { + return eError; + } + + /* Prevent the device being woken up before there is something to do. */ + eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF; + } + } +#endif + + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableAPM, + RGXQueryAPMState, + RGXSetAPMState, + psDeviceNode, + NULL); + + RGXTimeCorrInitAppHintCallbacks(psDeviceNode); + + /* + Register the device with the power manager. 
+ Normal/Hyperv Drivers: Supports power management + Guest Drivers: Do not currently support power management + */ + eError = PVRSRVRegisterPowerDevice(psDeviceNode, + &RGXPrePowerState, &RGXPostPowerState, + psDevConfig->pfnPrePowerState, psDevConfig->pfnPostPowerState, + &RGXPreClockSpeedChange, &RGXPostClockSpeedChange, + &RGXForcedIdleRequest, &RGXCancelForcedIdleRequest, + &RGXPowUnitsStateMaskChange, + (IMG_HANDLE)psDeviceNode, + PVRSRV_DEV_POWER_STATE_OFF, + eDefaultPowerState); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVRGXInitDevPart2KM: failed to register device with power manager")); + return eError; + } + + eError = RGXSetPowerParams(psDevInfo, psDevConfig); + if (eError != PVRSRV_OK) return eError; + +#if defined(SUPPORT_VALIDATION) + { + void *pvAppHintState = NULL; + + IMG_UINT32 ui32AppHintDefault; + + OSCreateKMAppHintState(&pvAppHintState); + ui32AppHintDefault = PVRSRV_APPHINT_TESTSLRINTERVAL; + OSGetKMAppHintUINT32(pvAppHintState, TestSLRInterval, + &ui32AppHintDefault, &psDevInfo->ui32TestSLRInterval); + PVR_LOG(("OSGetKMAppHintUINT32(TestSLRInterval) ui32AppHintDefault=%d, psDevInfo->ui32TestSLRInterval=%d", + ui32AppHintDefault, psDevInfo->ui32TestSLRInterval)); + OSFreeKMAppHintState(pvAppHintState); + psDevInfo->ui32TestSLRCount = psDevInfo->ui32TestSLRInterval; + psDevInfo->ui32SLRSkipFWAddr = 0; + } +#endif + +#if defined(PDUMP) +#if defined(NO_HARDWARE) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_DEINIT, "Wait for the FW to signal idle"); + + /* Kick the FW once, in case it still needs to detect and set the idle state */ + PDUMPREG32(RGX_PDUMPREG_NAME, + RGX_CR_MTS_SCHEDULE, + RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK, + PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_DEINIT); + + eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfFwSysDataMemDesc, + offsetof(RGXFWIF_SYSDATA, ePowState), + RGXFWIF_POW_IDLE, + 0xFFFFFFFFU, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_DEINIT); + if (eError != PVRSRV_OK) 
return eError; +#endif + + /* Run RGXStop with the correct PDump flags to feed the last-frame deinit buffer */ + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_DEINIT, "RGX deinitialisation commands"); + + psDevInfo->sLayerParams.ui32PdumpFlags |= PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW; + + if (! PVRSRV_VZ_MODE_IS(GUEST)) + { + eError = RGXStop(&psDevInfo->sLayerParams); + if (eError != PVRSRV_OK) return eError; + } + + psDevInfo->sLayerParams.ui32PdumpFlags &= ~(PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW); +#endif + +#if !defined(NO_HARDWARE) + eError = RGXInstallProcessQueuesMISR(&psDevInfo->hProcessQueuesMISR, psDeviceNode); + if (eError != PVRSRV_OK) + { + if (psDevInfo->pvAPMISRData != NULL) + { + (void) OSUninstallMISR(psDevInfo->pvAPMISRData); + } + return eError; + } + + /* Register the interrupt handlers */ + eError = OSInstallMISR(&psDevInfo->pvMISRData, + RGX_MISRHandler_Main, + psDeviceNode, + "RGX_Main"); + if (eError != PVRSRV_OK) + { + if (psDevInfo->pvAPMISRData != NULL) + { + (void) OSUninstallMISR(psDevInfo->pvAPMISRData); + } + (void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR); + return eError; + } + + eError = SysInstallDeviceLISR(psDevConfig->hSysData, + psDevConfig->ui32IRQ, + PVRSRV_MODNAME, + RGX_LISRHandler, + psDeviceNode, + &psDevInfo->pvLISRData); + if (eError != PVRSRV_OK) + { + if (psDevInfo->pvAPMISRData != NULL) + { + (void) OSUninstallMISR(psDevInfo->pvAPMISRData); + } + (void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR); + (void) OSUninstallMISR(psDevInfo->pvMISRData); + return eError; + } + +#endif + +#if defined(PDUMP) +/* We need to wrap the check for S7_CACHE_HIERARCHY being supported inside + * #if defined(RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK)...#endif, as the + * RGX_IS_FEATURE_SUPPORTED macro references a bitmask define derived from its + * last parameter which will not exist on architectures which do not have this + * feature. 
+ * Note we check for RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK rather than for + * RGX_FEATURE_S7_CACHE_HIERARCHY (which might seem a better choice) as this + * means we can build the kernel driver without having to worry about the BVNC + * (the BIT_MASK is defined in rgx_bvnc_defs_km.h for all BVNCs for a given + * architecture, whereas the FEATURE is only defined for those BVNCs that + * support it). + */ +#if defined(RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK) + if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_CACHE_HIERARCHY))) +#endif + { + if (!PVRSRVSystemSnoopingOfCPUCache(psDevConfig) && + !PVRSRVSystemSnoopingOfDeviceCache(psDevConfig)) + { + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has NO cache snooping"); + } + else + { + if (PVRSRVSystemSnoopingOfCPUCache(psDevConfig)) + { + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has CPU cache snooping"); + } + if (PVRSRVSystemSnoopingOfDeviceCache(psDevConfig)) + { + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has DEVICE cache snooping"); + } + } + } +#endif + + eError = PVRSRVTQLoadShaders(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to load TQ shaders", __func__)); + return eError; + } + + psDevInfo->bDevInit2Done = IMG_TRUE; + + return PVRSRV_OK; +} + +#define VZ_RGX_FW_FILENAME_SUFFIX ".vz" +#define RGX_FW_FILENAME_MAX_SIZE ((sizeof(RGX_FW_FILENAME)+ \ + RGX_BVNC_STR_SIZE_MAX+sizeof(VZ_RGX_FW_FILENAME_SUFFIX))) + /* Build the BVNC-qualified firmware filenames (full BVNC and partial "p" form); VZ builds get a ".vz" suffix. */ +static void _GetFWFileName(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_CHAR *pszFWFilenameStr, + IMG_CHAR *pszFWpFilenameStr) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + const IMG_CHAR * const pszFWFilenameSuffix = + PVRSRV_VZ_MODE_IS(NATIVE) ? "" : VZ_RGX_FW_FILENAME_SUFFIX; + + OSSNPrintf(pszFWFilenameStr, RGX_FW_FILENAME_MAX_SIZE, + "%s." 
RGX_BVNC_STR_FMTSPEC "%s", + RGX_FW_FILENAME, + psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C, + pszFWFilenameSuffix); + + OSSNPrintf(pszFWpFilenameStr, RGX_FW_FILENAME_MAX_SIZE, + "%s." RGX_BVNC_STRP_FMTSPEC "%s", + RGX_FW_FILENAME, + psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C, + pszFWFilenameSuffix); +} + /* Load the firmware image, trying the full-BVNC name first, then the partial-BVNC name, then the generic name; returns a pointer to the image data or NULL if all loads fail. */ +const void * RGXLoadAndGetFWData(PVRSRV_DEVICE_NODE *psDeviceNode, + OS_FW_IMAGE **ppsRGXFW) +{ + IMG_CHAR aszFWFilenameStr[RGX_FW_FILENAME_MAX_SIZE]; + IMG_CHAR aszFWpFilenameStr[RGX_FW_FILENAME_MAX_SIZE]; + IMG_CHAR *pszLoadedFwStr; + + /* Prepare the image filenames to use in the following code */ + _GetFWFileName(psDeviceNode, aszFWFilenameStr, aszFWpFilenameStr); + + /* Get pointer to Firmware image */ + pszLoadedFwStr = aszFWFilenameStr; + *ppsRGXFW = OSLoadFirmware(psDeviceNode, pszLoadedFwStr, OS_FW_VERIFY_FUNCTION); + if (*ppsRGXFW == NULL) + { + pszLoadedFwStr = aszFWpFilenameStr; + *ppsRGXFW = OSLoadFirmware(psDeviceNode, pszLoadedFwStr, OS_FW_VERIFY_FUNCTION); + if (*ppsRGXFW == NULL) + { + pszLoadedFwStr = RGX_FW_FILENAME; + *ppsRGXFW = OSLoadFirmware(psDeviceNode, pszLoadedFwStr, OS_FW_VERIFY_FUNCTION); + if (*ppsRGXFW == NULL) + { + PVR_DPF((PVR_DBG_FATAL, "All RGX Firmware image loads failed for '%s'", + aszFWFilenameStr)); + return NULL; + } + } + } + + PVR_LOG(("RGX Firmware image '%s' loaded", pszLoadedFwStr)); + + return OSFirmwareData(*ppsRGXFW); +} + +#if defined(PDUMP) + /* PDump-only: ask the FW (via KCCB) to enable HWPerf counter blocks directly. */ +PVRSRV_ERROR RGXInitHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + RGXFWIF_KCCB_CMD sKccbCmd; + PVRSRV_ERROR eError; + + /* Fill in the command structure with the parameters needed */ + sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT; + + eError = RGXSendCommandWithPowLock(psDeviceNode->pvDevice, + &sKccbCmd, + PDUMP_FLAGS_CONTINUOUS); + + return eError; +} +#endif + 
 /* Create the firmware kernel memory context: set fabric coherency, create the devmem context and locate the FW main/config heaps; Host VZ additionally maps per-OSID guest raw heaps. */ +PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + /* set up fw memory contexts */ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; + PVRSRV_ERROR eError; + +#if defined(SUPPORT_AUTOVZ) + MMU_PX_SETUP sDefaultPxSetup = psDeviceNode->sDevMMUPxSetup; + + if (PVRSRV_VZ_MODE_IS(HOST) && (!psDeviceNode->bAutoVzFwIsUp)) + { + /* Temporarily swap the MMU Px methods and default LMA region of GPU physheap to + * allow the page tables of all memory mapped by the FwKernel context to be placed + * in a dedicated memory carveout. This should allow the firmware mappings to + * persist after a Host kernel crash or driver reset. */ + + psDeviceNode->sDevMMUPxSetup = RGX_FW_MMU_RESERVED_MEM_SETUP(psDeviceNode); + } +#endif + + /* Set the device fabric coherency before FW context creation */ + eError = RGXSystemGetFabricCoherency(psDevConfig->sRegsCpuPBase, + psDevConfig->ui32RegsSize, + &psDeviceNode->eDevFabricType, + &psDevConfig->eCacheSnoopingMode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed RGXSystemGetFabricCoherency (%u)", + __func__, + eError)); + goto failed_to_create_ctx; + } + + /* Register callbacks for creation of device memory contexts */ + psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext; + psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext; + + /* Create the memory context for the firmware. 
 */ + eError = DevmemCreateContext(psDeviceNode, DEVMEM_HEAPCFG_META, + &psDevInfo->psKernelDevmemCtx); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed DevmemCreateContext (%u)", + __func__, + eError)); + goto failed_to_create_ctx; + } + + eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, RGX_FIRMWARE_MAIN_HEAP_IDENT, + &psDevInfo->psFirmwareMainHeap); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed DevmemFindHeapByName (%u)", + __func__, + eError)); + goto failed_to_find_heap; + } + + eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, RGX_FIRMWARE_CONFIG_HEAP_IDENT, + &psDevInfo->psFirmwareConfigHeap); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed DevmemFindHeapByName (%u)", + __func__, + eError)); + goto failed_to_find_heap; + } + +#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) + if (PVRSRV_VZ_MODE_IS(HOST)) + { + IMG_UINT32 ui32OSID; + for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++) + { + IMG_CHAR szHeapName[PVRSRV_MAX_RA_NAME_LENGTH]; + + OSSNPrintf(szHeapName, sizeof(szHeapName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSID); + eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, szHeapName, + &psDevInfo->psGuestFirmwareRawHeap[ui32OSID]); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemFindHeapByName", failed_to_find_heap); + } + } +#endif + +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + if (PVRSRV_VZ_MODE_IS(HOST)) + { + IMG_DEV_PHYADDR sPhysHeapBase; + IMG_UINT32 ui32OSID; + + eError = PhysHeapRegionGetDevPAddr(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL], 0, &sPhysHeapBase); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapRegionGetDevPAddr", failed_to_find_heap); + + for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++) + { + IMG_DEV_PHYADDR sRawFwHeapBase = {sPhysHeapBase.uiAddr + (ui32OSID * RGX_FIRMWARE_RAW_HEAP_SIZE)}; + + eError = RGXFwRawHeapAllocMap(psDeviceNode, + 
 ui32OSID, + sRawFwHeapBase, + RGX_FIRMWARE_RAW_HEAP_SIZE); + if (eError != PVRSRV_OK) + { + /* Unwind the per-OSID raw heap mappings created so far before bailing out. */ + for (; ui32OSID > RGX_FIRST_RAW_HEAP_OSID; ui32OSID--) + { + RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID); + } + PVR_LOG_GOTO_IF_ERROR(eError, "RGXFwRawHeapAllocMap", failed_to_find_heap); + } + } + +#if defined(SUPPORT_AUTOVZ) + /* restore default Px setup */ + psDeviceNode->sDevMMUPxSetup = sDefaultPxSetup; +#endif + } +#else + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + eError = PvzClientMapDevPhysHeap(psDeviceNode->psDevConfig); + PVR_LOG_GOTO_IF_ERROR(eError, "PvzClientMapDevPhysHeap", failed_to_find_heap); + DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_TRUE); + DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_TRUE); + } +#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ + + return eError; + +failed_to_find_heap: + /* + * Clear the mem context create callbacks before destroying the RGX firmware + * context to avoid a spurious callback. + */ + psDeviceNode->pfnRegisterMemoryContext = NULL; + psDeviceNode->pfnUnregisterMemoryContext = NULL; + DevmemDestroyContext(psDevInfo->psKernelDevmemCtx); + psDevInfo->psKernelDevmemCtx = NULL; +failed_to_create_ctx: + return eError; +} + /* Tear down the firmware kernel memory context, undoing the VZ heap mappings first. */ +void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + if (PVRSRV_VZ_MODE_IS(HOST)) + { +#if defined(SUPPORT_AUTOVZ) + MMU_PX_SETUP sDefaultPxSetup = psDeviceNode->sDevMMUPxSetup; + + psDeviceNode->sDevMMUPxSetup = RGX_FW_MMU_RESERVED_MEM_SETUP(psDeviceNode); + + if (!psDeviceNode->bAutoVzFwIsUp) +#endif + { + IMG_UINT32 ui32OSID; + + for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++) + { + RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID); + } + } +#if defined(SUPPORT_AUTOVZ) + psDeviceNode->sDevMMUPxSetup = sDefaultPxSetup; +#endif + } +#else + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + (void) 
PvzClientUnmapDevPhysHeap(psDeviceNode->psDevConfig); + + if (psDevInfo->psFirmwareMainHeap) + { + DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_FALSE); + } + if (psDevInfo->psFirmwareConfigHeap) + { + DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_FALSE); + } + } +#endif + + /* + * Clear the mem context create callbacks before destroying the RGX firmware + * context to avoid a spurious callback. + */ + psDeviceNode->pfnRegisterMemoryContext = NULL; + psDeviceNode->pfnUnregisterMemoryContext = NULL; + + if (psDevInfo->psKernelDevmemCtx) + { + eError = DevmemDestroyContext(psDevInfo->psKernelDevmemCtx); + PVR_ASSERT(eError == PVRSRV_OK); + } +} + /* Compare the caller-supplied structure-alignment table against the values the FW wrote into shared memory; mismatches indicate an ABI/layout incompatibility. */ +static PVRSRV_ERROR RGXAlignmentCheck(PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32AlignChecksSize, + IMG_UINT32 aui32AlignChecks[]) +{ + static IMG_UINT32 aui32AlignChecksKM[] = {RGXFW_ALIGN_CHECKS_INIT_KM}; + PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; + IMG_UINT32 i, *paui32FWAlignChecks; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Skip the alignment check if the driver is guest + since there is no firmware to check against */ + PVRSRV_VZ_RET_IF_MODE(GUEST, eError); + + if (psDevInfo->psRGXFWAlignChecksMemDesc == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: FW Alignment Check Mem Descriptor is NULL", + __func__)); + return PVRSRV_ERROR_ALIGNMENT_ARRAY_NOT_AVAILABLE; + } + + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc, + (void **) &paui32FWAlignChecks); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire kernel address for alignment checks (%u)", + __func__, + eError)); + return eError; + } + + /* Skip past the KM checks (and their count slot) to reach the UM section written by the FW. */ + paui32FWAlignChecks += ARRAY_SIZE(aui32AlignChecksKM) + 1; + if (*paui32FWAlignChecks++ != ui32AlignChecksSize) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Mismatch in number of structures to check.", + __func__)); + eError = PVRSRV_ERROR_INVALID_ALIGNMENT; + goto return_; + } + + for (i = 0; i < ui32AlignChecksSize; i++) + { + if 
(aui32AlignChecks[i] != paui32FWAlignChecks[i]) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Check for structured alignment failed.", + __func__)); + eError = PVRSRV_ERROR_INVALID_ALIGNMENT; + goto return_; + } + } + +return_: + /* Always release the CPU mapping of the alignment-check buffer. */ + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc); + + return eError; +} + /* Allocate a firmware memory region via the build-appropriate path: dedicated FW memory, trusted-device import, or a plain FW devmem allocation. */ +static +PVRSRV_ERROR RGXAllocateFWMemoryRegion(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEVMEM_SIZE_T ui32Size, + IMG_UINT32 uiMemAllocFlags, + PVRSRV_TD_FW_MEM_REGION eRegion, + const IMG_PCHAR pszText, + DEVMEM_MEMDESC **ppsMemDescPtr) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_DEVMEM_LOG2ALIGN_T uiLog2Align = OSGetPageShift(); + +#if defined(SUPPORT_DEDICATED_FW_MEMORY) + PVR_UNREFERENCED_PARAMETER(eRegion); + + PDUMPCOMMENT("Allocate dedicated FW %s memory", pszText); + + eError = DevmemAllocateDedicatedFWMem(psDeviceNode, + ui32Size, + uiLog2Align, + uiMemAllocFlags, + pszText, + ppsMemDescPtr); +#else /* defined(SUPPORT_DEDICATED_FW_MEMORY) */ + +#if defined(SUPPORT_TRUSTED_DEVICE) + PDUMPCOMMENT("Import secure FW %s memory", pszText); + + eError = DevmemImportTDFWMem(psDeviceNode, + ui32Size, + uiLog2Align, + uiMemAllocFlags, + eRegion, + ppsMemDescPtr); +#else + uiMemAllocFlags = (uiMemAllocFlags | + PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp); + + PVR_UNREFERENCED_PARAMETER(uiLog2Align); + PVR_UNREFERENCED_PARAMETER(eRegion); + + PDUMPCOMMENT("Allocate FW %s memory", pszText); + + eError = DevmemFwAllocate(psDeviceNode->pvDevice, + ui32Size, + uiMemAllocFlags, + pszText, + ppsMemDescPtr); +#endif + +#endif /* defined(SUPPORT_DEDICATED_FW_MEMORY) */ + + return eError; +} + +/*! 
+******************************************************************************* + + @Function RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver + + @Description + + Validate the FW build options against KM driver build options (KM build options only) + + Following check is redundant, because next check checks the same bits. + Redundancy occurs because if client-server are build-compatible and client-firmware are + build-compatible then server-firmware are build-compatible as well. + + This check is left for clarity in error messages if any incompatibility occurs. + + @Input psFwOsInit - FW init data + + @Return PVRSRV_ERROR - depending on mismatch found + +******************************************************************************/ +static PVRSRV_ERROR RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(RGXFWIF_OSINIT *psFwOsInit) +{ +#if !defined(NO_HARDWARE) + IMG_UINT32 ui32BuildOptions, ui32BuildOptionsFWKMPart, ui32BuildOptionsMismatch; + + if (psFwOsInit == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + ui32BuildOptions = (RGX_BUILD_OPTIONS_KM & RGX_BUILD_OPTIONS_MASK_FW); + + ui32BuildOptionsFWKMPart = psFwOsInit->sRGXCompChecks.ui32BuildOptions & RGX_BUILD_OPTIONS_MASK_FW; + + if (ui32BuildOptions != ui32BuildOptionsFWKMPart) + { + ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32BuildOptionsFWKMPart; +#if !defined(PVRSRV_STRICT_COMPAT_CHECK) + /*Mask the debug flag option out as we do support combinations of debug vs release in um & km*/ + ui32BuildOptionsMismatch &= ~OPTIONS_DEBUG_MASK; +#endif + if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and KM driver build options; " + "extra options present in the KM driver: (0x%x). 
Please check rgx_options.h", + ui32BuildOptions & ui32BuildOptionsMismatch )); + return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; + } + + if ( (ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware-side and KM driver build options; " + "extra options present in Firmware: (0x%x). Please check rgx_options.h", + ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch )); + return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; + } + PVR_DPF((PVR_DBG_WARNING, "RGXDevInitCompatCheck: Firmware and KM driver build options differ.")); + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware and KM driver build options match. [ OK ]")); + } +#endif + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver + + @Description + + Validate FW DDK version against driver DDK version + + @Input psDevInfo - device info + @Input psFwOsInit - FW init data + + @Return PVRSRV_ERROR - depending on mismatch found + +******************************************************************************/ +static PVRSRV_ERROR RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_OSINIT *psFwOsInit) +{ +#if defined(PDUMP)||(!defined(NO_HARDWARE)) + IMG_UINT32 ui32DDKVersion; + PVRSRV_ERROR eError; + + ui32DDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN); +#endif + +#if defined(PDUMP) + PDUMPCOMMENT("Compatibility check: KM driver and FW DDK version"); + eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, ui32DDKVersion), + ui32DDKVersion, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + return eError; + } 
+#endif + +#if !defined(NO_HARDWARE) + if (psFwOsInit == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + if (psFwOsInit->sRGXCompChecks.ui32DDKVersion != ui32DDKVersion) + { + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible driver DDK version (%u.%u) / Firmware DDK revision (%u.%u).", + PVRVERSION_MAJ, PVRVERSION_MIN, + PVRVERSION_UNPACK_MAJ(psFwOsInit->sRGXCompChecks.ui32DDKVersion), + PVRVERSION_UNPACK_MIN(psFwOsInit->sRGXCompChecks.ui32DDKVersion))); + eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH; + PVR_DBG_BREAK; + return eError; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK version (%u.%u) and Firmware DDK revision (%u.%u) match. [ OK ]", + PVRVERSION_MAJ, PVRVERSION_MIN, + PVRVERSION_MAJ, PVRVERSION_MIN)); + } +#endif + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver + + @Description + + Validate FW DDK build against driver DDK build + + @Input psDevInfo - device info + @Input psFwOsInit - FW init data + + @Return PVRSRV_ERROR - depending on mismatch found + +******************************************************************************/ +static PVRSRV_ERROR RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_OSINIT *psFwOsInit) +{ + PVRSRV_ERROR eError=PVRSRV_OK; +#if defined(PDUMP)||(!defined(NO_HARDWARE)) + IMG_UINT32 ui32DDKBuild; + + ui32DDKBuild = PVRVERSION_BUILD; +#endif + +#if defined(PDUMP) && defined(PVRSRV_STRICT_COMPAT_CHECK) + PDUMPCOMMENT("Compatibility check: KM driver and FW DDK build"); + eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, ui32DDKBuild), + ui32DDKBuild, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for 
psRGXFWIfOsInitMemDesc (%d)", eError)); + return eError; + } +#endif + +#if !defined(NO_HARDWARE) + if (psFwOsInit == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + if (psFwOsInit->sRGXCompChecks.ui32DDKBuild != ui32DDKBuild) + { + PVR_LOG(("(WARN) RGXDevInitCompatCheck: Different driver DDK build version (%d) / Firmware DDK build version (%d).", + ui32DDKBuild, psFwOsInit->sRGXCompChecks.ui32DDKBuild)); +#if defined(PVRSRV_STRICT_COMPAT_CHECK) + eError = PVRSRV_ERROR_DDK_BUILD_MISMATCH; + PVR_DBG_BREAK; + return eError; +#endif + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK build version (%d) and Firmware DDK build version (%d) match. [ OK ]", + ui32DDKBuild, psFwOsInit->sRGXCompChecks.ui32DDKBuild)); + } +#endif + return eError; +} + +/*! +******************************************************************************* + + @Function RGXDevInitCompatCheck_BVNC_FWAgainstDriver + + @Description + + Validate FW BVNC against driver BVNC + + @Input psDevInfo - device info + @Input psFwOsInit - FW init data + + @Return PVRSRV_ERROR - depending on mismatch found + +******************************************************************************/ +static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_OSINIT *psFwOsInit) +{ +#if !defined(NO_HARDWARE) + IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleBVNC; +#endif +#if defined(PDUMP)||(!defined(NO_HARDWARE)) + RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sBVNC); + PVRSRV_ERROR eError; + + sBVNC.ui64BVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B, + psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, + psDevInfo->sDevFeatureCfg.ui32C); +#endif + +#if defined(PDUMP) + PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (struct version)"); + eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) + + 
offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion), + sBVNC.ui32LayoutVersion, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + } + + + PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (BNC part - lower 32 bits)"); + eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) + + offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC), + (IMG_UINT32)sBVNC.ui64BVNC, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + } + + PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (BNC part - Higher 32 bits)"); + eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) + + offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) + + sizeof(IMG_UINT32), + (IMG_UINT32)(sBVNC.ui64BVNC >> 32), + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + } +#endif + +#if !defined(NO_HARDWARE) + if (psFwOsInit == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + RGX_BVNC_EQUAL(sBVNC, psFwOsInit->sRGXCompChecks.sFWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleBVNC); + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + bCompatibleAll = IMG_TRUE; + } + + if (!bCompatibleAll) + { + if (!bCompatibleVersion) + { + PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of driver (%u) and firmware (%u).", + __func__, + sBVNC.ui32LayoutVersion, + psFwOsInit->sRGXCompChecks.sFWBVNC.ui32LayoutVersion)); + eError = 
PVRSRV_ERROR_BVNC_MISMATCH; + return eError; + } + + if (!bCompatibleBVNC) + { + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in KM driver BVNC (%u.%u.%u.%u) and Firmware BVNC (%u.%u.%u.%u)", + RGX_BVNC_PACKED_EXTR_B(sBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_V(sBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_N(sBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_C(sBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_B(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_V(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_N(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_C(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC))); + eError = PVRSRV_ERROR_BVNC_MISMATCH; + return eError; + } + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware BVNC and KM driver BNVC match. [ OK ]")); + } +#endif + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RGXDevInitCompatCheck_BVNC_HWAgainstDriver + + @Description + + Validate HW BVNC against driver BVNC + + @Input psDevInfo - device info + @Input psFwOsInit - FW init data + + @Return PVRSRV_ERROR - depending on mismatch found + +******************************************************************************/ +static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_HWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_OSINIT *psFwOsInit) +{ +#if defined(PDUMP) || !defined(NO_HARDWARE) + IMG_UINT64 ui64MaskBVNC = RGX_BVNC_PACK_MASK_B | + RGX_BVNC_PACK_MASK_V | + RGX_BVNC_PACK_MASK_N | + RGX_BVNC_PACK_MASK_C; + + PVRSRV_ERROR eError; + RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sSWBVNC); +#endif + +#if !defined(NO_HARDWARE) + RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sHWBVNC); + IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleBVNC; +#endif + +#if defined(PDUMP) + PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; +#endif + + if (psDevInfo->bIgnoreHWReportedBVNC) + { + PVR_LOG(("BVNC compatibility checks between driver 
and HW are disabled (AppHint override)")); + return PVRSRV_OK; + } + +#if defined(PDUMP) || !defined(NO_HARDWARE) +#if defined(COMPAT_BVNC_MASK_B) + ui64MaskBNC &= ~RGX_BVNC_PACK_MASK_B; +#endif +#if defined(COMPAT_BVNC_MASK_V) + ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_V; +#endif +#if defined(COMPAT_BVNC_MASK_N) + ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_N; +#endif +#if defined(COMPAT_BVNC_MASK_C) + ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_C; +#endif + + sSWBVNC.ui64BVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B, + psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, + psDevInfo->sDevFeatureCfg.ui32C); + + + + if (ui64MaskBVNC != (RGX_BVNC_PACK_MASK_B | RGX_BVNC_PACK_MASK_V | RGX_BVNC_PACK_MASK_N | RGX_BVNC_PACK_MASK_C)) + { + PVR_LOG(("Compatibility checks: Ignoring fields: '%s%s%s%s' of HW BVNC.", + ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_B))?("B"):("")), + ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_V))?("V"):("")), + ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_N))?("N"):("")), + ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_C))?("C"):("")))); + } +#endif + +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Compatibility check: Layout version of compchecks struct"); + eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) + + offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion), + sSWBVNC.ui32LayoutVersion, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + return eError; + } + + PDUMPCOM(ui32PDumpFlags, "BVNC compatibility check started"); + if (ui64MaskBVNC & (RGX_BVNC_PACK_MASK_B | RGX_BVNC_PACK_MASK_N | RGX_BVNC_PACK_MASK_C)) + { + PDUMPIF("DISABLE_HWBNC_CHECK", ui32PDumpFlags); + PDUMPELSE("DISABLE_HWBNC_CHECK", ui32PDumpFlags); + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Compatibility check: HW BNC and FW BNC 
(Lower 32 bits)"); + eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) + + offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC), + (IMG_UINT32)sSWBVNC.ui64BVNC , + (IMG_UINT32)(ui64MaskBVNC & ~RGX_BVNC_PACK_MASK_V), + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + return eError; + } + + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Compatibility check: HW BNC and FW BNC (Higher 32 bits)"); + eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) + + offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) + + sizeof(IMG_UINT32), + (IMG_UINT32)(sSWBVNC.ui64BVNC >> 32), + (IMG_UINT32)((ui64MaskBVNC & ~RGX_BVNC_PACK_MASK_V) >> 32), + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + return eError; + } + + PDUMPFI("DISABLE_HWBNC_CHECK", ui32PDumpFlags); + } + if (ui64MaskBVNC & RGX_BVNC_PACK_MASK_V) + { + PDUMPIF("DISABLE_HWV_CHECK", ui32PDumpFlags); + PDUMPELSE("DISABLE_HWV_CHECK", ui32PDumpFlags); + + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Compatibility check: HW V and FW V"); + eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) + + offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) + + ((RGX_BVNC_PACK_SHIFT_V >= 32) ? sizeof(IMG_UINT32) : 0), + (IMG_UINT32)(sSWBVNC.ui64BVNC >> ((RGX_BVNC_PACK_SHIFT_V >= 32) ? 32 : 0)), + RGX_BVNC_PACK_MASK_V >> ((RGX_BVNC_PACK_SHIFT_V >= 32) ? 
32 : 0), + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + return eError; + } + PDUMPFI("DISABLE_HWV_CHECK", ui32PDumpFlags); + } + PDUMPCOM(ui32PDumpFlags, "BVNC compatibility check finished"); +#endif + +#if !defined(NO_HARDWARE) + if (psFwOsInit == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + sHWBVNC = psFwOsInit->sRGXCompChecks.sHWBVNC; + + sHWBVNC.ui64BVNC &= ui64MaskBVNC; + sSWBVNC.ui64BVNC &= ui64MaskBVNC; + + RGX_BVNC_EQUAL(sSWBVNC, sHWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleBVNC); + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + bCompatibleAll = IMG_TRUE; + } + + if (!bCompatibleAll) + { + if (!bCompatibleVersion) + { + PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of HW (%d) and FW (%d).", + __func__, + sHWBVNC.ui32LayoutVersion, + sSWBVNC.ui32LayoutVersion)); + eError = PVRSRV_ERROR_BVNC_MISMATCH; + return eError; + } + + if (!bCompatibleBVNC) + { + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible HW BVNC (%d.%d.%d.%d) and FW BVNC (%d.%d.%d.%d).", + RGX_BVNC_PACKED_EXTR_B(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_V(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_N(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_C(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_B(sSWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_V(sSWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_N(sSWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_C(sSWBVNC.ui64BVNC))); + eError = PVRSRV_ERROR_BVNC_MISMATCH; + return eError; + } + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: HW BVNC (%d.%d.%d.%d) and FW BVNC (%d.%d.%d.%d) match. 
[ OK ]", + RGX_BVNC_PACKED_EXTR_B(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_V(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_N(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_C(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_B(sSWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_V(sSWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_N(sSWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_C(sSWBVNC.ui64BVNC))); + } +#endif + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RGXDevInitCompatCheck_METACoreVersion_AgainstDriver + + @Description + + Validate HW META version against driver META version + + @Input psDevInfo - device info + @Input psFwOsInit - FW init data + + @Return PVRSRV_ERROR - depending on mismatch found + +******************************************************************************/ +static PVRSRV_ERROR RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_OSINIT *psFwOsInit) +{ +#if defined(PDUMP)||(!defined(NO_HARDWARE)) + PVRSRV_ERROR eError; +#endif + +#if defined(PDUMP) + PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; +#endif + + IMG_UINT32 ui32FWCoreIDValue = 0; + IMG_CHAR *pcRGXFW_PROCESSOR = NULL; + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + switch (RGX_GET_FEATURE_VALUE(psDevInfo, META)) + { + case MTP218: ui32FWCoreIDValue = RGX_CR_META_MTP218_CORE_ID_VALUE; break; + case MTP219: ui32FWCoreIDValue = RGX_CR_META_MTP219_CORE_ID_VALUE; break; + case LTP218: ui32FWCoreIDValue = RGX_CR_META_LTP218_CORE_ID_VALUE; break; + case LTP217: ui32FWCoreIDValue = RGX_CR_META_LTP217_CORE_ID_VALUE; break; + default: + PVR_DPF((PVR_DBG_ERROR, "%s: Undefined FW_CORE_ID_VALUE", __func__)); + PVR_ASSERT(0); + } + pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META; + } + else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + { + ui32FWCoreIDValue = RGXRISCVFW_CORE_ID_VALUE; + pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_RISCV; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: 
Undefined FW_CORE_ID_VALUE", __func__)); + PVR_ASSERT(0); + } + +#if defined(PDUMP) + PDUMPIF("DISABLE_HWMETA_CHECK", ui32PDumpFlags); + PDUMPELSE("DISABLE_HWMETA_CHECK", ui32PDumpFlags); + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Compatibility check: KM driver and HW FW Processor version"); + eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, ui32FWProcessorVersion), + ui32FWCoreIDValue, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + return eError; + } + PDUMPFI("DISABLE_HWMETA_CHECK", ui32PDumpFlags); +#endif + +#if !defined(NO_HARDWARE) + if (psFwOsInit == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + if (psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion != ui32FWCoreIDValue) + { + PVR_LOG(("RGXDevInitCompatCheck: Incompatible driver %s version (%d) / HW %s version (%d).", + pcRGXFW_PROCESSOR, + ui32FWCoreIDValue, + pcRGXFW_PROCESSOR, + psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion)); + eError = PVRSRV_ERROR_FWPROCESSOR_MISMATCH; + PVR_DBG_BREAK; + return eError; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Compatible driver %s version (%d) / HW %s version (%d) [OK].", + pcRGXFW_PROCESSOR, + ui32FWCoreIDValue, + pcRGXFW_PROCESSOR, + psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion)); + } +#endif + return PVRSRV_OK; +} + +/*! 
+******************************************************************************* +******************************************************************************* + + @Function RGXDevInitCompatCheck + + @Description + + Check compatibility of host driver and firmware (DDK and build options) + for RGX devices at services/device initialisation + + @Input psDeviceNode - device node + + @Return PVRSRV_ERROR - depending on mismatch found + +******************************************************************************/ +static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; +#if !defined(NO_HARDWARE) + IMG_UINT32 ui32RegValue; + IMG_UINT8 ui8FwOsCount; + IMG_UINT32 ui32FwTimeout = MAX_HW_TIME_US; + +#if defined(SUPPORT_AUTOVZ) + /* AutoVz drivers booting while the firmware is running might have to wait + * longer to have their compatibility data filled if the firmware is busy */ + ui32FwTimeout = (psDeviceNode->bAutoVzFwIsUp) ? + (PVR_AUTOVZ_WDG_PERIOD_MS * 1000 * 3) : (MAX_HW_TIME_US); +#endif + + LOOP_UNTIL_TIMEOUT(ui32FwTimeout) + { + if (*((volatile IMG_BOOL *)&psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) + { + /* No need to wait if the FW has already updated the values */ + break; + } + OSWaitus(ui32FwTimeout/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + ui32RegValue = 0; + + if ((!PVRSRV_VZ_MODE_IS(GUEST)) && + RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + eError = RGXReadMETAAddr(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegValue); + + if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Reading RGX META register failed. Is the GPU correctly powered up? (%u)", + __func__, eError)); + goto chk_exit; + } + + if (!(ui32RegValue & META_CR_TXENABLE_ENABLE_BIT)) + { + eError = PVRSRV_ERROR_META_THREAD0_NOT_ENABLED; + PVR_DPF((PVR_DBG_ERROR, + "%s: RGX META is not running. Is the GPU correctly powered up? 
%d (%u)", + __func__, psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated, eError)); + goto chk_exit; + } + } + + if (!*((volatile IMG_BOOL *)&psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) + { + eError = PVRSRV_ERROR_TIMEOUT; + PVR_DPF((PVR_DBG_ERROR, "%s: GPU Firmware not responding: failed to supply compatibility info (%u)", + __func__, eError)); + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Potential causes: firmware not initialised or the current Guest driver's " + "OsConfig initialisation data was not accepted by the firmware", __func__)); + } + goto chk_exit; + } + + ui8FwOsCount = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.sInitOptions.ui8OsCountSupport; + if ((PVRSRV_VZ_MODE_IS(NATIVE) && (ui8FwOsCount > 1)) || + (PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_OS_SUPPORTED))) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)", + __func__, (PVRSRV_VZ_MODE_IS(NATIVE)) ? (1) : (RGX_NUM_OS_SUPPORTED), ui8FwOsCount)); + } +#endif /* defined(NO_HARDWARE) */ + + eError = RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(psDevInfo->psRGXFWIfOsInit); + if (eError != PVRSRV_OK) + { + goto chk_exit; + } + + eError = RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit); + if (eError != PVRSRV_OK) + { + goto chk_exit; + } + + eError = RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit); + if (eError != PVRSRV_OK) + { + goto chk_exit; + } + + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + eError = RGXDevInitCompatCheck_BVNC_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit); + if (eError != PVRSRV_OK) + { + goto chk_exit; + } + + eError = RGXDevInitCompatCheck_BVNC_HWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit); + if (eError != PVRSRV_OK) + { + goto chk_exit; + } + } + + eError = RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit); + if (eError != 
PVRSRV_OK) + { + goto chk_exit; + } + + eError = PVRSRV_OK; +chk_exit: + + return eError; +} + +static void _RGXSoftResetToggle(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT64 ui64ResetValue, + IMG_UINT64 ui64SPUResetValue) +{ + OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, ui64ResetValue); + if (RGX_GET_FEATURE_VALUE(psDevInfo, POWER_ISLAND_VERSION) == 1) + { + OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET_SPU, ui64SPUResetValue); + } + + /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */ + (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET); + if (RGX_GET_FEATURE_VALUE(psDevInfo, POWER_ISLAND_VERSION) == 1) + { + (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET_SPU); + } +} + +/**************************************************************************/ /*! +@Function RGXSoftReset +@Description Resets some modules of the RGX device +@Input psDeviceNode Device node +@Input ui64ResetValue A mask for which each bit set corresponds + to a module to reset (via the SOFT_RESET + register). +@Input ui64SPUResetValue A mask for which each bit set corresponds + to a module to reset (via the SOFT_RESET_SPU + register). 
+@Return PVRSRV_ERROR +*/ /***************************************************************************/ +static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT64 ui64ResetValue, + IMG_UINT64 ui64SPUResetValue) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(psDeviceNode != NULL); + PVR_ASSERT(psDeviceNode->pvDevice != NULL); + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + if (((ui64ResetValue & RGX_CR_SOFT_RESET_MASKFULL) != ui64ResetValue) + || (ui64SPUResetValue & RGX_CR_SOFT_RESET_SPU_MASKFULL) != ui64SPUResetValue) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* The device info */ + psDevInfo = psDeviceNode->pvDevice; + + /* Set in soft-reset */ + _RGXSoftResetToggle(psDevInfo, ui64ResetValue, ui64SPUResetValue); + + /* Take the modules out of reset... */ + _RGXSoftResetToggle(psDevInfo, 0, 0); + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEVMEM_SIZE_T uiFWCodeLen, + IMG_DEVMEM_SIZE_T uiFWDataLen, + IMG_DEVMEM_SIZE_T uiFWCorememCodeLen, + IMG_DEVMEM_SIZE_T uiFWCorememDataLen) +{ + DEVMEM_FLAGS_T uiMemAllocFlags; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + + /* + * Set up Allocation for FW code section + */ + uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE; + + eError = RGXAllocateFWMemoryRegion(psDeviceNode, + uiFWCodeLen, + uiMemAllocFlags, + PVRSRV_DEVICE_FW_CODE_REGION, + "FwCodeRegion", + &psDevInfo->psRGXFWCodeMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to allocate fw code mem (%u)", + eError)); + goto failFWCodeMemDescAlloc; + } + + eError = 
DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc, + &psDevInfo->sFWCodeDevVAddrBase); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to acquire devVAddr for fw code mem (%u)", + eError)); + goto failFWCodeMemDescAqDevVirt; + } + + /* + * The FW code must be the first allocation in the firmware heap, otherwise + * the bootloader will not work (the FW will not be able to find the bootloader). + */ + PVR_ASSERT(psDevInfo->sFWCodeDevVAddrBase.uiAddr == RGX_FIRMWARE_HOST_MAIN_HEAP_BASE); + + /* + * Set up Allocation for FW data section + */ + uiMemAllocFlags = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp); + + eError = RGXAllocateFWMemoryRegion(psDeviceNode, + uiFWDataLen, + uiMemAllocFlags, + PVRSRV_DEVICE_FW_PRIVATE_DATA_REGION, + "FwDataRegion", + &psDevInfo->psRGXFWDataMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to allocate fw data mem (%u)", + eError)); + goto failFWDataMemDescAlloc; + } + + eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWDataMemDesc, + &psDevInfo->sFWDataDevVAddrBase); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to acquire devVAddr for fw data mem (%u)", + eError)); + goto failFWDataMemDescAqDevVirt; + } + + if (uiFWCorememCodeLen != 0) + { + /* + * Set up Allocation for FW coremem code section + */ + uiMemAllocFlags = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | 
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp); + + eError = RGXAllocateFWMemoryRegion(psDeviceNode, + uiFWCorememCodeLen, + uiMemAllocFlags, + PVRSRV_DEVICE_FW_COREMEM_CODE_REGION, + "FwCorememCodeRegion", + &psDevInfo->psRGXFWCorememCodeMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to allocate fw coremem code mem, size: %" IMG_INT64_FMTSPECd ", flags: %" PVRSRV_MEMALLOCFLAGS_FMTSPEC " (%u)", + uiFWCorememCodeLen, uiMemAllocFlags, eError)); + goto failFWCorememMemDescAlloc; + } + + eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc, + &psDevInfo->sFWCorememCodeDevVAddrBase); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to acquire devVAddr for fw coremem mem code (%u)", + eError)); + goto failFWCorememCodeMemDescAqDevVirt; + } + + eError = RGXSetFirmwareAddress(&psDevInfo->sFWCorememCodeFWAddr, + psDevInfo->psRGXFWCorememCodeMemDesc, + 0, RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", failFWCorememCodeMemDescFwAddr); + } + else + { + psDevInfo->sFWCorememCodeDevVAddrBase.uiAddr = 0; + psDevInfo->sFWCorememCodeFWAddr.ui32Addr = 0; + } + + if (uiFWCorememDataLen != 0) + { + /* + * Set up Allocation for FW coremem data section + */ + uiMemAllocFlags = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT) & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp); + + eError = RGXAllocateFWMemoryRegion(psDeviceNode, + uiFWCorememDataLen, + uiMemAllocFlags, + PVRSRV_DEVICE_FW_COREMEM_DATA_REGION, + 
"FwCorememDataRegion", + &psDevInfo->psRGXFWIfCorememDataStoreMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to allocate fw coremem data mem, " + "size: %" IMG_INT64_FMTSPECd ", flags: %" PVRSRV_MEMALLOCFLAGS_FMTSPEC " (%u)", + uiFWCorememDataLen, + uiMemAllocFlags, + eError)); + goto failFWCorememDataMemDescAlloc; + } + + eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc, + &psDevInfo->sFWCorememDataStoreDevVAddrBase); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to acquire devVAddr for fw coremem mem data (%u)", + eError)); + goto failFWCorememDataMemDescAqDevVirt; + } + + eError = RGXSetFirmwareAddress(&psDevInfo->sFWCorememDataStoreFWAddr, + psDevInfo->psRGXFWIfCorememDataStoreMemDesc, + 0, RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", failFWCorememDataMemDescFwAddr); + } + else + { + psDevInfo->sFWCorememDataStoreDevVAddrBase.uiAddr = 0; + psDevInfo->sFWCorememDataStoreFWAddr.ui32Addr = 0; + } + + return PVRSRV_OK; + +failFWCorememDataMemDescFwAddr: +failFWCorememDataMemDescAqDevVirt: + if (uiFWCorememDataLen != 0) + { + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfCorememDataStoreMemDesc); + psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL; + } +failFWCorememDataMemDescAlloc: +failFWCorememCodeMemDescFwAddr: +failFWCorememCodeMemDescAqDevVirt: + if (uiFWCorememCodeLen != 0) + { + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCorememCodeMemDesc); + psDevInfo->psRGXFWCorememCodeMemDesc = NULL; + } +failFWCorememMemDescAlloc: +failFWDataMemDescAqDevVirt: + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc); + psDevInfo->psRGXFWDataMemDesc = NULL; +failFWDataMemDescAlloc: +failFWCodeMemDescAqDevVirt: + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc); + psDevInfo->psRGXFWCodeMemDesc = NULL; +failFWCodeMemDescAlloc: + return eError; +} + +/* + AppHint parameter interface +*/ +static +PVRSRV_ERROR 
RGXFWTraceQueryFilter(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 *pui32Value) +{ + PVRSRV_ERROR eResult; + + eResult = PVRSRVRGXFWDebugQueryFWLogKM(NULL, psDeviceNode, pui32Value); + *pui32Value &= RGXFWIF_LOG_TYPE_GROUP_MASK; + return eResult; +} + +static +PVRSRV_ERROR RGXFWTraceQueryLogType(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 *pui32Value) +{ + PVRSRV_ERROR eResult; + + eResult = PVRSRVRGXFWDebugQueryFWLogKM(NULL, psDeviceNode, pui32Value); + if (PVRSRV_OK == eResult) + { + if (*pui32Value & RGXFWIF_LOG_TYPE_TRACE) + { + *pui32Value = 0; /* Trace */ + } + else + { + *pui32Value = 1; /* TBI */ + } + } + return eResult; +} + +static +PVRSRV_ERROR RGXFWTraceSetFilter(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 ui32Value) +{ + PVRSRV_ERROR eResult; + IMG_UINT32 ui32RGXFWLogType; + + eResult = RGXFWTraceQueryLogType(psDeviceNode, NULL, &ui32RGXFWLogType); + if (PVRSRV_OK == eResult) + { + if (0 == ui32RGXFWLogType) + { + BITMASK_SET(ui32Value, RGXFWIF_LOG_TYPE_TRACE); + } + eResult = PVRSRVRGXFWDebugSetFWLogKM(NULL, psDeviceNode, ui32Value); + } + return eResult; +} + +static +PVRSRV_ERROR RGXFWTraceSetLogType(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 ui32Value) +{ + PVRSRV_ERROR eResult; + IMG_UINT32 ui32RGXFWLogType = ui32Value; + + eResult = RGXFWTraceQueryFilter(psDeviceNode, NULL, &ui32RGXFWLogType); + if (PVRSRV_OK != eResult) + { + return eResult; + } + + /* 0 - trace, 1 - tbi */ + if (0 == ui32Value) + { + BITMASK_SET(ui32RGXFWLogType, RGXFWIF_LOG_TYPE_TRACE); + } +#if defined(SUPPORT_TBI_INTERFACE) + else if (1 == ui32Value) + { + BITMASK_UNSET(ui32RGXFWLogType, RGXFWIF_LOG_TYPE_TRACE); + } +#endif + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid parameter %u specified to set FW log type AppHint.", + __func__, ui32Value)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eResult = PVRSRVRGXFWDebugSetFWLogKM(NULL, 
psDeviceNode, ui32RGXFWLogType); + return eResult; +} + +static +PVRSRV_ERROR RGXQueryFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_BOOL *pbValue) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; + + *pbValue = (PVRSRV_MEMALLOCFLAG_POISON_ON_FREE == psDevInfo->ui32FWPoisonOnFreeFlag) + ? IMG_TRUE + : IMG_FALSE; + return PVRSRV_OK; +} + +static +PVRSRV_ERROR RGXSetFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_BOOL bValue) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; + psDevInfo->ui32FWPoisonOnFreeFlag = bValue + ? PVRSRV_MEMALLOCFLAG_POISON_ON_FREE + : 0UL; + + return PVRSRV_OK; +} + +/* + * RGXInitFirmware + */ +PVRSRV_ERROR +RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bEnableSignatureChecks, + IMG_UINT32 ui32SignatureChecksBufSize, + IMG_UINT32 ui32HWPerfFWBufSizeKB, + IMG_UINT64 ui64HWPerfFilter, + IMG_UINT32 ui32RGXFWAlignChecksArrLength, + IMG_UINT32 *pui32RGXFWAlignChecks, + IMG_UINT32 ui32ConfigFlags, + IMG_UINT32 ui32LogType, + IMG_UINT32 ui32FilterFlags, + IMG_UINT32 ui32JonesDisableMask, + IMG_UINT32 ui32HWRDebugDumpLimit, + IMG_UINT32 ui32KillingCtl, + IMG_UINT32 *pui32TPUTrilinearFracMask, + IMG_UINT32 *pui32USRMNumRegions, + IMG_UINT64 *pui64UVBRMNumRegions, + IMG_UINT32 ui32HWPerfCountersDataSize, + RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf, + FW_PERF_CONF eFirmwarePerf, + IMG_UINT32 ui32ConfigFlagsExt, + IMG_UINT32 ui32AvailablePowUnitsMask, + IMG_UINT32 ui32FwOsCfgFlags) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + IMG_BOOL bEnableFWPoisonOnFree = IMG_FALSE; + + eError = RGXSetupFirmware(psDeviceNode, + bEnableSignatureChecks, + ui32SignatureChecksBufSize, + ui32HWPerfFWBufSizeKB, + ui64HWPerfFilter, + ui32RGXFWAlignChecksArrLength, + pui32RGXFWAlignChecks, + ui32ConfigFlags, + ui32ConfigFlagsExt, + 
ui32FwOsCfgFlags, + ui32LogType, + ui32FilterFlags, + ui32JonesDisableMask, + ui32HWRDebugDumpLimit, + ui32HWPerfCountersDataSize, + ui32KillingCtl, + pui32TPUTrilinearFracMask, + pui32USRMNumRegions, + pui64UVBRMNumRegions, + eRGXRDPowerIslandingConf, + eFirmwarePerf, + ui32AvailablePowUnitsMask); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVRGXInitFirmwareKM: RGXSetupFirmware failed (%u)", + eError)); + goto failed_init_firmware; + } + + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableLogGroup, + RGXFWTraceQueryFilter, + RGXFWTraceSetFilter, + psDeviceNode, + NULL); + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_FirmwareLogType, + RGXFWTraceQueryLogType, + RGXFWTraceSetLogType, + psDeviceNode, + NULL); + } + + bEnableFWPoisonOnFree = PVRSRV_APPHINT_ENABLEFWPOISONONFREE; + + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableFWPoisonOnFree, + RGXQueryFWPoisonOnFree, + RGXSetFWPoisonOnFree, + psDeviceNode, + NULL); + + psDevInfo->ui32FWPoisonOnFreeFlag = bEnableFWPoisonOnFree + ? 
PVRSRV_MEMALLOCFLAG_POISON_ON_FREE + : 0UL; + + return PVRSRV_OK; + +failed_init_firmware: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +/* See device.h for function declaration */ +static PVRSRV_ERROR RGXAllocUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC **psMemDesc, + IMG_UINT32 *puiSyncPrimVAddr, + IMG_UINT32 *puiSyncPrimBlockSize) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + PVRSRV_ERROR eError; + RGXFWIF_DEV_VIRTADDR pFirmwareAddr; + IMG_DEVMEM_SIZE_T uiUFOBlockSize = sizeof(IMG_UINT32); + IMG_DEVMEM_ALIGN_T ui32UFOBlockAlign = sizeof(IMG_UINT32); + IMG_UINT32 ui32CoherencyFlag = 0; + + psDevInfo = psDeviceNode->pvDevice; + + /* Size and align are 'expanded' because we request an Exportalign allocation */ + eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap), + &uiUFOBlockSize, + &ui32UFOBlockAlign); + + if (eError != PVRSRV_OK) + { + goto e0; + } + + if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig) && + PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig)) + { + ui32CoherencyFlag = PVRSRV_MEMALLOCFLAG_CACHE_COHERENT; + } + else + { + ui32CoherencyFlag = PVRSRV_MEMALLOCFLAG_UNCACHED; + } + + eError = DevmemFwAllocateExportable(psDeviceNode, + uiUFOBlockSize, + ui32UFOBlockAlign, + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + ui32CoherencyFlag, + "FwExUFOBlock", + psMemDesc); + if (eError != PVRSRV_OK) + { + goto e0; + } + + eError = RGXSetFirmwareAddress(&pFirmwareAddr, *psMemDesc, 0, RFW_FWADDR_FLAG_NONE); + PVR_GOTO_IF_ERROR(eError, e1); + + *puiSyncPrimVAddr = pFirmwareAddr.ui32Addr; + *puiSyncPrimBlockSize = TRUNCATE_64BITS_TO_32BITS(uiUFOBlockSize); + + return PVRSRV_OK; + +e1: + DevmemFwUnmapAndFree(psDevInfo, *psMemDesc); +e0: + 
return eError; +} + +/* See device.h for function declaration */ +static void RGXFreeUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC *psMemDesc) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + /* + If the system has snooping of the device cache then the UFO block + might be in the cache so we need to flush it out before freeing + the memory + + When the device is being shutdown/destroyed we don't care anymore. + Several necessary data structures to issue a flush were destroyed + already. + */ + if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig) && + psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_DEINIT) + { + RGXFWIF_KCCB_CMD sFlushInvalCmd; + PVRSRV_ERROR eError; + IMG_UINT32 ui32kCCBCommandSlot; + + /* Schedule the SLC flush command ... */ +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit SLC flush and invalidate"); +#endif + sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL; + sFlushInvalCmd.uCmdData.sSLCFlushInvalData.ui64Size = 0; + sFlushInvalCmd.uCmdData.sSLCFlushInvalData.ui64Address = 0; + sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_TRUE; + sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE; + sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext.ui32Addr = 0; + + eError = RGXSendCommandWithPowLockAndGetKCCBSlot(psDevInfo, + &sFlushInvalCmd, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule SLC flush command with error (%u)", + __func__, + eError)); + } + else + { + /* Wait for the SLC flush to complete */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: SLC flush and invalidate aborted with error (%u)", + __func__, + eError)); + } + else if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & + RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE)) + { + 
PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__)); + } + } + } + + RGXUnsetFirmwareAddress(psMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psMemDesc); +} + +/* + DevDeInitRGX +*/ +PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + DEVICE_MEMORY_INFO *psDevMemoryInfo; + IMG_UINT32 ui32Temp=0; + + if (!psDevInfo) + { + /* Can happen if DevInitRGX failed */ + PVR_DPF((PVR_DBG_ERROR, "DevDeInitRGX: Null DevInfo")); + return PVRSRV_OK; + } + + if (psDevInfo->psRGXFWIfOsInit) + { + KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); + } + + eError = DeviceDepBridgeDeInit(psDevInfo->sDevFeatureCfg.ui64Features); + PVR_LOG_IF_ERROR(eError, "DeviceDepBridgeDeInit"); +#if defined(PDUMP) + DevmemIntFreeDefBackingPage(psDeviceNode, + &psDeviceNode->sDummyPage, + DUMMY_PAGE); + DevmemIntFreeDefBackingPage(psDeviceNode, + &psDeviceNode->sDevZeroPage, + DEV_ZERO_PAGE); +#endif + +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + OSAtomicWrite(&psDeviceNode->sDummyPage.atRefCounter, 0); + PVR_UNREFERENCED_PARAMETER(ui32Temp); + } + else +#else + { + /*Delete the Dummy page related info */ + ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDummyPage.atRefCounter); + if (0 != ui32Temp) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Dummy page reference counter is non zero (%u)", + __func__, + ui32Temp)); + PVR_ASSERT(0); + } + } +#endif + + /*Delete the Dummy page related info */ + ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDevZeroPage.atRefCounter); + if (0 != ui32Temp) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Zero page reference counter is non zero (%u)", + __func__, + ui32Temp)); + } + +#if defined(PDUMP) + if (NULL != psDeviceNode->sDummyPage.hPdumpPg) + { + PDUMPCOMMENT("Error dummy page handle is still active"); + } + + if (NULL != psDeviceNode->sDevZeroPage.hPdumpPg) + { + 
PDUMPCOMMENT("Error Zero page handle is still active"); + } +#endif + + /*The lock type need to be dispatch type here because it can be acquired from MISR (Z-buffer) path */ + OSLockDestroy(psDeviceNode->sDummyPage.psPgLock); + + /* Destroy the zero page lock */ + OSLockDestroy(psDeviceNode->sDevZeroPage.psPgLock); + + /* Unregister debug request notifiers first as they could depend on anything. */ + + RGXDebugDeinit(psDevInfo); + + + /* Cancel notifications to this device */ + PVRSRVUnregisterCmdCompleteNotify(psDeviceNode->hCmdCompNotify); + psDeviceNode->hCmdCompNotify = NULL; + + /* + * De-initialise in reverse order, so stage 2 init is undone first. + */ + if (psDevInfo->bDevInit2Done) + { + psDevInfo->bDevInit2Done = IMG_FALSE; + + eError = PVRSRVTQUnloadShaders(psDeviceNode); + if (eError != PVRSRV_OK) + { + return eError; + } + +#if !defined(NO_HARDWARE) + (void) SysUninstallDeviceLISR(psDevInfo->pvLISRData); + (void) OSUninstallMISR(psDevInfo->pvMISRData); + (void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR); + if (psDevInfo->pvAPMISRData != NULL) + { + (void) OSUninstallMISR(psDevInfo->pvAPMISRData); + } +#endif /* !NO_HARDWARE */ + + /* Remove the device from the power manager */ + eError = PVRSRVRemovePowerDevice(psDeviceNode); + if (eError != PVRSRV_OK) + { + return eError; + } + + psDevInfo->pfnGetGpuUtilStats = NULL; + OSLockDestroy(psDevInfo->hGPUUtilLock); + + /* Free DVFS Table */ + if (psDevInfo->psGpuDVFSTable != NULL) + { + OSFreeMem(psDevInfo->psGpuDVFSTable); + psDevInfo->psGpuDVFSTable = NULL; + } + + /* De-init Freelists/ZBuffers... 
*/ + OSLockDestroy(psDevInfo->hLockFreeList); + OSLockDestroy(psDevInfo->hLockZSBuffer); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* De-init work estimation lock */ + OSLockDestroy(psDevInfo->hWorkEstLock); +#endif + + /* Unregister MMU related stuff */ + eError = RGXMMUInit_Unregister(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "DevDeInitRGX: Failed RGXMMUInit_Unregister (0x%x)", + eError)); + return eError; + } + } + + /* UnMap Regs */ + if (psDevInfo->pvRegsBaseKM != NULL) + { +#if !defined(NO_HARDWARE) + OSUnMapPhysToLin((void __force *) psDevInfo->pvRegsBaseKM, + psDevInfo->ui32RegSize, + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); +#endif /* !NO_HARDWARE */ + psDevInfo->pvRegsBaseKM = NULL; + } + +#if 0 /* not required at this time */ + if (psDevInfo->hTimer) + { + eError = OSRemoveTimer(psDevInfo->hTimer); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "DevDeInitRGX: Failed to remove timer")); + return eError; + } + psDevInfo->hTimer = NULL; + } +#endif + + psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; + + RGXDeInitHeaps(psDevMemoryInfo); + + if (psDevInfo->psRGXFWCodeMemDesc) + { + /* Free fw code */ + PDUMPCOMMENT("Freeing FW code memory"); + DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc); + psDevInfo->psRGXFWCodeMemDesc = NULL; + } + if (psDevInfo->psRGXFWDataMemDesc) + { + /* Free fw data */ + PDUMPCOMMENT("Freeing FW data memory"); + DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWDataMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc); + psDevInfo->psRGXFWDataMemDesc = NULL; + } + if (psDevInfo->psRGXFWCorememCodeMemDesc) + { + /* Free fw core mem code */ + PDUMPCOMMENT("Freeing FW coremem code memory"); + DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCorememCodeMemDesc); + psDevInfo->psRGXFWCorememCodeMemDesc = NULL; + } + + if 
(psDevInfo->psRGXFWIfCorememDataStoreMemDesc) + { + /* Free fw core mem data */ + PDUMPCOMMENT("Freeing FW coremem data store memory"); + DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfCorememDataStoreMemDesc); + psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL; + } + + /* + Free the firmware allocations. + */ + RGXFreeFirmware(psDevInfo); + RGXDeInitDestroyFWKernelMemoryContext(psDeviceNode); + +#if defined(SUPPORT_VALIDATION) + RGXPowerDomainDeInitState(&psDevInfo->sPowerDomainState); +#endif + + /* De-initialise non-device specific (TL) users of RGX device memory */ + RGXHWPerfHostDeInit(psDevInfo); + eError = HTBDeInit(); + PVR_LOG_IF_ERROR(eError, "HTBDeInit"); + + /* destroy the stalled CCB locks */ + OSLockDestroy(psDevInfo->hCCBRecoveryLock); + OSLockDestroy(psDevInfo->hCCBStallCheckLock); + + /* destroy the context list locks */ + OSLockDestroy(psDevInfo->sRegCongfig.hLock); + OSLockDestroy(psDevInfo->hBPLock); + OSLockDestroy(psDevInfo->hRGXFWIfBufInitLock); + OSWRLockDestroy(psDevInfo->hRenderCtxListLock); + OSWRLockDestroy(psDevInfo->hComputeCtxListLock); + OSWRLockDestroy(psDevInfo->hTransferCtxListLock); + OSWRLockDestroy(psDevInfo->hTDMCtxListLock); + OSWRLockDestroy(psDevInfo->hKickSyncCtxListLock); + OSWRLockDestroy(psDevInfo->hMemoryCtxListLock); + OSSpinLockDestroy(psDevInfo->hLockKCCBDeferredCommandsList); + OSWRLockDestroy(psDevInfo->hCommonCtxtListLock); + + if (psDevInfo->hDebugFaultInfoLock != NULL) + { + OSLockDestroy(psDevInfo->hDebugFaultInfoLock); + } + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + if (psDevInfo->hMMUCtxUnregLock != NULL) + { + OSLockDestroy(psDevInfo->hMMUCtxUnregLock); + } + } + + /* Free device BVNC string */ + if (NULL != psDevInfo->sDevFeatureCfg.pszBVNCString) + { + OSFreeMem(psDevInfo->sDevFeatureCfg.pszBVNCString); + } + + /* DeAllocate devinfo */ + OSFreeMem(psDevInfo); + + psDeviceNode->pvDevice = 
NULL; + + return PVRSRV_OK; +} + +#if defined(PDUMP) +static +PVRSRV_ERROR RGXResetPDump(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice); + + psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE; + + return PVRSRV_OK; +} +#endif /* PDUMP */ + +static INLINE DEVMEM_HEAP_BLUEPRINT _blueprint_init(IMG_CHAR *name, + IMG_UINT64 heap_base, + IMG_DEVMEM_SIZE_T heap_length, + IMG_DEVMEM_SIZE_T heap_reserved_region_length, + IMG_UINT32 log2_import_alignment) +{ + void *pvAppHintState = NULL; + IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE; + IMG_UINT32 ui32GeneralNon4KHeapPageSize; + IMG_UINT32 ui32OSLog2PageShift = OSGetPageShift(); + IMG_UINT32 ui32OSPageSize; + + DEVMEM_HEAP_BLUEPRINT b = { + .pszName = name, + .sHeapBaseAddr.uiAddr = heap_base, + .uiHeapLength = heap_length, + .uiReservedRegionLength = heap_reserved_region_length, + .uiLog2DataPageSize = RGXHeapDerivePageSize(ui32OSLog2PageShift), + .uiLog2ImportAlignment = log2_import_alignment, + }; + + ui32OSPageSize = (1 << ui32OSLog2PageShift); + + /* Any heap length should at least match OS page size at the minimum or + * a multiple of OS page size */ + if ((b.uiHeapLength == 0) || (b.uiHeapLength & (ui32OSPageSize - 1))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid Heap \"%s\" Size: %" IMG_UINT64_FMTSPEC " (0x%" IMG_UINT64_FMTSPECx ")", + __func__, + b.pszName, b.uiHeapLength, b.uiHeapLength)); + PVR_DPF((PVR_DBG_ERROR, + "Heap Size should always be a non-zero value and a " + "multiple of OS Page Size:%u(0x%x)", + ui32OSPageSize, ui32OSPageSize)); + PVR_ASSERT(b.uiHeapLength >= ui32OSPageSize); + } + + + PVR_ASSERT(b.uiReservedRegionLength % RGX_HEAP_RESERVED_SIZE_GRANULARITY == 0); + + if (!OSStringNCompare(name, RGX_GENERAL_NON4K_HEAP_IDENT, sizeof(RGX_GENERAL_NON4K_HEAP_IDENT))) + { + OSCreateKMAppHintState(&pvAppHintState); + OSGetKMAppHintUINT32(pvAppHintState, GeneralNon4KHeapPageSize, + &ui32AppHintDefault, 
&ui32GeneralNon4KHeapPageSize); + switch (ui32GeneralNon4KHeapPageSize) + { + case (1<psDeviceMemoryHeap = OSAllocMem(sizeof(DEVMEM_HEAP_BLUEPRINT) * RGX_MAX_HEAP_ID); + if (psNewMemoryInfo->psDeviceMemoryHeap == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXRegisterDevice : Failed to alloc memory for DEVMEM_HEAP_BLUEPRINT")); + goto e0; + } + + /* Get the page size for the dummy page from the NON4K heap apphint */ + OSCreateKMAppHintState(&pvAppHintState); + OSGetKMAppHintUINT32(pvAppHintState, GeneralNon4KHeapPageSize, + &ui32AppHintDefault, &ui32GeneralNon4KHeapPageSize); + *pui32Log2DummyPgSize = ExactLog2(ui32GeneralNon4KHeapPageSize); + OSFreeKMAppHintState(pvAppHintState); + + /* Initialise the heaps */ + psDeviceMemoryHeapCursor = psNewMemoryInfo->psDeviceMemoryHeap; + + INIT_HEAP(GENERAL_SVM); + INIT_HEAP(GENERAL); + INIT_HEAP(GENERAL_NON4K); + INIT_HEAP(PDSCODEDATA); + INIT_HEAP(USCCODE); + INIT_HEAP(TQ3DPARAMETERS); + INIT_HEAP(SIGNALS); + INIT_HEAP(COMPONENT_CTRL); + INIT_HEAP(FBCDC); + INIT_HEAP(FBCDC_LARGE); + INIT_HEAP(PDS_INDIRECT_STATE); + INIT_HEAP(TEXTURE_STATE); + INIT_HEAP(TDM_TPU_YUV_COEFFS); + INIT_HEAP(VISIBILITY_TEST); + + /* vulkan capture replay buffer heap */ + INIT_HEAP_NAME(VK_CAPT_REPLAY_BUF, VK_CAPT_REPLAY_BUF); + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + INIT_FW_CONFIG_HEAP(GUEST); + INIT_FW_MAIN_HEAP(GUEST, META); + } + else + { + INIT_FW_MAIN_HEAP(HOST, META); + INIT_FW_CONFIG_HEAP(HOST); + } + + /* set the heap count */ + psNewMemoryInfo->ui32HeapCount = (IMG_UINT32)(psDeviceMemoryHeapCursor - psNewMemoryInfo->psDeviceMemoryHeap); + + PVR_ASSERT(psNewMemoryInfo->ui32HeapCount <= RGX_MAX_HEAP_ID); + + /* + In the new heap setup, we initialise 2 configurations: + 1 - One will be for the firmware only (index 1 in array) + a. This primarily has the firmware heap in it. + b. It also has additional guest OSID firmware heap(s) + - Only if the number of support firmware OSID > 1 + 2 - Others shall be for clients only (index 0 in array) + a. 
This has all the other client heaps in it. + */ + psNewMemoryInfo->uiNumHeapConfigs = 2; + psNewMemoryInfo->psDeviceMemoryHeapConfigArray = OSAllocMem(sizeof(DEVMEM_HEAP_CONFIG) * psNewMemoryInfo->uiNumHeapConfigs); + if (psNewMemoryInfo->psDeviceMemoryHeapConfigArray == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXRegisterDevice : Failed to alloc memory for DEVMEM_HEAP_CONFIG")); + goto e1; + } + + psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].pszName = "Default Heap Configuration"; + psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].uiNumHeaps = psNewMemoryInfo->ui32HeapCount - RGX_FIRMWARE_NUMBER_OF_FW_HEAPS; + psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].psHeapBlueprintArray = psNewMemoryInfo->psDeviceMemoryHeap; + + psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].pszName = "Firmware Heap Configuration"; + psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps = RGX_FIRMWARE_NUMBER_OF_FW_HEAPS; + psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].psHeapBlueprintArray = psDeviceMemoryHeapCursor - RGX_FIRMWARE_NUMBER_OF_FW_HEAPS; + + if (RGX_GET_FEATURE_VALUE(psDevInfo, MMU_VERSION) >= 4) + { + IMG_UINT32 i; + + /* + * Zero all MMU Page Size Range Config registers so they won't participate in + * address decode. Size field = 0 also means 4K page size which is default + * for all addresses not within the ranges defined by these registers. + */ + for (i = 0; i < ARRAY_SIZE(psDevInfo->aui64MMUPageSizeRangeValue); ++i) + { + psDevInfo->aui64MMUPageSizeRangeValue[i] = 0; + } + + /* + * Set up the first range only to reflect the Non4K general heap. + * In future, we could allow multiple simultaneous non4K page sizes. 
+ */ + if (ui32GeneralNon4KHeapPageSize != 4*1024) + { + psDevInfo->aui64MMUPageSizeRangeValue[0] = RGXMMUInit_GetConfigRangeValue(ui32GeneralNon4KHeapPageSize, + RGX_GENERAL_NON4K_HEAP_BASE, + RGX_GENERAL_NON4K_HEAP_SIZE); + } + } + +#if (RGX_NUM_OS_SUPPORTED > 1) + if (PVRSRV_VZ_MODE_IS(HOST)) + { + IMG_UINT32 ui32OSid; + + /* Create additional raw firmware heaps */ + for (ui32OSid = RGX_FIRST_RAW_HEAP_OSID; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + { + if (RGXInitFwRawHeap(psDeviceMemoryHeapCursor, ui32OSid) != PVRSRV_OK) + { + /* if any allocation fails, free previously allocated heaps and abandon initialisation */ + for (; ui32OSid > RGX_FIRST_RAW_HEAP_OSID; ui32OSid--) + { + RGXDeInitFwRawHeap(psDeviceMemoryHeapCursor); + psDeviceMemoryHeapCursor--; + } + goto e1; + } + + /* Append additional firmware heaps to host driver firmware context heap configuration */ + psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps += 1; + + /* advance to the next heap */ + psDeviceMemoryHeapCursor++; + } + } +#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ + + return PVRSRV_OK; +e1: + OSFreeMem(psNewMemoryInfo->psDeviceMemoryHeap); +e0: + return PVRSRV_ERROR_OUT_OF_MEMORY; +} + +#undef INIT_HEAP +#undef INIT_HEAP_NAME + + +static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo) +{ +#if (RGX_NUM_OS_SUPPORTED > 1) + if (PVRSRV_VZ_MODE_IS(HOST)) + { + IMG_UINT32 ui32OSid; + DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor = psDevMemoryInfo->psDeviceMemoryHeap; + + /* Delete all guest firmware heaps */ + for (ui32OSid = RGX_FIRST_RAW_HEAP_OSID; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + { + RGXDeInitFwRawHeap(psDeviceMemoryHeapCursor); + psDeviceMemoryHeapCursor++; + } + } +#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ + + OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeapConfigArray); + OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeap); +} + +static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PHYS_HEAP 
*psPhysHeap; + PHYS_HEAP_TYPE eHeapType; + IMG_UINT64 uPhysheapSize; + IMG_UINT32 ui32RegionCount; + IMG_CPU_PHYADDR sCpuPAddr; + IMG_DEV_PHYADDR sDevPAddr; + PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; + + /* Initialise the objects used to manage the physical firmware heap */ + psPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]; + eHeapType = PhysHeapGetType(psPhysHeap); + + if (eHeapType == PHYS_HEAP_TYPE_UMA) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap uses OS System memory (UMA)", __func__)); + psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = PhysmemNewOSRamBackedPMR; + } + else + { + IMG_UINT64 uRawHeapBase; + RA_BASE_T uFwCfgSubHeapBase, uFwMainSubHeapBase; + const IMG_UINT64 ui64ExpectedHeapSize = RGX_FIRMWARE_RAW_HEAP_SIZE; + const RA_LENGTH_T uFwCfgSubHeapSize = RGX_FIRMWARE_CONFIG_HEAP_SIZE; + const RA_LENGTH_T uFwMainSubHeapSize = RGX_FIRMWARE_META_MAIN_HEAP_SIZE; + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap uses local memory managed by the driver (LMA)", __func__)); + ui32RegionCount = PhysHeapNumberOfRegions(psPhysHeap); + if (ui32RegionCount > 1) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Firmware heap currently support 1 region only. " + "PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL contains %u regions. 
Only the 1st will be used.", __func__, ui32RegionCount)); + } + + eError = PhysHeapRegionGetDevPAddr(psPhysHeap, 0, &sDevPAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapRegionGetDevPAddr", ErrorDeinit); + + eError = PhysHeapRegionGetCpuPAddr(psPhysHeap, 0, &sCpuPAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapRegionGetCpuPAddr", ErrorDeinit); + + eError = PhysHeapRegionGetSize(psPhysHeap, 0, &uPhysheapSize); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapRegionGetSize", ErrorDeinit); + PVR_LOG_GOTO_IF_FALSE(uPhysheapSize >= ui64ExpectedHeapSize, + "Invalid firmware physical heap size.", ErrorDeinit); + + /* Now we construct RAs to manage the FW heaps */ + uRawHeapBase = sDevPAddr.uiAddr; + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + /* Guest subheap layout: Config + Main */ + uFwCfgSubHeapBase = uRawHeapBase; + uFwMainSubHeapBase = uFwCfgSubHeapBase + uFwCfgSubHeapSize; + } + else + { + /* Native/Host subheap layout: Main + (optional MIPS reserved range) + Config */ + uFwMainSubHeapBase = uRawHeapBase; + uFwCfgSubHeapBase = uRawHeapBase + RGX_FIRMWARE_RAW_HEAP_SIZE - uFwCfgSubHeapSize; + } + + eError = PVRSRVCreateRegionRA(psDevConfig, + &psDeviceNode->psKernelFwMainMemArena, + psDeviceNode->szKernelFwMainRAName, + sCpuPAddr.uiAddr + (uFwMainSubHeapBase - uRawHeapBase), + uFwMainSubHeapBase, + uFwMainSubHeapSize, + 0, + "Fw Main subheap"); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVCreateRegionRA(FwMain)", ErrorDeinit); + + eError = PVRSRVCreateRegionRA(psDevConfig, + &psDeviceNode->psKernelFwConfigMemArena, + psDeviceNode->szKernelFwConfigRAName, + sCpuPAddr.uiAddr + (uFwCfgSubHeapBase - uRawHeapBase), + uFwCfgSubHeapBase, + uFwCfgSubHeapSize, + 0, + "Fw Cfg subheap"); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVCreateRegionRA(FwCfg)", ErrorDeinit); + + psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = PhysmemNewLocalRamBackedPMR; + +#if defined(SUPPORT_AUTOVZ) + if (PVRSRV_VZ_MODE_IS(HOST)) + { + /* 1 Mb can hold the maximum amount of page tables for the memory 
shared between the firmware and all KM drivers: + * MAX(RAW_HEAP_SIZE) = 32 Mb; MAX(NUMBER_OS) = 8; Total shared memory = 256 Mb; + * MMU objects required: 65536 PTEs; 16 PDEs; 1 PCE; */ + RA_LENGTH_T uMaxFwMmuPageTableSize = 1 * 1024 * 1024; + + /* By default the firmware MMU's page tables are allocated from the same carveout memory as the firmware heap. + * If a different base address is specified for this reserved range, use the overriding define instead. */ +#if defined(PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR) + RA_BASE_T uFwMmuReservedMemStart = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR; +#else + RA_BASE_T uFwMmuReservedMemStart = uRawHeapBase + (RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED); +#endif + + eError = PVRSRVCreateRegionRA(psDevConfig, + &psDeviceNode->psFwMMUReservedMemArena, + NULL, + 0, + uFwMmuReservedMemStart, + uMaxFwMmuPageTableSize, + 0, + "Fw MMU Mem "); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVCreateRegionRA(FwMMU)", ErrorDeinit); + } +#endif + } + + return eError; + +ErrorDeinit: + PVR_ASSERT(IMG_FALSE); + PVRSRVPhysMemHeapsDeinit(psDeviceNode); + + return eError; +} + +/* + RGXRegisterDevice +*/ +PVRSRV_ERROR RGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + DEVICE_MEMORY_INFO *psDevMemoryInfo; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PDUMPCOMMENT("Device Name: %s", psDeviceNode->psDevConfig->pszName); + + if (psDeviceNode->psDevConfig->pszVersion) + { + PDUMPCOMMENT("Device Version: %s", psDeviceNode->psDevConfig->pszVersion); + } + + PDUMPCOMMENT("RGX Initialisation (Part 1)"); + + /********************* + * Device node setup * + *********************/ + /* Setup static data and callbacks on the device agnostic device node */ +#if defined(PDUMP) + psDeviceNode->sDevId.pszPDumpRegName = RGX_PDUMPREG_NAME; + psDeviceNode->sDevId.pszPDumpDevName = PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]); + psDeviceNode->pfnPDumpInitDevice = &RGXResetPDump; + 
psDeviceNode->ui64FBCClearColour = RGX_FBC_CC_DEFAULT; + +#endif /* PDUMP */ + + OSAtomicWrite(&psDeviceNode->eHealthStatus, PVRSRV_DEVICE_HEALTH_STATUS_OK); + OSAtomicWrite(&psDeviceNode->eHealthReason, PVRSRV_DEVICE_HEALTH_REASON_NONE); + + psDeviceNode->pfnDevSLCFlushRange = RGXSLCFlushRange; + psDeviceNode->pfnInvalFBSCTable = RGXInvalidateFBSCTable; + + psDeviceNode->pfnValidateOrTweakPhysAddrs = NULL; + + psDeviceNode->pfnMMUCacheInvalidate = RGXMMUCacheInvalidate; + + psDeviceNode->pfnMMUCacheInvalidateKick = RGXMMUCacheInvalidateKick; + + /* Register RGX to receive notifies when other devices complete some work */ + PVRSRVRegisterCmdCompleteNotify(&psDeviceNode->hCmdCompNotify, &RGXScheduleProcessQueuesKM, psDeviceNode); + + psDeviceNode->pfnInitDeviceCompatCheck = &RGXDevInitCompatCheck; + + /* Register callbacks for creation of device memory contexts */ + psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext; + psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext; + + /* Register callbacks for Unified Fence Objects */ + psDeviceNode->pfnAllocUFOBlock = RGXAllocUFOBlock; + psDeviceNode->pfnFreeUFOBlock = RGXFreeUFOBlock; + + /* Register callback for checking the device's health */ + psDeviceNode->pfnUpdateHealthStatus = RGXUpdateHealthStatus; + + /* Register method to service the FW HWPerf buffer */ + psDeviceNode->pfnServiceHWPerf = RGXHWPerfDataStoreCB; + + /* Register callback for getting the device version information string */ + psDeviceNode->pfnDeviceVersionString = RGXDevVersionString; + + /* Register callback for getting the device clock speed */ + psDeviceNode->pfnDeviceClockSpeed = RGXDevClockSpeed; + + /* Register callback for soft resetting some device modules */ + psDeviceNode->pfnSoftReset = RGXSoftReset; + + /* Register callback for resetting the HWR logs */ + psDeviceNode->pfnResetHWRLogs = RGXResetHWRLogs; + + /* Register callback for resetting the HWR logs */ + psDeviceNode->pfnVerifyBVNC = RGXVerifyBVNC; + + 
/* Register callback for checking alignment of UM structures */ + psDeviceNode->pfnAlignmentCheck = RGXAlignmentCheck; + + /*Register callback for checking the supported features and getting the + * corresponding values */ + psDeviceNode->pfnCheckDeviceFeature = RGXBvncCheckFeatureSupported; + psDeviceNode->pfnGetDeviceFeatureValue = RGXBvncGetSupportedFeatureValue; + + /* Callback for checking if system layer supports FBC 3.1 */ + psDeviceNode->pfnHasFBCDCVersion31 = NULL; + + /* Callback for getting the MMU device attributes */ + psDeviceNode->pfnGetMMUDeviceAttributes = RGXDevMMUAttributes; + +#if defined(PDUMP) && defined(SUPPORT_SECURITY_VALIDATION) + /* Callback for getting a secure PDump memory space name */ + psDeviceNode->pfnGetSecurePDumpMemspace = RGXGetSecurePDumpMemspace; +#endif + + /* Register callback for initialising device-specific physical memory heaps */ + psDeviceNode->pfnPhysMemDeviceHeapsInit = RGXPhysMemDeviceHeapsInit; + + /* Set up required support for dummy page */ + OSAtomicWrite(&(psDeviceNode->sDummyPage.atRefCounter), 0); + OSAtomicWrite(&(psDeviceNode->sDevZeroPage.atRefCounter), 0); + + /* Set the order to 0 */ + psDeviceNode->sDummyPage.sPageHandle.uiOrder = 0; + psDeviceNode->sDevZeroPage.sPageHandle.uiOrder = 0; + + /* Set the size of the Dummy page to zero */ + psDeviceNode->sDummyPage.ui32Log2PgSize = 0; + + /* Set the size of the Zero page to zero */ + psDeviceNode->sDevZeroPage.ui32Log2PgSize = 0; + + /* Set the Dummy page phys addr */ + psDeviceNode->sDummyPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR; + + /* Set the Zero page phys addr */ + psDeviceNode->sDevZeroPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR; + + /* The lock can be acquired from MISR (Z-buffer) path */ + eError = OSLockCreate(&psDeviceNode->sDummyPage.psPgLock); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create dummy page lock", __func__)); + return eError; + } + + /* Create the lock for zero page */ + eError = 
OSLockCreate(&psDeviceNode->sDevZeroPage.psPgLock); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create Zero page lock", __func__)); + goto free_dummy_page; + } +#if defined(PDUMP) + psDeviceNode->sDummyPage.hPdumpPg = NULL; + psDeviceNode->sDevZeroPage.hPdumpPg = NULL; +#endif + + psDeviceNode->pfnHasFBCDCVersion31 = RGXSystemHasFBCDCVersion31; + + /* The device shared-virtual-memory heap address-space size is stored here for faster + look-up without having to walk the device heap configuration structures during + client device connection (i.e. this size is relative to a zero-based offset) */ + psDeviceNode->ui64GeneralSVMHeapTopVA = RGX_GENERAL_SVM_HEAP_BASE + RGX_GENERAL_SVM_HEAP_SIZE; + + /********************* + * Device info setup * + *********************/ + /* Allocate device control block */ + psDevInfo = OSAllocZMem(sizeof(*psDevInfo)); + if (psDevInfo == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "DevInitRGXPart1 : Failed to alloc memory for DevInfo")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* create locks for the context lists stored in the DevInfo structure. 
+ * these lists are modified on context create/destroy and read by the + * watchdog thread + */ + + eError = OSWRLockCreate(&(psDevInfo->hRenderCtxListLock)); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create render context list lock", __func__)); + goto e0; + } + + eError = OSWRLockCreate(&(psDevInfo->hComputeCtxListLock)); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create compute context list lock", __func__)); + goto e1; + } + + eError = OSWRLockCreate(&(psDevInfo->hTransferCtxListLock)); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create transfer context list lock", __func__)); + goto e2; + } + + eError = OSWRLockCreate(&(psDevInfo->hTDMCtxListLock)); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create TDM context list lock", __func__)); + goto e3; + } + + eError = OSWRLockCreate(&(psDevInfo->hKickSyncCtxListLock)); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create kick sync context list lock", __func__)); + goto e4; + } + + eError = OSWRLockCreate(&(psDevInfo->hMemoryCtxListLock)); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create memory context list lock", __func__)); + goto e5; + } + + eError = OSSpinLockCreate(&psDevInfo->hLockKCCBDeferredCommandsList); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to KCCB deferred commands list lock", __func__)); + goto e6; + } + dllist_init(&(psDevInfo->sKCCBDeferredCommandsListHead)); + + dllist_init(&(psDevInfo->sRenderCtxtListHead)); + dllist_init(&(psDevInfo->sComputeCtxtListHead)); + dllist_init(&(psDevInfo->sTDMCtxtListHead)); + dllist_init(&(psDevInfo->sKickSyncCtxtListHead)); + + dllist_init(&(psDevInfo->sCommonCtxtListHead)); + psDevInfo->ui32CommonCtxtCurrentID = 1; + + + eError = OSWRLockCreate(&psDevInfo->hCommonCtxtListLock); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create common 
context list lock", __func__)); + goto e7; + } + + eError = OSLockCreate(&psDevInfo->sRegCongfig.hLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create register configuration lock", __func__)); + goto e8; + } + + eError = OSLockCreate(&psDevInfo->hBPLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for break points", __func__)); + goto e9; + } + + eError = OSLockCreate(&psDevInfo->hRGXFWIfBufInitLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for trace buffers", __func__)); + goto e10; + } + + eError = OSLockCreate(&psDevInfo->hCCBStallCheckLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create stalled CCB checking lock", __func__)); + goto e11; + } + eError = OSLockCreate(&psDevInfo->hCCBRecoveryLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create stalled CCB recovery lock", __func__)); + goto e12; + } + + dllist_init(&psDevInfo->sMemoryContextList); + + /* initialise ui32SLRHoldoffCounter */ + if (RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS > DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT) + { + psDevInfo->ui32SLRHoldoffCounter = RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS / DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT; + } + else + { + psDevInfo->ui32SLRHoldoffCounter = 0; + } + + /* Setup static data and callbacks on the device specific device info */ + psDevInfo->psDeviceNode = psDeviceNode; + + psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; + psDevInfo->pvDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap; + + /* + * Map RGX Registers + */ + psDevInfo->ui32RegSize = psDeviceNode->psDevConfig->ui32RegsSize; + psDevInfo->sRegsPhysBase = psDeviceNode->psDevConfig->sRegsCpuPBase; + +#if !defined(NO_HARDWARE) + psDevInfo->pvRegsBaseKM = (void __iomem *) OSMapPhysToLin(psDeviceNode->psDevConfig->sRegsCpuPBase, + psDeviceNode->psDevConfig->ui32RegsSize, + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); + + if 
(psDevInfo->pvRegsBaseKM == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVRGXInitDevPart2KM: Failed to create RGX register mapping")); + eError = PVRSRV_ERROR_BAD_MAPPING; + goto e13; + } +#else + psDevInfo->pvRegsBaseKM = NULL; +#endif /* !NO_HARDWARE */ + + psDeviceNode->pvDevice = psDevInfo; + + + eError = RGXBvncInitialiseConfiguration(psDeviceNode); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Unsupported HW device detected by driver", + __func__)); + goto e14; + } + + /* pdump info about the core */ + PDUMPCOMMENT("RGX Version Information (KM): %d.%d.%d.%d", + psDevInfo->sDevFeatureCfg.ui32B, + psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, + psDevInfo->sDevFeatureCfg.ui32C); + + /* Configure MMU specific stuff */ + RGXMMUInit_Register(psDeviceNode); + + eError = RGXInitHeaps(psDevInfo, psDevMemoryInfo, + &psDeviceNode->sDummyPage.ui32Log2PgSize); + if (eError != PVRSRV_OK) + { + goto e14; + } + + /*Set the zero page size as needed for the heap with biggest page size */ + psDeviceNode->sDevZeroPage.ui32Log2PgSize = psDeviceNode->sDummyPage.ui32Log2PgSize; + + eError = RGXHWPerfInit(psDevInfo); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInit", e14); + +#if defined(SUPPORT_VALIDATION) + eError = RGXPowerDomainInitState(&psDevInfo->sPowerDomainState, + psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount); + if (eError != PVRSRV_OK) + { + goto e15; + } +#if defined(SUPPORT_SOC_TIMER) && defined(PDUMP) && defined(NO_HARDWARE) + { + IMG_BOOL ui32AppHintDefault = IMG_FALSE; + IMG_BOOL bInitSocTimer; + void *pvAppHintState = NULL; + + OSCreateKMAppHintState(&pvAppHintState); + OSGetKMAppHintBOOL(pvAppHintState, ValidateSOCUSCTimer, &ui32AppHintDefault, &bInitSocTimer); + OSFreeKMAppHintState(pvAppHintState); + + if (bInitSocTimer) + { + PVRSRVPdumpInitSOCUSCTimer(); + } + } +#endif +#endif + + /* Register callback for dumping debug info */ + eError = RGXDebugInit(psDevInfo); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXDebugInit", e16); + +#if 
defined(PDUMP) + eError = DevmemIntAllocDefBackingPage(psDeviceNode, + &psDeviceNode->sDummyPage, + PVR_DUMMY_PAGE_INIT_VALUE, + DUMMY_PAGE, + IMG_TRUE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate dummy page.", __func__)); + goto e17; + } + eError = DevmemIntAllocDefBackingPage(psDeviceNode, + &psDeviceNode->sDevZeroPage, + PVR_ZERO_PAGE_INIT_VALUE, + DEV_ZERO_PAGE, + IMG_TRUE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate Zero page.", __func__)); + goto e18; + } +#endif + + + /* Initialise the device dependent bridges */ + eError = DeviceDepBridgeInit(psDevInfo->sDevFeatureCfg.ui64Features); + PVR_LOG_IF_ERROR(eError, "DeviceDepBridgeInit"); + + return PVRSRV_OK; + +#if defined(PDUMP) +e18: + DevmemIntFreeDefBackingPage(psDeviceNode, + &psDeviceNode->sDummyPage, + DUMMY_PAGE); +e17: + RGXDebugDeinit(psDevInfo); +#endif +e16: +#if defined(SUPPORT_VALIDATION) + RGXPowerDomainDeInitState(&psDevInfo->sPowerDomainState); +e15: +#endif + RGXHWPerfDeinit(psDevInfo); +e14: +#if !defined(NO_HARDWARE) + OSUnMapPhysToLin((void __force *) psDevInfo->pvRegsBaseKM, + psDevInfo->ui32RegSize, + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); + +e13: +#endif /* !NO_HARDWARE */ + OSLockDestroy(psDevInfo->hCCBRecoveryLock); +e12: + OSLockDestroy(psDevInfo->hCCBStallCheckLock); +e11: + OSLockDestroy(psDevInfo->hRGXFWIfBufInitLock); +e10: + OSLockDestroy(psDevInfo->hBPLock); +e9: + OSLockDestroy(psDevInfo->sRegCongfig.hLock); +e8: + OSWRLockDestroy(psDevInfo->hCommonCtxtListLock); +e7: + OSSpinLockDestroy(psDevInfo->hLockKCCBDeferredCommandsList); +e6: + OSWRLockDestroy(psDevInfo->hMemoryCtxListLock); +e5: + OSWRLockDestroy(psDevInfo->hKickSyncCtxListLock); +e4: + OSWRLockDestroy(psDevInfo->hTDMCtxListLock); +e3: + OSWRLockDestroy(psDevInfo->hTransferCtxListLock); +e2: + OSWRLockDestroy(psDevInfo->hComputeCtxListLock); +e1: + OSWRLockDestroy(psDevInfo->hRenderCtxListLock); +e0: + OSFreeMem(psDevInfo); + + /* Destroy the zero 
page lock created above */ + OSLockDestroy(psDeviceNode->sDevZeroPage.psPgLock); + +free_dummy_page: + /* Destroy the dummy page lock created above */ + OSLockDestroy(psDeviceNode->sDummyPage.psPgLock); + + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +IMG_PCHAR RGXDevBVNCString(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_PCHAR psz = psDevInfo->sDevFeatureCfg.pszBVNCString; + if (NULL == psz) + { + IMG_CHAR pszBVNCInfo[RGX_HWPERF_MAX_BVNC_LEN]; + size_t uiBVNCStringSize; + size_t uiStringLength; + + uiStringLength = OSSNPrintf(pszBVNCInfo, RGX_HWPERF_MAX_BVNC_LEN, "%d.%d.%d.%d", + psDevInfo->sDevFeatureCfg.ui32B, + psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, + psDevInfo->sDevFeatureCfg.ui32C); + PVR_ASSERT(uiStringLength < RGX_HWPERF_MAX_BVNC_LEN); + + uiBVNCStringSize = (uiStringLength + 1) * sizeof(IMG_CHAR); + psz = OSAllocMem(uiBVNCStringSize); + if (NULL != psz) + { + OSCachedMemCopy(psz, pszBVNCInfo, uiBVNCStringSize); + psDevInfo->sDevFeatureCfg.pszBVNCString = psz; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Allocating memory for BVNC Info string failed", + __func__)); + } + } + + return psz; +} + +/*************************************************************************/ /*! +@Function RGXDevVersionString +@Description Gets the version string for the given device node and returns + a pointer to it in ppszVersionString. It is then the + responsibility of the caller to free this memory. 
+@Input psDeviceNode Device node from which to obtain the + version string +@Output ppszVersionString Contains the version string upon return +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_CHAR **ppszVersionString) +{ +#if defined(COMPAT_BVNC_MASK_B) || defined(COMPAT_BVNC_MASK_V) || defined(COMPAT_BVNC_MASK_N) || defined(COMPAT_BVNC_MASK_C) || defined(NO_HARDWARE) || defined(EMULATOR) + const IMG_CHAR szFormatString[] = "GPU variant BVNC: %s (SW)"; +#else + const IMG_CHAR szFormatString[] = "GPU variant BVNC: %s (HW)"; +#endif + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_PCHAR pszBVNC; + size_t uiStringLength; + + if (psDeviceNode == NULL || ppszVersionString == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + pszBVNC = RGXDevBVNCString(psDevInfo); + + if (NULL == pszBVNC) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + uiStringLength = OSStringLength(pszBVNC); + uiStringLength += (sizeof(szFormatString) - 2); /* sizeof includes the null, -2 for "%s" */ + *ppszVersionString = OSAllocMem(uiStringLength * sizeof(IMG_CHAR)); + if (*ppszVersionString == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + OSSNPrintf(*ppszVersionString, uiStringLength, szFormatString, + pszBVNC); + + return PVRSRV_OK; +} + +/**************************************************************************/ /*! +@Function RGXDevClockSpeed +@Description Gets the clock speed for the given device node and returns + it in pui32RGXClockSpeed. 
+@Input psDeviceNode Device node +@Output pui32RGXClockSpeed Variable for storing the clock speed +@Return PVRSRV_ERROR +*/ /***************************************************************************/ +static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_PUINT32 pui32RGXClockSpeed) +{ + RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; + + /* get clock speed */ + *pui32RGXClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed; + + return PVRSRV_OK; +} + +#if (RGX_NUM_OS_SUPPORTED > 1) +/*! + ******************************************************************************* + + @Function RGXInitFwRawHeap + + @Description Called to perform additional initialisation + ******************************************************************************/ +static PVRSRV_ERROR RGXInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32OSid) +{ + IMG_UINT32 uiStringLength; + IMG_UINT32 uiStringLengthMax = 32; + + uiStringLength = MIN(sizeof(RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT), uiStringLengthMax + 1); + + /* Start by allocating memory for this OSID heap identification string */ + psDevMemHeap->pszName = OSAllocMem(uiStringLength * sizeof(IMG_CHAR)); + if (psDevMemHeap->pszName == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* Append the OSID number to the RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT string */ + OSSNPrintf((IMG_CHAR *)psDevMemHeap->pszName, uiStringLength, RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSid); + + /* Use the common blueprint template support function to initialise the heap */ + *psDevMemHeap = _blueprint_init((IMG_CHAR *)psDevMemHeap->pszName, + RGX_FIRMWARE_RAW_HEAP_BASE + (ui32OSid * RGX_FIRMWARE_RAW_HEAP_SIZE), + RGX_FIRMWARE_RAW_HEAP_SIZE, + 0, + 0); + + return PVRSRV_OK; +} + +/*! 
+ ******************************************************************************* + + @Function RGXDeInitFwRawHeap + + @Description Called to perform additional deinitialisation + ******************************************************************************/ +static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap) +{ + IMG_UINT64 uiBase = RGX_FIRMWARE_RAW_HEAP_BASE + RGX_FIRMWARE_RAW_HEAP_SIZE; + IMG_UINT64 uiSpan = uiBase + ((RGX_NUM_OS_SUPPORTED - 1) * RGX_FIRMWARE_RAW_HEAP_SIZE); + + /* Safe to do as the guest firmware heaps are last in the list */ + if (psDevMemHeap->sHeapBaseAddr.uiAddr >= uiBase && + psDevMemHeap->sHeapBaseAddr.uiAddr < uiSpan) + { + void *pszName = (void*)psDevMemHeap->pszName; + OSFreeMem(pszName); + } +} +#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ + +/****************************************************************************** + End of file (rgxinit.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxinit.h b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxinit.h new file mode 100644 index 000000000000..5ea9116fc49d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxinit.h @@ -0,0 +1,340 @@ +/*************************************************************************/ /*! +@File +@Title RGX initialisation header file +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the RGX initialisation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__RGXINIT_H__) +#define __RGXINIT_H__ + +#include "connection_server.h" +#include "pvrsrv_error.h" +#include "img_types.h" +#include "device.h" +#include "rgxdevice.h" +#include "rgx_bridge.h" +#include "fwload.h" + +#if defined(LINUX) +#define OS_FW_VERIFY_FUNCTION OSVerifyFirmware +#else +#define OS_FW_VERIFY_FUNCTION NULL +#endif + +/*! +******************************************************************************* + + @Function RGXInitDevPart2 + + @Description + + Second part of server-side RGX initialisation + + @Input psDeviceNode - device node + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32DeviceFlags, + IMG_UINT32 ui32HWPerfHostBufSizeKB, + IMG_UINT32 ui32HWPerfHostFilter, + RGX_ACTIVEPM_CONF eActivePMConf, + IMG_UINT32 ui32AvailableSPUMask); + +PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEVMEM_SIZE_T ui32FWCodeLen, + IMG_DEVMEM_SIZE_T ui32FWDataLen, + IMG_DEVMEM_SIZE_T uiFWCorememCodeLen, + IMG_DEVMEM_SIZE_T uiFWCorememDataLen); + + +/*! 
+******************************************************************************* + + @Function RGXInitFirmware + + @Description + + Server-side RGX firmware initialisation + + @Input psDeviceNode - device node + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR +RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bEnableSignatureChecks, + IMG_UINT32 ui32SignatureChecksBufSize, + IMG_UINT32 ui32HWPerfFWBufSizeKB, + IMG_UINT64 ui64HWPerfFilter, + IMG_UINT32 ui32RGXFWAlignChecksArrLength, + IMG_UINT32 *pui32RGXFWAlignChecks, + IMG_UINT32 ui32ConfigFlags, + IMG_UINT32 ui32LogType, + IMG_UINT32 ui32FilterFlags, + IMG_UINT32 ui32JonesDisableMask, + IMG_UINT32 ui32HWRDebugDumpLimit, + IMG_UINT32 ui32KillingCtl, + IMG_UINT32 *pui32TPUTrilinearFracMask, + IMG_UINT32 *pui32USRMNumRegions, + IMG_UINT64 *pui64UVBRMNumRegions, + IMG_UINT32 ui32HWPerfCountersDataSize, + RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf, + FW_PERF_CONF eFirmwarePerf, + IMG_UINT32 ui32ConfigFlagsExt, + IMG_UINT32 ui32AvailableSPUMask, + IMG_UINT32 ui32FwOsCfgFlags); + + +/*! +******************************************************************************* + + @Function RGXLoadAndGetFWData + + @Description + + Load FW and return pointer to FW data. + + @Input psDeviceNode - device node + + @Input ppsRGXFW - fw pointer + + @Return void * - pointer to FW data + +******************************************************************************/ +const void *RGXLoadAndGetFWData(PVRSRV_DEVICE_NODE *psDeviceNode, OS_FW_IMAGE **ppsRGXFW); + +#if defined(PDUMP) +/*! 
+******************************************************************************* + + @Function RGXInitHWPerfCounters + + @Description + + Initialisation of the performance counters + + @Input psDeviceNode - device node + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXInitHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode); +#endif + +/*! +******************************************************************************* + + @Function RGXRegisterDevice + + @Description + + Registers the device with the system + + @Input: psDeviceNode - device node + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode); + +/*! +******************************************************************************* + + @Function RGXDevBVNCString + + @Description + + Returns the Device BVNC string. It will allocate and fill it first, if necessary. + + @Input: psDevInfo - device info (must not be null) + + @Return IMG_PCHAR - pointer to BVNC string + +******************************************************************************/ +IMG_PCHAR RGXDevBVNCString(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! +******************************************************************************* + + @Function DevDeInitRGX + + @Description + + Reset and deinitialise Chip + + @Input psDeviceNode - device info. structure + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode); + + +#if !defined(NO_HARDWARE) + +void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! +******************************************************************************* + + @Function SORgxGpuUtilStatsRegister + + @Description SO Interface function called from the OS layer implementation. 
+ Initialise data used to compute GPU utilisation statistics + for a particular user (identified by the handle passed as + argument). This function must be called only once for each + different user/handle. + + @Input phGpuUtilUser - Pointer to handle used to identify a user of + RGXGetGpuUtilStats + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser); + + +/*! +******************************************************************************* + + @Function SORgxGpuUtilStatsUnregister + + @Description SO Interface function called from the OS layer implementation. + Free data previously used to compute GPU utilisation statistics + for a particular user (identified by the handle passed as + argument). + + @Input hGpuUtilUser - Handle used to identify a user of + RGXGetGpuUtilStats + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser); +#endif /* !defined(NO_HARDWARE) */ + + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +/*! +******************************************************************************* + + @Function PVRSRVGPUVIRTPopulateLMASubArenasKM + + @Description Populates the LMA arenas based on the min max values passed by + the client during initialization. GPU Virtualisation Validation + only. + + @Input psDeviceNode : Pointer to a device info structure. 
+ ui32NumElements : Total number of min / max values passed by + the client + pui32Elements : The array containing all the min / max values + passed by the client, all bundled together + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVGPUVIRTPopulateLMASubArenasKM(PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], + IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], + IMG_BOOL bEnableTrustedDeviceAceConfig); +#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ + +/*! +************************************************************************************ + @Function RGXSystemGetFabricCoherency + + @Description Get the system fabric coherency for the device by reading default + configuration from device register, subject to AppHint overrides. + + @Input sRegsCpuPBase : Device register CPU physical address base + ui32RegsSize : Device register size + peDevFabricType : Device memory bus fabric type + peCacheSnoopingMode : Fabric coherency override + + @Return PVRSRV_ERROR +************************************************************************************/ +PVRSRV_ERROR RGXSystemGetFabricCoherency(IMG_CPU_PHYADDR sRegsCpuPBase, + IMG_UINT32 ui32RegsSize, + PVRSRV_DEVICE_FABRIC_TYPE *peDevFabricType, + PVRSRV_DEVICE_SNOOP_MODE *peCacheSnoopingMode); + +/*! + ******************************************************************************* + + @Function RGXInitCreateFWKernelMemoryContext + + @Description Called to perform initialisation during firmware kernel context + creation. + + @Input psDeviceNode device node + ******************************************************************************/ +PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode); + +/*! 
+ ******************************************************************************* + + @Function RGXDeInitDestroyFWKernelMemoryContext + + @Description Called to perform deinitialisation during firmware kernel + context destruction. + + @Input psDeviceNode device node + ******************************************************************************/ +void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode); + +/*! + ******************************************************************************* + + @Function RGXFwIrqEventRx + + @Description Checks the implementation specific IRQ status register, + clearing it if necessary and returning the IRQ status. + + @Input: psDevInfo - device info + + @Return: IRQ status + + ******************************************************************************/ +IMG_BOOL RGXFwIrqEventRx(PVRSRV_RGXDEV_INFO *psDevInfo); + +#endif /* __RGXINIT_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxkicksync.c b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxkicksync.c new file mode 100644 index 000000000000..c5c0889ea2be --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxkicksync.c @@ -0,0 +1,788 @@ +/*************************************************************************/ /*! +@File rgxkicksync.c +@Title Server side of the sync only kick API +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ /**************************************************************************/

#include "img_defs.h"
#include "rgxkicksync.h"

#include "rgxdevice.h"
#include "rgxmem.h"
#include "rgxfwutils.h"
#include "allocmem.h"
#include "sync.h"
#include "rgxhwperf.h"
#include "ospvr_gputrace.h"

#include "sync_checkpoint.h"
#include "sync_checkpoint_internal.h"

/* Enable this to dump the compiled list of UFOs prior to kick call */
#define ENABLE_KICKSYNC_UFO_DUMP 0

//#define KICKSYNC_CHECKPOINT_DEBUG 1

/* CHKPT_DBG compiles to a PVR_DPF only when KICKSYNC_CHECKPOINT_DEBUG is
 * defined above; otherwise every CHKPT_DBG call site disappears entirely. */
#if defined(KICKSYNC_CHECKPOINT_DEBUG)
#define CHKPT_DBG(X) PVR_DPF(X)
#else
#define CHKPT_DBG(X)
#endif

/* Server-side state for one kick-sync context. A kick-sync context owns a
 * FW common context on the GP data master, used purely to run fence-check /
 * fence-update commands (no real GPU work). */
struct _RGX_SERVER_KICKSYNC_CONTEXT_
{
	PVRSRV_DEVICE_NODE        * psDeviceNode;          /* owning device node */
	RGX_SERVER_COMMON_CONTEXT * psServerCommonContext; /* backing FW common context */
	DLLIST_NODE                 sListNode;             /* entry in psDevInfo->sKickSyncCtxtListHead */
	SYNC_ADDR_LIST              sSyncAddrListFence;    /* scratch list of check (fence) UFO addresses */
	SYNC_ADDR_LIST              sSyncAddrListUpdate;   /* scratch list of update UFO addresses */
	POS_LOCK                    hLock;                 /* serialises kicks and property changes */
};


/* Create a kick-sync context: allocate the host-side tracking structure and
 * its lock, allocate the backing FW common context (GP DM), and register the
 * context on the per-device kick-sync context list.
 * ui32PackedCCBSizeU88 packs two log2 CCB sizes (initial, max); zero fields
 * fall back to the RGX_KICKSYNC_CCB_*_LOG2 defaults.
 * On success *ppsKickSyncContext holds the new context; on failure all
 * partially-acquired resources are released via the goto cleanup chain. */
PVRSRV_ERROR PVRSRVRGXCreateKickSyncContextKM(CONNECTION_DATA *psConnection,
                                              PVRSRV_DEVICE_NODE *psDeviceNode,
                                              IMG_HANDLE hMemCtxPrivData,
                                              IMG_UINT32 ui32PackedCCBSizeU88,
                                              IMG_UINT32 ui32ContextFlags,
                                              RGX_SERVER_KICKSYNC_CONTEXT **ppsKickSyncContext)
{
	PVRSRV_RGXDEV_INFO * psDevInfo = psDeviceNode->pvDevice;
	DEVMEM_MEMDESC * psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
	RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext;
	RGX_COMMON_CONTEXT_INFO sInfo;
	PVRSRV_ERROR eError = PVRSRV_OK;
	IMG_UINT32 ui32CCBAllocSizeLog2, ui32CCBMaxAllocSizeLog2;

	/* Prepare cleanup struct */
	* ppsKickSyncContext = NULL;
	psKickSyncContext = OSAllocZMem(sizeof(*psKickSyncContext));
	if (psKickSyncContext == NULL)
	{
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	eError = OSLockCreate(&psKickSyncContext->hLock);

	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)",
		         __func__,
		         PVRSRVGetErrorString(eError)));
		goto err_lockcreate;
	}

	psKickSyncContext->psDeviceNode = psDeviceNode;

	/* Unpack the two U8 log2 CCB sizes from the packed U88 argument. */
	ui32CCBAllocSizeLog2 = U32toU8_Unpack1(ui32PackedCCBSizeU88);
	ui32CCBMaxAllocSizeLog2 = U32toU8_Unpack2(ui32PackedCCBSizeU88);
	eError = FWCommonContextAllocate(psConnection,
	                                 psDeviceNode,
	                                 REQ_TYPE_KICKSYNC,
	                                 RGXFWIF_DM_GP,
	                                 hMemCtxPrivData,
	                                 NULL,
	                                 0,
	                                 psFWMemContextMemDesc,
	                                 NULL,
	                                 ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_KICKSYNC_CCB_SIZE_LOG2,
	                                 ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_KICKSYNC_CCB_MAX_SIZE_LOG2,
	                                 ui32ContextFlags,
	                                 0, /* priority */
	                                 0, /* max deadline MS */
	                                 0, /* robustness address */
	                                 & sInfo,
	                                 & psKickSyncContext->psServerCommonContext);
	if (eError != PVRSRV_OK)
	{
		goto fail_contextalloc;
	}

	/* Publish the new context on the device-wide list (write lock). */
	OSWRLockAcquireWrite(psDevInfo->hKickSyncCtxListLock);
	dllist_add_to_tail(&(psDevInfo->sKickSyncCtxtListHead), &(psKickSyncContext->sListNode));
	OSWRLockReleaseWrite(psDevInfo->hKickSyncCtxListLock);

	SyncAddrListInit(&psKickSyncContext->sSyncAddrListFence);
	SyncAddrListInit(&psKickSyncContext->sSyncAddrListUpdate);

	* ppsKickSyncContext = psKickSyncContext;
	return PVRSRV_OK;

fail_contextalloc:
	OSLockDestroy(psKickSyncContext->hLock);
err_lockcreate:
	OSFreeMem(psKickSyncContext);
	return eError;
}


/* Destroy a kick-sync context. First asks the FW to clean up the common
 * context; PVRSRV_ERROR_RETRY is propagated so the caller retries later
 * (the FW has not finished with the resource yet). Only once the FW is done
 * are the host-side resources unlinked and freed. */
PVRSRV_ERROR PVRSRVRGXDestroyKickSyncContextKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext)
{
	PVRSRV_ERROR eError = PVRSRV_OK;
	PVRSRV_RGXDEV_INFO * psDevInfo = psKickSyncContext->psDeviceNode->pvDevice;

	/* Check if the FW has finished with this resource ... */
	eError = RGXFWRequestCommonContextCleanUp(psKickSyncContext->psDeviceNode,
	                                          psKickSyncContext->psServerCommonContext,
	                                          RGXFWIF_DM_GP,
	                                          PDUMP_FLAGS_NONE);

	if (eError == PVRSRV_ERROR_RETRY)
	{
		/* FW still busy with the context: caller must retry the destroy. */
		return eError;
	}
	else if (eError != PVRSRV_OK)
	{
		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
		         __func__,
		         PVRSRVGetErrorString(eError)));
		return eError;
	}

	/* ... it has so we can free its resources */

	OSWRLockAcquireWrite(psDevInfo->hKickSyncCtxListLock);
	dllist_remove_node(&(psKickSyncContext->sListNode));
	OSWRLockReleaseWrite(psDevInfo->hKickSyncCtxListLock);

	FWCommonContextFree(psKickSyncContext->psServerCommonContext);

	SyncAddrListDeinit(&psKickSyncContext->sSyncAddrListFence);
	SyncAddrListDeinit(&psKickSyncContext->sSyncAddrListUpdate);

	OSLockDestroy(psKickSyncContext->hLock);

	OSFreeMem(psKickSyncContext);

	return PVRSRV_OK;
}

/* Set a property on a kick-sync context. Only RGX_CONTEXT_PROPERTY_FLAGS is
 * supported (ui64Input truncated to 32 bits and passed to the FW common
 * context); pui64Output is not written for this property. Any other property
 * returns PVRSRV_ERROR_NOT_SUPPORTED. */
PVRSRV_ERROR PVRSRVRGXSetKickSyncContextPropertyKM(RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContext,
                                                   RGX_CONTEXT_PROPERTY eContextProperty,
                                                   IMG_UINT64 ui64Input,
                                                   IMG_UINT64 *pui64Output)
{
	PVRSRV_ERROR eError;

	switch (eContextProperty)
	{
		case RGX_CONTEXT_PROPERTY_FLAGS:
		{
			/* hLock serialises flag changes against concurrent kicks. */
			OSLockAcquire(psKickSyncContext->hLock);
			eError = FWCommonContextSetFlags(psKickSyncContext->psServerCommonContext,
			                                 (IMG_UINT32)ui64Input);

			OSLockRelease(psKickSyncContext->hLock);
			PVR_LOG_IF_ERROR(eError, "FWCommonContextSetFlags");
			break;
		}

		default:
		{
			PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty));
			eError = PVRSRV_ERROR_NOT_SUPPORTED;
		}
	}

	return eError;
}

/* Debug helper: walk every live kick-sync context on this device (read lock)
 * and dump its FW common context state via the supplied printf callback. */
void DumpKickSyncCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
                           DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
                           void *pvDumpDebugFile,
                           IMG_UINT32 ui32VerbLevel)
{
	DLLIST_NODE *psNode, *psNext;
	OSWRLockAcquireRead(psDevInfo->hKickSyncCtxListLock);
	dllist_foreach_node(&psDevInfo->sKickSyncCtxtListHead, psNode, psNext)
	{
		RGX_SERVER_KICKSYNC_CONTEXT *psCurrentServerKickSyncCtx =
			IMG_CONTAINER_OF(psNode, RGX_SERVER_KICKSYNC_CONTEXT, sListNode);

		if (NULL != psCurrentServerKickSyncCtx->psServerCommonContext)
		{
			DumpFWCommonContextInfo(psCurrentServerKickSyncCtx->psServerCommonContext,
			                        pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
		}
	}
	OSWRLockReleaseRead(psDevInfo->hKickSyncCtxListLock);
}

+IMG_UINT32 CheckForStalledClientKickSyncCtxt(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + DLLIST_NODE *psNode, *psNext; + IMG_UINT32 ui32ContextBitMask = 0; + + OSWRLockAcquireRead(psDevInfo->hKickSyncCtxListLock); + + dllist_foreach_node(&psDevInfo->sKickSyncCtxtListHead, psNode, psNext) + { + RGX_SERVER_KICKSYNC_CONTEXT *psCurrentServerKickSyncCtx = + IMG_CONTAINER_OF(psNode, RGX_SERVER_KICKSYNC_CONTEXT, sListNode); + + if (NULL != psCurrentServerKickSyncCtx->psServerCommonContext) + { + if (CheckStalledClientCommonContext(psCurrentServerKickSyncCtx->psServerCommonContext, RGX_KICK_TYPE_DM_GP) == PVRSRV_ERROR_CCCB_STALLED) + { + ui32ContextBitMask |= RGX_KICK_TYPE_DM_GP; + } + } + } + + OSWRLockReleaseRead(psDevInfo->hKickSyncCtxListLock); + return ui32ContextBitMask; +} + +PVRSRV_ERROR PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFODevVarBlock, + IMG_UINT32 * paui32ClientUpdateOffset, + IMG_UINT32 * paui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE iUpdateTimeline, + PVRSRV_FENCE * piUpdateFence, + IMG_CHAR szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], + + IMG_UINT32 ui32ExtJobRef) +{ + RGXFWIF_KCCB_CMD sKickSyncKCCBCmd; + RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1]; + PVRSRV_ERROR eError; + PVRSRV_ERROR eError2; + IMG_BOOL bCCBStateOpen = IMG_FALSE; + PRGXFWIF_UFO_ADDR *pauiClientFenceUFOAddress = NULL; + PRGXFWIF_UFO_ADDR *pauiClientUpdateUFOAddress = NULL; + IMG_UINT32 ui32ClientFenceCount = 0; + IMG_UINT32 *paui32ClientFenceValue = NULL; + PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE; + IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext).ui32Addr; + PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psKickSyncContext->psServerCommonContext); + RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psKickSyncContext->psServerCommonContext); + IMG_UINT32 
ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); + IMG_UINT64 uiCheckFenceUID = 0; + IMG_UINT64 uiUpdateFenceUID = 0; + PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL; + PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; + IMG_UINT32 ui32FenceSyncCheckpointCount = 0; + IMG_UINT32 ui32FenceTimelineUpdateValue = 0; + IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL; + PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL; + void *pvUpdateFenceFinaliseData = NULL; + + /* Ensure we haven't been given a null ptr to + * update values if we have been told we + * have dev var updates + */ + if (ui32ClientUpdateCount > 0) + { + PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL, + "paui32ClientUpdateValue NULL but " + "ui32ClientUpdateCount > 0", + PVRSRV_ERROR_INVALID_PARAMS); + } + + OSLockAcquire(psKickSyncContext->hLock); + eError = SyncAddrListPopulate(&psKickSyncContext->sSyncAddrListUpdate, + ui32ClientUpdateCount, + pauiClientUpdateUFODevVarBlock, + paui32ClientUpdateOffset); + + if (eError != PVRSRV_OK) + { + goto fail_syncaddrlist; + } + + if (ui32ClientUpdateCount > 0) + { + pauiClientUpdateUFOAddress = psKickSyncContext->sSyncAddrListUpdate.pasFWAddrs; + } + /* Ensure the string is null-terminated (Required for safety) */ + szUpdateFenceName[31] = '\0'; + + /* This will never be true if called from the bridge since piUpdateFence will always be valid */ + if (iUpdateTimeline >= 0 && !piUpdateFence) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto out_unlock; + } + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), " + "psKickSyncContext->psDeviceNode->hSyncCheckpointContext=<%p>...", + __func__, iCheckFence, + (void*)psKickSyncContext->psDeviceNode->hSyncCheckpointContext)); + /* Resolve the sync checkpoints that make up the input fence */ + eError = SyncCheckpointResolveFence(psKickSyncContext->psDeviceNode->hSyncCheckpointContext, + iCheckFence, + &ui32FenceSyncCheckpointCount, + 
&apsFenceSyncCheckpoints, + &uiCheckFenceUID, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_OK) + { + goto fail_resolve_fence; + } + + /* Create the output fence (if required) */ + if (iUpdateTimeline != PVRSRV_NO_TIMELINE) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: calling SyncCheckpointCreateFence (iUpdateTimeline=%d)...", + __func__, iUpdateTimeline)); + eError = SyncCheckpointCreateFence(psKickSyncContext->psDeviceNode, + szUpdateFenceName, + iUpdateTimeline, + psKickSyncContext->psDeviceNode->hSyncCheckpointContext, + &iUpdateFence, + &uiUpdateFenceUID, + &pvUpdateFenceFinaliseData, + &psUpdateSyncCheckpoint, + (void*)&psFenceTimelineUpdateSync, + &ui32FenceTimelineUpdateValue, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_OK) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%d)", + __func__, eError)); + goto fail_create_output_fence; + } + CHKPT_DBG((PVR_DBG_ERROR, + "%s: ...returned from SyncCheckpointCreateFence " + "(iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, " + "ui32FenceTimelineUpdateValue=%u)", + __func__, iUpdateFence, psFenceTimelineUpdateSync, + ui32FenceTimelineUpdateValue)); + + /* Append the sync prim update for the timeline (if required) */ + if (psFenceTimelineUpdateSync) + { + IMG_UINT32 *pui32TimelineUpdateWp = NULL; + + /* Allocate memory to hold the list of update values (including our timeline update) */ + pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*paui32ClientUpdateValue) * (ui32ClientUpdateCount+1)); + if (!pui32IntAllocatedUpdateValues) + { + /* Failed to allocate memory */ + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc_update_values_mem; + } + OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32ClientUpdateCount+1)); + /* Copy the update values into the new memory, then append our timeline update value */ + OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32ClientUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32ClientUpdateCount); + /* Now set the 
additional update value */ + pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32ClientUpdateCount; + *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue; + ui32ClientUpdateCount++; + /* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */ + paui32ClientUpdateValue = pui32IntAllocatedUpdateValues; +#if defined(KICKSYNC_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; + + for (iii=0; iii) = 0x%x", + __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + /* Now append the timeline sync prim addr to the kicksync context update list */ + SyncAddrListAppendSyncPrim(&psKickSyncContext->sSyncAddrListUpdate, + psFenceTimelineUpdateSync); + } + } + + /* Reset number of fence syncs in kicksync context fence list to 0 */ + SyncAddrListPopulate(&psKickSyncContext->sSyncAddrListFence, + 0, NULL, NULL); + + if (ui32FenceSyncCheckpointCount > 0) + { + /* Append the checks (from input fence) */ + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append %d sync checkpoints to KickSync Fence " + "(&psKickSyncContext->sSyncAddrListFence=<%p>)...", + __func__, ui32FenceSyncCheckpointCount, + (void*)&psKickSyncContext->sSyncAddrListFence)); + SyncAddrListAppendCheckpoints(&psKickSyncContext->sSyncAddrListFence, + ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + if (!pauiClientFenceUFOAddress) + { + pauiClientFenceUFOAddress = psKickSyncContext->sSyncAddrListFence.pasFWAddrs; + } + ui32ClientFenceCount += ui32FenceSyncCheckpointCount; +#if defined(KICKSYNC_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiClientFenceUFOAddress; + + for (iii=0; iii) = 0x%x", + __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + } + + if (psUpdateSyncCheckpoint) + { + PVRSRV_ERROR eErr; + + /* Append the update (from output fence) */ + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append 1 sync checkpoint to KickSync Update " + 
"(&psKickSyncContext->sSyncAddrListUpdate=<%p>)...", + __func__, (void*)&psKickSyncContext->sSyncAddrListUpdate)); + eErr = SyncAddrListAppendCheckpoints(&psKickSyncContext->sSyncAddrListUpdate, + 1, + &psUpdateSyncCheckpoint); + if (eErr != PVRSRV_OK) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: ...done. SyncAddrListAppendCheckpoints() returned error (%d)", + __func__, eErr)); + } + else + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done.", __func__)); + } + if (!pauiClientUpdateUFOAddress) + { + pauiClientUpdateUFOAddress = psKickSyncContext->sSyncAddrListUpdate.pasFWAddrs; + } + ui32ClientUpdateCount++; +#if defined(KICKSYNC_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiClientUpdateUFOAddress; + + for (iii=0; iii) = 0x%x", + __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + } + +#if (ENABLE_KICKSYNC_UFO_DUMP == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: dumping KICKSYNC fence/updates syncs...", + __func__)); + { + IMG_UINT32 ii; + PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiClientFenceUFOAddress; + IMG_UINT32 *pui32TmpIntFenceValue = paui32ClientFenceValue; + PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiClientUpdateUFOAddress; + IMG_UINT32 *pui32TmpIntUpdateValue = paui32ClientUpdateValue; + + /* Dump Fence syncs and Update syncs */ + PVR_DPF((PVR_DBG_ERROR, + "%s: Prepared %d KickSync fence syncs " + "(&psKickSyncContext->sSyncAddrListFence=<%p>, " + "pauiClientFenceUFOAddress=<%p>):", + __func__, ui32ClientFenceCount, + (void*)&psKickSyncContext->sSyncAddrListFence, + (void*)pauiClientFenceUFOAddress)); + for (ii=0; iiui32Addr & 0x1) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %d/%d<%p>. FWAddr=0x%x, " + "CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", + __func__, ii + 1, ui32ClientFenceCount, + (void*)psTmpIntFenceUFOAddress, + psTmpIntFenceUFOAddress->ui32Addr)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %d/%d<%p>. 
FWAddr=0x%x, CheckValue=%d(0x%x)", + __func__, ii + 1, ui32ClientFenceCount, + (void*)psTmpIntFenceUFOAddress, + psTmpIntFenceUFOAddress->ui32Addr, + *pui32TmpIntFenceValue, + *pui32TmpIntFenceValue)); + pui32TmpIntFenceValue++; + } + psTmpIntFenceUFOAddress++; + } + PVR_DPF((PVR_DBG_ERROR, + "%s: Prepared %d KickSync update syncs " + "(&psKickSyncContext->sSyncAddrListUpdate=<%p>, " + "pauiClientUpdateUFOAddress=<%p>):", + __func__, ui32ClientUpdateCount, + (void*)&psKickSyncContext->sSyncAddrListUpdate, + (void*)pauiClientUpdateUFOAddress)); + for (ii=0; ii", + __func__, __LINE__, + (void*)psTmpIntUpdateUFOAddress)); + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Line %d, pui32TmpIntUpdateValue=<%p>", + __func__, __LINE__, + (void*)pui32TmpIntUpdateValue)); + if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %d/%d<%p>. FWAddr=0x%x, " + "UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", + __func__, ii + 1, ui32ClientUpdateCount, + (void*)psTmpIntUpdateUFOAddress, + psTmpIntUpdateUFOAddress->ui32Addr)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", + __func__, ii + 1, ui32ClientUpdateCount, + (void*)psTmpIntUpdateUFOAddress, + psTmpIntUpdateUFOAddress->ui32Addr, + *pui32TmpIntUpdateValue)); + pui32TmpIntUpdateValue++; + } + psTmpIntUpdateUFOAddress++; + } + } +#endif + + RGXCmdHelperInitCmdCCB(psClientCCB, + 0, /* empty ui64FBSCEntryMask */ + ui32ClientFenceCount, + pauiClientFenceUFOAddress, + paui32ClientFenceValue, + ui32ClientUpdateCount, + pauiClientUpdateUFOAddress, + paui32ClientUpdateValue, + 0, + NULL, + RGXFWIF_CCB_CMD_TYPE_NULL, + ui32ExtJobRef, + ui32IntJobRef, + PDUMP_FLAGS_NONE, + NULL, + "KickSync", + bCCBStateOpen, + asCmdHelperData); + + eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData); + if (eError != PVRSRV_OK) + { + goto fail_cmdaquire; + } + + /* + * We should reserve space in the kernel CCB here and fill in the command + * directly. 
+ * This is so if there isn't space in the kernel CCB we can return with + * retry back to services client before we take any operations + */ + + /* + * We might only be kicking for flush out a padding packet so only submit + * the command if the create was successful + */ + if (eError == PVRSRV_OK) + { + /* + * All the required resources are ready at this point, we can't fail so + * take the required server sync operations and commit all the resources + */ + RGXCmdHelperReleaseCmdCCB(1, + asCmdHelperData, + "KickSync", + FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext).ui32Addr); + } + + /* Construct the kernel kicksync CCB command. */ + sKickSyncKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + sKickSyncKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext); + sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); + sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); + + sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; + sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; + + /* + * Submit the kicksync command to the firmware. 
+ */ + RGXSRV_HWPERF_ENQ(psKickSyncContext, + OSGetCurrentClientProcessIDKM(), + ui32FWCtx, + ui32ExtJobRef, + ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_SYNC, + iCheckFence, + iUpdateFence, + iUpdateTimeline, + uiCheckFenceUID, + uiUpdateFenceUID); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError2 = RGXScheduleCommand(psKickSyncContext->psDeviceNode->pvDevice, + FWCommonContextGetServerMMUCtx(psKickSyncContext->psServerCommonContext), + RGXFWIF_DM_GP, + & sKickSyncKCCBCmd, + ui32ClientCacheOpSeqNum, + PDUMP_FLAGS_NONE); + if (eError2 != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + PVRGpuTraceEnqueueEvent(psKickSyncContext->psDeviceNode->pvDevice, + ui32FWCtx, ui32ExtJobRef, ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_SYNC); + + if (eError2 != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVRGXKickSync failed to schedule kernel CCB command. (0x%x)", + eError)); + } + + /* + * Now check eError (which may have returned an error from our earlier call + * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first + * so we check it now... 
+ */ + if (eError != PVRSRV_OK ) + { + goto fail_cmdaquire; + } + +#if defined(NO_HARDWARE) + /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ + if (psUpdateSyncCheckpoint) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", + __func__, (void*)psUpdateSyncCheckpoint, + SyncCheckpointGetId(psUpdateSyncCheckpoint), + SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint))); + SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint); + } + if (psFenceTimelineUpdateSync) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Updating NOHW sync prim<%p> to %d", + __func__, (void*)psFenceTimelineUpdateSync, + ui32FenceTimelineUpdateValue)); + SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue); + } + SyncCheckpointNoHWUpdateTimelines(NULL); +#endif + /* Drop the references taken on the sync checkpoints in the + * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } + /* Free memory allocated to hold the internal list of update values */ + if (pui32IntAllocatedUpdateValues) + { + OSFreeMem(pui32IntAllocatedUpdateValues); + pui32IntAllocatedUpdateValues = NULL; + } + + *piUpdateFence = iUpdateFence; + if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE)) + { + SyncCheckpointFinaliseFence(psKickSyncContext->psDeviceNode, iUpdateFence, + pvUpdateFenceFinaliseData, + psUpdateSyncCheckpoint, szUpdateFenceName); + } + + OSLockRelease(psKickSyncContext->hLock); + return PVRSRV_OK; + +fail_cmdaquire: + SyncAddrListRollbackCheckpoints(psKickSyncContext->psDeviceNode, &psKickSyncContext->sSyncAddrListFence); + SyncAddrListRollbackCheckpoints(psKickSyncContext->psDeviceNode, &psKickSyncContext->sSyncAddrListUpdate); + if (iUpdateFence != 
PVRSRV_NO_FENCE) + { + SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData); + } + + /* Free memory allocated to hold update values */ + if (pui32IntAllocatedUpdateValues) + { + OSFreeMem(pui32IntAllocatedUpdateValues); + } +fail_alloc_update_values_mem: +fail_create_output_fence: + /* Drop the references taken on the sync checkpoints in the + * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + /* Free memory allocated to hold the resolved fence's checkpoints */ + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } +fail_resolve_fence: +fail_syncaddrlist: +out_unlock: + OSLockRelease(psKickSyncContext->hLock); + return eError; +} + + +/**************************************************************************//** + End of file (rgxkicksync.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxlayer.h b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxlayer.h new file mode 100644 index 000000000000..91d75e1a55ad --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxlayer.h @@ -0,0 +1,510 @@ +/*************************************************************************/ /*! +@File +@Title Header for Services abstraction layer +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declaration of an interface layer used to abstract code that + can be compiled outside of the DDK, potentially in a + completely different OS. + All the headers included by this file must also be copied to + the alternative source tree. + All the functions declared here must have a DDK implementation + inside the DDK source tree (e.g. rgxlayer_impl.h/.c) and + another different implementation in case they are used outside + of the DDK. 
+ All of the functions accept as a first parameter a + "const void *hPrivate" argument. It should be used to pass + around any implementation specific data required. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__RGXLAYER_H__) +#define __RGXLAYER_H__ + +#if defined(__cplusplus) +extern "C" { +#endif + + +#include "img_defs.h" +#include "img_types.h" +#include "img_elf.h" +#include "pvrsrv_error.h" /* includes pvrsrv_errors.h */ +#include "rgx_bvnc_defs_km.h" +#include "rgx_fw_info.h" +#include "rgx_fwif_shared.h" /* includes rgx_common.h and mem_types.h */ +#include "rgx_meta.h" +#include "rgx_riscv.h" + +#include "rgxdefs_km.h" +/* includes: + * rgx_cr_defs_km.h, + * RGX_BVNC_CORE_KM_HEADER (rgxcore_km_B.V.N.C.h), + * RGX_BNC_CONFIG_KM_HEADER (rgxconfig_km_B.V.N.C.h) + */ + + +/*! +******************************************************************************* + + @Function RGXMemCopy + + @Description MemCopy implementation + + @Input hPrivate : Implementation specific data + @Input pvDst : Pointer to the destination + @Input pvSrc : Pointer to the source location + @Input uiSize : The amount of memory to copy in bytes + + @Return void + +******************************************************************************/ +void RGXMemCopy(const void *hPrivate, + void *pvDst, + void *pvSrc, + size_t uiSize); + +/*! 
+******************************************************************************* + + @Function RGXMemSet + + @Description MemSet implementation + + @Input hPrivate : Implementation specific data + @Input pvDst : Pointer to the start of the memory region + @Input ui8Value : The value to be written + @Input uiSize : The number of bytes to be set to ui8Value + + @Return void + +******************************************************************************/ +void RGXMemSet(const void *hPrivate, + void *pvDst, + IMG_UINT8 ui8Value, + size_t uiSize); + +/*! +******************************************************************************* + + @Function RGXCommentLog + + @Description Generic log function used for debugging or other purposes + + @Input hPrivate : Implementation specific data + @Input pszString : Message to be printed + @Input ... : Variadic arguments + + @Return void + +******************************************************************************/ +__printf(2, 3) +void RGXCommentLog(const void *hPrivate, + const IMG_CHAR *pszString, + ...); + +/*! +******************************************************************************* + + @Function RGXErrorLog + + @Description Generic error log function used for debugging or other purposes + + @Input hPrivate : Implementation specific data + @Input pszString : Message to be printed + @Input ... : Variadic arguments + + @Return void + +******************************************************************************/ +__printf(2, 3) +void RGXErrorLog(const void *hPrivate, + const IMG_CHAR *pszString, + ...); + +/* This is used to check if a specific feature is enabled. + * Should be used instead of calling RGXDeviceHasFeature. */ +#define RGX_DEVICE_HAS_FEATURE(hPrivate, Feature) \ + RGXDeviceHasFeature(hPrivate, RGX_FEATURE_##Feature##_BIT_MASK) + +/* This is used to check if a specific feature with value is enabled. + * Should be used instead of calling RGXDeviceGetFeatureValue. 
*/ +#define RGX_DEVICE_HAS_FEATURE_VALUE(hPrivate, Feature) \ + (RGXDeviceGetFeatureValue(hPrivate, RGX_FEATURE_##Feature##_IDX) >= 0) + +/* This is used to get the value of a specific feature from hPrivate. + * Should be used instead of calling RGXDeviceGetFeatureValue. */ +#define RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, Feature) \ + RGXDeviceGetFeatureValue(hPrivate, RGX_FEATURE_##Feature##_IDX) +/*! +******************************************************************************* + + @Function RGXDeviceGetFeatureValue + + @Description Checks if a device has a particular feature with values + + @Input hPrivate : Implementation specific data + @Input ui64Feature : Feature with values to check + + @Return Value >= 0 if the given feature is available, -1 otherwise + +******************************************************************************/ +IMG_INT32 RGXDeviceGetFeatureValue(const void *hPrivate, IMG_UINT64 ui64Feature); + +/*! +******************************************************************************* + + @Function RGXDeviceHasFeature + + @Description Checks if a device has a particular feature + + @Input hPrivate : Implementation specific data + @Input ui64Feature : Feature to check + + @Return IMG_TRUE if the given feature is available, IMG_FALSE otherwise + +******************************************************************************/ +IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature); + +/*! +******************************************************************************* + + @Function RGXDeviceHasErnBrn + + @Description Checks if a device has a particular errata + + @Input hPrivate : Implementation specific data + @Input ui64ErnsBrns : Flags to check + + @Return IMG_TRUE if the given errata is available, IMG_FALSE otherwise + +******************************************************************************/ +IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns); + +/*! 
*******************************************************************************

 @Function       RGXGetFWCorememSize

 @Description    Get the FW coremem size

 @Input          hPrivate : Implementation specific data

 @Return         FW coremem size

******************************************************************************/
IMG_INTERNAL
IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate);

/*!
*******************************************************************************

 @Function       RGXWriteReg32/64

 @Description    Write a value to a 32/64 bit RGX register

 @Input          hPrivate         : Implementation specific data
 @Input          ui32RegAddr      : Register offset inside the register bank
 @Input          ui32/64RegValue  : New register value

 @Return         void

******************************************************************************/
void RGXWriteReg32(const void *hPrivate,
                   IMG_UINT32 ui32RegAddr,
                   IMG_UINT32 ui32RegValue);

void RGXWriteReg64(const void *hPrivate,
                   IMG_UINT32 ui32RegAddr,
                   IMG_UINT64 ui64RegValue);

/*!
*******************************************************************************

 @Function       RGXReadReg32/64

 @Description    Read a 32/64 bit RGX register

 @Input          hPrivate     : Implementation specific data
 @Input          ui32RegAddr  : Register offset inside the register bank

 @Return         Register value

******************************************************************************/
IMG_UINT32 RGXReadReg32(const void *hPrivate,
                        IMG_UINT32 ui32RegAddr);

IMG_UINT64 RGXReadReg64(const void *hPrivate,
                        IMG_UINT32 ui32RegAddr);

/*!
*******************************************************************************

 @Function       RGXReadModifyWriteReg64

 @Description    Read-modify-write a 64 bit RGX register

 @Input          hPrivate         : Implementation specific data.
 @Input          ui32RegAddr      : Register offset inside the register bank.
 @Input          ui64RegValue     : New register value.
 @Input          ui64RegKeepMask  : Keep the bits set in the mask.

 @Return         Always returns PVRSRV_OK

******************************************************************************/
IMG_UINT32 RGXReadModifyWriteReg64(const void *hPrivate,
                                   IMG_UINT32 ui32RegAddr,
                                   IMG_UINT64 ui64RegValue,
                                   IMG_UINT64 ui64RegKeepMask);

/*!
*******************************************************************************

 @Function       RGXPollReg32/64

 @Description    Poll on a 32/64 bit RGX register until some bits are set/unset

 @Input          hPrivate         : Implementation specific data
 @Input          ui32RegAddr      : Register offset inside the register bank
 @Input          ui32/64RegValue  : Value expected from the register
 @Input          ui32/64RegMask   : Only the bits set in this mask will be
                                    checked against uiRegValue

 @Return         PVRSRV_OK if the poll succeeds,
                 PVRSRV_ERROR_TIMEOUT if the poll takes too long

******************************************************************************/
PVRSRV_ERROR RGXPollReg32(const void *hPrivate,
                          IMG_UINT32 ui32RegAddr,
                          IMG_UINT32 ui32RegValue,
                          IMG_UINT32 ui32RegMask);

PVRSRV_ERROR RGXPollReg64(const void *hPrivate,
                          IMG_UINT32 ui32RegAddr,
                          IMG_UINT64 ui64RegValue,
                          IMG_UINT64 ui64RegMask);

/*!
*******************************************************************************

 @Function       RGXWaitCycles

 @Description    Wait for a number of GPU cycles and/or microseconds

 @Input          hPrivate    : Implementation specific data
 @Input          ui32Cycles  : Number of GPU cycles to wait for in pdumps,
                               it can also be used when running driver-live
                               if desired (ignoring the next parameter)
 @Input          ui32WaitUs  : Number of microseconds to wait for when running
                               driver-live

 @Return         void

******************************************************************************/
void RGXWaitCycles(const void *hPrivate,
                   IMG_UINT32 ui32Cycles,
                   IMG_UINT32 ui32WaitUs);

/*!
+******************************************************************************* + + @Function RGXAcquireKernelMMUPC + + @Description Acquire the Kernel MMU Page Catalogue device physical address + + @Input hPrivate : Implementation specific data + @Input psPCAddr : Returned page catalog address + + @Return void + +******************************************************************************/ +void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr); + +/*! +******************************************************************************* + + @Function RGXWriteKernelMMUPC32/64 + + @Description Write the Kernel MMU Page Catalogue to the 32/64 bit + RGX register passed as argument. + In a driver-live scenario without PDump these functions + are the same as RGXWriteReg32/64 and they don't need + to be reimplemented. + + @Input hPrivate : Implementation specific data + @Input ui32PCReg : Register offset inside the register bank + @Input ui32AlignShift : PC register alignshift + @Input ui32Shift : PC register shift + @Input ui32/64PCVal : Page catalog value (aligned and shifted) + + @Return void + +******************************************************************************/ +#if defined(PDUMP) +void RGXWriteKernelMMUPC32(const void *hPrivate, + IMG_UINT32 ui32PCReg, + IMG_UINT32 ui32PCRegAlignShift, + IMG_UINT32 ui32PCRegShift, + IMG_UINT32 ui32PCVal); + +#else /* defined(PDUMP) */ +#define RGXWriteKernelMMUPC32(priv, pcreg, alignshift, shift, pcval) \ + RGXWriteReg32(priv, pcreg, pcval) +#endif /* defined(PDUMP) */ + +/*! +******************************************************************************* + + @Function RGXDoFWSlaveBoot + + @Description Returns whether or not a FW Slave Boot is required + while powering on + + @Input hPrivate : Implementation specific data + + @Return IMG_BOOL + +******************************************************************************/ +IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate); + +/*! 
+******************************************************************************* + + @Function RGXFabricCoherencyTest + + @Description Performs fabric coherency test + + @Input hPrivate : Implementation specific data + + @Return PVRSRV_OK if the test succeeds, + PVRSRV_ERROR_INIT_FAILURE if the test fails at some point + +******************************************************************************/ +PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate); + +/*! +******************************************************************************* + + @Function RGXGetDeviceSLCBanks + + @Description Returns the number of SLC banks used by the device + + @Input hPrivate : Implementation specific data + + @Return Number of SLC banks + +******************************************************************************/ +IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate); + +/*! +******************************************************************************* + + @Function RGXGetDeviceSLCSize + + @Description Returns the device SLC size + + @Input hPrivate : Implementation specific data + + @Return SLC size + +******************************************************************************/ +IMG_UINT32 RGXGetDeviceSLCSize(const void *hPrivate); + +/*! +******************************************************************************* + + @Function RGXGetDeviceCacheLineSize + + @Description Returns the device cache line size + + @Input hPrivate : Implementation specific data + + @Return Cache line size + +******************************************************************************/ +IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate); + +/*! 
+******************************************************************************* + + @Function RGXAcquireBootCodeAddr + + @Description Acquire the device virtual address of the RISCV boot code + + @Input hPrivate : Implementation specific data + @Output psBootCodeAddr : Boot code base address + + @Return void + +******************************************************************************/ +void RGXAcquireBootCodeAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootCodeAddr); + +/*! +******************************************************************************* + + @Function RGXAcquireBootDataAddr + + @Description Acquire the device virtual address of the RISCV boot data + + @Input hPrivate : Implementation specific data + @Output psBootDataAddr : Boot data base address + + @Return void + +******************************************************************************/ +void RGXAcquireBootDataAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootDataAddr); + +/*! + ******************************************************************************* + + @Function RGXDeviceIrqEventRx + + @Description Checks the implementation specific IRQ status register, + clearing it if necessary and returning the IRQ status. + + @Input hPrivate : Implementation specific data + + @Return: IRQ status + + ******************************************************************************/ +IMG_BOOL RGXDeviceIrqEventRx(const void *hPrivate); + +#if defined(__cplusplus) +} +#endif + +#endif /* __RGXLAYER_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxlayer_impl.c b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxlayer_impl.c new file mode 100644 index 000000000000..4d771bf35fb2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxlayer_impl.c @@ -0,0 +1,965 @@ +/*************************************************************************/ /*! 
+@File +@Title DDK implementation of the Services abstraction layer +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description DDK implementation of the Services abstraction layer +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "rgxlayer_impl.h" +#include "osfunc.h" +#include "pdump_km.h" +#include "rgxfwutils.h" +#include "rgxinit.h" +#include "cache_km.h" + +#if defined(PDUMP) +#include +#endif + +void RGXMemCopy(const void *hPrivate, + void *pvDst, + void *pvSrc, + size_t uiSize) +{ + PVR_UNREFERENCED_PARAMETER(hPrivate); + OSDeviceMemCopy(pvDst, pvSrc, uiSize); +} + +void RGXMemSet(const void *hPrivate, + void *pvDst, + IMG_UINT8 ui8Value, + size_t uiSize) +{ + PVR_UNREFERENCED_PARAMETER(hPrivate); + OSDeviceMemSet(pvDst, ui8Value, uiSize); +} + +void RGXCommentLog(const void *hPrivate, + const IMG_CHAR *pszString, + ...) +{ +#if defined(PDUMP) + va_list argList; + va_start(argList, pszString); + PDumpCommentWithFlagsVA(PDUMP_FLAGS_CONTINUOUS, pszString, argList); + va_end(argList); + PVR_UNREFERENCED_PARAMETER(hPrivate); +#else + PVR_UNREFERENCED_PARAMETER(hPrivate); + PVR_UNREFERENCED_PARAMETER(pszString); +#endif +} + +void RGXErrorLog(const void *hPrivate, + const IMG_CHAR *pszString, + ...) 
+{ + IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; + va_list argList; + + PVR_UNREFERENCED_PARAMETER(hPrivate); + + va_start(argList, pszString); + vsnprintf(szBuffer, sizeof(szBuffer), pszString, argList); + va_end(argList); + + PVR_DPF((PVR_DBG_ERROR, "%s", szBuffer)); +} + +IMG_INT32 RGXDeviceGetFeatureValue(const void *hPrivate, IMG_UINT64 ui64Feature) +{ + IMG_INT32 i32Ret = -1; + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + PVRSRV_DEVICE_NODE *psDeviceNode; + + PVR_ASSERT(hPrivate != NULL); + + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + psDeviceNode = psDevInfo->psDeviceNode; + + if ((psDeviceNode->pfnGetDeviceFeatureValue)) + { + i32Ret = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, ui64Feature); + } + + return i32Ret; +} + +IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + return (psDevInfo->sDevFeatureCfg.ui64Features & ui64Feature) != 0; +} + +IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE)) + { + return RGX_GET_FEATURE_VALUE(psDevInfo, META_COREMEM_SIZE); + } + return 0; +} + +void RGXWriteReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = psDevInfo->pvRegsBaseKM; + +#if defined(PDUMP) + if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) +#endif + { + OSWriteHWReg32(pvRegsBase, ui32RegAddr, 
ui32RegValue); + } + + PDUMPREG32(RGX_PDUMPREG_NAME, ui32RegAddr, ui32RegValue, psParams->ui32PdumpFlags); +} + +void RGXWriteReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT64 ui64RegValue) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = psDevInfo->pvRegsBaseKM; + +#if defined(PDUMP) + if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) +#endif + { + OSWriteHWReg64(pvRegsBase, ui32RegAddr, ui64RegValue); + } + + PDUMPREG64(RGX_PDUMPREG_NAME, ui32RegAddr, ui64RegValue, psParams->ui32PdumpFlags); +} + +IMG_UINT32 RGXReadReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + IMG_UINT32 ui32RegValue; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = psDevInfo->pvRegsBaseKM; + +#if defined(PDUMP) + if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW) + { + ui32RegValue = IMG_UINT32_MAX; + } + else +#endif + { + ui32RegValue = OSReadHWReg32(pvRegsBase, ui32RegAddr); + } + + PDUMPREGREAD32(RGX_PDUMPREG_NAME, ui32RegAddr, psParams->ui32PdumpFlags); + + return ui32RegValue; +} + +IMG_UINT64 RGXReadReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + IMG_UINT64 ui64RegValue; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = psDevInfo->pvRegsBaseKM; + +#if defined(PDUMP) + if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW) + { + ui64RegValue = IMG_UINT64_MAX; + } + else +#endif + { + ui64RegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr); + } + + PDUMPREGREAD64(RGX_PDUMPREG_NAME, ui32RegAddr, PDUMP_FLAGS_CONTINUOUS); + + return ui64RegValue; +} + +IMG_UINT32 
RGXReadModifyWriteReg64(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT64 uiRegValueNew, + IMG_UINT64 uiRegKeepMask) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = psDevInfo->pvRegsBaseKM; + + /* only use the new values for bits we update according to the keep mask */ + uiRegValueNew &= ~uiRegKeepMask; + +#if defined(PDUMP) + /* Store register offset to temp PDump variable */ + PDumpRegRead64ToInternalVar(RGX_PDUMPREG_NAME, ":SYSMEM:$1", ui32RegAddr, PDUMP_FLAGS_CONTINUOUS); + + /* Keep the bits set in the mask */ + PDumpWriteVarANDValueOp(":SYSMEM:$1", uiRegKeepMask, PDUMP_FLAGS_CONTINUOUS); + + /* OR the new values */ + PDumpWriteVarORValueOp(":SYSMEM:$1", uiRegValueNew, PDUMP_FLAGS_CONTINUOUS); + + /* Do the actual register write */ + PDumpInternalVarToReg64(RGX_PDUMPREG_NAME, ui32RegAddr, ":SYSMEM:$1", 0); + + if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) +#endif + + { + IMG_UINT64 uiRegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr); + uiRegValue &= uiRegKeepMask; + OSWriteHWReg64(pvRegsBase, ui32RegAddr, uiRegValue | uiRegValueNew); + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXPollReg32(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32RegMask) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = psDevInfo->pvRegsBaseKM; + +#if defined(PDUMP) + if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) +#endif + { + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr), + ui32RegValue, + ui32RegMask, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPollReg32: Poll for Reg (0x%x) 
failed", ui32RegAddr)); + return PVRSRV_ERROR_TIMEOUT; + } + } + + PDUMPREGPOL(RGX_PDUMPREG_NAME, + ui32RegAddr, + ui32RegValue, + ui32RegMask, + psParams->ui32PdumpFlags, + PDUMP_POLL_OPERATOR_EQUAL); + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXPollReg64(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT64 ui64RegValue, + IMG_UINT64 ui64RegMask) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + + /* Split lower and upper words */ + IMG_UINT32 ui32UpperValue = (IMG_UINT32) (ui64RegValue >> 32); + IMG_UINT32 ui32LowerValue = (IMG_UINT32) (ui64RegValue); + IMG_UINT32 ui32UpperMask = (IMG_UINT32) (ui64RegMask >> 32); + IMG_UINT32 ui32LowerMask = (IMG_UINT32) (ui64RegMask); + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = psDevInfo->pvRegsBaseKM; + +#if defined(PDUMP) + if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) +#endif + { + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr + 4), + ui32UpperValue, + ui32UpperMask, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for upper part of Reg (0x%x) failed", ui32RegAddr)); + return PVRSRV_ERROR_TIMEOUT; + } + + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr), + ui32LowerValue, + ui32LowerMask, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for upper part of Reg (0x%x) failed", ui32RegAddr)); + return PVRSRV_ERROR_TIMEOUT; + } + } + + PDUMPREGPOL(RGX_PDUMPREG_NAME, + ui32RegAddr + 4, + ui32UpperValue, + ui32UpperMask, + psParams->ui32PdumpFlags, + PDUMP_POLL_OPERATOR_EQUAL); + + + PDUMPREGPOL(RGX_PDUMPREG_NAME, + ui32RegAddr, + ui32LowerValue, + ui32LowerMask, + psParams->ui32PdumpFlags, + PDUMP_POLL_OPERATOR_EQUAL); + + return PVRSRV_OK; +} + +void RGXWaitCycles(const 
void *hPrivate, IMG_UINT32 ui32Cycles, IMG_UINT32 ui32TimeUs) +{ + PVR_UNREFERENCED_PARAMETER(hPrivate); + OSWaitus(ui32TimeUs); + PDUMPIDLWITHFLAGS(ui32Cycles, PDUMP_FLAGS_CONTINUOUS); +} + +void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr) +{ + PVR_ASSERT(hPrivate != NULL); + *psPCAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sPCAddr; +} + +#if defined(PDUMP) +void RGXWriteKernelMMUPC32(const void *hPrivate, + IMG_UINT32 ui32PCReg, + IMG_UINT32 ui32PCRegAlignShift, + IMG_UINT32 ui32PCRegShift, + IMG_UINT32 ui32PCVal) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + /* Write the cat-base address */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32PCReg, ui32PCVal); + + /* Pdump catbase address */ + MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx, + RGX_PDUMPREG_NAME, + ui32PCReg, + 4, + ui32PCRegAlignShift, + ui32PCRegShift, + PDUMP_FLAGS_CONTINUOUS); +} +#endif /* defined(PDUMP) */ + +#define MAX_NUM_COHERENCY_TESTS (10) +IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + PVRSRV_DEVICE_NODE *psDeviceNode; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + if (psDevInfo->ui32CoherencyTestsDone >= MAX_NUM_COHERENCY_TESTS) + { + return IMG_FALSE; + } + + psDeviceNode = psDevInfo->psDeviceNode; +#if !defined(NO_HARDWARE) + return (PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && + PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)); +#else + return IMG_FALSE; +#endif +} + +/* + * The fabric coherency test is performed when platform supports fabric coherency + * either in the form of ACE-lite or Full-ACE. This test is done quite early + * with the firmware processor quiescent and makes exclusive use of the slave + * port interface for reading/writing through the device memory hierarchy. 
The + * rationale for the test is to ensure that what the CPU writes to its dcache + * is visible to the GPU via coherency snoop miss/hit and vice-versa without + * any intervening cache maintenance by the writing agent. + */ +PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_UINT32 *pui32FabricCohTestBufferCpuVA = NULL; + IMG_UINT32 *pui32FabricCohCcTestBufferCpuVA = NULL; + IMG_UINT32 *pui32FabricCohNcTestBufferCpuVA = NULL; + DEVMEM_MEMDESC *psFabricCohTestBufferMemDesc = NULL; + DEVMEM_MEMDESC *psFabricCohCcTestBufferMemDesc = NULL; + DEVMEM_MEMDESC *psFabricCohNcTestBufferMemDesc = NULL; + RGXFWIF_DEV_VIRTADDR sFabricCohCcTestBufferDevVA; + RGXFWIF_DEV_VIRTADDR sFabricCohNcTestBufferDevVA; + RGXFWIF_DEV_VIRTADDR *psFabricCohTestBufferDevVA = NULL; + IMG_DEVMEM_SIZE_T uiFabricCohTestBlockSize = sizeof(IMG_UINT64); + IMG_DEVMEM_ALIGN_T uiFabricCohTestBlockAlign = sizeof(IMG_UINT64); + IMG_UINT64 ui64SegOutAddrTopCached = 0; + IMG_UINT64 ui64SegOutAddrTopUncached = 0; + IMG_UINT32 ui32OddEven; + IMG_UINT32 ui32OddEvenSeed = 1; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_BOOL bFullTestPassed = IMG_TRUE; + IMG_BOOL bSubTestPassed = IMG_FALSE; + IMG_BOOL bExit = IMG_FALSE; + enum TEST_TYPE { + CPU_WRITE_GPU_READ_SM=0, GPU_WRITE_CPU_READ_SM, + CPU_WRITE_GPU_READ_SH, GPU_WRITE_CPU_READ_SH + } eTestType; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + PVR_LOG(("Starting fabric coherency test .....")); + + /* Size and align are 'expanded' because we request an export align allocation */ + eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap), + &uiFabricCohTestBlockSize, + &uiFabricCohTestBlockAlign); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "DevmemExportalignAdjustSizeAndAlign() error: %s, exiting", + PVRSRVGetErrorString(eError))); + goto e0; + } + + /* Allocate, acquire cpu address and set firmware address for 
cc=1 buffer */ + eError = DevmemFwAllocateExportable(psDevInfo->psDeviceNode, + uiFabricCohTestBlockSize, + uiFabricCohTestBlockAlign, + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT | + PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE, + "FwExFabricCoherencyCcTestBuffer", + &psFabricCohCcTestBufferMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "DevmemFwAllocateExportable() error: %s, exiting", + PVRSRVGetErrorString(eError))); + goto e0; + } + + eError = DevmemAcquireCpuVirtAddr(psFabricCohCcTestBufferMemDesc, (void **) &pui32FabricCohCcTestBufferCpuVA); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "DevmemAcquireCpuVirtAddr() error: %s, exiting", + PVRSRVGetErrorString(eError))); + goto e1; + } + + /* Create a FW address which is uncached in the Meta DCache and in the SLC using the Meta bootloader segment. 
+ This segment is the only one configured correctly out of reset (when this test is meant to be executed) */ + eError = RGXSetFirmwareAddress(&sFabricCohCcTestBufferDevVA, + psFabricCohCcTestBufferMemDesc, + 0, + RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", e2); + + /* Undo most of the FW mappings done by RGXSetFirmwareAddress */ + sFabricCohCcTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_META_CACHE_MASK; + sFabricCohCcTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK; + sFabricCohCcTestBufferDevVA.ui32Addr -= RGXFW_SEGMMU_DATA_BASE_ADDRESS; + + /* Map the buffer in the bootloader segment as uncached */ + sFabricCohCcTestBufferDevVA.ui32Addr |= RGXFW_BOOTLDR_META_ADDR; + sFabricCohCcTestBufferDevVA.ui32Addr |= RGXFW_SEGMMU_DATA_META_UNCACHED; + + /* Allocate, acquire cpu address and set firmware address for cc=0 buffer */ + eError = DevmemFwAllocateExportable(psDevInfo->psDeviceNode, + uiFabricCohTestBlockSize, + uiFabricCohTestBlockAlign, + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE, + "FwExFabricCoherencyNcTestBuffer", + &psFabricCohNcTestBufferMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "DevmemFwAllocateExportable() error: %s, exiting", + PVRSRVGetErrorString(eError))); + goto e3; + } + + eError = DevmemAcquireCpuVirtAddr(psFabricCohNcTestBufferMemDesc, (void **) &pui32FabricCohNcTestBufferCpuVA); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "DevmemAcquireCpuVirtAddr() error: %s, exiting", + PVRSRVGetErrorString(eError))); + goto e4; + } + + eError = RGXSetFirmwareAddress(&sFabricCohNcTestBufferDevVA, + psFabricCohNcTestBufferMemDesc, + 0, + 
RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", e5); + + /* Undo most of the FW mappings done by RGXSetFirmwareAddress */ + sFabricCohNcTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_META_CACHE_MASK; + sFabricCohNcTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK; + sFabricCohNcTestBufferDevVA.ui32Addr -= RGXFW_SEGMMU_DATA_BASE_ADDRESS; + + /* Map the buffer in the bootloader segment as uncached */ + sFabricCohNcTestBufferDevVA.ui32Addr |= RGXFW_BOOTLDR_META_ADDR; + sFabricCohNcTestBufferDevVA.ui32Addr |= RGXFW_SEGMMU_DATA_META_UNCACHED; + + /* Obtain the META segment addresses corresponding to cached and uncached windows into SLC */ + ui64SegOutAddrTopCached = RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(MMU_CONTEXT_MAPPING_FWIF); + ui64SegOutAddrTopUncached = RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED(MMU_CONTEXT_MAPPING_FWIF); + + /* At the top level, we perform snoop-miss (i.e. to verify slave port) & snoop-hit (i.e. to verify ACE) test. + NOTE: For now, skip snoop-miss test as Services currently forces all firmware allocations to be coherent */ + for (eTestType = CPU_WRITE_GPU_READ_SH; eTestType <= GPU_WRITE_CPU_READ_SH && bExit == IMG_FALSE; eTestType++) + { + IMG_CPU_PHYADDR sCpuPhyAddr; + IMG_BOOL bValid; + PMR *psPMR; + + if (eTestType == CPU_WRITE_GPU_READ_SM) + { + /* All snoop miss test must bypass the SLC, here memory is region of coherence so + configure META to use SLC bypass cache policy for the bootloader segment. Note + this cannot be done on a cache-coherent (i.e. 
CC=1) VA, as this violates ACE + standard as one cannot issue a non-coherent request into the bus fabric for + an allocation's VA that is cache-coherent in SLC, so use non-coherent buffer */ + RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6), + (ui64SegOutAddrTopUncached | RGXFW_BOOTLDR_DEVV_ADDR) >> 32); + pui32FabricCohTestBufferCpuVA = pui32FabricCohNcTestBufferCpuVA; + psFabricCohTestBufferMemDesc = psFabricCohNcTestBufferMemDesc; + psFabricCohTestBufferDevVA = &sFabricCohNcTestBufferDevVA; + } + else if (eTestType == CPU_WRITE_GPU_READ_SH) + { + /* All snoop hit test must obviously use SLC, here SLC is region of coherence so + configure META not to bypass the SLC for the bootloader segment */ + RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6), + (ui64SegOutAddrTopCached | RGXFW_BOOTLDR_DEVV_ADDR) >> 32); + pui32FabricCohTestBufferCpuVA = pui32FabricCohCcTestBufferCpuVA; + psFabricCohTestBufferMemDesc = psFabricCohCcTestBufferMemDesc; + psFabricCohTestBufferDevVA = &sFabricCohCcTestBufferDevVA; + } + + if (eTestType == GPU_WRITE_CPU_READ_SH && + !PVRSRVSystemSnoopingOfDeviceCache(psDevInfo->psDeviceNode->psDevConfig)) + { + /* Cannot perform this test if there is no snooping of device cache */ + continue; + } + + /* Acquire underlying PMR CpuPA in preparation for cache maintenance */ + (void) DevmemLocalGetImportHandle(psFabricCohTestBufferMemDesc, (void**)&psPMR); + eError = PMR_CpuPhysAddr(psPMR, OSGetPageShift(), 1, 0, &sCpuPhyAddr, &bValid); + if (eError != PVRSRV_OK || bValid == IMG_FALSE) + { + PVR_DPF((PVR_DBG_ERROR, + "PMR_CpuPhysAddr error: %s, exiting", + PVRSRVGetErrorString(eError))); + bExit = IMG_TRUE; + continue; + } + + /* Here we do two passes mostly to account for the effects of using a different + seed (i.e. 
ui32OddEvenSeed) value to read and write */ + for (ui32OddEven = 1; ui32OddEven < 3 && bExit == IMG_FALSE; ui32OddEven++) + { + IMG_UINT32 i; + + /* Do multiple sub-dword cache line tests */ + for (i = 0; i < 2 && bExit == IMG_FALSE; i++) + { + IMG_UINT32 ui32FWAddr; + IMG_UINT32 ui32FWValue; + IMG_UINT32 ui32FWValue2; + IMG_UINT32 ui32LastFWValue = ~0; + IMG_UINT32 ui32Offset = i * sizeof(IMG_UINT32); + + /* Calculate next address and seed value to write/read from slave-port */ + ui32FWAddr = psFabricCohTestBufferDevVA->ui32Addr + ui32Offset; + ui32OddEvenSeed += 1; + + if (eTestType == GPU_WRITE_CPU_READ_SM || eTestType == GPU_WRITE_CPU_READ_SH) + { + /* Clean dcache to ensure there is no stale data in dcache that might over-write + what we are about to write via slave-port here because if it drains from the CPU + dcache before we read it, it would corrupt what we are going to read back via + the CPU */ + CacheOpValExec(psPMR, 0, ui32Offset, sizeof(IMG_UINT32), PVRSRV_CACHE_OP_CLEAN); + + /* Calculate a new value to write */ + ui32FWValue = i + ui32OddEvenSeed; + + /* Write the value using the RGX slave-port interface */ + eError = RGXWriteMETAAddr(psDevInfo, ui32FWAddr, ui32FWValue); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXWriteMETAAddr error: %s, exiting", + PVRSRVGetErrorString(eError))); + bExit = IMG_TRUE; + continue; + } + + /* Read back value using RGX slave-port interface, this is used + as a sort of memory barrier for the above write */ + eError = RGXReadMETAAddr(psDevInfo, ui32FWAddr, &ui32FWValue2); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXReadMETAAddr error: %s, exiting", + PVRSRVGetErrorString(eError))); + bExit = IMG_TRUE; + continue; + } + else if (ui32FWValue != ui32FWValue2) + { + //IMG_UINT32 ui32FWValue3; + //RGXReadMETAAddr(psDevInfo, 0xC1F00000, &ui32FWValue3); + + /* Fatal error, we should abort */ + PVR_DPF((PVR_DBG_ERROR, + "At Offset: %d, RAW via SlavePort failed: expected: %x, got: %x", + i, + 
ui32FWValue, + ui32FWValue2)); + eError = PVRSRV_ERROR_INIT_FAILURE; + bExit = IMG_TRUE; + continue; + } + + if (!PVRSRVSystemSnoopingOfDeviceCache(psDevInfo->psDeviceNode->psDevConfig)) + { + /* Invalidate dcache to ensure that any prefetched data by the CPU from this memory + region is discarded before we read (i.e. next read must trigger a cache miss). + If there is snooping of device cache, then any prefetching done by the CPU + will reflect the most up to date datum writing by GPU into said location, + that is to say prefetching must be coherent so CPU d-flush is not needed */ + CacheOpValExec(psPMR, 0, ui32Offset, sizeof(IMG_UINT32), PVRSRV_CACHE_OP_INVALIDATE); + } + } + else + { + IMG_UINT32 ui32RAWCpuValue; + + /* Ensures line is in dcache */ + ui32FWValue = IMG_UINT32_MAX; + + /* Dirty allocation in dcache */ + ui32RAWCpuValue = i + ui32OddEvenSeed; + pui32FabricCohTestBufferCpuVA[i] = i + ui32OddEvenSeed; + + /* Flush possible cpu store-buffer(ing) on LMA */ + OSWriteMemoryBarrier(); + + switch (eTestType) + { + case CPU_WRITE_GPU_READ_SM: + /* Flush dcache to force subsequent incoming CPU-bound snoop to miss so + memory is coherent before the SlavePort reads */ + CacheOpValExec(psPMR, 0, ui32Offset, sizeof(IMG_UINT32), PVRSRV_CACHE_OP_FLUSH); + break; + default: + break; + } + + /* Read back value using RGX slave-port interface */ + eError = RGXReadMETAAddr(psDevInfo, ui32FWAddr, &ui32FWValue); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXReadWithSP error: %s, exiting", + PVRSRVGetErrorString(eError))); + bExit = IMG_TRUE; + continue; + } + + /* Being mostly paranoid here, verify that CPU RAW operation is sane after + the above slave port read */ + CacheOpValExec(psPMR, 0, ui32Offset, sizeof(IMG_UINT32), PVRSRV_CACHE_OP_INVALIDATE); + if (pui32FabricCohTestBufferCpuVA[i] != ui32RAWCpuValue) + { + /* Fatal error, we should abort */ + PVR_DPF((PVR_DBG_ERROR, + "At Offset: %d, RAW by CPU failed: expected: %x, got: %x", + i, + 
ui32RAWCpuValue, + pui32FabricCohTestBufferCpuVA[i])); + eError = PVRSRV_ERROR_INIT_FAILURE; + bExit = IMG_TRUE; + continue; + } + } + + /* Compare to see if sub-test passed */ + if (pui32FabricCohTestBufferCpuVA[i] == ui32FWValue) + { + bSubTestPassed = IMG_TRUE; + } + else + { + bSubTestPassed = IMG_FALSE; + bFullTestPassed = IMG_FALSE; + eError = PVRSRV_ERROR_INIT_FAILURE; + if (ui32LastFWValue != ui32FWValue) + { +#if defined(DEBUG) + PVR_LOG(("At Offset: %d, Expected: %x, Got: %x", + i, + (eTestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i], + (eTestType & 0x1) ? pui32FabricCohTestBufferCpuVA[i] : ui32FWValue)); +#endif + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "test encountered unexpected error, exiting")); + eError = PVRSRV_ERROR_INIT_FAILURE; + bExit = IMG_TRUE; + continue; + } + } + + ui32LastFWValue = (eTestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i]; + } + +#if defined(DEBUG) + bSubTestPassed = bExit ? IMG_FALSE : bSubTestPassed; + switch (eTestType) + { + case CPU_WRITE_GPU_READ_SM: + PVR_LOG(("CPU:Write/GPU:Read Snoop Miss Test: completed [run #%u]: %s", + ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); + break; + case GPU_WRITE_CPU_READ_SM: + PVR_LOG(("GPU:Write/CPU:Read Snoop Miss Test: completed [run #%u]: %s", + ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); + break; + case CPU_WRITE_GPU_READ_SH: + PVR_LOG(("CPU:Write/GPU:Read Snoop Hit Test: completed [run #%u]: %s", + ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); + break; + case GPU_WRITE_CPU_READ_SH: + PVR_LOG(("GPU:Write/CPU:Read Snoop Hit Test: completed [run #%u]: %s", + ui32OddEven, bSubTestPassed ? 
"PASSED" : "FAILED")); + break; + default: + PVR_LOG(("Internal error, exiting test")); + eError = PVRSRV_ERROR_INIT_FAILURE; + bExit = IMG_TRUE; + continue; + } +#endif + } + } + + /* Release and free NC/CC test buffers */ + RGXUnsetFirmwareAddress(psFabricCohCcTestBufferMemDesc); +e5: + DevmemReleaseCpuVirtAddr(psFabricCohCcTestBufferMemDesc); +e4: + DevmemFwUnmapAndFree(psDevInfo, psFabricCohCcTestBufferMemDesc); + +e3: + RGXUnsetFirmwareAddress(psFabricCohNcTestBufferMemDesc); +e2: + DevmemReleaseCpuVirtAddr(psFabricCohNcTestBufferMemDesc); +e1: + DevmemFwUnmapAndFree(psDevInfo, psFabricCohNcTestBufferMemDesc); + +e0: + /* Restore bootloader segment settings */ + RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6), + (ui64SegOutAddrTopCached | RGXFW_BOOTLDR_DEVV_ADDR) >> 32); + + bFullTestPassed = bExit ? IMG_FALSE: bFullTestPassed; + if (bFullTestPassed) + { + PVR_LOG(("fabric coherency test: PASSED")); + psDevInfo->ui32CoherencyTestsDone = MAX_NUM_COHERENCY_TESTS + 1; + } + else + { + PVR_LOG(("fabric coherency test: FAILED")); + psDevInfo->ui32CoherencyTestsDone++; + } + + return eError; +} + +IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + return (psDevInfo->sDevFeatureCfg.ui64ErnsBrns & ui64ErnsBrns) != 0; +} + +IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_BANKS)) + { + return 0; + } + return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_BANKS); +} + +IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + 
psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)) + { + return 0; + } + return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS); +} + +void RGXAcquireBootCodeAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootCodeAddr) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + *psBootCodeAddr = psDevInfo->sFWCodeDevVAddrBase; +} + +void RGXAcquireBootDataAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootDataAddr) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + *psBootDataAddr = psDevInfo->sFWDataDevVAddrBase; +} + +IMG_BOOL RGXDeviceIrqEventRx(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + return RGXFwIrqEventRx(psDevInfo); +} diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxlayer_impl.h b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxlayer_impl.h new file mode 100644 index 000000000000..b1ea6f093d3a --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxlayer_impl.h @@ -0,0 +1,61 @@ +/*************************************************************************/ /*! +@File +@Title Header for DDK implementation of the Services abstraction layer +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for DDK implementation of the Services abstraction layer +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGXLAYER_IMPL_H) +#define RGXLAYER_IMPL_H + +#include "rgxlayer.h" +#include "device_connection.h" + +typedef struct _RGX_LAYER_PARAMS_ +{ + void *psDevInfo; + void *psDevConfig; +#if defined(PDUMP) + IMG_UINT32 ui32PdumpFlags; +#endif + + IMG_DEV_PHYADDR sPCAddr; +} RGX_LAYER_PARAMS; + +#endif /* RGXLAYER_IMPL_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxmem.c b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxmem.c new file mode 100644 index 000000000000..98c7ce841245 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxmem.c @@ -0,0 +1,933 @@ +/*************************************************************************/ /*! +@File +@Title RGX memory context management +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX memory context management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pvr_debug.h" +#include "rgxmem.h" +#include "allocmem.h" +#include "devicemem.h" +#include "devicemem_server_utils.h" +#include "devicemem_pdump.h" +#include "rgxdevice.h" +#include "rgx_fwif_km.h" +#include "rgxfwutils.h" +#include "pdump_km.h" +#include "pdump_physmem.h" +#include "pvr_notifier.h" +#include "pvrsrv.h" +#include "sync_internal.h" +#include "rgx_memallocflags.h" +#include "info_page.h" + +#if defined(PDUMP) +#include "sync.h" +#endif + +struct _SERVER_MMU_CONTEXT_ +{ + DEVMEM_MEMDESC *psFWMemContextMemDesc; + PRGXFWIF_FWMEMCONTEXT sFWMemContextDevVirtAddr; + MMU_CONTEXT *psMMUContext; + IMG_PID uiPID; + IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME]; + IMG_UINT64 ui64FBSCEntryMask; + DLLIST_NODE sNode; + PVRSRV_RGXDEV_INFO *psDevInfo; +}; + +PVRSRV_ERROR RGXSLCFlushRange(PVRSRV_DEVICE_NODE *psDeviceNode, + MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + IMG_BOOL bInvalidate) +{ + PVRSRV_ERROR eError; + DLLIST_NODE *psNode, *psNext; + RGXFWIF_KCCB_CMD sFlushInvalCmd; + SERVER_MMU_CONTEXT *psServerMMUContext = NULL; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT32 ui32kCCBCommandSlot; + + OSWRLockAcquireRead(psDevInfo->hMemoryCtxListLock); + + dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext) + { + SERVER_MMU_CONTEXT *psIter = IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode); + if (psIter->psMMUContext == psMMUContext) + { + psServerMMUContext = psIter; + } + } + + OSWRLockReleaseRead(psDevInfo->hMemoryCtxListLock); + + if (! 
psServerMMUContext) + { + return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND; + } + + /* Schedule the SLC flush command */ +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit SLC flush and invalidate"); +#endif + sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL; + sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = bInvalidate; + sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE; + sFlushInvalCmd.uCmdData.sSLCFlushInvalData.ui64Size = uiSize; + sFlushInvalCmd.uCmdData.sSLCFlushInvalData.ui64Address = sDevVAddr.uiAddr; + eError = RGXGetFWCommonContextAddrFromServerMMUCtx(psDevInfo, + psServerMMUContext, + &sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext); + if (eError != PVRSRV_OK) + { + return eError; + } + + eError = RGXSendCommandWithPowLockAndGetKCCBSlot(psDevInfo, + &sFlushInvalCmd, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXSLCFlush: Failed to schedule SLC flush command with error (%u)", + eError)); + } + else + { + /* Wait for the SLC flush to complete */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXSLCFlush: SLC flush and invalidate aborted with error (%u)", + eError)); + } + } + + return eError; +} + +PVRSRV_ERROR RGXInvalidateFBSCTable(PVRSRV_DEVICE_NODE *psDeviceNode, + MMU_CONTEXT *psMMUContext, + IMG_UINT64 ui64FBSCEntryMask) +{ + DLLIST_NODE *psNode, *psNext; + SERVER_MMU_CONTEXT *psServerMMUContext = NULL; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + OSWRLockAcquireRead(psDevInfo->hMemoryCtxListLock); + + dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext) + { + SERVER_MMU_CONTEXT *psIter = IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode); + if (psIter->psMMUContext == psMMUContext) + { + psServerMMUContext = psIter; + } + } + + OSWRLockReleaseRead(psDevInfo->hMemoryCtxListLock); + + if (! 
psServerMMUContext) + { + return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND; + } + + /* Accumulate the FBSC invalidate request */ + psServerMMUContext->ui64FBSCEntryMask |= ui64FBSCEntryMask; + + return PVRSRV_OK; +} + +/* + * RGXExtractFBSCEntryMaskFromMMUContext + * + */ +PVRSRV_ERROR RGXExtractFBSCEntryMaskFromMMUContext(PVRSRV_DEVICE_NODE *psDeviceNode, + SERVER_MMU_CONTEXT *psServerMMUContext, + IMG_UINT64 *pui64FBSCEntryMask) +{ + if (!psServerMMUContext) + { + return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND; + } + + *pui64FBSCEntryMask = psServerMMUContext->ui64FBSCEntryMask; + psServerMMUContext->ui64FBSCEntryMask = 0; + + return PVRSRV_OK; +} + +void RGXMMUCacheInvalidate(PVRSRV_DEVICE_NODE *psDeviceNode, + MMU_CONTEXT *psMMUContext, + MMU_LEVEL eMMULevel, + IMG_BOOL bUnmap) +{ + IMG_UINT32 ui32NewCacheFlags; + + PVR_UNREFERENCED_PARAMETER(bUnmap); + + switch (eMMULevel) + { + case MMU_LEVEL_3: + ui32NewCacheFlags = RGXFWIF_MMUCACHEDATA_FLAGS_PC; + + break; + case MMU_LEVEL_2: + ui32NewCacheFlags = RGXFWIF_MMUCACHEDATA_FLAGS_PD; + + break; + case MMU_LEVEL_1: + ui32NewCacheFlags = RGXFWIF_MMUCACHEDATA_FLAGS_PT; + + break; + default: + ui32NewCacheFlags = 0; + PVR_ASSERT(0); + + break; + } + + MMU_AppendCacheFlags(psMMUContext, ui32NewCacheFlags); +} + +static inline void _GetAndResetCacheOpsPending(PVRSRV_RGXDEV_INFO *psDevInfo, + SERVER_MMU_CONTEXT *psServerMMUContext, + IMG_UINT32 *pui32FWCacheFlags, + IMG_UINT32 *pui32ServerCtxCacheFlags) +{ + /* + * Atomically exchange flags and 0 to ensure we never accidentally read + * state inconsistently or overwrite valid cache flags with 0. + */ + *pui32ServerCtxCacheFlags = + (psServerMMUContext != NULL) ? 
+ MMU_ExchangeCacheFlags(psServerMMUContext->psMMUContext, 0) : 0; + + *pui32FWCacheFlags = MMU_ExchangeCacheFlags(psDevInfo->psKernelMMUCtx, 0); +} + +static +PVRSRV_ERROR _PrepareAndSubmitCacheCommand(PVRSRV_DEVICE_NODE *psDeviceNode, + RGXFWIF_DM eDM, + SERVER_MMU_CONTEXT *psServerMMUContext, + IMG_UINT32 ui32CacheFlags, + IMG_UINT32 ui32FWCacheFlags, + IMG_BOOL bInterrupt, + IMG_UINT32 *pui32MMUInvalidateUpdate) +{ + PVRSRV_ERROR eError; + RGXFWIF_KCCB_CMD sFlushCmd; + + sFlushCmd.uCmdData.sMMUCacheData.ui32FWFlags = ui32FWCacheFlags; + if (psServerMMUContext == NULL) + { + sFlushCmd.uCmdData.sMMUCacheData.sMemoryContext.ui32Addr = 0; + } + else + { + PVR_ASSERT((psServerMMUContext->sFWMemContextDevVirtAddr.ui32Addr != 0)); + + sFlushCmd.uCmdData.sMMUCacheData.sMemoryContext = + psServerMMUContext->sFWMemContextDevVirtAddr; + } + + *pui32MMUInvalidateUpdate = psDeviceNode->ui32NextMMUInvalidateUpdate++; + + /* Setup cmd and add the device nodes sync object */ + sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_MMUCACHE; + sFlushCmd.uCmdData.sMMUCacheData.ui32MMUCacheSyncUpdateValue = *pui32MMUInvalidateUpdate; + SyncPrimGetFirmwareAddr(psDeviceNode->psMMUCacheSyncPrim, + &sFlushCmd.uCmdData.sMMUCacheData.sMMUCacheSync.ui32Addr); + + /* Indicate the firmware should signal command completion to the host */ + if (bInterrupt) + { + ui32CacheFlags |= RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT; + } + + sFlushCmd.uCmdData.sMMUCacheData.ui32Flags = ui32CacheFlags; + +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Submit MMU flush and invalidate (flags = 0x%08x)", + ui32CacheFlags); +#endif + + /* Schedule MMU cache command */ + eError = RGXSendCommand(psDeviceNode->pvDevice, + &sFlushCmd, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule MMU cache command to " + "DM=%d with error (%u)", + __func__, eDM, eError)); + psDeviceNode->ui32NextMMUInvalidateUpdate--; + } + + return eError; +} + +PVRSRV_ERROR 
RGXMMUCacheInvalidateKick(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 *pui32MMUInvalidateUpdate, + IMG_BOOL bInterrupt) +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32CacheFlags, ui32FWCacheFlags; + + eError = PVRSRVPowerLock(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", + __func__, PVRSRVGetErrorString(eError))); + goto RGXMMUCacheInvalidateKick_exit; + } + + _GetAndResetCacheOpsPending(psDeviceNode->pvDevice, NULL, + &ui32FWCacheFlags, &ui32CacheFlags); + if (ui32FWCacheFlags == 0 && ui32CacheFlags == 0) + { + /* Nothing to do if no cache ops pending */ + eError = PVRSRV_OK; + goto _PowerUnlockAndReturnErr; + } + + /* Ensure device is powered up before sending cache command */ + PDUMPPOWCMDSTART(); + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, + PVRSRV_DEV_POWER_STATE_ON, + IMG_FALSE); + PDUMPPOWCMDEND(); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition RGX to ON (%s)", + __func__, PVRSRVGetErrorString(eError))); + goto _PowerUnlockAndReturnErr; + } + + eError = _PrepareAndSubmitCacheCommand(psDeviceNode, RGXFWIF_DM_GP, + NULL, ui32CacheFlags, ui32FWCacheFlags, + bInterrupt, pui32MMUInvalidateUpdate); + if (eError != PVRSRV_OK) + { + /* failed to submit cache operations, return failure */ + goto _PowerUnlockAndReturnErr; + } + +_PowerUnlockAndReturnErr: + PVRSRVPowerUnlock(psDeviceNode); + +RGXMMUCacheInvalidateKick_exit: + return eError; +} + +PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO *psDevInfo, + SERVER_MMU_CONTEXT *psServerMMUContext, + RGXFWIF_DM eDM, + IMG_UINT32 *pui32MMUInvalidateUpdate, + IMG_BOOL bInterrupt) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; + IMG_UINT32 ui32CacheFlags; + IMG_UINT32 ui32FWCacheFlags; + + /* Caller should ensure that power lock is held before calling this function */ + PVR_ASSERT(OSLockIsLocked(psDeviceNode->hPowerLock)); + + _GetAndResetCacheOpsPending(psDeviceNode->pvDevice, 
psServerMMUContext, + &ui32FWCacheFlags, &ui32CacheFlags); + + if (ui32CacheFlags == 0 && ui32FWCacheFlags == 0) + { + /* Nothing to do if no cache ops pending */ + return PVRSRV_OK; + } + + return _PrepareAndSubmitCacheCommand(psDeviceNode, eDM, psServerMMUContext, + ui32CacheFlags, ui32FWCacheFlags, + bInterrupt, pui32MMUInvalidateUpdate); +} + +/* page fault debug is the only current use case for needing to find process info + * after that process device memory context has been destroyed + */ + +typedef struct _UNREGISTERED_MEMORY_CONTEXT_ +{ + IMG_PID uiPID; + IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME]; + IMG_DEV_PHYADDR sPCDevPAddr; +} UNREGISTERED_MEMORY_CONTEXT; + +/* must be a power of two */ +#define UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE (1 << 3) + +static UNREGISTERED_MEMORY_CONTEXT gasUnregisteredMemCtxs[UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE]; +static IMG_UINT32 gui32UnregisteredMemCtxsHead; + +/* record a device memory context being unregistered. + * the list of unregistered contexts can be used to find the PID and process name + * belonging to a memory context which has been destroyed + */ +static void _RecordUnregisteredMemoryContext(PVRSRV_RGXDEV_INFO *psDevInfo, SERVER_MMU_CONTEXT *psServerMMUContext) +{ + UNREGISTERED_MEMORY_CONTEXT *psRecord; + + OSLockAcquire(psDevInfo->hMMUCtxUnregLock); + + psRecord = &gasUnregisteredMemCtxs[gui32UnregisteredMemCtxsHead]; + + gui32UnregisteredMemCtxsHead = (gui32UnregisteredMemCtxsHead + 1) + & (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1); + + OSLockRelease(psDevInfo->hMMUCtxUnregLock); + + psRecord->uiPID = psServerMMUContext->uiPID; + if (MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &psRecord->sPCDevPAddr) != PVRSRV_OK) + { + PVR_LOG(("_RecordUnregisteredMemoryContext: Failed to get PC address for memory context")); + } + OSStringLCopy(psRecord->szProcessName, psServerMMUContext->szProcessName, sizeof(psRecord->szProcessName)); +} + + +void 
RGXUnregisterMemoryContext(IMG_HANDLE hPrivData) +{ + SERVER_MMU_CONTEXT *psServerMMUContext = hPrivData; + PVRSRV_RGXDEV_INFO *psDevInfo = psServerMMUContext->psDevInfo; + +#if defined(PDUMP) + { + RGXFWIF_DEV_VIRTADDR sFWAddr; + + RGXSetFirmwareAddress(&sFWAddr, + psServerMMUContext->psFWMemContextMemDesc, + 0, + RFW_FWADDR_NOREF_FLAG); + + /* + * MMU cache commands (always dumped) might have a pointer to this FW + * memory context, wait until the FW has caught-up to the latest command. + */ + PDUMPCOMMENT("Ensure FW has executed all MMU invalidations on FW memory " + "context 0x%x before freeing it", sFWAddr.ui32Addr); + SyncPrimPDumpPol(psDevInfo->psDeviceNode->psMMUCacheSyncPrim, + psDevInfo->psDeviceNode->ui32NextMMUInvalidateUpdate - 1, + 0xFFFFFFFF, + PDUMP_POLL_OPERATOR_GREATEREQUAL, + PDUMP_FLAGS_CONTINUOUS); + } +#endif + + OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock); + dllist_remove_node(&psServerMMUContext->sNode); + OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock); + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + _RecordUnregisteredMemoryContext(psDevInfo, psServerMMUContext); + } + + /* + * Release the page catalogue address acquired in RGXRegisterMemoryContext(). + */ + MMU_ReleaseBaseAddr(NULL); + + /* + * Free the firmware memory context. 
+ */ + PDUMPCOMMENT("Free FW memory context"); + DevmemFwUnmapAndFree(psDevInfo, psServerMMUContext->psFWMemContextMemDesc); + + OSFreeMem(psServerMMUContext); +} + +/* + * RGXRegisterMemoryContext + */ +PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode, + MMU_CONTEXT *psMMUContext, + IMG_HANDLE *hPrivData) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + DEVMEM_FLAGS_T uiFWMemContextMemAllocFlags; + RGXFWIF_FWMEMCONTEXT *psFWMemContext; + DEVMEM_MEMDESC *psFWMemContextMemDesc; + SERVER_MMU_CONTEXT *psServerMMUContext; + + if (psDevInfo->psKernelMMUCtx == NULL) + { + /* + * This must be the creation of the Kernel memory context. Take a copy + * of the MMU context for use when programming the BIF. + */ + psDevInfo->psKernelMMUCtx = psMMUContext; + } + else + { + psServerMMUContext = OSAllocMem(sizeof(*psServerMMUContext)); + if (psServerMMUContext == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc_server_ctx; + } + + psServerMMUContext->psDevInfo = psDevInfo; + psServerMMUContext->ui64FBSCEntryMask = 0; + psServerMMUContext->sFWMemContextDevVirtAddr.ui32Addr = 0; + + /* + * This FW MemContext is only mapped into kernel for initialisation purposes. + * Otherwise this allocation is only used by the FW. + * Therefore the GPU cache doesn't need coherency, + * and write-combine is suffice on the CPU side (WC buffer will be flushed at any kick) + */ + uiFWMemContextMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE; + + /* + Allocate device memory for the firmware memory context for the new + application. 
+ */ + PDUMPCOMMENT("Allocate RGX firmware memory context"); + eError = DevmemFwAllocate(psDevInfo, + sizeof(*psFWMemContext), + uiFWMemContextMemAllocFlags | PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC, + "FwMemoryContext", + &psFWMemContextMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate firmware memory context (%u)", + __func__, + eError)); + goto fail_alloc_fw_ctx; + } + + /* + Temporarily map the firmware memory context to the kernel. + */ + eError = DevmemAcquireCpuVirtAddr(psFWMemContextMemDesc, + (void **)&psFWMemContext); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map firmware memory context (%u)", + __func__, + eError)); + goto fail_acquire_cpu_addr; + } + + /* + * Write the new memory context's page catalogue into the firmware memory + * context for the client. + */ + eError = MMU_AcquireBaseAddr(psMMUContext, &psFWMemContext->sPCDevPAddr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire Page Catalogue address (%u)", + __func__, + eError)); + DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc); + goto fail_acquire_base_addr; + } + + /* + * Set default values for the rest of the structure. 
+ */ + psFWMemContext->uiPageCatBaseRegID = RGXFW_BIF_INVALID_PCREG; + psFWMemContext->uiBreakpointAddr = 0; + psFWMemContext->uiBPHandlerAddr = 0; + psFWMemContext->uiBreakpointCtl = 0; + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +{ + IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0; + IMG_BOOL bOSidAxiProt; + + MMU_GetOSids(psMMUContext, &ui32OSid, &ui32OSidReg, &bOSidAxiProt); + + psFWMemContext->ui32OSid = ui32OSidReg; + psFWMemContext->bOSidAxiProt = bOSidAxiProt; +} +#endif + +#if defined(PDUMP) + { + IMG_CHAR aszName[PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiOffset = 0; + + /* + * Dump the Mem context allocation + */ + DevmemPDumpLoadMem(psFWMemContextMemDesc, 0, sizeof(*psFWMemContext), PDUMP_FLAGS_CONTINUOUS); + + + /* + * Obtain a symbolic addr of the mem context structure + */ + eError = DevmemPDumpPageCatBaseToSAddr(psFWMemContextMemDesc, + &uiOffset, + aszName, + PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to generate a Dump Page Catalogue address (%u)", + __func__, + eError)); + DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc); + goto fail_pdump_cat_base_addr; + } + + /* + * Dump the Page Cat tag in the mem context (symbolic address) + */ + eError = MMU_PDumpWritePageCatBase(psMMUContext, + aszName, + uiOffset, + 8, /* 64-bit register write */ + 0, + 0, + 0); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire Page Catalogue address (%u)", + __func__, + eError)); + DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc); + goto fail_pdump_cat_base; + } + } +#endif + + /* + * Release kernel address acquired above. + */ + DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc); + + /* + * Store the process information for this device memory context + * for use with the host page-fault analysis. 
+ */ + psServerMMUContext->uiPID = OSGetCurrentClientProcessIDKM(); + psServerMMUContext->psMMUContext = psMMUContext; + psServerMMUContext->psFWMemContextMemDesc = psFWMemContextMemDesc; + OSStringLCopy(psServerMMUContext->szProcessName, + OSGetCurrentClientProcessNameKM(), + sizeof(psServerMMUContext->szProcessName)); + + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "New memory context: Process Name: %s PID: %u (0x%08X)", + psServerMMUContext->szProcessName, + psServerMMUContext->uiPID, + psServerMMUContext->uiPID); + + OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock); + dllist_add_to_tail(&psDevInfo->sMemoryContextList, &psServerMMUContext->sNode); + OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock); + + *hPrivData = psServerMMUContext; + } + + return PVRSRV_OK; + +#if defined(PDUMP) +fail_pdump_cat_base: +fail_pdump_cat_base_addr: + MMU_ReleaseBaseAddr(NULL); +#endif +fail_acquire_base_addr: + /* Done before jumping to the fail point as the release is done before exit */ +fail_acquire_cpu_addr: + DevmemFwUnmapAndFree(psDevInfo, psServerMMUContext->psFWMemContextMemDesc); +fail_alloc_fw_ctx: + OSFreeMem(psServerMMUContext); +fail_alloc_server_ctx: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +DEVMEM_MEMDESC *RGXGetFWMemDescFromMemoryContextHandle(IMG_HANDLE hPriv) +{ + SERVER_MMU_CONTEXT *psMMUContext = (SERVER_MMU_CONTEXT *) hPriv; + + return psMMUContext->psFWMemContextMemDesc; +} + +void RGXSetFWMemContextDevVirtAddr(SERVER_MMU_CONTEXT *psServerMMUContext, + RGXFWIF_DEV_VIRTADDR sFWMemContextAddr) +{ + psServerMMUContext->sFWMemContextDevVirtAddr.ui32Addr = sFWMemContextAddr.ui32Addr; +} + +void RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_DEV_VIRTADDR *psDevVAddr, + IMG_DEV_PHYADDR *psDevPAddr, + MMU_FAULT_DATA *psOutFaultData) +{ + IMG_DEV_PHYADDR sPCDevPAddr; + DLLIST_NODE *psNode, *psNext; + + OSWRLockAcquireRead(psDevInfo->hMemoryCtxListLock); + + dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext) + { + 
SERVER_MMU_CONTEXT *psServerMMUContext = + IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode); + + if (MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &sPCDevPAddr) != PVRSRV_OK) + { + PVR_LOG(("Failed to get PC address for memory context")); + continue; + } + + if (psDevPAddr->uiAddr == sPCDevPAddr.uiAddr) + { + MMU_CheckFaultAddress(psServerMMUContext->psMMUContext, psDevVAddr, psOutFaultData); + goto out_unlock; + } + } + + /* Lastly check for fault in the kernel allocated memory */ + if (MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sPCDevPAddr) != PVRSRV_OK) + { + PVR_LOG(("Failed to get PC address for kernel memory context")); + } + + if (psDevPAddr->uiAddr == sPCDevPAddr.uiAddr) + { + MMU_CheckFaultAddress(psDevInfo->psKernelMMUCtx, psDevVAddr, psOutFaultData); + } + +out_unlock: + OSWRLockReleaseRead(psDevInfo->hMemoryCtxListLock); +} + +/* given the physical address of a page catalogue, searches for a corresponding + * MMU context and if found, provides the caller details of the process. + * Returns IMG_TRUE if a process is found. 
+ */ +IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR sPCAddress, + RGXMEM_PROCESS_INFO *psInfo) +{ + IMG_BOOL bRet = IMG_FALSE; + DLLIST_NODE *psNode, *psNext; + SERVER_MMU_CONTEXT *psServerMMUContext = NULL; + + /* check if the input PC addr corresponds to an active memory context */ + dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext) + { + SERVER_MMU_CONTEXT *psThisMMUContext = + IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode); + IMG_DEV_PHYADDR sPCDevPAddr; + + if (MMU_AcquireBaseAddr(psThisMMUContext->psMMUContext, &sPCDevPAddr) != PVRSRV_OK) + { + PVR_LOG(("Failed to get PC address for memory context")); + continue; + } + + if (sPCAddress.uiAddr == sPCDevPAddr.uiAddr) + { + psServerMMUContext = psThisMMUContext; + break; + } + } + + if (psServerMMUContext != NULL) + { + psInfo->uiPID = psServerMMUContext->uiPID; + OSStringLCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName)); + psInfo->bUnregistered = IMG_FALSE; + bRet = IMG_TRUE; + } + /* else check if the input PC addr corresponds to the firmware */ + else + { + IMG_DEV_PHYADDR sKernelPCDevPAddr; + PVRSRV_ERROR eError; + + eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sKernelPCDevPAddr); + + if (eError != PVRSRV_OK) + { + PVR_LOG(("Failed to get PC address for kernel memory context")); + } + else + { + if (sPCAddress.uiAddr == sKernelPCDevPAddr.uiAddr) + { + psInfo->uiPID = RGXMEM_SERVER_PID_FIRMWARE; + OSStringLCopy(psInfo->szProcessName, "Firmware", sizeof(psInfo->szProcessName)); + psInfo->bUnregistered = IMG_FALSE; + bRet = IMG_TRUE; + } + } + } + + if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) && + (bRet == IMG_FALSE)) + { + /* no active memory context found with the given PC address. + * Check the list of most recently freed memory contexts. 
+ */ + IMG_UINT32 i; + + OSLockAcquire(psDevInfo->hMMUCtxUnregLock); + + /* iterate through the list of unregistered memory contexts + * from newest (one before the head) to the oldest (the current head) + */ + i = gui32UnregisteredMemCtxsHead; + + do + { + UNREGISTERED_MEMORY_CONTEXT *psRecord; + + i ? i-- : (i = (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1)); + + psRecord = &gasUnregisteredMemCtxs[i]; + + if (psRecord->sPCDevPAddr.uiAddr == sPCAddress.uiAddr) + { + psInfo->uiPID = psRecord->uiPID; + OSStringLCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName)); + psInfo->bUnregistered = IMG_TRUE; + bRet = IMG_TRUE; + break; + } + } while (i != gui32UnregisteredMemCtxsHead); + + OSLockRelease(psDevInfo->hMMUCtxUnregLock); + + } + + return bRet; +} + +IMG_BOOL RGXPCPIDToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_PID uiPID, + RGXMEM_PROCESS_INFO *psInfo) +{ + IMG_BOOL bRet = IMG_FALSE; + DLLIST_NODE *psNode, *psNext; + SERVER_MMU_CONTEXT *psServerMMUContext = NULL; + + /* check if the input PID corresponds to an active memory context */ + dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext) + { + SERVER_MMU_CONTEXT *psThisMMUContext = + IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode); + + if (psThisMMUContext->uiPID == uiPID) + { + psServerMMUContext = psThisMMUContext; + break; + } + } + + if (psServerMMUContext != NULL) + { + psInfo->uiPID = psServerMMUContext->uiPID; + OSStringLCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName)); + psInfo->bUnregistered = IMG_FALSE; + bRet = IMG_TRUE; + } + /* else check if the input PID corresponds to the firmware */ + else if (uiPID == RGXMEM_SERVER_PID_FIRMWARE) + { + psInfo->uiPID = RGXMEM_SERVER_PID_FIRMWARE; + OSStringLCopy(psInfo->szProcessName, "Firmware", sizeof(psInfo->szProcessName)); + psInfo->bUnregistered = IMG_FALSE; + bRet = IMG_TRUE; + } + + if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) && 
+ (bRet == IMG_FALSE)) + { + /* if the PID didn't correspond to an active context or the + * FW address then see if it matches a recently unregistered context + */ + const IMG_UINT32 ui32Mask = UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1; + IMG_UINT32 i, j; + + OSLockAcquire(psDevInfo->hMMUCtxUnregLock); + + for (i = (gui32UnregisteredMemCtxsHead - 1) & ui32Mask, j = 0; + j < UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE; + i = (gui32UnregisteredMemCtxsHead - 1) & ui32Mask, j++) + { + UNREGISTERED_MEMORY_CONTEXT *psRecord = &gasUnregisteredMemCtxs[i]; + + if (psRecord->uiPID == uiPID) + { + psInfo->uiPID = psRecord->uiPID; + OSStringLCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName)); + psInfo->bUnregistered = IMG_TRUE; + bRet = IMG_TRUE; + break; + } + } + + OSLockRelease(psDevInfo->hMMUCtxUnregLock); + } + + return bRet; +} + +/****************************************************************************** + End of file (rgxmem.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxmem.h b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxmem.h new file mode 100644 index 000000000000..40b51dbbdc7c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxmem.h @@ -0,0 +1,153 @@ +/*************************************************************************/ /*! +@File +@Title RGX memory context management +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for RGX memory context management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__RGXMEM_H__) +#define __RGXMEM_H__ + +#include "pvrsrv_error.h" +#include "device.h" +#include "mmu_common.h" +#include "rgxdevice.h" + +#define RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME 16 + +/* this PID denotes the firmware */ +#define RGXMEM_SERVER_PID_FIRMWARE 0xFFFFFFFF + +/* this PID denotes the PM */ +#define RGXMEM_SERVER_PID_PM 0xEFFFFFFF + +typedef struct _RGXMEM_PROCESS_INFO_ +{ + IMG_PID uiPID; + IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME]; + IMG_BOOL bUnregistered; +} RGXMEM_PROCESS_INFO; + +typedef struct _SERVER_MMU_CONTEXT_ SERVER_MMU_CONTEXT; + +IMG_DEV_PHYADDR GetPC(MMU_CONTEXT * psContext); + +void RGXSetFWMemContextDevVirtAddr(SERVER_MMU_CONTEXT *psServerMMUContext, + RGXFWIF_DEV_VIRTADDR sFWMemContextAddr); + +void RGXMMUSyncPrimAlloc(PVRSRV_DEVICE_NODE *psDevNode); +void RGXMMUSyncPrimFree(void); + +PVRSRV_ERROR RGXSLCFlushRange(PVRSRV_DEVICE_NODE *psDevNode, + MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiLength, + IMG_BOOL bInvalidate); + +PVRSRV_ERROR RGXInvalidateFBSCTable(PVRSRV_DEVICE_NODE *psDeviceNode, + MMU_CONTEXT *psMMUContext, + IMG_UINT64 ui64FBSCEntryMask); + +PVRSRV_ERROR RGXExtractFBSCEntryMaskFromMMUContext(PVRSRV_DEVICE_NODE *psDeviceNode, + SERVER_MMU_CONTEXT *psServerMMUContext, + IMG_UINT64 *pui64FBSCEntryMask); + +void RGXMMUCacheInvalidate(PVRSRV_DEVICE_NODE *psDevNode, + MMU_CONTEXT *psMMUContext, + MMU_LEVEL eMMULevel, + IMG_BOOL bUnmap); + +/*************************************************************************/ /*! +@Function RGXMMUCacheInvalidateKick + +@Description Sends a flush command to a particular DM but first takes + the power lock. 
+ +@Input psDevNode Device Node pointer +@Input pui32NextMMUInvalidateUpdate +@Input bInterrupt Should the firmware signal command completion to + the host + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXMMUCacheInvalidateKick(PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 *pui32NextMMUInvalidateUpdate, + IMG_BOOL bInterrupt); + +/*************************************************************************/ /*! +@Function RGXPreKickCacheCommand + +@Description Sends a cache flush command to a particular DM without + honouring the power lock. It's the caller's responsibility + to ensure power lock is held before calling this function. + +@Input psDevInfo Device Info +@Input psServerMMUContext Context requesting cache invalidation +@Input eDM To which DM the cmd is sent. +@Input pui32MMUInvalidateUpdate +@Input bInterrupt Should the firmware signal command completion to + the host + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO *psDevInfo, + SERVER_MMU_CONTEXT *psServerMMUContext, + RGXFWIF_DM eDM, + IMG_UINT32 *pui32MMUInvalidateUpdate, + IMG_BOOL bInterrupt); + +void RGXUnregisterMemoryContext(IMG_HANDLE hPrivData); +PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE *psDevNode, + MMU_CONTEXT *psMMUContext, + IMG_HANDLE *hPrivData); + +DEVMEM_MEMDESC *RGXGetFWMemDescFromMemoryContextHandle(IMG_HANDLE hPriv); + +void RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_DEV_VIRTADDR *psDevVAddr, + IMG_DEV_PHYADDR *psDevPAddr, + MMU_FAULT_DATA *psOutFaultData); + +IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR sPCAddress, + RGXMEM_PROCESS_INFO *psInfo); + +IMG_BOOL RGXPCPIDToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_PID uiPID, + RGXMEM_PROCESS_INFO *psInfo); + +#endif /* __RGXMEM_H__ */ diff --git 
a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxmmuinit.c b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxmmuinit.c new file mode 100644 index 000000000000..5e521cfcc02d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxmmuinit.c @@ -0,0 +1,1268 @@ +/*************************************************************************/ /*! +@File +@Title Device specific initialisation routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific MMU initialisation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ /**************************************************************************/ +#include "rgxmmuinit.h" +#include "rgxmmudefs_km.h" + +#include "rgxdevice.h" +#include "img_types.h" +#include "img_defs.h" +#include "mmu_common.h" +#include "pdump_mmu.h" + +#include "pvr_debug.h" +#include "pvrsrv_error.h" +#include "rgx_memallocflags.h" +#include "rgx_heaps.h" +#include "pdump_km.h" + + +/* useful macros */ +/* units represented in a bitfield */ +#define UNITS_IN_BITFIELD(Mask, Shift) ((Mask >> Shift) + 1) + + +/* + * Bits of PT, PD and PC not involving addresses + */ + + + +/* protection bits for MMU_VERSION <= 3 */ +#define RGX_MMUCTRL_PTE_PROTMASK (RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN | \ + ~RGX_MMUCTRL_PT_DATA_AXCACHE_CLRMSK | \ + RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN | \ + RGX_MMUCTRL_PT_DATA_PM_SRC_EN | \ + RGX_MMUCTRL_PT_DATA_CC_EN | \ + RGX_MMUCTRL_PT_DATA_READ_ONLY_EN | \ + RGX_MMUCTRL_PT_DATA_VALID_EN) + +#define RGX_MMUCTRL_PDE_PROTMASK (RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN | \ + ~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK | \ + RGX_MMUCTRL_PD_DATA_VALID_EN) + +#define RGX_MMUCTRL_PCE_PROTMASK (RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN | \ + RGX_MMUCTRL_PC_DATA_VALID_EN) + + +/* + * protection bits for MMU_VERSION >= 4 + * MMU4 has no PENDING or PAGE_SIZE fields in PxE + */ +#define RGX_MMU4CTRL_PTE_PROTMASK (RGX_MMUCTRL_PTE_PROTMASK & 
~RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN) + +#define RGX_MMU4CTRL_PDE_PROTMASK (RGX_MMUCTRL_PD_DATA_VALID_EN) + +#define RGX_MMU4CTRL_PCE_PROTMASK (RGX_MMUCTRL_PC_DATA_VALID_EN) + + + + +static MMU_PxE_CONFIG sRGXMMUPCEConfig; +static MMU_DEVVADDR_CONFIG sRGXMMUTopLevelDevVAddrConfig; + + +/* + * + * Configuration for heaps with 4kB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_4KBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_4KBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_4KBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig4KB; + + +/* + * + * Configuration for heaps with 16kB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_16KBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_16KBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_16KBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig16KB; + + +/* + * + * Configuration for heaps with 64kB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_64KBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_64KBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_64KBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig64KB; + + +/* + * + * Configuration for heaps with 256kB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_256KBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_256KBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig256KB; + + +/* + * + * Configuration for heaps with 1MB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_1MBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_1MBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_1MBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig1MB; + + +/* + * + * Configuration for heaps with 2MB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_2MBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_2MBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_2MBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig2MB; + + +/* Forward declaration of protection bits 
derivation functions, for + the following structure */ +static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); +static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags); +static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); +static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags); +static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); +static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags); + +/* protection bits derivation functions for MMUv4 */ +static IMG_UINT64 RGXMMU4DerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); +static PVRSRV_ERROR RGXMMU4GetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize); +static PVRSRV_ERROR RGXMMU4GetPageSizeFromVirtAddr(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_UINT32 *pui32Log2PageSize); + + +static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize, + const MMU_PxE_CONFIG **ppsMMUPDEConfig, + const MMU_PxE_CONFIG **ppsMMUPTEConfig, + const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig, + IMG_HANDLE *phPriv); + +static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv); + +static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize); +static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize); + +static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes; + +PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_BOOL bHaveMMU4 = (RGX_GET_FEATURE_VALUE(psDevInfo, MMU_VERSION) >= 4); + + /* Setup of Px Entries: + * + * + * PAGE TABLE (8 Byte): + * + * | 62 | 61...40 | 39...12 (varies) | 11...6 | 5 | 4 | 3 | 2 | 1 | 0 | + * | PM/Meta protect | VP Page (39:18) | Physical Page | VP Page (17:12) | Entry Pending | PM src | SLC Bypass Ctrl | Cache Coherency | Read Only | Valid | + * + * + * PAGE 
DIRECTORY (8 Byte): + * + * | 40 | 39...5 (varies) | 4 | 3...1 | 0 | + * | Entry Pending | Page Table base address | (reserved) | Page Size | Valid | + * + * + * PAGE CATALOGUE (4 Byte): + * + * | 31...4 | 3...2 | 1 | 0 | + * | Page Directory base address | (reserved) | Entry Pending | Valid | + * + */ + + + /* Example how to get the PD address from a PC entry. + * The procedure is the same for PD and PT entries to retrieve PT and Page addresses: + * + * 1) sRGXMMUPCEConfig.uiAddrMask applied to PC entry with '&': + * | 31...4 | 3...2 | 1 | 0 | + * | PD Addr | 0 | 0 | 0 | + * + * 2) sRGXMMUPCEConfig.uiAddrShift applied with '>>': + * | 27...0 | + * | PD Addr | + * + * 3) sRGXMMUPCEConfig.uiAddrLog2Align applied with '<<': + * | 39...0 | + * | PD Addr | + * + */ + + + sRGXMMUDeviceAttributes.pszMMUPxPDumpMemSpaceName = + PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]); + + /* + * Setup sRGXMMUPCEConfig + */ + sRGXMMUPCEConfig.uiBytesPerEntry = 4; /* 32 bit entries */ + sRGXMMUPCEConfig.uiAddrMask = 0xfffffff0; /* Mask to get significant address bits of PC entry i.e. the address of the PD */ + + sRGXMMUPCEConfig.uiAddrShift = 4; /* Shift this many bits to get PD address */ + sRGXMMUPCEConfig.uiAddrLog2Align = 12; /* Alignment of PD physical addresses. */ + + sRGXMMUPCEConfig.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PCE_PROTMASK : RGX_MMUCTRL_PCE_PROTMASK; /* Mask to get the status bits */ + sRGXMMUPCEConfig.uiProtShift = 0; /* Shift this many bits to get the status bits */ + + sRGXMMUPCEConfig.uiValidEnMask = RGX_MMUCTRL_PC_DATA_VALID_EN; /* Mask to get entry valid bit of the PC */ + sRGXMMUPCEConfig.uiValidEnShift = RGX_MMUCTRL_PC_DATA_VALID_SHIFT; /* Shift this many bits to get entry valid bit */ + + /* + * Setup sRGXMMUTopLevelDevVAddrConfig + */ + sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; /* Mask to get PC index applied to a 40 bit virt. 
device address */ + sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; /* Shift a 40 bit virt. device address by this amount to get the PC index */ + sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask, + sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift)); + + sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; /* Mask to get PD index applied to a 40 bit virt. device address */ + sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; /* Shift a 40 bit virt. device address by this amount to get the PD index */ + sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask, + sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift)); + + /* + * + * Configuration for heaps with 4kB Data-Page size + * + */ + + /* + * Setup sRGXMMUPDEConfig_4KBDP + */ + sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 8; + + sRGXMMUPDEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); + sRGXMMUPDEConfig_4KBDP.uiAddrShift = 12; + sRGXMMUPDEConfig_4KBDP.uiAddrLog2Align = 12; + + sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); + sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 1; + + sRGXMMUPDEConfig_4KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_4KBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; + sRGXMMUPDEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUPTEConfig_4KBDP + */ + sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 8; + + sRGXMMUPTEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffff000); + sRGXMMUPTEConfig_4KBDP.uiAddrShift = 12; + sRGXMMUPTEConfig_4KBDP.uiAddrLog2Align = 12; /* Alignment of the physical addresses of the pages NOT PTs */ + + sRGXMMUPTEConfig_4KBDP.uiProtMask = bHaveMMU4 ? 
RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_4KBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; + sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUDevVAddrConfig_4KBDP + */ + sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask, + sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift)); + + sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask, + sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift)); + + sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask, + sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift)); + + sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff); + sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_4KBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig4KB + */ + gsPageSizeConfig4KB.psPDEConfig = &sRGXMMUPDEConfig_4KBDP; + gsPageSizeConfig4KB.psPTEConfig = &sRGXMMUPTEConfig_4KBDP; + gsPageSizeConfig4KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_4KBDP; + gsPageSizeConfig4KB.uiRefCount = 0; + gsPageSizeConfig4KB.uiMaxRefCount = 0; + + + /* + * + * Configuration for heaps with 16kB Data-Page size + * + */ + + /* + * Setup sRGXMMUPDEConfig_16KBDP + */ + sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 8; + 
+ sRGXMMUPDEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); + sRGXMMUPDEConfig_16KBDP.uiAddrShift = 10; + sRGXMMUPDEConfig_16KBDP.uiAddrLog2Align = 10; + + sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); + sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 1; + + sRGXMMUPDEConfig_16KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_16KBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; + sRGXMMUPDEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUPTEConfig_16KBDP + */ + sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 8; + + sRGXMMUPTEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xffffffc000); + sRGXMMUPTEConfig_16KBDP.uiAddrShift = 14; + sRGXMMUPTEConfig_16KBDP.uiAddrLog2Align = 14; + + sRGXMMUPTEConfig_16KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_16KBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; + sRGXMMUPTEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUDevVAddrConfig_16KBDP + */ + sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask, + sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift)); + + + sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask, + sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift)); + + + sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001fc000); + 
sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift = 14; + sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask, + sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift)); + + sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000003fff); + sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_16KBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig16KB + */ + gsPageSizeConfig16KB.psPDEConfig = &sRGXMMUPDEConfig_16KBDP; + gsPageSizeConfig16KB.psPTEConfig = &sRGXMMUPTEConfig_16KBDP; + gsPageSizeConfig16KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_16KBDP; + gsPageSizeConfig16KB.uiRefCount = 0; + gsPageSizeConfig16KB.uiMaxRefCount = 0; + + + /* + * + * Configuration for heaps with 64kB Data-Page size + * + */ + + /* + * Setup sRGXMMUPDEConfig_64KBDP + */ + sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 8; + + sRGXMMUPDEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); + sRGXMMUPDEConfig_64KBDP.uiAddrShift = 8; + sRGXMMUPDEConfig_64KBDP.uiAddrLog2Align = 8; + + sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); + sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 1; + + sRGXMMUPDEConfig_64KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_64KBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; + sRGXMMUPDEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUPTEConfig_64KBDP + */ + sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 8; + + sRGXMMUPTEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xffffff0000); + sRGXMMUPTEConfig_64KBDP.uiAddrShift = 16; + sRGXMMUPTEConfig_64KBDP.uiAddrLog2Align = 16; + + sRGXMMUPTEConfig_64KBDP.uiProtMask = bHaveMMU4 ? 
RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_64KBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; + sRGXMMUPTEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUDevVAddrConfig_64KBDP + */ + sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask, + sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift)); + + + sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask, + sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift)); + + + sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001f0000); + sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = 16; + sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask, + sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift)); + + + sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000000ffff); + sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_64KBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig64KB + */ + gsPageSizeConfig64KB.psPDEConfig = &sRGXMMUPDEConfig_64KBDP; + gsPageSizeConfig64KB.psPTEConfig = &sRGXMMUPTEConfig_64KBDP; + gsPageSizeConfig64KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_64KBDP; + gsPageSizeConfig64KB.uiRefCount = 0; + gsPageSizeConfig64KB.uiMaxRefCount = 0; + + + /* + * + * Configuration for heaps with 256kB Data-Page size + * + */ + + /* + * Setup sRGXMMUPDEConfig_256KBDP + */ + sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 8; 
+ + sRGXMMUPDEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); + sRGXMMUPDEConfig_256KBDP.uiAddrShift = 6; + sRGXMMUPDEConfig_256KBDP.uiAddrLog2Align = 6; + + sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); + sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 1; + + sRGXMMUPDEConfig_256KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_256KBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; + sRGXMMUPDEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; + + /* + * Setup MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP + */ + sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 8; + + sRGXMMUPTEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffc0000); + sRGXMMUPTEConfig_256KBDP.uiAddrShift = 18; + sRGXMMUPTEConfig_256KBDP.uiAddrLog2Align = 18; + + sRGXMMUPTEConfig_256KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_256KBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; + sRGXMMUPTEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUDevVAddrConfig_256KBDP + */ + sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask, + sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift)); + + + sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask, + sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift)); + + + sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask = 
IMG_UINT64_C(0x00001c0000); + sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift = 18; + sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask, + sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift)); + + + sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000003ffff); + sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_256KBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig256KB + */ + gsPageSizeConfig256KB.psPDEConfig = &sRGXMMUPDEConfig_256KBDP; + gsPageSizeConfig256KB.psPTEConfig = &sRGXMMUPTEConfig_256KBDP; + gsPageSizeConfig256KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_256KBDP; + gsPageSizeConfig256KB.uiRefCount = 0; + gsPageSizeConfig256KB.uiMaxRefCount = 0; + + /* + * Setup sRGXMMUPDEConfig_1MBDP + */ + sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 8; + + sRGXMMUPDEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); + /* + * The hardware requires that PT tables need be 1<<6 = 64 byte aligned even + * if they contain fewer entries. + */ + sRGXMMUPDEConfig_1MBDP.uiAddrShift = 6; + sRGXMMUPDEConfig_1MBDP.uiAddrLog2Align = 6; + + sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); + sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 1; + + sRGXMMUPDEConfig_1MBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_1MBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; + sRGXMMUPDEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUPTEConfig_1MBDP + */ + sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8; + + sRGXMMUPTEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffff00000); + sRGXMMUPTEConfig_1MBDP.uiAddrShift = 20; + sRGXMMUPTEConfig_1MBDP.uiAddrLog2Align = 20; + + sRGXMMUPTEConfig_1MBDP.uiProtMask = bHaveMMU4 ? 
RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_1MBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; + sRGXMMUPTEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUDevVAddrConfig_1MBDP + */ + sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask, + sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift)); + + + sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask, + sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift)); + + + sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000100000); + sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift = 20; + sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask, + sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift)); + + + sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00000fffff); + sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_1MBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig1MB + */ + gsPageSizeConfig1MB.psPDEConfig = &sRGXMMUPDEConfig_1MBDP; + gsPageSizeConfig1MB.psPTEConfig = &sRGXMMUPTEConfig_1MBDP; + gsPageSizeConfig1MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_1MBDP; + gsPageSizeConfig1MB.uiRefCount = 0; + gsPageSizeConfig1MB.uiMaxRefCount = 0; + + /* + * Setup sRGXMMUPDEConfig_2MBDP + */ + sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 8; + + sRGXMMUPDEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); + /* + * The hardware requires that PT 
tables need be 1<<6 = 64 byte aligned even + * if they contain fewer entries. + */ + sRGXMMUPDEConfig_2MBDP.uiAddrShift = 6; + sRGXMMUPDEConfig_2MBDP.uiAddrLog2Align = 6; + + sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); + sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 1; + + sRGXMMUPDEConfig_2MBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_2MBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; + sRGXMMUPDEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUPTEConfig_2MBDP + */ + sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 8; + + sRGXMMUPTEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xffffe00000); + sRGXMMUPTEConfig_2MBDP.uiAddrShift = 21; + sRGXMMUPTEConfig_2MBDP.uiAddrLog2Align = 21; + + sRGXMMUPTEConfig_2MBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_2MBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; + sRGXMMUPTEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUDevVAddrConfig_2MBDP + */ + sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask, + sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift)); + + + sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask, + sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift)); + + + sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000000000); + sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift = 
21; + sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask, + sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift)); + + + sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00001fffff); + sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_2MBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig2MB + */ + gsPageSizeConfig2MB.psPDEConfig = &sRGXMMUPDEConfig_2MBDP; + gsPageSizeConfig2MB.psPTEConfig = &sRGXMMUPTEConfig_2MBDP; + gsPageSizeConfig2MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_2MBDP; + gsPageSizeConfig2MB.uiRefCount = 0; + gsPageSizeConfig2MB.uiMaxRefCount = 0; + + /* + * Setup sRGXMMUDeviceAttributes + */ + sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_VARPAGE_40BIT; + sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_3; + sRGXMMUDeviceAttributes.ui32BaseAlign = RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT; + sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPCEConfig; + sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig; + + /* Functions for deriving page table/dir/cat protection bits */ + sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8; + sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4; + sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8; + sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4; + sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8; + sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4; + + /* Functions for establishing configurations for PDE/PTE/DEVVADDR + on per-heap basis */ + sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB; + sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB; + + sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE4 = RGXGetPageSizeFromPDE4; + sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXGetPageSizeFromPDE8; + sRGXMMUDeviceAttributes.pfnGetPageSizeFromVirtAddr = NULL; 
+ + if (bHaveMMU4) + { + /* override some of these functions for MMU4 as page size is not stored in PD entries */ + sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXMMU4DerivePDEProt8; + sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXMMU4GetPageSizeFromPDE8; + sRGXMMUDeviceAttributes.pfnGetPageSizeFromVirtAddr = RGXMMU4GetPageSizeFromVirtAddr; + } + + psDeviceNode->psMMUDevAttrs = &sRGXMMUDeviceAttributes; + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + + eError = PVRSRV_OK; + +#if defined(PDUMP) + psDeviceNode->pfnMMUGetContextID = NULL; +#endif + + psDeviceNode->psMMUDevAttrs = NULL; + +#if defined(DEBUG) + PVR_DPF((PVR_DBG_MESSAGE, "Variable Page Size Heap Stats:")); + PVR_DPF((PVR_DBG_MESSAGE, "Max 4K page heaps: %d", + gsPageSizeConfig4KB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, "Current 4K page heaps (should be 0): %d", + gsPageSizeConfig4KB.uiRefCount)); + PVR_DPF((PVR_DBG_MESSAGE, "Max 16K page heaps: %d", + gsPageSizeConfig16KB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, "Current 16K page heaps (should be 0): %d", + gsPageSizeConfig16KB.uiRefCount)); + PVR_DPF((PVR_DBG_MESSAGE, "Max 64K page heaps: %d", + gsPageSizeConfig64KB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, "Current 64K page heaps (should be 0): %d", + gsPageSizeConfig64KB.uiRefCount)); + PVR_DPF((PVR_DBG_MESSAGE, "Max 256K page heaps: %d", + gsPageSizeConfig256KB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, "Current 256K page heaps (should be 0): %d", + gsPageSizeConfig256KB.uiRefCount)); + PVR_DPF((PVR_DBG_MESSAGE, "Max 1M page heaps: %d", + gsPageSizeConfig1MB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, "Current 1M page heaps (should be 0): %d", + gsPageSizeConfig1MB.uiRefCount)); + PVR_DPF((PVR_DBG_MESSAGE, "Max 2M page heaps: %d", + gsPageSizeConfig2MB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, "Current 2M page heaps (should be 0): %d", + gsPageSizeConfig2MB.uiRefCount)); +#endif + if 
(gsPageSizeConfig4KB.uiRefCount > 0 || + gsPageSizeConfig16KB.uiRefCount > 0 || + gsPageSizeConfig64KB.uiRefCount > 0 || + gsPageSizeConfig256KB.uiRefCount > 0 || + gsPageSizeConfig1MB.uiRefCount > 0 || + gsPageSizeConfig2MB.uiRefCount > 0 + ) + { + PVR_DPF((PVR_DBG_ERROR, "RGXMMUInit_Unregister: Unbalanced MMU API Usage (Internal error)")); + } + + return eError; +} + +/*************************************************************************/ /*! +@Function RGXMMUInit_GetConfigRangeValue +@Description Helper Function + For a given virtual address range and page size, return the + value to load into an MMU_PAGE_SIZE_RANGE config register. +@Return 64-bit register value +*/ /**************************************************************************/ +IMG_UINT64 RGXMMUInit_GetConfigRangeValue(IMG_UINT32 ui32DataPageSize, IMG_UINT64 ui64BaseAddress, IMG_UINT64 ui64RangeSize) +{ + /* end address of range is inclusive */ + IMG_UINT64 ui64EndAddress = ui64BaseAddress + ui64RangeSize - (1 << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT); + IMG_UINT64 ui64RegValue = 0; + + switch (ui32DataPageSize) + { + case 16*1024: + ui64RegValue = 1; + break; + case 64*1024: + ui64RegValue = 2; + break; + case 256*1024: + ui64RegValue = 3; + break; + case 1024*1024: + ui64RegValue = 4; + break; + case 2*1024*1024: + ui64RegValue = 5; + break; + case 4*1024: + /* fall through */ + default: + /* anything we don't support, use 4K */ + break; + } + + /* the range config register addresses are in 2MB chunks so check 21 lsb are zero */ + PVR_ASSERT((ui64BaseAddress & ((1 << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT) - 1)) == 0); + PVR_ASSERT((ui64EndAddress & ((1 << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT) - 1)) == 0); + + ui64BaseAddress >>= RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT; + ui64EndAddress >>= RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT; + + ui64RegValue = (ui64RegValue << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_SHIFT) | + 
(ui64EndAddress << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_SHIFT) | + (ui64BaseAddress << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_SHIFT); + return ui64RegValue; +} + + +/*************************************************************************/ /*! +@Function RGXDerivePCEProt4 +@Description calculate the PCE protection flags based on a 4 byte entry +@Return PVRSRV_ERROR + */ /**************************************************************************/ +static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags) +{ + return (uiProtFlags & MMU_PROTFLAGS_INVALID)?0:RGX_MMUCTRL_PC_DATA_VALID_EN; +} + + +/*************************************************************************/ /*! +@Function RGXDerivePCEProt8 +@Description calculate the PCE protection flags based on an 8 byte entry +@Return PVRSRV_ERROR + */ /**************************************************************************/ +static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize) +{ + PVR_UNREFERENCED_PARAMETER(uiProtFlags); + PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize); + + PVR_DPF((PVR_DBG_ERROR, "8-byte PCE not supported on this device")); + return 0; +} + + +/*************************************************************************/ /*! +@Function RGXDerivePDEProt4 +@Description derive the PDE protection flags based on a 4 byte entry +@Return PVRSRV_ERROR + */ /**************************************************************************/ +static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags) +{ + PVR_UNREFERENCED_PARAMETER(uiProtFlags); + PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device")); + return 0; +} + + +/*************************************************************************/ /*! +@Function RGXDerivePDEProt8 +@Description derive the PDE protection flags based on an 8 byte entry + +@Input uiLog2DataPageSize The log2 of the required page size. + E.g, for 4KiB pages, this parameter must be 12. + For 2MiB pages, it must be set to 21. 
+ +@Return PVRSRV_ERROR + */ /**************************************************************************/ +static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize) +{ + IMG_UINT64 ret_value = 0; /* 0 means invalid */ + + if (!(uiProtFlags & MMU_PROTFLAGS_INVALID)) /* if not invalid */ + { + switch (uiLog2DataPageSize) + { + case RGX_HEAP_4KB_PAGE_SHIFT: + ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB; + break; + case RGX_HEAP_16KB_PAGE_SHIFT: + ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB; + break; + case RGX_HEAP_64KB_PAGE_SHIFT: + ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB; + break; + case RGX_HEAP_256KB_PAGE_SHIFT: + ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB; + break; + case RGX_HEAP_1MB_PAGE_SHIFT: + ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB; + break; + case RGX_HEAP_2MB_PAGE_SHIFT: + ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB; + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "%s:%d: in function<%s>: Invalid parameter log2_page_size. Expected {12, 14, 16, 18, 20, 21}. Got [%u]", + __FILE__, __LINE__, __func__, uiLog2DataPageSize)); + } + } + return ret_value; +} + + +/*************************************************************************/ /*! +@Function RGXDerivePTEProt4 +@Description calculate the PTE protection flags based on a 4 byte entry +@Return PVRSRV_ERROR + */ /**************************************************************************/ +static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags) +{ + PVR_UNREFERENCED_PARAMETER(uiProtFlags); + PVR_DPF((PVR_DBG_ERROR, "4-byte PTE not supported on this device")); + + return 0; +} + +/*************************************************************************/ /*! 
+@Function RGXDerivePTEProt8 +@Description calculate the PTE protection flags based on an 8 byte entry +@Return PVRSRV_ERROR + */ /**************************************************************************/ +static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize) +{ + IMG_UINT64 ui64MMUFlags=0; + + PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize); + + if (((MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE) & uiProtFlags) == (MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE)) + { + /* read/write */ + } + else if (MMU_PROTFLAGS_READABLE & uiProtFlags) + { + /* read only */ + ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_READ_ONLY_EN; + } + else if (MMU_PROTFLAGS_WRITEABLE & uiProtFlags) + { + /* write only */ + PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt8: write-only is not possible on this device")); + } + else if ((MMU_PROTFLAGS_INVALID & uiProtFlags) == 0) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt8: neither read nor write specified...")); + } + + /* cache coherency */ + if (MMU_PROTFLAGS_CACHE_COHERENT & uiProtFlags) + { + ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_CC_EN; + } + + if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0) + { + ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_VALID_EN; + } + + if (MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT) & uiProtFlags) + { + ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN; + } + + /** + * Always enable caching on the fabric level cache irrespective of type of + * cache coherent interconnect and memory cache attributes. + * This needs to be updated, if selective caching policy needs to be + * implemented based on cache attributes requested by caller and based on + * cache coherent interconnect. + */ + ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_AXCACHE_WBRWALLOC; + + return ui64MMUFlags; +} + + +/*************************************************************************/ /*! +@Function RGXGetPageSizeConfig +@Description Set up configuration for variable sized data pages. 
+ RGXPutPageSizeConfigCB has to be called to ensure correct + refcounting. +@Return PVRSRV_ERROR + */ /**************************************************************************/ +static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize, + const MMU_PxE_CONFIG **ppsMMUPDEConfig, + const MMU_PxE_CONFIG **ppsMMUPTEConfig, + const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig, + IMG_HANDLE *phPriv) +{ + MMU_PAGESIZECONFIG *psPageSizeConfig; + + switch (uiLog2DataPageSize) + { + case RGX_HEAP_4KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig4KB; + break; + case RGX_HEAP_16KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig16KB; + break; + case RGX_HEAP_64KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig64KB; + break; + case RGX_HEAP_256KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig256KB; + break; + case RGX_HEAP_1MB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig1MB; + break; + case RGX_HEAP_2MB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig2MB; + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "RGXGetPageSizeConfigCB: Invalid Data Page Size 1<<0x%x", + uiLog2DataPageSize)); + *phPriv = NULL; + return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; + } + + /* Refer caller's pointers to the data */ + *ppsMMUPDEConfig = psPageSizeConfig->psPDEConfig; + *ppsMMUPTEConfig = psPageSizeConfig->psPTEConfig; + *ppsMMUDevVAddrConfig = psPageSizeConfig->psDevVAddrConfig; + +#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT) + /* Increment ref-count - not that we're allocating anything here + (I'm using static structs), but one day we might, so we want + the Get/Put code to be balanced properly */ + psPageSizeConfig->uiRefCount++; + + /* This is purely for debug statistics */ + psPageSizeConfig->uiMaxRefCount = MAX(psPageSizeConfig->uiMaxRefCount, + psPageSizeConfig->uiRefCount); +#endif + + *phPriv = (IMG_HANDLE)(uintptr_t)uiLog2DataPageSize; + PVR_ASSERT (uiLog2DataPageSize == (IMG_UINT32)(uintptr_t)*phPriv); + + return PVRSRV_OK; +} + 
+/*************************************************************************/ /*! +@Function RGXPutPageSizeConfig +@Description Tells this code that the mmu module is done with the + configurations set in RGXGetPageSizeConfig. This can + be a no-op. + Called after RGXGetPageSizeConfigCB. +@Return PVRSRV_ERROR + */ /**************************************************************************/ +static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv) +{ +#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT) + MMU_PAGESIZECONFIG *psPageSizeConfig; + IMG_UINT32 uiLog2DataPageSize; + + uiLog2DataPageSize = (IMG_UINT32)(uintptr_t) hPriv; + + switch (uiLog2DataPageSize) + { + case RGX_HEAP_4KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig4KB; + break; + case RGX_HEAP_16KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig16KB; + break; + case RGX_HEAP_64KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig64KB; + break; + case RGX_HEAP_256KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig256KB; + break; + case RGX_HEAP_1MB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig1MB; + break; + case RGX_HEAP_2MB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig2MB; + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "RGXPutPageSizeConfigCB: Invalid Data Page Size 1<<0x%x", + uiLog2DataPageSize)); + return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; + } + + /* Ref-count here is not especially useful, but it's an extra + check that the API is being used correctly */ + psPageSizeConfig->uiRefCount--; +#else + PVR_UNREFERENCED_PARAMETER(hPriv); +#endif + return PVRSRV_OK; +} + +static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize) +{ + PVR_UNREFERENCED_PARAMETER(ui32PDE); + PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize); + PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device")); + return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; +} + +static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 
*pui32Log2PageSize) +{ + IMG_UINT64 ui64PageSizeBits = ui64PDE & (~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK); + + switch (ui64PageSizeBits) + { + case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB: + *pui32Log2PageSize = RGX_HEAP_4KB_PAGE_SHIFT; + break; + case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB: + *pui32Log2PageSize = RGX_HEAP_16KB_PAGE_SHIFT; + break; + case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB: + *pui32Log2PageSize = RGX_HEAP_64KB_PAGE_SHIFT; + break; + case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB: + *pui32Log2PageSize = RGX_HEAP_256KB_PAGE_SHIFT; + break; + case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB: + *pui32Log2PageSize = RGX_HEAP_1MB_PAGE_SHIFT; + break; + case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB: + *pui32Log2PageSize = RGX_HEAP_2MB_PAGE_SHIFT; + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "RGXGetPageSizeFromPDE8: Invalid page size bitfield %" IMG_UINT64_FMTSPECx " in PDE", + ui64PageSizeBits)); + + return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; + } + return PVRSRV_OK; +} + + + + +/*************************************************************************/ /*! +@Function RGXMMU4DerivePDEProt8 +@Description derive the PDE protection flags based on an 8 byte entry + +@Input uiLog2DataPageSize: ignored as MMU4 doesn't put page size in PD entries. + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +static IMG_UINT64 RGXMMU4DerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize) +{ + IMG_UINT64 ret_value = 0; /* 0 means invalid */ + PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize); + + if (!(uiProtFlags & MMU_PROTFLAGS_INVALID)) /* if not invalid */ + { + /* page size in range config registers. Bits in PD entries are reserved */ + ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN; + } + return ret_value; +} + + +/*************************************************************************/ /*! 
+@Function RGXMMU4GetPageSizeFromPDE8 +@Description The upper layers should be such that this function is never called + as pages size are not stored in PD entries for MMU4. +*/ /**************************************************************************/ +static PVRSRV_ERROR RGXMMU4GetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize) +{ + PVR_UNREFERENCED_PARAMETER(ui64PDE); + PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize); + + PVR_ASSERT(0 && "RGXMMU4GetPageSizeFromPDE8 called in error. MMU4 does not store page sizes in PDT."); + return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; +} + + +/*************************************************************************/ /*! +@Function RGXMMU4GetPageSizeFromVirtAddr +@Description Get page size by walking through range config registers + looking for a match against the virtual address. +*/ /**************************************************************************/ +static PVRSRV_ERROR RGXMMU4GetPageSizeFromVirtAddr(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_UINT32 *pui32Log2PageSize) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 i; + + /* assume default of 4KB page size */ + *pui32Log2PageSize = 12; + + /* Loop through the range registers looking for the given target address */ + for (i = 0; i < ARRAY_SIZE(psDevInfo->aui64MMUPageSizeRangeValue); ++i) + { + IMG_UINT64 ui64RangeVal = psDevInfo->aui64MMUPageSizeRangeValue[i]; + + if (ui64RangeVal != 0) + { + /* end addr in register is inclusive in the range so add 1 to move it over the end */ + IMG_UINT64 ui64Base = ((ui64RangeVal & ~RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_CLRMSK) + >> RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_SHIFT) + << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT; + IMG_UINT64 ui64End = (((ui64RangeVal & ~RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_CLRMSK) + >> RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_SHIFT) + 1) + << 
RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT; + + if ((sDevVAddr.uiAddr >= ui64Base) && (sDevVAddr.uiAddr < ui64End)) + { + IMG_UINT32 ui32PageSizeField = (IMG_UINT32)((ui64RangeVal & RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_CLRMSK) + >> RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_SHIFT); + if (ui32PageSizeField < 5) + { + *pui32Log2PageSize = (ui32PageSizeField << 1) + 12; /* 12 (4K), 14 (16K), 16 (64K), 18 (256K), 20 (1MB) */ + } + else if (ui32PageSizeField == 5) + { + *pui32Log2PageSize = 21; /* 2MB */ + } + else + { + eError = PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; + } + break; + } + } + } + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxmmuinit.h b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxmmuinit.h new file mode 100644 index 000000000000..0f56d30d3f07 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxmmuinit.h @@ -0,0 +1,61 @@ +/*************************************************************************/ /*! +@File +@Title Device specific initialisation routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific MMU initialisation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* NB: this file is not to be included arbitrarily. 
It exists solely + for the linkage between rgxinit.c and rgxmmuinit.c, the former + being otherwise cluttered by the contents of the latter */ + +#ifndef _SRVKM_RGXMMUINIT_H_ +#define _SRVKM_RGXMMUINIT_H_ + +#include "device.h" +#include "img_types.h" +#include "mmu_common.h" +#include "img_defs.h" + +PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode); +PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode); + +IMG_UINT64 RGXMMUInit_GetConfigRangeValue(IMG_UINT32 ui32DataPageSize, IMG_UINT64 ui64BaseAddress, IMG_UINT64 ui64RangeSize); + +#endif /* #ifndef _SRVKM_RGXMMUINIT_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxmulticore.c b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxmulticore.c new file mode 100644 index 000000000000..cfd970070ceb --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxmulticore.c @@ -0,0 +1,129 @@ +/*************************************************************************/ /*! +@File rgxmulticore.c +@Title Functions related to multicore devices +@Codingstyle IMG +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Kernel mode workload estimation functionality. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "rgxdevice.h" +#include "rgxdefs_km.h" +#include "pdump_km.h" +#include "rgxmulticore.h" +#include "pvr_debug.h" + +/* + * RGXGetMultiCoreInfo: + * Read multicore HW registers and fill in data structure for clients. + * Return not supported on cores without multicore. 
+ */ +PVRSRV_ERROR RGXGetMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32CapsSize, + IMG_UINT32 *pui32NumCores, + IMG_UINT64 *pui64Caps) +{ + PVRSRV_ERROR eError = PVRSRV_OK; +#if defined(PLACEHOLDER_UNTIL_IMPLEMENTATION) + /* Waiting for finalisation of future hardware */ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + void *hPrivate = (void*)&psDevInfo->sLayerParams; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) + { + IMG_UINT32 ui32MulticoreRegBankOffset = (1 << RGX_GET_FEATURE_VALUE(psDevInfo, XPU_MAX_REGBANKS_ADDR_WIDTH)); + IMG_UINT32 ui32MulticoreGPUReg = RGX_CR_MULTICORE_GPU; + IMG_UINT32 ui32NumCores; + IMG_UINT32 i; + + ui32NumCores = RGXReadReg32(hPrivate, RGX_CR_MULTICORE_SYSTEM); +#if !defined(NO_HARDWARE) + PVR_LOG(("Multicore system has %u cores", ui32NumCores)); + /* check that the number of cores reported is in-bounds */ + if (ui32NumCores > (RGX_CR_MULTICORE_SYSTEM_MASKFULL >> RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT)) + { + PVR_DPF((PVR_DBG_ERROR, "invalid return (%u) read from MULTICORE_SYSTEM", ui32NumCores)); + return PVRSRV_ERROR_DEVICE_REGISTER_FAILED; + } +#else + /* simulation: currently we support one primary and one secondary */ + ui32NumCores = 2; +#endif + + *pui32NumCores = ui32NumCores; + /* CapsSize of zero is allowed to just return number of cores */ + if (ui32CapsSize > 0) + { +#if !defined(NO_HARDWARE) + PVR_LOG(("Configured for %u multicores", ui32NumCores)); +#endif + if (ui32CapsSize < ui32NumCores) + { + eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + } + else + { + for (i = 0; i < ui32NumCores; ++i) + { + *pui64Caps = RGXReadReg64(hPrivate, ui32MulticoreGPUReg) & ~0xFFFFFFFF; +#if !defined(NO_HARDWARE) + PVR_LOG(("Core %d has capabilities value 0x%x", i, (IMG_UINT32)(*pui64Caps) )); +#else + /* emulation for what we think caps are */ + *pui64Caps = i | ((i == 0) ? 
(RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_EN + | RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_EN) : 0) + | RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_EN + | RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_EN; +#endif + + ++pui64Caps; + ui32MulticoreGPUReg += ui32MulticoreRegBankOffset; + } + } + } + } + else +#endif + { + /* MULTICORE not supported on this device */ + PVR_DPF((PVR_DBG_ERROR, "Multicore not supported on this device")); + eError = PVRSRV_ERROR_NOT_SUPPORTED; + } + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxpdump.c b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxpdump.c new file mode 100644 index 000000000000..5b6ba42c4a54 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxpdump.c @@ -0,0 +1,542 @@ +/*************************************************************************/ /*! +@File rgxpdump.c +@Title Device specific pdump routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific pdump functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if defined(PDUMP) +#include "pvrsrv.h" +#include "devicemem_pdump.h" +#include "rgxpdump.h" +#include "pdumpdesc.h" + +/* + * There are two different set of functions one for META/RISCV and one for MIPS + * because the Pdump player does not implement the support for + * the MIPS MMU yet. So for MIPS builds we cannot use DevmemPDumpSaveToFileVirtual, + * we have to use DevmemPDumpSaveToFile instead. 
+ */ +static PVRSRV_ERROR _FWDumpSignatureBufferKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + PDUMPIF("DISABLE_SIGNATURE_BUFFER_DUMP", ui32PDumpFlags); + PDUMPELSE("DISABLE_SIGNATURE_BUFFER_DUMP", ui32PDumpFlags); + +#if defined(SUPPORT_FIRMWARE_GCOV) + /* Gcov */ + PDumpCommentWithFlags(ui32PDumpFlags, "** Gcov Buffer"); + DevmemPDumpSaveToFileVirtual(psDevInfo->psFirmwareGcovBufferMemDesc, + 0, + psDevInfo->ui32FirmwareGcovSize, + "firmware_gcov.img", + 0, + ui32PDumpFlags); +#endif + /* TDM signatures */ + PDumpCommentWithFlags(ui32PDumpFlags, "** Dump TDM signatures and checksums Buffer"); + DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigTDMChecksMemDesc, + 0, + psDevInfo->ui32SigTDMChecksSize, + "out.2dsig", + 0, + ui32PDumpFlags); + + /* TA signatures */ + PDumpCommentWithFlags(ui32PDumpFlags, "** Dump TA signatures and checksums Buffer"); + DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigTAChecksMemDesc, + 0, + psDevInfo->ui32SigTAChecksSize, + "out.tasig", + 0, + ui32PDumpFlags); + + /* 3D signatures */ + PDumpCommentWithFlags(ui32PDumpFlags, "** Dump 3D signatures and checksums Buffer"); + DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSig3DChecksMemDesc, + 0, + psDevInfo->ui32Sig3DChecksSize, + "out.3dsig", + 0, + ui32PDumpFlags); + /* CDM signatures */ + PDumpCommentWithFlags(ui32PDumpFlags, "** Dump CDM signatures and checksums Buffer"); + DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigCDMChecksMemDesc, + 0, + psDevInfo->ui32SigCDMChecksSize, + "out.cdmsig", + 0, + ui32PDumpFlags); + + + PDUMPFI("DISABLE_SIGNATURE_BUFFER_DUMP", ui32PDumpFlags); + + return PVRSRV_OK; +} +static PVRSRV_ERROR _FWDumpTraceBufferKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT32 
ui32ThreadNum, ui32Size, ui32OutFileOffset; + + PVR_UNREFERENCED_PARAMETER(psConnection); + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + /* Dump trace buffers */ + PDUMPIF("ENABLE_TRACEBUF", ui32PDumpFlags); + PDumpCommentWithFlags(ui32PDumpFlags, "** Dump trace buffers"); + for (ui32ThreadNum = 0, ui32OutFileOffset = 0; ui32ThreadNum < RGXFW_THREAD_NUM; ui32ThreadNum++) + { + /* + * Some compilers cannot cope with the use of offsetof() below - the specific problem being the use of + * a non-const variable in the expression, which it needs to be const. Typical compiler error produced is + * "expression must have a constant value". + */ + const IMG_DEVMEM_OFFSET_T uiTraceBufThreadNumOff + = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_TRACEBUF *)0)->sTraceBuf[ui32ThreadNum]); + + /* ui32TracePointer tracepointer */ + ui32Size = sizeof(IMG_UINT32); + DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufCtlMemDesc, + uiTraceBufThreadNumOff, + ui32Size, + "out.trace", + ui32OutFileOffset, + ui32PDumpFlags); + ui32OutFileOffset += ui32Size; + + /* next, dump size of trace buffer in DWords */ + ui32Size = sizeof(IMG_UINT32); + DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufCtlMemDesc, + offsetof(RGXFWIF_TRACEBUF, ui32TraceBufSizeInDWords), + ui32Size, + "out.trace", + ui32OutFileOffset, + ui32PDumpFlags); + ui32OutFileOffset += ui32Size; + + /* trace buffer */ + ui32Size = psDevInfo->psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32); + PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum]); + DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum], + 0, /* 0 offset in the trace buffer mem desc */ + ui32Size, + "out.trace", + ui32OutFileOffset, + ui32PDumpFlags); + ui32OutFileOffset += ui32Size; + + /* assert info buffer */ + ui32Size = RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR) + + RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR) + + sizeof(IMG_UINT32); + 
DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufCtlMemDesc, + offsetof(RGXFWIF_TRACEBUF, sTraceBuf) /* move to first element of sTraceBuf */ + + ui32ThreadNum * sizeof(RGXFWIF_TRACEBUF_SPACE) /* skip required number of sTraceBuf elements */ + + offsetof(RGXFWIF_TRACEBUF_SPACE, sAssertBuf), /* offset into its sAssertBuf, to be pdumped */ + ui32Size, + "out.trace", + ui32OutFileOffset, + ui32PDumpFlags); + ui32OutFileOffset += ui32Size; + } + PDUMPFI("ENABLE_TRACEBUF", ui32PDumpFlags); + + /* FW HWPerf buffer is always allocated when PDUMP is defined, irrespective of HWPerf events being enabled/disabled */ + PVR_ASSERT(psDevInfo->psRGXFWIfHWPerfBufMemDesc); + + /* Dump hwperf buffer */ + PDUMPIF("ENABLE_HWPERF", ui32PDumpFlags); + PDumpCommentWithFlags(ui32PDumpFlags, "** Dump HWPerf Buffer"); + DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfHWPerfBufMemDesc, + 0, + psDevInfo->ui32RGXFWIfHWPerfBufSize, + "out.hwperf", + 0, + ui32PDumpFlags); + PDUMPFI("ENABLE_HWPERF", ui32PDumpFlags); + + return PVRSRV_OK; + +} + + +/* + * PVRSRVPDumpSignatureBufferKM + */ +PVRSRV_ERROR PVRSRVPDumpSignatureBufferKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags) +{ + if (psDeviceNode->pfnCheckDeviceFeature) + { + return _FWDumpSignatureBufferKM(psConnection, + psDeviceNode, + ui32PDumpFlags); + } + + return PVRSRV_OK; +} + + +PVRSRV_ERROR PVRSRVPDumpCRCSignatureCheckKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32PDumpFlags) +{ +#if defined(SUPPORT_VALIDATION) && (defined(SUPPORT_FBCDC_SIGNATURE_CHECK) || defined(SUPPORT_TRP)) + + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + +#if defined(SUPPORT_FBCDC_SIGNATURE_CHECK) + +#if !defined(RGX_CR_FBCDC_STATUS) + #define RGX_CR_FBCDC_STATUS (0xF600U) +#endif +#if !defined(RGX_CR_FBCDC_SIGNATURE_STATUS) + #define RGX_CR_FBCDC_SIGNATURE_STATUS (0xF618U) +#endif + /* + * Add a PDUMP POLL 
on the FBC/FBDC signature check status. + */ + if (psDevInfo->ui32ValidationFlags & RGX_VAL_SIG_CHECK_NOERR_EN) + { + PDUMPCOMMENT("Verify FBCDC Signature: match required"); + eError = PDUMPREGPOL(RGX_PDUMPREG_NAME, + RGX_CR_FBCDC_STATUS, + 0, + 0xFFFFFFFF, + ui32PDumpFlags, + PDUMP_POLL_OPERATOR_EQUAL); + + eError = PDUMPREGPOL(RGX_PDUMPREG_NAME, + RGX_CR_FBCDC_SIGNATURE_STATUS, + 0, + 0xFFFFFFFF, + ui32PDumpFlags, + PDUMP_POLL_OPERATOR_EQUAL); + } + else if (psDevInfo->ui32ValidationFlags & RGX_VAL_SIG_CHECK_ERR_EN) + { + static char pszVar1[] = ":SYSMEM:$2"; + static char pszVar2[] = ":SYSMEM:$3"; + char *pszLoopCondition; + + /* + * Do: + * v1 = [RGX_CR_FBCDC_STATUS] + * v2 = [RGX_CR_FBCDC_SIGNATURE_STATUS] + * While (v1 OR v2) == 0 + */ + PDUMPCOMMENT("Verify FBCDC Signature: mismatch required"); + eError = PDumpInternalValCondStr(&pszLoopCondition, + pszVar1, + 0, + 0xFFFFFFFF, + ui32PDumpFlags, + PDUMP_POLL_OPERATOR_EQUAL); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Unable to write pdump verification sequence (%d)", __func__, eError)); + } + else + { + eError = PDumpStartDoLoopKM(ui32PDumpFlags); + + eError = PDumpRegRead32ToInternalVar(RGX_PDUMPREG_NAME, + RGX_CR_FBCDC_STATUS, + pszVar1, + ui32PDumpFlags); + + eError = PDumpRegRead32ToInternalVar(RGX_PDUMPREG_NAME, + RGX_CR_FBCDC_SIGNATURE_STATUS, + pszVar2, + ui32PDumpFlags); + + eError = PDumpWriteVarORVarOp(pszVar1, pszVar2, ui32PDumpFlags); + eError = PDumpEndDoWhileLoopKM(pszLoopCondition, ui32PDumpFlags); + OSFreeMem(pszLoopCondition); + } + } +#endif /* SUPPORT_FBCDC_SIGNATURE_CHECK */ + +#if defined(SUPPORT_TRP) + /* + * Add a PDUMP POLL on the TRP signature check status. 
+ */ + if (psDevInfo->ui32ValidationFlags & RGX_VAL_SIG_CHECK_NOERR_EN) + { + PDUMPCOMMENT("Verify TRP Signature: match required"); + eError = PDUMPREGPOL(RGX_PDUMPREG_NAME, + RGXFWIF_CR_TRP_SIGNATURE_STATUS, + RGXFWIF_TRP_STATUS_CHECKSUMS_OK, + 0xFFFFFFFF, + ui32PDumpFlags, + PDUMP_POLL_OPERATOR_EQUAL); + } + else if (psDevInfo->ui32ValidationFlags & RGX_VAL_SIG_CHECK_ERR_EN) + { + PDUMPCOMMENT("Verify TRP Signature: mismatch required"); + eError = PDUMPREGPOL(RGX_PDUMPREG_NAME, + RGXFWIF_CR_TRP_SIGNATURE_STATUS, + RGXFWIF_TRP_STATUS_CHECKSUMS_ERROR, + 0xFFFFFFFF, + ui32PDumpFlags, + PDUMP_POLL_OPERATOR_EQUAL); + } +#endif /* SUPPORT_TRP */ + + if ((psDevInfo->ui32ValidationFlags & RGX_VAL_SIG_CHECK_NOERR_EN) || + (psDevInfo->ui32ValidationFlags & RGX_VAL_SIG_CHECK_ERR_EN)) + { + /* + * FBCDC and TRP signatures buffer + */ + PDumpCommentWithFlags(ui32PDumpFlags, "** Dump FBCDC and TRP signatures buffer"); + DevmemPDumpDataDescriptor(psDevInfo->psRGXFWSigTRP_FBCDCMemDesc, + 0, + psDevInfo->ui32SigTRP_FBCDCSize, + "out.trpsig", + IBIN_HEADER_TYPE, + 0, 0, /* not applicable to IBIN header type */ + ui32PDumpFlags); + } + +#else + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); +#endif + PVR_UNREFERENCED_PARAMETER(psConnection); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR PVRSRVPDumpTraceBufferKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags) +{ + if (psDeviceNode->pfnCheckDeviceFeature) + { + return _FWDumpTraceBufferKM(psConnection, psDeviceNode, ui32PDumpFlags); + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXPDumpPrepareOutputImageDescriptorHdr(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32HeaderSize, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32LogicalWidth, + IMG_UINT32 ui32LogicalHeight, + IMG_UINT32 ui32PhysicalWidth, + IMG_UINT32 ui32PhysicalHeight, + PDUMP_PIXEL_FORMAT ePixFmt, + IMG_MEMLAYOUT eMemLayout, + IMG_FB_COMPRESSION eFBCompression, + const IMG_UINT32 
*paui32FBCClearColour, + PDUMP_FBC_SWIZZLE eFBCSwizzle, + IMG_PBYTE abyPDumpDesc) +{ + IMG_PUINT32 pui32Word; + IMG_UINT32 ui32HeaderDataSize; + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + + /* Validate parameters */ + if (((IMAGE_HEADER_SIZE & ~(HEADER_WORD1_SIZE_CLRMSK >> HEADER_WORD1_SIZE_SHIFT)) != 0) || + ((IMAGE_HEADER_VERSION & ~(HEADER_WORD1_VERSION_CLRMSK >> HEADER_WORD1_VERSION_SHIFT)) != 0)) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + memset(abyPDumpDesc, 0, IMAGE_HEADER_SIZE); + + pui32Word = IMG_OFFSET_ADDR(abyPDumpDesc, 0); + pui32Word[0] = (IMAGE_HEADER_TYPE << HEADER_WORD0_TYPE_SHIFT); + pui32Word[1] = (IMAGE_HEADER_SIZE << HEADER_WORD1_SIZE_SHIFT) | + (IMAGE_HEADER_VERSION << HEADER_WORD1_VERSION_SHIFT); + + ui32HeaderDataSize = ui32DataSize; + if (eFBCompression != IMG_FB_COMPRESSION_NONE) + { + ui32HeaderDataSize += ui32HeaderSize; + } + pui32Word[2] = ui32HeaderDataSize << HEADER_WORD2_DATA_SIZE_SHIFT; + + pui32Word[3] = ui32LogicalWidth << IMAGE_HEADER_WORD3_LOGICAL_WIDTH_SHIFT; + pui32Word[4] = ui32LogicalHeight << IMAGE_HEADER_WORD4_LOGICAL_HEIGHT_SHIFT; + + pui32Word[5] = ePixFmt << IMAGE_HEADER_WORD5_FORMAT_SHIFT; + + pui32Word[6] = ui32PhysicalWidth << IMAGE_HEADER_WORD6_PHYSICAL_WIDTH_SHIFT; + pui32Word[7] = ui32PhysicalHeight << IMAGE_HEADER_WORD7_PHYSICAL_HEIGHT_SHIFT; + + pui32Word[8] = IMAGE_HEADER_WORD8_STRIDE_POSITIVE | IMAGE_HEADER_WORD8_BIFTYPE_NONE; + + switch (eMemLayout) + { + case IMG_MEMLAYOUT_STRIDED: + pui32Word[8] |= IMAGE_HEADER_WORD8_TWIDDLING_STRIDED; + break; + case IMG_MEMLAYOUT_TWIDDLED: + pui32Word[8] |= IMAGE_HEADER_WORD8_TWIDDLING_ZTWIDDLE; + break; + default: + PVR_DPF((PVR_DBG_ERROR, "Unsupported memory layout - %d", eMemLayout)); + return PVRSRV_ERROR_UNSUPPORTED_MEMORY_LAYOUT; + } + + switch (eFBCompression) + { + case IMG_FB_COMPRESSION_NONE: + break; + case IMG_FB_COMPRESSION_DIRECT_8x8: + case IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8: + pui32Word[8] |= 
IMAGE_HEADER_WORD8_FBCTYPE_8X8; + break; + case IMG_FB_COMPRESSION_DIRECT_16x4: + case IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4: + pui32Word[8] |= IMAGE_HEADER_WORD8_FBCTYPE_16x4; + break; + case IMG_FB_COMPRESSION_DIRECT_32x2: + case IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2: + /* Services Client guards against unsupported FEATURE_FB_CDC_32x2. + We should never pass through the UM|KM bridge on cores lacking the feature. + */ + pui32Word[8] |= IMAGE_HEADER_WORD8_FBCTYPE_32x2; + break; + default: + PVR_DPF((PVR_DBG_ERROR, "Unsupported compression mode - %d", eFBCompression)); + return PVRSRV_ERROR_UNSUPPORTED_FB_COMPRESSION_MODE; + } + + pui32Word[9] = 0; + + if (eFBCompression != IMG_FB_COMPRESSION_NONE) + { + if (RGX_GET_FEATURE_VALUE(psDevInfo, FBCDC) == 4) + { + pui32Word[9] |= IMAGE_HEADER_WORD9_FBCCOMPAT_V4; + + if (eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8 || + eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4 || + eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2) + { + pui32Word[9] |= IMAGE_HEADER_WORD9_LOSSY_ON; + } + + pui32Word[9] |= (eFBCSwizzle << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) & IMAGE_HEADER_WORD9_SWIZZLE_CLRMSK; + } + else /* 3 or 3.1 */ + { + IMG_BOOL bIsFBC31 = psDevInfo->psRGXFWIfFwSysData-> + ui32ConfigFlags & RGXFWIF_INICFG_FBCDC_V3_1_EN; + + if (bIsFBC31) + { + pui32Word[9] |= IMAGE_HEADER_WORD9_FBCCOMPAT_V3_1_LAYOUT2; + } + else + { + pui32Word[9] |= IMAGE_HEADER_WORD9_FBCCOMPAT_V3_0_LAYOUT2; + } + } + + pui32Word[9] |= IMAGE_HEADER_WORD9_FBCDECOR_ENABLE; + } + + pui32Word[10] = paui32FBCClearColour[0]; + pui32Word[11] = paui32FBCClearColour[1]; + pui32Word[12] = (IMG_UINT32) (psDeviceNode->ui64FBCClearColour & 0xFFFFFFFF); + pui32Word[13] = (IMG_UINT32) (psDeviceNode->ui64FBCClearColour >> 32); + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXPDumpPrepareOutputDataDescriptorHdr(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32HeaderType, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32ElementType, + IMG_UINT32 
ui32ElementCount, + IMG_PBYTE pbyPDumpDataHdr) +{ + IMG_PUINT32 pui32Word; + + /* Validate parameters */ + if (((DATA_HEADER_SIZE & ~(HEADER_WORD1_SIZE_CLRMSK >> HEADER_WORD1_SIZE_SHIFT)) != 0) || + ((DATA_HEADER_VERSION & ~(HEADER_WORD1_VERSION_CLRMSK >> HEADER_WORD1_VERSION_SHIFT)) != 0)) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + pui32Word = IMG_OFFSET_ADDR(pbyPDumpDataHdr, 0); + + if (ui32HeaderType == DATA_HEADER_TYPE) + { + pui32Word[0] = (ui32HeaderType << HEADER_WORD0_TYPE_SHIFT); + pui32Word[1] = (DATA_HEADER_SIZE << HEADER_WORD1_SIZE_SHIFT) | + (DATA_HEADER_VERSION << HEADER_WORD1_VERSION_SHIFT); + pui32Word[2] = ui32DataSize << HEADER_WORD2_DATA_SIZE_SHIFT; + + pui32Word[3] = ui32ElementType << DATA_HEADER_WORD3_ELEMENT_TYPE_SHIFT; + pui32Word[4] = ui32ElementCount << DATA_HEADER_WORD4_ELEMENT_COUNT_SHIFT; + } + + if (ui32HeaderType == IBIN_HEADER_TYPE) + { + pui32Word[0] = (ui32HeaderType << HEADER_WORD0_TYPE_SHIFT); + pui32Word[1] = (IBIN_HEADER_SIZE << HEADER_WORD1_SIZE_SHIFT) | + (IBIN_HEADER_VERSION << HEADER_WORD1_VERSION_SHIFT); + pui32Word[2] = ui32DataSize << HEADER_WORD2_DATA_SIZE_SHIFT; + } + + return PVRSRV_OK; +} +#endif /* PDUMP */ + +/****************************************************************************** + End of file (rgxpdump.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxpdump.h b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxpdump.h new file mode 100644 index 000000000000..407d4efff111 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxpdump.h @@ -0,0 +1,178 @@ +/*************************************************************************/ /*! +@File +@Title RGX pdump Functionality +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description RGX pdump functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "rgxdevice.h" +#include "device.h" +#include "devicemem.h" +#include "pdump_km.h" +#include "pvr_debug.h" + +#if defined(PDUMP) +/*! +******************************************************************************* + + @Function PVRSRVPDumpSignatureBufferKM + + @Description + + Dumps TA and 3D signature and checksum buffers + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVPDumpSignatureBufferKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32PDumpFlags); + +/*! +******************************************************************************* + + @Function PVRSRVPDumpIntegritySignatureCheckKM + + @Description + + Poll on FBC/FBDC end-to-end signature status + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVPDumpCRCSignatureCheckKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32PDumpFlags); + +/*! 
+******************************************************************************* + + @Function PVRSRVPDumpTraceBufferKM + + @Description + + Dumps TA and 3D signature and checksum buffers + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVPDumpTraceBufferKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags); + +/*! +******************************************************************************* + + @Function RGXPDumpPrepareOutputImageDescriptorHdr + + @Description + + Dumps the header for an OutputImage command + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXPDumpPrepareOutputImageDescriptorHdr(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32HeaderSize, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32LogicalWidth, + IMG_UINT32 ui32LogicalHeight, + IMG_UINT32 ui32PhysicalWidth, + IMG_UINT32 ui32PhysicalHeight, + PDUMP_PIXEL_FORMAT ePixFmt, + IMG_MEMLAYOUT eMemLayout, + IMG_FB_COMPRESSION eFBCompression, + const IMG_UINT32 *paui32FBCClearColour, + PDUMP_FBC_SWIZZLE eFBCSwizzle, + IMG_PBYTE abyPDumpDesc); + +/*! 
+******************************************************************************* + + @Function RGXPDumpPrepareOutputDataDescriptorHdr + + @Description + + Dumps the header for an OutputData command + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXPDumpPrepareOutputDataDescriptorHdr(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32HeaderType, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32ElementType, + IMG_UINT32 ui32ElementCount, + IMG_PBYTE pbyPDumpDataHdr); + +#else /* PDUMP */ + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVPDumpSignatureBufferKM) +#endif +static INLINE PVRSRV_ERROR +PVRSRVPDumpSignatureBufferKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVPDumpTraceBufferKM) +#endif +static INLINE PVRSRV_ERROR +PVRSRVPDumpTraceBufferKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + return PVRSRV_OK; +} +#endif /* PDUMP */ +/****************************************************************************** + End of file (rgxpdump.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxpower.c b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxpower.c new file mode 100644 index 000000000000..e9d6bb73b1a4 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxpower.c @@ -0,0 +1,1151 @@ +/*************************************************************************/ /*! 
+@File +@Title Device specific power routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if defined(LINUX) +#include +#else +#include +#endif + +#include "rgxpower.h" +#include "rgxinit.h" +#include "rgx_fwif_km.h" +#include "rgxfwutils.h" +#include "pdump_km.h" +#include "pvr_debug.h" +#include "osfunc.h" +#include "rgxdebug.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "rgxtimecorr.h" +#include "devicemem_utils.h" +#include "htbserver.h" +#include "rgxstartstop.h" +#include "rgxfwimageutils.h" +#include "sync.h" +#include "rgxdefs_km.h" + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#endif +#if defined(SUPPORT_LINUX_DVFS) +#include "pvr_dvfs_device.h" +#endif +#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP) +#include "validation_soc.h" +#endif + +static PVRSRV_ERROR RGXFWNotifyHostTimeout(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_KCCB_CMD sCmd; + PVRSRV_ERROR eError; + IMG_UINT32 ui32CmdKCCBSlot; + + /* Send the Timeout notification to the FW */ + sCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; + sCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ; + sCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_HOST_TIMEOUT; + + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, + &sCmd, + PDUMP_FLAGS_NONE, + &ui32CmdKCCBSlot); + + return eError; +} + +static void _RGXUpdateGPUUtilStats(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb; + 
IMG_UINT64 *paui64StatsCounters; + IMG_UINT64 ui64LastPeriod; + IMG_UINT64 ui64LastState; + IMG_UINT64 ui64LastTime; + IMG_UINT64 ui64TimeNow; + + psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb; + paui64StatsCounters = &psUtilFWCb->aui64StatsCounters[0]; + + OSLockAcquire(psDevInfo->hGPUUtilLock); + + ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64()); + + /* Update counters to account for the time since the last update */ + ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord); + ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFWCb->ui64LastWord); + ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime); + paui64StatsCounters[ui64LastState] += ui64LastPeriod; + + /* Update state and time of the latest update */ + psUtilFWCb->ui64LastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState); + + OSLockRelease(psDevInfo->hGPUUtilLock); +} + +static INLINE PVRSRV_ERROR RGXDoStop(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + if (psDevConfig->pfnTDRGXStop == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPrePowerState: TDRGXStop not implemented!")); + return PVRSRV_ERROR_NOT_IMPLEMENTED; + } + + eError = psDevConfig->pfnTDRGXStop(psDevConfig->hSysData); +#else + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + eError = RGXStop(&psDevInfo->sLayerParams); +#endif + + return eError; +} + +/* + RGXPrePowerState +*/ +PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + IMG_BOOL bForced) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + + if (PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp)) + { + return PVRSRV_OK; + } + 
+ if ((eNewPowerState != eCurrentPowerState) && + (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON)) + { + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_KCCB_CMD sPowCmd; + IMG_UINT32 ui32CmdKCCBSlot; + + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + + /* Send the Power off request to the FW */ + sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; + sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_OFF_REQ; + sPowCmd.uCmdData.sPowData.uPowerReqData.bForced = bForced; + + eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", + __func__)); + return eError; + } + + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, + &sPowCmd, + PDUMP_FLAGS_NONE, + &ui32CmdKCCBSlot); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to send Power off request", + __func__)); + return eError; + } + + /* Wait for the firmware to complete processing. It cannot use PVRSRVWaitForValueKM as it relies + on the EventObject which is signalled in this MISR */ + eError = RGXPollForGPCommandCompletion(psDeviceNode, + psDevInfo->psPowSyncPrim->pui32LinAddr, + 0x1, 0xFFFFFFFF); + + /* Check the Power state after the answer */ + if (eError == PVRSRV_OK) + { + /* Finally, de-initialise some registers. */ + if (psFwSysData->ePowState == RGXFWIF_POW_OFF) + { +#if !defined(NO_HARDWARE) + IMG_UINT32 ui32TID; + for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++) + { + /* Wait for the pending FW processor to host interrupts to come back. */ + eError = PVRSRVPollForValueKM(psDeviceNode, + (IMG_UINT32 __iomem *)&psDevInfo->aui32SampleIRQCount[ui32TID], + psFwSysData->aui32InterruptCount[ui32TID], + 0xffffffff, + POLL_FLAG_LOG_ERROR); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Wait for pending interrupts failed. 
Thread %u: Host: %u, FW: %u", + __func__, + ui32TID, + psDevInfo->aui32SampleIRQCount[ui32TID], + psFwSysData->aui32InterruptCount[ui32TID])); + + RGX_WaitForInterruptsTimeout(psDevInfo); + break; + } + } +#endif /* NO_HARDWARE */ + + /* Update GPU frequency and timer correlation related data */ + RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_POWER); + + /* Update GPU state counters */ + _RGXUpdateGPUUtilStats(psDevInfo); + +#if defined(SUPPORT_LINUX_DVFS) + eError = SuspendDVFS(); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to suspend DVFS", __func__)); + return eError; + } +#endif + + psDevInfo->bRGXPowered = IMG_FALSE; + + eError = RGXDoStop(psDeviceNode); + if (eError != PVRSRV_OK) + { + /* Power down failures are treated as successful since the power was removed but logged. */ + PVR_DPF((PVR_DBG_WARNING, "%s: RGXDoStop failed (%s)", + __func__, PVRSRVGetErrorString(eError))); + psDevInfo->ui32ActivePMReqNonIdle++; + eError = PVRSRV_OK; + } + } + else + { + /* the sync was updated but the pow state isn't off -> the FW denied the transition */ + eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED; + + if (bForced) + { /* It is an error for a forced request to be denied */ + PVR_DPF((PVR_DBG_ERROR, + "%s: Failure to power off during a forced power off. 
FW: %d", + __func__, psFwSysData->ePowState)); + } + } + } + else if (eError == PVRSRV_ERROR_TIMEOUT) + { + /* timeout waiting for the FW to ack the request: return timeout */ + PVR_DPF((PVR_DBG_WARNING, + "%s: Timeout waiting for powoff ack from the FW", + __func__)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error waiting for powoff ack from the FW (%s)", + __func__, PVRSRVGetErrorString(eError))); + eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE; + } + } + + return eError; +} + +#if defined(TRACK_FW_BOOT) +static INLINE void RGXGetFWBootStage(PVRSRV_RGXDEV_INFO *psDevInfo, + FW_BOOT_STAGE *peStage, + FW_BOOT_STAGE *peStageMax) +{ + *peStageMax = FW_BOOT_INIT_DONE; + + /* Boot stage temporarily stored to the register below */ + *peStage = OSReadHWReg32(psDevInfo->pvRegsBaseKM, + RGX_FW_BOOT_STAGE_REGISTER); +} +#endif + +static INLINE PVRSRV_ERROR RGXDoStart(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; + + if (psDevConfig->pfnTDRGXStart == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: TDRGXStart not implemented!")); + return PVRSRV_ERROR_NOT_IMPLEMENTED; + } + + eError = psDevConfig->pfnTDRGXStart(psDevConfig->hSysData); +#else + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + eError = RGXStart(&psDevInfo->sLayerParams); +#endif + + return eError; +} + +#if defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE) +/* + * To validate the MTS unit we do the following: + * - Immediately after firmware loading for each OSID + * - Write the OSid to a memory location shared with FW + * - Kick the register of that OSid + * (Uncounted, DM 0) + * - FW clears the memory location if OSid matches + * - Host checks that memory location is cleared + * + * See firmware/devices/rgx/rgxfw_bg.c + */ +static PVRSRV_ERROR 
RGXVirtualisationPowerupSidebandTest(PVRSRV_DEVICE_NODE *psDeviceNode, + RGXFWIF_SYSINIT *psFwSysInit, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_UINT32 ui32ScheduleRegister; + IMG_UINT32 ui32OSid; + IMG_UINT32 ui32KickType; + IMG_UINT32 ui32OsRegBanksMapped = (psDeviceNode->psDevConfig->ui32RegsSize / RGX_VIRTUALISATION_REG_SIZE_PER_OS); + + /* Nothing to do if device does not support GPU_VIRTUALISATION */ + if (!PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, GPU_VIRTUALISATION)) + { + return PVRSRV_OK; + } + + PVR_DPF((PVR_DBG_MESSAGE, "Testing per-os kick registers:")); + + /* Need to get the maximum supported OSid value from the per-device info. + * This can change according to how much memory is physically present and + * what the carve-out mapping looks like (provided by the module load-time + * parameters). + */ + ui32OsRegBanksMapped = MIN(ui32OsRegBanksMapped, psDeviceNode->ui32NumOSId); + + if (ui32OsRegBanksMapped != RGXFW_MAX_NUM_OS) + { + PVR_DPF((PVR_DBG_WARNING, "The register bank mapped into kernel VA does not cover all OS' registers:")); + PVR_DPF((PVR_DBG_WARNING, "Maximum OS count = %d / Per-os register banks mapped = %d", RGXFW_MAX_NUM_OS, ui32OsRegBanksMapped)); + PVR_DPF((PVR_DBG_WARNING, "Only first %d MTS registers will be tested", ui32OsRegBanksMapped)); + } + + ui32KickType = RGX_CR_MTS_SCHEDULE_DM_DM0 | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED; + + for (ui32OSid = 0; ui32OSid < ui32OsRegBanksMapped; ui32OSid++) + { + /* set Test field */ + psFwSysInit->ui32OSKickTest = (ui32OSid << RGXFWIF_KICK_TEST_OSID_SHIFT) | RGXFWIF_KICK_TEST_ENABLED_BIT; + /* Force a read-back to memory to avoid posted writes on certain buses */ + (void) psFwSysInit->ui32OSKickTest; + OSWriteMemoryBarrier(); + + /* kick register */ + ui32ScheduleRegister = RGX_CR_MTS_SCHEDULE + (ui32OSid * RGX_VIRTUALISATION_REG_SIZE_PER_OS); + PVR_DPF((PVR_DBG_MESSAGE, " Testing OS: %u, Kick Reg: %X", + ui32OSid, + ui32ScheduleRegister)); + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, 
ui32ScheduleRegister, ui32KickType); + OSMemoryBarrier(); + + /* Wait test enable bit to be unset */ + if (PVRSRVPollForValueKM(psDeviceNode, + (IMG_UINT32 *)&psFwSysInit->ui32OSKickTest, + 0, + RGXFWIF_KICK_TEST_ENABLED_BIT, + POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Testing OS %u kick register failed: firmware did not clear test location (contents: 0x%X)", + ui32OSid, + psFwSysInit->ui32OSKickTest)); + + return PVRSRV_ERROR_TIMEOUT; + } + + /* sanity check that the value is what we expect */ + if (psFwSysInit->ui32OSKickTest != 0) + { + PVR_DPF((PVR_DBG_ERROR, "Testing OS %u kick register failed: firmware wrote 0x%X to test location", + ui32OSid, + psFwSysInit->ui32OSKickTest)); + return PVRSRV_ERROR_INIT_FAILURE; + } + + PVR_DPF((PVR_DBG_MESSAGE, " PASS")); + } + + PVR_LOG(("MTS passed sideband tests")); + return PVRSRV_OK; +} +#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE) */ + +/* + RGXPostPowerState +*/ +PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + IMG_BOOL bForced) +{ + if ((eNewPowerState != eCurrentPowerState) && + (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON)) + { + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; +#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP) + IMG_UINT32 ui32ConfigFlags; +#endif + + if (PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp)) + { + psDevInfo->bRGXPowered = IMG_TRUE; + return PVRSRV_OK; + } + + if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF) + { + /* Update timer correlation related data */ + RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_POWER); + + /* Update GPU state counters */ + _RGXUpdateGPUUtilStats(psDevInfo); + + eError = RGXDoStart(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: 
RGXDoStart failed")); + return eError; + } + + OSMemoryBarrier(); + + /* + * Check whether the FW has started by polling on bFirmwareStarted flag + */ + if (PVRSRVPollForValueKM(psDeviceNode, + (IMG_UINT32 __iomem *)&psDevInfo->psRGXFWIfSysInit->bFirmwareStarted, + IMG_TRUE, + 0xFFFFFFFF, + POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: Polling for 'FW started' flag failed.")); + eError = PVRSRV_ERROR_TIMEOUT; + +#if defined(TRACK_FW_BOOT) + { + FW_BOOT_STAGE eStage, eStageMax; + + RGXGetFWBootStage(psDevInfo, &eStage, &eStageMax); + + PVR_LOG(("%s: FW reached boot stage %i/%i.", + __func__, eStage, eStageMax)); + } +#endif + + /* + * When bFirmwareStarted fails some info may be gained by doing the following + * debug dump but unfortunately it could be potentially dangerous if the reason + * for not booting is the GPU power is not ON. However, if we have reached this + * point the System Layer has returned without errors, we assume the GPU power + * is indeed ON. 
+ */ + RGXDumpRGXDebugSummary(NULL, NULL, psDeviceNode->pvDevice, IMG_TRUE); + RGXDumpRGXRegisters(NULL, NULL, psDeviceNode->pvDevice); + + return eError; + } + +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Wait for the Firmware to start."); + eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfSysInitMemDesc, + offsetof(RGXFWIF_SYSINIT, bFirmwareStarted), + IMG_TRUE, + 0xFFFFFFFFU, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXPostPowerState: problem pdumping POL for psRGXFWIfSysInitMemDesc (%d)", + eError)); + return eError; + } + +#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) + { + PVRSRV_ERROR eError; + RGXFWIF_SYSDATA *psFwSysData; + + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfFwSysDataMemDesc, (void **)&psFwSysData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire OS Config (%u)", + __func__, + eError)); + return eError; + } + + ui32ConfigFlags = psFwSysData->ui32ConfigFlags; + + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfFwSysDataMemDesc); + } + + /* Check if the Validation IRQ flag is set */ + if ((ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_IRQ) != 0) + { + eError = PVRSRVValidateIrqs(psDeviceNode); + if (eError != PVRSRV_OK) + { + return eError; + } + } +#endif /* defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) */ + +#endif /* defined(PDUMP) */ + +#if defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE) + eError = RGXVirtualisationPowerupSidebandTest(psDeviceNode, psDevInfo->psRGXFWIfSysInit, psDevInfo); + if (eError != PVRSRV_OK) + { + return eError; + } +#endif + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + SetFirmwareStartTime(psDevInfo->psRGXFWIfSysInit->ui32FirmwareStartedTimeStamp); +#endif + + HTBSyncPartitionMarker(psDevInfo->psRGXFWIfSysInit->ui32MarkerVal); + + psDevInfo->bRGXPowered = IMG_TRUE; + +#if defined(SUPPORT_LINUX_DVFS) + eError = ResumeDVFS(); + if (eError != PVRSRV_OK) + 
{ + PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: Failed to resume DVFS")); + return eError; + } +#endif + } + } + + PDUMPCOMMENT("RGXPostPowerState: Current state: %d, New state: %d", eCurrentPowerState, eNewPowerState); + + return PVRSRV_OK; +} + +/* + RGXPreClockSpeedChange +*/ +PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eCurrentPowerState) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData; + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + PVR_UNREFERENCED_PARAMETER(psRGXData); + + PVR_DPF((PVR_DBG_MESSAGE, "RGXPreClockSpeedChange: RGX clock speed was %uHz", + psRGXData->psRGXTimingInfo->ui32CoreClockSpeed)); + + if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF) && + (psFwSysData->ePowState != RGXFWIF_POW_OFF)) + { + /* Update GPU frequency and timer correlation related data */ + RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_DVFS); + } + + return eError; +} + +/* + RGXPostClockSpeedChange +*/ +PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eCurrentPowerState) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData; + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + IMG_UINT32 ui32NewClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + /* Update runtime configuration with the new value */ + psDevInfo->psRGXFWIfRuntimeCfg->ui32CoreClockSpeed = ui32NewClockSpeed; + + if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF) && + (psFwSysData->ePowState != RGXFWIF_POW_OFF)) + { + RGXFWIF_KCCB_CMD sCOREClkSpeedChangeCmd; + IMG_UINT32 
ui32CmdKCCBSlot; + + RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_DVFS); + + sCOREClkSpeedChangeCmd.eCmdType = RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE; + sCOREClkSpeedChangeCmd.uCmdData.sCoreClkSpeedChangeData.ui32NewClockSpeed = ui32NewClockSpeed; + + /* Ensure the new clock speed is written to memory before requesting the FW to read it */ + OSMemoryBarrier(); + + PDUMPCOMMENT("Scheduling CORE clock speed change command"); + + PDUMPPOWCMDSTART(); + eError = RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice, + &sCOREClkSpeedChangeCmd, + PDUMP_FLAGS_NONE, + &ui32CmdKCCBSlot); + PDUMPPOWCMDEND(); + + if (eError != PVRSRV_OK) + { + PDUMPCOMMENT("Scheduling CORE clock speed change command failed"); + PVR_DPF((PVR_DBG_ERROR, "RGXPostClockSpeedChange: Scheduling KCCB command failed. Error:%u", eError)); + return eError; + } + + PVR_DPF((PVR_DBG_MESSAGE, "RGXPostClockSpeedChange: RGX clock speed changed to %uHz", + psRGXData->psRGXTimingInfo->ui32CoreClockSpeed)); + } + + return eError; +} + +/*************************************************************************/ /*! +@Function RGXPowUnitsStateMaskChange +@Description Changes power state of power units/islands +@Input hDevHandle RGX Device Node. +@Input ui32PowUnitsStateMask Mask containing power state of PUs. + Each bit corresponds to an PU. + Bit position corresponds to PU number i.e. Bit0 is PU0, Bit1 is PU1 etc. + '1' indicates ON and '0' indicates OFF. + Value must be non-zero. +@Return PVRSRV_ERROR. 
+*/ /**************************************************************************/ +PVRSRV_ERROR RGXPowUnitsStateMaskChange(IMG_HANDLE hDevHandle, IMG_UINT32 ui32PowUnitsStateMask) +{ + + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + RGXFWIF_KCCB_CMD sPowUnitsStateMaskChange; + IMG_UINT32 ui32PowUnitsMask = psDevInfo->ui32AvailablePowUnitsMask; + IMG_UINT32 ui32CmdKCCBSlot; + RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + /** + * Validate the input. At-least one PU must be powered on and all requested + * PU's must be a subset of full PU mask. + */ + if ((ui32PowUnitsStateMask == 0) || (ui32PowUnitsStateMask & ~ui32PowUnitsMask)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid Power Units mask requested (0x%X). Value should be non-zero and sub-set of 0x%X mask", + __func__, + ui32PowUnitsStateMask, + ui32PowUnitsMask)); + return PVRSRV_ERROR_INVALID_SPU_MASK; + } + + psRuntimeCfg->ui32PowUnitsStateMask = ui32PowUnitsStateMask; + +#if !defined(NO_HARDWARE) + { + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + + if (psFwSysData->ePowState == RGXFWIF_POW_OFF) + { + return PVRSRV_OK; + } + + if (psFwSysData->ePowState != RGXFWIF_POW_FORCED_IDLE) + { + eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED; + PVR_DPF((PVR_DBG_ERROR, + "%s: Powered units state can not be changed, when not IDLE", + __func__)); + return eError; + } + } +#endif + + eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", + __func__)); + return eError; + } + + sPowUnitsStateMaskChange.eCmdType = RGXFWIF_KCCB_CMD_POW; + sPowUnitsStateMaskChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_NUM_UNITS_CHANGE; + sPowUnitsStateMaskChange.uCmdData.sPowData.uPowerReqData.ui32PowUnitsStateMask = ui32PowUnitsStateMask; + + PDUMPCOMMENT("Scheduling command to change 
power units state to 0x%X", ui32PowUnitsStateMask); + eError = RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice, + &sPowUnitsStateMaskChange, + PDUMP_FLAGS_NONE, + &ui32CmdKCCBSlot); + + if (eError != PVRSRV_OK) + { + PDUMPCOMMENT("Scheduling command to change power units state. Error:%u", eError); + PVR_DPF((PVR_DBG_ERROR, + "%s: Scheduling KCCB to change power units state. Error:%u", + __func__, eError)); + return eError; + } + + /* Wait for the firmware to answer. */ + eError = RGXPollForGPCommandCompletion(psDeviceNode, + psDevInfo->psPowSyncPrim->pui32LinAddr, + 0x1, 0xFFFFFFFF); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Timeout waiting for idle request", __func__)); + return eError; + } + +#if defined(PDUMP) + PDUMPCOMMENT("%s: Poll for Kernel SyncPrim [0x%p] on DM %d", __func__, psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP); + + SyncPrimPDumpPol(psDevInfo->psPowSyncPrim, + 1, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + 0); +#endif + + return PVRSRV_OK; +} + +/* + @Function RGXAPMLatencyChange +*/ +PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE hDevHandle, + IMG_UINT32 ui32ActivePMLatencyms, + IMG_BOOL bActivePMLatencyPersistant) +{ + + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; + IMG_UINT32 ui32CmdKCCBSlot; + PVRSRV_DEV_POWER_STATE ePowerState; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + eError = PVRSRVPowerLock(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXAPMLatencyChange: Failed to acquire power lock")); + return eError; + } + + /* Update runtime configuration with the new values and ensure the + * new APM latency is written to memory before requesting the FW to + * read it + */ + psRuntimeCfg->ui32ActivePMLatencyms = ui32ActivePMLatencyms; + psRuntimeCfg->bActivePMLatencyPersistant = bActivePMLatencyPersistant; + OSMemoryBarrier(); + + 
eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); + + if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF)) + { + RGXFWIF_KCCB_CMD sActivePMLatencyChange; + sActivePMLatencyChange.eCmdType = RGXFWIF_KCCB_CMD_POW; + sActivePMLatencyChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_APM_LATENCY_CHANGE; + sActivePMLatencyChange.uCmdData.sPowData.uPowerReqData.ui32ActivePMLatencyms = ui32ActivePMLatencyms; + + PDUMPCOMMENT("Scheduling command to change APM latency to %u", ui32ActivePMLatencyms); + eError = RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice, + &sActivePMLatencyChange, + PDUMP_FLAGS_NONE, + &ui32CmdKCCBSlot); + + if (eError != PVRSRV_OK) + { + PDUMPCOMMENT("Scheduling command to change APM latency failed. Error:%u", eError); + PVR_DPF((PVR_DBG_ERROR, "RGXAPMLatencyChange: Scheduling KCCB to change APM latency failed. Error:%u", eError)); + goto ErrorExit; + } + } + +ErrorExit: + PVRSRVPowerUnlock(psDeviceNode); + + return eError; +} + +/* + RGXActivePowerRequest +*/ +PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + + psDevInfo->ui32ActivePMReqTotal++; + + /* Powerlock to avoid further requests from racing with the FW hand-shake + * from now on (previous kicks to this point are detected by the FW) + * PVRSRVPowerLock is replaced with PVRSRVPowerTryLock to avoid + * potential dead lock between PDumpWriteLock and PowerLock + * during 'DriverLive + PDUMP=1 + EnableAPM=1'. 
+ */ + eError = PVRSRVPowerTryLock(psDeviceNode); + if (eError != PVRSRV_OK) + { + if (eError != PVRSRV_ERROR_RETRY) + { + PVR_LOG_ERROR(eError, "PVRSRVPowerTryLock"); + } + else + { + psDevInfo->ui32ActivePMReqRetry++; + } + goto _RGXActivePowerRequest_PowerLock_failed; + } + + /* Check again for IDLE once we have the power lock */ + if (psFwSysData->ePowState == RGXFWIF_POW_IDLE) + { +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + SetFirmwareHandshakeIdleTime(RGXReadHWTimerReg(psDevInfo)-psFwSysData->ui64StartIdleTime); +#endif + + PDUMPPOWCMDSTART(); + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, + PVRSRV_DEV_POWER_STATE_OFF, + IMG_FALSE); /* forced */ + PDUMPPOWCMDEND(); + + if (eError == PVRSRV_OK) + { + psDevInfo->ui32ActivePMReqOk++; + } + else if (eError == PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED) + { + psDevInfo->ui32ActivePMReqDenied++; + } + } + else + { + psDevInfo->ui32ActivePMReqNonIdle++; + } + + PVRSRVPowerUnlock(psDeviceNode); + +_RGXActivePowerRequest_PowerLock_failed: + + return eError; +} +/* + RGXForcedIdleRequest +*/ + +#define RGX_FORCED_IDLE_RETRY_COUNT 10 + +PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_KCCB_CMD sPowCmd; + PVRSRV_ERROR eError; + IMG_UINT32 ui32RetryCount = 0; + IMG_UINT32 ui32CmdKCCBSlot; +#if !defined(NO_HARDWARE) + RGXFWIF_SYSDATA *psFwSysData; +#endif + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + +#if !defined(NO_HARDWARE) + psFwSysData = psDevInfo->psRGXFWIfFwSysData; + + /* Firmware already forced idle */ + if (psFwSysData->ePowState == RGXFWIF_POW_FORCED_IDLE) + { + return PVRSRV_OK; + } + + /* Firmware is not powered. Sometimes this is permitted, for instance we were forcing idle to power down. */ + if (psFwSysData->ePowState == RGXFWIF_POW_OFF) + { + return (bDeviceOffPermitted) ? 
PVRSRV_OK : PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED; + } +#endif + + eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", + __func__)); + return eError; + } + sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; + sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ; + sPowCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_FORCE_IDLE; + + PDUMPCOMMENT("RGXForcedIdleRequest: Sending forced idle command"); + + /* Send one forced IDLE command to GP */ + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, + &sPowCmd, + PDUMP_FLAGS_NONE, + &ui32CmdKCCBSlot); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to send idle request", __func__)); + return eError; + } + + /* Wait for GPU to finish current workload */ + do { + eError = RGXPollForGPCommandCompletion(psDeviceNode, + psDevInfo->psPowSyncPrim->pui32LinAddr, + 0x1, 0xFFFFFFFF); + if ((eError == PVRSRV_OK) || (ui32RetryCount == RGX_FORCED_IDLE_RETRY_COUNT)) + { + break; + } + ui32RetryCount++; + PVR_DPF((PVR_DBG_WARNING, + "%s: Request timeout. Retry %d of %d", + __func__, ui32RetryCount, RGX_FORCED_IDLE_RETRY_COUNT)); + } while (IMG_TRUE); + + if (eError != PVRSRV_OK) + { + RGXFWNotifyHostTimeout(psDevInfo); + PVR_DPF((PVR_DBG_ERROR, + "%s: Idle request failed. 
Firmware potentially left in forced idle state", + __func__)); + return eError; + } + +#if defined(PDUMP) + PDUMPCOMMENT("RGXForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d", psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP); + + SyncPrimPDumpPol(psDevInfo->psPowSyncPrim, + 1, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + 0); +#endif + +#if !defined(NO_HARDWARE) + /* Check the firmware state for idleness */ + if (psFwSysData->ePowState != RGXFWIF_POW_FORCED_IDLE) + { + return PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED; + } +#endif + + return PVRSRV_OK; +} + +/* + RGXCancelForcedIdleRequest +*/ +PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_KCCB_CMD sPowCmd; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32CmdKCCBSlot; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", + __func__)); + goto ErrorExit; + } + + /* Send the IDLE request to the FW */ + sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; + sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ; + sPowCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_CANCEL_FORCED_IDLE; + + PDUMPCOMMENT("RGXForcedIdleRequest: Sending cancel forced idle command"); + + /* Send cancel forced IDLE command to GP */ + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, + &sPowCmd, + PDUMP_FLAGS_NONE, + &ui32CmdKCCBSlot); + + if (eError != PVRSRV_OK) + { + PDUMPCOMMENT("RGXCancelForcedIdleRequest: Failed to send cancel IDLE request for DM%d", RGXFWIF_DM_GP); + goto ErrorExit; + } + + /* Wait for the firmware to answer. 
*/ + eError = RGXPollForGPCommandCompletion(psDeviceNode, + psDevInfo->psPowSyncPrim->pui32LinAddr, + 1, 0xFFFFFFFF); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Timeout waiting for cancel idle request", __func__)); + goto ErrorExit; + } + +#if defined(PDUMP) + PDUMPCOMMENT("RGXCancelForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d", psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP); + + SyncPrimPDumpPol(psDevInfo->psPowSyncPrim, + 1, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + 0); +#endif + + return eError; + +ErrorExit: + PVR_DPF((PVR_DBG_ERROR, "%s: Firmware potentially left in forced idle state", __func__)); + return eError; +} + +#if defined(SUPPORT_VALIDATION) +#define RGX_POWER_DOMAIN_STATE_INVALID (0xFFFFFFFF) + +PVRSRV_ERROR RGXPowerDomainInitState(RGX_POWER_DOMAIN_STATE *psState, + IMG_UINT32 ui32MaxPowUnitsCount) +{ + /* + * Total power domain states = 2^(Max power unit count) + */ + IMG_UINT32 ui32TotalStates = 1 << ui32MaxPowUnitsCount; + IMG_UINT32 i; + + /** + * Allocate memory for storing last transition for each power domain + * state. 
+ */ + psState->paui32LastTransition = OSAllocMem(ui32TotalStates * + sizeof(*psState->paui32LastTransition)); + + if (!psState->paui32LastTransition) + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to allocate memory ", __func__)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /** + * Initialize last transition of each state to invalid + */ + for (i=0; i<ui32TotalStates; i++) + { + psState->paui32LastTransition[i] = RGX_POWER_DOMAIN_STATE_INVALID; + } + + psState->ui32PowUnitsCount = ui32MaxPowUnitsCount; + psState->ui32CurrentState = RGX_POWER_DOMAIN_STATE_INVALID; + + return PVRSRV_OK; +} + +void RGXPowerDomainDeInitState(RGX_POWER_DOMAIN_STATE *psState) +{ + psState->ui32PowUnitsCount = 0; + + if (psState->paui32LastTransition) + { + OSFreeMem(psState->paui32LastTransition); + } +} + +IMG_UINT32 RGXPowerDomainGetNextState(RGX_POWER_DOMAIN_STATE *psState) +{ + IMG_UINT32 ui32NextState, ui32CurrentState = psState->ui32CurrentState; + IMG_UINT32 ui32TotalStates = 1 << psState->ui32PowUnitsCount; + + if (ui32CurrentState == RGX_POWER_DOMAIN_STATE_INVALID) + { + /** + * Start with all units powered off. 
+ */ + ui32NextState = 0; + } + else if (psState->paui32LastTransition[ui32CurrentState] == RGX_POWER_DOMAIN_STATE_INVALID) + { + ui32NextState = ui32CurrentState; + psState->paui32LastTransition[ui32CurrentState] = ui32CurrentState; + } + else + { + ui32NextState = (psState->paui32LastTransition[ui32CurrentState] + 1) % ui32TotalStates; + psState->paui32LastTransition[ui32CurrentState] = ui32NextState; + } + + psState->ui32CurrentState = ui32NextState; + return ui32NextState; +} +#endif +/****************************************************************************** + End of file (rgxpower.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxpower.h b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxpower.h new file mode 100644 index 000000000000..5fc4b7e7017e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxpower.h @@ -0,0 +1,224 @@ +/*************************************************************************/ /*! +@File +@Title RGX power header file +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the RGX power +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__RGXPOWER_H__) +#define __RGXPOWER_H__ + +#include "pvrsrv_error.h" +#include "img_types.h" +#include "servicesext.h" +#include "rgxdevice.h" + + +/*! 
+****************************************************************************** + + @Function RGXPrePowerState + + @Description + + does necessary preparation before power state transition + + @Input hDevHandle : RGX Device Node + @Input eNewPowerState : New power state + @Input eCurrentPowerState : Current power state + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + IMG_BOOL bForced); + +/*! +****************************************************************************** + + @Function RGXPostPowerState + + @Description + + does necessary preparation after power state transition + + @Input hDevHandle : RGX Device Node + @Input eNewPowerState : New power state + @Input eCurrentPowerState : Current power state + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + IMG_BOOL bForced); + + +/*! +****************************************************************************** + + @Function RGXPreClockSpeedChange + + @Description + + Does processing required before an RGX clock speed change. + + @Input hDevHandle : RGX Device Node + @Input bIdleDevice : Whether the firmware needs to be idled + @Input eCurrentPowerState : Power state of the device + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); + +/*! +****************************************************************************** + + @Function RGXPostClockSpeedChange + + @Description + + Does processing required after an RGX clock speed change. 
+ + @Input hDevHandle : RGX Device Node + @Input bIdleDevice : Whether the firmware had been idled previously + @Input eCurrentPowerState : Power state of the device + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); + + +/*************************************************************************/ /*! +@Function RGXPowUnitsStateMaskChange +@Description Changes power state of SPUs +@Input hDevHandle RGX Device Node. +@Input ui32PowUnitsStateMask Mask containing power state of SPUs. + Each bit corresponds to an SPU. + Bit position corresponds to SPU number i.e. Bit0 is SPU0, Bit1 is SPU1 etc. + '1' indicates ON and '0' indicates OFF. + Value must be non-zero. +@Return PVRSRV_ERROR. +*/ /**************************************************************************/ +PVRSRV_ERROR RGXPowUnitsStateMaskChange(IMG_HANDLE hDevHandle, + IMG_UINT32 ui32PowUnitsStateMask); + +/*! +****************************************************************************** + + @Function RGXAPMLatencyChange + + @Description + + Changes the wait duration used before firmware indicates IDLE. + Reducing this value will cause the firmware to shut off faster and + more often but may increase bubbles in GPU scheduling due to the added + power management activity. If bPersistent is NOT set, APM latency will + return back to system default on power up. + + @Input hDevHandle : RGX Device Node + @Input ui32ActivePMLatencyms : Number of milliseconds to wait + @Input bPersistent : Set to ensure new value is not reset + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE hDevHandle, + IMG_UINT32 ui32ActivePMLatencyms, + IMG_BOOL bActivePMLatencyPersistant); + +/*! 
+****************************************************************************** + + @Function RGXActivePowerRequest + + @Description Initiate a handshake with the FW to power off the GPU + + @Input hDevHandle : RGX Device Node + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle); + +/*! +****************************************************************************** + + @Function RGXForcedIdleRequest + + @Description Initiate a handshake with the FW to idle the GPU + + @Input hDevHandle : RGX Device Node + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted); + +/*! +****************************************************************************** + + @Function RGXCancelForcedIdleRequest + + @Description Send a request to cancel idle to the firmware. + + @Input hDevHandle : RGX Device Node + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle); + + +#if defined(SUPPORT_VALIDATION) +PVRSRV_ERROR RGXPowerDomainInitState(RGX_POWER_DOMAIN_STATE *psState, + IMG_UINT32 ui32MaxPowUnitsCount); + +void RGXPowerDomainDeInitState(RGX_POWER_DOMAIN_STATE *psState); + +IMG_UINT32 RGXPowerDomainGetNextState(RGX_POWER_DOMAIN_STATE *psState); +#endif +#endif /* __RGXPOWER_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxregconfig.c b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxregconfig.c new file mode 100644 index 000000000000..09db969bb11d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxregconfig.c @@ -0,0 +1,291 @@ +/*************************************************************************/ /*! 
+@File +@Title RGX Register configuration +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX Regconfig routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "rgxregconfig.h" +#include "pvr_debug.h" +#include "rgxutils.h" +#include "rgxfwutils.h" +#include "device.h" +#include "sync_internal.h" +#include "pdump_km.h" +#include "pvrsrv.h" + +PVRSRV_ERROR PVRSRVRGXSetRegConfigTypeKM(CONNECTION_DATA * psDevConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT8 ui8RegCfgType) +{ +#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; + RGXFWIF_REG_CFG_TYPE eRegCfgType = (RGXFWIF_REG_CFG_TYPE) ui8RegCfgType; + + PVR_UNREFERENCED_PARAMETER(psDevConnection); + + OSLockAcquire(psRegCfg->hLock); + + if (eRegCfgType < psRegCfg->eRegCfgTypeToPush) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVRGXSetRegConfigTypeKM: Register configuration requested (%d) is not valid since it has to be at least %d." + " Configurations of different types need to go in order", + eRegCfgType, + psRegCfg->eRegCfgTypeToPush)); + OSLockRelease(psRegCfg->hLock); + return PVRSRV_ERROR_REG_CONFIG_INVALID_TYPE; + } + + psRegCfg->eRegCfgTypeToPush = eRegCfgType; + + OSLockRelease(psRegCfg->hLock); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(psDevConnection); + + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSetRegConfigTypeKM: Feature disabled. 
Compile with SUPPORT_USER_REGISTER_CONFIGURATION")); + return PVRSRV_ERROR_FEATURE_DISABLED; +#endif +} + +PVRSRV_ERROR PVRSRVRGXAddRegConfigKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32RegAddr, + IMG_UINT64 ui64RegValue, + IMG_UINT64 ui64RegMask) +{ +#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sRegCfgCmd; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + OSLockAcquire(psRegCfg->hLock); + + if (psRegCfg->bEnabled) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXAddRegConfigKM: Cannot add record whilst register configuration active.")); + OSLockRelease(psRegCfg->hLock); + return PVRSRV_ERROR_REG_CONFIG_ENABLED; + } + if (psRegCfg->ui32NumRegRecords == RGXFWIF_REG_CFG_MAX_SIZE) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXAddRegConfigKM: Register configuration full.")); + OSLockRelease(psRegCfg->hLock); + return PVRSRV_ERROR_REG_CONFIG_FULL; + } + + sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG; + sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Addr = (IMG_UINT64) ui32RegAddr; + sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Value = ui64RegValue; + sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Mask = ui64RegMask; + sRegCfgCmd.uCmdData.sRegConfigData.eRegConfigType = psRegCfg->eRegCfgTypeToPush; + sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_ADD; + + eError = RGXScheduleCommand(psDeviceNode->pvDevice, + NULL, + RGXFWIF_DM_GP, + &sRegCfgCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXAddRegConfigKM: RGXScheduleCommand failed. 
Error:%u", eError)); + OSLockRelease(psRegCfg->hLock); + return eError; + } + + psRegCfg->ui32NumRegRecords++; + + OSLockRelease(psRegCfg->hLock); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(psConnection); + + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSetRegConfigPIKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION")); + return PVRSRV_ERROR_FEATURE_DISABLED; +#endif +} + +PVRSRV_ERROR PVRSRVRGXClearRegConfigKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode) +{ +#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sRegCfgCmd; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + OSLockAcquire(psRegCfg->hLock); + + if (psRegCfg->bEnabled) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearRegConfigKM: Attempt to clear register configuration whilst active.")); + OSLockRelease(psRegCfg->hLock); + return PVRSRV_ERROR_REG_CONFIG_ENABLED; + } + + sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG; + sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_CLEAR; + + eError = RGXScheduleCommand(psDeviceNode->pvDevice, + NULL, + RGXFWIF_DM_GP, + &sRegCfgCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearRegConfigKM: RGXScheduleCommand failed. Error:%u", eError)); + OSLockRelease(psRegCfg->hLock); + return eError; + } + + psRegCfg->ui32NumRegRecords = 0; + psRegCfg->eRegCfgTypeToPush = RGXFWIF_REG_CFG_TYPE_PWR_ON; + + OSLockRelease(psRegCfg->hLock); + + return eError; +#else + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearRegConfigKM: Feature disabled. 
Compile with SUPPORT_USER_REGISTER_CONFIGURATION")); + + PVR_UNREFERENCED_PARAMETER(psConnection); + + return PVRSRV_ERROR_FEATURE_DISABLED; +#endif +} + +PVRSRV_ERROR PVRSRVRGXEnableRegConfigKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode) +{ +#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sRegCfgCmd; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + OSLockAcquire(psRegCfg->hLock); + + sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG; + sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_ENABLE; + + eError = RGXScheduleCommand(psDeviceNode->pvDevice, + NULL, + RGXFWIF_DM_GP, + &sRegCfgCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXEnableRegConfigKM: RGXScheduleCommand failed. Error:%u", eError)); + OSLockRelease(psRegCfg->hLock); + return eError; + } + + psRegCfg->bEnabled = IMG_TRUE; + + OSLockRelease(psRegCfg->hLock); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(psConnection); + + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXEnableRegConfigKM: Feature disabled. 
Compile with SUPPORT_USER_REGISTER_CONFIGURATION")); + return PVRSRV_ERROR_FEATURE_DISABLED; +#endif +} + +PVRSRV_ERROR PVRSRVRGXDisableRegConfigKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode) +{ +#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sRegCfgCmd; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + OSLockAcquire(psRegCfg->hLock); + + sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG; + sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_DISABLE; + + eError = RGXScheduleCommand(psDeviceNode->pvDevice, + NULL, + RGXFWIF_DM_GP, + &sRegCfgCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXDisableRegConfigKM: RGXScheduleCommand failed. Error:%u", eError)); + OSLockRelease(psRegCfg->hLock); + return eError; + } + + psRegCfg->bEnabled = IMG_FALSE; + + OSLockRelease(psRegCfg->hLock); + + return eError; +#else + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXDisableRegConfigKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION")); + PVR_UNREFERENCED_PARAMETER(psConnection); + + return PVRSRV_ERROR_FEATURE_DISABLED; +#endif +} + + +/****************************************************************************** + End of file (rgxregconfig.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxsignals.c b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxsignals.c new file mode 100644 index 000000000000..dd266b80a3d1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxsignals.c @@ -0,0 +1,100 @@ +/*************************************************************************/ /*! +@File rgxsignals.c +@Title RGX Signals routines +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description RGX Signals routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "rgxsignals.h" + +#include "rgxmem.h" +#include "rgx_fwif_km.h" +#include "mmu_common.h" +#include "devicemem.h" +#include "rgxfwutils.h" + + +PVRSRV_ERROR +PVRSRVRGXNotifySignalUpdateKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hMemCtxPrivData, + IMG_DEV_VIRTADDR sDevSignalAddress) +{ + DEVMEM_MEMDESC *psFWMemContextMemDesc; + RGXFWIF_KCCB_CMD sKCCBCmd; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); + + /* Schedule the firmware command */ + sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE; + sKCCBCmd.uCmdData.sSignalUpdateData.sDevSignalAddress = sDevSignalAddress; + eError = RGXSetFirmwareAddress(&sKCCBCmd.uCmdData.sSignalUpdateData.psFWMemContext, + psFWMemContextMemDesc, + 0, RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", fail_fwaddr); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand((PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice, + hMemCtxPrivData, + RGXFWIF_DM_GP, + &sKCCBCmd, + 0, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule the FW command %d (%s)", + 
__func__, + eError, PVRSRVGETERRORSTRING(eError))); + } + +fail_fwaddr: + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxsrvinit.c b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxsrvinit.c new file mode 100644 index 000000000000..e5d3918bb90d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxsrvinit.c @@ -0,0 +1,1478 @@ +/*************************************************************************/ /*! +@File +@Title Services initialisation routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "srvinit.h" +#include "pvr_debug.h" +#include "osfunc.h" +#include "km_apphint_defs.h" + +#include "htbuffer_types.h" +#include "htbuffer_init.h" + +#include "devicemem.h" +#include "devicemem_pdump.h" + +#include "rgx_fwif_km.h" +#include "pdump_km.h" +#include "rgx_compat_bvnc.h" + +#include "rgxdefs_km.h" +#include "pvrsrv.h" + +#include "rgxinit.h" + +#include "rgx_compat_bvnc.h" + +#include "osfunc.h" + +#include "rgxdefs_km.h" + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#include "virt_validation_defs.h" +#endif + +#include "rgx_fwif_hwperf.h" +#include "rgx_hwperf_table.h" + +static const RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] = +{ +#define X(a, b, c, d, e, f, g) {a, b, 0xFF, d, e, f, NULL} +RGX_CNT_BLK_TYPE_MODEL_DIRECT_LIST, +RGX_CNT_BLK_TYPE_MODEL_INDIRECT_LIST +#undef X +}; + +#include "fwload.h" +#include "rgxlayer_impl.h" +#include "rgxfwimageutils.h" +#include "rgxfwutils.h" + +#include "rgx_bvnc_defs_km.h" + +#include "rgxdevice.h" +#include "pvrsrv.h" + +#if defined(SUPPORT_TRUSTED_DEVICE) +#include "rgxdevice.h" +#include "pvrsrv_device.h" +#endif + +#define 
DRIVER_MODE_HOST 0 /* AppHint value for host driver mode */ + +#define HW_PERF_FILTER_DEFAULT 0x00000000 /* Default to no HWPerf */ +#define HW_PERF_FILTER_DEFAULT_ALL_ON 0xFFFFFFFF /* All events */ +#define AVAIL_POW_UNITS_MASK_DEFAULT 0xFFFFFFFF + +#if defined(SUPPORT_VALIDATION) +#include "pvrsrv_apphint.h" +#endif + +#include "os_srvinit_param.h" + +#if defined(LINUX) +#include "km_apphint.h" +#else +/*! +******************************************************************************* + * AppHint mnemonic data type helper tables +******************************************************************************/ +/* apphint map of name vs. enable flag */ +static SRV_INIT_PARAM_UINT32_LOOKUP htb_loggroup_tbl[] = { +#define X(a, b) { #b, HTB_LOG_GROUP_FLAG(a) }, + HTB_LOG_SFGROUPLIST +#undef X +}; +/* apphint map of arg vs. OpMode */ +static SRV_INIT_PARAM_UINT32_LOOKUP htb_opmode_tbl[] = { + { "droplatest", HTB_OPMODE_DROPLATEST}, + { "dropoldest", HTB_OPMODE_DROPOLDEST}, + /* HTB should never be started in HTB_OPMODE_BLOCK + * as this can lead to deadlocks + */ +}; + +static SRV_INIT_PARAM_UINT32_LOOKUP fwt_logtype_tbl[] = { + { "trace", 0}, + { "none", 0} +#if defined(SUPPORT_TBI_INTERFACE) + , { "tbi", 1} +#endif +}; + +static SRV_INIT_PARAM_UINT32_LOOKUP timecorr_clk_tbl[] = { + { "mono", 0 }, + { "mono_raw", 1 }, + { "sched", 2 } +}; + +static SRV_INIT_PARAM_UINT32_LOOKUP fwt_loggroup_tbl[] = { RGXFWIF_LOG_GROUP_NAME_VALUE_MAP }; + +/* + * Services AppHints initialisation + */ +#define X(a, b, c, d, e) SrvInitParamInit ## b(a, d, e) +APPHINT_LIST_ALL +#undef X +#endif /* LINUX */ + +/* + * Container for all the apphints used by this module + */ +typedef struct _RGX_SRVINIT_APPHINTS_ +{ + IMG_UINT32 ui32DriverMode; + IMG_BOOL bEnableSignatureChecks; + IMG_UINT32 ui32SignatureChecksBufSize; + +#if defined(DEBUG) + IMG_BOOL bAssertOnOutOfMem; +#endif + IMG_BOOL bAssertOnHWRTrigger; +#if defined(SUPPORT_VALIDATION) + IMG_UINT32 ui32KillingCtl; + IMG_BOOL 
bValidateIrq; + IMG_BOOL bValidateSOCUSCTimer; + IMG_UINT32 ui32AvailablePowUnitsMask; + IMG_BOOL bInjectPowUnitsStateMaskChange; + IMG_BOOL bEnablePowUnitsStateMaskChange; + IMG_UINT32 ui32FBCDCVersionOverride; + IMG_UINT32 aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_LAST]; + IMG_UINT32 aui32USRMNumRegions[RGXFWIF_USRM_DM_LAST]; + IMG_UINT64 aui64UVBRMNumRegions[RGXFWIF_UVBRM_DM_LAST]; +#endif + IMG_BOOL bCheckMlist; + IMG_BOOL bDisableClockGating; + IMG_BOOL bDisableDMOverlap; + IMG_BOOL bDisableFEDLogging; + IMG_BOOL bDisablePDP; + IMG_BOOL bEnableDMKillRand; + IMG_BOOL bEnableRandomCsw; + IMG_BOOL bEnableSoftResetCsw; + IMG_BOOL bEnableHWR; + IMG_BOOL bFilteringMode; + IMG_BOOL bHWPerfDisableCounterFilter; + IMG_BOOL bZeroFreelist; + IMG_UINT32 ui32EnableFWContextSwitch; + IMG_UINT32 ui32FWContextSwitchProfile; + IMG_UINT32 ui32ISPSchedulingLatencyMode; + IMG_UINT32 ui32HWPerfFWBufSize; + IMG_UINT32 ui32HWPerfHostBufSize; + IMG_UINT32 ui32HWPerfFilter0; + IMG_UINT32 ui32HWPerfFilter1; + IMG_UINT32 ui32HWPerfHostFilter; + IMG_UINT32 ui32TimeCorrClock; + IMG_UINT32 ui32HWRDebugDumpLimit; + IMG_UINT32 ui32JonesDisableMask; + IMG_UINT32 ui32LogType; + IMG_UINT32 ui32TruncateMode; + IMG_UINT32 ui32CDMArbitrationMode; + FW_PERF_CONF eFirmwarePerf; + RGX_ACTIVEPM_CONF eRGXActivePMConf; + RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf; + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]; + IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]; +#endif + IMG_BOOL bEnableTrustedDeviceAceConfig; + IMG_UINT32 ui32FWContextSwitchCrossDM; +#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) + IMG_UINT32 ui32PhysMemTestPasses; +#endif +} RGX_SRVINIT_APPHINTS; + + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +/* + * Parses the dot('.') separated OSID regions on a string and stores the integer results + * in an array. 
Numbers can be decimal or hex (starting with 0x) and there must be a . between each + * (example: 1.2.3.4.5.6.7.8) + */ +static void _ParseOSidRegionString(IMG_CHAR *apszBuffer, IMG_UINT32 *pui32ApphintArray) +{ + IMG_UINT32 ui32OSid; + IMG_CHAR *pui8StringParsingBase=apszBuffer; + IMG_UINT32 ui32StringLength = OSStringLength(apszBuffer); + + /* Initialize all apphints to 0 */ + for (ui32OSid = 0; ui32OSid < GPUVIRT_VALIDATION_NUM_OS; ui32OSid++) + { + pui32ApphintArray[ui32OSid] = 0; + } + + /* Parse the string. Even if it fails, apphints will have been initialized */ + for (ui32OSid = 0; ui32OSid < GPUVIRT_VALIDATION_NUM_OS; ui32OSid++) + { + IMG_UINT32 ui32Base=10; + IMG_CHAR *pui8StringParsingNextDelimiter; + + /* Find the next character in the string that's not a ',' '.' or ' ' */ + while ((*pui8StringParsingBase == '.' || + *pui8StringParsingBase == ',' || + *pui8StringParsingBase == ' ') && + pui8StringParsingBase - apszBuffer <= ui32StringLength) + { + pui8StringParsingBase++; + } + + if (pui8StringParsingBase - apszBuffer > ui32StringLength) + { + PVR_DPF((PVR_DBG_ERROR, "Reached the end of the apphint string while trying to parse it.\nBuffer: %s, OSid: %d", pui8StringParsingBase, ui32OSid)); + return; + } + + /* If the substring begins with "0x" move the pointer 2 bytes forward and set the base to 16 */ + if (*pui8StringParsingBase == '0' && *(pui8StringParsingBase+1) =='x') + { + ui32Base=16; + pui8StringParsingBase+=2; + } + + /* Find the next delimiter in the string or the end of the string itself if we're parsing the final number */ + pui8StringParsingNextDelimiter = pui8StringParsingBase; + + while (*pui8StringParsingNextDelimiter!='.' && + *pui8StringParsingNextDelimiter!=',' && + *pui8StringParsingNextDelimiter!=' ' && + *pui8StringParsingNextDelimiter!='\0' && + (pui8StringParsingNextDelimiter - apszBuffer <= ui32StringLength)) + { + pui8StringParsingNextDelimiter++; + } + + /* + * Each number is followed by a '.' except for the last one. 
If a string termination is found + * when not expected the functions returns + */ + + if (*pui8StringParsingNextDelimiter=='\0' && ui32OSid < GPUVIRT_VALIDATION_NUM_OS - 1) + { + PVR_DPF((PVR_DBG_ERROR, "There was an error parsing the OSid Region Apphint Strings")); + return; + } + + /*replace the . with a string termination so that it can be properly parsed to an integer */ + *pui8StringParsingNextDelimiter = '\0'; + + /* Parse the number. The fact that it is followed by '\0' means that the string parsing utility + * will finish there and not try to parse the rest */ + + OSStringToUINT32(pui8StringParsingBase, ui32Base, &pui32ApphintArray[ui32OSid]); + + pui8StringParsingBase = pui8StringParsingNextDelimiter + 1; + } +} + +#endif +/*! +******************************************************************************* + + @Function GetApphints + + @Description Read init time apphints and initialise internal variables + + @Input psHints : Pointer to apphints container + + @Return void + +******************************************************************************/ +static INLINE void GetApphints(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_SRVINIT_APPHINTS *psHints) +{ + void *pvParamState = SrvInitParamOpen(); + IMG_UINT32 ui32ParamTemp; + + /* + * NB AppHints initialised to a default value via SrvInitParamInit* macros above + */ + SrvInitParamGetUINT32(pvParamState, DriverMode, psHints->ui32DriverMode); + SrvInitParamGetBOOL(pvParamState, EnableSignatureChecks, psHints->bEnableSignatureChecks); + SrvInitParamGetUINT32(pvParamState, SignatureChecksBufSize, psHints->ui32SignatureChecksBufSize); + +#if defined(DEBUG) + SrvInitParamGetBOOL(pvParamState, AssertOutOfMemory, psHints->bAssertOnOutOfMem); +#endif + SrvInitParamGetBOOL(pvParamState, AssertOnHWRTrigger, psHints->bAssertOnHWRTrigger); + SrvInitParamGetBOOL(pvParamState, CheckMList, psHints->bCheckMlist); + SrvInitParamGetBOOL(pvParamState, DisableClockGating, psHints->bDisableClockGating); + 
SrvInitParamGetBOOL(pvParamState, DisableDMOverlap, psHints->bDisableDMOverlap); + SrvInitParamGetBOOL(pvParamState, DisableFEDLogging, psHints->bDisableFEDLogging); + SrvInitParamGetUINT32(pvParamState, EnableAPM, ui32ParamTemp); + psHints->eRGXActivePMConf = ui32ParamTemp; + SrvInitParamGetBOOL(pvParamState, EnableGenericDMKillingRandMode, psHints->bEnableDMKillRand); + SrvInitParamGetBOOL(pvParamState, EnableRandomContextSwitch, psHints->bEnableRandomCsw); + SrvInitParamGetBOOL(pvParamState, EnableSoftResetContextSwitch, psHints->bEnableSoftResetCsw); + SrvInitParamGetUINT32(pvParamState, EnableFWContextSwitch, psHints->ui32EnableFWContextSwitch); + SrvInitParamGetBOOL(pvParamState, EnableHWR, psHints->bEnableHWR); + SrvInitParamGetUINT32(pvParamState, EnableRDPowerIsland, ui32ParamTemp); + psHints->eRGXRDPowerIslandConf = ui32ParamTemp; + SrvInitParamGetUINT32(pvParamState, FirmwarePerf, ui32ParamTemp); + psHints->eFirmwarePerf = ui32ParamTemp; + SrvInitParamGetUINT32(pvParamState, FWContextSwitchProfile, psHints->ui32FWContextSwitchProfile); + SrvInitParamGetBOOL(pvParamState, HWPerfDisableCounterFilter, psHints->bHWPerfDisableCounterFilter); + SrvInitParamGetUINT32(pvParamState, HWPerfHostBufSizeInKB, psHints->ui32HWPerfHostBufSize); + SrvInitParamGetUINT32(pvParamState, HWPerfFWBufSizeInKB, psHints->ui32HWPerfFWBufSize); + SrvInitParamGetUINT32(pvParamState, ISPSchedulingLatencyMode, psHints->ui32ISPSchedulingLatencyMode); + SrvInitParamGetUINT32(pvParamState, CDMArbitrationOverride, psHints->ui32CDMArbitrationMode); +#if defined(LINUX) + /* name changes */ + { + IMG_UINT64 ui64Tmp; + SrvInitParamGetBOOL(pvParamState, DisablePDumpPanic, psHints->bDisablePDP); + SrvInitParamGetUINT64(pvParamState, HWPerfFWFilter, ui64Tmp); + psHints->ui32HWPerfFilter0 = (IMG_UINT32)(ui64Tmp & 0xffffffffllu); + psHints->ui32HWPerfFilter1 = (IMG_UINT32)((ui64Tmp >> 32) & 0xffffffffllu); + } +#else + SrvInitParamUnreferenced(DisablePDumpPanic); + 
SrvInitParamUnreferenced(HWPerfFWFilter); + SrvInitParamUnreferenced(RGXBVNC); +#endif + SrvInitParamGetUINT32(pvParamState, HWPerfHostFilter, psHints->ui32HWPerfHostFilter); + SrvInitParamGetUINT32List(pvParamState, TimeCorrClock, psHints->ui32TimeCorrClock); + SrvInitParamGetUINT32(pvParamState, HWRDebugDumpLimit, ui32ParamTemp); + psHints->ui32HWRDebugDumpLimit = MIN(ui32ParamTemp, RGXFWIF_HWR_DEBUG_DUMP_ALL); + SrvInitParamGetUINT32(pvParamState, JonesDisableMask, ui32ParamTemp); + psHints->ui32JonesDisableMask = ui32ParamTemp & ~0XFFFF0000U; + + SrvInitParamGetBOOL(pvParamState, NewFilteringMode, psHints->bFilteringMode); + SrvInitParamGetUINT32(pvParamState, TruncateMode, psHints->ui32TruncateMode); + + SrvInitParamGetBOOL(pvParamState, ZeroFreelist, psHints->bZeroFreelist); +#if defined(LINUX) + SrvInitParamGetUINT32(pvParamState, FWContextSwitchCrossDM, psHints->ui32FWContextSwitchCrossDM); +#else + SrvInitParamUnreferenced(FWContextSwitchCrossDM); +#endif + +#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) + SrvInitParamGetUINT32(pvParamState, PhysMemTestPasses, psHints->ui32PhysMemTestPasses); +#endif + +#if defined(SUPPORT_VALIDATION) + /* Allow apphint to override the default SH Geometry pipe mask */ + SrvInitParamGetUINT32(pvParamState, KillingCtl, psHints->ui32KillingCtl); + SrvInitParamGetBOOL(pvParamState, ValidateIrq, psHints->bValidateIrq); + SrvInitParamGetBOOL(pvParamState, ValidateSOCUSCTimer, psHints->bValidateSOCUSCTimer); + SrvInitParamGetUINT32(pvParamState, HWValAvailableSPUMask, psHints->ui32AvailablePowUnitsMask); + SrvInitParamGetBOOL(pvParamState, GPUUnitsPowerChange, psHints->bInjectPowUnitsStateMaskChange); + SrvInitParamGetBOOL(pvParamState, HWValEnableSPUPowerMaskChange, psHints->bEnablePowUnitsStateMaskChange); + SrvInitParamGetUINT32(pvParamState, FBCDCVersionOverride, psHints->ui32FBCDCVersionOverride); + + /* Apphints for Unified Store virtual partitioning. 
*/ + SrvInitParamGetUINT32(pvParamState, USRMNumRegionsVDM, psHints->aui32USRMNumRegions[RGXFWIF_USRM_DM_VDM]); + SrvInitParamGetUINT32(pvParamState, USRMNumRegionsDDM, psHints->aui32USRMNumRegions[RGXFWIF_USRM_DM_DDM]); + SrvInitParamGetUINT32(pvParamState, USRMNumRegionsCDM, psHints->aui32USRMNumRegions[RGXFWIF_USRM_DM_CDM]); + SrvInitParamGetUINT32(pvParamState, USRMNumRegionsPDM, psHints->aui32USRMNumRegions[RGXFWIF_USRM_DM_PDM]); + SrvInitParamGetUINT32(pvParamState, USRMNumRegionsTDM, psHints->aui32USRMNumRegions[RGXFWIF_USRM_DM_TDM]); + + /* Apphints for UVB virtual partitioning. */ + SrvInitParamGetUINT64(pvParamState, UVBRMNumRegionsVDM, psHints->aui64UVBRMNumRegions[RGXFWIF_UVBRM_DM_VDM]); + SrvInitParamGetUINT64(pvParamState, UVBRMNumRegionsDDM, psHints->aui64UVBRMNumRegions[RGXFWIF_UVBRM_DM_DDM]); + + /* Apphints for TPU trilinear frac masking */ + SrvInitParamGetUINT32(pvParamState, TPUTrilinearFracMaskPDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_PDM]); + SrvInitParamGetUINT32(pvParamState, TPUTrilinearFracMaskVDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_VDM]); + SrvInitParamGetUINT32(pvParamState, TPUTrilinearFracMaskCDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_CDM]); + SrvInitParamGetUINT32(pvParamState, TPUTrilinearFracMaskTDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_TDM]); +#endif + + /* + * FW logs apphints + */ + { + IMG_UINT32 ui32LogGroup, ui32TraceOrTBI; + + SrvInitParamGetUINT32BitField(pvParamState, EnableLogGroup, ui32LogGroup); + SrvInitParamGetUINT32List(pvParamState, FirmwareLogType, ui32TraceOrTBI); + + /* Defaulting to TRACE */ + BITMASK_SET(ui32LogGroup, RGXFWIF_LOG_TYPE_TRACE); + +#if defined(SUPPORT_TBI_INTERFACE) + if (ui32TraceOrTBI == 1 /* TBI */) + { + if ((ui32LogGroup & RGXFWIF_LOG_TYPE_GROUP_MASK) == 0) + { + /* No groups configured - defaulting to MAIN group */ + BITMASK_SET(ui32LogGroup, RGXFWIF_LOG_TYPE_GROUP_MAIN); + } + BITMASK_UNSET(ui32LogGroup, RGXFWIF_LOG_TYPE_TRACE); + } 
+#endif + + psHints->ui32LogType = ui32LogGroup; + } + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + /* + * GPU virtualisation validation apphints + */ + { + IMG_CHAR pszOSidRegionBuffer[GPUVIRT_VALIDATION_MAX_STRING_LENGTH]; + + SrvInitParamGetSTRING(pvParamState, OSidRegion0Min, pszOSidRegionBuffer, GPUVIRT_VALIDATION_MAX_STRING_LENGTH); + _ParseOSidRegionString(pszOSidRegionBuffer, psHints->aui32OSidMin[0]); + + SrvInitParamGetSTRING(pvParamState, OSidRegion0Max, pszOSidRegionBuffer, GPUVIRT_VALIDATION_MAX_STRING_LENGTH); + _ParseOSidRegionString(pszOSidRegionBuffer, psHints->aui32OSidMax[0]); + + SrvInitParamGetSTRING(pvParamState, OSidRegion1Min, pszOSidRegionBuffer, GPUVIRT_VALIDATION_MAX_STRING_LENGTH); + _ParseOSidRegionString(pszOSidRegionBuffer, psHints->aui32OSidMin[1]); + + SrvInitParamGetSTRING(pvParamState, OSidRegion1Max, pszOSidRegionBuffer, GPUVIRT_VALIDATION_MAX_STRING_LENGTH); + _ParseOSidRegionString(pszOSidRegionBuffer, psHints->aui32OSidMax[1]); + } +#else +#if !defined(LINUX) + SrvInitParamUnreferenced(OSidRegion0Min); + SrvInitParamUnreferenced(OSidRegion0Max); + SrvInitParamUnreferenced(OSidRegion1Min); + SrvInitParamUnreferenced(OSidRegion1Max); +#endif /* !defined(LINUX) */ +#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ + SrvInitParamGetBOOL(pvParamState, EnableTrustedDeviceAceConfig, psHints->bEnableTrustedDeviceAceConfig); + + + SrvInitParamClose(pvParamState); +} + + +/*! 
+******************************************************************************* + + @Function GetFWConfigFlags + + @Description Initialise and return FW config flags + + @Input psHints : Apphints container + @Input pui32FWConfigFlags : Pointer to config flags + + @Return void + +******************************************************************************/ +static INLINE void GetFWConfigFlags(PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_SRVINIT_APPHINTS *psHints, + IMG_UINT32 *pui32FWConfigFlags, + IMG_UINT32 *pui32FWConfigFlagsExt, + IMG_UINT32 *pui32FwOsCfgFlags) +{ + IMG_UINT32 ui32FWConfigFlags = 0; + IMG_UINT32 ui32FWConfigFlagsExt = 0; + IMG_UINT32 ui32FwOsCfgFlags = psHints->ui32FWContextSwitchCrossDM | + (psHints->ui32EnableFWContextSwitch & ~RGXFWIF_INICFG_OS_CTXSWITCH_CLRMSK); + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + ui32FWConfigFlags = 0; + ui32FWConfigFlagsExt = 0; + } + else + { +#if defined(DEBUG) + ui32FWConfigFlags |= psHints->bAssertOnOutOfMem ? RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY : 0; +#endif + ui32FWConfigFlags |= psHints->bAssertOnHWRTrigger ? RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER : 0; + ui32FWConfigFlags |= psHints->bCheckMlist ? RGXFWIF_INICFG_CHECK_MLIST_EN : 0; + ui32FWConfigFlags |= psHints->bDisableClockGating ? RGXFWIF_INICFG_DISABLE_CLKGATING_EN : 0; + ui32FWConfigFlags |= psHints->bDisableDMOverlap ? RGXFWIF_INICFG_DISABLE_DM_OVERLAP : 0; + ui32FWConfigFlags |= psHints->bDisablePDP ? RGXFWIF_INICFG_DISABLE_PDP_EN : 0; + ui32FWConfigFlags |= psHints->bEnableDMKillRand ? RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN : 0; + ui32FWConfigFlags |= psHints->bEnableRandomCsw ? RGXFWIF_INICFG_CTXSWITCH_MODE_RAND : 0; + ui32FWConfigFlags |= psHints->bEnableSoftResetCsw ? RGXFWIF_INICFG_CTXSWITCH_SRESET_EN : 0; + ui32FWConfigFlags |= (psHints->ui32HWPerfFilter0 != 0 || psHints->ui32HWPerfFilter1 != 0) ? RGXFWIF_INICFG_HWPERF_EN : 0; + ui32FWConfigFlags |= psHints->bEnableHWR ? 
RGXFWIF_INICFG_HWR_EN : 0; + ui32FWConfigFlags |= (psHints->ui32ISPSchedulingLatencyMode << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) & RGXFWIF_INICFG_ISPSCHEDMODE_MASK; +#if defined(SUPPORT_VALIDATION) +#if defined(NO_HARDWARE) && defined(PDUMP) + ui32FWConfigFlags |= psHints->bValidateIrq ? RGXFWIF_INICFG_VALIDATE_IRQ : 0; +#endif +#endif + ui32FWConfigFlags |= psHints->bHWPerfDisableCounterFilter ? RGXFWIF_INICFG_HWP_DISABLE_FILTER : 0; + ui32FWConfigFlags |= (psHints->eFirmwarePerf == FW_PERF_CONF_CUSTOM_TIMER) ? RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN : 0; + ui32FWConfigFlags |= (psHints->eFirmwarePerf == FW_PERF_CONF_POLLS) ? RGXFWIF_INICFG_POLL_COUNTERS_EN : 0; + ui32FWConfigFlags |= (psHints->ui32FWContextSwitchProfile << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) & RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK; + +#if defined(SUPPORT_VALIDATION) + ui32FWConfigFlags |= psHints->bEnablePowUnitsStateMaskChange ? RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN : 0; + ui32FWConfigFlags |= psHints->bValidateSOCUSCTimer ? RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER : 0; + + if ((ui32FWConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER) && + ((psHints->eRGXActivePMConf != 0) || (psHints->eRGXRDPowerIslandConf != 0))) + { + psHints->eRGXActivePMConf = 0; + psHints->eRGXRDPowerIslandConf = 0; + PVR_DPF((PVR_DBG_WARNING, "SoC/USC Timer test needs to run with both EnableAPM and EnableRDPowerIsland disabled.\n" + "Overriding current value for both with new value 0.")); + } +#endif + ui32FWConfigFlags |= psDeviceNode->pfnHasFBCDCVersion31(psDeviceNode) ? 
RGXFWIF_INICFG_FBCDC_V3_1_EN : 0; + ui32FWConfigFlags |= (psHints->ui32CDMArbitrationMode << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) & RGXFWIF_INICFG_CDM_ARBITRATION_MASK; + } + + if ((ui32FwOsCfgFlags & RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN) && + ((ui32FWConfigFlags & RGXFWIF_INICFG_ISPSCHEDMODE_MASK) == RGXFWIF_INICFG_ISPSCHEDMODE_NONE)) + { + ui32FwOsCfgFlags &= ~RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN; + PVR_DPF((PVR_DBG_WARNING, "ISPSchedulingLatencyMode=0 implies context switching is inoperable on DM_3D.\n" + "Overriding current value EnableFWContextSwitch=0x%x with new value 0x%x", + psHints->ui32EnableFWContextSwitch, + ui32FwOsCfgFlags & RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL)); + } + + *pui32FWConfigFlags = ui32FWConfigFlags; + *pui32FWConfigFlagsExt = ui32FWConfigFlagsExt; + *pui32FwOsCfgFlags = ui32FwOsCfgFlags; +} + + +/*! +******************************************************************************* + + @Function GetFilterFlags + + @Description Initialise and return filter flags + + @Input psHints : Apphints container + + @Return IMG_UINT32 : Filter flags + +******************************************************************************/ +static INLINE IMG_UINT32 GetFilterFlags(RGX_SRVINIT_APPHINTS *psHints) +{ + IMG_UINT32 ui32FilterFlags = 0; + + ui32FilterFlags |= psHints->bFilteringMode ? RGXFWIF_FILTCFG_NEW_FILTER_MODE : 0; + if (psHints->ui32TruncateMode == 2) + { + ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_INT; + } + else if (psHints->ui32TruncateMode == 3) + { + ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_HALF; + } + + return ui32FilterFlags; +} + + +/*! 
+******************************************************************************* + + @Function InitDeviceFlags + + @Description Initialise and return device flags + + @Input psHints : Apphints container + @Input pui32DeviceFlags : Pointer to device flags + + @Return void + +******************************************************************************/ +static INLINE void InitDeviceFlags(RGX_SRVINIT_APPHINTS *psHints, + IMG_UINT32 *pui32DeviceFlags) +{ + IMG_UINT32 ui32DeviceFlags = 0; + +#if defined(SUPPORT_VALIDATION) + ui32DeviceFlags |= psHints->bInjectPowUnitsStateMaskChange? RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN : 0; +#endif + ui32DeviceFlags |= psHints->bZeroFreelist ? RGXKM_DEVICE_STATE_ZERO_FREELIST : 0; + ui32DeviceFlags |= psHints->bDisableFEDLogging ? RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN : 0; +#if defined(PVRSRV_ENABLE_CCCB_GROW) + BITMASK_SET(ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN); +#endif + + *pui32DeviceFlags = ui32DeviceFlags; +} + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) +/*! 
+******************************************************************************* + + @Function RGXTDProcessFWImage + + @Description Fetch and send data used by the trusted device to complete + the FW image setup + + @Input psDeviceNode : Device node + @Input psRGXFW : Firmware blob + @Input puFWParams : Parameters used by the FW at boot time + + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR RGXTDProcessFWImage(PVRSRV_DEVICE_NODE *psDeviceNode, + OS_FW_IMAGE *psRGXFW, + RGX_FW_BOOT_PARAMS *puFWParams) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_TD_FW_PARAMS sTDFWParams; + PVRSRV_ERROR eError; + + if (psDevConfig->pfnTDSendFWImage == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: TDSendFWImage not implemented!", __func__)); + return PVRSRV_ERROR_NOT_IMPLEMENTED; + } + + sTDFWParams.pvFirmware = OSFirmwareData(psRGXFW); + sTDFWParams.ui32FirmwareSize = OSFirmwareSize(psRGXFW); + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + sTDFWParams.uFWP.sMeta.sFWCodeDevVAddr = puFWParams->sMeta.sFWCodeDevVAddr; + sTDFWParams.uFWP.sMeta.sFWDataDevVAddr = puFWParams->sMeta.sFWDataDevVAddr; + sTDFWParams.uFWP.sMeta.sFWCorememCodeDevVAddr = puFWParams->sMeta.sFWCorememCodeDevVAddr; + sTDFWParams.uFWP.sMeta.sFWCorememCodeFWAddr = puFWParams->sMeta.sFWCorememCodeFWAddr; + sTDFWParams.uFWP.sMeta.uiFWCorememCodeSize = puFWParams->sMeta.uiFWCorememCodeSize; + sTDFWParams.uFWP.sMeta.sFWCorememDataDevVAddr = puFWParams->sMeta.sFWCorememDataDevVAddr; + sTDFWParams.uFWP.sMeta.sFWCorememDataFWAddr = puFWParams->sMeta.sFWCorememDataFWAddr; + sTDFWParams.uFWP.sMeta.ui32NumThreads = puFWParams->sMeta.ui32NumThreads; + } + + eError = psDevConfig->pfnTDSendFWImage(psDevConfig->hSysData, &sTDFWParams); + + return eError; +} +#endif + +/*! 
+******************************************************************************* + + @Function InitFirmware + + @Description Allocate, initialise and pdump Firmware code and data memory + + @Input psDeviceNode : Device Node + @Input psHints : Apphints + + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_SRVINIT_APPHINTS *psHints) +{ + OS_FW_IMAGE *psRGXFW = NULL; + const IMG_BYTE *pbRGXFirmware = NULL; + + /* FW code memory */ + IMG_DEVMEM_SIZE_T uiFWCodeAllocSize; + void *pvFWCodeHostAddr; + + /* FW data memory */ + IMG_DEVMEM_SIZE_T uiFWDataAllocSize; + void *pvFWDataHostAddr; + + /* FW coremem code memory */ + IMG_DEVMEM_SIZE_T uiFWCorememCodeAllocSize; + void *pvFWCorememCodeHostAddr = NULL; + + /* FW coremem data memory */ + IMG_DEVMEM_SIZE_T uiFWCorememDataAllocSize; + void *pvFWCorememDataHostAddr = NULL; + + RGX_FW_BOOT_PARAMS uFWParams; + RGX_LAYER_PARAMS sLayerParams; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + + /* + * Get pointer to Firmware image + */ + pbRGXFirmware = RGXLoadAndGetFWData(psDeviceNode, &psRGXFW); + if (!pbRGXFirmware) + { + /* Error or confirmation message generated in RGXLoadAndGetFWData */ + eError = PVRSRV_ERROR_INIT_FAILURE; + goto cleanup_initfw; + } + + sLayerParams.psDevInfo = psDevInfo; + + /* + * Allocate Firmware memory + */ + + eError = RGXGetFWImageAllocSize(&sLayerParams, + pbRGXFirmware, + OSFirmwareSize(psRGXFW), + &uiFWCodeAllocSize, + &uiFWDataAllocSize, + &uiFWCorememCodeAllocSize, + &uiFWCorememDataAllocSize); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: RGXGetFWImageAllocSize failed", + __func__)); + goto cleanup_initfw; + } + + psDevInfo->ui32FWCodeSizeInBytes = uiFWCodeAllocSize; + +#if defined(SUPPORT_TRUSTED_DEVICE) + if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA)) + { + 
PVR_DPF((PVR_DBG_WARNING, + "%s: META DMA not available, disabling core memory code/data", + __func__)); + uiFWCorememCodeAllocSize = 0; + uiFWCorememDataAllocSize = 0; + } +#endif + + psDevInfo->ui32FWCorememCodeSizeInBytes = uiFWCorememCodeAllocSize; + + eError = RGXInitAllocFWImgMem(psDeviceNode, + uiFWCodeAllocSize, + uiFWDataAllocSize, + uiFWCorememCodeAllocSize, + uiFWCorememDataAllocSize); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: PVRSRVRGXInitAllocFWImgMem failed (%d)", + __func__, + eError)); + goto cleanup_initfw; + } + + /* + * Acquire pointers to Firmware allocations + */ + +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, &pvFWCodeHostAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", cleanup_initfw); + +#else + /* We can't get a pointer to a secure FW allocation from within the DDK */ + pvFWCodeHostAddr = NULL; +#endif + +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc, &pvFWDataHostAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_code); + +#else + /* We can't get a pointer to a secure FW allocation from within the DDK */ + pvFWDataHostAddr = NULL; +#endif + +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) + if (uiFWCorememCodeAllocSize != 0) + { + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc, &pvFWCorememCodeHostAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_data); + } + else + { + pvFWCorememCodeHostAddr = NULL; + } +#else + /* We can't get a pointer to a secure FW allocation from within the DDK */ + pvFWCorememCodeHostAddr = NULL; +#endif + +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) + if 
(uiFWCorememDataAllocSize != 0) + { + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc, &pvFWCorememDataHostAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_corememcode); + } + else +#endif + { + pvFWCorememDataHostAddr = NULL; + } + + /* + * Prepare FW boot parameters + */ + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + uFWParams.sMeta.sFWCodeDevVAddr = psDevInfo->sFWCodeDevVAddrBase; + uFWParams.sMeta.sFWDataDevVAddr = psDevInfo->sFWDataDevVAddrBase; + uFWParams.sMeta.sFWCorememCodeDevVAddr = psDevInfo->sFWCorememCodeDevVAddrBase; + uFWParams.sMeta.sFWCorememCodeFWAddr = psDevInfo->sFWCorememCodeFWAddr; + uFWParams.sMeta.uiFWCorememCodeSize = uiFWCorememCodeAllocSize; + uFWParams.sMeta.sFWCorememDataDevVAddr = psDevInfo->sFWCorememDataStoreDevVAddrBase; + uFWParams.sMeta.sFWCorememDataFWAddr = psDevInfo->sFWCorememDataStoreFWAddr; +#if defined(RGXFW_META_SUPPORT_2ND_THREAD) + uFWParams.sMeta.ui32NumThreads = 2; +#else + uFWParams.sMeta.ui32NumThreads = 1; +#endif + } + else + { + uFWParams.sRISCV.sFWCorememCodeDevVAddr = psDevInfo->sFWCorememCodeDevVAddrBase; + uFWParams.sRISCV.sFWCorememCodeFWAddr = psDevInfo->sFWCorememCodeFWAddr; + uFWParams.sRISCV.uiFWCorememCodeSize = uiFWCorememCodeAllocSize; + + uFWParams.sRISCV.sFWCorememDataDevVAddr = psDevInfo->sFWCorememDataStoreDevVAddrBase; + uFWParams.sRISCV.sFWCorememDataFWAddr = psDevInfo->sFWCorememDataStoreFWAddr; + uFWParams.sRISCV.uiFWCorememDataSize = uiFWCorememDataAllocSize; + } + + + /* + * Process the Firmware image and setup code and data segments. + * + * When the trusted device is enabled and the FW code lives + * in secure memory we will only setup the data segments here, + * while the code segments will be loaded to secure memory + * by the trusted device. 
+ */ + if (!psDeviceNode->bAutoVzFwIsUp) + { + eError = RGXProcessFWImage(&sLayerParams, + pbRGXFirmware, + pvFWCodeHostAddr, + pvFWDataHostAddr, + pvFWCorememCodeHostAddr, + pvFWCorememDataHostAddr, + &uFWParams); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: RGXProcessFWImage failed (%d)", + __func__, + eError)); + goto release_corememdata; + } + } + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + if (psRGXFW) + { + RGXTDProcessFWImage(psDeviceNode, psRGXFW, &uFWParams); + } +#endif + + + /* + * PDump Firmware allocations + */ + +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump firmware code image"); + DevmemPDumpLoadMem(psDevInfo->psRGXFWCodeMemDesc, + 0, + uiFWCodeAllocSize, + PDUMP_FLAGS_CONTINUOUS); +#endif + + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump firmware data image"); + DevmemPDumpLoadMem(psDevInfo->psRGXFWDataMemDesc, + 0, + uiFWDataAllocSize, + PDUMP_FLAGS_CONTINUOUS); + +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) + if (uiFWCorememCodeAllocSize != 0) + { + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump firmware coremem code image"); + DevmemPDumpLoadMem(psDevInfo->psRGXFWCorememCodeMemDesc, + 0, + uiFWCorememCodeAllocSize, + PDUMP_FLAGS_CONTINUOUS); + } +#endif + +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) + if (uiFWCorememDataAllocSize != 0) + { + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump firmware coremem data store image"); + DevmemPDumpLoadMem(psDevInfo->psRGXFWIfCorememDataStoreMemDesc, + 0, + uiFWCorememDataAllocSize, + PDUMP_FLAGS_CONTINUOUS); + } +#endif + + /* + * Release Firmware allocations and clean up + */ +release_corememdata: +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || 
defined(SUPPORT_SECURITY_VALIDATION) + if (uiFWCorememDataAllocSize !=0) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc); + } + +release_corememcode: + if (uiFWCorememCodeAllocSize != 0) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc); + } +#endif + +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) +release_data: + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc); +#endif + +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) +release_code: + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); +#endif +cleanup_initfw: + if (psRGXFW != NULL) + { + OSUnloadFirmware(psRGXFW); + } + + return eError; +} + +IMG_INTERNAL static inline IMG_UINT32 RGXHWPerfMaxDefinedBlks(PVRSRV_RGXDEV_INFO *); +IMG_INTERNAL /*static inline*/ IMG_UINT32 RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **); + +IMG_INTERNAL /*static inline*/ IMG_UINT32 +RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **ppsModel) +{ + *ppsModel = gasCntBlkTypeModel; + return ARRAY_SIZE(gasCntBlkTypeModel); +} + +/*! +******************************************************************************* + @Function RGXHWPerfMaxDefinedBlks + + @Description Return the number of valid block-IDs for the given device node + + @Input (PVRSRV_RGXDEV_INFO *) pvDevice device-node to query + + @Returns (IMG_UINT32) Number of block-IDs (RGX_CNTBLK_ID) + valid for this device. 
+******************************************************************************/ +IMG_INTERNAL static inline IMG_UINT32 +RGXHWPerfMaxDefinedBlks(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGX_HWPERF_CNTBLK_RT_INFO sRtInfo; + IMG_UINT32 uiRetVal; + const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *psHWPBlkConfig; + IMG_UINT32 uiNumArrayEls, ui; + + uiRetVal = RGX_CNTBLK_ID_DIRECT_LAST; + + uiNumArrayEls = RGXGetHWPerfBlockConfig(&psHWPBlkConfig); + + if (psHWPBlkConfig == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Unexpected NULL Config Block", __func__)); + return 0; + } + PVR_ASSERT(uiNumArrayEls > 0); + + /* Iterate over each block-ID and find the number of instances of each + * block which are present for this device type. We only query the + * Indirect blocks as their presence varies according to GPU. All direct + * blocks have an entry - but they may not be physically present. + */ + for (ui = RGX_CNTBLK_ID_DIRECT_LAST; ui < uiNumArrayEls; ui++) + { + if (rgx_hwperf_blk_present(&psHWPBlkConfig[ui], (void *)psDevInfo, &sRtInfo)) + { + uiRetVal += sRtInfo.uiNumUnits; + PVR_DPF((PVR_DBG_VERBOSE, "%s: Block %u, NumUnits %u, Total %u", + __func__, ui, sRtInfo.uiNumUnits, uiRetVal)); + } +#ifdef DEBUG + else + { + PVR_DPF((PVR_DBG_WARNING, "%s: Block %u *NOT* present", + __func__, ui)); + } +#endif + } + + PVR_DPF((PVR_DBG_VERBOSE, "%s: Num Units = %u", __func__, uiRetVal)); + + return uiRetVal; +} + +/*! +******************************************************************************* + + @Function InitialiseHWPerfCounters + + @Description Initialisation of hardware performance counters and dumping + them out to pdump, so that they can be modified at a later + point. 
+ + @Input pvDevice + @Input psHWPerfDataMemDesc + @Input psHWPerfInitDataInt + + @Return void + +******************************************************************************/ + +static void InitialiseHWPerfCounters(void *pvDevice, DEVMEM_MEMDESC *psHWPerfDataMemDesc, RGXFWIF_HWPERF_CTL *psHWPerfInitDataInt) +{ + RGXFWIF_HWPERF_CTL_BLK *psHWPerfInitBlkData; + IMG_UINT32 ui32CntBlkModelLen; + const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *asCntBlkTypeModel; + const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc; + IMG_UINT32 ui32BlockID, ui32BlkCfgIdx, ui32CounterIdx; + RGX_HWPERF_CNTBLK_RT_INFO sCntBlkRtInfo; + IMG_UINT32 uiUnit; + IMG_BOOL bDirect; + + ui32CntBlkModelLen = RGXGetHWPerfBlockConfig(&asCntBlkTypeModel); + + PVR_DPF((PVR_DBG_VERBOSE, "%s: #BlockConfig entries = %d", __func__, ui32CntBlkModelLen)); + + /* Initialise the number of blocks in the RGXFWIF_HWPERF_CTL structure. + * This allows Firmware to validate that it has been correctly configured. + */ + psHWPerfInitDataInt->ui32NumBlocks = RGXHWPerfMaxDefinedBlks(pvDevice); + + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "HWPerf Block count = %u.", + psHWPerfInitDataInt->ui32NumBlocks); +#if defined(PDUMP) + /* Ensure that we record the BVNC specific ui32NumBlocks in the PDUMP data + * so that when we playback we have the correct value present. 
+ */ + DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, + (size_t)&(psHWPerfInitDataInt->ui32NumBlocks) - (size_t)(psHWPerfInitDataInt), + psHWPerfInitDataInt->ui32NumBlocks, PDUMP_FLAGS_CONTINUOUS); +#endif /* defined(PDUMP) */ + + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "HWPerf Counter config starts here."); + + /* Simply iterate over all the RGXFWIW_HWPERF_CTL blocks in order */ + psHWPerfInitBlkData = &psHWPerfInitDataInt->sBlkCfg[0]; + + for (ui32BlkCfgIdx = 0; ui32BlkCfgIdx < ui32CntBlkModelLen; + ui32BlkCfgIdx++, psHWPerfInitBlkData++) + { + IMG_BOOL bSingleton; + + /* Exit early if this core does not have any of these counter blocks + * due to core type/BVNC features.... */ + psBlkTypeDesc = &asCntBlkTypeModel[ui32BlkCfgIdx]; + + if (psBlkTypeDesc == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Unexpected NULL - Index %d / %d", + __func__, ui32BlkCfgIdx, ui32CntBlkModelLen)); + continue; + } + + PVR_DPF((PVR_DBG_VERBOSE, + "%s: CfgIdx = %u, InitBlkData @ 0x%p, BlkTypeDesc @ 0x%p", + __func__, ui32BlkCfgIdx, psHWPerfInitBlkData, psBlkTypeDesc)); + + if (psBlkTypeDesc->pfnIsBlkPresent(psBlkTypeDesc, pvDevice, &sCntBlkRtInfo) == IMG_FALSE) + { + PVR_DPF((PVR_DBG_VERBOSE, "%s: %s [ID 0x%x] NOT present", __func__, + psBlkTypeDesc->pszBlockNameComment, + psBlkTypeDesc->uiCntBlkIdBase )); + /* Block isn't present, but has an entry in the table. Populate + * the Init data so that we can track the block later. + */ + psHWPerfInitBlkData->uiBlockID = psBlkTypeDesc->uiCntBlkIdBase; + continue; + } +#ifdef DEBUG + else + { + PVR_DPF((PVR_DBG_VERBOSE, "%s: %s has %d %s", __func__, + psBlkTypeDesc->pszBlockNameComment, sCntBlkRtInfo.uiNumUnits, + (sCntBlkRtInfo.uiNumUnits > 1) ? "units" : "unit")); + } +#endif /* DEBUG */ + + /* Program all counters in one block so those already on may + * be configured off and vice-versa. 
*/ + bDirect = psBlkTypeDesc->uiIndirectReg == 0; + + /* Set if there is only one instance of this block-ID present */ + bSingleton = sCntBlkRtInfo.uiNumUnits == 1; + + for (ui32BlockID = psBlkTypeDesc->uiCntBlkIdBase, uiUnit = 0; + ui32BlockID < psBlkTypeDesc->uiCntBlkIdBase+sCntBlkRtInfo.uiNumUnits; + ui32BlockID++, uiUnit++) + { + + if (bDirect) + { + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Block : %s", psBlkTypeDesc->pszBlockNameComment); + } + else + { + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Unit %d Block : %s%d", + ui32BlockID-psBlkTypeDesc->uiCntBlkIdBase, + psBlkTypeDesc->pszBlockNameComment, uiUnit); + } + + psHWPerfInitBlkData->uiBlockID = ui32BlockID; + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "uiBlockID: The Block ID for the layout block. See RGX_CNTBLK_ID for further information."); +#if defined(PDUMP) + DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, + (size_t)&(psHWPerfInitBlkData->uiBlockID) - (size_t)(psHWPerfInitDataInt), + psHWPerfInitBlkData->uiBlockID, + PDUMP_FLAGS_CONTINUOUS); +#endif /* PDUMP */ + + psHWPerfInitBlkData->uiNumCounters = 0; + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "uiNumCounters (X): Specifies the number of valid counters" + " [0..%d] which follow.", RGX_CNTBLK_COUNTERS_MAX); +#if defined(PDUMP) + DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, + (size_t)&(psHWPerfInitBlkData->uiNumCounters) - (size_t)(psHWPerfInitDataInt), + psHWPerfInitBlkData->uiNumCounters, + PDUMP_FLAGS_CONTINUOUS); +#endif /* PDUMP */ + + psHWPerfInitBlkData->uiEnabled = 0; + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "uiEnabled: Set to 0x1 if the block needs to be enabled during playback."); +#if defined(PDUMP) + DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, + (size_t)&(psHWPerfInitBlkData->uiEnabled) - (size_t)(psHWPerfInitDataInt), + psHWPerfInitBlkData->uiEnabled, + PDUMP_FLAGS_CONTINUOUS); +#endif /* PDUMP */ + + for (ui32CounterIdx = 0; ui32CounterIdx < RGX_CNTBLK_COUNTERS_MAX; ui32CounterIdx++) + { + 
psHWPerfInitBlkData->aui32CounterCfg[ui32CounterIdx] = IMG_UINT32_C(0x00000000); + + if (bDirect) + { + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "%s_COUNTER_%d", + psBlkTypeDesc->pszBlockNameComment, ui32CounterIdx); + } + else + { + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "%s%d_COUNTER_%d", + psBlkTypeDesc->pszBlockNameComment, + uiUnit, ui32CounterIdx); + } +#if defined(PDUMP) + DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, + (size_t)&(psHWPerfInitBlkData->aui32CounterCfg[ui32CounterIdx]) - (size_t)(psHWPerfInitDataInt), + psHWPerfInitBlkData->aui32CounterCfg[ui32CounterIdx], + PDUMP_FLAGS_CONTINUOUS); +#endif /* PDUMP */ + + } + + /* Update our block reference for indirect units which have more + * than a single unit present. Only increment if we have more than + * one unit left to process as the external loop counter will be + * incremented after final unit is processed. + */ + if (!bSingleton && (uiUnit < (sCntBlkRtInfo.uiNumUnits - 1))) + { + psHWPerfInitBlkData++; + } + } + } + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "HWPerf Counter config finishes here."); +} + +/*! 
+******************************************************************************* + + @Function InitialiseAllCounters + + @Description Initialise HWPerf and custom counters + + @Input psDeviceNode : Device Node + + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR InitialiseAllCounters(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + RGXFWIF_HWPERF_CTL *psHWPerfInitData; + PVRSRV_ERROR eError; + + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfCountersMemDesc, (void **)&psHWPerfInitData); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", failHWPerfCountersMemDescAqCpuVirt); + + InitialiseHWPerfCounters(psDevInfo, psDevInfo->psRGXFWIfHWPerfCountersMemDesc, psHWPerfInitData); + +failHWPerfCountersMemDescAqCpuVirt: + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfCountersMemDesc); + + return eError; +} + +/* + * _ParseHTBAppHints: + * + * Generate necessary references to the globally visible AppHints which are + * declared in the above #include "km_apphint_defs.h" + * Without these local references some compiler tool-chains will treat + * unreferenced declarations as fatal errors. This function duplicates the + * HTB_specific apphint references which are made in htbserver.c:HTBInit() + * However, it makes absolutely *NO* use of these hints. + */ +static void +_ParseHTBAppHints(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + void *pvParamState = NULL; + IMG_UINT32 ui32LogType; + IMG_BOOL bAnyLogGroupConfigured; + IMG_UINT32 ui32BufferSize; + IMG_UINT32 ui32OpMode; + + /* Services initialisation parameters */ + pvParamState = SrvInitParamOpen(); + if (pvParamState == NULL) + return; + + SrvInitParamGetUINT32BitField(pvParamState, EnableHTBLogGroup, ui32LogType); + bAnyLogGroupConfigured = ui32LogType ? 
IMG_TRUE : IMG_FALSE; + SrvInitParamGetUINT32List(pvParamState, HTBOperationMode, ui32OpMode); + SrvInitParamGetUINT32(pvParamState, HTBufferSizeInKB, ui32BufferSize); + + SrvInitParamClose(pvParamState); +} + +/*! +******************************************************************************* + + @Function RGXInit + + @Description RGX Initialisation + + @Input psDeviceNode + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + + /* Services initialisation parameters */ + RGX_SRVINIT_APPHINTS sApphints = {0}; + IMG_UINT32 ui32FWConfigFlags, ui32FWConfigFlagsExt, ui32FwOsCfgFlags; + IMG_UINT32 ui32DeviceFlags; + IMG_UINT32 ui32AvailablePowUnitsMask; + + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + + /* Number of HWPerf Block-IDs (RGX_CNTBLK_ID) which are available */ + IMG_UINT32 ui32NumHWPerfBlocks; + + /* Size of the RGXFWIF_HWPERF_CTL_BLK structure - varies by BVNC */ + IMG_UINT32 ui32HWPerfBlkSize; + +#if defined(SUPPORT_AUTOVZ) + if (PVRSRV_VZ_MODE_IS(HOST)) + { + const IMG_UINT32 ui32MtsDm0IntEnableReg = 0xB58; + + /* The RGX_CR_MTS_DM0_INTERRUPT_ENABLE register is always set by the firmware during initialisation + * and it provides a good method of determining if the firmware has been booted previously */ + psDeviceNode->bAutoVzFwIsUp = (OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32MtsDm0IntEnableReg) != 0); + + PVR_LOG(("AutoVz startup check: firmware is %s;", + (psDeviceNode->bAutoVzFwIsUp) ? 
"already running" : "powered down")); + } + else if (PVRSRV_VZ_MODE_IS(GUEST)) + { + /* Guest assumes the firmware is always available */ + psDeviceNode->bAutoVzFwIsUp = IMG_TRUE; + } + else +#endif + { + /* Firmware does not follow the AutoVz life-cycle */ + psDeviceNode->bAutoVzFwIsUp = IMG_FALSE; + } + +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Register defs revision: %d", RGX_CR_DEFS_KM_REVISION); +#endif + + ui32NumHWPerfBlocks = RGXHWPerfMaxDefinedBlks((void *)psDevInfo); + + ui32HWPerfBlkSize = sizeof(RGXFWIF_HWPERF_CTL) + + (ui32NumHWPerfBlocks - 1) * sizeof(RGXFWIF_HWPERF_CTL_BLK); + + /* Services initialisation parameters */ + _ParseHTBAppHints(psDeviceNode); + GetApphints(psDevInfo, &sApphints); + InitDeviceFlags(&sApphints, &ui32DeviceFlags); + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +{ + PVRSRVGPUVIRTPopulateLMASubArenasKM(psDeviceNode, sApphints.aui32OSidMin, sApphints.aui32OSidMax, sApphints.bEnableTrustedDeviceAceConfig); +} +#endif + + eError = RGXInitCreateFWKernelMemoryContext(psDeviceNode); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create FW kernel memory context (%u)", + __func__, eError)); + goto cleanup; + } + + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + eError = InitFirmware(psDeviceNode, &sApphints); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: InitFirmware failed (%d)", + __func__, + eError)); + goto cleanup; + } + } + + /* + * Setup Firmware initialisation data + */ + + GetFWConfigFlags(psDeviceNode, &sApphints, &ui32FWConfigFlags, &ui32FWConfigFlagsExt, &ui32FwOsCfgFlags); + +#if defined(SUPPORT_VALIDATION) + ui32AvailablePowUnitsMask = sApphints.ui32AvailablePowUnitsMask; +#else + ui32AvailablePowUnitsMask = AVAIL_POW_UNITS_MASK_DEFAULT; +#endif + + eError = RGXInitFirmware(psDeviceNode, + sApphints.bEnableSignatureChecks, + sApphints.ui32SignatureChecksBufSize, + sApphints.ui32HWPerfFWBufSize, + (IMG_UINT64)sApphints.ui32HWPerfFilter0 | + 
((IMG_UINT64)sApphints.ui32HWPerfFilter1 << 32), + 0, + NULL, + ui32FWConfigFlags, + sApphints.ui32LogType, + GetFilterFlags(&sApphints), + sApphints.ui32JonesDisableMask, + sApphints.ui32HWRDebugDumpLimit, +#if defined(SUPPORT_VALIDATION) + sApphints.ui32KillingCtl, + &sApphints.aui32TPUTrilinearFracMask[0], + &sApphints.aui32USRMNumRegions[0], + (IMG_PUINT64)&sApphints.aui64UVBRMNumRegions[0], +#else + 0, + NULL, NULL, NULL, +#endif + ui32HWPerfBlkSize, + sApphints.eRGXRDPowerIslandConf, + sApphints.eFirmwarePerf, + ui32FWConfigFlagsExt, + ui32AvailablePowUnitsMask, + ui32FwOsCfgFlags); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVRGXInitFirmware failed (%d)", + __func__, + eError)); + goto cleanup; + } + + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + eError = InitialiseAllCounters(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: InitialiseAllCounters failed (%d)", + __func__, + eError)); + goto cleanup; + } + } + + /* + * Perform second stage of RGX initialisation + */ + eError = RGXInitDevPart2(psDeviceNode, + ui32DeviceFlags, + sApphints.ui32HWPerfHostBufSize, + sApphints.ui32HWPerfHostFilter, + sApphints.eRGXActivePMConf, + ui32AvailablePowUnitsMask); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: PVRSRVRGXInitDevPart2KM failed (%d)", + __func__, + eError)); + goto cleanup; + } + +#if defined(SUPPORT_VALIDATION) + PVRSRVAppHintDumpState(); +#endif + + eError = PVRSRV_OK; + +cleanup: + return eError; +} + +/****************************************************************************** + End of file (rgxsrvinit.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxstartstop.c b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxstartstop.c new file mode 100644 index 000000000000..109259bae4d6 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxstartstop.c @@ 
-0,0 +1,771 @@ +/*************************************************************************/ /*! +@File +@Title Device specific start/stop routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific start/stop routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* The routines implemented here are built on top of an abstraction layer to
+ * hide DDK/OS-specific details in case they are used outside of the DDK
+ * (e.g. when trusted device is enabled).
+ * Any new dependency should be added to rgxlayer.h.
+ * Any new code should be built on top of the existing abstraction layer,
+ * which should be extended when necessary. */
+#include "rgxstartstop.h"
+#include "rgxfwutils.h"
+
+static PVRSRV_ERROR RGXWriteMetaCoreRegThoughSP(const void *hPrivate,
+                                                IMG_UINT32 ui32CoreReg,
+                                                IMG_UINT32 ui32Value)
+{
+	IMG_UINT32 i = 0;
+
+	RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXUXXRXDT_OFFSET, ui32Value);
+	RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXUXXRXRQ_OFFSET, ui32CoreReg & ~META_CR_TXUXXRXRQ_RDnWR_BIT);
+
+	do
+	{
+		RGXReadMetaRegThroughSP(hPrivate, META_CR_TXUXXRXRQ_OFFSET, &ui32Value);
+	} while (((ui32Value & META_CR_TXUXXRXRQ_DREADY_BIT) != META_CR_TXUXXRXRQ_DREADY_BIT) && (i++ < 1000));
+	/* Timed out iff DREADY was never observed; the post-incremented counter ends at 1001 on timeout, so testing i == 1000 was wrong */
+	if ((ui32Value & META_CR_TXUXXRXRQ_DREADY_BIT) != META_CR_TXUXXRXRQ_DREADY_BIT)
+	{
+		RGXCommentLog(hPrivate, "RGXWriteMetaCoreRegThoughSP: Timeout");
+		return PVRSRV_ERROR_TIMEOUT;
+	}
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXStartFirmware(const void *hPrivate)
+{
+	PVRSRV_ERROR eError;
+
+	/* Give privilege to debug and slave port */
+	RGXWriteMetaRegThroughSP(hPrivate, META_CR_SYSC_JTAG_THREAD, META_CR_SYSC_JTAG_THREAD_PRIV_EN);
+
+	/* Point Meta to the bootloader address,
global (uncached) range */ + eError = RGXWriteMetaCoreRegThoughSP(hPrivate, + PC_ACCESS(0), + RGXFW_BOOTLDR_META_ADDR | META_MEM_GLOBAL_RANGE_BIT); + + if (eError != PVRSRV_OK) + { + RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Slave boot Start failed!"); + return eError; + } + + /* Enable minim encoding */ + RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXPRIVEXT, META_CR_TXPRIVEXT_MINIM_EN); + + /* Enable Meta thread */ + RGXWriteMetaRegThroughSP(hPrivate, META_CR_T0ENABLE_OFFSET, META_CR_TXENABLE_ENABLE_BIT); + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RGXInitMetaProcWrapper + + @Description Configures the hardware wrapper of the META processor + + @Input hPrivate : Implementation specific data + + @Return void + +******************************************************************************/ +static void RGXInitMetaProcWrapper(const void *hPrivate) +{ + IMG_UINT64 ui64GartenConfig; + + /* Garten IDLE bit controlled by META */ + ui64GartenConfig = RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META; + + RGXCommentLog(hPrivate, "RGXStart: Configure META wrapper"); + RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, ui64GartenConfig); +} + + +/*! 
+******************************************************************************* + + @Function RGXInitRiscvProcWrapper + + @Description Configures the hardware wrapper of the RISCV processor + + @Input hPrivate : Implementation specific data + + @Return void + +******************************************************************************/ +static void RGXInitRiscvProcWrapper(const void *hPrivate) +{ + IMG_DEV_VIRTADDR sTmp; + + RGXCommentLog(hPrivate, "RGXStart: Configure RISCV wrapper"); + + RGXCommentLog(hPrivate, "RGXStart: Write boot code remap"); + RGXAcquireBootCodeAddr(hPrivate, &sTmp); + RGXWriteReg64(hPrivate, + RGXRISCVFW_BOOTLDR_CODE_REMAP, + sTmp.uiAddr | + (IMG_UINT64) (RGX_FIRMWARE_RAW_HEAP_SIZE >> TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_REGION_SIZE_ALIGN) + << TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_REGION_SIZE_SHIFT | + (IMG_UINT64) MMU_CONTEXT_MAPPING_FWPRIV << TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_MMU_CONTEXT_SHIFT | + TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_FETCH_EN); + + RGXCommentLog(hPrivate, "RGXStart: Write boot data remap"); + RGXAcquireBootDataAddr(hPrivate, &sTmp); + RGXWriteReg64(hPrivate, + RGXRISCVFW_BOOTLDR_DATA_REMAP, + sTmp.uiAddr | + (IMG_UINT64) (RGX_FIRMWARE_RAW_HEAP_SIZE >> TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_REGION_SIZE_ALIGN) + << TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_REGION_SIZE_SHIFT | + (IMG_UINT64) MMU_CONTEXT_MAPPING_FWPRIV << TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_MMU_CONTEXT_SHIFT | + TMP_RGX_CR_FWCORE_ADDR_REMAP0_CONFIG_LOAD_STORE_EN); + + /* Garten IDLE bit controlled by RISCV */ + RGXCommentLog(hPrivate, "RGXStart: Set GARTEN_IDLE type to RISCV"); + RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META); +} + + +/*! 
+******************************************************************************* + + @Function RGXInitBIF + + @Description Initialise RGX BIF + + @Input hPrivate : Implementation specific data + + @Return void + +******************************************************************************/ +static void RGXInitBIF(const void *hPrivate) +{ + IMG_DEV_PHYADDR sPCAddr; + IMG_UINT32 uiPCAddr; + + /* + * Acquire the address of the Kernel Page Catalogue. + */ + RGXAcquireKernelMMUPC(hPrivate, &sPCAddr); + uiPCAddr = (((sPCAddr.uiAddr >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT) + << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT) + & ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK); + + /* + * Write the kernel catalogue base. + */ + RGXCommentLog(hPrivate, "RGX firmware MMU Page Catalogue"); + + + /* Set the mapping context */ + RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, MMU_CONTEXT_MAPPING_FWPRIV); + (void)RGXReadReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT); /* Fence write */ + + /* Write the cat-base address */ + RGXWriteKernelMMUPC32(hPrivate, + RGX_CR_MMU_CBASE_MAPPING, + RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT, + RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT, + uiPCAddr); + +#if (MMU_CONTEXT_MAPPING_FWIF != MMU_CONTEXT_MAPPING_FWPRIV) + /* Set-up different MMU ID mapping to the same PC used above */ + RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, MMU_CONTEXT_MAPPING_FWIF); + (void)RGXReadReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT); /* Fence write */ + + RGXWriteKernelMMUPC32(hPrivate, + RGX_CR_MMU_CBASE_MAPPING, + RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT, + RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT, + uiPCAddr); +#endif +} + + +/**************************************************************************/ /*! +@Function RGXInitMMURangeRegisters +@Description Initialises MMU range registers for Non4K pages. 
+@Input hPrivate Implementation specific data +@Return void + */ /**************************************************************************/ +static void RGXInitMMURangeRegisters(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams = (RGX_LAYER_PARAMS*)hPrivate; + PVRSRV_RGXDEV_INFO *psDevInfo = psParams->psDevInfo; + IMG_UINT32 ui32RegAddr = RGX_CR_MMU_PAGE_SIZE_RANGE_ONE; + IMG_UINT32 i; + + for (i = 0; i < ARRAY_SIZE(psDevInfo->aui64MMUPageSizeRangeValue); ++i, ui32RegAddr += sizeof(IMG_UINT64)) + { + RGXWriteReg64(hPrivate, ui32RegAddr, psDevInfo->aui64MMUPageSizeRangeValue[i]); + } +} + + +/**************************************************************************/ /*! +@Function RGXInitAXIACE +@Description Initialises AXI ACE registers +@Input hPrivate Implementation specific data +@Return void + */ /**************************************************************************/ +static void RGXInitAXIACE(const void *hPrivate) +{ + IMG_UINT64 ui64RegVal; + + /** + * The below configuration is only applicable for RGX core's supporting + * ACE/ACE-lite protocol and connected to ACE coherent interconnect. + */ + + /** + * Configure AxDomain and AxCache for MMU transactions. + * AxDomain set to non sharable (0x0). + */ + ui64RegVal = RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_WRITE_ALLOCATE | + RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_READ_ALLOCATE; + + /** + * Configure AxCache for PM/MMU transactions. + * Set to same value (i.e WBRWALLOC caching, rgxmmunit.c:RGXDerivePTEProt8) + * as non-coherent PTEs + */ + ui64RegVal |= (IMG_UINT64_C(0xF)) << RGX_CR_ACE_CTRL_PM_MMU_AXCACHE_SHIFT; + + /** + * Configure AxDomain for non MMU transactions. 
+ */ + ui64RegVal |= RGX_CR_ACE_CTRL_COH_DOMAIN_OUTER_SHAREABLE | + RGX_CR_ACE_CTRL_NON_COH_DOMAIN_NON_SHAREABLE; + + RGXCommentLog(hPrivate, "Init AXI-ACE interface"); + RGXWriteReg64(hPrivate, RGX_CR_ACE_CTRL, ui64RegVal); +} + +static void RGXMercerSoftResetSet(const void *hPrivate, IMG_UINT64 ui32MercerFlags) +{ + RGXWriteReg64(hPrivate, RGX_CR_MERCER_SOFT_RESET, ui32MercerFlags & RGX_CR_MERCER_SOFT_RESET_MASKFULL); + + /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */ + (void) RGXReadReg64(hPrivate, RGX_CR_MERCER_SOFT_RESET); +} + +static void RGXSPUSoftResetAssert(const void *hPrivate) +{ + /* Assert Mercer0 */ + RGXMercerSoftResetSet(hPrivate, RGX_CR_MERCER0_SOFT_RESET_SPU_EN); + /* Assert Mercer1 */ + RGXMercerSoftResetSet(hPrivate, RGX_CR_MERCER0_SOFT_RESET_SPU_EN | RGX_CR_MERCER1_SOFT_RESET_SPU_EN); + /* Assert Mercer2 */ + RGXMercerSoftResetSet(hPrivate, RGX_CR_MERCER0_SOFT_RESET_SPU_EN | RGX_CR_MERCER1_SOFT_RESET_SPU_EN | RGX_CR_MERCER2_SOFT_RESET_SPU_EN); + + RGXWriteReg32(hPrivate, RGX_CR_SWIFT_SOFT_RESET, RGX_CR_SWIFT_SOFT_RESET_MASKFULL); + /* Fence the previous write */ + (void) RGXReadReg32(hPrivate, RGX_CR_SWIFT_SOFT_RESET); + + RGXWriteReg32(hPrivate, RGX_CR_TEXAS_SOFT_RESET, RGX_CR_TEXAS_SOFT_RESET_MASKFULL); + /* Fence the previous write */ + (void) RGXReadReg32(hPrivate, RGX_CR_TEXAS_SOFT_RESET); +} + +static void RGXSPUSoftResetDeAssert(const void *hPrivate) +{ + RGXWriteReg32(hPrivate, RGX_CR_TEXAS_SOFT_RESET, 0); + /* Fence the previous write */ + (void) RGXReadReg32(hPrivate, RGX_CR_TEXAS_SOFT_RESET); + + + RGXWriteReg32(hPrivate, RGX_CR_SWIFT_SOFT_RESET, 0); + /* Fence the previous write */ + (void) RGXReadReg32(hPrivate, RGX_CR_SWIFT_SOFT_RESET); + + /* Deassert Mercer2 */ + RGXMercerSoftResetSet(hPrivate, RGX_CR_MERCER0_SOFT_RESET_SPU_EN | RGX_CR_MERCER1_SOFT_RESET_SPU_EN); + /* Deassert Mercer1 */ + RGXMercerSoftResetSet(hPrivate, RGX_CR_MERCER0_SOFT_RESET_SPU_EN); + /* Deassert Mercer0 */ + 
RGXMercerSoftResetSet(hPrivate, 0); +} + +static void RGXResetSequence(const void *hPrivate, const IMG_CHAR *pcRGXFW_PROCESSOR) +{ + /* Set RGX in soft-reset */ + RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 1"); + RGXSPUSoftResetAssert(hPrivate); + + RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 2"); + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_SOFT_RESET_JONES_ALL); + + /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */ + (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); + + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_SOFT_RESET_JONES_ALL | RGX_SOFT_RESET_EXTRA); + + (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); + + /* Take everything out of reset but the FW processor */ + RGXCommentLog(hPrivate, "RGXStart: soft reset de-assert step 1 excluding %s", pcRGXFW_PROCESSOR); + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_SOFT_RESET_EXTRA | RGX_CR_SOFT_RESET_GARTEN_EN); + + (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); + + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN); + + (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); + + RGXCommentLog(hPrivate, "RGXStart: soft reset de-assert step 2 excluding %s", pcRGXFW_PROCESSOR); + RGXSPUSoftResetDeAssert(hPrivate); + + (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); +} + +static void DeassertMetaReset(const void *hPrivate) +{ + /* Need to wait for at least 16 cycles before taking the FW processor out of reset ... */ + RGXWaitCycles(hPrivate, 32, 3); + + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, 0x0); + (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); + + /* ... and afterwards */ + RGXWaitCycles(hPrivate, 32, 3); +} + +static PVRSRV_ERROR InitJonesECCRAM(const void *hPrivate) +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32Value; + + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, ECC_RAMS) == 0) + { + return PVRSRV_ERROR_NOT_SUPPORTED; + } + + /* META must be taken out of reset (without booting) during Coremem initialization. 
*/ + RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, 0); + DeassertMetaReset(hPrivate); + + /* Clocks must be set to "on" during RAMs initialization. */ + RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL0, RGX_CR_CLK_CTRL0_ALL_ON); + RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL1, RGX_CR_CLK_CTRL1_ALL_ON); + + RGXWriteMetaRegThroughSP(hPrivate, META_CR_SYSC_JTAG_THREAD, META_CR_SYSC_JTAG_THREAD_PRIV_EN); + RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXCLKCTRL, META_CR_TXCLKCTRL_ALL_ON); + RGXReadMetaRegThroughSP(hPrivate, META_CR_TXCLKCTRL, &ui32Value); + + RGXWriteReg64(hPrivate, RGX_CR_JONES_RAM_INIT_KICK, RGX_CR_JONES_RAM_INIT_KICK_MASKFULL); + eError = RGXPollReg64(hPrivate, RGX_CR_JONES_RAM_STATUS, + RGX_CR_JONES_RAM_STATUS_MASKFULL, RGX_CR_JONES_RAM_STATUS_MASKFULL); + + RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXCLKCTRL, META_CR_TXCLKCTRL_ALL_AUTO); + RGXReadMetaRegThroughSP(hPrivate, META_CR_TXCLKCTRL, &ui32Value); + + RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL0, RGX_CR_CLK_CTRL0_ALL_AUTO); + RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL1, RGX_CR_CLK_CTRL1_ALL_AUTO); + + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN); + RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); + + return eError; +} + +PVRSRV_ERROR RGXStart(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams = (RGX_LAYER_PARAMS*)hPrivate; + PVRSRV_RGXDEV_INFO *psDevInfo = psParams->psDevInfo; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_BOOL bDoFWSlaveBoot = IMG_FALSE; + IMG_CHAR *pcRGXFW_PROCESSOR; + IMG_BOOL bMetaFW = IMG_FALSE; + + if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) + { + pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_RISCV; + bMetaFW = IMG_FALSE; + bDoFWSlaveBoot = IMG_FALSE; + } + else + { + pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META; + bMetaFW = IMG_TRUE; + bDoFWSlaveBoot = RGXDoFWSlaveBoot(hPrivate); + } + + /* Disable the default sys_bus_secure protection to perform minimal setup */ + RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, 0); + (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); + + 
/* Only bypass HMMU if the module is present */ + if (RGXDeviceHasFeature(hPrivate, RGX_FEATURE_HYPERVISOR_MMU_BIT_MASK)) + { + if (PVRSRV_VZ_MODE_IS(NATIVE)) + { + /* Always set HMMU in bypass mode */ + RGXWriteReg32(hPrivate, RGX_CR_HMMU_BYPASS, RGX_CR_HMMU_BYPASS_MASKFULL); + (void) RGXReadReg32(hPrivate, RGX_CR_HMMU_BYPASS); + } +#if defined(PVRSRV_VZ_BYPASS_HMMU) + if (PVRSRV_VZ_MODE_IS(HOST)) + { + /* Also set HMMU in bypass mode */ + RGXWriteReg32(hPrivate, RGX_CR_HMMU_BYPASS, RGX_CR_HMMU_BYPASS_MASKFULL); + (void) RGXReadReg32(hPrivate, RGX_CR_HMMU_BYPASS); + } +#endif + } + +#if defined(SUPPORT_VALIDATION) +#if !defined(RGX_CR_FIRMWARE_PROCESSOR_LS) +#define RGX_CR_FIRMWARE_PROCESSOR_LS (0x01A0U) +#define RGX_CR_FIRMWARE_PROCESSOR_LS_ENABLE_EN (0x00000001U) +#endif + { + if (psDevInfo->ui32ValidationFlags & RGX_VAL_LS_EN) + { + /* Set the dual LS mode */ + RGXWriteReg32(hPrivate, RGX_CR_FIRMWARE_PROCESSOR_LS, RGX_CR_FIRMWARE_PROCESSOR_LS_ENABLE_EN); + (void) RGXReadReg32(hPrivate, RGX_CR_FIRMWARE_PROCESSOR_LS); + } + } +#endif + + /*! 
+ * Start series8 FW init sequence + */ + RGXResetSequence(hPrivate, pcRGXFW_PROCESSOR); + + if (bMetaFW && RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, ECC_RAMS) > 0) + { + RGXCommentLog(hPrivate, "RGXStart: Init Jones ECC RAM"); + eError = InitJonesECCRAM(hPrivate); + if (eError != PVRSRV_OK) + { + return eError; + } + } + + if (bMetaFW) + { + if (bDoFWSlaveBoot) + { + /* Configure META to Slave boot */ + RGXCommentLog(hPrivate, "RGXStart: META Slave boot"); + RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, 0); + } + else + { + /* Configure META to Master boot */ + RGXCommentLog(hPrivate, "RGXStart: META Master boot"); + RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, RGX_CR_META_BOOT_MODE_EN); + } + } + + /* + * Initialise Firmware wrapper + */ + if (bMetaFW) + { + RGXInitMetaProcWrapper(hPrivate); + } + else + { + RGXInitRiscvProcWrapper(hPrivate); + } + + if (RGX_GET_FEATURE_VALUE(psDevInfo, MMU_VERSION) >= 4) + { + // initialise the MMU range based config registers for Non4K pages. + RGXInitMMURangeRegisters(hPrivate); + } + + RGXInitAXIACE(hPrivate); + /* + * Initialise BIF. 
+ */ + RGXInitBIF(hPrivate); + + RGXCommentLog(hPrivate, "RGXStart: Take %s out of reset", pcRGXFW_PROCESSOR); + DeassertMetaReset(hPrivate); + + if (bMetaFW) + { + if (bDoFWSlaveBoot) + { + eError = RGXFabricCoherencyTest(hPrivate); + if (eError != PVRSRV_OK) return eError; + + RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Slave boot Start"); + eError = RGXStartFirmware(hPrivate); + if (eError != PVRSRV_OK) return eError; + } + else + { + RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Master boot Start"); + } + } + else + { + RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Master boot Start"); + RGXWriteReg32(hPrivate, TMP_RGX_CR_FWCORE_BOOT, 1); + RGXWaitCycles(hPrivate, 32, 3); + } + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(SUPPORT_SECURITY_VALIDATION) + RGXCommentLog(hPrivate, "RGXStart: Enable sys_bus_secure"); + RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, RGX_CR_SYS_BUS_SECURE_ENABLE_EN); + (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */ +#endif + + /*! 
+ * End series8 FW init sequence + */ + + return eError; +} + +PVRSRV_ERROR RGXStop(const void *hPrivate) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_BOOL bMetaFW = RGX_DEVICE_HAS_FEATURE_VALUE(hPrivate, META); + + RGXDeviceIrqEventRx(hPrivate); + +#if defined(SUPPORT_VALIDATION) && !defined(TC_MEMORY_CONFIG) +#if !defined(RGX_CR_POWER_EVENT) +#define RGX_CR_POWER_EVENT (0x0038U) +#define RGX_CR_POWER_EVENT_DOMAIN_SPU0_SHIFT (9U) +#define RGX_CR_POWER_EVENT_DOMAIN_CLUSTER0_SHIFT (8U) +#define RGX_CR_POWER_EVENT_TYPE_SHIFT (0U) +#define RGX_CR_POWER_EVENT_TYPE_POWER_DOWN (0x00000000U) +#define RGX_CR_POWER_EVENT_REQ_EN (0x00000002U) +#endif + + /* Power off any enabled SPUs */ + { + IMG_UINT32 ui32SPUOffMask = (1 << RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, NUM_SPU)) -1; + IMG_UINT32 ui32RegVal; + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, POWER_ISLAND_VERSION) == 2) + { + ui32RegVal = (ui32SPUOffMask << RGX_CR_POWER_EVENT_DOMAIN_CLUSTER0_SHIFT) | + RGX_CR_POWER_EVENT_TYPE_POWER_DOWN; + } + else + { + ui32RegVal = (ui32SPUOffMask << RGX_CR_POWER_EVENT_DOMAIN_SPU0_SHIFT) | + RGX_CR_POWER_EVENT_TYPE_POWER_DOWN; + } + + RGXWriteReg32(hPrivate, + RGX_CR_POWER_EVENT, + ui32RegVal); + + RGXWriteReg32(hPrivate, + RGX_CR_POWER_EVENT, + ui32RegVal | RGX_CR_POWER_EVENT_REQ_EN); + + /* Poll on complete */ + eError = RGXPollReg32(hPrivate, + RGX_CR_EVENT_STATUS, + RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN, + RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN); + if (eError != PVRSRV_OK) return eError; + + /* Update the SPU_ENABLE mask */ + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, POWER_ISLAND_VERSION) == 1) + { + RGXWriteReg32(hPrivate, RGX_CR_SPU_ENABLE, 0); + } + RGXWriteReg32(hPrivate, 0xF020, 0); + } +#endif + + /* Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper */ + eError = RGXPollReg32(hPrivate, + RGX_CR_JONES_IDLE, + RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_AXI2IMG_EN), + 
RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_AXI2IMG_EN)); + + if (eError != PVRSRV_OK) return eError; + + + /* Wait for SLC to signal IDLE */ + eError = RGXPollReg32(hPrivate, + RGX_CR_SLC_IDLE, + RGX_CR_SLC_IDLE_MASKFULL^(~RGX_CR_SLC_IDLE_ACE_CONVERTERS_CLRMSK), + RGX_CR_SLC_IDLE_MASKFULL^(~RGX_CR_SLC_IDLE_ACE_CONVERTERS_CLRMSK)); + if (eError != PVRSRV_OK) return eError; + + + /* Unset MTS DM association with threads */ + RGXWriteReg32(hPrivate, + RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC, + RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK + & RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL); + RGXWriteReg32(hPrivate, + RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC, + RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK + & RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL); + RGXWriteReg32(hPrivate, + RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC, + RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK + & RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL); + RGXWriteReg32(hPrivate, + RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC, + RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK + & RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL); + + +#if defined(PDUMP) + if (bMetaFW) + { + PVRSRV_RGXDEV_INFO *psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + /* Disabling threads is only required for pdumps to stop the fw gracefully */ + + /* Disable thread 0 */ + eError = RGXWriteMetaRegThroughSP(hPrivate, + META_CR_T0ENABLE_OFFSET, + ~META_CR_TXENABLE_ENABLE_BIT); + if (eError != PVRSRV_OK) return eError; + + /* Disable thread 1 */ + eError = RGXWriteMetaRegThroughSP(hPrivate, + META_CR_T1ENABLE_OFFSET, + ~META_CR_TXENABLE_ENABLE_BIT); + if (eError != PVRSRV_OK) return eError; + + /* Wait for the Slave Port to finish all the transactions */ + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) + { + /* Clear down any irq raised by META (done after disabling the FW + * threads to avoid a race condition). 
+ * This is only really needed for PDumps but we do it anyway driver-live. + */ + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES, 0x0); + (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES); /* Fence write */ + + eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN); + } + else + { + /* Clear down any irq raised by META (done after disabling the FW + * threads to avoid a race condition). + * This is only really needed for PDumps but we do it anyway driver-live. + */ + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS, 0x0); + (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS); /* Fence write */ + + eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1, + RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); + } + if (eError != PVRSRV_OK) return eError; + } +#endif + + + eError = RGXPollReg64(hPrivate, + RGX_CR_SLC_STATUS1, + 0, + (~RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_READS_CLRMSK | + ~RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_READS_CLRMSK | + ~RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_WRITES_CLRMSK | + ~RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_WRITES_CLRMSK)); + if (eError != PVRSRV_OK) return eError; + + eError = RGXPollReg64(hPrivate, + RGX_CR_SLC_STATUS2, + 0, + (~RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_READS_CLRMSK | + ~RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_READS_CLRMSK | + ~RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_WRITES_CLRMSK | + ~RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_WRITES_CLRMSK)); + if (eError != PVRSRV_OK) return eError; + + + /* Wait for SLC to signal 
IDLE */ + eError = RGXPollReg32(hPrivate, + RGX_CR_SLC_IDLE, + RGX_CR_SLC_IDLE_MASKFULL^(~RGX_CR_SLC_IDLE_ACE_CONVERTERS_CLRMSK), + RGX_CR_SLC_IDLE_MASKFULL^(~RGX_CR_SLC_IDLE_ACE_CONVERTERS_CLRMSK)); + if (eError != PVRSRV_OK) return eError; + + + /* Wait for Jones to signal IDLE except for the Garten Wrapper */ + eError = RGXPollReg32(hPrivate, + RGX_CR_JONES_IDLE, + RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_AXI2IMG_EN), + RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_AXI2IMG_EN)); + + if (eError != PVRSRV_OK) return eError; + + + if (bMetaFW) + { + IMG_UINT32 ui32RegValue; + + eError = RGXReadMetaRegThroughSP(hPrivate, + META_CR_TxVECINT_BHALT, + &ui32RegValue); + if (eError != PVRSRV_OK) return eError; + + if ((ui32RegValue & 0xFFFFFFFFU) == 0x0) + { + /* Wait for Sidekick/Jones to signal IDLE including + * the Garten Wrapper if there is no debugger attached + * (TxVECINT_BHALT = 0x0) */ + eError = RGXPollReg32(hPrivate, + RGX_CR_JONES_IDLE, + RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_AXI2IMG_EN), + RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_AXI2IMG_EN)); + if (eError != PVRSRV_OK) return eError; + } + } + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxsyncutils.c b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxsyncutils.c new file mode 100644 index 000000000000..429aeaed2f92 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxsyncutils.c @@ -0,0 +1,186 @@ +/*************************************************************************/ /*! +@File rgxsyncutils.c +@Title RGX Sync Utilities +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX Sync helper functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/
+#include "rgxsyncutils.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "allocmem.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+
+//#define TA3D_CHECKPOINT_DEBUG
+
+#if defined(TA3D_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+static
+void _DebugSyncValues(IMG_UINT32 *pui32UpdateValues,
+                      IMG_UINT32 ui32Count)
+{
+	IMG_UINT32 iii;
+	IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32UpdateValues;
+
+	for (iii = 0; iii < ui32Count; iii++)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+		pui32Tmp++;
+	}
+}
+
+void _DebugSyncCheckpoints(PSYNC_CHECKPOINT *apsSyncCheckpoints,
+                           IMG_UINT32 ui32Count)
+{
+	IMG_UINT32 ii;
+	for (ii = 0; ii < ui32Count; ii++)
+	{
+		PSYNC_CHECKPOINT psNextCheckpoint = *(apsSyncCheckpoints + ii);
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFenceTASyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint));
+	}
+}
+#else
+#define CHKPT_DBG(X)
+#endif
+
+
+PVRSRV_ERROR RGXSyncAppendTimelineUpdate(IMG_UINT32 ui32FenceTimelineUpdateValue,
+                                         SYNC_ADDR_LIST *psSyncList,
+                                         SYNC_ADDR_LIST *psPRSyncList,
+                                         PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync,
+                                         RGX_SYNC_DATA *psSyncData,
+                                         IMG_BOOL bKick3D)
+{
+	IMG_UINT32 *pui32TimelineUpdateWOff = NULL;
+	IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+
+	IMG_UINT32 ui32ClientUpdateValueCount = psSyncData->ui32ClientUpdateValueCount;
+
+	/* Space for original client updates, and the one new update */
+	size_t uiUpdateSize = sizeof(*pui32IntAllocatedUpdateValues) * (ui32ClientUpdateValueCount + 1);
+
+	if (!bKick3D)
+	{
+		/* Additional space for one PR update, only the newest one */
+		uiUpdateSize += sizeof(*pui32IntAllocatedUpdateValues) * 1;
+	}
+
+	CHKPT_DBG((PVR_DBG_ERROR, "%s: About to allocate
memory to hold updates in pui32IntAllocatedUpdateValues(<%p>)", __func__, \ + (void*)pui32IntAllocatedUpdateValues)); + + /* Allocate memory to hold the list of update values (including our timeline update) */ + pui32IntAllocatedUpdateValues = OSAllocMem(uiUpdateSize); + if (!pui32IntAllocatedUpdateValues) + { + /* Failed to allocate memory */ + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xcc, uiUpdateSize); + pui32TimelineUpdateWOff = pui32IntAllocatedUpdateValues; + + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Copying %d %s update values into pui32IntAllocatedUpdateValues(<%p>)", __func__, \ + ui32ClientUpdateValueCount, bKick3D ? "TA/3D" : "TA/PR", (void*)pui32IntAllocatedUpdateValues)); + /* Copy the update values into the new memory, then append our timeline update value */ + OSCachedMemCopy(pui32TimelineUpdateWOff, psSyncData->paui32ClientUpdateValue, ui32ClientUpdateValueCount * sizeof(*psSyncData->paui32ClientUpdateValue)); + +#if defined(TA3D_CHECKPOINT_DEBUG) + _DebugSyncValues(pui32TimelineUpdateWOff, ui32ClientUpdateValueCount); +#endif + + pui32TimelineUpdateWOff += ui32ClientUpdateValueCount; + } + + /* Now set the additional update value and append the timeline sync prim addr to either the + * render context 3D (or TA) update list + */ + CHKPT_DBG((PVR_DBG_ERROR, "%s: Appending the additional update value (0x%x) to psRenderContext->sSyncAddrList%sUpdate...", __func__, \ + ui32FenceTimelineUpdateValue, bKick3D ? 
"TA/3D" : "TA/PR")); + + /* Append the TA/3D update */ + { + *pui32TimelineUpdateWOff++ = ui32FenceTimelineUpdateValue; + psSyncData->ui32ClientUpdateValueCount++; + psSyncData->ui32ClientUpdateCount++; + SyncAddrListAppendSyncPrim(psSyncList, psFenceTimelineUpdateSync); + + if (!psSyncData->pauiClientUpdateUFOAddress) + { + psSyncData->pauiClientUpdateUFOAddress = psSyncList->pasFWAddrs; + } + /* Update paui32ClientUpdateValue to point to our new list of update values */ + psSyncData->paui32ClientUpdateValue = pui32IntAllocatedUpdateValues; + +#if defined(TA3D_CHECKPOINT_DEBUG) + _DebugSyncValues(pui32IntAllocatedUpdateValues, psSyncData->ui32ClientUpdateValueCount); +#endif + } + + if (!bKick3D) + { + /* Use the sSyncAddrList3DUpdate for PR (as it doesn't have one of its own) */ + *pui32TimelineUpdateWOff++ = ui32FenceTimelineUpdateValue; + psSyncData->ui32ClientPRUpdateValueCount = 1; + psSyncData->ui32ClientPRUpdateCount = 1; + SyncAddrListAppendSyncPrim(psPRSyncList, psFenceTimelineUpdateSync); + + if (!psSyncData->pauiClientPRUpdateUFOAddress) + { + psSyncData->pauiClientPRUpdateUFOAddress = psPRSyncList->pasFWAddrs; + } + /* Update paui32ClientPRUpdateValue to point to our new list of update values */ + psSyncData->paui32ClientPRUpdateValue = &pui32IntAllocatedUpdateValues[psSyncData->ui32ClientUpdateValueCount]; + +#if defined(TA3D_CHECKPOINT_DEBUG) + _DebugSyncValues(psSyncData->paui32ClientPRUpdateValue, psSyncData->ui32ClientPRUpdateValueCount); +#endif + } + + /* Do not free the old psSyncData->ui32ClientUpdateValueCount, + * as it was constant data passed through the bridge down to PVRSRVRGXKickTA3DKM() */ + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxta3d.c b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxta3d.c new file mode 100644 index 000000000000..c10a4d44c483 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxta3d.c @@ -0,0 +1,5144 @@ 
+/*************************************************************************/ /*! +@File +@Title RGX TA/3D routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX TA/3D routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +/* for the offsetof macro */ +#if defined(LINUX) +#include +#else +#include +#endif + +#include "pdump_km.h" +#include "pvr_debug.h" +#include "rgxutils.h" +#include "rgxfwutils.h" +#include "rgxta3d.h" +#include "rgxmem.h" +#include "allocmem.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "osfunc.h" +#include "pvrsrv.h" +#include "rgx_memallocflags.h" +#include "rgxccb.h" +#include "rgxhwperf.h" +#include "ospvr_gputrace.h" +#include "rgxsyncutils.h" +#include "htbuffer.h" + +#include "rgxdefs_km.h" +#include "rgx_fwif_km.h" +#include "physmem.h" +#include "sync_server.h" +#include "sync_internal.h" +#include "sync.h" +#include "process_stats.h" + +#include "rgxpmdefs.h" + +#if defined(SUPPORT_BUFFER_SYNC) +#include "pvr_buffer_sync.h" +#endif + +#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) +#include "validation_soc.h" +#endif + +#include "sync_checkpoint.h" +#include "sync_checkpoint_internal.h" + +#if defined(SUPPORT_PDVFS) +#include "rgxpdvfs.h" +#endif + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +#include "rgxworkest.h" + +#define HASH_CLEAN_LIMIT 6 +#endif + +/* Enable this to dump the compiled list of UFOs prior to kick call */ +#define ENABLE_TA3D_UFO_DUMP 0 + +//#define TA3D_CHECKPOINT_DEBUG + +#if defined(TA3D_CHECKPOINT_DEBUG) +#define CHKPT_DBG(X) PVR_DPF(X) +static INLINE +void _DebugSyncValues(const IMG_CHAR 
*pszFunction, + const IMG_UINT32 *pui32UpdateValues, + const IMG_UINT32 ui32Count) +{ + IMG_UINT32 i; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32UpdateValues; + + for (i = 0; i < ui32Count; i++) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", pszFunction, i, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } +} + +static INLINE +void _DebugSyncCheckpoints(const IMG_CHAR *pszFunction, + const IMG_CHAR *pszDMName, + const PSYNC_CHECKPOINT *apsSyncCheckpoints, + const IMG_UINT32 ui32Count) +{ + IMG_UINT32 i; + + for (i = 0; i < ui32Count; i++) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFence%sSyncCheckpoints[%d]=<%p>", pszFunction, pszDMName, i, *(apsSyncCheckpoints + i))); + } +} + +#else +#define CHKPT_DBG(X) +#endif + +/* define the number of commands required to be set up by the CCB helper */ +/* 1 command for the TA */ +#define CCB_CMD_HELPER_NUM_TA_COMMANDS 1 +/* Up to 3 commands for the 3D (partial render fence, partial render, and render) */ +#define CCB_CMD_HELPER_NUM_3D_COMMANDS 3 + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +#define WORKEST_CYCLES_PREDICTION_GET(x) ((x).ui64CyclesPrediction) +#else +#define WORKEST_CYCLES_PREDICTION_GET(x) (NO_CYCEST) +#endif + +typedef struct { + DEVMEM_MEMDESC *psContextStateMemDesc; + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; + IMG_UINT32 ui32Priority; +} RGX_SERVER_RC_TA_DATA; + +typedef struct { + DEVMEM_MEMDESC *psContextStateMemDesc; + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; + IMG_UINT32 ui32Priority; +} RGX_SERVER_RC_3D_DATA; + +struct _RGX_SERVER_RENDER_CONTEXT_ { + /* this lock protects usage of the render context. 
+ * it ensures only one kick is being prepared and/or submitted on + * this render context at any time + */ + POS_LOCK hLock; + RGX_CCB_CMD_HELPER_DATA asTACmdHelperData[CCB_CMD_HELPER_NUM_TA_COMMANDS]; + RGX_CCB_CMD_HELPER_DATA as3DCmdHelperData[CCB_CMD_HELPER_NUM_3D_COMMANDS]; + PVRSRV_DEVICE_NODE *psDeviceNode; + DEVMEM_MEMDESC *psFWRenderContextMemDesc; + RGX_SERVER_RC_TA_DATA sTAData; + RGX_SERVER_RC_3D_DATA s3DData; + IMG_UINT32 ui32CleanupStatus; +#define RC_CLEANUP_TA_COMPLETE (1 << 0) +#define RC_CLEANUP_3D_COMPLETE (1 << 1) + DLLIST_NODE sListNode; + SYNC_ADDR_LIST sSyncAddrListTAFence; + SYNC_ADDR_LIST sSyncAddrListTAUpdate; + SYNC_ADDR_LIST sSyncAddrList3DFence; + SYNC_ADDR_LIST sSyncAddrList3DUpdate; + ATOMIC_T hIntJobRef; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + WORKEST_HOST_DATA sWorkEstData; +#endif +#if defined(SUPPORT_BUFFER_SYNC) + struct pvr_buffer_sync_context *psBufferSyncContext; +#endif +}; + + +/* + Static functions used by render context code +*/ + +static +PVRSRV_ERROR _DestroyTAContext(RGX_SERVER_RC_TA_DATA *psTAData, + PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + + /* Check if the FW has finished with this resource ... */ + eError = RGXFWRequestCommonContextCleanUp(psDeviceNode, + psTAData->psServerCommonContext, + RGXFWIF_DM_GEOM, + PDUMP_FLAGS_NONE); + if (eError == PVRSRV_ERROR_RETRY) + { + return eError; + } + else if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + + /* ... 
it has so we can free its resources */ + FWCommonContextFree(psTAData->psServerCommonContext); + DevmemFwUnmapAndFree(psDeviceNode->pvDevice, psTAData->psContextStateMemDesc); + psTAData->psServerCommonContext = NULL; + return PVRSRV_OK; +} + +static +PVRSRV_ERROR _Destroy3DContext(RGX_SERVER_RC_3D_DATA *ps3DData, + PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + + /* Check if the FW has finished with this resource ... */ + eError = RGXFWRequestCommonContextCleanUp(psDeviceNode, + ps3DData->psServerCommonContext, + RGXFWIF_DM_3D, + PDUMP_FLAGS_NONE); + if (eError == PVRSRV_ERROR_RETRY) + { + return eError; + } + else if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + + /* ... it has so we can free its resources */ + FWCommonContextFree(ps3DData->psServerCommonContext); + DevmemFwUnmapAndFree(psDeviceNode->pvDevice, ps3DData->psContextStateMemDesc); + ps3DData->psServerCommonContext = NULL; + return PVRSRV_OK; +} + +static void _RGXDumpPMRPageList(DLLIST_NODE *psNode) +{ + RGX_PMR_NODE *psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock); + PVRSRV_ERROR eError; + + eError = PMRDumpPageList(psPMRNode->psPMR, + RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Error (%s) printing pmr %p", + PVRSRVGetErrorString(eError), + psPMRNode->psPMR)); + } +} + +IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList) +{ + DLLIST_NODE *psNode, *psNext; + + PVR_LOG(("Freelist FWAddr 0x%08x, ID = %d, CheckSum 0x%016" IMG_UINT64_FMTSPECx, + psFreeList->sFreeListFWDevVAddr.ui32Addr, + psFreeList->ui32FreelistID, + psFreeList->ui64FreelistChecksum)); + + /* Dump Init FreeList page list */ + PVR_LOG((" Initial Memory block")); + dllist_foreach_node(&psFreeList->sMemoryBlockInitHead, psNode, psNext) + { + _RGXDumpPMRPageList(psNode); + } + + /* Dump Grow FreeList page list */ + 
	PVR_LOG(("  Grow Memory blocks"));
	dllist_foreach_node(&psFreeList->sMemoryBlockHead, psNode, psNext)
	{
		_RGXDumpPMRPageList(psNode);
	}

	return IMG_TRUE;
}

/*
 * Consistency-check the device-visible contents of a freelist.
 *
 * Reads ui32NumOfPagesToCheck page entries back from the freelist PMR,
 * scans them for duplicate entries, and folds them into a 64-bit checksum
 * (XOR of entries in the high word, sum in the low word) written to
 * *pui64CalculatedCheckSum. If ui64ExpectedCheckSum is non-zero it is
 * compared against the calculated value. Any inconsistency is logged and
 * trips a PVR_ASSERT. Under NO_HARDWARE this is a no-op (checksum 0),
 * since the pdump stream already contains all the information.
 *
 * NOTE: the duplicate scan is O(n^2) in the number of pages — this is a
 * debug-only validation path (gated by psFreeList->bCheckFreelist at the
 * call sites visible in this file).
 */
static void _CheckFreelist(RGX_FREELIST *psFreeList,
                           IMG_UINT32 ui32NumOfPagesToCheck,
                           IMG_UINT64 ui64ExpectedCheckSum,
                           IMG_UINT64 *pui64CalculatedCheckSum)
{
#if defined(NO_HARDWARE)
	/* No checksum needed as we have all information in the pdumps */
	PVR_UNREFERENCED_PARAMETER(psFreeList);
	PVR_UNREFERENCED_PARAMETER(ui32NumOfPagesToCheck);
	PVR_UNREFERENCED_PARAMETER(ui64ExpectedCheckSum);
	*pui64CalculatedCheckSum = 0;
#else
	PVRSRV_ERROR eError;
	size_t uiNumBytes;
	IMG_UINT8* pui8Buffer;
	IMG_UINT32* pui32Buffer;
	IMG_UINT32 ui32CheckSumAdd = 0;
	IMG_UINT32 ui32CheckSumXor = 0;
	IMG_UINT32 ui32Entry;
	IMG_UINT32 ui32Entry2;
	IMG_BOOL bFreelistBad = IMG_FALSE;

	*pui64CalculatedCheckSum = 0;

	PVR_ASSERT(ui32NumOfPagesToCheck <= (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages));

	/* Allocate Buffer of the size of the freelist */
	pui8Buffer = OSAllocMem(ui32NumOfPagesToCheck * sizeof(IMG_UINT32));
	if (pui8Buffer == NULL)
	{
		PVR_LOG(("%s: Failed to allocate buffer to check freelist %p!",
				__func__, psFreeList));
		PVR_ASSERT(0);
		return;
	}

	/* Copy freelist content into Buffer.
	 * The read offset skips the unused top of the freelist area (max pages
	 * minus the currently populated current+ready pages), rounded down to
	 * the PM freelist base-address alignment. */
	eError = PMR_ReadBytes(psFreeList->psFreeListPMR,
					psFreeList->uiFreeListPMROffset +
						(((psFreeList->ui32MaxFLPages -
							psFreeList->ui32CurrentFLPages - psFreeList->ui32ReadyFLPages) * sizeof(IMG_UINT32)) &
						~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1)),
					pui8Buffer,
					ui32NumOfPagesToCheck * sizeof(IMG_UINT32),
					&uiNumBytes);
	if (eError != PVRSRV_OK)
	{
		OSFreeMem(pui8Buffer);
		PVR_LOG(("%s: Failed to get freelist data for freelist %p!",
				__func__, psFreeList));
		PVR_ASSERT(0);
		return;
	}

	PVR_ASSERT(uiNumBytes == ui32NumOfPagesToCheck * sizeof(IMG_UINT32));

	/* Generate checksum (skipping the first page if not allocated) */
	pui32Buffer = (IMG_UINT32 *)pui8Buffer;
	/* When the lowest 4K page was never allocated (no-grow freelist, see
	 * bFirstPageMissing in RGXGrowFreeList), entry 0 is not valid data. */
	ui32Entry = ((psFreeList->ui32GrowFLPages == 0  &&  psFreeList->ui32CurrentFLPages > 1) ? 1 : 0);
	for (/*ui32Entry*/ ; ui32Entry < ui32NumOfPagesToCheck; ui32Entry++)
	{
		ui32CheckSumAdd += pui32Buffer[ui32Entry];
		ui32CheckSumXor ^= pui32Buffer[ui32Entry];

		/* Check for double entries */
		for (ui32Entry2 = ui32Entry+1; ui32Entry2 < ui32NumOfPagesToCheck; ui32Entry2++)
		{
			if (pui32Buffer[ui32Entry] == pui32Buffer[ui32Entry2])
			{
				PVR_LOG(("%s: Freelist consistency failure: FW addr: 0x%08X, Double entry found 0x%08x on idx: %d and %d of %d",
						__func__,
						psFreeList->sFreeListFWDevVAddr.ui32Addr,
						pui32Buffer[ui32Entry2],
						ui32Entry,
						ui32Entry2,
						psFreeList->ui32CurrentFLPages));
				bFreelistBad = IMG_TRUE;
				break;
			}
		}
	}

	OSFreeMem(pui8Buffer);

	/* Check the calculated checksum against the expected checksum... */
	*pui64CalculatedCheckSum = ((IMG_UINT64)ui32CheckSumXor << 32) | ui32CheckSumAdd;

	if (ui64ExpectedCheckSum != 0  &&  ui64ExpectedCheckSum != *pui64CalculatedCheckSum)
	{
		PVR_LOG(("%s: Checksum mismatch for freelist %p!  Expected 0x%016llx calculated 0x%016llx",
				__func__, psFreeList, ui64ExpectedCheckSum, *pui64CalculatedCheckSum));
		bFreelistBad = IMG_TRUE;
	}

	if (bFreelistBad)
	{
		PVR_LOG(("%s: Sleeping for ever!", __func__));
		PVR_ASSERT(!bFreelistBad);
	}
#endif
}


/*
 * Function to work out the number of freelist pages to reserve for growing
 * within the FW without having to wait for the host to progress a grow
 * request.
 *
 * The number of pages must be a multiple of 4 to align the PM addresses
 * for the initial freelist allocation and also be less than the grow size.
 *
 * If the threshold or grow size means less than 4 pages, then the feature
 * is not used.
+ */ +static IMG_UINT32 _CalculateFreelistReadyPages(RGX_FREELIST *psFreeList, + IMG_UINT32 ui32FLPages) +{ + IMG_UINT32 ui32ReadyFLPages = ((ui32FLPages * psFreeList->ui32GrowThreshold) / 100) & + ~((RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE/sizeof(IMG_UINT32))-1); + + if (ui32ReadyFLPages > psFreeList->ui32GrowFLPages) + { + ui32ReadyFLPages = psFreeList->ui32GrowFLPages; + } + + return ui32ReadyFLPages; +} + + +PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, + IMG_UINT32 ui32NumPages, + PDLLIST_NODE pListHeader) +{ + RGX_PMR_NODE *psPMRNode; + IMG_DEVMEM_SIZE_T uiSize; + IMG_UINT32 ui32MappingTable = 0; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_DEVMEM_SIZE_T uiLength; + IMG_DEVMEM_SIZE_T uistartPage; + PVRSRV_ERROR eError; + static const IMG_CHAR szAllocName[] = "Free List"; + + /* Are we allowed to grow ? */ + if (psFreeList->ui32MaxFLPages - (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) < ui32NumPages) + { + PVR_DPF((PVR_DBG_WARNING, + "Freelist [0x%p]: grow by %u pages denied. " + "Max PB size reached (current pages %u+%u/%u)", + psFreeList, + ui32NumPages, + psFreeList->ui32CurrentFLPages, + psFreeList->ui32ReadyFLPages, + psFreeList->ui32MaxFLPages)); + return PVRSRV_ERROR_PBSIZE_ALREADY_MAX; + } + + /* Allocate kernel memory block structure */ + psPMRNode = OSAllocMem(sizeof(*psPMRNode)); + if (psPMRNode == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to allocate host data structure", + __func__)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto ErrorAllocHost; + } + + /* + * Lock protects simultaneous manipulation of: + * - the memory block list + * - the freelist's ui32CurrentFLPages + */ + OSLockAcquire(psFreeList->psDevInfo->hLockFreeList); + + + /* + * The PM never takes the last page in a freelist, so if this block + * of pages is the first one and there is no ability to grow, then + * we can skip allocating one 4K page for the lowest entry. 
+ */ + if (OSGetPageSize() > RGX_BIF_PM_PHYSICAL_PAGE_SIZE) + { + /* + * Allocation size will be rounded up to the OS page size, + * any attempt to change it a bit now will be invalidated later. + */ + psPMRNode->bFirstPageMissing = IMG_FALSE; + } + else + { + psPMRNode->bFirstPageMissing = (psFreeList->ui32GrowFLPages == 0 && ui32NumPages > 1); + } + + psPMRNode->ui32NumPages = ui32NumPages; + psPMRNode->psFreeList = psFreeList; + + /* Allocate Memory Block */ + PDUMPCOMMENT("Allocate PB Block (Pages %08X)", ui32NumPages); + uiSize = (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE; + if (psPMRNode->bFirstPageMissing) + { + uiSize -= RGX_BIF_PM_PHYSICAL_PAGE_SIZE; + } + eError = PhysmemNewRamBackedPMR(NULL, + psFreeList->psDevInfo->psDeviceNode, + uiSize, + uiSize, + 1, + 1, + &ui32MappingTable, + RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, + PVRSRV_MEMALLOCFLAG_GPU_READABLE, + sizeof(szAllocName), + szAllocName, + psFreeList->ownerPid, + &psPMRNode->psPMR, + PDUMP_NONE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate PB block of size: 0x%016" IMG_UINT64_FMTSPECX, + __func__, + (IMG_UINT64)uiSize)); + goto ErrorBlockAlloc; + } + + /* Zeroing physical pages pointed by the PMR */ + if (psFreeList->psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST) + { + eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to zero PMR %p of freelist %p (%s)", + __func__, + psPMRNode->psPMR, + psFreeList, + PVRSRVGetErrorString(eError))); + PVR_ASSERT(0); + } + } + + uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32); + uistartPage = (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psPMRNode->ui32NumPages); + uiOffset = psFreeList->uiFreeListPMROffset + ((uistartPage * sizeof(IMG_UINT32)) & ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1)); + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + + eError = 
RIWritePMREntryWithOwnerKM(psPMRNode->psPMR, + psFreeList->ownerPid); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: call to RIWritePMREntryWithOwnerKM failed (%s)", + __func__, + PVRSRVGetErrorString(eError))); + } + + /* Attach RI information */ + eError = RIWriteMEMDESCEntryKM(psPMRNode->psPMR, + OSStringNLength(szAllocName, DEVMEM_ANNOTATION_MAX_LEN), + szAllocName, + 0, + uiSize, + IMG_FALSE, + IMG_FALSE, + &psPMRNode->hRIHandle); + PVR_LOG_IF_ERROR(eError, "RIWriteMEMDESCEntryKM"); + +#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + + /* write Freelist with Memory Block physical addresses */ + eError = PMRWritePMPageList( + /* Target PMR, offset, and length */ + psFreeList->psFreeListPMR, + (psPMRNode->bFirstPageMissing ? uiOffset + sizeof(IMG_UINT32) : uiOffset), + (psPMRNode->bFirstPageMissing ? uiLength - sizeof(IMG_UINT32) : uiLength), + /* Referenced PMR, and "page" granularity */ + psPMRNode->psPMR, + RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, + &psPMRNode->psPageList); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to write pages of Node %p", + __func__, + psPMRNode)); + goto ErrorPopulateFreelist; + } + +#if defined(SUPPORT_SHADOW_FREELISTS) + /* Copy freelist memory to shadow freelist */ + { + const IMG_UINT32 ui32FLMaxSize = psFreeList->ui32MaxFLPages * sizeof(IMG_UINT32); + const IMG_UINT32 ui32MapSize = ui32FLMaxSize * 2; + const IMG_UINT32 ui32CopyOffset = uiOffset - psFreeList->uiFreeListPMROffset; + IMG_BYTE *pFLMapAddr; + size_t uiNumBytes; + PVRSRV_ERROR res; + IMG_HANDLE hMapHandle; + + /* Map both the FL and the shadow FL */ + res = PMRAcquireKernelMappingData(psFreeList->psFreeListPMR, psFreeList->uiFreeListPMROffset, ui32MapSize, + (void**) &pFLMapAddr, &uiNumBytes, &hMapHandle); + if (res != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map freelist (ID=%d)", + __func__, + psFreeList->ui32FreelistID)); + goto ErrorPopulateFreelist; + } + + /* Copy only the newly added memory */ + 
memcpy(pFLMapAddr + ui32FLMaxSize + ui32CopyOffset, pFLMapAddr + ui32CopyOffset , uiLength); + +#if defined(PDUMP) + PDUMPCOMMENT("Initialize shadow freelist"); + + /* Translate memcpy to pdump */ + { + IMG_DEVMEM_OFFSET_T uiCurrOffset; + + for (uiCurrOffset = uiOffset; (uiCurrOffset - uiOffset) < uiLength; uiCurrOffset += sizeof(IMG_UINT32)) + { + PMRPDumpCopyMem32(psFreeList->psFreeListPMR, + uiCurrOffset + ui32FLMaxSize, + psFreeList->psFreeListPMR, + uiCurrOffset, + ":SYSMEM:$1", + PDUMP_FLAGS_CONTINUOUS); + } + } +#endif + + + res = PMRReleaseKernelMappingData(psFreeList->psFreeListPMR, hMapHandle); + + if (res != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to release freelist mapping (ID=%d)", + __func__, + psFreeList->ui32FreelistID)); + goto ErrorPopulateFreelist; + } + } +#endif + + /* We add It must be added to the tail, otherwise the freelist population won't work */ + dllist_add_to_head(pListHeader, &psPMRNode->sMemoryBlock); + + /* Update number of available pages */ + psFreeList->ui32CurrentFLPages += ui32NumPages; + + /* Update statistics (needs to happen before the ReadyFL calculation to also count those pages) */ + if (psFreeList->ui32NumHighPages < psFreeList->ui32CurrentFLPages) + { + psFreeList->ui32NumHighPages = psFreeList->ui32CurrentFLPages; + } + + /* Reserve a number ready pages to allow the FW to process OOM quickly and asynchronously request a grow. */ + psFreeList->ui32ReadyFLPages = _CalculateFreelistReadyPages(psFreeList, psFreeList->ui32CurrentFLPages); + psFreeList->ui32CurrentFLPages -= psFreeList->ui32ReadyFLPages; + + if (psFreeList->bCheckFreelist) + { + /* + * We can only calculate the freelist checksum when the list is full + * (e.g. at initial creation time). At other times the checksum cannot + * be calculated and has to be disabled for this freelist. 
+ */ + if ((psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) == ui32NumPages) + { + _CheckFreelist(psFreeList, ui32NumPages, 0, &psFreeList->ui64FreelistChecksum); + } + else + { + psFreeList->ui64FreelistChecksum = 0; + } + } + OSLockRelease(psFreeList->psDevInfo->hLockFreeList); + + PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p]: %s %u pages (pages=%u+%u/%u checksum=0x%016" IMG_UINT64_FMTSPECx "%s)", + psFreeList, + ((psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) == ui32NumPages ? "Create initial" : "Grow by"), + ui32NumPages, + psFreeList->ui32CurrentFLPages, + psFreeList->ui32ReadyFLPages, + psFreeList->ui32MaxFLPages, + psFreeList->ui64FreelistChecksum, + (psPMRNode->bFirstPageMissing ? " - lowest page not allocated" : ""))); + + return PVRSRV_OK; + + /* Error handling */ +ErrorPopulateFreelist: + PMRUnrefPMR(psPMRNode->psPMR); + +ErrorBlockAlloc: + OSFreeMem(psPMRNode); + OSLockRelease(psFreeList->psDevInfo->hLockFreeList); + +ErrorAllocHost: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; + +} + +static PVRSRV_ERROR RGXShrinkFreeList(PDLLIST_NODE pListHeader, + RGX_FREELIST *psFreeList) +{ + DLLIST_NODE *psNode; + RGX_PMR_NODE *psPMRNode; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32OldValue; + + /* + * Lock protects simultaneous manipulation of: + * - the memory block list + * - the freelist's ui32CurrentFLPages value + */ + PVR_ASSERT(pListHeader); + PVR_ASSERT(psFreeList); + PVR_ASSERT(psFreeList->psDevInfo); + PVR_ASSERT(psFreeList->psDevInfo->hLockFreeList); + + OSLockAcquire(psFreeList->psDevInfo->hLockFreeList); + + /* Get node from head of list and remove it */ + psNode = dllist_get_next_node(pListHeader); + if (psNode) + { + dllist_remove_node(psNode); + + psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock); + PVR_ASSERT(psPMRNode); + PVR_ASSERT(psPMRNode->psPMR); + PVR_ASSERT(psPMRNode->psFreeList); + + /* remove block from freelist list */ + + /* Unwrite Freelist with Memory Block physical addresses 
*/ + eError = PMRUnwritePMPageList(psPMRNode->psPageList); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to unwrite pages of Node %p", + __func__, + psPMRNode)); + PVR_ASSERT(IMG_FALSE); + } + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + + if (psPMRNode->hRIHandle) + { + PVRSRV_ERROR eError; + + eError = RIDeleteMEMDESCEntryKM(psPMRNode->hRIHandle); + PVR_LOG_IF_ERROR(eError, "RIDeleteMEMDESCEntryKM"); + } + +#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + + /* Free PMR (We should be the only one that holds a ref on the PMR) */ + eError = PMRUnrefPMR(psPMRNode->psPMR); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to free PB block %p (%s)", + __func__, + psPMRNode->psPMR, + PVRSRVGetErrorString(eError))); + PVR_ASSERT(IMG_FALSE); + } + + /* update available pages in freelist */ + ui32OldValue = psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages; + + /* + * Deallocated pages should first be deducted from ReadyPages bank, once + * there are no more left, start deducting them from CurrentPage bank. + */ + if (psPMRNode->ui32NumPages > psFreeList->ui32ReadyFLPages) + { + psFreeList->ui32CurrentFLPages -= psPMRNode->ui32NumPages - psFreeList->ui32ReadyFLPages; + psFreeList->ui32ReadyFLPages = 0; + } + else + { + psFreeList->ui32ReadyFLPages -= psPMRNode->ui32NumPages; + } + + /* check underflow */ + PVR_ASSERT(ui32OldValue > (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages)); + + PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p]: shrink by %u pages (current pages %u/%u)", + psFreeList, + psPMRNode->ui32NumPages, + psFreeList->ui32CurrentFLPages, + psFreeList->ui32MaxFLPages)); + + OSFreeMem(psPMRNode); + } + else + { + PVR_DPF((PVR_DBG_WARNING, + "Freelist [0x%p]: shrink denied. 
PB already at initial PB size (%u pages)", + psFreeList, + psFreeList->ui32InitFLPages)); + eError = PVRSRV_ERROR_PBSIZE_ALREADY_MIN; + } + + OSLockRelease(psFreeList->psDevInfo->hLockFreeList); + + return eError; +} + +static RGX_FREELIST *FindFreeList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FreelistID) +{ + DLLIST_NODE *psNode, *psNext; + RGX_FREELIST *psFreeList = NULL; + + OSLockAcquire(psDevInfo->hLockFreeList); + + dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext) + { + RGX_FREELIST *psThisFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode); + + if (psThisFreeList->ui32FreelistID == ui32FreelistID) + { + psFreeList = psThisFreeList; + break; + } + } + + OSLockRelease(psDevInfo->hLockFreeList); + return psFreeList; +} + +void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32FreelistID) +{ + RGX_FREELIST *psFreeList = NULL; + RGXFWIF_KCCB_CMD s3DCCBCmd; + IMG_UINT32 ui32GrowValue; + PVRSRV_ERROR eError; + + PVR_ASSERT(psDevInfo); + + psFreeList = FindFreeList(psDevInfo, ui32FreelistID); + if (psFreeList == NULL) + { + /* Should never happen */ + PVR_DPF((PVR_DBG_ERROR, + "FreeList Lookup for FreeList ID 0x%08x failed (Populate)", + ui32FreelistID)); + PVR_ASSERT(IMG_FALSE); + + return; + } + + /* Since the FW made the request, it has already consumed the ready pages, update the host struct */ + psFreeList->ui32CurrentFLPages += psFreeList->ui32ReadyFLPages; + psFreeList->ui32ReadyFLPages = 0; + + + /* Try to grow the freelist */ + eError = RGXGrowFreeList(psFreeList, + psFreeList->ui32GrowFLPages, + &psFreeList->sMemoryBlockHead); + + if (eError == PVRSRV_OK) + { + /* Grow successful, return size of grow size */ + ui32GrowValue = psFreeList->ui32GrowFLPages; + + psFreeList->ui32NumGrowReqByFW++; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + /* Update Stats */ + PVRSRVStatsUpdateFreelistStats(0, + 1, /* Add 1 to the appropriate counter (Requests by FW) */ + psFreeList->ui32InitFLPages, + psFreeList->ui32NumHighPages, 
+ psFreeList->ownerPid); + +#endif + + } + else + { + /* Grow failed */ + ui32GrowValue = 0; + PVR_DPF((PVR_DBG_ERROR, + "Grow for FreeList %p failed (%s)", + psFreeList, + PVRSRVGetErrorString(eError))); + } + + /* send feedback */ + s3DCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE; + s3DCCBCmd.uCmdData.sFreeListGSData.sFreeListFWDevVAddr.ui32Addr = psFreeList->sFreeListFWDevVAddr.ui32Addr; + s3DCCBCmd.uCmdData.sFreeListGSData.ui32DeltaPages = ui32GrowValue; + s3DCCBCmd.uCmdData.sFreeListGSData.ui32NewPages = psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages; + s3DCCBCmd.uCmdData.sFreeListGSData.ui32ReadyPages = psFreeList->ui32ReadyFLPages; + + + PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p]: Grow pages=%u, new pages=%u, ready pages=%u, counter=%d", + psFreeList, + ui32GrowValue, + s3DCCBCmd.uCmdData.sFreeListGSData.ui32NewPages, + s3DCCBCmd.uCmdData.sFreeListGSData.ui32ReadyPages, + psFreeList->ui32NumGrowReqByFW)); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + NULL, + RGXFWIF_DM_3D, + &s3DCCBCmd, + 0, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + /* Kernel CCB should never fill up, as the FW is processing them right away */ + + PVR_ASSERT(eError == PVRSRV_OK); +} + +static void _RGXFreeListReconstruction(PDLLIST_NODE psNode) +{ + + PVRSRV_RGXDEV_INFO *psDevInfo; + RGX_FREELIST *psFreeList; + RGX_PMR_NODE *psPMRNode; + PVRSRV_ERROR eError; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_DEVMEM_SIZE_T uiLength; + IMG_UINT32 ui32StartPage; + + psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock); + psFreeList = psPMRNode->psFreeList; + PVR_ASSERT(psFreeList); + psDevInfo = psFreeList->psDevInfo; + PVR_ASSERT(psDevInfo); + + uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32); + ui32StartPage = (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psPMRNode->ui32NumPages); + uiOffset = 
psFreeList->uiFreeListPMROffset + ((ui32StartPage * sizeof(IMG_UINT32)) & ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1)); + + PMRUnwritePMPageList(psPMRNode->psPageList); + psPMRNode->psPageList = NULL; + eError = PMRWritePMPageList( + /* Target PMR, offset, and length */ + psFreeList->psFreeListPMR, + (psPMRNode->bFirstPageMissing ? uiOffset + sizeof(IMG_UINT32) : uiOffset), + (psPMRNode->bFirstPageMissing ? uiLength - sizeof(IMG_UINT32) : uiLength), + /* Referenced PMR, and "page" granularity */ + psPMRNode->psPMR, + RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, + &psPMRNode->psPageList); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error (%s) writing FL 0x%08x", + __func__, + PVRSRVGetErrorString(eError), + (IMG_UINT32)psFreeList->ui32FreelistID)); + } + + /* Zeroing physical pages pointed by the reconstructed freelist */ + if (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST) + { + eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to zero PMR %p of freelist %p (%s)", + __func__, + psPMRNode->psPMR, + psFreeList, + PVRSRVGetErrorString(eError))); + PVR_ASSERT(0); + } + } + + psFreeList->ui32CurrentFLPages += psPMRNode->ui32NumPages; +} + + +static PVRSRV_ERROR RGXReconstructFreeList(RGX_FREELIST *psFreeList) +{ + IMG_UINT32 ui32OriginalFLPages; + DLLIST_NODE *psNode, *psNext; + PVRSRV_ERROR eError; +#if !defined(PM_INTERACTIVE_MODE) + IMG_DEV_VIRTADDR sFreeListBaseDevVAddr; +#endif + + //PVR_DPF((PVR_DBG_ERROR, "FreeList RECONSTRUCTION: Reconstructing freelist %p (ID=%u)", psFreeList, psFreeList->ui32FreelistID)); + + /* Do the FreeList Reconstruction */ + ui32OriginalFLPages = psFreeList->ui32CurrentFLPages; + psFreeList->ui32CurrentFLPages = 0; + + /* Reconstructing Init FreeList pages */ + dllist_foreach_node(&psFreeList->sMemoryBlockInitHead, psNode, psNext) + { + _RGXFreeListReconstruction(psNode); + } + + /* Reconstructing 
Grow FreeList pages */ + dllist_foreach_node(&psFreeList->sMemoryBlockHead, psNode, psNext) + { + _RGXFreeListReconstruction(psNode); + } + + /* Ready pages are allocated but kept hidden until OOM occurs. */ + psFreeList->ui32CurrentFLPages -= psFreeList->ui32ReadyFLPages; + if (psFreeList->ui32CurrentFLPages != ui32OriginalFLPages) + { + PVR_ASSERT(psFreeList->ui32CurrentFLPages == ui32OriginalFLPages); + return PVRSRV_ERROR_FREELIST_RECONSTRUCTION_FAILED; + } + + { + RGXFWIF_FREELIST *psFWFreeList; + + /* Update firmware freelist structure */ + eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList); + if (eError != PVRSRV_OK) + { + return eError; + } + +#if defined(PM_INTERACTIVE_MODE) + psFWFreeList->ui32CurrentStackTop = psFWFreeList->ui32CurrentPages - 1; + psFWFreeList->ui32AllocatedPageCount = 0; + psFWFreeList->ui32AllocatedMMUPageCount = 0; +#else + sFreeListBaseDevVAddr = psFWFreeList->sFreeListBaseDevVAddr; + psFWFreeList->bUpdatePending = IMG_FALSE; + psFWFreeList->ui32UpdateNewPages = 0; + psFWFreeList->ui32UpdateNewReadyPages = 0; + psFWFreeList->sFreeListLastGrowDevVAddr.uiAddr = 0; +#endif + + DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); + } + +#if !defined(PM_INTERACTIVE_MODE) + /* Reset freelist state buffer */ + { + RGX_PM_FREELISTSTATE_BUFFER sFLState; + size_t uiNbBytes; + IMG_DEV_VIRTADDR sFLBaseAddr; + + eError = PMR_ReadBytes(psFreeList->psFreeListStatePMR, psFreeList->uiFreeListStatePMROffset, (IMG_UINT8*)&sFLState, sizeof(sFLState), &uiNbBytes); + + if (eError != PVRSRV_OK) + { + return eError; + } + + PVR_ASSERT(uiNbBytes == sizeof(sFLState)); + + sFLBaseAddr.uiAddr = (sFreeListBaseDevVAddr.uiAddr + + ((psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages) * sizeof(IMG_UINT32))) & + ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1); + /* Note: Freelist base address is packed shifted down. 
*/ + RGX_PM_FREELISTSTATE_BUFFER_SET_BASE_ADDR(sFLState, sFLBaseAddr.uiAddr >> RGX_PM_FREELISTSTATE_BASE_ADDR_ALIGNSHIFT); + RGX_PM_FREELISTSTATE_BUFFER_SET_STACK_PTR(sFLState, psFreeList->ui32CurrentFLPages - 1); + RGX_PM_FREELISTSTATE_BUFFER_SET_PAGE_STATUS(sFLState, 0); + RGX_PM_FREELISTSTATE_BUFFER_SET_MMUPAGE_STATUS(sFLState, 0); + + eError = PMR_WriteBytes(psFreeList->psFreeListStatePMR, psFreeList->uiFreeListStatePMROffset, (IMG_UINT8*)&sFLState, sizeof(sFLState), &uiNbBytes); + + if (eError != PVRSRV_OK) + { + return eError; + } + + PVR_ASSERT(uiNbBytes == sizeof(sFLState)); + } +#endif + + /* Check the Freelist checksum if required (as the list is fully populated) */ + if (psFreeList->bCheckFreelist) + { + IMG_UINT64 ui64CheckSum; + + _CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum); + } + + return eError; +} + + +void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32FreelistsCount, + IMG_UINT32 *paui32Freelists) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + DLLIST_NODE *psNode, *psNext; + IMG_UINT32 ui32Loop; + RGXFWIF_KCCB_CMD sTACCBCmd; +#if !defined(SUPPORT_SHADOW_FREELISTS) + DLLIST_NODE *psNodeHWRTData, *psNextHWRTData; + RGX_KM_HW_RT_DATASET *psKMHWRTDataSet; + RGXFWIF_HWRTDATA *psHWRTData; +#endif + IMG_UINT32 ui32FinalFreelistsCount = 0; + IMG_UINT32 aui32FinalFreelists[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT * 2]; /* Worst-case is double what we are sent */ + + PVR_ASSERT(psDevInfo != NULL); + PVR_ASSERT(ui32FreelistsCount <= RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT); + if (ui32FreelistsCount > RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT) + { + ui32FreelistsCount = RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT; + } + + //PVR_DPF((PVR_DBG_ERROR, "FreeList RECONSTRUCTION: %u freelist(s) requested for reconstruction", ui32FreelistsCount)); + + /* + * Initialise the response command (in case we don't find a freelist ID). 
+ * Also copy the list to the 'final' freelist array. + */ + sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE; + sTACCBCmd.uCmdData.sFreeListsReconstructionData.ui32FreelistsCount = ui32FreelistsCount; + + for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++) + { + sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] = paui32Freelists[ui32Loop] | + RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG; + aui32FinalFreelists[ui32Loop] = paui32Freelists[ui32Loop]; + } + + ui32FinalFreelistsCount = ui32FreelistsCount; + + /* + * The list of freelists we have been given for reconstruction will + * consist of local and global freelists (maybe MMU as well). Any + * local freelists should have their global list specified as well. + * There may be cases where the global freelist is not given (in + * cases of partial setups before a poll failure for example). To + * handle that we must first ensure every local freelist has a global + * freelist specified, otherwise we add that to the 'final' list. + * This final list of freelists is created in a first pass. + * + * Even with the global freelists listed, there may be other local + * freelists not listed, which are going to have their global freelist + * reconstructed. Therefore we have to find those freelists as well + * meaning we will have to iterate the entire list of freelists to + * find which must be reconstructed. This is the second pass. + */ + OSLockAcquire(psDevInfo->hLockFreeList); + dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext) + { + RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode); + IMG_BOOL bInList = IMG_FALSE; + IMG_BOOL bGlobalInList = IMG_FALSE; + + /* Check if this local freelist is in the list and ensure its global is too. 
*/ + if (psFreeList->ui32FreelistGlobalID != 0) + { + for (ui32Loop = 0; ui32Loop < ui32FinalFreelistsCount; ui32Loop++) + { + if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistID) + { + bInList = IMG_TRUE; + } + if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistGlobalID) + { + bGlobalInList = IMG_TRUE; + } + } + + if (bInList && !bGlobalInList) + { + aui32FinalFreelists[ui32FinalFreelistsCount] = psFreeList->ui32FreelistGlobalID; + ui32FinalFreelistsCount++; + } + } + } + dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext) + { + RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode); + IMG_BOOL bReconstruct = IMG_FALSE; + + /* + * Check if this freelist needs to be reconstructed (was it requested + * or is its global freelist going to be reconstructed)... + */ + for (ui32Loop = 0; ui32Loop < ui32FinalFreelistsCount; ui32Loop++) + { + if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistID || + aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistGlobalID) + { + bReconstruct = IMG_TRUE; + break; + } + } + + if (bReconstruct) + { + eError = RGXReconstructFreeList(psFreeList); + if (eError == PVRSRV_OK) + { +#if !defined(SUPPORT_SHADOW_FREELISTS) + /* Mark all HWRTData's of reconstructing local freelists as HWR (applies to TA/3D's not finished yet) */ + dllist_foreach_node(&psFreeList->sNodeHWRTDataHead, psNodeHWRTData, psNextHWRTData) + { + psKMHWRTDataSet = IMG_CONTAINER_OF(psNodeHWRTData, RGX_KM_HW_RT_DATASET, sNodeHWRTData); + eError = DevmemAcquireCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc, (void **)&psHWRTData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Devmem AcquireCpuVirtAddr Failed during Reconstructing of FreeList, FwMemDesc(%p),psHWRTData(%p)", + psKMHWRTDataSet->psHWRTDataFwMemDesc, + psHWRTData)); + continue; + } + + psHWRTData->eState = RGXFWIF_RTDATA_STATE_HWR; + psHWRTData->ui32HWRTDataFlags &= ~HWRTDATA_HAS_LAST_TA; + + 
DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc); + } +#endif + + /* Update the response for this freelist if it was specifically requested for reconstruction. */ + for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++) + { + if (paui32Freelists[ui32Loop] == psFreeList->ui32FreelistID) + { + /* Reconstruction of this requested freelist was successful... */ + sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] &= ~RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG; + break; + } + } + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "Reconstructing of FreeList %p failed (%s)", + psFreeList, + PVRSRVGetErrorString(eError))); + } + } + } + OSLockRelease(psDevInfo->hLockFreeList); + + /* Check that all freelists were found and reconstructed... */ + for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++) + { + PVR_ASSERT((sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] & + RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG) == 0); + } + + /* send feedback */ + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + NULL, + RGXFWIF_DM_GEOM, + &sTACCBCmd, + 0, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + /* Kernel CCB should never fill up, as the FW is processing them right away */ + PVR_ASSERT(eError == PVRSRV_OK); +} + +/* Create HWRTDataSet */ +PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEV_VIRTADDR psVHeapTableDevVAddr, + IMG_DEV_VIRTADDR sPMDataDevVAddr, + IMG_DEV_VIRTADDR sPMSecureDataDevVAddr, + RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS], + IMG_UINT32 ui32ScreenPixelMax, + IMG_UINT64 ui64PPPMultiSampleCtl, + IMG_UINT32 ui32TEStride, + IMG_DEV_VIRTADDR sTailPtrsDevVAddr, + IMG_UINT32 ui32TPCSize, + IMG_UINT32 ui32TEScreen, + IMG_UINT32 ui32TEAA, + IMG_UINT32 ui32TEMTILE1, + IMG_UINT32 ui32TEMTILE2, + IMG_UINT32 
ui32RgnStride, + IMG_UINT32 ui32ISPMergeLowerX, + IMG_UINT32 ui32ISPMergeLowerY, + IMG_UINT32 ui32ISPMergeUpperX, + IMG_UINT32 ui32ISPMergeUpperY, + IMG_UINT32 ui32ISPMergeScaleX, + IMG_UINT32 ui32ISPMergeScaleY, + IMG_UINT16 ui16MaxRTs, + RGX_KM_HW_RT_DATASET **ppsKMHWRTDataSet) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_UINT32 ui32Loop; + + /* KM cookie storing all the FW/HW data */ + RGX_KM_HW_RT_DATASET *psKMHWRTDataSet; + + /* local pointers for memory descriptors of FW allocations */ + DEVMEM_MEMDESC *psHWRTDataFwMemDesc = NULL; + DEVMEM_MEMDESC *psRTArrayFwMemDesc = NULL; + DEVMEM_MEMDESC *psRendersAccArrayFwMemDesc = NULL; + + /* local pointer for CPU-mapped [FW]HWRTData */ + RGXFWIF_HWRTDATA *psHWRTData = NULL; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + /* Prepare the HW RT DataSet struct */ + psKMHWRTDataSet = OSAllocZMem(sizeof(*psKMHWRTDataSet)); + if (psKMHWRTDataSet == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto AllocError; + } + + *ppsKMHWRTDataSet = psKMHWRTDataSet; + psKMHWRTDataSet->psDeviceNode = psDeviceNode; + + psDevInfo = psDeviceNode->pvDevice; + + /* + * This FW RT-Data is only mapped into kernel for initialisation. + * Otherwise this allocation is only used by the FW. 
+ * Therefore the GPU cache doesn't need coherency, + * and write-combine is suffice on the CPU side (WC buffer will be flushed at the first TA-kick) + */ + eError = DevmemFwAllocate( psDevInfo, + sizeof(RGXFWIF_HWRTDATA), + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE, + "FwHwRTData", + &psHWRTDataFwMemDesc ); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: DevmemAllocate for RGX_FWIF_HWRTDATA failed", + __func__)); + goto FWRTDataAllocateError; + } + + psKMHWRTDataSet->psHWRTDataFwMemDesc = psHWRTDataFwMemDesc; + eError = RGXSetFirmwareAddress( &psKMHWRTDataSet->sHWRTDataFwAddr, + psHWRTDataFwMemDesc, + 0, + RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", FWRTDataFwAddrError); + + eError = DevmemAcquireCpuVirtAddr(psHWRTDataFwMemDesc, + (void **)&psHWRTData); + PVR_LOG_GOTO_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWRTDataCpuMapError); + +#if defined(PM_INTERACTIVE_MODE) + psHWRTData->psVHeapTableDevVAddr = psVHeapTableDevVAddr; +#endif + + psHWRTData->sPMSecureRenderStateDevVAddr = sPMSecureDataDevVAddr; + +#if defined(PM_INTERACTIVE_MODE) + psHWRTData->sPMMListDevVAddr = sPMDataDevVAddr; +#else + psHWRTData->sPMRenderStateDevVAddr = sPMDataDevVAddr; +#endif + + psHWRTData->ui32ScreenPixelMax = ui32ScreenPixelMax; + psHWRTData->ui64PPPMultiSampleCtl = ui64PPPMultiSampleCtl; + psHWRTData->ui32TEStride = ui32TEStride; + psHWRTData->sTailPtrsDevVAddr = sTailPtrsDevVAddr; + psHWRTData->ui32TPCSize = ui32TPCSize; + psHWRTData->ui32TEScreen = ui32TEScreen; + psHWRTData->ui32TEAA = ui32TEAA; + psHWRTData->ui32TEMTILE1 = ui32TEMTILE1; + 
psHWRTData->ui32TEMTILE2 = ui32TEMTILE2; + psHWRTData->ui32RgnStride = ui32RgnStride; /* Region stride in Bytes */ + psHWRTData->ui32ISPMergeLowerX = ui32ISPMergeLowerX; + psHWRTData->ui32ISPMergeLowerY = ui32ISPMergeLowerY; + psHWRTData->ui32ISPMergeUpperX = ui32ISPMergeUpperX; + psHWRTData->ui32ISPMergeUpperY = ui32ISPMergeUpperY; + psHWRTData->ui32ISPMergeScaleX = ui32ISPMergeScaleX; + psHWRTData->ui32ISPMergeScaleY = ui32ISPMergeScaleY; + + OSLockAcquire(psDevInfo->hLockFreeList); + for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++) + { + psKMHWRTDataSet->apsFreeLists[ui32Loop] = apsFreeLists[ui32Loop]; + psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount++; + psHWRTData->apsFreeLists[ui32Loop].ui32Addr = psKMHWRTDataSet->apsFreeLists[ui32Loop]->sFreeListFWDevVAddr.ui32Addr; + /* invalid initial snapshot value, the snapshot is always taken during first kick + * and hence the value get replaced during the first kick anyway. So it's safe to set it 0. + */ + psHWRTData->aui32FreeListHWRSnapshot[ui32Loop] = 0; + psHWRTData->bRenderStateNeedsReset = IMG_FALSE; + } +#if !defined(SUPPORT_SHADOW_FREELISTS) + dllist_add_to_tail(&apsFreeLists[RGXFW_LOCAL_FREELIST]->sNodeHWRTDataHead, &(psKMHWRTDataSet->sNodeHWRTData)); +#endif + OSLockRelease(psDevInfo->hLockFreeList); + + { + RGXFWIF_RTA_CTL *psRTACtl = &psHWRTData->sRTACtl; + + psRTACtl->ui32RenderTargetIndex = 0; + psRTACtl->ui32ActiveRenderTargets = 0; + psRTACtl->sValidRenderTargets.ui32Addr = 0; + psRTACtl->sRTANumPartialRenders.ui32Addr = 0; + psRTACtl->ui32MaxRTs = (IMG_UINT32) ui16MaxRTs; + + if (ui16MaxRTs > 1) + { + /* Allocate memory for the checks */ + PDUMPCOMMENT("Allocate memory for shadow render target cache"); + eError = DevmemFwAllocate( psDevInfo, + ui16MaxRTs * sizeof(IMG_UINT32), + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + 
PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_UNCACHED| + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC, + "FwShadowRTCache", + &psRTArrayFwMemDesc ); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate %u bytes for render target array (%s)", + __func__, + ui16MaxRTs, PVRSRVGetErrorString(eError))); + goto FWAllocateRTArryError; + } + + psKMHWRTDataSet->psRTArrayFwMemDesc = psRTArrayFwMemDesc; + + eError = RGXSetFirmwareAddress( &psRTACtl->sValidRenderTargets, + psRTArrayFwMemDesc, + 0, + RFW_FWADDR_FLAG_NONE ); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", FWAllocateRTArryFwAddrError); + + /* Allocate memory for the checks */ + PDUMPCOMMENT("Allocate memory for tracking renders accumulation"); + eError = DevmemFwAllocate(psDevInfo, + ui16MaxRTs * sizeof(IMG_UINT32), + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_UNCACHED| + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC, + "FwRendersAccumulation", + &psRendersAccArrayFwMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate %u bytes for render target array (%s) (renders accumulation)", + __func__, + ui16MaxRTs, PVRSRVGetErrorString(eError))); + goto FWAllocateRTAccArryError; + } + + psKMHWRTDataSet->psRendersAccArrayFwMemDesc = psRendersAccArrayFwMemDesc; + + eError = RGXSetFirmwareAddress( &psRTACtl->sRTANumPartialRenders, + psRendersAccArrayFwMemDesc, + 0, + RFW_FWADDR_FLAG_NONE ); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:3", FWAllocRTAccArryFwAddrError); + } + } + +#if defined(PDUMP) + PDUMPCOMMENT("Dump HWRTData 0x%08X", psKMHWRTDataSet->sHWRTDataFwAddr.ui32Addr); + DevmemPDumpLoadMem(psKMHWRTDataSet->psHWRTDataFwMemDesc, 0, sizeof(*psHWRTData), PDUMP_FLAGS_CONTINUOUS); +#endif + + 
DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc); + return PVRSRV_OK; + +FWAllocRTAccArryFwAddrError: + DevmemFwUnmapAndFree(psDevInfo, psRendersAccArrayFwMemDesc); +FWAllocateRTAccArryError: + RGXUnsetFirmwareAddress(psRTArrayFwMemDesc); +FWAllocateRTArryFwAddrError: + DevmemFwUnmapAndFree(psDevInfo, psRTArrayFwMemDesc); +FWAllocateRTArryError: + OSLockAcquire(psDevInfo->hLockFreeList); + for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++) + { + PVR_ASSERT(psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount > 0); + psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount--; + } + OSLockRelease(psDevInfo->hLockFreeList); + DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc); +FWRTDataCpuMapError: + RGXUnsetFirmwareAddress(psKMHWRTDataSet->psHWRTDataFwMemDesc); +FWRTDataFwAddrError: + DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psHWRTDataFwMemDesc); +FWRTDataAllocateError: + *ppsKMHWRTDataSet = NULL; + OSFreeMem(psKMHWRTDataSet); + +AllocError: + return eError; +} + +/* Destroy HWRTDataSet */ +PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + PVRSRV_ERROR eError; + PRGXFWIF_HWRTDATA psHWRTData; + IMG_UINT32 ui32Loop; + + PVR_ASSERT(psKMHWRTDataSet); + + psDevInfo = psKMHWRTDataSet->psDeviceNode->pvDevice; + + eError = RGXSetFirmwareAddress(&psHWRTData, psKMHWRTDataSet->psHWRTDataFwMemDesc, 0, RFW_FWADDR_NOREF_FLAG); + PVR_RETURN_IF_ERROR(eError); + + /* Cleanup HWRTData */ + eError = RGXFWRequestHWRTDataCleanUp(psKMHWRTDataSet->psDeviceNode, psHWRTData); + + if (eError == PVRSRV_ERROR_RETRY) + { + return eError; + } + + if (psKMHWRTDataSet->psRTArrayFwMemDesc) + { + RGXUnsetFirmwareAddress(psKMHWRTDataSet->psRTArrayFwMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psRTArrayFwMemDesc); + } + + if (psKMHWRTDataSet->psRendersAccArrayFwMemDesc) + { + RGXUnsetFirmwareAddress(psKMHWRTDataSet->psRendersAccArrayFwMemDesc); + DevmemFwUnmapAndFree(psDevInfo, 
psKMHWRTDataSet->psRendersAccArrayFwMemDesc); + } + + /* decrease freelist refcount */ + OSLockAcquire(psDevInfo->hLockFreeList); + for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++) + { + PVR_ASSERT(psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount > 0); + psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount--; + } +#if !defined(SUPPORT_SHADOW_FREELISTS) + dllist_remove_node(&psKMHWRTDataSet->sNodeHWRTData); +#endif + OSLockRelease(psDevInfo->hLockFreeList); + + /* Freeing the memory has to happen _after_ removing the HWRTData from the freelist + * otherwise we risk traversing the freelist to find a pointer from a freed data structure */ + RGXUnsetFirmwareAddress(psKMHWRTDataSet->psHWRTDataFwMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psHWRTDataFwMemDesc); + + OSFreeMem(psKMHWRTDataSet); + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32MaxFLPages, + IMG_UINT32 ui32InitFLPages, + IMG_UINT32 ui32GrowFLPages, + IMG_UINT32 ui32GrowParamThreshold, + RGX_FREELIST *psGlobalFreeList, + IMG_BOOL bCheckFreelist, + IMG_DEV_VIRTADDR sFreeListBaseDevVAddr, + IMG_DEV_VIRTADDR sFreeListStateDevVAddr, + PMR *psFreeListPMR, + IMG_DEVMEM_OFFSET_T uiFreeListPMROffset, + PMR *psFreeListStatePMR, + IMG_DEVMEM_OFFSET_T uiFreeListStatePMROffset, + RGX_FREELIST **ppsFreeList) +{ + PVRSRV_ERROR eError; + RGXFWIF_FREELIST *psFWFreeList; + DEVMEM_MEMDESC *psFWFreelistMemDesc; + RGX_FREELIST *psFreeList; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + if (OSGetPageShift() > RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT) + { + IMG_UINT32 ui32Size, ui32NewInitFLPages, ui32NewMaxFLPages, ui32NewGrowFLPages; + + /* Round up number of FL pages to the next multiple of the OS page size */ + + ui32Size = ui32InitFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; + ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); + 
ui32NewInitFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; + + ui32Size = ui32GrowFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; + ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); + ui32NewGrowFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; + + ui32Size = ui32MaxFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; + ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); + ui32NewMaxFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; + + PVR_DPF((PVR_DBG_WARNING, "%s: Increased number of PB pages: Init %u -> %u, Grow %u -> %u, Max %u -> %u", + __func__, ui32InitFLPages, ui32NewInitFLPages, ui32GrowFLPages, ui32NewGrowFLPages, ui32MaxFLPages, ui32NewMaxFLPages)); + + ui32InitFLPages = ui32NewInitFLPages; + ui32GrowFLPages = ui32NewGrowFLPages; + ui32MaxFLPages = ui32NewMaxFLPages; + } + + /* Allocate kernel freelist struct */ + psFreeList = OSAllocZMem(sizeof(*psFreeList)); + if (psFreeList == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to allocate host data structure", + __func__)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto ErrorAllocHost; + } + + /* + * This FW FreeList context is only mapped into kernel for initialisation + * and reconstruction (at other times it is not mapped and only used by + * the FW. 
Therefore the GPU cache doesn't need coherency, and write-combine + * is suffice on the CPU side (WC buffer will be flushed at the first TA-kick) + */ + eError = DevmemFwAllocate(psDevInfo, + sizeof(*psFWFreeList), + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE, + "FwFreeList", + &psFWFreelistMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: DevmemAllocate for RGXFWIF_FREELIST failed", + __func__)); + goto FWFreeListAlloc; + } + + /* Initialise host data structures */ + psFreeList->psDevInfo = psDevInfo; + psFreeList->psFreeListPMR = psFreeListPMR; + psFreeList->uiFreeListPMROffset = uiFreeListPMROffset; + psFreeList->psFreeListStatePMR = psFreeListStatePMR; + psFreeList->uiFreeListStatePMROffset = uiFreeListStatePMROffset; + psFreeList->psFWFreelistMemDesc = psFWFreelistMemDesc; + eError = RGXSetFirmwareAddress(&psFreeList->sFreeListFWDevVAddr, psFWFreelistMemDesc, 0, RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", ErrorSetFwAddr); + + /* psFreeList->ui32FreelistID set below with lock... */ + psFreeList->ui32FreelistGlobalID = (psGlobalFreeList ? 
psGlobalFreeList->ui32FreelistID : 0); + psFreeList->ui32MaxFLPages = ui32MaxFLPages; + psFreeList->ui32InitFLPages = ui32InitFLPages; + psFreeList->ui32GrowFLPages = ui32GrowFLPages; + psFreeList->ui32CurrentFLPages = 0; + psFreeList->ui32ReadyFLPages = 0; + psFreeList->ui32GrowThreshold = ui32GrowParamThreshold; + psFreeList->ui64FreelistChecksum = 0; + psFreeList->ui32RefCount = 0; + psFreeList->bCheckFreelist = bCheckFreelist; + dllist_init(&psFreeList->sMemoryBlockHead); + dllist_init(&psFreeList->sMemoryBlockInitHead); +#if !defined(SUPPORT_SHADOW_FREELISTS) + dllist_init(&psFreeList->sNodeHWRTDataHead); +#endif + psFreeList->ownerPid = OSGetCurrentClientProcessIDKM(); + + + /* Add to list of freelists */ + OSLockAcquire(psDevInfo->hLockFreeList); + psFreeList->ui32FreelistID = psDevInfo->ui32FreelistCurrID++; + dllist_add_to_tail(&psDevInfo->sFreeListHead, &psFreeList->sNode); + OSLockRelease(psDevInfo->hLockFreeList); + + + /* Initialise FW data structure */ + eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList); + PVR_LOG_GOTO_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWFreeListCpuMap); + + { + const IMG_UINT32 ui32ReadyPages = _CalculateFreelistReadyPages(psFreeList, ui32InitFLPages); + + psFWFreeList->ui32MaxPages = ui32MaxFLPages; + psFWFreeList->ui32CurrentPages = ui32InitFLPages - ui32ReadyPages; + psFWFreeList->ui32GrowPages = ui32GrowFLPages; + psFWFreeList->bUpdatePending = IMG_FALSE; + psFWFreeList->ui32UpdateNewPages = 0; + psFWFreeList->ui32UpdateNewReadyPages = 0; + psFWFreeList->sFreeListBaseDevVAddr = sFreeListBaseDevVAddr; +#if defined(PM_INTERACTIVE_MODE) + psFWFreeList->ui32CurrentStackTop = psFWFreeList->ui32CurrentPages - 1; + psFWFreeList->ui64CurrentDevVAddr = (sFreeListBaseDevVAddr.uiAddr + + ((ui32MaxFLPages - psFWFreeList->ui32CurrentPages) * sizeof(IMG_UINT32))) & + ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1); +#endif + psFWFreeList->ui32FreeListID = psFreeList->ui32FreelistID; 
+ psFWFreeList->bGrowPending = IMG_FALSE; + psFWFreeList->ui32ReadyPages = ui32ReadyPages; + +#if defined(SUPPORT_SHADOW_FREELISTS) + /* Get the FW Memory Context address... */ + eError = RGXSetFirmwareAddress(&psFWFreeList->psFWMemContext, + RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData), + 0, RFW_FWADDR_NOREF_FLAG); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: RGXSetFirmwareAddress for RGXFWIF_FWMEMCONTEXT failed", + __func__)); + DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); + goto FWFreeListCpuMap; + } +#else + PVR_UNREFERENCED_PARAMETER(hMemCtxPrivData); +#endif + + /* + * Only the PM state buffer address is needed which contains the PM + * state including the freelist base address. + * + * Access to the physical PMR will be used to update the contents of the + * PM state buffer when PB grow occurs following OOM. + */ + psFWFreeList->sFreeListLastGrowDevVAddr.uiAddr = 0; + psFWFreeList->sFreeListStateDevVAddr = sFreeListStateDevVAddr; + } + + PVR_DPF((PVR_DBG_MESSAGE, + "Freelist [%p]: Created: Max pages 0x%08x, Init pages 0x%08x, FL base address 0x%016" IMG_UINT64_FMTSPECx ", Current FL base address 0x%016" IMG_UINT64_FMTSPECx ", Current pages %u", + psFreeList, + ui32MaxFLPages, + ui32InitFLPages, + sFreeListBaseDevVAddr.uiAddr, + (sFreeListBaseDevVAddr.uiAddr + + ((ui32MaxFLPages - psFWFreeList->ui32CurrentPages) * sizeof(IMG_UINT32))) & + ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1), + psFWFreeList->ui32CurrentPages - 1)); +#if defined(PDUMP) + PDUMPCOMMENT("Dump FW FreeList"); + DevmemPDumpLoadMem(psFreeList->psFWFreelistMemDesc, 0, sizeof(*psFWFreeList), PDUMP_FLAGS_CONTINUOUS); + +#if defined(PM_INTERACTIVE_MODE) + /* + * Separate dump of the Freelist's number of Pages and stack pointer. + * This allows to easily modify the PB size in the out2.txt files. 
+ */ + PDUMPCOMMENT("FreeList TotalPages"); + DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc, + offsetof(RGXFWIF_FREELIST, ui32CurrentPages), + psFWFreeList->ui32CurrentPages, + PDUMP_FLAGS_CONTINUOUS); + PDUMPCOMMENT("FreeList StackPointer"); + DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc, + offsetof(RGXFWIF_FREELIST, ui32CurrentStackTop), + psFWFreeList->ui32CurrentStackTop, + PDUMP_FLAGS_CONTINUOUS); +#endif +#endif + DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); + + /* Add initial PB block */ + eError = RGXGrowFreeList(psFreeList, + ui32InitFLPages, + &psFreeList->sMemoryBlockInitHead); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to allocate initial memory block for free list 0x%016" IMG_UINT64_FMTSPECx " (%s)", + __func__, + sFreeListBaseDevVAddr.uiAddr, + PVRSRVGetErrorString(eError))); + goto FWFreeListCpuMap; + } +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + /* Update Stats */ + PVRSRVStatsUpdateFreelistStats(1, /* Add 1 to the appropriate counter (Requests by App)*/ + 0, + psFreeList->ui32InitFLPages, + psFreeList->ui32NumHighPages, + psFreeList->ownerPid); + +#endif + + /* return values */ + *ppsFreeList = psFreeList; + + return PVRSRV_OK; + + /* Error handling */ + +FWFreeListCpuMap: + /* Remove freelists from list */ + OSLockAcquire(psDevInfo->hLockFreeList); + dllist_remove_node(&psFreeList->sNode); + OSLockRelease(psDevInfo->hLockFreeList); + RGXUnsetFirmwareAddress(psFWFreelistMemDesc); + +ErrorSetFwAddr: + DevmemFwUnmapAndFree(psDevInfo, psFWFreelistMemDesc); + +FWFreeListAlloc: + OSFreeMem(psFreeList); + +ErrorAllocHost: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + + +/* + RGXDestroyFreeList +*/ +PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList) +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32RefCount; + + PVR_ASSERT(psFreeList); + + OSLockAcquire(psFreeList->psDevInfo->hLockFreeList); + ui32RefCount = psFreeList->ui32RefCount; + 
OSLockRelease(psFreeList->psDevInfo->hLockFreeList); + + if (ui32RefCount != 0) + { + /* Freelist still busy */ + return PVRSRV_ERROR_RETRY; + } + + /* Freelist is not in use => start firmware cleanup */ + eError = RGXFWRequestFreeListCleanUp(psFreeList->psDevInfo, + psFreeList->sFreeListFWDevVAddr); + if (eError != PVRSRV_OK) + { + /* Can happen if the firmware took too long to handle the cleanup request, + * or if SLC-flushes didn't went through (due to some GPU lockup) */ + return eError; + } + + /* Remove FreeList from linked list before we destroy it... */ + OSLockAcquire(psFreeList->psDevInfo->hLockFreeList); + dllist_remove_node(&psFreeList->sNode); +#if !defined(SUPPORT_SHADOW_FREELISTS) + /* Confirm all HWRTData nodes are freed before releasing freelist */ + PVR_ASSERT(dllist_is_empty(&psFreeList->sNodeHWRTDataHead)); +#endif + OSLockRelease(psFreeList->psDevInfo->hLockFreeList); + +#if defined(PM_INTERACTIVE_MODE) + if (psFreeList->bCheckFreelist) + { + RGXFWIF_FREELIST *psFWFreeList; + IMG_UINT64 ui32CurrentStackTop; + IMG_UINT64 ui64CheckSum; + + /* Get the current stack pointer for this free list */ + DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList); + ui32CurrentStackTop = psFWFreeList->ui32CurrentStackTop; + DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); + + if (ui32CurrentStackTop == psFreeList->ui32CurrentFLPages-1) + { + /* Do consistency tests (as the list is fully populated) */ + _CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum); + } + else + { + /* Check for duplicate pages, but don't check the checksum as the list is not fully populated */ + _CheckFreelist(psFreeList, ui32CurrentStackTop+1, 0, &ui64CheckSum); + } + } +#endif + + /* Destroy FW structures */ + RGXUnsetFirmwareAddress(psFreeList->psFWFreelistMemDesc); + DevmemFwUnmapAndFree(psFreeList->psDevInfo, psFreeList->psFWFreelistMemDesc); + + /* Remove grow 
   shrink blocks */
	while (!dllist_is_empty(&psFreeList->sMemoryBlockHead))
	{
		eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockHead, psFreeList);
		PVR_ASSERT(eError == PVRSRV_OK);
	}

	/* Remove initial PB block */
	eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockInitHead, psFreeList);
	PVR_ASSERT(eError == PVRSRV_OK);

	/* consistency checks */
	PVR_ASSERT(dllist_is_empty(&psFreeList->sMemoryBlockInitHead));
	PVR_ASSERT(psFreeList->ui32CurrentFLPages == 0);

	/* free Freelist */
	OSFreeMem(psFreeList);

	return eError;
}


/*
	RGXCreateZSBuffer

	Creates the host bookkeeping structure (RGX_ZSBUFFER_DATA) and the
	firmware RGXFWIF_PRBUFFER for a ZS (depth/stencil) buffer.  On-demand
	buffers (PVRSRV_CHECK_ON_DEMAND on uiMapFlags) start in state
	RGXFWIF_PRBUFFER_UNBACKED and are registered on
	psDevInfo->sZSBufferHead (under hLockZSBuffer) so the FW can request
	backing later; up-front buffers start BACKED.  On failure all partial
	allocations are unwound via the Error* labels and an error is
	returned.
*/
PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection,
                                 PVRSRV_DEVICE_NODE *psDeviceNode,
                                 DEVMEMINT_RESERVATION *psReservation,
                                 PMR *psPMR,
                                 PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
                                 RGX_ZSBUFFER_DATA **ppsZSBuffer)
{
	PVRSRV_ERROR eError;
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
	RGXFWIF_PRBUFFER *psFWZSBuffer;
	RGX_ZSBUFFER_DATA *psZSBuffer;
	DEVMEM_MEMDESC *psFWZSBufferMemDesc;
	/* On-demand means physical backing is deferred until requested */
	IMG_BOOL bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiMapFlags) ? IMG_TRUE : IMG_FALSE;

	/* Allocate host data structure */
	psZSBuffer = OSAllocZMem(sizeof(*psZSBuffer));
	if (psZSBuffer == NULL)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: Failed to allocate cleanup data structure for ZS-Buffer",
		         __func__));
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto ErrorAllocCleanup;
	}

	/* Populate Host data */
	psZSBuffer->psDevInfo = psDevInfo;
	psZSBuffer->psReservation = psReservation;
	psZSBuffer->psPMR = psPMR;
	psZSBuffer->uiMapFlags = uiMapFlags;
	psZSBuffer->ui32RefCount = 0;
	psZSBuffer->bOnDemand = bOnDemand;
	if (bOnDemand)
	{
		/* psZSBuffer->ui32ZSBufferID set below with lock... */
		/* NOTE(review): only on-demand buffers get an ID and are linked
		 * into sZSBufferHead, so FindZSBuffer() cannot locate up-front
		 * buffers (they keep ID 0 from OSAllocZMem).  Presumably the FW
		 * only raises backing requests for on-demand buffers — confirm. */
		psZSBuffer->psMapping = NULL;

		OSLockAcquire(psDevInfo->hLockZSBuffer);
		psZSBuffer->ui32ZSBufferID = psDevInfo->ui32ZSBufferCurrID++;
		dllist_add_to_tail(&psDevInfo->sZSBufferHead, &psZSBuffer->sNode);
		OSLockRelease(psDevInfo->hLockZSBuffer);
	}

	/* Allocate firmware memory for ZS-Buffer. */
	PDUMPCOMMENT("Allocate firmware ZS-Buffer data structure");
	eError = DevmemFwAllocate(psDevInfo,
	                          sizeof(*psFWZSBuffer),
	                          PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
	                          PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
	                          PVRSRV_MEMALLOCFLAG_GPU_READABLE |
	                          PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
	                          PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
	                          PVRSRV_MEMALLOCFLAG_CPU_READABLE |
	                          PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
	                          PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
	                          PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
	                          "FwZSBuffer",
	                          &psFWZSBufferMemDesc);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: Failed to allocate firmware ZS-Buffer (%s)",
		         __func__,
		         PVRSRVGetErrorString(eError)));
		goto ErrorAllocFWZSBuffer;
	}
	psZSBuffer->psFWZSBufferMemDesc = psFWZSBufferMemDesc;

	/* Temporarily map the firmware render context to the kernel. */
	eError = DevmemAcquireCpuVirtAddr(psFWZSBufferMemDesc,
	                                  (void **)&psFWZSBuffer);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: Failed to map firmware ZS-Buffer (%s)",
		         __func__,
		         PVRSRVGetErrorString(eError)));
		goto ErrorAcquireFWZSBuffer;
	}

	/* Populate FW ZS-Buffer data structure */
	psFWZSBuffer->bOnDemand = bOnDemand;
	psFWZSBuffer->eState = (bOnDemand) ? RGXFWIF_PRBUFFER_UNBACKED : RGXFWIF_PRBUFFER_BACKED;
	psFWZSBuffer->ui32BufferID = psZSBuffer->ui32ZSBufferID;

	/* Get firmware address of ZS-Buffer.
	 */
	eError = RGXSetFirmwareAddress(&psZSBuffer->sZSBufferFWDevVAddr, psFWZSBufferMemDesc, 0, RFW_FWADDR_FLAG_NONE);
	PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", ErrorSetFwAddr);

	/* Dump the ZS-Buffer and the memory content */
#if defined(PDUMP)
	PDUMPCOMMENT("Dump firmware ZS-Buffer");
	DevmemPDumpLoadMem(psFWZSBufferMemDesc, 0, sizeof(*psFWZSBuffer), PDUMP_FLAGS_CONTINUOUS);
#endif

	/* Release address acquired above. */
	DevmemReleaseCpuVirtAddr(psFWZSBufferMemDesc);


	/* define return value */
	*ppsZSBuffer = psZSBuffer;

	PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] created (%s)",
	         psZSBuffer,
	         (bOnDemand) ? "On-Demand": "Up-front"));

	/* Record the creating process for per-process statistics */
	psZSBuffer->owner=OSGetCurrentClientProcessIDKM();

	return PVRSRV_OK;

	/* error handling */

ErrorSetFwAddr:
	DevmemReleaseCpuVirtAddr(psFWZSBufferMemDesc);
ErrorAcquireFWZSBuffer:
	DevmemFwUnmapAndFree(psDevInfo, psFWZSBufferMemDesc);

ErrorAllocFWZSBuffer:
	OSFreeMem(psZSBuffer);

ErrorAllocCleanup:
	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}


/*
	RGXDestroyZSBuffer

	Requests FW-side cleanup of the ZS-Buffer.  If the FW returns
	PVRSRV_ERROR_RETRY nothing is freed and that code is propagated so
	the caller can retry later; otherwise the FW address, FW memory and
	the host structure are released (on-demand buffers are also unlinked
	from the device ZS-buffer list under hLockZSBuffer).
*/
PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer)
{
	POS_LOCK hLockZSBuffer;
	PVRSRV_ERROR eError;

	PVR_ASSERT(psZSBuffer);
	hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;

	/* Request ZS Buffer cleanup */
	eError = RGXFWRequestZSBufferCleanUp(psZSBuffer->psDevInfo,
	                                     psZSBuffer->sZSBufferFWDevVAddr);
	if (eError != PVRSRV_ERROR_RETRY)
	{
		/* Free the firmware render context. */
		RGXUnsetFirmwareAddress(psZSBuffer->psFWZSBufferMemDesc);
		DevmemFwUnmapAndFree(psZSBuffer->psDevInfo, psZSBuffer->psFWZSBufferMemDesc);

		/* Remove Deferred Allocation from list */
		if (psZSBuffer->bOnDemand)
		{
			OSLockAcquire(hLockZSBuffer);
			PVR_ASSERT(dllist_node_is_in_list(&psZSBuffer->sNode));
			dllist_remove_node(&psZSBuffer->sNode);
			OSLockRelease(hLockZSBuffer);
		}

		PVR_ASSERT(psZSBuffer->ui32RefCount == 0);

		PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] destroyed", psZSBuffer));

		/* Free ZS-Buffer host data structure */
		OSFreeMem(psZSBuffer);

	}

	return eError;
}

/*
	RGXBackingZSBuffer

	Attaches physical backing to an on-demand ZS-Buffer.  The first
	reference (ui32RefCount == 0) maps the PMR into the reservation via
	DevmemIntMapPMR(); subsequent calls only increment the refcount.
	All state is serialised by psDevInfo->hLockZSBuffer.  Rejects
	NULL and non-on-demand buffers with PVRSRV_ERROR_INVALID_PARAMS.
*/
PVRSRV_ERROR
RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer)
{
	POS_LOCK hLockZSBuffer;
	PVRSRV_ERROR eError;

	if (!psZSBuffer)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	if (!psZSBuffer->bOnDemand)
	{
		/* Only deferred allocations can be populated */
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	PVR_DPF((PVR_DBG_MESSAGE,
	         "ZS Buffer [%p, ID=0x%08x]: Physical backing requested",
	         psZSBuffer,
	         psZSBuffer->ui32ZSBufferID));
	hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;

	OSLockAcquire(hLockZSBuffer);

	if (psZSBuffer->ui32RefCount == 0)
	{
		if (psZSBuffer->bOnDemand)
		{
			IMG_HANDLE hDevmemHeap = (IMG_HANDLE)NULL;

			PVR_ASSERT(psZSBuffer->psMapping == NULL);

			/* Get Heap */
			eError = DevmemServerGetHeapHandle(psZSBuffer->psReservation, &hDevmemHeap);
			/* NOTE(review): the return code above is only checked
			 * indirectly via the NULL-handle test below — verify that
			 * is sufficient. */
			PVR_ASSERT(psZSBuffer->psMapping == NULL);
			if (unlikely(hDevmemHeap == (IMG_HANDLE)NULL))
			{
				OSLockRelease(hLockZSBuffer);
				return PVRSRV_ERROR_INVALID_HEAP;
			}

			eError = DevmemIntMapPMR(hDevmemHeap,
			                         psZSBuffer->psReservation,
			                         psZSBuffer->psPMR,
			                         psZSBuffer->uiMapFlags,
			                         &psZSBuffer->psMapping);
			if (eError != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR,
				         "Unable populate ZS Buffer [%p, ID=0x%08x] (%s)",
				         psZSBuffer,
				         psZSBuffer->ui32ZSBufferID,
				         PVRSRVGetErrorString(eError)));
				OSLockRelease(hLockZSBuffer);
				return eError;

			}
			PVR_DPF((PVR_DBG_MESSAGE,
			         "ZS Buffer [%p, ID=0x%08x]: Physical backing acquired",
			         psZSBuffer,
			         psZSBuffer->ui32ZSBufferID));
		}
	}

	/* Increase refcount*/
	psZSBuffer->ui32RefCount++;

	OSLockRelease(hLockZSBuffer);

	return PVRSRV_OK;
}


/*
	RGXPopulateZSBufferKM

	Bridge entry point: backs the ZS-Buffer (RGXBackingZSBuffer) and
	returns a RGX_POPULATION handle that is later handed back to
	RGXUnpopulateZSBufferKM() to drop the reference.  Also bumps the
	per-app request counter / process stats.
*/
PVRSRV_ERROR
RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer,
                      RGX_POPULATION **ppsPopulation)
{
	RGX_POPULATION *psPopulation;
	PVRSRV_ERROR eError;

	psZSBuffer->ui32NumReqByApp++;

#if defined(PVRSRV_ENABLE_PROCESS_STATS)
	PVRSRVStatsUpdateZSBufferStats(1, 0, psZSBuffer->owner);
#endif

	/* Do the backing */
	eError = RGXBackingZSBuffer(psZSBuffer);
	if (eError != PVRSRV_OK)
	{
		goto OnErrorBacking;
	}

	/* Create the handle to the backing */
	psPopulation = OSAllocMem(sizeof(*psPopulation));
	if (psPopulation == NULL)
	{
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto OnErrorAlloc;
	}

	psPopulation->psZSBuffer = psZSBuffer;

	/* return value */
	*ppsPopulation = psPopulation;

	return PVRSRV_OK;

OnErrorAlloc:
	/* Undo the backing reference taken above */
	RGXUnbackingZSBuffer(psZSBuffer);

OnErrorBacking:
	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}

/*
	RGXUnbackingZSBuffer

	Releases one backing reference of an on-demand ZS-Buffer; when the
	last reference (ui32RefCount == 1) is dropped the PMR mapping is
	removed via DevmemIntUnmapPMR().  Serialised by hLockZSBuffer.
*/
PVRSRV_ERROR
RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer)
{
	POS_LOCK hLockZSBuffer;
	PVRSRV_ERROR eError;

	if (!psZSBuffer)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	PVR_ASSERT(psZSBuffer->ui32RefCount);

	PVR_DPF((PVR_DBG_MESSAGE,
	         "ZS Buffer [%p, ID=0x%08x]: Physical backing removal requested",
	         psZSBuffer,
	         psZSBuffer->ui32ZSBufferID));

	hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;

	OSLockAcquire(hLockZSBuffer);

	if (psZSBuffer->bOnDemand)
	{
		if (psZSBuffer->ui32RefCount == 1)
		{
			PVR_ASSERT(psZSBuffer->psMapping);

			eError = DevmemIntUnmapPMR(psZSBuffer->psMapping);
			if (eError != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR,
				         "Unable to unpopulate ZS Buffer [%p, ID=0x%08x] (%s)",
				         psZSBuffer,
				         psZSBuffer->ui32ZSBufferID,
				         PVRSRVGetErrorString(eError)));
				OSLockRelease(hLockZSBuffer);
				return eError;
			}

			PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing removed",
			         psZSBuffer,
			         psZSBuffer->ui32ZSBufferID));
		}
	}

	/* Decrease refcount*/
	psZSBuffer->ui32RefCount--;

	OSLockRelease(hLockZSBuffer);

	return PVRSRV_OK;
}

/*
	RGXUnpopulateZSBufferKM

	Bridge entry point: drops the backing reference held by the given
	RGX_POPULATION handle and frees the handle.  The handle is NOT freed
	if the unbacking fails, so the caller can retry.
*/
PVRSRV_ERROR
RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation)
{
	PVRSRV_ERROR eError;

	if (!psPopulation)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	eError = RGXUnbackingZSBuffer(psPopulation->psZSBuffer);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	OSFreeMem(psPopulation);

	return PVRSRV_OK;
}

/*
	FindZSBuffer

	Linear search of psDevInfo->sZSBufferHead for the given buffer ID,
	performed under hLockZSBuffer.  Returns NULL when not found.
	NOTE(review): the pointer is returned after the lock is released, so
	the caller relies on the buffer not being destroyed concurrently —
	confirm the FW request handlers below are serialised with destroy.
*/
static RGX_ZSBUFFER_DATA *FindZSBuffer(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32ZSBufferID)
{
	DLLIST_NODE *psNode, *psNext;
	RGX_ZSBUFFER_DATA *psZSBuffer = NULL;

	OSLockAcquire(psDevInfo->hLockZSBuffer);

	dllist_foreach_node(&psDevInfo->sZSBufferHead, psNode, psNext)
	{
		RGX_ZSBUFFER_DATA *psThisZSBuffer = IMG_CONTAINER_OF(psNode, RGX_ZSBUFFER_DATA, sNode);

		if (psThisZSBuffer->ui32ZSBufferID == ui32ZSBufferID)
		{
			psZSBuffer = psThisZSBuffer;
			break;
		}
	}

	OSLockRelease(psDevInfo->hLockZSBuffer);
	return psZSBuffer;
}

/*
	RGXProcessRequestZSBufferBacking

	Handler for a firmware "back this ZS-Buffer" request: looks the
	buffer up by ID, attaches backing, then acknowledges the FW with a
	RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE command (bDone reports
	success/failure), retrying the KCCB submission until MAX_HW_TIME_US.
*/
void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo,
                                      IMG_UINT32 ui32ZSBufferID)
{
	IMG_BOOL bBackingDone = IMG_TRUE;
	SERVER_MMU_CONTEXT *psServerMMUContext = NULL;
	RGX_ZSBUFFER_DATA *psZSBuffer;
	RGXFWIF_KCCB_CMD sTACCBCmd;
	PVRSRV_ERROR eError;

	PVR_ASSERT(psDevInfo);

	/* scan all deferred allocations */
	psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID);

	if (psZSBuffer == NULL)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (Populate)",
		         ui32ZSBufferID));

		return;
	}

	/* Retrieve Server MMU context from devmem context so we can specify memory context for command.
	 */
	if (psZSBuffer->psReservation != NULL)
	{
		DEVMEMINT_HEAP *psDevmemHeap;
		DEVMEMINT_CTX *psDevmemCtx;
		IMG_HANDLE hPrivData;

		if (DevmemServerGetHeapHandle(psZSBuffer->psReservation, (IMG_HANDLE *)&psDevmemHeap) == PVRSRV_OK &&
		    DevmemServerGetContext(psDevmemHeap, &psDevmemCtx) == PVRSRV_OK &&
		    DevmemServerGetPrivData(psDevmemCtx, &hPrivData) == PVRSRV_OK)
		{
			psServerMMUContext = hPrivData;
		}
	}

	/* Populate ZLS */
	eError = RGXBackingZSBuffer(psZSBuffer);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "Populating ZS-Buffer failed with error %u (ID = 0x%08x)",
		         eError, ui32ZSBufferID));
		/* Still acknowledge the FW, but report failure via bDone */
		bBackingDone = IMG_FALSE;
	}

	/* send confirmation */
	sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE;
	sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr;
	sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = bBackingDone;

	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
	{
		eError = RGXScheduleCommand(psDevInfo,
		                            psServerMMUContext,
		                            RGXFWIF_DM_GEOM,
		                            &sTACCBCmd,
		                            0,
		                            PDUMP_FLAGS_NONE);
		if (eError != PVRSRV_ERROR_RETRY)
		{
			break;
		}
		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
	} END_LOOP_UNTIL_TIMEOUT();

	/* Kernel CCB should never fill up, as the FW is processing them right away */
	PVR_ASSERT(eError == PVRSRV_OK);

	psZSBuffer->ui32NumReqByFW++;

#if defined(PVRSRV_ENABLE_PROCESS_STATS)
	PVRSRVStatsUpdateZSBufferStats(0, 1, psZSBuffer->owner);
#endif
}

/*
	RGXProcessRequestZSBufferUnbacking

	Handler for a firmware "unback this ZS-Buffer" request; mirror of
	RGXProcessRequestZSBufferBacking().  Drops one backing reference and
	acknowledges the FW with RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE,
	retrying the KCCB submission until MAX_HW_TIME_US.
*/
void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo,
                                        IMG_UINT32 ui32ZSBufferID)
{
	SERVER_MMU_CONTEXT *psServerMMUContext = NULL;
	RGX_ZSBUFFER_DATA *psZSBuffer;
	RGXFWIF_KCCB_CMD sTACCBCmd;
	PVRSRV_ERROR eError;

	PVR_ASSERT(psDevInfo);

	/* scan all deferred allocations */
	psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID);

	if (psZSBuffer == NULL)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (UnPopulate)",
		         ui32ZSBufferID));

		return;
	}

	/* Retrieve Server MMU context from devmem context so we can specify memory context for command. */
	if (psZSBuffer->psReservation != NULL)
	{
		DEVMEMINT_HEAP *psDevmemHeap;
		DEVMEMINT_CTX *psDevmemCtx;
		IMG_HANDLE hPrivData;

		if (DevmemServerGetHeapHandle(psZSBuffer->psReservation, (IMG_HANDLE *)&psDevmemHeap) == PVRSRV_OK &&
		    DevmemServerGetContext(psDevmemHeap, &psDevmemCtx) == PVRSRV_OK &&
		    DevmemServerGetPrivData(psDevmemCtx, &hPrivData) == PVRSRV_OK)
		{
			psServerMMUContext = hPrivData;
		}
	}

	/* Unpopulate ZLS */
	eError = RGXUnbackingZSBuffer(psZSBuffer);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "UnPopulating ZS-Buffer failed with error %u (ID = 0x%08x)",
		         eError, ui32ZSBufferID));
		PVR_ASSERT(IMG_FALSE);
	}

	/* send confirmation */
	sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE;
	sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr;
	sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = IMG_TRUE;

	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
	{
		eError = RGXScheduleCommand(psDevInfo,
		                            psServerMMUContext,
		                            RGXFWIF_DM_GEOM,
		                            &sTACCBCmd,
		                            0,
		                            PDUMP_FLAGS_NONE);
		if (eError != PVRSRV_ERROR_RETRY)
		{
			break;
		}
		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
	} END_LOOP_UNTIL_TIMEOUT();

	/* Kernel CCB should never fill up, as the FW is processing them right away */
	PVR_ASSERT(eError == PVRSRV_OK);
}

/*
	_CreateTAContext

	Allocates the FW TA (GEOM) context suspend-state buffer and creates
	the FW common context for the GEOM data master.  Zero
	ui32CCBAllocSizeLog2 / ui32CCBMaxAllocSizeLog2 select the
	RGX_TA_CCB_SIZE_LOG2 / RGX_TA_CCB_MAX_SIZE_LOG2 defaults.
*/
static
PVRSRV_ERROR _CreateTAContext(CONNECTION_DATA *psConnection,
                              PVRSRV_DEVICE_NODE *psDeviceNode,
                              SERVER_MMU_CONTEXT *psServerMMUContext,
                              DEVMEM_MEMDESC *psAllocatedMemDesc,
                              IMG_UINT32 ui32AllocatedOffset,
                              DEVMEM_MEMDESC *psFWMemContextMemDesc,
                              IMG_UINT32 ui32Priority,
                              IMG_UINT32 ui32MaxDeadlineMS,
                              IMG_UINT64 ui64RobustnessAddress,
                              RGX_COMMON_CONTEXT_INFO *psInfo,
                              RGX_SERVER_RC_TA_DATA *psTAData,
                              IMG_UINT32 ui32CCBAllocSizeLog2,
                              IMG_UINT32 ui32CCBMaxAllocSizeLog2,
                              IMG_UINT32
                              ui32ContextFlags)
{
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
	PVRSRV_ERROR eError;

	/*
		Allocate device memory for the firmware GPU context suspend state.
		Note: the FW reads/writes the state to memory by accessing the GPU register interface.
	*/
	PDUMPCOMMENT("Allocate RGX firmware TA context suspend state");

	eError = DevmemFwAllocate(psDevInfo,
	                          sizeof(RGXFWIF_TACTX_STATE),
	                          RGX_FWCOMCTX_ALLOCFLAGS,
	                          "FwTAContextState",
	                          &psTAData->psContextStateMemDesc);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: Failed to allocate firmware GPU context suspend state (%s)",
		         __func__,
		         PVRSRVGetErrorString(eError)));
		goto fail_tacontextsuspendalloc;
	}

	eError = FWCommonContextAllocate(psConnection,
	                                 psDeviceNode,
	                                 REQ_TYPE_GEOM_PPP,
	                                 RGXFWIF_DM_GEOM,
	                                 psServerMMUContext,
	                                 psAllocatedMemDesc,
	                                 ui32AllocatedOffset,
	                                 psFWMemContextMemDesc,
	                                 psTAData->psContextStateMemDesc,
	                                 ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TA_CCB_SIZE_LOG2,
	                                 ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TA_CCB_MAX_SIZE_LOG2,
	                                 ui32ContextFlags,
	                                 ui32Priority,
	                                 ui32MaxDeadlineMS,
	                                 ui64RobustnessAddress,
	                                 psInfo,
	                                 &psTAData->psServerCommonContext);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: Failed to init TA fw common context (%s)",
		         __func__,
		         PVRSRVGetErrorString(eError)));
		goto fail_tacommoncontext;
	}

	/*
	 * Dump the FW 3D context suspend state buffer
	 */
#if defined(PDUMP)
	PDUMPCOMMENT("Dump the TA context suspend state buffer");
	DevmemPDumpLoadMem(psTAData->psContextStateMemDesc,
	                   0,
	                   sizeof(RGXFWIF_TACTX_STATE),
	                   PDUMP_FLAGS_CONTINUOUS);
#endif

	psTAData->ui32Priority = ui32Priority;
	return PVRSRV_OK;

fail_tacommoncontext:
	/* NOTE(review): the suspend state was obtained with DevmemFwAllocate()
	 * but is released here with DevmemFree(); other FW allocations in this
	 * file are released with DevmemFwUnmapAndFree() — confirm intentional. */
	DevmemFree(psTAData->psContextStateMemDesc);
fail_tacontextsuspendalloc:
	PVR_ASSERT(eError != PVRSRV_OK);

	return eError;
}

/*
	_Create3DContext

	Allocates the FW 3D context suspend-state buffer (sized from the
	per-SPU/ISP pipe count features) and creates the FW common context
	for the 3D data master.  Zero CCB size parameters select the
	RGX_3D_CCB_*_SIZE_LOG2 defaults.
*/
static
PVRSRV_ERROR _Create3DContext(CONNECTION_DATA *psConnection,
                              PVRSRV_DEVICE_NODE *psDeviceNode,
                              SERVER_MMU_CONTEXT *psServerMMUContext,
                              DEVMEM_MEMDESC *psAllocatedMemDesc,
                              IMG_UINT32 ui32AllocatedOffset,
                              DEVMEM_MEMDESC *psFWMemContextMemDesc,
                              IMG_UINT32 ui32Priority,
                              IMG_UINT32 ui32MaxDeadlineMS,
                              IMG_UINT64 ui64RobustnessAddress,
                              RGX_COMMON_CONTEXT_INFO *psInfo,
                              RGX_SERVER_RC_3D_DATA *ps3DData,
                              IMG_UINT32 ui32CCBAllocSizeLog2,
                              IMG_UINT32 ui32CCBMaxAllocSizeLog2,
                              IMG_UINT32 ui32ContextFlags)
{
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
	PVRSRV_ERROR eError;
	IMG_UINT uiNumISPStoreRegs = RGXFWIF_IPP_RESUME_REG_COUNT; /* default 1 register for IPP_resume */
	IMG_UINT ui3DRegISPStateStoreSize = 0;

	/*
		Allocate device memory for the firmware GPU context suspend state.
		Note: the FW reads/writes the state to memory by accessing the GPU register interface.
	 */
	PDUMPCOMMENT("Allocate RGX firmware 3D context suspend state");

	/* One ISP store register per pipe, per ISP, per SPU, plus IPP_resume */
	uiNumISPStoreRegs += (RGX_GET_FEATURE_VALUE(psDevInfo, NUM_SPU) *
	                      RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_PER_SPU) *
	                      RGXFWIF_PIPE_COUNT_PER_ISP);


	if (uiNumISPStoreRegs > (RGXFWIF_ISP_PIPE_COUNT_MAX + RGXFWIF_IPP_RESUME_REG_COUNT))
	{
		return PVRSRV_ERROR_NOT_SUPPORTED;
	}

	/* Size of the CS buffer */
	/* Calculate the size of the 3DCTX ISP state */
	ui3DRegISPStateStoreSize = sizeof(RGXFWIF_3DCTX_STATE) +
			(uiNumISPStoreRegs * sizeof(((RGXFWIF_3DCTX_STATE *)0)->au3DReg_ISP_STORE[0]));

	eError = DevmemFwAllocate(psDevInfo,
	                          ui3DRegISPStateStoreSize,
	                          RGX_FWCOMCTX_ALLOCFLAGS,
	                          "Fw3DContextState",
	                          &ps3DData->psContextStateMemDesc);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: Failed to allocate firmware GPU context suspend state (%s)",
		         __func__,
		         PVRSRVGetErrorString(eError)));
		goto fail_3dcontextsuspendalloc;
	}

	eError = FWCommonContextAllocate(psConnection,
	                                 psDeviceNode,
	                                 REQ_TYPE_3D,
	                                 RGXFWIF_DM_3D,
	                                 psServerMMUContext,
	                                 psAllocatedMemDesc,
	                                 ui32AllocatedOffset,
	                                 psFWMemContextMemDesc,
	                                 ps3DData->psContextStateMemDesc,
	                                 ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_3D_CCB_SIZE_LOG2,
	                                 ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_3D_CCB_MAX_SIZE_LOG2,
	                                 ui32ContextFlags,
	                                 ui32Priority,
	                                 ui32MaxDeadlineMS,
	                                 ui64RobustnessAddress,
	                                 psInfo,
	                                 &ps3DData->psServerCommonContext);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: Failed to init 3D fw common context (%s)",
		         __func__,
		         PVRSRVGetErrorString(eError)));
		goto fail_3dcommoncontext;
	}

	/*
	 * Dump the FW 3D context suspend state buffer
	 */
	PDUMPCOMMENT("Dump the 3D context suspend state buffer");
	DevmemPDumpLoadMem(ps3DData->psContextStateMemDesc,
	                   0,
	                   sizeof(RGXFWIF_3DCTX_STATE),
	                   PDUMP_FLAGS_CONTINUOUS);

	ps3DData->ui32Priority = ui32Priority;
	return PVRSRV_OK;

fail_3dcommoncontext:
	/* NOTE(review): DevmemFree() vs DevmemFwAllocate() pairing — see the
	 * equivalent note in _CreateTAContext(); confirm intentional. */
	DevmemFree(ps3DData->psContextStateMemDesc);
fail_3dcontextsuspendalloc:
	PVR_ASSERT(eError != PVRSRV_OK);

	return eError;
}


/*
 * PVRSRVRGXCreateRenderContextKM
 *
 * Creates a server render context: one FW render context containing the
 * embedded TA (GEOM) and 3D common contexts, the client's static render
 * context state copied in, and registration on the device render-context
 * list.  ui32PackedCCBSizeU8888 packs the four client CCB size log2
 * values (TA alloc/max, 3D alloc/max).
 */
PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection,
                                            PVRSRV_DEVICE_NODE *psDeviceNode,
                                            IMG_UINT32 ui32Priority,
                                            IMG_HANDLE hMemCtxPrivData,
                                            IMG_UINT32 ui32StaticRenderContextStateSize,
                                            IMG_PBYTE pStaticRenderContextState,
                                            IMG_UINT32 ui32PackedCCBSizeU8888,
                                            IMG_UINT32 ui32ContextFlags,
                                            IMG_UINT64 ui64RobustnessAddress,
                                            IMG_UINT32 ui32MaxTADeadlineMS,
                                            IMG_UINT32 ui32Max3DDeadlineMS,
                                            RGX_SERVER_RENDER_CONTEXT **ppsRenderContext)
{
	PVRSRV_ERROR eError;
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
	RGX_SERVER_RENDER_CONTEXT *psRenderContext;
	DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
	RGX_COMMON_CONTEXT_INFO sInfo;
	RGXFWIF_FWRENDERCONTEXT *psFWRenderContext;

	*ppsRenderContext = NULL;

	/* Reject static state that would overflow the FW-side buffer */
	if (ui32StaticRenderContextStateSize > RGXFWIF_STATIC_RENDERCONTEXT_SIZE)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	psRenderContext = OSAllocZMem(sizeof(*psRenderContext));
	if (psRenderContext == NULL)
	{
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	eError = OSLockCreate(&psRenderContext->hLock);

	if (eError != PVRSRV_OK)
	{
		goto fail_lock;
	}

	psRenderContext->psDeviceNode = psDeviceNode;

	/*
		Create the FW render context, this has the TA and 3D FW common
		contexts embedded within it
	*/
	eError = DevmemFwAllocate(psDevInfo,
	                          sizeof(RGXFWIF_FWRENDERCONTEXT),
	                          RGX_FWCOMCTX_ALLOCFLAGS,
	                          "FwRenderContext",
	                          &psRenderContext->psFWRenderContextMemDesc);
	if (eError != PVRSRV_OK)
	{
		goto fail_fwrendercontext;
	}

#if defined(SUPPORT_WORKLOAD_ESTIMATION)
	WorkEstInitTA3D(psDevInfo, &psRenderContext->sWorkEstData);
#endif

	/* 3D common context lives at s3DContext inside the FW render context */
	eError = _Create3DContext(psConnection,
	                          psDeviceNode,
	                          hMemCtxPrivData,
	                          psRenderContext->psFWRenderContextMemDesc,
	                          offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext),
	                          psFWMemContextMemDesc,
	                          ui32Priority,
	                          ui32Max3DDeadlineMS,
	                          ui64RobustnessAddress,
	                          &sInfo,
	                          &psRenderContext->s3DData,
	                          U32toU8_Unpack3(ui32PackedCCBSizeU8888),
	                          U32toU8_Unpack4(ui32PackedCCBSizeU8888),
	                          ui32ContextFlags);
	if (eError != PVRSRV_OK)
	{
		goto fail_3dcontext;
	}

	/* TA (GEOM) common context lives at sTAContext */
	eError = _CreateTAContext(psConnection,
	                          psDeviceNode,
	                          hMemCtxPrivData,
	                          psRenderContext->psFWRenderContextMemDesc,
	                          offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext),
	                          psFWMemContextMemDesc,
	                          ui32Priority,
	                          ui32MaxTADeadlineMS,
	                          ui64RobustnessAddress,
	                          &sInfo,
	                          &psRenderContext->sTAData,
	                          U32toU8_Unpack1(ui32PackedCCBSizeU8888),
	                          U32toU8_Unpack2(ui32PackedCCBSizeU8888),
	                          ui32ContextFlags);
	if (eError != PVRSRV_OK)
	{
		goto fail_tacontext;
	}

	/* Copy the client's static render context state into the FW context */
	eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc,
	                                  (void **)&psFWRenderContext);
	if (eError != PVRSRV_OK)
	{
		goto fail_acquire_cpu_mapping;
	}

	OSDeviceMemCopy(&psFWRenderContext->sStaticRenderContextState, pStaticRenderContextState, ui32StaticRenderContextStateSize);
	DevmemPDumpLoadMem(psRenderContext->psFWRenderContextMemDesc, 0, sizeof(RGXFWIF_FWRENDERCONTEXT), PDUMP_FLAGS_CONTINUOUS);
	DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);


#if defined(SUPPORT_BUFFER_SYNC)
	psRenderContext->psBufferSyncContext =
		pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice,
		                               "rogue-ta3d");
	if (IS_ERR(psRenderContext->psBufferSyncContext))
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: failed to create buffer_sync context (err=%ld)",
		         __func__, PTR_ERR(psRenderContext->psBufferSyncContext)));

		eError = PVRSRV_ERROR_INVALID_PARAMS;
		goto fail_buffer_sync_context_create;
	}
#endif

	SyncAddrListInit(&psRenderContext->sSyncAddrListTAFence);
	SyncAddrListInit(&psRenderContext->sSyncAddrListTAUpdate);
	SyncAddrListInit(&psRenderContext->sSyncAddrList3DFence);
	SyncAddrListInit(&psRenderContext->sSyncAddrList3DUpdate);

	/* Make the context visible to device-wide traversals (debug dumps etc.) */
	{
		PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;

		OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
		dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode));
		OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
	}

	*ppsRenderContext = psRenderContext;
	return PVRSRV_OK;

	/* Error unwind: tear down in reverse order of construction */
#if defined(SUPPORT_BUFFER_SYNC)
fail_buffer_sync_context_create:
#endif
fail_acquire_cpu_mapping:
	_DestroyTAContext(&psRenderContext->sTAData,
	                  psDeviceNode);
fail_tacontext:
	_Destroy3DContext(&psRenderContext->s3DData,
	                  psRenderContext->psDeviceNode);
fail_3dcontext:
	DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWRenderContextMemDesc);
fail_fwrendercontext:
	OSLockDestroy(psRenderContext->hLock);
fail_lock:
	OSFreeMem(psRenderContext);
	PVR_ASSERT(eError != PVRSRV_OK);

	return eError;
}

/*
 * PVRSRVRGXDestroyRenderContextKM
 *
 * Tears down a render context.  TA and 3D common contexts are destroyed
 * independently and tracked in ui32CleanupStatus so the call can be
 * retried: on any failure (including PVRSRV_ERROR_RETRY from the FW or a
 * workload-estimation mismatch) the context node is re-added to the
 * device list and the error returned; shared resources are only freed
 * once both RC_CLEANUP_TA_COMPLETE and RC_CLEANUP_3D_COMPLETE are set.
 */
PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext)
{
	PVRSRV_ERROR eError;
	PVRSRV_RGXDEV_INFO *psDevInfo = psRenderContext->psDeviceNode->pvDevice;
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
	RGXFWIF_FWRENDERCONTEXT *psFWRenderContext;
	IMG_UINT32 ui32WorkEstCCBSubmitted;
#endif

	/* remove node from list before calling destroy - as destroy, if
	   successful
	 * will invalidate the node
	 * must be re-added if destroy fails
	 */
	OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
	dllist_remove_node(&(psRenderContext->sListNode));
	OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);

#if defined(SUPPORT_BUFFER_SYNC)
	pvr_buffer_sync_context_destroy(psRenderContext->psBufferSyncContext);
#endif

	/* Cleanup the TA if we haven't already */
	if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_TA_COMPLETE) == 0)
	{
		eError = _DestroyTAContext(&psRenderContext->sTAData,
		                           psRenderContext->psDeviceNode);
		if (eError == PVRSRV_OK)
		{
			psRenderContext->ui32CleanupStatus |= RC_CLEANUP_TA_COMPLETE;
		}
		else
		{
			goto e0;
		}
	}

	/* Cleanup the 3D if we haven't already */
	if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_3D_COMPLETE) == 0)
	{
		eError = _Destroy3DContext(&psRenderContext->s3DData,
		                           psRenderContext->psDeviceNode);
		if (eError == PVRSRV_OK)
		{
			psRenderContext->ui32CleanupStatus |= RC_CLEANUP_3D_COMPLETE;
		}
		else
		{
			goto e0;
		}
	}

#if defined(SUPPORT_WORKLOAD_ESTIMATION)
	eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc,
	                                  (void **)&psFWRenderContext);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: Failed to map firmware render context (%s)",
		         __func__,
		         PVRSRVGetErrorString(eError)));
		goto e0;
	}

	ui32WorkEstCCBSubmitted = psFWRenderContext->ui32WorkEstCCBSubmitted;

	DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);

	/* Check if all of the workload estimation CCB commands for this workload are read */
	if (ui32WorkEstCCBSubmitted != psRenderContext->sWorkEstData.ui32WorkEstCCBReceived)
	{

		PVR_DPF((PVR_DBG_WARNING,
		         "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch",
		         __func__, ui32WorkEstCCBSubmitted,
		         psRenderContext->sWorkEstData.ui32WorkEstCCBReceived));

		/* FW has not consumed everything yet - ask the caller to retry */
		eError = PVRSRV_ERROR_RETRY;
		goto e0;
	}
#endif

	/*
		Only if both TA and 3D contexts have been cleaned up can we
		free the shared resources
	*/
	if (psRenderContext->ui32CleanupStatus == (RC_CLEANUP_3D_COMPLETE | RC_CLEANUP_TA_COMPLETE))
	{
		/* Free the firmware render context */
		DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWRenderContextMemDesc);

		SyncAddrListDeinit(&psRenderContext->sSyncAddrListTAFence);
		SyncAddrListDeinit(&psRenderContext->sSyncAddrListTAUpdate);
		SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DFence);
		SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DUpdate);

#if defined(SUPPORT_WORKLOAD_ESTIMATION)
		WorkEstDeInitTA3D(psDevInfo, &psRenderContext->sWorkEstData);
#endif
		OSLockDestroy(psRenderContext->hLock);

		OSFreeMem(psRenderContext);
	}

	return PVRSRV_OK;

e0:
	/* Destroy failed or must be retried: put the node back on the list */
	OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
	dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode));
	OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
	return eError;
}


#if (ENABLE_TA3D_UFO_DUMP == 1)
/*
	DumpUfoList

	Debug-only (ENABLE_TA3D_UFO_DUMP) dump of all prepared TA/3D fence
	and update UFOs.  Entries whose address has bit 0 set are printed as
	sync-checkpoint UFOs (no value array entry is consumed for them);
	all others print the associated check/update value.
*/
static void DumpUfoList(IMG_UINT32 ui32ClientTAFenceCount,
                        IMG_UINT32 ui32ClientTAUpdateCount,
                        IMG_UINT32 ui32Client3DFenceCount,
                        IMG_UINT32 ui32Client3DUpdateCount,
                        PRGXFWIF_UFO_ADDR *pauiClientTAFenceUFOAddress,
                        IMG_UINT32 *paui32ClientTAFenceValue,
                        PRGXFWIF_UFO_ADDR *pauiClientTAUpdateUFOAddress,
                        IMG_UINT32 *paui32ClientTAUpdateValue,
                        PRGXFWIF_UFO_ADDR *pauiClient3DFenceUFOAddress,
                        IMG_UINT32 *paui32Client3DFenceValue,
                        PRGXFWIF_UFO_ADDR *pauiClient3DUpdateUFOAddress,
                        IMG_UINT32 *paui32Client3DUpdateValue)
{
	IMG_UINT32 i;

	PVR_DPF((PVR_DBG_ERROR, "%s: ~~~ After populating sync prims ~~~",
	         __func__));

	/* Dump Fence syncs, Update syncs and PR Update syncs */
	PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA fence syncs:",
	         __func__, ui32ClientTAFenceCount));
	for (i = 0; i < ui32ClientTAFenceCount; i++)
	{
		if (BITMASK_HAS(pauiClientTAFenceUFOAddress->ui32Addr, 1))
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: %d/%d<%p>. 
FWAddr=0x%x,"
			         " CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
			         __func__, i + 1, ui32ClientTAFenceCount,
			         (void *) pauiClientTAFenceUFOAddress,
			         pauiClientTAFenceUFOAddress->ui32Addr));
		}
		else
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)",
			         __func__, i + 1, ui32ClientTAFenceCount,
			         (void *) pauiClientTAFenceUFOAddress,
			         pauiClientTAFenceUFOAddress->ui32Addr,
			         *paui32ClientTAFenceValue,
			         *paui32ClientTAFenceValue));
			/* Value array only advances for non-checkpoint entries */
			paui32ClientTAFenceValue++;
		}
		pauiClientTAFenceUFOAddress++;
	}

	PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA update syncs:",
	         __func__, ui32ClientTAUpdateCount));
	for (i = 0; i < ui32ClientTAUpdateCount; i++)
	{
		if (BITMASK_HAS(pauiClientTAUpdateUFOAddress->ui32Addr, 1))
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: %d/%d<%p>. FWAddr=0x%x,"
			         " UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
			         __func__, i + 1, ui32ClientTAUpdateCount,
			         (void *) pauiClientTAUpdateUFOAddress,
			         pauiClientTAUpdateUFOAddress->ui32Addr));
		}
		else
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d(0x%x)",
			         __func__, i + 1, ui32ClientTAUpdateCount,
			         (void *) pauiClientTAUpdateUFOAddress,
			         pauiClientTAUpdateUFOAddress->ui32Addr,
			         *paui32ClientTAUpdateValue,
			         *paui32ClientTAUpdateValue));
			paui32ClientTAUpdateValue++;
		}
		pauiClientTAUpdateUFOAddress++;
	}

	PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D fence syncs:",
	         __func__, ui32Client3DFenceCount));
	for (i = 0; i < ui32Client3DFenceCount; i++)
	{
		if (BITMASK_HAS(pauiClient3DFenceUFOAddress->ui32Addr, 1))
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: %d/%d<%p>. FWAddr=0x%x,"
			         " CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
			         __func__, i + 1, ui32Client3DFenceCount,
			         (void *) pauiClient3DFenceUFOAddress,
			         pauiClient3DFenceUFOAddress->ui32Addr));
		}
		else
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)",
			         __func__, i + 1, ui32Client3DFenceCount,
			         (void *) pauiClient3DFenceUFOAddress,
			         pauiClient3DFenceUFOAddress->ui32Addr,
			         *paui32Client3DFenceValue,
			         *paui32Client3DFenceValue));
			paui32Client3DFenceValue++;
		}
		pauiClient3DFenceUFOAddress++;
	}

	PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D update syncs:",
	         __func__, ui32Client3DUpdateCount));
	for (i = 0; i < ui32Client3DUpdateCount; i++)
	{
		if (BITMASK_HAS(pauiClient3DUpdateUFOAddress->ui32Addr, 1))
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: %d/%d<%p>. FWAddr=0x%x,"
			         " UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
			         __func__, i + 1, ui32Client3DUpdateCount,
			         (void *) pauiClient3DUpdateUFOAddress,
			         pauiClient3DUpdateUFOAddress->ui32Addr));
		}
		else
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d(0x%x)",
			         __func__, i + 1, ui32Client3DUpdateCount,
			         (void *) pauiClient3DUpdateUFOAddress,
			         pauiClient3DUpdateUFOAddress->ui32Addr,
			         *paui32Client3DUpdateValue,
			         *paui32Client3DUpdateValue));
			paui32Client3DUpdateValue++;
		}
		pauiClient3DUpdateUFOAddress++;
	}
}
#endif /* (ENABLE_TA3D_UFO_DUMP == 1) */

/*
 * PVRSRVRGXKickTA3DKM
 */
PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext,
                                 IMG_UINT32 ui32ClientCacheOpSeqNum,
                                 IMG_UINT32 ui32ClientTAFenceCount,
                                 SYNC_PRIMITIVE_BLOCK **apsClientTAFenceSyncPrimBlock,
                                 IMG_UINT32 *paui32ClientTAFenceSyncOffset,
                                 IMG_UINT32 *paui32ClientTAFenceValue,
                                 IMG_UINT32 ui32ClientTAUpdateCount,
                                 SYNC_PRIMITIVE_BLOCK **apsClientTAUpdateSyncPrimBlock,
                                 IMG_UINT32 *paui32ClientTAUpdateSyncOffset,
                                 IMG_UINT32 *paui32ClientTAUpdateValue,
                                 IMG_UINT32 ui32Client3DUpdateCount,
                                 SYNC_PRIMITIVE_BLOCK **apsClient3DUpdateSyncPrimBlock,
                                 IMG_UINT32 *paui32Client3DUpdateSyncOffset,
                                 IMG_UINT32 *paui32Client3DUpdateValue,
                                 SYNC_PRIMITIVE_BLOCK *psPRFenceSyncPrimBlock,
                                 IMG_UINT32 ui32PRFenceSyncOffset,
                                 IMG_UINT32 ui32PRFenceValue,
                                 PVRSRV_FENCE iCheckTAFence,
PVRSRV_TIMELINE iUpdateTATimeline, + PVRSRV_FENCE *piUpdateTAFence, + IMG_CHAR szFenceNameTA[PVRSRV_SYNC_NAME_LENGTH], + PVRSRV_FENCE iCheck3DFence, + PVRSRV_TIMELINE iUpdate3DTimeline, + PVRSRV_FENCE *piUpdate3DFence, + IMG_CHAR szFenceName3D[PVRSRV_SYNC_NAME_LENGTH], + IMG_UINT32 ui32TACmdSize, + IMG_PBYTE pui8TADMCmd, + IMG_UINT32 ui323DPRCmdSize, + IMG_PBYTE pui83DPRDMCmd, + IMG_UINT32 ui323DCmdSize, + IMG_PBYTE pui83DDMCmd, + IMG_UINT32 ui32ExtJobRef, + IMG_BOOL bKickTA, + IMG_BOOL bKickPR, + IMG_BOOL bKick3D, + IMG_BOOL bAbort, + IMG_UINT32 ui32PDumpFlags, + RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, + RGX_ZSBUFFER_DATA *psZSBuffer, + RGX_ZSBUFFER_DATA *psMSAAScratchBuffer, + IMG_UINT32 ui32SyncPMRCount, + IMG_UINT32 *paui32SyncPMRFlags, + PMR **ppsSyncPMRs, + IMG_UINT32 ui32RenderTargetSize, + IMG_UINT32 ui32NumberOfDrawCalls, + IMG_UINT32 ui32NumberOfIndices, + IMG_UINT32 ui32NumberOfMRTs, + IMG_UINT64 ui64DeadlineInus) +{ + /* per-context helper structures */ + RGX_CCB_CMD_HELPER_DATA *pasTACmdHelperData = psRenderContext->asTACmdHelperData; + RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelperData = psRenderContext->as3DCmdHelperData; + + IMG_UINT64 ui64FBSCEntryMask; + + IMG_UINT32 ui32TACmdCount=0; + IMG_UINT32 ui323DCmdCount=0; + IMG_UINT32 ui32TACmdOffset=0; + IMG_UINT32 ui323DCmdOffset=0; + RGXFWIF_UFO sPRUFO; + IMG_UINT32 i; + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_ERROR eError2; + + PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psRenderContext->s3DData.psServerCommonContext); + IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); + IMG_BOOL bCCBStateOpen = IMG_FALSE; + + IMG_UINT32 ui32ClientPRUpdateCount = 0; + PRGXFWIF_UFO_ADDR *pauiClientPRUpdateUFOAddress = NULL; + IMG_UINT32 *paui32ClientPRUpdateValue = NULL; + + PRGXFWIF_UFO_ADDR *pauiClientTAFenceUFOAddress = NULL; + PRGXFWIF_UFO_ADDR *pauiClientTAUpdateUFOAddress = NULL; + PRGXFWIF_UFO_ADDR *pauiClient3DFenceUFOAddress = NULL; + PRGXFWIF_UFO_ADDR 
*pauiClient3DUpdateUFOAddress = NULL; + PRGXFWIF_UFO_ADDR uiPRFenceUFOAddress; + + IMG_UINT64 uiCheckTAFenceUID = 0; + IMG_UINT64 uiCheck3DFenceUID = 0; + IMG_UINT64 uiUpdateTAFenceUID = 0; + IMG_UINT64 uiUpdate3DFenceUID = 0; + + IMG_BOOL bUseCombined3DAnd3DPR = bKickPR && bKick3D && !pui83DPRDMCmd; + + IMG_UINT32 ui32TACmdSizeTmp = 0, ui323DCmdSizeTmp = 0; + + IMG_BOOL bTAFenceOnSyncCheckpointsOnly = IMG_FALSE; + + RGXFWIF_KCCB_CMD_KICK_DATA sTACmdKickData; + RGXFWIF_KCCB_CMD_KICK_DATA s3DCmdKickData; + IMG_BOOL bUseSingleFWCommand = bKickTA && (bKickPR || bKick3D); + + PVRSRV_FENCE iUpdateTAFence = PVRSRV_NO_FENCE; + PVRSRV_FENCE iUpdate3DFence = PVRSRV_NO_FENCE; + + IMG_BOOL b3DFenceOnSyncCheckpointsOnly = IMG_FALSE; + IMG_UINT32 ui32TAFenceTimelineUpdateValue = 0; + IMG_UINT32 ui323DFenceTimelineUpdateValue = 0; + + /* + * Count of the number of TA and 3D update values (may differ from number of + * TA and 3D updates later, as sync checkpoints do not need to specify a value) + */ + IMG_UINT32 ui32ClientPRUpdateValueCount = 0; + IMG_UINT32 ui32ClientTAUpdateValueCount = ui32ClientTAUpdateCount; + IMG_UINT32 ui32Client3DUpdateValueCount = ui32Client3DUpdateCount; + PSYNC_CHECKPOINT *apsFenceTASyncCheckpoints = NULL; /*!< TA fence checkpoints */ + PSYNC_CHECKPOINT *apsFence3DSyncCheckpoints = NULL; /*!< 3D fence checkpoints */ + IMG_UINT32 ui32FenceTASyncCheckpointCount = 0; + IMG_UINT32 ui32Fence3DSyncCheckpointCount = 0; + PSYNC_CHECKPOINT psUpdateTASyncCheckpoint = NULL; /*!< TA update checkpoint (output) */ + PSYNC_CHECKPOINT psUpdate3DSyncCheckpoint = NULL; /*!< 3D update checkpoint (output) */ + PVRSRV_CLIENT_SYNC_PRIM *psTAFenceTimelineUpdateSync = NULL; + PVRSRV_CLIENT_SYNC_PRIM *ps3DFenceTimelineUpdateSync = NULL; + void *pvTAUpdateFenceFinaliseData = NULL; + void *pv3DUpdateFenceFinaliseData = NULL; + + RGX_SYNC_DATA sTASyncData = {NULL}; /*!< Contains internal update syncs for TA */ + RGX_SYNC_DATA s3DSyncData = {NULL}; /*!< Contains internal update 
syncs for 3D */ + + IMG_BOOL bTestSLRAdd3DCheck = IMG_FALSE; +#if defined(SUPPORT_VALIDATION) + PVRSRV_FENCE hTestSLRTmpFence = PVRSRV_NO_FENCE; + PSYNC_CHECKPOINT psDummySyncCheckpoint; +#endif + +#if defined(SUPPORT_BUFFER_SYNC) + PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL; + IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0; + PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL; + struct pvr_buffer_sync_append_data *psBufferSyncData = NULL; +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataTA = {0}; + RGXFWIF_WORKEST_KICK_DATA sWorkloadKickData3D = {0}; + IMG_UINT32 ui32TACommandOffset = 0; + IMG_UINT32 ui323DCommandOffset = 0; + IMG_UINT32 ui32TACmdHeaderOffset = 0; + IMG_UINT32 ui323DCmdHeaderOffset = 0; + IMG_UINT32 ui323DFullRenderCommandOffset = 0; + IMG_UINT32 ui32TACmdOffsetWrapCheck = 0; + IMG_UINT32 ui323DCmdOffsetWrapCheck = 0; + RGX_WORKLOAD sWorkloadCharacteristics = {0}; +#endif + + IMG_UINT32 ui32TAFenceCount, ui323DFenceCount; + IMG_UINT32 ui32TAUpdateCount, ui323DUpdateCount; + IMG_UINT32 ui32PRUpdateCount; + + IMG_PID uiCurrentProcess = OSGetCurrentClientProcessIDKM(); + + IMG_UINT32 ui32Client3DFenceCount = 0; + + /* Ensure we haven't been given a null ptr to + * TA fence values if we have been told we + * have TA sync prim fences + */ + if (ui32ClientTAFenceCount > 0) + { + PVR_LOG_RETURN_IF_FALSE(paui32ClientTAFenceValue != NULL, + "paui32ClientTAFenceValue NULL but " + "ui32ClientTAFenceCount > 0", + PVRSRV_ERROR_INVALID_PARAMS); + } + /* Ensure we haven't been given a null ptr to + * TA update values if we have been told we + * have TA updates + */ + if (ui32ClientTAUpdateCount > 0) + { + PVR_LOG_RETURN_IF_FALSE(paui32ClientTAUpdateValue != NULL, + "paui32ClientTAUpdateValue NULL but " + "ui32ClientTAUpdateCount > 0", + PVRSRV_ERROR_INVALID_PARAMS); + } + /* Ensure we haven't been given a null ptr to + * 3D update values if we have been told we + 
* have 3D updates + */ + if (ui32Client3DUpdateCount > 0) + { + PVR_LOG_RETURN_IF_FALSE(paui32Client3DUpdateValue != NULL, + "paui32Client3DUpdateValue NULL but " + "ui32Client3DUpdateCount > 0", + PVRSRV_ERROR_INVALID_PARAMS); + } + + /* Write FW addresses into CMD SHARED BLOCKs */ + { + CMDTA3D_SHARED *psGeomCmdShared = (CMDTA3D_SHARED *)pui8TADMCmd; + CMDTA3D_SHARED *ps3DCmdShared = (CMDTA3D_SHARED *)pui83DDMCmd; + CMDTA3D_SHARED *psPR3DCmdShared = (CMDTA3D_SHARED *)pui83DPRDMCmd; + + if (psKMHWRTDataSet == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "KMHWRTDataSet is a null-pointer")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Write FW address for TA CMD + */ + if (psGeomCmdShared != NULL) + { + psGeomCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; + + if (psZSBuffer != NULL) + { + psGeomCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr; + } + if (psMSAAScratchBuffer != NULL) + { + psGeomCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr; + } + } + + /* Write FW address for 3D CMD + */ + if (ps3DCmdShared != NULL) + { + ps3DCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; + + if (psZSBuffer != NULL) + { + ps3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr; + } + if (psMSAAScratchBuffer != NULL) + { + ps3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr; + } + } + + /* Write FW address for PR3D CMD + */ + if (psPR3DCmdShared != NULL) + { + psPR3DCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; + + if (psZSBuffer != NULL) + { + psPR3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr; + } + if (psMSAAScratchBuffer != NULL) + { + psPR3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr; + } + } + } + + if (unlikely(iUpdateTATimeline >= 0 && !piUpdateTAFence)) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + if 
(unlikely(iUpdate3DTimeline >= 0 && !piUpdate3DFence)) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d, " + "ui32Client3DFenceCount=%d, ui32Client3DUpdateCount=%d", + __func__, + ui32ClientTAFenceCount, ui32ClientTAUpdateCount, + ui32Client3DFenceCount, ui32Client3DUpdateCount)); + /* Sanity check we have a PR kick if there are client fences */ + if (unlikely(!bKickPR && ui32Client3DFenceCount != 0)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: 3D fence passed without a PR kick", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Ensure the string is null-terminated (Required for safety) */ + szFenceNameTA[PVRSRV_SYNC_NAME_LENGTH-1] = '\0'; + szFenceName3D[PVRSRV_SYNC_NAME_LENGTH-1] = '\0'; + + OSLockAcquire(psRenderContext->hLock); + + ui32TAFenceCount = ui32ClientTAFenceCount; + ui323DFenceCount = ui32Client3DFenceCount; + ui32TAUpdateCount = ui32ClientTAUpdateCount; + ui323DUpdateCount = ui32Client3DUpdateCount; + ui32PRUpdateCount = ui32ClientPRUpdateCount; + +#if defined(SUPPORT_BUFFER_SYNC) + if (ui32SyncPMRCount) + { + int err; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling" + " pvr_buffer_sync_resolve_and_create_fences", __func__)); + + err = pvr_buffer_sync_resolve_and_create_fences( + psRenderContext->psBufferSyncContext, + psRenderContext->psDeviceNode->hSyncCheckpointContext, + ui32SyncPMRCount, + ppsSyncPMRs, + paui32SyncPMRFlags, + &ui32BufferFenceSyncCheckpointCount, + &apsBufferFenceSyncCheckpoints, + &psBufferUpdateSyncCheckpoint, + &psBufferSyncData + ); + + if (unlikely(err)) + { + switch (err) + { + case -EINTR: + eError = PVRSRV_ERROR_RETRY; + break; + case -ENOMEM: + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + break; + default: + eError = PVRSRV_ERROR_INVALID_PARAMS; + break; + } + + if (eError != PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_ERROR, "%s: " + "pvr_buffer_sync_resolve_and_create_fences failed (%d)", + __func__, eError)); + } + 
OSLockRelease(psRenderContext->hLock); + return eError; + } + +#if !defined(SUPPORT_STRIP_RENDERING) + if (bKickTA) + { + ui32TAFenceCount += ui32BufferFenceSyncCheckpointCount; + } + else + { + ui323DFenceCount += ui32BufferFenceSyncCheckpointCount; + } +#else /* !defined(SUPPORT_STRIP_RENDERING) */ + ui323DFenceCount += ui32BufferFenceSyncCheckpointCount; + + PVR_UNREFERENCED_PARAMETER(bTAFenceOnSyncCheckpointsOnly); +#endif /* !defined(SUPPORT_STRIP_RENDERING) */ + + if (psBufferUpdateSyncCheckpoint != NULL) + { + if (bKick3D) + { + ui323DUpdateCount++; + } + else + { + ui32PRUpdateCount++; + } + } + } +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + +#if !defined(UPDATE_FENCE_CHECKPOINT_COUNT) || UPDATE_FENCE_CHECKPOINT_COUNT != 1 && UPDATE_FENCE_CHECKPOINT_COUNT != 2 +#error "Invalid value for UPDATE_FENCE_CHECKPOINT_COUNT. Must be either 1 or 2." +#endif /* !defined(UPDATE_FENCE_CHECKPOINT_COUNT) || UPDATE_FENCE_CHECKPOINT_COUNT != 1 && UPDATE_FENCE_CHECKPOINT_COUNT != 2 */ + + if (iCheckTAFence != PVRSRV_NO_FENCE) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence[TA]" + " (iCheckFence=%d)," + " psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>...", + __func__, iCheckTAFence, + (void *) psRenderContext->psDeviceNode->hSyncCheckpointContext)); + + /* Resolve the sync checkpoints that make up the input fence */ + eError = SyncCheckpointResolveFence( + psRenderContext->psDeviceNode->hSyncCheckpointContext, + iCheckTAFence, + &ui32FenceTASyncCheckpointCount, + &apsFenceTASyncCheckpoints, + &uiCheckTAFenceUID, + ui32PDumpFlags + ); + if (unlikely(eError != PVRSRV_OK)) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", + __func__, eError)); + goto fail_resolve_input_fence; + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d " + "checkpoints (apsFenceSyncCheckpoints=<%p>)", + __func__, iCheckTAFence, ui32FenceTASyncCheckpointCount, + (void *) apsFenceTASyncCheckpoints)); + +#if 
defined(TA3D_CHECKPOINT_DEBUG) + if (apsFenceTASyncCheckpoints) + { + _DebugSyncCheckpoints(__func__, "TA", apsFenceTASyncCheckpoints, + ui32FenceTASyncCheckpointCount); + } +#endif /* defined(TA3D_CHECKPOINT_DEBUG) */ + } + + if (iCheck3DFence != PVRSRV_NO_FENCE) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence[3D]" + " (iCheckFence=%d), " + "psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>...", + __func__, iCheck3DFence, + (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext)); + + /* Resolve the sync checkpoints that make up the input fence */ + eError = SyncCheckpointResolveFence( + psRenderContext->psDeviceNode->hSyncCheckpointContext, + iCheck3DFence, + &ui32Fence3DSyncCheckpointCount, + &apsFence3DSyncCheckpoints, + &uiCheck3DFenceUID, + ui32PDumpFlags + ); + if (unlikely(eError != PVRSRV_OK)) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", + __func__, eError)); + goto fail_resolve_input_fence; + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d " + "checkpoints (apsFenceSyncCheckpoints=<%p>)", + __func__, iCheck3DFence, ui32Fence3DSyncCheckpointCount, + (void*)apsFence3DSyncCheckpoints)); + +#if defined(TA3D_CHECKPOINT_DEBUG) + if (apsFence3DSyncCheckpoints) + { + _DebugSyncCheckpoints(__func__, "3D", apsFence3DSyncCheckpoints, + ui32Fence3DSyncCheckpointCount); + } +#endif /* defined(TA3D_CHECKPOINT_DEBUG) */ + } + + if (iCheckTAFence >= 0 || iUpdateTATimeline >= 0 || + iCheck3DFence >= 0 || iUpdate3DTimeline >= 0) + { + IMG_UINT32 i; + + if (bKickTA) + { + ui32TAFenceCount += ui32FenceTASyncCheckpointCount; + + for (i = 0; i < ui32Fence3DSyncCheckpointCount; i++) + { + if (SyncCheckpointGetCreator(apsFence3DSyncCheckpoints[i]) != + uiCurrentProcess) + { + ui32TAFenceCount++; + } + } + } + + if (bKick3D) + { + ui323DFenceCount += ui32Fence3DSyncCheckpointCount; + } + + ui32TAUpdateCount += iUpdateTATimeline != PVRSRV_NO_TIMELINE ? 
+ UPDATE_FENCE_CHECKPOINT_COUNT : 0; + ui323DUpdateCount += iUpdate3DTimeline != PVRSRV_NO_TIMELINE ? + UPDATE_FENCE_CHECKPOINT_COUNT : 0; + ui32PRUpdateCount += iUpdate3DTimeline != PVRSRV_NO_TIMELINE && !bKick3D ? + UPDATE_FENCE_CHECKPOINT_COUNT : 0; + } + +#if defined(SUPPORT_VALIDATION) + /* Check if TestingSLR is adding an extra sync checkpoint to the + * 3D fence check (which we won't signal) + */ + if ((psDevInfo->ui32TestSLRInterval > 0) && + (--psDevInfo->ui32TestSLRCount == 0)) + { + bTestSLRAdd3DCheck = IMG_TRUE; + psDevInfo->ui32TestSLRCount = psDevInfo->ui32TestSLRInterval; + } + + if ((bTestSLRAdd3DCheck) && (iUpdate3DTimeline != PVRSRV_NO_TIMELINE)) + { + if (iUpdate3DTimeline == PVRSRV_NO_TIMELINE) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Would append additional SLR checkpoint " + "to 3D fence but no update 3D timeline provided", __func__)); + } + else + { + SyncCheckpointAlloc(psRenderContext->psDeviceNode->hSyncCheckpointContext, + iUpdate3DTimeline, + hTestSLRTmpFence, + "TestSLRCheck", + &psDummySyncCheckpoint); + PVR_DPF((PVR_DBG_WARNING, "%s: Appending additional SLR checkpoint to 3D fence " + "checkpoints (psDummySyncCheckpoint=<%p>)", + __func__, (void*)psDummySyncCheckpoint)); + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DFence, + 1, + &psDummySyncCheckpoint); + if (!pauiClient3DFenceUFOAddress) + { + pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs; + } + + if (ui32Client3DFenceCount == 0) + { + b3DFenceOnSyncCheckpointsOnly = IMG_TRUE; + } + ui323DFenceCount++; + } + } +#endif /* defined(SUPPORT_VALIDATION) */ + + /* + * Extract the FBSC entries from MMU Context for the deferred FBSC invalidate command, + * in other words, take the value and set it to zero afterwards. + * FBSC Entry Mask must be extracted from MMU ctx and updated just before the kick starts + * as it must be ready at the time of context activation. 
+ * + * NOTE: We use sTAData to get the ServerCommonContext giving us the ServerMMUCtx, + * should we use s3DData in some cases? + * Under assumption that sTAData and s3DData share the same psServerCommonContext, + * the answer is NO. + * + * The ui64FBSCEntryMask filled by the following call gets cleared + * after the first KICK command and is ignored in the context of partial renders. + */ + eError = RGXExtractFBSCEntryMaskFromMMUContext( + psRenderContext->psDeviceNode, + FWCommonContextGetServerMMUCtx(psRenderContext->sTAData.psServerCommonContext), + &ui64FBSCEntryMask + ); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to extract FBSC Entry Mask (%d)", eError)); + goto fail_tacmdinvalfbsc; + } + + if (bKickTA) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB()," + " ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d", + __func__, ui32TAFenceCount, ui32TAUpdateCount)); + + RGXCmdHelperInitCmdCCB_CommandSize( + ui64FBSCEntryMask, + ui32TAFenceCount, + ui32TAUpdateCount, + ui32TACmdSize, + pasTACmdHelperData + ); + + /* Clear the mask as we don't want to invalidate the FBSC multiple times + * with the same value of ui64FBSCEntryMask. 
+ */ + ui64FBSCEntryMask = 0; + } + + if (bKickPR) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB()," + " ui32Client3DFenceCount=%d", __func__, + ui323DFenceCount)); + + RGXCmdHelperInitCmdCCB_CommandSize( + 0, /* empty ui64FBSCEntryMask it is assumed that PRs should + * not invalidate FBSC */ + ui323DFenceCount, + 0, + sizeof(sPRUFO), + &pas3DCmdHelperData[ui323DCmdCount++] + ); + } + + if (bKickPR && !bUseCombined3DAnd3DPR) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB()," + " ui32PRUpdateCount=%d", __func__, + ui32PRUpdateCount)); + + RGXCmdHelperInitCmdCCB_CommandSize( + 0, /* empty ui64FBSCEntryMask it is assumed that PRs should + * not invalidate FBSC */ + 0, + ui32PRUpdateCount, + /* if the client has not provided a 3DPR command, the regular 3D + * command should be used instead */ + pui83DPRDMCmd ? ui323DPRCmdSize : ui323DCmdSize, + &pas3DCmdHelperData[ui323DCmdCount++] + ); + } + + if (bKick3D || bAbort) + { + if (!bKickTA) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB()," + " ui32Client3DFenceCount=%d", __func__, + ui323DFenceCount)); + } + + RGXCmdHelperInitCmdCCB_CommandSize( + ui64FBSCEntryMask, /* equals: [a] 0 if 3D is preceded by TA + * [b] value from the MMU ctx otherwise */ + bKickTA ? 
0 : ui323DFenceCount, + ui323DUpdateCount, + ui323DCmdSize, + &pas3DCmdHelperData[ui323DCmdCount++] + ); + } + + if (bKickTA) + { + ui32TACmdSizeTmp = RGXCmdHelperGetCommandSize(1, pasTACmdHelperData); + + eError = RGXCheckSpaceCCB( + FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext), + ui32TACmdSizeTmp + ); + if (eError != PVRSRV_OK) + { + goto err_not_enough_space; + } + } + + if (ui323DCmdCount > 0) + { + ui323DCmdSizeTmp = RGXCmdHelperGetCommandSize(ui323DCmdCount, pas3DCmdHelperData); + + eError = RGXCheckSpaceCCB( + FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext), + ui323DCmdSizeTmp + ); + if (eError != PVRSRV_OK) + { + goto err_not_enough_space; + } + } + + /* need to reset the counter here */ + + ui323DCmdCount = 0; + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrListTAFence, %d fences)...", + __func__, ui32ClientTAFenceCount)); + eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAFence, + ui32ClientTAFenceCount, + apsClientTAFenceSyncPrimBlock, + paui32ClientTAFenceSyncOffset); + if (unlikely(eError != PVRSRV_OK)) + { + goto err_populate_sync_addr_list_ta_fence; + } + + if (ui32ClientTAFenceCount) + { + pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs; + } + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: pauiClientTAFenceUFOAddress=<%p> ", + __func__, (void*)pauiClientTAFenceUFOAddress)); + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrListTAUpdate, %d updates)...", + __func__, ui32ClientTAUpdateCount)); + eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAUpdate, + ui32ClientTAUpdateCount, + apsClientTAUpdateSyncPrimBlock, + paui32ClientTAUpdateSyncOffset); + if (unlikely(eError != PVRSRV_OK)) + { + goto err_populate_sync_addr_list_ta_update; + } + + if (ui32ClientTAUpdateCount) + { + pauiClientTAUpdateUFOAddress = psRenderContext->sSyncAddrListTAUpdate.pasFWAddrs; + } + 
CHKPT_DBG((PVR_DBG_ERROR, + "%s: pauiClientTAUpdateUFOAddress=<%p> ", + __func__, (void*)pauiClientTAUpdateUFOAddress)); + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrList3DFence, %d fences)...", + __func__, ui32Client3DFenceCount)); + eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DFence, + ui32Client3DFenceCount, + NULL, + NULL); + if (unlikely(eError != PVRSRV_OK)) + { + goto err_populate_sync_addr_list_3d_fence; + } + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrList3DUpdate, %d updates)...", + __func__, ui32Client3DUpdateCount)); + eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DUpdate, + ui32Client3DUpdateCount, + apsClient3DUpdateSyncPrimBlock, + paui32Client3DUpdateSyncOffset); + if (unlikely(eError != PVRSRV_OK)) + { + goto err_populate_sync_addr_list_3d_update; + } + + if (ui32Client3DUpdateCount || (iUpdate3DTimeline != PVRSRV_NO_TIMELINE && piUpdate3DFence && bKick3D)) + { + pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClient3DUpdateUFOAddress=<%p> ", + __func__, (void*)pauiClient3DUpdateUFOAddress)); + + eError = SyncPrimitiveBlockToFWAddr(psPRFenceSyncPrimBlock, + ui32PRFenceSyncOffset, + &uiPRFenceUFOAddress); + + if (unlikely(eError != PVRSRV_OK)) + { + goto err_pr_fence_address; + } + +#if (ENABLE_TA3D_UFO_DUMP == 1) + DumpUfoList(ui32ClientTAFenceCount, ui32ClientTAUpdateCount, + ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 
1 : 0), + ui32Client3DUpdateCount, + pauiClientTAFenceUFOAddress, paui32ClientTAFenceValue, + pauiClientTAUpdateUFOAddress, paui32ClientTAUpdateValue, + pauiClient3DFenceUFOAddress, NULL, + pauiClient3DUpdateUFOAddress, paui32Client3DUpdateValue); +#endif /* (ENABLE_TA3D_UFO_DUMP == 1) */ + + if (ui32SyncPMRCount) + { +#if defined(SUPPORT_BUFFER_SYNC) +#if !defined(SUPPORT_STRIP_RENDERING) + /* Append buffer sync fences to TA fences */ + if (ui32BufferFenceSyncCheckpointCount > 0 && bKickTA) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append %d buffer sync checkpoints to TA Fence " + "(&psRenderContext->sSyncAddrListTAFence=<%p>, " + "pauiClientTAFenceUFOAddress=<%p>)...", + __func__, + ui32BufferFenceSyncCheckpointCount, + (void*)&psRenderContext->sSyncAddrListTAFence , + (void*)pauiClientTAFenceUFOAddress)); + SyncAddrListAppendAndDeRefCheckpoints(&psRenderContext->sSyncAddrListTAFence, + ui32BufferFenceSyncCheckpointCount, + apsBufferFenceSyncCheckpoints); + if (!pauiClientTAFenceUFOAddress) + { + pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs; + } + if (ui32ClientTAFenceCount == 0) + { + bTAFenceOnSyncCheckpointsOnly = IMG_TRUE; + } + ui32ClientTAFenceCount += ui32BufferFenceSyncCheckpointCount; + } + else +#endif /* !defined(SUPPORT_STRIP_RENDERING) */ + /* Append buffer sync fences to 3D fences */ + if (ui32BufferFenceSyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append %d buffer sync checkpoints to 3D Fence " + "(&psRenderContext->sSyncAddrList3DFence=<%p>, " + "pauiClient3DFenceUFOAddress=<%p>)...", + __func__, + ui32BufferFenceSyncCheckpointCount, + (void*)&psRenderContext->sSyncAddrList3DFence, + (void*)pauiClient3DFenceUFOAddress)); + SyncAddrListAppendAndDeRefCheckpoints(&psRenderContext->sSyncAddrList3DFence, + ui32BufferFenceSyncCheckpointCount, + apsBufferFenceSyncCheckpoints); + if (!pauiClient3DFenceUFOAddress) + { + pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs; + } + if 
(ui32Client3DFenceCount == 0) + { + b3DFenceOnSyncCheckpointsOnly = IMG_TRUE; + } + ui32Client3DFenceCount += ui32BufferFenceSyncCheckpointCount; + } + + if (psBufferUpdateSyncCheckpoint) + { + /* If we have a 3D kick append update to the 3D updates else append to the PR update */ + if (bKick3D) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append 1 buffer sync checkpoint<%p> to 3D Update" + " (&psRenderContext->sSyncAddrList3DUpdate=<%p>," + " pauiClient3DUpdateUFOAddress=<%p>)...", + __func__, + (void*)psBufferUpdateSyncCheckpoint, + (void*)&psRenderContext->sSyncAddrList3DUpdate, + (void*)pauiClient3DUpdateUFOAddress)); + /* Append buffer sync update to 3D updates */ + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, + 1, + &psBufferUpdateSyncCheckpoint); + if (!pauiClient3DUpdateUFOAddress) + { + pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; + } + ui32Client3DUpdateCount++; + } + else + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append 1 buffer sync checkpoint<%p> to PR Update" + " (&psRenderContext->sSyncAddrList3DUpdate=<%p>," + " pauiClientPRUpdateUFOAddress=<%p>)...", + __func__, + (void*)psBufferUpdateSyncCheckpoint, + (void*)&psRenderContext->sSyncAddrList3DUpdate, + (void*)pauiClientPRUpdateUFOAddress)); + /* Attach update to the 3D (used for PR) Updates */ + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, + 1, + &psBufferUpdateSyncCheckpoint); + if (!pauiClientPRUpdateUFOAddress) + { + pauiClientPRUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; + } + ui32ClientPRUpdateCount++; + } + } + CHKPT_DBG((PVR_DBG_ERROR, + "%s: (after buffer_sync) ui32ClientTAFenceCount=%d, " + "ui32ClientTAUpdateCount=%d, ui32Client3DFenceCount=%d, " + "ui32Client3DUpdateCount=%d, ui32ClientPRUpdateCount=%d,", + __func__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount, + ui32Client3DFenceCount, ui32Client3DUpdateCount, + ui32ClientPRUpdateCount)); + +#else /* defined(SUPPORT_BUFFER_SYNC) 
*/ + PVR_DPF((PVR_DBG_ERROR, + "%s: Buffer sync not supported but got %u buffers", + __func__, ui32SyncPMRCount)); + OSLockRelease(psRenderContext->hLock); + return PVRSRV_ERROR_INVALID_PARAMS; +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + } + + /* + * The hardware requires a PR to be submitted if there is a TA (otherwise + * it can wedge if we run out of PB space with no PR to run) + * + * If we only have a TA, attach native checks to the TA and updates to the PR + * If we have a TA and 3D, attach checks to TA, updates to 3D + * If we only have a 3D, attach checks and updates to the 3D + * + * Note that 'updates' includes the cleanup syncs for 'check' fence FDs, in + * addition to the update fence FD (if supplied) + * + * Currently, the client driver never kicks only the 3D, so we only support + * that for the time being. + */ + if (iCheckTAFence >= 0 || iUpdateTATimeline >= 0 || + iCheck3DFence >= 0 || iUpdate3DTimeline >= 0) + { + PRGXFWIF_UFO_ADDR *pauiClientTAIntUpdateUFOAddress = NULL; + PRGXFWIF_UFO_ADDR *pauiClient3DIntUpdateUFOAddress = NULL; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: [TA] iCheckFence = %d, iUpdateTimeline = %d", __func__, iCheckTAFence, iUpdateTATimeline)); + CHKPT_DBG((PVR_DBG_ERROR, "%s: [3D] iCheckFence = %d, iUpdateTimeline = %d", __func__, iCheck3DFence, iUpdate3DTimeline)); + + { + /* Create the output fence for TA (if required) */ + if (iUpdateTATimeline != PVRSRV_NO_TIMELINE) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence[TA] (iUpdateFence=%d, iUpdateTimeline=%d, psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>)", __func__, \ + iUpdateTAFence, iUpdateTATimeline, (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext)); + eError = SyncCheckpointCreateFence(psRenderContext->psDeviceNode, + szFenceNameTA, + iUpdateTATimeline, + psRenderContext->psDeviceNode->hSyncCheckpointContext, + &iUpdateTAFence, + &uiUpdateTAFenceUID, + &pvTAUpdateFenceFinaliseData, + &psUpdateTASyncCheckpoint, + 
(void*)&psTAFenceTimelineUpdateSync, + &ui32TAFenceTimelineUpdateValue, + ui32PDumpFlags); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: SyncCheckpointCreateFence[TA] failed (%d)", __func__, eError)); + goto fail_create_output_fence; + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: returned from SyncCheckpointCreateFence[TA] (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=0x%x)", __func__, \ + iUpdateTAFence, (void*)psTAFenceTimelineUpdateSync, ui32TAFenceTimelineUpdateValue)); + + /* Store the FW address of the update sync checkpoint in pauiClientTAIntUpdateUFOAddress */ + pauiClientTAIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdateTASyncCheckpoint); + CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClientIntUpdateUFOAddress[TA]->ui32Addr=0x%x", __func__, \ + pauiClientTAIntUpdateUFOAddress->ui32Addr)); + } + + /* Append the sync prim update for the TA timeline (if required) */ + if (psTAFenceTimelineUpdateSync) + { + sTASyncData.ui32ClientUpdateCount = ui32ClientTAUpdateCount; + sTASyncData.ui32ClientUpdateValueCount = ui32ClientTAUpdateValueCount; + sTASyncData.ui32ClientPRUpdateValueCount = (bKick3D) ? 0 : ui32ClientPRUpdateValueCount; + sTASyncData.paui32ClientUpdateValue = paui32ClientTAUpdateValue; + + eError = RGXSyncAppendTimelineUpdate(ui32TAFenceTimelineUpdateValue, + &psRenderContext->sSyncAddrListTAUpdate, + (bKick3D) ? 
NULL : &psRenderContext->sSyncAddrList3DUpdate, + psTAFenceTimelineUpdateSync, + &sTASyncData, + bKick3D); + if (unlikely(eError != PVRSRV_OK)) + { + goto fail_alloc_update_values_mem_TA; + } + + paui32ClientTAUpdateValue = sTASyncData.paui32ClientUpdateValue; + ui32ClientTAUpdateValueCount = sTASyncData.ui32ClientUpdateValueCount; + pauiClientTAUpdateUFOAddress = sTASyncData.pauiClientUpdateUFOAddress; + ui32ClientTAUpdateCount = sTASyncData.ui32ClientUpdateCount; + } + + /* Create the output fence for 3D (if required) */ + if (iUpdate3DTimeline != PVRSRV_NO_TIMELINE) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence[3D] (iUpdateFence=%d, iUpdateTimeline=%d, psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>)", __func__, \ + iUpdate3DFence, iUpdate3DTimeline, (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext)); + eError = SyncCheckpointCreateFence(psRenderContext->psDeviceNode, + szFenceName3D, + iUpdate3DTimeline, + psRenderContext->psDeviceNode->hSyncCheckpointContext, + &iUpdate3DFence, + &uiUpdate3DFenceUID, + &pv3DUpdateFenceFinaliseData, + &psUpdate3DSyncCheckpoint, + (void*)&ps3DFenceTimelineUpdateSync, + &ui323DFenceTimelineUpdateValue, + ui32PDumpFlags); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: SyncCheckpointCreateFence[3D] failed (%d)", __func__, eError)); + goto fail_create_output_fence; + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: returned from SyncCheckpointCreateFence[3D] (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=0x%x)", __func__, \ + iUpdate3DFence, (void*)ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue)); + + /* Store the FW address of the update sync checkpoint in pauiClient3DIntUpdateUFOAddress */ + pauiClient3DIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdate3DSyncCheckpoint); + CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClientIntUpdateUFOAddress[3D]->ui32Addr=0x%x", __func__, \ + pauiClient3DIntUpdateUFOAddress->ui32Addr)); + } 
+ + /* Append the sync prim update for the 3D timeline (if required) */ + if (ps3DFenceTimelineUpdateSync) + { + s3DSyncData.ui32ClientUpdateCount = ui32Client3DUpdateCount; + s3DSyncData.ui32ClientUpdateValueCount = ui32Client3DUpdateValueCount; + s3DSyncData.ui32ClientPRUpdateValueCount = ui32ClientPRUpdateValueCount; + s3DSyncData.paui32ClientUpdateValue = paui32Client3DUpdateValue; + + eError = RGXSyncAppendTimelineUpdate(ui323DFenceTimelineUpdateValue, + &psRenderContext->sSyncAddrList3DUpdate, + &psRenderContext->sSyncAddrList3DUpdate, /*!< PR update: is this required? */ + ps3DFenceTimelineUpdateSync, + &s3DSyncData, + bKick3D); + if (unlikely(eError != PVRSRV_OK)) + { + goto fail_alloc_update_values_mem_3D; + } + + paui32Client3DUpdateValue = s3DSyncData.paui32ClientUpdateValue; + ui32Client3DUpdateValueCount = s3DSyncData.ui32ClientUpdateValueCount; + pauiClient3DUpdateUFOAddress = s3DSyncData.pauiClientUpdateUFOAddress; + ui32Client3DUpdateCount = s3DSyncData.ui32ClientUpdateCount; + + if (!bKick3D) + { + paui32ClientPRUpdateValue = s3DSyncData.paui32ClientPRUpdateValue; + ui32ClientPRUpdateValueCount = s3DSyncData.ui32ClientPRUpdateValueCount; + pauiClientPRUpdateUFOAddress = s3DSyncData.pauiClientPRUpdateUFOAddress; + ui32ClientPRUpdateCount = s3DSyncData.ui32ClientPRUpdateCount; + } + } + + /* + * The hardware requires a PR to be submitted if there is a TA OOM. + * If we only have a TA, attach native checks and updates to the TA + * and 3D updates to the PR. + * If we have a TA and 3D, attach the native TA checks and updates + * to the TA and similarly for the 3D. + * Note that 'updates' includes the cleanup syncs for 'check' fence + * FDs, in addition to the update fence FD (if supplied). + * Currently, the client driver never kicks only the 3D, so we don't + * support that for the time being. 
+ */ + + { + if (bKickTA) + { + /* Attach checks and updates to TA */ + + /* Checks (from input fence) */ + if (ui32FenceTASyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to TA Fence (apsFenceSyncCheckpoints=<%p>)...", __func__, \ + ui32FenceTASyncCheckpointCount, (void*)apsFenceTASyncCheckpoints)); + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAFence, + ui32FenceTASyncCheckpointCount, + apsFenceTASyncCheckpoints); + if (!pauiClientTAFenceUFOAddress) + { + pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs; + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: {ui32ClientTAFenceCount was %d, now %d}", __func__, \ + ui32ClientTAFenceCount, ui32ClientTAFenceCount+ui32FenceTASyncCheckpointCount)); + if (ui32ClientTAFenceCount == 0) + { + bTAFenceOnSyncCheckpointsOnly = IMG_TRUE; + } + ui32ClientTAFenceCount += ui32FenceTASyncCheckpointCount; + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: {ui32ClientTAFenceCount now %d}", __func__, ui32ClientTAFenceCount)); + + if (psUpdateTASyncCheckpoint) + { + /* Update (from output fence) */ + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint<%p> (ID=%d) to TA Update...", __func__, \ + (void*)psUpdateTASyncCheckpoint, SyncCheckpointGetId(psUpdateTASyncCheckpoint))); + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAUpdate, + 1, + &psUpdateTASyncCheckpoint); + if (!pauiClientTAUpdateUFOAddress) + { + pauiClientTAUpdateUFOAddress = psRenderContext->sSyncAddrListTAUpdate.pasFWAddrs; + } + ui32ClientTAUpdateCount++; + } + + if (!bKick3D && psUpdate3DSyncCheckpoint) + { + /* Attach update to the 3D (used for PR) Updates */ + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint<%p> (ID=%d) to 3D(PR) Update...", __func__, \ + (void*)psUpdate3DSyncCheckpoint, SyncCheckpointGetId(psUpdate3DSyncCheckpoint))); + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, + 1, + &psUpdate3DSyncCheckpoint); + if (!pauiClientPRUpdateUFOAddress) + { + 
pauiClientPRUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; + } + ui32ClientPRUpdateCount++; + } + } + + if (bKick3D) + { + /* Attach checks and updates to the 3D */ + + /* Checks (from input fence) */ + if (ui32Fence3DSyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to 3D Fence...", __func__, ui32Fence3DSyncCheckpointCount)); + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DFence, + ui32Fence3DSyncCheckpointCount, + apsFence3DSyncCheckpoints); + if (!pauiClient3DFenceUFOAddress) + { + pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs; + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: {ui32Client3DFenceCount was %d, now %d}", __func__, \ + ui32Client3DFenceCount, ui32Client3DFenceCount+ui32Fence3DSyncCheckpointCount)); + if (ui32Client3DFenceCount == 0) + { + b3DFenceOnSyncCheckpointsOnly = IMG_TRUE; + } + ui32Client3DFenceCount += ui32Fence3DSyncCheckpointCount; + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: {ui32Client3DFenceCount was %d}", __func__, ui32Client3DFenceCount)); + + if (psUpdate3DSyncCheckpoint) + { + /* Update (from output fence) */ + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint<%p> (ID=%d) to 3D Update...", __func__, \ + (void*)psUpdate3DSyncCheckpoint, SyncCheckpointGetId(psUpdate3DSyncCheckpoint))); + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, + 1, + &psUpdate3DSyncCheckpoint); + if (!pauiClient3DUpdateUFOAddress) + { + pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; + } + ui32Client3DUpdateCount++; + } + } + + /* + * Relocate sync check points from the 3D fence that are + * external to the current process, to the TA fence. + * This avoids a sync lockup when dependent renders are + * submitted out-of-order and a PR must be scheduled. 
+ */ + if (bKickTA) + { + /* Search for external timeline dependencies */ + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Checking 3D fence for external sync points (%d)...", + __func__, ui32Fence3DSyncCheckpointCount)); + + for (i=0; i (ID=%d) to TA Fence...", __func__, \ + (void*)apsFence3DSyncCheckpoints[i], SyncCheckpointGetId(apsFence3DSyncCheckpoints[i]))); + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAFence, + 1, + &apsFence3DSyncCheckpoints[i]); + + if (!pauiClientTAFenceUFOAddress) + { + pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs; + } + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: {ui32ClientTAFenceCount was %d, now %d}", + __func__, + ui32ClientTAFenceCount, + ui32ClientTAFenceCount + 1)); + + if (ui32ClientTAFenceCount == 0) + { + bTAFenceOnSyncCheckpointsOnly = IMG_TRUE; + } + + ui32ClientTAFenceCount++; + } + } + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: (after pvr_sync) ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d, ui32Client3DFenceCount=%d, ui32Client3DUpdateCount=%d, ui32ClientPRUpdateCount=%d,", __func__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount, ui32Client3DFenceCount, ui32Client3DUpdateCount, ui32ClientPRUpdateCount)); + } + } + + if (ui32ClientTAFenceCount) + { + PVR_ASSERT(pauiClientTAFenceUFOAddress); + if (!bTAFenceOnSyncCheckpointsOnly) + { + PVR_ASSERT(paui32ClientTAFenceValue); + } + } + if (ui32ClientTAUpdateCount) + { + PVR_ASSERT(pauiClientTAUpdateUFOAddress); + if (ui32ClientTAUpdateValueCount>0) + { + PVR_ASSERT(paui32ClientTAUpdateValue); + } + } + if (ui32Client3DFenceCount) + { + PVR_ASSERT(pauiClient3DFenceUFOAddress); + PVR_ASSERT(b3DFenceOnSyncCheckpointsOnly); + } + if (ui32Client3DUpdateCount) + { + PVR_ASSERT(pauiClient3DUpdateUFOAddress); + if (ui32Client3DUpdateValueCount>0) + { + PVR_ASSERT(paui32Client3DUpdateValue); + } + } + if (ui32ClientPRUpdateCount) + { + PVR_ASSERT(pauiClientPRUpdateUFOAddress); + if (ui32ClientPRUpdateValueCount>0) + { + 
PVR_ASSERT(paui32ClientPRUpdateValue); + } + } + + } + CHKPT_DBG((PVR_DBG_ERROR, + "%s: ui32ClientTAFenceCount=%d, pauiClientTAFenceUFOAddress=<%p> Line ", + __func__, + ui32ClientTAFenceCount, + (void*)paui32ClientTAFenceValue)); + CHKPT_DBG((PVR_DBG_ERROR, + "%s: ui32ClientTAUpdateCount=%d, pauiClientTAUpdateUFOAddress=<%p> Line ", + __func__, + ui32ClientTAUpdateCount, + (void*)pauiClientTAUpdateUFOAddress)); +#if (ENABLE_TA3D_UFO_DUMP == 1) + DumpUfoList(ui32ClientTAFenceCount, ui32ClientTAUpdateCount, + ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 1 : 0), + ui32Client3DUpdateCount, + pauiClientTAFenceUFOAddress, paui32ClientTAFenceValue, + pauiClientTAUpdateUFOAddress, paui32ClientTAUpdateValue, + pauiClient3DFenceUFOAddress, NULL, + pauiClient3DUpdateUFOAddress, paui32Client3DUpdateValue); +#endif /* (ENABLE_TA3D_UFO_DUMP == 1) */ + + /* command size sanity check */ + + if (ui32TAFenceCount != ui32ClientTAFenceCount) + { + PVR_DPF((PVR_DBG_ERROR, "TA pre-calculated number of fences" + " is different than the actual number (%u != %u)", + ui32TAFenceCount, ui32ClientTAFenceCount)); + } + if (ui32TAUpdateCount != ui32ClientTAUpdateCount) + { + PVR_DPF((PVR_DBG_ERROR, "TA pre-calculated number of updates" + " is different than the actual number (%u != %u)", + ui32TAUpdateCount, ui32ClientTAUpdateCount)); + } + if (!bTestSLRAdd3DCheck && (ui323DFenceCount != ui32Client3DFenceCount)) + { + PVR_DPF((PVR_DBG_ERROR, "3D pre-calculated number of fences" + " is different than the actual number (%u != %u)", + ui323DFenceCount, ui32Client3DFenceCount)); + } + if (ui323DUpdateCount != ui32Client3DUpdateCount) + { + PVR_DPF((PVR_DBG_ERROR, "3D pre-calculated number of updates" + " is different than the actual number (%u != %u)", + ui323DUpdateCount, ui32Client3DUpdateCount)); + } + if (ui32PRUpdateCount != ui32ClientPRUpdateCount) + { + PVR_DPF((PVR_DBG_ERROR, "PR pre-calculated number of updates" + " is different than the actual number (%u != %u)", + ui32PRUpdateCount, 
ui32ClientPRUpdateCount)); + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + if (bKickTA || bKick3D || bAbort) + { + sWorkloadCharacteristics.sTA3D.ui32RenderTargetSize = ui32RenderTargetSize; + sWorkloadCharacteristics.sTA3D.ui32NumberOfDrawCalls = ui32NumberOfDrawCalls; + sWorkloadCharacteristics.sTA3D.ui32NumberOfIndices = ui32NumberOfIndices; + sWorkloadCharacteristics.sTA3D.ui32NumberOfMRTs = ui32NumberOfMRTs; + } +#endif + + /* Init and acquire to TA command if required */ + if (bKickTA) + { + RGX_SERVER_RC_TA_DATA *psTAData = &psRenderContext->sTAData; + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Prepare workload estimation */ + WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice, + &psRenderContext->sWorkEstData, + &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sDataTA, + RGXFWIF_CCB_CMD_TYPE_GEOM, + &sWorkloadCharacteristics, + ui64DeadlineInus, + &sWorkloadKickDataTA); +#endif + + /* Init the TA command helper */ + CHKPT_DBG((PVR_DBG_ERROR, + "%s: calling RGXCmdHelperInitCmdCCB(), ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d", + __func__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount)); + RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(psTAData->psServerCommonContext), + ui32ClientTAFenceCount, + pauiClientTAFenceUFOAddress, + paui32ClientTAFenceValue, + ui32ClientTAUpdateCount, + pauiClientTAUpdateUFOAddress, + paui32ClientTAUpdateValue, + ui32TACmdSize, + pui8TADMCmd, + RGXFWIF_CCB_CMD_TYPE_GEOM, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + &sWorkloadKickDataTA, +#else + NULL, +#endif + "TA", + bCCBStateOpen, + pasTACmdHelperData); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* The following is used to determine the offset of the command header containing + the workload estimation data so that can be accessed when the KCCB is read */ + ui32TACmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(pasTACmdHelperData); +#endif + + eError = 
RGXCmdHelperAcquireCmdCCB(CCB_CMD_HELPER_NUM_TA_COMMANDS, pasTACmdHelperData); + if (unlikely(eError != PVRSRV_OK)) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", + __func__, eError)); + goto fail_taacquirecmd; + } + else + { + ui32TACmdCount++; + } + } + + /* Only kick the 3D if required */ + if (bKickPR) + { + RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData; + + /* + The command helper doesn't know about the PR fence so create + the command with all the fences against it and later create + the PR command itself which _must_ come after the PR fence. + */ + sPRUFO.puiAddrUFO = uiPRFenceUFOAddress; + sPRUFO.ui32Value = ui32PRFenceValue; + + /* Init the PR fence command helper */ + CHKPT_DBG((PVR_DBG_ERROR, + "%s: calling RGXCmdHelperInitCmdCCB(), ui32Client3DFenceCount=%d", + __func__, ui32Client3DFenceCount)); + RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), + ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 1 : 0), + pauiClient3DFenceUFOAddress, + NULL, + 0, + NULL, + NULL, + sizeof(sPRUFO), + (IMG_UINT8*) &sPRUFO, + RGXFWIF_CCB_CMD_TYPE_FENCE_PR, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, + NULL, + "3D-PR-Fence", + bCCBStateOpen, + &pas3DCmdHelperData[ui323DCmdCount++]); + + /* Init the 3D PR command helper */ + /* + Updates for Android (fence sync and Timeline sync prim) are provided in the PR-update + if no 3D is present. This is so the timeline update cannot happen out of order with any + other 3D already in flight for the same timeline (PR-updates are done in the 3D cCCB). + This out of order timeline sync prim update could happen if we attach it to the TA update. 
+ */ + if (ui32ClientPRUpdateCount) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Line %d, ui32ClientPRUpdateCount=%d, " + "pauiClientPRUpdateUFOAddress=0x%x, " + "ui32ClientPRUpdateValueCount=%d, " + "paui32ClientPRUpdateValue=0x%x", + __func__, __LINE__, ui32ClientPRUpdateCount, + pauiClientPRUpdateUFOAddress->ui32Addr, + ui32ClientPRUpdateValueCount, + (ui32ClientPRUpdateValueCount == 0) ? PVRSRV_SYNC_CHECKPOINT_SIGNALLED : *paui32ClientPRUpdateValue)); + } + if (!bUseCombined3DAnd3DPR) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: calling RGXCmdHelperInitCmdCCB(), ui32ClientPRUpdateCount=%d", + __func__, ui32ClientPRUpdateCount)); + RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), + 0, + NULL, + NULL, + ui32ClientPRUpdateCount, + pauiClientPRUpdateUFOAddress, + paui32ClientPRUpdateValue, + pui83DPRDMCmd ? ui323DPRCmdSize : ui323DCmdSize, // If the client has not provided a 3DPR command, the regular 3D command should be used instead + pui83DPRDMCmd ? pui83DPRDMCmd : pui83DDMCmd, + RGXFWIF_CCB_CMD_TYPE_3D_PR, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, + NULL, + "3D-PR", + bCCBStateOpen, + &pas3DCmdHelperData[ui323DCmdCount++]); + } + } + + if (bKick3D || bAbort) + { + RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData; + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Prepare workload estimation */ + WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice, + &psRenderContext->sWorkEstData, + &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sData3D, + RGXFWIF_CCB_CMD_TYPE_3D, + &sWorkloadCharacteristics, + ui64DeadlineInus, + &sWorkloadKickData3D); +#endif + + /* Init the 3D command helper */ + RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), + bKickTA ? 0 : ui32Client3DFenceCount, /* For a kick with a TA, the 3D fences are added before the PR command instead */ + bKickTA ? 
NULL : pauiClient3DFenceUFOAddress, + NULL, + ui32Client3DUpdateCount, + pauiClient3DUpdateUFOAddress, + paui32Client3DUpdateValue, + ui323DCmdSize, + pui83DDMCmd, + RGXFWIF_CCB_CMD_TYPE_3D, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + &sWorkloadKickData3D, +#else + NULL, +#endif + "3D", + bCCBStateOpen, + &pas3DCmdHelperData[ui323DCmdCount++]); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* The following are used to determine the offset of the command header containing the workload estimation + data so that can be accessed when the KCCB is read */ + ui323DCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(&pas3DCmdHelperData[ui323DCmdCount - 1]); + ui323DFullRenderCommandOffset = RGXCmdHelperGetCommandOffset(pas3DCmdHelperData, ui323DCmdCount - 1); +#endif + } + + /* Protect against array overflow in RGXCmdHelperAcquireCmdCCB() */ + if (unlikely(ui323DCmdCount > CCB_CMD_HELPER_NUM_3D_COMMANDS)) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", __func__, eError)); + goto fail_3dcmdinit; + } + + if (ui323DCmdCount) + { + PVR_ASSERT(bKickPR || bKick3D); + + /* Acquire space for all the 3D command(s) */ + eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount, pas3DCmdHelperData); + if (unlikely(eError != PVRSRV_OK)) + { + /* If RGXCmdHelperAcquireCmdCCB fails we skip the scheduling + * of a new TA command with the same Write offset in Kernel CCB. 
+ */ + CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", __func__, eError)); + goto fail_3dacquirecmd; + } + } + + /* + We should acquire the space in the kernel CCB here as after this point + we release the commands which will take operations on server syncs + which can't be undone + */ + + /* + Everything is ready to go now, release the commands + */ + if (ui32TACmdCount) + { + ui32TACmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext)); + RGXCmdHelperReleaseCmdCCB(ui32TACmdCount, + pasTACmdHelperData, + "TA", + FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + ui32TACmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext)); + + /* This checks if the command would wrap around at the end of the CCB and therefore would start at an + offset of 0 rather than the current command offset */ + if (ui32TACmdOffset < ui32TACmdOffsetWrapCheck) + { + ui32TACommandOffset = ui32TACmdOffset; + } + else + { + ui32TACommandOffset = 0; + } +#endif + } + + if (ui323DCmdCount) + { + ui323DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext)); + RGXCmdHelperReleaseCmdCCB(ui323DCmdCount, + pas3DCmdHelperData, + "3D", + FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + ui323DCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext)); + + if (ui323DCmdOffset < ui323DCmdOffsetWrapCheck) + { + ui323DCommandOffset = ui323DCmdOffset; + } + else + { + ui323DCommandOffset = 0; + } +#endif + } + + if (ui32TACmdCount) + { + IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr; + RGX_CLIENT_CCB *psClientCCB = 
FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext); + + sTACmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext); + sTACmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); + sTACmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Add the Workload data into the KCCB kick */ + sTACmdKickData.ui32WorkEstCmdHeaderOffset = ui32TACommandOffset + ui32TACmdHeaderOffset; +#else + sTACmdKickData.ui32WorkEstCmdHeaderOffset = 0; +#endif + + eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &sTACmdKickData.apsCleanupCtl, + &sTACmdKickData.ui32NumCleanupCtl, + RGXFWIF_DM_GEOM, + bKickTA, + psKMHWRTDataSet, + psZSBuffer, + psMSAAScratchBuffer); + if (unlikely(eError != PVRSRV_OK)) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", + __func__, eError)); + goto fail_taattachcleanupctls; + } + + HTBLOGK(HTB_SF_MAIN_KICK_TA, + sTACmdKickData.psContext, + ui32TACmdOffset + ); + RGXSRV_HWPERF_ENQ(psRenderContext, OSGetCurrentClientProcessIDKM(), + ui32FWCtx, ui32ExtJobRef, ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_TA3D, + iCheckTAFence, + iUpdateTAFence, + iUpdateTATimeline, + uiCheckTAFenceUID, uiUpdateTAFenceUID); + + if (!bUseSingleFWCommand) + { + /* Construct the kernel TA CCB command. 
*/ + RGXFWIF_KCCB_CMD sTAKCCBCmd; + sTAKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + sTAKCCBCmd.uCmdData.sCmdKickData = sTACmdKickData; + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice, + FWCommonContextGetServerMMUCtx(psRenderContext->sTAData.psServerCommonContext), + RGXFWIF_DM_GEOM, + &sTAKCCBCmd, + ui32ClientCacheOpSeqNum, + ui32PDumpFlags); + if (eError2 != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + } + + PVRGpuTraceEnqueueEvent(psRenderContext->psDeviceNode->pvDevice, + ui32FWCtx, ui32ExtJobRef, ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_TA3D); + } + + if (ui323DCmdCount) + { + RGXFWIF_KCCB_CMD s3DKCCBCmd = { 0 }; + RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext); + + s3DCmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext); + s3DCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); + s3DCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); + + /* Add the Workload data into the KCCB kick */ +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Store the offset to the CCCB command header so that it can be referenced when the KCCB command reaches the FW */ + s3DCmdKickData.ui32WorkEstCmdHeaderOffset = ui323DCommandOffset + ui323DCmdHeaderOffset + ui323DFullRenderCommandOffset; +#else + s3DCmdKickData.ui32WorkEstCmdHeaderOffset = 0; +#endif + + eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &s3DCmdKickData.apsCleanupCtl, + &s3DCmdKickData.ui32NumCleanupCtl, + RGXFWIF_DM_3D, + bKick3D, + psKMHWRTDataSet, + psZSBuffer, + psMSAAScratchBuffer); + if (unlikely(eError != PVRSRV_OK)) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", + __func__, eError)); + goto fail_3dattachcleanupctls; + } + + HTBLOGK(HTB_SF_MAIN_KICK_3D, + s3DCmdKickData.psContext, + ui323DCmdOffset); + + if 
(bUseSingleFWCommand) + { + /* Construct the kernel TA/3D CCB command. */ + s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK; + s3DKCCBCmd.uCmdData.sCombinedTA3DCmdKickData.sTACmdKickData = sTACmdKickData; + s3DKCCBCmd.uCmdData.sCombinedTA3DCmdKickData.s3DCmdKickData = s3DCmdKickData; + } + else + { + /* Construct the kernel 3D CCB command. */ + s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + s3DKCCBCmd.uCmdData.sCmdKickData = s3DCmdKickData; + } + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice, + FWCommonContextGetServerMMUCtx(psRenderContext->s3DData.psServerCommonContext), + RGXFWIF_DM_3D, + &s3DKCCBCmd, + ui32ClientCacheOpSeqNum, + ui32PDumpFlags); + if (eError2 != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + } + + /* + * Now check eError (which may have returned an error from our earlier calls + * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first + * so we check it now... 
+ */ + if (unlikely(eError != PVRSRV_OK )) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", + __func__, eError)); + goto fail_3dacquirecmd; + } + +#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) && defined(NO_HARDWARE) && defined(PDUMP) + if (psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER) + { + PVRSRVValidateSOCUSCTimer(psDevInfo, PDUMP_CONT, 0, 0, NULL); + } +#endif + +#if defined(NO_HARDWARE) + /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ + if (psUpdateTASyncCheckpoint) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling NOHW sync checkpoint [TA] <%p>, ID:%d, FwAddr=0x%x", __func__, \ + (void*)psUpdateTASyncCheckpoint, SyncCheckpointGetId(psUpdateTASyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateTASyncCheckpoint))); + SyncCheckpointSignalNoHW(psUpdateTASyncCheckpoint); + } + if (psTAFenceTimelineUpdateSync) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating NOHW sync prim [TA] <%p> to %d", __func__, (void*)psTAFenceTimelineUpdateSync, ui32TAFenceTimelineUpdateValue)); + SyncPrimNoHwUpdate(psTAFenceTimelineUpdateSync, ui32TAFenceTimelineUpdateValue); + } + + if (psUpdate3DSyncCheckpoint) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling NOHW sync checkpoint [3D] <%p>, ID:%d, FwAddr=0x%x", __func__, \ + (void*)psUpdate3DSyncCheckpoint, SyncCheckpointGetId(psUpdate3DSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdate3DSyncCheckpoint))); + SyncCheckpointSignalNoHW(psUpdate3DSyncCheckpoint); + } + if (ps3DFenceTimelineUpdateSync) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating NOHW sync prim [3D] <%p> to %d", __func__, (void*)ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue)); + SyncPrimNoHwUpdate(ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue); + } + SyncCheckpointNoHWUpdateTimelines(NULL); + +#endif /* defined(NO_HARDWARE) */ + +#if defined(SUPPORT_BUFFER_SYNC) + if (psBufferSyncData) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: calling 
pvr_buffer_sync_kick_succeeded(psBufferSyncData=<%p>)...", + __func__, (void*)psBufferSyncData)); + pvr_buffer_sync_kick_succeeded(psBufferSyncData); + } + if (apsBufferFenceSyncCheckpoints) + { + kfree(apsBufferFenceSyncCheckpoints); + } +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + + if (piUpdateTAFence) + { + *piUpdateTAFence = iUpdateTAFence; + } + if (piUpdate3DFence) + { + *piUpdate3DFence = iUpdate3DFence; + } + + /* Drop the references taken on the sync checkpoints in the + * resolved input fence. + * NOTE: 3D fence is always submitted, either via 3D or TA(PR). + */ + if (bKickTA) + { + SyncAddrListDeRefCheckpoints(ui32FenceTASyncCheckpointCount, apsFenceTASyncCheckpoints); + } + SyncAddrListDeRefCheckpoints(ui32Fence3DSyncCheckpointCount, apsFence3DSyncCheckpoints); + + if (pvTAUpdateFenceFinaliseData && (iUpdateTAFence != PVRSRV_NO_FENCE)) + { + SyncCheckpointFinaliseFence(psRenderContext->psDeviceNode, iUpdateTAFence, + pvTAUpdateFenceFinaliseData, + psUpdateTASyncCheckpoint, szFenceNameTA); + } + if (pv3DUpdateFenceFinaliseData && (iUpdate3DFence != PVRSRV_NO_FENCE)) + { + SyncCheckpointFinaliseFence(psRenderContext->psDeviceNode, iUpdate3DFence, + pv3DUpdateFenceFinaliseData, + psUpdate3DSyncCheckpoint, szFenceName3D); + } + + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceTASyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceTASyncCheckpoints); + } + if (apsFence3DSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFence3DSyncCheckpoints); + } + + if (sTASyncData.paui32ClientUpdateValue) + { + OSFreeMem(sTASyncData.paui32ClientUpdateValue); + } + if (s3DSyncData.paui32ClientUpdateValue) + { + OSFreeMem(s3DSyncData.paui32ClientUpdateValue); + } + +#if defined(SUPPORT_VALIDATION) + if (bTestSLRAdd3DCheck) + { + SyncCheckpointFree(psDummySyncCheckpoint); + } +#endif + OSLockRelease(psRenderContext->hLock); + + return PVRSRV_OK; + +fail_3dattachcleanupctls: 
+fail_taattachcleanupctls: +fail_3dacquirecmd: +fail_3dcmdinit: +fail_taacquirecmd: + SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrListTAFence); + SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrListTAUpdate); + SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrList3DFence); + SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrList3DUpdate); + /* Where a TA-only kick (ie no 3D) is submitted, the PR update will make use of the unused 3DUpdate list. + * If this has happened, performing a rollback on pauiClientPRUpdateUFOAddress will simply repeat what + * has already been done for the sSyncAddrList3DUpdate above and result in a double decrement of the + * sync checkpoint's hEnqueuedCCBCount, so we need to check before rolling back the PRUpdate. + */ + if (pauiClientPRUpdateUFOAddress && (pauiClientPRUpdateUFOAddress != psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs)) + { + SyncCheckpointRollbackFromUFO(psRenderContext->psDeviceNode, pauiClientPRUpdateUFOAddress->ui32Addr); + } + +fail_alloc_update_values_mem_3D: +fail_alloc_update_values_mem_TA: + if (iUpdateTAFence != PVRSRV_NO_FENCE) + { + SyncCheckpointRollbackFenceData(iUpdateTAFence, pvTAUpdateFenceFinaliseData); + } + if (iUpdate3DFence != PVRSRV_NO_FENCE) + { + SyncCheckpointRollbackFenceData(iUpdate3DFence, pv3DUpdateFenceFinaliseData); + } +fail_create_output_fence: + /* Drop the references taken on the sync checkpoints in the + * resolved input fence. + * NOTE: 3D fence is always submitted, either via 3D or TA(PR). 
+ */ + if (bKickTA) + { + SyncAddrListDeRefCheckpoints(ui32FenceTASyncCheckpointCount, apsFenceTASyncCheckpoints); + } + SyncAddrListDeRefCheckpoints(ui32Fence3DSyncCheckpointCount, apsFence3DSyncCheckpoints); + +err_pr_fence_address: +err_populate_sync_addr_list_3d_update: +err_populate_sync_addr_list_3d_fence: +err_populate_sync_addr_list_ta_update: +err_populate_sync_addr_list_ta_fence: +err_not_enough_space: +fail_tacmdinvalfbsc: +fail_resolve_input_fence: + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceTASyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceTASyncCheckpoints); + } + if (apsFence3DSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFence3DSyncCheckpoints); + } + if (sTASyncData.paui32ClientUpdateValue) + { + OSFreeMem(sTASyncData.paui32ClientUpdateValue); + } + if (s3DSyncData.paui32ClientUpdateValue) + { + OSFreeMem(s3DSyncData.paui32ClientUpdateValue); + } +#if defined(SUPPORT_VALIDATION) + if (bTestSLRAdd3DCheck) + { + SyncCheckpointFree(psDummySyncCheckpoint); + } +#endif +#if defined(SUPPORT_BUFFER_SYNC) + if (psBufferSyncData) + { + pvr_buffer_sync_kick_failed(psBufferSyncData); + } + if (apsBufferFenceSyncCheckpoints) + { + kfree(apsBufferFenceSyncCheckpoints); + } +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + PVR_ASSERT(eError != PVRSRV_OK); + OSLockRelease(psRenderContext->hLock); + return eError; +} + +PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + RGX_SERVER_RENDER_CONTEXT *psRenderContext, + IMG_UINT32 ui32Priority) +{ + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + + OSLockAcquire(psRenderContext->hLock); + + if (psRenderContext->sTAData.ui32Priority != ui32Priority) + { + eError = ContextSetPriority(psRenderContext->sTAData.psServerCommonContext, + psConnection, + psRenderContext->psDeviceNode->pvDevice, + ui32Priority, + RGXFWIF_DM_GEOM); + if (eError 
!= PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to set the priority of the TA part of the rendercontext (%s)", + __func__, PVRSRVGetErrorString(eError))); + goto fail_tacontext; + } + psRenderContext->sTAData.ui32Priority = ui32Priority; + } + + if (psRenderContext->s3DData.ui32Priority != ui32Priority) + { + eError = ContextSetPriority(psRenderContext->s3DData.psServerCommonContext, + psConnection, + psRenderContext->psDeviceNode->pvDevice, + ui32Priority, + RGXFWIF_DM_3D); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to set the priority of the 3D part of the rendercontext (%s)", + __func__, PVRSRVGetErrorString(eError))); + goto fail_3dcontext; + } + psRenderContext->s3DData.ui32Priority = ui32Priority; + } + + OSLockRelease(psRenderContext->hLock); + return PVRSRV_OK; + +fail_3dcontext: +fail_tacontext: + OSLockRelease(psRenderContext->hLock); + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + + +PVRSRV_ERROR PVRSRVRGXSetRenderContextPropertyKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, + RGX_CONTEXT_PROPERTY eContextProperty, + IMG_UINT64 ui64Input, + IMG_UINT64 *pui64Output) +{ + PVRSRV_ERROR eError; + PVRSRV_ERROR eError2 = PVRSRV_OK; + + switch (eContextProperty) + { + case RGX_CONTEXT_PROPERTY_FLAGS: + { + OSLockAcquire(psRenderContext->hLock); + eError = FWCommonContextSetFlags(psRenderContext->sTAData.psServerCommonContext, + (IMG_UINT32)ui64Input); + if (eError == PVRSRV_OK) + { + eError2 = FWCommonContextSetFlags(psRenderContext->s3DData.psServerCommonContext, + (IMG_UINT32)ui64Input); + } + OSLockRelease(psRenderContext->hLock); + PVR_LOG_IF_ERROR(eError, "FWCommonContextSetFlags eError"); + PVR_LOG_IF_ERROR(eError2, "FWCommonContextSetFlags eError2"); + break; + } + + default: + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty)); + eError = PVRSRV_ERROR_NOT_SUPPORTED; + } + } + + return eError; +} + +/* + * 
PVRSRVRGXGetLastRenderContextResetReasonKM
+ */
+PVRSRV_ERROR PVRSRVRGXGetLastRenderContextResetReasonKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+                                                        IMG_UINT32 *peLastResetReason,
+                                                        IMG_UINT32 *pui32LastResetJobRef)
+{
+	RGXFWIF_CONTEXT_RESET_REASON eTAReason, e3DReason;
+	IMG_UINT32 ui32TAJobRef, ui323DJobRef;
+	IMG_BOOL b3DInnocent, bTAGuilty, bTAHardCSW;
+
+	PVR_ASSERT(psRenderContext != NULL);
+	PVR_ASSERT(peLastResetReason != NULL);
+	PVR_ASSERT(pui32LastResetJobRef != NULL);
+
+	/* Query the last reset reason of both the TA and the 3D halves
+	 * (the query also clears the stored reason on each).
+	 */
+	eTAReason = FWCommonContextGetLastResetReason(psRenderContext->sTAData.psServerCommonContext,
+	                                              &ui32TAJobRef);
+	e3DReason = FWCommonContextGetLastResetReason(psRenderContext->s3DData.psServerCommonContext,
+	                                              &ui323DJobRef);
+
+	b3DInnocent = (e3DReason == RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_LOCKUP ||
+	               e3DReason == RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING) ? IMG_TRUE : IMG_FALSE;
+	bTAGuilty  = (eLastTAResetReasonIsGuilty(eTAReason)) ? IMG_TRUE : IMG_FALSE;
+	bTAHardCSW = (eTAReason == RGXFWIF_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH) ? IMG_TRUE : IMG_FALSE;
+
+	/* Combine the two reasons into one result: report the 3D reason by
+	 * default, but prefer the TA reason when the 3D half saw nothing, or
+	 * when the 3D half was merely an innocent victim while the TA half
+	 * was guilty or was hard-context-switched.
+	 */
+	if (e3DReason == RGXFWIF_CONTEXT_RESET_REASON_NONE ||
+	    (b3DInnocent && (bTAGuilty || bTAHardCSW)))
+	{
+		*peLastResetReason = (IMG_UINT32) eTAReason;
+		*pui32LastResetJobRef = ui32TAJobRef;
+	}
+	else
+	{
+		*peLastResetReason = (IMG_UINT32) e3DReason;
+		*pui32LastResetJobRef = ui323DJobRef;
+	}
+
+	return PVRSRV_OK;
+}
+
+/* Dump debug information for every render context on the device; both the
+ * TA and 3D firmware common contexts of each render context are dumped,
+ * under the render-context list read lock.
+ */
+void DumpRenderCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                         DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                         void *pvDumpDebugFile,
+                         IMG_UINT32 ui32VerbLevel)
+{
+	DLLIST_NODE *psListNode, *psNextNode;
+
+	OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock);
+	dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psListNode, psNextNode)
+	{
+		RGX_SERVER_RENDER_CONTEXT *psCtx =
+			IMG_CONTAINER_OF(psListNode, RGX_SERVER_RENDER_CONTEXT, sListNode);
+
+		DumpFWCommonContextInfo(psCtx->sTAData.psServerCommonContext,
+		                        pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+		DumpFWCommonContextInfo(psCtx->s3DData.psServerCommonContext,
+		                        pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+	}
+	OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock);
+}
+
+/* Scan all render contexts for stalled client CCBs; returns a bitmask of
+ * RGX_KICK_TYPE_DM_GEOM / RGX_KICK_TYPE_DM_3D for the stalled halves.
+ */
+IMG_UINT32 CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	DLLIST_NODE *psNode, *psNext;
+	IMG_UINT32 ui32ContextBitMask = 0;
+
+	OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock);
+
+	dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psNode, psNext)
+	{
+		RGX_SERVER_RENDER_CONTEXT *psCurrentServerRenderCtx =
+			IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT,
sListNode); + if (NULL != psCurrentServerRenderCtx->sTAData.psServerCommonContext) + { + if (CheckStalledClientCommonContext(psCurrentServerRenderCtx->sTAData.psServerCommonContext, RGX_KICK_TYPE_DM_GEOM) == PVRSRV_ERROR_CCCB_STALLED) + { + ui32ContextBitMask |= RGX_KICK_TYPE_DM_GEOM; + } + } + + if (NULL != psCurrentServerRenderCtx->s3DData.psServerCommonContext) + { + if (CheckStalledClientCommonContext(psCurrentServerRenderCtx->s3DData.psServerCommonContext, RGX_KICK_TYPE_DM_3D) == PVRSRV_ERROR_CCCB_STALLED) + { + ui32ContextBitMask |= RGX_KICK_TYPE_DM_3D; + } + } + } + + OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock); + return ui32ContextBitMask; +} + +/* + * RGXRenderContextStalledKM + */ +PVRSRV_ERROR RGXRenderContextStalledKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext) +{ + RGXCheckForStalledClientContexts((PVRSRV_RGXDEV_INFO *) psRenderContext->psDeviceNode->pvDevice, IMG_TRUE); + return PVRSRV_OK; +} + +/****************************************************************************** + End of file (rgxta3d.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxta3d.h b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxta3d.h new file mode 100644 index 000000000000..896a2e3e20ad --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxta3d.h @@ -0,0 +1,440 @@ +/*************************************************************************/ /*! +@File +@Title RGX TA and 3D Functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the RGX TA and 3D Functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXTA3D_H +#define RGXTA3D_H + +#include "devicemem.h" +#include "devicemem_server.h" +#include "device.h" +#include "rgxdevice.h" +#include "rgx_fwif_shared.h" +#include "sync_server.h" +#include "connection_server.h" +#include "rgxdebug.h" +#include "pvr_notifier.h" +#include "ri_server.h" + +typedef struct _RGX_SERVER_RENDER_CONTEXT_ RGX_SERVER_RENDER_CONTEXT; +typedef struct _RGX_FREELIST_ RGX_FREELIST; +typedef struct _RGX_PMR_NODE_ RGX_PMR_NODE; + + +typedef struct _RGX_KM_HW_RT_DATASET_ +{ + /* RGX_RTDATA_CLEANUP_DATA */ + /* RGXMKIF_NUM_RTDATAS */ + PVRSRV_DEVICE_NODE *psDeviceNode; + RGXFWIF_DEV_VIRTADDR sHWRTDataFwAddr; + + DEVMEM_MEMDESC *psHWRTDataFwMemDesc; + DEVMEM_MEMDESC *psRTArrayFwMemDesc; + DEVMEM_MEMDESC *psRendersAccArrayFwMemDesc; + + RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS]; +#if !defined(SUPPORT_SHADOW_FREELISTS) + DLLIST_NODE sNodeHWRTData; +#endif + +} RGX_KM_HW_RT_DATASET; + +struct _RGX_FREELIST_ { + PVRSRV_RGXDEV_INFO *psDevInfo; + + /* Free list PMR */ + PMR *psFreeListPMR; + IMG_DEVMEM_OFFSET_T uiFreeListPMROffset; + + /* Free list PM state PMR */ + PMR *psFreeListStatePMR; + IMG_DEVMEM_OFFSET_T uiFreeListStatePMROffset; + + /* Freelist config */ + IMG_UINT32 ui32MaxFLPages; + IMG_UINT32 ui32InitFLPages; + IMG_UINT32 ui32CurrentFLPages; + IMG_UINT32 ui32GrowFLPages; + IMG_UINT32 ui32ReadyFLPages; + IMG_UINT32 ui32GrowThreshold; /* Percentage of FL memory used that should trigger a new grow request */ + IMG_UINT32 ui32FreelistID; + IMG_UINT32 ui32FreelistGlobalID; /* related global freelist for this freelist */ + IMG_UINT64 ui64FreelistChecksum; /* checksum over freelist content */ + IMG_BOOL bCheckFreelist; /* freelist check enabled */ + IMG_UINT32 ui32RefCount; /* freelist reference counting */ + + IMG_UINT32 ui32NumGrowReqByApp; /* Total number of grow requests by Application */ + IMG_UINT32 ui32NumGrowReqByFW; /* Total Number of grow 
requests by Firmware */ + IMG_UINT32 ui32NumHighPages; /* High Mark of pages in the freelist */ + + IMG_PID ownerPid; /* Pid of the owner of the list */ + + /* Memory Blocks */ + DLLIST_NODE sMemoryBlockHead; + DLLIST_NODE sMemoryBlockInitHead; + DLLIST_NODE sNode; +#if !defined(SUPPORT_SHADOW_FREELISTS) + /* HWRTData nodes linked to local freelist */ + DLLIST_NODE sNodeHWRTDataHead; +#endif + + /* FW data structures */ + DEVMEM_MEMDESC *psFWFreelistMemDesc; + RGXFWIF_DEV_VIRTADDR sFreeListFWDevVAddr; + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + HASH_TABLE* psWorkloadHashTable; +#endif +}; + +struct _RGX_PMR_NODE_ { + RGX_FREELIST *psFreeList; + PMR *psPMR; + PMR_PAGELIST *psPageList; + DLLIST_NODE sMemoryBlock; + IMG_UINT32 ui32NumPages; + IMG_BOOL bFirstPageMissing; +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + RI_HANDLE hRIHandle; +#endif +}; + +typedef struct { + PVRSRV_RGXDEV_INFO *psDevInfo; + DEVMEM_MEMDESC *psFWZSBufferMemDesc; + RGXFWIF_DEV_VIRTADDR sZSBufferFWDevVAddr; + + DEVMEMINT_RESERVATION *psReservation; + PMR *psPMR; + DEVMEMINT_MAPPING *psMapping; + PVRSRV_MEMALLOCFLAGS_T uiMapFlags; + IMG_UINT32 ui32ZSBufferID; + IMG_UINT32 ui32RefCount; + IMG_BOOL bOnDemand; + + IMG_BOOL ui32NumReqByApp; /* Number of Backing Requests from Application */ + IMG_BOOL ui32NumReqByFW; /* Number of Backing Requests from Firmware */ + + IMG_PID owner; + + DLLIST_NODE sNode; +}RGX_ZSBUFFER_DATA; + +typedef struct { + RGX_ZSBUFFER_DATA *psZSBuffer; +} RGX_POPULATION; + +/* Dump the physical pages of a freelist */ +IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList); + + + +/* Create HWRTDataSet */ +PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEV_VIRTADDR psVHeapTableDevVAddr, + IMG_DEV_VIRTADDR sPMDataDevVAddr, + IMG_DEV_VIRTADDR sPMSecureDataDevVAddr, + RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS], + IMG_UINT32 ui32ScreenPixelMax, + IMG_UINT64 ui64PPPMultiSampleCtl, + IMG_UINT32 ui32TEStride, + 
IMG_DEV_VIRTADDR sTailPtrsDevVAddr, + IMG_UINT32 ui32TPCSize, + IMG_UINT32 ui32TEScreen, + IMG_UINT32 ui32TEAA, + IMG_UINT32 ui32TEMTILE1, + IMG_UINT32 ui32TEMTILE2, + IMG_UINT32 ui32RgnStride, + IMG_UINT32 ui32ISPMergeLowerX, + IMG_UINT32 ui32ISPMergeLowerY, + IMG_UINT32 ui32ISPMergeUpperX, + IMG_UINT32 ui32ISPMergeUpperY, + IMG_UINT32 ui32ISPMergeScaleX, + IMG_UINT32 ui32ISPMergeScaleY, + IMG_UINT16 ui16MaxRTs, + RGX_KM_HW_RT_DATASET **ppsKmHwRTDataSet); + +/* Destroy HWRTDataSet */ +PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKmHwRTDataSet); + +/* + RGXCreateZSBuffer +*/ +PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + DEVMEMINT_RESERVATION *psReservation, + PMR *psPMR, + PVRSRV_MEMALLOCFLAGS_T uiMapFlags, + RGX_ZSBUFFER_DATA **ppsZSBuffer); + +/* + RGXDestroyZSBufferKM +*/ +PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer); + + +/* + * RGXBackingZSBuffer() + * + * Backs ZS-Buffer with physical pages + */ +PVRSRV_ERROR +RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer); + +/* + * RGXPopulateZSBufferKM() + * + * Backs ZS-Buffer with physical pages (called by Bridge calls) + */ +PVRSRV_ERROR RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer, + RGX_POPULATION **ppsPopulation); + +/* + * RGXUnbackingZSBuffer() + * + * Frees ZS-Buffer's physical pages + */ +PVRSRV_ERROR RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer); + +/* + * RGXUnpopulateZSBufferKM() + * + * Frees ZS-Buffer's physical pages (called by Bridge calls) + */ +PVRSRV_ERROR RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation); + +/* + RGXProcessRequestZSBufferBacking +*/ +void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32ZSBufferID); + +/* + RGXProcessRequestZSBufferUnbacking +*/ +void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32ZSBufferID); + +/* + RGXGrowFreeList +*/ +PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, + IMG_UINT32 
ui32NumPages, + PDLLIST_NODE pListHeader); + +/* Create free list */ +PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32MaxFLPages, + IMG_UINT32 ui32InitFLPages, + IMG_UINT32 ui32GrowFLPages, + IMG_UINT32 ui32GrowParamThreshold, + RGX_FREELIST *psGlobalFreeList, + IMG_BOOL bCheckFreelist, + IMG_DEV_VIRTADDR sFreeListBaseDevVAddr, + IMG_DEV_VIRTADDR sFreeListStateDevVAddr, + PMR *psFreeListPMR, + IMG_DEVMEM_OFFSET_T uiFreeListPMROffset, + PMR *psFreeListStatePMR, + IMG_DEVMEM_OFFSET_T uiFreeListStatePMROffset, + RGX_FREELIST **ppsFreeList); + +/* Destroy free list */ +PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList); + +/* + RGXProcessRequestGrow +*/ +void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32FreelistID); + + +/* Reconstruct free list after Hardware Recovery */ +void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32FreelistsCount, + IMG_UINT32 *paui32Freelists); + +/*! 
+******************************************************************************* + + @Function PVRSRVRGXCreateRenderContextKM + + @Description + Server-side implementation of RGXCreateRenderContext + + @Input pvDeviceNode - device node + @Input ui32Priority - context priority + @Input hMemCtxPrivData - memory context private data + @Input ui32PackedCCBSizeU8888 : + ui8TACCBAllocSizeLog2 - TA CCB size + ui8TACCBMaxAllocSizeLog2 - maximum size to which TA CCB can grow + ui83DCCBAllocSizeLog2 - 3D CCB size + ui83DCCBMaxAllocSizeLog2 - maximum size to which 3D CCB can grow + @Input ui32ContextFlags - flags which specify properties of the context + @Output ppsRenderContext - clean up data + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32Priority, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32StaticRenderContextStateSize, + IMG_PBYTE pStaticRenderContextState, + IMG_UINT32 ui32PackedCCBSizeU8888, + IMG_UINT32 ui32ContextFlags, + IMG_UINT64 ui64RobustnessAddress, + IMG_UINT32 ui32MaxTADeadlineMS, + IMG_UINT32 ui32Max3DDeadlineMS, + RGX_SERVER_RENDER_CONTEXT **ppsRenderContext); + + +/*! +******************************************************************************* + + @Function PVRSRVRGXDestroyRenderContextKM + + @Description + Server-side implementation of RGXDestroyRenderContext + + @Input psRenderContext - + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext); + + +/*! 
+******************************************************************************* + + @Function PVRSRVRGXKickTA3DKM + + @Description + Server-side implementation of RGXKickTA3D + + @Input psRTDataCleanup - RT data associated with the kick (or NULL) + @Input psZBuffer - Z-buffer associated with the kick (or NULL) + @Input psSBuffer - S-buffer associated with the kick (or NULL) + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32ClientTAFenceCount, + SYNC_PRIMITIVE_BLOCK **apsClientTAFenceSyncPrimBlock, + IMG_UINT32 *paui32ClientTAFenceSyncOffset, + IMG_UINT32 *paui32ClientTAFenceValue, + IMG_UINT32 ui32ClientTAUpdateCount, + SYNC_PRIMITIVE_BLOCK **apsClientUpdateSyncPrimBlock, + IMG_UINT32 *paui32ClientUpdateSyncOffset, + IMG_UINT32 *paui32ClientTAUpdateValue, + IMG_UINT32 ui32Client3DUpdateCount, + SYNC_PRIMITIVE_BLOCK **apsClient3DUpdateSyncPrimBlock, + IMG_UINT32 *paui32Client3DUpdateSyncOffset, + IMG_UINT32 *paui32Client3DUpdateValue, + SYNC_PRIMITIVE_BLOCK *psPRSyncPrimBlock, + IMG_UINT32 ui32PRSyncOffset, + IMG_UINT32 ui32PRFenceValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE iUpdateTimeline, + PVRSRV_FENCE *piUpdateFence, + IMG_CHAR szFenceName[PVRSRV_SYNC_NAME_LENGTH], + PVRSRV_FENCE iCheckFence3D, + PVRSRV_TIMELINE iUpdateTimeline3D, + PVRSRV_FENCE *piUpdateFence3D, + IMG_CHAR szFenceName3D[PVRSRV_SYNC_NAME_LENGTH], + IMG_UINT32 ui32TACmdSize, + IMG_PBYTE pui8TADMCmd, + IMG_UINT32 ui323DPRCmdSize, + IMG_PBYTE pui83DPRDMCmd, + IMG_UINT32 ui323DCmdSize, + IMG_PBYTE pui83DDMCmd, + IMG_UINT32 ui32ExtJobRef, + IMG_BOOL bKickTA, + IMG_BOOL bKickPR, + IMG_BOOL bKick3D, + IMG_BOOL bAbort, + IMG_UINT32 ui32PDumpFlags, + RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, + RGX_ZSBUFFER_DATA *psZSBuffer, + RGX_ZSBUFFER_DATA *psMSAAScratchBuffer, + IMG_UINT32 ui32SyncPMRCount, + IMG_UINT32 
*paui32SyncPMRFlags, + PMR **ppsSyncPMRs, + IMG_UINT32 ui32RenderTargetSize, + IMG_UINT32 ui32NumberOfDrawCalls, + IMG_UINT32 ui32NumberOfIndices, + IMG_UINT32 ui32NumberOfMRTs, + IMG_UINT64 ui64DeadlineInus); + + +PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDevNode, + RGX_SERVER_RENDER_CONTEXT *psRenderContext, + IMG_UINT32 ui32Priority); + +PVRSRV_ERROR PVRSRVRGXSetRenderContextPropertyKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, + RGX_CONTEXT_PROPERTY eContextProperty, + IMG_UINT64 ui64Input, + IMG_UINT64 *pui64Output); + +PVRSRV_ERROR PVRSRVRGXGetLastRenderContextResetReasonKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, + IMG_UINT32 *peLastResetReason, + IMG_UINT32 *pui32LastResetJobRef); + +/* Debug - Dump debug info of render contexts on this device */ +void DumpRenderCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel); + +/* Debug/Watchdog - check if client contexts are stalled */ +IMG_UINT32 CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo); + +PVRSRV_ERROR RGXRenderContextStalledKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext); + +#endif /* RGXTA3D_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxtdmtransfer.c b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxtdmtransfer.c new file mode 100644 index 000000000000..227e73c0bf35 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxtdmtransfer.c @@ -0,0 +1,1278 @@ +/*************************************************************************/ /*! +@File rgxtdmtransfer.c +@Title Device specific TDM transfer queue routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pdump_km.h" +#include "rgxdevice.h" +#include "rgxccb.h" +#include "rgxutils.h" +#include "rgxfwutils.h" +#include "rgxtdmtransfer.h" +#include "rgx_tq_shared.h" +#include "rgxmem.h" +#include "allocmem.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "osfunc.h" +#include "pvr_debug.h" +#include "pvrsrv.h" +#include "rgx_memallocflags.h" +#include "rgxhwperf.h" +#include "ospvr_gputrace.h" +#include "htbuffer.h" +#include "rgxshader.h" + +#include "pdump_km.h" + +#include "sync_server.h" +#include "sync_internal.h" +#include "sync.h" + +#if defined(SUPPORT_BUFFER_SYNC) +#include "pvr_buffer_sync.h" +#endif + +#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) +#include "validation_soc.h" +#endif + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +#include "rgxworkest.h" +#endif + +/* Enable this to dump the compiled list of UFOs prior to kick call */ +#define ENABLE_TDM_UFO_DUMP 0 + +//#define TDM_CHECKPOINT_DEBUG 1 + +#if defined(TDM_CHECKPOINT_DEBUG) +#define CHKPT_DBG(X) PVR_DPF(X) +#else +#define CHKPT_DBG(X) +#endif + +typedef struct { + RGX_SERVER_COMMON_CONTEXT * psServerCommonContext; + IMG_UINT32 ui32Priority; +#if defined(SUPPORT_BUFFER_SYNC) + struct pvr_buffer_sync_context *psBufferSyncContext; +#endif +} RGX_SERVER_TQ_TDM_DATA; + + +struct _RGX_SERVER_TQ_TDM_CONTEXT_ { + PVRSRV_DEVICE_NODE *psDeviceNode; + DEVMEM_MEMDESC *psFWTransferContextMemDesc; + IMG_UINT32 ui32Flags; + RGX_SERVER_TQ_TDM_DATA sTDMData; + DLLIST_NODE sListNode; + SYNC_ADDR_LIST sSyncAddrListFence; + SYNC_ADDR_LIST sSyncAddrListUpdate; + POS_LOCK hLock; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + WORKEST_HOST_DATA sWorkEstData; +#endif +}; + +static PVRSRV_ERROR _CreateTDMTransferContext( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + DEVMEM_MEMDESC * psAllocatedMemDesc, + IMG_UINT32 ui32AllocatedOffset, + SERVER_MMU_CONTEXT * 
psServerMMUContext, + DEVMEM_MEMDESC * psFWMemContextMemDesc, + IMG_UINT32 ui32Priority, + RGX_COMMON_CONTEXT_INFO * psInfo, + RGX_SERVER_TQ_TDM_DATA * psTDMData, + IMG_UINT32 ui32CCBAllocSizeLog2, + IMG_UINT32 ui32CCBMaxAllocSizeLog2, + IMG_UINT32 ui32ContextFlags) +{ + PVRSRV_ERROR eError; + +#if defined(SUPPORT_BUFFER_SYNC) + psTDMData->psBufferSyncContext = + pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice, + "rogue-tdm"); + if (IS_ERR(psTDMData->psBufferSyncContext)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to create buffer_sync context (err=%ld)", + __func__, PTR_ERR(psTDMData->psBufferSyncContext))); + + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto fail_buffer_sync_context_create; + } +#endif + + eError = FWCommonContextAllocate( + psConnection, + psDeviceNode, + REQ_TYPE_TQ_TDM, + RGXFWIF_DM_TDM, + psServerMMUContext, + psAllocatedMemDesc, + ui32AllocatedOffset, + psFWMemContextMemDesc, + NULL, + ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TDM_CCB_SIZE_LOG2, + ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TDM_CCB_MAX_SIZE_LOG2, + ui32ContextFlags, + ui32Priority, + UINT_MAX, /* max deadline MS */ + 0, /* robustness address */ + psInfo, + &psTDMData->psServerCommonContext); + if (eError != PVRSRV_OK) + { + goto fail_contextalloc; + } + + psTDMData->ui32Priority = ui32Priority; + return PVRSRV_OK; + +fail_contextalloc: +#if defined(SUPPORT_BUFFER_SYNC) + pvr_buffer_sync_context_destroy(psTDMData->psBufferSyncContext); + psTDMData->psBufferSyncContext = NULL; +fail_buffer_sync_context_create: +#endif + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + + +static PVRSRV_ERROR _DestroyTDMTransferContext( + RGX_SERVER_TQ_TDM_DATA * psTDMData, + PVRSRV_DEVICE_NODE * psDeviceNode) +{ + PVRSRV_ERROR eError; + + /* Check if the FW has finished with this resource ... 
*/ + eError = RGXFWRequestCommonContextCleanUp( + psDeviceNode, + psTDMData->psServerCommonContext, + RGXFWIF_DM_TDM, + PDUMP_FLAGS_NONE); + if (eError == PVRSRV_ERROR_RETRY) + { + return eError; + } + else if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + + /* ... it has so we can free it's resources */ + FWCommonContextFree(psTDMData->psServerCommonContext); + +#if defined(SUPPORT_BUFFER_SYNC) + pvr_buffer_sync_context_destroy(psTDMData->psBufferSyncContext); + psTDMData->psBufferSyncContext = NULL; +#endif + + return PVRSRV_OK; +} + +/* + * PVRSRVCreateTransferContextKM + */ +PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32Priority, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32PackedCCBSizeU88, + IMG_UINT32 ui32ContextFlags, + RGX_SERVER_TQ_TDM_CONTEXT ** ppsTransferContext) +{ + RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext; + + DEVMEM_MEMDESC * psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); + PVRSRV_RGXDEV_INFO * psDevInfo = psDeviceNode->pvDevice; + RGX_COMMON_CONTEXT_INFO sInfo; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Allocate the server side structure */ + *ppsTransferContext = NULL; + psTransferContext = OSAllocZMem(sizeof(*psTransferContext)); + if (psTransferContext == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* + Create the FW transfer context, this has the TDM common + context embedded within it + */ + eError = DevmemFwAllocate(psDevInfo, + sizeof(RGXFWIF_FWTDMCONTEXT), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwTransferContext", + &psTransferContext->psFWTransferContextMemDesc); + if (eError != PVRSRV_OK) + { + goto fail_fwtransfercontext; + } + + eError = OSLockCreate(&psTransferContext->hLock); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)", + __func__, 
+ PVRSRVGetErrorString(eError))); + goto fail_lockcreate; + } + + psTransferContext->psDeviceNode = psDeviceNode; + + + eError = _CreateTDMTransferContext(psConnection, + psDeviceNode, + psTransferContext->psFWTransferContextMemDesc, + offsetof(RGXFWIF_FWTDMCONTEXT, sTDMContext), + hMemCtxPrivData, + psFWMemContextMemDesc, + ui32Priority, + &sInfo, + &psTransferContext->sTDMData, + U32toU8_Unpack1(ui32PackedCCBSizeU88), + U32toU8_Unpack2(ui32PackedCCBSizeU88), + ui32ContextFlags); + if (eError != PVRSRV_OK) + { + goto fail_tdmtransfercontext; + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + WorkEstInitTDM(psDevInfo, &psTransferContext->sWorkEstData); +#endif + + SyncAddrListInit(&psTransferContext->sSyncAddrListFence); + SyncAddrListInit(&psTransferContext->sSyncAddrListUpdate); + + OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock); + dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock); + *ppsTransferContext = psTransferContext; + + return PVRSRV_OK; + +fail_tdmtransfercontext: + OSLockDestroy(psTransferContext->hLock); +fail_lockcreate: + DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc); +fail_fwtransfercontext: + OSFreeMem(psTransferContext); + PVR_ASSERT(eError != PVRSRV_OK); + *ppsTransferContext = NULL; + return eError; +} + +PVRSRV_ERROR PVRSRVRGXTDMGetSharedMemoryKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + PMR ** ppsCLIPMRMem, + PMR ** ppsUSCPMRMem) +{ + PVRSRVTQAcquireShaders(psDeviceNode, ppsCLIPMRMem, ppsUSCPMRMem); + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVRGXTDMReleaseSharedMemoryKM(PMR * psPMRMem) +{ + PVR_UNREFERENCED_PARAMETER(psPMRMem); + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) 
+ RGXFWIF_FWTDMCONTEXT *psFWTransferContext; + IMG_UINT32 ui32WorkEstCCBSubmitted; + + eError = DevmemAcquireCpuVirtAddr(psTransferContext->psFWTransferContextMemDesc, + (void **)&psFWTransferContext); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map firmware transfer context (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + + ui32WorkEstCCBSubmitted = psFWTransferContext->ui32WorkEstCCBSubmitted; + + DevmemReleaseCpuVirtAddr(psTransferContext->psFWTransferContextMemDesc); + + /* Check if all of the workload estimation CCB commands for this workload are read */ + if (ui32WorkEstCCBSubmitted != psTransferContext->sWorkEstData.ui32WorkEstCCBReceived) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch", + __func__, ui32WorkEstCCBSubmitted, + psTransferContext->sWorkEstData.ui32WorkEstCCBReceived)); + + return PVRSRV_ERROR_RETRY; + } +#endif + + + /* remove node from list before calling destroy - as destroy, if successful + * will invalidate the node + * must be re-added if destroy fails + */ + OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock); + dllist_remove_node(&(psTransferContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock); + + + eError = _DestroyTDMTransferContext(&psTransferContext->sTDMData, + psTransferContext->psDeviceNode); + if (eError != PVRSRV_OK) + { + goto fail_destroyTDM; + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + WorkEstDeInitTDM(psDevInfo, &psTransferContext->sWorkEstData); +#endif + + SyncAddrListDeinit(&psTransferContext->sSyncAddrListFence); + SyncAddrListDeinit(&psTransferContext->sSyncAddrListUpdate); + + DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc); + + OSLockDestroy(psTransferContext->hLock); + + OSFreeMem(psTransferContext); + + return PVRSRV_OK; + +fail_destroyTDM: + + OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock); + dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), 
&(psTransferContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock); + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + + +/* + * PVRSRVSubmitTQ3DKickKM + */ +PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( + RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFODevVarBlock, + IMG_UINT32 * paui32ClientUpdateSyncOffset, + IMG_UINT32 * paui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE iUpdateTimeline, + PVRSRV_FENCE * piUpdateFence, + IMG_CHAR szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], + IMG_UINT32 ui32FWCommandSize, + IMG_UINT8 * pui8FWCommand, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32SyncPMRCount, + IMG_UINT32 * paui32SyncPMRFlags, + PMR ** ppsSyncPMRs, + IMG_UINT32 ui32TDMCharacteristic1, + IMG_UINT32 ui32TDMCharacteristic2, + IMG_UINT64 ui64DeadlineInus) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode; + RGX_CCB_CMD_HELPER_DATA *psCmdHelper; + PRGXFWIF_UFO_ADDR * pauiIntFenceUFOAddress = NULL; + PRGXFWIF_UFO_ADDR * pauiIntUpdateUFOAddress = NULL; + IMG_UINT32 ui32IntClientFenceCount = 0; + IMG_UINT32 * paui32IntUpdateValue = paui32ClientUpdateValue; + IMG_UINT32 ui32IntClientUpdateCount = ui32ClientUpdateCount; + PVRSRV_ERROR eError; + PVRSRV_ERROR eError2; + PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE; + PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psTransferContext->sTDMData.psServerCommonContext); + RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext); + IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); + + IMG_UINT64 ui64FBSCEntryMask; + + IMG_UINT32 ui32CmdOffset = 0; + IMG_BOOL bCCBStateOpen; + + IMG_UINT64 uiCheckFenceUID = 0; + IMG_UINT64 uiUpdateFenceUID = 0; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + RGXFWIF_WORKEST_KICK_DATA 
sWorkloadKickDataTransfer = {0}; + IMG_UINT32 ui32TDMWorkloadDataRO = 0; + IMG_UINT32 ui32TDMCmdHeaderOffset = 0; + IMG_UINT32 ui32TDMCmdOffsetWrapCheck = 0; + RGX_WORKLOAD sWorkloadCharacteristics = {0}; +#endif + +#if defined(SUPPORT_BUFFER_SYNC) + struct pvr_buffer_sync_append_data *psBufferSyncData = NULL; + PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL; + IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0; + PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL; +#endif + + PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL; + PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; + IMG_UINT32 ui32FenceSyncCheckpointCount = 0; + IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL; + PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL; + IMG_UINT32 ui32FenceTimelineUpdateValue = 0; + void *pvUpdateFenceFinaliseData = NULL; + + if (iUpdateTimeline >= 0 && !piUpdateFence) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + +#if !defined(SUPPORT_WORKLOAD_ESTIMATION) + PVR_UNREFERENCED_PARAMETER(ui32TDMCharacteristic1); + PVR_UNREFERENCED_PARAMETER(ui32TDMCharacteristic2); + PVR_UNREFERENCED_PARAMETER(ui64DeadlineInus); +#endif + + /* Ensure we haven't been given a null ptr to + * update values if we have been told we + * have updates + */ + if (ui32ClientUpdateCount > 0) + { + PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL, + "paui32ClientUpdateValue NULL but " + "ui32ClientUpdateCount > 0", + PVRSRV_ERROR_INVALID_PARAMS); + } + + /* Ensure the string is null-terminated (Required for safety) */ + szUpdateFenceName[31] = '\0'; + + if (ui32SyncPMRCount != 0) + { + if (!ppsSyncPMRs) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + + OSLockAcquire(psTransferContext->hLock); + + /* We can't allocate the required amount of stack space on all consumer architectures */ + psCmdHelper = OSAllocMem(sizeof(RGX_CCB_CMD_HELPER_DATA)); + if (psCmdHelper == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_allochelper; + } + + + /* + Init the command helper 
commands for all the prepares + */ + { + IMG_CHAR *pszCommandName; + RGXFWIF_CCB_CMD_TYPE eType; +#if defined(SUPPORT_BUFFER_SYNC) + struct pvr_buffer_sync_context *psBufferSyncContext; +#endif + + pszCommandName = "TQ-TDM"; + + if (ui32FWCommandSize == 0) + { + /* A NULL CMD for TDM is used to append updates to a non finished + * FW command. bCCBStateOpen is used in case capture range is + * entered on this command, to not drain CCB up to the Roff for this + * command, but the finished command prior to this. + */ + bCCBStateOpen = IMG_TRUE; + eType = RGXFWIF_CCB_CMD_TYPE_NULL; + } + else + { + bCCBStateOpen = IMG_FALSE; + eType = RGXFWIF_CCB_CMD_TYPE_TQ_TDM; + } + +#if defined(SUPPORT_BUFFER_SYNC) + psBufferSyncContext = psTransferContext->sTDMData.psBufferSyncContext; +#endif + + eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListFence, + 0, + NULL, + NULL); + if (eError != PVRSRV_OK) + { + goto fail_populate_sync_addr_list; + } + + eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListUpdate, + ui32ClientUpdateCount, + pauiClientUpdateUFODevVarBlock, + paui32ClientUpdateSyncOffset); + if (eError != PVRSRV_OK) + { + goto fail_populate_sync_addr_list; + } + paui32IntUpdateValue = paui32ClientUpdateValue; + pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs; + + + if (ui32SyncPMRCount) + { +#if defined(SUPPORT_BUFFER_SYNC) + int err; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling pvr_buffer_sync_resolve_and_create_fences", __func__)); + err = pvr_buffer_sync_resolve_and_create_fences(psBufferSyncContext, + psTransferContext->psDeviceNode->hSyncCheckpointContext, + ui32SyncPMRCount, + ppsSyncPMRs, + paui32SyncPMRFlags, + &ui32BufferFenceSyncCheckpointCount, + &apsBufferFenceSyncCheckpoints, + &psBufferUpdateSyncCheckpoint, + &psBufferSyncData); + if (err) + { + switch (err) + { + case -EINTR: + eError = PVRSRV_ERROR_RETRY; + break; + case -ENOMEM: + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + break; + default: + eError = 
PVRSRV_ERROR_INVALID_PARAMS; + break; + } + + if (eError != PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_ERROR, "%s: pvr_buffer_sync_resolve_and_create_fences failed (%s)", __func__, PVRSRVGetErrorString(eError))); + } + goto fail_resolve_input_fence; + } + + /* Append buffer sync fences */ + if (ui32BufferFenceSyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d buffer sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>)...", __func__, ui32BufferFenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence , (void*)pauiIntFenceUFOAddress)); + SyncAddrListAppendAndDeRefCheckpoints(&psTransferContext->sSyncAddrListFence, + ui32BufferFenceSyncCheckpointCount, + apsBufferFenceSyncCheckpoints); + if (!pauiIntFenceUFOAddress) + { + pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs; + } + ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount; + } + + if (psBufferUpdateSyncCheckpoint) + { + /* Append the update (from output fence) */ + SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate, + 1, + &psBufferUpdateSyncCheckpoint); + if (!pauiIntUpdateUFOAddress) + { + pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs; + } + ui32IntClientUpdateCount++; + } +#else /* defined(SUPPORT_BUFFER_SYNC) */ + PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __func__, ui32SyncPMRCount)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto fail_populate_sync_addr_list; +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + } + + /* Resolve the sync checkpoints that make up the input fence */ + eError = SyncCheckpointResolveFence(psTransferContext->psDeviceNode->hSyncCheckpointContext, + iCheckFence, + &ui32FenceSyncCheckpointCount, + &apsFenceSyncCheckpoints, + &uiCheckFenceUID, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + goto fail_resolve_input_fence; + } +#if defined(TDM_CHECKPOINT_DEBUG) + { + IMG_UINT32 ii; + for 
(ii=0; ii<32; ii++) + { + PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii); + CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint)); //psFenceSyncCheckpoints[ii])); + } + } +#endif + /* Create the output fence (if required) */ + if (iUpdateTimeline != PVRSRV_NO_TIMELINE) + { + eError = SyncCheckpointCreateFence(psTransferContext->psDeviceNode, + szUpdateFenceName, + iUpdateTimeline, + psTransferContext->psDeviceNode->hSyncCheckpointContext, + &iUpdateFence, + &uiUpdateFenceUID, + &pvUpdateFenceFinaliseData, + &psUpdateSyncCheckpoint, + (void*)&psFenceTimelineUpdateSync, + &ui32FenceTimelineUpdateValue, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + goto fail_create_output_fence; + } + + /* Append the sync prim update for the timeline (if required) */ + if (psFenceTimelineUpdateSync) + { + IMG_UINT32 *pui32TimelineUpdateWp = NULL; + + /* Allocate memory to hold the list of update values (including our timeline update) */ + pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); + if (!pui32IntAllocatedUpdateValues) + { + /* Failed to allocate memory */ + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc_update_values_mem; + } + OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); + /* Copy the update values into the new memory, then append our timeline update value */ + if (paui32IntUpdateValue) + { + OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount); + } + /* Now set the additional update value */ + pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount; + *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue; + ui32IntClientUpdateCount++; +#if defined(TDM_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = 
(IMG_UINT32*)pui32IntAllocatedUpdateValues; + + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + /* Now append the timeline sync prim addr to the transfer context update list */ + SyncAddrListAppendSyncPrim(&psTransferContext->sSyncAddrListUpdate, + psFenceTimelineUpdateSync); +#if defined(TDM_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; + + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */ + paui32IntUpdateValue = pui32IntAllocatedUpdateValues; + } + } + + if (ui32FenceSyncCheckpointCount) + { + /* Append the checks (from input fence) */ + if (ui32FenceSyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence)); +#if defined(TDM_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress; + + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListFence, + ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + if (!pauiIntFenceUFOAddress) + { + pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs; + } + ui32IntClientFenceCount += ui32FenceSyncCheckpointCount; + } +#if defined(TDM_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; + + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + } + if (psUpdateSyncCheckpoint) + { + /* Append the update (from output fence) */ + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to TQ Update 
(&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)&psTransferContext->sSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress)); + SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate, + 1, + &psUpdateSyncCheckpoint); + if (!pauiIntUpdateUFOAddress) + { + pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs; + } + ui32IntClientUpdateCount++; +#if defined(TDM_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; + + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + } + +#if (ENABLE_TDM_UFO_DUMP == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: dumping TDM fence/updates syncs...", __func__)); + { + IMG_UINT32 ii; + PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress; + PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress; + IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue; + + /* Dump Fence syncs and Update syncs */ + PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM fence syncs (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psTransferContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress)); + for (ii=0; ii. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr)); + psTmpIntFenceUFOAddress++; + } + PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM update syncs (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psTransferContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress)); + for (ii=0; iiui32Addr & 0x1) + { + PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. 
FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue)); + pui32TmpIntUpdateValue++; + } + psTmpIntUpdateUFOAddress++; + } + } +#endif + + /* + * Extract the FBSC entries from MMU Context for the deferred FBSC invalidate command, + * in other words, take the value and set it to zero afterwards. + * FBSC Entry Mask must be extracted from MMU ctx and updated just before the kick starts + * as it must be ready at the time of context activation. + */ + { + eError = RGXExtractFBSCEntryMaskFromMMUContext(psTransferContext->psDeviceNode, + FWCommonContextGetServerMMUCtx(psTransferContext->sTDMData.psServerCommonContext), + &ui64FBSCEntryMask); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to extract FBSC Entry Mask (%d)", eError)); + goto fail_invalfbsc; + } + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + sWorkloadCharacteristics.sTransfer.ui32Characteristic1 = ui32TDMCharacteristic1; + sWorkloadCharacteristics.sTransfer.ui32Characteristic2 = ui32TDMCharacteristic2; + + /* Prepare workload estimation */ + WorkEstPrepare(psDeviceNode->pvDevice, + &psTransferContext->sWorkEstData, + &psTransferContext->sWorkEstData.uWorkloadMatchingData.sTransfer.sDataTDM, + eType, + &sWorkloadCharacteristics, + ui64DeadlineInus, + &sWorkloadKickDataTransfer); +#endif + + /* + Create the command helper data for this command + */ + RGXCmdHelperInitCmdCCB(psClientCCB, + ui64FBSCEntryMask, + ui32IntClientFenceCount, + pauiIntFenceUFOAddress, + NULL, + ui32IntClientUpdateCount, + pauiIntUpdateUFOAddress, + paui32IntUpdateValue, + ui32FWCommandSize, + pui8FWCommand, + eType, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, +#if 
defined(SUPPORT_WORKLOAD_ESTIMATION) + &sWorkloadKickDataTransfer, +#else /* SUPPORT_WORKLOAD_ESTIMATION */ + NULL, +#endif /* SUPPORT_WORKLOAD_ESTIMATION */ + pszCommandName, + bCCBStateOpen, + psCmdHelper); + } + + /* + Acquire space for all the commands in one go + */ + + eError = RGXCmdHelperAcquireCmdCCB(1, psCmdHelper); + if (eError != PVRSRV_OK) + { + goto fail_3dcmdacquire; + } + + + /* + We should acquire the kernel CCB(s) space here as the schedule could fail + and we would have to roll back all the syncs + */ + + /* + Only do the command helper release (which takes the server sync + operations if the acquire succeeded + */ + ui32CmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext)); + RGXCmdHelperReleaseCmdCCB(1, + psCmdHelper, + "TQ_TDM", + FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr); + + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* The following is used to determine the offset of the command header containing + the workload estimation data so that can be accessed when the KCCB is read */ + ui32TDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(psCmdHelper); + + ui32TDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext)); + + /* This checks if the command would wrap around at the end of the CCB and + * therefore would start at an offset of 0 rather than the current command + * offset */ + if (ui32CmdOffset < ui32TDMCmdOffsetWrapCheck) + { + ui32TDMWorkloadDataRO = ui32CmdOffset; + } + else + { + ui32TDMWorkloadDataRO = 0; + } +#endif + + /* + Even if we failed to acquire the client CCB space we might still need + to kick the HW to process a padding packet to release space for us next + time round + */ + { + RGXFWIF_KCCB_CMD sTDMKCCBCmd; + IMG_UINT32 ui32FWAddr = FWCommonContextGetFWAddress( + psTransferContext->sTDMData.psServerCommonContext).ui32Addr; + + /* 
Construct the kernel 3D CCB command. */ + sTDMKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + sTDMKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext); + sTDMKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); + sTDMKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); + sTDMKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; + + /* Add the Workload data into the KCCB kick */ +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Store the offset to the CCCB command header so that it can be referenced + * when the KCCB command reaches the FW */ + sTDMKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32TDMWorkloadDataRO + ui32TDMCmdHeaderOffset; +#else + sTDMKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; +#endif + + /* HTBLOGK(HTB_SF_MAIN_KICK_TDM, */ + /* s3DKCCBCmd.uCmdData.sCmdKickData.psContext, */ + /* ui323DCmdOffset); */ + RGXSRV_HWPERF_ENQ(psTransferContext, + OSGetCurrentClientProcessIDKM(), + FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr, + ui32ExtJobRef, + ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_TQTDM, + iCheckFence, + iUpdateFence, + iUpdateTimeline, + uiCheckFenceUID, + uiUpdateFenceUID); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError2 = RGXScheduleCommand(psDeviceNode->pvDevice, + FWCommonContextGetServerMMUCtx(psTransferContext->sTDMData.psServerCommonContext), + RGXFWIF_DM_TDM, + & sTDMKCCBCmd, + ui32ClientCacheOpSeqNum, + ui32PDumpFlags); + if (eError2 != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + PVRGpuTraceEnqueueEvent(psDeviceNode->pvDevice, ui32FWAddr, ui32ExtJobRef, + ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TQTDM); + } + + /* + * Now check eError (which may have returned an error from our earlier calls + * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first + * so 
we check it now... + */ + if (eError != PVRSRV_OK ) + { + goto fail_2dcmdacquire; + } + +#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) && defined(NO_HARDWARE) && defined(PDUMP) + if (psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER) + { + PVRSRVValidateSOCUSCTimer(psDevInfo, PDUMP_CONT, 0, 0, NULL); + } +#endif + +#if defined(NO_HARDWARE) + /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ + if (psUpdateSyncCheckpoint) + { + SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint); + } + if (psFenceTimelineUpdateSync) + { + SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue); + } + SyncCheckpointNoHWUpdateTimelines(NULL); +#endif /* defined(NO_HARDWARE) */ + +#if defined(SUPPORT_BUFFER_SYNC) + if (psBufferSyncData) + { + pvr_buffer_sync_kick_succeeded(psBufferSyncData); + } + if (apsBufferFenceSyncCheckpoints) + { + kfree(apsBufferFenceSyncCheckpoints); + } +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + + * piUpdateFence = iUpdateFence; + if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE)) + { + SyncCheckpointFinaliseFence(psDeviceNode, iUpdateFence, pvUpdateFenceFinaliseData, + psUpdateSyncCheckpoint, szUpdateFenceName); + } + + OSFreeMem(psCmdHelper); + + /* Drop the references taken on the sync checkpoints in the + * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } + /* Free memory allocated to hold the internal list of update values */ + if (pui32IntAllocatedUpdateValues) + { + OSFreeMem(pui32IntAllocatedUpdateValues); + pui32IntAllocatedUpdateValues = NULL; + } + + OSLockRelease(psTransferContext->hLock); + return PVRSRV_OK; + +/* + No resources are created in this function so there is 
nothing to free + unless we had to merge syncs. + If we fail after the client CCB acquire there is still nothing to do + as only the client CCB release will modify the client CCB +*/ +fail_2dcmdacquire: +fail_3dcmdacquire: + +fail_invalfbsc: + SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListFence); + SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListUpdate); +fail_alloc_update_values_mem: + +/* fail_pdumpcheck: */ +/* fail_cmdtype: */ + + if (iUpdateFence != PVRSRV_NO_FENCE) + { + SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData); + } +fail_create_output_fence: + /* Drop the references taken on the sync checkpoints in the + * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + +fail_resolve_input_fence: + +#if defined(SUPPORT_BUFFER_SYNC) + if (psBufferSyncData) + { + pvr_buffer_sync_kick_failed(psBufferSyncData); + } + if (apsBufferFenceSyncCheckpoints) + { + kfree(apsBufferFenceSyncCheckpoints); + } +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + +fail_populate_sync_addr_list: + PVR_ASSERT(eError != PVRSRV_OK); + OSFreeMem(psCmdHelper); +fail_allochelper: + + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } + OSLockRelease(psTransferContext->hLock); + return eError; +} + + +PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM( + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, + IMG_UINT32 ui32PDumpFlags) +{ + RGXFWIF_KCCB_CMD sKCCBCmd; + PVRSRV_ERROR eError; + + OSLockAcquire(psTransferContext->hLock); + + /* Schedule the firmware command */ + sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE; + sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = 
RGXScheduleCommand(psTransferContext->psDeviceNode->pvDevice,
		                            FWCommonContextGetServerMMUCtx(psTransferContext->sTDMData.psServerCommonContext),
		                            RGXFWIF_DM_TDM,
		                            &sKCCBCmd,
		                            0,
		                            ui32PDumpFlags);
		if (eError != PVRSRV_ERROR_RETRY)
		{
			break;
		}
		/* KCCB full - back off and retry until the overall timeout */
		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
	} END_LOOP_UNTIL_TIMEOUT();

	if (eError != PVRSRV_OK)
	{
		/* NOTE(review): the all-caps PVRSRVGETERRORSTRING differs from the
		 * PVRSRVGetErrorString() spelling used elsewhere in this file -
		 * confirm this macro variant exists in this DDK version. */
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: Failed to schedule the FW command %d (%s)",
		         __func__, eError, PVRSRVGETERRORSTRING(eError)));
	}

	OSLockRelease(psTransferContext->hLock);
	return eError;
}

/*
 * Change the firmware scheduling priority of a TDM transfer context.
 * The request is forwarded to the FW only when the new priority differs
 * from the cached value; psTransferContext->hLock serialises the update.
 */
PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
                                                      PVRSRV_DEVICE_NODE * psDevNode,
                                                      RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
                                                      IMG_UINT32 ui32Priority)
{
	PVRSRV_ERROR eError;

	PVR_UNREFERENCED_PARAMETER(psDevNode);

	OSLockAcquire(psTransferContext->hLock);

	if (psTransferContext->sTDMData.ui32Priority != ui32Priority)
	{
		eError = ContextSetPriority(psTransferContext->sTDMData.psServerCommonContext,
		                            psConnection,
		                            psTransferContext->psDeviceNode->pvDevice,
		                            ui32Priority,
		                            RGXFWIF_DM_TDM);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority (%s)", __func__, PVRSRVGetErrorString(eError)));

			OSLockRelease(psTransferContext->hLock);
			return eError;
		}
		/* NOTE(review): the cached sTDMData.ui32Priority is never updated
		 * after a successful ContextSetPriority(), so the comparison above
		 * stays stale and every subsequent call re-issues the FW request -
		 * looks like a bug; confirm against the upstream DDK sources. */
	}

	OSLockRelease(psTransferContext->hLock);
	return PVRSRV_OK;
}

/*
 * Set a property on a TDM transfer context.  Currently only
 * RGX_CONTEXT_PROPERTY_FLAGS is supported; the flags are pushed to the
 * firmware common context and mirrored in psTransferContext->ui32Flags.
 */
PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPropertyKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
                                                      RGX_CONTEXT_PROPERTY eContextProperty,
                                                      IMG_UINT64 ui64Input,
                                                      IMG_UINT64 *pui64Output)
{
	PVRSRV_ERROR eError;

	switch (eContextProperty)
	{
		case RGX_CONTEXT_PROPERTY_FLAGS:
		{
			OSLockAcquire(psTransferContext->hLock);
			eError = FWCommonContextSetFlags(psTransferContext->sTDMData.psServerCommonContext,
			                                 (IMG_UINT32)ui64Input);
			if (eError == PVRSRV_OK)
			{
				/* Cache the flags only once the FW accepted them */
				psTransferContext->ui32Flags = (IMG_UINT32)ui64Input;
			}
			OSLockRelease(psTransferContext->hLock);
PVR_LOG_IF_ERROR(eError, "FWCommonContextSetFlags");
			break;
		}

		default:
		{
			PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty));
			eError = PVRSRV_ERROR_NOT_SUPPORTED;
		}
	}

	return eError;
}

/*
 * Debug helper: walk the device's list of TDM transfer contexts and dump
 * the FW common-context state of each via the supplied printf callback.
 * The list is protected by the hTDMCtxListLock read lock for the walk.
 */
void DumpTDMTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
                              DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
                              void *pvDumpDebugFile,
                              IMG_UINT32 ui32VerbLevel)
{
	DLLIST_NODE *psNode, *psNext;

	OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock);

	dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext)
	{
		RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx =
			IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode);

		DumpFWCommonContextInfo(psCurrentServerTransferCtx->sTDMData.psServerCommonContext,
		                        pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
	}

	OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock);
}


/*
 * Watchdog helper: check every TDM transfer context on the device for a
 * stalled client CCB.  Returns RGX_KICK_TYPE_DM_TDM_2D if at least one
 * context reports PVRSRV_ERROR_CCCB_STALLED, 0 otherwise.
 */
IMG_UINT32 CheckForStalledClientTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
{
	DLLIST_NODE *psNode, *psNext;
	IMG_UINT32 ui32ContextBitMask = 0;

	OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock);

	dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext)
	{
		RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx =
			IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode);

		if (CheckStalledClientCommonContext(
			psCurrentServerTransferCtx->sTDMData.psServerCommonContext, RGX_KICK_TYPE_DM_TDM_2D)
			== PVRSRV_ERROR_CCCB_STALLED) {
			ui32ContextBitMask = RGX_KICK_TYPE_DM_TDM_2D;
		}
	}

	OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock);
	return ui32ContextBitMask;
}

/**************************************************************************//**
 End of file (rgxtdmtransfer.c)
******************************************************************************/
diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxtdmtransfer.h b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxtdmtransfer.h
new file mode 100644 index 000000000000..2ae1c48036b5 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxtdmtransfer.h @@ -0,0 +1,129 @@ +/*************************************************************************/ /*! +@File rgxtdmtransfer.h +@Title RGX Transfer queue 2 Functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the RGX Transfer queue Functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 

EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ /**************************************************************************/

#if !defined(__RGXTDMTRANSFER_H__)
#define __RGXTDMTRANSFER_H__

#include "devicemem.h"
#include "device.h"
#include "rgxdevice.h"
#include "rgxfwutils.h"
#include "rgxdebug.h"
#include "pvr_notifier.h"

#include "sync_server.h"
#include "connection_server.h"

/* Opaque server-side TDM transfer context; defined in rgxtdmtransfer.c */
typedef struct _RGX_SERVER_TQ_TDM_CONTEXT_ RGX_SERVER_TQ_TDM_CONTEXT;


/* Create a TDM transfer context for the given connection/device. */
PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM(
	CONNECTION_DATA    * psConnection,
	PVRSRV_DEVICE_NODE * psDeviceNode,
	IMG_UINT32           ui32Priority,
	IMG_HANDLE           hMemCtxPrivData,
	IMG_UINT32           ui32PackedCCBSizeU88,
	IMG_UINT32           ui32ContextFlags,
	RGX_SERVER_TQ_TDM_CONTEXT **ppsTransferContext);


/* Return the shared CLI/USC PMRs used by TDM transfers. */
PVRSRV_ERROR PVRSRVRGXTDMGetSharedMemoryKM(
	CONNECTION_DATA    * psConnection,
	PVRSRV_DEVICE_NODE * psDeviceNode,
	PMR                ** ppsCLIPMRMem,
	PMR                ** ppsUSCPMRMem);


/* Release a shared memory PMR obtained from PVRSRVRGXTDMGetSharedMemoryKM. */
PVRSRV_ERROR PVRSRVRGXTDMReleaseSharedMemoryKM(PMR * psUSCPMRMem);


/* Destroy a TDM transfer context created by PVRSRVRGXTDMCreateTransferContextKM. */
PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext);


/* Submit a TDM transfer command; see rgxtdmtransfer.c for full semantics.
 * NOTE(review): the flags parameter is named pui32SyncPMRFlags here but
 * paui32SyncPMRFlags in the implementation - harmless, but worth aligning. */
PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM(
	RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext,
	IMG_UINT32                  ui32PDumpFlags,
	IMG_UINT32                  ui32ClientCacheOpSeqNum,
	IMG_UINT32                  ui32ClientUpdateCount,
	SYNC_PRIMITIVE_BLOCK     ** pauiClientUpdateUFODevVarBlock,
	IMG_UINT32                * paui32ClientUpdateSyncOffset,
	IMG_UINT32                * paui32ClientUpdateValue,
	PVRSRV_FENCE                iCheckFence,
	PVRSRV_TIMELINE             iUpdateTimeline,
	PVRSRV_FENCE              * piUpdateFence,
	IMG_CHAR                    szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
	IMG_UINT32                  ui32FWCommandSize,
	IMG_UINT8                 * pui8FWCommand,
	IMG_UINT32                  ui32ExtJobRef,
	IMG_UINT32                  ui32SyncPMRCount,
	IMG_UINT32                * pui32SyncPMRFlags,
	PMR                      ** ppsSyncPMRs,
	IMG_UINT32                  ui32TDMCharacteristic1,
	IMG_UINT32                  ui32TDMCharacteristic2,
	IMG_UINT64                  ui64DeadlineInus);

/* Notify the firmware that the context's CCB write offset was updated. */
PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(
	RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
	IMG_UINT32                 ui32PDumpFlags);

/* Change the FW scheduling priority of a TDM transfer context. */
PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
                                                      PVRSRV_DEVICE_NODE * psDeviceNode,
                                                      RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
                                                      IMG_UINT32 ui32Priority);

/* Set a property (currently only context flags) on a TDM transfer context. */
PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPropertyKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
                                                      RGX_CONTEXT_PROPERTY eContextProperty,
                                                      IMG_UINT64 ui64Input,
                                                      IMG_UINT64 *pui64Output);

/* Debug - Dump debug info of TDM transfer contexts on this device */
void DumpTDMTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
                              DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
                              void *pvDumpDebugFile,
                              IMG_UINT32 ui32VerbLevel);

/* Debug/Watchdog - check if client transfer contexts are stalled */
IMG_UINT32 CheckForStalledClientTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);


#endif /* __RGXTDMTRANSFER_H__ */
diff --git a/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxutils.c b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxutils.c
new file mode 100644
index 000000000000..e909feeccbe5
--- /dev/null
+++ b/drivers/mcst/gpu-imgtec/services/server/devices/volcanic/rgxutils.c
@@ -0,0 +1,217 @@
/*************************************************************************/ /*!
@File
@Title          Device specific utility routines
@Copyright      Copyright (c) Imagination Technologies Ltd.
All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 

EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ /**************************************************************************/

#include "rgx_fwif_km.h"
#include "pdump_km.h"
#include "osfunc.h"
#include "allocmem.h"
#include "pvr_debug.h"
#include "rgxutils.h"
#include "power.h"
#include "pvrsrv.h"
#include "sync_internal.h"
#include "rgxfwutils.h"


/*
 * Query the device's Active Power Management configuration.
 * Writes the current eActivePMConf value to pui32State.
 * pvPrivateData is unused (AppHint callback signature).
 */
PVRSRV_ERROR RGXQueryAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
	const void *pvPrivateData,
	IMG_UINT32 *pui32State)
{
	PVRSRV_RGXDEV_INFO *psDevInfo;

	PVR_UNREFERENCED_PARAMETER(pvPrivateData);

	if (!psDeviceNode)
		return PVRSRV_ERROR_INVALID_PARAMS;

	psDevInfo = psDeviceNode->pvDevice;
	*pui32State = psDevInfo->eActivePMConf;

	return PVRSRV_OK;
}

/*
 * Set the device's Active Power Management state.  Only the transition to
 * RGX_ACTIVEPM_FORCE_OFF is supported, and only while an APM MISR is
 * installed; any other request returns PVRSRV_ERROR_NOT_SUPPORTED.
 * On success the APM MISR is uninstalled and the device default power
 * state is forced ON (no-op in NO_HARDWARE builds).
 */
PVRSRV_ERROR RGXSetAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
	const void *pvPrivateData,
	IMG_UINT32 ui32State)
{
	PVRSRV_ERROR eError = PVRSRV_OK;
	PVRSRV_RGXDEV_INFO *psDevInfo;

	PVR_UNREFERENCED_PARAMETER(pvPrivateData);

	if (!psDeviceNode || !psDeviceNode->pvDevice)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	psDevInfo = psDeviceNode->pvDevice;

	if (RGX_ACTIVEPM_FORCE_OFF != ui32State
		|| !psDevInfo->pvAPMISRData)
	{
		return PVRSRV_ERROR_NOT_SUPPORTED;
	}

#if !defined(NO_HARDWARE)
	eError = OSUninstallMISR(psDevInfo->pvAPMISRData);
	if (PVRSRV_OK == eError)
	{
		psDevInfo->eActivePMConf = RGX_ACTIVEPM_FORCE_OFF;
		psDevInfo->pvAPMISRData = NULL;
		eError = PVRSRVSetDeviceDefaultPowerState((const
PPVRSRV_DEVICE_NODE)psDeviceNode, + PVRSRV_DEV_POWER_STATE_ON); + } +#endif + + return eError; +} + +PVRSRV_ERROR RGXQueryPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *pvPrivateData, + IMG_BOOL *pbDisabled) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_UNREFERENCED_PARAMETER(pvPrivateData); + + if (!psDeviceNode || !psDeviceNode->pvDevice) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDevInfo = psDeviceNode->pvDevice; + + *pbDisabled = !psDevInfo->bPDPEnabled; + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXSetPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *pvPrivateData, + IMG_BOOL bDisable) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_UNREFERENCED_PARAMETER(pvPrivateData); + + if (!psDeviceNode || !psDeviceNode->pvDevice) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDevInfo = psDeviceNode->pvDevice; + + psDevInfo->bPDPEnabled = !bDisable; + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXGetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 *pui32DeviceFlags) +{ + if (!pui32DeviceFlags || !psDevInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + *pui32DeviceFlags = psDevInfo->ui32DeviceFlags; + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXSetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Config, + IMG_BOOL bSetNotClear) +{ + if (!psDevInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if ((ui32Config & ~RGXKM_DEVICE_STATE_MASK) != 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Bits outside of device state mask set (input: 0x%x, mask: 0x%x)", + __func__, ui32Config, RGXKM_DEVICE_STATE_MASK)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (bSetNotClear) + { + psDevInfo->ui32DeviceFlags |= ui32Config; + } + else + { + psDevInfo->ui32DeviceFlags &= ~ui32Config; + } + + return PVRSRV_OK; +} + +inline const char * RGXStringifyKickTypeDM(RGX_KICK_TYPE_DM eKickTypeDM) +{ + PVR_ASSERT(eKickTypeDM < RGX_KICK_TYPE_DM_LAST); + + switch (eKickTypeDM) { + case RGX_KICK_TYPE_DM_GP: + return "GP "; + case 
RGX_KICK_TYPE_DM_TDM_2D: + return "TDM/2D "; + case RGX_KICK_TYPE_DM_GEOM: + return "GEOM "; + case RGX_KICK_TYPE_DM_3D: + return "3D "; + case RGX_KICK_TYPE_DM_CDM: + return "CDM "; + case RGX_KICK_TYPE_DM_TQ2D: + return "TQ2D "; + case RGX_KICK_TYPE_DM_TQ3D: + return "TQ3D "; + default: + return "Invalid DM "; + } +} + +/****************************************************************************** + End of file (rgxutils.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/Kbuild.mk b/drivers/mcst/gpu-imgtec/services/server/env/linux/Kbuild.mk new file mode 100644 index 000000000000..ba893e6b3f0e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/Kbuild.mk @@ -0,0 +1,538 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +# Window system +ccflags-y += -DWINDOW_SYSTEM=\"$(WINDOW_SYSTEM)\" + +# Linux kernel headers +ccflags-y += \ + -Iinclude \ + -Iinclude/drm + +# Compatibility BVNC +ccflags-y += -I$(TOP)/services/shared/devices/$(PVR_ARCH) + +# PVR_ARCH +ccflags-y += -DPVR_ARCH_NAME=\"$(PVR_ARCH)\" + +# Errata files +ccflags-y += -I$(TOP)/hwdefs/$(PVR_ARCH) + +# Linux-specific headers +ccflags-y += \ + -I$(TOP)/include/drm \ + -I$(TOP)/services/include/env/linux \ + -I$(TOP)/services/server/env/linux/$(PVR_ARCH) -I$(TOP)/services/server/env/linux \ + -I$(TOP)/kernel/drivers/staging/imgtec + +$(PVRSRV_MODNAME)-y += \ + services/server/env/linux/event.o \ + services/server/env/linux/fwload.o \ + services/server/env/linux/km_apphint.o \ + services/server/env/linux/module_common.o \ + services/server/env/linux/osmmap_stub.o \ + services/server/env/linux/osfunc.o \ + services/server/env/linux/allocmem.o \ + services/server/env/linux/osconnection_server.o \ + services/server/env/linux/physmem_osmem_linux.o \ + services/server/env/linux/pmr_os.o \ + services/server/env/linux/pvr_bridge_k.o \ + services/server/env/linux/pvr_debug.o \ + services/server/env/linux/physmem_dmabuf.o \ + services/server/common/devicemem_heapcfg.o \ + services/shared/common/devicemem.o \ + services/shared/common/devicemem_utils.o \ + services/shared/common/hash.o \ + services/shared/common/ra.o \ + services/shared/common/sync.o \ + services/shared/common/mem_utils.o \ + services/server/common/devicemem_server.o \ + services/server/common/handle.o \ + services/server/common/lists.o \ + services/server/common/mmu_common.o \ + services/server/common/connection_server.o \ + services/server/common/physheap.o \ + services/server/common/physmem.o \ + services/server/common/physmem_lma.o \ + services/server/common/physmem_hostmem.o \ + services/server/common/pmr.o \ + services/server/common/power.o \ + 
services/server/common/process_stats.o \ + services/server/common/pvr_notifier.o \ + services/server/common/pvrsrv.o \ + services/server/common/srvcore.o \ + services/server/common/sync_checkpoint.o \ + services/server/common/sync_server.o \ + services/shared/common/htbuffer.o \ + services/server/common/htbserver.o \ + services/server/common/htb_debug.o \ + services/server/common/tlintern.o \ + services/shared/common/tlclient.o \ + services/server/common/tlserver.o \ + services/server/common/tlstream.o \ + services/server/common/cache_km.o \ + services/shared/common/uniq_key_splay_tree.o \ + services/server/common/pvrsrv_pool.o \ + services/server/common/pvrsrv_bridge_init.o \ + services/server/common/info_page_km.o \ + services/shared/common/pvrsrv_error.o \ + services/server/common/di_server.o + +# Wrap ExtMem support +ifeq ($(SUPPORT_WRAP_EXTMEM),1) + $(PVRSRV_MODNAME)-y += \ + services/server/env/linux/physmem_extmem_linux.o \ + services/server/common/physmem_extmem.o +endif + +ifeq ($(SUPPORT_LINUX_WRAP_EXTMEM_PAGE_TABLE_WALK),1) + $(PVRSRV_MODNAME)-y += \ + services/server/env/linux/pg_walk_through.o +endif + +ifeq ($(SUPPORT_TRUSTED_DEVICE),1) + $(PVRSRV_MODNAME)-y += \ + services/server/common/physmem_tdfwmem.o +endif + +ifeq ($(SUPPORT_DEDICATED_FW_MEMORY),1) + $(PVRSRV_MODNAME)-y += \ + services/server/common/physmem_fwdedicatedmem.o +endif + +ifeq ($(SUPPORT_PHYSMEM_TEST),1) + $(PVRSRV_MODNAME)-y += \ + services/server/env/linux/physmem_test.o +endif + +ifneq ($(PVR_LOADER),) + ifeq ($(KERNEL_DRIVER_DIR),) + $(PVRSRV_MODNAME)-y += kernel/drivers/staging/imgtec/$(PVR_LOADER).o + else + ifneq ($(wildcard $(KERNELDIR)/$(KERNEL_DRIVER_DIR)/$(PVR_SYSTEM)/$(PVR_LOADER).c),) + $(PVRSRV_MODNAME)-y += external/$(KERNEL_DRIVER_DIR)/$(PVR_SYSTEM)/$(PVR_LOADER).o + else + ifneq ($(wildcard $(KERNELDIR)/$(KERNEL_DRIVER_DIR)/$(PVR_LOADER).c),) + $(PVRSRV_MODNAME)-y += external/$(KERNEL_DRIVER_DIR)/$(PVR_LOADER).o + else + $(PVRSRV_MODNAME)-y += 
kernel/drivers/staging/imgtec/$(PVR_LOADER).o + endif + endif + endif +else + $(PVRSRV_MODNAME)-y += kernel/drivers/staging/imgtec/pvr_platform_drv.o +endif + +ifeq ($(SUPPORT_RGX),1) +$(PVRSRV_MODNAME)-y += \ + kernel/drivers/staging/imgtec/pvr_drm.o \ + services/server/env/linux/pvr_gputrace.o \ + services/server/devices/rgxfwdbg.o \ + services/server/devices/$(PVR_ARCH)/rgxccb.o \ + services/server/devices/$(PVR_ARCH)/rgxdebug.o \ + services/server/devices/rgxfwtrace_strings.o \ + services/server/devices/$(PVR_ARCH)/rgxfwutils.o \ + services/server/devices/$(PVR_ARCH)/rgxinit.o \ + services/server/devices/$(PVR_ARCH)/rgxbvnc.o \ + services/server/devices/$(PVR_ARCH)/rgxkicksync.o \ + services/server/devices/$(PVR_ARCH)/rgxlayer_impl.o \ + services/server/devices/$(PVR_ARCH)/rgxmem.o \ + services/server/devices/$(PVR_ARCH)/rgxmmuinit.o \ + services/server/devices/$(PVR_ARCH)/rgxregconfig.o \ + services/server/devices/$(PVR_ARCH)/rgxta3d.o \ + services/server/devices/$(PVR_ARCH)/rgxsyncutils.o \ + services/server/devices/$(PVR_ARCH)/rgxtdmtransfer.o \ + services/server/devices/$(PVR_ARCH)/rgxutils.o \ + services/server/devices/$(PVR_ARCH)/rgxhwperf.o \ + services/server/devices/$(PVR_ARCH)/rgxpower.o \ + services/server/devices/$(PVR_ARCH)/rgxstartstop.o \ + services/server/devices/rgxtimecorr.o \ + services/server/devices/$(PVR_ARCH)/rgxcompute.o \ + services/server/devices/$(PVR_ARCH)/rgxsignals.o \ + services/server/devices/$(PVR_ARCH)/rgxmulticore.o \ + services/server/devices/rgxshader.o +ifeq ($(SUPPORT_USC_BREAKPOINT),1) +$(PVRSRV_MODNAME)-y += \ + services/server/devices/$(PVR_ARCH)/rgxbreakpoint.o +endif + +ifeq ($(PVR_ARCH),rogue) + $(PVRSRV_MODNAME)-y += \ + services/server/devices/$(PVR_ARCH)/rgxtransfer.o \ + services/server/devices/$(PVR_ARCH)/rgxmipsmmuinit.o +endif + +ifeq ($(SUPPORT_PDVFS),1) + $(PVRSRV_MODNAME)-y += \ + services/server/devices/rgxpdvfs.o +endif + +ifeq ($(SUPPORT_WORKLOAD_ESTIMATION),1) + $(PVRSRV_MODNAME)-y += \ + 
services/server/devices/rgxworkest.o +endif + +endif + +ifeq ($(SUPPORT_DISPLAY_CLASS),1) +$(PVRSRV_MODNAME)-y += \ + services/server/common/dc_server.o \ + services/server/common/scp.o +endif + +ifeq ($(SUPPORT_SECURE_EXPORT),1) +$(PVRSRV_MODNAME)-y += services/server/env/linux/ossecure_export.o +endif + +ifeq ($(PDUMP),1) +$(PVRSRV_MODNAME)-y += \ + services/server/common/pdump_server.o \ + services/server/common/pdump_mmu.o \ + services/server/common/pdump_physmem.o \ + services/shared/common/devicemem_pdump.o \ + services/shared/common/devicememx_pdump.o + +ifeq ($(SUPPORT_RGX),1) +$(PVRSRV_MODNAME)-y += \ + services/server/devices/$(PVR_ARCH)/rgxpdump.o +endif + +endif + + + +ifeq ($(PVRSRV_ENABLE_GPU_MEMORY_INFO),1) +$(PVRSRV_MODNAME)-y += services/server/common/ri_server.o +endif + +ifeq ($(PVR_TESTING_UTILS),1) +$(PVRSRV_MODNAME)-y += services/server/common/tutils.o +endif + +$(PVRSRV_MODNAME)-y += services/server/common/devicemem_history_server.o + +ifeq ($(PVR_HANDLE_BACKEND),generic) +$(PVRSRV_MODNAME)-y += services/server/common/handle_generic.o +else +ifeq ($(PVR_HANDLE_BACKEND),idr) +$(PVRSRV_MODNAME)-y += services/server/env/linux/handle_idr.o +endif +endif + +ifeq ($(PVRSRV_ENABLE_LINUX_MMAP_STATS),1) +$(PVRSRV_MODNAME)-y += services/server/env/linux/mmap_stats.o +endif + +ifeq ($(SUPPORT_BUFFER_SYNC),1) +$(PVRSRV_MODNAME)-y += \ + kernel/drivers/staging/imgtec/pvr_buffer_sync.o \ + kernel/drivers/staging/imgtec/pvr_fence.o +endif + +ifeq ($(SUPPORT_NATIVE_FENCE_SYNC),1) +ifeq ($(SUPPORT_DMA_FENCE),1) +$(PVRSRV_MODNAME)-y += \ + kernel/drivers/staging/imgtec/pvr_sync_file.o \ + kernel/drivers/staging/imgtec/pvr_counting_timeline.o \ + kernel/drivers/staging/imgtec/pvr_sw_fence.o \ + kernel/drivers/staging/imgtec/pvr_fence.o +else +$(PVRSRV_MODNAME)-y += kernel/drivers/staging/imgtec/pvr_sync2.o +endif +else +ifeq ($(SUPPORT_FALLBACK_FENCE_SYNC),1) +$(PVRSRV_MODNAME)-y += \ + services/server/common/sync_fallback_server.o \ + 
services/server/env/linux/ossecure_export.o +endif +endif + +ifeq ($(SUPPORT_LINUX_DVFS),1) +$(PVRSRV_MODNAME)-y += \ + services/server/env/linux/pvr_dvfs_device.o +endif + +ifeq ($(PVRSRV_ENABLE_PVR_ION_STATS),1) +$(PVRSRV_MODNAME)-y += \ + services/server/env/linux/pvr_ion_stats.o +endif + +$(PVRSRV_MODNAME)-$(CONFIG_X86) += services/server/env/linux/osfunc_x86.o +$(PVRSRV_MODNAME)-$(CONFIG_ARM) += services/server/env/linux/osfunc_arm.o +$(PVRSRV_MODNAME)-$(CONFIG_ARM64) += services/server/env/linux/osfunc_arm64.o +$(PVRSRV_MODNAME)-$(CONFIG_METAG) += services/server/env/linux/osfunc_metag.o +$(PVRSRV_MODNAME)-$(CONFIG_MIPS) += services/server/env/linux/osfunc_mips.o +$(PVRSRV_MODNAME)-$(CONFIG_E2K) += services/server/env/linux/osfunc_e2k.o +$(PVRSRV_MODNAME)-$(CONFIG_RISCV) += services/server/env/linux/osfunc_riscv.o + +ifeq ($(CONFIG_DEBUG_FS),y) +$(PVRSRV_MODNAME)-$(CONFIG_DEBUG_FS) += services/server/env/linux/pvr_debugfs.o +else ifeq ($(CONFIG_PROC_FS),y) +$(PVRSRV_MODNAME)-$(CONFIG_PROC_FS) += services/server/env/linux/pvr_procfs.o +endif +$(PVRSRV_MODNAME)-$(CONFIG_EVENT_TRACING) += services/server/env/linux/trace_events.o + +ccflags-y += -I$(OUT)/target_neutral/intermediates/firmware + +ifeq ($(SUPPORT_RGX),1) +# Srvinit headers and source files + +$(PVRSRV_MODNAME)-y += \ + services/server/devices/$(PVR_ARCH)/rgxsrvinit.o \ + services/server/devices/$(PVR_ARCH)/rgxfwimageutils.o +ifeq ($(PVR_ARCH),rogue) +$(PVRSRV_MODNAME)-y += \ + services/shared/devices/$(PVR_ARCH)/rgx_hwperf_table.o +endif +endif + +ccflags-y += \ + -I$(TOP)/hwdefs/$(PVR_ARCH)/km \ + -I$(TOP)/include/$(PVR_ARCH) -I$(TOP)/include \ + -I$(TOP)/include/$(PVR_ARCH)/public -I$(TOP)/include/public \ + -I$(TOP)/services/include/$(PVR_ARCH) -I$(TOP)/services/include \ + -I$(TOP)/services/shared/include \ + -I$(TOP)/services/server/devices/$(PVR_ARCH) -I$(TOP)/services/server/devices \ + -I$(TOP)/services/server/include/$(PVR_ARCH) -I$(TOP)/services/server/include \ + 
-I$(TOP)/services/shared/common \ + -I$(TOP)/services/shared/devices \ + -I$(TOP)/services/system/$(PVR_ARCH)/include \ + -I$(TOP)/services/server/common/$(PVR_ARCH) -I$(TOP)/services/server/common + +ifeq ($(KERNEL_DRIVER_DIR),) + ccflags-y += -I$(TOP)/services/system/$(PVR_ARCH)/$(PVR_SYSTEM) -I$(TOP)/services/system/$(PVR_SYSTEM) +endif + +# Bridge headers and source files + +# Keep in sync with: +# build/linux/common/bridges.mk AND +# services/bridge/Linux.mk + +ccflags-y += \ + -I$(bridge_base)/mm_bridge \ + -I$(bridge_base)/cmm_bridge \ + -I$(bridge_base)/srvcore_bridge \ + -I$(bridge_base)/sync_bridge \ + -I$(bridge_base)/synctracking_bridge \ + -I$(bridge_base)/htbuffer_bridge \ + -I$(bridge_base)/pvrtl_bridge \ + -I$(bridge_base)/cache_bridge \ + -I$(bridge_base)/dmabuf_bridge + +ifeq ($(SUPPORT_RGX),1) +ccflags-y += \ + -I$(bridge_base)/rgxtq2_bridge \ + -I$(bridge_base)/rgxta3d_bridge \ + -I$(bridge_base)/rgxhwperf_bridge \ + -I$(bridge_base)/rgxkicksync_bridge \ + -I$(bridge_base)/rgxcmp_bridge \ + -I$(bridge_base)/rgxregconfig_bridge \ + -I$(bridge_base)/rgxfwdbg_bridge \ + -I$(bridge_base)/rgxsignals_bridge +ifeq ($(PVR_ARCH),rogue) +ccflags-y += \ + -I$(bridge_base)/rgxtq_bridge +endif +ifeq ($(SUPPORT_USC_BREAKPOINT),1) +ccflags-y += \ + -I$(bridge_base)/rgxbreakpoint_bridge +endif +endif + +$(PVRSRV_MODNAME)-y += \ + generated/$(PVR_ARCH)/mm_bridge/server_mm_bridge.o \ + generated/$(PVR_ARCH)/cmm_bridge/server_cmm_bridge.o \ + generated/$(PVR_ARCH)/srvcore_bridge/server_srvcore_bridge.o \ + generated/$(PVR_ARCH)/sync_bridge/server_sync_bridge.o \ + generated/$(PVR_ARCH)/htbuffer_bridge/server_htbuffer_bridge.o \ + generated/$(PVR_ARCH)/pvrtl_bridge/server_pvrtl_bridge.o \ + generated/$(PVR_ARCH)/cache_bridge/server_cache_bridge.o \ + generated/$(PVR_ARCH)/dmabuf_bridge/server_dmabuf_bridge.o + +ifeq ($(SUPPORT_RGX),1) +$(PVRSRV_MODNAME)-y += \ + generated/$(PVR_ARCH)/rgxtq2_bridge/server_rgxtq2_bridge.o \ + 
generated/$(PVR_ARCH)/rgxta3d_bridge/server_rgxta3d_bridge.o \ + generated/$(PVR_ARCH)/rgxhwperf_bridge/server_rgxhwperf_bridge.o \ + generated/$(PVR_ARCH)/rgxkicksync_bridge/server_rgxkicksync_bridge.o \ + generated/$(PVR_ARCH)/rgxcmp_bridge/server_rgxcmp_bridge.o \ + generated/$(PVR_ARCH)/rgxregconfig_bridge/server_rgxregconfig_bridge.o \ + generated/$(PVR_ARCH)/rgxfwdbg_bridge/server_rgxfwdbg_bridge.o \ + generated/$(PVR_ARCH)/rgxsignals_bridge/server_rgxsignals_bridge.o +ifeq ($(PVR_ARCH),rogue) +$(PVRSRV_MODNAME)-y += \ + generated/$(PVR_ARCH)/rgxtq_bridge/server_rgxtq_bridge.o +endif +ifeq ($(SUPPORT_USC_BREAKPOINT),1) +$(PVRSRV_MODNAME)-y += \ + generated/$(PVR_ARCH)/rgxbreakpoint_bridge/server_rgxbreakpoint_bridge.o +endif +endif + +ifeq ($(SUPPORT_WRAP_EXTMEM),1) +ccflags-y += -I$(bridge_base)/mmextmem_bridge +$(PVRSRV_MODNAME)-y += generated/$(PVR_ARCH)/mmextmem_bridge/server_mmextmem_bridge.o +endif + +ifeq ($(SUPPORT_DISPLAY_CLASS),1) +ccflags-y += -I$(bridge_base)/dc_bridge +$(PVRSRV_MODNAME)-y += generated/$(PVR_ARCH)/dc_bridge/server_dc_bridge.o +endif + +ifeq ($(SUPPORT_SECURE_EXPORT),1) +ccflags-y += -I$(bridge_base)/smm_bridge +$(PVRSRV_MODNAME)-y += generated/$(PVR_ARCH)/smm_bridge/server_smm_bridge.o +endif + +ifeq ($(PDUMP),1) +ccflags-y += \ + -I$(bridge_base)/pdump_bridge \ + -I$(bridge_base)/pdumpctrl_bridge \ + -I$(bridge_base)/pdumpmm_bridge + +ifeq ($(SUPPORT_RGX),1) +ccflags-y += \ + -I$(bridge_base)/rgxpdump_bridge + +$(PVRSRV_MODNAME)-y += \ + generated/$(PVR_ARCH)/rgxpdump_bridge/server_rgxpdump_bridge.o +endif + +$(PVRSRV_MODNAME)-y += \ + generated/$(PVR_ARCH)/pdump_bridge/server_pdump_bridge.o \ + generated/$(PVR_ARCH)/pdumpctrl_bridge/server_pdumpctrl_bridge.o \ + generated/$(PVR_ARCH)/pdumpmm_bridge/server_pdumpmm_bridge.o +endif + +ifeq ($(PVRSRV_ENABLE_GPU_MEMORY_INFO),1) +ccflags-y += -I$(bridge_base)/ri_bridge +$(PVRSRV_MODNAME)-y += generated/$(PVR_ARCH)/ri_bridge/server_ri_bridge.o +endif + +ifeq ($(SUPPORT_VALIDATION),1) 
+ccflags-y += -I$(bridge_base)/validation_bridge +$(PVRSRV_MODNAME)-y += generated/$(PVR_ARCH)/validation_bridge/server_validation_bridge.o +$(PVRSRV_MODNAME)-y += services/server/common/validation.o +ifeq ($(PVR_ARCH),volcanic) +$(PVRSRV_MODNAME)-y += services/server/common/validation_soc.o +endif +endif + +ifeq ($(PVR_TESTING_UTILS),1) +ccflags-y += -I$(bridge_base)/tutils_bridge +$(PVRSRV_MODNAME)-y += generated/$(PVR_ARCH)/tutils_bridge/server_tutils_bridge.o +endif + +ccflags-y += -I$(bridge_base)/devicememhistory_bridge +$(PVRSRV_MODNAME)-y += \ + generated/$(PVR_ARCH)/devicememhistory_bridge/server_devicememhistory_bridge.o + +ccflags-y += -I$(bridge_base)/synctracking_bridge +$(PVRSRV_MODNAME)-y += \ + generated/$(PVR_ARCH)/synctracking_bridge/server_synctracking_bridge.o + +ifeq ($(SUPPORT_FALLBACK_FENCE_SYNC),1) +ccflags-y += \ + -I$(bridge_base)/syncfallback_bridge +$(PVRSRV_MODNAME)-y += generated/$(PVR_ARCH)/syncfallback_bridge/server_syncfallback_bridge.o +endif + + + + +# Direct bridges + +$(PVRSRV_MODNAME)-y += \ + generated/$(PVR_ARCH)/mm_bridge/client_mm_direct_bridge.o \ + generated/$(PVR_ARCH)/sync_bridge/client_sync_direct_bridge.o \ + generated/$(PVR_ARCH)/htbuffer_bridge/client_htbuffer_direct_bridge.o \ + generated/$(PVR_ARCH)/cache_bridge/client_cache_direct_bridge.o \ + generated/$(PVR_ARCH)/pvrtl_bridge/client_pvrtl_direct_bridge.o + +ifeq ($(PDUMP),1) +$(PVRSRV_MODNAME)-y += generated/$(PVR_ARCH)/pdumpmm_bridge/client_pdumpmm_direct_bridge.o +endif + +ifeq ($(PVRSRV_ENABLE_GPU_MEMORY_INFO),1) +$(PVRSRV_MODNAME)-y += generated/$(PVR_ARCH)/ri_bridge/client_ri_direct_bridge.o +endif + +ifeq ($(PDUMP),1) + $(PVRSRV_MODNAME)-y += \ + generated/$(PVR_ARCH)/pdump_bridge/client_pdump_direct_bridge.o \ + generated/$(PVR_ARCH)/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.o + +ifeq ($(SUPPORT_RGX),1) + $(PVRSRV_MODNAME)-y += \ + generated/$(PVR_ARCH)/rgxpdump_bridge/client_rgxpdump_direct_bridge.o +endif + +endif + +$(PVRSRV_MODNAME)-y += \ + 
generated/$(PVR_ARCH)/devicememhistory_bridge/client_devicememhistory_direct_bridge.o + +$(PVRSRV_MODNAME)-y += \ + generated/$(PVR_ARCH)/synctracking_bridge/client_synctracking_direct_bridge.o + +# Enable -Werror for all built object files (suppress for Fiasco.OC/L4Linux) +ifeq ($(CONFIG_L4),) +ifneq ($(W),1) +$(foreach _o,$(addprefix CFLAGS_,$(notdir $($(PVRSRV_MODNAME)-y))),$(eval $(_o) := -Werror)) +endif +endif + +# With certain build configurations, e.g., ARM, Werror, we get a build +# failure in the ftrace Linux kernel header. So disable the relevant check. +CFLAGS_trace_events.o := -Wno-missing-prototypes + +# Make sure the mem_utils are built in 'free standing' mode, so the compiler +# is not encouraged to call out to C library functions +CFLAGS_mem_utils.o := -ffreestanding + +# Chrome OS kernel adds some issues +ccflags-y += -Wno-ignored-qualifiers + +# Treat #warning as a warning +ccflags-y += -Wno-error=cpp + +include $(TOP)/services/system/$(PVR_ARCH)/$(PVR_SYSTEM)/Kbuild.mk diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/Linux.mk b/drivers/mcst/gpu-imgtec/services/server/env/linux/Linux.mk new file mode 100644 index 000000000000..caf3a077a3a7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/Linux.mk @@ -0,0 +1,46 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +modules := srvkm + +srvkm_type := kernel_module +srvkm_target := $(PVRSRV_MODNAME).ko +srvkm_makefile := $(THIS_DIR)/Kbuild.mk diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/allocmem.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/allocmem.c new file mode 100644 index 000000000000..be6d1057c6aa --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/allocmem.c @@ -0,0 +1,373 @@ +/*************************************************************************/ /*! +@File +@Title Host memory management implementation for Linux +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include +#include +#include +#include + +#include "img_defs.h" +#include "allocmem.h" +#include "pvr_debug.h" +#include "process_stats.h" +#include "osfunc.h" + +/* + * DEBUG_MEMSTATS_ALLOC_RECORD_VALUES needs to be different from DEBUG_MEMSTATS_VALUES defined in process_stats.h + * The reason for this is that the file and line where the allocation happens are tracked from the OSAllocMem params. + * If DEBUG_MEMSTATS_VALUES were to be used, all OSAllocMem allocation statistics would point to allocmem.c, which is not + * expected behaviour. + */ +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON) +#define DEBUG_MEMSTATS_ALLOC_RECORD_VALUES ,pvAllocFromFile, ui32AllocFromLine +#else +#define DEBUG_MEMSTATS_ALLOC_RECORD_VALUES +#endif + +/* + * When memory statistics are disabled, memory records are used instead. + * In order for these to work, the PID of the process that requested the + * allocation needs to be stored at the end of the kmalloc'd memory, making + * sure 4 extra bytes are allocated to fit the PID. 
+ * + * There is no need for this extra allocation when memory statistics are + * enabled, since all allocations are tracked in DebugFS mem_area files. + */ +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS) +#define ALLOCMEM_MEMSTATS_PADDING sizeof(IMG_UINT32) +#else +#define ALLOCMEM_MEMSTATS_PADDING 0UL +#endif + +/* How many times kmalloc can fail before the allocation threshold is reduced */ +static const IMG_UINT32 g_ui32kmallocFailLimit = 10; +/* How many kmalloc failures happened since the last allocation threshold change */ +static IMG_UINT32 g_ui32kmallocFailCount = 0; +/* Current kmalloc threshold value in bytes */ +static IMG_UINT32 g_ui32kmallocThreshold = PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD; +/* Spinlock used so that the global variables above may not be modified by more than 1 thread at a time */ +static DEFINE_SPINLOCK(kmalloc_lock); + +static inline void OSTryDecreaseKmallocThreshold(void) +{ + unsigned long flags; + spin_lock_irqsave(&kmalloc_lock, flags); + + g_ui32kmallocFailCount++; + + if (g_ui32kmallocFailCount >= g_ui32kmallocFailLimit) + { + g_ui32kmallocFailCount = 0; + if (g_ui32kmallocThreshold > PAGE_SIZE) + { + g_ui32kmallocThreshold >>= 1; + printk(KERN_INFO "Threshold is now set to %d\n", g_ui32kmallocThreshold); + } + } + + spin_unlock_irqrestore(&kmalloc_lock, flags); +} + +static inline void OSResetKmallocFailCount(void) +{ + unsigned long flags; + spin_lock_irqsave(&kmalloc_lock, flags); + + g_ui32kmallocFailCount = 0; + + spin_unlock_irqrestore(&kmalloc_lock, flags); +} + +static inline void _pvr_vfree(const void* pvAddr) +{ +#if defined(DEBUG) + /* Size harder to come by for vmalloc and since vmalloc allocates + * a whole number of pages, poison the minimum size known to have + * been allocated. 
+ */ + OSCachedMemSet((void*)pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE, + PAGE_SIZE); +#endif + vfree(pvAddr); +} + +static inline void _pvr_kfree(const void* pvAddr) +{ +#if defined(DEBUG) + /* Poison whole memory block */ + OSCachedMemSet((void*)pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE, + ksize(pvAddr)); +#endif + kfree(pvAddr); +} + +static inline void _pvr_alloc_stats_add(void *pvAddr, IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS) +{ +#if !defined(PVRSRV_ENABLE_PROCESS_STATS) + PVR_UNREFERENCED_PARAMETER(pvAddr); +#else + if (!is_vmalloc_addr(pvAddr)) + { +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + IMG_CPU_PHYADDR sCpuPAddr; + sCpuPAddr.uiAddr = 0; + + PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, + pvAddr, + sCpuPAddr, + ksize(pvAddr), + NULL, + OSGetCurrentClientProcessIDKM() + DEBUG_MEMSTATS_ALLOC_RECORD_VALUES); +#else + { + /* Store the PID in the final additional 4 bytes allocated */ + IMG_UINT32 *puiTemp = IMG_OFFSET_ADDR(pvAddr, ksize(pvAddr) - ALLOCMEM_MEMSTATS_PADDING); + *puiTemp = OSGetCurrentClientProcessIDKM(); + } + PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, ksize(pvAddr), OSGetCurrentClientProcessIDKM()); +#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ + } + else + { +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + IMG_CPU_PHYADDR sCpuPAddr; + sCpuPAddr.uiAddr = 0; + + PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC, + pvAddr, + sCpuPAddr, + ((ui32Size + PAGE_SIZE-1) & ~(PAGE_SIZE-1)), + NULL, + OSGetCurrentClientProcessIDKM() + DEBUG_MEMSTATS_ALLOC_RECORD_VALUES); +#else + PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC, + ((ui32Size + PAGE_SIZE-1) & ~(PAGE_SIZE-1)), + (IMG_UINT64)(uintptr_t) pvAddr, + OSGetCurrentClientProcessIDKM()); +#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ + } +#endif /* !defined(PVRSRV_ENABLE_PROCESS_STATS) */ +} + +static inline void _pvr_alloc_stats_remove(void *pvAddr) +{ +#if !defined(PVRSRV_ENABLE_PROCESS_STATS) + PVR_UNREFERENCED_PARAMETER(pvAddr); +#else + if 
(!is_vmalloc_addr(pvAddr)) + { +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + { + IMG_UINT32 *puiTemp = IMG_OFFSET_ADDR(pvAddr, ksize(pvAddr) - ALLOCMEM_MEMSTATS_PADDING); + PVRSRVStatsDecrMemKAllocStat(ksize(pvAddr), *puiTemp); + } +#else + PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, + (IMG_UINT64)(uintptr_t) pvAddr, + OSGetCurrentClientProcessIDKM()); +#endif + } + else + { +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC, + (IMG_UINT64)(uintptr_t) pvAddr); +#else + PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC, + (IMG_UINT64)(uintptr_t) pvAddr, + OSGetCurrentClientProcessIDKM()); +#endif + } +#endif /* !defined(PVRSRV_ENABLE_PROCESS_STATS) */ +} + +void *(OSAllocMem)(IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS) +{ + void *pvRet = NULL; + + if ((ui32Size + ALLOCMEM_MEMSTATS_PADDING) <= g_ui32kmallocThreshold) + { + pvRet = kmalloc(ui32Size + ALLOCMEM_MEMSTATS_PADDING, GFP_KERNEL); + if (pvRet == NULL) + { + OSTryDecreaseKmallocThreshold(); + } + else + { + OSResetKmallocFailCount(); + } + } + + if (pvRet == NULL) + { + pvRet = vmalloc(ui32Size); + } + + if (pvRet != NULL) + { + _pvr_alloc_stats_add(pvRet, ui32Size DEBUG_MEMSTATS_ALLOC_RECORD_VALUES); + } + + return pvRet; +} + +void *(OSAllocZMem)(IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS) +{ + void *pvRet = NULL; + + if ((ui32Size + ALLOCMEM_MEMSTATS_PADDING) <= g_ui32kmallocThreshold) + { + pvRet = kzalloc(ui32Size + ALLOCMEM_MEMSTATS_PADDING, GFP_KERNEL); + if (pvRet == NULL) + { + OSTryDecreaseKmallocThreshold(); + } + else + { + OSResetKmallocFailCount(); + } + } + + if (pvRet == NULL) + { + pvRet = vzalloc(ui32Size); + } + + if (pvRet != NULL) + { + _pvr_alloc_stats_add(pvRet, ui32Size DEBUG_MEMSTATS_ALLOC_RECORD_VALUES); + } + + return pvRet; +} + +/* + * The parentheses around OSFreeMem prevent the macro in allocmem.h from + * applying, as it would break the function's definition. 
+ */ +void (OSFreeMem)(void *pvMem) +{ + if (pvMem != NULL) + { + _pvr_alloc_stats_remove(pvMem); + + if (!is_vmalloc_addr(pvMem)) + { + _pvr_kfree(pvMem); + } + else + { + _pvr_vfree(pvMem); + } + } +} + +void *OSAllocMemNoStats(IMG_UINT32 ui32Size) +{ + void *pvRet = NULL; + + if (ui32Size <= g_ui32kmallocThreshold) + { + pvRet = kmalloc(ui32Size, GFP_KERNEL); + if (pvRet == NULL) + { + OSTryDecreaseKmallocThreshold(); + } + else + { + OSResetKmallocFailCount(); + } + } + + if (pvRet == NULL) + { + pvRet = vmalloc(ui32Size); + } + + return pvRet; +} + +void *OSAllocZMemNoStats(IMG_UINT32 ui32Size) +{ + void *pvRet = NULL; + + if (ui32Size <= g_ui32kmallocThreshold) + { + pvRet = kzalloc(ui32Size, GFP_KERNEL); + if (pvRet == NULL) + { + OSTryDecreaseKmallocThreshold(); + } + else + { + OSResetKmallocFailCount(); + } + } + + if (pvRet == NULL) + { + pvRet = vzalloc(ui32Size); + } + + return pvRet; +} + +/* + * The parentheses around OSFreeMemNoStats prevent the macro in allocmem.h from + * applying, as it would break the function's definition. + */ +void (OSFreeMemNoStats)(void *pvMem) +{ + if (pvMem != NULL) + { + if ( !is_vmalloc_addr(pvMem) ) + { + _pvr_kfree(pvMem); + } + else + { + _pvr_vfree(pvMem); + } + } +} diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/env_connection.h b/drivers/mcst/gpu-imgtec/services/server/env/linux/env_connection.h new file mode 100644 index 000000000000..307ee29f74e6 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/env_connection.h @@ -0,0 +1,90 @@ +/*************************************************************************/ /*! +@File +@Title Server side connection management +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Linux specific server side connection management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(_ENV_CONNECTION_H_) +#define _ENV_CONNECTION_H_ + +#include +#include +#include + +#include "handle.h" +#include "pvr_debug.h" +#include "device.h" + +#if defined(SUPPORT_ION) +#include PVR_ANDROID_ION_HEADER +#include "ion_sys.h" +#include "allocmem.h" +#endif + +typedef struct _ENV_CONNECTION_PRIVATE_DATA_ +{ + struct file *psFile; + PVRSRV_DEVICE_NODE *psDevNode; +} ENV_CONNECTION_PRIVATE_DATA; + +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) +#define ION_CLIENT_NAME_SIZE 50 + +typedef struct _ENV_ION_CONNECTION_DATA_ +{ + IMG_CHAR azIonClientName[ION_CLIENT_NAME_SIZE]; + struct ion_device *psIonDev; + struct ion_client *psIonClient; +} ENV_ION_CONNECTION_DATA; +#endif + +typedef struct _ENV_CONNECTION_DATA_ +{ + pid_t owner; + + struct file *psFile; + PVRSRV_DEVICE_NODE *psDevNode; + +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + ENV_ION_CONNECTION_DATA *psIonData; +#endif +} ENV_CONNECTION_DATA; + +#endif /* !defined(_ENV_CONNECTION_H_) */ diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/event.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/event.c new file mode 100644 index 000000000000..aec0fc8a02dd --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/event.c @@ -0,0 +1,514 @@ +/*************************************************************************/ /*! +@File +@Title Event Object +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0) +#include +#endif +#include +#include +#include +#include +#include +#include + +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "allocmem.h" +#include "event.h" +#include "pvr_debug.h" +#include "pvrsrv.h" +#include "pvr_bridge_k.h" + +#include "osfunc.h" + +/* Uncomment to enable event object stats that are useful for debugging. + * The stats can be gotten at any time (during lifetime of event object) + * using OSEventObjectDumpdebugInfo API */ +// #define LINUX_EVENT_OBJECT_STATS + + +typedef struct PVRSRV_LINUX_EVENT_OBJECT_LIST_TAG +{ + rwlock_t sLock; + /* Counts how many times event object was signalled i.e. how many times + * LinuxEventObjectSignal() was called on a given event object. + * Used for detecting pending signals. + * Note that this is in no way related to OS signals. */ + atomic_t sEventSignalCount; + struct list_head sList; +} PVRSRV_LINUX_EVENT_OBJECT_LIST; + + +typedef struct PVRSRV_LINUX_EVENT_OBJECT_TAG +{ + IMG_UINT32 ui32EventSignalCountPrevious; +#if defined(DEBUG) + IMG_UINT ui32Stats; +#endif + +#ifdef LINUX_EVENT_OBJECT_STATS + POS_LOCK hLock; + IMG_UINT32 ui32ScheduleAvoided; + IMG_UINT32 ui32ScheduleCalled; + IMG_UINT32 ui32ScheduleSleptFully; + IMG_UINT32 ui32ScheduleSleptPartially; + IMG_UINT32 ui32ScheduleReturnedImmediately; +#endif + wait_queue_head_t sWait; + struct list_head sList; + PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList; +} PVRSRV_LINUX_EVENT_OBJECT; + +/*! 
+****************************************************************************** + + @Function LinuxEventObjectListCreate + + @Description + + Linux wait object list creation + + @Output hOSEventKM : Pointer to the event object list handle + + @Return PVRSRV_ERROR : Error code + +******************************************************************************/ +PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList) +{ + PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList; + + psEvenObjectList = OSAllocMem(sizeof(*psEvenObjectList)); + if (psEvenObjectList == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectCreate: failed to allocate memory for event list")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + INIT_LIST_HEAD(&psEvenObjectList->sList); + + rwlock_init(&psEvenObjectList->sLock); + atomic_set(&psEvenObjectList->sEventSignalCount, 0); + + *phEventObjectList = (IMG_HANDLE *) psEvenObjectList; + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function LinuxEventObjectListDestroy + + @Description + + Linux wait object list destruction + + @Input hOSEventKM : Event object list handle + + @Return PVRSRV_ERROR : Error code + +******************************************************************************/ +PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList) +{ + PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hEventObjectList; + + if (psEvenObjectList) + { + if (!list_empty(&psEvenObjectList->sList)) + { + PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectListDestroy: Event List is not empty")); + return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT; + } + OSFreeMem(psEvenObjectList); + /*not nulling pointer, copy on stack*/ + } + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function LinuxEventObjectDelete + + @Description + + Linux wait object removal + + @Input hOSEventObject : Event object handle + + @Return PVRSRV_ERROR : Error code + +******************************************************************************/ +PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObject) +{ + if (hOSEventObject) + { + PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject; + PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList; + + write_lock_bh(&psLinuxEventObjectList->sLock); + list_del(&psLinuxEventObject->sList); + write_unlock_bh(&psLinuxEventObjectList->sLock); + +#ifdef LINUX_EVENT_OBJECT_STATS + OSLockDestroy(psLinuxEventObject->hLock); +#endif + +#if defined(DEBUG) +// PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectDelete: Event object waits: %u", psLinuxEventObject->ui32Stats)); +#endif + + OSFreeMem(psLinuxEventObject); + /*not nulling pointer, copy on stack*/ + + return PVRSRV_OK; + } + return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT; +} + +/*! 
+****************************************************************************** + + @Function LinuxEventObjectAdd + + @Description + + Linux wait object addition + + @Input hOSEventObjectList : Event object list handle + @Output phOSEventObject : Pointer to the event object handle + + @Return PVRSRV_ERROR : Error code + +******************************************************************************/ +PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject) + { + PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject; + PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList; + + /* allocate completion variable */ + psLinuxEventObject = OSAllocMem(sizeof(*psLinuxEventObject)); + if (psLinuxEventObject == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed to allocate memory")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + INIT_LIST_HEAD(&psLinuxEventObject->sList); + + /* Start with the timestamp at which event object was added to the list */ + psLinuxEventObject->ui32EventSignalCountPrevious = atomic_read(&psLinuxEventObjectList->sEventSignalCount); + +#ifdef LINUX_EVENT_OBJECT_STATS + PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&psLinuxEventObject->hLock), "OSLockCreate"); + psLinuxEventObject->ui32ScheduleAvoided = 0; + psLinuxEventObject->ui32ScheduleCalled = 0; + psLinuxEventObject->ui32ScheduleSleptFully = 0; + psLinuxEventObject->ui32ScheduleSleptPartially = 0; + psLinuxEventObject->ui32ScheduleReturnedImmediately = 0; +#endif + +#if defined(DEBUG) + psLinuxEventObject->ui32Stats = 0; +#endif + init_waitqueue_head(&psLinuxEventObject->sWait); + + psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList; + + write_lock_bh(&psLinuxEventObjectList->sLock); + list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList); + write_unlock_bh(&psLinuxEventObjectList->sLock); + + *phOSEventObject = psLinuxEventObject; + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function LinuxEventObjectSignal + + @Description + + Linux wait object signaling function + + @Input hOSEventObjectList : Event object list handle + + @Return PVRSRV_ERROR : Error code + +******************************************************************************/ +PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList) +{ + PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject; + PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList; + struct list_head *psListEntry, *psListEntryTemp, *psList; + psList = &psLinuxEventObjectList->sList; + + /* Move the timestamp ahead for this call, so a potential "Wait" from any + * EventObject/s doesn't wait for the signal to occur before returning. Early + * setting/incrementing of timestamp reduces the window where a concurrent + * "Wait" call might block while "this" Signal call is being processed */ + atomic_inc(&psLinuxEventObjectList->sEventSignalCount); + + read_lock_bh(&psLinuxEventObjectList->sLock); + list_for_each_safe(psListEntry, psListEntryTemp, psList) + { + psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)list_entry(psListEntry, PVRSRV_LINUX_EVENT_OBJECT, sList); + wake_up_interruptible(&psLinuxEventObject->sWait); + } + read_unlock_bh(&psLinuxEventObjectList->sLock); + + return PVRSRV_OK; +} + +static void _TryToFreeze(void) +{ + /* if we reach zero it means that all of the threads called try_to_freeze */ + LinuxBridgeNumActiveKernelThreadsDecrement(); + + /* Returns true if the thread was frozen, should we do anything with this + * information? What do we return? Which one is the error case? 
*/ + try_to_freeze(); + + LinuxBridgeNumActiveKernelThreadsIncrement(); +} + +void LinuxEventObjectDumpDebugInfo(IMG_HANDLE hOSEventObject) +{ +#ifdef LINUX_EVENT_OBJECT_STATS + PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject; + + OSLockAcquire(psLinuxEventObject->hLock); + PVR_LOG(("%s: EvObj(%p) schedule: Avoided(%u) Called(%u) ReturnedImmediately(%u) SleptFully(%u) SleptPartially(%u)", + __func__, psLinuxEventObject, psLinuxEventObject->ui32ScheduleAvoided, + psLinuxEventObject->ui32ScheduleCalled, psLinuxEventObject->ui32ScheduleReturnedImmediately, + psLinuxEventObject->ui32ScheduleSleptFully, psLinuxEventObject->ui32ScheduleSleptPartially)); + OSLockRelease(psLinuxEventObject->hLock); +#else + PVR_LOG(("%s: LINUX_EVENT_OBJECT_STATS disabled!", __func__)); +#endif +} + +/*! +****************************************************************************** + + @Function LinuxEventObjectWait + + @Description + + Linux wait object routine + + @Input hOSEventObject : Event object handle + + @Input ui64Timeoutus : Time out value in usec + + @Return PVRSRV_ERROR : Error code + +******************************************************************************/ +PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, + IMG_UINT64 ui64Timeoutus, + IMG_BOOL bFreezable) +{ + IMG_UINT32 ui32EventSignalCount; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + IMG_UINT32 ui32Remainder; + long timeOutJiffies; +#ifdef LINUX_EVENT_OBJECT_STATS + long totalTimeoutJiffies; + IMG_BOOL bScheduleCalled = IMG_FALSE; +#endif + + DEFINE_WAIT(sWait); + + PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject; + PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList; + + /* Check if the driver is good shape */ + if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + return PVRSRV_ERROR_TIMEOUT; + } + + /* usecs_to_jiffies only takes an uint. 
So if our timeout is bigger than an + * uint use the msec version. With such a long timeout we really don't need + * the high resolution of usecs. */ + if (ui64Timeoutus > 0xffffffffULL) + timeOutJiffies = msecs_to_jiffies(OSDivide64(ui64Timeoutus, 1000, &ui32Remainder)); + else + timeOutJiffies = usecs_to_jiffies(ui64Timeoutus); + +#ifdef LINUX_EVENT_OBJECT_STATS + totalTimeoutJiffies = timeOutJiffies; +#endif + + do + { + prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE); + ui32EventSignalCount = (IMG_UINT32) atomic_read(&psLinuxEventObjectList->sEventSignalCount); + + if (psLinuxEventObject->ui32EventSignalCountPrevious != ui32EventSignalCount) + { + /* There is a pending event signal i.e. LinuxEventObjectSignal() + * was called on the event object since the last time we checked. + * Return without waiting. */ + break; + } + + if (signal_pending(current)) + { + /* There is an OS signal pending so return. + * This allows to kill/interrupt user space processes which + * are waiting on this event object. 
*/ + break; + } + +#ifdef LINUX_EVENT_OBJECT_STATS + bScheduleCalled = IMG_TRUE; +#endif + timeOutJiffies = schedule_timeout(timeOutJiffies); + + if (bFreezable) + { + _TryToFreeze(); + } + +#if defined(DEBUG) + psLinuxEventObject->ui32Stats++; +#endif + + + } while (timeOutJiffies); + + finish_wait(&psLinuxEventObject->sWait, &sWait); + + psLinuxEventObject->ui32EventSignalCountPrevious = ui32EventSignalCount; + +#ifdef LINUX_EVENT_OBJECT_STATS + OSLockAcquire(psLinuxEventObject->hLock); + if (bScheduleCalled) + { + psLinuxEventObject->ui32ScheduleCalled++; + if (totalTimeoutJiffies == timeOutJiffies) + { + psLinuxEventObject->ui32ScheduleReturnedImmediately++; + } + else if (timeOutJiffies == 0) + { + psLinuxEventObject->ui32ScheduleSleptFully++; + } + else + { + psLinuxEventObject->ui32ScheduleSleptPartially++; + } + } + else + { + psLinuxEventObject->ui32ScheduleAvoided++; + } + OSLockRelease(psLinuxEventObject->hLock); +#endif + + if (signal_pending(current)) + { + return PVRSRV_ERROR_INTERRUPTED; + } + else + { + return timeOutJiffies ? 
PVRSRV_OK : PVRSRV_ERROR_TIMEOUT; + } +} + +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) + +PVRSRV_ERROR LinuxEventObjectWaitUntilSignalled(IMG_HANDLE hOSEventObject) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + DEFINE_WAIT(sWait); + + PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = + (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject; + PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = + psLinuxEventObject->psLinuxEventObjectList; + + /* Check if the driver is in good shape */ + if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + return PVRSRV_ERROR_TIMEOUT; + } + + prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE); + + if (psLinuxEventObject->ui32EventSignalCountPrevious != + (IMG_UINT32) atomic_read(&psLinuxEventObjectList->sEventSignalCount)) + { + /* There is a pending signal, so return without waiting */ + goto finish; + } + + schedule(); + + _TryToFreeze(); + +finish: + finish_wait(&psLinuxEventObject->sWait, &sWait); + + psLinuxEventObject->ui32EventSignalCountPrevious = + (IMG_UINT32) atomic_read(&psLinuxEventObjectList->sEventSignalCount); + + return PVRSRV_OK; +} + +#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/event.h b/drivers/mcst/gpu-imgtec/services/server/env/linux/event.h new file mode 100644 index 000000000000..bb378cb9220f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/event.h @@ -0,0 +1,54 @@ +/*************************************************************************/ /*! +@File +@Title Event Object +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList); +PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList); +PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject); +PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObject); +PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList); +PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, + IMG_UINT64 ui64Timeoutus, + IMG_BOOL bFreezable); +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) +PVRSRV_ERROR LinuxEventObjectWaitUntilSignalled(IMG_HANDLE hOSEventObject); +#endif +void LinuxEventObjectDumpDebugInfo(IMG_HANDLE hOSEventObject); diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/fwload.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/fwload.c new file mode 100644 index 000000000000..d69c151cfc40 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/fwload.c @@ -0,0 +1,303 @@ +/*************************************************************************/ /*! +@File +@Title Services firmware load and access routines for Linux +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include +#include +#include +#include + +#include "device.h" +#include "module_common.h" +#include "fwload.h" +#include "pvr_debug.h" +#include "srvkm.h" + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0)) && defined(RGX_FW_SIGNED) +/* The Linux kernel does not support the RSA PSS padding mode. It only + * supports the legacy PKCS#1 padding mode. 
+ */ +#if defined(RGX_FW_PKCS1_PSS_PADDING) +#error Linux does not support verification of RSA PSS padded signatures +#endif + +#include +#include +#include + +#include +#include + +#include "signfw.h" +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3.13.0) && RGX_FW_SIGNED */ + +struct OS_FW_IMAGE_t +{ + const struct firmware sFW; +}; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0)) && defined(RGX_FW_SIGNED) + +bool OSVerifyFirmware(const OS_FW_IMAGE *psFWImage) +{ + const struct firmware *psFW = &psFWImage->sFW; + struct FirmwareSignatureHeader *psHeader; + struct public_key_signature *psPKS; + unsigned char *szKeyID, *pcKeyID; + size_t uDigestSize, uDescSize; + void *pvSignature, *pvSigner; + struct crypto_shash *psTFM; + struct shash_desc *psDesc; + uint32_t ui32SignatureLen; + bool bVerified = false; + key_ref_t hKey; + uint8_t i; + int res; + + if (psFW->size < FW_SIGN_BACKWARDS_OFFSET) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Firmware is too small (%zu bytes)", + __func__, psFW->size)); + goto err_release_firmware; + } + + psHeader = (struct FirmwareSignatureHeader *) + (psFW->data + (psFW->size - FW_SIGN_BACKWARDS_OFFSET)); + + /* All derived from u8 so can't be exploited to flow out of this page */ + pvSigner = (u8 *)psHeader + sizeof(struct FirmwareSignatureHeader); + pcKeyID = (unsigned char *)((u8 *)pvSigner + psHeader->ui8SignerLen); + pvSignature = (u8 *)pcKeyID + psHeader->ui8KeyIDLen; + + /* We cannot update KERNEL_RO in-place, so we must copy the len */ + ui32SignatureLen = ntohl(psHeader->ui32SignatureLen); + + if (psHeader->ui8Algo >= PKEY_ALGO__LAST) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Public key algorithm %u is not supported", + __func__, psHeader->ui8Algo)); + goto err_release_firmware; + } + + if (psHeader->ui8HashAlgo >= PKEY_HASH__LAST) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Hash algorithm %u is not supported", + __func__, psHeader->ui8HashAlgo)); + goto err_release_firmware; + } + + if (psHeader->ui8IDType != PKEY_ID_X509) + { + 
PVR_DPF((PVR_DBG_ERROR, "%s: Only asymmetric X.509 PKI certificates " + "are supported", __func__)); + goto err_release_firmware; + } + + /* Generate a hash of the fw data (including the padding) */ + + psTFM = crypto_alloc_shash(hash_algo_name[psHeader->ui8HashAlgo], 0, 0); + if (IS_ERR(psTFM)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: crypto_alloc_shash() failed (%ld)", + __func__, PTR_ERR(psTFM))); + goto err_release_firmware; + } + + uDescSize = crypto_shash_descsize(psTFM) + sizeof(*psDesc); + uDigestSize = crypto_shash_digestsize(psTFM); + + psPKS = kzalloc(sizeof(*psPKS) + uDescSize + uDigestSize, GFP_KERNEL); + if (!psPKS) + goto err_free_crypto_shash; + + psDesc = (struct shash_desc *)((u8 *)psPKS + sizeof(*psPKS)); + psDesc->tfm = psTFM; + psDesc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + psPKS->pkey_algo = psHeader->ui8Algo; + psPKS->pkey_hash_algo = psHeader->ui8HashAlgo; + + psPKS->digest = (u8 *)psPKS + sizeof(*psPKS) + uDescSize; + psPKS->digest_size = uDigestSize; + + res = crypto_shash_init(psDesc); + if (res < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: crypto_shash_init() failed (%d)", + __func__, res)); + goto err_free_pks; + } + + res = crypto_shash_finup(psDesc, psFW->data, psFW->size - FW_SIGN_BACKWARDS_OFFSET, + psPKS->digest); + if (res < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: crypto_shash_finup() failed (%d)", + __func__, res)); + goto err_free_pks; + } + + /* Populate the MPI with the signature payload */ + + psPKS->nr_mpi = 1; + psPKS->rsa.s = mpi_read_raw_data(pvSignature, ui32SignatureLen); + if (!psPKS->rsa.s) + { + PVR_DPF((PVR_DBG_ERROR, "%s: mpi_read_raw_data() failed", __func__)); + goto err_free_pks; + } + + /* Look up the key we'll use to verify this signature */ + + szKeyID = kmalloc(psHeader->ui8SignerLen + 2 + + psHeader->ui8KeyIDLen * 2 + 1, GFP_KERNEL); + if (!szKeyID) + goto err_free_mpi; + + memcpy(szKeyID, pvSigner, psHeader->ui8SignerLen); + + szKeyID[psHeader->ui8SignerLen + 0] = ':'; + szKeyID[psHeader->ui8SignerLen + 1] = ' '; + + for 
(i = 0; i < psHeader->ui8KeyIDLen; i++) + sprintf(&szKeyID[psHeader->ui8SignerLen + 2 + i * 2], + "%02x", pcKeyID[i]); + + szKeyID[psHeader->ui8SignerLen + 2 + psHeader->ui8KeyIDLen * 2] = 0; + + hKey = keyring_search(make_key_ref(system_trusted_keyring, 1), + &key_type_asymmetric, szKeyID); + if (IS_ERR(hKey)) + { + PVR_DPF((PVR_DBG_ERROR, "Request for unknown key '%s' (%ld)", + szKeyID, PTR_ERR(hKey))); + goto err_free_keyid_string; + } + + res = verify_signature(key_ref_to_ptr(hKey), psPKS); + if (res) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Firmware digital signature verification " + "failed (%d)", __func__, res)); + goto err_put_key; + } + + PVR_LOG(("Digital signature for '%s' verified successfully.", + RGX_FW_FILENAME)); + bVerified = true; +err_put_key: + key_put(key_ref_to_ptr(hKey)); +err_free_keyid_string: + kfree(szKeyID); +err_free_mpi: + mpi_free(psPKS->rsa.s); +err_free_pks: + kfree(psPKS); +err_free_crypto_shash: + crypto_free_shash(psTFM); +err_release_firmware: + return bVerified; +} + +#else /* defined(RGX_FW_SIGNED) */ + +inline bool OSVerifyFirmware(const OS_FW_IMAGE *psFWImage) +{ + return true; +} + +#endif /* defined(RGX_FW_SIGNED) */ + +OS_FW_IMAGE * +OSLoadFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, const IMG_CHAR *pszBVNCString, bool (*pfnVerifyFirmware)(const OS_FW_IMAGE*)) +{ + const struct firmware *psFW = NULL; + OS_FW_IMAGE *psFWImage; + IMG_INT32 res; + + res = request_firmware(&psFW, pszBVNCString, psDeviceNode->psDevConfig->pvOSDevice); + if (res != 0) + { + PVR_DPF((PVR_DBG_WARNING, "%s: request_firmware('%s') failed (%d)", + __func__, pszBVNCString, res)); + + release_firmware(psFW); + return NULL; + } + + psFWImage = (OS_FW_IMAGE *)psFW; + + if (pfnVerifyFirmware != NULL && !pfnVerifyFirmware(psFWImage)) + { + release_firmware(psFW); + return NULL; + } + + return (OS_FW_IMAGE *)psFW; +} + +void +OSUnloadFirmware(OS_FW_IMAGE *psFWImage) +{ + const struct firmware *psFW = &psFWImage->sFW; + + release_firmware(psFW); +} + +size_t 
+OSFirmwareSize(OS_FW_IMAGE *psFWImage) +{ + const struct firmware *psFW = &psFWImage->sFW; + return psFW->size; +} + +const void * +OSFirmwareData(OS_FW_IMAGE *psFWImage) +{ + const struct firmware *psFW = &psFWImage->sFW; + + return psFW->data; +} + +/****************************************************************************** + End of file (fwload.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/handle_idr.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/handle_idr.c new file mode 100644 index 000000000000..c40e096bfaa5 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/handle_idr.c @@ -0,0 +1,440 @@ +/*************************************************************************/ /*! +@File +@Title Resource Handle Manager - IDR Back-end +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provide IDR based resource handle management back-end +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#include +#include +#include +#include +#include + +#include "handle_impl.h" +#include "allocmem.h" +#include "osfunc.h" +#include "pvr_debug.h" + +#define ID_VALUE_MIN 1 +#define ID_VALUE_MAX INT_MAX + +#define ID_TO_HANDLE(i) ((IMG_HANDLE)(uintptr_t)(i)) +#define HANDLE_TO_ID(h) ((IMG_INT)(uintptr_t)(h)) + +struct _HANDLE_IMPL_BASE_ +{ + struct idr sIdr; + + IMG_UINT32 ui32MaxHandleValue; + + IMG_UINT32 ui32TotalHandCount; +}; + +typedef struct _HANDLE_ITER_DATA_WRAPPER_ +{ + PFN_HANDLE_ITER pfnHandleIter; + void *pvHandleIterData; +} HANDLE_ITER_DATA_WRAPPER; + + +static int HandleIterFuncWrapper(int id, void *data, void *iter_data) +{ + HANDLE_ITER_DATA_WRAPPER *psIterData = (HANDLE_ITER_DATA_WRAPPER *)iter_data; + + PVR_UNREFERENCED_PARAMETER(data); + + return (int)psIterData->pfnHandleIter(ID_TO_HANDLE(id), psIterData->pvHandleIterData); +} + +/*! +****************************************************************************** + + @Function AcquireHandle + + @Description Acquire a new handle + + @Input psBase - Pointer to handle base structure + phHandle - Points to a handle pointer + pvData - Pointer to resource to be associated with the handle + + @Output phHandle - Points to a handle pointer + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +static PVRSRV_ERROR AcquireHandle(HANDLE_IMPL_BASE *psBase, + IMG_HANDLE *phHandle, + void *pvData) +{ + int id; + int result; + + PVR_ASSERT(psBase != NULL); + PVR_ASSERT(phHandle != NULL); + PVR_ASSERT(pvData != NULL); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)) + idr_preload(GFP_KERNEL); + id = idr_alloc(&psBase->sIdr, pvData, ID_VALUE_MIN, psBase->ui32MaxHandleValue + 1, 0); + idr_preload_end(); + + result = id; +#else + do + { + if (idr_pre_get(&psBase->sIdr, GFP_KERNEL) == 0) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + result = 
idr_get_new_above(&psBase->sIdr, pvData, ID_VALUE_MIN, &id); + } while (result == -EAGAIN); + + if ((IMG_UINT32)id > psBase->ui32MaxHandleValue) + { + idr_remove(&psBase->sIdr, id); + result = -ENOSPC; + } +#endif + + if (result < 0) + { + if (result == -ENOSPC) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Limit of %u handles reached", + __func__, psBase->ui32MaxHandleValue)); + + return PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; + } + + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psBase->ui32TotalHandCount++; + + *phHandle = ID_TO_HANDLE(id); + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function ReleaseHandle + + @Description Release a handle that is no longer needed. + + @Input psBase - Pointer to handle base structure + hHandle - Handle to release + ppvData - Points to a void data pointer + + @Output ppvData - Points to a void data pointer + + @Return PVRSRV_OK or PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR ReleaseHandle(HANDLE_IMPL_BASE *psBase, + IMG_HANDLE hHandle, + void **ppvData) +{ + int id = HANDLE_TO_ID(hHandle); + void *pvData; + + PVR_ASSERT(psBase); + + /* Get the data associated with the handle. If we get back NULL then + it's an invalid handle */ + + pvData = idr_find(&psBase->sIdr, id); + if (likely(pvData)) + { + idr_remove(&psBase->sIdr, id); + psBase->ui32TotalHandCount--; + } + + if (unlikely(pvData == NULL)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Handle out of range (%u > %u)", + __func__, id, psBase->ui32TotalHandCount)); + return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; + } + + if (ppvData) + { + *ppvData = pvData; + } + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function GetHandleData + + @Description Get the data associated with the given handle + + @Input psBase - Pointer to handle base structure + hHandle - Handle from which data should be retrieved + ppvData - Points to a void data pointer + + @Output ppvData - Points to a void data pointer + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +static PVRSRV_ERROR GetHandleData(HANDLE_IMPL_BASE *psBase, + IMG_HANDLE hHandle, + void **ppvData) +{ + int id = HANDLE_TO_ID(hHandle); + void *pvData; + + PVR_ASSERT(psBase); + PVR_ASSERT(ppvData); + + pvData = idr_find(&psBase->sIdr, id); + if (likely(pvData)) + { + *ppvData = pvData; + + return PVRSRV_OK; + } + else + { + return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; + } +} + +/*! +****************************************************************************** + + @Function SetHandleData + + @Description Set the data associated with the given handle + + @Input psBase - Pointer to handle base structure + hHandle - Handle for which data should be changed + pvData - Pointer to new data to be associated with the handle + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +static PVRSRV_ERROR SetHandleData(HANDLE_IMPL_BASE *psBase, + IMG_HANDLE hHandle, + void *pvData) +{ + int id = HANDLE_TO_ID(hHandle); + void *pvOldData; + + PVR_ASSERT(psBase); + + pvOldData = idr_replace(&psBase->sIdr, pvData, id); + if (IS_ERR(pvOldData)) + { + if (PTR_ERR(pvOldData) == -ENOENT) + { + return PVRSRV_ERROR_HANDLE_NOT_ALLOCATED; + } + else + { + return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; + } + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR IterateOverHandles(HANDLE_IMPL_BASE *psBase, PFN_HANDLE_ITER pfnHandleIter, void *pvHandleIterData) +{ + HANDLE_ITER_DATA_WRAPPER sIterData; + + PVR_ASSERT(psBase); + 
PVR_ASSERT(pfnHandleIter); + + sIterData.pfnHandleIter = pfnHandleIter; + sIterData.pvHandleIterData = pvHandleIterData; + + return (PVRSRV_ERROR)idr_for_each(&psBase->sIdr, HandleIterFuncWrapper, &sIterData); +} + +/*! +****************************************************************************** + + @Function EnableHandlePurging + + @Description Enable purging for a given handle base + + @Input psBase - pointer to handle base structure + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +static PVRSRV_ERROR EnableHandlePurging(HANDLE_IMPL_BASE *psBase) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + PVR_ASSERT(psBase); + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function PurgeHandles + + @Description Purge handles for a given handle base + + @Input psBase - Pointer to handle base structure + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +static PVRSRV_ERROR PurgeHandles(HANDLE_IMPL_BASE *psBase) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + PVR_ASSERT(psBase); + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function CreateHandleBase + + @Description Create a handle base structure + + @Input ppsBase - pointer to handle base structure pointer + + @Output ppsBase - points to handle base structure pointer + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +static PVRSRV_ERROR CreateHandleBase(HANDLE_IMPL_BASE **ppsBase) +{ + HANDLE_IMPL_BASE *psBase; + + PVR_ASSERT(ppsBase); + + psBase = OSAllocZMem(sizeof(*psBase)); + if (psBase == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't allocate generic handle base", + __func__)); + + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + idr_init(&psBase->sIdr); + + psBase->ui32MaxHandleValue = ID_VALUE_MAX; + psBase->ui32TotalHandCount = 0; + + *ppsBase = psBase; + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function DestroyHandleBase + + @Description Destroy a handle base structure + + @Input psBase - pointer to handle base structure + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +static PVRSRV_ERROR DestroyHandleBase(HANDLE_IMPL_BASE *psBase) +{ + PVR_ASSERT(psBase); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)) + idr_remove_all(&psBase->sIdr); +#endif + + /* Finally destroy the idr */ + idr_destroy(&psBase->sIdr); + + OSFreeMem(psBase); + + return PVRSRV_OK; +} + + +static const HANDLE_IMPL_FUNCTAB g_sHandleFuncTab = +{ + .pfnAcquireHandle = AcquireHandle, + .pfnReleaseHandle = ReleaseHandle, + .pfnGetHandleData = GetHandleData, + .pfnSetHandleData = SetHandleData, + .pfnIterateOverHandles = IterateOverHandles, + .pfnEnableHandlePurging = EnableHandlePurging, + .pfnPurgeHandles = PurgeHandles, + .pfnCreateHandleBase = CreateHandleBase, + .pfnDestroyHandleBase = DestroyHandleBase +}; + +PVRSRV_ERROR 
PVRSRVHandleGetFuncTable(HANDLE_IMPL_FUNCTAB const **ppsFuncs) +{ + static IMG_BOOL bAcquired = IMG_FALSE; + + if (bAcquired) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Function table already acquired", + __func__)); + return PVRSRV_ERROR_RESOURCE_UNAVAILABLE; + } + + if (ppsFuncs == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + *ppsFuncs = &g_sHandleFuncTab; + + bAcquired = IMG_TRUE; + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/km_apphint.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/km_apphint.c new file mode 100644 index 000000000000..9a66b4c075ca --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/km_apphint.c @@ -0,0 +1,1568 @@ +/*************************************************************************/ /*! +@File km_apphint.c +@Title Apphint routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "di_server.h" +#include "pvr_uaccess.h" +#include +#include +#include +#include + +/* Common and SO layer */ +#include "img_defs.h" +#include "sofunc_pvr.h" + +/* for action device access */ +#include "pvrsrv.h" +#include "device.h" +#include "rgxdevice.h" +#include "rgxfwutils.h" +#include "rgxhwperf.h" +#include "htbserver.h" +#include "rgxutils.h" +#include "rgxapi_km.h" + + +/* defines for default values */ +#include "rgx_fwif_km.h" +#include "htbuffer_types.h" + +#include "pvr_notifier.h" + +#include "km_apphint_defs.h" +#include "km_apphint.h" + +#if defined(PDUMP) +#include +#include "pdump_km.h" +#endif + +/* Size of temporary buffers used to read and write AppHint data. 
+ * Must be large enough to contain any strings read or written but no larger + * than 4096: which is the buffer size for the kernel_param_ops .get + * function. And less than 1024 to keep the stack frame size within bounds. + */ +#define APPHINT_BUFFER_SIZE 512 + +#define APPHINT_DEVICES_MAX 16 + +/* +******************************************************************************* + * AppHint mnemonic data type helper tables +******************************************************************************/ +struct apphint_lookup { + const char *name; + int value; +}; + +static const struct apphint_lookup fwt_logtype_tbl[] = { + { "trace", 0}, + { "none", 0} +#if defined(SUPPORT_TBI_INTERFACE) + , { "tbi", 1} +#endif +}; + +static const struct apphint_lookup fwt_loggroup_tbl[] = { + RGXFWIF_LOG_GROUP_NAME_VALUE_MAP +}; + +static const struct apphint_lookup htb_loggroup_tbl[] = { +#define X(a, b) { #b, HTB_LOG_GROUP_FLAG(a) }, + HTB_LOG_SFGROUPLIST +#undef X +}; + +static const struct apphint_lookup htb_opmode_tbl[] = { + { "droplatest", HTB_OPMODE_DROPLATEST}, + { "dropoldest", HTB_OPMODE_DROPOLDEST}, + { "block", HTB_OPMODE_BLOCK} +}; + +__maybe_unused +static const struct apphint_lookup htb_logmode_tbl[] = { + { "all", HTB_LOGMODE_ALLPID}, + { "restricted", HTB_LOGMODE_RESTRICTEDPID} +}; + +__maybe_unused +static const struct apphint_lookup timecorr_clk_tbl[] = { + { "mono", 0 }, + { "mono_raw", 1 }, + { "sched", 2 } +}; + +/* +******************************************************************************* + Data types +******************************************************************************/ +union apphint_value { + IMG_UINT64 UINT64; + IMG_UINT32 UINT32; + IMG_BOOL BOOL; + IMG_CHAR *STRING; +}; + +struct apphint_action { + union { + PVRSRV_ERROR (*UINT64)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value); + PVRSRV_ERROR (*UINT32)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 *value); + PVRSRV_ERROR 
(*BOOL)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value); + PVRSRV_ERROR (*STRING)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR **value); + } query; + union { + PVRSRV_ERROR (*UINT64)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value); + PVRSRV_ERROR (*UINT32)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value); + PVRSRV_ERROR (*BOOL)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value); + PVRSRV_ERROR (*STRING)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value); + } set; + const PVRSRV_DEVICE_NODE *device; + const void *private_data; + union apphint_value stored; + bool free; +}; + +struct apphint_param { + IMG_UINT32 id; + APPHINT_DATA_TYPE data_type; + const void *data_type_helper; + IMG_UINT32 helper_size; +}; + +struct apphint_init_data { + IMG_UINT32 id; /* index into AppHint Table */ + APPHINT_CLASS class; + const IMG_CHAR *name; + union apphint_value default_value; +}; + +struct apphint_class_state { + APPHINT_CLASS class; + IMG_BOOL enabled; +}; + +struct apphint_work { + struct work_struct work; + union apphint_value new_value; + struct apphint_action *action; +}; + +/* +******************************************************************************* + Initialization / configuration table data +******************************************************************************/ +#define UINT32Bitfield UINT32 +#define UINT32List UINT32 + +static const struct apphint_init_data init_data_buildvar[] = { +#define X(a, b, c, d, e) \ + {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} }, + APPHINT_LIST_BUILDVAR_COMMON + APPHINT_LIST_BUILDVAR +#undef X +}; + +static const struct apphint_init_data init_data_modparam[] = { +#define X(a, b, c, d, e) \ + {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} }, + APPHINT_LIST_MODPARAM_COMMON + APPHINT_LIST_MODPARAM +#undef X +}; + +static const struct apphint_init_data 
init_data_debugfs[] = { +#define X(a, b, c, d, e) \ + {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} }, + APPHINT_LIST_DEBUGFS_COMMON + APPHINT_LIST_DEBUGFS +#undef X +}; + +static const struct apphint_init_data init_data_debugfs_device[] = { +#define X(a, b, c, d, e) \ + {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} }, + APPHINT_LIST_DEBUGFS_DEVICE_COMMON + APPHINT_LIST_DEBUGFS_DEVICE +#undef X +}; + +#undef UINT32Bitfield +#undef UINT32List + +__maybe_unused static const char NO_PARAM_TABLE[] = {}; + +static const struct apphint_param param_lookup[] = { +#define X(a, b, c, d, e) \ + {APPHINT_ID_ ## a, APPHINT_DATA_TYPE_ ## b, e, ARRAY_SIZE(e) }, + APPHINT_LIST_ALL +#undef X +}; + +static const struct apphint_class_state class_state[] = { +#define X(a) {APPHINT_CLASS_ ## a, APPHINT_ENABLED_CLASS_ ## a}, + APPHINT_CLASS_LIST +#undef X +}; + +/* +******************************************************************************* + Global state +******************************************************************************/ +/* If the union apphint_value becomes such that it is not possible to read + * and write atomically, a mutex may be desirable to prevent a read returning + * a partially written state. + * This would require a statically initialized mutex outside of the + * struct apphint_state to prevent use of an uninitialized mutex when + * module_params are provided on the command line. 
+ * static DEFINE_MUTEX(apphint_mutex); + */ +static struct apphint_state +{ + struct workqueue_struct *workqueue; + DI_GROUP *debugfs_device_rootdir[APPHINT_DEVICES_MAX]; + DI_ENTRY *debugfs_device_entry[APPHINT_DEVICES_MAX][APPHINT_DEBUGFS_DEVICE_ID_MAX]; + DI_GROUP *debugfs_rootdir; + DI_ENTRY *debugfs_entry[APPHINT_DEBUGFS_ID_MAX]; + DI_GROUP *buildvar_rootdir; + DI_ENTRY *buildvar_entry[APPHINT_BUILDVAR_ID_MAX]; + + unsigned num_devices; + PVRSRV_DEVICE_NODE *devices[APPHINT_DEVICES_MAX]; + unsigned initialized; + + /* Array contains value space for 1 copy of all apphint values defined + * (for device 1) and N copies of device specific apphint values for + * multi-device platforms. + */ + struct apphint_action val[APPHINT_ID_MAX + ((APPHINT_DEVICES_MAX-1)*APPHINT_DEBUGFS_DEVICE_ID_MAX)]; + +} apphint = { +/* statically initialise default values to ensure that any module_params + * provided on the command line are not overwritten by defaults. + */ + .val = { +#define UINT32Bitfield UINT32 +#define UINT32List UINT32 +#define X(a, b, c, d, e) \ + { {NULL}, {NULL}, NULL, NULL, {.b=d}, false }, + APPHINT_LIST_ALL +#undef X +#undef UINT32Bitfield +#undef UINT32List + }, + .initialized = 0, + .num_devices = 0 +}; + +#define APPHINT_DEBUGFS_DEVICE_ID_OFFSET (APPHINT_ID_MAX-APPHINT_DEBUGFS_DEVICE_ID_MAX) + +static inline void +get_apphint_id_from_action_addr(const struct apphint_action * const addr, + APPHINT_ID * const id) +{ + *id = (APPHINT_ID)(addr - apphint.val); + if (*id >= APPHINT_ID_MAX) { + *id -= APPHINT_DEBUGFS_DEVICE_ID_OFFSET; + *id %= APPHINT_DEBUGFS_DEVICE_ID_MAX; + *id += APPHINT_DEBUGFS_DEVICE_ID_OFFSET; + } +} + +static inline void +get_value_offset_from_device(const PVRSRV_DEVICE_NODE * const device, + int * const offset) +{ + int i; + + /* No device offset if not a device specific apphint */ + if (APPHINT_OF_DRIVER_NO_DEVICE == device) { + *offset = 0; + return; + } + + for (i = 0; device && i < APPHINT_DEVICES_MAX; i++) { + if (apphint.devices[i] 
== device) + break; + } + if (APPHINT_DEVICES_MAX == i) { + PVR_DPF((PVR_DBG_WARNING, "%s: Unregistered device", __func__)); + i = 0; + } + *offset = i * APPHINT_DEBUGFS_DEVICE_ID_MAX; +} + +/** + * apphint_action_worker - perform an action after an AppHint update has been + * requested by a UM process + * And update the record of the current active value + */ +static void apphint_action_worker(struct work_struct *work) +{ + struct apphint_work *work_pkt = container_of(work, + struct apphint_work, + work); + struct apphint_action *a = work_pkt->action; + union apphint_value value = work_pkt->new_value; + APPHINT_ID id; + PVRSRV_ERROR result = PVRSRV_OK; + + get_apphint_id_from_action_addr(a, &id); + + if (a->set.UINT64) { + switch (param_lookup[id].data_type) { + case APPHINT_DATA_TYPE_UINT64: + result = a->set.UINT64(a->device, + a->private_data, + value.UINT64); + break; + + case APPHINT_DATA_TYPE_UINT32: + case APPHINT_DATA_TYPE_UINT32Bitfield: + case APPHINT_DATA_TYPE_UINT32List: + result = a->set.UINT32(a->device, + a->private_data, + value.UINT32); + break; + + case APPHINT_DATA_TYPE_BOOL: + result = a->set.BOOL(a->device, + a->private_data, + value.BOOL); + break; + + case APPHINT_DATA_TYPE_STRING: + result = a->set.STRING(a->device, + a->private_data, + value.STRING); + kfree(value.STRING); + break; + + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: unrecognised data type (%d), index (%d)", + __func__, param_lookup[id].data_type, id)); + } + + if (PVRSRV_OK != result) { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed (%s)", + __func__, PVRSRVGetErrorString(result))); + } + } else { + if (a->free) { + kfree(a->stored.STRING); + } + a->stored = value; + if (param_lookup[id].data_type == APPHINT_DATA_TYPE_STRING) { + a->free = true; + } + PVR_DPF((PVR_DBG_MESSAGE, + "%s: AppHint value updated before handler is registered, ID(%d)", + __func__, id)); + } + kfree((void *)work_pkt); +} + +static void apphint_action(union apphint_value new_value, + struct apphint_action 
*action) +{ + struct apphint_work *work_pkt = kmalloc(sizeof(*work_pkt), GFP_KERNEL); + + /* queue apphint update on a serialized workqueue to avoid races */ + if (work_pkt) { + work_pkt->new_value = new_value; + work_pkt->action = action; + INIT_WORK(&work_pkt->work, apphint_action_worker); + if (0 == queue_work(apphint.workqueue, &work_pkt->work)) { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to queue apphint change request", + __func__)); + goto err_exit; + } + } else { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to alloc memory for apphint change request", + __func__)); + goto err_exit; + } + return; +err_exit: + kfree(new_value.STRING); +} + +/** + * apphint_read - read the different AppHint data types + * return -errno or the buffer size + */ +static int apphint_read(char *buffer, size_t count, APPHINT_ID ue, + union apphint_value *value) +{ + APPHINT_DATA_TYPE data_type = param_lookup[ue].data_type; + int result = 0; + + switch (data_type) { + case APPHINT_DATA_TYPE_UINT64: + if (kstrtou64(buffer, 0, &value->UINT64) < 0) { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid UINT64 input data for id %d: %s", + __func__, ue, buffer)); + result = -EINVAL; + goto err_exit; + } + break; + case APPHINT_DATA_TYPE_UINT32: + if (kstrtou32(buffer, 0, &value->UINT32) < 0) { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid UINT32 input data for id %d: %s", + __func__, ue, buffer)); + result = -EINVAL; + goto err_exit; + } + break; + case APPHINT_DATA_TYPE_BOOL: + switch (buffer[0]) { + case '0': + case 'n': + case 'N': + case 'f': + case 'F': + value->BOOL = IMG_FALSE; + break; + case '1': + case 'y': + case 'Y': + case 't': + case 'T': + value->BOOL = IMG_TRUE; + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid BOOL input data for id %d: %s", + __func__, ue, buffer)); + result = -EINVAL; + goto err_exit; + } + break; + case APPHINT_DATA_TYPE_UINT32List: + { + int i; + struct apphint_lookup *lookup = + (struct apphint_lookup *) + param_lookup[ue].data_type_helper; + int size = 
param_lookup[ue].helper_size; + /* buffer may include '\n', remove it */ + char *arg = strsep(&buffer, "\n"); + + if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) { + result = -EINVAL; + goto err_exit; + } + + for (i = 0; i < size; i++) { + if (strcasecmp(lookup[i].name, arg) == 0) { + value->UINT32 = lookup[i].value; + break; + } + } + if (i == size) { + if (OSStringLength(arg) == 0) { + PVR_DPF((PVR_DBG_ERROR, + "%s: No value set for AppHint", + __func__)); + } else { + PVR_DPF((PVR_DBG_ERROR, + "%s: Unrecognised AppHint value (%s)", + __func__, arg)); + } + result = -EINVAL; + } + break; + } + case APPHINT_DATA_TYPE_UINT32Bitfield: + { + int i; + struct apphint_lookup *lookup = + (struct apphint_lookup *) + param_lookup[ue].data_type_helper; + int size = param_lookup[ue].helper_size; + /* buffer may include '\n', remove it */ + char *string = strsep(&buffer, "\n"); + char *token = strsep(&string, ","); + + if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) { + result = -EINVAL; + goto err_exit; + } + + value->UINT32 = 0; + /* empty string is valid to clear the bitfield */ + while (token && *token) { + for (i = 0; i < size; i++) { + if (strcasecmp(lookup[i].name, token) == 0) { + value->UINT32 |= lookup[i].value; + break; + } + } + if (i == size) { + PVR_DPF((PVR_DBG_ERROR, + "%s: Unrecognised AppHint value (%s)", + __func__, token)); + result = -EINVAL; + goto err_exit; + } + token = strsep(&string, ","); + } + break; + } + case APPHINT_DATA_TYPE_STRING: + { + /* buffer may include '\n', remove it */ + char *string = strsep(&buffer, "\n"); + size_t len = OSStringLength(string); + + if (!len) { + result = -EINVAL; + goto err_exit; + } + + ++len; + + value->STRING = kmalloc(len , GFP_KERNEL); + if (!value->STRING) { + result = -ENOMEM; + goto err_exit; + } + + OSStringLCopy(value->STRING, string, len); + break; + } + default: + result = -EINVAL; + goto err_exit; + } + +err_exit: + return (result < 0) ? 
result : count; +} + +static PVRSRV_ERROR get_apphint_value_from_action(const struct apphint_action * const action, + union apphint_value * const value) +{ + APPHINT_ID id; + APPHINT_DATA_TYPE data_type; + PVRSRV_ERROR result = PVRSRV_OK; + + get_apphint_id_from_action_addr(action, &id); + data_type = param_lookup[id].data_type; + + if (action->query.UINT64) { + switch (data_type) { + case APPHINT_DATA_TYPE_UINT64: + result = action->query.UINT64(action->device, + action->private_data, + &value->UINT64); + break; + + case APPHINT_DATA_TYPE_UINT32: + case APPHINT_DATA_TYPE_UINT32Bitfield: + case APPHINT_DATA_TYPE_UINT32List: + result = action->query.UINT32(action->device, + action->private_data, + &value->UINT32); + break; + + case APPHINT_DATA_TYPE_BOOL: + result = action->query.BOOL(action->device, + action->private_data, + &value->BOOL); + break; + + case APPHINT_DATA_TYPE_STRING: + result = action->query.STRING(action->device, + action->private_data, + &value->STRING); + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: unrecognised data type (%d), index (%d)", + __func__, data_type, id)); + } + } else { + *value = action->stored; + } + + if (PVRSRV_OK != result) { + PVR_DPF((PVR_DBG_ERROR, "%s: failed (%d), index (%d)", __func__, result, id)); + } + + return result; +} + +/** + * apphint_write - write the current AppHint data to a buffer + * + * Returns length written or -errno + */ +static int apphint_write(char *buffer, const size_t size, + const struct apphint_action *a) +{ + const struct apphint_param *hint; + int result = 0; + APPHINT_ID id; + union apphint_value value; + + get_apphint_id_from_action_addr(a, &id); + hint = ¶m_lookup[id]; + + result = get_apphint_value_from_action(a, &value); + + switch (hint->data_type) { + case APPHINT_DATA_TYPE_UINT64: + result += snprintf(buffer + result, size - result, + "0x%016llx", + value.UINT64); + break; + case APPHINT_DATA_TYPE_UINT32: + result += snprintf(buffer + result, size - result, + "0x%08x", + 
value.UINT32); + break; + case APPHINT_DATA_TYPE_BOOL: + result += snprintf(buffer + result, size - result, + "%s", + value.BOOL ? "Y" : "N"); + break; + case APPHINT_DATA_TYPE_STRING: + if (value.STRING) { + result += snprintf(buffer + result, size - result, + "%s", + *value.STRING ? value.STRING : "(none)"); + } else { + result += snprintf(buffer + result, size - result, + "(none)"); + } + break; + case APPHINT_DATA_TYPE_UINT32List: + { + struct apphint_lookup *lookup = + (struct apphint_lookup *) hint->data_type_helper; + IMG_UINT32 i; + + if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) { + result = -EINVAL; + goto err_exit; + } + + for (i = 0; i < hint->helper_size; i++) { + if (lookup[i].value == value.UINT32) { + result += snprintf(buffer + result, + size - result, + "%s", + lookup[i].name); + break; + } + } + break; + } + case APPHINT_DATA_TYPE_UINT32Bitfield: + { + struct apphint_lookup *lookup = + (struct apphint_lookup *) hint->data_type_helper; + IMG_UINT32 i; + + if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) { + result = -EINVAL; + goto err_exit; + } + + for (i = 0; i < hint->helper_size; i++) { + if (lookup[i].value & value.UINT32) { + result += snprintf(buffer + result, + size - result, + "%s,", + lookup[i].name); + } + } + if (result) { + /* remove any trailing ',' */ + --result; + *(buffer + result) = '\0'; + } else { + result += snprintf(buffer + result, + size - result, "none"); + } + break; + } + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: unrecognised data type (%d), index (%d)", + __func__, hint->data_type, id)); + result = -EINVAL; + } + +err_exit: + return result; +} + +/* +******************************************************************************* + Module parameters initialization - different from debugfs +******************************************************************************/ +/** + * apphint_kparam_set - Handle an update of a module parameter + * + * Returns 0, or -errno. arg is in kp->arg. 
+ */ +static int apphint_kparam_set(const char *val, const struct kernel_param *kp) +{ + char val_copy[APPHINT_BUFFER_SIZE]; + APPHINT_ID id; + union apphint_value value; + int result; + + /* need to discard const in case of string comparison */ + result = strlcpy(val_copy, val, APPHINT_BUFFER_SIZE); + + get_apphint_id_from_action_addr(kp->arg, &id); + if (result < APPHINT_BUFFER_SIZE) { + result = apphint_read(val_copy, result, id, &value); + if (result >= 0) { + ((struct apphint_action *)kp->arg)->stored = value; + if (param_lookup[id].data_type == APPHINT_DATA_TYPE_STRING) { + ((struct apphint_action *)kp->arg)->free = true; + } + } + } else { + PVR_DPF((PVR_DBG_ERROR, "%s: String too long", __func__)); + } + return (result > 0) ? 0 : result; +} + +/** + * apphint_kparam_get - handle a read of a module parameter + * + * Returns length written or -errno. Buffer is 4k (ie. be short!) + */ +static int apphint_kparam_get(char *buffer, const struct kernel_param *kp) +{ + return apphint_write(buffer, PAGE_SIZE, kp->arg); +} + +__maybe_unused +static const struct kernel_param_ops apphint_kparam_fops = { + .set = apphint_kparam_set, + .get = apphint_kparam_get, +}; + +/* + * call module_param_cb() for all AppHints listed in APPHINT_LIST_MODPARAM_COMMON + APPHINT_LIST_MODPARAM + * apphint_modparam_class_ ## resolves to apphint_modparam_enable() except for + * AppHint classes that have been disabled. 
+ */ + +#define apphint_modparam_enable(name, number, perm) \ + module_param_cb(name, &apphint_kparam_fops, &apphint.val[number], perm); + +#define X(a, b, c, d, e) \ + apphint_modparam_class_ ##c(a, APPHINT_ID_ ## a, 0444) + APPHINT_LIST_MODPARAM_COMMON + APPHINT_LIST_MODPARAM +#undef X + +/* +******************************************************************************* + Debugfs get (seq file) operations - supporting functions +******************************************************************************/ +static void *apphint_di_start(OSDI_IMPL_ENTRY *s, IMG_UINT64 *pos) +{ + if (*pos == 0) { + /* We want only one entry in the sequence, one call to show() */ + return (void *) 1; + } + + PVR_UNREFERENCED_PARAMETER(s); + + return NULL; +} + +static void apphint_di_stop(OSDI_IMPL_ENTRY *s, void *v) +{ + PVR_UNREFERENCED_PARAMETER(s); + PVR_UNREFERENCED_PARAMETER(v); +} + +static void *apphint_di_next(OSDI_IMPL_ENTRY *s, void *v, IMG_UINT64 *pos) +{ + PVR_UNREFERENCED_PARAMETER(s); + PVR_UNREFERENCED_PARAMETER(v); + PVR_UNREFERENCED_PARAMETER(pos); + return NULL; +} + +static int apphint_di_show(OSDI_IMPL_ENTRY *s, void *v) +{ + IMG_CHAR km_buffer[APPHINT_BUFFER_SIZE]; + int result; + void *private = DIGetPrivData(s); + + PVR_UNREFERENCED_PARAMETER(v); + + result = apphint_write(km_buffer, APPHINT_BUFFER_SIZE, private); + if (result < 0) { + PVR_DPF((PVR_DBG_ERROR, "%s: failure", __func__)); + } else { + /* debugfs requires a trailing \n, module_params don't */ + result += snprintf(km_buffer + result, + APPHINT_BUFFER_SIZE - result, + "\n"); + DIPuts(s, km_buffer); + } + + /* have to return 0 to see output */ + return (result < 0) ? 
result : 0; +} + +/* +******************************************************************************* + Debugfs supporting functions +******************************************************************************/ + +/** + * apphint_set - Handle a DI value update + */ +static IMG_INT64 apphint_set(const IMG_CHAR *buffer, IMG_UINT64 count, + IMG_UINT64 *ppos, void *data) +{ + APPHINT_ID id; + union apphint_value value; + struct apphint_action *action = data; + char km_buffer[APPHINT_BUFFER_SIZE]; + int result = 0; + + if (ppos == NULL) + return -EIO; + + if (count >= APPHINT_BUFFER_SIZE) { + PVR_DPF((PVR_DBG_ERROR, "%s: String too long (%" IMG_INT64_FMTSPECd ")", + __func__, count)); + result = -EINVAL; + goto err_exit; + } + + /* apphint_read() modifies the buffer so we need to copy it */ + memcpy(km_buffer, buffer, count); + /* count is larger than real buffer by 1 because DI framework appends + * a '\0' character at the end, but here we're ignoring this */ + count -= 1; + km_buffer[count] = '\0'; + + get_apphint_id_from_action_addr(action, &id); + result = apphint_read(km_buffer, count, id, &value); + if (result >= 0) + apphint_action(value, action); + + *ppos += count; +err_exit: + return result; +} + +/** + * apphint_debugfs_init - Create the specified debugfs entries + */ +static int apphint_debugfs_init(const char *sub_dir, + unsigned device_num, + unsigned init_data_size, + const struct apphint_init_data *init_data, + DI_GROUP *parentdir, + DI_GROUP **rootdir, + DI_ENTRY *entry[]) +{ + PVRSRV_ERROR result; + unsigned i; + int device_value_offset = device_num * APPHINT_DEBUGFS_DEVICE_ID_MAX; + const DI_ITERATOR_CB iterator = { + .pfnStart = apphint_di_start, .pfnStop = apphint_di_stop, + .pfnNext = apphint_di_next, .pfnShow = apphint_di_show, + .pfnWrite = apphint_set + }; + + if (*rootdir) { + PVR_DPF((PVR_DBG_WARNING, + "AppHint DebugFS already created, skipping")); + result = -EEXIST; + goto err_exit; + } + + result = DICreateGroup(sub_dir, parentdir, 
rootdir); + if (result < 0) { + PVR_DPF((PVR_DBG_WARNING, + "Failed to create \"%s\" DebugFS directory.", sub_dir)); + goto err_exit; + } + + for (i = 0; i < init_data_size; i++) { + if (!class_state[init_data[i].class].enabled) + continue; + + result = DICreateEntry(init_data[i].name, + *rootdir, + &iterator, + (void *) &apphint.val[init_data[i].id + device_value_offset], + DI_ENTRY_TYPE_GENERIC, + &entry[i]); + if (result != PVRSRV_OK) { + PVR_DPF((PVR_DBG_WARNING, + "Failed to create \"%s/%s\" DebugFS entry.", + sub_dir, init_data[i].name)); + } + } + + return 0; + +err_exit: + return result; +} + +/** + * apphint_debugfs_deinit- destroy the debugfs entries + */ +static void apphint_debugfs_deinit(unsigned num_entries, + DI_GROUP **rootdir, + DI_ENTRY *entry[]) +{ + unsigned i; + + for (i = 0; i < num_entries; i++) { + if (entry[i]) { + DIDestroyEntry(entry[i]); + } + } + + if (*rootdir) { + DIDestroyGroup(*rootdir); + *rootdir = NULL; + } +} + +/* +******************************************************************************* + AppHint status dump implementation +******************************************************************************/ +#if defined(PDUMP) +static void apphint_pdump_values(void *flags, const IMG_CHAR *format, ...) 
+{ + char km_buffer[APPHINT_BUFFER_SIZE]; + IMG_UINT32 ui32Flags = *(IMG_UINT32 *)flags; + va_list ap; + + va_start(ap, format); + (void)vsnprintf(km_buffer, APPHINT_BUFFER_SIZE, format, ap); + va_end(ap); + + PDumpCommentKM(km_buffer, ui32Flags); +} +#endif + +static IMG_BOOL is_apphint_value_equal(const APPHINT_DATA_TYPE data_type, + const union apphint_value * const left, + const union apphint_value * const right) +{ + switch (data_type) { + case APPHINT_DATA_TYPE_UINT64: + return left->UINT64 == right->UINT64; + case APPHINT_DATA_TYPE_UINT32: + case APPHINT_DATA_TYPE_UINT32List: + case APPHINT_DATA_TYPE_UINT32Bitfield: + return left->UINT32 == right->UINT32; + case APPHINT_DATA_TYPE_BOOL: + return left->BOOL == right->BOOL; + case APPHINT_DATA_TYPE_STRING: + return (OSStringNCompare(left->STRING, right->STRING, OSStringLength(right->STRING) + 1) == 0 ? IMG_TRUE : IMG_FALSE); + default: + PVR_DPF((PVR_DBG_WARNING, "%s: unhandled data type (%d)", __func__, data_type)); + return IMG_FALSE; + } +} + +static void apphint_dump_values(const char *group_name, + int device_num, + const struct apphint_init_data *group_data, + int group_size, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + bool list_all) +{ + int i, result; + int device_value_offset = device_num * APPHINT_DEBUGFS_DEVICE_ID_MAX; + char km_buffer[APPHINT_BUFFER_SIZE]; + char count = 0; + + PVR_DUMPDEBUG_LOG(" %s", group_name); + for (i = 0; i < group_size; i++) + { + IMG_UINT32 id = group_data[i].id; + APPHINT_DATA_TYPE data_type = param_lookup[id].data_type; + const struct apphint_action *action = &apphint.val[id + device_value_offset]; + union apphint_value value; + + result = get_apphint_value_from_action(action, &value); + + if (PVRSRV_OK != result) { + continue; + } + + /* List only apphints with non-default values */ + if (!list_all && + is_apphint_value_equal(data_type, &value, &group_data[i].default_value)) { + continue; + } + + result = apphint_write(km_buffer, 
APPHINT_BUFFER_SIZE, action); + count++; + + if (result <= 0) { + PVR_DUMPDEBUG_LOG(" %s: ", + group_data[i].name); + } else { + PVR_DUMPDEBUG_LOG(" %s: %s", + group_data[i].name, km_buffer); + } + } + + if (count == 0) { + PVR_DUMPDEBUG_LOG(" none"); + } +} + +/** + * Callback for debug dump + */ +static void apphint_dump_state(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + int i, result; + char km_buffer[APPHINT_BUFFER_SIZE]; + PVRSRV_DEVICE_NODE *device = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle; + + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) { + PVR_DUMPDEBUG_LOG("------[ AppHint Settings ]------"); + + apphint_dump_values("Build Vars", 0, + init_data_buildvar, ARRAY_SIZE(init_data_buildvar), + pfnDumpDebugPrintf, pvDumpDebugFile, true); + + apphint_dump_values("Module Params", 0, + init_data_modparam, ARRAY_SIZE(init_data_modparam), + pfnDumpDebugPrintf, pvDumpDebugFile, false); + + apphint_dump_values("Debugfs Params", 0, + init_data_debugfs, ARRAY_SIZE(init_data_debugfs), + pfnDumpDebugPrintf, pvDumpDebugFile, false); + + for (i = 0; i < APPHINT_DEVICES_MAX; i++) { + if (!apphint.devices[i] + || (device && device != apphint.devices[i])) + continue; + + result = snprintf(km_buffer, + APPHINT_BUFFER_SIZE, + "Debugfs Params Device ID: %d", + i); + if (0 > result) + continue; + + apphint_dump_values(km_buffer, i, + init_data_debugfs_device, + ARRAY_SIZE(init_data_debugfs_device), + pfnDumpDebugPrintf, + pvDumpDebugFile, + false); + } + } +} + +/* +******************************************************************************* + Public interface +******************************************************************************/ +int pvr_apphint_init(void) +{ + int result, i; + + if (apphint.initialized) { + result = -EEXIST; + goto err_out; + } + + for (i = 0; i < APPHINT_DEVICES_MAX; i++) + apphint.devices[i] = NULL; + + /* create workqueue 
with strict execution ordering to ensure no + * race conditions when setting/updating apphints from different + * contexts + */ + apphint.workqueue = alloc_workqueue("apphint_workqueue", + WQ_UNBOUND | WQ_FREEZABLE, 1); + if (!apphint.workqueue) { + result = -ENOMEM; + goto err_out; + } + + result = apphint_debugfs_init("apphint", 0, + ARRAY_SIZE(init_data_debugfs), init_data_debugfs, + NULL, + &apphint.debugfs_rootdir, apphint.debugfs_entry); + if (0 != result) + goto err_out; + + result = apphint_debugfs_init("buildvar", 0, + ARRAY_SIZE(init_data_buildvar), init_data_buildvar, + NULL, + &apphint.buildvar_rootdir, apphint.buildvar_entry); + + apphint.initialized = 1; + +err_out: + return result; +} + +int pvr_apphint_device_register(PVRSRV_DEVICE_NODE *device) +{ + int result, i; + char device_num[APPHINT_BUFFER_SIZE]; + unsigned device_value_offset; + + if (!apphint.initialized) { + result = -EAGAIN; + goto err_out; + } + + if (apphint.num_devices+1 >= APPHINT_DEVICES_MAX) { + result = -EMFILE; + goto err_out; + } + + result = snprintf(device_num, APPHINT_BUFFER_SIZE, "%u", apphint.num_devices); + if (result < 0) { + PVR_DPF((PVR_DBG_WARNING, + "snprintf failed (%d)", result)); + result = -EINVAL; + goto err_out; + } + + /* Set the default values for the new device */ + device_value_offset = apphint.num_devices * APPHINT_DEBUGFS_DEVICE_ID_MAX; + for (i = 0; i < APPHINT_DEBUGFS_DEVICE_ID_MAX; i++) { + apphint.val[init_data_debugfs_device[i].id + device_value_offset].stored + = init_data_debugfs_device[i].default_value; + } + + result = apphint_debugfs_init(device_num, apphint.num_devices, + ARRAY_SIZE(init_data_debugfs_device), + init_data_debugfs_device, + apphint.debugfs_rootdir, + &apphint.debugfs_device_rootdir[apphint.num_devices], + apphint.debugfs_device_entry[apphint.num_devices]); + if (0 != result) + goto err_out; + + apphint.devices[apphint.num_devices] = device; + apphint.num_devices++; + + (void)SOPvrDbgRequestNotifyRegister( + 
&device->hAppHintDbgReqNotify, + device, + apphint_dump_state, + DEBUG_REQUEST_APPHINT, + device); + +err_out: + return result; +} + +void pvr_apphint_device_unregister(PVRSRV_DEVICE_NODE *device) +{ + int i; + + if (!apphint.initialized) + return; + + /* find the device */ + for (i = 0; i < APPHINT_DEVICES_MAX; i++) { + if (apphint.devices[i] == device) + break; + } + + if (APPHINT_DEVICES_MAX == i) + return; + + if (device->hAppHintDbgReqNotify) { + (void)SOPvrDbgRequestNotifyUnregister( + device->hAppHintDbgReqNotify); + device->hAppHintDbgReqNotify = NULL; + } + + apphint_debugfs_deinit(APPHINT_DEBUGFS_DEVICE_ID_MAX, + &apphint.debugfs_device_rootdir[i], + apphint.debugfs_device_entry[i]); + + apphint.devices[i] = NULL; + + WARN_ON(apphint.num_devices==0); + apphint.num_devices--; +} + +void pvr_apphint_deinit(void) +{ + int i; + + if (!apphint.initialized) + return; + + /* remove any remaining device data */ + for (i = 0; apphint.num_devices && i < APPHINT_DEVICES_MAX; i++) { + if (apphint.devices[i]) + pvr_apphint_device_unregister(apphint.devices[i]); + } + + /* free all alloc'd string apphints and set to NULL */ + for (i = 0; i < ARRAY_SIZE(apphint.val); i++) { + if (apphint.val[i].free && apphint.val[i].stored.STRING) { + kfree(apphint.val[i].stored.STRING); + apphint.val[i].stored.STRING = NULL; + apphint.val[i].free = false; + } + } + + apphint_debugfs_deinit(APPHINT_DEBUGFS_ID_MAX, + &apphint.debugfs_rootdir, apphint.debugfs_entry); + apphint_debugfs_deinit(APPHINT_BUILDVAR_ID_MAX, + &apphint.buildvar_rootdir, apphint.buildvar_entry); + + destroy_workqueue(apphint.workqueue); + + apphint.initialized = 0; +} + +void pvr_apphint_dump_state(void) +{ +#if defined(PDUMP) + IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS; + + apphint_dump_state(NULL, DEBUG_REQUEST_VERBOSITY_HIGH, + apphint_pdump_values, (void *)&ui32Flags); +#endif + apphint_dump_state(NULL, DEBUG_REQUEST_VERBOSITY_HIGH, + NULL, NULL); +} + +int pvr_apphint_get_uint64(APPHINT_ID ue, IMG_UINT64 
*pVal) +{ + int error = -ERANGE; + + if (ue < APPHINT_ID_MAX) { + *pVal = apphint.val[ue].stored.UINT64; + error = 0; + } + return error; +} + +int pvr_apphint_get_uint32(APPHINT_ID ue, IMG_UINT32 *pVal) +{ + int error = -ERANGE; + + if (ue < APPHINT_ID_MAX) { + *pVal = apphint.val[ue].stored.UINT32; + error = 0; + } + return error; +} + +int pvr_apphint_get_bool(APPHINT_ID ue, IMG_BOOL *pVal) +{ + int error = -ERANGE; + + if (ue < APPHINT_ID_MAX) { + error = 0; + *pVal = apphint.val[ue].stored.BOOL; + } + return error; +} + +int pvr_apphint_get_string(APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size) +{ + int error = -ERANGE; + if (ue < APPHINT_ID_MAX && apphint.val[ue].stored.STRING) { + if (OSStringLCopy(pBuffer, apphint.val[ue].stored.STRING, size) < size) { + error = 0; + } + } + return error; +} + +int pvr_apphint_set_uint64(APPHINT_ID ue, IMG_UINT64 Val) +{ + int error = -ERANGE; + + if ((ue < APPHINT_ID_MAX) && + (param_lookup[ue].data_type == APPHINT_DATA_TYPE_UINT64)) { + + if (apphint.val[ue].set.UINT64) { + apphint.val[ue].set.UINT64(apphint.val[ue].device, apphint.val[ue].private_data, Val); + } else { + apphint.val[ue].stored.UINT64 = Val; + } + error = 0; + } + + return error; +} + +int pvr_apphint_set_uint32(APPHINT_ID ue, IMG_UINT32 Val) +{ + int error = -ERANGE; + + if ((ue < APPHINT_ID_MAX) && + (param_lookup[ue].data_type == APPHINT_DATA_TYPE_UINT32)) { + + if (apphint.val[ue].set.UINT32) { + apphint.val[ue].set.UINT32(apphint.val[ue].device, apphint.val[ue].private_data, Val); + } else { + apphint.val[ue].stored.UINT32 = Val; + } + error = 0; + } + + return error; +} + +int pvr_apphint_set_bool(APPHINT_ID ue, IMG_BOOL Val) +{ + int error = -ERANGE; + + if ((ue < APPHINT_ID_MAX) && + (param_lookup[ue].data_type == APPHINT_DATA_TYPE_BOOL)) { + + error = 0; + if (apphint.val[ue].set.BOOL) { + apphint.val[ue].set.BOOL(apphint.val[ue].device, apphint.val[ue].private_data, Val); + } else { + apphint.val[ue].stored.BOOL = Val; + } + } + + return error; 
+} + +int pvr_apphint_set_string(APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size) +{ + int error = -ERANGE; + + if ((ue < APPHINT_ID_MAX) && + ((param_lookup[ue].data_type == APPHINT_DATA_TYPE_STRING) && + apphint.val[ue].stored.STRING)) { + + if (apphint.val[ue].set.STRING) { + error = apphint.val[ue].set.STRING(apphint.val[ue].device, apphint.val[ue].private_data, pBuffer); + } else { + if (strlcpy(apphint.val[ue].stored.STRING, pBuffer, size) < size) { + error = 0; + } + } + } + + return error; +} + +void pvr_apphint_register_handlers_uint64(APPHINT_ID id, + PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value), + PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value), + const PVRSRV_DEVICE_NODE *device, + const void *private_data) +{ + int device_value_offset; + + if (id >= APPHINT_ID_MAX) { + PVR_DPF((PVR_DBG_ERROR, + "%s: AppHint ID (%d) is out of range, max (%d)", + __func__, id, APPHINT_ID_MAX-1)); + return; + } + + get_value_offset_from_device(device, &device_value_offset); + + switch (param_lookup[id].data_type) { + case APPHINT_DATA_TYPE_UINT64: + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Does not match AppHint data type for ID (%d)", + __func__, id)); + return; + } + + apphint.val[id + device_value_offset] = (struct apphint_action){ + .query.UINT64 = query, + .set.UINT64 = set, + .device = device, + .private_data = private_data, + .stored = apphint.val[id + device_value_offset].stored + }; +} + +void pvr_apphint_register_handlers_uint32(APPHINT_ID id, + PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 *value), + PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value), + const PVRSRV_DEVICE_NODE *device, + const void *private_data) +{ + int device_value_offset; + + if (id >= APPHINT_ID_MAX) { + PVR_DPF((PVR_DBG_ERROR, + "%s: AppHint ID (%d) is out of range, max (%d)", + 
__func__, id, APPHINT_ID_MAX-1)); + return; + } + + get_value_offset_from_device(device, &device_value_offset); + + switch (param_lookup[id].data_type) { + case APPHINT_DATA_TYPE_UINT32: + case APPHINT_DATA_TYPE_UINT32Bitfield: + case APPHINT_DATA_TYPE_UINT32List: + break; + + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Does not match AppHint data type for ID (%d)", + __func__, id)); + return; + } + + apphint.val[id + device_value_offset] = (struct apphint_action){ + .query.UINT32 = query, + .set.UINT32 = set, + .device = device, + .private_data = private_data, + .stored = apphint.val[id + device_value_offset].stored + }; +} + +void pvr_apphint_register_handlers_bool(APPHINT_ID id, + PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value), + PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value), + const PVRSRV_DEVICE_NODE *device, + const void *private_data) +{ + int device_value_offset; + + if (id >= APPHINT_ID_MAX) { + PVR_DPF((PVR_DBG_ERROR, + "%s: AppHint ID (%d) is out of range, max (%d)", + __func__, id, APPHINT_ID_MAX-1)); + return; + } + + get_value_offset_from_device(device, &device_value_offset); + + switch (param_lookup[id].data_type) { + case APPHINT_DATA_TYPE_BOOL: + break; + + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Does not match AppHint data type for ID (%d)", + __func__, id)); + return; + } + + apphint.val[id + device_value_offset] = (struct apphint_action){ + .query.BOOL = query, + .set.BOOL = set, + .device = device, + .private_data = private_data, + .stored = apphint.val[id + device_value_offset].stored + }; +} + +void pvr_apphint_register_handlers_string(APPHINT_ID id, + PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR **value), + PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value), + const PVRSRV_DEVICE_NODE *device, + const void *private_data) +{ + int device_value_offset; 
+ + if (id >= APPHINT_ID_MAX) { + PVR_DPF((PVR_DBG_ERROR, + "%s: AppHint ID (%d) is out of range, max (%d)", + __func__, id, APPHINT_ID_MAX-1)); + return; + } + + get_value_offset_from_device(device, &device_value_offset); + + switch (param_lookup[id].data_type) { + case APPHINT_DATA_TYPE_STRING: + break; + + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Does not match AppHint data type for ID (%d)", + __func__, id)); + return; + } + + apphint.val[id + device_value_offset] = (struct apphint_action){ + .query.STRING = query, + .set.STRING = set, + .device = device, + .private_data = private_data, + .stored = apphint.val[id + device_value_offset].stored + }; +} + +/* EOF */ diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/km_apphint.h b/drivers/mcst/gpu-imgtec/services/server/env/linux/km_apphint.h new file mode 100644 index 000000000000..9b267d7a4820 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/km_apphint.h @@ -0,0 +1,99 @@ +/*************************************************************************/ /*! +@File km_apphint.h +@Title Apphint internal header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Linux kernel AppHint control +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef KM_APPHINT_H +#define KM_APPHINT_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "pvrsrv_apphint.h" +#include "km_apphint_defs.h" +#include "device.h" + +int pvr_apphint_init(void); +void pvr_apphint_deinit(void); +int pvr_apphint_device_register(PVRSRV_DEVICE_NODE *device); +void pvr_apphint_device_unregister(PVRSRV_DEVICE_NODE *device); +void pvr_apphint_dump_state(void); + +int pvr_apphint_get_uint64(APPHINT_ID ue, IMG_UINT64 *pVal); +int pvr_apphint_get_uint32(APPHINT_ID ue, IMG_UINT32 *pVal); +int pvr_apphint_get_bool(APPHINT_ID ue, IMG_BOOL *pVal); +int pvr_apphint_get_string(APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size); + +int pvr_apphint_set_uint64(APPHINT_ID ue, IMG_UINT64 Val); +int pvr_apphint_set_uint32(APPHINT_ID ue, IMG_UINT32 Val); +int pvr_apphint_set_bool(APPHINT_ID ue, IMG_BOOL Val); +int pvr_apphint_set_string(APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size); + +void pvr_apphint_register_handlers_uint64(APPHINT_ID id, + PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value), + PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value), + const PVRSRV_DEVICE_NODE *device, + const void * private_data); +void pvr_apphint_register_handlers_uint32(APPHINT_ID id, + PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 *value), + PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value), + const PVRSRV_DEVICE_NODE *device, + const void *private_data); +void pvr_apphint_register_handlers_bool(APPHINT_ID id, + PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value), + PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value), + const PVRSRV_DEVICE_NODE *device, + const void *private_data); +void 
pvr_apphint_register_handlers_string(APPHINT_ID id, + PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR **value), + PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value), + const PVRSRV_DEVICE_NODE *device, + const void *private_data); + +#if defined(__cplusplus) +} +#endif +#endif /* KM_APPHINT_H */ + +/****************************************************************************** + End of file (km_apphint.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/linkage.h b/drivers/mcst/gpu-imgtec/services/server/env/linux/linkage.h new file mode 100644 index 000000000000..1f95631eab54 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/linkage.h @@ -0,0 +1,55 @@ +/*************************************************************************/ /*! +@File +@Title Linux specific Services code internal interfaces +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Interfaces between various parts of the Linux specific + Services code, that don't have any other obvious + header file to go into. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__LINKAGE_H__) +#define __LINKAGE_H__ + +PVRSRV_ERROR PVROSFuncInit(void); +void PVROSFuncDeInit(void); + +int PVRDebugCreateDIEntries(void); +void PVRDebugRemoveDIEntries(void); + +#endif /* !defined(__LINKAGE_H__) */ diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/module_common.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/module_common.c new file mode 100644 index 000000000000..5822dab08ee9 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/module_common.c @@ -0,0 +1,513 @@ +/*************************************************************************/ /*! +@File +@Title Common Linux module setup +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#if defined(CONFIG_DEBUG_FS) +#include "pvr_debugfs.h" +#endif /* defined(CONFIG_DEBUG_FS) */ +#if defined(CONFIG_PROC_FS) +#include "pvr_procfs.h" +#endif /* defined(CONFIG_PROC_FS) */ +#include "di_server.h" +#include "private_data.h" +#include "linkage.h" +#include "power.h" +#include "env_connection.h" +#include "process_stats.h" +#include "module_common.h" +#include "pvrsrv.h" +#include "srvcore.h" +#if defined(SUPPORT_RGX) +#include "rgxdevice.h" +#endif +#include "pvrsrv_error.h" +#include "pvr_drv.h" +#include "pvr_bridge_k.h" + +#include "pvr_fence.h" + +#if defined(SUPPORT_NATIVE_FENCE_SYNC) +#include "pvr_sync.h" +#endif + +#include "ospvr_gputrace.h" + +#include "km_apphint.h" +#include "srvinit.h" + +#include "pvr_ion_stats.h" + +#if defined(SUPPORT_DISPLAY_CLASS) +/* Display class interface */ +#include "kerneldisplay.h" +EXPORT_SYMBOL(DCRegisterDevice); +EXPORT_SYMBOL(DCUnregisterDevice); +EXPORT_SYMBOL(DCDisplayConfigurationRetired); +EXPORT_SYMBOL(DCDisplayHasPendingCommand); +EXPORT_SYMBOL(DCImportBufferAcquire); +EXPORT_SYMBOL(DCImportBufferRelease); + +/* Physmem interface (required by LMA DC drivers) */ +#include "physheap.h" +EXPORT_SYMBOL(PhysHeapAcquire); +EXPORT_SYMBOL(PhysHeapRelease); +EXPORT_SYMBOL(PhysHeapGetType); +EXPORT_SYMBOL(PhysHeapRegionGetCpuPAddr); +EXPORT_SYMBOL(PhysHeapRegionGetSize); +EXPORT_SYMBOL(PhysHeapCpuPAddrToDevPAddr); + +EXPORT_SYMBOL(PVRSRVGetDriverStatus); +EXPORT_SYMBOL(PVRSRVSystemInstallDeviceLISR); +EXPORT_SYMBOL(PVRSRVSystemUninstallDeviceLISR); + +#include "pvr_notifier.h" +EXPORT_SYMBOL(PVRSRVCheckStatus); + +#include "pvr_debug.h" +EXPORT_SYMBOL(PVRSRVGetErrorString); +#endif /* defined(SUPPORT_DISPLAY_CLASS) */ + +#if defined(SUPPORT_RGX) +#include "rgxapi_km.h" +#if defined(SUPPORT_SHARED_SLC) +EXPORT_SYMBOL(RGXInitSLC); +#endif +EXPORT_SYMBOL(RGXHWPerfConnect); +EXPORT_SYMBOL(RGXHWPerfDisconnect); 
+EXPORT_SYMBOL(RGXHWPerfControl);
+#if defined(HWPERF_PACKET_V2C_SIG)
+EXPORT_SYMBOL(RGXHWPerfConfigureCounters);
+#else
+EXPORT_SYMBOL(RGXHWPerfConfigureAndEnableCounters);
+EXPORT_SYMBOL(RGXHWPerfConfigureAndEnableCustomCounters);
+#endif
+EXPORT_SYMBOL(RGXHWPerfDisableCounters);
+EXPORT_SYMBOL(RGXHWPerfAcquireEvents);
+EXPORT_SYMBOL(RGXHWPerfReleaseEvents);
+EXPORT_SYMBOL(RGXHWPerfConvertCRTimeStamp);
+#if defined(SUPPORT_KERNEL_HWPERF_TEST)
+EXPORT_SYMBOL(OSAddTimer);
+EXPORT_SYMBOL(OSEnableTimer);
+EXPORT_SYMBOL(OSDisableTimer);
+EXPORT_SYMBOL(OSRemoveTimer);
+#endif
+#endif
+
+/* Map a Linux struct file back to the services connection stored in its
+ * DRM file's driver_priv. Returns NULL for a NULL file pointer.
+ * NOTE(review): assumes pFile->private_data is a valid struct drm_file when
+ * pFile is non-NULL — confirm against the DRM open path. */
+CONNECTION_DATA *LinuxConnectionFromFile(struct file *pFile)
+{
+	if (pFile)
+	{
+		struct drm_file *psDRMFile = pFile->private_data;
+
+		return psDRMFile->driver_priv;
+	}
+
+	return NULL;
+}
+
+/* Inverse of LinuxConnectionFromFile: retrieve the struct file saved in the
+ * connection's environment-private data (set in OSConnectionPrivateDataInit). */
+struct file *LinuxFileFromConnection(CONNECTION_DATA *psConnection)
+{
+	ENV_CONNECTION_DATA *psEnvConnection;
+
+	psEnvConnection = PVRSRVConnectionPrivateData(psConnection);
+	PVR_ASSERT(psEnvConnection != NULL);
+
+	return psEnvConnection->psFile;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVDriverInit
+@Description  Common one time driver initialisation
+@Return       int           0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVDriverInit(void)
+{
+	PVRSRV_ERROR error;
+
+	/* NOTE(review): none of the failure paths below unwind the steps that
+	 * already succeeded (e.g. DIInit before a later -ENOMEM return); confirm
+	 * the module-load error path tolerates this. */
+	error = DIInit();
+	if (error != PVRSRV_OK)
+	{
+		return -ENOMEM;
+	}
+
+	/* debugfs is preferred; procfs is only used when debugfs is unavailable */
+#if defined(CONFIG_DEBUG_FS)
+	error = PVRDebugFsRegister();
+	if (error != PVRSRV_OK)
+	{
+		return -ENOMEM;
+	}
+#elif defined(CONFIG_PROC_FS)
+	error = PVRProcFsRegister();
+	if (error != PVRSRV_OK)
+	{
+		return -ENOMEM;
+	}
+#endif /* defined(CONFIG_DEBUG_FS) || defined(CONFIG_PROC_FS) */
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	error = PVRSRVStatsInitialise();
+	if (error != PVRSRV_OK)
+	{
+		return -ENOMEM;
+	}
+#endif
+
+	error = PVROSFuncInit();
+	if (error != PVRSRV_OK)
+	{
+		return -ENOMEM;
+	}
+
+#if defined(SUPPORT_RGX)
+	error = PVRGpuTraceSupportInit();
+	if (error != PVRSRV_OK)
+	{
+		return -ENOMEM;
+	}
+#endif
+
+	error = PVRSRVCommonDriverInit();
+	if (error != PVRSRV_OK)
+	{
+		return -ENODEV;
+	}
+
+	error = PVRSRVIonStatsInitialise();
+	if (error != PVRSRV_OK)
+	{
+		return -ENODEV;
+	}
+
+#if defined(SUPPORT_RGX)
+	/* calling here because we need to handle input from the file even
+	 * before the devices are initialised
+	 * note: we're not passing a device node because apphint callbacks don't
+	 * need it */
+	PVRGpuTraceInitAppHintCallbacks(NULL);
+#endif
+
+	return 0;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVDriverDeinit
+@Description  Common one time driver de-initialisation
+@Return       void
+*/ /***************************************************************************/
+void PVRSRVDriverDeinit(void)
+{
+	/* Tear down in the reverse order of PVRSRVDriverInit */
+	PVRSRVIonStatsDestroy();
+
+	PVRSRVCommonDriverDeInit();
+
+#if defined(SUPPORT_RGX)
+	PVRGpuTraceSupportDeInit();
+#endif
+
+	PVROSFuncDeInit();
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	PVRSRVStatsDestroy();
+#endif
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVDeviceInit
+@Description  Common device related initialisation.
+@Input        psDeviceNode  The device node for which initialisation should be
+                            performed
+@Return       int           0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVDeviceInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	int error = 0;
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	{
+		/* Fence-sync setup is the only fatal step in this function */
+		PVRSRV_ERROR eError = pvr_sync_init(psDeviceNode->psDevConfig->pvOSDevice);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: unable to create sync (%d)",
+					__func__, eError));
+			return -EBUSY;
+		}
+	}
+#endif
+
+	/* The remaining steps only warn on failure; the function still returns 0 */
+	error = PVRDebugCreateDIEntries();
+	if (error != 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s: failed to create default debugfs entries (%d)",
+				__func__, error));
+	}
+
+#if defined(SUPPORT_RGX)
+	error = PVRGpuTraceInitDevice(psDeviceNode);
+	if (error != 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s: failed to initialise PVR GPU Tracing on device%d (%d)",
+				__func__, psDeviceNode->sDevId.i32UMIdentifier, error));
+	}
+#endif
+
+	/* register the AppHint device control before device initialisation
+	 * so individual AppHints can be configured during the init phase
+	 */
+	error = pvr_apphint_device_register(psDeviceNode);
+	if (error != 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s: failed to initialise device AppHints (%d)",
+				__func__, error));
+	}
+
+	return 0;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVDeviceDeinit
+@Description  Common device related de-initialisation.
+@Input        psDeviceNode  The device node for which de-initialisation should
+                            be performed
+@Return       void
+*/ /***************************************************************************/
+void PVRSRVDeviceDeinit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	/* Reverse order of PVRSRVDeviceInit */
+	pvr_apphint_device_unregister(psDeviceNode);
+
+#if defined(SUPPORT_RGX)
+	PVRGpuTraceDeInitDevice(psDeviceNode);
+#endif
+
+	PVRDebugRemoveDIEntries();
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	pvr_sync_deinit();
+#endif
+
+	pvr_fence_cleanup();
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVDeviceShutdown
+@Description  Common device shutdown.
+@Input        psDeviceNode  The device node representing the device that should
+                            be shutdown
+@Return       void
+*/ /***************************************************************************/
+
+void PVRSRVDeviceShutdown(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError;
+
+	/*
+	 * Disable the bridge to stop processes trying to use the driver
+	 * after it has been shut down.
+	 */
+	eError = LinuxBridgeBlockClientsAccess(IMG_TRUE);
+
+	if (eError != PVRSRV_OK)
+	{
+		/* Without the bridge blocked it is unsafe to power off, so bail out */
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to suspend driver (%d)",
+				__func__, eError));
+		return;
+	}
+
+	/* Shutdown path: power-off result is deliberately ignored */
+	(void) PVRSRVSetDeviceSystemPowerState(psDeviceNode,
+										   PVRSRV_SYS_POWER_STATE_OFF);
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVDeviceSuspend
+@Description  Common device suspend.
+@Input        psDeviceNode  The device node representing the device that should
+                            be suspended
+@Return       int           0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVDeviceSuspend(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	/*
+	 * LinuxBridgeBlockClientsAccess prevents processes from using the driver
+	 * while it's suspended (this is needed for Android). Acquire the bridge
+	 * lock first to ensure the driver isn't currently in use.
+	 */
+
+	LinuxBridgeBlockClientsAccess(IMG_FALSE);
+
+	/* On power-off failure the bridge is unblocked again before returning */
+	if (PVRSRVSetDeviceSystemPowerState(psDeviceNode,
+										PVRSRV_SYS_POWER_STATE_OFF) != PVRSRV_OK)
+	{
+		LinuxBridgeUnblockClientsAccess();
+		return -EINVAL;
+	}
+
+	/* Success: clients stay blocked until PVRSRVDeviceResume */
+	return 0;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVDeviceResume
+@Description  Common device resume.
+@Input        psDeviceNode  The device node representing the device that should
+                            be resumed
+@Return       int           0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVDeviceResume(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	if (PVRSRVSetDeviceSystemPowerState(psDeviceNode,
+										PVRSRV_SYS_POWER_STATE_ON) != PVRSRV_OK)
+	{
+		/* NOTE(review): clients remain blocked here (counterpart of the
+		 * block taken in PVRSRVDeviceSuspend) — confirm intended */
+		return -EINVAL;
+	}
+
+	LinuxBridgeUnblockClientsAccess();
+
+	/*
+	 * Reprocess the device queues in case commands were blocked during
+	 * suspend.
+	 */
+	if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_ACTIVE)
+	{
+		PVRSRVCheckStatus(NULL);
+	}
+
+	return 0;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVDeviceOpen
+@Description  Common device open.
+@Input        psDeviceNode  The device node representing the device being
+                            opened by a user mode process
+@Input        psDRMFile     The DRM file data that backs the file handle
+                            returned to the user mode process
+@Return       int           0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVDeviceOpen(PVRSRV_DEVICE_NODE *psDeviceNode,
+                     struct drm_file *psDRMFile)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	ENV_CONNECTION_PRIVATE_DATA sPrivData;
+	void *pvConnectionData;
+	PVRSRV_ERROR eError;
+	int iErr = 0;
+
+	if (!psPVRSRVData)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: No device data", __func__));
+		iErr = -ENODEV;
+		goto out;
+	}
+
+	/*
+	 * If the first attempt already set the state to bad,
+	 * there is no point in going the second time, so get out
+	 */
+	if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_BAD)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Driver already in bad state. Device open failed.",
+				__func__));
+		iErr = -ENODEV;
+		goto out;
+	}
+
+	/* Device initialisation is lazy: performed on first open of the node */
+	if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT)
+	{
+		eError = PVRSRVCommonDeviceInitialise(psDeviceNode);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise device (%s)",
+					__func__, PVRSRVGetErrorString(eError)));
+			iErr = -ENODEV;
+			goto out;
+		}
+
+#if defined(SUPPORT_RGX)
+		PVRGpuTraceInitIfEnabled(psDeviceNode);
+#endif
+	}
+
+	sPrivData.psDevNode = psDeviceNode;
+	sPrivData.psFile = psDRMFile->filp;
+
+	/*
+	 * Here we pass the file pointer which will passed through to our
+	 * OSConnectionPrivateDataInit function where we can save it so
+	 * we can back reference the file structure from its connection
+	 */
+	eError = PVRSRVCommonConnectionConnect(&pvConnectionData, (void *) &sPrivData);
+	if (eError != PVRSRV_OK)
+	{
+		iErr = -ENOMEM;
+		goto out;
+	}
+
+	/* Connection data is released by PVRSRVDeviceRelease via driver_priv */
+	psDRMFile->driver_priv = pvConnectionData;
+
+out:
+	return iErr;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVDeviceRelease
+@Description  Common device release.
+@Input        psDeviceNode  The device node for the device that the given file
+                            represents
+@Input        psDRMFile     The DRM file data that's being released
+@Return       void
+*/ /***************************************************************************/
+void PVRSRVDeviceRelease(PVRSRV_DEVICE_NODE *psDeviceNode,
+                         struct drm_file *psDRMFile)
+{
+	void *pvConnectionData = psDRMFile->driver_priv;
+
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+	/* Clear driver_priv before disconnecting so the stale connection can no
+	 * longer be reached through the DRM file */
+	psDRMFile->driver_priv = NULL;
+	if (pvConnectionData)
+	{
+		PVRSRVCommonConnectionDisconnect(pvConnectionData);
+	}
+}
diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/module_common.h b/drivers/mcst/gpu-imgtec/services/server/env/linux/module_common.h
new file mode 100644
index 000000000000..72505c3d8fbe
--- /dev/null
+++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/module_common.h
@@ -0,0 +1,67 @@
+/*************************************************************************/ /*!
+@File          module_common.h
+@Title         Common linux module setup header
+@Copyright     Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License       Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _MODULE_COMMON_H_ +#define _MODULE_COMMON_H_ + +/* DRVNAME is the name we use to register our driver. 
*/ +#define DRVNAME PVR_LDM_DRIVER_REGISTRATION_NAME + +struct _PVRSRV_DEVICE_NODE_; +struct drm_file; + +int PVRSRVDriverInit(void); +void PVRSRVDriverDeinit(void); + +int PVRSRVDeviceInit(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); +void PVRSRVDeviceDeinit(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); + +void PVRSRVDeviceShutdown(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); +int PVRSRVDeviceSuspend(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); +int PVRSRVDeviceResume(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); + +int PVRSRVDeviceOpen(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, + struct drm_file *psDRMFile); +void PVRSRVDeviceRelease(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, + struct drm_file *psDRMFile); + +#endif /* _MODULE_COMMON_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/osconnection_server.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/osconnection_server.c new file mode 100644 index 000000000000..8b0ab4afb713 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/osconnection_server.c @@ -0,0 +1,155 @@ +/*************************************************************************/ /*! +@File +@Title Linux specific per process data functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/
+
+/* NOTE(review): the targets of the bare #include lines below were lost during
+ * text extraction of this patch — restore from the original source. */
+#include 
+
+#include "connection_server.h"
+#include "osconnection_server.h"
+
+#include "env_connection.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+
+#include 
+
+#if defined(SUPPORT_ION)
+#include 
+#include PVR_ANDROID_ION_HEADER
+
+/*
+	The ion device (the base object for all requests)
+	gets created by the system and we acquire it via
+	Linux specific functions provided by the system layer
+*/
+#include "ion_sys.h"
+#endif
+
+/* Allocate and populate the per-connection environment data (owner pid,
+ * struct file and device node), optionally creating an ion client on older
+ * kernels. Freed by OSConnectionPrivateDataDeInit.
+ * NOTE(review): on the ion error paths below the already-allocated
+ * connection/ion data is not freed before returning — confirm the caller
+ * runs DeInit on failure. */
+PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData)
+{
+	ENV_CONNECTION_PRIVATE_DATA *psPrivData = pvOSData;
+	ENV_CONNECTION_DATA *psEnvConnection;
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+	ENV_ION_CONNECTION_DATA *psIonConnection;
+#endif
+
+	*phOsPrivateData = OSAllocZMem(sizeof(ENV_CONNECTION_DATA));
+
+	if (*phOsPrivateData == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed", __func__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psEnvConnection = (ENV_CONNECTION_DATA *)*phOsPrivateData;
+
+	psEnvConnection->owner = current->tgid;
+
+	/* Save the pointer to our struct file */
+	psEnvConnection->psFile = psPrivData->psFile;
+	psEnvConnection->psDevNode = psPrivData->psDevNode;
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+	psIonConnection = (ENV_ION_CONNECTION_DATA *)OSAllocZMem(sizeof(ENV_ION_CONNECTION_DATA));
+	if (psIonConnection == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed", __func__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psEnvConnection->psIonData = psIonConnection;
+	/*
+		We can have more than one connection per process, so we need
+		more than the PID to have a unique name.
+	 */
+	psEnvConnection->psIonData->psIonDev = IonDevAcquire();
+	OSSNPrintf(psEnvConnection->psIonData->azIonClientName, ION_CLIENT_NAME_SIZE, "pvr_ion_client-%p-%d", *phOsPrivateData, OSGetCurrentClientProcessIDKM());
+	psEnvConnection->psIonData->psIonClient =
+		ion_client_create(psEnvConnection->psIonData->psIonDev,
+						  psEnvConnection->psIonData->azIonClientName);
+
+	if (IS_ERR_OR_NULL(psEnvConnection->psIonData->psIonClient))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSConnectionPrivateDataInit: Couldn't create "
+								"ion client for per connection data"));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+#endif /* SUPPORT_ION && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */
+	return PVRSRV_OK;
+}
+
+/* Release everything created by OSConnectionPrivateDataInit: the ion client
+ * and device reference (older kernels only) and the connection data itself.
+ * A NULL handle is accepted and treated as a no-op. */
+PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
+{
+	ENV_CONNECTION_DATA *psEnvConnection;
+
+	if (hOsPrivateData == NULL)
+	{
+		return PVRSRV_OK;
+	}
+
+	psEnvConnection = hOsPrivateData;
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+	PVR_ASSERT(psEnvConnection->psIonData != NULL);
+
+	PVR_ASSERT(psEnvConnection->psIonData->psIonClient != NULL);
+	ion_client_destroy(psEnvConnection->psIonData->psIonClient);
+
+	IonDevRelease(psEnvConnection->psIonData->psIonDev);
+	OSFreeMem(psEnvConnection->psIonData);
+#endif
+
+	OSFreeMem(hOsPrivateData);
+	/*not nulling pointer, copy on stack*/
+
+	return PVRSRV_OK;
+}
+
+
+/* Accessor: device node recorded in the connection's environment data */
+PVRSRV_DEVICE_NODE *OSGetDevNode(CONNECTION_DATA *psConnection)
+{
+	ENV_CONNECTION_DATA *psEnvConnection;
+
+	psEnvConnection = PVRSRVConnectionPrivateData(psConnection);
+	PVR_ASSERT(psEnvConnection);
+
+	return psEnvConnection->psDevNode;
+}
diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc.c
new file mode 100644
index 000000000000..da97bd26b614
--- /dev/null
+++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc.c
@@ -0,0 +1,1760 @@
+/*************************************************************************/ /*!
+@File +@Title Environment related functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) +#include +#include +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) +#include +#include +#else +#include +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) */ + +#include "log2.h" +#include "osfunc.h" +#include "cache_km.h" +#include "img_defs.h" +#include "img_types.h" +#include "allocmem.h" +#include "devicemem_server_utils.h" +#include "event.h" +#include "linkage.h" +#include "pvr_uaccess.h" +#include "pvr_debug.h" +#include "pvr_bridge_k.h" +#include "pvrsrv_memallocflags.h" +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#endif +#include "physmem_osmem_linux.h" +#include "dma_support.h" +#include "kernel_compatibility.h" + +#if defined(VIRTUAL_PLATFORM) +#define EVENT_OBJECT_TIMEOUT_US (120000000ULL) +#else +#if defined(EMULATOR) || defined(TC_APOLLO_TCF5) +#define EVENT_OBJECT_TIMEOUT_US (2000000ULL) +#else +#define EVENT_OBJECT_TIMEOUT_US (100000ULL) +#endif /* EMULATOR */ +#endif + + +typedef struct { + struct task_struct *kthread; + PFN_THREAD 
pfnThread;          /* entry point the kernel thread runs */
+	void *hData;                   /* opaque argument passed to pfnThread */
+	IMG_CHAR *pszThreadName;
+	IMG_BOOL   bIsThreadRunning;
+	IMG_BOOL   bIsSupportingThread;
+	PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB; /* optional per-thread debug dump */
+	DLLIST_NODE sNode;             /* linkage on gsThreadListHead */
+} OSThreadData;
+
+/* Put the calling task to sleep until it is woken by a signal or wake-up */
+void OSSuspendTaskInterruptible(void)
+{
+	set_current_state(TASK_INTERRUPTIBLE);
+	schedule();
+}
+
+/* Global list of OS threads created through this module.
+ * NOTE(review): no lock is visible here around list mutation — confirm the
+ * callers serialise _ThreadListAddEntry/_ThreadListRemoveEntry. */
+static DLLIST_NODE gsThreadListHead;
+
+static void _ThreadListAddEntry(OSThreadData *psThreadListNode)
+{
+	dllist_add_to_tail(&gsThreadListHead, &(psThreadListNode->sNode));
+}
+
+static void _ThreadListRemoveEntry(OSThreadData *psThreadListNode)
+{
+	dllist_remove_node(&(psThreadListNode->sNode));
+}
+
+/* Mark a thread as no longer running (read back by OSThreadDumpInfo) */
+static void _ThreadSetStopped(OSThreadData *psOSThreadData)
+{
+	psOSThreadData->bIsThreadRunning = IMG_FALSE;
+}
+
+static void _OSInitThreadList(void)
+{
+	dllist_init(&gsThreadListHead);
+}
+
+/* Print name/state of every registered thread, then invoke each thread's
+ * optional debug-dump callback with the same printf sink */
+void OSThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf,
+                      void *pvDumpDebugFile)
+{
+	PDLLIST_NODE psNodeCurr, psNodeNext;
+
+	dllist_foreach_node(&gsThreadListHead, psNodeCurr, psNodeNext)
+	{
+		OSThreadData *psThreadListNode;
+		psThreadListNode = IMG_CONTAINER_OF(psNodeCurr, OSThreadData, sNode);
+
+		PVR_DUMPDEBUG_LOG("  %s : %s",
+						  psThreadListNode->pszThreadName,
+						  (psThreadListNode->bIsThreadRunning) ?
"Running" : "Stopped");
+
+		if (psThreadListNode->pfnDebugDumpCB)
+		{
+			psThreadListNode->pfnDebugDumpCB(pfnDumpDebugPrintf, pvDumpDebugFile);
+		}
+	}
+}
+
+/* Allocate physically contiguous pages (power-of-two order derived from
+ * uiSize) for MMU objects. On CONFIG_MCST the pages are additionally
+ * DMA-mapped and the bus address is returned in *psDevPAddr; otherwise the
+ * device address is derived from the CPU physical address via the heap. */
+PVRSRV_ERROR OSPhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize,
+							PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr,
+							IMG_PID uiPid)
+{
+#ifdef CONFIG_MCST
+	dma_addr_t d;
+#endif
+	struct device *psDev = psDevNode->psDevConfig->pvOSDevice;
+	IMG_CPU_PHYADDR sCpuPAddr;
+	struct page *psPage;
+	IMG_UINT32 ui32Order=0;
+	gfp_t gfp_flags;
+
+	PVR_ASSERT(uiSize != 0);
+	/*Align the size to the page granularity */
+	uiSize = PAGE_ALIGN(uiSize);
+
+	/*Get the order to be used with the allocation */
+	ui32Order = get_order(uiSize);
+
+	gfp_flags = GFP_KERNEL;
+
+#if !defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY)
+	if (psDev)
+	{
+		/* NOTE(review): dereferences psDev->dma_mask without a NULL check —
+		 * assumes the platform code always sets a dma_mask; confirm. */
+		if (*psDev->dma_mask == DMA_BIT_MASK(32))
+		{
+			/* Limit to 32 bit.
+			 * Achieved by setting __GFP_DMA32 for 64 bit systems */
+			gfp_flags |= __GFP_DMA32;
+		}
+		else if (*psDev->dma_mask < DMA_BIT_MASK(32))
+		{
+			/* Limit to whatever the size of DMA zone is. */
+			gfp_flags |= __GFP_DMA;
+		}
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(psDev);
+#endif
+
+	/*allocate the pages */
+	psPage = alloc_pages(gfp_flags, ui32Order);
+	if (psPage == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	/* Account the full rounded-up allocation, not the requested size */
+	uiSize = (1 << ui32Order) * PAGE_SIZE;
+
+	psMemHandle->u.pvHandle = psPage;
+	psMemHandle->uiOrder = ui32Order;
+	sCpuPAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(page_to_phys(psPage));
+#ifdef CONFIG_MCST
+	d = dma_map_page(psDevNode->psDevConfig->pvOSDevice,
+			psPage, 0, uiSize, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(psDevNode->psDevConfig->pvOSDevice, d)) {
+		__free_pages(psPage, ui32Order);
+		return PVRSRV_ERROR_BAD_MAPPING;
+	}
+	psMemHandle->sDevPAddr.uiAddr = d;
+	psDevPAddr->uiAddr = d;
+
+#else
+	/*
+	 * Even when more pages are allocated as base MMU object we still need one single physical address because
+	 * they are physically contiguous.
+	 */
+	PhysHeapCpuPAddrToDevPAddr(psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL], 1, psDevPAddr, &sCpuPAddr);
+#endif
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,
+										uiSize,
+										(IMG_UINT64)(uintptr_t) psPage,
+										uiPid);
+#else
+	/* DEBUG_MEMSTATS_VALUES expands to the trailing file/line arguments */
+	PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,
+								 psPage,
+								 sCpuPAddr,
+								 uiSize,
+								 NULL,
+								 uiPid
+								 DEBUG_MEMSTATS_VALUES);
+#endif
+#else
+	PVR_UNREFERENCED_PARAMETER(uiPid);
+#endif
+
+	return PVRSRV_OK;
+}
+
+/* Free pages obtained from OSPhyContigPagesAlloc, undoing the stats
+ * accounting and (on CONFIG_MCST) the DMA mapping first */
+void OSPhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle)
+{
+	struct page *psPage = (struct page*) psMemHandle->u.pvHandle;
+	IMG_UINT32 uiSize, uiPageCount=0, ui32Order;
+
+	ui32Order = psMemHandle->uiOrder;
+	uiPageCount = (1 << ui32Order);
+	uiSize = (uiPageCount * PAGE_SIZE);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,
+										  (IMG_UINT64)(uintptr_t) psPage);
+#else
+	PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,
+									(IMG_UINT64)(uintptr_t) psPage,
+									OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+#ifdef CONFIG_MCST
+	dma_unmap_page(psDevNode->psDevConfig->pvOSDevice,
+			psMemHandle->sDevPAddr.uiAddr, uiSize, DMA_BIDIRECTIONAL);
+#endif
+	__free_pages(psPage, ui32Order);
+	psMemHandle->uiOrder = 0;
+}
+
+/* Map the first page of a contiguous allocation into kernel VA with kmap.
+ * NOTE(review): kmap maps a single page; for order > 0 allocations this
+ * relies on lowmem pages being virtually contiguous — confirm for highmem
+ * configurations. */
+PVRSRV_ERROR OSPhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+						size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+						void **pvPtr)
+{
+	size_t actualSize = 1 << (PAGE_SHIFT + psMemHandle->uiOrder);
+	*pvPtr = kmap((struct page*)psMemHandle->u.pvHandle);
+
+	PVR_UNREFERENCED_PARAMETER(psDevPAddr);
+
+	PVR_UNREFERENCED_PARAMETER(actualSize); /* If we don't take an #ifdef path */
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
/* Unmap a kernel-virtual mapping created by OSPhyContigPagesMap.
 *
 * psDevNode   - unused here; kept for interface symmetry with the map call.
 * psMemHandle - allocation handle; u.pvHandle is the struct page* that was
 *               kmap()'d, uiOrder the allocation order.
 * pvPtr       - the kernel-virtual address returned by the map call (only
 *               used for the memory-stats record lookup).
 */
void OSPhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle, void *pvPtr)
{
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
	/* Mapping is done a page at a time */
	/* Decrement by the full mapped size (order-sized region), mirroring
	 * the increment done in OSPhyContigPagesMap. */
	PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA,
	                            (1 << (PAGE_SHIFT + psMemHandle->uiOrder)),
	                            OSGetCurrentClientProcessIDKM());
#else
	/* Full-record mode: the record is keyed by the virtual address. */
	PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA,
	                                (IMG_UINT64)(uintptr_t)pvPtr,
	                                OSGetCurrentClientProcessIDKM());
#endif
#endif

	PVR_UNREFERENCED_PARAMETER(psDevNode);
	PVR_UNREFERENCED_PARAMETER(pvPtr);

	/* kunmap() is keyed by the page, not the virtual address. */
	kunmap((struct page*) psMemHandle->u.pvHandle);
}
PVRSRV_CACHE_OP_CLEAN); + +e0: + kunmap(psPage); + + return eError; +} + +#if defined(__GNUC__) +#define PVRSRV_MEM_ALIGN __attribute__ ((aligned (0x8))) +#define PVRSRV_MEM_ALIGN_MASK (0x7) +#else +#error "PVRSRV Alignment macros need to be defined for this compiler" +#endif + +IMG_UINT32 OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE eCacheAttribute) +{ + IMG_UINT32 uiSize = 0; + + switch (eCacheAttribute) + { + case OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE: + uiSize = cache_line_size(); + break; + + default: + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache attribute type %d", + __func__, (IMG_UINT32)eCacheAttribute)); + PVR_ASSERT(0); + break; + } + + return uiSize; +} + +IMG_UINT32 OSVSScanf(const IMG_CHAR *pStr, const IMG_CHAR *pszFormat, ...) +{ + va_list argList; + IMG_INT32 iCount = 0; + + va_start(argList, pszFormat); + iCount = vsscanf(pStr, pszFormat, argList); + va_end(argList); + + return iCount; +} + +IMG_INT OSMemCmp(void *pvBufA, void *pvBufB, size_t uiLen) +{ + return (IMG_INT)memcmp(pvBufA, pvBufB, uiLen); +} + +size_t OSStringLCat(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDstSize) +{ + /* + * Let strlcat handle any truncation cases correctly. + * We will definitely get a NUL-terminated string set in pszDest + */ + size_t uSrcSize = strlcat(pszDest, pszSrc, uDstSize); + +#if defined(PVR_DEBUG_STRLCPY) && defined(DEBUG) + /* Handle truncation by dumping calling stack if debug allows */ + if (uSrcSize >= uDstSize) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: String truncated Src = '<%s>' %ld bytes, Dest = '%s'", + __func__, pszSrc, (long)uDstSize, pszDest)); + OSDumpStack(); + } +#endif /* defined(PVR_DEBUG_STRLCPY) && defined(DEBUG) */ + + return uSrcSize; +} + +IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR *pszFormat, ...) 
+{ + va_list argList; + IMG_INT32 iCount; + + va_start(argList, pszFormat); + iCount = vsnprintf(pStr, (size_t)ui32Size, pszFormat, argList); + va_end(argList); + + return iCount; +} + +IMG_INT32 OSVSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR* pszFormat, va_list vaArgs) +{ + return vsnprintf(pStr, ui32Size, pszFormat, vaArgs); +} + +size_t OSStringLength(const IMG_CHAR *pStr) +{ + return strlen(pStr); +} + +size_t OSStringNLength(const IMG_CHAR *pStr, size_t uiCount) +{ + return strnlen(pStr, uiCount); +} + +IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2, + size_t uiSize) +{ + return strncmp(pStr1, pStr2, uiSize); +} + +PVRSRV_ERROR OSStringToUINT32(const IMG_CHAR *pStr, IMG_UINT32 ui32Base, + IMG_UINT32 *ui32Result) +{ + if (kstrtou32(pStr, ui32Base, ui32Result) != 0) + return PVRSRV_ERROR_CONVERSION_FAILED; + + return PVRSRV_OK; +} + +IMG_UINT32 OSStringUINT32ToStr(IMG_CHAR *pszBuf, size_t uSize, + IMG_UINT32 ui32Num) +{ + IMG_UINT32 ui32i, ui32Len = 0, ui32NumCopy = ui32Num; + + /* calculate string length required to hold the number string */ + do + { + ui32Len++; + ui32NumCopy /= 10; + } while (ui32NumCopy != 0); + + if (unlikely(ui32Len >= uSize)) + { + /* insufficient buffer */ + return 0; + } + + for (ui32i = 0; ui32i < ui32Len; ui32i++) + { + pszBuf[ui32Len - (ui32i + 1)] = '0' + ui32Num % 10; + ui32Num = ui32Num / 10; + } + + pszBuf[ui32Len] = '\0'; + return ui32Len; +} + +PVRSRV_ERROR OSInitEnvData(void) +{ + + LinuxInitPhysmem(); + + _OSInitThreadList(); + + return PVRSRV_OK; +} + + +void OSDeInitEnvData(void) +{ + + LinuxDeinitPhysmem(); +} + + +void OSReleaseThreadQuanta(void) +{ + schedule(); +} + +/* Not matching/aligning this API to the Clockus() API above to avoid necessary + * multiplication/division operations in calling code. + */ +static inline IMG_UINT64 Clockns64(void) +{ + IMG_UINT64 timenow; + + /* Kernel thread preempt protection. 
Some architecture implementations + * (ARM) of sched_clock are not preempt safe when the kernel is configured + * as such e.g. CONFIG_PREEMPT and others. + */ + preempt_disable(); + + /* Using sched_clock instead of ktime_get since we need a time stamp that + * correlates with that shown in kernel logs and trace data not one that + * is a bit behind. */ + timenow = sched_clock(); + + preempt_enable(); + + return timenow; +} + +IMG_UINT64 OSClockns64(void) +{ + return Clockns64(); +} + +IMG_UINT64 OSClockus64(void) +{ + IMG_UINT64 timenow = Clockns64(); + IMG_UINT32 remainder; + + return OSDivide64r64(timenow, 1000, &remainder); +} + +IMG_UINT32 OSClockus(void) +{ + return (IMG_UINT32) OSClockus64(); +} + +IMG_UINT32 OSClockms(void) +{ + IMG_UINT64 timenow = Clockns64(); + IMG_UINT32 remainder; + + return OSDivide64(timenow, 1000000, &remainder); +} + +static inline IMG_UINT64 KClockns64(void) +{ + ktime_t sTime = ktime_get(); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) + return sTime; +#else + return sTime.tv64; +#endif +} + +PVRSRV_ERROR OSClockMonotonicns64(IMG_UINT64 *pui64Time) +{ + *pui64Time = KClockns64(); + return PVRSRV_OK; +} + +PVRSRV_ERROR OSClockMonotonicus64(IMG_UINT64 *pui64Time) +{ + IMG_UINT64 timenow = KClockns64(); + IMG_UINT32 remainder; + + *pui64Time = OSDivide64r64(timenow, 1000, &remainder); + return PVRSRV_OK; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)) +IMG_UINT64 OSClockMonotonicRawns64(void) +{ + struct timespec64 ts; + + ktime_get_raw_ts64(&ts); + return ts.tv_sec * 1000000000 + ts.tv_nsec; +} +#else +IMG_UINT64 OSClockMonotonicRawns64(void) +{ + struct timespec ts; + + getrawmonotonic(&ts); + return (IMG_UINT64) ts.tv_sec * 1000000000 + ts.tv_nsec; +} +#endif + +IMG_UINT64 OSClockMonotonicRawus64(void) +{ + IMG_UINT32 rem; + return OSDivide64r64(OSClockMonotonicRawns64(), 1000, &rem); +} + +/* + OSWaitus +*/ +void OSWaitus(IMG_UINT32 ui32Timeus) +{ + udelay(ui32Timeus); +} + + +/* + OSSleepms +*/ +void 
OSSleepms(IMG_UINT32 ui32Timems) +{ + msleep(ui32Timems); +} + + +INLINE IMG_UINT64 OSGetCurrentProcessVASpaceSize(void) +{ + return (IMG_UINT64)TASK_SIZE; +} + +INLINE IMG_PID OSGetCurrentProcessID(void) +{ + if (in_interrupt()) + { + return KERNEL_ID; + } + + return (IMG_PID)task_tgid_nr(current); +} + +INLINE IMG_PID OSGetCurrentVirtualProcessID(void) +{ + if (in_interrupt()) + { + return KERNEL_ID; + } + + return (IMG_PID)task_tgid_vnr(current); +} + +INLINE IMG_CHAR *OSGetCurrentProcessName(void) +{ + return current->comm; +} + +INLINE uintptr_t OSGetCurrentThreadID(void) +{ + if (in_interrupt()) + { + return KERNEL_ID; + } + + return current->pid; +} + +IMG_PID OSGetCurrentClientProcessIDKM(void) +{ + return OSGetCurrentProcessID(); +} + +IMG_CHAR *OSGetCurrentClientProcessNameKM(void) +{ + return OSGetCurrentProcessName(); +} + +uintptr_t OSGetCurrentClientThreadIDKM(void) +{ + return OSGetCurrentThreadID(); +} + +size_t OSGetPageSize(void) +{ + return PAGE_SIZE; +} + +size_t OSGetPageShift(void) +{ + return PAGE_SHIFT; +} + +size_t OSGetPageMask(void) +{ + return (OSGetPageSize()-1); +} + +size_t OSGetOrder(size_t uSize) +{ + return get_order(PAGE_ALIGN(uSize)); +} + +IMG_UINT64 OSGetRAMSize(void) +{ + struct sysinfo SI; + si_meminfo(&SI); + + return (PAGE_SIZE * SI.totalram); +} + +typedef struct +{ + int os_error; + PVRSRV_ERROR pvr_error; +} error_map_t; + +/* return -ve versions of POSIX errors as they are used in this form */ +static const error_map_t asErrorMap[] = +{ + {-EFAULT, PVRSRV_ERROR_BRIDGE_EFAULT}, + {-EINVAL, PVRSRV_ERROR_BRIDGE_EINVAL}, + {-ENOMEM, PVRSRV_ERROR_BRIDGE_ENOMEM}, + {-ERANGE, PVRSRV_ERROR_BRIDGE_ERANGE}, + {-EPERM, PVRSRV_ERROR_BRIDGE_EPERM}, + {-ENOTTY, PVRSRV_ERROR_BRIDGE_ENOTTY}, + {-ENOTTY, PVRSRV_ERROR_BRIDGE_CALL_FAILED}, + {-ERANGE, PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL}, + {-ENOMEM, PVRSRV_ERROR_OUT_OF_MEMORY}, + {-EINVAL, PVRSRV_ERROR_INVALID_PARAMS}, + + {0, PVRSRV_OK} +}; + +int PVRSRVToNativeError(PVRSRV_ERROR e) +{ 
+ int os_error = -EFAULT; + int i; + + for (i = 0; i < ARRAY_SIZE(asErrorMap); i++) + { + if (e == asErrorMap[i].pvr_error) + { + os_error = asErrorMap[i].os_error; + break; + } + } + return os_error; +} + +typedef struct _MISR_DATA_ { + struct workqueue_struct *psWorkQueue; + struct work_struct sMISRWork; + const IMG_CHAR* pszName; + PFN_MISR pfnMISR; + void *hData; +} MISR_DATA; + +/* + MISRWrapper +*/ +static void MISRWrapper(struct work_struct *data) +{ + MISR_DATA *psMISRData = container_of(data, MISR_DATA, sMISRWork); + + PVR_DPF((PVR_DBG_MESSAGE, "Waking up '%s' MISR %p", psMISRData->pszName, psMISRData)); + + psMISRData->pfnMISR(psMISRData->hData); +} + +/* + OSInstallMISR +*/ +PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData, PFN_MISR pfnMISR, + void *hData, const IMG_CHAR *pszMisrName) +{ + MISR_DATA *psMISRData; + + psMISRData = OSAllocMem(sizeof(*psMISRData)); + PVR_LOG_RETURN_IF_NOMEM(psMISRData, "psMISRData"); + + psMISRData->hData = hData; + psMISRData->pfnMISR = pfnMISR; + psMISRData->pszName = pszMisrName; + + PVR_DPF((PVR_DBG_MESSAGE, "Installing MISR with cookie %p", psMISRData)); + + psMISRData->psWorkQueue = create_singlethread_workqueue("pvr_misr"); + + if (psMISRData->psWorkQueue == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: create_singlethreaded_workqueue failed")); + OSFreeMem(psMISRData); + return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD; + } + + INIT_WORK(&psMISRData->sMISRWork, MISRWrapper); + + *hMISRData = (IMG_HANDLE) psMISRData; + + return PVRSRV_OK; +} + +/* + OSUninstallMISR +*/ +PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData) +{ + MISR_DATA *psMISRData = (MISR_DATA *) hMISRData; + + PVR_DPF((PVR_DBG_MESSAGE, "Uninstalling MISR with cookie %p", psMISRData)); + + destroy_workqueue(psMISRData->psWorkQueue); + OSFreeMem(psMISRData); + + return PVRSRV_OK; +} + +/* + OSScheduleMISR +*/ +PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData) +{ + MISR_DATA *psMISRData = (MISR_DATA *) hMISRData; + + /* + Note: + + In the case of 
/* OS specific values for thread priority */
/* Maps each OS_THREAD_*_PRIORITY level to a Linux nice value (-20 = most
 * favourable .. 19 = least favourable), applied via set_user_nice() in
 * OSThreadCreatePriority. Indexed by OS_THREAD_LEVEL; the NOSET entry is
 * never applied. */
static const IMG_INT32 ai32OSPriorityValues[OS_THREAD_LAST_PRIORITY] =
{
	0,   /* OS_THREAD_NOSET_PRIORITY */
	-20, /* OS_THREAD_HIGHEST_PRIORITY */
	-10, /* OS_THREAD_HIGH_PRIORITY */
	0,   /* OS_THREAD_NORMAL_PRIORITY */
	9,   /* OS_THREAD_LOW_PRIORITY */
	19,  /* OS_THREAD_LOWEST_PRIORITY */
};
/* Create a kernel thread with default (unset) priority.
 *
 * Thin convenience wrapper: all parameters are forwarded unchanged to
 * OSThreadCreatePriority with OS_THREAD_NOSET_PRIORITY, so the thread
 * inherits the default nice level. See OSThreadCreatePriority for the
 * parameter semantics and error codes.
 */
PVRSRV_ERROR OSThreadCreate(IMG_HANDLE *phThread,
                            IMG_CHAR *pszThreadName,
                            PFN_THREAD pfnThread,
                            PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB,
                            IMG_BOOL bIsSupportingThread,
                            void *hData)
{
	return OSThreadCreatePriority(phThread, pszThreadName, pfnThread,
	                              pfnDebugDumpCB, bIsSupportingThread, hData,
	                              OS_THREAD_NOSET_PRIORITY);
}
ai32OSPriorityValues[eThreadPriority]); + } + + *phThread = psOSThreadData; + + return PVRSRV_OK; + +fail_kthread: + OSFreeMem(psOSThreadData); +fail_alloc: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR OSThreadDestroy(IMG_HANDLE hThread) +{ + OSThreadData *psOSThreadData = hThread; + int ret; + + /* Let the thread know we are ready for it to end and wait for it. */ + ret = kthread_stop(psOSThreadData->kthread); + if (0 != ret) + { + PVR_DPF((PVR_DBG_WARNING, "kthread_stop failed(%d)", ret)); + return PVRSRV_ERROR_RETRY; + } + + if (psOSThreadData->bIsSupportingThread) + { + _ThreadListRemoveEntry(psOSThreadData); + } + + OSFreeMem(psOSThreadData); + + return PVRSRV_OK; +} + +void OSPanic(void) +{ + BUG(); + +#if defined(__KLOCWORK__) + /* Klocwork does not understand that BUG is terminal... */ + abort(); +#endif +} + +#ifdef CONFIG_MCST +void * +OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, IMG_DEV_PHYADDR BaseDevPAddr, + size_t ui32Bytes, + IMG_UINT32 ui32MappingFlags) +#else +void * +OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, + size_t ui32Bytes, + IMG_UINT32 ui32MappingFlags) +#endif +{ + void __iomem *pvLinAddr; + + if (ui32MappingFlags & ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK)) + { + PVR_ASSERT(!"Found non-cpu cache mode flag when mapping to the cpu"); + return NULL; + } + + if (! PVRSRV_VZ_MODE_IS(NATIVE)) + { + /* + This is required to support DMA physheaps for GPU virtualization. + Unfortunately, if a region of kernel managed memory is turned into + a DMA buffer, conflicting mappings can come about easily on Linux + as the original memory is mapped by the kernel as normal cached + memory whilst DMA buffers are mapped mostly as uncached device or + cache-coherent device memory. In both cases the system will have + two conflicting mappings for the same memory region and will have + "undefined behaviour" for most processors notably ARMv6 onwards + and some x86 micro-architectures. 
/* Undo a mapping created by OSMapPhysToLin.
 *
 * pvLinAddr        - kernel-virtual address returned by OSMapPhysToLin.
 * ui32Bytes        - unused (kept for interface symmetry).
 * ui32MappingFlags - must contain only CPU cache-mode flags.
 *
 * Returns IMG_TRUE on success, IMG_FALSE if non-cache-mode flags are set.
 */
IMG_BOOL
OSUnMapPhysToLin(void *pvLinAddr, size_t ui32Bytes, IMG_UINT32 ui32MappingFlags)
{
	PVR_UNREFERENCED_PARAMETER(ui32Bytes);

	if (ui32MappingFlags & ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK))
	{
		PVR_ASSERT(!"Found non-cpu cache mode flag when unmapping from the cpu");
		return IMG_FALSE;
	}

	if (!PVRSRV_VZ_MODE_IS(NATIVE))
	{
		/* Addresses belonging to a DMA physheap were obtained from
		 * SysDmaDevPAddrToCpuVAddr in OSMapPhysToLin, not ioremap(),
		 * so there is nothing to iounmap for them — return early. */
		if (SysDmaCpuVAddrToDevPAddr(pvLinAddr))
		{
			return IMG_TRUE;
		}
	}

	iounmap((void __iomem *) pvLinAddr);

	return IMG_TRUE;
}
OS_MAX_TIMERS 8 + +/* Timer callback structure used by OSAddTimer */ +typedef struct TIMER_CALLBACK_DATA_TAG +{ + IMG_BOOL bInUse; + PFN_TIMER_FUNC pfnTimerFunc; + void *pvData; + struct timer_list sTimer; + IMG_UINT32 ui32Delay; + IMG_BOOL bActive; + struct work_struct sWork; +}TIMER_CALLBACK_DATA; + +static struct workqueue_struct *psTimerWorkQueue; + +static TIMER_CALLBACK_DATA sTimers[OS_MAX_TIMERS]; + +static DEFINE_MUTEX(sTimerStructLock); + +static void OSTimerCallbackBody(TIMER_CALLBACK_DATA *psTimerCBData) +{ + if (!psTimerCBData->bActive) + return; + + /* call timer callback */ + psTimerCBData->pfnTimerFunc(psTimerCBData->pvData); + + /* reset timer */ + mod_timer(&psTimerCBData->sTimer, psTimerCBData->sTimer.expires + psTimerCBData->ui32Delay); +} + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) +/*************************************************************************/ /*! +@Function OSTimerCallbackWrapper +@Description OS specific timer callback wrapper function +@Input psTimer Timer list structure +*/ /**************************************************************************/ +static void OSTimerCallbackWrapper(struct timer_list *psTimer) +{ + TIMER_CALLBACK_DATA *psTimerCBData = from_timer(psTimerCBData, psTimer, sTimer); +#else +/*************************************************************************/ /*! 
/* Workqueue handler for software timers: recovers the owning
 * TIMER_CALLBACK_DATA from the embedded work_struct and runs the timer
 * body in process context (the hard timer callback only queues work). */
static void OSTimerWorkQueueCallBack(struct work_struct *psWork)
{
	TIMER_CALLBACK_DATA *psTimerCBData = container_of(psWork, TIMER_CALLBACK_DATA, sWork);

	OSTimerCallbackBody(psTimerCBData);
}
/* Arm a timer previously created with OSAddTimer.
 *
 * hTimer - handle returned by OSAddTimer; must be in use and not already
 *          active (asserted).
 *
 * Returns PVRSRV_OK. The first expiry is one full period (ui32Delay
 * jiffies) from now; subsequent re-arming happens in OSTimerCallbackBody.
 */
PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer)
{
	TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);

	PVR_ASSERT(psTimerCBData->bInUse);
	PVR_ASSERT(!psTimerCBData->bActive);

	/* Start timer arming */
	psTimerCBData->bActive = IMG_TRUE;

	/* set the expire time */
	psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;

	/* Add the timer to the list */
	add_timer(&psTimerCBData->sTimer);

	return PVRSRV_OK;
}
Since the bActive flag + * has been cleared, this second flush won't result in the + * timer being rearmed. + */ + flush_workqueue(psTimerWorkQueue); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName, IMG_HANDLE *hEventObject) +{ + PVR_UNREFERENCED_PARAMETER(pszName); + + PVR_LOG_RETURN_IF_INVALID_PARAM(hEventObject, "hEventObject"); + + return LinuxEventObjectListCreate(hEventObject); +} + + +PVRSRV_ERROR OSEventObjectDestroy(IMG_HANDLE hEventObject) +{ + PVR_LOG_RETURN_IF_INVALID_PARAM(hEventObject, "hEventObject"); + + return LinuxEventObjectListDestroy(hEventObject); +} + +#define _FREEZABLE IMG_TRUE +#define _NON_FREEZABLE IMG_FALSE + +/* + * EventObjectWaitTimeout() + */ +static PVRSRV_ERROR EventObjectWaitTimeout(IMG_HANDLE hOSEventKM, + IMG_UINT64 uiTimeoutus) +{ + PVRSRV_ERROR eError; + + if (hOSEventKM && uiTimeoutus > 0) + { + eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, _NON_FREEZABLE); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "OSEventObjectWait: invalid arguments %p, %lld", hOSEventKM, uiTimeoutus)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + return eError; +} + +PVRSRV_ERROR OSEventObjectWaitTimeout(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus) +{ + return EventObjectWaitTimeout(hOSEventKM, uiTimeoutus); +} + +PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM) +{ + return OSEventObjectWaitTimeout(hOSEventKM, EVENT_OBJECT_TIMEOUT_US); +} + +PVRSRV_ERROR OSEventObjectWaitKernel(IMG_HANDLE hOSEventKM, + IMG_UINT64 uiTimeoutus) +{ + PVRSRV_ERROR eError; + +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) + if (hOSEventKM) + { + if (uiTimeoutus > 0) + eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, + _FREEZABLE); + else + eError = LinuxEventObjectWaitUntilSignalled(hOSEventKM); + } +#else /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ + if (hOSEventKM && uiTimeoutus > 0) + { + eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, + _FREEZABLE); + } +#endif /* 
/* 64-bit division with 64-bit quotient (safe on 32-bit kernels, where a
 * plain 64/32 '/' would require libgcc helpers).
 *
 * ui64Divident    - dividend (passed by value, so the caller's copy is
 *                   unaffected by do_div's in-place update).
 * ui32Divisor     - 32-bit divisor.
 * pui32Remainder  - out: remainder of the division.
 *
 * Returns the 64-bit quotient. Note do_div() is a macro that divides its
 * first argument in place and evaluates to the remainder.
 */
IMG_UINT64 OSDivide64r64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder)
{
	*pui32Remainder = do_div(ui64Divident, ui32Divisor);

	return ui64Divident;
}
*pui32Remainder = do_div(ui64Divident, ui32Divisor); + + return (IMG_UINT32) ui64Divident; +} + +/* One time osfunc initialisation */ +PVRSRV_ERROR PVROSFuncInit(void) +{ + { + PVR_ASSERT(!psTimerWorkQueue); + + psTimerWorkQueue = create_freezable_workqueue("pvr_timer"); + if (psTimerWorkQueue == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: couldn't create timer workqueue", + __func__)); + return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD; + } + } + + { + IMG_UINT32 ui32i; + + for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++) + { + TIMER_CALLBACK_DATA *psTimerCBData = &sTimers[ui32i]; + + INIT_WORK(&psTimerCBData->sWork, OSTimerWorkQueueCallBack); + } + } + return PVRSRV_OK; +} + +/* + * Osfunc deinitialisation. + * Note that PVROSFuncInit may not have been called + */ +void PVROSFuncDeInit(void) +{ + if (psTimerWorkQueue != NULL) + { + destroy_workqueue(psTimerWorkQueue); + psTimerWorkQueue = NULL; + } +} + +void OSDumpStack(void) +{ + dump_stack(); +} + +PVRSRV_ERROR OSChangeSparseMemCPUAddrMap(void **psPageArray, + IMG_UINT64 sCpuVAddrBase, + IMG_CPU_PHYADDR sCpuPAHeapBase, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_BOOL bIsLMA) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + pfn_t sPFN; +#else + IMG_UINT64 uiPFN; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ + + PVRSRV_ERROR eError; + + struct mm_struct *psMM = current->mm; + struct vm_area_struct *psVMA = NULL; + struct address_space *psMapping = NULL; + struct page *psPage = NULL; + + IMG_UINT64 uiCPUVirtAddr = 0; + IMG_UINT32 ui32Loop = 0; + IMG_UINT32 ui32PageSize = OSGetPageSize(); + IMG_BOOL bMixedMap = IMG_FALSE; + + /* + * Acquire the lock before manipulating the VMA + * In this case only mmap_sem lock would suffice as the pages associated with this VMA + * are never meant to be swapped out. 
+ * + * In the future, in case the pages are marked as swapped, page_table_lock needs + * to be acquired in conjunction with this to disable page swapping. + */ + + /* Find the Virtual Memory Area associated with the user base address */ + psVMA = find_vma(psMM, (uintptr_t)sCpuVAddrBase); + if (NULL == psVMA) + { + eError = PVRSRV_ERROR_PMR_NO_CPU_MAP_FOUND; + return eError; + } + + /* Acquire the memory sem */ + down_write(&psMM->mmap_sem); + + psMapping = psVMA->vm_file->f_mapping; + + /* Set the page offset to the correct value as this is disturbed in MMAP_PMR func */ + psVMA->vm_pgoff = (psVMA->vm_start >> PAGE_SHIFT); + + /* Delete the entries for the pages that got freed */ + if (ui32FreePageCount && (pai32FreeIndices != NULL)) + { + for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++) + { + uiCPUVirtAddr = (uintptr_t)(sCpuVAddrBase + (pai32FreeIndices[ui32Loop] * ui32PageSize)); + + unmap_mapping_range(psMapping, uiCPUVirtAddr, ui32PageSize, 1); + +#ifndef PVRSRV_UNMAP_ON_SPARSE_CHANGE + /* + * Still need to map pages in case remap flag is set. 
+ * That is not done until the remap case succeeds + */ +#endif + } + eError = PVRSRV_OK; + } + + if ((psVMA->vm_flags & VM_MIXEDMAP) || bIsLMA) + { + psVMA->vm_flags |= VM_MIXEDMAP; + bMixedMap = IMG_TRUE; + } + else + { + if (ui32AllocPageCount && (NULL != pai32AllocIndices)) + { + for (ui32Loop = 0; ui32Loop < ui32AllocPageCount; ui32Loop++) + { + + psPage = (struct page *)psPageArray[pai32AllocIndices[ui32Loop]]; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + sPFN = page_to_pfn_t(psPage); + + if (!pfn_t_valid(sPFN) || page_count(pfn_t_to_page(sPFN)) == 0) +#else + uiPFN = page_to_pfn(psPage); + + if (!pfn_valid(uiPFN) || (page_count(pfn_to_page(uiPFN)) == 0)) +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ + { + bMixedMap = IMG_TRUE; + psVMA->vm_flags |= VM_MIXEDMAP; + break; + } + } + } + } + + /* Map the pages that got allocated */ + if (ui32AllocPageCount && (NULL != pai32AllocIndices)) + { + for (ui32Loop = 0; ui32Loop < ui32AllocPageCount; ui32Loop++) + { + int err; + + uiCPUVirtAddr = (uintptr_t)(sCpuVAddrBase + (pai32AllocIndices[ui32Loop] * ui32PageSize)); + unmap_mapping_range(psMapping, uiCPUVirtAddr, ui32PageSize, 1); + + if (bIsLMA) + { + phys_addr_t uiAddr = sCpuPAHeapBase.uiAddr + + ((IMG_DEV_PHYADDR *)psPageArray)[pai32AllocIndices[ui32Loop]].uiAddr; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + sPFN = phys_to_pfn_t(uiAddr, 0); + psPage = pfn_t_to_page(sPFN); +#else + uiPFN = uiAddr >> PAGE_SHIFT; + psPage = pfn_to_page(uiPFN); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ + } + else + { + psPage = (struct page *)psPageArray[pai32AllocIndices[ui32Loop]]; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + sPFN = page_to_pfn_t(psPage); +#else + uiPFN = page_to_pfn(psPage); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ + } + + if (bMixedMap) + { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) + vm_fault_t vmf; + + vmf = vmf_insert_mixed(psVMA, uiCPUVirtAddr, sPFN); + if (vmf & 
VM_FAULT_ERROR) + { + err = vm_fault_to_errno(vmf, 0); + } + else + { + err = 0; + } +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + err = vm_insert_mixed(psVMA, uiCPUVirtAddr, sPFN); +#else + err = vm_insert_mixed(psVMA, uiCPUVirtAddr, uiPFN); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) */ + } + else + { + err = vm_insert_page(psVMA, uiCPUVirtAddr, psPage); + } + + if (err) + { + PVR_DPF((PVR_DBG_MESSAGE, "Remap failure error code: %d", err)); + eError = PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED; + goto eFailed; + } + } + } + + eError = PVRSRV_OK; +eFailed: + up_write(&psMM->mmap_sem); + + return eError; +} + +/*************************************************************************/ /*! +@Function OSDebugSignalPID +@Description Sends a SIGTRAP signal to a specific PID in user mode for + debugging purposes. The user mode process can register a handler + against this signal. + This is necessary to support the Rogue debugger. If the Rogue + debugger is not used then this function may be implemented as + a stub. +@Input ui32PID The PID for the signal. +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSDebugSignalPID(IMG_UINT32 ui32PID) +{ + int err; + struct pid *psPID; + + psPID = find_vpid(ui32PID); + if (psPID == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get PID struct.", __func__)); + return PVRSRV_ERROR_NOT_FOUND; + } + + err = kill_pid(psPID, SIGTRAP, 0); + if (err != 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Signal Failure %d", __func__, err)); + return PVRSRV_ERROR_SIGNAL_FAILED; + } + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! +@Function OSIsKernelThread +@Description This API determines if the current running thread is a kernel + thread (i.e. one not associated with any userland process, + typically an MISR handler.) 
+@Return IMG_TRUE if it is a kernel thread, otherwise IMG_FALSE. +*/ /**************************************************************************/ +IMG_BOOL OSIsKernelThread(void) +{ + /* + * Kernel threads have a NULL memory descriptor. + * + * See https://www.kernel.org/doc/Documentation/vm/active_mm.txt + */ + return current->mm == NULL; +} + +void OSDumpVersionInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVR_DUMPDEBUG_LOG("OS kernel info: %s %s %s %s", + utsname()->sysname, + utsname()->release, + utsname()->version, + utsname()->machine); +} diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_arm.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_arm.c new file mode 100644 index 000000000000..b43ab84c9a34 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_arm.c @@ -0,0 +1,153 @@ +/*************************************************************************/ /*! +@File +@Title arm specific OS functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Processor specific OS functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include +#include +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)) + #include +#endif +#include + +#include "pvrsrv_error.h" +#include "img_types.h" +#include "img_defs.h" +#include "osfunc.h" +#include "pvr_debug.h" + + +static inline size_t pvr_dmac_range_len(const void *pvStart, const void *pvEnd) +{ + return (size_t)((char *)pvEnd - (char *)pvStart); +} + +void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + PVR_UNREFERENCED_PARAMETER(psDevNode); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) + arm_dma_ops.sync_single_for_device(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE); + arm_dma_ops.sync_single_for_cpu(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE); +#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */ + /* Inner cache */ + dmac_flush_range(pvVirtStart, pvVirtEnd); + + /* Outer cache */ + outer_flush_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */ +} + +void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + PVR_UNREFERENCED_PARAMETER(psDevNode); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) + arm_dma_ops.sync_single_for_device(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE); +#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */ + /* Inner cache */ + dmac_map_area(pvVirtStart, pvr_dmac_range_len(pvVirtStart, pvVirtEnd), DMA_TO_DEVICE); + + /* Outer cache */ + outer_clean_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr); +#endif /* (LINUX_VERSION_CODE >= 
KERNEL_VERSION(3,7,0)) */ +} + +void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + PVR_UNREFERENCED_PARAMETER(psDevNode); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) + arm_dma_ops.sync_single_for_cpu(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE); +#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */ + /* Inner cache */ + dmac_map_area(pvVirtStart, pvr_dmac_range_len(pvVirtStart, pvVirtEnd), DMA_FROM_DEVICE); + + /* Outer cache */ + outer_inv_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */ +} + +OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) + return OS_CACHE_OP_ADDR_TYPE_PHYSICAL; +#else + return OS_CACHE_OP_ADDR_TYPE_BOTH; +#endif +} + +/* User Enable Register */ +#define PMUSERENR_EN 0x00000001 /* enable user access to the counters */ + +static void per_cpu_perf_counter_user_access_en(void *data) +{ + PVR_UNREFERENCED_PARAMETER(data); +#if !defined(CONFIG_L4) + /* Enable user-mode access to counters. */ + asm volatile("mcr p15, 0, %0, c9, c14, 0" :: "r"(PMUSERENR_EN)); +#endif +} + +void OSUserModeAccessToPerfCountersEn(void) +{ + on_each_cpu(per_cpu_perf_counter_user_access_en, NULL, 1); +} + +IMG_BOOL OSIsWriteCombineUnalignedSafe(void) +{ + /* + * The kernel looks to have always used normal memory under ARM32. + * See osfunc_arm64.c implementation for more details. 
+ */ + return IMG_TRUE; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_arm64.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_arm64.c new file mode 100644 index 000000000000..d2a8329e3ad0 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_arm64.c @@ -0,0 +1,288 @@ +/*************************************************************************/ /*! +@File +@Title arm64 specific OS functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Processor specific OS functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#include +#include +#include +#include +#include + +#include "pvrsrv_error.h" +#include "img_types.h" +#include "img_defs.h" +#include "osfunc.h" +#include "pvr_debug.h" + +#if defined(CONFIG_OUTER_CACHE) + /* If you encounter a 64-bit ARM system with an outer cache, you'll need + * to add the necessary code to manage that cache. See osfunc_arm.c + * for an example of how to do so. + */ + #error "CONFIG_OUTER_CACHE not supported on arm64." 
+#endif + +static inline void begin_user_mode_access(void) +{ +#if defined(CONFIG_ARM64) && defined(CONFIG_ARM64_SW_TTBR0_PAN) + uaccess_enable(); +#endif +} + +static inline void end_user_mode_access(void) +{ +#if defined(CONFIG_ARM64) && defined(CONFIG_ARM64_SW_TTBR0_PAN) + uaccess_disable(); +#endif +} + +static inline void FlushRange(void *pvRangeAddrStart, + void *pvRangeAddrEnd, + PVRSRV_CACHE_OP eCacheOp) +{ + IMG_UINT32 ui32CacheLineSize = OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE); + IMG_BYTE *pbStart = pvRangeAddrStart; + IMG_BYTE *pbEnd = pvRangeAddrEnd; + IMG_BYTE *pbBase; + + /* + On arm64, the TRM states in D5.8.1 (data and unified caches) that if cache + maintenance is performed on a memory location using a VA, the effect of + that cache maintenance is visible to all VA aliases of the physical memory + location. So here it's quicker to issue the machine cache maintenance + instruction directly without going via the Linux kernel DMA framework as + this is sufficient to maintain the CPU d-caches on arm64. 
+ */ + + begin_user_mode_access(); + + pbEnd = (IMG_BYTE *) PVR_ALIGN((uintptr_t)pbEnd, (uintptr_t)ui32CacheLineSize); + for (pbBase = pbStart; pbBase < pbEnd; pbBase += ui32CacheLineSize) + { + switch (eCacheOp) + { + case PVRSRV_CACHE_OP_CLEAN: + asm volatile ("dc cvac, %0" :: "r" (pbBase)); + break; + + case PVRSRV_CACHE_OP_INVALIDATE: + asm volatile ("dc ivac, %0" :: "r" (pbBase)); + break; + + case PVRSRV_CACHE_OP_FLUSH: + asm volatile ("dc civac, %0" :: "r" (pbBase)); + break; + + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Cache maintenance operation type %d is invalid", + __func__, eCacheOp)); + break; + } + } + + end_user_mode_access(); +} + +void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + struct device *dev; + + if (pvVirtStart) + { + FlushRange(pvVirtStart, pvVirtEnd, PVRSRV_CACHE_OP_FLUSH); + return; + } + + dev = psDevNode->psDevConfig->pvOSDevice; + + if (dev) + { + dma_sync_single_for_device(dev, sCPUPhysStart.uiAddr, + sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, + DMA_TO_DEVICE); + dma_sync_single_for_cpu(dev, sCPUPhysStart.uiAddr, + sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, + DMA_FROM_DEVICE); + } + else + { + /* + * Allocations done prior to obtaining device pointer may + * affect in cache operations being scheduled. + * + * Ignore operations with null device pointer. + * This prevents crashes on newer kernels that don't return dummy ops + * when null pointer is passed to get_dma_ops. 
+ * + */ + + /* Don't spam on nohw */ +#if !defined(NO_HARDWARE) + PVR_DPF((PVR_DBG_WARNING, "Cache operation cannot be completed!")); +#endif + } + +} + +void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + struct device *dev; + + if (pvVirtStart) + { + FlushRange(pvVirtStart, pvVirtEnd, PVRSRV_CACHE_OP_CLEAN); + return; + } + + dev = psDevNode->psDevConfig->pvOSDevice; + + if (dev) + { + dma_sync_single_for_device(dev, sCPUPhysStart.uiAddr, + sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, + DMA_TO_DEVICE); + } + else + { + /* + * Allocations done prior to obtaining device pointer may + * affect in cache operations being scheduled. + * + * Ignore operations with null device pointer. + * This prevents crashes on newer kernels that don't return dummy ops + * when null pointer is passed to get_dma_ops. + * + */ + + + /* Don't spam on nohw */ +#if !defined(NO_HARDWARE) + PVR_DPF((PVR_DBG_WARNING, "Cache operation cannot be completed!")); +#endif + } + +} + +void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + struct device *dev; + + if (pvVirtStart) + { + FlushRange(pvVirtStart, pvVirtEnd, PVRSRV_CACHE_OP_INVALIDATE); + return; + } + + dev = psDevNode->psDevConfig->pvOSDevice; + + if (dev) + { + dma_sync_single_for_cpu(dev, sCPUPhysStart.uiAddr, + sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, + DMA_FROM_DEVICE); + } + else + { + /* + * Allocations done prior to obtaining device pointer may + * affect in cache operations being scheduled. + * + * Ignore operations with null device pointer. + * This prevents crashes on newer kernels that don't return dummy ops + * when null pointer is passed to get_dma_ops. 
+ * + */ + + /* Don't spam on nohw */ +#if !defined(NO_HARDWARE) + PVR_DPF((PVR_DBG_WARNING, "Cache operation cannot be completed!")); +#endif + } +} + + +OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void) +{ + return OS_CACHE_OP_ADDR_TYPE_PHYSICAL; +} + +void OSUserModeAccessToPerfCountersEn(void) +{ +} + +IMG_BOOL OSIsWriteCombineUnalignedSafe(void) +{ + /* + * Under ARM64 there is the concept of 'device' [0] and 'normal' [1] memory. + * Unaligned access on device memory is explicitly disallowed [2]: + * + * 'Further, unaligned accesses are only allowed to regions marked as Normal + * memory type. + * ... + * Attempts to perform unaligned accesses when not allowed will cause an + * alignment fault (data abort).' + * + * Write-combine on ARM64 can be implemented as either normal non-cached + * memory (NORMAL_NC) or as device memory with gathering enabled + * (DEVICE_GRE.) Kernel 3.13 changed this from the latter to the former. + * + * [0]:http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.den0024a/CHDBDIDF.html + * [1]:http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.den0024a/ch13s01s01.html + * [2]:http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html + */ + + pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL); + + return (pgprot_val(pgprot) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_NC); +} diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_e2k.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_e2k.c new file mode 100644 index 000000000000..997a369c8084 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_e2k.c @@ -0,0 +1,143 @@ +/*************************************************************************/ /*! +@File +@Title E2K specific OS functions +@Copyright Copyright (c) ZAO MCST +@Description OS functions who's implementation are processor specific +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#include "pvrsrv_error.h" +#include "img_types.h" +#include "img_defs.h" +#include "osfunc.h" +#include "pvr_debug.h" +#include + + +PVRSRV_ERROR OSCPUOperation(PVRSRV_CACHE_OP uiCacheOp) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + switch(uiCacheOp) + { + /* Fall-through */ + case PVRSRV_CACHE_OP_CLEAN: + case PVRSRV_CACHE_OP_FLUSH: + case PVRSRV_CACHE_OP_INVALIDATE: + + write_back_cache_all(); + break; + + case PVRSRV_CACHE_OP_NONE: + break; + + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Global cache operation type %d is invalid", + __FUNCTION__, uiCacheOp)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_ASSERT(0); + break; + } + + return eError; +} + +static void e2k_flush_cache_range(void *pvStart, const void *pvEnd) +{ + flush_DCACHE_range(pvStart, pvEnd - pvStart); +} + +void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(sCPUPhysStart); + PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd); + + e2k_flush_cache_range(pvVirtStart, pvVirtEnd); +} + + +void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(sCPUPhysStart); + PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd); + + /* No clean feature on e2k */ + e2k_flush_cache_range(pvVirtStart, pvVirtEnd); +} + +void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(sCPUPhysStart); + PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd); + + /* No invalidate-only support */ + e2k_flush_cache_range(pvVirtStart, 
pvVirtEnd); +} + +OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void) +{ + return OS_CACHE_OP_ADDR_TYPE_VIRTUAL; +} + +void OSUserModeAccessToPerfCountersEn(void) +{ + /* Not applicable to e2k architecture. */ +} + +IMG_BOOL OSIsWriteCombineUnalignedSafe(void) +{ + return IMG_TRUE; /* TODO: look to OSIsWriteCombineUnalignedSafe() from osfunc_arm64.c */ +} diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_mips.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_mips.c new file mode 100644 index 000000000000..986caf70d384 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_mips.c @@ -0,0 +1,113 @@ +/*************************************************************************/ /*! +@File +@Title mips specific OS functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Processor specific OS functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include +#include + +#include "pvrsrv_error.h" +#include "img_types.h" +#include "img_defs.h" +#include "osfunc.h" +#include "pvr_debug.h" + +void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + unsigned long len; + PVR_UNREFERENCED_PARAMETER(sCPUPhysStart); + PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd); + + PVR_ASSERT((uintptr_t) pvVirtEnd >= (uintptr_t) pvVirtStart); + + len = (unsigned long) pvVirtEnd - (unsigned long) pvVirtStart; + dma_cache_sync(psDevNode->psDevConfig->pvOSDevice, (void *)pvVirtStart, len, DMA_BIDIRECTIONAL); +} + +void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + unsigned long len; + PVR_UNREFERENCED_PARAMETER(sCPUPhysStart); + PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd); + + PVR_ASSERT((uintptr_t) pvVirtEnd >= (uintptr_t) pvVirtStart); + + len = (unsigned long) pvVirtEnd - (unsigned long) pvVirtStart; + dma_cache_sync(psDevNode->psDevConfig->pvOSDevice, (void *)pvVirtStart, len, DMA_TO_DEVICE); +} + +void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + unsigned long len; + PVR_UNREFERENCED_PARAMETER(sCPUPhysStart); + PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd); + + PVR_ASSERT((uintptr_t) pvVirtEnd >= (uintptr_t) pvVirtStart); + + len = (unsigned long) pvVirtEnd - (unsigned long) pvVirtStart; + dma_cache_sync(psDevNode->psDevConfig->pvOSDevice, (void *)pvVirtStart, len, DMA_FROM_DEVICE); +} + +OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void) +{ + return OS_CACHE_OP_ADDR_TYPE_VIRTUAL; +} + +void OSUserModeAccessToPerfCountersEn(void) +{ + /* Not applicable to MIPS architecture. 
*/ +} + +IMG_BOOL OSIsWriteCombineUnalignedSafe(void) +{ + return IMG_TRUE; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_riscv.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_riscv.c new file mode 100644 index 000000000000..b49ee7c16988 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_riscv.c @@ -0,0 +1,168 @@ +/*************************************************************************/ /*! +@File +@Title RISC-V specific OS functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Processor specific OS functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "osfunc.h" +#include "pvr_debug.h" +#include "cache_ops.h" + +void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + /* + * RISC-V cache maintenance mechanism is not part of the core spec. + * This leaves the actual mechanism of action to an implementer. + * Here we let the system layer decide how maintenance is done. + */ + if (psDevNode->psDevConfig->pfnHostCacheMaintenance) + { + psDevNode->psDevConfig->pfnHostCacheMaintenance( + psDevNode->psDevConfig->hSysData, + PVRSRV_CACHE_OP_FLUSH, + pvVirtStart, + pvVirtEnd, + sCPUPhysStart, + sCPUPhysEnd); + + } + else + { + PVR_DPF((PVR_DBG_WARNING, + "%s: System doesn't implement cache maintenance. Skipping!", + __func__)); + } + +} + +void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + /* + * RISC-V cache maintenance mechanism is not part of the core spec. + * This leaves the actual mechanism of action to an implementer. 
+ * Here we let the system layer decide how maintenance is done. + */ + if (psDevNode->psDevConfig->pfnHostCacheMaintenance) + { + psDevNode->psDevConfig->pfnHostCacheMaintenance( + psDevNode->psDevConfig->hSysData, + PVRSRV_CACHE_OP_CLEAN, + pvVirtStart, + pvVirtEnd, + sCPUPhysStart, + sCPUPhysEnd); + + } + else + { + PVR_DPF((PVR_DBG_WARNING, + "%s: System doesn't implement cache maintenance. Skipping!", + __func__)); + } +} + +void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + /* + * RISC-V cache maintenance mechanism is not part of the core spec. + * This leaves the actual mechanism of action to an implementer. + * Here we let the system layer decide how maintenance is done. + */ + if (psDevNode->psDevConfig->pfnHostCacheMaintenance) + { + psDevNode->psDevConfig->pfnHostCacheMaintenance( + psDevNode->psDevConfig->hSysData, + PVRSRV_CACHE_OP_INVALIDATE, + pvVirtStart, + pvVirtEnd, + sCPUPhysStart, + sCPUPhysEnd); + + } + else + { + PVR_DPF((PVR_DBG_WARNING, + "%s: System doesn't implement cache maintenance. Skipping!", + __func__)); + } +} + +OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void) +{ + /* + * Need to obtain psDevNode here and do the following: + * + * OS_CACHE_OP_ADDR_TYPE eOpAddrType = + * psDevNode->psDevConfig->bHasPhysicalCacheMaintenance ? + * OS_CACHE_OP_ADDR_TYPE_PHYSICAL : OS_CACHE_OP_ADDR_TYPE_VIRTUAL; + * + * Return BOTH for now on. 
+ * + */ + return OS_CACHE_OP_ADDR_TYPE_BOTH; +} + +void OSUserModeAccessToPerfCountersEn(void) +{ + PVR_DPF((PVR_DBG_WARNING, "%s: Not implemented!", __func__)); + PVR_ASSERT(0); +} + +IMG_BOOL OSIsWriteCombineUnalignedSafe(void) +{ + PVR_DPF((PVR_DBG_WARNING, + "%s: Not implemented (assuming false)!", + __func__)); + PVR_ASSERT(0); + return IMG_FALSE; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_x86.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_x86.c new file mode 100644 index 000000000000..2a70a80d1f7b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/osfunc_x86.c @@ -0,0 +1,125 @@ +/*************************************************************************/ /*! +@File +@Title x86 specific OS functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Processor specific OS functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#include "pvrsrv_error.h" +#include "img_types.h" +#include "img_defs.h" +#include "osfunc.h" +#include "pvr_debug.h" + +static void x86_flush_cache_range(const void *pvStart, const void *pvEnd) +{ + IMG_BYTE *pbStart = (IMG_BYTE *)pvStart; + IMG_BYTE *pbEnd = (IMG_BYTE *)pvEnd; + IMG_BYTE *pbBase; + + pbEnd = (IMG_BYTE *)PVR_ALIGN((uintptr_t)pbEnd, + (uintptr_t)boot_cpu_data.x86_clflush_size); + + mb(); + for (pbBase = pbStart; pbBase < pbEnd; pbBase += boot_cpu_data.x86_clflush_size) + { +#if !defined(CONFIG_L4) + clflush(pbBase); +#endif + } + mb(); +} + +void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(sCPUPhysStart); + PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd); + + x86_flush_cache_range(pvVirtStart, pvVirtEnd); +} + +void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(sCPUPhysStart); + PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd); + + /* No clean feature on x86 */ + x86_flush_cache_range(pvVirtStart, pvVirtEnd); +} + +void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(sCPUPhysStart); + PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd); + + /* No invalidate-only support */ + x86_flush_cache_range(pvVirtStart, pvVirtEnd); +} + +OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void) +{ + return OS_CACHE_OP_ADDR_TYPE_VIRTUAL; +} + +void OSUserModeAccessToPerfCountersEn(void) +{ + /* Not applicable to x86 architecture. 
*/ +} + +IMG_BOOL OSIsWriteCombineUnalignedSafe(void) +{ + return IMG_TRUE; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/osmmap_stub.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/osmmap_stub.c new file mode 100644 index 000000000000..d149391429ee --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/osmmap_stub.c @@ -0,0 +1,146 @@ +/*************************************************************************/ /*! +@File +@Title Device Memory Management +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description OS abstraction for the mmap2 interface for mapping PMRs into + User Mode memory +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* our exported API */ +#include "osmmap.h" + +/* include/ */ +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" + +/* services/include/ */ + +/* services/include/srvhelper/ */ +#include "ra.h" + +#include "pmr.h" + +PVRSRV_ERROR +OSMMapPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_SIZE_T uiPMRSize, + IMG_UINT32 uiFlags, + IMG_HANDLE *phOSMMapPrivDataOut, + void **ppvMappingAddressOut, + size_t *puiMappingLengthOut) +{ + PVRSRV_ERROR eError; + PMR *psPMR; + void *pvKernelAddress; + size_t uiLength; + IMG_HANDLE hPriv; + + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(uiFlags); + + /* + Normally this function would mmap a PMR into the memory space of + user process, but in this case we're taking a PMR and mapping it + into kernel virtual space. 
We keep the same function name for + symmetry as this allows the higher layers of the software stack + to not care whether they are user mode or kernel + */ + + psPMR = hPMR; + + if (PMR_IsSparse(psPMR)) + { + eError = PMRAcquireSparseKernelMappingData(psPMR, + 0, + 0, + &pvKernelAddress, + &uiLength, + &hPriv); + } + else + { + eError = PMRAcquireKernelMappingData(psPMR, + 0, + 0, + &pvKernelAddress, + &uiLength, + &hPriv); + } + if (eError != PVRSRV_OK) + { + goto e0; + } + + *phOSMMapPrivDataOut = hPriv; + *ppvMappingAddressOut = pvKernelAddress; + *puiMappingLengthOut = uiLength; + + /* MappingLength might be rounded up to page size */ + PVR_ASSERT(*puiMappingLengthOut >= uiPMRSize); + + return PVRSRV_OK; + + /* + error exit paths follow + */ + +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +void +OSMUnmapPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_HANDLE hOSMMapPrivData, + void *pvMappingAddress, + size_t uiMappingLength) +{ + PMR *psPMR; + + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(pvMappingAddress); + PVR_UNREFERENCED_PARAMETER(uiMappingLength); + + psPMR = hPMR; + PMRReleaseKernelMappingData(psPMR, + hOSMMapPrivData); +} diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/physmem_dmabuf.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/physmem_dmabuf.c new file mode 100644 index 000000000000..4666e129043b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/physmem_dmabuf.c @@ -0,0 +1,1257 @@ +/*************************************************************************/ /*! +@File physmem_dmabuf.c +@Title dmabuf memory allocator +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Part of the memory management. This module is responsible for + implementing the function callbacks for dmabuf memory. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include <linux/version.h> + +#include "physmem_dmabuf.h" +#include "pvrsrv.h" +#include "pmr.h" + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)) || defined(SUPPORT_ION) || defined(KERNEL_HAS_DMABUF_VMAP_MMAP) + +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/dma-buf.h> +#include <linux/scatterlist.h> + +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" + +#include "allocmem.h" +#include "osfunc.h" +#include "pmr_impl.h" +#include "hash.h" +#include "private_data.h" +#include "module_common.h" +#include "pvr_ion_stats.h" + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +#include "ri_server.h" +#endif + +#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) +#include "mmap_stats.h" +#endif + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#endif + +#include "kernel_compatibility.h" + +/* + * dma_buf_ops + * + * These are all returning errors if used. + * The point is to prevent anyone outside of our driver from importing + and using our dmabuf.
+ */ + +static int PVRDmaBufOpsAttach(struct dma_buf *psDmaBuf, +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) && \ + !((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && (defined(CHROMIUMOS_KERNEL)))) + struct device *psDev, +#endif + struct dma_buf_attachment *psAttachment) +{ + return -ENOSYS; +} + +static struct sg_table *PVRDmaBufOpsMap(struct dma_buf_attachment *psAttachment, + enum dma_data_direction eDirection) +{ + /* Attach hasn't been called yet */ + return ERR_PTR(-EINVAL); +} + +static void PVRDmaBufOpsUnmap(struct dma_buf_attachment *psAttachment, + struct sg_table *psTable, + enum dma_data_direction eDirection) +{ +} + +static void PVRDmaBufOpsRelease(struct dma_buf *psDmaBuf) +{ + PMR *psPMR = (PMR *) psDmaBuf->priv; + + PMRUnrefPMR(psPMR); +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) +static void *PVRDmaBufOpsKMap(struct dma_buf *psDmaBuf, unsigned long uiPageNum) +{ + return ERR_PTR(-ENOSYS); +} +#endif + +static int PVRDmaBufOpsMMap(struct dma_buf *psDmaBuf, struct vm_area_struct *psVMA) +{ + return -ENOSYS; +} + +static const struct dma_buf_ops sPVRDmaBufOps = +{ + .attach = PVRDmaBufOpsAttach, + .map_dma_buf = PVRDmaBufOpsMap, + .unmap_dma_buf = PVRDmaBufOpsUnmap, + .release = PVRDmaBufOpsRelease, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) && \ + !((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && (defined(CHROMIUMOS_KERNEL)))) + .map_atomic = PVRDmaBufOpsKMap, +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) + .map = PVRDmaBufOpsKMap, +#endif +#else + .kmap_atomic = PVRDmaBufOpsKMap, + .kmap = PVRDmaBufOpsKMap, +#endif + .mmap = PVRDmaBufOpsMMap, +}; + +/* end of dma_buf_ops */ + + +typedef struct _PMR_DMA_BUF_DATA_ +{ + /* Filled in at PMR create time */ + PHYS_HEAP *psPhysHeap; + struct dma_buf_attachment *psAttachment; + PFN_DESTROY_DMABUF_PMR pfnDestroy; + IMG_BOOL bPoisonOnFree; + + /* Modified by PMR lock/unlock */ + struct sg_table 
*psSgTable; + IMG_DEV_PHYADDR *pasDevPhysAddr; + IMG_UINT32 ui32PhysPageCount; + IMG_UINT32 ui32VirtPageCount; +} PMR_DMA_BUF_DATA; + +/* Start size of the g_psDmaBufHash hash table */ +#define DMA_BUF_HASH_SIZE 20 + +static DEFINE_MUTEX(g_HashLock); + +static HASH_TABLE *g_psDmaBufHash; +static IMG_UINT32 g_ui32HashRefCount; + +#if defined(PVR_ANDROID_ION_USE_SG_LENGTH) +#define pvr_sg_length(sg) ((sg)->length) +#else +#define pvr_sg_length(sg) sg_dma_len(sg) +#endif + +/***************************************************************************** + * PMR callback functions * + *****************************************************************************/ + +static PVRSRV_ERROR PMRFinalizeDmaBuf(PMR_IMPL_PRIVDATA pvPriv) +{ + PMR_DMA_BUF_DATA *psPrivData = pvPriv; + struct dma_buf_attachment *psAttachment = psPrivData->psAttachment; + struct dma_buf *psDmaBuf = psAttachment->dmabuf; + struct sg_table *psSgTable = psPrivData->psSgTable; + PMR *psPMR; + PVRSRV_ERROR eError = PVRSRV_OK; + + if (psDmaBuf->ops != &sPVRDmaBufOps) + { + if (g_psDmaBufHash) + { + /* We have a hash table so check if we've seen this dmabuf before */ + psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (uintptr_t) psDmaBuf); + + if (psPMR) + { + if (!PMRIsPMRLive(psPMR)) + { + HASH_Remove(g_psDmaBufHash, (uintptr_t) psDmaBuf); + g_ui32HashRefCount--; + + if (g_ui32HashRefCount == 0) + { + HASH_Delete(g_psDmaBufHash); + g_psDmaBufHash = NULL; + } + } + else{ + eError = PVRSRV_ERROR_PMR_STILL_REFERENCED; + } + } + PVRSRVIonRemoveMemAllocRecord(psDmaBuf); + } + }else + { + psPMR = (PMR *) psDmaBuf->priv; + if (PMRIsPMRLive(psPMR)) + { + eError = PVRSRV_ERROR_PMR_STILL_REFERENCED; + } + + } + + if (PVRSRV_OK != eError) + { + return eError; + } + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT, + psPrivData->ui32PhysPageCount << PAGE_SHIFT, + OSGetCurrentClientProcessIDKM()); +#endif + + psPrivData->ui32PhysPageCount = 0; + + 
dma_buf_unmap_attachment(psAttachment, psSgTable, DMA_BIDIRECTIONAL); + + + if (psPrivData->bPoisonOnFree) + { + void *pvKernAddr; + int err; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) + int i; +#endif + + err = dma_buf_begin_cpu_access(psDmaBuf, DMA_FROM_DEVICE); + if (err) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to begin cpu access for free poisoning (err=%d)", + __func__, err)); + PVR_ASSERT(IMG_FALSE); + goto exit; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)) + pvKernAddr = dma_buf_vmap(psDmaBuf); + if (!pvKernAddr) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to poison allocation before free", + __func__)); + PVR_ASSERT(IMG_FALSE); + goto exit_end_access; + } + + memset(pvKernAddr, PVRSRV_POISON_ON_FREE_VALUE, psDmaBuf->size); + + dma_buf_vunmap(psDmaBuf, pvKernAddr); +#else + for (i = 0; i < psDmaBuf->size / PAGE_SIZE; i++) + { + pvKernAddr = dma_buf_kmap(psDmaBuf, i); + if (IS_ERR_OR_NULL(pvKernAddr)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to poison allocation before free (err=%ld)", + __func__, pvKernAddr ? 
PTR_ERR(pvKernAddr) : -ENOMEM)); + PVR_ASSERT(IMG_FALSE); + goto exit_end_access; + } + + memset(pvKernAddr, PVRSRV_POISON_ON_FREE_VALUE, PAGE_SIZE); + + dma_buf_kunmap(psDmaBuf, i, pvKernAddr); + } +#endif + +exit_end_access: + do { + err = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE); + } while (err == -EAGAIN || err == -EINTR); + } + +exit: + if (psPrivData->pfnDestroy) + { + eError = psPrivData->pfnDestroy(psPrivData->psPhysHeap, psPrivData->psAttachment); + if (eError != PVRSRV_OK) + { + return eError; + } + } + + OSFreeMem(psPrivData->pasDevPhysAddr); + OSFreeMem(psPrivData); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR PMRLockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv) +{ + PVR_UNREFERENCED_PARAMETER(pvPriv); + return PVRSRV_OK; +} + +static PVRSRV_ERROR PMRUnlockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv) +{ + PVR_UNREFERENCED_PARAMETER(pvPriv); + return PVRSRV_OK; +} + +static void PMRGetFactoryLock(void) +{ + mutex_lock(&g_HashLock); +} + +static void PMRReleaseFactoryLock(void) +{ + mutex_unlock(&g_HashLock); +} + +static PVRSRV_ERROR PMRDevPhysAddrDmaBuf(PMR_IMPL_PRIVDATA pvPriv, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T *puiOffset, + IMG_BOOL *pbValid, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PMR_DMA_BUF_DATA *psPrivData = pvPriv; + IMG_UINT32 ui32PageIndex; + IMG_UINT32 idx; + + if (ui32Log2PageSize != PAGE_SHIFT) + { + return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY; + } + + for (idx=0; idx < ui32NumOfPages; idx++) + { + if (pbValid[idx]) + { + IMG_UINT32 ui32InPageOffset; + + ui32PageIndex = puiOffset[idx] >> PAGE_SHIFT; + ui32InPageOffset = puiOffset[idx] - ((IMG_DEVMEM_OFFSET_T)ui32PageIndex << PAGE_SHIFT); + + + PVR_ASSERT(ui32PageIndex < psPrivData->ui32VirtPageCount); + PVR_ASSERT(ui32InPageOffset < PAGE_SIZE); + psDevPAddr[idx].uiAddr = psPrivData->pasDevPhysAddr[ui32PageIndex].uiAddr + ui32InPageOffset; + } + } + return PVRSRV_OK; +} + +static PVRSRV_ERROR 
+PMRAcquireKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv, + size_t uiOffset, + size_t uiSize, + void **ppvKernelAddressOut, + IMG_HANDLE *phHandleOut, + PMR_FLAGS_T ulFlags) +{ + PMR_DMA_BUF_DATA *psPrivData = pvPriv; + struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf; + void *pvKernAddr; + PVRSRV_ERROR eError; + int err; + + if (psPrivData->ui32PhysPageCount != psPrivData->ui32VirtPageCount) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Kernel mappings for sparse DMABufs " + "are not allowed!", __func__)); + eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING; + goto fail; + } + + err = dma_buf_begin_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL); + if (err) + { + eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING; + goto fail; + } + + pvKernAddr = dma_buf_vmap(psDmaBuf); + if (IS_ERR_OR_NULL(pvKernAddr)) + { + eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING; + goto fail_kmap; + } + + *ppvKernelAddressOut = pvKernAddr + uiOffset; + *phHandleOut = pvKernAddr; + + return PVRSRV_OK; + +fail_kmap: + do { + err = dma_buf_end_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL); + } while (err == -EAGAIN || err == -EINTR); + +fail: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +static void PMRReleaseKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv, + IMG_HANDLE hHandle) +{ + PMR_DMA_BUF_DATA *psPrivData = pvPriv; + struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf; + void *pvKernAddr = hHandle; + int err; + + dma_buf_vunmap(psDmaBuf, pvKernAddr); + + do { + err = dma_buf_end_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL); + } while (err == -EAGAIN || err == -EINTR); +} + +static PVRSRV_ERROR PMRMMapDmaBuf(PMR_IMPL_PRIVDATA pvPriv, + PMR *psPMR, + PMR_MMAP_DATA pOSMMapData) +{ + PMR_DMA_BUF_DATA *psPrivData = pvPriv; + struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf; + struct vm_area_struct *psVma = pOSMMapData; + int err; + + if (psPrivData->ui32PhysPageCount != psPrivData->ui32VirtPageCount) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Not possible to MMAP sparse DMABufs", + __func__)); 
+ return PVRSRV_ERROR_NOT_IMPLEMENTED; + } + + err = dma_buf_mmap(psDmaBuf, psVma, 0); + if (err) + { + return (err == -EINVAL) ? PVRSRV_ERROR_NOT_SUPPORTED : PVRSRV_ERROR_BAD_MAPPING; + } + +#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) + MMapStatsAddOrUpdatePMR(psPMR, psVma->vm_end - psVma->vm_start); +#endif + + return PVRSRV_OK; +} + +static PMR_IMPL_FUNCTAB _sPMRDmaBufFuncTab = +{ + .pfnLockPhysAddresses = PMRLockPhysAddressesDmaBuf, + .pfnUnlockPhysAddresses = PMRUnlockPhysAddressesDmaBuf, + .pfnDevPhysAddr = PMRDevPhysAddrDmaBuf, + .pfnAcquireKernelMappingData = PMRAcquireKernelMappingDataDmaBuf, + .pfnReleaseKernelMappingData = PMRReleaseKernelMappingDataDmaBuf, + .pfnMMap = PMRMMapDmaBuf, + .pfnFinalize = PMRFinalizeDmaBuf, + .pfnGetPMRFactoryLock = PMRGetFactoryLock, + .pfnReleasePMRFactoryLock = PMRReleaseFactoryLock, +}; + +/***************************************************************************** + * Public facing interface * + *****************************************************************************/ + +PVRSRV_ERROR +PhysmemCreateNewDmaBufBackedPMR(PVRSRV_DEVICE_NODE *psDevNode, + PHYS_HEAP *psHeap, + struct dma_buf_attachment *psAttachment, + PFN_DESTROY_DMABUF_PMR pfnDestroy, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 ui32NameSize, + const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], + PMR **ppsPMRPtr) +{ + struct dma_buf *psDmaBuf = psAttachment->dmabuf; + PMR_DMA_BUF_DATA *psPrivData; + PMR_FLAGS_T uiPMRFlags; + IMG_BOOL bZeroOnAlloc; + IMG_BOOL bPoisonOnAlloc; + IMG_BOOL bPoisonOnFree; + PVRSRV_ERROR eError; + IMG_UINT32 i, j; + IMG_UINT32 uiPagesPerChunk = uiChunkSize >> PAGE_SHIFT; + IMG_UINT32 ui32PageCount = 0; + struct scatterlist *sg; + struct sg_table *table; + IMG_UINT32 uiSglOffset; + IMG_CHAR pszAnnotation[DEVMEM_ANNOTATION_MAX_LEN]; + + bZeroOnAlloc = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags); + 
bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags); + bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags); + + if (bZeroOnAlloc && bPoisonOnAlloc) + { + /* Zero on Alloc and Poison on Alloc are mutually exclusive */ + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto errReturn; + } + + psPrivData = OSAllocZMem(sizeof(*psPrivData)); + if (psPrivData == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto errReturn; + } + + psPrivData->psPhysHeap = psHeap; + psPrivData->psAttachment = psAttachment; + psPrivData->pfnDestroy = pfnDestroy; + psPrivData->bPoisonOnFree = bPoisonOnFree; + psPrivData->ui32VirtPageCount = + (ui32NumVirtChunks * uiChunkSize) >> PAGE_SHIFT; + + psPrivData->pasDevPhysAddr = + OSAllocZMem(sizeof(*(psPrivData->pasDevPhysAddr)) * + psPrivData->ui32VirtPageCount); + if (!psPrivData->pasDevPhysAddr) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate buffer for physical addresses (oom)", + __func__)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto errFreePrivData; + } + + if (bZeroOnAlloc || bPoisonOnAlloc) + { + void *pvKernAddr; + int err; + + err = dma_buf_begin_cpu_access(psDmaBuf, DMA_FROM_DEVICE); + if (err) + { + eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING; + goto errFreePhysAddr; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)) + pvKernAddr = dma_buf_vmap(psDmaBuf); + if (!pvKernAddr) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map buffer for %s)", + __func__, bZeroOnAlloc ? "zeroing" : "poisoning")); + eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING; + + do { + err = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE); + } while (err == -EAGAIN || err == -EINTR); + + goto errFreePhysAddr; + } + + if (bZeroOnAlloc) + { + memset(pvKernAddr, 0, psDmaBuf->size); + } + else + { + memset(pvKernAddr, PVRSRV_POISON_ON_ALLOC_VALUE, psDmaBuf->size); + } + + dma_buf_vunmap(psDmaBuf, pvKernAddr); +#else + for (i = 0; i < psDmaBuf->size / PAGE_SIZE; i++) + { + pvKernAddr = dma_buf_kmap(psDmaBuf, i); + if (IS_ERR_OR_NULL(pvKernAddr)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map page for %s (err=%ld)", + __func__, bZeroOnAlloc ? "zeroing" : "poisoning", + pvKernAddr ?
PTR_ERR(pvKernAddr) : -ENOMEM)); + eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING; + + do { + err = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE); + } while (err == -EAGAIN || err == -EINTR); + + goto errFreePhysAddr; + } + + if (bZeroOnAlloc) + { + memset(pvKernAddr, 0, PAGE_SIZE); + } + else + { + memset(pvKernAddr, PVRSRV_POISON_ON_ALLOC_VALUE, PAGE_SIZE); + } + + dma_buf_kunmap(psDmaBuf, i, pvKernAddr); + } +#endif + + do { + err = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE); + } while (err == -EAGAIN || err == -EINTR); + } + + table = dma_buf_map_attachment(psAttachment, DMA_BIDIRECTIONAL); + if (IS_ERR_OR_NULL(table)) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto errFreePhysAddr; + } + + /* + * We do a two pass process: first work out how many pages there + * are and second, fill in the data. + */ + for_each_sg(table->sgl, sg, table->nents, i) + { + ui32PageCount += PAGE_ALIGN(pvr_sg_length(sg)) / PAGE_SIZE; + } + + if (WARN_ON(!ui32PageCount)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Number of phys. 
pages must not be zero", + __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto errUnmap; + } + + if (WARN_ON(ui32PageCount != ui32NumPhysChunks * uiPagesPerChunk)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Requested physical chunks and actual " + "number of physical dma buf pages don't match", + __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto errUnmap; + } + + psPrivData->ui32PhysPageCount = ui32PageCount; + psPrivData->psSgTable = table; + ui32PageCount = 0; + sg = table->sgl; + uiSglOffset = 0; + + + /* Fill physical address array */ + for (i = 0; i < ui32NumPhysChunks; i++) + { + for (j = 0; j < uiPagesPerChunk; j++) + { + IMG_UINT32 uiIdx = pui32MappingTable[i] * uiPagesPerChunk + j; + + psPrivData->pasDevPhysAddr[uiIdx].uiAddr = + sg_dma_address(sg) + uiSglOffset; + + /* Get the next offset for the current sgl or the next sgl */ + uiSglOffset += PAGE_SIZE; + if (uiSglOffset >= pvr_sg_length(sg)) + { + sg = sg_next(sg); + uiSglOffset = 0; + + /* Check that we haven't looped */ + if (WARN_ON(sg == table->sgl)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to fill phys. 
address " + "array", + __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto errUnmap; + } + } + } + } + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT, + psPrivData->ui32PhysPageCount << PAGE_SHIFT, + OSGetCurrentClientProcessIDKM()); +#endif + + uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK); + + /* + * Check no significant bits were lost in cast due to different + * bit widths for flags + */ + PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK)); + + if (OSSNPrintf((IMG_CHAR *)pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN, "ImpDmaBuf:%s", (IMG_CHAR *)pszName) < 0) + { + pszAnnotation[0] = '\0'; + } + else + { + pszAnnotation[DEVMEM_ANNOTATION_MAX_LEN-1] = '\0'; + } + + eError = PMRCreatePMR(psDevNode, + psHeap, + ui32NumVirtChunks * uiChunkSize, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + PAGE_SHIFT, + uiPMRFlags, + pszAnnotation, + &_sPMRDmaBufFuncTab, + psPrivData, + PMR_TYPE_DMABUF, + ppsPMRPtr, + PDUMP_NONE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create PMR (%s)", + __func__, PVRSRVGetErrorString(eError))); + goto errFreePhysAddr; + } + + return PVRSRV_OK; + +errUnmap: + dma_buf_unmap_attachment(psAttachment, table, DMA_BIDIRECTIONAL); +errFreePhysAddr: + OSFreeMem(psPrivData->pasDevPhysAddr); +errFreePrivData: + OSFreeMem(psPrivData); +errReturn: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +static PVRSRV_ERROR PhysmemDestroyDmaBuf(PHYS_HEAP *psHeap, + struct dma_buf_attachment *psAttachment) +{ + struct dma_buf *psDmaBuf = psAttachment->dmabuf; + + PVR_UNREFERENCED_PARAMETER(psHeap); + + dma_buf_detach(psDmaBuf, psAttachment); + dma_buf_put(psDmaBuf); + + return PVRSRV_OK; +} + +struct dma_buf * +PhysmemGetDmaBuf(PMR *psPMR) +{ + PMR_DMA_BUF_DATA *psPrivData; + + psPrivData = PMRGetPrivateData(psPMR, &_sPMRDmaBufFuncTab); + if (psPrivData) + { + return 
psPrivData->psAttachment->dmabuf; + } + + return NULL; +} + +PVRSRV_ERROR +PhysmemExportDmaBuf(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + PMR *psPMR, + IMG_INT *piFd) +{ + struct dma_buf *psDmaBuf; + IMG_DEVMEM_SIZE_T uiPMRSize; + PVRSRV_ERROR eError; + IMG_INT iFd; + + mutex_lock(&g_HashLock); + + PMRRefPMR(psPMR); + + eError = PMR_LogicalSize(psPMR, &uiPMRSize); + if (eError != PVRSRV_OK) + { + goto fail_pmr_ref; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + { + DEFINE_DMA_BUF_EXPORT_INFO(sDmaBufExportInfo); + + sDmaBufExportInfo.priv = psPMR; + sDmaBufExportInfo.ops = &sPVRDmaBufOps; + sDmaBufExportInfo.size = uiPMRSize; + sDmaBufExportInfo.flags = O_RDWR; + + psDmaBuf = dma_buf_export(&sDmaBufExportInfo); + } +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)) + psDmaBuf = dma_buf_export(psPMR, &sPVRDmaBufOps, + uiPMRSize, O_RDWR, NULL); +#else + psDmaBuf = dma_buf_export(psPMR, &sPVRDmaBufOps, + uiPMRSize, O_RDWR); +#endif + + if (IS_ERR_OR_NULL(psDmaBuf)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to export buffer (err=%ld)", + __func__, psDmaBuf ? 
PTR_ERR(psDmaBuf) : -ENOMEM)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_pmr_ref; + } + + iFd = dma_buf_fd(psDmaBuf, O_RDWR); + if (iFd < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf fd (err=%d)", + __func__, iFd)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_dma_buf; + } + + mutex_unlock(&g_HashLock); + *piFd = iFd; + return PVRSRV_OK; + +fail_dma_buf: + dma_buf_put(psDmaBuf); + +fail_pmr_ref: + mutex_unlock(&g_HashLock); + PMRUnrefPMR(psPMR); + + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR +PhysmemImportDmaBuf(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_INT fd, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 ui32NameSize, + const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], + PMR **ppsPMRPtr, + IMG_DEVMEM_SIZE_T *puiSize, + IMG_DEVMEM_ALIGN_T *puiAlign) +{ + IMG_DEVMEM_SIZE_T uiSize; + IMG_UINT32 ui32MappingTable = 0; + struct dma_buf *psDmaBuf; + PVRSRV_ERROR eError; + + /* Get the buffer handle */ + psDmaBuf = dma_buf_get(fd); + if (IS_ERR_OR_NULL(psDmaBuf)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf from fd (err=%ld)", + __func__, psDmaBuf ? 
PTR_ERR(psDmaBuf) : -ENOMEM)); + return PVRSRV_ERROR_BAD_MAPPING; + + } + + uiSize = psDmaBuf->size; + + eError = PhysmemImportSparseDmaBuf(psConnection, + psDevNode, + fd, + uiFlags, + uiSize, + 1, + 1, + &ui32MappingTable, + ui32NameSize, + pszName, + ppsPMRPtr, + puiSize, + puiAlign); + + dma_buf_put(psDmaBuf); + + return eError; +} + +PVRSRV_ERROR +PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_INT fd, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 ui32NameSize, + const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], + PMR **ppsPMRPtr, + IMG_DEVMEM_SIZE_T *puiSize, + IMG_DEVMEM_ALIGN_T *puiAlign) +{ + PMR *psPMR = NULL; + struct dma_buf_attachment *psAttachment; + struct dma_buf *psDmaBuf; + PVRSRV_ERROR eError; + IMG_BOOL bHashTableCreated = IMG_FALSE; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + if (!psDevNode) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto errReturn; + } + + /* Terminate string from bridge to prevent corrupt annotations in RI */ + if (pszName != NULL) + { + IMG_CHAR* pszName0 = (IMG_CHAR*) pszName; + pszName0[ui32NameSize-1] = '\0'; + } + + mutex_lock(&g_HashLock); + + /* Get the buffer handle */ + psDmaBuf = dma_buf_get(fd); + if (IS_ERR_OR_NULL(psDmaBuf)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf from fd (err=%ld)", + __func__, psDmaBuf ? 
PTR_ERR(psDmaBuf) : -ENOMEM)); + eError = PVRSRV_ERROR_BAD_MAPPING; + goto errReturn; + } + + if (psDmaBuf->ops == &sPVRDmaBufOps) + { + PVRSRV_DEVICE_NODE *psPMRDevNode; + + /* We exported this dma_buf, so we can just get its PMR */ + psPMR = (PMR *) psDmaBuf->priv; + + /* However, we can't import it if it belongs to a different device */ + psPMRDevNode = PMR_DeviceNode(psPMR); + if (psPMRDevNode != psDevNode) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device", + __func__)); + eError = PVRSRV_ERROR_PMR_NOT_PERMITTED; + goto err; + } + } + else + { + if (g_psDmaBufHash) + { + /* We have a hash table so check if we've seen this dmabuf before */ + psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (uintptr_t) psDmaBuf); + } + else + { + /* + * As different processes may import the same dmabuf we need to + * create a hash table so we don't generate a duplicate PMR but + * rather just take a reference on an existing one. + */ + g_psDmaBufHash = HASH_Create(DMA_BUF_HASH_SIZE); + if (!g_psDmaBufHash) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err; + } + bHashTableCreated = IMG_TRUE; + } + } + + if (psPMR) + { + /* Reuse the PMR we already created */ + PMRRefPMR(psPMR); + + *ppsPMRPtr = psPMR; + PMR_LogicalSize(psPMR, puiSize); + *puiAlign = PAGE_SIZE; + } + /* No errors so far */ + eError = PVRSRV_OK; + +err: + if (psPMR || (PVRSRV_OK != eError)) + { + mutex_unlock(&g_HashLock); + dma_buf_put(psDmaBuf); + return eError; + } + + /* Do we want this to be a sparse PMR? */ + if (ui32NumVirtChunks > 1) + { + IMG_UINT32 i; + + /* Parameter validation */ + if (psDmaBuf->size != (uiChunkSize * ui32NumPhysChunks) || + uiChunkSize != PAGE_SIZE || + ui32NumPhysChunks > ui32NumVirtChunks) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Requesting sparse buffer: " + "uiChunkSize ("IMG_DEVMEM_SIZE_FMTSPEC") must be equal to " + "OS page size (%lu). uiChunkSize * ui32NumPhysChunks " + "("IMG_DEVMEM_SIZE_FMTSPEC") must" + " be equal to the buffer size ("IMG_SIZE_FMTSPEC"). 
" + "ui32NumPhysChunks (%u) must be lesser or equal to " + "ui32NumVirtChunks (%u)", + __func__, + uiChunkSize, + PAGE_SIZE, + uiChunkSize * ui32NumPhysChunks, + psDmaBuf->size, + ui32NumPhysChunks, + ui32NumVirtChunks)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto errUnlockAndDMAPut; + } + + /* Parameter validation - Mapping table entries*/ + for (i = 0; i < ui32NumPhysChunks; i++) + { + if (pui32MappingTable[i] > ui32NumVirtChunks) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Requesting sparse buffer: " + "Entry in mapping table (%u) is out of allocation " + "bounds (%u)", + __func__, + (IMG_UINT32) pui32MappingTable[i], + (IMG_UINT32) ui32NumVirtChunks)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto errUnlockAndDMAPut; + } + } + } + else + { + /* if ui32NumPhysChunks == 0 pui32MappingTable is NULL and because + * is ui32NumPhysChunks is set to 1 below we don't allow NULL array */ + if (pui32MappingTable == NULL) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto errUnlockAndDMAPut; + } + + /* Make sure parameters are valid for non-sparse allocations as well */ + uiChunkSize = psDmaBuf->size; + ui32NumPhysChunks = 1; + ui32NumVirtChunks = 1; + } + + + psAttachment = dma_buf_attach(psDmaBuf, psDevNode->psDevConfig->pvOSDevice); + if (IS_ERR_OR_NULL(psAttachment)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to attach to dma-buf (err=%ld)", + __func__, psAttachment? PTR_ERR(psAttachment) : -ENOMEM)); + eError = PVRSRV_ERROR_BAD_MAPPING; + goto errUnlockAndDMAPut; + } + + /* + * Note: + * While we have no way to determine the type of the buffer we just + * assume that all dmabufs are from the same physical heap. 
+ */ + eError = PhysmemCreateNewDmaBufBackedPMR(psDevNode, + psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL], + psAttachment, + PhysmemDestroyDmaBuf, + uiFlags, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + ui32NameSize, + pszName, + &psPMR); + if (eError != PVRSRV_OK) + { + goto errDMADetach; + } + + /* First time we've seen this dmabuf so store it in the hash table */ + HASH_Insert(g_psDmaBufHash, (uintptr_t) psDmaBuf, (uintptr_t) psPMR); + g_ui32HashRefCount++; + + mutex_unlock(&g_HashLock); + + PVRSRVIonAddMemAllocRecord(psDmaBuf); + + *ppsPMRPtr = psPMR; + *puiSize = ui32NumVirtChunks * uiChunkSize; + *puiAlign = PAGE_SIZE; + + return PVRSRV_OK; + +errDMADetach: + dma_buf_detach(psDmaBuf, psAttachment); + +errUnlockAndDMAPut: + if (IMG_TRUE == bHashTableCreated) + { + HASH_Delete(g_psDmaBufHash); + g_psDmaBufHash = NULL; + } + mutex_unlock(&g_HashLock); + dma_buf_put(psDmaBuf); + +errReturn: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) || defined(SUPPORT_ION) */ + +PVRSRV_ERROR +PhysmemCreateNewDmaBufBackedPMR(PVRSRV_DEVICE_NODE *psDevNode, + PHYS_HEAP *psHeap, + struct dma_buf_attachment *psAttachment, + PFN_DESTROY_DMABUF_PMR pfnDestroy, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 ui32NameSize, + const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], + PMR **ppsPMRPtr) +{ + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(psHeap); + PVR_UNREFERENCED_PARAMETER(psAttachment); + PVR_UNREFERENCED_PARAMETER(pfnDestroy); + PVR_UNREFERENCED_PARAMETER(uiFlags); + PVR_UNREFERENCED_PARAMETER(uiChunkSize); + PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks); + PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks); + PVR_UNREFERENCED_PARAMETER(pui32MappingTable); + PVR_UNREFERENCED_PARAMETER(ui32NameSize); + 
PVR_UNREFERENCED_PARAMETER(pszName); + PVR_UNREFERENCED_PARAMETER(ppsPMRPtr); + + return PVRSRV_ERROR_NOT_SUPPORTED; +} + +struct dma_buf * +PhysmemGetDmaBuf(PMR *psPMR) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + + return NULL; +} + +PVRSRV_ERROR +PhysmemExportDmaBuf(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + PMR *psPMR, + IMG_INT *piFd) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(piFd); + + return PVRSRV_ERROR_NOT_SUPPORTED; +} + +PVRSRV_ERROR +PhysmemImportDmaBuf(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_INT fd, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 ui32NameSize, + const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], + PMR **ppsPMRPtr, + IMG_DEVMEM_SIZE_T *puiSize, + IMG_DEVMEM_ALIGN_T *puiAlign) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(fd); + PVR_UNREFERENCED_PARAMETER(uiFlags); + PVR_UNREFERENCED_PARAMETER(ui32NameSize); + PVR_UNREFERENCED_PARAMETER(pszName); + PVR_UNREFERENCED_PARAMETER(ppsPMRPtr); + PVR_UNREFERENCED_PARAMETER(puiSize); + PVR_UNREFERENCED_PARAMETER(puiAlign); + + return PVRSRV_ERROR_NOT_SUPPORTED; +} + +PVRSRV_ERROR +PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_INT fd, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 ui32NameSize, + const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], + PMR **ppsPMRPtr, + IMG_DEVMEM_SIZE_T *puiSize, + IMG_DEVMEM_ALIGN_T *puiAlign) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(fd); + PVR_UNREFERENCED_PARAMETER(uiFlags); + PVR_UNREFERENCED_PARAMETER(ppsPMRPtr); + PVR_UNREFERENCED_PARAMETER(puiSize); + 
PVR_UNREFERENCED_PARAMETER(puiAlign); + PVR_UNREFERENCED_PARAMETER(uiChunkSize); + PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks); + PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks); + PVR_UNREFERENCED_PARAMETER(pui32MappingTable); + PVR_UNREFERENCED_PARAMETER(ui32NameSize); + PVR_UNREFERENCED_PARAMETER(pszName); + + return PVRSRV_ERROR_NOT_SUPPORTED; +} +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) || defined(SUPPORT_ION) || defined(KERNEL_HAS_DMABUF_VMAP_MMAP) */ diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/physmem_osmem_linux.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/physmem_osmem_linux.c new file mode 100644 index 000000000000..89da7b8afaf5 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/physmem_osmem_linux.c @@ -0,0 +1,4275 @@ +/*************************************************************************/ /*! +@File +@Title Implementation of PMR functions for OS managed memory +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Part of the memory management. This module is responsible for + implementing the function callbacks for physical memory borrowed + from that normally managed by the operating system. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_MCST +#include +#endif + +#if defined(CONFIG_X86) || defined(CONFIG_E2K) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)) +#include +#else +#include +#endif +#endif + +/* include/ */ +#include "rgx_heaps.h" +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" +#include "rgx_pdump_panics.h" +/* services/server/include/ */ +#include "allocmem.h" +#include "osfunc.h" +#include "pdump_km.h" +#include "pmr.h" +#include "pmr_impl.h" +#include "cache_km.h" +#include "devicemem_server_utils.h" + +/* ourselves */ +#include "physmem_osmem.h" +#include "physmem_osmem_linux.h" + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) +#include "hash.h" +#endif +#endif + +#include "kernel_compatibility.h" + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) +static IMG_UINT32 g_uiMaxOrder = PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM; +#else +/* split_page not available on older kernels */ +#undef PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM +#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 0 +static IMG_UINT32 g_uiMaxOrder; +#endif + +/* + These corresponds to the MMU min/max page sizes and associated PTE + alignment that can be used on the device for an allocation. It is + 4KB (min) and 2MB (max) respectively. +*/ +#define PVR_MIN_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ RGX_HEAP_4KB_PAGE_SHIFT +#define PVR_MAX_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ RGX_HEAP_2MB_PAGE_SHIFT + +/* Defines how many pages should be mapped at once to the kernel */ +#define PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES 1024 /* 4 MB */ + +/* + These are used to get/set/mask lower-order bits in a dma_addr_t + to provide side-band information associated with that address. 
+ These includes whether the address was obtained via alloc_page + or dma_alloc and if address came allocated pre-aligned or an + adjustment was made manually to aligned it. +*/ +#define DMA_SET_ADJUSTED_ADDR(x) ((x) | ((dma_addr_t)0x02)) +#define DMA_IS_ADDR_ADJUSTED(x) ((x) & ((dma_addr_t)0x02)) +#define DMA_SET_ALLOCPG_ADDR(x) ((x) | ((dma_addr_t)0x01)) +#define DMA_IS_ALLOCPG_ADDR(x) ((x) & ((dma_addr_t)0x01)) +#define DMA_GET_ALIGN_ADJUSTMENT(x) ((x>>2) & ((dma_addr_t)0x3ff)) +#define DMA_SET_ALIGN_ADJUSTMENT(x,y) ((x) | (((dma_addr_t)y)<<0x02)) +#define DMA_GET_ADDR(x) (((dma_addr_t)x) & ((dma_addr_t)~0xfff)) +#define DMA_VADDR_NOT_IN_USE 0xCAFEF00DDEADBEEFULL + +typedef struct _PMR_OSPAGEARRAY_DATA_ { + /* Device for which this allocation has been made */ + PVRSRV_DEVICE_NODE *psDevNode; + /* The pid that made this allocation */ + IMG_PID uiPid; + + /* + * iNumOSPagesAllocated: + * Number of pages allocated in this PMR so far. + * This allows for up to (2^31 - 1) pages. With 4KB pages, that's 8TB of memory for each PMR. + */ + IMG_INT32 iNumOSPagesAllocated; + + /* + * uiTotalNumOSPages: + * Total number of pages supported by this PMR. (Fixed as of now due the fixed Page table array size) + * number of "pages" (a.k.a. macro pages, compound pages, higher order pages, etc...) + */ + IMG_UINT32 uiTotalNumOSPages; + + /* + uiLog2AllocPageSize; + + size of each "page" -- this would normally be the same as + PAGE_SHIFT, but we support the idea that we may allocate pages + in larger chunks for better contiguity, using order>0 in the + call to alloc_pages() + */ + IMG_UINT32 uiLog2AllocPageSize; + + /* + ui64DmaMask; + */ + IMG_UINT64 ui64DmaMask; + + /* + For non DMA/CMA allocation, pagearray references the pages + thus allocated; one entry per compound page when compound + pages are used. In addition, for DMA/CMA allocations, we + track the returned cpu virtual and device bus address. 
+ */ + struct page **pagearray; + dma_addr_t *dmaphysarray; + void **dmavirtarray; + + /* + Record at alloc time whether poisoning will be required when the + PMR is freed. + */ + IMG_BOOL bZero; + IMG_BOOL bPoisonOnFree; + IMG_BOOL bPoisonOnAlloc; + IMG_BOOL bOnDemand; + IMG_BOOL bUnpinned; /* Should be protected by page pool lock */ + IMG_BOOL bIsCMA; /* Is CMA memory allocated via DMA framework */ +#ifdef CONFIG_MCST + IMG_BOOL bIsFast; +#endif + + /* + The cache mode of the PMR. Additionally carrying the CPU-Cache-Clean + flag, advising us to do cache maintenance on behalf of the caller. + Boolean used to track if we need to revert the cache attributes + of the pages used in this allocation. Depends on OS/architecture. + */ + IMG_UINT32 ui32CPUCacheFlags; + IMG_BOOL bUnsetMemoryType; +} PMR_OSPAGEARRAY_DATA; + +/*********************************** + * Page pooling for uncached pages * + ***********************************/ +static INLINE void +_FreeOSPage_CMA(struct device *dev, + size_t alloc_size, + IMG_UINT32 uiOrder, + void *virt_addr, + dma_addr_t dev_addr, + struct page *psPage); + +static void +_FreeOSPage(IMG_UINT32 uiOrder, + IMG_BOOL bUnsetMemoryType, + struct page *psPage); + +static PVRSRV_ERROR +_FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, + IMG_UINT32 *pai32FreeIndices, + IMG_UINT32 ui32FreePageCount); + +static PVRSRV_ERROR +_FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree, + IMG_UINT32 *puiPagesFreed); + +/* A struct for our page pool holding an array of zeroed (!) pages. 
+ * We always put units of page arrays to the pool but are + * able to take individual pages */ +typedef struct +{ + /* Linkage for page pool LRU list */ + struct list_head sPagePoolItem; + + /* How many items are still in the page array */ + IMG_UINT32 uiItemsRemaining; + /* Array of the actual pages */ + struct page **ppsPageArray; + +} LinuxPagePoolEntry; + +/* CleanupThread structure to put allocation in page pool */ +typedef struct +{ + PVRSRV_CLEANUP_THREAD_WORK sCleanupWork; + IMG_UINT32 ui32CPUCacheMode; + LinuxPagePoolEntry *psPoolEntry; +} LinuxCleanupData; + +/* A struct for the unpinned items */ +typedef struct +{ + struct list_head sUnpinPoolItem; + PMR_OSPAGEARRAY_DATA *psPageArrayDataPtr; +} LinuxUnpinEntry; + + +/* Caches to hold page pool and page array structures */ +static struct kmem_cache *g_psLinuxPagePoolCache; +static struct kmem_cache *g_psLinuxPageArray; + +/* Track what is live, all protected by pool lock. + * x86 needs two page pools because we have to change the memory attributes + * of the pages which is expensive due to an implicit flush. + * See set_pages_array_uc/wc/wb. */ +static IMG_UINT32 g_ui32UnpinPageCount; +static IMG_UINT32 g_ui32PagePoolUCCount; +#if defined(CONFIG_X86) || defined(CONFIG_E2K) +static IMG_UINT32 g_ui32PagePoolWCCount; +#endif +/* Tracks asynchronous tasks currently accessing the page pool. + * It is incremented if a defer free task + * is created. Both will decrement the value when they finished the work. + * The atomic prevents piling up of deferred work in case the deferred thread + * cannot keep up with the application.*/ +static ATOMIC_T g_iPoolCleanTasks; +/* We don't want too many asynchronous threads trying to access the page pool + * at the same time */ +#define PVR_LINUX_PHYSMEM_MAX_ASYNC_CLEAN_TASKS 128 + +/* Defines how many pages the page cache should hold. 
*/ +#if defined(PVR_LINUX_PHYSMEM_MAX_POOL_PAGES) +static const IMG_UINT32 g_ui32PagePoolMaxEntries = PVR_LINUX_PHYSMEM_MAX_POOL_PAGES; +#else +static const IMG_UINT32 g_ui32PagePoolMaxEntries; +#endif + +/* We double check if we would exceed this limit if we are below MAX_POOL_PAGES + and want to add an allocation to the pool. + This prevents big allocations being given back to the OS just because they + exceed the MAX_POOL_PAGES limit even though the pool is currently empty. */ +#if defined(PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES) +static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries = PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES; +#else +static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries; +#endif + +#if defined(CONFIG_X86) || defined(CONFIG_E2K) +#define PHYSMEM_OSMEM_NUM_OF_POOLS 2 +static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = { + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED, + PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE +}; +#else +#define PHYSMEM_OSMEM_NUM_OF_POOLS 1 +static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = { + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED +}; +#endif + +/* Global structures we use to manage the page pool */ +static DEFINE_MUTEX(g_sPagePoolMutex); + +/* List holding the page array pointers: */ +static LIST_HEAD(g_sPagePoolList_WC); +static LIST_HEAD(g_sPagePoolList_UC); +static LIST_HEAD(g_sUnpinList); + +static inline IMG_UINT32 +_PagesInPoolUnlocked(void) +{ + IMG_UINT32 uiCnt = g_ui32PagePoolUCCount; +#if defined(CONFIG_X86) || defined(CONFIG_E2K) + uiCnt += g_ui32PagePoolWCCount; +#endif + return uiCnt; +} + +static inline void +_PagePoolLock(void) +{ + mutex_lock(&g_sPagePoolMutex); +} + +static inline int +_PagePoolTrylock(void) +{ + return mutex_trylock(&g_sPagePoolMutex); +} + +static inline void +_PagePoolUnlock(void) +{ + mutex_unlock(&g_sPagePoolMutex); +} + +static PVRSRV_ERROR +_AddUnpinListEntryUnlocked(PMR_OSPAGEARRAY_DATA *psOSPageArrayData) +{ + LinuxUnpinEntry *psUnpinEntry; + + 
psUnpinEntry = OSAllocMem(sizeof(*psUnpinEntry)); + if (!psUnpinEntry) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: OSAllocMem failed. Cannot add entry to unpin list.", + __func__)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psUnpinEntry->psPageArrayDataPtr = psOSPageArrayData; + + /* Add into pool that the shrinker can access easily*/ + list_add_tail(&psUnpinEntry->sUnpinPoolItem, &g_sUnpinList); + + g_ui32UnpinPageCount += psOSPageArrayData->iNumOSPagesAllocated; + + return PVRSRV_OK; +} + +static void +_RemoveUnpinListEntryUnlocked(PMR_OSPAGEARRAY_DATA *psOSPageArrayData) +{ + LinuxUnpinEntry *psUnpinEntry, *psTempUnpinEntry; + + /* Remove from pool */ + list_for_each_entry_safe(psUnpinEntry, + psTempUnpinEntry, + &g_sUnpinList, + sUnpinPoolItem) + { + if (psUnpinEntry->psPageArrayDataPtr == psOSPageArrayData) + { + list_del(&psUnpinEntry->sUnpinPoolItem); + break; + } + } + + OSFreeMem(psUnpinEntry); + + g_ui32UnpinPageCount -= psOSPageArrayData->iNumOSPagesAllocated; +} + +static inline IMG_BOOL +_GetPoolListHead(IMG_UINT32 ui32CPUCacheFlags, + struct list_head **ppsPoolHead, + IMG_UINT32 **ppuiCounter) +{ + switch (PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags)) + { + case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE: +#if defined(CONFIG_X86) || defined(CONFIG_E2K) + /* + For x86 we need to keep different lists for uncached + and write-combined as we must always honour the PAT + setting which cares about this difference. + */ + + *ppsPoolHead = &g_sPagePoolList_WC; + *ppuiCounter = &g_ui32PagePoolWCCount; + break; +#endif + + case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: + *ppsPoolHead = &g_sPagePoolList_UC; + *ppuiCounter = &g_ui32PagePoolUCCount; + break; + + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Unknown CPU caching mode. 
" + "Using default UC pool.", + __func__)); + *ppsPoolHead = &g_sPagePoolList_UC; + *ppuiCounter = &g_ui32PagePoolUCCount; + PVR_ASSERT(0); + return IMG_FALSE; + } + return IMG_TRUE; +} + +static struct shrinker g_sShrinker; + +/* Returning the number of pages that still reside in the page pool. */ +static unsigned long +_GetNumberOfPagesInPoolUnlocked(void) +{ + return _PagesInPoolUnlocked() + g_ui32UnpinPageCount; +} + +/* Linux shrinker function that informs the OS about how many pages we are caching and + * it is able to reclaim. */ +static unsigned long +_CountObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl) +{ + int remain; + + PVR_ASSERT(psShrinker == &g_sShrinker); + (void)psShrinker; + (void)psShrinkControl; + + /* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */ + if (_PagePoolTrylock() == 0) + return 0; + remain = _GetNumberOfPagesInPoolUnlocked(); + _PagePoolUnlock(); + + return remain; +} + +/* Linux shrinker function to reclaim the pages from our page pool */ +static unsigned long +_ScanObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl) +{ + unsigned long uNumToScan = psShrinkControl->nr_to_scan; + unsigned long uSurplus = 0; + LinuxUnpinEntry *psUnpinEntry, *psTempUnpinEntry; + IMG_UINT32 uiPagesFreed; + + PVR_ASSERT(psShrinker == &g_sShrinker); + (void)psShrinker; + + /* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */ + if (_PagePoolTrylock() == 0) + return SHRINK_STOP; + + _FreePagesFromPoolUnlocked(uNumToScan, + &uiPagesFreed); + uNumToScan -= uiPagesFreed; + + if (uNumToScan == 0) + { + goto e_exit; + } + + /* Free unpinned memory, starting with LRU entries */ + list_for_each_entry_safe(psUnpinEntry, + psTempUnpinEntry, + &g_sUnpinList, + sUnpinPoolItem) + { + PMR_OSPAGEARRAY_DATA *psPageArrayDataPtr = psUnpinEntry->psPageArrayDataPtr; + IMG_UINT32 uiNumPages = (psPageArrayDataPtr->uiTotalNumOSPages > 
psPageArrayDataPtr->iNumOSPagesAllocated)? + psPageArrayDataPtr->iNumOSPagesAllocated:psPageArrayDataPtr->uiTotalNumOSPages; + PVRSRV_ERROR eError; + + /* Free associated pages */ + eError = _FreeOSPages(psPageArrayDataPtr, + NULL, + 0); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Shrinker is unable to free unpinned pages. Error: %s (%d)", + __func__, + PVRSRVGetErrorString(eError), + eError)); + goto e_exit; + } + + /* Remove item from pool */ + list_del(&psUnpinEntry->sUnpinPoolItem); + + g_ui32UnpinPageCount -= uiNumPages; + + /* Check if there is more to free or if we already surpassed the limit */ + if (uiNumPages < uNumToScan) + { + uNumToScan -= uiNumPages; + + } + else if (uiNumPages > uNumToScan) + { + uSurplus += uiNumPages - uNumToScan; + uNumToScan = 0; + goto e_exit; + } + else + { + uNumToScan -= uiNumPages; + goto e_exit; + } + } + +e_exit: + if (list_empty(&g_sUnpinList)) + { + PVR_ASSERT(g_ui32UnpinPageCount == 0); + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0)) + { + int remain; + remain = _GetNumberOfPagesInPoolUnlocked(); + _PagePoolUnlock(); + return remain; + } +#else + /* Returning the number of pages freed during the scan */ + _PagePoolUnlock(); + return psShrinkControl->nr_to_scan - uNumToScan + uSurplus; +#endif +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0)) +static int +_ShrinkPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl) +{ + if (psShrinkControl->nr_to_scan != 0) + { + return _ScanObjectsInPagePool(psShrinker, psShrinkControl); + } + else + { + /* No pages are being reclaimed so just return the page count */ + return _CountObjectsInPagePool(psShrinker, psShrinkControl); + } +} + +static struct shrinker g_sShrinker = +{ + .shrink = _ShrinkPagePool, + .seeks = DEFAULT_SEEKS +}; +#else +static struct shrinker g_sShrinker = +{ + .count_objects = _CountObjectsInPagePool, + .scan_objects = _ScanObjectsInPagePool, + .seeks = DEFAULT_SEEKS +}; +#endif + +/* Register the 
shrinker so Linux can reclaim cached pages */ +void LinuxInitPhysmem(void) +{ + g_psLinuxPageArray = kmem_cache_create("pvr-pa", sizeof(PMR_OSPAGEARRAY_DATA), 0, 0, NULL); + + g_psLinuxPagePoolCache = kmem_cache_create("pvr-pp", sizeof(LinuxPagePoolEntry), 0, 0, NULL); + if (g_psLinuxPagePoolCache) + { + /* Only create the shrinker if we created the cache OK */ + register_shrinker(&g_sShrinker); + } + + OSAtomicWrite(&g_iPoolCleanTasks, 0); +} + +/* Unregister the shrinker and remove all pages from the pool that are still left */ +void LinuxDeinitPhysmem(void) +{ + IMG_UINT32 uiPagesFreed; + + if (OSAtomicRead(&g_iPoolCleanTasks) > 0) + { + PVR_DPF((PVR_DBG_WARNING, "Still deferred cleanup tasks running " + "while deinitialising memory subsystem.")); + } + + _PagePoolLock(); + if (_FreePagesFromPoolUnlocked(IMG_UINT32_MAX, &uiPagesFreed) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Unable to free all pages from page pool when " + "deinitialising memory subsystem.")); + PVR_ASSERT(0); + } + + PVR_ASSERT(_PagesInPoolUnlocked() == 0); + + /* Free the page cache */ + kmem_cache_destroy(g_psLinuxPagePoolCache); + + unregister_shrinker(&g_sShrinker); + _PagePoolUnlock(); + + kmem_cache_destroy(g_psLinuxPageArray); +} + +static void EnableOOMKiller(void) +{ + current->flags &= ~PF_DUMPCORE; +} + +static void DisableOOMKiller(void) +{ + /* PF_DUMPCORE is treated by the VM as if the OOM killer was disabled. + * + * As oom_killer_disable() is an inline, non-exported function, we + * can't use it from a modular driver. Furthermore, the OOM killer + * API doesn't look thread safe, which 'current' is. 
+ */ + WARN_ON(current->flags & PF_DUMPCORE); + current->flags |= PF_DUMPCORE; +} + +/* Prints out the addresses in a page array for debugging purposes + * Define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY locally to activate: */ +/* #define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY 1 */ +static inline void +_DumpPageArray(struct page **pagearray, IMG_UINT32 uiPagesToPrint) +{ +#if defined(PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY) + IMG_UINT32 i; + if (pagearray) + { + printk("Array %p:\n", pagearray); + for (i = 0; i < uiPagesToPrint; i++) + { + printk("%p | ", (pagearray)[i]); + } + printk("\n"); + } + else + { + printk("Array is NULL:\n"); + } +#else + PVR_UNREFERENCED_PARAMETER(pagearray); + PVR_UNREFERENCED_PARAMETER(uiPagesToPrint); +#endif +} + +/* Debugging function that dumps out the number of pages for every + * page array that is currently in the page pool. + * Not defined by default. Define locally to activate feature: */ +/* #define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_POOL 1 */ +static void +_DumpPoolStructure(void) +{ +#if defined(PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_POOL) + LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry; + struct list_head *psPoolHead = NULL; + IMG_UINT32 j; + IMG_UINT32 *puiCounter; + + printk("\n"); + /* Empty all pools */ + for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++) + { + + printk("pool = %u\n", j); + + /* Get the correct list for this caching mode */ + if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead, &puiCounter)) + { + break; + } + + list_for_each_entry_safe(psPagePoolEntry, + psTempPoolEntry, + psPoolHead, + sPagePoolItem) + { + printk("%u | ", psPagePoolEntry->uiItemsRemaining); + } + printk("\n"); + } +#endif +} + +/* Free a certain number of pages from the page pool. + * Mainly used in error paths or at deinitialisation to + * empty the whole pool. 
*/ +static PVRSRV_ERROR +_FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree, + IMG_UINT32 *puiPagesFreed) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry; + struct list_head *psPoolHead = NULL; + IMG_UINT32 i, j; + IMG_UINT32 *puiCounter; + + *puiPagesFreed = uiMaxPagesToFree; + + /* Empty all pools */ + for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++) + { + + /* Get the correct list for this caching mode */ + if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead, &puiCounter)) + { + break; + } + + /* Free the pages and remove page arrays from the pool if they are exhausted */ + list_for_each_entry_safe(psPagePoolEntry, + psTempPoolEntry, + psPoolHead, + sPagePoolItem) + { + IMG_UINT32 uiItemsToFree; + struct page **ppsPageArray; + + /* Check if we are going to free the whole page array or just parts */ + if (psPagePoolEntry->uiItemsRemaining <= uiMaxPagesToFree) + { + uiItemsToFree = psPagePoolEntry->uiItemsRemaining; + ppsPageArray = psPagePoolEntry->ppsPageArray; + } + else + { + uiItemsToFree = uiMaxPagesToFree; + ppsPageArray = &(psPagePoolEntry->ppsPageArray[psPagePoolEntry->uiItemsRemaining - uiItemsToFree]); + } + +#if defined(CONFIG_X86) || defined(CONFIG_E2K) + /* Set the correct page caching attributes on x86 */ + if (!PVRSRV_CHECK_CPU_CACHED(g_aui32CPUCacheFlags[j])) + { + int ret; + ret = set_pages_array_wb(ppsPageArray, uiItemsToFree); + if (ret) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to reset page attributes", + __func__)); + eError = PVRSRV_ERROR_FAILED_TO_FREE_PAGES; + goto e_exit; + } + } +#endif + + /* Free the actual pages */ + for (i = 0; i < uiItemsToFree; i++) + { + __free_pages(ppsPageArray[i], 0); + ppsPageArray[i] = NULL; + } + + /* Reduce counters */ + uiMaxPagesToFree -= uiItemsToFree; + *puiCounter -= uiItemsToFree; + psPagePoolEntry->uiItemsRemaining -= uiItemsToFree; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + /* + * MemStats usually relies on having the bridge lock held, 
however + * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and + * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so + * the page pool lock is used to ensure these calls are mutually + * exclusive + */ + PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * uiItemsToFree); +#endif + + /* Is this pool entry exhausted, delete it */ + if (psPagePoolEntry->uiItemsRemaining == 0) + { + OSFreeMemNoStats(psPagePoolEntry->ppsPageArray); + list_del(&psPagePoolEntry->sPagePoolItem); + kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry); + } + + /* Return if we have all our pages */ + if (uiMaxPagesToFree == 0) + { + goto e_exit; + } + } + } + +e_exit: + *puiPagesFreed -= uiMaxPagesToFree; + _DumpPoolStructure(); + return eError; +} + +/* Get a certain number of pages from the page pool and + * copy them directly into a given page array. */ +static void +_GetPagesFromPoolUnlocked(IMG_UINT32 ui32CPUCacheFlags, + IMG_UINT32 uiMaxNumPages, + struct page **ppsPageArray, + IMG_UINT32 *puiNumReceivedPages) +{ + LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry; + struct list_head *psPoolHead = NULL; + IMG_UINT32 i; + IMG_UINT32 *puiCounter; + + *puiNumReceivedPages = 0; + + /* Get the correct list for this caching mode */ + if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead, &puiCounter)) + { + return; + } + + /* Check if there are actually items in the list */ + if (list_empty(psPoolHead)) + { + return; + } + + PVR_ASSERT(*puiCounter > 0); + + /* Receive pages from the pool */ + list_for_each_entry_safe(psPagePoolEntry, + psTempPoolEntry, + psPoolHead, + sPagePoolItem) + { + /* Get the pages from this pool entry */ + for (i = psPagePoolEntry->uiItemsRemaining; i != 0 && *puiNumReceivedPages < uiMaxNumPages; i--) + { + ppsPageArray[*puiNumReceivedPages] = psPagePoolEntry->ppsPageArray[i-1]; + (*puiNumReceivedPages)++; + psPagePoolEntry->uiItemsRemaining--; + } + + /* Is this pool entry exhausted, delete it */ + if (psPagePoolEntry->uiItemsRemaining == 
0) + { + OSFreeMemNoStats(psPagePoolEntry->ppsPageArray); + list_del(&psPagePoolEntry->sPagePoolItem); + kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry); + } + + /* Return if we have all our pages */ + if (*puiNumReceivedPages == uiMaxNumPages) + { + goto exit_ok; + } + } + +exit_ok: + + /* Update counters */ + *puiCounter -= *puiNumReceivedPages; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + /* MemStats usually relies on having the bridge lock held, however + * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and + * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so + * the page pool lock is used to ensure these calls are mutually + * exclusive + */ + PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * (*puiNumReceivedPages)); +#endif + + _DumpPoolStructure(); + return; +} + +/* Same as _GetPagesFromPoolUnlocked but handles locking and + * checks first whether pages from the pool are a valid option. */ +static inline void +_GetPagesFromPoolLocked(PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32CPUCacheFlags, + IMG_UINT32 uiPagesToAlloc, + IMG_UINT32 uiOrder, + IMG_BOOL bZero, + struct page **ppsPageArray, + IMG_UINT32 *puiPagesFromPool) +{ +#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) + PVR_UNREFERENCED_PARAMETER(bZero); +#else + /* Don't get pages from pool if it doesn't provide zeroed pages */ + if (bZero) + { + return; + } +#endif + + /* The page pool stores only order 0 pages. If we need zeroed memory we + * directly allocate from the OS because it is faster than + * doing it within the driver. 
*/ + if (uiOrder == 0 && + !PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags)) + { + + _PagePoolLock(); + _GetPagesFromPoolUnlocked(ui32CPUCacheFlags, + uiPagesToAlloc, + ppsPageArray, + puiPagesFromPool); + _PagePoolUnlock(); + } + + return; +} + +/* Takes a page array and maps it into the kernel to write zeros */ +static PVRSRV_ERROR +_ZeroPageArray(IMG_UINT32 uiNumToClean, + struct page **ppsCleanArray, + pgprot_t pgprot) +{ + IMG_CPU_VIRTADDR pvAddr; + IMG_UINT32 uiMaxPagesToMap = MIN(PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES, + uiNumToClean); + + /* Map and fill the pages with zeros. + * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_SIZE + * at a time. */ + while (uiNumToClean != 0) + { + IMG_UINT32 uiToClean = (uiNumToClean >= uiMaxPagesToMap) ? + uiMaxPagesToMap : + uiNumToClean; + +#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS) + pvAddr = vmap(ppsCleanArray, uiToClean, VM_WRITE, pgprot); +#else + pvAddr = vm_map_ram(ppsCleanArray, uiToClean, -1, pgprot); +#endif + if (!pvAddr) + { + if (uiMaxPagesToMap <= 1) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Out of vmalloc memory, " + "unable to map pages for zeroing.", + __func__)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + else + { + /* Halve the pages to map at once and try again. 
*/ + uiMaxPagesToMap = uiMaxPagesToMap >> 1; + continue; + } + } + + OSDeviceMemSet(pvAddr, 0, PAGE_SIZE * uiToClean); + +#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS) + vunmap(pvAddr); +#else + vm_unmap_ram(pvAddr, uiToClean); +#endif + + ppsCleanArray = &(ppsCleanArray[uiToClean]); + uiNumToClean -= uiToClean; + } + + return PVRSRV_OK; +} + +#ifndef CONFIG_MCST +static PVRSRV_ERROR +_CleanupThread_CleanPages(void *pvData) +{ + LinuxCleanupData *psCleanupData = (LinuxCleanupData*) pvData; + LinuxPagePoolEntry *psPagePoolEntry = psCleanupData->psPoolEntry; + struct list_head *psPoolHead = NULL; + IMG_UINT32 *puiCounter = NULL; +#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) + PVRSRV_ERROR eError; + pgprot_t pgprot; + IMG_UINT32 i; +#endif /* defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) */ + + /* Get the correct pool for this caching mode. */ + _GetPoolListHead(psCleanupData->ui32CPUCacheMode , &psPoolHead, &puiCounter); + +#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) + switch (PVRSRV_CPU_CACHE_MODE(psCleanupData->ui32CPUCacheMode)) + { + case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: +#if defined(CONFIG_X86) || defined(CONFIG_E2K) + /* For x86 we can only map with the same attributes + * as in the PAT settings*/ + pgprot = pgprot_noncached(PAGE_KERNEL); + break; +#endif + + case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE: + pgprot = pgprot_writecombine(PAGE_KERNEL); + break; + + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Unknown caching mode to set page protection flags.", + __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto eExit; + } + + /* Map and fill the pages with zeros. + * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_SIZE + * at a time. 
*/ + eError = _ZeroPageArray(psPagePoolEntry->uiItemsRemaining, + psPagePoolEntry->ppsPageArray, + pgprot); + if (eError != PVRSRV_OK) + { + goto eExit; + } +#endif /* defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) */ + + /* Lock down pool and add item */ + _PagePoolLock(); + + /* Pool counters were already updated so don't do it here again*/ + + /* The pages are all zeroed so return them to the pool. */ + list_add_tail(&psPagePoolEntry->sPagePoolItem, psPoolHead); + + _DumpPoolStructure(); + _PagePoolUnlock(); + + OSFreeMem(pvData); + OSAtomicDecrement(&g_iPoolCleanTasks); + + return PVRSRV_OK; + +#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) +eExit: + /* we failed to zero the pages so return the error so we can + * retry during the next spin */ + if ((psCleanupData->sCleanupWork.ui32RetryCount - 1) > 0) + { + return eError; + } + + /* this was the last retry, give up and free pages to OS */ + PVR_DPF((PVR_DBG_ERROR, + "%s: Deferred task error, freeing pages to OS.", + __func__)); + _PagePoolLock(); + + *puiCounter -= psPagePoolEntry->uiItemsRemaining; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + /* MemStats usually relies on having the bridge lock held, however + * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and + * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so + * the page pool lock is used to ensure these calls are mutually + * exclusive + */ + PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * psCleanupData->psPoolEntry->uiItemsRemaining); +#endif + + _PagePoolUnlock(); + + for (i = 0; i < psCleanupData->psPoolEntry->uiItemsRemaining; i++) + { + _FreeOSPage(0, IMG_TRUE, psPagePoolEntry->ppsPageArray[i]); + } + OSFreeMemNoStats(psPagePoolEntry->ppsPageArray); + kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry); + OSFreeMem(psCleanupData); + + OSAtomicDecrement(&g_iPoolCleanTasks); + + return PVRSRV_OK; +#endif /* defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) */ +} +#endif /*CONFIG_MCST*/ + +/* Put page array to the page pool. 
+ * Handles locking and checks whether the pages are + * suitable to be stored in the pool. */ +static inline IMG_BOOL +_PutPagesToPoolLocked(IMG_UINT32 ui32CPUCacheFlags, + struct page **ppsPageArray, + IMG_BOOL bUnpinned, + IMG_UINT32 uiOrder, + IMG_UINT32 uiNumPages) +{ + /* Because we always allocate memory through CMA (see bIsCMA) + * this pool is not needed anymore. Actually, it just slows + * things down as it currently is designed to call set_memory_wb() + * before returning pages. */ +#ifndef CONFIG_MCST + LinuxCleanupData *psCleanupData; + PVRSRV_CLEANUP_THREAD_WORK *psCleanupThreadFn; +#if defined(SUPPORT_PHYSMEM_TEST) + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); +#endif + + if (uiOrder == 0 && + !bUnpinned && + !PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags)) + { + IMG_UINT32 uiEntries; + IMG_UINT32 *puiCounter; + struct list_head *psPoolHead; + + + _PagePoolLock(); + + uiEntries = _PagesInPoolUnlocked(); + + /* Check for number of current page pool entries and whether + * we have other asynchronous tasks in-flight */ + if ( (uiEntries < g_ui32PagePoolMaxEntries) && + ((uiEntries + uiNumPages) < + (g_ui32PagePoolMaxEntries + g_ui32PagePoolMaxExcessEntries) )) + { + if (OSAtomicIncrement(&g_iPoolCleanTasks) <= + PVR_LINUX_PHYSMEM_MAX_ASYNC_CLEAN_TASKS) + { +#if defined(SUPPORT_PHYSMEM_TEST) + if (!psPVRSRVData->hCleanupThread) + { + goto eDecrement; + } +#endif + + psCleanupData = OSAllocMem(sizeof(*psCleanupData)); + + if (!psCleanupData) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to get memory for deferred page pool cleanup. " + "Trying to free pages immediately", + __func__)); + goto eDecrement; + } + + psCleanupThreadFn = &psCleanupData->sCleanupWork; + psCleanupData->ui32CPUCacheMode = ui32CPUCacheFlags; + psCleanupData->psPoolEntry = kmem_cache_alloc(g_psLinuxPagePoolCache, GFP_KERNEL); + + if (!psCleanupData->psPoolEntry) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to get memory for deferred page pool cleanup. 
" + "Trying to free pages immediately", + __func__)); + goto eFreeCleanupData; + } + + if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead, &puiCounter)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to get correct page pool", + __func__)); + goto eFreePoolEntry; + } + + /* Increase counter here to avoid deferred cleanup tasks piling up */ + *puiCounter = *puiCounter + uiNumPages; + + psCleanupData->psPoolEntry->ppsPageArray = ppsPageArray; + psCleanupData->psPoolEntry->uiItemsRemaining = uiNumPages; + + psCleanupThreadFn->pfnFree = _CleanupThread_CleanPages; + psCleanupThreadFn->pvData = psCleanupData; + psCleanupThreadFn->bDependsOnHW = IMG_FALSE; + CLEANUP_THREAD_SET_RETRY_COUNT(psCleanupThreadFn, + CLEANUP_THREAD_RETRY_COUNT_DEFAULT); + + #if defined(PVRSRV_ENABLE_PROCESS_STATS) + /* MemStats usually relies on having the bridge lock held, however + * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and + * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so + * the page pool lock is used to ensure these calls are mutually + * exclusive + */ + PVRSRVStatsIncrMemAllocPoolStat(PAGE_SIZE * uiNumPages); + #endif + + /* We must not hold the pool lock when calling AddWork because it might call us back to + * free pooled pages directly when unloading the driver */ + _PagePoolUnlock(); + + PVRSRVCleanupThreadAddWork(psCleanupThreadFn); + + + } + else + { + goto eDecrement; + } + + } + else + { + goto eUnlock; + } + } + else + { + goto eExitFalse; + } + + return IMG_TRUE; + +eFreePoolEntry: + OSFreeMem(psCleanupData->psPoolEntry); +eFreeCleanupData: + OSFreeMem(psCleanupData); +eDecrement: + OSAtomicDecrement(&g_iPoolCleanTasks); +eUnlock: + _PagePoolUnlock(); +eExitFalse: +#endif /*CONFIG_MCST*/ + return IMG_FALSE; +} + +/* Get the GFP flags that we pass to the page allocator */ +static inline gfp_t +_GetGFPFlags(IMG_BOOL bZero, + PVRSRV_DEVICE_NODE *psDevNode) +{ + struct device *psDev = psDevNode->psDevConfig->pvOSDevice; + gfp_t gfp_flags = 
GFP_USER | __GFP_NOWARN | __GFP_NOMEMALLOC; + +#if defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY) + /* Force use of HIGHMEM */ + gfp_flags |= __GFP_HIGHMEM; + + PVR_UNREFERENCED_PARAMETER(psDev); +#else + if (psDev) + { +#if defined(CONFIG_64BIT) || defined(CONFIG_ARM_LPAE) || defined(CONFIG_X86_PAE) || defined(CONFIG_E2K) + if (*psDev->dma_mask > DMA_BIT_MASK(32)) + { + /* If our system is able to handle large addresses use highmem */ + gfp_flags |= __GFP_HIGHMEM; + } + else if (*psDev->dma_mask == DMA_BIT_MASK(32)) + { + /* Limit to 32 bit. + * Achieved by setting __GFP_DMA32 for 64 bit systems */ + gfp_flags |= __GFP_DMA32; + } + else + { + /* Limit to size of DMA zone. */ + gfp_flags |= __GFP_DMA; + } +#else + if (*psDev->dma_mask < DMA_BIT_MASK(32)) + { + gfp_flags |= __GFP_DMA; + } + else + { + gfp_flags |= __GFP_HIGHMEM; + } +#endif /* if defined(CONFIG_64BIT) || defined(CONFIG_ARM_LPAE) || defined(CONFIG_X86_PAE) || defined(CONFIG_E2K) */ + } + +#endif /* if defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY) */ + + if (bZero) + { + gfp_flags |= __GFP_ZERO; + } + + return gfp_flags; +} + +/* + * @Function _PoisonDevicePage + * + * @Description Poisons a device page. In normal case the device page has the + * same size as the OS page and so the ui32DevPageOrder will be + * equal to 0 and page argument will point to one OS page + * structure. In case of Non4K pages the order will be greater + * than 0 and page argument will point to an array of OS + * allocated pages. 
+ * + * @Input psDevNode pointer to the device object + * @Input page array of the pages allocated by from the OS + * @Input ui32DevPageOrder order of the page (same as the one used to allocate + * the page array by alloc_pages()) + * @Input ui32CPUCacheFlags CPU cache flags applied to the page + * @Input ui8PoisonValue value used to poison the page + */ +static void +_PoisonDevicePage(PVRSRV_DEVICE_NODE *psDevNode, + struct page *page, + IMG_UINT32 ui32DevPageOrder, + IMG_UINT32 ui32CPUCacheFlags, + IMG_BYTE ui8PoisonValue) +{ + IMG_CPU_PHYADDR sCPUPhysAddrStart, sCPUPhysAddrEnd; + IMG_UINT32 ui32OsPageIdx; + + for (ui32OsPageIdx = 0; + ui32OsPageIdx < (1U << ui32DevPageOrder); + ui32OsPageIdx++) + { + struct page *current_page = page + ui32OsPageIdx; + void *kvaddr = kmap_atomic(current_page); + + if (PVRSRV_CHECK_CPU_UNCACHED(ui32CPUCacheFlags) || + PVRSRV_CHECK_CPU_WRITE_COMBINE(ui32CPUCacheFlags)) + { + OSDeviceMemSet(kvaddr, ui8PoisonValue, PAGE_SIZE); + } + else + { + OSCachedMemSet(kvaddr, ui8PoisonValue, PAGE_SIZE); + } + + sCPUPhysAddrStart.uiAddr = page_to_phys(current_page); + sCPUPhysAddrEnd.uiAddr = sCPUPhysAddrStart.uiAddr + PAGE_SIZE; + + OSCPUCacheFlushRangeKM(psDevNode, + kvaddr, kvaddr + PAGE_SIZE, + sCPUPhysAddrStart, sCPUPhysAddrEnd); + + kunmap_atomic(kvaddr); + } +} + +/* Allocate and initialise the structure to hold the metadata of the allocation */ +static PVRSRV_ERROR +_AllocOSPageArray(PVRSRV_DEVICE_NODE *psDevNode, + PMR_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 uiLog2AllocPageSize, + IMG_BOOL bZero, + IMG_BOOL bIsCMA, + IMG_BOOL bPoisonOnAlloc, + IMG_BOOL bPoisonOnFree, + IMG_BOOL bOnDemand, + IMG_UINT32 ui32CPUCacheFlags, + IMG_PID uiPid, + PMR_OSPAGEARRAY_DATA **ppsPageArrayDataPtr) +{ + PVRSRV_ERROR eError; + PMR_SIZE_T uiSize = uiChunkSize * ui32NumVirtChunks; + IMG_UINT32 uiNumOSPageSizeVirtPages; + IMG_UINT32 uiNumDevPageSizeVirtPages; + PMR_OSPAGEARRAY_DATA *psPageArrayData; 
+ IMG_UINT64 ui64DmaMask = 0; + PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks); + + /* Use of cast below is justified by the assertion that follows to + * prove that no significant bits have been truncated */ + uiNumOSPageSizeVirtPages = (IMG_UINT32) (((uiSize - 1) >> PAGE_SHIFT) + 1); + PVR_ASSERT(((PMR_SIZE_T) uiNumOSPageSizeVirtPages << PAGE_SHIFT) == uiSize); + + uiNumDevPageSizeVirtPages = uiNumOSPageSizeVirtPages >> (uiLog2AllocPageSize - PAGE_SHIFT); + + /* Allocate the struct to hold the metadata */ + psPageArrayData = kmem_cache_alloc(g_psLinuxPageArray, GFP_KERNEL); + if (psPageArrayData == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: OS refused the memory allocation for the private data.", + __func__)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e_freed_none; + } + + /* + * Allocate the page array + * + * We avoid tracking this memory because this structure might go into the page pool. + * The OS can drain the pool asynchronously and when doing that we have to avoid + * any potential deadlocks. + * + * In one scenario the process stats vmalloc hash table lock is held and then + * the oom-killer softirq is trying to call _ScanObjectsInPagePool(), it must not + * try to acquire the vmalloc hash table lock again. 
+ */ + psPageArrayData->pagearray = OSAllocZMemNoStats(sizeof(struct page *) * uiNumDevPageSizeVirtPages); + if (psPageArrayData->pagearray == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e_free_kmem_cache; + } + else + { + if (bIsCMA) + { + /* Allocate additional DMA/CMA cpu kernel virtual address & device bus address array state */ + psPageArrayData->dmavirtarray = OSAllocZMemNoStats(sizeof(void*) * uiNumDevPageSizeVirtPages); + if (psPageArrayData->dmavirtarray == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e_free_pagearray; + } + + psPageArrayData->dmaphysarray = OSAllocZMemNoStats(sizeof(dma_addr_t) * uiNumDevPageSizeVirtPages); + if (psPageArrayData->dmaphysarray == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e_free_cpuvirtaddrarray; + } + } + } + + if (psDevNode->psDevConfig && psDevNode->psDevConfig->pvOSDevice) + { + struct device *psDev = psDevNode->psDevConfig->pvOSDevice; + ui64DmaMask = *psDev->dma_mask; + } + + /* Init metadata */ + psPageArrayData->psDevNode = psDevNode; + psPageArrayData->uiPid = uiPid; + psPageArrayData->iNumOSPagesAllocated = 0; + psPageArrayData->uiTotalNumOSPages = uiNumOSPageSizeVirtPages; + psPageArrayData->uiLog2AllocPageSize = uiLog2AllocPageSize; + psPageArrayData->ui64DmaMask = ui64DmaMask; + psPageArrayData->bZero = bZero; + psPageArrayData->bIsCMA = bIsCMA; + psPageArrayData->bOnDemand = bOnDemand; + psPageArrayData->bUnpinned = IMG_FALSE; + psPageArrayData->bPoisonOnFree = bPoisonOnFree; + psPageArrayData->bPoisonOnAlloc = bPoisonOnAlloc; + psPageArrayData->ui32CPUCacheFlags = ui32CPUCacheFlags; +#ifdef CONFIG_MCST + psPageArrayData->bIsFast = IMG_FALSE; +#endif + /* Indicate whether this is an allocation with default caching attribute (i.e cached) or not */ + if (PVRSRV_CHECK_CPU_UNCACHED(ui32CPUCacheFlags) || + PVRSRV_CHECK_CPU_WRITE_COMBINE(ui32CPUCacheFlags)) + { + psPageArrayData->bUnsetMemoryType = IMG_TRUE; + } + else + { + psPageArrayData->bUnsetMemoryType = IMG_FALSE; + } + + 
*ppsPageArrayDataPtr = psPageArrayData; + return PVRSRV_OK; + +/* Error path */ +e_free_cpuvirtaddrarray: + OSFreeMemNoStats(psPageArrayData->dmavirtarray); + +e_free_pagearray: + OSFreeMemNoStats(psPageArrayData->pagearray); + +e_free_kmem_cache: + kmem_cache_free(g_psLinuxPageArray, psPageArrayData); + PVR_DPF((PVR_DBG_ERROR, + "%s: OS refused the memory allocation for the page pointer table. " + "Did you ask for too much?", + __func__)); + +e_freed_none: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +static inline void +_ApplyCacheMaintenance(PVRSRV_DEVICE_NODE *psDevNode, + struct page **ppsPage, + IMG_UINT32 uiNumPages, + IMG_BOOL bFlush) +{ + void * pvAddr; + + if (OSCPUCacheOpAddressType() == OS_CACHE_OP_ADDR_TYPE_VIRTUAL) + { + pgprot_t pgprot = PAGE_KERNEL; + + IMG_UINT32 uiNumToClean = uiNumPages; + struct page **ppsCleanArray = ppsPage; + + /* Map and flush page. + * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_SIZE + * at a time. */ + while (uiNumToClean != 0) + { + IMG_UINT32 uiToClean = MIN(PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES, + uiNumToClean); + IMG_CPU_PHYADDR sUnused = + { IMG_CAST_TO_CPUPHYADDR_UINT(0xCAFEF00DDEADBEEFULL) }; + + pvAddr = vm_map_ram(ppsCleanArray, uiToClean, -1, pgprot); + if (!pvAddr) + { + PVR_DPF((PVR_DBG_ERROR, + "Unable to flush page cache for new allocation, skipping flush.")); + return; + } + + CacheOpExec(psDevNode, + pvAddr, + pvAddr + PAGE_SIZE, + sUnused, + sUnused, + PVRSRV_CACHE_OP_FLUSH); + + vm_unmap_ram(pvAddr, uiToClean); + + ppsCleanArray = &(ppsCleanArray[uiToClean]); + uiNumToClean -= uiToClean; + } + } + else + { + IMG_UINT32 ui32Idx; + + for (ui32Idx = 0; ui32Idx < uiNumPages; ++ui32Idx) + { + IMG_CPU_PHYADDR sCPUPhysAddrStart, sCPUPhysAddrEnd; + + pvAddr = kmap(ppsPage[ui32Idx]); + sCPUPhysAddrStart.uiAddr = page_to_phys(ppsPage[ui32Idx]); + sCPUPhysAddrEnd.uiAddr = sCPUPhysAddrStart.uiAddr + PAGE_SIZE; + + /* If we're zeroing, we need to make sure the cleared memory is pushed out + * of the 
cache before the cache lines are invalidated */ + CacheOpExec(psDevNode, + pvAddr, + pvAddr + PAGE_SIZE, + sCPUPhysAddrStart, + sCPUPhysAddrEnd, + PVRSRV_CACHE_OP_FLUSH); + + kunmap(ppsPage[ui32Idx]); + } + } +} + +/* Change the caching attribute of pages on x86 systems and takes care of + * cache maintenance. This function is supposed to be called once for pages that + * came from alloc_pages(). It expects an array of OS page sized pages! + * + * Flush/Invalidate pages in case the allocation is not cached. Necessary to + * remove pages from the cache that might be flushed later and corrupt memory. */ +static inline PVRSRV_ERROR +_ApplyOSPagesAttribute(PVRSRV_DEVICE_NODE *psDevNode, + struct page **ppsPage, + IMG_UINT32 uiNumPages, + IMG_BOOL bFlush, + IMG_UINT32 ui32CPUCacheFlags) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_BOOL bCPUCached = PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags); + IMG_BOOL bCPUUncached = PVRSRV_CHECK_CPU_UNCACHED(ui32CPUCacheFlags); + IMG_BOOL bCPUWriteCombine = PVRSRV_CHECK_CPU_WRITE_COMBINE(ui32CPUCacheFlags); + + if (ppsPage != NULL && uiNumPages != 0) + { +#if defined(CONFIG_X86) || defined(CONFIG_E2K) + /* On x86 we have to set page cache attributes for non-cached pages. + * The call is implicitly taking care of all flushing/invalidating + * and therefore we can skip the usual cache maintenance after this. */ + if (bCPUUncached || bCPUWriteCombine) + { + /* On x86 if we already have a mapping (e.g. 
low memory) we need to change the mode of + current mapping before we map it ourselves */ + int ret = IMG_FALSE; + PVR_UNREFERENCED_PARAMETER(bFlush); + + switch (PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags)) + { + case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: + ret = set_pages_array_uc(ppsPage, uiNumPages); + if (ret) + { + eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE; + PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to UC failed, returned %d", ret)); + } + break; + + case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE: + ret = set_pages_array_wc(ppsPage, uiNumPages); + if (ret) + { + eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE; + PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to WC failed, returned %d", ret)); + } + break; + + case PVRSRV_MEMALLOCFLAG_CPU_CACHED: + break; + + default: + break; + } + } + else +#endif + { + if ( bFlush || + bCPUUncached || bCPUWriteCombine || + (bCPUCached && PVRSRV_CHECK_CPU_CACHE_CLEAN(ui32CPUCacheFlags)) ) + { + /* We can be given pages which still remain in the cache. + In order to make sure that the data we write through our mappings + doesn't get overwritten by later cache evictions we invalidate the + pages that are given to us. + + Note: + This still seems to be true if we request cold pages, it's just less + likely to be in the cache. */ + _ApplyCacheMaintenance(psDevNode, + ppsPage, + uiNumPages, + bFlush); + } + } + } + + return eError; +} + +#ifdef CONFIG_MCST +static struct page *osmem_get_page(void *virt) +{ + BUG_ON(!is_vmalloc_addr(virt) && !virt_addr_valid(virt)); + if (is_vmalloc_addr(virt)) + return vmalloc_to_page(virt); + else + return virt_to_page(virt); +} +#endif +/* Same as _AllocOSPage except it uses DMA framework to perform allocation. + * uiPageIndex is expected to be the pagearray index where to store the higher order page. 
*/ +static PVRSRV_ERROR +_AllocOSPage_CMA(PMR_OSPAGEARRAY_DATA *psPageArrayData, + gfp_t gfp_flags, + IMG_UINT32 ui32AllocOrder, + IMG_UINT32 ui32MinOrder, + IMG_UINT32 uiPageIndex) +{ +#ifdef CONFIG_MCST + struct pci_dev *pdev = NULL; + unsigned long attrs = 0; +#else + IMG_UINT32 uiAllocIsMisaligned; +#endif + void *virt_addr; + struct page *page; + dma_addr_t bus_addr; + size_t alloc_size = PAGE_SIZE << ui32AllocOrder; + struct device *dev = psPageArrayData->psDevNode->psDevConfig->pvOSDevice; + PVR_ASSERT(ui32AllocOrder == ui32MinOrder); +#ifdef CONFIG_MCST + if (!dev) {/*FIXME:*/ + pdev = pci_get_device(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_3D_IMAGINATION_GX6650, NULL); + dev = &pdev->dev; + } + + if (psPageArrayData->uiLog2AllocPageSize != PAGE_SHIFT) + attrs |= DMA_ATTR_FORCE_CONTIGUOUS; + + virt_addr = dma_alloc_attrs(dev, + alloc_size, + &bus_addr, + gfp_flags, attrs); + + pci_dev_put(pdev); + if (virt_addr == NULL) { + EnableOOMKiller(); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + BUG_ON(psPageArrayData->uiLog2AllocPageSize != PAGE_SHIFT && + is_vmalloc_addr(virt_addr)); + page = osmem_get_page(virt_addr); + /*FIXME: Why this alignment is needed for? */ + /*BUG_ON(DMA_GET_ADDR(bus_addr) & ((PAGE_SIZE<> PAGE_SHIFT); +#else + /* Assumes bus address space is identical to physical address space */ + page = phys_to_page(bus_addr); +#endif + } + + EnableOOMKiller(); + + /* Physical allocation alignment works/hidden behind the scene transparently, + we do this here if the allocated buffer address does not meet its alignment + requirement by over-allocating using the next power-2 order and reporting + aligned-adjusted values back to meet the requested alignment constraint. + Evidently we waste memory by doing this so should only do so if we do not + initially meet the alignment constraint. 
*/ + uiAllocIsMisaligned = DMA_GET_ADDR(bus_addr) & ((PAGE_SIZE< ui32MinOrder) + { + IMG_BOOL bUsedAllocPages = DMA_IS_ALLOCPG_ADDR(bus_addr); + if (ui32AllocOrder == ui32MinOrder) + { + if (bUsedAllocPages) + { + __free_pages(page, ui32AllocOrder); + } + else + { + dma_free_coherent(dev, alloc_size, virt_addr, bus_addr); + } + + ui32AllocOrder = ui32AllocOrder + 1; + alloc_size = PAGE_SIZE << ui32AllocOrder; + + PVR_ASSERT(uiAllocIsMisaligned != 0); + } + else + { + size_t align_adjust = PAGE_SIZE << ui32MinOrder; + + /* Adjust virtual/bus addresses to meet alignment */ + bus_addr = bUsedAllocPages ? page_to_phys(page) : bus_addr; + align_adjust = PVR_ALIGN((size_t)bus_addr, align_adjust); + align_adjust -= (size_t)bus_addr; + + if (align_adjust) + { + if (bUsedAllocPages) + { + page += align_adjust >> PAGE_SHIFT; + bus_addr = DMA_SET_ALLOCPG_ADDR(page_to_phys(page)); + virt_addr = (void*)(uintptr_t) DMA_VADDR_NOT_IN_USE; + } + else + { + bus_addr += align_adjust; + virt_addr += align_adjust; +#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64) + page = pfn_to_page(bus_addr >> PAGE_SHIFT); +#else + /* Assumes bus address space is identical to physical address space */ + page = phys_to_page(bus_addr); +#endif + } + + /* Store adjustments in PAGE_SIZE counts */ + align_adjust = align_adjust >> PAGE_SHIFT; + bus_addr = DMA_SET_ALIGN_ADJUSTMENT(bus_addr, align_adjust); + } + + /* Taint bus_addr due to over-allocation, allows us to free + * memory correctly */ + bus_addr = DMA_SET_ADJUSTED_ADDR(bus_addr); + uiAllocIsMisaligned = 0; + } + } + } while (uiAllocIsMisaligned); +#endif /*CONFIG_MCST*/ + + /* Convert OSPageSize-based index into DevicePageSize-based index */ + psPageArrayData->dmavirtarray[uiPageIndex] = virt_addr; + psPageArrayData->dmaphysarray[uiPageIndex] = bus_addr; + psPageArrayData->pagearray[uiPageIndex] = page; + + return PVRSRV_OK; +} + +/* Allocate a page of order uiAllocOrder and stores it in the page array ppsPage at + * position uiPageIndex. 
+ * + * If the order is higher than 0, it splits the page into multiples and + * stores them at position uiPageIndex to uiPageIndex+(1<= KERNEL_VERSION(3,10,0)) + /* In case we need to, split the higher order page; + this should only be used for order-0 allocations + as higher order allocations should use DMA/CMA */ + if (uiAllocOrder != 0) + { + split_page(psPage, uiAllocOrder); + } +#endif + + /* Store the page (or multiple split pages) in the page array */ + for (ui32Count = 0; ui32Count < (1 << uiAllocOrder); ui32Count++) + { + psPageArrayData->pagearray[uiPageIndex + ui32Count] = &(psPage[ui32Count]); + } + + return PVRSRV_OK; +} + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + +static inline void _AddMemAllocRecord_UmaPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, + struct page *psPage) +{ + IMG_CPU_PHYADDR sCPUPhysAddr = { page_to_phys(psPage) }; + PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, + NULL, sCPUPhysAddr, + 1 << psPageArrayData->uiLog2AllocPageSize, + NULL, psPageArrayData->uiPid + DEBUG_MEMSTATS_VALUES); +} + +static inline void _RemoveMemAllocRecord_UmaPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, + struct page *psPage) +{ + PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, + (IMG_UINT64) page_to_phys(psPage), + psPageArrayData->uiPid); +} + +#else /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ + +static inline void _IncrMemAllocStat_UmaPages(size_t uiSize, IMG_PID uiPid) +{ + PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, + uiSize, uiPid); +} + +static inline void _DecrMemAllocStat_UmaPages(size_t uiSize, IMG_PID uiPid) +{ + PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, + uiSize, uiPid); +} + +#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ +#endif /* defined(PVRSRV_ENABLE_PROCESS_STATS) */ + +/* Allocation of OS pages: We may allocate 2^N order pages at a time for two reasons. 
+ * + * Firstly to support device pages which are larger than OS. By asking the OS for 2^N + * order OS pages at a time we guarantee the device page is contiguous. + * + * Secondly for performance where we may ask for 2^N order pages to reduce the number + * of calls to alloc_pages, and thus reduce time for huge allocations. + * + * Regardless of page order requested, we need to break them down to track _OS pages. + * The maximum order requested is increased if all max order allocations were successful. + * If any request fails we reduce the max order. + */ +#ifdef CONFIG_MCST +static PVRSRV_ERROR +_AllocOSPages_Fast_wo_iommu(PMR_OSPAGEARRAY_DATA *psPageArrayData) +#else +static PVRSRV_ERROR +_AllocOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) +#endif +{ + PVRSRV_ERROR eError; + IMG_UINT32 uiArrayIndex = 0; + IMG_UINT32 ui32Order; + IMG_UINT32 ui32MinOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT; + IMG_BOOL bIncreaseMaxOrder = IMG_TRUE; + + IMG_UINT32 ui32NumPageReq; + IMG_UINT32 uiPagesToAlloc; + IMG_UINT32 uiPagesFromPool = 0; + + gfp_t gfp_flags = _GetGFPFlags(ui32MinOrder ? 
psPageArrayData->bZero : IMG_FALSE, /* Zero all pages later as batch */ + psPageArrayData->psDevNode); + gfp_t ui32GfpFlags; + gfp_t ui32HighOrderGfpFlags = ((gfp_flags & ~__GFP_RECLAIM) | __GFP_NORETRY); + + struct page **ppsPageArray = psPageArrayData->pagearray; + struct page **ppsPageAttributeArray = NULL; + + uiPagesToAlloc = psPageArrayData->uiTotalNumOSPages; + + /* Try to get pages from the pool since it is faster; + the page pool currently only supports zero-order pages + thus currently excludes all DMA/CMA allocated memory */ + _GetPagesFromPoolLocked(psPageArrayData->psDevNode, + psPageArrayData->ui32CPUCacheFlags, + uiPagesToAlloc, + ui32MinOrder, + psPageArrayData->bZero, + ppsPageArray, + &uiPagesFromPool); + + uiArrayIndex = uiPagesFromPool; + + if ((uiPagesToAlloc - uiPagesFromPool) < PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD) + { /* Small allocations: ask for one device page at a time */ + ui32Order = ui32MinOrder; + bIncreaseMaxOrder = IMG_FALSE; + } + else + { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) + /* Large zero-order or none zero-order allocations, ask for + MAX(max-order, min-order) order pages at a time; alloc + failures throttles this down to ZeroOrder allocations */ + ui32Order = MAX(g_uiMaxOrder, ui32MinOrder); +#else + /* Because split_pages() is not available on older kernels + we cannot mix-and-match any-order pages in the PMR; + only same-order pages must be present in page array. + So we unconditionally force it to use ui32MinOrder on + these older kernels */ + ui32Order = ui32MinOrder; +#if defined(DEBUG) + if (! psPageArrayData->bIsCMA) + { + /* Sanity check that this is zero */ + PVR_ASSERT(! ui32Order); + } +#endif +#endif + } + + /* Only if asking for more contiguity than we actually need, let it fail */ + ui32GfpFlags = (ui32Order > ui32MinOrder) ? 
ui32HighOrderGfpFlags : gfp_flags; + ui32NumPageReq = (1 << ui32Order); + + while (uiArrayIndex < uiPagesToAlloc) + { + IMG_UINT32 ui32PageRemain = uiPagesToAlloc - uiArrayIndex; + + while (ui32NumPageReq > ui32PageRemain) + { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) + /* Pages to request is larger than that remaining + so ask for less so never over allocate */ + ui32Order = MAX(ui32Order >> 1, ui32MinOrder); +#else + /* Pages to request is larger than that remaining so + do nothing thus over allocate as we do not support + mix/match of any-order pages in PMR page-array in + older kernels (simplifies page free logic) */ + PVR_ASSERT(ui32Order == ui32MinOrder); +#endif + ui32NumPageReq = (1 << ui32Order); + ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags; + } + + if (psPageArrayData->bIsCMA) + { + /* As the DMA/CMA framework rounds-up request to the + next power-of-two, we request multiple uiMinOrder + pages to satisfy allocation request in order to + minimise wasting memory */ + eError = _AllocOSPage_CMA(psPageArrayData, + ui32GfpFlags, + ui32Order, + ui32MinOrder, + uiArrayIndex >> ui32MinOrder); + } + else + { + /* Allocate uiOrder pages at uiArrayIndex */ + eError = _AllocOSPage(psPageArrayData, + ui32GfpFlags, + ui32Order, + ui32MinOrder, + uiArrayIndex); + } + + if (eError == PVRSRV_OK) + { + /* Successful request. Move onto next. */ + uiArrayIndex += ui32NumPageReq; + } + else + { + if (ui32Order > ui32MinOrder) + { + /* Last request failed. Let's ask for less next time */ + ui32Order = MAX(ui32Order >> 1, ui32MinOrder); + bIncreaseMaxOrder = IMG_FALSE; + ui32NumPageReq = (1 << ui32Order); + ui32GfpFlags = (ui32Order > ui32MinOrder) ? 
ui32HighOrderGfpFlags : gfp_flags; + g_uiMaxOrder = ui32Order; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)) + /* We should not trigger this code path in older kernels, + this is enforced by ensuring ui32Order == ui32MinOrder */ + PVR_ASSERT(ui32Order == ui32MinOrder); +#endif + } + else + { + /* Failed to alloc pages at required contiguity. Failed allocation */ + PVR_DPF((PVR_DBG_ERROR, "%s: %s failed to honour request at %u of %u, flags = %x, order = %u (%s)", + __func__, + psPageArrayData->bIsCMA ? "dma_alloc_coherent" : "alloc_pages", + uiArrayIndex, + uiPagesToAlloc, + ui32GfpFlags, + ui32Order, + PVRSRVGetErrorString(eError))); + eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; + goto e_free_pages; + } + } + } + + if (bIncreaseMaxOrder && (g_uiMaxOrder < PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM)) + { /* All successful allocations on max order. Let's ask for more next time */ + g_uiMaxOrder++; + } + + /* Construct table of page pointers to apply attributes */ + ppsPageAttributeArray = &ppsPageArray[uiPagesFromPool]; + if (psPageArrayData->bIsCMA) + { + IMG_UINT32 uiIdx, uiIdy, uiIdz; + + ppsPageAttributeArray = OSAllocMem(sizeof(struct page *) * uiPagesToAlloc); + if (ppsPageAttributeArray == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "Failed OSAllocMem() for page attributes table")); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e_free_pages; + } + + for (uiIdx = 0; uiIdx < uiPagesToAlloc; uiIdx += ui32NumPageReq) + { + uiIdy = uiIdx >> ui32Order; + for (uiIdz = 0; uiIdz < ui32NumPageReq; uiIdz++) + { + ppsPageAttributeArray[uiIdx+uiIdz] = psPageArrayData->pagearray[uiIdy]; + ppsPageAttributeArray[uiIdx+uiIdz] += uiIdz; + } + } + } + + if (psPageArrayData->bZero && ui32MinOrder == 0) + { + eError = _ZeroPageArray(uiPagesToAlloc - uiPagesFromPool, + ppsPageAttributeArray, + PAGE_KERNEL); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to zero pages (fast)")); + goto e_free_pages; + } + } + + + /* Do the cache management as required */ + eError = 
_ApplyOSPagesAttribute(psPageArrayData->psDevNode, + ppsPageAttributeArray, + uiPagesToAlloc - uiPagesFromPool, + psPageArrayData->bZero, + psPageArrayData->ui32CPUCacheFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to set page attributes")); + goto e_free_pages; + } + else + { + if (psPageArrayData->bIsCMA) + { + OSFreeMem(ppsPageAttributeArray); + } + } + + /* Update metadata */ + psPageArrayData->iNumOSPagesAllocated = psPageArrayData->uiTotalNumOSPages; + + { + IMG_UINT32 ui32NumPages = + psPageArrayData->iNumOSPagesAllocated >> ui32MinOrder; + IMG_UINT32 i; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + for (i = 0; i < ui32NumPages; i++) + { + _AddMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i]); + } +#else + _IncrMemAllocStat_UmaPages(uiPagesToAlloc * PAGE_SIZE, + psPageArrayData->uiPid); +#endif +#endif + + if (psPageArrayData->bPoisonOnAlloc) + { + for (i = 0; i < ui32NumPages; i++) + { + _PoisonDevicePage(psPageArrayData->psDevNode, + ppsPageArray[i], + ui32MinOrder, + psPageArrayData->ui32CPUCacheFlags, + PVRSRV_POISON_ON_ALLOC_VALUE); + } + } + } + + return PVRSRV_OK; + +/* Error path */ +e_free_pages: + { + IMG_UINT32 ui32PageToFree; + + if (psPageArrayData->bIsCMA) + { + IMG_UINT32 uiDevArrayIndex = uiArrayIndex >> ui32Order; + IMG_UINT32 uiDevPageSize = PAGE_SIZE << ui32Order; + PVR_ASSERT(ui32Order == ui32MinOrder); + + if (ppsPageAttributeArray) + { + OSFreeMem(ppsPageAttributeArray); + } + + for (ui32PageToFree = 0; ui32PageToFree < uiDevArrayIndex; ui32PageToFree++) + { + _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice, + uiDevPageSize, + ui32MinOrder, + psPageArrayData->dmavirtarray[ui32PageToFree], + psPageArrayData->dmaphysarray[ui32PageToFree], + ppsPageArray[ui32PageToFree]); + psPageArrayData->dmaphysarray[ui32PageToFree]= (dma_addr_t)0; + psPageArrayData->dmavirtarray[ui32PageToFree] = NULL; + ppsPageArray[ui32PageToFree] = NULL; + } + } + else + 
{ + /* Free the pages we got from the pool */ + for (ui32PageToFree = 0; ui32PageToFree < uiPagesFromPool; ui32PageToFree++) + { + _FreeOSPage(ui32MinOrder, + psPageArrayData->bUnsetMemoryType, + ppsPageArray[ui32PageToFree]); + ppsPageArray[ui32PageToFree] = NULL; + } + + for (ui32PageToFree = uiPagesFromPool; ui32PageToFree < uiArrayIndex; ui32PageToFree++) + { + _FreeOSPage(ui32MinOrder, IMG_FALSE, ppsPageArray[ui32PageToFree]); + ppsPageArray[ui32PageToFree] = NULL; + } + } + + return eError; + } +} +#ifdef CONFIG_MCST +static PVRSRV_ERROR +_AllocOSPages_Fast_w_iommu(PMR_OSPAGEARRAY_DATA *psPageArrayData) +{ + PVRSRV_ERROR eError; + void *va; + dma_addr_t dma_addr; + struct page **pages = NULL; + IMG_UINT32 ui32MinOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT; + struct device *dev = psPageArrayData->psDevNode->psDevConfig->pvOSDevice; + gfp_t gfp_flags = _GetGFPFlags(ui32MinOrder ? psPageArrayData->bZero : IMG_FALSE, /* Zero all pages later as batch */ + psPageArrayData->psDevNode); + unsigned nr_pages = psPageArrayData->uiTotalNumOSPages; + unsigned long size = nr_pages * PAGE_SIZE; + unsigned long attrs = 0; + BUILD_BUG_ON(!IS_ENABLED(CONFIG_DMA_REMAP)); + if (psPageArrayData->uiLog2AllocPageSize != PAGE_SHIFT) + attrs |= DMA_ATTR_FORCE_CONTIGUOUS; + + if (!dev) {/*FIXME:*/ + struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_3D_IMAGINATION_GX6650, NULL); + dev = &pdev->dev; + } + va = dma_alloc_attrs(dev, size, &dma_addr, gfp_flags, attrs); + + if (!va) { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto alloc_page_failed; + } + if (is_vmalloc_addr(va)) { + pages = dma_common_find_pages(va); + BUG_ON(!pages); + BUG_ON(psPageArrayData->uiLog2AllocPageSize != PAGE_SHIFT); + memcpy(psPageArrayData->pagearray, + pages, nr_pages * sizeof(pages)); + } else { + int i; + int nr = (1 << ui32MinOrder); + struct page *p = virt_to_page(va); + pages = vmalloc(nr_pages * sizeof(*pages)); + if (!pages) + goto e_free_pages; + for (i = 
0; i < nr_pages; i++) + pages[i] = nth_page(p, i); + for (i = 0; i < nr_pages / nr; i++) + psPageArrayData->pagearray[i] = nth_page(p, i * nr); + } + /* Do the cache management as required */ + eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode, + pages, + nr_pages, + psPageArrayData->bZero, + psPageArrayData->ui32CPUCacheFlags); + if (eError != PVRSRV_OK) + goto e_free_pages; + /* Update metadata */ + psPageArrayData->iNumOSPagesAllocated = psPageArrayData->uiTotalNumOSPages; + + psPageArrayData->dmavirtarray[0] = va; + psPageArrayData->dmaphysarray[0] = dma_addr; + psPageArrayData->bIsFast = IMG_TRUE; + return PVRSRV_OK; +/* Error path */ +e_free_pages: + if (!is_vmalloc_addr(va)) + vfree(pages); + dma_free_coherent(dev, size, va, dma_addr); +alloc_page_failed: + return eError; +} + +static PVRSRV_ERROR +_AllocOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) +{ + struct device *dev = psPageArrayData->psDevNode->psDevConfig->pvOSDevice; + if (!dev) {/*FIXME:*/ + struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_3D_IMAGINATION_GX6650, NULL); + dev = &pdev->dev; + } + return device_iommu_mapped(dev) ? + _AllocOSPages_Fast_w_iommu(psPageArrayData) : + _AllocOSPages_Fast_wo_iommu(psPageArrayData); + +} +#endif /*CONFIG_MCST*/ +/* Allocation of OS pages: This function is used for sparse allocations. + * + * Sparse allocations provide only a proportion of sparse physical backing within the total + * virtual range. 
 */
static PVRSRV_ERROR
_AllocOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData,
		     IMG_UINT32 *puiAllocIndices,
		     IMG_UINT32 uiPagesToAlloc)
{
	PVRSRV_ERROR eError;
	IMG_UINT32 i;
	struct page **ppsPageArray = psPageArrayData->pagearray;
	/* Device-page order in units of OS pages */
	IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
	IMG_UINT32 uiPagesFromPool = 0;
	IMG_UINT32 uiNumOSPagesToAlloc = uiPagesToAlloc * (1 << uiOrder);
	IMG_UINT32 uiTotalNumAllocPages = psPageArrayData->uiTotalNumOSPages >> uiOrder;
	gfp_t ui32GfpFlags = _GetGFPFlags(uiOrder ? psPageArrayData->bZero :
					  IMG_FALSE, /* Zero pages later as batch */
					  psPageArrayData->psDevNode);

	/* We use this page array to receive pages from the pool and then reuse it afterwards to
	 * store pages that need their cache attribute changed on x86 */
	struct page **ppsTempPageArray;
	IMG_UINT32 uiTempPageArrayIndex = 0;
#ifdef CONFIG_MCST
	/* The sparse path is incompatible with the single-DMA-mapping
	 * "fast" layout produced by _AllocOSPages_Fast_w_iommu() */
	BUG_ON(psPageArrayData->bIsFast);
#endif
	/* Allocate the temporary page array that we need here to receive pages
	 * from the pool and to store pages that need their caching attributes changed.
	 * Allocate number of OS pages to be able to use the attribute function later. */
	ppsTempPageArray = OSAllocMem(sizeof(struct page*) * uiNumOSPagesToAlloc);
	if (ppsTempPageArray == NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Failed metadata allocation", __func__));
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto e_exit;
	}

	/* Check the requested number of pages if they fit in the page array */
	if (uiTotalNumAllocPages <
	    ((psPageArrayData->iNumOSPagesAllocated >> uiOrder) + uiPagesToAlloc) )
	{
		PVR_DPF((PVR_DBG_ERROR,
			 "%s: Trying to allocate more pages (Order %u) than this buffer can handle, "
			 "Request + Allocated < Max! Request %u, Allocated %u, Max %u.",
			 __func__,
			 uiOrder,
			 uiPagesToAlloc,
			 psPageArrayData->iNumOSPagesAllocated >> uiOrder,
			 uiTotalNumAllocPages));
		eError = PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
		goto e_free_temp_array;
	}

	/* Try to get pages from the pool since it is faster */
	_GetPagesFromPoolLocked(psPageArrayData->psDevNode,
				psPageArrayData->ui32CPUCacheFlags,
				uiPagesToAlloc,
				uiOrder,
				psPageArrayData->bZero,
				ppsTempPageArray,
				&uiPagesFromPool);

	/* Allocate pages from the OS or move the pages that we got from the pool
	 * to the page array */
	for (i = 0; i < uiPagesToAlloc; i++)
	{
		/* Check if the indices we are allocating are in range */
		if (puiAllocIndices[i] >= uiTotalNumAllocPages)
		{
			/* NOTE(review): the message reads "alloc index %u at %u" but the
			 * arguments are (i, puiAllocIndices[i]) — position and value look
			 * swapped relative to the wording; confirm intended order. */
			PVR_DPF((PVR_DBG_ERROR,
				 "%s: Given alloc index %u at %u is larger than page array %u.",
				 __func__,
				 i,
				 puiAllocIndices[i],
				 uiTotalNumAllocPages));
			eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
			goto e_free_pages;
		}

		/* Check if there is not already a page allocated at this position */
		if (NULL != ppsPageArray[puiAllocIndices[i]])
		{
			PVR_DPF((PVR_DBG_ERROR,
				 "%s: Mapping number %u at page array index %u already exists. "
				 "Page struct %p",
				 __func__,
				 i,
				 puiAllocIndices[i],
				 ppsPageArray[puiAllocIndices[i]]));
			eError = PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS;
			goto e_free_pages;
		}

		/* Finally assign a page to the array.
		 * Either from the pool or allocate a new one. */
		if (uiPagesFromPool != 0)
		{
			/* Pool pages are consumed from the tail of the temp array */
			uiPagesFromPool--;
			ppsPageArray[puiAllocIndices[i]] = ppsTempPageArray[uiPagesFromPool];
		}
		else
		{
			if (psPageArrayData->bIsCMA)
			{

				/* As the DMA/CMA framework rounds-up request to the
				   next power-of-two, we request multiple uiMinOrder
				   pages to satisfy allocation request in order to
				   minimise wasting memory */
				eError = _AllocOSPage_CMA(psPageArrayData,
							  ui32GfpFlags,
							  uiOrder,
							  uiOrder,
							  puiAllocIndices[i]);
				if (eError != PVRSRV_OK)
				{
					PVR_DPF((PVR_DBG_ERROR, "Failed to alloc CMA pages"));
					goto e_free_pages;
				}
			}
			else
			{
				DisableOOMKiller();
				ppsPageArray[puiAllocIndices[i]] = alloc_pages(ui32GfpFlags, uiOrder);
				EnableOOMKiller();
			}

			if (ppsPageArray[puiAllocIndices[i]] != NULL)
			{
				/* Reusing the temp page array if it has no pool pages anymore */

				if (psPageArrayData->bIsCMA)
				{
					IMG_UINT32 idx;
					struct page* psPageAddr;

					psPageAddr = ppsPageArray[puiAllocIndices[i]];

					/* Record every constituent OS page of the device page */
					for (idx = 0; idx < (1 << uiOrder); idx++)
					{
						ppsTempPageArray[uiTempPageArrayIndex + idx] = psPageAddr;
						psPageAddr++;
					}
					uiTempPageArrayIndex += (1 << uiOrder);
				}
				else
				{
					ppsTempPageArray[uiTempPageArrayIndex] = ppsPageArray[puiAllocIndices[i]];
					uiTempPageArrayIndex++;
				}
			}
			else
			{
				/* Failed to alloc pages at required contiguity. Failed allocation */
				PVR_DPF((PVR_DBG_ERROR,
					 "%s: alloc_pages failed to honour request at %u of %u, flags = %x, order = %u",
					 __func__,
					 i,
					 uiPagesToAlloc,
					 ui32GfpFlags,
					 uiOrder));
				eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
				goto e_free_pages;
			}
		}
	}

	if (psPageArrayData->bZero && uiOrder == 0)
	{
		eError = _ZeroPageArray(uiTempPageArrayIndex,
					ppsTempPageArray,
					PAGE_KERNEL);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "Failed to zero pages (sparse)"));
			goto e_free_pages;
		}
	}

	/* Do the cache management as required */
	eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode,
					ppsTempPageArray,
					uiTempPageArrayIndex,
					psPageArrayData->bZero,
					psPageArrayData->ui32CPUCacheFlags);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "Failed to set page attributes"));
		goto e_free_pages;
	}

	/* Update metadata */
	psPageArrayData->iNumOSPagesAllocated += uiNumOSPagesToAlloc;

	/* Free temporary page array */
	OSFreeMem(ppsTempPageArray);

#if defined(PVRSRV_ENABLE_PROCESS_STATS)
#if defined(PVRSRV_ENABLE_MEMORY_STATS)
	for (i = 0; i < uiPagesToAlloc; i++)
	{
		_AddMemAllocRecord_UmaPages(psPageArrayData,
					    ppsPageArray[puiAllocIndices[i]]);
	}
#else
	_IncrMemAllocStat_UmaPages(uiNumOSPagesToAlloc * PAGE_SIZE,
				   psPageArrayData->uiPid);
#endif
#endif

	if (psPageArrayData->bPoisonOnAlloc)
	{
		for (i = 0; i < uiPagesToAlloc; i++)
		{
			_PoisonDevicePage(psPageArrayData->psDevNode,
					  ppsPageArray[puiAllocIndices[i]],
					  uiOrder,
					  psPageArrayData->ui32CPUCacheFlags,
					  PVRSRV_POISON_ON_ALLOC_VALUE);
		}
	}

	return PVRSRV_OK;

/* Error path */
e_free_pages:
	{
		IMG_UINT32 ui32PageToFree;

		if (psPageArrayData->bIsCMA)
		{
			IMG_UINT32 uiDevPageSize = PAGE_SIZE << uiOrder;

			for (ui32PageToFree = 0; ui32PageToFree < i; ui32PageToFree++)
			{
				_FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
						uiDevPageSize,
						uiOrder,
						psPageArrayData->dmavirtarray[puiAllocIndices[ui32PageToFree]],
						psPageArrayData->dmaphysarray[puiAllocIndices[ui32PageToFree]],
						ppsPageArray[puiAllocIndices[ui32PageToFree]]);
				psPageArrayData->dmaphysarray[puiAllocIndices[ui32PageToFree]]= (dma_addr_t)0;
				psPageArrayData->dmavirtarray[puiAllocIndices[ui32PageToFree]] = NULL;
				ppsPageArray[puiAllocIndices[ui32PageToFree]] = NULL;
			}
		}
		else
		{
			/* Free the pages we got from the pool.
			 * NOTE(review): uiPagesFromPool has been decremented while pool
			 * pages were consumed, so this frees only the still-unconsumed
			 * pool pages at the head of the temp array; consumed ones are
			 * released via the ppsPageArray loops below — verify the index
			 * ranges cover all consumed pool pages. */
			for (ui32PageToFree = 0; ui32PageToFree < uiPagesFromPool; ui32PageToFree++)
			{
				_FreeOSPage(0,
					    psPageArrayData->bUnsetMemoryType,
					    ppsTempPageArray[ui32PageToFree]);
			}

			/* Free the pages we just allocated from the OS */
			for (ui32PageToFree = uiPagesFromPool; ui32PageToFree < i; ui32PageToFree++)
			{
				_FreeOSPage(0,
					    IMG_FALSE,
					    ppsPageArray[puiAllocIndices[ui32PageToFree]]);
			}

			/* Reset all page array entries that have been set so far*/
			for (ui32PageToFree = 0; ui32PageToFree < i; ui32PageToFree++)
			{
				ppsPageArray[puiAllocIndices[ui32PageToFree]] = NULL;
			}
		}
	}

e_free_temp_array:
	OSFreeMem(ppsTempPageArray);

e_exit:
	return eError;
}

/* Allocate pages for a given page array.
+ * + * The executed allocation path depends whether an array with allocation + * indices has been passed or not */ +static PVRSRV_ERROR +_AllocOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, + IMG_UINT32 *puiAllocIndices, + IMG_UINT32 uiPagesToAlloc) +{ + PVRSRV_ERROR eError; + struct page **ppsPageArray; + + /* Sanity checks */ + PVR_ASSERT(NULL != psPageArrayData); + if (psPageArrayData->bIsCMA) + { + PVR_ASSERT(psPageArrayData->dmaphysarray != NULL); + PVR_ASSERT(psPageArrayData->dmavirtarray != NULL); + } + PVR_ASSERT(psPageArrayData->pagearray != NULL); + PVR_ASSERT(0 <= psPageArrayData->iNumOSPagesAllocated); + + ppsPageArray = psPageArrayData->pagearray; + + /* Go the sparse alloc path if we have an array with alloc indices.*/ + if (puiAllocIndices != NULL) + { + eError = _AllocOSPages_Sparse(psPageArrayData, + puiAllocIndices, + uiPagesToAlloc); + } + else + { + eError = _AllocOSPages_Fast(psPageArrayData); + } + + if (eError != PVRSRV_OK) + { + goto e_exit; + } + + _DumpPageArray(ppsPageArray, + psPageArrayData->uiTotalNumOSPages >> + (psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT) ); + + PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: allocated OS memory for PMR @0x%p", psPageArrayData)); + return PVRSRV_OK; + +e_exit: + return eError; +} + +/* Same as _FreeOSPage except free memory using DMA framework */ +static INLINE void +_FreeOSPage_CMA(struct device *dev, + size_t alloc_size, + IMG_UINT32 uiOrder, + void *virt_addr, + dma_addr_t dev_addr, + struct page *psPage) +{ +#ifdef CONFIG_MCST + struct pci_dev *pdev = NULL; + unsigned long attrs = 0; + if (!dev) { /*FIXME:*/ + pdev = pci_get_device(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_3D_IMAGINATION_GX6650, NULL); + } + if (uiOrder != 0) + attrs |= DMA_ATTR_FORCE_CONTIGUOUS; + /* Since we always allocate through CMA the below cases are not suitable */ + dma_free_attrs(dev, alloc_size, virt_addr, + DMA_GET_ADDR(dev_addr), attrs); + pci_dev_put(pdev); + return; +#else + if 
(DMA_IS_ALLOCPG_ADDR(dev_addr)) + { +#if defined(CONFIG_X86) || defined(CONFIG_E2K) + void *pvPageVAddr = page_address(psPage); + if (pvPageVAddr) + { + int ret = set_memory_wb((unsigned long)pvPageVAddr, 1); + if (ret) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to reset page attribute", + __func__)); + } + } +#endif + + if (DMA_IS_ADDR_ADJUSTED(dev_addr)) + { + psPage -= DMA_GET_ALIGN_ADJUSTMENT(dev_addr); + uiOrder += 1; + } + + __free_pages(psPage, uiOrder); + } + else + { + if (DMA_IS_ADDR_ADJUSTED(dev_addr)) + { + size_t align_adjust; + + align_adjust = DMA_GET_ALIGN_ADJUSTMENT(dev_addr); + alloc_size = alloc_size << 1; + + dev_addr = DMA_GET_ADDR(dev_addr); + dev_addr -= align_adjust << PAGE_SHIFT; + virt_addr -= align_adjust << PAGE_SHIFT; + } + + dma_free_coherent(dev, alloc_size, virt_addr, DMA_GET_ADDR(dev_addr)); + } +#endif /*CONFIG_MCST*/ +} + +/* Free a single page back to the OS. + * Make sure the cache type is set back to the default value. + * + * Note: + * We must _only_ check bUnsetMemoryType in the case where we need to free + * the page back to the OS since we may have to revert the cache properties + * of the page to the default as given by the OS when it was allocated. 
*/ +static void +_FreeOSPage(IMG_UINT32 uiOrder, + IMG_BOOL bUnsetMemoryType, + struct page *psPage) +{ +#ifdef CONFIG_MCST + /* Since we always allocate through CMA we should not get here */ + WARN_ON_ONCE(1); +#endif +#if defined(CONFIG_X86) || defined(CONFIG_E2K) + void *pvPageVAddr; + pvPageVAddr = page_address(psPage); + + if (pvPageVAddr && bUnsetMemoryType) + { + int ret; + + ret = set_memory_wb((unsigned long)pvPageVAddr, 1); + if (ret) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attribute", + __func__)); + } + } +#else + PVR_UNREFERENCED_PARAMETER(bUnsetMemoryType); +#endif + __free_pages(psPage, uiOrder); +} + +/* Free the struct holding the metadata */ +static PVRSRV_ERROR +_FreeOSPagesArray(PMR_OSPAGEARRAY_DATA *psPageArrayData) +{ + PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: freed OS memory for PMR @0x%p", psPageArrayData)); + + /* Check if the page array actually still exists. + * It might be the case that has been moved to the page pool */ + if (psPageArrayData->pagearray != NULL) + { + OSFreeMemNoStats(psPageArrayData->pagearray); +#ifdef CONFIG_MCST + OSFreeMemNoStats(psPageArrayData->dmavirtarray); + OSFreeMemNoStats(psPageArrayData->dmaphysarray); +#endif + } + + kmem_cache_free(g_psLinuxPageArray, psPageArrayData); + + return PVRSRV_OK; +} + +/* Free all or some pages from a sparse page array */ +static PVRSRV_ERROR +_FreeOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData, + IMG_UINT32 *pai32FreeIndices, + IMG_UINT32 ui32FreePageCount) +{ + IMG_BOOL bSuccess; + IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT; + IMG_UINT32 uiPageIndex, i, j, uiTempIdx = 0; + struct page **ppsPageArray = psPageArrayData->pagearray; + IMG_UINT32 uiNumPages; + + struct page **ppsTempPageArray; + IMG_UINT32 uiTempArraySize; + + /* We really should have something to free before we call this */ + PVR_ASSERT(psPageArrayData->iNumOSPagesAllocated != 0); + + if (pai32FreeIndices == NULL) + { + uiNumPages = 
psPageArrayData->uiTotalNumOSPages >> uiOrder; + uiTempArraySize = psPageArrayData->iNumOSPagesAllocated; + } + else + { + uiNumPages = ui32FreePageCount; + uiTempArraySize = ui32FreePageCount << uiOrder; + } + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS) + for (i = 0; i < uiNumPages; i++) + { + IMG_UINT32 idx = pai32FreeIndices ? pai32FreeIndices[i] : i; + + if (NULL != ppsPageArray[idx]) + { + _RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[idx]); + } + } +#endif + + if (psPageArrayData->bPoisonOnFree) + { + for (i = 0; i < uiNumPages; i++) + { + IMG_UINT32 idx = pai32FreeIndices ? pai32FreeIndices[i] : i; + + if (NULL != ppsPageArray[idx]) + { + _PoisonDevicePage(psPageArrayData->psDevNode, + ppsPageArray[idx], + uiOrder, + psPageArrayData->ui32CPUCacheFlags, + PVRSRV_POISON_ON_FREE_VALUE); + } + } + } + + if (psPageArrayData->bIsCMA) + { + IMG_UINT32 uiDevNumPages = uiNumPages; + IMG_UINT32 uiDevPageSize = 1<uiLog2AllocPageSize; + + for (i = 0; i < uiDevNumPages; i++) + { + IMG_UINT32 idx = pai32FreeIndices ? 
pai32FreeIndices[i] : i; + if (NULL != ppsPageArray[idx]) + { +#ifdef CONFIG_MCST + if (psPageArrayData->bUnsetMemoryType) + set_memory_wb((u64)page_address(ppsPageArray[idx]), uiDevPageSize); +#endif + _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice, + uiDevPageSize, + uiOrder, + psPageArrayData->dmavirtarray[idx], + psPageArrayData->dmaphysarray[idx], + ppsPageArray[idx]); + psPageArrayData->dmaphysarray[idx] = (dma_addr_t)0; + psPageArrayData->dmavirtarray[idx] = NULL; + ppsPageArray[idx] = NULL; + uiTempIdx++; + } + } + uiTempIdx <<= uiOrder; + } + else + { + + /* OSAllocMemNoStats required because this code may be run without the bridge lock held */ + ppsTempPageArray = OSAllocMemNoStats(sizeof(struct page*) * uiTempArraySize); + if (ppsTempPageArray == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed free_pages metadata allocation", __func__)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* Put pages in a contiguous array so further processing is easier */ + for (i = 0; i < uiNumPages; i++) + { + uiPageIndex = pai32FreeIndices ? 
pai32FreeIndices[i] : i; + if (NULL != ppsPageArray[uiPageIndex]) + { + struct page *psPage = ppsPageArray[uiPageIndex]; + + for (j = 0; j < (1<ui32CPUCacheFlags, + ppsTempPageArray, + psPageArrayData->bUnpinned, + 0, + uiTempIdx); + if (bSuccess) + { + goto exit_ok; + } + + /* Free pages and reset page caching attributes on x86 */ +#if defined(CONFIG_X86) || defined(CONFIG_E2K) + if (uiTempIdx != 0 && psPageArrayData->bUnsetMemoryType) + { + int iError; + iError = set_pages_array_wb(ppsTempPageArray, uiTempIdx); + + if (iError) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __func__)); + } + } +#endif + + /* Free the pages */ + for (i = 0; i < uiTempIdx; i++) + { + __free_pages(ppsTempPageArray[i], 0); + } + + /* Free the temp page array here if it did not move to the pool */ + OSFreeMemNoStats(ppsTempPageArray); + } + +exit_ok: + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS) + _DecrMemAllocStat_UmaPages(uiTempIdx * PAGE_SIZE, + psPageArrayData->uiPid); +#endif + + if (pai32FreeIndices && ((uiTempIdx >> uiOrder) != ui32FreePageCount)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Probable sparse duplicate indices: ReqFreeCount: %d " + "ActualFreedCount: %d", __func__, ui32FreePageCount, (uiTempIdx >> uiOrder))); + } + /* Update metadata */ + psPageArrayData->iNumOSPagesAllocated -= uiTempIdx; + PVR_ASSERT(0 <= psPageArrayData->iNumOSPagesAllocated); + return PVRSRV_OK; +} + +/* Free all the pages in a page array */ +#ifdef CONFIG_MCST +static PVRSRV_ERROR +_FreeOSPages_Fast_wo_iommu(PMR_OSPAGEARRAY_DATA *psPageArrayData) +#else +static PVRSRV_ERROR +_FreeOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) +#endif +{ + IMG_BOOL bSuccess; + IMG_UINT32 i; + IMG_UINT32 uiNumPages = psPageArrayData->uiTotalNumOSPages; + IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT; + IMG_UINT32 uiDevNumPages = uiNumPages >> uiOrder; + IMG_UINT32 uiDevPageSize = PAGE_SIZE << uiOrder; + struct page **ppsPageArray = 
psPageArrayData->pagearray; + + /* We really should have something to free before we call this */ + PVR_ASSERT(psPageArrayData->iNumOSPagesAllocated != 0); + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + for (i = 0; i < uiDevNumPages; i++) + { + _RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i]); + } +#else + _DecrMemAllocStat_UmaPages(uiNumPages * PAGE_SIZE, + psPageArrayData->uiPid); +#endif +#endif + + if (psPageArrayData->bPoisonOnFree) + { + for (i = 0; i < uiDevNumPages; i++) + { + _PoisonDevicePage(psPageArrayData->psDevNode, + ppsPageArray[i], + uiOrder, + psPageArrayData->ui32CPUCacheFlags, + PVRSRV_POISON_ON_FREE_VALUE); + } + } + + /* Try to move the page array to the pool */ + bSuccess = _PutPagesToPoolLocked(psPageArrayData->ui32CPUCacheFlags, + ppsPageArray, + psPageArrayData->bUnpinned, + uiOrder, + uiNumPages); + if (bSuccess) + { + psPageArrayData->pagearray = NULL; + goto exit_ok; + } + + if (psPageArrayData->bIsCMA) + { + for (i = 0; i < uiDevNumPages; i++) + { +#ifdef CONFIG_MCST + if (psPageArrayData->bUnsetMemoryType) + set_memory_wb((u64)page_address(ppsPageArray[i]), uiDevPageSize); +#endif + _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice, + + uiDevPageSize, + uiOrder, + psPageArrayData->dmavirtarray[i], + psPageArrayData->dmaphysarray[i], + ppsPageArray[i]); + psPageArrayData->dmaphysarray[i] = (dma_addr_t)0; + psPageArrayData->dmavirtarray[i] = NULL; + ppsPageArray[i] = NULL; + } + } + else + { +#if defined(CONFIG_X86) || defined(CONFIG_E2K) + if (psPageArrayData->bUnsetMemoryType) + { + int ret; + + ret = set_pages_array_wb(ppsPageArray, uiNumPages); + if (ret) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", + __func__)); + } + } +#endif + + for (i = 0; i < uiNumPages; i++) + { + _FreeOSPage(uiOrder, IMG_FALSE, ppsPageArray[i]); + ppsPageArray[i] = NULL; + } + } + +exit_ok: + /* Update metadata */ + psPageArrayData->iNumOSPagesAllocated = 0; + 
return PVRSRV_OK; +} + +#ifdef CONFIG_MCST +static PVRSRV_ERROR +_FreeOSPages_Fast_w_iommu(PMR_OSPAGEARRAY_DATA *psPageArrayData) +{ + unsigned long attrs = 0; + struct page **pages; + PVRSRV_ERROR ret = PVRSRV_OK; + bool big_pages = psPageArrayData->uiLog2AllocPageSize != PAGE_SHIFT; + struct device *dev = psPageArrayData->psDevNode->psDevConfig->pvOSDevice; + unsigned nr_pages = psPageArrayData->uiTotalNumOSPages; + unsigned long size = nr_pages * PAGE_SIZE; + void *va = psPageArrayData->dmavirtarray[0]; + dma_addr_t dma_addr = psPageArrayData->dmaphysarray[0]; + if (!dev) {/*FIXME:*/ + struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_3D_IMAGINATION_GX6650, NULL); + dev = &pdev->dev; + } + if (big_pages) + attrs |= DMA_ATTR_FORCE_CONTIGUOUS; + + dma_free_attrs(dev, size, va, dma_addr, attrs); + if (!psPageArrayData->bUnsetMemoryType) + return PVRSRV_OK; + if (big_pages) { + int i; + struct page *p = virt_to_page(va); + BUG_ON(is_vmalloc_addr(va)); + pages = vmalloc(nr_pages * sizeof(*pages)); + if (!pages) + return PVRSRV_ERROR_OUT_OF_MEMORY; + for (i = 0; i < nr_pages; i++) + pages[i] = nth_page(p, i); + } else { + pages = psPageArrayData->pagearray; + } + if (WARN_ON(set_pages_array_wb(pages, nr_pages))) + ret = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING; + if (big_pages) + vfree(pages); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +_FreeOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) +{ + struct device *dev = psPageArrayData->psDevNode->psDevConfig->pvOSDevice; + + if (!dev) {/*FIXME:*/ + struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_3D_IMAGINATION_GX6650, NULL); + dev = &pdev->dev; + } + return device_iommu_mapped(dev) ? + _FreeOSPages_Fast_w_iommu(psPageArrayData) : + _FreeOSPages_Fast_wo_iommu(psPageArrayData); +} +#endif /*CONFIG_MCST*/ + +/* Free pages from a page array. + * Takes care of mem stats and chooses correct free path depending on parameters. 
*/ +static PVRSRV_ERROR +_FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, + IMG_UINT32 *pai32FreeIndices, + IMG_UINT32 ui32FreePageCount) +{ + PVRSRV_ERROR eError; + + /* Go the sparse or non-sparse path */ + if (psPageArrayData->iNumOSPagesAllocated != psPageArrayData->uiTotalNumOSPages + || pai32FreeIndices != NULL) + { + eError = _FreeOSPages_Sparse(psPageArrayData, + pai32FreeIndices, + ui32FreePageCount); + } + else + { + eError = _FreeOSPages_Fast(psPageArrayData); + } + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "_FreeOSPages_FreePages failed")); + } + + _DumpPageArray(psPageArrayData->pagearray, + psPageArrayData->uiTotalNumOSPages >> + (psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT) ); + + return eError; +} + +/* + * + * Implementation of callback functions + * + */ + +/* Destruction function is called after last reference disappears, + * but before PMR itself is freed. + */ +static PVRSRV_ERROR +PMRFinalizeOSMem(PMR_IMPL_PRIVDATA pvPriv) +{ + PVRSRV_ERROR eError; + PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv; + + /* We can't free pages until now. */ + if (psOSPageArrayData->iNumOSPagesAllocated != 0) + { + _PagePoolLock(); + if (psOSPageArrayData->bUnpinned) + { + _RemoveUnpinListEntryUnlocked(psOSPageArrayData); + } + _PagePoolUnlock(); + + eError = _FreeOSPages(psOSPageArrayData, + NULL, + 0); + PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */ + } + + eError = _FreeOSPagesArray(psOSPageArrayData); + PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */ + return PVRSRV_OK; +} + +/* Callback function for locking the system physical page addresses. + * This function must be called before the lookup address func. 
*/ +static PVRSRV_ERROR +PMRLockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv) +{ + PVRSRV_ERROR eError; + PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv; + + if (psOSPageArrayData->bOnDemand) + { + /* Allocate Memory for deferred allocation */ + eError = _AllocOSPages(psOSPageArrayData, NULL, psOSPageArrayData->uiTotalNumOSPages); + if (eError != PVRSRV_OK) + { + return eError; + } + } + + eError = PVRSRV_OK; + return eError; +} + +static PVRSRV_ERROR +PMRUnlockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv) +{ + /* Just drops the refcount. */ + PVRSRV_ERROR eError = PVRSRV_OK; + PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv; + + if (psOSPageArrayData->bOnDemand) + { + /* Free Memory for deferred allocation */ + eError = _FreeOSPages(psOSPageArrayData, + NULL, + 0); + if (eError != PVRSRV_OK) + { + return eError; + } + } + + PVR_ASSERT (eError == PVRSRV_OK); + return eError; +} + +/* Determine PA for specified offset into page array. */ +static IMG_DEV_PHYADDR GetOffsetPA(const PMR_OSPAGEARRAY_DATA *psOSPageArrayData, + IMG_UINT32 ui32Offset) +{ + IMG_UINT32 ui32Log2AllocPageSize = psOSPageArrayData->uiLog2AllocPageSize; + IMG_UINT32 ui32PageIndex = ui32Offset >> ui32Log2AllocPageSize; + IMG_UINT32 ui32InPageOffset = ui32Offset - (ui32PageIndex << ui32Log2AllocPageSize); + IMG_DEV_PHYADDR sPA; + + PVR_ASSERT(ui32PageIndex < psOSPageArrayData->uiTotalNumOSPages); + PVR_ASSERT(ui32InPageOffset < (1U << ui32Log2AllocPageSize)); +#ifdef CONFIG_MCST + /* PMRSysPhysAddrOSMem() is misnamed. It must return DevAddr. */ + sPA.uiAddr = psOSPageArrayData->bIsFast ? + psOSPageArrayData->dmaphysarray[0] + ui32Offset : + psOSPageArrayData->dmaphysarray[ui32PageIndex] + ui32InPageOffset; + BUG_ON(!sPA.uiAddr); +#else + sPA.uiAddr = page_to_phys(psOSPageArrayData->pagearray[ui32PageIndex]); + sPA.uiAddr += ui32InPageOffset; +#endif + + return sPA; +} + +/* N.B. It is assumed that PMRLockSysPhysAddressesOSMem() is called _before_ this function! 
*/ +static PVRSRV_ERROR +PMRSysPhysAddrOSMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T *puiOffset, + IMG_BOOL *pbValid, + IMG_DEV_PHYADDR *psDevPAddr) +{ + const PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv; + IMG_UINT32 uiIdx; + + if (psOSPageArrayData->uiLog2AllocPageSize < ui32Log2PageSize) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Requested physical addresses from PMR " + "for incompatible contiguity %u!", + __func__, + ui32Log2PageSize)); + return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY; + } + + for (uiIdx=0; uiIdx < ui32NumOfPages; uiIdx++) + { + if (pbValid[uiIdx]) + { + psDevPAddr[uiIdx] = GetOffsetPA(psOSPageArrayData, puiOffset[uiIdx]); + +#if !defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY) + /* this is just a precaution, normally this should be always + * available */ + if (psOSPageArrayData->ui64DmaMask) + { + if (psDevPAddr[uiIdx].uiAddr > psOSPageArrayData->ui64DmaMask) + { + PVR_DPF((PVR_DBG_ERROR, "%s: physical address" + " (%" IMG_UINT64_FMTSPECX ") out of allowable range" + " [0; %" IMG_UINT64_FMTSPECX "]", __func__, + psDevPAddr[uiIdx].uiAddr, + psOSPageArrayData->ui64DmaMask)); + BUG(); + } + } +#endif + } + } + + return PVRSRV_OK; +} + +typedef struct _PMR_OSPAGEARRAY_KERNMAP_DATA_ { + void *pvBase; + IMG_UINT32 ui32PageCount; +} PMR_OSPAGEARRAY_KERNMAP_DATA; + +static PVRSRV_ERROR +PMRAcquireKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv, + size_t uiOffset, + size_t uiSize, + void **ppvKernelAddressOut, + IMG_HANDLE *phHandleOut, + PMR_FLAGS_T ulFlags) +{ + PVRSRV_ERROR eError; + PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv; + void *pvAddress; + pgprot_t prot = PAGE_KERNEL; + IMG_UINT32 ui32PageOffset=0; + size_t uiMapOffset=0; + IMG_UINT32 ui32PageCount = 0; + IMG_UINT32 uiLog2AllocPageSize = psOSPageArrayData->uiLog2AllocPageSize; + IMG_UINT32 uiOSPageShift = OSGetPageShift(); + IMG_UINT32 uiPageSizeDiff = 0; + struct page **pagearray; + PMR_OSPAGEARRAY_KERNMAP_DATA *psData; 
+ + /* For cases device page size greater than the OS page size, + * multiple physically contiguous OS pages constitute one device page. + * However only the first page address of such an ensemble is stored + * as part of the mapping table in the driver. Hence when mapping the PMR + * in part/full, all OS pages that constitute the device page + * must also be mapped to kernel. + * + * For the case where device page size less than OS page size, + * treat it the same way as the page sizes are equal */ + if (uiLog2AllocPageSize > uiOSPageShift) + { + uiPageSizeDiff = uiLog2AllocPageSize - uiOSPageShift; + } + + /* + Zero offset and size as a special meaning which means map in the + whole of the PMR, this is due to fact that the places that call + this callback might not have access to be able to determine the + physical size + */ + if ((uiOffset == 0) && (uiSize == 0)) + { + ui32PageOffset = 0; + uiMapOffset = 0; + /* Page count = amount of OS pages */ + ui32PageCount = psOSPageArrayData->iNumOSPagesAllocated; + } + else + { + size_t uiEndoffset; + + ui32PageOffset = uiOffset >> uiLog2AllocPageSize; + uiMapOffset = uiOffset - (ui32PageOffset << uiLog2AllocPageSize); + uiEndoffset = uiOffset + uiSize - 1; + /* Add one as we want the count, not the offset */ + /* Page count = amount of device pages (note uiLog2AllocPageSize being used) */ + ui32PageCount = (uiEndoffset >> uiLog2AllocPageSize) + 1; + ui32PageCount -= ui32PageOffset; + + /* The OS page count to be mapped might be different if the + * OS page size is lesser than the device page size */ + ui32PageCount <<= uiPageSizeDiff; + } + + switch (PVRSRV_CPU_CACHE_MODE(psOSPageArrayData->ui32CPUCacheFlags)) + { + case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: + prot = pgprot_noncached(prot); + break; + + case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE: + prot = pgprot_writecombine(prot); + break; + + case PVRSRV_MEMALLOCFLAG_CPU_CACHED: + break; + + default: + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e0; + } + + if 
(uiPageSizeDiff) + { + /* Each device page can be broken down into ui32SubPageCount OS pages */ + IMG_UINT32 ui32SubPageCount = 1 << uiPageSizeDiff; + IMG_UINT32 i; + struct page **psPage = &psOSPageArrayData->pagearray[ui32PageOffset]; + + /* Allocate enough memory for the OS page pointers for this mapping */ + pagearray = OSAllocMem(ui32PageCount * sizeof(pagearray[0])); + + if (pagearray == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e0; + } + + /* construct array that holds the page pointers that constitute the requested + * mapping */ + for (i = 0; i < ui32PageCount; i++) + { + IMG_UINT32 ui32OSPageArrayIndex = i / ui32SubPageCount; + IMG_UINT32 ui32OSPageArrayOffset = i % ui32SubPageCount; + + /* + * The driver only stores OS page pointers for the first OS page + * within each device page (psPage[ui32OSPageArrayIndex]). + * Get the next OS page structure at device page granularity, + * then calculate OS page pointers for all the other pages. + */ + pagearray[i] = psPage[ui32OSPageArrayIndex] + ui32OSPageArrayOffset; + } + } + else + { + pagearray = &psOSPageArrayData->pagearray[ui32PageOffset]; + } + + psData = OSAllocMem(sizeof(*psData)); + if (psData == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e1; + } + +#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS) + pvAddress = vmap(pagearray, ui32PageCount, VM_READ | VM_WRITE, prot); +#else + pvAddress = vm_map_ram(pagearray, ui32PageCount, -1, prot); +#endif + if (pvAddress == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e2; + } + + *ppvKernelAddressOut = pvAddress + uiMapOffset; + psData->pvBase = pvAddress; + psData->ui32PageCount = ui32PageCount; + *phHandleOut = psData; + + if (uiPageSizeDiff) + { + OSFreeMem(pagearray); + } + + return PVRSRV_OK; + + /* + error exit paths follow + */ +e2: + OSFreeMem(psData); +e1: + if (uiPageSizeDiff) + { + OSFreeMem(pagearray); + } +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +static void 
PMRReleaseKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_HANDLE hHandle) +{ + PMR_OSPAGEARRAY_KERNMAP_DATA *psData = hHandle; + PVR_UNREFERENCED_PARAMETER(pvPriv); + +#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS) + vunmap(psData->pvBase); +#else + vm_unmap_ram(psData->pvBase, psData->ui32PageCount); +#endif + OSFreeMem(psData); +} + +static +PVRSRV_ERROR PMRUnpinOSMem(PMR_IMPL_PRIVDATA pPriv) +{ + PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pPriv; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Lock down the pool and add the array to the unpin list */ + _PagePoolLock(); + + /* Sanity check */ + PVR_ASSERT(psOSPageArrayData->bUnpinned == IMG_FALSE); + PVR_ASSERT(psOSPageArrayData->bOnDemand == IMG_FALSE); + + eError = _AddUnpinListEntryUnlocked(psOSPageArrayData); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Unable to add allocation to unpinned list (%d).", + __func__, + eError)); + + goto e_exit; + } + + psOSPageArrayData->bUnpinned = IMG_TRUE; + +e_exit: + _PagePoolUnlock(); + return eError; +} + +static +PVRSRV_ERROR PMRPinOSMem(PMR_IMPL_PRIVDATA pPriv, + PMR_MAPPING_TABLE *psMappingTable) +{ + PVRSRV_ERROR eError; + PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pPriv; + IMG_UINT32 *pui32MapTable = NULL; + IMG_UINT32 i, j = 0, ui32Temp = 0; + + _PagePoolLock(); + + /* Sanity check */ + PVR_ASSERT(psOSPageArrayData->bUnpinned); + + psOSPageArrayData->bUnpinned = IMG_FALSE; + + /* If there are still pages in the array remove entries from the pool */ + if (psOSPageArrayData->iNumOSPagesAllocated != 0) + { + _RemoveUnpinListEntryUnlocked(psOSPageArrayData); + _PagePoolUnlock(); + + eError = PVRSRV_OK; + goto e_exit_mapalloc_failure; + } + _PagePoolUnlock(); + + /* If pages were reclaimed we allocate new ones and + * return PVRSRV_ERROR_PMR_NEW_MEMORY */ + if (psMappingTable->ui32NumVirtChunks == 1) + { + eError = _AllocOSPages(psOSPageArrayData, NULL, psOSPageArrayData->uiTotalNumOSPages); + } + else + { + 
pui32MapTable = (IMG_UINT32 *)OSAllocMem(sizeof(*pui32MapTable) * psMappingTable->ui32NumPhysChunks); + if (NULL == pui32MapTable) + { + eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; + PVR_DPF((PVR_DBG_ERROR, + "%s: Unable to Alloc Map Table.", + __func__)); + goto e_exit_mapalloc_failure; + } + + for (i = 0, j = 0; i < psMappingTable->ui32NumVirtChunks; i++) + { + ui32Temp = psMappingTable->aui32Translation[i]; + if (TRANSLATION_INVALID != ui32Temp) + { + pui32MapTable[j++] = ui32Temp; + } + } + eError = _AllocOSPages(psOSPageArrayData, pui32MapTable, psMappingTable->ui32NumPhysChunks); + } + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Unable to get new pages for unpinned allocation.", + __func__)); + + eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; + goto e_exit; + } + + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Allocating new pages for unpinned allocation. " + "Old content is lost!", + __func__)); + + eError = PVRSRV_ERROR_PMR_NEW_MEMORY; + +e_exit: + OSFreeMem(pui32MapTable); +e_exit_mapalloc_failure: + return eError; +} + +/*************************************************************************/ /*! +@Function PMRChangeSparseMemOSMem +@Description This function Changes the sparse mapping by allocating and + freeing of pages. It changes the GPU and CPU maps accordingly. 
+@Return PVRSRV_ERROR failure code +*/ /**************************************************************************/ +static PVRSRV_ERROR +PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv, + const PMR *psPMR, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_UINT32 uiFlags) +{ + PVRSRV_ERROR eError; + + PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappigTable(psPMR); + PMR_OSPAGEARRAY_DATA *psPMRPageArrayData = (PMR_OSPAGEARRAY_DATA *)pPriv; + struct page **psPageArray = psPMRPageArrayData->pagearray; + void **psDMAVirtArray = psPMRPageArrayData->dmavirtarray; + dma_addr_t *psDMAPhysArray = psPMRPageArrayData->dmaphysarray; + + struct page *psPage; + dma_addr_t psDMAPAddr; + void *pvDMAVAddr; + + IMG_UINT32 ui32AdtnlAllocPages = 0; /*<! Number of pages to alloc from the OS */ + IMG_UINT32 ui32AdtnlFreePages = 0; /*<! Number of pages to free back to the OS */ + IMG_UINT32 ui32CommonRequestCount = 0; /*<! Number of pages to move position in the page array */ + IMG_UINT32 ui32Loop = 0; + IMG_UINT32 ui32Index = 0; + IMG_UINT32 uiAllocpgidx; + IMG_UINT32 uiFreepgidx; + IMG_UINT32 uiOrder = psPMRPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT; + IMG_BOOL bCMA = psPMRPageArrayData->bIsCMA; +#ifdef CONFIG_MCST + BUG_ON(psPMRPageArrayData->bIsFast); +#endif + + /* Check SPARSE flags and calculate pages to allocate and free */ + if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH)) + { + ui32CommonRequestCount = (ui32AllocPageCount > ui32FreePageCount) ? 
+ ui32FreePageCount : ui32AllocPageCount; + + PDUMP_PANIC(SPARSEMEM_SWAP, "Request to swap alloc & free pages not supported"); + } + + if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC)) + { + ui32AdtnlAllocPages = ui32AllocPageCount - ui32CommonRequestCount; + } + else + { + ui32AllocPageCount = 0; + } + + if (SPARSE_RESIZE_FREE == (uiFlags & SPARSE_RESIZE_FREE)) + { + ui32AdtnlFreePages = ui32FreePageCount - ui32CommonRequestCount; + } + else + { + ui32FreePageCount = 0; + } + + if (0 == (ui32CommonRequestCount || ui32AdtnlAllocPages || ui32AdtnlFreePages)) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_DPF((PVR_DBG_ERROR, + "%s: Missing parameters for number of pages to alloc/free", + __func__)); + return eError; + } + + /* The incoming request is classified into two operations independent of + * each other: alloc & free pages. + * These operations can be combined with two mapping operations as well + * which are GPU & CPU space mappings. + * + * From the alloc and free page requests, the net amount of pages to be + * allocated or freed is computed. Pages that were requested to be freed + * will be reused to fulfil alloc requests. + * + * The order of operations is: + * 1. Allocate new pages from the OS + * 2. Move the free pages from free request to alloc positions. + * 3. Free the rest of the pages not used for alloc + * + * Alloc parameters are validated at the time of allocation + * and any error will be handled then. 
*/ + + /* Validate the free indices */ + if (ui32FreePageCount) + { + if (NULL != pai32FreeIndices){ + + for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++) + { + uiFreepgidx = pai32FreeIndices[ui32Loop]; + + if (uiFreepgidx > (psPMRPageArrayData->uiTotalNumOSPages >> uiOrder)) + { + eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE; + goto e0; + } + + if (NULL == psPageArray[uiFreepgidx]) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_DPF((PVR_DBG_ERROR, + "%s: Trying to free non-allocated page", + __func__)); + goto e0; + } + } + } + else + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_DPF((PVR_DBG_ERROR, + "%s: Given non-zero free count but missing indices array", + __func__)); + return eError; + } + } + + /* Validate the alloc indices */ + for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++) + { + uiAllocpgidx = pai32AllocIndices[ui32Loop]; + + if (uiAllocpgidx > (psPMRPageArrayData->uiTotalNumOSPages >> uiOrder)) + { + eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE; + goto e0; + } + + if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM)) + { + if ((NULL != psPageArray[uiAllocpgidx]) || + (TRANSLATION_INVALID != psPMRMapTable->aui32Translation[uiAllocpgidx])) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_DPF((PVR_DBG_ERROR, + "%s: Trying to allocate already allocated page again", + __func__)); + goto e0; + } + } + else + { + if ((NULL == psPageArray[uiAllocpgidx]) || + (TRANSLATION_INVALID == psPMRMapTable->aui32Translation[uiAllocpgidx]) ) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_DPF((PVR_DBG_ERROR, + "%s: Unable to remap memory due to missing page", + __func__)); + goto e0; + } + } + } + + ui32Loop = 0; + + /* Allocate new pages from the OS */ + if (0 != ui32AdtnlAllocPages) + { + eError = _AllocOSPages(psPMRPageArrayData, pai32AllocIndices, ui32AdtnlAllocPages); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: New Addtl Allocation of pages failed", + __func__)); + goto e0; + } + + 
psPMRMapTable->ui32NumPhysChunks += ui32AdtnlAllocPages; + /*Mark the corresponding pages of translation table as valid */ + for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++) + { + psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop]; + } + } + + + ui32Index = ui32Loop; + + /* Move the corresponding free pages to alloc request */ + for (ui32Loop = 0; ui32Loop < ui32CommonRequestCount; ui32Loop++, ui32Index++) + { + uiAllocpgidx = pai32AllocIndices[ui32Index]; + uiFreepgidx = pai32FreeIndices[ui32Loop]; + + psPage = psPageArray[uiAllocpgidx]; + psPageArray[uiAllocpgidx] = psPageArray[uiFreepgidx]; + + if (bCMA) + { + pvDMAVAddr = psDMAVirtArray[uiAllocpgidx]; + psDMAPAddr = psDMAPhysArray[uiAllocpgidx]; + psDMAVirtArray[uiAllocpgidx] = psDMAVirtArray[uiFreepgidx]; + psDMAPhysArray[uiAllocpgidx] = psDMAPhysArray[uiFreepgidx]; + } + + /* Is remap mem used in real world scenario? Should it be turned to a + * debug feature? The condition check needs to be out of loop, will be + * done at later point though after some analysis */ + if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM)) + { + psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID; + psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx; + psPageArray[uiFreepgidx] = NULL; + if (bCMA) + { + psDMAVirtArray[uiFreepgidx] = NULL; + psDMAPhysArray[uiFreepgidx] = (dma_addr_t)0; + } + } + else + { + psPMRMapTable->aui32Translation[uiFreepgidx] = uiFreepgidx; + psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx; + psPageArray[uiFreepgidx] = psPage; + if (bCMA) + { + psDMAVirtArray[uiFreepgidx] = pvDMAVAddr; + psDMAPhysArray[uiFreepgidx] = psDMAPAddr; + } + } + } + + /* Free the additional free pages */ + if (0 != ui32AdtnlFreePages) + { + eError = _FreeOSPages(psPMRPageArrayData, + &pai32FreeIndices[ui32Loop], + ui32AdtnlFreePages); + if (eError != PVRSRV_OK) + { + goto e0; + } + psPMRMapTable->ui32NumPhysChunks -= ui32AdtnlFreePages; + 
while (ui32Loop < ui32FreePageCount) + { + psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Loop]] = TRANSLATION_INVALID; + ui32Loop++; + } + } + + eError = PVRSRV_OK; + +e0: + return eError; +} + +/*************************************************************************/ /*! +@Function PMRChangeSparseMemCPUMapOSMem +@Description This function Changes CPU maps accordingly +@Return PVRSRV_ERROR failure code +*/ /**************************************************************************/ +static +PVRSRV_ERROR PMRChangeSparseMemCPUMapOSMem(PMR_IMPL_PRIVDATA pPriv, + const PMR *psPMR, + IMG_UINT64 sCpuVAddrBase, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices) +{ + struct page **psPageArray; + PMR_OSPAGEARRAY_DATA *psPMRPageArrayData = (PMR_OSPAGEARRAY_DATA *)pPriv; + IMG_CPU_PHYADDR sCPUPAddr; + + sCPUPAddr.uiAddr = 0; + psPageArray = psPMRPageArrayData->pagearray; + + return OSChangeSparseMemCPUAddrMap((void **)psPageArray, + sCpuVAddrBase, + sCPUPAddr, + ui32AllocPageCount, + pai32AllocIndices, + ui32FreePageCount, + pai32FreeIndices, + IMG_FALSE); +} + +#ifdef CONFIG_MCST +static PVRSRV_ERROR PMRMMapOSMem(PMR_IMPL_PRIVDATA pvPriv, + PMR *psPMR, + PMR_MMAP_DATA pOSMMapData) +{ + PVRSRV_ERROR eError; + PMR_OSPAGEARRAY_DATA *psPageArrayData = pvPriv; + PVRSRV_DEVICE_NODE *psDevNode = PMR_DeviceNode(psPMR); + PMR_MAPPING_TABLE *psMappingTable = PMR_GetMappigTable(psPMR); + IMG_UINT32 ui32CPUCacheFlags; + pgprot_t sPageProt; + struct page **ppsPageArray = psPageArrayData->pagearray; + struct vm_area_struct *ps_vma = pOSMMapData; + ps_vma->vm_pgoff = 0; /*XXX: user sets something wrong */ + IMG_UINT32 uiNumPages = psPageArrayData->iNumOSPagesAllocated, i; + IMG_BOOL bIsSparse = (psMappingTable->ui32NumVirtChunks != + psMappingTable->ui32NumPhysChunks || + psMappingTable->ui32NumVirtChunks > 1) ? 
+ IMG_TRUE : IMG_FALSE; + int err; + if (((ps_vma->vm_flags & VM_WRITE) != 0) && + ((ps_vma->vm_flags & VM_SHARED) == 0)) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto err; + } + eError = PMRLockSysPhysAddresses(psPMR); + if (eError != PVRSRV_OK) + { + goto e0; + } + BUG_ON(bIsSparse && psMappingTable->uiChunkSize != (1 << psPageArrayData->uiLog2AllocPageSize)); + + sPageProt = vm_get_page_prot(ps_vma->vm_flags); + + eError = DevmemCPUCacheMode(psDevNode, + PMR_Flags(psPMR), + &ui32CPUCacheFlags); + if (eError != PVRSRV_OK) + { + goto err; + } + + switch (ui32CPUCacheFlags) + { + case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: + sPageProt = pgprot_noncached(sPageProt); + break; + + case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE: + sPageProt = pgprot_writecombine(sPageProt); + break; + + case PVRSRV_MEMALLOCFLAG_CPU_CACHED: + { +/* Do not set to write-combine for plato */ +#if !defined(PLATO_MEMORY_CONFIG) + PHYS_HEAP *psPhysHeap = PMR_PhysHeap(psPMR); + + if (PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_LMA) + sPageProt = pgprot_writecombine(sPageProt); +#endif + break; + } + + default: + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto err; + } + ps_vma->vm_page_prot = sPageProt; + + ps_vma->vm_flags |= VM_IO; + + /* Don't include the mapping in core dumps */ + ps_vma->vm_flags |= VM_DONTDUMP; + + /* + * Disable mremap because our nopage handler assumes all + * page requests have already been validated. 
+ */ + ps_vma->vm_flags |= VM_DONTEXPAND; + + /* Don't allow mapping to be inherited across a process fork */ + ps_vma->vm_flags |= VM_DONTCOPY; + if (psMappingTable->ui32NumVirtChunks == 1) { + err = vm_map_pages(ps_vma, ppsPageArray, uiNumPages); + if (WARN_ON(err)) { + eError = PVRSRV_ERROR_BAD_MAPPING; + goto err; + } + } else { + for (i = 0; i < psMappingTable->ui32NumVirtChunks; i++) { + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_UINT32 j = psMappingTable->aui32Translation[i]; + if (j == TRANSLATION_INVALID) + continue; + uiOffset = i * psMappingTable->uiChunkSize; + err = remap_pfn_range(ps_vma, ps_vma->vm_start + uiOffset, + page_to_pfn(ppsPageArray[j]), + psMappingTable->uiChunkSize, + ps_vma->vm_page_prot); + if (WARN_ON(err)) { + eError = PVRSRV_ERROR_BAD_MAPPING; + goto err; + } + } + } + +#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) + MMapStatsAddOrUpdatePMR(psPMR, ps_vma->vm_end - ps_vma->vm_start); +#endif + + return PVRSRV_OK; +err: + PMRUnlockSysPhysAddresses(psPMR); +e0: + return eError; +} +#endif /*CONFIG_MCST*/ + +static PMR_IMPL_FUNCTAB _sPMROSPFuncTab = { + .pfnLockPhysAddresses = &PMRLockSysPhysAddressesOSMem, + .pfnUnlockPhysAddresses = &PMRUnlockSysPhysAddressesOSMem, + .pfnDevPhysAddr = &PMRSysPhysAddrOSMem, + .pfnAcquireKernelMappingData = &PMRAcquireKernelMappingDataOSMem, + .pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataOSMem, + .pfnReadBytes = NULL, + .pfnWriteBytes = NULL, + .pfnUnpinMem = &PMRUnpinOSMem, + .pfnPinMem = &PMRPinOSMem, + .pfnChangeSparseMem = &PMRChangeSparseMemOSMem, + .pfnChangeSparseMemCPUMap = &PMRChangeSparseMemCPUMapOSMem, + .pfnFinalize = &PMRFinalizeOSMem, +#ifdef CONFIG_MCST + .pfnMMap = PMRMMapOSMem, +#endif +}; + +/* Wrapper around OS page allocation. 
*/ +static PVRSRV_ERROR +DoPageAlloc(PMR_OSPAGEARRAY_DATA *psPrivData, + IMG_UINT32 *puiAllocIndices, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32Log2AllocPageSize) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Do we fill the whole page array or just parts (sparse)? */ + if (ui32NumPhysChunks == ui32NumVirtChunks) + { + /* Allocate the physical pages */ + eError = _AllocOSPages(psPrivData, + NULL, + psPrivData->uiTotalNumOSPages >> + (ui32Log2AllocPageSize - PAGE_SHIFT)); + } + else if (ui32NumPhysChunks != 0) + { + /* Calculate the number of pages we want to allocate */ + IMG_UINT32 ui32PagesToAlloc = + (IMG_UINT32)((((ui32NumPhysChunks * uiChunkSize) - 1) >> ui32Log2AllocPageSize) + 1); + + /* Make sure calculation is correct */ + PVR_ASSERT(((PMR_SIZE_T) ui32PagesToAlloc << ui32Log2AllocPageSize) == + (ui32NumPhysChunks * uiChunkSize)); + + /* Allocate the physical pages */ + eError = _AllocOSPages(psPrivData, puiAllocIndices, + ui32PagesToAlloc); + } + + return eError; +} + +PVRSRV_ERROR +PhysmemNewOSRamBackedPMR(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *puiAllocIndices, + IMG_UINT32 uiLog2AllocPageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + PVRSRV_ERROR eError2; + PMR *psPMR; + struct _PMR_OSPAGEARRAY_DATA_ *psPrivData; + PMR_FLAGS_T uiPMRFlags; + PHYS_HEAP *psPhysHeap; + IMG_UINT32 ui32CPUCacheFlags; + IMG_BOOL bZero; + IMG_BOOL bIsCMA; + IMG_BOOL bPoisonOnAlloc; + IMG_BOOL bPoisonOnFree; + IMG_BOOL bOnDemand; + IMG_BOOL bCpuLocal; + IMG_BOOL bFwLocal; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + /* + * The host driver (but not guest) can still use this factory for firmware + * allocations + */ + if 
(PVRSRV_VZ_MODE_IS(GUEST) && PVRSRV_CHECK_FW_LOCAL(uiFlags)) + { + PVR_ASSERT(0); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto errorOnParam; + } + + /* Select correct caching mode */ + eError = DevmemCPUCacheMode(psDevNode, uiFlags, &ui32CPUCacheFlags); + if (eError != PVRSRV_OK) + { + goto errorOnParam; + } + + if (PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags)) + { + ui32CPUCacheFlags |= PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN; + } + +#if defined(SUPPORT_SECURITY_VALIDATION) && defined(NO_HARDWARE) + /* The following check is done before any attempt to use either security flag */ + if ((PVRSRV_CHECK_SECURE_FW_CODE(uiFlags) && PVRSRV_CHECK_SECURE_BUFFER(uiFlags)) || + (PVRSRV_CHECK_SECURE_FW_DATA(uiFlags) && PVRSRV_CHECK_SECURE_BUFFER(uiFlags)) || + (PVRSRV_CHECK_SECURE_FW_CODE(uiFlags) && PVRSRV_CHECK_SECURE_FW_DATA(uiFlags))) + { + PVR_DPF((PVR_DBG_ERROR, "Multiple secure allocation flags are set!")); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto errorOnParam; + } +#endif + + /* + * Use CMA framework if order is greater than OS page size; please note + * that OSMMapPMRGeneric() has the same expectation as well. + */ +#ifdef CONFIG_MCST + bIsCMA = IMG_TRUE; /*always use dma intreface */ +#else + bIsCMA = uiLog2AllocPageSize > PAGE_SHIFT ? IMG_TRUE : IMG_FALSE; +#endif + bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiFlags) ? IMG_TRUE : IMG_FALSE; + bCpuLocal = PVRSRV_CHECK_CPU_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE; + bFwLocal = PVRSRV_CHECK_FW_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE; + bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE; + bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE; + bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags) ? 
IMG_TRUE : IMG_FALSE; + +#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) + /* Overwrite flags and always zero pages that could go back to UM */ + bZero = IMG_TRUE; + bPoisonOnAlloc = IMG_FALSE; +#endif + + /* Physical allocation alignment is generally not supported except under + very restrictive conditions, also there is a maximum alignment value + which must not exceed the largest device page-size. If these are not + met then fail the aligned-requested allocation */ + if (bIsCMA) + { + IMG_UINT32 uiAlign = 1 << uiLog2AllocPageSize; + if (uiAlign > uiSize || uiAlign > (1 << PVR_MAX_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid PA alignment: size 0x%llx, align 0x%x", + __func__, uiSize, uiAlign)); + eError = PVRSRV_ERROR_INVALID_ALIGNMENT; + goto errorOnParam; + } + PVR_ASSERT(uiLog2AllocPageSize > PVR_MIN_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ); + } + + /* Create Array structure that hold the physical pages */ + eError = _AllocOSPageArray(psDevNode, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + uiLog2AllocPageSize, + bZero, + bIsCMA, + bPoisonOnAlloc, + bPoisonOnFree, + bOnDemand, + ui32CPUCacheFlags, + uiPid, + &psPrivData); + if (eError != PVRSRV_OK) + { + goto errorOnAllocPageArray; + } + + if (!bOnDemand) + { + eError = DoPageAlloc(psPrivData, puiAllocIndices, ui32NumPhysChunks, + ui32NumVirtChunks, uiChunkSize, uiLog2AllocPageSize); + if (eError != PVRSRV_OK) + { + goto errorOnAllocPages; + } + } + + /* + * In this instance, we simply pass flags straight through. + * + * Generically, uiFlags can include things that control the PMR factory, but + * we don't need any such thing (at the time of writing!), and our caller + * specifies all PMR flags so we don't need to meddle with what was given to + * us. 
+ */ + uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK); + + /* + * Check no significant bits were lost in cast due to different bit widths + * for flags + */ + PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK)); + + if (bOnDemand) + { + PDUMPCOMMENT("Deferred Allocation PMR (UMA)"); + } + + if (bFwLocal) + { + PDUMPCOMMENT("FW_LOCAL allocation requested"); + psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]; + } + else if (bCpuLocal) + { + PDUMPCOMMENT("CPU_LOCAL allocation requested"); + psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]; + } + else + { + psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]; + } + + eError = PMRCreatePMR(psDevNode, + psPhysHeap, + uiSize, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + puiAllocIndices, + uiLog2AllocPageSize, + uiPMRFlags, + pszAnnotation, + &_sPMROSPFuncTab, + psPrivData, + PMR_TYPE_OSMEM, + &psPMR, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + goto errorOnCreate; + } + + *ppsPMRPtr = psPMR; + + return PVRSRV_OK; + +errorOnCreate: + if (!bOnDemand) + { + eError2 = _FreeOSPages(psPrivData, NULL, 0); + PVR_ASSERT(eError2 == PVRSRV_OK); + } + +errorOnAllocPages: + eError2 = _FreeOSPagesArray(psPrivData); + PVR_ASSERT(eError2 == PVRSRV_OK); + +errorOnAllocPageArray: +errorOnParam: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/physmem_osmem_linux.h b/drivers/mcst/gpu-imgtec/services/server/env/linux/physmem_osmem_linux.h new file mode 100644 index 000000000000..3fac82dd8612 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/physmem_osmem_linux.h @@ -0,0 +1,49 @@ +/*************************************************************************/ /*! +@File +@Title Linux OS physmem implementation +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __PHYSMEM_OSMEM_LINUX_H__ +#define __PHYSMEM_OSMEM_LINUX_H__ + +void LinuxInitPhysmem(void); +void LinuxDeinitPhysmem(void); + +#endif /* __PHYSMEM_OSMEM_LINUX_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/physmem_test.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/physmem_test.c new file mode 100644 index 000000000000..f680bfd445a7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/physmem_test.c @@ -0,0 +1,710 @@ +/*************************************************************************/ /*! +@Title Physmem_test +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Single entry point for testing of page factories +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" +#include "physmem_test.h" +#include "device.h" +#include "syscommon.h" +#include "pmr.h" +#include "osfunc.h" +#include "physmem.h" +#include "physmem_osmem.h" +#include "physmem_lma.h" +#include "pvrsrv.h" + +#define PHYSMEM_TEST_PAGES 2 /* Mem test pages */ +#define PHYSMEM_TEST_PASSES_MAX 1000 /* Limit number of passes to some reasonable value */ + + +/* Test patterns for mem test */ + +static const IMG_UINT64 gui64Patterns[] = { + 0, + 0xffffffffffffffffULL, + 0x5555555555555555ULL, + 0xaaaaaaaaaaaaaaaaULL, + 0x1111111111111111ULL, + 0x2222222222222222ULL, + 0x4444444444444444ULL, + 0x8888888888888888ULL, + 0x3333333333333333ULL, + 0x6666666666666666ULL, + 0x9999999999999999ULL, + 0xccccccccccccccccULL, + 0x7777777777777777ULL, + 0xbbbbbbbbbbbbbbbbULL, + 0xddddddddddddddddULL, + 0xeeeeeeeeeeeeeeeeULL, + 0x7a6c7258554e494cULL, +}; + +static const IMG_UINT32 gui32Patterns[] = { + 0, + 0xffffffffU, + 0x55555555U, + 0xaaaaaaaaU, + 0x11111111U, + 0x22222222U, + 0x44444444U, + 0x88888888U, + 0x33333333U, + 0x66666666U, + 0x99999999U, + 0xccccccccU, + 0x77777777U, + 0xbbbbbbbbU, + 0xddddddddU, + 0xeeeeeeeeU, + 0x7a6c725cU, +}; + +static const IMG_UINT16 gui16Patterns[] = { + 0, + 0xffffU, + 0x5555U, + 0xaaaaU, + 0x1111U, + 0x2222U, + 0x4444U, + 0x8888U, + 0x3333U, + 0x6666U, + 0x9999U, + 0xccccU, + 0x7777U, + 0xbbbbU, + 0xddddU, + 0xeeeeU, + 0x7a6cU, +}; + +static const IMG_UINT8 gui8Patterns[] = { + 0, + 0xffU, + 0x55U, + 0xaaU, + 0x11U, + 0x22U, + 0x44U, + 0x88U, + 0x33U, + 0x66U, + 0x99U, + 0xccU, + 0x77U, + 0xbbU, + 0xddU, + 0xeeU, + 0x6cU, +}; + + +/* Following function does minimal required initialisation for mem test using dummy device node */ +static PVRSRV_ERROR +PhysMemTestInit(PVRSRV_DEVICE_NODE **ppsDeviceNode, PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_ERROR 
eError; + + /* Dummy device node */ + psDeviceNode = OSAllocZMem(sizeof(*psDeviceNode)); + PVR_LOG_RETURN_IF_NOMEM(psDeviceNode, "OSAllocZMem"); + + psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_INIT; + psDeviceNode->psDevConfig = psDevConfig; + psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON; + + /* Initialise Phys mem heaps */ + eError = PVRSRVPhysMemHeapsInit(psDeviceNode, psDevConfig); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVPhysMemHeapsInit", ErrorSysDevDeInit); + + psDeviceNode->sDevMMUPxSetup.uiMMUPxLog2AllocGran = OSGetPageShift(); + + *ppsDeviceNode = psDeviceNode; + + return PVRSRV_OK; + +ErrorSysDevDeInit: + psDevConfig->psDevNode = NULL; + OSFreeMem(psDeviceNode); + return eError; +} + +/* Undo initialisation done for mem test */ +static void +PhysMemTestDeInit(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + /* Deinitialise Phys mem heaps */ + PVRSRVPhysMemHeapsDeinit(psDeviceNode); + + OSFreeMem(psDeviceNode); +} + +/* Test for PMR factory validation */ +static PVRSRV_ERROR +PMRValidationTest(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags) +{ + PVRSRV_ERROR eError, eError1; + IMG_UINT32 i = 0, j = 0, ui32Index = 0; + IMG_UINT32 *pui32MappingTable = NULL; + PMR *psPMR = NULL; + IMG_BOOL *pbValid; + IMG_DEV_PHYADDR *apsDevPAddr; + IMG_UINT32 ui32NumOfPages = 10, ui32NumOfPhysPages = 5; + size_t uiMappedSize, uiPageSize; + IMG_UINT8 *pcWriteBuffer, *pcReadBuffer; + IMG_HANDLE hPrivData = NULL; + void *pvKernAddr = NULL; + + uiPageSize = OSGetPageSize(); + + /* Allocate OS memory for PMR page list */ + apsDevPAddr = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEV_PHYADDR)); + PVR_LOG_RETURN_IF_NOMEM(apsDevPAddr, "OSAllocMem"); + + /* Allocate OS memory for PMR page state */ + pbValid = OSAllocMem(ui32NumOfPages * sizeof(IMG_BOOL)); + PVR_LOG_GOTO_IF_NOMEM(pbValid, eError, ErrorFreePMRPageListMem); + OSCachedMemSet(pbValid, 0, ui32NumOfPages * sizeof(IMG_BOOL)); + + /* Allocate OS memory for write buffer */ + pcWriteBuffer = 
OSAllocMem(uiPageSize); + PVR_LOG_GOTO_IF_NOMEM(pcWriteBuffer, eError, ErrorFreePMRPageStateMem); + OSCachedMemSet(pcWriteBuffer, 0xF, uiPageSize); + + /* Allocate OS memory for read buffer */ + pcReadBuffer = OSAllocMem(uiPageSize); + PVR_LOG_GOTO_IF_NOMEM(pcReadBuffer, eError, ErrorFreeWriteBuffer); + + /* Allocate OS memory for mapping table */ + pui32MappingTable = (IMG_UINT32 *)OSAllocMem(ui32NumOfPhysPages * sizeof(*pui32MappingTable)); + PVR_LOG_GOTO_IF_NOMEM(pui32MappingTable, eError, ErrorFreeReadBuffer); + + /* Pages having even index will have physical backing in PMR */ + for (ui32Index=0; ui32Index < ui32NumOfPages; ui32Index+=2) + { + pui32MappingTable[i++] = ui32Index; + } + + /* Allocate Sparse PMR with SPARSE | READ | WRITE | UNCACHED attributes */ + uiFlags |= PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | \ + PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED; + + /* Allocate a sparse PMR from given physical heap - CPU/GPU/FW */ + eError = PhysmemNewRamBackedPMR(NULL, + psDeviceNode, + ui32NumOfPages * uiPageSize, + uiPageSize, + ui32NumOfPhysPages, + ui32NumOfPages, + pui32MappingTable, + OSGetPageShift(), + uiFlags, + sizeof("PMR ValidationTest"), + "PMR ValidationTest", + OSGetCurrentClientProcessIDKM(), + &psPMR, + PDUMP_NONE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to allocate a PMR")); + goto ErrorFreeMappingTable; + } + + /* Check whether allocated PMR can be locked and obtain physical addresses + * of underlying memory pages. 
+ */ + eError = PMRLockSysPhysAddresses(psPMR); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to lock PMR")); + goto ErrorUnrefPMR; + } + + /* Get the Device physical addresses of the pages */ + eError = PMR_DevPhysAddr(psPMR, OSGetPageShift(), ui32NumOfPages, 0, apsDevPAddr, pbValid); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to map PMR pages into device physical addresses")); + goto ErrorUnlockPhysAddresses; + } + + /* Check whether device address of each physical page is OS PAGE_SIZE aligned */ + for (i = 0; i < ui32NumOfPages; i++) + { + if (pbValid[i]) + { + if ((apsDevPAddr[i].uiAddr & OSGetPageMask()) != 0) + { + PVR_DPF((PVR_DBG_ERROR, "Physical memory of PMR is not page aligned")); + eError = PVRSRV_ERROR_MEMORY_TEST_FAILED; + goto ErrorUnlockPhysAddresses; + } + } + } + + /* Acquire kernel virtual address of each physical page and write to it + * and then release it. + */ + for (i = 0; i < ui32NumOfPages; i++) + { + if (pbValid[i]) + { + eError = PMRAcquireSparseKernelMappingData(psPMR, (i * uiPageSize), uiPageSize, &pvKernAddr, &uiMappedSize, &hPrivData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to Acquire Kernel Mapping of PMR")); + goto ErrorUnlockPhysAddresses; + } + OSDeviceMemCopy(pvKernAddr, pcWriteBuffer, OSGetPageSize()); + + eError = PMRReleaseKernelMappingData(psPMR, hPrivData); + PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); + } + } + + /* Acquire kernel virtual address of each physical page and read + * from it and check where contents are intact. 
+ */ + for (i = 0; i < ui32NumOfPages; i++) + { + if (pbValid[i]) + { + eError = PMRAcquireSparseKernelMappingData(psPMR, (i * uiPageSize), uiPageSize, &pvKernAddr, &uiMappedSize, &hPrivData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to Acquire Kernel Mapping of PMR")); + goto ErrorUnlockPhysAddresses; + } + OSCachedMemSet(pcReadBuffer, 0x0, uiPageSize); + OSDeviceMemCopy(pcReadBuffer, pvKernAddr, uiMappedSize); + + eError = PMRReleaseKernelMappingData(psPMR, hPrivData); + PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); + + for (j = 0; j < uiPageSize; j++) + { + if (pcReadBuffer[j] != pcWriteBuffer[j]) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Test failed. Got (0x%hhx), expected (0x%hhx)!", + __func__, pcReadBuffer[j], pcWriteBuffer[j])); + eError = PVRSRV_ERROR_MEMORY_TEST_FAILED; + goto ErrorUnlockPhysAddresses; + } + } + } + } + +ErrorUnlockPhysAddresses: + /* Unlock and Unref the PMR to destroy it */ + eError1 = PMRUnlockSysPhysAddresses(psPMR); + if (eError1 != PVRSRV_OK) + { + eError = (eError == PVRSRV_OK)? eError1 : eError; + PVR_DPF((PVR_DBG_ERROR, "Failed to unlock PMR")); + } + +ErrorUnrefPMR: + eError1 = PMRUnrefPMR(psPMR); + if (eError1 != PVRSRV_OK) + { + eError = (eError == PVRSRV_OK)? 
eError1 : eError; + PVR_DPF((PVR_DBG_ERROR, "Failed to free PMR")); + } +ErrorFreeMappingTable: + OSFreeMem(pui32MappingTable); +ErrorFreeReadBuffer: + OSFreeMem(pcReadBuffer); +ErrorFreeWriteBuffer: + OSFreeMem(pcWriteBuffer); +ErrorFreePMRPageStateMem: + OSFreeMem(pbValid); +ErrorFreePMRPageListMem: + OSFreeMem(apsDevPAddr); + + return eError; +} + +#define DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, Patterns, NumOfPatterns, Error, ptr, i) \ + for (i = 0; i < NumOfPatterns; i++) \ + { \ + /* Write pattern */ \ + for (ptr = StartAddr; ptr < EndAddr; ptr++) \ + { \ + *ptr = Patterns[i]; \ + } \ + \ + /* Read back and validate pattern */ \ + for (ptr = StartAddr; ptr < EndAddr ; ptr++) \ + { \ + if (*ptr != Patterns[i]) \ + { \ + Error = PVRSRV_ERROR_MEMORY_TEST_FAILED; \ + break; \ + } \ + } \ + \ + if (Error != PVRSRV_OK) \ + { \ + break; \ + } \ + } + +static PVRSRV_ERROR +TestPatternU8(void *pvKernAddr, size_t uiMappedSize) +{ + IMG_UINT8 *StartAddr = (IMG_UINT8 *) pvKernAddr; + IMG_UINT8 *EndAddr = ((IMG_UINT8 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT8)); + IMG_UINT8 *p; + IMG_UINT32 i; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT8)) == 0); + + DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui8Patterns, sizeof(gui8Patterns)/sizeof(IMG_UINT8), eError, p, i); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Test failed. 
Got (0x%hhx), expected (0x%hhx)!", + __func__, *p, gui8Patterns[i])); + } + + return eError; +} + + +static PVRSRV_ERROR +TestPatternU16(void *pvKernAddr, size_t uiMappedSize) +{ + IMG_UINT16 *StartAddr = (IMG_UINT16 *) pvKernAddr; + IMG_UINT16 *EndAddr = ((IMG_UINT16 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT16)); + IMG_UINT16 *p; + IMG_UINT32 i; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT16)) == 0); + + DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui16Patterns, sizeof(gui16Patterns)/sizeof(IMG_UINT16), eError, p, i); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Test failed. Got (0x%hx), expected (0x%hx)!", + __func__, *p, gui16Patterns[i])); + } + + return eError; +} + +static PVRSRV_ERROR +TestPatternU32(void *pvKernAddr, size_t uiMappedSize) +{ + IMG_UINT32 *StartAddr = (IMG_UINT32 *) pvKernAddr; + IMG_UINT32 *EndAddr = ((IMG_UINT32 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT32)); + IMG_UINT32 *p; + IMG_UINT32 i; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT32)) == 0); + + DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui32Patterns, sizeof(gui32Patterns)/sizeof(IMG_UINT32), eError, p, i); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Test failed. Got (0x%x), expected (0x%x)!", + __func__, *p, gui32Patterns[i])); + } + + return eError; +} + +static PVRSRV_ERROR +TestPatternU64(void *pvKernAddr, size_t uiMappedSize) +{ + IMG_UINT64 *StartAddr = (IMG_UINT64 *) pvKernAddr; + IMG_UINT64 *EndAddr = ((IMG_UINT64 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT64)); + IMG_UINT64 *p; + IMG_UINT32 i; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT64)) == 0); + + DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui64Patterns, sizeof(gui64Patterns)/sizeof(IMG_UINT64), eError, p, i); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Test failed. 
Got (0x%llx), expected (0x%llx)!", + __func__, *p, gui64Patterns[i])); + } + + return eError; +} + +static PVRSRV_ERROR +TestSplitCacheline(void *pvKernAddr, size_t uiMappedSize) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + size_t uiCacheLineSize; + size_t uiBlockSize; + size_t j; + IMG_UINT8 *pcWriteBuffer, *pcReadBuffer; + IMG_UINT8 *StartAddr = (IMG_UINT8 *) pvKernAddr; + IMG_UINT8 *EndAddr, *p; + + uiCacheLineSize = OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE); + + if (uiCacheLineSize > 0) + { + uiBlockSize = (uiCacheLineSize * 2)/3; /* split cacheline */ + + pcWriteBuffer = OSAllocMem(uiBlockSize); + PVR_LOG_RETURN_IF_NOMEM(pcWriteBuffer, "OSAllocMem"); + + /* Fill the write buffer with test data, 0xAB*/ + OSCachedMemSet(pcWriteBuffer, 0xAB, uiBlockSize); + + pcReadBuffer = OSAllocMem(uiBlockSize); + PVR_LOG_GOTO_IF_NOMEM(pcReadBuffer, eError, ErrorFreeWriteBuffer); + + /* Fit only complete blocks in uiMappedSize, ignore leftover bytes */ + EndAddr = StartAddr + (uiBlockSize * (uiMappedSize / uiBlockSize)); + + /* Write blocks into the memory */ + for (p = StartAddr; p < EndAddr; p += uiBlockSize) + { + OSCachedMemCopy(p, pcWriteBuffer, uiBlockSize); + } + + /* Read back blocks and check */ + for (p = StartAddr; p < EndAddr; p += uiBlockSize) + { + OSCachedMemCopy(pcReadBuffer, p, uiBlockSize); + + for (j = 0; j < uiBlockSize; j++) + { + if (pcReadBuffer[j] != pcWriteBuffer[j]) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Test failed. 
Got (0x%hhx), expected (0x%hhx)!", __func__, pcReadBuffer[j], pcWriteBuffer[j])); + eError = PVRSRV_ERROR_MEMORY_TEST_FAILED; + goto ErrorMemTestFailed; + } + } + } + +ErrorMemTestFailed: + OSFreeMem(pcReadBuffer); +ErrorFreeWriteBuffer: + OSFreeMem(pcWriteBuffer); + } + + return eError; +} + +/* Memory test - writes and reads back different patterns to memory and validate the same */ +static PVRSRV_ERROR +MemTestPatterns(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags) +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32MappingTable = 0; + PMR *psPMR = NULL; + size_t uiMappedSize, uiPageSize; + IMG_HANDLE hPrivData = NULL; + void *pvKernAddr = NULL; + + uiPageSize = OSGetPageSize(); + + /* Allocate PMR with READ | WRITE | WRITE_COMBINE attributes */ + uiFlags |= PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE; + + /*Allocate a PMR from given physical heap */ + eError = PhysmemNewRamBackedPMR(NULL, + psDeviceNode, + uiPageSize * PHYSMEM_TEST_PAGES, + uiPageSize * PHYSMEM_TEST_PAGES, + 1, + 1, + &ui32MappingTable, + OSGetPageShift(), + uiFlags, + sizeof("PMR PhysMemTest"), + "PMR PhysMemTest", + OSGetCurrentClientProcessIDKM(), + &psPMR, + PDUMP_NONE); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemNewRamBackedPMR"); + + /* Check whether allocated PMR can be locked and obtain physical + * addresses of underlying memory pages. + */ + eError = PMRLockSysPhysAddresses(psPMR); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRLockSysPhysAddresses", ErrorUnrefPMR); + + /* Map the physical page(s) into kernel space, acquire kernel mapping + * for PMR. 
+ */ + eError = PMRAcquireKernelMappingData(psPMR, 0, uiPageSize * PHYSMEM_TEST_PAGES, &pvKernAddr, &uiMappedSize, &hPrivData); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", ErrorUnlockPhysAddresses); + + PVR_ASSERT((uiPageSize * PHYSMEM_TEST_PAGES) == uiMappedSize); + + /* Test various patterns */ + eError = TestPatternU64(pvKernAddr, uiMappedSize); + if (eError != PVRSRV_OK) + { + goto ErrorReleaseKernelMappingData; + } + + eError = TestPatternU32(pvKernAddr, uiMappedSize); + if (eError != PVRSRV_OK) + { + goto ErrorReleaseKernelMappingData; + } + + eError = TestPatternU16(pvKernAddr, uiMappedSize); + if (eError != PVRSRV_OK) + { + goto ErrorReleaseKernelMappingData; + } + + eError = TestPatternU8(pvKernAddr, uiMappedSize); + if (eError != PVRSRV_OK) + { + goto ErrorReleaseKernelMappingData; + } + + /* Test split cachelines */ + eError = TestSplitCacheline(pvKernAddr, uiMappedSize); + +ErrorReleaseKernelMappingData: + (void) PMRReleaseKernelMappingData(psPMR, hPrivData); + +ErrorUnlockPhysAddresses: + /* Unlock and Unref the PMR to destroy it, ignore returned value */ + (void) PMRUnlockSysPhysAddresses(psPMR); +ErrorUnrefPMR: + (void) PMRUnrefPMR(psPMR); + + return eError; +} + +static PVRSRV_ERROR +PhysMemTestRun(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags, IMG_UINT32 ui32Passes) +{ + PVRSRV_ERROR eError; + IMG_UINT32 i; + + /* PMR validation test */ + eError = PMRValidationTest(psDeviceNode, uiFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: PMR validation test failed!", + __func__)); + return eError; + } + + for (i = 0; i < ui32Passes; i++) + { + /* Mem test */ + eError = MemTestPatterns(psDeviceNode, uiFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: [Pass#%u] MemTestPatterns failed!", + __func__, i)); + break; + } + } + + return eError; +} + +PVRSRV_ERROR +PhysMemTest(void *pvDevConfig, IMG_UINT32 ui32MemTestPasses) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + 
PVRSRV_DEVICE_CONFIG *psDevConfig = pvDevConfig; + PVRSRV_ERROR eError; + + /* validate memtest passes requested */ + ui32MemTestPasses = (ui32MemTestPasses > PHYSMEM_TEST_PASSES_MAX)? PHYSMEM_TEST_PASSES_MAX : ui32MemTestPasses; + + /* Do minimal initialisation before test */ + eError = PhysMemTestInit(&psDeviceNode, psDevConfig); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Test failed to initialize", __func__)); + return eError; + } + + /* GPU local mem */ + eError = PhysMemTestRun(psDeviceNode, 0, ui32MemTestPasses); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "GPU local memory test failed!")); + goto ErrorPhysMemTestDeinit; + } + + /* CPU local mem */ + eError = PhysMemTestRun(psDeviceNode, PVRSRV_MEMALLOCFLAG_CPU_LOCAL, ui32MemTestPasses); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "CPU local memory test failed!")); + goto ErrorPhysMemTestDeinit; + } + + PVR_LOG(("PhysMemTest: Passed.")); + goto PhysMemTestPassed; + +ErrorPhysMemTestDeinit: + PVR_DPF((PVR_DBG_ERROR, "PhysMemTest: Failed.")); +PhysMemTestPassed: + PhysMemTestDeInit(psDeviceNode); + + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/physmem_test.h b/drivers/mcst/gpu-imgtec/services/server/env/linux/physmem_test.h new file mode 100644 index 000000000000..684c729d0e51 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/physmem_test.h @@ -0,0 +1,51 @@ +/*************************************************************************/ /*! +@Title Physmem test header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for single entry point for testing of page factories +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef SRVSRV_PHYSMEM_TEST_H +#define SRVSRV_PHYSMEM_TEST_H +/* + * PhysMemTest + */ +PVRSRV_ERROR +PhysMemTest(void *pvDevConfig, IMG_UINT32 ui32MemTestPasses); + +#endif /* SRVSRV_PHYSMEM_TEST_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/pmr_os.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/pmr_os.c new file mode 100644 index 000000000000..0d37a6423090 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/pmr_os.c @@ -0,0 +1,640 @@ +/*************************************************************************/ /*! +@File +@Title Linux OS PMR functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include +#include +#include +#include +#if defined(CONFIG_L4) +#include +#endif +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) +#include +#include +#endif + +#include "img_defs.h" +#include "pvr_debug.h" +#include "allocmem.h" +#include "devicemem_server_utils.h" +#include "pmr.h" +#include "pmr_os.h" + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#endif + +#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) +#include "mmap_stats.h" +#endif + +#include "kernel_compatibility.h" + +/* + * x86_32: + * Use vm_insert_page because remap_pfn_range has issues when mapping HIGHMEM + * pages with default memory attributes; these HIGHMEM pages are skipped in + * set_pages_array_[uc,wc] during allocation; see reserve_pfn_range(). 
+ * Also vm_insert_page is faster. + * + * x86_64: + * Use vm_insert_page because it is faster. + * + * Other platforms: + * Use remap_pfn_range by default because it does not issue a cache flush. + * It is known that ARM32 benefits from this. When other platforms become + * available it has to be investigated if this assumption holds for them as well. + * + * Since vm_insert_page does more precise memory accounting we have the build + * flag PVR_MMAP_USE_VM_INSERT that forces its use. This is useful as a debug + * feature. + * + */ +#if defined(CONFIG_X86) || defined(PVR_MMAP_USE_VM_INSERT) +#define PMR_OS_USE_VM_INSERT_PAGE 1 +#endif + +static void MMapPMROpen(struct vm_area_struct *ps_vma) +{ + PMR *psPMR = ps_vma->vm_private_data; + + /* Our VM flags should ensure this function never gets called */ + PVR_DPF((PVR_DBG_WARNING, + "%s: Unexpected mmap open call, this is probably an application bug.", + __func__)); + PVR_DPF((PVR_DBG_WARNING, + "%s: vma struct: 0x%p, vAddr: %#lX, length: %#lX, PMR pointer: 0x%p", + __func__, + ps_vma, + ps_vma->vm_start, + ps_vma->vm_end - ps_vma->vm_start, + psPMR)); + + /* In case we get called anyway let's do things right by increasing the refcount and + * locking down the physical addresses. 
*/ + PMRRefPMR(psPMR); + + if (PMRLockSysPhysAddresses(psPMR) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Could not lock down physical addresses, aborting.", __func__)); + PMRUnrefPMR(psPMR); + } +} + +static void MMapPMRClose(struct vm_area_struct *ps_vma) +{ + PMR *psPMR = ps_vma->vm_private_data; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + { + uintptr_t vAddr = ps_vma->vm_start; + + while (vAddr < ps_vma->vm_end) + { + /* USER MAPPING */ + PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, + (IMG_UINT64)vAddr, + OSGetCurrentClientProcessIDKM()); + vAddr += PAGE_SIZE; + } + } +#else + PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, + ps_vma->vm_end - ps_vma->vm_start, + OSGetCurrentClientProcessIDKM()); +#endif +#endif + + PMRUnlockSysPhysAddresses(psPMR); + PMRUnrefPMR(psPMR); +} + +/* + * This vma operation is used to read data from mmap regions. It is called + * by access_process_vm, which is called to handle PTRACE_PEEKDATA ptrace + * requests and reads from /proc//mem. + */ +static int MMapVAccess(struct vm_area_struct *ps_vma, unsigned long addr, + void *buf, int len, int write) +{ + PMR *psPMR = ps_vma->vm_private_data; + unsigned long ulOffset = addr - ps_vma->vm_start; + size_t uiBytesCopied; + PVRSRV_ERROR eError; + int iRetVal = -EINVAL; + + if (write) + { + eError = PMR_WriteBytes(psPMR, + (IMG_DEVMEM_OFFSET_T) ulOffset, + buf, + len, + &uiBytesCopied); + } + else + { + eError = PMR_ReadBytes(psPMR, + (IMG_DEVMEM_OFFSET_T) ulOffset, + buf, + len, + &uiBytesCopied); + } + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Error from %s (%d)", + __func__, + write ? 
"PMR_WriteBytes" : "PMR_ReadBytes", + eError)); + } + else + { + iRetVal = uiBytesCopied; + } + + return iRetVal; +} + +static const struct vm_operations_struct gsMMapOps = +{ + .open = &MMapPMROpen, + .close = &MMapPMRClose, + .access = MMapVAccess, +}; + +static INLINE int _OSMMapPMR(PVRSRV_DEVICE_NODE *psDevNode, + struct vm_area_struct *ps_vma, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_UINT32 uiLog2PageSize, + IMG_BOOL bUseVMInsertPage, + IMG_BOOL bUseMixedMap) +{ + IMG_INT32 iStatus; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + pfn_t sPFN; +#else + unsigned long uiPFN; +#endif + +#if defined(CONFIG_L4) + size_t size; + IMG_CPU_VIRTADDR pvVAddr; +#if defined(ARM) + struct device *dev = psDevNode->psDevConfig->pvOSDevice; +#endif + + /* In L4 remaps from KM into UM is done via VA */ + pvVAddr = l4x_phys_to_virt(psCpuPAddr->uiAddr); + if (pvVAddr == NULL) + { + return -1; + } + + for (size = 0; size < 1ULL << uiLog2PageSize; size += PAGE_SIZE) + { + /* Fault-in pages now, ensure compiler does not optimise this out */ + *((volatile int*)pvVAddr + size) = *((volatile int*)pvVAddr + size); + } + +#if defined(ARM) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + sPFN = pfn_to_pfn_t(dma_to_pfn(dev, psCpuPAddr->uiAddr)); +#else + uiPFN = dma_to_pfn(dev, psCpuPAddr->uiAddr); +#endif +#else /* defined(ARM) */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + sPFN = pfn_to_pfn_t(((uintptr_t) pvVAddr) >> PAGE_SHIFT); +#else + uiPFN = ((uintptr_t) pvVAddr) >> PAGE_SHIFT; + PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == (IMG_UINT64)(uintptr_t)pvVAddr); +#endif +#endif + PVR_ASSERT(bUseVMInsertPage == IMG_FALSE); +#else /* defined(CONFIG_L4) */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + sPFN = phys_to_pfn_t(psCpuPAddr->uiAddr, 0); +#else + uiPFN = psCpuPAddr->uiAddr >> PAGE_SHIFT; + PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr->uiAddr); +#endif +#endif + + /* + * vm_insert_page() allows insertion of 
individual pages into user + * VMA space _only_ if page is a order-zero allocated page + */ + if (bUseVMInsertPage) + { + if (bUseMixedMap) + { + /* + * This path is just for debugging. It should be + * equivalent to the remap_pfn_range() path. + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) + vm_fault_t vmf; + + vmf = vmf_insert_mixed(ps_vma, + ps_vma->vm_start + uiOffset, + sPFN); + if (vmf & VM_FAULT_ERROR) + { + iStatus = vm_fault_to_errno(vmf, 0); + } + else + { + iStatus = 0; + } +#else + iStatus = vm_insert_mixed(ps_vma, + ps_vma->vm_start + uiOffset, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + sPFN); +#else + uiPFN); +#endif +#endif + } + else + { + /* Since kernel 3.7 this sets VM_MIXEDMAP internally */ + iStatus = vm_insert_page(ps_vma, + ps_vma->vm_start + uiOffset, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + pfn_t_to_page(sPFN)); +#else + pfn_to_page(uiPFN)); +#endif + } + } + else + { + /* + NOTE: Regarding absence of dma_mmap_coherent() in _OSMMapPMR() + + The current services mmap model maps in a PMR's full-length size + into the user VMA & applies any user specified offset to the kernel + returned zero-offset based VA in services client; this essentially + means services server ignores ps_vma->vm_pgoff (this houses hPMR) + during a mmap call. + + Furthermore, during a DMA/CMA memory allocation, multiple order-n + pages are used to satisfy an allocation request due to DMA/CMA + framework rounding-up allocation size to next power-of-two which + can lead to wasted memory (so we don't allocate using single call). + + The combination of the above two issues mean that we cannot use the + dma_mmap_coherent() for a number of reasons outlined below: + + - Services mmap semantics does not fit with dma_mmap_coherent() + which requires proper ps_vma->vm_pgoff; seeing this houses a + hPMR handle value, calls into dma_mmap_coherent() fails. 
This + could be avoided by forcing ps_vma->vm_pgoff to zero but the + ps_vma->vm_pgoff is applied to DMA bus address PFN and not + user VMA which is always mapped at ps_vma->vm_start. + + - As multiple order-n pages are used for DMA/CMA allocations, a + single dma_mmap_coherent() call with a vma->vm_pgoff set to + zero cannot (maybe) be used because there is no guarantee that + all of the multiple order-n pages in the PMR are physically + contiguous from the first entry to the last. Whilst this is + highly likely to be the case, there is no guarantee that it + will be so we cannot depend on this being the case. + + The solution is to manually mmap DMA/CMA pages into user VMA + using remap_pfn_range() directly. Furthermore, accounting is + always compromised for DMA/CMA allocations. + */ + size_t uiNumContiguousBytes = 1ULL << uiLog2PageSize; + + iStatus = remap_pfn_range(ps_vma, + ps_vma->vm_start + uiOffset, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + pfn_t_to_pfn(sPFN), +#else + uiPFN, +#endif + uiNumContiguousBytes, + ps_vma->vm_page_prot); + } + + return iStatus; +} + +PVRSRV_ERROR +OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData) +{ + struct vm_area_struct *ps_vma = pOSMMapData; + PVRSRV_DEVICE_NODE *psDevNode = PMR_DeviceNode(psPMR); + PVRSRV_ERROR eError; + size_t uiLength; + IMG_INT32 iStatus; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_UINT32 ui32CPUCacheFlags; + pgprot_t sPageProt; + IMG_CPU_PHYADDR asCpuPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_UINT32 uiOffsetIdx; + IMG_UINT32 uiNumOfPFNs; + IMG_UINT32 uiLog2PageSize; + IMG_CPU_PHYADDR *psCpuPAddr; + IMG_BOOL *pbValid; + IMG_BOOL bUseMixedMap = IMG_FALSE; + IMG_BOOL bUseVMInsertPage = IMG_FALSE; + + eError = PMRLockSysPhysAddresses(psPMR); + if (eError != PVRSRV_OK) + { + goto e0; + } + + if (((ps_vma->vm_flags & VM_WRITE) != 0) && + ((ps_vma->vm_flags & VM_SHARED) == 0)) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e1; + } + + 
sPageProt = vm_get_page_prot(ps_vma->vm_flags); + + eError = DevmemCPUCacheMode(psDevNode, + PMR_Flags(psPMR), + &ui32CPUCacheFlags); + if (eError != PVRSRV_OK) + { + goto e0; + } + + switch (ui32CPUCacheFlags) + { + case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: + sPageProt = pgprot_noncached(sPageProt); + break; + + case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE: + sPageProt = pgprot_writecombine(sPageProt); + break; + + case PVRSRV_MEMALLOCFLAG_CPU_CACHED: + { +/* Do not set to write-combine for plato */ +#if !defined(PLATO_MEMORY_CONFIG) + PHYS_HEAP *psPhysHeap = PMR_PhysHeap(psPMR); + + if (PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_LMA) + sPageProt = pgprot_writecombine(sPageProt); +#endif + break; + } + + default: + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e1; + } + ps_vma->vm_page_prot = sPageProt; + + ps_vma->vm_flags |= VM_IO; + + /* Don't include the mapping in core dumps */ + ps_vma->vm_flags |= VM_DONTDUMP; + + /* + * Disable mremap because our nopage handler assumes all + * page requests have already been validated. + */ + ps_vma->vm_flags |= VM_DONTEXPAND; + + /* Don't allow mapping to be inherited across a process fork */ + ps_vma->vm_flags |= VM_DONTCOPY; + + uiLength = ps_vma->vm_end - ps_vma->vm_start; + + /* Is this mmap targeting non order-zero pages or does it use pfn mappings? 
+ * If yes, don't use vm_insert_page */ + uiLog2PageSize = PMR_GetLog2Contiguity(psPMR); + +#if defined(PMR_OS_USE_VM_INSERT_PAGE) + bUseVMInsertPage = (uiLog2PageSize == PAGE_SHIFT) && (PMR_GetType(psPMR) != PMR_TYPE_EXTMEM); +#if defined(CONFIG_L4) + /* L4 uses CMA allocations */ + bUseVMInsertPage = IMG_FALSE; +#endif +#endif + + /* Can we use stack allocations */ + uiNumOfPFNs = uiLength >> uiLog2PageSize; + if (uiNumOfPFNs > PMR_MAX_TRANSLATION_STACK_ALLOC) + { + psCpuPAddr = OSAllocMem(uiNumOfPFNs * sizeof(*psCpuPAddr)); + if (psCpuPAddr == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e1; + } + + /* Should allocation fail, clean-up here before exiting */ + pbValid = OSAllocMem(uiNumOfPFNs * sizeof(*pbValid)); + if (pbValid == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + OSFreeMem(psCpuPAddr); + goto e2; + } + } + else + { + psCpuPAddr = asCpuPAddr; + pbValid = abValid; + } + + /* Obtain map range pfns */ + eError = PMR_CpuPhysAddr(psPMR, + uiLog2PageSize, + uiNumOfPFNs, + 0, + psCpuPAddr, + pbValid); + if (eError != PVRSRV_OK) + { + goto e3; + } + + /* + * Scan the map range for pfns without struct page* handling. If + * we find one, this is a mixed map, and we can't use vm_insert_page() + * NOTE: vm_insert_page() allows insertion of individual pages into user + * VMA space _only_ if said page is an order-zero allocated page. 
+ */ + if (bUseVMInsertPage) + { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + pfn_t sPFN; +#else + unsigned long uiPFN; +#endif + + for (uiOffsetIdx = 0; uiOffsetIdx < uiNumOfPFNs; ++uiOffsetIdx) + { + if (pbValid[uiOffsetIdx]) + { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + sPFN = phys_to_pfn_t(psCpuPAddr[uiOffsetIdx].uiAddr, 0); + + if (!pfn_t_valid(sPFN) || page_count(pfn_t_to_page(sPFN)) == 0) +#else + uiPFN = psCpuPAddr[uiOffsetIdx].uiAddr >> PAGE_SHIFT; + PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr[uiOffsetIdx].uiAddr); + + if (!pfn_valid(uiPFN) || page_count(pfn_to_page(uiPFN)) == 0) +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ + { + bUseMixedMap = IMG_TRUE; + break; + } + } + } + + if (bUseMixedMap) + { + ps_vma->vm_flags |= VM_MIXEDMAP; + } + } + else + { + ps_vma->vm_flags |= VM_PFNMAP; + } + + /* For each PMR page-size contiguous bytes, map page(s) into user VMA */ + for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<<uiLog2PageSize) + { + uiOffsetIdx = uiOffset >> uiLog2PageSize; + /* + * Only map in pages that are valid, any that aren't will be + * picked up by the nopage handler which will return a zeroed + * page for us. + */ + if (pbValid[uiOffsetIdx]) + { + iStatus = _OSMMapPMR(psDevNode, + ps_vma, + uiOffset, + &psCpuPAddr[uiOffsetIdx], + uiLog2PageSize, + bUseVMInsertPage, + bUseMixedMap); + if (iStatus) + { + /* Failure error code doesn't get propagated */ + eError = PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED; + PVR_ASSERT(0); + goto e3; + } + } +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS) +#define PMR_OS_BAD_CPUADDR 0x0BAD0BAD + { + IMG_CPU_PHYADDR sPAddr; + sPAddr.uiAddr = pbValid[uiOffsetIdx] ? 
+ psCpuPAddr[uiOffsetIdx].uiAddr : + IMG_CAST_TO_CPUPHYADDR_UINT(PMR_OS_BAD_CPUADDR); + + PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, + (void*)(uintptr_t)(ps_vma->vm_start + uiOffset), + sPAddr, + 1<<uiLog2PageSize, + OSGetCurrentClientProcessIDKM() + DEBUG_MEMSTATS_VALUES); + } +#undef PMR_OS_BAD_CPUADDR +#endif + } + + ps_vma->vm_private_data = psPMR; + + /* Install open and close handlers for ref-counting */ + ps_vma->vm_ops = &gsMMapOps; + + /* + * Take a reference on the PMR so that it can't be freed while mapped + * into the user process. + */ + PMRRefPMR(psPMR); + +#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) + /* record the stats */ + MMapStatsAddOrUpdatePMR(psPMR, uiLength); +#endif + + return PVRSRV_OK; + + /* Error exit paths follow */ +e3: + if (pbValid != abValid) + { + OSFreeMem(pbValid); + } +e2: + if (psCpuPAddr != asCpuPAddr) + { + OSFreeMem(psCpuPAddr); + } +e1: + PMRUnlockSysPhysAddresses(psPMR); +e0: + return eError; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/private_data.h b/drivers/mcst/gpu-imgtec/services/server/env/linux/private_data.h new file mode 100644 index 000000000000..6d63f151bbd1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/private_data.h @@ -0,0 +1,53 @@ +/*************************************************************************/ /*! +@File +@Title Linux private data structure +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__INCLUDED_PRIVATE_DATA_H_) +#define __INCLUDED_PRIVATE_DATA_H_ + +#include <linux/fs.h> + +#include "connection_server.h" + +CONNECTION_DATA *LinuxConnectionFromFile(struct file *pFile); +struct file *LinuxFileFromConnection(CONNECTION_DATA *psConnection); + +#endif /* !defined(__INCLUDED_PRIVATE_DATA_H_) */ diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_bridge_k.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_bridge_k.c new file mode 100644 index 000000000000..e7f4d883aaa7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_bridge_k.c @@ -0,0 +1,588 @@ +/*************************************************************************/ /*! +@File +@Title PVR Bridge Module (kernel side) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Receives calls from the user portion of services and + despatches them to functions in the kernel portion. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#include + +#include "img_defs.h" +#include "pvr_bridge.h" +#include "pvr_bridge_k.h" +#include "connection_server.h" +#include "syscommon.h" +#include "pvr_debug.h" +#include "di_server.h" +#include "private_data.h" +#include "linkage.h" +#include "pmr.h" +#include "rgx_bvnc_defs_km.h" +#include "pvrsrv_bridge_init.h" + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#include +#else +#include +#endif + +#include "pvr_drm.h" +#include "pvr_drv.h" + +#include "env_connection.h" +#include +#include + +/* RGX: */ +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif + +#include "srvcore.h" +#include "common_srvcore_bridge.h" + +PVRSRV_ERROR InitDMABUFBridge(void); +PVRSRV_ERROR DeinitDMABUFBridge(void); + +#if defined(MODULE_TEST) +/************************************************************************/ +// additional includes for services testing +/************************************************************************/ +#include "pvr_test_bridge.h" +#include "kern_test.h" +/************************************************************************/ +// end of additional includes +/************************************************************************/ +#endif + +/* The mmap code has its own mutex, to prevent possible re-entrant issues + * when the same PMR is mapped from two different connections/processes. 
+ */ +static DEFINE_MUTEX(g_sMMapMutex); + +#define _DRIVER_SUSPENDED 1 +#define _DRIVER_NOT_SUSPENDED 0 +static ATOMIC_T g_iDriverSuspended; +static ATOMIC_T g_iNumActiveDriverThreads; +static ATOMIC_T g_iNumActiveKernelThreads; +static IMG_HANDLE g_hDriverThreadEventObject; + +#if defined(DEBUG_BRIDGE_KM) +static DI_ENTRY *gpsDIBridgeStatsEntry; + +static void *BridgeStatsDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos) +{ + PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psDispatchTable = DIGetPrivData(psEntry); + + BridgeGlobalStatsLock(); + + if (psDispatchTable == NULL || *pui64Pos > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT) + { + return NULL; + } + + if (*pui64Pos == 0) + { + return DI_START_TOKEN; + } + + return &(psDispatchTable[*pui64Pos - 1]); +} + +static void BridgeStatsDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVR_UNREFERENCED_PARAMETER(psEntry); + PVR_UNREFERENCED_PARAMETER(pvData); + + BridgeGlobalStatsUnlock(); +} + +static void *BridgeStatsDINext(OSDI_IMPL_ENTRY *psEntry, void *pvData, + IMG_UINT64 *pui64Pos) +{ + PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psDispatchTable = DIGetPrivData(psEntry); + IMG_UINT64 uiItemAskedFor = *pui64Pos; /* pui64Pos on entry is the index to return */ + + PVR_UNREFERENCED_PARAMETER(pvData); + + /* Is the item asked for (starts at 0) a valid table index? 
*/ + if (uiItemAskedFor < BRIDGE_DISPATCH_TABLE_ENTRY_COUNT) + { + (*pui64Pos)++; /* on exit it is the next DI index to ask for */ + return &(psDispatchTable[uiItemAskedFor]); + } + + /* Now passed the end of the table to indicate stop */ + return NULL; +} + +static int BridgeStatsDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + if (pvData == DI_START_TOKEN) + { + DIPrintf(psEntry, + "Total ioctl call count = %u\n" + "Total number of bytes copied via copy_from_user = %u\n" + "Total number of bytes copied via copy_to_user = %u\n" + "Total number of bytes copied via copy_*_user = %u\n\n" + "%3s: %-60s | %-48s | %10s | %20s | %20s | %20s | %20s\n", + g_BridgeGlobalStats.ui32IOCTLCount, + g_BridgeGlobalStats.ui32TotalCopyFromUserBytes, + g_BridgeGlobalStats.ui32TotalCopyToUserBytes, + g_BridgeGlobalStats.ui32TotalCopyFromUserBytes + + g_BridgeGlobalStats.ui32TotalCopyToUserBytes, + "#", + "Bridge Name", + "Wrapper Function", + "Call Count", + "copy_from_user (B)", + "copy_to_user (B)", + "Total Time (us)", + "Max Time (us)"); + } + else if (pvData != NULL) + { + PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psTableEntry = pvData; + IMG_UINT32 ui32Remainder; + + DIPrintf(psEntry, + "%3d: %-60s %-48s %-10u %-20u %-20u %-20" IMG_UINT64_FMTSPEC " %-20" IMG_UINT64_FMTSPEC "\n", + (IMG_UINT32)(((size_t)psTableEntry-(size_t)g_BridgeDispatchTable)/sizeof(*g_BridgeDispatchTable)), + psTableEntry->pszIOCName, + (psTableEntry->pfFunction != NULL) ? 
psTableEntry->pszFunctionName : "(null)", + psTableEntry->ui32CallCount, + psTableEntry->ui32CopyFromUserTotalBytes, + psTableEntry->ui32CopyToUserTotalBytes, + OSDivide64r64(psTableEntry->ui64TotalTimeNS, 1000, &ui32Remainder), + OSDivide64r64(psTableEntry->ui64MaxTimeNS, 1000, &ui32Remainder)); + } + + return 0; +} + +static IMG_INT64 BridgeStatsWrite(const IMG_CHAR *pcBuffer, + IMG_UINT64 ui64Count, IMG_UINT64 *pui64Pos, + void *pvData) +{ + IMG_UINT32 i; + + PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); + PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO); + PVR_RETURN_IF_FALSE(ui64Count >= 1, -EINVAL); + PVR_RETURN_IF_FALSE(pcBuffer[0] == '0', -EINVAL); + PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); + + /* Reset stats. */ + + BridgeGlobalStatsLock(); + + g_BridgeGlobalStats.ui32IOCTLCount = 0; + g_BridgeGlobalStats.ui32TotalCopyFromUserBytes = 0; + g_BridgeGlobalStats.ui32TotalCopyToUserBytes = 0; + + for (i = 0; i < ARRAY_SIZE(g_BridgeDispatchTable); i++) + { + g_BridgeDispatchTable[i].ui32CallCount = 0; + g_BridgeDispatchTable[i].ui32CopyFromUserTotalBytes = 0; + g_BridgeDispatchTable[i].ui32CopyToUserTotalBytes = 0; + g_BridgeDispatchTable[i].ui64TotalTimeNS = 0; + g_BridgeDispatchTable[i].ui64MaxTimeNS = 0; + } + + BridgeGlobalStatsUnlock(); + + return ui64Count; +} + +#endif /* defined(DEBUG_BRIDGE_KM) */ + +PVRSRV_ERROR OSPlatformBridgeInit(void) +{ + PVRSRV_ERROR eError; + + eError = InitDMABUFBridge(); + PVR_LOG_IF_ERROR(eError, "InitDMABUFBridge"); + + OSAtomicWrite(&g_iDriverSuspended, _DRIVER_NOT_SUSPENDED); + OSAtomicWrite(&g_iNumActiveDriverThreads, 0); + OSAtomicWrite(&g_iNumActiveKernelThreads, 0); + + eError = OSEventObjectCreate("Global driver thread event object", + &g_hDriverThreadEventObject); + PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", error_); + +#if defined(DEBUG_BRIDGE_KM) + { + DI_ITERATOR_CB sIter = { + .pfnStart = BridgeStatsDIStart, + .pfnStop = BridgeStatsDIStop, + .pfnNext = 
BridgeStatsDINext, + .pfnShow = BridgeStatsDIShow, + .pfnWrite = BridgeStatsWrite + }; + + eError = DICreateEntry("bridge_stats", NULL, &sIter, + &g_BridgeDispatchTable[0], + DI_ENTRY_TYPE_GENERIC, + &gpsDIBridgeStatsEntry); + PVR_LOG_GOTO_IF_ERROR(eError, "DICreateEntry", error_); + } +#endif + + return PVRSRV_OK; + +error_: + if (g_hDriverThreadEventObject) { + OSEventObjectDestroy(g_hDriverThreadEventObject); + g_hDriverThreadEventObject = NULL; + } + + return eError; +} + +PVRSRV_ERROR OSPlatformBridgeDeInit(void) +{ + PVRSRV_ERROR eError; + +#if defined(DEBUG_BRIDGE_KM) + if (gpsDIBridgeStatsEntry != NULL) + { + DIDestroyEntry(gpsDIBridgeStatsEntry); + } +#endif + + eError = DeinitDMABUFBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitDMABUFBridge"); + + if (g_hDriverThreadEventObject != NULL) { + OSEventObjectDestroy(g_hDriverThreadEventObject); + g_hDriverThreadEventObject = NULL; + } + + return eError; +} + +PVRSRV_ERROR LinuxBridgeBlockClientsAccess(IMG_BOOL bShutdown) +{ + PVRSRV_ERROR eError; + IMG_HANDLE hEvent; + + eError = OSEventObjectOpen(g_hDriverThreadEventObject, &hEvent); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to open event object", __func__)); + return eError; + } + + if (OSAtomicCompareExchange(&g_iDriverSuspended, _DRIVER_NOT_SUSPENDED, + _DRIVER_SUSPENDED) == _DRIVER_SUSPENDED) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Driver is already suspended", __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto out_put; + } + + /* now wait for any threads currently in the server to exit */ + while (OSAtomicRead(&g_iNumActiveDriverThreads) != 0 || + (OSAtomicRead(&g_iNumActiveKernelThreads) != 0 && !bShutdown)) + { + if (OSAtomicRead(&g_iNumActiveDriverThreads) != 0) + { + PVR_LOG(("%s: waiting for user threads (%d)", __func__, + OSAtomicRead(&g_iNumActiveDriverThreads))); + } + if (OSAtomicRead(&g_iNumActiveKernelThreads) != 0) + { + PVR_LOG(("%s: waiting for kernel threads (%d)", __func__, + 
OSAtomicRead(&g_iNumActiveKernelThreads))); + } + /* Regular wait is called here (and not OSEventObjectWaitKernel) because + * this code is executed by the caller of .suspend/.shutdown callbacks + * which is most likely PM (or other actor responsible for suspend + * process). Because of that this thread shouldn't and most likely + * event cannot be frozen. */ + OSEventObjectWait(hEvent); + } + +out_put: + OSEventObjectClose(hEvent); + + return eError; +} + +PVRSRV_ERROR LinuxBridgeUnblockClientsAccess(void) +{ + PVRSRV_ERROR eError; + + /* resume the driver and then signal so any waiting threads wake up */ + if (OSAtomicCompareExchange(&g_iDriverSuspended, _DRIVER_SUSPENDED, + _DRIVER_NOT_SUSPENDED) == _DRIVER_NOT_SUSPENDED) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Driver is not suspended", __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = OSEventObjectSignal(g_hDriverThreadEventObject); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: OSEventObjectSignal failed: %s", + __func__, PVRSRVGetErrorString(eError))); + } + + return eError; +} + +static PVRSRV_ERROR LinuxBridgeSignalIfSuspended(void) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if (OSAtomicRead(&g_iDriverSuspended) == _DRIVER_SUSPENDED) + { + PVRSRV_ERROR eError = OSEventObjectSignal(g_hDriverThreadEventObject); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to signal driver thread event" + " object: %s", __func__, PVRSRVGetErrorString(eError))); + } + } + + return eError; +} + +void LinuxBridgeNumActiveKernelThreadsIncrement(void) +{ + OSAtomicIncrement(&g_iNumActiveKernelThreads); +} + +void LinuxBridgeNumActiveKernelThreadsDecrement(void) +{ + OSAtomicDecrement(&g_iNumActiveKernelThreads); + PVR_ASSERT(OSAtomicRead(&g_iNumActiveKernelThreads) >= 0); + + /* Signal on every decrement in case LinuxBridgeBlockClientsAccess() is + * waiting for the threads to freeze. 
+ * (error is logged in called function so ignore, we can't do much with + * it anyway) */ + (void) LinuxBridgeSignalIfSuspended(); +} + +static PVRSRV_ERROR _WaitForDriverUnsuspend(void) +{ + PVRSRV_ERROR eError; + IMG_HANDLE hEvent; + + eError = OSEventObjectOpen(g_hDriverThreadEventObject, &hEvent); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to open event object", __func__)); + return eError; + } + + while (OSAtomicRead(&g_iDriverSuspended) == _DRIVER_SUSPENDED) + { + /* we should be able to use normal (not kernel) wait here since + * we were just unfrozen and most likely we're not going to + * be frozen again (?) */ + OSEventObjectWait(hEvent); + } + + OSEventObjectClose(hEvent); + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVDriverThreadEnter(void) +{ + PVRSRV_ERROR eError; + + /* increment first so there is no race between this value and + * g_iDriverSuspended in LinuxBridgeBlockClientsAccess() */ + OSAtomicIncrement(&g_iNumActiveDriverThreads); + + if (OSAtomicRead(&g_iDriverSuspended) == _DRIVER_SUSPENDED) + { + /* decrement here because the driver is going to be suspended and + * this thread is going to be frozen so we don't want to wait for + * it in LinuxBridgeBlockClientsAccess() */ + OSAtomicDecrement(&g_iNumActiveDriverThreads); + + /* during suspend procedure this will put the current thread to + * the freezer but during shutdown this will just return */ + try_to_freeze(); + + /* if the thread was unfrozen but the flag is not yet set to + * _DRIVER_NOT_SUSPENDED wait for it + * in case this is a shutdown the thread was not frozen so we'll + * wait here indefinitely but this is ok (and this is in fact what + * we want) because no thread should be entering the driver in such + * case */ + eError = _WaitForDriverUnsuspend(); + + /* increment here because that means that the thread entered the + * driver */ + OSAtomicIncrement(&g_iNumActiveDriverThreads); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to 
wait for driver" + " unsuspend: %s", __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + } + + return PVRSRV_OK; +} + +void PVRSRVDriverThreadExit(void) +{ + OSAtomicDecrement(&g_iNumActiveDriverThreads); + /* if the driver is being suspended then we need to signal the + * event object as the thread suspending the driver is waiting + * for active threads to exit + * error is logged in called function so ignore returned error + */ + (void) LinuxBridgeSignalIfSuspended(); +} + +int +PVRSRV_BridgeDispatchKM(struct drm_device __maybe_unused *dev, void *arg, struct drm_file *pDRMFile) +{ + struct drm_pvr_srvkm_cmd *psSrvkmCmd = (struct drm_pvr_srvkm_cmd *) arg; + PVRSRV_BRIDGE_PACKAGE sBridgePackageKM = { 0 }; + CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pDRMFile->filp); + PVRSRV_ERROR error; + + if (psConnection == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Connection is closed", __func__)); + return -EFAULT; + } + + PVR_ASSERT(psSrvkmCmd != NULL); + + DRM_DEBUG("tgid=%d, tgid_connection=%d, bridge_id=%d, func_id=%d", + task_tgid_nr(current), + ((ENV_CONNECTION_DATA *)PVRSRVConnectionPrivateData(psConnection))->owner, + psSrvkmCmd->bridge_id, + psSrvkmCmd->bridge_func_id); + + if ((error = PVRSRVDriverThreadEnter()) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVDriverThreadEnter failed: %s", + __func__, + PVRSRVGetErrorString(error))); + goto e0; + } + + sBridgePackageKM.ui32BridgeID = psSrvkmCmd->bridge_id; + sBridgePackageKM.ui32FunctionID = psSrvkmCmd->bridge_func_id; + sBridgePackageKM.ui32Size = sizeof(sBridgePackageKM); + sBridgePackageKM.pvParamIn = (void __user *)(uintptr_t)psSrvkmCmd->in_data_ptr; + sBridgePackageKM.ui32InBufferSize = psSrvkmCmd->in_data_size; + sBridgePackageKM.pvParamOut = (void __user *)(uintptr_t)psSrvkmCmd->out_data_ptr; + sBridgePackageKM.ui32OutBufferSize = psSrvkmCmd->out_data_size; + + error = BridgedDispatchKM(psConnection, &sBridgePackageKM); + + PVRSRVDriverThreadExit(); + +e0: + return 
OSPVRSRVToNativeError(error); +} + +int +PVRSRV_MMap(struct file *pFile, struct vm_area_struct *ps_vma) +{ + CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile); + IMG_HANDLE hSecurePMRHandle = (IMG_HANDLE)((uintptr_t)ps_vma->vm_pgoff); + PMR *psPMR; + PVRSRV_ERROR eError; + + if (psConnection == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "Invalid connection data")); + return -ENOENT; + } + + /* + * The bridge lock used here to protect PVRSRVLookupHandle is replaced + * by a specific lock considering that the handle functions have now + * their own lock. This change was necessary to solve the lockdep issues + * related with the PVRSRV_MMap. + */ + + eError = PVRSRVLookupHandle(psConnection->psHandleBase, + (void **)&psPMR, + hSecurePMRHandle, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (eError != PVRSRV_OK) + { + goto e0; + } + + mutex_lock(&g_sMMapMutex); + /* Note: PMRMMapPMR will take a reference on the PMR. + * Unref the handle immediately, because we have now done + * the required operation on the PMR (whether it succeeded or not) + */ + eError = PMRMMapPMR(psPMR, ps_vma); + mutex_unlock(&g_sMMapMutex); + PVRSRVReleaseHandle(psConnection->psHandleBase, hSecurePMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PMRMMapPMR failed (%s)", + __func__, PVRSRVGetErrorString(eError))); + goto e0; + } + + + return 0; + +e0: + + PVR_DPF((PVR_DBG_ERROR, "Unable to translate error %d", eError)); + PVR_ASSERT(eError != PVRSRV_OK); + + return -ENOENT; // -EAGAIN // or what? +} diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_bridge_k.h b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_bridge_k.h new file mode 100644 index 000000000000..10680ea0ac49 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_bridge_k.h @@ -0,0 +1,103 @@ +/*************************************************************************/ /*! 
+@File +@Title PVR Bridge Module (kernel side) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Receives calls from the user portion of services and + despatches them to functions in the kernel portion. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _PVR_BRIDGE_K_H_ +#define _PVR_BRIDGE_K_H_ + +#include "pvrsrv_error.h" + +/*! +****************************************************************************** + @Function LinuxBridgeBlockClientsAccess + @Description This function will wait for any existing threads in the Server + to exit and then disable access to the driver. New threads will + not be allowed to enter the Server until the driver is + unsuspended (see LinuxBridgeUnblockClientsAccess). + @Input bShutdown this flag indicates that the function was called + from a shutdown callback and therefore it will + not wait for the kernel threads to get frozen + (because this doesn't happen during shutdown + procedure) + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR LinuxBridgeBlockClientsAccess(IMG_BOOL bShutdown); + +/*! +****************************************************************************** + @Function LinuxBridgeUnblockClientsAccess + @Description This function will re-enable the bridge and allow any threads + waiting to enter the Server to continue. 
+ @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR LinuxBridgeUnblockClientsAccess(void); + +void LinuxBridgeNumActiveKernelThreadsIncrement(void); +void LinuxBridgeNumActiveKernelThreadsDecrement(void); + +/*! +****************************************************************************** + @Function PVRSRVDriverThreadEnter + @Description Increments number of client threads currently operating + in the driver's context. + If the driver is currently being suspended this function + will call try_to_freeze() on behalf of the client thread. + When the driver is resumed the function will exit and allow + the thread into the driver. + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVDriverThreadEnter(void); + +/*! +****************************************************************************** + @Function PVRSRVDriverThreadExit + @Description Decrements the number of client threads currently operating + in the driver's context to match the call to + PVRSRVDriverThreadEnter(). + The function also signals the driver that a thread left the + driver context so if it's waiting to suspend it knows that + the number of threads decreased. +******************************************************************************/ +void PVRSRVDriverThreadExit(void); + +#endif /* _PVR_BRIDGE_K_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_debug.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_debug.c new file mode 100644 index 000000000000..2b2f54c025e2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_debug.c @@ -0,0 +1,1860 @@ +/*************************************************************************/ /*! +@File +@Title Debug Functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provides kernel side Debug Functionality. 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "allocmem.h" +#include "pvrversion.h" +#include "img_types.h" +#include "img_defs.h" +#include "servicesext.h" +#include "pvr_debug.h" +#include "srvkm.h" +#include "linkage.h" +#include "pvr_uaccess.h" +#include "pvrsrv.h" +#include "lists.h" +#include "osfunc.h" +#include "di_server.h" + +#include "rgx_options.h" + +#if defined(SUPPORT_RGX) +#include "rgxdevice.h" +#include "rgxdebug.h" +#include "rgxinit.h" +#include "rgxfwutils.h" +#include "sofunc_rgx.h" +/* Handle used by DebugFS to get GPU utilisation stats */ +static IMG_HANDLE ghGpuUtilUserDebugFS; +#endif + +#if defined(PVRSRV_NEED_PVR_DPF) + +/******** BUFFERED LOG MESSAGES ********/ + +/* Because we don't want to have to handle CCB wrapping, each buffered + * message is rounded up to PVRSRV_DEBUG_CCB_MESG_MAX bytes. This means + * there is the same fixed number of messages that can be stored, + * regardless of message length. 
+ */ + +#if defined(PVRSRV_DEBUG_CCB_MAX) + +#define PVRSRV_DEBUG_CCB_MESG_MAX PVR_MAX_DEBUG_MESSAGE_LEN + +#include +#include + +typedef struct +{ + const IMG_CHAR *pszFile; + IMG_INT iLine; + IMG_UINT32 ui32TID; + IMG_UINT32 ui32PID; + IMG_CHAR pcMesg[PVRSRV_DEBUG_CCB_MESG_MAX]; + struct timeval sTimeVal; +} +PVRSRV_DEBUG_CCB; + +static PVRSRV_DEBUG_CCB gsDebugCCB[PVRSRV_DEBUG_CCB_MAX]; + +static IMG_UINT giOffset; + +/* protects access to gsDebugCCB */ +static DEFINE_SPINLOCK(gsDebugCCBLock); + +static void +AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line, + const IMG_CHAR *szBuffer) +{ + unsigned long uiFlags; + + spin_lock_irqsave(&gsDebugCCBLock, uiFlags); + + gsDebugCCB[giOffset].pszFile = pszFileName; + gsDebugCCB[giOffset].iLine = ui32Line; + gsDebugCCB[giOffset].ui32TID = current->pid; + gsDebugCCB[giOffset].ui32PID = current->tgid; + + do_gettimeofday(&gsDebugCCB[giOffset].sTimeVal); + + OSStringLCopy(gsDebugCCB[giOffset].pcMesg, szBuffer, + PVRSRV_DEBUG_CCB_MESG_MAX); + + giOffset = (giOffset + 1) % PVRSRV_DEBUG_CCB_MAX; + + spin_unlock_irqrestore(&gsDebugCCBLock, uiFlags); +} + +void PVRSRVDebugPrintfDumpCCB(void) +{ + int i; + unsigned long uiFlags; + + spin_lock_irqsave(&gsDebugCCBLock, uiFlags); + + for (i = 0; i < PVRSRV_DEBUG_CCB_MAX; i++) + { + PVRSRV_DEBUG_CCB *psDebugCCBEntry = + &gsDebugCCB[(giOffset + i) % PVRSRV_DEBUG_CCB_MAX]; + + /* Early on, we won't have PVRSRV_DEBUG_CCB_MAX messages */ + if (!psDebugCCBEntry->pszFile) + { + continue; + } + + printk(KERN_ERR "%s:%d: (%ld.%ld, tid=%u, pid=%u) %s\n", + psDebugCCBEntry->pszFile, + psDebugCCBEntry->iLine, + (long)psDebugCCBEntry->sTimeVal.tv_sec, + (long)psDebugCCBEntry->sTimeVal.tv_usec, + psDebugCCBEntry->ui32TID, + psDebugCCBEntry->ui32PID, + psDebugCCBEntry->pcMesg); + + /* Clear this entry so it doesn't get printed the next time again. 
*/ + psDebugCCBEntry->pszFile = NULL; + } + + spin_unlock_irqrestore(&gsDebugCCBLock, uiFlags); +} + +#else /* defined(PVRSRV_DEBUG_CCB_MAX) */ + +static INLINE void +AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line, + const IMG_CHAR *szBuffer) +{ + (void)pszFileName; + (void)szBuffer; + (void)ui32Line; +} + +void PVRSRVDebugPrintfDumpCCB(void) +{ + /* Not available */ +} + +#endif /* defined(PVRSRV_DEBUG_CCB_MAX) */ + +#endif /* defined(PVRSRV_NEED_PVR_DPF) */ + +#if defined(PVRSRV_NEED_PVR_DPF) + +#define PVR_MAX_FILEPATH_LEN 256 + +#if !defined(PVR_TESTING_UTILS) +static +#endif +IMG_UINT32 gPVRDebugLevel = + ( + DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING | 0x8 | 0x20 | 0x100 + +#if defined(PVRSRV_DEBUG_CCB_MAX) + | DBGPRIV_BUFFERED +#endif /* defined(PVRSRV_DEBUG_CCB_MAX) */ + +#if defined(PVR_DPF_ADHOC_DEBUG_ON) + | DBGPRIV_DEBUG +#endif /* defined(PVR_DPF_ADHOC_DEBUG_ON) */ + ); + +module_param(gPVRDebugLevel, uint, 0644); +MODULE_PARM_DESC(gPVRDebugLevel, + "Sets the level of debug output (default 0x7)"); + +#endif /* defined(PVRSRV_NEED_PVR_DPF) || defined(PVRSRV_NEED_PVR_TRACE) */ + +#define PVR_MAX_MSG_LEN PVR_MAX_DEBUG_MESSAGE_LEN + +/* Message buffer for messages */ +static IMG_CHAR gszBuffer[PVR_MAX_MSG_LEN + 1]; + +/* The lock is used to control access to gszBuffer */ +static DEFINE_SPINLOCK(gsDebugLock); + +/* + * Append a string to a buffer using formatted conversion. + * The function takes a variable number of arguments, pointed + * to by the var args list. 
+ */ +__printf(3, 0) +static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, va_list VArgs) +{ + IMG_UINT32 ui32Used; + IMG_UINT32 ui32Space; + IMG_INT32 i32Len; + + ui32Used = OSStringLength(pszBuf); + BUG_ON(ui32Used >= ui32BufSiz); + ui32Space = ui32BufSiz - ui32Used; + + i32Len = vsnprintf(&pszBuf[ui32Used], ui32Space, pszFormat, VArgs); + pszBuf[ui32BufSiz - 1] = 0; + + /* Return true if string was truncated */ + return i32Len < 0 || i32Len >= (IMG_INT32)ui32Space; +} + +/*************************************************************************/ /*! +@Function PVRSRVReleasePrintf +@Description To output an important message to the user in release builds +@Input pszFormat The message format string +@Input ... Zero or more arguments for use by the format string +*/ /**************************************************************************/ +void PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...) +{ + va_list vaArgs; + unsigned long ulLockFlags = 0; + IMG_CHAR *pszBuf = gszBuffer; + IMG_UINT32 ui32BufSiz = sizeof(gszBuffer); + IMG_INT32 result; + + va_start(vaArgs, pszFormat); + + spin_lock_irqsave(&gsDebugLock, ulLockFlags); + + result = snprintf(pszBuf, (ui32BufSiz - 2), "PVR_K: %u: ", current->pid); + PVR_ASSERT(result>0); + ui32BufSiz -= result; + + if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs)) + { + printk(KERN_INFO "%s (truncated)\n", pszBuf); + } + else + { + printk(KERN_INFO "%s\n", pszBuf); + } + + spin_unlock_irqrestore(&gsDebugLock, ulLockFlags); + va_end(vaArgs); +} + +#if defined(PVRSRV_NEED_PVR_TRACE) + +/*************************************************************************/ /*! +@Function PVRTrace +@Description To output a debug message to the user +@Input pszFormat The message format string +@Input ... Zero or more arguments for use by the format string +*/ /**************************************************************************/ +void PVRSRVTrace(const IMG_CHAR *pszFormat, ...) 
+{ + va_list VArgs; + unsigned long ulLockFlags = 0; + IMG_CHAR *pszBuf = gszBuffer; + IMG_UINT32 ui32BufSiz = sizeof(gszBuffer); + IMG_INT32 result; + + va_start(VArgs, pszFormat); + + spin_lock_irqsave(&gsDebugLock, ulLockFlags); + + result = snprintf(pszBuf, (ui32BufSiz - 2), "PVR: %u: ", current->pid); + PVR_ASSERT(result>0); + ui32BufSiz -= result; + + if (VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs)) + { + printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf); + } + else + { + printk(KERN_ERR "%s\n", pszBuf); + } + + spin_unlock_irqrestore(&gsDebugLock, ulLockFlags); + + va_end(VArgs); +} + +#endif /* defined(PVRSRV_NEED_PVR_TRACE) */ + +#if defined(PVRSRV_NEED_PVR_DPF) + +/* + * Append a string to a buffer using formatted conversion. + * The function takes a variable number of arguments, calling + * VBAppend to do the actual work. + */ +__printf(3, 4) +static IMG_BOOL BAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, ...) +{ + va_list VArgs; + IMG_BOOL bTrunc; + + va_start (VArgs, pszFormat); + + bTrunc = VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs); + + va_end (VArgs); + + return bTrunc; +} + +/*************************************************************************/ /*! +@Function PVRSRVDebugPrintf +@Description To output a debug message to the user +@Input uDebugLevel The current debug level +@Input pszFile The source file generating the message +@Input uLine The line of the source file +@Input pszFormat The message format string +@Input ... Zero or more arguments for use by the format string +*/ /**************************************************************************/ +void PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel, + const IMG_CHAR *pszFullFileName, + IMG_UINT32 ui32Line, + const IMG_CHAR *pszFormat, + ...) 
+{ + const IMG_CHAR *pszFileName = pszFullFileName; + IMG_CHAR *pszLeafName; + va_list vaArgs; + unsigned long ulLockFlags = 0; + IMG_CHAR *pszBuf = gszBuffer; + IMG_UINT32 ui32BufSiz = sizeof(gszBuffer); + + if (!(gPVRDebugLevel & ui32DebugLevel)) + { + return; + } + + va_start(vaArgs, pszFormat); + + spin_lock_irqsave(&gsDebugLock, ulLockFlags); + + switch (ui32DebugLevel) + { + case DBGPRIV_FATAL: + { + OSStringLCopy(pszBuf, "PVR_K:(Fatal): ", ui32BufSiz); + break; + } + case DBGPRIV_ERROR: + { + OSStringLCopy(pszBuf, "PVR_K:(Error): ", ui32BufSiz); + break; + } + case DBGPRIV_WARNING: + { + OSStringLCopy(pszBuf, "PVR_K:(Warn): ", ui32BufSiz); + break; + } + case DBGPRIV_MESSAGE: + { + OSStringLCopy(pszBuf, "PVR_K:(Mesg): ", ui32BufSiz); + break; + } + case DBGPRIV_VERBOSE: + { + OSStringLCopy(pszBuf, "PVR_K:(Verb): ", ui32BufSiz); + break; + } + case DBGPRIV_DEBUG: + { + OSStringLCopy(pszBuf, "PVR_K:(Debug): ", ui32BufSiz); + break; + } + case DBGPRIV_CALLTRACE: + case DBGPRIV_ALLOC: + case DBGPRIV_BUFFERED: + default: + { + OSStringLCopy(pszBuf, "PVR_K: ", ui32BufSiz); + break; + } + } + + if (current->pid == task_tgid_nr(current)) + { + (void) BAppend(pszBuf, ui32BufSiz, "%5u: ", current->pid); + } + else + { + (void) BAppend(pszBuf, ui32BufSiz, "%5u-%5u: ", task_tgid_nr(current) /* pid id of group*/, current->pid /* task id */); + } + + if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs)) + { + printk(KERN_ERR "%s (truncated)\n", pszBuf); + } + else + { + IMG_BOOL bTruncated = IMG_FALSE; + +#if !defined(__sh__) + pszLeafName = (IMG_CHAR *)strrchr (pszFileName, '/'); + + if (pszLeafName) + { + pszFileName = pszLeafName+1; + } +#endif /* __sh__ */ + +#if defined(DEBUG) + { + static const IMG_CHAR *lastFile; + + if (lastFile == pszFileName) + { + bTruncated = BAppend(pszBuf, ui32BufSiz, " [%u]", ui32Line); + } + else + { + bTruncated = BAppend(pszBuf, ui32BufSiz, " [%s:%u]", pszFileName, ui32Line); + lastFile = pszFileName; + } + } +#else + bTruncated = 
BAppend(pszBuf, ui32BufSiz, " [%u]", ui32Line); +#endif + + if (bTruncated) + { + printk(KERN_ERR "%s (truncated)\n", pszBuf); + } + else + { + if (ui32DebugLevel & DBGPRIV_BUFFERED) + { + AddToBufferCCB(pszFileName, ui32Line, pszBuf); + } + else + { + printk(KERN_ERR "%s\n", pszBuf); + } + } + } + + spin_unlock_irqrestore(&gsDebugLock, ulLockFlags); + + va_end (vaArgs); +} + +#endif /* PVRSRV_NEED_PVR_DPF */ + + +/*************************************************************************/ /*! + Version DebugFS entry +*/ /**************************************************************************/ + +static void *_DebugVersionCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, + va_list va) +{ + IMG_UINT64 *pui64CurrentPosition = va_arg(va, IMG_UINT64 *); + IMG_UINT64 ui64Position = va_arg(va, IMG_UINT64); + IMG_UINT64 ui64CurrentPosition = *pui64CurrentPosition; + + (*pui64CurrentPosition)++; + + return (ui64CurrentPosition == ui64Position) ? psDevNode : NULL; +} + +static void *_VersionStartOp(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + IMG_UINT64 uiCurrentPosition = 1; + + if (psPVRSRVData == NULL) { + PVR_DPF((PVR_DBG_ERROR, "psPVRSRVData = NULL")); + return NULL; + } + + if (*pui64Pos == 0) + { + return DI_START_TOKEN; + } + + return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + _DebugVersionCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +static void _VersionStopOp(OSDI_IMPL_ENTRY *psEntry, void *pvPriv) +{ + PVR_UNREFERENCED_PARAMETER(psEntry); + PVR_UNREFERENCED_PARAMETER(pvPriv); +} + +static void *_VersionNextOp(OSDI_IMPL_ENTRY *psEntry,void *pvPriv, + IMG_UINT64 *pui64Pos) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + IMG_UINT64 uiCurrentPosition = 1; + + (*pui64Pos)++; + + return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + _DebugVersionCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +#define DI_PRINT_VERSION_FMTSPEC "%s 
Version: %u.%u @ %u (%s) build options: 0x%08x %s\n" +#define STR_DEBUG "debug" +#define STR_RELEASE "release" + +#if !defined(PVR_ARCH_NAME) +#define PVR_ARCH_NAME "Unknown" +#endif + +static int _VersionShowOp(OSDI_IMPL_ENTRY *psEntry, void *pvPriv) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + + if (pvPriv == DI_START_TOKEN) + { + if (psPVRSRVData->sDriverInfo.bIsNoMatch) + { + const BUILD_INFO *psBuildInfo; + + psBuildInfo = &psPVRSRVData->sDriverInfo.sUMBuildInfo; + DIPrintf(psEntry, DI_PRINT_VERSION_FMTSPEC, + "UM Driver", + PVRVERSION_UNPACK_MAJ(psBuildInfo->ui32BuildVersion), + PVRVERSION_UNPACK_MIN(psBuildInfo->ui32BuildVersion), + psBuildInfo->ui32BuildRevision, + (psBuildInfo->ui32BuildType == BUILD_TYPE_DEBUG) ? STR_DEBUG : STR_RELEASE, + psBuildInfo->ui32BuildOptions, + PVR_BUILD_DIR); + + psBuildInfo = &psPVRSRVData->sDriverInfo.sKMBuildInfo; + DIPrintf(psEntry, DI_PRINT_VERSION_FMTSPEC, + "KM Driver (" PVR_ARCH_NAME ")", + PVRVERSION_UNPACK_MAJ(psBuildInfo->ui32BuildVersion), + PVRVERSION_UNPACK_MIN(psBuildInfo->ui32BuildVersion), + psBuildInfo->ui32BuildRevision, + (psBuildInfo->ui32BuildType == BUILD_TYPE_DEBUG) ? STR_DEBUG : STR_RELEASE, + psBuildInfo->ui32BuildOptions, + PVR_BUILD_DIR); + } + else + { + /* bIsNoMatch is `false` in one of the following cases: + * - UM & KM version parameters actually match. + * - A comparison between UM & KM has not been made yet, because no + * client ever connected. + * + * In both cases, available (KM) version info is the best output we + * can provide. 
+ */ + DIPrintf(psEntry, "Driver Version: %s (%s) (%s) build options: " + "0x%08lx %s\n", PVRVERSION_STRING, PVR_ARCH_NAME, + PVR_BUILD_TYPE, RGX_BUILD_OPTIONS_KM, PVR_BUILD_DIR); + } + } + else if (pvPriv != NULL) + { + PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *) pvPriv; + PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; +#if defined(SUPPORT_RGX) + PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; +#endif + IMG_BOOL bFwVersionInfoPrinted = IMG_FALSE; + + DIPrintf(psEntry, "\nDevice Name: %s\n", psDevConfig->pszName); + + if (psDevConfig->pszVersion) + { + DIPrintf(psEntry, "Device Version: %s\n", + psDevConfig->pszVersion); + } + + if (psDevNode->pfnDeviceVersionString) + { + IMG_CHAR *pszDeviceVersionString; + + if (psDevNode->pfnDeviceVersionString(psDevNode, &pszDeviceVersionString) == PVRSRV_OK) + { + DIPrintf(psEntry, "%s\n", pszDeviceVersionString); + + OSFreeMem(pszDeviceVersionString); + } + } +#if defined(SUPPORT_RGX) + /* print device's firmware version info */ + if (psDevInfo->psRGXFWIfOsInitMemDesc != NULL) + { + /* psDevInfo->psRGXFWIfOsInitMemDesc should be permanently mapped */ + if (psDevInfo->psRGXFWIfOsInit != NULL) + { + if (psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated) + { + const RGXFWIF_COMPCHECKS *psRGXCompChecks = + &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks; + + DIPrintf(psEntry, DI_PRINT_VERSION_FMTSPEC, + "Firmware", + PVRVERSION_UNPACK_MAJ(psRGXCompChecks->ui32DDKVersion), + PVRVERSION_UNPACK_MIN(psRGXCompChecks->ui32DDKVersion), + psRGXCompChecks->ui32DDKBuild, + ((psRGXCompChecks->ui32BuildOptions & OPTIONS_DEBUG_MASK) ? 
+ STR_DEBUG : STR_RELEASE), + psRGXCompChecks->ui32BuildOptions, + PVR_BUILD_DIR); + bFwVersionInfoPrinted = IMG_TRUE; + } + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: Error acquiring CPU virtual " + "address of FWInitMemDesc", __func__)); + } + } +#endif + + if (!bFwVersionInfoPrinted) + { + DIPrintf(psEntry, "Firmware Version: Info unavailable %s\n", +#if defined(NO_HARDWARE) + "on NoHW driver" +#else + "(Is INIT complete?)" +#endif + ); + } + } + + return 0; +} + +#if defined(SUPPORT_RGX) && defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) +/*************************************************************************/ /*! + Power data DebugFS entry +*/ /**************************************************************************/ + +static void *_DebugPowerDataCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, + va_list va) +{ + IMG_UINT64 *pui64CurrentPosition = va_arg(va, IMG_UINT64 *); + IMG_UINT64 ui64Position = va_arg(va, IMG_UINT64; + IMG_UINT64 ui64CurrentPosition = *pui64CurrentPosition; + + (*puiCurrentPosition)++; + + return (ui64CurrentPosition == ui64Position) ? 
psDevNode : NULL; +} + +static void *_DebugPowerDataDIStart(OSDI_IMPL_ENTRY *psEntry, + IMG_UINT64 *pui64Pos) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + IMG_UINT64 uiCurrentPosition = 0; + + return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + _DebugPowerDataCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +static void _DebugPowerDataDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVR_UNREFERENCED_PARAMETER(psEntry); + PVR_UNREFERENCED_PARAMETER(pvData); +} + +static void *_DebugPowerDataDINext(OSDI_IMPL_ENTRY *psEntry, + void *pvData, + IMG_UINT64 *pui64Pos) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + IMG_UINT64 uiCurrentPosition = 0; + + PVR_UNREFERENCED_PARAMETER(pvData); + + (*pui64Pos)++; + + return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + _DebugPowerDataCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +static PVRSRV_ERROR SendPowerCounterCommand(PVRSRV_DEVICE_NODE* psDeviceNode, + RGXFWIF_COUNTER_DUMP_REQUEST eRequestType, + IMG_UINT32 *pui32kCCBCommandSlot) +{ + PVRSRV_ERROR eError; + + RGXFWIF_KCCB_CMD sCounterDumpCmd; + + sCounterDumpCmd.eCmdType = RGXFWIF_KCCB_CMD_COUNTER_DUMP; + sCounterDumpCmd.uCmdData.sCounterDumpConfigData.eCounterDumpRequest = eRequestType; + + eError = RGXScheduleCommandAndGetKCCBSlot(psDeviceNode->pvDevice, + RGXFWIF_DM_GP, + &sCounterDumpCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + pui32kCCBCommandSlot); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SendPowerCounterCommand: RGXScheduleCommandAndGetKCCBSlot failed. Error:%u", eError)); + } + + return eError; +} + +static void *_IsDevNodeNotInitialised(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + return psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_ACTIVE ? 
NULL : psDeviceNode; +} + +static void _SendPowerCounterCommand(PVRSRV_DEVICE_NODE* psDeviceNode, + va_list va) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT32 ui32kCCBCommandSlot; + + OSLockAcquire(psDevInfo->hCounterDumpingLock); + + SendPowerCounterCommand(psDeviceNode, va_arg(va, RGXFWIF_COUNTER_DUMP_REQUEST), &ui32kCCBCommandSlot); + + OSLockRelease(psDevInfo->hCounterDumpingLock); +} + +static int _DebugPowerDataDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + IMG_UINT32 ui32kCCBCommandSlot; + PVRSRV_ERROR eError = PVRSRV_OK; + + if (pvData != NULL) + { + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) + { + PVR_DPF((PVR_DBG_ERROR, "Not all device nodes were initialised when power counter data was requested!")); + return -EIO; + } + + OSLockAcquire(psDevInfo->hCounterDumpingLock); + + eError = SendPowerCounterCommand(psDeviceNode, RGXFWIF_PWR_COUNTER_DUMP_SAMPLE, &ui32kCCBCommandSlot); + + if (eError != PVRSRV_OK) + { + OSLockRelease(psDevInfo->hCounterDumpingLock); + return -EIO; + } + + /* Wait for FW complete completion */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: RGXWaitForKCCBSlotUpdate failed (%u)", + __func__, + eError)); + OSLockRelease(psDevInfo->hCounterDumpingLock); + return -EIO; + } + + /* Read back the buffer */ + { + IMG_UINT32* pui32PowerBuffer; + IMG_UINT32 ui32NumOfRegs, ui32SamplePeriod; + IMG_UINT32 i, j; + + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psCounterBufferMemDesc, (void**)&pui32PowerBuffer); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire buffer memory mapping (%u)", + __func__, + eError)); + OSLockRelease(psDevInfo->hCounterDumpingLock); + return -EIO; + } + + ui32NumOfRegs = *pui32PowerBuffer++; + ui32SamplePeriod 
= *pui32PowerBuffer++; + + if (ui32NumOfRegs) + { + DIPrintf(psEntry, "Power counter data for device id: %d\n", psDeviceNode->sDevId.i32UMIdentifier); + DIPrintf(psEntry, "Sample period: 0x%08x\n", ui32SamplePeriod); + + for (i = 0; i < ui32NumOfRegs; i++) + { + IMG_UINT32 ui32High, ui32Low; + IMG_UINT32 ui32RegOffset = *pui32PowerBuffer++; + IMG_UINT32 ui32NumOfInstances = *pui32PowerBuffer++; + + PVR_ASSERT(ui32NumOfInstances); + + DIPrintf(psEntry, "0x%08x:", ui32RegOffset); + + for (j = 0; j < ui32NumOfInstances; j++) + { + ui32Low = *pui32PowerBuffer++; + ui32High = *pui32PowerBuffer++; + + DIPrintf(psEntry, " 0x%016llx", (IMG_UINT64)ui32Low | (IMG_UINT64)ui32High << 32); + } + + DIPrintf(psEntry, "\n"); + } + } + + DevmemReleaseCpuVirtAddr(psDevInfo->psCounterBufferMemDesc); + } + + OSLockRelease(psDevInfo->hCounterDumpingLock); + } + + return eError; +} + +static IMG_INT64 PowerDataSet(const IMG_CHAR __user *pcBuffer, + IMG_UINT64 ui64Count, IMG_UINT64 *pui64Pos, + void *pvData) +{ + PVRSRV_DATA* psPVRSRVData = (PVRSRV_DATA*) pvData; + RGXFWIF_COUNTER_DUMP_REQUEST eRequest; + + PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); + PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO); + PVR_RETURN_IF_FALSE(ui64Count >= 1, -EINVAL); + PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); + + if (List_PVRSRV_DEVICE_NODE_Any(psPVRSRVData->psDeviceNodeList, + _IsDevNodeNotInitialised)) + { + PVR_DPF((PVR_DBG_ERROR, "Not all device nodes were initialised when " + "power counter data was requested!")); + return -EIO; + } + + if (pcBuffer[0] == '1') + { + eRequest = RGXFWIF_PWR_COUNTER_DUMP_START; + } + else if (pcBuffer[0] == '0') + { + eRequest = RGXFWIF_PWR_COUNTER_DUMP_STOP; + } + else + { + return -EINVAL; + } + + List_PVRSRV_DEVICE_NODE_ForEach_va(psPVRSRVData->psDeviceNodeList, + _SendPowerCounterCommand, eRequest); + + *pui64Pos += ui64Count; + return ui64Count; +} + +#endif /* SUPPORT_RGX && SUPPORT_POWER_SAMPLING_VIA_DEBUGFS*/ 
+/*************************************************************************/ /*! + Status DebugFS entry +*/ /**************************************************************************/ + +static void *_DebugStatusCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, + va_list va) +{ + IMG_UINT64 *pui64CurrentPosition = va_arg(va, IMG_UINT64 *); + IMG_UINT64 ui64Position = va_arg(va, IMG_UINT64); + IMG_UINT64 ui64CurrentPosition = *pui64CurrentPosition; + + (*pui64CurrentPosition)++; + + return (ui64CurrentPosition == ui64Position) ? psDevNode : NULL; +} + +static void *_DebugStatusDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + IMG_UINT64 uiCurrentPosition = 1; + + if (*pui64Pos == 0) + { + return DI_START_TOKEN; + } + + return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + _DebugStatusCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +static void _DebugStatusDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVR_UNREFERENCED_PARAMETER(psEntry); + PVR_UNREFERENCED_PARAMETER(pvData); +} + +static void *_DebugStatusDINext(OSDI_IMPL_ENTRY *psEntry, + void *pvData, + IMG_UINT64 *pui64Pos) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + IMG_UINT64 uiCurrentPosition = 1; + + PVR_UNREFERENCED_PARAMETER(pvData); + + (*pui64Pos)++; + + return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + _DebugStatusCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +static int _DebugStatusDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + if (pvData == DI_START_TOKEN) + { + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + + if (psPVRSRVData != NULL) + { + switch (psPVRSRVData->eServicesState) + { + case PVRSRV_SERVICES_STATE_OK: + DIPrintf(psEntry, "Driver Status: OK\n"); + break; + case PVRSRV_SERVICES_STATE_BAD: + DIPrintf(psEntry, "Driver Status: BAD\n"); + break; + case PVRSRV_SERVICES_STATE_UNDEFINED: + DIPrintf(psEntry, "Driver Status: UNDEFINED\n"); + 
break; + default: + DIPrintf(psEntry, "Driver Status: UNKNOWN (%d)\n", psPVRSRVData->eServicesState); + break; + } + } + } + else if (pvData != NULL) + { + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; + IMG_CHAR *pszStatus = ""; + IMG_CHAR *pszReason = ""; + PVRSRV_DEVICE_HEALTH_STATUS eHealthStatus; + PVRSRV_DEVICE_HEALTH_REASON eHealthReason; + + /* Update the health status now if possible... */ + if (psDeviceNode->pfnUpdateHealthStatus) + { + psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, IMG_FALSE); + } + eHealthStatus = OSAtomicRead(&psDeviceNode->eHealthStatus); + eHealthReason = OSAtomicRead(&psDeviceNode->eHealthReason); + + switch (eHealthStatus) + { + case PVRSRV_DEVICE_HEALTH_STATUS_OK: pszStatus = "OK"; break; + case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: pszStatus = "NOT RESPONDING"; break; + case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: pszStatus = "DEAD"; break; + case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: pszStatus = "FAULT"; break; + case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: pszStatus = "UNDEFINED"; break; + default: pszStatus = "UNKNOWN"; break; + } + + switch (eHealthReason) + { + case PVRSRV_DEVICE_HEALTH_REASON_NONE: pszReason = ""; break; + case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: pszReason = " (Asserted)"; break; + case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: pszReason = " (Poll failing)"; break; + case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: pszReason = " (Global Event Object timeouts rising)"; break; + case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: pszReason = " (KCCB offset invalid)"; break; + case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: pszReason = " (KCCB stalled)"; break; + case PVRSRV_DEVICE_HEALTH_REASON_IDLING: pszReason = " (Idling)"; break; + case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: pszReason = " (Restarting)"; break; + case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS: pszReason = " (Missing interrupts)"; break; + default: pszReason = " (Unknown reason)"; break; + } + + DIPrintf(psEntry, "Firmware 
Status: %s%s\n", pszStatus, pszReason); + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + /* + * Guest drivers do not support the following functionality: + * - Perform actual on-chip fw tracing. + * - Collect actual on-chip GPU utilization stats. + * - Perform actual on-chip GPU power/dvfs management. + * - As a result no more information can be provided. + */ + return 0; + } + + /* Write other useful stats to aid the test cycle... */ + if (psDeviceNode->pvDevice != NULL) + { +#if defined(SUPPORT_RGX) + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_HWRINFOBUF *psHWRInfoBuf = psDevInfo->psRGXFWIfHWRInfoBufCtl; + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + + /* Calculate the number of HWR events in total across all the DMs... */ + if (psHWRInfoBuf != NULL) + { + IMG_UINT32 ui32HWREventCount = 0; + IMG_UINT32 ui32CRREventCount = 0; + IMG_UINT32 ui32DMIndex; + + for (ui32DMIndex = 0; ui32DMIndex < RGXFWIF_DM_MAX; ui32DMIndex++) + { + ui32HWREventCount += psHWRInfoBuf->aui32HwrDmLockedUpCount[ui32DMIndex]; + ui32CRREventCount += psHWRInfoBuf->aui32HwrDmOverranCount[ui32DMIndex]; + } + + DIPrintf(psEntry, "HWR Event Count: %d\n", ui32HWREventCount); + DIPrintf(psEntry, "CRR Event Count: %d\n", ui32CRREventCount); +#if defined(PVRSRV_STALLED_CCB_ACTION) + /* Write the number of Sync Lockup Recovery (SLR) events... */ + DIPrintf(psEntry, "SLR Event Count: %d\n", psDevInfo->psRGXFWIfFwOsData->ui32ForcedUpdatesRequested); +#endif + } + + if (psFwSysData != NULL) + { + DIPrintf(psEntry, "FWF Event Count: %d\n", psFwSysData->ui32FWFaults); + } + + /* Write the number of APM events... */ + DIPrintf(psEntry, "APM Event Count: %d\n", psDevInfo->ui32ActivePMReqTotal); + + /* Write the current GPU Utilisation values... 
*/ + if (psDevInfo->pfnGetGpuUtilStats && + eHealthStatus == PVRSRV_DEVICE_HEALTH_STATUS_OK) + { + RGXFWIF_GPU_UTIL_STATS sGpuUtilStats; + PVRSRV_ERROR eError = PVRSRV_OK; + + eError = psDevInfo->pfnGetGpuUtilStats(psDeviceNode, + ghGpuUtilUserDebugFS, + &sGpuUtilStats); + + if ((eError == PVRSRV_OK) && + ((IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative)) + { + IMG_UINT64 util; + IMG_UINT32 rem; + + util = 100 * sGpuUtilStats.ui64GpuStatActive; + util = OSDivide64(util, (IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative, &rem); + + DIPrintf(psEntry, "GPU Utilisation: %u%%\n", (IMG_UINT32)util); + } + else + { + DIPrintf(psEntry, "GPU Utilisation: -\n"); + } + } +#if defined(PVRSRV_DEBUG_LISR_EXECUTION) + /* Show the detected #LISR, #MISR scheduled calls */ + DIPrintf(psEntry, "RGX #LISR: %llu\n", psDeviceNode->ui64nLISR); + DIPrintf(psEntry, "RGX #MISR: %llu\n", psDeviceNode->ui64nMISR); +#endif +#endif + } + } + + return 0; +} + +static IMG_INT64 DebugStatusSet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, + IMG_UINT64 *pui64Pos, void *pvData) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); + PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO); + PVR_RETURN_IF_FALSE(ui64Count >= 1, -EINVAL); + PVR_RETURN_IF_FALSE(pcBuffer[0] == 'k' || pcBuffer[0] == 'K', -EINVAL); + PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); + + psPVRSRVData->eServicesState = PVRSRV_SERVICES_STATE_BAD; + + *pui64Pos += ui64Count; + return ui64Count; +} + +/*************************************************************************/ /*! 
+ Dump Debug DebugFS entry +*/ /**************************************************************************/ + +static void *_DebugDumpDebugCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, va_list va) +{ + IMG_UINT64 *pui64CurrentPosition = va_arg(va, IMG_UINT64 *); + IMG_UINT64 ui64Position = va_arg(va, IMG_UINT64); + IMG_UINT64 ui64CurrentPosition = *pui64CurrentPosition; + + (*pui64CurrentPosition)++; + + return (ui64CurrentPosition == ui64Position) ? psDevNode : NULL; +} + +static void *_DebugDumpDebugDIStart(OSDI_IMPL_ENTRY *psEntry, + IMG_UINT64 *pui64Pos) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + IMG_UINT64 uiCurrentPosition = 1; + + if (*pui64Pos == 0) + { + return DI_START_TOKEN; + } + + return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + _DebugDumpDebugCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +static void _DebugDumpDebugDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVR_UNREFERENCED_PARAMETER(psEntry); + PVR_UNREFERENCED_PARAMETER(pvData); +} + +static void *_DebugDumpDebugDINext(OSDI_IMPL_ENTRY *psEntry, + void *pvData, + IMG_UINT64 *pui64Pos) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + IMG_UINT64 uiCurrentPosition = 1; + + PVR_UNREFERENCED_PARAMETER(pvData); + + (*pui64Pos)++; + + return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + _DebugDumpDebugCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +static void _DumpDebugDIPrintf(void *pvDumpDebugFile, + const IMG_CHAR *pszFormat, ...) 
+{ + OSDI_IMPL_ENTRY *psEntry = pvDumpDebugFile; + IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; + va_list ArgList; + + va_start(ArgList, pszFormat); + vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFormat, ArgList); + va_end(ArgList); + DIPrintf(psEntry, "%s\n", szBuffer); +} + +static int _DebugDumpDebugDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + if (pvData != NULL && pvData != DI_START_TOKEN) + { + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; + + if (psDeviceNode->pvDevice != NULL) + { + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, + _DumpDebugDIPrintf, psEntry); + } + } + + return 0; +} + +#if defined(SUPPORT_RGX) +/*************************************************************************/ /*! + Firmware Trace DebugFS entry +*/ /**************************************************************************/ +static void *_DebugFWTraceCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, va_list va) +{ + IMG_UINT64 *pui64CurrentPosition = va_arg(va, IMG_UINT64 *); + IMG_UINT64 ui64Position = va_arg(va, IMG_UINT64); + IMG_UINT64 ui64CurrentPosition = *pui64CurrentPosition; + + (*pui64CurrentPosition)++; + + return (ui64CurrentPosition == ui64Position) ? 
psDevNode : NULL; +} + +static void *_DebugFWTraceDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + IMG_UINT64 uiCurrentPosition = 1; + + if (*pui64Pos == 0) + { + return DI_START_TOKEN; + } + + return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + _DebugFWTraceCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +static void _DebugFWTraceDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVR_UNREFERENCED_PARAMETER(psEntry); + PVR_UNREFERENCED_PARAMETER(pvData); +} + +static void *_DebugFWTraceDINext(OSDI_IMPL_ENTRY *psEntry, + void *pvData, + IMG_UINT64 *pui64Pos) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + IMG_UINT64 uiCurrentPosition = 1; + + PVR_UNREFERENCED_PARAMETER(pvData); + + (*pui64Pos)++; + + return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + _DebugFWTraceCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +static void _FWTraceDIPrintf(void *pvDumpDebugFile, + const IMG_CHAR *pszFormat, ...) 
+{ + OSDI_IMPL_ENTRY *psEntry = pvDumpDebugFile; + IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; + va_list ArgList; + + va_start(ArgList, pszFormat); + vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFormat, ArgList); + va_end(ArgList); + DIPrintf(psEntry, "%s\n", szBuffer); +} + +static int _DebugFWTraceDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + if (pvData != NULL && pvData != DI_START_TOKEN) + { + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; + + if (psDeviceNode->pvDevice != NULL) + { + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + RGXDumpFirmwareTrace(_FWTraceDIPrintf, psEntry, psDevInfo); + } + } + + return 0; +} + +#if defined(SUPPORT_FIRMWARE_GCOV) + +static PVRSRV_RGXDEV_INFO *getPsDevInfo(OSDI_IMPL_ENTRY *psEntry) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + + if (psPVRSRVData != NULL) + { + if (psPVRSRVData->psDeviceNodeList != NULL) + { + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psPVRSRVData->psDeviceNodeList->pvDevice; + return psDevInfo; + } + } + return NULL; +} + +static void *_FirmwareGcovDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = getPsDevInfo(psEntry); + + if (psDevInfo != NULL) + { + if (psDevInfo->psFirmwareGcovBufferMemDesc != NULL) + { + void *pvCpuVirtAddr; + DevmemAcquireCpuVirtAddr(psDevInfo->psFirmwareGcovBufferMemDesc, &pvCpuVirtAddr); + return *pui64Pos ? 
NULL : pvCpuVirtAddr; + } + } + + return NULL; +} + +static void _FirmwareGcovDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = getPsDevInfo(psEntry); + + PVR_UNREFERENCED_PARAMETER(pvData); + + if (psDevInfo != NULL) + { + if (psDevInfo->psFirmwareGcovBufferMemDesc != NULL) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psFirmwareGcovBufferMemDesc); + } + } +} + +static void *_FirmwareGcovDINext(OSDI_IMPL_ENTRY *psEntry, + void *pvData, + IMG_UINT64 *pui64Pos) +{ + PVR_UNREFERENCED_PARAMETER(psEntry); + PVR_UNREFERENCED_PARAMETER(pvData); + PVR_UNREFERENCED_PARAMETER(pui64Pos); + return NULL; +} + +static int _FirmwareGcovDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = getPsDevInfo(psEntry); + + if (psDevInfo != NULL) + { + DIWrite(psEntry, pvData, psDevInfo->ui32FirmwareGcovSize); + } + return 0; +} + +#endif /* defined(SUPPORT_FIRMWARE_GCOV) */ + +#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) +/*************************************************************************/ /*! + Power monitoring DebugFS entry +*/ /**************************************************************************/ +static void *_PowMonCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, va_list va) +{ + IMG_UINT64 *pui64CurrentPosition = va_arg(va, IMG_UINT64 *); + IMG_UINT64 ui64Position = va_arg(va, IMG_UINT64); + IMG_UINT64 ui64CurrentPosition = *pui64CurrentPosition; + + (*pui64CurrentPosition)++; + + return (ui64CurrentPosition == ui64Position) ? 
psDevNode : NULL; +} + +static void *_PowMonTraceDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + IMG_UINT64 uiCurrentPosition = 1; + + if (*pui64Pos == 0) + { + return DI_START_TOKEN; + } + + return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + _PowMonCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +static void _PowMonTraceDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVR_UNREFERENCED_PARAMETER(psEntry); + PVR_UNREFERENCED_PARAMETER(pvData); +} + +static void *_PowMonTraceDINext(OSDI_IMPL_ENTRY *psEntry, + void *pvData, + IMG_UINT64 *pui64Pos) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + IMG_UINT64 uiCurrentPosition = 1; + + PVR_UNREFERENCED_PARAMETER(pvData); + + (*pui64Pos)++; + + return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + _PowMonCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +static void _PowMonTraceDIPrintf(void *pvDumpDebugFile, + const IMG_CHAR *pszFormat, ...) +{ + OSDI_IMPL_ENTRY *psEntry = pvDumpDebugFile; + IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; + va_list ArgList; + + va_start(ArgList, pszFormat); + vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFormat, ArgList); + va_end(ArgList); + DIPrintf(psEntry, "%s\n", szBuffer); +} + +static int _PowMonTraceDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + if (pvData != NULL && pvData != DI_START_TOKEN) + { + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; + + if (psDeviceNode->pvDevice != NULL) + { + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + RGXDumpPowerMonitoring(_PowMonTraceDIPrintf, psEntry, psDevInfo); + } + } + + return 0; +} +#endif /* defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) */ + +#if defined(SUPPORT_VALIDATION) +/*************************************************************************/ /*! 
+ RGX Registers Dump DebugFS entry +*/ /**************************************************************************/ +static IMG_INT64 _RgxRegsSeek(IMG_UINT64 ui64Offset, void *pvData) +{ + PVRSRV_DATA *psPVRSRVData = pvData; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_LOG_RETURN_IF_FALSE(psPVRSRVData != NULL, "psPVRSRVData is NULL", -1); + + psDevInfo = psPVRSRVData->psDeviceNodeList->pvDevice; + + PVR_LOG_RETURN_IF_FALSE(ui64Offset <= (psDevInfo->ui32RegSize - 4), + "register offset is too big", -1); + + return ui64Offset; +} + +static IMG_INT64 _RgxRegsRead(IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, + IMG_UINT64 *pui64Pos, void *pvData) +{ + PVRSRV_DATA *psPVRSRVData = pvData; + IMG_UINT64 uiRegVal = 0x00; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegs; + + PVR_LOG_RETURN_IF_FALSE(psPVRSRVData != NULL, + "psPVRSRVData is NULL", -ENXIO); + PVR_LOG_RETURN_IF_FALSE(ui64Count == 4 || ui64Count == 8, + "wrong RGX register size", -EIO); + PVR_LOG_RETURN_IF_FALSE(!(*pui64Pos & (ui64Count - 1)), + "register read offset isn't aligned", -EINVAL); + + psDevInfo = psPVRSRVData->psDeviceNodeList->pvDevice; + pvRegs = psDevInfo->pvRegsBaseKM; + + uiRegVal = ui64Count == 4 ? 
+ OSReadHWReg32(pvRegs, *pui64Pos) : OSReadHWReg64(pvRegs, *pui64Pos); + + OSCachedMemCopy(pcBuffer, &uiRegVal, ui64Count); + + return ui64Count; +} + +static IMG_INT64 _RgxRegsWrite(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, + IMG_UINT64 *pui64Pos, void *pvData) +{ + PVRSRV_DATA *psPVRSRVData = pvData; + PVRSRV_RGXDEV_INFO *psDevInfo; + + /* ignore the '\0' character */ + ui64Count -= 1; + + PVR_LOG_RETURN_IF_FALSE(psPVRSRVData != NULL, + "psPVRSRVData == NULL", -ENXIO); + PVR_LOG_RETURN_IF_FALSE(ui64Count == 4 || ui64Count == 8, + "wrong RGX register size", -EIO); + PVR_LOG_RETURN_IF_FALSE(!(*pui64Pos & (ui64Count - 1)), + "register read offset isn't aligned", -EINVAL); + + psDevInfo = psPVRSRVData->psDeviceNodeList->pvDevice; + + if (ui64Count == 4) + { + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, *pui64Pos, + *((IMG_UINT32 *) (void *) pcBuffer)); + } + else + { + OSWriteHWReg64(psDevInfo->pvRegsBaseKM, *pui64Pos, + *((IMG_UINT64 *) (void *) pcBuffer)); + } + + return ui64Count; +} +#endif /* defined(SUPPORT_VALIDATION) && !defined(NO_HARDWARE) */ + +#endif /* defined(SUPPORT_RGX) */ +/*************************************************************************/ /*! 
+ Debug level DebugFS entry +*/ /**************************************************************************/ + +#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) +static void *DebugLevelDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos) +{ + if (*pui64Pos == 0) + { + return DIGetPrivData(psEntry); + } + + return NULL; +} + +static void DebugLevelDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVR_UNREFERENCED_PARAMETER(psEntry); + PVR_UNREFERENCED_PARAMETER(pvData); +} + +static void *DebugLevelDINext(OSDI_IMPL_ENTRY *psEntry, + void *pvData, + IMG_UINT64 *pui64Pos) +{ + PVR_UNREFERENCED_PARAMETER(psEntry); + PVR_UNREFERENCED_PARAMETER(pvData); + PVR_UNREFERENCED_PARAMETER(pui64Pos); + + return NULL; +} + +static int DebugLevelDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + if (pvData != NULL) + { + IMG_UINT32 uiDebugLevel = *((IMG_UINT32 *)pvData); + + DIPrintf(psEntry, "%u\n", uiDebugLevel); + + return 0; + } + + return -EINVAL; +} + +static IMG_INT64 DebugLevelSet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, + IMG_UINT64 *pui64Pos, void *pvData) +{ + IMG_UINT32 *uiDebugLevel = pvData; + const IMG_UINT uiMaxBufferSize = 6; + + PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); + PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO); + PVR_RETURN_IF_FALSE(ui64Count > 0 && ui64Count < uiMaxBufferSize, -EINVAL); + PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); + + if (sscanf(pcBuffer, "%u", &gPVRDebugLevel) == 0) + { + return -EINVAL; + } + + /* As this is Linux the next line uses a GCC builtin function */ + (*uiDebugLevel) &= (1 << __builtin_ffsl(DBGPRIV_LAST)) - 1; + + *pui64Pos += ui64Count; + return ui64Count; +} +#endif /* defined(DEBUG) */ + +static DI_ENTRY *gpsVersionDIEntry; + +static DI_ENTRY *gpsStatusDIEntry; +static DI_ENTRY *gpsDumpDebugDIEntry; + +#if defined(SUPPORT_RGX) +static DI_ENTRY *gpsFWTraceDIEntry; +#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) +static DI_ENTRY *gpsPowMonDIEntry; +#endif +#if 
defined(SUPPORT_FIRMWARE_GCOV) +static DI_ENTRY *gpsFirmwareGcovDIEntry; +#endif +#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) +static DI_ENTRY *gpsPowerDataDIEntry; +#endif +#if defined(SUPPORT_VALIDATION) +static DI_ENTRY *gpsRGXRegsDIEntry; +#endif +#endif + +#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) +static DI_ENTRY *gpsDebugLevelDIEntry; +#endif + +int PVRDebugCreateDIEntries(void) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_ERROR eError; + + PVR_ASSERT(psPVRSRVData != NULL); + + /* + * The DebugFS entries are designed to work in a single device system but + * this function will be called multiple times in a multi-device system. + * Return an error in this case. + */ + if (gpsVersionDIEntry) + { + return -EEXIST; + } + +#if defined(SUPPORT_RGX) && !defined(NO_HARDWARE) + if (SORgxGpuUtilStatsRegister(&ghGpuUtilUserDebugFS) != PVRSRV_OK) + { + return -ENOMEM; + } +#endif + + { + DI_ITERATOR_CB sIterator = { + .pfnStart = _VersionStartOp, + .pfnStop = _VersionStopOp, + .pfnNext = _VersionNextOp, + .pfnShow = _VersionShowOp + }; + + eError = DICreateEntry("version", NULL, &sIterator, psPVRSRVData, + DI_ENTRY_TYPE_GENERIC, &gpsVersionDIEntry); + PVR_GOTO_IF_ERROR(eError, PVRDebugCreateDIEntriesErrorExit); + } + + { + DI_ITERATOR_CB sIterator = { + .pfnStart = _DebugStatusDIStart, + .pfnStop = _DebugStatusDIStop, + .pfnNext = _DebugStatusDINext, + .pfnShow = _DebugStatusDIShow, + .pfnWrite = DebugStatusSet + }; + eError = DICreateEntry("status", NULL, &sIterator, psPVRSRVData, + DI_ENTRY_TYPE_GENERIC, &gpsStatusDIEntry); + PVR_GOTO_IF_ERROR(eError, PVRDebugCreateDIEntriesErrorExit); + } + + { + DI_ITERATOR_CB sIterator = { + .pfnStart = _DebugDumpDebugDIStart, + .pfnStop = _DebugDumpDebugDIStop, + .pfnNext = _DebugDumpDebugDINext, + .pfnShow = _DebugDumpDebugDIShow + }; + eError = DICreateEntry("debug_dump", NULL, &sIterator, psPVRSRVData, + DI_ENTRY_TYPE_GENERIC, &gpsDumpDebugDIEntry); + PVR_GOTO_IF_ERROR(eError, 
PVRDebugCreateDIEntriesErrorExit); + } + +#if defined(SUPPORT_RGX) + if (! PVRSRV_VZ_MODE_IS(GUEST)) + { + { + DI_ITERATOR_CB sIterator = { + .pfnStart = _DebugFWTraceDIStart, + .pfnStop = _DebugFWTraceDIStop, + .pfnNext = _DebugFWTraceDINext, + .pfnShow = _DebugFWTraceDIShow + }; + eError = DICreateEntry("firmware_trace", NULL, &sIterator, + psPVRSRVData, DI_ENTRY_TYPE_GENERIC, + &gpsFWTraceDIEntry); + PVR_GOTO_IF_ERROR(eError, PVRDebugCreateDIEntriesErrorExit); + } + +#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) + { + DI_ITERATOR_CB sIterator = { + .pfnStart = _PowMonTraceDIStart, + .pfnStop = _PowMonTraceDIStop, + .pfnNext = _PowMonTraceDINext, + .pfnShow = _PowMonTraceDIShow + }; + eError = DICreateEntry("power_mon", NULL, &sIterator, psPVRSRVData, + DI_ENTRY_TYPE_GENERIC, &gpsPowMonDIEntry); + PVR_GOTO_IF_ERROR(eError, PVRDebugCreateDIEntriesErrorExit); + } +#endif + } + +#if defined(SUPPORT_FIRMWARE_GCOV) + { + DI_ITERATOR_CB sIterator = { + .pfnStart = _FirmwareGcovDIStart, + .pfnStop = _FirmwareGcovDIStop, + .pfnNext = _FirmwareGcovDINext, + .pfnShow = _FirmwareGcovDIShow + }; + + eError = DICreateEntry("firmware_gcov", NULL, &sIterator, psPVRSRVData, + DI_ENTRY_TYPE_GENERIC, &gpsFirmwareGcovDIEntry); + PVR_GOTO_IF_ERROR(eError, PVRDebugCreateDIEntriesErrorExit); + } +#endif + +#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) + { + DI_ITERATOR_CB sIterator = { + .pfnStart = _DebugPowerDataDIStart, + .pfnStop = _DebugPowerDataDIStop, + .pfnNext = _DebugPowerDataDINext, + .pfnShow = _DebugPowerDataDIShow, + .pfnWrite = PowerDataSet + }; + eError = DICreateEntry("power_data", NULL, &sIterator, psPVRSRVData, + DI_ENTRY_TYPE_GENERIC, &gpsPowerDataDIEntry); + PVR_GOTO_IF_ERROR(eError, PVRDebugCreateDIEntriesErrorExit); + } +#endif + +#if defined(SUPPORT_VALIDATION) + { + DI_ITERATOR_CB sIterator = { + .pfnSeek = _RgxRegsSeek, + .pfnRead = _RgxRegsRead, + .pfnWrite = _RgxRegsWrite + }; + eError = DICreateEntry("rgxregs", NULL, &sIterator, psPVRSRVData, + 
DI_ENTRY_TYPE_RANDOM_ACCESS, &gpsRGXRegsDIEntry); + + PVR_GOTO_IF_ERROR(eError, PVRDebugCreateDIEntriesErrorExit); + } +#endif + +#endif + +#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) + { + DI_ITERATOR_CB sIterator = { + .pfnStart = DebugLevelDIStart, + .pfnStop = DebugLevelDIStop, + .pfnNext = DebugLevelDINext, + .pfnShow = DebugLevelDIShow, + .pfnWrite = DebugLevelSet + }; + eError = DICreateEntry("debug_level", NULL, &sIterator, &gPVRDebugLevel, + DI_ENTRY_TYPE_GENERIC, &gpsDebugLevelDIEntry); + PVR_GOTO_IF_ERROR(eError, PVRDebugCreateDIEntriesErrorExit); + } +#endif + + return 0; + +PVRDebugCreateDIEntriesErrorExit: + PVRDebugRemoveDIEntries(); + + return -EFAULT; +} + +void PVRDebugRemoveDIEntries(void) +{ +#if defined(SUPPORT_RGX) && !defined(NO_HARDWARE) + if (ghGpuUtilUserDebugFS != NULL) + { + SORgxGpuUtilStatsUnregister(ghGpuUtilUserDebugFS); + ghGpuUtilUserDebugFS = NULL; + } +#endif + +#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) + if (gpsDebugLevelDIEntry != NULL) + { + DIDestroyEntry(gpsDebugLevelDIEntry); + } +#endif + +#if defined(SUPPORT_RGX) + if (gpsFWTraceDIEntry != NULL) + { + DIDestroyEntry(gpsFWTraceDIEntry); + } + +#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) + if (gpsPowMonDIEntry != NULL) + { + DIDestroyEntry(gpsPowMonDIEntry); + } +#endif + +#if defined(SUPPORT_FIRMWARE_GCOV) + if (gpsFirmwareGcovDIEntry != NULL) + { + DIDestroyEntry(gpsFirmwareGcovDIEntry); + } +#endif + +#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) + if (gpsPowerDataDIEntry != NULL) + { + DIDestroyEntry(gpsPowerDataDIEntry); + } +#endif + +#if defined(SUPPORT_VALIDATION) + if (gpsRGXRegsDIEntry != NULL) + { + DIDestroyEntry(gpsRGXRegsDIEntry); + } +#endif +#endif /* defined(SUPPORT_RGX) */ + + if (gpsDumpDebugDIEntry != NULL) + { + DIDestroyEntry(gpsDumpDebugDIEntry); + } + + if (gpsStatusDIEntry != NULL) + { + DIDestroyEntry(gpsStatusDIEntry); + } + + if (gpsVersionDIEntry != NULL) + { + DIDestroyEntry(gpsVersionDIEntry); + } +} diff --git 
a/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_debugfs.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_debugfs.c new file mode 100644 index 000000000000..3871c8004a06 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_debugfs.c @@ -0,0 +1,609 @@ +/*************************************************************************/ /*! +@File +@Title DebugFS implementation of Debug Info interface. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +@Description Implements osdi_impl.h API to provide access to driver's + debug data via DebugFS. + + Note about locking in DebugFS module. + + Access to DebugFS is protected against the race where any + file could be removed while being accessed or accessed while + being removed. Any calls to debugfs_remove() will block + until all operations are finished. + + See implementation of proxy file operations (FULL_PROXY_FUNC) + and implementation of debugfs_file_[get|put]() in + fs/debugfs/file.c in Linux kernel sources for more details. + + Note about locking for sequential files. + + The seq_file objects have a mutex that protects access + to all of the file operations hence all of the sequential + *read* operations are protected. 
+*/ /**************************************************************************/ + +#include +#include +#include + +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvr_debugfs.h" +#include "osfunc.h" +#include "allocmem.h" +#include "pvr_bridge_k.h" +#include "pvr_uaccess.h" +#include "osdi_impl.h" + +#define _DRIVER_THREAD_ENTER() \ + do { \ + PVRSRV_ERROR eLocalError = PVRSRVDriverThreadEnter(); \ + if (eLocalError != PVRSRV_OK) \ + { \ + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVDriverThreadEnter failed: %s", \ + __func__, PVRSRVGetErrorString(eLocalError))); \ + return OSPVRSRVToNativeError(eLocalError); \ + } \ + } while (0) + +#define _DRIVER_THREAD_EXIT() \ + PVRSRVDriverThreadExit() + +#define PVR_DEBUGFS_PVR_DPF_LEVEL PVR_DBG_ERROR + +typedef struct DFS_DIR +{ + struct dentry *psDirEntry; + struct DFS_DIR *psParentDir; +} DFS_DIR; + +typedef struct DFS_ENTRY +{ + OSDI_IMPL_ENTRY sImplEntry; + DI_ITERATOR_CB sIterCb; +} DFS_ENTRY; + +typedef struct DFS_FILE +{ + struct dentry *psFileEntry; + struct DFS_DIR *psParentDir; + const struct seq_operations *psSeqOps; + struct DFS_ENTRY sEntry; + DI_ENTRY_TYPE eType; +} DFS_FILE; + +/* ----- native callbacks interface ----------------------------------------- */ + +static void _VPrintf(void *pvNativeHandle, const IMG_CHAR *pszFmt, + va_list pArgs) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) + seq_vprintf(pvNativeHandle, pszFmt, pArgs); +#else + IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; + + vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFmt, pArgs); + seq_printf(pvNativeHandle, "%s", szBuffer); +#endif +} + +static void _Puts(void *pvNativeHandle, const IMG_CHAR *pszStr) +{ + seq_puts(pvNativeHandle, pszStr); +} + +static IMG_BOOL _HasOverflowed(void *pvNativeHandle) +{ + struct seq_file *psSeqFile = pvNativeHandle; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) + return seq_has_overflowed(psSeqFile); +#else + return psSeqFile->count == psSeqFile->size; +#endif +} + 
+static OSDI_IMPL_ENTRY_CB _g_sEntryCallbacks = { + .pfnVPrintf = _VPrintf, + .pfnPuts = _Puts, + .pfnHasOverflowed = _HasOverflowed, +}; + +/* ----- sequential file operations ----------------------------------------- */ + +static void *_Start(struct seq_file *psSeqFile, loff_t *puiPos) +{ + DFS_ENTRY *psEntry = psSeqFile->private; + + void *pvRet = psEntry->sIterCb.pfnStart(&psEntry->sImplEntry, puiPos); + + if (pvRet == DI_START_TOKEN) + { + return SEQ_START_TOKEN; + } + + return pvRet; +} + +static void _Stop(struct seq_file *psSeqFile, void *pvPriv) +{ + DFS_ENTRY *psEntry = psSeqFile->private; + + psEntry->sIterCb.pfnStop(&psEntry->sImplEntry, pvPriv); +} + +static void *_Next(struct seq_file *psSeqFile, void *pvPriv, loff_t *puiPos) +{ + DFS_ENTRY *psEntry = psSeqFile->private; + + return psEntry->sIterCb.pfnNext(&psEntry->sImplEntry, pvPriv, puiPos); +} + +static int _Show(struct seq_file *psSeqFile, void *pvPriv) +{ + DFS_ENTRY *psEntry = psSeqFile->private; + + if (pvPriv == SEQ_START_TOKEN) + { + pvPriv = DI_START_TOKEN; + } + + return psEntry->sIterCb.pfnShow(&psEntry->sImplEntry, pvPriv); +} + +static struct seq_operations _g_sSeqOps = { + .start = _Start, + .stop = _Stop, + .next = _Next, + .show = _Show +}; + +/* ----- file operations ---------------------------------------------------- */ + +static int _Open(struct inode *psINode, struct file *psFile) +{ + DFS_FILE *psDFSFile; + int iRes; + + PVR_LOG_RETURN_IF_FALSE(psINode != NULL && psINode->i_private != NULL, + "psDFSFile is NULL", -EIO); + + _DRIVER_THREAD_ENTER(); + + psDFSFile = psINode->i_private; + + if (psDFSFile->sEntry.sIterCb.pfnStart != NULL) + { + iRes = seq_open(psFile, psDFSFile->psSeqOps); + } + else + { + /* private data is NULL as it's going to be set below */ + iRes = single_open(psFile, _Show, NULL); + } + + if (iRes == 0) + { + struct seq_file *psSeqFile = psFile->private_data; + + DFS_ENTRY *psEntry = OSAllocMem(sizeof(*psEntry)); + if (psEntry == NULL) + { + 
PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem() failed", __func__)); + iRes = -ENOMEM; + goto return_; + } + + *psEntry = psDFSFile->sEntry; + psSeqFile->private = psEntry; + psEntry->sImplEntry.pvNative = psSeqFile; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to seq_open psFile, returning %d", + __func__, iRes)); + } + +return_: + _DRIVER_THREAD_EXIT(); + + return iRes; +} + +static int _Close(struct inode *psINode, struct file *psFile) +{ + DFS_FILE *psDFSFile = psINode->i_private; + DFS_ENTRY *psEntry; + int iRes; + + PVR_LOG_RETURN_IF_FALSE(psDFSFile != NULL, "psDFSFile is NULL", + -EIO); + + _DRIVER_THREAD_ENTER(); + + /* save pointer to DFS_ENTRY */ + psEntry = ((struct seq_file *) psFile->private_data)->private; + + if (psDFSFile->sEntry.sIterCb.pfnStart != NULL) + { + iRes = seq_release(psINode, psFile); + } + else + { + iRes = single_release(psINode, psFile); + } + + /* free DFS_ENTRY allocated in _Open */ + OSFreeMem(psEntry); + + /* Sanity check as seq_release (and single_release which calls it) + * never fail */ + if (iRes != 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to release psFile, returning %d", + __func__, iRes)); + } + + _DRIVER_THREAD_EXIT(); + + return iRes; +} + +static ssize_t _Read(struct file *psFile, char __user *pcBuffer, + size_t uiCount, loff_t *puiPos) +{ + DFS_FILE *psDFSFile = psFile->f_path.dentry->d_inode->i_private; + ssize_t iRes = -1; + + _DRIVER_THREAD_ENTER(); + + if (psDFSFile->eType == DI_ENTRY_TYPE_GENERIC) + { + iRes = seq_read(psFile, pcBuffer, uiCount, puiPos); + if (iRes < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to read from file, pfnRead() " + "returned %zd", __func__, iRes)); + goto return_; + } + } + else if (psDFSFile->eType == DI_ENTRY_TYPE_RANDOM_ACCESS) + { + DFS_ENTRY *psEntry = &psDFSFile->sEntry; + IMG_UINT64 ui64Count = uiCount, ui64Pos; + + IMG_CHAR *pcLocalBuffer = OSAllocMem(uiCount); + PVR_GOTO_IF_FALSE(pcLocalBuffer != NULL, return_); + + iRes = psEntry->sIterCb.pfnRead(pcLocalBuffer, 
ui64Count, &ui64Pos, + psEntry->sImplEntry.pvPrivData); + if (iRes < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to read from file, pfnRead() " + "returned %zd", __func__, iRes)); + OSFreeMem(pcLocalBuffer); + goto return_; + } + + if (pvr_copy_to_user(pcBuffer, pcLocalBuffer, iRes) != 0) + { + iRes = -1; + } + + OSFreeMem(pcLocalBuffer); + + *puiPos = ui64Pos; + } + +return_: + _DRIVER_THREAD_EXIT(); + + return iRes; +} + +static loff_t _LSeek(struct file *psFile, loff_t iOffset, int iOrigin) +{ + DFS_FILE *psDFSFile = psFile->f_path.dentry->d_inode->i_private; + loff_t iRes = -1; + + _DRIVER_THREAD_ENTER(); + + if (psDFSFile->eType == DI_ENTRY_TYPE_GENERIC) + { + iRes = seq_lseek(psFile, iOffset, iOrigin); + if (iRes < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to set file position to offset " + "%lld, pfnSeek() returned %lld", __func__, + iOffset, iRes)); + goto return_; + } + } + else if (psDFSFile->eType == DI_ENTRY_TYPE_RANDOM_ACCESS) + { + DFS_ENTRY *psEntry = &psDFSFile->sEntry; + IMG_UINT64 ui64Pos; + + switch (iOrigin) + { + case SEEK_SET: + ui64Pos = psFile->f_pos + iOffset; + break; + case SEEK_CUR: + ui64Pos = iOffset; + break; + case SEEK_END: + /* not supported as we don't know the file size here */ + /* fall through */ + default: + return -1; + } + + /* only pass the absolute position to the callback, it's up to the + * implementer to determine if the position is valid */ + + iRes = psEntry->sIterCb.pfnSeek(ui64Pos, + psEntry->sImplEntry.pvPrivData); + if (iRes < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to set file position to offset " + "%lld, pfnSeek() returned %lld", __func__, + iOffset, iRes)); + goto return_; + } + + psFile->f_pos = ui64Pos; + } + +return_: + _DRIVER_THREAD_EXIT(); + + return iRes; +} + +static ssize_t _Write(struct file *psFile, const char __user *pszBuffer, + size_t uiCount, loff_t *puiPos) +{ + struct inode *psINode = psFile->f_path.dentry->d_inode; + DFS_FILE *psDFSFile = psINode->i_private; + DI_ITERATOR_CB *psIter 
= &psDFSFile->sEntry.sIterCb; + IMG_CHAR *pcLocalBuffer; + IMG_UINT64 ui64Count = uiCount + 1, ui64Pos = *puiPos; + IMG_INT64 i64Res = -EIO; + + PVR_LOG_RETURN_IF_FALSE(psDFSFile != NULL, "psDFSFile is NULL", + -EIO); + PVR_LOG_RETURN_IF_FALSE(psIter->pfnWrite != NULL, "pfnWrite is NULL", + -EIO); + + _DRIVER_THREAD_ENTER(); + + /* allocate buffer with one additional byte fore NUL character */ + pcLocalBuffer = OSAllocMem(ui64Count); + PVR_LOG_GOTO_IF_FALSE(pcLocalBuffer != NULL, "OSAllocMem() failed", + return_); + + i64Res = pvr_copy_from_user(pcLocalBuffer, pszBuffer, uiCount); + PVR_LOG_GOTO_IF_FALSE(i64Res == 0, "pvr_copy_from_user() failed", + free_local_buffer_); + + /* ensure that the framework user gets a NUL terminated buffer */ + pcLocalBuffer[ui64Count - 1] = '\0'; + + i64Res = psIter->pfnWrite(pcLocalBuffer, ui64Count, &ui64Pos, + psDFSFile->sEntry.sImplEntry.pvPrivData); + PVR_LOG_GOTO_IF_FALSE(i64Res >= 0, "pfnWrite failed", free_local_buffer_); + + *puiPos = ui64Pos; + +free_local_buffer_: + OSFreeMem(pcLocalBuffer); + +return_: + _DRIVER_THREAD_EXIT(); + + return i64Res; +} + +static const struct file_operations _g_psFileOpsGen = { + .owner = THIS_MODULE, + .open = _Open, + .release = _Close, + .read = _Read, + .llseek = _LSeek, + .write = _Write, +}; + +static const struct file_operations _g_psFileOpsRndAcc = { + .owner = THIS_MODULE, + .read = _Read, + .llseek = _LSeek, + .write = _Write, +}; + +/* ----- DI implementation interface ---------------------------------------- */ + +static PVRSRV_ERROR _Init(void) +{ + return PVRSRV_OK; +} + +static void _DeInit(void) +{ +} + +static PVRSRV_ERROR _CreateFile(const IMG_CHAR *pszName, + DI_ENTRY_TYPE eType, + const DI_ITERATOR_CB *psIterCb, + void *pvPrivData, + void *pvParentDir, + void **pvFile) +{ + DFS_DIR *psParentDir = pvParentDir; + DFS_FILE *psFile; + umode_t uiMode = S_IFREG; + struct dentry *psEntry; + const struct file_operations *psFileOps = NULL; + PVRSRV_ERROR eError; + + 
PVR_LOG_RETURN_IF_INVALID_PARAM(pvFile != NULL, "pvFile"); + PVR_LOG_RETURN_IF_INVALID_PARAM(pvParentDir != NULL, "pvParentDir"); + + switch (eType) + { + case DI_ENTRY_TYPE_GENERIC: + psFileOps = &_g_psFileOpsGen; + break; + case DI_ENTRY_TYPE_RANDOM_ACCESS: + psFileOps = &_g_psFileOpsRndAcc; + break; + default: + PVR_LOG_GOTO_IF_INVALID_PARAM("eType", eError, return_); + } + + psFile = OSAllocMem(sizeof(*psFile)); + PVR_LOG_GOTO_IF_NOMEM(psFile, eError, return_); + + uiMode |= psIterCb->pfnShow != NULL || psIterCb->pfnRead != NULL ? + S_IRUGO : 0; + uiMode |= psIterCb->pfnWrite != NULL ? S_IWUSR : 0; + + psEntry = debugfs_create_file(pszName, uiMode, psParentDir->psDirEntry, + psFile, psFileOps); + if (IS_ERR_OR_NULL(psEntry)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Cannot create debugfs '%s' file", + __func__, pszName)); + + eError = psEntry == NULL ? + PVRSRV_ERROR_OUT_OF_MEMORY : PVRSRV_ERROR_INVALID_DEVICE; + goto free_file_; + } + + psFile->eType = eType; + psFile->psSeqOps = &_g_sSeqOps; + psFile->sEntry.sIterCb = *psIterCb; + psFile->sEntry.sImplEntry.pvPrivData = pvPrivData; + psFile->sEntry.sImplEntry.pvNative = NULL; + psFile->sEntry.sImplEntry.psCb = &_g_sEntryCallbacks; + psFile->psParentDir = psParentDir; + psFile->psFileEntry = psEntry; + + *pvFile = psFile; + + return PVRSRV_OK; + +free_file_: + OSFreeMem(psFile); + +return_: + return eError; +} + +static void _DestroyFile(void *pvFile) +{ + DFS_FILE *psFile = pvFile; + + PVR_ASSERT(psFile != NULL); + + psFile->psFileEntry->d_inode->i_private = NULL; + + debugfs_remove(psFile->psFileEntry); + OSFreeMem(psFile); +} + +static PVRSRV_ERROR _CreateDir(const IMG_CHAR *pszName, + void *pvParentDir, + void **ppvDir) +{ + DFS_DIR *psNewDir; + struct dentry *psDirEntry, *psParentDir = NULL; + + PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName"); + PVR_LOG_RETURN_IF_INVALID_PARAM(ppvDir != NULL, "ppvDir"); + + psNewDir = OSAllocMem(sizeof(*psNewDir)); + PVR_LOG_RETURN_IF_NOMEM(psNewDir, "OSAllocMem"); + 
+ psNewDir->psParentDir = pvParentDir; + + if (pvParentDir != NULL) + { + psParentDir = psNewDir->psParentDir->psDirEntry; + } + + psDirEntry = debugfs_create_dir(pszName, psParentDir); + if (IS_ERR_OR_NULL(psDirEntry)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Cannot create '%s' debugfs directory", + __func__, pszName)); + OSFreeMem(psNewDir); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psNewDir->psDirEntry = psDirEntry; + *ppvDir = psNewDir; + + return PVRSRV_OK; +} + +static void _DestroyDir(void *pvDir) +{ + DFS_DIR *psDir = pvDir; + + PVR_ASSERT(psDir != NULL); + + debugfs_remove(psDir->psDirEntry); + OSFreeMem(psDir); +} + +PVRSRV_ERROR PVRDebugFsRegister(void) +{ + OSDI_IMPL_CB sImplCb = { + .pfnInit = _Init, + .pfnDeInit = _DeInit, + .pfnCreateEntry = _CreateFile, + .pfnDestroyEntry = _DestroyFile, + .pfnCreateGroup = _CreateDir, + .pfnDestroyGroup = _DestroyDir + }; + + return DIRegisterImplementation("debugfs", &sImplCb); +} diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_debugfs.h b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_debugfs.h new file mode 100644 index 000000000000..23ae55b12069 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_debugfs.h @@ -0,0 +1,50 @@ +/*************************************************************************/ /*! +@File +@Title DebugFS implementation of Debug Info interface. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef PVR_DEBUGFS_H +#define PVR_DEBUGFS_H + +#include "pvrsrv_error.h" + +PVRSRV_ERROR PVRDebugFsRegister(void); + +#endif /* PVR_DEBUGFS_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_dvfs_device.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_dvfs_device.c new file mode 100644 index 000000000000..d40a7cf979fa --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_dvfs_device.c @@ -0,0 +1,644 @@ +/*************************************************************************/ /*! +@File +@Title PowerVR devfreq device implementation +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Linux module setup +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(NO_HARDWARE) + +#include +#if defined(CONFIG_DEVFREQ_THERMAL) +#include +#endif +#include +#include + +#include "power.h" +#include "pvrsrv.h" +#include "pvrsrv_device.h" + +#include "rgxdevice.h" +#include "rgxinit.h" +#include "sofunc_rgx.h" + +#include "syscommon.h" + +#include "pvr_dvfs_device.h" + +#include "kernel_compatibility.h" + +static PVRSRV_DEVICE_NODE *gpsDeviceNode; + +static IMG_INT32 devfreq_target(struct device *dev, unsigned long *requested_freq, IMG_UINT32 flags) +{ + RGX_DATA *psRGXData = (RGX_DATA*) gpsDeviceNode->psDevConfig->hDevData; + IMG_DVFS_DEVICE *psDVFSDevice = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDevice; + IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; + RGX_TIMING_INFORMATION *psRGXTimingInfo = NULL; + IMG_UINT32 ui32Freq, ui32CurFreq, ui32Volt; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) + struct opp *opp; +#else + struct dev_pm_opp *opp; +#endif + + /* Check the RGX device is initialised */ + if (!psRGXData) + { + return -ENODATA; + } + + psRGXTimingInfo = psRGXData->psRGXTimingInfo; + if (!psDVFSDevice->bEnabled) + { + *requested_freq = psRGXTimingInfo->ui32CoreClockSpeed; + return 0; + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + rcu_read_lock(); +#endif + + opp = devfreq_recommended_opp(dev, requested_freq, flags); + if (IS_ERR(opp)) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + rcu_read_unlock(); +#endif + PVR_DPF((PVR_DBG_ERROR, "Invalid OPP")); + return PTR_ERR(opp); + } + + ui32Freq = dev_pm_opp_get_freq(opp); + ui32Volt = dev_pm_opp_get_voltage(opp); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + rcu_read_unlock(); +#else + dev_pm_opp_put(opp); +#endif + + ui32CurFreq = psRGXTimingInfo->ui32CoreClockSpeed; + + if (ui32CurFreq == ui32Freq) + { + return 0; + } + + if (PVRSRV_OK != PVRSRVDevicePreClockSpeedChange(gpsDeviceNode, + 
psDVFSDeviceCfg->bIdleReq, + NULL)) + { + dev_err(dev, "PVRSRVDevicePreClockSpeedChange failed\n"); + return -EPERM; + } + + /* Increasing frequency, change voltage first */ + if (ui32Freq > ui32CurFreq) + { + psDVFSDeviceCfg->pfnSetVoltage(ui32Volt); + } + + psDVFSDeviceCfg->pfnSetFrequency(ui32Freq); + + /* Decreasing frequency, change frequency first */ + if (ui32Freq < ui32CurFreq) + { + psDVFSDeviceCfg->pfnSetVoltage(ui32Volt); + } + + psRGXTimingInfo->ui32CoreClockSpeed = ui32Freq; + + PVRSRVDevicePostClockSpeedChange(gpsDeviceNode, psDVFSDeviceCfg->bIdleReq, + NULL); + + return 0; +} + +static int devfreq_get_dev_status(struct device *dev, struct devfreq_dev_status *stat) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = gpsDeviceNode->pvDevice; + IMG_DVFS_DEVICE *psDVFSDevice = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDevice; + RGX_DATA *psRGXData = (RGX_DATA*) gpsDeviceNode->psDevConfig->hDevData; + RGX_TIMING_INFORMATION *psRGXTimingInfo = NULL; + RGXFWIF_GPU_UTIL_STATS sGpuUtilStats; + PVRSRV_ERROR eError; + + /* Check the RGX device is initialised */ + if (!psDevInfo || !psRGXData) + { + return -ENODATA; + } + + psRGXTimingInfo = psRGXData->psRGXTimingInfo; + stat->current_frequency = psRGXTimingInfo->ui32CoreClockSpeed; + + if (psDevInfo->pfnGetGpuUtilStats == NULL) + { + /* Not yet ready. So set times to something sensible. 
*/ + stat->busy_time = 0; + stat->total_time = 0; + return 0; + } + + eError = psDevInfo->pfnGetGpuUtilStats(psDevInfo->psDeviceNode, + psDVFSDevice->hGpuUtilUserDVFS, + &sGpuUtilStats); + + if (eError != PVRSRV_OK) + { + return -EAGAIN; + } + + stat->busy_time = sGpuUtilStats.ui64GpuStatActive; + stat->total_time = sGpuUtilStats.ui64GpuStatCumulative; + + return 0; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) +static IMG_INT32 devfreq_cur_freq(struct device *dev, unsigned long *freq) +{ + RGX_DATA *psRGXData = (RGX_DATA*) gpsDeviceNode->psDevConfig->hDevData; + + /* Check the RGX device is initialised */ + if (!psRGXData) + { + return -ENODATA; + } + + *freq = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed; + + return 0; +} +#endif + +static struct devfreq_dev_profile img_devfreq_dev_profile = +{ + .target = devfreq_target, + .get_dev_status = devfreq_get_dev_status, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) + .get_cur_freq = devfreq_cur_freq, +#endif +}; + +static int FillOPPTable(struct device *dev) +{ + const IMG_OPP *iopp; + int i, err = 0; + IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; + + for (i = 0, iopp = psDVFSDeviceCfg->pasOPPTable; + i < psDVFSDeviceCfg->ui32OPPTableSize; + i++, iopp++) + { + err = dev_pm_opp_add(dev, iopp->ui32Freq, iopp->ui32Volt); + if (err) { + dev_err(dev, "Could not add OPP entry, %d\n", err); + return err; + } + } + + return 0; +} + +static void ClearOPPTable(struct device *dev) +{ +#if (defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) + const IMG_OPP *iopp; + int i; + IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; + + for (i = 0, iopp = psDVFSDeviceCfg->pasOPPTable; + i < psDVFSDeviceCfg->ui32OPPTableSize; + i++, iopp++) + { + dev_pm_opp_remove(dev, iopp->ui32Freq); + } +#endif +} + +static int GetOPPValues(struct device *dev, + 
unsigned long *min_freq, + unsigned long *min_volt, + unsigned long *max_freq) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) + struct opp *opp; +#else + struct dev_pm_opp *opp; +#endif + int count, i, err = 0; + unsigned long freq; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) && \ + (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) + unsigned int *freq_table; +#else + unsigned long *freq_table; +#endif + + count = dev_pm_opp_get_opp_count(dev); + if (count < 0) + { + dev_err(dev, "Could not fetch OPP count, %d\n", count); + return count; + } + + dev_info(dev, "Found %d OPP points.\n", count); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) + freq_table = devm_kcalloc(dev, count, sizeof(*freq_table), GFP_ATOMIC); +#else + freq_table = kcalloc(count, sizeof(*freq_table), GFP_ATOMIC); +#endif + if (! freq_table) + { + return -ENOMEM; + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + /* Start RCU read-side critical section to map frequency to OPP */ + rcu_read_lock(); +#endif + + /* Iterate over OPP table; Iteration 0 finds "opp w/ freq >= 0 Hz". */ + freq = 0; + opp = dev_pm_opp_find_freq_ceil(dev, &freq); + if (IS_ERR(opp)) + { + err = PTR_ERR(opp); + dev_err(dev, "Couldn't find lowest frequency, %d\n", err); + goto exit; + } + + *min_volt = dev_pm_opp_get_voltage(opp); + *max_freq = *min_freq = freq_table[0] = freq; + dev_info(dev, "opp[%d/%d]: (%lu Hz, %lu uV)\n", 1, count, freq, *min_volt); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) + dev_pm_opp_put(opp); +#endif + + /* Iteration i > 0 finds "opp w/ freq >= (opp[i-1].freq + 1)". 
*/ + for (i = 1; i < count; i++) + { + freq++; + opp = dev_pm_opp_find_freq_ceil(dev, &freq); + if (IS_ERR(opp)) + { + err = PTR_ERR(opp); + dev_err(dev, "Couldn't find %dth frequency, %d\n", i, err); + goto exit; + } + + freq_table[i] = freq; + *max_freq = freq; + dev_info(dev, + "opp[%d/%d]: (%lu Hz, %lu uV)\n", + i + 1, + count, + freq, + dev_pm_opp_get_voltage(opp)); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) + dev_pm_opp_put(opp); +#endif + } + +exit: +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + rcu_read_unlock(); +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) + if (!err) + { + img_devfreq_dev_profile.freq_table = freq_table; + img_devfreq_dev_profile.max_state = count; + } + else +#endif + { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) + devm_kfree(dev, freq_table); +#else + kfree(freq_table); +#endif + } + + return err; +} + +#if defined(CONFIG_DEVFREQ_THERMAL) +static int RegisterCoolingDevice(struct device *dev, + IMG_DVFS_DEVICE *psDVFSDevice, + struct devfreq_cooling_power *powerOps) +{ + struct device_node *of_node; + int err = 0; + PVRSRV_VZ_RET_IF_MODE(GUEST, err); + + if (!powerOps) + { + dev_info(dev, "Cooling: power ops not registered, not enabling cooling"); + return 0; + } + + of_node = of_node_get(dev->of_node); + + psDVFSDevice->psDevfreqCoolingDevice = of_devfreq_cooling_register_power( + of_node, psDVFSDevice->psDevFreq, powerOps); + + if (IS_ERR(psDVFSDevice->psDevfreqCoolingDevice)) + { + err = PTR_ERR(psDVFSDevice->psDevfreqCoolingDevice); + dev_err(dev, "Failed to register as devfreq cooling device %d", err); + } + + of_node_put(of_node); + + return err; +} +#endif + +#define TO_IMG_ERR(err) ((err == -EPROBE_DEFER) ? 
PVRSRV_ERROR_PROBE_DEFER : PVRSRV_ERROR_INIT_FAILURE) + +PVRSRV_ERROR InitDVFS(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + IMG_DVFS_DEVICE *psDVFSDevice = NULL; + IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = NULL; + IMG_DVFS_GOVERNOR_CFG *psDVFSGovernorCfg = NULL; + RGX_TIMING_INFORMATION *psRGXTimingInfo = NULL; + struct device *psDev = psDeviceNode->psDevConfig->pvOSDevice; + unsigned long min_freq = 0, max_freq = 0, min_volt = 0; + PVRSRV_ERROR eError; + int err; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + +#if !defined(CONFIG_PM_OPP) + return PVRSRV_ERROR_NOT_SUPPORTED; +#endif + + if (gpsDeviceNode) + { + PVR_DPF((PVR_DBG_ERROR, + "DVFS already initialised for device node %p", + gpsDeviceNode)); + return PVRSRV_ERROR_INIT_FAILURE; + } + + gpsDeviceNode = psDeviceNode; + psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice; + psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; + psDVFSGovernorCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSGovernorCfg; + psRGXTimingInfo = ((RGX_DATA *)psDeviceNode->psDevConfig->hDevData)->psRGXTimingInfo; + +#if defined(SUPPORT_SOC_TIMER) + if (! 
psDeviceNode->psDevConfig->pfnSoCTimerRead) + { + PVR_DPF((PVR_DBG_ERROR, "System layer SoC timer callback not implemented")); + return PVRSRV_ERROR_NOT_IMPLEMENTED; + } +#endif + + eError = SORgxGpuUtilStatsRegister(&psDVFSDevice->hGpuUtilUserDVFS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to register to the GPU utilisation stats, %d", eError)); + return eError; + } + +#if defined(CONFIG_OF) + err = dev_pm_opp_of_add_table(psDev); + if (err) + { + /* + * If there are no device tree or system layer provided operating points + * then return an error + */ + if (err != -ENODEV || !psDVFSDeviceCfg->pasOPPTable) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to init opp table from devicetree, %d", err)); + eError = TO_IMG_ERR(err); + goto err_exit; + } + } +#endif + + if (psDVFSDeviceCfg->pasOPPTable) + { + err = FillOPPTable(psDev); + if (err) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to fill OPP table with data, %d", err)); + eError = TO_IMG_ERR(err); + goto err_exit; + } + } + + err = GetOPPValues(psDev, &min_freq, &min_volt, &max_freq); + if (err) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to read OPP points, %d", err)); + eError = TO_IMG_ERR(err); + goto err_exit; + } + + img_devfreq_dev_profile.initial_freq = min_freq; + img_devfreq_dev_profile.polling_ms = psDVFSDeviceCfg->ui32PollMs; + + psRGXTimingInfo->ui32CoreClockSpeed = min_freq; + + psDVFSDeviceCfg->pfnSetFrequency(min_freq); + psDVFSDeviceCfg->pfnSetVoltage(min_volt); + + psDVFSDevice->data.upthreshold = psDVFSGovernorCfg->ui32UpThreshold; + psDVFSDevice->data.downdifferential = psDVFSGovernorCfg->ui32DownDifferential; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) + psDVFSDevice->psDevFreq = devm_devfreq_add_device(psDev, + &img_devfreq_dev_profile, + "simple_ondemand", + &psDVFSDevice->data); +#else + psDVFSDevice->psDevFreq = devfreq_add_device(psDev, + &img_devfreq_dev_profile, + "simple_ondemand", + &psDVFSDevice->data); +#endif + + if (IS_ERR(psDVFSDevice->psDevFreq)) + { + 
PVR_DPF((PVR_DBG_ERROR, + "Failed to add as devfreq device %p, %ld", + psDVFSDevice->psDevFreq, + PTR_ERR(psDVFSDevice->psDevFreq))); + eError = TO_IMG_ERR(PTR_ERR(psDVFSDevice->psDevFreq)); + goto err_exit; + } + + eError = SuspendDVFS(); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVInit: Failed to suspend DVFS")); + goto err_exit; + } + +#if defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) + psDVFSDevice->psDevFreq->policy.user.min_freq = min_freq; + psDVFSDevice->psDevFreq->policy.user.max_freq = max_freq; +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) + psDVFSDevice->psDevFreq->scaling_min_freq = min_freq; + psDVFSDevice->psDevFreq->scaling_max_freq = max_freq; +#else + psDVFSDevice->psDevFreq->min_freq = min_freq; + psDVFSDevice->psDevFreq->max_freq = max_freq; +#endif + + err = devfreq_register_opp_notifier(psDev, psDVFSDevice->psDevFreq); + if (err) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to register opp notifier, %d", err)); + eError = TO_IMG_ERR(err); + goto err_exit; + } + +#if defined(CONFIG_DEVFREQ_THERMAL) + err = RegisterCoolingDevice(psDev, psDVFSDevice, psDVFSDeviceCfg->psPowerOps); + if (err) + { + eError = TO_IMG_ERR(err); + goto err_exit; + } +#endif + + PVR_TRACE(("PVR DVFS activated: %lu-%lu Hz, Period: %ums", + min_freq, + max_freq, + psDVFSDeviceCfg->ui32PollMs)); + + return PVRSRV_OK; + +err_exit: + DeinitDVFS(psDeviceNode); + return eError; +} + +void DeinitDVFS(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + IMG_DVFS_DEVICE *psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice; + struct device *psDev = psDeviceNode->psDevConfig->pvOSDevice; + IMG_INT32 i32Error; + + PVRSRV_VZ_RETN_IF_MODE(GUEST); + + PVR_ASSERT(psDeviceNode == gpsDeviceNode); + + if (! 
psDVFSDevice) + { + return; + } + +#if defined(CONFIG_DEVFREQ_THERMAL) + if (!IS_ERR_OR_NULL(psDVFSDevice->psDevfreqCoolingDevice)) + { + devfreq_cooling_unregister(psDVFSDevice->psDevfreqCoolingDevice); + psDVFSDevice->psDevfreqCoolingDevice = NULL; + } +#endif + + if (psDVFSDevice->psDevFreq) + { + i32Error = devfreq_unregister_opp_notifier(psDev, psDVFSDevice->psDevFreq); + if (i32Error < 0) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to unregister OPP notifier")); + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)) + devfreq_remove_device(psDVFSDevice->psDevFreq); +#else + devm_devfreq_remove_device(psDev, psDVFSDevice->psDevFreq); +#endif + + psDVFSDevice->psDevFreq = NULL; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) + kfree(img_devfreq_dev_profile.freq_table); +#endif + + /* Remove OPP entries for this device */ + ClearOPPTable(psDev); + +#if defined(CONFIG_OF) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) || \ + (defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) + dev_pm_opp_of_remove_table(psDev); +#endif +#endif + + SORgxGpuUtilStatsUnregister(psDVFSDevice->hGpuUtilUserDVFS); + psDVFSDevice->hGpuUtilUserDVFS = NULL; + + gpsDeviceNode = NULL; +} + +PVRSRV_ERROR SuspendDVFS(void) +{ + IMG_DVFS_DEVICE *psDVFSDevice = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDevice; + + psDVFSDevice->bEnabled = IMG_FALSE; + + return PVRSRV_OK; +} + +PVRSRV_ERROR ResumeDVFS(void) +{ + IMG_DVFS_DEVICE *psDVFSDevice = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDevice; + + /* Not supported in GuestOS drivers */ + psDVFSDevice->bEnabled = !PVRSRV_VZ_MODE_IS(GUEST); + + return PVRSRV_OK; +} + +#endif /* !NO_HARDWARE */ diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_dvfs_device.h b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_dvfs_device.h new file mode 100644 index 000000000000..808fc3de3f81 --- /dev/null +++ 
b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_dvfs_device.h @@ -0,0 +1,58 @@ +/*************************************************************************/ /*! +@File pvr_dvfs.c +@Title System level interface for DVFS +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _PVR_DVFS_DEVICE_H_ +#define _PVR_DVFS_DEVICE_H_ + +#include "opaque_types.h" +#include "pvrsrv_error.h" + + +PVRSRV_ERROR InitDVFS(PPVRSRV_DEVICE_NODE psDeviceNode); + +void DeinitDVFS(PPVRSRV_DEVICE_NODE psDeviceNode); + +PVRSRV_ERROR SuspendDVFS(void); + +PVRSRV_ERROR ResumeDVFS(void); + +#endif /* _PVR_DVFS_DEVICE_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_gputrace.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_gputrace.c new file mode 100644 index 000000000000..dab61a61c7b9 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_gputrace.c @@ -0,0 +1,1244 @@ +/*************************************************************************/ /*! +@File pvr_gputrace.c +@Title PVR GPU Trace module Linux implementation +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)) +#include +#else +#include +#endif + +#include "pvrsrv_error.h" +#include "pvrsrv_apphint.h" +#include "pvr_debug.h" +#include "ospvr_gputrace.h" +#include "rgxhwperf.h" +#include "rgxtimecorr.h" +#include "device.h" +#include "trace_events.h" +#include "pvrsrv.h" +#include "pvrsrv_tlstreams.h" +#include "tlclient.h" +#include "pvr_debug.h" +#define CREATE_TRACE_POINTS +#include "rogue_trace_events.h" + +/****************************************************************************** + Module internal implementation +******************************************************************************/ + +typedef enum { + PVR_GPUTRACE_SWITCH_TYPE_UNDEF = 0, + + PVR_GPUTRACE_SWITCH_TYPE_BEGIN = 1, + PVR_GPUTRACE_SWITCH_TYPE_END = 2, + PVR_GPUTRACE_SWITCH_TYPE_SINGLE = 3 +} PVR_GPUTRACE_SWITCH_TYPE; + +typedef struct RGX_HWPERF_FTRACE_DATA { + /* This lock ensures the HWPerf TL stream reading resources are not destroyed + * by one thread disabling it while another is reading from it. Keeps the + * state and resource create/destroy atomic and consistent. */ + POS_LOCK hFTraceResourceLock; + + IMG_HANDLE hGPUTraceCmdCompleteHandle; + IMG_HANDLE hGPUTraceTLStream; + IMG_UINT64 ui64LastSampledTimeCorrOSTimeStamp; + IMG_UINT32 ui32FTraceLastOrdinal; +} RGX_HWPERF_FTRACE_DATA; + +/* This lock ensures state change of GPU_TRACING on/off is done atomically */ +static POS_LOCK ghGPUTraceStateLock; +static IMG_BOOL gbFTraceGPUEventsEnabled = PVRSRV_APPHINT_ENABLEFTRACEGPU; + +/* Saved value of the clock source before the trace was enabled. We're keeping + * it here so that we know which clock should be selected after we disable the + * gpu ftrace. 
*/ +#if defined(SUPPORT_RGX) +static RGXTIMECORR_CLOCK_TYPE geLastTimeCorrClock = PVRSRV_APPHINT_TIMECORRCLOCK; +#endif + +/* This lock ensures that the reference counting operation on the FTrace UFO + * events and enable/disable operation on firmware event are performed as + * one atomic operation. This should ensure that there are no race conditions + * between reference counting and firmware event state change. + * See below comment for guiUfoEventRef. + */ +static POS_LOCK ghLockFTraceEventLock; + +/* Multiple FTrace UFO events are reflected in the firmware as only one event. When + * we enable FTrace UFO event we want to also at the same time enable it in + * the firmware. Since there is a multiple-to-one relation between those events + * we count how many FTrace UFO events is enabled. If at least one event is + * enabled we enabled the firmware event. When all FTrace UFO events are disabled + * we disable firmware event. */ +static IMG_UINT guiUfoEventRef; + +/****************************************************************************** + Module In-bound API +******************************************************************************/ + +static PVRSRV_ERROR _GpuTraceDisable( + PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_BOOL bDeInit); + +static void _GpuTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE); + +PVRSRV_ERROR PVRGpuTraceSupportInit(void) +{ + PVRSRV_ERROR eError; + + if (ghLockFTraceEventLock != NULL) + { + PVR_DPF((PVR_DBG_ERROR, "FTrace Support is already initialized")); + return PVRSRV_OK; + } + + /* common module params initialization */ + eError = OSLockCreate(&ghLockFTraceEventLock); + PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); + + eError = OSLockCreate(&ghGPUTraceStateLock); + PVR_LOG_RETURN_IF_ERROR (eError, "OSLockCreate"); + + return PVRSRV_OK; +} + +void PVRGpuTraceSupportDeInit(void) +{ + if (ghGPUTraceStateLock) + { + OSLockDestroy(ghGPUTraceStateLock); + } + + if (ghLockFTraceEventLock) + { + OSLockDestroy(ghLockFTraceEventLock); + 
ghLockFTraceEventLock = NULL; + } +} + +PVRSRV_ERROR PVRGpuTraceInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + RGX_HWPERF_FTRACE_DATA *psData; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + psData = OSAllocZMem(sizeof(RGX_HWPERF_FTRACE_DATA)); + psDevInfo->pvGpuFtraceData = psData; + PVR_LOG_GOTO_IF_NOMEM(psData, eError, e0); + + /* We initialise it only once because we want to track if any + * packets were dropped. */ + psData->ui32FTraceLastOrdinal = IMG_UINT32_MAX - 1; + + eError = OSLockCreate(&psData->hFTraceResourceLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0); + + return PVRSRV_OK; + +e0: + PVRGpuTraceDeInitDevice(psDeviceNode); + return eError; +} + +void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_HWPERF_FTRACE_DATA *psData = psDevInfo->pvGpuFtraceData; + + PVRSRV_VZ_RETN_IF_MODE(GUEST); + if (psData) + { + /* first disable the tracing, to free up TL resources */ + if (psData->hFTraceResourceLock) + { + OSLockAcquire(psData->hFTraceResourceLock); + _GpuTraceDisable(psDeviceNode->pvDevice, IMG_TRUE); + OSLockRelease(psData->hFTraceResourceLock); + + /* now free all the FTrace resources */ + OSLockDestroy(psData->hFTraceResourceLock); + } + OSFreeMem(psData); + psDevInfo->pvGpuFtraceData = NULL; + } +} + +IMG_BOOL PVRGpuTraceIsEnabled(void) +{ + return gbFTraceGPUEventsEnabled; +} + +void PVRGpuTraceInitIfEnabled(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + if (PVRGpuTraceIsEnabled()) + { + PVRSRV_ERROR eError = PVRGpuTraceSetEnabled(psDeviceNode, IMG_TRUE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to initialise GPU event tracing" + " (%s)", PVRSRVGetErrorString(eError))); + } + + /* below functions will enable FTrace events which in turn will + * execute HWPerf callbacks that set appropriate filter values + * note: unfortunately the functions don't allow to pass 
private + * data so they enable events for all of the devices + * at once, which means that this can happen more than once + * if there is more than one device */ + + /* single events can be enabled by calling trace_set_clr_event() + * with the event name, e.g.: + * trace_set_clr_event("rogue", "rogue_ufo_update", 1) */ +#if defined(CONFIG_EVENT_TRACING) /* this is a kernel config option */ +#if defined(ANDROID) || defined(CHROMIUMOS_KERNEL) + if (trace_set_clr_event("gpu", NULL, 1)) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to enable \"gpu\" event" + " group")); + } + else + { + PVR_LOG(("FTrace events from \"gpu\" group enabled")); + } +#endif /* defined(ANDROID) || defined(CHROMIUMOS_KERNEL) */ + if (trace_set_clr_event("rogue", NULL, 1)) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to enable \"rogue\" event" + " group")); + } + else + { + PVR_LOG(("FTrace events from \"rogue\" group enabled")); + } +#endif /* defined(CONFIG_EVENT_TRACING) */ + } +} + +/* Caller must now hold hFTraceResourceLock before calling this method. 
+ */ +static PVRSRV_ERROR _GpuTraceEnable(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + RGX_HWPERF_FTRACE_DATA *psFtraceData; + PVRSRV_DEVICE_NODE *psRgxDevNode = psRgxDevInfo->psDeviceNode; + IMG_CHAR pszHWPerfStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5]; + + PVR_DPF_ENTERED; + + PVR_ASSERT(psRgxDevInfo); + + psFtraceData = psRgxDevInfo->pvGpuFtraceData; + + PVR_ASSERT(OSLockIsLocked(psFtraceData->hFTraceResourceLock)); + + /* return if already enabled */ + if (psFtraceData->hGPUTraceTLStream) + { + return PVRSRV_OK; + } + +#if defined(SUPPORT_RGX) + /* Signal FW to enable event generation */ + if (psRgxDevInfo->bFirmwareInitialised) + { + IMG_UINT64 ui64UFOFilter = psRgxDevInfo->ui64HWPerfFilter & + (RGX_HWPERF_EVENT_MASK_FW_SED | RGX_HWPERF_EVENT_MASK_FW_UFO); + + eError = PVRSRVRGXCtrlHWPerfKM(NULL, psRgxDevNode, + RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE, + RGX_HWPERF_EVENT_MASK_HW_KICKFINISH | + ui64UFOFilter); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM", err_out); + } + else +#endif + { + /* only set filter and exit */ + psRgxDevInfo->ui64HWPerfFilter = RGX_HWPERF_EVENT_MASK_HW_KICKFINISH | + ((RGX_HWPERF_EVENT_MASK_FW_SED | RGX_HWPERF_EVENT_MASK_FW_UFO) & + psRgxDevInfo->ui64HWPerfFilter); + + PVR_DPF((PVR_DBG_WARNING, + "HWPerfFW mask has been SET to (%" IMG_UINT64_FMTSPECx ")", + psRgxDevInfo->ui64HWPerfFilter)); + + return PVRSRV_OK; + } + + /* form the HWPerf stream name, corresponding to this DevNode; which can make sense in the UM */ + if (OSSNPrintf(pszHWPerfStreamName, sizeof(pszHWPerfStreamName), "%s%d", + PVRSRV_TL_HWPERF_RGX_FW_STREAM, psRgxDevNode->sDevId.i32UMIdentifier) < 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to form HWPerf stream name for device %d", + __func__, + psRgxDevNode->sDevId.i32UMIdentifier)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Open the TL Stream for HWPerf data consumption */ + eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE, + pszHWPerfStreamName, + 
PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING, + &psFtraceData->hGPUTraceTLStream); + PVR_LOG_GOTO_IF_ERROR(eError, "TLClientOpenStream", err_out); + +#if defined(SUPPORT_RGX) + if (RGXTimeCorrGetClockSource() != RGXTIMECORR_CLOCK_SCHED) + { + /* Set clock source for timer correlation data to sched_clock */ + geLastTimeCorrClock = RGXTimeCorrGetClockSource(); + RGXTimeCorrSetClockSource(psRgxDevNode, RGXTIMECORR_CLOCK_SCHED); + } +#endif + + /* Reset the OS timestamp coming from the timer correlation data + * associated with the latest HWPerf event we processed. + */ + psFtraceData->ui64LastSampledTimeCorrOSTimeStamp = 0; + + /* Register a notifier to collect HWPerf data whenever the HW completes + * an operation. + */ + eError = PVRSRVRegisterCmdCompleteNotify( + &psFtraceData->hGPUTraceCmdCompleteHandle, + &_GpuTraceCmdCompleteNotify, + psRgxDevInfo); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRegisterCmdCompleteNotify", err_close_stream); + +err_out: + PVR_DPF_RETURN_RC(eError); + +err_close_stream: + TLClientCloseStream(DIRECT_BRIDGE_HANDLE, + psFtraceData->hGPUTraceTLStream); + psFtraceData->hGPUTraceTLStream = NULL; + goto err_out; +} + +/* Caller must now hold hFTraceResourceLock before calling this method. 
+ */ +static PVRSRV_ERROR _GpuTraceDisable(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_BOOL bDeInit) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + RGX_HWPERF_FTRACE_DATA *psFtraceData; +#if defined(SUPPORT_RGX) + PVRSRV_DEVICE_NODE *psRgxDevNode = psRgxDevInfo->psDeviceNode; +#endif + + PVR_DPF_ENTERED; + + PVR_ASSERT(psRgxDevInfo); + + psFtraceData = psRgxDevInfo->pvGpuFtraceData; + + PVR_ASSERT(OSLockIsLocked(psFtraceData->hFTraceResourceLock)); + + /* if FW is not yet initialised, just set filter and exit */ + if (!psRgxDevInfo->bFirmwareInitialised) + { + psRgxDevInfo->ui64HWPerfFilter = RGX_HWPERF_EVENT_MASK_NONE; + PVR_DPF((PVR_DBG_WARNING, + "HWPerfFW mask has been SET to (%" IMG_UINT64_FMTSPECx ")", + psRgxDevInfo->ui64HWPerfFilter)); + + return PVRSRV_OK; + } + + if (NULL == psFtraceData->hGPUTraceTLStream) + { + /* Tracing already disabled, just return */ + return PVRSRV_OK; + } + +#if defined(SUPPORT_RGX) + if (!bDeInit) + { + eError = PVRSRVRGXCtrlHWPerfKM(NULL, psRgxDevNode, + RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE, + (RGX_HWPERF_EVENT_MASK_NONE)); + PVR_LOG_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM"); + } +#endif + + if (psFtraceData->hGPUTraceCmdCompleteHandle) + { + /* Tracing is being turned off. Unregister the notifier. */ + eError = PVRSRVUnregisterCmdCompleteNotify( + psFtraceData->hGPUTraceCmdCompleteHandle); + PVR_LOG_IF_ERROR(eError, "PVRSRVUnregisterCmdCompleteNotify"); + psFtraceData->hGPUTraceCmdCompleteHandle = NULL; + } + + if (psFtraceData->hGPUTraceTLStream) + { + IMG_PBYTE pbTmp = NULL; + IMG_UINT32 ui32Tmp = 0; + + /* We have to flush both the L1 (FW) and L2 (Host) buffers in case there + * are some events left unprocessed in this FTrace/systrace "session" + * (note that even if we have just disabled HWPerf on the FW some packets + * could have been generated and already copied to L2 by the MISR handler). 
+ * + * With the following calls we will both copy new data to the Host buffer + * (done by the producer callback in TLClientAcquireData) and advance + * the read offset in the buffer to catch up with the latest events. + */ + eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, + psFtraceData->hGPUTraceTLStream, + &pbTmp, &ui32Tmp); + PVR_LOG_IF_ERROR(eError, "TLClientCloseStream"); + + /* Let close stream perform the release data on the outstanding acquired data */ + eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE, + psFtraceData->hGPUTraceTLStream); + PVR_LOG_IF_ERROR(eError, "TLClientCloseStream"); + + psFtraceData->hGPUTraceTLStream = NULL; + } + +#if defined(SUPPORT_RGX) + if (geLastTimeCorrClock != RGXTIMECORR_CLOCK_SCHED) + { + RGXTimeCorrSetClockSource(psRgxDevNode, geLastTimeCorrClock); + } +#endif + + PVR_DPF_RETURN_RC(eError); +} + +static PVRSRV_ERROR _GpuTraceSetEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_BOOL bNewValue) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + RGX_HWPERF_FTRACE_DATA *psFtraceData; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + PVR_DPF_ENTERED; + + PVR_ASSERT(psRgxDevInfo); + psFtraceData = psRgxDevInfo->pvGpuFtraceData; + + /* About to create/destroy FTrace resources, lock critical section + * to avoid HWPerf MISR thread contention. + */ + OSLockAcquire(psFtraceData->hFTraceResourceLock); + + eError = (bNewValue ? 
_GpuTraceEnable(psRgxDevInfo) + : _GpuTraceDisable(psRgxDevInfo, IMG_FALSE)); + + OSLockRelease(psFtraceData->hFTraceResourceLock); + + PVR_DPF_RETURN_RC(eError); +} + +static PVRSRV_ERROR _GpuTraceSetEnabledForAllDevices(IMG_BOOL bNewValue) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDeviceNode; + + psDeviceNode = psPVRSRVData->psDeviceNodeList; + /* enable/disable GPU trace on all devices */ + while (psDeviceNode) + { + eError = _GpuTraceSetEnabled(psDeviceNode->pvDevice, bNewValue); + if (eError != PVRSRV_OK) + { + break; + } + psDeviceNode = psDeviceNode->psNext; + } + + PVR_DPF_RETURN_RC(eError); +} + +PVRSRV_ERROR PVRGpuTraceSetEnabled(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bNewValue) +{ + return _GpuTraceSetEnabled(psDeviceNode->pvDevice, bNewValue); +} + +/* ----- HWPerf to FTrace packet processing and events injection ------------ */ + +static const IMG_CHAR *_HWPerfKickTypeToStr(RGX_HWPERF_KICK_TYPE eKickType) +{ + static const IMG_CHAR *aszKickType[RGX_HWPERF_KICK_TYPE_LAST+1] = { +#if defined(HWPERF_PACKET_V2C_SIG) + "TA3D", "CDM", "RS", "SHG", "TQTDM", "SYNC", "LAST" +#else + "TA3D", "TQ2D", "TQ3D", "CDM", "RS", "VRDM", "TQTDM", "SYNC", "LAST" +#endif + }; + + /* cast in case of negative value */ + if (((IMG_UINT32) eKickType) >= RGX_HWPERF_KICK_TYPE_LAST) + { + return ""; + } + + return aszKickType[eKickType]; +} + +void PVRGpuTraceEnqueueEvent( + PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32FirmwareCtx, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + RGX_HWPERF_KICK_TYPE eKickType) +{ + const IMG_CHAR *pszKickType = _HWPerfKickTypeToStr(eKickType); + + PVR_DPF((PVR_DBG_MESSAGE, "PVRGpuTraceEnqueueEvent(%s): contextId %u, " + "jobId %u", pszKickType, ui32FirmwareCtx, ui32IntJobRef)); + + if (PVRGpuTraceIsEnabled()) + { + trace_rogue_job_enqueue(ui32FirmwareCtx, ui32IntJobRef, ui32ExtJobRef, + pszKickType); + } +} + +static void _GpuTraceWorkSwitch( + IMG_UINT64 
ui64HWTimestampInOSTime, + IMG_UINT32 ui32CtxId, + IMG_UINT32 ui32CtxPriority, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + const IMG_CHAR* pszWorkType, + PVR_GPUTRACE_SWITCH_TYPE eSwType) +{ + PVR_ASSERT(pszWorkType); + trace_rogue_sched_switch(pszWorkType, eSwType, ui64HWTimestampInOSTime, + ui32CtxId, 2-ui32CtxPriority, ui32IntJobRef, ui32ExtJobRef); +} + +static void _GpuTraceUfo( + IMG_UINT64 ui64OSTimestamp, + const RGX_HWPERF_UFO_EV eEvType, + const IMG_UINT32 ui32CtxId, + const IMG_UINT32 ui32ExtJobRef, + const IMG_UINT32 ui32IntJobRef, + const IMG_UINT32 ui32UFOCount, + const RGX_HWPERF_UFO_DATA_ELEMENT *puData) +{ + switch (eEvType) { + case RGX_HWPERF_UFO_EV_UPDATE: + trace_rogue_ufo_updates(ui64OSTimestamp, ui32CtxId, + ui32ExtJobRef, ui32IntJobRef, ui32UFOCount, puData); + break; + case RGX_HWPERF_UFO_EV_CHECK_SUCCESS: + trace_rogue_ufo_checks_success(ui64OSTimestamp, ui32CtxId, + ui32ExtJobRef, ui32IntJobRef, IMG_FALSE, ui32UFOCount, + puData); + break; + case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS: + trace_rogue_ufo_checks_success(ui64OSTimestamp, ui32CtxId, + ui32ExtJobRef, ui32IntJobRef, IMG_TRUE, ui32UFOCount, + puData); + break; + case RGX_HWPERF_UFO_EV_CHECK_FAIL: + trace_rogue_ufo_checks_fail(ui64OSTimestamp, ui32CtxId, + ui32ExtJobRef, ui32IntJobRef, IMG_FALSE, ui32UFOCount, + puData); + break; + case RGX_HWPERF_UFO_EV_PRCHECK_FAIL: + trace_rogue_ufo_checks_fail(ui64OSTimestamp, ui32CtxId, + ui32ExtJobRef, ui32IntJobRef, IMG_TRUE, ui32UFOCount, + puData); + break; + default: + break; + } +} + +static void _GpuTraceFirmware( + IMG_UINT64 ui64HWTimestampInOSTime, + const IMG_CHAR* pszWorkType, + PVR_GPUTRACE_SWITCH_TYPE eSwType) +{ + trace_rogue_firmware_activity(ui64HWTimestampInOSTime, pszWorkType, eSwType); +} + +static void _GpuTraceEventsLost( + const RGX_HWPERF_STREAM_ID eStreamId, + const IMG_UINT32 ui32LastOrdinal, + const IMG_UINT32 ui32CurrOrdinal) +{ + trace_rogue_events_lost(eStreamId, ui32LastOrdinal, ui32CurrOrdinal); +} + 
+/* Calculate the OS timestamp given an RGX timestamp in the HWPerf event. */ +static uint64_t CalculateEventTimestamp( + PVRSRV_RGXDEV_INFO *psDevInfo, + uint32_t ui32TimeCorrIndex, + uint64_t ui64EventTimestamp) +{ + RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb; + RGX_HWPERF_FTRACE_DATA *psFtraceData = psDevInfo->pvGpuFtraceData; + RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32TimeCorrIndex]; + uint64_t ui64CRTimeStamp = psTimeCorr->ui64CRTimeStamp; + uint64_t ui64OSTimeStamp = psTimeCorr->ui64OSTimeStamp; + uint64_t ui64CRDeltaToOSDeltaKNs = psTimeCorr->ui64CRDeltaToOSDeltaKNs; + uint64_t ui64EventOSTimestamp, deltaRgxTimer, delta_ns; + + if (psFtraceData->ui64LastSampledTimeCorrOSTimeStamp > ui64OSTimeStamp) + { + /* The previous packet had a time reference (time correlation data) more + * recent than the one in the current packet, it means the timer + * correlation array wrapped too quickly (buffer too small) and in the + * previous call to _GpuTraceUfoEvent we read one of the + * newest timer correlations rather than one of the oldest ones. + */ + PVR_DPF((PVR_DBG_ERROR, "%s: The timestamps computed so far could be " + "wrong! 
The time correlation array size should be increased " + "to avoid this.", __func__)); + } + + psFtraceData->ui64LastSampledTimeCorrOSTimeStamp = ui64OSTimeStamp; + + /* RGX CR timer ticks delta */ + deltaRgxTimer = ui64EventTimestamp - ui64CRTimeStamp; + /* RGX time delta in nanoseconds */ + delta_ns = RGXFWIF_GET_DELTA_OSTIME_NS(deltaRgxTimer, ui64CRDeltaToOSDeltaKNs); + /* Calculate OS time of HWPerf event */ + ui64EventOSTimestamp = ui64OSTimeStamp + delta_ns; + + PVR_DPF((PVR_DBG_VERBOSE, "%s: psCurrentDvfs RGX %llu, OS %llu, DVFSCLK %u", + __func__, ui64CRTimeStamp, ui64OSTimeStamp, + psTimeCorr->ui32CoreClockSpeed)); + + return ui64EventOSTimestamp; +} + +static void _GpuTraceSwitchEvent(PVRSRV_RGXDEV_INFO *psDevInfo, + RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt, const IMG_CHAR* pszWorkName, + PVR_GPUTRACE_SWITCH_TYPE eSwType) +{ + IMG_UINT64 ui64Timestamp; + RGX_HWPERF_HW_DATA* psHWPerfPktData; + + PVR_DPF_ENTERED; + + PVR_ASSERT(psHWPerfPkt); + PVR_ASSERT(pszWorkName); + + psHWPerfPktData = RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt); + + ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex, + psHWPerfPkt->ui64Timestamp); + + PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceSwitchEvent: %s ui32ExtJobRef=%d, ui32IntJobRef=%d, eSwType=%d", + pszWorkName, psHWPerfPktData->ui32DMContext, psHWPerfPktData->ui32IntJobRef, eSwType)); + + _GpuTraceWorkSwitch(ui64Timestamp, + psHWPerfPktData->ui32DMContext, + psHWPerfPktData->ui32CtxPriority, + psHWPerfPktData->ui32ExtJobRef, + psHWPerfPktData->ui32IntJobRef, + pszWorkName, + eSwType); + + PVR_DPF_RETURN; +} + +static void _GpuTraceUfoEvent(PVRSRV_RGXDEV_INFO *psDevInfo, + RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt) +{ + IMG_UINT64 ui64Timestamp; + RGX_HWPERF_UFO_DATA *psHWPerfPktData; + IMG_UINT32 ui32UFOCount; + RGX_HWPERF_UFO_DATA_ELEMENT *puData; + + psHWPerfPktData = RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt); + + ui32UFOCount = RGX_HWPERF_GET_UFO_STREAMSIZE(psHWPerfPktData->ui32StreamInfo); + 
puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) IMG_OFFSET_ADDR(psHWPerfPktData, RGX_HWPERF_GET_UFO_STREAMOFFSET(psHWPerfPktData->ui32StreamInfo)); + + ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex, + psHWPerfPkt->ui64Timestamp); + + PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceUfoEvent: ui32ExtJobRef=%d, " + "ui32IntJobRef=%d", psHWPerfPktData->ui32ExtJobRef, + psHWPerfPktData->ui32IntJobRef)); + + _GpuTraceUfo(ui64Timestamp, psHWPerfPktData->eEvType, + psHWPerfPktData->ui32DMContext, psHWPerfPktData->ui32ExtJobRef, + psHWPerfPktData->ui32IntJobRef, ui32UFOCount, puData); +} + +static void _GpuTraceFirmwareEvent(PVRSRV_RGXDEV_INFO *psDevInfo, + RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt, const IMG_CHAR* pszWorkName, + PVR_GPUTRACE_SWITCH_TYPE eSwType) + +{ + uint64_t ui64Timestamp; + RGX_HWPERF_FW_DATA *psHWPerfPktData = RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt); + + ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex, + psHWPerfPkt->ui64Timestamp); + + _GpuTraceFirmware(ui64Timestamp, pszWorkName, eSwType); +} + +static IMG_BOOL ValidAndEmitFTraceEvent(PVRSRV_RGXDEV_INFO *psDevInfo, + RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt) +{ + RGX_HWPERF_EVENT_TYPE eType; + RGX_HWPERF_FTRACE_DATA *psFtraceData = psDevInfo->pvGpuFtraceData; + IMG_UINT32 ui32HwEventTypeIndex; + static const struct { + IMG_CHAR* pszName; + PVR_GPUTRACE_SWITCH_TYPE eSwType; + } aszHwEventTypeMap[] = { +#define _T(T) PVR_GPUTRACE_SWITCH_TYPE_##T + { "BG", _T(BEGIN) }, /* RGX_HWPERF_FW_BGSTART */ + { "BG", _T(END) }, /* RGX_HWPERF_FW_BGEND */ + { "IRQ", _T(BEGIN) }, /* RGX_HWPERF_FW_IRQSTART */ + { "IRQ", _T(END) }, /* RGX_HWPERF_FW_IRQEND */ + { "DBG", _T(BEGIN) }, /* RGX_HWPERF_FW_DBGSTART */ + { "DBG", _T(END) }, /* RGX_HWPERF_FW_DBGEND */ + { "PMOOM_TAPAUSE", _T(END) }, /* RGX_HWPERF_HW_PMOOM_TAPAUSE */ + { "TA", _T(BEGIN) }, /* RGX_HWPERF_HW_TAKICK */ + { "TA", _T(END) }, /* RGX_HWPERF_HW_TAFINISHED */ + { "TQ3D", _T(BEGIN) }, /* 
RGX_HWPERF_HW_3DTQKICK */ + { "3D", _T(BEGIN) }, /* RGX_HWPERF_HW_3DKICK */ + { "3D", _T(END) }, /* RGX_HWPERF_HW_3DFINISHED */ + { "CDM", _T(BEGIN) }, /* RGX_HWPERF_HW_CDMKICK */ + { "CDM", _T(END) }, /* RGX_HWPERF_HW_CDMFINISHED */ + { "TQ2D", _T(BEGIN) }, /* RGX_HWPERF_HW_TLAKICK */ + { "TQ2D", _T(END) }, /* RGX_HWPERF_HW_TLAFINISHED */ + { "3DSPM", _T(BEGIN) }, /* RGX_HWPERF_HW_3DSPMKICK */ + { NULL, 0 }, /* RGX_HWPERF_HW_PERIODIC (unsupported) */ + { "RTU", _T(BEGIN) }, /* RGX_HWPERF_HW_RTUKICK */ + { "RTU", _T(END) }, /* RGX_HWPERF_HW_RTUFINISHED */ + { "SHG", _T(BEGIN) }, /* RGX_HWPERF_HW_SHGKICK */ + { "SHG", _T(END) }, /* RGX_HWPERF_HW_SHGFINISHED */ + { "TQ3D", _T(END) }, /* RGX_HWPERF_HW_3DTQFINISHED */ + { "3DSPM", _T(END) }, /* RGX_HWPERF_HW_3DSPMFINISHED */ + { "PMOOM_TARESUME", _T(BEGIN) }, /* RGX_HWPERF_HW_PMOOM_TARESUME */ + { "TDM", _T(BEGIN) }, /* RGX_HWPERF_HW_TDMKICK */ + { "TDM", _T(END) }, /* RGX_HWPERF_HW_TDMFINISHED */ + { "NULL", _T(SINGLE) }, /* RGX_HWPERF_HW_NULLKICK */ +#undef _T + }; + static_assert(RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE == RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE + 1, + "FW and HW events are not contiguous in RGX_HWPERF_EVENT_TYPE"); + + PVR_ASSERT(psHWPerfPkt); + eType = RGX_HWPERF_GET_TYPE(psHWPerfPkt); + + if (psFtraceData->ui32FTraceLastOrdinal != psHWPerfPkt->ui32Ordinal - 1) + { + RGX_HWPERF_STREAM_ID eStreamId = RGX_HWPERF_GET_STREAM_ID(psHWPerfPkt); + _GpuTraceEventsLost(eStreamId, + psFtraceData->ui32FTraceLastOrdinal, + psHWPerfPkt->ui32Ordinal); + PVR_DPF((PVR_DBG_ERROR, "FTrace events lost (stream_id = %u, ordinal: last = %u, current = %u)", + eStreamId, psFtraceData->ui32FTraceLastOrdinal, psHWPerfPkt->ui32Ordinal)); + } + + psFtraceData->ui32FTraceLastOrdinal = psHWPerfPkt->ui32Ordinal; + + /* Process UFO packets */ + if (eType == RGX_HWPERF_UFO) + { + _GpuTraceUfoEvent(psDevInfo, psHWPerfPkt); + return IMG_TRUE; + } + + if (eType <= RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE) + { + /* this ID belongs to range 0, so 
index directly in range 0 */ + ui32HwEventTypeIndex = eType - RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE; + } + else + { + /* this ID belongs to range 1, so first index in range 1 and skip number of slots used up for range 0 */ + ui32HwEventTypeIndex = (eType - RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE) + + (RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE - RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE + 1); + } + + if (ui32HwEventTypeIndex >= ARRAY_SIZE(aszHwEventTypeMap)) + goto err_unsupported; + + if (aszHwEventTypeMap[ui32HwEventTypeIndex].pszName == NULL) + { + /* Not supported map entry, ignore event */ + goto err_unsupported; + } + + if (HWPERF_PACKET_IS_HW_TYPE(eType)) + { + if (aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType == PVR_GPUTRACE_SWITCH_TYPE_SINGLE) + { + _GpuTraceSwitchEvent(psDevInfo, psHWPerfPkt, + aszHwEventTypeMap[ui32HwEventTypeIndex].pszName, + PVR_GPUTRACE_SWITCH_TYPE_BEGIN); + _GpuTraceSwitchEvent(psDevInfo, psHWPerfPkt, + aszHwEventTypeMap[ui32HwEventTypeIndex].pszName, + PVR_GPUTRACE_SWITCH_TYPE_END); + } + else + { + _GpuTraceSwitchEvent(psDevInfo, psHWPerfPkt, + aszHwEventTypeMap[ui32HwEventTypeIndex].pszName, + aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType); + } + } + else if (HWPERF_PACKET_IS_FW_TYPE(eType)) + { + _GpuTraceFirmwareEvent(psDevInfo, psHWPerfPkt, + aszHwEventTypeMap[ui32HwEventTypeIndex].pszName, + aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType); + } + else + { + goto err_unsupported; + } + + return IMG_TRUE; + +err_unsupported: + PVR_DPF((PVR_DBG_VERBOSE, "%s: Unsupported event type %d", __func__, eType)); + return IMG_FALSE; +} + + +static void _GpuTraceProcessPackets(PVRSRV_RGXDEV_INFO *psDevInfo, + void *pBuffer, IMG_UINT32 ui32ReadLen) +{ + IMG_UINT32 ui32TlPackets = 0; + IMG_UINT32 ui32HWPerfPackets = 0; + IMG_UINT32 ui32HWPerfPacketsSent = 0; + void *pBufferEnd; + PVRSRVTL_PPACKETHDR psHDRptr; + PVRSRVTL_PACKETTYPE ui16TlType; + + PVR_DPF_ENTERED; + + PVR_ASSERT(psDevInfo); + PVR_ASSERT(pBuffer); + PVR_ASSERT(ui32ReadLen); + + /* 
Process the TL Packets + */ + pBufferEnd = IMG_OFFSET_ADDR(pBuffer, ui32ReadLen); + psHDRptr = GET_PACKET_HDR(pBuffer); + while ( psHDRptr < (PVRSRVTL_PPACKETHDR)pBufferEnd ) + { + ui16TlType = GET_PACKET_TYPE(psHDRptr); + if (ui16TlType == PVRSRVTL_PACKETTYPE_DATA) + { + IMG_UINT16 ui16DataLen = GET_PACKET_DATA_LEN(psHDRptr); + if (0 == ui16DataLen) + { + PVR_DPF((PVR_DBG_ERROR, "_GpuTraceProcessPackets: ZERO Data in TL data packet: %p", psHDRptr)); + } + else + { + RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt; + RGX_HWPERF_V2_PACKET_HDR* psHWPerfEnd; + + /* Check for lost hwperf data packets */ + psHWPerfEnd = RGX_HWPERF_GET_PACKET(GET_PACKET_DATA_PTR(psHDRptr)+ui16DataLen); + psHWPerfPkt = RGX_HWPERF_GET_PACKET(GET_PACKET_DATA_PTR(psHDRptr)); + do + { + if (ValidAndEmitFTraceEvent(psDevInfo, psHWPerfPkt)) + { + ui32HWPerfPacketsSent++; + } + ui32HWPerfPackets++; + psHWPerfPkt = RGX_HWPERF_GET_NEXT_PACKET(psHWPerfPkt); + } + while (psHWPerfPkt < psHWPerfEnd); + } + } + else if (ui16TlType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED) + { + PVR_DPF((PVR_DBG_MESSAGE, "_GpuTraceProcessPackets: Indication that the transport buffer was full")); + } + else + { + /* else Ignore padding packet type and others */ + PVR_DPF((PVR_DBG_MESSAGE, "_GpuTraceProcessPackets: Ignoring TL packet, type %d", ui16TlType )); + } + + psHDRptr = GET_NEXT_PACKET_ADDR(psHDRptr); + ui32TlPackets++; + } + + PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceProcessPackets: TL " + "Packets processed %03d, HWPerf packets %03d, sent %03d", + ui32TlPackets, ui32HWPerfPackets, ui32HWPerfPacketsSent)); + + PVR_DPF_RETURN; +} + + +static void _GpuTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle) +{ + PVRSRV_RGXDEV_INFO* psDeviceInfo = hCmdCompHandle; + RGX_HWPERF_FTRACE_DATA* psFtraceData; + PVRSRV_ERROR eError; + IMG_PBYTE pBuffer; + IMG_UINT32 ui32ReadLen; + IMG_BOOL bFTraceLockAcquired = IMG_FALSE; + + PVR_DPF_ENTERED; + + PVR_ASSERT(psDeviceInfo != NULL); + + psFtraceData = 
psDeviceInfo->pvGpuFtraceData; + + /* Command-complete notifiers can run concurrently. If this is + * happening, just bail out and let the previous call finish. + * This is ok because we can process the queued packets on the next call. + */ + bFTraceLockAcquired = OSTryLockAcquire(psFtraceData->hFTraceResourceLock); + if (IMG_FALSE == bFTraceLockAcquired) + { + PVR_DPF_RETURN; + } + + /* If this notifier is called, it means the TL resources will be valid at-least + * until the end of this call, since the DeInit function will wait on the hFTraceResourceLock + * to clean-up the TL resources and un-register the notifier, so just assert here. + */ + PVR_ASSERT(psFtraceData->hGPUTraceTLStream); + + /* If we have a valid stream attempt to acquire some data */ + eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, psFtraceData->hGPUTraceTLStream, &pBuffer, &ui32ReadLen); + if (eError == PVRSRV_OK) + { + /* Process the HWPerf packets and release the data */ + if (ui32ReadLen > 0) + { + PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceCmdCompleteNotify: DATA AVAILABLE offset=%p, length=%d", pBuffer, ui32ReadLen)); + + /* Process the transport layer data for HWPerf packets... 
*/ + _GpuTraceProcessPackets(psDeviceInfo, pBuffer, ui32ReadLen); + + eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psFtraceData->hGPUTraceTLStream); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "TLClientReleaseData"); + + /* Serious error, disable FTrace GPU events */ + + /* Release TraceLock so we always have the locking + * order BridgeLock->TraceLock to prevent AB-BA deadlocks*/ + OSLockRelease(psFtraceData->hFTraceResourceLock); + OSLockAcquire(psFtraceData->hFTraceResourceLock); + _GpuTraceDisable(psDeviceInfo, IMG_FALSE); + OSLockRelease(psFtraceData->hFTraceResourceLock); + goto out; + + } + } /* else no data, ignore */ + } + else if (eError != PVRSRV_ERROR_TIMEOUT) + { + PVR_LOG_ERROR(eError, "TLClientAcquireData"); + } + if (bFTraceLockAcquired) + { + OSLockRelease(psFtraceData->hFTraceResourceLock); + } +out: + PVR_DPF_RETURN; +} + +/* ----- AppHint interface -------------------------------------------------- */ + +static PVRSRV_ERROR _GpuTraceIsEnabledCallback( + const PVRSRV_DEVICE_NODE *device, + const void *private_data, + IMG_BOOL *value) +{ + PVR_UNREFERENCED_PARAMETER(device); + PVR_UNREFERENCED_PARAMETER(private_data); + + *value = gbFTraceGPUEventsEnabled; + + return PVRSRV_OK; +} + +static PVRSRV_ERROR _GpuTraceSetEnabledCallback( + const PVRSRV_DEVICE_NODE *device, + const void *private_data, + IMG_BOOL value) +{ + PVR_UNREFERENCED_PARAMETER(device); + + /* Lock down the state to avoid concurrent writes */ + OSLockAcquire(ghGPUTraceStateLock); + + if (value != gbFTraceGPUEventsEnabled) + { + PVRSRV_ERROR eError; + if ((eError = _GpuTraceSetEnabledForAllDevices(value)) == PVRSRV_OK) + { + PVR_TRACE(("%s GPU FTrace", value ? "ENABLED" : "DISABLED")); + gbFTraceGPUEventsEnabled = value; + } + else + { + PVR_TRACE(("FAILED to %s GPU FTrace", value ? "enable" : "disable")); + /* On failure, partial enable/disable might have resulted. + * Try best to restore to previous state. 
Ignore error */ + _GpuTraceSetEnabledForAllDevices(gbFTraceGPUEventsEnabled); + + OSLockRelease(ghGPUTraceStateLock); + return eError; + } + } + else + { + PVR_TRACE(("GPU FTrace already %s!", value ? "enabled" : "disabled")); + } + + OSLockRelease(ghGPUTraceStateLock); + + return PVRSRV_OK; +} + +void PVRGpuTraceInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableFTraceGPU, + _GpuTraceIsEnabledCallback, + _GpuTraceSetEnabledCallback, + psDeviceNode, NULL); +} + +/* ----- FTrace event callbacks -------------------------------------------- */ + +void PVRGpuTraceEnableUfoCallback(void) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetPVRSRVData()->psDeviceNodeList; +#if defined(SUPPORT_RGX) + PVRSRV_RGXDEV_INFO *psRgxDevInfo; + PVRSRV_ERROR eError; +#endif + + /* Lock down events state, for consistent value of guiUfoEventRef */ + OSLockAcquire(ghLockFTraceEventLock); + if (guiUfoEventRef++ == 0) + { + /* make sure UFO events are enabled on all rogue devices */ + while (psDeviceNode) + { +#if defined(SUPPORT_RGX) + IMG_UINT64 ui64Filter; + + psRgxDevInfo = psDeviceNode->pvDevice; + ui64Filter = RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO) | + psRgxDevInfo->ui64HWPerfFilter; + /* Small chance exists that ui64HWPerfFilter can be changed here and + * the newest filter value will be changed to the old one + UFO event. + * This is not a critical problem. */ + eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW, + IMG_FALSE, ui64Filter); + if (eError == PVRSRV_ERROR_NOT_INITIALISED) + { + /* If we land here that means that the FW is not initialised yet. + * We stored the filter and it will be passed to the firmware + * during its initialisation phase. So ignore. 
*/ + } + else if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Could not enable UFO HWPerf events on device %d", psDeviceNode->sDevId.i32UMIdentifier)); + } +#endif + psDeviceNode = psDeviceNode->psNext; + } + } + OSLockRelease(ghLockFTraceEventLock); +} + +void PVRGpuTraceDisableUfoCallback(void) +{ +#if defined(SUPPORT_RGX) + PVRSRV_ERROR eError; +#endif + PVRSRV_DEVICE_NODE *psDeviceNode; + + /* We have to check if lock is valid because on driver unload + * PVRGpuTraceSupportDeInit is called before kernel disables the ftrace + * events. This means that the lock will be destroyed before this callback + * is called. + * We can safely return if that situation happens because driver will be + * unloaded so we don't care about HWPerf state anymore. */ + if (ghLockFTraceEventLock == NULL) + return; + + psDeviceNode = PVRSRVGetPVRSRVData()->psDeviceNodeList; + + /* Lock down events state, for consistent value of guiUfoEventRef */ + OSLockAcquire(ghLockFTraceEventLock); + if (--guiUfoEventRef == 0) + { + /* make sure UFO events are disabled on all rogue devices */ + while (psDeviceNode) + { +#if defined(SUPPORT_RGX) + IMG_UINT64 ui64Filter; + PVRSRV_RGXDEV_INFO *psRgxDevInfo = psDeviceNode->pvDevice; + + ui64Filter = ~(RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO)) & + psRgxDevInfo->ui64HWPerfFilter; + /* Small chance exists that ui64HWPerfFilter can be changed here and + * the newest filter value will be changed to the old one + UFO event. + * This is not a critical problem. */ + eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW, + IMG_FALSE, ui64Filter); + if (eError == PVRSRV_ERROR_NOT_INITIALISED) + { + /* If we land here that means that the FW is not initialised yet. + * We stored the filter and it will be passed to the firmware + * during its initialisation phase. So ignore. 
*/ + } + else if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Could not disable UFO HWPerf events on device %d", + psDeviceNode->sDevId.i32UMIdentifier)); + } +#endif + psDeviceNode = psDeviceNode->psNext; + } + } + OSLockRelease(ghLockFTraceEventLock); +} + +void PVRGpuTraceEnableFirmwareActivityCallback(void) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetPVRSRVData()->psDeviceNodeList; +#if defined(SUPPORT_RGX) + PVRSRV_RGXDEV_INFO *psRgxDevInfo; + uint64_t ui64Filter, ui64FWEventsFilter = 0; + int i; + + for (i = RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE; + i <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE; i++) + { + ui64FWEventsFilter |= RGX_HWPERF_EVENT_MASK_VALUE(i); + } +#endif + OSLockAcquire(ghLockFTraceEventLock); + /* Enable all FW events on all the devices */ + while (psDeviceNode) + { +#if defined(SUPPORT_RGX) + PVRSRV_ERROR eError; + psRgxDevInfo = psDeviceNode->pvDevice; + ui64Filter = psRgxDevInfo->ui64HWPerfFilter | ui64FWEventsFilter; + + eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW, + IMG_FALSE, ui64Filter); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Could not enable HWPerf event for firmware" + " task timings (%s).", PVRSRVGetErrorString(eError))); + } +#endif + psDeviceNode = psDeviceNode->psNext; + } + OSLockRelease(ghLockFTraceEventLock); +} + +void PVRGpuTraceDisableFirmwareActivityCallback(void) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; +#if defined(SUPPORT_RGX) + IMG_UINT64 ui64FWEventsFilter = ~0; + int i; +#endif + + /* We have to check if lock is valid because on driver unload + * PVRGpuTraceSupportDeInit is called before kernel disables the ftrace + * events. This means that the lock will be destroyed before this callback + * is called. + * We can safely return if that situation happens because driver will be + * unloaded so we don't care about HWPerf state anymore. 
*/ + if (ghLockFTraceEventLock == NULL) + return; + + psDeviceNode = PVRSRVGetPVRSRVData()->psDeviceNodeList; + +#if defined(SUPPORT_RGX) + for (i = RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE; + i <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE; i++) + { + ui64FWEventsFilter &= ~RGX_HWPERF_EVENT_MASK_VALUE(i); + } +#endif + + OSLockAcquire(ghLockFTraceEventLock); + + /* Disable all FW events on all the devices */ + while (psDeviceNode) + { +#if defined(SUPPORT_RGX) + PVRSRV_RGXDEV_INFO *psRgxDevInfo = psDeviceNode->pvDevice; + IMG_UINT64 ui64Filter = psRgxDevInfo->ui64HWPerfFilter & ui64FWEventsFilter; + + if (PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW, + IMG_FALSE, ui64Filter) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Could not disable HWPerf event for firmware task timings.")); + } +#endif + psDeviceNode = psDeviceNode->psNext; + } + + OSLockRelease(ghLockFTraceEventLock); +} + +/****************************************************************************** + End of file (pvr_gputrace.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_ion_stats.h b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_ion_stats.h new file mode 100644 index 000000000000..c34180785453 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_ion_stats.h @@ -0,0 +1,80 @@ +/*************************************************************************/ /*! +@File +@Title Functions for recording ION memory stats. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef PVR_ION_STATS_H +#define PVR_ION_STATS_H + +#include "pvrsrv_error.h" +#include "img_defs.h" + +struct dma_buf; + +#if defined(PVRSRV_ENABLE_PVR_ION_STATS) +PVRSRV_ERROR PVRSRVIonStatsInitialise(void); + +void PVRSRVIonStatsDestroy(void); + +void PVRSRVIonAddMemAllocRecord(struct dma_buf *psDmaBuf); + +void PVRSRVIonRemoveMemAllocRecord(struct dma_buf *psDmaBuf); +#else +static INLINE PVRSRV_ERROR PVRSRVIonStatsInitialise(void) +{ + return PVRSRV_OK; +} + +static INLINE void PVRSRVIonStatsDestroy(void) +{ +} + +static INLINE void PVRSRVIonAddMemAllocRecord(struct dma_buf *psDmaBuf) +{ + PVR_UNREFERENCED_PARAMETER(psDmaBuf); +} + +static INLINE void PVRSRVIonRemoveMemAllocRecord(struct dma_buf *psDmaBuf) +{ + PVR_UNREFERENCED_PARAMETER(psDmaBuf); +} +#endif /* defined(PVRSRV_ENABLE_PVR_ION_STATS) */ + +#endif /* PVR_ION_STATS_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_procfs.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_procfs.c new file mode 100644 index 000000000000..946b734481d1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_procfs.c @@ -0,0 +1,613 @@ +/*************************************************************************/ /*! +@File +@Title ProcFS implementation of Debug Info interface. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+@Description Implements osdi_impl.h API to provide access to driver's + debug data via ProcFS. + + Note about locking in ProcFS module. + + Access to ProcFS is protected against the race where any + file could be removed while being accessed or accessed while + being removed. Any calls to proc_remove() will block until all + operations are finished. + + See implementation of file operations (proc_reg_*()) and + implementation of (un)use_pde() and proc_entry_rundown() in + source/fs/proc/inode.c in Linux kernel sources for more details. + + Not about locking for sequential files. + + The seq_file objects have a mutex that protects access + to all of the file operations hence all of the sequential + *read* operations are protected. +*/ /**************************************************************************/ + +#include +#include +#include + +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvr_procfs.h" +#include "osfunc.h" +#include "allocmem.h" +#include "pvr_bridge_k.h" +#include "pvr_uaccess.h" +#include "osdi_impl.h" + +#define _DRIVER_THREAD_ENTER() \ + do { \ + PVRSRV_ERROR eLocalError = PVRSRVDriverThreadEnter(); \ + if (eLocalError != PVRSRV_OK) \ + { \ + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVDriverThreadEnter failed: %s", \ + __func__, PVRSRVGetErrorString(eLocalError))); \ + return OSPVRSRVToNativeError(eLocalError); \ + } \ + } while (0) + +#define _DRIVER_THREAD_EXIT() \ + PVRSRVDriverThreadExit() + +#define PVR_DEBUGFS_PVR_DPF_LEVEL PVR_DBG_ERROR + +typedef struct DFS_DIR +{ + struct proc_dir_entry *psDirEntry; + struct DFS_DIR *psParentDir; +} DFS_DIR; + +typedef struct DFS_ENTRY +{ + OSDI_IMPL_ENTRY sImplEntry; + DI_ITERATOR_CB sIterCb; +} DFS_ENTRY; + +typedef struct DFS_FILE +{ + struct proc_dir_entry *psFileEntry; + struct DFS_DIR *psParentDir; + const struct seq_operations *psSeqOps; + struct DFS_ENTRY sEntry; + DI_ENTRY_TYPE eType; +} DFS_FILE; + +/* ----- native callbacks interface 
----------------------------------------- */ + +static void _VPrintf(void *pvNativeHandle, const IMG_CHAR *pszFmt, + va_list pArgs) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) + seq_vprintf(pvNativeHandle, pszFmt, pArgs); +#else + IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; + + vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFmt, pArgs); + seq_printf(pvNativeHandle, "%s", szBuffer); +#endif +} + +static void _Puts(void *pvNativeHandle, const IMG_CHAR *pszStr) +{ + seq_puts(pvNativeHandle, pszStr); +} + +static IMG_BOOL _HasOverflowed(void *pvNativeHandle) +{ + struct seq_file *psSeqFile = pvNativeHandle; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) + return seq_has_overflowed(psSeqFile); +#else + return psSeqFile->count == psSeqFile->size; +#endif +} + +static OSDI_IMPL_ENTRY_CB _g_sEntryCallbacks = { + .pfnVPrintf = _VPrintf, + .pfnPuts = _Puts, + .pfnHasOverflowed = _HasOverflowed, +}; + +/* ----- sequential file operations ----------------------------------------- */ + +static void *_Start(struct seq_file *psSeqFile, loff_t *puiPos) +{ + DFS_ENTRY *psEntry = psSeqFile->private; + + void *pvRet = psEntry->sIterCb.pfnStart(&psEntry->sImplEntry, puiPos); + + if (pvRet == DI_START_TOKEN) + { + return SEQ_START_TOKEN; + } + + return pvRet; +} + +static void _Stop(struct seq_file *psSeqFile, void *pvPriv) +{ + DFS_ENTRY *psEntry = psSeqFile->private; + + psEntry->sIterCb.pfnStop(&psEntry->sImplEntry, pvPriv); +} + +static void *_Next(struct seq_file *psSeqFile, void *pvPriv, loff_t *puiPos) +{ + DFS_ENTRY *psEntry = psSeqFile->private; + + return psEntry->sIterCb.pfnNext(&psEntry->sImplEntry, pvPriv, puiPos); +} + +static int _Show(struct seq_file *psSeqFile, void *pvPriv) +{ + DFS_ENTRY *psEntry = psSeqFile->private; + + if (pvPriv == SEQ_START_TOKEN) + { + pvPriv = DI_START_TOKEN; + } + + return psEntry->sIterCb.pfnShow(&psEntry->sImplEntry, pvPriv); +} + +static struct seq_operations _g_sSeqOps = { + .start = _Start, + .stop = _Stop, + .next = 
_Next, + .show = _Show +}; + +/* ----- file operations ---------------------------------------------------- */ + +static int _Open(struct inode *psINode, struct file *psFile) +{ + DFS_FILE *psDFSFile = PDE_DATA(psINode); + int iRes; + + PVR_LOG_RETURN_IF_FALSE(psDFSFile != NULL, "psDFSFile is NULL", -EIO); + + _DRIVER_THREAD_ENTER(); + + if (psDFSFile->sEntry.sIterCb.pfnStart != NULL) + { + iRes = seq_open(psFile, psDFSFile->psSeqOps); + } + else + { + /* private data is NULL as it's going to be set below */ + iRes = single_open(psFile, _Show, NULL); + } + + if (iRes == 0) + { + struct seq_file *psSeqFile = psFile->private_data; + + DFS_ENTRY *psEntry = OSAllocMem(sizeof(*psEntry)); + if (psEntry == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem() failed", __func__)); + iRes = -ENOMEM; + goto return_; + } + + *psEntry = psDFSFile->sEntry; + psSeqFile->private = psEntry; + psEntry->sImplEntry.pvNative = psSeqFile; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to seq_open psFile, returning %d", + __func__, iRes)); + } + +return_: + _DRIVER_THREAD_EXIT(); + + return iRes; +} + +static int _Close(struct inode *psINode, struct file *psFile) +{ + DFS_FILE *psDFSFile = PDE_DATA(psINode); + DFS_ENTRY *psEntry; + int iRes; + + PVR_LOG_RETURN_IF_FALSE(psDFSFile != NULL, "psDFSFile is NULL", + -EIO); + + _DRIVER_THREAD_ENTER(); + + /* save pointer to DFS_ENTRY */ + psEntry = ((struct seq_file *) psFile->private_data)->private; + + if (psDFSFile->sEntry.sIterCb.pfnStart != NULL) + { + iRes = seq_release(psINode, psFile); + } + else + { + iRes = single_release(psINode, psFile); + } + + /* free DFS_ENTRY allocated in _Open */ + OSFreeMem(psEntry); + + /* Sanity check as seq_release (and single_release which calls it) + * never fail */ + if (iRes != 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to release psFile, returning %d", + __func__, iRes)); + } + + _DRIVER_THREAD_EXIT(); + + return iRes; +} + +static ssize_t _Read(struct file *psFile, char __user *pcBuffer, + 
size_t uiCount, loff_t *puiPos) +{ + DFS_FILE *psDFSFile = PDE_DATA(psFile->f_path.dentry->d_inode); + ssize_t iRes = -1; + + _DRIVER_THREAD_ENTER(); + + if (psDFSFile->eType == DI_ENTRY_TYPE_GENERIC) + { + iRes = seq_read(psFile, pcBuffer, uiCount, puiPos); + if (iRes < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: filed to read from file pfnRead() " + "returned %zd", __func__, iRes)); + goto return_; + } + } + else if (psDFSFile->eType == DI_ENTRY_TYPE_RANDOM_ACCESS) + { + DFS_ENTRY *psEntry = &psDFSFile->sEntry; + IMG_UINT64 ui64Count = uiCount, ui64Pos; + + IMG_CHAR *pcLocalBuffer = OSAllocMem(uiCount); + PVR_GOTO_IF_FALSE(pcLocalBuffer != NULL, return_); + + iRes = psEntry->sIterCb.pfnRead(pcLocalBuffer, ui64Count, &ui64Pos, + psEntry->sImplEntry.pvPrivData); + if (iRes < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: filed to read from file pfnRead() " + "returned %zd", __func__, iRes)); + OSFreeMem(pcLocalBuffer); + goto return_; + } + + if (pvr_copy_to_user(pcBuffer, pcLocalBuffer, iRes) != 0) + { + iRes = -1; + } + + OSFreeMem(pcLocalBuffer); + + *puiPos = ui64Pos; + } + +return_: + _DRIVER_THREAD_EXIT(); + + return iRes; +} + +static loff_t _LSeek(struct file *psFile, loff_t iOffset, int iOrigin) +{ + DFS_FILE *psDFSFile = PDE_DATA(psFile->f_path.dentry->d_inode); + loff_t iRes = -1; + + _DRIVER_THREAD_ENTER(); + + if (psDFSFile->eType == DI_ENTRY_TYPE_GENERIC) + { + iRes = seq_lseek(psFile, iOffset, iOrigin); + if (iRes < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: filed to set file position to " + "offset %lld, pfnSeek() returned %lld", __func__, + iOffset, iRes)); + goto return_; + } + } + else if (psDFSFile->eType == DI_ENTRY_TYPE_RANDOM_ACCESS) + { + DFS_ENTRY *psEntry = &psDFSFile->sEntry; + IMG_UINT64 ui64Pos; + + switch (iOrigin) + { + case SEEK_SET: + ui64Pos = psFile->f_pos + iOffset; + break; + case SEEK_CUR: + ui64Pos = iOffset; + break; + case SEEK_END: + /* not supported as we don't know the file size here */ + /* fall through */ + default: + return -1; + } + + /* 
only pass the absolute position to the callback, it's up to the + * implementer to determine if the position is valid */ + + iRes = psEntry->sIterCb.pfnSeek(ui64Pos, + psEntry->sImplEntry.pvPrivData); + if (iRes < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: filed to set file position to " + "offset %lld, pfnSeek() returned %lld", __func__, + iOffset, iRes)); + goto return_; + } + + psFile->f_pos = ui64Pos; + } + +return_: + _DRIVER_THREAD_EXIT(); + + return iRes; +} + +static ssize_t _Write(struct file *psFile, const char __user *pszBuffer, + size_t uiCount, loff_t *puiPos) +{ + struct inode *psINode = psFile->f_path.dentry->d_inode; + DFS_FILE *psDFSFile = PDE_DATA(psINode); + DI_ITERATOR_CB *psIter = &psDFSFile->sEntry.sIterCb; + IMG_CHAR *pcLocalBuffer; + IMG_UINT64 ui64Count = uiCount + 1, ui64Pos = *puiPos; + IMG_INT64 i64Res = -EIO; + + PVR_LOG_RETURN_IF_FALSE(psDFSFile != NULL, "psDFSFile is NULL", + -EIO); + PVR_LOG_RETURN_IF_FALSE(psIter->pfnWrite != NULL, "pfnWrite is NULL", + -EIO); + + _DRIVER_THREAD_ENTER(); + + pcLocalBuffer = OSAllocMem(ui64Count); + PVR_LOG_GOTO_IF_FALSE(pcLocalBuffer != NULL, "OSAllocMem() failed", + return_); + + i64Res = pvr_copy_from_user(pcLocalBuffer, pszBuffer, uiCount); + PVR_LOG_GOTO_IF_FALSE(i64Res == 0, "pvr_copy_from_user() failed", + free_local_buffer_); + + /* ensure that the framework user gets a NUL terminated buffer */ + pcLocalBuffer[ui64Count - 1] = '\0'; + + i64Res = psIter->pfnWrite(pcLocalBuffer, ui64Count, &ui64Pos, + psDFSFile->sEntry.sImplEntry.pvPrivData); + PVR_LOG_GOTO_IF_FALSE(i64Res >= 0, "pfnWrite failed", free_local_buffer_); + + *puiPos = ui64Pos; + +free_local_buffer_: + OSFreeMem(pcLocalBuffer); + +return_: + _DRIVER_THREAD_EXIT(); + + return i64Res; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) +typedef struct proc_ops proc_ops; +#else +typedef struct file_operations proc_ops; +#endif + +static const proc_ops _g_psFileOpsGen = { +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0) + .owner = 
THIS_MODULE, +#endif + .open = _Open, + .release = _Close, + .read = _Read, + .llseek = _LSeek, + .write = _Write, +}; + +static const proc_ops _g_psFileOpsRndAcc = { +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0) + .owner = THIS_MODULE, +#endif + .read = _Read, + .llseek = _LSeek, + .write = _Write, +}; + +/* ----- DI implementation interface ---------------------------------------- */ + +static PVRSRV_ERROR _Init(void) +{ + return PVRSRV_OK; +} + +static void _DeInit(void) +{ +} + +static PVRSRV_ERROR _CreateFile(const IMG_CHAR *pszName, + DI_ENTRY_TYPE eType, + const DI_ITERATOR_CB *psIterCb, + void *pvPrivData, + void *pvParentDir, + void **pvFile) +{ + DFS_DIR *psParentDir = pvParentDir; + DFS_FILE *psFile; + umode_t uiMode = S_IFREG; + struct proc_dir_entry *psEntry; + const struct file_operations *psFileOps = NULL; + PVRSRV_ERROR eError; + + PVR_LOG_RETURN_IF_INVALID_PARAM(pvFile != NULL, "pvFile"); + PVR_LOG_RETURN_IF_INVALID_PARAM(pvParentDir != NULL, "pvParentDir"); + + switch (eType) + { + case DI_ENTRY_TYPE_GENERIC: + psFileOps = &_g_psFileOpsGen; + break; + case DI_ENTRY_TYPE_RANDOM_ACCESS: + psFileOps = &_g_psFileOpsRndAcc; + break; + default: + PVR_LOG_GOTO_IF_INVALID_PARAM("eType", eError, return_); + } + + psFile = OSAllocMem(sizeof(*psFile)); + PVR_LOG_GOTO_IF_NOMEM(psFile, eError, return_); + + uiMode |= psIterCb->pfnShow != NULL || psIterCb->pfnRead != NULL ? + S_IRUGO : 0; + uiMode |= psIterCb->pfnWrite != NULL ? S_IWUSR : 0; + + psEntry = proc_create_data(pszName, uiMode, psParentDir->psDirEntry, + psFileOps, psFile); + if (IS_ERR_OR_NULL(psEntry)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Cannot create debugfs '%s' file", + __func__, pszName)); + + eError = psEntry == NULL ? 
+ PVRSRV_ERROR_OUT_OF_MEMORY : PVRSRV_ERROR_INVALID_DEVICE; + goto free_file_; + } + + psFile->eType = eType; + psFile->psSeqOps = &_g_sSeqOps; + psFile->sEntry.sIterCb = *psIterCb; + psFile->sEntry.sImplEntry.pvPrivData = pvPrivData; + psFile->sEntry.sImplEntry.pvNative = NULL; + psFile->sEntry.sImplEntry.psCb = &_g_sEntryCallbacks; + psFile->psParentDir = psParentDir; + psFile->psFileEntry = psEntry; + + *pvFile = psFile; + + return PVRSRV_OK; + +free_file_: + OSFreeMem(psFile); + +return_: + return eError; +} + +static void _DestroyFile(void *pvFile) +{ + DFS_FILE *psFile = pvFile; + + PVR_ASSERT(psFile != NULL); + + proc_remove(psFile->psFileEntry); + OSFreeMem(psFile); +} + +static PVRSRV_ERROR _CreateDir(const IMG_CHAR *pszName, + void *pvParentDir, + void **ppvDir) +{ + DFS_DIR *psNewDir; + struct proc_dir_entry *psDirEntry, *psParentDir = NULL; + + PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName"); + PVR_LOG_RETURN_IF_INVALID_PARAM(ppvDir != NULL, "ppvDir"); + + psNewDir = OSAllocMem(sizeof(*psNewDir)); + PVR_LOG_RETURN_IF_NOMEM(psNewDir, "OSAllocMem"); + + psNewDir->psParentDir = pvParentDir; + + if (pvParentDir != NULL) + { + psParentDir = psNewDir->psParentDir->psDirEntry; + } + + psDirEntry = proc_mkdir(pszName, psParentDir); + if (IS_ERR_OR_NULL(psDirEntry)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Cannot create '%s' debugfs directory", + __func__, pszName)); + OSFreeMem(psNewDir); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psNewDir->psDirEntry = psDirEntry; + *ppvDir = psNewDir; + + return PVRSRV_OK; +} + +static void _DestroyDir(void *pvDir) +{ + DFS_DIR *psDir = pvDir; + + PVR_ASSERT(psDir != NULL); + + proc_remove(psDir->psDirEntry); + OSFreeMem(psDir); +} + +PVRSRV_ERROR PVRProcFsRegister(void) +{ + OSDI_IMPL_CB sImplCb = { + .pfnInit = _Init, + .pfnDeInit = _DeInit, + .pfnCreateEntry = _CreateFile, + .pfnDestroyEntry = _DestroyFile, + .pfnCreateGroup = _CreateDir, + .pfnDestroyGroup = _DestroyDir + }; + + return 
DIRegisterImplementation("procfs", &sImplCb); +} diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_procfs.h b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_procfs.h new file mode 100644 index 000000000000..61a1f0ee28d3 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_procfs.h @@ -0,0 +1,50 @@ +/*************************************************************************/ /*! +@File +@Title ProcFS implementation of Debug Info interface. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PVR_PROCFS_H +#define PVR_PROCFS_H + +#include "pvrsrv_error.h" + +PVRSRV_ERROR PVRProcFsRegister(void); + +#endif /* PVR_PROCFS_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_uaccess.h b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_uaccess.h new file mode 100644 index 000000000000..05fcd128b96a --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/pvr_uaccess.h @@ -0,0 +1,99 @@ +/*************************************************************************/ /*! +@File +@Title Utility functions for user space access +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef __PVR_UACCESS_H__ +#define __PVR_UACCESS_H__ + +#include +#include + +static inline unsigned long pvr_copy_to_user(void __user *pvTo, const void *pvFrom, unsigned long ulBytes) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)) + if (access_ok(VERIFY_WRITE, pvTo, ulBytes)) +#else + if (access_ok(pvTo, ulBytes)) +#endif + { + return __copy_to_user(pvTo, pvFrom, ulBytes); + } + + return ulBytes; +} + + +#if defined(__KLOCWORK__) + /* this part is only to tell Klocwork not to report false positive because + it doesn't understand that pvr_copy_from_user will initialise the memory + pointed to by pvTo */ +#include /* get the memset prototype */ +static inline unsigned long pvr_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes) +{ + if (pvTo != NULL) + { + memset(pvTo, 0xAA, ulBytes); + return 0; + } + return 1; +} + +#else /* real implementation */ + +static inline unsigned long pvr_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes) +{ + /* + * The compile time correctness checking introduced for copy_from_user in + * Linux 2.6.33 isn't fully compatible with our usage of the function. + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)) + if (access_ok(VERIFY_READ, pvFrom, ulBytes)) +#else + if (access_ok(pvFrom, ulBytes)) +#endif + { + return __copy_from_user(pvTo, pvFrom, ulBytes); + } + + return ulBytes; +} +#endif /* klocworks */ + +#endif /* __PVR_UACCESS_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/rogue_trace_events.h b/drivers/mcst/gpu-imgtec/services/server/env/linux/rogue_trace_events.h new file mode 100644 index 000000000000..e59230703f8e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/rogue_trace_events.h @@ -0,0 +1,543 @@ +/*************************************************************************/ /*! +@File +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rogue + +#if !defined(ROGUE_TRACE_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ) +#define ROGUE_TRACE_EVENTS_H + +#include +#include +#include +#include + +#define show_secs_from_ns(ns) \ + ({ \ + u64 t = ns + (NSEC_PER_USEC / 2); \ + do_div(t, NSEC_PER_SEC); \ + t; \ + }) + +#define show_usecs_from_ns(ns) \ + ({ \ + u64 t = ns + (NSEC_PER_USEC / 2); \ + u32 rem; \ + do_div(t, NSEC_PER_USEC); \ + rem = do_div(t, USEC_PER_SEC); \ + }) + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) +int trace_fence_update_enabled_callback(void); +#else +void trace_fence_update_enabled_callback(void); +#endif +void trace_fence_update_disabled_callback(void); + +TRACE_EVENT_FN(rogue_fence_update, + + TP_PROTO(const char *comm, const char *cmd, const char *dm, u32 ctx_id, u32 offset, + u32 sync_fwaddr, u32 sync_value), + + TP_ARGS(comm, cmd, dm, ctx_id, offset, sync_fwaddr, sync_value), + + TP_STRUCT__entry( + __string( comm, comm ) + __string( cmd, cmd ) + __string( dm, dm ) + __field( u32, ctx_id ) + __field( u32, offset ) + __field( u32, sync_fwaddr ) + __field( u32, sync_value ) + ), + + TP_fast_assign( + __assign_str(comm, comm); + __assign_str(cmd, cmd); + __assign_str(dm, dm); + __entry->ctx_id = ctx_id; + __entry->offset = offset; + __entry->sync_fwaddr = sync_fwaddr; + __entry->sync_value = sync_value; + ), + + 
TP_printk("comm=%s cmd=%s dm=%s ctx_id=%lu offset=%lu sync_fwaddr=%#lx sync_value=%#lx", + __get_str(comm), + __get_str(cmd), + __get_str(dm), + (unsigned long)__entry->ctx_id, + (unsigned long)__entry->offset, + (unsigned long)__entry->sync_fwaddr, + (unsigned long)__entry->sync_value), + + trace_fence_update_enabled_callback, + trace_fence_update_disabled_callback +); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) +int trace_fence_check_enabled_callback(void); +#else +void trace_fence_check_enabled_callback(void); +#endif +void trace_fence_check_disabled_callback(void); + +TRACE_EVENT_FN(rogue_fence_check, + + TP_PROTO(const char *comm, const char *cmd, const char *dm, u32 ctx_id, u32 offset, + u32 sync_fwaddr, u32 sync_value), + + TP_ARGS(comm, cmd, dm, ctx_id, offset, sync_fwaddr, sync_value), + + TP_STRUCT__entry( + __string( comm, comm ) + __string( cmd, cmd ) + __string( dm, dm ) + __field( u32, ctx_id ) + __field( u32, offset ) + __field( u32, sync_fwaddr ) + __field( u32, sync_value ) + ), + + TP_fast_assign( + __assign_str(comm, comm); + __assign_str(cmd, cmd); + __assign_str(dm, dm); + __entry->ctx_id = ctx_id; + __entry->offset = offset; + __entry->sync_fwaddr = sync_fwaddr; + __entry->sync_value = sync_value; + ), + + TP_printk("comm=%s cmd=%s dm=%s ctx_id=%lu offset=%lu sync_fwaddr=%#lx sync_value=%#lx", + __get_str(comm), + __get_str(cmd), + __get_str(dm), + (unsigned long)__entry->ctx_id, + (unsigned long)__entry->offset, + (unsigned long)__entry->sync_fwaddr, + (unsigned long)__entry->sync_value), + + trace_fence_check_enabled_callback, + trace_fence_check_disabled_callback +); + +TRACE_EVENT(rogue_job_enqueue, + + TP_PROTO(u32 ctx_id, u32 int_id, u32 ext_id, + const char *kick_type), + + TP_ARGS(ctx_id, int_id, ext_id, kick_type), + + TP_STRUCT__entry( + __field(u32, ctx_id) + __field(u32, int_id) + __field(u32, ext_id) + __string(kick_type, kick_type) + ), + + TP_fast_assign( + __entry->ctx_id = ctx_id; + __entry->int_id = int_id; + 
__entry->ext_id = ext_id; + __assign_str(kick_type, kick_type); + ), + + TP_printk("ctx_id=%lu int_id=%lu ext_id=%lu kick_type=%s", + (unsigned long) __entry->ctx_id, + (unsigned long) __entry->int_id, + (unsigned long) __entry->ext_id, + __get_str(kick_type) + ) +); + +TRACE_EVENT(rogue_sched_switch, + + TP_PROTO(const char *work_type, u32 switch_type, u64 timestamp, u32 next_ctx_id, + u32 next_prio, u32 next_int_id, u32 next_ext_id), + + TP_ARGS(work_type, switch_type, timestamp, next_ctx_id, next_prio, next_int_id, next_ext_id), + + TP_STRUCT__entry( + __string(work_type, work_type) + __field(u32, switch_type) + __field(u64, timestamp) + __field(u32, next_ctx_id) + __field(u32, next_prio) + __field(u32, next_int_id) + __field(u32, next_ext_id) + ), + + TP_fast_assign( + __assign_str(work_type, work_type); + __entry->switch_type = switch_type; + __entry->timestamp = timestamp; + __entry->next_ctx_id = next_ctx_id; + __entry->next_prio = next_prio; + __entry->next_int_id = next_int_id; + __entry->next_ext_id = next_ext_id; + ), + + TP_printk("ts=%llu.%06lu next_ctx_id=%lu next_int_id=%lu next_ext_id=%lu" + " next_prio=%lu work_type=%s switch_type=%s", + (unsigned long long) show_secs_from_ns(__entry->timestamp), + (unsigned long) show_usecs_from_ns(__entry->timestamp), + (unsigned long) __entry->next_ctx_id, + (unsigned long) __entry->next_int_id, + (unsigned long) __entry->next_ext_id, + (unsigned long) __entry->next_prio, + __get_str(work_type), + __print_symbolic(__entry->switch_type, + /* These values are from ospvr_gputrace.h. 
*/ + { 1, "begin" }, + { 2, "end" }) + ) +); + +TRACE_EVENT(rogue_create_fw_context, + + TP_PROTO(const char *comm, const char *dm, u32 ctx_id), + + TP_ARGS(comm, dm, ctx_id), + + TP_STRUCT__entry( + __string( comm, comm ) + __string( dm, dm ) + __field( u32, ctx_id ) + ), + + TP_fast_assign( + __assign_str(comm, comm); + __assign_str(dm, dm); + __entry->ctx_id = ctx_id; + ), + + TP_printk("comm=%s dm=%s ctx_id=%lu", + __get_str(comm), + __get_str(dm), + (unsigned long)__entry->ctx_id) +); + +void PVRGpuTraceEnableUfoCallback(void); +void PVRGpuTraceDisableUfoCallback(void); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) +int PVRGpuTraceEnableUfoCallbackWrapper(void); +#else +#define PVRGpuTraceEnableUfoCallbackWrapper \ + PVRGpuTraceEnableUfoCallback +#endif + +TRACE_EVENT_FN(rogue_ufo_update, + + TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id, + u32 fwaddr, u32 old_value, u32 new_value), + + TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, old_value, + new_value), + + TP_STRUCT__entry( + __field( u64, timestamp ) + __field( u32, ctx_id ) + __field( u32, int_id ) + __field( u32, ext_id ) + __field( u32, fwaddr ) + __field( u32, old_value ) + __field( u32, new_value ) + ), + + TP_fast_assign( + __entry->timestamp = timestamp; + __entry->ctx_id = ctx_id; + __entry->int_id = int_id; + __entry->ext_id = ext_id; + __entry->fwaddr = fwaddr; + __entry->old_value = old_value; + __entry->new_value = new_value; + ), + + TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu" + " fwaddr=%#lx old_value=%#lx new_value=%#lx", + (unsigned long long)show_secs_from_ns(__entry->timestamp), + (unsigned long)show_usecs_from_ns(__entry->timestamp), + (unsigned long)__entry->ctx_id, + (unsigned long)__entry->int_id, + (unsigned long)__entry->ext_id, + (unsigned long)__entry->fwaddr, + (unsigned long)__entry->old_value, + (unsigned long)__entry->new_value), + PVRGpuTraceEnableUfoCallbackWrapper, + PVRGpuTraceDisableUfoCallback +); + 
+TRACE_EVENT_FN(rogue_ufo_check_fail, + + TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id, + u32 fwaddr, u32 value, u32 required), + + TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, value, required), + + TP_STRUCT__entry( + __field( u64, timestamp ) + __field( u32, ctx_id ) + __field( u32, int_id ) + __field( u32, ext_id ) + __field( u32, fwaddr ) + __field( u32, value ) + __field( u32, required ) + ), + + TP_fast_assign( + __entry->timestamp = timestamp; + __entry->ctx_id = ctx_id; + __entry->int_id = int_id; + __entry->ext_id = ext_id; + __entry->fwaddr = fwaddr; + __entry->value = value; + __entry->required = required; + ), + + TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu" + " fwaddr=%#lx value=%#lx required=%#lx", + (unsigned long long)show_secs_from_ns(__entry->timestamp), + (unsigned long)show_usecs_from_ns(__entry->timestamp), + (unsigned long)__entry->ctx_id, + (unsigned long)__entry->int_id, + (unsigned long)__entry->ext_id, + (unsigned long)__entry->fwaddr, + (unsigned long)__entry->value, + (unsigned long)__entry->required), + PVRGpuTraceEnableUfoCallbackWrapper, + PVRGpuTraceDisableUfoCallback +); + +TRACE_EVENT_FN(rogue_ufo_pr_check_fail, + + TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id, + u32 fwaddr, u32 value, u32 required), + + TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, value, required), + + TP_STRUCT__entry( + __field( u64, timestamp ) + __field( u32, ctx_id ) + __field( u32, int_id ) + __field( u32, ext_id ) + __field( u32, fwaddr ) + __field( u32, value ) + __field( u32, required ) + ), + + TP_fast_assign( + __entry->timestamp = timestamp; + __entry->ctx_id = ctx_id; + __entry->int_id = int_id; + __entry->ext_id = ext_id; + __entry->fwaddr = fwaddr; + __entry->value = value; + __entry->required = required; + ), + + TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu" + " fwaddr=%#lx value=%#lx required=%#lx", + (unsigned long long)show_secs_from_ns(__entry->timestamp), + (unsigned 
long)show_usecs_from_ns(__entry->timestamp), + (unsigned long)__entry->ctx_id, + (unsigned long)__entry->int_id, + (unsigned long)__entry->ext_id, + (unsigned long)__entry->fwaddr, + (unsigned long)__entry->value, + (unsigned long)__entry->required), + PVRGpuTraceEnableUfoCallbackWrapper, + PVRGpuTraceDisableUfoCallback +); + +TRACE_EVENT_FN(rogue_ufo_check_success, + + TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id, + u32 fwaddr, u32 value), + + TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, value), + + TP_STRUCT__entry( + __field( u64, timestamp ) + __field( u32, ctx_id ) + __field( u32, int_id ) + __field( u32, ext_id ) + __field( u32, fwaddr ) + __field( u32, value ) + ), + + TP_fast_assign( + __entry->timestamp = timestamp; + __entry->ctx_id = ctx_id; + __entry->int_id = int_id; + __entry->ext_id = ext_id; + __entry->fwaddr = fwaddr; + __entry->value = value; + ), + + TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu" + " fwaddr=%#lx value=%#lx", + (unsigned long long)show_secs_from_ns(__entry->timestamp), + (unsigned long)show_usecs_from_ns(__entry->timestamp), + (unsigned long)__entry->ctx_id, + (unsigned long)__entry->int_id, + (unsigned long)__entry->ext_id, + (unsigned long)__entry->fwaddr, + (unsigned long)__entry->value), + PVRGpuTraceEnableUfoCallbackWrapper, + PVRGpuTraceDisableUfoCallback +); + +TRACE_EVENT_FN(rogue_ufo_pr_check_success, + + TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id, + u32 fwaddr, u32 value), + + TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, value), + + TP_STRUCT__entry( + __field( u64, timestamp ) + __field( u32, ctx_id ) + __field( u32, int_id ) + __field( u32, ext_id ) + __field( u32, fwaddr ) + __field( u32, value ) + ), + + TP_fast_assign( + __entry->timestamp = timestamp; + __entry->ctx_id = ctx_id; + __entry->int_id = int_id; + __entry->ext_id = ext_id; + __entry->fwaddr = fwaddr; + __entry->value = value; + ), + + TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu" + " 
fwaddr=%#lx value=%#lx", + (unsigned long long)show_secs_from_ns(__entry->timestamp), + (unsigned long)show_usecs_from_ns(__entry->timestamp), + (unsigned long)__entry->ctx_id, + (unsigned long)__entry->int_id, + (unsigned long)__entry->ext_id, + (unsigned long)__entry->fwaddr, + (unsigned long)__entry->value), + PVRGpuTraceEnableUfoCallbackWrapper, + PVRGpuTraceDisableUfoCallback +); + +TRACE_EVENT(rogue_events_lost, + + TP_PROTO(u32 event_source, u32 last_ordinal, u32 curr_ordinal), + + TP_ARGS(event_source, last_ordinal, curr_ordinal), + + TP_STRUCT__entry( + __field( u32, event_source ) + __field( u32, last_ordinal ) + __field( u32, curr_ordinal ) + ), + + TP_fast_assign( + __entry->event_source = event_source; + __entry->last_ordinal = last_ordinal; + __entry->curr_ordinal = curr_ordinal; + ), + + TP_printk("event_source=%s last_ordinal=%u curr_ordinal=%u", + __print_symbolic(__entry->event_source, {0, "GPU"}, {1, "Host"}), + __entry->last_ordinal, + __entry->curr_ordinal) +); + +void PVRGpuTraceEnableFirmwareActivityCallback(void); +void PVRGpuTraceDisableFirmwareActivityCallback(void); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) +int PVRGpuTraceEnableFirmwareActivityCallbackWrapper(void); +#else +#define PVRGpuTraceEnableFirmwareActivityCallbackWrapper \ + PVRGpuTraceEnableFirmwareActivityCallback +#endif + +TRACE_EVENT_FN(rogue_firmware_activity, + + TP_PROTO(u64 timestamp, const char *task, u32 fw_event), + + TP_ARGS(timestamp, task, fw_event), + + TP_STRUCT__entry( + __field( u64, timestamp ) + __string( task, task ) + __field( u32, fw_event ) + ), + + TP_fast_assign( + __entry->timestamp = timestamp; + __assign_str(task, task); + __entry->fw_event = fw_event; + ), + + TP_printk("ts=%llu.%06lu task=%s event=%s", + (unsigned long long)show_secs_from_ns(__entry->timestamp), + (unsigned long)show_usecs_from_ns(__entry->timestamp), + __get_str(task), + __print_symbolic(__entry->fw_event, + /* These values are from ospvr_gputrace.h. 
*/ + { 1, "begin" }, + { 2, "end" })), + + PVRGpuTraceEnableFirmwareActivityCallbackWrapper, + PVRGpuTraceDisableFirmwareActivityCallback +); + +#undef show_secs_from_ns +#undef show_usecs_from_ns + +#endif /* ROGUE_TRACE_EVENTS_H */ + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH . + +/* This is needed because the name of this file doesn't match TRACE_SYSTEM. */ +#define TRACE_INCLUDE_FILE rogue_trace_events + +/* This part must be outside protection */ +#include diff --git a/drivers/mcst/gpu-imgtec/services/server/env/linux/trace_events.c b/drivers/mcst/gpu-imgtec/services/server/env/linux/trace_events.c new file mode 100644 index 000000000000..39242ed2b95c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/trace_events.c @@ -0,0 +1,265 @@ +/*************************************************************************/ /*! +@Title Linux trace event helper functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include +#include + +#if defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) +#if !defined(CONFIG_TRACE_GPU_MEM) +#define CREATE_TRACE_POINTS +#include +#undef CREATE_TRACE_POINTS +#else /* !defined(CONFIG_TRACE_GPU_MEM) */ +#include +#endif /* !defined(CONFIG_TRACE_GPU_MEM) */ +#endif /* defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) */ + +#include "img_types.h" +#include "trace_events.h" +#include "rogue_trace_events.h" +#include "sync_checkpoint_external.h" + +static bool fence_update_event_enabled, fence_check_event_enabled; + +bool trace_rogue_are_fence_updates_traced(void) +{ + return fence_update_event_enabled; +} + +bool trace_rogue_are_fence_checks_traced(void) +{ + return fence_check_event_enabled; +} + +/* + * Call backs referenced from rogue_trace_events.h. Note that these are not + * thread-safe, however, since running trace code when tracing is not enabled is + * simply a no-op, there is no harm in it. + */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) +int trace_fence_update_enabled_callback(void) +#else +void trace_fence_update_enabled_callback(void) +#endif +{ + fence_update_event_enabled = true; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) + return 0; +#endif +} + +void trace_fence_update_disabled_callback(void) +{ + fence_update_event_enabled = false; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) +int trace_fence_check_enabled_callback(void) +#else +void trace_fence_check_enabled_callback(void) +#endif +{ + fence_check_event_enabled = true; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) + return 0; +#endif +} + +void trace_fence_check_disabled_callback(void) +{ + fence_check_event_enabled = false; +} + +#if defined(SUPPORT_RGX) +/* This is a helper that calls trace_rogue_fence_update for each fence in an + * array. 
+ */ +void trace_rogue_fence_updates(const char *cmd, const char *dm, IMG_UINT32 ui32FWContext, + IMG_UINT32 ui32Offset, + IMG_UINT uCount, + PRGXFWIF_UFO_ADDR *pauiAddresses, + IMG_UINT32 *paui32Values) +{ + IMG_UINT i; + for (i = 0; i < uCount; i++) + { + trace_rogue_fence_update(current->comm, cmd, dm, ui32FWContext, ui32Offset, + pauiAddresses[i].ui32Addr, PVRSRV_SYNC_CHECKPOINT_SIGNALLED); + } +} + +void trace_rogue_fence_checks(const char *cmd, const char *dm, IMG_UINT32 ui32FWContext, + IMG_UINT32 ui32Offset, + IMG_UINT uCount, + PRGXFWIF_UFO_ADDR *pauiAddresses, + IMG_UINT32 *paui32Values) +{ + IMG_UINT i; + for (i = 0; i < uCount; i++) + { + trace_rogue_fence_check(current->comm, cmd, dm, ui32FWContext, ui32Offset, + pauiAddresses[i].ui32Addr, PVRSRV_SYNC_CHECKPOINT_SIGNALLED); + } +} + +void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp, + IMG_UINT32 ui32FWCtx, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_UINT32 ui32UFOCount, + const RGX_HWPERF_UFO_DATA_ELEMENT *puData) +{ + IMG_UINT i; + for (i = 0; i < ui32UFOCount; i++) + { + trace_rogue_ufo_update(ui64OSTimestamp, ui32FWCtx, + ui32IntJobRef, + ui32ExtJobRef, + puData->sUpdate.ui32FWAddr, + puData->sUpdate.ui32OldValue, + puData->sUpdate.ui32NewValue); + puData = IMG_OFFSET_ADDR(puData, sizeof(puData->sUpdate)); + } +} + +void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp, + IMG_UINT32 ui32FWCtx, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_BOOL bPrEvent, + IMG_UINT32 ui32UFOCount, + const RGX_HWPERF_UFO_DATA_ELEMENT *puData) +{ + IMG_UINT i; + for (i = 0; i < ui32UFOCount; i++) + { + if (bPrEvent) + { + trace_rogue_ufo_pr_check_success(ui64OSTimestamp, ui32FWCtx, + ui32IntJobRef, ui32ExtJobRef, + puData->sCheckSuccess.ui32FWAddr, + puData->sCheckSuccess.ui32Value); + } + else + { + trace_rogue_ufo_check_success(ui64OSTimestamp, ui32FWCtx, + ui32IntJobRef, ui32ExtJobRef, + puData->sCheckSuccess.ui32FWAddr, + puData->sCheckSuccess.ui32Value); + } + 
puData = IMG_OFFSET_ADDR(puData, sizeof(puData->sCheckSuccess)); + } +} + +void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp, + IMG_UINT32 ui32FWCtx, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_BOOL bPrEvent, + IMG_UINT32 ui32UFOCount, + const RGX_HWPERF_UFO_DATA_ELEMENT *puData) +{ + IMG_UINT i; + for (i = 0; i < ui32UFOCount; i++) + { + if (bPrEvent) + { + trace_rogue_ufo_pr_check_fail(ui64OSTimestamp, ui32FWCtx, + ui32IntJobRef, ui32ExtJobRef, + puData->sCheckFail.ui32FWAddr, + puData->sCheckFail.ui32Value, + puData->sCheckFail.ui32Required); + } + else + { + trace_rogue_ufo_check_fail(ui64OSTimestamp, ui32FWCtx, + ui32IntJobRef, ui32ExtJobRef, + puData->sCheckFail.ui32FWAddr, + puData->sCheckFail.ui32Value, + puData->sCheckFail.ui32Required); + } + puData = IMG_OFFSET_ADDR(puData, sizeof(puData->sCheckFail)); + } +} +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) + +int PVRGpuTraceEnableUfoCallbackWrapper(void) +{ + +#if defined(SUPPORT_RGX) + PVRGpuTraceEnableUfoCallback(); +#endif + + return 0; +} + +int PVRGpuTraceEnableFirmwareActivityCallbackWrapper(void) +{ + +#if defined(SUPPORT_RGX) + PVRGpuTraceEnableFirmwareActivityCallback(); +#endif + + return 0; +} +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) */ + +void TracepointUpdateGPUMemGlobal(IMG_UINT8 ui8GPUId, + IMG_UINT64 ui64Size) +{ +#if defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) + trace_gpu_mem_total(ui8GPUId, 0, ui64Size); +#endif /* defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) */ +} + +void TracepointUpdateGPUMemPerProcess(IMG_UINT8 ui8GPUId, + IMG_UINT32 ui32Pid, + IMG_UINT64 ui64Size) +{ +#if defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) + trace_gpu_mem_total(ui8GPUId, ui32Pid, ui64Size); +#endif /* defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) */ +} diff --git 
a/drivers/mcst/gpu-imgtec/services/server/env/linux/trace_events.h b/drivers/mcst/gpu-imgtec/services/server/env/linux/trace_events.h new file mode 100644 index 000000000000..0a8fffd5bc37 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/env/linux/trace_events.h @@ -0,0 +1,198 @@ +/*************************************************************************/ /*! +@Title Linux trace events and event helper functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(TRACE_EVENTS_H) +#define TRACE_EVENTS_H + +#include "rgx_fwif_km.h" +#include "rgx_hwperf.h" + +/* We need to make these functions do nothing if CONFIG_EVENT_TRACING isn't + * enabled, just like the actual trace event functions that the kernel + * defines for us. + */ +#ifdef CONFIG_EVENT_TRACING +bool trace_rogue_are_fence_checks_traced(void); + +bool trace_rogue_are_fence_updates_traced(void); + +void trace_job_enqueue(IMG_UINT32 ui32FWContext, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + const char *pszKickType); + +#if defined(SUPPORT_RGX) +void trace_rogue_fence_updates(const char *cmd, const char *dm, + IMG_UINT32 ui32FWContext, + IMG_UINT32 ui32Offset, + IMG_UINT uCount, + PRGXFWIF_UFO_ADDR *pauiAddresses, + IMG_UINT32 *paui32Values); + +void trace_rogue_fence_checks(const char *cmd, const char *dm, + IMG_UINT32 ui32FWContext, + IMG_UINT32 ui32Offset, + IMG_UINT uCount, + PRGXFWIF_UFO_ADDR *pauiAddresses, + IMG_UINT32 *paui32Values); + +void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp, + IMG_UINT32 ui32FWCtx, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_UINT32 ui32UFOCount, + const RGX_HWPERF_UFO_DATA_ELEMENT *puData); + +void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp, + IMG_UINT32 ui32FWCtx, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_BOOL bPrEvent, + IMG_UINT32 
ui32UFOCount, + const RGX_HWPERF_UFO_DATA_ELEMENT *puData); + +void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp, + IMG_UINT32 ui32FWCtx, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_BOOL bPrEvent, + IMG_UINT32 ui32UFOCount, + const RGX_HWPERF_UFO_DATA_ELEMENT *puData); +#endif /* if defined(SUPPORT_RGX) */ + +void TracepointUpdateGPUMemGlobal(IMG_UINT8 ui8GPUId, + IMG_UINT64 ui64Size); + +void TracepointUpdateGPUMemPerProcess(IMG_UINT8 ui8GPUId, + IMG_UINT32 ui32Pid, + IMG_UINT64 ui64Size); + +#else /* CONFIG_TRACE_EVENTS */ +static inline +bool trace_rogue_are_fence_checks_traced(void) +{ + return false; +} + +static inline +bool trace_rogue_are_fence_updates_traced(void) +{ + return false; +} + +static inline +void trace_job_enqueue(IMG_UINT32 ui32FWContext, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + const char *pszKickType) +{ +} + +#if defined(SUPPORT_RGX) +static inline +void trace_rogue_fence_updates(const char *cmd, const char *dm, + IMG_UINT32 ui32FWContext, + IMG_UINT32 ui32Offset, + IMG_UINT uCount, + PRGXFWIF_UFO_ADDR *pauiAddresses, + IMG_UINT32 *paui32Values) +{ +} + +static inline +void trace_rogue_fence_checks(const char *cmd, const char *dm, + IMG_UINT32 ui32FWContext, + IMG_UINT32 ui32Offset, + IMG_UINT uCount, + PRGXFWIF_UFO_ADDR *pauiAddresses, + IMG_UINT32 *paui32Values) +{ +} + +static inline +void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp, + IMG_UINT32 ui32FWCtx, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_UINT32 ui32UFOCount, + const RGX_HWPERF_UFO_DATA_ELEMENT *puData) +{ +} + +static inline +void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp, + IMG_UINT32 ui32FWCtx, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_BOOL bPrEvent, + IMG_UINT32 ui32UFOCount, + const RGX_HWPERF_UFO_DATA_ELEMENT *puData) +{ +} + +static inline +void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp, + IMG_UINT32 ui32FWCtx, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 
ui32IntJobRef, + IMG_BOOL bPrEvent, + IMG_UINT32 ui32UFOCount, + const RGX_HWPERF_UFO_DATA_ELEMENT *puData) +{ +} +#endif /* if defined(SUPPORT_RGX)*/ + +static inline +void TracepointUpdateGPUMemGlobal(IMG_UINT8 ui8GPUId, + IMG_UINT64 ui64Size) +{ +} + +static inline +void TracepointUpdateGPUMemPerProcess(IMG_UINT8 ui8GPUId, + IMG_UINT32 ui32Pid, + IMG_UINT64 ui64Size) +{ +} + +#endif /* CONFIG_TRACE_EVENTS */ + +#endif /* TRACE_EVENTS_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/cache_km.h b/drivers/mcst/gpu-imgtec/services/server/include/cache_km.h new file mode 100644 index 000000000000..1008963f8c1f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/cache_km.h @@ -0,0 +1,164 @@ +/*************************************************************************/ /*! +@File cache_km.h +@Title CPU cache management header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef CACHE_KM_H +#define CACHE_KM_H + +#if defined(LINUX) +#include +#else +#define KERNEL_VERSION +#endif + +#include "pvrsrv_error.h" +#include "os_cpu_cache.h" +#include "img_types.h" +#include "cache_ops.h" +#include "device.h" +#include "pmr.h" + +typedef IMG_UINT32 PVRSRV_CACHE_OP_ADDR_TYPE; /*!< Represents CPU address type required for CPU d-cache maintenance */ +#define PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL 0x1 /*!< Operation requires CPU virtual address only */ +#define PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL 0x2 /*!< Operation requires CPU physical address only */ +#define PVRSRV_CACHE_OP_ADDR_TYPE_BOTH 0x3 /*!< Operation requires both CPU virtual & physical addresses */ + +#include "connection_server.h" + +/* + * CacheOpInit() & CacheOpDeInit() + * + * This must be called to initialise the KM cache maintenance framework. + * This is called early during the driver/module (un)loading phase. + */ +PVRSRV_ERROR CacheOpInit(void); +void CacheOpDeInit(void); + +/* + * CacheOpInit2() & CacheOpDeInit2() + * + * This must be called to initialise the UM cache maintenance framework. + * This is called when the driver is loaded/unloaded from the kernel. + */ +PVRSRV_ERROR CacheOpInit2(void); +void CacheOpDeInit2(void); + +/* + * CacheOpExec() + * + * This is the primary CPU data-cache maintenance interface and it is + * always guaranteed to be synchronous; the arguments supplied must be + * pre-validated for performance reasons else the d-cache maintenance + * operation might cause the underlying OS kernel to fault. + */ +PVRSRV_ERROR CacheOpExec(PPVRSRV_DEVICE_NODE psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd, + PVRSRV_CACHE_OP uiCacheOp); + +/* + * CacheOpValExec() + * + * Same as CacheOpExec(), except arguments are _Validated_ before being + * presented to the underlying OS kernel for CPU data-cache maintenance. 
+ * The uiAddress is the start CPU virtual address for the to-be d-cache + * maintained PMR, it can be NULL in which case a remap will be performed + * internally, if required for cache maintenance. This is primarily used + * as the services client bridge call handler for synchronous user-mode + * cache maintenance requests. + */ +PVRSRV_ERROR CacheOpValExec(PMR *psPMR, + IMG_UINT64 uiAddress, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + PVRSRV_CACHE_OP uiCacheOp); + +/* + * CacheOpQueue() + * + * This is the secondary cache maintenance interface and it is not + * guaranteed to be synchronous in that requests could be deferred + * and executed asynchronously. This interface is primarily meant + * as services client bridge call handler. Both uiInfoPgGFSeqNum + * and ui32[Current,Next]FenceSeqNum implements an internal client + * server queueing protocol so making use of this interface outside + * of services client is not recommended and should not be done. + */ +PVRSRV_ERROR CacheOpQueue(CONNECTION_DATA *psConnection, + PPVRSRV_DEVICE_NODE psDevNode, + IMG_UINT32 ui32OpCount, + PMR **ppsPMR, + IMG_UINT64 *puiAddress, + IMG_DEVMEM_OFFSET_T *puiOffset, + IMG_DEVMEM_SIZE_T *puiSize, + PVRSRV_CACHE_OP *puiCacheOp, + IMG_UINT32 ui32OpTimeline, + IMG_UINT32 uiCurrentFenceSeqNum, + IMG_UINT32 *puiNextFenceSeqNum); + +/* + * CacheOpFence() + * + * This is used for fencing for any client in-flight cache maintenance + * operations that might have been deferred by the use of CacheOpQueue(). + * This should be called before any subsequent HW device kicks to ensure + * device memory is coherent with the HW before the kick. + */ +PVRSRV_ERROR CacheOpFence(RGXFWIF_DM eOpType, IMG_UINT32 ui32OpSeqNum); + +/* + * CacheOpLog() + * + * This is used for logging client cache maintenance operations that + * was executed in user-space. 
+ */ +PVRSRV_ERROR CacheOpLog(PMR *psPMR, + IMG_UINT64 uiAddress, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT64 ui64QueuedTimeMs, + IMG_UINT64 ui64ExecuteTimeMs, + IMG_UINT32 ui32NumRBF, + PVRSRV_CACHE_OP uiCacheOp); + +#endif /* CACHE_KM_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/connection_server.h b/drivers/mcst/gpu-imgtec/services/server/include/connection_server.h new file mode 100644 index 000000000000..f177143b633f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/connection_server.h @@ -0,0 +1,128 @@ +/*************************************************************************/ /*! +@File +@Title Server side connection management +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description API for server side connection management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(_CONNECTION_SERVER_H_) +#define _CONNECTION_SERVER_H_ + + +#include "img_types.h" +#include "img_defs.h" +#include "handle.h" +#include "pvrsrv_cleanup.h" + +/* Variable used to hold in memory the timeout for the current time slice*/ +extern IMG_UINT64 gui64TimesliceLimit; +/* Counter number of handle data freed during the current time slice */ +extern IMG_UINT32 gui32HandleDataFreeCounter; +/* Set the maximum time the freeing of the resources can keep the lock */ +#define CONNECTION_DEFERRED_CLEANUP_TIMESLICE_NS (3000 * 1000) /* 3ms */ + +typedef struct _CONNECTION_DATA_ +{ + PVRSRV_HANDLE_BASE *psHandleBase; + PROCESS_HANDLE_BASE *psProcessHandleBase; + struct _SYNC_CONNECTION_DATA_ *psSyncConnectionData; + struct _PDUMP_CONNECTION_DATA_ *psPDumpConnectionData; + + /* Holds the client flags supplied at connection time */ + IMG_UINT32 ui32ClientFlags; + + /* + * OS specific data can be stored via this handle. + * See osconnection_server.h for a generic mechanism + * for initialising this field. + */ + IMG_HANDLE hOsPrivateData; + +#define PVRSRV_CONNECTION_PROCESS_NAME_LEN (16) + IMG_PID pid; + IMG_PID vpid; + IMG_UINT32 tid; + IMG_CHAR pszProcName[PVRSRV_CONNECTION_PROCESS_NAME_LEN]; + + IMG_HANDLE hProcessStats; + + IMG_HANDLE hClientTLStream; + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + /* + * Connection-based values per application which can be modified by the + * AppHint settings 'OSid, OSidReg, bOSidAxiProtReg' for each application. + * These control where the connection's memory allocation is sourced from. + * ui32OSid, ui32OSidReg range from 0..(GPUVIRT_VALIDATION_NUM_OS - 1). 
+ */ + IMG_UINT32 ui32OSid; + IMG_UINT32 ui32OSidReg; + IMG_BOOL bOSidAxiProtReg; +#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ + + /* Structure which is hooked into the cleanup thread work list */ + PVRSRV_CLEANUP_THREAD_WORK sCleanupThreadFn; + + DLLIST_NODE sConnectionListNode; + + /* List navigation for deferred freeing of connection data */ + struct _CONNECTION_DATA_ **ppsThis; + struct _CONNECTION_DATA_ *psNext; +} CONNECTION_DATA; + +#include "osconnection_server.h" + +PVRSRV_ERROR PVRSRVCommonConnectionConnect(void **ppvPrivData, void *pvOSData); +void PVRSRVCommonConnectionDisconnect(void *pvPrivData); + +IMG_PID PVRSRVGetPurgeConnectionPid(void); + +void PVRSRVConnectionDebugNotify(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVConnectionPrivateData) +#endif +static INLINE +IMG_HANDLE PVRSRVConnectionPrivateData(CONNECTION_DATA *psConnection) +{ + return (psConnection != NULL) ? psConnection->hOsPrivateData : NULL; +} + +#endif /* !defined(_CONNECTION_SERVER_H_) */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/device.h b/drivers/mcst/gpu-imgtec/services/server/include/device.h new file mode 100644 index 000000000000..2411dd37f6be --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/device.h @@ -0,0 +1,556 @@ +/**************************************************************************/ /*! +@File +@Title Common Device header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device related function templates and defines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#ifndef DEVICE_H +#define DEVICE_H + +#include "devicemem_heapcfg.h" +#include "mmu_common.h" +#include "ra.h" /* RA_ARENA */ +#include "pvrsrv_device.h" +#include "sync_checkpoint.h" +#include "srvkm.h" +#include "physheap.h" +#include "sync_internal.h" +#include "sysinfo.h" +#include "dllist.h" + +#include "rgx_bvnc_defs_km.h" + +#include "lock.h" + +#include "power.h" + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#include "virt_validation_defs.h" +#endif + +typedef struct _PVRSRV_POWER_DEV_TAG_ *PPVRSRV_POWER_DEV; + +struct SYNC_RECORD; + +struct _CONNECTION_DATA_; + +/*************************************************************************/ /*! + @Function AllocUFOBlockCallback + @Description Device specific callback for allocation of a UFO block + + @Input psDeviceNode Pointer to device node to allocate + the UFO for. + @Output ppsMemDesc Pointer to pointer for the memdesc of + the allocation + @Output pui32SyncAddr FW Base address of the UFO block + @Output puiSyncPrimBlockSize Size of the UFO block + + @Return PVRSRV_OK if allocation was successful +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*AllocUFOBlockCallback)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, + DEVMEM_MEMDESC **ppsMemDesc, + IMG_UINT32 *pui32SyncAddr, + IMG_UINT32 *puiSyncPrimBlockSize); + +/*************************************************************************/ /*! + @Function FreeUFOBlockCallback + @Description Device specific callback for freeing of a UFO + + @Input psDeviceNode Pointer to device node that the UFO block was + allocated from. + @Input psMemDesc Pointer to pointer for the memdesc of the UFO + block to free. 
+*/ /**************************************************************************/ +typedef void (*FreeUFOBlockCallback)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, + DEVMEM_MEMDESC *psMemDesc); + +typedef struct _PVRSRV_DEVICE_IDENTIFIER_ +{ + /* Pdump memory and register bank names */ + IMG_CHAR *pszPDumpDevName; + IMG_CHAR *pszPDumpRegName; + + /* Under Linux, this is the minor number of RenderNode corresponding to this Device */ + IMG_INT32 i32UMIdentifier; +} PVRSRV_DEVICE_IDENTIFIER; + +typedef struct _DEVICE_MEMORY_INFO_ +{ + /* Heap count. Doesn't include additional heaps from PVRSRVCreateDeviceMemHeap */ + IMG_UINT32 ui32HeapCount; + + /* Blueprints for creating new device memory contexts */ + IMG_UINT32 uiNumHeapConfigs; + DEVMEM_HEAP_CONFIG *psDeviceMemoryHeapConfigArray; + DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeap; +} DEVICE_MEMORY_INFO; + + +typedef struct _PG_HANDLE_ +{ + union + { + void *pvHandle; + IMG_UINT64 ui64Handle; + }u; +#ifdef CONFIG_MCST + IMG_DEV_PHYADDR sDevPAddr; +#endif + /* The allocation order is log2 value of the number of pages to allocate. + * As such this is a correspondingly small value. E.g, for order 4 we + * are talking 2^4 * PAGE_SIZE contiguous allocation. + * DevPxAlloc API does not need to support orders higher than 4. + */ +#if defined(SUPPORT_GPUVIRT_VALIDATION) + IMG_BYTE uiOrder; /* Order of the corresponding allocation */ + IMG_BYTE uiOSid; /* OSid to use for allocation arena. + * Connection-specific. 
*/ + IMG_BYTE uiPad1, + uiPad2; /* Spare */ +#else + IMG_BYTE uiOrder; /* Order of the corresponding allocation */ + IMG_BYTE uiPad1, + uiPad2, + uiPad3; /* Spare */ +#endif +} PG_HANDLE; + +#define MMU_BAD_PHYS_ADDR (0xbadbad00badULL) +#define DUMMY_PAGE ("DUMMY_PAGE") +#define DEV_ZERO_PAGE ("DEV_ZERO_PAGE") +#define PVR_DUMMY_PAGE_INIT_VALUE (0x0) +#define PVR_ZERO_PAGE_INIT_VALUE (0x0) + +typedef struct __DEFAULT_PAGE__ +{ + /*Page handle for the page allocated (UMA/LMA)*/ + PG_HANDLE sPageHandle; + POS_LOCK psPgLock; + ATOMIC_T atRefCounter; + /*Default page size in terms of log2 */ + IMG_UINT32 ui32Log2PgSize; + IMG_UINT64 ui64PgPhysAddr; +#if defined(PDUMP) + IMG_HANDLE hPdumpPg; +#endif +} PVRSRV_DEF_PAGE; + +typedef enum _PVRSRV_DEVICE_STATE_ +{ + PVRSRV_DEVICE_STATE_UNDEFINED = 0, + PVRSRV_DEVICE_STATE_INIT, + PVRSRV_DEVICE_STATE_ACTIVE, + PVRSRV_DEVICE_STATE_DEINIT, + PVRSRV_DEVICE_STATE_BAD, +} PVRSRV_DEVICE_STATE; + +typedef enum _PVRSRV_DEVICE_HEALTH_STATUS_ +{ + PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED = 0, + PVRSRV_DEVICE_HEALTH_STATUS_OK, + PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING, + PVRSRV_DEVICE_HEALTH_STATUS_DEAD, + PVRSRV_DEVICE_HEALTH_STATUS_FAULT +} PVRSRV_DEVICE_HEALTH_STATUS; + +typedef enum _PVRSRV_DEVICE_HEALTH_REASON_ +{ + PVRSRV_DEVICE_HEALTH_REASON_NONE = 0, + PVRSRV_DEVICE_HEALTH_REASON_ASSERTED, + PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING, + PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS, + PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT, + PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED, + PVRSRV_DEVICE_HEALTH_REASON_IDLING, + PVRSRV_DEVICE_HEALTH_REASON_RESTARTING, + PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS +} PVRSRV_DEVICE_HEALTH_REASON; + +typedef enum _PVRSRV_DEVICE_DEBUG_DUMP_STATUS_ +{ + PVRSRV_DEVICE_DEBUG_DUMP_NONE = 0, + PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE +} PVRSRV_DEVICE_DEBUG_DUMP_STATUS; + +typedef struct _MMU_PX_SETUP_ +{ +#if defined(SUPPORT_GPUVIRT_VALIDATION) + PVRSRV_ERROR (*pfnDevPxAllocGPV)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, 
size_t uiSize, + PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, + IMG_UINT32 ui32OSid, IMG_PID uiPid); +#endif + PVRSRV_ERROR (*pfnDevPxAlloc)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, size_t uiSize, + PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, + IMG_PID uiPid); + + void (*pfnDevPxFree)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, PG_HANDLE *psMemHandle); + + PVRSRV_ERROR (*pfnDevPxMap)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, PG_HANDLE *pshMemHandle, + size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr, + void **pvPtr); + + void (*pfnDevPxUnMap)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + PG_HANDLE *psMemHandle, void *pvPtr); + + PVRSRV_ERROR (*pfnDevPxClean)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + PG_HANDLE *pshMemHandle, + IMG_UINT32 uiOffset, + IMG_UINT32 uiLength); + + IMG_UINT32 uiMMUPxLog2AllocGran; + + RA_ARENA *psPxRA; +} MMU_PX_SETUP; + + +typedef PVRSRV_ERROR (*FN_CREATERAMBACKEDPMR)(struct _CONNECTION_DATA_ *psConnection, + struct _PVRSRV_DEVICE_NODE_ *psDevNode, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags); + +typedef struct _PVRSRV_DEVICE_NODE_ +{ + PVRSRV_DEVICE_IDENTIFIER sDevId; + + PVRSRV_DEVICE_STATE eDevState; + PVRSRV_DEVICE_FABRIC_TYPE eDevFabricType; + + ATOMIC_T eHealthStatus; /* Holds values from PVRSRV_DEVICE_HEALTH_STATUS */ + ATOMIC_T eHealthReason; /* Holds values from PVRSRV_DEVICE_HEALTH_REASON */ + ATOMIC_T eDebugDumpRequested; /* Holds values from PVRSRV_DEVICE_DEBUG_DUMP_STATUS */ + + IMG_HANDLE *hDebugTable; + + /* device specific MMU attributes */ + MMU_DEVICEATTRIBS *psMMUDevAttrs; + /* Device specific MMU firmware attributes, used only in some devices */ + MMU_DEVICEATTRIBS *psFirmwareMMUDevAttrs; + + MMU_PX_SETUP sDevMMUPxSetup; + + /* lock for power state 
transitions */ + POS_LOCK hPowerLock; + IMG_PID uiPwrLockOwnerPID; /* Only valid between lock and corresponding unlock + operations of hPowerLock */ + + /* current system device power state */ + PVRSRV_SYS_POWER_STATE eCurrentSysPowerState; + PPVRSRV_POWER_DEV psPowerDev; + + /* + callbacks the device must support: + */ + + FN_CREATERAMBACKEDPMR pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_LAST]; + + PVRSRV_ERROR (*pfnDevSLCFlushRange)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + IMG_BOOL bInvalidate); + + PVRSRV_ERROR (*pfnInvalFBSCTable)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + MMU_CONTEXT *psMMUContext, + IMG_UINT64 ui64FBSCEntries); + + PVRSRV_ERROR (*pfnValidateOrTweakPhysAddrs)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + MMU_DEVICEATTRIBS *psDevAttrs, + IMG_UINT64 *pui64Addr); + + void (*pfnMMUCacheInvalidate)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + MMU_CONTEXT *psMMUContext, + MMU_LEVEL eLevel, + IMG_BOOL bUnmap); + + PVRSRV_ERROR (*pfnMMUCacheInvalidateKick)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + IMG_UINT32 *pui32NextMMUInvalidateUpdate, + IMG_BOOL bInterrupt); + + IMG_UINT32 (*pfnMMUCacheGetInvalidateCounter)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); + + + void (*pfnDumpDebugInfo)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); + + PVRSRV_ERROR (*pfnUpdateHealthStatus)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + IMG_BOOL bIsTimerPoll); + + PVRSRV_ERROR (*pfnValidationGPUUnitsPowerChange)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT32 ui32NewState); + + PVRSRV_ERROR (*pfnResetHWRLogs)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); + + PVRSRV_ERROR (*pfnVerifyBVNC)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64GivenBVNC, IMG_UINT64 ui64CoreIdMask); + + /* Method to drain device HWPerf packets from firmware buffer to host buffer */ + PVRSRV_ERROR (*pfnServiceHWPerf)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); + + PVRSRV_ERROR (*pfnDeviceVersionString)(struct 
_PVRSRV_DEVICE_NODE_ *psDevNode, IMG_CHAR **ppszVersionString); + + PVRSRV_ERROR (*pfnDeviceClockSpeed)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_PUINT32 pui32RGXClockSpeed); + + PVRSRV_ERROR (*pfnSoftReset)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64ResetValue1, IMG_UINT64 ui64ResetValue2); + + PVRSRV_ERROR (*pfnAlignmentCheck)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT32 ui32FWAlignChecksSize, IMG_UINT32 aui32FWAlignChecks[]); + IMG_BOOL (*pfnCheckDeviceFeature)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64FeatureMask); + + IMG_INT32 (*pfnGetDeviceFeatureValue)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, enum _RGX_FEATURE_WITH_VALUE_INDEX_ eFeatureIndex); + + PVRSRV_ERROR (*pfnGetMultiCoreInfo)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT32 ui32CapsSize, + IMG_UINT32 *pui32NumCores, IMG_UINT64 *pui64Caps); + + IMG_BOOL (*pfnHasFBCDCVersion31)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); + + MMU_DEVICEATTRIBS* (*pfnGetMMUDeviceAttributes)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_BOOL bKernelMemoryCtx); + +#if defined(PDUMP) && defined(SUPPORT_SECURITY_VALIDATION) + PVRSRV_ERROR (*pfnGetSecurePDumpMemspace)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_CHAR *pszMemspaceName, + IMG_UINT32 ui32MemspaceNameLen); +#endif + + PVRSRV_DEVICE_CONFIG *psDevConfig; + + /* device post-finalise compatibility check */ + PVRSRV_ERROR (*pfnInitDeviceCompatCheck) (struct _PVRSRV_DEVICE_NODE_*); + + /* initialise device-specific physheaps */ + PVRSRV_ERROR (*pfnPhysMemDeviceHeapsInit) (struct _PVRSRV_DEVICE_NODE_ *); + + /* information about the device's address space and heaps */ + DEVICE_MEMORY_INFO sDevMemoryInfo; + + /* device's shared-virtual-memory heap max virtual address */ + IMG_UINT64 ui64GeneralSVMHeapTopVA; + + ATOMIC_T iNumClockSpeedChanges; + + /* private device information */ + void *pvDevice; + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + RA_ARENA *psOSidSubArena[GPUVIRT_VALIDATION_NUM_OS]; + /* Number of 
supported OSid for this device node given available memory */ + IMG_UINT32 ui32NumOSId; +#endif + +#define PVRSRV_MAX_RA_NAME_LENGTH (50) + RA_ARENA **apsLocalDevMemArenas; + IMG_CHAR **apszRANames; + IMG_UINT32 ui32NumOfLocalMemArenas; + + /* Resource allocator arena that manages the Firmware Main sub-heap */ + RA_ARENA *psKernelFwMainMemArena; + IMG_CHAR szKernelFwMainRAName[PVRSRV_MAX_RA_NAME_LENGTH]; + + /* Resource allocator arena that manages the Firmware Config sub-heap */ + RA_ARENA *psKernelFwConfigMemArena; + IMG_CHAR szKernelFwConfigRAName[PVRSRV_MAX_RA_NAME_LENGTH]; + + /* Array for the resource allocator arenas that manage the Firmware Raw + * heaps for all Guest operating systems on virtualized environments. */ + RA_ARENA *psKernelFwRawMemArena[RGX_NUM_OS_SUPPORTED]; + IMG_CHAR szKernelFwRawRAName[RGX_NUM_OS_SUPPORTED][PVRSRV_MAX_RA_NAME_LENGTH]; + + + IMG_UINT32 ui32RegisteredPhysHeaps; + PHYS_HEAP **papsRegisteredPhysHeaps; + + /* + * Pointers to the device's physical memory heap(s) + * The first entry (apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]) will be used for allocations + * where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL flag is not set. Normally this will be an LMA heap + * (but the device configuration could specify a UMA heap here, if desired) + * The second entry (apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]) will be used for allocations + * where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL flag is set. 
Normally this will be a UMA heap + * (but the configuration could specify an LMA heap here, if desired) + * The third entry (apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]) will be used for allocations + * where the PVRSRV_FW_ALLOC_TYPE is MAIN,CONFIG or RAW; this is used when virtualization is enabled + * The device configuration will always specify two physical heap IDs - in the event of the device + * only using one physical heap, both of these IDs will be the same, and hence both pointers below + * will also be the same; when virtualization is enabled the device configuration specifies + * three physical heap IDs, the last being for PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL allocations + */ + PHYS_HEAP *apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_LAST]; + +#if defined(SUPPORT_DEDICATED_FW_MEMORY) + PHYS_HEAP *psDedicatedFWMemHeap; + RA_ARENA *psDedicatedFWMemArena; +#endif + + /* RA reserved for storing the MMU mappings of firmware. + * The memory backing up this RA must persist between driver or OS reboots */ + RA_ARENA *psFwMMUReservedMemArena; + + /* Flag indicating if the firmware has been initialised during the + * 1st boot of the Host driver according to the AutoVz life-cycle. 
*/ + IMG_BOOL bAutoVzFwIsUp; + + struct _PVRSRV_DEVICE_NODE_ *psNext; + struct _PVRSRV_DEVICE_NODE_ **ppsThis; + + /* Functions for notification about memory contexts */ + PVRSRV_ERROR (*pfnRegisterMemoryContext)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, + MMU_CONTEXT *psMMUContext, + IMG_HANDLE *hPrivData); + void (*pfnUnregisterMemoryContext)(IMG_HANDLE hPrivData); + + /* Functions for allocation/freeing of UFOs */ + AllocUFOBlockCallback pfnAllocUFOBlock; /*!< Callback for allocation of a block of UFO memory */ + FreeUFOBlockCallback pfnFreeUFOBlock; /*!< Callback for freeing of a block of UFO memory */ + + IMG_HANDLE hSyncServerNotify; + POS_LOCK hSyncServerListLock; + DLLIST_NODE sSyncServerSyncsList; + + IMG_HANDLE hSyncServerRecordNotify; + POS_LOCK hSyncServerRecordLock; + IMG_UINT32 ui32SyncServerRecordCount; + IMG_UINT32 ui32SyncServerRecordCountHighWatermark; + DLLIST_NODE sSyncServerRecordList; + struct SYNC_RECORD *apsSyncServerRecordsFreed[PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN]; + IMG_UINT32 uiSyncServerRecordFreeIdx; + + IMG_HANDLE hSyncCheckpointRecordNotify; + POS_LOCK hSyncCheckpointRecordLock; + IMG_UINT32 ui32SyncCheckpointRecordCount; + IMG_UINT32 ui32SyncCheckpointRecordCountHighWatermark; + DLLIST_NODE sSyncCheckpointRecordList; + struct SYNC_CHECKPOINT_RECORD *apsSyncCheckpointRecordsFreed[PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN]; + IMG_UINT32 uiSyncCheckpointRecordFreeIdx; + + IMG_HANDLE hSyncCheckpointNotify; + POS_SPINLOCK hSyncCheckpointListLock; /*!< Protects sSyncCheckpointSyncsList */ + DLLIST_NODE sSyncCheckpointSyncsList; + + PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext; + PSYNC_PRIM_CONTEXT hSyncPrimContext; + + /* With this sync-prim we make sure the MMU cache is flushed + * before we free the page table memory */ + PVRSRV_CLIENT_SYNC_PRIM *psMMUCacheSyncPrim; + IMG_UINT32 ui32NextMMUInvalidateUpdate; + + IMG_HANDLE hCmdCompNotify; + IMG_HANDLE hDbgReqNotify; + IMG_HANDLE hHtbDbgReqNotify; + IMG_HANDLE hAppHintDbgReqNotify; + 
IMG_HANDLE hThreadsDbgReqNotify; + + PVRSRV_DEF_PAGE sDummyPage; + PVRSRV_DEF_PAGE sDevZeroPage; + + POSWR_LOCK hMemoryContextPageFaultNotifyListLock; + DLLIST_NODE sMemoryContextPageFaultNotifyListHead; + +#if defined(PDUMP) + /* + * FBC clear color register default value to use. + */ + IMG_UINT64 ui64FBCClearColour; + + /* Device-level callback which is called when pdump.exe starts. + * Should be implemented in device-specific init code, e.g. rgxinit.c + */ + PVRSRV_ERROR (*pfnPDumpInitDevice)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); + /* device-level callback to return pdump ID associated to a memory context */ + IMG_UINT32 (*pfnMMUGetContextID)(IMG_HANDLE hDevMemContext); + + IMG_UINT8 *pui8DeferredSyncCPSignal; /*! Deferred fence events buffer */ + + IMG_UINT16 ui16SyncCPReadIdx; /*! Read index in the above deferred fence events buffer */ + + IMG_UINT16 ui16SyncCPWriteIdx; /*! Write index in the above deferred fence events buffer */ + + POS_LOCK hSyncCheckpointSignalLock; /*! Guards data shared between an sleepable-contexts */ + + void *pvSyncCPMISR; /*! MISR to emit pending/deferred fence signals */ + + void *hTransition; /*!< SyncCheckpoint PdumpTransition Cookie */ + + DLLIST_NODE sSyncCheckpointContextListHead; /*!< List head for the sync chkpt contexts */ + + POS_LOCK hSyncCheckpointContextListLock; /*! 
lock for accessing sync chkpt contexts list */ + +#endif + +#if defined(SUPPORT_VALIDATION) + POS_LOCK hValidationLock; +#endif + + POS_LOCK hConnectionsLock; /*!< Lock protecting sConnections */ + DLLIST_NODE sConnections; /*!< The list of currently active connection objects for this device node */ +#if defined(PVRSRV_DEBUG_LISR_EXECUTION) + IMG_UINT64 ui64nLISR; /*!< Number of LISR calls seen */ + IMG_UINT64 ui64nMISR; /*!< Number of MISR calls made */ +#endif +} PVRSRV_DEVICE_NODE; + +/* + * Macros to be used instead of calling directly the pfns since these macros + * will expand the feature passed as argument into the bitmask/index to work + * with the macros defined in rgx_bvnc_defs_km.h + */ +#define PVRSRV_IS_FEATURE_SUPPORTED(psDevNode, Feature) \ + psDevNode->pfnCheckDeviceFeature(psDevNode, RGX_FEATURE_##Feature##_BIT_MASK) +#define PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, Feature) \ + psDevNode->pfnGetDeviceFeatureValue(psDevNode, RGX_FEATURE_##Feature##_IDX) + +PVRSRV_ERROR IMG_CALLCONV PVRSRVDeviceFinalise(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bInitSuccessful); + +PVRSRV_ERROR IMG_CALLCONV PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode); + +PVRSRV_ERROR IMG_CALLCONV RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32ClientBuildOptions); + + +#endif /* DEVICE_H */ + +/****************************************************************************** + End of file (device.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/devicemem_heapcfg.h b/drivers/mcst/gpu-imgtec/services/server/include/devicemem_heapcfg.h new file mode 100644 index 000000000000..9df0a66326fe --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/devicemem_heapcfg.h @@ -0,0 +1,168 @@ +/**************************************************************************/ /*! 
+@File +@Title Temporary Device Memory 2 stuff +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device memory management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#ifndef DEVICEMEMHEAPCFG_H +#define DEVICEMEMHEAPCFG_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + + + +struct _PVRSRV_DEVICE_NODE_; +struct _CONNECTION_DATA_; + + +/* + A "heap config" is a blueprint to be used for initial setting up of heaps + when a device memory context is created. + + We define a data structure to define this, but it's really down to the + caller to populate it. This is all expected to be in-kernel. We provide an + API that client code can use to enquire about the blueprint, such that it may + do the heap set-up during the context creation call on behalf of the user. +*/ + +/* Blueprint for a single heap */ +typedef struct _DEVMEM_HEAP_BLUEPRINT_ +{ + /* Name of this heap - for debug purposes, and perhaps for lookup + by name? */ + const IMG_CHAR *pszName; + + /* Virtual address of the beginning of the heap. This _must_ be a + multiple of the data page size for the heap. It is + _recommended_ that it be coarser than that - especially, it + should begin on a boundary appropriate to the MMU for the + device. For Rogue, this is a Page Directory boundary, or 1GB + (virtual address a multiple of 0x0040000000). */ + IMG_DEV_VIRTADDR sHeapBaseAddr; + + /* Length of the heap. Given that the END address of the heap has + a similar restriction to that of the _beginning_ of the heap. 
+ That is the heap length _must_ be a whole number of data pages. + Again, the recommendation is that it ends on a 1GB boundary. + Again, this is not essential, but we do know that (at the time + of writing) the current implementation of mmu_common.c is such + that no two heaps may share a page directory, thus the + remaining virtual space would be wasted if the length were not + a multiple of 1GB */ + IMG_DEVMEM_SIZE_T uiHeapLength; + + /* VA space starting sHeapBaseAddr to uiReservedRegionLength-1 are reserved + for statically defined addresses (shared/known between clients and FW). + Services never maps allocations into this reserved address space _unless_ + explicitly requested via PVRSRVMapToDeviceAddress by passing sDevVirtAddr + which falls within this reserved range. Since this range is completely for + clients to manage (where allocations are page granular), it _must_ again be + a whole number of data pages. Additionally, another constraint enforces this + to be a multiple of RGX_HEAP_RESERVED_SIZE_GRANULARITY (which evaluates to + max page size supported) to support varied pages sizes */ + IMG_DEVMEM_SIZE_T uiReservedRegionLength; + + /* Data page size. This is the page size that is going to get + programmed into the MMU, so it needs to be a valid one for the + device. Importantly, the start address and length _must_ be + multiples of this page size. Note that the page size is + specified as the log 2 relative to 1 byte (e.g. 12 indicates + 4kB) */ + IMG_UINT32 uiLog2DataPageSize; + + /* Import alignment. 
Force imports to this heap to be + aligned to at least this value */ + IMG_UINT32 uiLog2ImportAlignment; + +} DEVMEM_HEAP_BLUEPRINT; + +/* Entire named heap config */ +typedef struct _DEVMEM_HEAP_CONFIG_ +{ + /* Name of this heap config - for debug and maybe lookup */ + const IMG_CHAR *pszName; + + /* Number of heaps in this config */ + IMG_UINT32 uiNumHeaps; + + /* Array of individual heap blueprints as defined above */ + DEVMEM_HEAP_BLUEPRINT *psHeapBlueprintArray; +} DEVMEM_HEAP_CONFIG; + + +PVRSRV_ERROR +HeapCfgHeapConfigCount(struct _CONNECTION_DATA_ *psConnection, + const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, + IMG_UINT32 *puiNumHeapConfigsOut +); + +PVRSRV_ERROR +HeapCfgHeapCount(struct _CONNECTION_DATA_ *psConnection, + const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, + IMG_UINT32 uiHeapConfigIndex, + IMG_UINT32 *puiNumHeapsOut +); + +PVRSRV_ERROR +HeapCfgHeapConfigName(struct _CONNECTION_DATA_ *psConnection, + const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, + IMG_UINT32 uiHeapConfigIndex, + IMG_UINT32 uiHeapConfigNameBufSz, + IMG_CHAR *pszHeapConfigNameOut +); + +PVRSRV_ERROR +HeapCfgHeapDetails(struct _CONNECTION_DATA_ *psConnection, + const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, + IMG_UINT32 uiHeapConfigIndex, + IMG_UINT32 uiHeapIndex, + IMG_UINT32 uiHeapNameBufSz, + IMG_CHAR *pszHeapNameOut, + IMG_DEV_VIRTADDR *psDevVAddrBaseOut, + IMG_DEVMEM_SIZE_T *puiHeapLengthOut, + IMG_DEVMEM_SIZE_T *puiReservedRegionLengthOut, + IMG_UINT32 *puiLog2DataPageSizeOut, + IMG_UINT32 *puiLog2ImportAlignmentOut +); + +#endif diff --git a/drivers/mcst/gpu-imgtec/services/server/include/devicemem_history_server.h b/drivers/mcst/gpu-imgtec/services/server/include/devicemem_history_server.h new file mode 100644 index 000000000000..a09050f8c946 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/devicemem_history_server.h @@ -0,0 +1,152 @@ +/*************************************************************************/ /*! 
+@File devicemem_history_server.h +@Title Resource Information abstraction +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Devicemem History functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef DEVICEMEM_HISTORY_SERVER_H +#define DEVICEMEM_HISTORY_SERVER_H + +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "rgxmem.h" +#include "devicemem_utils.h" + +PVRSRV_ERROR DevicememHistoryInitKM(void); + +void DevicememHistoryDeInitKM(void); + +PVRSRV_ERROR DevicememHistoryMapKM(PMR *psPMR, + IMG_UINT32 ui32Offset, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + const char szName[DEVMEM_ANNOTATION_MAX_LEN], + IMG_UINT32 ui32PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 *pui32AllocationIndexOut); + +PVRSRV_ERROR DevicememHistoryUnmapKM(PMR *psPMR, + IMG_UINT32 ui32Offset, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + const char szName[DEVMEM_ANNOTATION_MAX_LEN], + IMG_UINT32 ui32PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 *pui32AllocationIndexOut); + +PVRSRV_ERROR DevicememHistoryMapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr, + IMG_UINT32 ui32StartPage, + IMG_UINT32 ui32NumPages, + IMG_DEVMEM_SIZE_T uiAllocSize, + const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN], + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 *ui32AllocationIndexOut); + +PVRSRV_ERROR DevicememHistoryUnmapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr, + IMG_UINT32 ui32StartPage, + IMG_UINT32 ui32NumPages, + IMG_DEVMEM_SIZE_T uiAllocSize, + const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN], + IMG_UINT32 
ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 *ui32AllocationIndexOut); + +PVRSRV_ERROR DevicememHistorySparseChangeKM(PMR *psPMR, + IMG_UINT32 ui32Offset, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + const char szName[DEVMEM_ANNOTATION_MAX_LEN], + IMG_UINT32 ui32PageSize, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *paui32AllocPageIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pauiFreePageIndices, + IMG_UINT32 AllocationIndex, + IMG_UINT32 *pui32AllocationIndexOut); + +/* used when the PID does not matter */ +#define DEVICEMEM_HISTORY_PID_ANY 0xFFFFFFFE + +typedef struct _DEVICEMEM_HISTORY_QUERY_IN_ +{ + IMG_PID uiPID; + IMG_DEV_VIRTADDR sDevVAddr; +} DEVICEMEM_HISTORY_QUERY_IN; + +/* Store up to 4 results for a lookup. In the case of the faulting page being + * re-mapped between the page fault occurring on HW and the page fault analysis + * being done, the second result entry will show the allocation being unmapped. + * A further 2 entries are added to cater for multiple buffers in the same page. + */ +#define DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS 4 + +typedef struct _DEVICEMEM_HISTORY_QUERY_OUT_RESULT_ +{ + IMG_CHAR szString[DEVMEM_ANNOTATION_MAX_LEN]; + IMG_DEV_VIRTADDR sBaseDevVAddr; + IMG_DEVMEM_SIZE_T uiSize; + IMG_BOOL bMap; + IMG_BOOL bRange; + IMG_BOOL bAll; + IMG_UINT64 ui64When; + IMG_UINT64 ui64Age; + /* info for sparse map/unmap operations (i.e. 
bRange=IMG_TRUE) */ + IMG_UINT32 ui32StartPage; + IMG_UINT32 ui32PageCount; + IMG_DEV_VIRTADDR sMapStartAddr; + IMG_DEV_VIRTADDR sMapEndAddr; + RGXMEM_PROCESS_INFO sProcessInfo; +} DEVICEMEM_HISTORY_QUERY_OUT_RESULT; + +typedef struct _DEVICEMEM_HISTORY_QUERY_OUT_ +{ + IMG_UINT32 ui32NumResults; + /* result 0 is the newest */ + DEVICEMEM_HISTORY_QUERY_OUT_RESULT sResults[DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS]; +} DEVICEMEM_HISTORY_QUERY_OUT; + +IMG_BOOL +DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn, + DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut, + IMG_UINT32 ui32PageSizeBytes, + IMG_BOOL bMatchAnyAllocInPage); + +#endif diff --git a/drivers/mcst/gpu-imgtec/services/server/include/devicemem_server.h b/drivers/mcst/gpu-imgtec/services/server/include/devicemem_server.h new file mode 100644 index 000000000000..d4c7f215f7f1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/devicemem_server.h @@ -0,0 +1,682 @@ +/*************************************************************************/ /*! +@File +@Title Device Memory Management +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Server side component for device memory management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef DEVICEMEM_SERVER_H +#define DEVICEMEM_SERVER_H + +#include "device.h" /* For device node */ +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" + +#include "connection_server.h" +#include "pmr.h" + +typedef struct _DEVMEMINT_CTX_ DEVMEMINT_CTX; +typedef struct _DEVMEMINT_CTX_EXPORT_ DEVMEMINT_CTX_EXPORT; +typedef struct _DEVMEMINT_HEAP_ DEVMEMINT_HEAP; + +typedef struct _DEVMEMINT_RESERVATION_ DEVMEMINT_RESERVATION; +typedef struct _DEVMEMINT_MAPPING_ DEVMEMINT_MAPPING; +typedef struct _DEVMEMINT_PF_NOTIFY_ DEVMEMINT_PF_NOTIFY; + + +/*************************************************************************/ /*! +@Function DevmemIntUnpin +@Description This is the counterpart to DevmemPin(). It is meant to be + called when the allocation is NOT mapped in the device virtual + space. + +@Input psPMR The physical memory to unpin. + +@Return PVRSRV_ERROR: PVRSRV_OK on success and the memory is + registered to be reclaimed. Error otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR DevmemIntUnpin(PMR *psPMR); + +/*************************************************************************/ /*! +@Function DevmemIntUnpinInvalidate +@Description This is the counterpart to DevmemIntPinValidate(). It is meant + to be called for allocations that ARE mapped in the device + virtual space and we have to invalidate the mapping. + +@Input psPMR The physical memory to unpin. + +@Return PVRSRV_ERROR: PVRSRV_OK on success and the memory is + registered to be reclaimed. Error otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemIntUnpinInvalidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR); + +/*************************************************************************/ /*! 
+@Function DevmemIntPin +@Description This is the counterpart to DevmemIntUnpin(). + Is meant to be called if there is NO device mapping present. + +@Input psPMR The physical memory to pin. + +@Return PVRSRV_ERROR: PVRSRV_OK on success and the allocation content + was successfully restored. + + PVRSRV_ERROR_PMR_NEW_MEMORY when the content + could not be restored and new physical memory + was allocated. + + A different error otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR DevmemIntPin(PMR *psPMR); + +/*************************************************************************/ /*! +@Function DevmemIntPinValidate +@Description This is the counterpart to DevmemIntUnpinInvalidate(). + Is meant to be called if there is IS a device mapping present + that needs to be taken care of. + +@Input psDevmemMapping The mapping structure used for the passed PMR. + +@Input psPMR The physical memory to pin. + +@Return PVRSRV_ERROR: PVRSRV_OK on success and the allocation content + was successfully restored. + + PVRSRV_ERROR_PMR_NEW_MEMORY when the content + could not be restored and new physical memory + was allocated. + + A different error otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemIntPinValidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR); +/* + * DevmemServerGetImportHandle() + * + * For given exportable memory descriptor returns PMR handle + * + */ +PVRSRV_ERROR +DevmemServerGetImportHandle(DEVMEM_MEMDESC *psMemDesc, + IMG_HANDLE *phImport); + +/* + * DevmemServerGetHeapHandle() + * + * For given reservation returns the Heap handle + * + */ +PVRSRV_ERROR +DevmemServerGetHeapHandle(DEVMEMINT_RESERVATION *psReservation, + IMG_HANDLE *phHeap); + +/* + * DevmemServerGetContext() + * + * For given heap returns the context. 
+ * + */ +PVRSRV_ERROR +DevmemServerGetContext(DEVMEMINT_HEAP *psDevmemHeap, + DEVMEMINT_CTX **ppsDevmemCtxPtr); + +/* + * DevmemServerGetPrivData() + * + * For given context returns the private data handle. + * + */ +PVRSRV_ERROR +DevmemServerGetPrivData(DEVMEMINT_CTX *psDevmemCtx, + IMG_HANDLE *phPrivData); + +/* + * DevmemIntAllocDefBackingPage + * + * This function allocates default backing page and initializes it + * with a given default value + * + */ +PVRSRV_ERROR DevmemIntAllocDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_DEF_PAGE *psDefPage, + IMG_INT uiInitValue, + IMG_CHAR *pcDefPageName, + IMG_BOOL bInitPage); +/* + * DevmemIntFreeDefBackingPage + * + * Frees a given page + */ +void DevmemIntFreeDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_DEF_PAGE *psDefPage, + IMG_CHAR *pcDefPageName); + + +/* + * DevmemIntCtxCreate() + * + * Create a Server-side Device Memory Context. This is usually the counterpart + * of the client side memory context, and indeed is usually created at the + * same time. + * + * You must have one of these before creating any heaps. + * + * All heaps must have been destroyed before calling + * DevmemIntCtxDestroy() + * + * If you call DevmemIntCtxCreate() (and it succeeds) you are promising to + * later call DevmemIntCtxDestroy() + * + * Note that this call will cause the device MMU code to do some work for + * creating the device memory context, but it does not guarantee that a page + * catalogue will have been created, as this may be deferred until the first + * allocation. + * + * Caller to provide storage for a pointer to the DEVMEM_CTX object that will + * be created by this call. 
+ */ +PVRSRV_ERROR +DevmemIntCtxCreate(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + /* devnode / perproc etc */ + IMG_BOOL bKernelMemoryCtx, + DEVMEMINT_CTX **ppsDevmemCtxPtr, + IMG_HANDLE *hPrivData, + IMG_UINT32 *pui32CPUCacheLineSize); +/* + * DevmemIntCtxDestroy() + * + * Undoes a prior DevmemIntCtxCreate or DevmemIntCtxImport. + */ +PVRSRV_ERROR +DevmemIntCtxDestroy(DEVMEMINT_CTX *psDevmemCtx); + +/* + * DevmemIntHeapCreate() + * + * Creates a new heap in this device memory context. This will cause a call + * into the MMU code to allocate various data structures for managing this + * heap. It will not necessarily cause any page tables to be set up, as this + * can be deferred until first allocation. (i.e. we shouldn't care - it's up + * to the MMU code) + * + * Note that the data page size must be specified (as log 2). The data page + * size as specified here will be communicated to the mmu module, and thus may + * determine the page size configured in page directory entries for subsequent + * allocations from this heap. It is essential that the page size here is less + * than or equal to the "minimum contiguity guarantee" of any PMR that you + * subsequently attempt to map to this heap. + * + * If you call DevmemIntHeapCreate() (and the call succeeds) you are promising + * that you shall subsequently call DevmemIntHeapDestroy() + * + * Caller to provide storage for a pointer to the DEVMEM_HEAP object that will + * be created by this call. + */ +PVRSRV_ERROR +DevmemIntHeapCreate(DEVMEMINT_CTX *psDevmemCtx, + IMG_DEV_VIRTADDR sHeapBaseAddr, + IMG_DEVMEM_SIZE_T uiHeapLength, + IMG_UINT32 uiLog2DataPageSize, + DEVMEMINT_HEAP **ppsDevmemHeapPtr); +/* + * DevmemIntHeapDestroy() + * + * Destroys a heap previously created with DevmemIntHeapCreate() + * + * All allocations from his heap must have been freed before this + * call. 
+ */ +PVRSRV_ERROR +DevmemIntHeapDestroy(DEVMEMINT_HEAP *psDevmemHeap); + +/* + * DevmemIntMapPMR() + * + * Maps the given PMR to the virtual range previously allocated with + * DevmemIntReserveRange() + * + * If appropriate, the PMR must have had its physical backing committed, as + * this call will call into the MMU code to set up the page tables for this + * allocation, which shall in turn request the physical addresses from the + * PMR. Alternatively, the PMR implementation can choose to do so off the + * the back of the "lock" callback, which it will receive as a result + * (indirectly) of this call. + * + * This function makes no promise w.r.t. the circumstances that it can be + * called, and these would be "inherited" from the implementation of the PMR. + * For example if the PMR "lock" callback causes pages to be pinned at that + * time (which may cause scheduling or disk I/O etc.) then it would not be + * legal to "Map" the PMR in a context where scheduling events are disallowed. 
+ * + * If you call DevmemIntMapPMR() (and the call succeeds) then you are promising + * that you shall later call DevmemIntUnmapPMR() + */ +PVRSRV_ERROR +DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap, + DEVMEMINT_RESERVATION *psReservation, + PMR *psPMR, + PVRSRV_MEMALLOCFLAGS_T uiMapFlags, + DEVMEMINT_MAPPING **ppsMappingPtr); +/* + * DevmemIntUnmapPMR() + * + * Reverses the mapping caused by DevmemIntMapPMR() + */ +PVRSRV_ERROR +DevmemIntUnmapPMR(DEVMEMINT_MAPPING *psMapping); + +/* DevmemIntMapPages() + * + * Maps an arbitrary amount of pages from a PMR to a reserved range + * + * @input psReservation Reservation handle for the range + * @input psPMR PMR that is mapped + * @input ui32PageCount Number of consecutive pages that are + * mapped + * @input ui32PhysicalPgOffset Logical offset in the PMR + * @input uiFlags Mapping flags + * @input sDevVAddrBase Virtual address base to start the + * mapping from + */ +PVRSRV_ERROR +DevmemIntMapPages(DEVMEMINT_RESERVATION *psReservation, + PMR *psPMR, + IMG_UINT32 ui32PageCount, + IMG_UINT32 ui32PhysicalPgOffset, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEV_VIRTADDR sDevVAddrBase); + +/* DevmemIntUnmapPages() + * + * Unmaps an arbitrary amount of pages from a reserved range + * + * @input psReservation Reservation handle for the range + * @input sDevVAddrBase Virtual address base to start from + * @input ui32PageCount Number of consecutive pages that are + * unmapped + */ +PVRSRV_ERROR +DevmemIntUnmapPages(DEVMEMINT_RESERVATION *psReservation, + IMG_DEV_VIRTADDR sDevVAddrBase, + IMG_UINT32 ui32PageCount); + +/* + * DevmemIntReserveRange() + * + * Indicates that the specified range should be reserved from the given heap. + * + * In turn causes the page tables to be allocated to cover the specified range. 
+ * + * If you call DevmemIntReserveRange() (and the call succeeds) then you are + * promising that you shall later call DevmemIntUnreserveRange() + */ +PVRSRV_ERROR +DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap, + IMG_DEV_VIRTADDR sAllocationDevVAddr, + IMG_DEVMEM_SIZE_T uiAllocationSize, + DEVMEMINT_RESERVATION **ppsReservationPtr); +/* + * DevmemIntUnreserveRange() + * + * Undoes the state change caused by DevmemIntReserveRage() + */ +PVRSRV_ERROR +DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psDevmemReservation); + +/*************************************************************************/ /*! +@Function DevmemIntChangeSparse +@Description Changes the sparse allocations of a PMR by allocating and freeing + pages and changing their corresponding CPU and GPU mappings. + +@input psDevmemHeap Pointer to the heap we map on +@input psPMR The PMR we want to map +@input ui32AllocPageCount Number of pages to allocate +@input pai32AllocIndices The logical PMR indices where pages will + be allocated. May be NULL. +@input ui32FreePageCount Number of pages to free +@input pai32FreeIndices The logical PMR indices where pages will + be freed. May be NULL. +@input uiSparseFlags Flags passed in to determine which kind + of sparse change the user wanted. + See devicemem_typedefs.h for details. +@input uiFlags Memalloc flags for this virtual range. +@input sDevVAddrBase The base address of the virtual range of + this sparse allocation. +@input sCpuVAddrBase The CPU base address of this allocation. + May be 0 if not existing. 
+@Return PVRSRV_ERROR failure code +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemIntChangeSparse(DEVMEMINT_HEAP *psDevmemHeap, + PMR *psPMR, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + SPARSE_MEM_RESIZE_FLAGS uiSparseFlags, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEV_VIRTADDR sDevVAddrBase, + IMG_UINT64 sCpuVAddrBase); + +/* + * DevmemIntFlushDevSLCRange() + * + * Flush specified device context's virtual address range from SLC. + */ +PVRSRV_ERROR +DevmemIntFlushDevSLCRange(DEVMEMINT_CTX *psDevmemCtx, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + IMG_BOOL bInvalidate); + +/* + * DevmemIntRGXInvalidateFBSCTable() + * + * Invalidate selected FBSC table indices. + * + */ +PVRSRV_ERROR +DevmemIntInvalidateFBSCTable(DEVMEMINT_CTX *psDevmemCtx, + IMG_UINT64 ui64FBSCEntryMask); + +PVRSRV_ERROR +DevmemIntIsVDevAddrValid(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + DEVMEMINT_CTX *psDevMemContext, + IMG_DEV_VIRTADDR sDevAddr); + +PVRSRV_ERROR +DevmemIntGetFaultAddress(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + DEVMEMINT_CTX *psDevMemContext, + IMG_DEV_VIRTADDR *psFaultAddress); + +/*************************************************************************/ /*! +@Function DevmemIntRegisterPFNotifyKM +@Description Registers a PID to be notified when a page fault occurs on a + specific device memory context. +@Input psDevmemCtx The context to be notified about. +@Input ui32PID The PID of the process that would like to be + notified. +@Input bRegister If true, register. If false, de-register. 
+@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx, + IMG_INT32 ui32PID, + IMG_BOOL bRegister); + +/*************************************************************************/ /*! +@Function DevmemIntPFNotify +@Description Notifies any processes that have registered themselves to be + notified when a page fault happens on a specific device memory + context. +@Input *psDevNode The device node. +@Input ui64FaultedPCAddress The page catalogue address that faulted. +@Input sFaultAddress The address that triggered the fault. +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT64 ui64FaultedPCAddress, + IMG_DEV_VIRTADDR sFaultAddress); + +#if defined(PDUMP) +/* + * DevmemIntPDumpSaveToFileVirtual() + * + * Writes out PDump "SAB" commands with the data found in memory at + * the given virtual address. 
+ */ +PVRSRV_ERROR +DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx, + IMG_DEV_VIRTADDR sDevAddrStart, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 uiArraySize, + const IMG_CHAR *pszFilename, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32PDumpFlags); + +IMG_UINT32 +DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext); + +PVRSRV_ERROR +DevmemIntPDumpBitmap(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32Width, + IMG_UINT32 ui32Height, + IMG_UINT32 ui32StrideInBytes, + IMG_DEV_VIRTADDR sDevBaseAddr, + DEVMEMINT_CTX *psDevMemContext, + IMG_UINT32 ui32Size, + PDUMP_PIXEL_FORMAT ePixelFormat, + IMG_UINT32 ui32AddrMode, + IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR +DevmemIntPDumpImageDescriptor(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_CTX *psDevMemContext, + IMG_UINT32 ui32Size, + const IMG_CHAR *pszFileName, + IMG_DEV_VIRTADDR sData, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32LogicalWidth, + IMG_UINT32 ui32LogicalHeight, + IMG_UINT32 ui32PhysicalWidth, + IMG_UINT32 ui32PhysicalHeight, + PDUMP_PIXEL_FORMAT ePixFmt, + IMG_MEMLAYOUT eMemLayout, + IMG_FB_COMPRESSION eFBCompression, + const IMG_UINT32 *paui32FBCClearColour, + PDUMP_FBC_SWIZZLE eFBCSwizzle, + IMG_DEV_VIRTADDR sHeader, + IMG_UINT32 ui32HeaderSize, + IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR +DevmemIntPDumpDataDescriptor(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_CTX *psDevMemContext, + IMG_UINT32 ui32Size, + const IMG_CHAR *pszFileName, + IMG_DEV_VIRTADDR sData, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32HeaderType, + IMG_UINT32 ui32ElementType, + IMG_UINT32 ui32ElementCount, + IMG_UINT32 ui32PDumpFlags); +#else /* PDUMP */ + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemIntPDumpSaveToFileVirtual) +#endif +static INLINE PVRSRV_ERROR +DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx, + IMG_DEV_VIRTADDR sDevAddrStart, + 
IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 uiArraySize, + const IMG_CHAR *pszFilename, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psDevmemCtx); + PVR_UNREFERENCED_PARAMETER(sDevAddrStart); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(uiArraySize); + PVR_UNREFERENCED_PARAMETER(pszFilename); + PVR_UNREFERENCED_PARAMETER(ui32FileOffset); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemIntPDumpBitmap) +#endif +static INLINE PVRSRV_ERROR +DevmemIntPDumpBitmap(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32Width, + IMG_UINT32 ui32Height, + IMG_UINT32 ui32StrideInBytes, + IMG_DEV_VIRTADDR sDevBaseAddr, + DEVMEMINT_CTX *psDevMemContext, + IMG_UINT32 ui32Size, + PDUMP_PIXEL_FORMAT ePixelFormat, + IMG_UINT32 ui32AddrMode, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(ui32FileOffset); + PVR_UNREFERENCED_PARAMETER(ui32Width); + PVR_UNREFERENCED_PARAMETER(ui32Height); + PVR_UNREFERENCED_PARAMETER(ui32StrideInBytes); + PVR_UNREFERENCED_PARAMETER(sDevBaseAddr); + PVR_UNREFERENCED_PARAMETER(psDevMemContext); + PVR_UNREFERENCED_PARAMETER(ui32Size); + PVR_UNREFERENCED_PARAMETER(ePixelFormat); + PVR_UNREFERENCED_PARAMETER(ui32AddrMode); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemIntPDumpImageDescriptor) +#endif +static INLINE PVRSRV_ERROR +DevmemIntPDumpImageDescriptor(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_CTX *psDevMemContext, + IMG_UINT32 ui32Size, + const IMG_CHAR *pszFileName, + IMG_DEV_VIRTADDR sData, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32LogicalWidth, + IMG_UINT32 ui32LogicalHeight, + 
IMG_UINT32 ui32PhysicalWidth, + IMG_UINT32 ui32PhysicalHeight, + PDUMP_PIXEL_FORMAT ePixFmt, + IMG_MEMLAYOUT eMemLayout, + IMG_FB_COMPRESSION eFBCompression, + const IMG_UINT32 *paui32FBCClearColour, + PDUMP_FBC_SWIZZLE eFBCSwizzle, + IMG_DEV_VIRTADDR sHeader, + IMG_UINT32 ui32HeaderSize, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(psDevMemContext); + PVR_UNREFERENCED_PARAMETER(ui32Size); + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(sData); + PVR_UNREFERENCED_PARAMETER(ui32DataSize); + PVR_UNREFERENCED_PARAMETER(ui32LogicalWidth); + PVR_UNREFERENCED_PARAMETER(ui32LogicalHeight); + PVR_UNREFERENCED_PARAMETER(ui32PhysicalWidth); + PVR_UNREFERENCED_PARAMETER(ui32PhysicalHeight); + PVR_UNREFERENCED_PARAMETER(ePixFmt); + PVR_UNREFERENCED_PARAMETER(eMemLayout); + PVR_UNREFERENCED_PARAMETER(eFBCompression); + PVR_UNREFERENCED_PARAMETER(paui32FBCClearColour); + PVR_UNREFERENCED_PARAMETER(eFBCSwizzle); + PVR_UNREFERENCED_PARAMETER(sHeader); + PVR_UNREFERENCED_PARAMETER(ui32HeaderSize); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemIntPDumpDataDescriptor) +#endif +static INLINE PVRSRV_ERROR +DevmemIntPDumpDataDescriptor(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_CTX *psDevMemContext, + IMG_UINT32 ui32Size, + const IMG_CHAR *pszFileName, + IMG_DEV_VIRTADDR sData, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32ElementType, + IMG_UINT32 ui32ElementCount, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(psDevMemContext); + PVR_UNREFERENCED_PARAMETER(ui32Size); + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(sData); + PVR_UNREFERENCED_PARAMETER(ui32DataSize); + PVR_UNREFERENCED_PARAMETER(ui32ElementType); + 
PVR_UNREFERENCED_PARAMETER(ui32ElementCount); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + return PVRSRV_OK; +} + +#endif /* PDUMP */ + +PVRSRV_ERROR +DevmemIntInit(void); + +PVRSRV_ERROR +DevmemIntDeInit(void); + +PVRSRV_ERROR +DevmemIntExportCtx(DEVMEMINT_CTX *psContext, + PMR *psPMR, + DEVMEMINT_CTX_EXPORT **ppsContextExport); + +PVRSRV_ERROR +DevmemIntUnexportCtx(DEVMEMINT_CTX_EXPORT *psContextExport); + +PVRSRV_ERROR +DevmemIntAcquireRemoteCtx(PMR *psPMR, + DEVMEMINT_CTX **ppsContext, + IMG_HANDLE *phPrivData); + +#endif /* DEVICEMEM_SERVER_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/devicemem_server_utils.h b/drivers/mcst/gpu-imgtec/services/server/include/devicemem_server_utils.h new file mode 100644 index 000000000000..6d43c2d82ba4 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/devicemem_server_utils.h @@ -0,0 +1,198 @@ +/**************************************************************************/ /*! +@File +@Title Device Memory Management +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header file utilities that are specific to device memory functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#include "img_defs.h" +#include "img_types.h" +#include "device.h" +#include "pvrsrv_memallocflags.h" +#include "pvrsrv.h" + +static INLINE PVRSRV_ERROR DevmemCPUCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode, + PVRSRV_MEMALLOCFLAGS_T ulFlags, + IMG_UINT32 *pui32Ret) +{ + IMG_UINT32 ui32CPUCacheMode = PVRSRV_CPU_CACHE_MODE(ulFlags); + IMG_UINT32 ui32Ret; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT(ui32CPUCacheMode == PVRSRV_CPU_CACHE_MODE(ulFlags)); + + switch (ui32CPUCacheMode) + { + case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: + ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED; + break; + + case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE: + ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE; + break; + + case PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT: + ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_CACHED; + break; + + case PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT: + + /* + * If system has no coherency but coherency has been requested for CPU + * and GPU we currently fall back to write-combine. + * This avoids errors on arm64 when uncached is turned into ordered device memory + * and suffers from problems with unaligned access. + */ + if ( (PVRSRV_GPU_CACHE_MODE(ulFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT) && + !(PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) ) + { + ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE; + } + else + { + ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_CACHED; + } + break; + + default: + PVR_LOG(("DevmemCPUCacheMode: Unknown CPU cache mode 0x%08x", ui32CPUCacheMode)); + PVR_ASSERT(0); + /* + We should never get here, but if we do then setting the mode + to uncached is the safest thing to do. 
+ */ + ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED; + eError = PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE; + break; + } + + *pui32Ret = ui32Ret; + + return eError; +} + +static INLINE PVRSRV_ERROR DevmemDeviceCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode, + PVRSRV_MEMALLOCFLAGS_T ulFlags, + IMG_UINT32 *pui32Ret) +{ + IMG_UINT32 ui32DeviceCacheMode = PVRSRV_GPU_CACHE_MODE(ulFlags); + IMG_UINT32 ui32Ret; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT(ui32DeviceCacheMode == PVRSRV_GPU_CACHE_MODE(ulFlags)); + + switch (ui32DeviceCacheMode) + { + case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED: + ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED; + break; + + case PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE: + ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE; + break; + + case PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT: + ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_CACHED; + break; + + case PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT: + + /* + * If system has no coherency but coherency has been requested for CPU + * and GPU we currently fall back to write-combine. + * This avoids errors on arm64 when uncached is turned into ordered device memory + * and suffers from problems with unaligned access. + */ + if ( (PVRSRV_CPU_CACHE_MODE(ulFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) && + !(PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) ) + { + ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE; + } + else + { + ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_CACHED; + } + break; + + default: + PVR_LOG(("DevmemDeviceCacheMode: Unknown device cache mode 0x%08x", ui32DeviceCacheMode)); + PVR_ASSERT(0); + /* + We should never get here, but if we do then setting the mode + to uncached is the safest thing to do. 
+ */ + ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED; + eError = PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE; + break; + } + + *pui32Ret = ui32Ret; + + return eError; +} + +static INLINE IMG_BOOL DevmemCPUCacheCoherency(PVRSRV_DEVICE_NODE *psDeviceNode, + PVRSRV_MEMALLOCFLAGS_T ulFlags) +{ + IMG_UINT32 ui32CPUCacheMode = PVRSRV_CPU_CACHE_MODE(ulFlags); + IMG_BOOL bRet = IMG_FALSE; + + PVR_ASSERT(ui32CPUCacheMode == PVRSRV_CPU_CACHE_MODE(ulFlags)); + + if (ui32CPUCacheMode == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) + { + bRet = PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig); + } + return bRet; +} + +static INLINE IMG_BOOL DevmemDeviceCacheCoherency(PVRSRV_DEVICE_NODE *psDeviceNode, + PVRSRV_MEMALLOCFLAGS_T ulFlags) +{ + IMG_UINT32 ui32DeviceCacheMode = PVRSRV_GPU_CACHE_MODE(ulFlags); + IMG_BOOL bRet = IMG_FALSE; + + PVR_ASSERT(ui32DeviceCacheMode == PVRSRV_GPU_CACHE_MODE(ulFlags)); + + if (ui32DeviceCacheMode == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT) + { + bRet = PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig); + } + return bRet; +} diff --git a/drivers/mcst/gpu-imgtec/services/server/include/di_common.h b/drivers/mcst/gpu-imgtec/services/server/include/di_common.h new file mode 100644 index 000000000000..1b4460cb6b04 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/di_common.h @@ -0,0 +1,228 @@ +/*************************************************************************/ /*! +@File +@Title Common types for Debug Info framework. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef DI_COMMON_H +#define DI_COMMON_H + +#include "img_types.h" + +/* Token that signals that a header should be printed. */ +#define DI_START_TOKEN ((void *) 1) + +/* This is a public handle to an entry. */ +typedef struct DI_GROUP DI_GROUP; +typedef struct DI_ENTRY DI_ENTRY; +typedef struct OSDI_IMPL_ENTRY OSDI_IMPL_ENTRY; + +/*! Debug Info entries types. */ +typedef enum DI_ENTRY_TYPE +{ + DI_ENTRY_TYPE_GENERIC, /*!< generic entry type, implements + start/stop/next/show iterator + interface */ + DI_ENTRY_TYPE_RANDOM_ACCESS, /*!< random access entry, implements + seek/read iterator interface */ +} DI_ENTRY_TYPE; + +/*! @Function DI_PFN_START + * + * @Description + * Start operation returns first entry and passes it to Show operation. + * + * @Input psEntry pointer to the implementation entry + * @InOut pui64Pos current data position in the entry + * + * @Return pointer to data that will be passed to the other iterator + * functions in pvData argument + */ +typedef void *(*DI_PFN_START)(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos); + +/*! @Function DI_PFN_STOP + * + * @Description + * Stop operations is called after iterator reaches end of data. + * + * If pvData was allocated in pfnStart it should be freed here. + * + * @Input psEntry pointer to the implementation entry + * @Input pvData pointer to data returned from pfnStart/pfnNext + */ +typedef void (*DI_PFN_STOP)(OSDI_IMPL_ENTRY *psEntry, void *pvData); + +/*! @Function DI_PFN_NEXT + * + * @Description + * Next returns next data entry and passes it to Show operation. + * + * @Input psEntry pointer to the implementation entry + * @Input pvData pointer to data returned from pfnStart/pfnNext + * @InOut pui64Pos current data position in the entry + */ +typedef void *(*DI_PFN_NEXT)(OSDI_IMPL_ENTRY *psEntry, void *pvData, + IMG_UINT64 *pui64Pos); + +/*! @Function DI_PFN_SHOW + * + * @Description + * Outputs the data element. 
+ * + * @Input psEntry pointer to the implementation entry + * @Input pvData pointer to data returned from pfnStart/pfnNext + */ +typedef int (*DI_PFN_SHOW)(OSDI_IMPL_ENTRY *psEntry, void *pvData); + +/*! @Function DI_PFN_SEEK + * + * @Description + * Changes position of the entry data pointer + * + * @Input uiOffset new entry offset (absolute) + * @Input pvData private data provided during entry creation + */ +typedef IMG_INT64 (*DI_PFN_SEEK)(IMG_UINT64 ui64Offset, void *pvData); + +/*! @Function DI_PFN_READ + * + * @Description + * Retrieves data form the entry from position previously set by Seek. + * + * @Input pszBuffer output buffer + * @Input ui64Count length of the output buffer + * @InOut pui64Pos pointer to the current position in the entry + * @Input pvData private data provided during entry creation + */ +typedef IMG_INT64 (*DI_PFN_READ)(IMG_CHAR *pszBuffer, IMG_UINT64 ui64Count, + IMG_UINT64 *pui64Pos, void *pvData); + +/*! @Function DI_PFN_WRITE + * + * @Description + * Handle writes operation to the entry. + * + * @Input pszBuffer NUL-terminated buffer containing written data + * @Input ui64Count length of the data in pszBuffer (length of the buffer) + * @InOut pui64Pos pointer to the current position in the entry + * @Input pvData private data provided during entry creation + */ +typedef IMG_INT64 (*DI_PFN_WRITE)(const IMG_CHAR *pszBuffer, + IMG_UINT64 ui64Count, IMG_UINT64 *pui64Pos, + void *pvData); + +/*! Debug info entry iterator. + * + * This covers all entry types: GENERIC and RANDOM_ACCESS. + * + * The GENERIC entry type + * + * The GENERIC type should implement either a full set of following callbacks: + * pfnStart, pfnStop, pfnNext and pfnShow, or pfnShow only. If only pfnShow + * callback is given the framework will use default handlers in place of the + * other ones. + * + * e.g. 
for generic entry: + * + * struct sIter = { + * .pfnStart = StartCb, .pfnStop = StopCb, pfnNext = NextCb, + * .pfnShow = ShowCb + * }; + * + * The use case for implementing pfnShow only is if the data for the given + * entry is short and can be printed in one go because the pfnShow callback + * will be called only once. + * + * e.g. for one-shot print generic entry: + * + * struct sIter = { + * .pfnShow = SingleShowCb + * }; + * + * The DICreateEntry() function will return error if DI_ENTRY_TYPE_GENERIC + * type is used and invalid combination of callbacks is given. + * + * The RANDOM_ACCESS entry + * + * The RANDOM_ACCESS type should implement either both pfnSeek and pfnRead + * or pfnRead only callbacks. + * + * e.g. of seekable and readable random access entry: + * + * struct sIter = { + * .pfnSeek = SeekCb, .pfnRead = ReadCb + * }; + * + * The DICreateEntry() function will return error if DI_ENTRY_TYPE_RANDOM_ACCESS + * type is used and invalid combination of callbacks is given. + * + * Writing to file (optional) + * + * The iterator allows also to pass a pfnWrite callback that allows implementing + * write operation on the entry. The write operation is entry type agnostic + * which means that it can be defined for both GENERIC and RANDOM_ACCESS + * entries. + * + * e.g. for writable one-shot print generic entry + * + * struct sIter = { + * .pfnShow = SingleShowCb, .pfnWrite = WriteCb + * }; + */ +typedef struct DI_ITERATOR_CB +{ + /* Generic entry interface. */ + + DI_PFN_START pfnStart; /*!< Starts iteration and returns first element + of entry's data. */ + DI_PFN_STOP pfnStop; /*!< Stops iteration. */ + DI_PFN_NEXT pfnNext; /*!< Returns next element of entry's data. */ + DI_PFN_SHOW pfnShow; /*!< Shows current data element of an entry. */ + + /* Optional random access entry interface. */ + + DI_PFN_SEEK pfnSeek; /*!< Sets data pointer in an entry. */ + DI_PFN_READ pfnRead; /*!< Reads data from an entry. */ + + /* Optional writing to entry interface. 
*/ + + DI_PFN_WRITE pfnWrite; /*!< Performs write operation on an entry. */ +} DI_ITERATOR_CB; + +#endif /* DI_COMMON_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/di_server.h b/drivers/mcst/gpu-imgtec/services/server/include/di_server.h new file mode 100644 index 000000000000..db5e7569b00b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/di_server.h @@ -0,0 +1,204 @@ +/*************************************************************************/ /*! +@File +@Title Functions for creating Debug Info groups and entries. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DI_SERVER_H
+#define DI_SERVER_H
+
+#include <stdarg.h>
+
+#include "di_common.h"
+#include "pvrsrv_error.h"
+#include "img_defs.h"
+
+/*! @Function DIInit
+ *
+ * @Description
+ * Initialises Debug Info framework. This function will create common resources
+ * for the framework.
+ *
+ * Note: This function must be called before registering any of the
+ * implementations with DIRegisterImplementation().
+ */
+PVRSRV_ERROR DIInit(void);
+
+/*! @Function DIInitImplementations
+ *
+ * @Description
+ * Initialises Debug Info implementations. This function will call pfnInit()
+ * on each registered implementation. It will also create the root group for all
+ * of the entries.
+ *
+ * Note: This function must be called after all of the implementations are
+ * registered with DIRegisterImplementation().
+ */
+PVRSRV_ERROR DIInitImplementations(void);
+
+/*! @Function DIDeInit
+ *
+ * @Description
+ * De-initialises Debug Info framework. This function will call pfnDeInit()
+ * on each implementation and clean up common resources. It will also remove
+ * the root group.
+ */
+void DIDeInit(void);
+
+/*!
@Function DICreateEntry
+ *
+ * @Description
+ * Creates debug info entry. Depending on different implementations the entry
+ * might be for example a DebugFS file or something totally different.
+ *
+ * The entry will belong to a parent group if provided or to the root group
+ * if not.
+ *
+ * @Input pszName: name of the new entry
+ * @Input psGroup: parent group, if NULL entry will belong to the root group
+ * @Input psIterCb: implementation of the iterator for the entry
+ * @Input psPriv: private data that will be passed to the iterator operations
+ * @Input eType: type of the entry
+ *
+ * @Output ppsEntry: handle to the newly created entry
+ *
+ * @Return PVRSRV_ERROR error code
+ */
+PVRSRV_ERROR DICreateEntry(const IMG_CHAR *pszName,
+ const DI_GROUP *psGroup,
+ const DI_ITERATOR_CB *psIterCb,
+ void *psPriv,
+ DI_ENTRY_TYPE eType,
+ DI_ENTRY **ppsEntry);
+
+/*! @Function DIDestroyEntry
+ *
+ * @Description
+ * Destroys debug info entry.
+ *
+ * @Input psEntry: handle to the entry
+ */
+void DIDestroyEntry(DI_ENTRY *psEntry);
+
+/*! @Function DICreateGroup
+ *
+ * @Description
+ * Creates debug info group. Depending on different implementations the group
+ * might be for example a DebugFS directory or something totally different.
+ *
+ * The group will belong to a parent group if provided or to the root group
+ * if not.
+ *
+ * @Input pszName: name of the new group
+ * @Input psParent: parent group, if NULL group will belong to the root group
+ *
+ * @Output ppsGroup: handle to the newly created group
+ *
+ * @Return PVRSRV_ERROR error code
+ */
+PVRSRV_ERROR DICreateGroup(const IMG_CHAR *pszName,
+ const DI_GROUP *psParent,
+ DI_GROUP **ppsGroup);
+
+/*! @Function DIDestroyGroup
+ *
+ * @Description
+ * Destroys debug info group.
+ *
+ * @Input psGroup: handle to the group
+ */
+void DIDestroyGroup(DI_GROUP *psGroup);
+
+/*! @Function DIGetPrivData
+ *
+ * @Description
+ * Retrieves private data from psEntry.
The data is either passed during
+ * entry creation via psPriv parameter of DICreateEntry() function
+ * or by explicitly setting it with DISetPrivData() function.
+ *
+ * @Input psEntry pointer to OSDI_IMPL_ENTRY object
+ *
+ * @Returns pointer to the private data (can be NULL if private data
+ * has not been specified)
+ */
+void *DIGetPrivData(const OSDI_IMPL_ENTRY *psEntry);
+
+/*! @Function DISetPrivData
+ *
+ * @Description
+ * Sets pointer to private data in OSDI_IMPL_ENTRY object. The private data
+ * can be set either using this function or by passing it in psPriv argument
+ * when calling DICreateEntry().
+ *
+ * @Input psEntry pointer to OSDI_IMPL_ENTRY object
+ * @Input pvPrivData pointer to the private data
+ */
+void DISetPrivData(OSDI_IMPL_ENTRY *psEntry, void *pvPrivData);
+
+/*! @Function DIPrintf
+ *
+ * @Description
+ * Prints formatted string to the DI entry.
+ *
+ * @Input psEntry pointer to OSDI_IMPL_ENTRY object
+ * @Input pszFmt NUL-terminated format string
+ */
+void DIPrintf(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszFmt, ...)
+ __printf(2, 3);
+
+/*! @Function DIPuts
+ *
+ * @Description
+ * Prints a string to the DI entry.
+ *
+ * @Input psEntry pointer to OSDI_IMPL_ENTRY object
+ * @Input pszStr NUL-terminated string
+ */
+void DIPuts(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszStr);
+
+/*! @Function DIHasOverflowed
+ *
+ * @Description
+ * Checks if the DI buffer has overflowed.
+ *
+ * @Return IMG_TRUE if buffer overflowed
+ */
+IMG_BOOL DIHasOverflowed(const OSDI_IMPL_ENTRY *psEntry);
+
+#endif /* DI_SERVER_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/fwload.h b/drivers/mcst/gpu-imgtec/services/server/include/fwload.h new file mode 100644 index 000000000000..a64d1377b6cb --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/fwload.h @@ -0,0 +1,148 @@ +/*************************************************************************/ /*!
+@File +@Title Services RGX OS Interface for loading the firmware +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This file defines the OS interface through which the RGX + device initialisation code in the kernel/server will obtain + the RGX firmware binary image. The API is used during the + initialisation of an RGX device via the + PVRSRVCommonDeviceInitialise() + call sequence. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _FWLOAD_H_ +#define _FWLOAD_H_ + +#include "img_defs.h" +#include "device_connection.h" +#include "device.h" + +/*! Opaque type handle defined and known to the OS layer implementation of this + * fwload.h OS API. This private data is allocated in the implementation of + * OSLoadFirmware() and contains whatever data and information needed to be + * able to acquire and return the firmware binary image to the Services + * kernel/server during initialisation. + * It is no longer required and may be freed when OSUnloadFirmware() is called. + */ +typedef struct OS_FW_IMAGE_t OS_FW_IMAGE; + +#if defined(LINUX) + +bool OSVerifyFirmware(const OS_FW_IMAGE* psFWImage); + +#endif + +/*************************************************************************/ /*! +@Function OSLoadFirmware +@Description The OS implementation must load or acquire the firmware (FW) + image binary needed by the driver stack. + A handle to the common layer device node is given to identify + which device instance in the system is being initialised. The + BVNC string is also supplied so that the implementation knows + which FW image to retrieve since each FW image only supports one + GPU type/revision. + The calling server code supports multiple GPU types and revisions + and will detect the specific GPU type and revision before calling + this API. 
It will also have runtime configuration of the VZ mode, + hence this API must be able to retrieve different FW binary + images based on the pszBVNCString given. The purpose of the end + platform/system is key to understand which FW images must be + available to the kernel server. + On exit the implementation must return a pointer to some private + data it uses to hold the FW image information and data. It will + be passed onto later API calls by the kernel server code. + NULL should be returned if the FW image could not be retrieved. + The format of the BVNC string is as follows ([x] denotes + optional field): + "rgx.fw[.signed].B.V[p].N.C[.vz]" + The implementation must first try to load the FW identified + by the pszBVpNCString parameter. If this is not available then it + should drop back to retrieving the FW identified by the + pszBVNCString parameter. The fields in the string are: + B, V, N, C are all unsigned integer identifying type/revision. + [.signed] is present when RGX_FW_SIGNED=1 is defined in the + server build. + [p] denotes a provisional (pre-silicon) GPU configuration. + [.vz] is present when the kernel server is loaded on the HOST + of a virtualised platform. See the DriverMode server + AppHint for details. + +@Input psDeviceNode Device instance identifier. +@Input pszBVNCString Identifier string of the FW image to + be loaded/acquired in production driver. +@Input pfnVerifyFirmware Callback which checks validity of FW image. +@Return OS_FW_IMAGE* Ptr to private data on success, + NULL otherwise. +*/ /**************************************************************************/ +OS_FW_IMAGE* OSLoadFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, + const IMG_CHAR *pszBVNCString, + bool (*pfnVerifyFirmware)(const OS_FW_IMAGE*)); + +/*************************************************************************/ /*! +@Function OSFirmwareData +@Description This function returns a pointer to the start of the FW image + binary data held in memory. 
It must remain valid until + OSUnloadFirmware() is called. +@Input psFWImage Private data opaque handle +@Return void* Ptr to FW binary image to start on GPU. +*/ /**************************************************************************/ +const void* OSFirmwareData(OS_FW_IMAGE *psFWImage); + +/*************************************************************************/ /*! +@Function OSFirmwareSize +@Description This function returns the size of the FW image binary data. +@Input psFWImage Private data opaque handle +@Return size_t Size in bytes of the firmware binary image +*/ /**************************************************************************/ +size_t OSFirmwareSize(OS_FW_IMAGE *psFWImage); + +/*************************************************************************/ /*! +@Function OSUnloadFirmware +@Description This is called when the server has completed firmware + initialisation and no longer needs the private data, possibly + allocated by OSLoadFirmware(). +@Input psFWImage Private data opaque handle +*/ /**************************************************************************/ +void OSUnloadFirmware(OS_FW_IMAGE *psFWImage); + +#endif /* _FWLOAD_H_ */ + +/****************************************************************************** + End of file (fwload.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/fwtrace_string.h b/drivers/mcst/gpu-imgtec/services/server/include/fwtrace_string.h new file mode 100644 index 000000000000..992be76d4f91 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/fwtrace_string.h @@ -0,0 +1,52 @@ +/*************************************************************************/ /*! +@File fwtrace_string.h +@Title RGX Firmware trace strings for KM +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Platform Generic +@Description This file defines SFs tuple. 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef _KM_TRACE_STRING_H_ +#define _KM_TRACE_STRING_H_ + +#include "rgx_fwif_sf.h" + +extern const RGXKM_STID_FMT SFs[]; +extern const IMG_UINT32 g_ui32SFsCount; + +#endif /* _KM_TRACE_STRING_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/handle.h b/drivers/mcst/gpu-imgtec/services/server/include/handle.h new file mode 100644 index 000000000000..d9db3713378c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/handle.h @@ -0,0 +1,201 @@ +/**************************************************************************/ /*! +@File +@Title Handle Manager API +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provide handle management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#if !defined(HANDLE_API_H) +#define HANDLE_API_H + +#include "lock_types.h" + +/* + * Handle API + * ---------- + * The handle API is intended to provide handles for kernel resources, which + * can then be passed back to user space processes. + * + * The following functions comprise the API. Each function takes a pointer to + * a PVRSRV_HANDLE_BASE structure, one of which is allocated for each process, + * and stored in the per-process data area. Use KERNEL_HANDLE_BASE for handles + * not allocated for a particular process, or for handles that need to be + * allocated before the PVRSRV_HANDLE_BASE structure for the process is + * available. + * + * PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, + * PVRSRV_HANDLE_ALLOC_FLAG eFlag); + * + * Allocate a handle phHandle, for the resource of type eType pointed to by + * pvData. + * + * For handles that have a definite lifetime, where the corresponding resource + * is explicitly created and destroyed, eFlag should be zero. + * + * If a particular resource may be referenced multiple times by a given + * process, setting eFlag to PVRSRV_HANDLE_ALLOC_FLAG_MULTI will allow multiple + * handles to be allocated for the resource. Such handles cannot be found with + * PVRSRVFindHandle. + * + * PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, + * PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent); + * + * This function is similar to PVRSRVAllocHandle, except that the allocated + * handles are associated with a parent handle, hParent, that has been + * allocated previously. Subhandles are automatically deallocated when their + * parent handle is deallocated. + * Subhandles can be treated as ordinary handles. 
For example, they may have + * subhandles of their own, and may be explicitly deallocated using + * PVRSRVReleaseHandle (see below). + * + * PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType); + * + * Find the handle previously allocated for the resource pointed to by pvData, + * of type eType. Handles allocated with the flag + * PVRSRV_HANDLE_ALLOC_FLAG_MULTI cannot be found using this function. + * + * PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, + * void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); + * + * Given a handle for a resource of type eType, return the pointer to the + * resource. + * + * PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, + * void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, + * IMG_HANDLE hAncestor); + * + * Similar to PVRSRVLookupHandle, but checks the handle is a descendant + * of hAncestor. + * + * PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); + * + * Deallocate a handle of given type. + * + * Return the parent of a handle in *phParent, or NULL if the handle has + * no parent. 
+ */ + +#include "img_types.h" +#include "img_defs.h" +#include "hash.h" + +typedef enum +{ + #define HANDLETYPE(x) PVRSRV_HANDLE_TYPE_##x, + #include "handle_types.h" + #undef HANDLETYPE +} PVRSRV_HANDLE_TYPE; + +static_assert(PVRSRV_HANDLE_TYPE_NONE == 0, "PVRSRV_HANDLE_TYPE_NONE must be zero"); + +typedef enum +{ + PVRSRV_HANDLE_BASE_TYPE_CONNECTION, + PVRSRV_HANDLE_BASE_TYPE_PROCESS, + PVRSRV_HANDLE_BASE_TYPE_GLOBAL +} PVRSRV_HANDLE_BASE_TYPE; + + +typedef enum +{ + /* No flags */ + PVRSRV_HANDLE_ALLOC_FLAG_NONE = 0, + /* Multiple handles can point at the given data pointer */ + PVRSRV_HANDLE_ALLOC_FLAG_MULTI = 0x01, + /* Subhandles are allocated in a private handle space */ + PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE = 0x02 +} PVRSRV_HANDLE_ALLOC_FLAG; + +typedef struct _HANDLE_BASE_ PVRSRV_HANDLE_BASE; + +typedef struct _PROCESS_HANDLE_BASE_ +{ + PVRSRV_HANDLE_BASE *psHandleBase; + ATOMIC_T iRefCount; + +} PROCESS_HANDLE_BASE; + +extern PVRSRV_HANDLE_BASE *gpsKernelHandleBase; +#define KERNEL_HANDLE_BASE (gpsKernelHandleBase) + +#define HANDLE_DEBUG_LISTING_MAX_NUM 20 + +typedef PVRSRV_ERROR (*PFN_HANDLE_RELEASE)(void *pvData); + +PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, PFN_HANDLE_RELEASE pfnReleaseData); +PVRSRV_ERROR PVRSRVAllocHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, PFN_HANDLE_RELEASE pfnReleaseData); + +PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent); +PVRSRV_ERROR PVRSRVAllocSubHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent); + +PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, 
PVRSRV_HANDLE_TYPE eType); +PVRSRV_ERROR PVRSRVFindHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType); + +PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_BOOL bRef); +PVRSRV_ERROR PVRSRVLookupHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_BOOL bRef); + +PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor); + +PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); +PVRSRV_ERROR PVRSRVReleaseHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); +PVRSRV_ERROR PVRSRVReleaseHandleStagedUnlock(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); + +PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase); + +PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase, + PVRSRV_HANDLE_BASE_TYPE eType); + +PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase, IMG_UINT64 ui64MaxBridgeTime); + +PVRSRV_ERROR PVRSRVFreeKernelHandles(PVRSRV_HANDLE_BASE *psBase); + +PVRSRV_ERROR PVRSRVHandleInit(void); + +PVRSRV_ERROR PVRSRVHandleDeInit(void); + +PVRSRV_HANDLE_BASE *PVRSRVRetrieveProcessHandleBase(void); + +void LockHandle(PVRSRV_HANDLE_BASE *psBase); +void UnlockHandle(PVRSRV_HANDLE_BASE *psBase); + +#endif /* !defined(HANDLE_API_H) */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/handle_impl.h b/drivers/mcst/gpu-imgtec/services/server/include/handle_impl.h new file mode 100644 index 000000000000..ad35236101b7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/handle_impl.h @@ -0,0 +1,89 @@ +/**************************************************************************/ /*! 
+@File +@Title Implementation Callbacks for Handle Manager API +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Part of the handle manager API. This file is for declarations + and definitions that are private/internal to the handle manager + API but need to be shared between the generic handle manager + code and the various handle manager backends, i.e. the code that + implements the various callbacks. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#if !defined(__HANDLE_IMPL_H__) +#define __HANDLE_IMPL_H__ + +#include "img_types.h" +#include "pvrsrv_error.h" + +typedef struct _HANDLE_IMPL_BASE_ HANDLE_IMPL_BASE; + +typedef PVRSRV_ERROR (*PFN_HANDLE_ITER)(IMG_HANDLE hHandle, void *pvData); + +typedef struct _HANDLE_IMPL_FUNCTAB_ +{ + /* Acquire a new handle which is associated with the given data */ + PVRSRV_ERROR (*pfnAcquireHandle)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE *phHandle, void *pvData); + + /* Release the given handle (optionally returning the data associated with it) */ + PVRSRV_ERROR (*pfnReleaseHandle)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void **ppvData); + + /* Get the data associated with the given handle */ + PVRSRV_ERROR (*pfnGetHandleData)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void **ppvData); + + /* Set the data associated with the given handle */ + PVRSRV_ERROR (*pfnSetHandleData)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void *pvData); + + PVRSRV_ERROR (*pfnIterateOverHandles)(HANDLE_IMPL_BASE *psHandleBase, PFN_HANDLE_ITER pfnHandleIter, void *pvHandleIterData); + + /* Enable handle purging on the given handle base */ + PVRSRV_ERROR (*pfnEnableHandlePurging)(HANDLE_IMPL_BASE *psHandleBase); + + /* Purge handles on the given handle base */ + PVRSRV_ERROR (*pfnPurgeHandles)(HANDLE_IMPL_BASE *psHandleBase); + + /* Create handle base */ + 
PVRSRV_ERROR (*pfnCreateHandleBase)(HANDLE_IMPL_BASE **psHandleBase); + + /* Destroy handle base */ + PVRSRV_ERROR (*pfnDestroyHandleBase)(HANDLE_IMPL_BASE *psHandleBase); +} HANDLE_IMPL_FUNCTAB; + +PVRSRV_ERROR PVRSRVHandleGetFuncTable(HANDLE_IMPL_FUNCTAB const **ppsFuncs); + +#endif /* !defined(__HANDLE_IMPL_H__) */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/handle_types.h b/drivers/mcst/gpu-imgtec/services/server/include/handle_types.h new file mode 100644 index 000000000000..7f82cf94da84 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/handle_types.h @@ -0,0 +1,83 @@ +/**************************************************************************/ /*! +@File +@Title Handle Manager handle types +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provide handle management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ +/* NOTE: Do not add include guards to this file */ + +HANDLETYPE(NONE) +HANDLETYPE(SHARED_EVENT_OBJECT) +HANDLETYPE(EVENT_OBJECT_CONNECT) +HANDLETYPE(PMR_LOCAL_EXPORT_HANDLE) +HANDLETYPE(PHYSMEM_PMR) +HANDLETYPE(PHYSMEM_PMR_EXPORT) +HANDLETYPE(PHYSMEM_PMR_SECURE_EXPORT) +HANDLETYPE(DEVMEMINT_CTX) +HANDLETYPE(DEVMEMINT_CTX_EXPORT) +HANDLETYPE(DEVMEMINT_HEAP) +HANDLETYPE(DEVMEMINT_RESERVATION) +HANDLETYPE(DEVMEMINT_MAPPING) +HANDLETYPE(RGX_FW_MEMDESC) +HANDLETYPE(RGX_FREELIST) +HANDLETYPE(RGX_MEMORY_BLOCK) +HANDLETYPE(RGX_SERVER_RENDER_CONTEXT) +HANDLETYPE(RGX_SERVER_TQ_CONTEXT) +HANDLETYPE(RGX_SERVER_TQ_TDM_CONTEXT) +HANDLETYPE(RGX_SERVER_COMPUTE_CONTEXT) +HANDLETYPE(RGX_SERVER_KICKSYNC_CONTEXT) +HANDLETYPE(SYNC_PRIMITIVE_BLOCK) +HANDLETYPE(SYNC_RECORD_HANDLE) +HANDLETYPE(PVRSRV_TIMELINE_SERVER) +HANDLETYPE(PVRSRV_FENCE_SERVER) +HANDLETYPE(PVRSRV_FENCE_EXPORT) +HANDLETYPE(RGX_KM_HW_RT_DATASET) +HANDLETYPE(RGX_FWIF_ZSBUFFER) +HANDLETYPE(RGX_POPULATION) +HANDLETYPE(DC_DEVICE) +HANDLETYPE(DC_DISPLAY_CONTEXT) +HANDLETYPE(DC_BUFFER) +HANDLETYPE(DC_PIN_HANDLE) +HANDLETYPE(DEVMEM_MEM_IMPORT) +HANDLETYPE(PHYSMEM_PMR_PAGELIST) +HANDLETYPE(PVR_TL_SD) +HANDLETYPE(RI_HANDLE) +HANDLETYPE(DEV_PRIV_DATA) +HANDLETYPE(MM_PLAT_CLEANUP) +HANDLETYPE(WORKEST_RETURN_DATA) diff --git a/drivers/mcst/gpu-imgtec/services/server/include/htbserver.h b/drivers/mcst/gpu-imgtec/services/server/include/htbserver.h new file mode 100644 index 000000000000..9f0f5ff58ab0 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/htbserver.h @@ -0,0 +1,249 @@ +/*************************************************************************/ /*! +@File htbserver.h +@Title Host Trace Buffer server implementation. +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved + +@Description Host Trace Buffer provides a mechanism to log Host events to a + buffer in a similar way to the Firmware Trace mechanism. + Host Trace Buffer logs data using a Transport Layer buffer. + The Transport Layer and pvrtld tool provides the mechanism to + retrieve the trace data. + + A Host Trace can be merged with a corresponding Firmware Trace. + This is achieved by inserting synchronisation data into both + traces and post processing to merge them. + + The FW Trace will contain a "Sync Partition Marker". This is + updated every time the RGX is brought out of reset (RGX clock + timestamps reset at this point) and is repeated when the FW + Trace buffer wraps to ensure there is always at least 1 + partition marker in the Firmware Trace buffer whenever it is + read. + + The Host Trace will contain corresponding "Sync Partition + Markers" - #HTBSyncPartitionMarker(). Each partition is then + subdivided into "Sync Scale" sections - #HTBSyncScale(). The + "Sync Scale" data allows the timestamps from the two traces to + be correlated. The "Sync Scale" data is updated as part of the + standard RGX time correlation code (rgxtimecorr.c) and is + updated periodically including on power and clock changes. + +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __HTBSERVER_H__ +#define __HTBSERVER_H__ + +#include "img_types.h" +#include "pvrsrv_error.h" +#include "pvrsrv.h" +#include "htbuffer.h" + + +/************************************************************************/ /*! + @Function HTBDeviceCreate + @Description Initialisation actions for HTB at device creation. 
+ + @Input psDeviceNode Reference to the device node in context + + @Return eError Internal services call returned eError error + number +*/ /**************************************************************************/ +PVRSRV_ERROR +HTBDeviceCreate(PVRSRV_DEVICE_NODE *psDeviceNode); + +/************************************************************************/ /*! + @Function HTBDeviceDestroy + @Description De-initialisation actions for HTB at device destruction. + + @Input psDeviceNode Reference to the device node in context + +*/ /**************************************************************************/ +void +HTBDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode); + +/************************************************************************/ /*! + @Function HTBInit + @Description Initialise the Host Trace Buffer and allocate all resources + + @Return eError Internal services call returned eError error + number +*/ /**************************************************************************/ +PVRSRV_ERROR +HTBInit(void); + +/************************************************************************/ /*! + @Function HTBDeInit + @Description Close the Host Trace Buffer and free all resources + + @Return eError Internal services call returned eError error + number +*/ /**************************************************************************/ +PVRSRV_ERROR +HTBDeInit(void); + +/*************************************************************************/ /*! 
+ @Function HTBConfigureKM + @Description Configure or update the configuration of the Host Trace Buffer + + @Input ui32NameSize Size of the pszName string + + @Input pszName Name to use for the underlying data buffer + + @Input ui32BufferSize Size of the underlying data buffer + + @Return eError Internal services call returned eError error + number +*/ /**************************************************************************/ +PVRSRV_ERROR +HTBConfigureKM(IMG_UINT32 ui32NameSize, const IMG_CHAR * pszName, + const IMG_UINT32 ui32BufferSize); + + +/*************************************************************************/ /*! + @Function HTBControlKM + @Description Update the configuration of the Host Trace Buffer + + @Input ui32NumFlagGroups Number of group enable flags words + + @Input aui32GroupEnable Flags words controlling groups to be logged + + @Input ui32LogLevel Log level to record + + @Input ui32EnablePID PID to enable logging for a specific process + + @Input eLogMode Enable logging for all or specific processes, + + @Input eOpMode Control the behaviour of the data buffer + + @Return eError Internal services call returned eError error + number +*/ /**************************************************************************/ +PVRSRV_ERROR +HTBControlKM(const IMG_UINT32 ui32NumFlagGroups, + const IMG_UINT32 *aui32GroupEnable, + const IMG_UINT32 ui32LogLevel, + const IMG_UINT32 ui32EnablePID, + const HTB_LOGMODE_CTRL eLogMode, + const HTB_OPMODE_CTRL eOpMode); + + +/*************************************************************************/ /*! + @Function HTBSyncPartitionMarker + @Description Write an HTB sync partition marker to the HTB log + + @Input ui32Marker Marker value + +*/ /**************************************************************************/ +void +HTBSyncPartitionMarker(const IMG_UINT32 ui32Marker); + +/*************************************************************************/ /*! 
+ @Function HTBSyncPartitionMarkerRepeat + @Description Write a HTB sync partition marker to the HTB log, given + the previous values to repeat. + + @Input ui32Marker Marker value + @Input ui64SyncOSTS previous OSTS + @Input ui64SyncCRTS previous CRTS + @Input ui32ClkSpeed previous Clockspeed + +*/ /**************************************************************************/ +void +HTBSyncPartitionMarkerRepeat(const IMG_UINT32 ui32Marker, + const IMG_UINT64 ui64SyncOSTS, + const IMG_UINT64 ui64SyncCRTS, + const IMG_UINT32 ui32ClkSpeed); + +/*************************************************************************/ /*! + @Function HTBSyncScale + @Description Write FW-Host synchronisation data to the HTB log when clocks + change or are re-calibrated + + @Input bLogValues IMG_TRUE if value should be immediately written + out to the log + + @Input ui64OSTS OS Timestamp + + @Input ui64CRTS Rogue timestamp + + @Input ui32CalcClkSpd Calculated clock speed + +*/ /**************************************************************************/ +void +HTBSyncScale(const IMG_BOOL bLogValues, const IMG_UINT64 ui64OSTS, + const IMG_UINT64 ui64CRTS, const IMG_UINT32 ui32CalcClkSpd); + +/*************************************************************************/ /*! + @Function HTBLogKM + @Description Record a Host Trace Buffer log event + + @Input PID The PID of the process the event is associated + with. This is provided as an argument rather + than querying internally so that events associated + with a particular process, but performed by + another can be logged correctly. + + @Input ui64TimeStamp The timestamp to be associated with this log event + + @Input SF The log event ID + + @Input ... Log parameters + + @Return PVRSRV_OK Success. 
+ +*/ /**************************************************************************/ +PVRSRV_ERROR +HTBLogKM(IMG_UINT32 PID, IMG_UINT64 ui64TimeStamp, HTB_LOG_SFids SF, + IMG_UINT32 ui32NumArgs, IMG_UINT32 *aui32Args); + +/*************************************************************************/ /*! + @Function HTBIsConfigured + @Description Determine if HTB stream has been configured + + @Input none + + @Return IMG_FALSE Stream has not been configured + IMG_TRUE Stream has been configured + +*/ /**************************************************************************/ +IMG_BOOL +HTBIsConfigured(void); +#endif /* __HTBSERVER_H__ */ + +/* EOF */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/info_page.h b/drivers/mcst/gpu-imgtec/services/server/include/info_page.h new file mode 100644 index 000000000000..90afaaf9c63e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/info_page.h @@ -0,0 +1,99 @@ +/*************************************************************************/ /*! +@File +@Title Kernel/User mode general purpose shared memory. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description General purpose memory shared between kernel driver and user + mode. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _INFO_PAGE_KM_H_ +#define _INFO_PAGE_KM_H_ + +#include "pvrsrv_error.h" + +#include "pmr.h" +#include "pvrsrv.h" +#include "info_page_defs.h" + +/** + * @Function InfoPageCreate + * @Description Allocates resources for global information page. + * @Input psData pointer to PVRSRV data + * @Return PVRSRV_OK on success and other PVRSRV_ERROR code on error. + */ +PVRSRV_ERROR InfoPageCreate(PVRSRV_DATA *psData); + +/** + * @Function InfoPageDestroy + * @Description Frees all of the resource of global information page. 
+ * @Input psData pointer to PVRSRV data + * @Return PVRSRV_OK on success and other PVRSRV_ERROR code on error. + */ +void InfoPageDestroy(PVRSRV_DATA *psData); + +/** + * @Function PVRSRVAcquireInfoPageKM() + * @Description This interface is used for obtaining the global information page + * which acts as a general purpose shared memory between KM and UM. + * The use of this information page outside of services is _not_ + * recommended. + * @Output ppsPMR handle to exported PMR + * @Return + */ +PVRSRV_ERROR PVRSRVAcquireInfoPageKM(PMR **ppsPMR); + +/** + * @Function PVRSRVReleaseInfoPageKM() + * @Description This function matches PVRSRVAcquireInfoPageKM(). + * @Input psPMR handle to exported PMR + * @Return PVRSRV_OK on success and other PVRSRV_ERROR code on error. + */ +PVRSRV_ERROR PVRSRVReleaseInfoPageKM(PMR *psPMR); + +/** + * @Function GetInfoPageDebugFlagsKM() + * @Description Return info page debug flags + * @Return info page debug flags + */ +static INLINE IMG_UINT32 GetInfoPageDebugFlagsKM(void) +{ + return (PVRSRVGetPVRSRVData())->pui32InfoPage[DEBUG_FEATURE_FLAGS]; +} + +#endif /* _INFO_PAGE_KM_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/lists.h b/drivers/mcst/gpu-imgtec/services/server/include/lists.h new file mode 100644 index 000000000000..fd25c4567403 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/lists.h @@ -0,0 +1,355 @@ +/*************************************************************************/ /*! +@File +@Title Linked list shared functions templates. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Definition of the linked list function templates. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef __LISTS_UTILS__ +#define __LISTS_UTILS__ + +/* instruct QAC to ignore warnings about the following custom formatted macros */ +/* PRQA S 0881,3410 ++ */ +#include <stdarg.h> +#include "img_types.h" +#include "device.h" +#include "power.h" + +/* + - USAGE - + + The list functions work with any structure that provides the fields psNext and + ppsThis. In order to make a function available for a given type, it is required + to use the function template macro that creates the actual code. + + There are 5 main types of functions: + - INSERT : given a pointer to the head pointer of the list and a pointer + to the node, inserts it as the new head. + - INSERT TAIL : given a pointer to the head pointer of the list and a pointer + to the node, inserts the node at the tail of the list. + - REMOVE : given a pointer to a node, removes it from its list. + - FOR EACH : apply a function over all the elements of a list. + - ANY : apply a function over the elements of a list, until one of them + return a non null value, and then returns it. + + The two last functions can have a variable argument form, which allows to pass + additional parameters to the callback function. In order to do this, the + callback function must take two arguments, the first is the current node and + the second is a list of variable arguments (va_list). + + The ANY functions have also another form which specifies the return type of the + callback function and the default value returned by the callback function. + +*/ + +/*************************************************************************/ /*! +@Function List_##TYPE##_ForEach +@Description Apply a callback function to all the elements of a list. +@Input psHead The head of the list to be processed. +@Input pfnCallBack The function to be applied to each element of the list. 
+*/ /**************************************************************************/ +#define DECLARE_LIST_FOR_EACH(TYPE) \ +void List_##TYPE##_ForEach(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode)) + +#define IMPLEMENT_LIST_FOR_EACH(TYPE) \ +void List_##TYPE##_ForEach(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))\ +{\ + while (psHead)\ + {\ + pfnCallBack(psHead);\ + psHead = psHead->psNext;\ + }\ +} + +/*************************************************************************/ /*! +@Function List_##TYPE##_ForEachSafe +@Description Apply a callback function to all the elements of a list. Do it + in a safe way that handles the fact that a node might remove + itself from the list during the iteration. +@Input psHead The head of the list to be processed. +@Input pfnCallBack The function to be applied to each element of the list. +*/ /**************************************************************************/ +#define DECLARE_LIST_FOR_EACH_SAFE(TYPE) \ +void List_##TYPE##_ForEachSafe(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode)) + +#define IMPLEMENT_LIST_FOR_EACH_SAFE(TYPE) \ +void List_##TYPE##_ForEachSafe(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))\ +{\ + TYPE *psNext;\ +\ + while (psHead)\ + {\ + psNext = psHead->psNext; \ + pfnCallBack(psHead);\ + psHead = psNext;\ + }\ +} + + +#define DECLARE_LIST_FOR_EACH_VA(TYPE) \ +void List_##TYPE##_ForEach_va(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode, va_list va), ...) + +#define IMPLEMENT_LIST_FOR_EACH_VA(TYPE) \ +void List_##TYPE##_ForEach_va(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode, va_list va), ...) \ +{\ + va_list ap;\ + while (psHead)\ + {\ + va_start(ap, pfnCallBack);\ + pfnCallBack(psHead, ap);\ + psHead = psHead->psNext;\ + va_end(ap);\ + }\ +} + + +/*************************************************************************/ /*! +@Function List_##TYPE##_Any +@Description Applies a callback function to the elements of a list until + the function returns a non null value, then returns it. 
+@Input psHead The head of the list to be processed. +@Input pfnCallBack The function to be applied to each element of the list. +@Return The first non null value returned by the callback function. +*/ /**************************************************************************/ +#define DECLARE_LIST_ANY(TYPE) \ +void* List_##TYPE##_Any(TYPE *psHead, void* (*pfnCallBack)(TYPE* psNode)) + +#define IMPLEMENT_LIST_ANY(TYPE) \ +void* List_##TYPE##_Any(TYPE *psHead, void* (*pfnCallBack)(TYPE* psNode))\ +{ \ + void *pResult;\ + TYPE *psNextNode;\ + pResult = NULL;\ + psNextNode = psHead;\ + while (psHead && !pResult)\ + {\ + psNextNode = psNextNode->psNext;\ + pResult = pfnCallBack(psHead);\ + psHead = psNextNode;\ + }\ + return pResult;\ +} + + +/*with variable arguments, that will be passed as a va_list to the callback function*/ + +#define DECLARE_LIST_ANY_VA(TYPE) \ +void* List_##TYPE##_Any_va(TYPE *psHead, void*(*pfnCallBack)(TYPE* psNode, va_list va), ...) + +#define IMPLEMENT_LIST_ANY_VA(TYPE) \ +void* List_##TYPE##_Any_va(TYPE *psHead, void*(*pfnCallBack)(TYPE* psNode, va_list va), ...)\ +{\ + va_list ap;\ + TYPE *psNextNode;\ + void* pResult = NULL;\ + while (psHead && !pResult)\ + {\ + psNextNode = psHead->psNext;\ + va_start(ap, pfnCallBack);\ + pResult = pfnCallBack(psHead, ap);\ + va_end(ap);\ + psHead = psNextNode;\ + }\ + return pResult;\ +} + +/*those ones are for extra type safety, so there's no need to use castings for the results*/ + +#define DECLARE_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \ +RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode)) + +#define IMPLEMENT_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \ +RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))\ +{ \ + RTYPE result;\ + TYPE *psNextNode;\ + result = CONTINUE;\ + psNextNode = psHead;\ + while (psHead && result == CONTINUE)\ + {\ + psNextNode = psNextNode->psNext;\ + result = pfnCallBack(psHead);\ + psHead = psNextNode;\ + }\ + return 
result;\ +} + + +#define DECLARE_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \ +RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...) + +#define IMPLEMENT_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \ +RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)\ +{\ + va_list ap;\ + TYPE *psNextNode;\ + RTYPE result = CONTINUE;\ + while (psHead && result == CONTINUE)\ + {\ + psNextNode = psHead->psNext;\ + va_start(ap, pfnCallBack);\ + result = pfnCallBack(psHead, ap);\ + va_end(ap);\ + psHead = psNextNode;\ + }\ + return result;\ +} + + +/*************************************************************************/ /*! +@Function List_##TYPE##_Remove +@Description Removes a given node from the list. +@Input psNode The pointer to the node to be removed. +*/ /**************************************************************************/ +#define DECLARE_LIST_REMOVE(TYPE) \ +void List_##TYPE##_Remove(TYPE *psNode) + +#define IMPLEMENT_LIST_REMOVE(TYPE) \ +void List_##TYPE##_Remove(TYPE *psNode)\ +{\ + (*psNode->ppsThis)=psNode->psNext;\ + if (psNode->psNext)\ + {\ + psNode->psNext->ppsThis = psNode->ppsThis;\ + }\ +} + +/*************************************************************************/ /*! +@Function List_##TYPE##_Insert +@Description Inserts a given node at the beginning of the list. +@Input psHead The pointer to the pointer to the head node. +@Input psNode The pointer to the node to be inserted. 
+*/ /**************************************************************************/ +#define DECLARE_LIST_INSERT(TYPE) \ +void List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode) + +#define IMPLEMENT_LIST_INSERT(TYPE) \ +void List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)\ +{\ + psNewNode->ppsThis = ppsHead;\ + psNewNode->psNext = *ppsHead;\ + *ppsHead = psNewNode;\ + if (psNewNode->psNext)\ + {\ + psNewNode->psNext->ppsThis = &(psNewNode->psNext);\ + }\ +} + +/*************************************************************************/ /*! +@Function List_##TYPE##_InsertTail +@Description Inserts a given node at the end of the list. +@Input psHead The pointer to the pointer to the head node. +@Input psNode The pointer to the node to be inserted. +*/ /**************************************************************************/ +#define DECLARE_LIST_INSERT_TAIL(TYPE) \ +void List_##TYPE##_InsertTail(TYPE **ppsHead, TYPE *psNewNode) + +#define IMPLEMENT_LIST_INSERT_TAIL(TYPE) \ +void List_##TYPE##_InsertTail(TYPE **ppsHead, TYPE *psNewNode)\ +{\ + TYPE *psTempNode = *ppsHead;\ + if (psTempNode != NULL)\ + {\ + while (psTempNode->psNext)\ + psTempNode = psTempNode->psNext;\ + ppsHead = &psTempNode->psNext;\ + }\ + psNewNode->ppsThis = ppsHead;\ + psNewNode->psNext = NULL;\ + *ppsHead = psNewNode;\ +} + +/*************************************************************************/ /*! +@Function List_##TYPE##_Reverse +@Description Reverse a list in place +@Input ppsHead The pointer to the pointer to the head node. 
+*/ /**************************************************************************/ +#define DECLARE_LIST_REVERSE(TYPE) \ +void List_##TYPE##_Reverse(TYPE **ppsHead) + +#define IMPLEMENT_LIST_REVERSE(TYPE) \ +void List_##TYPE##_Reverse(TYPE **ppsHead)\ +{\ + TYPE *psTmpNode1; \ + TYPE *psTmpNode2; \ + TYPE *psCurNode; \ + psTmpNode1 = NULL; \ + psCurNode = *ppsHead; \ + while (psCurNode) { \ + psTmpNode2 = psCurNode->psNext; \ + psCurNode->psNext = psTmpNode1; \ + psTmpNode1 = psCurNode; \ + psCurNode = psTmpNode2; \ + if (psCurNode) \ + { \ + psTmpNode1->ppsThis = &(psCurNode->psNext); \ + } \ + else \ + { \ + psTmpNode1->ppsThis = ppsHead; \ + } \ + } \ + *ppsHead = psTmpNode1; \ +} + +#define IS_LAST_ELEMENT(x) ((x)->psNext == NULL) + + +DECLARE_LIST_ANY(PVRSRV_DEVICE_NODE); +DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, IMG_BOOL, IMG_FALSE); +DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK); +DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE); +DECLARE_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK); +DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE); +DECLARE_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE); +DECLARE_LIST_INSERT_TAIL(PVRSRV_DEVICE_NODE); +DECLARE_LIST_REMOVE(PVRSRV_DEVICE_NODE); + +#undef DECLARE_LIST_ANY_2 +#undef DECLARE_LIST_ANY_VA +#undef DECLARE_LIST_ANY_VA_2 +#undef DECLARE_LIST_FOR_EACH +#undef DECLARE_LIST_FOR_EACH_VA +#undef DECLARE_LIST_INSERT +#undef DECLARE_LIST_REMOVE + +#endif + +/* re-enable warnings */ +/* PRQA S 0881,3410 -- */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/mmu_common.h b/drivers/mcst/gpu-imgtec/services/server/include/mmu_common.h new file mode 100644 index 000000000000..971cc524faa2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/mmu_common.h @@ -0,0 +1,774 @@ +/*************************************************************************/ /*! +@File +@Title Common MMU Management +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Implements basic low level control of MMU. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef MMU_COMMON_H +#define MMU_COMMON_H + +/* + The Memory Management Unit (MMU) performs device virtual to physical + translation. + + Terminology: + - page catalogue, PC (optional, 3 tier MMU) + - page directory, PD + - page table, PT (can be variable sized) + - data page, DP (can be variable sized) + Note: PD and PC are fixed size and can't be larger than the native + physical (CPU) page size + Shifts and AlignShift variables: + - 'xxxShift' represent the number of bits a bitfield is shifted left from bit0 + - 'xxxAlignShift' is used to convert a bitfield (based at bit0) into byte units + by applying a bit shift left by 'xxxAlignShift' bits +*/ + +/* + Device Virtual Address Config: + + Incoming Device Virtual Address is deconstructed into up to 4 + fields, where the virtual address is up to 64bits: + MSB-----------------------------------------------LSB + | PC Index: | PD Index: | PT Index: | DP offset: | + | d bits | c bits | b-v bits | a+v bits | + ----------------------------------------------------- + where v is the variable page table modifier, e.g. 
+ v == 0 -> 4KB DP + v == 2 -> 16KB DP + v == 4 -> 64KB DP + v == 6 -> 256KB DP + v == 8 -> 1MB DP + v == 10 -> 4MB DP +*/ + +/* services/server/include/ */ +#include "pmr.h" + +/* include/ */ +#include "img_types.h" +#include "img_defs.h" +#include "pvr_notifier.h" +#include "pvrsrv_error.h" +#include "servicesext.h" + + +/*! + The level of the MMU +*/ +typedef enum +{ + MMU_LEVEL_0 = 0, /* Level 0 = Page */ + + MMU_LEVEL_1, + MMU_LEVEL_2, + MMU_LEVEL_3, + MMU_LEVEL_LAST +} MMU_LEVEL; + +/* moved after declaration of MMU_LEVEL, as pdump_mmu.h references it */ +#include "pdump_mmu.h" + +#define MMU_MAX_LEVEL 3 + +typedef struct _MMU_LEVEL_DATA_ +{ + IMG_UINT32 ui32Index; + IMG_UINT32 ui32NumOfEntries; + IMG_CHAR const *psDebugStr; + IMG_UINT8 uiBytesPerEntry; + IMG_UINT64 ui64Address; +} MMU_LEVEL_DATA; + +typedef enum _MMU_FAULT_TYPE_ +{ + MMU_FAULT_TYPE_UNKNOWN = 0, /* If fault is not analysed by Host */ + MMU_FAULT_TYPE_PM, + MMU_FAULT_TYPE_NON_PM, +} MMU_FAULT_TYPE; + +typedef struct _MMU_FAULT_DATA_ +{ + MMU_LEVEL eTopLevel; + MMU_FAULT_TYPE eType; + MMU_LEVEL_DATA sLevelData[MMU_LEVEL_LAST]; +} MMU_FAULT_DATA; + +struct _MMU_DEVVADDR_CONFIG_; + +/*! + MMU device attributes. This structure is the interface between the generic + MMU code and the device specific MMU code. +*/ +typedef struct _MMU_DEVICEATTRIBS_ +{ + PDUMP_MMU_TYPE eMMUType; + + IMG_CHAR *pszMMUPxPDumpMemSpaceName; + + /*! The type of the top level object */ + MMU_LEVEL eTopLevel; + + /*! Alignment requirement of the base object */ + IMG_UINT32 ui32BaseAlign; + + /*! HW config of the base object */ + struct _MMU_PxE_CONFIG_ *psBaseConfig; + + /*! Address split for the base object */ + const struct _MMU_DEVVADDR_CONFIG_ *psTopLevelDevVAddrConfig; + + /*! Callback for creating protection bits for the page catalogue entry with 8 byte entry */ + IMG_UINT64 (*pfnDerivePCEProt8)(IMG_UINT32, IMG_UINT32); + /*! 
Callback for creating protection bits for the page catalogue entry with 4 byte entry */ + IMG_UINT32 (*pfnDerivePCEProt4)(IMG_UINT32); + /*! Callback for creating protection bits for the page directory entry with 8 byte entry */ + IMG_UINT64 (*pfnDerivePDEProt8)(IMG_UINT32, IMG_UINT32); + /*! Callback for creating protection bits for the page directory entry with 4 byte entry */ + IMG_UINT32 (*pfnDerivePDEProt4)(IMG_UINT32); + /*! Callback for creating protection bits for the page table entry with 8 byte entry */ + IMG_UINT64 (*pfnDerivePTEProt8)(IMG_UINT32, IMG_UINT32); + /*! Callback for creating protection bits for the page table entry with 4 byte entry */ + IMG_UINT32 (*pfnDerivePTEProt4)(IMG_UINT32); + + /*! Callback for getting the MMU configuration based on the specified page size */ + PVRSRV_ERROR (*pfnGetPageSizeConfiguration)(IMG_UINT32 ui32DataPageSize, + const struct _MMU_PxE_CONFIG_ **ppsMMUPDEConfig, + const struct _MMU_PxE_CONFIG_ **ppsMMUPTEConfig, + const struct _MMU_DEVVADDR_CONFIG_ **ppsMMUDevVAddrConfig, + IMG_HANDLE *phPriv2); + /*! Callback for putting the MMU configuration obtained from pfnGetPageSizeConfiguration */ + PVRSRV_ERROR (*pfnPutPageSizeConfiguration)(IMG_HANDLE hPriv); + + /*! Callback for getting the page size from the PDE for the page table entry with 4 byte entry */ + PVRSRV_ERROR (*pfnGetPageSizeFromPDE4)(IMG_UINT32, IMG_UINT32 *); + /*! Callback for getting the page size from the PDE for the page table entry with 8 byte entry */ + PVRSRV_ERROR (*pfnGetPageSizeFromPDE8)(IMG_UINT64, IMG_UINT32 *); + /*! Callback for getting the page size directly from the address. Supported on MMU4 */ + PVRSRV_ERROR (*pfnGetPageSizeFromVirtAddr)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_DEV_VIRTADDR, IMG_UINT32 *); + + /*! Private data handle */ + IMG_HANDLE hGetPageSizeFnPriv; +} MMU_DEVICEATTRIBS; + +/*! + MMU virtual address split +*/ +typedef struct _MMU_DEVVADDR_CONFIG_ +{ + /*! 
Page catalogue index mask */ + IMG_UINT64 uiPCIndexMask; + /*! Page catalogue index shift */ + IMG_UINT8 uiPCIndexShift; + /*! Total number of PC entries */ + IMG_UINT32 uiNumEntriesPC; + /*! Page directory mask */ + IMG_UINT64 uiPDIndexMask; + /*! Page directory shift */ + IMG_UINT8 uiPDIndexShift; + /*! Total number of PD entries */ + IMG_UINT32 uiNumEntriesPD; + /*! Page table mask */ + IMG_UINT64 uiPTIndexMask; + /*! Page index shift */ + IMG_UINT8 uiPTIndexShift; + /*! Total number of PT entries */ + IMG_UINT32 uiNumEntriesPT; + /*! Page offset mask */ + IMG_UINT64 uiPageOffsetMask; + /*! Page offset shift */ + IMG_UINT8 uiPageOffsetShift; + /*! First virtual address mappable for this config */ + IMG_UINT64 uiOffsetInBytes; + +} MMU_DEVVADDR_CONFIG; + +/* + P(C/D/T) Entry Config: + + MSB-----------------------------------------------LSB + | PT Addr: | variable PT ctrl | protection flags: | + | bits c+v | b bits | a bits | + ----------------------------------------------------- + where v is the variable page table modifier and is optional +*/ +/*! + Generic MMU entry description. This is used to describe PC, PD and PT entries. +*/ +typedef struct _MMU_PxE_CONFIG_ +{ + IMG_UINT8 uiBytesPerEntry; /*! Size of an entry in bytes */ + + IMG_UINT64 uiAddrMask; /*! Physical address mask */ + IMG_UINT8 uiAddrShift; /*! Physical address shift */ + IMG_UINT8 uiAddrLog2Align; /*! Physical address Log 2 alignment */ + + IMG_UINT64 uiVarCtrlMask; /*! Variable control mask */ + IMG_UINT8 uiVarCtrlShift; /*! Variable control shift */ + + IMG_UINT64 uiProtMask; /*! Protection flags mask */ + IMG_UINT8 uiProtShift; /*! Protection flags shift */ + + IMG_UINT64 uiValidEnMask; /*! Entry valid bit mask */ + IMG_UINT8 uiValidEnShift; /*! Entry valid bit shift */ +} MMU_PxE_CONFIG; + +/* MMU Protection flags */ + + +/* These are specified generically and in a h/w independent way, and + are interpreted at each level (PC/PD/PT) separately. 
*/ + +/* The following flags are for internal use only, and should not + traverse the API */ +#define MMU_PROTFLAGS_INVALID 0x80000000U + +typedef IMG_UINT32 MMU_PROTFLAGS_T; + +/* The following flags should be supplied by the caller: */ +#define MMU_PROTFLAGS_READABLE (1U<<0) +#define MMU_PROTFLAGS_WRITEABLE (1U<<1) +#define MMU_PROTFLAGS_CACHE_COHERENT (1U<<2) +#define MMU_PROTFLAGS_CACHED (1U<<3) + +/* Device specific flags*/ +#define MMU_PROTFLAGS_DEVICE_OFFSET 16 +#define MMU_PROTFLAGS_DEVICE_MASK 0x000f0000UL +#define MMU_PROTFLAGS_DEVICE(n) \ + (((n) << MMU_PROTFLAGS_DEVICE_OFFSET) & \ + MMU_PROTFLAGS_DEVICE_MASK) + + +typedef struct _MMU_CONTEXT_ MMU_CONTEXT; + +struct _PVRSRV_DEVICE_NODE_; + +struct _CONNECTION_DATA_; + +typedef struct _MMU_PAGESIZECONFIG_ +{ + const MMU_PxE_CONFIG *psPDEConfig; + const MMU_PxE_CONFIG *psPTEConfig; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + IMG_UINT32 uiRefCount; + IMG_UINT32 uiMaxRefCount; +} MMU_PAGESIZECONFIG; + +/*************************************************************************/ /*! +@Function MMU_ContextCreate + +@Description Create a new MMU context + +@Input psConnection Connection requesting the MMU context + creation. Can be NULL for kernel/FW + memory context. +@Input psDevNode Device node of the device to create the + MMU context for +@Output ppsMMUContext The created MMU context + +@Return PVRSRV_OK if the MMU context was successfully created +*/ +/*****************************************************************************/ +PVRSRV_ERROR +MMU_ContextCreate(struct _CONNECTION_DATA_ *psConnection, + struct _PVRSRV_DEVICE_NODE_ *psDevNode, + MMU_CONTEXT **ppsMMUContext, + MMU_DEVICEATTRIBS *psDevAttrs); + + +/*************************************************************************/ /*! 
+@Function MMU_ContextDestroy + +@Description Destroy a MMU context + +@Input psMMUContext MMU context to destroy + +@Return None +*/ +/*****************************************************************************/ +void +MMU_ContextDestroy(MMU_CONTEXT *psMMUContext); + +/*************************************************************************/ /*! +@Function MMU_Alloc + +@Description Allocate the page tables required for the specified virtual range + +@Input psMMUContext MMU context to operate on + +@Input uSize The size of the allocation + +@Output puActualSize Actual size of allocation + +@Input uiProtFlags Generic MMU protection flags + +@Input uDevVAddrAlignment Alignment requirement of the virtual + allocation + +@Input psDevVAddr Virtual address to start the allocation + from + +@Return PVRSRV_OK if the allocation of the page tables was successful +*/ +/*****************************************************************************/ +PVRSRV_ERROR +MMU_Alloc(MMU_CONTEXT *psMMUContext, + IMG_DEVMEM_SIZE_T uSize, + IMG_DEVMEM_SIZE_T *puActualSize, + IMG_UINT32 uiProtFlags, + IMG_DEVMEM_SIZE_T uDevVAddrAlignment, + IMG_DEV_VIRTADDR *psDevVAddr, + IMG_UINT32 uiLog2PageSize); + + +/*************************************************************************/ /*! +@Function MMU_Free + +@Description Free the page tables of the specified virtual range + +@Input psMMUContext MMU context to operate on + +@Input sDevVAddr Virtual address to start the free + from + +@Input uiSize The size of the allocation + +@Return None +*/ +/*****************************************************************************/ +void +MMU_Free(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 uiLog2DataPageSize); + + +/*************************************************************************/ /*! +@Function MMU_MapPages + +@Description Map pages to the MMU. 
+ Two modes of operation: One requires a list of physical page + indices that are going to be mapped, the other just takes + the PMR and a possible offset to map parts of it. + +@Input psMMUContext MMU context to operate on + +@Input uiMappingFlags Memalloc flags for the mapping + +@Input sDevVAddrBase Device virtual address of the 1st page + +@Input psPMR PMR to map + +@Input ui32PhysPgOffset Physical offset into the PMR + +@Input ui32MapPageCount Number of pages to map + +@Input paui32MapIndices List of page indices to map, + can be NULL + +@Input uiLog2PageSize Log2 page size of the pages to map + +@Return PVRSRV_OK if the mapping was successful +*/ +/*****************************************************************************/ +PVRSRV_ERROR +MMU_MapPages(MMU_CONTEXT *psMMUContext, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + IMG_DEV_VIRTADDR sDevVAddrBase, + PMR *psPMR, + IMG_UINT32 ui32PhysPgOffset, + IMG_UINT32 ui32MapPageCount, + IMG_UINT32 *paui32MapIndices, + IMG_UINT32 uiLog2PageSize); + +/*************************************************************************/ /*! +@Function MMU_UnmapPages + +@Description Unmap pages from the MMU. 
+ +@Input psMMUContext MMU context to operate on + +@Input uiMappingFlags Memalloc flags for the mapping + +@Input sDevVAddr Device virtual address of the 1st page + +@Input ui32PageCount Number of pages to unmap + +@Input pai32UnmapIndicies Array of page indices to be unmapped + +@Input uiLog2PageSize log2 size of the page + + +@Input uiMemAllocFlags Indicates if the unmapped regions need + to be backed by dummy or zero page + +@Return None +*/ +/*****************************************************************************/ +void +MMU_UnmapPages(MMU_CONTEXT *psMMUContext, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_UINT32 ui32PageCount, + IMG_UINT32 *pai32UnmapIndicies, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags); + +/*************************************************************************/ /*! +@Function MMU_MapPMRFast + +@Description Map a PMR into the MMU. Must be not sparse. + This is supposed to cover most mappings and, as the name suggests, + should be as fast as possible. + +@Input psMMUContext MMU context to operate on + +@Input sDevVAddr Device virtual address to map the PMR + into + +@Input psPMR PMR to map + +@Input uiSizeBytes Size in bytes to map + +@Input uiMappingFlags Memalloc flags for the mapping + +@Return PVRSRV_OK if the PMR was successfully mapped +*/ +/*****************************************************************************/ +PVRSRV_ERROR +MMU_MapPMRFast(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddr, + const PMR *psPMR, + IMG_DEVMEM_SIZE_T uiSizeBytes, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + IMG_UINT32 uiLog2PageSize); + +/*************************************************************************/ /*! +@Function MMU_UnmapPMRFast + +@Description Unmap pages from the MMU as fast as possible. + PMR must be non-sparse! 
+ +@Input psMMUContext MMU context to operate on + +@Input sDevVAddrBase Device virtual address of the 1st page + +@Input ui32PageCount Number of pages to unmap + +@Input uiLog2PageSize log2 size of the page + +@Return None +*/ +/*****************************************************************************/ +void +MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddrBase, + IMG_UINT32 ui32PageCount, + IMG_UINT32 uiLog2PageSize); + +/*************************************************************************/ /*! +@Function MMU_ChangeValidity + +@Description Sets or unsets the valid bit of page table entries for a given + address range. + +@Input psMMUContext MMU context to operate on + +@Input sDevVAddr The device virtual base address of + the range we want to modify + +@Input uiSizeBytes The size of the range in bytes + +@Input uiLog2PageSize Log2 of the used page size + +@Input bMakeValid Choose to set or unset the valid bit. + (bMakeValid == IMG_TRUE ) -> SET + (bMakeValid == IMG_FALSE) -> UNSET + +@Input psPMR The PMR backing the allocation. + Needed in case we have sparse memory + where we have to check whether a physical + address actually backs the virtual. + +@Return PVRSRV_OK if successful +*/ +/*****************************************************************************/ +PVRSRV_ERROR +MMU_ChangeValidity(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSizeBytes, + IMG_UINT32 uiLog2PageSize, + IMG_BOOL bMakeValid, + PMR *psPMR); + +/*************************************************************************/ /*! 
+@Function MMU_AcquireBaseAddr + +@Description Acquire the device physical address of the base level MMU object + +@Input psMMUContext MMU context to operate on + +@Output psPhysAddr Device physical address of the base level + MMU object + +@Return PVRSRV_OK if successful +*/ +/*****************************************************************************/ +PVRSRV_ERROR +MMU_AcquireBaseAddr(MMU_CONTEXT *psMMUContext, IMG_DEV_PHYADDR *psPhysAddr); + +/*************************************************************************/ /*! +@Function MMU_ReleaseBaseAddr + +@Description Release the device physical address of the base level MMU object + +@Input psMMUContext MMU context to operate on + +@Return PVRSRV_OK if successful +*/ +/*****************************************************************************/ +void +MMU_ReleaseBaseAddr(MMU_CONTEXT *psMMUContext); + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +/***********************************************************************************/ /*! +@Function MMU_SetOSid + +@Description Set the OSid associated with the application (and the MMU Context) + +@Input psMMUContext MMU context to store the OSid on + +@Input ui32OSid the OSid in question + +@Input ui32OSidReg The value that the firmware will assign to the + registers. + +@Input bOSidAxiProt Toggles whether the AXI prot bit will be set or + not. +@Return None +*/ +/***********************************************************************************/ + +void MMU_SetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32OSid, + IMG_UINT32 ui32OSidReg, IMG_BOOL bOSidAxiProt); + +/***********************************************************************************/ /*! +@Function MMU_GetOSid + +@Description Retrieve the OSid associated with the MMU context. + +@Input psMMUContext MMU context in which the OSid is stored + +@Output pui32OSid The OSid in question + +@Output pui32OSidReg The OSid that the firmware will assign to the + registers. 
+ +@Output pbOSidAxiProt Toggles whether the AXI prot bit will be set or + not. +@Return None +*/ +/***********************************************************************************/ + +void MMU_GetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 * pui32OSid, + IMG_UINT32 * pui32OSidReg, IMG_BOOL *pbOSidAxiProt); +#endif + +/*************************************************************************/ /*! +@Function MMU_AppendCacheFlags + +@Description Set the cache flags to the bitwise or of themselves and the + specified input flags, i.e. ui32CacheFlags |= ui32NewCacheFlags, + atomically. + +@Input psMMUContext MMU context + +@Input ui32NewCacheFlags Cache flags to append. + +@Return None +*/ +/*****************************************************************************/ +void MMU_AppendCacheFlags(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32NewCacheFlags); + +/*************************************************************************/ /*! +@Function MMU_ExchangeCacheFlags + +@Description Exchange MMU context flags with specified value, atomically. + +@Input psMMUContext MMU context + +@Input ui32CacheFlags Cache flags to set. + +@Return Previous MMU context cache flags. +*/ +/*****************************************************************************/ +IMG_UINT32 MMU_ExchangeCacheFlags(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32NewCacheFlags); + +/*************************************************************************/ /*! 
+@Function MMU_CheckFaultAddress + +@Description Check the specified MMU context to see if the provided address + should be valid + +@Input psMMUContext MMU context to store the data on + +@Input psDevVAddr Address to check + +@Output psOutFaultData To store fault details after checking + +@Return None +*/ +/*****************************************************************************/ +void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR *psDevVAddr, + MMU_FAULT_DATA *psOutFaultData); + +/*************************************************************************/ /*! +@Function MMU_IsVDevAddrValid +@Description Checks if given address is valid. +@Input psMMUContext MMU context to store the data on +@Input uiLog2PageSize page size +@Input sDevVAddr Address to check +@Return IMG_TRUE of address is valid +*/ /**************************************************************************/ +IMG_BOOL MMU_IsVDevAddrValid(MMU_CONTEXT *psMMUContext, + IMG_UINT32 uiLog2PageSize, + IMG_DEV_VIRTADDR sDevVAddr); + +#if defined(PDUMP) + +/*************************************************************************/ /*! +@Function MMU_ContextDerivePCPDumpSymAddr + +@Description Derives a PDump Symbolic address for the top level MMU object + +@Input psMMUContext MMU context to operate on + +@Input pszPDumpSymbolicNameBuffer Buffer to write the PDump symbolic + address to + +@Input uiPDumpSymbolicNameBufferSize Size of the buffer + +@Return PVRSRV_OK if successful +*/ +/*****************************************************************************/ +PVRSRV_ERROR +MMU_ContextDerivePCPDumpSymAddr(MMU_CONTEXT *psMMUContext, + IMG_CHAR *pszPDumpSymbolicNameBuffer, + size_t uiPDumpSymbolicNameBufferSize); + +/*************************************************************************/ /*! 
+@Function MMU_PDumpWritePageCatBase + +@Description PDump write of the top level MMU object to a device register + +@Input psMMUContext MMU context to operate on + +@Input pszSpaceName PDump name of the mem/reg space + +@Input uiOffset Offset to write the address to + +@Return PVRSRV_OK if successful +*/ +/*****************************************************************************/ +PVRSRV_ERROR MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext, + const IMG_CHAR *pszSpaceName, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32WordSize, + IMG_UINT32 ui32AlignShift, + IMG_UINT32 ui32Shift, + PDUMP_FLAGS_T uiPdumpFlags); + +/*************************************************************************/ /*! +@Function MMU_AcquirePDumpMMUContext + +@Description Acquire a reference to the PDump MMU context for this MMU + context + +@Input psMMUContext MMU context to operate on + +@Output pui32PDumpMMUContextID PDump MMU context ID + +@Return PVRSRV_OK if successful +*/ +/*****************************************************************************/ +PVRSRV_ERROR +MMU_AcquirePDumpMMUContext(MMU_CONTEXT *psMMUContext, + IMG_UINT32 *pui32PDumpMMUContextID, + IMG_UINT32 ui32PDumpFlags); + +/*************************************************************************/ /*! 
+@Function MMU_ReleasePDumpMMUContext + +@Description Release a reference to the PDump MMU context for this MMU context + +@Input psMMUContext MMU context to operate on + +@Return PVRSRV_OK if successful +*/ +/*****************************************************************************/ +PVRSRV_ERROR +MMU_ReleasePDumpMMUContext(MMU_CONTEXT *psMMUContext, + IMG_UINT32 ui32PDumpFlags); +#else /* PDUMP */ + +#ifdef INLINE_IS_PRAGMA +#pragma inline(MMU_PDumpWritePageCatBase) +#endif +static INLINE void +MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext, + const IMG_CHAR *pszSpaceName, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32WordSize, + IMG_UINT32 ui32AlignShift, + IMG_UINT32 ui32Shift, + PDUMP_FLAGS_T uiPdumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psMMUContext); + PVR_UNREFERENCED_PARAMETER(pszSpaceName); + PVR_UNREFERENCED_PARAMETER(uiOffset); + PVR_UNREFERENCED_PARAMETER(ui32WordSize); + PVR_UNREFERENCED_PARAMETER(ui32AlignShift); + PVR_UNREFERENCED_PARAMETER(ui32Shift); + PVR_UNREFERENCED_PARAMETER(uiPdumpFlags); +} +#endif /* PDUMP */ + +#endif /* #ifdef MMU_COMMON_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/opaque_types.h b/drivers/mcst/gpu-imgtec/services/server/include/opaque_types.h new file mode 100644 index 000000000000..766bc22ea418 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/opaque_types.h @@ -0,0 +1,56 @@ +/*************************************************************************/ /*! +@File +@Title Opaque Types +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Defines opaque types for various services types +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef SERVICES_OPAQUE_TYPES_H +#define SERVICES_OPAQUE_TYPES_H + +#include "img_defs.h" +#include "img_types.h" + +typedef struct _PVRSRV_DEVICE_NODE_ *PPVRSRV_DEVICE_NODE; +typedef const struct _PVRSRV_DEVICE_NODE_ *PCPVRSRV_DEVICE_NODE; + +#endif /* SERVICES_OPAQUE_TYPES_H */ + +/****************************************************************************** + End of file (opaque_types.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/os_srvinit_param.h b/drivers/mcst/gpu-imgtec/services/server/include/os_srvinit_param.h new file mode 100644 index 000000000000..b1cf3403e894 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/os_srvinit_param.h @@ -0,0 +1,322 @@ +/*************************************************************************/ /*! +@File +@Title Services initialisation parameters header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Services initialisation parameter support for the Linux kernel. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef OS_SRVINIT_PARAM_H +#define OS_SRVINIT_PARAM_H + +#if defined(LINUX) && defined(__KERNEL__) +#include "km_apphint.h" +#include "km_apphint_defs.h" + +#define SrvInitParamOpen() NULL +#define SrvInitParamClose(pvState) ((void)(pvState)) + +#define SrvInitParamGetBOOL(state, name, value) \ + ((void) pvr_apphint_get_bool(APPHINT_ID_ ## name, &value)) + +#define SrvInitParamGetUINT32(state, name, value) \ + ((void) pvr_apphint_get_uint32(APPHINT_ID_ ## name, &value)) + +#define SrvInitParamGetUINT64(state, name, value) \ + ((void) pvr_apphint_get_uint64(APPHINT_ID_ ## name, &value)) + +#define SrvInitParamGetSTRING(state, name, buffer, size) \ + ((void) pvr_apphint_get_string(APPHINT_ID_ ## name, buffer, size)) + +#define SrvInitParamGetUINT32BitField(state, name, value) \ + ((void) pvr_apphint_get_uint32(APPHINT_ID_ ## name, &value)) + +#define SrvInitParamGetUINT32List(state, name, value) \ + ((void) pvr_apphint_get_uint32(APPHINT_ID_ ## name, &value)) + +#else /* defined(LINUX) && defined(__KERNEL__) */ + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "img_defs.h" +#include "img_types.h" + +/*! Lookup item. */ +typedef struct +{ + const IMG_CHAR *pszValue; /*!< looked up name */ + IMG_UINT32 ui32Value; /*!< looked up value */ +} SRV_INIT_PARAM_UINT32_LOOKUP; + +/*************************************************************************/ /*! +@Brief SrvInitParamOpen + +@Description Establish a connection to the Parameter resource store which is + used to hold configuration information associated with the + server instance. + +@Return (void *) Handle to Parameter resource store to be used for + subsequent parameter value queries + +*/ /**************************************************************************/ +void *SrvInitParamOpen(void); + +/*************************************************************************/ /*! 
+@Brief SrvInitParamClose + +@Description Remove a pre-existing connection to the Parameter resource store + given by 'pvState' and release any temporary storage associated + with the 'pvState' mapping handle + +@Input pvState Handle to Parameter resource store + +*/ /**************************************************************************/ +void SrvInitParamClose(void *pvState); + +/*************************************************************************/ /*! +@Brief _SrvInitParamGetBOOL + +@Description Get the current BOOL value for parameter 'pszName' from the + Parameter resource store attached to 'pvState' + +@Input pvState Handle to Parameter resource store + +@Input pszName Name of parameter to look-up + +@Input pbDefault Value to return if parameter not found + +@Output pbValue Value of parameter 'pszName' or 'pbDefault' + if not found + +*/ /**************************************************************************/ +void _SrvInitParamGetBOOL( + void *pvState, + const IMG_CHAR *pszName, + const IMG_BOOL *pbDefault, + IMG_BOOL *pbValue +); + +/*! Get the BOOL value for parameter 'name' from the parameter resource store + * attached to 'state'. */ +#define SrvInitParamGetBOOL(state, name, value) \ + _SrvInitParamGetBOOL(state, # name, & __SrvInitParam_ ## name, &(value)) + +/*! Initialise FLAG type parameter identified by 'name'. */ +#define SrvInitParamInitFLAG(name, defval, dummy) \ + static const IMG_BOOL __SrvInitParam_ ## name = defval; + +/*! Initialise BOOL type parameter identified by 'name'. */ +#define SrvInitParamInitBOOL(name, defval, dummy) \ + static const IMG_BOOL __SrvInitParam_ ## name = defval; + +/*************************************************************************/ /*! 
+@Brief _SrvInitParamGetUINT32 + +@Description Get the current IMG_UINT32 value for parameter 'pszName' + from the Parameter resource store attached to 'pvState' + +@Input pvState Handle to Parameter resource store + +@Input pszName Name of parameter to look-up + +@Input pui32Default Value to return if parameter not found + +@Output pui32Value Value of parameter 'pszName' or + 'pui32Default' if not found + +*/ /**************************************************************************/ +void _SrvInitParamGetUINT32( + void *pvState, + const IMG_CHAR *pszName, + const IMG_UINT32 *pui32Default, + IMG_UINT32 *pui32Value +); + +/*! Get the UINT32 value for parameter 'name' from the parameter resource store + * attached to 'state'. */ +#define SrvInitParamGetUINT32(state, name, value) \ + _SrvInitParamGetUINT32(state, # name, & __SrvInitParam_ ## name, &(value)) + +/*! Initialise UINT32 type parameter identified by 'name'. */ +#define SrvInitParamInitUINT32(name, defval, dummy) \ + static const IMG_UINT32 __SrvInitParam_ ## name = defval; + +/*! Initialise UINT64 type parameter identified by 'name'. */ +#define SrvInitParamInitUINT64(name, defval, dummy) \ + static const IMG_UINT64 __SrvInitParam_ ## name = defval; + +/*! @cond Doxygen_Suppress */ +#define SrvInitParamUnreferenced(name) \ + PVR_UNREFERENCED_PARAMETER( __SrvInitParam_ ## name ) +/*! @endcond */ + +/*************************************************************************/ /*! 
+@Brief _SrvInitParamGetUINT32BitField + +@Description Get the current IMG_UINT32 bitfield value for parameter + 'pszBaseName' from the Parameter resource store + attached to 'pvState' + +@Input pvState Handle to Parameter resource store + +@Input pszBaseName Bitfield parameter name to search for + +@Input uiDefault Default return value if parameter not found + +@Input psLookup Bitfield array to traverse + +@Input uiSize number of elements in 'psLookup' + +@Output puiValue Value of bitfield or 'uiDefault' if + parameter not found +*/ /**************************************************************************/ +void _SrvInitParamGetUINT32BitField( + void *pvState, + const IMG_CHAR *pszBaseName, + IMG_UINT32 uiDefault, + const SRV_INIT_PARAM_UINT32_LOOKUP *psLookup, + IMG_UINT32 uiSize, + IMG_UINT32 *puiValue +); + +/*! Initialise UINT32 bitfield type parameter identified by 'name' with + * 'inival' value and 'lookup' look up array. */ +#define SrvInitParamInitUINT32Bitfield(name, inival, lookup) \ + static IMG_UINT32 __SrvInitParam_ ## name = inival; \ + static SRV_INIT_PARAM_UINT32_LOOKUP * \ + __SrvInitParamLookup_ ## name = &lookup[0]; \ + static const IMG_UINT32 __SrvInitParamSize_ ## name = \ + ARRAY_SIZE(lookup); + +/*! Get the UINT32 bitfield value for parameter 'name' from the parameter + * resource store attached to 'state'. */ +#define SrvInitParamGetUINT32BitField(state, name, value) \ + _SrvInitParamGetUINT32BitField(state, # name, __SrvInitParam_ ## name, __SrvInitParamLookup_ ## name, __SrvInitParamSize_ ## name, &(value)) + +/*************************************************************************/ /*! 
+@Brief _SrvInitParamGetUINT32List + +@Description Get the current IMG_UINT32 list value for the specified + parameter 'pszName' from the Parameter resource store + attached to 'pvState' + +@Input pvState Handle to Parameter resource store + +@Input pszName Parameter list name to search for + +@Input uiDefault Default value to return if 'pszName' is + not set within 'pvState' + +@Input psLookup parameter list to traverse + +@Input uiSize number of elements in 'psLookup' list + +@Output puiValue value of located list element or + 'uiDefault' if parameter not found + +*/ /**************************************************************************/ +void _SrvInitParamGetUINT32List( + void *pvState, + const IMG_CHAR *pszName, + IMG_UINT32 uiDefault, + const SRV_INIT_PARAM_UINT32_LOOKUP *psLookup, + IMG_UINT32 uiSize, + IMG_UINT32 *puiValue +); + +/*! Get the UINT32 list value for parameter 'name' from the parameter + * resource store attached to 'state'. */ +#define SrvInitParamGetUINT32List(state, name, value) \ + _SrvInitParamGetUINT32List(state, # name, __SrvInitParam_ ## name, __SrvInitParamLookup_ ## name, __SrvInitParamSize_ ## name, &(value)) + +/*! Initialise UINT32 list type parameter identified by 'name' with + * 'defval' default value and 'lookup' look up list. */ +#define SrvInitParamInitUINT32List(name, defval, lookup) \ + static IMG_UINT32 __SrvInitParam_ ## name = defval; \ + static SRV_INIT_PARAM_UINT32_LOOKUP * \ + __SrvInitParamLookup_ ## name = &lookup[0]; \ + static const IMG_UINT32 __SrvInitParamSize_ ## name = \ + ARRAY_SIZE(lookup); + +/*************************************************************************/ /*! 
+@Brief _SrvInitParamGetSTRING + +@Description Get the contents of the specified parameter string 'pszName' + from the Parameter resource store attached to 'pvState' + +@Input pvState Handle to Parameter resource store + +@Input pszName Parameter string name to search for + +@Input psDefault Default string to return if 'pszName' is + not set within 'pvState' + +@Input size Size of output 'pBuffer' + +@Output pBuffer Output copy of 'pszName' contents or + copy of 'psDefault' if 'pszName' is not + set within 'pvState' + +*/ /**************************************************************************/ +void _SrvInitParamGetSTRING( + void *pvState, + const IMG_CHAR *pszName, + const IMG_CHAR *psDefault, + IMG_CHAR *pBuffer, + size_t size +); + +/*! Initialise STRING type parameter identified by 'name' with 'defval' default + * value. */ +#define SrvInitParamInitSTRING(name, defval, dummy) \ + static const IMG_CHAR *__SrvInitParam_ ## name = defval; + +/*! Get the STRING value for parameter 'name' from the parameter resource store + * attached to 'state'. */ +#define SrvInitParamGetSTRING(state, name, buffer, size) \ + _SrvInitParamGetSTRING(state, # name, __SrvInitParam_ ## name, buffer, size) + +#if defined(__cplusplus) +} +#endif + +#endif /* defined(LINUX) && defined(__KERNEL__) */ + +#endif /* OS_SRVINIT_PARAM_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/osconnection_server.h b/drivers/mcst/gpu-imgtec/services/server/include/osconnection_server.h new file mode 100644 index 000000000000..aed24f285778 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/osconnection_server.h @@ -0,0 +1,121 @@ +/**************************************************************************/ /*! +@File +@Title Server side connection management +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description API for OS specific callbacks from server side connection + management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ +#ifndef _OSCONNECTION_SERVER_H_ +#define _OSCONNECTION_SERVER_H_ + +#include "handle.h" +#include "osfunc.h" + + +#if defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) +PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData); +PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData); + +PVRSRV_ERROR OSConnectionSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase); + +PVRSRV_DEVICE_NODE* OSGetDevNode(CONNECTION_DATA *psConnection); + +#else /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */ +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSConnectionPrivateDataInit) +#endif +/*************************************************************************/ /*! +@Function OSConnectionPrivateDataInit +@Description Allocates and initialises any OS-specific private data + relating to a connection. + Called from PVRSRVCommonConnectionConnect(). +@Input pvOSData pointer to any OS private data +@Output phOsPrivateData handle to the created connection + private data +@Return PVRSRV_OK on success, a failure code otherwise. 
+*/ /**************************************************************************/ +static INLINE PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData) +{ + PVR_UNREFERENCED_PARAMETER(phOsPrivateData); + PVR_UNREFERENCED_PARAMETER(pvOSData); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSConnectionPrivateDataDeInit) +#endif +/*************************************************************************/ /*! +@Function OSConnectionPrivateDataDeInit +@Description Frees previously allocated OS-specific private data + relating to a connection. +@Input hOsPrivateData handle to the connection private data + to be freed +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +static INLINE PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData) +{ + PVR_UNREFERENCED_PARAMETER(hOsPrivateData); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSConnectionSetHandleOptions) +#endif +static INLINE PVRSRV_ERROR OSConnectionSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase) +{ + PVR_UNREFERENCED_PARAMETER(psHandleBase); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSGetDevNode) +#endif +static INLINE PVRSRV_DEVICE_NODE* OSGetDevNode(CONNECTION_DATA *psConnection) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + + return NULL; +} +#endif /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */ + + +#endif /* _OSCONNECTION_SERVER_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/osdi_impl.h b/drivers/mcst/gpu-imgtec/services/server/include/osdi_impl.h new file mode 100644 index 000000000000..14a7a94fee10 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/osdi_impl.h @@ -0,0 +1,185 @@ +/*************************************************************************/ /*! +@File +@Title Functions and types for creating Debug Info implementations. 
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef OSDI_IMPL_H +#define OSDI_IMPL_H + +#include "di_common.h" + +/*! Implementation callbacks. Those operations are performed on native + * implementation handles. */ +typedef struct OSDI_IMPL_ENTRY_CB +{ + /*! @Function pfnVPrintf + * + * @Description + * Implementation of the 'vprintf' operation. + * + * @Input pvNativeHandle native implementation handle + * @Input pszFmt NUL-terminated format string + * @Input va_list variable length argument list + */ + void (*pfnVPrintf)(void *pvNativeHandle, const IMG_CHAR *pszFmt, va_list); + + /*! @Function pfnPuts + * + * @Description + * Implementation of the 'puts' operation. + * + * @Input pvNativeHandle native implementation handle + * @Input pszStr NUL-terminated string + */ + void (*pfnPuts)(void *pvNativeHandle, const IMG_CHAR *pszStr); + + /*! @Function pfnHasOverflowed + * + * @Description + * Checks if the native implementation's buffer has overflowed. + * + * @Input pvNativeHandle native implementation handle + */ + IMG_BOOL (*pfnHasOverflowed)(void *pvNativeHandle); +} OSDI_IMPL_ENTRY_CB; + +/*! Debug Info entry specialisation. */ +typedef struct OSDI_IMPL_ENTRY +{ + /*! Pointer to the private data. The data originates from DICreateEntry() + * function. */ + void *pvPrivData; + /*! Pointer to the implementation native handle. */ + void *pvNative; + /*! Implementation entry callbacks. 
*/ + OSDI_IMPL_ENTRY_CB *psCb; +} OSDI_IMPL_ENTRY; + +/*! Debug Info implementation callbacks. */ +typedef struct OSDI_IMPL_CB +{ + /*! Initialise implementation callback. + */ + PVRSRV_ERROR (*pfnInit)(void); + + /*! De-initialise implementation callback. + */ + void (*pfnDeInit)(void); + + /*! @Function pfnCreateEntry + * + * @Description + * Creates entry of eType type with pszName in the pvNativeGroup parent + * group. The entry is an abstract term which depends on the implementation, + * e.g.: a file in DebugFS. + * + * @Input pszName: name of the entry + * @Input eType: type of the entry + * @Input psIterCb: iterator implementation for the entry + * @Input pvPrivData: data that will be passed to the iterator callbacks + * in OSDI_IMPL_ENTRY - it can be retrieved by calling + * DIGetPrivData() function + * @Input pvNativeGroup: implementation specific handle to the parent group + * + * @Output pvNativeEntry: implementation specific handle to the entry + * + * return PVRSRV_ERROR error code + */ + PVRSRV_ERROR (*pfnCreateEntry)(const IMG_CHAR *pszName, + DI_ENTRY_TYPE eType, + const DI_ITERATOR_CB *psIterCb, + void *pvPrivData, + void *pvNativeGroup, + void **pvNativeEntry); + + /*! @Function pfnDestroyEntry + * + * @Description + * Destroys native entry. + * + * @Input psNativeEntry: handle to the entry + */ + void (*pfnDestroyEntry)(void *psNativeEntry); + + /*! @Function pfnCreateGroup + * + * @Description + * Creates group with pszName in the psNativeParentGroup parent group. + * The group is an abstract term which depends on the implementation, + * e.g.: a directory in DebugFS. + * + * @Input pszName: name of the entry + * @Input psNativeParentGroup: implementation specific handle to the parent + * group + * + * @Output psNativeGroup: implementation specific handle to the group + * + * return PVRSRV_ERROR error code + */ + PVRSRV_ERROR (*pfnCreateGroup)(const IMG_CHAR *pszName, + void *psNativeParentGroup, + void **psNativeGroup); + + /*! 
@Function pfnDestroyGroup + * + * @Description + * Destroys native group. + * + * @Input psNativeGroup: handle to the group + */ + void (*pfnDestroyGroup)(void *psNativeGroup); +} OSDI_IMPL_CB; + +/*! @Function DIRegisterImplementation + * + * @Description + * Registers Debug Info implementations with the framework. The framework takes + * the ownership of the implementation and will clean up the resources when + * it's de-initialised. + * + * @Input pszName: name of the implementation + * @Input psImplCb: implementation callbacks + * + * @Return PVRSRV_ERROR error code + */ +PVRSRV_ERROR DIRegisterImplementation(const IMG_CHAR *pszName, + const OSDI_IMPL_CB *psImplCb); + +#endif /* OSDI_IMPL_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/osfunc.h b/drivers/mcst/gpu-imgtec/services/server/include/osfunc.h new file mode 100644 index 000000000000..0f4b37b6e2a1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/osfunc.h @@ -0,0 +1,1599 @@ +/*************************************************************************/ /*! +@File +@Title OS functions header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description OS specific API definitions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifdef DEBUG_RELEASE_BUILD +#pragma optimize( "", off ) +#define DEBUG 1 +#endif + +#ifndef __OSFUNC_H__ +/*! @cond Doxygen_Suppress */ +#define __OSFUNC_H__ +/*! 
@endcond */ + +#if defined(LINUX) && defined(__KERNEL__) +#include "kernel_nospec.h" +#if !defined(NO_HARDWARE) +#include <asm/io.h> +#endif +#endif + +#include <stdarg.h> + +#if defined(__QNXNTO__) +#include <stdio.h> +#include <stdarg.h> +#endif + +#if defined(INTEGRITY_OS) +#include <stdio.h> +#include <stdarg.h> +#endif + +#include "img_types.h" +#include "img_defs.h" +#include "device.h" +#include "pvrsrv_device.h" +#include "cache_ops.h" +#include "osfunc_common.h" + +/****************************************************************************** + * Static defines + *****************************************************************************/ +/*! + * Returned by OSGetCurrentProcessID() and OSGetCurrentThreadID() if the OS + * is currently operating in the interrupt context. + */ +#define KERNEL_ID 0xffffffffL + +#if defined(LINUX) && defined(__KERNEL__) +#define OSConfineArrayIndexNoSpeculation(index, size) array_index_nospec((index), (size)) +#elif defined(__QNXNTO__) +#define OSConfineArrayIndexNoSpeculation(index, size) (index) +#define PVRSRV_MISSING_NO_SPEC_IMPL +#elif defined(INTEGRITY_OS) +#define OSConfineArrayIndexNoSpeculation(index, size) (index) +#define PVRSRV_MISSING_NO_SPEC_IMPL +#else +/*************************************************************************/ /*! +@Function OSConfineArrayIndexNoSpeculation +@Description This macro aims to avoid code exposure to Cache Timing + Side-Channel Mechanisms which rely on speculative code + execution (Variant 1). It does so by ensuring a value to be + used as an array index will be set to zero if outside of the + bounds of the array, meaning any speculative execution of code + which uses this suitably adjusted index value will not then + attempt to load data from memory outside of the array bounds. + Code calling this macro must still first verify that the + original unmodified index value is within the bounds of the + array, and should then only use the modified value returned + by this function when accessing the array itself. + NB.
If no OS-specific implementation of this macro is + defined, the original index is returned unmodified and no + protection against the potential exploit is provided. +@Input index The original array index value that would be used to + access the array. +@Input size The number of elements in the array being accessed. +@Return The value to use for the array index, modified so that it + remains within array bounds. +*/ /**************************************************************************/ +#define OSConfineArrayIndexNoSpeculation(index, size) (index) +#if !defined(DOXYGEN) +#define PVRSRV_MISSING_NO_SPEC_IMPL +#endif +#endif + +/*************************************************************************/ /*! +@Function OSClockns64 +@Description This function returns the number of ticks since system boot + expressed in nanoseconds. Unlike OSClockns, OSClockns64 has + a near 64-bit range. +@Return The 64-bit clock value, in nanoseconds. +*/ /**************************************************************************/ +IMG_UINT64 OSClockns64(void); + +/*************************************************************************/ /*! +@Function OSClockus64 +@Description This function returns the number of ticks since system boot + expressed in microseconds. Unlike OSClockus, OSClockus64 has + a near 64-bit range. +@Return The 64-bit clock value, in microseconds. +*/ /**************************************************************************/ +IMG_UINT64 OSClockus64(void); + +/*************************************************************************/ /*! +@Function OSClockus +@Description This function returns the number of ticks since system boot + in microseconds. +@Return The 32-bit clock value, in microseconds. +*/ /**************************************************************************/ +IMG_UINT32 OSClockus(void); + +/*************************************************************************/ /*! 
+@Function OSClockms +@Description This function returns the number of ticks since system boot + in milliseconds. +@Return The 32-bit clock value, in milliseconds. +*/ /**************************************************************************/ +IMG_UINT32 OSClockms(void); + +/*************************************************************************/ /*! +@Function OSClockMonotonicns64 +@Description This function returns a clock value based on the system + monotonic clock. +@Output pui64Time The 64-bit clock value, in nanoseconds. +@Return Error Code. +*/ /**************************************************************************/ +PVRSRV_ERROR OSClockMonotonicns64(IMG_UINT64 *pui64Time); + +/*************************************************************************/ /*! +@Function OSClockMonotonicus64 +@Description This function returns a clock value based on the system + monotonic clock. +@Output pui64Time The 64-bit clock value, in microseconds. +@Return Error Code. +*/ /**************************************************************************/ +PVRSRV_ERROR OSClockMonotonicus64(IMG_UINT64 *pui64Time); + +/*************************************************************************/ /*! +@Function OSClockMonotonicRawns64 +@Description This function returns a clock value based on the system + monotonic raw clock. +@Return 64bit ns timestamp +*/ /**************************************************************************/ +IMG_UINT64 OSClockMonotonicRawns64(void); + +/*************************************************************************/ /*! +@Function OSClockMonotonicRawus64 +@Description This function returns a clock value based on the system + monotonic raw clock. +@Return 64bit us timestamp +*/ /**************************************************************************/ +IMG_UINT64 OSClockMonotonicRawus64(void); + +/*************************************************************************/ /*! 
+@Function OSGetPageSize +@Description This function returns the page size. + If the OS is not using memory mappings it should return a + default value of 4096. +@Return The size of a page, in bytes. +*/ /**************************************************************************/ +size_t OSGetPageSize(void); + +/*************************************************************************/ /*! +@Function OSGetPageShift +@Description This function returns the page size expressed as a power of + two. A number of pages, left-shifted by this value, gives the + equivalent size in bytes. + If the OS is not using memory mappings it should return a + default value of 12. +@Return The page size expressed as a power of two. +*/ /**************************************************************************/ +size_t OSGetPageShift(void); + +/*************************************************************************/ /*! +@Function OSGetPageMask +@Description This function returns a bitmask that may be applied to an + address to mask off the least-significant bits so as to + leave the start address of the page containing that address. +@Return The page mask. +*/ /**************************************************************************/ +size_t OSGetPageMask(void); + +/*************************************************************************/ /*! +@Function OSGetOrder +@Description This function returns the order of power of two for a given + size. Eg. for a uSize of 4096 bytes the function would + return 12 (4096 = 2^12). +@Input uSize The size in bytes. +@Return The order of power of two. +*/ /**************************************************************************/ +size_t OSGetOrder(size_t uSize); + +/*************************************************************************/ /*! +@Function OSGetRAMSize +@Description This function returns the total amount of GPU-addressable + memory provided by the system. 
In other words, after loading + the driver this would be the largest allocation an + application would reasonably expect to be able to make. + Note that this is function is not expected to return the + current available memory but the amount which would be + available on startup. +@Return Total GPU-addressable memory size, in bytes. +*/ /**************************************************************************/ +IMG_UINT64 OSGetRAMSize(void); + +/*************************************************************************/ /*! +@Description Pointer to a Mid-level Interrupt Service Routine (MISR). +@Input pvData Pointer to MISR specific data. +*/ /**************************************************************************/ +typedef void (*PFN_MISR)(void *pvData); + +/*************************************************************************/ /*! +@Description Pointer to a thread entry point function. +@Input pvData Pointer to thread specific data. +*/ /**************************************************************************/ +typedef void (*PFN_THREAD)(void *pvData); + +/*************************************************************************/ /*! +@Function OSChangeSparseMemCPUAddrMap +@Description This function changes the CPU mapping of the underlying + sparse allocation. It is used by a PMR 'factory' + implementation if that factory supports sparse + allocations. 
+@Input psPageArray array representing the pages in the + sparse allocation +@Input sCpuVAddrBase the virtual base address of the sparse + allocation ('first' page) +@Input sCpuPAHeapBase the physical address of the virtual + base address 'sCpuVAddrBase' +@Input ui32AllocPageCount the number of pages referenced in + 'pai32AllocIndices' +@Input pai32AllocIndices list of indices of pages within + 'psPageArray' that we now want to + allocate and map +@Input ui32FreePageCount the number of pages referenced in + 'pai32FreeIndices' +@Input pai32FreeIndices list of indices of pages within + 'psPageArray' we now want to + unmap and free +@Input bIsLMA flag indicating if the sparse allocation + is from LMA or UMA memory +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSChangeSparseMemCPUAddrMap(void **psPageArray, + IMG_UINT64 sCpuVAddrBase, + IMG_CPU_PHYADDR sCpuPAHeapBase, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_BOOL bIsLMA); + +/*************************************************************************/ /*! +@Function OSInstallMISR +@Description Installs a Mid-level Interrupt Service Routine (MISR) + which handles higher-level processing of interrupts from + the device (GPU). + An MISR runs outside of interrupt context, and so may be + descheduled. This means it can contain code that would + not be permitted in the LISR. + An MISR is invoked when OSScheduleMISR() is called. This + call should be made by installed LISR once it has completed + its interrupt processing. + Multiple MISRs may be installed by the driver to handle + different causes of interrupt. +@Input pfnMISR pointer to the function to be installed + as the MISR +@Input hData private data provided to the MISR +@Input pszMisrName Name describing purpose of MISR worker thread + (Must be a string literal). 
+@Output hMISRData handle to the installed MISR (to be used + for a subsequent uninstall) +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData, + PFN_MISR pfnMISR, + void *hData, + const IMG_CHAR *pszMisrName); + +/*************************************************************************/ /*! +@Function OSUninstallMISR +@Description Uninstalls a Mid-level Interrupt Service Routine (MISR). +@Input hMISRData handle to the installed MISR +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData); + +/*************************************************************************/ /*! +@Function OSScheduleMISR +@Description Schedules a Mid-level Interrupt Service Routine (MISR) to be + executed. An MISR should be executed outside of interrupt + context, for example in a work queue. +@Input hMISRData handle to the installed MISR +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData); + +/*************************************************************************/ /*! +@Description Pointer to a function implementing debug dump of thread-specific + data. +@Input pfnDumpDebugPrintf Used to specify the print function used + to dump any debug information. If this + argument is NULL then a default print + function will be used. +@Input pvDumpDebugFile File identifier to be passed to the + print function if specified. +*/ /**************************************************************************/ + +typedef void (*PFN_THREAD_DEBUG_DUMP)(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf, + void *pvDumpDebugFile); + +/*************************************************************************/ /*! 
+@Function OSThreadCreate +@Description Creates a kernel thread and starts it running. The caller + is responsible for informing the thread that it must finish + and return from the pfnThread function. It is not possible + to kill or terminate it. The new thread runs with the default + priority provided by the Operating System. + Note: Kernel threads are freezable which means that they + can be frozen by the kernel on for example driver suspend. + Because of that only OSEventObjectWaitKernel() function should + be used to put kernel threads in waiting state. +@Output phThread Returned handle to the thread. +@Input pszThreadName Name to assign to the thread. +@Input pfnThread Thread entry point function. +@Input pfnDebugDumpCB Used to dump info of the created thread +@Input bIsSupportingThread Set, if summary of this thread needs to + be dumped in debug_dump +@Input hData Thread specific data pointer for pfnThread(). +@Return Standard PVRSRV_ERROR error code. +*/ /**************************************************************************/ + +PVRSRV_ERROR OSThreadCreate(IMG_HANDLE *phThread, + IMG_CHAR *pszThreadName, + PFN_THREAD pfnThread, + PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB, + IMG_BOOL bIsSupportingThread, + void *hData); + +/*! Available priority levels for the creation of a new Kernel Thread. */ +typedef enum priority_levels +{ + OS_THREAD_NOSET_PRIORITY = 0, /* With this option the priority level is the default for the given OS */ + OS_THREAD_HIGHEST_PRIORITY, + OS_THREAD_HIGH_PRIORITY, + OS_THREAD_NORMAL_PRIORITY, + OS_THREAD_LOW_PRIORITY, + OS_THREAD_LOWEST_PRIORITY, + OS_THREAD_LAST_PRIORITY /* This must be always the last entry */ +} OS_THREAD_LEVEL; + +/*************************************************************************/ /*! +@Function OSThreadCreatePriority +@Description As OSThreadCreate, this function creates a kernel thread and + starts it running. 
The difference is that with this function + is possible to specify the priority used to schedule the new + thread. + +@Output phThread Returned handle to the thread. +@Input pszThreadName Name to assign to the thread. +@Input pfnThread Thread entry point function. +@Input pfnDebugDumpCB Used to dump info of the created thread +@Input bIsSupportingThread Set, if summary of this thread needs to + be dumped in debug_dump +@Input hData Thread specific data pointer for pfnThread(). +@Input eThreadPriority Priority level to assign to the new thread. +@Return Standard PVRSRV_ERROR error code. +*/ /**************************************************************************/ +PVRSRV_ERROR OSThreadCreatePriority(IMG_HANDLE *phThread, + IMG_CHAR *pszThreadName, + PFN_THREAD pfnThread, + PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB, + IMG_BOOL bIsSupportingThread, + void *hData, + OS_THREAD_LEVEL eThreadPriority); + +/*************************************************************************/ /*! +@Function OSThreadDestroy +@Description Waits for the thread to end and then destroys the thread + handle memory. This function will block and wait for the + thread to finish successfully, thereby providing a sync point + for the thread completing its work. No attempt is made to kill + or otherwise terminate the thread. +@Input hThread The thread handle returned by OSThreadCreate(). +@Return Standard PVRSRV_ERROR error code. +*/ /**************************************************************************/ +PVRSRV_ERROR OSThreadDestroy(IMG_HANDLE hThread); + +/*************************************************************************/ /*! +@Function OSMapPhysToLin +@Description Maps physical memory into a linear address range. +@Input BasePAddr physical CPU address +@Input ui32Bytes number of bytes to be mapped +@Input ui32Flags flags denoting the caching mode to be employed + for the mapping (uncached/write-combined, + cached coherent or cached incoherent). 
+ See pvrsrv_memallocflags.h for full flag bit + definitions. +@Return Pointer to the new mapping if successful, NULL otherwise. +*/ /**************************************************************************/ +#ifdef CONFIG_MCST +void *OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, IMG_DEV_PHYADDR BaseDevPAddr, + size_t ui32Bytes, IMG_UINT32 ui32Flags); +#else +void *OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, size_t ui32Bytes, IMG_UINT32 ui32Flags); +#endif +/*************************************************************************/ /*! +@Function OSUnMapPhysToLin +@Description Unmaps physical memory previously mapped by OSMapPhysToLin(). +@Input pvLinAddr the linear mapping to be unmapped +@Input ui32Bytes number of bytes to be unmapped +@Input ui32Flags flags denoting the caching mode that was employed + for the original mapping. +@Return IMG_TRUE if unmapping was successful, IMG_FALSE otherwise. +*/ /**************************************************************************/ +IMG_BOOL OSUnMapPhysToLin(void *pvLinAddr, size_t ui32Bytes, IMG_UINT32 ui32Flags); + +/*************************************************************************/ /*! +@Function OSCPUCacheFlushRangeKM +@Description Clean and invalidate the CPU cache for the specified + address range. +@Input psDevNode device on which the allocation was made +@Input pvVirtStart virtual start address of the range to be + flushed +@Input pvVirtEnd virtual end address of the range to be + flushed +@Input sCPUPhysStart physical start address of the range to be + flushed +@Input sCPUPhysEnd physical end address of the range to be + flushed +@Return None +*/ /**************************************************************************/ +void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd); + +/*************************************************************************/ /*! 
+@Function OSCPUCacheCleanRangeKM +@Description Clean the CPU cache for the specified address range. + This writes out the contents of the cache and clears the + 'dirty' bit (which indicates the physical memory is + consistent with the cache contents). +@Input psDevNode device on which the allocation was made +@Input pvVirtStart virtual start address of the range to be + cleaned +@Input pvVirtEnd virtual end address of the range to be + cleaned +@Input sCPUPhysStart physical start address of the range to be + cleaned +@Input sCPUPhysEnd physical end address of the range to be + cleaned +@Return None +*/ /**************************************************************************/ +void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd); + +/*************************************************************************/ /*! +@Function OSCPUCacheInvalidateRangeKM +@Description Invalidate the CPU cache for the specified address range. + The cache must reload data from those addresses if they + are accessed. +@Input psDevNode device on which the allocation was made +@Input pvVirtStart virtual start address of the range to be + invalidated +@Input pvVirtEnd virtual end address of the range to be + invalidated +@Input sCPUPhysStart physical start address of the range to be + invalidated +@Input sCPUPhysEnd physical end address of the range to be + invalidated +@Return None +*/ /**************************************************************************/ +void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd); + +/*! 
CPU Cache operations address domain type */ +typedef enum +{ + OS_CACHE_OP_ADDR_TYPE_VIRTUAL, /*!< Operation requires CPU virtual address only */ + OS_CACHE_OP_ADDR_TYPE_PHYSICAL, /*!< Operation requires CPU physical address only */ + OS_CACHE_OP_ADDR_TYPE_BOTH /*!< Operation requires both CPU virtual & physical addresses */ +} OS_CACHE_OP_ADDR_TYPE; + +/*************************************************************************/ /*! +@Function OSCPUCacheOpAddressType +@Description Returns the address type (i.e. virtual/physical/both) the CPU + architecture performs cache maintenance operations under. + This is used to infer whether the virtual or physical address + supplied to the OSCPUCacheXXXRangeKM functions can be omitted + when called. +@Return OS_CACHE_OP_ADDR_TYPE +*/ /**************************************************************************/ +OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void); + +/*! CPU Cache attributes available for retrieval, DCache unless specified */ +typedef enum _OS_CPU_CACHE_ATTRIBUTE_ +{ + OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE, /*!< The cache line size */ + OS_CPU_CACHE_ATTRIBUTE_COUNT /*!< The number of attributes (must be last) */ +} OS_CPU_CACHE_ATTRIBUTE; + +/*************************************************************************/ /*! +@Function OSCPUCacheAttributeSize +@Description Returns the size of a given cache attribute. + Typically this function is used to return the cache line + size, but may be extended to return the size of other + cache attributes. +@Input eCacheAttribute the cache attribute whose size should + be returned. +@Return The size of the specified cache attribute, in bytes. +*/ /**************************************************************************/ +IMG_UINT32 OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE eCacheAttribute); + +/*************************************************************************/ /*! 
+@Function OSGetCurrentProcessID +@Description Returns ID of current process (thread group) +@Return ID of current process +*****************************************************************************/ +IMG_PID OSGetCurrentProcessID(void); + +/*************************************************************************/ /*! +@Function OSGetCurrentVirtualProcessID +@Description Returns ID of current process (thread group of current + PID namespace) +@Return ID of current process in PID namespace +*****************************************************************************/ +IMG_PID OSGetCurrentVirtualProcessID(void); + +/*************************************************************************/ /*! +@Function OSGetCurrentProcessName +@Description Gets the name of current process +@Return Process name +*****************************************************************************/ +IMG_CHAR *OSGetCurrentProcessName(void); + +/*************************************************************************/ /*! +@Function OSGetCurrentProcessVASpaceSize +@Description Returns the CPU virtual address space size of current process +@Return Process VA space size +*/ /**************************************************************************/ +IMG_UINT64 OSGetCurrentProcessVASpaceSize(void); + +/*************************************************************************/ /*! +@Function OSGetCurrentThreadID +@Description Returns ID for current thread +@Return ID of current thread +*****************************************************************************/ +uintptr_t OSGetCurrentThreadID(void); + +/*************************************************************************/ /*! +@Function OSGetCurrentClientProcessIDKM +@Description Returns ID of current client process (thread group) which + has made a bridge call into the server. + For some operating systems, this may simply be the current + process id. 
For others, it may be that a dedicated thread + is used to handle the processing of bridge calls and that + some additional processing is required to obtain the ID of + the client process making the bridge call. +@Return ID of current client process +*****************************************************************************/ +IMG_PID OSGetCurrentClientProcessIDKM(void); + +/*************************************************************************/ /*! +@Function OSGetCurrentClientProcessNameKM +@Description Gets the name of current client process +@Return Client process name +*****************************************************************************/ +IMG_CHAR *OSGetCurrentClientProcessNameKM(void); + +/*************************************************************************/ /*! +@Function OSGetCurrentClientThreadIDKM +@Description Returns ID for current client thread + For some operating systems, this may simply be the current + thread id. For others, it may be that a dedicated thread + is used to handle the processing of bridge calls and that + some additional processing is require to obtain the ID of + the client thread making the bridge call. +@Return ID of current client thread +*****************************************************************************/ +uintptr_t OSGetCurrentClientThreadIDKM(void); + +/*************************************************************************/ /*! +@Function OSMemCmp +@Description Compares two blocks of memory for equality. +@Input pvBufA Pointer to the first block of memory +@Input pvBufB Pointer to the second block of memory +@Input uiLen The number of bytes to be compared +@Return Value < 0 if pvBufA is less than pvBufB. + Value > 0 if pvBufB is less than pvBufA. + Value = 0 if pvBufA is equal to pvBufB. 
+*****************************************************************************/ +IMG_INT OSMemCmp(void *pvBufA, void *pvBufB, size_t uiLen); + +/*************************************************************************/ /*! +@Function OSPhyContigPagesAlloc +@Description Allocates a number of contiguous physical pages. + If allocations made by this function are CPU cached then + OSPhyContigPagesClean has to be implemented to write the + cached data to memory. +@Input psDevNode the device for which the allocation is + required +@Input uiSize the size of the required allocation (in bytes) +@Output psMemHandle a returned handle to be used to refer to this + allocation +@Output psDevPAddr the physical address of the allocation +@Return PVRSRV_OK on success, a failure code otherwise. +*****************************************************************************/ +PVRSRV_ERROR OSPhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize, + PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, + IMG_PID uiPid); + +/*************************************************************************/ /*! +@Function OSPhyContigPagesFree +@Description Frees a previous allocation of contiguous physical pages +@Input psDevNode the device on which the allocation was made +@Input psMemHandle the handle of the allocation to be freed +@Return None. +*****************************************************************************/ +void OSPhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle); + +/*************************************************************************/ /*! 
+@Function OSPhyContigPagesMap +@Description Maps the specified allocation of contiguous physical pages + to a kernel virtual address +@Input psDevNode the device on which the allocation was made +@Input psMemHandle the handle of the allocation to be mapped +@Input uiSize the size of the allocation (in bytes) +@Input psDevPAddr the physical address of the allocation +@Output pvPtr the virtual kernel address to which the + allocation is now mapped +@Return PVRSRV_OK on success, a failure code otherwise. +*****************************************************************************/ +PVRSRV_ERROR OSPhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle, + size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr, + void **pvPtr); + +/*************************************************************************/ /*! +@Function OSPhyContigPagesUnmap +@Description Unmaps the kernel mapping for the specified allocation of + contiguous physical pages +@Input psDevNode the device on which the allocation was made +@Input psMemHandle the handle of the allocation to be unmapped +@Input pvPtr the virtual kernel address to which the + allocation is currently mapped +@Return None. +*****************************************************************************/ +void OSPhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle, void *pvPtr); + +/*************************************************************************/ /*! +@Function OSPhyContigPagesClean +@Description Write the content of the specified allocation from CPU cache to + memory from (start + uiOffset) to (start + uiOffset + uiLength) + It is expected to be implemented as a cache clean operation but + it is allowed to fall back to a cache clean + invalidate + (i.e. flush). + If allocations returned by OSPhyContigPagesAlloc are always + uncached this can be implemented as nop. 
+@Input psDevNode device on which the allocation was made +@Input psMemHandle the handle of the allocation to be flushed +@Input uiOffset the offset in bytes from the start of the + allocation from where to start flushing +@Input uiLength the amount to flush from the offset in bytes +@Return PVRSRV_OK on success, a failure code otherwise. +*****************************************************************************/ +PVRSRV_ERROR OSPhyContigPagesClean(PVRSRV_DEVICE_NODE *psDevNode, + PG_HANDLE *psMemHandle, + IMG_UINT32 uiOffset, + IMG_UINT32 uiLength); + + +/*************************************************************************/ /*! +@Function OSInitEnvData +@Description Called to initialise any environment-specific data. This + could include initialising the bridge calling infrastructure + or device memory management infrastructure. +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSInitEnvData(void); + +/*************************************************************************/ /*! +@Function OSDeInitEnvData +@Description The counterpart to OSInitEnvData(). Called to free any + resources which may have been allocated by OSInitEnvData(). +@Return None. +*/ /**************************************************************************/ +void OSDeInitEnvData(void); + +/*************************************************************************/ /*! +@Function OSVSScanf +@Description OS function to support the standard C vsscanf() function. +*/ /**************************************************************************/ +IMG_UINT32 OSVSScanf(const IMG_CHAR *pStr, const IMG_CHAR *pszFormat, ...); + +/*************************************************************************/ /*! +@Function OSStringLCat +@Description OS function to support the BSD C strlcat() function. 
+*/ /**************************************************************************/ +size_t OSStringLCat(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDstSize); + +/*************************************************************************/ /*! +@Function OSSNPrintf +@Description OS function to support the standard C snprintf() function. +@Output pStr char array to print into +@Input ui32Size maximum size of data to write (chars) +@Input pszFormat format string +*/ /**************************************************************************/ +IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR *pszFormat, ...) __printf(3, 4); + +/*************************************************************************/ /*! +@Function OSVSNPrintf +@Description Printf to IMG string using variable args (see stdarg.h). + This is necessary because the '...' notation does not + support nested function calls. +@Input ui32Size maximum size of data to write (chars) +@Input pszFormat format string +@Input vaArgs variable args structure (from stdarg.h) +@Output pStr char array to print into +@Return Number of character written in buffer if successful other wise -1 on error +*/ /**************************************************************************/ +IMG_INT32 OSVSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR* pszFormat, va_list vaArgs) __printf(3, 0); + +/*************************************************************************/ /*! +@Function OSStringLength +@Description OS function to support the standard C strlen() function. +*/ /**************************************************************************/ +size_t OSStringLength(const IMG_CHAR *pStr); + +/*************************************************************************/ /*! +@Function OSStringNLength +@Description Return the length of a string, excluding the terminating null + byte ('\0'), but return at most 'uiCount' bytes. Only the first + 'uiCount' bytes of 'pStr' are interrogated. 
+@Input pStr pointer to the string +@Input uiCount the maximum length to return +@Return Length of the string if less than 'uiCount' bytes, otherwise + 'uiCount'. +*/ /**************************************************************************/ +size_t OSStringNLength(const IMG_CHAR *pStr, size_t uiCount); + +/*************************************************************************/ /*! +@Function OSStringNCompare +@Description OS function to support the standard C strncmp() function. +*/ /**************************************************************************/ +IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2, + size_t uiSize); + +/*************************************************************************/ /*! +@Function OSStringToUINT32 +@Description Changes string to IMG_UINT32. +*/ /**************************************************************************/ +PVRSRV_ERROR OSStringToUINT32(const IMG_CHAR *pStr, IMG_UINT32 ui32Base, + IMG_UINT32 *ui32Result); + +/*************************************************************************/ /*! +@Function OSStringUINT32ToStr +@Description Changes IMG_UINT32 to string +@Input pszBuf Buffer to write output number string +@Input uSize Size of buffer provided, i.e. size of pszBuf +@Input ui32Num Number to convert to string +@Return Returns 0 if buffer is not sufficient to hold the number string, + else returns length of number string +*/ /**************************************************************************/ +IMG_UINT32 OSStringUINT32ToStr(IMG_CHAR *pszBuf, size_t uSize, IMG_UINT32 ui32Num); + +/*************************************************************************/ /*! +@Function OSEventObjectCreate +@Description Create an event object. +@Input pszName name to assign to the new event object. +@Output EventObject the created event object. +@Return PVRSRV_OK on success, a failure code otherwise. 
+*/ /**************************************************************************/ +PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName, + IMG_HANDLE *EventObject); + +/*************************************************************************/ /*! +@Function OSEventObjectDestroy +@Description Destroy an event object. +@Input hEventObject the event object to destroy. +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSEventObjectDestroy(IMG_HANDLE hEventObject); + +/*************************************************************************/ /*! +@Function OSEventObjectSignal +@Description Signal an event object. Any thread waiting on that event + object will be woken. +@Input hEventObject the event object to signal. +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hEventObject); + +/*************************************************************************/ /*! +@Function OSEventObjectWait +@Description Wait for an event object to signal. The function is passed + an OS event object handle (which allows the OS to have the + calling thread wait on the associated event object). + The calling thread will be rescheduled when the associated + event object signals. + If the event object has not signalled after a default timeout + period (defined in EVENT_OBJECT_TIMEOUT_MS), the function + will return with the result code PVRSRV_ERROR_TIMEOUT. + + +@Input hOSEventKM the OS event object handle associated with + the event object. +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM); + +/*************************************************************************/ /*! 
+@Function OSEventObjectWaitKernel +@Description Wait for an event object to signal. The function is passed + an OS event object handle (which allows the OS to have the + calling thread wait on the associated event object). + The calling thread will be rescheduled when the associated + event object signals. + If the event object has not signalled after a default timeout + period (defined in EVENT_OBJECT_TIMEOUT_MS), the function + will return with the result code PVRSRV_ERROR_TIMEOUT. + + Note: This function should be used only by kernel thread. + This is because all kernel threads are freezable and + this function allows the kernel to freeze the threads + when waiting. + + See OSEventObjectWait() for more details. + +@Input hOSEventKM the OS event object handle associated with + the event object. +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +#if defined(LINUX) && defined(__KERNEL__) +PVRSRV_ERROR OSEventObjectWaitKernel(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus); +#else +#define OSEventObjectWaitKernel OSEventObjectWaitTimeout +#endif + +/*************************************************************************/ /*! +@Function OSSuspendTaskInterruptible +@Description Suspend the current task into interruptible state. +@Return none. +*/ /**************************************************************************/ +#if defined(LINUX) && defined(__KERNEL__) +void OSSuspendTaskInterruptible(void); +#endif + +/*************************************************************************/ /*! +@Function OSEventObjectWaitTimeout +@Description Wait for an event object to signal or timeout. The function + is passed an OS event object handle (which allows the OS to + have the calling thread wait on the associated event object). + The calling thread will be rescheduled when the associated + event object signals. 
+ If the event object has not signalled after the specified + timeout period (passed in 'uiTimeoutus'), the function + will return with the result code PVRSRV_ERROR_TIMEOUT. +@Input hOSEventKM the OS event object handle associated with + the event object. +@Input uiTimeoutus the timeout period (in usecs) +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSEventObjectWaitTimeout(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus); + +/*************************************************************************/ /*! +@Function OSEventObjectDumpDebugInfo +@Description Emits debug counters/stats related to the event object passed +@Input hOSEventKM the OS event object handle associated with + the event object. +@Return None. +*/ /**************************************************************************/ +void OSEventObjectDumpDebugInfo(IMG_HANDLE hOSEventKM); + +/*************************************************************************/ /*! +@Function OSEventObjectOpen +@Description Open an OS handle on the specified event object. + This OS handle may then be used to make a thread wait for + that event object to signal. +@Input hEventObject Event object handle. +@Output phOSEvent OS handle to the returned event object. +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSEventObjectOpen(IMG_HANDLE hEventObject, + IMG_HANDLE *phOSEvent); + +/*************************************************************************/ /*! +@Function OSEventObjectClose +@Description Close an OS handle previously opened for an event object. +@Input hOSEventKM OS event object handle to close. +@Return PVRSRV_OK on success, a failure code otherwise. 
+*/ /**************************************************************************/ +PVRSRV_ERROR OSEventObjectClose(IMG_HANDLE hOSEventKM); + +/*************************************************************************/ /*! +@Function OSWaitus +@Description Implements a busy wait of the specified number of microseconds. + This function does NOT release thread quanta. +@Input ui32Timeus The duration of the wait period (in us) +@Return None. +*/ /**************************************************************************/ +void OSWaitus(IMG_UINT32 ui32Timeus); + +/*************************************************************************/ /*! +@Function OSSleepms +@Description Implements a sleep of the specified number of milliseconds. + This function may allow pre-emption, meaning the thread + may potentially not be rescheduled for a longer period. +@Input ui32Timems The duration of the sleep (in ms) +@Return None. +*/ /**************************************************************************/ +void OSSleepms(IMG_UINT32 ui32Timems); + +/*************************************************************************/ /*! +@Function OSReleaseThreadQuanta +@Description Relinquishes the current thread's execution time-slice, + permitting the OS scheduler to schedule another thread. +@Return None. +*/ /**************************************************************************/ +void OSReleaseThreadQuanta(void); + + +/*************************************************************************/ /*! +*/ /**************************************************************************/ + +/* The access method is dependent on the location of the physical memory that + * makes up the PhyHeaps defined for the system and the CPU architecture. These + * macros may change in future to accommodate different access requirements. + */ +/*! Performs a 32 bit word read from the device memory. */ +#define OSReadDeviceMem32(addr) (*((volatile IMG_UINT32 __force *)(addr))) +/*! 
Performs a 32 bit word write to the device memory. */ +#define OSWriteDeviceMem32(addr, val) (*((volatile IMG_UINT32 __force *)(addr)) = (IMG_UINT32)(val)) + +#if defined(LINUX) && defined(__KERNEL__) && !defined(NO_HARDWARE) + #define OSReadHWReg8(addr, off) ((IMG_UINT8)readb((IMG_BYTE __iomem *)(addr) + (off))) + #define OSReadHWReg16(addr, off) ((IMG_UINT16)readw((IMG_BYTE __iomem *)(addr) + (off))) + #define OSReadHWReg32(addr, off) ((IMG_UINT32)readl((IMG_BYTE __iomem *)(addr) + (off))) + + /* Little endian support only */ + #define OSReadHWReg64(addr, off) \ + ({ \ + __typeof__(addr) _addr = addr; \ + __typeof__(off) _off = off; \ + (IMG_UINT64) \ + ( \ + ( (IMG_UINT64)(readl((IMG_BYTE __iomem *)(_addr) + (_off) + 4)) << 32) \ + | readl((IMG_BYTE __iomem *)(_addr) + (_off)) \ + ); \ + }) + + #define OSWriteHWReg8(addr, off, val) writeb((IMG_UINT8)(val), (IMG_BYTE __iomem *)(addr) + (off)) + #define OSWriteHWReg16(addr, off, val) writew((IMG_UINT16)(val), (IMG_BYTE __iomem *)(addr) + (off)) + #define OSWriteHWReg32(addr, off, val) writel((IMG_UINT32)(val), (IMG_BYTE __iomem *)(addr) + (off)) + /* Little endian support only */ + #define OSWriteHWReg64(addr, off, val) do \ + { \ + __typeof__(addr) _addr = addr; \ + __typeof__(off) _off = off; \ + __typeof__(val) _val = val; \ + writel((IMG_UINT32)((_val) & 0xffffffff), (IMG_BYTE __iomem *)(_addr) + (_off)); \ + writel((IMG_UINT32)(((IMG_UINT64)(_val) >> 32) & 0xffffffff), (IMG_BYTE __iomem *)(_addr) + (_off) + 4); \ + } while (0) + + +#elif defined(NO_HARDWARE) + /* OSReadHWReg operations skipped in no hardware builds */ + #define OSReadHWReg8(addr, off) (0x4eU) + #define OSReadHWReg16(addr, off) (0x3a4eU) + #define OSReadHWReg32(addr, off) (0x30f73a4eU) +#if defined(__QNXNTO__) && __SIZEOF_LONG__ == 8 + /* This is needed for 64-bit QNX builds where the size of a long is 64 bits */ + #define OSReadHWReg64(addr, off) (0x5b376c9d30f73a4eUL) +#else + #define OSReadHWReg64(addr, off) (0x5b376c9d30f73a4eULL) +#endif 
+ + #define OSWriteHWReg8(addr, off, val) + #define OSWriteHWReg16(addr, off, val) + #define OSWriteHWReg32(addr, off, val) + #define OSWriteHWReg64(addr, off, val) + +#else +/*************************************************************************/ /*! +@Function OSReadHWReg8 +@Description Read from an 8-bit memory-mapped device register. + The implementation should not permit the compiler to + reorder the I/O sequence. + The implementation should ensure that for a NO_HARDWARE + build the code does not attempt to read from a location + but instead returns a constant value. +@Input pvLinRegBaseAddr The virtual base address of the register + block. +@Input ui32Offset The byte offset from the base address of + the register to be read. +@Return The byte read. +*/ /**************************************************************************/ + IMG_UINT8 OSReadHWReg8(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset); + +/*************************************************************************/ /*! +@Function OSReadHWReg16 +@Description Read from a 16-bit memory-mapped device register. + The implementation should not permit the compiler to + reorder the I/O sequence. + The implementation should ensure that for a NO_HARDWARE + build the code does not attempt to read from a location + but instead returns a constant value. +@Input pvLinRegBaseAddr The virtual base address of the register + block. +@Input ui32Offset The byte offset from the base address of + the register to be read. +@Return The word read. +*/ /**************************************************************************/ + IMG_UINT16 OSReadHWReg16(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset); + +/*************************************************************************/ /*! +@Function OSReadHWReg32 +@Description Read from a 32-bit memory-mapped device register. + The implementation should not permit the compiler to + reorder the I/O sequence. 
+ The implementation should ensure that for a NO_HARDWARE + build the code does not attempt to read from a location + but instead returns a constant value. +@Input pvLinRegBaseAddr The virtual base address of the register + block. +@Input ui32Offset The byte offset from the base address of + the register to be read. +@Return The long word read. +*/ /**************************************************************************/ + IMG_UINT32 OSReadHWReg32(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset); + +/*************************************************************************/ /*! +@Function OSReadHWReg64 +@Description Read from a 64-bit memory-mapped device register. + The implementation should not permit the compiler to + reorder the I/O sequence. + The implementation should ensure that for a NO_HARDWARE + build the code does not attempt to read from a location + but instead returns a constant value. +@Input pvLinRegBaseAddr The virtual base address of the register + block. +@Input ui32Offset The byte offset from the base address of + the register to be read. +@Return The long long word read. +*/ /**************************************************************************/ + IMG_UINT64 OSReadHWReg64(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset); + +/*************************************************************************/ /*! +@Function OSWriteHWReg8 +@Description Write to an 8-bit memory-mapped device register. + The implementation should not permit the compiler to + reorder the I/O sequence. + The implementation should ensure that for a NO_HARDWARE + build the code does not attempt to write to a location. +@Input pvLinRegBaseAddr The virtual base address of the register + block. +@Input ui32Offset The byte offset from the base address of + the register to be written to. +@Input ui8Value The byte to be written to the register. +@Return None. 
+*/ /**************************************************************************/ + void OSWriteHWReg8(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT8 ui8Value); + +/*************************************************************************/ /*! +@Function OSWriteHWReg16 +@Description Write to a 16-bit memory-mapped device register. + The implementation should not permit the compiler to + reorder the I/O sequence. + The implementation should ensure that for a NO_HARDWARE + build the code does not attempt to write to a location. +@Input pvLinRegBaseAddr The virtual base address of the register + block. +@Input ui32Offset The byte offset from the base address of + the register to be written to. +@Input ui16Value The word to be written to the register. +@Return None. +*/ /**************************************************************************/ + void OSWriteHWReg16(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT16 ui16Value); + +/*************************************************************************/ /*! +@Function OSWriteHWReg32 +@Description Write to a 32-bit memory-mapped device register. + The implementation should not permit the compiler to + reorder the I/O sequence. + The implementation should ensure that for a NO_HARDWARE + build the code does not attempt to write to a location. +@Input pvLinRegBaseAddr The virtual base address of the register + block. +@Input ui32Offset The byte offset from the base address of + the register to be written to. +@Input ui32Value The long word to be written to the register. +@Return None. +*/ /**************************************************************************/ + void OSWriteHWReg32(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value); + +/*************************************************************************/ /*! +@Function OSWriteHWReg64 +@Description Write to a 64-bit memory-mapped device register. 
+ The implementation should not permit the compiler to + reorder the I/O sequence. + The implementation should ensure that for a NO_HARDWARE + build the code does not attempt to write to a location. +@Input pvLinRegBaseAddr The virtual base address of the register + block. +@Input ui32Offset The byte offset from the base address of + the register to be written to. +@Input ui64Value The long long word to be written to the + register. +@Return None. +*/ /**************************************************************************/ + void OSWriteHWReg64(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT64 ui64Value); +#endif + +/*************************************************************************/ /*! +@Description Pointer to a timer callback function. +@Input pvData Pointer to timer specific data. +*/ /**************************************************************************/ +typedef void (*PFN_TIMER_FUNC)(void* pvData); + +/*************************************************************************/ /*! +@Function OSAddTimer +@Description OS specific function to install a timer callback. The + timer will then need to be enabled, as it is disabled by + default. + When enabled, the callback will be invoked once the specified + timeout has elapsed. +@Input pfnTimerFunc Timer callback +@Input *pvData Callback data +@Input ui32MsTimeout Callback period +@Return Valid handle on success, NULL if a failure +*/ /**************************************************************************/ +IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, void *pvData, IMG_UINT32 ui32MsTimeout); + +/*************************************************************************/ /*! +@Function OSRemoveTimer +@Description Removes the specified timer. The handle becomes invalid and + should no longer be used. +@Input hTimer handle of the timer to be removed +@Return PVRSRV_OK on success, a failure code otherwise. 
+*/ /**************************************************************************/
+PVRSRV_ERROR OSRemoveTimer(IMG_HANDLE hTimer);
+
+/*************************************************************************/ /*!
+@Function       OSEnableTimer
+@Description    Enable the specified timer. After enabling, the timer will
+                invoke the associated callback at an interval determined by
+                the configured timeout period until disabled.
+@Input          hTimer          handle of the timer to be enabled
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEnableTimer(IMG_HANDLE hTimer);
+
+/*************************************************************************/ /*!
+@Function       OSDisableTimer
+@Description    Disable the specified timer.
+@Input          hTimer          handle of the timer to be disabled
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSDisableTimer(IMG_HANDLE hTimer);
+
+
+/*************************************************************************/ /*!
+ @Function      OSPanic
+ @Description   Take action in response to an unrecoverable driver error
+ @Return        None
+*/ /**************************************************************************/
+void OSPanic(void);
+
+/*************************************************************************/ /*!
+@Function       OSCopyToUser
+@Description    Copy data to user-addressable memory from kernel-addressable
+                memory.
+                Note that pvDest may be an invalid address or NULL and the
+                function should return an error in this case.
+                For operating systems that do not have a user/kernel space
+                distinction, this function should be implemented as a stub
+                which simply returns PVRSRV_ERROR_NOT_SUPPORTED.
+@Input pvProcess handle of the connection +@Input pvDest pointer to the destination User memory +@Input pvSrc pointer to the source Kernel memory +@Input ui32Bytes size of the data to be copied +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSCopyToUser(void *pvProcess, void __user *pvDest, const void *pvSrc, size_t ui32Bytes); + +/*************************************************************************/ /*! +@Function OSCopyFromUser +@Description Copy data from user-addressable memory to kernel-addressable + memory. + Note that pvSrc may be an invalid address or NULL and the + function should return an error in this case. + For operating systems that do not have a user/kernel space + distinction, this function should be implemented as a stub + which simply returns PVRSRV_ERROR_NOT_SUPPORTED. +@Input pvProcess handle of the connection +@Input pvDest pointer to the destination Kernel memory +@Input pvSrc pointer to the source User memory +@Input ui32Bytes size of the data to be copied +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSCopyFromUser(void *pvProcess, void *pvDest, const void __user *pvSrc, size_t ui32Bytes); + +#if defined(__linux__) || defined(INTEGRITY_OS) +#define OSBridgeCopyFromUser OSCopyFromUser +#define OSBridgeCopyToUser OSCopyToUser +#else +/*************************************************************************/ /*! +@Function OSBridgeCopyFromUser +@Description Copy data from user-addressable memory into kernel-addressable + memory as part of a bridge call operation. + For operating systems that do not have a user/kernel space + distinction, this function will require whatever implementation + is needed to pass data for making the bridge function call. 
+ For operating systems which do have a user/kernel space + distinction (such as Linux) this function may be defined so + as to equate to a call to OSCopyFromUser(). +@Input pvProcess handle of the connection +@Input pvDest pointer to the destination Kernel memory +@Input pvSrc pointer to the source User memory +@Input ui32Bytes size of the data to be copied +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSBridgeCopyFromUser (void *pvProcess, + void *pvDest, + const void *pvSrc, + size_t ui32Bytes); + +/*************************************************************************/ /*! +@Function OSBridgeCopyToUser +@Description Copy data to user-addressable memory from kernel-addressable + memory as part of a bridge call operation. + For operating systems that do not have a user/kernel space + distinction, this function will require whatever implementation + is needed to pass data for making the bridge function call. + For operating systems which do have a user/kernel space + distinction (such as Linux) this function may be defined so + as to equate to a call to OSCopyToUser(). +@Input pvProcess handle of the connection +@Input pvDest pointer to the destination User memory +@Input pvSrc pointer to the source Kernel memory +@Input ui32Bytes size of the data to be copied +@Return PVRSRV_OK on success, a failure code otherwise. 
+*/ /**************************************************************************/
+PVRSRV_ERROR OSBridgeCopyToUser (void *pvProcess,
+                                 void *pvDest,
+                                 const void *pvSrc,
+                                 size_t ui32Bytes);
+#endif
+
+/* To be increased if required in future */
+#define PVRSRV_MAX_BRIDGE_IN_SIZE      0x2000 /*!< Size of the memory block used to hold data passed in to a bridge call */
+#define PVRSRV_MAX_BRIDGE_OUT_SIZE     0x1000 /*!< Size of the memory block used to hold data returned from a bridge call */
+
+/*************************************************************************/ /*!
+@Function       OSPlatformBridgeInit
+@Description    Called during device creation to allow the OS port to register
+                other bridge modules and related resources that it requires.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPlatformBridgeInit(void);
+
+/*************************************************************************/ /*!
+@Function       OSPlatformBridgeDeInit
+@Description    Called during device destruction to allow the OS port to
+                deregister its OS specific bridges and clean up other
+                related resources.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPlatformBridgeDeInit(void);
+
+
+#if defined(LINUX) && defined(__KERNEL__)
+#define OSWriteMemoryBarrier() wmb()
+#define OSReadMemoryBarrier() rmb()
+#define OSMemoryBarrier() mb()
+#else
+/*************************************************************************/ /*!
+@Function       OSWriteMemoryBarrier
+@Description    Insert a write memory barrier.
+                The write memory barrier guarantees that all store operations
+                (writes) specified before the barrier will appear to happen
+                before all of the store operations specified after the barrier.
+@Return         None.
+*/ /**************************************************************************/ +void OSWriteMemoryBarrier(void); +/*************************************************************************/ /*! +@Function OSReadMemoryBarrier +@Description Insert a read memory barrier. + The read memory barrier guarantees that all load (read) + operations specified before the barrier will appear to happen + before all of the load operations specified after the barrier. +*/ /**************************************************************************/ +void OSReadMemoryBarrier(void); +/*************************************************************************/ /*! +@Function OSMemoryBarrier +@Description Insert a read/write memory barrier. + The read and write memory barrier guarantees that all load + (read) and all store (write) operations specified before the + barrier will appear to happen before all of the load/store + operations specified after the barrier. +@Return None. +*/ /**************************************************************************/ +void OSMemoryBarrier(void); +#endif + +/*************************************************************************/ /*! +@Function PVRSRVToNativeError +@Description Returns the OS-specific equivalent error number/code for + the specified PVRSRV_ERROR value. + If there is no equivalent, or the PVRSRV_ERROR value is + PVRSRV_OK (no error), 0 is returned. +@Return The OS equivalent error code. +*/ /**************************************************************************/ +int PVRSRVToNativeError(PVRSRV_ERROR e); +/** See PVRSRVToNativeError(). */ +#define OSPVRSRVToNativeError(e) ( (PVRSRV_OK == e)? 
0: PVRSRVToNativeError(e) )
+
+
+#if defined(LINUX) && defined(__KERNEL__)
+
+/* Provide LockDep friendly definitions for Services RW locks */
+#include <linux/rwsem.h> /* NOTE(review): header name lost in mangling; struct rw_semaphore / init_rwsem / down_read below require rwsem.h -- confirm against original */
+#include <linux/slab.h>  /* NOTE(review): second mangled include -- presumed slab.h; confirm against original */
+#include "allocmem.h"
+
+#define OSWRLockCreate(ppsLock) ({ \
+	PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+	*(ppsLock) = OSAllocMem(sizeof(struct rw_semaphore)); \
+	if (*(ppsLock)) { init_rwsem(*(ppsLock)); e = PVRSRV_OK; }; \
+	e;})
+#define OSWRLockDestroy(psLock) ({OSFreeMem(psLock); PVRSRV_OK;})
+
+#define OSWRLockAcquireRead(psLock) ({down_read(psLock); PVRSRV_OK;})
+#define OSWRLockReleaseRead(psLock) ({up_read(psLock); PVRSRV_OK;})
+#define OSWRLockAcquireWrite(psLock) ({down_write(psLock); PVRSRV_OK;})
+#define OSWRLockReleaseWrite(psLock) ({up_write(psLock); PVRSRV_OK;})
+
+#elif defined(LINUX) || defined(__QNXNTO__) || defined(INTEGRITY_OS)
+/* User-mode unit tests use these definitions on Linux */
+
+PVRSRV_ERROR OSWRLockCreate(POSWR_LOCK *ppsLock);
+void OSWRLockDestroy(POSWR_LOCK psLock);
+void OSWRLockAcquireRead(POSWR_LOCK psLock);
+void OSWRLockReleaseRead(POSWR_LOCK psLock);
+void OSWRLockAcquireWrite(POSWR_LOCK psLock);
+void OSWRLockReleaseWrite(POSWR_LOCK psLock);
+
+#else
+
+/*************************************************************************/ /*!
+@Function       OSWRLockCreate
+@Description    Create a writer/reader lock.
+                This type of lock allows multiple concurrent readers but
+                only a single writer, allowing for optimized performance.
+@Output         ppsLock         A handle to the created WR lock.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR OSWRLockCreate(POSWR_LOCK *ppsLock)
+{
+	PVR_UNREFERENCED_PARAMETER(ppsLock);
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSWRLockDestroy
+@Description    Destroys a writer/reader lock.
+@Input          psLock          The handle of the WR lock to be destroyed.
+@Return         None.
+*/ /**************************************************************************/ +static INLINE void OSWRLockDestroy(POSWR_LOCK psLock) +{ + PVR_UNREFERENCED_PARAMETER(psLock); +} + +/*************************************************************************/ /*! +@Function OSWRLockAcquireRead +@Description Acquire a writer/reader read lock. + If the write lock is already acquired, the caller will + block until it is released. +@Input psLock The handle of the WR lock to be acquired for + reading. +@Return None. +*/ /**************************************************************************/ +static INLINE void OSWRLockAcquireRead(POSWR_LOCK psLock) +{ + PVR_UNREFERENCED_PARAMETER(psLock); +} + +/*************************************************************************/ /*! +@Function OSWRLockReleaseRead +@Description Release a writer/reader read lock. +@Input psLock The handle of the WR lock whose read lock is to + be released. +@Return None. +*/ /**************************************************************************/ +static INLINE void OSWRLockReleaseRead(POSWR_LOCK psLock) +{ + PVR_UNREFERENCED_PARAMETER(psLock); +} + +/*************************************************************************/ /*! +@Function OSWRLockAcquireWrite +@Description Acquire a writer/reader write lock. + If the write lock or any read lock are already acquired, + the caller will block until all are released. +@Input psLock The handle of the WR lock to be acquired for + writing. +@Return None. +*/ /**************************************************************************/ +static INLINE void OSWRLockAcquireWrite(POSWR_LOCK psLock) +{ + PVR_UNREFERENCED_PARAMETER(psLock); +} + +/*************************************************************************/ /*! +@Function OSWRLockReleaseWrite +@Description Release a writer/reader write lock. +@Input psLock The handle of the WR lock whose write lock is to + be released. 
+@Return         None.
+*/ /**************************************************************************/
+static INLINE void OSWRLockReleaseWrite(POSWR_LOCK psLock)
+{
+	PVR_UNREFERENCED_PARAMETER(psLock);
+}
+#endif
+
+/*************************************************************************/ /*!
+@Function       OSDivide64r64
+@Description    Divide a 64-bit value by a 32-bit value. Return the 64-bit
+                quotient.
+                The remainder is also returned in 'pui32Remainder'.
+@Input          ui64Divident    The number to be divided.
+@Input          ui32Divisor     The 32-bit value 'ui64Divident' is to
+                                be divided by.
+@Output         pui32Remainder  The remainder of the division.
+@Return         The 64-bit quotient (result of the division).
+*/ /**************************************************************************/
+IMG_UINT64 OSDivide64r64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder);
+
+/*************************************************************************/ /*!
+@Function       OSDivide64
+@Description    Divide a 64-bit value by a 32-bit value. Return a 32-bit
+                quotient.
+                The remainder is also returned in 'pui32Remainder'.
+                This function allows for a more optimal implementation
+                of a 64-bit division when the result is known to be
+                representable in 32-bits.
+@Input          ui64Divident    The number to be divided.
+@Input          ui32Divisor     The 32-bit value 'ui64Divident' is to
+                                be divided by.
+@Output         pui32Remainder  The remainder of the division.
+@Return         The 32-bit quotient (result of the division).
+*/ /**************************************************************************/
+IMG_UINT32 OSDivide64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder);
+
+/*************************************************************************/ /*!
+@Function       OSDumpStack
+@Description    Dump the current task information and its stack trace.
+@Return None +*/ /**************************************************************************/ +void OSDumpStack(void); + +/*************************************************************************/ /*! +@Function OSUserModeAccessToPerfCountersEn +@Description Permit User-mode access to CPU performance counter + registers. + This function is called during device initialisation. + Certain CPU architectures may need to explicitly permit + User mode access to performance counters - if this is + required, the necessary code should be implemented inside + this function. +@Return None. +*/ /**************************************************************************/ +void OSUserModeAccessToPerfCountersEn(void); + +/*************************************************************************/ /*! +@Function OSDebugSignalPID +@Description Sends a SIGTRAP signal to a specific PID in user mode for + debugging purposes. The user mode process can register a handler + against this signal. + This is necessary to support the Rogue debugger. If the Rogue + debugger is not used then this function may be implemented as + a stub. +@Input ui32PID The PID for the signal. +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSDebugSignalPID(IMG_UINT32 ui32PID); + +#if defined(LINUX) && defined(__KERNEL__) && !defined(DOXYGEN) +#define OSWarnOn(a) WARN_ON(a) +#else +/*************************************************************************/ /*! +@Function OSWarnOn +@Description This API allows the driver to emit a special token and stack + dump to the server log when an issue is detected that needs the + OS to be notified. The token or call may be used to trigger + log collection by the OS environment. + PVR_DPF log messages will have been emitted prior to this call. 
+@Input a Expression to evaluate, if true trigger Warn signal +@Return None +*/ /**************************************************************************/ +#define OSWarnOn(a) do { if ((a)) { OSDumpStack(); } } while (0) +#endif + +/*************************************************************************/ /*! +@Function OSIsKernelThread +@Description This API determines if the current running thread is a kernel + thread (i.e. one not associated with any userland process, + typically an MISR handler.) +@Return IMG_TRUE if it is a kernel thread, otherwise IMG_FALSE. +*/ /**************************************************************************/ +IMG_BOOL OSIsKernelThread(void); + +/*************************************************************************/ /*! +@Function OSThreadDumpInfo +@Description Traverse the thread list and call each of the stored + callbacks to dump the info in debug_dump. +@Input pfnDumpDebugPrintf The 'printf' function to be called to + display the debug info +@Input pvDumpDebugFile Optional file identifier to be passed to + the 'printf' function if required +*/ /**************************************************************************/ +void OSThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf, + void *pvDumpDebugFile); + +/*************************************************************************/ /*! +@Function OSDumpVersionInfo +@Description Store OS version information in debug dump. +@Input pfnDumpDebugPrintf The 'printf' function to be called to + display the debug info +@Input pvDumpDebugFile Optional file identifier to be passed to + the 'printf' function if required +*/ /**************************************************************************/ +void OSDumpVersionInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); + +/*************************************************************************/ /*! 
+@Function OSIsWriteCombineUnalignedSafe +@Description Determine if unaligned accesses to write-combine memory are + safe to perform, i.e. whether we are safe from a CPU fault + occurring. This test is specifically aimed at ARM64 platforms + which cannot provide this guarantee if the memory is 'device' + memory rather than 'normal' under the ARM memory architecture. +@Return IMG_TRUE if safe, IMG_FALSE otherwise. +*/ /**************************************************************************/ +IMG_BOOL OSIsWriteCombineUnalignedSafe(void); + +#endif /* __OSFUNC_H__ */ + +/****************************************************************************** + End of file (osfunc.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/oskm_apphint.h b/drivers/mcst/gpu-imgtec/services/server/include/oskm_apphint.h new file mode 100644 index 000000000000..1b1e91089438 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/oskm_apphint.h @@ -0,0 +1,176 @@ +/*************************************************************************/ /*! +@File oskm_apphint.h +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description OS-independent interface for retrieving KM apphints +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/
+#include "img_defs.h"
+#if defined(LINUX)
+#include "km_apphint.h"
+#else
+#include "services_client_porting.h"
+#endif
+/* NOTE(review): the includes above sit outside the include guard on purpose;
+ * each included header carries its own guard, so re-inclusion is harmless. */
+#if !defined(__OSKM_APPHINT_H__)
+#define __OSKM_APPHINT_H__
+
+
+#if defined(LINUX) && !defined(DOXYGEN)
+/* Linux KM build: forward apphint queries to the pvr_apphint_get_*()
+ * helpers from km_apphint.h. The result is logically inverted, which
+ * presumably converts a 0-on-success return into a non-zero "value found"
+ * indication -- TODO confirm against km_apphint.h. The 'state' and
+ * 'pAppHintDefault' parameters are accepted for interface compatibility
+ * with the non-Linux path but are not used here. */
+static INLINE IMG_UINT os_get_km_apphint_UINT32(void *state, APPHINT_ID id, const IMG_UINT32 *pAppHintDefault, IMG_UINT32 *pVal) {
+	return !pvr_apphint_get_uint32(id, pVal);
+}
+static INLINE IMG_UINT os_get_km_apphint_UINT64(void *state, APPHINT_ID id, const IMG_UINT64 *pAppHintDefault, IMG_UINT64 *pVal) {
+	return !pvr_apphint_get_uint64(id, pVal);
+}
+static INLINE IMG_UINT os_get_km_apphint_BOOL(void *state, APPHINT_ID id, const IMG_BOOL *pAppHintDefault, IMG_BOOL *pVal) {
+	return !pvr_apphint_get_bool(id, pVal);
+}
+static INLINE IMG_UINT os_get_km_apphint_STRING(void *state, APPHINT_ID id, const IMG_CHAR *pAppHintDefault, IMG_CHAR *buffer, size_t size) {
+	return !pvr_apphint_get_string(id, buffer, size);
+}
+
+/* OS-independent accessor macros: 'name' is token-pasted onto APPHINT_ID_
+ * to form the apphint identifier enumerator. */
+#define OSGetKMAppHintUINT32(state, name, appHintDefault, value) \
+	os_get_km_apphint_UINT32(state, APPHINT_ID_ ## name, appHintDefault, value)
+
+#define OSGetKMAppHintUINT64(state, name, appHintDefault, value) \
+	os_get_km_apphint_UINT64(state, APPHINT_ID_ ## name, appHintDefault, value)
+
+#define OSGetKMAppHintBOOL(state, name, appHintDefault, value) \
+	os_get_km_apphint_BOOL(state, APPHINT_ID_ ## name, appHintDefault, value)
+
+#define OSGetKMAppHintSTRING(state, name, appHintDefault, buffer, size) \
+	os_get_km_apphint_STRING(state, APPHINT_ID_ ## name, appHintDefault, buffer, size)
+
+
+/* No apphint state object exists on Linux; these only mark the argument
+ * as referenced to avoid unused-variable warnings at call sites. */
+#define OSCreateKMAppHintState(state) \
+	PVR_UNREFERENCED_PARAMETER(state)
+
+#define OSFreeKMAppHintState(state) \
+	PVR_UNREFERENCED_PARAMETER(state)
+
+#else /* #if defined(LINUX) && !defined(DOXYGEN) */
+
+/**************************************************************************/ /*!
+@def OSGetKMAppHintUINT32(state, name, appHintDefault, value) +@Description Interface for retrieval of uint32 km app hint. + For non-linux operating systems, this macro implements a call + from server code to PVRSRVGetAppHint() declared in + services_client_porting.h, effectively making it 'shared' code. +@Input state App hint state +@Input name Name used to identify app hint +@Input appHintDefault Default value to be returned if no + app hint is found. +@Output value Pointer to returned app hint value. + */ /**************************************************************************/ +#define OSGetKMAppHintUINT32(state, name, appHintDefault, value) \ + PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value) + +/**************************************************************************/ /*! +@def OSGetKMAppHintUINT64(state, name, appHintDefault, value) +@Description Interface for retrieval of uint64 km app hint. + For non-linux operating systems, this macro implements a call + from server code to PVRSRVGetAppHint() declared in + services_client_porting.h, effectively making it 'shared' code. +@Input state App hint state +@Input name Name used to identify app hint +@Input appHintDefault Default value to be returned if no + app hint is found. +@Output value Pointer to returned app hint value. + */ /**************************************************************************/ +#define OSGetKMAppHintUINT64(state, name, appHintDefault, value) \ + PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value) + +/**************************************************************************/ /*! +@def OSGetKMAppHintBOOL(state, name, appHintDefault, value) +@Description Interface for retrieval of IMG_BOOL km app hint. + For non-linux operating systems, this macro implements a call + from server code to PVRSRVGetAppHint() declared in + services_client_porting.h, effectively making it 'shared' code. 
+@Input state App hint state +@Input name Name used to identify app hint +@Input appHintDefault Default value to be returned if no + app hint is found. +@Output value Pointer to returned app hint value. + */ /**************************************************************************/ +#define OSGetKMAppHintBOOL(state, name, appHintDefault, value) \ + PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value) + +/**************************************************************************/ /*! +@def OSGetKMAppHintSTRING(state, name, appHintDefault, buffer, size) +@Description Interface for retrieval of string km app hint. + For non-linux operating systems, this macro implements a call + from server code to PVRSRVGetAppHint() declared in + services_client_porting.h, effectively making it 'shared' code. +@Input state App hint state +@Input name Name used to identify app hint +@Input appHintDefault Default value to be returned if no + app hint is found. +@Output buffer Buffer used to return app hint string. +@Input size Size of the buffer. + */ /**************************************************************************/ +#define OSGetKMAppHintSTRING(state, name, appHintDefault, buffer, size) \ + (PVR_UNREFERENCED_PARAMETER(size), PVRSRVGetAppHint(state, # name, IMG_STRING_TYPE, appHintDefault, buffer)) + +/**************************************************************************/ /*! +@def OSCreateKMAppHintState(state) +@Description Creates the app hint state. + For non-linux operating systems, this macro implements a call + from server code to PVRSRVCreateAppHintState() declared in + services_client_porting.h, effectively making it 'shared' code. +@Output state App hint state + */ /**************************************************************************/ +#define OSCreateKMAppHintState(state) \ + PVRSRVCreateAppHintState(IMG_SRV_UM, 0, state) + +/**************************************************************************/ /*! 
+@def OSFreeKMAppHintState
+@Description    Free the app hint state.
+                For non-linux operating systems, this macro implements a call
+                from server code to PVRSRVFreeAppHintState() declared in
+                services_client_porting.h, effectively making it 'shared' code.
+@Output         state    App hint state
+ */ /**************************************************************************/
+#define OSFreeKMAppHintState(state) \
+	PVRSRVFreeAppHintState(IMG_SRV_UM, state)
+
+#endif /* #if defined(LINUX) */
+
+#endif /* __OSKM_APPHINT_H__ */
+
+/******************************************************************************
+ End of file (oskm_apphint.h)
+******************************************************************************/
diff --git a/drivers/mcst/gpu-imgtec/services/server/include/ospvr_gputrace.h b/drivers/mcst/gpu-imgtec/services/server/include/ospvr_gputrace.h
new file mode 100644
index 000000000000..1b239efd1332
--- /dev/null
+++ b/drivers/mcst/gpu-imgtec/services/server/include/ospvr_gputrace.h
@@ -0,0 +1,167 @@
+/*************************************************************************/ /*!
+@File           ospvr_gputrace.h
+@Title          PVR GPU Trace module common environment interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PVR_GPUTRACE_H_ +#define PVR_GPUTRACE_H_ + +#include "img_types.h" +#include "img_defs.h" +#include "rgx_hwperf.h" +#include "device.h" + +#if defined(LINUX) + +void PVRGpuTraceEnqueueEvent( + PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32FirmwareCtx, + IMG_UINT32 ui32ExternalJobRef, + IMG_UINT32 ui32InternalJobRef, + RGX_HWPERF_KICK_TYPE eKickType); + +/* Early initialisation of GPU Trace events logic. + * This function is called on *driver* initialisation. */ +PVRSRV_ERROR PVRGpuTraceSupportInit(void); + +/* GPU Trace resources final cleanup. 
+ * This function is called on driver de-initialisation. */ +void PVRGpuTraceSupportDeInit(void); + +/* Initialisation for AppHints callbacks. + * This function is called during the late stage of driver initialisation but + * before the device initialisation but after the debugfs sub-system has been + * initialised. */ +void PVRGpuTraceInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode); + +/* Per-device initialisation of the GPU Trace resources */ +PVRSRV_ERROR PVRGpuTraceInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode); + +/* Per-device cleanup for the GPU Trace resources. */ +void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode); + +/* Enables the gpu trace sub-system for a given device. */ +PVRSRV_ERROR PVRGpuTraceSetEnabled( + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bNewValue); + +/* Returns IMG_TRUE if the gpu trace sub-system has been enabled (but not + * necessarily initialised). */ +IMG_BOOL PVRGpuTraceIsEnabled(void); + +/* Performs some initialisation steps if the feature was enabled + * on driver startup. 
*/ +void PVRGpuTraceInitIfEnabled(PVRSRV_DEVICE_NODE *psDeviceNode); + +/* FTrace events callbacks interface */ + +void PVRGpuTraceEnableUfoCallback(void); +void PVRGpuTraceDisableUfoCallback(void); + +void PVRGpuTraceEnableFirmwareActivityCallback(void); +void PVRGpuTraceDisableFirmwareActivityCallback(void); + +#else /* define(LINUX) */ + +static inline void PVRGpuTraceEnqueueEvent( + PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32FirmwareCtx, + IMG_UINT32 ui32ExternalJobRef, + IMG_UINT32 ui32InternalJobRef, + RGX_HWPERF_KICK_TYPE eKickType) +{ + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(ui32ExternalJobRef); + PVR_UNREFERENCED_PARAMETER(ui32InternalJobRef); + PVR_UNREFERENCED_PARAMETER(eKickType); +} + +static inline PVRSRV_ERROR PVRGpuTraceSupportInit(void) { + return PVRSRV_OK; +} + +static inline void PVRGpuTraceSupportDeInit(void) {} + +static inline void PVRGpuTraceInitAppHintCallbacks( + const PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); +} + +static inline PVRSRV_ERROR PVRGpuTraceInitDevice( + PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + return PVRSRV_OK; +} + +static inline void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); +} + +static inline PVRSRV_ERROR PVRGpuTraceSetEnabled( + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bNewValue) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(bNewValue); + return PVRSRV_OK; +} + +static inline IMG_BOOL PVRGpuTraceIsEnabled(void) +{ + return IMG_FALSE; +} + +static inline void PVRGpuTraceInitIfEnabled(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); +} + +static inline void PVRGpuTraceEnableUfoCallback(void) {} +static inline void PVRGpuTraceDisableUfoCallback(void) {} + +static inline void PVRGpuTraceEnableFirmwareActivityCallback(void) {} +static inline void 
PVRGpuTraceDisableFirmwareActivityCallback(void) {} + +#endif /* define(LINUX) */ + +#endif /* PVR_GPUTRACE_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/pdump_km.h b/drivers/mcst/gpu-imgtec/services/server/include/pdump_km.h new file mode 100644 index 000000000000..83696e9e0277 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/pdump_km.h @@ -0,0 +1,1144 @@ +/*************************************************************************/ /*! +@File +@Title pdump functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Main APIs for pdump functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PDUMP_KM_H +#define PDUMP_KM_H + +#if defined(PDUMP) +#include +#endif + +/* services/srvkm/include/ */ +#include "device.h" + +/* include/ */ +#include "pvrsrv_error.h" + + +#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__) +#define __pvrsrv_defined_struct_enum__ +#include +#endif + +#include "connection_server.h" +/* Pull in pdump flags from services include */ +#include "pdump.h" +#include "pdumpdefs.h" + +/* Define this to enable the PDUMP_HERE trace in the server */ +#undef PDUMP_TRACE + +#if defined(PDUMP_TRACE) +#define PDUMP_HERE_VAR IMG_UINT32 here = 0; +#define PDUMP_HERE(a) { here = (a); if (ui32Flags & PDUMP_FLAGS_DEBUG) PVR_DPF((PVR_DBG_WARNING, "HERE %d", (a))); } +#define PDUMP_HEREA(a) { here = (a); PVR_DPF((PVR_DBG_WARNING, "HERE ALWAYS %d", (a))); } +#else +#define PDUMP_HERE_VAR IMG_UINT32 here = 0; +#define PDUMP_HERE(a) here = (a); +#define PDUMP_HEREA(a) here = (a); +#endif + +#define PDUMP_PD_UNIQUETAG (IMG_HANDLE)0 +#define PDUMP_PT_UNIQUETAG (IMG_HANDLE)0 + +/* Invalid value for PDump block number */ +#define PDUMP_BLOCKNUM_INVALID IMG_UINT32_MAX + +typedef struct 
_PDUMP_CONNECTION_DATA_ PDUMP_CONNECTION_DATA; + +/* PDump transition events */ +typedef enum _PDUMP_TRANSITION_EVENT_ +{ + PDUMP_TRANSITION_EVENT_NONE, /* No event */ + PDUMP_TRANSITION_EVENT_BLOCK_FINISHED, /* Block mode event, current PDump-block has finished */ + PDUMP_TRANSITION_EVENT_BLOCK_STARTED, /* Block mode event, new PDump-block has started */ + PDUMP_TRANSITION_EVENT_RANGE_ENTERED, /* Transition into capture range */ + PDUMP_TRANSITION_EVENT_RANGE_EXITED, /* Transition out of capture range */ +} PDUMP_TRANSITION_EVENT; + +typedef PVRSRV_ERROR (*PFN_PDUMP_TRANSITION)(void *pvData, void *pvDevice, PDUMP_TRANSITION_EVENT eEvent, IMG_UINT32 ui32PDumpFlags); +typedef void (*PFN_PDUMP_SYNCBLOCKS)(void *pvData, PDUMP_TRANSITION_EVENT eEvent); + +typedef PVRSRV_ERROR (*PFN_PDUMP_TRANSITION_FENCE_SYNC)(void *pvData, PDUMP_TRANSITION_EVENT eEvent); + +#ifdef PDUMP + +/*! Macro used to record a panic in the PDump script stream */ +#define PDUMP_PANIC(_id, _msg) do \ + { PVRSRV_ERROR _eE;\ + _eE = PDumpPanic(((RGX_PDUMP_PANIC_ ## _id) & 0xFFFF), _msg, __func__, __LINE__); \ + PVR_LOG_IF_ERROR(_eE, "PDumpPanic");\ + MSC_SUPPRESS_4127\ + } while (0) + +/*! 
Macro used to record a driver error in the PDump script stream to invalidate the capture */ +#define PDUMP_ERROR(_err, _msg) \ + (void)PDumpCaptureError(_err, _msg, __func__, __LINE__) + +#define SZ_MSG_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE +#define SZ_SCRIPT_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE +#define SZ_FILENAME_SIZE_MAX (PVRSRV_PDUMP_MAX_FILENAME_SIZE+sizeof(PDUMP_PARAM_N_FILE_NAME)) + +#define PDUMP_GET_SCRIPT_STRING() \ + IMG_HANDLE hScript; \ + void *pvScriptAlloc; \ + IMG_UINT32 ui32MaxLen = SZ_SCRIPT_SIZE_MAX-1; \ + pvScriptAlloc = OSAllocMem( SZ_SCRIPT_SIZE_MAX ); \ + if (!pvScriptAlloc) \ + { \ + PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_SCRIPT_STRING() failed to allocate memory for script buffer")); \ + return PVRSRV_ERROR_OUT_OF_MEMORY; \ + } \ + \ + hScript = (IMG_HANDLE) pvScriptAlloc; + +#define PDUMP_GET_MSG_STRING() \ + IMG_CHAR *pszMsg; \ + void *pvMsgAlloc; \ + IMG_UINT32 ui32MaxLen = SZ_MSG_SIZE_MAX-1; \ + pvMsgAlloc = OSAllocMem( SZ_MSG_SIZE_MAX ); \ + if (!pvMsgAlloc) \ + { \ + PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_MSG_STRING() failed to allocate memory for message buffer")); \ + return PVRSRV_ERROR_OUT_OF_MEMORY; \ + } \ + pszMsg = (IMG_CHAR *)pvMsgAlloc; + +#define PDUMP_GET_SCRIPT_AND_FILE_STRING() \ + IMG_HANDLE hScript; \ + IMG_CHAR *pszFileName; \ + IMG_UINT32 ui32MaxLenScript = SZ_SCRIPT_SIZE_MAX-1; \ + void *pvScriptAlloc; \ + void *pvFileAlloc; \ + pvScriptAlloc = OSAllocMem( SZ_SCRIPT_SIZE_MAX ); \ + if (!pvScriptAlloc) \ + { \ + PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_SCRIPT_AND_FILE_STRING() failed to allocate memory for script buffer")); \ + return PVRSRV_ERROR_OUT_OF_MEMORY; \ + } \ + \ + hScript = (IMG_HANDLE) pvScriptAlloc; \ + pvFileAlloc = OSAllocMem( SZ_FILENAME_SIZE_MAX ); \ + if (!pvFileAlloc) \ + { \ + PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_SCRIPT_AND_FILE_STRING() failed to allocate memory for filename buffer")); \ + OSFreeMem(pvScriptAlloc); \ + return PVRSRV_ERROR_OUT_OF_MEMORY; \ + } \ + pszFileName = (IMG_CHAR *)pvFileAlloc; + 
+#define PDUMP_RELEASE_SCRIPT_STRING() \ + if (pvScriptAlloc) \ + { \ + OSFreeMem(pvScriptAlloc); \ + pvScriptAlloc = NULL; \ + } + +#define PDUMP_RELEASE_MSG_STRING() \ + if (pvMsgAlloc) \ + { \ + OSFreeMem(pvMsgAlloc); \ + pvMsgAlloc = NULL; \ + } + +#define PDUMP_RELEASE_FILE_STRING() \ + if (pvFileAlloc) \ + { \ + OSFreeMem(pvFileAlloc); \ + pvFileAlloc = NULL; \ + } + +#define PDUMP_RELEASE_SCRIPT_AND_FILE_STRING() \ + if (pvScriptAlloc) \ + { \ + OSFreeMem(pvScriptAlloc); \ + pvScriptAlloc = NULL; \ + } \ + if (pvFileAlloc) \ + { \ + OSFreeMem(pvFileAlloc); \ + pvFileAlloc = NULL; \ + } + + +/* Shared across pdump_x files */ +PVRSRV_ERROR PDumpInitCommon(void); +void PDumpDeInitCommon(void); +PVRSRV_ERROR PDumpReady(void); +void PDumpGetParameterZeroPageInfo(PDUMP_FILEOFFSET_T *puiZeroPageOffset, + size_t *puiZeroPageSize, + const IMG_CHAR **ppszZeroPageFilename); + +void PDumpConnectionNotify(void); +void PDumpDisconnectionNotify(void); + +void PDumpStopInitPhase(void); +PVRSRV_ERROR PDumpSetFrameKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32Frame); +PVRSRV_ERROR PDumpGetFrameKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32* pui32Frame); +PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpSetDefaultCaptureParamsKM(IMG_UINT32 ui32Mode, + IMG_UINT32 ui32Start, + IMG_UINT32 ui32End, + IMG_UINT32 ui32Interval, + IMG_UINT32 ui32MaxParamFileSize); + + +PVRSRV_ERROR PDumpReg32(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpReg64(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegAddr, + IMG_UINT64 ui64RegValue, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpRegLabelToReg64(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegDst, + IMG_UINT32 ui32RegSrc, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpPhysHandleToInternalVar64(IMG_CHAR *pszInternalVar, + IMG_HANDLE hPdumpPages, + 
IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpMemLabelToInternalVar64(IMG_CHAR *pszInternalVar, + PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpInternalVarToMemLabel(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_CHAR *pszInternalVar, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpWriteVarORValueOp(const IMG_CHAR *pszInternalVariable, + const IMG_UINT64 ui64Value, + const IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR PDumpWriteVarANDValueOp(const IMG_CHAR *pszInternalVariable, + const IMG_UINT64 ui64Value, + const IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR PDumpWriteVarSHRValueOp(const IMG_CHAR *pszInternalVariable, + const IMG_UINT64 ui64Value, + const IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR PDumpWriteVarORVarOp(const IMG_CHAR *pszInternalVar, + const IMG_CHAR *pszInternalVar2, + const IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR PDumpWriteVarANDVarOp(const IMG_CHAR *pszInternalVar, + const IMG_CHAR *pszInternalVar2, + const IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR PDumpInternalVarToReg32(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32Reg, + IMG_CHAR *pszInternalVar, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpInternalVarToReg64(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32Reg, + IMG_CHAR *pszInternalVar, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpMemLabelToMem32(PMR *psPMRSource, + PMR *psPMRDest, + IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource, + IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpMemLabelToMem64(PMR *psPMRSource, + PMR *psPMRDest, + IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource, + IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpRegLabelToMem32(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32Reg, + PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpRegLabelToMem64(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32Reg, + PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 
ui32Flags); + +PVRSRV_ERROR PDumpRegLabelToInternalVar(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32Reg, + IMG_CHAR *pszInternalVar, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpSAW(IMG_CHAR *pszDevSpaceName, + IMG_UINT32 ui32HPOffsetBytes, + IMG_UINT32 ui32NumSaveBytes, + IMG_CHAR *pszOutfileName, + IMG_UINT32 ui32OutfileOffsetByte, + PDUMP_FLAGS_T uiPDumpFlags); + +PVRSRV_ERROR PDumpRegPolKM(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Mask, + IMG_UINT32 ui32Flags, + PDUMP_POLL_OPERATOR eOperator); + +PVRSRV_ERROR PDumpBitmapKM(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32Width, + IMG_UINT32 ui32Height, + IMG_UINT32 ui32StrideInBytes, + IMG_DEV_VIRTADDR sDevBaseAddr, + IMG_UINT32 ui32MMUContextID, + IMG_UINT32 ui32Size, + PDUMP_PIXEL_FORMAT ePixelFormat, + IMG_UINT32 ui32AddrMode, + IMG_UINT32 ui32PDumpFlags); + + +/**************************************************************************/ /*! +@Function PDumpImageDescriptor +@Description PDumps image data out as an IMGBv2 data section +@Input psDeviceNode Pointer to device node. +@Input ui32MMUContextID PDUMP MMU context ID. +@Input pszSABFileName Pointer to string containing file name of + Image being SABed +@Input sData GPU virtual address of this surface. 
+@Input ui32DataSize Image data size +@Input ui32LogicalWidth Image logical width +@Input ui32LogicalHeight Image logical height +@Input ui32PhysicalWidth Image physical width +@Input ui32PhysicalHeight Image physical height +@Input ePixFmt Image pixel format +@Input eFBCompression FB compression mode +@Input paui32FBCClearColour FB clear colour (Only applicable to FBC surfaces) +@Input eFBCSwizzle FBC channel swizzle (Only applicable to FBC surfaces) +@Input sHeader GPU virtual address of the headers of this + surface (Only applicable to FBC surfaces) +@Input ui32HeaderSize Header size (Only applicable to FBC surfaces) +@Input ui32PDumpFlags PDUMP flags +@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_ + error code +*/ /***************************************************************************/ +PVRSRV_ERROR PDumpImageDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32MMUContextID, + IMG_CHAR *pszSABFileName, + IMG_DEV_VIRTADDR sData, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32LogicalWidth, + IMG_UINT32 ui32LogicalHeight, + IMG_UINT32 ui32PhysicalWidth, + IMG_UINT32 ui32PhysicalHeight, + PDUMP_PIXEL_FORMAT ePixFmt, + IMG_MEMLAYOUT eMemLayout, + IMG_FB_COMPRESSION eFBCompression, + const IMG_UINT32 *paui32FBCClearColour, + PDUMP_FBC_SWIZZLE eFBCSwizzle, + IMG_DEV_VIRTADDR sHeader, + IMG_UINT32 ui32HeaderSize, + IMG_UINT32 ui32PDumpFlags); + +/**************************************************************************/ /*! +@Function PDumpDataDescriptor +@Description PDumps non-image data out as an IMGCv1 data section +@Input psDeviceNode Pointer to device node. +@Input ui32MMUContextID PDUMP MMU context ID. +@Input pszSABFileName Pointer to string containing file name of + Data being SABed +@Input sData GPU virtual address of this data. 
+@Input ui32DataSize Data size +@Input ui32HeaderType Header type +@Input ui32ElementType Data element type +@Input ui32ElementCount Number of data elements +@Input ui32PDumpFlags PDUMP flags +@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_ + error code +*/ /***************************************************************************/ +PVRSRV_ERROR PDumpDataDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32MMUContextID, + IMG_CHAR *pszSABFileName, + IMG_DEV_VIRTADDR sData, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32HeaderType, + IMG_UINT32 ui32ElementType, + IMG_UINT32 ui32ElementCount, + IMG_UINT32 ui32PDumpFlags); + + +PVRSRV_ERROR PDumpReadRegKM(IMG_CHAR *pszPDumpRegName, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32Address, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32PDumpFlags); + +__printf(2, 3) +PVRSRV_ERROR PDumpCommentWithFlagsNoLock(IMG_UINT32 ui32Flags, + IMG_CHAR* pszFormat, + ...); + +PVRSRV_ERROR PDumpCommentWithFlagsNoLockVA(IMG_UINT32 ui32Flags, + const IMG_CHAR * pszFormat, + va_list args); + +__printf(2, 3) +PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32 ui32Flags, + IMG_CHAR* pszFormat, + ...); + +PVRSRV_ERROR PDumpCommentWithFlagsVA(IMG_UINT32 ui32Flags, + const IMG_CHAR * pszFormat, + va_list args); + +PVRSRV_ERROR PDumpPanic(IMG_UINT32 ui32PanicNo, + IMG_CHAR* pszPanicMsg, + const IMG_CHAR* pszPPFunc, + IMG_UINT32 ui32PPline); + +PVRSRV_ERROR PDumpCaptureError(PVRSRV_ERROR ui32ErrorNo, + IMG_CHAR* pszErrorMsg, + const IMG_CHAR* pszPPFunc, + IMG_UINT32 ui32PPline); + +PVRSRV_ERROR PDumpPDReg(PDUMP_MMU_ATTRIB *psMMUAttrib, + IMG_UINT32 ui32Reg, + IMG_UINT32 ui32dwData, + IMG_HANDLE hUniqueTag); + +PVRSRV_ERROR PDumpPDRegWithFlags(PDUMP_MMU_ATTRIB *psMMUAttrib, + IMG_UINT32 ui32Reg, + IMG_UINT32 ui32Data, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag); + +PVRSRV_ERROR PDumpIsLastCaptureFrameKM(IMG_BOOL *pbIsLastCaptureFrame); + +PVRSRV_ERROR PDumpGetStateKM(IMG_UINT64 *ui64State); + 
+PVRSRV_ERROR PDumpGetCurrentBlockKM(IMG_UINT32 *pui32CurrentBlock); + +PVRSRV_ERROR PDumpForceCaptureStopKM(void); + +PVRSRV_ERROR PDumpIsCaptureFrameKM(IMG_BOOL *bIsCaptureRange); + +PVRSRV_ERROR PDumpRegRead32ToInternalVar(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegOffset, + IMG_CHAR *pszInternalVar, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpRegRead32(IMG_CHAR *pszPDumpRegName, + const IMG_UINT32 dwRegOffset, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpRegRead64(IMG_CHAR *pszPDumpRegName, + const IMG_UINT32 dwRegOffset, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpRegRead64ToInternalVar(IMG_CHAR *pszPDumpRegName, + IMG_CHAR *pszInternalVar, + const IMG_UINT32 dwRegOffset, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags); +PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks); + +PVRSRV_ERROR PDumpRegBasedCBP(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegOffset, + IMG_UINT32 ui32WPosVal, + IMG_UINT32 ui32PacketSize, + IMG_UINT32 ui32BufferSize, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpTRG(IMG_CHAR *pszMemSpace, + IMG_UINT32 ui32MMUCtxID, + IMG_UINT32 ui32RegionID, + IMG_BOOL bEnable, + IMG_UINT64 ui64VAddr, + IMG_UINT64 ui64LenBytes, + IMG_UINT32 ui32XStride, + IMG_UINT32 ui32Flags); + +void PDumpLock(void); +void PDumpUnlock(void); + +PVRSRV_ERROR PDumpRegCondStr(IMG_CHAR **ppszPDumpCond, + IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Mask, + IMG_UINT32 ui32Flags, + PDUMP_POLL_OPERATOR eOperator); + +PVRSRV_ERROR PDumpInternalValCondStr(IMG_CHAR **ppszPDumpCond, + IMG_CHAR *pszInternalVar, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Mask, + IMG_UINT32 ui32Flags, + PDUMP_POLL_OPERATOR eOperator); + +PVRSRV_ERROR PDumpIfKM(IMG_CHAR *pszPDumpCond, IMG_UINT32 ui32PDumpFlags); +PVRSRV_ERROR PDumpElseKM(IMG_CHAR *pszPDumpCond, IMG_UINT32 ui32PDumpFlags); +PVRSRV_ERROR PDumpFiKM(IMG_CHAR *pszPDumpCond, IMG_UINT32 ui32PDumpFlags); +PVRSRV_ERROR 
PDumpStartDoLoopKM(IMG_UINT32 ui32PDumpFlags); +PVRSRV_ERROR PDumpEndDoWhileLoopKM(IMG_CHAR *pszPDumpWhileCond, IMG_UINT32 ui32PDumpFlags); +PVRSRV_ERROR PDumpCOMCommand(IMG_UINT32 ui32PDumpFlags, const IMG_CHAR *pszPDump); + +void PDumpPowerTransitionStart(void); +void PDumpPowerTransitionEnd(void); +IMG_BOOL PDumpInPowerTransition(void); +IMG_BOOL PDumpIsContCaptureOn(void); + +/*! + * @name PDumpWriteParameter + * @brief General function for writing to PDump stream. Used + * mainly for memory dumps to parameter stream. + * Usually more convenient to use PDumpWriteScript below + * for the script stream. + * @param psui8Data - data to write + * @param ui32Size - size of write + * @param ui32Flags - PDump flags + * @param pui32FileOffset - on return contains the file offset to + * the start of the parameter data + * @param aszFilenameStr - pointer to at least a 20 char buffer to + * return the parameter filename + * @return error + */ +PVRSRV_ERROR PDumpWriteParameter(IMG_UINT8 *psui8Data, IMG_UINT32 ui32Size, + IMG_UINT32 ui32Flags, IMG_UINT32* pui32FileOffset, + IMG_CHAR* aszFilenameStr); + +/*! + * @name PDumpWriteScript + * @brief Write an PDumpOS created string to the "script" output stream + * @param hString - PDump OS layer handle of string buffer to write + * @param ui32Flags - PDump flags + * @return IMG_TRUE on success. + */ +IMG_BOOL PDumpWriteScript(IMG_HANDLE hString, IMG_UINT32 ui32Flags); + +/**************************************************************************/ /*! +@Function PDumpSNPrintf +@Description Printf to OS-specific PDump state buffer. This function is + only called if PDUMP is defined. 
+@Input hBuf handle of buffer to write into +@Input ui32ScriptSizeMax maximum size of data to write (chars) +@Input pszFormat format string +@Return None +*/ /**************************************************************************/ +__printf(3, 4) +PVRSRV_ERROR PDumpSNPrintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...); + + +/* + PDumpWriteShiftedMaskedValue(): + + loads the "reference" address into an internal PDump register, + optionally shifts it right, + optionally shifts it left, + optionally masks it + then finally writes the computed value to the given destination address + + i.e. it emits pdump language equivalent to this expression: + + dest = ((&ref) >> SHRamount << SHLamount) & MASK +*/ +PVRSRV_ERROR +PDumpWriteShiftedMaskedValue(const IMG_CHAR *pszDestRegspaceName, + const IMG_CHAR *pszDestSymbolicName, + IMG_DEVMEM_OFFSET_T uiDestOffset, + const IMG_CHAR *pszRefRegspaceName, + const IMG_CHAR *pszRefSymbolicName, + IMG_DEVMEM_OFFSET_T uiRefOffset, + IMG_UINT32 uiSHRAmount, + IMG_UINT32 uiSHLAmount, + IMG_UINT32 uiMask, + IMG_DEVMEM_SIZE_T uiWordSize, + IMG_UINT32 uiPDumpFlags); + +/* + PDumpWriteSymbAddress(): + writes the address of the "reference" to the offset given +*/ +PVRSRV_ERROR +PDumpWriteSymbAddress(const IMG_CHAR *pszDestSpaceName, + IMG_DEVMEM_OFFSET_T uiDestOffset, + const IMG_CHAR *pszRefSymbolicName, + IMG_DEVMEM_OFFSET_T uiRefOffset, + const IMG_CHAR *pszPDumpDevName, + IMG_UINT32 ui32WordSize, + IMG_UINT32 ui32AlignShift, + IMG_UINT32 ui32Shift, + IMG_UINT32 uiPDumpFlags); + +/* Register the connection with the PDump subsystem */ +PVRSRV_ERROR +PDumpRegisterConnection(void *hSyncPrivData, + PFN_PDUMP_SYNCBLOCKS pfnPDumpSyncBlocks, + PDUMP_CONNECTION_DATA **ppsPDumpConnectionData); + +/* Unregister the connection with the PDump subsystem */ +void +PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData); + +/* Register for notification of PDump Transition into/out of capture range */ 
+PVRSRV_ERROR +PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData, + PFN_PDUMP_TRANSITION pfnCallback, + void *hPrivData, + void *pvDevice, + void **ppvHandle); + +/* Unregister notification of PDump Transition */ +void +PDumpUnregisterTransitionCallback(void *pvHandle); + +PVRSRV_ERROR +PDumpRegisterTransitionCallbackFenceSync(void *hPrivData, + PFN_PDUMP_TRANSITION_FENCE_SYNC pfnCallback, + void **ppvHandle); + +void +PDumpUnregisterTransitionCallbackFenceSync(void *pvHandle); + +/* Notify PDump of a Transition into/out of capture range */ +PVRSRV_ERROR +PDumpTransition(PDUMP_CONNECTION_DATA *psPDumpConnectionData, + PDUMP_TRANSITION_EVENT eEvent, + IMG_UINT32 ui32PDumpFlags); + + +#define PDUMP_LOCK(_ui32PDumpFlags) do \ + { if ((_ui32PDumpFlags & PDUMP_FLAGS_PDUMP_LOCK_HELD) == 0)\ + {\ + PDumpLock();\ + }\ + MSC_SUPPRESS_4127\ + } while (0) + +#define PDUMP_UNLOCK(_ui32PDumpFlags) do \ + { if ((_ui32PDumpFlags & PDUMP_FLAGS_PDUMP_LOCK_HELD) == 0)\ + {\ + PDumpUnlock();\ + }\ + MSC_SUPPRESS_4127\ + } while (0) + +#define PDUMPINIT PDumpInitCommon +#define PDUMPDEINIT PDumpDeInitCommon +#define PDUMPREG32 PDumpReg32 +#define PDUMPREG64 PDumpReg64 +#define PDUMPREGREAD32 PDumpRegRead32 +#define PDUMPREGREAD64 PDumpRegRead64 +#define PDUMPCOMMENT(...) 
PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, __VA_ARGS__) +#define PDUMPCOMMENTWITHFLAGS PDumpCommentWithFlags +#define PDUMPREGPOL PDumpRegPolKM +#define PDUMPPDREG PDumpPDReg +#define PDUMPPDREGWITHFLAGS PDumpPDRegWithFlags +#define PDUMPREGBASEDCBP PDumpRegBasedCBP +#define PDUMPENDINITPHASE PDumpStopInitPhase +#define PDUMPIDLWITHFLAGS PDumpIDLWithFlags +#define PDUMPIDL PDumpIDL +#define PDUMPPOWCMDSTART PDumpPowerTransitionStart +#define PDUMPPOWCMDEND PDumpPowerTransitionEnd +#define PDUMPPOWCMDINTRANS PDumpInPowerTransition +#define PDUMPCOM PDumpCOMCommand + +#define PDUMP_BLKSTART(_ui32PDumpFlags) do \ + { PDUMP_LOCK(_ui32PDumpFlags);\ + _ui32PDumpFlags |= PDUMP_FLAGS_PDUMP_LOCK_HELD;\ + MSC_SUPPRESS_4127\ + } while (0) + +#define PDUMP_BLKEND(_ui32PDumpFlags) do \ + { _ui32PDumpFlags &= ~PDUMP_FLAGS_PDUMP_LOCK_HELD;\ + PDUMP_UNLOCK(_ui32PDumpFlags);\ + MSC_SUPPRESS_4127\ + } while (0) + +#define PDUMPIF(_msg,_ui32PDumpFlags) do \ + {PDUMP_BLKSTART(_ui32PDumpFlags);\ + PDumpIfKM(_msg,_ui32PDumpFlags);\ + MSC_SUPPRESS_4127\ + } while (0) + +#define PDUMPELSE PDumpElseKM + +#define PDUMPFI(_msg,_ui32PDumpFlags) do \ + { PDumpFiKM(_msg,_ui32PDumpFlags);\ + PDUMP_BLKEND(_ui32PDumpFlags);\ + MSC_SUPPRESS_4127\ + } while (0) + +#else +/* + We should be clearer about which functions can be called + across the bridge as this looks rather unbalanced +*/ + +/*! Macro used to record a panic in the PDump script stream */ +#define PDUMP_PANIC(_id, _msg) ((void)0) + +/*! 
Macro used to record a driver error in the PDump script stream to invalidate the capture */ +#define PDUMP_ERROR(_err, _msg) ((void)0) + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpInitCommon) +#endif +static INLINE PVRSRV_ERROR +PDumpInitCommon(void) +{ + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpConnectionNotify) +#endif +static INLINE void +PDumpConnectionNotify(void) +{ +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpDisconnectionNotify) +#endif +static INLINE void +PDumpDisconnectionNotify(void) +{ +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpLock) +#endif +static INLINE void +PDumpLock(void) +{ +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpUnlock) +#endif +static INLINE void +PDumpUnlock(void) +{ +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpStopInitPhase) +#endif +static INLINE void +PDumpStopInitPhase(void) +{ +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpSetFrameKM) +#endif +static INLINE PVRSRV_ERROR +PDumpSetFrameKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32Frame) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(ui32Frame); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpGetFrameKM) +#endif +static INLINE PVRSRV_ERROR +PDumpGetFrameKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32* pui32Frame) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(pui32Frame); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpCommentKM) +#endif +static INLINE PVRSRV_ERROR +PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags) +{ + PVR_UNREFERENCED_PARAMETER(pszComment); + PVR_UNREFERENCED_PARAMETER(ui32Flags); + return PVRSRV_OK; +} + + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpSetDefaultCaptureParamsKM) +#endif +static INLINE PVRSRV_ERROR +PDumpSetDefaultCaptureParamsKM(IMG_UINT32 ui32Mode, + IMG_UINT32 ui32Start, + IMG_UINT32 ui32End, + IMG_UINT32 
ui32Interval, + IMG_UINT32 ui32MaxParamFileSize) +{ + PVR_UNREFERENCED_PARAMETER(ui32Mode); + PVR_UNREFERENCED_PARAMETER(ui32Start); + PVR_UNREFERENCED_PARAMETER(ui32End); + PVR_UNREFERENCED_PARAMETER(ui32Interval); + PVR_UNREFERENCED_PARAMETER(ui32MaxParamFileSize); + + return PVRSRV_OK; +} + + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpPanic) +#endif +static INLINE PVRSRV_ERROR +PDumpPanic(IMG_UINT32 ui32PanicNo, + IMG_CHAR* pszPanicMsg, + const IMG_CHAR* pszPPFunc, + IMG_UINT32 ui32PPline) +{ + PVR_UNREFERENCED_PARAMETER(ui32PanicNo); + PVR_UNREFERENCED_PARAMETER(pszPanicMsg); + PVR_UNREFERENCED_PARAMETER(pszPPFunc); + PVR_UNREFERENCED_PARAMETER(ui32PPline); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpCaptureError) +#endif +static INLINE PVRSRV_ERROR +PDumpCaptureError(PVRSRV_ERROR ui32ErrorNo, + IMG_CHAR* pszErrorMsg, + const IMG_CHAR* pszPPFunc, + IMG_UINT32 ui32PPline) +{ + PVR_UNREFERENCED_PARAMETER(ui32ErrorNo); + PVR_UNREFERENCED_PARAMETER(pszErrorMsg); + PVR_UNREFERENCED_PARAMETER(pszPPFunc); + PVR_UNREFERENCED_PARAMETER(ui32PPline); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpIsLastCaptureFrameKM) +#endif +static INLINE PVRSRV_ERROR +PDumpIsLastCaptureFrameKM(IMG_BOOL *pbIsLastCaptureFrame) +{ + *pbIsLastCaptureFrame = IMG_FALSE; + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpGetStateKM) +#endif +static INLINE PVRSRV_ERROR +PDumpGetStateKM(IMG_UINT64 *ui64State) +{ + *ui64State = 0; + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpIsCaptureFrameKM) +#endif +static INLINE PVRSRV_ERROR +PDumpIsCaptureFrameKM(IMG_BOOL *bIsCapturing) +{ + *bIsCapturing = IMG_FALSE; + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpGetCurrentBlockKM) +#endif +static INLINE PVRSRV_ERROR +PDumpGetCurrentBlockKM(IMG_UINT32 *pui32BlockNum) +{ + *pui32BlockNum = PDUMP_BLOCKNUM_INVALID; + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma 
inline(PDumpForceCaptureStopKM) +#endif +static INLINE PVRSRV_ERROR +PDumpForceCaptureStopKM(void) +{ + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpBitmapKM) +#endif +static INLINE PVRSRV_ERROR +PDumpBitmapKM(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32Width, + IMG_UINT32 ui32Height, + IMG_UINT32 ui32StrideInBytes, + IMG_DEV_VIRTADDR sDevBaseAddr, + IMG_UINT32 ui32MMUContextID, + IMG_UINT32 ui32Size, + PDUMP_PIXEL_FORMAT ePixelFormat, + IMG_UINT32 ui32AddrMode, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(ui32FileOffset); + PVR_UNREFERENCED_PARAMETER(ui32Width); + PVR_UNREFERENCED_PARAMETER(ui32Height); + PVR_UNREFERENCED_PARAMETER(ui32StrideInBytes); + PVR_UNREFERENCED_PARAMETER(sDevBaseAddr); + PVR_UNREFERENCED_PARAMETER(ui32MMUContextID); + PVR_UNREFERENCED_PARAMETER(ui32Size); + PVR_UNREFERENCED_PARAMETER(ePixelFormat); + PVR_UNREFERENCED_PARAMETER(ui32AddrMode); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpImageDescriptor) +#endif +static INLINE PVRSRV_ERROR +PDumpImageDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32MMUContextID, + IMG_CHAR *pszSABFileName, + IMG_DEV_VIRTADDR sData, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32LogicalWidth, + IMG_UINT32 ui32LogicalHeight, + IMG_UINT32 ui32PhysicalWidth, + IMG_UINT32 ui32PhysicalHeight, + PDUMP_PIXEL_FORMAT ePixFmt, + IMG_MEMLAYOUT eMemLayout, + IMG_FB_COMPRESSION eFBCompression, + const IMG_UINT32 *paui32FBCClearColour, + PDUMP_FBC_SWIZZLE eFBCSwizzle, + IMG_DEV_VIRTADDR sHeader, + IMG_UINT32 ui32HeaderSize, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(ui32MMUContextID); + PVR_UNREFERENCED_PARAMETER(pszSABFileName); + PVR_UNREFERENCED_PARAMETER(sData); + 
PVR_UNREFERENCED_PARAMETER(ui32DataSize); + PVR_UNREFERENCED_PARAMETER(ui32LogicalWidth); + PVR_UNREFERENCED_PARAMETER(ui32LogicalHeight); + PVR_UNREFERENCED_PARAMETER(ui32PhysicalWidth); + PVR_UNREFERENCED_PARAMETER(ui32PhysicalHeight); + PVR_UNREFERENCED_PARAMETER(ePixFmt); + PVR_UNREFERENCED_PARAMETER(eMemLayout); + PVR_UNREFERENCED_PARAMETER(eFBCompression); + PVR_UNREFERENCED_PARAMETER(paui32FBCClearColour); + PVR_UNREFERENCED_PARAMETER(eFBCSwizzle); + PVR_UNREFERENCED_PARAMETER(sHeader); + PVR_UNREFERENCED_PARAMETER(ui32HeaderSize); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpDataDescriptor) +#endif +static INLINE PVRSRV_ERROR +PDumpDataDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32MMUContextID, + IMG_CHAR *pszSABFileName, + IMG_DEV_VIRTADDR sData, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32ElementType, + IMG_UINT32 ui32ElementCount, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(ui32MMUContextID); + PVR_UNREFERENCED_PARAMETER(pszSABFileName); + PVR_UNREFERENCED_PARAMETER(sData); + PVR_UNREFERENCED_PARAMETER(ui32DataSize); + PVR_UNREFERENCED_PARAMETER(ui32ElementType); + PVR_UNREFERENCED_PARAMETER(ui32ElementCount); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpRegisterConnection) +#endif +static INLINE PVRSRV_ERROR +PDumpRegisterConnection(void *hSyncPrivData, + PFN_PDUMP_SYNCBLOCKS pfnPDumpSyncBlocks, + PDUMP_CONNECTION_DATA **ppsPDumpConnectionData) +{ + PVR_UNREFERENCED_PARAMETER(hSyncPrivData); + PVR_UNREFERENCED_PARAMETER(pfnPDumpSyncBlocks); + PVR_UNREFERENCED_PARAMETER(ppsPDumpConnectionData); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpUnregisterConnection) +#endif +static INLINE void +PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData) +{ + 
PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpRegisterTransitionCallback) +#endif +static INLINE PVRSRV_ERROR +PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData, + PFN_PDUMP_TRANSITION pfnCallback, + void *hPrivData, + void *pvDevice, + void **ppvHandle) +{ + PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData); + PVR_UNREFERENCED_PARAMETER(pfnCallback); + PVR_UNREFERENCED_PARAMETER(hPrivData); + PVR_UNREFERENCED_PARAMETER(pvDevice); + PVR_UNREFERENCED_PARAMETER(ppvHandle); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpUnregisterTransitionCallback) +#endif +static INLINE void +PDumpUnregisterTransitionCallback(void *pvHandle) +{ + PVR_UNREFERENCED_PARAMETER(pvHandle); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpRegisterTransitionCallback) +#endif +static INLINE PVRSRV_ERROR +PDumpRegisterTransitionCallbackFenceSync(void *hPrivData, + PFN_PDUMP_TRANSITION_FENCE_SYNC pfnCallback, + void **ppvHandle) +{ + PVR_UNREFERENCED_PARAMETER(pfnCallback); + PVR_UNREFERENCED_PARAMETER(hPrivData); + PVR_UNREFERENCED_PARAMETER(ppvHandle); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpUnregisterTransitionCallbackFenceSync) +#endif +static INLINE void +PDumpUnregisterTransitionCallbackFenceSync(void *pvHandle) +{ + PVR_UNREFERENCED_PARAMETER(pvHandle); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpTransition) +#endif +static INLINE PVRSRV_ERROR +PDumpTransition(PDUMP_CONNECTION_DATA *psPDumpConnectionData, + PDUMP_TRANSITION_EVENT eEvent, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData); + PVR_UNREFERENCED_PARAMETER(eEvent); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + return PVRSRV_OK; +} + +#if defined(LINUX) || defined(GCC_IA32) || defined(GCC_ARM) || defined(__QNXNTO__) || defined(INTEGRITY_OS) + #define PDUMPINIT PDumpInitCommon + #define PDUMPDEINIT(args...) + #define PDUMPREG32(args...) 
+ #define PDUMPREG64(args...) + #define PDUMPREGREAD32(args...) + #define PDUMPREGREAD64(args...) + #define PDUMPCOMMENT(args...) + #define PDUMPREGPOL(args...) + #define PDUMPPDREG(args...) + #define PDUMPPDREGWITHFLAGS(args...) + #define PDUMPSYNC(args...) + #define PDUMPCOPYTOMEM(args...) + #define PDUMPWRITE(args...) + #define PDUMPREGBASEDCBP(args...) + #define PDUMPCOMMENTWITHFLAGS(args...) + #define PDUMPENDINITPHASE(args...) + #define PDUMPIDLWITHFLAGS(args...) + #define PDUMPIDL(args...) + #define PDUMPPOWCMDSTART(args...) + #define PDUMPPOWCMDEND(args...) + #define PDUMP_LOCK(args...) + #define PDUMP_UNLOCK(args...) + #define PDUMPIF(args...) + #define PDUMPFI(args...) + #define PDUMPCOM(args...) +#else + #error Compiler not specified +#endif + +#endif /* PDUMP */ + +#endif /* PDUMP_KM_H */ + +/****************************************************************************** + End of file (pdump_km.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/pdump_mmu.h b/drivers/mcst/gpu-imgtec/services/server/include/pdump_mmu.h new file mode 100644 index 000000000000..a53363955a22 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/pdump_mmu.h @@ -0,0 +1,171 @@ +/**************************************************************************/ /*! +@File +@Title Common MMU Management +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements basic low level control of MMU. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#ifndef SRVKM_PDUMP_MMU_H +#define SRVKM_PDUMP_MMU_H + +/* services/server/include/ */ +#include "pdump_symbolicaddr.h" +/* include/ */ +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "mmu_common.h" + +/* + * PDUMP MMU attributes + */ +typedef struct _PDUMP_MMU_ATTRIB_DEVICE_ +{ + /* Per-Device Pdump attribs */ + + /*!< Pdump memory bank name */ + IMG_CHAR *pszPDumpMemDevName; + + /*!< Pdump register bank name */ + IMG_CHAR *pszPDumpRegDevName; + +} PDUMP_MMU_ATTRIB_DEVICE; + +typedef struct _PDUMP_MMU_ATTRIB_CONTEXT_ +{ + IMG_UINT32 ui32Dummy; +} PDUMP_MMU_ATTRIB_CONTEXT; + +typedef struct _PDUMP_MMU_ATTRIB_HEAP_ +{ + /* data page info */ + IMG_UINT32 ui32DataPageMask; +} PDUMP_MMU_ATTRIB_HEAP; + +typedef struct _PDUMP_MMU_ATTRIB_ +{ + struct _PDUMP_MMU_ATTRIB_DEVICE_ sDevice; + struct _PDUMP_MMU_ATTRIB_CONTEXT_ sContext; + struct _PDUMP_MMU_ATTRIB_HEAP_ sHeap; +} PDUMP_MMU_ATTRIB; + +#if defined(PDUMP) +PVRSRV_ERROR +PDumpMMUMalloc(const IMG_CHAR *pszPDumpDevName, + MMU_LEVEL eMMULevel, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32Align, + PDUMP_MMU_TYPE eMMUType); + +PVRSRV_ERROR +PDumpMMUFree(const IMG_CHAR *pszPDumpDevName, + MMU_LEVEL eMMULevel, + IMG_DEV_PHYADDR *psDevPAddr, + PDUMP_MMU_TYPE eMMUType); + +PVRSRV_ERROR +PDumpPTBaseObjectToMem64(const IMG_CHAR *pszPDumpDevName, + PMR *psPMRDest, + IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource, + IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest, + IMG_UINT32 ui32Flags, + MMU_LEVEL eMMULevel, + IMG_UINT64 ui64PxSymAddr, + IMG_UINT64 ui64PxOffset); + +PVRSRV_ERROR +PDumpMMUDumpPxEntries(MMU_LEVEL eMMULevel, + const IMG_CHAR *pszPDumpDevName, + void *pvPxMem, + IMG_DEV_PHYADDR sPxDevPAddr, + IMG_UINT32 uiFirstEntry, + IMG_UINT32 uiNumEntries, + const IMG_CHAR *pszMemspaceName, + const IMG_CHAR *pszSymbolicAddr, + IMG_UINT64 uiSymbolicAddrOffset, + IMG_UINT32 uiBytesPerEntry, + IMG_UINT32 uiLog2Align, + 
IMG_UINT32 uiAddrShift, + IMG_UINT64 uiAddrMask, + IMG_UINT64 uiPxEProtMask, + IMG_UINT64 uiDataValidEnable, + IMG_UINT32 ui32Flags, + PDUMP_MMU_TYPE eMMUType); + +PVRSRV_ERROR +PDumpMMUAllocMMUContext(const IMG_CHAR *pszPDumpMemSpaceName, + IMG_DEV_PHYADDR sPCDevPAddr, + PDUMP_MMU_TYPE eMMUType, + IMG_UINT32 *pui32MMUContextID, + IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR +PDumpMMUFreeMMUContext(const IMG_CHAR *pszPDumpMemSpaceName, + IMG_UINT32 ui32MMUContextID, + IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR +PDumpMMUSAB(const IMG_CHAR *pszPDumpMemNamespace, + IMG_UINT32 uiPDumpMMUCtx, + IMG_DEV_VIRTADDR sDevAddrStart, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszFilename, + IMG_UINT32 uiFileOffset, + IMG_UINT32 ui32PDumpFlags); + +#define PDUMP_MMU_ALLOC_MMUCONTEXT(pszPDumpMemDevName, sPCDevPAddr, eMMUType, puiPDumpCtxID, ui32PDumpFlags) \ + PDumpMMUAllocMMUContext(pszPDumpMemDevName, \ + sPCDevPAddr, \ + eMMUType, \ + puiPDumpCtxID, \ + ui32PDumpFlags) + +#define PDUMP_MMU_FREE_MMUCONTEXT(pszPDumpMemDevName, uiPDumpCtxID, ui32PDumpFlags) \ + PDumpMMUFreeMMUContext(pszPDumpMemDevName, uiPDumpCtxID, ui32PDumpFlags) +#else /* PDUMP */ + +#define PDUMP_MMU_ALLOC_MMUCONTEXT(pszPDumpMemDevName, sPCDevPAddr, eMMUType, puiPDumpCtxID, ui32PDumpFlags) \ + ((void)0) +#define PDUMP_MMU_FREE_MMUCONTEXT(pszPDumpMemDevName, uiPDumpCtxID, ui32PDumpFlags) \ + ((void)0) + +#endif /* PDUMP */ + +#endif diff --git a/drivers/mcst/gpu-imgtec/services/server/include/pdump_physmem.h b/drivers/mcst/gpu-imgtec/services/server/include/pdump_physmem.h new file mode 100644 index 000000000000..d2fc5638e481 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/pdump_physmem.h @@ -0,0 +1,243 @@ +/**************************************************************************/ /*! +@File +@Title pdump functions to assist with physmem allocations +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements basic low level control of MMU. 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#ifndef SRVSRV_PDUMP_PHYSMEM_H +#define SRVSRV_PDUMP_PHYSMEM_H + +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "pmr.h" + +#define PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH 40 +#define PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH 60 +#define PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH (PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH + PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH) + +typedef struct _PDUMP_PHYSMEM_INFO_T_ PDUMP_PHYSMEM_INFO_T; + +#if defined(PDUMP) +PVRSRV_ERROR +PDumpGetSymbolicAddr(const IMG_HANDLE hPhysmemPDumpHandle, + IMG_CHAR **ppszSymbolicAddress); + +PVRSRV_ERROR +PDumpMalloc(const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicAddress, + IMG_UINT64 ui64Size, + /* alignment is alignment of start of buffer _and_ + minimum contiguity - i.e. smallest allowable + page-size. 
*/ + IMG_DEVMEM_ALIGN_T uiAlign, + IMG_BOOL bInitialise, + IMG_UINT32 ui32InitValue, + IMG_HANDLE *phHandlePtr, + IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR +PDumpFree(IMG_HANDLE hPDumpAllocationInfoHandle); + +void +PDumpMakeStringValid(IMG_CHAR *pszString, + IMG_UINT32 ui32StrLen); +#else /* PDUMP */ + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpGetSymbolicAddr) +#endif +static INLINE PVRSRV_ERROR +PDumpGetSymbolicAddr(const IMG_HANDLE hPhysmemPDumpHandle, + IMG_CHAR **ppszSymbolicAddress) +{ + PVR_UNREFERENCED_PARAMETER(hPhysmemPDumpHandle); + PVR_UNREFERENCED_PARAMETER(ppszSymbolicAddress); + return PVRSRV_OK; +} + +static INLINE PVRSRV_ERROR +PDumpMalloc(const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicAddress, + IMG_UINT64 ui64Size, + IMG_DEVMEM_ALIGN_T uiAlign, + IMG_BOOL bInitialise, + IMG_UINT32 ui32InitValue, + IMG_HANDLE *phHandlePtr, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(pszDevSpace); + PVR_UNREFERENCED_PARAMETER(pszSymbolicAddress); + PVR_UNREFERENCED_PARAMETER(ui64Size); + PVR_UNREFERENCED_PARAMETER(uiAlign); + PVR_UNREFERENCED_PARAMETER(bInitialise); + PVR_UNREFERENCED_PARAMETER(ui32InitValue); + PVR_UNREFERENCED_PARAMETER(phHandlePtr); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + return PVRSRV_OK; +} + +static INLINE PVRSRV_ERROR +PDumpFree(IMG_HANDLE hPDumpAllocationInfoHandle) +{ + PVR_UNREFERENCED_PARAMETER(hPDumpAllocationInfoHandle); + return PVRSRV_OK; +} +#endif /* PDUMP */ + +#define PMR_DEFAULT_PREFIX "PMR" +#define PMR_SYMBOLICADDR_FMTSPEC "%s%"IMG_UINT64_FMTSPEC"_%"IMG_UINT64_FMTSPEC"_%s" +#define PMR_MEMSPACE_FMTSPEC "%s" +#define PMR_MEMSPACE_CACHE_COHERENT_FMTSPEC "CC_%s" + +#if defined(PDUMP) +#define PDUMP_PHYSMEM_MALLOC_OSPAGES(pszPDumpMemDevName, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr) \ + PDumpMalloc(pszPDumpMemDevName, PMR_OSALLOCPAGES_PREFIX, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr, PDUMP_NONE) +#define 
PDUMP_PHYSMEM_FREE_OSPAGES(hHandle) \ + PDumpFree(hHandle) +#else +#define PDUMP_PHYSMEM_MALLOC_OSPAGES(pszPDumpMemDevName, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr) \ + ((void)(*phHandlePtr=NULL)) +#define PDUMP_PHYSMEM_FREE_OSPAGES(hHandle) \ + ((void)(0)) +#endif /* PDUMP */ + +PVRSRV_ERROR +PDumpPMRWRW32(const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicName, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32Value, + PDUMP_FLAGS_T uiPDumpFlags); + +PVRSRV_ERROR +PDumpPMRWRW32InternalVarToMem(const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicName, + IMG_DEVMEM_OFFSET_T uiOffset, + const IMG_CHAR *pszInternalVar, + PDUMP_FLAGS_T uiPDumpFlags); + +PVRSRV_ERROR +PDumpPMRRDW32MemToInternalVar(const IMG_CHAR *pszInternalVar, + const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicName, + IMG_DEVMEM_OFFSET_T uiOffset, + PDUMP_FLAGS_T uiPDumpFlags); + +PVRSRV_ERROR +PDumpPMRWRW64(const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicName, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT64 ui64Value, + PDUMP_FLAGS_T uiPDumpFlags); + +PVRSRV_ERROR +PDumpPMRWRW64InternalVarToMem(const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicName, + IMG_DEVMEM_OFFSET_T uiOffset, + const IMG_CHAR *pszInternalVar, + PDUMP_FLAGS_T uiPDumpFlags); + +PVRSRV_ERROR +PDumpPMRRDW64MemToInternalVar(const IMG_CHAR *pszInternalVar, + const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicName, + IMG_DEVMEM_OFFSET_T uiOffset, + PDUMP_FLAGS_T uiPDumpFlags); + +PVRSRV_ERROR +PDumpPMRLDB(const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicName, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszFilename, + IMG_UINT32 uiFileOffset, + PDUMP_FLAGS_T uiPDumpFlags); + +PVRSRV_ERROR +PDumpPMRSAB(const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicName, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszFileName, + IMG_UINT32 uiFileOffset); + +/* + PDumpPMRPOL() + + Emits a 
POL to the PDUMP. +*/ +PVRSRV_ERROR +PDumpPMRPOL(const IMG_CHAR *pszMempaceName, + const IMG_CHAR *pszSymbolicName, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + IMG_UINT32 uiCount, + IMG_UINT32 uiDelay, + PDUMP_FLAGS_T uiPDumpFlags); + +PVRSRV_ERROR +PDumpPMRCBP(const IMG_CHAR *pszMemspaceName, + const IMG_CHAR *pszSymbolicName, + IMG_DEVMEM_OFFSET_T uiReadOffset, + IMG_DEVMEM_OFFSET_T uiWriteOffset, + IMG_DEVMEM_SIZE_T uiPacketSize, + IMG_DEVMEM_SIZE_T uiBufferSize); + +/* + * PDumpWriteParameterBlob() + * + * Writes a binary blob to the pdump param stream containing the current + * contents of the memory, and returns the filename and offset of where + * that blob is located (for use in a subsequent LDB, for example). + * + * Caller to provide buffer to receive filename, and declare the size of + * that buffer. + */ +PVRSRV_ERROR +PDumpWriteParameterBlob(IMG_UINT8 *pcBuffer, + size_t uiNumBytes, + PDUMP_FLAGS_T uiPDumpFlags, + IMG_CHAR *pszFilenameOut, + size_t uiFilenameBufSz, + PDUMP_FILEOFFSET_T *puiOffsetOut); + +#endif /* #ifndef SRVSRV_PDUMP_PHYSMEM_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/pdump_symbolicaddr.h b/drivers/mcst/gpu-imgtec/services/server/include/pdump_symbolicaddr.h new file mode 100644 index 000000000000..ed912a5096c9 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/pdump_symbolicaddr.h @@ -0,0 +1,55 @@ +/**************************************************************************/ /*! +@File +@Title Abstraction of PDUMP symbolic address derivation +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Allows pdump functions to derive symbolic addresses on-the-fly +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#ifndef SRVKM_PDUMP_SYMBOLICADDR_H +#define SRVKM_PDUMP_SYMBOLICADDR_H + +#include "img_types.h" + +#include "pvrsrv_error.h" + +/* pdump symbolic addresses are generated on-the-fly with a callback */ + +typedef PVRSRV_ERROR (*PVRSRV_SYMADDRFUNCPTR)(IMG_HANDLE hPriv, IMG_UINT32 uiOffset, IMG_CHAR *pszSymbolicAddr, IMG_UINT32 ui32SymbolicAddrLen, IMG_UINT32 *pui32NewOffset); + +#endif /* #ifndef SRVKM_PDUMP_SYMBOLICADDR_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/physmem.h b/drivers/mcst/gpu-imgtec/services/server/include/physmem.h new file mode 100644 index 000000000000..c554ed4dc5c8 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/physmem.h @@ -0,0 +1,234 @@ +/*************************************************************************/ /*! +@File +@Title Physmem header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for common entry point for creation of RAM backed PMR's +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef SRVSRV_PHYSMEM_H +#define SRVSRV_PHYSMEM_H + +/* include/ */ +#include "img_types.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" +#include "connection_server.h" + +/* services/server/include/ */ +#include "pmr.h" +#include "pmr_impl.h" + +/* Valid values for TC_MEMORY_CONFIG configuration option */ +#define TC_MEMORY_LOCAL (1) +#define TC_MEMORY_HOST (2) +#define TC_MEMORY_HYBRID (3) + +/* Valid values for the PLATO_MEMORY_CONFIG configuration option */ +#define PLATO_MEMORY_LOCAL (1) +#define PLATO_MEMORY_HOST (2) +#define PLATO_MEMORY_HYBRID (3) + +/*************************************************************************/ /*! +@Function DevPhysMemAlloc +@Description Allocate memory from device specific heaps directly. 
+@Input psDevNode device node to operate on +@Input ui32MemSize Size of the memory to be allocated +@Input u8Value Value to be initialised to. +@Input bInitPage Flag to control initialisation +@Input pszDevSpace PDUMP memory space in which the + allocation is to be done +@Input pszSymbolicAddress Symbolic name of the allocation +@Input phHandlePtr PDUMP handle to the allocation +@Output hMemHandle Handle to the allocated memory +@Output psDevPhysAddr Device Physical address of allocated + page +@Return PVRSRV_OK if the allocation is successful +*/ /**************************************************************************/ +PVRSRV_ERROR +DevPhysMemAlloc(PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32MemSize, + IMG_UINT32 ui32Log2Align, + const IMG_UINT8 u8Value, + IMG_BOOL bInitPage, +#if defined(PDUMP) + const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicAddress, + IMG_HANDLE *phHandlePtr, +#endif + IMG_HANDLE hMemHandle, + IMG_DEV_PHYADDR *psDevPhysAddr); + +/*************************************************************************/ /*! +@Function DevPhysMemFree +@Description Free memory to device specific heaps directly. +@Input psDevNode device node to operate on +@Input hPDUMPMemHandle Pdump handle to allocated memory +@Input hMemHandle Devmem handle to allocated memory +@Return None +*/ /**************************************************************************/ +void +DevPhysMemFree(PVRSRV_DEVICE_NODE *psDevNode, +#if defined(PDUMP) + IMG_HANDLE hPDUMPMemHandle, +#endif + IMG_HANDLE hMemHandle); + +/* + * PhysmemNewRamBackedPMR + * + * This function will create a RAM backed PMR using the device specific + * callback, this allows control at a per-devicenode level to select the + * memory source thus supporting mixed UMA/LMA systems. + * + * The size must be a multiple of page size. The page size is specified in + * log2. It should be regarded as a minimum contiguity of which the + * resulting memory must be a multiple. 
It may be that this should be a fixed + * number. It may be that the allocation size needs to be a multiple of some + * coarser "page size" than that specified in the page size argument. + * For example, take an OS whose page granularity is a fixed 16kB, but the + * caller requests memory in page sizes of 4kB. The request can be satisfied + * if and only if the SIZE requested is a multiple of 16kB. If the arguments + * supplied are such that this OS cannot grant the request, + * PVRSRV_ERROR_INVALID_PARAMS will be returned. + * + * The caller should supply storage of a pointer. Upon successful return a + * PMR object will have been created and a pointer to it returned in the + * PMROut argument. + * + * A PMR successfully created should be destroyed with PhysmemUnrefPMR. + * + * Note that this function may cause memory allocations and on some operating + * systems this may cause scheduling events, so it is important that this + * function be called with interrupts enabled and in a context where + * scheduling events and memory allocations are permitted. + * + * The flags may be used by the implementation to change its behaviour if + * required. The flags will also be stored in the PMR as immutable metadata + * and returned to mmu_common when it asks for it. + * + * The PID specified is used to tie this allocation to the process context + * that the allocation is made on behalf of. + */ +PVRSRV_ERROR +PhysmemNewRamBackedPMR(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 uiAnnotationLength, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMROut, + IMG_UINT32 ui32PDumpFlags); + + +/* + * PhysmemNewRamBackedLockedPMR + * + * Same as function above but is additionally locking down the PMR. 
+ * + * Get the physical memory and lock down the PMR directly, we do not want to + * defer the actual allocation to mapping time. + * + * In general the concept of on-demand allocations is not useful for + * allocations where we give the users the freedom to map and unmap memory at + * will. The user is not expecting their memory contents to suddenly vanish + * just because they unmapped the buffer. + * Even if they would know and be ok with it, we do not want to check for + * every page we unmap whether we have to unlock the underlying PMR. +*/ +PVRSRV_ERROR +PhysmemNewRamBackedLockedPMR(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T uiSize, + PMR_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 uiAnnotationLength, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags); + +/*************************************************************************/ /*! 
+@Function PhysmemImportPMR +@Description Import PMR a previously exported PMR +@Input psPMRExport The exported PMR token +@Input uiPassword Authorisation password + for the PMR being imported +@Input uiSize Size of the PMR being imported + (for verification) +@Input uiLog2Contig Log2 continuity of the PMR being + imported (for verification) +@Output ppsPMR The imported PMR +@Return PVRSRV_ERROR_PMR_NOT_PERMITTED if not for the same device + PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR if password incorrect + PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES if size or contiguity incorrect + PVRSRV_OK if successful +*/ /**************************************************************************/ +PVRSRV_ERROR +PhysmemImportPMR(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + PMR_EXPORT *psPMRExport, + PMR_PASSWORD_T uiPassword, + PMR_SIZE_T uiSize, + PMR_LOG2ALIGN_T uiLog2Contig, + PMR **ppsPMR); + +/*************************************************************************/ /*! +@Function PVRSRVGetMaxDevMemSizeKM +@Description Get the amount of device memory on current platform +@Output puiLMASize LMA memory size +@Output puiUMASize UMA memory size +@Return None +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVGetMaxDevMemSizeKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T *puiLMASize, + IMG_DEVMEM_SIZE_T *puiUMASize); + +#endif /* SRVSRV_PHYSMEM_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/physmem_dmabuf.h b/drivers/mcst/gpu-imgtec/services/server/include/physmem_dmabuf.h new file mode 100644 index 000000000000..33981789c86b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/physmem_dmabuf.h @@ -0,0 +1,114 @@ +/**************************************************************************/ /*! +@File physmem_dmabuf.h +@Title Header for dmabuf PMR factory +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Part of the memory management. This module is responsible for + implementing the function callbacks importing Ion allocations +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#if !defined(_PHYSMEM_DMABUF_H_) +#define _PHYSMEM_DMABUF_H_ + +#include + +#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__) +#define __pvrsrv_defined_struct_enum__ +#include +#endif + +#include "img_types.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" +#include "connection_server.h" + +#include "pmr.h" + +typedef PVRSRV_ERROR (*PFN_DESTROY_DMABUF_PMR)(PHYS_HEAP *psHeap, + struct dma_buf_attachment *psAttachment); + +PVRSRV_ERROR +PhysmemCreateNewDmaBufBackedPMR(PVRSRV_DEVICE_NODE *psDevNode, + PHYS_HEAP *psHeap, + struct dma_buf_attachment *psAttachment, + PFN_DESTROY_DMABUF_PMR pfnDestroy, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 ui32NameSize, + const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], + PMR **ppsPMRPtr); + +struct dma_buf * +PhysmemGetDmaBuf(PMR *psPMR); + +PVRSRV_ERROR +PhysmemExportDmaBuf(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + PMR *psPMR, + IMG_INT *piFd); + +PVRSRV_ERROR +PhysmemImportDmaBuf(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_INT fd, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 ui32NameSize, + const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], + PMR **ppsPMRPtr, + IMG_DEVMEM_SIZE_T *puiSize, + 
IMG_DEVMEM_ALIGN_T *puiAlign); + +PVRSRV_ERROR +PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_INT fd, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 ui32NameSize, + const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], + PMR **ppsPMRPtr, + IMG_DEVMEM_SIZE_T *puiSize, + IMG_DEVMEM_ALIGN_T *puiAlign); + +#endif /* !defined(_PHYSMEM_DMABUF_H_) */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/physmem_fwdedicatedmem.h b/drivers/mcst/gpu-imgtec/services/server/include/physmem_fwdedicatedmem.h new file mode 100644 index 000000000000..f16bde444e25 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/physmem_fwdedicatedmem.h @@ -0,0 +1,68 @@ +/**************************************************************************/ /*! +@File +@Title Header for dedicated firmware memory PMR factory +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Part of the memory management. This module is responsible for + implementing the function callbacks importing secure firmware + code allocations. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#ifndef PHYSMEM_FWDEDICATEDMEM_H +#define PHYSMEM_FWDEDICATEDMEM_H + +#include "img_types.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" +#include "pmr.h" + +#if !defined(NO_HARDWARE) +PVRSRV_ERROR PhysmemInitFWDedicatedMem(PVRSRV_DEVICE_NODE *psDeviceNode, + PVRSRV_DEVICE_CONFIG *psDevConfig); + +void PhysmemDeinitFWDedicatedMem(PVRSRV_DEVICE_NODE *psDeviceNode); +#endif + +PVRSRV_ERROR PhysmemNewFWDedicatedMemPMR(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T uiSize, + PMR_LOG2ALIGN_T uiLog2Align, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + PMR **ppsPMRPtr); + +#endif /* PHYSMEM_FWDEDICATEDMEM_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/physmem_hostmem.h b/drivers/mcst/gpu-imgtec/services/server/include/physmem_hostmem.h new file mode 100644 index 000000000000..c9e75c9156a8 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/physmem_hostmem.h @@ -0,0 +1,54 @@ +/*************************************************************************/ /*! +@File physmem_hostmem.h +@Title Host memory device node header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__PHYSMEM_HOSTMEM_H__) +#define __PHYSMEM_HOSTMEM_H__ + +#include "pvrsrv_device.h" + +/*! 
Heap ID of the host driver's device heap */ +#define PHYS_HEAP_ID_HOSTMEM (~((IMG_UINT32)0)) + +PVRSRV_DEVICE_CONFIG* HostMemGetDeviceConfig(void); + +#endif /* !defined(__PHYSMEM_HOSTMEM_H__) */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/physmem_lma.h b/drivers/mcst/gpu-imgtec/services/server/include/physmem_lma.h new file mode 100644 index 000000000000..ef346e78f08b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/physmem_lma.h @@ -0,0 +1,78 @@ +/**************************************************************************/ /*! +@File +@Title Header for local card memory allocator +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Part of the memory management. This module is responsible for + implementing the function callbacks for local card memory. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#ifndef _SRVSRV_PHYSMEM_LMA_H_ +#define _SRVSRV_PHYSMEM_LMA_H_ + +/* include/ */ +#include "img_types.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" + +/* services/server/include/ */ +#include "pmr.h" +#include "pmr_impl.h" + +/* + * PhysmemNewLocalRamBackedPMR + * + * This function will create a PMR using the local card memory and is OS + * agnostic. 
+ */ +PVRSRV_ERROR +PhysmemNewLocalRamBackedPMR(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags); + +#endif /* #ifndef _SRVSRV_PHYSMEM_LMA_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/physmem_osmem.h b/drivers/mcst/gpu-imgtec/services/server/include/physmem_osmem.h new file mode 100644 index 000000000000..3c9bbd8074e4 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/physmem_osmem.h @@ -0,0 +1,128 @@ +/*************************************************************************/ /*! +@File physmem_osmem.h +@Title OS memory PMR factory API +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Part of Services memory management. This file defines the + OS memory PMR factory API that must be defined so that the + common & device layer code in the Services Server can allocate + new PMRs back with pages from the OS page allocator. Applicable + for UMA based platforms, such platforms must implement this API + in the OS Porting layer, in the "env" directory for that + system. + +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/
+
+#ifndef PHYSMEM_OSMEM_H
+#define PHYSMEM_OSMEM_H
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+/* services/server/include/ */
+#include "pmr.h"
+#include "pmr_impl.h"
+
+/*************************************************************************/ /*!
+@Function PhysmemNewOSRamBackedPMR
+@Description Rogue Services will call this function to allocate GPU device
+ memory from the PMR factory supported by the OS DDK port. This
+ factory typically obtains physical memory from the kernel/OS
+ API that allocates memory from the default heap of shared
+ system memory available on the platform. The allocated memory
+ must be page-aligned and be a whole number of pages.
+ After allocating the required memory, the implementation must
+ then call PMRCreatePMR() to obtain the PMR structure that
+ describes this allocation to the upper layers of the Services
+ memory management sub-system.
+ NB. Implementation of this function is mandatory. If shared
+ system memory is not to be used in the OS port then the
+ implementation must return PVRSRV_ERROR_NOT_SUPPORTED.
+
+@Input psConnection the connection to the originator process
+@Input psDevNode the device node
+@Input uiSize the size of the allocation
+ (must be a multiple of page size)
+@Input uiChunkSize when sparse allocations are requested,
+ this is the allocated chunk size.
+ For regular allocations, this will be
+ the same as uiSize.
+ (must be a multiple of page size)
+@Input ui32NumPhysChunks when sparse allocations are requested,
+ this is the number of physical chunks
+ to be allocated.
+ For regular allocations, this will be 1.
+@Input ui32NumVirtChunks when sparse allocations are requested,
+ this is the number of virtual chunks
+ covering the sparse allocation.
+ For regular allocations, this will be 1.
+@Input pui32MappingTable when sparse allocations are requested, + this is the list of the indices of + each physically-backed virtual chunk + For regular allocations, this will + be NULL. +@Input uiLog2PageSize the physical pagesize in log2(bytes). +@Input uiFlags the allocation flags. +@Input pszAnnotation string describing the PMR (for debug). + This should be passed into the function + PMRCreatePMR(). +@Input uiPid The process ID that this allocation should + be associated with. +@Output ppsPMROut pointer to the PMR created for the + new allocation +@Input ui32PDumpFlags the pdump flags. +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR +PhysmemNewOSRamBackedPMR(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMROut, + IMG_UINT32 ui32PDumpFlags); + +#endif /* PHYSMEM_OSMEM_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/physmem_tdfwmem.h b/drivers/mcst/gpu-imgtec/services/server/include/physmem_tdfwmem.h new file mode 100644 index 000000000000..364639d6be49 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/physmem_tdfwmem.h @@ -0,0 +1,72 @@ +/*************************************************************************/ /*! +@File +@Title Header for secure firmware memory PMR factory +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Part of the memory management. This module is responsible for + implementing the function callbacks importing secure firmware + code allocations. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef PHYSMEM_TDFWMEM_H +#define PHYSMEM_TDFWMEM_H + +#include "img_types.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" +#include "pmr.h" + +/* + * PhysmemNewTDFWMemPMR + * + * This function is used as part of the facility to provide secure Firmware + * memory. A default implementation is provided but it can be replaced by + * the SoC implementer if necessary. + * + * Calling this function will create a PMR for a memory allocation made + * in "secure Firmware memory". + */ +PVRSRV_ERROR PhysmemNewTDFWMemPMR(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T uiSize, + PMR_LOG2ALIGN_T uiLog2Align, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + PVRSRV_TD_FW_MEM_REGION eRegion, + PMR **ppsPMRPtr); + +#endif /* PHYSMEM_TDFWMEM_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/pmr.h b/drivers/mcst/gpu-imgtec/services/server/include/pmr.h new file mode 100644 index 000000000000..d174535099cf --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/pmr.h @@ -0,0 +1,1131 @@ +/*************************************************************************/ /*! +@File +@Title Physmem (PMR) abstraction +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Part of the memory management. This module is responsible for + the "PMR" abstraction. A PMR (Physical Memory Resource) + represents some unit of physical memory which is + allocated/freed/mapped/unmapped as an indivisible unit + (higher software levels provide an abstraction above that + to deal with dividing this down into smaller manageable units). + Importantly, this module knows nothing of virtual memory, or + of MMUs etc., with one excusable exception. 
We have the + concept of a "page size", which really means nothing in + physical memory, but represents a "contiguity quantum" such + that the higher level modules which map this memory are able + to verify that it matches the needs of the page size for the + virtual realm into which it is being mapped. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _SRVSRV_PMR_H_ +#define _SRVSRV_PMR_H_ + +/* include/ */ +#include "img_types.h" +#include "img_defs.h" +#include "pdumpdefs.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" +#include "devicemem_typedefs.h" /* Required for export DEVMEM_EXPORTCOOKIE */ + +/* services/include */ +#include "pdump.h" +#include "physheap.h" + +/* services/server/include/ */ +#include "pmr_impl.h" +#include "opaque_types.h" + +#define PMR_MAX_TRANSLATION_STACK_ALLOC (32) + +/* Maximum number of pages a PMR can have is 1G of memory */ +#define PMR_MAX_SUPPORTED_PAGE_COUNT (262144) + +typedef IMG_UINT64 PMR_BASE_T; +typedef IMG_UINT64 PMR_SIZE_T; +#define PMR_SIZE_FMTSPEC "0x%010"IMG_UINT64_FMTSPECX +#define PMR_VALUE32_FMTSPEC "0x%08X" +#define PMR_VALUE64_FMTSPEC "0x%016"IMG_UINT64_FMTSPECX +typedef IMG_UINT32 PMR_LOG2ALIGN_T; +typedef IMG_UINT64 PMR_PASSWORD_T; + +struct _PMR_MAPPING_TABLE_ +{ + PMR_SIZE_T uiChunkSize; /*!< Size of a "chunk" */ + IMG_UINT32 ui32NumPhysChunks; /*!< Number of physical chunks that are valid */ + IMG_UINT32 ui32NumVirtChunks; /*!< Number of virtual chunks in the mapping */ + /* Must be last */ + IMG_UINT32 aui32Translation[1]; /*!< Translation mapping for "logical" to physical */ +}; + +#define TRANSLATION_INVALID 0xFFFFFFFFUL + +typedef struct _PMR_EXPORT_ PMR_EXPORT; + +typedef struct _PMR_PAGELIST_ PMR_PAGELIST; + 
+//typedef struct _PVRSRV_DEVICE_NODE_ *PPVRSRV_DEVICE_NODE; + +/* + * PMRCreatePMR + * + * Not to be called directly, only via implementations of PMR + * factories, e.g. in physmem_osmem.c, deviceclass.c, etc. + * + * Creates a PMR object, with callbacks and private data as per the + * FuncTab/PrivData args. + * + * Note that at creation time the PMR must set in stone the "logical + * size" and the "contiguity guarantee" + * + * Flags are also set at this time. (T.B.D. flags also immutable for + * the life of the PMR?) + * + * Logical size is the amount of Virtual space this allocation would + * take up when mapped. Note that this does not have to be the same + * as the actual physical size of the memory. For example, consider + * the sparsely allocated non-power-of-2 texture case. In this + * instance, the "logical size" would be the virtual size of the + * rounded-up power-of-2 texture. That some pages of physical memory + * may not exist does not affect the logical size calculation. + * + * The PMR must also supply the "contiguity guarantee" which is the + * finest granularity of alignment and size of physical pages that the + * PMR will provide after LockSysPhysAddresses is called. Note that + * the calling code may choose to call PMRSysPhysAddr with a finer + * granularity than this, for example if it were to map into a device + * MMU with a smaller page size, and it's also OK for the PMR to + * supply physical memory in larger chunks than this. But + * importantly, never the other way around. 
+ *
+ * More precisely, the following inequality must be maintained
+ * whenever mappings and/or physical addresses exist:
+ *
+ * (device MMU page size) <= 2**(uiLog2ContiguityGuarantee) <= (actual contiguity of physical memory)
+ *
+ * The function table will contain the following callbacks which may
+ * be overridden by the PMR implementation:
+ *
+ * pfnLockPhysAddresses
+ *
+ * Called when someone requests that physical pages are to
+ * be locked down via the PMRLockSysPhysAddresses() API. Note
+ * that if physical pages are prefaulted at PMR creation time and
+ * therefore static, it would not be necessary to override this
+ * function, in which case NULL may be supplied.
+ *
+ * pfnUnlockPhysAddresses
+ *
+ * The reverse of pfnLockPhysAddresses. Note that this should be
+ * NULL if and only if pfnLockPhysAddresses is NULL
+ *
+ * pfnSysPhysAddr
+ *
+ * This function is mandatory. This is the one which returns the
+ * system physical address for a given offset into this PMR. The
+ * "lock" function will have been called, if overridden, before
+ * this function, thus the implementation should not increase any
+ * refcount when answering this call. Refcounting, if necessary,
+ * should be done in the lock/unlock calls. Refcounting would
+ * not be necessary in the prefaulted/static scenario, as the
+ * pmr.c abstraction will handle the refcounting for the whole
+ * PMR.
+ *
+ * pfnFinalize
+ *
+ * Called when the PMR's refcount reaches zero and it gets
+ * destroyed. This allows the implementation to free up any
+ * resource acquired during creation time.
+ * + */ +PVRSRV_ERROR +PMRCreatePMR(PPVRSRV_DEVICE_NODE psDevNode, + PHYS_HEAP *psPhysHeap, + PMR_SIZE_T uiLogicalSize, + PMR_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee, + PMR_FLAGS_T uiFlags, + const IMG_CHAR *pszAnnotation, + const PMR_IMPL_FUNCTAB *psFuncTab, + PMR_IMPL_PRIVDATA pvPrivData, + PMR_IMPL_TYPE eType, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags); + +/* + * PMRLockSysPhysAddresses() + * + * Calls the relevant callback to lock down the system physical addresses of + * the memory that makes up the whole PMR. + * + * Before this call, it is not valid to use any of the information + * getting APIs: PMR_Flags(), PMR_SysPhysAddr(), + * [ see note below about lock/unlock semantics ] + * + * The caller of this function does not have to care about how the PMR + * is implemented. He only has to know that he is allowed access to + * the physical addresses _after_ calling this function and _until_ + * calling PMRUnlockSysPhysAddresses(). + * + * + * Notes to callback implementers (authors of PMR Factories): + * + * Some PMR implementations will be such that the physical memory exists for + * the lifetime of the PMR, with a static address, (and normally flags and + * symbolic address are static too) and so it is legal for a PMR + * implementation to not provide an implementation for the lock callback. + * + * Some PMR implementation may wish to page memory in from secondary storage + * on demand. The lock/unlock callbacks _may_ be the place to do this. 
+ * (More likely, there would be a separate API for doing this, but this API + * provides a useful place to assert that it has been done) + */ + +PVRSRV_ERROR +PMRLockSysPhysAddresses(PMR *psPMR); + +PVRSRV_ERROR +PMRLockSysPhysAddressesNested(PMR *psPMR, + IMG_UINT32 ui32NestingLevel); + +/* + * PMRUnlockSysPhysAddresses() + * + * the reverse of PMRLockSysPhysAddresses() + */ +PVRSRV_ERROR +PMRUnlockSysPhysAddresses(PMR *psPMR); + +PVRSRV_ERROR +PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel); + + +/*************************************************************************/ /*! +@Function PMRUnpinPMR +@Description This is the counterpart to PMRPinPMR(). It is meant to be + called before repinning an allocation. + + For a detailed description see client API documentation. + +@Input psPMR The physical memory to unpin. + +@Input bDevMapped A flag that indicates if this PMR has been + mapped to device virtual space. + Needed to check if this PMR is allowed to be + unpinned or not. + +@Return PVRSRV_ERROR: PVRSRV_OK on success and the memory is + registered to be reclaimed. Error otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR PMRUnpinPMR(PMR *psPMR, IMG_BOOL bDevMapped); + +/*************************************************************************/ /*! +@Function PMRPinPMR +@Description This is the counterpart to PMRUnpinPMR(). It is meant to be + called after unpinning an allocation. + + For a detailed description see client API documentation. + +@Input psPMR The physical memory to pin. + +@Return PVRSRV_ERROR: PVRSRV_OK on success and the allocation content + was successfully restored. + + PVRSRV_ERROR_PMR_NEW_MEMORY when the content + could not be restored and new physical memory + was allocated. + + A different error otherwise. 
+*/ /**************************************************************************/ +PVRSRV_ERROR PMRPinPMR(PMR *psPMR); + +/* + * PhysmemPMRExport() + * + * Given a PMR, creates a PMR "Export", which is a handle that + * provides sufficient data to be able to "import" this PMR elsewhere. + * The PMR Export is an object in its own right, whose existence + * implies a reference on the PMR, thus the PMR cannot be destroyed + * while the PMR Export exists. The intention is that the PMR Export + * will be wrapped in the devicemem layer by a cross process handle, + * and some IPC by which to communicate the handle value and password + * to other processes. The receiving process is able to unwrap this + * to gain access to the same PMR Export in this layer, and, via + * PhysmemPMRImport(), obtain a reference to the original PMR. + * + * The caller receives, along with the PMR Export object, information + * about the size and contiguity guarantee for the PMR, and also the + * PMRs secret password, in order to authenticate the subsequent + * import. + * + * N.B. If you call PMRExportPMR() (and it succeeds), you are + * promising to later call PMRUnexportPMR() + */ +PVRSRV_ERROR +PMRExportPMR(PMR *psPMR, + PMR_EXPORT **ppsPMRExport, + PMR_SIZE_T *puiSize, + PMR_LOG2ALIGN_T *puiLog2Contig, + PMR_PASSWORD_T *puiPassword); + +/*! +******************************************************************************* + + @Function PMRMakeLocalImportHandle + + @Description + + Transform a general handle type into one that we are able to import. + Takes a PMR reference. + + @Input psPMR The input PMR. + @Output ppsPMR The output PMR that is going to be transformed to the + correct handle type. + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR +PMRMakeLocalImportHandle(PMR *psPMR, + PMR **ppsPMR); + +/*! 
+*******************************************************************************
+
+ @Function PMRUnmakeLocalImportHandle
+
+ @Description
+
+ Take a PMR, destroy the handle and release a reference.
+ Counterpart to PMRMakeLocalImportHandle().
+
+ @Input psPMR PMR to destroy.
+ Created by PMRMakeLocalImportHandle().
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR
+PMRUnmakeLocalImportHandle(PMR *psPMR);
+
+/*
+ * PMRUnexportPMR()
+ *
+ * The reverse of PMRExportPMR(). This causes the PMR to no longer be
+ * exported. If the PMR has already been imported, the imported PMR
+ * reference will still be valid, but no further imports will be possible.
+ */
+PVRSRV_ERROR
+PMRUnexportPMR(PMR_EXPORT *psPMRExport);
+
+/*
+ * PMRImportPMR()
+ *
+ * Takes a PMR Export object, as obtained by PMRExportPMR(), and
+ * obtains a reference to the original PMR.
+ *
+ * The password must match, and is assumed to have been (by whatever
+ * means, IPC etc.) preserved intact from the former call to
+ * PMRExportPMR()
+ *
+ * The size and contiguity arguments are entirely irrelevant for the
+ * import, however they are verified in order to trap bugs.
+ *
+ * N.B. If you call PhysmemPMRImport() (and it succeeds), you are
+ * promising to later call PhysmemPMRUnimport()
+ */
+PVRSRV_ERROR
+PMRImportPMR(PMR_EXPORT *psPMRExport,
+ PMR_PASSWORD_T uiPassword,
+ PMR_SIZE_T uiSize,
+ PMR_LOG2ALIGN_T uiLog2Contig,
+ PMR **ppsPMR);
+
+/*
+ * PMRUnimportPMR()
+ *
+ * releases the reference on the PMR as obtained by PMRImportPMR()
+ */
+PVRSRV_ERROR
+PMRUnimportPMR(PMR *psPMR);
+
+PVRSRV_ERROR
+PMRLocalImportPMR(PMR *psPMR,
+ PMR **ppsPMR,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *puiAlign);
+
+/*
+ * Equivalent mapping functions when in kernel mode.
+ */ +PVRSRV_ERROR +PMRAcquireKernelMappingData(PMR *psPMR, + size_t uiLogicalOffset, + size_t uiSize, + void **ppvKernelAddressOut, + size_t *puiLengthOut, + IMG_HANDLE *phPrivOut); + +PVRSRV_ERROR +PMRAcquireSparseKernelMappingData(PMR *psPMR, + size_t uiLogicalOffset, + size_t uiSize, + void **ppvKernelAddressOut, + size_t *puiLengthOut, + IMG_HANDLE *phPrivOut); + +PVRSRV_ERROR +PMRReleaseKernelMappingData(PMR *psPMR, + IMG_HANDLE hPriv); + +#if defined(INTEGRITY_OS) +PVRSRV_ERROR +PMRMapMemoryObject(PMR *psPMR, + IMG_HANDLE *phMemObj, + void **pvClientAddr, + IMG_HANDLE *phPrivOut); +PVRSRV_ERROR +PMRUnmapMemoryObject(PMR *psPMR, + IMG_HANDLE hPriv); +#endif + +/* + * PMR_ReadBytes() + * + * calls into the PMR implementation to read up to uiBufSz bytes, + * returning the actual number read in *puiNumBytes + * + * this will read up to the end of the PMR, or the next symbolic name + * boundary, or until the requested number of bytes is read, whichever + * comes first + * + * In the case of sparse PMR's the caller doesn't know what offsets are + * valid and which ones aren't so we will just write 0 to invalid offsets + */ +PVRSRV_ERROR +PMR_ReadBytes(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes); + +/* + * PMR_WriteBytes() + * + * calls into the PMR implementation to write up to uiBufSz bytes, + * returning the actual number read in *puiNumBytes + * + * this will write up to the end of the PMR, or the next symbolic name + * boundary, or until the requested number of bytes is written, whichever + * comes first + * + * In the case of sparse PMR's the caller doesn't know what offsets are + * valid and which ones aren't so we will just ignore data at invalid offsets + */ +PVRSRV_ERROR +PMR_WriteBytes(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes); + +/*************************************************************************/ /*! 
+@Function PMRMMapPMR +@Description Performs the necessary steps to map the PMR into a user process + address space. The caller does not need to call + PMRLockSysPhysAddresses before calling this function. + +@Input psPMR PMR to map. + +@Input pOSMMapData OS specific data needed to create a mapping. + +@Return PVRSRV_ERROR: PVRSRV_OK on success or an error otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR +PMRMMapPMR(PMR *psPMR, PMR_MMAP_DATA pOSMMapData); + +/* + * PMRRefPMR() + * + * Take a reference on the passed in PMR + */ +void +PMRRefPMR(PMR *psPMR); + +/* + * PMRUnrefPMR() + * + * This undoes a call to any of the PhysmemNew* family of APIs + * (i.e. any PMR factory "constructor") + * + * This relinquishes a reference to the PMR, and, where the refcount + * reaches 0, causes the PMR to be destroyed (calling the finalizer + * callback on the PMR, if there is one) + */ +PVRSRV_ERROR +PMRUnrefPMR(PMR *psPMR); + +/* + * PMRUnrefUnlockPMR() + * + * Same as above but also unlocks the PMR. + */ +PVRSRV_ERROR +PMRUnrefUnlockPMR(PMR *psPMR); + +PPVRSRV_DEVICE_NODE +PMR_DeviceNode(const PMR *psPMR); + +/* + * PMRIsPMRLive() + * + * This function returns true if the PMR is in use and false otherwise. + * This function is not thread safe and hence the caller needs to ensure the + * thread safety by explicitly taking PMR or through other means. + */ +IMG_BOOL PMRIsPMRLive(PMR *psPMR); + +/* + * PMR_Flags() + * + * Flags are static and guaranteed for the life of the PMR. Thus this + * function is idempotent and acquire/release semantics is not required. + * + * Returns the flags as specified on the PMR. 
The flags are to be + * interpreted as mapping permissions + */ +PMR_FLAGS_T +PMR_Flags(const PMR *psPMR); + +IMG_BOOL +PMR_IsSparse(const PMR *psPMR); + +IMG_BOOL +PMR_IsUnpinned(const PMR *psPMR); + +PVRSRV_ERROR +PMR_LogicalSize(const PMR *psPMR, + IMG_DEVMEM_SIZE_T *puiLogicalSize); + +PVRSRV_ERROR +PMR_PhysicalSize(const PMR *psPMR, + IMG_DEVMEM_SIZE_T *puiPhysicalSize); + +PHYS_HEAP * +PMR_PhysHeap(const PMR *psPMR); + +PMR_MAPPING_TABLE * +PMR_GetMappigTable(const PMR *psPMR); + +IMG_UINT32 +PMR_GetLog2Contiguity(const PMR *psPMR); + +const IMG_CHAR * +PMR_GetAnnotation(const PMR *psPMR); + +/* + * PMR_IsOffsetValid() + * + * Returns if an address offset inside a PMR has a valid + * physical backing. + */ +PVRSRV_ERROR +PMR_IsOffsetValid(const PMR *psPMR, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_BOOL *pbValid); + +PMR_IMPL_TYPE +PMR_GetType(const PMR *psPMR); + +IMG_INT32 +PMR_GetRefCount(const PMR *psPMR); + +/* + * PMR_DevPhysAddr() + * + * A note regarding Lock/Unlock semantics + * ====================================== + * + * PMR_DevPhysAddr may only be called after PMRLockSysPhysAddresses() + * has been called. The data returned may be used only until + * PMRUnlockSysPhysAddresses() is called after which time the licence + * to use the data is revoked and the information may be invalid. + * + * Given an offset, this function returns the device physical address of the + * corresponding page in the PMR. It may be called multiple times + * until the address of all relevant pages has been determined. 
+ * + * If caller only wants one physical address it is sufficient to pass in: + * ui32Log2PageSize==0 and ui32NumOfPages==1 + */ +PVRSRV_ERROR +PMR_DevPhysAddr(const PMR *psPMR, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_DEV_PHYADDR *psDevAddr, + IMG_BOOL *pbValid); + +/* + * PMR_CpuPhysAddr() + * + * See note above about Lock/Unlock semantics. + * + * Given an offset, this function returns the CPU physical address of the + * corresponding page in the PMR. It may be called multiple times + * until the address of all relevant pages has been determined. + * + */ +PVRSRV_ERROR +PMR_CpuPhysAddr(const PMR *psPMR, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_CPU_PHYADDR *psCpuAddrPtr, + IMG_BOOL *pbValid); + +PVRSRV_ERROR +PMRGetUID(PMR *psPMR, + IMG_UINT64 *pui64UID); +/* + * PMR_ChangeSparseMem() + * + * See note above about Lock/Unlock semantics. + * + * This function alters the memory map of the given PMR in device space by + * adding/deleting the pages as requested. + * + */ +PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_UINT32 uiSparseFlags); + +/* + * PMR_ChangeSparseMemCPUMap() + * + * See note above about Lock/Unlock semantics. + * + * This function alters the memory map of the given PMR in CPU space by + * adding/deleting the pages as requested. 
+ */ +PVRSRV_ERROR PMR_ChangeSparseMemCPUMap(PMR *psPMR, + IMG_UINT64 sCpuVAddrBase, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices); + +#if defined(PDUMP) + +void +PDumpPMRMallocPMR(PMR *psPMR, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiBlockSize, + IMG_UINT32 ui32ChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *puiMappingTable, + IMG_UINT32 uiLog2Contiguity, + IMG_BOOL bInitialise, + IMG_UINT32 ui32InitValue, + IMG_HANDLE *phPDumpAllocInfoPtr, + IMG_UINT32 ui32PDumpFlags); + +void +PDumpPMRFreePMR(PMR *psPMR, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiBlockSize, + IMG_UINT32 uiLog2Contiguity, + IMG_HANDLE hPDumpAllocationInfoHandle); + +void +PDumpPMRChangeSparsePMR(PMR *psPMR, + IMG_UINT32 uiBlockSize, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_BOOL bInitialise, + IMG_UINT32 ui32InitValue, + IMG_HANDLE *phPDumpAllocInfoOut); + +/* + * PMR_PDumpSymbolicAddr() + * + * Given an offset, returns the pdump memspace name and symbolic + * address of the corresponding page in the PMR. + * + * Note that PDump memspace names and symbolic addresses are static + * and valid for the lifetime of the PMR, therefore we don't require + * acquire/release semantics here. 
+ * + * Note that it is expected that the pdump "mapping" code will call + * this function multiple times as each page is mapped in turn + * + * Note that NextSymName is the offset from the base of the PMR to the + * next pdump symbolic address (or the end of the PMR if the PMR only + * had one PDUMPMALLOC + */ +PVRSRV_ERROR +PMR_PDumpSymbolicAddr(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32NamespaceNameLen, + IMG_CHAR *pszNamespaceName, + IMG_UINT32 ui32SymbolicAddrLen, + IMG_CHAR *pszSymbolicAddr, + IMG_DEVMEM_OFFSET_T *puiNewOffset, + IMG_DEVMEM_OFFSET_T *puiNextSymName + ); + +/* + * PMRPDumpLoadMemValue32() + * + * writes the current contents of a dword in PMR memory to the pdump + * script stream. Useful for patching a buffer by simply editing the + * script output file in ASCII plain text. + * + */ +PVRSRV_ERROR +PMRPDumpLoadMemValue32(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Value, + PDUMP_FLAGS_T uiPDumpFlags); + +/* + * PMRPDumpCopyMem32 + * + * Adds in the pdump script stream a copy of a dword in one PMR memory + * location to another PMR memory location. + * + */ +PVRSRV_ERROR +PMRPDumpCopyMem32(PMR *psDstPMR, + IMG_DEVMEM_OFFSET_T uiDstLogicalOffset, + PMR *psSrcPMR, + IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset, + const IMG_CHAR *pszTmpVar, + PDUMP_FLAGS_T uiPDumpFlags); + +/* + * PMRPDumpLoadMemValue64() + * + * writes the current contents of a dword in PMR memory to the pdump + * script stream. Useful for patching a buffer by simply editing the + * script output file in ASCII plain text. + * + */ +PVRSRV_ERROR +PMRPDumpLoadMemValue64(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT64 ui64Value, + PDUMP_FLAGS_T uiPDumpFlags); + +/* + * PMRPDumpCopyMem64 + * + * Adds in the pdump script stream a copy of a quadword in one PMR memory + * location to another PMR memory location. 
+ */ +PVRSRV_ERROR +PMRPDumpCopyMem64(PMR *psDstPMR, + IMG_DEVMEM_OFFSET_T uiDstLogicalOffset, + PMR *psSrcPMR, + IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset, + const IMG_CHAR *pszTmpVar, + PDUMP_FLAGS_T uiPDumpFlags); + +/* + * PMRPDumpLoadMem() + * + * Writes the current contents of the PMR memory to the pdump PRM stream, + * and emits some PDump code to the script stream to LDB said bytes from + * said file. If bZero is IMG_TRUE then the PDump zero page is used as the + * source for the LDB. + */ +PVRSRV_ERROR +PMRPDumpLoadMem(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_DEVMEM_SIZE_T uiSize, + PDUMP_FLAGS_T uiPDumpFlags, + IMG_BOOL bZero); + +/* + * PMRPDumpSaveToFile() + * + * Emits some PDump that does an SAB (save bytes) using the PDump symbolic + * address of the PMR. Note that this is generally not the preferred way to + * dump the buffer contents. There is an equivalent function in + * devicemem_server.h which also emits SAB but using the virtual address, + * which is the "right" way to dump the buffer contents to a file. + * This function exists just to aid testing by providing a means to dump + * the PMR directly by symbolic address also. 
+ */ +PVRSRV_ERROR +PMRPDumpSaveToFile(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 uiArraySize, + const IMG_CHAR *pszFilename, + IMG_UINT32 uiFileOffset); +#else /* PDUMP */ + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpPMRMallocPMR) +#endif +static INLINE void +PDumpPMRMallocPMR(PMR *psPMR, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiBlockSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *puiMappingTable, + IMG_UINT32 uiLog2Contiguity, + IMG_BOOL bInitialise, + IMG_UINT32 ui32InitValue, + IMG_HANDLE *phPDumpAllocInfoPtr, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(uiBlockSize); + PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks); + PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks); + PVR_UNREFERENCED_PARAMETER(puiMappingTable); + PVR_UNREFERENCED_PARAMETER(uiLog2Contiguity); + PVR_UNREFERENCED_PARAMETER(bInitialise); + PVR_UNREFERENCED_PARAMETER(ui32InitValue); + PVR_UNREFERENCED_PARAMETER(phPDumpAllocInfoPtr); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpPMRFreePMR) +#endif +static INLINE void +PDumpPMRFreePMR(PMR *psPMR, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiBlockSize, + IMG_UINT32 uiLog2Contiguity, + IMG_HANDLE hPDumpAllocationInfoHandle) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(uiBlockSize); + PVR_UNREFERENCED_PARAMETER(uiLog2Contiguity); + PVR_UNREFERENCED_PARAMETER(hPDumpAllocationInfoHandle); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpPMRChangeSparsePMR) +#endif +static INLINE void +PDumpPMRChangeSparsePMR(PMR *psPMR, + IMG_UINT32 uiBlockSize, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_BOOL bInitialise, + IMG_UINT32 ui32InitValue, + IMG_HANDLE 
*phPDumpAllocInfoOut) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiBlockSize); + PVR_UNREFERENCED_PARAMETER(ui32AllocPageCount); + PVR_UNREFERENCED_PARAMETER(pai32AllocIndices); + PVR_UNREFERENCED_PARAMETER(ui32FreePageCount); + PVR_UNREFERENCED_PARAMETER(pai32FreeIndices); + PVR_UNREFERENCED_PARAMETER(bInitialise); + PVR_UNREFERENCED_PARAMETER(ui32InitValue); + PVR_UNREFERENCED_PARAMETER(phPDumpAllocInfoOut); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PMR_PDumpSymbolicAddr) +#endif +static INLINE PVRSRV_ERROR +PMR_PDumpSymbolicAddr(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32NamespaceNameLen, + IMG_CHAR *pszNamespaceName, + IMG_UINT32 ui32SymbolicAddrLen, + IMG_CHAR *pszSymbolicAddr, + IMG_DEVMEM_OFFSET_T *puiNewOffset, + IMG_DEVMEM_OFFSET_T *puiNextSymName) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); + PVR_UNREFERENCED_PARAMETER(ui32NamespaceNameLen); + PVR_UNREFERENCED_PARAMETER(pszNamespaceName); + PVR_UNREFERENCED_PARAMETER(ui32SymbolicAddrLen); + PVR_UNREFERENCED_PARAMETER(pszSymbolicAddr); + PVR_UNREFERENCED_PARAMETER(puiNewOffset); + PVR_UNREFERENCED_PARAMETER(puiNextSymName); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PMRPDumpLoadMemValue32) +#endif +static INLINE PVRSRV_ERROR +PMRPDumpLoadMemValue32(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Value, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); + PVR_UNREFERENCED_PARAMETER(ui32Value); + PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PMRPDumpLoadMemValue64) +#endif +static INLINE PVRSRV_ERROR +PMRPDumpLoadMemValue64(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT64 ui64Value, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); + 
PVR_UNREFERENCED_PARAMETER(ui64Value); + PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PMRPDumpLoadMem) +#endif +static INLINE PVRSRV_ERROR +PMRPDumpLoadMem(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_DEVMEM_SIZE_T uiSize, + PDUMP_FLAGS_T uiPDumpFlags, + IMG_BOOL bZero) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); + PVR_UNREFERENCED_PARAMETER(bZero); + return PVRSRV_OK; +} + + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PMRPDumpSaveToFile) +#endif +static INLINE PVRSRV_ERROR +PMRPDumpSaveToFile(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 uiArraySize, + const IMG_CHAR *pszFilename, + IMG_UINT32 uiFileOffset) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(uiArraySize); + PVR_UNREFERENCED_PARAMETER(pszFilename); + PVR_UNREFERENCED_PARAMETER(uiFileOffset); + return PVRSRV_OK; +} + +#endif /* PDUMP */ + +/* This function returns the private data that a pmr subtype embedded in + * here. We use the function table pointer as "authorisation" that this + * function is being called by the pmr subtype implementation. We can + * assume (assert) that. It would be a bug in the implementation of the + * pmr subtype if this assertion ever fails. 
+ */ +void * +PMRGetPrivateData(const PMR *psPMR, + const PMR_IMPL_FUNCTAB *psFuncTab); + +PVRSRV_ERROR +PMRZeroingPMR(PMR *psPMR, + IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize); + +PVRSRV_ERROR +PMRDumpPageList(PMR *psReferencePMR, + IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize); + +PVRSRV_ERROR +PMRWritePMPageList(/* Target PMR, offset, and length */ + PMR *psPageListPMR, + IMG_DEVMEM_OFFSET_T uiTableOffset, + IMG_DEVMEM_SIZE_T uiTableLength, + /* Referenced PMR, and "page" granularity */ + PMR *psReferencePMR, + IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize, + PMR_PAGELIST **ppsPageList); + +/* Doesn't actually erase the page list - just releases + * the appropriate refcounts + */ +PVRSRV_ERROR // should be void, surely +PMRUnwritePMPageList(PMR_PAGELIST *psPageList); + +#if defined(PDUMP) +PVRSRV_ERROR +PMRPDumpPol32(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T uiFlags); + +PVRSRV_ERROR +PMRPDumpCheck32(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T uiPDumpFlags); + +PVRSRV_ERROR +PMRPDumpCBP(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiReadOffset, + IMG_DEVMEM_OFFSET_T uiWriteOffset, + IMG_DEVMEM_SIZE_T uiPacketSize, + IMG_DEVMEM_SIZE_T uiBufferSize); +#else + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PMRPDumpPol32) +#endif +static INLINE PVRSRV_ERROR +PMRPDumpPol32(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T uiFlags) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); + PVR_UNREFERENCED_PARAMETER(ui32Value); + PVR_UNREFERENCED_PARAMETER(ui32Mask); + PVR_UNREFERENCED_PARAMETER(eOperator); + PVR_UNREFERENCED_PARAMETER(uiFlags); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PMRPDumpCheck32) +#endif +static INLINE 
PVRSRV_ERROR +PMRPDumpCheck32(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T uiFlags) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); + PVR_UNREFERENCED_PARAMETER(ui32Value); + PVR_UNREFERENCED_PARAMETER(ui32Mask); + PVR_UNREFERENCED_PARAMETER(eOperator); + PVR_UNREFERENCED_PARAMETER(uiFlags); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PMRPDumpCBP) +#endif +static INLINE PVRSRV_ERROR +PMRPDumpCBP(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiReadOffset, + IMG_DEVMEM_OFFSET_T uiWriteOffset, + IMG_DEVMEM_SIZE_T uiPacketSize, + IMG_DEVMEM_SIZE_T uiBufferSize) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiReadOffset); + PVR_UNREFERENCED_PARAMETER(uiWriteOffset); + PVR_UNREFERENCED_PARAMETER(uiPacketSize); + PVR_UNREFERENCED_PARAMETER(uiBufferSize); + return PVRSRV_OK; +} +#endif + +PPVRSRV_DEVICE_NODE PMRGetExportDeviceNode(PMR_EXPORT *psExportPMR); + +/* + * PMRInit() + * + * To be called once and only once to initialise the internal data in + * the PMR module (mutexes and such) + * + * Not for general use. Only PVRSRVInit(); should be calling this. + */ +PVRSRV_ERROR +PMRInit(void); + +/* + * PMRDeInit() + * + * To be called once and only once to deinitialise the internal data in + * the PMR module (mutexes and such) and for debug checks + * + * Not for general use. Only PVRSRVDeInit(); should be calling this. 
+ */ +PVRSRV_ERROR +PMRDeInit(void); + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +PVRSRV_ERROR +PMRStoreRIHandle(PMR *psPMR, void *hRIHandle); +#endif + +#endif /* #ifdef _SRVSRV_PMR_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/pmr_impl.h b/drivers/mcst/gpu-imgtec/services/server/include/pmr_impl.h new file mode 100644 index 000000000000..44c0bce28841 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/pmr_impl.h @@ -0,0 +1,553 @@ +/**************************************************************************/ /*! +@File +@Title Implementation Callbacks for Physmem (PMR) abstraction +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Part of the memory management. This file is for definitions + that are private to the world of PMRs, but that need to be + shared between pmr.c itself and the modules that implement the + callbacks for the PMR. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#ifndef SRVSRV_PMR_IMPL_H +#define SRVSRV_PMR_IMPL_H + +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" + +/*! Physical Memory Resource type. + */ +typedef struct _PMR_ PMR; + +/*! Per-flavour callbacks need to be shared with generic implementation + * (pmr.c). + */ +typedef void *PMR_IMPL_PRIVDATA; + +/*! Type for holding flags passed to the PMR factory. + */ +typedef PVRSRV_MEMALLOCFLAGS_T PMR_FLAGS_T; + +/*! Mapping table for the allocation. + * + * PMR's can be sparse in which case not all the logical addresses in it are + * valid. The mapping table translates logical offsets into physical offsets. + * + * This table is always passed to the PMR factory regardless if the memory is + * sparse or not. 
In case of non-sparse memory all virtual offsets are mapped + * to physical offsets. + */ +typedef struct _PMR_MAPPING_TABLE_ PMR_MAPPING_TABLE; + +/*! Private data passed to the ::PFN_MMAP_FN function. + */ +typedef void *PMR_MMAP_DATA; + +/*! PMR factory type. + */ +typedef enum _PMR_IMPL_TYPE_ +{ + PMR_TYPE_NONE = 0, + PMR_TYPE_OSMEM, + PMR_TYPE_LMA, + PMR_TYPE_DMABUF, + PMR_TYPE_EXTMEM, + PMR_TYPE_DC, + PMR_TYPE_TDFWMEM, + PMR_TYPE_TDSECBUF +} PMR_IMPL_TYPE; + +/*************************************************************************/ /*! +@Brief Callback function type PFN_LOCK_PHYS_ADDRESSES_FN + +@Description Called to lock down the physical addresses for all pages + allocated for a PMR. + The default implementation is to simply increment a + lock-count for debugging purposes. + If overridden, the PFN_LOCK_PHYS_ADDRESSES_FN function will + be called when someone first requires a physical address, + and the PFN_UNLOCK_PHYS_ADDRESSES_FN counterpart will be + called when the last such reference is released. + The PMR implementation may assume that physical addresses + will have been "locked" in this manner before any call is + made to the pfnDevPhysAddr() callback + +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) + +@Return PVRSRV_OK if the operation was successful, an error code + otherwise. +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_LOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv); + +/*************************************************************************/ /*! +@Brief Callback function type PFN_UNLOCK_PHYS_ADDRESSES_FN + +@Description Called to release the lock taken on the physical addresses + for all pages allocated for a PMR. + The default implementation is to simply decrement a + lock-count for debugging purposes. + If overridden, the PFN_UNLOCK_PHYS_ADDRESSES_FN will be + called when the last reference taken on the PMR is + released. 
+ +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) + +@Return PVRSRV_OK if the operation was successful, an error code + otherwise. +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_UNLOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv); + +/*************************************************************************/ /*! +@Brief Callback function type PFN_DEV_PHYS_ADDR_FN + +@Description Called to obtain one or more physical addresses for given + offsets within a PMR. + + The PFN_LOCK_PHYS_ADDRESSES_FN callback (if overridden) is + guaranteed to have been called prior to calling the + PFN_DEV_PHYS_ADDR_FN callback and the caller promises not to + rely on the physical address thus obtained after the + PFN_UNLOCK_PHYS_ADDRESSES_FN callback is called. + + Implementation of this callback is mandatory. + +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) +@Input ui32Log2PageSize The log2 page size. +@Input ui32NumOfAddr The number of addresses to be returned +@Input puiOffset The offset from the start of the PMR + (in bytes) for which the physical + address is required. Where multiple + addresses are requested, this will + contain a list of offsets. +@Output pbValid List of boolean flags indicating which + addresses in the returned list + (psDevAddrPtr) are valid (for sparse + allocations, not all pages may have a + physical backing) +@Output psDevAddrPtr Returned list of physical addresses + +@Return PVRSRV_OK if the operation was successful, an error code + otherwise. 
+*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_DEV_PHYS_ADDR_FN)(PMR_IMPL_PRIVDATA pvPriv, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfAddr, + IMG_DEVMEM_OFFSET_T *puiOffset, + IMG_BOOL *pbValid, + IMG_DEV_PHYADDR *psDevAddrPtr); + +/*************************************************************************/ /*! +@Brief Callback function type PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN + +@Description Called to obtain a kernel-accessible address (mapped to a + virtual address if required) for the PMR for use internally + in Services. + + Implementation of this function for the (default) PMR factory providing + OS-allocations is mandatory (the driver will expect to be able to call + this function for OS-provided allocations). + For other PMR factories, implementation of this function is only necessary + where an MMU mapping is required for the Kernel to be able to access the + allocated memory. + If no mapping is needed, this function can remain unimplemented and the + pfn may be set to NULL. +@Input pvPriv Private data (which was generated by + the PMR factory when PMR was created) +@Input uiOffset Offset from the beginning of the PMR + at which mapping is to start +@Input uiSize Size of mapping (in bytes) +@Output ppvKernelAddressOut Mapped kernel address +@Output phHandleOut Returned handle of the new mapping +@Input ulFlags Mapping flags + +@Return PVRSRV_OK if the mapping was successful, an error code + otherwise. +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN)(PMR_IMPL_PRIVDATA pvPriv, + size_t uiOffset, + size_t uiSize, + void **ppvKernelAddressOut, + IMG_HANDLE *phHandleOut, + PMR_FLAGS_T ulFlags); + +/*************************************************************************/ /*! 
+@Brief Callback function type PFN_RELEASE_KERNEL_MAPPING_DATA_FN + +@Description Called to release a mapped kernel virtual address + + Implementation of this callback is mandatory if + PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN is provided for the PMR factory, + otherwise this function can remain unimplemented and the pfn may be set + to NULL. + +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) +@Input hHandle Handle of the mapping to be released + +@Return None +*/ /**************************************************************************/ +typedef void (*PFN_RELEASE_KERNEL_MAPPING_DATA_FN)(PMR_IMPL_PRIVDATA pvPriv, + IMG_HANDLE hHandle); + +/*************************************************************************/ /*! +@Brief Callback function type PFN_READ_BYTES_FN + +@Description Called to read bytes from an unmapped allocation + + Implementation of this callback is optional - where it is not provided, + the driver will use PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN to map the entire + PMR (if an MMU mapping is required for the Kernel to be able to access the + allocated memory). + +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) +@Input uiOffset Offset from the beginning of the PMR at + which to begin reading +@Output pcBuffer Buffer in which to return the read data +@Input uiBufSz Number of bytes to be read +@Output puiNumBytes Number of bytes actually read (may be + less than uiBufSz) + +@Return PVRSRV_OK if the read was successful, an error code + otherwise. +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_READ_BYTES_FN)(PMR_IMPL_PRIVDATA pvPriv, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes); + +/*************************************************************************/ /*! 
+@Brief Callback function type PFN_WRITE_BYTES_FN + +@Description Called to write bytes into an unmapped allocation + + Implementation of this callback is optional - where it is not provided, + the driver will use PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN to map the entire + PMR (if an MMU mapping is required for the Kernel to be able to access the + allocated memory). + +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) +@Input uiOffset Offset from the beginning of the PMR at + which to begin writing +@Input pcBuffer Buffer containing the data to be written +@Input uiBufSz Number of bytes to be written +@Output puiNumBytes Number of bytes actually written (may be + less than uiBufSz) + +@Return PVRSRV_OK if the write was successful, an error code + otherwise. +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_WRITE_BYTES_FN)(PMR_IMPL_PRIVDATA pvPriv, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes); + +/*************************************************************************/ /*! +@Brief Callback function type PFN_UNPIN_MEM_FN + +@Description Called to unpin an allocation. + Once unpinned, the pages backing the allocation may be + re-used by the Operating System for another purpose. + When the pages are required again, they may be re-pinned + (by calling PFN_PIN_MEM_FN). The driver will try to return + same pages as before. The caller will be told if the + content of these returned pages has been modified or if + the pages returned are not the original pages. + + Implementation of this callback is optional. + +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) + +@Return PVRSRV_OK if the unpin was successful, an error code + otherwise. 
+*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_UNPIN_MEM_FN)(PMR_IMPL_PRIVDATA pPriv); + +/*************************************************************************/ /*! +@Brief Callback function type PFN_PIN_MEM_FN + +@Description Called to pin a previously unpinned allocation. + The driver will try to return same pages as were previously + assigned to the allocation. The caller will be told if the + content of these returned pages has been modified or if + the pages returned are not the original pages. + + Implementation of this callback is optional. + +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) + +@Input psMappingTable Mapping table, which describes how + virtual 'chunks' are to be mapped to + physical 'chunks' for the allocation. + +@Return PVRSRV_OK if the original pages were returned unmodified. + PVRSRV_ERROR_PMR_NEW_MEMORY if the memory returned was modified + or different pages were returned. + Another PVRSRV_ERROR code on failure. +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_PIN_MEM_FN)(PMR_IMPL_PRIVDATA pPriv, + PMR_MAPPING_TABLE *psMappingTable); + +/*************************************************************************/ /*! +@Brief Callback function type PFN_CHANGE_SPARSE_MEM_FN + +@Description Called to modify the physical backing for a given sparse + allocation. + The caller provides a list of the pages within the sparse + allocation which should be backed with a physical allocation + and a list of the pages which do not require backing. + + Implementation of this callback is mandatory. 
+ +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) +@Input psPMR The PMR of the sparse allocation to be + modified +@Input ui32AllocPageCount The number of pages specified in + pai32AllocIndices +@Input pai32AllocIndices The list of pages in the sparse + allocation that should be backed with a + physical allocation. Pages are + referenced by their index within the + sparse allocation (e.g. in a 10 page + allocation, pages are denoted by + indices 0 to 9) +@Input ui32FreePageCount The number of pages specified in + pai32FreeIndices +@Input pai32FreeIndices The list of pages in the sparse + allocation that do not require + a physical allocation. +@Input ui32Flags Allocation flags + +@Return PVRSRV_OK if the sparse allocation physical backing was updated + successfully, an error code otherwise. +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_CHANGE_SPARSE_MEM_FN)(PMR_IMPL_PRIVDATA pPriv, + const PMR *psPMR, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_UINT32 uiFlags); + +/*************************************************************************/ /*! +@Brief Callback function type PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN + +@Description Called to modify which pages are mapped for a given sparse + allocation. + The caller provides a list of the pages within the sparse + allocation which should be given a CPU mapping and a list + of the pages which do not require a CPU mapping. + + Implementation of this callback is mandatory. 
+ +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) +@Input psPMR The PMR of the sparse allocation to be + modified +@Input sCpuVAddrBase The virtual base address of the sparse + allocation +@Input ui32AllocPageCount The number of pages specified in + pai32AllocIndices +@Input pai32AllocIndices The list of pages in the sparse + allocation that should be given a CPU + mapping. Pages are referenced by their + index within the sparse allocation (e.g. + in a 10 page allocation, pages are + denoted by indices 0 to 9) +@Input ui32FreePageCount The number of pages specified in + pai32FreeIndices +@Input pai32FreeIndices The list of pages in the sparse + allocation that do not require a CPU + mapping. + +@Return PVRSRV_OK if the page mappings were updated successfully, an + error code otherwise. +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN)(PMR_IMPL_PRIVDATA pPriv, + const PMR *psPMR, + IMG_UINT64 sCpuVAddrBase, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices); + +/*************************************************************************/ /*! +@Brief Callback function type PFN_MMAP_FN + +@Description Called to map pages in the specified PMR. + + Implementation of this callback is optional. + Where it is provided, it will be used in place of OSMMapPMRGeneric(). + +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) +@Input psPMR The PMR of the allocation to be mapped +@Input pMMapData OS-specific data to describe how mapping + should be performed + +@Return PVRSRV_OK if the mapping was successful, an error code + otherwise. 
+*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_MMAP_FN)(PMR_IMPL_PRIVDATA pPriv, + PMR *psPMR, + PMR_MMAP_DATA pMMapData); + +/*************************************************************************/ /*! +@Brief Callback function type PFN_FINALIZE_FN + +@Description Called to destroy the PMR. + This callback will be called only when all references to + the PMR have been dropped. + The PMR was created via a call to PhysmemNewRamBackedPMR() + and is destroyed via this callback. + + Implementation of this callback is mandatory. + +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) + +@Return PVRSRV_OK if the PMR destruction was successful, an error + code otherwise. + Currently PVRSRV_ERROR_PMR_STILL_REFERENCED is the only + error returned from physmem_dmabuf.c layer and on this + error, destroying of the PMR is aborted without disturbing + the PMR state. +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_FINALIZE_FN)(PMR_IMPL_PRIVDATA pvPriv); + +/*************************************************************************/ /*! +@Brief Callback function type PFN_ACQUIRE_PMR_FACTORY_LOCK_FN + +@Description Called to acquire the PMR factory's global lock, if it has one, + hence callback optional. Factories which support entry points + in addition to the normal bridge calls, for example, from the + native OS that manipulate the PMR reference count should + create a factory lock and implementations for these call backs. + + Implementation of this callback is optional. + +@Return None +*/ +/*****************************************************************************/ +typedef void (*PFN_ACQUIRE_PMR_FACTORY_LOCK_FN)(void); + +/*************************************************************************/ /*! 
+@Brief Callback function type PFN_RELEASE_PMR_FACTORY_LOCK_FN + +@Description Called to release the PMR factory's global lock acquired by calling + pfn_acquire_pmr_factory_lock callback. + + Implementation of this callback is optional. + +@Return None +*/ /**************************************************************************/ +typedef void (*PFN_RELEASE_PMR_FACTORY_LOCK_FN)(void); + +/*! PMR factory callback table. + */ +struct _PMR_IMPL_FUNCTAB_ { + /*! Callback function pointer, see ::PFN_LOCK_PHYS_ADDRESSES_FN */ + PFN_LOCK_PHYS_ADDRESSES_FN pfnLockPhysAddresses; + /*! Callback function pointer, see ::PFN_UNLOCK_PHYS_ADDRESSES_FN */ + PFN_UNLOCK_PHYS_ADDRESSES_FN pfnUnlockPhysAddresses; + + /*! Callback function pointer, see ::PFN_DEV_PHYS_ADDR_FN */ + PFN_DEV_PHYS_ADDR_FN pfnDevPhysAddr; + + /*! Callback function pointer, see ::PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN */ + PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN pfnAcquireKernelMappingData; + /*! Callback function pointer, see ::PFN_RELEASE_KERNEL_MAPPING_DATA_FN */ + PFN_RELEASE_KERNEL_MAPPING_DATA_FN pfnReleaseKernelMappingData; + +#if defined(INTEGRITY_OS) + /* + * MapMemoryObject()/UnmapMemoryObject() + * + * called to map/unmap memory objects in Integrity OS + */ + + PVRSRV_ERROR (*pfnMapMemoryObject)(PMR_IMPL_PRIVDATA pvPriv, + IMG_HANDLE *phMemObj, + void **pvClientAddr, + IMG_HANDLE *phHandleOut); + PVRSRV_ERROR (*pfnUnmapMemoryObject)(PMR_IMPL_PRIVDATA pvPriv); +#endif + + /*! Callback function pointer, see ::PFN_READ_BYTES_FN */ + PFN_READ_BYTES_FN pfnReadBytes; + /*! Callback function pointer, see ::PFN_WRITE_BYTES_FN */ + PFN_WRITE_BYTES_FN pfnWriteBytes; + + /*! Callback function pointer, see ::PFN_UNPIN_MEM_FN */ + PFN_UNPIN_MEM_FN pfnUnpinMem; + /*! Callback function pointer, see ::PFN_PIN_MEM_FN */ + PFN_PIN_MEM_FN pfnPinMem; + + /*! Callback function pointer, see ::PFN_CHANGE_SPARSE_MEM_FN */ + PFN_CHANGE_SPARSE_MEM_FN pfnChangeSparseMem; + /*! 
Callback function pointer, see ::PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN */ + PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN pfnChangeSparseMemCPUMap; + + /*! Callback function pointer, see ::PFN_MMAP_FN */ + PFN_MMAP_FN pfnMMap; + + /*! Callback function pointer, see ::PFN_FINALIZE_FN */ + PFN_FINALIZE_FN pfnFinalize; + + /*! Callback function pointer, see ::PFN_ACQUIRE_PMR_FACTORY_LOCK_FN */ + PFN_ACQUIRE_PMR_FACTORY_LOCK_FN pfnGetPMRFactoryLock; + + /*! Callback function pointer, see ::PFN_RELEASE_PMR_FACTORY_LOCK_FN */ + PFN_RELEASE_PMR_FACTORY_LOCK_FN pfnReleasePMRFactoryLock; +}; + +/*! PMR factory callback table. + */ +typedef struct _PMR_IMPL_FUNCTAB_ PMR_IMPL_FUNCTAB; + +#endif /* SRVSRV_PMR_IMPL_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/pmr_os.h b/drivers/mcst/gpu-imgtec/services/server/include/pmr_os.h new file mode 100644 index 000000000000..0dfbd492e4c5 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/pmr_os.h @@ -0,0 +1,62 @@ +/*************************************************************************/ /*! +@File +@Title OS PMR functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description OS specific PMR functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__PMR_OS_H__) +#define __PMR_OS_H__ + +#include "pmr_impl.h" + +/*************************************************************************/ /*! +@Function OSMMapPMRGeneric +@Description Implements a generic PMR mapping function, which is used + to CPU map a PMR where the PMR does not have a mapping + function defined by the creating PMR factory. +@Input psPMR the PMR to be mapped +@Output pOSMMapData pointer to any private data + needed by the generic mapping function +@Return PVRSRV_OK on success, a failure code otherwise. 
+*/ /**************************************************************************/ +PVRSRV_ERROR +OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData); + +#endif /* !defined(__PMR_OS_H__) */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/power.h b/drivers/mcst/gpu-imgtec/services/server/include/power.h new file mode 100644 index 000000000000..dd1b089ef4c4 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/power.h @@ -0,0 +1,128 @@ +/*************************************************************************/ /*! +@File +@Title Power Management Functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Main APIs for power management functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef POWER_H +#define POWER_H + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_device.h" +#include "pvrsrv_error.h" +#include "servicesext.h" +#include "opaque_types.h" + +/*! 
+ ***************************************************************************** + * Power management + *****************************************************************************/ + +typedef struct _PVRSRV_POWER_DEV_TAG_ PVRSRV_POWER_DEV; + +typedef IMG_BOOL (*PFN_SYS_DEV_IS_DEFAULT_STATE_OFF)(PVRSRV_POWER_DEV *psPowerDevice); + + +PVRSRV_ERROR PVRSRVPowerLockInit(PPVRSRV_DEVICE_NODE psDeviceNode); +void PVRSRVPowerLockDeInit(PPVRSRV_DEVICE_NODE psDeviceNode); + +PVRSRV_ERROR PVRSRVPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode); +void PVRSRVPowerUnlock(PPVRSRV_DEVICE_NODE psDeviceNode); +PVRSRV_ERROR PVRSRVPowerTryLock(PPVRSRV_DEVICE_NODE psDeviceNode); + +IMG_BOOL PVRSRVPwrLockIsLockedByMe(PCPVRSRV_DEVICE_NODE psDeviceNode); +IMG_BOOL PVRSRVDeviceIsDefaultStateOFF(PVRSRV_POWER_DEV *psPowerDevice); + + +PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(PPVRSRV_DEVICE_NODE psDeviceNode, + PVRSRV_DEV_POWER_STATE eNewPowerState, + IMG_BOOL bForced); + +PVRSRV_ERROR PVRSRVSetDeviceSystemPowerState(PPVRSRV_DEVICE_NODE psDeviceNode, + PVRSRV_SYS_POWER_STATE ePVRState); + +PVRSRV_ERROR PVRSRVSetDeviceDefaultPowerState(PCPVRSRV_DEVICE_NODE psDeviceNode, + PVRSRV_DEV_POWER_STATE eNewPowerState); + +/* Type PFN_DC_REGISTER_POWER */ +PVRSRV_ERROR PVRSRVRegisterPowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode, + PFN_PRE_POWER pfnDevicePrePower, + PFN_POST_POWER pfnDevicePostPower, + PFN_SYS_DEV_PRE_POWER pfnSystemPrePower, + PFN_SYS_DEV_POST_POWER pfnSystemPostPower, + PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange, + PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange, + PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest, + PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest, + PFN_GPU_UNITS_POWER_CHANGE pfnGPUUnitsPowerChange, + IMG_HANDLE hDevCookie, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + PVRSRV_DEV_POWER_STATE eDefaultPowerState); + +PVRSRV_ERROR PVRSRVRemovePowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode); + +PVRSRV_ERROR PVRSRVGetDevicePowerState(PCPVRSRV_DEVICE_NODE 
psDeviceNode, + PPVRSRV_DEV_POWER_STATE pePowerState); + +IMG_BOOL PVRSRVIsDevicePowered(PPVRSRV_DEVICE_NODE psDeviceNode); + +PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode, + IMG_BOOL bIdleDevice, + void *pvInfo); + +void PVRSRVDevicePostClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode, + IMG_BOOL bIdleDevice, + void *pvInfo); + +PVRSRV_ERROR PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode, + PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnCheckIdleReq, + IMG_BOOL bDeviceOffPermitted); + +PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode); + +PVRSRV_ERROR PVRSRVDeviceGPUUnitsPowerChange(PPVRSRV_DEVICE_NODE psDeviceNode, + IMG_UINT32 ui32NewValue); + + +#endif /* POWER_H */ + +/****************************************************************************** + End of file (power.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/process_stats.h b/drivers/mcst/gpu-imgtec/services/server/include/process_stats.h new file mode 100644 index 000000000000..32675dc2df8d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/process_stats.h @@ -0,0 +1,257 @@ +/*************************************************************************/ /*! +@File +@Title Functions for creating and reading proc filesystem entries. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef PROCESS_STATS_H +#define PROCESS_STATS_H + +#include + +#include "pvrsrv_error.h" +#include "cache_ops.h" + +/* + * The publishing of Process Stats is controlled by the + * PVRSRV_ENABLE_PROCESS_STATS build option. The recording of all Memory + * allocations is controlled by the PVRSRV_ENABLE_MEMORY_STATS build option. + * + * Note: There will be a performance degradation with memory allocation + * recording enabled! + */ + + +/* + * Memory types which can be tracked... + */ +typedef enum { + PVRSRV_MEM_ALLOC_TYPE_KMALLOC, /* memory allocated by kmalloc() */ + PVRSRV_MEM_ALLOC_TYPE_VMALLOC, /* memory allocated by vmalloc() */ + PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA, /* pages allocated from UMA to hold page table information */ + PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, /* ALLOC_PAGES_PT_UMA mapped to kernel address space */ + PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, /* pages allocated from LMA to hold page table information */ + PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, /* ALLOC_PAGES_PT_LMA mapped to kernel address space */ + PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, /* pages allocated from LMA */ + PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, /* pages allocated from UMA */ + PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, /* mapped UMA/LMA pages */ + PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, /* pages in the page pool */ + PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT, /* dma-buf imports */ + + /* Must be the last enum...*/ + PVRSRV_MEM_ALLOC_TYPE_COUNT +} PVRSRV_MEM_ALLOC_TYPE; + + +/* + * Functions for managing the processes recorded... 
+ */ +PVRSRV_ERROR PVRSRVStatsInitialise(void); +void PVRSRVStatsDestroy(void); + +PVRSRV_ERROR PVRSRVStatsInitialiseDI(void); +void PVRSRVStatsDestroyDI(void); + +PVRSRV_ERROR PVRSRVStatsRegisterProcess(IMG_HANDLE* phProcessStats); + +void PVRSRVStatsDeregisterProcess(IMG_HANDLE hProcessStats); + +#define MAX_POWER_STAT_ENTRIES 51 + +/* + * Functions for recording the statistics... + */ + +/* + * PVRSRV_ENABLE_PROCESS_STATS enables process statistics regarding events, + * resources and memory across all processes + * PVRSRV_ENABLE_MEMORY_STATS enables recording of Linux kernel memory + * allocations, provided that PVRSRV_ENABLE_PROCESS_STATS is enabled + * - Output can be found in: + * /sys/kernel/debug/pvr/proc_stats/[live|retired]_pids_stats/mem_area + * PVRSRV_DEBUG_LINUX_MEMORY_STATS provides more details about memory + * statistics in conjunction with PVRSRV_ENABLE_MEMORY_STATS + * PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON is defined to encompass both memory + * allocation statistics functionalities described above in a single macro + */ +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS) && defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG) +#define PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON +#endif + +/* + * When using detailed memory allocation statistics, the line number and + * file name where the allocation happened are also provided. + * When this feature is not used, these parameters are not needed. 
+ */ +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON) +#define DEBUG_MEMSTATS_PARAMS ,void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine +#define DEBUG_MEMSTATS_VALUES ,__FILE__, __LINE__ +#else +#define DEBUG_MEMSTATS_PARAMS +#define DEBUG_MEMSTATS_VALUES +#endif + +void PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, + void *pvCpuVAddr, + IMG_CPU_PHYADDR sCpuPAddr, + size_t uiBytes, + void *pvPrivateData, + IMG_PID uiPid + DEBUG_MEMSTATS_PARAMS); + +void PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, + IMG_UINT64 ui64Key, + IMG_PID uiPid); + +void PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType, + size_t uiBytes, + IMG_PID uiPid); + +/* + * Increases the memory stat for eAllocType. Tracks the allocation size value + * by inserting a value into a hash table with uiCpuVAddr as key. + * Pair with PVRSRVStatsDecrMemAllocStatAndUntrack(). + */ +void PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE eAllocType, + size_t uiBytes, + IMG_UINT64 uiCpuVAddr, + IMG_PID uiPid); + +void PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType, + size_t uiBytes, + IMG_PID uiPid); + +void PVRSRVStatsDecrMemKAllocStat(size_t uiBytes, + IMG_PID decrPID); + +/* + * Decrease the memory stat for eAllocType. Takes the allocation size value + * from the hash table with uiCpuVAddr as key. + * Pair with PVRSRVStatsIncrMemAllocStatAndTrack(). 
+ */ +void PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE eAllocType, + IMG_UINT64 uiCpuVAddr); + +void +PVRSRVStatsIncrMemAllocPoolStat(size_t uiBytes); + +void +PVRSRVStatsDecrMemAllocPoolStat(size_t uiBytes); + +void +PVRSRVStatsUpdateOOMStats(IMG_UINT32 ui32OOMStatType, + IMG_PID pidOwner); + +PVRSRV_ERROR +PVRSRVServerUpdateOOMStats(IMG_UINT32 ui32OOMStatType, + IMG_PID pidOwner); + +void PVRSRVStatsUpdateRenderContextStats(IMG_UINT32 ui32TotalNumPartialRenders, + IMG_UINT32 ui32TotalNumOutOfMemory, + IMG_UINT32 ui32TotalTAStores, + IMG_UINT32 ui32Total3DStores, + IMG_UINT32 ui32TotalCDMStores, + IMG_UINT32 ui32TotalTDMStores, + IMG_PID owner); + +void PVRSRVStatsUpdateZSBufferStats(IMG_UINT32 ui32NumReqByApp, + IMG_UINT32 ui32NumReqByFW, + IMG_PID owner); + +void PVRSRVStatsUpdateFreelistStats(IMG_UINT32 ui32NumGrowReqByApp, + IMG_UINT32 ui32NumGrowReqByFW, + IMG_UINT32 ui32InitFLPages, + IMG_UINT32 ui32NumHighPages, + IMG_PID ownerPid); +#if defined(PVRSRV_ENABLE_CACHEOP_STATS) +void PVRSRVStatsUpdateCacheOpStats(PVRSRV_CACHE_OP uiCacheOp, + IMG_UINT32 ui32OpSeqNum, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEV_PHYADDR sDevPAddr, + IMG_UINT32 eFenceOpType, +#endif + IMG_DEVMEM_SIZE_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT64 ui64ExecuteTimeMs, + IMG_BOOL bUserModeFlush, + IMG_BOOL bIsFence, + IMG_PID ownerPid); +#endif + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +/* Update pre/post power transition timing statistics */ +void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime, + IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime, + IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower); + +void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64Stoptimer); +void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer); +#else +/* Update pre/post power transition timing statistics */ +static 
inline +void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime, + IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime, + IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower) {} +static inline +void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64Stoptimer) {} + +static inline +void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer) {} +#endif + +void SetFirmwareStartTime(IMG_UINT32 ui32TimeStamp); + +void SetFirmwareHandshakeIdleTime(IMG_UINT64 ui64Duration); + +/* Functions used for calculating the memory usage statistics of a process */ +PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, IMG_UINT32 ui32ArrSize, + IMG_BOOL bAllProcessStats, IMG_UINT32 *pui32MemoryStats); + +typedef struct { + IMG_UINT32 ui32Pid; + IMG_UINT32 ui32KernelMemUsage; + IMG_UINT32 ui32GraphicsMemUsage; +} PVRSRV_PER_PROCESS_MEM_USAGE; + +PVRSRV_ERROR PVRSRVGetProcessMemUsage(IMG_UINT32 *pui32TotalMem, + IMG_UINT32 *pui32NumberOfLivePids, + PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsageData); + +#endif /* PROCESS_STATS_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/pvr_dvfs.h b/drivers/mcst/gpu-imgtec/services/server/include/pvr_dvfs.h new file mode 100644 index 000000000000..f68b1c9ddf4d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/pvr_dvfs.h @@ -0,0 +1,136 @@ +/*************************************************************************/ /*! +@File pvr_dvfs.h +@Title System level interface for DVFS +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _PVR_DVFS_H_ +#define _PVR_DVFS_H_ + +#include + +#if defined(SUPPORT_LINUX_DVFS) + #include + #include + + #if defined(CONFIG_DEVFREQ_THERMAL) + #include + #endif + + #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) + #include + #else + #include + #endif +#endif + +#include "img_types.h" + +typedef void (*PFN_SYS_DEV_DVFS_SET_FREQUENCY)(IMG_UINT32 ui32Freq); +typedef void (*PFN_SYS_DEV_DVFS_SET_VOLTAGE)(IMG_UINT32 ui32Volt); + +typedef struct _IMG_OPP_ +{ + IMG_UINT32 ui32Volt; + /* + * Unit of frequency in Hz. + */ + IMG_UINT32 ui32Freq; +} IMG_OPP; + +typedef struct _IMG_DVFS_DEVICE_CFG_ +{ + const IMG_OPP *pasOPPTable; + IMG_UINT32 ui32OPPTableSize; +#if defined(SUPPORT_LINUX_DVFS) + IMG_UINT32 ui32PollMs; +#endif + IMG_BOOL bIdleReq; + PFN_SYS_DEV_DVFS_SET_FREQUENCY pfnSetFrequency; + PFN_SYS_DEV_DVFS_SET_VOLTAGE pfnSetVoltage; + +#if defined(CONFIG_DEVFREQ_THERMAL) && defined(SUPPORT_LINUX_DVFS) + struct devfreq_cooling_power *psPowerOps; +#endif +} IMG_DVFS_DEVICE_CFG; + +#if defined(SUPPORT_LINUX_DVFS) +typedef struct _IMG_DVFS_GOVERNOR_ +{ + IMG_BOOL bEnabled; +} IMG_DVFS_GOVERNOR; + +typedef struct _IMG_DVFS_GOVERNOR_CFG_ +{ + IMG_UINT32 ui32UpThreshold; + IMG_UINT32 ui32DownDifferential; +} IMG_DVFS_GOVERNOR_CFG; +#endif + +#if defined(__linux__) +#if defined(SUPPORT_LINUX_DVFS) +typedef struct _IMG_DVFS_DEVICE_ +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) + struct opp *psOPP; +#else + struct dev_pm_opp *psOPP; +#endif + struct devfreq *psDevFreq; + IMG_BOOL bEnabled; + IMG_HANDLE hGpuUtilUserDVFS; + struct devfreq_simple_ondemand_data data; +#if defined(CONFIG_DEVFREQ_THERMAL) + struct thermal_cooling_device *psDevfreqCoolingDevice; +#endif +} IMG_DVFS_DEVICE; +#endif + +typedef struct _IMG_DVFS_ +{ +#if defined(SUPPORT_LINUX_DVFS) + IMG_DVFS_DEVICE sDVFSDevice; + IMG_DVFS_GOVERNOR sDVFSGovernor; + IMG_DVFS_GOVERNOR_CFG sDVFSGovernorCfg; +#endif + 
IMG_DVFS_DEVICE_CFG sDVFSDeviceCfg; +} PVRSRV_DVFS; +#endif/* (__linux__) */ + +#endif /* _PVR_DVFS_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/pvr_notifier.h b/drivers/mcst/gpu-imgtec/services/server/include/pvr_notifier.h new file mode 100644 index 000000000000..91f643eb3b6d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/pvr_notifier.h @@ -0,0 +1,250 @@ +/*************************************************************************/ /*! +@File +@Title PowerVR notifier interface +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__PVR_NOTIFIER_H__) +#define __PVR_NOTIFIER_H__ + +#include "img_types.h" +#include "pvr_debug.h" + + +/*************************************************************************/ /*! +Command Complete Notifier Interface +*/ /**************************************************************************/ + +typedef IMG_HANDLE PVRSRV_CMDCOMP_HANDLE; +#ifndef _CMDCOMPNOTIFY_PFN_ +typedef void (*PFN_CMDCOMP_NOTIFY)(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle); +#define _CMDCOMPNOTIFY_PFN_ +#endif + +/*************************************************************************/ /*! +@Function PVRSRVCmdCompleteInit +@Description Performs initialisation of the command complete notifier + interface. +@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVCmdCompleteInit(void); + +/*************************************************************************/ /*! +@Function PVRSRVCmdCompleteDeinit +@Description Performs cleanup for the command complete notifier interface. 
+@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error +*/ /**************************************************************************/ +void +PVRSRVCmdCompleteDeinit(void); + +/*************************************************************************/ /*! +@Function PVRSRVRegisterCmdCompleteNotify +@Description Register a callback function that is called when some device + finishes some work, which is signalled via a call to + PVRSRVCheckStatus. +@Output phNotify On success, points to command complete + notifier handle +@Input pfnCmdCompleteNotify Function callback +@Input hPrivData Data to be passed back to the caller via + the callback function +@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVRegisterCmdCompleteNotify(IMG_HANDLE *phNotify, + PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify, + PVRSRV_CMDCOMP_HANDLE hPrivData); + +/*************************************************************************/ /*! +@Function PVRSRVUnregisterCmdCompleteNotify +@Description Unregister a previously registered callback function. +@Input hNotify Command complete notifier handle +@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVUnregisterCmdCompleteNotify(IMG_HANDLE hNotify); + +/*************************************************************************/ /*! +@Function PVRSRVCheckStatus +@Description Notify any registered command complete handlers that some work + has been finished (unless hCmdCompCallerHandle matches a + handler's hPrivData). Also signal the global event object. +@Input hCmdCompCallerHandle Used to prevent a handler from being + notified. A NULL value results in all + handlers being notified. 
+*/ /**************************************************************************/ +void +PVRSRVCheckStatus(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle); + + +/*************************************************************************/ /*! +Debug Notifier Interface +*/ /**************************************************************************/ + +#define DEBUG_REQUEST_DC 0 +#define DEBUG_REQUEST_SYNCTRACKING 1 +#define DEBUG_REQUEST_SYS 2 +#define DEBUG_REQUEST_ANDROIDSYNC 3 +#define DEBUG_REQUEST_LINUXFENCE 4 +#define DEBUG_REQUEST_SYNCCHECKPOINT 5 +#define DEBUG_REQUEST_HTB 6 +#define DEBUG_REQUEST_APPHINT 7 +#define DEBUG_REQUEST_FALLBACKSYNC 8 + +#define DEBUG_REQUEST_VERBOSITY_LOW 0 +#define DEBUG_REQUEST_VERBOSITY_MEDIUM 1 +#define DEBUG_REQUEST_VERBOSITY_HIGH 2 +#define DEBUG_REQUEST_VERBOSITY_MAX DEBUG_REQUEST_VERBOSITY_HIGH + +#define DD_VERB_LVL_ENABLED(_verbLvl, _verbLvlChk) ((_verbLvl) >= (_verbLvlChk)) + +/* + * Macro used within debug dump functions to send output either to PVR_LOG or + * a custom function. The custom function should be stored as a function + * pointer in a local variable called 'pfnDumpDebugPrintf'. 'pvDumpDebugFile' + * is also required as a local variable to serve as a file identifier for the + * printf function if required. + */ +#define PVR_DUMPDEBUG_LOG(...) \ + do \ + { \ + if (pfnDumpDebugPrintf) \ + pfnDumpDebugPrintf(pvDumpDebugFile, __VA_ARGS__); \ + else \ + PVR_LOG((__VA_ARGS__)); \ + } while (0) + +struct _PVRSRV_DEVICE_NODE_; + +typedef IMG_HANDLE PVRSRV_DBGREQ_HANDLE; +#ifndef _DBGNOTIFY_PFNS_ +typedef void (DUMPDEBUG_PRINTF_FUNC)(void *pvDumpDebugFile, + const IMG_CHAR *pszFormat, ...); +typedef void (*PFN_DBGREQ_NOTIFY)(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); +#define _DBGNOTIFY_PFNS_ +#endif + +/*************************************************************************/ /*! 
+@Function PVRSRVRegisterDbgTable +@Description Registers a debug requester table for the given device. The + order in which the debug requester IDs appear in the given + table determine the order in which a set of notifier callbacks + will be called. In other words, the requester ID that appears + first will have all of its associated debug notifier callbacks + called first. This will then be followed by all the callbacks + associated with the next requester ID in the table and so on. +@Input psDevNode Device node to register requester table with +@Input paui32Table Array of requester IDs +@Input ui32Length Number of elements in paui32Table +@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVRegisterDbgTable(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + const IMG_UINT32 *paui32Table, IMG_UINT32 ui32Length); + +/*************************************************************************/ /*! +@Function PVRSRVUnregisterDbgTable +@Description Unregisters a debug requester table. +@Input psDevNode Device node for which the requester table should + be unregistered +@Return void +*/ /**************************************************************************/ +void +PVRSRVUnregisterDbgTable(struct _PVRSRV_DEVICE_NODE_ *psDevNode); + +/*************************************************************************/ /*! +@Function PVRSRVRegisterDbgRequestNotify +@Description Register a callback function that is called when a debug request + is made via a call PVRSRVDebugRequest. There are a number of + verbosity levels ranging from DEBUG_REQUEST_VERBOSITY_LOW up to + DEBUG_REQUEST_VERBOSITY_MAX. The callback will be called once + for each level up to the highest level specified to + PVRSRVDebugRequest. 
+@Output phNotify Points to debug notifier handle on success +@Input psDevNode Device node for which the debug callback + should be registered +@Input pfnDbgRequestNotify Function callback +@Input ui32RequesterID Requester ID. This is used to determine + the order in which callbacks are called +@Input hDbgReqeustHandle Data to be passed back to the caller via + the callback function +@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVRegisterDbgRequestNotify(IMG_HANDLE *phNotify, + struct _PVRSRV_DEVICE_NODE_ *psDevNode, + PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, + IMG_UINT32 ui32RequesterID, + PVRSRV_DBGREQ_HANDLE hDbgReqeustHandle); + +/*************************************************************************/ /*! +@Function PVRSRVUnregisterDbgRequestNotify +@Description Unregister a previously registered callback function. +@Input hNotify Debug notifier handle. +@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVUnregisterDbgRequestNotify(IMG_HANDLE hNotify); + +/*************************************************************************/ /*! +@Function PVRSRVDebugRequest +@Description Notify any registered debug request handlers that a debug + request has been made and at what level. +@Input psDevNode Device node for which the debug request + has been made +@Input ui32VerbLevel The maximum verbosity level to dump +@Input pfnDumpDebugPrintf Used to specify the print function that + should be used to dump any debug + information. If this argument is NULL then + PVR_LOG() will be used as the default + print function. +@Input pvDumpDebugFile Optional file identifier to be passed to + the print function if required. 
+@Return void +*/ /**************************************************************************/ +void +PVRSRVDebugRequest(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); + +#endif /* !defined(__PVR_NOTIFIER_H__) */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/pvrsrv.h b/drivers/mcst/gpu-imgtec/services/server/include/pvrsrv.h new file mode 100644 index 000000000000..c0aab4b68725 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/pvrsrv.h @@ -0,0 +1,497 @@ +/*************************************************************************/ /*! +@File +@Title PowerVR services server header file +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PVRSRV_H +#define PVRSRV_H + +#include "connection_server.h" +#include "pvrsrv_pool.h" +#include "device.h" +#include "power.h" +#include "syscommon.h" +#include "sysinfo.h" +#include "physheap.h" +#include "cache_ops.h" +#include "pvr_notifier.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__) +#define __pvrsrv_defined_struct_enum__ +#include <services_kernel_client.h> +#endif + + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#include "virt_validation_defs.h" +#endif + +#include "dma_support.h" +#include "vz_vmm_pvz.h" + +/*!
+ * For OSThreadDestroy(), which may require a retry + * Try for 100 ms to destroy an OS thread before failing + */ +#define OS_THREAD_DESTROY_TIMEOUT_US 100000ULL +#define OS_THREAD_DESTROY_RETRY_COUNT 10 + +typedef enum _FIRMWARE_ALLOC_TYPE_ +{ + FW_ALLOC_NO_FW_ACCESS = PVRSRV_FW_ALLOC_TYPE(PVRSRV_MEMALLOCFLAG_FW_ALLOC_NO_FW_ACCESS), + FW_ALLOC_MAIN = PVRSRV_FW_ALLOC_TYPE(PVRSRV_MEMALLOCFLAG_FW_ALLOC_MAIN), + FW_ALLOC_CONFIG = PVRSRV_FW_ALLOC_TYPE(PVRSRV_MEMALLOCFLAG_FW_ALLOC_CONFIG), + FW_ALLOC_RAW = PVRSRV_FW_ALLOC_TYPE(PVRSRV_MEMALLOCFLAG_FW_ALLOC_RAW) +} FIRMWARE_ALLOC_TYPE; + +typedef enum _POLL_FLAGS_ +{ + POLL_FLAG_NONE = 0, /* No message or dump is printed on poll timeout */ + POLL_FLAG_LOG_ERROR = 1, /* Log error on poll timeout */ + POLL_FLAG_DEBUG_DUMP = 2 /* Print debug dump on poll timeout */ +} POLL_FLAGS; + +typedef struct _BUILD_INFO_ +{ + IMG_UINT32 ui32BuildOptions; + IMG_UINT32 ui32BuildVersion; + IMG_UINT32 ui32BuildRevision; + IMG_UINT32 ui32BuildType; +#define BUILD_TYPE_DEBUG 0 +#define BUILD_TYPE_RELEASE 1 + /* The above fields are self explanatory */ + /* B.V.N.C can be added later if required */ +} BUILD_INFO; + +typedef struct _DRIVER_INFO_ +{ + BUILD_INFO sUMBuildInfo; + BUILD_INFO sKMBuildInfo; + IMG_UINT8 ui8UMSupportedArch; + IMG_UINT8 ui8KMBitArch; + +#define BUILD_ARCH_64BIT (1 << 0) +#define BUILD_ARCH_32BIT (1 << 1) +#define BUILD_ARCH_BOTH (BUILD_ARCH_32BIT | BUILD_ARCH_64BIT) + IMG_BOOL bIsNoMatch; +}DRIVER_INFO; + +typedef struct PVRSRV_DATA_TAG +{ + PVRSRV_DRIVER_MODE eDriverMode; /*!< Driver mode (i.e. native, host or guest) */ + IMG_BOOL bForceApphintDriverMode; /*!< Indicate if driver mode is forced via apphint */ + DRIVER_INFO sDriverInfo; + IMG_UINT32 ui32RegisteredDevices; + PVRSRV_DEVICE_NODE *psDeviceNodeList; /*!< List head of device nodes */ + PVRSRV_DEVICE_NODE *psHostMemDeviceNode; /*!< DeviceNode to be used for device independent + host based memory allocations where the DevMem + framework is to be used e.g. 
TL */ + PVRSRV_SERVICES_STATE eServicesState; /*!< global driver state */ + + HASH_TABLE *psProcessHandleBase_Table; /*!< Hash table with process handle bases */ + POS_LOCK hProcessHandleBase_Lock; /*!< Lock for the process handle base table */ + PVRSRV_HANDLE_BASE *psProcessHandleBaseBeingFreed; /*!< Pointer to process handle base currently being freed */ + + IMG_HANDLE hGlobalEventObject; /*!< OS Global Event Object */ + IMG_UINT32 ui32GEOConsecutiveTimeouts; /*!< OS Global Event Object Timeouts */ + + IMG_HANDLE hCleanupThread; /*!< Cleanup thread */ + IMG_HANDLE hCleanupEventObject; /*!< Event object to drive cleanup thread */ + POS_SPINLOCK hCleanupThreadWorkListLock; /*!< Lock protecting the cleanup thread work list */ + DLLIST_NODE sCleanupThreadWorkList; /*!< List of work for the cleanup thread */ + IMG_PID cleanupThreadPid; /*!< Cleanup thread process id */ + ATOMIC_T i32NumCleanupItems; /*!< Number of items in cleanup thread work list */ + + IMG_HANDLE hDevicesWatchdogThread; /*!< Devices watchdog thread */ + IMG_HANDLE hDevicesWatchdogEvObj; /*! Event object to drive devices watchdog thread */ + volatile IMG_UINT32 ui32DevicesWatchdogPwrTrans; /*! Number of off -> on power state transitions */ +#if !defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) + volatile IMG_UINT32 ui32DevicesWatchdogTimeout; /*! Timeout for the Devices watchdog Thread */ +#endif +#ifdef PVR_TESTING_UTILS + volatile IMG_UINT32 ui32DevicesWdWakeupCounter; /* Need this for the unit tests. */ +#endif + + POS_LOCK hHWPerfHostPeriodicThread_Lock; /*!< Lock for the HWPerf Host periodic thread */ + IMG_HANDLE hHWPerfHostPeriodicThread; /*!< HWPerf Host periodic thread */ + IMG_HANDLE hHWPerfHostPeriodicEvObj; /*! 
Event object to drive HWPerf thread */ + volatile IMG_BOOL bHWPerfHostThreadStop; + IMG_UINT32 ui32HWPerfHostThreadTimeout; + + IMG_HANDLE hPvzConnection; /*!< PVZ connection used for cross-VM hyper-calls */ + POS_LOCK hPvzConnectionLock; /*!< Lock protecting PVZ connection */ + IMG_BOOL abVmOnline[RGX_NUM_OS_SUPPORTED]; + + IMG_BOOL bUnload; /*!< Driver unload is in progress */ + + IMG_HANDLE hTLCtrlStream; /*! Control plane for TL streams */ + + IMG_HANDLE hDriverThreadEventObject; /*! Event object relating to multi-threading in the Server */ + IMG_BOOL bDriverSuspended; /*! if TRUE, the driver is suspended and new threads should not enter */ + ATOMIC_T iNumActiveDriverThreads; /*! Number of threads active in the Server */ + + PMR *psInfoPagePMR; /*! Handle to exportable PMR of the information page. */ + IMG_UINT32 *pui32InfoPage; /*! CPU memory mapping for information page. */ + DEVMEM_MEMDESC *psInfoPageMemDesc; /*! Memory descriptor of the information page. */ + POS_LOCK hInfoPageLock; /*! Lock guarding access to information page. */ +} PVRSRV_DATA; + + +/*! +****************************************************************************** + @Function PVRSRVGetPVRSRVData + + @Description Get a pointer to the global data + + @Return PVRSRV_DATA * +******************************************************************************/ +PVRSRV_DATA *PVRSRVGetPVRSRVData(void); + + +#define PVRSRV_VZ_MODE_IS(_expr) (DRIVER_MODE_##_expr == PVRSRVGetPVRSRVData()->eDriverMode) +#define PVRSRV_VZ_RETN_IF_MODE(_expr) do { if ( PVRSRV_VZ_MODE_IS(_expr)) { return; } } while (0) +#define PVRSRV_VZ_RETN_IF_NOT_MODE(_expr) do { if (! PVRSRV_VZ_MODE_IS(_expr)) { return; } } while (0) +#define PVRSRV_VZ_RET_IF_MODE(_expr, _rc) do { if ( PVRSRV_VZ_MODE_IS(_expr)) { return (_rc); } } while (0) +#define PVRSRV_VZ_RET_IF_NOT_MODE(_expr, _rc) do { if (! PVRSRV_VZ_MODE_IS(_expr)) { return (_rc); } } while (0) + +/*! 
+****************************************************************************** +@Note The driver execution mode AppHint (i.e. PVRSRV_APPHINT_DRIVERMODE) + can be an override or non-override 32-bit value. An override value + has the MSB bit set & a non-override value has this MSB bit cleared. + Excluding this MSB bit & interpreting the remaining 31-bit as a + signed 31-bit integer, the mode values are: + [-1 native : 0 host : +1 guest ]. +******************************************************************************/ +#define PVRSRV_VZ_APPHINT_MODE_IS_OVERRIDE(_expr) ((IMG_UINT32)(_expr)&(IMG_UINT32)(1<<31)) +#define PVRSRV_VZ_APPHINT_MODE(_expr) \ + ((((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF) == (IMG_UINT32)0x7FFFFFFF) ? DRIVER_MODE_NATIVE : \ + !((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF) ? DRIVER_MODE_HOST : \ + ((IMG_UINT32)((IMG_UINT32)(_expr)&(IMG_UINT)0x7FFFFFFF)==(IMG_UINT32)0x1) ? DRIVER_MODE_GUEST : \ + ((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF)) + +/*! +****************************************************************************** + + @Function LMA memory management API + +******************************************************************************/ +#if defined(SUPPORT_GPUVIRT_VALIDATION) +PVRSRV_ERROR LMA_PhyContigPagesAllocGPV(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize, + PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, + IMG_UINT32 ui32OSid, IMG_PID uiPid); +#endif +PVRSRV_ERROR LMA_PhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize, + PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, + IMG_PID uiPid); + +void LMA_PhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle); + +PVRSRV_ERROR LMA_PhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle, + size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr, + void **pvPtr); + +void LMA_PhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle, + void *pvPtr); + +PVRSRV_ERROR LMA_PhyContigPagesClean(PVRSRV_DEVICE_NODE *psDevNode, + 
PG_HANDLE *psMemHandle, + IMG_UINT32 uiOffset, + IMG_UINT32 uiLength); + +IMG_BOOL IsPhysmemNewRamBackedByLMA(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEVICE_PHYS_HEAP ePhysHeapIdx); + +/*! +****************************************************************************** + @Function PVRSRVPollForValueKM + + @Description + Polls for a value to match a masked read + + @Input psDevNode : Pointer to device node struct + @Input pui32LinMemAddr : CPU linear address to poll + @Input ui32Value : required value + @Input ui32Mask : Mask + @Input bDebugDumpOnFailure : Whether poll failure should result into a debug + dump. CAUTION: When calling this function from code paths which are + also used by debug-dumping code, this argument MUST be IMG_FALSE + otherwise, we might end up requesting debug-dump in recursion and + eventually blow-up call stack. + + @Return PVRSRV_ERROR : +******************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV PVRSRVPollForValueKM(PVRSRV_DEVICE_NODE *psDevNode, + volatile IMG_UINT32 __iomem *pui32LinMemAddr, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + POLL_FLAGS ePollFlags); + +/*! +****************************************************************************** + @Function PVRSRVWaitForValueKM + + @Description + Waits (using EventObjects) for a value to match a masked read + + @Input pui32LinMemAddr : CPU linear address to poll + @Input ui32Value : Required value + @Input ui32Mask : Mask to be applied before checking against + ui32Value + @Return PVRSRV_ERROR : +******************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV +PVRSRVWaitForValueKM(volatile IMG_UINT32 __iomem *pui32LinMemAddr, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask); + +/*! 
+****************************************************************************** + @Function : PVRSRVSystemHasCacheSnooping + + @Description : Returns whether the system has cache snooping + + @Return : IMG_TRUE if the system has cache snooping +******************************************************************************/ +IMG_BOOL PVRSRVSystemHasCacheSnooping(PVRSRV_DEVICE_CONFIG *psDevConfig); + +/*! +****************************************************************************** + @Function : PVRSRVSystemSnoopingIsEmulated + + @Description : Returns whether system cache snooping support is emulated + + @Return : IMG_TRUE if the system cache snooping is emulated in software +******************************************************************************/ +IMG_BOOL PVRSRVSystemSnoopingIsEmulated(PVRSRV_DEVICE_CONFIG *psDevConfig); + +/*! +****************************************************************************** + @Function : PVRSRVSystemSnoopingOfCPUCache + + @Description : Returns whether the system supports snooping of the CPU cache + + @Return : IMG_TRUE if the system has CPU cache snooping +******************************************************************************/ +IMG_BOOL PVRSRVSystemSnoopingOfCPUCache(PVRSRV_DEVICE_CONFIG *psDevConfig); + +/*! +****************************************************************************** + @Function : PVRSRVSystemSnoopingOfDeviceCache + + @Description : Returns whether the system supports snooping of the device cache + + @Return : IMG_TRUE if the system has device cache snooping +******************************************************************************/ +IMG_BOOL PVRSRVSystemSnoopingOfDeviceCache(PVRSRV_DEVICE_CONFIG *psDevConfig); + +/*! 
+****************************************************************************** + @Function : PVRSRVSystemHasNonMappableLocalMemory + + @Description : Returns whether the device has non-mappable part of local memory + + @Return : IMG_TRUE if the device has non-mappable part of local memory +******************************************************************************/ +IMG_BOOL PVRSRVSystemHasNonMappableLocalMemory(PVRSRV_DEVICE_CONFIG *psDevConfig); + +/*! +****************************************************************************** + @Function : PVRSRVSystemWaitCycles + + @Description : Waits for at least ui32Cycles of the Device clk. +******************************************************************************/ +void PVRSRVSystemWaitCycles(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT32 ui32Cycles); + +PVRSRV_ERROR PVRSRVSystemInstallDeviceLISR(void *pvOSDevice, + IMG_UINT32 ui32IRQ, + const IMG_CHAR *pszName, + PFN_LISR pfnLISR, + void *pvData, + IMG_HANDLE *phLISRData); + +PVRSRV_ERROR PVRSRVSystemUninstallDeviceLISR(IMG_HANDLE hLISRData); + +int PVRSRVGetDriverStatus(void); + +/*! 
+****************************************************************************** + @Function : PVRSRVIsBridgeEnabled + + @Description : Returns whether the given bridge group is enabled + + @Return : IMG_TRUE if the given bridge group is enabled +******************************************************************************/ +static inline IMG_BOOL PVRSRVIsBridgeEnabled(IMG_HANDLE hServices, IMG_UINT32 ui32BridgeGroup) +{ + IMG_UINT32 ui32Bridges; + IMG_UINT32 ui32Offset; + + PVR_UNREFERENCED_PARAMETER(hServices); + +#if defined(SUPPORT_RGX) + if (ui32BridgeGroup >= PVRSRV_BRIDGE_RGX_FIRST) + { + ui32Bridges = gui32RGXBridges; + ui32Offset = PVRSRV_BRIDGE_RGX_FIRST; + } + else +#endif /* SUPPORT_RGX */ + { + ui32Bridges = gui32PVRBridges; + ui32Offset = PVRSRV_BRIDGE_FIRST; + } + + return ((1U << (ui32BridgeGroup - ui32Offset)) & ui32Bridges) != 0; +} + + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +/*! +****************************************************************************** + @Function : PopulateLMASubArenas + + @Description : Uses the Apphints passed by the client at + initialization time to add bases and sizes in the + various arenas in the LMA memory + + @Input psDeviceNode : Pointer to the device node struct containing all the + arena information + + @Input aui32OSidMin : Single dimensional array containing the minimum + values for each OSid area + + @Input aui32OSidMax : Single dimensional array containing the maximum + values for each OSid area +******************************************************************************/ + +void PopulateLMASubArenas(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]); + +#if defined(EMULATOR) + void SetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState); + void SetTrustedDeviceAceEnabled(void); +#endif + +#endif + +/*! 
+****************************************************************************** + @Function : PVRSRVCreateRegionRA + + @Description : Create a Resource Arena and initialises it with a given + memory range. + + @Input psDevConfig : Pointer to the Device configuration structure + + @Output ppsRegionRA: Pointer address of the region RA to be created. + + @Output pszRAName : Pointer to RA name + + @Input ui64CpuBase : CPU Physical Base Address of the RA + + @Input ui64DevBase : Device Physical Base Address of the RA + + @Input ui64Size : Size of the RA to be created + + @Input ui32RegionId: Index of the region being initialised + + @Input pszLabel : String briefly describing the RA's purpose + + @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ + error code +******************************************************************************/ +PVRSRV_ERROR PVRSRVCreateRegionRA(PVRSRV_DEVICE_CONFIG *psDevConfig, + RA_ARENA **ppsRegionRA, + IMG_CHAR *pszRAName, + IMG_UINT64 ui64CpuBase, + IMG_UINT64 ui64DevBase, + IMG_UINT64 ui64Size, + IMG_UINT32 ui32RegionId, + IMG_CHAR *pszLabel); + +/*! +****************************************************************************** + @Function : PVRSRVCreateHWPerfHostThread + + @Description : Creates HWPerf event object and thread unless already created + + @Input ui32Timeout : Initial timeout (ms) between updates on the HWPerf thread + + @Return : PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ + error code +******************************************************************************/ +PVRSRV_ERROR PVRSRVCreateHWPerfHostThread(IMG_UINT32 ui32Timeout); + +/*! +****************************************************************************** + @Function : PVRSRVDestroyHWPerfHostThread + + @Description : Destroys HWPerf event object and thread if created + + @Return : PVRSRV_ERROR PVRSRV_OK on success. 
Otherwise, a PVRSRV_ + error code +******************************************************************************/ +PVRSRV_ERROR PVRSRVDestroyHWPerfHostThread(void); + +/*! +****************************************************************************** + @Function : PVRSRVPhysMemHeapsInit + + @Description : Registers and acquires physical memory heaps + + @Return : PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ + error code +******************************************************************************/ +PVRSRV_ERROR PVRSRVPhysMemHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEVICE_CONFIG *psDevConfig); + +/*! +****************************************************************************** + @Function : PVRSRVPhysMemHeapsDeinit + + @Description : Releases and unregisters physical memory heaps + + @Return : PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ + error code +******************************************************************************/ +void PVRSRVPhysMemHeapsDeinit(PVRSRV_DEVICE_NODE *psDeviceNode); + +#endif /* PVRSRV_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_apphint.h b/drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_apphint.h new file mode 100644 index 000000000000..f43cb7984a60 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_apphint.h @@ -0,0 +1,71 @@ +/**************************************************************************/ /*! +@File +@Title PowerVR AppHint generic interface +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#if !defined(PVRSRV_APPHINT_H) +#define PVRSRV_APPHINT_H + +/* Supplied to PVRSRVAppHintRegisterHandlers*() functions when the apphint + * is a global driver apphint, i.e. apphints not present in + * APPHINT_DEBUGFS_DEVICE_ID, i.e. not per device. + */ +#define APPHINT_OF_DRIVER_NO_DEVICE ((void*)-1U) + +#if defined(LINUX) + +#include "km_apphint.h" +#define PVRSRVAppHintDumpState() pvr_apphint_dump_state() +#define PVRSRVAppHintRegisterHandlersUINT64(i,q,s,d,p) pvr_apphint_register_handlers_uint64(i,q,s,d,p) +#define PVRSRVAppHintRegisterHandlersUINT32(i,q,s,d,p) pvr_apphint_register_handlers_uint32(i,q,s,d,p) +#define PVRSRVAppHintRegisterHandlersBOOL(i,q,s,d,p) pvr_apphint_register_handlers_bool(i,q,s,d,p) +#define PVRSRVAppHintRegisterHandlersSTRING(i,q,s,d,p) pvr_apphint_register_handlers_string(i,q,s,d,p) + +#else + +#define PVRSRVAppHintDumpState() +#define PVRSRVAppHintRegisterHandlersUINT64(i,q,s,d,p) +#define PVRSRVAppHintRegisterHandlersUINT32(i,q,s,d,p) +#define PVRSRVAppHintRegisterHandlersBOOL(i,q,s,d,p) +#define PVRSRVAppHintRegisterHandlersSTRING(i,q,s,d,p) + +#endif + +#endif /* PVRSRV_APPHINT_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_bridge_init.h b/drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_bridge_init.h new file mode 100644 index 000000000000..b99d474cf253 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_bridge_init.h @@ -0,0 +1,57 @@ +/**************************************************************************/ /*! +@File +@Title PVR Common Bridge Init/Deinit Module (kernel side) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the common PVR Bridge init/deinit code +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#ifndef _PVRSRV_BRIDGE_INIT_H_ +#define _PVRSRV_BRIDGE_INIT_H_ + +#include "img_types.h" +#include "pvrsrv_error.h" + +PVRSRV_ERROR ServerBridgeInit(void); +PVRSRV_ERROR DeviceDepBridgeInit(IMG_UINT64 ui64Features); + +PVRSRV_ERROR ServerBridgeDeInit(void); +PVRSRV_ERROR DeviceDepBridgeDeInit(IMG_UINT64 ui64Features); + + +#endif /* _PVRSRV_BRIDGE_INIT_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_cleanup.h b/drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_cleanup.h new file mode 100644 index 000000000000..d476b8feec8a --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_cleanup.h @@ -0,0 +1,159 @@ +/**************************************************************************/ /*! +@File +@Title PowerVR SrvKM cleanup thread deferred work interface +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _PVRSRV_CLEANUP_H
+#define _PVRSRV_CLEANUP_H
+
+#include "dllist.h"
+
+/**************************************************************************/ /*!
+@Brief          CLEANUP_THREAD_FN
+
+@Description    This is the function prototype for the pfnFree member found in
+                the structure PVRSRV_CLEANUP_THREAD_WORK. The function is
+                responsible for carrying out the clean up work and if successful
+                freeing the memory originally supplied to the call
+                PVRSRVCleanupThreadAddWork().
+
+@Input          pvParam This is private data originally supplied by the caller
+                to PVRSRVCleanupThreadAddWork() when registering the
+                clean up work item, psData->pvData. It can be cast
+                to a relevant type within the using module.
+ +@Return PVRSRV_OK if the cleanup operation was successful and the + callback has freed the PVRSRV_CLEANUP_THREAD_WORK* work item + memory original supplied to PVRSRVCleanupThreadAddWork() + Any other error code will lead to the work item + being re-queued and hence the original + PVRSRV_CLEANUP_THREAD_WORK* must not be freed. +*/ /***************************************************************************/ + +typedef PVRSRV_ERROR (*CLEANUP_THREAD_FN)(void *pvParam); + + +/* Typical number of times a caller should want the work to be retried in case + * of the callback function (pfnFree) returning an error. + * Callers to PVRSRVCleanupThreadAddWork should provide this value as the retry + * count (ui32RetryCount) unless there are special requirements. + * A value of 200 corresponds to around ~20s (200 * 100ms). If it is not + * successful by then give up as an unrecoverable problem has occurred. + */ +#define CLEANUP_THREAD_RETRY_COUNT_DEFAULT 200u +/* Like for CLEANUP_THREAD_RETRY_COUNT_DEFAULT but call will wait for + * a specified amount of time rather than number of retries. + */ +#define CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT 20000u /* 20s */ + +/* Use to set retry count on a cleanup item. + * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK + * _count - retry count + */ +#define CLEANUP_THREAD_SET_RETRY_COUNT(_item,_count) \ + do { \ + (_item)->ui32RetryCount = (_count); \ + (_item)->ui32TimeStart = 0; \ + (_item)->ui32TimeEnd = 0; \ + } while (0) + +/* Use to set timeout deadline on a cleanup item. + * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK + * _timeout - timeout in milliseconds, if 0 + * CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT is used + */ +#define CLEANUP_THREAD_SET_RETRY_TIMEOUT(_item,_timeout) \ + do { \ + (_item)->ui32RetryCount = 0; \ + (_item)->ui32TimeStart = OSClockms(); \ + (_item)->ui32TimeEnd = (_item)->ui32TimeStart + ((_timeout) > 0 ? 
\ + (_timeout) : CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT); \ + } while (0) + +/* Indicates if the timeout on a given item has been reached. + * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK + */ +#define CLEANUP_THREAD_RETRY_TIMEOUT_REACHED(_item) \ + ((_item)->ui32TimeEnd - (_item)->ui32TimeStart >= \ + OSClockms() - (_item)->ui32TimeStart) + +/* Indicates if the current item is waiting on timeout or retry count. + * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK + * */ +#define CLEANUP_THREAD_IS_RETRY_TIMEOUT(_item) \ + ((_item)->ui32TimeStart != (_item->ui32TimeEnd)) + +/* Clean up work item specifics so that the task can be managed by the + * pvr_defer_free cleanup thread in the Server. + */ +typedef struct _PVRSRV_CLEANUP_THREAD_WORK_ +{ + DLLIST_NODE sNode; /*!< List node used internally by the cleanup + thread */ + CLEANUP_THREAD_FN pfnFree; /*!< Pointer to the function to be called to + carry out the deferred cleanup */ + void *pvData; /*!< private data for pfnFree, usually a way back + to the original PVRSRV_CLEANUP_THREAD_WORK* + pointer supplied in the call to + PVRSRVCleanupThreadAddWork(). */ + IMG_UINT32 ui32TimeStart; /*!< Timestamp in ms of the moment when + cleanup item has been created. */ + IMG_UINT32 ui32TimeEnd; /*!< Time in ms after which no further retry + attempts will be made, item discard and + error logged when this is reached. */ + IMG_UINT32 ui32RetryCount; /*!< Number of times the callback should be + re-tried when it returns error. */ + IMG_BOOL bDependsOnHW; /*!< Retry again after the RGX interrupt signals + the global event object */ +} PVRSRV_CLEANUP_THREAD_WORK; + + +/**************************************************************************/ /*! 
+@Function PVRSRVCleanupThreadAddWork + +@Description Add a work item to be called from the cleanup thread + +@Input psData : The function pointer and private data for the callback + +@Return None +*/ /***************************************************************************/ +void PVRSRVCleanupThreadAddWork(PVRSRV_CLEANUP_THREAD_WORK *psData); + +#endif /* _PVRSRV_CLEANUP_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_device.h b/drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_device.h new file mode 100644 index 000000000000..a1750cc0a645 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_device.h @@ -0,0 +1,364 @@ +/**************************************************************************/ /*! +@File +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#ifndef __PVRSRV_DEVICE_H__ +#define __PVRSRV_DEVICE_H__ + +#include "img_types.h" +#include "physheap.h" +#include "pvrsrv_error.h" +#include "rgx_fwif_km.h" +#include "servicesext.h" +#include "cache_ops.h" + +#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS) +#include "pvr_dvfs.h" +#endif + +typedef struct _PVRSRV_DEVICE_CONFIG_ PVRSRV_DEVICE_CONFIG; +typedef enum _DRIVER_MODE_ +{ +/* Do not use these enumerations directly, to query the + current driver mode, use the PVRSRV_VZ_MODE_IS() + macro */ + DRIVER_MODE_NATIVE = -1, + DRIVER_MODE_HOST = 0, + DRIVER_MODE_GUEST +} PVRSRV_DRIVER_MODE; + +/* + * This type defines location-oriented physical heap IDs which are used to + * help map to actual physical heaps (often far fewer) defined in the system + * layer. See PVRSRV_DEVICE_CONFIG.aui32PhysHeapID[]. + * These IDs are replicated in the Device Memory allocation flags to allow + * allocations to be made in terms of their locality to ensure the correct + * physical heap is accessed for the given system/platform configuration. + * + * EXTERNAL - This is used by some PMR import/export factories where the + * physical memory heap is not managed by the pvrsrv driver. 
+ */ +typedef enum +{ + PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL = 0, + PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL = 1, + PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL = 2, + PVRSRV_DEVICE_PHYS_HEAP_FW_GUEST = 3, + PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL = 4, + PVRSRV_DEVICE_PHYS_HEAP_LAST +} PVRSRV_DEVICE_PHYS_HEAP; + +typedef enum +{ + PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_MAPPABLE = 0, + PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_NON_MAPPABLE = 1, + PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_LAST +} PVRSRV_DEVICE_LOCAL_MEMORY_ARENA; + +typedef enum _PVRSRV_DEVICE_SNOOP_MODE_ +{ + PVRSRV_DEVICE_SNOOP_NONE = 0, + PVRSRV_DEVICE_SNOOP_CPU_ONLY, + PVRSRV_DEVICE_SNOOP_DEVICE_ONLY, + PVRSRV_DEVICE_SNOOP_CROSS, + PVRSRV_DEVICE_SNOOP_EMULATED, +} PVRSRV_DEVICE_SNOOP_MODE; + +#if defined(SUPPORT_SOC_TIMER) +typedef IMG_UINT64 +(*PFN_SYS_DEV_SOC_TIMER_READ)(IMG_HANDLE hSysData); +#endif + +typedef enum _PVRSRV_DEVICE_FABRIC_TYPE_ +{ + PVRSRV_DEVICE_FABRIC_NONE = 0, + PVRSRV_DEVICE_FABRIC_ACELITE, + PVRSRV_DEVICE_FABRIC_FULLACE, +} PVRSRV_DEVICE_FABRIC_TYPE; + +typedef IMG_UINT32 +(*PFN_SYS_DEV_CLK_FREQ_GET)(IMG_HANDLE hSysData); + +typedef PVRSRV_ERROR +(*PFN_SYS_DEV_PRE_POWER)(IMG_HANDLE hSysData, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + IMG_BOOL bForced); + +typedef PVRSRV_ERROR +(*PFN_SYS_DEV_POST_POWER)(IMG_HANDLE hSysData, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + IMG_BOOL bForced); + +typedef void +(*PFN_SYS_DEV_INTERRUPT_HANDLED)(PVRSRV_DEVICE_CONFIG *psDevConfig); + +typedef PVRSRV_ERROR +(*PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE)(IMG_HANDLE hSysData, + IMG_UINT64 ui64MemSize); + +typedef void (*PFN_SYS_DEV_FEAT_DEP_INIT)(PVRSRV_DEVICE_CONFIG *, IMG_UINT64); + +typedef void +(*PFN_SYS_DEV_HOST_CACHE_MAINTENANCE)(IMG_HANDLE hSysData, + PVRSRV_CACHE_OP eRequestType, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd); + +typedef enum _PVRSRV_TD_FW_MEM_REGION_ +{ + 
PVRSRV_DEVICE_FW_CODE_REGION = 0, + PVRSRV_DEVICE_FW_PRIVATE_DATA_REGION = 1, + PVRSRV_DEVICE_FW_COREMEM_CODE_REGION = 2, + PVRSRV_DEVICE_FW_COREMEM_DATA_REGION = 3 +} PVRSRV_TD_FW_MEM_REGION; + +#if defined(SUPPORT_TRUSTED_DEVICE) + +#define TD_MAX_NUM_MIPS_PAGETABLE_PAGES (4U) + +typedef struct _PVRSRV_TD_FW_PARAMS_ +{ + const void *pvFirmware; + IMG_UINT32 ui32FirmwareSize; + + union + { + struct + { + /* META-only parameters */ + IMG_DEV_VIRTADDR sFWCodeDevVAddr; + IMG_DEV_VIRTADDR sFWDataDevVAddr; + IMG_DEV_VIRTADDR sFWCorememCodeDevVAddr; + RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr; + IMG_DEVMEM_SIZE_T uiFWCorememCodeSize; + IMG_DEV_VIRTADDR sFWCorememDataDevVAddr; + RGXFWIF_DEV_VIRTADDR sFWCorememDataFWAddr; + IMG_UINT32 ui32NumThreads; + } sMeta; + + struct + { + /* MIPS-only parameters */ + IMG_DEV_PHYADDR sGPURegAddr; + IMG_DEV_PHYADDR asFWPageTableAddr[TD_MAX_NUM_MIPS_PAGETABLE_PAGES]; + IMG_DEV_PHYADDR sFWStackAddr; + IMG_UINT32 ui32FWPageTableLog2PageSize; + IMG_UINT32 ui32FWPageTableNumPages; + } sMips; + } uFWP; +} PVRSRV_TD_FW_PARAMS; + +typedef PVRSRV_ERROR +(*PFN_TD_SEND_FW_IMAGE)(IMG_HANDLE hSysData, + PVRSRV_TD_FW_PARAMS *psTDFWParams); + +typedef struct _PVRSRV_TD_POWER_PARAMS_ +{ + IMG_DEV_PHYADDR sPCAddr; + + /* MIPS-only fields */ + IMG_DEV_PHYADDR sGPURegAddr; + IMG_DEV_PHYADDR sBootRemapAddr; + IMG_DEV_PHYADDR sCodeRemapAddr; + IMG_DEV_PHYADDR sDataRemapAddr; +} PVRSRV_TD_POWER_PARAMS; + +typedef PVRSRV_ERROR +(*PFN_TD_SET_POWER_PARAMS)(IMG_HANDLE hSysData, + PVRSRV_TD_POWER_PARAMS *psTDPowerParams); + +typedef PVRSRV_ERROR +(*PFN_TD_RGXSTART)(IMG_HANDLE hSysData); + +typedef PVRSRV_ERROR +(*PFN_TD_RGXSTOP)(IMG_HANDLE hSysData); + +#endif /* defined(SUPPORT_TRUSTED_DEVICE) */ + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +typedef void (*PFN_SYS_DEV_VIRT_INIT)(IMG_UINT32[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], IMG_UINT32[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]); +#endif /* 
defined(SUPPORT_GPUVIRT_VALIDATION) */ + +struct _PVRSRV_DEVICE_CONFIG_ +{ + /*! OS device passed to SysDevInit (linux: 'struct device') */ + void *pvOSDevice; + + /*! + *! Service representation of pvOSDevice. Should be set to NULL when the + *! config is created in SysDevInit. Set by Services once a device node has + *! been created for this config and unset before SysDevDeInit is called. + */ + struct _PVRSRV_DEVICE_NODE_ *psDevNode; + + /*! Name of the device */ + IMG_CHAR *pszName; + + /*! Version of the device (optional) */ + IMG_CHAR *pszVersion; + + /*! Register bank address */ + IMG_CPU_PHYADDR sRegsCpuPBase; + /*! Register bank size */ + IMG_UINT32 ui32RegsSize; + /*! Device interrupt number */ + IMG_UINT32 ui32IRQ; + + PVRSRV_DEVICE_SNOOP_MODE eCacheSnoopingMode; + + /*! Device specific data handle */ + IMG_HANDLE hDevData; + + /*! System specific data that gets passed into system callback functions. */ + IMG_HANDLE hSysData; + + IMG_BOOL bHasNonMappableLocalMemory; + + /*! Indicates if system supports FBCDC v3.1 */ + IMG_BOOL bHasFBCDCVersion31; + + PHYS_HEAP_CONFIG *pasPhysHeaps; + IMG_UINT32 ui32PhysHeapCount; + + /*! + *! ID of the Physical memory heap to use. + *! + *! The first entry (aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]) + *! will be used for allocations where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL + *! flag is not set. Normally this will be the PhysHeapID of an LMA heap + *! but the configuration could specify a UMA heap here (if desired). + *! + *! The second entry (aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]) + *! will be used for allocations where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL + *! flag is set. Normally this will be the PhysHeapID of a UMA heap but + *! the configuration could specify an LMA heap here (if desired). + *! + *! The third entry (aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]) + *! will be used for allocations where the PVRSRV_FW_ALLOC_TYPE is + *! FW_ALLOC_MAIN, FW_ALLOC_CONFIG or FW_ALLOC_RAW. + *! + *! 
The fourth entry (aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL]) + *! will be used for allocations that are imported into the driver and + *! are local to other devices, e.g. a display controller. + *! + *! In the event of there being only one Physical Heap, the configuration + *! should specify the same heap details in all entries. + */ + IMG_UINT32 aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_LAST]; + + /*! + *! Callbacks to change system device power state at the beginning and end + *! of a power state change (optional). + */ + PFN_SYS_DEV_PRE_POWER pfnPrePowerState; + PFN_SYS_DEV_POST_POWER pfnPostPowerState; + + /*! Callback to obtain the clock frequency from the device (optional). */ + PFN_SYS_DEV_CLK_FREQ_GET pfnClockFreqGet; + +#if defined(SUPPORT_SOC_TIMER) + /*! Callback to read SoC timer register value (mandatory). */ + PFN_SYS_DEV_SOC_TIMER_READ pfnSoCTimerRead; +#endif + + /*! + *! Callback to handle memory budgeting. Can be used to reject allocations + *! over a certain size (optional). + */ + PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE pfnCheckMemAllocSize; + + /*! + *! Callback to perform host CPU cache maintenance. Might be needed for + *! architectures which allow extensions such as RISC-V (optional). + */ + PFN_SYS_DEV_HOST_CACHE_MAINTENANCE pfnHostCacheMaintenance; + IMG_BOOL bHasPhysicalCacheMaintenance; + +#if defined(SUPPORT_TRUSTED_DEVICE) + /*! + *! Callback to send FW image and FW boot time parameters to the trusted + *! device. + */ + PFN_TD_SEND_FW_IMAGE pfnTDSendFWImage; + + /*! + *! Callback to send parameters needed in a power transition to the trusted + *! device. + */ + PFN_TD_SET_POWER_PARAMS pfnTDSetPowerParams; + + /*! Callbacks to ping the trusted device to securely run RGXStart/Stop() */ + PFN_TD_RGXSTART pfnTDRGXStart; + PFN_TD_RGXSTOP pfnTDRGXStop; +#endif /* defined(SUPPORT_TRUSTED_DEVICE) */ + + /*! 
Function that does device feature specific system layer initialisation */ + PFN_SYS_DEV_FEAT_DEP_INIT pfnSysDevFeatureDepInit; + +#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS) + PVRSRV_DVFS sDVFS; +#endif + +#if defined(SUPPORT_ALT_REGBASE) + IMG_DEV_PHYADDR sAltRegsGpuPBase; +#endif + + /*! + *! Indicates if device physical address 0x0 might be used as GPU memory + *! (e.g. LMA system or UMA system with CPU PA 0x0 reserved by the OS, + *! but CPU PA != device PA and device PA 0x0 available for the GPU) + */ + IMG_BOOL bDevicePA0IsValid; + + /*! + *! Function to initialize System-specific virtualization. If not supported + *! this should be a NULL reference. Only present if + *! SUPPORT_GPUVIRT_VALIDATION is defined. + */ +#if defined(SUPPORT_GPUVIRT_VALIDATION) + PFN_SYS_DEV_VIRT_INIT pfnSysDevVirtInit; +#endif +}; + +#endif /* __PVRSRV_DEVICE_H__*/ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_pool.h b/drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_pool.h new file mode 100644 index 000000000000..71a204fe4e51 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_pool.h @@ -0,0 +1,135 @@ +/**************************************************************************/ /*! +@File +@Title Services pool implementation +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provides a generic pool implementation. + The pool allows to dynamically retrieve and return entries from + it using functions pair PVRSRVPoolGet/PVRSRVPoolPut. The entries + are created in lazy manner which means not until first usage. + The pool API allows to pass and allocation/free functions + pair that will allocate entry's private data and return it + to the caller on every entry 'Get'. + The pool will keep up to ui32MaxEntries entries allocated. + Every entry that exceeds this number and is 'Put' back to the + pool will be freed on the spot instead being returned to the + pool. 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#if !defined(__PVRSRVPOOL_H__) +#define __PVRSRVPOOL_H__ + +/**************************************************************************/ /*! + @Description Callback function called during creation of the new element. This + function allocates an object that will be stored in the pool. + The object can be retrieved from the pool by calling + PVRSRVPoolGet. + @Input pvPrivData Private data passed to the alloc function. + @Output pvOut Allocated object. + @Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise +*/ /***************************************************************************/ +typedef PVRSRV_ERROR (PVRSRV_POOL_ALLOC_FUNC)(void *pvPrivData, void **pvOut); + +/**************************************************************************/ /*! + @Description Callback function called to free the object allocated by + the counterpart alloc function. + @Input pvPrivData Private data passed to the free function. + @Output pvFreeData Object allocated by PVRSRV_POOL_ALLOC_FUNC. +*/ /***************************************************************************/ +typedef void (PVRSRV_POOL_FREE_FUNC)(void *pvPrivData, void *pvFreeData); + +typedef IMG_HANDLE PVRSRV_POOL_TOKEN; + +typedef struct _PVRSRV_POOL_ PVRSRV_POOL; + +/**************************************************************************/ /*! 
+ @Function      PVRSRVPoolCreate
+ @Description   Creates new buffer pool.
+ @Input         pfnAlloc        Allocation function pointer. Function is used
+                                to allocate new pool entries' data.
+ @Input         pfnFree         Free function pointer. Function is used to
+                                free memory allocated by pfnAlloc function.
+ @Input         ui32MaxEntries  Total maximum number of entries in the pool.
+ @Input         pszName         Name of the pool. String has to be NULL
+                                terminated.
+ @Input         pvPrivData      Private data that will be passed to pfnAlloc and
+                                pfnFree functions.
+ @Output        ppsPool         New buffer pool object.
+ @Return        PVRSRV_ERROR    PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR PVRSRVPoolCreate(PVRSRV_POOL_ALLOC_FUNC *pfnAlloc,
+		PVRSRV_POOL_FREE_FUNC *pfnFree,
+		IMG_UINT32 ui32MaxEntries,
+		const IMG_CHAR *pszName,
+		void *pvPrivData,
+		PVRSRV_POOL **ppsPool);
+
+/**************************************************************************/ /*!
+ @Function      PVRSRVPoolDestroy
+ @Description   Destroys pool created by PVRSRVPoolCreate.
+ @Input         psPool          Buffer pool object meant to be destroyed.
+*/ /***************************************************************************/
+void PVRSRVPoolDestroy(PVRSRV_POOL *psPool);
+
+/**************************************************************************/ /*!
+ @Function      PVRSRVPoolGet
+ @Description   Retrieves an entry from a pool. If no free elements are
+                available new entry will be allocated.
+ @Input         psPool          Pointer to the pool.
+ @Output        hToken          Pointer to the entry handle.
+ @Output        ppvDataOut      Pointer to data stored in the entry (the data
+                                allocated by the pfnAlloc function).
+ @Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise +*/ /***************************************************************************/ +PVRSRV_ERROR PVRSRVPoolGet(PVRSRV_POOL *psPool, + PVRSRV_POOL_TOKEN *hToken, + void **ppvDataOut); + +/**************************************************************************/ /*! + @Function PVRSRVPoolPut + @Description Returns entry to the pool. If number of entries is greater + than ui32MaxEntries set during pool creation the entry will + be freed instead. + @Input psPool Pointer to the pool. + @Input hToken Entry handle. + @Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise +*/ /***************************************************************************/ +PVRSRV_ERROR PVRSRVPoolPut(PVRSRV_POOL *psPool, + PVRSRV_POOL_TOKEN hToken); + +#endif /* __PVRSRVPOOL_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_sync_server.h b/drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_sync_server.h new file mode 100644 index 000000000000..f2fb88906363 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/pvrsrv_sync_server.h @@ -0,0 +1,277 @@ +/**************************************************************************/ /*! +@File +@Title Fence sync server interface +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#ifndef _PVRSRV_SYNC_SERVER_H_ +#define _PVRSRV_SYNC_SERVER_H_ + +#if defined(SUPPORT_FALLBACK_FENCE_SYNC) +#include "sync_fallback_server.h" +#include "pvr_notifier.h" +#include "img_types.h" +#include "pvrsrv_sync_km.h" +#elif defined(SUPPORT_NATIVE_FENCE_SYNC) +#include "pvr_sync.h" +#endif + +#include "rgxhwperf.h" + +#define SYNC_SW_TIMELINE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH +#define SYNC_SW_FENCE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH + +typedef struct _SYNC_TIMELINE_OBJ_ +{ + void *pvTlObj; /* Implementation specific timeline object */ + + PVRSRV_TIMELINE hTimeline; /* Reference to implementation-independent timeline object */ +} SYNC_TIMELINE_OBJ; + +typedef struct _SYNC_FENCE_OBJ_ +{ + void *pvFenceObj; /* Implementation specific fence object */ + + PVRSRV_FENCE hFence; /* Reference to implementation-independent fence object */ +} SYNC_FENCE_OBJ; + +static inline void SyncClearTimelineObj(SYNC_TIMELINE_OBJ *psSTO) +{ + psSTO->pvTlObj = NULL; + psSTO->hTimeline = PVRSRV_NO_TIMELINE; +} + +static inline IMG_BOOL SyncIsTimelineObjValid(const SYNC_TIMELINE_OBJ *psSTO) +{ + return psSTO->pvTlObj != NULL; +} + +static inline void SyncClearFenceObj(SYNC_FENCE_OBJ *psSFO) +{ + psSFO->pvFenceObj = NULL; + psSFO->hFence = PVRSRV_NO_FENCE; +} + +static inline IMG_BOOL SyncIsFenceObjValid(const SYNC_FENCE_OBJ *psSFO) +{ + return psSFO->pvFenceObj != NULL; +} + + +/* Mapping of each required function to its appropriate sync-implementation function */ +#if defined(SUPPORT_FALLBACK_FENCE_SYNC) + #define SyncFenceWaitKM_ SyncFbFenceWait + #define SyncGetFenceObj_ SyncFbGetFenceObj + #define SyncFenceReleaseKM_ SyncFbFenceReleaseKM + #define SyncSWTimelineFenceCreateKM_ SyncFbSWTimelineFenceCreateKM + #define SyncSWTimelineAdvanceKM_ SyncFbSWTimelineAdvanceKM + #define SyncSWGetTimelineObj_ SyncFbSWGetTimelineObj + #define SyncSWTimelineReleaseKM_ SyncFbTimelineRelease + #define 
SyncDumpFence_ SyncFbDumpFenceKM + #define SyncSWDumpTimeline_ SyncFbSWDumpTimelineKM +#elif defined(SUPPORT_NATIVE_FENCE_SYNC) + #define SyncFenceWaitKM_ pvr_sync_fence_wait + #define SyncGetFenceObj_ pvr_sync_fence_get + #define SyncFenceReleaseKM_ pvr_sync_fence_release + #define SyncSWTimelineFenceCreateKM_ pvr_sync_sw_timeline_fence_create + #define SyncSWTimelineAdvanceKM_ pvr_sync_sw_timeline_advance + #define SyncSWGetTimelineObj_ pvr_sync_sw_timeline_get + #define SyncSWTimelineReleaseKM_ pvr_sync_sw_timeline_release + #define SyncDumpFence_ sync_dump_fence + #define SyncSWDumpTimeline_ sync_sw_dump_timeline +#endif + +/*************************************************************************/ /*! +@Function SyncFenceWaitKM + +@Description Wait for all the sync points in the fence to be signalled. + +@Input psFenceObj Fence to wait on + +@Input ui32TimeoutInMs Maximum time to wait (in milliseconds) + +@Return PVRSRV_OK once the fence has been passed (all + containing check points have either + signalled or errored) + PVRSRV_ERROR_TIMEOUT if the poll has exceeded the timeout + PVRSRV_ERROR_FAILED_DEPENDENCIES Other sync-impl specific error +*/ /**************************************************************************/ +static inline PVRSRV_ERROR +SyncFenceWaitKM(PVRSRV_DEVICE_NODE *psDevNode, + const SYNC_FENCE_OBJ *psFenceObj, + IMG_UINT32 ui32TimeoutInMs) +{ + PVRSRV_ERROR eError; + + RGXSRV_HWPERF_SYNC_FENCE_WAIT(psDevNode->pvDevice, + BEGIN, + OSGetCurrentProcessID(), + psFenceObj->hFence, + ui32TimeoutInMs); + + eError = SyncFenceWaitKM_(psFenceObj->pvFenceObj, ui32TimeoutInMs); + + RGXSRV_HWPERF_SYNC_FENCE_WAIT(psDevNode->pvDevice, + END, + OSGetCurrentProcessID(), + psFenceObj->hFence, + ((eError == PVRSRV_OK) ? + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_PASSED : + ((eError == PVRSRV_ERROR_TIMEOUT) ? 
+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_TIMEOUT : + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_ERROR))); + return eError; +} + +/*************************************************************************/ /*! +@Function SyncGetFenceObj + +@Description Get the implementation specific server fence object from + opaque implementation independent PVRSRV_FENCE type. + When successful, this function gets a reference on the base + fence, which needs to be dropped using SyncFenceReleaseKM, + when fence object is no longer in use. + +@Input iFence Input opaque fence object + +@Output psFenceObj Pointer to implementation specific fence object + +@Return PVRSRV_ERROR PVRSRV_OK, on success +*/ /**************************************************************************/ +static inline PVRSRV_ERROR +SyncGetFenceObj(PVRSRV_FENCE iFence, + SYNC_FENCE_OBJ *psFenceObj) +{ + psFenceObj->hFence = iFence; + return SyncGetFenceObj_(iFence, &psFenceObj->pvFenceObj); +} + +/*************************************************************************/ /*! +@Function SyncFenceReleaseKM + +@Description Release reference on this fence. 
+ +@Input psFenceObj Fence to be released + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +static inline +PVRSRV_ERROR SyncFenceReleaseKM(const SYNC_FENCE_OBJ *psFenceObj) +{ + return SyncFenceReleaseKM_(psFenceObj->pvFenceObj); +} + +/*****************************************************************************/ +/* */ +/* SW TIMELINE SPECIFIC FUNCTIONS */ +/* */ +/*****************************************************************************/ + +static inline PVRSRV_ERROR +SyncSWTimelineFenceCreateKM(PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_TIMELINE hSWTimeline, + const IMG_CHAR *pszFenceName, + PVRSRV_FENCE *phOutFence) +{ + IMG_UINT64 ui64SyncPtIdx; + PVRSRV_ERROR eError; + eError = SyncSWTimelineFenceCreateKM_(hSWTimeline, + pszFenceName, + phOutFence, + &ui64SyncPtIdx); + if (eError == PVRSRV_OK) + { + RGXSRV_HWPERF_ALLOC_SW_FENCE(psDevNode, OSGetCurrentProcessID(), + *phOutFence, hSWTimeline, ui64SyncPtIdx, + pszFenceName, OSStringLength(pszFenceName)); + } + return eError; +} + +static inline PVRSRV_ERROR +SyncSWTimelineAdvanceKM(PVRSRV_DEVICE_NODE *psDevNode, + const SYNC_TIMELINE_OBJ *psSWTimelineObj) +{ + IMG_UINT64 ui64SyncPtIdx; + PVRSRV_ERROR eError; + eError = SyncSWTimelineAdvanceKM_(psSWTimelineObj->pvTlObj, + &ui64SyncPtIdx); + + if (eError == PVRSRV_OK) + { + RGXSRV_HWPERF_SYNC_SW_TL_ADV(psDevNode->pvDevice, + OSGetCurrentProcessID(), + psSWTimelineObj->hTimeline, + ui64SyncPtIdx); + } + return eError; +} + +static inline PVRSRV_ERROR +SyncSWGetTimelineObj(PVRSRV_TIMELINE hSWTimeline, + SYNC_TIMELINE_OBJ *psSWTimelineObj) +{ + psSWTimelineObj->hTimeline = hSWTimeline; + return SyncSWGetTimelineObj_(hSWTimeline, &psSWTimelineObj->pvTlObj); +} + +static inline PVRSRV_ERROR +SyncSWTimelineReleaseKM(const SYNC_TIMELINE_OBJ *psSWTimelineObj) +{ + return SyncSWTimelineReleaseKM_(psSWTimelineObj->pvTlObj); +} + +static inline PVRSRV_ERROR +SyncDumpFence(const SYNC_FENCE_OBJ *psFenceObj, + 
DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + return SyncDumpFence_(psFenceObj->pvFenceObj, pfnDumpDebugPrintf, pvDumpDebugFile); +} + +static inline PVRSRV_ERROR +SyncSWDumpTimeline(const SYNC_TIMELINE_OBJ *psSWTimelineObj, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + return SyncSWDumpTimeline_(psSWTimelineObj->pvTlObj, pfnDumpDebugPrintf, pvDumpDebugFile); +} + + +#endif /* _PVRSRV_SYNC_SERVER_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/ri_server.h b/drivers/mcst/gpu-imgtec/services/server/include/ri_server.h new file mode 100644 index 000000000000..f7467f800070 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/ri_server.h @@ -0,0 +1,106 @@ +/*************************************************************************/ /*! +@File ri_server.h +@Title Resource Information abstraction +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Resource Information (RI) functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RI_SERVER_H +#define RI_SERVER_H + +#include "img_defs.h" +#include "ri_typedefs.h" +#include "pmr.h" +#include "pvrsrv_error.h" +#include "physheap.h" + +PVRSRV_ERROR RIInitKM(void); +void RIDeInitKM(void); + +void RILockAcquireKM(void); +void RILockReleaseKM(void); + +PVRSRV_ERROR RIWritePMREntryKM(PMR *psPMR); + +PVRSRV_ERROR RIWritePMREntryWithOwnerKM(PMR *psPMR, + IMG_PID ui32Owner); + +PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR, + IMG_UINT32 ui32TextBSize, + const IMG_CHAR ai8TextB[DEVMEM_ANNOTATION_MAX_LEN], + IMG_UINT64 uiOffset, + IMG_UINT64 uiSize, + IMG_BOOL bIsImport, + IMG_BOOL bIsSuballoc, + RI_HANDLE *phRIHandle); + +PVRSRV_ERROR RIWriteProcListEntryKM(IMG_UINT32 ui32TextBSize, + const IMG_CHAR *psz8TextB, + IMG_UINT64 ui64Size, + IMG_UINT64 ui64DevVAddr, + RI_HANDLE *phRIHandle); + +PVRSRV_ERROR RIUpdateMEMDESCAddrKM(RI_HANDLE hRIHandle, + IMG_DEV_VIRTADDR sVAddr); + +PVRSRV_ERROR RIDeletePMREntryKM(RI_HANDLE hRIHandle); +PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle); + +PVRSRV_ERROR RIDeleteListKM(void); + +PVRSRV_ERROR RIDumpListKM(PMR *psPMR); + +PVRSRV_ERROR RIDumpAllKM(void); + +PVRSRV_ERROR RIDumpProcessKM(IMG_PID pid); + +#if defined(DEBUG) +PVRSRV_ERROR RIDumpProcessListKM(PMR *psPMR, + IMG_PID pid, + IMG_UINT64 ui64Offset, + IMG_DEV_VIRTADDR *psDevVAddr); +#endif + +IMG_BOOL RIGetListEntryKM(IMG_PID pid, + IMG_HANDLE **ppHandle, + IMG_CHAR **ppszEntryString); + +IMG_INT32 RITotalAllocProcessKM(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType); + +#endif /* RI_SERVER_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/sofunc_pvr.h b/drivers/mcst/gpu-imgtec/services/server/include/sofunc_pvr.h new file mode 100644 index 000000000000..48bc94d8ef0b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/sofunc_pvr.h @@ -0,0 +1,94 @@ +/*************************************************************************/ /*! 
+@File +@Title SO Interface header file for common PVR functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Contains SO interface functions. These functions are defined in + the common layer and are called from the env layer OS specific + implementation. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(SOFUNC_PVR_H_) +#define SOFUNC_PVR_H_ + +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "device.h" +#include "pvr_notifier.h" + + +/**************************************************************************/ /*! + @Function SOPvrDbgRequestNotifyRegister + @Description SO Interface function called from the OS layer implementation. + Register a callback function that is called when a debug request + is made via a call PVRSRVDebugRequest. There are a number of + verbosity levels ranging from DEBUG_REQUEST_VERBOSITY_LOW up to + DEBUG_REQUEST_VERBOSITY_MAX. The callback will be called once + for each level up to the highest level specified to + PVRSRVDebugRequest. +@Output phNotify On success, points to debug notifier handle +@Input psDevNode Device node for which the debug callback + should be registered +@Input pfnDbgRequestNotify Function callback +@Input ui32RequesterID Requester ID. 
This is used to determine + the order in which callbacks are called, + see DEBUG_REQUEST_* +@Input hDbgRequestHandle Data to be passed back to the caller via + the callback function +@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise +*/ /******************************************************************** ******/ +PVRSRV_ERROR SOPvrDbgRequestNotifyRegister(IMG_HANDLE *phNotify, + PVRSRV_DEVICE_NODE *psDevNode, + PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, + IMG_UINT32 ui32RequesterID, + PVRSRV_DBGREQ_HANDLE hDbgRequestHandle); + +/**************************************************************************/ /*! + @Function SOPvrDbgRequestNotifyUnregister + @Description SO Interface function called from the OS layer implementation. + Remove and clean up the specified notifier registration so that + it does not receive any further callbacks. + @Input hNotify Handle returned to caller from + SOPvrDbgRequestNotifyRegister(). + @Return PVRSRV_ERROR +*/ /***************************************************************************/ +PVRSRV_ERROR SOPvrDbgRequestNotifyUnregister(IMG_HANDLE hNotify); + + +#endif /* SOFUNC_PVR_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/sofunc_rgx.h b/drivers/mcst/gpu-imgtec/services/server/include/sofunc_rgx.h new file mode 100644 index 000000000000..be9594d95242 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/sofunc_rgx.h @@ -0,0 +1,95 @@ +/*************************************************************************/ /*! +@File +@Title SO Interface header file for devices/RGX functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Contains SO interface functions. These functions are defined in + the common devices layer and are called from the env layer OS + specific implementation. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(SOFUNC_RGX_H_) +#define SOFUNC_RGX_H_ + +#include "img_types.h" +#include "pvrsrv_error.h" + + +#if !defined(NO_HARDWARE) +/*! +******************************************************************************* + + @Function SORgxGpuUtilStatsRegister + + @Description SO Interface function called from the OS layer implementation. + Initialise data used to compute GPU utilisation statistics + for a particular user (identified by the handle passed as + argument). This function must be called only once for each + different user/handle. + + @Input phGpuUtilUser - Pointer to handle used to identify a user of + RGXGetGpuUtilStats + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser); + + +/*! +******************************************************************************* + + @Function SORgxGpuUtilStatsUnregister + + @Description SO Interface function called from the OS layer implementation. + Free data previously used to compute GPU utilisation statistics + for a particular user (identified by the handle passed as + argument). + + @Input hGpuUtilUser - Handle used to identify a user of + RGXGetGpuUtilStats + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser); +#endif /* !defined(NO_HARDWARE) */ + + + +#endif /* SOFUNC_RGX_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/srvcore.h b/drivers/mcst/gpu-imgtec/services/server/include/srvcore.h new file mode 100644 index 000000000000..443aa246f5f5 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/srvcore.h @@ -0,0 +1,216 @@ +/**************************************************************************/ /*! 
+@File +@Title PVR Bridge Functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the PVR Bridge code +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#ifndef SRVCORE_H +#define SRVCORE_H + +#include "lock_types.h" +#include "connection_server.h" +#include "pvr_debug.h" + +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif + +PVRSRV_ERROR +CopyFromUserWrapper(CONNECTION_DATA *psConnection, + IMG_UINT32 ui32DispatchTableEntry, + void *pvDest, + void __user *pvSrc, + IMG_UINT32 ui32Size); +PVRSRV_ERROR +CopyToUserWrapper(CONNECTION_DATA *psConnection, + IMG_UINT32 ui32DispatchTableEntry, + void __user *pvDest, + void *pvSrc, + IMG_UINT32 ui32Size); + +IMG_INT +DummyBW(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 *psBridgeIn, + IMG_UINT8 *psBridgeOut, + CONNECTION_DATA *psConnection); + +typedef IMG_INT (*BridgeWrapperFunction)(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 *psBridgeIn, + IMG_UINT8 *psBridgeOut, + CONNECTION_DATA *psConnection); + +typedef struct _PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY +{ + BridgeWrapperFunction pfFunction; /*!< The wrapper function that validates the ioctl + arguments before calling into srvkm proper */ + POS_LOCK hBridgeLock; /*!< The bridge lock which needs to be acquired + before calling the above wrapper */ +#if defined(DEBUG_BRIDGE_KM) + const IMG_CHAR *pszIOCName; /*!< Name of the ioctl: e.g. "PVRSRV_BRIDGE_CONNECT_SERVICES" */ + const IMG_CHAR *pszFunctionName; /*!< Name of the wrapper function: e.g. 
"PVRSRVConnectBW" */ + const IMG_CHAR *pszBridgeLockName; /*!< Name of bridge lock which will be acquired */ + IMG_UINT32 ui32CallCount; /*!< The total number of times the ioctl has been called */ + IMG_UINT32 ui32CopyFromUserTotalBytes; /*!< The total number of bytes copied from + userspace within this ioctl */ + IMG_UINT32 ui32CopyToUserTotalBytes; /*!< The total number of bytes copied from + userspace within this ioctl */ + IMG_UINT64 ui64TotalTimeNS; /*!< The total amount of time spent in this bridge function */ + IMG_UINT64 ui64MaxTimeNS; /*!< The maximum amount of time for a single call to this bridge function */ +#endif +}PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY; + +#if defined(SUPPORT_RGX) + #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_RGX_DISPATCH_LAST+1) + #define BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT (PVRSRV_BRIDGE_RGX_LAST+1) +#else + #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_DISPATCH_LAST+1) + #define BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT (PVRSRV_BRIDGE_LAST+1) +#endif + +extern PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT]; + +void BridgeDispatchTableStartOffsetsInit(void); + +void +_SetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup, + IMG_UINT32 ui32Index, + const IMG_CHAR *pszIOCName, + BridgeWrapperFunction pfFunction, + const IMG_CHAR *pszFunctionName, + POS_LOCK hBridgeLock, + const IMG_CHAR* pszBridgeLockName); +void +UnsetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup, + IMG_UINT32 ui32Index); + + +/* PRQA S 0884,3410 2*/ /* macro relies on the lack of brackets */ +#define SetDispatchTableEntry(ui32BridgeGroup, ui32Index, pfFunction,\ + hBridgeLock) \ + _SetDispatchTableEntry(ui32BridgeGroup, ui32Index, #ui32Index, (BridgeWrapperFunction)pfFunction, #pfFunction,\ + (POS_LOCK)hBridgeLock, #hBridgeLock) + +#define DISPATCH_TABLE_GAP_THRESHOLD 5 + + +#if defined(DEBUG_BRIDGE_KM) +typedef struct _PVRSRV_BRIDGE_GLOBAL_STATS +{ + IMG_UINT32 ui32IOCTLCount; + IMG_UINT32 
ui32TotalCopyFromUserBytes; + IMG_UINT32 ui32TotalCopyToUserBytes; +} PVRSRV_BRIDGE_GLOBAL_STATS; + +void BridgeGlobalStatsLock(void); +void BridgeGlobalStatsUnlock(void); + +/* OS specific code may want to report the stats held here and within the + * BRIDGE_DISPATCH_TABLE_ENTRYs (E.g. on Linux we report these via a + * debugfs entry /sys/kernel/debug/pvr/bridge_stats) */ +extern PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats; +#endif + +PVRSRV_ERROR BridgeDispatcherInit(void); +void BridgeDispatcherDeinit(void); + +PVRSRV_ERROR +BridgedDispatchKM(CONNECTION_DATA * psConnection, + PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM); + +PVRSRV_ERROR +PVRSRVConnectKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32Flags, + IMG_UINT32 ui32ClientBuildOptions, + IMG_UINT32 ui32ClientDDKVersion, + IMG_UINT32 ui32ClientDDKBuild, + IMG_UINT8 *pui8KernelArch, + IMG_UINT32 *ui32CapabilityFlags, + IMG_UINT64 *ui64PackedBvnc); + +PVRSRV_ERROR +PVRSRVDisconnectKM(void); + +PVRSRV_ERROR +PVRSRVAcquireGlobalEventObjectKM(IMG_HANDLE *phGlobalEventObject); + +PVRSRV_ERROR +PVRSRVReleaseGlobalEventObjectKM(IMG_HANDLE hGlobalEventObject); + +PVRSRV_ERROR +PVRSRVDumpDebugInfoKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32VerbLevel); + +PVRSRV_ERROR +PVRSRVGetDevClockSpeedKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_PUINT32 pui32RGXClockSpeed); + +PVRSRV_ERROR +PVRSRVHWOpTimeoutKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode); + +PVRSRV_ERROR PVRSRVAlignmentCheckKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32FWAlignChecksSize, + IMG_UINT32 aui32FWAlignChecks[]); + +PVRSRV_ERROR PVRSRVGetDeviceStatusKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 *pui32DeviceStatus); + +PVRSRV_ERROR PVRSRVGetMultiCoreInfoKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 
ui32CapsSize, + IMG_UINT32 *pui32NumCores, + IMG_UINT64 *pui64Caps); + +PVRSRV_ERROR PVRSRVFindProcessMemStatsKM(IMG_PID pid, + IMG_UINT32 ui32ArrSize, + IMG_BOOL bAllProcessStats, + IMG_UINT32 *ui32MemoryStats); + +#endif /* SRVCORE_H */ + +/****************************************************************************** + End of file (srvcore.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/srvinit.h b/drivers/mcst/gpu-imgtec/services/server/include/srvinit.h new file mode 100644 index 000000000000..ea46a78c7a82 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/srvinit.h @@ -0,0 +1,68 @@ +/*************************************************************************/ /*! +@File +@Title Initialisation server internal header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Defines the connections between the various parts of the + initialisation server. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef __SRVINIT_H__ +#define __SRVINIT_H__ + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "device_connection.h" +#include "device.h" + +#if defined(SUPPORT_RGX) +PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode); +#endif + +#if defined(__cplusplus) +} +#endif +#endif /* __SRVINIT_H__ */ + +/****************************************************************************** + End of file (srvinit.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/srvkm.h b/drivers/mcst/gpu-imgtec/services/server/include/srvkm.h new file mode 100644 index 000000000000..ef0beef94567 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/srvkm.h @@ -0,0 +1,145 @@ +/**************************************************************************/ /*! +@File +@Title Services kernel module internal header file +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#ifndef SRVKM_H +#define SRVKM_H + +#include "servicesext.h" + +#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__) +#define __pvrsrv_defined_struct_enum__ +#include +#endif + +struct _PVRSRV_DEVICE_NODE_; + +/*************************************************************************/ /*! +@Function PVRSRVCommonDriverInit +@Description Performs one time driver initialisation of Services Common and + Device layers. 
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise +*/ /**************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV PVRSRVCommonDriverInit(void); + +/*************************************************************************/ /*! +@Function PVRSRVCommonDriverDeInit +@Description Performs one time driver de-initialisation of Services. +@Return void +*/ /**************************************************************************/ +void IMG_CALLCONV PVRSRVCommonDriverDeInit(void); + +/*************************************************************************/ /*! +@Function PVRSRVCommonDeviceCreate +@Description Creates and initialises a common layer Services device node + for an OS native device. First stage device discovery. +@Input pvOSDevice OS native device +@Input i32UMIdentifier A unique identifier which helps recognise this + Device in the UM space. +@Output ppsDeviceNode Points to the new device node on success +@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise +*/ /**************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV +PVRSRVCommonDeviceCreate(void *pvOSDevice, IMG_INT32 i32UMIdentifier, + struct _PVRSRV_DEVICE_NODE_ **ppsDeviceNode); + +/*************************************************************************/ /*! +@Function PVRSRVCommonDeviceInitialise +@Description Initialises the device layer specifics (e.g. boot FW etc) + for the supplied device node, created previously by + PVRSRVCommonDeviceCreate. The device is ready for use when this + second stage device initialisation returns successfully.
+@Input psDeviceNode Device node of the device to be initialised +@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise +*/ /**************************************************************************/ +PVRSRV_ERROR PVRSRVCommonDeviceInitialise(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); + +/*************************************************************************/ /*! +@Function PVRSRVCommonDeviceDestroy +@Description Destroys a PVR Services device node. +@Input psDeviceNode Device node to destroy +@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise +*/ /**************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV +PVRSRVCommonDeviceDestroy(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); + +/****************** +HIGHER LEVEL MACROS +*******************/ + +/*---------------------------------------------------------------------------- +Repeats the body of the loop for a certain minimum time, or until the body +exits by its own means (break, return, goto, etc.) + +Example of usage: + +LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) +{ + if (psQueueInfo->ui32ReadOffset == psQueueInfo->ui32WriteOffset) + { + bTimeout = IMG_FALSE; + break; + } + + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); +} END_LOOP_UNTIL_TIMEOUT(); + +-----------------------------------------------------------------------------*/ + +/* uiNotLastLoop will remain at 1 until the timeout has expired, at which time + * it will be decremented and the loop executed one final time. This is + * necessary when preemption is enabled. + */ +/* PRQA S 3411,3431 12 */ /* critical format, leave alone */ +#define LOOP_UNTIL_TIMEOUT(TIMEOUT) \ +{\ + IMG_UINT32 uiOffset, uiStart, uiCurrent; \ + IMG_INT32 iNotLastLoop; \ + for (uiOffset = 0, uiStart = OSClockus(), uiCurrent = uiStart + 1, iNotLastLoop = 1;\ + ((uiCurrent - uiStart + uiOffset) < (TIMEOUT)) || iNotLastLoop--; \ + uiCurrent = OSClockus(), \ + uiOffset = uiCurrent < uiStart ? 
IMG_UINT32_MAX - uiStart : uiOffset, \ + uiStart = uiCurrent < uiStart ? 0 : uiStart) + +#define END_LOOP_UNTIL_TIMEOUT() \ +} + +#endif /* SRVKM_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/sync_checkpoint.h b/drivers/mcst/gpu-imgtec/services/server/include/sync_checkpoint.h new file mode 100644 index 000000000000..baf568dd87d3 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/sync_checkpoint.h @@ -0,0 +1,725 @@ +/*************************************************************************/ /*! +@File +@Title Synchronisation checkpoint interface header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Defines the client side interface for synchronisation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef SYNC_CHECKPOINT_H +#define SYNC_CHECKPOINT_H + +#include "img_types.h" +#include "pvrsrv_error.h" +#include "pvrsrv_sync_km.h" +#include "pdumpdefs.h" +#include "pdump.h" +#include "dllist.h" +#include "pvr_debug.h" +#include "device_connection.h" +#include "opaque_types.h" + +#ifndef CHECKPOINT_TYPES +#define CHECKPOINT_TYPES +typedef struct _SYNC_CHECKPOINT_CONTEXT *PSYNC_CHECKPOINT_CONTEXT; + +typedef struct _SYNC_CHECKPOINT *PSYNC_CHECKPOINT; +#endif + +/* definitions for functions to be implemented by OS-specific sync - the OS-specific sync code + will call SyncCheckpointRegisterFunctions() when initialised, in order to register functions + we can then call */ +#ifndef _CHECKPOINT_PFNS_ +#define _CHECKPOINT_PFNS_ +typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN)(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, + PVRSRV_FENCE fence, + IMG_UINT32 *nr_checkpoints, + PSYNC_CHECKPOINT **checkpoint_handles, + IMG_UINT64 *pui64FenceUID); +typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN)(const IMG_CHAR *fence_name, + PVRSRV_TIMELINE timeline, + PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, + 
PVRSRV_FENCE *new_fence, + IMG_UINT64 *pui64FenceUID, + void **ppvFenceFinaliseData, + PSYNC_CHECKPOINT *new_checkpoint_handle, + IMG_HANDLE *timeline_update_sync, + IMG_UINT32 *timeline_update_value); +typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN)(PVRSRV_FENCE fence_to_rollback, void *finalise_data); +typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN)(PVRSRV_FENCE fence_to_finalise, void *finalise_data); +typedef void (*PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN)(void *private_data); +typedef void (*PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN)(void *mem_ptr); +typedef IMG_UINT32 (*PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN)(IMG_UINT32 num_ufos, IMG_UINT32 *vaddrs); +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) +typedef IMG_BOOL (*PFN_SYNC_CHECKPOINT_UFO_HAS_SIGNALLED_FN)(IMG_UINT32 ui32FwAddr, IMG_UINT32 ui32Value); +typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_SIGNAL_WAITERS_FN)(void); +typedef void (*PFN_SYNC_CHECKPOINT_CHECK_STATE_FN)(void); +#endif +#if defined(PDUMP) +typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN)(PVRSRV_FENCE iFence, + IMG_UINT32 *puiNumCheckpoints, + PSYNC_CHECKPOINT **papsCheckpoints); +#endif + +#define SYNC_CHECKPOINT_IMPL_MAX_STRLEN 20 + +typedef struct +{ + PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve; + PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate; + PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN pfnFenceDataRollback; + PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN pfnFenceFinalise; + PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN pfnNoHWUpdateTimelines; + PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN pfnFreeCheckpointListMem; + PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN pfnDumpInfoOnStalledUFOs; +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) + PFN_SYNC_CHECKPOINT_UFO_HAS_SIGNALLED_FN pfnCheckpointHasSignalled; + PFN_SYNC_CHECKPOINT_CHECK_STATE_FN pfnCheckState; + PFN_SYNC_CHECKPOINT_SIGNAL_WAITERS_FN pfnSignalWaiters; +#endif + IMG_CHAR 
pszImplName[SYNC_CHECKPOINT_IMPL_MAX_STRLEN]; +#if defined(PDUMP) + PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN pfnSyncFenceGetCheckpoints; +#endif +} PFN_SYNC_CHECKPOINT_STRUCT; + +PVRSRV_ERROR SyncCheckpointRegisterFunctions(PFN_SYNC_CHECKPOINT_STRUCT *psSyncCheckpointPfns); + +#endif /* ifndef _CHECKPOINT_PFNS_ */ + +/*************************************************************************/ /*! +@Function SyncCheckpointContextCreate + +@Description Create a new synchronisation checkpoint context + +@Input psDevNode Device node + +@Output ppsSyncCheckpointContext Handle to the created synchronisation + checkpoint context + +@Return PVRSRV_OK if the synchronisation checkpoint context was + successfully created +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode, + PSYNC_CHECKPOINT_CONTEXT *ppsSyncCheckpointContext); + +/*************************************************************************/ /*! +@Function SyncCheckpointContextDestroy + +@Description Destroy a synchronisation checkpoint context + +@Input psSyncCheckpointContext Handle to the synchronisation + checkpoint context to destroy + +@Return PVRSRV_OK if the synchronisation checkpoint context was + successfully destroyed. + PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT if the context still + has sync checkpoints defined +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext); + +/*************************************************************************/ /*! 
+@Function SyncCheckpointContextRef + +@Description Takes a reference on a synchronisation checkpoint context + +@Input psContext Handle to the synchronisation checkpoint context + on which a ref is to be taken + +@Return None +*/ +/*****************************************************************************/ +void SyncCheckpointContextRef(PSYNC_CHECKPOINT_CONTEXT psContext); + +/*************************************************************************/ /*! +@Function SyncCheckpointContextUnref + +@Description Drops a reference taken on a synchronisation checkpoint + context + +@Input psContext Handle to the synchronisation checkpoint context + on which the ref is to be dropped + +@Return None +*/ +/*****************************************************************************/ +void SyncCheckpointContextUnref(PSYNC_CHECKPOINT_CONTEXT psContext); + +/*************************************************************************/ /*! +@Function SyncCheckpointAlloc + +@Description Allocate a new synchronisation checkpoint on the specified + synchronisation checkpoint context + +@Input hSyncCheckpointContext Handle to the synchronisation + checkpoint context + +@Input hTimeline Timeline on which this sync + checkpoint is being created + +@Input hFence Fence as passed into pfnFenceResolve + API, when the API encounters a non-PVR + fence as part of its input fence. From + all other places this argument must be + PVRSRV_NO_FENCE. 
+ +@Input pszClassName Sync checkpoint source annotation + (will be truncated to at most + PVRSRV_SYNC_NAME_LENGTH chars) + +@Output ppsSyncCheckpoint Created synchronisation checkpoint + +@Return PVRSRV_OK if the synchronisation checkpoint was + successfully created +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext, + PVRSRV_TIMELINE hTimeline, + PVRSRV_FENCE hFence, + const IMG_CHAR *pszCheckpointName, + PSYNC_CHECKPOINT *ppsSyncCheckpoint); + +/*************************************************************************/ /*! +@Function SyncCheckpointFree + +@Description Free a synchronisation checkpoint + The reference count held for the synchronisation checkpoint + is decremented - if it becomes zero, it is also freed. + +@Input psSyncCheckpoint The synchronisation checkpoint to free + +@Return None +*/ +/*****************************************************************************/ +void +SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint); + +/*************************************************************************/ /*! +@Function SyncCheckpointSignal + +@Description Signal the synchronisation checkpoint + +@Input psSyncCheckpoint The synchronisation checkpoint to signal + +@Input ui32FenceSyncFlags Flags used for controlling HWPerf behavior + +@Return None +*/ +/*****************************************************************************/ +void +SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags); + +/*************************************************************************/ /*!
+@Function SyncCheckpointSignalNoHW + +@Description Signal the synchronisation checkpoint in NO_HARDWARE build + +@Input psSyncCheckpoint The synchronisation checkpoint to signal + +@Return None +*/ +/*****************************************************************************/ +void +SyncCheckpointSignalNoHW(PSYNC_CHECKPOINT psSyncCheckpoint); + +/*************************************************************************/ /*! +@Function SyncCheckpointError + +@Description Error the synchronisation checkpoint + +@Input psSyncCheckpoint The synchronisation checkpoint to error + +@Input ui32FenceSyncFlags Flags used for controlling HWPerf behavior + +@Return None +*/ +/*****************************************************************************/ +void +SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags); + +/*************************************************************************/ /*! +@Function SyncCheckpointStateFromUFO + +@Description Returns the current state of the synchronisation checkpoint + which has the given UFO firmware address + +@Input psDevNode The device owning the sync + checkpoint + +@Input ui32FwAddr The firmware address of the sync + checkpoint + +@Return The current state (32-bit value) of the sync checkpoint +*/ +/*****************************************************************************/ +IMG_UINT32 SyncCheckpointStateFromUFO(PPVRSRV_DEVICE_NODE psDevNode, + IMG_UINT32 ui32FwAddr); + +/*************************************************************************/ /*!
+@Function SyncCheckpointErrorFromUFO + +@Description Error the synchronisation checkpoint which has the + given UFO firmware address + +@Input psDevNode The device owning the sync + checkpoint to be errored + +@Input ui32FwAddr The firmware address of the sync + checkpoint to be errored + +@Return None +*/ +/*****************************************************************************/ +void +SyncCheckpointErrorFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr); + +/*************************************************************************/ /*! +@Function SyncCheckpointRollbackFromUFO + +@Description Drop the enqueued count reference taken on the synchronisation + checkpoint on behalf of the firmware. + Called in the event of a DM Kick failing. + +@Input psDevNode The device owning the sync + checkpoint to be rolled back + +@Input ui32FwAddr The firmware address of the sync + checkpoint to be rolled back + +@Return None +*/ +/*****************************************************************************/ +void +SyncCheckpointRollbackFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr); + +#if defined(PVRSRV_SYNC_CHECKPOINT_CCB) +/*************************************************************************/ /*! +@Function SyncCheckpointUFOHasSignalled + +@Description Inform the sync backend that a specific checkpoint UFO has been + signalled by the firmware so that the host view of the object + can be updated. + +@Input psDevNode The device owning the sync + checkpoint that has been signalled. + +@Input ui32FwAddr The firmware address of the sync + checkpoint that has been signalled. + +@Input ui32Value The new value of the sync checkpoint. + +@Return IMG_TRUE if a backing sync point has been found and updated, + IMG_FALSE otherwise. 
+*/ +/*****************************************************************************/ +IMG_BOOL +SyncCheckpointUFOHasSignalled(PPVRSRV_DEVICE_NODE psDevNode, + IMG_UINT32 ui32FwAddr, + IMG_UINT32 ui32Value); + +/*************************************************************************/ /*! +@Function SyncCheckpointCheckState + +@Description Perform a full state check to check the state of all sync + points currently alive. + +@Return None +*/ +/*****************************************************************************/ +void +SyncCheckpointCheckState(void); + +/*************************************************************************/ /*! +@Function SyncCheckpointSignalWaiters + +@Description Signal any clients waiting on syncs which have been updated. + +@Return None +*/ +/*****************************************************************************/ +void +SyncCheckpointSignalWaiters(void); +#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */ + +/*************************************************************************/ /*! +@Function SyncCheckpointIsSignalled + +@Description Returns IMG_TRUE if the synchronisation checkpoint is + signalled or errored + +@Input psSyncCheckpoint The synchronisation checkpoint to test + +@Input ui32FenceSyncFlags Flags used for controlling HWPerf behavior + +@Return IMG_TRUE if the synchronisation checkpoint is + signalled or errored, IMG_FALSE otherwise +*/ +/*****************************************************************************/ +IMG_BOOL +SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint, + IMG_UINT32 ui32FenceSyncFlags); + +/*************************************************************************/ /*!
+@Function SyncCheckpointIsErrored + +@Description Returns IMG_TRUE if the synchronisation checkpoint is + errored + +@Input psSyncCheckpoint The synchronisation checkpoint to test + +@Input ui32FenceSyncFlags Flags used for controlling HWPerf behavior + +@Return IMG_TRUE if the synchronisation checkpoint is + errored, IMG_FALSE otherwise +*/ +/*****************************************************************************/ +IMG_BOOL +SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint, + IMG_UINT32 ui32FenceSyncFlags); + +/*************************************************************************/ /*! +@Function SyncCheckpointTakeRef + +@Description Take a reference on a synchronisation checkpoint + +@Input psSyncCheckpoint Synchronisation checkpoint to take a + reference on + +@Return PVRSRV_OK if a reference was taken on the synchronisation + primitive +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint); + +/*************************************************************************/ /*! +@Function SyncCheckpointDropRef + +@Description Drop a reference on a synchronisation checkpoint + +@Input psSyncCheckpoint Synchronisation checkpoint to drop a + reference on + +@Return PVRSRV_OK if a reference was dropped on the synchronisation + primitive +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint); + +/*************************************************************************/ /*! +@Function SyncCheckpointResolveFence + +@Description Resolve a fence, returning a list of the sync checkpoints + that fence contains. + This function in turn calls a function provided by the + OS native sync implementation.
+ +@Input psSyncCheckpointContext The sync checkpoint context + on which checkpoints should be + created (in the event of the fence + having a native sync pt with no + associated sync checkpoint) + +@Input hFence The fence to be resolved + +@Output pui32NumSyncCheckpoints The number of sync checkpoints the + fence contains. Can return 0 if + passed a null (-1) fence. + +@Output papsSyncCheckpoints List of sync checkpoints the fence + contains + +@Output puiFenceUID Unique ID of the resolved fence + +@Return PVRSRV_OK if a valid fence was provided. + PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native + sync has not registered a callback function. +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointResolveFence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, + PVRSRV_FENCE hFence, + IMG_UINT32 *pui32NumSyncCheckpoints, + PSYNC_CHECKPOINT **papsSyncCheckpoints, + IMG_UINT64 *puiFenceUID, + PDUMP_FLAGS_T ui32PDumpFlags); + +/*************************************************************************/ /*! +@Function SyncCheckpointCreateFence + +@Description Create a fence containing a single sync checkpoint. + Return the fence and a ptr to sync checkpoint it contains. + This function in turn calls a function provided by the + OS native sync implementation. + +@Input pszFenceName String to assign to the new fence + (for debugging purposes) + +@Input hTimeline Timeline on which the new fence is + to be created + +@Input psSyncCheckpointContext Sync checkpoint context to be used + when creating the new fence + +@Output phNewFence The newly created fence + +@Output pui64FenceUID Unique ID of the created fence + +@Output ppvFenceFinaliseData Any data needed to finalise the fence + in a later call to the function + SyncCheckpointFinaliseFence() + +@Output psNewSyncCheckpoint The sync checkpoint contained in + the new fence + +@Return PVRSRV_OK if a valid fence was provided. 
+ PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native + sync has not registered a callback function. +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointCreateFence(PPVRSRV_DEVICE_NODE psDeviceNode, + const IMG_CHAR *pszFenceName, + PVRSRV_TIMELINE hTimeline, + PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, + PVRSRV_FENCE *phNewFence, + IMG_UINT64 *pui64FenceUID, + void **ppvFenceFinaliseData, + PSYNC_CHECKPOINT *psNewSyncCheckpoint, + void **ppvTimelineUpdateSyncPrim, + IMG_UINT32 *pui32TimelineUpdateValue, + PDUMP_FLAGS_T ui32PDumpFlags); + +/*************************************************************************/ /*! +@Function SyncCheckpointRollbackFenceData + +@Description 'Rolls back' the fence specified (destroys the fence and + takes any other required actions to undo the fence + creation (eg if the implementation wishes to revert the + incrementing of the fence's timeline, etc). + This function in turn calls a function provided by the + OS native sync implementation. + +@Input hFence Fence to be 'rolled back' + +@Input pvFinaliseData Data needed to finalise the + fence + +@Return PVRSRV_OK if a valid fence was provided. + PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native + sync has not registered a callback function. +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointRollbackFenceData(PVRSRV_FENCE hFence, void *pvFinaliseData); + +/*************************************************************************/ /*! +@Function SyncCheckpointFinaliseFence + +@Description 'Finalise' the fence specified (performs any actions the + underlying implementation may need to perform just prior + to the fence being returned to the client. 
+ This function in turn calls a function provided by the + OS native sync implementation - if the native sync + implementation does not need to perform any actions at + this time, this function does not need to be registered. + +@Input psDevNode Device node + +@Input hFence Fence to be 'finalised' + +@Input pvFinaliseData Data needed to finalise the fence + +@Input psSyncCheckpoint Base sync checkpoint that this fence + is formed of + +@Input pszName Fence annotation + +@Return PVRSRV_OK if a valid fence and finalise data were provided. + PVRSRV_ERROR_INVALID_PARAMS if an invalid fence or finalise + data were provided. + PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native + sync has not registered a callback function (permitted). +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointFinaliseFence(PPVRSRV_DEVICE_NODE psDevNode, + PVRSRV_FENCE hFence, + void *pvFinaliseData, + PSYNC_CHECKPOINT psSyncCheckpoint, + const IMG_CHAR *pszName); + +/*************************************************************************/ /*! +@Function SyncCheckpointFreeCheckpointListMem + +@Description Free the memory which was allocated by the sync + implementation and used to return the list of sync + checkpoints when resolving a fence. + This function in turn calls a free function registered by + the sync implementation (if a function has been registered). + +@Input pvCheckpointListMem Pointer to the memory to be freed + +@Return None +*/ +/*****************************************************************************/ +void +SyncCheckpointFreeCheckpointListMem(void *pvCheckpointListMem); + +/*************************************************************************/ /*! +@Function SyncCheckpointNoHWUpdateTimelines + +@Description Called by the DDK in a NO_HARDWARE build only.
+ After syncs have been manually signalled by the DDK, this + function is called to allow the OS native sync implementation + to update its timelines (as the usual callback notification + of signalled checkpoints is not supported for NO_HARDWARE). + This function in turn calls a function provided by the + OS native sync implementation. + +@Input pvPrivateData Any data the OS native sync + implementation might require. + +@Return PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native + sync has not registered a callback function, otherwise + PVRSRV_OK. +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointNoHWUpdateTimelines(void *pvPrivateData); + +/*************************************************************************/ /*! +@Function SyncCheckpointDumpInfoOnStalledUFOs + +@Description Called by the DDK in the event of the health check watchdog + examining the CCBs and determining that one has failed to + progress after 10 seconds when the GPU is idle due to waiting + on one or more UFO fences. + The DDK will pass a list of UFOs on which the CCB is waiting + and the sync implementation will check them to see if any + relate to sync points it has created. If so, the + implementation should dump debug information on those sync + points to the kernel log or other suitable output (which will + allow the unsignalled syncs to be identified). + The function shall return the number of syncs in the provided + array that were syncs which it had created. + +@Input ui32NumUFOs The number of UFOs in the array passed + in the pui32VAddrs parameter. + pui32Vaddr The array of UFOs the CCB is waiting on. + +@Output pui32NumSyncOwnedUFOs The number of UFOs in pui32Vaddr which + relate to syncs created by the sync + implementation. + +@Return PVRSRV_OK if a valid pointer is provided in pui32NumSyncOwnedUFOs. + PVRSRV_ERROR_INVALID_PARAMS if a NULL value is provided in + pui32NumSyncOwnedUFOs.
+ PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native + sync has not registered a callback function. + +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointDumpInfoOnStalledUFOs(IMG_UINT32 ui32NumUFOs, + IMG_UINT32 *pui32Vaddrs, + IMG_UINT32 *pui32NumSyncOwnedUFOs); + +/*************************************************************************/ /*! +@Function SyncCheckpointGetStateString + +@Description Called to get a string representing the current state of a + sync checkpoint. + +@Input psSyncCheckpoint Synchronisation checkpoint to get the + state for. + +@Return The string representing the current state of this checkpoint +*/ +/*****************************************************************************/ +const IMG_CHAR * +SyncCheckpointGetStateString(PSYNC_CHECKPOINT psSyncCheckpoint); + +/*************************************************************************/ /*! +@Function SyncCheckpointRecordLookup + +@Description Returns a debug string with information about the + sync checkpoint. + +@Input psDevNode The device owning the sync + checkpoint to lookup + +@Input ui32FwAddr The firmware address of the sync + checkpoint to lookup + +@Input pszSyncInfo Character array to write to + +@Input len Len of the character array + +@Return None +*/ +/*****************************************************************************/ +void +SyncCheckpointRecordLookup(PPVRSRV_DEVICE_NODE psDevNode, + IMG_UINT32 ui32FwAddr, + IMG_CHAR * pszSyncInfo, size_t len); + +#if defined(PDUMP) +/*************************************************************************/ /*! +@Function PVRSRVSyncCheckpointFencePDumpPolKM + +@Description Called to insert a poll into the PDump script on a given + Fence being signalled or errored. + +@Input hFence Fence for PDump to poll on + +@Return PVRSRV_OK if a valid sync checkpoint was provided. 
+*/ +/*****************************************************************************/ + +PVRSRV_ERROR PVRSRVSyncCheckpointSignalledPDumpPolKM(PVRSRV_FENCE hFence); + +#endif + +#endif /* SYNC_CHECKPOINT_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/sync_checkpoint_init.h b/drivers/mcst/gpu-imgtec/services/server/include/sync_checkpoint_init.h new file mode 100644 index 000000000000..f5aa139c260e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/sync_checkpoint_init.h @@ -0,0 +1,82 @@ +/*************************************************************************/ /*! +@File +@Title Services synchronisation checkpoint initialisation interface + header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Defines synchronisation checkpoint structures that are visible + internally and externally +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _SYNC_CHECKPOINT_INIT_ +#define _SYNC_CHECKPOINT_INIT_ + +#include "device.h" + +/*************************************************************************/ /*! +@Function SyncCheckpointInit + +@Description Initialise the sync checkpoint driver by giving it the + device node (needed to determine the pfnUFOAlloc function + to call in order to allocate sync block memory). + +@Input psDevNode Device for which sync checkpoints + are being initialised + +@Return PVRSRV_OK initialised successfully, + PVRSRV_ERROR_ otherwise +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointInit(PVRSRV_DEVICE_NODE *psDevNode); + +/*************************************************************************/ /*! 
+@Function SyncCheckpointDeinit + +@Description Deinitialise the sync checkpoint driver. + Frees resources allocated during initialisation. + +@Input psDevNode Device for which sync checkpoints + are being de-initialised + +@Return None +*/ +/*****************************************************************************/ +void SyncCheckpointDeinit(PVRSRV_DEVICE_NODE *psDevNode); + +#endif /* _SYNC_CHECKPOINT_INIT_ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/sync_fallback_server.h b/drivers/mcst/gpu-imgtec/services/server/include/sync_fallback_server.h new file mode 100644 index 000000000000..67ae9900cd3e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/sync_fallback_server.h @@ -0,0 +1,198 @@ +/**************************************************************************/ /*! +@File +@Title Fallback sync interface +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#ifndef _SYNC_FALLBACK_SERVER_H_ +#define _SYNC_FALLBACK_SERVER_H_ + +#include "img_types.h" +#include "sync_checkpoint.h" +#include "device.h" +#include "connection_server.h" + + +typedef struct _PVRSRV_TIMELINE_SERVER_ PVRSRV_TIMELINE_SERVER; +typedef struct _PVRSRV_FENCE_SERVER_ PVRSRV_FENCE_SERVER; +typedef struct _PVRSRV_FENCE_EXPORT_ PVRSRV_FENCE_EXPORT; + +typedef struct _PVRSRV_SYNC_PT_ PVRSRV_SYNC_PT; + +#define SYNC_FB_TIMELINE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH +#define SYNC_FB_FENCE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH + +/*****************************************************************************/ +/* */ +/* SW SPECIFIC FUNCTIONS */ +/* */ +/*****************************************************************************/ + +PVRSRV_ERROR SyncFbTimelineCreateSW(IMG_UINT32 uiTimelineNameSize, + const IMG_CHAR *pszTimelineName, + PVRSRV_TIMELINE_SERVER **ppsTimeline); + +PVRSRV_ERROR SyncFbFenceCreateSW(PVRSRV_TIMELINE_SERVER *psTimeline, + IMG_UINT32 uiFenceNameSize, + const IMG_CHAR *pszFenceName, + PVRSRV_FENCE_SERVER **ppsOutputFence, + IMG_UINT64 *pui64SyncPtIdx); +PVRSRV_ERROR SyncFbSWTimelineFenceCreateKM(PVRSRV_TIMELINE iSWTimeline, + const IMG_CHAR *pszFenceName, + PVRSRV_FENCE *piOutputFence, + IMG_UINT64* pui64SyncPtIdx); + +PVRSRV_ERROR SyncFbTimelineAdvanceSW(PVRSRV_TIMELINE_SERVER *psTimeline, + IMG_UINT64 *pui64SyncPtIdx); +PVRSRV_ERROR SyncFbSWTimelineAdvanceKM(void *pvSWTimelineObj, + IMG_UINT64* pui64SyncPtIdx); + +/*****************************************************************************/ +/* */ +/* PVR SPECIFIC FUNCTIONS */ +/* */ +/*****************************************************************************/ + +PVRSRV_ERROR SyncFbTimelineCreatePVR(IMG_UINT32 uiTimelineNameSize, + const IMG_CHAR *pszTimelineName, + PVRSRV_TIMELINE_SERVER **ppsTimeline); + +PVRSRV_ERROR SyncFbFenceCreatePVR(const IMG_CHAR *pszName, + PVRSRV_TIMELINE iTl, + 
PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext, + PVRSRV_FENCE *piOutFence, + IMG_UINT64 *puiFenceUID, + void **ppvFenceFinaliseData, + PSYNC_CHECKPOINT *ppsOutCheckpoint, + void **ppvTimelineUpdateSync, + IMG_UINT32 *puiTimelineUpdateValue); + +PVRSRV_ERROR SyncFbFenceResolvePVR(PSYNC_CHECKPOINT_CONTEXT psContext, + PVRSRV_FENCE iFence, + IMG_UINT32 *puiNumCheckpoints, + PSYNC_CHECKPOINT **papsCheckpoints, + IMG_UINT64 *puiFenceUID); + +/*****************************************************************************/ +/* */ +/* GENERIC FUNCTIONS */ +/* */ +/*****************************************************************************/ + +PVRSRV_ERROR SyncFbGetFenceObj(PVRSRV_FENCE iFence, + void **ppvFenceObj); + +PVRSRV_ERROR SyncFbSWGetTimelineObj(PVRSRV_TIMELINE iSWTimeline, + void **ppvSWTimelineObj); + +PVRSRV_ERROR SyncFbTimelineRelease(PVRSRV_TIMELINE_SERVER *psTl); + +PVRSRV_ERROR SyncFbFenceRelease(PVRSRV_FENCE_SERVER *psFence); +PVRSRV_ERROR SyncFbFenceReleaseKM(void *pvFenceObj); + +PVRSRV_ERROR SyncFbFenceDup(PVRSRV_FENCE_SERVER *psInFence, + PVRSRV_FENCE_SERVER **ppsOutFence); + +PVRSRV_ERROR SyncFbFenceMerge(PVRSRV_FENCE_SERVER *psInFence1, + PVRSRV_FENCE_SERVER *psInFence2, + IMG_UINT32 uiFenceNameSize, + const IMG_CHAR *pszFenceName, + PVRSRV_FENCE_SERVER **ppsOutFence); + +PVRSRV_ERROR SyncFbFenceWait(PVRSRV_FENCE_SERVER *psFence, + IMG_UINT32 uiTimeout); + +PVRSRV_ERROR SyncFbFenceDump(PVRSRV_FENCE_SERVER *psFence, + IMG_UINT32 uiLine, + IMG_UINT32 uiFileNameLength, + const IMG_CHAR *pszFile, + IMG_UINT32 uiModuleLength, + const IMG_CHAR *pszModule, + IMG_UINT32 uiDescLength, + const IMG_CHAR *pszDesc); + +PVRSRV_ERROR SyncFbDumpFenceKM(void *pvSWFenceObj, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); + +PVRSRV_ERROR SyncFbSWDumpTimelineKM(void *pvSWTimelineObj, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); + +PVRSRV_ERROR SyncFbRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode); + +PVRSRV_ERROR 
SyncFbDeregisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode); + +IMG_UINT32 SyncFbDumpInfoOnStalledUFOs(IMG_UINT32 nr_ufos, IMG_UINT32 *vaddrs); + +IMG_BOOL SyncFbCheckpointHasSignalled(IMG_UINT32 ui32FwAddr, IMG_UINT32 ui32Value); + +/*****************************************************************************/ +/* */ +/* IMPORT/EXPORT FUNCTIONS */ +/* */ +/*****************************************************************************/ + +#if defined(SUPPORT_INSECURE_EXPORT) +PVRSRV_ERROR SyncFbFenceExportInsecure(PVRSRV_FENCE_SERVER *psFence, + PVRSRV_FENCE_EXPORT **ppExport); + +PVRSRV_ERROR SyncFbFenceExportDestroyInsecure(PVRSRV_FENCE_EXPORT *psExport); + +PVRSRV_ERROR SyncFbFenceImportInsecure(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevice, + PVRSRV_FENCE_EXPORT *psImport, + PVRSRV_FENCE_SERVER **psFence); +#endif /* defined(SUPPORT_INSECURE_EXPORT) */ + +PVRSRV_ERROR SyncFbFenceExportSecure(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDevNode, + PVRSRV_FENCE_SERVER *psFence, + IMG_SECURE_TYPE *phSecure, + PVRSRV_FENCE_EXPORT **ppsExport, + CONNECTION_DATA **ppsSecureConnection); + +PVRSRV_ERROR SyncFbFenceExportDestroySecure(PVRSRV_FENCE_EXPORT *psExport); + +PVRSRV_ERROR SyncFbFenceImportSecure(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevice, + IMG_SECURE_TYPE hSecure, + PVRSRV_FENCE_SERVER **psFence); + +#endif /* _SYNC_FALLBACK_SERVER_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/sync_server.h b/drivers/mcst/gpu-imgtec/services/server/include/sync_server.h new file mode 100644 index 000000000000..7bf5f00caf48 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/sync_server.h @@ -0,0 +1,266 @@ +/**************************************************************************/ /*! +@File +@Title Server side synchronisation interface +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Describes the server side synchronisation functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv.h" +#include "device.h" +#include "devicemem.h" +#include "pdump.h" +#include "pvrsrv_error.h" +#include "connection_server.h" +#include "pdump_km.h" + +#ifndef _SYNC_SERVER_H_ +#define _SYNC_SERVER_H_ + +typedef struct _SYNC_PRIMITIVE_BLOCK_ SYNC_PRIMITIVE_BLOCK; +typedef struct _SYNC_CONNECTION_DATA_ SYNC_CONNECTION_DATA; +typedef struct SYNC_RECORD* SYNC_RECORD_HANDLE; + +typedef struct _SYNC_ADDR_LIST_ +{ + IMG_UINT32 ui32NumSyncs; + PRGXFWIF_UFO_ADDR *pasFWAddrs; +} SYNC_ADDR_LIST; + +PVRSRV_ERROR +SyncPrimitiveBlockToFWAddr(SYNC_PRIMITIVE_BLOCK *psSyncPrimBlock, + IMG_UINT32 ui32Offset, + PRGXFWIF_UFO_ADDR *psAddrOut); + +void +SyncAddrListInit(SYNC_ADDR_LIST *psList); + +void +SyncAddrListDeinit(SYNC_ADDR_LIST *psList); + +PVRSRV_ERROR +SyncAddrListPopulate(SYNC_ADDR_LIST *psList, + IMG_UINT32 ui32NumSyncs, + SYNC_PRIMITIVE_BLOCK **apsSyncPrimBlock, + IMG_UINT32 *paui32SyncOffset); + +PVRSRV_ERROR +SyncAddrListAppendSyncPrim(SYNC_ADDR_LIST *psList, + PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim); +PVRSRV_ERROR +SyncAddrListAppendCheckpoints(SYNC_ADDR_LIST *psList, + IMG_UINT32 ui32NumCheckpoints, + PSYNC_CHECKPOINT *apsSyncCheckpoint); + +PVRSRV_ERROR +SyncAddrListAppendAndDeRefCheckpoints(SYNC_ADDR_LIST *psList, + IMG_UINT32 ui32NumCheckpoints, + PSYNC_CHECKPOINT *apsSyncCheckpoint); + +void 
+SyncAddrListDeRefCheckpoints(IMG_UINT32 ui32NumCheckpoints, + PSYNC_CHECKPOINT *apsSyncCheckpoint); + +PVRSRV_ERROR +SyncAddrListRollbackCheckpoints(PVRSRV_DEVICE_NODE *psDevNode, SYNC_ADDR_LIST *psList); + +PVRSRV_ERROR +PVRSRVAllocSyncPrimitiveBlockKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDevNode, + SYNC_PRIMITIVE_BLOCK **ppsSyncBlk, + IMG_UINT32 *puiSyncPrimVAddr, + IMG_UINT32 *puiSyncPrimBlockSize, + PMR **ppsSyncPMR); + +PVRSRV_ERROR +PVRSRVExportSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, + DEVMEM_EXPORTCOOKIE **psExportCookie); + +PVRSRV_ERROR +PVRSRVUnexportSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk); + +PVRSRV_ERROR +PVRSRVFreeSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *ppsSyncBlk); + +PVRSRV_ERROR +PVRSRVSyncPrimSetKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Index, + IMG_UINT32 ui32Value); + +PVRSRV_ERROR +PVRSRVSyncAllocEventKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_BOOL bServerSync, + IMG_UINT32 ui32FWAddr, + IMG_UINT32 ui32ClassNameSize, + const IMG_CHAR *pszClassName); + +PVRSRV_ERROR +PVRSRVSyncFreeEventKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32FWAddr); + +PVRSRV_ERROR +PVRSRVSyncRecordAddKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + SYNC_RECORD_HANDLE *phRecord, + SYNC_PRIMITIVE_BLOCK *hServerSyncPrimBlock, + IMG_UINT32 ui32FwBlockAddr, + IMG_UINT32 ui32SyncOffset, + IMG_BOOL bServerSync, + IMG_UINT32 ui32ClassNameSize, + const IMG_CHAR *pszClassName); + +PVRSRV_ERROR +PVRSRVSyncRecordRemoveByHandleKM( + SYNC_RECORD_HANDLE hRecord); +void SyncRecordLookup(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 ui32FwAddr, + IMG_CHAR * pszSyncInfo, size_t len); + +void ServerSyncDumpPending(void); + +PVRSRV_ERROR SyncRegisterConnection(SYNC_CONNECTION_DATA **ppsSyncConnectionData); +void SyncUnregisterConnection(SYNC_CONNECTION_DATA *ppsSyncConnectionData); +void SyncConnectionPDumpSyncBlocks(void *hSyncPrivData, 
PDUMP_TRANSITION_EVENT eEvent); + +/*! +****************************************************************************** +@Function SyncServerInit + +@Description Per-device initialisation for the ServerSync module +******************************************************************************/ +PVRSRV_ERROR SyncServerInit(PVRSRV_DEVICE_NODE *psDevNode); +void SyncServerDeinit(PVRSRV_DEVICE_NODE *psDevNode); + + +/*! +****************************************************************************** +@Function PVRSRVLockServerSync + +@Description Acquire a global lock to maintain server sync consistency +******************************************************************************/ +void PVRSRVLockServerSync(void); +/*! +****************************************************************************** +@Function PVRSRVUnlockServerSync + +@Description Release the global server sync lock +******************************************************************************/ +void PVRSRVUnlockServerSync(void); + +#if defined(PDUMP) +PVRSRV_ERROR +PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset); + +PVRSRV_ERROR +PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Value); + +PVRSRV_ERROR +PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T uiDumpFlags); + +PVRSRV_ERROR +PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset, + IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize, + IMG_UINT64 uiBufferSize); + +#else /* PDUMP */ + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVSyncPrimPDumpKM) +#endif +static INLINE PVRSRV_ERROR +PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset) +{ + PVR_UNREFERENCED_PARAMETER(psSyncBlk); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma 
inline(PVRSRVSyncPrimPDumpValueKM) +#endif +static INLINE PVRSRV_ERROR +PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Value) +{ + PVR_UNREFERENCED_PARAMETER(psSyncBlk); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + PVR_UNREFERENCED_PARAMETER(ui32Value); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVSyncPrimPDumpPolKM) +#endif +static INLINE PVRSRV_ERROR +PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T uiDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psSyncBlk); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + PVR_UNREFERENCED_PARAMETER(ui32Value); + PVR_UNREFERENCED_PARAMETER(ui32Mask); + PVR_UNREFERENCED_PARAMETER(eOperator); + PVR_UNREFERENCED_PARAMETER(uiDumpFlags); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVSyncPrimPDumpCBPKM) +#endif +static INLINE PVRSRV_ERROR +PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset, + IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize, + IMG_UINT64 uiBufferSize) +{ + PVR_UNREFERENCED_PARAMETER(psSyncBlk); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + PVR_UNREFERENCED_PARAMETER(uiWriteOffset); + PVR_UNREFERENCED_PARAMETER(uiPacketSize); + PVR_UNREFERENCED_PARAMETER(uiBufferSize); + return PVRSRV_OK; +} +#endif /* PDUMP */ +#endif /*_SYNC_SERVER_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/tlintern.h b/drivers/mcst/gpu-imgtec/services/server/include/tlintern.h new file mode 100644 index 000000000000..58223bd86aff --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/tlintern.h @@ -0,0 +1,346 @@ +/*************************************************************************/ /*! +@File +@Title Transport Layer internals +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Transport Layer header used by TL internally +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef TLINTERN_H +#define TLINTERN_H + + +#include "devicemem_typedefs.h" +#include "pvrsrv_tlcommon.h" +#include "lock.h" +#include "tlstream.h" + +/* Forward declarations */ +typedef struct _TL_SNODE_* PTL_SNODE; + +/* To debug buffer utilisation enable this macro here and define + * PVRSRV_NEED_PVR_TRACE in the server pvr_debug.c and in tutils.c + * before the inclusion of pvr_debug.h. + * Issue pvrtutils 6 on target to see stream buffer utilisation. */ +//#define TL_BUFFER_STATS 1 + +/*! TL stream structure container. + * pbyBuffer holds the circular buffer. + * ui32Read points to the beginning of the buffer, ie to where data to + * Read begin. + * ui32Write points to the end of data that have been committed, ie this is + * where new data will be written. + * ui32Pending number of bytes reserved in last reserve call which have not + * yet been submitted. Therefore these data are not ready to + * be transported. + * hStreamWLock - provides atomic protection for the ui32Pending & ui32Write + * members of the structure for when they are checked and/or + * updated in the context of a stream writer (producer) + * calling DoTLStreamReserve() & TLStreamCommit(). + * - Reader context is not multi-threaded, only one client per + * stream is allowed. 
Also note the read context may be in an + * ISR which prevents a design where locks can be held in the + * AcquireData/ReleaseData() calls. Thus this lock only + * protects the stream members from simultaneous writers. + * + * ui32Read < ui32Write <= ui32Pending + * where < and <= operators are overloaded to make sense in a circular way. + */ +typedef struct _TL_STREAM_ +{ + IMG_CHAR szName[PRVSRVTL_MAX_STREAM_NAME_SIZE]; /*!< String name identifier */ + PVRSRV_DEVICE_NODE *psDevNode; /*!< Underlying device on which the stream's buffer is allocated */ + TL_OPMODE eOpMode; /*!< Mode of Operation of TL Buffer */ + + IMG_BOOL bWaitForEmptyOnDestroy; /*!< Flag: On destroying a non-empty stream block until + * stream is drained. */ + IMG_BOOL bNoSignalOnCommit; /*!< Flag: Used to avoid the TL signalling waiting consumers + * that new data is available on every commit. Producers + * using this flag will need to manually signal when + * appropriate using the TLStreamSync() API */ + + void (*pfOnReaderOpenCallback)(void *); /*!< Optional on reader connect callback */ + void *pvOnReaderOpenUserData; /*!< On reader connect user data */ + void (*pfProducerCallback)(void); /*!< Optional producer callback of type TL_STREAM_SOURCECB */ + void *pvProducerUserData; /*!< Producer callback user data */ + + struct _TL_STREAM_ *psNotifStream; /*!< Pointer to the stream to which notification will be sent */ + + volatile IMG_UINT32 ui32Read; /*!< Pointer to the beginning of available data */ + volatile IMG_UINT32 ui32Write; /*!< Pointer to already committed data which are ready to be + * copied to user space */ + IMG_UINT32 ui32Pending; /*!< Count pending bytes reserved in buffer */ + IMG_UINT32 ui32Size; /*!< Buffer size */ + IMG_UINT32 ui32ThresholdUsageForSignal; /*!< Buffer usage threshold at which a TL writer signals a blocked/ + * waiting reader when transitioning from empty->non-empty */ + IMG_UINT32 ui32MaxPacketSize; /*! 
Max TL packet size */ + IMG_BYTE *pbyBuffer; /*!< Actual data buffer */ + + PTL_SNODE psNode; /*!< Ptr to parent stream node */ + DEVMEM_MEMDESC *psStreamMemDesc; /*!< MemDescriptor used to allocate buffer space through PMR */ + + IMG_HANDLE hProducerEvent; /*!< Handle to wait on if there is not enough space */ + IMG_HANDLE hProducerEventObj; /*!< Handle to signal blocked reserve calls */ + IMG_BOOL bSignalPending; /*!< Tracks if a "signal" is pending to be sent to a blocked/ + * waiting reader */ + + POS_LOCK hStreamWLock; /*!< Writers Lock for ui32Pending & ui32Write*/ + POS_LOCK hReadLock; /*!< Readers Lock for bReadPending & ui32Read*/ + IMG_BOOL bReadPending; /*!< Tracks if a read operation is pending or not*/ + IMG_BOOL bNoWrapPermanent; /*!< Flag: Prevents buffer wrap and subsequent data loss + * as well as resetting the read position on close. */ + +#if defined(TL_BUFFER_STATS) + IMG_UINT32 ui32CntReadFails; /*!< Tracks how many times reader failed to acquire read lock */ + IMG_UINT32 ui32CntReadSuccesses; /*!< Tracks how many times reader acquires read lock successfully */ + IMG_UINT32 ui32CntWriteSuccesses; /*!< Tracks how many times writer acquires read lock successfully */ + IMG_UINT32 ui32CntWriteWaits; /*!< Tracks how many times writer had to wait to acquire read lock */ + IMG_UINT32 ui32CntNumWriteSuccess; /*!< Tracks how many write operations were successful*/ + IMG_UINT32 ui32BufferUt; /*!< Buffer utilisation high watermark, see TL_BUFFER_STATS above */ + IMG_UINT32 ui32MaxReserveWatermark; /*!< Max stream reserve size that was ever requested by a writer */ + IMG_UINT32 ui32SignalsSent; /*!< Number of signals that were actually sent by the write API */ + ATOMIC_T bNoReaderSinceFirstReserve; /*!< Tracks if a read has been done since the buffer was last found empty */ + IMG_UINT32 ui32TimeStart; /*!< Time at which a write (Reserve call) was done into an empty buffer. + * Guarded by hStreamWLock. 
+ */ + IMG_UINT32 ui32MinTimeToFullInUs; /*!< Minimum time taken to (nearly) fully fill an empty buffer. Guarded + * by hStreamWLock. */ + /* Behaviour counters, protected by hStreamWLock in case of + * multi-threaded access */ + IMG_UINT32 ui32NumCommits; /*!< Counters used for analysing stream performance, see ++ loc */ + IMG_UINT32 ui32SignalNotSent; /*!< Counters used for analysing stream performance, see ++ loc */ + IMG_UINT32 ui32ManSyncs; /*!< Counters used for analysing stream performance, see ++ loc */ + IMG_UINT32 ui32ProducerByteCount; /*!< Counters used for analysing stream performance, see ++ loc */ + + /* Not protected by the lock, inc in the reader thread which is currently singular */ + IMG_UINT32 ui32AcquireRead1; /*!< Counters used for analysing stream performance, see ++ loc */ + IMG_UINT32 ui32AcquireRead2; /*!< Counters used for analysing stream performance, see ++ loc */ +#endif + +} TL_STREAM, *PTL_STREAM; + +/* there needs to be enough space reserved in the buffer for 2 minimal packets + * and it needs to be aligned the same way the buffer is or there will be a + * compile error.*/ +#define BUFFER_RESERVED_SPACE (2 * PVRSRVTL_PACKET_ALIGNMENT) + +/* ensure the space reserved follows the buffer's alignment */ +static_assert(!(BUFFER_RESERVED_SPACE&(PVRSRVTL_PACKET_ALIGNMENT-1)), + "BUFFER_RESERVED_SPACE must be a multiple of PVRSRVTL_PACKET_ALIGNMENT"); + +/* Define the largest value that a uint that matches the + * PVRSRVTL_PACKET_ALIGNMENT size can hold */ +#define MAX_UINT 0xffffFFFF + +/*! Defines the value used for TL_STREAM.ui32Pending when no reserve is + * outstanding on the stream. 
+ */ +#define NOTHING_PENDING IMG_UINT32_MAX + + +/* + * Transport Layer Stream Descriptor types/defs + */ +typedef struct _TL_STREAM_DESC_ +{ + PTL_SNODE psNode; /*!< Ptr to parent stream node */ + IMG_UINT32 ui32Flags; /*!< Flags supplied by client on stream open */ + IMG_HANDLE hReadEvent; /*!< For wait call (only used/set in reader descriptors) */ + IMG_INT uiRefCount; /*!< Reference count to the SD */ + +#if defined(TL_BUFFER_STATS) + /* Behaviour counters, no multi-threading protection needed as they are + * incremented in a single thread due to only supporting one reader + * at present */ + IMG_UINT32 ui32AcquireCount; /*!< Counters used for analysing stream performance, see ++ loc */ + IMG_UINT32 ui32NoData; /*!< Counters used for analysing stream performance, see ++ loc */ + IMG_UINT32 ui32NoDataSleep; /*!< Counters used for analysing stream performance, see ++ loc */ + IMG_UINT32 ui32Signalled; /*!< Counters used for analysing stream performance, see ++ loc */ + IMG_UINT32 ui32TimeoutEmpty; /*!< Counters used for analysing stream performance, see ++ loc */ + IMG_UINT32 ui32TimeoutData; /*!< Counters used for analysing stream performance, see ++ loc */ +#endif + IMG_UINT32 ui32ReadLimit; /*!< Limit buffer reads to data present in the + buffer at the time of stream open. 
*/ + IMG_UINT32 ui32ReadLen; /*!< Size of data returned by initial Acquire */ +} TL_STREAM_DESC, *PTL_STREAM_DESC; + +PTL_STREAM_DESC TLMakeStreamDesc(PTL_SNODE f1, IMG_UINT32 f2, IMG_HANDLE f3); + +#define TL_STREAM_KM_FLAG_MASK 0xFFFF0000 +#define TL_STREAM_FLAG_TEST 0x10000000 +#define TL_STREAM_FLAG_WRAPREAD 0x00010000 + +#define TL_STREAM_UM_FLAG_MASK 0x0000FFFF + +#if defined(TL_BUFFER_STATS) +# define TL_COUNTER_INC(a) ((a)++) +# define TL_COUNTER_ADD(a,b) ((a) += (b)) +#else +# define TL_COUNTER_INC(a) (void)(0) +# define TL_COUNTER_ADD(a,b) (void)(0) +#endif +/* + * Transport Layer stream list node + */ +typedef struct _TL_SNODE_ +{ + struct _TL_SNODE_* psNext; /*!< Linked list next element */ + IMG_HANDLE hReadEventObj; /*!< Readers 'wait for data' event */ + PTL_STREAM psStream; /*!< TL Stream object */ + IMG_INT uiWRefCount; /*!< Stream writer reference count */ + PTL_STREAM_DESC psRDesc; /*!< Stream reader 0 or ptr only */ + PTL_STREAM_DESC psWDesc; /*!< Stream writer 0 or ptr only */ +} TL_SNODE; + +PTL_SNODE TLMakeSNode(IMG_HANDLE f2, TL_STREAM *f3, TL_STREAM_DESC *f4); + +/* + * Transport Layer global top types and variables + * Use access function to obtain pointer. + * + * hTLGDLock - provides atomicity over read/check/write operations and + * sequence of operations on uiClientCnt, psHead list of SNODEs and + * the immediate members in a list element SNODE structure. + * - This larger scope of responsibility for this lock helps avoid + * the need for a lock in the SNODE structure. + * - Lock held in the client (reader) context when streams are + * opened/closed and in the server (writer) context when streams + * are created/open/closed. + */ +typedef struct _TL_GDATA_ +{ + IMG_HANDLE hTLEventObj; /* Global TL signal object, new streams, etc */ + + IMG_UINT uiClientCnt; /* Counter to track the number of client stream connections. 
*/ + PTL_SNODE psHead; /* List of TL streams and associated client handle */ + + POS_LOCK hTLGDLock; /* Lock for structure AND psHead SNODE list */ +} TL_GLOBAL_DATA, *PTL_GLOBAL_DATA; + +/* + * Transport Layer Internal Kernel-Mode Server API + */ +TL_GLOBAL_DATA* TLGGD(void); /* TLGetGlobalData() */ + +PVRSRV_ERROR TLInit(void); +void TLDeInit(void); + +void TLAddStreamNode(PTL_SNODE psAdd); +PTL_SNODE TLFindStreamNodeByName(const IMG_CHAR *pszName); +PTL_SNODE TLFindStreamNodeByDesc(PTL_STREAM_DESC psDesc); +IMG_UINT32 TLDiscoverStreamNodes(const IMG_CHAR *pszNamePattern, + IMG_CHAR aaszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE], + IMG_UINT32 ui32Max); +PTL_SNODE TLFindAndGetStreamNodeByDesc(PTL_STREAM_DESC psDesc); +void TLReturnStreamNode(PTL_SNODE psNode); + +/****************************************************************************** + Function Name : TLTryRemoveStreamAndFreeStreamNode + + Inputs : PTL_SNODE Pointer to the TL_SNODE whose stream is requested + to be removed from TL_GLOBAL_DATA's list + + Return Value : IMG_TRUE - If the stream was made NULL and this + TL_SNODE was removed from the + TL_GLOBAL_DATA's list + + IMG_FALSE - If the stream wasn't made NULL as there + is a client connected to this stream + + Description : If there is no client currently connected to this stream then, + This function removes this TL_SNODE from the + TL_GLOBAL_DATA's list. 
The caller is responsible for the + cleanup of the TL_STREAM whose TL_SNODE may be removed + + Otherwise, this function does nothing +******************************************************************************/ +IMG_BOOL TLTryRemoveStreamAndFreeStreamNode(PTL_SNODE psRemove); + +/****************************************************************************** + Function Name : TLUnrefDescAndTryFreeStreamNode + + Inputs : PTL_SNODE Pointer to the TL_SNODE whose descriptor is + requested to be removed + : PTL_STREAM_DESC Pointer to the STREAM_DESC + + Return Value : IMG_TRUE - If this TL_SNODE was removed from the + TL_GLOBAL_DATA's list + + IMG_FALSE - Otherwise + + Description : This function removes the stream descriptor from this TL_SNODE + and, if there is no writer (producer context) currently bound to this + stream, this function removes this TL_SNODE from the TL_GLOBAL_DATA's + list. The caller is responsible for the cleanup of the TL_STREAM + whose TL_SNODE may be removed +******************************************************************************/ +IMG_BOOL TLUnrefDescAndTryFreeStreamNode(PTL_SNODE psRemove, PTL_STREAM_DESC psSD); + +/* + * Transport Layer stream interface to server part declared here to avoid + * circular dependency. 
+ */ +IMG_UINT32 TLStreamAcquireReadPos(PTL_STREAM psStream, + IMG_BOOL bDisableCallback, + IMG_UINT32* puiReadOffset); +PVRSRV_ERROR TLStreamAdvanceReadPos(PTL_STREAM psStream, + IMG_UINT32 uiReadLen, + IMG_UINT32 uiOrigReadLen); +void TLStreamResetReadPos(PTL_STREAM psStream); + +DEVMEM_MEMDESC* TLStreamGetBufferPointer(PTL_STREAM psStream); +IMG_BOOL TLStreamOutOfData(IMG_HANDLE psStream); + +/****************************************************************************** + Function Name : TLStreamDestroy + + Inputs : PTL_STREAM Pointer to the TL_STREAM to be destroyed + + Description : This function performs all the clean-up operations required for + destruction of this stream +******************************************************************************/ +void TLStreamDestroy(PTL_STREAM psStream); + +/* + * Test related functions + */ +PVRSRV_ERROR TUtilsInit(PVRSRV_DEVICE_NODE *psDeviceNode); +PVRSRV_ERROR TUtilsDeinit(PVRSRV_DEVICE_NODE *psDeviceNode); + + +#endif /* TLINTERN_H */ +/****************************************************************************** + End of file (tlintern.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/tlserver.h b/drivers/mcst/gpu-imgtec/services/server/include/tlserver.h new file mode 100644 index 000000000000..7ac2958eac1e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/tlserver.h @@ -0,0 +1,97 @@ +/*************************************************************************/ /*! +@File +@Title KM server Transport Layer implementation +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Main bridge APIs for Transport Layer client functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef TLSERVER_H +#define TLSERVER_H + +#include "img_defs.h" +#include "pvr_debug.h" +#include "connection_server.h" + +#include "tlintern.h" + +/* + * Transport Layer Client API Kernel-Mode bridge implementation + */ + +PVRSRV_ERROR TLServerConnectKM(CONNECTION_DATA *psConnection); +PVRSRV_ERROR TLServerDisconnectKM(CONNECTION_DATA *psConnection); + +PVRSRV_ERROR TLServerOpenStreamKM(const IMG_CHAR* pszName, + IMG_UINT32 ui32Mode, + PTL_STREAM_DESC* ppsSD, + PMR** ppsTLPMR); + +PVRSRV_ERROR TLServerCloseStreamKM(PTL_STREAM_DESC psSD); + +PVRSRV_ERROR TLServerDiscoverStreamsKM(const IMG_CHAR *pszNamePattern, + IMG_UINT32 ui32Max, + IMG_CHAR *pszStreams, + IMG_UINT32 *pui32NumFound); + +PVRSRV_ERROR TLServerReserveStreamKM(PTL_STREAM_DESC psSD, + IMG_UINT32* ui32BufferOffset, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32SizeMin, + IMG_UINT32* pui32Available); + +PVRSRV_ERROR TLServerCommitStreamKM(PTL_STREAM_DESC psSD, + IMG_UINT32 ui32Size); + +PVRSRV_ERROR TLServerAcquireDataKM(PTL_STREAM_DESC psSD, + IMG_UINT32* puiReadOffset, + IMG_UINT32* puiReadLen); + +PVRSRV_ERROR TLServerReleaseDataKM(PTL_STREAM_DESC psSD, + IMG_UINT32 uiReadOffset, + IMG_UINT32 uiReadLen); + +PVRSRV_ERROR TLServerWriteDataKM(PTL_STREAM_DESC psSD, + IMG_UINT32 ui32Size, + IMG_BYTE *pui8Data); + +#endif /* TLSERVER_H */ + +/****************************************************************************** + End of file (tlserver.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/tlstream.h b/drivers/mcst/gpu-imgtec/services/server/include/tlstream.h new file mode 100644 index 000000000000..9d95c578a3c3 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/tlstream.h @@ -0,0 +1,601 @@ +/*************************************************************************/ /*! 
+@File +@Title Transport Layer kernel side API. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description TL provides driver components with a way to copy data from kernel + space to user space (e.g. screen/file). + + Data can be passed to the Transport Layer through the + TL Stream (kernel space) API interface. + + The buffer provided to every stream is a modified version of a + circular buffer. Which CB version is created is specified by + relevant flags when creating a stream. Currently two types + of buffer are available: + - TL_OPMODE_DROP_NEWER: + When the buffer is full, incoming data are dropped + (instead of overwriting older data) and a marker is set + to let the user know that data have been lost. + - TL_OPMODE_BLOCK: + When the circular buffer is full, reserve/write calls block + until enough space is freed. + - TL_OPMODE_DROP_OLDEST: + When the circular buffer is full, the oldest packets in the + buffer are dropped and a flag is set in header of next packet + to let the user know that data have been lost. + + All size/space requests are in bytes. However, the actual + implementation uses native word sizes (i.e. 4 byte aligned). + + The user does not need to provide space for the stream buffer + as the TL handles memory allocations and usage. + + Inserting data to a stream's buffer can be done either: + - by using TLReserve/TLCommit: User is provided with a buffer + to write data to. + - or by using TLWrite: User provides a buffer with + data to be committed. The TL + copies the data from the + buffer into the stream buffer + and returns. + Users should be aware that there are implementation overheads + associated with every stream buffer. If you find that less + data are captured than expected then try increasing the + stream buffer size or use TLInfo to obtain buffer parameters + and calculate optimum required values at run time. 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef TLSTREAM_H +#define TLSTREAM_H + +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "pvrsrv_tlcommon.h" +#include "device.h" + +/*! Extract TL stream opmode from the given stream create flags. + * Last 3 bits of streamFlag is used for storing opmode, hence + * opmode mask is set as following. */ +#define TL_OPMODE_MASK 0x7 + +/* + * NOTE: This enum is used to directly access the HTB_OPMODE_xxx values + * within htbserver.c. + * As such we *MUST* keep the values matching in order of declaration. + */ +/*! Opmode specifying circular buffer behaviour */ +typedef enum +{ + /*! Undefined operation mode */ + TL_OPMODE_UNDEF = 0, + + /*! Reject new data if the buffer is full, producer may then decide to + * drop the data or retry after some time. */ + TL_OPMODE_DROP_NEWER, + + /*! When buffer is full, advance the tail/read position to accept the new + * reserve call (size permitting), effectively overwriting the oldest + * data in the circular buffer. Not supported yet. */ + TL_OPMODE_DROP_OLDEST, + + /*! Block Reserve (subsequently Write) calls if there is not enough space + * until some space is freed via a client read operation. 
*/ + TL_OPMODE_BLOCK, + + /*!< For error checking */ + TL_OPMODE_LAST + +} TL_OPMODE; + +typedef enum { + /* Enum to be used in conjunction with new Flags feature */ + + /* Flag set when Drop Oldest is set and packets have been dropped */ + TL_FLAG_OVERWRITE_DETECTED = (1 << 0), + +} TL_Flags; + +static_assert(TL_OPMODE_LAST <= TL_OPMODE_MASK, + "TL_OPMODE_LAST must not exceed TL_OPMODE_MASK"); + +/*! Flags specifying stream behaviour */ +/*! Do not destroy stream if there still are data that have not been + * copied in user space. Block until the stream is emptied. */ +#define TL_FLAG_FORCE_FLUSH (1U<<8) +/*! Do not signal consumers on commit automatically when the stream buffer + * transitions from empty to non-empty. Producer responsible for signal when + * it chooses. */ +#define TL_FLAG_NO_SIGNAL_ON_COMMIT (1U<<9) + +/*! When a stream has this property it never wraps around and + * overwrites existing data, hence it is a fixed size persistent + * buffer, data written is permanent. Producers need to ensure + * the buffer is big enough for their needs. + * When a stream is opened for reading the client will always + * find the read position at the start of the buffer/data. */ +#define TL_FLAG_PERMANENT_NO_WRAP (1U<<10) + +/*! Defer allocation of stream's shared memory until first open. */ +#define TL_FLAG_ALLOCATE_ON_FIRST_OPEN (1U<<11) + +/*! Structure used to pass internal TL stream sizes information to users.*/ +typedef struct _TL_STREAM_INFO_ +{ + IMG_UINT32 headerSize; /*!< Packet header size in bytes */ + IMG_UINT32 minReservationSize; /*!< Minimum data size reserved in bytes */ + IMG_UINT32 pageSize; /*!< Page size in bytes */ + IMG_UINT32 pageAlign; /*!< Page alignment in bytes */ + IMG_UINT32 maxTLpacketSize; /*! Max allowed TL packet size*/ +} TL_STREAM_INFO, *PTL_STREAM_INFO; + +/*! Callback operations or notifications that a stream producer may handle + * when requested by the Transport Layer. 
+ */ +#define TL_SOURCECB_OP_CLIENT_EOS 0x01 /*!< Client has reached end of stream, + * can any more data be supplied? + * ui32Resp ignored in this operation */ + +/*! Function pointer type for the callback handler into the "producer" code + * that writes data to the TL stream. Producer should handle the notification + * or operation supplied in ui32ReqOp on stream hStream. The + * Operations and notifications are defined above in TL_SOURCECB_OP */ +typedef PVRSRV_ERROR (*TL_STREAM_SOURCECB)(IMG_HANDLE hStream, + IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser); + +typedef void (*TL_STREAM_ONREADEROPENCB)(void *pvArg); + +/*************************************************************************/ /*! + @Function TLAllocSharedMemIfNull + @Description Allocates shared memory for the stream. + @Input hStream Stream handle. + @Return eError Internal services call returned eError error + number. + @Return PVRSRV_OK +*/ /**************************************************************************/ +PVRSRV_ERROR +TLAllocSharedMemIfNull(IMG_HANDLE hStream); + +/*************************************************************************/ /*! + @Function TLFreeSharedMem + @Description Frees stream's shared memory. + @Input hStream Stream handle. +*/ /**************************************************************************/ +void +TLFreeSharedMem(IMG_HANDLE hStream); + +/*************************************************************************/ /*! + @Function TLStreamCreate + @Description Request the creation of a new stream and open a handle. + If creating a stream which should continue to exist after the + current context is finished, then TLStreamCreate must be + followed by a TLStreamOpen call. In any case, the number of + create/open calls must balance with the number of close calls + used. This ensures the resources of a stream are released when + it is no longer required. + @Output phStream Pointer to handle to store the new stream. 
+ @Input psDevNode Pointer to the Device Node to be used for + stream allocation. + @Input szStreamName Name of stream, maximum length: + PRVSRVTL_MAX_STREAM_NAME_SIZE. + If a longer string is provided,creation fails. + @Input ui32Size Desired buffer size in bytes. + @Input ui32StreamFlags Used to configure buffer behaviour. See above. + @Input pfOnReaderOpenCB Optional callback called when a client + opens this stream, may be null. + @Input pvOnReaderOpenUD Optional user data for pfOnReaderOpenCB, + may be null. + @Input pfProducerCB Optional callback, may be null. + @Input pvProducerUD Optional user data for callback, may be null. + @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle or string name + exceeded MAX_STREAM_NAME_SIZE + @Return PVRSRV_ERROR_OUT_OF_MEMORY Failed to allocate space for + stream handle. + @Return PVRSRV_ERROR_DUPLICATE_VALUE There already exists a stream with + the same stream name string. + @Return eError Internal services call returned + eError error number. + @Return PVRSRV_OK +*/ /**************************************************************************/ +PVRSRV_ERROR +TLStreamCreate(IMG_HANDLE *phStream, + PVRSRV_DEVICE_NODE *psDevNode, + const IMG_CHAR *szStreamName, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32StreamFlags, + TL_STREAM_ONREADEROPENCB pfOnReaderOpenCB, + void *pvOnReaderOpenUD, + TL_STREAM_SOURCECB pfProducerCB, + void *pvProducerUD); + +/*************************************************************************/ /*! + @Function TLStreamOpen + @Description Attach to existing stream that has already been created by a + TLStreamCreate call. A handle is returned to the stream. + @Output phStream Pointer to handle to store the stream. + @Input szStreamName Name of stream, should match an already + existing stream name + @Return PVRSRV_ERROR_NOT_FOUND None of the streams matched the + requested stream name. + PVRSRV_ERROR_INVALID_PARAMS Non-NULL pointer to stream + handler is required. + @Return PVRSRV_OK Success. 
+*/ /**************************************************************************/ +PVRSRV_ERROR +TLStreamOpen(IMG_HANDLE *phStream, + const IMG_CHAR *szStreamName); + + +/*************************************************************************/ /*! + @Function TLStreamReset + @Description Resets read and write pointers and pending flag. + @Input hStream Stream handle +*/ /**************************************************************************/ +void TLStreamReset(IMG_HANDLE hStream); + +/*************************************************************************/ /*! + @Function TLStreamSetNotifStream + @Description Registers a "notification stream" which will be used to + publish information about state change of the "hStream" + stream. Notification can inform about events such as stream + open/close, etc. + @Input hStream Handle to stream to update. + @Input hNotifStream Handle to the stream which will be used for + publishing notifications. + @Return PVRSRV_ERROR_INVALID_PARAMS If either of the parameters is + NULL + @Return PVRSRV_OK Success. +*/ /**************************************************************************/ +PVRSRV_ERROR +TLStreamSetNotifStream(IMG_HANDLE hStream, IMG_HANDLE hNotifStream); + +/*************************************************************************/ /*! + @Function TLStreamReconfigure + @Description Request the stream flags controlling buffer behaviour to + be updated. + In the case where TL_OPMODE_BLOCK is to be used, + TLStreamCreate should be called without that flag and this + function used to change the stream mode once a consumer process + has been started. This avoids a deadlock scenario where the + TLStreamWrite/TLStreamReserve call will hold the Bridge Lock + while blocking if the TL buffer is full. + The TL_OPMODE_BLOCK should never drop the Bridge Lock + as this leads to another deadlock scenario where the caller to + TLStreamWrite/TLStreamReserve has already acquired another lock + (e.g. 
gHandleLock) which is not dropped. This then leads to that + thread acquiring locks out of order. + @Input hStream Handle to stream to update. + @Input ui32StreamFlags Flags that configure buffer behaviour. See above. + @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle or inconsistent + stream flags. + @Return PVRSRV_ERROR_NOT_READY Stream is currently being written to + try again later. + @Return eError Internal services call returned + eError error number. + @Return PVRSRV_OK +*/ /**************************************************************************/ +PVRSRV_ERROR +TLStreamReconfigure(IMG_HANDLE hStream, + IMG_UINT32 ui32StreamFlags); + +/*************************************************************************/ /*! + @Function TLStreamClose + @Description Detach from the stream associated with the given handle. If + the current handle is the last one accessing the stream + (i.e. the number of TLStreamCreate+TLStreamOpen calls matches + the number of TLStreamClose calls) then the stream is also + deleted. + On return the handle is no longer valid. + @Input hStream Handle to stream that will be closed. + @Return None. +*/ /**************************************************************************/ +void +TLStreamClose(IMG_HANDLE hStream); + +/*************************************************************************/ /*! + @Function TLStreamReserve + @Description Reserve space in stream buffer. When successful every + TLStreamReserve call must be followed by a matching + TLStreamCommit call. While a TLStreamCommit call is pending + for a stream, subsequent TLStreamReserve calls for this + stream will fail. + @Input hStream Stream handle. + @Output ppui8Data Pointer to a pointer to a location in the + buffer. The caller can then use this address + in writing data into the stream. + @Input ui32Size Number of bytes to reserve in buffer. + @Return PVRSRV_INVALID_PARAMS NULL stream handler. 
+ @Return PVRSRV_ERROR_NOT_READY There are data previously reserved + that are pending to be committed. + @Return PVRSRV_ERROR_STREAM_MISUSE Misusing the stream by trying to + reserve more space than the + buffer size. + @Return PVRSRV_ERROR_STREAM_FULL The reserve size requested + is larger than the free + space. + @Return PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED The reserve size + requested is larger + than max TL packet size + @Return PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE Permanent stream buffer + does not have enough space + for the reserve. + @Return PVRSRV_OK Success, output arguments valid. +*/ /**************************************************************************/ +PVRSRV_ERROR +TLStreamReserve(IMG_HANDLE hStream, + IMG_UINT8 **ppui8Data, + IMG_UINT32 ui32Size); + +/*************************************************************************/ /*! + @Function TLStreamReserve2 + @Description Reserve space in stream buffer. When successful every + TLStreamReserve call must be followed by a matching + TLStreamCommit call. While a TLStreamCommit call is pending + for a stream, subsequent TLStreamReserve calls for this + stream will fail. + @Input hStream Stream handle. + @Output ppui8Data Pointer to a pointer to a location in the + buffer. The caller can then use this address + in writing data into the stream. + @Input ui32Size Ideal number of bytes to reserve in buffer. + @Input ui32SizeMin Minimum number of bytes to reserve in buffer. + @Input pui32Available Optional, but when present and the + RESERVE_TOO_BIG error is returned, a size + suggestion is returned in this argument which + the caller can attempt to reserve again for a + successful allocation. + @Output pbIsReaderConnected Let writing clients know if reader is + connected or not, in case of error. + @Return PVRSRV_INVALID_PARAMS NULL stream handler. + @Return PVRSRV_ERROR_NOT_READY There are data previously reserved + that are pending to be committed. 
+ @Return PVRSRV_ERROR_STREAM_MISUSE Misusing the stream by trying to + reserve more space than the + buffer size. + @Return PVRSRV_ERROR_STREAM_FULL The reserve size requested + is larger than the free + space. + Check the pui32Available + value for the correct + reserve size to use. + @Return PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED The reserve size + requested is larger + than max TL packet size + @Return PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE Permanent stream buffer + does not have enough space + for the reserve. + @Return PVRSRV_OK Success, output arguments valid. +*/ /**************************************************************************/ +PVRSRV_ERROR +TLStreamReserve2(IMG_HANDLE hStream, + IMG_UINT8 **ppui8Data, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32SizeMin, + IMG_UINT32* pui32Available, + IMG_BOOL* pbIsReaderConnected); + +/*************************************************************************/ /*! + @Function TLStreamReserveReturnFlags + @Description Reserve space in stream buffer. When successful every + TLStreamReserve call must be followed by a matching + TLStreamCommit call. While a TLStreamCommit call is pending + for a stream, subsequent TLStreamReserve calls for this + stream will fail. + @Input hStream Stream handle. + @Output ppui8Data Pointer to a pointer to a location in the + buffer. The caller can then use this address + in writing data into the stream. + @Input ui32Size Ideal number of bytes to reserve in buffer. + @Output pui32Flags Output parameter to return flags generated within + the reserve function. +*/ /**************************************************************************/ +PVRSRV_ERROR +TLStreamReserveReturnFlags(IMG_HANDLE hStream, + IMG_UINT8 **ppui8Data, + IMG_UINT32 ui32Size, + IMG_UINT32* pui32Flags); + +/*************************************************************************/ /*! + @Function TLStreamGetUT + @Description Returns the current stream utilisation in bytes + @Input hStream Stream handle. 
+ @Return IMG_UINT32 Stream utilisation +*/ /**************************************************************************/ +IMG_UINT32 TLStreamGetUT(IMG_HANDLE hStream); + +/*************************************************************************/ /*! + @Function TLStreamCommit + @Description Notify TL that data have been written in the stream buffer. + Should always follow and match TLStreamReserve call. + @Input hStream Stream handle. + @Input ui32Size Number of bytes that have been added to the + stream. + @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle. + @Return PVRSRV_ERROR_STREAM_MISUSE Commit results in more data + committed than the buffer size, + the stream is misused. + @Return eError Commit was successful but + internal services call returned + eError error number. + @Return PVRSRV_OK +*/ /**************************************************************************/ +PVRSRV_ERROR +TLStreamCommit(IMG_HANDLE hStream, + IMG_UINT32 ui32Size); + +/*************************************************************************/ /*! + @Function TLStreamWrite + @Description Combined Reserve/Commit call. This function Reserves space in + the specified stream buffer, copies ui32Size bytes of data + from the array pui8Src points to and Commits in an "atomic" + style operation. + @Input hStream Stream handle. + @Input pui8Src Source to read data from. + @Input ui32Size Number of bytes to copy and commit. + @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handler. + @Return eError Error codes returned by either + Reserve or Commit. + @Return PVRSRV_OK + */ /**************************************************************************/ +PVRSRV_ERROR +TLStreamWrite(IMG_HANDLE hStream, + IMG_UINT8 *pui8Src, + IMG_UINT32 ui32Size); + +/*************************************************************************/ /*! + @Function TLStreamWriteRetFlags + @Description Combined Reserve/Commit call. 
This function Reserves space in + the specified stream buffer, copies ui32Size bytes of data + from the array pui8Src points to and Commits in an "atomic" + style operation. Also accepts a pointer to a bit flag value + for returning write status flags. + @Input hStream Stream handle. + @Input pui8Src Source to read data from. + @Input ui32Size Number of bytes to copy and commit. + @Output pui32Flags Output parameter for write status info + @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handler. + @Return eError Error codes returned by either + Reserve or Commit. + @Return PVRSRV_OK + */ /**************************************************************************/ +PVRSRV_ERROR +TLStreamWriteRetFlags(IMG_HANDLE hStream, + IMG_UINT8 *pui8Src, + IMG_UINT32 ui32Size, + IMG_UINT32 *pui32Flags); + +/*************************************************************************/ /*! + @Function TLStreamSync + @Description Signal the consumer to start acquiring data from the stream + buffer. Called by producers that use the flag + TL_FLAG_NO_SIGNAL_ON_COMMIT to manually control when + consumers starting reading the stream. + Used when multiple small writes need to be batched. + @Input hStream Stream handle. + @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle. + @Return eError Error codes returned by either + Reserve or Commit. + @Return PVRSRV_OK + */ /**************************************************************************/ +PVRSRV_ERROR +TLStreamSync(IMG_HANDLE hStream); + + +/*************************************************************************/ /*! + @Function TLStreamMarkEOS + @Description Insert a EOS marker packet in the given stream. + @Input hStream Stream handle. + @Input bRemoveOld if TRUE, remove old stream record file before + splitting to new file. + @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handler. + @Return eError Error codes returned by either + Reserve or Commit. + @Return PVRSRV_OK Success. 
+*/ /**************************************************************************/ +PVRSRV_ERROR +TLStreamMarkEOS(IMG_HANDLE hStream, IMG_BOOL bRemoveOld); + +/*************************************************************************/ /*! +@Function TLStreamMarkStreamOpen +@Description Puts *open* stream packet into hStream's notification stream, + if set, error otherwise." +@Input hStream Stream handle. +@Return PVRSRV_OK on success and error code on failure +*/ /**************************************************************************/ +PVRSRV_ERROR +TLStreamMarkStreamOpen(IMG_HANDLE hStream); + +/*************************************************************************/ /*! +@Function TLStreamMarkStreamClose +@Description Puts *close* stream packet into hStream's notification stream, + if set, error otherwise." +@Input hStream Stream handle. +@Return PVRSRV_OK on success and error code on failure +*/ /**************************************************************************/ +PVRSRV_ERROR +TLStreamMarkStreamClose(IMG_HANDLE hStream); + +/*************************************************************************/ /*! + @Function TLStreamInfo + @Description Run time information about buffer elemental sizes. + It sets psInfo members accordingly. Users can use those values + to calculate the parameters they use in TLStreamCreate and + TLStreamReserve. + @Output psInfo pointer to stream info structure. + @Return None. +*/ /**************************************************************************/ +void +TLStreamInfo(IMG_HANDLE hStream, PTL_STREAM_INFO psInfo); + +/*************************************************************************/ /*! + @Function TLStreamIsOpenForReading + @Description Query if a stream has any readers connected. + @Input hStream Stream handle. 
+ @Return IMG_BOOL True if at least one reader is connected, + false otherwise +*/ /**************************************************************************/ +IMG_BOOL +TLStreamIsOpenForReading(IMG_HANDLE hStream); + +/*************************************************************************/ /*! + @Function TLStreamOutOfData + @Description Query if the stream is empty (no data waiting to be read). + @Input hStream Stream handle. + @Return IMG_BOOL True if read==write, no data waiting, + false otherwise +*/ /**************************************************************************/ +IMG_BOOL TLStreamOutOfData(IMG_HANDLE hStream); + +/*************************************************************************/ /*! + @Function TLStreamResetProducerByteCount + @Description Reset the producer byte counter on the specified stream. + @Input hStream Stream handle. + @Input ui32Value Value to reset counter to, often 0. + @Return PVRSRV_OK Success. + @Return PVRSRV_ERROR_STREAM_MISUSE Success but the read and write + positions did not match, + stream not empty. +*/ /**************************************************************************/ + +PVRSRV_ERROR +TLStreamResetProducerByteCount(IMG_HANDLE hStream, IMG_UINT32 ui32Value); + +#endif /* TLSTREAM_H */ +/***************************************************************************** + End of file (tlstream.h) +*****************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/tutils_km.h b/drivers/mcst/gpu-imgtec/services/server/include/tutils_km.h new file mode 100644 index 000000000000..3cfd8ade56fe --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/tutils_km.h @@ -0,0 +1,153 @@ +/*************************************************************************/ /*! +@File tutils_km.h +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Kernel services functions for calls to tutils (testing utils) + layer in the server +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef _TUTILS_KM_H_ +#define _TUTILS_KM_H_ + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" +#include "connection_server.h" +#include "device.h" +#include "pvrsrv_sync_km.h" + + +PVRSRV_ERROR ServerTestIoctlKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 uiCmd, + IMG_PBYTE uiIn1, + IMG_UINT32 uiIn2, + IMG_UINT32* puiOut1, + IMG_UINT32* puiOut2); + +PVRSRV_ERROR PowMonTestIoctlKM(IMG_UINT32 uiCmd, + IMG_UINT32 uiIn1, + IMG_UINT32 uiIn2, + IMG_UINT32 *puiOut1, + IMG_UINT32 *puiOut2); + +PVRSRV_ERROR SyncCheckpointTestIoctlKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 uiCmd, + IMG_UINT32 uiIn1, + IMG_UINT32 uiIn2, + const IMG_CHAR *pszInName, + IMG_UINT32 *puiOut1, + IMG_UINT32 *puiOut2, + IMG_UINT8 *puiOut3); + +IMG_EXPORT +PVRSRV_ERROR DevmemIntAllocHostMemKM(IMG_DEVMEM_SIZE_T ui32Size, + IMG_UINT32 ui32Flags, + IMG_UINT32 ui32LableLength, + const IMG_CHAR *pszAllocLabel, + PMR **ppsPMR); + +PVRSRV_ERROR DevmemIntFreeHostMemKM(PMR *psPMR); + +IMG_EXPORT +PVRSRV_ERROR PowerTestIoctlKM(IMG_UINT32 uiCmd, + IMG_UINT32 uiIn1, + IMG_UINT32 uiIn2, + IMG_UINT32 *puiOut1, + IMG_UINT32 *puiOut2); + +PVRSRV_ERROR TestIOCTLSyncFbFenceSignalPVR(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + void *psFence); + +PVRSRV_ERROR 
TestIOCTLSyncFbFenceCreatePVR(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 uiNameLength, + const IMG_CHAR *pszName, + PVRSRV_TIMELINE iTL, + PVRSRV_FENCE *piOutFence); + +PVRSRV_ERROR TestIOCTLSyncFbFenceResolvePVR(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_FENCE iFence); +PVRSRV_ERROR TestIOCTLSyncFbSWTimelineAdvance(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_TIMELINE iSWTl); + +PVRSRV_ERROR TestIOCTLSyncFbSWFenceCreate(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_TIMELINE iTl, + IMG_UINT32 uiFenceNameLength, + const IMG_CHAR *pszFenceName, + PVRSRV_FENCE *piFence); + + + +PVRSRV_ERROR TestIOCTLSyncSWTimelineFenceCreateKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_TIMELINE sTimeline, + IMG_UINT32 uiNameLength, + const IMG_CHAR *pszFenceName, + PVRSRV_FENCE *psOutFence); + +PVRSRV_ERROR TestIOCTLSyncSWTimelineAdvanceKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_TIMELINE sTimeline); + +PVRSRV_ERROR TestIOCTLIsTimelineValidKM(PVRSRV_TIMELINE sTimeline, + IMG_BOOL *bResult); + +PVRSRV_ERROR TestIOCTLIsFenceValidKM(PVRSRV_FENCE sFence, + IMG_BOOL *bResult); + +PVRSRV_ERROR TestIOCTLSyncCheckpointResolveFenceKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_FENCE hFence, + IMG_UINT32 *pui32NumSyncCheckpoints); + +PVRSRV_ERROR TestIOCTLSyncCheckpointCreateFenceKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_CHAR *pszFenceName, + PVRSRV_TIMELINE hTimeline, + PVRSRV_FENCE *phOutFence, + IMG_UINT64 *puiUpdateFenceUID); + +PVRSRV_ERROR TestIOCTLWriteByteKM(IMG_BYTE *pui8WriteData); + +PVRSRV_ERROR TestIOCTLReadByteKM(IMG_BYTE *pui8ReadData); +#endif /* _TUTILS_KM_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/vmm_impl.h b/drivers/mcst/gpu-imgtec/services/server/include/vmm_impl.h new file mode 100644 index 
000000000000..f6dd5433b8a6 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/vmm_impl.h @@ -0,0 +1,187 @@ +/*************************************************************************/ /*! +@File vmm_impl.h +@Title Common VM manager API +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides common VM manager definitions that need to + be shared by system virtualization layer itself and modules that + implement the actual VM manager types. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef VMM_IMPL_H +#define VMM_IMPL_H + +#include "img_types.h" +#include "pvrsrv_error.h" + +typedef enum _VMM_CONF_PARAM_ +{ + VMM_CONF_PRIO_OSID0 = 0, + VMM_CONF_PRIO_OSID1 = 1, + VMM_CONF_PRIO_OSID2 = 2, + VMM_CONF_PRIO_OSID3 = 3, + VMM_CONF_PRIO_OSID4 = 4, + VMM_CONF_PRIO_OSID5 = 5, + VMM_CONF_PRIO_OSID6 = 6, + VMM_CONF_PRIO_OSID7 = 7, + VMM_CONF_ISOL_THRES = 8, + VMM_CONF_HCS_DEADLINE = 9 +} VMM_CONF_PARAM; + +/* + Virtual machine manager (hypervisor) para-virtualization (PVZ) connection: + - Type is implemented by host and guest drivers + - Assumes synchronous function call semantics + - Unidirectional semantics + - For Host (vmm -> host) + - For Guest (guest -> vmm) + - Parameters can be IN/OUT/INOUT + + - Host pvz entries are pre-implemented by IMG + - For host implementation, see vmm_pvz_server.c + - Called by host side hypercall handler or VMM + + - Guest pvz entries are supplied by 3rd-party + - These are specific to hypervisor (VMM) type + - These implement the actual hypercalls mechanism + + Para-virtualization (PVZ) call runtime sequence: + 1 - Guest driver in guest VM calls PVZ function + 1.1 - Guest PVZ connection calls + 1.2 - Guest VM Manager type which + 1.2.1 - Performs any pre-processing like parameter packing, etc. 
+ 1.2.2 - Issues hypercall (blocking synchronous call) + + 2 - VM Manager (hypervisor) receives hypercall + 2.1 - Hypercall handler: + 2.1.1 - Performs any pre-processing + 2.1.2 - If call terminates in VM Manager: perform action and return from hypercall + 2.1.3 - Otherwise forward to host driver (implementation specific call) + + 3 - Host driver receives call from VM Manager + 3.1 - Host VM manager type: + 3.1.1 - Performs any pre-processing like parameter unpacking, etc. + 3.1.2 - Acquires host driver PVZ handler and calls the appropriate entry + 3.2 - Host PVZ connection calls corresponding host system virtualisation layer + 3.3 - Host driver system virtualisation layer: + 3.3.1 - Perform action requested by guest driver + 3.3.2 - Return to host VM Manager type + 3.4 - Host VM Manager type: + 3.4.1 - Prepare to return from hypercall + 3.4.2 - Perform any post-processing like result packing, etc. + 3.4.3 - Issue return from hypercall + + 4 - VM Manager (hypervisor) + 4.1 - Perform any post-processing + 4.2 - Return control to guest driver + + 5 - Guest driver in guest VM + 5.1 - Perform any post-processing like parameter unpacking, etc. + 5.2 - Continue execution in guest VM + */ +typedef struct _VMM_PVZ_CONNECTION_ +{ + struct { + /* + This pair must be implemented if the guest is responsible + for allocating the physical heap that backs its firmware + allocations, this is the default configuration. The physical + heap is allocated within the guest VM IPA space and this + IPA Addr/Size must be re-expressed as PA space Addr/Size + by the VM manager before forwarding request to host. + If not implemented, return PVRSRV_ERROR_NOT_IMPLEMENTED. 
+ */ + PVRSRV_ERROR (*pfnMapDevPhysHeap)(IMG_UINT32 ui32FuncID, + IMG_UINT32 ui32DevID, + IMG_UINT64 ui64Size, + IMG_UINT64 ui64PAddr); + + PVRSRV_ERROR (*pfnUnmapDevPhysHeap)(IMG_UINT32 ui32FuncID, + IMG_UINT32 ui32DevID); + } sClientFuncTab; + + struct { + /* + Corresponding server side entries to handle guest PVZ calls + NOTE: + - Additional PVZ function ui32OSID parameter + - OSID determination is responsibility of VM manager + - Actual OSID value must be supplied by VM manager + - This can be done either in client/VMM/host side + - Must be done before host pvz function(s) are called + - Host pvz function validates incoming OSID values + */ + PVRSRV_ERROR (*pfnMapDevPhysHeap)(IMG_UINT32 ui32OSID, + IMG_UINT32 ui32FuncID, + IMG_UINT32 ui32DevID, + IMG_UINT64 ui64Size, + IMG_UINT64 ui64PAddr); + + PVRSRV_ERROR (*pfnUnmapDevPhysHeap)(IMG_UINT32 ui32OSID, + IMG_UINT32 ui32FuncID, + IMG_UINT32 ui32DevID); + } sServerFuncTab; + + struct { + /* + This is used by the VM manager to report pertinent runtime guest VM + information to the host; these events may in turn be forwarded to + the firmware + */ + PVRSRV_ERROR (*pfnOnVmOnline)(IMG_UINT32 ui32OSID, IMG_UINT32 ui32Priority); + + PVRSRV_ERROR (*pfnOnVmOffline)(IMG_UINT32 ui32OSID); + + PVRSRV_ERROR (*pfnVMMConfigure)(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue); + + } sVmmFuncTab; +} VMM_PVZ_CONNECTION; + +/*! +******************************************************************************* + @Function VMMCreatePvzConnection() and VMMDestroyPvzConnection() + @Description Both the guest and VM manager call this in order to obtain a + PVZ connection to the VM and host respectively; that is, guest + calls it to obtain connection to VM, VM calls it to obtain a + connection to the host. + @Return PVRSRV_OK on success. 
Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection); +void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection); + +#endif /* VMM_IMPL_H */ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/vmm_pvz_client.h b/drivers/mcst/gpu-imgtec/services/server/include/vmm_pvz_client.h new file mode 100644 index 000000000000..688e9f36c98c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/vmm_pvz_client.h @@ -0,0 +1,77 @@ +/*************************************************************************/ /*! +@File vmm_pvz_client.h +@Title Guest VM manager client para-virtualization routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header provides guest VMM client para-virtualization APIs +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef VMM_PVZ_CLIENT_H +#define VMM_PVZ_CLIENT_H + +#include "pvrsrv.h" +#include "img_types.h" +#include "pvrsrv_error.h" +#include "vmm_pvz_common.h" +#include "vmm_impl.h" + +/*! +******************************************************************************* + @Function PvzClientMapDevPhysHeap + @Description The guest front-end to initiate a pfnMapDevPhysHeap PVZ call + to the host. + @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR +PvzClientMapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig); + +/*! 
+******************************************************************************* + @Function PvzClientUnmapDevPhysHeap + @Description The guest front-end to initiate a pfnUnmapDevPhysHeap PVZ call + to the host. + @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR +PvzClientUnmapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig); + +#endif /* VMM_PVZ_CLIENT_H */ + +/****************************************************************************** + End of file (vmm_pvz_client.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/vmm_pvz_common.h b/drivers/mcst/gpu-imgtec/services/server/include/vmm_pvz_common.h new file mode 100644 index 000000000000..82ab50d6fa30 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/vmm_pvz_common.h @@ -0,0 +1,65 @@ +/*************************************************************************/ /*! +@File vmm_pvz_common.h +@Title Common VM manager function IDs +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header provides VM manager para-virtualization function IDs and + definitions of their payload structures, if appropriate. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef VMM_PVZ_COMMON_H +#define VMM_PVZ_COMMON_H + +#include "img_types.h" + +#define PVZ_BRIDGE_DEFAULT 0UL +#define PVZ_BRIDGE_MAPDEVICEPHYSHEAP (PVZ_BRIDGE_DEFAULT + 1) +#define PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP (PVZ_BRIDGE_MAPDEVICEPHYSHEAP + 1) +#define PVZ_BRIDGE_LAST (PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP + 1) + +typedef struct _PVZ_BRIDGEPARA_MAPDEVICEPHYSHEAP +{ + IMG_UINT64 ui64MemBase; + IMG_UINT32 ui32OSID; +}PVZ_BRIDGEPARA_MAPDEVICEPHYSHEAP; + +#endif /* VMM_PVZ_COMMON_H */ + +/***************************************************************************** + End of file (vmm_pvz_common.h) +*****************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/vmm_pvz_server.h b/drivers/mcst/gpu-imgtec/services/server/include/vmm_pvz_server.h new file mode 100644 index 000000000000..947f28944e37 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/vmm_pvz_server.h @@ -0,0 +1,121 @@ +/*************************************************************************/ /*! +@File vmm_pvz_server.h +@Title VM manager para-virtualization interface helper routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header provides API(s) available to VM manager, this must be + called to close the loop during guest para-virtualization calls. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef VMM_PVZ_SERVER_H +#define VMM_PVZ_SERVER_H + +#include "vmm_impl.h" +#include "img_types.h" +#include "pvrsrv_error.h" +#include "vmm_pvz_common.h" + +/*! +******************************************************************************* + @Function PvzServerMapDevPhysHeap + @Description The VM manager calls this in response to guest PVZ interface + call pfnMapDevPhysHeap. + @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR +PvzServerMapDevPhysHeap(IMG_UINT32 ui32OSID, + IMG_UINT32 ui32FuncID, + IMG_UINT32 ui32DevID, + IMG_UINT64 ui64Size, + IMG_UINT64 ui64PAddr); + +/*! +******************************************************************************* + @Function PvzServerUnmapDevPhysHeap + @Description The VM manager calls this in response to guest PVZ interface + call pfnUnmapDevPhysHeap. + @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR +PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32OSID, + IMG_UINT32 ui32FuncID, + IMG_UINT32 ui32DevID); + +/*! +******************************************************************************* + @Function PvzServerOnVmOnline + @Description The VM manager calls this when guest VM machine comes online. + The host driver might initialize the FW if it has not done so + already. + @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR +PvzServerOnVmOnline(IMG_UINT32 ui32OSID, IMG_UINT32 ui32Priority); + +/*! +******************************************************************************* + @Function PvzServerOnVmOffline + @Description The VM manager calls this when a guest VM machine is about to + go offline. 
The VM manager might have unmapped the GPU kick + register for such VM but not the GPU memory until the call + returns. Once the function returns, the FW does not hold any + reference for such VM and no workloads from it are running in + the GPU and it is safe to remove the memory for such VM. + @Return PVRSRV_OK on success. PVRSRV_ERROR_TIMEOUT if for some reason + the FW is taking too long to clean-up the resources of the + OSID. Otherwise, a PVRSRV_ERROR code. +******************************************************************************/ +PVRSRV_ERROR +PvzServerOnVmOffline(IMG_UINT32 ui32OSID); + +/*! +******************************************************************************* + @Function PvzServerVMMConfigure + @Description The VM manager calls this to configure several parameters like + HCS or isolation. + @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR +PvzServerVMMConfigure(VMM_CONF_PARAM eVMMParamType, + IMG_UINT32 ui32ParamValue); + +#endif /* VMM_PVZ_SERVER_H */ + +/****************************************************************************** + End of file (vmm_pvz_server.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/vz_vm.h b/drivers/mcst/gpu-imgtec/services/server/include/vz_vm.h new file mode 100644 index 000000000000..cfcf4cffa5ec --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/vz_vm.h @@ -0,0 +1,61 @@ +/*************************************************************************/ /*! +@File vz_vm.h +@Title System virtualization VM support APIs +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides VM management support APIs +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _VZ_VM_H_ +#define _VZ_VM_H_ + +#include "vmm_impl.h" + +bool IsVmOnline(IMG_UINT32 ui32OSID); + +PVRSRV_ERROR PvzOnVmOnline(IMG_UINT32 ui32OSid, IMG_UINT32 ui32Priority); + +PVRSRV_ERROR PvzOnVmOffline(IMG_UINT32 ui32OSid); + +PVRSRV_ERROR PvzVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue); + +#endif /* _VZ_VM_H_ */ + +/***************************************************************************** + End of file (vz_vm.h) +*****************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/server/include/vz_vmm_pvz.h b/drivers/mcst/gpu-imgtec/services/server/include/vz_vmm_pvz.h new file mode 100644 index 000000000000..abc6470ebd9c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/server/include/vz_vmm_pvz.h @@ -0,0 +1,79 @@ +/*************************************************************************/ /*! +@File vz_vmm_pvz.h +@Title System virtualization VM manager management APIs +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides VM manager para-virtz management APIs +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef VZ_VMM_PVZ_H +#define VZ_VMM_PVZ_H + +#include "img_types.h" +#include "vmm_impl.h" + +/*! +******************************************************************************* + @Function PvzConnectionInit() and PvzConnectionDeInit() + @Description PvzConnectionInit initializes the VM manager para-virt + which is used subsequently for communication between guest and + host; depending on the underlying VM setup, this could either + be a hyper-call or cross-VM call + @Return PVRSRV_OK on success. 
Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR PvzConnectionInit(PVRSRV_DEVICE_CONFIG *psDevConfig); +void PvzConnectionDeInit(void); + +/*! +******************************************************************************* + @Function PvzConnectionAcquire() and PvzConnectionRelease() + @Description These are to acquire/release a handle to the VM manager + para-virtz connection to make a pvz call; on the client, use it + it to make the actual pvz call and on the server handler / + VM manager, use it to complete the processing for the pvz call + or make a VM manager to host pvzbridge call +@Return VMM_PVZ_CONNECTION* on success. Otherwise NULL +******************************************************************************/ +VMM_PVZ_CONNECTION* PvzConnectionAcquire(void); +void PvzConnectionRelease(VMM_PVZ_CONNECTION *psPvzConnection); + +#endif /* VZ_VMM_PVZ_H */ + +/****************************************************************************** + End of file (vz_vmm_pvz.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/shared/common/devicemem.c b/drivers/mcst/gpu-imgtec/services/shared/common/devicemem.c new file mode 100644 index 000000000000..c6389e184bfd --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/common/devicemem.c @@ -0,0 +1,2980 @@ +/*************************************************************************/ /*! +@File +@Title Device Memory Management +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Front End (nominally Client side part, but now invokable + from server too) of device memory management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ /**************************************************************************/ + +#include "devicemem.h" +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" +#include "allocmem.h" +#include "ra.h" +#include "osfunc.h" +#include "osmmap.h" +#include "devicemem_utils.h" +#include "client_mm_bridge.h" +#include "client_cache_bridge.h" +#include "services_km.h" + +#if defined(PDUMP) +#if defined(__KERNEL__) +#include "pdump_km.h" +#else +#include "pdump_um.h" +#endif +#include "devicemem_pdump.h" +#endif +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +#include "client_ri_bridge.h" +#endif +#include "client_devicememhistory_bridge.h" +#include "info_page_client.h" + +#include "rgx_heaps.h" +#if defined(__KERNEL__) +#include "pvrsrv.h" +#include "rgxdefs_km.h" +#include "rgx_bvnc_defs_km.h" +#include "device.h" +#include "rgxdevice.h" +#include "pvr_ricommon.h" +#if defined(LINUX) +#include "linux/kernel.h" +#endif +#else +#include "rgxdefs.h" +#endif + +#if defined(__KERNEL__) && defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +extern PVRSRV_ERROR RIDumpAllKM(void); +#endif + +#if defined(__KERNEL__) +#define GET_ERROR_STRING(eError) PVRSRVGetErrorString(eError) +#else +#define GET_ERROR_STRING(eError) PVRSRVGetErrorString(eError) +#endif + +#if defined(__KERNEL__) +/* Derive the virtual from the hPMR */ +static +IMG_UINT64 _GetPremappedVA(PMR *psPMR, PVRSRV_DEVICE_NODE *psDevNode) +{ + PVRSRV_ERROR eError; + IMG_UINT64 ui64OptionalMapAddress = DEVICEMEM_UTILS_NO_ADDRESS; + + IMG_DEV_PHYADDR sDevAddr; + IMG_BOOL bValid; + + PHYS_HEAP *psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]; + IMG_DEV_PHYADDR sHeapAddr; + + eError = PhysHeapRegionGetDevPAddr(psPhysHeap, 0, &sHeapAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapRegionGetDevPAddr", fail); + +#if defined(PVR_PMR_TRANSLATE_UMA_ADDRESSES) +{ + IMG_DEV_PHYADDR sDevPAddrCorrected; + + PhysHeapCpuPAddrToDevPAddr(psPhysHeap, 1, &sDevPAddrCorrected, 
(IMG_CPU_PHYADDR *)&sHeapAddr); + sHeapAddr.uiAddr = sDevPAddrCorrected.uiAddr; +} +#endif + + eError = PMRLockSysPhysAddresses(psPMR); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRLockSysPhysAddr", fail); + + eError = PMR_DevPhysAddr(psPMR, OSGetPageShift(), 1, 0, &sDevAddr, &bValid); + if (eError != PVRSRV_OK) + { + PVR_LOG_IF_ERROR(eError, "PMR_DevPhysAddr"); + eError = PMRUnlockSysPhysAddresses(psPMR); + PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddr"); + goto fail; + } + + eError = PMRUnlockSysPhysAddresses(psPMR); + PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddr"); + + ui64OptionalMapAddress = RGX_FIRMWARE_RAW_HEAP_BASE | (sDevAddr.uiAddr - sHeapAddr.uiAddr); + + PVR_DPF((PVR_DBG_ALLOC, "%s: sDevAddr.uiAddr = 0x%"IMG_UINT64_FMTSPECx" sHeapAddr.uiAddr = 0x%"IMG_UINT64_FMTSPECx" => ui64OptionalMapAddress = 0x%"IMG_UINT64_FMTSPECx, + __func__, sDevAddr.uiAddr, sHeapAddr.uiAddr, ui64OptionalMapAddress)); +fail: + return ui64OptionalMapAddress; +} +#endif + +/***************************************************************************** + * Sub allocation internals * + *****************************************************************************/ +static INLINE DEVMEM_FLAGS_T +DevmemOverrideFlagsOrPassThrough(SHARED_DEV_CONNECTION hDevConnection, DEVMEM_FLAGS_T uiFlags) +{ +#if defined(__KERNEL__) && defined(RGX_FEATURE_GPU_CPU_COHERENCY) + /* + * Override the requested memory flags of FW allocations only, + * non-FW allocations pass-through unmodified. + * + * On fully coherent platforms: + * - We upgrade uncached, CPU-only cached or GPU-only cached to + * full coherency. This gives caching improvements for free. + * + * On ace-lite platforms: + * - If the allocation is not CPU cached, then there is nothing + * for the GPU to snoop regardless of the GPU cache setting. + * - If the allocation is not GPU cached, then the SLC will not + * be used and will not snoop the CPU even if it is CPU cached. 
+ * - Therefore only the GPU setting can be upgraded to coherent + * if it is already GPU cached incoherent and the CPU is cached. + * + * All other platforms: + * - Do not modify the allocation flags. + */ + if (PVRSRV_CHECK_FW_LOCAL(uiFlags)) + { + PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDevConnection; + + if (PVRSRVSystemSnoopingOfDeviceCache(psDevNode->psDevConfig) && + PVRSRVSystemSnoopingOfCPUCache(psDevNode->psDevConfig)) + { + /* Clear existing flags, mark the allocation as fully coherent. */ + uiFlags &= ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK | PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK); + uiFlags |= PVRSRV_MEMALLOCFLAG_CACHE_COHERENT; + } + else if ((PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) || PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags)) && + (PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags)) && + PVRSRVSystemSnoopingOfCPUCache(psDevNode->psDevConfig) && + psDevNode->eDevFabricType == PVRSRV_DEVICE_FABRIC_ACELITE) + { + /* Upgrade the allocation from GPU cached incoherent to GPU cached coherent. 
*/ + uiFlags &= ~PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK; + uiFlags |= PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT; + } + } +#else + PVR_UNREFERENCED_PARAMETER(hDevConnection); +#endif + + return uiFlags; +} + +static INLINE void +CheckAnnotationLength(const IMG_CHAR *pszAnnotation) +{ + IMG_UINT32 length = OSStringLength(pszAnnotation); + + if (length >= DEVMEM_ANNOTATION_MAX_LEN) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Annotation \"%s\" has been truncated to %d characters from %d characters", + __func__, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN - 1, length)); + } +} + +static PVRSRV_ERROR +AllocateDeviceMemory(SHARED_DEV_CONNECTION hDevConnection, + IMG_UINT32 uiLog2Quantum, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_DEVMEM_ALIGN_T uiAlign, + DEVMEM_FLAGS_T uiFlags, + IMG_BOOL bExportable, + const IMG_CHAR *pszAnnotation, + DEVMEM_IMPORT **ppsImport) +{ + DEVMEM_IMPORT *psImport; + DEVMEM_FLAGS_T uiPMRFlags; + IMG_HANDLE hPMR; + PVRSRV_ERROR eError; + + eError = DevmemImportStructAlloc(hDevConnection, + &psImport); + PVR_GOTO_IF_ERROR(eError, failAlloc); + + /* check if shift value is not too big (sizeof(1ULL)) */ + PVR_ASSERT(uiLog2Quantum < sizeof(unsigned long long) * 8); + /* Check the size is a multiple of the quantum */ + PVR_ASSERT((uiSize & ((1ULL<psImport; + SHARED_DEV_CONNECTION hDevConnection; + IMG_HANDLE hPMR; + IMG_HANDLE hSrvDevMemHeap; + POS_LOCK hLock; + IMG_DEV_VIRTADDR sDevVAddr; + IMG_CPU_VIRTADDR pvCpuVAddr; + DEVMEM_PROPERTIES_T uiProperties; + + if (NULL == psImport) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Sparse memory import", __func__)); + goto e0; + } + + hDevConnection = psImport->hDevConnection; + hPMR = psImport->hPMR; + hLock = psImport->hLock; + sDevVAddr = psImport->sDeviceImport.sDevVAddr; + pvCpuVAddr = psImport->sCPUImport.pvCPUVAddr; + + if (NULL == hDevConnection) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid 
Bridge handle", __func__)); + goto e0; + } + + if (NULL == hPMR) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid PMR handle", __func__)); + goto e0; + } + + if ((uiSparseFlags & SPARSE_RESIZE_BOTH) && (0 == sDevVAddr.uiAddr)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Device Virtual Map", __func__)); + goto e0; + } + + if ((uiSparseFlags & SPARSE_MAP_CPU_ADDR) && (NULL == pvCpuVAddr)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid CPU Virtual Map", __func__)); + goto e0; + } + + uiProperties = GetImportProperties(psMemDesc->psImport); + + if (uiProperties & DEVMEM_PROPERTIES_SECURE) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Secure buffers currently do not support sparse changes", + __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e0; + } + + if (uiProperties & DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: This memory descriptor doesn't support sparse changes", + __func__)); + eError = PVRSRV_ERROR_INVALID_REQUEST; + goto e0; + } + +#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE + if (psMemDesc->sCPUMemDesc.ui32RefCount > 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: This memory descriptor is mapped more than once (refcnt: %u)into " + "CPU Address space.\nRelease all CPU maps of this object and retry...", + __func__, psMemDesc->sCPUMemDesc.ui32RefCount)); + eError = PVRSRV_ERROR_OBJECT_STILL_REFERENCED; + goto e0; + } +#endif + + hSrvDevMemHeap = psImport->sDeviceImport.psHeap->hDevMemServerHeap; + + OSLockAcquire(hLock); + + eError = BridgeChangeSparseMem(GetBridgeHandle(hDevConnection), + hSrvDevMemHeap, + hPMR, + ui32AllocPageCount, + paui32AllocPageIndices, + ui32FreePageCount, + pauiFreePageIndices, + uiSparseFlags, + psImport->uiFlags, + sDevVAddr, + (IMG_UINT64)((uintptr_t)pvCpuVAddr)); + + OSLockRelease(hLock); + + if (eError != PVRSRV_OK) + { + goto e0; + } + + if (GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + 
BridgeDevicememHistorySparseChange(GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + psMemDesc->uiOffset, + psMemDesc->sDeviceMemDesc.sDevVAddr, + psMemDesc->uiAllocSize, + psMemDesc->szText, + DevmemGetHeapLog2PageSize(psImport->sDeviceImport.psHeap), + ui32AllocPageCount, + paui32AllocPageIndices, + ui32FreePageCount, + pauiFreePageIndices, + psMemDesc->ui32AllocationIndex, + &psMemDesc->ui32AllocationIndex); + } + +e0: + return eError; +} + +static void +FreeDeviceMemory(DEVMEM_IMPORT *psImport) +{ + DevmemImportStructRelease(psImport); +} + +static PVRSRV_ERROR +SubAllocImportAlloc(RA_PERARENA_HANDLE hArena, + RA_LENGTH_T uiSize, + RA_FLAGS_T _flags, + const IMG_CHAR *pszAnnotation, + /* returned data */ + RA_BASE_T *puiBase, + RA_LENGTH_T *puiActualSize, + RA_PERISPAN_HANDLE *phImport) +{ + /* When suballocations need a new lump of memory, the RA calls + back here. Later, in the kernel, we must construct a new PMR + and a pairing between the new lump of virtual memory and the + PMR (whether or not such PMR is backed by physical memory) */ + DEVMEM_HEAP *psHeap; + DEVMEM_IMPORT *psImport; + IMG_DEVMEM_ALIGN_T uiAlign; + PVRSRV_ERROR eError; + IMG_UINT32 ui32MappingTable = 0; + DEVMEM_FLAGS_T uiFlags = (DEVMEM_FLAGS_T) _flags; + IMG_UINT64 ui64OptionalMapAddress = DEVICEMEM_UTILS_NO_ADDRESS; + + /* Per-arena private handle is, for us, the heap */ + psHeap = hArena; + + /* align to the l.s.b. of the size... e.g. 96kiB aligned to + 32kiB. NB: There is an argument to say that the RA should never + ask us for Non-power-of-2 size anyway, but I don't want to make + that restriction arbitrarily now */ + uiAlign = uiSize & ~(uiSize-1); + + /* Technically this is only required for guest drivers due to + fw heaps being pre-allocated and pre-mapped resulting in + a 1:1 (i.e. virtual : physical) offset correlation but we + force this behaviour for all drivers to maintain consistency + (i.e. 
heap->VA uiAlign <= heap->PA uiLog2Quantum) */ + if (uiAlign > (IMG_DEVMEM_ALIGN_T)(1ULL << psHeap->uiLog2Quantum)) + { + uiAlign = (IMG_DEVMEM_ALIGN_T)(1ULL << psHeap->uiLog2Quantum); + } + + /* The RA should not have invoked us with a size that is not a + multiple of the quantum anyway */ + PVR_ASSERT((uiSize & ((1ULL<uiLog2Quantum)-1)) == 0); + + eError = AllocateDeviceMemory(psHeap->psCtx->hDevConnection, + psHeap->uiLog2Quantum, + uiSize, + uiSize, + 1, + 1, + &ui32MappingTable, + uiAlign, + uiFlags, + IMG_FALSE, + "PMR sub-allocated", + &psImport); + PVR_GOTO_IF_ERROR(eError, failAlloc); + +#if defined(PDUMP) && defined(DEBUG) +#if defined(__KERNEL__) + PDUMPCOMMENTWITHFLAGS(PDUMP_CONT, + "Created PMR for sub-allocations with handle ID: 0x%p Annotation: \"%s\" (PID %u)", + psImport->hPMR, pszAnnotation, OSGetCurrentProcessID()); +#else + PDUMPCOMMENTF(psHeap->psCtx->hDevConnection, PDUMP_FLAGS_CONTINUOUS, + "Created PMR for sub-allocations with handle ID: %p Annotation: \"%s\" (PID %u)", + psImport->hPMR, pszAnnotation, OSGetCurrentProcessID()); +#endif +#else + PVR_UNREFERENCED_PARAMETER(pszAnnotation); +#endif + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI)) + { +#if defined(__KERNEL__) + PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psHeap->psCtx->hDevConnection; + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDevNode->pvDevice; + + PVR_ASSERT(PVRSRV_CHECK_FW_LOCAL(uiFlags)); + + /* If allocation is made by the Kernel from the firmware heap, account for it + * under the PVR_SYS_ALLOC_PID. 
+ */ + if ((psHeap == psDevInfo->psFirmwareMainHeap) || (psHeap == psDevInfo->psFirmwareConfigHeap)) + { + eError = BridgeRIWritePMREntryWithOwner (GetBridgeHandle(psImport->hDevConnection), + psImport->hPMR, + PVR_SYS_ALLOC_PID); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWritePMREntryWithOwner failed (Error=%d)", __func__, eError)); + } + } + else +#endif + { + eError = BridgeRIWritePMREntry (GetBridgeHandle(psImport->hDevConnection), + psImport->hPMR); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWritePMREntry failed (Error=%d)", __func__, eError)); + } + } + } +#endif + +#if defined(__KERNEL__) + if (psHeap->bPremapped) + { + ui64OptionalMapAddress = _GetPremappedVA(psImport->hPMR, psHeap->psCtx->hDevConnection); + } +#endif + + /* + Suballocations always get mapped into the device was we need to + key the RA off something and as we can't export suballocations + there is no valid reason to request an allocation an not map it + */ + eError = DevmemImportStructDevMap(psHeap, + IMG_TRUE, + psImport, + ui64OptionalMapAddress); + PVR_GOTO_IF_ERROR(eError, failMap); + + OSLockAcquire(psImport->hLock); + /* Mark this import struct as zeroed so we can save some PDump LDBs + * and do not have to CPU map + mem set()*/ + if (uiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) + { + psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_ZEROED; + } + else if (uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC) + { + psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_POISONED; + } + psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_CLEAN; + OSLockRelease(psImport->hLock); + + *puiBase = psImport->sDeviceImport.sDevVAddr.uiAddr; + *puiActualSize = uiSize; + *phImport = psImport; + + return PVRSRV_OK; + + /* error exit paths follow */ + +failMap: + FreeDeviceMemory(psImport); +failAlloc: + + return eError; +} + +static void +SubAllocImportFree(RA_PERARENA_HANDLE hArena, + RA_BASE_T uiBase, + RA_PERISPAN_HANDLE 
hImport) +{ + DEVMEM_IMPORT *psImport = hImport; +#if !defined(PVRSRV_NEED_PVR_ASSERT) + PVR_UNREFERENCED_PARAMETER(hArena); + PVR_UNREFERENCED_PARAMETER(uiBase); +#endif + + PVR_ASSERT(psImport != NULL); + PVR_ASSERT(hArena == psImport->sDeviceImport.psHeap); + PVR_ASSERT(uiBase == psImport->sDeviceImport.sDevVAddr.uiAddr); + + (void) DevmemImportStructDevUnmap(psImport); + (void) DevmemImportStructRelease(psImport); +} + +/***************************************************************************** + * Devmem context internals * + *****************************************************************************/ + +static PVRSRV_ERROR +PopulateContextFromBlueprint(struct DEVMEM_CONTEXT_TAG *psCtx, + DEVMEM_HEAPCFGID uiHeapBlueprintID) +{ + PVRSRV_ERROR eError; + PVRSRV_ERROR eError2; + struct DEVMEM_HEAP_TAG **ppsHeapArray; + IMG_UINT32 uiNumHeaps; + IMG_UINT32 uiHeapsToUnwindOnError; + IMG_UINT32 uiHeapIndex; + IMG_DEV_VIRTADDR sDevVAddrBase; + IMG_CHAR aszHeapName[DEVMEM_HEAPNAME_MAXLENGTH]; + IMG_DEVMEM_SIZE_T uiHeapLength; + IMG_DEVMEM_SIZE_T uiReservedRegionLength; + IMG_DEVMEM_LOG2ALIGN_T uiLog2DataPageSize; + IMG_DEVMEM_LOG2ALIGN_T uiLog2ImportAlignment; + + eError = DevmemHeapCount(psCtx->hDevConnection, + uiHeapBlueprintID, + &uiNumHeaps); + PVR_GOTO_IF_ERROR(eError, e0); + + if (uiNumHeaps == 0) + { + ppsHeapArray = NULL; + } + else + { + ppsHeapArray = OSAllocMem(sizeof(*ppsHeapArray) * uiNumHeaps); + PVR_GOTO_IF_NOMEM(ppsHeapArray, eError, e0); + } + + uiHeapsToUnwindOnError = 0; + + for (uiHeapIndex = 0; uiHeapIndex < uiNumHeaps; uiHeapIndex++) + { + eError = DevmemHeapDetails(psCtx->hDevConnection, + uiHeapBlueprintID, + uiHeapIndex, + &aszHeapName[0], + sizeof(aszHeapName), + &sDevVAddrBase, + &uiHeapLength, + &uiReservedRegionLength, + &uiLog2DataPageSize, + &uiLog2ImportAlignment); + PVR_GOTO_IF_ERROR(eError, e1); + + eError = DevmemCreateHeap(psCtx, + sDevVAddrBase, + uiHeapLength, + uiReservedRegionLength, + uiLog2DataPageSize, + 
uiLog2ImportAlignment, + aszHeapName, + uiHeapBlueprintID, + &ppsHeapArray[uiHeapIndex]); + PVR_GOTO_IF_ERROR(eError, e1); + + uiHeapsToUnwindOnError = uiHeapIndex + 1; + } + + psCtx->uiAutoHeapCount = uiNumHeaps; + psCtx->ppsAutoHeapArray = ppsHeapArray; + + PVR_ASSERT(psCtx->uiNumHeaps >= psCtx->uiAutoHeapCount); + PVR_ASSERT(psCtx->uiAutoHeapCount == uiNumHeaps); + + return PVRSRV_OK; + + /* error exit paths */ +e1: + for (uiHeapIndex = 0; uiHeapIndex < uiHeapsToUnwindOnError; uiHeapIndex++) + { + eError2 = DevmemDestroyHeap(ppsHeapArray[uiHeapIndex]); + PVR_ASSERT(eError2 == PVRSRV_OK); + } + + if (uiNumHeaps != 0) + { + OSFreeMem(ppsHeapArray); + } + +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +static PVRSRV_ERROR +UnpopulateContextFromBlueprint(struct DEVMEM_CONTEXT_TAG *psCtx) +{ + PVRSRV_ERROR eReturn = PVRSRV_OK; + PVRSRV_ERROR eError2; + IMG_UINT32 uiHeapIndex; + IMG_BOOL bDoCheck = IMG_TRUE; +#if defined(__KERNEL__) + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + bDoCheck = IMG_FALSE; + } +#endif + + for (uiHeapIndex = 0; uiHeapIndex < psCtx->uiAutoHeapCount; uiHeapIndex++) + { + if (!psCtx->ppsAutoHeapArray[uiHeapIndex]) + { + continue; + } + + eError2 = DevmemDestroyHeap(psCtx->ppsAutoHeapArray[uiHeapIndex]); + if (eError2 != PVRSRV_OK) + { + eReturn = eError2; + } + else + { + psCtx->ppsAutoHeapArray[uiHeapIndex] = NULL; + } + } + + if ((!bDoCheck || (eReturn == PVRSRV_OK)) && psCtx->ppsAutoHeapArray) + { + OSFreeMem(psCtx->ppsAutoHeapArray); + psCtx->ppsAutoHeapArray = NULL; + psCtx->uiAutoHeapCount = 0; + } + + return eReturn; +} + +/***************************************************************************** + * Devmem context functions * + *****************************************************************************/ + +IMG_INTERNAL PVRSRV_ERROR +DevmemCreateContext(SHARED_DEV_CONNECTION hDevConnection, + DEVMEM_HEAPCFGID uiHeapBlueprintID, + 
DEVMEM_CONTEXT **ppsCtxPtr) +{ + PVRSRV_ERROR eError; + DEVMEM_CONTEXT *psCtx; + /* handle to the server-side counterpart of the device memory + context (specifically, for handling mapping to device MMU) */ + IMG_HANDLE hDevMemServerContext; + IMG_HANDLE hPrivData; + IMG_BOOL bHeapCfgMetaId = (uiHeapBlueprintID == DEVMEM_HEAPCFG_META); + + PVR_GOTO_IF_NOMEM(ppsCtxPtr, eError, e0); + + psCtx = OSAllocMem(sizeof(*psCtx)); + PVR_GOTO_IF_NOMEM(psCtx, eError, e0); + + psCtx->uiNumHeaps = 0; + + psCtx->hDevConnection = hDevConnection; + + /* Create (server-side) Device Memory context */ + eError = BridgeDevmemIntCtxCreate(GetBridgeHandle(psCtx->hDevConnection), + bHeapCfgMetaId, + &hDevMemServerContext, + &hPrivData, + &psCtx->ui32CPUCacheLineSize); + PVR_GOTO_IF_ERROR(eError, e1); + + psCtx->hDevMemServerContext = hDevMemServerContext; + psCtx->hPrivData = hPrivData; + + /* automagic heap creation */ + psCtx->uiAutoHeapCount = 0; + + eError = PopulateContextFromBlueprint(psCtx, uiHeapBlueprintID); + PVR_GOTO_IF_ERROR(eError, e2); + + *ppsCtxPtr = psCtx; + + PVR_ASSERT(psCtx->uiNumHeaps == psCtx->uiAutoHeapCount); + return PVRSRV_OK; + + /* error exit paths follow */ + +e2: + PVR_ASSERT(psCtx->uiAutoHeapCount == 0); + PVR_ASSERT(psCtx->uiNumHeaps == 0); + BridgeDevmemIntCtxDestroy(GetBridgeHandle(psCtx->hDevConnection), hDevMemServerContext); + +e1: + OSFreeMem(psCtx); + +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemAcquireDevPrivData(DEVMEM_CONTEXT *psCtx, + IMG_HANDLE *hPrivData) +{ + PVRSRV_ERROR eError; + + PVR_GOTO_IF_INVALID_PARAM(psCtx, eError, e0); + PVR_GOTO_IF_INVALID_PARAM(hPrivData, eError, e0); + + *hPrivData = psCtx->hPrivData; + return PVRSRV_OK; + +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemReleaseDevPrivData(DEVMEM_CONTEXT *psCtx) +{ + PVRSRV_ERROR eError; + + PVR_GOTO_IF_INVALID_PARAM(psCtx, eError, e0); + return PVRSRV_OK; + +e0: + PVR_ASSERT(eError != 
PVRSRV_OK); + return eError; +} + + +IMG_INTERNAL PVRSRV_ERROR +DevmemFindHeapByName(const struct DEVMEM_CONTEXT_TAG *psCtx, + const IMG_CHAR *pszHeapName, + struct DEVMEM_HEAP_TAG **ppsHeapRet) +{ + IMG_UINT32 uiHeapIndex; + + /* N.B. This func is only useful for finding "automagic" heaps by name */ + for (uiHeapIndex = 0; + uiHeapIndex < psCtx->uiAutoHeapCount; + uiHeapIndex++) + { + if (!OSStringNCompare(psCtx->ppsAutoHeapArray[uiHeapIndex]->pszName, pszHeapName, OSStringLength(psCtx->ppsAutoHeapArray[uiHeapIndex]->pszName) + 1)) + { + *ppsHeapRet = psCtx->ppsAutoHeapArray[uiHeapIndex]; + return PVRSRV_OK; + } + } + + return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemDestroyContext(DEVMEM_CONTEXT *psCtx) +{ + PVRSRV_ERROR eError; + IMG_BOOL bDoCheck = IMG_TRUE; + +#if defined(__KERNEL__) + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + bDoCheck = IMG_FALSE; + } +#endif + + PVR_RETURN_IF_INVALID_PARAM(psCtx); + + eError = UnpopulateContextFromBlueprint(psCtx); + if (bDoCheck && eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: UnpopulateContextFromBlueprint failed (%d) leaving %d heaps", + __func__, eError, psCtx->uiNumHeaps)); + goto e1; + } + + eError = BridgeDevmemIntCtxDestroy(GetBridgeHandle(psCtx->hDevConnection), + psCtx->hDevMemServerContext); + if (bDoCheck && eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: BridgeDevmemIntCtxDestroy failed (%d)", + __func__, eError)); + goto e1; + } + + /* should be no more heaps left */ + if (bDoCheck && psCtx->uiNumHeaps) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Additional heaps remain in DEVMEM_CONTEXT", + __func__)); + eError = PVRSRV_ERROR_DEVICEMEM_ADDITIONAL_HEAPS_IN_CONTEXT; + goto e1; + } + + OSCachedMemSet(psCtx, 0, sizeof(*psCtx)); + OSFreeMem(psCtx); + +e1: + return eError; +} + +/***************************************************************************** + * Devmem heap query 
functions * + *****************************************************************************/ + +IMG_INTERNAL PVRSRV_ERROR +DevmemHeapConfigCount(SHARED_DEV_CONNECTION hDevConnection, + IMG_UINT32 *puiNumHeapConfigsOut) +{ + PVRSRV_ERROR eError; + eError = BridgeHeapCfgHeapConfigCount(GetBridgeHandle(hDevConnection), + puiNumHeapConfigsOut); + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemHeapCount(SHARED_DEV_CONNECTION hDevConnection, + IMG_UINT32 uiHeapConfigIndex, + IMG_UINT32 *puiNumHeapsOut) +{ + PVRSRV_ERROR eError; + eError = BridgeHeapCfgHeapCount(GetBridgeHandle(hDevConnection), + uiHeapConfigIndex, + puiNumHeapsOut); + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemHeapConfigName(SHARED_DEV_CONNECTION hDevConnection, + IMG_UINT32 uiHeapConfigIndex, + IMG_CHAR *pszConfigNameOut, + IMG_UINT32 uiConfigNameBufSz) +{ + PVRSRV_ERROR eError; + eError = BridgeHeapCfgHeapConfigName(GetBridgeHandle(hDevConnection), + uiHeapConfigIndex, + uiConfigNameBufSz, + pszConfigNameOut); + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemHeapDetails(SHARED_DEV_CONNECTION hDevConnection, + IMG_UINT32 uiHeapConfigIndex, + IMG_UINT32 uiHeapIndex, + IMG_CHAR *pszHeapNameOut, + IMG_UINT32 uiHeapNameBufSz, + IMG_DEV_VIRTADDR *psDevVAddrBaseOut, + IMG_DEVMEM_SIZE_T *puiHeapLengthOut, + IMG_DEVMEM_SIZE_T *puiReservedRegionLengthOut, + IMG_UINT32 *puiLog2DataPageSizeOut, + IMG_UINT32 *puiLog2ImportAlignmentOut) +{ + PVRSRV_ERROR eError; + + eError = BridgeHeapCfgHeapDetails(GetBridgeHandle(hDevConnection), + uiHeapConfigIndex, + uiHeapIndex, + uiHeapNameBufSz, + pszHeapNameOut, + psDevVAddrBaseOut, + puiHeapLengthOut, + puiReservedRegionLengthOut, + puiLog2DataPageSizeOut, + puiLog2ImportAlignmentOut); + + VG_MARK_INITIALIZED(pszHeapNameOut, uiHeapNameBufSz); + + return eError; +} + +/***************************************************************************** + * Devmem heap functions * + 
*****************************************************************************/ + +IMG_INTERNAL PVRSRV_ERROR +DevmemGetHeapInt(DEVMEM_HEAP *psHeap, + IMG_HANDLE *phDevmemHeap) +{ + PVR_RETURN_IF_INVALID_PARAM(psHeap); + *phDevmemHeap = psHeap->hDevMemServerHeap; + return PVRSRV_OK; +} + +/* See devicemem.h for important notes regarding the arguments + to this function */ +IMG_INTERNAL PVRSRV_ERROR +DevmemCreateHeap(DEVMEM_CONTEXT *psCtx, + IMG_DEV_VIRTADDR sBaseAddress, + IMG_DEVMEM_SIZE_T uiLength, + IMG_DEVMEM_SIZE_T uiReservedRegionLength, + IMG_UINT32 ui32Log2Quantum, + IMG_UINT32 ui32Log2ImportAlignment, + const IMG_CHAR *pszName, + DEVMEM_HEAPCFGID uiHeapBlueprintID, + DEVMEM_HEAP **ppsHeapPtr) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_ERROR eError2; + DEVMEM_HEAP *psHeap; + /* handle to the server-side counterpart of the device memory heap + (specifically, for handling mapping to device MMU) */ + IMG_HANDLE hDevMemServerHeap; + IMG_BOOL bRANoSplit = IMG_FALSE; + + IMG_CHAR aszBuf[100]; + IMG_CHAR *pszStr; + IMG_UINT32 ui32pszStrSize; + + if (ppsHeapPtr == NULL || + uiReservedRegionLength % RGX_HEAP_RESERVED_SIZE_GRANULARITY) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, e0); + } + + PVR_ASSERT(uiReservedRegionLength <= uiLength); + + psHeap = OSAllocMem(sizeof(*psHeap)); + PVR_GOTO_IF_NOMEM(psHeap, eError, e0); + + /* Need to keep local copy of heap name, so caller may free theirs */ + ui32pszStrSize = OSStringLength(pszName) + 1; + pszStr = OSAllocMem(ui32pszStrSize); + PVR_GOTO_IF_NOMEM(pszStr, eError, e1); + OSStringLCopy(pszStr, pszName, ui32pszStrSize); + psHeap->pszName = pszStr; + + psHeap->uiSize = uiLength; + psHeap->uiReservedRegionSize = uiReservedRegionLength; + psHeap->sBaseAddress = sBaseAddress; + psHeap->bPremapped = false; + OSAtomicWrite(&psHeap->hImportCount, 0); + + OSSNPrintf(aszBuf, sizeof(aszBuf), + "NDM heap '%s' (suballocs) ctx:%p", + pszName, psCtx); + ui32pszStrSize = OSStringLength(aszBuf) + 1; + pszStr = 
OSAllocMem(ui32pszStrSize); + PVR_GOTO_IF_NOMEM(pszStr, eError, e2); + OSStringLCopy(pszStr, aszBuf, ui32pszStrSize); + psHeap->pszSubAllocRAName = pszStr; + +#if defined(PDUMP) + /* The META heap is shared globally so a single physical memory import + * may be used to satisfy allocations of different processes. + * This is problematic when PDumping because the physical memory + * import used to satisfy a new allocation may actually have been + * imported (and thus the PDump MALLOC generated) before the PDump + * client was started, leading to the MALLOC being missing. + * + * This is solved by disabling splitting of imports for the META physmem + * RA, meaning that every firmware allocation gets its own import, thus + * ensuring the MALLOC is present for every allocation made within the + * pdump capture range + */ + if (uiHeapBlueprintID == DEVMEM_HEAPCFG_META) + { + bRANoSplit = IMG_TRUE; + } +#else + PVR_UNREFERENCED_PARAMETER(uiHeapBlueprintID); +#endif + + psHeap->psSubAllocRA = RA_Create(psHeap->pszSubAllocRAName, + /* Subsequent imports: */ + ui32Log2Quantum, + RA_LOCKCLASS_2, + SubAllocImportAlloc, + SubAllocImportFree, + (RA_PERARENA_HANDLE) psHeap, + bRANoSplit); + if (psHeap->psSubAllocRA == NULL) + { + eError = PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA; + goto e3; + } + + psHeap->uiLog2ImportAlignment = ui32Log2ImportAlignment; + psHeap->uiLog2Quantum = ui32Log2Quantum; + + if (!OSStringNCompare(pszName, RGX_GENERAL_SVM_HEAP_IDENT, sizeof(RGX_GENERAL_SVM_HEAP_IDENT))) + { + /* The SVM heap normally starts out as this type though + it may transition to DEVMEM_HEAP_MANAGER_USER + on platforms with more processor virtual address + bits than device virtual address bits */ + psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_KERNEL; + } + else if (uiReservedRegionLength != 0) + { + /* Heaps which specify reserved VA space range are dual managed: + * - sBaseAddress to (sBaseAddress+uiReservedRegionLength-1): User managed + * - 
(sBaseAddress+uiReservedRegionLength) to uiLength: RA managed + */ + psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_DUAL_USER_RA; + } + else + { + /* Otherwise, heap manager is decided (USER or RA) at first map */ + psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_UNKNOWN; + } + + /* Mark the heap to be managed by RA */ + if (!OSStringNCompare(pszName, RGX_VK_CAPT_REPLAY_BUF_HEAP_IDENT, + sizeof(RGX_VK_CAPT_REPLAY_BUF_HEAP_IDENT))) + { + psHeap->ui32HeapManagerFlags |= DEVMEM_HEAP_MANAGER_RA; + } + + OSSNPrintf(aszBuf, sizeof(aszBuf), + "NDM heap '%s' (QVM) ctx:%p", + pszName, psCtx); + ui32pszStrSize = OSStringLength(aszBuf) + 1; + pszStr = OSAllocMem(ui32pszStrSize); + if (pszStr == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e4; + } + OSStringLCopy(pszStr, aszBuf, ui32pszStrSize); + psHeap->pszQuantizedVMRAName = pszStr; + + psHeap->psQuantizedVMRA = RA_Create(psHeap->pszQuantizedVMRAName, + /* Subsequent import: */ + 0, RA_LOCKCLASS_1, NULL, NULL, + (RA_PERARENA_HANDLE) psHeap, + IMG_FALSE); + if (psHeap->psQuantizedVMRA == NULL) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA, e5); + } + + if (!RA_Add(psHeap->psQuantizedVMRA, + /* Make sure the VMRA doesn't allocate from reserved VAs */ + (RA_BASE_T)sBaseAddress.uiAddr + uiReservedRegionLength, + (RA_LENGTH_T)uiLength, + (RA_FLAGS_T)0, /* This RA doesn't use or need flags */ + NULL /* per ispan handle */)) + { + RA_Delete(psHeap->psQuantizedVMRA); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA, e5); + } + + psHeap->psCtx = psCtx; + + + /* Create server-side counterpart of Device Memory heap */ + eError = BridgeDevmemIntHeapCreate(GetBridgeHandle(psCtx->hDevConnection), + psCtx->hDevMemServerContext, + sBaseAddress, + uiLength, + ui32Log2Quantum, + &hDevMemServerHeap); + PVR_GOTO_IF_ERROR(eError, e6); + + psHeap->hDevMemServerHeap = hDevMemServerHeap; + + eError = OSLockCreate(&psHeap->hLock); + PVR_GOTO_IF_ERROR(eError, e7); + + 
psHeap->psCtx->uiNumHeaps++; + *ppsHeapPtr = psHeap; + +#if defined(PVRSRV_NEWDEVMEM_SUPPORT_MEM_TRACKING) + psHeap->psMemDescList = NULL; +#endif /* PVRSRV_NEWDEVMEM_SUPPORT_MEM_TRACKING */ + + return PVRSRV_OK; + + /* error exit paths */ +e7: + eError2 = BridgeDevmemIntHeapDestroy(GetBridgeHandle(psCtx->hDevConnection), + psHeap->hDevMemServerHeap); + PVR_ASSERT (eError2 == PVRSRV_OK); +e6: + if (psHeap->psQuantizedVMRA) + RA_Delete(psHeap->psQuantizedVMRA); +e5: + if (psHeap->pszQuantizedVMRAName) + OSFreeMem(psHeap->pszQuantizedVMRAName); +e4: + RA_Delete(psHeap->psSubAllocRA); +e3: + OSFreeMem(psHeap->pszSubAllocRAName); +e2: + OSFreeMem(psHeap->pszName); +e1: + OSFreeMem(psHeap); +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemGetHeapBaseDevVAddr(struct DEVMEM_HEAP_TAG *psHeap, + IMG_DEV_VIRTADDR *pDevVAddr) +{ + PVR_RETURN_IF_INVALID_PARAM(psHeap); + + *pDevVAddr = psHeap->sBaseAddress; + + return PVRSRV_OK; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemExportalignAdjustSizeAndAlign(IMG_UINT32 uiLog2Quantum, + IMG_DEVMEM_SIZE_T *puiSize, + IMG_DEVMEM_ALIGN_T *puiAlign) +{ + IMG_DEVMEM_SIZE_T uiSize = *puiSize; + IMG_DEVMEM_ALIGN_T uiAlign = *puiAlign; + + /* Just in case someone changes definition of IMG_DEVMEM_ALIGN_T. */ + static_assert(sizeof(unsigned long long) == sizeof(uiAlign), + "invalid uiAlign size"); + /* This value is used for shifting so it cannot be greater than number + * of bits in unsigned long long (sizeof(1ULL)). Using greater value is + * undefined behaviour. 
*/ + if (uiLog2Quantum >= sizeof(unsigned long long) * 8) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if ((1ULL << uiLog2Quantum) > uiAlign) + { + uiAlign = 1ULL << uiLog2Quantum; + } + uiSize = (uiSize + uiAlign - 1) & ~(uiAlign - 1); + + *puiSize = uiSize; + *puiAlign = uiAlign; + + return PVRSRV_OK; +} + + +IMG_INTERNAL PVRSRV_ERROR +DevmemDestroyHeap(DEVMEM_HEAP *psHeap) +{ + PVRSRV_ERROR eError; + IMG_INT uiImportCount; +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + IMG_BOOL bDoCheck = IMG_TRUE; +#if defined(__KERNEL__) + if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + bDoCheck = IMG_FALSE; + } +#endif +#endif + + PVR_RETURN_IF_INVALID_PARAM(psHeap); + + uiImportCount = OSAtomicRead(&psHeap->hImportCount); + if (uiImportCount > 0) + { + PVR_DPF((PVR_DBG_ERROR, "%d(%s) leaks remain", uiImportCount, psHeap->pszName)); +#if defined(__KERNEL__) +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + PVR_DPF((PVR_DBG_ERROR, "Details of remaining allocated device memory (for all processes):")); + RIDumpAllKM(); +#else + PVR_DPF((PVR_DBG_ERROR, "Compile with PVRSRV_ENABLE_GPU_MEMORY_INFO=1 to get a full " + "list of all driver allocations.")); +#endif +#endif +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + if (bDoCheck) +#endif + { + return PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP; + } + } + + eError = BridgeDevmemIntHeapDestroy(GetBridgeHandle(psHeap->psCtx->hDevConnection), + psHeap->hDevMemServerHeap); +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + if (bDoCheck) +#endif + { + PVR_LOG_RETURN_IF_ERROR(eError, "BridgeDevmemIntHeapDestroy"); + } + + PVR_ASSERT(psHeap->psCtx->uiNumHeaps > 0); + psHeap->psCtx->uiNumHeaps--; + + OSLockDestroy(psHeap->hLock); + + if (psHeap->psQuantizedVMRA) + { + RA_Delete(psHeap->psQuantizedVMRA); + } + if (psHeap->pszQuantizedVMRAName) + { + OSFreeMem(psHeap->pszQuantizedVMRAName); + } + + RA_Delete(psHeap->psSubAllocRA); + OSFreeMem(psHeap->pszSubAllocRAName); + + OSFreeMem(psHeap->pszName); + + 
OSCachedMemSet(psHeap, 0, sizeof(*psHeap)); + OSFreeMem(psHeap); + + return PVRSRV_OK; +} + +/***************************************************************************** + * Devmem allocation/free functions * + *****************************************************************************/ + +IMG_INTERNAL PVRSRV_ERROR +DevmemSubAllocateAndMap(IMG_UINT8 uiPreAllocMultiplier, + DEVMEM_HEAP *psHeap, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiAlign, + DEVMEM_FLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr, + IMG_DEV_VIRTADDR *psDevVirtAddr) +{ + PVRSRV_ERROR eError; + eError = DevmemSubAllocate(uiPreAllocMultiplier, + psHeap, + uiSize, + uiAlign, + uiFlags, + pszText, + ppsMemDescPtr); + PVR_GOTO_IF_ERROR(eError, fail_alloc); + + eError = DevmemMapToDevice(*ppsMemDescPtr, + psHeap, + psDevVirtAddr); + PVR_GOTO_IF_ERROR(eError, fail_map); + + return PVRSRV_OK; + +fail_map: + DevmemFree(*ppsMemDescPtr); +fail_alloc: + ppsMemDescPtr = NULL; + return eError; + +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemSubAllocate(IMG_UINT8 uiPreAllocMultiplier, + DEVMEM_HEAP *psHeap, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiAlign, + DEVMEM_FLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr) +{ + RA_BASE_T uiAllocatedAddr = 0; + RA_LENGTH_T uiAllocatedSize; + RA_PERISPAN_HANDLE hImport; /* the "import" from which this sub-allocation came */ + PVRSRV_ERROR eError; + DEVMEM_MEMDESC *psMemDesc = NULL; + IMG_DEVMEM_OFFSET_T uiOffset = 0; + DEVMEM_IMPORT *psImport; + IMG_UINT32 ui32CPUCacheLineSize; + void *pvAddr = NULL; + + IMG_BOOL bImportClean; + IMG_BOOL bCPUCleanFlag = PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags); + IMG_BOOL bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags); + IMG_BOOL bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags); + IMG_BOOL bCPUCached = (PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) || + PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags)); + IMG_BOOL bGPUCached = (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) || + 
PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags)); + IMG_BOOL bAlign = ! (PVRSRV_CHECK_NO_CACHE_LINE_ALIGN(uiFlags)); + PVRSRV_CACHE_OP eOp = PVRSRV_CACHE_OP_INVALIDATE; + IMG_UINT32 ui32CacheLineSize = 0; + DEVMEM_PROPERTIES_T uiProperties; + + if (uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) + { + /* Deferred Allocation not supported on SubAllocs*/ + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failParams); + } + + PVR_GOTO_IF_INVALID_PARAM(psHeap, eError, failParams); + PVR_GOTO_IF_INVALID_PARAM(psHeap->psCtx, eError, failParams); + PVR_GOTO_IF_INVALID_PARAM(ppsMemDescPtr, eError, failParams); + + uiFlags = DevmemOverrideFlagsOrPassThrough(psHeap->psCtx->hDevConnection, uiFlags); + +#if defined(__KERNEL__) + { + /* The hDevConnection holds two different types of pointers depending on the + * address space in which it is used. + * In this instance the variable points to the device node in server */ + PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psHeap->psCtx->hDevConnection; + ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, SLC_CACHE_LINE_SIZE_BITS)); + } +#else + ui32CacheLineSize = ROGUE_CACHE_LINE_SIZE; +#endif + + /* The following logic makes sure that any cached memory is aligned to both the CPU and GPU. + * To be aligned on both you have to take the Lowest Common Multiple (LCM) of the cache line sizes of each. + * As the possibilities are all powers of 2 then simply the largest number can be picked as the LCM. + * Therefore this algorithm just picks the highest from the CPU, GPU and given alignments. + */ + ui32CPUCacheLineSize = psHeap->psCtx->ui32CPUCacheLineSize; + /* If the CPU cache line size is larger than the alignment given then it is the lowest common multiple + * Also checking if the allocation is going to be cached on the CPU + * Currently there is no check for the validity of the cache coherent option. 
+ * In this case, the alignment could be applied but the mode could still fall back to uncached. + */ + if (bAlign && ui32CPUCacheLineSize > uiAlign && bCPUCached) + { + uiAlign = ui32CPUCacheLineSize; + } + + /* If the GPU cache line size is larger than the alignment given then it is the lowest common multiple + * Also checking if the allocation is going to be cached on the GPU via checking for any of the cached options. + * Currently there is no check for the validity of the cache coherent option. + * In this case, the alignment could be applied but the mode could still fall back to uncached. + */ + if (bAlign && ui32CacheLineSize > uiAlign && bGPUCached) + { + uiAlign = ui32CacheLineSize; + } + + eError = DevmemValidateParams(uiSize, + uiAlign, + &uiFlags); + PVR_GOTO_IF_ERROR(eError, failParams); + + eError = DevmemMemDescAlloc(&psMemDesc); + PVR_GOTO_IF_ERROR(eError, failMemDescAlloc); + + /* No request for exportable memory so use the RA */ + eError = RA_Alloc(psHeap->psSubAllocRA, + uiSize, + uiPreAllocMultiplier, + uiFlags, + uiAlign, + pszText, + &uiAllocatedAddr, + &uiAllocatedSize, + &hImport); + PVR_GOTO_IF_ERROR(eError, failDeviceMemAlloc); + + psImport = hImport; + + /* This assignment is assuming the RA returns an hImport where suballocations + * can be made from if uiSize is NOT a page multiple of the passed heap. + * + * So we check if uiSize is a page multiple and mark it as exportable + * if it is not. 
+ * */ + OSLockAcquire(psImport->hLock); + if (!(uiSize & ((1ULL << psHeap->uiLog2Quantum) - 1)) && + (uiPreAllocMultiplier == RA_NO_IMPORT_MULTIPLIER)) + { + psImport->uiProperties |= DEVMEM_PROPERTIES_EXPORTABLE; + } + psImport->uiProperties |= DEVMEM_PROPERTIES_SUBALLOCATABLE; + uiProperties = psImport->uiProperties; + OSLockRelease(psImport->hLock); + + uiOffset = uiAllocatedAddr - psImport->sDeviceImport.sDevVAddr.uiAddr; + +#if defined(PDUMP) && defined(DEBUG) +#if defined(__KERNEL__) + PDUMPCOMMENTWITHFLAGS(PDUMP_CONT, + "Suballocated %u Byte for \"%s\" from PMR with handle ID: 0x%p (PID %u)", + (IMG_UINT32) uiSize, pszText, psImport->hPMR, OSGetCurrentProcessID()); +#else + PDUMPCOMMENTF(psHeap->psCtx->hDevConnection, PDUMP_FLAGS_CONTINUOUS, + "Suballocated %u Byte for \"%s\" from PMR with handle ID: %p (PID %u)", + (IMG_UINT32) uiSize, + pszText, + psImport->hPMR, + OSGetCurrentProcessID()); +#endif +#endif + + DevmemMemDescInit(psMemDesc, + uiOffset, + psImport, + uiSize); + + bImportClean = ((uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_CLEAN) != 0); + + /* Zero the memory */ + if (bZero) + { + /* Has the import been zeroed on allocation and were no suballocations returned to it so far? */ + bImportClean = bImportClean && ((uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_ZEROED) != 0); + + if (!bImportClean) + { + eOp = PVRSRV_CACHE_OP_FLUSH; + + eError = DevmemAcquireCpuVirtAddr(psMemDesc, &pvAddr); + PVR_GOTO_IF_ERROR(eError, failMaintenance); + + /* uiSize is a 64-bit quantity whereas the 3rd argument + * to OSDeviceMemSet is a 32-bit quantity on 32-bit systems + * hence a compiler warning of implicit cast and loss of data. + * Added explicit cast and assert to remove warning. 
+ */ + PVR_ASSERT(uiSize < IMG_UINT32_MAX); + + OSDeviceMemSet(pvAddr, 0x0, (size_t) uiSize); +#if defined(PDUMP) + DevmemPDumpLoadZeroMem(psMemDesc, 0, uiSize, PDUMP_FLAGS_CONTINUOUS); +#endif + } + } + else if (bPoisonOnAlloc) + { + /* Has the import been poisoned on allocation and were no suballocations returned to it so far? */ + bPoisonOnAlloc = (uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_POISONED) != 0; + + if (!bPoisonOnAlloc) + { + eOp = PVRSRV_CACHE_OP_FLUSH; + + eError = DevmemAcquireCpuVirtAddr(psMemDesc, &pvAddr); + PVR_GOTO_IF_ERROR(eError, failMaintenance); + + if (PVRSRV_CHECK_CPU_UNCACHED(uiFlags) || + PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags)) + { + OSDeviceMemSet(pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE, + uiSize); + } + else + { + OSCachedMemSet(pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE, + uiSize); + } + + bPoisonOnAlloc = IMG_TRUE; + } + } + + /* Flush or invalidate */ + if (bCPUCached && !bImportClean && (bZero || bCPUCleanFlag || bPoisonOnAlloc)) + { + /* BridgeCacheOpQueue _may_ be deferred so use BridgeCacheOpExec + to ensure this cache maintenance is actioned immediately */ + eError = BridgeCacheOpExec (GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + (IMG_UINT64)(uintptr_t) + pvAddr - psMemDesc->uiOffset, + psMemDesc->uiOffset, + psMemDesc->uiAllocSize, + eOp); + PVR_GOTO_IF_ERROR(eError, failMaintenance); + } + + if (pvAddr) + { + DevmemReleaseCpuVirtAddr(psMemDesc); + pvAddr = NULL; + } + + /* copy the allocation descriptive name and size so it can be passed to DevicememHistory when + * the allocation gets mapped/unmapped + */ + CheckAnnotationLength(pszText); + OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN); + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI)) + { + /* Attach RI information */ + eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psMemDesc->psImport->hDevConnection), + 
psMemDesc->psImport->hPMR, + OSStringNLength(psMemDesc->szText, DEVMEM_ANNOTATION_MAX_LEN), + psMemDesc->szText, + psMemDesc->uiOffset, + uiAllocatedSize, + IMG_FALSE, + IMG_TRUE, + &(psMemDesc->hRIHandle)); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError)); + } + } +#else /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + PVR_UNREFERENCED_PARAMETER (pszText); +#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + + *ppsMemDescPtr = psMemDesc; + + return PVRSRV_OK; + + /* error exit paths follow */ + +failMaintenance: + if (pvAddr) + { + DevmemReleaseCpuVirtAddr(psMemDesc); + pvAddr = NULL; + } + DevmemMemDescRelease(psMemDesc); + psMemDesc = NULL; /* Make sure we don't do a discard after the release */ +failDeviceMemAlloc: + if (psMemDesc) + { + DevmemMemDescDiscard(psMemDesc); + } +failMemDescAlloc: +failParams: + PVR_ASSERT(eError != PVRSRV_OK); + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed! Error is %s. 
Allocation size: " IMG_DEVMEM_SIZE_FMTSPEC, + __func__, + PVRSRVGETERRORSTRING(eError), + uiSize)); + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemAllocateExportable(SHARED_DEV_CONNECTION hDevConnection, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiAlign, + IMG_UINT32 uiLog2HeapPageSize, + DEVMEM_FLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr) +{ + PVRSRV_ERROR eError; + DEVMEM_MEMDESC *psMemDesc = NULL; + DEVMEM_IMPORT *psImport; + IMG_UINT32 ui32MappingTable = 0; + + eError = DevmemExportalignAdjustSizeAndAlign(uiLog2HeapPageSize, + &uiSize, + &uiAlign); + PVR_GOTO_IF_ERROR(eError, failParams); + + uiFlags = DevmemOverrideFlagsOrPassThrough(hDevConnection, uiFlags); + + eError = DevmemValidateParams(uiSize, + uiAlign, + &uiFlags); + PVR_GOTO_IF_ERROR(eError, failParams); + + eError = DevmemMemDescAlloc(&psMemDesc); + PVR_GOTO_IF_ERROR(eError, failMemDescAlloc); + + eError = AllocateDeviceMemory(hDevConnection, + uiLog2HeapPageSize, + uiSize, + uiSize, + 1, + 1, + &ui32MappingTable, + uiAlign, + uiFlags, + IMG_TRUE, + pszText, + &psImport); + PVR_GOTO_IF_ERROR(eError, failDeviceMemAlloc); + + DevmemMemDescInit(psMemDesc, + 0, + psImport, + uiSize); + + *ppsMemDescPtr = psMemDesc; + + /* copy the allocation descriptive name and size so it can be passed to DevicememHistory when + * the allocation gets mapped/unmapped + */ + CheckAnnotationLength(pszText); + OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN); + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI)) + { + eError = BridgeRIWritePMREntry (GetBridgeHandle(psImport->hDevConnection), + psImport->hPMR); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWritePMREntry failed (eError=%d)", __func__, eError)); + } + + /* Attach RI information */ + eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psImport->hDevConnection), + psImport->hPMR, + 
sizeof("^"), + "^", + psMemDesc->uiOffset, + uiSize, + IMG_FALSE, + IMG_FALSE, + &psMemDesc->hRIHandle); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError)); + } + } +#else /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + PVR_UNREFERENCED_PARAMETER (pszText); +#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + + return PVRSRV_OK; + + /* error exit paths follow */ + +failDeviceMemAlloc: + DevmemMemDescDiscard(psMemDesc); + +failMemDescAlloc: +failParams: + PVR_ASSERT(eError != PVRSRV_OK); + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed! Error is %s. Allocation size: " IMG_DEVMEM_SIZE_FMTSPEC, + __func__, + PVRSRVGETERRORSTRING(eError), + uiSize)); + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemAllocateSparse(SHARED_DEV_CONNECTION hDevConnection, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_DEVMEM_ALIGN_T uiAlign, + IMG_UINT32 uiLog2HeapPageSize, + DEVMEM_FLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr) +{ + PVRSRV_ERROR eError; + DEVMEM_MEMDESC *psMemDesc = NULL; + DEVMEM_IMPORT *psImport; + + eError = DevmemExportalignAdjustSizeAndAlign(uiLog2HeapPageSize, + &uiSize, + &uiAlign); + PVR_GOTO_IF_ERROR(eError, failParams); + + uiFlags = DevmemOverrideFlagsOrPassThrough(hDevConnection, uiFlags); + + eError = DevmemValidateParams(uiSize, + uiAlign, + &uiFlags); + PVR_GOTO_IF_ERROR(eError, failParams); + + eError = DevmemMemDescAlloc(&psMemDesc); + PVR_GOTO_IF_ERROR(eError, failMemDescAlloc); + + eError = AllocateDeviceMemory(hDevConnection, + uiLog2HeapPageSize, + uiSize, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + uiAlign, + uiFlags, + IMG_TRUE, + pszText, + &psImport); + PVR_GOTO_IF_ERROR(eError, failDeviceMemAlloc); + + DevmemMemDescInit(psMemDesc, + 0, + psImport, + uiSize); + + /* copy the 
allocation descriptive name and size so it can be passed to DevicememHistory when + * the allocation gets mapped/unmapped + */ + CheckAnnotationLength(pszText); + OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN); + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI)) + { + eError = BridgeRIWritePMREntry (GetBridgeHandle(psImport->hDevConnection), + psImport->hPMR); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWritePMREntry failed (eError=%d)", __func__, eError)); + } + + /* Attach RI information */ + eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + sizeof("^"), + "^", + psMemDesc->uiOffset, + uiSize, + IMG_FALSE, + IMG_FALSE, + &psMemDesc->hRIHandle); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError)); + } + } +#else /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + PVR_UNREFERENCED_PARAMETER (pszText); +#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + + *ppsMemDescPtr = psMemDesc; + + return PVRSRV_OK; + + /* error exit paths follow */ + +failDeviceMemAlloc: + DevmemMemDescDiscard(psMemDesc); + +failMemDescAlloc: +failParams: + PVR_ASSERT(eError != PVRSRV_OK); + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed! Error is %s. 
Allocation size: " IMG_DEVMEM_SIZE_FMTSPEC, + __func__, + PVRSRVGETERRORSTRING(eError), + uiSize)); + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemMakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hServerHandle, + IMG_HANDLE *hLocalImportHandle) +{ + return BridgePMRMakeLocalImportHandle(GetBridgeHandle(hDevConnection), + hServerHandle, + hLocalImportHandle); +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemUnmakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hLocalImportHandle) +{ + return BridgePMRUnmakeLocalImportHandle(GetBridgeHandle(hDevConnection), hLocalImportHandle); +} + +/***************************************************************************** + * Devmem unsecure export functions * + *****************************************************************************/ + +#if defined(SUPPORT_INSECURE_EXPORT) + +static PVRSRV_ERROR +_Mapping_Export(DEVMEM_IMPORT *psImport, + DEVMEM_EXPORTHANDLE *phPMRExportHandlePtr, + DEVMEM_EXPORTKEY *puiExportKeyPtr, + DEVMEM_SIZE_T *puiSize, + DEVMEM_LOG2ALIGN_T *puiLog2Contig) +{ + /* Gets an export handle and key for the PMR used for this mapping */ + /* Can only be done if there are no suballocations for this mapping */ + + PVRSRV_ERROR eError; + DEVMEM_EXPORTHANDLE hPMRExportHandle; + DEVMEM_EXPORTKEY uiExportKey; + IMG_DEVMEM_SIZE_T uiSize; + IMG_DEVMEM_LOG2ALIGN_T uiLog2Contig; + + PVR_GOTO_IF_INVALID_PARAM(psImport, eError, failParams); + + if ((GetImportProperties(psImport) & DEVMEM_PROPERTIES_EXPORTABLE) == 0) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION, failParams); + } + + eError = BridgePMRExportPMR(GetBridgeHandle(psImport->hDevConnection), + psImport->hPMR, + &hPMRExportHandle, + &uiSize, + &uiLog2Contig, + &uiExportKey); + PVR_GOTO_IF_ERROR(eError, failExport); + + PVR_ASSERT(uiSize == psImport->uiSize); + + *phPMRExportHandlePtr = hPMRExportHandle; + *puiExportKeyPtr = uiExportKey; + *puiSize = uiSize; + *puiLog2Contig = 
uiLog2Contig; + + return PVRSRV_OK; + + /* error exit paths follow */ + +failExport: +failParams: + + PVR_ASSERT(eError != PVRSRV_OK); + return eError; + +} + +static void +_Mapping_Unexport(DEVMEM_IMPORT *psImport, + DEVMEM_EXPORTHANDLE hPMRExportHandle) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT (psImport != NULL); + + eError = BridgePMRUnexportPMR(GetBridgeHandle(psImport->hDevConnection), + hPMRExportHandle); + PVR_ASSERT(eError == PVRSRV_OK); +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemExport(DEVMEM_MEMDESC *psMemDesc, + DEVMEM_EXPORTCOOKIE *psExportCookie) +{ + /* Caller to provide storage for export cookie struct */ + PVRSRV_ERROR eError; + IMG_HANDLE hPMRExportHandle = 0; + IMG_UINT64 uiPMRExportPassword = 0; + IMG_DEVMEM_SIZE_T uiSize = 0; + IMG_DEVMEM_LOG2ALIGN_T uiLog2Contig = 0; + + PVR_GOTO_IF_INVALID_PARAM(psMemDesc, eError, e0); + PVR_GOTO_IF_INVALID_PARAM(psExportCookie, eError, e0); + + if (DEVMEM_PROPERTIES_EXPORTABLE != + (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_EXPORTABLE)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: This Memory (0x%p) cannot be exported!...", + __func__, psMemDesc)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_REQUEST, e0); + } + + eError = _Mapping_Export(psMemDesc->psImport, + &hPMRExportHandle, + &uiPMRExportPassword, + &uiSize, + &uiLog2Contig); + if (eError != PVRSRV_OK) + { + psExportCookie->uiSize = 0; + goto e0; + } + + psExportCookie->hPMRExportHandle = hPMRExportHandle; + psExportCookie->uiPMRExportPassword = uiPMRExportPassword; + psExportCookie->uiSize = uiSize; + psExportCookie->uiLog2ContiguityGuarantee = uiLog2Contig; + + return PVRSRV_OK; + + /* error exit paths follow */ + +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +IMG_INTERNAL void +DevmemUnexport(DEVMEM_MEMDESC *psMemDesc, + DEVMEM_EXPORTCOOKIE *psExportCookie) +{ + _Mapping_Unexport(psMemDesc->psImport, + psExportCookie->hPMRExportHandle); + + psExportCookie->uiSize = 0; +} + +IMG_INTERNAL PVRSRV_ERROR 
DevmemImport(SHARED_DEV_CONNECTION hDevConnection,
             DEVMEM_EXPORTCOOKIE *psCookie,
             DEVMEM_FLAGS_T uiFlags,
             DEVMEM_MEMDESC **ppsMemDescPtr)
{
	/* Build a local memdesc/import pair around a PMR previously exported
	 * into *psCookie (see DevmemExport). Size/contiguity in the cookie are
	 * untrusted and only used for server-side sanity checks. */
	DEVMEM_MEMDESC *psMemDesc = NULL;
	DEVMEM_IMPORT *psImport;
	IMG_HANDLE hPMR;
	PVRSRV_ERROR eError;

	PVR_GOTO_IF_INVALID_PARAM(ppsMemDescPtr, eError, failParams);

	eError = DevmemMemDescAlloc(&psMemDesc);
	PVR_GOTO_IF_ERROR(eError, failMemDescAlloc);

	eError = DevmemImportStructAlloc(hDevConnection,
	                                 &psImport);
	if (eError != PVRSRV_OK)
	{
		PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, failImportAlloc);
	}

	/* Get a handle to the PMR (inc refcount) */
	eError = BridgePMRImportPMR(GetBridgeHandle(hDevConnection),
	                            psCookie->hPMRExportHandle,
	                            psCookie->uiPMRExportPassword,
	                            psCookie->uiSize, /* not trusted - just for sanity checks */
	                            psCookie->uiLog2ContiguityGuarantee, /* not trusted - just for sanity checks */
	                            &hPMR);
	PVR_GOTO_IF_ERROR(eError, failImport);

	DevmemImportStructInit(psImport,
	                       psCookie->uiSize,
	                       1ULL << psCookie->uiLog2ContiguityGuarantee,
	                       uiFlags,
	                       hPMR,
	                       DEVMEM_PROPERTIES_IMPORTED |
	                       DEVMEM_PROPERTIES_EXPORTABLE);

	DevmemMemDescInit(psMemDesc,
	                  0,
	                  psImport,
	                  psImport->uiSize);

	*ppsMemDescPtr = psMemDesc;

#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
	if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI))
	{
		/* Attach RI information */
		eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psMemDesc->psImport->hDevConnection),
		                                    psMemDesc->psImport->hPMR,
		                                    sizeof("^"),
		                                    "^",
		                                    psMemDesc->uiOffset,
		                                    psMemDesc->psImport->uiSize,
		                                    IMG_TRUE,
		                                    IMG_TRUE,
		                                    &psMemDesc->hRIHandle);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError));
		}
	}
#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */

	return PVRSRV_OK;

	/* error exit paths follow */

failImport:
	DevmemImportDiscard(psImport);
failImportAlloc:
	DevmemMemDescDiscard(psMemDesc);
failMemDescAlloc:
failParams:
	PVR_ASSERT(eError != PVRSRV_OK);

	return eError;
}

#endif /* SUPPORT_INSECURE_EXPORT */

/*****************************************************************************
 * Common MemDesc functions                                                  *
 *****************************************************************************/
/* Unpin an allocation's physical backing via the server. Refused when the
 * layout may not change, when suballocations may exist (import not
 * EXPORTABLE), or while a CPU mapping is still held. */
IMG_INTERNAL PVRSRV_ERROR
DevmemUnpin(DEVMEM_MEMDESC *psMemDesc)
{
	PVRSRV_ERROR eError = PVRSRV_OK;
	DEVMEM_IMPORT *psImport = psMemDesc->psImport;
	DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psImport);

	if (uiProperties & DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE)
	{
		eError = PVRSRV_ERROR_INVALID_REQUEST;
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: The passed allocation is not valid to unpin",
		         __func__));

		goto e_exit;
	}

	/* Stop if the allocation might have suballocations. */
	if (!(uiProperties & DEVMEM_PROPERTIES_EXPORTABLE))
	{
		eError = PVRSRV_ERROR_INVALID_PARAMS;
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: The passed allocation is not valid to unpin because "
		         "there might be suballocations on it. Make sure you allocate a page multiple "
		         "of the heap when using PVRSRVAllocDeviceMem()",
		         __func__));

		goto e_exit;
	}

	/* Stop if the Import is still mapped to CPU */
	if (psImport->sCPUImport.ui32RefCount)
	{
		eError = PVRSRV_ERROR_STILL_MAPPED;
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: There are still %u references on the CPU mapping. "
		         "Please remove all CPU mappings before unpinning.",
		         __func__,
		         psImport->sCPUImport.ui32RefCount));

		goto e_exit;
	}

	/* Only unpin if it is not already unpinned
	 * Return PVRSRV_OK */
	if (uiProperties & DEVMEM_PROPERTIES_UNPINNED)
	{
		goto e_exit;
	}

	/* Unpin it and invalidate mapping */
	if (psImport->sDeviceImport.bMapped)
	{
		eError = BridgeDevmemIntUnpinInvalidate(GetBridgeHandle(psImport->hDevConnection),
		                                        psImport->sDeviceImport.hMapping,
		                                        psImport->hPMR);
	}
	else
	{
		/* Or just unpin it */
		eError = BridgeDevmemIntUnpin(GetBridgeHandle(psImport->hDevConnection),
		                              psImport->hPMR);
	}

	/* Update flags and RI when call was successful */
	if (eError == PVRSRV_OK)
	{
		OSLockAcquire(psImport->hLock);
		psImport->uiProperties |= DEVMEM_PROPERTIES_UNPINNED;
		OSLockRelease(psImport->hLock);
	}
	else
	{
		/* Or just show what went wrong */
		PVR_DPF((PVR_DBG_ERROR, "%s: Unpin aborted because of error %d",
		         __func__,
		         eError));
	}

e_exit:
	return eError;
}


/* Re-pin an allocation previously unpinned with DevmemUnpin(). NOTE(review):
 * PVRSRV_ERROR_PMR_NEW_MEMORY is also treated as success here — presumably
 * it signals the backing was re-allocated with fresh (uninitialised) pages;
 * confirm against the server-side bridge implementation. */
IMG_INTERNAL PVRSRV_ERROR
DevmemPin(DEVMEM_MEMDESC *psMemDesc)
{
	PVRSRV_ERROR eError = PVRSRV_OK;
	DEVMEM_IMPORT *psImport = psMemDesc->psImport;
	DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psImport);

	if (uiProperties & DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE)
	{
		PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_REQUEST, e_exit);
	}

	/* Only pin if it is unpinned */
	if ((uiProperties & DEVMEM_PROPERTIES_UNPINNED) == 0)
	{
		goto e_exit;
	}

	/* Pin it and make mapping valid */
	if (psImport->sDeviceImport.bMapped)
	{
		eError = BridgeDevmemIntPinValidate(GetBridgeHandle(psImport->hDevConnection),
		                                    psImport->sDeviceImport.hMapping,
		                                    psImport->hPMR);
	}
	else
	{
		/* Or just pin it */
		eError = BridgeDevmemIntPin(GetBridgeHandle(psImport->hDevConnection),
		                            psImport->hPMR);
	}

	if ((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_PMR_NEW_MEMORY))
	{
		OSLockAcquire(psImport->hLock);
		psImport->uiProperties &=
~DEVMEM_PROPERTIES_UNPINNED; + OSLockRelease(psImport->hLock); + } + else + { + /* Or just show what went wrong */ + PVR_DPF((PVR_DBG_ERROR, "%s: Pin aborted because of error %d", + __func__, + eError)); + } + +e_exit: + return eError; +} + + +IMG_INTERNAL PVRSRV_ERROR +DevmemGetSize(DEVMEM_MEMDESC *psMemDesc, IMG_DEVMEM_SIZE_T* puiSize) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + *puiSize = psMemDesc->uiAllocSize; + + return eError; +} + +IMG_INTERNAL void +DevmemGetAnnotation(DEVMEM_MEMDESC *psMemDesc, IMG_CHAR **pszAnnotation) +{ + /* + * It is expected that psMemDesc->szText is a valid NUL-terminated string, + * since DevmemMemDescAlloc uses OSAllocZMem to create the memdesc. + */ + *pszAnnotation = psMemDesc->szText; +} + +/* + This function is called for freeing any class of memory + */ +IMG_INTERNAL IMG_BOOL +DevmemFree(DEVMEM_MEMDESC *psMemDesc) +{ + if (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_SECURE) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Please use methods dedicated to secure buffers.", + __func__)); + return IMG_FALSE; + } + + return DevmemMemDescRelease(psMemDesc); +} + +IMG_INTERNAL IMG_BOOL +DevmemReleaseDevAddrAndFree(DEVMEM_MEMDESC *psMemDesc) +{ + DevmemReleaseDevVirtAddr(psMemDesc); + return DevmemFree(psMemDesc); +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemMapToDevice(DEVMEM_MEMDESC *psMemDesc, + DEVMEM_HEAP *psHeap, + IMG_DEV_VIRTADDR *psDevVirtAddr) +{ + DEVMEM_IMPORT *psImport; + IMG_DEV_VIRTADDR sDevVAddr; + PVRSRV_ERROR eError; + IMG_BOOL bMap = IMG_TRUE; + IMG_BOOL bDestroyed = IMG_FALSE; + IMG_UINT64 ui64OptionalMapAddress = DEVICEMEM_UTILS_NO_ADDRESS; + DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psMemDesc->psImport); + + /* Do not try to map unpinned memory */ + if (uiProperties & DEVMEM_PROPERTIES_UNPINNED) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_MAP_REQUEST, failFlags); + } + + OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock); + PVR_GOTO_IF_INVALID_PARAM(psHeap, eError, failParams); + + if 
(psMemDesc->sDeviceMemDesc.ui32RefCount != 0) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED, failCheck); + } + + /* Don't map memory for deferred allocations */ + if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) + { + PVR_ASSERT(uiProperties & DEVMEM_PROPERTIES_EXPORTABLE); + bMap = IMG_FALSE; + } + + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psMemDesc, + psMemDesc->sDeviceMemDesc.ui32RefCount, + psMemDesc->sDeviceMemDesc.ui32RefCount+1); + + psImport = psMemDesc->psImport; + DevmemMemDescAcquire(psMemDesc); + +#if defined(__KERNEL__) + if (psHeap->bPremapped) + { + ui64OptionalMapAddress = _GetPremappedVA(psImport->hPMR, psHeap->psCtx->hDevConnection); + } +#endif + + eError = DevmemImportStructDevMap(psHeap, + bMap, + psImport, + ui64OptionalMapAddress); + PVR_GOTO_IF_ERROR(eError, failMap); + + sDevVAddr.uiAddr = psImport->sDeviceImport.sDevVAddr.uiAddr; + sDevVAddr.uiAddr += psMemDesc->uiOffset; + psMemDesc->sDeviceMemDesc.sDevVAddr = sDevVAddr; + psMemDesc->sDeviceMemDesc.ui32RefCount++; + + *psDevVirtAddr = psMemDesc->sDeviceMemDesc.sDevVAddr; + + OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); + + if (GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + BridgeDevicememHistoryMap(GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + psMemDesc->uiOffset, + psMemDesc->sDeviceMemDesc.sDevVAddr, + psMemDesc->uiAllocSize, + psMemDesc->szText, + DevmemGetHeapLog2PageSize(psHeap), + psMemDesc->ui32AllocationIndex, + &psMemDesc->ui32AllocationIndex); + } + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI)) + { + if (psMemDesc->hRIHandle) + { + eError = BridgeRIUpdateMEMDESCAddr(GetBridgeHandle(psImport->hDevConnection), + psMemDesc->hRIHandle, + psImport->sDeviceImport.sDevVAddr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, 
"%s: call to BridgeRIUpdateMEMDESCAddr failed (eError=%d)", __func__, eError)); + } + } + } +#endif + + return PVRSRV_OK; + +failMap: + bDestroyed = DevmemMemDescRelease(psMemDesc); +failCheck: +failParams: + if (!bDestroyed) + { + OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); + } + PVR_ASSERT(eError != PVRSRV_OK); +failFlags: + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemMapToDeviceAddress(DEVMEM_MEMDESC *psMemDesc, + DEVMEM_HEAP *psHeap, + IMG_DEV_VIRTADDR sDevVirtAddr) +{ + DEVMEM_IMPORT *psImport; + IMG_DEV_VIRTADDR sDevVAddr; + PVRSRV_ERROR eError; + IMG_BOOL bMap = IMG_TRUE; + IMG_BOOL bDestroyed = IMG_FALSE; + DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psMemDesc->psImport); + + /* Do not try to map unpinned memory */ + if (uiProperties & DEVMEM_PROPERTIES_UNPINNED) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_MAP_REQUEST, failFlags); + } + + OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock); + PVR_GOTO_IF_INVALID_PARAM(psHeap, eError, failParams); + + if (psMemDesc->sDeviceMemDesc.ui32RefCount != 0) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED, failCheck); + } + + /* Don't map memory for deferred allocations */ + if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) + { + PVR_ASSERT(uiProperties & DEVMEM_PROPERTIES_EXPORTABLE); + bMap = IMG_FALSE; + } + + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psMemDesc, + psMemDesc->sDeviceMemDesc.ui32RefCount, + psMemDesc->sDeviceMemDesc.ui32RefCount+1); + + psImport = psMemDesc->psImport; + DevmemMemDescAcquire(psMemDesc); + + eError = DevmemImportStructDevMap(psHeap, + bMap, + psImport, + sDevVirtAddr.uiAddr); + PVR_GOTO_IF_ERROR(eError, failMap); + + sDevVAddr.uiAddr = psImport->sDeviceImport.sDevVAddr.uiAddr; + sDevVAddr.uiAddr += psMemDesc->uiOffset; + psMemDesc->sDeviceMemDesc.sDevVAddr = sDevVAddr; + psMemDesc->sDeviceMemDesc.ui32RefCount++; + + OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); + + if 
(GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + BridgeDevicememHistoryMap(GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + psMemDesc->uiOffset, + psMemDesc->sDeviceMemDesc.sDevVAddr, + psMemDesc->uiAllocSize, + psMemDesc->szText, + DevmemGetHeapLog2PageSize(psHeap), + psMemDesc->ui32AllocationIndex, + &psMemDesc->ui32AllocationIndex); + } + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI)) + { + if (psMemDesc->hRIHandle) + { + eError = BridgeRIUpdateMEMDESCAddr(GetBridgeHandle(psImport->hDevConnection), + psMemDesc->hRIHandle, + psImport->sDeviceImport.sDevVAddr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIUpdateMEMDESCAddr failed (eError=%d)", __func__, eError)); + } + } + } +#endif + + return PVRSRV_OK; + +failMap: + bDestroyed = DevmemMemDescRelease(psMemDesc); +failCheck: +failParams: + if (!bDestroyed) + { + OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); + } + PVR_ASSERT(eError != PVRSRV_OK); +failFlags: + return eError; +} + +IMG_INTERNAL IMG_DEV_VIRTADDR +DevmemGetDevVirtAddr(DEVMEM_MEMDESC *psMemDesc) +{ + if (psMemDesc->sDeviceMemDesc.ui32RefCount == 0) + { + PVR_LOG_ERROR(PVRSRV_ERROR_DEVICEMEM_NO_MAPPING, "DevmemGetDevVirtAddr"); + } + + PVR_ASSERT(psMemDesc->sDeviceMemDesc.sDevVAddr.uiAddr !=0 ); + + return psMemDesc->sDeviceMemDesc.sDevVAddr; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemAcquireDevVirtAddr(DEVMEM_MEMDESC *psMemDesc, + IMG_DEV_VIRTADDR *psDevVirtAddr) +{ + PVRSRV_ERROR eError; + + /* Do not try to map unpinned memory */ + if (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_UNPINNED) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_MAP_REQUEST, failCheck); + } + + OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock); + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psMemDesc, + 
psMemDesc->sDeviceMemDesc.ui32RefCount, + psMemDesc->sDeviceMemDesc.ui32RefCount+1); + + if (psMemDesc->sDeviceMemDesc.ui32RefCount == 0) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_NO_MAPPING, failRelease); + } + psMemDesc->sDeviceMemDesc.ui32RefCount++; + + *psDevVirtAddr = psMemDesc->sDeviceMemDesc.sDevVAddr; + OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); + + return PVRSRV_OK; + +failRelease: + OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); + PVR_ASSERT(eError != PVRSRV_OK); +failCheck: + return eError; +} + +IMG_INTERNAL void +DevmemReleaseDevVirtAddr(DEVMEM_MEMDESC *psMemDesc) +{ + PVR_ASSERT(psMemDesc != NULL); + + OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock); + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psMemDesc, + psMemDesc->sDeviceMemDesc.ui32RefCount, + psMemDesc->sDeviceMemDesc.ui32RefCount-1); + + PVR_ASSERT(psMemDesc->sDeviceMemDesc.ui32RefCount != 0); + + if (--psMemDesc->sDeviceMemDesc.ui32RefCount == 0) + { + if (GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + BridgeDevicememHistoryUnmap(GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + psMemDesc->uiOffset, + psMemDesc->sDeviceMemDesc.sDevVAddr, + psMemDesc->uiAllocSize, + psMemDesc->szText, + DevmemGetHeapLog2PageSize(psMemDesc->psImport->sDeviceImport.psHeap), + psMemDesc->ui32AllocationIndex, + &psMemDesc->ui32AllocationIndex); + } + + /* When device mapping destroyed, zero Dev VA so DevmemGetDevVirtAddr() + * returns 0 */ + if (DevmemImportStructDevUnmap(psMemDesc->psImport) == IMG_TRUE) + { + psMemDesc->sDeviceMemDesc.sDevVAddr.uiAddr = 0; + } + OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); + + DevmemMemDescRelease(psMemDesc); + } + else + { + OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); + } +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemAcquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc, + void **ppvCpuVirtAddr) +{ + PVRSRV_ERROR eError; + DEVMEM_PROPERTIES_T uiProperties; + + 
PVR_ASSERT(psMemDesc != NULL); + PVR_ASSERT(ppvCpuVirtAddr != NULL); + + uiProperties = GetImportProperties(psMemDesc->psImport); + + if (uiProperties & + (DEVMEM_PROPERTIES_UNPINNED | DEVMEM_PROPERTIES_SECURE)) + { +#if defined(SUPPORT_SECURITY_VALIDATION) + if (uiProperties & DEVMEM_PROPERTIES_SECURE) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Allocation is a secure buffer. " + "It should not be possible to map to CPU, but for security " + "validation this will be allowed for testing purposes, " + "as long as the buffer is pinned.", + __func__)); + } + + if (uiProperties & DEVMEM_PROPERTIES_UNPINNED) +#endif + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Allocation is currently unpinned or a secure buffer. " + "Not possible to map to CPU!", + __func__)); + return PVRSRV_ERROR_INVALID_MAP_REQUEST; + } + } + + if (uiProperties & DEVMEM_PROPERTIES_NO_CPU_MAPPING) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: CPU Mapping is not possible on this allocation!", + __func__)); + return PVRSRV_ERROR_INVALID_MAP_REQUEST; + } + + OSLockAcquire(psMemDesc->sCPUMemDesc.hLock); + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psMemDesc, + psMemDesc->sCPUMemDesc.ui32RefCount, + psMemDesc->sCPUMemDesc.ui32RefCount+1); + + if (psMemDesc->sCPUMemDesc.ui32RefCount++ == 0) + { + DEVMEM_IMPORT *psImport = psMemDesc->psImport; + IMG_UINT8 *pui8CPUVAddr; + + DevmemMemDescAcquire(psMemDesc); + eError = DevmemImportStructCPUMap(psImport); + PVR_GOTO_IF_ERROR(eError, failMap); + + pui8CPUVAddr = psImport->sCPUImport.pvCPUVAddr; + pui8CPUVAddr += psMemDesc->uiOffset; + psMemDesc->sCPUMemDesc.pvCPUVAddr = pui8CPUVAddr; + } + *ppvCpuVirtAddr = psMemDesc->sCPUMemDesc.pvCPUVAddr; + + VG_MARK_INITIALIZED(*ppvCpuVirtAddr, psMemDesc->psImport->uiSize); + + OSLockRelease(psMemDesc->sCPUMemDesc.hLock); + + return PVRSRV_OK; + +failMap: + PVR_ASSERT(eError != PVRSRV_OK); + psMemDesc->sCPUMemDesc.ui32RefCount--; + + if (!DevmemMemDescRelease(psMemDesc)) + { + OSLockRelease(psMemDesc->sCPUMemDesc.hLock); + } + return 
eError; +} + +IMG_INTERNAL void +DevmemReacquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc, + void **ppvCpuVirtAddr) +{ + PVR_ASSERT(psMemDesc != NULL); + PVR_ASSERT(ppvCpuVirtAddr != NULL); + + if (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_NO_CPU_MAPPING) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: CPU UnMapping is not possible on this allocation!", + __func__)); + return; + } + + OSLockAcquire(psMemDesc->sCPUMemDesc.hLock); + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psMemDesc, + psMemDesc->sCPUMemDesc.ui32RefCount, + psMemDesc->sCPUMemDesc.ui32RefCount+1); + + *ppvCpuVirtAddr = NULL; + if (psMemDesc->sCPUMemDesc.ui32RefCount) + { + *ppvCpuVirtAddr = psMemDesc->sCPUMemDesc.pvCPUVAddr; + psMemDesc->sCPUMemDesc.ui32RefCount += 1; + } + + VG_MARK_INITIALIZED(*ppvCpuVirtAddr, psMemDesc->psImport->uiSize); + OSLockRelease(psMemDesc->sCPUMemDesc.hLock); +} + +IMG_INTERNAL void +DevmemReleaseCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc) +{ + PVR_ASSERT(psMemDesc != NULL); + + if (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_NO_CPU_MAPPING) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: CPU UnMapping is not possible on this allocation!", + __func__)); + return; + } + + OSLockAcquire(psMemDesc->sCPUMemDesc.hLock); + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psMemDesc, + psMemDesc->sCPUMemDesc.ui32RefCount, + psMemDesc->sCPUMemDesc.ui32RefCount-1); + + PVR_ASSERT(psMemDesc->sCPUMemDesc.ui32RefCount != 0); + + if (--psMemDesc->sCPUMemDesc.ui32RefCount == 0) + { + OSLockRelease(psMemDesc->sCPUMemDesc.hLock); + DevmemImportStructCPUUnmap(psMemDesc->psImport); + DevmemMemDescRelease(psMemDesc); + } + else + { + OSLockRelease(psMemDesc->sCPUMemDesc.hLock); + } +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemLocalGetImportHandle(DEVMEM_MEMDESC *psMemDesc, + IMG_HANDLE *phImport) +{ + if ((GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_EXPORTABLE) == 0) + { + return PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION; + } + + *phImport = 
psMemDesc->psImport->hPMR; + + return PVRSRV_OK; +} + +#if !defined(__KERNEL__) +IMG_INTERNAL PVRSRV_ERROR +DevmemGetImportUID(DEVMEM_MEMDESC *psMemDesc, + IMG_UINT64 *pui64UID) +{ + DEVMEM_IMPORT *psImport = psMemDesc->psImport; + PVRSRV_ERROR eError; + + if (!(GetImportProperties(psImport) & (DEVMEM_PROPERTIES_IMPORTED | + DEVMEM_PROPERTIES_EXPORTABLE))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: This Memory (0x%p) doesn't support the functionality requested...", + __func__, psMemDesc)); + return PVRSRV_ERROR_INVALID_REQUEST; + } + + eError = BridgePMRGetUID(GetBridgeHandle(psImport->hDevConnection), + psImport->hPMR, + pui64UID); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemGetReservation(DEVMEM_MEMDESC *psMemDesc, + IMG_HANDLE *hReservation) +{ + DEVMEM_IMPORT *psImport; + + PVR_ASSERT(psMemDesc); + psImport = psMemDesc->psImport; + + PVR_ASSERT(psImport); + *hReservation = psImport->sDeviceImport.hReservation; + + return PVRSRV_OK; +} + +PVRSRV_ERROR +DevmemGetPMRData(DEVMEM_MEMDESC *psMemDesc, + IMG_HANDLE *phPMR, + IMG_DEVMEM_OFFSET_T *puiPMROffset) +{ + DEVMEM_IMPORT *psImport; + + PVR_ASSERT(psMemDesc); + *puiPMROffset = psMemDesc->uiOffset; + psImport = psMemDesc->psImport; + + PVR_ASSERT(psImport); + *phPMR = psImport->hPMR; + + return PVRSRV_OK; +} +#endif /* !__KERNEL__ */ + +#if defined(__KERNEL__) +IMG_INTERNAL void +DevmemGetFlags(DEVMEM_MEMDESC *psMemDesc, + DEVMEM_FLAGS_T *puiFlags) +{ + DEVMEM_IMPORT *psImport; + + PVR_ASSERT(psMemDesc); + psImport = psMemDesc->psImport; + + PVR_ASSERT(psImport); + *puiFlags = psImport->uiFlags; +} + +IMG_INTERNAL SHARED_DEV_CONNECTION +DevmemGetConnection(DEVMEM_MEMDESC *psMemDesc) +{ + return psMemDesc->psImport->hDevConnection; +} +#endif /* __KERNEL__ */ + +IMG_INTERNAL PVRSRV_ERROR +DevmemLocalImport(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hExtHandle, + DEVMEM_FLAGS_T uiFlags, + DEVMEM_MEMDESC **ppsMemDescPtr, + IMG_DEVMEM_SIZE_T *puiSizePtr, + const IMG_CHAR *pszAnnotation) +{ + 
DEVMEM_MEMDESC *psMemDesc = NULL; + DEVMEM_IMPORT *psImport; + IMG_DEVMEM_SIZE_T uiSize; + IMG_DEVMEM_ALIGN_T uiAlign; + IMG_HANDLE hPMR; + PVRSRV_ERROR eError; + + PVR_GOTO_IF_INVALID_PARAM(ppsMemDescPtr, eError, failParams); + + eError = DevmemMemDescAlloc(&psMemDesc); + PVR_GOTO_IF_ERROR(eError, failMemDescAlloc); + + eError = DevmemImportStructAlloc(hDevConnection, + &psImport); + if (eError != PVRSRV_OK) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, failImportAlloc); + } + + /* Get the PMR handle and its size from the server */ + eError = BridgePMRLocalImportPMR(GetBridgeHandle(hDevConnection), + hExtHandle, + &hPMR, + &uiSize, + &uiAlign); + PVR_GOTO_IF_ERROR(eError, failImport); + + DevmemImportStructInit(psImport, + uiSize, + uiAlign, + uiFlags, + hPMR, + DEVMEM_PROPERTIES_IMPORTED | + DEVMEM_PROPERTIES_EXPORTABLE); + + DevmemMemDescInit(psMemDesc, + 0, + psImport, + uiSize); + + *ppsMemDescPtr = psMemDesc; + if (puiSizePtr) + *puiSizePtr = uiSize; + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI)) + { + /* Attach RI information. + * Set backed size to 0 since this allocation has been allocated + * by the same process and has been accounted for. 
*/ + eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + sizeof("^"), + "^", + psMemDesc->uiOffset, + psMemDesc->psImport->uiSize, + IMG_TRUE, + IMG_FALSE, + &(psMemDesc->hRIHandle)); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError)); + } + } +#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + + + /* Copy the allocation descriptive name and size so it can be passed + * to DevicememHistory when the allocation gets mapped/unmapped + */ + CheckAnnotationLength(pszAnnotation); + OSStringLCopy(psMemDesc->szText, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN); + + return PVRSRV_OK; + +failImport: + DevmemImportDiscard(psImport); +failImportAlloc: + DevmemMemDescDiscard(psMemDesc); +failMemDescAlloc: +failParams: + PVR_ASSERT(eError != PVRSRV_OK); + + return eError; +} + +#if !defined(__KERNEL__) +IMG_INTERNAL PVRSRV_ERROR +DevmemIsDevVirtAddrValid(DEVMEM_CONTEXT *psContext, + IMG_DEV_VIRTADDR sDevVAddr) +{ + return BridgeDevmemIsVDevAddrValid(GetBridgeHandle(psContext->hDevConnection), + psContext->hDevMemServerContext, + sDevVAddr); +} + + +IMG_INTERNAL PVRSRV_ERROR +DevmemGetFaultAddress(DEVMEM_CONTEXT *psContext, + IMG_DEV_VIRTADDR *psFaultAddress) +{ + return BridgeDevmemGetFaultAddress(GetBridgeHandle(psContext->hDevConnection), + psContext->hDevMemServerContext, + psFaultAddress); +} +IMG_INTERNAL PVRSRV_ERROR +DevmemFlushDeviceSLCRange(DEVMEM_MEMDESC *psMemDesc, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + IMG_BOOL bInvalidate) +{ + DEVMEM_IMPORT *psImport = psMemDesc->psImport; + return BridgeDevmemFlushDevSLCRange(GetBridgeHandle(psImport->hDevConnection), + psImport->sDeviceImport.psHeap->psCtx->hDevMemServerContext, + sDevVAddr, + uiSize, + bInvalidate); +} + +#if defined(RGX_FEATURE_FBCDC) +IMG_INTERNAL PVRSRV_ERROR +DevmemInvalidateFBSCTable(DEVMEM_CONTEXT *psContext, + IMG_UINT64 
ui64FBSCEntries) +{ + return BridgeDevmemInvalidateFBSCTable(GetBridgeHandle(psContext->hDevConnection), + psContext->hDevMemServerContext, + ui64FBSCEntries); +} +#endif + +#endif /* !__KERNEL__ */ + +IMG_INTERNAL IMG_UINT32 +DevmemGetHeapLog2PageSize(DEVMEM_HEAP *psHeap) +{ + return psHeap->uiLog2Quantum; +} + +#if !defined(__KERNEL__) +/**************************************************************************/ /*! +@Function RegisterDevMemPFNotify +@Description Registers that the application wants to be signaled when a page + fault occurs. + +@Input psContext Memory context the process that would like to + be notified about. +@Input ui32PID The PID of the calling process. +@Input bRegister If true, register. If false, de-register. +@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_ + error code + */ /***************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +RegisterDevmemPFNotify(DEVMEM_CONTEXT *psContext, + IMG_UINT32 ui32PID, + IMG_BOOL bRegister) +{ + PVRSRV_ERROR eError; + + eError = BridgeDevmemIntRegisterPFNotifyKM(GetBridgeHandle(psContext->hDevConnection), + psContext->hDevMemServerContext, + ui32PID, + bRegister); + if (eError == PVRSRV_ERROR_BRIDGE_CALL_FAILED) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Bridge Call Failed: This could suggest a UM/KM mismatch (%d)", + __func__, + (IMG_INT)(eError))); + } + + return eError; +} +#endif /* !__KERNEL__ */ + +IMG_INTERNAL void +DevmemHeapSetPremapStatus(DEVMEM_HEAP *psHeap, IMG_BOOL IsPremapped) +{ + psHeap->bPremapped = IsPremapped; +} diff --git a/drivers/mcst/gpu-imgtec/services/shared/common/devicemem_pdump.c b/drivers/mcst/gpu-imgtec/services/shared/common/devicemem_pdump.c new file mode 100644 index 000000000000..c848fb65503c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/common/devicemem_pdump.c @@ -0,0 +1,403 @@ +/*************************************************************************/ /*! 
+@File +@Title Shared device memory management PDump functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements common (client & server) PDump functions for the + memory management code +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ /**************************************************************************/ + +#if defined(PDUMP) + +#include "allocmem.h" +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "pdump.h" +#include "devicemem.h" +#include "devicemem_utils.h" +#include "devicemem_pdump.h" +#include "client_pdump_bridge.h" +#include "client_pdumpmm_bridge.h" +#if defined(LINUX) && !defined(__KERNEL__) +#include +#if defined(SUPPORT_ANDROID_PLATFORM) +#include "android_utils.h" +#endif +#endif + +IMG_INTERNAL void +DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT(uiOffset + uiSize <= psMemDesc->psImport->uiSize); + + eError = BridgePMRPDumpLoadMem(GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + psMemDesc->uiOffset + uiOffset, + uiSize, + uiPDumpFlags, + IMG_FALSE); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed with error %d", + __func__, eError)); + } + PVR_ASSERT(eError == PVRSRV_OK); +} + +IMG_INTERNAL void +DevmemPDumpLoadZeroMem(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT(uiOffset + uiSize <= psMemDesc->psImport->uiSize); + + eError = BridgePMRPDumpLoadMem(GetBridgeHandle(psMemDesc->psImport->hDevConnection), + 
psMemDesc->psImport->hPMR, + psMemDesc->uiOffset + uiOffset, + uiSize, + uiPDumpFlags, + IMG_TRUE); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed with error %d", + __func__, eError)); + } + PVR_ASSERT(eError == PVRSRV_OK); +} + +IMG_INTERNAL void +DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32Value, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError; + + eError = BridgePMRPDumpLoadMemValue32(GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + psMemDesc->uiOffset + uiOffset, + ui32Value, + uiPDumpFlags); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed with error %d", + __func__, eError)); + } + PVR_ASSERT(eError == PVRSRV_OK); +} + +IMG_INTERNAL void +DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT64 ui64Value, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError; + + eError = BridgePMRPDumpLoadMemValue64(GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + psMemDesc->uiOffset + uiOffset, + ui64Value, + uiPDumpFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed with error %d", + __func__, eError)); + } + PVR_ASSERT(eError == PVRSRV_OK); +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T *puiMemOffset, + IMG_CHAR *pszName, + IMG_UINT32 ui32Size) +{ + PVRSRV_ERROR eError; + IMG_CHAR aszMemspaceName[100]; + IMG_CHAR aszSymbolicName[100]; + IMG_DEVMEM_OFFSET_T uiNextSymName; + + *puiMemOffset += psMemDesc->uiOffset; + + eError = BridgePMRPDumpSymbolicAddr(GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + *puiMemOffset, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + puiMemOffset, + &uiNextSymName); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed 
with error %d", + __func__, eError)); + } + PVR_ASSERT(eError == PVRSRV_OK); + + OSSNPrintf(pszName, ui32Size, "%s:%s", &aszMemspaceName[0], &aszSymbolicName[0]); + return eError; +} + +IMG_INTERNAL void +DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszFilename, + IMG_UINT32 uiFileOffset) +{ + PVRSRV_ERROR eError; + + eError = BridgePMRPDumpSaveToFile(GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + psMemDesc->uiOffset + uiOffset, + uiSize, + OSStringLength(pszFilename) + 1, + pszFilename, + uiFileOffset); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed with error %d", + __func__, eError)); + } + PVR_ASSERT(eError == PVRSRV_OK); +} + +IMG_INTERNAL void +DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszFilename, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32PdumpFlags) +{ + PVRSRV_ERROR eError; + IMG_DEV_VIRTADDR sDevAddrStart; + + sDevAddrStart = psMemDesc->psImport->sDeviceImport.sDevVAddr; + sDevAddrStart.uiAddr += psMemDesc->uiOffset; + sDevAddrStart.uiAddr += uiOffset; + + eError = BridgeDevmemIntPDumpSaveToFileVirtual(GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->sDeviceImport.psHeap->psCtx->hDevMemServerContext, + sDevAddrStart, + uiSize, + OSStringLength(pszFilename) + 1, + pszFilename, + ui32FileOffset, + ui32PdumpFlags); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed with error %d", + __func__, eError)); + } + PVR_ASSERT(eError == PVRSRV_OK); +} + +IMG_INTERNAL void +DevmemPDumpDataDescriptor(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszFilename, + IMG_UINT32 ui32HeaderType, + IMG_UINT32 ui32ElementType, + IMG_UINT32 ui32ElementCount, + IMG_UINT32 ui32PdumpFlags) +{ + PVRSRV_ERROR eError; + IMG_DEV_VIRTADDR 
sDevAddrStart; + + sDevAddrStart = psMemDesc->psImport->sDeviceImport.sDevVAddr; + sDevAddrStart.uiAddr += psMemDesc->uiOffset; + sDevAddrStart.uiAddr += uiOffset; + + eError = BridgePDumpDataDescriptor(GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->sDeviceImport.psHeap->psCtx->hDevMemServerContext, + OSStringLength(pszFilename) + 1, + pszFilename, + sDevAddrStart, + uiSize, + ui32HeaderType, + ui32ElementType, + ui32ElementCount, + ui32PdumpFlags); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed with error %d", + __func__, eError)); + } + PVR_ASSERT(eError == PVRSRV_OK); +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_DEVMEM_SIZE_T uiNumBytes; + + uiNumBytes = 4; + + if (psMemDesc->uiOffset + uiOffset + uiNumBytes > psMemDesc->psImport->uiSize) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, e0); + } + + eError = BridgePMRPDumpPol32(GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + psMemDesc->uiOffset + uiOffset, + ui32Value, + ui32Mask, + eOperator, + ui32PDumpFlags); + PVR_GOTO_IF_ERROR(eError, e0); + + return PVRSRV_OK; + + /* + error exit paths follow + */ + +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +#if defined(__KERNEL__) +IMG_INTERNAL PVRSRV_ERROR +DevmemPDumpDevmemCheck32(const DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_DEVMEM_SIZE_T uiNumBytes; + + uiNumBytes = 4; + + if (psMemDesc->uiOffset + uiOffset + uiNumBytes >= psMemDesc->psImport->uiSize) + { + eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE; + goto e0; + } + + eError = 
BridgePMRPDumpCheck32(GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + psMemDesc->uiOffset + uiOffset, + ui32Value, + ui32Mask, + eOperator, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + goto e0; + } + + return PVRSRV_OK; + + /* + error exit paths follow + */ + +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} +#endif /* defined(__KERNEL__) */ + +IMG_INTERNAL PVRSRV_ERROR +DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiReadOffset, + IMG_DEVMEM_OFFSET_T uiWriteOffset, + IMG_DEVMEM_SIZE_T uiPacketSize, + IMG_DEVMEM_SIZE_T uiBufferSize) +{ + PVRSRV_ERROR eError; + + if ((psMemDesc->uiOffset + uiReadOffset) > psMemDesc->psImport->uiSize) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, e0); + } + + eError = BridgePMRPDumpCBP(GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + psMemDesc->uiOffset + uiReadOffset, + uiWriteOffset, + uiPacketSize, + uiBufferSize); + PVR_GOTO_IF_ERROR(eError, e0); + + return PVRSRV_OK; + +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +#endif /* PDUMP */ diff --git a/drivers/mcst/gpu-imgtec/services/shared/common/devicemem_utils.c b/drivers/mcst/gpu-imgtec/services/shared/common/devicemem_utils.c new file mode 100644 index 000000000000..29262196baa0 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/common/devicemem_utils.c @@ -0,0 +1,1181 @@ +/*************************************************************************/ /*! +@File +@Title Device Memory Management internal utility functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Utility functions used internally by device memory management + code. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ /**************************************************************************/ + +#include "allocmem.h" +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "ra.h" +#include "devicemem_utils.h" +#include "client_mm_bridge.h" +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +#include "client_ri_bridge.h" +#if defined(__KERNEL__) +#include "pvrsrv.h" +#else +#include "pvr_bridge_client.h" +#endif +#endif + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "proc_stats.h" +#endif + +/* + SVM heap management support functions for CPU (un)mapping + */ +#define DEVMEM_MAP_SVM_USER_MANAGED_RETRY 2 + +static inline PVRSRV_ERROR +DevmemCPUMapSVMKernelManaged(DEVMEM_HEAP *psHeap, + DEVMEM_IMPORT *psImport, + IMG_UINT64 *ui64MapAddress) +{ + PVRSRV_ERROR eError; + IMG_UINT64 ui64SvmMapAddr; + IMG_UINT64 ui64SvmMapAddrEnd; + IMG_UINT64 ui64SvmHeapAddrEnd; + + /* SVM heap management always has XXX_MANAGER_KERNEL unless we + have triggered the fall back code-path in which case we + should not be calling into this code-path */ + PVR_ASSERT(psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_KERNEL); + + /* By acquiring the CPU virtual address here, it essentially + means we lock-down the virtual address for the duration + of the life-cycle of the allocation until a de-allocation + request comes in. Thus the allocation is guaranteed not to + change its virtual address on the CPU during its life-time. 
+ NOTE: Import might have already been CPU Mapped before now, + normally this is not a problem, see fall back */ + eError = DevmemImportStructCPUMap(psImport); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Unable to CPU map (lock-down) device memory for SVM use", + __func__)); + eError = PVRSRV_ERROR_DEVICEMEM_MAP_FAILED; + goto failSVM; + } + + /* Supplied kernel mmap virtual address is also device virtual address; + calculate the heap & kernel supplied mmap virtual address limits */ + ui64SvmMapAddr = (IMG_UINT64)(uintptr_t)psImport->sCPUImport.pvCPUVAddr; + ui64SvmHeapAddrEnd = psHeap->sBaseAddress.uiAddr + psHeap->uiSize; + ui64SvmMapAddrEnd = ui64SvmMapAddr + psImport->uiSize; + PVR_ASSERT(ui64SvmMapAddr != (IMG_UINT64)0); + + /* SVM limit test may fail if processor has more virtual address bits than device */ + if ((ui64SvmMapAddr >= ui64SvmHeapAddrEnd || ui64SvmMapAddrEnd > ui64SvmHeapAddrEnd) || + (ui64SvmMapAddr & ~(ui64SvmHeapAddrEnd - 1))) + { + /* Unmap incompatible SVM virtual address, this + may not release address if it was elsewhere + CPU Mapped before call into this function */ + DevmemImportStructCPUUnmap(psImport); + + /* Flag incompatible SVM mapping */ + eError = PVRSRV_ERROR_BAD_MAPPING; + goto failSVM; + } + + *ui64MapAddress = ui64SvmMapAddr; +failSVM: + /* either OK, MAP_FAILED or BAD_MAPPING */ + return eError; +} + +static inline void +DevmemCPUUnmapSVMKernelManaged(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport) +{ + PVR_UNREFERENCED_PARAMETER(psHeap); + DevmemImportStructCPUUnmap(psImport); +} + +static inline PVRSRV_ERROR +DevmemCPUMapSVMUserManaged(DEVMEM_HEAP *psHeap, + DEVMEM_IMPORT *psImport, + IMG_UINT uiAlign, + IMG_UINT64 *ui64MapAddress) +{ + RA_LENGTH_T uiAllocatedSize; + RA_BASE_T uiAllocatedAddr; + IMG_UINT64 ui64SvmMapAddr; + IMG_UINT uiRetry = 0; + PVRSRV_ERROR eError; + + /* If SVM heap management has transitioned to XXX_MANAGER_USER, + this is essentially a fall back approach that ensures we + continue to 
satisfy SVM alloc. This approach is not without + hazards in that we may specify a virtual address that is + already in use by the user process */ + PVR_ASSERT(psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_USER); + + /* Normally, for SVM heap allocations, CPUMap _must_ be done + before DevMap; ideally the initial CPUMap should be done by + SVM functions though this is not a hard requirement as long + as the prior elsewhere obtained CPUMap virtual address meets + SVM address requirements. This is a fall-back code-pathway + so we have to test that this assumption holds before we + progress any further */ + OSLockAcquire(psImport->sCPUImport.hLock); + + if (psImport->sCPUImport.ui32RefCount) + { + /* Already CPU Mapped SVM heap allocation, this prior elsewhere + obtained virtual address is responsible for the above + XXX_MANAGER_KERNEL failure. As we are not responsible for + this, we cannot progress any further so need to fail */ + PVR_DPF((PVR_DBG_ERROR, + "%s: Previously obtained CPU map address not SVM compatible" + , __func__)); + + /* Revert SVM heap to DEVMEM_HEAP_MANAGER_KERNEL */ + psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_KERNEL; + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Reverting SVM heap back to kernel managed", + __func__)); + + OSLockRelease(psImport->sCPUImport.hLock); + + /* Do we need a more specific error code here */ + eError = PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED; + goto failSVM; + } + + OSLockRelease(psImport->sCPUImport.hLock); + + do + { + /* Next we proceed to instruct the kernel to use the RA_Alloc supplied + virtual address to map-in this SVM import suballocation; there is no + guarantee that this RA_Alloc virtual address may not collide with an + already in-use VMA range in the process */ + eError = RA_Alloc(psHeap->psQuantizedVMRA, + psImport->uiSize, + RA_NO_IMPORT_MULTIPLIER, + 0, /* flags: this RA doesn't use flags*/ + uiAlign, + "SVM_Virtual_Alloc", + &uiAllocatedAddr, + &uiAllocatedSize, + NULL /* don't care about per-import 
priv data */); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Cannot RA allocate SVM compatible address", + __func__)); +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + if (eError == PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL) + { + PVRSRV_ERROR eErr; + eErr = BridgePVRSRVUpdateOOMStats(GetBridgeHandle(psHeap->psCtx->hDevConnection), + PVRSRV_PROCESS_STAT_TYPE_OOM_VIRTMEM_COUNT, + OSGetCurrentProcessID()); + PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVUpdateOOMStats"); + } +#endif + goto failSVM; + } + + /* No reason for allocated virtual size to be different from + the PMR's size */ + psImport->sCPUImport.pvCPUVAddr = (void*)(uintptr_t)uiAllocatedAddr; + PVR_ASSERT(uiAllocatedSize == psImport->uiSize); + + /* Map the import or allocation using the RA_Alloc virtual address; + the kernel may fail the request if the supplied virtual address + is already in-use in which case we re-try using another virtual + address obtained from the RA_Alloc */ + eError = DevmemImportStructCPUMap(psImport); + if (eError != PVRSRV_OK) + { + /* For now we simply discard failed RA_Alloc() obtained virtual + address (i.e. 
plenty of virtual space), this prevents us from + re-using these and furthermore essentially blacklists these + addresses from future SVM consideration; We exit fall-back + attempt if retry exceeds the fall-back retry limit */ + if (uiRetry++ > DEVMEM_MAP_SVM_USER_MANAGED_RETRY) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Cannot find SVM compatible address, bad mapping", + __func__)); + eError = PVRSRV_ERROR_BAD_MAPPING; + goto failSVM; + } + } + else + { + /* Found compatible SVM virtual address, set as device virtual address */ + ui64SvmMapAddr = (IMG_UINT64)(uintptr_t)psImport->sCPUImport.pvCPUVAddr; + } + } while (eError != PVRSRV_OK); + + *ui64MapAddress = ui64SvmMapAddr; +failSVM: + return eError; +} + +static inline void +DevmemCPUUnmapSVMUserManaged(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport) +{ + RA_BASE_T uiAllocatedAddr; + + /* We only free SVM compatible addresses, all addresses in + the blacklist are essentially excluded from future RA_Alloc */ + uiAllocatedAddr = psImport->sDeviceImport.sDevVAddr.uiAddr; + RA_Free(psHeap->psQuantizedVMRA, uiAllocatedAddr); + + DevmemImportStructCPUUnmap(psImport); +} + +static inline PVRSRV_ERROR +DevmemImportStructDevMapSVM(DEVMEM_HEAP *psHeap, + DEVMEM_IMPORT *psImport, + IMG_UINT uiAlign, + IMG_UINT64 *ui64MapAddress) +{ + PVRSRV_ERROR eError; + + switch (psHeap->ui32HeapManagerFlags) + { + case DEVMEM_HEAP_MANAGER_KERNEL: + eError = DevmemCPUMapSVMKernelManaged(psHeap, + psImport, + ui64MapAddress); + if (eError == PVRSRV_ERROR_BAD_MAPPING) + { + /* If the SVM map address is outside of SVM heap limits, + change heap type to DEVMEM_HEAP_MANAGER_USER */ + psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_USER; + + PVR_DPF((PVR_DBG_WARNING, + "%s: Kernel managed SVM heap is now user managed", + __func__)); + + /* Retry using user managed fall-back approach */ + eError = DevmemCPUMapSVMUserManaged(psHeap, + psImport, + uiAlign, + ui64MapAddress); + } + break; + + case DEVMEM_HEAP_MANAGER_USER: + eError = 
DevmemCPUMapSVMUserManaged(psHeap, + psImport, + uiAlign, + ui64MapAddress); + break; + + default: + eError = PVRSRV_ERROR_INVALID_PARAMS; + break; + } + + return eError; +} + +static inline void +DevmemImportStructDevUnmapSVM(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport) +{ + switch (psHeap->ui32HeapManagerFlags) + { + case DEVMEM_HEAP_MANAGER_KERNEL: + DevmemCPUUnmapSVMKernelManaged(psHeap, psImport); + break; + + case DEVMEM_HEAP_MANAGER_USER: + DevmemCPUUnmapSVMUserManaged(psHeap, psImport); + break; + + default: + break; + } +} + +/* + The Devmem import structure is the structure we use + to manage memory that is "imported" (which is page + granular) from the server into our process, this + includes allocations. + + This allows memory to be imported without requiring + any CPU or device mapping. Memory can then be mapped + into the device or CPU on demand, but neither is + required. + */ + +IMG_INTERNAL +void DevmemImportStructAcquire(DEVMEM_IMPORT *psImport) +{ + IMG_INT iRefCount = OSAtomicIncrement(&psImport->hRefCount); + PVR_UNREFERENCED_PARAMETER(iRefCount); + PVR_ASSERT(iRefCount != 1); + + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psImport, + iRefCount-1, + iRefCount); +} + +IMG_INTERNAL +IMG_BOOL DevmemImportStructRelease(DEVMEM_IMPORT *psImport) +{ + IMG_INT iRefCount = OSAtomicDecrement(&psImport->hRefCount); + PVR_ASSERT(iRefCount >= 0); + + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psImport, + iRefCount+1, + iRefCount); + + if (iRefCount == 0) + { + BridgePMRUnrefPMR(GetBridgeHandle(psImport->hDevConnection), + psImport->hPMR); + OSLockDestroy(psImport->sCPUImport.hLock); + OSLockDestroy(psImport->sDeviceImport.hLock); + OSLockDestroy(psImport->hLock); + OSFreeMem(psImport); + + return IMG_TRUE; + } + + return IMG_FALSE; +} + +IMG_INTERNAL +void DevmemImportDiscard(DEVMEM_IMPORT *psImport) +{ + PVR_ASSERT(OSAtomicRead(&psImport->hRefCount) == 0); + OSLockDestroy(psImport->sCPUImport.hLock); + 
OSLockDestroy(psImport->sDeviceImport.hLock); + OSLockDestroy(psImport->hLock); + OSFreeMem(psImport); +} + +IMG_INTERNAL +PVRSRV_ERROR DevmemMemDescAlloc(DEVMEM_MEMDESC **ppsMemDesc) +{ + DEVMEM_MEMDESC *psMemDesc; + PVRSRV_ERROR eError; + + /* Must be zeroed in case it needs to be freed before it is initialised */ + psMemDesc = OSAllocZMem(sizeof(DEVMEM_MEMDESC)); + PVR_GOTO_IF_NOMEM(psMemDesc, eError, failAlloc); + + eError = OSLockCreate(&psMemDesc->hLock); + PVR_GOTO_IF_ERROR(eError, failMDLock); + + eError = OSLockCreate(&psMemDesc->sDeviceMemDesc.hLock); + PVR_GOTO_IF_ERROR(eError, failDMDLock); + + eError = OSLockCreate(&psMemDesc->sCPUMemDesc.hLock); + PVR_GOTO_IF_ERROR(eError, failCMDLock); + + OSAtomicWrite(&psMemDesc->hRefCount, 0); + + *ppsMemDesc = psMemDesc; + + return PVRSRV_OK; + +failCMDLock: + OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock); +failDMDLock: + OSLockDestroy(psMemDesc->hLock); +failMDLock: + OSFreeMem(psMemDesc); +failAlloc: + PVR_ASSERT(eError != PVRSRV_OK); + + return eError; +} + +/* + Init the MemDesc structure + */ +IMG_INTERNAL +void DevmemMemDescInit(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + DEVMEM_IMPORT *psImport, + IMG_DEVMEM_SIZE_T uiSize) +{ + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psMemDesc, + 0, + 1); + + psMemDesc->psImport = psImport; + psMemDesc->uiOffset = uiOffset; + + psMemDesc->sDeviceMemDesc.ui32RefCount = 0; + psMemDesc->sCPUMemDesc.ui32RefCount = 0; + psMemDesc->uiAllocSize = uiSize; + psMemDesc->hPrivData = NULL; + psMemDesc->ui32AllocationIndex = DEVICEMEM_HISTORY_ALLOC_INDEX_NONE; + + OSAtomicWrite(&psMemDesc->hRefCount, 1); +} + +IMG_INTERNAL +void DevmemMemDescAcquire(DEVMEM_MEMDESC *psMemDesc) +{ + IMG_INT iRefCount = 0; + + iRefCount = OSAtomicIncrement(&psMemDesc->hRefCount); + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psMemDesc, + iRefCount-1, + iRefCount); + + PVR_UNREFERENCED_PARAMETER(iRefCount); +} + +IMG_INTERNAL +IMG_BOOL 
DevmemMemDescRelease(DEVMEM_MEMDESC *psMemDesc) +{ + IMG_INT iRefCount; + PVR_ASSERT(psMemDesc != NULL); + + iRefCount = OSAtomicDecrement(&psMemDesc->hRefCount); + PVR_ASSERT(iRefCount >= 0); + + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psMemDesc, + iRefCount+1, + iRefCount); + + if (iRefCount == 0) + { +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI) && + (psMemDesc->hRIHandle)) + { + PVRSRV_ERROR eError; + + eError = BridgeRIDeleteMEMDESCEntry(GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->hRIHandle); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIDeleteMEMDESCEntry failed (Error=%d)", __func__, eError)); + } + } +#endif + + OSLockAcquire(psMemDesc->psImport->hLock); + if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_SUBALLOCATABLE) + { + /* As soon as the first sub-allocation on the psImport is freed + * we might get dirty memory when reusing it. 
+ * We have to delete the ZEROED, CLEAN & POISONED flag */ + + psMemDesc->psImport->uiProperties &= + ~(DEVMEM_PROPERTIES_IMPORT_IS_ZEROED | + DEVMEM_PROPERTIES_IMPORT_IS_CLEAN | + DEVMEM_PROPERTIES_IMPORT_IS_POISONED); + + OSLockRelease(psMemDesc->psImport->hLock); + + RA_Free(psMemDesc->psImport->sDeviceImport.psHeap->psSubAllocRA, + psMemDesc->psImport->sDeviceImport.sDevVAddr.uiAddr + + psMemDesc->uiOffset); + } + else + { + OSLockRelease(psMemDesc->psImport->hLock); + DevmemImportStructRelease(psMemDesc->psImport); + } + + OSLockDestroy(psMemDesc->sCPUMemDesc.hLock); + OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock); + OSLockDestroy(psMemDesc->hLock); + OSFreeMem(psMemDesc); + + return IMG_TRUE; + } + + return IMG_FALSE; +} + +IMG_INTERNAL +void DevmemMemDescDiscard(DEVMEM_MEMDESC *psMemDesc) +{ + PVR_ASSERT(OSAtomicRead(&psMemDesc->hRefCount) == 0); + + OSLockDestroy(psMemDesc->sCPUMemDesc.hLock); + OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock); + OSLockDestroy(psMemDesc->hLock); + OSFreeMem(psMemDesc); +} + + +IMG_INTERNAL +PVRSRV_ERROR DevmemValidateParams(IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiAlign, + DEVMEM_FLAGS_T *puiFlags) +{ + if ((*puiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) && + (*puiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Zero on Alloc and Poison on Alloc are mutually exclusive.", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (uiAlign & (uiAlign-1)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: The requested alignment is not a power of two.", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (uiSize == 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Please request a non-zero size value.", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* If zero flag is set we have to have write access to the page. 
*/ + if (PVRSRV_CHECK_ZERO_ON_ALLOC(*puiFlags) || PVRSRV_CHECK_CPU_WRITEABLE(*puiFlags)) + { + (*puiFlags) |= PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_READABLE; + } + + return PVRSRV_OK; +} + +/* + Allocate and init an import structure + */ +IMG_INTERNAL +PVRSRV_ERROR DevmemImportStructAlloc(SHARED_DEV_CONNECTION hDevConnection, + DEVMEM_IMPORT **ppsImport) +{ + DEVMEM_IMPORT *psImport; + PVRSRV_ERROR eError; + + psImport = OSAllocMem(sizeof(*psImport)); + PVR_RETURN_IF_FALSE(psImport != NULL, PVRSRV_ERROR_OUT_OF_MEMORY); + + /* Setup some known bad values for things we don't have yet */ + psImport->sDeviceImport.hReservation = LACK_OF_RESERVATION_POISON; + psImport->sDeviceImport.hMapping = LACK_OF_MAPPING_POISON; + psImport->sDeviceImport.psHeap = NULL; + psImport->sDeviceImport.bMapped = IMG_FALSE; + + eError = OSLockCreate(&psImport->sDeviceImport.hLock); + PVR_GOTO_IF_ERROR(eError, failDIOSLockCreate); + + psImport->sCPUImport.hOSMMapData = NULL; + psImport->sCPUImport.pvCPUVAddr = NULL; + + eError = OSLockCreate(&psImport->sCPUImport.hLock); + PVR_GOTO_IF_ERROR(eError, failCIOSLockCreate); + + /* Set up common elements */ + psImport->hDevConnection = hDevConnection; + + /* Setup properties */ + psImport->uiProperties = 0; + + /* Setup refcounts */ + psImport->sDeviceImport.ui32RefCount = 0; + psImport->sCPUImport.ui32RefCount = 0; + OSAtomicWrite(&psImport->hRefCount, 0); + + /* Create the lock */ + eError = OSLockCreate(&psImport->hLock); + PVR_GOTO_IF_ERROR(eError, failILockAlloc); + + *ppsImport = psImport; + + return PVRSRV_OK; + +failILockAlloc: + OSLockDestroy(psImport->sCPUImport.hLock); +failCIOSLockCreate: + OSLockDestroy(psImport->sDeviceImport.hLock); +failDIOSLockCreate: + OSFreeMem(psImport); + PVR_ASSERT(eError != PVRSRV_OK); + + return eError; +} + +/* + Initialise the import structure + */ +IMG_INTERNAL +void DevmemImportStructInit(DEVMEM_IMPORT *psImport, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiAlign, + 
DEVMEM_FLAGS_T uiFlags, + IMG_HANDLE hPMR, + DEVMEM_PROPERTIES_T uiProperties) +{ + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psImport, + 0, + 1); + + psImport->uiSize = uiSize; + psImport->uiAlign = uiAlign; + psImport->uiFlags = uiFlags; + psImport->hPMR = hPMR; + psImport->uiProperties = uiProperties; + OSAtomicWrite(&psImport->hRefCount, 1); +} + +/* Allocate the requested device virtual address region + * from the heap */ +static PVRSRV_ERROR DevmemReserveVARange(DEVMEM_HEAP *psHeap, + DEVMEM_SIZE_T uiSize, + IMG_UINT uiAlign, + RA_LENGTH_T *puiAllocatedSize, + IMG_UINT64 ui64OptionalMapAddress) +{ + PVRSRV_ERROR eError; + + /* Allocate space in the VM */ + eError = RA_Alloc_Range(psHeap->psQuantizedVMRA, + uiSize, + 0, + uiAlign, + ui64OptionalMapAddress, + puiAllocatedSize); + + if (PVRSRV_OK != eError) + { +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + if ((eError == PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL) || + (eError == PVRSRV_ERROR_RA_REQUEST_VIRT_ADDR_FAIL)) + { + PVRSRV_ERROR eErr; + eErr = BridgePVRSRVUpdateOOMStats(GetBridgeHandle(psHeap->psCtx->hDevConnection), + PVRSRV_PROCESS_STAT_TYPE_INVALID_VIRTMEM, + OSGetCurrentProcessID()); + PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVUpdateOOMStats"); + } +#endif + return eError; + } + + /* No reason for the allocated virtual size to be different from + the PMR's size */ + PVR_ASSERT(*puiAllocatedSize == uiSize); + + return PVRSRV_OK; +} + +/* + Map an import to the device + */ +IMG_INTERNAL +PVRSRV_ERROR DevmemImportStructDevMap(DEVMEM_HEAP *psHeap, + IMG_BOOL bMap, + DEVMEM_IMPORT *psImport, + IMG_UINT64 ui64OptionalMapAddress) +{ + DEVMEM_DEVICE_IMPORT *psDeviceImport; + RA_BASE_T uiAllocatedAddr; + RA_LENGTH_T uiAllocatedSize; + IMG_DEV_VIRTADDR sBase; + IMG_HANDLE hReservation; + PVRSRV_ERROR eError; + IMG_UINT uiAlign; + IMG_BOOL bDestroyed = IMG_FALSE; + + /* Round the provided import alignment to the configured heap alignment */ + uiAlign = 1ULL << psHeap->uiLog2ImportAlignment; + uiAlign = 
(psImport->uiAlign + uiAlign - 1) & ~(uiAlign-1); + + psDeviceImport = &psImport->sDeviceImport; + + OSLockAcquire(psDeviceImport->hLock); + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psImport, + psDeviceImport->ui32RefCount, + psDeviceImport->ui32RefCount+1); + + if (psDeviceImport->ui32RefCount++ == 0) + { + DevmemImportStructAcquire(psImport); + + OSAtomicIncrement(&psHeap->hImportCount); + + if (PVRSRV_CHECK_SVM_ALLOC(psImport->uiFlags)) + { + /* SVM (shared virtual memory) imports or allocations always + need to acquire CPU virtual address first as address is + used to map the allocation into the device virtual address + space; i.e. the virtual address of the allocation for both + the CPU/GPU must be identical. */ + eError = DevmemImportStructDevMapSVM(psHeap, + psImport, + uiAlign, + &ui64OptionalMapAddress); + PVR_GOTO_IF_ERROR(eError, failVMRAAlloc); + } + + if (ui64OptionalMapAddress == 0) + { + /* If heap is _completely_ managed by USER or KERNEL, we shouldn't + * be here, as this is RA manager code-path */ + if (psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_USER || + psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_KERNEL) + { + PVR_DPF((PVR_DBG_ERROR, + psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_USER ? + "%s: Heap is user managed, please use PVRSRVMapToDeviceAddress().": + "%s: Heap is kernel managed, use right allocation flags (e.g. SVM).", + __func__)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failVMRAAlloc); + } + + if (psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_UNKNOWN) + { + /* Only set the heap manager (to RA) at first map when heap manager + * is unknown. 
It might be a dual heap (both, user and RA managed), + * in which case heap manager is set at creation time */ + psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_RA; + } + + /* Allocate space in the VM */ + eError = RA_Alloc(psHeap->psQuantizedVMRA, + psImport->uiSize, + RA_NO_IMPORT_MULTIPLIER, + 0, /* flags: this RA doesn't use flags*/ + uiAlign, + "Virtual_Alloc", + &uiAllocatedAddr, + &uiAllocatedSize, + NULL /* don't care about per-import priv data */ + ); + if (PVRSRV_OK != eError) + { +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + if (eError == PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL) + { + PVRSRV_ERROR eErr; + eErr = BridgePVRSRVUpdateOOMStats(GetBridgeHandle(psHeap->psCtx->hDevConnection), + PVRSRV_PROCESS_STAT_TYPE_OOM_VIRTMEM_COUNT, + OSGetCurrentProcessID()); + PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVUpdateOOMStats"); + } +#endif + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM, failVMRAAlloc); + } + + /* No reason for the allocated virtual size to be different from + the PMR's size */ + PVR_ASSERT(uiAllocatedSize == psImport->uiSize); + + sBase.uiAddr = uiAllocatedAddr; + + } + else + { + IMG_UINT64 ui64ValidEndAddr; + + /* Ensure supplied ui64OptionalMapAddress is within heap range */ + ui64ValidEndAddr = psHeap->sBaseAddress.uiAddr + psHeap->uiSize; + if ((ui64OptionalMapAddress + psImport->uiSize > ui64ValidEndAddr) || + (ui64OptionalMapAddress < psHeap->sBaseAddress.uiAddr)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: ui64OptionalMapAddress %p is outside of heap limits <%p:%p>." + , __func__ + , (void*)(uintptr_t)ui64OptionalMapAddress + , (void*)(uintptr_t)psHeap->sBaseAddress.uiAddr + , (void*)(uintptr_t)ui64ValidEndAddr)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failVMRAAlloc); + } + + switch (psHeap->ui32HeapManagerFlags) + { + case DEVMEM_HEAP_MANAGER_UNKNOWN: + /* DEVMEM_HEAP_MANAGER_USER can apply to _any_ heap and can only + * be determined here. 
This heap type transitions from + * DEVMEM_HEAP_MANAGER_UNKNOWN to DEVMEM_HEAP_MANAGER_USER on + * 1st alloc. */ + psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_USER; + break; + + case DEVMEM_HEAP_MANAGER_USER: + case DEVMEM_HEAP_MANAGER_KERNEL: + if (! psHeap->uiSize) + { + PVR_DPF((PVR_DBG_ERROR, + psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_USER ? + "%s: Heap DEVMEM_HEAP_MANAGER_USER is disabled.": + "%s: Heap DEVMEM_HEAP_MANAGER_KERNEL is disabled." + , __func__)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_HEAP, failVMRAAlloc); + } + break; + + case DEVMEM_HEAP_MANAGER_DUAL_USER_RA: + /* When the heap is dual managed, ensure supplied ui64OptionalMapAddress + * and import size are within heap address space range */ + if (ui64OptionalMapAddress + psImport->uiSize <= + psHeap->sBaseAddress.uiAddr + psHeap->uiReservedRegionSize) + { + break; + } + else + { + /* Allocate requested VM range */ + eError = DevmemReserveVARange(psHeap, + psImport->uiSize, + uiAlign, + &uiAllocatedSize, + ui64OptionalMapAddress); + if (eError != PVRSRV_OK) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_VA_ALLOC_FAILED, failVMRAAlloc); + } + + } + break; + case DEVMEM_HEAP_MANAGER_RA: + /* Allocate requested VM range */ + eError = DevmemReserveVARange(psHeap, + psImport->uiSize, + uiAlign, + &uiAllocatedSize, + ui64OptionalMapAddress); + if (eError != PVRSRV_OK) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_VA_ALLOC_FAILED, failVMRAAlloc); + } + break; + + default: + break; + } + + if (ui64OptionalMapAddress & ((1 << psHeap->uiLog2Quantum) - 1)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid address to map to. Please provide an " + "address aligned to a page multiple of the heap." + , __func__)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failVMRAAlloc); + } + + if (psImport->uiSize & ((1 << psHeap->uiLog2Quantum) - 1)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid heap to map to. 
" + "Please choose a heap that can handle smaller page sizes." + , __func__)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failVMRAAlloc); + } + + uiAllocatedAddr = ui64OptionalMapAddress; + uiAllocatedSize = psImport->uiSize; + sBase.uiAddr = uiAllocatedAddr; + } + + if (psHeap->bPremapped) + { + /* no virtual address reservation and mapping are required for memory that's already mapped */ + psDeviceImport->hReservation = LACK_OF_RESERVATION_POISON; + psDeviceImport->hMapping = LACK_OF_MAPPING_POISON; + } + else + { + /* Setup page tables for the allocated VM space */ + eError = BridgeDevmemIntReserveRange(GetBridgeHandle(psHeap->psCtx->hDevConnection), + psHeap->hDevMemServerHeap, + sBase, + uiAllocatedSize, + &hReservation); + PVR_GOTO_IF_ERROR(eError, failReserve); + + if (bMap) + { + DEVMEM_FLAGS_T uiMapFlags; + + uiMapFlags = psImport->uiFlags & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK; + + /* Actually map the PMR to allocated VM space */ + eError = BridgeDevmemIntMapPMR(GetBridgeHandle(psHeap->psCtx->hDevConnection), + psHeap->hDevMemServerHeap, + hReservation, + psImport->hPMR, + uiMapFlags, + &psDeviceImport->hMapping); + PVR_GOTO_IF_ERROR(eError, failMap); + + psDeviceImport->bMapped = IMG_TRUE; + } + + psDeviceImport->hReservation = hReservation; + } + + /* Setup device mapping specific parts of the mapping info */ + psDeviceImport->sDevVAddr.uiAddr = uiAllocatedAddr; + psDeviceImport->psHeap = psHeap; + } + else + { + /* + Check that we've been asked to map it into the + same heap 2nd time around + */ + if (psHeap != psDeviceImport->psHeap) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_HEAP, failParams); + } + } + OSLockRelease(psDeviceImport->hLock); + + return PVRSRV_OK; + +failMap: + if (!psHeap->bPremapped) + { + BridgeDevmemIntUnreserveRange(GetBridgeHandle(psHeap->psCtx->hDevConnection), + hReservation); + } +failReserve: + if (ui64OptionalMapAddress == 0) + { + RA_Free(psHeap->psQuantizedVMRA, + uiAllocatedAddr); + } 
+failVMRAAlloc: + if ((ui64OptionalMapAddress) && PVRSRV_CHECK_SVM_ALLOC(psImport->uiFlags)) + { + DevmemImportStructDevUnmapSVM(psHeap, psImport); + } + bDestroyed = DevmemImportStructRelease(psImport); + OSAtomicDecrement(&psHeap->hImportCount); +failParams: + if (!bDestroyed) + { + psDeviceImport->ui32RefCount--; + OSLockRelease(psDeviceImport->hLock); + } + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +/* + Unmap an import from the Device + */ +IMG_INTERNAL +IMG_BOOL DevmemImportStructDevUnmap(DEVMEM_IMPORT *psImport) +{ + PVRSRV_ERROR eError; + DEVMEM_DEVICE_IMPORT *psDeviceImport; + + psDeviceImport = &psImport->sDeviceImport; + + OSLockAcquire(psDeviceImport->hLock); + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psImport, + psDeviceImport->ui32RefCount, + psDeviceImport->ui32RefCount-1); + + if (--psDeviceImport->ui32RefCount == 0) + { + DEVMEM_HEAP *psHeap = psDeviceImport->psHeap; + + if (!psHeap->bPremapped) + { + if (psDeviceImport->bMapped) + { + eError = BridgeDevmemIntUnmapPMR(GetBridgeHandle(psImport->hDevConnection), + psDeviceImport->hMapping); + PVR_ASSERT(eError == PVRSRV_OK); + } + + eError = BridgeDevmemIntUnreserveRange(GetBridgeHandle(psImport->hDevConnection), + psDeviceImport->hReservation); + PVR_ASSERT(eError == PVRSRV_OK); + } + + psDeviceImport->bMapped = IMG_FALSE; + psDeviceImport->hMapping = LACK_OF_MAPPING_POISON; + psDeviceImport->hReservation = LACK_OF_RESERVATION_POISON; + + /* DEVMEM_HEAP_MANAGER_RA can also come from a dual managed heap in which case, + we need to check if the allocated VA falls within RA managed range */ + if ((psHeap->ui32HeapManagerFlags & DEVMEM_HEAP_MANAGER_RA) && + psDeviceImport->sDevVAddr.uiAddr >= (psHeap->sBaseAddress.uiAddr + psHeap->uiReservedRegionSize) && + psDeviceImport->sDevVAddr.uiAddr < (psHeap->sBaseAddress.uiAddr + psHeap->uiSize)) + { + RA_Free(psHeap->psQuantizedVMRA, psDeviceImport->sDevVAddr.uiAddr); + } + + if (PVRSRV_CHECK_SVM_ALLOC(psImport->uiFlags)) + { + 
DevmemImportStructDevUnmapSVM(psHeap, psImport); + } + + OSLockRelease(psDeviceImport->hLock); + + DevmemImportStructRelease(psImport); + + OSAtomicDecrement(&psHeap->hImportCount); + + return IMG_TRUE; + } + else + { + OSLockRelease(psDeviceImport->hLock); + return IMG_FALSE; + } +} + +/* + Map an import into the CPU + */ +IMG_INTERNAL +PVRSRV_ERROR DevmemImportStructCPUMap(DEVMEM_IMPORT *psImport) +{ + PVRSRV_ERROR eError; + DEVMEM_CPU_IMPORT *psCPUImport; + size_t uiMappingLength; + + psCPUImport = &psImport->sCPUImport; + + OSLockAcquire(psCPUImport->hLock); + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psImport, + psCPUImport->ui32RefCount, + psCPUImport->ui32RefCount+1); + + if (psCPUImport->ui32RefCount++ == 0) + { + DevmemImportStructAcquire(psImport); + + eError = OSMMapPMR(GetBridgeHandle(psImport->hDevConnection), + psImport->hPMR, + psImport->uiSize, + psImport->uiFlags, + &psCPUImport->hOSMMapData, + &psCPUImport->pvCPUVAddr, + &uiMappingLength); + PVR_GOTO_IF_ERROR(eError, failMap); + + /* MappingLength might be rounded up to page size */ + PVR_ASSERT(uiMappingLength >= psImport->uiSize); + } + OSLockRelease(psCPUImport->hLock); + + return PVRSRV_OK; + +failMap: + psCPUImport->ui32RefCount--; + if (!DevmemImportStructRelease(psImport)) + { + OSLockRelease(psCPUImport->hLock); + } + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +/* + Unmap an import from the CPU + */ +IMG_INTERNAL +void DevmemImportStructCPUUnmap(DEVMEM_IMPORT *psImport) +{ + DEVMEM_CPU_IMPORT *psCPUImport; + + psCPUImport = &psImport->sCPUImport; + + OSLockAcquire(psCPUImport->hLock); + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psImport, + psCPUImport->ui32RefCount, + psCPUImport->ui32RefCount-1); + + if (--psCPUImport->ui32RefCount == 0) + { + /* psImport->uiSize is a 64-bit quantity whereas the 5th + * argument to OSUnmapPMR is a 32-bit quantity on 32-bit systems + * hence a compiler warning of implicit cast and loss of data. 
+ * Added explicit cast and assert to remove warning.
+ */
+#if defined(LINUX) && defined(__i386__)
+ PVR_ASSERT(psImport->uiSize<IMG_UINT32_MAX);
+#endif
+ OSMUnmapPMR(GetBridgeHandle(psImport->hDevConnection),
+ psImport->hPMR,
+ psCPUImport->hOSMMapData,
+ psCPUImport->pvCPUVAddr,
+ (size_t)psImport->uiSize);
+
+ psCPUImport->hOSMMapData = NULL;
+ psCPUImport->pvCPUVAddr = NULL;
+
+ OSLockRelease(psCPUImport->hLock);
+
+ DevmemImportStructRelease(psImport);
+ }
+ else
+ {
+ OSLockRelease(psCPUImport->hLock);
+ }
+}
diff --git a/drivers/mcst/gpu-imgtec/services/shared/common/devicememx_pdump.c b/drivers/mcst/gpu-imgtec/services/shared/common/devicememx_pdump.c
new file mode 100644
index 000000000000..ee84dd5eaf8d
--- /dev/null
+++ b/drivers/mcst/gpu-imgtec/services/shared/common/devicememx_pdump.c
@@ -0,0 +1,80 @@
+/*************************************************************************/ /*!
+@File
+@Title Shared X device memory management PDump functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements common (client & server) PDump functions for the
+ memory management code
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + + +#if defined(PDUMP) + +#include "devicememx_pdump.h" +#include "pdump.h" +#include "client_pdumpmm_bridge.h" +#include "devicemem_utils.h" + +IMG_INTERNAL void +DevmemXPDumpLoadMem(DEVMEMX_PHYSDESC *psMemDescPhys, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT(uiSize != 0); + PVR_ASSERT(uiOffset + uiSize <= (psMemDescPhys->uiNumPages << + psMemDescPhys->uiLog2PageSize)); + + eError = BridgePMRPDumpLoadMem(psMemDescPhys->hBridge, + psMemDescPhys->hPMR, + uiOffset, + uiSize, + uiPDumpFlags, + IMG_FALSE); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed with error %d", + __func__, eError)); + } +} + +#endif diff --git a/drivers/mcst/gpu-imgtec/services/shared/common/hash.c b/drivers/mcst/gpu-imgtec/services/shared/common/hash.c new file mode 100644 index 000000000000..258fece45390 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/common/hash.c @@ -0,0 +1,734 @@ +/*************************************************************************/ /*! +@File +@Title Self scaling hash tables. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description + Implements simple self scaling hash tables. Hash collisions are handled by + chaining entries together. Hash tables are increased in size when they + become more than (50%?) full and decreased in size when less than (25%?) + full. Hash tables are never decreased below their initial size. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/
+
+/* include/ */
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+/* services/shared/include/ */
+#include "hash.h"
+
+/* services/client/include/ or services/server/include/ */
+#include "osfunc.h"
+#include "allocmem.h"
+
+//#define PERF_DBG_RESIZE
+#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE)
+#include <sys/time.h>
+#endif
+
+#if defined(__KERNEL__)
+#include "pvrsrv.h"
+#endif
+
+#define KEY_TO_INDEX(pHash, key, uSize) \
+ ((pHash)->pfnHashFunc((pHash)->uKeySize, (key), (uSize)) % (uSize))
+
+#define KEY_COMPARE(pHash, pKey1, pKey2) \
+ ((pHash)->pfnKeyComp((pHash)->uKeySize, (pKey1), (pKey2)))
+
+#if defined(__linux__) && defined(__KERNEL__)
+#define _AllocMem OSAllocMemNoStats
+#define _AllocZMem OSAllocZMemNoStats
+#define _FreeMem OSFreeMemNoStats
+#else
+#define _AllocMem OSAllocMem
+#define _AllocZMem OSAllocZMem
+#define _FreeMem OSFreeMem
+#endif
+
+#define NO_SHRINK 0
+
+/* Each entry in a hash table is placed into a bucket */
+typedef struct _BUCKET_
+{
+ struct _BUCKET_ *pNext; /*!< the next bucket on the same chain */
+ uintptr_t v; /*!< entry value */
+ uintptr_t k[]; /* PRQA S 0642 */
+ /* override dynamic array declaration warning */
+} BUCKET;
+
+struct _HASH_TABLE_
+{
+ IMG_UINT32 uSize; /*!< current size of the hash table */
+ IMG_UINT32 uCount; /*!< number of entries currently in the hash table */
+ IMG_UINT32 uMinimumSize; /*!< the minimum size that the hash table should be re-sized to */
+ IMG_UINT32 uKeySize; /*!< size of key in bytes */
+ IMG_UINT32 uShrinkThreshold; /*!< The threshold at which to trigger a shrink */
+ IMG_UINT32 uGrowThreshold; /*!< The threshold at which to trigger a grow */
+ HASH_FUNC* pfnHashFunc; /*!< hash function */
+ HASH_KEY_COMP* pfnKeyComp; /*!< key comparison function */
+ BUCKET** ppBucketTable; /*!< the hash table array */
+#if defined(DEBUG)
+ const char* pszFile;
+ unsigned int
ui32LineNum; +#endif +}; + +/*************************************************************************/ /*! +@Function HASH_Func_Default +@Description Hash function intended for hashing keys composed of uintptr_t + arrays. +@Input uKeySize The size of the hash key, in bytes. +@Input pKey A pointer to the key to hash. +@Input uHashTabLen The length of the hash table. +@Return The hash value. +*/ /**************************************************************************/ +IMG_INTERNAL IMG_UINT32 +HASH_Func_Default(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen) +{ + uintptr_t *p = (uintptr_t *)pKey; + IMG_UINT32 uKeyLen = uKeySize / sizeof(uintptr_t); + IMG_UINT32 ui; + IMG_UINT32 uHashKey = 0; + + PVR_UNREFERENCED_PARAMETER(uHashTabLen); + + PVR_ASSERT((uKeySize % sizeof(uintptr_t)) == 0); + + for (ui = 0; ui < uKeyLen; ui++) + { + IMG_UINT32 uHashPart = (IMG_UINT32)*p++; + + uHashPart += (uHashPart << 12); + uHashPart ^= (uHashPart >> 22); + uHashPart += (uHashPart << 4); + uHashPart ^= (uHashPart >> 9); + uHashPart += (uHashPart << 10); + uHashPart ^= (uHashPart >> 2); + uHashPart += (uHashPart << 7); + uHashPart ^= (uHashPart >> 12); + + uHashKey += uHashPart; + } + + return uHashKey; +} + +/*************************************************************************/ /*! +@Function HASH_Key_Comp_Default +@Description Compares keys composed of uintptr_t arrays. +@Input uKeySize The size of the hash key, in bytes. +@Input pKey1 Pointer to first hash key to compare. +@Input pKey2 Pointer to second hash key to compare. +@Return IMG_TRUE - The keys match. + IMG_FALSE - The keys don't match. 
+*/ /**************************************************************************/ +IMG_INTERNAL IMG_BOOL +HASH_Key_Comp_Default(size_t uKeySize, void *pKey1, void *pKey2) +{ + uintptr_t *p1 = (uintptr_t *)pKey1; + uintptr_t *p2 = (uintptr_t *)pKey2; + IMG_UINT32 uKeyLen = uKeySize / sizeof(uintptr_t); + IMG_UINT32 ui; + + PVR_ASSERT((uKeySize % sizeof(uintptr_t)) == 0); + + for (ui = 0; ui < uKeyLen; ui++) + { + if (*p1++ != *p2++) + return IMG_FALSE; + } + + return IMG_TRUE; +} + +/*************************************************************************/ /*! +@Function _ChainInsert +@Description Insert a bucket into the appropriate hash table chain. +@Input pBucket The bucket +@Input ppBucketTable The hash table +@Input uSize The size of the hash table +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +static void +_ChainInsert(HASH_TABLE *pHash, BUCKET *pBucket, BUCKET **ppBucketTable, IMG_UINT32 uSize) +{ + IMG_UINT32 uIndex; + + /* We assume that all parameters passed by the caller are valid. */ + PVR_ASSERT(pBucket != NULL); + PVR_ASSERT(ppBucketTable != NULL); + PVR_ASSERT(uSize != 0); + + uIndex = KEY_TO_INDEX(pHash, pBucket->k, uSize); /* PRQA S 0432,0541 */ /* ignore dynamic array warning */ + pBucket->pNext = ppBucketTable[uIndex]; + ppBucketTable[uIndex] = pBucket; +} + +/*************************************************************************/ /*! +@Function _Rehash +@Description Iterate over every entry in an old hash table and rehash into + the new table. 
+@Input ppOldTable The old hash table +@Input uOldSize The size of the old hash table +@Input ppNewTable The new hash table +@Input uNewSize The size of the new hash table +@Return None +*/ /**************************************************************************/ +static void +_Rehash(HASH_TABLE *pHash, + BUCKET **ppOldTable, IMG_UINT32 uOldSize, + BUCKET **ppNewTable, IMG_UINT32 uNewSize) +{ + IMG_UINT32 uIndex; + for (uIndex=0; uIndex< uOldSize; uIndex++) + { + BUCKET *pBucket; + pBucket = ppOldTable[uIndex]; + while (pBucket != NULL) + { + BUCKET *pNextBucket = pBucket->pNext; + _ChainInsert(pHash, pBucket, ppNewTable, uNewSize); + pBucket = pNextBucket; + } + } +} + +/*************************************************************************/ /*! +@Function _Resize +@Description Attempt to resize a hash table, failure to allocate a new + larger hash table is not considered a hard failure. We simply + continue and allow the table to fill up, the effect is to + allow hash chains to become longer. +@Input pHash Hash table to resize. +@Input uNewSize Required table size. 
+@Return IMG_TRUE Success + IMG_FALSE Failed +*/ /**************************************************************************/ +static IMG_BOOL +_Resize(HASH_TABLE *pHash, IMG_UINT32 uNewSize) +{ + BUCKET **ppNewTable; + IMG_UINT32 uiThreshold = uNewSize >> 2; +#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE) + struct timeval start, end; +#endif + + if (uNewSize == pHash->uSize) + { + return IMG_TRUE; + } + +#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE) + gettimeofday(&start, NULL); +#endif + + ppNewTable = _AllocZMem(sizeof(BUCKET *) * uNewSize); + if (ppNewTable == NULL) + { + return IMG_FALSE; + } + + _Rehash(pHash, pHash->ppBucketTable, pHash->uSize, ppNewTable, uNewSize); + + _FreeMem(pHash->ppBucketTable); + +#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE) + gettimeofday(&end, NULL); + if (start.tv_usec > end.tv_usec) + { + end.tv_usec = 1000000 - start.tv_usec + end.tv_usec; + } + else + { + end.tv_usec -= start.tv_usec; + } + + PVR_DPF((PVR_DBG_ERROR, "%s: H:%p O:%d N:%d C:%d G:%d S:%d T:%06luus", __func__, pHash, pHash->uSize, uNewSize, pHash->uCount, pHash->uGrowThreshold, pHash->uShrinkThreshold, end.tv_usec)); +#endif + + /*not nulling pointer, being reassigned just below*/ + pHash->ppBucketTable = ppNewTable; + pHash->uSize = uNewSize; + + pHash->uGrowThreshold = uiThreshold * 3; + pHash->uShrinkThreshold = (uNewSize <= pHash->uMinimumSize) ? NO_SHRINK : uiThreshold; + + return IMG_TRUE; +} + + +/*************************************************************************/ /*! +@Function HASH_Create_Extended +@Description Create a self scaling hash table, using the supplied key size, + and the supplied hash and key comparison functions. +@Input uInitialLen Initial and minimum length of the hash table, + where the length refers to the number of entries + in the hash table, not its size in bytes. +@Input uKeySize The size of the key, in bytes. +@Input pfnHashFunc Pointer to hash function. +@Input pfnKeyComp Pointer to key comparison function. 
+@Return NULL or hash table handle. +*/ /**************************************************************************/ +IMG_INTERNAL +HASH_TABLE * HASH_Create_Extended_Int (IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp) +{ + HASH_TABLE *pHash; + + if (uInitialLen == 0 || uKeySize == 0 || pfnHashFunc == NULL || pfnKeyComp == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: invalid input parameters", __func__)); + return NULL; + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: InitialSize=0x%x", __func__, uInitialLen)); + + pHash = _AllocMem(sizeof(HASH_TABLE)); + if (pHash == NULL) + { + return NULL; + } + + pHash->uCount = 0; + pHash->uSize = uInitialLen; + pHash->uMinimumSize = uInitialLen; + pHash->uKeySize = uKeySize; + pHash->uGrowThreshold = (uInitialLen >> 2) * 3; + pHash->uShrinkThreshold = NO_SHRINK; + pHash->pfnHashFunc = pfnHashFunc; + pHash->pfnKeyComp = pfnKeyComp; + + pHash->ppBucketTable = _AllocZMem(sizeof(BUCKET *) * pHash->uSize); + if (pHash->ppBucketTable == NULL) + { + _FreeMem(pHash); + /*not nulling pointer, out of scope*/ + return NULL; + } + + return pHash; +} + +#if defined(DEBUG) +IMG_INTERNAL +HASH_TABLE * HASH_Create_Extended_Debug (IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp, + const char *file, const unsigned int line) +{ + HASH_TABLE *hash; + hash = HASH_Create_Extended_Int(uInitialLen, uKeySize, + pfnHashFunc, pfnKeyComp); + if (hash) + { + hash->pszFile = file; + hash->ui32LineNum = line; + } + return hash; +} +#endif + +/*************************************************************************/ /*! +@Function HASH_Create +@Description Create a self scaling hash table with a key consisting of a + single uintptr_t, and using the default hash and key + comparison functions. +@Input uInitialLen Initial and minimum length of the hash table, + where the length refers to the number of entries + in the hash table, not its size in bytes. 
+@Return NULL or hash table handle. +*/ /**************************************************************************/ +IMG_INTERNAL +HASH_TABLE * HASH_Create_Int (IMG_UINT32 uInitialLen) +{ + return HASH_Create_Extended_Int(uInitialLen, sizeof(uintptr_t), + &HASH_Func_Default, &HASH_Key_Comp_Default); +} + +#if defined(DEBUG) +IMG_INTERNAL +HASH_TABLE * HASH_Create_Debug(IMG_UINT32 uInitialLen, const char *file, const unsigned int line) +{ + HASH_TABLE *hash; + hash = HASH_Create_Extended_Int(uInitialLen, sizeof(uintptr_t), + &HASH_Func_Default, &HASH_Key_Comp_Default); + if (hash) + { + hash->pszFile = file; + hash->ui32LineNum = line; + } + return hash; +} +#endif + +/*************************************************************************/ /*! +@Function HASH_Delete_Extended +@Description Delete a hash table created by HASH_Create_Extended or + HASH_Create. All entries in the table should have been removed + before calling this function. +@Input pHash Hash table +@Input bWarn Set false to suppress warnings in the case of + deletion with active entries. 
+*/ /**************************************************************************/ +IMG_INTERNAL void +HASH_Delete_Extended(HASH_TABLE *pHash, IMG_BOOL bWarn) +{ + IMG_BOOL bDoCheck = IMG_TRUE; +#if defined(__KERNEL__) && !defined(__QNXNTO__) + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + if (psPVRSRVData != NULL) + { + if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + bDoCheck = IMG_FALSE; + } + } +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + else + { + bDoCheck = IMG_FALSE; + } +#endif +#endif + if (pHash != NULL) + { + PVR_DPF((PVR_DBG_MESSAGE, "HASH_Delete")); + + if (bDoCheck) + { + PVR_ASSERT(pHash->uCount==0); + } + if (pHash->uCount != 0) + { + IMG_UINT32 i; + if (bWarn) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Leak detected in hash table!", __func__)); + PVR_DPF((PVR_DBG_ERROR, "%s: Likely Cause: client drivers not freeing allocations before destroying devmem context", __func__)); + PVR_DPF((PVR_DBG_ERROR, "%s: Removing remaining %u hash entries.", __func__, pHash->uCount)); +#if defined(DEBUG) + PVR_DPF ((PVR_DBG_ERROR, "%s: Hash %p created at %s:%u.", __func__, (uintptr_t*)pHash, pHash->pszFile, pHash->ui32LineNum)); +#endif + } + + for (i = 0; i < pHash->uSize; i++) + { + BUCKET *pBucket = pHash->ppBucketTable[i]; + while (pBucket != NULL) + { + BUCKET *pNextBucket = pBucket->pNext; + _FreeMem(pBucket); + pBucket = pNextBucket; + } + } + + } + _FreeMem(pHash->ppBucketTable); + pHash->ppBucketTable = NULL; + _FreeMem(pHash); + /*not nulling pointer, copy on stack*/ + } +} + +/*************************************************************************/ /*! +@Function HASH_Delete +@Description Delete a hash table created by HASH_Create_Extended or + HASH_Create. All entries in the table must have been removed + before calling this function. 
+@Input pHash Hash table +*/ /**************************************************************************/ +IMG_INTERNAL void +HASH_Delete(HASH_TABLE *pHash) +{ + HASH_Delete_Extended(pHash, IMG_TRUE); +} + +/*************************************************************************/ /*! +@Function HASH_Insert_Extended +@Description Insert a key value pair into a hash table created with + HASH_Create_Extended. +@Input pHash The hash table. +@Input pKey Pointer to the key. +@Input v The value associated with the key. +@Return IMG_TRUE - success. + IMG_FALSE - failure. +*/ /**************************************************************************/ +IMG_INTERNAL IMG_BOOL +HASH_Insert_Extended(HASH_TABLE *pHash, void *pKey, uintptr_t v) +{ + BUCKET *pBucket; + + PVR_ASSERT(pHash != NULL); + + if (pHash == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter", __func__)); + return IMG_FALSE; + } + + pBucket = _AllocMem(sizeof(BUCKET) + pHash->uKeySize); + if (pBucket == NULL) + { + return IMG_FALSE; + } + + pBucket->v = v; + /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k (linux)*/ + OSCachedMemCopy(pBucket->k, pKey, pHash->uKeySize); + + _ChainInsert(pHash, pBucket, pHash->ppBucketTable, pHash->uSize); + + pHash->uCount++; + + /* check if we need to think about re-balancing */ + if (pHash->uCount > pHash->uGrowThreshold) + { + /* Ignore the return code from _Resize because the hash table is + still in a valid state and although not ideally sized, it is still + functional */ + _Resize(pHash, pHash->uSize << 1); + } + + return IMG_TRUE; +} + +/*************************************************************************/ /*! +@Function HASH_Insert +@Description Insert a key value pair into a hash table created with + HASH_Create. +@Input pHash The hash table. +@Input k The key value. +@Input v The value associated with the key. +@Return IMG_TRUE - success. + IMG_FALSE - failure. 
+*/ /**************************************************************************/ +IMG_INTERNAL IMG_BOOL +HASH_Insert(HASH_TABLE *pHash, uintptr_t k, uintptr_t v) +{ + return HASH_Insert_Extended(pHash, &k, v); +} + +/*************************************************************************/ /*! +@Function HASH_Remove_Extended +@Description Remove a key from a hash table created with + HASH_Create_Extended. +@Input pHash The hash table. +@Input pKey Pointer to key. +@Return 0 if the key is missing, or the value associated with the key. +*/ /**************************************************************************/ +IMG_INTERNAL uintptr_t +HASH_Remove_Extended(HASH_TABLE *pHash, void *pKey) +{ + BUCKET **ppBucket; + IMG_UINT32 uIndex; + + PVR_ASSERT(pHash != NULL); + + if (pHash == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Null hash table", __func__)); + return 0; + } + + uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize); + + for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != NULL; ppBucket = &((*ppBucket)->pNext)) + { + /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */ + if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey)) + { + BUCKET *pBucket = *ppBucket; + uintptr_t v = pBucket->v; + (*ppBucket) = pBucket->pNext; + + _FreeMem(pBucket); + /*not nulling original pointer, already overwritten*/ + + pHash->uCount--; + + /* check if we need to think about re-balancing, when the shrink + * threshold is 0 we are at the minimum size, no further shrink */ + if (pHash->uCount < pHash->uShrinkThreshold) + { + /* Ignore the return code from _Resize because the + hash table is still in a valid state and although + not ideally sized, it is still functional */ + _Resize(pHash, MAX(pHash->uSize >> 1, pHash->uMinimumSize)); + } + + return v; + } + } + return 0; +} + +/*************************************************************************/ /*! +@Function HASH_Remove +@Description Remove a key value pair from a hash table created with + HASH_Create. 
+@Input pHash The hash table. +@Input pKey Pointer to key. +@Return 0 if the key is missing, or the value associated with the key. +*/ /**************************************************************************/ +IMG_INTERNAL uintptr_t +HASH_Remove(HASH_TABLE *pHash, uintptr_t k) +{ + return HASH_Remove_Extended(pHash, &k); +} + +/*************************************************************************/ /*! +@Function HASH_Retrieve_Extended +@Description Retrieve a value from a hash table created with + HASH_Create_Extended. +@Input pHash The hash table. +@Input pKey Pointer to key. +@Return 0 if the key is missing, or the value associated with the key. +*/ /**************************************************************************/ +IMG_INTERNAL uintptr_t +HASH_Retrieve_Extended(HASH_TABLE *pHash, void *pKey) +{ + BUCKET **ppBucket; + IMG_UINT32 uIndex; + + PVR_ASSERT(pHash != NULL); + + if (pHash == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Null hash table", __func__)); + return 0; + } + + uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize); + + for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != NULL; ppBucket = &((*ppBucket)->pNext)) + { + /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */ + if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey)) + { + BUCKET *pBucket = *ppBucket; + uintptr_t v = pBucket->v; + + return v; + } + } + return 0; +} + +/*************************************************************************/ /*! +@Function HASH_Retrieve +@Description Retrieve a value from a hash table created with HASH_Create. +@Input pHash The hash table. +@Input pKey Pointer to key. +@Return 0 if the key is missing, or the value associated with the key. +*/ /**************************************************************************/ +IMG_INTERNAL uintptr_t +HASH_Retrieve(HASH_TABLE *pHash, uintptr_t k) +{ + return HASH_Retrieve_Extended(pHash, &k); +} + +/*************************************************************************/ /*! 
+@Function HASH_Iterate +@Description Iterate over every entry in the hash table. +@Input pHash Hash table to iterate. +@Input pfnCallback Callback to call with the key and data for each +. entry in the hash table +@Return Callback error if any, otherwise PVRSRV_OK +*/ /**************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback) +{ + IMG_UINT32 uIndex; + for (uIndex=0; uIndex < pHash->uSize; uIndex++) + { + BUCKET *pBucket; + pBucket = pHash->ppBucketTable[uIndex]; + while (pBucket != NULL) + { + PVRSRV_ERROR eError; + BUCKET *pNextBucket = pBucket->pNext; + + eError = pfnCallback((uintptr_t) ((void *) *(pBucket->k)), pBucket->v); + + /* The callback might want us to break out early */ + if (eError != PVRSRV_OK) + return eError; + + pBucket = pNextBucket; + } + } + return PVRSRV_OK; +} + +#ifdef HASH_TRACE +/*************************************************************************/ /*! +@Function HASH_Dump +@Description Dump out some information about a hash table. +@Input pHash The hash table. 
+*/ /**************************************************************************/
+void
+HASH_Dump(HASH_TABLE *pHash)
+{
+ IMG_UINT32 uIndex;
+ IMG_UINT32 uMaxLength=0;
+ IMG_UINT32 uEmptyCount=0;
+
+ PVR_ASSERT(pHash != NULL);
+ for (uIndex=0; uIndex<pHash->uSize; uIndex++)
+ {
+ BUCKET *pBucket;
+ IMG_UINT32 uLength = 0;
+ if (pHash->ppBucketTable[uIndex] == NULL)
+ {
+ uEmptyCount++;
+ }
+ for (pBucket=pHash->ppBucketTable[uIndex];
+ pBucket != NULL;
+ pBucket = pBucket->pNext)
+ {
+ uLength++;
+ }
+ uMaxLength = MAX(uMaxLength, uLength);
+ }
+
+ PVR_TRACE(("hash table: uMinimumSize=%d size=%d count=%d",
+ pHash->uMinimumSize, pHash->uSize, pHash->uCount));
+ PVR_TRACE((" empty=%d max=%d", uEmptyCount, uMaxLength));
+}
+#endif
diff --git a/drivers/mcst/gpu-imgtec/services/shared/common/htbuffer.c b/drivers/mcst/gpu-imgtec/services/shared/common/htbuffer.c
new file mode 100644
index 000000000000..827c313c9f2d
--- /dev/null
+++ b/drivers/mcst/gpu-imgtec/services/shared/common/htbuffer.c
@@ -0,0 +1,185 @@
+/*************************************************************************/ /*!
+@File htbuffer.c
+@Title Host Trace Buffer shared API.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Host Trace Buffer provides a mechanism to log Host events to a
+ buffer in a similar way to the Firmware Trace mechanism.
+ Host Trace Buffer logs data using a Transport Layer buffer.
+ The Transport Layer and pvrtld tool provides the mechanism to
+ retrieve the trace data.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/
+
+#include <stdarg.h>
+#include "htbuffer.h"
+#include "osfunc.h"
+#include "client_htbuffer_bridge.h"
+
+/* The group flags array of ints large enough to store all the group flags
+ * NB: This will only work while all logging is in the kernel
+ */
+IMG_INTERNAL HTB_FLAG_EL_T g_auiHTBGroupEnable[HTB_FLAG_NUM_EL] = {0};
+
+
+/*************************************************************************/ /*!
+ @Function HTBControl
+ @Description Update the configuration of the Host Trace Buffer
+ @Input hSrvHandle Server Handle
+ @Input ui32NumFlagGroups Number of group enable flags words
+ @Input aui32GroupEnable Flags words controlling groups to be logged
+ @Input ui32LogLevel Log level to record
+ @Input ui32EnablePID PID to enable logging for a specific process
+ @Input eLogPidMode Enable logging for all or specific processes,
+ @Input eOpMode Control what trace data is dropped if the TL
+ buffer is full
+ @Return eError Internal services call returned eError error
+ number
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBControl(
+ IMG_HANDLE hSrvHandle,
+ IMG_UINT32 ui32NumFlagGroups,
+ IMG_UINT32 * aui32GroupEnable,
+ IMG_UINT32 ui32LogLevel,
+ IMG_UINT32 ui32EnablePID,
+ HTB_LOGMODE_CTRL eLogPidMode,
+ HTB_OPMODE_CTRL eOpMode
+)
+{
+ return BridgeHTBControl(
+ hSrvHandle,
+ ui32NumFlagGroups,
+ aui32GroupEnable,
+ ui32LogLevel,
+ ui32EnablePID,
+ eLogPidMode,
+ eOpMode
+ );
+}
+
+
+/*************************************************************************/ /*!
+*/ /**************************************************************************/ +static PVRSRV_ERROR +_HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT64 ui64TimeStampus, + HTB_LOG_SFids SF, va_list args) +{ +#if defined(__KERNEL__) + IMG_UINT32 i; + IMG_UINT32 ui32NumArgs = HTB_SF_PARAMNUM(SF); +#if defined(__KLOCWORK__) + IMG_UINT32 aui32Args[HTB_LOG_MAX_PARAMS + 1]; // Prevent KW False-positive +#else + IMG_UINT32 aui32Args[HTB_LOG_MAX_PARAMS]; +#endif + + PVR_ASSERT(ui32NumArgs <= HTB_LOG_MAX_PARAMS); + ui32NumArgs = (ui32NumArgs>HTB_LOG_MAX_PARAMS) ? + HTB_LOG_MAX_PARAMS : ui32NumArgs; + + /* unpack var args before sending over bridge */ + for (i=0; i +#else +#include +#endif + +/* The attribute "vector_size" will generate floating point instructions + * and use FPU registers. In kernel OS, the FPU registers might be corrupted + * when CPU is doing context switch because FPU registers are not expected to + * be stored. + * GCC enables compiler option, -mgeneral-regs-only, by default. + * This option restricts the generated code to use general registers only + * so that we don't have issues on that. + */ +#if defined(__KERNEL__) && defined(__clang__) + +#define DEVICE_MEMSETCPY_NON_VECTOR_KM +#if !defined(BITS_PER_BYTE) +#define BITS_PER_BYTE (8) +#endif /* BITS_PER_BYTE */ + +/* Loading or storing 16 or 32 bytes is only supported on 64-bit machines. */ +#if DEVICE_MEMSETCPY_ALIGN_IN_BYTES > 8 +typedef __uint128_t uint128_t; + +typedef struct +{ + uint128_t ui128DataFields[2]; +} +uint256_t; +#endif + +#endif + +/* This file is only intended to be used on platforms which use GCC or Clang, + * due to its requirement on __attribute__((vector_size(n))), typeof() and + * __SIZEOF__ macros. + */ + +#if defined(__GNUC__) + +#ifndef MIN +#define MIN(a, b) \ + ({__typeof(a) _a = (a); __typeof(b) _b = (b); _a > _b ? 
_b : _a;}) +#endif + +#if !defined(DEVICE_MEMSETCPY_ALIGN_IN_BYTES) +#define DEVICE_MEMSETCPY_ALIGN_IN_BYTES __SIZEOF_LONG__ +#endif +#if (DEVICE_MEMSETCPY_ALIGN_IN_BYTES & (DEVICE_MEMSETCPY_ALIGN_IN_BYTES - 1)) != 0 +#error "DEVICE_MEMSETCPY_ALIGN_IN_BYTES must be a power of 2" +#endif +#if DEVICE_MEMSETCPY_ALIGN_IN_BYTES < 4 +#error "DEVICE_MEMSETCPY_ALIGN_IN_BYTES must be equal or greater than 4" +#endif + +#if __SIZEOF_POINTER__ != __SIZEOF_LONG__ +#error No support for architectures where void* and long are sized differently +#endif + +#if __SIZEOF_LONG__ > DEVICE_MEMSETCPY_ALIGN_IN_BYTES +/* Meaningless, and harder to do correctly */ +# error Cannot handle DEVICE_MEMSETCPY_ALIGN_IN_BYTES < sizeof(long) +typedef unsigned long block_t; +#elif __SIZEOF_LONG__ <= DEVICE_MEMSETCPY_ALIGN_IN_BYTES +# if defined(DEVICE_MEMSETCPY_NON_VECTOR_KM) +# if DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 8 + typedef uint64_t block_t; +# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 16 + typedef uint128_t block_t; +# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 32 + typedef uint256_t block_t; +# endif +# else +typedef unsigned int block_t + __attribute__((vector_size(DEVICE_MEMSETCPY_ALIGN_IN_BYTES))); +# endif +# if defined(__arm64__) || defined(__aarch64__) +# if DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 8 +# define DEVICE_MEMSETCPY_ARM64 +# define REGSZ "w" +# define REGCL "w" +# define BVCLB "r" +# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 16 +# define DEVICE_MEMSETCPY_ARM64 +# define REGSZ "x" +# define REGCL "x" +# define BVCLB "r" +# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 32 +# if defined(__ARM_NEON_FP) +# define DEVICE_MEMSETCPY_ARM64 +# define REGSZ "q" +# define REGCL "v" +# define BVCLB "w" +# endif +# endif +# if defined(DEVICE_MEMSETCPY_ARM64) +# if defined(DEVICE_MEMSETCPY_ARM64_NON_TEMPORAL) +# define NSHLD() __asm__ ("dmb nshld") +# define NSHST() __asm__ ("dmb nshst") +# define LDP "ldnp" +# define STP "stnp" +# else +# define NSHLD() +# define NSHST() +# define LDP "ldp" +# define STP 
"stp" +# endif +# if defined(DEVICE_MEMSETCPY_NON_VECTOR_KM) +# if DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 8 +typedef uint32_t block_half_t; +# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 16 +typedef uint64_t block_half_t; +# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 32 +typedef uint128_t block_half_t; +# endif +# else + typedef unsigned int block_half_t + __attribute__((vector_size(DEVICE_MEMSETCPY_ALIGN_IN_BYTES / 2))); +# endif +# endif +# endif +#endif + +__attribute__((visibility("hidden"))) +void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t uSize) +{ + volatile const char *pcSrc = pvSrc; + volatile char *pcDst = pvDst; + size_t uPreambleBytes; + int bBlockCopy = 0; + + size_t uSrcUnaligned = (size_t)pcSrc % sizeof(block_t); + size_t uDstUnaligned = (size_t)pcDst % sizeof(block_t); + + if (!uSrcUnaligned && !uDstUnaligned) + { + /* Neither pointer is unaligned. Optimal case. */ + bBlockCopy = 1; + } + else + { + if (uSrcUnaligned == uDstUnaligned) + { + /* Neither pointer is usefully aligned, but they are misaligned in + * the same way, so we can copy a preamble in a slow way, then + * optimize the rest. + */ + uPreambleBytes = MIN(sizeof(block_t) - uDstUnaligned, uSize); + uSize -= uPreambleBytes; + while (uPreambleBytes) + { + *pcDst++ = *pcSrc++; + uPreambleBytes--; + } + + bBlockCopy = 1; + } + else if ((uSrcUnaligned | uDstUnaligned) % sizeof(int) == 0) + { + /* Both pointers are at least 32-bit aligned, and we assume that + * the processor must handle all kinds of 32-bit load-stores. + * NOTE: Could we optimize this with a non-temporal version? 
+ */ + if (uSize >= sizeof(int)) + { + volatile int *piSrc = (int *)((void *)pcSrc); + volatile int *piDst = (int *)((void *)pcDst); + + while (uSize >= sizeof(int)) + { + *piDst++ = *piSrc++; + uSize -= sizeof(int); + } + + pcSrc = (char *)((void *)piSrc); + pcDst = (char *)((void *)piDst); + } + } + } + + if (bBlockCopy && uSize >= sizeof(block_t)) + { + volatile block_t *pSrc = (block_t *)((void *)pcSrc); + volatile block_t *pDst = (block_t *)((void *)pcDst); + +#if defined(DEVICE_MEMSETCPY_ARM64) + NSHLD(); +#endif + + while (uSize >= sizeof(block_t)) + { +#if defined(DEVICE_MEMSETCPY_ARM64) + __asm__ (LDP " " REGSZ "0, " REGSZ "1, [%[pSrc]]\n\t" + STP " " REGSZ "0, " REGSZ "1, [%[pDst]]" + : + : [pSrc] "r" (pSrc), [pDst] "r" (pDst) + : "memory", REGCL "0", REGCL "1"); +#else + *pDst = *pSrc; +#endif + pDst++; + pSrc++; + uSize -= sizeof(block_t); + } + +#if defined(DEVICE_MEMSETCPY_ARM64) + NSHST(); +#endif + + pcSrc = (char *)((void *)pSrc); + pcDst = (char *)((void *)pDst); + } + + while (uSize) + { + *pcDst++ = *pcSrc++; + uSize--; + } +} + +__attribute__((visibility("hidden"))) +void DeviceMemSet(void *pvDst, unsigned char ui8Value, size_t uSize) +{ + volatile char *pcDst = pvDst; + size_t uPreambleBytes; + + size_t uDstUnaligned = (size_t)pcDst % sizeof(block_t); + + if (uDstUnaligned) + { + uPreambleBytes = MIN(sizeof(block_t) - uDstUnaligned, uSize); + uSize -= uPreambleBytes; + while (uPreambleBytes) + { + *pcDst++ = ui8Value; + uPreambleBytes--; + } + } + + if (uSize >= sizeof(block_t)) + { + volatile block_t *pDst = (block_t *)((void *)pcDst); + size_t i, uBlockSize; +#if defined(DEVICE_MEMSETCPY_ARM64) + typedef block_half_t BLK_t; +#else + typedef block_t BLK_t; +#endif /* defined(DEVICE_MEMSETCPY_ARM64) */ + +#if defined(DEVICE_MEMSETCPY_NON_VECTOR_KM) + BLK_t bValue = 0; + + uBlockSize = sizeof(BLK_t) / sizeof(ui8Value); + + for (i = 0; i < uBlockSize; i++) + { + bValue |= (BLK_t)ui8Value << ((uBlockSize - i - 1) * BITS_PER_BYTE); + } +#else + 
BLK_t bValue = {0}; + + uBlockSize = sizeof(bValue) / sizeof(unsigned int); + for (i = 0; i < uBlockSize; i++) + bValue[i] = ui8Value << 24U | + ui8Value << 16U | + ui8Value << 8U | + ui8Value; +#endif /* defined(DEVICE_MEMSETCPY_NON_VECTOR_KM) */ + +#if defined(DEVICE_MEMSETCPY_ARM64) + NSHLD(); +#endif + + while (uSize >= sizeof(block_t)) + { +#if defined(DEVICE_MEMSETCPY_ARM64) + __asm__ (STP " %" REGSZ "[bValue], %" REGSZ "[bValue], [%[pDst]]" + : + : [bValue] BVCLB (bValue), [pDst] "r" (pDst) + : "memory"); +#else + *pDst = bValue; +#endif + pDst++; + uSize -= sizeof(block_t); + } + +#if defined(DEVICE_MEMSETCPY_ARM64) + NSHST(); +#endif + + pcDst = (char *)((void *)pDst); + } + + while (uSize) + { + *pcDst++ = ui8Value; + uSize--; + } +} + +#endif /* defined(__GNUC__) */ + +/* Potentially very slow (but safe) fallbacks for non-GNU C compilers */ +IMG_INTERNAL +void DeviceMemCopyBytes(void *pvDst, const void *pvSrc, size_t uSize) +{ + volatile const char *pcSrc = pvSrc; + volatile char *pcDst = pvDst; + + while (uSize) + { + *pcDst++ = *pcSrc++; + uSize--; + } +} + +IMG_INTERNAL +void DeviceMemSetBytes(void *pvDst, unsigned char ui8Value, size_t uSize) +{ + volatile char *pcDst = pvDst; + + while (uSize) + { + *pcDst++ = ui8Value; + uSize--; + } +} + +#if !defined(__QNXNTO__) /* Ignore Neutrino as it uses strlcpy */ + +#if defined(__KERNEL__) && defined(LINUX) +/* + * In case of Linux kernel-mode in a debug build, choose the variant + * of StringLCopy that uses strlcpy and logs truncation via a stack dump. + * For Linux kernel-mode in a release build, strlcpy alone is used. + */ +#if defined(DEBUG) +IMG_INTERNAL +size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize) +{ + /* + * Let strlcpy handle any truncation cases correctly. 
+ * We will definitely get a NUL-terminated string set in pszDest + */ + size_t uSrcSize = strlcpy(pszDest, pszSrc, uDataSize); + +#if defined(PVR_DEBUG_STRLCPY) + /* Handle truncation by dumping calling stack if debug allows */ + if (uSrcSize >= uDataSize) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: String truncated Src = '<%s>' %ld bytes, Dest = '%s'", + __func__, pszSrc, (long)uDataSize, pszDest)); + OSDumpStack(); + } +#endif /* defined(PVR_DEBUG_STRLCPY) && defined(DEBUG) */ + + return uSrcSize; +} +#endif /* defined(DEBUG) */ + +#else /* defined(__KERNEL__) && defined(LINUX) */ +/* + * For every other platform, make use of the strnlen and strncpy + * implementation of StringLCopy. + * NOTE: It is crucial to avoid memcpy as this has a hidden side-effect of + * dragging in whatever the build-environment flavour of GLIBC is which can + * cause unexpected failures for host-side command execution. + */ +IMG_INTERNAL +size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize) +{ + size_t uSrcSize = strnlen(pszSrc, uDataSize); + + (void)strncpy(pszDest, pszSrc, uSrcSize); + if (uSrcSize == uDataSize) + { + pszDest[uSrcSize-1] = '\0'; + } + else + { + pszDest[uSrcSize] = '\0'; + } + + return uSrcSize; +} + +#endif /* defined(__KERNEL__) && defined(LINUX) */ + +#endif /* !defined(__QNXNTO__) */ diff --git a/drivers/mcst/gpu-imgtec/services/shared/common/pvrsrv_error.c b/drivers/mcst/gpu-imgtec/services/shared/common/pvrsrv_error.c new file mode 100644 index 000000000000..5cd02a28d5a1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/common/pvrsrv_error.c @@ -0,0 +1,61 @@ +/*************************************************************************/ /*! +@File +@Title Services error support +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Translate a PVRSRV_ERROR code into its symbolic name for diagnostics.
+ * The case labels are generated with an X-macro: pvrsrv_errors.h expands one
+ * PVRE(x) entry per error code, so this table cannot drift out of sync with
+ * the error list. Always returns a valid static string, never NULL.
+ */
+IMG_EXPORT
+const IMG_CHAR *PVRSRVGetErrorString(PVRSRV_ERROR eError)
+{
+	switch (eError)
+	{
+		case PVRSRV_OK:
+			return "PVRSRV_OK";
+#define PVRE(x) \
+		case x: \
+			return #x;
+#include "pvrsrv_errors.h"
+#undef PVRE
+		default:
+			return "Unknown PVRSRV error number";
+	}
+}
diff --git a/drivers/mcst/gpu-imgtec/services/shared/common/ra.c b/drivers/mcst/gpu-imgtec/services/shared/common/ra.c
new file mode 100644
index 000000000000..d9a679589978
--- /dev/null
+++ b/drivers/mcst/gpu-imgtec/services/shared/common/ra.c
@@ -0,0 +1,1589 @@
+/*************************************************************************/ /*!
+@File
+@Title          Resource Allocator
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +@Description + Implements generic resource allocation. The resource allocator was originally + intended to manage address spaces. In practice the resource allocator is + generic and can manage arbitrary sets of integers. + + Resources are allocated from arenas. Arenas can be created with an initial + span of resources. Further resources spans can be added to arenas. A + callback mechanism allows an arena to request further resource spans on + demand. + + Each arena maintains an ordered list of resource segments each described by a + boundary tag. Each boundary tag describes a segment of resources which are + either 'free', available for allocation, or 'busy' currently allocated. + Adjacent 'free' segments are always coalesced to avoid fragmentation. 
+ + For allocation, all 'free' segments are kept on lists of 'free' segments in + a table index by pvr_log2(segment size) i.e., each table index n holds 'free' + segments in the size range 2^n -> 2^(n+1) - 1. + + Allocation policy is based on an *almost* good fit strategy. + + Allocated segments are inserted into a self-scaling hash table which maps + the base resource of the span to the relevant boundary tag. This allows the + code to get back to the boundary tag without exporting explicit boundary tag + references through the API. + + Each arena has an associated quantum size, all allocations from the arena are + made in multiples of the basic quantum. + + On resource exhaustion in an arena, a callback if provided will be used to + request further resources. Resource spans allocated by the callback mechanism + will be returned when freed (through one of the two callbacks). +*/ /**************************************************************************/ + +/* Issues: + * - flags, flags are passed into the resource allocator but are not currently used. + * - determination, of import size, is currently braindead. + * - debug code should be moved out to own module and #ifdef'd + */ + +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" +#include "uniq_key_splay_tree.h" + +#include "hash.h" +#include "ra.h" +#include "pvrsrv_memallocflags.h" + +#include "osfunc.h" +#include "allocmem.h" +#include "lock.h" +#include "pvr_intrinsics.h" + +/* The initial, and minimum size of the live address -> boundary tag structure + * hash table. The value 64 is a fairly arbitrary choice. The hash table + * resizes on demand so the value chosen is not critical. 
+ */ +#define MINIMUM_HASH_SIZE (64) + + +/* #define RA_VALIDATE */ + +#if defined(__KLOCWORK__) + /* Make sure Klocwork analyses all the code (including the debug one) */ + #if !defined(RA_VALIDATE) + #define RA_VALIDATE + #endif +#endif + +#if !defined(PVRSRV_NEED_PVR_ASSERT) || !defined(RA_VALIDATE) +/* Disable the asserts unless explicitly told otherwise. + * They slow the driver too much for other people + */ + +#undef PVR_ASSERT +/* Use a macro that really do not do anything when compiling in release + * mode! + */ +#define PVR_ASSERT(x) +#endif + +/* boundary tags, used to describe a resource segment */ +struct _BT_ +{ + enum bt_type + { + btt_free, /* free resource segment */ + btt_live /* allocated resource segment */ + } type; + + unsigned int is_leftmost; + unsigned int is_rightmost; + unsigned int free_import; + + /* The base resource and extent of this segment */ + RA_BASE_T base; + RA_LENGTH_T uSize; + + /* doubly linked ordered list of all segments within the arena */ + struct _BT_ *pNextSegment; + struct _BT_ *pPrevSegment; + + /* doubly linked un-ordered list of free segments with the same flags. 
*/ + struct _BT_ *next_free; + struct _BT_ *prev_free; + + /* A user reference associated with this span, user references are + * currently only provided in the callback mechanism + */ + IMG_HANDLE hPriv; + + /* Flags to match on this span */ + IMG_UINT32 uFlags; + +}; +typedef struct _BT_ BT; + + +/* resource allocation arena */ +struct _RA_ARENA_ +{ + /* arena name for diagnostics output */ + IMG_CHAR *name; + + /* allocations within this arena are quantum sized */ + RA_LENGTH_T uQuantum; + + /* import interface, if provided */ + PVRSRV_ERROR (*pImportAlloc)(RA_PERARENA_HANDLE h, + RA_LENGTH_T uSize, + IMG_UINT32 uFlags, + const IMG_CHAR *pszAnnotation, + RA_BASE_T *pBase, + RA_LENGTH_T *pActualSize, + RA_PERISPAN_HANDLE *phPriv); + void (*pImportFree) (RA_PERARENA_HANDLE, + RA_BASE_T, + RA_PERISPAN_HANDLE hPriv); + + /* Arbitrary handle provided by arena owner to be passed into the + * import alloc and free hooks + */ + void *pImportHandle; + + IMG_PSPLAY_TREE per_flags_buckets; + + /* resource segment list */ + BT *pHeadSegment; + + /* segment address to boundary tag hash table */ + HASH_TABLE *pSegmentHash; + + /* Lock for this arena */ + POS_LOCK hLock; + + /* LockClass of this arena. This is used within lockdep to decide if a + * recursive call sequence with the same lock class is allowed or not. + */ + IMG_UINT32 ui32LockClass; + + /* If TRUE, imports will not be split up. Allocations will always get + * their own import + */ + IMG_BOOL bNoSplit; +}; + +/*************************************************************************/ /*! +@Function _RequestAllocFail +@Description Default callback allocator used if no callback is specified, + always fails to allocate further resources to the arena. 
+@Input _h - callback handle +@Input _uSize - requested allocation size +@Input _uflags - allocation flags +@Input _pBase - receives allocated base +@Output _pActualSize - actual allocation size +@Input _pRef - user reference +@Return PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL, this function always fails + to allocate. +*/ /**************************************************************************/ +static PVRSRV_ERROR +_RequestAllocFail(RA_PERARENA_HANDLE _h, + RA_LENGTH_T _uSize, + IMG_UINT32 _uFlags, + const IMG_CHAR *_pszAnnotation, + RA_BASE_T *_pBase, + RA_LENGTH_T *_pActualSize, + RA_PERISPAN_HANDLE *_phPriv) +{ + PVR_UNREFERENCED_PARAMETER(_h); + PVR_UNREFERENCED_PARAMETER(_uSize); + PVR_UNREFERENCED_PARAMETER(_pActualSize); + PVR_UNREFERENCED_PARAMETER(_phPriv); + PVR_UNREFERENCED_PARAMETER(_uFlags); + PVR_UNREFERENCED_PARAMETER(_pBase); + PVR_UNREFERENCED_PARAMETER(_pszAnnotation); + + return PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL; +} + + +#if defined(PVR_CTZLL) + /* Make sure to trigger an error if someone change the buckets or the bHasEltsMapping size + the bHasEltsMapping is used to quickly determine the smallest bucket containing elements. + therefore it must have at least as many bits has the buckets array have buckets. The RA + implementation actually uses one more bit. */ + static_assert(ARRAY_SIZE(((IMG_PSPLAY_TREE)0)->buckets) + < 8 * sizeof(((IMG_PSPLAY_TREE) 0)->bHasEltsMapping), + "Too many buckets for bHasEltsMapping bitmap"); +#endif + + +/*************************************************************************/ /*! +@Function pvr_log2 +@Description Computes the floor of the log base 2 of a unsigned integer +@Input n Unsigned integer +@Return Floor(Log2(n)) +*/ /**************************************************************************/ +#if defined(PVR_CLZLL) +/* make sure to trigger a problem if someone changes the RA_LENGTH_T type + indeed the __builtin_clzll is for unsigned long long variables. 
+ + if someone changes RA_LENGTH to unsigned long, then use __builtin_clzl + if it changes to unsigned int, use __builtin_clz + + if it changes for something bigger than unsigned long long, + then revert the pvr_log2 to the classic implementation */ +static_assert(sizeof(RA_LENGTH_T) == sizeof(unsigned long long), + "RA log routines not tuned for sizeof(RA_LENGTH_T)"); + +static inline IMG_UINT32 pvr_log2(RA_LENGTH_T n) +{ + PVR_ASSERT(n != 0); /* Log2 is not defined on 0 */ + + return (8 * sizeof(RA_LENGTH_T)) - 1 - PVR_CLZLL(n); +} +#else +static IMG_UINT32 +pvr_log2(RA_LENGTH_T n) +{ + IMG_UINT32 l = 0; + + PVR_ASSERT(n != 0); /* Log2 is not defined on 0 */ + + n>>=1; + while (n>0) + { + n>>=1; + l++; + } + return l; +} +#endif + + +#if defined(RA_VALIDATE) +/*************************************************************************/ /*! +@Function _IsInSegmentList +@Description Tests if a BT is in the segment list. +@Input pArena The arena. +@Input pBT The boundary tag to look for. +@Return IMG_FALSE BT was not in the arena's segment list. + IMG_TRUE BT was in the arena's segment list. +*/ /**************************************************************************/ +static IMG_BOOL +_IsInSegmentList(RA_ARENA *pArena, BT *pBT) +{ + BT* pBTScan; + + PVR_ASSERT(pArena != NULL); + PVR_ASSERT(pBT != NULL); + + /* Walk the segment list until we see the BT pointer... */ + pBTScan = pArena->pHeadSegment; + while (pBTScan != NULL && pBTScan != pBT) + { + pBTScan = pBTScan->pNextSegment; + } + + /* Test if we found it and then return */ + return (pBTScan == pBT); +} + +/*************************************************************************/ /*! +@Function _IsInFreeList +@Description Tests if a BT is in the free list. +@Input pArena The arena. +@Input pBT The boundary tag to look for. +@Return IMG_FALSE BT was not in the arena's free list. + IMG_TRUE BT was in the arena's free list. 
+*/ /**************************************************************************/ +static IMG_BOOL +_IsInFreeList(RA_ARENA *pArena, BT *pBT) +{ + BT* pBTScan; + IMG_UINT32 uIndex; + + PVR_ASSERT(pArena != NULL); + PVR_ASSERT(pBT != NULL); + + /* Look for the free list that holds BTs of this size... */ + uIndex = pvr_log2(pBT->uSize); + PVR_ASSERT(uIndex < FREE_TABLE_LIMIT); + + pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets); + if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->flags != pBT->uFlags)) + { + return 0; + } + else + { + pBTScan = pArena->per_flags_buckets->buckets[uIndex]; + while (pBTScan != NULL && pBTScan != pBT) + { + pBTScan = pBTScan->next_free; + } + + /* Test if we found it and then return */ + return (pBTScan == pBT); + } +} + +/* is_arena_valid should only be used in debug mode. + * It checks that some properties an arena must have are verified + */ +static int is_arena_valid(struct _RA_ARENA_ *arena) +{ + struct _BT_ *chunk; +#if defined(PVR_CTZLL) + unsigned int i; +#endif + + for (chunk = arena->pHeadSegment; chunk != NULL; chunk = chunk->pNextSegment) + { + /* if next segment is NULL, then it must be a rightmost */ + PVR_ASSERT((chunk->pNextSegment != NULL) || (chunk->is_rightmost)); + /* if prev segment is NULL, then it must be a leftmost */ + PVR_ASSERT((chunk->pPrevSegment != NULL) || (chunk->is_leftmost)); + + if (chunk->type == btt_free) + { + /* checks the correctness of the type field */ + PVR_ASSERT(_IsInFreeList(arena, chunk)); + + /* check that there can't be two consecutive free chunks. + Indeed, instead of having two consecutive free chunks, + there should be only one that span the size of the two. 
*/ + PVR_ASSERT((chunk->is_leftmost) || (chunk->pPrevSegment->type != btt_free)); + PVR_ASSERT((chunk->is_rightmost) || (chunk->pNextSegment->type != btt_free)); + } + else + { + /* checks the correctness of the type field */ + PVR_ASSERT(!_IsInFreeList(arena, chunk)); + } + + PVR_ASSERT((chunk->is_leftmost) || (chunk->pPrevSegment->base + chunk->pPrevSegment->uSize == chunk->base)); + PVR_ASSERT((chunk->is_rightmost) || (chunk->base + chunk->uSize == chunk->pNextSegment->base)); + + /* all segments of the same imports must have the same flags ... */ + PVR_ASSERT((chunk->is_rightmost) || (chunk->uFlags == chunk->pNextSegment->uFlags)); + /* ... and the same import handle */ + PVR_ASSERT((chunk->is_rightmost) || (chunk->hPriv == chunk->pNextSegment->hPriv)); + + + /* if a free chunk spans a whole import, then it must be an 'not to free import'. + Otherwise it should have been freed. */ + PVR_ASSERT((!chunk->is_leftmost) || (!chunk->is_rightmost) || (chunk->type == btt_live) || (!chunk->free_import)); + } + +#if defined(PVR_CTZLL) + if (arena->per_flags_buckets != NULL) + { + for (i = 0; i < FREE_TABLE_LIMIT; ++i) + { + /* verify that the bHasEltsMapping is correct for this flags bucket */ + PVR_ASSERT( + ((arena->per_flags_buckets->buckets[i] == NULL) && + (((arena->per_flags_buckets->bHasEltsMapping & ((IMG_ELTS_MAPPINGS) 1 << i)) == 0))) + || + ((arena->per_flags_buckets->buckets[i] != NULL) && + (((arena->per_flags_buckets->bHasEltsMapping & ((IMG_ELTS_MAPPINGS) 1 << i)) != 0))) + ); + } + } +#endif + + /* if arena was not valid, an earlier assert should have triggered */ + return 1; +} +#endif + +/*************************************************************************/ /*! +@Function _SegmentListInsertAfter +@Description Insert a boundary tag into an arena segment list after a + specified boundary tag. +@Input pInsertionPoint The insertion point. +@Input pBT The boundary tag to insert. 
+*/ /**************************************************************************/ +static INLINE void +_SegmentListInsertAfter(BT *pInsertionPoint, + BT *pBT) +{ + PVR_ASSERT(pBT != NULL); + PVR_ASSERT(pInsertionPoint != NULL); + + pBT->pNextSegment = pInsertionPoint->pNextSegment; + pBT->pPrevSegment = pInsertionPoint; + if (pInsertionPoint->pNextSegment != NULL) + { + pInsertionPoint->pNextSegment->pPrevSegment = pBT; + } + pInsertionPoint->pNextSegment = pBT; +} + +/*************************************************************************/ /*! +@Function _SegmentListInsert +@Description Insert a boundary tag into an arena segment list +@Input pArena The arena. +@Input pBT The boundary tag to insert. +*/ /**************************************************************************/ +static INLINE void +_SegmentListInsert(RA_ARENA *pArena, BT *pBT) +{ + PVR_ASSERT(!_IsInSegmentList(pArena, pBT)); + + /* insert into the segment chain */ + pBT->pNextSegment = pArena->pHeadSegment; + pArena->pHeadSegment = pBT; + if (pBT->pNextSegment != NULL) + { + pBT->pNextSegment->pPrevSegment = pBT; + } + + pBT->pPrevSegment = NULL; +} + +/*************************************************************************/ /*! +@Function _SegmentListRemove +@Description Remove a boundary tag from an arena segment list. +@Input pArena The arena. +@Input pBT The boundary tag to remove. +*/ /**************************************************************************/ +static void +_SegmentListRemove(RA_ARENA *pArena, BT *pBT) +{ + PVR_ASSERT(_IsInSegmentList(pArena, pBT)); + + if (pBT->pPrevSegment == NULL) + pArena->pHeadSegment = pBT->pNextSegment; + else + pBT->pPrevSegment->pNextSegment = pBT->pNextSegment; + + if (pBT->pNextSegment != NULL) + pBT->pNextSegment->pPrevSegment = pBT->pPrevSegment; +} + + +/*************************************************************************/ /*! +@Function _BuildBT +@Description Construct a boundary tag for a free segment. 
+@Input          base      The base of the resource segment.
+@Input          uSize     The extent of the resource segment.
+@Input          uFlags    The flags to give to the boundary tag
+@Return         Boundary tag or NULL
+*/ /**************************************************************************/
+static BT *
+_BuildBT(RA_BASE_T base, RA_LENGTH_T uSize, RA_FLAGS_T uFlags)
+{
+	BT *pBT;
+
+	pBT = OSAllocZMem(sizeof(BT));
+	if (pBT == NULL)
+	{
+		return NULL;
+	}
+
+	/* A fresh BT spans its whole import until it is split, hence it is
+	 * both the leftmost and rightmost segment of that import.
+	 */
+	pBT->is_leftmost = 1;
+	pBT->is_rightmost = 1;
+	/* pBT->free_import = 0; */ /* already zeroed by OSAllocZMem */
+	pBT->type = btt_live;
+	pBT->base = base;
+	pBT->uSize = uSize;
+	pBT->uFlags = uFlags;
+
+	return pBT;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _SegmentSplit
+@Description    Split a segment into two, maintain the arena segment list. The
+                boundary tag should not be in the free table. Neither the
+                original or the new neighbour boundary tag will be in the free
+                table.
+@Input          pBT       The boundary tag to split.
+@Input          uSize     The required segment size of boundary tag after
+                          splitting.
+@Return         New neighbour boundary tag or NULL.
+*/ /**************************************************************************/
+static BT *
+_SegmentSplit(BT *pBT, RA_LENGTH_T uSize)
+{
+	BT *pNeighbour;
+
+	/* The neighbour covers the tail [base + uSize, base + old uSize). */
+	pNeighbour = _BuildBT(pBT->base + uSize, pBT->uSize - uSize, pBT->uFlags);
+	if (pNeighbour == NULL)
+	{
+		return NULL;
+	}
+
+	_SegmentListInsertAfter(pBT, pNeighbour);
+
+	/* The neighbour inherits pBT's right edge and per-import bookkeeping
+	 * (free_import, hPriv, uFlags); pBT keeps the left edge and shrinks
+	 * to uSize.
+	 */
+	pNeighbour->is_leftmost = 0;
+	pNeighbour->is_rightmost = pBT->is_rightmost;
+	pNeighbour->free_import = pBT->free_import;
+	pBT->is_rightmost = 0;
+	pNeighbour->hPriv = pBT->hPriv;
+	pBT->uSize = uSize;
+	pNeighbour->uFlags = pBT->uFlags;
+
+	return pNeighbour;
+}
+
+/*************************************************************************/ /*!
+@Function       _FreeListInsert
+@Description    Insert a boundary tag into an arena free table.
+@Input          pArena    The arena.
+@Input          pBT       The boundary tag.
+*/ /**************************************************************************/ +static void +_FreeListInsert(RA_ARENA *pArena, BT *pBT) +{ + IMG_UINT32 uIndex; + uIndex = pvr_log2(pBT->uSize); + + PVR_ASSERT(uIndex < FREE_TABLE_LIMIT); + PVR_ASSERT(!_IsInFreeList(pArena, pBT)); + + pBT->type = btt_free; + + pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets); + /* the flags item in the splay tree must have been created before-hand by + _InsertResource */ + PVR_ASSERT(pArena->per_flags_buckets != NULL); + PVR_ASSERT(pArena->per_flags_buckets->buckets != NULL); + + /* Handle NULL values for RELEASE builds and/or disabled ASSERT DEBUG builds */ + if (unlikely((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->buckets == NULL))) + { + return; + } + pBT->next_free = pArena->per_flags_buckets->buckets[uIndex]; + if (pBT->next_free != NULL) + { + pBT->next_free->prev_free = pBT; + } + pBT->prev_free = NULL; + pArena->per_flags_buckets->buckets[uIndex] = pBT; + +#if defined(PVR_CTZLL) + /* tells that bucket[index] now contains elements */ + pArena->per_flags_buckets->bHasEltsMapping |= ((IMG_ELTS_MAPPINGS) 1 << uIndex); +#endif + +} + +/*************************************************************************/ /*! +@Function _FreeListRemove +@Description Remove a boundary tag from an arena free table. +@Input pArena The arena. +@Input pBT The boundary tag. 
+*/ /**************************************************************************/ +static void +_FreeListRemove(RA_ARENA *pArena, BT *pBT) +{ + IMG_UINT32 uIndex; + uIndex = pvr_log2(pBT->uSize); + + PVR_ASSERT(uIndex < FREE_TABLE_LIMIT); + PVR_ASSERT(_IsInFreeList(pArena, pBT)); + + if (pBT->next_free != NULL) + { + pBT->next_free->prev_free = pBT->prev_free; + } + + if (pBT->prev_free != NULL) + { + pBT->prev_free->next_free = pBT->next_free; + } + else + { + pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets); + /* the flags item in the splay tree must have already been created + (otherwise how could there be a segment with these flags */ + PVR_ASSERT(pArena->per_flags_buckets != NULL); + PVR_ASSERT(pArena->per_flags_buckets->buckets != NULL); + + /* Handle unlikely NULL values for RELEASE or ASSERT-disabled builds */ + if (unlikely((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->buckets == NULL))) + { + pBT->type = btt_live; + return; + } + + pArena->per_flags_buckets->buckets[uIndex] = pBT->next_free; +#if defined(PVR_CTZLL) + if (pArena->per_flags_buckets->buckets[uIndex] == NULL) + { + /* there is no more elements in this bucket. Update the mapping. */ + pArena->per_flags_buckets->bHasEltsMapping &= ~((IMG_ELTS_MAPPINGS) 1 << uIndex); + } +#endif + } + + PVR_ASSERT(!_IsInFreeList(pArena, pBT)); + pBT->type = btt_live; +} + + +/*************************************************************************/ /*! +@Function _InsertResource +@Description Add a free resource segment to an arena. +@Input pArena The arena. +@Input base The base of the resource segment. +@Input uSize The extent of the resource segment. +@Input uFlags The flags of the new resources. 
+@Return New bucket pointer + NULL on failure +*/ /**************************************************************************/ +static BT * +_InsertResource(RA_ARENA *pArena, RA_BASE_T base, RA_LENGTH_T uSize, + RA_FLAGS_T uFlags) +{ + BT *pBT; + PVR_ASSERT(pArena!=NULL); + + pBT = _BuildBT(base, uSize, uFlags); + + if (pBT != NULL) + { + IMG_PSPLAY_TREE tmp = PVRSRVInsert(pBT->uFlags, pArena->per_flags_buckets); + if (tmp == NULL) + { + OSFreeMem(pBT); + return NULL; + } + + pArena->per_flags_buckets = tmp; + _SegmentListInsert(pArena, pBT); + _FreeListInsert(pArena, pBT); + } + return pBT; +} + +/*************************************************************************/ /*! +@Function _InsertResourceSpan +@Description Add a free resource span to an arena, marked for free_import. +@Input pArena The arena. +@Input base The base of the resource segment. +@Input uSize The extent of the resource segment. +@Return The boundary tag representing the free resource segment, + or NULL on failure. +*/ /**************************************************************************/ +static INLINE BT * +_InsertResourceSpan(RA_ARENA *pArena, + RA_BASE_T base, + RA_LENGTH_T uSize, + RA_FLAGS_T uFlags) +{ + BT *pBT = _InsertResource(pArena, base, uSize, uFlags); + if (pBT != NULL) + { + pBT->free_import = 1; + } + return pBT; +} + + +/*************************************************************************/ /*! +@Function _RemoveResourceSpan +@Description Frees a resource span from an arena, returning the imported + span via the callback. +@Input pArena The arena. +@Input pBT The boundary tag to free. 
+@Return IMG_FALSE failure - span was still in use + IMG_TRUE success - span was removed and returned +*/ /**************************************************************************/ +static INLINE IMG_BOOL +_RemoveResourceSpan(RA_ARENA *pArena, BT *pBT) +{ + PVR_ASSERT(pArena!=NULL); + PVR_ASSERT(pBT!=NULL); + + if (pBT->free_import && + pBT->is_leftmost && + pBT->is_rightmost) + { + _SegmentListRemove(pArena, pBT); + pArena->pImportFree(pArena->pImportHandle, pBT->base, pBT->hPriv); + OSFreeMem(pBT); + + return IMG_TRUE; + } + + return IMG_FALSE; +} + + +/*************************************************************************/ /*! +@Function _FreeBT +@Description Free a boundary tag taking care of the segment list and the + boundary tag free table. +@Input pArena The arena. +@Input pBT The boundary tag to free. +*/ /**************************************************************************/ +static void +_FreeBT(RA_ARENA *pArena, BT *pBT) +{ + BT *pNeighbour; + + PVR_ASSERT(pArena!=NULL); + PVR_ASSERT(pBT!=NULL); + PVR_ASSERT(!_IsInFreeList(pArena, pBT)); + + /* try and coalesce with left neighbour */ + pNeighbour = pBT->pPrevSegment; + if ((!pBT->is_leftmost) && (pNeighbour->type == btt_free)) + { + /* Sanity check. 
*/ + PVR_ASSERT(pNeighbour->base + pNeighbour->uSize == pBT->base); + + _FreeListRemove(pArena, pNeighbour); + _SegmentListRemove(pArena, pNeighbour); + pBT->base = pNeighbour->base; + + pBT->uSize += pNeighbour->uSize; + pBT->is_leftmost = pNeighbour->is_leftmost; + OSFreeMem(pNeighbour); + } + + /* try to coalesce with right neighbour */ + pNeighbour = pBT->pNextSegment; + if ((!pBT->is_rightmost) && (pNeighbour->type == btt_free)) + { + /* sanity check */ + PVR_ASSERT(pBT->base + pBT->uSize == pNeighbour->base); + + _FreeListRemove(pArena, pNeighbour); + _SegmentListRemove(pArena, pNeighbour); + pBT->uSize += pNeighbour->uSize; + pBT->is_rightmost = pNeighbour->is_rightmost; + OSFreeMem(pNeighbour); + } + + if (_RemoveResourceSpan(pArena, pBT) == IMG_FALSE) + { + _FreeListInsert(pArena, pBT); + PVR_ASSERT((!pBT->is_rightmost) || (!pBT->is_leftmost) || (!pBT->free_import)); + } + + PVR_ASSERT(is_arena_valid(pArena)); +} + + +/* + This function returns the first element in a bucket that can be split + in a way that one of the sub-segments can meet the size and alignment + criteria. + + The first_elt is the bucket to look into. Remember that a bucket is + implemented as a pointer to the first element of the linked list. + + nb_max_try is used to limit the number of elements considered. + This is used to only consider the first nb_max_try elements in the + free-list. The special value ~0 is used to say unlimited i.e. consider + all elements in the free list + */ +static INLINE +struct _BT_ *find_chunk_in_bucket(struct _BT_ * first_elt, + RA_LENGTH_T uSize, + RA_LENGTH_T uAlignment, + unsigned int nb_max_try) +{ + struct _BT_ *walker; + + for (walker = first_elt; (walker != NULL) && (nb_max_try != 0); walker = walker->next_free) + { + const RA_BASE_T aligned_base = (uAlignment > 1) ? 
+ (walker->base + uAlignment - 1) & ~(uAlignment - 1) + : walker->base; + + if (walker->base + walker->uSize >= aligned_base + uSize) + { + return walker; + } + + /* 0xFFFF...FFFF is used has nb_max_try = infinity. */ + if (nb_max_try != (unsigned int) ~0) + { + nb_max_try--; + } + } + + return NULL; +} + + +/*************************************************************************/ /*! +@Function _AttemptAllocAligned +@Description Attempt an allocation from an arena. +@Input pArena The arena. +@Input uSize The requested allocation size. +@Input uFlags Allocation flags +@Output phPriv The user references associated with + the imported segment. (optional) +@Input uAlignment Required uAlignment, or 0. + Must be a power of 2 if not 0 +@Output base Allocated resource base (non-optional, must not + be NULL) +@Return IMG_FALSE failure + IMG_TRUE success +*/ /**************************************************************************/ +static IMG_BOOL +_AttemptAllocAligned(RA_ARENA *pArena, + RA_LENGTH_T uSize, + IMG_UINT32 uFlags, + RA_LENGTH_T uAlignment, + RA_BASE_T *base, + RA_PERISPAN_HANDLE *phPriv) /* this is the "per-import" private data */ +{ + + IMG_UINT32 index_low; + IMG_UINT32 index_high; + IMG_UINT32 i; + struct _BT_ *pBT = NULL; + RA_BASE_T aligned_base; + + PVR_ASSERT(pArena!=NULL); + PVR_ASSERT(base != NULL); + + pArena->per_flags_buckets = PVRSRVSplay(uFlags, pArena->per_flags_buckets); + if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->ui32Flags != uFlags)) + { + /* no chunks with these flags. 
*/ + return IMG_FALSE; + } + + index_low = pvr_log2(uSize); + index_high = pvr_log2(uSize + uAlignment - 1); + + PVR_ASSERT(index_low < FREE_TABLE_LIMIT); + PVR_ASSERT(index_high < FREE_TABLE_LIMIT); + PVR_ASSERT(index_low <= index_high); + +#if defined(PVR_CTZLL) + i = PVR_CTZLL((~(((IMG_ELTS_MAPPINGS)1 << (index_high + 1)) - 1)) & pArena->per_flags_buckets->bHasEltsMapping); +#else + for (i = index_high + 1; (i < FREE_TABLE_LIMIT) && (pArena->per_flags_buckets->buckets[i] == NULL); ++i) + { + } +#endif + PVR_ASSERT(i <= FREE_TABLE_LIMIT); + + if (i != FREE_TABLE_LIMIT) + { + /* since we start at index_high + 1, we are guaranteed to exit */ + pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, 1); + } + else + { + for (i = index_high; (i != index_low - 1) && (pBT == NULL); --i) + { + pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, (unsigned int) ~0); + } + } + + if (pBT == NULL) + { + return IMG_FALSE; + } + + aligned_base = (uAlignment > 1) ? 
(pBT->base + uAlignment - 1) & ~(uAlignment - 1) : pBT->base; + + _FreeListRemove(pArena, pBT); + + if (pArena->bNoSplit) + { + goto nosplit; + } + + /* with uAlignment we might need to discard the front of this segment */ + if (aligned_base > pBT->base) + { + BT *pNeighbour; + pNeighbour = _SegmentSplit(pBT, (RA_LENGTH_T)(aligned_base - pBT->base)); + /* partition the buffer, create a new boundary tag */ + if (pNeighbour == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Front split failed", __func__)); + /* Put pBT back in the list */ + _FreeListInsert(pArena, pBT); + return IMG_FALSE; + } + + _FreeListInsert(pArena, pBT); + pBT = pNeighbour; + } + + /* the segment might be too big, if so, discard the back of the segment */ + if (pBT->uSize > uSize) + { + BT *pNeighbour; + pNeighbour = _SegmentSplit(pBT, uSize); + /* partition the buffer, create a new boundary tag */ + if (pNeighbour == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Back split failed", __func__)); + /* Put pBT back in the list */ + _FreeListInsert(pArena, pBT); + return IMG_FALSE; + } + + _FreeListInsert(pArena, pNeighbour); + } +nosplit: + pBT->type = btt_live; + + if (!HASH_Insert_Extended(pArena->pSegmentHash, &aligned_base, (uintptr_t)pBT)) + { + _FreeBT(pArena, pBT); + return IMG_FALSE; + } + + if (phPriv != NULL) + *phPriv = pBT->hPriv; + + *base = aligned_base; + + return IMG_TRUE; +} + + + +/*************************************************************************/ /*! +@Function RA_Create +@Description To create a resource arena. +@Input name The name of the arena for diagnostic purposes. +@Input ulog2Quantum The arena allocation quantum. +@Input ui32LockClass the lock class level this arena uses +@Input imp_alloc A resource allocation callback or 0. +@Input imp_free A resource de-allocation callback or 0. +@Input arena_handle Handle passed to alloc and free or 0. +@Input bNoSplit Disable splitting up imports. +@Return arena handle, or NULL. 
+*/ /**************************************************************************/ +IMG_INTERNAL RA_ARENA * +RA_Create(IMG_CHAR *name, + RA_LOG2QUANTUM_T uLog2Quantum, + IMG_UINT32 ui32LockClass, + PVRSRV_ERROR (*imp_alloc)(RA_PERARENA_HANDLE h, + RA_LENGTH_T uSize, + RA_FLAGS_T _flags, + const IMG_CHAR *pszAnnotation, + /* returned data */ + RA_BASE_T *pBase, + RA_LENGTH_T *pActualSize, + RA_PERISPAN_HANDLE *phPriv), + void (*imp_free) (RA_PERARENA_HANDLE, + RA_BASE_T, + RA_PERISPAN_HANDLE), + RA_PERARENA_HANDLE arena_handle, + IMG_BOOL bNoSplit) +{ + RA_ARENA *pArena; + PVRSRV_ERROR eError; + + if (name == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter 'name' (NULL not accepted)", __func__)); + return NULL; + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s'", __func__, name)); + + pArena = OSAllocMem(sizeof(*pArena)); + if (pArena == NULL) + { + goto arena_fail; + } + + eError = OSLockCreate(&pArena->hLock); + if (eError != PVRSRV_OK) + { + goto lock_fail; + } + + pArena->pSegmentHash = HASH_Create_Extended(MINIMUM_HASH_SIZE, sizeof(RA_BASE_T), HASH_Func_Default, HASH_Key_Comp_Default); + + if (pArena->pSegmentHash==NULL) + { + goto hash_fail; + } + + pArena->name = name; + pArena->pImportAlloc = (imp_alloc!=NULL) ? imp_alloc : &_RequestAllocFail; + pArena->pImportFree = imp_free; + pArena->pImportHandle = arena_handle; + pArena->pHeadSegment = NULL; + pArena->uQuantum = 1ULL << uLog2Quantum; + pArena->per_flags_buckets = NULL; + pArena->ui32LockClass = ui32LockClass; + pArena->bNoSplit = bNoSplit; + + PVR_ASSERT(is_arena_valid(pArena)); + return pArena; + +hash_fail: + OSLockDestroy(pArena->hLock); +lock_fail: + OSFreeMem(pArena); + /* not nulling pointer, out of scope */ +arena_fail: + return NULL; +} + +/*************************************************************************/ /*! +@Function RA_Delete +@Description To delete a resource arena. All resources allocated from + the arena must be freed before deleting the arena. 
+@Input pArena The arena to delete. +*/ /**************************************************************************/ +IMG_INTERNAL void +RA_Delete(RA_ARENA *pArena) +{ + IMG_UINT32 uIndex; + IMG_BOOL bWarn = IMG_TRUE; + + PVR_ASSERT(pArena != NULL); + + if (pArena == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter - pArena", __func__)); + return; + } + + PVR_ASSERT(is_arena_valid(pArena)); + + PVR_DPF((PVR_DBG_MESSAGE, + "%s: name='%s'", __func__, pArena->name)); + + while (pArena->pHeadSegment != NULL) + { + BT *pBT = pArena->pHeadSegment; + + if (pBT->type != btt_free) + { + if (bWarn) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Allocations still exist in the arena that is being destroyed", __func__)); + PVR_DPF((PVR_DBG_ERROR, "%s: Likely Cause: client drivers not freeing allocations before destroying devmem context", __func__)); + PVR_DPF((PVR_DBG_ERROR, "%s: base = 0x%llx size=0x%llx", __func__, + (unsigned long long)pBT->base, (unsigned long long)pBT->uSize)); + PVR_DPF((PVR_DBG_ERROR, "%s: This warning will be issued only once for the first allocation found!", __func__)); + bWarn = IMG_FALSE; + } + } + else + { + _FreeListRemove(pArena, pBT); + } + + _SegmentListRemove(pArena, pBT); + OSFreeMem(pBT); + /* not nulling original pointer, it has changed */ + } + + while (pArena->per_flags_buckets != NULL) + { + for (uIndex=0; uIndexper_flags_buckets->buckets[uIndex] == NULL); + } + + pArena->per_flags_buckets = PVRSRVDelete(pArena->per_flags_buckets->ui32Flags, pArena->per_flags_buckets); + } + + HASH_Delete(pArena->pSegmentHash); + OSLockDestroy(pArena->hLock); + OSFreeMem(pArena); + /* not nulling pointer, copy on stack */ +} + +/*************************************************************************/ /*! +@Function RA_Add +@Description To add a resource span to an arena. The span must not + overlap with any span previously added to the arena. +@Input pArena The arena to add a span into. +@Input base The base of the span. 
+@Input uSize The extent of the span. +@Input uFlags the flags of the new import +@Input hPriv a private handle associate to the span. + (reserved for user) +@Return IMG_TRUE - Success + IMG_FALSE - failure +*/ /**************************************************************************/ +IMG_INTERNAL IMG_BOOL +RA_Add(RA_ARENA *pArena, + RA_BASE_T base, + RA_LENGTH_T uSize, + RA_FLAGS_T uFlags, + RA_PERISPAN_HANDLE hPriv) +{ + struct _BT_* bt; + PVR_ASSERT(pArena != NULL); + PVR_ASSERT(uSize != 0); + + if (pArena == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter - pArena", __func__)); + return IMG_FALSE; + } + + if (uSize == 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: invalid size 0 added to arena %s", __func__, pArena->name)); + return IMG_FALSE; + } + + OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass); + PVR_ASSERT(is_arena_valid(pArena)); + PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s', " + "base=0x%llx, size=0x%llx", __func__, pArena->name, + (unsigned long long)base, (unsigned long long)uSize)); + + uSize = (uSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1); + bt = _InsertResource(pArena, base, uSize, uFlags); + if (bt != NULL) + { + bt->hPriv = hPriv; + } + + PVR_ASSERT(is_arena_valid(pArena)); + OSLockRelease(pArena->hLock); + + return bt != NULL; +} + +/*************************************************************************/ /*! +@Function RA_Alloc +@Description To allocate resource from an arena. +@Input pArena The arena +@Input uRequestSize The size of resource segment requested. +@Input uImportMultiplier Import x-times more for future requests if + we have to import new memory. +@Input uImportFlags Flags influencing allocation policy. +@Input uAlignment The uAlignment constraint required for the + allocated segment, use 0 if uAlignment not + required, otherwise must be a power of 2. 
+@Input pszAnnotation String to describe the allocation +@Output base Allocated base resource +@Output pActualSize The actual size of resource segment + allocated, typically rounded up by quantum. +@Output phPriv The user reference associated with allocated + resource span. +@Return PVRSRV_OK - success +*/ /**************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +RA_Alloc(RA_ARENA *pArena, + RA_LENGTH_T uRequestSize, + IMG_UINT8 uImportMultiplier, + RA_FLAGS_T uImportFlags, + RA_LENGTH_T uAlignment, + const IMG_CHAR *pszAnnotation, + RA_BASE_T *base, + RA_LENGTH_T *pActualSize, + RA_PERISPAN_HANDLE *phPriv) +{ + PVRSRV_ERROR eError; + IMG_BOOL bResult; + RA_LENGTH_T uSize = uRequestSize; + RA_FLAGS_T uFlags = (uImportFlags & PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK); + + if (pArena == NULL || uImportMultiplier == 0 || uSize == 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: One of the necessary parameters is 0", __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass); + PVR_ASSERT(is_arena_valid(pArena)); + + if (pActualSize != NULL) + { + *pActualSize = uSize; + } + + /* Must be a power of 2 or 0 */ + PVR_ASSERT((uAlignment == 0) || (uAlignment & (uAlignment - 1)) == 0); + + PVR_DPF((PVR_DBG_MESSAGE, + "%s: arena='%s', size=0x%llx(0x%llx), " + "alignment=0x%llx", __func__, pArena->name, + (unsigned long long)uSize, (unsigned long long)uRequestSize, + (unsigned long long)uAlignment)); + + /* if allocation failed then we might have an import source which + can provide more resource, else we will have to fail the + allocation to the caller. 
*/ + bResult = _AttemptAllocAligned(pArena, uSize, uFlags, uAlignment, base, phPriv); + if (!bResult) + { + IMG_HANDLE hPriv; + RA_BASE_T import_base; + RA_LENGTH_T uImportSize = uSize; + + /* + Ensure that we allocate sufficient space to meet the uAlignment + constraint + */ + if (uAlignment > pArena->uQuantum) + { + uImportSize += (uAlignment - pArena->uQuantum); + } + + /* apply over-allocation multiplier after all alignment adjustments */ + uImportSize *= uImportMultiplier; + + /* ensure that we import according to the quanta of this arena */ + uImportSize = (uImportSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1); + + eError = pArena->pImportAlloc(pArena->pImportHandle, + uImportSize, uImportFlags, + pszAnnotation, + &import_base, &uImportSize, + &hPriv); + if (PVRSRV_OK != eError) + { + OSLockRelease(pArena->hLock); + return eError; + } + else + { + BT *pBT; + pBT = _InsertResourceSpan(pArena, import_base, uImportSize, uFlags); + /* successfully import more resource, create a span to + represent it and retry the allocation attempt */ + if (pBT == NULL) + { + /* insufficient resources to insert the newly acquired span, + so free it back again */ + pArena->pImportFree(pArena->pImportHandle, import_base, hPriv); + + PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s', " + "size=0x%llx failed!", __func__, pArena->name, + (unsigned long long)uSize)); + /* RA_Dump (arena); */ + + OSLockRelease(pArena->hLock); + return PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED; + } + + pBT->hPriv = hPriv; + + bResult = _AttemptAllocAligned(pArena, uSize, uFlags, uAlignment, base, phPriv); + if (!bResult) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: name='%s' second alloc failed!", + __func__, pArena->name)); + + /* + On failure of _AttemptAllocAligned() depending on the exact point + of failure, the imported segment may have been used and freed, or + left untouched. If the later, we need to return it. 
+ */ + _FreeBT(pArena, pBT); + + OSLockRelease(pArena->hLock); + return PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED; + } + else + { + /* Check if the new allocation was in the span we just added... */ + if (*base < import_base || *base > (import_base + uImportSize)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: name='%s' alloc did not occur in the imported span!", + __func__, pArena->name)); + + /* + Remove the imported span which should not be in use (if it is then + that is okay, but essentially no span should exist that is not used). + */ + _FreeBT(pArena, pBT); + } + } + } + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s', size=0x%llx, " + "*base=0x%llx = %d", __func__, pArena->name, (unsigned long long)uSize, + (unsigned long long)*base, bResult)); + + PVR_ASSERT(is_arena_valid(pArena)); + + OSLockRelease(pArena->hLock); + return PVRSRV_OK; +} + +/*************************************************************************/ /*! +@Function RA_Find_BT_VARange +@Description To find the boundary tag associated with the given device + virtual address. +@Input pArena The arena +@input base Allocated base resource +@Input uRequestSize The size of resource segment requested. +@Input uImportFlags Flags influencing allocation policy. 
+@Return Boundary Tag - success, NULL on failure +*/ /**************************************************************************/ +static BT *RA_Find_BT_VARange(RA_ARENA *pArena, + RA_BASE_T base, + RA_LENGTH_T uRequestSize, + RA_FLAGS_T uImportFlags) +{ + IMG_PSPLAY_TREE psSplaynode; + BT *pBT = pArena->pHeadSegment; + IMG_UINT32 uIndex; + + uIndex = pvr_log2 (uRequestSize); + + /* Find the splay node associated with these import flags */ + psSplaynode = PVRSRVFindNode(uImportFlags, pArena->per_flags_buckets); + + if (psSplaynode == NULL) + { + return NULL; + } + + /* Find the free Boundary Tag from the bucket that holds the requested range */ + while (uIndex < FREE_TABLE_LIMIT) + { + pBT = psSplaynode->buckets[uIndex]; + + while (pBT) + { + if ((pBT->base <= base) && ((pBT->base + pBT->uSize) >= (base + uRequestSize))) + { + if (pBT->type == btt_free) + { + return pBT; + } + else + { + PVR_ASSERT(pBT->type == btt_free); + } + } + else{ + pBT = pBT->next_free; + } + } + +#if defined(PVR_CTZLL) + /* This could further be optimised to get the next valid bucket */ + while (!(psSplaynode->bHasEltsMapping & (1 << ++uIndex))); +#else + uIndex++; +#endif + } + + return NULL; +} + + +/*************************************************************************/ /*! +@Function RA_Alloc_Range +@Description To allocate requested device virtual address resource from an arena. +@Input pArena The arena +@Input uRequestSize The size of resource segment requested. +@Input uImportFlags Flags influencing allocation policy. +@Input uAlignment The uAlignment constraint required for the + allocated segment, use 0 if uAlignment not required, otherwise + must be a power of 2. +@input base Allocated base resource +@Output pActualSize The actual size of resource segment + allocated, typically rounded up by quantum. 
+@Return PVRSRV_OK - success +*/ /**************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +RA_Alloc_Range(RA_ARENA *pArena, + RA_LENGTH_T uRequestSize, + RA_FLAGS_T uImportFlags, + RA_LENGTH_T uAlignment, + RA_BASE_T base, + RA_LENGTH_T *pActualSize) +{ + RA_LENGTH_T uSize = uRequestSize; + BT *pBT = NULL; + + if (pArena == NULL || uSize == 0) + { + PVR_DPF ((PVR_DBG_ERROR, + "%s: One of the necessary parameters is 0", __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass); + PVR_ASSERT(is_arena_valid(pArena)); + + /* Align the requested size to the Arena Quantum */ + uSize = ((uSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1)); + + /* Must be a power of 2 or 0 */ + PVR_ASSERT((uAlignment == 0) || (uAlignment & (uAlignment - 1)) == 0); + + if (uAlignment > 1) + { + if (base != ((base + uAlignment - 1) & ~(uAlignment - 1))) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + + /* Find if the segment in the range exists and is free + * Check if the segment can be split + * Find the bucket that points to this segment + * Find the free segment is in the free list + * remove the free segment + * split the segment into three segments one prior free, alloc range, + * free segment after the range. 
+ * remove the allocated range segment from the free list + * hook up the prior and after segments back to free list + * For each free, find the bucket the segment should go to + */ + + pBT = RA_Find_BT_VARange(pArena, base, uSize, uImportFlags); + + if (pBT == NULL) + { + OSLockRelease(pArena->hLock); + return PVRSRV_ERROR_RA_REQUEST_VIRT_ADDR_FAIL; + } + + /* Remove the boundary tag from the free list */ + _FreeListRemove (pArena, pBT); + + /* if requested VA start in the middle of the BT, split the BT accordingly */ + if (base > pBT->base) + { + BT *pNeighbour; + pNeighbour = _SegmentSplit (pBT, (RA_LENGTH_T)(base - pBT->base)); + /* partition the buffer, create a new boundary tag */ + if (pNeighbour == NULL) + { + PVR_DPF ((PVR_DBG_ERROR, "%s: Front split failed", __func__)); + /* Put pBT back in the list */ + _FreeListInsert (pArena, pBT); + return PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL; + } + + /* Insert back the free BT to the free list */ + _FreeListInsert(pArena, pBT); + pBT = pNeighbour; + } + + /* the segment might be too big, if so, discard the back of the segment */ + if (pBT->uSize > uSize) + { + BT *pNeighbour; + pNeighbour = _SegmentSplit(pBT, uSize); + /* partition the buffer, create a new boundary tag */ + if (pNeighbour == NULL) + { + PVR_DPF ((PVR_DBG_ERROR, "%s: Back split failed", __func__)); + /* Put pBT back in the list */ + _FreeListInsert (pArena, pBT); + return PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL; + } + + /* Insert back the free BT to the free list */ + _FreeListInsert (pArena, pNeighbour); + } + + pBT->type = btt_live; + + if (!HASH_Insert_Extended (pArena->pSegmentHash, &base, (uintptr_t)pBT)) + { + _FreeBT (pArena, pBT); + OSLockRelease(pArena->hLock); + return PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED; + } + + if (pActualSize != NULL) + { + *pActualSize = uSize; + } + + OSLockRelease(pArena->hLock); + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! 
+@Function RA_Free +@Description To free a resource segment. +@Input pArena The arena the segment was originally allocated from. +@Input base The base of the resource span to free. +*/ /**************************************************************************/ +IMG_INTERNAL void +RA_Free(RA_ARENA *pArena, RA_BASE_T base) +{ + BT *pBT; + + PVR_ASSERT(pArena != NULL); + + if (pArena == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter - pArena", __func__)); + return; + } + + OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass); + PVR_ASSERT(is_arena_valid(pArena)); + + PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s', base=0x%llx", __func__, pArena->name, + (unsigned long long)base)); + + pBT = (BT *) HASH_Remove_Extended(pArena->pSegmentHash, &base); + PVR_ASSERT(pBT != NULL); + + if (pBT) + { + PVR_ASSERT(pBT->base == base); + _FreeBT(pArena, pBT); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: no resource span found for given base (0x%llX) in arena %s", + __func__, (unsigned long long) base, + pArena->name)); + } + + PVR_ASSERT(is_arena_valid(pArena)); + OSLockRelease(pArena->hLock); +} diff --git a/drivers/mcst/gpu-imgtec/services/shared/common/sync.c b/drivers/mcst/gpu-imgtec/services/shared/common/sync.c new file mode 100644 index 000000000000..d3d9cbda7efb --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/common/sync.c @@ -0,0 +1,871 @@ +/*************************************************************************/ /*! +@File +@Title Services synchronisation interface +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements client side code for services synchronisation + interface +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ /**************************************************************************/ + +#include "img_types.h" +#include "img_defs.h" +#include "client_sync_bridge.h" +#include "client_synctracking_bridge.h" +#include "info_page_client.h" +#include "pvr_bridge.h" +#include "allocmem.h" +#include "osfunc.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "pvr_debug.h" +#include "dllist.h" +#include "sync.h" +#include "sync_internal.h" +#include "lock.h" +#include "log2.h" +#if defined(__KERNEL__) +#include "pvrsrv.h" +#endif + + +#define SYNC_BLOCK_LIST_CHUNCK_SIZE 10 + +/* + This defines the maximum amount of synchronisation memory + that can be allocated per SyncPrim context. + In reality this number is meaningless as we would run out + of synchronisation memory before we reach this limit, but + we need to provide a size to the span RA. + */ +#define MAX_SYNC_MEM (4 * 1024 * 1024) + +/* forward declaration */ +static PVRSRV_ERROR +_SyncPrimSetValue(SYNC_PRIM *psSyncInt, IMG_UINT32 ui32Value); + +/* + Internal interfaces for management of SYNC_PRIM_CONTEXT + */ +static void +_SyncPrimContextUnref(SYNC_PRIM_CONTEXT *psContext) +{ + if (!OSAtomicRead(&psContext->hRefCount)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: context already freed", __func__)); + } + else if (0 == OSAtomicDecrement(&psContext->hRefCount)) + { + /* SyncPrimContextDestroy only when no longer referenced */ + RA_Delete(psContext->psSpanRA); + RA_Delete(psContext->psSubAllocRA); + OSFreeMem(psContext); + } +} + +static void +_SyncPrimContextRef(SYNC_PRIM_CONTEXT *psContext) +{ + if (!OSAtomicRead(&psContext->hRefCount)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: context use after free", __func__)); + } + else + { + OSAtomicIncrement(&psContext->hRefCount); + } +} + +/* + Internal interfaces for management of synchronisation block memory + */ +static PVRSRV_ERROR +AllocSyncPrimitiveBlock(SYNC_PRIM_CONTEXT *psContext, + SYNC_PRIM_BLOCK **ppsSyncBlock) +{ + SYNC_PRIM_BLOCK *psSyncBlk; + IMG_HANDLE 
hSyncPMR; + IMG_HANDLE hSyncImportHandle; + IMG_DEVMEM_SIZE_T uiImportSize; + PVRSRV_ERROR eError; + + psSyncBlk = OSAllocMem(sizeof(SYNC_PRIM_BLOCK)); + PVR_GOTO_IF_NOMEM(psSyncBlk, eError, fail_alloc); + + psSyncBlk->psContext = psContext; + + /* Allocate sync prim block */ + eError = BridgeAllocSyncPrimitiveBlock(GetBridgeHandle(psContext->hDevConnection), + &psSyncBlk->hServerSyncPrimBlock, + &psSyncBlk->ui32FirmwareAddr, + &psSyncBlk->ui32SyncBlockSize, + &hSyncPMR); + PVR_GOTO_IF_ERROR(eError, fail_blockalloc); + + /* Make it mappable by the client */ + eError = DevmemMakeLocalImportHandle(psContext->hDevConnection, + hSyncPMR, + &hSyncImportHandle); + PVR_GOTO_IF_ERROR(eError, fail_export); + + /* Get CPU mapping of the memory block */ + eError = DevmemLocalImport(psContext->hDevConnection, + hSyncImportHandle, + PVRSRV_MEMALLOCFLAG_CPU_READABLE, + &psSyncBlk->hMemDesc, + &uiImportSize, + "SyncPrimitiveBlock"); + + /* + Regardless of success or failure we "undo" the export + */ + DevmemUnmakeLocalImportHandle(psContext->hDevConnection, + hSyncImportHandle); + + PVR_GOTO_IF_ERROR(eError, fail_import); + + eError = DevmemAcquireCpuVirtAddr(psSyncBlk->hMemDesc, + (void **) &psSyncBlk->pui32LinAddr); + PVR_GOTO_IF_ERROR(eError, fail_cpuvaddr); + + *ppsSyncBlock = psSyncBlk; + return PVRSRV_OK; + +fail_cpuvaddr: + DevmemFree(psSyncBlk->hMemDesc); +fail_import: +fail_export: + BridgeFreeSyncPrimitiveBlock(GetBridgeHandle(psContext->hDevConnection), + psSyncBlk->hServerSyncPrimBlock); +fail_blockalloc: + OSFreeMem(psSyncBlk); +fail_alloc: + return eError; +} + +static void +FreeSyncPrimitiveBlock(SYNC_PRIM_BLOCK *psSyncBlk) +{ + SYNC_PRIM_CONTEXT *psContext = psSyncBlk->psContext; + + DevmemReleaseCpuVirtAddr(psSyncBlk->hMemDesc); + DevmemFree(psSyncBlk->hMemDesc); + BridgeFreeSyncPrimitiveBlock(GetBridgeHandle(psContext->hDevConnection), + psSyncBlk->hServerSyncPrimBlock); + OSFreeMem(psSyncBlk); +} + +static PVRSRV_ERROR +SyncPrimBlockImport(RA_PERARENA_HANDLE 
hArena, + RA_LENGTH_T uSize, + RA_FLAGS_T uFlags, + const IMG_CHAR *pszAnnotation, + RA_BASE_T *puiBase, + RA_LENGTH_T *puiActualSize, + RA_PERISPAN_HANDLE *phImport) +{ + SYNC_PRIM_CONTEXT *psContext = hArena; + SYNC_PRIM_BLOCK *psSyncBlock = NULL; + RA_LENGTH_T uiSpanSize; + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(uFlags); + + /* Check we've not been called with an unexpected size */ + PVR_LOG_GOTO_IF_INVALID_PARAM(hArena, eError, e0); + PVR_LOG_GOTO_IF_INVALID_PARAM(uSize == sizeof(IMG_UINT32), eError, e0); + + /* + Ensure the synprim context doesn't go away while we have sync blocks + attached to it + */ + _SyncPrimContextRef(psContext); + + /* Allocate the block of memory */ + eError = AllocSyncPrimitiveBlock(psContext, &psSyncBlock); + PVR_LOG_GOTO_IF_ERROR(eError, "AllocSyncPrimitiveBlock", fail_syncblockalloc); + + /* Allocate a span for it */ + eError = RA_Alloc(psContext->psSpanRA, + psSyncBlock->ui32SyncBlockSize, + RA_NO_IMPORT_MULTIPLIER, + 0, + psSyncBlock->ui32SyncBlockSize, + pszAnnotation, + &psSyncBlock->uiSpanBase, + &uiSpanSize, + NULL); + PVR_GOTO_IF_ERROR(eError, fail_spanalloc); + + /* + There is no reason the span RA should return an allocation larger + then we request + */ + PVR_ASSERT(uiSpanSize == psSyncBlock->ui32SyncBlockSize); + + *puiBase = psSyncBlock->uiSpanBase; + *puiActualSize = psSyncBlock->ui32SyncBlockSize; + *phImport = psSyncBlock; + return PVRSRV_OK; + +fail_spanalloc: + FreeSyncPrimitiveBlock(psSyncBlock); +fail_syncblockalloc: + _SyncPrimContextUnref(psContext); +e0: + return eError; +} + +static void +SyncPrimBlockUnimport(RA_PERARENA_HANDLE hArena, + RA_BASE_T uiBase, + RA_PERISPAN_HANDLE hImport) +{ + SYNC_PRIM_CONTEXT *psContext = hArena; + SYNC_PRIM_BLOCK *psSyncBlock = hImport; + + if (!psContext || !psSyncBlock || uiBase != psSyncBlock->uiSpanBase) + { + /* Invalid input params */ + return; + } + + /* Free the span this import is using */ + RA_Free(psContext->psSpanRA, uiBase); + + /* Free the syncpim 
block */ + FreeSyncPrimitiveBlock(psSyncBlock); + + /* Drop our reference to the syncprim context */ + _SyncPrimContextUnref(psContext); +} + +static INLINE IMG_UINT32 SyncPrimGetOffset(SYNC_PRIM *psSyncInt) +{ + IMG_UINT64 ui64Temp; + + PVR_ASSERT(psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL); + + ui64Temp = psSyncInt->u.sLocal.uiSpanAddr - psSyncInt->u.sLocal.psSyncBlock->uiSpanBase; + PVR_ASSERT(ui64Tempu.sLocal.psSyncBlock; + + psSyncInt->sCommon.pui32LinAddr = psSyncBlock->pui32LinAddr + + (SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32)); +} + +static void SyncPrimLocalFree(SYNC_PRIM *psSyncInt) +{ + SYNC_PRIM_BLOCK *psSyncBlock; + SYNC_PRIM_CONTEXT *psContext; + + psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; + psContext = psSyncBlock->psContext; + + { + PVRSRV_ERROR eError; + IMG_HANDLE hBridge = + GetBridgeHandle(psSyncInt->u.sLocal.psSyncBlock->psContext->hDevConnection); + + if (GetInfoPageDebugFlags(psSyncInt->u.sLocal.psSyncBlock->psContext->hDevConnection) & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) + { + if (psSyncInt->u.sLocal.hRecord) + { + /* remove this sync record */ + eError = BridgeSyncRecordRemoveByHandle(hBridge, + psSyncInt->u.sLocal.hRecord); + } + } + else + { + IMG_UINT32 ui32FWAddr = psSyncBlock->ui32FirmwareAddr + + SyncPrimGetOffset(psSyncInt); + + eError = BridgeSyncFreeEvent(hBridge, ui32FWAddr); + PVR_LOG_IF_ERROR(eError, "BridgeSyncFreeEvent"); + } + } +#if defined(PVRSRV_ENABLE_SYNC_POISONING) + (void) _SyncPrimSetValue(psSyncInt, LOCAL_SYNC_PRIM_POISON_VALUE); +#else + /* reset the sync prim value as it is freed. + * this guarantees the client sync allocated to the client will + * have a value of zero and the client does not need to + * explicitly initialise the sync value to zero. + * the allocation of the backing memory for the sync prim block + * is done with ZERO_ON_ALLOC so the memory is initially all zero. 
+ */ + (void) _SyncPrimSetValue(psSyncInt, LOCAL_SYNC_PRIM_RESET_VALUE); +#endif + + RA_Free(psContext->psSubAllocRA, psSyncInt->u.sLocal.uiSpanAddr); + OSFreeMem(psSyncInt); + _SyncPrimContextUnref(psContext); +} + +static void SyncPrimLocalUnref(SYNC_PRIM *psSyncInt) +{ + if (!OSAtomicRead(&psSyncInt->u.sLocal.hRefCount)) + { + PVR_DPF((PVR_DBG_ERROR, "SyncPrimLocalUnref sync already freed")); + } + else if (0 == OSAtomicDecrement(&psSyncInt->u.sLocal.hRefCount)) + { + SyncPrimLocalFree(psSyncInt); + } +} + +static IMG_UINT32 SyncPrimGetFirmwareAddrLocal(SYNC_PRIM *psSyncInt) +{ + SYNC_PRIM_BLOCK *psSyncBlock; + + psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; + return psSyncBlock->ui32FirmwareAddr + SyncPrimGetOffset(psSyncInt); +} + +static INLINE IMG_UINT32 _Log2(IMG_UINT32 ui32Align) +{ + PVR_ASSERT(IsPower2(ui32Align)); + return ExactLog2(ui32Align); +} + +/* + External interfaces + */ + +IMG_INTERNAL PVRSRV_ERROR +SyncPrimContextCreate(SHARED_DEV_CONNECTION hDevConnection, + PSYNC_PRIM_CONTEXT *phSyncPrimContext) +{ + SYNC_PRIM_CONTEXT *psContext; + PVRSRV_ERROR eError; + + psContext = OSAllocMem(sizeof(SYNC_PRIM_CONTEXT)); + PVR_GOTO_IF_NOMEM(psContext, eError, fail_alloc); + + psContext->hDevConnection = hDevConnection; + + OSSNPrintf(psContext->azName, SYNC_PRIM_NAME_SIZE, "Sync Prim RA-%p", psContext); + OSSNPrintf(psContext->azSpanName, SYNC_PRIM_NAME_SIZE, "Sync Prim span RA-%p", psContext); + + /* + Create the RA for sub-allocations of the SynPrim's + + Note: + The import size doesn't matter here as the server will pass + back the blocksize when does the import which overrides + what we specify here. 
+ */ + + psContext->psSubAllocRA = RA_Create(psContext->azName, + /* Params for imports */ + _Log2(sizeof(IMG_UINT32)), + RA_LOCKCLASS_2, + SyncPrimBlockImport, + SyncPrimBlockUnimport, + psContext, + IMG_FALSE); + PVR_GOTO_IF_NOMEM(psContext->psSubAllocRA, eError, fail_suballoc); + + /* + Create the span-management RA + + The RA requires that we work with linear spans. For our use + here we don't require this behaviour as we're always working + within offsets of blocks (imports). However, we need to keep + the RA happy so we create the "span" management RA which + ensures that all are imports are added to the RA in a linear + fashion + */ + psContext->psSpanRA = RA_Create(psContext->azSpanName, + /* Params for imports */ + 0, + RA_LOCKCLASS_1, + NULL, + NULL, + NULL, + IMG_FALSE); + PVR_GOTO_IF_NOMEM(psContext->psSpanRA, eError, fail_span); + + if (!RA_Add(psContext->psSpanRA, 0, MAX_SYNC_MEM, 0, NULL)) + { + RA_Delete(psContext->psSpanRA); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, fail_span); + } + + OSAtomicWrite(&psContext->hRefCount, 1); + + *phSyncPrimContext = psContext; + return PVRSRV_OK; +fail_span: + RA_Delete(psContext->psSubAllocRA); +fail_suballoc: + OSFreeMem(psContext); +fail_alloc: + return eError; +} + +IMG_INTERNAL void SyncPrimContextDestroy(PSYNC_PRIM_CONTEXT hSyncPrimContext) +{ + SYNC_PRIM_CONTEXT *psContext = hSyncPrimContext; + if (1 != OSAtomicRead(&psContext->hRefCount)) + { + PVR_DPF((PVR_DBG_ERROR, "%s attempted with active references, may be the result of a race", __func__)); + } +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) +#if defined(__KERNEL__) + if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Forcing context destruction due to bad driver state", __func__)); + OSAtomicWrite(&psContext->hRefCount, 1); + } +#endif +#endif + _SyncPrimContextUnref(psContext); +} + +static PVRSRV_ERROR _SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext, + PVRSRV_CLIENT_SYNC_PRIM 
**ppsSync, + const IMG_CHAR *pszClassName, + IMG_BOOL bServerSync) +{ + SYNC_PRIM_CONTEXT *psContext = hSyncPrimContext; + SYNC_PRIM_BLOCK *psSyncBlock; + SYNC_PRIM *psNewSync; + PVRSRV_ERROR eError; + RA_BASE_T uiSpanAddr; + + PVR_LOG_RETURN_IF_INVALID_PARAM(hSyncPrimContext, "hSyncPrimeContext"); + + psNewSync = OSAllocMem(sizeof(SYNC_PRIM)); + PVR_GOTO_IF_NOMEM(psNewSync, eError, fail_alloc); + + eError = RA_Alloc(psContext->psSubAllocRA, + sizeof(IMG_UINT32), + RA_NO_IMPORT_MULTIPLIER, + 0, + sizeof(IMG_UINT32), + "Sync_Prim", + &uiSpanAddr, + NULL, + (RA_PERISPAN_HANDLE *) &psSyncBlock); + PVR_GOTO_IF_ERROR(eError, fail_raalloc); + + psNewSync->eType = SYNC_PRIM_TYPE_LOCAL; + OSAtomicWrite(&psNewSync->u.sLocal.hRefCount, 1); + psNewSync->u.sLocal.uiSpanAddr = uiSpanAddr; + psNewSync->u.sLocal.psSyncBlock = psSyncBlock; + SyncPrimGetCPULinAddr(psNewSync); + *ppsSync = &psNewSync->sCommon; + _SyncPrimContextRef(psContext); +#if defined(PVRSRV_ENABLE_SYNC_POISONING) + (void) _SyncPrimSetValue(psNewSync, LOCAL_SYNC_PRIM_RESET_VALUE); +#endif + + if (GetInfoPageDebugFlags(psSyncBlock->psContext->hDevConnection) & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) + { + IMG_CHAR szClassName[PVRSRV_SYNC_NAME_LENGTH]; + size_t uiSize; + + if (pszClassName) + { + uiSize = OSStringNLength(pszClassName, PVRSRV_SYNC_NAME_LENGTH); + /* Copy the class name annotation into a fixed-size array */ + OSCachedMemCopy(szClassName, pszClassName, uiSize); + if (uiSize == PVRSRV_SYNC_NAME_LENGTH) + szClassName[PVRSRV_SYNC_NAME_LENGTH-1] = '\0'; + else + szClassName[uiSize++] = '\0'; + } + else + { + /* No class name annotation */ + uiSize = 0; + szClassName[0] = '\0'; + } + + /* record this sync */ + eError = BridgeSyncRecordAdd( + GetBridgeHandle(psSyncBlock->psContext->hDevConnection), + &psNewSync->u.sLocal.hRecord, + psSyncBlock->hServerSyncPrimBlock, + psSyncBlock->ui32FirmwareAddr, + SyncPrimGetOffset(psNewSync), + bServerSync, + uiSize, + szClassName); + if (PVRSRV_OK != eError) + { + 
PVR_DPF((PVR_DBG_ERROR, "%s: failed to add SyncRecord \"%s\" (%s)", + __func__, + szClassName, + PVRSRVGETERRORSTRING(eError))); + psNewSync->u.sLocal.hRecord = NULL; + } + } + else + { + size_t uiSize; + + uiSize = OSStringNLength(pszClassName, PVRSRV_SYNC_NAME_LENGTH); + + if (uiSize < PVRSRV_SYNC_NAME_LENGTH) + uiSize++; + /* uiSize now reflects size used for pszClassName + NUL byte */ + + eError = BridgeSyncAllocEvent(GetBridgeHandle(hSyncPrimContext->hDevConnection), + bServerSync, + psSyncBlock->ui32FirmwareAddr + SyncPrimGetOffset(psNewSync), + uiSize, + pszClassName); + PVR_LOG_IF_ERROR(eError, "BridgeSyncAllocEvent"); + } + + return PVRSRV_OK; + +fail_raalloc: + OSFreeMem(psNewSync); +fail_alloc: + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext, + PVRSRV_CLIENT_SYNC_PRIM **ppsSync, + const IMG_CHAR *pszClassName) +{ + return _SyncPrimAlloc(hSyncPrimContext, + ppsSync, + pszClassName, + IMG_FALSE); +} + +static PVRSRV_ERROR +_SyncPrimSetValue(SYNC_PRIM *psSyncInt, IMG_UINT32 ui32Value) +{ + PVRSRV_ERROR eError; + + if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL) + { + SYNC_PRIM_BLOCK *psSyncBlock; + SYNC_PRIM_CONTEXT *psContext; + + psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; + psContext = psSyncBlock->psContext; + + eError = BridgeSyncPrimSet(GetBridgeHandle(psContext->hDevConnection), + psSyncBlock->hServerSyncPrimBlock, + SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32), + ui32Value); + } + else + { + /* Server sync not supported, attempted use of server sync */ + return PVRSRV_ERROR_NOT_SUPPORTED; + } + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR SyncPrimFree(PVRSRV_CLIENT_SYNC_PRIM *psSync) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + SYNC_PRIM *psSyncInt; + + PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out); + + psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); + if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL) + { + SyncPrimLocalUnref(psSyncInt); + } + else if (psSyncInt->eType == 
SYNC_PRIM_TYPE_SERVER) + { + /* Server sync not supported, attempted use of server sync */ + return PVRSRV_ERROR_NOT_SUPPORTED; + } + else + { + /* + Either the client has given us a bad pointer or there is an + error in this module + */ + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_SYNC_PRIM, err_out); + } + +err_out: + return eError; +} + +#if defined(NO_HARDWARE) +IMG_INTERNAL PVRSRV_ERROR +SyncPrimNoHwUpdate(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + SYNC_PRIM *psSyncInt; + + PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out); + + psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); + + /* There is no check for the psSyncInt to be LOCAL as this call + substitutes the Firmware updating a sync and that sync could + be a server one */ + + eError = _SyncPrimSetValue(psSyncInt, ui32Value); + +err_out: + return eError; +} +#endif + +IMG_INTERNAL PVRSRV_ERROR +SyncPrimSet(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + SYNC_PRIM *psSyncInt; + + PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out); + + psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); + if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL) + { + /* Invalid sync type */ + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_SYNC_PRIM, err_out); + } + + eError = _SyncPrimSetValue(psSyncInt, ui32Value); + +#if defined(PDUMP) + SyncPrimPDump(psSync); +#endif +err_out: + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR SyncPrimLocalGetHandleAndOffset(PVRSRV_CLIENT_SYNC_PRIM *psSync, + IMG_HANDLE *phBlock, + IMG_UINT32 *pui32Offset) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + SYNC_PRIM *psSyncInt; + + PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out); + PVR_LOG_GOTO_IF_INVALID_PARAM(phBlock, eError, err_out); + PVR_LOG_GOTO_IF_INVALID_PARAM(pui32Offset, eError, err_out); + + psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); + + if (likely(psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)) + { + 
*phBlock = psSyncInt->u.sLocal.psSyncBlock->hServerSyncPrimBlock; + *pui32Offset = psSyncInt->u.sLocal.uiSpanAddr - psSyncInt->u.sLocal.psSyncBlock->uiSpanBase; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: psSync not a Local sync prim (%d)", + __func__, psSyncInt->eType)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, err_out); + } + +err_out: + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR +SyncPrimGetFirmwareAddr(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 *pui32FwAddr) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + SYNC_PRIM *psSyncInt; + + *pui32FwAddr = 0; + PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out); + + psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); + if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL) + { + *pui32FwAddr = SyncPrimGetFirmwareAddrLocal(psSyncInt); + } + else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER) + { + /* Server sync not supported, attempted use of server sync */ + return PVRSRV_ERROR_NOT_SUPPORTED; + } + else + { + /* Either the client has given us a bad pointer or there is an + * error in this module + */ + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_SYNC_PRIM, err_out); + } + +err_out: + return eError; +} + +#if defined(PDUMP) +IMG_INTERNAL void SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync) +{ + SYNC_PRIM *psSyncInt; + SYNC_PRIM_BLOCK *psSyncBlock; + SYNC_PRIM_CONTEXT *psContext; + PVRSRV_ERROR eError; + + PVR_ASSERT(psSync != NULL); + psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); + + if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL) + { + /* Invalid sync type */ + PVR_ASSERT(IMG_FALSE); + return; + } + + psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; + psContext = psSyncBlock->psContext; + + eError = BridgeSyncPrimPDump(GetBridgeHandle(psContext->hDevConnection), + psSyncBlock->hServerSyncPrimBlock, + SyncPrimGetOffset(psSyncInt)); + PVR_LOG_IF_ERROR(eError, "BridgeSyncPrimPDump"); + PVR_ASSERT(eError == PVRSRV_OK); +} + +IMG_INTERNAL void SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, 
IMG_UINT32 ui32Value) +{ + SYNC_PRIM *psSyncInt; + SYNC_PRIM_BLOCK *psSyncBlock; + SYNC_PRIM_CONTEXT *psContext; + PVRSRV_ERROR eError; + + PVR_ASSERT(psSync != NULL); + psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); + + if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL) + { + /* Invalid sync type */ + PVR_ASSERT(IMG_FALSE); + return; + } + + psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; + psContext = psSyncBlock->psContext; + + eError = BridgeSyncPrimPDumpValue(GetBridgeHandle(psContext->hDevConnection), + psSyncBlock->hServerSyncPrimBlock, + SyncPrimGetOffset(psSyncInt), + ui32Value); + PVR_LOG_IF_ERROR(eError, "BridgeSyncPrimPDumpValue"); + PVR_ASSERT(eError == PVRSRV_OK); +} + +IMG_INTERNAL void SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + IMG_UINT32 ui32PDumpFlags) +{ + SYNC_PRIM *psSyncInt; + SYNC_PRIM_BLOCK *psSyncBlock; + SYNC_PRIM_CONTEXT *psContext; + PVRSRV_ERROR eError; + + PVR_ASSERT(psSync != NULL); + psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); + + if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL) + { + /* Invalid sync type */ + PVR_ASSERT(IMG_FALSE); + return; + } + + psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; + psContext = psSyncBlock->psContext; + + eError = BridgeSyncPrimPDumpPol(GetBridgeHandle(psContext->hDevConnection), + psSyncBlock->hServerSyncPrimBlock, + SyncPrimGetOffset(psSyncInt), + ui32Value, + ui32Mask, + eOperator, + ui32PDumpFlags); + PVR_LOG_IF_ERROR(eError, "BridgeSyncPrimPDumpPol"); + PVR_ASSERT(eError == PVRSRV_OK); +} + +IMG_INTERNAL void SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync, + IMG_UINT64 uiWriteOffset, + IMG_UINT64 uiPacketSize, + IMG_UINT64 uiBufferSize) +{ + SYNC_PRIM *psSyncInt; + SYNC_PRIM_BLOCK *psSyncBlock; + SYNC_PRIM_CONTEXT *psContext; + PVRSRV_ERROR eError; + + PVR_ASSERT(psSync != NULL); + psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); + + if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL) + { 
+ /* Invalid sync type */ + PVR_ASSERT(IMG_FALSE); + return; + } + + psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; + psContext = psSyncBlock->psContext; + +#if defined(LINUX) && defined(__i386__) + PVR_ASSERT(uiWriteOffsethDevConnection), + psSyncBlock->hServerSyncPrimBlock, + SyncPrimGetOffset(psSyncInt), + TRUNCATE_64BITS_TO_32BITS(uiWriteOffset), + TRUNCATE_64BITS_TO_32BITS(uiPacketSize), + TRUNCATE_64BITS_TO_32BITS(uiBufferSize)); + PVR_LOG_IF_ERROR(eError, "BridgeSyncPrimPDumpCBP"); + PVR_ASSERT(eError == PVRSRV_OK); +} + +#endif diff --git a/drivers/mcst/gpu-imgtec/services/shared/common/tlclient.c b/drivers/mcst/gpu-imgtec/services/shared/common/tlclient.c new file mode 100644 index 000000000000..9f42ffe5e355 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/common/tlclient.c @@ -0,0 +1,486 @@ +/*************************************************************************/ /*! +@File tlclient.c +@Title Services Transport Layer shared API +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Transport layer common API used in both clients and server +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* DESIGN NOTE + * This transport layer consumer-role API was created as a shared API when a + * client wanted to read the data of a TL stream from within the KM server + * driver. This was in addition to the existing clients supported externally + * by the UM client library component via PVR API layer. + * This shared API is thus used by the PVR TL API in the client library and + * by clients internal to the server driver module. It depends on + * client entry points of the TL and DEVMEM bridge modules. These entry points + * encapsulate from the TL shared API whether a direct bridge or an indirect + * (ioctl) bridge is used. 
+ * One reason for needing this layer centres around the fact that some of the + * API functions make multiple bridge calls and the logic that glues these + * together is common regardless of client location. Further this layer has + * allowed the defensive coding that checks parameters to move into the PVR + * API layer where untrusted clients enter giving a more efficient KM code path. + */ + +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "pvr_debug.h" +#include "osfunc.h" + +#include "allocmem.h" +#include "devicemem.h" + +#include "tlclient.h" +#include "pvrsrv_tlcommon.h" +#include "client_pvrtl_bridge.h" + +/* Defines/Constants + */ + +#define NO_ACQUIRE 0xffffffffU + +/* User-side stream descriptor structure. + */ +typedef struct _TL_STREAM_DESC_ +{ + /* Handle on kernel-side stream descriptor*/ + IMG_HANDLE hServerSD; + + /* Stream data buffer variables */ + DEVMEM_MEMDESC* psUMmemDesc; + IMG_PBYTE pBaseAddr; + + /* Offset in bytes into the circular buffer and valid only after + * an Acquire call and undefined after a release. */ + IMG_UINT32 uiReadOffset; + + /* Always a positive integer when the Acquire call returns and a release + * is outstanding. Undefined at all other times. */ + IMG_UINT32 uiReadLen; + + /* Flag indicating if the RESERVE_TOO_BIG error was already printed. + * It's used to reduce number of errors in kernel log. 
*/ + IMG_BOOL bPrinted; +} TL_STREAM_DESC, *PTL_STREAM_DESC; + + +IMG_INTERNAL +PVRSRV_ERROR TLClientOpenStream(SHARED_DEV_CONNECTION hDevConnection, + const IMG_CHAR* pszName, + IMG_UINT32 ui32Mode, + IMG_HANDLE* phSD) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + TL_STREAM_DESC *psSD = NULL; + IMG_HANDLE hTLPMR; + IMG_HANDLE hTLImportHandle; + IMG_DEVMEM_SIZE_T uiImportSize; + IMG_UINT32 ui32MemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE; + + PVR_ASSERT(hDevConnection); + PVR_ASSERT(pszName); + PVR_ASSERT(phSD); + *phSD = NULL; + + /* Allocate memory for the stream descriptor object, initialise with + * "no data read" yet. */ + psSD = OSAllocZMem(sizeof(TL_STREAM_DESC)); + PVR_LOG_GOTO_IF_NOMEM(psSD, eError, e0); + psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE; + + /* Send open stream request to kernel server to get stream handle and + * buffer cookie so we can get access to the buffer in this process. */ + eError = BridgeTLOpenStream(GetBridgeHandle(hDevConnection), pszName, + ui32Mode, &psSD->hServerSD, &hTLPMR); + if (eError != PVRSRV_OK) + { + if ((ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WAIT) && + (eError == PVRSRV_ERROR_TIMEOUT)) + { + goto e1; + } + PVR_LOG_GOTO_IF_ERROR(eError, "BridgeTLOpenStream", e1); + } + + /* Convert server export cookie into a cookie for use by this client */ + eError = DevmemMakeLocalImportHandle(hDevConnection, + hTLPMR, &hTLImportHandle); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemMakeLocalImportHandle", e2); + + ui32MemFlags |= ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ? + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE : 0; + /* Now convert client cookie into a client handle on the buffer's + * physical memory region */ + eError = DevmemLocalImport(hDevConnection, + hTLImportHandle, + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE, + &psSD->psUMmemDesc, + &uiImportSize, + "TLBuffer"); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemImport", e3); + + /* Now map the memory into the virtual address space of this process. 
*/ + eError = DevmemAcquireCpuVirtAddr(psSD->psUMmemDesc, (void **) + &psSD->pBaseAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e4); + + /* Ignore error, not much that can be done */ + (void) DevmemUnmakeLocalImportHandle(hDevConnection, + hTLImportHandle); + + /* Return client descriptor handle to caller */ + *phSD = psSD; + return PVRSRV_OK; + +/* Clean up post buffer setup */ +e4: + DevmemFree(psSD->psUMmemDesc); +e3: + (void) DevmemUnmakeLocalImportHandle(hDevConnection, + &hTLImportHandle); +/* Clean up post stream open */ +e2: + BridgeTLCloseStream(GetBridgeHandle(hDevConnection), psSD->hServerSD); + +/* Clean up post allocation of the descriptor object */ +e1: + OSFreeMem(psSD); + +e0: + return eError; +} + +IMG_INTERNAL +PVRSRV_ERROR TLClientCloseStream(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; + + PVR_ASSERT(hDevConnection); + PVR_ASSERT(hSD); + + /* Check the caller provided connection is valid */ + if (!psSD->hServerSD) + { + PVR_DPF((PVR_DBG_ERROR, "%s: descriptor already " + "closed/not open", __func__)); + return PVRSRV_ERROR_HANDLE_NOT_FOUND; + } + + /* Check if acquire is outstanding, perform release if it is, ignore result + * as there is not much we can do if it is an error other than close */ + if (psSD->uiReadLen != NO_ACQUIRE) + { + (void) BridgeTLReleaseData(GetBridgeHandle(hDevConnection), + psSD->hServerSD, psSD->uiReadOffset, psSD->uiReadLen); + psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE; + } + + /* Clean up DevMem resources used for this stream in this client */ + DevmemReleaseCpuVirtAddr(psSD->psUMmemDesc); + + DevmemFree(psSD->psUMmemDesc); + + /* Send close to server to clean up kernel mode resources for this + * handle and release the memory. 
*/ + eError = BridgeTLCloseStream(GetBridgeHandle(hDevConnection), + psSD->hServerSD); + PVR_LOG_IF_ERROR(eError, "BridgeTLCloseStream"); + + OSCachedMemSet(psSD, 0x00, sizeof(TL_STREAM_DESC)); + OSFreeMem(psSD); + + return eError; +} + +IMG_INTERNAL +PVRSRV_ERROR TLClientDiscoverStreams(SHARED_DEV_CONNECTION hDevConnection, + const IMG_CHAR *pszNamePattern, + IMG_CHAR aszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE], + IMG_UINT32 *pui32NumFound) +{ + PVR_ASSERT(hDevConnection); + PVR_ASSERT(pszNamePattern); + PVR_ASSERT(pui32NumFound); + + return BridgeTLDiscoverStreams(GetBridgeHandle(hDevConnection), + pszNamePattern, + // we need to treat this as one dimensional array + *pui32NumFound * PRVSRVTL_MAX_STREAM_NAME_SIZE, + (IMG_CHAR *) aszStreams, + pui32NumFound); +} + +IMG_INTERNAL +PVRSRV_ERROR TLClientReserveStream(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, + IMG_UINT8 **ppui8Data, + IMG_UINT32 ui32Size) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; + IMG_UINT32 ui32BufferOffset, ui32Dummy; + + PVR_ASSERT(hDevConnection); + PVR_ASSERT(hSD); + PVR_ASSERT(ppui8Data); + PVR_ASSERT(ui32Size); + + eError = BridgeTLReserveStream(GetBridgeHandle(hDevConnection), + psSD->hServerSD, &ui32BufferOffset, ui32Size, ui32Size, &ui32Dummy); + PVR_RETURN_IF_ERROR(eError); + + *ppui8Data = psSD->pBaseAddr + ui32BufferOffset; + + return PVRSRV_OK; +} + +IMG_INTERNAL +PVRSRV_ERROR TLClientReserveStream2(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, + IMG_UINT8 **ppui8Data, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32SizeMin, + IMG_UINT32 *pui32Available) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; + IMG_UINT32 ui32BufferOffset; + + PVR_ASSERT(hDevConnection); + PVR_ASSERT(hSD); + PVR_ASSERT(ppui8Data); + PVR_ASSERT(ui32Size); + + eError = BridgeTLReserveStream(GetBridgeHandle(hDevConnection), + psSD->hServerSD, &ui32BufferOffset, ui32Size, ui32SizeMin, + pui32Available); + PVR_RETURN_IF_ERROR(eError); + + 
*ppui8Data = psSD->pBaseAddr + ui32BufferOffset; + + return PVRSRV_OK; +} + +IMG_INTERNAL +PVRSRV_ERROR TLClientCommitStream(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, + IMG_UINT32 ui32Size) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; + + PVR_ASSERT(hDevConnection); + PVR_ASSERT(hSD); + PVR_ASSERT(ui32Size); + + eError = BridgeTLCommitStream(GetBridgeHandle(hDevConnection), + psSD->hServerSD, ui32Size); + PVR_RETURN_IF_ERROR(eError); + + return PVRSRV_OK; +} + +IMG_INTERNAL +PVRSRV_ERROR TLClientAcquireData(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, + IMG_PBYTE* ppPacketBuf, + IMG_UINT32* pui32BufLen) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; + + PVR_ASSERT(hDevConnection); + PVR_ASSERT(hSD); + PVR_ASSERT(ppPacketBuf); + PVR_ASSERT(pui32BufLen); + + /* In case of non-blocking acquires, which can return no data, and + * error paths ensure we clear the output parameters first. */ + *ppPacketBuf = NULL; + *pui32BufLen = 0; + + /* Check Acquire has not been called twice in a row without a release */ + if (psSD->uiReadOffset != NO_ACQUIRE) + { + PVR_DPF((PVR_DBG_ERROR, "%s: acquire already " + "outstanding, ReadOffset(%d), ReadLength(%d)", + __func__, psSD->uiReadOffset, psSD->uiReadLen)); + return PVRSRV_ERROR_RETRY; + } + + /* Ask the kernel server for the next chunk of data to read */ + eError = BridgeTLAcquireData(GetBridgeHandle(hDevConnection), + psSD->hServerSD, &psSD->uiReadOffset, &psSD->uiReadLen); + if (eError != PVRSRV_OK) + { + /* Mask reporting of the errors seen under normal operation */ + if ((eError != PVRSRV_ERROR_RESOURCE_UNAVAILABLE) && + (eError != PVRSRV_ERROR_TIMEOUT) && + (eError != PVRSRV_ERROR_STREAM_READLIMIT_REACHED)) + { + PVR_LOG_ERROR(eError, "BridgeTLAcquireData"); + } + psSD->uiReadOffset = psSD->uiReadLen = NO_ACQUIRE; + return eError; + } + /* else PVRSRV_OK */ + + /* Return the data offset and length to the caller if bytes are available 
+ * to be read. Could be zero for non-blocking mode so pass back cleared + * values above */ + if (psSD->uiReadLen) + { + *ppPacketBuf = psSD->pBaseAddr + psSD->uiReadOffset; + *pui32BufLen = psSD->uiReadLen; + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR _TLClientReleaseDataLen( + SHARED_DEV_CONNECTION hDevConnection, + TL_STREAM_DESC* psSD, + IMG_UINT32 uiReadLen) +{ + PVRSRV_ERROR eError; + + /* the previous acquire did not return any data, this is a no-operation */ + if (psSD->uiReadLen == 0) + { + return PVRSRV_OK; + } + + /* Check release has not been called twice in a row without an acquire */ + if (psSD->uiReadOffset == NO_ACQUIRE) + { + PVR_DPF((PVR_DBG_ERROR, "%s: no acquire to release", __func__)); + return PVRSRV_ERROR_RETRY; + } + + /* Inform the kernel to release the data from the buffer */ + eError = BridgeTLReleaseData(GetBridgeHandle(hDevConnection), + psSD->hServerSD, + psSD->uiReadOffset, uiReadLen); + PVR_LOG_IF_ERROR(eError, "BridgeTLReleaseData"); + + /* Reset state to indicate no outstanding acquire */ + psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE; + + return eError; +} + +IMG_INTERNAL +PVRSRV_ERROR TLClientReleaseData(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD) +{ + TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; + + PVR_ASSERT(hDevConnection); + PVR_ASSERT(hSD); + + return _TLClientReleaseDataLen(hDevConnection, psSD, psSD->uiReadLen); +} + +IMG_INTERNAL +PVRSRV_ERROR TLClientReleaseDataLess(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, IMG_UINT32 uiActualReadLen) +{ + TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; + + PVR_ASSERT(hDevConnection); + PVR_ASSERT(hSD); + + /* Check the specified size is within the size returned by Acquire */ + if (uiActualReadLen > psSD->uiReadLen) + { + PVR_DPF((PVR_DBG_ERROR, "%s: no acquire to release", __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + return _TLClientReleaseDataLen(hDevConnection, psSD, uiActualReadLen); +} + +IMG_INTERNAL +PVRSRV_ERROR 
TLClientWriteData(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, + IMG_UINT32 ui32Size, + IMG_BYTE *pui8Data) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; + + PVR_ASSERT(hDevConnection); + PVR_ASSERT(hSD); + PVR_ASSERT(ui32Size); + PVR_ASSERT(pui8Data); + + eError = BridgeTLWriteData(GetBridgeHandle(hDevConnection), + psSD->hServerSD, ui32Size, pui8Data); + if (eError != PVRSRV_OK) + { + if (eError == PVRSRV_ERROR_STREAM_FULL) + { + if (!psSD->bPrinted) + { + psSD->bPrinted = IMG_TRUE; + PVR_DPF((PVR_DBG_ERROR, "Not enough space. Failed to write" + " data to the stream (%d).", eError)); + } + } + else if (eError == PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED) + { + PVR_DPF((PVR_DBG_ERROR, "TL packet size limit exceeded. " + "Failed to write data to the stream (%d).", eError)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: KM returned %d", + __func__, eError)); + } + } + + return eError; +} + +/****************************************************************************** + End of file (tlclient.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/shared/common/uniq_key_splay_tree.c b/drivers/mcst/gpu-imgtec/services/shared/common/uniq_key_splay_tree.c new file mode 100644 index 000000000000..5ff6602df273 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/common/uniq_key_splay_tree.c @@ -0,0 +1,280 @@ +/*************************************************************************/ /*! +@File +@Title Provides splay-trees. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implementation of splay-trees. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ /**************************************************************************/ + +#include "allocmem.h" /* for OSMemAlloc / OSMemFree */ +#include "osfunc.h" /* for OSMemFree */ +#include "pvr_debug.h" +#include "uniq_key_splay_tree.h" + +/** + * This function performs a simple top down splay + * + * @param ui32Flags the flags that must splayed to the root (if possible). + * @param psTree The tree to splay. + * @return the resulting tree after the splay operation. + */ +IMG_INTERNAL +IMG_PSPLAY_TREE PVRSRVSplay (IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree) +{ + IMG_SPLAY_TREE sTmp1; + IMG_PSPLAY_TREE psLeft; + IMG_PSPLAY_TREE psRight; + IMG_PSPLAY_TREE psTmp2; + + if (psTree == NULL) + { + return NULL; + } + + sTmp1.psLeft = NULL; + sTmp1.psRight = NULL; + + psLeft = &sTmp1; + psRight = &sTmp1; + + for (;;) + { + if (ui32Flags < psTree->ui32Flags) + { + if (psTree->psLeft == NULL) + { + break; + } + + if (ui32Flags < psTree->psLeft->ui32Flags) + { + /* if we get to this point, we need to rotate right the tree */ + psTmp2 = psTree->psLeft; + psTree->psLeft = psTmp2->psRight; + psTmp2->psRight = psTree; + psTree = psTmp2; + if (psTree->psLeft == NULL) + { + break; + } + } + + /* if we get to this point, we need to link right */ + psRight->psLeft = psTree; + psRight = psTree; + psTree = psTree->psLeft; + } + else + { + if (ui32Flags > psTree->ui32Flags) + { + if (psTree->psRight == NULL) + { + break; + } + + if (ui32Flags > psTree->psRight->ui32Flags) + { + /* if we get to this point, we need to rotate left the tree */ + psTmp2 = psTree->psRight; + psTree->psRight = psTmp2->psLeft; + psTmp2->psLeft = psTree; + psTree = psTmp2; + if (psTree->psRight == NULL) + { + break; + } + } + + /* if we get to this point, we need to link left */ + psLeft->psRight = psTree; + psLeft = psTree; + psTree = psTree->psRight; + } + else + { + break; + } + } + } + + /* at this point re-assemble the tree */ + psLeft->psRight = psTree->psLeft; + psRight->psLeft = psTree->psRight; + 
psTree->psLeft = sTmp1.psRight; + psTree->psRight = sTmp1.psLeft; + return psTree; +} + + +/** + * This function inserts a node into the Tree (unless it is already present, in + * which case it is equivalent to performing only a splay operation + * + * @param ui32Flags the key of the new node + * @param psTree The tree into which one wants to add a new node + * @return The resulting with the node in it + */ +IMG_INTERNAL +IMG_PSPLAY_TREE PVRSRVInsert(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree) +{ + IMG_PSPLAY_TREE psNew; + + if (psTree != NULL) + { + psTree = PVRSRVSplay(ui32Flags, psTree); + if (psTree->ui32Flags == ui32Flags) + { + return psTree; + } + } + + psNew = (IMG_PSPLAY_TREE) OSAllocMem(sizeof(IMG_SPLAY_TREE)); + if (psNew == NULL) + { + PVR_DPF ((PVR_DBG_ERROR, "Error: failed to allocate memory to add a node to the splay tree.")); + return NULL; + } + + psNew->ui32Flags = ui32Flags; + OSCachedMemSet(&(psNew->buckets[0]), 0, sizeof(psNew->buckets)); + +#if defined(PVR_CTZLL) + psNew->bHasEltsMapping = ~(((IMG_ELTS_MAPPINGS) 1 << (sizeof(psNew->buckets) / (sizeof(psNew->buckets[0])))) - 1); +#endif + + if (psTree == NULL) + { + psNew->psLeft = NULL; + psNew->psRight = NULL; + return psNew; + } + + if (ui32Flags < psTree->ui32Flags) + { + psNew->psLeft = psTree->psLeft; + psNew->psRight = psTree; + psTree->psLeft = NULL; + } + else + { + psNew->psRight = psTree->psRight; + psNew->psLeft = psTree; + psTree->psRight = NULL; + } + + return psNew; +} + + +/** + * Deletes a node from the tree (unless it is not there, in which case it is + * equivalent to a splay operation) + * + * @param ui32Flags the value of the node to remove + * @param psTree the tree into which the node must be removed + * @return the resulting tree + */ +IMG_INTERNAL +IMG_PSPLAY_TREE PVRSRVDelete(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree) +{ + IMG_PSPLAY_TREE psTmp; + if (psTree == NULL) + { + return NULL; + } + + psTree = PVRSRVSplay(ui32Flags, psTree); + if (ui32Flags == 
psTree->ui32Flags) + { + /* The value was present in the tree */ + if (psTree->psLeft == NULL) + { + psTmp = psTree->psRight; + } + else + { + psTmp = PVRSRVSplay(ui32Flags, psTree->psLeft); + psTmp->psRight = psTree->psRight; + } + OSFreeMem(psTree); + return psTmp; + } + + /* The value was not present in the tree, so just return it as is + * (after the splay) */ + return psTree; +} + +/** + * This function picks up the appropriate node for the given flags + * + * @param ui32Flags the flags that must associated with the node. + * @param psTree current splay tree node. + * @return the resulting tree node after the search operation. + */ +IMG_INTERNAL +IMG_PSPLAY_TREE PVRSRVFindNode(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree) +{ + if (psTree == NULL) + { + return NULL; + } + + while (psTree) + { + if (ui32Flags == psTree->ui32Flags) + { + return psTree; + } + + if (ui32Flags < psTree->ui32Flags) + { + psTree = psTree->psLeft; + continue; + } + + if (ui32Flags > psTree->ui32Flags) + { + psTree = psTree->psRight; + continue; + } + } + + return NULL; +} diff --git a/drivers/mcst/gpu-imgtec/services/shared/common/uniq_key_splay_tree.h b/drivers/mcst/gpu-imgtec/services/shared/common/uniq_key_splay_tree.h new file mode 100644 index 000000000000..2ea3163188c7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/common/uniq_key_splay_tree.h @@ -0,0 +1,88 @@ +/*************************************************************************/ /*! +@File +@Title Splay trees interface +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provides debug functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef UNIQ_KEY_SPLAY_TREE_H_ +#define UNIQ_KEY_SPLAY_TREE_H_ + +#include "img_types.h" +#include "pvr_intrinsics.h" + +#if defined(PVR_CTZLL) + /* map the is_bucket_n_free to an int. + * This way, the driver can find the first non empty without loop + */ + typedef IMG_UINT64 IMG_ELTS_MAPPINGS; +#endif + +/* head of list of free boundary tags for indexed by pvr_log2 of the + boundary tag size */ + +#define FREE_TABLE_LIMIT 40 + +struct _BT_; + +typedef struct img_splay_tree +{ + /* left child/subtree */ + struct img_splay_tree * psLeft; + + /* right child/subtree */ + struct img_splay_tree * psRight; + + /* Flags to match on this span, used as the key. */ + IMG_UINT32 ui32Flags; +#if defined(PVR_CTZLL) + /* each bit of this int is a boolean telling if the corresponding + bucket is empty or not */ + IMG_ELTS_MAPPINGS bHasEltsMapping; +#endif + struct _BT_ * buckets[FREE_TABLE_LIMIT]; +} IMG_SPLAY_TREE, *IMG_PSPLAY_TREE; + +IMG_PSPLAY_TREE PVRSRVSplay (IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree); +IMG_PSPLAY_TREE PVRSRVInsert(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree); +IMG_PSPLAY_TREE PVRSRVDelete(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree); +IMG_PSPLAY_TREE PVRSRVFindNode(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree); + + +#endif /* !UNIQ_KEY_SPLAY_TREE_H_ */ diff --git a/drivers/mcst/gpu-imgtec/services/shared/devices/rogue/rgx_hwperf_table.c b/drivers/mcst/gpu-imgtec/services/shared/devices/rogue/rgx_hwperf_table.c new file mode 100644 index 000000000000..d203d5c55b33 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/devices/rogue/rgx_hwperf_table.c @@ -0,0 +1,676 @@ +/*************************************************************************/ /*! +@File +@Title RGX HW Performance counter table +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description RGX HW Performance counters table +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ /**************************************************************************/ + +#include "img_defs.h" +#include "rgx_fwif_hwperf.h" +#if defined(__KERNEL__) +#include "rgxdefs_km.h" +#else +#include "rgxdefs.h" +#endif +#include "rgx_hwperf_table.h" + +/* Includes needed for PVRSRVKM (Server) context */ +# include "rgx_bvnc_defs_km.h" +# if defined(__KERNEL__) +# include "rgxdevice.h" +# endif + +/* Shared compile-time context ASSERT macro */ +#if defined(RGX_FIRMWARE) +# include "rgxfw_utils.h" +/* firmware context */ +# define DBG_ASSERT(_c) RGXFW_ASSERT((_c)) +#else +# include "pvr_debug.h" +/* host client/server context */ +# define DBG_ASSERT(_c) PVR_ASSERT((_c)) +#endif + +/***************************************************************************** + RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered() + + Referenced in gasCntBlkTypeModel[] table below and only called from + RGX_FIRMWARE run-time context. Therefore compile time configuration is used. 
+ *****************************************************************************/ + +#if defined(RGX_FIRMWARE) && defined(RGX_FEATURE_PERFBUS) +# include "rgxfw_pow.h" +# include "rgxfw_utils.h" + +static bool rgxfw_hwperf_pow_st_direct(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId) +{ + PVR_UNREFERENCED_PARAMETER(eBlkType); + PVR_UNREFERENCED_PARAMETER(ui8UnitId); + +#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) + /* S7XT: JONES */ + return (eBlkType == RGX_CNTBLK_ID_JONES); +#elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) + /* S6XT: TA, TORNADO */ + return true; +#else + /* S6 : TA, HUB, RASTER (RASCAL) */ + return (gsPowCtl.eUnitsPowState & RGXFW_POW_ST_RD_ON) != 0U; +#endif +} + +/* Only use conditional compilation when counter blocks appear in different + * islands for different Rogue families. + */ +static bool rgxfw_hwperf_pow_st_indirect(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId) +{ + IMG_UINT32 ui32NumDustsEnabled = rgxfw_pow_get_enabled_units(); + + if (((gsPowCtl.eUnitsPowState & RGXFW_POW_ST_RD_ON) != 0U) && + (ui32NumDustsEnabled > 0U)) + { +#if defined(RGX_FEATURE_DYNAMIC_DUST_POWER) + IMG_UINT32 ui32NumUscEnabled = ui32NumDustsEnabled*2U; + + switch (eBlkType) + { + case RGX_CNTBLK_ID_TPU_MCU0: /* S6 and S6XT */ +#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) + case RGX_CNTBLK_ID_TEXAS0: /* S7 */ +#endif + if (ui8UnitId >= ui32NumDustsEnabled) + { + return false; + } + break; + case RGX_CNTBLK_ID_USC0: /* S6, S6XT, S7 */ + case RGX_CNTBLK_ID_PBE0: /* S7, PBE2_IN_XE */ + /* Handle single cluster cores */ + if (ui8UnitId >= ((ui32NumUscEnabled > RGX_FEATURE_NUM_CLUSTERS) ? 
RGX_FEATURE_NUM_CLUSTERS : ui32NumUscEnabled)) + { + return false; + } + break; + case RGX_CNTBLK_ID_BLACKPEARL0: /* S7 */ + case RGX_CNTBLK_ID_RASTER0: /* S6XT */ +#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) + case RGX_CNTBLK_ID_TEXAS0: /* S6XT */ +#endif + if (ui8UnitId >= (RGX_REQ_NUM_PHANTOMS(ui32NumUscEnabled))) + { + return false; + } + break; + default: + RGXFW_ASSERT(false); /* should never get here, table error */ + break; + } +#else + /* Always true, no fused DUSTs, all powered so do not check unit */ + PVR_UNREFERENCED_PARAMETER(eBlkType); + PVR_UNREFERENCED_PARAMETER(ui8UnitId); +#endif + } + else + { + return false; + } + return true; +} + +#else /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */ + +# define rgxfw_hwperf_pow_st_direct ((void*)NULL) +# define rgxfw_hwperf_pow_st_indirect ((void*)NULL) +# define rgxfw_hwperf_pow_st_gandalf ((void*)NULL) + +#endif /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */ + +# define rgxfw_hwperf_pow_st_gandalf ((void*)NULL) + +/***************************************************************************** + RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered() end + *****************************************************************************/ + +/***************************************************************************** + RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() start + + Referenced in gasCntBlkTypeModel[] table below and called from all build + contexts: + RGX_FIRMWARE, PVRSRVCTL (UM) and PVRSRVKM (Server). + + Therefore each function has two implementations, one for compile time and one + run time configuration depending on the context. The functions will inform the + caller whether this block is valid for this particular RGX device. Other + run-time dependent data is returned in psRtInfo for the caller to use. 
+ *****************************************************************************/ + +/* Used for block types: USC */ +static IMG_BOOL rgx_hwperf_blk_present_perfbus(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, void *pvRtInfo) +{ + DBG_ASSERT(psBlkTypeDesc != NULL); + DBG_ASSERT(psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_USC0); + +#if defined(__KERNEL__) /* Server context */ + PVR_ASSERT(pvDev_km != NULL); + PVR_ASSERT(pvRtInfo != NULL); + { + RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo; + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS)) + { + psRtInfo->ui32NumUnits = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) ? RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) : 0; + psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; + return IMG_TRUE; + } + } +#else /* FW context */ + PVR_UNREFERENCED_PARAMETER(pvDev_km); + PVR_UNREFERENCED_PARAMETER(pvRtInfo); + PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); +# if defined(RGX_FEATURE_PERFBUS) + return IMG_TRUE; +# endif +#endif + return IMG_FALSE; +} + +/* Used for block types: Direct RASTERISATION, HUB */ +static IMG_BOOL rgx_hwperf_blk_present_not_clustergrouping(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, void *pvRtInfo) +{ + DBG_ASSERT(psBlkTypeDesc != NULL); + DBG_ASSERT((psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_RASTER) || + (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_HUB)); + +#if defined(__KERNEL__) /* Server context */ + PVR_ASSERT(pvDev_km != NULL); + PVR_ASSERT(pvRtInfo != NULL); + { + RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo; + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km; + if ((!RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING)) && + (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS))) + { + psRtInfo->ui32NumUnits = 1; + psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; + 
return IMG_TRUE; + } + } +#else /* FW context */ + PVR_UNREFERENCED_PARAMETER(pvDev_km); + PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); + PVR_UNREFERENCED_PARAMETER(pvRtInfo); +# if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS) + return IMG_TRUE; +# endif +#endif + return IMG_FALSE; +} + +/* Used for block types: BF, BT, RT, SH, BX_TU */ +static IMG_BOOL rgx_hwperf_blk_present_raytracing(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, void *pvRtInfo) +{ + DBG_ASSERT(psBlkTypeDesc != NULL); + DBG_ASSERT((psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_BF) || + (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_BT) || + (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_RT) || + (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_SH) || + (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_BX_TU0)); + + PVR_UNREFERENCED_PARAMETER(pvDev_km); + PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); + PVR_UNREFERENCED_PARAMETER(pvRtInfo); + + return IMG_FALSE; +} + +#if defined(__KERNEL__) /* Server context */ +static IMG_UINT32 rgx_units_indirect_by_phantom(PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg) +{ + /* Run-time math for RGX_HWPERF_INDIRECT_BY_PHANTOM */ + return ((psFeatCfg->ui64Features & RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK) == 0) ? 
1 + : (psFeatCfg->ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX]+3)/4; +} + +static IMG_UINT32 rgx_units_phantom_indirect_by_dust(PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg) +{ + /* Run-time math for RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST */ + return MAX((psFeatCfg->ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX]>>1),1); +} + +static IMG_UINT32 rgx_units_phantom_indirect_by_cluster(PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg) +{ + /* Run-time math for RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER */ + return psFeatCfg->ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX]; +} +#endif /* defined(__KERNEL__) */ + +/* Used for block types: TORNADO, TEXAS, Indirect RASTERISATION */ +static IMG_BOOL rgx_hwperf_blk_present_xttop(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, void *pvRtInfo) +{ + DBG_ASSERT(psBlkTypeDesc != NULL); + DBG_ASSERT((psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TORNADO) || + (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) || + (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_RASTER0)); + +#if defined(__KERNEL__) /* Server context */ + PVR_ASSERT(pvDev_km != NULL); + PVR_ASSERT(pvRtInfo != NULL); + { + RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo; + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km; + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE)) + { + if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TORNADO) + { + psRtInfo->ui32NumUnits = 1; + psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; + return IMG_TRUE; + } + else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) + { + psRtInfo->ui32NumUnits = rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg); + psRtInfo->ui32IndirectReg = RGX_CR_TEXAS_PERF_INDIRECT; + return IMG_TRUE; + } + else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_RASTER0) + { + psRtInfo->ui32NumUnits = rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg); + psRtInfo->ui32IndirectReg = 
psBlkTypeDesc->ui32IndirectReg; + return IMG_TRUE; + } + } + } +#else /* FW context */ + PVR_UNREFERENCED_PARAMETER(pvDev_km); + PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); + PVR_UNREFERENCED_PARAMETER(pvRtInfo); +# if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS) + return IMG_TRUE; +# endif +#endif + return IMG_FALSE; +} + +/* Used for block types: JONES, TPU_MCU, TEXAS, BLACKPERL, PBE */ +static IMG_BOOL rgx_hwperf_blk_present_s7top(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, void *pvRtInfo) +{ + DBG_ASSERT(psBlkTypeDesc != NULL); + DBG_ASSERT((psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_JONES) || + (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0) || + (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) || + (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_BLACKPEARL0) || + (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0)); + +#if defined(__KERNEL__) /* Server context */ + PVR_ASSERT(pvDev_km != NULL); + PVR_ASSERT(pvRtInfo != NULL); + { + RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo; + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km; + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) + { + if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0) + { + psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg); + psRtInfo->ui32IndirectReg = RGX_CR_TPU_PERF_INDIRECT; + return IMG_TRUE; + } + else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) + { + psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg); + psRtInfo->ui32IndirectReg = RGX_CR_TEXAS3_PERF_INDIRECT; + return IMG_TRUE; + } + else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_BLACKPEARL0) + { + psRtInfo->ui32NumUnits = rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg); + psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; + return IMG_TRUE; + } + else if 
(psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0) + { + psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_cluster(&psDevInfo->sDevFeatureCfg); + psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; + return IMG_TRUE; + } + else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_JONES) + { + psRtInfo->ui32NumUnits = 1; + psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; + return IMG_TRUE; + } + } + } +#else /* FW context */ + PVR_UNREFERENCED_PARAMETER(pvDev_km); + PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); + PVR_UNREFERENCED_PARAMETER(pvRtInfo); +# if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) + return IMG_TRUE; +# else +# endif +#endif + return IMG_FALSE; +} + +/* Used for block types: TA, TPU_MCU. Also PBE when PBE2_IN_XE is present */ +static IMG_BOOL rgx_hwperf_blk_present_not_s7top(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, void *pvRtInfo) +{ + DBG_ASSERT(psBlkTypeDesc != NULL); + DBG_ASSERT((psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TA) || + (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0) || + (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0)); + +#if defined(__KERNEL__) /* Server context */ + PVR_ASSERT(pvDev_km != NULL); + PVR_ASSERT(pvRtInfo != NULL); + { + RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo; + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km; + if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE) && + RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS)) + { + if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TA) + { + psRtInfo->ui32NumUnits = 1; + psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; + return IMG_TRUE; + } + else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0) + { + if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBE2_IN_XE)) + { + /* PBE counters are not present on this config */ + return IMG_FALSE; + } + psRtInfo->ui32NumUnits = 
rgx_units_phantom_indirect_by_cluster(&psDevInfo->sDevFeatureCfg); + psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; + return IMG_TRUE; + } + else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0) + { + psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg); + psRtInfo->ui32IndirectReg = RGX_CR_TPU_MCU_L0_PERF_INDIRECT; + return IMG_TRUE; + } + } + } +#else /* FW context */ + PVR_UNREFERENCED_PARAMETER(pvDev_km); + PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); + PVR_UNREFERENCED_PARAMETER(pvRtInfo); +# if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS) +# if !defined(RGX_FEATURE_PBE2_IN_XE) + if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0) + { + /* No support for PBE counters without PBE2_IN_XE */ + return IMG_FALSE; + } +# endif + return IMG_TRUE; +# endif +#endif + return IMG_FALSE; +} + +static IMG_BOOL rgx_hwperf_blk_present_check_s7top_or_not(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, void *pvRtInfo) +{ +#if defined(__KERNEL__) + return (rgx_hwperf_blk_present_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo) + || rgx_hwperf_blk_present_not_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo)); + +#elif defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) + return rgx_hwperf_blk_present_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo); + +#elif defined(RGX_FEATURE_PBE2_IN_XE) || defined(RGX_FEATURE_PERFBUS) + return rgx_hwperf_blk_present_not_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo); +#else + PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); + PVR_UNREFERENCED_PARAMETER(pvDev_km); + PVR_UNREFERENCED_PARAMETER(pvRtInfo); + return IMG_FALSE; +#endif +} + +static IMG_BOOL rgx_hwperf_blk_present_check_s7top_or_xttop(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, void *pvRtInfo) +{ +#if defined(__KERNEL__) + return (rgx_hwperf_blk_present_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo) + || rgx_hwperf_blk_present_xttop(psBlkTypeDesc, pvDev_km, pvRtInfo)); + +#elif 
defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) + return rgx_hwperf_blk_present_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo); + +#elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) + return rgx_hwperf_blk_present_xttop(psBlkTypeDesc, pvDev_km, pvRtInfo); +#else + PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); + PVR_UNREFERENCED_PARAMETER(pvDev_km); + PVR_UNREFERENCED_PARAMETER(pvRtInfo); + return IMG_FALSE; +#endif +} + +#if !defined(__KERNEL__) /* Firmware or User-mode context */ +static IMG_BOOL rgx_hwperf_blk_present_false(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, void *pvRtInfo) +{ + PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); + PVR_UNREFERENCED_PARAMETER(pvDev_km); + PVR_UNREFERENCED_PARAMETER(pvRtInfo); + + /* Some functions not used on some BVNCs, silence compiler warnings */ + PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_perfbus); + PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_not_clustergrouping); + PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_raytracing); + PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_xttop); + PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_s7top); + PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_not_s7top); + PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_check_s7top_or_not); + PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_check_s7top_or_xttop); + + return IMG_FALSE; +} + +/* Used to instantiate a null row in the block type model table below where the + * block is not supported for a given build BVNC in firmware/user mode context. + * This is needed as the blockid to block type lookup uses the table as well + * and clients may try to access blocks not in the hardware. 
*/ +#define RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(_blkid) {_blkid, 0, 0, 0, 0, 0, 0, 0, 0, #_blkid, NULL, rgx_hwperf_blk_present_false} + +#endif + + +/***************************************************************************** + RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() end + *****************************************************************************/ + +#if defined(__KERNEL__) /* Values will be calculated at run-time */ +#define RGX_HWPERF_NUM_BLOCK_UNITS RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC +#define RGX_INDIRECT_REG_TEXAS 0xFFFFFFFF +#define RGX_INDIRECT_REG_TPU 0xFFFFFFFF + +#elif defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) +#define RGX_HWPERF_NUM_BLOCK_UNITS RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST +#define RGX_INDIRECT_REG_TEXAS RGX_CR_TEXAS3_PERF_INDIRECT +#define RGX_INDIRECT_REG_TPU RGX_CR_TPU_PERF_INDIRECT + +#else + +#if defined(RGX_FEATURE_PERFBUS) +#define RGX_INDIRECT_REG_TPU RGX_CR_TPU_MCU_L0_PERF_INDIRECT +#endif + +#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) +#define RGX_HWPERF_NUM_BLOCK_UNITS RGX_HWPERF_INDIRECT_BY_PHANTOM +#define RGX_INDIRECT_REG_TEXAS RGX_CR_TEXAS_PERF_INDIRECT +#endif + +#endif + + +/***************************************************************************** + RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] table + + This table holds the entries for the performance counter block type model. + Where the block is not present on an RGX device in question the + pfnIsBlkPresent() returns false, if valid and present it returns true. + Columns in the table with a ** indicate the value is a default and the + value returned in RGX_HWPERF_CNTBLK_RT_INFO when calling pfnIsBlkPresent() + should be used at runtime by the caller. These columns are only valid for + compile time BVNC configured contexts. + + Order of table rows must match order of counter block IDs in the enumeration + RGX_HWPERF_CNTBLK_ID. 
+ *****************************************************************************/ + +static const RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] = +{ + /* ui32CntBlkIdBase, ui32IndirectReg, ui32PerfReg, ui32Select0BaseReg, ui32Counter0BaseReg ui8NumCounters, ui32NumUnits**, ui8SelectRegModeShift, ui8SelectRegOffsetShift, pfnIsBlkPowered pfnIsBlkPresent + * pszBlockNameComment, */ + /*RGX_CNTBLK_ID_TA*/ +#if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__) + {RGX_CNTBLK_ID_TA, 0, /* direct */ RGX_CR_TA_PERF, RGX_CR_TA_PERF_SELECT0, RGX_CR_TA_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_TA_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_not_s7top }, +#else + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TA), +#endif + + /*RGX_CNTBLK_ID_RASTER*/ +#if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__) + {RGX_CNTBLK_ID_RASTER, 0, /* direct */ RGX_CR_RASTERISATION_PERF, RGX_CR_RASTERISATION_PERF_SELECT0, RGX_CR_RASTERISATION_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_RASTERISATION_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_not_clustergrouping }, +#else + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_RASTER), +#endif + + /*RGX_CNTBLK_ID_HUB*/ +#if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__) + {RGX_CNTBLK_ID_HUB, 0, /* direct */ RGX_CR_HUB_BIFPMCACHE_PERF, RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0, RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_HUB_BIFPMCACHE_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_not_clustergrouping }, +#else + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_HUB), +#endif + + /*RGX_CNTBLK_ID_TORNADO*/ +#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(__KERNEL__) + {RGX_CNTBLK_ID_TORNADO, 0, /* direct */ RGX_CR_TORNADO_PERF, RGX_CR_TORNADO_PERF_SELECT0, RGX_CR_TORNADO_PERF_COUNTER_0, 4, 1, 21, 4, "RGX_CR_TORNADO_PERF", rgxfw_hwperf_pow_st_direct, 
rgx_hwperf_blk_present_xttop }, +#else + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TORNADO), +#endif + + /*RGX_CNTBLK_ID_JONES*/ +#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__) + {RGX_CNTBLK_ID_JONES, 0, /* direct */ RGX_CR_JONES_PERF, RGX_CR_JONES_PERF_SELECT0, RGX_CR_JONES_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_JONES_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_s7top }, +#else + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_JONES), +#endif + + /*RGX_CNTBLK_ID_BF RGX_CNTBLK_ID_BT RGX_CNTBLK_ID_RT RGX_CNTBLK_ID_SH*/ +#if defined(__KERNEL__) + {RGX_CNTBLK_ID_BF, 0, /* direct */ DPX_CR_BF_PERF, DPX_CR_BF_PERF_SELECT0, DPX_CR_BF_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_BF_PERF", rgxfw_hwperf_pow_st_gandalf, rgx_hwperf_blk_present_raytracing }, + {RGX_CNTBLK_ID_BT, 0, /* direct */ DPX_CR_BT_PERF, DPX_CR_BT_PERF_SELECT0, DPX_CR_BT_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_BT_PERF", rgxfw_hwperf_pow_st_gandalf, rgx_hwperf_blk_present_raytracing }, + {RGX_CNTBLK_ID_RT, 0, /* direct */ DPX_CR_RT_PERF, DPX_CR_RT_PERF_SELECT0, DPX_CR_RT_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_RT_PERF", rgxfw_hwperf_pow_st_gandalf, rgx_hwperf_blk_present_raytracing }, + {RGX_CNTBLK_ID_SH, 0, /* direct */ RGX_CR_SH_PERF, RGX_CR_SH_PERF_SELECT0, RGX_CR_SH_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_SH_PERF", rgxfw_hwperf_pow_st_gandalf, rgx_hwperf_blk_present_raytracing }, +#else + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BF), + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BT), + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_RT), + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_SH), +#endif + + /*RGX_CNTBLK_ID_TPU_MCU0*/ +#if defined(__KERNEL__) || (defined(RGX_FEATURE_PERFBUS) && !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)) || defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) + {RGX_CNTBLK_ID_TPU_MCU0, RGX_INDIRECT_REG_TPU, RGX_CR_TPU_MCU_L0_PERF, RGX_CR_TPU_MCU_L0_PERF_SELECT0, RGX_CR_TPU_MCU_L0_PERF_COUNTER_0, 4, 
RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST, 21, 3, "RGX_CR_TPU_MCU_L0_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_check_s7top_or_not }, +#else + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TPU_MCU0), +#endif + + /*RGX_CNTBLK_ID_USC0*/ +#if defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__) + {RGX_CNTBLK_ID_USC0, RGX_CR_USC_PERF_INDIRECT, RGX_CR_USC_PERF, RGX_CR_USC_PERF_SELECT0, RGX_CR_USC_PERF_COUNTER_0, 4, RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER, 21, 3, "RGX_CR_USC_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_perfbus }, +#else + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_USC0), +#endif + + /*RGX_CNTBLK_ID_TEXAS0*/ +#if defined(__KERNEL__) || defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) + {RGX_CNTBLK_ID_TEXAS0, RGX_INDIRECT_REG_TEXAS, RGX_CR_TEXAS_PERF, RGX_CR_TEXAS_PERF_SELECT0, RGX_CR_TEXAS_PERF_COUNTER_0, 6, RGX_HWPERF_NUM_BLOCK_UNITS, 31, 3, "RGX_CR_TEXAS_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_check_s7top_or_xttop }, +#else + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TEXAS0), +#endif + + /*RGX_CNTBLK_ID_RASTER0*/ +#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(__KERNEL__) + {RGX_CNTBLK_ID_RASTER0, RGX_CR_RASTERISATION_PERF_INDIRECT, RGX_CR_RASTERISATION_PERF, RGX_CR_RASTERISATION_PERF_SELECT0, RGX_CR_RASTERISATION_PERF_COUNTER_0, 4, RGX_HWPERF_INDIRECT_BY_PHANTOM, 21, 3, "RGX_CR_RASTERISATION_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_xttop }, +#else + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_RASTER0), +#endif + + /*RGX_CNTBLK_ID_BLACKPEARL0*/ +#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__) + {RGX_CNTBLK_ID_BLACKPEARL0, RGX_CR_BLACKPEARL_PERF_INDIRECT, RGX_CR_BLACKPEARL_PERF, RGX_CR_BLACKPEARL_PERF_SELECT0, RGX_CR_BLACKPEARL_PERF_COUNTER_0, 6, RGX_HWPERF_INDIRECT_BY_PHANTOM, 21, 3, "RGX_CR_BLACKPEARL_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_s7top }, +#else + 
RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BLACKPEARL0), +#endif + + /*RGX_CNTBLK_ID_PBE0*/ +#if defined(__KERNEL__) || defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(RGX_FEATURE_PBE2_IN_XE) + {RGX_CNTBLK_ID_PBE0, RGX_CR_PBE_PERF_INDIRECT, RGX_CR_PBE_PERF, RGX_CR_PBE_PERF_SELECT0, RGX_CR_PBE_PERF_COUNTER_0, 4, RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER, 21, 3, "RGX_CR_PBE_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_check_s7top_or_not }, +#else + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_PBE0), +#endif + + /*RGX_CNTBLK_ID_BX_TU0*/ +#if defined(__KERNEL__) + {RGX_CNTBLK_ID_BX_TU0, RGX_CR_BX_TU_PERF_INDIRECT, DPX_CR_BX_TU_PERF, DPX_CR_BX_TU_PERF_SELECT0, DPX_CR_BX_TU_PERF_COUNTER_0, 4, RGX_HWPERF_DOPPLER_BX_TU_BLKS, 21, 3, "RGX_CR_BX_TU_PERF", rgxfw_hwperf_pow_st_gandalf, rgx_hwperf_blk_present_raytracing }, +#else + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BX_TU0), +#endif +}; + + +IMG_INTERNAL IMG_UINT32 +RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **ppsModel) +{ + *ppsModel = gasCntBlkTypeModel; + return ARRAY_SIZE(gasCntBlkTypeModel); +} + +/****************************************************************************** + End of file (rgx_hwperf_table.c) + ******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/shared/devices/rogue/rgx_hwperf_table.h b/drivers/mcst/gpu-imgtec/services/shared/devices/rogue/rgx_hwperf_table.h new file mode 100644 index 000000000000..243febe5399a --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/devices/rogue/rgx_hwperf_table.h @@ -0,0 +1,116 @@ +/*************************************************************************/ /*! +@File +@Title HWPerf counter table header +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Utility functions used internally for HWPerf data retrieval +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGX_HWPERF_TABLE_H +#define RGX_HWPERF_TABLE_H + +#include "img_types.h" +#include "img_defs.h" +#include "rgx_fwif_hwperf.h" +#if defined(__KERNEL__) +#include "rgxdevice.h" +#endif +/*****************************************************************************/ + +/* Forward declaration */ +typedef struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_ RGXFW_HWPERF_CNTBLK_TYPE_MODEL; + +/* Function pointer type for functions to check dynamic power state of + * counter block instance. Used only in firmware. */ +typedef bool (*PFN_RGXFW_HWPERF_CNTBLK_POWERED)( + RGX_HWPERF_CNTBLK_ID eBlkType, + IMG_UINT8 ui8UnitId); + +#if defined(__KERNEL__) +/* Counter block run-time info */ +typedef struct +{ + IMG_UINT32 ui32IndirectReg; /* 0 if direct type otherwise the indirect control register to select indirect unit */ + IMG_UINT32 ui32NumUnits; /* Number of instances of this block type in the core */ +} RGX_HWPERF_CNTBLK_RT_INFO; +#endif + +/* Function pointer type for functions to check block is valid and present + * on that RGX Device at runtime. It may have compile logic or run-time + * logic depending on where the code executes: server, srvinit or firmware. + * Values in the psRtInfo output parameter are only valid if true returned. 
+ */ +typedef IMG_BOOL (*PFN_RGXFW_HWPERF_CNTBLK_PRESENT)( + const struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_* psBlkTypeDesc, + void *pvDev_km, + void *pvRtInfo); + +/* This structure encodes properties of a type of performance counter block. + * The structure is sometimes referred to as a block type descriptor. These + * properties contained in this structure represent the columns in the block + * type model table variable below. These values vary depending on the build + * BVNC and core type. + * Each direct block has a unique type descriptor and each indirect group has + * a type descriptor. + */ +struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_ +{ + /* Could use RGXFW_ALIGN_DCACHEL here but then we would waste 40% of the cache line? */ + IMG_UINT32 ui32CntBlkIdBase; /* The starting block id for this block type */ + IMG_UINT32 ui32IndirectReg; /* 0 if direct type otherwise the indirect control register to select indirect unit */ + IMG_UINT32 ui32PerfReg; /* RGX_CR_*_PERF register for this block type */ + IMG_UINT32 ui32Select0BaseReg; /* RGX_CR_*_PERF_SELECT0 register for this block type */ + IMG_UINT32 ui32Counter0BaseReg; /* RGX_CR_*_PERF_COUNTER_0 register for this block type */ + IMG_UINT8 ui8NumCounters; /* Number of counters in this block type */ + IMG_UINT8 ui8NumUnits; /* Number of instances of this block type in the core */ + IMG_UINT8 ui8SelectRegModeShift; /* Mode field shift value of select registers */ + IMG_UINT8 ui8SelectRegOffsetShift; /* Interval between select registers, either 8 bytes or 16, hence << 3 or << 4 */ + const IMG_CHAR *pszBlockNameComment; /* Name of the PERF register. 
Used while dumping the perf counters to pdumps */ + PFN_RGXFW_HWPERF_CNTBLK_POWERED pfnIsBlkPowered; /* A function to determine dynamic power state for the block type */ + PFN_RGXFW_HWPERF_CNTBLK_PRESENT pfnIsBlkPresent; /* A function to determine presence on RGX Device at run-time */ +}; + +/*****************************************************************************/ + +IMG_INTERNAL IMG_UINT32 RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **ppsModel); + +#endif /* RGX_HWPERF_TABLE_H */ + +/****************************************************************************** + End of file (rgx_hwperf_table.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/shared/include/allocmem.h b/drivers/mcst/gpu-imgtec/services/shared/include/allocmem.h new file mode 100644 index 000000000000..d0ce7a7ffe61 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/include/allocmem.h @@ -0,0 +1,181 @@ +/*************************************************************************/ /*! +@File allocmem.h +@Title memory allocation header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Memory-Allocation API definitions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef ALLOCMEM_H +#define ALLOCMEM_H + +#include "img_types.h" +#include "pvr_debug.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +#if !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) || !defined(DEBUG) || !defined(PVRSRV_ENABLE_PROCESS_STATS) || !defined(PVRSRV_ENABLE_MEMORY_STATS) || defined(DOXYGEN) + +/**************************************************************************/ /*! +@Function OSAllocMem +@Description Allocates CPU memory. Contents are uninitialized. 
+ If passed a size of zero, function should not assert, + but just return a NULL pointer. +@Input ui32Size Size of required allocation (in bytes) +@Return Pointer to allocated memory on success. + Otherwise NULL. + */ /**************************************************************************/ +void *OSAllocMem(IMG_UINT32 ui32Size); +#define OSAllocMem(_size) (OSAllocMem)((_size)) +/**************************************************************************/ /*! +@Function OSAllocZMem +@Description Allocates CPU memory and initializes the contents to zero. + If passed a size of zero, function should not assert, + but just return a NULL pointer. +@Input ui32Size Size of required allocation (in bytes) +@Return Pointer to allocated memory on success. + Otherwise NULL. + */ /**************************************************************************/ +void *OSAllocZMem(IMG_UINT32 ui32Size); +#define OSAllocZMem(_size) (OSAllocZMem)((_size)) + +#else +void *OSAllocMem(IMG_UINT32 ui32Size, void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine); +void *OSAllocZMem(IMG_UINT32 ui32Size, void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine); +#define OSAllocMem(_size) (OSAllocMem)((_size), (__FILE__), (__LINE__)) +#define OSAllocZMem(_size) (OSAllocZMem)((_size), (__FILE__), (__LINE__)) +#endif + +/**************************************************************************/ /*! +@Function OSAllocMemNoStats +@Description Allocates CPU memory. Contents are uninitialized. + If passed a size of zero, function should not assert, + but just return a NULL pointer. + The allocated memory is not accounted for by process stats. + Process stats are an optional feature (enabled only when + PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount + of memory allocated to help in debugging. Where this is not + required, OSAllocMem() and OSAllocMemNoStats() equate to + the same operation. 
+@Input ui32Size Size of required allocation (in bytes) +@Return Pointer to allocated memory on success. + Otherwise NULL. + */ /**************************************************************************/ +void *OSAllocMemNoStats(IMG_UINT32 ui32Size); + +/**************************************************************************/ /*! +@Function OSAllocZMemNoStats +@Description Allocates CPU memory and initializes the contents to zero. + If passed a size of zero, function should not assert, + but just return a NULL pointer. + The allocated memory is not accounted for by process stats. + Process stats are an optional feature (enabled only when + PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount + of memory allocated to help in debugging. Where this is not + required, OSAllocZMem() and OSAllocZMemNoStats() equate to + the same operation. +@Input ui32Size Size of required allocation (in bytes) +@Return Pointer to allocated memory on success. + Otherwise NULL. + */ /**************************************************************************/ +void *OSAllocZMemNoStats(IMG_UINT32 ui32Size); + +/**************************************************************************/ /*! +@Function OSFreeMem +@Description Frees previously allocated CPU memory. +@Input pvCpuVAddr Pointer to the memory to be freed. +@Return None. + */ /**************************************************************************/ +void OSFreeMem(void *pvCpuVAddr); + +/**************************************************************************/ /*! +@Function OSFreeMemNoStats +@Description Frees previously allocated CPU memory. + The freed memory does not update the figures in process stats. + Process stats are an optional feature (enabled only when + PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount + of memory allocated to help in debugging. Where this is not + required, OSFreeMem() and OSFreeMemNoStats() equate to the + same operation. 
+@Input pvCpuVAddr Pointer to the memory to be freed. +@Return None. + */ /**************************************************************************/ +void OSFreeMemNoStats(void *pvCpuVAddr); + +/* + * These macros allow us to catch double-free bugs on DEBUG builds and + * prevent crashes on RELEASE builds. + */ + +/*! @cond Doxygen_Suppress */ +#if defined(DEBUG) +#define double_free_sentinel ((void *)&OSFreeMem) +#define ALLOCMEM_ASSERT(exp) PVR_ASSERT(exp) +#else +#define double_free_sentinel NULL +#define ALLOCMEM_ASSERT(exp) do {} while (0) +#endif +/*! @endcond */ + +/*! Frees memory allocated by OSAllocMem(). */ +#define OSFreeMem(_ptr) do { \ + ALLOCMEM_ASSERT((_ptr) != double_free_sentinel); \ + (OSFreeMem)(_ptr); \ + (_ptr) = double_free_sentinel; \ + MSC_SUPPRESS_4127 \ + } while (0) + +/*! Frees memory allocated by OSAllocMemNoStats(). */ +#define OSFreeMemNoStats(_ptr) do { \ + ALLOCMEM_ASSERT((_ptr) != double_free_sentinel); \ + (OSFreeMemNoStats)(_ptr); \ + (_ptr) = double_free_sentinel; \ + MSC_SUPPRESS_4127 \ + } while (0) + +#if defined(__cplusplus) +} +#endif + +#endif /* ALLOCMEM_H */ + +/****************************************************************************** + End of file (allocmem.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/shared/include/device_connection.h b/drivers/mcst/gpu-imgtec/services/shared/include/device_connection.h new file mode 100644 index 000000000000..8e03284b8f74 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/include/device_connection.h @@ -0,0 +1,117 @@ +/*************************************************************************/ /*! +@File device_connection.h +@Title +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(DEVICE_CONNECTION_H) +#define DEVICE_CONNECTION_H + +#include "img_types.h" +#include "img_defs.h" + +#if defined(__KERNEL__) +typedef struct _PVRSRV_DEVICE_NODE_ *SHARED_DEV_CONNECTION; +#else +#include "connection.h" +typedef const struct PVRSRV_DEV_CONNECTION_TAG *SHARED_DEV_CONNECTION; +#endif + +/****************************************************************************** + * Device capability flags and masks + * + * Following bitmask shows allocated ranges and values for our device + * capability settings: + * + * 31 27 23 19 15 11 7 3 0 + * |...|...|...|...|...|...|...|... + * ** CACHE_COHERENT [0x1..0x2] + * x PVRSRV_CACHE_COHERENT_DEVICE_FLAG + * x. PVRSRV_CACHE_COHERENT_CPU_FLAG + * *... NONMAPPABLE_MEMORY [0x8] + * x... PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG + * *.... PDUMP_IS_RECORDING [0x10] + * x.... PVRSRV_PDUMP_IS_RECORDING + * ***........ DEVMEM_SVM_ALLOC [0x100..0x400] + * x........ PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED + * x......... PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED + * x.......... PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL + * *........... FBCDC_V3_1 [0x800] + * x........... FBCDC_V3_1_USED + * |...|...|...|...|...|...|...|... 
+ *****************************************************************************/ + +/* Flag to be passed over the bridge during connection stating whether CPU cache coherent is available*/ +#define PVRSRV_CACHE_COHERENT_SHIFT (0) +#define PVRSRV_CACHE_COHERENT_DEVICE_FLAG (1U << PVRSRV_CACHE_COHERENT_SHIFT) +#define PVRSRV_CACHE_COHERENT_CPU_FLAG (2U << PVRSRV_CACHE_COHERENT_SHIFT) +#define PVRSRV_CACHE_COHERENT_EMULATE_FLAG (4U << PVRSRV_CACHE_COHERENT_SHIFT) +#define PVRSRV_CACHE_COHERENT_MASK (7U << PVRSRV_CACHE_COHERENT_SHIFT) + +/* Flag to be passed over the bridge during connection stating whether CPU non-mappable memory is present */ +#define PVRSRV_NONMAPPABLE_MEMORY_PRESENT_SHIFT (7) +#define PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG (1U << PVRSRV_NONMAPPABLE_MEMORY_PRESENT_SHIFT) + +/* Flag to be passed over the bridge to indicate PDump activity */ +#define PVRSRV_PDUMP_IS_RECORDING_SHIFT (4) +#define PVRSRV_PDUMP_IS_RECORDING (1U << PVRSRV_PDUMP_IS_RECORDING_SHIFT) + +/* Flag to be passed over the bridge during connection stating SVM allocation availability */ +#define PVRSRV_DEVMEM_SVM_ALLOC_SHIFT (8) +#define PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED (1U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT) +#define PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED (2U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT) +#define PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL (4U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT) + +/* Flag to be passed over the bridge during connection stating whether GPU uses FBCDC v3.1 */ +#define PVRSRV_FBCDC_V3_1_USED_SHIFT (11) +#define PVRSRV_FBCDC_V3_1_USED (1U << PVRSRV_FBCDC_V3_1_USED_SHIFT) + + +static INLINE IMG_HANDLE GetBridgeHandle(SHARED_DEV_CONNECTION hDevConnection) +{ +#if defined(__KERNEL__) + return hDevConnection; +#else + return hDevConnection->hServices; +#endif +} + + +#endif /* !defined(DEVICE_CONNECTION_H) */ diff --git a/drivers/mcst/gpu-imgtec/services/shared/include/devicemem.h b/drivers/mcst/gpu-imgtec/services/shared/include/devicemem.h new file mode 100644 index 
000000000000..9f0588a54f02 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/include/devicemem.h @@ -0,0 +1,729 @@ +/*************************************************************************/ /*! +@File +@Title Device Memory Management core internal +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Services internal interface to core device memory management + functions that are shared between client and server code. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef SRVCLIENT_DEVICEMEM_H +#define SRVCLIENT_DEVICEMEM_H + +/****************************************************************************** + * * + * +------------+ +------------+ +--------------+ +--------------+ * + * | a sub- | | a sub- | | an | | allocation | * + * | allocation | | allocation | | allocation | | also mapped | * + * | | | | | in proc 1 | | into proc 2 | * + * +------------+ +------------+ +--------------+ +--------------+ * + * | | | | * + * +--------------+ +--------------+ +--------------+ * + * | page gran- | | page gran- | | page gran- | * + * | ular mapping | | ular mapping | | ular mapping | * + * +--------------+ +--------------+ +--------------+ * + * | | | * + * | | | * + * | | | * + * +--------------+ +--------------+ * + * | | | | * + * | A "P.M.R." | | A "P.M.R." | * + * | | | | * + * +--------------+ +--------------+ * + * * + ******************************************************************************/ + +/* + All device memory allocations are ultimately a view upon (not + necessarily the whole of) a "PMR". + + A PMR is a "Physical Memory Resource", which may be a + "pre-faulted" lump of physical memory, or it may be a + representation of some physical memory that will be instantiated + at some future time. 
+ + PMRs always represent multiple of some power-of-2 "contiguity" + promised by the PMR, which will allow them to be mapped in whole + pages into the device MMU. As memory allocations may be smaller + than a page, these mappings may be suballocated and thus shared + between multiple allocations in one process. A PMR may also be + mapped simultaneously into multiple device memory contexts + (cross-process scenario), however, for security reasons, it is not + legal to share a PMR "both ways" at once, that is, mapped into + multiple processes and divided up amongst several suballocations. + + This PMR terminology is introduced here for background + information, but is generally of little concern to the caller of + this API. This API handles suballocations and mappings, and the + caller thus deals primarily with MEMORY DESCRIPTORS representing + an allocation or suballocation, HEAPS representing ranges of + virtual addresses in a CONTEXT. +*/ + +/* + |<---------------------------context------------------------------>| + |<-------heap------->| |<-------heap------->|<-------heap------->| + |<-alloc->| | |<-alloc->|<-alloc->|| |<-alloc->| | +*/ + +#include "img_types.h" +#include "img_defs.h" +#include "devicemem_typedefs.h" +#include "pdumpdefs.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" + +#include "pdump.h" + +#include "device_connection.h" + + +typedef IMG_UINT32 DEVMEM_HEAPCFGID; +#define DEVMEM_HEAPCFG_FORCLIENTS 0 +#define DEVMEM_HEAPCFG_META 1 + + +/* + In order to call the server side functions, we need a bridge handle. + We abstract that here, as we may wish to change its form. + */ + +typedef IMG_HANDLE DEVMEM_BRIDGE_HANDLE; + +/*************************************************************************/ /*! +@Function DevmemUnpin +@Description This is the counterpart to DevmemPin(). It is meant to be + called before repinning an allocation. + + For a detailed description see client API documentation. 
+ +@Input phMemDesc The MemDesc that is going to be unpinned. + +@Return PVRSRV_ERROR: PVRSRV_OK on success and the memory is + registered to be reclaimed. Error otherwise. +*/ /**************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +DevmemUnpin(DEVMEM_MEMDESC *psMemDesc); + +/*************************************************************************/ /*! +@Function DevmemPin +@Description This is the counterpart to DevmemUnpin(). It is meant to be + called after unpinning an allocation. + + For a detailed description see client API documentation. + +@Input phMemDesc The MemDesc that is going to be pinned. + +@Return PVRSRV_ERROR: PVRSRV_OK on success and the allocation content + was successfully restored. + + PVRSRV_ERROR_PMR_NEW_MEMORY when the content + could not be restored and new physical memory + was allocated. + + A different error otherwise. +*/ /**************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +DevmemPin(DEVMEM_MEMDESC *psMemDesc); + +IMG_INTERNAL PVRSRV_ERROR +DevmemGetHeapInt(DEVMEM_HEAP *psHeap, + IMG_HANDLE *phDevmemHeap); + +IMG_INTERNAL PVRSRV_ERROR +DevmemGetSize(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_SIZE_T* puiSize); + +IMG_INTERNAL void +DevmemGetAnnotation(DEVMEM_MEMDESC *psMemDesc, + IMG_CHAR **pszAnnotation); + +/* + * DevmemCreateContext() + * + * Create a device memory context + * + * This must be called before any heap is created in this context + * + * Caller to provide bridge handle which will be recorded internally and used + * for all future operations on items from this memory context. Caller also + * to provide devicenode handle, as this is used for MMU configuration and + * also to determine the heap configuration for the auto-instantiated heaps. + * + * Note that when compiled in services/server, the hBridge is not used and + * is thrown away by the "fake" direct bridge. (This may change. 
It is + * recommended that NULL be passed for the handle for now.) + * + * hDeviceNode and uiHeapBlueprintID shall together dictate which heap-config + * to use. + * + * This will cause the server side counterpart to be created also. + * + * If you call DevmemCreateContext() (and the call succeeds) you are promising + * that you will later call Devmem_ContextDestroy(), except for abnormal + * process termination in which case it is expected it will be destroyed as + * part of handle clean up. + * + * Caller to provide storage for the pointer to the newly created + * NEWDEVMEM_CONTEXT object. + */ +PVRSRV_ERROR +DevmemCreateContext(SHARED_DEV_CONNECTION hDevConnection, + DEVMEM_HEAPCFGID uiHeapBlueprintID, + DEVMEM_CONTEXT **ppsCtxPtr); + +/* + * DevmemAcquireDevPrivData() + * + * Acquire the device private data for this memory context + */ +PVRSRV_ERROR +DevmemAcquireDevPrivData(DEVMEM_CONTEXT *psCtx, + IMG_HANDLE *hPrivData); + +/* + * DevmemReleaseDevPrivData() + * + * Release the device private data for this memory context + */ +PVRSRV_ERROR +DevmemReleaseDevPrivData(DEVMEM_CONTEXT *psCtx); + +/* + * DevmemDestroyContext() + * + * Undoes that done by DevmemCreateContext() + */ +PVRSRV_ERROR +DevmemDestroyContext(DEVMEM_CONTEXT *psCtx); + +/* + * DevmemCreateHeap() + * + * Create a heap in the given context. + * + * N.B. Not intended to be called directly, though it can be. + * Normally, heaps are instantiated at context creation time according + * to the specified blueprint. See DevmemCreateContext() for details. + * + * This will cause MMU code to set up data structures for the heap, + * but may not cause page tables to be modified until allocations are + * made from the heap. 
+ * + * uiReservedRegionLength Reserved address space for static VAs shared + * between clients and firmware + * + * The "Quantum" is both the device MMU page size to be configured for + * this heap, and the unit multiples of which "quantized" allocations + * are made (allocations smaller than this, known as "suballocations" + * will be made from a "sub alloc RA" and will "import" chunks + * according to this quantum) + * + * Where imported PMRs (or, for example, PMRs created by device class + * buffers) are mapped into this heap, it is important that the + * physical contiguity guarantee offered by the PMR is greater than or + * equal to the quantum size specified here, otherwise the attempt to + * map it will fail. "Normal" allocations via Devmem_Allocate + * shall automatically meet this requirement, as each "import" will + * trigger the creation of a PMR with the desired contiguity. The + * supported quantum sizes in that case shall be dictated by the OS + * specific implementation of PhysmemNewOSRamBackedPMR() (see) + */ +PVRSRV_ERROR +DevmemCreateHeap(DEVMEM_CONTEXT *psCtxPtr, + /* base and length of heap */ + IMG_DEV_VIRTADDR sBaseAddress, + IMG_DEVMEM_SIZE_T uiLength, + IMG_DEVMEM_SIZE_T uiReservedRegionLength, + /* log2 of allocation quantum, i.e. "page" size. + All allocations (that go to server side) are + multiples of this. We use a client-side RA to + make sub-allocations from this */ + IMG_UINT32 ui32Log2Quantum, + /* The minimum import alignment for this heap */ + IMG_UINT32 ui32Log2ImportAlignment, + /* Name of heap for debug */ + /* N.B. Okay to exist on caller's stack - this + func takes a copy if it needs it. */ + const IMG_CHAR *pszName, + DEVMEM_HEAPCFGID uiHeapBlueprintID, + DEVMEM_HEAP **ppsHeapPtr); +/* + * DevmemDestroyHeap() + * + * Reverses DevmemCreateHeap() + * + * N.B. 
All allocations must have been freed and all mappings must + * have been unmapped before invoking this call + */ +PVRSRV_ERROR +DevmemDestroyHeap(DEVMEM_HEAP *psHeap); + +/* + * DevmemExportalignAdjustSizeAndAlign() + * Compute the Size and Align passed to avoid suballocations + * (used when allocation with PVRSRV_MEMALLOCFLAG_EXPORTALIGN). + * + * Returns PVRSRV_ERROR_INVALID_PARAMS if uiLog2Quantum has invalid value. + */ +IMG_INTERNAL PVRSRV_ERROR +DevmemExportalignAdjustSizeAndAlign(IMG_UINT32 uiLog2Quantum, + IMG_DEVMEM_SIZE_T *puiSize, + IMG_DEVMEM_ALIGN_T *puiAlign); + +/* + * DevmemSubAllocate() + * + * Makes an allocation (possibly a "suballocation", as described + * below) of device virtual memory from this heap. + * + * The size and alignment of the allocation will be honoured by the RA + * that allocates the "suballocation". The resulting allocation will + * be mapped into GPU virtual memory and the physical memory to back + * it will exist, by the time this call successfully completes. + * + * The size must be a positive integer multiple of the alignment. + * (i.e. the alignment specifies the alignment of both the start and + * the end of the resulting allocation.) + * + * Allocations made via this API are routed through a "suballocation + * RA" which is responsible for ensuring that small allocations can be + * made without wasting physical memory in the server. Furthermore, + * such suballocations can be made entirely client side without + * needing to go to the server unless the allocation spills into a new + * page. + * + * Such suballocations cause many allocations to share the same "PMR". + * This happens only when the flags match exactly. + * + */ + +PVRSRV_ERROR +DevmemSubAllocate(IMG_UINT8 uiPreAllocMultiplier, + DEVMEM_HEAP *psHeap, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiAlign, + DEVMEM_FLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr); + +#define DevmemAllocate(...) 
\ + DevmemSubAllocate(DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER, __VA_ARGS__) + +PVRSRV_ERROR +DevmemAllocateExportable(SHARED_DEV_CONNECTION hDevConnection, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiAlign, + IMG_UINT32 uiLog2HeapPageSize, + DEVMEM_FLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr); + +PVRSRV_ERROR +DeviceMemChangeSparse(DEVMEM_MEMDESC *psMemDesc, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *paui32AllocPageIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pauiFreePageIndices, + SPARSE_MEM_RESIZE_FLAGS uiFlags); + +PVRSRV_ERROR +DevmemAllocateSparse(SHARED_DEV_CONNECTION hDevConnection, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_DEVMEM_ALIGN_T uiAlign, + IMG_UINT32 uiLog2HeapPageSize, + DEVMEM_FLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr); + +PVRSRV_ERROR +DevmemSubAllocateAndMap(IMG_UINT8 uiPreAllocMultiplier, + DEVMEM_HEAP *psHeap, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiAlign, + DEVMEM_FLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr, + IMG_DEV_VIRTADDR *psDevVirtAddr); + +#define DevmemAllocateAndMap(...) \ + DevmemSubAllocateAndMap(DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER, __VA_ARGS__) + +/* + * DevmemFree() + * + * Reverses that done by DevmemSubAllocate() N.B. The underlying + * mapping and server side allocation _may_ not be torn down, for + * example, if the allocation has been exported, or if multiple + * allocations were suballocated from the same mapping, but this is + * properly refcounted, so the caller does not have to care. + */ + +IMG_BOOL +DevmemFree(DEVMEM_MEMDESC *psMemDesc); + +IMG_BOOL +DevmemReleaseDevAddrAndFree(DEVMEM_MEMDESC *psMemDesc); + +/* + DevmemMapToDevice: + + Map an allocation to the device it was allocated from. 
+ This function _must_ be called before any call to + DevmemAcquireDevVirtAddr is made as it binds the allocation + to the heap. + DevmemReleaseDevVirtAddr is used to release the reference + to the device mapping this function created, but it doesn't + mean that the memory will actually be unmapped from the + device as other references to the mapping obtained via + DevmemAcquireDevVirtAddr could still be active. +*/ +PVRSRV_ERROR DevmemMapToDevice(DEVMEM_MEMDESC *psMemDesc, + DEVMEM_HEAP *psHeap, + IMG_DEV_VIRTADDR *psDevVirtAddr); + +/* + DevmemMapToDeviceAddress: + + Same as DevmemMapToDevice but the caller chooses the address + to map to. +*/ +IMG_INTERNAL PVRSRV_ERROR +DevmemMapToDeviceAddress(DEVMEM_MEMDESC *psMemDesc, + DEVMEM_HEAP *psHeap, + IMG_DEV_VIRTADDR sDevVirtAddr); + +/* + DevmemGetDevVirtAddr + + Obtain the MemDesc's device virtual address. + This function _must_ be called after DevmemMapToDevice(Address) + and is expected to be used by functions which didn't allocate + the MemDesc but need to know its address. + It will PVR_ASSERT if no device mapping exists and 0 is returned. + */ +IMG_DEV_VIRTADDR +DevmemGetDevVirtAddr(DEVMEM_MEMDESC *psMemDesc); + +/* + DevmemAcquireDevVirtAddr + + Acquire the MemDesc's device virtual address. + This function _must_ be called after DevmemMapToDevice + and is expected to be used by functions which didn't allocate + the MemDesc but need to know its address + */ +PVRSRV_ERROR DevmemAcquireDevVirtAddr(DEVMEM_MEMDESC *psMemDesc, + IMG_DEV_VIRTADDR *psDevVirtAddrRet); + +/* + * DevmemReleaseDevVirtAddr() + * + * give up the licence to use the device virtual address that was + * acquired by "Acquire" or "MapToDevice" + */ +void +DevmemReleaseDevVirtAddr(DEVMEM_MEMDESC *psMemDesc); + +/* + * DevmemAcquireCpuVirtAddr() + * + * Acquires a license to use the cpu virtual address of this mapping. + * Note that the memory may not have been mapped into cpu virtual + * memory prior to this call. 
On first "acquire" the memory will be + * mapped in (if it wasn't statically mapped in) and on last put it + * _may_ become unmapped. Later calling "Acquire" again, _may_ cause + * the memory to be mapped at a different address. + */ +PVRSRV_ERROR DevmemAcquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc, + void **ppvCpuVirtAddr); + +/* + * DevmemReacquireCpuVirtAddr() + * + * (Re)acquires license to use the cpu virtual address of this mapping + * if (and only if) there is already a pre-existing license to use the + * cpu virtual address for the mapping, returns NULL otherwise. + */ +void DevmemReacquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc, + void **ppvCpuVirtAddr); + +/* + * DevmemReleaseCpuVirtAddr() + * + * give up the licence to use the cpu virtual address that was granted + * with the "Get" call. + */ +void +DevmemReleaseCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc); + +#if defined(SUPPORT_INSECURE_EXPORT) +/* + * DevmemExport() + * + * Given a memory allocation allocated with DevmemAllocateExportable() + * create a "cookie" that can be passed intact by the caller's own choice + * of secure IPC to another process and used as the argument to "map" + * to map this memory into a heap in the target processes. N.B. This can + * also be used to map into multiple heaps in one process, though that's not + * the intention. + * + * Note, the caller must later call Unexport before freeing the + * memory. 
+ */ +PVRSRV_ERROR DevmemExport(DEVMEM_MEMDESC *psMemDesc, + DEVMEM_EXPORTCOOKIE *psExportCookie); + + +void DevmemUnexport(DEVMEM_MEMDESC *psMemDesc, + DEVMEM_EXPORTCOOKIE *psExportCookie); + +PVRSRV_ERROR +DevmemImport(SHARED_DEV_CONNECTION hDevConnection, + DEVMEM_EXPORTCOOKIE *psCookie, + DEVMEM_FLAGS_T uiFlags, + DEVMEM_MEMDESC **ppsMemDescPtr); +#endif /* SUPPORT_INSECURE_EXPORT */ + +/* + * DevmemMakeLocalImportHandle() + * + * This is a "special case" function for making a server export cookie + * which went through the direct bridge into an export cookie that can + * be passed through the client bridge. + */ +PVRSRV_ERROR +DevmemMakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hServerExport, + IMG_HANDLE *hClientExport); + +/* + * DevmemUnmakeLocalImportHandle() + * + * Free any resource associated with the Make operation + */ +PVRSRV_ERROR +DevmemUnmakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hClientExport); + +/* + * + * The following set of functions is specific to the heap "blueprint" + * stuff, for automatic creation of heaps when a context is created + * + */ + + +/* Devmem_HeapConfigCount: returns the number of heap configs that + this device has. Note that there is no acquire/release semantics + required, as this data is guaranteed to be constant for the + lifetime of the device node */ +PVRSRV_ERROR +DevmemHeapConfigCount(SHARED_DEV_CONNECTION hDevConnection, + IMG_UINT32 *puiNumHeapConfigsOut); + +/* Devmem_HeapCount: returns the number of heaps that a given heap + config on this device has. Note that there is no acquire/release + semantics required, as this data is guaranteed to be constant for + the lifetime of the device node */ +PVRSRV_ERROR +DevmemHeapCount(SHARED_DEV_CONNECTION hDevConnection, + IMG_UINT32 uiHeapConfigIndex, + IMG_UINT32 *puiNumHeapsOut); +/* Devmem_HeapConfigName: return the name of the given heap config. 
+ The caller is to provide the storage for the returned string and + indicate the number of bytes (including null terminator) for such + string in the BufSz arg. Note that there is no acquire/release + semantics required, as this data is guaranteed to be constant for + the lifetime of the device node. + */ +PVRSRV_ERROR +DevmemHeapConfigName(SHARED_DEV_CONNECTION hsDevConnection, + IMG_UINT32 uiHeapConfigIndex, + IMG_CHAR *pszConfigNameOut, + IMG_UINT32 uiConfigNameBufSz); + +/* Devmem_HeapDetails: fetches all the metadata that is recorded in + this heap "blueprint". Namely: heap name (caller to provide + storage, and indicate buffer size (including null terminator) in + BufSz arg), device virtual address and length, log2 of data page + size (will be one of 12, 14, 16, 18, 20, 21, at time of writing). + Note that there is no acquire/release semantics required, as this + data is guaranteed to be constant for the lifetime of the device + node. */ +PVRSRV_ERROR +DevmemHeapDetails(SHARED_DEV_CONNECTION hDevConnection, + IMG_UINT32 uiHeapConfigIndex, + IMG_UINT32 uiHeapIndex, + IMG_CHAR *pszHeapNameOut, + IMG_UINT32 uiHeapNameBufSz, + IMG_DEV_VIRTADDR *psDevVAddrBaseOut, + IMG_DEVMEM_SIZE_T *puiHeapLengthOut, + IMG_DEVMEM_SIZE_T *puiReservedRegionLengthOut, + IMG_UINT32 *puiLog2DataPageSize, + IMG_UINT32 *puiLog2ImportAlignmentOut); + +/* + * Devmem_FindHeapByName() + * + * returns the heap handle for the named _automagic_ heap in this + * context. "automagic" heaps are those that are born with the + * context from a blueprint + */ +PVRSRV_ERROR +DevmemFindHeapByName(const DEVMEM_CONTEXT *psCtx, + const IMG_CHAR *pszHeapName, + DEVMEM_HEAP **ppsHeapRet); + +/* + * DevmemGetHeapBaseDevVAddr() + * + * returns the device virtual address of the base of the heap. 
+ */ + +PVRSRV_ERROR +DevmemGetHeapBaseDevVAddr(DEVMEM_HEAP *psHeap, + IMG_DEV_VIRTADDR *pDevVAddr); + +PVRSRV_ERROR +DevmemLocalGetImportHandle(DEVMEM_MEMDESC *psMemDesc, + IMG_HANDLE *phImport); + +PVRSRV_ERROR +DevmemGetImportUID(DEVMEM_MEMDESC *psMemDesc, + IMG_UINT64 *pui64UID); + +PVRSRV_ERROR +DevmemGetReservation(DEVMEM_MEMDESC *psMemDesc, + IMG_HANDLE *hReservation); + +IMG_INTERNAL PVRSRV_ERROR +DevmemGetPMRData(DEVMEM_MEMDESC *psMemDesc, + IMG_HANDLE *hPMR, + IMG_DEVMEM_OFFSET_T *puiPMROffset); + +IMG_INTERNAL void +DevmemGetFlags(DEVMEM_MEMDESC *psMemDesc, + DEVMEM_FLAGS_T *puiFlags); + +IMG_INTERNAL SHARED_DEV_CONNECTION +DevmemGetConnection(DEVMEM_MEMDESC *psMemDesc); + +PVRSRV_ERROR +DevmemLocalImport(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hExtHandle, + DEVMEM_FLAGS_T uiFlags, + DEVMEM_MEMDESC **ppsMemDescPtr, + IMG_DEVMEM_SIZE_T *puiSizePtr, + const IMG_CHAR *pszAnnotation); + +IMG_INTERNAL PVRSRV_ERROR +DevmemIsDevVirtAddrValid(DEVMEM_CONTEXT *psContext, + IMG_DEV_VIRTADDR sDevVAddr); + +IMG_INTERNAL PVRSRV_ERROR +DevmemGetFaultAddress(DEVMEM_CONTEXT *psContext, + IMG_DEV_VIRTADDR *psFaultAddress); + +IMG_INTERNAL PVRSRV_ERROR +DevmemFlushDeviceSLCRange(DEVMEM_MEMDESC *psMemDesc, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + IMG_BOOL bInvalidate); + +IMG_INTERNAL PVRSRV_ERROR +DevmemInvalidateFBSCTable(DEVMEM_CONTEXT *psContext, + IMG_UINT64 ui64FBSCEntries); + +/* DevmemGetHeapLog2PageSize() + * + * Get the page size used for a certain heap. + */ +IMG_UINT32 +DevmemGetHeapLog2PageSize(DEVMEM_HEAP *psHeap); + +/*************************************************************************/ /*! +@Function RegisterDevMemPFNotify +@Description Registers that the application wants to be signaled when a page + fault occurs. + +@Input psContext Memory context the process that would like to + be notified about. +@Input ui32PID The PID of the calling process. +@Input bRegister If true, register. If false, de-register. 
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_ + error code +*/ /**************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +RegisterDevmemPFNotify(DEVMEM_CONTEXT *psContext, + IMG_UINT32 ui32PID, + IMG_BOOL bRegister); + +/*************************************************************************/ /*! +@Function GetMaxDevMemSize +@Description Get the amount of device memory on current platform + (memory size in Bytes) +@Output puiLMASize LMA memory size +@Output puiUMASize UMA memory size +@Return Error code +*/ /**************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +GetMaxDevMemSize(SHARED_DEV_CONNECTION hDevConnection, + IMG_DEVMEM_SIZE_T *puiLMASize, + IMG_DEVMEM_SIZE_T *puiUMASize); + +/*************************************************************************/ /*! +@Function DevmemHeapSetPremapStatus +@Description In some special cases like virtualisation, a device memory heap + must be entirely backed by physical memory and mapped into the + device's virtual address space. This is done at context creation. + When objects are allocated from such a heap, the mapping part + must be skipped. The 'bPremapped' flag dictates if allocations + are to be mapped or not. + +@Input psHeap Device memory heap to be updated +@Input IsPremapped The premapping status to be set +*/ /**************************************************************************/ +IMG_INTERNAL void +DevmemHeapSetPremapStatus(DEVMEM_HEAP *psHeap, IMG_BOOL IsPremapped); + +#endif /* #ifndef SRVCLIENT_DEVICEMEM_H */ diff --git a/drivers/mcst/gpu-imgtec/services/shared/include/devicemem_pdump.h b/drivers/mcst/gpu-imgtec/services/shared/include/devicemem_pdump.h new file mode 100644 index 000000000000..09b28afe7e51 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/include/devicemem_pdump.h @@ -0,0 +1,363 @@ +/*************************************************************************/ /*! 
+@File +@Title Device Memory Management PDump internal +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Services internal interface to PDump device memory management + functions that are shared between client and server code. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef DEVICEMEM_PDUMP_H +#define DEVICEMEM_PDUMP_H + +#include "devicemem.h" +#include "pdumpdefs.h" +#include "pdump.h" + +#if defined(PDUMP) +/* + * DevmemPDumpLoadMem() + * + * takes a memory descriptor, offset, and size, and takes the current contents + * of the memory at that location and writes it to the prm pdump file, and + * emits a pdump LDB to load the data from that file. The intention here is + * that the contents of the simulated buffer upon pdump playback will be made + * to be the same as they are when this command is run, enabling pdump of + * cases where the memory has been modified externally, i.e. by the host cpu + * or by a third party. + */ +void +DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + PDUMP_FLAGS_T uiPDumpFlags); + +/* + * DevmemPDumpLoadZeroMem() + * + * As DevmemPDumpLoadMem() but the PDump allocation will be populated with + * zeros from the zero page in the parameter stream + */ +void +DevmemPDumpLoadZeroMem(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + PDUMP_FLAGS_T uiPDumpFlags); + +/* + * DevmemPDumpLoadMemValue32() + * + * As above but dumps the value at a dword-aligned address in plain text to + * the pdump script2 file. Useful for patching a buffer at pdump playback by + * simply editing the script output file. 
+ * + * (The same functionality can be achieved by the above function but the + * binary PARAM file must be patched in that case.) + */ +IMG_INTERNAL void +DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32Value, + PDUMP_FLAGS_T uiPDumpFlags); + +/* + * DevmemPDumpLoadMemValue64() + * + * As above but dumps the 64bit-value at a dword-aligned address in plain text + * to the pdump script2 file. Useful for patching a buffer at pdump playback by + * simply editing the script output file. + * + * (The same functionality can be achieved by the above function but the + * binary PARAM file must be patched in that case.) + */ +IMG_INTERNAL void +DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT64 ui64Value, + PDUMP_FLAGS_T uiPDumpFlags); + +/* + * DevmemPDumpPageCatBaseToSAddr() + * + * Returns the symbolic address of a piece of memory represented by an offset + * into the mem descriptor. + */ +PVRSRV_ERROR +DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T *puiMemOffset, + IMG_CHAR *pszName, + IMG_UINT32 ui32Size); + +/* + * DevmemPDumpSaveToFile() + * + * Emits a pdump SAB to cause the current contents of the memory to be written + * to the given file during playback + */ +void +DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszFilename, + IMG_UINT32 uiFileOffset); + +/* + * DevmemPDumpSaveToFileVirtual() + * + * Emits a pdump SAB, just like DevmemPDumpSaveToFile(), but uses the virtual + * address and device MMU context to cause the pdump player to traverse the + * MMU page tables itself. 
+ */ +void +DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszFilename, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32PdumpFlags); + +/* + * DevmemPDumpDataDescriptor() + * + * Emits a pdump CMD:OutputData, using the virtual address and device MMU + * context. Provides more flexibility than a pdump SAB because metadata can + * be passed to an external pdump player library via the command header. + */ +void +DevmemPDumpDataDescriptor(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszFilename, + IMG_UINT32 ui32HeaderType, + IMG_UINT32 ui32ElementType, + IMG_UINT32 ui32ElementCount, + IMG_UINT32 ui32PdumpFlags); + + +/* + * + * DevmemPDumpDevmemPol32() + * + * Writes a PDump 'POL' command to wait for a masked 32-bit memory location to + * become the specified value. + */ +PVRSRV_ERROR +DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T ui32PDumpFlags); + +#if defined(__KERNEL__) +/* + * + * DevmemPDumpDevmemCheck32() + * + * Writes a PDump 'POL' command to run a single-shot check for a masked + * 32-bit memory location to match the specified value. + */ +PVRSRV_ERROR +DevmemPDumpDevmemCheck32(const DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T ui32PDumpFlags); +#endif + +/* + * DevmemPDumpCBP() + * + * Polls for space in circular buffer. Reads the read offset from memory and + * waits until there is enough space to write the packet. 
+ * + * psMemDesc - MemDesc which contains the read offset + * uiReadOffset - Offset into MemDesc to the read offset + * uiWriteOffset - Current write offset + * uiPacketSize - Size of packet to write + * uiBufferSize - Size of circular buffer + */ +PVRSRV_ERROR +DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiReadOffset, + IMG_DEVMEM_OFFSET_T uiWriteOffset, + IMG_DEVMEM_SIZE_T uiPacketSize, + IMG_DEVMEM_SIZE_T uiBufferSize); + +#else /* PDUMP */ + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemPDumpLoadMem) +#endif +static INLINE void +DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psMemDesc); + PVR_UNREFERENCED_PARAMETER(uiOffset); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemPDumpLoadMemValue32) +#endif +static INLINE void +DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32Value, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psMemDesc); + PVR_UNREFERENCED_PARAMETER(uiOffset); + PVR_UNREFERENCED_PARAMETER(ui32Value); + PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemPDumpLoadMemValue64) +#endif +static INLINE void +DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT64 ui64Value, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psMemDesc); + PVR_UNREFERENCED_PARAMETER(uiOffset); + PVR_UNREFERENCED_PARAMETER(ui64Value); + PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemPDumpPageCatBaseToSAddr) +#endif +static INLINE PVRSRV_ERROR +DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T *puiMemOffset, + IMG_CHAR *pszName, + IMG_UINT32 ui32Size) +{ + PVR_UNREFERENCED_PARAMETER(psMemDesc); + 
PVR_UNREFERENCED_PARAMETER(puiMemOffset); + PVR_UNREFERENCED_PARAMETER(pszName); + PVR_UNREFERENCED_PARAMETER(ui32Size); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemPDumpSaveToFile) +#endif +static INLINE void +DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszFilename, + IMG_UINT32 uiFileOffset) +{ + PVR_UNREFERENCED_PARAMETER(psMemDesc); + PVR_UNREFERENCED_PARAMETER(uiOffset); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(pszFilename); + PVR_UNREFERENCED_PARAMETER(uiFileOffset); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemPDumpSaveToFileVirtual) +#endif +static INLINE void +DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszFilename, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32PdumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psMemDesc); + PVR_UNREFERENCED_PARAMETER(uiOffset); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(pszFilename); + PVR_UNREFERENCED_PARAMETER(ui32FileOffset); + PVR_UNREFERENCED_PARAMETER(ui32PdumpFlags); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemPDumpDevmemPol32) +#endif +static INLINE PVRSRV_ERROR +DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psMemDesc); + PVR_UNREFERENCED_PARAMETER(uiOffset); + PVR_UNREFERENCED_PARAMETER(ui32Value); + PVR_UNREFERENCED_PARAMETER(ui32Mask); + PVR_UNREFERENCED_PARAMETER(eOperator); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemPDumpCBP) +#endif +static INLINE PVRSRV_ERROR +DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiReadOffset, + IMG_DEVMEM_OFFSET_T uiWriteOffset, + 
IMG_DEVMEM_SIZE_T uiPacketSize, + IMG_DEVMEM_SIZE_T uiBufferSize) +{ + PVR_UNREFERENCED_PARAMETER(psMemDesc); + PVR_UNREFERENCED_PARAMETER(uiReadOffset); + PVR_UNREFERENCED_PARAMETER(uiWriteOffset); + PVR_UNREFERENCED_PARAMETER(uiPacketSize); + PVR_UNREFERENCED_PARAMETER(uiBufferSize); + + return PVRSRV_OK; +} +#endif /* PDUMP */ +#endif /* DEVICEMEM_PDUMP_H */ diff --git a/drivers/mcst/gpu-imgtec/services/shared/include/devicemem_utils.h b/drivers/mcst/gpu-imgtec/services/shared/include/devicemem_utils.h new file mode 100644 index 000000000000..20ce04ccef18 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/include/devicemem_utils.h @@ -0,0 +1,513 @@ +/*************************************************************************/ /*! +@File +@Title Device Memory Management internal utility functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Utility functions used internally by device memory management + code. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef DEVICEMEM_UTILS_H +#define DEVICEMEM_UTILS_H + +#include "devicemem.h" +#include "img_types.h" +#include "pvrsrv_error.h" +#include "pvr_debug.h" +#include "allocmem.h" +#include "ra.h" +#include "osfunc.h" +#include "lock.h" +#include "osmmap.h" + +#define DEVMEM_HEAPNAME_MAXLENGTH 160 + +#if defined(DEVMEM_DEBUG) && defined(REFCOUNT_DEBUG) +#define DEVMEM_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_ERROR, __FILE__, __LINE__, fmt, __VA_ARGS__) +#else +#define DEVMEM_REFCOUNT_PRINT(fmt, ...) +#endif + +/* If we need a "hMapping" but we don't have a server-side mapping, we poison + * the entry with this value so that it's easily recognised in the debugger. 
+ * Note that this is potentially a valid handle, but then so is NULL, which is + * no better, indeed worse, as it's not obvious in the debugger. The value + * doesn't matter. We _never_ use it (and because it's valid, we never assert + * it isn't this) but it's nice to have a value in the source code that we can + * grep for if things go wrong. + */ +#define LACK_OF_MAPPING_POISON ((IMG_HANDLE)0x6116dead) +#define LACK_OF_RESERVATION_POISON ((IMG_HANDLE)0x7117dead) + +#define DEVICEMEM_HISTORY_ALLOC_INDEX_NONE 0xFFFFFFFF + +struct DEVMEM_CONTEXT_TAG +{ + + SHARED_DEV_CONNECTION hDevConnection; + + /* Number of heaps that have been created in this context + * (regardless of whether they have allocations) + */ + IMG_UINT32 uiNumHeaps; + + /* Each "DEVMEM_CONTEXT" has a counterpart in the server, which + * is responsible for handling the mapping into device MMU. + * We have a handle to that here. + */ + IMG_HANDLE hDevMemServerContext; + + /* Number of automagically created heaps in this context, + * i.e. those that are born at context creation time from the + * chosen "heap config" or "blueprint" + */ + IMG_UINT32 uiAutoHeapCount; + + /* Pointer to array of such heaps */ + struct DEVMEM_HEAP_TAG **ppsAutoHeapArray; + + /* The cache line size for use when allocating memory, + * as it is not queryable on the client side + */ + IMG_UINT32 ui32CPUCacheLineSize; + + /* Private data handle for device specific data */ + IMG_HANDLE hPrivData; +}; + +/* Flag that allows the heap to be marked as user or RA managed + * at the time of first map.*/ +#define DEVMEM_HEAP_MANAGER_UNKNOWN 0 +/* User managed heap. 
Management of this heap is out of services scope */ +#define DEVMEM_HEAP_MANAGER_USER (1U << 0) +/* Heap explicitly managed by services server module */ +#define DEVMEM_HEAP_MANAGER_KERNEL (1U << 1) +/* Heap managed by resource allocator */ +#define DEVMEM_HEAP_MANAGER_RA (1U << 2) +/* Heaps managed by the resource allocator and the user + * The reserved regions of the heap are managed explicitly by user + * The non-reserved region of the heap is managed by the RA */ +#define DEVMEM_HEAP_MANAGER_DUAL_USER_RA (DEVMEM_HEAP_MANAGER_USER | DEVMEM_HEAP_MANAGER_RA) + +struct DEVMEM_HEAP_TAG +{ + /* Name of heap - for debug and lookup purposes. */ + IMG_CHAR *pszName; + + /* Number of live imports in the heap */ + ATOMIC_T hImportCount; + + /* Base address and size of heap, required by clients due to some + * requesters not being full range + */ + IMG_DEV_VIRTADDR sBaseAddress; + DEVMEM_SIZE_T uiSize; + + DEVMEM_SIZE_T uiReservedRegionSize; /* uiReservedRegionLength in DEVMEM_HEAP_BLUEPRINT */ + + /* The heap manager, describing if the space is managed by the user, an RA, + * kernel or combination */ + IMG_UINT32 ui32HeapManagerFlags; + + /* This RA is for managing sub-allocations within the imports (PMRs) + * within the heap's virtual space. RA only used in DevmemSubAllocate() + * to track sub-allocated buffers. + * + * Resource Span - a PMR import added when the RA calls the + * imp_alloc CB (SubAllocImportAlloc) which returns the + * PMR import and size (span length). + * Resource - an allocation/buffer i.e. a MemDesc. Resource size represents + * the size of the sub-allocation. + */ + RA_ARENA *psSubAllocRA; + IMG_CHAR *pszSubAllocRAName; + + /* The psQuantizedVMRA is for the coarse allocation (PMRs) of virtual + * space from the heap. + * + * Resource Span - the heap's VM space from base to base+length, + * only one is added at heap creation. + * Resource - a PMR import associated with the heap. 
Dynamic number + * as memory is allocated/freed from or mapped/unmapped to + * the heap. Resource size follows PMR logical size. + */ + RA_ARENA *psQuantizedVMRA; + IMG_CHAR *pszQuantizedVMRAName; + + /* We also need to store a copy of the quantum size in order to feed + * this down to the server. + */ + IMG_UINT32 uiLog2Quantum; + + /* Store a copy of the minimum import alignment */ + IMG_UINT32 uiLog2ImportAlignment; + + /* The relationship between tiled heap alignment and heap byte-stride + * (dependent on tiling mode, abstracted here) + */ + IMG_UINT32 uiLog2TilingStrideFactor; + + /* The parent memory context for this heap */ + struct DEVMEM_CONTEXT_TAG *psCtx; + + /* Lock to protect this structure */ + POS_LOCK hLock; + + /* Each "DEVMEM_HEAP" has a counterpart in the server, which is + * responsible for handling the mapping into device MMU. + * We have a handle to that here. + */ + IMG_HANDLE hDevMemServerHeap; + + /* This heap is fully allocated and premapped into the device address space. + * Used in virtualisation for firmware heaps of Guest and optionally Host drivers. */ + IMG_BOOL bPremapped; +}; + +typedef IMG_UINT32 DEVMEM_PROPERTIES_T; /*!< Typedef for Devicemem properties */ +#define DEVMEM_PROPERTIES_EXPORTABLE (1UL<<0) /*!< Is it exportable? */ +#define DEVMEM_PROPERTIES_IMPORTED (1UL<<1) /*!< Is it imported from another process? */ +#define DEVMEM_PROPERTIES_SUBALLOCATABLE (1UL<<2) /*!< Is it suballocatable? */ +#define DEVMEM_PROPERTIES_UNPINNED (1UL<<3) /*!< Is it currently pinned? */ +#define DEVMEM_PROPERTIES_IMPORT_IS_ZEROED (1UL<<4) /*!< Is the memory fully zeroed? */ +#define DEVMEM_PROPERTIES_IMPORT_IS_CLEAN (1UL<<5) /*!< Is the memory clean, i.e. not been used before? */ +#define DEVMEM_PROPERTIES_SECURE (1UL<<6) /*!< Is it a special secure buffer? No CPU maps allowed! */ +#define DEVMEM_PROPERTIES_IMPORT_IS_POISONED (1UL<<7) /*!< Is the memory fully poisoned? 
*/ +#define DEVMEM_PROPERTIES_NO_CPU_MAPPING (1UL<<8) /* No CPU Mapping is allowed, RW attributes + are further derived from allocation memory flags */ +#define DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE (1UL<<9) /* No sparse resizing allowed, once a memory + layout is chosen, no change allowed later, + This includes pinning and unpinning */ + + +typedef struct DEVMEM_DEVICE_IMPORT_TAG +{ + DEVMEM_HEAP *psHeap; /*!< Heap this import is bound to */ + IMG_DEV_VIRTADDR sDevVAddr; /*!< Device virtual address of the import */ + IMG_UINT32 ui32RefCount; /*!< Refcount of the device virtual address */ + IMG_HANDLE hReservation; /*!< Device memory reservation handle */ + IMG_HANDLE hMapping; /*!< Device mapping handle */ + IMG_BOOL bMapped; /*!< This is import mapped? */ + POS_LOCK hLock; /*!< Lock to protect the device import */ +} DEVMEM_DEVICE_IMPORT; + +typedef struct DEVMEM_CPU_IMPORT_TAG +{ + void *pvCPUVAddr; /*!< CPU virtual address of the import */ + IMG_UINT32 ui32RefCount; /*!< Refcount of the CPU virtual address */ + IMG_HANDLE hOSMMapData; /*!< CPU mapping handle */ + POS_LOCK hLock; /*!< Lock to protect the CPU import */ +} DEVMEM_CPU_IMPORT; + +typedef struct DEVMEM_IMPORT_TAG +{ + SHARED_DEV_CONNECTION hDevConnection; + IMG_DEVMEM_ALIGN_T uiAlign; /*!< Alignment of the PMR */ + DEVMEM_SIZE_T uiSize; /*!< Size of import */ + ATOMIC_T hRefCount; /*!< Refcount for this import */ + DEVMEM_PROPERTIES_T uiProperties; /*!< Stores properties of an import like if + it is exportable, pinned or suballocatable */ + IMG_HANDLE hPMR; /*!< Handle to the PMR */ + DEVMEM_FLAGS_T uiFlags; /*!< Flags for this import */ + POS_LOCK hLock; /*!< Lock to protect the import */ + + DEVMEM_DEVICE_IMPORT sDeviceImport; /*!< Device specifics of the import */ + DEVMEM_CPU_IMPORT sCPUImport; /*!< CPU specifics of the import */ +} DEVMEM_IMPORT; + +typedef struct DEVMEM_DEVICE_MEMDESC_TAG +{ + IMG_DEV_VIRTADDR sDevVAddr; /*!< Device virtual address of the allocation */ + IMG_UINT32 ui32RefCount; 
/*!< Refcount of the device virtual address */ + POS_LOCK hLock; /*!< Lock to protect device memdesc */ +} DEVMEM_DEVICE_MEMDESC; + +typedef struct DEVMEM_CPU_MEMDESC_TAG +{ + void *pvCPUVAddr; /*!< CPU virtual address of the import */ + IMG_UINT32 ui32RefCount; /*!< Refcount of the device CPU address */ + POS_LOCK hLock; /*!< Lock to protect CPU memdesc */ +} DEVMEM_CPU_MEMDESC; + +struct DEVMEM_MEMDESC_TAG +{ + DEVMEM_IMPORT *psImport; /*!< Import this memdesc is on */ + IMG_DEVMEM_OFFSET_T uiOffset; /*!< Offset into import where our allocation starts */ + IMG_DEVMEM_SIZE_T uiAllocSize; /*!< Size of the allocation */ + ATOMIC_T hRefCount; /*!< Refcount of the memdesc */ + POS_LOCK hLock; /*!< Lock to protect memdesc */ + IMG_HANDLE hPrivData; + + DEVMEM_DEVICE_MEMDESC sDeviceMemDesc; /*!< Device specifics of the memdesc */ + DEVMEM_CPU_MEMDESC sCPUMemDesc; /*!< CPU specifics of the memdesc */ + + IMG_CHAR szText[DEVMEM_ANNOTATION_MAX_LEN]; /*!< Annotation for this memdesc */ + + IMG_UINT32 ui32AllocationIndex; + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + IMG_HANDLE hRIHandle; /*!< Handle to RI information */ +#endif +}; + +/* The physical descriptor used to store handles and information of device + * physical allocations. + */ +struct DEVMEMX_PHYS_MEMDESC_TAG +{ + IMG_UINT32 uiNumPages; /*!< Number of pages that the import has*/ + IMG_UINT32 uiLog2PageSize; /*!< Page size */ + ATOMIC_T hRefCount; /*!< Refcount of the memdesc */ + DEVMEM_FLAGS_T uiFlags; /*!< Flags for this import */ + IMG_HANDLE hPMR; /*!< Handle to the PMR */ + DEVMEM_CPU_IMPORT sCPUImport; /*!< CPU specifics of the memdesc */ + DEVMEM_BRIDGE_HANDLE hBridge; /*!< Bridge connection for the server */ +}; + +/* The virtual descriptor used to store handles and information of a device + * virtual range and the mappings to it. 
+ */ +struct DEVMEMX_VIRT_MEMDESC_TAG +{ + IMG_UINT32 uiNumPages; /*!< Number of pages that the import has*/ + DEVMEM_FLAGS_T uiFlags; /*!< Flags for this import */ + DEVMEMX_PHYSDESC **apsPhysDescTable; /*!< Table to store links to physical descs */ + DEVMEM_DEVICE_IMPORT sDeviceImport; /*!< Device specifics of the memdesc */ + + IMG_CHAR szText[DEVMEM_ANNOTATION_MAX_LEN]; /*!< Annotation for this virt memdesc */ + IMG_UINT32 ui32AllocationIndex; /*!< To track mappings in this range */ + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + IMG_HANDLE hRIHandle; /*!< Handle to RI information */ +#endif +}; + +#define DEVICEMEM_UTILS_NO_ADDRESS 0 + +/****************************************************************************** +@Function DevmemValidateParams +@Description Check if flags are conflicting and if align is a size multiple. + +@Input uiSize Size of the import. +@Input uiAlign Alignment of the import. +@Input puiFlags Pointer to the flags for the import. +@return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR DevmemValidateParams(IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiAlign, + DEVMEM_FLAGS_T *puiFlags); + +/****************************************************************************** +@Function DevmemImportStructAlloc +@Description Allocates memory for an import struct. Does not allocate a PMR! + Create locks for CPU and Devmem mappings. + +@Input hDevConnection Connection to use for calls from the import. +@Input ppsImport The import to allocate. +@return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR DevmemImportStructAlloc(SHARED_DEV_CONNECTION hDevConnection, + DEVMEM_IMPORT **ppsImport); + +/****************************************************************************** +@Function DevmemImportStructInit +@Description Initialises the import struct with the given parameters. + Set it's refcount to 1! 
+ +@Input psImport The import to initialise. +@Input uiSize Size of the import. +@Input uiAlign Alignment of allocations in the import. +@Input uiMapFlags +@Input hPMR Reference to the PMR of this import struct. +@Input uiProperties Properties of the import. Is it exportable, + imported, suballocatable, unpinned? +******************************************************************************/ +void DevmemImportStructInit(DEVMEM_IMPORT *psImport, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiAlign, + PVRSRV_MEMALLOCFLAGS_T uiMapFlags, + IMG_HANDLE hPMR, + DEVMEM_PROPERTIES_T uiProperties); + +/****************************************************************************** +@Function DevmemImportStructDevMap +@Description NEVER call after the last DevmemMemDescRelease() + Maps the PMR referenced by the import struct to the device's + virtual address space. + Does nothing but increase the cpu mapping refcount if the + import struct was already mapped. + +@Input psHeap The heap to map to. +@Input bMap Caller can choose if the import should be really + mapped in the page tables or if just a virtual range + should be reserved and the refcounts increased. +@Input psImport The import we want to map. +@Input uiOptionalMapAddress An optional address to map to. + Pass DEVICEMEM_UTILS_NO_ADDRESS if not used. +@return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR DevmemImportStructDevMap(DEVMEM_HEAP *psHeap, + IMG_BOOL bMap, + DEVMEM_IMPORT *psImport, + IMG_UINT64 uiOptionalMapAddress); + +/****************************************************************************** +@Function DevmemImportStructDevUnmap +@Description Unmaps the PMR referenced by the import struct from the + device's virtual address space. + If this was not the last remaining CPU mapping on the import + struct only the cpu mapping refcount is decreased. +@return A boolean to signify if the import was unmapped. 
+******************************************************************************/ +IMG_BOOL DevmemImportStructDevUnmap(DEVMEM_IMPORT *psImport); + +/****************************************************************************** +@Function DevmemImportStructCPUMap +@Description NEVER call after the last DevmemMemDescRelease() + Maps the PMR referenced by the import struct to the CPU's + virtual address space. + Does nothing but increase the cpu mapping refcount if the + import struct was already mapped. +@return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR DevmemImportStructCPUMap(DEVMEM_IMPORT *psImport); + +/****************************************************************************** +@Function DevmemImportStructCPUUnmap +@Description Unmaps the PMR referenced by the import struct from the CPU's + virtual address space. + If this was not the last remaining CPU mapping on the import + struct only the cpu mapping refcount is decreased. +******************************************************************************/ +void DevmemImportStructCPUUnmap(DEVMEM_IMPORT *psImport); + + +/****************************************************************************** +@Function DevmemImportStructAcquire +@Description Acquire an import struct by increasing it's refcount. +******************************************************************************/ +void DevmemImportStructAcquire(DEVMEM_IMPORT *psImport); + +/****************************************************************************** +@Function DevmemImportStructRelease +@Description Reduces the refcount of the import struct. + Destroys the import in the case it was the last reference. + Destroys underlying PMR if this import was the last reference + to it. +@return A boolean to signal if the import was destroyed. True = yes. 
+******************************************************************************/ +IMG_BOOL DevmemImportStructRelease(DEVMEM_IMPORT *psImport); + +/****************************************************************************** +@Function DevmemImportDiscard +@Description Discard a created, but uninitialised import structure. + This must only be called before DevmemImportStructInit + after which DevmemImportStructRelease must be used to + "free" the import structure. +******************************************************************************/ +void DevmemImportDiscard(DEVMEM_IMPORT *psImport); + +/****************************************************************************** +@Function DevmemMemDescAlloc +@Description Allocates a MemDesc and create its various locks. + Zero the allocated memory. +@return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR DevmemMemDescAlloc(DEVMEM_MEMDESC **ppsMemDesc); + +/****************************************************************************** +@Function DevmemMemDescInit +@Description Sets the given offset and import struct fields in the MemDesc. + Initialises refcount to 1 and other values to 0. + +@Input psMemDesc MemDesc to initialise. +@Input uiOffset Offset in the import structure. +@Input psImport Import the MemDesc is on. +@Input uiAllocSize Size of the allocation +******************************************************************************/ +void DevmemMemDescInit(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + DEVMEM_IMPORT *psImport, + IMG_DEVMEM_SIZE_T uiAllocSize); + +/****************************************************************************** +@Function DevmemMemDescAcquire +@Description Acquires the MemDesc by increasing its refcount. 
+******************************************************************************/ +void DevmemMemDescAcquire(DEVMEM_MEMDESC *psMemDesc); + +/****************************************************************************** +@Function DevmemMemDescRelease +@Description Releases the MemDesc by reducing its refcount. + Destroy the MemDesc if its refcount is 0. + Destroy the import struct the MemDesc is on if that was the + last MemDesc on the import, probably following the destruction + of the underlying PMR. +@return A boolean to signal if the MemDesc was destroyed. True = yes. +******************************************************************************/ +IMG_BOOL DevmemMemDescRelease(DEVMEM_MEMDESC *psMemDesc); + +/****************************************************************************** +@Function DevmemMemDescDiscard +@Description Discard a created, but uninitialised MemDesc structure. + This must only be called before DevmemMemDescInit after + which DevmemMemDescRelease must be used to "free" the + MemDesc structure. +******************************************************************************/ +void DevmemMemDescDiscard(DEVMEM_MEMDESC *psMemDesc); + + +/****************************************************************************** +@Function GetImportProperties +@Description Atomically read psImport->uiProperties + It's possible that another thread modifies uiProperties + immediately after this function returns, making its result + stale. So, it's recommended to use this function only to + check if certain non-volatile flags were set. 
+******************************************************************************/ +static INLINE DEVMEM_PROPERTIES_T GetImportProperties(DEVMEM_IMPORT *psImport) +{ + DEVMEM_PROPERTIES_T uiProperties; + + OSLockAcquire(psImport->hLock); + uiProperties = psImport->uiProperties; + OSLockRelease(psImport->hLock); + return uiProperties; +} + +#endif /* DEVICEMEM_UTILS_H */ diff --git a/drivers/mcst/gpu-imgtec/services/shared/include/devicememx.h b/drivers/mcst/gpu-imgtec/services/shared/include/devicememx.h new file mode 100644 index 000000000000..a9d7f950a42a --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/include/devicememx.h @@ -0,0 +1,223 @@ +/*************************************************************************/ /*! +@File +@Title X Device Memory Management core internal +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Internal interface for extended device memory management. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef DEVICEMEMX_H +#define DEVICEMEMX_H + +#include "img_types.h" +#include "devicemem_typedefs.h" +#include "devicemem_utils.h" +#include "pdumpdefs.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" +#include "osfunc.h" + +/* DevmemXAllocPhysical() + * + * Allocate physical device memory and return a physical + * descriptor for it. + */ +PVRSRV_ERROR +DevmemXAllocPhysical(DEVMEM_CONTEXT *psCtx, + IMG_UINT32 uiNumPages, + IMG_UINT32 uiLog2PageSize, + DEVMEM_FLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEMX_PHYSDESC **ppsPhysDesc); + +/* DevmemXReleasePhysical() + * + * Removes a physical device allocation if all references + * to it are dropped, otherwise just decreases the refcount. 
+ */ +void +DevmemXReleasePhysical(DEVMEMX_PHYSDESC *psPhysDesc); + +/* DevmemAllocVirtualAddr() + * + * Reserve a requested device virtual range and return + * a virtual descriptor for it. + */ +IMG_INTERNAL PVRSRV_ERROR +DevmemXAllocVirtualAddr(DEVMEM_HEAP* hHeap, + IMG_UINT32 uiNumPages, + DEVMEM_FLAGS_T uiFlags, + const IMG_CHAR *pszText, + IMG_DEV_VIRTADDR sVirtAddr, + DEVMEMX_VIRTDESC **ppsVirtDesc); + +/* DevmemAllocVirtual() + * + * Allocate and reserve a device virtual range and return + * a virtual descriptor for it. + */ +PVRSRV_ERROR +DevmemXAllocVirtual(DEVMEM_HEAP* hHeap, + IMG_UINT32 uiNumPages, + DEVMEM_FLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEMX_VIRTDESC **ppsVirtDesc, + IMG_DEV_VIRTADDR *psVirtAddr); + +/* DevmemXFreeVirtual() + * + * Removes a device virtual range if all mappings on it + * have been removed. + */ +PVRSRV_ERROR +DevmemXFreeVirtual(DEVMEMX_VIRTDESC *psVirtDesc); + +/* DevmemXMapVirtualRange() + * + * Map memory from a physical descriptor into a virtual range. + */ +PVRSRV_ERROR +DevmemXMapVirtualRange(IMG_UINT32 ui32PageCount, + DEVMEMX_PHYSDESC *psPhysDesc, + IMG_UINT32 ui32PhysOffset, + DEVMEMX_VIRTDESC *psVirtDesc, + IMG_UINT32 ui32VirtOffset); + +/* DevmemXUnmapVirtualRange() + * + * Unmap pages from a device virtual range. + */ +PVRSRV_ERROR +DevmemXUnmapVirtualRange(IMG_UINT32 ui32PageCount, + DEVMEMX_VIRTDESC *psVirtDesc, + IMG_UINT32 ui32VirtPgOffset); + +/* DevmemXMapPhysicalToCPU() + * + * Map a full physical descriptor to CPU space. + */ +PVRSRV_ERROR +DevmemXMapPhysicalToCPU(DEVMEMX_PHYSDESC *psMemAllocPhys, + IMG_CPU_VIRTADDR *psVirtAddr); + +/* DevmemXUnmapPhysicalToCPU() + * + * Remove the CPU mapping from the descriptor. + */ +PVRSRV_ERROR +DevmemXUnmapPhysicalToCPU(DEVMEMX_PHYSDESC *psMemAllocPhys); + +/* DevmemXReacquireCpuVirtAddr() + * + * Reacquire the CPU mapping by incrementing the refcount. 
+ */ +void +DevmemXReacquireCpuVirtAddr(DEVMEMX_PHYSDESC *psPhysDesc, + void **ppvCpuVirtAddr); + +/* DevmemXReleaseCpuVirtAddr() + * + * Release CPU mapping by decrementing the refcount. + */ +void +DevmemXReleaseCpuVirtAddr(DEVMEMX_PHYSDESC *psPhysDesc); + +/* DevmemXCreateDevmemMemDescVA() + * + * (Deprecated) + * + * Create a devmem memdesc from a virtual address. + * Always destroy with DevmemXFreeDevmemMemDesc(). + */ + +PVRSRV_ERROR +DevmemXCreateDevmemMemDescVA(const IMG_DEV_VIRTADDR sVirtualAddress, + DEVMEM_MEMDESC **ppsMemDesc); + +/* DevmemXCreateDevmemMemDesc() + * + * Create a devmem memdesc from a physical and + * virtual descriptor. + * Always destroy with DevmemXFreeDevmemMemDesc(). + */ + +PVRSRV_ERROR +DevmemXCreateDevmemMemDesc(DEVMEMX_PHYSDESC *psPhysDesc, + DEVMEMX_VIRTDESC *psVirtDesc, + DEVMEM_MEMDESC **ppsMemDesc); + +/* DevmemXFreeDevmemMemDesc() + * + * Free the memdesc again. Has no impact on the underlying + * physical and virtual descriptors. + */ +PVRSRV_ERROR +DevmemXFreeDevmemMemDesc(DEVMEM_MEMDESC *psMemDesc); + +PVRSRV_ERROR +DevmemXFlagCompatibilityCheck(IMG_UINT32 uiPhysFlags, + IMG_UINT32 uiVirtFlags); + +PVRSRV_ERROR +DevmemXPhysDescAlloc(DEVMEMX_PHYSDESC **ppsPhysDesc); + +void +DevmemXPhysDescInit(DEVMEMX_PHYSDESC *psPhysDesc, + IMG_HANDLE hPMR, + IMG_UINT32 uiNumPages, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_HANDLE hBridge); + +void +DevmemXPhysDescFree(DEVMEMX_PHYSDESC *psPhysDesc); + +void +DevmemXPhysDescAcquire(DEVMEMX_PHYSDESC *psPhysDesc, + IMG_UINT32 uiAcquireCount); +void +DevmemXPhysDescRelease(DEVMEMX_PHYSDESC *psPhysDesc, + IMG_UINT32 uiReleaseCount); + +#if !defined(__KERNEL__) +IMG_INTERNAL PVRSRV_ERROR +DevmemXGetImportUID(DEVMEMX_PHYSDESC *psMemDescPhys, + IMG_UINT64 *pui64UID); +#endif + +#endif /* DEVICEMEMX_H */ diff --git a/drivers/mcst/gpu-imgtec/services/shared/include/devicememx_pdump.h b/drivers/mcst/gpu-imgtec/services/shared/include/devicememx_pdump.h new file mode 
100644 index 000000000000..b6e99f7eca66 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/include/devicememx_pdump.h @@ -0,0 +1,81 @@ +/*************************************************************************/ /*! +@File +@Title X Device Memory Management PDump internal +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Services internal interface to PDump device memory management + functions that are shared between client and server code. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef DEVICEMEMX_PDUMP_H +#define DEVICEMEMX_PDUMP_H + +#include "devicememx.h" +#include "pdumpdefs.h" +#include "pdump.h" + +#if defined(PDUMP) +/* + * DevmemXPDumpLoadMem() + * + * Same as DevmemPDumpLoadMem(). + */ +IMG_INTERNAL void +DevmemXPDumpLoadMem(DEVMEMX_PHYSDESC *psMemDescPhys, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + PDUMP_FLAGS_T uiPDumpFlags); +#else + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemXPDumpLoadMem) +#endif + +static INLINE void +DevmemXPDumpLoadMem(DEVMEMX_PHYSDESC *psMemDescPhys, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psMemDescPhys); + PVR_UNREFERENCED_PARAMETER(uiOffset); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); +} +#endif /* PDUMP */ +#endif /* DEVICEMEMX_PDUMP_H */ diff --git a/drivers/mcst/gpu-imgtec/services/shared/include/hash.h b/drivers/mcst/gpu-imgtec/services/shared/include/hash.h new file mode 100644 index 000000000000..61e8bf0a8d99 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/include/hash.h @@ -0,0 +1,246 @@ +/*************************************************************************/ /*! +@File +@Title Self scaling hash tables +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Implements simple self scaling hash tables. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _HASH_H_ +#define _HASH_H_ + +#include "img_types.h" +#include "pvrsrv_error.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +/* + * Keys passed to the comparison function are only guaranteed to be aligned on + * an uintptr_t boundary. + */ +typedef IMG_UINT32 HASH_FUNC(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen); +typedef IMG_BOOL HASH_KEY_COMP(size_t uKeySize, void *pKey1, void *pKey2); + +typedef struct _HASH_TABLE_ HASH_TABLE; + +typedef PVRSRV_ERROR (*HASH_pfnCallback) ( + uintptr_t k, + uintptr_t v +); + +#if defined(DEBUG) +#else +#define HASH_CREATE(LEN) HASH_Create(LEN) +#endif + +/*************************************************************************/ /*! +@Function HASH_Func_Default +@Description Hash function intended for hashing keys composed of uintptr_t + arrays. +@Input uKeySize The size of the hash key, in bytes. +@Input pKey A pointer to the key to hash. +@Input uHashTabLen The length of the hash table. +@Return The hash value. +*/ /**************************************************************************/ +IMG_UINT32 HASH_Func_Default(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen); + +/*************************************************************************/ /*! +@Function HASH_Key_Comp_Default +@Description Compares keys composed of uintptr_t arrays. +@Input uKeySize The size of the hash key, in bytes. 
+@Input pKey1 Pointer to first hash key to compare. +@Input pKey2 Pointer to second hash key to compare. +@Return IMG_TRUE - The keys match. + IMG_FALSE - The keys don't match. +*/ /**************************************************************************/ +IMG_BOOL HASH_Key_Comp_Default(size_t uKeySize, void *pKey1, void *pKey2); + +/*************************************************************************/ /*! +@Function HASH_Create_Extended +@Description Create a self scaling hash table, using the supplied key size, + and the supplied hash and key comparison functions. +@Input uInitialLen Initial and minimum length of the hash table, + where the length refers to the number of entries + in the hash table, not its size in bytes. +@Input uKeySize The size of the key, in bytes. +@Input pfnHashFunc Pointer to hash function. +@Input pfnKeyComp Pointer to key comparison function. +@Return NULL or hash table handle. +*/ /**************************************************************************/ +HASH_TABLE * HASH_Create_Extended_Int(IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp); +#if defined(DEBUG) +#define HASH_Create_Extended(LEN, KS, FUN, CMP) HASH_Create_Extended_Debug(LEN, KS, FUN, CMP, __FILE__, __LINE__) +HASH_TABLE * HASH_Create_Extended_Debug (IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp, + const char *file, const unsigned int line); +#else +#define HASH_Create_Extended HASH_Create_Extended_Int +#endif + +/*************************************************************************/ /*! +@Function HASH_Create +@Description Create a self scaling hash table with a key consisting of a + single uintptr_t, and using the default hash and key + comparison functions. +@Input uInitialLen Initial and minimum length of the hash table, + where the length refers to the number of entries + in the hash table, not its size in bytes. +@Return NULL or hash table handle. 
+*/ /**************************************************************************/ +HASH_TABLE * HASH_Create_Int(IMG_UINT32 uInitialLen); +#if defined(DEBUG) +#define HASH_Create(LEN) HASH_Create_Debug(LEN, __FILE__, __LINE__) +HASH_TABLE * HASH_Create_Debug (IMG_UINT32 uInitialLen, const char *file, const unsigned int line); +#else +#define HASH_Create HASH_Create_Int +#endif + +/*************************************************************************/ /*! +@Function HASH_Delete_Extended +@Description Delete a hash table created by HASH_Create_Extended or + HASH_Create. All entries in the table should have been removed + before calling this function. +@Input pHash Hash table +@Input bWarn Set false to suppress warnings in the case of + deletion with active entries. +@Return None +*/ /**************************************************************************/ +void HASH_Delete_Extended(HASH_TABLE *pHash, IMG_BOOL bWarn); + +/*************************************************************************/ /*! +@Function HASH_Delete +@Description Delete a hash table created by HASH_Create_Extended or + HASH_Create. All entries in the table must have been removed + before calling this function. +@Input pHash Hash table +@Return None +*/ /**************************************************************************/ +void HASH_Delete(HASH_TABLE *pHash); + +/*************************************************************************/ /*! +@Function HASH_Insert_Extended +@Description Insert a key value pair into a hash table created with + HASH_Create_Extended. +@Input pHash The hash table. +@Input pKey Pointer to the key. +@Input v The value associated with the key. +@Return IMG_TRUE - success. + IMG_FALSE - failure. +*/ /**************************************************************************/ +IMG_BOOL HASH_Insert_Extended(HASH_TABLE *pHash, void *pKey, uintptr_t v); + +/*************************************************************************/ /*! 
+@Function HASH_Insert +@Description Insert a key value pair into a hash table created with + HASH_Create. +@Input pHash The hash table. +@Input k The key value. +@Input v The value associated with the key. +@Return IMG_TRUE - success. + IMG_FALSE - failure. +*/ /**************************************************************************/ +IMG_BOOL HASH_Insert(HASH_TABLE *pHash, uintptr_t k, uintptr_t v); + +/*************************************************************************/ /*! +@Function HASH_Remove_Extended +@Description Remove a key from a hash table created with + HASH_Create_Extended. +@Input pHash The hash table. +@Input pKey Pointer to key. +@Return 0 if the key is missing, or the value associated with the key. +*/ /**************************************************************************/ +uintptr_t HASH_Remove_Extended(HASH_TABLE *pHash, void *pKey); + +/*************************************************************************/ /*! +@Function HASH_Remove +@Description Remove a key value pair from a hash table created with + HASH_Create. +@Input pHash The hash table. +@Input k The key value. +@Return 0 if the key is missing, or the value associated with the key. +*/ /**************************************************************************/ +uintptr_t HASH_Remove(HASH_TABLE *pHash, uintptr_t k); + +/*************************************************************************/ /*! +@Function HASH_Retrieve_Extended +@Description Retrieve a value from a hash table created with + HASH_Create_Extended. +@Input pHash The hash table. +@Input pKey Pointer to key. +@Return 0 if the key is missing, or the value associated with the key. +*/ /**************************************************************************/ +uintptr_t HASH_Retrieve_Extended(HASH_TABLE *pHash, void *pKey); + +/*************************************************************************/ /*! +@Function HASH_Retrieve +@Description Retrieve a value from a hash table created with HASH_Create. 
+@Input pHash The hash table. +@Input k The key value. +@Return 0 if the key is missing, or the value associated with the key. +*/ /**************************************************************************/ +uintptr_t HASH_Retrieve(HASH_TABLE *pHash, uintptr_t k); + +/*************************************************************************/ /*! +@Function HASH_Iterate +@Description Iterate over every entry in the hash table. +@Input pHash Hash table to iterate. +@Input pfnCallback Callback to call with the key and data for each +. entry in the hash table +@Return Callback error if any, otherwise PVRSRV_OK +*/ /**************************************************************************/ +PVRSRV_ERROR HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback); + +#ifdef HASH_TRACE +/*************************************************************************/ /*! +@Function HASH_Dump +@Description Dump out some information about a hash table. +@Input pHash The hash table. +*/ /**************************************************************************/ +void HASH_Dump(HASH_TABLE *pHash); +#endif + +#if defined(__cplusplus) +} +#endif + +#endif /* _HASH_H_ */ + +/****************************************************************************** + End of file (hash.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/shared/include/htbuffer.h b/drivers/mcst/gpu-imgtec/services/shared/include/htbuffer.h new file mode 100644 index 000000000000..04cc5deba34e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/include/htbuffer.h @@ -0,0 +1,132 @@ +/*************************************************************************/ /*! +@File htbuffer.h +@Title Host Trace Buffer shared API. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Host Trace Buffer provides a mechanism to log Host events to a + buffer in a similar way to the Firmware Trace mechanism. 
+ Host Trace Buffer logs data using a Transport Layer buffer. + The Transport Layer and pvrtld tool provides the mechanism to + retrieve the trace data. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef HTBUFFER_H +#define HTBUFFER_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "htbuffer_sf.h" +#include "htbuffer_types.h" +#include "htbuffer_init.h" + +#if defined(__KERNEL__) +#define HTBLOGK(SF, args...) do { if (HTB_GROUP_ENABLED(SF)) HTBLogSimple((IMG_HANDLE) NULL, SF, ## args); } while (0) + +/* Host Trace Buffer name */ +#define HTB_STREAM_NAME "PVRHTBuffer" + +#else +#define HTBLOG(handle, SF, args...) do { if (HTB_GROUP_ENABLED(SF)) HTBLogSimple(handle, SF, ## args); } while (0) +#endif + +/* macros to cast 64 or 32-bit pointers into 32-bit integer components for Host Trace */ +#define HTBLOG_PTR_BITS_HIGH(p) ((IMG_UINT32)((((IMG_UINT64)((uintptr_t)p))>>32)&0xffffffff)) +#define HTBLOG_PTR_BITS_LOW(p) ((IMG_UINT32)(((IMG_UINT64)((uintptr_t)p))&0xffffffff)) + +/* macros to cast 64-bit integers into 32-bit integer components for Host Trace */ +#define HTBLOG_U64_BITS_HIGH(u) ((IMG_UINT32)((u>>32)&0xffffffff)) +#define HTBLOG_U64_BITS_LOW(u) ((IMG_UINT32)(u&0xffffffff)) + +/*************************************************************************/ /*! + @Function HTBLog + @Description Record a Host Trace Buffer log event + + @Input PID The PID of the process the event is associated + with. 
This is provided as an argument rather + than querying internally so that events associated + with a particular process, but performed by + another can be logged correctly. + + @Input TimeStampus The timestamp in us for this event + + @Input SF The log event ID + + @Input ... Log parameters + + @Return PVRSRV_OK Success. + +*/ /**************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT64 ui64TimeStampns, IMG_UINT32 SF, ...); + + +/*************************************************************************/ /*! + @Function HTBLogSimple + @Description Record a Host Trace Buffer log event with implicit PID and Timestamp + + @Input SF The log event ID + + @Input ... Log parameters + + @Return PVRSRV_OK Success. + +*/ /**************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +HTBLogSimple(IMG_HANDLE hSrvHandle, IMG_UINT32 SF, ...); + + + +/* DEBUG log group enable */ +#if !defined(HTB_DEBUG_LOG_GROUP) +#undef HTB_LOG_TYPE_DBG /* No trace statements in this log group should be checked in */ +#define HTB_LOG_TYPE_DBG __BUILDERROR__ +#endif + + +#if defined(__cplusplus) +} +#endif + +#endif /* HTBUFFER_H */ +/***************************************************************************** + End of file (htbuffer.h) +*****************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/shared/include/htbuffer_init.h b/drivers/mcst/gpu-imgtec/services/shared/include/htbuffer_init.h new file mode 100644 index 000000000000..d114579964b4 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/include/htbuffer_init.h @@ -0,0 +1,114 @@ +/*************************************************************************/ /*! +@File htbuffer_init.h +@Title Host Trace Buffer functions needed for Services initialisation +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef HTBUFFER_INIT_H +#define HTBUFFER_INIT_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "img_types.h" +#include "img_defs.h" + +/*************************************************************************/ /*! + @Function HTBConfigure + @Description Configure the Host Trace Buffer. + Once these parameters are set they may not be changed + + @Input hSrvHandle Server Handle + + @Input pszBufferName Name to use for the TL buffer, this will be + required to request trace data from the TL + + @Input ui32BufferSize Requested TL buffer size in bytes + + @Return eError Internal services call returned eError error + number +*/ /**************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +HTBConfigure( + IMG_HANDLE hSrvHandle, + IMG_CHAR * pszBufferName, + IMG_UINT32 ui32BufferSize +); + +/*************************************************************************/ /*! 
+ @Function HTBControl + @Description Update the configuration of the Host Trace Buffer + + @Input hSrvHandle Server Handle + + @Input ui32NumFlagGroups Number of group enable flags words + + @Input aui32GroupEnable Flags words controlling groups to be logged + + @Input ui32LogLevel Log level to record + + @Input ui32EnablePID PID to enable logging for a specific process + + @Input eLogMode Enable logging for all or specific processes, + + @Input eOpMode Control what trace data is dropped if the TL + buffer is full + + @Return eError Internal services call returned eError error + number +*/ /**************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +HTBControl( + IMG_HANDLE hSrvHandle, + IMG_UINT32 ui32NumFlagGroups, + IMG_UINT32 * aui32GroupEnable, + IMG_UINT32 ui32LogLevel, + IMG_UINT32 ui32EnablePID, + HTB_LOGMODE_CTRL eLogMode, + HTB_OPMODE_CTRL eOpMode +); + +#if defined(__cplusplus) +} +#endif + +#endif /* HTBUFFER_INIT_H */ +/***************************************************************************** + End of file (htbuffer_init.h) +*****************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/shared/include/lock.h b/drivers/mcst/gpu-imgtec/services/shared/include/lock.h new file mode 100644 index 000000000000..7cd0f58ad9d7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/include/lock.h @@ -0,0 +1,425 @@ +/*************************************************************************/ /*! +@File lock.h +@Title Locking interface +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Services internal locking interface +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef LOCK_H +#define LOCK_H + +/* In Linux kernel mode we are using the kernel mutex implementation directly + * with macros. This allows us to use the kernel lockdep feature for lock + * debugging. */ +#include "lock_types.h" + +#if defined(LINUX) && defined(__KERNEL__) + +#include "allocmem.h" +#include <linux/mutex.h> + +#define OSLockCreateNoStats(phLock) ({ \ + PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \ + *(phLock) = OSAllocMemNoStats(sizeof(struct mutex)); \ + if (*(phLock)) { mutex_init(*(phLock)); e = PVRSRV_OK; }; \ + e;}) +#define OSLockCreate(phLock) ({ \ + PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \ + *(phLock) = OSAllocMem(sizeof(struct mutex)); \ + if (*(phLock)) { mutex_init(*(phLock)); e = PVRSRV_OK; }; \ + e;}) +#define OSLockDestroy(hLock) ({mutex_destroy((hLock)); OSFreeMem((hLock)); PVRSRV_OK;}) +#define OSLockDestroyNoStats(hLock) ({mutex_destroy((hLock)); OSFreeMemNoStats((hLock)); PVRSRV_OK;}) + +#define OSLockAcquire(hLock) ({mutex_lock((hLock)); PVRSRV_OK;}) +#define OSLockAcquireNested(hLock, subclass) ({mutex_lock_nested((hLock), (subclass)); PVRSRV_OK;}) +#define OSLockRelease(hLock) ({mutex_unlock((hLock)); PVRSRV_OK;}) + +#define OSLockIsLocked(hLock) ((mutex_is_locked((hLock)) == 1) ? IMG_TRUE : IMG_FALSE) +#define OSTryLockAcquire(hLock) ((mutex_trylock(hLock) == 1) ?
IMG_TRUE : IMG_FALSE) + +#define OSSpinLockCreate(_ppsLock) ({ \ + PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \ + *(_ppsLock) = OSAllocMem(sizeof(spinlock_t)); \ + if (*(_ppsLock)) {spin_lock_init(*(_ppsLock)); e = PVRSRV_OK;} \ + e;}) +#define OSSpinLockDestroy(_psLock) ({OSFreeMem(_psLock);}) + +typedef unsigned long OS_SPINLOCK_FLAGS; +#define OSSpinLockAcquire(_pLock, _flags) spin_lock_irqsave(_pLock, _flags) +#define OSSpinLockRelease(_pLock, _flags) spin_unlock_irqrestore(_pLock, _flags) + +/* These _may_ be reordered or optimized away entirely by the compiler/hw */ +#define OSAtomicRead(pCounter) atomic_read(pCounter) +#define OSAtomicWrite(pCounter, i) atomic_set(pCounter, i) + +/* The following atomic operations, in addition to being SMP-safe, also + imply a memory barrier around the operation */ +#define OSAtomicIncrement(pCounter) atomic_inc_return(pCounter) +#define OSAtomicDecrement(pCounter) atomic_dec_return(pCounter) +#define OSAtomicCompareExchange(pCounter, oldv, newv) atomic_cmpxchg(pCounter,oldv,newv) +#define OSAtomicExchange(pCounter, iNewVal) atomic_xchg(pCounter, iNewVal) + +static inline IMG_INT OSAtomicOr(ATOMIC_T *pCounter, IMG_INT iVal) +{ + IMG_INT iOldVal, iLastVal, iNewVal; + + iLastVal = OSAtomicRead(pCounter); + do + { + iOldVal = iLastVal; + iNewVal = iOldVal | iVal; + + iLastVal = OSAtomicCompareExchange(pCounter, iOldVal, iNewVal); + } + while (iOldVal != iLastVal); + + return iNewVal; +} + +#define OSAtomicAdd(pCounter, incr) atomic_add_return(incr,pCounter) +#define OSAtomicAddUnless(pCounter, incr, test) __atomic_add_unless(pCounter,incr,test) + +#define OSAtomicSubtract(pCounter, incr) atomic_add_return(-(incr),pCounter) +#define OSAtomicSubtractUnless(pCounter, incr, test) OSAtomicAddUnless(pCounter, -(incr), test) + +#else /* defined(LINUX) && defined(__KERNEL__) */ + +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" + +/**************************************************************************/ 
/*! +@Function OSLockCreate +@Description Creates an operating system lock object. +@Output phLock The created lock. +@Return PVRSRV_OK on success. PVRSRV_ERROR_OUT_OF_MEMORY if the driver + cannot allocate CPU memory needed for the lock. + PVRSRV_ERROR_INIT_FAILURE if the Operating System fails to + allocate the lock. + */ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR OSLockCreate(POS_LOCK *phLock); +#if defined(INTEGRITY_OS) +#define OSLockCreateNoStats OSLockCreate +#endif + +/**************************************************************************/ /*! +@Function OSLockDestroy +@Description Destroys an operating system lock object. +@Input hLock The lock to be destroyed. +@Return None. + */ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR OSLockDestroy(POS_LOCK hLock); + +#if defined(INTEGRITY_OS) +#define OSLockDestroyNoStats OSLockDestroy +#endif +/**************************************************************************/ /*! +@Function OSLockAcquire +@Description Acquires an operating system lock. + NB. This function must not return until the lock is acquired + (meaning the implementation should not timeout or return with + an error, as the caller will assume they have the lock). +@Input hLock The lock to be acquired. +@Return None. + */ /**************************************************************************/ +IMG_INTERNAL +void OSLockAcquire(POS_LOCK hLock); + +/**************************************************************************/ /*! +@Function OSTryLockAcquire +@Description Try to acquire an operating system lock. + NB. If lock is acquired successfully in the first attempt, + then the function returns true and else it will return false. +@Input hLock The lock to be acquired. +@Return IMG_TRUE if lock acquired successfully, + IMG_FALSE otherwise. 
+ */ /**************************************************************************/ +IMG_INTERNAL +IMG_BOOL OSTryLockAcquire(POS_LOCK hLock); + +/* Nested notation isn't used in UM or other OS's */ +/**************************************************************************/ /*! +@Function OSLockAcquireNested +@Description For operating systems other than Linux, this equates to an + OSLockAcquire() call. On Linux, this function wraps a call + to mutex_lock_nested(). This recognises the scenario where + there may be multiple subclasses within a particular class + of lock. In such cases, the order in which the locks belonging + these various subclasses are acquired is important and must be + validated. +@Input hLock The lock to be acquired. +@Input subclass The subclass of the lock. +@Return None. + */ /**************************************************************************/ +#define OSLockAcquireNested(hLock, subclass) OSLockAcquire((hLock)) + +/**************************************************************************/ /*! +@Function OSLockRelease +@Description Releases an operating system lock. +@Input hLock The lock to be released. +@Return None. + */ /**************************************************************************/ +IMG_INTERNAL +void OSLockRelease(POS_LOCK hLock); + +/**************************************************************************/ /*! +@Function OSLockIsLocked +@Description Tests whether or not an operating system lock is currently + locked. +@Input hLock The lock to be tested. +@Return IMG_TRUE if locked, IMG_FALSE if not locked. 
+ */ /**************************************************************************/ +IMG_INTERNAL +IMG_BOOL OSLockIsLocked(POS_LOCK hLock); + +#if defined(LINUX) + +/* Use GCC intrinsics (read/write semantics consistent with kernel-side implementation) */ +#define OSAtomicRead(pCounter) (*(volatile IMG_INT32 *)&(pCounter)->counter) +#define OSAtomicWrite(pCounter, i) ((pCounter)->counter = (IMG_INT32) i) +#define OSAtomicIncrement(pCounter) __sync_add_and_fetch((&(pCounter)->counter), 1) +#define OSAtomicDecrement(pCounter) __sync_sub_and_fetch((&(pCounter)->counter), 1) +#define OSAtomicCompareExchange(pCounter, oldv, newv) \ + __sync_val_compare_and_swap((&(pCounter)->counter), oldv, newv) +#define OSAtomicOr(pCounter, iVal) __sync_or_and_fetch((&(pCounter)->counter), iVal) + +static inline IMG_UINT32 OSAtomicExchange(ATOMIC_T *pCounter, IMG_UINT32 iNewVal) +{ + IMG_UINT32 iOldVal; + IMG_UINT32 iLastVal; + + iLastVal = OSAtomicRead(pCounter); + do + { + iOldVal = iLastVal; + iLastVal = OSAtomicCompareExchange(pCounter, iOldVal, iNewVal); + } + while (iOldVal != iLastVal); + + return iOldVal; +} + +#define OSAtomicAdd(pCounter, incr) __sync_add_and_fetch((&(pCounter)->counter), incr) +#define OSAtomicAddUnless(pCounter, incr, test) ({ \ + IMG_INT32 c; IMG_INT32 old; \ + c = OSAtomicRead(pCounter); \ + while (1) { \ + if (c == (test)) break; \ + old = OSAtomicCompareExchange(pCounter, c, c+(incr)); \ + if (old == c) break; \ + c = old; \ + } c; }) + +#define OSAtomicSubtract(pCounter, incr) OSAtomicAdd(pCounter, -(incr)) +#define OSAtomicSubtractUnless(pCounter, incr, test) OSAtomicAddUnless(pCounter, -(incr), test) + +#else + +/*************************************************************************/ /*! +@Function OSAtomicRead +@Description Read the value of a variable atomically. + Atomic functions must be implemented in a manner that + is both symmetric multiprocessor (SMP) safe and has a memory + barrier around each operation. 
+@Input pCounter The atomic variable to read +@Return The value of the atomic variable +*/ /**************************************************************************/ +IMG_INTERNAL +IMG_INT32 OSAtomicRead(const ATOMIC_T *pCounter); + +/*************************************************************************/ /*! +@Function OSAtomicWrite +@Description Write the value of a variable atomically. + Atomic functions must be implemented in a manner that + is both symmetric multiprocessor (SMP) safe and has a memory + barrier around each operation. +@Input pCounter The atomic variable to be written to +@Input v The value to write +@Return None +*/ /**************************************************************************/ +IMG_INTERNAL +void OSAtomicWrite(ATOMIC_T *pCounter, IMG_INT32 v); + +/* The following atomic operations, in addition to being SMP-safe, + should also have a memory barrier around each operation */ +/*************************************************************************/ /*! +@Function OSAtomicIncrement +@Description Increment the value of a variable atomically. + Atomic functions must be implemented in a manner that + is both symmetric multiprocessor (SMP) safe and has a memory + barrier around each operation. +@Input pCounter The atomic variable to be incremented +@Return The new value of *pCounter. +*/ /**************************************************************************/ +IMG_INTERNAL +IMG_INT32 OSAtomicIncrement(ATOMIC_T *pCounter); + +/*************************************************************************/ /*! +@Function OSAtomicDecrement +@Description Decrement the value of a variable atomically. + Atomic functions must be implemented in a manner that + is both symmetric multiprocessor (SMP) safe and has a memory + barrier around each operation. +@Input pCounter The atomic variable to be decremented +@Return The new value of *pCounter. 
+*/ /**************************************************************************/ +IMG_INTERNAL +IMG_INT32 OSAtomicDecrement(ATOMIC_T *pCounter); + +/*************************************************************************/ /*! +@Function OSAtomicAdd +@Description Add a specified value to a variable atomically. + Atomic functions must be implemented in a manner that + is both symmetric multiprocessor (SMP) safe and has a memory + barrier around each operation. +@Input pCounter The atomic variable to add the value to +@Input v The value to be added +@Return The new value of *pCounter. +*/ /**************************************************************************/ +IMG_INTERNAL +IMG_INT32 OSAtomicAdd(ATOMIC_T *pCounter, IMG_INT32 v); + +/*************************************************************************/ /*! +@Function OSAtomicAddUnless +@Description Add a specified value to a variable atomically unless it + already equals a particular value. + Atomic functions must be implemented in a manner that + is both symmetric multiprocessor (SMP) safe and has a memory + barrier around each operation. +@Input pCounter The atomic variable to add the value to +@Input v The value to be added to 'pCounter' +@Input t The test value. If 'pCounter' equals this, + its value will not be adjusted +@Return The old value of *pCounter. +*/ /**************************************************************************/ +IMG_INTERNAL +IMG_INT32 OSAtomicAddUnless(ATOMIC_T *pCounter, IMG_INT32 v, IMG_INT32 t); + +/*************************************************************************/ /*! +@Function OSAtomicSubtract +@Description Subtract a specified value from a variable atomically. + Atomic functions must be implemented in a manner that + is both symmetric multiprocessor (SMP) safe and has a memory + barrier around each operation. +@Input pCounter The atomic variable to subtract the value from +@Input v The value to be subtracted +@Return The new value of *pCounter. 
+*/ /**************************************************************************/ +IMG_INTERNAL +IMG_INT32 OSAtomicSubtract(ATOMIC_T *pCounter, IMG_INT32 v); + +/*************************************************************************/ /*! +@Function OSAtomicSubtractUnless +@Description Subtract a specified value from a variable atomically unless + it already equals a particular value. + Atomic functions must be implemented in a manner that + is both symmetric multiprocessor (SMP) safe and has a memory + barrier around each operation. +@Input pCounter The atomic variable to subtract the value from +@Input v The value to be subtracted from 'pCounter' +@Input t The test value. If 'pCounter' equals this, + its value will not be adjusted +@Return The old value of *pCounter. +*/ /**************************************************************************/ +IMG_INTERNAL +IMG_INT32 OSAtomicSubtractUnless(ATOMIC_T *pCounter, IMG_INT32 v, IMG_INT32 t); + +/*************************************************************************/ /*! +@Function OSAtomicCompareExchange +@Description Set a variable to a given value only if it is currently + equal to a specified value. The whole operation must be atomic. + Atomic functions must be implemented in a manner that + is both symmetric multiprocessor (SMP) safe and has a memory + barrier around each operation. +@Input pCounter The atomic variable to be checked and + possibly updated +@Input oldv The value the atomic variable must have in + order to be modified +@Input newv The value to write to the atomic variable if + it equals 'oldv' +@Return The old value of *pCounter +*/ /**************************************************************************/ +IMG_INTERNAL +IMG_INT32 OSAtomicCompareExchange(ATOMIC_T *pCounter, IMG_INT32 oldv, IMG_INT32 newv); + +/*************************************************************************/ /*! +@Function OSAtomicExchange +@Description Set a variable to a given value and retrieve previous value. 
+ The whole operation must be atomic. + Atomic functions must be implemented in a manner that + is both symmetric multiprocessor (SMP) safe and has a memory + barrier around each operation. +@Input pCounter The atomic variable to be updated +@Input iNewVal The value to write to the atomic variable +@Return The previous value of *pCounter. +*/ /**************************************************************************/ +IMG_INTERNAL +IMG_INT32 OSAtomicExchange(ATOMIC_T *pCounter, IMG_INT32 iNewVal); + +/*************************************************************************/ /*! +@Function OSAtomicOr +@Description Set a variable to the bitwise or of its current value and the + specified value. Equivalent to *pCounter |= iVal. + The whole operation must be atomic. + Atomic functions must be implemented in a manner that + is both symmetric multiprocessor (SMP) safe and has a memory + barrier around each operation. +@Input pCounter The atomic variable to be updated +@Input iVal The value to bitwise or against +@Return The new value of *pCounter. 
+*/ /**************************************************************************/ +IMG_INTERNAL +IMG_INT32 OSAtomicOr(ATOMIC_T *pCounter, IMG_INT32 iVal); + +/* For now, spin-locks are required on Linux only, so other platforms fake + * spinlocks with normal mutex locks */ +typedef unsigned long OS_SPINLOCK_FLAGS; +#define POS_SPINLOCK POS_LOCK +#define OSSpinLockCreate(ppLock) OSLockCreate(ppLock) +#define OSSpinLockDestroy(pLock) OSLockDestroy(pLock) +#define OSSpinLockAcquire(pLock, flags) {flags = 0; OSLockAcquire(pLock);} +#define OSSpinLockRelease(pLock, flags) {flags = 0; OSLockRelease(pLock);} + +#endif /* defined(LINUX) */ +#endif /* defined(LINUX) && defined(__KERNEL__) */ + +#endif /* LOCK_H */ diff --git a/drivers/mcst/gpu-imgtec/services/shared/include/osmmap.h b/drivers/mcst/gpu-imgtec/services/shared/include/osmmap.h new file mode 100644 index 000000000000..feb9217be66c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/include/osmmap.h @@ -0,0 +1,115 @@ +/*************************************************************************/ /*! +@File +@Title OS Interface for mapping PMRs into CPU space. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description OS abstraction for the mmap2 interface for mapping PMRs into + User Mode memory +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef OSMMAP_H +#define OSMMAP_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +/*************************************************************************/ /*! +@Function OSMMapPMR +@Description Maps the specified PMR into CPU memory so that it may be + accessed by the user process. + Whether the memory is mapped read only, read/write, or not at + all, is dependent on the PMR itself. 
+ The PMR handle is opaque to the user, and lower levels of this + stack ensure that the handle is private to this process, such + that this API cannot be abused to gain access to other people's + PMRs. The OS implementation of this function should return the + virtual address and length for the User to use. The "PrivData" + is to be stored opaquely by the caller (N.B. he should make no + assumptions, in particular, NULL is a valid handle) and given + back to the call to OSMUnmapPMR. + The OS implementation is free to use the PrivData handle for + any purpose it sees fit. +@Input hBridge The bridge handle. +@Input hPMR The handle of the PMR to be mapped. +@Input uiPMRLength The size of the PMR. +@Input uiFlags Flags indicating how the mapping should + be done (read-only, etc). These may not + be honoured if the PMR does not permit + them. +@Input uiPMRLength The size of the PMR. +@Output phOSMMapPrivDataOut Returned private data. +@Output ppvMappingAddressOut The returned mapping. +@Output puiMappingLengthOut The size of the returned mapping. +@Return PVRSRV_OK on success, failure code otherwise. + */ /*************************************************************************/ +PVRSRV_ERROR +OSMMapPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_SIZE_T uiPMRLength, + IMG_UINT32 uiFlags, + IMG_HANDLE *phOSMMapPrivDataOut, + void **ppvMappingAddressOut, + size_t *puiMappingLengthOut); + +/*************************************************************************/ /*! +@Function OSMUnmapPMR +@Description Unmaps the specified PMR from CPU memory. + This function is the counterpart to OSMMapPMR. + The caller is required to pass the PMR handle back in along + with the same 3-tuple of information that was returned by the + call to OSMMapPMR in phOSMMapPrivDataOut. + It is possible to unmap only part of the original mapping + with this call, by specifying only the address range to be + unmapped in pvMappingAddress and uiMappingLength. 
+@Input hBridge The bridge handle. +@Input hPMR The handle of the PMR to be unmapped. +@Input hOSMMapPrivData The OS private data of the mapping. +@Input pvMappingAddress The address to be unmapped. +@Input uiMappingLength The size to be unmapped. +@Return PVRSRV_OK on success, failure code otherwise. + */ /*************************************************************************/ +void +OSMUnmapPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_HANDLE hOSMMapPrivData, + void *pvMappingAddress, + size_t uiMappingLength); + +#endif /* OSMMAP_H */ diff --git a/drivers/mcst/gpu-imgtec/services/shared/include/proc_stats.h b/drivers/mcst/gpu-imgtec/services/shared/include/proc_stats.h new file mode 100644 index 000000000000..93d953af908b --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/include/proc_stats.h @@ -0,0 +1,135 @@ +/*************************************************************************/ /*! +@File +@Title Process and driver statistic definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef PROC_STATS_H +#define PROC_STATS_H + +/* X-Macro for Process stat keys */ +#define PVRSRV_PROCESS_STAT_KEY \ + X(PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS, "Connections") \ + X(PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS, "ConnectionsMax") \ + X(PVRSRV_PROCESS_STAT_TYPE_RC_OOMS, "RenderContextOutOfMemoryEvents") \ + X(PVRSRV_PROCESS_STAT_TYPE_RC_PRS, "RenderContextPartialRenders") \ + X(PVRSRV_PROCESS_STAT_TYPE_RC_GROWS, "RenderContextGrows") \ + X(PVRSRV_PROCESS_STAT_TYPE_RC_PUSH_GROWS, "RenderContextPushGrows") \ + X(PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES, "RenderContextTAStores") \ + X(PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES, "RenderContext3DStores") \ + X(PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES, "RenderContextCDMStores") \ + X(PVRSRV_PROCESS_STAT_TYPE_RC_TDM_STORES, "RenderContextTDMStores") \ + X(PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP, "ZSBufferRequestsByApp") \ + X(PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW, "ZSBufferRequestsByFirmware") \ + X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP, "FreeListGrowRequestsByApp") \ + X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW, "FreeListGrowRequestsByFirmware") \ + X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT, "FreeListInitialPages") \ + X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES, "FreeListMaxPages") \ + X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC, "MemoryUsageKMalloc") \ + X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC_MAX, "MemoryUsageKMallocMax") \ + X(PVRSRV_PROCESS_STAT_TYPE_VMALLOC, "MemoryUsageVMalloc") \ + X(PVRSRV_PROCESS_STAT_TYPE_VMALLOC_MAX, "MemoryUsageVMallocMax") \ + X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, "MemoryUsageAllocPTMemoryUMA") \ + X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA_MAX, "MemoryUsageAllocPTMemoryUMAMax") \ + X(PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, "MemoryUsageVMapPTUMA") \ + X(PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA_MAX, "MemoryUsageVMapPTUMAMax") \ + X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, 
"MemoryUsageAllocPTMemoryLMA") \ + X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA_MAX, "MemoryUsageAllocPTMemoryLMAMax") \ + X(PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, "MemoryUsageIORemapPTLMA") \ + X(PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA_MAX, "MemoryUsageIORemapPTLMAMax") \ + X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, "MemoryUsageAllocGPUMemLMA") \ + X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES_MAX, "MemoryUsageAllocGPUMemLMAMax") \ + X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, "MemoryUsageAllocGPUMemUMA") \ + X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES_MAX, "MemoryUsageAllocGPUMemUMAMax") \ + X(PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, "MemoryUsageMappedGPUMemUMA/LMA") \ + X(PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES_MAX, "MemoryUsageMappedGPUMemUMA/LMAMax") \ + X(PVRSRV_PROCESS_STAT_TYPE_TOTAL, "MemoryUsageTotal") \ + X(PVRSRV_PROCESS_STAT_TYPE_TOTAL_MAX, "MemoryUsageTotalMax") \ + X(PVRSRV_PROCESS_STAT_TYPE_OOM_VIRTMEM_COUNT, "MemoryOOMCountDeviceVirtual") \ + X(PVRSRV_PROCESS_STAT_TYPE_OOM_PHYSMEM_COUNT, "MemoryOOMCountPhysicalHeap") \ + X(PVRSRV_PROCESS_STAT_TYPE_INVALID_VIRTMEM, "MemoryOOMCountDeviceVirtualAtAddress") \ + X(PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, "MemoryUsageDmaBufImport") \ + X(PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT_MAX, "MemoryUsageDmaBufImportMax") + + +/* X-Macro for Driver stat keys */ +#define PVRSRV_DRIVER_STAT_KEY \ + X(PVRSRV_DRIVER_STAT_TYPE_KMALLOC, "MemoryUsageKMalloc") \ + X(PVRSRV_DRIVER_STAT_TYPE_KMALLOC_MAX, "MemoryUsageKMallocMax") \ + X(PVRSRV_DRIVER_STAT_TYPE_VMALLOC, "MemoryUsageVMalloc") \ + X(PVRSRV_DRIVER_STAT_TYPE_VMALLOC_MAX, "MemoryUsageVMallocMax") \ + X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, "MemoryUsageAllocPTMemoryUMA") \ + X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA_MAX, "MemoryUsageAllocPTMemoryUMAMax") \ + X(PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, "MemoryUsageVMapPTUMA") \ + X(PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA_MAX, "MemoryUsageVMapPTUMAMax") \ + X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, 
"MemoryUsageAllocPTMemoryLMA") \ + X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA_MAX, "MemoryUsageAllocPTMemoryLMAMax") \ + X(PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, "MemoryUsageIORemapPTLMA") \ + X(PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA_MAX, "MemoryUsageIORemapPTLMAMax") \ + X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, "MemoryUsageAllocGPUMemLMA") \ + X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA_MAX, "MemoryUsageAllocGPUMemLMAMax") \ + X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, "MemoryUsageAllocGPUMemUMA") \ + X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_MAX, "MemoryUsageAllocGPUMemUMAMax") \ + X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, "MemoryUsageAllocGPUMemUMAPool") \ + X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL_MAX, "MemoryUsageAllocGPUMemUMAPoolMax") \ + X(PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, "MemoryUsageMappedGPUMemUMA/LMA") \ + X(PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA_MAX, "MemoryUsageMappedGPUMemUMA/LMAMax") \ + X(PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT, "MemoryUsageDmaBufImport") \ + X(PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT_MAX, "MemoryUsageDmaBufImportMax") + + +typedef enum { +#define X(stat_type, stat_str) stat_type, + PVRSRV_PROCESS_STAT_KEY +#undef X + PVRSRV_PROCESS_STAT_TYPE_COUNT +}PVRSRV_PROCESS_STAT_TYPE; + +typedef enum { +#define X(stat_type, stat_str) stat_type, + PVRSRV_DRIVER_STAT_KEY +#undef X + PVRSRV_DRIVER_STAT_TYPE_COUNT +}PVRSRV_DRIVER_STAT_TYPE; + +extern const IMG_CHAR *const pszProcessStatType[]; + +extern const IMG_CHAR *const pszDriverStatType[]; + +#endif // PROC_STATS_H diff --git a/drivers/mcst/gpu-imgtec/services/shared/include/ra.h b/drivers/mcst/gpu-imgtec/services/shared/include/ra.h new file mode 100644 index 000000000000..1116207729dd --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/include/ra.h @@ -0,0 +1,223 @@ +/*************************************************************************/ /*! +@File +@Title Resource Allocator API +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RA_H +#define RA_H + +#include "img_types.h" +#include "pvrsrv_error.h" + +/** Resource arena. + * struct _RA_ARENA_ deliberately opaque + */ +typedef struct _RA_ARENA_ RA_ARENA; //PRQA S 3313 + +/* + * Per-Arena handle - this is private data for the caller of the RA. + * The RA knows nothing about this data. It is given it in RA_Create, and + * promises to pass it to calls to the ImportAlloc and ImportFree callbacks + */ +typedef IMG_HANDLE RA_PERARENA_HANDLE; +/* + * Per-Import handle - this is private data for the caller of the RA. + * The RA knows nothing about this data. It is given it on a per-import + * basis, either the "initial" import at RA_Create time, or further imports + * via the ImportAlloc callback. It sends it back via the ImportFree callback, + * and also provides it in answer to any RA_Alloc request to signify from + * which "import" the allocation came. + */ +typedef IMG_HANDLE RA_PERISPAN_HANDLE; + +typedef IMG_UINT64 RA_BASE_T; +typedef IMG_UINT32 RA_LOG2QUANTUM_T; +typedef IMG_UINT64 RA_LENGTH_T; + +/* Lock classes: describes the level of nesting between different arenas. 
*/ +#define RA_LOCKCLASS_0 0 +#define RA_LOCKCLASS_1 1 +#define RA_LOCKCLASS_2 2 + +#define RA_NO_IMPORT_MULTIPLIER 1 + +/* + * Flags in an "import" must match the flags for an allocation + */ +typedef IMG_UINT32 RA_FLAGS_T; + +/** + * @Function RA_Create + * + * @Description To create a resource arena. + * + * @Input name - the name of the arena for diagnostic purposes. + * @Input uLog2Quantum - the arena allocation quantum. + * @Input ui32LockClass - the lock class level this arena uses. + * @Input imp_alloc - a resource allocation callback or 0. + * @Input imp_free - a resource de-allocation callback or 0. + * @Input per_arena_handle - private handle passed to alloc and free or 0. + * @Input bNoSplit - Disable splitting up imports. + * @Return pointer to arena, or NULL. + */ +RA_ARENA * +RA_Create(IMG_CHAR *name, + /* subsequent imports: */ + RA_LOG2QUANTUM_T uLog2Quantum, + IMG_UINT32 ui32LockClass, + PVRSRV_ERROR (*imp_alloc)(RA_PERARENA_HANDLE _h, + RA_LENGTH_T uSize, + RA_FLAGS_T uFlags, + const IMG_CHAR *pszAnnotation, + RA_BASE_T *pBase, + RA_LENGTH_T *pActualSize, + RA_PERISPAN_HANDLE *phPriv), + void (*imp_free) (RA_PERARENA_HANDLE, + RA_BASE_T, + RA_PERISPAN_HANDLE), + RA_PERARENA_HANDLE per_arena_handle, + IMG_BOOL bNoSplit); + +/** + * @Function RA_Delete + * + * @Description + * + * To delete a resource arena. All resources allocated from the arena + * must be freed before deleting the arena. + * + * @Input pArena - the arena to delete. + * @Return None + */ +void +RA_Delete (RA_ARENA *pArena); + +/** + * @Function RA_Add + * + * @Description + * + * To add a resource span to an arena. The span must not overlap with + * any span previously added to the arena. + * + * @Input pArena - the arena to add a span into. + * @Input base - the base of the span. + * @Input uSize - the extent of the span. 
+ * @Input hPriv - handle associated to the span (reserved for user uses) + * @Return IMG_TRUE - success, IMG_FALSE - failure + */ +IMG_BOOL +RA_Add(RA_ARENA *pArena, + RA_BASE_T base, + RA_LENGTH_T uSize, + RA_FLAGS_T uFlags, + RA_PERISPAN_HANDLE hPriv); + +/** + * @Function RA_Alloc + * + * @Description To allocate resource from an arena. + * + * @Input pArena - the arena + * @Input uRequestSize - the size of resource segment requested. + * @Input uImportMultiplier - Import x-times of the uRequestSize + * for future RA_Alloc calls. + * Use RA_NO_IMPORT_MULTIPLIER to import the exact size. + * @Input uImportFlags - flags influencing allocation policy. + * @Input uAlignment - the alignment constraint required for the + * allocated segment, use 0 if alignment not required. + * @Input pszAnnotation - a string to describe the allocation + * @Output pBase - allocated base resource + * @Output pActualSize - the actual_size of resource segment allocated, + * typically rounded up by quantum. + * @Output phPriv - the user reference associated with allocated + * resource span. + * @Return PVRSRV_OK - success + */ +PVRSRV_ERROR +RA_Alloc(RA_ARENA *pArena, + RA_LENGTH_T uRequestSize, + IMG_UINT8 uImportMultiplier, + RA_FLAGS_T uImportFlags, + RA_LENGTH_T uAlignment, + const IMG_CHAR *pszAnnotation, + RA_BASE_T *pBase, + RA_LENGTH_T *pActualSize, + RA_PERISPAN_HANDLE *phPriv); + +/** + * @Function RA_Alloc_Range + * + * @Description + * + * To allocate a resource at a specified base from an arena. + * + * @Input pArena - the arena + * @Input uRequestSize - the size of resource segment requested. + * @Input uImportFlags - flags influencing allocation policy. + * @Input uAlignment - the alignment constraint required for the + * allocated segment, use 0 if alignment not required. + * @Input pBase - allocated base resource + * @Output pActualSize - the actual_size of resource segment allocated, + * typically rounded up by quantum. 
+ * @Return PVRSRV_OK - success + */ +PVRSRV_ERROR +RA_Alloc_Range (RA_ARENA *pArena, + RA_LENGTH_T uRequestSize, + RA_FLAGS_T uImportFlags, + RA_LENGTH_T uAlignment, + RA_BASE_T base, + RA_LENGTH_T *pActualSize); + +/** + * @Function RA_Free + * + * @Description To free a resource segment. + * + * @Input pArena - the arena the segment was originally allocated from. + * @Input base - the base of the resource span to free. + * + * @Return None + */ +void +RA_Free(RA_ARENA *pArena, RA_BASE_T base); + +#endif diff --git a/drivers/mcst/gpu-imgtec/services/shared/include/sync.h b/drivers/mcst/gpu-imgtec/services/shared/include/sync.h new file mode 100644 index 000000000000..fe3210ddf839 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/include/sync.h @@ -0,0 +1,316 @@ +/*************************************************************************/ /*! +@File +@Title Synchronisation interface header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Defines the client side interface for synchronisation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef SYNC_H +#define SYNC_H + +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "sync_prim_internal.h" +#include "pdumpdefs.h" +#include "dllist.h" +#include "pvr_debug.h" + +#include "device_connection.h" + +#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__) +#define __pvrsrv_defined_struct_enum__ +#include +#endif + +/*************************************************************************/ /*! 
+@Function SyncPrimContextCreate + +@Description Create a new synchronisation context + +@Input hBridge Bridge handle + +@Input hDeviceNode Device node handle + +@Output hSyncPrimContext Handle to the created synchronisation + primitive context + +@Return PVRSRV_OK if the synchronisation primitive context was + successfully created +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncPrimContextCreate(SHARED_DEV_CONNECTION hDevConnection, + PSYNC_PRIM_CONTEXT *hSyncPrimContext); + +/*************************************************************************/ /*! +@Function SyncPrimContextDestroy + +@Description Destroy a synchronisation context + +@Input hSyncPrimContext Handle to the synchronisation + primitive context to destroy + +@Return None +*/ +/*****************************************************************************/ +void +SyncPrimContextDestroy(PSYNC_PRIM_CONTEXT hSyncPrimContext); + +/*************************************************************************/ /*! +@Function SyncPrimAlloc + +@Description Allocate a new synchronisation primitive on the specified + synchronisation context + +@Input hSyncPrimContext Handle to the synchronisation + primitive context + +@Output ppsSync Created synchronisation primitive + +@Input pszClassName Sync source annotation + +@Return PVRSRV_OK if the synchronisation primitive was + successfully created +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext, + PVRSRV_CLIENT_SYNC_PRIM **ppsSync, + const IMG_CHAR *pszClassName); + +#if defined(__KERNEL__) +/*************************************************************************/ /*! 
+@Function SyncPrimAllocForServerSync + +@Description Allocate a new synchronisation primitive on the specified + synchronisation context for a server sync + +@Input hSyncPrimContext Handle to the synchronisation + primitive context + +@Output ppsSync Created synchronisation primitive + +@Input pszClassName Sync source annotation + +@Return PVRSRV_OK if the synchronisation primitive was + successfully created +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncPrimAllocForServerSync(PSYNC_PRIM_CONTEXT hSyncPrimContext, + PVRSRV_CLIENT_SYNC_PRIM **ppsSync, + const IMG_CHAR *pszClassName); +#endif + +/*************************************************************************/ /*! +@Function SyncPrimFree + +@Description Free a synchronisation primitive + +@Input psSync The synchronisation primitive to free + +@Return PVRSRV_OK if the synchronisation primitive was + successfully freed +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncPrimFree(PVRSRV_CLIENT_SYNC_PRIM *psSync); + +/*************************************************************************/ /*! +@Function SyncPrimSet + +@Description Set the synchronisation primitive to a value + +@Input psSync The synchronisation primitive to set + +@Input ui32Value Value to set it to + +@Return PVRSRV_OK on success +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncPrimSet(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value); + +#if defined(NO_HARDWARE) + +/*************************************************************************/ /*! 
+@Function SyncPrimNoHwUpdate + +@Description Updates the synchronisation primitive value (in NoHardware drivers) + +@Input psSync The synchronisation primitive to update + +@Input ui32Value Value to update it to + +@Return PVRSRV_OK on success +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncPrimNoHwUpdate(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value); +#endif + +#if defined(PDUMP) +/*************************************************************************/ /*! +@Function SyncPrimPDump + +@Description PDump the current value of the synchronisation primitive + +@Input psSync The synchronisation primitive to PDump + +@Return None +*/ +/*****************************************************************************/ +void +SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync); + +/*************************************************************************/ /*! +@Function SyncPrimPDumpValue + +@Description PDump the ui32Value as the value of the synchronisation + primitive (regardless of the current value). + +@Input psSync The synchronisation primitive to PDump +@Input ui32Value Value to give to the sync prim on the pdump + +@Return None +*/ +/*****************************************************************************/ +void +SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value); + +/*************************************************************************/ /*! 
+@Function SyncPrimPDumpPol + +@Description Do a PDump poll of the synchronisation primitive + +@Input psSync The synchronisation primitive to PDump + +@Input ui32Value Value to poll for + +@Input ui32Mask PDump mask operator + +@Input ui32PDumpFlags PDump flags + +@Return None +*/ +/*****************************************************************************/ +void +SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + IMG_UINT32 ui32PDumpFlags); + +/*************************************************************************/ /*! +@Function SyncPrimPDumpCBP + +@Description Do a PDump CB poll using the synchronisation primitive + +@Input psSync The synchronisation primitive to PDump + +@Input uiWriteOffset Current write offset of buffer + +@Input uiPacketSize Size of the packet to write into CB + +@Input uiBufferSize Size of the CB + +@Return None +*/ +/*****************************************************************************/ +void +SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync, + IMG_UINT64 uiWriteOffset, + IMG_UINT64 uiPacketSize, + IMG_UINT64 uiBufferSize); + +#else + +#ifdef INLINE_IS_PRAGMA +#pragma inline(SyncPrimPDumpValue) +#endif +static INLINE void +SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value) +{ + PVR_UNREFERENCED_PARAMETER(psSync); + PVR_UNREFERENCED_PARAMETER(ui32Value); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(SyncPrimPDump) +#endif +static INLINE void +SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync) +{ + PVR_UNREFERENCED_PARAMETER(psSync); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(SyncPrimPDumpPol) +#endif +static INLINE void +SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psSync); + PVR_UNREFERENCED_PARAMETER(ui32Value); + PVR_UNREFERENCED_PARAMETER(ui32Mask); + 
PVR_UNREFERENCED_PARAMETER(eOperator); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(SyncPrimPDumpCBP) +#endif +static INLINE void +SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync, + IMG_UINT64 uiWriteOffset, + IMG_UINT64 uiPacketSize, + IMG_UINT64 uiBufferSize) +{ + PVR_UNREFERENCED_PARAMETER(psSync); + PVR_UNREFERENCED_PARAMETER(uiWriteOffset); + PVR_UNREFERENCED_PARAMETER(uiPacketSize); + PVR_UNREFERENCED_PARAMETER(uiBufferSize); +} +#endif /* PDUMP */ +#endif /* SYNC_H */ diff --git a/drivers/mcst/gpu-imgtec/services/shared/include/sync_internal.h b/drivers/mcst/gpu-imgtec/services/shared/include/sync_internal.h new file mode 100644 index 000000000000..00645d2c1e89 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/include/sync_internal.h @@ -0,0 +1,118 @@ +/*************************************************************************/ /*! +@File +@Title Services internal synchronisation interface header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Defines the internal client side interface for services + synchronisation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef SYNC_INTERNAL +#define SYNC_INTERNAL + +#include "img_types.h" +#include "img_defs.h" +#include "ra.h" +#include "dllist.h" +#include "lock.h" +#include "devicemem.h" +#include "sync_prim_internal.h" + +#define LOCAL_SYNC_PRIM_RESET_VALUE 0 +#define LOCAL_SYNC_PRIM_POISON_VALUE 0xa5a5a5a5u + +/* + Private structure's +*/ +#define SYNC_PRIM_NAME_SIZE 50 +typedef struct SYNC_PRIM_CONTEXT +{ + SHARED_DEV_CONNECTION hDevConnection; + IMG_CHAR azName[SYNC_PRIM_NAME_SIZE]; /*!< Name of the RA */ + RA_ARENA *psSubAllocRA; /*!< RA context */ + IMG_CHAR azSpanName[SYNC_PRIM_NAME_SIZE];/*!< Name of the span RA */ + RA_ARENA *psSpanRA; /*!< RA used for span management of SubAllocRA */ + ATOMIC_T hRefCount; /*!< Ref count for this context */ +} SYNC_PRIM_CONTEXT; + +typedef struct SYNC_PRIM_BLOCK_TAG +{ + SYNC_PRIM_CONTEXT *psContext; /*!< Our copy of the services connection */ + IMG_HANDLE hServerSyncPrimBlock; /*!< Server handle for this block */ + IMG_UINT32 ui32SyncBlockSize; /*!< Size of the sync prim block */ + IMG_UINT32 ui32FirmwareAddr; /*!< Firmware address */ + DEVMEM_MEMDESC *hMemDesc; /*!< Host mapping handle */ + IMG_UINT32 __iomem *pui32LinAddr; /*!< User CPU mapping */ + IMG_UINT64 uiSpanBase; /*!< Base of this import in the span RA */ + DLLIST_NODE sListNode; /*!< List node for the sync block list */ +} SYNC_PRIM_BLOCK; + +typedef enum SYNC_PRIM_TYPE_TAG +{ + SYNC_PRIM_TYPE_UNKNOWN = 0, + SYNC_PRIM_TYPE_LOCAL, + SYNC_PRIM_TYPE_SERVER, +} SYNC_PRIM_TYPE; + +typedef struct SYNC_PRIM_LOCAL_TAG +{ + ATOMIC_T hRefCount; /*!< Ref count for this sync */ + SYNC_PRIM_BLOCK *psSyncBlock; /*!< Synchronisation block this primitive is allocated on */ + IMG_UINT64 uiSpanAddr; /*!< Span address of the sync */ + IMG_HANDLE hRecord; /*!< Sync record handle */ +} SYNC_PRIM_LOCAL; + +typedef struct SYNC_PRIM_TAG +{ + PVRSRV_CLIENT_SYNC_PRIM sCommon; /*!< Client visible part of the sync 
prim */ + SYNC_PRIM_TYPE eType; /*!< Sync primitive type */ + union { + SYNC_PRIM_LOCAL sLocal; /*!< Local sync primitive data */ + } u; +} SYNC_PRIM; + + +IMG_INTERNAL PVRSRV_ERROR +SyncPrimGetFirmwareAddr(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 *pui32FwAddr); + +IMG_INTERNAL PVRSRV_ERROR SyncPrimLocalGetHandleAndOffset(PVRSRV_CLIENT_SYNC_PRIM *psSync, + IMG_HANDLE *phBlock, + IMG_UINT32 *pui32Offset); + + +#endif /* SYNC_INTERNAL */ diff --git a/drivers/mcst/gpu-imgtec/services/shared/include/tlclient.h b/drivers/mcst/gpu-imgtec/services/shared/include/tlclient.h new file mode 100644 index 000000000000..00f7aa8fc043 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/include/tlclient.h @@ -0,0 +1,257 @@ +/*************************************************************************/ /*! +@File tlclient.h +@Title Services Transport Layer shared API +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Transport layer common API used in both clients and server +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef TLCLIENT_H +#define TLCLIENT_H + + +#include "img_defs.h" +#include "pvrsrv_tlcommon.h" +#include "pvrsrv_error.h" + + +/* This value is used for the hSrvHandle argument in the client API when + * called directly from the kernel which will lead to a direct bridge access. + */ +#define DIRECT_BRIDGE_HANDLE ((IMG_HANDLE)0xDEADBEEFU) + + +/*************************************************************************/ /*! + @Function TLClientOpenStream + @Description Open a descriptor onto an existing kernel transport stream. + @Input hDevConnection Address of a pointer to a connection object + @Input pszName Address of the stream name string, no longer + than PRVSRVTL_MAX_STREAM_NAME_SIZE. 
+ @Input ui32Mode Unused + @Output phSD Address of a pointer to an stream object + @Return PVRSRV_ERROR_NOT_FOUND when named stream not found + @Return PVRSRV_ERROR_ALREADY_OPEN stream already open by another + @Return PVRSRV_ERROR_STREAM_ERROR internal driver state error + @Return PVRSRV_ERROR_TIMEOUT timed out, stream not found + @Return PVRSRV_ERROR for other system codes +*/ /**************************************************************************/ + +IMG_INTERNAL +PVRSRV_ERROR TLClientOpenStream(SHARED_DEV_CONNECTION hDevConnection, + const IMG_CHAR* pszName, + IMG_UINT32 ui32Mode, + IMG_HANDLE* phSD); + + +/*************************************************************************/ /*! + @Function TLClientCloseStream + @Description Close and release the stream connection to Services kernel + server transport layer. Any outstanding Acquire will be + released. + @Input hDevConnection Address of a pointer to a connection object + @Input hSD Handle of the stream object to close + @Return PVRSRV_ERROR_HANDLE_NOT_FOUND when SD handle is not known + @Return PVRSRV_ERROR_STREAM_ERROR internal driver state error + @Return PVRSRV_ERROR for system codes +*/ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR TLClientCloseStream(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD); + +/*************************************************************************/ /*! + @Function TLClientDiscoverStreams + @Description Finds all streams that's name starts with pszNamePattern and + ends with a number. + @Input hDevConnection Address of a pointer to a connection object + @Input pszNamePattern Name pattern. Must be beginning of a string. + @Output aszStreams Array of numbers from end of the discovered + names. + @inOut pui32NumFound When input, max number that can fit into + pui32Streams. When output, number of + discovered streams. 
+ @Return PVRSRV_ERROR for system codes +*/ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR TLClientDiscoverStreams(SHARED_DEV_CONNECTION hDevConnection, + const IMG_CHAR *pszNamePattern, + IMG_CHAR aszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE], + IMG_UINT32 *pui32NumFound); + +/*************************************************************************/ /*! + @Function TLClientReserveStream + @Description Reserves a region with given size in the stream. If the stream + is already reserved the function will return an error. + @Input hDevConnection Address of a pointer to a connection object + @Input hSD Handle of the stream object to close + @Output ppui8Data pointer to the buffer + @Input ui32Size size of the data + @Return +*/ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR TLClientReserveStream(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, + IMG_UINT8 **ppui8Data, + IMG_UINT32 ui32Size); + +/*************************************************************************/ /*! + @Function TLClientStreamReserve2 + @Description Reserves a region with given size in the stream. If the stream + is already reserved the function will return an error. + @Input hDevConnection Address of a pointer to a connection object + @Input hSD Handle of the stream object to close + @Output ppui8Data pointer to the buffer + @Input ui32Size size of the data + @Input ui32SizeMin minimum size of the data + @Input ui32Available available space in buffer + @Return +*/ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR TLClientReserveStream2(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, + IMG_UINT8 **ppui8Data, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32SizeMin, + IMG_UINT32 *pui32Available); + +/*************************************************************************/ /*! 
+ @Function TLClientStreamCommit + @Description Commits previously reserved region in the stream and therefore + allows next reserves. + This function call has to be preceded by the call to + TLClientReserveStream or TLClientReserveStream2. + @Input hDevConnection Address of a pointer to a connection object + @Input hSD Handle of the stream object to close + @Input ui32Size Size of the data + @Return +*/ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR TLClientCommitStream(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, + IMG_UINT32 ui32Size); + +/*************************************************************************/ /*! + @Function TLClientAcquireData + @Description When there is data available in the stream buffer this call + returns with the address and length of the data buffer the + client can safely read. This buffer may contain one or more + packets of data. + If no data is available then this call blocks until it becomes + available. However if the stream has been destroyed while + waiting then a resource unavailable error will be returned to + the caller. Clients must pair this call with a ReleaseData + call. + @Input hDevConnection Address of a pointer to a connection object + @Input hSD Handle of the stream object to read + @Output ppPacketBuf Address of a pointer to an byte buffer. On exit + pointer contains address of buffer to read from + @Output puiBufLen Pointer to an integer. 
On exit it is the size + of the data to read from the packet buffer + @Return PVRSRV_ERROR_RESOURCE_UNAVAILABLE when stream no longer exists + @Return PVRSRV_ERROR_HANDLE_NOT_FOUND when SD handle not known + @Return PVRSRV_ERROR_STREAM_ERROR internal driver state error + @Return PVRSRV_ERROR_RETRY release not called beforehand + @Return PVRSRV_ERROR_TIMEOUT block timed out, no data + @Return PVRSRV_ERROR for other system codes +*/ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR TLClientAcquireData(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, + IMG_PBYTE* ppPacketBuf, + IMG_UINT32* puiBufLen); + + +/*************************************************************************/ /*! + @Function TLClientReleaseData + @Description Called after client has read the stream data out of the buffer + The data is subsequently flushed from the stream buffer to make + room for more data packets from the stream source. + @Input hDevConnection Address of a pointer to a connection object + @Input hSD Handle of the stream object to read + @Return PVRSRV_ERROR_RESOURCE_UNAVAILABLE when stream no longer exists + @Return PVRSRV_ERROR_HANDLE_NOT_FOUND when SD handle not known to TL + @Return PVRSRV_ERROR_STREAM_ERROR internal driver state error + @Return PVRSRV_ERROR_RETRY acquire not called beforehand + @Return PVRSRV_ERROR for system codes +*/ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR TLClientReleaseData(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD); + +/*************************************************************************/ /*! + @Function TLClientReleaseDataLess + @Description Called after client has read only some data out of the buffer + and wishes to complete the read early i.e. does not want to + read the full data that the acquire call returned e.g read just + one packet from the stream. 
+ The data is subsequently flushed from the stream buffer to make + room for more data packets from the stream source. + @Input hDevConnection Address of a pointer to a connection object + @Input hSD Handle of the stream object to read + @Input uiActualReadLen Size of data read, in bytes. Must be on a TL + packet boundary. + @Return PVRSRV_ERROR_INVALID_PARAMS when read length too big + @Return PVRSRV_ERROR_RESOURCE_UNAVAILABLE when stream no longer exists + @Return PVRSRV_ERROR_HANDLE_NOT_FOUND when SD handle not known to TL + @Return PVRSRV_ERROR_STREAM_ERROR internal driver state error + @Return PVRSRV_ERROR_RETRY acquire not called beforehand + @Return PVRSRV_ERROR for system codes +*/ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR TLClientReleaseDataLess(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, IMG_UINT32 uiActualReadLen); + +/*************************************************************************/ /*! + @Function TLClientWriteData + @Description Writes data to the stream. 
+ @Input hDevConnection Address of a pointer to a connection object + @Input hSD Handle of the stream object to read + @Input ui32Size Size of the data + @Input pui8Data Pointer to data +*/ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR TLClientWriteData(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, + IMG_UINT32 ui32Size, + IMG_BYTE *pui8Data); + + +#endif /* TLCLIENT_H */ + +/****************************************************************************** + End of file (tlclient.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/shared/include/tutilsdefs.h b/drivers/mcst/gpu-imgtec/services/shared/include/tutilsdefs.h new file mode 100644 index 000000000000..b89e4a4bbb1f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/shared/include/tutilsdefs.h @@ -0,0 +1,230 @@ +/*************************************************************************/ /*! +@File tutilsdefs.h +@Title Testing utils bridge defines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Shared structures and constants between client and server sides + of tutils bridge +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef TUTILSDEFS_H +#define TUTILSDEFS_H + + +#include "pvrsrv_tlcommon.h" +#include "pvrsrv_sync_km.h" + +/****************************************************************************** + * + * TEST Related definitions and constants + */ +#define PVR_TL_TEST_STREAM_BRIDGE_NAME "TLBRIDGE_TEST" +#define PVR_TL_TEST_UMBASE 0x00202000 +#define PVR_TL_TEST_OFFSET 0x0008 +#define PVR_TL_TEST_LEN 0x0010 + +#define PVR_TL_TEST_STREAM2_NAME "TLSTREAM2_TEST" +#define PVR_TL_TEST_STREAM2_SIZE 2 + +#define PVR_TL_TEST_STREAM3_NAME "TLSTREAM3_TEST" +#define PVR_TL_TEST_STREAM3_SIZE 256 + +// This constant, when used as a parameter in StreamCreate, lessens the size of +// the buffer that is created for a stream, to avoid going over a page boundary. +#define PVR_TL_TEST_STREAM_BUFFER_REDUCTION 32 + +#define PVR_TL_TEST_CMD_SOURCE_START 10 +typedef struct _PVR_TL_TEST_CMD_SOURCE_START_IN_ +{ + /* Stream name must always be first in struct */ + IMG_CHAR pszStreamName[PRVSRVTL_MAX_STREAM_NAME_SIZE]; + IMG_UINT16 uiStreamSizeInPages; /* # of 4Kb pages */ + IMG_UINT16 uiInterval; /* in milliseconds */ + IMG_UINT16 uiCallbackKicks; /* 0 for no limit of timer call backs */ + IMG_UINT16 uiEOSMarkerKicks; /* Insert EOS Marker every N Kicks, 0 for none */ + IMG_UINT16 uiPacketSizeInBytes; /* 0 for random size between 1..255 size in bytes */ + IMG_UINT32 uiStreamCreateFlags; /* See TLStreamCreate() */ + IMG_UINT16 uiStartDelay; /* 0 for normal uiInterval delay, one off delay in ms */ + IMG_BOOL bDoNotDeleteStream; /* When true the stream is not deleted on self + * cleanup sources only the timers and other resources are */ + IMG_BOOL bDelayStreamCreate; /* When true the stream used in the source is created + * in the first kick. 
False for normal behaviour where + * the stream is created in the bridge source start context */ +} PVR_TL_TEST_CMD_SOURCE_START_IN; + + +#define PVR_TL_TEST_CMD_SOURCE_STOP 11 +typedef struct _PVR_TL_TEST_CMD_SOURCE_STOP_IN_ +{ + /* Stream name must always be first in struct */ + IMG_CHAR pszStreamName[PRVSRVTL_MAX_STREAM_NAME_SIZE]; + IMG_BOOL bDoNotDeleteStream; +} PVR_TL_TEST_CMD_SOURCE_STOP_IN; + +#define PVR_TL_TEST_CMD_SOURCE_START2 12 /* Uses two stage data submit */ +typedef PVR_TL_TEST_CMD_SOURCE_START_IN PVR_TL_TEST_CMD_SOURCE_START2_IN; + +#define PVR_TL_TEST_CMD_DEBUG_LEVEL 13 +/* No typedef, uses integer uiIn1 in union */ + +#define PVR_TL_TEST_CMD_DUMP_TL_STATE 14 +/* No typedef, uses integer uiIn1 in union */ + +#define PVR_TL_TEST_CMD_STREAM_CREATE 15 +typedef struct _PVR_TL_TEST_CMD_STREAM_CREATE_IN_ +{ + /* Stream name must always be first in struct */ + IMG_CHAR pszStreamName[PRVSRVTL_MAX_STREAM_NAME_SIZE]; + IMG_UINT16 uiStreamSizeInPages; + IMG_UINT32 uiStreamCreateFlags; + IMG_BOOL bWithOpenCallback; +} PVR_TL_TEST_CMD_STREAM_CREATE_IN; + +#define PVR_TL_TEST_CMD_STREAM_CLOSE 16 +typedef struct _PVR_TL_TEST_CMD_STREAM_NAME_IN_ +{ + IMG_CHAR pszStreamName[PRVSRVTL_MAX_STREAM_NAME_SIZE]; +} PVR_TL_TEST_CMD_STREAM_NAME_IN; + +#define PVR_TL_TEST_CMD_STREAM_OPEN 17 + +#define PVR_TL_TEST_CMD_DUMP_HWPERF_STATE 18 + +#define PVR_TL_TEST_CMD_FLUSH_HWPERF_FWBUF 19 + +#define PVR_TL_TEST_CMD_DUMP_PDUMP_STATE 21 + +typedef union _PVR_TL_TEST_CMD_IN_ +{ + PVR_TL_TEST_CMD_SOURCE_START_IN sStart; + PVR_TL_TEST_CMD_SOURCE_STOP_IN sStop; +/* PVR_TL_TEST_CMD_SOURCE_START_IN sStart2; Used by #12, use sStart instead */ + IMG_UINT32 uiIn1; /* Used by #13, #14 */ + PVR_TL_TEST_CMD_STREAM_CREATE_IN sCreate; + PVR_TL_TEST_CMD_STREAM_NAME_IN sName; + IMG_UINT32 uiParams[6]; +} PVR_TL_TEST_CMD_IN; + +/* Has to be the largest test IN structure */ +#define PVR_TL_TEST_PARAM_MAX_SIZE (sizeof(PVR_TL_TEST_CMD_IN)+4) + +#define PVR_TL_TEST_CMD_SET_PWR_STATE 22 +#define 
PVR_TL_TEST_CMD_GET_PWR_STATE 23 +#define PVR_TL_TEST_CMD_SET_DWT_PWR_CHANGE_COUNTER 24 +#define PVR_TL_TEST_CMD_GET_DWT_PWR_CHANGE_COUNTER 25 + +#define PVR_TL_TEST_PWR_STATE_ON 1 +#define PVR_TL_TEST_PWR_STATE_OFF 0 + +/**************************************************************************** + * PowMonTestThread IOCTL calls and constants + */ + +#define PVR_POWMON_CMD_GET_ESTIMATES 1 +#define PVR_POWMON_CMD_SET_THREAD_LATENCY 2 +#define PVR_POWMON_CMD_TEST_THREAD_UPDATE_STATE 3 + +#define PVR_POWMON_TEST_THREAD_RESUME 1 +#define PVR_POWMON_TEST_THREAD_PAUSE 0 + +/**************************************************************************** + * PowerTestThread IOCTL calls and constants + */ + +#define PVR_POWER_TEST_CMD_DVFS 1 +#define PVR_POWER_TEST_CMD_FORCED_IDLE 2 +#define PVR_POWER_TEST_CMD_CANCEL_FORCED_IDLE 3 +#define PVR_POWER_TEST_CMD_POWER_ON 4 +#define PVR_POWER_TEST_CMD_POWER_OFF 5 +#define PVR_POWER_TEST_CMD_APM_LATENCY 6 +#define PVR_POWER_TEST_CMD_INVALID 7 + +#define PVR_POWER_TEST_NON_FORCED 0 +#define PVR_POWER_TEST_FORCED 1 + +/**************************************************************************** + * SyncCheckpointTest IOCTL types + */ + +#define PVR_TL_TEST_CMD_SYNC_CHECKPOINT_CONTEXT_CREATE 26 +#define PVR_TL_TEST_CMD_SYNC_CHECKPOINT_CONTEXT_DESTROY 27 +#define PVR_TL_TEST_CMD_SYNC_CHECKPOINT_REGISTER_FUNCS 28 +#define PVR_TL_TEST_CMD_SYNC_CHECKPOINT_CREATE 29 +#define PVR_TL_TEST_CMD_SYNC_CHECKPOINT_CREATE_NULL_CTXT 30 +#define PVR_TL_TEST_CMD_SYNC_CHECKPOINT_CREATE_NULL_RTRN 31 +#define PVR_TL_TEST_CMD_SYNC_CHECKPOINT_DESTROY 32 +#define PVR_TL_TEST_CMD_SYNC_CHECKPOINT_SIGNAL 33 +#define PVR_TL_TEST_CMD_SYNC_CHECKPOINT_ERROR 34 +#define PVR_TL_TEST_CMD_SYNC_CHECKPOINT_IS_SIGNALLED 35 +#define PVR_TL_TEST_CMD_SYNC_CHECKPOINT_IS_ERRORED 36 + +typedef struct _PVR_TL_TEST_CMD_CHECKPOINT_CREATE_IN_ +{ + /* Checkpoint name must always be first in struct */ + IMG_CHAR pszCheckpointName[PVRSRV_SYNC_NAME_LENGTH]; + IMG_UINT16 
uiStreamSizeInPages; + IMG_UINT32 uiStreamCreateFlags; +} PVR_TL_TEST_CMD_CHECKPOINT_CREATE_IN; + +#define PVR_TL_TEST_CMD_SET_STREAM_OPEN_COUNTER 37 +#define PVR_TL_TEST_CMD_GET_STREAM_OPEN_COUNTER 38 + +typedef struct _PVR_TL_TEST_CMD_STREAM_OPEN_COUNTER_IN_ +{ + IMG_CHAR pszStreamName[PRVSRVTL_MAX_STREAM_NAME_SIZE]; + IMG_UINT32 ui32Counter; +} PVR_TL_TEST_CMD_STREAM_OPEN_COUNTER_IN; + +/**************************************************************************** + * KmallocThreshold IOCTL types + */ + +#define PVR_TL_TEST_CMD_KMALLOC 39 + +typedef struct _PVR_TL_TEST_CMD_KMALLOC_IN_ +{ + IMG_UINT32 uiAllocCount; + IMG_UINT32 uiAllocSize; + IMG_UINT32 uiFailedAllocThreshold; + IMG_UINT32 uiFailedAllocFrequency; +} PVR_TL_TEST_CMD_KMALLOC_IN; + +#endif /* TUTILSDEFS_H */ + +/****************************************************************************** + End of file (tutilsdefs.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/common/env/linux/dma_support.c b/drivers/mcst/gpu-imgtec/services/system/rogue/common/env/linux/dma_support.c new file mode 100644 index 000000000000..6157fc02516e --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/common/env/linux/dma_support.c @@ -0,0 +1,573 @@ +/*************************************************************************/ /*! +@File dma_support.c +@Title System DMA support +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provides a contiguous memory allocator (i.e. DMA allocator); + APIs are used for allocation/ioremapping (DMA/PA <-> CPU/VA) +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include +#include +#include +#include +#include +#include +#include + +#include "allocmem.h" +#include "dma_support.h" +#include "kernel_compatibility.h" + +#define DMA_MAX_IOREMAP_ENTRIES 2 +static IMG_BOOL gbEnableDmaIoRemapping = IMG_FALSE; +static DMA_ALLOC gsDmaIoRemapArray[DMA_MAX_IOREMAP_ENTRIES] = {{0}}; + +static void* +SysDmaAcquireKernelAddress(struct page *psPage, IMG_UINT64 ui64Size, void *pvOSDevice) +{ + IMG_BOOL bPageByPage = IMG_TRUE; + IMG_UINT32 uiIdx; + void *pvVirtAddr = NULL; + IMG_UINT32 ui32PgCount = (IMG_UINT32)(ui64Size >> OSGetPageShift()); + PVRSRV_DEVICE_NODE *psDevNode = OSAllocZMemNoStats(sizeof(*psDevNode)); + PVRSRV_DEVICE_CONFIG *psDevConfig = OSAllocZMemNoStats(sizeof(*psDevConfig)); + struct page **pagearray = OSAllocZMemNoStats(ui32PgCount * sizeof(struct page *)); +#if defined(CONFIG_ARM64) + pgprot_t prot = pgprot_writecombine(PAGE_KERNEL); +#else + pgprot_t prot = pgprot_noncached(PAGE_KERNEL); +#endif + + /* Validate all required dynamic tmp buffer allocations */ + if (psDevNode == NULL || psDevConfig == NULL || pagearray == NULL) + { + if (psDevNode) + { + OSFreeMem(psDevNode); + } + + if (psDevConfig) + { + OSFreeMem(psDevConfig); + } + + if (pagearray) + { + OSFreeMem(pagearray); + } + + goto e0; + } + + /* Fake psDevNode->psDevConfig->pvOSDevice */ + psDevConfig->pvOSDevice = pvOSDevice; + psDevNode->psDevConfig = psDevConfig; + + /* Evict any page data contents from d-cache */ + for (uiIdx = 0; uiIdx < ui32PgCount; uiIdx++) + { + void *pvVirtStart, *pvVirtEnd; + IMG_CPU_PHYADDR sCPUPhysStart, sCPUPhysEnd; + + /* Prepare array required for vmap */ + pagearray[uiIdx] = &psPage[uiIdx]; + + if (bPageByPage) + { +#if defined(CONFIG_64BIT) + bPageByPage = IMG_FALSE; + + pvVirtStart = kmap(&psPage[uiIdx]); + pvVirtEnd = pvVirtStart + ui64Size; + + sCPUPhysStart.uiAddr = page_to_phys(&psPage[uiIdx]); + sCPUPhysEnd.uiAddr = sCPUPhysStart.uiAddr + 
ui64Size; + /* all pages have a kernel linear address, flush entire range */ +#else + pvVirtStart = kmap(&psPage[uiIdx]); + pvVirtEnd = pvVirtStart + PAGE_SIZE; + + sCPUPhysStart.uiAddr = page_to_phys(&psPage[uiIdx]); + sCPUPhysEnd.uiAddr = sCPUPhysStart.uiAddr + PAGE_SIZE; + /* pages might be from HIGHMEM, need to kmap/flush per page */ +#endif + + /* Fallback to range-based d-cache flush */ + OSCPUCacheInvalidateRangeKM(psDevNode, + pvVirtStart, pvVirtEnd, + sCPUPhysStart, sCPUPhysEnd); + + kunmap(&psPage[uiIdx]); + } + } + + /* Remap pages into VMALLOC space */ +#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS) + pvVirtAddr = vmap(pagearray, ui32PgCount, VM_READ | VM_WRITE, prot); +#else + pvVirtAddr = vm_map_ram(pagearray, ui32PgCount, -1, prot); +#endif + + /* Clean-up tmp buffers */ + OSFreeMem(psDevConfig); + OSFreeMem(psDevNode); + OSFreeMem(pagearray); + +e0: + return pvVirtAddr; +} + +static void SysDmaReleaseKernelAddress(void *pvVirtAddr, IMG_UINT64 ui64Size) +{ +#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS) + vunmap(pvVirtAddr); +#else + vm_unmap_ram(pvVirtAddr, ui64Size >> OSGetPageShift()); +#endif +} + +/*! +****************************************************************************** + @Function SysDmaAllocMem + + @Description Allocates physically contiguous memory + + @Return PVRSRV_ERROR PVRSRV_OK on success. 
Otherwise, a PVRSRV_ + error code + ******************************************************************************/ +PVRSRV_ERROR SysDmaAllocMem(DMA_ALLOC *psDmaAlloc) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + struct device *psDev; + struct page *psPage; + size_t uiSize; + + if (psDmaAlloc == NULL || + psDmaAlloc->hHandle || + psDmaAlloc->pvVirtAddr || + psDmaAlloc->ui64Size == 0 || + psDmaAlloc->sBusAddr.uiAddr || + psDmaAlloc->pvOSDevice == NULL) + { + PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter"); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + uiSize = PVR_ALIGN(psDmaAlloc->ui64Size, PAGE_SIZE); + psDev = (struct device *)psDmaAlloc->pvOSDevice; + +#if !(defined(CONFIG_L4) || defined(PVR_LINUX_PHYSMEM_SUPPRESS_DMA_AC)) +#if defined(CONFIG_MCST) + psDmaAlloc->pvVirtAddr = dma_alloc_attrs( + (struct device *)psDmaAlloc->pvOSDevice, + (size_t) psDmaAlloc->ui64Size, + (dma_addr_t *)&psDmaAlloc->sBusAddr.uiAddr, + GFP_KERNEL, + DMA_ATTR_NON_CONSISTENT); +#else + psDmaAlloc->hHandle = dma_alloc_coherent(psDev, uiSize, (dma_addr_t *)&psDmaAlloc->sBusAddr.uiAddr, GFP_KERNEL); +#endif +#endif + if (psDmaAlloc->hHandle) + { + psDmaAlloc->pvVirtAddr = psDmaAlloc->hHandle; + + PVR_DPF((PVR_DBG_MESSAGE, + "Allocated DMA buffer V:0x%p P:0x%llx S:0x"IMG_SIZE_FMTSPECX, + psDmaAlloc->pvVirtAddr, + psDmaAlloc->sBusAddr.uiAddr, + uiSize)); + } + else if ((psPage = alloc_pages(GFP_KERNEL, get_order(uiSize)))) + { +#if defined(CONFIG_L4) + /* L4 is a para-virtualized environment, the PFN space is a virtual space and not physical space */ + psDmaAlloc->sBusAddr.uiAddr = l4x_virt_to_phys((void*)((unsigned long)page_to_pfn(psPage) << PAGE_SHIFT)); +#else + psDmaAlloc->sBusAddr.uiAddr = dma_map_page(psDev, psPage, 0, uiSize, DMA_BIDIRECTIONAL); + if (dma_mapping_error(psDev, psDmaAlloc->sBusAddr.uiAddr)) + { + PVR_DPF((PVR_DBG_ERROR, + "dma_map_page() failed, page 0x%p order %d", + psPage, + get_order(uiSize))); + __free_pages(psPage, get_order(uiSize)); + goto e0; + } + 
psDmaAlloc->psPage = psPage; +#endif + + psDmaAlloc->pvVirtAddr = SysDmaAcquireKernelAddress(psPage, uiSize, psDmaAlloc->pvOSDevice); + if (! psDmaAlloc->pvVirtAddr) + { + PVR_DPF((PVR_DBG_ERROR, + "SysDmaAcquireKernelAddress() failed, page 0x%p order %d", + psPage, + get_order(uiSize))); +#if !defined(CONFIG_L4) + dma_unmap_page(psDev, psDmaAlloc->sBusAddr.uiAddr, uiSize, DMA_BIDIRECTIONAL); +#endif + __free_pages(psPage, get_order(uiSize)); + goto e0; + } + + PVR_DPF((PVR_DBG_MESSAGE, + "Allocated contiguous buffer V:0x%p P:0x%llx S:0x"IMG_SIZE_FMTSPECX, + psDmaAlloc->pvVirtAddr, + psDmaAlloc->sBusAddr.uiAddr, + uiSize)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "Unable to allocate contiguous buffer, size: 0x"IMG_SIZE_FMTSPECX, uiSize)); + eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES; + } + +e0: + PVR_LOG_RETURN_IF_FALSE((psDmaAlloc->pvVirtAddr), "DMA/CMA allocation failed", PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES); + return eError; +} + +/*! +****************************************************************************** + @Function SysDmaFreeMem + + @Description Free physically contiguous memory + + @Return void + ******************************************************************************/ +void SysDmaFreeMem(DMA_ALLOC *psDmaAlloc) +{ + size_t uiSize; + struct device *psDev; + + if (psDmaAlloc == NULL || + psDmaAlloc->ui64Size == 0 || + psDmaAlloc->pvOSDevice == NULL || + psDmaAlloc->pvVirtAddr == NULL || + psDmaAlloc->sBusAddr.uiAddr == 0) + { + PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter"); + return; + } + + uiSize = PVR_ALIGN(psDmaAlloc->ui64Size, PAGE_SIZE); + psDev = (struct device *)psDmaAlloc->pvOSDevice; + + if (psDmaAlloc->pvVirtAddr != psDmaAlloc->hHandle) + { + SysDmaReleaseKernelAddress(psDmaAlloc->pvVirtAddr, uiSize); + } + + if (! 
psDmaAlloc->hHandle) + { + struct page *psPage; +#if defined(CONFIG_L4) + psPage = pfn_to_page((unsigned long)l4x_phys_to_virt(psDmaAlloc->sBusAddr.uiAddr) >> PAGE_SHIFT); +#else +#ifdef CONFIG_MCST + dma_free_attrs(((struct device *)psDmaAlloc->pvOSDevice)->parent, + (size_t) psDmaAlloc->ui64Size, + psDmaAlloc->pvVirtAddr, + (dma_addr_t )psDmaAlloc->sBusAddr.uiAddr, + DMA_ATTR_NON_CONSISTENT); +#else + dma_unmap_page(psDev, psDmaAlloc->sBusAddr.uiAddr, uiSize, DMA_BIDIRECTIONAL); +#endif + psPage = psDmaAlloc->psPage; +#endif + __free_pages(psPage, get_order(uiSize)); + return; + } + + dma_free_coherent(psDev, uiSize, psDmaAlloc->hHandle, (dma_addr_t )psDmaAlloc->sBusAddr.uiAddr); +} + +/*! +****************************************************************************** + @Function SysDmaRegisterForIoRemapping + + @Description Registers DMA_ALLOC for manual I/O remapping + + @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ + error code + ******************************************************************************/ +PVRSRV_ERROR SysDmaRegisterForIoRemapping(DMA_ALLOC *psDmaAlloc) +{ + size_t uiSize; + IMG_UINT32 ui32Idx; + IMG_BOOL bTabEntryFound = IMG_TRUE; + PVRSRV_ERROR eError = PVRSRV_ERROR_TOO_FEW_BUFFERS; + + if (psDmaAlloc == NULL || + psDmaAlloc->ui64Size == 0 || + psDmaAlloc->pvOSDevice == NULL || + psDmaAlloc->pvVirtAddr == NULL || + psDmaAlloc->sBusAddr.uiAddr == 0) + { + PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter"); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + uiSize = PVR_ALIGN(psDmaAlloc->ui64Size, PAGE_SIZE); + + for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) + { + /* Check if an I/O remap entry exists for remapping */ + if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr == NULL) + { + PVR_ASSERT(gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr == 0); + PVR_ASSERT(gsDmaIoRemapArray[ui32Idx].ui64Size == 0); + break; + } + } + + if (ui32Idx >= DMA_MAX_IOREMAP_ENTRIES) + { + bTabEntryFound = IMG_FALSE; + } + + if 
(bTabEntryFound) + { + IMG_BOOL bSameVAddr, bSamePAddr, bSameSize; + + bSamePAddr = gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr == psDmaAlloc->sBusAddr.uiAddr; + bSameVAddr = gsDmaIoRemapArray[ui32Idx].pvVirtAddr == psDmaAlloc->pvVirtAddr; + bSameSize = gsDmaIoRemapArray[ui32Idx].ui64Size == uiSize; + + if (bSameVAddr) + { + if (bSamePAddr && bSameSize) + { + eError = PVRSRV_OK; + } + else + { + eError = PVRSRV_ERROR_ALREADY_EXISTS; + } + } + else + { + PVR_ASSERT(bSamePAddr == IMG_FALSE); + + gsDmaIoRemapArray[ui32Idx].ui64Size = uiSize; + gsDmaIoRemapArray[ui32Idx].sBusAddr = psDmaAlloc->sBusAddr; + gsDmaIoRemapArray[ui32Idx].pvVirtAddr = psDmaAlloc->pvVirtAddr; + + PVR_DPF((PVR_DBG_MESSAGE, + "DMA: register I/O remap: " + "VA: 0x%p, PA: 0x%llx, Size: 0x"IMG_SIZE_FMTSPECX, + psDmaAlloc->pvVirtAddr, + psDmaAlloc->sBusAddr.uiAddr, + uiSize)); + + gbEnableDmaIoRemapping = IMG_TRUE; + eError = PVRSRV_OK; + } + } + + return eError; +} + +/*! +****************************************************************************** + @Function SysDmaDeregisterForIoRemapping + + @Description Deregisters DMA_ALLOC from manual I/O remapping + + @Return void + ******************************************************************************/ +void SysDmaDeregisterForIoRemapping(DMA_ALLOC *psDmaAlloc) +{ + size_t uiSize; + IMG_UINT32 ui32Idx; + + if (psDmaAlloc == NULL || + psDmaAlloc->ui64Size == 0 || + psDmaAlloc->pvOSDevice == NULL || + psDmaAlloc->pvVirtAddr == NULL || + psDmaAlloc->sBusAddr.uiAddr == 0) + { + PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter"); + return; + } + + uiSize = PVR_ALIGN(psDmaAlloc->ui64Size, PAGE_SIZE); + + /* Remove specified entries from list of I/O remap entries */ + for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) + { + if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr == psDmaAlloc->pvVirtAddr) + { + gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr = 0; + gsDmaIoRemapArray[ui32Idx].pvVirtAddr = NULL; + gsDmaIoRemapArray[ui32Idx].ui64Size = 0; + + 
PVR_DPF((PVR_DBG_MESSAGE, + "DMA: deregister I/O remap: " + "VA: 0x%p, PA: 0x%llx, Size: 0x"IMG_SIZE_FMTSPECX, + psDmaAlloc->pvVirtAddr, + psDmaAlloc->sBusAddr.uiAddr, + uiSize)); + + break; + } + } + + /* Check if no other I/O remap entries exists for remapping */ + for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) + { + if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr != NULL) + { + break; + } + } + + if (ui32Idx == DMA_MAX_IOREMAP_ENTRIES) + { + /* No entries found so disable remapping */ + gbEnableDmaIoRemapping = IMG_FALSE; + } +} + +/*! +****************************************************************************** + @Function SysDmaDevPAddrToCpuVAddr + + @Description Maps a DMA_ALLOC physical address to CPU virtual address + + @Return IMG_CPU_VIRTADDR on success. Otherwise, a NULL + ******************************************************************************/ +IMG_CPU_VIRTADDR SysDmaDevPAddrToCpuVAddr(IMG_UINT64 uiAddr, IMG_UINT64 ui64Size) +{ + IMG_CPU_VIRTADDR pvDMAVirtAddr = NULL; + DMA_ALLOC *psHeapDmaAlloc; + IMG_UINT32 ui32Idx; + + if (gbEnableDmaIoRemapping == IMG_FALSE) + { + return pvDMAVirtAddr; + } + + for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) + { + psHeapDmaAlloc = &gsDmaIoRemapArray[ui32Idx]; + if (psHeapDmaAlloc->sBusAddr.uiAddr && uiAddr >= psHeapDmaAlloc->sBusAddr.uiAddr) + { +#ifdef CONFIG_MCST + /* + * psHeapDmaAlloc->ui64Size already accounts for all + * guest heaps (see SysVzCreateDmaPhysHeap) + */ + IMG_UINT64 uiSpan = psHeapDmaAlloc->ui64Size; +#else + IMG_UINT64 uiSpan = psHeapDmaAlloc->ui64Size; +#endif + IMG_UINT64 uiOffset = uiAddr - psHeapDmaAlloc->sBusAddr.uiAddr; + + if (uiOffset < uiSpan) + { + PVR_ASSERT((uiOffset+ui64Size-1) < uiSpan); + pvDMAVirtAddr = psHeapDmaAlloc->pvVirtAddr + uiOffset; + + PVR_DPF((PVR_DBG_MESSAGE, + "DMA: remap: PA: 0x%llx => VA: 0x%p", + uiAddr, pvDMAVirtAddr)); + + break; + } + } + } + + return pvDMAVirtAddr; +} + +/*! 
+****************************************************************************** + @Function SysDmaCpuVAddrToDevPAddr + + @Description Maps a DMA_ALLOC CPU virtual address to physical address + + @Return Non-zero value on success. Otherwise, a 0 + ******************************************************************************/ +IMG_UINT64 SysDmaCpuVAddrToDevPAddr(IMG_CPU_VIRTADDR pvDMAVirtAddr) +{ + IMG_UINT64 uiAddr = 0; + DMA_ALLOC *psHeapDmaAlloc; + IMG_UINT32 ui32Idx; + + if (gbEnableDmaIoRemapping == IMG_FALSE) + { + return uiAddr; + } + + for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) + { + psHeapDmaAlloc = &gsDmaIoRemapArray[ui32Idx]; + if (psHeapDmaAlloc->pvVirtAddr && pvDMAVirtAddr >= psHeapDmaAlloc->pvVirtAddr) + { +#ifdef CONFIG_MCST + /* + * psHeapDmaAlloc->ui64Size already accounts for all + * guest heaps (see SysVzCreateDmaPhysHeap) + */ + IMG_UINT64 uiSpan = psHeapDmaAlloc->ui64Size; +#else + IMG_UINT64 uiSpan = psHeapDmaAlloc->ui64Size; +#endif + IMG_UINT64 uiOffset = pvDMAVirtAddr - psHeapDmaAlloc->pvVirtAddr; + + if (uiOffset < uiSpan) + { + uiAddr = psHeapDmaAlloc->sBusAddr.uiAddr + uiOffset; + + PVR_DPF((PVR_DBG_MESSAGE, + "DMA: remap: VA: 0x%p => PA: 0x%llx", + pvDMAVirtAddr, uiAddr)); + + break; + } + } + } + + return uiAddr; +} + +/****************************************************************************** + End of file (dma_support.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/common/env/linux/pci_support.c b/drivers/mcst/gpu-imgtec/services/system/rogue/common/env/linux/pci_support.c new file mode 100644 index 000000000000..c3bbcc46cb2c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/common/env/linux/pci_support.c @@ -0,0 +1,726 @@ +/*************************************************************************/ /*! +@File +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include +#include + +#if defined(CONFIG_MTRR) +#include +#endif + +#include "pci_support.h" +#include "allocmem.h" + +typedef struct _PVR_PCI_DEV_TAG +{ + struct pci_dev *psPCIDev; + HOST_PCI_INIT_FLAGS ePCIFlags; + IMG_BOOL abPCIResourceInUse[DEVICE_COUNT_RESOURCE]; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + int iMTRR[DEVICE_COUNT_RESOURCE]; +#endif +} PVR_PCI_DEV; + +/*************************************************************************/ /*! +@Function OSPCISetDev +@Description Set a PCI device for subsequent use. 
+@Input pvPCICookie Pointer to OS specific PCI structure +@Input eFlags Flags +@Return PVRSRV_PCI_DEV_HANDLE Pointer to PCI device handle +*/ /**************************************************************************/ +PVRSRV_PCI_DEV_HANDLE OSPCISetDev(void *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags) +{ + int err; + IMG_UINT32 i; + PVR_PCI_DEV *psPVRPCI; + + psPVRPCI = OSAllocMem(sizeof(*psPVRPCI)); + if (psPVRPCI == NULL) + { + printk(KERN_ERR "OSPCISetDev: Couldn't allocate PVR PCI structure\n"); + return NULL; + } + + psPVRPCI->psPCIDev = (struct pci_dev *)pvPCICookie; + psPVRPCI->ePCIFlags = eFlags; + + err = pci_enable_device(psPVRPCI->psPCIDev); + if (err != 0) + { + printk(KERN_ERR "OSPCISetDev: Couldn't enable device (%d)\n", err); + OSFreeMem(psPVRPCI); + return NULL; + } + + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */ + { + pci_set_master(psPVRPCI->psPCIDev); + } + + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI) /* PRQA S 3358 */ /* misuse of enums */ + { +#if defined(CONFIG_PCI_MSI) + err = pci_enable_msi(psPVRPCI->psPCIDev); + if (err != 0) + { + printk(KERN_ERR "OSPCISetDev: Couldn't enable MSI (%d)", err); + psPVRPCI->ePCIFlags &= ~HOST_PCI_INIT_FLAG_MSI; /* PRQA S 1474,3358,4130 */ /* misuse of enums */ + } +#else + printk(KERN_ERR "OSPCISetDev: MSI support not enabled in the kernel"); +#endif + } + + /* Initialise the PCI resource and MTRR tracking array */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) + { + psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + psPVRPCI->iMTRR[i] = -1; +#endif + } + + return (PVRSRV_PCI_DEV_HANDLE)psPVRPCI; +} + +/*************************************************************************/ /*! +@Function OSPCIAcquireDev +@Description Acquire a PCI device for subsequent use. 
+@Input ui16VendorID Vendor PCI ID +@Input ui16DeviceID Device PCI ID +@Input eFlags Flags +@Return PVRSRV_PCI_DEV_HANDLE Pointer to PCI device handle +*/ /**************************************************************************/ +PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, + IMG_UINT16 ui16DeviceID, + HOST_PCI_INIT_FLAGS eFlags) +{ + struct pci_dev *psPCIDev; + + psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, NULL); + if (psPCIDev == NULL) + { + return NULL; + } + + return OSPCISetDev((void *)psPCIDev, eFlags); +} + +/*************************************************************************/ /*! +@Function OSPCIIRQ +@Description Get the interrupt number for the device. +@Input hPVRPCI PCI device handle +@Output pui16DeviceID Pointer to where the interrupt number + should be returned +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + + if (pui32IRQ == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + *pui32IRQ = psPVRPCI->psPCIDev->irq; + + return PVRSRV_OK; +} + +/* Functions supported by OSPCIAddrRangeFunc */ +enum HOST_PCI_ADDR_RANGE_FUNC +{ + HOST_PCI_ADDR_RANGE_FUNC_LEN, + HOST_PCI_ADDR_RANGE_FUNC_START, + HOST_PCI_ADDR_RANGE_FUNC_END, + HOST_PCI_ADDR_RANGE_FUNC_REQUEST, + HOST_PCI_ADDR_RANGE_FUNC_RELEASE +}; + +/*************************************************************************/ /*! 
+@Function OSPCIAddrRangeFunc +@Description Internal support function for various address range related + functions +@Input eFunc Function to perform +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +@Return IMG_UINT32 Function dependent value +*/ /**************************************************************************/ +static IMG_UINT64 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc, + PVRSRV_PCI_DEV_HANDLE hPVRPCI, + IMG_UINT32 ui32Index) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + + if (ui32Index >= DEVICE_COUNT_RESOURCE) + { + printk(KERN_ERR "OSPCIAddrRangeFunc: Index out of range"); + return 0; + } + + switch (eFunc) + { + case HOST_PCI_ADDR_RANGE_FUNC_LEN: + { + return pci_resource_len(psPVRPCI->psPCIDev, ui32Index); + } + case HOST_PCI_ADDR_RANGE_FUNC_START: + { + return pci_resource_start(psPVRPCI->psPCIDev, ui32Index); + } + case HOST_PCI_ADDR_RANGE_FUNC_END: + { + return pci_resource_end(psPVRPCI->psPCIDev, ui32Index); + } + case HOST_PCI_ADDR_RANGE_FUNC_REQUEST: + { + int err = pci_request_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index, PVRSRV_MODNAME); + if (err != 0) + { + printk(KERN_ERR "OSPCIAddrRangeFunc: pci_request_region_failed (%d)", err); + return 0; + } + psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_TRUE; + return 1; + } + case HOST_PCI_ADDR_RANGE_FUNC_RELEASE: + { + if (psPVRPCI->abPCIResourceInUse[ui32Index]) + { + pci_release_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index); + psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_FALSE; + } + return 1; + } + default: + { + printk(KERN_ERR "OSPCIAddrRangeFunc: Unknown function"); + break; + } + } + + return 0; +} + +/*************************************************************************/ /*! 
+@Function OSPCIAddrRangeLen +@Description Returns length of a given address range +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +@Return IMG_UINT32 Length of address range or 0 if no + such range +*/ /**************************************************************************/ +IMG_UINT64 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ + return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, hPVRPCI, ui32Index); +} + +/*************************************************************************/ /*! +@Function OSPCIAddrRangeStart +@Description Returns the start of a given address range +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +@Return IMG_UINT32 Start of address range or 0 if no + such range +*/ /**************************************************************************/ +IMG_UINT64 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ + return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, hPVRPCI, ui32Index); +} + +/*************************************************************************/ /*! +@Function OSPCIAddrRangeEnd +@Description Returns the end of a given address range +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +@Return IMG_UINT32 End of address range or 0 if no such + range +*/ /**************************************************************************/ +IMG_UINT64 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ + return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, hPVRPCI, ui32Index); +} + +/*************************************************************************/ /*! 
+@Function OSPCIRequestAddrRange +@Description Request a given address range index for subsequent use +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, + IMG_UINT32 ui32Index) +{ + if (OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, hPVRPCI, ui32Index) == 0) + { + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + else + { + return PVRSRV_OK; + } +} + +/*************************************************************************/ /*! +@Function OSPCIReleaseAddrRange +@Description Release a given address range that is no longer being used +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ + if (OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, hPVRPCI, ui32Index) == 0) + { + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + else + { + return PVRSRV_OK; + } +} + +/*************************************************************************/ /*! 
+@Function OSPCIRequestAddrRegion +@Description Request a given region from an address range for subsequent use +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +@Input uiOffset Offset into the address range that forms + the start of the region +@Input uiLength Length of the region +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIRequestAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, + IMG_UINT32 ui32Index, + IMG_UINT64 uiOffset, + IMG_UINT64 uiLength) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + resource_size_t start; + resource_size_t end; + + start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index); + end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index); + + /* Check that the requested region is valid */ + if ((start + uiOffset + uiLength - 1) > end) + { + return PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH; + } + + if (pci_resource_flags(psPVRPCI->psPCIDev, ui32Index) & IORESOURCE_IO) + { + if (request_region(start + uiOffset, uiLength, PVRSRV_MODNAME) == NULL) + { + return PVRSRV_ERROR_PCI_REGION_UNAVAILABLE; + } + } + else + { + if (request_mem_region(start + uiOffset, uiLength, PVRSRV_MODNAME) == NULL) + { + return PVRSRV_ERROR_PCI_REGION_UNAVAILABLE; + } + } + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! 
+@Function OSPCIReleaseAddrRegion +@Description Release a given region, from an address range, that is no + longer in use +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +@Input ui32Offset Offset into the address range that forms + the start of the region +@Input ui32Length Length of the region +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIReleaseAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, + IMG_UINT32 ui32Index, + IMG_UINT64 uiOffset, + IMG_UINT64 uiLength) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + resource_size_t start; + resource_size_t end; + + start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index); + end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index); + + /* Check that the region is valid */ + if ((start + uiOffset + uiLength - 1) > end) + { + return PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH; + } + + if (pci_resource_flags(psPVRPCI->psPCIDev, ui32Index) & IORESOURCE_IO) + { + release_region(start + uiOffset, uiLength); + } + else + { + release_mem_region(start + uiOffset, uiLength); + } + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! 
+@Function OSPCIReleaseDev +@Description Release a PCI device that is no longer being used +@Input hPVRPCI PCI device handle +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + int i; + + /* Release all PCI regions that are currently in use */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) + { + if (psPVRPCI->abPCIResourceInUse[i]) + { + pci_release_region(psPVRPCI->psPCIDev, i); + psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE; + } + } + +#if defined(CONFIG_PCI_MSI) + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI) /* PRQA S 3358 */ /* misuse of enums */ + { + pci_disable_msi(psPVRPCI->psPCIDev); + } +#endif + + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */ + { + pci_clear_master(psPVRPCI->psPCIDev); + } + + pci_disable_device(psPVRPCI->psPCIDev); + + OSFreeMem(psPVRPCI); + /*not nulling pointer, copy on stack*/ + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! 
+@Function OSPCISuspendDev +@Description Prepare PCI device to be turned off by power management +@Input hPVRPCI PCI device handle +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + int i; + int err; + + /* Release all PCI regions that are currently in use */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) + { + if (psPVRPCI->abPCIResourceInUse[i]) + { + pci_release_region(psPVRPCI->psPCIDev, i); + } + } + + err = pci_save_state(psPVRPCI->psPCIDev); + if (err != 0) + { + printk(KERN_ERR "OSPCISuspendDev: pci_save_state_failed (%d)", err); + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + + pci_disable_device(psPVRPCI->psPCIDev); + + err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_SUSPEND)); + switch (err) + { + case 0: + break; + case -EIO: + printk(KERN_ERR "OSPCISuspendDev: device doesn't support PCI PM"); + break; + case -EINVAL: + printk(KERN_ERR "OSPCISuspendDev: can't enter requested power state"); + break; + default: + printk(KERN_ERR "OSPCISuspendDev: pci_set_power_state failed (%d)", err); + break; + } + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! 
+@Function OSPCIResumeDev +@Description Prepare a PCI device to be resumed by power management +@Input hPVRPCI PCI device handle +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + int err; + int i; + + err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_ON)); + switch (err) + { + case 0: + break; + case -EIO: + printk(KERN_ERR "OSPCIResumeDev: device doesn't support PCI PM"); + break; + case -EINVAL: + printk(KERN_ERR "OSPCIResumeDev: can't enter requested power state"); + return PVRSRV_ERROR_UNKNOWN_POWER_STATE; + default: + printk(KERN_ERR "OSPCIResumeDev: pci_set_power_state failed (%d)", err); + return PVRSRV_ERROR_UNKNOWN_POWER_STATE; + } + + pci_restore_state(psPVRPCI->psPCIDev); + + err = pci_enable_device(psPVRPCI->psPCIDev); + if (err != 0) + { + printk(KERN_ERR "OSPCIResumeDev: Couldn't enable device (%d)", err); + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */ + pci_set_master(psPVRPCI->psPCIDev); + + /* Restore the PCI resource tracking array */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) + { + if (psPVRPCI->abPCIResourceInUse[i]) + { + err = pci_request_region(psPVRPCI->psPCIDev, i, PVRSRV_MODNAME); + if (err != 0) + { + printk(KERN_ERR "OSPCIResumeDev: pci_request_region_failed (region %d, error %d)", i, err); + } + } + } + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! +@Function OSPCIGetVendorDeviceIDs +@Description Retrieve PCI vendor ID and device ID. 
+@Input hPVRPCI PCI device handle +@Output pui16VendorID Vendor ID +@Output pui16DeviceID Device ID +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIGetVendorDeviceIDs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, + IMG_UINT16 *pui16VendorID, + IMG_UINT16 *pui16DeviceID) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + struct pci_dev *psPCIDev; + + if (psPVRPCI == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psPCIDev = psPVRPCI->psPCIDev; + if (psPCIDev == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + *pui16VendorID = psPCIDev->vendor; + *pui16DeviceID = psPCIDev->device; + + return PVRSRV_OK; +} + +#if defined(CONFIG_MTRR) + +/*************************************************************************/ /*! +@Function OSPCIClearResourceMTRRs +@Description Clear any BIOS-configured MTRRs for a PCI memory region +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + resource_size_t start, end; + int res; + + start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index); + end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index) + 1; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) + res = arch_io_reserve_memtype_wc(start, end - start); + if (res) + { + return PVRSRV_ERROR_PCI_CALL_FAILED; + } +#endif + res = arch_phys_wc_add(start, end - start); + if (res < 0) + { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) + arch_io_free_memtype_wc(start, end - start); +#endif + + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + psPVRPCI->iMTRR[ui32Index] = res; +#else + + res = mtrr_add(start, end - start, MTRR_TYPE_UNCACHABLE, 0); + if (res < 0) 
+ { + printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res); + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + + res = mtrr_del(res, start, end - start); + if (res < 0) + { + printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)", res); + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + + /* Workaround for overlapping MTRRs. */ + { + IMG_BOOL bGotMTRR0 = IMG_FALSE; + + /* Current mobo BIOSes will normally set up a WRBACK MTRR spanning + * 0->4GB, and then another 4GB->6GB. If the PCI card's automatic & + * overlapping UNCACHABLE MTRR is deleted, we see WRBACK behaviour. + * + * WRBACK is incompatible with some PCI devices, so try to split + * the UNCACHABLE regions up and insert a WRCOMB region instead. + */ + res = mtrr_add(start, end - start, MTRR_TYPE_WRBACK, 0); + if (res < 0) + { + /* If this fails, services has probably run before and created + * a write-combined MTRR for the test chip. Assume it has, and + * don't return an error here. + */ + return PVRSRV_OK; + } + + if (res == 0) + bGotMTRR0 = IMG_TRUE; + + res = mtrr_del(res, start, end - start); + if (res < 0) + { + printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)", res); + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + + if (bGotMTRR0) + { + /* Replace 0 with a non-overlapping WRBACK MTRR */ + res = mtrr_add(0, start, MTRR_TYPE_WRBACK, 0); + if (res < 0) + { + printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res); + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + + /* Add a WRCOMB MTRR for the PCI device memory bar */ + res = mtrr_add(start, end - start, MTRR_TYPE_WRCOMB, 0); + if (res < 0) + { + printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res); + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + } + } +#endif + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! 
+@Function OSPCIReleaseResourceMTRRs +@Description Release resources allocated by OSPCIClearResourceMTRRs +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +*/ /**************************************************************************/ +void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + + if (psPVRPCI->iMTRR[ui32Index] >= 0) + { + arch_phys_wc_del(psPVRPCI->iMTRR[ui32Index]); + psPVRPCI->iMTRR[ui32Index] = -1; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) + { + resource_size_t start, end; + + start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index); + end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index) + 1; + + arch_io_free_memtype_wc(start, end - start); + } +#endif + } +#else + PVR_UNREFERENCED_PARAMETER(hPVRPCI); + PVR_UNREFERENCED_PARAMETER(ui32Index); +#endif +} +#endif /* defined(CONFIG_MTRR) */ diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/common/vmm_type_stub.c b/drivers/mcst/gpu-imgtec/services/system/rogue/common/vmm_type_stub.c new file mode 100644 index 000000000000..747bf4a8e1f1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/common/vmm_type_stub.c @@ -0,0 +1,119 @@ +/*************************************************************************/ /*! +@File vmm_type_stub.c +@Title Stub VM manager type +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Sample stub (no-operation) VM manager implementation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include "pvrsrv.h" +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "rgxheapconfig.h" + +#include "vmm_impl.h" +#include "vmm_pvz_server.h" + +static PVRSRV_ERROR +StubVMMMapDevPhysHeap(IMG_UINT32 ui32FuncID, + IMG_UINT32 ui32DevID, + IMG_UINT64 ui64Size, + IMG_UINT64 ui64Addr) +{ + PVR_UNREFERENCED_PARAMETER(ui32FuncID); + PVR_UNREFERENCED_PARAMETER(ui32DevID); + PVR_UNREFERENCED_PARAMETER(ui64Size); + PVR_UNREFERENCED_PARAMETER(ui64Addr); + return PVRSRV_ERROR_NOT_IMPLEMENTED; +} + +static PVRSRV_ERROR +StubVMMUnmapDevPhysHeap(IMG_UINT32 ui32FuncID, + IMG_UINT32 ui32DevID) +{ + PVR_UNREFERENCED_PARAMETER(ui32FuncID); + PVR_UNREFERENCED_PARAMETER(ui32DevID); + return PVRSRV_ERROR_NOT_IMPLEMENTED; +} + +static VMM_PVZ_CONNECTION gsStubVmmPvz = +{ + .sClientFuncTab = { + /* pfnMapDevPhysHeap */ + &StubVMMMapDevPhysHeap, + + /* pfnUnmapDevPhysHeap */ + &StubVMMUnmapDevPhysHeap + }, + + .sServerFuncTab = { + /* pfnMapDevPhysHeap */ + &PvzServerMapDevPhysHeap, + + /* pfnUnmapDevPhysHeap */ + &PvzServerUnmapDevPhysHeap + }, + + .sVmmFuncTab = { + /* pfnOnVmOnline */ + &PvzServerOnVmOnline, + + /* pfnOnVmOffline */ + &PvzServerOnVmOffline, + + /* pfnVMMConfigure */ + &PvzServerVMMConfigure + } +}; + +PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection) +{ + PVR_LOG_RETURN_IF_FALSE((NULL != psPvzConnection), "VMMCreatePvzConnection", PVRSRV_ERROR_INVALID_PARAMS); + *psPvzConnection = &gsStubVmmPvz; + PVR_DPF((PVR_DBG_ERROR, "Using a stub VM manager type, no runtime VZ support")); + return PVRSRV_OK; +} + +void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection) +{ + PVR_LOG_IF_FALSE((NULL != psPvzConnection), "VMMDestroyPvzConnection"); +} + +/****************************************************************************** + End of file (vmm_type_stub.c) 
+******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/e2c3_gpu/Kbuild.mk b/drivers/mcst/gpu-imgtec/services/system/rogue/e2c3_gpu/Kbuild.mk new file mode 100644 index 000000000000..50b254a86273 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/e2c3_gpu/Kbuild.mk @@ -0,0 +1,58 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@Copyright Copyright (c) MCST +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. 
If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### +PVRSRVKM_NAME = $(PVRSRV_MODNAME) + +$(PVRSRVKM_NAME)-y += \ + services/system/rogue/$(PVR_SYSTEM)/sysconfig.o \ + services/system/rogue/common/env/linux/pci_support.o \ + services/system/rogue/common/env/linux/dma_support.o \ + services/server/common/vmm_pvz_client.o \ + services/server/common/vmm_pvz_server.o \ + services/server/common/vz_vmm_pvz.o \ + services/server/common/vz_vmm_vm.o \ + services/system/rogue/common/vmm_type_$(VMM_TYPE).o + +ccflags-y += \ + -I$(TOP)/services/system/rogue/common/env/linux -I$(TOP)/services/system/rogue/common/env/linux \ + -I$(TOP)/kernel/drivers/staging/imgtec \ + -I$(TOP)/kernel/drivers/staging/imgtec/e2c3_gpu \ + -I$(TOP)/include/rogue/system/e2c3 -I$(TOP)/include/system/e2c3_gpu diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/e2c3_gpu/sysconfig.c b/drivers/mcst/gpu-imgtec/services/system/rogue/e2c3_gpu/sysconfig.c new file mode 100644 index 000000000000..4850a2d67cc8 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/e2c3_gpu/sysconfig.c @@ -0,0 +1,487 @@ +/*************************************************************************/ /*! 
+@File +@Title System Configuration +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Copyright Copyright (c) MCST +@Description System Configuration functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include + +#include "sysinfo.h" + +#include "pvrsrv.h" +#include "pvrsrv_device.h" +#include "rgxdevice.h" +#include "syscommon.h" +#include "allocmem.h" +#include "pvr_debug.h" + +#include "e2c3_gpu_drv.h" + +#include +#include +#include +#include + +#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (10) + +#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS) + +/* Dummy DVFS configuration used purely for testing purposes */ + +static const IMG_OPP asOPPTable[] = { +#if 0 + /* uV Hz */ +/* +Disabled freqs: + { 800000,1000000000}, + { 800000, 941000000}, + { 800000, 889000000}, + { 800000, 842000000}, +*/ + { 800000, 800000000}, /* Maximum traget freq in silicon */ + { 800000, 762000000}, /* Info in Bug 109159 */ + { 800000, 727000000}, + { 800000, 696000000}, + { 800000, 667000000}, + { 800000, 640000000}, + { 800000, 615000000}, + { 800000, 593000000}, + { 800000, 571000000}, + { 800000, 552000000}, + { 800000, 533000000}, + { 800000, 516000000}, + { 800000, 500000000}, + { 800000, 471000000}, + { 800000, 444000000}, + { 800000, 421000000}, + { 800000, 400000000}, +/* +Disabled freqs: + { 800000, 381000000}, + { 800000, 364000000}, + { 800000, 348000000}, + { 800000, 333000000}, + { 800000, 320000000}, + { 800000, 308000000}, + { 800000, 296000000}, + { 800000, 286000000}, + { 800000, 276000000}, + { 800000, 267000000}, + { 800000, 258000000}, +*/ +#else + { 
1000000, 2500000 }, /* FPGA prototype reality */ +#endif +}; + +#define LEVEL_COUNT (sizeof(asOPPTable) / sizeof(IMG_OPP)) + +static void SetFrequency(IMG_UINT32 ui32Frequency) +{ + PVR_DPF((PVR_DBG_WARNING, "SetFrequency %u", ui32Frequency)); +} + +static void SetVoltage(IMG_UINT32 ui32Voltage) +{ + PVR_DPF((PVR_DBG_WARNING, "SetVoltage %u", ui32Voltage)); +} + +#endif + +/* + * CPU to Device physical address translation + */ +static void UMAPhysHeapCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr) +{ + /* Not implemented/used */ + BUG(); +} + +/* + * Device to CPU physical address translation + */ +static void UMAPhysHeapDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = hPrivData; + struct device *dev = (struct device *)psDevConfig->pvOSDevice; + struct iommu_domain *dom = NULL; + IMG_UINT32 ui32Idx; + + if (device_iommu_mapped(dev)) { + dom = iommu_get_domain_for_dev(dev); + BUG_ON(!dom); + } + + for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ++ui32Idx) + { + IMG_UINT64 va = psDevPAddr[ui32Idx].uiAddr; + IMG_UINT64 pa = dom ? 
iommu_iova_to_phys(dom, va) : + dma_to_phys(dev, va); + psCpuPAddr[ui32Idx].uiAddr = pa; + } +} + +static PHYS_HEAP_FUNCTIONS gsPhysHeapFuncs = { + .pfnCpuPAddrToDevPAddr = UMAPhysHeapCpuPAddrToDevPAddr, + .pfnDevPAddrToCpuPAddr = UMAPhysHeapDevPAddrToCpuPAddr, +}; + +static PHYS_HEAP_REGION gsPhysHeapRegion = { + .sStartAddr.uiAddr = 0, + .sCardBase.uiAddr = 0, + .uiSize = 0, + .hPrivData = NULL, +}; + +static PHYS_HEAP_CONFIG gsPhysHeapConfig = { + .ui32PhysHeapID = 0, + .pszPDumpMemspaceName = "SYSMEM", + .eType = PHYS_HEAP_TYPE_UMA, + .psMemFuncs = &gsPhysHeapFuncs, + .pasRegions = &gsPhysHeapRegion, + .ui32NumOfRegions = 1, + .hPrivData = NULL, +}; + +typedef struct _SYS_DATA_ SYS_DATA; + +struct _SYS_DATA_ { + struct platform_device *pdev; + + struct e2c3_gpu_rogue_platform_data *pdata; + + struct resource *registers; +}; + +static void DeviceConfigDestroy(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + OSFreeMem(psDevConfig); +} + +static PVRSRV_ERROR DeviceConfigCreate(SYS_DATA *psSysData, + PVRSRV_DEVICE_CONFIG **ppsDevConfigOut) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig; + RGX_DATA *psRGXData; + RGX_TIMING_INFORMATION *psRGXTimingInfo; + + psDevConfig = OSAllocZMem(sizeof(*psDevConfig) + sizeof(*psRGXData) + + sizeof(*psRGXTimingInfo)); + if (!psDevConfig) { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psRGXData = + (RGX_DATA *)IMG_OFFSET_ADDR(psDevConfig, sizeof(*psDevConfig)); + psRGXTimingInfo = (RGX_TIMING_INFORMATION *)IMG_OFFSET_ADDR( + psRGXData, sizeof(*psRGXData)); + + /* Setup RGX specific timing data */ + psRGXTimingInfo->ui32CoreClockSpeed = 800000000; + psRGXTimingInfo->bEnableActivePM = IMG_FALSE; + psRGXTimingInfo->bEnableRDPowIsland = IMG_FALSE; + psRGXTimingInfo->ui32ActivePMLatencyms = + SYS_RGX_ACTIVE_POWER_LATENCY_MS; + + /* Set up the RGX data */ + psRGXData->psRGXTimingInfo = psRGXTimingInfo; + + /* Setup the device config */ + psDevConfig->pvOSDevice = &psSysData->pdev->dev; + psDevConfig->pszName = "e2c3-gpu"; + psDevConfig->pszVersion = 
NULL; + + psDevConfig->sRegsCpuPBase.uiAddr = psSysData->registers->start; + psDevConfig->ui32RegsSize = + (IMG_UINT32)resource_size(psSysData->registers); + + psDevConfig->ui32IRQ = 0; + + if (NATIVE_IS_MACHINE_SIM) { + /* simulator does not support snoops */ + psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE; + } else { + psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CPU_ONLY; + } + + /* Device's physical heaps */ + psDevConfig->pasPhysHeaps = &gsPhysHeapConfig; + BUG_ON(gsPhysHeapConfig.hPrivData); + gsPhysHeapConfig.hPrivData = psDevConfig; + psDevConfig->ui32PhysHeapCount = 1; + + /* Device's physical heap IDs */ + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = 0; + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = 0; + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = 0; + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL] = 0; + + /* Only required for LMA but having this always set shouldn't be a problem */ + psDevConfig->bDevicePA0IsValid = IMG_TRUE; + + psDevConfig->hDevData = psRGXData; + psDevConfig->hSysData = psSysData; + + *ppsDevConfigOut = psDevConfig; + + return PVRSRV_OK; +} + +PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig; + SYS_DATA *psSysData; + resource_size_t uiRegistersSize; + PVRSRV_ERROR eError; + int err = 0; + + PVR_ASSERT(pvOSDevice); + + psSysData = OSAllocZMem(sizeof(*psSysData)); + if (psSysData == NULL) { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psSysData->pdev = to_platform_device((struct device *)pvOSDevice); + psSysData->pdata = psSysData->pdev->dev.platform_data; + + dma_set_mask(pvOSDevice, DMA_BIT_MASK(40)); + + err = e2c3_gpu_enable(psSysData->pdev->dev.parent); + if (err) { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to enable PCI device (%d)", + __func__, err)); + eError = PVRSRV_ERROR_PCI_CALL_FAILED; + goto ErrFreeSysData; + } + + psSysData->registers = 
platform_get_resource_byname( + psSysData->pdev, IORESOURCE_MEM, "rogue-regs"); + if (!psSysData->registers) { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to get Rogue register information", + __func__)); + eError = PVRSRV_ERROR_PCI_REGION_UNAVAILABLE; + goto ErrorDevDisable; + } + + /* Check the address range is large enough. */ + uiRegistersSize = resource_size(psSysData->registers); + if (uiRegistersSize < E2C3_GPU_RGX_REG_REGION_SIZE) { + PVR_DPF(( + PVR_DBG_ERROR, + "%s: Rogue register region isn't big enough (was %pa, required 0x%08x)", + __func__, &uiRegistersSize, + E2C3_GPU_RGX_REG_REGION_SIZE)); + + eError = PVRSRV_ERROR_PCI_REGION_TOO_SMALL; + goto ErrorDevDisable; + } + + /* Reserve the address range */ + if (!request_mem_region(psSysData->registers->start, + resource_size(psSysData->registers), + SYS_RGX_DEV_NAME)) { + PVR_DPF((PVR_DBG_ERROR, + "%s: Rogue register memory region not available", + __func__)); + eError = PVRSRV_ERROR_PCI_CALL_FAILED; + + goto ErrorDevDisable; + } + + eError = DeviceConfigCreate(psSysData, &psDevConfig); + if (eError != PVRSRV_OK) { + goto ErrorReleaseMemRegion; + } + +#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS) + /* Dummy DVFS configuration used purely for testing purposes */ + psDevConfig->sDVFS.sDVFSDeviceCfg.pasOPPTable = asOPPTable; + psDevConfig->sDVFS.sDVFSDeviceCfg.ui32OPPTableSize = LEVEL_COUNT; + psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetFrequency = SetFrequency; + psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetVoltage = SetVoltage; +#endif +#if defined(SUPPORT_LINUX_DVFS) + psDevConfig->sDVFS.sDVFSDeviceCfg.ui32PollMs = 1000; + psDevConfig->sDVFS.sDVFSDeviceCfg.bIdleReq = IMG_TRUE; + psDevConfig->sDVFS.sDVFSGovernorCfg.ui32UpThreshold = 90; + psDevConfig->sDVFS.sDVFSGovernorCfg.ui32DownDifferential = 10; +#endif + + *ppsDevConfig = psDevConfig; + + return PVRSRV_OK; + +ErrorReleaseMemRegion: + release_mem_region(psSysData->registers->start, + resource_size(psSysData->registers)); +ErrorDevDisable: + 
e2c3_gpu_disable(psSysData->pdev->dev.parent); +ErrFreeSysData: + OSFreeMem(psSysData); + return eError; +} + +void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + SYS_DATA *psSysData = (SYS_DATA *)psDevConfig->hSysData; + + DeviceConfigDestroy(psDevConfig); + + release_mem_region(psSysData->registers->start, + resource_size(psSysData->registers)); + e2c3_gpu_disable(psSysData->pdev->dev.parent); + + OSFreeMem(psSysData); +} + +PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVR_UNREFERENCED_PARAMETER(psDevConfig); + PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf); + + PVR_DUMPDEBUG_LOG("------[ %s system debug ]------", SYS_RGX_DEV_NAME); + PVR_DUMPDEBUG_LOG("TODO: (temp, pll state and other)"); + + return PVRSRV_OK; +} + +typedef struct { + struct device *psDev; + void *pvData; + PFN_LISR pfnLISR; +} LISR_DATA; + +static void E2C3_GPU_InterruptHandler(void *pvData) +{ + LISR_DATA *psLISRData = pvData; + psLISRData->pfnLISR(psLISRData->pvData); +} + +PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData, IMG_UINT32 ui32IRQ, + const IMG_CHAR *pszName, PFN_LISR pfnLISR, + void *pvData, IMG_HANDLE *phLISRData) +{ + SYS_DATA *psSysData = (SYS_DATA *)hSysData; + LISR_DATA *psLISRData; + PVRSRV_ERROR eError; + int err; + + PVR_UNREFERENCED_PARAMETER(ui32IRQ); + + psLISRData = OSAllocZMem(sizeof(*psLISRData)); + if (!psLISRData) { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_out; + } + + psLISRData->pfnLISR = pfnLISR; + psLISRData->pvData = pvData; + psLISRData->psDev = psSysData->pdev->dev.parent; + + err = e2c3_gpu_set_interrupt_handler( + psLISRData->psDev, E2C3_GPU_InterruptHandler, psLISRData); + if (err) { + PVR_DPF((PVR_DBG_ERROR, + "%s: e2c3_gpu_set_interrupt_handler() failed (%d)", + __func__, err)); + eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR; + goto err_free_data; + } + + err = e2c3_gpu_enable_interrupt(psLISRData->psDev); + if (err) { + PVR_DPF((PVR_DBG_ERROR, + 
"%s: e2c3_gpu_enable_interrupt() failed (%d)", + __func__, err)); + eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR; + goto err_unset_interrupt_handler; + } + + *phLISRData = psLISRData; + eError = PVRSRV_OK; + + PVR_TRACE(("Installed device LISR " IMG_PFN_FMTSPEC, pfnLISR)); + +err_out: + return eError; +err_unset_interrupt_handler: + e2c3_gpu_set_interrupt_handler(psLISRData->psDev, NULL, NULL); +err_free_data: + OSFreeMem(psLISRData); + goto err_out; +} + +PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData) +{ + LISR_DATA *psLISRData = (LISR_DATA *)hLISRData; + int err; + + err = e2c3_gpu_disable_interrupt(psLISRData->psDev); + if (err) { + PVR_DPF((PVR_DBG_ERROR, + "%s: e2c3_gpu_disable_interrupt() failed (%d)", + __func__, err)); + } + + err = e2c3_gpu_set_interrupt_handler(psLISRData->psDev, NULL, NULL); + if (err) { + PVR_DPF((PVR_DBG_ERROR, + "%s: e2c3_gpu_set_interrupt_handler() failed (%d)", + __func__, err)); + } + + PVR_TRACE(("Uninstalled device LISR " IMG_PFN_FMTSPEC, + psLISRData->pfnLISR)); + + OSFreeMem(psLISRData); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/e2c3_gpu/sysinfo.h b/drivers/mcst/gpu-imgtec/services/system/rogue/e2c3_gpu/sysinfo.h new file mode 100644 index 000000000000..6d869e52bd31 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/e2c3_gpu/sysinfo.h @@ -0,0 +1,62 @@ +/*************************************************************************/ /*! +@File +@Title System Description Header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Copyright Copyright (c) MCST +@Description This header provides system-specific declarations and macros +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__SYSINFO_H__) +#define __SYSINFO_H__ + +/*!< System specific poll/timeout details */ +#if defined(VIRTUAL_PLATFORM) +#define MAX_HW_TIME_US (24000000) +#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (120000) +#else +#define MAX_HW_TIME_US (1000000) +#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (10000) +#endif + +#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000) +#define WAIT_TRY_COUNT (20000) + +#define SYS_RGX_DEV_NAME "e2c3_gpu_rogue" + +#endif /* !defined(__SYSINFO_H__) */ diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/include/dma_support.h b/drivers/mcst/gpu-imgtec/services/system/rogue/include/dma_support.h new file mode 100644 index 000000000000..80fd4d8eb3f3 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/include/dma_support.h @@ -0,0 +1,116 @@ +/*************************************************************************/ /*! +@File dma_support.h +@Title Device contiguous memory allocator and I/O re-mapper +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides a contiguous memory allocator API; mainly + used for allocating / ioremapping (DMA/PA <-> CPU/VA) +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef DMA_SUPPORT_H +#define DMA_SUPPORT_H + +#include "osfunc.h" +#include "pvrsrv.h" + +typedef struct _DMA_ALLOC_ +{ + IMG_UINT64 ui64Size; + IMG_CPU_VIRTADDR pvVirtAddr; + IMG_DEV_PHYADDR sBusAddr; + IMG_HANDLE hHandle; +#if defined(LINUX) + struct page *psPage; +#endif + void *pvOSDevice; +} DMA_ALLOC; + +/*! +******************************************************************************* + @Function SysDmaAllocMem + @Description Allocates physically contiguous memory + @Return PVRSRV_OK on success. 
Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR SysDmaAllocMem(DMA_ALLOC *psDmaAlloc); + +/*! +******************************************************************************* + @Function SysDmaFreeMem + @Description Free physically contiguous memory + @Return void +******************************************************************************/ +void SysDmaFreeMem(DMA_ALLOC *psCmaAlloc); + +/*! +******************************************************************************* + @Function SysDmaRegisterForIoRemapping + @Description Registers DMA_ALLOC for manual I/O remapping + @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR SysDmaRegisterForIoRemapping(DMA_ALLOC *psPhysHeapDmaAlloc); + +/*! +******************************************************************************* + @Function SysDmaDeregisterForIoRemapping + @Description Deregisters DMA_ALLOC from manual I/O remapping + @Return void +******************************************************************************/ +void SysDmaDeregisterForIoRemapping(DMA_ALLOC *psPhysHeapDmaAlloc); + +/*! +******************************************************************************* + @Function SysDmaDevPAddrToCpuVAddr + @Description Maps a DMA_ALLOC physical address to CPU virtual address + @Return IMG_CPU_VIRTADDR on success. Otherwise, a NULL +******************************************************************************/ +IMG_CPU_VIRTADDR +SysDmaDevPAddrToCpuVAddr(IMG_UINT64 uiAddr, IMG_UINT64 ui64Size); + +/*! +******************************************************************************* + @Function SysDmaCpuVAddrToDevPAddr + @Description Maps a DMA_ALLOC CPU virtual address to physical address + @Return Non-zero value on success. 
Otherwise, a 0 +******************************************************************************/ +IMG_UINT64 SysDmaCpuVAddrToDevPAddr(IMG_CPU_VIRTADDR pvDMAVirtAddr); + +#endif /* DMA_SUPPORT_H */ + +/****************************************************************************** + End of file (dma_support.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/include/interrupt_support.h b/drivers/mcst/gpu-imgtec/services/system/rogue/include/interrupt_support.h new file mode 100644 index 000000000000..0cca1ac22b19 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/include/interrupt_support.h @@ -0,0 +1,103 @@ +/*************************************************************************/ /*! +@File +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__INTERRUPT_SUPPORT_H__) +#define __INTERRUPT_SUPPORT_H__ + +#include "img_types.h" +#include "pvrsrv_error.h" +#include "pvrsrv_device.h" + +/*! Default trigger type for the interrupt line. */ +#define SYS_IRQ_FLAG_TRIGGER_DEFAULT (0x0 << 0) +/*! Interrupt triggered when interrupt line is low. */ +#define SYS_IRQ_FLAG_TRIGGER_LOW (0x1 << 0) +/*! Interrupt triggered when interrupt line is high. */ +#define SYS_IRQ_FLAG_TRIGGER_HIGH (0x2 << 0) +/*! Interrupt trigger mask. */ +#define SYS_IRQ_FLAG_TRIGGER_MASK (SYS_IRQ_FLAG_TRIGGER_DEFAULT | \ + SYS_IRQ_FLAG_TRIGGER_LOW | \ + SYS_IRQ_FLAG_TRIGGER_HIGH) +/*! The irq is allowed to be shared among several devices. */ +#define SYS_IRQ_FLAG_SHARED (0x1 << 8) + +/*! Interrupt flags mask. 
*/ +#define SYS_IRQ_FLAG_MASK (SYS_IRQ_FLAG_TRIGGER_MASK | \ + SYS_IRQ_FLAG_SHARED) + +/*************************************************************************/ /*! +@Description Pointer to a system Low-level Interrupt Service Routine (LISR). +@Input pvData Private data provided to the LISR. +@Return IMG_TRUE if interrupt handled, IMG_FALSE otherwise. +*/ /**************************************************************************/ +typedef IMG_BOOL (*PFN_SYS_LISR)(void *pvData); + +/*************************************************************************/ /*! +@Function OSInstallSystemLISR +@Description Installs a system low-level interrupt handler +@Output phLISR On return, contains a handle to the + installed LISR +@Input ui32IRQ The IRQ number for which the + interrupt handler should be installed +@Input pszDevName Name of the device for which the handler + is being installed +@Input pfnLISR A pointer to an interrupt handler + function +@Input pvData A pointer to data that should be passed + to pfnLISR when it is called +@Input ui32Flags Interrupt flags +@Return PVRSRV_OK on success, a failure code otherwise +*/ /**************************************************************************/ +PVRSRV_ERROR OSInstallSystemLISR(IMG_HANDLE *phLISR, + IMG_UINT32 ui32IRQ, + const IMG_CHAR *pszDevName, + PFN_SYS_LISR pfnLISR, + void *pvData, + IMG_UINT32 ui32Flags); + +/*************************************************************************/ /*! 
+@Function OSUninstallSystemLISR +@Description Uninstalls a system low-level interrupt handler +@Input hLISRData The handle to the LISR to uninstall +@Return PVRSRV_OK on success, a failure code otherwise +*/ /**************************************************************************/ +PVRSRV_ERROR OSUninstallSystemLISR(IMG_HANDLE hLISRData); +#endif /* !defined(__INTERRUPT_SUPPORT_H__) */ diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/include/pci_support.h b/drivers/mcst/gpu-imgtec/services/system/rogue/include/pci_support.h new file mode 100644 index 000000000000..45870b8442ab --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/include/pci_support.h @@ -0,0 +1,99 @@ +/*************************************************************************/ /*! +@File +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef PCI_SUPPORT_H +#define PCI_SUPPORT_H + +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(LINUX) +#include +#define TO_PCI_COOKIE(dev) to_pci_dev((struct device *)(dev)) +#else +#define TO_PCI_COOKIE(dev) (dev) +#endif + +typedef enum _HOST_PCI_INIT_FLAGS_ +{ + HOST_PCI_INIT_FLAG_BUS_MASTER = 0x00000001, + HOST_PCI_INIT_FLAG_MSI = 0x00000002, + HOST_PCI_INIT_FLAG_FORCE_I32 = 0x7fffffff +} HOST_PCI_INIT_FLAGS; + +struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_; +typedef struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_ *PVRSRV_PCI_DEV_HANDLE; + +PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags); +PVRSRV_PCI_DEV_HANDLE OSPCISetDev(void *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags); +PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI); +PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ); +IMG_UINT64 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +IMG_UINT64 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +IMG_UINT64 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +PVRSRV_ERROR OSPCIRequestAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index, IMG_UINT64 uiOffset, IMG_UINT64 uiLength); +PVRSRV_ERROR OSPCIReleaseAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index, IMG_UINT64 uiOffset, IMG_UINT64 uiLength); +PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI); +PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI); +PVRSRV_ERROR OSPCIGetVendorDeviceIDs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT16 *pui16VendorID, IMG_UINT16 *pui16DeviceID); + +#if defined(CONFIG_MTRR) +PVRSRV_ERROR 
OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +#else +static inline PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ + PVR_UNREFERENCED_PARAMETER(hPVRPCI); + PVR_UNREFERENCED_PARAMETER(ui32Index); + return PVRSRV_OK; +} + +static inline void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ + PVR_UNREFERENCED_PARAMETER(hPVRPCI); + PVR_UNREFERENCED_PARAMETER(ui32Index); +} +#endif + +#endif /* PCI_SUPPORT_H */ diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/include/syscommon.h b/drivers/mcst/gpu-imgtec/services/system/rogue/include/syscommon.h new file mode 100644 index 000000000000..a2589d0c2c6a --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/include/syscommon.h @@ -0,0 +1,129 @@ +/**************************************************************************/ /*! +@File +@Title Common System APIs and structures +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides common system-specific declarations and + macros that are supported by all systems +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#if !defined(__SYSCOMMON_H__) +#define __SYSCOMMON_H__ + +#include "img_types.h" +#include "pvr_notifier.h" +#include "pvrsrv_device.h" +#include "pvrsrv_error.h" + +/*************************************************************************/ /*! +@Description Pointer to a Low-level Interrupt Service Routine (LISR). +@Input pvData Private data provided to the LISR. +@Return True if interrupt handled, false otherwise. 
+*/ /**************************************************************************/ +typedef IMG_BOOL (*PFN_LISR)(void *pvData); + +/**************************************************************************/ /*! +@Function SysDevInit +@Description System specific device initialisation function. +@Input pvOSDevice pointer to the OS device reference +@Input ppsDevConfig returned device configuration info +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /***************************************************************************/ +PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig); + +/**************************************************************************/ /*! +@Function SysDevDeInit +@Description System specific device deinitialisation function. +@Input psDevConfig device configuration info of the device to be + deinitialised +@Return None. +*/ /***************************************************************************/ +void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig); + +/**************************************************************************/ /*! +@Function SysDebugInfo +@Description Dump system specific device debug information. +@Input psDevConfig pointer to device configuration info +@Input pfnDumpDebugPrintf the 'printf' function to be called to + display the debug info +@Input pvDumpDebugFile optional file identifier to be passed to + the 'printf' function if required +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /***************************************************************************/ +PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); + +/**************************************************************************/ /*! +@Function SysInstallDeviceLISR +@Description Installs the system Low-level Interrupt Service Routine (LISR) + which handles low-level processing of interrupts from the device + (GPU). 
+ The LISR will be invoked when the device raises an interrupt. An + LISR may not be descheduled, so code which needs to do so should + be placed in an MISR. + The installed LISR will schedule any MISRs once it has completed + its interrupt processing, by calling OSScheduleMISR(). +@Input hSysData pointer to the system data of the device +@Input ui32IRQ the IRQ on which the LISR is to be installed +@Input pszName name of the module installing the LISR +@Input pfnLISR pointer to the function to be installed as the + LISR +@Input pvData private data provided to the LISR +@Output phLISRData handle to the installed LISR (to be used for a + subsequent uninstall) +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /***************************************************************************/ +PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData, + IMG_UINT32 ui32IRQ, + const IMG_CHAR *pszName, + PFN_LISR pfnLISR, + void *pvData, + IMG_HANDLE *phLISRData); + +/**************************************************************************/ /*! +@Function SysUninstallDeviceLISR +@Description Uninstalls the system Low-level Interrupt Service Routine (LISR) + which handles low-level processing of interrupts from the device + (GPU). +@Input hLISRData handle of the LISR to be uninstalled +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /***************************************************************************/ +PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData); + +#endif /* !defined(__SYSCOMMON_H__) */ diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/include/sysvalidation.h b/drivers/mcst/gpu-imgtec/services/system/rogue/include/sysvalidation.h new file mode 100644 index 000000000000..dc3791d3b6c1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/include/sysvalidation.h @@ -0,0 +1,62 @@ +/*************************************************************************/ /*! 
+@File +@Title Validation System APIs and structures +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides system-specific declarations and macros + needed for hardware validation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__SYSVALIDATION_H__) +#define __SYSVALIDATION_H__ + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#include "img_types.h" +#include "rgxdefs_km.h" +#include "virt_validation_defs.h" + +void SysInitVirtInitialization(IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], + IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]); + +#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR) +void SysSetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState); +void SysSetTrustedDeviceAceEnabled(void); +#endif +#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ + +#endif /* !defined(__SYSVALIDATION_H__) */ diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/mt8173/Kbuild.mk b/drivers/mcst/gpu-imgtec/services/system/rogue/mt8173/Kbuild.mk new file mode 100644 index 000000000000..712a7e607515 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/mt8173/Kbuild.mk @@ -0,0 +1,63 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +PVRSRVKM_NAME = $(PVRSRV_MODNAME) + +ifeq ($(KERNEL_DRIVER_DIR),) + SYSTEM_BASEDIR := services/system/rogue/$(PVR_SYSTEM) +else + SYSTEM_BASEDIR := external/$(KERNEL_DRIVER_DIR)/$(PVR_SYSTEM) +endif + +$(PVRSRVKM_NAME)-y += \ + $(SYSTEM_BASEDIR)/mt8173_mfgsys.o \ + $(SYSTEM_BASEDIR)/mt8173_sysconfig.o \ + services/system/rogue/common/env/linux/dma_support.o \ + services/server/common/vmm_pvz_client.o \ + services/server/common/vmm_pvz_server.o \ + services/server/common/vz_vmm_pvz.o \ + services/server/common/vz_vmm_vm.o \ + services/system/rogue/common/vmm_type_$(VMM_TYPE).o + +ifeq ($(SUPPORT_ION),1) + $(PVRSRVKM_NAME)-y += \ + services/system/rogue/common/env/linux/ion_support_generic.o +endif diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/mt8173/mt8173_mfgsys.c b/drivers/mcst/gpu-imgtec/services/system/rogue/mt8173/mt8173_mfgsys.c new file mode 100644 index 000000000000..6689079d2dab --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/mt8173/mt8173_mfgsys.c @@ -0,0 +1,330 @@ +/* +* Copyright (c) 2014 MediaTek Inc. +* Author: Chiawen Lee +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License version 2 as +* published by the Free Software Foundation. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. 
+*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mt8173_mfgsys.h" + +static const char * const top_mfg_clk_name[] = { + "mfg_mem_in_sel", + "mfg_axi_in_sel", + "top_axi", + "top_mem", +}; + +#define MAX_TOP_MFG_CLK ARRAY_SIZE(top_mfg_clk_name) + +#define REG_MFG_AXI BIT(0) +#define REG_MFG_MEM BIT(1) +#define REG_MFG_G3D BIT(2) +#define REG_MFG_26M BIT(3) +#define REG_MFG_ALL (REG_MFG_AXI | REG_MFG_MEM | REG_MFG_G3D | REG_MFG_26M) + +#define REG_MFG_CG_STA 0x00 +#define REG_MFG_CG_SET 0x04 +#define REG_MFG_CG_CLR 0x08 + +static void mtk_mfg_clr_clock_gating(void __iomem *reg) +{ + writel(REG_MFG_ALL, reg + REG_MFG_CG_CLR); +} + +static int mtk_mfg_prepare_clock(struct mtk_mfg *mfg) +{ + int i; + int ret; + + for (i = 0; i < MAX_TOP_MFG_CLK; i++) { + ret = clk_prepare(mfg->top_clk[i]); + if (ret) + goto unwind; + } + ret = clk_prepare(mfg->top_mfg); + if (ret) + goto unwind; + + return 0; +unwind: + while (i--) + clk_unprepare(mfg->top_clk[i]); + + return ret; +} + +static void mtk_mfg_unprepare_clock(struct mtk_mfg *mfg) +{ + int i; + + clk_unprepare(mfg->top_mfg); + for (i = MAX_TOP_MFG_CLK - 1; i >= 0; i--) + clk_unprepare(mfg->top_clk[i]); +} + +static int mtk_mfg_enable_clock(struct mtk_mfg *mfg) +{ + int i; + int ret; + + for (i = 0; i < MAX_TOP_MFG_CLK; i++) { + ret = clk_enable(mfg->top_clk[i]); + if (ret) + goto unwind; + } + ret = clk_enable(mfg->top_mfg); + if (ret) + goto unwind; + mtk_mfg_clr_clock_gating(mfg->reg_base); + + return 0; +unwind: + while (i--) + clk_disable(mfg->top_clk[i]); + + return ret; +} + +static void mtk_mfg_disable_clock(struct mtk_mfg *mfg) +{ + int i; + + clk_disable(mfg->top_mfg); + for (i = MAX_TOP_MFG_CLK - 1; i >= 0; i--) + clk_disable(mfg->top_clk[i]); +} + +static void mtk_mfg_enable_hw_apm(struct mtk_mfg *mfg) +{ + writel(0x003c3d4d, mfg->reg_base + 0x24); + writel(0x4d45440b, mfg->reg_base + 0x28); + writel(0x7a710184, mfg->reg_base + 0xe0); 
+ writel(0x835f6856, mfg->reg_base + 0xe4); + writel(0x002b0234, mfg->reg_base + 0xe8); + writel(0x80000000, mfg->reg_base + 0xec); + writel(0x08000000, mfg->reg_base + 0xa0); +} + +int mtk_mfg_enable(struct mtk_mfg *mfg) +{ + int ret; + + ret = regulator_enable(mfg->vgpu); + if (ret) + return ret; + + ret = pm_runtime_get_sync(mfg->dev); + if (ret) + goto err_regulator_disable; + + ret = mtk_mfg_enable_clock(mfg); + if (ret) + goto err_pm_runtime_put; + + mtk_mfg_enable_hw_apm(mfg); + + return 0; + +err_pm_runtime_put: + pm_runtime_put_sync(mfg->dev); +err_regulator_disable: + regulator_disable(mfg->vgpu); + return ret; +} + +void mtk_mfg_disable(struct mtk_mfg *mfg) +{ + mtk_mfg_disable_clock(mfg); + pm_runtime_put_sync(mfg->dev); + regulator_disable(mfg->vgpu); + +} + +int mtk_mfg_freq_set(struct mtk_mfg *mfg, unsigned long freq) +{ + int ret; + + ret = clk_prepare_enable(mfg->top_mfg); + if (ret) { + dev_err(mfg->dev, "enable and prepare top_mfg failed, %d\n", ret); + return ret; + } + + ret = clk_set_parent(mfg->top_mfg, mfg->clk26m); + if (ret) { + dev_err(mfg->dev, "Set clk parent to clk26m failed, %d\n", ret); + goto unprepare_top_mfg; + } + + ret = clk_set_rate(mfg->mmpll, freq); + if (ret) + dev_err(mfg->dev, "Set freq to %lu Hz failed, %d\n", freq, ret); + + ret = clk_set_parent(mfg->top_mfg, mfg->top_mmpll); + if (ret) + dev_err(mfg->dev, "Set clk parent to top_mmpll failed, %d\n", ret); + +unprepare_top_mfg: + clk_disable_unprepare(mfg->top_mfg); + + if (!ret) + dev_dbg(mfg->dev, "Freq set to %lu Hz\n", freq); + + return ret; +} + +int mtk_mfg_volt_set(struct mtk_mfg *mfg, int volt) +{ + int ret; + + ret = regulator_set_voltage(mfg->vgpu, volt, volt); + if (ret != 0) { + dev_err(mfg->dev, "Set voltage to %u uV failed, %d\n", + volt, ret); + return ret; + } + + dev_dbg(mfg->dev, "Voltage set to %d uV\n", volt); + + return 0; +} + +static int mtk_mfg_bind_device_resource(struct mtk_mfg *mfg) +{ + struct device *dev = mfg->dev; + struct platform_device 
*pdev = to_platform_device(dev); + int i; + struct resource *res; + + mfg->top_clk = devm_kcalloc(dev, MAX_TOP_MFG_CLK, + sizeof(*mfg->top_clk), GFP_KERNEL); + if (!mfg->top_clk) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + mfg->rgx_start = res->start; + mfg->rgx_size = resource_size(res); + + mfg->rgx_irq = platform_get_irq_byname(pdev, "RGX"); + if (mfg->rgx_irq < 0) + return mfg->rgx_irq; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + mfg->reg_base = devm_ioremap_resource(dev, res); + if (IS_ERR(mfg->reg_base)) + return PTR_ERR(mfg->reg_base); + + mfg->mmpll = devm_clk_get(dev, "mmpll_clk"); + if (IS_ERR(mfg->mmpll)) { + dev_err(dev, "devm_clk_get mmpll_clk failed !!!\n"); + return PTR_ERR(mfg->mmpll); + } + + for (i = 0; i < MAX_TOP_MFG_CLK; i++) { + mfg->top_clk[i] = devm_clk_get(dev, top_mfg_clk_name[i]); + if (IS_ERR(mfg->top_clk[i])) { + dev_err(dev, "devm_clk_get %s failed !!!\n", + top_mfg_clk_name[i]); + return PTR_ERR(mfg->top_clk[i]); + } + } + + mfg->top_mfg = devm_clk_get(dev, "top_mfg"); + if (IS_ERR(mfg->top_mfg)) { + dev_err(dev, "devm_clk_get top_mfg failed !!!\n"); + return PTR_ERR(mfg->top_mfg); + } + + mfg->top_mmpll = devm_clk_get(dev, "top_mmpll"); + if (IS_ERR(mfg->top_mmpll)) { + dev_err(dev, "devm_clk_get top_mmpll failed !!!\n"); + return PTR_ERR(mfg->top_mmpll); + } + + mfg->clk26m = devm_clk_get(dev, "clk26m"); + if (IS_ERR(mfg->clk26m)) { + dev_err(dev, "devm_clk_get clk26m failed !!!\n"); + return PTR_ERR(mfg->clk26m); + } + +#if defined(CONFIG_DEVFREQ_THERMAL) + mfg->tz = thermal_zone_get_zone_by_name("cpu_thermal"); + if (IS_ERR(mfg->tz)) { + dev_warn(dev, "Failed to get cpu_thermal zone\n"); + } +#endif + + mfg->vgpu = devm_regulator_get(dev, "mfgsys-power"); + if (IS_ERR(mfg->vgpu)) + return PTR_ERR(mfg->vgpu); + + pm_runtime_enable(dev); + + return 0; +} + +static void mtk_mfg_unbind_device_resource(struct mtk_mfg *mfg) +{ + struct device *dev = 
mfg->dev; + + pm_runtime_disable(dev); +} + +struct mtk_mfg *mtk_mfg_create(struct device *dev) +{ + int err; + struct mtk_mfg *mfg; + + mtk_mfg_debug("mtk_mfg_create Begin\n"); + + mfg = devm_kzalloc(dev, sizeof(*mfg), GFP_KERNEL); + if (!mfg) + return ERR_PTR(-ENOMEM); + mfg->dev = dev; + + err = mtk_mfg_bind_device_resource(mfg); + if (err != 0) + return ERR_PTR(err); + + mutex_init(&mfg->set_power_state); + + err = mtk_mfg_prepare_clock(mfg); + if (err) + goto err_unbind_resource; + + mtk_mfg_debug("mtk_mfg_create End\n"); + + return mfg; +err_unbind_resource: + mtk_mfg_unbind_device_resource(mfg); + + return ERR_PTR(err); +} + +void mtk_mfg_destroy(struct mtk_mfg *mfg) +{ + mtk_mfg_unprepare_clock(mfg); + + mtk_mfg_unbind_device_resource(mfg); +} diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/mt8173/mt8173_mfgsys.h b/drivers/mcst/gpu-imgtec/services/system/rogue/mt8173/mt8173_mfgsys.h new file mode 100644 index 000000000000..3c1e9cad1284 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/mt8173/mt8173_mfgsys.h @@ -0,0 +1,66 @@ +/* +* Copyright (c) 2014 MediaTek Inc. +* Author: Chiawen Lee +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License version 2 as +* published by the Free Software Foundation. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +*/ + +#ifndef MT8173_MFGSYS_H +#define MT8173_MFGSYS_H + +#include + +/* unit ms, timeout interval for DVFS detection */ +#define MTK_DVFS_SWITCH_INTERVAL 300 + +#define ENABLE_MTK_MFG_DEBUG 0 + +#if ENABLE_MTK_MFG_DEBUG +#define mtk_mfg_debug(fmt, args...) pr_info("[MFG]" fmt, ##args) +#else +#define mtk_mfg_debug(fmt, args...) 
do { } while (0) +#endif + +struct mtk_mfg { + struct device *dev; + + struct clk **top_clk; + void __iomem *reg_base; + + resource_size_t rgx_start; + resource_size_t rgx_size; + int rgx_irq; + + /* mutex protect for set power state */ + struct mutex set_power_state; + + /* for gpu device freq/volt update */ + struct regulator *vgpu; + struct clk *mmpll; + struct clk *top_mfg; + struct clk *top_mmpll; + struct clk *clk26m; + +#if (defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) + struct thermal_zone_device *tz; +#endif +}; + +struct mtk_mfg *mtk_mfg_create(struct device *dev); +void mtk_mfg_destroy(struct mtk_mfg *mfg); + +int mtk_mfg_enable(struct mtk_mfg *mfg); +void mtk_mfg_disable(struct mtk_mfg *mfg); + +int mtk_mfg_freq_set(struct mtk_mfg *mfg, unsigned long freq); +int mtk_mfg_volt_set(struct mtk_mfg *mfg, int volt); + +#endif /* MT8173_MFGSYS_H*/ diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/mt8173/mt8173_sysconfig.c b/drivers/mcst/gpu-imgtec/services/system/rogue/mt8173/mt8173_sysconfig.c new file mode 100644 index 000000000000..966212bfb769 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/mt8173/mt8173_sysconfig.c @@ -0,0 +1,545 @@ +/*************************************************************************/ /*! +@File +@Title System Configuration +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description System Configuration functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(CONFIG_DEVFREQ_THERMAL) +#include +#endif + +#include "physheap.h" +#include "pvrsrv_device.h" +#include "rgxdevice.h" +#include "syscommon.h" +#if defined(SUPPORT_ION) +#include "ion_support.h" +#endif + +#include "mt8173_mfgsys.h" + +#define SYS_RGX_ACTIVE_POWER_LATENCY_MS 10 +#define RGX_HW_CORE_CLOCK_SPEED 395000000 + +/* Setup RGX specific timing data */ +static RGX_TIMING_INFORMATION gsRGXTimingInfo = { + .ui32CoreClockSpeed = RGX_HW_CORE_CLOCK_SPEED, + .bEnableActivePM = IMG_TRUE, + .ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS, + .bEnableRDPowIsland = IMG_TRUE, +}; + +static RGX_DATA gsRGXData = { + .psRGXTimingInfo = &gsRGXTimingInfo, +}; + +static PVRSRV_DEVICE_CONFIG gsDevice; + +typedef struct +{ + IMG_UINT32 ui32IRQ; + PFN_LISR pfnLISR; + void *pvLISRData; +} LISR_WRAPPER_DATA; + +static irqreturn_t MTKLISRWrapper(int iIrq, void *pvData) +{ + LISR_WRAPPER_DATA *psWrapperData = pvData; + + if (psWrapperData->pfnLISR(psWrapperData->pvLISRData)) + { + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +/* + * CPU to Device physical address translation + */ +static +void UMAPhysHeapCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr) +{ + PVR_UNREFERENCED_PARAMETER(hPrivData); + + /* Optimise common case */ + psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr; + if (ui32NumOfAddr > 1) { + IMG_UINT32 ui32Idx; + for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) + psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr; + } +} + +/* + * Device to CPU physical address translation + */ +static +void UMAPhysHeapDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PVR_UNREFERENCED_PARAMETER(hPrivData); + + /* Optimise common 
case */ + psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr; + if (ui32NumOfAddr > 1) { + IMG_UINT32 ui32Idx; + for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) + psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr; + } +} + +static PHYS_HEAP_FUNCTIONS gsPhysHeapFuncs = { + .pfnCpuPAddrToDevPAddr = UMAPhysHeapCpuPAddrToDevPAddr, + .pfnDevPAddrToCpuPAddr = UMAPhysHeapDevPAddrToCpuPAddr, +}; + +static PHYS_HEAP_REGION gsPhysHeapRegion = { + .sStartAddr.uiAddr = 0, + .sCardBase.uiAddr = 0, + .uiSize = 0, + .hPrivData = NULL, +}; + +static PHYS_HEAP_CONFIG gsPhysHeapConfig = { + .ui32PhysHeapID = 0, + .pszPDumpMemspaceName = "SYSMEM", + .eType = PHYS_HEAP_TYPE_UMA, + .psMemFuncs = &gsPhysHeapFuncs, + .pasRegions = &gsPhysHeapRegion, + .ui32NumOfRegions = 1, + .hPrivData = NULL, +}; + +static PVRSRV_ERROR MTKSysDevPrePowerState( + IMG_HANDLE hSysData, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + IMG_BOOL bForced) +{ + struct mtk_mfg *mfg = hSysData; + + mtk_mfg_debug("MTKSysDevPrePowerState (%d->%d), bForced = %d\n", + eCurrentPowerState, eNewPowerState, bForced); + + mutex_lock(&mfg->set_power_state); + + if ((PVRSRV_DEV_POWER_STATE_OFF == eNewPowerState) && + (PVRSRV_DEV_POWER_STATE_ON == eCurrentPowerState)) + mtk_mfg_disable(mfg); + + mutex_unlock(&mfg->set_power_state); + return PVRSRV_OK; +} + +static PVRSRV_ERROR MTKSysDevPostPowerState( + IMG_HANDLE hSysData, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + IMG_BOOL bForced) +{ + struct mtk_mfg *mfg = hSysData; + PVRSRV_ERROR ret; + + mtk_mfg_debug("MTKSysDevPostPowerState (%d->%d)\n", + eCurrentPowerState, eNewPowerState); + + mutex_lock(&mfg->set_power_state); + + if ((PVRSRV_DEV_POWER_STATE_ON == eNewPowerState) && + (PVRSRV_DEV_POWER_STATE_OFF == eCurrentPowerState)) { + if (mtk_mfg_enable(mfg)) { + ret = PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE; + goto done; + } + } + + ret = PVRSRV_OK; +done: + 
mutex_unlock(&mfg->set_power_state); + + return ret; +} + +#ifdef SUPPORT_LINUX_DVFS + +#define FALLBACK_STATIC_TEMPERATURE 65000 + +/* Temperatures on power over-temp-and-voltage curve (C) */ +static const int vt_temperatures[] = { 25, 45, 65, 85, 105 }; + +/* Voltages on power over-temp-and-voltage curve (mV) */ +static const int vt_voltages[] = { 900, 1000, 1130 }; + +#define POWER_TABLE_NUM_TEMP ARRAY_SIZE(vt_temperatures) +#define POWER_TABLE_NUM_VOLT ARRAY_SIZE(vt_voltages) + +static const unsigned int +power_table[POWER_TABLE_NUM_VOLT][POWER_TABLE_NUM_TEMP] = { + /* 25 45 65 85 105 */ + { 14540, 35490, 60420, 120690, 230000 }, /* 900 mV */ + { 21570, 41910, 82380, 159140, 298620 }, /* 1000 mV */ + { 32320, 72950, 111320, 209290, 382700 }, /* 1130 mV */ +}; + +/** Frequency and Power in Khz and mW respectively */ +static const int f_range[] = {253500, 299000, 396500, 455000, 494000, 598000}; +static const IMG_UINT32 max_dynamic_power[] = {612, 722, 957, 1100, 1194, 1445}; + +#if defined(CONFIG_DEVFREQ_THERMAL) +static u32 interpolate(int value, const int *x, const unsigned int *y, int len) +{ + u64 tmp64; + u32 dx; + u32 dy; + int i, ret; + + if (value <= x[0]) + return y[0]; + if (value >= x[len - 1]) + return y[len - 1]; + + for (i = 1; i < len - 1; i++) { + /* If value is identical, no need to interpolate */ + if (value == x[i]) + return y[i]; + if (value < x[i]) + break; + } + + /* Linear interpolation between the two (x,y) points */ + dy = y[i] - y[i - 1]; + dx = x[i] - x[i - 1]; + + tmp64 = value - x[i - 1]; + tmp64 *= dy; + do_div(tmp64, dx); + ret = y[i - 1] + tmp64; + + return ret; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) +static unsigned long mtk_mfg_get_static_power(struct devfreq *df, + unsigned long voltage) +#else +static unsigned long mtk_mfg_get_static_power(unsigned long voltage) +#endif +{ + struct mtk_mfg *mfg = gsDevice.hSysData; + struct thermal_zone_device *tz = mfg->tz; + unsigned long power; +#if 
!defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) + unsigned long temperature = FALLBACK_STATIC_TEMPERATURE; +#else + int temperature = FALLBACK_STATIC_TEMPERATURE; +#endif + int low_idx = 0, high_idx = POWER_TABLE_NUM_VOLT - 1; + int i; + + if (!tz) + return 0; + + if (tz->ops->get_temp(tz, &temperature)) + dev_warn(mfg->dev, "Failed to read temperature\n"); + do_div(temperature, 1000); + + for (i = 0; i < POWER_TABLE_NUM_VOLT; i++) { + if (voltage <= vt_voltages[POWER_TABLE_NUM_VOLT - 1 - i]) + high_idx = POWER_TABLE_NUM_VOLT - 1 - i; + + if (voltage >= vt_voltages[i]) + low_idx = i; + } + + if (low_idx == high_idx) { + power = interpolate(temperature, + vt_temperatures, + &power_table[low_idx][0], + POWER_TABLE_NUM_TEMP); + } else { + unsigned long dvt = + vt_voltages[high_idx] - vt_voltages[low_idx]; + unsigned long power1, power2; + + power1 = interpolate(temperature, + vt_temperatures, + &power_table[high_idx][0], + POWER_TABLE_NUM_TEMP); + + power2 = interpolate(temperature, + vt_temperatures, + &power_table[low_idx][0], + POWER_TABLE_NUM_TEMP); + + power = (power1 - power2) * (voltage - vt_voltages[low_idx]); + do_div(power, dvt); + power += power2; + } + + /* convert to mw */ + do_div(power, 1000); + + mtk_mfg_debug("mtk_mfg_get_static_power: %lu at Temperature %d\n", + power, temperature); + return power; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) +static unsigned long mtk_mfg_get_dynamic_power(struct devfreq *df, + unsigned long freq, + unsigned long voltage) +#else +static unsigned long mtk_mfg_get_dynamic_power(unsigned long freq, + unsigned long voltage) +#endif +{ + #define NUM_RANGE ARRAY_SIZE(f_range) + /** Frequency and Power in Khz and mW respectively */ + IMG_INT32 i, low_idx = 0, high_idx = NUM_RANGE - 1; + IMG_UINT32 power; + + for (i = 0; i < NUM_RANGE; i++) { + if (freq <= f_range[NUM_RANGE - 1 - i]) + high_idx = NUM_RANGE - 1 - i; + + if (freq >= f_range[i]) + low_idx = i; + } + + if (low_idx == 
high_idx) { + power = max_dynamic_power[low_idx]; + } else { + IMG_UINT32 f_interval = f_range[high_idx] - f_range[low_idx]; + IMG_UINT32 p_interval = max_dynamic_power[high_idx] - + max_dynamic_power[low_idx]; + + power = p_interval * (freq - f_range[low_idx]); + do_div(power, f_interval); + power += max_dynamic_power[low_idx]; + } + + power = (IMG_UINT32)div_u64((IMG_UINT64)power * voltage * voltage, + 1000000UL); + + return power; + #undef NUM_RANGE +} + +static struct devfreq_cooling_power sPowerOps = { + .get_static_power = mtk_mfg_get_static_power, + .get_dynamic_power = mtk_mfg_get_dynamic_power, +}; +#endif + +static void SetFrequency(IMG_UINT32 freq) +{ + struct mtk_mfg *mfg = gsDevice.hSysData; + + /* freq is in Hz */ + mtk_mfg_freq_set(mfg, freq); +} + +static void SetVoltage(IMG_UINT32 volt) +{ + struct mtk_mfg *mfg = gsDevice.hSysData; + + mtk_mfg_volt_set(mfg, volt); +} +#endif + +PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig) +{ + struct device *dev = pvOSDevice; + struct mtk_mfg *mfg; + + if (gsDevice.pvOSDevice) + { + return PVRSRV_ERROR_INVALID_DEVICE; + } + + mfg = mtk_mfg_create(dev); + if (IS_ERR(mfg)) { + if (PTR_ERR(mfg) == -EPROBE_DEFER) + return PVRSRV_ERROR_PROBE_DEFER; + else + return PVRSRV_ERROR_INIT_FAILURE; + } + + dma_set_mask(dev, DMA_BIT_MASK(33)); + + /* Make sure everything we don't care about is set to 0 */ + memset(&gsDevice, 0, sizeof(gsDevice)); + + /* Setup RGX device */ + gsDevice.pvOSDevice = pvOSDevice; + gsDevice.pszName = "mt8173"; + gsDevice.pszVersion = NULL; + + /* Device's physical heaps */ + gsDevice.pasPhysHeaps = &gsPhysHeapConfig; + gsDevice.ui32PhysHeapCount = 1; + + gsDevice.ui32IRQ = mfg->rgx_irq; + + gsDevice.sRegsCpuPBase.uiAddr = mfg->rgx_start; + gsDevice.ui32RegsSize = mfg->rgx_size; + +#ifdef SUPPORT_LINUX_DVFS + gsDevice.sDVFS.sDVFSDeviceCfg.bIdleReq = IMG_TRUE; + gsDevice.sDVFS.sDVFSDeviceCfg.pfnSetFrequency = SetFrequency; + gsDevice.sDVFS.sDVFSDeviceCfg.pfnSetVoltage 
= SetVoltage; + gsDevice.sDVFS.sDVFSDeviceCfg.ui32PollMs = MTK_DVFS_SWITCH_INTERVAL; +#if defined(CONFIG_DEVFREQ_THERMAL) + gsDevice.sDVFS.sDVFSDeviceCfg.psPowerOps = &sPowerOps; +#endif + + gsDevice.sDVFS.sDVFSGovernorCfg.ui32UpThreshold = 90; + gsDevice.sDVFS.sDVFSGovernorCfg.ui32DownDifferential = 10; +#endif + + /* Device's physical heap IDs */ + gsDevice.aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = 0; + gsDevice.aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = 0; + gsDevice.aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = 0; + + /* power management on HW system */ + gsDevice.pfnPrePowerState = MTKSysDevPrePowerState; + gsDevice.pfnPostPowerState = MTKSysDevPostPowerState; + + /* clock frequency */ + gsDevice.pfnClockFreqGet = NULL; + + gsDevice.hDevData = &gsRGXData; + gsDevice.hSysData = mfg; + + gsDevice.bHasFBCDCVersion31 = IMG_FALSE; + gsDevice.bDevicePA0IsValid = IMG_FALSE; + + *ppsDevConfig = &gsDevice; + +#if defined(SUPPORT_ION) + IonInit(NULL); +#endif + + return PVRSRV_OK; +} + +void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + struct mtk_mfg *mfg = psDevConfig->hSysData; + +#if defined(SUPPORT_ION) + IonDeinit(); +#endif + + mtk_mfg_destroy(mfg); + + psDevConfig->pvOSDevice = NULL; +} + +PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData, + IMG_UINT32 ui32IRQ, + const IMG_CHAR *pszName, + PFN_LISR pfnLISR, + void *pvData, + IMG_HANDLE *phLISRData) +{ + LISR_WRAPPER_DATA *psWrapperData; + + PVR_UNREFERENCED_PARAMETER(hSysData); + + psWrapperData = kmalloc(sizeof(*psWrapperData), GFP_KERNEL); + if (!psWrapperData) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psWrapperData->ui32IRQ = ui32IRQ; + psWrapperData->pfnLISR = pfnLISR; + psWrapperData->pvLISRData = pvData; + + if (request_irq(ui32IRQ, MTKLISRWrapper, IRQF_TRIGGER_LOW, pszName, + psWrapperData)) + { + kfree(psWrapperData); + + return PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER; + } + + *phLISRData = (IMG_HANDLE) psWrapperData; + + return PVRSRV_OK; +} + 
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData) +{ + LISR_WRAPPER_DATA *psWrapperData = hLISRData; + + free_irq(psWrapperData->ui32IRQ, psWrapperData); + + OSFreeMem(psWrapperData); + + return PVRSRV_OK; +} + +PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVR_UNREFERENCED_PARAMETER(psDevConfig); + PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf); + PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/mt8173/sysinfo.h b/drivers/mcst/gpu-imgtec/services/system/rogue/mt8173/sysinfo.h new file mode 100644 index 000000000000..1ecf1ca4cb6f --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/mt8173/sysinfo.h @@ -0,0 +1,57 @@ +/*************************************************************************/ /*! +@File +@Title System Description Header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides system-specific declarations and macros +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__SYSINFO_H__) +#define __SYSINFO_H__ + + + +/*!< System specific poll/timeout details */ +#define MAX_HW_TIME_US (1000000) +#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (1500)//(10000) +#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000) +#define WAIT_TRY_COUNT (20000) + +#define SYS_RGX_OF_COMPATIBLE "mediatek,mt8173-gpu" + +#endif /* !defined(__SYSINFO_H__) */ diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_plato/Kbuild.mk b/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_plato/Kbuild.mk new file mode 100644 index 000000000000..9c4304344626 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_plato/Kbuild.mk @@ -0,0 +1,57 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### +PVRSRVKM_NAME = $(PVRSRV_MODNAME) + +$(PVRSRVKM_NAME)-y += \ + services/system/rogue/$(PVR_SYSTEM)/sysconfig.o \ + services/system/rogue/common/env/linux/dma_support.o \ + services/server/common/vmm_pvz_client.o \ + services/server/common/vmm_pvz_server.o \ + services/server/common/vz_vmm_pvz.o \ + services/server/common/vz_vmm_vm.o \ + services/system/rogue/common/vmm_type_$(VMM_TYPE).o + +ifeq ($(PLATO_SYSTEM_PDUMP),1) +ccflags-y += -I$(TOP)/services/system/rogue/$(PVR_SYSTEM) -I$(TOP)/services/system/rogue/$(PVR_SYSTEM) +endif + +ccflags-y += \ + -I$(TOP)/services/system/rogue/common/env/linux -I$(TOP)/services/system/rogue/common/env/linux diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_plato/sysconfig.c b/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_plato/sysconfig.c new file mode 100644 index 000000000000..6ddbd065f231 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_plato/sysconfig.c @@ -0,0 +1,778 @@ +/*************************************************************************/ /*! +@File +@Title System Configuration +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description System Configuration functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/
+
+#include "pvr_debug.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#include "pvrsrv_device.h"
+#include "pvrsrv_memallocflags.h"
+#include "syscommon.h"
+#include "power.h"
+#include "sysinfo.h"
+#include "sysconfig.h"
+#include "physheap.h"
+#include "pci_support.h"
+#include "interrupt_support.h"
+#include "plato_drv.h"
+/* NOTE(review): the original patch line read a bare "#include" with no
+ * target -- the angle-bracket header was evidently lost when the patch
+ * text was extracted. to_platform_device() and
+ * platform_get_resource_byname() used in SysDevInit() below require
+ * <linux/platform_device.h>; confirm against the upstream DDK source.
+ */
+#include <linux/platform_device.h>
+
+/* True when the platform data says local memory exceeds the mappable BAR */
+#define PLATO_HAS_NON_MAPPABLE(sys) (sys->pdata->has_nonmappable == true)
+
+/* NOC debug registers read back by SysDebugInfo() via plato_debug_info() */
+static struct plato_debug_register plato_noc_regs[] = {
+ {"NOC Offset 0x00", 0x00, 0},
+ {"NOC Offset 0x04", 0x04, 0},
+ {"NOC Offset 0x08", 0x08, 0},
+ {"NOC Offset 0x0C", 0x0C, 0},
+ {"NOC Offset 0x10", 0x10, 0},
+ {"NOC Offset 0x14", 0x14, 0},
+ {"NOC Offset 0x18", 0x18, 0},
+ {"NOC Offset 0x1C", 0x1C, 0},
+ {"NOC Offset 0x50", 0x50, 0},
+ {"NOC Offset 0x54", 0x54, 0},
+ {"NOC Offset 0x58", 0x58, 0},
+ {"DDR A Ctrl", SYS_PLATO_REG_NOC_DBG_DDR_A_CTRL_OFFSET, 0},
+ {"DDR A Data", SYS_PLATO_REG_NOC_DBG_DDR_A_DATA_OFFSET, 0},
+ {"DDR A Publ", SYS_PLATO_REG_NOC_DBG_DDR_A_PUBL_OFFSET, 0},
+ {"DDR B Ctrl", SYS_PLATO_REG_NOC_DBG_DDR_B_CTRL_OFFSET, 0},
+ {"DDR B Data", SYS_PLATO_REG_NOC_DBG_DDR_B_DATA_OFFSET, 0},
+ {"DDR B Publ", SYS_PLATO_REG_NOC_DBG_DDR_B_PUBL_OFFSET, 0},
+ {"Display S", SYS_PLATO_REG_NOC_DBG_DISPLAY_S_OFFSET, 0},
+ {"GPIO 0 S", SYS_PLATO_REG_NOC_DBG_GPIO_0_S_OFFSET, 0},
+ {"GPIO 1 S", SYS_PLATO_REG_NOC_DBG_GPIO_1_S_OFFSET, 0},
+ {"GPU S", SYS_PLATO_REG_NOC_DBG_GPU_S_OFFSET, 0},
+ {"PCI PHY", SYS_PLATO_REG_NOC_DBG_PCI_PHY_OFFSET, 0},
+ {"PCI Reg", SYS_PLATO_REG_NOC_DBG_PCI_REG_OFFSET, 0},
+ {"PCI S", SYS_PLATO_REG_NOC_DBG_PCI_S_OFFSET, 0},
+ {"Periph S", SYS_PLATO_REG_NOC_DBG_PERIPH_S_OFFSET, 0},
+ {"Ret Reg", SYS_PLATO_REG_NOC_DBG_RET_REG_OFFSET, 0},
+ {"Service", SYS_PLATO_REG_NOC_DBG_SERVICE_OFFSET, 0},
+};
+
+/* Always-on block debug registers, also dumped by SysDebugInfo() */
+static struct plato_debug_register plato_aon_regs[] = {
+ {"AON Offset 0x0000", 0x0000, 0},
+ {"AON Offset 0x0070", 0x0070, 0},
+};
+
+typedef struct 
_SYS_DATA_ { + struct platform_device *pdev; + struct resource *registers; + struct plato_rogue_platform_data *pdata; +} SYS_DATA; + +typedef struct { + struct device *psDev; + int iInterruptID; + void *pvData; + PFN_LISR pfnLISR; +} LISR_DATA; + +static IMG_CHAR *GetDeviceVersionString(SYS_DATA *psSysData) +{ + return NULL; +} + + +PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + SYS_DATA *psSysData = psDevConfig->hSysData; + IMG_UINT32 i = 0; + + PVR_DUMPDEBUG_LOG("------[ Plato System Debug ]------"); + + if (plato_debug_info(psSysData->pdev->dev.parent, &plato_noc_regs[0], &plato_aon_regs[0])) + return PVRSRV_ERROR_INVALID_PARAMS; + + for (i = 0; i < ARRAY_SIZE(plato_noc_regs); i++) + PVR_DUMPDEBUG_LOG("%s: 0x%x", plato_noc_regs[i].description, plato_noc_regs[i].value); + + for (i = 0; i < ARRAY_SIZE(plato_aon_regs); i++) + PVR_DUMPDEBUG_LOG("%s: 0x%x", plato_aon_regs[i].description, plato_aon_regs[i].value); + + return PVRSRV_OK; +} + + +#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_LOCAL) || (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID) + +static PVRSRV_ERROR InitLocalHeaps(SYS_DATA *psSysData, + IMG_UINT32 uiHeapIDBase, + PHYS_HEAP_CONFIG *pasPhysHeaps, + IMG_UINT32 uiPhysHeapCount, + IMG_HANDLE hPhysHeapPrivData) +{ + PHYS_HEAP_CONFIG *psPhysHeap; + PHYS_HEAP_REGION *psHeapRegion; + + psPhysHeap = &pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]; + psPhysHeap->ui32PhysHeapID = uiHeapIDBase + PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL; + psPhysHeap->eType = PHYS_HEAP_TYPE_LMA; + psPhysHeap->pszPDumpMemspaceName = "LMA"; + psPhysHeap->psMemFuncs = &gsLocalPhysHeapFuncs; + psPhysHeap->hPrivData = hPhysHeapPrivData; + psPhysHeap->ui32NumOfRegions = 1; + + if (PLATO_HAS_NON_MAPPABLE(psSysData)) + psPhysHeap->ui32NumOfRegions++; + + psPhysHeap->pasRegions = OSAllocZMem(sizeof(*psPhysHeap->pasRegions) * + psPhysHeap->ui32NumOfRegions); + if (!psPhysHeap->pasRegions) + return 
PVRSRV_ERROR_OUT_OF_MEMORY; + + /* Configure mappable heap region */ + psHeapRegion = &psPhysHeap->pasRegions[PLATO_LMA_HEAP_REGION_MAPPABLE]; + psHeapRegion->sStartAddr.uiAddr = psSysData->pdata->rogue_heap_mappable.base; + psHeapRegion->sCardBase.uiAddr = psSysData->pdata->rogue_heap_dev_addr; + psHeapRegion->uiSize = psSysData->pdata->rogue_heap_mappable.size; + + PVR_LOG(("Added mappable local memory heap. Base = 0x%016llx, Size=0x%016llx", + psHeapRegion->sCardBase.uiAddr, + psHeapRegion->uiSize)); + + /* Setup non-mappable region if BAR size is less than actual memory size (8GB) */ + if (PLATO_HAS_NON_MAPPABLE(psSysData)) { + psHeapRegion = &psPhysHeap->pasRegions[PLATO_LMA_HEAP_REGION_NONMAPPABLE]; + psHeapRegion->sCardBase.uiAddr = psSysData->pdata->rogue_heap_nonmappable.base; + psHeapRegion->uiSize = psSysData->pdata->rogue_heap_nonmappable.size; + psHeapRegion->sStartAddr.uiAddr = 0; + + PVR_LOG(("Added non-mappable local memory heap. Base = 0x%016llx, Size=0x%016llx", + psHeapRegion->sCardBase.uiAddr, + psHeapRegion->uiSize)); + + PVR_ASSERT(psHeapRegion->uiSize < SYS_DEV_MEM_REGION_SIZE); + } + +#if defined(SUPPORT_PLATO_DISPLAY) + psPhysHeap = &pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_PDP_LOCAL]; + psPhysHeap->ui32PhysHeapID = uiHeapIDBase + PVRSRV_DEVICE_PHYS_HEAP_PDP_LOCAL; + psPhysHeap->eType = PHYS_HEAP_TYPE_LMA; + psPhysHeap->pszPDumpMemspaceName = "LMA"; + psPhysHeap->psMemFuncs = &gsLocalPhysHeapFuncs; + psPhysHeap->hPrivData = hPhysHeapPrivData; + psPhysHeap->ui32NumOfRegions = 1; + + psPhysHeap->pasRegions = OSAllocZMem(sizeof(*psPhysHeap->pasRegions)); + if (!psPhysHeap->pasRegions) { + OSFreeMem(pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL].pasRegions); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psHeapRegion = &psPhysHeap->pasRegions[0]; + psHeapRegion->sCardBase.uiAddr = PLATO_DDR_DEV_PHYSICAL_BASE; + psHeapRegion->sStartAddr.uiAddr = psSysData->pdata->pdp_heap.base; + psHeapRegion->uiSize = psSysData->pdata->pdp_heap.size; + + 
PVR_LOG(("Added PDP heap. Base = 0x%016llx, Size=0x%016llx", + psHeapRegion->sStartAddr.uiAddr, + psHeapRegion->uiSize)); +#endif + + return PVRSRV_OK; +} +#endif /* (PLATO_MEMORY_CONFIG == PLATO_MEMORY_LOCAL) || (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID) */ + +#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HOST) || (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID) +static PVRSRV_ERROR InitHostHeaps(SYS_DATA *psSysData, + IMG_UINT32 uiHeapIDBase, + PHYS_HEAP_CONFIG *pasPhysHeaps, + IMG_UINT32 uiPhysHeapCount, + IMG_HANDLE hPhysHeapPrivData) +{ + PHYS_HEAP_CONFIG *psPhysHeap; + + PVR_ASSERT(uiPhysHeapCount == 1); + + psPhysHeap = &pasPhysHeaps[0]; + psPhysHeap->ui32PhysHeapID = uiHeapIDBase; + psPhysHeap->eType = PHYS_HEAP_TYPE_UMA; + psPhysHeap->pszPDumpMemspaceName = "SYSMEM"; + psPhysHeap->psMemFuncs = &gsHostPhysHeapFuncs; + psPhysHeap->hPrivData = hPhysHeapPrivData; + psPhysHeap->ui32NumOfRegions = 1; + + psPhysHeap->pasRegions = OSAllocZMem(sizeof(*psPhysHeap->pasRegions)); + if (!psPhysHeap->pasRegions) + return PVRSRV_ERROR_OUT_OF_MEMORY; + + psPhysHeap->pasRegions[0].sCardBase.uiAddr = PLATO_HOSTRAM_DEV_PHYSICAL_BASE; + + return PVRSRV_OK; +} +#endif /* (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HOST) */ + +#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID) +static PVRSRV_ERROR InitHybridHeaps(SYS_DATA *psSysData, + IMG_UINT32 uiHeapIDBase, + PHYS_HEAP_CONFIG *pasPhysHeaps, + IMG_UINT32 uiPhysHeapCount, + IMG_HANDLE hPhysHeapPrivData) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT(uiPhysHeapCount >= PVRSRV_DEVICE_PHYS_HEAP_PDP_LOCAL); + + eError = InitHostHeaps(psSysData, + uiHeapIDBase + PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL, + &pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL], 1, + hPhysHeapPrivData); + if (eError != PVRSRV_OK) + return eError; + + /* + * InitLocalHeaps should set up the correct heaps regardless of whether the + * memory configuration is 'local' or 'hybrid'. 
+ */ + eError = InitLocalHeaps(psSysData, uiHeapIDBase, pasPhysHeaps, + uiPhysHeapCount, hPhysHeapPrivData); + if (eError != PVRSRV_OK) { + OSFreeMem(pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL].pasRegions); + pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL].pasRegions = NULL; + pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL].ui32NumOfRegions = 0; + + return eError; + } + + /* Adjust the pdump memory space names */ + pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL].pszPDumpMemspaceName = "LMA0"; + pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_PDP_LOCAL].pszPDumpMemspaceName = "LMA1"; + + return PVRSRV_OK; +} +#endif /* (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID) */ + +static PVRSRV_ERROR PhysHeapsCreate(SYS_DATA *psSysData, + PVRSRV_DEVICE_CONFIG *psDevConfig, + PHYS_HEAP_CONFIG **ppasPhysHeapsOut, + IMG_UINT32 *puiPhysHeapCountOut) +{ + static IMG_UINT32 uiHeapIDBase = 0; + IMG_UINT32 uiHeapCount = 1; + PHYS_HEAP_CONFIG *pasPhysHeaps; + PVRSRV_ERROR eError; + +#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID) + uiHeapCount++; +#endif + +#if defined(SUPPORT_PLATO_DISPLAY) + uiHeapCount++; +#endif + + pasPhysHeaps = OSAllocZMem(sizeof(*pasPhysHeaps) * uiHeapCount); + if (!pasPhysHeaps) + return PVRSRV_ERROR_OUT_OF_MEMORY; + +#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_LOCAL) + eError = InitLocalHeaps(psSysData, uiHeapIDBase, pasPhysHeaps, + uiHeapCount, psDevConfig); + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = + pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL].ui32PhysHeapID; + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = + pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL].ui32PhysHeapID; + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = + pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL].ui32PhysHeapID; + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL] = + pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_PDP_LOCAL].ui32PhysHeapID; +#elif (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HOST) + eError = InitHostHeaps(psSysData, 
uiHeapIDBase, pasPhysHeaps, + uiHeapCount, psDevConfig); + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = + pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL].ui32PhysHeapID; + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = + pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL].ui32PhysHeapID; + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = + pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL].ui32PhysHeapID; + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL] = + pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL].ui32PhysHeapID; +#elif (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID) + eError = InitHybridHeaps(psSysData, uiHeapIDBase, pasPhysHeaps, + uiHeapCount, psDevConfig); + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = + pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL].ui32PhysHeapID; + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = + pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL].ui32PhysHeapID; + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = + pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL].ui32PhysHeapID; + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL] = +#if defined(SUPPORT_PLATO_DISPLAY) + pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_PDP_LOCAL].ui32PhysHeapID; +#else + pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL].ui32PhysHeapID; +#endif +#endif + + if (eError != PVRSRV_OK) { + OSFreeMem(pasPhysHeaps); + return eError; + } + + uiHeapIDBase += uiHeapCount; + + *ppasPhysHeapsOut = pasPhysHeaps; + *puiPhysHeapCountOut = uiHeapCount; + + return PVRSRV_OK; +} + +static void PhysHeapsDestroy(PHYS_HEAP_CONFIG *pasPhysHeaps, + IMG_UINT32 uiPhysHeapCount) +{ + IMG_UINT32 i; + + for (i = 0; i < uiPhysHeapCount; i++) { + OSFreeMem(pasPhysHeaps[i].pasRegions); + pasPhysHeaps[i].pasRegions = NULL; + pasPhysHeaps[i].ui32NumOfRegions = 0; + } + + OSFreeMem(pasPhysHeaps); +} + +#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_LOCAL) || (PLATO_MEMORY_CONFIG == 
PLATO_MEMORY_HYBRID) +static void PlatoLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData; + + /* Optimise common case */ + psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr - + psDevConfig->pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL].pasRegions[0].sStartAddr.uiAddr + + psDevConfig->pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL].pasRegions[0].sCardBase.uiAddr; + + if (ui32NumOfAddr > 1) { + IMG_UINT32 ui32Idx; + + for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) { + psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr - + psDevConfig->pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL].pasRegions[0].sStartAddr.uiAddr + + psDevConfig->pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL].pasRegions[0].sCardBase.uiAddr; + } + } +} + +static void PlatoLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData; + + /* Optimise common case */ + psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr - + psDevConfig->pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL].pasRegions[0].sCardBase.uiAddr + + psDevConfig->pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL].pasRegions[0].sStartAddr.uiAddr; + + if (ui32NumOfAddr > 1) { + IMG_UINT32 ui32Idx; + + for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) { + psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr - + psDevConfig->pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL].pasRegions[0].sCardBase.uiAddr + + psDevConfig->pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL].pasRegions[0].sStartAddr.uiAddr; + } + } +} + +static IMG_UINT32 PlatoLocalGetRegionId(IMG_HANDLE hPrivData, + PVRSRV_MEMALLOCFLAGS_T uiAllocationFlags) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData; + + if (!(uiAllocationFlags & 
PVRSRV_MEMALLOCFLAG_CPU_READABLE) && + !(uiAllocationFlags & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE) && + !(uiAllocationFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) && + !(uiAllocationFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC) && + !(uiAllocationFlags & PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE) && + (psDevConfig->pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL].ui32NumOfRegions > 1)) + return PLATO_LMA_HEAP_REGION_NONMAPPABLE; + + return PLATO_LMA_HEAP_REGION_MAPPABLE; +} + +#endif + +#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HOST) || (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID) +static void PlatoSystemCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr) +{ + PVR_UNREFERENCED_PARAMETER(hPrivData); + + /* Optimise common case */ + psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr + PLATO_HOSTRAM_DEV_PHYSICAL_BASE; + if (ui32NumOfAddr > 1) { + IMG_UINT32 ui32Idx; + + for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) + psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr + PLATO_HOSTRAM_DEV_PHYSICAL_BASE; + } +} + +static void PlatoSystemDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PVR_UNREFERENCED_PARAMETER(hPrivData); + + /* Optimise common case */ + psCpuPAddr[0].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psDevPAddr[0].uiAddr - PLATO_HOSTRAM_DEV_PHYSICAL_BASE); + if (ui32NumOfAddr > 1) { + IMG_UINT32 ui32Idx; + + for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) + psCpuPAddr[ui32Idx].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psDevPAddr[ui32Idx].uiAddr - PLATO_HOSTRAM_DEV_PHYSICAL_BASE); + } +} + +static IMG_UINT32 PlatoSystemGetRegionId(IMG_HANDLE hPrivData, + PVRSRV_MEMALLOCFLAGS_T uiAllocationFlags) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData; + + if (!(uiAllocationFlags & PVRSRV_MEMALLOCFLAG_CPU_READABLE) && + !(uiAllocationFlags & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE) && + 
!(uiAllocationFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) && + !(uiAllocationFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC) && + !(uiAllocationFlags & PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE) && + (psDevConfig->pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL].ui32NumOfRegions > 1)) + return PLATO_LMA_HEAP_REGION_NONMAPPABLE; + + return PLATO_LMA_HEAP_REGION_MAPPABLE; +} +#endif /* (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HOST) || (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID) */ + +static PVRSRV_ERROR DeviceConfigCreate(void *pvOSDevice, + SYS_DATA *psSysData, + PVRSRV_DEVICE_CONFIG **ppsDevConfigOut) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig; + RGX_DATA *psRGXData; + RGX_TIMING_INFORMATION *psRGXTimingInfo; + PVRSRV_ERROR eError; + + psDevConfig = OSAllocZMem(sizeof(*psDevConfig) + + sizeof(*psRGXData) + + sizeof(*psRGXTimingInfo)); + if (!psDevConfig) + return PVRSRV_ERROR_OUT_OF_MEMORY; + + psRGXData = (RGX_DATA *)((IMG_CHAR *)psDevConfig + sizeof(*psDevConfig)); + psRGXTimingInfo = (RGX_TIMING_INFORMATION *)((IMG_CHAR *)psRGXData + sizeof(*psRGXData)); + + /* Set up the RGX timing information */ + psRGXTimingInfo->ui32CoreClockSpeed = plato_core_clock_speed(&psSysData->pdev->dev); + psRGXTimingInfo->bEnableActivePM = IMG_FALSE; + psRGXTimingInfo->bEnableRDPowIsland = IMG_FALSE; + psRGXTimingInfo->ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS; + + /* Set up the RGX data */ + psRGXData->psRGXTimingInfo = psRGXTimingInfo; + + /* Initialize heaps */ + eError = PhysHeapsCreate(psSysData, psDevConfig, &psDevConfig->pasPhysHeaps, + &psDevConfig->ui32PhysHeapCount); + if (eError != PVRSRV_OK) { + OSFreeMem(psDevConfig); + return eError; + } + + psDevConfig->pvOSDevice = pvOSDevice; + psDevConfig->pszName = PLATO_SYSTEM_NAME; + psDevConfig->pszVersion = GetDeviceVersionString(psSysData); + + psDevConfig->sRegsCpuPBase.uiAddr = psSysData->registers->start; + psDevConfig->ui32RegsSize = SYS_PLATO_REG_RGX_SIZE; + + psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE; + 
psDevConfig->bHasNonMappableLocalMemory = PLATO_HAS_NON_MAPPABLE(psSysData); + psDevConfig->bHasFBCDCVersion31 = IMG_FALSE; + + psDevConfig->ui32IRQ = PLATO_INTERRUPT_GPU; + + psDevConfig->hDevData = psRGXData; + psDevConfig->hSysData = psSysData; + + *ppsDevConfigOut = psDevConfig; + + return PVRSRV_OK; +} + +static void DeviceConfigDestroy(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + if (psDevConfig->pszVersion) + OSFreeMem(psDevConfig->pszVersion); + + PhysHeapsDestroy(psDevConfig->pasPhysHeaps, psDevConfig->ui32PhysHeapCount); + + /* + * The device config, RGX data and RGX timing info are part of the same + * allocation so do only one free. + */ + OSFreeMem(psDevConfig); +} + +static PVRSRV_ERROR PlatoLocalMemoryTest(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + IMG_UINT64 i, j = 0; + IMG_UINT32 tmp = 0; + IMG_UINT32 chunk = sizeof(IMG_UINT32) * 10; + + PHYS_HEAP_REGION *psRegion = + &psDevConfig->pasPhysHeaps[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL].pasRegions[0]; + + IMG_UINT64 ui64TestMemoryBase = psRegion->sStartAddr.uiAddr; + IMG_UINT64 ui64TestMemorySize = psRegion->uiSize; + + PVR_LOG(("%s: Starting Local memory test from 0x%llx to 0x%llx (in CPU space)", + __func__, ui64TestMemoryBase, ui64TestMemoryBase + ui64TestMemorySize)); + + while (j < ui64TestMemorySize) { + IMG_CPU_PHYADDR myPaddr; + IMG_UINT32 *pui32Virt; + + myPaddr.uiAddr = ui64TestMemoryBase + j; + pui32Virt = OSMapPhysToLin(myPaddr, chunk, PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); + + for (i = 0; i < chunk/sizeof(IMG_UINT32); i++) { + *(pui32Virt + i) = 0xdeadbeef; + OSWriteMemoryBarrier(); + tmp = *(pui32Virt + i); + if (tmp != 0xdeadbeef) { + PVR_DPF((PVR_DBG_ERROR, + "Local memory read-write test failed at address=0x%llx: written 0x%x, read 0x%x", + ui64TestMemoryBase + ((i * sizeof(IMG_UINT32)) + j), (IMG_UINT32) 0xdeadbeef, tmp)); + + OSUnMapPhysToLin(pui32Virt, chunk, PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); + return PVRSRV_ERROR_SYSTEM_LOCAL_MEMORY_INIT_FAIL; + } + } + + OSUnMapPhysToLin(pui32Virt, chunk, 
PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); + + j += (1024 * 1024 * 500); + } + + PVR_LOG(("Local memory read-write test passed!")); + return PVRSRV_OK; +} + +PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig; + SYS_DATA *psSysData; + IMG_UINT32 uiRegistersSize; + PVRSRV_ERROR eError; + + PVR_ASSERT(pvOSDevice); + + psSysData = OSAllocZMem(sizeof(*psSysData)); + if (psSysData == NULL) + return PVRSRV_ERROR_OUT_OF_MEMORY; + + dma_set_mask(pvOSDevice, DMA_BIT_MASK(40)); + + /* Retrieve platform device and data */ + psSysData->pdev = to_platform_device((struct device *) pvOSDevice); + psSysData->pdata = psSysData->pdev->dev.platform_data; + + /* Enable plato PCI */ + if (plato_enable(psSysData->pdev->dev.parent)) { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to enable PCI device", __func__)); + eError = PVRSRV_ERROR_PCI_CALL_FAILED; + goto ErrFreeSysData; + } + + psSysData->registers = platform_get_resource_byname(psSysData->pdev, IORESOURCE_MEM, PLATO_ROGUE_RESOURCE_REGS); + if (!psSysData->registers) { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to get Rogue register information", + __func__)); + eError = PVRSRV_ERROR_PCI_REGION_UNAVAILABLE; + goto ErrorDevDisable; + } + + /* Check the address range is large enough. 
*/ + uiRegistersSize = resource_size(psSysData->registers); + if (uiRegistersSize < SYS_PLATO_REG_RGX_SIZE) { + PVR_DPF((PVR_DBG_ERROR, + "%s: Rogue register region isn't big enough (was %08X, required 0x%08x)", + __func__, uiRegistersSize, SYS_PLATO_REG_RGX_SIZE)); + + eError = PVRSRV_ERROR_PCI_REGION_TOO_SMALL; + goto ErrorDevDisable; + } + +#if !defined(VIRTUAL_PLATFORM) + /* Reserve the rogue registers address range */ + if (!request_mem_region(psSysData->registers->start, + uiRegistersSize, + PVRSRV_MODNAME)) { + PVR_DPF((PVR_DBG_ERROR, "%s: Rogue register memory region not available", __func__)); + eError = PVRSRV_ERROR_PCI_CALL_FAILED; + goto ErrorDevDisable; + } +#endif + + eError = DeviceConfigCreate(pvOSDevice, psSysData, &psDevConfig); + if (eError != PVRSRV_OK) { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create device config", __func__)); + goto ErrorReleaseMemRegion; + } + + PlatoLocalMemoryTest(psDevConfig); + + *ppsDevConfig = psDevConfig; + + return PVRSRV_OK; + +ErrorReleaseMemRegion: + release_mem_region(psSysData->registers->start, + resource_size(psSysData->registers)); +ErrorDevDisable: + plato_disable(psSysData->pdev->dev.parent); +ErrFreeSysData: + OSFreeMem(psSysData); + return eError; +} + +void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + SYS_DATA *psSysData = (SYS_DATA *)psDevConfig->hSysData; + + DeviceConfigDestroy(psDevConfig); + + release_mem_region(psSysData->registers->start, + resource_size(psSysData->registers)); + plato_disable(psSysData->pdev->dev.parent); + + OSFreeMem(psSysData); +} + +static void PlatoInterruptHandler(void *pvData) +{ + LISR_DATA *psLISRData = pvData; + + psLISRData->pfnLISR(psLISRData->pvData); +} + +PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData, + IMG_UINT32 ui32IRQ, + const IMG_CHAR *pszName, + PFN_LISR pfnLISR, + void *pvData, + IMG_HANDLE *phLISRData) +{ + SYS_DATA *psSysData = (SYS_DATA *)hSysData; + LISR_DATA *psLISRData; + PVRSRV_ERROR eError; + + /* Should only accept GPU interrupts 
through this API */ + if (ui32IRQ != PLATO_INTERRUPT_GPU) { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid %d", __func__, ui32IRQ)); + return PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR; + } + + psLISRData = OSAllocZMem(sizeof(*psLISRData)); + if (!psLISRData) + return PVRSRV_ERROR_OUT_OF_MEMORY; + + psLISRData->pfnLISR = pfnLISR; + psLISRData->pvData = pvData; + psLISRData->iInterruptID = ui32IRQ; + psLISRData->psDev = psSysData->pdev->dev.parent; + + if (plato_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, PlatoInterruptHandler, psLISRData)) { + PVR_DPF((PVR_DBG_ERROR, "%s: plato_set_interrupt_handler() failed", __func__)); + eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR; + goto err_free_data; + } + + if (plato_enable_interrupt(psLISRData->psDev, psLISRData->iInterruptID)) { + PVR_DPF((PVR_DBG_ERROR, "%s: plato_enable_interrupt() failed", __func__)); + eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR; + goto err_unset_interrupt_handler; + } + + *phLISRData = psLISRData; + + PVR_LOG(("Installed device LISR %s on IRQ %d", pszName, ui32IRQ)); + + return PVRSRV_OK; + +err_unset_interrupt_handler: + plato_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, NULL, NULL); +err_free_data: + OSFreeMem(psLISRData); + return eError; +} + +PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData) +{ + LISR_DATA *psLISRData = (LISR_DATA *) hLISRData; + int err; + + err = plato_disable_interrupt(psLISRData->psDev, psLISRData->iInterruptID); + if (err) + { + PVR_DPF((PVR_DBG_ERROR, "%s: plato_enable_interrupt() failed (%d)", __func__, err)); + } + + err = plato_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, NULL, NULL); + if (err) + { + PVR_DPF((PVR_DBG_ERROR, "%s: plato_set_interrupt_handler() failed (%d)", __func__, err)); + } + + PVR_TRACE(("Uninstalled device LISR " IMG_PFN_FMTSPEC " from irq %u", psLISRData->pfnLISR, psLISRData->iInterruptID)); + + OSFreeMem(psLISRData); + + return PVRSRV_OK; +} diff --git 
a/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_plato/sysconfig.h b/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_plato/sysconfig.h new file mode 100644 index 000000000000..5ed862c18d89 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_plato/sysconfig.h @@ -0,0 +1,116 @@ +/*************************************************************************/ /*! +@File +@Title System Description Header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides system-specific declarations and macros +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__SYSCONFIG_H__) +#define __SYSCONFIG_H__ + +#include "pvrsrv_device.h" +#include "rgxdevice.h" +#include "plato_drv.h" + +#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (10) +#define MAX_SYSTEMS 32 + +#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_LOCAL) || (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID) +static void PlatoLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr); + +static void PlatoLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr); + +static IMG_UINT32 PlatoLocalGetRegionId(IMG_HANDLE hPrivData, + PVRSRV_MEMALLOCFLAGS_T uiAllocationFlags); +#endif +#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HOST) || (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID) +static void PlatoSystemCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr); + +static void PlatoSystemDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr); + +static IMG_UINT32 PlatoSystemGetRegionId(IMG_HANDLE hPrivData, + PVRSRV_MEMALLOCFLAGS_T uiAllocationFlags); + +#endif /* (PLATO_MEMORY_CONFIG 
== PLATO_MEMORY_HOST) || (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID) */ + +#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_LOCAL) || (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID) +static PHYS_HEAP_FUNCTIONS gsLocalPhysHeapFuncs = { + /* pfnCpuPAddrToDevPAddr */ + PlatoLocalCpuPAddrToDevPAddr, + /* pfnDevPAddrToCpuPAddr */ + PlatoLocalDevPAddrToCpuPAddr, + /* pfnGetRegionId */ + PlatoLocalGetRegionId, +}; +#endif + +#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HOST) || (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID) +static PHYS_HEAP_FUNCTIONS gsHostPhysHeapFuncs = { + /* pfnCpuPAddrToDevPAddr */ + PlatoSystemCpuPAddrToDevPAddr, + /* pfnDevPAddrToCpuPAddr */ + PlatoSystemDevPAddrToCpuPAddr, + /* pfnGetRegionId */ + PlatoSystemGetRegionId, +}; +#endif + +#if (PLATO_MEMORY_CONFIG != PLATO_MEMORY_LOCAL) && \ + (PLATO_MEMORY_CONFIG != PLATO_MEMORY_HYBRID) && \ + (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HOST) +#error "PLATO_MEMORY_CONFIG not valid" +#endif + +/***************************************************************************** + * system specific data structures + *****************************************************************************/ + +#endif /* !defined(__SYSCONFIG_H__) */ diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_plato/sysinfo.h b/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_plato/sysinfo.h new file mode 100644 index 000000000000..3d80adffc0d6 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_plato/sysinfo.h @@ -0,0 +1,64 @@ +/*************************************************************************/ /*! +@File +@Title System Description Header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides system-specific declarations and macros +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__SYSINFO_H__) +#define __SYSINFO_H__ + +#define SYS_RGX_DEV_VENDOR_ID (0x1AEE) +#define SYS_RGX_DEV_DEVICE_ID (0x0003) +#define SYS_RGX_DEV_NAME "plato_rogue" + +/*!< System specific poll/timeout details */ +#if defined(VIRTUAL_PLATFORM) || defined(EMULATOR) +/* Emulator clock ~600 times slower than HW */ +#define MAX_HW_TIME_US (300000000) +#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (1000000) +#else +#define MAX_HW_TIME_US (500000) +#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (1500)//(100000) +#endif + +#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000) +#define WAIT_TRY_COUNT (10000) + +#endif /* !defined(__SYSINFO_H__) */ diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_tc/Kbuild.mk b/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_tc/Kbuild.mk new file mode 100644 index 000000000000..13ed9696b8cf --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_tc/Kbuild.mk @@ -0,0 +1,57 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### +PVRSRVKM_NAME = $(PVRSRV_MODNAME) + +$(PVRSRVKM_NAME)-y += \ + services/system/rogue/$(PVR_SYSTEM)/sysconfig.o \ + services/system/rogue/common/env/linux/pci_support.o \ + services/system/rogue/common/env/linux/dma_support.o \ + services/server/common/vmm_pvz_client.o \ + services/server/common/vmm_pvz_server.o \ + services/server/common/vz_vmm_pvz.o \ + services/server/common/vz_vmm_vm.o \ + services/system/rogue/common/vmm_type_$(VMM_TYPE).o + +ccflags-y += \ + -I$(TOP)/services/system/rogue/common/env/linux -I$(TOP)/services/system/rogue/common/env/linux \ + -I$(TOP)/kernel/drivers/staging/imgtec \ + -I$(TOP)/kernel/drivers/staging/imgtec/tc \ + -I$(TOP)/include/rogue/system/rgx_tc -I$(TOP)/include/system/rgx_tc diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_tc/sysconfig.c b/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_tc/sysconfig.c new file mode 100644 index 000000000000..4a6fff18f08d --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_tc/sysconfig.c @@ -0,0 +1,896 @@ +/*************************************************************************/ /*! +@File +@Title System Configuration +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description System Configuration functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#include "sysinfo.h" +#include "apollo_regs.h" + +#include "pvrsrv.h" +#include "pvrsrv_device.h" +#include "rgxdevice.h" +#include "syscommon.h" +#include "allocmem.h" +#include "pvr_debug.h" + +#if defined(SUPPORT_ION) +#include PVR_ANDROID_ION_HEADER +#include "ion_support.h" +#include "ion_sys.h" +#endif + +#include "tc_drv.h" + +#include +#include + +/* Must be consecutive and start from 0 */ +#define PHY_HEAP_CARD_GPU 0 +#define PHY_HEAP_CARD_EXT 1 +#define PHY_HEAP_LMA_NUM 2 + +#define PHY_HEAP_SYSTEM PHY_HEAP_LMA_NUM +#define PHY_HEAP_NUM 3 + +#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (10) + +typedef enum _LMA_HEAP_REGION_ID_ +{ + /* + * MAIN region needs to be the one with index 0, because the kernel code + * always uses LMA heap region 0 to allocate MMU pages + */ + REGION_MAIN = 0, + REGION_NUM +} LMA_HEAP_REGION_ID; + +#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS) + +/* Dummy DVFS configuration used purely for testing purposes */ + +static const IMG_OPP asOPPTable[] = +{ + { 8, 25000000}, + { 16, 50000000}, + { 32, 75000000}, + { 64, 100000000}, +}; + +#define LEVEL_COUNT (sizeof(asOPPTable) / sizeof(IMG_OPP)) + +static void SetFrequency(IMG_UINT32 ui32Frequency) +{ + PVR_DPF((PVR_DBG_ERROR, "SetFrequency %u", ui32Frequency)); +} + +static void SetVoltage(IMG_UINT32 ui32Voltage) +{ + PVR_DPF((PVR_DBG_ERROR, "SetVoltage %u", ui32Voltage)); +} + +#endif + +static void TCLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr); + +static void TCLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr); + +static IMG_UINT32 TCLocalGetRegionId(IMG_HANDLE hPrivData, + PVRSRV_MEMALLOCFLAGS_T uiAllocFlags); + +static PHYS_HEAP_FUNCTIONS gsLocalPhysHeapFuncs = +{ + .pfnCpuPAddrToDevPAddr = 
TCLocalCpuPAddrToDevPAddr, + .pfnDevPAddrToCpuPAddr = TCLocalDevPAddrToCpuPAddr, + .pfnGetRegionId = TCLocalGetRegionId, +}; + +static void TCHostCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr); + +static void TCHostDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr); + +static IMG_UINT32 TCHostGetRegionId(IMG_HANDLE hPrivData, + PVRSRV_MEMALLOCFLAGS_T uiAllocFlags); + +static PHYS_HEAP_FUNCTIONS gsHostPhysHeapFuncs = +{ + .pfnCpuPAddrToDevPAddr = TCHostCpuPAddrToDevPAddr, + .pfnDevPAddrToCpuPAddr = TCHostDevPAddrToCpuPAddr, + .pfnGetRegionId = TCHostGetRegionId, +}; + +typedef struct _SYS_DATA_ SYS_DATA; + +struct _SYS_DATA_ +{ + struct platform_device *pdev; + + struct tc_rogue_platform_data *pdata; + + struct resource *registers; + +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + struct ion_client *ion_client; + struct ion_handle *ion_rogue_allocation; +#endif +}; + +#define SYSTEM_INFO_FORMAT_STRING "FPGA Revision: %s - TCF Core Revision: %s - TCF Core Target Build ID: %s - PCI Version: %s - Macro Version: %s" +#define FPGA_REV_MAX_LEN 8 /* current longest format: "x.y.z" */ +#define TCF_CORE_REV_MAX_LEN 8 /* current longest format: "x.y.z" */ +#define TCF_CORE_CFG_MAX_LEN 4 /* current longest format: "x" */ +#define PCI_VERSION_MAX_LEN 4 /* current longest format: "x" */ +#define MACRO_VERSION_MAX_LEN 8 /* current longest format: "x.yz" */ + +static IMG_CHAR *GetDeviceVersionString(SYS_DATA *psSysData) +{ + int err; + char str_fpga_rev[FPGA_REV_MAX_LEN]={0}; + char str_tcf_core_rev[TCF_CORE_REV_MAX_LEN]={0}; + char str_tcf_core_target_build_id[TCF_CORE_CFG_MAX_LEN]={0}; + char str_pci_ver[PCI_VERSION_MAX_LEN]={0}; + char str_macro_ver[MACRO_VERSION_MAX_LEN]={0}; + + IMG_CHAR *pszVersion; + IMG_UINT32 ui32StringLength; + + err = tc_sys_strings(psSysData->pdev->dev.parent, 
+ str_fpga_rev, sizeof(str_fpga_rev), + str_tcf_core_rev, sizeof(str_tcf_core_rev), + str_tcf_core_target_build_id, sizeof(str_tcf_core_target_build_id), + str_pci_ver, sizeof(str_pci_ver), + str_macro_ver, sizeof(str_macro_ver)); + if (err) + { + return NULL; + } + + /* Calculate how much space we need to allocate for the string */ + ui32StringLength = OSStringLength(SYSTEM_INFO_FORMAT_STRING); + ui32StringLength += OSStringLength(str_fpga_rev); + ui32StringLength += OSStringLength(str_tcf_core_rev); + ui32StringLength += OSStringLength(str_tcf_core_target_build_id); + ui32StringLength += OSStringLength(str_pci_ver); + ui32StringLength += OSStringLength(str_macro_ver); + + /* Create the version string */ + pszVersion = OSAllocMem(ui32StringLength * sizeof(IMG_CHAR)); + if (pszVersion) + { + OSSNPrintf(&pszVersion[0], ui32StringLength, + SYSTEM_INFO_FORMAT_STRING, + str_fpga_rev, + str_tcf_core_rev, + str_tcf_core_target_build_id, + str_pci_ver, + str_macro_ver); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to create format string", __func__)); + } + + return pszVersion; +} + +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) +static SYS_DATA *gpsIonPrivateData; + +PVRSRV_ERROR IonInit(void *pvPrivateData) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + SYS_DATA *psSysData = pvPrivateData; + gpsIonPrivateData = psSysData; + + psSysData->ion_client = ion_client_create(psSysData->pdata->ion_device, SYS_RGX_DEV_NAME); + if (IS_ERR(psSysData->ion_client)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create ION client (%ld)", __func__, PTR_ERR(psSysData->ion_client))); + eError = PVRSRV_ERROR_PCI_CALL_FAILED; + goto err_out; + } + /* Allocate the whole rogue ion heap and pass that to services to manage */ + psSysData->ion_rogue_allocation = ion_alloc(psSysData->ion_client, psSysData->pdata->rogue_heap_memory_size, 4096, (1 << psSysData->pdata->ion_heap_id), 0); + if (IS_ERR(psSysData->ion_rogue_allocation)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: 
Failed to allocate ION rogue buffer (%ld)", __func__, PTR_ERR(psSysData->ion_rogue_allocation))); + eError = PVRSRV_ERROR_PCI_CALL_FAILED; + goto err_destroy_client; + + } + + return PVRSRV_OK; +err_destroy_client: + ion_client_destroy(psSysData->ion_client); + psSysData->ion_client = NULL; +err_out: + return eError; +} + +void IonDeinit(void) +{ + SYS_DATA *psSysData = gpsIonPrivateData; + ion_free(psSysData->ion_client, psSysData->ion_rogue_allocation); + psSysData->ion_rogue_allocation = NULL; + ion_client_destroy(psSysData->ion_client); + psSysData->ion_client = NULL; +} + +struct ion_device *IonDevAcquire(void) +{ + return gpsIonPrivateData->pdata->ion_device; +} + +void IonDevRelease(struct ion_device *ion_device) +{ + PVR_ASSERT(ion_device == gpsIonPrivateData->pdata->ion_device); +} +#endif /* defined(SUPPORT_ION) */ + +static void TCLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData; + SYS_DATA *psSysData = psDevConfig->hSysData; + IMG_UINT32 ui32Idx; + + for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ui32Idx++) + { + psDevPAddr[ui32Idx].uiAddr = + psCpuPAddr[ui32Idx].uiAddr - psSysData->pdata->tc_memory_base; + } +} + +static void TCLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData; + SYS_DATA *psSysData = psDevConfig->hSysData; + IMG_UINT32 ui32Idx; + + for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ui32Idx++) + { + psCpuPAddr[ui32Idx].uiAddr = + psDevPAddr[ui32Idx].uiAddr + psSysData->pdata->tc_memory_base; + } +} + +static IMG_UINT32 TCLocalGetRegionId(IMG_HANDLE hPrivData, + PVRSRV_MEMALLOCFLAGS_T uiAllocFlags) +{ + /* Return first region which is always valid */ + return REGION_MAIN; +} + +static void TCHostCpuPAddrToDevPAddr(IMG_HANDLE 
hPrivData, + IMG_UINT32 uiNumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr) +{ + PVR_ASSERT(sizeof(*psDevPAddr) == sizeof(*psCpuPAddr)); + OSCachedMemCopy(psDevPAddr, psCpuPAddr, uiNumOfAddr * sizeof(*psDevPAddr)); +} + +static void TCHostDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 uiNumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PVR_ASSERT(sizeof(*psCpuPAddr) == sizeof(*psDevPAddr)); + OSCachedMemCopy(psCpuPAddr, psDevPAddr, uiNumOfAddr * sizeof(*psCpuPAddr)); +} + +static IMG_UINT32 TCHostGetRegionId(IMG_HANDLE hPrivData, + PVRSRV_MEMALLOCFLAGS_T uiAllocFlags) +{ + /* Return first region which is always valid */ + return 0; +} + +static void +FreePhysHeaps(PHYS_HEAP_CONFIG *pasPhysHeaps, IMG_UINT32 uiNumHeaps) +{ + IMG_UINT32 i; + + for (i = 0; i < uiNumHeaps; i++) + { + OSFreeMem(pasPhysHeaps[i].pasRegions); + } +} + +static PVRSRV_ERROR +InitLocalHeap(PHYS_HEAP_CONFIG *psPhysHeap, IMG_UINT32 ui32ID, + IMG_UINT64 uiBaseAddr, IMG_UINT64 uiStartAddr, + IMG_UINT64 uiSize, PHYS_HEAP_FUNCTIONS *psFuncs) +{ + PHYS_HEAP_REGION *psRegion; + + psRegion = OSAllocMem(sizeof(*psRegion)); + if (!psRegion) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + psRegion->sCardBase.uiAddr = uiBaseAddr; + psRegion->sStartAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(uiStartAddr); + psRegion->uiSize = uiSize; + + psPhysHeap->ui32PhysHeapID = ui32ID; + psPhysHeap->eType = PHYS_HEAP_TYPE_LMA; + psPhysHeap->pszPDumpMemspaceName = "LMA"; + psPhysHeap->psMemFuncs = psFuncs; + psPhysHeap->pasRegions = psRegion; + psPhysHeap->ui32NumOfRegions = 1; + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +InitLocalHeaps(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps) +{ + struct tc_rogue_platform_data *pdata = psSysData->pdata; + PHYS_HEAP_FUNCTIONS *psHeapFuncs; + IMG_UINT64 uiLocalCardBase; + PVRSRV_ERROR eError; + + if (psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) + { + psHeapFuncs = &gsHostPhysHeapFuncs; + uiLocalCardBase = 
psSysData->pdata->tc_memory_base; + } + else + { + psHeapFuncs = &gsLocalPhysHeapFuncs; + uiLocalCardBase = 0; + } + + eError = InitLocalHeap(&pasPhysHeaps[PHY_HEAP_CARD_GPU], PHY_HEAP_CARD_GPU, + uiLocalCardBase, pdata->rogue_heap_memory_base, + pdata->rogue_heap_memory_size, psHeapFuncs); + if (eError != PVRSRV_OK) + { + return eError; + } + + eError = InitLocalHeap(&pasPhysHeaps[PHY_HEAP_CARD_EXT], PHY_HEAP_CARD_EXT, + uiLocalCardBase, pdata->pdp_heap_memory_base, + pdata->pdp_heap_memory_size, psHeapFuncs); + if (eError != PVRSRV_OK) + { + OSFreeMem(pasPhysHeaps[PHY_HEAP_CARD_GPU].pasRegions); + return eError; + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +InitHostHeaps(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps) +{ + PHYS_HEAP_REGION *psRegion; + + psRegion = OSAllocZMem(sizeof(*psRegion)); + if (!psRegion) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + pasPhysHeaps[PHY_HEAP_SYSTEM].ui32PhysHeapID = PHY_HEAP_SYSTEM; + pasPhysHeaps[PHY_HEAP_SYSTEM].eType = PHYS_HEAP_TYPE_UMA; + pasPhysHeaps[PHY_HEAP_SYSTEM].pszPDumpMemspaceName = "SYSTEM"; + pasPhysHeaps[PHY_HEAP_SYSTEM].pasRegions = psRegion; + pasPhysHeaps[PHY_HEAP_SYSTEM].ui32NumOfRegions = 1; + pasPhysHeaps[PHY_HEAP_SYSTEM].psMemFuncs = &gsHostPhysHeapFuncs; + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +PhysHeapsInit(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps, + void *pvPrivData) +{ + static IMG_UINT32 uiHeapIDBase = 0; + PVRSRV_ERROR eError; + IMG_UINT32 i; + + eError = InitLocalHeaps(psSysData, pasPhysHeaps); + if (eError != PVRSRV_OK) + { + return eError; + } + + eError = InitHostHeaps(psSysData, pasPhysHeaps); + if (eError != PVRSRV_OK) + { + FreePhysHeaps(pasPhysHeaps, PHY_HEAP_LMA_NUM); + return eError; + } + + /* Initialise fields that don't change between memory modes. + * Fix up heap IDs. This is needed for multi-testchip systems to + * ensure the heap IDs are unique as this is what Services expects. 
+ */ + for (i = 0; i < PHY_HEAP_NUM; i++) + { + pasPhysHeaps[i].ui32PhysHeapID += uiHeapIDBase; + pasPhysHeaps[i].hPrivData = pvPrivData; + } + + uiHeapIDBase += PHY_HEAP_NUM; + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +PhysHeapsCreate(const SYS_DATA *psSysData, void *pvPrivData, + PHYS_HEAP_CONFIG **ppasPhysHeapsOut, + IMG_UINT32 *puiPhysHeapCountOut) +{ + PHYS_HEAP_CONFIG *pasPhysHeaps; + PVRSRV_ERROR eError; + + pasPhysHeaps = OSAllocMem(sizeof(*pasPhysHeaps) * PHY_HEAP_NUM); + if (!pasPhysHeaps) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + eError = PhysHeapsInit(psSysData, pasPhysHeaps, pvPrivData); + if (eError != PVRSRV_OK) + { + OSFreeMem(pasPhysHeaps); + return eError; + } + + *ppasPhysHeapsOut = pasPhysHeaps; + *puiPhysHeapCountOut = PHY_HEAP_NUM; + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +DeviceMapPhysHeaps(IMG_UINT32 uiMemMode, PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + const PHYS_HEAP_CONFIG *pasPhysHeaps = psDevConfig->pasPhysHeaps; + IMG_UINT32 *aui32PhysHeapID = psDevConfig->aui32PhysHeapID; + + switch (uiMemMode) + { + case TC_MEMORY_LOCAL: + aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = + pasPhysHeaps[PHY_HEAP_CARD_GPU].ui32PhysHeapID; + aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = + pasPhysHeaps[PHY_HEAP_CARD_GPU].ui32PhysHeapID; + aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = + pasPhysHeaps[PHY_HEAP_CARD_GPU].ui32PhysHeapID; + aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL] = + pasPhysHeaps[PHY_HEAP_CARD_EXT].ui32PhysHeapID; + break; + case TC_MEMORY_HYBRID: + aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = + pasPhysHeaps[PHY_HEAP_CARD_GPU].ui32PhysHeapID; + aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = + pasPhysHeaps[PHY_HEAP_SYSTEM].ui32PhysHeapID; + aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = + pasPhysHeaps[PHY_HEAP_CARD_GPU].ui32PhysHeapID; + aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL] = + pasPhysHeaps[PHY_HEAP_CARD_EXT].ui32PhysHeapID; + break; + default: + PVR_ASSERT(!"Unsupported 
memory mode"); + return PVRSRV_ERROR_NOT_IMPLEMENTED; + } + + return PVRSRV_OK; +} + +static void DeviceConfigDestroy(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + if (psDevConfig->pszVersion) + { + OSFreeMem(psDevConfig->pszVersion); + } + + FreePhysHeaps(psDevConfig->pasPhysHeaps, psDevConfig->ui32PhysHeapCount); + + OSFreeMem(psDevConfig->pasPhysHeaps); + + OSFreeMem(psDevConfig); +} + +static PVRSRV_ERROR DeviceConfigCreate(SYS_DATA *psSysData, + PVRSRV_DEVICE_CONFIG **ppsDevConfigOut) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig; + RGX_DATA *psRGXData; + RGX_TIMING_INFORMATION *psRGXTimingInfo; + PHYS_HEAP_CONFIG *pasPhysHeaps; + IMG_UINT32 uiPhysHeapCount; + PVRSRV_ERROR eError; + + psDevConfig = OSAllocZMem(sizeof(*psDevConfig) + + sizeof(*psRGXData) + + sizeof(*psRGXTimingInfo)); + if (!psDevConfig) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psRGXData = (RGX_DATA *) IMG_OFFSET_ADDR(psDevConfig, sizeof(*psDevConfig)); + psRGXTimingInfo = (RGX_TIMING_INFORMATION *) IMG_OFFSET_ADDR(psRGXData, sizeof(*psRGXData)); + + eError = PhysHeapsCreate(psSysData, psDevConfig, &pasPhysHeaps, &uiPhysHeapCount); + if (eError != PVRSRV_OK) + { + goto ErrorFreeDevConfig; + } + + /* Setup RGX specific timing data */ + psRGXTimingInfo->ui32CoreClockSpeed = tc_core_clock_speed(&psSysData->pdev->dev) * 6; + psRGXTimingInfo->bEnableActivePM = IMG_FALSE; + psRGXTimingInfo->bEnableRDPowIsland = IMG_FALSE; + psRGXTimingInfo->ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS; + + /* Set up the RGX data */ + psRGXData->psRGXTimingInfo = psRGXTimingInfo; + + /* Setup the device config */ + psDevConfig->pvOSDevice = &psSysData->pdev->dev; + psDevConfig->pszName = "tc"; + psDevConfig->pszVersion = GetDeviceVersionString(psSysData); + + psDevConfig->sRegsCpuPBase.uiAddr = psSysData->registers->start; + psDevConfig->ui32RegsSize = resource_size(psSysData->registers); + + psDevConfig->ui32IRQ = TC_INTERRUPT_EXT; + + psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE; + + 
psDevConfig->pasPhysHeaps = pasPhysHeaps; + psDevConfig->ui32PhysHeapCount = uiPhysHeapCount; + + eError = DeviceMapPhysHeaps(psSysData->pdata->mem_mode, psDevConfig); + if (eError != PVRSRV_OK) + { + goto ErrorDestroyDevConfig; + } + + /* Only required for LMA but having this always set shouldn't be a problem */ + psDevConfig->bDevicePA0IsValid = IMG_TRUE; + + psDevConfig->hDevData = psRGXData; + psDevConfig->hSysData = psSysData; + +#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS) + /* Dummy DVFS configuration used purely for testing purposes */ + psDevConfig->sDVFS.sDVFSDeviceCfg.pasOPPTable = asOPPTable; + psDevConfig->sDVFS.sDVFSDeviceCfg.ui32OPPTableSize = LEVEL_COUNT; + psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetFrequency = SetFrequency; + psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetVoltage = SetVoltage; +#endif +#if defined(SUPPORT_LINUX_DVFS) + psDevConfig->sDVFS.sDVFSDeviceCfg.ui32PollMs = 1000; + psDevConfig->sDVFS.sDVFSDeviceCfg.bIdleReq = IMG_TRUE; + psDevConfig->sDVFS.sDVFSGovernorCfg.ui32UpThreshold = 90; + psDevConfig->sDVFS.sDVFSGovernorCfg.ui32DownDifferential = 10; +#endif + + *ppsDevConfigOut = psDevConfig; + + return PVRSRV_OK; + +ErrorDestroyDevConfig: + DeviceConfigDestroy(psDevConfig); +ErrorFreeDevConfig: + OSFreeMem(psDevConfig); + return eError; +} + +PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig; + SYS_DATA *psSysData; + resource_size_t uiRegistersSize; + PVRSRV_ERROR eError; + int err = 0; + + PVR_ASSERT(pvOSDevice); + + psSysData = OSAllocZMem(sizeof(*psSysData)); + if (psSysData == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psSysData->pdev = to_platform_device((struct device *)pvOSDevice); + psSysData->pdata = psSysData->pdev->dev.platform_data; + + PVR_ASSERT(TC_MEMORY_CONFIG == psSysData->pdata->mem_mode); + + /* + * The device cannot address system memory, so there is no DMA + * limitation. 
+ */ + if (psSysData->pdata->mem_mode == TC_MEMORY_LOCAL) + { + dma_set_mask(pvOSDevice, DMA_BIT_MASK(64)); + } + else + { + dma_set_mask(pvOSDevice, DMA_BIT_MASK(32)); + } + + err = tc_enable(psSysData->pdev->dev.parent); + if (err) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to enable PCI device (%d)", __func__, err)); + eError = PVRSRV_ERROR_PCI_CALL_FAILED; + goto ErrFreeSysData; + } + + psSysData->registers = platform_get_resource_byname(psSysData->pdev, + IORESOURCE_MEM, + "rogue-regs"); + if (!psSysData->registers) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to get Rogue register information", + __func__)); + eError = PVRSRV_ERROR_PCI_REGION_UNAVAILABLE; + goto ErrorDevDisable; + } + + /* Check the address range is large enough. */ + uiRegistersSize = resource_size(psSysData->registers); + if (uiRegistersSize < SYS_RGX_REG_REGION_SIZE) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Rogue register region isn't big enough (was %pa, required 0x%08x)", + __func__, &uiRegistersSize, SYS_RGX_REG_REGION_SIZE)); + + eError = PVRSRV_ERROR_PCI_REGION_TOO_SMALL; + goto ErrorDevDisable; + } + + /* Reserve the address range */ + if (!request_mem_region(psSysData->registers->start, + resource_size(psSysData->registers), + SYS_RGX_DEV_NAME)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Rogue register memory region not available", + __func__)); + eError = PVRSRV_ERROR_PCI_CALL_FAILED; + + goto ErrorDevDisable; + } + + eError = DeviceConfigCreate(psSysData, &psDevConfig); + if (eError != PVRSRV_OK) + { + goto ErrorReleaseMemRegion; + } + +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + eError = IonInit(psSysData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise ION", __func__)); + goto ErrorDeviceConfigDestroy; + } +#endif + + *ppsDevConfig = psDevConfig; + + return PVRSRV_OK; + +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) +ErrorDeviceConfigDestroy: + DeviceConfigDestroy(psDevConfig); +#endif 
+ErrorReleaseMemRegion: + release_mem_region(psSysData->registers->start, + resource_size(psSysData->registers)); +ErrorDevDisable: + tc_disable(psSysData->pdev->dev.parent); +ErrFreeSysData: + OSFreeMem(psSysData); + return eError; +} + +void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + SYS_DATA *psSysData = (SYS_DATA *)psDevConfig->hSysData; + +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + IonDeinit(); +#endif + + DeviceConfigDestroy(psDevConfig); + + release_mem_region(psSysData->registers->start, + resource_size(psSysData->registers)); + tc_disable(psSysData->pdev->dev.parent); + + OSFreeMem(psSysData); +} + +PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ +#if defined(TC_APOLLO_TCF5) + PVR_UNREFERENCED_PARAMETER(psDevConfig); + PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf); + return PVRSRV_OK; +#else + SYS_DATA *psSysData = psDevConfig->hSysData; + PVRSRV_ERROR eError = PVRSRV_OK; + u32 tmp = 0; + u32 pll; + + PVR_DUMPDEBUG_LOG("------[ rgx_tc system debug ]------"); + + if (tc_sys_info(psSysData->pdev->dev.parent, &tmp, &pll)) + goto err_out; + + if (tmp > 0) + PVR_DUMPDEBUG_LOG("Chip temperature: %d degrees C", tmp); + PVR_DUMPDEBUG_LOG("PLL status: %x", pll); + +err_out: + return eError; +#endif +} + +typedef struct +{ + struct device *psDev; + int iInterruptID; + void *pvData; + PFN_LISR pfnLISR; +} LISR_DATA; + +static void TCInterruptHandler(void* pvData) +{ + LISR_DATA *psLISRData = pvData; + psLISRData->pfnLISR(psLISRData->pvData); +} + +PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData, + IMG_UINT32 ui32IRQ, + const IMG_CHAR *pszName, + PFN_LISR pfnLISR, + void *pvData, + IMG_HANDLE *phLISRData) +{ + SYS_DATA *psSysData = (SYS_DATA *)hSysData; + LISR_DATA *psLISRData; + PVRSRV_ERROR eError; + int err; + + if (ui32IRQ != TC_INTERRUPT_EXT) + { + PVR_DPF((PVR_DBG_ERROR, "%s: No device matching IRQ %d", __func__, ui32IRQ)); + 
return PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR; + } + + psLISRData = OSAllocZMem(sizeof(*psLISRData)); + if (!psLISRData) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_out; + } + + psLISRData->pfnLISR = pfnLISR; + psLISRData->pvData = pvData; + psLISRData->iInterruptID = ui32IRQ; + psLISRData->psDev = psSysData->pdev->dev.parent; + + err = tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, TCInterruptHandler, psLISRData); + if (err) + { + PVR_DPF((PVR_DBG_ERROR, "%s: tc_set_interrupt_handler() failed (%d)", __func__, err)); + eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR; + goto err_free_data; + } + + err = tc_enable_interrupt(psLISRData->psDev, psLISRData->iInterruptID); + if (err) + { + PVR_DPF((PVR_DBG_ERROR, "%s: tc_enable_interrupt() failed (%d)", __func__, err)); + eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR; + goto err_unset_interrupt_handler; + } + + *phLISRData = psLISRData; + eError = PVRSRV_OK; + + PVR_TRACE(("Installed device LISR " IMG_PFN_FMTSPEC " to irq %u", pfnLISR, ui32IRQ)); + +err_out: + return eError; +err_unset_interrupt_handler: + tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, NULL, NULL); +err_free_data: + OSFreeMem(psLISRData); + goto err_out; +} + +PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData) +{ + LISR_DATA *psLISRData = (LISR_DATA *) hLISRData; + int err; + + err = tc_disable_interrupt(psLISRData->psDev, psLISRData->iInterruptID); + if (err) + { + PVR_DPF((PVR_DBG_ERROR, "%s: tc_disable_interrupt() failed (%d)", __func__, err)); + } + + err = tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, NULL, NULL); + if (err) + { + PVR_DPF((PVR_DBG_ERROR, "%s: tc_set_interrupt_handler() failed (%d)", __func__, err)); + } + + PVR_TRACE(("Uninstalled device LISR " IMG_PFN_FMTSPEC " from irq %u", psLISRData->pfnLISR, psLISRData->iInterruptID)); + + OSFreeMem(psLISRData); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_tc/sysinfo.h 
b/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_tc/sysinfo.h new file mode 100644 index 000000000000..b71df887b113 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_linux_tc/sysinfo.h @@ -0,0 +1,60 @@ +/*************************************************************************/ /*! +@File +@Title System Description Header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides system-specific declarations and macros +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__SYSINFO_H__) +#define __SYSINFO_H__ + +/*!< System specific poll/timeout details */ +#if defined(VIRTUAL_PLATFORM) || defined(FPGA) +#define MAX_HW_TIME_US (240000000) +#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (120000) +#else +#define MAX_HW_TIME_US (500000) +#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (1500)//(10000) +#endif +#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000) +#define WAIT_TRY_COUNT (10000) + +#define SYS_RGX_DEV_NAME "tc_rogue" + +#endif /* !defined(__SYSINFO_H__) */ diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_nohw/Kbuild.mk b/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_nohw/Kbuild.mk new file mode 100644 index 000000000000..2aa281ca62de --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_nohw/Kbuild.mk @@ -0,0 +1,53 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### +PVRSRVKM_NAME = $(PVRSRV_MODNAME) + +$(PVRSRVKM_NAME)-y += services/system/rogue/$(PVR_SYSTEM)/sysconfig.o \ + services/system/rogue/common/env/linux/dma_support.o \ + services/server/common/vmm_pvz_client.o \ + services/server/common/vmm_pvz_server.o \ + services/server/common/vz_vmm_pvz.o \ + services/server/common/vz_vmm_vm.o \ + services/system/rogue/common/vmm_type_$(VMM_TYPE).o + +ifeq ($(SUPPORT_ION),1) +$(PVRSRVKM_NAME)-y += services/system/rogue/common/env/linux/ion_support_generic.o +endif diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_nohw/sysconfig.c b/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_nohw/sysconfig.c new file mode 100644 index 000000000000..381505e46dc7 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_nohw/sysconfig.c @@ -0,0 +1,350 @@ +/*************************************************************************/ /*! +@File +@Title System Configuration +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description System Configuration functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pvrsrv_device.h" +#include "syscommon.h" +#include "vz_vmm_pvz.h" +#include "allocmem.h" +#include "sysinfo.h" +#include "sysconfig.h" +#include "physheap.h" +#if defined(SUPPORT_ION) +#include "ion_support.h" +#endif +#if defined(LINUX) +#include +#endif +#include "rgx_bvnc_defs_km.h" +/* + * In systems that support trusted device address protection, there are three + * physical heaps from which pages should be allocated: + * - one heap for normal allocations + * - one heap for allocations holding META code memory + * - one heap for allocations holding secured DRM data + */ + +#define PHYS_HEAP_IDX_GENERAL 0 +#define PHYS_HEAP_IDX_FW 1 + +#if defined(SUPPORT_TRUSTED_DEVICE) +#define PHYS_HEAP_IDX_TDFWMEM 2 +#define PHYS_HEAP_IDX_TDSECUREBUF 3 +#elif defined(SUPPORT_DEDICATED_FW_MEMORY) +#define PHYS_HEAP_IDX_FW_MEMORY 2 +#endif + +/* + CPU to Device physical address translation +*/ +static +void UMAPhysHeapCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr) +{ + PVR_UNREFERENCED_PARAMETER(hPrivData); + + /* Optimise common case */ + psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr; + if (ui32NumOfAddr > 1) + { + IMG_UINT32 ui32Idx; + for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) + { + psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr; + } + } +} + +/* + Device to CPU physical address translation +*/ +static +void UMAPhysHeapDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PVR_UNREFERENCED_PARAMETER(hPrivData); + + /* Optimise common case */ + psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr; + if (ui32NumOfAddr > 1) + { + IMG_UINT32 ui32Idx; + for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) + { + psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr; + } + } +} + +static PHYS_HEAP_FUNCTIONS 
gsPhysHeapFuncs = +{ + /* pfnCpuPAddrToDevPAddr */ + UMAPhysHeapCpuPAddrToDevPAddr, + /* pfnDevPAddrToCpuPAddr */ + UMAPhysHeapDevPAddrToCpuPAddr, + /* pfnGetRegionId */ + NULL, +}; + +static PVRSRV_ERROR PhysHeapsCreate(PHYS_HEAP_CONFIG **ppasPhysHeapsOut, + IMG_UINT32 *puiPhysHeapCountOut) +{ + /* + * This function is called during device initialisation, which on Linux, + * means it won't be called concurrently. As such, there's no need to + * protect it with a lock or use an atomic variable. + */ + static IMG_UINT32 ui32HeapIDBase = 0; + PHYS_HEAP_CONFIG *pasPhysHeaps; + IMG_UINT32 uiHeapCount = 2; + +#if defined(SUPPORT_TRUSTED_DEVICE) || defined(SUPPORT_DEDICATED_FW_MEMORY) + uiHeapCount += 1; +#endif + + pasPhysHeaps = OSAllocZMem(sizeof(*pasPhysHeaps) * uiHeapCount); + if (!pasPhysHeaps) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].ui32PhysHeapID = + ui32HeapIDBase + PHYS_HEAP_IDX_GENERAL; + pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].pszPDumpMemspaceName = "SYSMEM"; + pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].eType = PHYS_HEAP_TYPE_UMA; + pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].psMemFuncs = &gsPhysHeapFuncs; + + pasPhysHeaps[PHYS_HEAP_IDX_FW].ui32PhysHeapID = + ui32HeapIDBase + PHYS_HEAP_IDX_FW; + pasPhysHeaps[PHYS_HEAP_IDX_FW].pszPDumpMemspaceName = "SYSMEM_FW"; + pasPhysHeaps[PHYS_HEAP_IDX_FW].eType = PHYS_HEAP_TYPE_UMA; + pasPhysHeaps[PHYS_HEAP_IDX_FW].psMemFuncs = &gsPhysHeapFuncs; + +#if defined(SUPPORT_TRUSTED_DEVICE) + pasPhysHeaps[PHYS_HEAP_IDX_TDFWMEM].ui32PhysHeapID = + ui32HeapIDBase + PHYS_HEAP_IDX_TDFWMEM; + pasPhysHeaps[PHYS_HEAP_IDX_TDFWMEM].pszPDumpMemspaceName = "TDFWMEM"; + pasPhysHeaps[PHYS_HEAP_IDX_TDFWMEM].eType = PHYS_HEAP_TYPE_UMA; + pasPhysHeaps[PHYS_HEAP_IDX_TDFWMEM].psMemFuncs = &gsPhysHeapFuncs; + +#elif defined(SUPPORT_DEDICATED_FW_MEMORY) + pasPhysHeaps[PHYS_HEAP_IDX_FW_MEMORY].ui32PhysHeapID = + ui32HeapIDBase + PHYS_HEAP_IDX_FW_MEMORY; + pasPhysHeaps[PHYS_HEAP_IDX_FW_MEMORY].pszPDumpMemspaceName = 
"DEDICATEDFWMEM"; + pasPhysHeaps[PHYS_HEAP_IDX_FW_MEMORY].eType = PHYS_HEAP_TYPE_UMA; + pasPhysHeaps[PHYS_HEAP_IDX_FW_MEMORY].psMemFuncs = &gsPhysHeapFuncs; +#endif + + ui32HeapIDBase += uiHeapCount; + + *ppasPhysHeapsOut = pasPhysHeaps; + *puiPhysHeapCountOut = uiHeapCount; + + return PVRSRV_OK; +} + +static void PhysHeapsDestroy(PHYS_HEAP_CONFIG *pasPhysHeaps) +{ + OSFreeMem(pasPhysHeaps); +} + +static void SysDevFeatureDepInit(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT64 ui64Features) +{ +#if defined(SUPPORT_AXI_ACE_TEST) + if ( ui64Features & RGX_FEATURE_AXI_ACELITE_BIT_MASK) + { + psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CPU_ONLY; + }else +#endif + { + psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE; + } +} + +PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig; + RGX_DATA *psRGXData; + RGX_TIMING_INFORMATION *psRGXTimingInfo; + PHYS_HEAP_CONFIG *pasPhysHeaps; + IMG_UINT32 uiPhysHeapCount; + PVRSRV_ERROR eError; + +#if defined(LINUX) + dma_set_mask(pvOSDevice, DMA_BIT_MASK(40)); +#endif + + psDevConfig = OSAllocZMem(sizeof(*psDevConfig) + + sizeof(*psRGXData) + + sizeof(*psRGXTimingInfo)); + if (!psDevConfig) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psRGXData = (RGX_DATA *)((IMG_CHAR *)psDevConfig + sizeof(*psDevConfig)); + psRGXTimingInfo = (RGX_TIMING_INFORMATION *)((IMG_CHAR *)psRGXData + sizeof(*psRGXData)); + + eError = PhysHeapsCreate(&pasPhysHeaps, &uiPhysHeapCount); + if (eError) + { + goto ErrorFreeDevConfig; + } + + /* Setup RGX specific timing data */ + psRGXTimingInfo->ui32CoreClockSpeed = RGX_NOHW_CORE_CLOCK_SPEED; + psRGXTimingInfo->bEnableActivePM = IMG_FALSE; + psRGXTimingInfo->bEnableRDPowIsland = IMG_FALSE; + psRGXTimingInfo->ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS; + + /* Set up the RGX data */ + psRGXData->psRGXTimingInfo = psRGXTimingInfo; + +#if defined(SUPPORT_TRUSTED_DEVICE) + psRGXData->bHasTDFWMemPhysHeap = IMG_TRUE; + 
psRGXData->uiTDFWMemPhysHeapID = + pasPhysHeaps[PHYS_HEAP_IDX_TDFWMEM].ui32PhysHeapID; + +#elif defined(SUPPORT_DEDICATED_FW_MEMORY) + psRGXData->bHasFWMemPhysHeap = IMG_TRUE; + psRGXData->uiFWMemPhysHeapID = PHYS_HEAP_IDX_FW_MEMORY; +#endif + + /* Setup the device config */ + psDevConfig->pvOSDevice = pvOSDevice; + psDevConfig->pszName = "nohw"; + psDevConfig->pszVersion = NULL; + psDevConfig->pfnSysDevFeatureDepInit = SysDevFeatureDepInit; + + /* Device setup information */ + psDevConfig->sRegsCpuPBase.uiAddr = 0x00f00000; + psDevConfig->ui32RegsSize = 0x4000; + psDevConfig->ui32IRQ = 0x00000bad; + + psDevConfig->pasPhysHeaps = pasPhysHeaps; + psDevConfig->ui32PhysHeapCount = uiPhysHeapCount; + + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = + pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].ui32PhysHeapID; + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = + pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].ui32PhysHeapID; + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL] = + pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].ui32PhysHeapID; + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = + pasPhysHeaps[PHYS_HEAP_IDX_FW].ui32PhysHeapID; + + /* No power management on no HW system */ + psDevConfig->pfnPrePowerState = NULL; + psDevConfig->pfnPostPowerState = NULL; + + psDevConfig->bHasFBCDCVersion31 = IMG_FALSE; + + /* No clock frequency either */ + psDevConfig->pfnClockFreqGet = NULL; + + psDevConfig->hDevData = psRGXData; + + /* Setup other system specific stuff */ +#if defined(SUPPORT_ION) + IonInit(NULL); +#endif + + *ppsDevConfig = psDevConfig; + + return PVRSRV_OK; + +ErrorFreeDevConfig: + OSFreeMem(psDevConfig); + return eError; +} + +void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ +#if defined(SUPPORT_ION) + IonDeinit(); +#endif + + PhysHeapsDestroy(psDevConfig->pasPhysHeaps); + OSFreeMem(psDevConfig); +} + +PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData, + IMG_UINT32 ui32IRQ, + const IMG_CHAR *pszName, + PFN_LISR 
pfnLISR, + void *pvData, + IMG_HANDLE *phLISRData) +{ + PVR_UNREFERENCED_PARAMETER(hSysData); + PVR_UNREFERENCED_PARAMETER(ui32IRQ); + PVR_UNREFERENCED_PARAMETER(pszName); + PVR_UNREFERENCED_PARAMETER(pfnLISR); + PVR_UNREFERENCED_PARAMETER(pvData); + PVR_UNREFERENCED_PARAMETER(phLISRData); + + return PVRSRV_OK; +} + +PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData) +{ + PVR_UNREFERENCED_PARAMETER(hLISRData); + + return PVRSRV_OK; +} + +PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVR_UNREFERENCED_PARAMETER(psDevConfig); + PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf); + PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile); + return PVRSRV_OK; +} + +/****************************************************************************** + End of file (sysconfig.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_nohw/sysconfig.h b/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_nohw/sysconfig.h new file mode 100644 index 000000000000..8da8dc8354b2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_nohw/sysconfig.h @@ -0,0 +1,58 @@ +/*************************************************************************/ /*! +@File +@Title System Description Header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides system-specific declarations and macros +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pvrsrv_device.h" +#include "rgxdevice.h" + +#if !defined(__SYSCCONFIG_H__) +#define __SYSCCONFIG_H__ + + +#define RGX_NOHW_CORE_CLOCK_SPEED 100000000 +#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (0) + +/***************************************************************************** + * system specific data structures + *****************************************************************************/ + +#endif /* __SYSCCONFIG_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_nohw/sysinfo.h b/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_nohw/sysinfo.h new file mode 100644 index 000000000000..83587b1857fb --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/rogue/rgx_nohw/sysinfo.h @@ -0,0 +1,57 @@ +/*************************************************************************/ /*! +@File +@Title System Description Header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides system-specific declarations and macros +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__SYSINFO_H__) +#define __SYSINFO_H__ + +/*!< System specific poll/timeout details */ +#define MAX_HW_TIME_US (500000) +#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (1500)//(10000) +#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000) +#define WAIT_TRY_COUNT (10000) + +#if defined(__linux__) +#define SYS_RGX_DEV_NAME "rgxnohw" +#endif + +#endif /* !defined(__SYSINFO_H__) */ diff --git a/drivers/mcst/gpu-imgtec/services/system/volcanic/common/env/linux/dma_support.c b/drivers/mcst/gpu-imgtec/services/system/volcanic/common/env/linux/dma_support.c new file mode 100644 index 000000000000..f08ef461fec4 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/volcanic/common/env/linux/dma_support.c @@ -0,0 +1,335 @@ +/*************************************************************************/ /*! +@File dma_support.c +@Title System DMA support +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provides a contiguous memory allocator (i.e. DMA allocator); + APIs are used for allocation/ioremapping (DMA/PA <-> CPU/VA) +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#if defined(LINUX) +#include +#include +#endif + +#include "allocmem.h" +#include "dma_support.h" + +#define DMA_MAX_IOREMAP_ENTRIES 8 +static IMG_BOOL gbEnableDmaIoRemapping = IMG_FALSE; +static DMA_ALLOC gsDmaIoRemapArray[DMA_MAX_IOREMAP_ENTRIES] = {{0}}; +static IMG_UINT32 gsDmaIoRemapRef[DMA_MAX_IOREMAP_ENTRIES] = {0}; + +/*! +****************************************************************************** + @Function SysDmaAllocMem + + @Description Allocates physically contiguous memory + + @Return PVRSRV_ERROR PVRSRV_OK on success. 
Otherwise, a PVRSRV_ + error code + ******************************************************************************/ +PVRSRV_ERROR SysDmaAllocMem(DMA_ALLOC *psDmaAlloc) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if (psDmaAlloc != NULL && psDmaAlloc->pvOSDevice != NULL) + { +#if defined(LINUX) + psDmaAlloc->pvVirtAddr = + dma_alloc_coherent((struct device *)psDmaAlloc->pvOSDevice, + (size_t) psDmaAlloc->ui64Size, + (dma_addr_t *)&psDmaAlloc->sBusAddr.uiAddr, + GFP_KERNEL); + PVR_LOG_RETURN_IF_FALSE((NULL != psDmaAlloc->pvVirtAddr), "dma_alloc_coherent() failed", PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES); +#else + #error "Provide OS implementation of DMA allocation"; +#endif + } + + return eError; +} + +/*! +****************************************************************************** + @Function SysDmaFreeMem + + @Description Free physically contiguous memory + + @Return void + ******************************************************************************/ +void SysDmaFreeMem(DMA_ALLOC *psDmaAlloc) +{ + if (psDmaAlloc && psDmaAlloc->pvVirtAddr) + { +#if defined(LINUX) + dma_free_coherent((struct device *)psDmaAlloc->pvOSDevice, + (size_t) psDmaAlloc->ui64Size, + psDmaAlloc->pvVirtAddr, + (dma_addr_t )psDmaAlloc->sBusAddr.uiAddr); +#else + #error "Provide OS implementation of DMA deallocation"; +#endif + } +} + +/*! +****************************************************************************** + @Function SysDmaRegisterForIoRemapping + + @Description Registers DMA_ALLOC for manual I/O remapping + + @Return PVRSRV_ERROR PVRSRV_OK on success. 
Otherwise, a PVRSRV_ + error code + ******************************************************************************/ +PVRSRV_ERROR SysDmaRegisterForIoRemapping(DMA_ALLOC *psDmaAlloc) +{ + IMG_UINT32 ui32Idx; + PVRSRV_ERROR eError = PVRSRV_ERROR_TOO_FEW_BUFFERS; + + if (psDmaAlloc == NULL || + psDmaAlloc->ui64Size == 0 || + psDmaAlloc->sBusAddr.uiAddr == 0) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + else if (psDmaAlloc->pvVirtAddr == NULL) + { + /* Check if an I/O remap entry already exists for this request */ + for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) + { + if (gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr && + gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr <= psDmaAlloc->sBusAddr.uiAddr && + gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr + gsDmaIoRemapArray[ui32Idx].ui64Size >= psDmaAlloc->sBusAddr.uiAddr + psDmaAlloc->ui64Size) + { + PVR_ASSERT(gsDmaIoRemapArray[ui32Idx].pvVirtAddr); + break; + } + } + + if (ui32Idx < DMA_MAX_IOREMAP_ENTRIES) + { + IMG_UINT64 ui64Offset; + ui64Offset = psDmaAlloc->sBusAddr.uiAddr - gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr; + psDmaAlloc->pvVirtAddr = gsDmaIoRemapArray[ui32Idx].pvVirtAddr + (uintptr_t)ui64Offset; + gsDmaIoRemapRef[ui32Idx] += 1; + return PVRSRV_OK; + } + + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Check if there is a free I/O remap table entry for this request */ + for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) + { + if (gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr == 0) + { + PVR_ASSERT(gsDmaIoRemapArray[ui32Idx].pvVirtAddr == NULL); + PVR_ASSERT(gsDmaIoRemapArray[ui32Idx].ui64Size == 0); + break; + } + } + + if (ui32Idx >= DMA_MAX_IOREMAP_ENTRIES) + { + return eError; + } + + gsDmaIoRemapArray[ui32Idx].ui64Size = psDmaAlloc->ui64Size; + gsDmaIoRemapArray[ui32Idx].sBusAddr = psDmaAlloc->sBusAddr; + gsDmaIoRemapArray[ui32Idx].pvVirtAddr = psDmaAlloc->pvVirtAddr; + gsDmaIoRemapRef[ui32Idx] += 1; + + PVR_LOG(("DMA: register I/O remap: VA: 0x%p, PA: 0x%llx, Size: 0x%llx", + 
psDmaAlloc->pvVirtAddr, + psDmaAlloc->sBusAddr.uiAddr, + psDmaAlloc->ui64Size)); + + gbEnableDmaIoRemapping = IMG_TRUE; + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + @Function SysDmaDeregisterForIoRemapping + + @Description Deregisters DMA_ALLOC from manual I/O remapping + + @Return void + ******************************************************************************/ +void SysDmaDeregisterForIoRemapping(DMA_ALLOC *psDmaAlloc) +{ + IMG_UINT32 ui32Idx; + + if (psDmaAlloc == NULL || + psDmaAlloc->ui64Size == 0 || + psDmaAlloc->pvVirtAddr == NULL || + psDmaAlloc->sBusAddr.uiAddr == 0) + { + return; + } + + /* Remove specified entry from the list of I/O remap entries */ + for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) + { + if (gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr && + gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr <= psDmaAlloc->sBusAddr.uiAddr && + gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr + gsDmaIoRemapArray[ui32Idx].ui64Size >= psDmaAlloc->sBusAddr.uiAddr + psDmaAlloc->ui64Size) + { + if (! --gsDmaIoRemapRef[ui32Idx]) + { + PVR_LOG(("DMA: deregister I/O remap: VA: 0x%p, PA: 0x%llx, Size: 0x%llx", + gsDmaIoRemapArray[ui32Idx].pvVirtAddr, + gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr, + gsDmaIoRemapArray[ui32Idx].ui64Size)); + + gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr = 0; + gsDmaIoRemapArray[ui32Idx].pvVirtAddr = NULL; + gsDmaIoRemapArray[ui32Idx].ui64Size = 0; + } + + break; + } + } + + /* Check if no other I/O remap entries exists for remapping */ + for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) + { + if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr != NULL) + { + break; + } + } + + if (ui32Idx == DMA_MAX_IOREMAP_ENTRIES) + { + /* No entries found so disable remapping */ + gbEnableDmaIoRemapping = IMG_FALSE; + } +} + +/*! 
+****************************************************************************** + @Function SysDmaDevPAddrToCpuVAddr + + @Description Maps a DMA_ALLOC physical address to CPU virtual address + + @Return IMG_CPU_VIRTADDR on success. Otherwise, a NULL + ******************************************************************************/ +IMG_CPU_VIRTADDR SysDmaDevPAddrToCpuVAddr(IMG_UINT64 uiAddr, IMG_UINT64 ui64Size) +{ + IMG_CPU_VIRTADDR pvDMAVirtAddr = NULL; + DMA_ALLOC *psHeapDmaAlloc; + IMG_UINT32 ui32Idx; + + if (gbEnableDmaIoRemapping == IMG_FALSE) + { + return pvDMAVirtAddr; + } + + for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) + { + psHeapDmaAlloc = &gsDmaIoRemapArray[ui32Idx]; + if (psHeapDmaAlloc->sBusAddr.uiAddr && uiAddr >= psHeapDmaAlloc->sBusAddr.uiAddr) + { + IMG_UINT64 uiSpan = psHeapDmaAlloc->ui64Size; + IMG_UINT64 uiOffset = uiAddr - psHeapDmaAlloc->sBusAddr.uiAddr; + + if (uiOffset < uiSpan) + { + PVR_ASSERT((uiOffset+ui64Size-1) < uiSpan); + pvDMAVirtAddr = psHeapDmaAlloc->pvVirtAddr + uiOffset; + + PVR_DPF((PVR_DBG_MESSAGE, + "DMA: remap: PA: 0x%llx => VA: 0x%p", + uiAddr, pvDMAVirtAddr)); + + break; + } + } + } + + return pvDMAVirtAddr; +} + +/*! +****************************************************************************** + @Function SysDmaCpuVAddrToDevPAddr + + @Description Maps a DMA_ALLOC CPU virtual address to physical address + + @Return Non-zero value on success. 
Otherwise, a 0 + ******************************************************************************/ +IMG_UINT64 SysDmaCpuVAddrToDevPAddr(IMG_CPU_VIRTADDR pvDMAVirtAddr) +{ + IMG_UINT64 uiAddr = 0; + DMA_ALLOC *psHeapDmaAlloc; + IMG_UINT32 ui32Idx; + + if (gbEnableDmaIoRemapping == IMG_FALSE) + { + return uiAddr; + } + + for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) + { + psHeapDmaAlloc = &gsDmaIoRemapArray[ui32Idx]; + if (psHeapDmaAlloc->pvVirtAddr && pvDMAVirtAddr >= psHeapDmaAlloc->pvVirtAddr) + { + IMG_UINT64 uiSpan = psHeapDmaAlloc->ui64Size; + IMG_UINT64 uiOffset = pvDMAVirtAddr - psHeapDmaAlloc->pvVirtAddr; + + if (uiOffset < uiSpan) + { + uiAddr = psHeapDmaAlloc->sBusAddr.uiAddr + uiOffset; + + PVR_DPF((PVR_DBG_MESSAGE, + "DMA: remap: VA: 0x%p => PA: 0x%llx", + pvDMAVirtAddr, uiAddr)); + + break; + } + } + } + + return uiAddr; +} + +/****************************************************************************** + End of file (dma_support.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/system/volcanic/common/env/linux/interrupt_support.c b/drivers/mcst/gpu-imgtec/services/system/volcanic/common/env/linux/interrupt_support.c new file mode 100644 index 000000000000..c67d45352050 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/volcanic/common/env/linux/interrupt_support.c @@ -0,0 +1,151 @@ +/*************************************************************************/ /*! +@File +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#include "pvr_debug.h" +#include "allocmem.h" +#include "interrupt_support.h" + +typedef struct LISR_DATA_TAG +{ + IMG_UINT32 ui32IRQ; + PFN_SYS_LISR pfnLISR; + void *pvData; +} LISR_DATA; + +static irqreturn_t SystemISRWrapper(int irq, void *dev_id) +{ + LISR_DATA *psLISRData = (LISR_DATA *)dev_id; + + PVR_UNREFERENCED_PARAMETER(irq); + + if (psLISRData) + { + if (psLISRData->pfnLISR(psLISRData->pvData)) + { + return IRQ_HANDLED; + } + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: Missing interrupt data", __func__)); + } + + return IRQ_NONE; +} + +PVRSRV_ERROR OSInstallSystemLISR(IMG_HANDLE *phLISR, + IMG_UINT32 ui32IRQ, + const IMG_CHAR *pszDevName, + PFN_SYS_LISR pfnLISR, + void *pvData, + IMG_UINT32 ui32Flags) +{ + LISR_DATA *psLISRData; + unsigned long ulIRQFlags = 0; + + if (pfnLISR == NULL || pvData == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (ui32Flags & ~SYS_IRQ_FLAG_MASK) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + switch (ui32Flags & SYS_IRQ_FLAG_TRIGGER_MASK) + { + case SYS_IRQ_FLAG_TRIGGER_DEFAULT: + break; + case SYS_IRQ_FLAG_TRIGGER_LOW: + ulIRQFlags |= IRQF_TRIGGER_LOW; + break; + case SYS_IRQ_FLAG_TRIGGER_HIGH: + ulIRQFlags |= IRQF_TRIGGER_HIGH; + break; + default: + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (ui32Flags & SYS_IRQ_FLAG_SHARED) + { + ulIRQFlags |= IRQF_SHARED; + } + + psLISRData = OSAllocMem(sizeof(*psLISRData)); + if (psLISRData == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psLISRData->ui32IRQ = ui32IRQ; + psLISRData->pfnLISR = pfnLISR; + psLISRData->pvData = pvData; + + if (request_irq(ui32IRQ, SystemISRWrapper, ulIRQFlags, pszDevName, psLISRData)) + { + OSFreeMem(psLISRData); + + return PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER; + } + + *phLISR = (IMG_HANDLE)psLISRData; + + return PVRSRV_OK; +} + +PVRSRV_ERROR OSUninstallSystemLISR(IMG_HANDLE hLISR) +{ + LISR_DATA *psLISRData = (LISR_DATA *)hLISR; 
+ + if (psLISRData == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + free_irq(psLISRData->ui32IRQ, psLISRData); + + OSFreeMem(psLISRData); + + return PVRSRV_OK; +} diff --git a/drivers/mcst/gpu-imgtec/services/system/volcanic/common/env/linux/pci_support.c b/drivers/mcst/gpu-imgtec/services/system/volcanic/common/env/linux/pci_support.c new file mode 100644 index 000000000000..c3bbcc46cb2c --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/volcanic/common/env/linux/pci_support.c @@ -0,0 +1,726 @@ +/*************************************************************************/ /*! +@File +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include +#include + +#if defined(CONFIG_MTRR) +#include +#endif + +#include "pci_support.h" +#include "allocmem.h" + +typedef struct _PVR_PCI_DEV_TAG +{ + struct pci_dev *psPCIDev; + HOST_PCI_INIT_FLAGS ePCIFlags; + IMG_BOOL abPCIResourceInUse[DEVICE_COUNT_RESOURCE]; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + int iMTRR[DEVICE_COUNT_RESOURCE]; +#endif +} PVR_PCI_DEV; + +/*************************************************************************/ /*! +@Function OSPCISetDev +@Description Set a PCI device for subsequent use. 
+@Input pvPCICookie Pointer to OS specific PCI structure +@Input eFlags Flags +@Return PVRSRV_PCI_DEV_HANDLE Pointer to PCI device handle +*/ /**************************************************************************/ +PVRSRV_PCI_DEV_HANDLE OSPCISetDev(void *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags) +{ + int err; + IMG_UINT32 i; + PVR_PCI_DEV *psPVRPCI; + + psPVRPCI = OSAllocMem(sizeof(*psPVRPCI)); + if (psPVRPCI == NULL) + { + printk(KERN_ERR "OSPCISetDev: Couldn't allocate PVR PCI structure\n"); + return NULL; + } + + psPVRPCI->psPCIDev = (struct pci_dev *)pvPCICookie; + psPVRPCI->ePCIFlags = eFlags; + + err = pci_enable_device(psPVRPCI->psPCIDev); + if (err != 0) + { + printk(KERN_ERR "OSPCISetDev: Couldn't enable device (%d)\n", err); + OSFreeMem(psPVRPCI); + return NULL; + } + + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */ + { + pci_set_master(psPVRPCI->psPCIDev); + } + + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI) /* PRQA S 3358 */ /* misuse of enums */ + { +#if defined(CONFIG_PCI_MSI) + err = pci_enable_msi(psPVRPCI->psPCIDev); + if (err != 0) + { + printk(KERN_ERR "OSPCISetDev: Couldn't enable MSI (%d)", err); + psPVRPCI->ePCIFlags &= ~HOST_PCI_INIT_FLAG_MSI; /* PRQA S 1474,3358,4130 */ /* misuse of enums */ + } +#else + printk(KERN_ERR "OSPCISetDev: MSI support not enabled in the kernel"); +#endif + } + + /* Initialise the PCI resource and MTRR tracking array */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) + { + psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + psPVRPCI->iMTRR[i] = -1; +#endif + } + + return (PVRSRV_PCI_DEV_HANDLE)psPVRPCI; +} + +/*************************************************************************/ /*! +@Function OSPCIAcquireDev +@Description Acquire a PCI device for subsequent use. 
+@Input ui16VendorID Vendor PCI ID +@Input ui16DeviceID Device PCI ID +@Input eFlags Flags +@Return PVRSRV_PCI_DEV_HANDLE Pointer to PCI device handle +*/ /**************************************************************************/ +PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, + IMG_UINT16 ui16DeviceID, + HOST_PCI_INIT_FLAGS eFlags) +{ + struct pci_dev *psPCIDev; + + psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, NULL); + if (psPCIDev == NULL) + { + return NULL; + } + + return OSPCISetDev((void *)psPCIDev, eFlags); +} + +/*************************************************************************/ /*! +@Function OSPCIIRQ +@Description Get the interrupt number for the device. +@Input hPVRPCI PCI device handle +@Output pui16DeviceID Pointer to where the interrupt number + should be returned +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + + if (pui32IRQ == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + *pui32IRQ = psPVRPCI->psPCIDev->irq; + + return PVRSRV_OK; +} + +/* Functions supported by OSPCIAddrRangeFunc */ +enum HOST_PCI_ADDR_RANGE_FUNC +{ + HOST_PCI_ADDR_RANGE_FUNC_LEN, + HOST_PCI_ADDR_RANGE_FUNC_START, + HOST_PCI_ADDR_RANGE_FUNC_END, + HOST_PCI_ADDR_RANGE_FUNC_REQUEST, + HOST_PCI_ADDR_RANGE_FUNC_RELEASE +}; + +/*************************************************************************/ /*! 
+@Function OSPCIAddrRangeFunc +@Description Internal support function for various address range related + functions +@Input eFunc Function to perform +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +@Return IMG_UINT32 Function dependent value +*/ /**************************************************************************/ +static IMG_UINT64 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc, + PVRSRV_PCI_DEV_HANDLE hPVRPCI, + IMG_UINT32 ui32Index) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + + if (ui32Index >= DEVICE_COUNT_RESOURCE) + { + printk(KERN_ERR "OSPCIAddrRangeFunc: Index out of range"); + return 0; + } + + switch (eFunc) + { + case HOST_PCI_ADDR_RANGE_FUNC_LEN: + { + return pci_resource_len(psPVRPCI->psPCIDev, ui32Index); + } + case HOST_PCI_ADDR_RANGE_FUNC_START: + { + return pci_resource_start(psPVRPCI->psPCIDev, ui32Index); + } + case HOST_PCI_ADDR_RANGE_FUNC_END: + { + return pci_resource_end(psPVRPCI->psPCIDev, ui32Index); + } + case HOST_PCI_ADDR_RANGE_FUNC_REQUEST: + { + int err = pci_request_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index, PVRSRV_MODNAME); + if (err != 0) + { + printk(KERN_ERR "OSPCIAddrRangeFunc: pci_request_region_failed (%d)", err); + return 0; + } + psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_TRUE; + return 1; + } + case HOST_PCI_ADDR_RANGE_FUNC_RELEASE: + { + if (psPVRPCI->abPCIResourceInUse[ui32Index]) + { + pci_release_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index); + psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_FALSE; + } + return 1; + } + default: + { + printk(KERN_ERR "OSPCIAddrRangeFunc: Unknown function"); + break; + } + } + + return 0; +} + +/*************************************************************************/ /*! 
/*************************************************************************/ /*!
@Function       OSPCIAddrRangeLen
@Description    Returns length of a given address range. Thin wrapper over
                OSPCIAddrRangeFunc().
@Input          hPVRPCI             PCI device handle
@Input          ui32Index           Address range index
@Return         IMG_UINT64          Length of address range or 0 if no
                                    such range
*/ /**************************************************************************/
IMG_UINT64 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
{
	return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, hPVRPCI, ui32Index);
}

/*************************************************************************/ /*!
@Function       OSPCIAddrRangeStart
@Description    Returns the start of a given address range. Thin wrapper over
                OSPCIAddrRangeFunc().
@Input          hPVRPCI             PCI device handle
@Input          ui32Index           Address range index
@Return         IMG_UINT64          Start of address range or 0 if no
                                    such range
*/ /**************************************************************************/
IMG_UINT64 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
{
	return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, hPVRPCI, ui32Index);
}

/*************************************************************************/ /*!
@Function       OSPCIAddrRangeEnd
@Description    Returns the end of a given address range. Thin wrapper over
                OSPCIAddrRangeFunc().
@Input          hPVRPCI             PCI device handle
@Input          ui32Index           Address range index
@Return         IMG_UINT64          End of address range or 0 if no such
                                    range
*/ /**************************************************************************/
IMG_UINT64 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
{
	return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, hPVRPCI, ui32Index);
}
+@Function OSPCIRequestAddrRange +@Description Request a given address range index for subsequent use +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, + IMG_UINT32 ui32Index) +{ + if (OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, hPVRPCI, ui32Index) == 0) + { + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + else + { + return PVRSRV_OK; + } +} + +/*************************************************************************/ /*! +@Function OSPCIReleaseAddrRange +@Description Release a given address range that is no longer being used +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ + if (OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, hPVRPCI, ui32Index) == 0) + { + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + else + { + return PVRSRV_OK; + } +} + +/*************************************************************************/ /*! 
+@Function OSPCIRequestAddrRegion +@Description Request a given region from an address range for subsequent use +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +@Input uiOffset Offset into the address range that forms + the start of the region +@Input uiLength Length of the region +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIRequestAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, + IMG_UINT32 ui32Index, + IMG_UINT64 uiOffset, + IMG_UINT64 uiLength) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + resource_size_t start; + resource_size_t end; + + start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index); + end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index); + + /* Check that the requested region is valid */ + if ((start + uiOffset + uiLength - 1) > end) + { + return PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH; + } + + if (pci_resource_flags(psPVRPCI->psPCIDev, ui32Index) & IORESOURCE_IO) + { + if (request_region(start + uiOffset, uiLength, PVRSRV_MODNAME) == NULL) + { + return PVRSRV_ERROR_PCI_REGION_UNAVAILABLE; + } + } + else + { + if (request_mem_region(start + uiOffset, uiLength, PVRSRV_MODNAME) == NULL) + { + return PVRSRV_ERROR_PCI_REGION_UNAVAILABLE; + } + } + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! 
+@Function OSPCIReleaseAddrRegion +@Description Release a given region, from an address range, that is no + longer in use +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +@Input ui32Offset Offset into the address range that forms + the start of the region +@Input ui32Length Length of the region +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIReleaseAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, + IMG_UINT32 ui32Index, + IMG_UINT64 uiOffset, + IMG_UINT64 uiLength) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + resource_size_t start; + resource_size_t end; + + start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index); + end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index); + + /* Check that the region is valid */ + if ((start + uiOffset + uiLength - 1) > end) + { + return PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH; + } + + if (pci_resource_flags(psPVRPCI->psPCIDev, ui32Index) & IORESOURCE_IO) + { + release_region(start + uiOffset, uiLength); + } + else + { + release_mem_region(start + uiOffset, uiLength); + } + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! 
+@Function OSPCIReleaseDev +@Description Release a PCI device that is no longer being used +@Input hPVRPCI PCI device handle +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + int i; + + /* Release all PCI regions that are currently in use */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) + { + if (psPVRPCI->abPCIResourceInUse[i]) + { + pci_release_region(psPVRPCI->psPCIDev, i); + psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE; + } + } + +#if defined(CONFIG_PCI_MSI) + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI) /* PRQA S 3358 */ /* misuse of enums */ + { + pci_disable_msi(psPVRPCI->psPCIDev); + } +#endif + + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */ + { + pci_clear_master(psPVRPCI->psPCIDev); + } + + pci_disable_device(psPVRPCI->psPCIDev); + + OSFreeMem(psPVRPCI); + /*not nulling pointer, copy on stack*/ + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! 
+@Function OSPCISuspendDev +@Description Prepare PCI device to be turned off by power management +@Input hPVRPCI PCI device handle +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + int i; + int err; + + /* Release all PCI regions that are currently in use */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) + { + if (psPVRPCI->abPCIResourceInUse[i]) + { + pci_release_region(psPVRPCI->psPCIDev, i); + } + } + + err = pci_save_state(psPVRPCI->psPCIDev); + if (err != 0) + { + printk(KERN_ERR "OSPCISuspendDev: pci_save_state_failed (%d)", err); + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + + pci_disable_device(psPVRPCI->psPCIDev); + + err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_SUSPEND)); + switch (err) + { + case 0: + break; + case -EIO: + printk(KERN_ERR "OSPCISuspendDev: device doesn't support PCI PM"); + break; + case -EINVAL: + printk(KERN_ERR "OSPCISuspendDev: can't enter requested power state"); + break; + default: + printk(KERN_ERR "OSPCISuspendDev: pci_set_power_state failed (%d)", err); + break; + } + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! 
+@Function OSPCIResumeDev +@Description Prepare a PCI device to be resumed by power management +@Input hPVRPCI PCI device handle +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + int err; + int i; + + err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_ON)); + switch (err) + { + case 0: + break; + case -EIO: + printk(KERN_ERR "OSPCIResumeDev: device doesn't support PCI PM"); + break; + case -EINVAL: + printk(KERN_ERR "OSPCIResumeDev: can't enter requested power state"); + return PVRSRV_ERROR_UNKNOWN_POWER_STATE; + default: + printk(KERN_ERR "OSPCIResumeDev: pci_set_power_state failed (%d)", err); + return PVRSRV_ERROR_UNKNOWN_POWER_STATE; + } + + pci_restore_state(psPVRPCI->psPCIDev); + + err = pci_enable_device(psPVRPCI->psPCIDev); + if (err != 0) + { + printk(KERN_ERR "OSPCIResumeDev: Couldn't enable device (%d)", err); + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */ + pci_set_master(psPVRPCI->psPCIDev); + + /* Restore the PCI resource tracking array */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) + { + if (psPVRPCI->abPCIResourceInUse[i]) + { + err = pci_request_region(psPVRPCI->psPCIDev, i, PVRSRV_MODNAME); + if (err != 0) + { + printk(KERN_ERR "OSPCIResumeDev: pci_request_region_failed (region %d, error %d)", i, err); + } + } + } + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! +@Function OSPCIGetVendorDeviceIDs +@Description Retrieve PCI vendor ID and device ID. 
/*************************************************************************/ /*!
@Function       OSPCIGetVendorDeviceIDs
@Description    Retrieve PCI vendor ID and device ID.
@Input          hPVRPCI             PCI device handle
@Output         pui16VendorID       Vendor ID
@Output         pui16DeviceID       Device ID
@Return         PVRSRV_ERROR        Services error code
*/ /**************************************************************************/
PVRSRV_ERROR OSPCIGetVendorDeviceIDs(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
                                     IMG_UINT16 *pui16VendorID,
                                     IMG_UINT16 *pui16DeviceID)
{
	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
	struct pci_dev *psPCIDev;

	if (psPVRPCI == NULL)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	psPCIDev = psPVRPCI->psPCIDev;
	if (psPCIDev == NULL)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	*pui16VendorID = psPCIDev->vendor;
	*pui16DeviceID = psPCIDev->device;

	return PVRSRV_OK;
}

#if defined(CONFIG_MTRR)

/*************************************************************************/ /*!
@Function       OSPCIClearResourceMTRRs
@Description    Clear any BIOS-configured MTRRs for a PCI memory region.
                On kernels >= 4.1 the BAR is instead reserved as
                write-combined via the arch_* helpers and the returned
                cookie is kept for OSPCIReleaseResourceMTRRs(). On older
                kernels the legacy mtrr_add/mtrr_del interface is used with
                a workaround for overlapping BIOS-configured MTRRs.
@Input          hPVRPCI             PCI device handle
@Input          ui32Index           Address range index
@Return         PVRSRV_ERROR        Services error code
*/ /**************************************************************************/
PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
{
	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
	resource_size_t start, end;
	int res;

	/* end is exclusive: one byte past the BAR's last address */
	start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
	end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index) + 1;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
	/* Reserve the range's memtype before adding the WC mapping */
	res = arch_io_reserve_memtype_wc(start, end - start);
	if (res)
	{
		return PVRSRV_ERROR_PCI_CALL_FAILED;
	}
#endif
	res = arch_phys_wc_add(start, end - start);
	if (res < 0)
	{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
		/* Undo the memtype reservation taken above */
		arch_io_free_memtype_wc(start, end - start);
#endif

		return PVRSRV_ERROR_PCI_CALL_FAILED;
	}
	/* Keep the cookie so the WC mapping can be removed later */
	psPVRPCI->iMTRR[ui32Index] = res;
#else

	/* Legacy path: probe/clear with an UNCACHABLE add/del pair first */
	res = mtrr_add(start, end - start, MTRR_TYPE_UNCACHABLE, 0);
	if (res < 0)
	{
		printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res);
		return PVRSRV_ERROR_PCI_CALL_FAILED;
	}

	res = mtrr_del(res, start, end - start);
	if (res < 0)
	{
		printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)", res);
		return PVRSRV_ERROR_PCI_CALL_FAILED;
	}

	/* Workaround for overlapping MTRRs. */
	{
		IMG_BOOL bGotMTRR0 = IMG_FALSE;

		/* Current mobo BIOSes will normally set up a WRBACK MTRR spanning
		 * 0->4GB, and then another 4GB->6GB. If the PCI card's automatic &
		 * overlapping UNCACHABLE MTRR is deleted, we see WRBACK behaviour.
		 *
		 * WRBACK is incompatible with some PCI devices, so try to split
		 * the UNCACHABLE regions up and insert a WRCOMB region instead.
		 */
		res = mtrr_add(start, end - start, MTRR_TYPE_WRBACK, 0);
		if (res < 0)
		{
			/* If this fails, services has probably run before and created
			 * a write-combined MTRR for the test chip. Assume it has, and
			 * don't return an error here.
			 */
			return PVRSRV_OK;
		}

		/* res is the MTRR slot index; slot 0 is the BIOS 0->4GB WRBACK one */
		if (res == 0)
			bGotMTRR0 = IMG_TRUE;

		res = mtrr_del(res, start, end - start);
		if (res < 0)
		{
			printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)", res);
			return PVRSRV_ERROR_PCI_CALL_FAILED;
		}

		if (bGotMTRR0)
		{
			/* Replace 0 with a non-overlapping WRBACK MTRR */
			res = mtrr_add(0, start, MTRR_TYPE_WRBACK, 0);
			if (res < 0)
			{
				printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res);
				return PVRSRV_ERROR_PCI_CALL_FAILED;
			}

			/* Add a WRCOMB MTRR for the PCI device memory bar */
			res = mtrr_add(start, end - start, MTRR_TYPE_WRCOMB, 0);
			if (res < 0)
			{
				printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res);
				return PVRSRV_ERROR_PCI_CALL_FAILED;
			}
		}
	}
#endif

	return PVRSRV_OK;
}
+@Function OSPCIReleaseResourceMTRRs +@Description Release resources allocated by OSPCIClearResourceMTRRs +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +*/ /**************************************************************************/ +void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + + if (psPVRPCI->iMTRR[ui32Index] >= 0) + { + arch_phys_wc_del(psPVRPCI->iMTRR[ui32Index]); + psPVRPCI->iMTRR[ui32Index] = -1; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) + { + resource_size_t start, end; + + start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index); + end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index) + 1; + + arch_io_free_memtype_wc(start, end - start); + } +#endif + } +#else + PVR_UNREFERENCED_PARAMETER(hPVRPCI); + PVR_UNREFERENCED_PARAMETER(ui32Index); +#endif +} +#endif /* defined(CONFIG_MTRR) */ diff --git a/drivers/mcst/gpu-imgtec/services/system/volcanic/common/vmm_type_stub.c b/drivers/mcst/gpu-imgtec/services/system/volcanic/common/vmm_type_stub.c new file mode 100644 index 000000000000..747bf4a8e1f1 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/volcanic/common/vmm_type_stub.c @@ -0,0 +1,119 @@ +/*************************************************************************/ /*! +@File vmm_type_stub.c +@Title Stub VM manager type +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Sample stub (no-operation) VM manager implementation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "pvrsrv.h"
#include "img_types.h"
#include "img_defs.h"
#include "pvrsrv_error.h"
#include "rgxheapconfig.h"

#include "vmm_impl.h"
#include "vmm_pvz_server.h"

/* Stub client call: device physical heap mapping is not implemented by
 * this (no-operation) VM manager type. */
static PVRSRV_ERROR
StubVMMMapDevPhysHeap(IMG_UINT32 ui32FuncID,
                      IMG_UINT32 ui32DevID,
                      IMG_UINT64 ui64Size,
                      IMG_UINT64 ui64Addr)
{
	PVR_UNREFERENCED_PARAMETER(ui32FuncID);
	PVR_UNREFERENCED_PARAMETER(ui32DevID);
	PVR_UNREFERENCED_PARAMETER(ui64Size);
	PVR_UNREFERENCED_PARAMETER(ui64Addr);
	return PVRSRV_ERROR_NOT_IMPLEMENTED;
}

/* Stub client call: device physical heap unmapping is not implemented by
 * this VM manager type. */
static PVRSRV_ERROR
StubVMMUnmapDevPhysHeap(IMG_UINT32 ui32FuncID,
                        IMG_UINT32 ui32DevID)
{
	PVR_UNREFERENCED_PARAMETER(ui32FuncID);
	PVR_UNREFERENCED_PARAMETER(ui32DevID);
	return PVRSRV_ERROR_NOT_IMPLEMENTED;
}

/* Para-virtualization connection table: the client entries are stubbed out
 * (always PVRSRV_ERROR_NOT_IMPLEMENTED), while the server and VM-event
 * entries forward to the real pvz server implementation. */
static VMM_PVZ_CONNECTION gsStubVmmPvz =
{
	.sClientFuncTab = {
		/* pfnMapDevPhysHeap */
		&StubVMMMapDevPhysHeap,

		/* pfnUnmapDevPhysHeap */
		&StubVMMUnmapDevPhysHeap
	},

	.sServerFuncTab = {
		/* pfnMapDevPhysHeap */
		&PvzServerMapDevPhysHeap,

		/* pfnUnmapDevPhysHeap */
		&PvzServerUnmapDevPhysHeap
	},

	.sVmmFuncTab = {
		/* pfnOnVmOnline */
		&PvzServerOnVmOnline,

		/* pfnOnVmOffline */
		&PvzServerOnVmOffline,

		/* pfnVMMConfigure */
		&PvzServerVMMConfigure
	}
};

/* Hands back the static stub connection table. psPvzConnection must be
 * non-NULL; an error is logged because the stub provides no runtime VZ
 * support. */
PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection)
{
	PVR_LOG_RETURN_IF_FALSE((NULL != psPvzConnection), "VMMCreatePvzConnection", PVRSRV_ERROR_INVALID_PARAMS);
	*psPvzConnection = &gsStubVmmPvz;
	PVR_DPF((PVR_DBG_ERROR, "Using a stub VM manager type, no runtime VZ support"));
	return PVRSRV_OK;
}

/* Nothing to free: the stub connection is a static object. */
void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection)
{
	PVR_LOG_IF_FALSE((NULL != psPvzConnection), "VMMDestroyPvzConnection");
}
+******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/system/volcanic/include/dma_support.h b/drivers/mcst/gpu-imgtec/services/system/volcanic/include/dma_support.h new file mode 100644 index 000000000000..cc1e4ebc31ab --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/volcanic/include/dma_support.h @@ -0,0 +1,112 @@ +/*************************************************************************/ /*! +@File dma_support.h +@Title Device contiguous memory allocator and I/O re-mapper +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides a contiguous memory allocator API; mainly + used for allocating / ioremapping (DMA/PA <-> CPU/VA) +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef DMA_SUPPORT_H +#define DMA_SUPPORT_H + +#include "osfunc.h" +#include "pvrsrv.h" + +typedef struct _DMA_ALLOC_ +{ + IMG_UINT64 ui64Size; + IMG_CPU_VIRTADDR pvVirtAddr; + IMG_DEV_PHYADDR sBusAddr; + void *pvOSDevice; +} DMA_ALLOC; + +/*! +******************************************************************************* + @Function SysDmaAllocMem + @Description Allocates physically contiguous memory + @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR SysDmaAllocMem(DMA_ALLOC *psDmaAlloc); + +/*! 
+******************************************************************************* + @Function SysDmaFreeMem + @Description Free physically contiguous memory + @Return void +******************************************************************************/ +void SysDmaFreeMem(DMA_ALLOC *psCmaAlloc); + +/*! +******************************************************************************* + @Function SysDmaRegisterForIoRemapping + @Description Registers DMA_ALLOC for manual I/O remapping + @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR SysDmaRegisterForIoRemapping(DMA_ALLOC *psPhysHeapDmaAlloc); + +/*! +******************************************************************************* + @Function SysDmaDeregisterForIoRemapping + @Description Deregisters DMA_ALLOC from manual I/O remapping + @Return void +******************************************************************************/ +void SysDmaDeregisterForIoRemapping(DMA_ALLOC *psPhysHeapDmaAlloc); + +/*! +******************************************************************************* + @Function SysDmaDevPAddrToCpuVAddr + @Description Maps a DMA_ALLOC physical address to CPU virtual address + @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code +******************************************************************************/ +IMG_CPU_VIRTADDR +SysDmaDevPAddrToCpuVAddr(IMG_UINT64 uiAddr, IMG_UINT64 ui64Size); + +/*! +******************************************************************************* + @Function SysDmaCpuVAddrToDevPAddr + @Description Maps a DMA_ALLOC CPU virtual address to physical address + @Return Non-zero value on success. 
Otherwise, a 0 +******************************************************************************/ +IMG_UINT64 SysDmaCpuVAddrToDevPAddr(IMG_CPU_VIRTADDR pvDMAVirtAddr); + +#endif /* DMA_SUPPORT_H */ + +/****************************************************************************** + End of file (dma_support.h) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/system/volcanic/include/interrupt_support.h b/drivers/mcst/gpu-imgtec/services/system/volcanic/include/interrupt_support.h new file mode 100644 index 000000000000..0cca1ac22b19 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/volcanic/include/interrupt_support.h @@ -0,0 +1,103 @@ +/*************************************************************************/ /*! +@File +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__INTERRUPT_SUPPORT_H__) +#define __INTERRUPT_SUPPORT_H__ + +#include "img_types.h" +#include "pvrsrv_error.h" +#include "pvrsrv_device.h" + +/*! Default trigger type for the interrupt line. */ +#define SYS_IRQ_FLAG_TRIGGER_DEFAULT (0x0 << 0) +/*! Interrupt triggered when interrupt line is low. */ +#define SYS_IRQ_FLAG_TRIGGER_LOW (0x1 << 0) +/*! Interrupt triggered when interrupt line is high. */ +#define SYS_IRQ_FLAG_TRIGGER_HIGH (0x2 << 0) +/*! Interrupt trigger mask. */ +#define SYS_IRQ_FLAG_TRIGGER_MASK (SYS_IRQ_FLAG_TRIGGER_DEFAULT | \ + SYS_IRQ_FLAG_TRIGGER_LOW | \ + SYS_IRQ_FLAG_TRIGGER_HIGH) +/*! The irq is allowed to be shared among several devices. */ +#define SYS_IRQ_FLAG_SHARED (0x1 << 8) + +/*! Interrupt flags mask. 
*/ +#define SYS_IRQ_FLAG_MASK (SYS_IRQ_FLAG_TRIGGER_MASK | \ + SYS_IRQ_FLAG_SHARED) + +/*************************************************************************/ /*! +@Description Pointer to a system Low-level Interrupt Service Routine (LISR). +@Input pvData Private data provided to the LISR. +@Return IMG_TRUE if interrupt handled, IMG_FALSE otherwise. +*/ /**************************************************************************/ +typedef IMG_BOOL (*PFN_SYS_LISR)(void *pvData); + +/*************************************************************************/ /*! +@Function OSInstallSystemLISR +@Description Installs a system low-level interrupt handler +@Output phLISR On return, contains a handle to the + installed LISR +@Input ui32IRQ The IRQ number for which the + interrupt handler should be installed +@Input pszDevName Name of the device for which the handler + is being installed +@Input pfnLISR A pointer to an interrupt handler + function +@Input pvData A pointer to data that should be passed + to pfnLISR when it is called +@Input ui32Flags Interrupt flags +@Return PVRSRV_OK on success, a failure code otherwise +*/ /**************************************************************************/ +PVRSRV_ERROR OSInstallSystemLISR(IMG_HANDLE *phLISR, + IMG_UINT32 ui32IRQ, + const IMG_CHAR *pszDevName, + PFN_SYS_LISR pfnLISR, + void *pvData, + IMG_UINT32 ui32Flags); + +/*************************************************************************/ /*! 
+@Function OSUninstallSystemLISR +@Description Uninstalls a system low-level interrupt handler +@Input hLISRData The handle to the LISR to uninstall +@Return PVRSRV_OK on success, a failure code otherwise +*/ /**************************************************************************/ +PVRSRV_ERROR OSUninstallSystemLISR(IMG_HANDLE hLISRData); +#endif /* !defined(__INTERRUPT_SUPPORT_H__) */ diff --git a/drivers/mcst/gpu-imgtec/services/system/volcanic/include/pci_support.h b/drivers/mcst/gpu-imgtec/services/system/volcanic/include/pci_support.h new file mode 100644 index 000000000000..45870b8442ab --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/volcanic/include/pci_support.h @@ -0,0 +1,99 @@ +/*************************************************************************/ /*! +@File +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef PCI_SUPPORT_H +#define PCI_SUPPORT_H + +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(LINUX) +#include +#define TO_PCI_COOKIE(dev) to_pci_dev((struct device *)(dev)) +#else +#define TO_PCI_COOKIE(dev) (dev) +#endif + +typedef enum _HOST_PCI_INIT_FLAGS_ +{ + HOST_PCI_INIT_FLAG_BUS_MASTER = 0x00000001, + HOST_PCI_INIT_FLAG_MSI = 0x00000002, + HOST_PCI_INIT_FLAG_FORCE_I32 = 0x7fffffff +} HOST_PCI_INIT_FLAGS; + +struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_; +typedef struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_ *PVRSRV_PCI_DEV_HANDLE; + +PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags); +PVRSRV_PCI_DEV_HANDLE OSPCISetDev(void *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags); +PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI); +PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ); +IMG_UINT64 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +IMG_UINT64 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +IMG_UINT64 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +PVRSRV_ERROR OSPCIRequestAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index, IMG_UINT64 uiOffset, IMG_UINT64 uiLength); +PVRSRV_ERROR OSPCIReleaseAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index, IMG_UINT64 uiOffset, IMG_UINT64 uiLength); +PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI); +PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI); +PVRSRV_ERROR OSPCIGetVendorDeviceIDs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT16 *pui16VendorID, IMG_UINT16 *pui16DeviceID); + +#if defined(CONFIG_MTRR) +PVRSRV_ERROR 
OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +#else +static inline PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ + PVR_UNREFERENCED_PARAMETER(hPVRPCI); + PVR_UNREFERENCED_PARAMETER(ui32Index); + return PVRSRV_OK; +} + +static inline void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ + PVR_UNREFERENCED_PARAMETER(hPVRPCI); + PVR_UNREFERENCED_PARAMETER(ui32Index); +} +#endif + +#endif /* PCI_SUPPORT_H */ diff --git a/drivers/mcst/gpu-imgtec/services/system/volcanic/include/syscommon.h b/drivers/mcst/gpu-imgtec/services/system/volcanic/include/syscommon.h new file mode 100644 index 000000000000..a2589d0c2c6a --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/volcanic/include/syscommon.h @@ -0,0 +1,129 @@ +/**************************************************************************/ /*! +@File +@Title Common System APIs and structures +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides common system-specific declarations and + macros that are supported by all systems +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#if !defined(__SYSCOMMON_H__) +#define __SYSCOMMON_H__ + +#include "img_types.h" +#include "pvr_notifier.h" +#include "pvrsrv_device.h" +#include "pvrsrv_error.h" + +/*************************************************************************/ /*! +@Description Pointer to a Low-level Interrupt Service Routine (LISR). +@Input pvData Private data provided to the LISR. +@Return True if interrupt handled, false otherwise. 
+*/ /**************************************************************************/ +typedef IMG_BOOL (*PFN_LISR)(void *pvData); + +/**************************************************************************/ /*! +@Function SysDevInit +@Description System specific device initialisation function. +@Input pvOSDevice pointer to the OS device reference +@Input ppsDevConfig returned device configuration info +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /***************************************************************************/ +PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig); + +/**************************************************************************/ /*! +@Function SysDevDeInit +@Description System specific device deinitialisation function. +@Input psDevConfig device configuration info of the device to be + deinitialised +@Return None. +*/ /***************************************************************************/ +void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig); + +/**************************************************************************/ /*! +@Function SysDebugInfo +@Description Dump system specific device debug information. +@Input psDevConfig pointer to device configuration info +@Input pfnDumpDebugPrintf the 'printf' function to be called to + display the debug info +@Input pvDumpDebugFile optional file identifier to be passed to + the 'printf' function if required +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /***************************************************************************/ +PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); + +/**************************************************************************/ /*! +@Function SysInstallDeviceLISR +@Description Installs the system Low-level Interrupt Service Routine (LISR) + which handles low-level processing of interrupts from the device + (GPU). 
+ The LISR will be invoked when the device raises an interrupt. An + LISR may not be descheduled, so code which needs to do so should + be placed in an MISR. + The installed LISR will schedule any MISRs once it has completed + its interrupt processing, by calling OSScheduleMISR(). +@Input hSysData pointer to the system data of the device +@Input ui32IRQ the IRQ on which the LISR is to be installed +@Input pszName name of the module installing the LISR +@Input pfnLISR pointer to the function to be installed as the + LISR +@Input pvData private data provided to the LISR +@Output phLISRData handle to the installed LISR (to be used for a + subsequent uninstall) +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /***************************************************************************/ +PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData, + IMG_UINT32 ui32IRQ, + const IMG_CHAR *pszName, + PFN_LISR pfnLISR, + void *pvData, + IMG_HANDLE *phLISRData); + +/**************************************************************************/ /*! +@Function SysUninstallDeviceLISR +@Description Uninstalls the system Low-level Interrupt Service Routine (LISR) + which handles low-level processing of interrupts from the device + (GPU). +@Input hLISRData handle of the LISR to be uninstalled +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /***************************************************************************/ +PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData); + +#endif /* !defined(__SYSCOMMON_H__) */ diff --git a/drivers/mcst/gpu-imgtec/services/system/volcanic/include/sysvalidation.h b/drivers/mcst/gpu-imgtec/services/system/volcanic/include/sysvalidation.h new file mode 100644 index 000000000000..f80fe6cde2f3 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/volcanic/include/sysvalidation.h @@ -0,0 +1,63 @@ +/*************************************************************************/ /*! 
+@File +@Title Validation System APIs and structures +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides system-specific declarations and macros + needed for hardware validation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__SYSVALIDATION_H__) +#define __SYSVALIDATION_H__ + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#include "img_types.h" +#include "rgxdefs_km.h" +#include "virt_validation_defs.h" + +void SysInitVirtInitialization(IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], + IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]); + +#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR) +/* functions only used on rogue, but header defining them is common */ +void SysSetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState); +void SysSetTrustedDeviceAceEnabled(void); +#endif +#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ + +#endif /* !defined(__SYSVALIDATION_H__) */ diff --git a/drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_linux_tc/Kbuild.mk b/drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_linux_tc/Kbuild.mk new file mode 100644 index 000000000000..711070dd3e74 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_linux_tc/Kbuild.mk @@ -0,0 +1,59 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### +PVRSRVKM_NAME = $(PVRSRV_MODNAME) + +$(PVRSRVKM_NAME)-y += \ + services/system/volcanic/$(PVR_SYSTEM)/sysconfig.o \ + services/system/volcanic/common/env/linux/interrupt_support.o \ + services/system/volcanic/common/env/linux/pci_support.o \ + services/system/volcanic/common/env/linux/dma_support.o \ + services/server/common/vmm_pvz_client.o \ + services/server/common/vmm_pvz_server.o \ + services/server/common/vz_vmm_pvz.o \ + services/server/common/vz_vmm_vm.o \ + services/system/volcanic/common/vmm_type_$(VMM_TYPE).o + +ccflags-y += \ + -I$(TOP)/services/system/volcanic/common/env/linux \ + -I$(TOP)/services/include/env/linux \ + -I$(TOP)/kernel/drivers/staging/imgtec \ + -I$(TOP)/kernel/drivers/staging/imgtec/tc \ + -I$(TOP)/include/volcanic/system/rgx_tc -I$(TOP)/include/system/rgx_tc diff --git a/drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_linux_tc/sysconfig.c b/drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_linux_tc/sysconfig.c new file mode 100644 index 000000000000..82c58a221962 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_linux_tc/sysconfig.c @@ -0,0 +1,1040 @@ +/*************************************************************************/ /*! +@File +@Title System Configuration +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description System Configuration functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#include "sysinfo.h" +#include "apollo_regs.h" + +#include "pvrsrv.h" +#include "pvrsrv_device.h" +#include "rgxdevice.h" +#include "syscommon.h" +#include "allocmem.h" +#include "pvr_debug.h" + +#if defined(SUPPORT_ION) +#include PVR_ANDROID_ION_HEADER +#include "ion_support.h" +#include "ion_sys.h" +#endif + +#include "tc_drv.h" + +#include +#include + +/* Must be consecutive and start from 0 */ +#define PHY_HEAP_CARD_GPU 0 +#define PHY_HEAP_CARD_EXT 1 +#define PHY_HEAP_LMA_NUM 2 + +#define PHY_HEAP_SYSTEM PHY_HEAP_LMA_NUM +#define PHY_HEAP_NUM 3 + +#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (10) + +#if defined(SUPPORT_SECURITY_VALIDATION) +#define SECURE_FW_CODE_MEM_SIZE (0x200000) /* 2MB (max HMMU page size) */ +#define SECURE_FW_DATA_MEM_SIZE (0x200000) /* 2MB (max HMMU page size) */ +#define SECURE_MEM_SIZE (0x4000000) /* 32MB (multiple of max HMMU page size) */ +#endif + +typedef enum _LMA_HEAP_REGION_ID_ +{ + /* + * MAIN region needs to be the one with index 0, because the kernel code + * always uses LMA heap region 0 to allocate MMU pages + */ + REGION_MAIN = 0, +#if defined(SUPPORT_SECURITY_VALIDATION) + REGION_SEC_FW_CODE, + REGION_SEC_FW_DATA, + REGION_SEC_MEM, +#endif + REGION_NUM +} LMA_HEAP_REGION_ID; + +#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS) + +/* Dummy DVFS configuration used purely for testing purposes */ + +static const IMG_OPP asOPPTable[] = +{ + { 8, 25000000}, + { 16, 50000000}, + { 32, 75000000}, + { 64, 100000000}, +}; + +#define LEVEL_COUNT (sizeof(asOPPTable) / sizeof(IMG_OPP)) + +static void SetFrequency(IMG_UINT32 ui32Frequency) +{ + PVR_DPF((PVR_DBG_ERROR, "SetFrequency %u", ui32Frequency)); +} + +static void SetVoltage(IMG_UINT32 ui32Voltage) +{ + PVR_DPF((PVR_DBG_ERROR, "SetVoltage %u", ui32Voltage)); +} + +#endif + +static void TCLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR 
*psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr); + +static void TCLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr); + +static IMG_UINT32 TCLocalGetRegionId(IMG_HANDLE hPrivData, + PVRSRV_MEMALLOCFLAGS_T uiAllocFlags); + +static PHYS_HEAP_FUNCTIONS gsLocalPhysHeapFuncs = +{ + .pfnCpuPAddrToDevPAddr = TCLocalCpuPAddrToDevPAddr, + .pfnDevPAddrToCpuPAddr = TCLocalDevPAddrToCpuPAddr, + .pfnGetRegionId = TCLocalGetRegionId, +}; + +static void TCHostCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr); + +static void TCHostDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr); + +static IMG_UINT32 TCHostGetRegionId(IMG_HANDLE hPrivData, + PVRSRV_MEMALLOCFLAGS_T uiAllocFlags); + +static PHYS_HEAP_FUNCTIONS gsHostPhysHeapFuncs = +{ + .pfnCpuPAddrToDevPAddr = TCHostCpuPAddrToDevPAddr, + .pfnDevPAddrToCpuPAddr = TCHostDevPAddrToCpuPAddr, + .pfnGetRegionId = TCHostGetRegionId, +}; + +typedef struct _SYS_DATA_ SYS_DATA; + +struct _SYS_DATA_ +{ + struct platform_device *pdev; + + struct tc_rogue_platform_data *pdata; + + struct resource *registers; + +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + struct ion_client *ion_client; + struct ion_handle *ion_rogue_allocation; +#endif +}; + +#define SYSTEM_INFO_FORMAT_STRING "FPGA Revision: %s - TCF Core Revision: %s - TCF Core Target Build ID: %s - PCI Version: %s - Macro Version: %s" +#define FPGA_REV_MAX_LEN 8 /* current longest format: "x.y.z" */ +#define TCF_CORE_REV_MAX_LEN 8 /* current longest format: "x.y.z" */ +#define TCF_CORE_CFG_MAX_LEN 4 /* current longest format: "x" */ +#define PCI_VERSION_MAX_LEN 4 /* current longest format: "x" */ +#define MACRO_VERSION_MAX_LEN 8 /* current longest format: "x.yz" */ + +static IMG_CHAR *GetDeviceVersionString(SYS_DATA 
*psSysData) +{ + int err; + char str_fpga_rev[FPGA_REV_MAX_LEN]={0}; + char str_tcf_core_rev[TCF_CORE_REV_MAX_LEN]={0}; + char str_tcf_core_target_build_id[TCF_CORE_CFG_MAX_LEN]={0}; + char str_pci_ver[PCI_VERSION_MAX_LEN]={0}; + char str_macro_ver[MACRO_VERSION_MAX_LEN]={0}; + + IMG_CHAR *pszVersion; + IMG_UINT32 ui32StringLength; + + err = tc_sys_strings(psSysData->pdev->dev.parent, + str_fpga_rev, sizeof(str_fpga_rev), + str_tcf_core_rev, sizeof(str_tcf_core_rev), + str_tcf_core_target_build_id, sizeof(str_tcf_core_target_build_id), + str_pci_ver, sizeof(str_pci_ver), + str_macro_ver, sizeof(str_macro_ver)); + if (err) + { + return NULL; + } + + /* Calculate how much space we need to allocate for the string */ + ui32StringLength = OSStringLength(SYSTEM_INFO_FORMAT_STRING); + ui32StringLength += OSStringLength(str_fpga_rev); + ui32StringLength += OSStringLength(str_tcf_core_rev); + ui32StringLength += OSStringLength(str_tcf_core_target_build_id); + ui32StringLength += OSStringLength(str_pci_ver); + ui32StringLength += OSStringLength(str_macro_ver); + + /* Create the version string */ + pszVersion = OSAllocMem(ui32StringLength * sizeof(IMG_CHAR)); + if (pszVersion) + { + OSSNPrintf(&pszVersion[0], ui32StringLength, + SYSTEM_INFO_FORMAT_STRING, + str_fpga_rev, + str_tcf_core_rev, + str_tcf_core_target_build_id, + str_pci_ver, + str_macro_ver); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to create format string", __func__)); + } + + return pszVersion; +} + +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) +static SYS_DATA *gpsIonPrivateData; + +PVRSRV_ERROR IonInit(void *pvPrivateData) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + SYS_DATA *psSysData = pvPrivateData; + gpsIonPrivateData = psSysData; + + psSysData->ion_client = ion_client_create(psSysData->pdata->ion_device, SYS_RGX_DEV_NAME); + if (IS_ERR(psSysData->ion_client)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create ION client (%ld)", __func__, 
PTR_ERR(psSysData->ion_client))); + eError = PVRSRV_ERROR_ION_NO_CLIENT; + goto err_out; + } + /* Allocate the whole rogue ion heap and pass that to services to manage */ + psSysData->ion_rogue_allocation = ion_alloc(psSysData->ion_client, psSysData->pdata->rogue_heap_memory_size, 4096, (1 << psSysData->pdata->ion_heap_id), 0); + if (IS_ERR(psSysData->ion_rogue_allocation)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate ION rogue buffer (%ld)", __func__, PTR_ERR(psSysData->ion_rogue_allocation))); + eError = PVRSRV_ERROR_ION_FAILED_TO_ALLOC; + goto err_destroy_client; + + } + + return PVRSRV_OK; +err_destroy_client: + ion_client_destroy(psSysData->ion_client); + psSysData->ion_client = NULL; +err_out: + return eError; +} + +void IonDeinit(void) +{ + SYS_DATA *psSysData = gpsIonPrivateData; + ion_free(psSysData->ion_client, psSysData->ion_rogue_allocation); + psSysData->ion_rogue_allocation = NULL; + ion_client_destroy(psSysData->ion_client); + psSysData->ion_client = NULL; +} + +struct ion_device *IonDevAcquire(void) +{ + return gpsIonPrivateData->pdata->ion_device; +} + +void IonDevRelease(struct ion_device *ion_device) +{ + PVR_ASSERT(ion_device == gpsIonPrivateData->pdata->ion_device); +} +#endif /* defined(SUPPORT_ION) */ + +static void TCLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData; + SYS_DATA *psSysData = psDevConfig->hSysData; + IMG_UINT32 ui32Idx; + + for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ui32Idx++) + { + psDevPAddr[ui32Idx].uiAddr = + psCpuPAddr[ui32Idx].uiAddr - psSysData->pdata->tc_memory_base; + } +} + +static void TCLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData; + SYS_DATA *psSysData = psDevConfig->hSysData; + 
IMG_UINT32 ui32Idx; + + for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ui32Idx++) + { + psCpuPAddr[ui32Idx].uiAddr = + psDevPAddr[ui32Idx].uiAddr + psSysData->pdata->tc_memory_base; + } +} + +static IMG_UINT32 TCLocalGetRegionId(IMG_HANDLE hPrivData, + PVRSRV_MEMALLOCFLAGS_T uiAllocFlags) +{ +#if defined(SUPPORT_SECURITY_VALIDATION) + if (PVRSRV_CHECK_SECURE_FW_CODE(uiAllocFlags)) + { + return REGION_SEC_FW_CODE; + } + + if (PVRSRV_CHECK_SECURE_FW_DATA(uiAllocFlags)) + { + return REGION_SEC_FW_DATA; + } + + if (PVRSRV_CHECK_SECURE_BUFFER(uiAllocFlags)) + { + return REGION_SEC_MEM; + } + +#endif + /* Return first region which is always valid */ + return REGION_MAIN; +} + +static void TCHostCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 uiNumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr) +{ + PVR_ASSERT(sizeof(*psDevPAddr) == sizeof(*psCpuPAddr)); + OSCachedMemCopy(psDevPAddr, psCpuPAddr, uiNumOfAddr * sizeof(*psDevPAddr)); +} + +static void TCHostDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 uiNumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PVR_ASSERT(sizeof(*psCpuPAddr) == sizeof(*psDevPAddr)); + OSCachedMemCopy(psCpuPAddr, psDevPAddr, uiNumOfAddr * sizeof(*psCpuPAddr)); +} + +static IMG_UINT32 TCHostGetRegionId(IMG_HANDLE hPrivData, + PVRSRV_MEMALLOCFLAGS_T uiAllocFlags) +{ + /* Return first region which is always valid */ + return 0; +} + +static void +FreePhysHeaps(PHYS_HEAP_CONFIG *pasPhysHeaps, IMG_UINT32 uiNumHeaps) +{ + IMG_UINT32 i; + + for (i = 0; i < uiNumHeaps; i++) + { + OSFreeMem(pasPhysHeaps[i].pasRegions); + } +} + +static PVRSRV_ERROR +CreateCardGPUHeapRegion(const SYS_DATA *psSysData, + PHYS_HEAP_REGION **ppsRegionsOut, + IMG_UINT32 *puiRegionCountOut) +{ + PHYS_HEAP_REGION *psRegions; + IMG_UINT32 uiRegionCount = 0; + IMG_UINT64 ui64CardAddr = 0; + IMG_UINT64 ui64StartAddr = psSysData->pdata->rogue_heap_memory_base; + IMG_UINT64 ui64RogueHeapSize = 
psSysData->pdata->rogue_heap_memory_size; +#if defined(SUPPORT_SECURITY_VALIDATION) + IMG_UINT64 uiTDFWCodeSize = SECURE_FW_CODE_MEM_SIZE; + IMG_UINT64 uiTDFWDataSize = SECURE_FW_DATA_MEM_SIZE; + IMG_UINT64 uiTDSecBufSize = SECURE_MEM_SIZE; +#endif + + psRegions = OSAllocZMem(sizeof(*psRegions) * REGION_NUM); + if (!psRegions) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + if (psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) + { + ui64CardAddr = psSysData->pdata->tc_memory_base; + } + +#if defined(SUPPORT_SECURITY_VALIDATION) + /* Take some space from the main heap region */ + ui64RogueHeapSize -= uiTDFWCodeSize + uiTDFWDataSize + uiTDSecBufSize; +#endif + + psRegions[REGION_MAIN].sStartAddr.uiAddr = + IMG_CAST_TO_CPUPHYADDR_UINT(ui64StartAddr); + psRegions[REGION_MAIN].sCardBase.uiAddr = ui64CardAddr; + psRegions[REGION_MAIN].uiSize = ui64RogueHeapSize; + uiRegionCount++; + + ui64CardAddr += ui64RogueHeapSize; + ui64StartAddr += ui64RogueHeapSize; + +#if defined(SUPPORT_SECURITY_VALIDATION) + /* Setup the secure FW code heap */ + psRegions[REGION_SEC_FW_CODE].sCardBase.uiAddr = ui64CardAddr; + psRegions[REGION_SEC_FW_CODE].sStartAddr.uiAddr = + IMG_CAST_TO_CPUPHYADDR_UINT(ui64StartAddr); + psRegions[REGION_SEC_FW_CODE].uiSize = uiTDFWCodeSize; + uiRegionCount++; + + ui64CardAddr += uiTDFWCodeSize; + ui64StartAddr += uiTDFWCodeSize; + + /* Setup the secure FW data heap */ + psRegions[REGION_SEC_FW_DATA].sCardBase.uiAddr = ui64CardAddr; + psRegions[REGION_SEC_FW_DATA].sStartAddr.uiAddr = + IMG_CAST_TO_CPUPHYADDR_UINT(ui64StartAddr); + psRegions[REGION_SEC_FW_DATA].uiSize = uiTDFWDataSize; + uiRegionCount++; + + ui64CardAddr += uiTDFWDataSize; + ui64StartAddr += uiTDFWDataSize; + + /* Setup the secure buffers heap */ + psRegions[REGION_SEC_MEM].sCardBase.uiAddr = ui64CardAddr; + psRegions[REGION_SEC_MEM].sStartAddr.uiAddr = + IMG_CAST_TO_CPUPHYADDR_UINT(ui64StartAddr); + psRegions[REGION_SEC_MEM].uiSize = uiTDSecBufSize; + uiRegionCount++; + + ui64CardAddr += 
uiTDSecBufSize; + ui64StartAddr += uiTDSecBufSize; +#endif + + *ppsRegionsOut = psRegions; + *puiRegionCountOut = uiRegionCount; + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +CreateCardEXTHeapRegion(const SYS_DATA *psSysData, + PHYS_HEAP_REGION **ppsRegionsOut, + IMG_UINT32 *puiRegionCountOut) +{ + PHYS_HEAP_REGION *psRegions; + IMG_UINT32 uiRegionCount = 0; + IMG_UINT64 ui64CardAddr = 0; + IMG_UINT64 ui64StartAddr = psSysData->pdata->pdp_heap_memory_base; + IMG_UINT64 ui64Size = psSysData->pdata->pdp_heap_memory_size; + + psRegions = OSAllocMem(sizeof(*psRegions)); + if (!psRegions) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + if (psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) + { + ui64CardAddr = psSysData->pdata->tc_memory_base; + } + + psRegions[0].sStartAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(ui64StartAddr); + psRegions[0].sCardBase.uiAddr = ui64CardAddr; + psRegions[0].uiSize = ui64Size; + uiRegionCount++; + + *ppsRegionsOut = psRegions; + *puiRegionCountOut = uiRegionCount; + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +InitLocalHeap(PHYS_HEAP_CONFIG *psPhysHeap, IMG_UINT32 ui32ID, + PHYS_HEAP_REGION *psRegions, IMG_UINT32 ui32NumOfRegions, + PHYS_HEAP_FUNCTIONS *psFuncs) +{ + + psPhysHeap->ui32PhysHeapID = ui32ID; + psPhysHeap->eType = PHYS_HEAP_TYPE_LMA; + psPhysHeap->pszPDumpMemspaceName = "LMA"; + psPhysHeap->psMemFuncs = psFuncs; + psPhysHeap->pasRegions = psRegions; + psPhysHeap->ui32NumOfRegions = ui32NumOfRegions; + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +InitLocalHeaps(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps) +{ + PHYS_HEAP_FUNCTIONS *psHeapFuncs; + PHYS_HEAP_REGION *psRegions; + IMG_UINT32 ui32NumOfRegions; + PVRSRV_ERROR eError; + + if (psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) + { + psHeapFuncs = &gsHostPhysHeapFuncs; + } + else + { + psHeapFuncs = &gsLocalPhysHeapFuncs; + } + + eError = CreateCardGPUHeapRegion(psSysData, &psRegions, &ui32NumOfRegions); + if (eError != PVRSRV_OK) + { + return eError; + } 
+ + eError = InitLocalHeap(&pasPhysHeaps[PHY_HEAP_CARD_GPU], PHY_HEAP_CARD_GPU, + psRegions, ui32NumOfRegions, psHeapFuncs); + if (eError != PVRSRV_OK) + { + OSFreeMem(psRegions); + return eError; + } + + eError = CreateCardEXTHeapRegion(psSysData, &psRegions, &ui32NumOfRegions); + if (eError != PVRSRV_OK) + { + OSFreeMem(pasPhysHeaps[PHY_HEAP_CARD_GPU].pasRegions); + return eError; + } + + eError = InitLocalHeap(&pasPhysHeaps[PHY_HEAP_CARD_EXT], PHY_HEAP_CARD_EXT, + psRegions, ui32NumOfRegions, psHeapFuncs); + if (eError != PVRSRV_OK) + { + OSFreeMem(psRegions); + OSFreeMem(pasPhysHeaps[PHY_HEAP_CARD_GPU].pasRegions); + return eError; + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +InitHostHeaps(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps) +{ + PHYS_HEAP_REGION *psRegion; + + psRegion = OSAllocZMem(sizeof(*psRegion)); + if (!psRegion) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + pasPhysHeaps[PHY_HEAP_SYSTEM].ui32PhysHeapID = PHY_HEAP_SYSTEM; + pasPhysHeaps[PHY_HEAP_SYSTEM].eType = PHYS_HEAP_TYPE_UMA; + pasPhysHeaps[PHY_HEAP_SYSTEM].pszPDumpMemspaceName = "SYSTEM"; + pasPhysHeaps[PHY_HEAP_SYSTEM].pasRegions = psRegion; + pasPhysHeaps[PHY_HEAP_SYSTEM].ui32NumOfRegions = 1; + pasPhysHeaps[PHY_HEAP_SYSTEM].psMemFuncs = &gsHostPhysHeapFuncs; + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +PhysHeapsInit(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps, + void *pvPrivData) +{ + static IMG_UINT32 uiHeapIDBase = 0; + PVRSRV_ERROR eError; + IMG_UINT32 i; + + eError = InitLocalHeaps(psSysData, pasPhysHeaps); + if (eError != PVRSRV_OK) + { + return eError; + } + + eError = InitHostHeaps(psSysData, pasPhysHeaps); + if (eError != PVRSRV_OK) + { + FreePhysHeaps(pasPhysHeaps, PHY_HEAP_LMA_NUM); + return eError; + } + + /* Initialise fields that don't change between memory modes. + * Fix up heap IDs. This is needed for multi-testchip systems to + * ensure the heap IDs are unique as this is what Services expects. 
+ */ + for (i = 0; i < PHY_HEAP_NUM; i++) + { + pasPhysHeaps[i].ui32PhysHeapID += uiHeapIDBase; + pasPhysHeaps[i].hPrivData = pvPrivData; + } + + uiHeapIDBase += PHY_HEAP_NUM; + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +PhysHeapsCreate(const SYS_DATA *psSysData, void *pvPrivData, + PHYS_HEAP_CONFIG **ppasPhysHeapsOut, + IMG_UINT32 *puiPhysHeapCountOut) +{ + PHYS_HEAP_CONFIG *pasPhysHeaps; + PVRSRV_ERROR eError; + + pasPhysHeaps = OSAllocMem(sizeof(*pasPhysHeaps) * PHY_HEAP_NUM); + if (!pasPhysHeaps) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + eError = PhysHeapsInit(psSysData, pasPhysHeaps, pvPrivData); + if (eError != PVRSRV_OK) + { + OSFreeMem(pasPhysHeaps); + return eError; + } + + *ppasPhysHeapsOut = pasPhysHeaps; + *puiPhysHeapCountOut = PHY_HEAP_NUM; + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +DeviceMapPhysHeaps(IMG_UINT32 uiMemMode, PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + const PHYS_HEAP_CONFIG *pasPhysHeaps = psDevConfig->pasPhysHeaps; + IMG_UINT32 *aui32PhysHeapID = psDevConfig->aui32PhysHeapID; + + switch (uiMemMode) + { + case TC_MEMORY_LOCAL: + aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = + pasPhysHeaps[PHY_HEAP_CARD_GPU].ui32PhysHeapID; + aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = + pasPhysHeaps[PHY_HEAP_CARD_GPU].ui32PhysHeapID; + aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = + pasPhysHeaps[PHY_HEAP_CARD_GPU].ui32PhysHeapID; + aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL] = + pasPhysHeaps[PHY_HEAP_CARD_EXT].ui32PhysHeapID; + break; + case TC_MEMORY_HYBRID: + aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = + pasPhysHeaps[PHY_HEAP_CARD_GPU].ui32PhysHeapID; + aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = + pasPhysHeaps[PHY_HEAP_SYSTEM].ui32PhysHeapID; + aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = + pasPhysHeaps[PHY_HEAP_CARD_GPU].ui32PhysHeapID; + aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL] = + pasPhysHeaps[PHY_HEAP_CARD_EXT].ui32PhysHeapID; + break; + default: + PVR_ASSERT(!"Unsupported 
memory mode"); + return PVRSRV_ERROR_NOT_IMPLEMENTED; + } + + return PVRSRV_OK; +} + +static void DeviceConfigDestroy(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + if (psDevConfig->pszVersion) + { + OSFreeMem(psDevConfig->pszVersion); + } + + FreePhysHeaps(psDevConfig->pasPhysHeaps, psDevConfig->ui32PhysHeapCount); + + OSFreeMem(psDevConfig->pasPhysHeaps); + + OSFreeMem(psDevConfig); +} + +static PVRSRV_ERROR DeviceConfigCreate(SYS_DATA *psSysData, + PVRSRV_DEVICE_CONFIG **ppsDevConfigOut) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig; + RGX_DATA *psRGXData; + RGX_TIMING_INFORMATION *psRGXTimingInfo; + PHYS_HEAP_CONFIG *pasPhysHeaps; + IMG_UINT32 uiPhysHeapCount; + PVRSRV_ERROR eError; + + psDevConfig = OSAllocZMem(sizeof(*psDevConfig) + + sizeof(*psRGXData) + + sizeof(*psRGXTimingInfo)); + if (!psDevConfig) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psRGXData = (RGX_DATA *)((IMG_CHAR *)psDevConfig + sizeof(*psDevConfig)); + psRGXTimingInfo = (RGX_TIMING_INFORMATION *)((IMG_CHAR *)psRGXData + sizeof(*psRGXData)); + + eError = PhysHeapsCreate(psSysData, psDevConfig, &pasPhysHeaps, &uiPhysHeapCount); + if (eError != PVRSRV_OK) + { + goto ErrorFreeDevConfig; + } + + /* Setup RGX specific timing data */ + /* Volcanic FPGA 94MHz with divisor 16 = ~6MHz real clock */ + psRGXTimingInfo->ui32CoreClockSpeed = (94 * 1000 * 1000) / 16; + psRGXTimingInfo->bEnableActivePM = IMG_FALSE; + psRGXTimingInfo->bEnableRDPowIsland = IMG_FALSE; + psRGXTimingInfo->ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS; + + /* Set up the RGX data */ + psRGXData->psRGXTimingInfo = psRGXTimingInfo; + + /* Setup the device config */ + psDevConfig->pvOSDevice = &psSysData->pdev->dev; + psDevConfig->pszName = "tc"; + psDevConfig->pszVersion = GetDeviceVersionString(psSysData); + + psDevConfig->sRegsCpuPBase.uiAddr = psSysData->registers->start; + psDevConfig->ui32RegsSize = resource_size(psSysData->registers); + + psDevConfig->ui32IRQ = TC_INTERRUPT_EXT; + + psDevConfig->pasPhysHeaps = pasPhysHeaps; 
+ psDevConfig->ui32PhysHeapCount = uiPhysHeapCount; + + eError = DeviceMapPhysHeaps(psSysData->pdata->mem_mode, psDevConfig); + if (eError != PVRSRV_OK) + { + goto ErrorDestroyDevConfig; + } + + /* Only required for LMA but having this always set shouldn't be a problem */ + psDevConfig->bDevicePA0IsValid = IMG_TRUE; + + psDevConfig->hDevData = psRGXData; + psDevConfig->hSysData = psSysData; + + psDevConfig->pfnSysDevFeatureDepInit = NULL; + +#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS) + /* Dummy DVFS configuration used purely for testing purposes */ + psDevConfig->sDVFS.sDVFSDeviceCfg.pasOPPTable = asOPPTable; + psDevConfig->sDVFS.sDVFSDeviceCfg.ui32OPPTableSize = LEVEL_COUNT; + psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetFrequency = SetFrequency; + psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetVoltage = SetVoltage; +#endif +#if defined(SUPPORT_LINUX_DVFS) + psDevConfig->sDVFS.sDVFSDeviceCfg.ui32PollMs = 1000; + psDevConfig->sDVFS.sDVFSDeviceCfg.bIdleReq = IMG_TRUE; + psDevConfig->sDVFS.sDVFSGovernorCfg.ui32UpThreshold = 90; + psDevConfig->sDVFS.sDVFSGovernorCfg.ui32DownDifferential = 10; +#endif + + psDevConfig->bHasFBCDCVersion31 = IMG_FALSE; + + *ppsDevConfigOut = psDevConfig; + + return PVRSRV_OK; + +ErrorDestroyDevConfig: + DeviceConfigDestroy(psDevConfig); +ErrorFreeDevConfig: + OSFreeMem(psDevConfig); + return eError; +} + +PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig; + SYS_DATA *psSysData; + resource_size_t uiRegistersSize; + PVRSRV_ERROR eError; + int err = 0; + + PVR_ASSERT(pvOSDevice); + + psSysData = OSAllocZMem(sizeof(*psSysData)); + if (psSysData == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psSysData->pdev = to_platform_device((struct device *)pvOSDevice); + psSysData->pdata = psSysData->pdev->dev.platform_data; + + PVR_ASSERT(TC_MEMORY_CONFIG == psSysData->pdata->mem_mode); + + /* + * The device cannot address system memory, so there is no DMA + * limitation. 
+ */ + if (psSysData->pdata->mem_mode == TC_MEMORY_LOCAL) + { + dma_set_mask(pvOSDevice, DMA_BIT_MASK(64)); + } + else + { + dma_set_mask(pvOSDevice, DMA_BIT_MASK(32)); + } + + err = tc_enable(psSysData->pdev->dev.parent); + if (err) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to enable PCI device (%d)", __func__, err)); + eError = PVRSRV_ERROR_PCI_CALL_FAILED; + goto ErrFreeSysData; + } + + psSysData->registers = platform_get_resource_byname(psSysData->pdev, + IORESOURCE_MEM, + "rogue-regs"); + if (!psSysData->registers) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to get Rogue register information", + __func__)); + eError = PVRSRV_ERROR_PCI_REGION_UNAVAILABLE; + goto ErrorDevDisable; + } + + /* Check the address range is large enough. */ + uiRegistersSize = resource_size(psSysData->registers); + if (uiRegistersSize < SYS_RGX_REG_REGION_SIZE) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Rogue register region isn't big enough (was %pa, required 0x%08x)", + __func__, &uiRegistersSize, SYS_RGX_REG_REGION_SIZE)); + + eError = PVRSRV_ERROR_PCI_REGION_TOO_SMALL; + goto ErrorDevDisable; + } + + /* Reserve the address range */ + if (!request_mem_region(psSysData->registers->start, + resource_size(psSysData->registers), + SYS_RGX_DEV_NAME)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Rogue register memory region not available", + __func__)); + eError = PVRSRV_ERROR_PCI_CALL_FAILED; + + goto ErrorDevDisable; + } + + eError = DeviceConfigCreate(psSysData, &psDevConfig); + if (eError != PVRSRV_OK) + { + goto ErrorReleaseMemRegion; + } + +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + eError = IonInit(psSysData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise ION", __func__)); + goto ErrorDeviceConfigDestroy; + } +#endif + + *ppsDevConfig = psDevConfig; + + return PVRSRV_OK; + +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) +ErrorDeviceConfigDestroy: + DeviceConfigDestroy(psDevConfig); +#endif 
+ErrorReleaseMemRegion: + release_mem_region(psSysData->registers->start, + resource_size(psSysData->registers)); +ErrorDevDisable: + tc_disable(psSysData->pdev->dev.parent); +ErrFreeSysData: + OSFreeMem(psSysData); + return eError; +} + +void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + SYS_DATA *psSysData = (SYS_DATA *)psDevConfig->hSysData; + +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + IonDeinit(); +#endif + + DeviceConfigDestroy(psDevConfig); + + release_mem_region(psSysData->registers->start, + resource_size(psSysData->registers)); + tc_disable(psSysData->pdev->dev.parent); + + OSFreeMem(psSysData); +} + +PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ +#if defined(TC_APOLLO_TCF5) + PVR_UNREFERENCED_PARAMETER(psDevConfig); + PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf); + return PVRSRV_OK; +#else + SYS_DATA *psSysData = psDevConfig->hSysData; + PVRSRV_ERROR eError = PVRSRV_OK; + u32 tmp = 0; + u32 pll; + + PVR_DUMPDEBUG_LOG("------[ rgx_tc system debug ]------"); + + if (tc_sys_info(psSysData->pdev->dev.parent, &tmp, &pll)) + goto err_out; + + if (tmp > 0) + PVR_DUMPDEBUG_LOG("Chip temperature: %d degrees C", tmp); + PVR_DUMPDEBUG_LOG("PLL status: %x", pll); + +err_out: + return eError; +#endif +} + +typedef struct +{ + struct device *psDev; + int iInterruptID; + void *pvData; + PFN_LISR pfnLISR; +} LISR_DATA; + +static void TCInterruptHandler(void* pvData) +{ + LISR_DATA *psLISRData = pvData; + psLISRData->pfnLISR(psLISRData->pvData); +} + +PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData, + IMG_UINT32 ui32IRQ, + const IMG_CHAR *pszName, + PFN_LISR pfnLISR, + void *pvData, + IMG_HANDLE *phLISRData) +{ + SYS_DATA *psSysData = (SYS_DATA *)hSysData; + LISR_DATA *psLISRData; + PVRSRV_ERROR eError; + int err; + + if (ui32IRQ != TC_INTERRUPT_EXT) + { + PVR_DPF((PVR_DBG_ERROR, "%s: No device matching IRQ %d", __func__, ui32IRQ)); + 
return PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR; + } + + psLISRData = OSAllocZMem(sizeof(*psLISRData)); + if (!psLISRData) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_out; + } + + psLISRData->pfnLISR = pfnLISR; + psLISRData->pvData = pvData; + psLISRData->iInterruptID = ui32IRQ; + psLISRData->psDev = psSysData->pdev->dev.parent; + + err = tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, TCInterruptHandler, psLISRData); + if (err) + { + PVR_DPF((PVR_DBG_ERROR, "%s: tc_set_interrupt_handler() failed (%d)", __func__, err)); + eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR; + goto err_free_data; + } + + err = tc_enable_interrupt(psLISRData->psDev, psLISRData->iInterruptID); + if (err) + { + PVR_DPF((PVR_DBG_ERROR, "%s: tc_enable_interrupt() failed (%d)", __func__, err)); + eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR; + goto err_unset_interrupt_handler; + } + + *phLISRData = psLISRData; + eError = PVRSRV_OK; + + PVR_TRACE(("Installed device LISR " IMG_PFN_FMTSPEC " to irq %u", pfnLISR, ui32IRQ)); + +err_out: + return eError; +err_unset_interrupt_handler: + tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, NULL, NULL); +err_free_data: + OSFreeMem(psLISRData); + goto err_out; +} + +PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData) +{ + LISR_DATA *psLISRData = (LISR_DATA *) hLISRData; + int err; + + err = tc_disable_interrupt(psLISRData->psDev, psLISRData->iInterruptID); + if (err) + { + PVR_DPF((PVR_DBG_ERROR, "%s: tc_disable_interrupt() failed (%d)", __func__, err)); + } + + err = tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, NULL, NULL); + if (err) + { + PVR_DPF((PVR_DBG_ERROR, "%s: tc_set_interrupt_handler() failed (%d)", __func__, err)); + } + + PVR_TRACE(("Uninstalled device LISR " IMG_PFN_FMTSPEC " from irq %u", psLISRData->pfnLISR, psLISRData->iInterruptID)); + + OSFreeMem(psLISRData); + + return PVRSRV_OK; +} diff --git 
a/drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_linux_tc/sysinfo.h b/drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_linux_tc/sysinfo.h new file mode 100644 index 000000000000..7171a525f5e4 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_linux_tc/sysinfo.h @@ -0,0 +1,60 @@ +/*************************************************************************/ /*! +@File +@Title System Description Header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides system-specific declarations and macros +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__SYSINFO_H__) +#define __SYSINFO_H__ + +/*!< System specific poll/timeout details */ +#if defined(VIRTUAL_PLATFORM) +#define MAX_HW_TIME_US (240000000) +#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (120000) +#else +#define MAX_HW_TIME_US (20000000) +#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (10000) // (1500) +#endif +#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000) +#define WAIT_TRY_COUNT (10000) + +#define SYS_RGX_DEV_NAME "tc_rogue" + +#endif /* !defined(__SYSINFO_H__) */ diff --git a/drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_nohw/Kbuild.mk b/drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_nohw/Kbuild.mk new file mode 100644 index 000000000000..55b9e9dc6e21 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_nohw/Kbuild.mk @@ -0,0 +1,54 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### +PVRSRVKM_NAME = $(PVRSRV_MODNAME) + +$(PVRSRVKM_NAME)-y += services/system/volcanic/$(PVR_SYSTEM)/sysconfig.o \ + services/system/volcanic/common/env/linux/interrupt_support.o \ + services/system/volcanic/common/env/linux/dma_support.o \ + services/server/common/vmm_pvz_client.o \ + services/server/common/vmm_pvz_server.o \ + services/server/common/vz_vmm_pvz.o \ + services/server/common/vz_vmm_vm.o \ + services/system/volcanic/common/vmm_type_$(VMM_TYPE).o + +ifeq ($(SUPPORT_ION),1) +$(PVRSRVKM_NAME)-y += services/system/volcanic/common/env/linux/ion_support_generic.o +endif diff --git a/drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_nohw/sysconfig.c b/drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_nohw/sysconfig.c new file mode 100644 index 000000000000..a6b78ee3c4ed --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_nohw/sysconfig.c @@ -0,0 +1,346 @@ +/*************************************************************************/ /*! +@File +@Title System Configuration +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description System Configuration functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pvrsrv.h" +#include "pvrsrv_device.h" +#include "syscommon.h" +#include "vz_vmm_pvz.h" +#include "allocmem.h" +#include "sysinfo.h" +#include "sysconfig.h" +#include "physheap.h" +#if defined(SUPPORT_ION) +#include "ion_support.h" +#endif +#if defined(LINUX) +#include +#endif +/* + * In systems that support trusted device address protection, there are three + * physical heaps from which pages should be allocated: + * - one heap for normal allocations + * - one heap for allocations holding META code memory + * - one heap for allocations holding secured DRM data + */ + +#define PHYS_HEAP_IDX_GENERAL 0 +#define PHYS_HEAP_IDX_TDFWMEM 1 +#define PHYS_HEAP_IDX_TDSECUREBUF 2 +#define PHYS_HEAP_IDX_VIRTFW 3 +#define PHYS_HEAP_IDX_FW_MEMORY 4 + +#if defined(SUPPORT_VALIDATION) && defined(PDUMP) +#include "validation_soc.h" +#endif + +/* + CPU to Device physical address translation +*/ +static +void UMAPhysHeapCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr) +{ + PVR_UNREFERENCED_PARAMETER(hPrivData); + + /* Optimise common case */ + psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr; + if (ui32NumOfAddr > 1) + { + IMG_UINT32 ui32Idx; + for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) + { + psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr; + } + } +} + +/* + Device to CPU physical address translation +*/ +static +void UMAPhysHeapDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PVR_UNREFERENCED_PARAMETER(hPrivData); + + /* Optimise common case */ + psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr; + if (ui32NumOfAddr > 1) + { + IMG_UINT32 ui32Idx; + for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) + { + psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr; + } + } +} + +static PHYS_HEAP_FUNCTIONS 
gsPhysHeapFuncs = +{ + /* pfnCpuPAddrToDevPAddr */ + UMAPhysHeapCpuPAddrToDevPAddr, + /* pfnDevPAddrToCpuPAddr */ + UMAPhysHeapDevPAddrToCpuPAddr, + /* pfnGetRegionId */ + NULL, +}; + +static PVRSRV_ERROR PhysHeapsCreate(PHYS_HEAP_CONFIG **ppasPhysHeapsOut, + IMG_UINT32 *puiPhysHeapCountOut) +{ + PHYS_HEAP_CONFIG *pasPhysHeaps; + static IMG_UINT32 uiHeapIDBase = 0; + IMG_UINT32 ui32NextHeapID = 0; + IMG_UINT32 uiHeapCount = 1; + +#if defined(SUPPORT_TRUSTED_DEVICE) + uiHeapCount += 1; +#elif defined(SUPPORT_DEDICATED_FW_MEMORY) + uiHeapCount += 1; +#endif + uiHeapCount += !PVRSRV_VZ_MODE_IS(NATIVE) ? 1:0; + + pasPhysHeaps = OSAllocZMem(sizeof(*pasPhysHeaps) * uiHeapCount); + if (!pasPhysHeaps) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + pasPhysHeaps[ui32NextHeapID].ui32PhysHeapID = PHYS_HEAP_IDX_GENERAL; + pasPhysHeaps[ui32NextHeapID].pszPDumpMemspaceName = "SYSMEM"; + pasPhysHeaps[ui32NextHeapID].eType = PHYS_HEAP_TYPE_UMA; + pasPhysHeaps[ui32NextHeapID].psMemFuncs = &gsPhysHeapFuncs; + ui32NextHeapID++; + +#if defined(SUPPORT_TRUSTED_DEVICE) + pasPhysHeaps[ui32NextHeapID].ui32PhysHeapID = PHYS_HEAP_IDX_TDFWMEM; + pasPhysHeaps[ui32NextHeapID].pszPDumpMemspaceName = "TDFWMEM"; + pasPhysHeaps[ui32NextHeapID].eType = PHYS_HEAP_TYPE_UMA; + pasPhysHeaps[ui32NextHeapID].psMemFuncs = &gsPhysHeapFuncs; + ui32NextHeapID++; +#elif defined(SUPPORT_DEDICATED_FW_MEMORY) + pasPhysHeaps[ui32NextHeapID].ui32PhysHeapID = PHYS_HEAP_IDX_FW_MEMORY; + pasPhysHeaps[ui32NextHeapID].pszPDumpMemspaceName = "DEDICATEDFWMEM"; + pasPhysHeaps[ui32NextHeapID].eType = PHYS_HEAP_TYPE_UMA; + pasPhysHeaps[ui32NextHeapID].psMemFuncs = &gsPhysHeapFuncs; +#endif + + if (! 
PVRSRV_VZ_MODE_IS(NATIVE)) + { + pasPhysHeaps[ui32NextHeapID].ui32PhysHeapID = PHYS_HEAP_IDX_VIRTFW; + pasPhysHeaps[ui32NextHeapID].pszPDumpMemspaceName = "SYSMEM"; + pasPhysHeaps[ui32NextHeapID].eType = PHYS_HEAP_TYPE_UMA; + pasPhysHeaps[ui32NextHeapID].psMemFuncs = &gsPhysHeapFuncs; + ui32NextHeapID++; + } + + uiHeapIDBase += uiHeapCount; + + *ppasPhysHeapsOut = pasPhysHeaps; + *puiPhysHeapCountOut = uiHeapCount; + + return PVRSRV_OK; +} + +static void PhysHeapsDestroy(PHYS_HEAP_CONFIG *pasPhysHeaps) +{ + OSFreeMem(pasPhysHeaps); +} + +PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig; + RGX_DATA *psRGXData; + RGX_TIMING_INFORMATION *psRGXTimingInfo; + PHYS_HEAP_CONFIG *pasPhysHeaps; + IMG_UINT32 uiPhysHeapCount; + PVRSRV_ERROR eError; + +#if defined(LINUX) + dma_set_mask(pvOSDevice, DMA_BIT_MASK(40)); +#endif + + psDevConfig = OSAllocZMem(sizeof(*psDevConfig) + + sizeof(*psRGXData) + + sizeof(*psRGXTimingInfo)); + if (!psDevConfig) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psRGXData = (RGX_DATA *)((IMG_CHAR *)psDevConfig + sizeof(*psDevConfig)); + psRGXTimingInfo = (RGX_TIMING_INFORMATION *)((IMG_CHAR *)psRGXData + sizeof(*psRGXData)); + + eError = PhysHeapsCreate(&pasPhysHeaps, &uiPhysHeapCount); + if (eError) + { + goto ErrorFreeDevConfig; + } + + /* Setup RGX specific timing data */ + psRGXTimingInfo->ui32CoreClockSpeed = RGX_NOHW_CORE_CLOCK_SPEED; + psRGXTimingInfo->bEnableActivePM = IMG_FALSE; + psRGXTimingInfo->bEnableRDPowIsland = IMG_FALSE; + psRGXTimingInfo->ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS; + + /* Set up the RGX data */ + psRGXData->psRGXTimingInfo = psRGXTimingInfo; + +#if defined(SUPPORT_TRUSTED_DEVICE) + psRGXData->bHasTDFWMemPhysHeap = IMG_TRUE; + psRGXData->uiTDFWMemPhysHeapID = PHYS_HEAP_IDX_TDFWMEM; +#elif defined(SUPPORT_DEDICATED_FW_MEMORY) + psRGXData->bHasFWMemPhysHeap = IMG_TRUE; + psRGXData->uiFWMemPhysHeapID = PHYS_HEAP_IDX_FW_MEMORY; +#endif 
+ + /* Setup the device config */ + psDevConfig->pvOSDevice = pvOSDevice; + psDevConfig->pszName = "nohw"; + psDevConfig->pszVersion = NULL; + + /* Device setup information */ + psDevConfig->sRegsCpuPBase.uiAddr = 0x00f00baa; + psDevConfig->ui32RegsSize = 0x4000; + psDevConfig->ui32IRQ = 0x00000bad; + + psDevConfig->pasPhysHeaps = pasPhysHeaps; + psDevConfig->ui32PhysHeapCount = uiPhysHeapCount; + + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = PHYS_HEAP_IDX_GENERAL; + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = PHYS_HEAP_IDX_GENERAL; + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = PHYS_HEAP_IDX_GENERAL; + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL] = PHYS_HEAP_IDX_GENERAL; + if (! PVRSRV_VZ_MODE_IS(NATIVE)) + { + /* Virtualization support services needs to know which heap ID corresponds to FW */ + psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = PHYS_HEAP_IDX_VIRTFW; + } + + /* No power management on no HW system */ + psDevConfig->pfnPrePowerState = NULL; + psDevConfig->pfnPostPowerState = NULL; + + /* No clock frequency either */ + psDevConfig->pfnClockFreqGet = NULL; + + psDevConfig->hDevData = psRGXData; + + psDevConfig->bDevicePA0IsValid = IMG_FALSE; + psDevConfig->pfnSysDevFeatureDepInit = NULL; + + /* Setup other system specific stuff */ +#if defined(SUPPORT_ION) + IonInit(NULL); +#endif + + /* Pdump validation system registers */ +#if defined(SUPPORT_VALIDATION) && defined(PDUMP) + PVRSRVConfigureSysCtrl(NULL, PDUMP_FLAGS_CONTINUOUS); +#if defined(SUPPORT_SECURITY_VALIDATION) + PVRSRVConfigureTrustedDevice(NULL, PDUMP_FLAGS_CONTINUOUS); +#endif +#endif + + psDevConfig->bHasFBCDCVersion31 = IMG_TRUE; + + *ppsDevConfig = psDevConfig; + + return PVRSRV_OK; + +ErrorFreeDevConfig: + OSFreeMem(psDevConfig); + return eError; +} + +void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ +#if defined(SUPPORT_ION) + IonDeinit(); +#endif + + 
PhysHeapsDestroy(psDevConfig->pasPhysHeaps); + OSFreeMem(psDevConfig); +} + +PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData, + IMG_UINT32 ui32IRQ, + const IMG_CHAR *pszName, + PFN_LISR pfnLISR, + void *pvData, + IMG_HANDLE *phLISRData) +{ + PVR_UNREFERENCED_PARAMETER(hSysData); + PVR_UNREFERENCED_PARAMETER(ui32IRQ); + PVR_UNREFERENCED_PARAMETER(pszName); + PVR_UNREFERENCED_PARAMETER(pfnLISR); + PVR_UNREFERENCED_PARAMETER(pvData); + PVR_UNREFERENCED_PARAMETER(phLISRData); + + return PVRSRV_OK; +} + +PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData) +{ + PVR_UNREFERENCED_PARAMETER(hLISRData); + + return PVRSRV_OK; +} + +PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVR_UNREFERENCED_PARAMETER(psDevConfig); + PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf); + PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile); + return PVRSRV_OK; +} + +/****************************************************************************** + End of file (sysconfig.c) +******************************************************************************/ diff --git a/drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_nohw/sysconfig.h b/drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_nohw/sysconfig.h new file mode 100644 index 000000000000..8da8dc8354b2 --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_nohw/sysconfig.h @@ -0,0 +1,58 @@ +/*************************************************************************/ /*! +@File +@Title System Description Header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides system-specific declarations and macros +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pvrsrv_device.h" +#include "rgxdevice.h" + +#if !defined(__SYSCCONFIG_H__) +#define __SYSCCONFIG_H__ + + +#define RGX_NOHW_CORE_CLOCK_SPEED 100000000 +#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (0) + +/***************************************************************************** + * system specific data structures + *****************************************************************************/ + +#endif /* __SYSCCONFIG_H__ */ diff --git a/drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_nohw/sysinfo.h b/drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_nohw/sysinfo.h new file mode 100644 index 000000000000..83587b1857fb --- /dev/null +++ b/drivers/mcst/gpu-imgtec/services/system/volcanic/rgx_nohw/sysinfo.h @@ -0,0 +1,57 @@ +/*************************************************************************/ /*! +@File +@Title System Description Header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides system-specific declarations and macros +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__SYSINFO_H__) +#define __SYSINFO_H__ + +/*!< System specific poll/timeout details */ +#define MAX_HW_TIME_US (500000) +#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (1500)//(10000) +#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000) +#define WAIT_TRY_COUNT (10000) + +#if defined(__linux__) +#define SYS_RGX_DEV_NAME "rgxnohw" +#endif + +#endif /* !defined(__SYSINFO_H__) */ diff --git a/drivers/mcst/gpu-viv/Kbuild b/drivers/mcst/gpu-viv/Kbuild new file mode 100644 index 000000000000..5bfbc33f7295 --- /dev/null +++ b/drivers/mcst/gpu-viv/Kbuild @@ -0,0 +1,295 @@ +############################################################################## +# +# The MIT License (MIT) +# +# Copyright (c) 2014 - 2018 Vivante Corporation +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+# +############################################################################## +# +# The GPL License (GPL) +# +# Copyright (C) 2014 - 2018 Vivante Corporation +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +############################################################################## +# +# Note: This software is released under dual MIT and GPL licenses. A +# recipient may use this file under the terms of either the MIT license or +# GPL License. If you wish to use only one license not the other, you can +# indicate your decision by deleting one of the above license notices in your +# version of this file. +# +############################################################################## + + +# +# Linux build file for kernel HAL driver. +# + +AQROOT := $(srctree)/drivers/mcst/gpu-viv + +include $(AQROOT)/config + +soc_vendor := $(firstword $(subst -, ,$(SOC_PLATFORM))) +soc_board := $(lastword $(subst -, ,$(SOC_PLATFORM))) + +KERNEL_DIR ?= $(TOOL_DIR)/kernel + +OS_KERNEL_DIR := hal/os/linux/kernel +ARCH_KERNEL_DIR := hal/kernel/arch +ARCH_VG_KERNEL_DIR := hal/kernel/archvg +HAL_KERNEL_DIR := hal/kernel +TA_DIR := hal/security_v1 +HOST := $(shell hostname) + +# Include platform config if exists. 
+-include $(AQROOT)/$(OS_KERNEL_DIR)/platform/$(soc_vendor)/gc_hal_kernel_platform_$(soc_board).config + +MODULE_NAME ?= galcore +CUSTOMER_ALLOCATOR_OBJS ?= +ALLOCATOR_ARRAY_H_LOCATION ?= $(OS_KERNEL_DIR)/allocator/default/ + +EXTRA_CFLAGS += -Werror + +OBJS := $(OS_KERNEL_DIR)/gc_hal_kernel_device.o \ + $(OS_KERNEL_DIR)/gc_hal_kernel_linux.o \ + $(OS_KERNEL_DIR)/gc_hal_kernel_math.o \ + $(OS_KERNEL_DIR)/gc_hal_kernel_os.o \ + $(OS_KERNEL_DIR)/gc_hal_kernel_debugfs.o \ + $(OS_KERNEL_DIR)/gc_hal_kernel_allocator.o \ + $(OS_KERNEL_DIR)/allocator/default/gc_hal_kernel_allocator_user_memory.o \ + $(OS_KERNEL_DIR)/allocator/default/gc_hal_kernel_allocator_dma.o \ + $(OS_KERNEL_DIR)/allocator/default/gc_hal_kernel_allocator_gfp.o \ + $(OS_KERNEL_DIR)/allocator/default/gc_hal_kernel_allocator_reserved_mem.o \ + $(OS_KERNEL_DIR)/gc_hal_kernel_driver.o \ + $(OS_KERNEL_DIR)/platform/$(soc_vendor)/gc_hal_kernel_platform_$(soc_board).o + +ifneq ($(CONFIG_DMA_SHARED_BUFFER),) +OBJS += $(OS_KERNEL_DIR)/allocator/default/gc_hal_kernel_allocator_dmabuf.o +endif + +ifneq ($(CONFIG_IOMMU_SUPPORT),) +OBJS += $(OS_KERNEL_DIR)/gc_hal_kernel_iommu.o +endif + +ifneq ($(CONFIG_DRM),) +OBJS += $(OS_KERNEL_DIR)/gc_hal_kernel_drm.o +endif + +OBJS += $(HAL_KERNEL_DIR)/gc_hal_kernel.o \ + $(HAL_KERNEL_DIR)/gc_hal_kernel_command.o \ + $(HAL_KERNEL_DIR)/gc_hal_kernel_async_command.o \ + $(HAL_KERNEL_DIR)/gc_hal_kernel_db.o \ + $(HAL_KERNEL_DIR)/gc_hal_kernel_debug.o \ + $(HAL_KERNEL_DIR)/gc_hal_kernel_event.o \ + $(HAL_KERNEL_DIR)/gc_hal_kernel_heap.o \ + $(HAL_KERNEL_DIR)/gc_hal_kernel_mmu.o \ + $(HAL_KERNEL_DIR)/gc_hal_kernel_video_memory.o \ + $(HAL_KERNEL_DIR)/gc_hal_kernel_power.o \ + $(HAL_KERNEL_DIR)/gc_hal_kernel_security_v1.o + +OBJS += $(ARCH_KERNEL_DIR)/gc_hal_kernel_context.o \ + $(ARCH_KERNEL_DIR)/gc_hal_kernel_hardware.o + +ifeq ($(VIVANTE_ENABLE_3D),1) +OBJS += $(ARCH_KERNEL_DIR)/gc_hal_kernel_recorder.o +endif + + +ifneq ($(CONFIG_SYNC),) +EXTRA_CFLAGS += 
-Idrivers/staging/android +EXTRA_CFLAGS += -DgcdLINUX_SYNC_FILE=1 + +OBJS += $(OS_KERNEL_DIR)/gc_hal_kernel_sync.o +else + ifneq ($(CONFIG_SYNC_FILE),) + EXTRA_CFLAGS += -DgcdLINUX_SYNC_FILE=1 + OBJS += $(OS_KERNEL_DIR)/gc_hal_kernel_sync.o + endif +endif + +ifeq ($(SECURITY),1) +OBJS += $(OS_KERNEL_DIR)/gc_hal_kernel_security_channel.o \ + $(HAL_KERNEL_DIR)/gc_hal_kernel_security.o +endif + +ifneq ($(CUSTOMER_ALLOCATOR_OBJS),) +OBJS += $(CUSTOMER_ALLOCATOR_OBJS) +endif + +OBJS += $(OS_KERNEL_DIR)/gc_hal_kernel_security_channel_emulator.o \ + $(TA_DIR)/gc_hal_ta.o \ + $(TA_DIR)/gc_hal_ta_hardware.o \ + $(TA_DIR)/gc_hal_ta_mmu.o \ + $(TA_DIR)/os/emulator/gc_hal_ta_emulator.o + +ifeq ($(KERNELRELEASE),) + +.PHONY: all clean install + +# Define targets. +all: + @$(MAKE) V=$(V) ARCH=$(ARCH_TYPE) -C $(KERNEL_DIR) SUBDIRS=`pwd` modules + +clean: + @rm -rf $(OBJS) + @rm -rf modules.order Module.symvers .tmp_versions + @find $(AQROOT) -name ".gc_*.cmd" | xargs rm -f + +install: all + +else + + +EXTRA_CFLAGS += -DLINUX -DDRIVER + +ifeq ($(FLAREON),1) +EXTRA_CFLAGS += -DFLAREON +endif + +ifeq ($(DEBUG),1) +EXTRA_CFLAGS += -DDBG=1 -DDEBUG -D_DEBUG +else +EXTRA_CFLAGS += -DDBG=0 +endif + +ifeq ($(NO_DMA_COHERENT),1) +EXTRA_CFLAGS += -DNO_DMA_COHERENT +endif + +ifeq ($(CONFIG_DOVE_GPU),1) +EXTRA_CFLAGS += -DCONFIG_DOVE_GPU=1 +endif + +ifneq ($(USE_PLATFORM_DRIVER),0) +EXTRA_CFLAGS += -DUSE_PLATFORM_DRIVER=1 +else +EXTRA_CFLAGS += -DUSE_PLATFORM_DRIVER=0 +endif + +EXTRA_CFLAGS += -DVIVANTE_PROFILER=1 +EXTRA_CFLAGS += -DVIVANTE_PROFILER_CONTEXT=1 + +ifeq ($(ENABLE_GPU_CLOCK_BY_DRIVER),1) +EXTRA_CFLAGS += -DENABLE_GPU_CLOCK_BY_DRIVER=1 +else +EXTRA_CFLAGS += -DENABLE_GPU_CLOCK_BY_DRIVER=0 +endif + +ifeq ($(USE_NEW_LINUX_SIGNAL),1) +EXTRA_CFLAGS += -DUSE_NEW_LINUX_SIGNAL=1 +else +EXTRA_CFLAGS += -DUSE_NEW_LINUX_SIGNAL=0 +endif + +ifeq ($(USE_LINUX_PCIE),1) +EXTRA_CFLAGS += -DUSE_LINUX_PCIE=1 +else +EXTRA_CFLAGS += -DUSE_LINUX_PCIE=0 +endif + +ifeq 
($(FORCE_ALL_VIDEO_MEMORY_CACHED),1) +EXTRA_CFLAGS += -DgcdPAGED_MEMORY_CACHEABLE=1 +else +EXTRA_CFLAGS += -DgcdPAGED_MEMORY_CACHEABLE=0 +endif + +ifeq ($(CACHE_FUNCTION_UNIMPLEMENTED),1) +EXTRA_CFLAGS += -DgcdCACHE_FUNCTION_UNIMPLEMENTED=1 +else +EXTRA_CFLAGS += -DgcdCACHE_FUNCTION_UNIMPLEMENTED=0 +endif + +ifeq ($(VIVANTE_ENABLE_3D),0) +EXTRA_CFLAGS += -DgcdENABLE_3D=0 +else +EXTRA_CFLAGS += -DgcdENABLE_3D=1 +endif + +EXTRA_CFLAGS += -DgcdENABLE_2D=0 + +EXTRA_CFLAGS += -DgcdENABLE_VG=0 + +ifeq ($(USE_BANK_ALIGNMENT),1) + EXTRA_CFLAGS += -DgcdENABLE_BANK_ALIGNMENT=1 + ifneq ($(BANK_BIT_START),0) + ifneq ($(BANK_BIT_END),0) + EXTRA_CFLAGS += -DgcdBANK_BIT_START=$(BANK_BIT_START) + EXTRA_CFLAGS += -DgcdBANK_BIT_END=$(BANK_BIT_END) + endif + endif + + ifneq ($(BANK_CHANNEL_BIT),0) + EXTRA_CFLAGS += -DgcdBANK_CHANNEL_BIT=$(BANK_CHANNEL_BIT) + endif +endif + +ifeq ($(FPGA_BUILD),1) +EXTRA_CFLAGS += -DgcdFPGA_BUILD=1 +else +EXTRA_CFLAGS += -DgcdFPGA_BUILD=0 +endif + +ifeq ($(SECURITY),1) +EXTRA_CFLAGS += -DgcdSECURITY=1 +endif + +EXTRA_CFLAGS += -DgcdENABLE_DRM=$(VIVANTE_ENABLE_DRM) + +EXTRA_CFLAGS += -I$(AQROOT)/hal/kernel/inc +EXTRA_CFLAGS += -I$(AQROOT)/hal/kernel +EXTRA_CFLAGS += -I$(AQROOT)/hal/kernel/arch +EXTRA_CFLAGS += -I$(AQROOT)/hal/kernel/inc +EXTRA_CFLAGS += -I$(AQROOT)/hal/os/linux/kernel +EXTRA_CFLAGS += -I$(AQROOT)/$(ALLOCATOR_ARRAY_H_LOCATION) +EXTRA_CFLAGS += -I$(AQROOT)/hal/security_v1/ + +ifneq ($(CONFIG_ARM),) +EXTRA_CFLAGS += -Iarch/arm/mm +endif + + +ifeq ($(VIVANTE_ENABLE_DRM),1) +EXTRA_CFLAGS += -I$(AQROOT)/driver/X/libdrm-2.4.66/include/drm +endif + +EXTRA_CFLAGS += -DHOST=\"$(HOST)\" + +EXTRA_CFLAGS += -DgcdENABLE_TRUST_APPLICATION=1 + +obj-$(CONFIG_MCST_GPU_VIV) = $(MODULE_NAME).o + +$(MODULE_NAME)-objs = $(OBJS) + +endif diff --git a/drivers/mcst/gpu-viv/Kconfig b/drivers/mcst/gpu-viv/Kconfig new file mode 100644 index 000000000000..907e12995d0e --- /dev/null +++ b/drivers/mcst/gpu-viv/Kconfig @@ -0,0 +1,12 @@ +menu "MCST Vivante GPU support 
(galcore v6.2.4p3)" + depends on DRM_VIVANTE + +config MCST_GPU_VIV + tristate "MCST Vivante GPU support (galcore v6.2.4p3)" + depends on FORCE_MAX_ZONEORDER > 15 + select DRM_VM + default m + ---help--- + Say Y to get the GPU driver support. + +endmenu diff --git a/drivers/mcst/gpu-viv/config b/drivers/mcst/gpu-viv/config new file mode 100644 index 000000000000..59b2016a0f48 --- /dev/null +++ b/drivers/mcst/gpu-viv/config @@ -0,0 +1,72 @@ +############################################################################## +# +# The MIT License (MIT) +# +# Copyright (c) 2014 - 2018 Vivante Corporation +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+# +############################################################################## +# +# The GPL License (GPL) +# +# Copyright (C) 2014 - 2018 Vivante Corporation +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +############################################################################## +# +# Note: This software is released under dual MIT and GPL licenses. A +# recipient may use this file under the terms of either the MIT license or +# GPL License. If you wish to use only one license not the other, you can +# indicate your decision by deleting one of the above license notices in your +# version of this file. 
+# +############################################################################## + + +ARCH_TYPE ?= arm +SDK_DIR ?= $(AQROOT)/build/sdk +VIVANTE_ENABLE_3D ?= 1 +VIVANTE_ENABLE_2D ?= 1 +VIVANTE_ENABLE_VG ?= 1 +VIVANTE_ENABLE_DRM ?= 0 +NO_DMA_COHERENT ?= 1 +USE_PLATFORM_DRIVER ?= 1 +ENABLE_GPU_CLOCK_BY_DRIVER ?= 1 +FORCE_ALL_VIDEO_MEMORY_CACHED ?= 0 +CACHE_FUNCTION_UNIMPLEMENTED ?= 0 +USE_BANK_ALIGNMENT ?= 1 +BANK_BIT_START ?= 16 +BANK_BIT_END ?= 17 +BANK_CHANNEL_BIT ?= 13 +SECURITY ?= 0 +SOC_PLATFORM ?= mcst diff --git a/drivers/mcst/gpu-viv/hal/kernel/arch/gc_hal_kernel_context.c b/drivers/mcst/gpu-viv/hal/kernel/arch/gc_hal_kernel_context.c new file mode 100644 index 000000000000..167d69802f81 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/arch/gc_hal_kernel_context.c @@ -0,0 +1,4872 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#include "gc_hal.h" +#include "gc_hal_kernel.h" +#include "gc_hal_kernel_context.h" +#include "gc_hal_kernel_buffer.h" + +/******************************************************************************\ +******************************** Debugging Macro ******************************* +\******************************************************************************/ + +/* Zone used for header/footer. */ +#define _GC_OBJ_ZONE gcvZONE_HARDWARE + + +/******************************************************************************\ +************************** Context State Buffer Helpers ************************ +\******************************************************************************/ + +#define _STATE(reg) \ + _State(\ + Context, index, \ + reg ## _Address >> 2, \ + reg ## _ResetValue, \ + reg ## _Count, \ + gcvFALSE, gcvFALSE \ + ) + +#define _STATE_COUNT(reg, count) \ + _State(\ + Context, index, \ + reg ## _Address >> 2, \ + reg ## _ResetValue, \ + count, \ + gcvFALSE, gcvFALSE \ + ) + +#define _STATE_COUNT_OFFSET(reg, offset, count) \ + _State(\ + Context, index, \ + (reg ## _Address >> 2) + offset, \ + reg ## _ResetValue, \ + count, \ + gcvFALSE, gcvFALSE \ + ) + +#define _STATE_MIRROR_COUNT(reg, mirror, count) \ + _StateMirror(\ + Context, \ + reg ## _Address >> 2, \ + count, \ + mirror ## _Address >> 2 \ + ) + +#define _STATE_HINT(reg) \ + _State(\ + Context, index, \ + reg ## _Address >> 2, \ + reg ## _ResetValue, \ + reg ## _Count, \ + gcvFALSE, gcvTRUE \ + ) + +#define _STATE_HINT_BLOCK(reg, block, count) \ + _State(\ + Context, index, \ + (reg ## _Address >> 2) + (block << reg ## _BLK), \ + reg ## _ResetValue, \ + count, \ + gcvFALSE, gcvTRUE \ + ) + +#define _STATE_COUNT_OFFSET_HINT(reg, offset, count) \ + _State(\ + Context, index, \ + (reg ## _Address >> 2) + offset, \ + reg ## _ResetValue, \ + count, \ + gcvFALSE, gcvTRUE \ + ) + +#define _STATE_X(reg) \ + 
_State(\ + Context, index, \ + reg ## _Address >> 2, \ + reg ## _ResetValue, \ + reg ## _Count, \ + gcvTRUE, gcvFALSE \ + ) + +#define _STATE_INIT_VALUE(reg, value) \ + _State(\ + Context, index, \ + reg ## _Address >> 2, \ + value, \ + reg ## _Count, \ + gcvFALSE, gcvFALSE \ + ) + +#define _CLOSE_RANGE() \ + _TerminateStateBlock(Context, index) + +#define _ENABLE(reg, field) \ + do \ + { \ + if (gcmVERIFYFIELDVALUE(data, reg, MASK_ ## field, ENABLED)) \ + { \ + enable |= gcmFIELDMASK(reg, field); \ + } \ + } \ + while (gcvFALSE) + +#define _BLOCK_COUNT(reg) \ + ((reg ## _Count) >> (reg ## _BLK)) + + +/******************************************************************************\ +*********************** Support Functions and Definitions ********************** +\******************************************************************************/ + +#define gcdSTATE_MASK \ + (gcmSETFIELDVALUE(0, AQ_COMMAND_NOP_COMMAND, OPCODE, NOP) | 0xC0FFEE) + +#if gcdENABLE_3D +static gctUINT32 +_TerminateStateBlock( + IN gckCONTEXT Context, + IN gctUINT32 Index + ) +{ + gctUINT32_PTR buffer; + gctUINT32 align; + + /* Determine if we need alignment. */ + align = (Index & 1) ? 1 : 0; + + /* Address correct index. */ + buffer = (Context->buffer == gcvNULL) + ? gcvNULL + : Context->buffer->logical; + + /* Flush the current state block; make sure no pairing with the states + to follow happens. */ + if (align && (buffer != gcvNULL)) + { + buffer[Index] = 0xDEADDEAD; + } + + /* Reset last address. */ + Context->lastAddress = ~0U; + + /* Return alignment requirement. 
*/ + return align; +} +#endif + + +#if gcdENABLE_3D +static gctUINT32 +_FlushPipe( + IN gckCONTEXT Context, + IN gctUINT32 Index, + IN gcePIPE_SELECT Pipe + ) +{ + gctUINT32 flushSlots; + gctBOOL txCacheFix; + gctBOOL fcFlushStall; + gctBOOL iCacheInvalidate; + gctBOOL halti5; + gctBOOL snapPages; + gctBOOL hwTFB; + gctBOOL blt; + gctBOOL peTSFlush; + + txCacheFix + = gckHARDWARE_IsFeatureAvailable(Context->hardware, gcvFEATURE_TEX_CACHE_FLUSH_FIX); + + fcFlushStall + = gckHARDWARE_IsFeatureAvailable(Context->hardware, gcvFEATURE_FC_FLUSH_STALL); + + iCacheInvalidate + = gckHARDWARE_IsFeatureAvailable(Context->hardware, gcvFEATURE_SHADER_HAS_INSTRUCTION_CACHE); + + halti5 + = gckHARDWARE_IsFeatureAvailable(Context->hardware, gcvFEATURE_HALTI5); + + snapPages + = gckHARDWARE_IsFeatureAvailable(Context->hardware, gcvFEATURE_SNAPPAGE_CMD_FIX) && + gckHARDWARE_IsFeatureAvailable(Context->hardware, gcvFEATURE_SNAPPAGE_CMD); + + + hwTFB + = gckHARDWARE_IsFeatureAvailable(Context->hardware, gcvFEATURE_HW_TFB); + + blt + = gckHARDWARE_IsFeatureAvailable(Context->hardware, gcvFEATURE_BLT_ENGINE); + + peTSFlush + = gckHARDWARE_IsFeatureAvailable(Context->hardware, gcvFEATURE_PE_TILE_CACHE_FLUSH_FIX); + + flushSlots = blt ? 10 : 6; + + if (Pipe == gcvPIPE_3D) + { + if (!txCacheFix) + { + /* Semaphore stall */ + flushSlots += blt ? 8 : 4; + } + + /* VST cache */ + flushSlots += 2; + } + + if (fcFlushStall) + { + /* Flush tile status cache. */ + flushSlots += blt ? ((!peTSFlush) ? 14 :10) : 6; + } + + if (iCacheInvalidate && !halti5) + { + flushSlots += blt ? 16 : 12; + } + + if (hwTFB) + { + flushSlots += 2; + } + + /* Snap pages */ + if (snapPages) + { + flushSlots += 2; + } + + if (Context->buffer != gcvNULL) + { + gctUINT32_PTR buffer; + + /* Address correct index. */ + buffer = Context->buffer->logical + Index; + + if (Pipe == gcvPIPE_3D && !txCacheFix) + { + if (blt) + { + /* Semaphore from FE to BLT. 
*/ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? 
+ 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x10 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + /* Stall from FE to BLT. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? 
+ 12:8))) | (((gctUINT32) (0x10 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + } + else + { + /* Semaphore from FE to PE. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 
31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + /* Stall from FE to PE. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? 
+ 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + } + } + + /* Flush the current pipe. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = (Pipe == gcvPIPE_2D) + ? + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 3:3) - (0 ? + 3:3) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 3:3) - (0 ? + 3:3) + 1))))))) << (0 ? + 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 3:3) - (0 ? + 3:3) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) + : ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? 
+ 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 1:1) - (0 ? + 1:1) + 1))))))) << (0 ? + 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 2:2) - (0 ? + 2:2) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 2:2) - (0 ? + 2:2) + 1))))))) << (0 ? + 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 2:2) - (0 ? + 2:2) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 5:5) - (0 ? + 5:5) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 5:5) - (0 ? + 5:5) + 1))))))) << (0 ? + 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 5:5) - (0 ? + 5:5) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 10:10) - (0 ? + 10:10) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 10:10) - (0 ? + 10:10) + 1))))))) << (0 ? + 10:10))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 10:10) - (0 ? + 10:10) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 10:10) - (0 ? 10:10) + 1))))))) << (0 ? 10:10))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 11:11) - (0 ? + 11:11) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 11:11) - (0 ? + 11:11) + 1))))))) << (0 ? + 11:11))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 11:11) - (0 ? + 11:11) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11))); + + if (hwTFB) + { + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? 
+ 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x7003) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *buffer++ + = 0x12345678; + } + + /* Flush VST in separate cmd. */ + if (Pipe == gcvPIPE_3D) + { + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 
15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:4) - (0 ? + 4:4) + 1))))))) << (0 ? + 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))); + } + + /* Semaphore from FE to PE. */ + if (blt) + { + /* Semaphore from FE to BLT. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? 
+ 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x10 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + /* Stall from FE to BLT. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 
31:27))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x10 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + } + else + { + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + /* Stall from FE to PE. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? 
+ 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + } + + if (fcFlushStall) + { + if (!peTSFlush && blt) + { + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 
15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502B) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + } + else + { + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0594) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? 
+ 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + } + + /* Semaphore from FE to PE. */ + if (blt) + { + /* Semaphore from FE to BLT. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 
0:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x10 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + /* Stall from FE to BLT. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 
+ 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x10 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? 
+ 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + } + else + { + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + /* Stall from FE to PE. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? 
+ 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + } + } + + if (iCacheInvalidate && !halti5) + { + /* Invalidate I$ after pipe is stalled */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0218) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x021A) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:4) - (0 ? + 4:4) + 1))))))) << (0 ? 
+ 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0218) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x021A) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 5:5) - (0 ? + 5:5) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 5:5) - (0 ? + 5:5) + 1))))))) << (0 ? + 5:5))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 5:5) - (0 ? + 5:5) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))); + + /* Semaphore from FE to PE. */ + if (blt) + { + /* Semaphore from FE to BLT. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 
25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? 
+ 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x10 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + /* Stall from FE to BLT. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x10 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + } + else + { + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 
15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + /* Stall from FE to PE. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + } + } + + if (snapPages) + { + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x13 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? 
+ 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x02 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x04 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x10 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))); + + *buffer++ + = 0; + } + } + + /* Number of slots taken by flushing pipe. 
*/ + return flushSlots; +} +#endif + +#if gcdENABLE_3D +static gctUINT32 +_SemaphoreStall( + IN gckCONTEXT Context, + IN gctUINT32 Index + ) +{ + gctBOOL blt = gckHARDWARE_IsFeatureAvailable(Context->hardware, gcvFEATURE_BLT_ENGINE); + if (Context->buffer != gcvNULL) + { + gctUINT32_PTR buffer; + + /* Address correct index. */ + buffer = Context->buffer->logical + Index; + + if (blt) + { + /* Semaphore from FE to BLT. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? 
+ 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x10 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + /* Stall from FE to BLT. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? 
+ 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x10 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + } + else + { + /* Semaphore from FE to PE. 
*/ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + /* Stall from FE to PE. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? 
+ 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); + + *buffer + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + } + } + + /* Semaphore/stall takes 4 slots. */ + return (blt ? 8 : 4); +} +#endif + +#if (gcdENABLE_3D) +static gctUINT32 +_SwitchPipe( + IN gckCONTEXT Context, + IN gctUINT32 Index, + IN gcePIPE_SELECT Pipe + ) +{ + gctUINT32 slots = 2; + + if (Context->buffer != gcvNULL) + { + gctUINT32_PTR buffer; + + /* Address correct index. */ + buffer = Context->buffer->logical + Index; + + /* LoadState(AQPipeSelect, 1), pipe. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *buffer + = (Pipe == gcvPIPE_2D) + ? 0x1 + : 0x0; + } + + Context->pipeSelectBytes = slots * gcmSIZEOF(gctUINT32); + + return slots; +} +#endif + +#if gcdENABLE_3D +static gctUINT32 +_State( + IN gckCONTEXT Context, + IN gctUINT32 Index, + IN gctUINT32 Address, + IN gctUINT32 Value, + IN gctUINT32 Size, + IN gctBOOL FixedPoint, + IN gctBOOL Hinted + ) +{ + gctUINT32_PTR buffer; + gctUINT32 align; + gctUINT32 i; + + /* Determine if we need alignment. */ + align = (Index & 1) ? 1 : 0; + + /* Address correct index. */ + buffer = (Context->buffer == gcvNULL) + ? gcvNULL + : Context->buffer->logical; + + if ((buffer == gcvNULL) && (Address + Size > Context->maxState)) + { + /* Determine maximum state. */ + Context->maxState = Address + Size; + } + + if (buffer == gcvNULL) + { + /* Update number of states. */ + Context->numStates += Size; + } + + /* Do we need a new entry? */ + if ((Address != Context->lastAddress) || (FixedPoint != Context->lastFixed)) + { + if (buffer != gcvNULL) + { + if (align) + { + /* Add filler. */ + buffer[Index++] = 0xDEADDEAD; + } + + /* LoadState(Address, Count). */ + gcmkASSERT((Index & 1) == 0); + + if (FixedPoint) + { + buffer[Index] + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 26:26) - (0 ? + 26:26) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 26:26) - (0 ? + 26:26) + 1))))))) << (0 ? + 26:26))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 
+ 26:26) - (0 ? + 26:26) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (Size) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + } + else + { + buffer[Index] + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 26:26) - (0 ? + 26:26) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 26:26) - (0 ? + 26:26) + 1))))))) << (0 ? + 26:26))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 26:26) - (0 ? + 26:26) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (Size) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 
+ 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + } + + /* Walk all the states. */ + for (i = 0; i < (gctUINT32)Size; i += 1) + { + /* Set state to uninitialized value. */ + buffer[Index + 1 + i] = Value; + + /* Set index in state mapping table. */ + Context->map[Address + i].index = (gctUINT)Index + 1 + i; + +#if gcdSECURE_USER + /* Save hint. */ + if (Context->hint != gcvNULL) + { + Context->hint[Address + i] = Hinted; + } +#endif + } + } + + /* Save information for this LoadState. */ + Context->lastIndex = (gctUINT)Index; + Context->lastAddress = Address + (gctUINT32)Size; + Context->lastSize = Size; + Context->lastFixed = FixedPoint; + + /* Return size for load state. */ + return align + 1 + Size; + } + + /* Append this state to the previous one. */ + if (buffer != gcvNULL) + { + /* Update last load state. */ + buffer[Context->lastIndex] = + ((((gctUINT32) (buffer[Context->lastIndex])) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (Context->lastSize + Size) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + /* Walk all the states. */ + for (i = 0; i < (gctUINT32)Size; i += 1) + { + /* Set state to uninitialized value. */ + buffer[Index + i] = Value; + + /* Set index in state mapping table. */ + Context->map[Address + i].index = (gctUINT)Index + i; + +#if gcdSECURE_USER + /* Save hint. */ + if (Context->hint != gcvNULL) + { + Context->hint[Address + i] = Hinted; + } +#endif + } + } + + /* Update last address and size. */ + Context->lastAddress += (gctUINT32)Size; + Context->lastSize += Size; + + /* Return number of slots required. 
*/ + return Size; +} + +static gctUINT32 +_StateMirror( + IN gckCONTEXT Context, + IN gctUINT32 Address, + IN gctUINT32 Size, + IN gctUINT32 AddressMirror + ) +{ + gctUINT32 i; + + /* Process when buffer is set. */ + if (Context->buffer != gcvNULL) + { + /* Walk all states. */ + for (i = 0; i < Size; i++) + { + /* Copy the mapping address. */ + Context->map[Address + i].index = + Context->map[AddressMirror + i].index; + +#if gcdSECURE_USER + Context->hint[Address + i] = + Context->hint[AddressMirror + i]; +#endif + } + } + + /* Return the number of required maps. */ + return Size; +} +#endif + +#if (gcdENABLE_3D) +static gceSTATUS +_InitializeContextBuffer( + IN gckCONTEXT Context + ) +{ + gctUINT32_PTR buffer; + gctUINT32 index; + +#if gcdENABLE_3D + gctBOOL halti0, halti1, halti2, halti3, halti4, halti5; + gctUINT i; + gctUINT vertexUniforms, fragmentUniforms, vsConstBase, psConstBase, constMax; + gctBOOL unifiedUniform; + gctBOOL hasGS, hasTS; + gctBOOL genericAttrib; + gctBOOL hasICache; + gctBOOL hasICachePrefetch; + gctUINT numRT = 0; + gctUINT numSamplers = 32; + gctBOOL hasTXdesc; + gctBOOL hasSecurity; + gctBOOL hasRobustness; + gctBOOL multiCoreBlockSetCfg2; +#endif + + gckHARDWARE hardware; + + gcmkHEADER(); + + hardware = Context->hardware; + + gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE); + + /* Reset the buffer index. */ + index = 0; + + /* Reset the last state address. */ + Context->lastAddress = ~0U; + + /* Get the buffer pointer. */ + buffer = (Context->buffer == gcvNULL) + ? gcvNULL + : Context->buffer->logical; + + + /**************************************************************************/ + /* Build 2D states. *******************************************************/ + + +#if gcdENABLE_3D + /**************************************************************************/ + /* Build 3D states. 
*******************************************************/ + + halti0 = gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_HALTI0); + halti1 = gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_HALTI1); + halti2 = gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_HALTI2); + halti3 = gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_HALTI3); + halti4 = gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_HALTI4); + halti5 = gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_HALTI5); + hasGS = gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_GEOMETRY_SHADER); + hasTS = gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_TESSELLATION); + genericAttrib = gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_GENERIC_ATTRIB); + hasICache = gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_SHADER_HAS_INSTRUCTION_CACHE); + hasTXdesc = gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_TX_DESCRIPTOR); + hasSecurity = gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_SECURITY); + hasRobustness = gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_ROBUSTNESS); + hasICachePrefetch = gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_SH_INSTRUCTION_PREFETCH); + multiCoreBlockSetCfg2 = gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_MULTI_CORE_BLOCK_SET_CONFIG2); + + /* Multi render target. */ + if (halti2 || + (Context->hardware->identity.chipModel == gcv900 && Context->hardware->identity.chipRevision == 0x5250) + ) + { + numRT = 8; + } + else if (halti0) + { + numRT = 4; + } + else + { + numRT = 1; + } + + if (hasGS && hasTS) + { + numSamplers = 80; + } + + /* Query how many uniforms can support. 
*/ + {if (Context->hardware->identity.numConstants > 256){ unifiedUniform = gcvTRUE; +if (halti5){ vsConstBase = 0xD000; + psConstBase = 0xD800; +}else{ vsConstBase = 0xC000; + psConstBase = 0xC000; +}if ((Context->hardware->identity.chipModel == gcv880) && ((Context->hardware->identity.chipRevision & 0xfff0) == 0x5120)){ vertexUniforms = 512; + fragmentUniforms = 64; + constMax = 576; +}else{ vertexUniforms = gcmMIN(512, Context->hardware->identity.numConstants - 64); + fragmentUniforms = gcmMIN(512, Context->hardware->identity.numConstants - 64); + constMax = Context->hardware->identity.numConstants; +}}else if (Context->hardware->identity.numConstants == 256){ if (Context->hardware->identity.chipModel == gcv2000 && (Context->hardware->identity.chipRevision == 0x5118 || Context->hardware->identity.chipRevision == 0x5140)) { unifiedUniform = gcvFALSE; + vsConstBase = 0x1400; + psConstBase = 0x1C00; + vertexUniforms = 256; + fragmentUniforms = 64; + constMax = 320; + } else { unifiedUniform = gcvFALSE; + vsConstBase = 0x1400; + psConstBase = 0x1C00; + vertexUniforms = 256; + fragmentUniforms = 256; + constMax = 512; + }}else{ unifiedUniform = gcvFALSE; + vsConstBase = 0x1400; + psConstBase = 0x1C00; + vertexUniforms = 168; + fragmentUniforms = 64; + constMax = 232; +}}; + + +#if !gcdENABLE_UNIFIED_CONSTANT + if (Context->hardware->identity.numConstants > 256) + { + unifiedUniform = gcvTRUE; + } + else + { + unifiedUniform = gcvFALSE; + } +#endif + + /* Store the 3D entry index. */ + Context->entryOffset3D = (gctUINT)index * gcmSIZEOF(gctUINT32); + + /* Switch to 3D pipe. */ + index += _SwitchPipe(Context, index, gcvPIPE_3D); + + /* Current context pointer. */ +#if gcdDEBUG + index += _State(Context, index, 0x03850 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); +#endif + + index += _FlushPipe(Context, index, gcvPIPE_3D); + + /* Global states. 
*/ + if (hasSecurity) + { + index += _State(Context, index, 0x03900 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _CLOSE_RANGE(); + index += _State(Context, index, 0x03904 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + } + + index += _State(Context, index, 0x03814 >> 2, 0x00000001, 1, gcvFALSE, gcvFALSE); + index += _CLOSE_RANGE(); + index += _State(Context, index, 0x03818 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x0381C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + + if (halti5) + { + index += _State(Context, index, 0x03888 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x038C0 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x03884 >> 2, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 2:0) - (0 ? + 2:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 2:0) - (0 ? + 2:0) + 1))))))) << (0 ? + 2:0))) | (((gctUINT32) ((gctUINT32) (hardware->options.uscL1CacheRatio) & ((gctUINT32) ((((1 ? + 2:0) - (0 ? + 2:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 2:0) - (0 ? + 2:0) + 1))))))) << (0 ? + 2:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 20:16) - (0 ? + 20:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 20:16) - (0 ? + 20:16) + 1))))))) << (0 ? + 20:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? + 20:16) - (0 ? + 20:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 20:16) - (0 ? 20:16) + 1))))))) << (0 ? 
20:16))), 1, gcvFALSE, gcvFALSE); + } + else + { + index += _State(Context, index, 0x03820 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x03828 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x0382C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x03834 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x03838 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x03854 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + } + + if (hasGS) + { + index += _State(Context, index, 0x0388C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + } + + index += _State(Context, index, 0x0384C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + + /* Front End states. */ + if (halti5) + { + index += _State(Context, index, 0x17800 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + index += _CLOSE_RANGE(); + index += _State(Context, index, 0x007C4 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x17880 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x17900 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x17980 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x17A00 >> 2, 0x3F800000, 32, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x007D0 >> 2, 0x00000000, 2, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x007D8 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x17A80 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + } + else + { + index += _State(Context, index, 0x00600 >> 2, 0x00000000, (halti0 ? 
16 : 12), gcvFALSE, gcvFALSE); + index += _CLOSE_RANGE(); + if (genericAttrib) + { + index += _State(Context, index, 0x006C0 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00700 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00740 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00780 >> 2, 0x3F800000, 16, gcvFALSE, gcvFALSE); + } + } + + if (halti2 || (Context->hardware->identity.streamCount > 8)) + { + index += _State(Context, index, 0x14600 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x14640 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x14680 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE); + } + else if (Context->hardware->identity.streamCount > 1) + { + index += _State(Context, index, 0x00680 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE); + index += _State(Context, index, 0x006A0 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE); + } + else + { + index += _State(Context, index, 0x0064C >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE); + index += _State(Context, index, 0x00650 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + + } + index += _State(Context, index, 0x00644 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE); + index += _State(Context, index, 0x00648 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00674 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00678 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x0067C >> 2, 0xFFFFFFFF, 1, gcvFALSE, gcvFALSE); + index += _CLOSE_RANGE(); + + if (hasRobustness) + { + index += _State(Context, index, 0x146C0 >> 2, 0x00000000, 16, gcvFALSE, gcvTRUE); + index += _CLOSE_RANGE(); + index += _State(Context, index, 0x007F8 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE); + index += _CLOSE_RANGE(); + } + + if (halti5) + { + index += _State(Context, index, 0x008B8 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += 
_State(Context, index, 0x15600 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + } + else + { + /* This register is programed by all chips, which program all DECODE_SELECT as VS + ** except SAMPLER_DECODE_SELECT. + */ + index += _State(Context, index, 0x00860 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + } + + if (hasICache) + { + /* I-Cache states. */ + index += _State(Context, index, 0x00868 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x0086C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x0304C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01028 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _CLOSE_RANGE(); + + if (hasICachePrefetch) + { + if (halti5) + { + index += _State(Context, index, 0x15604 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE); + index += _State(Context, index, 0x01094 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE); + } + else + { + index += _State(Context, index, 0x00890 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE); + index += _State(Context, index, 0x0104C >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE); + } + index += _CLOSE_RANGE(); + } + } + + /* Vertex Shader states. */ + index += _State(Context, index, 0x00804 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00808 >> 2, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 5:0) - (0 ? + 5:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 5:0) - (0 ? + 5:0) + 1))))))) << (0 ? + 5:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 5:0) - (0 ? + 5:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 5:0) - (0 ? 5:0) + 1))))))) << (0 ? 
5:0))), 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x0080C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00830 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + + if (halti5) + { + index += _State(Context, index, 0x00898 >> 2, 0x00000000, 2, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x008A0 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00870 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x008A8 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x008C0 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x008E0 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE); + } + else + { + index += _State(Context, index, 0x00810 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00820 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE); + } + + index += _CLOSE_RANGE(); + + /* GS */ + if (hasGS) + { + index += _State(Context, index, 0x01100 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01104 >> 2, 0x00000001, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01108 >> 2, 0x01000001, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x0110C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01110 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01114 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE); + index += _State(Context, index, 0x0111C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01140 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01144 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01148 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x0114C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01154 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += 
_State(Context, index, 0x01120 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE); + index += _CLOSE_RANGE(); + } + + /* TCS & TES */ + + if (hasTS) + { + index += _State(Context, index, 0x007C0 >> 2, 0x00000003, 1, gcvFALSE, gcvFALSE); + + index += _State(Context, index, 0x14A14 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x14A18 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x14A1C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x14A40 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x14A00 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x14A04 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x14A08 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE); + index += _State(Context, index, 0x14A10 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x14A20 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x14A44 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x14A4C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + + index += _CLOSE_RANGE(); + + index += _State(Context, index, 0x14B18 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x14B1C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x14B20 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x14B04 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x14B08 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x14B0C >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE); + index += _State(Context, index, 0x14B14 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x14B40 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x14B24 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x14B2C >> 2, 0x00000000, 
1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x14B34 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + + index += _State(Context, index, 0x14B00 >> 2, 0x00040000, 1, gcvFALSE, gcvFALSE); + + } + + index += _CLOSE_RANGE(); + + /* TFB */ + if (gckHARDWARE_IsFeatureAvailable(Context->hardware, gcvFEATURE_HW_TFB)) + { + index += _State(Context, index, 0x1C000 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x1C008 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE); + index += _State(Context, index, (0x1C040 >> 2) + (0 << 4), 0x00000000, 4, gcvFALSE, gcvTRUE); + index += _State(Context, index, 0x1C080 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x1C0C0 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x1C100 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x1C800 >> 2, 0x00000000, 128*4, gcvFALSE, gcvFALSE); + + index += _State(Context, index, 0x1C014 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE); + index += _CLOSE_RANGE(); + + } + + /* Primitive Assembly states. 
*/ + index += _State(Context, index, 0x00A00 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE); + index += _State(Context, index, 0x00A04 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE); + index += _State(Context, index, 0x00A08 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00A0C >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE); + index += _State(Context, index, 0x00A10 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE); + index += _State(Context, index, 0x00A14 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00A18 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00A1C >> 2, 0x3F000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00A28 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00A2C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00A30 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00A34 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00A38 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00A3C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00A80 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00A84 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE); + index += _State(Context, index, 0x00A8C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00A88 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + + if (halti5) + { + index += _State(Context, index, 0x00AA8 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00A90 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE); + } + else + { + index += _State(Context, index, 0x00A40 >> 2, 0x00000000, Context->hardware->identity.varyingsCount, gcvFALSE, gcvFALSE); + } + + index += _State(Context, index, 0x03A00 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x03A04 >> 2, 0x00000000, 1, gcvFALSE, 
gcvFALSE); + index += _State(Context, index, 0x03A08 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + + if (multiCoreBlockSetCfg2) + { + index += _State(Context, index, 0x03A0C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x03A10 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + } + + /* Setup states. */ + index += _State(Context, index, 0x00C00 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE); + index += _State(Context, index, 0x00C04 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE); + index += _State(Context, index, 0x00C08 >> 2, 0x45000000, 1, gcvTRUE, gcvFALSE); + index += _State(Context, index, 0x00C0C >> 2, 0x45000000, 1, gcvTRUE, gcvFALSE); + index += _State(Context, index, 0x00C10 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00C14 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00C18 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00C1C >> 2, 0x42000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00C20 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE); + index += _State(Context, index, 0x00C24 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE); + + /* Raster states. 
*/ + index += _State(Context, index, 0x00E00 >> 2, 0x000000F1, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00E10 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00E04 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00E40 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00E08 >> 2, 0x17000031, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00E24 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00E20 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + + if (halti2) + { + index += _State(Context, index, 0x00E0C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + } + + if (halti5) + { + index += _State(Context, index, 0x00E34 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + } + + + /* Pixel Shader states. */ + index += _State(Context, index, 0x01004 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01008 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x0100C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01010 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01030 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01034 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + + if (halti2) + { + index += _State(Context, index, 0x01040 >> 2, 0x00000000, 2, gcvFALSE, gcvFALSE); + } + + if (numRT == 8) + { + index += _State(Context, index, 0x0102C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01038 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + } + + if (halti4) + { + index += _State(Context, index, 0x01054 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + } + + if (halti5) + { + index += _State(Context, index, 0x01080 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01058 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01098 >> 2, 
0x00000000, 1, gcvFALSE, gcvFALSE); + } + + + index += _CLOSE_RANGE(); + + /* Texture states. */ + if (hasTXdesc) + { + /* Texture descriptor states */ + index += _State(Context, index, 0x14C40 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + + index += _State(Context, index, 0x16C00 >> 2, 0x00000000, numSamplers, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x16E00 >> 2, 0x00000000, numSamplers, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x17000 >> 2, 0x00000000, numSamplers, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x17200 >> 2, 0x00000000, numSamplers, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x17400 >> 2, 0x00000000, numSamplers, gcvFALSE, gcvFALSE); + + index += _State(Context, index, (0x15C00 >> 2) + (0 << 0), 0x00000000, numSamplers, gcvFALSE, gcvTRUE); + index += _State(Context, index, 0x15E00 >> 2, 0x00000000, numSamplers, gcvFALSE, gcvFALSE); + + index += _CLOSE_RANGE(); + + _StateMirror(Context, 0x16000 >> 2, numSamplers , 0x16C00 >> 2); + _StateMirror(Context, 0x16200 >> 2, numSamplers , 0x16E00 >> 2); + _StateMirror(Context, 0x16400 >> 2, numSamplers , 0x17000 >> 2); + _StateMirror(Context, 0x16600 >> 2, numSamplers , 0x17200 >> 2); + _StateMirror(Context, 0x16800 >> 2, numSamplers , 0x17400 >> 2); + _StateMirror(Context, 0x15800 >> 2, numSamplers , 0x15C00 >> 2); + _StateMirror(Context, 0x15A00 >> 2, numSamplers , 0x15E00 >> 2); + } + else + { + index += _State(Context, index, 0x02000 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x02040 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x02080 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x020C0 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x02100 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x02140 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x02180 >> 2, 0x00000000, 12, 
gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x021C0 >> 2, 0x00321000, 12, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x02200 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x02240 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE); + index += _State(Context, index, (0x02400 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE); + index += _State(Context, index, (0x02440 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE); + index += _State(Context, index, (0x02480 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE); + index += _State(Context, index, (0x024C0 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE); + index += _State(Context, index, (0x02500 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE); + index += _State(Context, index, (0x02540 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE); + index += _State(Context, index, (0x02580 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE); + index += _State(Context, index, (0x025C0 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE); + index += _State(Context, index, (0x02600 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE); + index += _State(Context, index, (0x02640 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE); + index += _State(Context, index, (0x02680 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE); + index += _State(Context, index, (0x026C0 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE); + index += _State(Context, index, (0x02700 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE); + index += _State(Context, index, (0x02740 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE); + index += _CLOSE_RANGE(); + + if (gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_TEXTURE_LINEAR)) + { + /* + * Linear stride LODn will overwrite LOD0 on GC880,GC2000. + * And only LOD0 is valid for this register. + */ + gctUINT count = halti1 ? 
14 : 1; + + for (i = 0; i < 12; i += 1) + { + index += _State(Context, index, (0x02C00 >> 2) + i * 16, 0x00000000, count, gcvFALSE, gcvFALSE); + } + } + + if (halti1) + { + gctUINT texBlockCount; + gctUINT gcregTXLogSizeResetValue; + + /* Enable the integer filter pipe for all texture samplers + so that the floating point filter clock will shut off until + we start using the floating point filter. + */ + gcregTXLogSizeResetValue = ((((gctUINT32) (0x00000000)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 29:29) - (0 ? + 29:29) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 29:29) - (0 ? + 29:29) + 1))))))) << (0 ? + 29:29))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 29:29) - (0 ? + 29:29) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 29:29) - (0 ? 29:29) + 1))))))) << (0 ? 29:29))); + + /* New texture block. */ + index += _State(Context, index, 0x10000 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x10080 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x10100 >> 2, gcregTXLogSizeResetValue, 32, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x10180 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x10200 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x10280 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x10300 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x10380 >> 2, 0x00321000, 32, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x10400 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x10480 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + + if (gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_TX_FILTER)) + { + index += _State(Context, index, 0x12000 >> 2, 0x00000000, 256, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x12400 >> 2, 0x00000000, 256, gcvFALSE, gcvFALSE); + } + + texBlockCount = ((512) >> (4)); + + for (i = 0; i < texBlockCount; i += 
1) + { + index += _State(Context, index, (0x10800 >> 2) + (i << 4), 0x00000000, 14, gcvFALSE, gcvTRUE); + } + } + + if (gckHARDWARE_IsFeatureAvailable(Context->hardware, gcvFEATURE_TEX_BASELOD)) + { + index += _State(Context, index, 0x10700 >> 2, 0x00000F00, 32, gcvFALSE, gcvFALSE); + } + + if (halti3 || + gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_TX_SUPPORT_DEC)) + { + index += _State(Context, index, 0x10780 >> 2, 0x00030000, 32, gcvFALSE, gcvFALSE); + } + + if (halti4) + { + index += _State(Context, index, 0x11200 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x11280 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + } + + if (gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_TX_FRAC_PRECISION_6BIT)) + { + index += _State(Context, index, 0x11000 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x11080 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x11100 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x11180 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x11300 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + } + + /* ASTC */ + if (gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_TEXTURE_ASTC)) + { + index += _State(Context, index, 0x10500 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x10580 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x10600 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x10680 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE); + } + } + + if (halti3) + { + index += _State(Context, index, 0x14C00 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE); + } + + /* Thread walker states. 
*/ + index += _State(Context, index, 0x00900 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00904 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00908 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x0090C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00910 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00914 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00918 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00924 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x0091C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + + if (gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_SHADER_ENHANCEMENTS2)) + { + index += _State(Context, index, 0x00940 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00944 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00948 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x0094C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00950 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00954 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + } + + if (halti5) + { + index += _State(Context, index, 0x00958 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x0095C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00960 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + } + + index += _CLOSE_RANGE(); + + /* VS/PS Start/End PC register */ + if (halti5) + { + index += _State(Context, index, 0x00874 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x008BC >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x0087C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, 
index, 0x01090 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _CLOSE_RANGE(); + } + else if (hasICache) + { + /* New Shader instruction PC registers(20bit). */ + index += _State(Context, index, 0x00874 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00878 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x0087C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00880 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _CLOSE_RANGE(); + } + else + { + if (Context->hardware->identity.instructionCount <= 256) + { + /* old shader instruction PC registers (12bit)*/ + index += _State(Context, index, 0x00800 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00838 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _CLOSE_RANGE(); + + index += _State(Context, index, 0x01000 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01018 >> 2, 0x01000000, 1, gcvFALSE, gcvFALSE); + index += _CLOSE_RANGE(); + } + else + { + /* New Shader instruction PC registers (16bit) */ + index += _State(Context, index, 0x0085C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x0101C >> 2, 0x00000100, 1, gcvFALSE, gcvFALSE); + index += _CLOSE_RANGE(); + } + } + + + if (!hasICachePrefetch) + { + /* This unified one need SELECT bit to steer */ + if (Context->hardware->identity.instructionCount > 1024) + { + for (i = 0; + i < Context->hardware->identity.instructionCount << 2; + i += 256 << 2 + ) + { + index += _State(Context, index, (0x20000 >> 2) + i, 0x00000000, 256 << 2, gcvFALSE, gcvFALSE); + index += _CLOSE_RANGE(); + } + } + /* This unified one is steered by base adddress, it's automatical. */ + else if (Context->hardware->identity.instructionCount > 256) + { + /* VS instruction memory. 
*/ + for (i = 0; + i < Context->hardware->identity.instructionCount << 2; + i += 256 << 2 + ) + { + index += _State(Context, index, (0x0C000 >> 2) + i, 0x00000000, 256 << 2, gcvFALSE, gcvFALSE); + index += _CLOSE_RANGE(); + } + + _StateMirror(Context, 0x08000 >> 2, Context->hardware->identity.instructionCount << 2 , 0x0C000 >> 2); + } + /* if (Context->hardware->identity.instructionCount <= 256). This is non-unified one. */ + else + { + index += _State(Context, index, 0x04000 >> 2, 0x00000000, 1024, gcvFALSE, gcvFALSE); + index += _CLOSE_RANGE(); + index += _State(Context, index, 0x06000 >> 2, 0x00000000, 1024, gcvFALSE, gcvFALSE); + index += _CLOSE_RANGE(); + } + } + + if (unifiedUniform) + { + gctINT numConstants = Context->hardware->identity.numConstants; + + /* Base Offset register */ + index += _State(Context, index, 0x01024 >> 2, 0x00000100, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x00864 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _CLOSE_RANGE(); + + for (i = 0; + numConstants > 0; + i += 256 << 2, + numConstants -= 256 + ) + { + if (halti5) + { + if (numConstants >= 256) + { + index += _State(Context, index, (0x36000 >> 2) + i, 0x00000000, 256 << 2, gcvFALSE, gcvFALSE); + } + else + { + index += _State(Context, index, (0x36000 >> 2) + i, 0x00000000, numConstants << 2, gcvFALSE, gcvFALSE); + } + index += _CLOSE_RANGE(); + } + else + { + if (numConstants >= 256) + { + index += _State(Context, index, (0x30000 >> 2) + i, 0x00000000, 256 << 2, gcvFALSE, gcvFALSE); + } + else + { + index += _State(Context, index, (0x30000 >> 2) + i, 0x00000000, numConstants << 2, gcvFALSE, gcvFALSE); + } + + index += _CLOSE_RANGE(); + } + } + + if (halti5) + { + _StateMirror(Context, 0x34000 >> 2, Context->hardware->identity.numConstants << 2 , 0x36000 >> 2); + } + } +#if gcdENABLE_UNIFIED_CONSTANT + else +#endif + { + index += _State(Context, index, 0x05000 >> 2, 0x00000000, vertexUniforms * 4, gcvFALSE, gcvFALSE); + index += _State(Context, index, 
0x07000 >> 2, 0x00000000, fragmentUniforms * 4, gcvFALSE, gcvFALSE); + } + + if (halti1) + { + index += _State(Context, index, 0x00884 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + } + + if (halti5) + { + index += _State(Context, index, 0x008B0 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + } + + /* Store the index of the "XD" entry. */ + Context->entryOffsetXDFrom3D = (gctUINT)index * gcmSIZEOF(gctUINT32); + + + /* Pixel Engine states. */ + index += _State(Context, index, 0x01400 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01404 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01408 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x0140C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01414 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01418 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x0141C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01420 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01424 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01428 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x0142C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01434 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01454 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01458 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE); + index += _State(Context, index, 0x014A0 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x014A8 >> 2, 0xFFFFFFFF, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x014AC >> 2, 0xFFFFFFFF, 1, gcvFALSE, gcvFALSE); + + if(gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_HALF_FLOAT_PIPE) ) + { + index += _State(Context, index, 0x014B0 >> 2, 
0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x014B4 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + } + index += _State(Context, index, 0x014A4 >> 2, 0x000E400C, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01580 >> 2, 0x00000000, 3, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x014B8 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + + if (halti3) + { + index += _State(Context, index, 0x0103C >> 2, 0x76543210, 1, gcvFALSE, gcvFALSE); + } + + index += _State(Context, index, (0x01460 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE); + + if (Context->hardware->identity.pixelPipes == 1) + { + index += _State(Context, index, 0x01430 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE); + index += _State(Context, index, 0x01410 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE); + } + + if (Context->hardware->identity.pixelPipes > 1 || halti0) + { + index += _State(Context, index, (0x01480 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE); + } + + for (i = 0; i < 3; i++) + { + index += _State(Context, index, (0x01500 >> 2) + (i << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE); + } + + if (numRT == 8) + { + for (i = 0; i < 7; i++) + { + index += _State(Context, index, (0x14800 >> 2) + (i << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE); + } + index += _State(Context, index, 0x14900 >> 2, 0x00000000, 7, gcvFALSE, gcvFALSE); + } + + + if (halti3) + { + index += _State(Context, index, 0x014BC >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + } + + if (halti4) + { + index += _State(Context, index, 0x014C0 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + } + + if (hasGS) + { + index += _State(Context, index, 0x038A0 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE); + } + + if (halti5) + { + index += _State(Context, index, 0x14920 >> 2, 0x00000000, 7, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x14940 >> 2, 0x00000000, 7, gcvFALSE, 
gcvFALSE); + index += _State(Context, index, 0x14960 >> 2, 0x00000000, 7, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x14980 >> 2, 0x00000000, 7, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x149A0 >> 2, 0x00000000, 7, gcvFALSE, gcvFALSE); + } + + if (hasRobustness) + { + index += _State(Context, index, 0x149C0 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE); + index += _State(Context, index, 0x014C4 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE); + } + + /* Memory Controller */ + index += _State(Context, index, 0x01654 >> 2, 0x00200000, 1, gcvFALSE, gcvFALSE); + + index += _CLOSE_RANGE(); + index += _State(Context, index, 0x01658 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE); + index += _State(Context, index, 0x0165C >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE); + index += _State(Context, index, 0x01660 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01664 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE); + index += _State(Context, index, 0x01668 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE); + index += _State(Context, index, 0x0166C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01670 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01674 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x016A4 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE); + index += _State(Context, index, 0x016AC >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x016A8 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01720 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x01740 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE); + index += _State(Context, index, 0x01760 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE); + + + if (halti2) + { + index += _State(Context, index, 0x01780 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE); + index += _State(Context, index, 0x016BC >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE); + index += _State(Context, index, 
(0x017A0 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvFALSE); + index += _State(Context, index, (0x017C0 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvTRUE); + index += _State(Context, index, (0x017E0 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvTRUE); + index += _State(Context, index, (0x01A00 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvFALSE); + index += _State(Context, index, (0x01A20 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvFALSE); + index += _State(Context, index, (0x01A40 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvFALSE); + } + + index += _CLOSE_RANGE(); + + if (gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_BUG_FIXES18)) + { + index += _State(Context, index, 0x03860 >> 2, 0x6, 1, gcvFALSE, gcvFALSE); + index += _CLOSE_RANGE(); + } + + if (halti3) + { + index += _State(Context, index, 0x01A80 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE); + index += _CLOSE_RANGE(); + } + + if (hasSecurity || hasRobustness) + { + index += _State(Context, index, 0x001AC >> 2, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 16:16) - (0 ? + 16:16) + 1))))))) << (0 ? + 16:16))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))), 1, gcvFALSE, gcvFALSE); + } + + /* Semaphore/stall. */ + index += _SemaphoreStall(Context, index); +#endif + + /**************************************************************************/ + /* Link to another address. ***********************************************/ + + Context->linkIndex3D = (gctUINT)index; + + if (buffer != gcvNULL) + { + buffer[index + 0] + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 
31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + buffer[index + 1] + = 0; + } + + index += 2; + + /* Store the end of the context buffer. */ + Context->bufferSize = index * gcmSIZEOF(gctUINT32); + + + /**************************************************************************/ + /* Pipe switch for the case where neither 2D nor 3D are used. *************/ + + /* Store the 3D entry index. */ + Context->entryOffsetXDFrom2D = (gctUINT)index * gcmSIZEOF(gctUINT32); + + /* Switch to 3D pipe. */ + index += _SwitchPipe(Context, index, gcvPIPE_3D); + + /* Store the location of the link. */ + Context->linkIndexXD = (gctUINT)index; + + if (buffer != gcvNULL) + { + buffer[index + 0] + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + buffer[index + 1] + = 0; + } + + index += 2; + + + /**************************************************************************/ + /* Save size for buffer. 
**************************************************/

    /* Record the total size (in bytes) of the generated context buffer. */
    Context->totalSize = index * gcmSIZEOF(gctUINT32);

#if gcdENABLE_3D
    /* Self-assignments silence "set but not used" compiler warnings for
       variables that are only consumed under some feature configurations. */
    psConstBase = psConstBase;
    vsConstBase = vsConstBase;
    constMax = constMax;
#endif

    /* Success. */
    gcmkFOOTER_NO();
    return gcvSTATUS_OK;
}
#endif

/*
** _DestroyContext
**
** Free a gckCONTEXT object and everything it owns: every context buffer in
** the circular buffer list (its busy signal, its command buffer memory and
** the buffer record itself), the optional secure-user hint array, and
** finally the gckCONTEXT object itself.  Safe to call with
** Context == gcvNULL (no-op).  Returns the first failing status, or
** gcvSTATUS_OK.
*/
static gceSTATUS
_DestroyContext(
    IN gckCONTEXT Context
    )
{
    gceSTATUS status = gcvSTATUS_OK;

    if (Context != gcvNULL)
    {
        gcsCONTEXT_PTR bufferHead;

        /* Free context buffers.  The buffer list is circular; remember the
           head so the walk can detect when it has wrapped around. */
        for (bufferHead = Context->buffer; Context->buffer != gcvNULL;)
        {
            /* Get a shortcut to the current buffer. */
            gcsCONTEXT_PTR buffer = Context->buffer;

            /* Get the next buffer. */
            gcsCONTEXT_PTR next = buffer->next;

            /* Last item?  (next wrapped back around to the head) */
            if (next == bufferHead)
            {
                next = gcvNULL;
            }

            /* Destroy the busy signal. */
            if (buffer->signal != gcvNULL)
            {
                gcmkONERROR(gckOS_DestroySignal(
                    Context->os, buffer->signal
                    ));

                buffer->signal = gcvNULL;
            }

            /* Free the command buffer memory; the release path must match
               the allocation path chosen in _AllocateContextBuffer. */
            if (buffer->logical != gcvNULL)
            {
                if (Context->hardware->kernel->virtualCommandBuffer)
                {
                    gcmkONERROR(gckEVENT_DestroyVirtualCommandBuffer(
                        Context->hardware->kernel->eventObj,
                        Context->totalSize,
                        buffer->physical,
                        buffer->logical,
                        gcvKERNEL_PIXEL
                        ));
                }
                else
                {
                    gcmkONERROR(gckEVENT_FreeContiguousMemory(
                        Context->hardware->kernel->eventObj,
                        Context->totalSize,
                        buffer->physical,
                        buffer->logical,
                        gcvKERNEL_PIXEL
                        ));
                }

                buffer->logical = gcvNULL;
            }

            /* Free the context buffer structure. */
            gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, buffer));

            /* Remove from the list. */
            Context->buffer = next;
        }

#if gcdSECURE_USER
        /* Free the hint array. */
        if (Context->hint != gcvNULL)
        {
            gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context->hint));
        }
#endif

        /* Mark the gckCONTEXT object as unknown. */
        Context->object.type = gcvOBJ_UNKNOWN;

        /* Free the gckCONTEXT object. */
        gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context));
    }

OnError:
    return status;
}

#if (gcdENABLE_3D)
/*
** _AllocateContextBuffer
**
** Allocate the physical command memory backing one context buffer and
** resolve its GPU address.  On success Buffer->physical, Buffer->logical
** and Buffer->address are filled in.
**
** NOTE(review): on the error path memory already obtained here is not
** released locally; presumably callers roll back via _DestroyContext —
** confirm against gckCONTEXT_Construct's OnError path.
*/
static gceSTATUS
_AllocateContextBuffer(
    IN gckCONTEXT Context,
    IN gcsCONTEXT_PTR Buffer
    )
{
    gceSTATUS status;
    gctPOINTER pointer;
    gctUINT32 address;
    gctSIZE_T totalSize = Context->totalSize;

    if (Context->hardware->kernel->virtualCommandBuffer)
    {
        /* GPU-virtual command buffer: allocate through the kernel object,
           then look up the GPU address of the mapping. */
        gcmkONERROR(gckKERNEL_AllocateVirtualCommandBuffer(
            Context->hardware->kernel,
            gcvFALSE,
            &totalSize,
            &Buffer->physical,
            &pointer
            ));

        gcmkONERROR(gckKERNEL_GetGPUAddress(
            Context->hardware->kernel,
            pointer,
            gcvFALSE,
            Buffer->physical,
            &address
            ));
    }
    else
    {
        gctUINT32 allocFlag;

#if gcdENABLE_CACHEABLE_COMMAND_BUFFER
        allocFlag = gcvALLOC_FLAG_CACHEABLE | gcvALLOC_FLAG_CONTIGUOUS;
#else
        allocFlag = gcvALLOC_FLAG_CONTIGUOUS;
#endif
        /* Contiguous non-paged allocation: the logical address converts
           directly to a GPU address. */
        gcmkONERROR(gckOS_AllocateNonPagedMemory(
            Context->os,
            gcvFALSE,
            allocFlag,
            &totalSize,
            &Buffer->physical,
            &pointer
            ));

        gcmkONERROR(gckHARDWARE_ConvertLogical(
            Context->hardware,
            pointer,
            gcvFALSE,
            &address
            ));
    }

    Buffer->logical = pointer;
    Buffer->address = address;

    return gcvSTATUS_OK;

OnError:
    return status;
}
#endif

/******************************************************************************\
**************************** Context Management API ****************************
\******************************************************************************/

/******************************************************************************\
**
** gckCONTEXT_Construct
**
** Construct a new gckCONTEXT object.
**
** INPUT:
**
**      gckOS Os
**          Pointer to gckOS object.
**
**      gctUINT32 ProcessID
**          Current process ID.
**
**      gckHARDWARE Hardware
**          Pointer to gckHARDWARE object.
**
** OUTPUT:
**
**      gckCONTEXT * Context
**          Pointer to a variable that will receive the gckCONTEXT object
**          pointer.
*/
#if (gcdENABLE_3D)
gceSTATUS
gckCONTEXT_Construct(
    IN gckOS Os,
    IN gckHARDWARE Hardware,
    IN gctUINT32 ProcessID,
    OUT gckCONTEXT * Context
    )
{
    gceSTATUS status;
    gckCONTEXT context = gcvNULL;
    gctUINT32 allocationSize;
    gctUINT i;
    gctPOINTER pointer = gcvNULL;

    gcmkHEADER_ARG("Os=0x%08X Hardware=0x%08X", Os, Hardware);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
    gcmkVERIFY_ARGUMENT(Context != gcvNULL);


    /**************************************************************************/
    /* Allocate and initialize basic fields of gckCONTEXT. ********************/

    /* The context object size. */
    allocationSize = gcmSIZEOF(struct _gckCONTEXT);

    /* Allocate the object. */
    gcmkONERROR(gckOS_Allocate(
        Os, allocationSize, &pointer
        ));

    context = pointer;

    /* Reset the entire object. */
    gcmkONERROR(gckOS_ZeroMemory(context, allocationSize));

    /* Initialize the gckCONTEXT object. */
    context->object.type = gcvOBJ_CONTEXT;
    context->os = Os;
    context->hardware = Hardware;


#if !gcdENABLE_3D
    context->entryPipe = gcvPIPE_2D;
    context->exitPipe = gcvPIPE_2D;
#elif gcdCMD_NO_2D_CONTEXT
    context->entryPipe = gcvPIPE_3D;
    context->exitPipe = gcvPIPE_3D;
#else
    /* Enter via the 2D pipe when chipFeatures bit 9 is set, otherwise via
       the 3D pipe; always exit via the 3D pipe. */
    context->entryPipe
        = (((((gctUINT32) (context->hardware->identity.chipFeatures)) >> (0 ? 9:9)) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) )
            ? gcvPIPE_2D
            : gcvPIPE_3D;
    context->exitPipe = gcvPIPE_3D;
#endif

    /* Get the command buffer requirements. */
    gcmkONERROR(gckHARDWARE_QueryCommandBuffer(
        Hardware,
        gcvENGINE_RENDER,
        &context->alignment,
        &context->reservedHead,
        gcvNULL
        ));

    /**************************************************************************/
    /* Get the size of the context buffer. ************************************/

    /* First pass: called before any buffer memory exists, this computes the
       buffer size and the link/entry offsets stored in the context. */
    gcmkONERROR(_InitializeContextBuffer(context));

    if (context->maxState > 0)
    {
        /**************************************************************************/
        /* Allocate and reset the state mapping table. ****************************/
        if (context->hardware->kernel->command->stateMap == gcvNULL)
        {
            /* Allocate the state mapping table. */
            gcmkONERROR(gckOS_Allocate(
                Os,
                gcmSIZEOF(gcsSTATE_MAP) * context->maxState,
                &pointer
                ));

            context->map = pointer;

            /* Zero the state mapping table. */
            gcmkONERROR(gckOS_ZeroMemory(
                context->map, gcmSIZEOF(gcsSTATE_MAP) * context->maxState
                ));

            /* Publish the table so later contexts can share it. */
            context->hardware->kernel->command->stateMap = pointer;
        }
        else
        {
            /* Reuse the state map already cached on the command object. */
            context->map = context->hardware->kernel->command->stateMap;
        }

        /**************************************************************************/
        /* Allocate the hint array. ***********************************************/

#if gcdSECURE_USER
        /* Allocate hints. */
        gcmkONERROR(gckOS_Allocate(
            Os,
            gcmSIZEOF(gctBOOL) * context->maxState,
            &pointer
            ));

        context->hint = pointer;
#endif
    }

    /**************************************************************************/
    /* Allocate the context and state delta buffers. **************************/

    for (i = 0; i < gcdCONTEXT_BUFFER_COUNT; i += 1)
    {
        /* Allocate a context buffer. */
        gcsCONTEXT_PTR buffer;

        /* Allocate the context buffer structure. */
        gcmkONERROR(gckOS_Allocate(
            Os,
            gcmSIZEOF(gcsCONTEXT),
            &pointer
            ));

        buffer = pointer;

        /* Reset the context buffer structure. */
        gcmkVERIFY_OK(gckOS_ZeroMemory(
            buffer, gcmSIZEOF(gcsCONTEXT)
            ));

        /* Append to the circular list (first buffer links to itself). */
        if (context->buffer == gcvNULL)
        {
            buffer->next = buffer;
            context->buffer = buffer;
        }
        else
        {
            buffer->next = context->buffer->next;
            context->buffer->next = buffer;
        }

        /* Set the number of delta in the order of creation. */
#if gcmIS_DEBUG(gcdDEBUG_CODE)
        buffer->num = i;
#endif

        /* Create the busy signal. */
        gcmkONERROR(gckOS_CreateSignal(
            Os, gcvFALSE, &buffer->signal
            ));

        /* Set the signal, buffer is currently not busy. */
        gcmkONERROR(gckOS_Signal(
            Os, buffer->signal, gcvTRUE
            ));

        /* Create a new physical context buffer. */
        gcmkONERROR(_AllocateContextBuffer(
            context, buffer
            ));

        /* Set gckEVENT object pointer. */
        buffer->eventObj = Hardware->kernel->eventObj;

        /* Set the pointers to the LINK commands. */
        if (context->linkIndex2D != 0)
        {
            buffer->link2D = &buffer->logical[context->linkIndex2D];
        }

        if (context->linkIndex3D != 0)
        {
            buffer->link3D = &buffer->logical[context->linkIndex3D];
        }

        if (context->linkIndexXD != 0)
        {
            gctPOINTER xdLink;
            gctUINT32 xdEntryAddress;
            gctUINT32 xdEntrySize;
            gctUINT32 linkBytes;

            /* Determine LINK parameters. */
            xdLink
                = &buffer->logical[context->linkIndexXD];

            xdEntryAddress
                = buffer->address
                + context->entryOffsetXDFrom3D;

            xdEntrySize
                = context->bufferSize
                - context->entryOffsetXDFrom3D;

            /* Query LINK size (first call with a NULL buffer only returns
               the number of bytes a LINK command needs). */
            gcmkONERROR(gckHARDWARE_Link(
                Hardware, gcvNULL, 0, 0, &linkBytes, gcvNULL, gcvNULL
                ));

            /* Generate a LINK. */
            gcmkONERROR(gckHARDWARE_Link(
                Hardware,
                xdLink,
                xdEntryAddress,
                xdEntrySize,
                &linkBytes,
                gcvNULL,
                gcvNULL
                ));
        }
    }


    /**************************************************************************/
    /* Initialize the context buffers. ****************************************/

    /* Initialize the current context buffer.  Second pass: the buffer
       memory now exists, so the actual command words are written. */
    gcmkONERROR(_InitializeContextBuffer(context));

    /* Make all created contexts equal. */
    {
        gcsCONTEXT_PTR currContext, tempContext;

        /* Set the current context buffer. */
        currContext = context->buffer;

        /* Get the next context buffer. */
        tempContext = currContext->next;

        /* Loop through all buffers. */
        while (tempContext != currContext)
        {
            if (tempContext == gcvNULL)
            {
                gcmkONERROR(gcvSTATUS_NOT_FOUND);
            }

            /* Copy the current context. */
            gckOS_MemCopy(
                tempContext->logical,
                currContext->logical,
                context->totalSize
                );

            /* Get the next context buffer. */
            tempContext = tempContext->next;
        }
    }

    /* Return pointer to the gckCONTEXT object. */
    *Context = context;

    /* Success. */
    gcmkFOOTER_ARG("*Context=0x%08X", *Context);
    return gcvSTATUS_OK;

OnError:
    /* Roll back on error; _DestroyContext tolerates partially built state. */
    gcmkVERIFY_OK(_DestroyContext(context));

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
#endif

/******************************************************************************\
**
** gckCONTEXT_Destroy
**
** Destroy a gckCONTEXT object.
**
** INPUT:
**
**      gckCONTEXT Context
**          Pointer to an gckCONTEXT object.
**
** OUTPUT:
**
**      Nothing.
*/
gceSTATUS
gckCONTEXT_Destroy(
    IN gckCONTEXT Context
    )
{
    gceSTATUS status;

    gcmkHEADER_ARG("Context=0x%08X", Context);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT);

    /* Destroy the context and all related objects. */
    status = _DestroyContext(Context);

    /* Success. */
    gcmkFOOTER_NO();
    return status;
}

/******************************************************************************\
**
** gckCONTEXT_Update
**
** Merge all pending state delta buffers into the current context buffer.
**
** INPUT:
**
**      gckCONTEXT Context
**          Pointer to an gckCONTEXT object.
**
**      gctUINT32 ProcessID
**          Current process ID.
**
**      gcsSTATE_DELTA_PTR StateDelta
**          Pointer to the state delta.
**
** OUTPUT:
**
**      Nothing.
+*/ +gceSTATUS +gckCONTEXT_Update( + IN gckCONTEXT Context, + IN gctUINT32 ProcessID, + IN gcsSTATE_DELTA_PTR StateDelta + ) +{ +#if gcdENABLE_3D + gceSTATUS status = gcvSTATUS_OK; + gcsSTATE_DELTA _stateDelta; + gckKERNEL kernel; + gcsCONTEXT_PTR buffer; + gcsSTATE_MAP_PTR map; + gctBOOL needCopy = gcvFALSE; + gcsSTATE_DELTA_PTR nDelta; + gcsSTATE_DELTA_PTR uDelta = gcvNULL; + gcsSTATE_DELTA_PTR kDelta = gcvNULL; + gcsSTATE_DELTA_RECORD_PTR record; + gcsSTATE_DELTA_RECORD_PTR recordArray = gcvNULL; + gctUINT elementCount; + gctUINT address; + gctUINT32 mask; + gctUINT32 data; + gctUINT index; + gctUINT i, j; + gctUINT32 dirtyRecordArraySize = 0; + +#if gcdSECURE_USER + gcskSECURE_CACHE_PTR cache; +#endif + + gcmkHEADER_ARG( + "Context=0x%08X ProcessID=%d StateDelta=0x%08X", + Context, ProcessID, StateDelta + ); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT); + + /* Get a shortcut to the kernel object. */ + kernel = Context->hardware->kernel; + + /* Check wehther we need to copy the structures or not. */ + gcmkONERROR(gckOS_QueryNeedCopy(Context->os, ProcessID, &needCopy)); + + /* Get the current context buffer. */ + buffer = Context->buffer; + + /* Wait until the context buffer becomes available; this will + also reset the signal and mark the buffer as busy. */ + gcmkONERROR(gckOS_WaitSignal( + Context->os, buffer->signal, gcvFALSE, gcvINFINITE + )); + +#if gcdSECURE_USER + /* Get the cache form the database. */ + gcmkONERROR(gckKERNEL_GetProcessDBCache(kernel, ProcessID, &cache)); +#endif + +#if gcmIS_DEBUG(gcdDEBUG_CODE) && 1 && gcdENABLE_3D + /* Update current context token. */ + buffer->logical[Context->map[0x0E14].index] + = (gctUINT32)gcmPTR2INT32(Context); +#endif + + /* Are there any pending deltas? */ + if (buffer->deltaCount != 0) + { + /* Get the state map. */ + map = Context->map; + + /* Get the first delta item. */ + uDelta = buffer->delta; + + /* Reset the vertex stream count. 
*/ + elementCount = 0; + + /* Merge all pending deltas. */ + for (i = 0; i < buffer->deltaCount; i += 1) + { + /* Get access to the state delta. */ + gcmkONERROR(gckKERNEL_OpenUserData( + kernel, needCopy, + &_stateDelta, + uDelta, gcmSIZEOF(gcsSTATE_DELTA), + (gctPOINTER *) &kDelta + )); + + dirtyRecordArraySize + = gcmSIZEOF(gcsSTATE_DELTA_RECORD) * kDelta->recordCount; + + if (dirtyRecordArraySize) + { + /* Get access to the state records. */ + gcmkONERROR(gckOS_MapUserPointer( + kernel->os, + gcmUINT64_TO_PTR(kDelta->recordArray), + dirtyRecordArraySize, + (gctPOINTER *) &recordArray + )); + } + + /* Merge all pending states. */ + for (j = 0; j < kDelta->recordCount; j += 1) + { + if (j >= Context->numStates) + { + break; + } + + /* Get the current state record. */ + record = &recordArray[j]; + + /* Get the state address. */ + gcmkONERROR(gckOS_ReadMappedPointer(kernel->os, &record->address, &address)); + + /* Make sure the state is a part of the mapping table. */ + if (address >= Context->maxState) + { + gcmkTRACE( + gcvLEVEL_ERROR, + "%s(%d): State 0x%04X (0x%04X) is not mapped.\n", + __FUNCTION__, __LINE__, + address, address << 2 + ); + + continue; + } + + /* Get the state index. */ + index = map[address].index; + + /* Skip the state if not mapped. */ + if (index == 0) + { + continue; + } + + /* Get the data mask. */ + gcmkONERROR(gckOS_ReadMappedPointer(kernel->os, &record->mask, &mask)); + + /* Get the new data value. */ + gcmkONERROR(gckOS_ReadMappedPointer(kernel->os, &record->data, &data)); + + /* Masked states that are being completly reset or regular states. */ + if ((mask == 0) || (mask == ~0U)) + { + /* Process special states. */ + if (address == 0x0595) + { + /* Force auto-disable to be disabled. */ + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 5:5) - (0 ? + 5:5) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 5:5) - (0 ? + 5:5) + 1))))))) << (0 ? + 5:5))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 5:5) - (0 ? 
+ 5:5) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))); + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:4) - (0 ? + 4:4) + 1))))))) << (0 ? + 4:4))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))); + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 13:13) - (0 ? + 13:13) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 13:13) - (0 ? + 13:13) + 1))))))) << (0 ? + 13:13))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 13:13) - (0 ? + 13:13) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 13:13) - (0 ? 13:13) + 1))))))) << (0 ? 13:13))); + } + +#if gcdSECURE_USER + /* Do we need to convert the logical address? */ + if (Context->hint[address]) + { + /* Map handle into physical address. */ + gcmkONERROR(gckKERNEL_MapLogicalToPhysical( + kernel, cache, (gctPOINTER) &data + )); + } +#endif + + /* Set new data. */ + buffer->logical[index] = data; + } + + /* Masked states that are being set partially. */ + else + { + buffer->logical[index] + = (~mask & buffer->logical[index]) + | (mask & data); + } + } + + /* Get the element count. */ + if (kDelta->elementCount != 0) + { + elementCount = kDelta->elementCount; + } + + /* Dereference delta. */ + kDelta->refCount -= 1; + gcmkASSERT(kDelta->refCount >= 0); + + /* Get the next state delta. */ + nDelta = gcmUINT64_TO_PTR(kDelta->next); + + if (dirtyRecordArraySize) + { + /* Get access to the state records. */ + gcmkONERROR(gckOS_UnmapUserPointer( + kernel->os, + gcmUINT64_TO_PTR(kDelta->recordArray), + dirtyRecordArraySize, + recordArray + )); + + recordArray = gcvNULL; + } + + /* Close access to the current state delta. */ + gcmkONERROR(gckKERNEL_CloseUserData( + kernel, needCopy, + gcvTRUE, + uDelta, gcmSIZEOF(gcsSTATE_DELTA), + (gctPOINTER *) &kDelta + )); + + /* Update the user delta pointer. 
*/ + uDelta = nDelta; + } + + /* Hardware disables all input attribute when the attribute 0 is programmed, + it then reenables those attributes that were explicitely programmed by + the software. Because of this we cannot program the entire array of + values, otherwise we'll get all attributes reenabled, but rather program + only those that are actully needed by the software. + elementCount = attribCount + 1 to make sure 0 is a flag to indicate if UMD + touches it. + */ + if (elementCount != 0) + { + gctUINT base; + gctUINT nopCount; + gctUINT32_PTR nop; + gctUINT fe2vsCount; + gctUINT attribCount = elementCount -1; + gctUINT32 feAttributeStatgeAddr = 0x0180; + if (gckHARDWARE_IsFeatureAvailable(Context->hardware, gcvFEATURE_HALTI5)) + { + fe2vsCount = 32; + base = map[0x5E00].index; + feAttributeStatgeAddr = 0x5E00; + } + else if (gckHARDWARE_IsFeatureAvailable(Context->hardware, gcvFEATURE_HALTI0)) + { + fe2vsCount = 16; + base = map[0x0180].index; + } + else + { + fe2vsCount = 12; + base = map[0x0180].index; + } + + /* Set the proper state count. */ + if (attribCount == 0) + { + gcmkASSERT(gckHARDWARE_IsFeatureAvailable(Context->hardware, gcvFEATURE_ZERO_ATTRIB_SUPPORT)); + + buffer->logical[base - 1] + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 26:26) - (0 ? + 26:26) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 26:26) - (0 ? + 26:26) + 1))))))) << (0 ? + 26:26))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 26:26) - (0 ? + 26:26) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? 
+ 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (feAttributeStatgeAddr) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + /* Set the proper state count. */ + buffer->logical[base + 1] = + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 26:26) - (0 ? + 26:26) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 26:26) - (0 ? + 26:26) + 1))))))) << (0 ? + 26:26))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 26:26) - (0 ? + 26:26) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x01F2) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + buffer->logical[base + 2] = 0x1; + attribCount = 3; + } + else + { + buffer->logical[base - 1] + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 26:26) - (0 ? + 26:26) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 26:26) - (0 ? + 26:26) + 1))))))) << (0 ? + 26:26))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 26:26) - (0 ? + 26:26) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (attribCount) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (feAttributeStatgeAddr) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + } + + /* Determine the number of NOP commands. */ + nopCount = (fe2vsCount / 2) - (attribCount / 2); + /* Determine the location of the first NOP. */ + nop = &buffer->logical[base + (attribCount | 1)]; + + /* Fill the unused space with NOPs. */ + for (i = 0; i < nopCount; i += 1) + { + if (nop >= buffer->logical + Context->totalSize) + { + break; + } + + /* Generate a NOP command. 
*/ + *nop = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x03 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); + + /* Advance. */ + nop += 2; + } + } + /* Reset pending deltas. */ + buffer->deltaCount = 0; + buffer->delta = gcvNULL; + } + + if (StateDelta) + { + /* Set state delta user pointer. */ + uDelta = StateDelta; + + /* Get access to the state delta. */ + gcmkONERROR(gckKERNEL_OpenUserData( + kernel, needCopy, + &_stateDelta, + uDelta, gcmSIZEOF(gcsSTATE_DELTA), + (gctPOINTER *) &kDelta + )); + + /* State delta cannot be attached to anything yet. */ + if (kDelta->refCount != 0) + { + gcmkTRACE( + gcvLEVEL_ERROR, + "%s(%d): kDelta->refCount = %d (has to be 0).\n", + __FUNCTION__, __LINE__, + kDelta->refCount + ); + } + + /* Attach to all contexts. */ + buffer = Context->buffer; + + do + { + /* Attach to the context if nothing is attached yet. If a delta + is allready attached, all we need to do is to increment + the number of deltas in the context. */ + if (buffer->delta == gcvNULL) + { + buffer->delta = uDelta; + } + + /* Update reference count. */ + kDelta->refCount += 1; + + /* Update counters. */ + buffer->deltaCount += 1; + + /* Get the next context buffer. */ + buffer = buffer->next; + + if (buffer == gcvNULL) + { + gcmkONERROR(gcvSTATUS_NOT_FOUND); + } + } + while (Context->buffer != buffer); + + /* Close access to the current state delta. */ + gcmkONERROR(gckKERNEL_CloseUserData( + kernel, needCopy, + gcvTRUE, + uDelta, gcmSIZEOF(gcsSTATE_DELTA), + (gctPOINTER *) &kDelta + )); + + } + /* Schedule an event to mark the context buffer as available. */ + gcmkONERROR(gckEVENT_Signal( + buffer->eventObj, buffer->signal, gcvKERNEL_PIXEL + )); + + /* Advance to the next context buffer. 
*/ + Context->buffer = buffer->next; + + /* Return the status. */ + gcmkFOOTER(); + return gcvSTATUS_OK; + +OnError: + /* Get access to the state records. */ + if (kDelta != gcvNULL && recordArray != gcvNULL) + { + gcmkVERIFY_OK(gckOS_UnmapUserPointer( + kernel->os, + gcmUINT64_TO_PTR(kDelta->recordArray), + dirtyRecordArraySize, + (gctPOINTER *) &recordArray + )); + } + + /* Close access to the current state delta. */ + gcmkVERIFY_OK(gckKERNEL_CloseUserData( + kernel, needCopy, + gcvTRUE, + uDelta, gcmSIZEOF(gcsSTATE_DELTA), + (gctPOINTER *) &kDelta + )); + + /* Return the status. */ + gcmkFOOTER(); + return status; +#else + return gcvSTATUS_OK; +#endif +} + +gceSTATUS +gckCONTEXT_MapBuffer( + IN gckCONTEXT Context, + OUT gctUINT32 *Physicals, + OUT gctUINT64 *Logicals, + OUT gctUINT32 *Bytes + ) +{ + gceSTATUS status; + int i = 0; + gctSIZE_T pageCount; + gckVIRTUAL_COMMAND_BUFFER_PTR commandBuffer; + gckKERNEL kernel = Context->hardware->kernel; + gctPOINTER logical; + gctPHYS_ADDR physical; + + gcsCONTEXT_PTR buffer; + + gcmkHEADER(); + + gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT); + + buffer = Context->buffer; + + for (i = 0; i < gcdCONTEXT_BUFFER_COUNT; i++) + { + if (kernel->virtualCommandBuffer) + { + commandBuffer = (gckVIRTUAL_COMMAND_BUFFER_PTR)buffer->physical; + physical = commandBuffer->virtualBuffer.physical; + + gcmkONERROR(gckOS_CreateUserVirtualMapping( + kernel->os, + physical, + Context->totalSize, + &logical, + &pageCount)); + } + else + { + physical = buffer->physical; + + gcmkONERROR(gckOS_MapMemory( + kernel->os, + physical, + Context->totalSize, + &logical)); + } + + Physicals[i] = gcmPTR_TO_NAME(physical); + + Logicals[i] = gcmPTR_TO_UINT64(logical); + + buffer = buffer->next; + } + + *Bytes = (gctUINT)Context->totalSize; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + diff --git a/drivers/mcst/gpu-viv/hal/kernel/arch/gc_hal_kernel_context.h 
b/drivers/mcst/gpu-viv/hal/kernel/arch/gc_hal_kernel_context.h new file mode 100644 index 000000000000..7582f8e9224b --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/arch/gc_hal_kernel_context.h @@ -0,0 +1,194 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. 
+* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#ifndef __gc_hal_kernel_context_h_ +#define __gc_hal_kernel_context_h_ + +#include "gc_hal_kernel_buffer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Maps state locations within the context buffer. */ +typedef struct _gcsSTATE_MAP * gcsSTATE_MAP_PTR; +typedef struct _gcsSTATE_MAP +{ + /* Index of the state in the context buffer. */ + gctUINT index; + + /* State mask. */ + gctUINT32 mask; +} +gcsSTATE_MAP; + +/* Context buffer. */ +typedef struct _gcsCONTEXT * gcsCONTEXT_PTR; +typedef struct _gcsCONTEXT +{ + /* For debugging: the number of context buffer in the order of creation. */ + gctUINT num; + + /* Pointer to gckEVENT object. */ + gckEVENT eventObj; + + /* Context busy signal. */ + gctSIGNAL signal; + + /* Physical address of the context buffer. */ + gctPHYS_ADDR physical; + + /* Logical address of the context buffer. */ + gctUINT32_PTR logical; + + /* Hardware address of the context buffer. */ + gctUINT32 address; + + /* Pointer to the LINK commands. 
*/ + gctPOINTER link2D; + gctPOINTER link3D; + + /* The number of pending state deltas. */ + gctUINT deltaCount; + + /* Pointer to the first delta to be applied. */ + gcsSTATE_DELTA_PTR delta; + + /* Next context buffer. */ + gcsCONTEXT_PTR next; +} +gcsCONTEXT; + +typedef struct _gcsRECORD_ARRAY_MAP * gcsRECORD_ARRAY_MAP_PTR; +struct _gcsRECORD_ARRAY_MAP +{ + /* User pointer key. */ + gctUINT64 key; + + /* Kernel memory buffer. */ + gcsSTATE_DELTA_RECORD_PTR kData; + + /* Next map. */ + gcsRECORD_ARRAY_MAP_PTR next; + +}; + +#define USE_SW_RESET 1 + +/* gckCONTEXT structure that hold the current context. */ +struct _gckCONTEXT +{ + /* Object. */ + gcsOBJECT object; + + /* Pointer to gckOS object. */ + gckOS os; + + /* Pointer to gckHARDWARE object. */ + gckHARDWARE hardware; + + /* Command buffer alignment. */ + gctUINT32 alignment; + gctUINT32 reservedHead; + + /* Context buffer metrics. */ + gctSIZE_T maxState; + gctUINT32 numStates; + gctUINT32 totalSize; + gctUINT32 bufferSize; + gctUINT32 linkIndex2D; + gctUINT32 linkIndex3D; + gctUINT32 linkIndexXD; + gctUINT32 entryOffset3D; + gctUINT32 entryOffsetXDFrom2D; + gctUINT32 entryOffsetXDFrom3D; + + /* State mapping. */ + gcsSTATE_MAP_PTR map; + + /* List of context buffers. */ + gcsCONTEXT_PTR buffer; + + /* Requested pipe select for context. */ + gcePIPE_SELECT entryPipe; + gcePIPE_SELECT exitPipe; + + /* Variables used for building state buffer. */ + gctUINT32 lastAddress; + gctSIZE_T lastSize; + gctUINT32 lastIndex; + gctBOOL lastFixed; + + gctUINT32 pipeSelectBytes; + + /* Hint array. 
*/ +#if gcdSECURE_USER + gctBOOL_PTR hint; +#endif + + gcsPROFILER_COUNTERS_PART1 latestProfiler_part1; + gcsPROFILER_COUNTERS_PART1 histroyProfiler_part1; + gcsPROFILER_COUNTERS_PART1 preProfiler_part1; + gcsPROFILER_COUNTERS_PART2 latestProfiler_part2; + gcsPROFILER_COUNTERS_PART2 histroyProfiler_part2; + gcsPROFILER_COUNTERS_PART2 preProfiler_part2; +}; + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_kernel_context_h_ */ + diff --git a/drivers/mcst/gpu-viv/hal/kernel/arch/gc_hal_kernel_hardware.c b/drivers/mcst/gpu-viv/hal/kernel/arch/gc_hal_kernel_hardware.c new file mode 100644 index 000000000000..1cb28beb0640 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/arch/gc_hal_kernel_hardware.c @@ -0,0 +1,18398 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#include "gc_hal.h" +#include "gc_hal_kernel.h" +#include "gc_hal_kernel_context.h" + +#include "gc_feature_database.h" + +#define _GC_OBJ_ZONE gcvZONE_HARDWARE + +#define gcmSEMAPHORESTALL(buffer) \ + do \ + { \ + /* Arm the PE-FE Semaphore. 
*/ \ + *buffer++ \ + = gcmSETFIELDVALUE(0, AQ_COMMAND_LOAD_STATE_COMMAND, OPCODE, LOAD_STATE) \ + | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, COUNT, 1) \ + | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, ADDRESS, 0x0E02); \ + \ + *buffer++ \ + = gcmSETFIELDVALUE(0, AQ_SEMAPHORE, SOURCE, FRONT_END) \ + | gcmSETFIELDVALUE(0, AQ_SEMAPHORE, DESTINATION, PIXEL_ENGINE);\ + \ + /* STALL FE until PE is done flushing. */ \ + *buffer++ \ + = gcmSETFIELDVALUE(0, STALL_COMMAND, OPCODE, STALL); \ + \ + *buffer++ \ + = gcmSETFIELDVALUE(0, STALL_STALL, SOURCE, FRONT_END) \ + | gcmSETFIELDVALUE(0, STALL_STALL, DESTINATION, PIXEL_ENGINE); \ + } while(0) + +typedef struct _gcsiDEBUG_REGISTERS * gcsiDEBUG_REGISTERS_PTR; +typedef struct _gcsiDEBUG_REGISTERS +{ + gctSTRING module; + gctUINT index; + gctUINT shift; + gctUINT data; + gctUINT count; + gctUINT32 pipeMask; + gctUINT32 selectStart; +} +gcsiDEBUG_REGISTERS; + +typedef struct _gcsFE_STACK +{ + gctSTRING name; + gctINT count; + gctUINT32 highSelect; + gctUINT32 lowSelect; + gctUINT32 linkSelect; + gctUINT32 clear; + gctUINT32 next; +} +gcsFE_STACK; + +/******************************************************************************\ +********************************* Support Code ********************************* +\******************************************************************************/ +static gctBOOL +_IsHardwareMatch( + IN gckHARDWARE Hardware, + IN gctINT32 ChipModel, + IN gctUINT32 ChipRevision + ) +{ + return ((Hardware->identity.chipModel == ChipModel) && + (Hardware->identity.chipRevision == ChipRevision)); +} + +static gceSTATUS +_ResetGPU( + IN gckHARDWARE Hardware, + IN gckOS Os, + IN gceCORE Core + ); + +static void +_GetEcoID( + IN gckHARDWARE Hardware, + IN OUT gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity + ) +{ + gcmkVERIFY_OK(gckOS_ReadRegisterEx( + Hardware->os, + Hardware->core, + 0x000E8, + &Identity->ecoID + )); + + if (_IsHardwareMatch(Hardware, 0x1000, 0x5037) && (Identity->chipDate == 0x20120617)) + 
{ + Identity->ecoID = 1; + } + + if (_IsHardwareMatch(Hardware, 0x320, 0x5303) && (Identity->chipDate == 0x20140511)) + { + Identity->ecoID = 1; + } + +} + +static gceSTATUS +_IdentifyHardwareByDatabase( + IN gckHARDWARE Hardware, + IN gckOS Os, + IN gceCORE Core, + OUT gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity + ) +{ + gceSTATUS status; + gctUINT32 chipIdentity; + gctUINT32 debugControl0; + gctUINT32 chipInfo; + gcsFEATURE_DATABASE *database; + + gcmkHEADER_ARG("Os=0x%x", Os); + + /* Get chip date. */ + gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x00028, &Identity->chipDate)); + + /*************************************************************************** + ** Get chip ID and revision. + */ + + /* Read chip identity register. */ + gcmkONERROR( + gckOS_ReadRegisterEx(Os, Core, + 0x00018, + &chipIdentity)); + + /* Special case for older graphic cores. */ + if (((((gctUINT32) (chipIdentity)) >> (0 ? + 31:24) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1)))))) == (0x01 & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1)))))))) + { + Identity->chipModel = gcv500; + Identity->chipRevision = (((((gctUINT32) (chipIdentity)) >> (0 ? 15:12)) & ((gctUINT32) ((((1 ? 15:12) - (0 ? 15:12) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 15:12) - (0 ? 15:12) + 1)))))) ); + } + + else + { + /* Read chip identity register. */ + gcmkONERROR( + gckOS_ReadRegisterEx(Os, Core, + 0x00020, + (gctUINT32_PTR) &Identity->chipModel)); + + if (((Identity->chipModel & 0xFF00) == 0x0400) + && (Identity->chipModel != 0x0420) + && (Identity->chipModel != 0x0428)) + { + Identity->chipModel = (gceCHIPMODEL) (Identity->chipModel & 0x0400); + } + + /* Read CHIP_REV register. 
*/ + gcmkONERROR( + gckOS_ReadRegisterEx(Os, Core, + 0x00024, + &Identity->chipRevision)); + + if ((Identity->chipModel == gcv300) + && (Identity->chipRevision == 0x2201) + ) + { + gctUINT32 chipDate; + gctUINT32 chipTime; + + /* Read date and time registers. */ + gcmkONERROR( + gckOS_ReadRegisterEx(Os, Core, + 0x00028, + &chipDate)); + + gcmkONERROR( + gckOS_ReadRegisterEx(Os, Core, + 0x0002C, + &chipTime)); + + if ((chipDate == 0x20080814) && (chipTime == 0x12051100)) + { + /* This IP has an ECO; put the correct revision in it. */ + Identity->chipRevision = 0x1051; + } + } + + gcmkONERROR( + gckOS_ReadRegisterEx(Os, Core, + 0x000A8, + &Identity->productID)); + } + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, + "Identity: chipModel=%X", + Identity->chipModel); + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, + "Identity: chipRevision=%X", + Identity->chipRevision); + + _GetEcoID(Hardware, Identity); + + gcmkONERROR( + gckOS_ReadRegisterEx(Os, Core, + 0x00030, + &Identity->customerID)); + + /*************************************************************************** + ** Get chip features. 
+ */ + + database = + Hardware->featureDatabase = + gcQueryFeatureDB( + Hardware->identity.chipModel, + Hardware->identity.chipRevision, + Hardware->identity.productID, + Hardware->identity.ecoID, + Hardware->identity.customerID); + + if (database == gcvNULL) + { + gcmkPRINT("[galcore]: Feature database is not found," + "chipModel=0x%0x, chipRevision=0x%x, productID=0x%x, ecoID=0x%x, customerID=0x%x", + Hardware->identity.chipModel, + Hardware->identity.chipRevision, + Hardware->identity.productID, + Hardware->identity.ecoID, + Hardware->identity.customerID); + gcmkONERROR(gcvSTATUS_NOT_FOUND); + } + + Identity->pixelPipes = database->NumPixelPipes; + Identity->resolvePipes = database->NumResolvePipes; + Identity->instructionCount = database->InstructionCount; + Identity->numConstants = database->NumberOfConstants; + Identity->varyingsCount = database->VaryingCount; + Identity->gpuCoreCount = database->CoreCount; + Identity->streamCount = database->Streams; + + if (Identity->chipModel == gcv320) + { + gctUINT32 data; + + gcmkONERROR( + gckOS_ReadRegisterEx(Os, + Core, + 0x0002C, + &data)); + + if ((data != 33956864) && + ((Identity->chipRevision == 0x5007) || + (Identity->chipRevision == 0x5220))) + { + Hardware->maxOutstandingReads = 0xFF & + (Identity->chipRevision == 0x5220 ? 8 : + (Identity->chipRevision == 0x5007 ? 12 : 0)); + } + } + + if (_IsHardwareMatch(Hardware, gcv880, 0x5107)) + { + Hardware->maxOutstandingReads = 0x00010; + } + + gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x00470, &debugControl0)); + + if (debugControl0 & (1 << 16)) + { + Identity->chipFlags |= gcvCHIP_FLAG_MSAA_COHERENCEY_ECO_FIX; + } + + gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x000A4, &chipInfo)); + + if (((((gctUINT32) (chipInfo)) >> (0 ? + 21:21) & ((gctUINT32) ((((1 ? + 21:21) - (0 ? + 21:21) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 21:21) - (0 ? + 21:21) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? + 21:21) - (0 ? + 21:21) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 21:21) - (0 ? 
21:21) + 1)))))))) + { + Identity->chipFlags |= gcvCHIP_AXI_BUS128_BITS; + } + + /* Success. */ + gcmkFOOTER(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +static gceSTATUS +_GetHardwareSignature( + IN gckHARDWARE Hardware, + IN gckOS Os, + IN gceCORE Core, + OUT gcsHARDWARE_SIGNATURE * Signature + ) +{ + gceSTATUS status; + + gctUINT32 chipIdentity; + + gcmkHEADER_ARG("Os=0x%x", Os); + + /*************************************************************************** + ** Get chip ID and revision. + */ + + /* Read chip identity register. */ + gcmkONERROR( + gckOS_ReadRegisterEx(Os, Core, + 0x00018, + &chipIdentity)); + + /* Special case for older graphic cores. */ + if (((((gctUINT32) (chipIdentity)) >> (0 ? + 31:24) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1)))))) == (0x01 & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1)))))))) + { + Signature->chipModel = gcv500; + Signature->chipRevision = (((((gctUINT32) (chipIdentity)) >> (0 ? 15:12)) & ((gctUINT32) ((((1 ? 15:12) - (0 ? 15:12) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 15:12) - (0 ? 15:12) + 1)))))) ); + } + + else + { + /* Read chip identity register. */ + gcmkONERROR( + gckOS_ReadRegisterEx(Os, Core, + 0x00020, + (gctUINT32_PTR) &Signature->chipModel)); + + /* Read CHIP_REV register. */ + gcmkONERROR( + gckOS_ReadRegisterEx(Os, Core, + 0x00024, + &Signature->chipRevision)); + } + + /*************************************************************************** + ** Get chip features. + */ + + /* Read chip feature register. 
*/ + gcmkONERROR( + gckOS_ReadRegisterEx(Os, Core, + 0x0001C, + &Signature->chipFeatures)); + + if (((Signature->chipModel == gcv500) && (Signature->chipRevision < 2)) + || ((Signature->chipModel == gcv300) && (Signature->chipRevision < 0x2000)) + ) + { + /* GC500 rev 1.x and GC300 rev < 2.0 doesn't have these registers. */ + Signature->chipMinorFeatures = 0; + Signature->chipMinorFeatures1 = 0; + Signature->chipMinorFeatures2 = 0; + } + else + { + /* Read chip minor feature register #0. */ + gcmkONERROR( + gckOS_ReadRegisterEx(Os, Core, + 0x00034, + &Signature->chipMinorFeatures)); + + if (((((gctUINT32) (Signature->chipMinorFeatures)) >> (0 ? + 21:21) & ((gctUINT32) ((((1 ? + 21:21) - (0 ? + 21:21) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 21:21) - (0 ? + 21:21) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? + 21:21) - (0 ? + 21:21) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 21:21) - (0 ? 21:21) + 1))))))) + ) + { + /* Read chip minor features register #1. */ + gcmkONERROR( + gckOS_ReadRegisterEx(Os, Core, + 0x00074, + &Signature->chipMinorFeatures1)); + + /* Read chip minor features register #2. */ + gcmkONERROR( + gckOS_ReadRegisterEx(Os, Core, + 0x00084, + &Signature->chipMinorFeatures2)); + } + else + { + /* Chip doesn't has minor features register #1 or 2 or 3 or 4 or 5. */ + Signature->chipMinorFeatures1 = 0; + Signature->chipMinorFeatures2 = 0; + } + } + + /* Success. */ + gcmkFOOTER(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/* Set to 1 to enable module clock gating debug function. +* Following options take effect when it is set to 1. +*/ +#define gcdDEBUG_MODULE_CLOCK_GATING 0 +/* Set to 1 to disable module clock gating of all modules. */ +#define gcdDISABLE_MODULE_CLOCK_GATING 0 +/* Set to 1 to disable module clock gating of each module. 
*/ +#define gcdDISABLE_STARVE_MODULE_CLOCK_GATING 0 +#define gcdDISABLE_FE_CLOCK_GATING 0 +#define gcdDISABLE_PE_CLOCK_GATING 0 +#define gcdDISABLE_SH_CLOCK_GATING 0 +#define gcdDISABLE_PA_CLOCK_GATING 0 +#define gcdDISABLE_SE_CLOCK_GATING 0 +#define gcdDISABLE_RA_CLOCK_GATING 0 +#define gcdDISABLE_RA_EZ_CLOCK_GATING 0 +#define gcdDISABLE_RA_HZ_CLOCK_GATING 0 +#define gcdDISABLE_TX_CLOCK_GATING 0 +#define gcdDISABLE_TFB_CLOCK_GATING 0 +#define gcdDISABLE_GPIPE_CLOCK_GATING 0 +#define gcdDISABLE_BLT_CLOCK_GATING 0 +#define gcdDISABLE_TPG_CLOCK_GATING 0 +#define gcdDISABLE_VX_CLOCK_GATING 0 + +#if gcdDEBUG_MODULE_CLOCK_GATING +gceSTATUS +_ConfigureModuleLevelClockGating( + gckHARDWARE Hardware + ) +{ + gctUINT32 data; + + gcmkVERIFY_OK( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + Hardware->powerBaseAddress + + 0x00104, + &data)); + +#if gcdDISABLE_FE_CLOCK_GATING + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); +#endif + +#if gcdDISABLE_PE_CLOCK_GATING + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 2:2) - (0 ? + 2:2) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 2:2) - (0 ? + 2:2) + 1))))))) << (0 ? + 2:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 2:2) - (0 ? + 2:2) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))); +#endif + +#if gcdDISABLE_SH_CLOCK_GATING + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 3:3) - (0 ? + 3:3) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 3:3) - (0 ? + 3:3) + 1))))))) << (0 ? + 3:3))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 3:3) - (0 ? + 3:3) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 
3:3))); +#endif + +#if gcdDISABLE_PA_CLOCK_GATING + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:4) - (0 ? + 4:4) + 1))))))) << (0 ? + 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))); +#endif + +#if gcdDISABLE_SE_CLOCK_GATING + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 5:5) - (0 ? + 5:5) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 5:5) - (0 ? + 5:5) + 1))))))) << (0 ? + 5:5))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 5:5) - (0 ? + 5:5) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))); +#endif + +#if gcdDISABLE_RA_CLOCK_GATING + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 6:6) - (0 ? + 6:6) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 6:6) - (0 ? + 6:6) + 1))))))) << (0 ? + 6:6))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 6:6) - (0 ? + 6:6) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))); +#endif + +#if gcdDISABLE_TX_CLOCK_GATING + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:7) - (0 ? + 7:7) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:7) - (0 ? + 7:7) + 1))))))) << (0 ? + 7:7))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 7:7) - (0 ? + 7:7) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))); +#endif + +#if gcdDISABLE_RA_EZ_CLOCK_GATING + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 16:16) - (0 ? + 16:16) + 1))))))) << (0 ? + 16:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 
16:16))); +#endif + +#if gcdDISABLE_RA_HZ_CLOCK_GATING + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 17:17) - (0 ? + 17:17) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 17:17) - (0 ? + 17:17) + 1))))))) << (0 ? + 17:17))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 17:17) - (0 ? + 17:17) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17))); +#endif + +#if gcdDISABLE_TFB_CLOCK_GATING + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 19:19) - (0 ? + 19:19) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 19:19) - (0 ? + 19:19) + 1))))))) << (0 ? + 19:19))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 19:19) - (0 ? + 19:19) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))); +#endif + +#if gcdDISABLE_GPIPE_CLOCK_GATING + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 22:22) - (0 ? + 22:22) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 22:22) - (0 ? + 22:22) + 1))))))) << (0 ? + 22:22))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 22:22) - (0 ? + 22:22) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 22:22) - (0 ? 22:22) + 1))))))) << (0 ? 22:22))); +#endif + +#if gcdDISABLE_BLT_CLOCK_GATING + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 20:20) - (0 ? + 20:20) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 20:20) - (0 ? + 20:20) + 1))))))) << (0 ? + 20:20))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 20:20) - (0 ? + 20:20) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20))); +#endif + +#if gcdDISABLE_TPG_CLOCK_GATING + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 18:18) - (0 ? + 18:18) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 18:18) - (0 ? + 18:18) + 1))))))) << (0 ? + 18:18))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 18:18) - (0 ? + 18:18) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 18:18) - (0 ? 
18:18) + 1))))))) << (0 ? 18:18))); +#endif + +#if gcdDISABLE_VX_CLOCK_GATING + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 21:21) - (0 ? + 21:21) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 21:21) - (0 ? + 21:21) + 1))))))) << (0 ? + 21:21))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 21:21) - (0 ? + 21:21) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 21:21) - (0 ? 21:21) + 1))))))) << (0 ? 21:21))); +#endif + + gcmkVERIFY_OK( + gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + Hardware->powerBaseAddress + + 0x00104, + data)); + +#if gcdDISABLE_STARVE_MODULE_CLOCK_GATING + gcmkVERIFY_OK( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + Hardware->powerBaseAddress + + 0x00100, + &data)); + + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 2:2) - (0 ? + 2:2) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 2:2) - (0 ? + 2:2) + 1))))))) << (0 ? + 2:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 2:2) - (0 ? + 2:2) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))); + + gcmkVERIFY_OK( + gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + Hardware->powerBaseAddress + + 0x00100, + data)); + +#endif + +#if gcdDISABLE_MODULE_CLOCK_GATING + gcmkVERIFY_OK( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + Hardware->powerBaseAddress + + 0x00100, + &data)); + + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 
 0:0)));


    gcmkVERIFY_OK(
        gckOS_WriteRegisterEx(Hardware->os,
                              Hardware->core,
                              Hardware->powerBaseAddress +
                              0x00100,
                              data));
#endif

    return gcvSTATUS_OK;
}
#endif

#if gcdPOWEROFF_TIMEOUT
/* Power-off timer callback.  Data is the gckHARDWARE object; requests the
** gcvPOWER_OFF_TIMEOUT power state when the power-off timeout fires. */
void
_PowerTimerFunction(
    gctPOINTER Data
    )
{
    gckHARDWARE hardware = (gckHARDWARE)Data;
    gcmkVERIFY_OK(
        gckHARDWARE_SetPowerManagementState(hardware, gcvPOWER_OFF_TIMEOUT));
}
#endif

/* Poll the FE DMA state (register 0x660) and DMA address (register 0x664)
** up to 500 times, breaking out as soon as either differs from the first
** sample — i.e. the DMA engine is still advancing.  On exit *Address1/*State1
** hold the first sample and *Address2/*State2 the last one, so the caller can
** compare them.  Each register is read twice back-to-back on purpose —
** presumably the hardware needs a double read for a stable value (TODO:
** confirm against the register spec).  Every gcmkONERROR assigns 'status',
** so the value returned at OnError is that of the last register access. */
static gceSTATUS
_VerifyDMA(
    IN gckOS Os,
    IN gceCORE Core,
    gctUINT32_PTR Address1,
    gctUINT32_PTR Address2,
    gctUINT32_PTR State1,
    gctUINT32_PTR State2
    )
{
    gceSTATUS status;
    gctUINT32 i;

    gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x00660, State1));
    gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x00660, State1));
    gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x00664, Address1));
    gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x00664, Address1));

    for (i = 0; i < 500; i += 1)
    {
        gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x00660, State2));
        gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x00660, State2));
        gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x00664, Address2));
        gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x00664, Address2));

        if (*Address1 != *Address2)
        {
            break;
        }

        if (*State1 != *State2)
        {
            break;
        }
    }

OnError:
    return status;
}

/* Dump a module's debug registers for every pipe selected in
** Descriptor->pipeMask, REG_PER_LINE values per printed line.  The debug
** control register (0x0) is saved first and restored at the end (note:
** restore is skipped if an error path is taken mid-dump). */
static gceSTATUS
_DumpDebugRegisters(
    IN gckOS Os,
    IN gceCORE Core,
    IN gcsiDEBUG_REGISTERS_PTR Descriptor
    )
{
/* If this value is changed, print formats need to be changed too. */
#define REG_PER_LINE 8
    gceSTATUS status = gcvSTATUS_OK;
    gctUINT32 select;
    gctUINT i, j, pipe;
    gctUINT32 datas[REG_PER_LINE];
    gctUINT32 oldControl, control;

    gcmkHEADER_ARG("Os=0x%X Descriptor=0x%X", Os, Descriptor);

    /* Record control.
*/ + gckOS_ReadRegisterEx(Os, Core, 0x0, &oldControl); + + for (pipe = 0; pipe < 2; pipe++) + { + if (!(Descriptor->pipeMask & (1 << pipe))) + { + continue; + } + + gcmkPRINT_N(8, " %s[%d] debug registers:\n", Descriptor->module, pipe); + + /* Switch pipe. */ + gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x0, &control)); + control &= ~(0xF << 20); + control |= (pipe << 20); + gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, 0x0, control)); + + gcmkASSERT(Descriptor->count % REG_PER_LINE); + + for (i = 0; i < Descriptor->count; i += REG_PER_LINE) + { + /* Select of first one in the group. */ + select = i + Descriptor->selectStart; + + /* Read a group of registers. */ + for (j = 0; j < REG_PER_LINE; j++) + { + /* Shift select to right position. */ + gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, Descriptor->index, (select + j) << Descriptor->shift)); + gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, Descriptor->data, &datas[j])); + } + + gcmkPRINT_N(32, " [%02X] %08X %08X %08X %08X %08X %08X %08X %08X\n", + select, datas[0], datas[1], datas[2], datas[3], datas[4], datas[5], datas[6], datas[7]); + } + } + + /* Restore control. */ + gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, 0x0, oldControl)); + +OnError: + /* Return the error. */ + gcmkFOOTER(); + return status; +} + +static gceSTATUS +_DumpLinkStack( + IN gckOS Os, + IN gceCORE Core, + IN gcsiDEBUG_REGISTERS_PTR Descriptor + ) +{ + /* Get wrptr */ + gctUINT32 shift = Descriptor->shift; + gctUINT32 pointerSelect = 0xE << shift; + gctUINT32 pointer, wrPtr, rdPtr, links[16]; + gctUINT32 stackSize = 16; + gctUINT32 oldestPtr = 0; + gctUINT32 i; + + gcmkVERIFY_OK(gckOS_WriteRegisterEx(Os, Core, Descriptor->index, pointerSelect)); + gcmkVERIFY_OK(gckOS_ReadRegisterEx(Os, Core, Descriptor->data, &pointer)); + + wrPtr = (pointer & 0xF0) >> 4; + rdPtr = pointer & 0xF; + + /* Move rdptr to the oldest one (next one to the latest one. 
) */ + oldestPtr = (wrPtr + 1) % stackSize; + + while (rdPtr != oldestPtr) + { + gcmkVERIFY_OK(gckOS_WriteRegisterEx(Os, Core, Descriptor->index, 0x0)); + gcmkVERIFY_OK(gckOS_WriteRegisterEx(Os, Core, Descriptor->index, 0xF << shift)); + + + gcmkVERIFY_OK(gckOS_WriteRegisterEx(Os, Core, Descriptor->index, pointerSelect)); + gcmkVERIFY_OK(gckOS_ReadRegisterEx(Os, Core, Descriptor->data, &pointer)); + + rdPtr = pointer & 0xF; + } + + gcmkPRINT(" Link stack:"); + + /* Read from stack bottom*/ + for (i = 0; i < stackSize; i++) + { + gcmkVERIFY_OK(gckOS_WriteRegisterEx(Os, Core, Descriptor->index, 0xD << shift)); + gcmkVERIFY_OK(gckOS_ReadRegisterEx(Os, Core, Descriptor->data, &links[i])); + + /* Advance rdPtr. */ + gcmkVERIFY_OK(gckOS_WriteRegisterEx(Os, Core, Descriptor->index, 0x0)); + gcmkVERIFY_OK(gckOS_WriteRegisterEx(Os, Core, Descriptor->index, 0xF << shift)); + } + + /* Print. */ + for (i = 0; i < stackSize; i += 4) + { + gcmkPRINT_N(32, " [0x%02X] 0x%08X [0x%02X] 0x%08X [0x%02X] 0x%08X [0x%02X] 0x%08X\n", + i, links[i], i + 1, links[i + 1], i + 2, links[i + 2], i + 3, links[i + 3]); + } + + return gcvSTATUS_OK; +} + +static gceSTATUS +_DumpFEStack( + IN gckOS Os, + IN gceCORE Core, + IN gcsiDEBUG_REGISTERS_PTR Descriptor + ) +{ + gctUINT i; + gctINT j; + gctUINT32 stack[32][2]; + gctUINT32 link[32]; + + static gcsFE_STACK _feStacks[] = + { + { "PRE_STACK", 32, 0x1A, 0x9A, 0x00, 0x1B, 0x1E }, + { "CMD_STACK", 32, 0x1C, 0x9C, 0x1E, 0x1D, 0x1E }, + }; + + for (i = 0; i < gcmCOUNTOF(_feStacks); i++) + { + gckOS_WriteRegisterEx(Os, Core, Descriptor->index, _feStacks[i].clear); + + for (j = 0; j < _feStacks[i].count; j++) + { + gckOS_WriteRegisterEx(Os, Core, Descriptor->index, _feStacks[i].highSelect); + + gckOS_ReadRegisterEx(Os, Core, Descriptor->data, &stack[j][0]); + + gckOS_WriteRegisterEx(Os, Core, Descriptor->index, _feStacks[i].lowSelect); + + gckOS_ReadRegisterEx(Os, Core, Descriptor->data, &stack[j][1]); + + gckOS_WriteRegisterEx(Os, Core, 
Descriptor->index, _feStacks[i].next); + + if (_feStacks[i].linkSelect) + { + gckOS_WriteRegisterEx(Os, Core, Descriptor->index, _feStacks[i].linkSelect); + + gckOS_ReadRegisterEx(Os, Core, Descriptor->data, &link[j]); + } + } + + gcmkPRINT(" %s:", _feStacks[i].name); + + for (j = 31; j >= 3; j -= 4) + { + gcmkPRINT(" %08X %08X %08X %08X %08X %08X %08X %08X", + stack[j][0], stack[j][1], stack[j - 1][0], stack[j - 1][1], + stack[j - 2][0], stack[j - 2][1], stack[j - 3][0], stack[j - 3][1]); + } + + if (_feStacks[i].linkSelect) + { + gcmkPRINT(" LINK_STACK:"); + + for (j = 31; j >= 3; j -= 4) + { + gcmkPRINT(" %08X %08X %08X %08X %08X %08X %08X %08X", + link[j], link[j], link[j - 1], link[j - 1], + link[j - 2], link[j - 2], link[j - 3], link[j - 3]); + } + } + + } + + return gcvSTATUS_OK; +} + +static gceSTATUS +_IsGPUPresent( + IN gckHARDWARE Hardware + ) +{ + gceSTATUS status; + gcsHARDWARE_SIGNATURE signature; + gctUINT32 control; + + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00000, + &control)); + + control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 1:1) - (0 ? + 1:1) + 1))))))) << (0 ? + 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))); + control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 
0:0))); + + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00000, + control)); + + gckOS_ZeroMemory((gctPOINTER)&signature, gcmSIZEOF(gcsHARDWARE_SIGNATURE)); + + /* Identify the hardware. */ + gcmkONERROR(_GetHardwareSignature(Hardware, + Hardware->os, + Hardware->core, + &signature)); + + /* Check if these are the same values as saved before. */ + if ((Hardware->signature.chipModel != signature.chipModel) + || (Hardware->signature.chipRevision != signature.chipRevision) + || (Hardware->signature.chipFeatures != signature.chipFeatures) + || (Hardware->signature.chipMinorFeatures != signature.chipMinorFeatures) + || (Hardware->signature.chipMinorFeatures1 != signature.chipMinorFeatures1) + || (Hardware->signature.chipMinorFeatures2 != signature.chipMinorFeatures2) + ) + { + gcmkPRINT("[galcore]: GPU is not present."); + gcmkONERROR(gcvSTATUS_GPU_NOT_RESPONDING); + } + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the error. */ + gcmkFOOTER(); + return status; +} + +gceSTATUS +_FlushCache( + gckHARDWARE Hardware, + gckCOMMAND Command + ) +{ + gceSTATUS status; + gctUINT32 bytes, requested; + gctPOINTER buffer; + + /* Get the size of the flush command. */ + gcmkONERROR(gckHARDWARE_Flush(Hardware, + gcvFLUSH_ALL, + gcvNULL, + &requested)); + + /* Reserve space in the command queue. */ + gcmkONERROR(gckCOMMAND_Reserve(Command, + requested, + &buffer, + &bytes)); + + /* Append a flush. */ + gcmkONERROR(gckHARDWARE_Flush( + Hardware, gcvFLUSH_ALL, buffer, &bytes + )); + + /* Execute the command queue. 
*/ + gcmkONERROR(gckCOMMAND_Execute(Command, requested)); + + return gcvSTATUS_OK; + +OnError: + return status; +} + +static gctBOOL +_IsGPUIdle( + IN gctUINT32 Idle + ) +{ + return Idle == 0x7FFFFFFF; +} + +gctBOOL +_QueryFeatureDatabase( + IN gckHARDWARE Hardware, + IN gceFEATURE Feature + ) +{ + gctBOOL available; + + gcsFEATURE_DATABASE *database = Hardware->featureDatabase; + + gcmkHEADER_ARG("Hardware=0x%x Feature=%d", Hardware, Feature); + + /* Only features needed by common kernel logic added here. */ + switch (Feature) + { + case gcvFEATURE_END_EVENT: + available = gcvFALSE; + break; + + case gcvFEATURE_MC20: + available = database->REG_MC20; + break; + + case gcvFEATURE_EARLY_Z: + available = database->REG_NoEZ == 0; + break; + + case gcvFEATURE_HZ: + available = database->REG_HierarchicalZ; + break; + + case gcvFEATURE_NEW_HZ: + available = database->REG_NewHZ; + break; + + case gcvFEATURE_FAST_MSAA: + available = database->REG_FastMSAA; + break; + + case gcvFEATURE_SMALL_MSAA: + available = database->REG_SmallMSAA; + break; + + case gcvFEATURE_DYNAMIC_FREQUENCY_SCALING: + /* This feature doesn't apply for 2D cores. 
*/ + available = database->REG_DynamicFrequencyScaling && database->REG_Pipe3D; + + if (Hardware->identity.chipModel == gcv1000 && + (Hardware->identity.chipRevision == 0x5039 || + Hardware->identity.chipRevision == 0x5040)) + { + available = gcvFALSE; + } + break; + + case gcvFEATURE_ACE: + available = database->REG_ACE; + break; + + case gcvFEATURE_HALTI2: + available = database->REG_Halti2; + break; + + case gcvFEATURE_PIPE_2D: + available = database->REG_Pipe2D; + break; + + case gcvFEATURE_PIPE_3D: +#if gcdENABLE_3D + available = database->REG_Pipe3D; +#else + available = gcvFALSE; +#endif + break; + + case gcvFEATURE_FC_FLUSH_STALL: + available = database->REG_FcFlushStall; + break; + + case gcvFEATURE_BLT_ENGINE: + available = database->REG_BltEngine; + break; + + case gcvFEATURE_HALTI0: + available = database->REG_Halti0; + break; + + case gcvFEATURE_FE_ALLOW_STALL_PREFETCH_ENG: + available = database->REG_FEAllowStallPrefetchEng; + break; + + case gcvFEATURE_MMU: + available = database->REG_MMU; + break; + + case gcvFEATURE_FENCE_64BIT: + available = database->FENCE_64BIT; + break; + + case gcvFEATURE_TEX_BASELOD: + available = database->REG_Halti2; + + if (_IsHardwareMatch(Hardware, gcv900, 0x5250)) + { + available = gcvTRUE; + } + break; + + case gcvFEATURE_TEX_CACHE_FLUSH_FIX: + available = database->REG_Halti5; + break; + + case gcvFEATURE_BUG_FIXES1: + available = database->REG_BugFixes1; + break; + + case gcvFEATURE_MULTI_SOURCE_BLT: + available = database->REG_MultiSourceBlt; + break; + + case gcvFEATURE_HALTI5: + available = database->REG_Halti5; + break; + + case gcvFEATURE_FAST_CLEAR: + available = database->REG_FastClear; + + if (Hardware->identity.chipModel == gcv700) + { + available = gcvFALSE; + } + break; + + case gcvFEATURE_BUG_FIXES7: + available = database->REG_BugFixes7; + break; + + case gcvFEATURE_ZCOMPRESSION: + available = database->REG_ZCompression; + break; + + case gcvFEATURE_SHADER_HAS_INSTRUCTION_CACHE: + available = 
database->REG_InstructionCache; + break; + + case gcvFEATURE_YUV420_TILER: + available = database->REG_YUV420Tiler; + break; + + case gcvFEATURE_2DPE20: + available = database->REG_2DPE20; + break; + + case gcvFEATURE_DITHER_AND_FILTER_PLUS_ALPHA_2D: + available = database->REG_DitherAndFilterPlusAlpha2D; + break; + + case gcvFEATURE_ONE_PASS_2D_FILTER: + available = database->REG_OnePass2DFilter; + break; + + case gcvFEATURE_HALTI1: + available = database->REG_Halti1; + break; + + case gcvFEATURE_HALTI3: + available = database->REG_Halti3; + break; + + case gcvFEATURE_HALTI4: + available = database->REG_Halti4; + break; + + case gcvFEATURE_GEOMETRY_SHADER: + available = database->REG_GeometryShader; + break; + + case gcvFEATURE_TESSELLATION: + available = database->REG_TessellationShaders; + break; + + case gcvFEATURE_GENERIC_ATTRIB: + available = database->REG_Generics; + break; + + case gcvFEATURE_TEXTURE_LINEAR: + available = database->REG_LinearTextureSupport; + break; + + case gcvFEATURE_TX_FILTER: + available = database->REG_TXFilter; + break; + + case gcvFEATURE_TX_SUPPORT_DEC: + available = database->REG_TXSupportDEC; + break; + + case gcvFEATURE_TX_FRAC_PRECISION_6BIT: + available = database->REG_TX6bitFrac; + break; + + case gcvFEATURE_TEXTURE_ASTC: + available = database->REG_TXEnhancements4 && !database->NO_ASTC; + break; + + case gcvFEATURE_SHADER_ENHANCEMENTS2: + available = database->REG_SHEnhancements2; + break; + + case gcvFEATURE_BUG_FIXES18: + available = database->REG_BugFixes18; + break; + + case gcvFEATURE_64K_L2_CACHE: + available = gcvFALSE; + break; + + case gcvFEATURE_BUG_FIXES4: + available = database->REG_BugFixes4; + break; + + case gcvFEATURE_BUG_FIXES12: + available = database->REG_BugFixes12; + break; + + case gcvFEATURE_HW_TFB: + available = database->HWTFB; + break; + + case gcvFEATURE_SNAPPAGE_CMD_FIX: + available = database->SH_SNAP2PAGE_FIX; + break; + + case gcvFEATURE_SECURITY: + available = database->SECURITY; + break; + + 
case gcvFEATURE_TX_DESCRIPTOR: + available = database->REG_Halti5; + break; + + case gcvFEATURE_TX_DESC_CACHE_CLOCKGATE_FIX: + available = database->TX_DESC_CACHE_CLOCKGATE_FIX; + break; + + case gcvFEATURE_ROBUSTNESS: + available = database->ROBUSTNESS; + break; + + case gcvFEATURE_SNAPPAGE_CMD: + available = database->SNAPPAGE_CMD; + break; + + case gcvFEATURE_HALF_FLOAT_PIPE: + available = database->REG_HalfFloatPipe; + break; + + case gcvFEATURE_SH_INSTRUCTION_PREFETCH: + available = database->SH_ICACHE_PREFETCH; + break; + + case gcvFEATURE_FE_NEED_DUMMYDRAW: + available = database->FE_NEED_DUMMYDRAW; + break; + + case gcvFEATURE_DEC300_COMPRESSION: + available = database->REG_DEC; + break; + + case gcvFEATURE_DEC400_COMPRESSION: + available = database->G2D_DEC400; + break; + + case gcvFEATURE_TPC_COMPRESSION: + available = database->REG_ThirdPartyCompression; + break; + + case gcvFEATURE_TPCV11_COMPRESSION: + available = database->G2D_3rd_PARTY_COMPRESSION_1_1; + break; + + case gcvFEATURE_USC_DEFER_FILL_FIX: + available = database->USC_DEFER_FILL_FIX; + break; + + case gcvFEATURE_USC: + available = database->REG_Halti5; + break; + + case gcvFEATURE_RA_CG_FIX: + available = database->RA_CG_FIX; + break; + + case gcvFEATURE_ZERO_ATTRIB_SUPPORT: + available = database->REG_Halti4; + break; + + case gcvFEATURE_SH_CLOCK_GATE_FIX: + available = database->SH_CLOCK_GATE_FIX; + break; + + case gcvFEATURE_GPIPE_CLOCK_GATE_FIX: + available = gcvTRUE; + break; + + case gcvFEATURE_NEW_GPIPE: + available = database->NEW_GPIPE; + break; + + case gcvFEATURE_MULTI_CORE_BLOCK_SET_CONFIG2: + available = database->MULTI_CORE_BLOCK_SET_CONFIG2; + break; + + case gcvFEATURE_SECURITY_AHB: + available = database->SECURITY_AHB; + break; + + case gcvFEATURE_ASYNC_BLIT: + available = database->ASYNC_BLT; + break; + + case gcvFEATURE_COMPUTE_ONLY: + available = database->COMPUTE_ONLY; + break; + + case gcvFEATURE_USC_FULLCACHE_FIX: + available = database->USC_FULL_CACHE_FIX; + break; + 
+ case gcvFEATURE_PE_TILE_CACHE_FLUSH_FIX: + available = database->PE_TILE_CACHE_FLUSH_FIX; + break; + + default: + gcmkFATAL("Invalid feature has been requested."); + available = gcvFALSE; + } + + gcmkFOOTER_ARG("%d", available ? gcvSTATUS_TRUE : gcvSTATUS_FALSE); + return available; +} + +static void +_ConfigurePolicyID( + IN gckHARDWARE Hardware + ) +{ + gceSTATUS status; + gctUINT32 policyID; + gctUINT32 auxBit = ~0U; + gctUINT32 axiConfig; + gckOS os = Hardware->os; + gceCORE core = Hardware->core; + gctUINT32 i; + gctUINT32 offset; + gctUINT32 shift; + gctUINT32 currentAxiConfig; + + status = gckOS_GetPolicyID(os, gcvSURF_TYPE_UNKNOWN, &policyID, &axiConfig); + + if (status == gcvSTATUS_NOT_SUPPORTED) + { + /* No customized policyID setting. */ + return; + } + + for (i = 0; i < 16; i++) + { + /* Mapping 16 surface type.*/ + status = gckOS_GetPolicyID(os, (gceSURF_TYPE) i, &policyID, &axiConfig); + + if (gcmIS_SUCCESS(status)) + { + if (auxBit == ~0U) + { + /* There is a customized policyID setting for this type. */ + auxBit = (policyID >> 4) & 0x1; + } + else + { + /* Check whether this bit changes. */ + if (auxBit != ((policyID >> 4) & 0x1)) + { + gcmkPRINT("[galcore]: AUX_BIT changes"); + return; + } + } + + offset = policyID >> 1; + + shift = (policyID & 0x1) * 16; + + axiConfig &= 0xFFFF; + + gcmkVERIFY_OK(gckOS_ReadRegisterEx( + os, + core, + (0x0070 + offset) << 2, + ¤tAxiConfig + )); + + currentAxiConfig |= (axiConfig << shift); + + gcmkVERIFY_OK(gckOS_WriteRegisterEx( + os, + core, + (0x0070 + offset) << 2, + currentAxiConfig + )); + } + } + + if (auxBit != ~0U) + { + gcmkVERIFY_OK(gckOS_WriteRegisterEx( + os, + core, + 0x000EC, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 
7:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 8:8) - (0 ? + 8:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 8:8) - (0 ? + 8:8) + 1))))))) << (0 ? + 8:8))) | (((gctUINT32) ((gctUINT32) (auxBit) & ((gctUINT32) ((((1 ? + 8:8) - (0 ? + 8:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8))) + )); + } +} +/**************************** +** Initialise hardware options +*/ +static void +_SetHardwareOptions( + IN gckHARDWARE Hardware + ) +{ + gceSTATUS status; + gcsHAL_QUERY_CHIP_OPTIONS *options = &Hardware->options; + + status = gckOS_QueryOption(Hardware->os, "powerManagement", (gctUINT32*)&options->powerManagement); + + if (status == gcvSTATUS_NOT_SUPPORTED) + { + /* Enable power management by default. */ + Hardware->options.powerManagement = gcvTRUE; + } + + /* Disable profiler by default */ + status = gckOS_QueryOption(Hardware->os, "gpuProfiler", (gctUINT32*)&options->gpuProfiler); + if (status == gcvSTATUS_NOT_SUPPORTED) + { + /* Disable profiler by default */ + Hardware->options.gpuProfiler= gcvFALSE; + } + gckOS_QueryOption(Hardware->os, "mmu", (gctUINT32_PTR)&options->enableMMU); + + if (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_USC)) + { + gctUINT L1cacheSize; + gcsFEATURE_DATABASE *database = Hardware->featureDatabase; + + if (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_COMPUTE_ONLY)) + { + L1cacheSize = database->L1CacheSize; + } + else + { + gctUINT attribBufSizeInKB; + if (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_TESSELLATION)) + { + /* GS/TS must be bundled. 
*/ + gcmkASSERT(gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_GEOMETRY_SHADER)); + attribBufSizeInKB = 42; + } + else + { + gcmkASSERT(!gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_GEOMETRY_SHADER)); + attribBufSizeInKB = 8; + } + + if (attribBufSizeInKB < database->USC_MAX_PAGES) + { + L1cacheSize = database->USC_MAX_PAGES - attribBufSizeInKB; + } + else + { + attribBufSizeInKB -= 4; + L1cacheSize = 4; + } + } + gcmkASSERT(L1cacheSize); + if (L1cacheSize >= database->L1CacheSize) + { + Hardware->options.uscL1CacheRatio = 0x0; + gcmkASSERT(gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_USC_FULLCACHE_FIX)); + } + else + { + static const gctINT s_uscCacheRatio[] = + { + 100000,/* 1.0f */ + 50000, /* 0.5f */ + 25000, /* 0.25f */ + 12500, /* 0.125f */ + 62500, /* 0.0625f */ + 3125, /* 0.03125f */ + 75000, /* 0.75f */ + 0, /*0.0f */ + }; + gctINT maxL1cacheSize = L1cacheSize * 100000; + gctINT delta = 2147483647; /* start with very big delta */ + gctINT i = 0; + gctINT curIndex = -1; + for (; i < gcmCOUNTOF(s_uscCacheRatio); ++i) + { + gctINT curL1cacheSize = database->L1CacheSize * s_uscCacheRatio[i]; + + if ((maxL1cacheSize >= curL1cacheSize) && + ((maxL1cacheSize - curL1cacheSize) < delta)) + { + curIndex = i; + delta = maxL1cacheSize - curL1cacheSize; + } + } + gcmkASSERT(-1 != curIndex); + Hardware->options.uscL1CacheRatio = curIndex; + } + } + + options->secureMode = gcvSECURE_NONE; + + if (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_SECURITY)) + { + gctUINT32 ta = 0; + + gcmkASSERT(gcvSTATUS_TRUE == gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_SECURITY_AHB)); + + options->secureMode = gcvSECURE_IN_NORMAL; + + status = gckOS_QueryOption(Hardware->os, "TA", &ta); + + if (gcmIS_SUCCESS(status) && ta) + { + options->secureMode = gcvSECURE_IN_TA; + } + } + else if (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_SECURITY_AHB)) + { + options->secureMode = gcvSECURE_IN_NORMAL; + } + + return; +} + +/* +* State timer helper must be 
called with powerMutex held. +*/ +void +gckSTATETIMER_Reset( + IN gcsSTATETIMER * StateTimer, + IN gctUINT64 Start + ) +{ + gctUINT64 now; + + if (Start) + { + now = Start; + } + else + { + gckOS_GetProfileTick(&now); + } + + StateTimer->recent = StateTimer->start = now; + + gckOS_ZeroMemory(StateTimer->elapse, gcmSIZEOF(StateTimer->elapse)); +} + +void +gckSTATETIMER_Accumulate( + IN gcsSTATETIMER * StateTimer, + IN gceCHIPPOWERSTATE OldState + ) +{ + gctUINT64 now; + gctUINT64 elapse; + + gckOS_GetProfileTick(&now); + + elapse = now - StateTimer->recent; + + StateTimer->recent = now; + + StateTimer->elapse[OldState] += elapse; +} + +void +gckSTATETIMER_Query( + IN gcsSTATETIMER * StateTimer, + IN gceCHIPPOWERSTATE State, + OUT gctUINT64_PTR Start, + OUT gctUINT64_PTR End, + OUT gctUINT64_PTR On, + OUT gctUINT64_PTR Off, + OUT gctUINT64_PTR Idle, + OUT gctUINT64_PTR Suspend + ) +{ + *Start = StateTimer->start; + + gckSTATETIMER_Accumulate(StateTimer, State); + + *End = StateTimer->recent; + + *On = StateTimer->elapse[gcvPOWER_ON]; + *Off = StateTimer->elapse[gcvPOWER_OFF]; + *Idle = StateTimer->elapse[gcvPOWER_IDLE]; + *Suspend = StateTimer->elapse[gcvPOWER_SUSPEND]; + + gckSTATETIMER_Reset(StateTimer, StateTimer->recent); +} + +/******************************************************************************\ +****************************** gckHARDWARE API code ***************************** +\******************************************************************************/ + +/******************************************************************************* +** +** gckHARDWARE_Construct +** +** Construct a new gckHARDWARE object. +** +** INPUT: +** +** gckOS Os +** Pointer to an initialized gckOS object. +** +** gceCORE Core +** Specified core. +** +** OUTPUT: +** +** gckHARDWARE * Hardware +** Pointer to a variable that will hold the pointer to the gckHARDWARE +** object. 
+*/ +gceSTATUS +gckHARDWARE_Construct( + IN gckOS Os, + IN gceCORE Core, + OUT gckHARDWARE * Hardware + ) +{ + gceSTATUS status; + gckHARDWARE hardware = gcvNULL; + gctUINT16 data = 0xff00; + gctPOINTER pointer = gcvNULL; + gctUINT i; + + gcmkHEADER_ARG("Os=0x%x", Os); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Hardware != gcvNULL); + + /* Enable the GPU. */ + gcmkONERROR(gckOS_SetGPUPower(Os, Core, gcvTRUE, gcvTRUE)); + gcmkONERROR(gckOS_WriteRegisterEx(Os, + Core, + 0x00000, + 0x00000900)); + + /* Allocate the gckHARDWARE object. */ + gcmkONERROR(gckOS_Allocate(Os, + gcmSIZEOF(struct _gckHARDWARE), + &pointer)); + + gckOS_ZeroMemory(pointer, gcmSIZEOF(struct _gckHARDWARE)); + + hardware = (gckHARDWARE) pointer; + + /* Initialize the gckHARDWARE object. */ + hardware->object.type = gcvOBJ_HARDWARE; + hardware->os = Os; + hardware->core = Core; + + gcmkONERROR(_GetHardwareSignature(hardware, Os, Core, &hardware->signature)); + + /* Identify the hardware. */ + gcmkONERROR(_IdentifyHardwareByDatabase(hardware, Os, Core, &hardware->identity)); + + _SetHardwareOptions(hardware); + + hardware->mmuVersion = gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_MMU); + + /* Get the system's physical base address for old MMU */ + if (hardware->mmuVersion == 0) + { + gcmkONERROR(gckOS_GetBaseAddress(Os, &hardware->baseAddress)); + } + + /* Determine the hardware type */ + if (gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_PIPE_3D) + && gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_PIPE_2D) + ) + { + hardware->type = gcvHARDWARE_3D2D; + } + else + if (gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_PIPE_2D)) + { + hardware->type = gcvHARDWARE_2D; + } + else + { + hardware->type = gcvHARDWARE_3D; + } + + hardware->powerBaseAddress + = ((hardware->identity.chipModel == gcv300) + && (hardware->identity.chipRevision < 0x2000)) + ? 0x0100 + : 0x0000; + + + /* _ResetGPU need powerBaseAddress. 
*/ + status = _ResetGPU(hardware, Os, Core); + if (status != gcvSTATUS_OK) + { + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "_ResetGPU failed: status=%d\n", status); + } + +#if gcdDEC_ENABLE_AHB + gcmkONERROR(gckOS_WriteRegisterEx(Os, gcvCORE_DEC, 0x18180, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 22:22) - (0 ? + 22:22) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 22:22) - (0 ? + 22:22) + 1))))))) << (0 ? + 22:22))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 22:22) - (0 ? + 22:22) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 22:22) - (0 ? 22:22) + 1))))))) << (0 ? 22:22))))); +#endif + + hardware->hasL2Cache = gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_64K_L2_CACHE); + + if (!hardware->hasL2Cache) + { + gcmkONERROR(gckOS_WriteRegisterEx(Os, + Core, + 0x0055C, + 0x00FFFFFF)); + } + + hardware->powerMutex = gcvNULL; + + /* Determine whether bug fixes #1 are present. */ + hardware->extraEventStates = (gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_BUG_FIXES1) == gcvFALSE); + + /* Check if big endian */ + hardware->bigEndian = (*(gctUINT8 *)&data == 0xff); + + /* Initialize the fast clear. */ + gcmkONERROR(gckHARDWARE_SetFastClear(hardware, -1, -1)); + +#if !gcdENABLE_128B_MERGE + if (gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_MULTI_SOURCE_BLT)) + { + /* 128B merge is turned on by default. Disable it. 
*/ + gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, 0x00558, 0)); + } +#endif + +#if (gcdFPGA_BUILD && 1) + if (gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_TPCV11_COMPRESSION)) + { + gctUINT32 data; + gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x00558, &data)); + data |= 0x1 << 27; + gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, 0x00558, data)); + } +#endif + + { + gctUINT32 value; + gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x00090, &value)); +#if gcdDEC_ENABLE_AHB + if (gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_DEC300_COMPRESSION)) + { + value |= ~0xFFFFFFBF; + } + else +#endif + { + value &= 0xFFFFFFBF; + } + gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, 0x00090, value)); + } + + /* Set power state to ON. */ + hardware->chipPowerState = gcvPOWER_ON; + hardware->clockState = gcvTRUE; + hardware->powerState = gcvTRUE; + hardware->lastWaitLink = ~0U; + hardware->lastEnd = ~0U; + hardware->globalSemaphore = gcvNULL; +#if gcdENABLE_FSCALE_VAL_ADJUST + hardware->powerOnFscaleVal = 64; +#endif + + gcmkONERROR(gckOS_CreateMutex(Os, &hardware->powerMutex)); + gcmkONERROR(gckOS_CreateSemaphore(Os, &hardware->globalSemaphore)); + +#if gcdPOWEROFF_TIMEOUT + hardware->powerOffTimeout = gcdPOWEROFF_TIMEOUT; + + gcmkVERIFY_OK(gckOS_CreateTimer(Os, + _PowerTimerFunction, + (gctPOINTER)hardware, + &hardware->powerOffTimer)); +#endif + + for (i = 0; i < gcvENGINE_GPU_ENGINE_COUNT; i++) + { + gcmkONERROR(gckOS_AtomConstruct(Os, &hardware->pageTableDirty[i])); + } + + gcmkONERROR(gckOS_AtomConstruct(Os, &hardware->pendingEvent)); + +#if defined(LINUX) || defined(__QNXNTO__) || defined(UNDER_CE) + if (hardware->mmuVersion) + { + hardware->stallFEPrefetch + = gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_FE_ALLOW_STALL_PREFETCH_ENG); + } + else +#endif + { + hardware->stallFEPrefetch = gcvTRUE; + } + + hardware->hasAsyncFe + = gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_ASYNC_BLIT); + + hardware->minFscaleValue = 1; + hardware->waitCount = 200; + + 
gckSTATETIMER_Reset(&hardware->powerStateTimer, 0); + +#if gcdLINK_QUEUE_SIZE + gcmkONERROR(gckQUEUE_Allocate(hardware->os, &hardware->linkQueue, gcdLINK_QUEUE_SIZE)); +#endif + + if (hardware->options.secureMode == gcvSECURE_IN_NORMAL) + { + hardware->pagetableArray.size = 4096; + + gcmkONERROR(gckOS_AllocateNonPagedMemory( + hardware->os, + gcvFALSE, + gcvALLOC_FLAG_CONTIGUOUS, + &hardware->pagetableArray.size, + &hardware->pagetableArray.physical, + &hardware->pagetableArray.logical + )); + + gcmkONERROR(gckOS_GetPhysicalAddress( + hardware->os, + hardware->pagetableArray.logical, + &hardware->pagetableArray.address + )); + + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical( + hardware->os, + hardware->pagetableArray.address, + &hardware->pagetableArray.address + )); + } + + /* Return pointer to the gckHARDWARE object. */ + *Hardware = hardware; + + /* Success. */ + gcmkFOOTER_ARG("*Hardware=0x%x", *Hardware); + return gcvSTATUS_OK; + +OnError: + /* Roll back. */ + if (hardware != gcvNULL) + { + /* Turn off the power. */ + gcmkVERIFY_OK(gckOS_SetGPUPower(Os, Core, gcvFALSE, gcvFALSE)); + + if (hardware->globalSemaphore != gcvNULL) + { + /* Destroy the global semaphore. */ + gcmkVERIFY_OK(gckOS_DestroySemaphore(Os, + hardware->globalSemaphore)); + } + + if (hardware->powerMutex != gcvNULL) + { + /* Destroy the power mutex. 
*/ + gcmkVERIFY_OK(gckOS_DeleteMutex(Os, hardware->powerMutex)); + } + +#if gcdPOWEROFF_TIMEOUT + if (hardware->powerOffTimer != gcvNULL) + { + gcmkVERIFY_OK(gckOS_StopTimer(Os, hardware->powerOffTimer)); + gcmkVERIFY_OK(gckOS_DestroyTimer(Os, hardware->powerOffTimer)); + } +#endif + + for (i = 0; i < gcvENGINE_GPU_ENGINE_COUNT; i++) + { + if (hardware->pageTableDirty[i] != gcvNULL) + { + gcmkVERIFY_OK(gckOS_AtomDestroy(Os, hardware->pageTableDirty[i])); + } + } + + if (hardware->pendingEvent != gcvNULL) + { + gcmkVERIFY_OK(gckOS_AtomDestroy(Os, hardware->pendingEvent)); + } + + if (hardware->pagetableArray.logical != gcvNULL) + { + gcmkVERIFY_OK(gckOS_FreeNonPagedMemory( + Os, + hardware->pagetableArray.size, + hardware->pagetableArray.physical, + hardware->pagetableArray.logical + )); + } + + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, hardware)); + } + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckHARDWARE_Destroy +** +** Destroy an gckHARDWARE object. +** +** INPUT: +** +** gckHARDWARE Hardware +** Pointer to the gckHARDWARE object that needs to be destroyed. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckHARDWARE_Destroy( + IN gckHARDWARE Hardware + ) +{ + gceSTATUS status; + gctUINT i; + + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + + /* Destroy the power semaphore. */ + gcmkVERIFY_OK(gckOS_DestroySemaphore(Hardware->os, + Hardware->globalSemaphore)); + + /* Destroy the power mutex. 
*/ + gcmkVERIFY_OK(gckOS_DeleteMutex(Hardware->os, Hardware->powerMutex)); + +#if gcdPOWEROFF_TIMEOUT + gcmkVERIFY_OK(gckOS_StopTimer(Hardware->os, Hardware->powerOffTimer)); + gcmkVERIFY_OK(gckOS_DestroyTimer(Hardware->os, Hardware->powerOffTimer)); +#endif + + for (i = 0; i < gcvENGINE_GPU_ENGINE_COUNT; i++) + { + gcmkVERIFY_OK(gckOS_AtomDestroy(Hardware->os, Hardware->pageTableDirty[i])); + } + + gcmkVERIFY_OK(gckOS_AtomDestroy(Hardware->os, Hardware->pendingEvent)); + +#if gcdLINK_QUEUE_SIZE + gckQUEUE_Free(Hardware->os, &Hardware->linkQueue); +#endif + + if (Hardware->pagetableArray.logical != gcvNULL) + { + gcmkVERIFY_OK(gckOS_FreeNonPagedMemory( + Hardware->os, + Hardware->pagetableArray.size, + Hardware->pagetableArray.physical, + Hardware->pagetableArray.logical + )); + } + + /* Mark the object as unknown. */ + Hardware->object.type = gcvOBJ_UNKNOWN; + + /* Free the object. */ + gcmkONERROR(gcmkOS_SAFE_FREE(Hardware->os, Hardware)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckHARDWARE_GetType +** +** Get the hardware type. +** +** INPUT: +** +** gckHARDWARE Harwdare +** Pointer to an gckHARDWARE object. +** +** OUTPUT: +** +** gceHARDWARE_TYPE * Type +** Pointer to a variable that receives the type of hardware object. +*/ +gceSTATUS +gckHARDWARE_GetType( + IN gckHARDWARE Hardware, + OUT gceHARDWARE_TYPE * Type + ) +{ + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + gcmkVERIFY_ARGUMENT(Type != gcvNULL); + + *Type = Hardware->type; + + gcmkFOOTER_ARG("*Type=%d", *Type); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckHARDWARE_InitializeHardware +** +** Initialize the hardware. +** +** INPUT: +** +** gckHARDWARE Hardware +** Pointer to the gckHARDWARE object. +** +** OUTPUT: +** +** Nothing. 
+*/ +gceSTATUS +gckHARDWARE_InitializeHardware( + IN gckHARDWARE Hardware + ) +{ + gceSTATUS status; + gctUINT32 control; + gctUINT32 data; + gctUINT32 regPMC = 0; + + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + + /* Disable isolate GPU bit. */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00000, + ((((gctUINT32) (0x00000900)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 19:19) - (0 ? + 19:19) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 19:19) - (0 ? + 19:19) + 1))))))) << (0 ? + 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 19:19) - (0 ? + 19:19) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))))); + + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00000, + &control)); + + /* Enable debug register. */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00000, + ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 11:11) - (0 ? + 11:11) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 11:11) - (0 ? + 11:11) + 1))))))) << (0 ? + 11:11))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 11:11) - (0 ? + 11:11) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11))))); + + if (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_SECURITY_AHB) && + (Hardware->options.secureMode == gcvSECURE_IN_NORMAL)) + { + gctUINT32 ahbControl = 0; + + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x003A8, + &ahbControl)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x003A8, + ((((gctUINT32) (ahbControl)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 1:1) - (0 ? + 1:1) + 1))))))) << (0 ? + 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 
1:1))))); + } + + /* Reset memory counters. */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x0003C, + ~0U)); + + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x0003C, + 0)); + + if (Hardware->mmuVersion == 0) + { + /* Program the base addesses. */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x0041C, + Hardware->baseAddress)); + + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00418, + Hardware->baseAddress)); + + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00428, + Hardware->baseAddress)); + + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00420, + Hardware->baseAddress)); + + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00424, + Hardware->baseAddress)); + } + + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + Hardware->powerBaseAddress + + 0x00100, + &data)); + + /* Enable clock gating. */ + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + + if ((Hardware->identity.chipRevision == 0x4301) + || (Hardware->identity.chipRevision == 0x4302) + ) + { + /* Disable stall module level clock gating for 4.3.0.1 and 4.3.0.2 + ** revisions. */ + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 1:1) - (0 ? + 1:1) + 1))))))) << (0 ? + 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 
1:1))); + } + + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + Hardware->powerBaseAddress + + 0x00100, + data)); + +#if gcdENABLE_3D + /* Disable PE clock gating on revs < 5.0 when HZ is present without a + ** bug fix. */ + if ((Hardware->identity.chipRevision < 0x5000) + && gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_HZ) + && !gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_BUG_FIXES4) + ) + { + if (regPMC == 0) + { + gcmkONERROR( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + Hardware->powerBaseAddress + + 0x00104, + ®PMC)); + } + + /* Disable PE clock gating. */ + regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 2:2) - (0 ? + 2:2) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 2:2) - (0 ? + 2:2) + 1))))))) << (0 ? + 2:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 2:2) - (0 ? + 2:2) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))); + } +#endif + + if (Hardware->identity.chipModel == gcv4000 && + ((Hardware->identity.chipRevision == 0x5208) || (Hardware->identity.chipRevision == 0x5222))) + { + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x0010C, + ((((gctUINT32) (0x01590880)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:23) - (0 ? + 23:23) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:23) - (0 ? + 23:23) + 1))))))) << (0 ? + 23:23))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 23:23) - (0 ? + 23:23) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23))))); + } + + if ((Hardware->identity.chipModel == gcv1000 && + (Hardware->identity.chipRevision == 0x5039 || + Hardware->identity.chipRevision == 0x5040)) + || + (Hardware->identity.chipModel == gcv2000 && + Hardware->identity.chipRevision == 0x5140) + ) + { + gctUINT32 pulseEater; + + pulseEater = ((((gctUINT32) (0x01590880)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 
+ 16:16) - (0 ? + 16:16) + 1))))))) << (0 ? + 16:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))); + + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x0010C, + ((((gctUINT32) (pulseEater)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 17:17) - (0 ? + 17:17) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 17:17) - (0 ? + 17:17) + 1))))))) << (0 ? + 17:17))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 17:17) - (0 ? + 17:17) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17))))); + } + + if ((gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_HALTI2) == gcvSTATUS_FALSE) + || (Hardware->identity.chipRevision < 0x5422) + ) + { + if (regPMC == 0) + { + gcmkONERROR( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + Hardware->powerBaseAddress + + 0x00104, + ®PMC)); + } + + regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:15) - (0 ? + 15:15) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:15) - (0 ? + 15:15) + 1))))))) << (0 ? + 15:15))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 15:15) - (0 ? + 15:15) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:15) - (0 ? 15:15) + 1))))))) << (0 ? 15:15))); + } + + if (_IsHardwareMatch(Hardware, gcv2000, 0x5108)) + { + gcmkONERROR( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00480, + &data)); + + /* Set FE bus to one, TX bus to zero */ + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 3:3) - (0 ? + 3:3) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 3:3) - (0 ? + 3:3) + 1))))))) << (0 ? + 3:3))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 3:3) - (0 ? + 3:3) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))); + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:7) - (0 ? + 7:7) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 
+ 7:7) - (0 ? + 7:7) + 1))))))) << (0 ? + 7:7))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 7:7) - (0 ? + 7:7) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))); + + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00480, + data)); + } + + gcmkONERROR( + gckHARDWARE_SetMMU(Hardware, + Hardware->kernel->mmu->area[0].pageTableLogical)); + + if (Hardware->identity.chipModel >= gcv400 + && Hardware->identity.chipModel != gcv420 + && !gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_BUG_FIXES12)) + { + if (regPMC == 0) + { + gcmkONERROR( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + Hardware->powerBaseAddress + + 0x00104, + ®PMC)); + } + + /* Disable PA clock gating. */ + regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:4) - (0 ? + 4:4) + 1))))))) << (0 ? + 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))); + } + + /* Limit 2D outstanding request. */ + if (Hardware->maxOutstandingReads) + { + gctUINT32 data; + + gcmkONERROR( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00414, + &data)); + + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (Hardware->maxOutstandingReads & 0xFF) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))); + + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00414, + data)); + } + + if (_IsHardwareMatch(Hardware, gcv1000, 0x5035)) + { + gcmkONERROR( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00414, + &data)); + + /* Disable HZ-L2. 
*/ + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:12) - (0 ? + 12:12) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:12) - (0 ? + 12:12) + 1))))))) << (0 ? + 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 12:12) - (0 ? + 12:12) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))); + + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00414, + data)); + } + + if (_IsHardwareMatch(Hardware, gcv4000, 0x5222) + || _IsHardwareMatch(Hardware, gcv2000, 0x5108) + || (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_TX_DESCRIPTOR) + && !gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_TX_DESC_CACHE_CLOCKGATE_FIX) + ) + ) + { + if (regPMC == 0) + { + gcmkONERROR( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + Hardware->powerBaseAddress + + 0x00104, + ®PMC)); + } + + /* Disable TX clock gating. */ + regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:7) - (0 ? + 7:7) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:7) - (0 ? + 7:7) + 1))))))) << (0 ? + 7:7))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 7:7) - (0 ? + 7:7) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))); + } + + if (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_NEW_GPIPE) && + !gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_GPIPE_CLOCK_GATE_FIX)) + { + if (regPMC == 0) + { + gcmkONERROR( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + Hardware->powerBaseAddress + + 0x00104, + ®PMC)); + } + + /* Disable GPIPE clock gating. */ + regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 22:22) - (0 ? + 22:22) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 22:22) - (0 ? + 22:22) + 1))))))) << (0 ? + 22:22))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 22:22) - (0 ? + 22:22) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 22:22) - (0 ? 22:22) + 1))))))) << (0 ? 
22:22))); + } + + if (_IsHardwareMatch(Hardware, gcv880, 0x5106)) + { + Hardware->kernel->timeOut = 140 * 1000; + } + + if (regPMC == 0) + { + gcmkONERROR( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + Hardware->powerBaseAddress + + 0x00104, + ®PMC)); + } + + /* Disable RA HZ clock gating. */ + regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 17:17) - (0 ? + 17:17) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 17:17) - (0 ? + 17:17) + 1))))))) << (0 ? + 17:17))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 17:17) - (0 ? + 17:17) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17))); + + /* Disable RA EZ clock gating. */ + regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 16:16) - (0 ? + 16:16) + 1))))))) << (0 ? + 16:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))); + + if ((gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_HALTI5) + && !gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_RA_CG_FIX) + ) + ) + { + if (regPMC == 0) + { + gcmkONERROR( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + Hardware->powerBaseAddress + + 0x00104, + ®PMC)); + } + + /* Disable RA clock gating. */ + regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 6:6) - (0 ? + 6:6) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 6:6) - (0 ? + 6:6) + 1))))))) << (0 ? + 6:6))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 6:6) - (0 ? + 6:6) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 
6:6))); + } + + if ((gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_HALTI5) + && !gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_SH_CLOCK_GATE_FIX) + ) + ) + { + if (regPMC == 0) + { + gcmkONERROR( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + Hardware->powerBaseAddress + + 0x00104, + ®PMC)); + } + + /* Disable SH clock gating. */ + regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 3:3) - (0 ? + 3:3) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 3:3) - (0 ? + 3:3) + 1))))))) << (0 ? + 3:3))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 3:3) - (0 ? + 3:3) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))); + } + + if (regPMC != 0) + { + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + Hardware->powerBaseAddress + + 0x00104, + regPMC)); + } + + if (_IsHardwareMatch(Hardware, gcv2000, 0x5108) + || (_IsHardwareMatch(Hardware, gcv2000, 0xffff5450)) + || _IsHardwareMatch(Hardware, gcv320, 0x5007) + || _IsHardwareMatch(Hardware, gcv320, 0x5303) + || _IsHardwareMatch(Hardware, gcv880, 0x5106) + || _IsHardwareMatch(Hardware, gcv400, 0x4645) + ) + { + /* Update GPU AXI cache atttribute. */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00008, + 0x00002200)); + } + + if ((Hardware->identity.chipRevision > 0x5420) + && gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_PIPE_3D)) + { + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x0010C, + &data)); + + /* Disable internal DFS. */ + data = +#if gcdDVFS + ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 18:18) - (0 ? + 18:18) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 18:18) - (0 ? + 18:18) + 1))))))) << (0 ? + 18:18))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 18:18) - (0 ? + 18:18) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 18:18) - (0 ? 18:18) + 1))))))) << (0 ? 18:18))) | +#endif + ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 
+ 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 16:16) - (0 ? + 16:16) + 1))))))) << (0 ? + 16:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | + ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 17:17) - (0 ? + 17:17) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 17:17) - (0 ? + 17:17) + 1))))))) << (0 ? + 17:17))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 17:17) - (0 ? + 17:17) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17))); + + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x0010C, + data)); + } + + if (_IsHardwareMatch(Hardware, gcv2500, 0x5422)) + { + gcmkONERROR(gckOS_ReadRegisterEx( + Hardware->os, Hardware->core, 0x00090, &data)); + + /* AXI switch setup to SPLIT_TO64 mode */ + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 1:0) - (0 ? + 1:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 1:0) - (0 ? + 1:0) + 1))))))) << (0 ? + 1:0))) | (((gctUINT32) (0x2 & ((gctUINT32) ((((1 ? + 1:0) - (0 ? + 1:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))); + + gcmkONERROR(gckOS_WriteRegisterEx( + Hardware->os, Hardware->core, 0x00090, data)); + } + + _ConfigurePolicyID(Hardware); + +#if gcdDEBUG_MODULE_CLOCK_GATING + _ConfigureModuleLevelClockGating(Hardware); +#endif + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the error. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckHARDWARE_QueryMemory +** +** Query the amount of memory available on the hardware. +** +** INPUT: +** +** gckHARDWARE Hardware +** Pointer to the gckHARDWARE object. 
+** +** OUTPUT: +** +** gctSIZE_T * InternalSize +** Pointer to a variable that will hold the size of the internal video +** memory in bytes. If 'InternalSize' is gcvNULL, no information of the +** internal memory will be returned. +** +** gctUINT32 * InternalBaseAddress +** Pointer to a variable that will hold the hardware's base address for +** the internal video memory. This pointer cannot be gcvNULL if +** 'InternalSize' is also non-gcvNULL. +** +** gctUINT32 * InternalAlignment +** Pointer to a variable that will hold the hardware's base address for +** the internal video memory. This pointer cannot be gcvNULL if +** 'InternalSize' is also non-gcvNULL. +** +** gctSIZE_T * ExternalSize +** Pointer to a variable that will hold the size of the external video +** memory in bytes. If 'ExternalSize' is gcvNULL, no information of the +** external memory will be returned. +** +** gctUINT32 * ExternalBaseAddress +** Pointer to a variable that will hold the hardware's base address for +** the external video memory. This pointer cannot be gcvNULL if +** 'ExternalSize' is also non-gcvNULL. +** +** gctUINT32 * ExternalAlignment +** Pointer to a variable that will hold the hardware's base address for +** the external video memory. This pointer cannot be gcvNULL if +** 'ExternalSize' is also non-gcvNULL. +** +** gctUINT32 * HorizontalTileSize +** Number of horizontal pixels per tile. If 'HorizontalTileSize' is +** gcvNULL, no horizontal pixel per tile will be returned. +** +** gctUINT32 * VerticalTileSize +** Number of vertical pixels per tile. If 'VerticalTileSize' is +** gcvNULL, no vertical pixel per tile will be returned. 
+*/ +gceSTATUS +gckHARDWARE_QueryMemory( + IN gckHARDWARE Hardware, + OUT gctSIZE_T * InternalSize, + OUT gctUINT32 * InternalBaseAddress, + OUT gctUINT32 * InternalAlignment, + OUT gctSIZE_T * ExternalSize, + OUT gctUINT32 * ExternalBaseAddress, + OUT gctUINT32 * ExternalAlignment, + OUT gctUINT32 * HorizontalTileSize, + OUT gctUINT32 * VerticalTileSize + ) +{ + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + + if (InternalSize != gcvNULL) + { + /* No internal memory. */ + *InternalSize = 0; + } + + if (ExternalSize != gcvNULL) + { + /* No external memory. */ + *ExternalSize = 0; + } + + if (HorizontalTileSize != gcvNULL) + { + /* 4x4 tiles. */ + *HorizontalTileSize = 4; + } + + if (VerticalTileSize != gcvNULL) + { + /* 4x4 tiles. */ + *VerticalTileSize = 4; + } + + /* Success. */ + gcmkFOOTER_ARG("*InternalSize=%lu *InternalBaseAddress=0x%08x " + "*InternalAlignment=0x%08x *ExternalSize=%lu " + "*ExternalBaseAddress=0x%08x *ExtenalAlignment=0x%08x " + "*HorizontalTileSize=%u *VerticalTileSize=%u", + gcmOPT_VALUE(InternalSize), + gcmOPT_VALUE(InternalBaseAddress), + gcmOPT_VALUE(InternalAlignment), + gcmOPT_VALUE(ExternalSize), + gcmOPT_VALUE(ExternalBaseAddress), + gcmOPT_VALUE(ExternalAlignment), + gcmOPT_VALUE(HorizontalTileSize), + gcmOPT_VALUE(VerticalTileSize)); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckHARDWARE_QueryChipIdentity +** +** Query the identity of the hardware. +** +** INPUT: +** +** gckHARDWARE Hardware +** Pointer to the gckHARDWARE object. +** +** OUTPUT: +** +** gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity +** Pointer to the identity structure. +** +*/ +gceSTATUS +gckHARDWARE_QueryChipIdentity( + IN gckHARDWARE Hardware, + OUT gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity + ) +{ + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + gcmkVERIFY_ARGUMENT(Identity != gcvNULL); + + *Identity = Hardware->identity; + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckHARDWARE_QueryChipOptions +** +** Query the options of the hardware. +** +** INPUT: +** +** gckHARDWARE Hardware +** Pointer to the gckHARDWARE object. +** +** OUTPUT: +** +** gcsHAL_QUERY_CHIP_OPTIONS_PTR Options +** Pointer to the identity structure. +** +*/ +gceSTATUS +gckHARDWARE_QueryChipOptions( + IN gckHARDWARE Hardware, + OUT gcsHAL_QUERY_CHIP_OPTIONS_PTR Options + ) +{ + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + gcmkVERIFY_ARGUMENT(Options != gcvNULL); + + *Options = Hardware->options; + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + + +/******************************************************************************* +** +** gckHARDWARE_SplitMemory +** +** Split a hardware specific memory address into a pool and offset. +** +** INPUT: +** +** gckHARDWARE Hardware +** Pointer to the gckHARDWARE object. +** +** gctUINT32 Address +** Address in hardware specific format. +** +** OUTPUT: +** +** gcePOOL * Pool +** Pointer to a variable that will hold the pool type for the address. +** +** gctUINT32 * Offset +** Pointer to a variable that will hold the offset for the address. +*/ +gceSTATUS +gckHARDWARE_SplitMemory( + IN gckHARDWARE Hardware, + IN gctUINT32 Address, + OUT gcePOOL * Pool, + OUT gctUINT32 * Offset + ) +{ + gcmkHEADER_ARG("Hardware=0x%x Addres=0x%08x", Hardware, Address); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + gcmkVERIFY_ARGUMENT(Pool != gcvNULL); + gcmkVERIFY_ARGUMENT(Offset != gcvNULL); + + if (Hardware->mmuVersion == 0) + { + /* Dispatch on memory type. */ + switch ((((((gctUINT32) (Address)) >> (0 ? 
31:31)) & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 31:31) - (0 ? 31:31) + 1)))))) )) + { + case 0x0: + /* System memory. */ + *Pool = gcvPOOL_SYSTEM; + break; + + case 0x1: + /* Virtual memory. */ + *Pool = gcvPOOL_VIRTUAL; + break; + + default: + /* Invalid memory type. */ + gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT); + return gcvSTATUS_INVALID_ARGUMENT; + } + + /* Return offset of address. */ + *Offset = (((((gctUINT32) (Address)) >> (0 ? 30:0)) & ((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 30:0) - (0 ? 30:0) + 1)))))) ); + } + else + { + *Pool = gcvPOOL_SYSTEM; + *Offset = Address; + } + + /* Success. */ + gcmkFOOTER_ARG("*Pool=%d *Offset=0x%08x", *Pool, *Offset); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckHARDWARE_Execute +** +** Kickstart the hardware's command processor with an initialized command +** buffer. +** +** INPUT: +** +** gckHARDWARE Hardware +** Pointer to the gckHARDWARE object. +** +** gctUINT32 Address +** Hardware address of command buffer. +** +** gctSIZE_T Bytes +** Number of bytes for the prefetch unit (until after the first LINK). +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckHARDWARE_Execute( + IN gckHARDWARE Hardware, + IN gctUINT32 Address, + IN gctSIZE_T Bytes + ) +{ + gceSTATUS status; + gctUINT32 control; + + gcmkHEADER_ARG("Hardware=0x%x Address=0x%x Bytes=%lu", + Hardware, Address, Bytes); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + + /* Enable all events. */ + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00014, ~0U)); + + /* Write address register. */ + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00654, Address)); + + /* Build control register. */ + control = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 
+ 16:16) - (0 ? + 16:16) + 1))))))) << (0 ? + 16:16))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) ((Bytes + 7) >> 3) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + /* Set big endian */ + if (Hardware->bigEndian) + { + control |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 21:20) - (0 ? + 21:20) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 21:20) - (0 ? + 21:20) + 1))))))) << (0 ? + 21:20))) | (((gctUINT32) (0x2 & ((gctUINT32) ((((1 ? + 21:20) - (0 ? + 21:20) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 21:20) - (0 ? 21:20) + 1))))))) << (0 ? 21:20))); + } + + /* Make sure writing to command buffer and previous AHB register is done. */ + gcmkONERROR(gckOS_MemoryBarrier(Hardware->os, gcvNULL)); + + /* Write control register. */ + switch (Hardware->options.secureMode) + { + case gcvSECURE_NONE: + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00658, control)); + break; + case gcvSECURE_IN_NORMAL: + +#if defined(__KERNEL__) + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00658, control)); +#endif + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x003A4, control)); + + break; +#if gcdENABLE_TRUST_APPLICATION + case gcvSECURE_IN_TA: + /* Send message to TA. */ + gcmkONERROR(gckKERNEL_SecurityStartCommand(Hardware->kernel, Address, (gctUINT32)Bytes)); + break; +#endif + default: + break; + } + + /* Increase execute count. */ + Hardware->executeCount++; + + /* Record last execute address. 
*/ + Hardware->lastExecuteAddress = Address; + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, + "Started command buffer @ 0x%08x", + Address); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/* Atomic version of Execute, for IRQ routine. */ +static gceSTATUS +gckHARDWARE_AtomicExecute( + IN gckHARDWARE Hardware, + IN gctUINT32 Address, + IN gctSIZE_T Bytes + ) +{ + gctUINT32 control; + + /* Enable all events. */ + gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00014, ~0U); + + /* Write address register. */ + gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00654, Address); + + /* Build control register. */ + control = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 16:16) - (0 ? + 16:16) + 1))))))) << (0 ? + 16:16))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) ((Bytes + 7) >> 3) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + /* Set big endian */ + if (Hardware->bigEndian) + { + control |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 21:20) - (0 ? + 21:20) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 21:20) - (0 ? + 21:20) + 1))))))) << (0 ? + 21:20))) | (((gctUINT32) (0x2 & ((gctUINT32) ((((1 ? + 21:20) - (0 ? + 21:20) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 21:20) - (0 ? 21:20) + 1))))))) << (0 ? 21:20))); + } + + /* Make sure writing to command buffer and previous AHB register is done. */ + gckOS_MemoryBarrier(Hardware->os, gcvNULL); + + /* Write control register. 
*/ + switch (Hardware->options.secureMode) + { + case gcvSECURE_NONE: + gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00658, control); + break; + case gcvSECURE_IN_NORMAL: + +#if defined(__KERNEL__) + gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00658, control); +#endif + gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x003A4, control); + + break; +#if gcdENABLE_TRUST_APPLICATION + case gcvSECURE_IN_TA: + /* Send message to TA. */ + gckKERNEL_SecurityStartCommand(Hardware->kernel, Address, (gctUINT32)Bytes); + break; +#endif + default: + break; + } + + /* Increase execute count. */ + Hardware->executeCount++; + + /* Record last execute address. */ + Hardware->lastExecuteAddress = Address; + + /* Success. */ + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckHARDWARE_WaitLink +** +** Append a WAIT/LINK command sequence at the specified location in the command +** queue. +** +** INPUT: +** +** gckHARDWARE Hardware +** Pointer to an gckHARDWARE object. +** +** gctPOINTER Logical +** Pointer to the current location inside the command queue to append +** WAIT/LINK command sequence at or gcvNULL just to query the size of the +** WAIT/LINK command sequence. +** +** gctUINT32 Address +** GPU address of current location inside the command queue. +** +** gctUINT32 Offset +** Offset into command buffer required for alignment. +** +** gctSIZE_T * Bytes +** Pointer to the number of bytes available for the WAIT/LINK command +** sequence. If 'Logical' is gcvNULL, this argument will be ignored. +** +** OUTPUT: +** +** gctSIZE_T * Bytes +** Pointer to a variable that will receive the number of bytes required +** by the WAIT/LINK command sequence. If 'Bytes' is gcvNULL, nothing will +** be returned. +** +** gctUINT32 * WaitOffset +** Pointer to a variable that will receive the offset of the WAIT command +** from the specified logcial pointer. 
+** If 'WaitOffset' is gcvNULL nothing will be returned. +** +** gctSIZE_T * WaitSize +** Pointer to a variable that will receive the number of bytes used by +** the WAIT command. If 'LinkSize' is gcvNULL nothing will be returned. +*/ +gceSTATUS +gckHARDWARE_WaitLink( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gctUINT32 Address, + IN gctUINT32 Offset, + IN OUT gctUINT32 * Bytes, + OUT gctUINT32 * WaitOffset, + OUT gctUINT32 * WaitSize + ) +{ + gceSTATUS status; + gctUINT32_PTR logical; + gctUINT32 bytes; + gctBOOL useL2; + + gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Offset=0x%08x *Bytes=%lu", + Hardware, Logical, Offset, gcmOPT_VALUE(Bytes)); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + gcmkVERIFY_ARGUMENT((Logical != gcvNULL) || (Bytes != gcvNULL)); + + useL2 = Hardware->hasL2Cache; + + /* Compute number of bytes required. */ + if (useL2) + { + bytes = gcmALIGN(Offset + 24, 8) - Offset; + } + else + { + bytes = gcmALIGN(Offset + 16, 8) - Offset; + } + + /* Cast the input pointer. */ + logical = (gctUINT32_PTR) Logical; + + if (logical != gcvNULL) + { + /* Not enough space? */ + if (*Bytes < bytes) + { + /* Command queue too small. */ + gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL); + } + + gcmkASSERT(Address != ~0U); + + /* Store the WAIT/LINK address. */ + Hardware->lastWaitLink = Address; + + /* Append WAIT(count). */ + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (Hardware->waitCount) & ((gctUINT32) ((((1 ? 
+ 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + logical++; + + if (useL2) + { + /* LoadState(AQFlush, 1), flush. */ + *logical++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *logical++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 6:6) - (0 ? + 6:6) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 6:6) - (0 ? + 6:6) + 1))))))) << (0 ? + 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 6:6) - (0 ? + 6:6) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))); + } + + /* Append LINK(2, address). */ + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 
31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (bytes >> 3) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *logical++ = Address; + + gcmkTRACE_ZONE( + gcvLEVEL_INFO, gcvZONE_HARDWARE, + "0x%08x: WAIT %u", Address, Hardware->waitCount + ); + + gcmkTRACE_ZONE( + gcvLEVEL_INFO, gcvZONE_HARDWARE, + "0x%08x: LINK 0x%08x, #%lu", + Address + 8, Address, bytes + ); + + if (WaitOffset != gcvNULL) + { + /* Return the offset pointer to WAIT command. */ + *WaitOffset = 0; + } + + if (WaitSize != gcvNULL) + { + /* Return number of bytes used by the WAIT command. */ + if (useL2) + { + *WaitSize = 16; + } + else + { + *WaitSize = 8; + } + } + } + + if (Bytes != gcvNULL) + { + /* Return number of bytes required by the WAIT/LINK command + ** sequence. */ + *Bytes = bytes; + } + + /* Success. */ + gcmkFOOTER_ARG("*Bytes=%lu *WaitOffset=0x%x *WaitSize=%lu", + gcmOPT_VALUE(Bytes), gcmOPT_VALUE(WaitOffset), + gcmOPT_VALUE(WaitSize)); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckHARDWARE_End +** +** Append an END command at the specified location in the command queue. +** +** INPUT: +** +** gckHARDWARE Hardware +** Pointer to an gckHARDWARE object. +** +** gctPOINTER Logical +** Pointer to the current location inside the command queue to append +** END command at or gcvNULL just to query the size of the END command. +** +** gctUINT32 Address +** GPU address of current location inside the command queue. +** +** gctSIZE_T * Bytes +** Pointer to the number of bytes available for the END command. If +** 'Logical' is gcvNULL, this argument will be ignored. 
+** +** OUTPUT: +** +** gctSIZE_T * Bytes +** Pointer to a variable that will receive the number of bytes required +** for the END command. If 'Bytes' is gcvNULL, nothing will be returned. +*/ +gceSTATUS +gckHARDWARE_End( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gctUINT32 Address, + IN OUT gctUINT32 * Bytes + ) +{ + gctUINT32_PTR logical = (gctUINT32_PTR) Logical; + gctUINT32 address; + gceSTATUS status; + + gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x *Bytes=%lu", + Hardware, Logical, gcmOPT_VALUE(Bytes)); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL)); + + if (Logical != gcvNULL) + { + if (*Bytes < 8) + { + /* Command queue too small. */ + gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL); + } + + /* Append END. */ + logical[0] = + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x02 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); + + /* Record the count of execution which is finised by this END. */ + logical[1] = + Hardware->executeCount; + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "0x%x: END", Logical); + + /* Make sure the CPU writes out the data to memory. */ + gcmkONERROR( + gckOS_MemoryBarrier(Hardware->os, Logical)); + + + gcmkASSERT(Address != ~0U); + address = Address; + + Hardware->lastEnd = address; + } + + if (Bytes != gcvNULL) + { + /* Return number of bytes required by the END command. */ + *Bytes = 8; + } + + /* Success. */ + gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes)); + return gcvSTATUS_OK; + +OnError: + /* Return the status. 
*/ + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckHARDWARE_ChipEnable( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gceCORE_3D_MASK ChipEnable, + IN OUT gctSIZE_T * Bytes + ) +{ + gckOS os = Hardware->os; + gctUINT32_PTR logical = (gctUINT32_PTR) Logical; + gceSTATUS status; + + gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x ChipEnable=0x%x *Bytes=%lu", + Hardware, Logical, ChipEnable, gcmOPT_VALUE(Bytes)); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL)); + + if (Logical != gcvNULL) + { + if (*Bytes < 8) + { + /* Command queue too small. */ + gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL); + } + + /* Append CHIPENABLE. */ + gcmkWRITE_MEMORY( + logical, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x0D & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | ChipEnable + ); + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "0x%x: CHIPENABLE 0x%x", Logical, ChipEnable); + } + + if (Bytes != gcvNULL) + { + /* Return number of bytes required by the CHIPENABLE command. */ + *Bytes = 8; + } + + /* Success. */ + gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes)); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckHARDWARE_Nop +** +** Append a NOP command at the specified location in the command queue. +** +** INPUT: +** +** gckHARDWARE Hardware +** Pointer to an gckHARDWARE object. +** +** gctPOINTER Logical +** Pointer to the current location inside the command queue to append +** NOP command at or gcvNULL just to query the size of the NOP command. 
+** +** gctSIZE_T * Bytes +** Pointer to the number of bytes available for the NOP command. If +** 'Logical' is gcvNULL, this argument will be ignored. +** +** OUTPUT: +** +** gctSIZE_T * Bytes +** Pointer to a variable that will receive the number of bytes required +** for the NOP command. If 'Bytes' is gcvNULL, nothing will be returned. +*/ +gceSTATUS +gckHARDWARE_Nop( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN OUT gctSIZE_T * Bytes + ) +{ + gctUINT32_PTR logical = (gctUINT32_PTR) Logical; + gceSTATUS status; + + gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x *Bytes=%lu", + Hardware, Logical, gcmOPT_VALUE(Bytes)); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL)); + + if (Logical != gcvNULL) + { + if (*Bytes < 8) + { + /* Command queue too small. */ + gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL); + } + + /* Append NOP. */ + logical[0] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x03 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "0x%x: NOP", Logical); + } + + if (Bytes != gcvNULL) + { + /* Return number of bytes required by the NOP command. */ + *Bytes = 8; + } + + /* Success. */ + gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes)); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckHARDWARE_Event +** +** Append an EVENT command at the specified location in the command queue. +** +** INPUT: +** +** gckHARDWARE Hardware +** Pointer to an gckHARDWARE object. 
+** +** gctPOINTER Logical +** Pointer to the current location inside the command queue to append +** the EVENT command at or gcvNULL just to query the size of the EVENT +** command. +** +** gctUINT8 Event +** Event ID to program. +** +** gceKERNEL_WHERE FromWhere +** Location of the pipe to send the event. +** +** gctSIZE_T * Bytes +** Pointer to the number of bytes available for the EVENT command. If +** 'Logical' is gcvNULL, this argument will be ignored. +** +** OUTPUT: +** +** gctSIZE_T * Bytes +** Pointer to a variable that will receive the number of bytes required +** for the EVENT command. If 'Bytes' is gcvNULL, nothing will be +** returned. +*/ +gceSTATUS +gckHARDWARE_Event( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gctUINT8 Event, + IN gceKERNEL_WHERE FromWhere, + IN OUT gctUINT32 * Bytes + ) +{ + gctUINT size; + gctUINT32 destination = 0; + gctUINT32_PTR logical = (gctUINT32_PTR) Logical; + gceSTATUS status; + gctBOOL blt; + gctBOOL extraEventStates; + + gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Event=%u FromWhere=%d *Bytes=%lu", + Hardware, Logical, Event, FromWhere, gcmOPT_VALUE(Bytes)); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL)); + gcmkVERIFY_ARGUMENT(Event < 32); + + if (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_BLT_ENGINE)) + { + /* Send all event from blt. */ + if (FromWhere == gcvKERNEL_PIXEL) + { + FromWhere = gcvKERNEL_BLT; + } + } + + blt = FromWhere == gcvKERNEL_BLT ? gcvTRUE : gcvFALSE; + + /* Determine the size of the command. */ + + extraEventStates = Hardware->extraEventStates && (FromWhere == gcvKERNEL_PIXEL); + + size = extraEventStates + ? gcmALIGN(8 + (1 + 5) * 4, 8) /* EVENT + 5 STATES */ + : 8; + + if (blt) + { + size += 16; + } + + if (Logical != gcvNULL) + { + if (*Bytes < size) + { + /* Command queue too small. 
*/ + gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL); + } + + switch (FromWhere) + { + case gcvKERNEL_COMMAND: + /* From command processor. */ + destination = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 5:5) - (0 ? + 5:5) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 5:5) - (0 ? + 5:5) + 1))))))) << (0 ? + 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 5:5) - (0 ? + 5:5) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))); + break; + + case gcvKERNEL_PIXEL: + /* From pixel engine. */ + destination = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 6:6) - (0 ? + 6:6) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 6:6) - (0 ? + 6:6) + 1))))))) << (0 ? + 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 6:6) - (0 ? + 6:6) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))); + break; + + case gcvKERNEL_BLT: + destination = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:7) - (0 ? + 7:7) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:7) - (0 ? + 7:7) + 1))))))) << (0 ? + 7:7))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 7:7) - (0 ? + 7:7) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))); + break; + + default: + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + if (blt) + { + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 
25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + } + + /* Append EVENT(Event, destination). */ + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *logical++ + = ((((gctUINT32) (destination)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? 
+ 4:0))) | (((gctUINT32) ((gctUINT32) (Event) & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))); + + if (blt) + { + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + } + + + /* Make sure the event ID gets written out before GPU can access it. 
*/ + gcmkONERROR( + gckOS_MemoryBarrier(Hardware->os, logical + 1)); + +#if gcmIS_DEBUG(gcdDEBUG_TRACE) + { + gctPHYS_ADDR_T phys; + gckOS_GetPhysicalAddress(Hardware->os, Logical, &phys); + gckOS_CPUPhysicalToGPUPhysical(Hardware->os, phys, &phys); + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, + "0x%08x: EVENT %d", phys, Event); + } +#endif + + /* Append the extra states. These are needed for the chips that do not + ** support back-to-back events due to the async interface. The extra + ** states add the necessary delay to ensure that event IDs do not + ** collide. */ + if (extraEventStates) + { + *logical++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0100) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 
25:16))); + *logical++ = 0; + *logical++ = 0; + *logical++ = 0; + *logical++ = 0; + *logical++ = 0; + } + +#if gcdINTERRUPT_STATISTIC + if (Event < gcmCOUNTOF(Hardware->kernel->eventObj->queues)) + { + gckOS_AtomSetMask(Hardware->pendingEvent, 1 << Event); + } +#endif + } + + if (Bytes != gcvNULL) + { + /* Return number of bytes required by the EVENT command. */ + *Bytes = size; + } + + /* Success. */ + gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes)); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckHARDWARE_PipeSelect +** +** Append a PIPESELECT command at the specified location in the command queue. +** +** INPUT: +** +** gckHARDWARE Hardware +** Pointer to an gckHARDWARE object. +** +** gctPOINTER Logical +** Pointer to the current location inside the command queue to append +** the PIPESELECT command at or gcvNULL just to query the size of the +** PIPESELECT command. +** +** gcePIPE_SELECT Pipe +** Pipe value to select. +** +** gctSIZE_T * Bytes +** Pointer to the number of bytes available for the PIPESELECT command. +** If 'Logical' is gcvNULL, this argument will be ignored. +** +** OUTPUT: +** +** gctSIZE_T * Bytes +** Pointer to a variable that will receive the number of bytes required +** for the PIPESELECT command. If 'Bytes' is gcvNULL, nothing will be +** returned. +*/ +gceSTATUS +gckHARDWARE_PipeSelect( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gcePIPE_SELECT Pipe, + IN OUT gctUINT32 * Bytes + ) +{ + gctUINT32_PTR logical = (gctUINT32_PTR) Logical; + gceSTATUS status; + + gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Pipe=%d *Bytes=%lu", + Hardware, Logical, Pipe, gcmOPT_VALUE(Bytes)); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL)); + + /* Append a PipeSelect. 
*/ + if (Logical != gcvNULL) + { + gctUINT32 flush, stall; + + if (*Bytes < 32) + { + /* Command queue too small. */ + gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL); + } + + flush = (Pipe == gcvPIPE_2D) + ? + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 1:1) - (0 ? + 1:1) + 1))))))) << (0 ? + 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) + : ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 3:3) - (0 ? + 3:3) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 3:3) - (0 ? + 3:3) + 1))))))) << (0 ? + 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 3:3) - (0 ? + 3:3) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))); + + stall = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + /* LoadState(AQFlush, 1), flush. */ + gcmkONERROR(gckOS_WriteMemory( + Hardware->os, + logical, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? 
+ 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + )); + + gcmkONERROR(gckOS_WriteMemory( + Hardware->os, + logical + 1, + flush + )); + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, + "0x%x: FLUSH 0x%x", logical, flush); + + /* LoadState(AQSempahore, 1), stall. */ + gcmkONERROR(gckOS_WriteMemory( + Hardware->os, + logical + 2, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? 
+ 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + )); + + gcmkONERROR(gckOS_WriteMemory( + Hardware->os, + logical + 3, + stall + )); + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, + "0x%x: SEMAPHORE 0x%x", logical + 2, stall); + + /* Stall, stall. */ + gcmkONERROR(gckOS_WriteMemory( + Hardware->os, + logical + 4, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + )); + + gcmkONERROR(gckOS_WriteMemory( + Hardware->os, + logical + 5, + stall + )); + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, + "0x%x: STALL 0x%x", logical + 4, stall); + + /* LoadState(AQPipeSelect, 1), pipe. */ + gcmkONERROR(gckOS_WriteMemory( + Hardware->os, + logical + 6, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? 
+ 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + )); + + gcmkONERROR(gckOS_WriteMemory( + Hardware->os, + logical + 7, + (Pipe == gcvPIPE_2D) + ? 0x1 + : 0x0 + )); + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, + "0x%x: PIPE %d", logical + 6, Pipe); + } + + if (Bytes != gcvNULL) + { + /* Return number of bytes required by the PIPESELECT command. */ + *Bytes = 32; + } + + /* Success. */ + gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes)); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckHARDWARE_Link +** +** Append a LINK command at the specified location in the command queue. +** +** INPUT: +** +** gckHARDWARE Hardware +** Pointer to an gckHARDWARE object. +** +** gctPOINTER Logical +** Pointer to the current location inside the command queue to append +** the LINK command at or gcvNULL just to query the size of the LINK +** command. +** +** gctUINT32 FetchAddress +** Hardware address of destination of LINK. +** +** gctSIZE_T FetchSize +** Number of bytes in destination of LINK. +** +** gctSIZE_T * Bytes +** Pointer to the number of bytes available for the LINK command. If +** 'Logical' is gcvNULL, this argument will be ignored. +** +** OUTPUT: +** +** gctSIZE_T * Bytes +** Pointer to a variable that will receive the number of bytes required +** for the LINK command. If 'Bytes' is gcvNULL, nothing will be returned. 
+*/ +gceSTATUS +gckHARDWARE_Link( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gctUINT32 FetchAddress, + IN gctUINT32 FetchSize, + IN OUT gctUINT32 * Bytes, + OUT gctUINT32 * Low, + OUT gctUINT32 * High + ) +{ + gceSTATUS status; + gctSIZE_T bytes; + gctUINT32 link; + gctUINT32_PTR logical = (gctUINT32_PTR) Logical; + + gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x FetchAddress=0x%x FetchSize=%lu " + "*Bytes=%lu", + Hardware, Logical, FetchAddress, FetchSize, + gcmOPT_VALUE(Bytes)); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL)); + + if (Logical != gcvNULL) + { + if (*Bytes < 8) + { + /* Command queue too small. */ + gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL); + } + + gcmkONERROR( + gckOS_WriteMemory(Hardware->os, logical + 1, FetchAddress)); + + if (High) + { + *High = FetchAddress; + } + + /* Make sure the address got written before the LINK command. */ + gcmkONERROR( + gckOS_MemoryBarrier(Hardware->os, logical + 1)); + + /* Compute number of 64-byte aligned bytes to fetch. */ + bytes = gcmALIGN(FetchAddress + FetchSize, 64) - FetchAddress; + + /* Append LINK(bytes / 8), FetchAddress. */ + link = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (bytes >> 3) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 
15:0))); + + gcmkONERROR( + gckOS_WriteMemory(Hardware->os, logical, link)); + + if (Low) + { + *Low = link; + } + + /* Memory barrier. */ + gcmkONERROR( + gckOS_MemoryBarrier(Hardware->os, logical)); + } + + if (Bytes != gcvNULL) + { + /* Return number of bytes required by the LINK command. */ + *Bytes = 8; + } + + /* Success. */ + gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes)); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckHARDWARE_FenceRender( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gctUINT32 FenceAddress, + IN gctUINT64 FenceData, + IN OUT gctUINT32 * Bytes + ) +{ + gckOS os = Hardware->os; + gctUINT32_PTR logical = (gctUINT32_PTR)Logical; + + gctUINT32 dataLow = (gctUINT32)FenceData; + gctUINT32 dataHigh = (gctUINT32)(FenceData >> 32); + + if (logical) + { + gcmkWRITE_MEMORY( + logical, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E1A) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 
25:16))) + ); + + gcmkWRITE_MEMORY( + logical, + FenceAddress + ); + + gcmkWRITE_MEMORY( + logical, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E26) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + ); + + gcmkWRITE_MEMORY( + logical, + dataHigh + ); + + gcmkWRITE_MEMORY( + logical, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E1B) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + ); + + gcmkWRITE_MEMORY( + logical, + dataLow + ); + } + else + { + *Bytes = gcdRENDER_FENCE_LENGTH; + } + + return gcvSTATUS_OK; +} + +gceSTATUS +gckHARDWARE_FenceBlt( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gctUINT32 FenceAddress, + IN gctUINT64 FenceData, + IN OUT gctUINT32 * Bytes + ) +{ + gckOS os = Hardware->os; + gctUINT32_PTR logical = (gctUINT32_PTR)Logical; + + gctUINT32 dataLow = (gctUINT32)FenceData; + gctUINT32 dataHigh = (gctUINT32)(FenceData >> 32); + + if (logical) + { + gcmkWRITE_MEMORY( + logical, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + ); + + gcmkWRITE_MEMORY( + logical, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? 
+ 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) + ); + + gcmkWRITE_MEMORY( + logical, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x5029) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + ); + + gcmkWRITE_MEMORY( + logical, + FenceAddress + ); + + gcmkWRITE_MEMORY( + logical, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502D) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 
15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + ); + + gcmkWRITE_MEMORY( + logical, + dataHigh + ); + + gcmkWRITE_MEMORY( + logical, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502A) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + ); + + gcmkWRITE_MEMORY( + logical, + dataLow + ); + + gcmkWRITE_MEMORY( + logical, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? 
+ 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + ); + + gcmkWRITE_MEMORY( + logical, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) + ); + } + else + { + *Bytes = gcdBLT_FENCE_LENGTH; + } + + return gcvSTATUS_OK; +} + +gceSTATUS +gckHARDWARE_Fence( + IN gckHARDWARE Hardware, + IN gceENGINE Engine, + IN gctPOINTER Logical, + IN gctUINT32 FenceAddress, + IN gctUINT64 FenceData, + IN OUT gctUINT32 * Bytes + ) +{ + if (Engine == gcvENGINE_RENDER) + { + return gckHARDWARE_FenceRender(Hardware, Logical, FenceAddress, FenceData, Bytes); + } + else + { + return gckHARDWARE_FenceBlt(Hardware, Logical, FenceAddress, FenceData, Bytes); + } +} + +/******************************************************************************* +** +** gckHARDWARE_UpdateQueueTail +** +** Update the tail of the command queue. +** +** INPUT: +** +** gckHARDWARE Hardware +** Pointer to an gckHARDWARE object. +** +** gctPOINTER Logical +** Logical address of the start of the command queue. +** +** gctUINT32 Offset +** Offset into the command queue of the tail (last command). +** +** OUTPUT: +** +** Nothing. 
+*/ +gceSTATUS +gckHARDWARE_UpdateQueueTail( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gctUINT32 Offset + ) +{ + gceSTATUS status; + + gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Offset=0x%08x", + Hardware, Logical, Offset); + + /* Verify the hardware. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + + /* Force a barrier. */ + gcmkONERROR( + gckOS_MemoryBarrier(Hardware->os, Logical)); + + /* Notify gckKERNEL object of change. */ + gcmkONERROR( + gckKERNEL_Notify(Hardware->kernel, + gcvNOTIFY_COMMAND_QUEUE, + gcvFALSE)); + + if (status == gcvSTATUS_CHIP_NOT_READY) + { + gcmkONERROR(gcvSTATUS_DEVICE); + } + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckHARDWARE_ConvertLogical +** +** Convert a logical system address into a hardware specific address. +** +** INPUT: +** +** gckHARDWARE Hardware +** Pointer to an gckHARDWARE object. +** +** gctPOINTER Logical +** Logical address to convert. +** +** gctBOOL InUserSpace +** gcvTRUE if the memory in user space. +** +** gctUINT32* Address +** Return hardware specific address. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckHARDWARE_ConvertLogical( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gctBOOL InUserSpace, + OUT gctUINT32 * Address + ) +{ + gctUINT32 address; + gceSTATUS status; + gctPHYS_ADDR_T physical; + + gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x InUserSpace=%d", + Hardware, Logical, InUserSpace); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + gcmkVERIFY_ARGUMENT(Logical != gcvNULL); + gcmkVERIFY_ARGUMENT(Address != gcvNULL); + + /* Convert logical address into a physical address. 
*/ + if (InUserSpace) + { + gcmkONERROR(gckOS_UserLogicalToPhysical(Hardware->os, Logical, &physical)); + } + else + { + gcmkONERROR(gckOS_GetPhysicalAddress(Hardware->os, Logical, &physical)); + } + + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical(Hardware->os, physical, &physical)); + + gcmkSAFECASTPHYSADDRT(address, physical); + + /* For old MMU, get GPU address according to baseAddress. */ + if (Hardware->mmuVersion == 0) + { + /* Subtract base address to get a GPU address. */ + gcmkASSERT(address >= Hardware->baseAddress); + address -= Hardware->baseAddress; + } + + /* Return hardware specific address. */ + *Address = (Hardware->mmuVersion == 0) + ? + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:31) - (0 ? + 31:31) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:31) - (0 ? + 31:31) + 1))))))) << (0 ? + 31:31))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 31:31) - (0 ? + 31:31) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 30:0) - (0 ? + 30:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 30:0) - (0 ? + 30:0) + 1))))))) << (0 ? + 30:0))) | (((gctUINT32) ((gctUINT32) (address) & ((gctUINT32) ((((1 ? + 30:0) - (0 ? + 30:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0))) + : address; + + /* Success. */ + gcmkFOOTER_ARG("*Address=0x%08x", *Address); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +static void +_ResumeWaitLinkFE( + gckHARDWARE Hardware + ) +{ + gctUINT32 resume; + gctUINT32 bytes; + gctUINT32 idle; + + /* Make sure FE is idle. */ + do + { + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00004, + &idle); + } + while (idle != 0x7FFFFFFF); + + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00664, + &resume); + + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00664, + &resume); + + /* Determine the wait-link command size. 
*/ + bytes = Hardware->hasL2Cache ? 24 : 16; + + /* Start Command Parser. */ + gckHARDWARE_AtomicExecute(Hardware, resume, bytes); +} + +/******************************************************************************* +** +** gckHARDWARE_Interrupt +** +** Process an interrupt. +** +** INPUT: +** +** gckHARDWARE Hardware +** Pointer to an gckHARDWARE object. +** +** gctBOOL InterruptValid +** If gcvTRUE, this function will read the interrupt acknowledge +** register, stores the data, and return whether or not the interrupt +** is ours or not. If gcvFALSE, this functions will read the interrupt +** acknowledge register and combine it with any stored value to handle +** the event notifications. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckHARDWARE_Interrupt( + IN gckHARDWARE Hardware, + IN gctBOOL InterruptValid + ) +{ + gckEVENT eventObj; + gctUINT32 data = 0; + gctUINT32 dataEx; + gceSTATUS status; + gceSTATUS statusEx; + + /* Extract gckEVENT object. */ + eventObj = Hardware->kernel->eventObj; + + if (InterruptValid) + { + /* + * Notice: + * In isr here. + * We should return success when either FE or AsyncFE reports correct + * interrupts, so that isr can wake up threadRoutine for either FE. + * That means, only need return ERROR when both FEs reports ERROR. + */ + /* Read AQIntrAcknowledge register. */ + status = gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00010, + &data); + + if (gcmIS_ERROR(status)) + { + goto OnError; + } + + if (data == 0) + { + /* Not our interrupt. */ + status = gcvSTATUS_NOT_OUR_INTERRUPT; + } + else + { +#if gcdINTERRUPT_STATISTIC + gckOS_AtomClearMask(Hardware->pendingEvent, data); +#endif + if (data & (1 << 29)) + { + /* Event ID 29 is not a normal event, but for invalidating pipe. */ + _ResumeWaitLinkFE(Hardware); + data &= ~(1 << 29); + } + + /* Inform gckEVENT of the interrupt. */ + status = gckEVENT_Interrupt(eventObj, data); + } + + if (!Hardware->hasAsyncFe) + { + /* Done. 
*/ + goto OnError; + } + + /* Read BLT interrupt. */ + statusEx = gckOS_ReadRegisterEx( + Hardware->os, + Hardware->core, + 0x000D4, + &dataEx + ); + + if (gcmIS_ERROR(statusEx)) + { + /* + * Do not overwrite status here, so that former status from + * AQIntrAck is returned. + */ + goto OnError; + } + + /* + * This bit looks useless now, we can use this check if this interrupt + * is from FE. + */ + dataEx &= ~0x80000000; + + /* + * Descriptor fetched, update counter. + * We can't do this at dataEx != 0 only, because read HW acknowledge + * register will overwrite 0x007E4. If one + * interrupt we don't read it, we will miss it for ever. + */ + gckFE_UpdateAvaiable(Hardware, &Hardware->kernel->asyncCommand->fe); + + /* Do not need report NOT_OUT_INTERRUPT error if dataEx is 0. */ + if (dataEx) + { + statusEx = gckEVENT_Interrupt(Hardware->kernel->asyncEvent, dataEx); + + if (gcmIS_SUCCESS(statusEx)) + { + /* At least AsyncFE is success, treat all as success. */ + status = gcvSTATUS_OK; + } + } + } + else + { + /* Handle events. */ + status = gckEVENT_Notify(eventObj, 0); + + if (Hardware->hasAsyncFe) + { + status = gckEVENT_Notify(Hardware->kernel->asyncEvent, 0); + } + } + +OnError: + /* Return the status. */ + return status; +} + +/******************************************************************************* +** +** gckHARDWARE_QueryCommandBuffer +** +** Query the command buffer alignment and number of reserved bytes. +** +** INPUT: +** +** gckHARDWARE Harwdare +** Pointer to an gckHARDWARE object. +** +** OUTPUT: +** +** gctSIZE_T * Alignment +** Pointer to a variable receiving the alignment for each command. +** +** gctSIZE_T * ReservedHead +** Pointer to a variable receiving the number of reserved bytes at the +** head of each command buffer. +** +** gctSIZE_T * ReservedTail +** Pointer to a variable receiving the number of bytes reserved at the +** tail of each command buffer. 
+*/ +gceSTATUS +gckHARDWARE_QueryCommandBuffer( + IN gckHARDWARE Hardware, + IN gceENGINE Engine, + OUT gctUINT32 * Alignment, + OUT gctUINT32 * ReservedHead, + OUT gctUINT32 * ReservedTail + ) +{ + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + + if (Alignment != gcvNULL) + { + /* Align every 8 bytes. */ + *Alignment = 8; + } + + if (ReservedHead != gcvNULL) + { + /* Reserve space for SelectPipe(). */ + *ReservedHead = 32; + } + + if (ReservedTail != gcvNULL) + { + if (Engine == gcvENGINE_RENDER) + { + gcmkFOOTER_NO(); + return gcvSTATUS_NOT_SUPPORTED; + } + else + { + *ReservedTail = gcdBLT_FENCE_LENGTH; + } + } + + /* Success. */ + gcmkFOOTER_ARG("*Alignment=%lu *ReservedHead=%lu *ReservedTail=%lu", + gcmOPT_VALUE(Alignment), gcmOPT_VALUE(ReservedHead), + gcmOPT_VALUE(ReservedTail)); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckHARDWARE_QuerySystemMemory +** +** Query the command buffer alignment and number of reserved bytes. +** +** INPUT: +** +** gckHARDWARE Harwdare +** Pointer to an gckHARDWARE object. +** +** OUTPUT: +** +** gctSIZE_T * SystemSize +** Pointer to a variable that receives the maximum size of the system +** memory. +** +** gctUINT32 * SystemBaseAddress +** Poinetr to a variable that receives the base address for system +** memory. +*/ +gceSTATUS +gckHARDWARE_QuerySystemMemory( + IN gckHARDWARE Hardware, + OUT gctSIZE_T * SystemSize, + OUT gctUINT32 * SystemBaseAddress + ) +{ + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + + if (SystemSize != gcvNULL) + { + /* Maximum system memory can be 2GB. */ + *SystemSize = 1U << 31; + } + + if (SystemBaseAddress != gcvNULL) + { + /* Set system memory base address. */ + *SystemBaseAddress = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:31) - (0 ? 
+ 31:31) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:31) - (0 ? + 31:31) + 1))))))) << (0 ? + 31:31))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 31:31) - (0 ? + 31:31) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))); + } + + /* Success. */ + gcmkFOOTER_ARG("*SystemSize=%lu *SystemBaseAddress=%lu", + gcmOPT_VALUE(SystemSize), gcmOPT_VALUE(SystemBaseAddress)); + return gcvSTATUS_OK; +} + +#if gcdENABLE_3D +/******************************************************************************* +** +** gckHARDWARE_QueryShaderCaps +** +** Query the shader capabilities. +** +** INPUT: +** +** Nothing. +** +** OUTPUT: +** +** gctUINT * VertexUniforms +** Pointer to a variable receiving the number of uniforms in the vertex +** shader. +** +** gctUINT * FragmentUniforms +** Pointer to a variable receiving the number of uniforms in the +** fragment shader. +** +** gctBOOL * UnifiedUnforms +** Pointer to a variable receiving whether the uniformas are unified. +*/ +gceSTATUS +gckHARDWARE_QueryShaderCaps( + IN gckHARDWARE Hardware, + OUT gctUINT * VertexUniforms, + OUT gctUINT * FragmentUniforms, + OUT gctBOOL * UnifiedUnforms + ) +{ + gctBOOL unifiedConst; + gctUINT32 vsConstMax; + gctUINT32 psConstMax; + gctUINT32 vsConstBase; + gctUINT32 psConstBase; + gctUINT32 ConstMax; + gctBOOL halti5; + + gcmkHEADER_ARG("Hardware=0x%x VertexUniforms=0x%x " + "FragmentUniforms=0x%x UnifiedUnforms=0x%x", + Hardware, VertexUniforms, + FragmentUniforms, UnifiedUnforms); + + halti5 = gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_HALTI5); + + {if (Hardware->identity.numConstants > 256){ unifiedConst = gcvTRUE; +if (halti5){ vsConstBase = 0xD000; + psConstBase = 0xD800; +}else{ vsConstBase = 0xC000; + psConstBase = 0xC000; +}if ((Hardware->identity.chipModel == gcv880) && ((Hardware->identity.chipRevision & 0xfff0) == 0x5120)){ vsConstMax = 512; + psConstMax = 64; + ConstMax = 576; +}else{ vsConstMax = gcmMIN(512, Hardware->identity.numConstants - 64); 
+ psConstMax = gcmMIN(512, Hardware->identity.numConstants - 64); + ConstMax = Hardware->identity.numConstants; +}}else if (Hardware->identity.numConstants == 256){ if (Hardware->identity.chipModel == gcv2000 && (Hardware->identity.chipRevision == 0x5118 || Hardware->identity.chipRevision == 0x5140)) { unifiedConst = gcvFALSE; + vsConstBase = 0x1400; + psConstBase = 0x1C00; + vsConstMax = 256; + psConstMax = 64; + ConstMax = 320; + } else { unifiedConst = gcvFALSE; + vsConstBase = 0x1400; + psConstBase = 0x1C00; + vsConstMax = 256; + psConstMax = 256; + ConstMax = 512; + }}else{ unifiedConst = gcvFALSE; + vsConstBase = 0x1400; + psConstBase = 0x1C00; + vsConstMax = 168; + psConstMax = 64; + ConstMax = 232; +}}; + + + if (VertexUniforms != gcvNULL) + { + /* Return the vs shader const count. */ + *VertexUniforms = vsConstMax; + } + + if (FragmentUniforms != gcvNULL) + { + /* Return the ps shader const count. */ + *FragmentUniforms = psConstMax; + } + + if (UnifiedUnforms != gcvNULL) + { + /* Return whether the uniformas are unified. */ + *UnifiedUnforms = unifiedConst; + } + + psConstBase = psConstBase; + vsConstBase = vsConstBase; + ConstMax = ConstMax; + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} +#endif + +/******************************************************************************* +** +** gckHARDWARE_SetMMU +** +** Set the page table base address. +** +** INPUT: +** +** gckHARDWARE Harwdare +** Pointer to an gckHARDWARE object. +** +** gctPOINTER Logical +** Logical address of the page table. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckHARDWARE_SetMMU( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical + ) +{ + gceSTATUS status; + gctUINT32 address = 0; + gctUINT32 idle; + gctUINT32 timer = 0, delay = 1; + gctPHYS_ADDR_T physical; + + gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x", Hardware, Logical); + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + + if (Hardware->mmuVersion == 0) + { + gcmkVERIFY_ARGUMENT(Logical != gcvNULL); + + /* Convert the logical address into physical address. */ + gcmkONERROR(gckOS_GetPhysicalAddress(Hardware->os, Logical, &physical)); + + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical(Hardware->os, physical, &physical)); + + gcmkSAFECASTPHYSADDRT(address, physical); + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, + "Setting page table to 0x%08X", + address); + + /* Write the AQMemoryFePageTable register. */ + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00400, + address)); + + /* Write the AQMemoryRaPageTable register. */ + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00410, + address)); + + /* Write the AQMemoryTxPageTable register. */ + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00404, + address)); + + + /* Write the AQMemoryPePageTable register. */ + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00408, + address)); + + /* Write the AQMemoryPezPageTable register. */ + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x0040C, + address)); + } + else if (Hardware->options.enableMMU && + (Hardware->options.secureMode != gcvSECURE_IN_TA)) + { + gctBOOL hwMmuDisabled = gcvTRUE; + + /* Force Disable MMU to guarantee setup command be read from physical addr */ + if (Hardware->options.secureMode == gcvSECURE_IN_NORMAL) + { + gctUINT32 regMmuCtrl = 0; + gcmkONERROR(gckOS_ReadRegisterEx( + Hardware->os, + Hardware->core, + 0x00388, + ®MmuCtrl + )); + + hwMmuDisabled = ((((((gctUINT32) (regMmuCtrl)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) == 0x1) + ? 
gcvFALSE + : gcvTRUE; + } + else + { + gctUINT32 regMmuCtrl = 0; + + gcmkONERROR(gckOS_ReadRegisterEx( + Hardware->os, + Hardware->core, + 0x0018C, + ®MmuCtrl + )); + + hwMmuDisabled = ((((((gctUINT32) (regMmuCtrl)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) == 0x1) + ? gcvFALSE + : gcvTRUE; + } + + if (hwMmuDisabled) + { + /* Prepared command sequence contains an END, + ** so update lastEnd and store executeCount to END command. + */ + gcsHARDWARE_FUNCTION *function = &Hardware->functions[gcvHARDWARE_FUNCTION_MMU]; + gctUINT32_PTR endLogical = (gctUINT32_PTR)function->endLogical; + + Hardware->lastEnd = function->endAddress; + + *(endLogical + 1) = Hardware->executeCount + 1; + + if (Hardware->options.secureMode == gcvSECURE_IN_NORMAL) + { + gctUINT32_PTR safeLogical = Hardware->kernel->mmu->safePageLogical; + gctUINT32 extSafeAddress; + /* Set up base address of page table array. */ + gcmkONERROR(gckOS_WriteRegisterEx( + Hardware->os, + Hardware->core, + 0x0038C, + (gctUINT32)(Hardware->pagetableArray.address & 0xFFFFFFFF) + )); + + gcmkONERROR(gckOS_WriteRegisterEx( + Hardware->os, + Hardware->core, + 0x00390, + (gctUINT32)((Hardware->pagetableArray.address >> 32) & 0xFFFFFFFF) + )); + + gcmkONERROR(gckOS_WriteRegisterEx( + Hardware->os, + Hardware->core, + 0x00394, + 1 + )); + + gcmkONERROR( + gckOS_GetPhysicalAddress(Hardware->os, safeLogical, &physical)); + + gcmkVERIFY_OK( + gckOS_CPUPhysicalToGPUPhysical(Hardware->os, physical, &physical)); + + address = (gctUINT32)(physical & 0xFFFFFFFF); + extSafeAddress = (gctUINT32)(physical >> 32); + + if (address & 0x3F) + { + gcmkONERROR(gcvSTATUS_NOT_ALIGNED); + } + + /* more than 40bit physical address */ + if (extSafeAddress & 0xFFFFFF00) + { + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + } + + gckOS_WriteRegisterEx( + Hardware->os, + Hardware->core, + 0x0039C, + address + ); + + gckOS_WriteRegisterEx( + Hardware->os, + Hardware->core, + 
0x00398, + address + ); + + gckOS_WriteRegisterEx( + Hardware->os, + Hardware->core, + 0x003A0, + (((((gctUINT32) (~0U)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) ((gctUINT32)extSafeAddress) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) &((((gctUINT32) (~0U)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:31) - (0 ? + 31:31) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:31) - (0 ? + 31:31) + 1))))))) << (0 ? + 31:31))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 31:31) - (0 ? + 31:31) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)))) + | (((((gctUINT32) (~0U)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) ((gctUINT32)extSafeAddress) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) &((((gctUINT32) (~0U)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:15) - (0 ? + 15:15) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:15) - (0 ? + 15:15) + 1))))))) << (0 ? + 15:15))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 15:15) - (0 ? + 15:15) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:15) - (0 ? 15:15) + 1))))))) << (0 ? 15:15)))) + ); + } + + /* Execute prepared command sequence. 
*/ + gcmkONERROR(gckHARDWARE_Execute( + Hardware, + function->address, + function->bytes + )); + +#if gcdLINK_QUEUE_SIZE + { + gcuQUEUEDATA data; + + gcmkVERIFY_OK(gckOS_GetProcessID(&data.linkData.pid)); + + data.linkData.start = function->address; + data.linkData.end = function->address + function->bytes; + data.linkData.linkLow = 0; + data.linkData.linkHigh = 0; + + gckQUEUE_Enqueue(&Hardware->linkQueue, &data); + } +#endif + + /* Wait until MMU configure finishes. */ + do + { + gckOS_Delay(Hardware->os, delay); + + gcmkONERROR(gckOS_ReadRegisterEx( + Hardware->os, + Hardware->core, + 0x00004, + &idle)); + + timer += delay; + delay *= 2; + +#if gcdGPU_TIMEOUT + if (timer >= Hardware->kernel->timeOut) + { + gckHARDWARE_DumpGPUState(Hardware); + gckCOMMAND_DumpExecutingBuffer(Hardware->kernel->command); + + /* Even if hardware is not reset correctly, let software + ** continue to avoid software stuck. Software will timeout again + ** and try to recover GPU in next timeout. + */ + gcmkONERROR(gcvSTATUS_DEVICE); + } +#endif + } + while (!(((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) )); + + /* Enable MMU. */ + if (Hardware->options.secureMode == gcvSECURE_IN_NORMAL) + { + gcmkONERROR(gckOS_WriteRegisterEx( + Hardware->os, + Hardware->core, + 0x00388, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) + )); + } + else + { + gcmkONERROR(gckOS_WriteRegisterEx( + Hardware->os, + Hardware->core, + 0x0018C, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? 
+ 0:0))) | (((gctUINT32) ((gctUINT32) (gcvTRUE) & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) + )); + } + } + } + + /* Return the status. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckHARDWARE_FlushAsyncMMU( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN OUT gctUINT32 * Bytes + ) +{ + gctUINT32 semaphore, stall; + gctUINT32_PTR buffer; + gceSTATUS status; + + gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x *Bytes=%lu", + Hardware, Logical, gcmOPT_VALUE(Bytes)); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL)); + + if (Logical != gcvNULL) + { + buffer = (gctUINT32_PTR) Logical; + + gcmkONERROR(gckOS_WriteMemory( + Hardware->os, + buffer, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 
25:16))) + )); + + gcmkONERROR(gckOS_WriteMemory( + Hardware->os, + buffer + 1, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) + )); + + gcmkONERROR(gckOS_WriteMemory( + Hardware->os, + buffer + 2, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + )); + + gcmkONERROR(gckOS_WriteMemory( + Hardware->os, + buffer + 3, + (((((gctUINT32) (~0U)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:4) - (0 ? + 4:4) + 1))))))) << (0 ? + 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:4) - (0 ? + 4:4) + 1))))))) << (0 ? + 4:4))) & ((((gctUINT32) (~0U)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:7) - (0 ? + 7:7) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? + 7:7) - (0 ? + 7:7) + 1))))))) << (0 ? + 7:7))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 7:7) - (0 ? + 7:7) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7)))) + )); + + gcmkONERROR(gckOS_WriteMemory( + Hardware->os, + buffer + 4, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + )); + + semaphore = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x10 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 
12:8))); + + if (Hardware->stallFEPrefetch) + { + semaphore |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 29:28) - (0 ? + 29:28) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 29:28) - (0 ? + 29:28) + 1))))))) << (0 ? + 29:28))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? + 29:28) - (0 ? + 29:28) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 29:28) - (0 ? 29:28) + 1))))))) << (0 ? 29:28))); + } + + gcmkONERROR(gckOS_WriteMemory( + Hardware->os, + buffer + 5, + semaphore)); + + gcmkONERROR(gckOS_WriteMemory( + Hardware->os, + buffer + 6, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + )); + + stall = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x10 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + if (Hardware->stallFEPrefetch) + { + stall |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 29:28) - (0 ? + 29:28) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 29:28) - (0 ? + 29:28) + 1))))))) << (0 ? + 29:28))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? + 29:28) - (0 ? + 29:28) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 29:28) - (0 ? 29:28) + 1))))))) << (0 ? 
29:28))); + } + + gcmkONERROR(gckOS_WriteMemory( + Hardware->os, + buffer + 7, + stall)); + + gcmkONERROR(gckOS_WriteMemory( + Hardware->os, + buffer + 8, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + )); + + gcmkONERROR(gckOS_WriteMemory( + Hardware->os, + buffer + 9, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) + )); + } + + if (Bytes != gcvNULL) + { + /* Return number of bytes required by the PIPESELECT command. */ + *Bytes = 40; + } + + /* Success. */ + gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes)); + return gcvSTATUS_OK; + +OnError: + /* Success. 
*/ + gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes)); + return status; +} + +/******************************************************************************* +** +** gckHARDWARE_FlushMMU +** +** Flush the page table. +** +** INPUT: +** +** gckHARDWARE Harwdare +** Pointer to an gckHARDWARE object. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckHARDWARE_FlushMMU( + IN gckHARDWARE Hardware + ) +{ + gceSTATUS status; + gckCOMMAND command; + gctUINT32_PTR buffer; + gctUINT32 bufferSize; + gctPOINTER pointer = gcvNULL; + gctUINT32 flushSize; + gctUINT32 count, offset; + gctUINT32 address; + gctUINT32 semaphore, stall; + + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + + /* Verify the gckCOMMAND object pointer. */ + command = Hardware->kernel->command; + + /* Flush the memory controller. */ + if (Hardware->mmuVersion == 0) + { + gcmkONERROR(gckCOMMAND_Reserve( + command, 8, &pointer, &bufferSize + )); + + buffer = (gctUINT32_PTR) pointer; + + buffer[0] + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E04) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? 
+ 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + buffer[1] + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 1:1) - (0 ? + 1:1) + 1))))))) << (0 ? + 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 2:2) - (0 ? + 2:2) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 2:2) - (0 ? + 2:2) + 1))))))) << (0 ? + 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 2:2) - (0 ? + 2:2) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 3:3) - (0 ? + 3:3) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 3:3) - (0 ? + 3:3) + 1))))))) << (0 ? + 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 3:3) - (0 ? + 3:3) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:4) - (0 ? + 4:4) + 1))))))) << (0 ? + 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 
4:4))); + + gcmkONERROR(gckCOMMAND_Execute(command, 8)); + } + else + { + gctUINT32 prefetchCount = 4; + gctBOOL bltEngine = gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_BLT_ENGINE); + + flushSize = 10 * 4; + offset = 2; + + if (bltEngine) + { + flushSize += 4 * 4; + prefetchCount += 2; + } + + gcmkONERROR(gckCOMMAND_Reserve( + command, flushSize, &pointer, &bufferSize + )); + + buffer = (gctUINT32_PTR) pointer; + + count = ((gctUINT)bufferSize - flushSize + 7) >> 3; + + address = command->address + command->offset; + + /* LINK to next slot to flush FE FIFO. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (prefetchCount) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = address + offset * gcmSIZEOF(gctUINT32); + + /* Flush MMU cache. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *buffer++ + = (((((gctUINT32) (~0U)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:4) - (0 ? + 4:4) + 1))))))) << (0 ? + 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:4) - (0 ? + 4:4) + 1))))))) << (0 ? + 4:4))) & ((((gctUINT32) (~0U)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:7) - (0 ? + 7:7) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:7) - (0 ? + 7:7) + 1))))))) << (0 ? + 7:7))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 7:7) - (0 ? + 7:7) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7)))); + + if (bltEngine) + { + /* Blt lock. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? 
+ 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + } + + /* Arm the PE-FE Semaphore. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + semaphore = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 
4:0))); + + if (Hardware->stallFEPrefetch) + { + semaphore |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 29:28) - (0 ? + 29:28) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 29:28) - (0 ? + 29:28) + 1))))))) << (0 ? + 29:28))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? + 29:28) - (0 ? + 29:28) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 29:28) - (0 ? 29:28) + 1))))))) << (0 ? 29:28))); + } + + if (bltEngine) + { + semaphore |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x10 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + } + else + { + semaphore |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + } + + *buffer++ + = semaphore; + + /* STALL FE until PE is done flushing. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); + + stall = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))); + + if (Hardware->stallFEPrefetch) + { + stall |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 29:28) - (0 ? 
+ 29:28) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 29:28) - (0 ? + 29:28) + 1))))))) << (0 ? + 29:28))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? + 29:28) - (0 ? + 29:28) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 29:28) - (0 ? 29:28) + 1))))))) << (0 ? 29:28))); + } + + if (bltEngine) + { + stall |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x10 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + } + else + { + stall |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + } + + *buffer++ + = stall; + + if (bltEngine) + { + /* Blt unlock. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? 
+ 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + } + + /* LINK to next slot to flush FE FIFO. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (count) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = address + flushSize; + + gcmkONERROR(gckCOMMAND_Execute(command, flushSize)); + } + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckHARDWARE_SetMMUStates( + IN gckHARDWARE Hardware, + IN gctPOINTER MtlbAddress, + IN gceMMU_MODE Mode, + IN gctPOINTER SafeAddress, + IN gctPOINTER Logical, + IN OUT gctUINT32 * Bytes + ) +{ + gceSTATUS status; + gctUINT32 config, address; + gctUINT32 extMtlb, extSafeAddress, configEx = 0; + gctPHYS_ADDR_T physical; + gctUINT32_PTR buffer; + gctBOOL ace; + gctUINT32 reserveBytes = 0; + gcsMMU_TABLE_ARRAY_ENTRY * entry; + + gctBOOL config2D; + + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + gcmkVERIFY_ARGUMENT(Hardware->mmuVersion != 0); + + entry = (gcsMMU_TABLE_ARRAY_ENTRY *) Hardware->pagetableArray.logical; + + ace = gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_ACE); + + switch (Hardware->options.secureMode) + { + case gcvSECURE_IN_NORMAL: + reserveBytes = 8 + 4 * 4; + break; + case gcvSECURE_NONE: + reserveBytes = 16 + 4 * 4; + if (ace) + { + reserveBytes += 8; + } + break; + case gcvSECURE_IN_TA: + default: + gcmkASSERT(gcvFALSE); + gcmkPRINT("%s(%d): secureMode is wrong", __FUNCTION__, __LINE__); + break; + } + + config2D = gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_PIPE_3D) + && gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_PIPE_2D); + + if (config2D) + { + reserveBytes += + /* Pipe Select. */ + 4 * 4 + /* Configure MMU States. */ + + 4 * 4 + /* Semaphore stall */ + + 4 * 8; + + if (ace) + { + reserveBytes += 8; + } + } + + reserveBytes += 8; + + /* Convert logical address into physical address. */ + gcmkONERROR( + gckOS_GetPhysicalAddress(Hardware->os, MtlbAddress, &physical)); + + gcmkVERIFY_OK( + gckOS_CPUPhysicalToGPUPhysical(Hardware->os, physical, &physical)); + + config = (gctUINT32)(physical & 0xFFFFFFFF); + extMtlb = (gctUINT32)(physical >> 32); + + /* more than 40bit physical address */ + if (extMtlb & 0xFFFFFF00) + { + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + } + + gcmkONERROR( + gckOS_GetPhysicalAddress(Hardware->os, SafeAddress, &physical)); + + gcmkVERIFY_OK( + gckOS_CPUPhysicalToGPUPhysical(Hardware->os, physical, &physical)); + + address = (gctUINT32)(physical & 0xFFFFFFFF); + extSafeAddress = (gctUINT32)(physical >> 32); + + if (address & 0x3F) + { + gcmkONERROR(gcvSTATUS_NOT_ALIGNED); + } + + /* more than 40bit physical address */ + if (extSafeAddress & 0xFFFFFF00) + { + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + } + + if (ace) + { + configEx = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (extSafeAddress) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (extMtlb) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))); + } + + switch (Mode) + { + case gcvMMU_MODE_1K: + if (config & 0x3FF) + { + gcmkONERROR(gcvSTATUS_NOT_ALIGNED); + } + + config |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + + break; + + case gcvMMU_MODE_4K: + if (config & 0xFFF) + { + gcmkONERROR(gcvSTATUS_NOT_ALIGNED); + } + + config |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + + break; + + default: + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + if (Logical != gcvNULL) + { + buffer = Logical; + + if (Hardware->options.secureMode == gcvSECURE_IN_NORMAL) + { + /* Setup page table array entry. */ + if (Hardware->bigEndian) + { + entry->low = gcmBSWAP32(config); + entry->high = gcmBSWAP32(extMtlb); + } + else + { + entry->low = config; + entry->high = extMtlb; + } + + /* Setup command buffer to load index 0 of page table array. 
*/ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x006B) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *buffer++ + = (((((gctUINT32) (~0U)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) &((((gctUINT32) (~0U)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 16:16) - (0 ? + 16:16) + 1))))))) << (0 ? + 16:16))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)))); + } + else + { + gcmkASSERT(Hardware->options.secureMode == gcvSECURE_NONE); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? 
+ 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *buffer++ = config; + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0060) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 
25:16))); + + *buffer++ = address; + + if (ace) + { + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0068) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *buffer++ + = configEx; + } + } + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E12) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? 
+ 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 16:16) - (0 ? + 16:16) + 1))))))) << (0 ? + 16:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))); + + do{*buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 
+ 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));} while(0); +; + + + if (config2D) + { + /* LoadState(AQPipeSelect, 1), pipe. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 
15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *buffer++ = 0x1; + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *buffer++ = config; + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? 
+ 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0060) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *buffer++ = address; + + if (ace) + { + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0068) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *buffer++ + = configEx; + } + + do{*buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? 
+ 31:27) + 1))))))) << (0 ? + 31:27))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));} while(0); +; + + + /* LoadState(AQPipeSelect, 1), pipe. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *buffer++ = 0x0; + + do{*buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 
+ 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));} while(0); +; + + } + + } + + if (Bytes != gcvNULL) + { + *Bytes = reserveBytes; + } + + /* Return the status. */ + gcmkFOOTER_NO(); + return status; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +#if gcdPROCESS_ADDRESS_SPACE +/******************************************************************************* +** +** gckHARDWARE_ConfigMMU +** +** Append a MMU Configuration command sequence at the specified location in the command +** queue. That command sequence consists of mmu configuration, LINK and WAIT/LINK. +** LINK is fetched and paresed with new mmu configuration. +** +** If MMU Configuration is not changed between commit, change last WAIT/LINK to +** link to ENTRY. +** +** -+-----------+-----------+----------------------------------------- +** | WAIT/LINK | WAIT/LINK | +** -+-----------+-----------+----------------------------------------- +** | /|\ +** \|/ | +** +--------------------+ +** | ENTRY | ... | LINK | +** +--------------------+ +** +** If MMU Configuration is changed between commit, change last WAIT/LINK to +** link to MMU CONFIGURATION command sequence, and there are an EVNET and +** an END at the end of this command sequence, when interrupt handler +** receives this event, it will start FE at ENTRY to continue the command +** buffer execution. +** +** -+-----------+-------------------+---------+---------+-----------+-- +** | WAIT/LINK | MMU CONFIGURATION | EVENT | END | WAIT/LINK | +** -+-----------+-------------------+---------+---------+-----------+-- +** | /|\ /|\ +** +-------------+ | +** +--------------------+ +** | ENTRY | ... | LINK | +** +--------------------+ +** INPUT: +** +** gckHARDWARE Hardware +** Pointer to an gckHARDWARE object. 
+** +** gctPOINTER Logical +** Pointer to the current location inside the command queue to append +** command sequence at or gcvNULL just to query the size of the +** command sequence. +** +** gctPOINTER MtlbLogical +** Pointer to the current Master TLB. +** +** gctUINT32 Offset +** Offset into command buffer required for alignment. +** +** gctSIZE_T * Bytes +** Pointer to the number of bytes available for the command +** sequence. If 'Logical' is gcvNULL, this argument will be ignored. +** +** OUTPUT: +** +** gctSIZE_T * Bytes +** Pointer to a variable that will receive the number of bytes required +** by the command sequence. If 'Bytes' is gcvNULL, nothing will +** be returned. +** +** gctUINT32 * WaitLinkOffset +** Pointer to a variable that will receive the offset of the WAIT/LINK command +** from the specified logcial pointer. +** If 'WaitLinkOffset' is gcvNULL nothing will be returned. +** +** gctSIZE_T * WaitLinkBytes +** Pointer to a variable that will receive the number of bytes used by +** the WAIT command. +** If 'WaitLinkBytes' is gcvNULL nothing will be returned. +*/ +gceSTATUS +gckHARDWARE_ConfigMMU( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gctPOINTER MtlbLogical, + IN gctUINT32 Offset, + IN OUT gctSIZE_T * Bytes, + OUT gctSIZE_T * WaitLinkOffset, + OUT gctSIZE_T * WaitLinkBytes + ) +{ + gceSTATUS status; + gctSIZE_T bytes, bytesAligned; + gctUINT32 config; + gctUINT32_PTR buffer = (gctUINT32_PTR) Logical; + gctPHYS_ADDR_T physical; + gctUINT32 address; + gctUINT32 event; + gctSIZE_T stCmds; /* semaphore stall cmd size */; + + gcmkHEADER_ARG("Hardware=0x%08X Logical=0x%08x MtlbLogical=0x%08X", + Hardware, Logical, MtlbLogical); + + stCmds = gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_TEX_CACHE_FLUSH_FIX) ? 0 : 4; + + bytes + /* Semaphore stall states. */ + = stCmds * 4 + /* Flush cache states. */ + + 20 * 4 + /* MMU configuration states. */ + + 6 * 4 + /* EVENT. */ + + 2 * 4 + /* END. */ + + 2 * 4 + /* WAIT/LINK. 
*/ + + 4 * 4; + + /* Compute number of bytes required. */ + bytesAligned = gcmALIGN(Offset + bytes, 8) - Offset; + + if (buffer != gcvNULL) + { + if (MtlbLogical == gcvNULL) + { + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + /* Get physical address of this command buffer segment. */ + gcmkONERROR(gckOS_GetPhysicalAddress(Hardware->os, buffer, &physical)); + + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical( + Hardware->os, + physical, + &physical + )); + + gcmkSAFECASTPHYSADDRT(address, physical); + + /* Get physical address of Master TLB. */ + gcmkONERROR(gckOS_GetPhysicalAddress(Hardware->os, MtlbLogical, &physical)); + + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical( + Hardware->os, + physical, + &physical + )); + + gcmkSAFECASTPHYSADDRT(config, physical); + + config |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:4) - (0 ? + 4:4) + 1))))))) << (0 ? + 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))); + + if (stCmds) + { + /* Arm the PE-FE Semaphore. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? 
+ 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + /* STALL FE until PE is done flushing. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? 
+ 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + } + + /* Flush cache. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 3:3) - (0 ? + 3:3) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 3:3) - (0 ? + 3:3) + 1))))))) << (0 ? + 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 3:3) - (0 ? + 3:3) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 1:1) - (0 ? + 1:1) + 1))))))) << (0 ? + 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 
1:1))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 2:2) - (0 ? + 2:2) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 2:2) - (0 ? + 2:2) + 1))))))) << (0 ? + 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 2:2) - (0 ? + 2:2) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 5:5) - (0 ? + 5:5) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 5:5) - (0 ? + 5:5) + 1))))))) << (0 ? + 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 5:5) - (0 ? + 5:5) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + AQ_FLUSH_VSHL1_CACHE) - (0 ? + AQ_FLUSH_VSHL1_CACHE) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + AQ_FLUSH_VSHL1_CACHE) - (0 ? + AQ_FLUSH_VSHL1_CACHE) + 1))))))) << (0 ? + AQ_FLUSH_VSHL1_CACHE))) | (((gctUINT32) (AQ_FLUSH_VSHL1_CACHE_ENABLE & ((gctUINT32) ((((1 ? + AQ_FLUSH_VSHL1_CACHE) - (0 ? + AQ_FLUSH_VSHL1_CACHE) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + AQ_FLUSH_VSHL1_CACHE) - (0 ? + AQ_FLUSH_VSHL1_CACHE) + 1))))))) << (0 ? AQ_FLUSH_VSHL1_CACHE))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + AQ_FLUSH_PSHL1_CACHE) - (0 ? + AQ_FLUSH_PSHL1_CACHE) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + AQ_FLUSH_PSHL1_CACHE) - (0 ? + AQ_FLUSH_PSHL1_CACHE) + 1))))))) << (0 ? + AQ_FLUSH_PSHL1_CACHE))) | (((gctUINT32) (AQ_FLUSH_PSHL1_CACHE_ENABLE & ((gctUINT32) ((((1 ? + AQ_FLUSH_PSHL1_CACHE) - (0 ? + AQ_FLUSH_PSHL1_CACHE) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + AQ_FLUSH_PSHL1_CACHE) - (0 ? + AQ_FLUSH_PSHL1_CACHE) + 1))))))) << (0 ? 
AQ_FLUSH_PSHL1_CACHE))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 6:6) - (0 ? + 6:6) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 6:6) - (0 ? + 6:6) + 1))))))) << (0 ? + 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 6:6) - (0 ? + 6:6) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))); + + /* Flush VTS in separate command */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:4) - (0 ? + 4:4) + 1))))))) << (0 ? + 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))); + + /* Flush tile status cache. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? 
+ 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0594) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + + /* Arm the PE-FE Semaphore. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 
+ 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + /* STALL FE until PE is done flushing. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 
12:8))); + + /* LINK to next slot to flush FE FIFO. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = address + (stCmds + 12) * gcmSIZEOF(gctUINT32); + + /* Configure MMU. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *buffer++ + = (((((gctUINT32) (~0U)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? + 4:4) - (0 ? + 4:4) + 1))))))) << (0 ? + 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:4) - (0 ? + 4:4) + 1))))))) << (0 ? + 4:4))) & ((((gctUINT32) (~0U)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:7) - (0 ? + 7:7) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:7) - (0 ? + 7:7) + 1))))))) << (0 ? + 7:7))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 7:7) - (0 ? + 7:7) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7)))); + + /* Arm the PE-FE Semaphore. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 
4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + /* STALL FE until PE is done flushing. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + /* LINK to next slot to flush FE FIFO. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? 
+ 15:0))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = physical + (stCmds + 20) * 4; + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *buffer++ + = config; + + /* Arm the PE-FE Semaphore. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 
25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + /* STALL FE until PE is done flushing. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); + + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 
+ 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + /* Event 29. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + event = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 6:6) - (0 ? + 6:6) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 6:6) - (0 ? + 6:6) + 1))))))) << (0 ? + 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 6:6) - (0 ? + 6:6) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))); + event = ((((gctUINT32) (event)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) ((gctUINT32) (29) & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))); + + *buffer++ + = event; + + /* Append END. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x02 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); + } + + if (Bytes != gcvNULL) + { + *Bytes = bytesAligned; + } + + if (WaitLinkOffset != gcvNULL) + { + *WaitLinkOffset = bytes - 4 * 4; + } + + if (WaitLinkBytes != gcvNULL) + { + *WaitLinkBytes = 4 * 4; + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} +#endif + +/******************************************************************************* +** +** gckHARDWARE_BuildVirtualAddress +** +** Build a virtual address. +** +** INPUT: +** +** gckHARDWARE Harwdare +** Pointer to an gckHARDWARE object. +** +** gctUINT32 Index +** Index into page table. +** +** gctUINT32 Offset +** Offset into page. +** +** OUTPUT: +** +** gctUINT32 * Address +** Pointer to a variable receiving te hardware address. +*/ +gceSTATUS +gckHARDWARE_BuildVirtualAddress( + IN gckHARDWARE Hardware, + IN gctUINT32 Index, + IN gctUINT32 Offset, + OUT gctUINT32 * Address + ) +{ + gcmkHEADER_ARG("Hardware=0x%x Index=%u Offset=%u", Hardware, Index, Offset); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + gcmkVERIFY_ARGUMENT(Address != gcvNULL); + + /* Build virtual address. */ + *Address = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:31) - (0 ? + 31:31) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:31) - (0 ? + 31:31) + 1))))))) << (0 ? + 31:31))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 31:31) - (0 ? + 31:31) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 30:0) - (0 ? + 30:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 30:0) - (0 ? + 30:0) + 1))))))) << (0 ? + 30:0))) | (((gctUINT32) ((gctUINT32) (Offset | (Index << 12)) & ((gctUINT32) ((((1 ? + 30:0) - (0 ? 
+ 30:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0))); + + /* Success. */ + gcmkFOOTER_ARG("*Address=0x%08x", *Address); + return gcvSTATUS_OK; +} + +gceSTATUS +gckHARDWARE_GetIdle( + IN gckHARDWARE Hardware, + IN gctBOOL Wait, + OUT gctUINT32 * Data + ) +{ + gceSTATUS status; + gctUINT32 idle = 0; + gctINT retry, poll, pollCount; + gctUINT32 address; + + gcmkHEADER_ARG("Hardware=0x%x Wait=%d", Hardware, Wait); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + gcmkVERIFY_ARGUMENT(Data != gcvNULL); + + + /* If we have to wait, try 100 polls per millisecond. */ + pollCount = Wait ? 100 : 1; + + /* At most, try for 1 second. */ + for (retry = 0; retry < 1000; ++retry) + { + /* If we have to wait, try 100 polls per millisecond. */ + for (poll = pollCount; poll > 0; --poll) + { + /* Read register. */ + gcmkONERROR( + gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00004, &idle)); + + /* Read the current FE address. */ + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00664, + &address)); + + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00664, + &address)); + + /* See if we have to wait for FE idle. */ + if (_IsGPUIdle(idle) + && (address == Hardware->lastEnd + 8) + ) + { + /* FE is idle. */ + break; + } + } + + /* Check if we need to wait for FE and FE is busy. */ + if (Wait && !_IsGPUIdle(idle)) + { + /* Wait a little. */ + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, + "%s: Waiting for idle: 0x%08X", + __FUNCTION__, idle); + + gcmkVERIFY_OK(gckOS_Delay(Hardware->os, 1)); + } + else + { + break; + } + } + + /* Return idle to caller. */ + *Data = idle; + +#if defined(EMULATOR) + /* Wait a little while until CModel FE gets END. + * END is supposed to be appended by caller. + */ + gckOS_Delay(Hardware->os, 100); +#endif + + /* Success. */ + gcmkFOOTER_ARG("*Data=0x%08x", *Data); + return gcvSTATUS_OK; + +OnError: + /* Return the status. 
*/ + gcmkFOOTER(); + return status; +} + +/* Flush the caches. */ +gceSTATUS +gckHARDWARE_Flush( + IN gckHARDWARE Hardware, + IN gceKERNEL_FLUSH Flush, + IN gctPOINTER Logical, + IN OUT gctUINT32 * Bytes + ) +{ + gctUINT32 pipe; + gctUINT32 flush = 0; + gctUINT32 flushVST = 0; + gctBOOL flushTileStatus; + gctUINT32_PTR logical = (gctUINT32_PTR) Logical; + gceSTATUS status; + gctBOOL halti5; + gctBOOL flushICache; + gctBOOL flushTXDescCache; + gctBOOL flushTFB; + gctBOOL hwTFB; + gctBOOL blt; + gctBOOL peTSFlush; + + gcmkHEADER_ARG("Hardware=0x%x Flush=0x%x Logical=0x%x *Bytes=%lu", + Hardware, Flush, Logical, gcmOPT_VALUE(Bytes)); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + + /* Get current pipe. */ + pipe = Hardware->kernel->command->pipeSelect; + + halti5 = gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_HALTI5); + + hwTFB = gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_HW_TFB); + + blt = gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_BLT_ENGINE); + + peTSFlush = gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_PE_TILE_CACHE_FLUSH_FIX); + + /* Flush tile status cache. */ + flushTileStatus = Flush & gcvFLUSH_TILE_STATUS; + + /* Flush Icache for halti5 hardware as we dont do it when program or context switches*/ + flushICache = (Flush & gcvFLUSH_ICACHE) && halti5; + + /* Flush texture descriptor cache */ + flushTXDescCache = Flush & gcvFLUSH_TXDESC; + + /* Flush USC cache for TFB client */ + flushTFB = (Flush & gcvFLUSH_TFBHEADER) && hwTFB; + + /* Flush TFB for vertex buffer */ + if (hwTFB && (Flush & gcvFLUSH_VERTEX)) + { + flushTFB = gcvTRUE; + } + + /* Flush 3D color cache. */ + if ((Flush & gcvFLUSH_COLOR) && (pipe == 0x0)) + { + flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 1:1) - (0 ? + 1:1) + 1))))))) << (0 ? + 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 
1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))); + } + + /* Flush 3D depth cache. */ + if ((Flush & gcvFLUSH_DEPTH) && (pipe == 0x0)) + { + flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + } + + /* Flush 3D texture cache. */ + if ((Flush & gcvFLUSH_TEXTURE) && (pipe == 0x0)) + { + flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 2:2) - (0 ? + 2:2) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 2:2) - (0 ? + 2:2) + 1))))))) << (0 ? + 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 2:2) - (0 ? + 2:2) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))); + flushVST = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:4) - (0 ? + 4:4) + 1))))))) << (0 ? + 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))); + } + + /* Flush 2D cache. */ + if ((Flush & gcvFLUSH_2D) && (pipe == 0x1)) + { + flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 3:3) - (0 ? + 3:3) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 3:3) - (0 ? + 3:3) + 1))))))) << (0 ? + 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 3:3) - (0 ? + 3:3) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))); + } + + /* Flush L2 cache. */ + if ((Flush & gcvFLUSH_L2) && (pipe == 0x0)) + { + flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 6:6) - (0 ? + 6:6) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 6:6) - (0 ? + 6:6) + 1))))))) << (0 ? + 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 6:6) - (0 ? + 6:6) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 6:6) - (0 ? 
6:6) + 1))))))) << (0 ? 6:6))); + } + + /* Vertex buffer and texture could be touched by SHL1 for SSBO and image load/store */ + if ((Flush & (gcvFLUSH_VERTEX | gcvFLUSH_TEXTURE)) && (pipe == 0x0)) + { + flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 5:5) - (0 ? + 5:5) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 5:5) - (0 ? + 5:5) + 1))))))) << (0 ? + 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 5:5) - (0 ? + 5:5) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 10:10) - (0 ? + 10:10) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 10:10) - (0 ? + 10:10) + 1))))))) << (0 ? + 10:10))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 10:10) - (0 ? + 10:10) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 10:10) - (0 ? 10:10) + 1))))))) << (0 ? 10:10))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 11:11) - (0 ? + 11:11) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 11:11) - (0 ? + 11:11) + 1))))))) << (0 ? + 11:11))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 11:11) - (0 ? + 11:11) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11))); + } + + /* See if there is a valid flush. */ + if ((flush == 0) && + (flushTileStatus == gcvFALSE) && + (flushICache == gcvFALSE) && + (flushTXDescCache == gcvFALSE) && + (flushTFB == gcvFALSE)) + { + if (Bytes != gcvNULL) + { + /* No bytes required. */ + *Bytes = 0; + } + } + else + { + gctUINT32 reserveBytes = 0; + gctBOOL txCacheFix = gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_TEX_CACHE_FLUSH_FIX) + ? gcvTRUE : gcvFALSE; + + /* Determine reserve bytes. */ + if (!txCacheFix || flushICache || flushTXDescCache) + { + /* Semaphore/Stall */ + reserveBytes += blt ? 
(8 * gcmSIZEOF(gctUINT32)) : (4 * gcmSIZEOF(gctUINT32)); + } + + if (flush) + { + reserveBytes += 2 * gcmSIZEOF(gctUINT32); + } + + if (flushVST) + { + reserveBytes += 2 * gcmSIZEOF(gctUINT32); + } + + if (flushTileStatus) + { + reserveBytes += (!peTSFlush && blt) ? 6 * gcmSIZEOF(gctUINT32) : 2 * gcmSIZEOF(gctUINT32); + } + + if (flushICache) + { + reserveBytes += 2 * gcmSIZEOF(gctUINT32); + } + + if (flushTXDescCache) + { + reserveBytes += 2 * gcmSIZEOF(gctUINT32); + } + + if (flushTFB) + { + reserveBytes += 2 * gcmSIZEOF(gctUINT32); + } + + /* Semaphore/Stall */ + reserveBytes += blt ? (8 * gcmSIZEOF(gctUINT32)) : (4 * gcmSIZEOF(gctUINT32)); + + /* Copy to command queue. */ + if (Logical != gcvNULL) + { + if (*Bytes < reserveBytes) + { + /* Command queue too small. */ + gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL); + } + + if (!txCacheFix || flushICache || flushTXDescCache) + { + if (blt) + { + /* Semaphore from FE to BLT. */ + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 
15:0))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x10 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + /* Stall from FE to BLT. */ + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x10 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? 
+ 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + } + else + { + /* Semaphore. */ + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 
+ 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + /* Stall. */ + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + } + } + + if (flush) + { + /* Append LOAD_STATE to AQFlush. */ + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *logical++ = flush; + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "0x%x: FLUSH 0x%x", logical - 1, flush); + } + + if (flushVST) + { + /* Append LOAD_STATE to AQFlush. */ + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *logical++ = flushVST; + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "0x%x: FLUSH 0x%x", logical - 1, flush); + } + + if (flushTileStatus) + { + if (!peTSFlush && blt) + { + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? 
+ 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502B) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? 
+ 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 
0:0))); + } + else + { + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0594) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + } + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, + "0x%x: FLUSH TILE STATUS 0x%x", logical - 1, logical[-1]); + } + + if (flushICache) + { + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x022C) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 1:1) - (0 ? + 1:1) + 1))))))) << (0 ? + 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 2:2) - (0 ? + 2:2) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 2:2) - (0 ? + 2:2) + 1))))))) << (0 ? + 2:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 2:2) - (0 ? + 2:2) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 3:3) - (0 ? + 3:3) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 3:3) - (0 ? + 3:3) + 1))))))) << (0 ? + 3:3))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 3:3) - (0 ? + 3:3) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 
+ 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:4) - (0 ? + 4:4) + 1))))))) << (0 ? + 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))); + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, + "0x%x: FLUSH Icache 0x%x", logical - 1, logical[-1]); + + } + + if (flushTXDescCache) + { + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x5311) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:28) - (0 ? + 31:28) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:28) - (0 ? + 31:28) + 1))))))) << (0 ? + 31:28))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 31:28) - (0 ? + 31:28) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 
31:28))); + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, + "0x%x: FLUSH Icache 0x%x", logical - 1, logical[-1]); + + } + + if (flushTFB) + { + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x7003) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *logical++ + = 0x12345678; + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, + "0x%x: FLUSH TFB cache 0x%x", logical - 1, logical[-1]); + + } + + if (blt) + { + /* Semaphore from FE to BLT. */ + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? 
+ 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? 
+ 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x10 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + /* Stall from FE to BLT. */ + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x10 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x502E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + } + else + { + /* Semaphore. */ + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 
15:0))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + + /* Stall. */ + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); + } + } + + if (Bytes != gcvNULL) + { + /* bytes required. */ + *Bytes = reserveBytes; + } + } + + /* Success. */ + gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes)); + return gcvSTATUS_OK; + +OnError: + /* Return the status. 
*/ + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckHARDWARE_SetFastClear( + IN gckHARDWARE Hardware, + IN gctINT Enable, + IN gctINT Compression + ) +{ +#if gcdENABLE_3D + gctUINT32 debug; + gceSTATUS status; + gceCOMPRESSION_OPTION compression = (Compression == -1) ? gcvCOMPRESSION_OPTION_DEFAULT : (gceCOMPRESSION_OPTION)Compression; + + gcmkHEADER_ARG("Hardware=0x%x Enable=%d Compression=%d", + Hardware, Enable, Compression); + + /* Only process if fast clear is available. */ + if (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_FAST_CLEAR)) + { + if (Enable == -1) + { + /* Determine automatic value for fast clear. */ + Enable = ((Hardware->identity.chipModel != gcv500) + || (Hardware->identity.chipRevision >= 3) + ) ? 1 : 0; + } + + if (compression == gcvCOMPRESSION_OPTION_DEFAULT) + { + /* Determine automatic value for compression. */ + if (Enable) + { + if (gcvSTATUS_FALSE == gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_ZCOMPRESSION)) + { + compression &= ~gcvCOMPRESSION_OPTION_DEPTH; + } + } + else + { + compression = gcvCOMPRESSION_OPTION_NONE; + } + } + + /* Read AQMemoryDebug register. */ + gcmkONERROR( + gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00414, &debug)); + + /* Set fast clear bypass. */ + debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 20:20) - (0 ? + 20:20) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 20:20) - (0 ? + 20:20) + 1))))))) << (0 ? + 20:20))) | (((gctUINT32) ((gctUINT32) (Enable == 0) & ((gctUINT32) ((((1 ? + 20:20) - (0 ? + 20:20) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20))); + + if (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_BUG_FIXES7) || + (Hardware->identity.chipModel >= gcv4000)) + { + /* Set compression bypass. */ + debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 21:21) - (0 ? + 21:21) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 21:21) - (0 ? + 21:21) + 1))))))) << (0 ? 
+ 21:21))) | (((gctUINT32) ((gctUINT32) ((gcvCOMPRESSION_OPTION_NONE == compression) ? + 1 : 0) & ((gctUINT32) ((((1 ? + 21:21) - (0 ? + 21:21) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 21:21) - (0 ? 21:21) + 1))))))) << (0 ? 21:21))); + } + + /* Write back AQMemoryDebug register. */ + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00414, + debug)); + + /* Store fast clear and comprersison flags. */ + Hardware->options.allowFastClear = Enable; + Hardware->options.allowCompression = compression; + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, + "FastClear=%d Compression=%d", Enable, Compression); + } + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +#else + return gcvSTATUS_OK; +#endif +} + +typedef enum +{ + gcvPOWER_FLAG_INITIALIZE = 1 << 0, + gcvPOWER_FLAG_STALL = 1 << 1, + gcvPOWER_FLAG_STOP = 1 << 2, + gcvPOWER_FLAG_START = 1 << 3, + gcvPOWER_FLAG_RELEASE = 1 << 4, + gcvPOWER_FLAG_DELAY = 1 << 5, + gcvPOWER_FLAG_SAVE = 1 << 6, + gcvPOWER_FLAG_ACQUIRE = 1 << 7, + gcvPOWER_FLAG_POWER_OFF = 1 << 8, + gcvPOWER_FLAG_CLOCK_OFF = 1 << 9, + gcvPOWER_FLAG_CLOCK_ON = 1 << 10, +} +gcePOWER_FLAGS; + +#if gcmIS_DEBUG(gcdDEBUG_TRACE) +static gctCONST_STRING +_PowerEnum(gceCHIPPOWERSTATE State) +{ + const gctCONST_STRING states[] = + { + gcmSTRING(gcvPOWER_ON), + gcmSTRING(gcvPOWER_OFF), + gcmSTRING(gcvPOWER_IDLE), + gcmSTRING(gcvPOWER_SUSPEND), + gcmSTRING(gcvPOWER_IDLE_BROADCAST), + gcmSTRING(gcvPOWER_SUSPEND_BROADCAST), + gcmSTRING(gcvPOWER_OFF_BROADCAST), + gcmSTRING(gcvPOWER_OFF_TIMEOUT), + gcmSTRING(gcvPOWER_ON_AUTO) + }; + + if ((State >= gcvPOWER_ON) && (State <= gcvPOWER_ON_AUTO)) + { + return states[State - gcvPOWER_ON]; + } + + return "unknown"; +} +#endif + +/******************************************************************************* +** +** gckHARDWARE_SetPowerManagementState +** +** Set GPU to a specified power state. 
**
**  INPUT:
**
**      gckHARDWARE Hardware
**          Pointer to an gckHARDWARE object.
**
**      gceCHIPPOWERSTATE State
**          Power State.
**
*/
gceSTATUS
gckHARDWARE_SetPowerManagementState(
    IN gckHARDWARE Hardware,
    IN gceCHIPPOWERSTATE State
    )
{
    gceSTATUS status;
    gckCOMMAND command = gcvNULL;
    gckOS os;
    gctUINT flag, clock;
    gctBOOL acquired = gcvFALSE;       /* command->powerSemaphore held. */
    gctBOOL mutexAcquired = gcvFALSE;  /* Hardware->powerMutex held. */
    gctBOOL broadcast = gcvFALSE;      /* Request came from a *_BROADCAST state. */
#if gcdPOWEROFF_TIMEOUT
    gctBOOL timeout = gcvFALSE;
    gctBOOL isAfter = gcvFALSE;
    gctUINT32 currentTime;
#endif
    gctUINT32 process, thread;
    gctBOOL commandStarted = gcvFALSE;

#if gcdENABLE_PROFILING
    gctUINT64 time, freq, mutexTime, onTime, stallTime, stopTime, delayTime,
              initTime, offTime, startTime, totalTime;
#endif
    gctBOOL global = gcvFALSE;         /* Externally requested (non-broadcast). */
    gctBOOL globalAcquired = gcvFALSE; /* Hardware->globalSemaphore held. */

    /* State transition flags: flags[current][target] lists the steps needed
    ** to move between the four base power states. */
    static const gctUINT flags[4][4] =
    {
        /* gcvPOWER_ON */
        {   /* ON      */ 0,
            /* OFF     */ gcvPOWER_FLAG_ACQUIRE |
                          gcvPOWER_FLAG_STALL |
                          gcvPOWER_FLAG_STOP |
                          gcvPOWER_FLAG_POWER_OFF |
                          gcvPOWER_FLAG_CLOCK_OFF,
            /* IDLE    */ gcvPOWER_FLAG_ACQUIRE |
                          gcvPOWER_FLAG_STALL,
            /* SUSPEND */ gcvPOWER_FLAG_ACQUIRE |
                          gcvPOWER_FLAG_STALL |
                          gcvPOWER_FLAG_STOP |
                          gcvPOWER_FLAG_CLOCK_OFF,
        },

        /* gcvPOWER_OFF */
        {   /* ON      */ gcvPOWER_FLAG_INITIALIZE |
                          gcvPOWER_FLAG_START |
                          gcvPOWER_FLAG_RELEASE |
                          gcvPOWER_FLAG_DELAY,
            /* OFF     */ 0,
            /* IDLE    */ gcvPOWER_FLAG_INITIALIZE |
                          gcvPOWER_FLAG_START |
                          gcvPOWER_FLAG_DELAY,
            /* SUSPEND */ gcvPOWER_FLAG_INITIALIZE |
                          gcvPOWER_FLAG_CLOCK_OFF,
        },

        /* gcvPOWER_IDLE */
        {   /* ON      */ gcvPOWER_FLAG_RELEASE,
            /* OFF     */ gcvPOWER_FLAG_STOP |
                          gcvPOWER_FLAG_POWER_OFF |
                          gcvPOWER_FLAG_CLOCK_OFF,
            /* IDLE    */ 0,
            /* SUSPEND */ gcvPOWER_FLAG_STOP |
                          gcvPOWER_FLAG_CLOCK_OFF,
        },

        /* gcvPOWER_SUSPEND */
        {   /* ON      */ gcvPOWER_FLAG_START |
                          gcvPOWER_FLAG_RELEASE |
                          gcvPOWER_FLAG_DELAY |
                          gcvPOWER_FLAG_CLOCK_ON,
            /* OFF     */ gcvPOWER_FLAG_SAVE |
                          gcvPOWER_FLAG_POWER_OFF |
                          gcvPOWER_FLAG_CLOCK_OFF,
            /* IDLE    */ gcvPOWER_FLAG_START |
                          gcvPOWER_FLAG_DELAY |
                          gcvPOWER_FLAG_CLOCK_ON,
            /* SUSPEND */ 0,
        },
    };

    /* Clock control register (0x00000) values per target state.  Each entry
    ** combines four bit fields: 0:0 clock gating, 1:1 global clocking,
    ** 8:2 frequency-scale value, 9:9 frequency-scale load strobe. */
    static const gctUINT clocks[4] =
    {
        /* gcvPOWER_ON: gating off, fscale = 64 (full speed), load = 1. */
        ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
        ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
        ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (64) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
        ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),

        /* gcvPOWER_OFF: gating on, fscale = 1 (minimum), load = 1. */
        ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
        ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
        ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
        ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),

        /* gcvPOWER_IDLE: gating off, fscale = 1, load = 1. */
        ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
        ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
        ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
        ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),

        /* gcvPOWER_SUSPEND: gating on, fscale = 1, load = 1. */
        ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
        ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
        ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
        ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
    };

    gcmkHEADER_ARG("Hardware=0x%x State=%d", Hardware, State);
#if gcmIS_DEBUG(gcdDEBUG_TRACE)
    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
                   "Switching to power state %d(%s)",
                   State, _PowerEnum(State));
#endif

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);

    /* Get the gckOS object pointer. */
    os = Hardware->os;
    gcmkVERIFY_OBJECT(os, gcvOBJ_OS);

    /* Get the gckCOMMAND object pointer. */
    gcmkVERIFY_OBJECT(Hardware->kernel, gcvOBJ_KERNEL);
    command = Hardware->kernel->command;
    gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);

    /* Start profiler. */
    gcmkPROFILE_INIT(freq, time);

    /* Convert the broadcast power state. */
    switch (State)
    {
    case gcvPOWER_IDLE_BROADCAST:
        /* Convert to IDLE and note we are inside broadcast. */
        State = gcvPOWER_IDLE;
        broadcast = gcvTRUE;
        break;

    case gcvPOWER_SUSPEND_BROADCAST:
        /* Convert to SUSPEND and note we are inside broadcast. */
        State = gcvPOWER_SUSPEND;
        broadcast = gcvTRUE;
        break;

    case gcvPOWER_OFF_BROADCAST:
        /* Convert to OFF and note we are inside broadcast. */
        State = gcvPOWER_OFF;
        broadcast = gcvTRUE;
        break;

    case gcvPOWER_ON_AUTO:
        /* Convert to ON and note we are inside recovery. */
        State = gcvPOWER_ON;
        break;

    case gcvPOWER_ON:
    case gcvPOWER_IDLE:
    case gcvPOWER_SUSPEND:
    case gcvPOWER_OFF:
        /* Mark as global power management. */
        global = gcvTRUE;
        break;

#if gcdPOWEROFF_TIMEOUT
    case gcvPOWER_OFF_TIMEOUT:
        /* Convert to OFF and note we are inside broadcast. */
        State = gcvPOWER_OFF;
        broadcast = gcvTRUE;
        /* Check time out */
        timeout = gcvTRUE;
        break;
#endif

    default:
        break;
    }

    /* With power management disabled only an explicit ON is honored. */
    if (Hardware->options.powerManagement == gcvFALSE
    &&  State != gcvPOWER_ON
    )
    {
        gcmkFOOTER_NO();
        return gcvSTATUS_OK;
    }

    /* Get current process and thread IDs. */
    gcmkONERROR(gckOS_GetProcessID(&process));
    gcmkONERROR(gckOS_GetThreadID(&thread));

    if (broadcast)
    {
        /* Try to acquire the power mutex (zero timeout = non-blocking). */
        status = gckOS_AcquireMutex(os, Hardware->powerMutex, 0);

        if (gcmIS_SUCCESS(status))
        {
            mutexAcquired = gcvTRUE;
        }
        else if (status == gcvSTATUS_TIMEOUT)
        {
            /* Check if we already own this mutex. */
            if ((Hardware->powerProcess == process)
            &&  (Hardware->powerThread == thread)
            )
            {
                /* Bail out on recursive power management. */
                gcmkFOOTER_NO();
                return gcvSTATUS_OK;
            }
            else if (State != gcvPOWER_ON)
            {
                /* Called from IST,
                ** so waiting here will cause deadlock,
                ** if lock holder call gckCOMMAND_Stall() */
                status = gcvSTATUS_OK;
                goto OnError;
            }
        }
    }

    if (!mutexAcquired)
    {
        /* Acquire the power mutex (blocking). */
        gcmkONERROR(gckOS_AcquireMutex(os, Hardware->powerMutex, gcvINFINITE));
        mutexAcquired = gcvTRUE;
    }

    /* Get time until mutex acquired. */
    gcmkPROFILE_QUERY(time, mutexTime);

    /* Record ownership so recursive broadcast calls can be detected. */
    Hardware->powerProcess = process;
    Hardware->powerThread = thread;
    mutexAcquired = gcvTRUE;

    /* Grab control flags and clock. */
    flag = flags[Hardware->chipPowerState][State];
    clock = clocks[State];

#if gcdENABLE_FSCALE_VAL_ADJUST
    if (State == gcvPOWER_ON)
    {
        /* Override the 8:2 frequency-scale field with the configured value. */
        clock = ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (Hardware->powerOnFscaleVal) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2)));
    }
#endif

    if (State == gcvPOWER_SUSPEND && Hardware->chipPowerState == gcvPOWER_OFF && broadcast)
    {
#if gcdPOWER_SUSPEND_WHEN_IDLE
        /* Do nothing */

        /* Release the power mutex. */
        gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));

        gcmkFOOTER_NO();
        return gcvSTATUS_OK;
#else
        /* Clock should be on when switch power from off to suspend:
        ** gating off (0:0 = 0, 1:1 = 0), fscale = 1, load = 1. */
        clock = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
                ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
                ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
                ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) ;
#endif
    }

#if gcdPOWEROFF_TIMEOUT
    if (timeout)
    {
        /* NOTE(review): "&currentTime" restored here — the extracted source
        ** showed the mojibake "¤tTime" (HTML entity for "&curren"). */
        gcmkONERROR(gckOS_GetTicks(&currentTime));

        gcmkONERROR(
            gckOS_TicksAfter(Hardware->powerOffTime, currentTime, &isAfter));

        /* powerOffTime is pushed forward, give up.*/
        if (isAfter
        /* Expect a transition start from IDLE or SUSPEND. */
        ||  (Hardware->chipPowerState == gcvPOWER_ON)
        ||  (Hardware->chipPowerState == gcvPOWER_OFF)
        )
        {
            /* Release the power mutex. */
            gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));

            /* No need to do anything. */
            gcmkFOOTER_NO();
            return gcvSTATUS_OK;
        }

        gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
                       "Power Off GPU[%d] at %u [supposed to be at %u]",
                       Hardware->core, currentTime, Hardware->powerOffTime);
    }
#endif

    if (flag == 0)
    {
        /* Already in the requested state. Release the power mutex. */
        gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));

        /* No need to do anything. */
        gcmkFOOTER_NO();
        return gcvSTATUS_OK;
    }

    /* If this is an internal power management, we have to check if we can grab
    ** the global power semaphore. If we cannot, we have to wait until the
    ** external world changes power management. */
    if (!global)
    {
        /* Try to acquire the global semaphore. */
        status = gckOS_TryAcquireSemaphore(os, Hardware->globalSemaphore);
        if (status == gcvSTATUS_TIMEOUT)
        {
            if (State == gcvPOWER_IDLE || State == gcvPOWER_SUSPEND)
            {
                /* Called from thread routine which should NEVER sleep.*/
                status = gcvSTATUS_OK;
                goto OnError;
            }

            /* Release the power mutex. */
            gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
                           "Releasing the power mutex.");
            gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
            mutexAcquired = gcvFALSE;

            /* Wait for the semaphore. */
            gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
                           "Waiting for global semaphore.");
            gcmkONERROR(gckOS_AcquireSemaphore(os, Hardware->globalSemaphore));
            globalAcquired = gcvTRUE;

            /* Acquire the power mutex. */
            gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
                           "Reacquiring the power mutex.");
            gcmkONERROR(gckOS_AcquireMutex(os,
                                           Hardware->powerMutex,
                                           gcvINFINITE));
            mutexAcquired = gcvTRUE;

            /* chipPowerState may be changed by external world during the time
            ** we give up powerMutex, so updating flag now is necessary. */
            flag = flags[Hardware->chipPowerState][State];

            if (flag == 0)
            {
                gcmkONERROR(gckOS_ReleaseSemaphore(os, Hardware->globalSemaphore));
                globalAcquired = gcvFALSE;

                gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
                mutexAcquired = gcvFALSE;

                gcmkFOOTER_NO();
                return gcvSTATUS_OK;
            }
        }
        else
        {
            /* Error. */
            gcmkONERROR(status);
        }

        /* Release the global semaphore again. */
        gcmkONERROR(gckOS_ReleaseSemaphore(os, Hardware->globalSemaphore));
        globalAcquired = gcvFALSE;

        /* Try to acquire the semaphore to make sure commit is not in progress
        ** Otherwise, we just abort. */
        if (flag & gcvPOWER_FLAG_ACQUIRE)
        {
            /* ON -> Other, boardcast. */
            /* Try to acquire the power management semaphore. */
            status = gckOS_TryAcquireSemaphore(os, command->powerSemaphore);

            if (status == gcvSTATUS_OK)
            {
                acquired = gcvTRUE;

                /* avoid acquiring again. */
                flag &= ~gcvPOWER_FLAG_ACQUIRE;
            }
            else
            {
                /* Not ready to switch. */
                status = gcvSTATUS_CHIP_NOT_READY;
                goto OnError;
            }
        }
    }
    else
    {
        if (State == gcvPOWER_OFF || State == gcvPOWER_SUSPEND || State == gcvPOWER_IDLE)
        {
            /* Acquire the global semaphore if it has not been acquired. */
            status = gckOS_TryAcquireSemaphore(os, Hardware->globalSemaphore);
            if (status == gcvSTATUS_OK)
            {
                globalAcquired = gcvTRUE;
            }
            else if (status != gcvSTATUS_TIMEOUT)
            {
                /* Other errors. */
                gcmkONERROR(status);
            }
            /* Ignore gcvSTATUS_TIMEOUT and leave globalAcquired as gcvFALSE.
            ** gcvSTATUS_TIMEOUT means global semaphore has already
            ** been acquired before this operation, so even if we fail,
            ** we should not release it in our error handling. It should be
            ** released by the next successful global gcvPOWER_ON. */
        }

        /* Global power management can't be aborted, so sync with
        ** proceeding last commit. */
        if (flag & gcvPOWER_FLAG_ACQUIRE)
        {
            /* Acquire the power management semaphore. */
            gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
            acquired = gcvTRUE;

            /* avoid acquiring again. */
            flag &= ~gcvPOWER_FLAG_ACQUIRE;
        }
    }

    if (flag & (gcvPOWER_FLAG_INITIALIZE | gcvPOWER_FLAG_CLOCK_ON))
    {
        /* Turn on the power. */
        gcmkONERROR(gckOS_SetGPUPower(os, Hardware->core, gcvTRUE, gcvTRUE));

        /* Mark clock and power as enabled. */
        Hardware->clockState = gcvTRUE;
        Hardware->powerState = gcvTRUE;

        for (;;)
        {
            /* Check if GPU is present and awake. */
            status = _IsGPUPresent(Hardware);

            /* Check if the GPU is not responding. */
            if (status == gcvSTATUS_GPU_NOT_RESPONDING)
            {
                /* Power-cycle the GPU: turn off the power and clock... */
                gcmkONERROR(gckOS_SetGPUPower(os, Hardware->core, gcvFALSE, gcvFALSE));

                Hardware->clockState = gcvFALSE;
                Hardware->powerState = gcvFALSE;

                /* Wait a little. */
                gckOS_Delay(os, 1);

                /* ...then turn on the power and clock again. */
                gcmkONERROR(gckOS_SetGPUPower(os, Hardware->core, gcvTRUE, gcvTRUE));

                Hardware->clockState = gcvTRUE;
                Hardware->powerState = gcvTRUE;

                /* We need to initialize the hardware and start the command
                 * processor. */
                flag |= gcvPOWER_FLAG_INITIALIZE | gcvPOWER_FLAG_START;
            }
            else
            {
                /* Test for error. */
                gcmkONERROR(status);

                /* Break out of loop. */
                break;
            }
        }
    }

    /* Get time until powered on. */
    gcmkPROFILE_QUERY(time, onTime);

    if (flag & gcvPOWER_FLAG_STALL)
    {
        gctBOOL idle;
        gctINT32 atomValue;

        /* For global operation, all pending commits have already been
        ** blocked by globalSemaphore or powerSemaphore.*/
        if (!global)
        {
            /* Check commit atom. */
            gcmkONERROR(gckOS_AtomGet(os, command->atomCommit, &atomValue));

            if (atomValue > 0)
            {
                /* Commits are pending - abort power management. */
                status = broadcast ? gcvSTATUS_CHIP_NOT_READY
                                   : gcvSTATUS_MORE_DATA;
                goto OnError;
            }
        }

        if (broadcast)
        {
            /* Check for idle. */
            gcmkONERROR(gckHARDWARE_QueryIdle(Hardware, &idle));

            if (!idle)
            {
                status = gcvSTATUS_CHIP_NOT_READY;
                goto OnError;
            }
        }

        else
        {
            /* Wait to finish all commands. */
            gcmkONERROR(gckCOMMAND_Stall(command, gcvTRUE));

            for (;;)
            {
                gcmkONERROR(gckHARDWARE_QueryIdle(Hardware, &idle));

                if (idle)
                {
                    break;
                }

                gcmkVERIFY_OK(gckOS_Delay(Hardware->os, 1));
            }
        }
    }

    /* Get time until stalled. */
    gcmkPROFILE_QUERY(time, stallTime);

    if (flag & gcvPOWER_FLAG_ACQUIRE)
    {
        /* Acquire the power management semaphore. */
        gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
        acquired = gcvTRUE;
    }

    if (flag & gcvPOWER_FLAG_STOP)
    {
        /* Stop the command parser. */
        gcmkONERROR(gckCOMMAND_Stop(command));
    }

    /* Flush Cache before Power Off. */
    if (flag & gcvPOWER_FLAG_POWER_OFF)
    {
        if (Hardware->clockState == gcvFALSE)
        {
            /* Turn the clock back on so caches can be flushed (note: both
            ** arguments are gcvTRUE despite the legacy comment here). */
            gcmkONERROR(
                gckOS_SetGPUPower(os,
                                  Hardware->core,
                                  gcvTRUE,
                                  gcvTRUE));

            Hardware->clockState = gcvTRUE;
#if gcdDVFS
            if (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_DYNAMIC_FREQUENCY_SCALING) != gcvTRUE)
#endif
            {
                /* Write the clock control register. */
                gcmkONERROR(gckOS_WriteRegisterEx(os,
                                                  Hardware->core,
                                                  0x00000,
                                                  clocks[0]));

                /* Done loading the frequency scaler (clear the 9:9 load bit). */
                gcmkONERROR(gckOS_WriteRegisterEx(os,
                                                  Hardware->core,
                                                  0x00000,
                                                  ((((gctUINT32) (clocks[0])) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
            }
        }

        if(_IsHardwareMatch(Hardware, gcv400, 0x4645))
        {
            /* GC400 rev 0x4645 needs the flush issued through the command
            ** parser rather than the hardware function path. */
            gcmkONERROR(gckCOMMAND_Start(command));

            gcmkONERROR(_FlushCache(Hardware, command));

            gckOS_Delay(gcvNULL, 1);

            /* Stop the command parser. */
            gcmkONERROR(gckCOMMAND_Stop(command));
        }
        else
        {
            gckHARDWARE_ExecuteFunctions(Hardware, gcvHARDWARE_FUNCTION_FLUSH);
            gckOS_Delay(gcvNULL, 1);
        }

        flag |= gcvPOWER_FLAG_CLOCK_OFF;
    }

    /* Get time until stopped. */
    gcmkPROFILE_QUERY(time, stopTime);

    /* Only process this when hardware is enabled. */
    if (Hardware->clockState && Hardware->powerState
#if gcdDVFS
    /* Don't touch clock control if dynamic frequency scaling is available. */
    &&  gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_DYNAMIC_FREQUENCY_SCALING) != gcvTRUE
#endif
    )
    {
        if (flag & (gcvPOWER_FLAG_POWER_OFF | gcvPOWER_FLAG_CLOCK_OFF))
        {
            if (Hardware->identity.chipModel == gcv4000
            && ((Hardware->identity.chipRevision == 0x5208) || (Hardware->identity.chipRevision == 0x5222)))
            {
                /* Errata: keep bit 1 (global clocking) clear on these revs. */
                clock &= ~2U;
            }
        }

        /* Write the clock control register. */
        gcmkONERROR(gckOS_WriteRegisterEx(os,
                                          Hardware->core,
                                          0x00000,
                                          clock));

        /* Done loading the frequency scaler (clear the 9:9 load bit). */
        gcmkONERROR(gckOS_WriteRegisterEx(os,
                                          Hardware->core,
                                          0x00000,
                                          ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
    }

    if (flag & gcvPOWER_FLAG_DELAY)
    {
        /* Wait for the specified amount of time to settle coming back from
        ** power-off or suspend state. */
        gcmkONERROR(gckOS_Delay(os, gcdPOWER_CONTROL_DELAY));
    }

    /* Get time until delayed. */
    gcmkPROFILE_QUERY(time, delayTime);

    if (flag & gcvPOWER_FLAG_INITIALIZE)
    {
        /* Initialize hardware. */
        gcmkONERROR(gckHARDWARE_InitializeHardware(Hardware));

        /* Re-apply the cached fast clear / compression configuration. */
        gcmkONERROR(gckHARDWARE_SetFastClear(Hardware,
                                             Hardware->options.allowFastClear,
                                             Hardware->options.allowCompression));

        /* Force the command queue to reload the next context. */
        command->currContext = gcvNULL;

        /* Trigger a possible dummy draw. */
        command->dummyDraw = gcvTRUE;
    }

    /* Get time until initialized. */
    gcmkPROFILE_QUERY(time, initTime);

    if (flag & (gcvPOWER_FLAG_POWER_OFF | gcvPOWER_FLAG_CLOCK_OFF))
    {
        /* Turn off the GPU power. */
        gcmkONERROR(
            gckOS_SetGPUPower(os,
                              Hardware->core,
                              (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
                                                               : gcvTRUE,
                              (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
                                                               : gcvTRUE));

        /* Save current hardware power and clock states. */
        Hardware->clockState = (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
                                                                : gcvTRUE;
        Hardware->powerState = (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
                                                                : gcvTRUE;
    }

    /* Get time until off. */
    gcmkPROFILE_QUERY(time, offTime);

    if (flag & gcvPOWER_FLAG_START)
    {
        /* Start the command processor. */
        gcmkONERROR(gckCOMMAND_Start(command));
        commandStarted = gcvTRUE;
    }

    /* Get time until started. */
    gcmkPROFILE_QUERY(time, startTime);

    if (flag & gcvPOWER_FLAG_RELEASE)
    {
        /* Release the power management semaphore. */
        gcmkONERROR(gckOS_ReleaseSemaphore(os, command->powerSemaphore));
        acquired = gcvFALSE;

        if (global)
        {
            /* Verify global semaphore has been acquired already before
            ** we release it.
            ** If it was acquired, gckOS_TryAcquireSemaphore will return
            ** gcvSTATUS_TIMEOUT and we release it. Otherwise, global
            ** semaphore will be acquried now, but it still is released
            ** immediately. */
            status = gckOS_TryAcquireSemaphore(os, Hardware->globalSemaphore);
            if (status != gcvSTATUS_TIMEOUT)
            {
                gcmkONERROR(status);
            }

            /* Release the global semaphore. */
            gcmkONERROR(gckOS_ReleaseSemaphore(os, Hardware->globalSemaphore));
            globalAcquired = gcvFALSE;
        }
    }

    /* Account the time spent in the outgoing state. */
    gckSTATETIMER_Accumulate(&Hardware->powerStateTimer, Hardware->chipPowerState);

    /* Save the new power state. */
    Hardware->chipPowerState = State;

#if gcdDVFS
    if (State == gcvPOWER_ON && Hardware->kernel->dvfs)
    {
        gckDVFS_Start(Hardware->kernel->dvfs);
    }
#endif

#if gcdPOWEROFF_TIMEOUT
    /* Reset power off time.
    ** NOTE(review): "&currentTime" restored from the mojibake "¤tTime". */
    gcmkONERROR(gckOS_GetTicks(&currentTime));

    Hardware->powerOffTime = currentTime + Hardware->powerOffTimeout;

    if (State == gcvPOWER_IDLE || State == gcvPOWER_SUSPEND)
    {
        /* Start a timer to power off GPU when GPU enters IDLE or SUSPEND. */
        gcmkVERIFY_OK(gckOS_StartTimer(os,
                                       Hardware->powerOffTimer,
                                       Hardware->powerOffTimeout));
    }
    else
    {
        gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "Cancel powerOfftimer");

        /* Cancel running timer when GPU enters ON or OFF. */
        gcmkVERIFY_OK(gckOS_StopTimer(os, Hardware->powerOffTimer));
    }
#endif

    /* Release the power mutex. */
    gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));

    /* Get total time. */
    gcmkPROFILE_QUERY(time, totalTime);
#if gcdENABLE_PROFILING
    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
                   "PROF(%llu): mutex:%llu on:%llu stall:%llu stop:%llu",
                   freq, mutexTime, onTime, stallTime, stopTime);
    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
                   " delay:%llu init:%llu off:%llu start:%llu total:%llu",
                   delayTime, initTime, offTime, startTime, totalTime);
#endif

    /* Success. */
    gcmkFOOTER_NO();
    return gcvSTATUS_OK;

OnError:
    if (commandStarted)
    {
        gcmkVERIFY_OK(gckCOMMAND_Stop(command));
    }

    if (acquired)
    {
        /* Release semaphore. */
        gcmkVERIFY_OK(gckOS_ReleaseSemaphore(Hardware->os,
                                             command->powerSemaphore));
    }

    if (globalAcquired)
    {
        gcmkVERIFY_OK(gckOS_ReleaseSemaphore(Hardware->os,
                                             Hardware->globalSemaphore));
    }

    if (mutexAcquired)
    {
        gcmkVERIFY_OK(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}

/*******************************************************************************
**
**  gckHARDWARE_QueryPowerManagementState
**
**  Get GPU power state.
**
**  INPUT:
**
**      gckHARDWARE Hardware
**          Pointer to an gckHARDWARE object.
**
**      gceCHIPPOWERSTATE* State
**          Power State.
**
*/
gceSTATUS
gckHARDWARE_QueryPowerManagementState(
    IN gckHARDWARE Hardware,
    OUT gceCHIPPOWERSTATE* State
    )
{
    gcmkHEADER_ARG("Hardware=0x%x", Hardware);

    /* Verify the arguments.
*/
    gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
    gcmkVERIFY_ARGUMENT(State != gcvNULL);

    /* Return the status. */
    *State = Hardware->chipPowerState;

    /* Success. */
    gcmkFOOTER_ARG("*State=%d", *State);
    return gcvSTATUS_OK;
}

/*******************************************************************************
**
**  gckHARDWARE_SetPowerManagement
**
**  Configure GPU power management function.
**  Only used in driver initialization stage.
**
**  INPUT:
**
**      gckHARDWARE Hardware
**          Pointer to an gckHARDWARE object.
**
**      gctBOOL PowerManagement
**          Power Management State.
**
*/
gceSTATUS
gckHARDWARE_SetPowerManagement(
    IN gckHARDWARE Hardware,
    IN gctBOOL PowerManagement
    )
{
    gcmkHEADER_ARG("Hardware=0x%x", Hardware);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);

    /* Update the option under the power mutex so it cannot race with an
    ** in-flight gckHARDWARE_SetPowerManagementState call. */
    gcmkVERIFY_OK(
        gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE));

    Hardware->options.powerManagement = PowerManagement;

    gcmkVERIFY_OK(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));

    /* Success. */
    gcmkFOOTER_NO();
    return gcvSTATUS_OK;
}

/*******************************************************************************
**
**  gckHARDWARE_SetGpuProfiler
**
**  Configure GPU profiler function.
**  Only used in driver initialization stage.
**
**  INPUT:
**
**      gckHARDWARE Hardware
**          Pointer to an gckHARDWARE object.
**
**      gctBOOL GpuProfiler
**          GPU Profiler State.
**
*/
gceSTATUS
gckHARDWARE_SetGpuProfiler(
    IN gckHARDWARE Hardware,
    IN gctBOOL GpuProfiler
    )
{
    gcmkHEADER_ARG("Hardware=0x%x", Hardware);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);

    if (GpuProfiler == gcvTRUE)
    {
        gctUINT32 data = 0;

        /* Need to disable clock gating when doing profiling.
*/ + gcmkVERIFY_OK( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + Hardware->powerBaseAddress + + 0x00100, + &data)); + + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + + + gcmkVERIFY_OK( + gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + Hardware->powerBaseAddress + + 0x00100, + data)); + } + + Hardware->options.gpuProfiler= GpuProfiler; + + if (GpuProfiler == gcvTRUE) + { + Hardware->waitCount = 200 * 100; + } + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +#if gcdENABLE_FSCALE_VAL_ADJUST +gceSTATUS +gckHARDWARE_SetFscaleValue( + IN gckHARDWARE Hardware, + IN gctUINT32 FscaleValue + ) +{ + gceSTATUS status; + gctUINT32 clock; + gctBOOL acquired = gcvFALSE; + + gcmkHEADER_ARG("Hardware=0x%x FscaleValue=%d", Hardware, FscaleValue); + + gcmkVERIFY_ARGUMENT(FscaleValue > 0 && FscaleValue <= 64); + + gcmkONERROR( + gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE)); + acquired = gcvTRUE; + + Hardware->powerOnFscaleVal = FscaleValue; + + if (Hardware->chipPowerState == gcvPOWER_ON) + { + gctUINT32 data; + + gcmkONERROR( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + Hardware->powerBaseAddress + + 0x00104, + &data)); + + /* Disable all clock gating. */ + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + Hardware->powerBaseAddress + + 0x00104, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 
0:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 1:1) - (0 ? + 1:1) + 1))))))) << (0 ? + 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 2:2) - (0 ? + 2:2) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 2:2) - (0 ? + 2:2) + 1))))))) << (0 ? + 2:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 2:2) - (0 ? + 2:2) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 3:3) - (0 ? + 3:3) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 3:3) - (0 ? + 3:3) + 1))))))) << (0 ? + 3:3))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 3:3) - (0 ? + 3:3) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:4) - (0 ? + 4:4) + 1))))))) << (0 ? + 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 5:5) - (0 ? + 5:5) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 5:5) - (0 ? + 5:5) + 1))))))) << (0 ? + 5:5))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 5:5) - (0 ? + 5:5) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 6:6) - (0 ? + 6:6) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 6:6) - (0 ? + 6:6) + 1))))))) << (0 ? + 6:6))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 6:6) - (0 ? + 6:6) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 
6:6))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:7) - (0 ? + 7:7) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:7) - (0 ? + 7:7) + 1))))))) << (0 ? + 7:7))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 7:7) - (0 ? + 7:7) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 8:8) - (0 ? + 8:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 8:8) - (0 ? + 8:8) + 1))))))) << (0 ? + 8:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 8:8) - (0 ? + 8:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 9:9) - (0 ? + 9:9) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 9:9) - (0 ? + 9:9) + 1))))))) << (0 ? + 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 9:9) - (0 ? + 9:9) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 11:11) - (0 ? + 11:11) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 11:11) - (0 ? + 11:11) + 1))))))) << (0 ? + 11:11))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 11:11) - (0 ? + 11:11) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11))))); + + clock = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 1:1) - (0 ? + 1:1) + 1))))))) << (0 ? + 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 1:1) - (0 ? 
1:1) + 1))))))) << (0 ? 1:1))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 8:2) - (0 ? + 8:2) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 8:2) - (0 ? + 8:2) + 1))))))) << (0 ? + 8:2))) | (((gctUINT32) ((gctUINT32) (FscaleValue) & ((gctUINT32) ((((1 ? + 8:2) - (0 ? + 8:2) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 9:9) - (0 ? + 9:9) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 9:9) - (0 ? + 9:9) + 1))))))) << (0 ? + 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 9:9) - (0 ? + 9:9) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))); + + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00000, + clock)); + + /* Done loading the frequency scaler. */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00000, + ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 9:9) - (0 ? + 9:9) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 9:9) - (0 ? + 9:9) + 1))))))) << (0 ? + 9:9))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 9:9) - (0 ? + 9:9) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))))); + + /* Restore all clock gating. 
*/ + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + Hardware->powerBaseAddress + + 0x00104, + data)); + } + + gcmkVERIFY(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex)); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + gcmkVERIFY(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex)); + } + + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckHARDWARE_GetFscaleValue( + IN gckHARDWARE Hardware, + IN gctUINT * FscaleValue, + IN gctUINT * MinFscaleValue, + IN gctUINT * MaxFscaleValue + ) +{ + *FscaleValue = Hardware->powerOnFscaleVal; + *MinFscaleValue = Hardware->minFscaleValue; + *MaxFscaleValue = 64; + + return gcvSTATUS_OK; +} + +gceSTATUS +gckHARDWARE_SetMinFscaleValue( + IN gckHARDWARE Hardware, + IN gctUINT MinFscaleValue + ) +{ + if (MinFscaleValue >= 1 && MinFscaleValue <= 64) + { + Hardware->minFscaleValue = MinFscaleValue; + } + + return gcvSTATUS_OK; +} +#endif + +#if gcdPOWEROFF_TIMEOUT +gceSTATUS +gckHARDWARE_SetPowerOffTimeout( + IN gckHARDWARE Hardware, + IN gctUINT32 Timeout +) +{ + gcmkHEADER_ARG("Hardware=0x%x Timeout=%d", Hardware, Timeout); + + Hardware->powerOffTimeout = Timeout; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + + +gceSTATUS +gckHARDWARE_QueryPowerOffTimeout( + IN gckHARDWARE Hardware, + OUT gctUINT32* Timeout +) +{ + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + + *Timeout = Hardware->powerOffTimeout; + + gcmkFOOTER_ARG("*Timeout=%d", *Timeout); + return gcvSTATUS_OK; +} +#endif + +gceSTATUS +gckHARDWARE_QueryIdle( + IN gckHARDWARE Hardware, + OUT gctBOOL_PTR IsIdle + ) +{ + gceSTATUS status; + gctUINT32 idle; +#if !gcdSECURITY + gctUINT32 address; +#endif + gctBOOL isIdle = gcvFALSE; + +#if gcdINTERRUPT_STATISTIC + gckEVENT eventObj = Hardware->kernel->eventObj; + gctINT32 pendingInterrupt; +#endif + + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + gcmkVERIFY_ARGUMENT(IsIdle != gcvNULL); + + do + { + /* We are idle when the power is not ON. */ + if (Hardware->chipPowerState != gcvPOWER_ON) + { + isIdle = gcvTRUE; + break; + } + + /* Read idle register. */ + gcmkONERROR( + gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00004, &idle)); + + /* Pipe must be idle. */ + if ((idle | (1 << 14)) != 0x7ffffffe) + { + /* Something is busy. */ + break; + } + +#if gcdSECURITY + isIdle = gcvTRUE; + break; +#else + /* Read the current FE address. */ + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00664, + &address)); + + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00664, + &address)); + + /* Test if address is inside the last WAIT/LINK sequence. */ + if ((address < Hardware->lastWaitLink) || + (address >= (gctUINT64)Hardware->lastWaitLink + 16)) + { + /* FE is not in WAIT/LINK yet. */ + break; + } +#endif + +#if gcdINTERRUPT_STATISTIC + gcmkONERROR(gckOS_AtomGet( + Hardware->os, + eventObj->interruptCount, + &pendingInterrupt + )); + + if (pendingInterrupt) + { + /* Pending interrupts, not idle. */ + break; + } + + if (Hardware->hasAsyncFe) + { + gckEVENT asyncEvent = Hardware->kernel->asyncEvent; + + gcmkONERROR(gckOS_AtomGet( + Hardware->os, + asyncEvent->interruptCount, + &pendingInterrupt + )); + + if (pendingInterrupt) + { + /* Pending async FE interrupts, not idle. */ + break; + } + } +#endif + + /* Is really idle. */ + isIdle = gcvTRUE; + } + while (gcvFALSE); + + *IsIdle = isIdle; + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** Handy macros that will help in reading those debug registers. 
+*/ +#define gcmkREAD_DEBUG_REGISTER_PART1(control, block, index, data) \ + gcmkONERROR(\ + gckOS_WriteRegisterEx(Hardware->os, \ + Hardware->core, \ + GC_DEBUG_CONTROL##control##_Address, \ + gcmSETFIELD(0, \ + GC_DEBUG_CONTROL##control, \ + block, \ + index))); \ + gcmkONERROR(\ + gckOS_ReadRegisterEx(Hardware->os, \ + Hardware->core, \ + GC_DEBUG_SIGNALS_##block##_Address, \ + &profiler_part1->data)) + +#define gcmkREAD_DEBUG_REGISTER_PART2(control, block, index, data) \ + gcmkONERROR(\ + gckOS_WriteRegisterEx(Hardware->os, \ + Hardware->core, \ + GC_DEBUG_CONTROL##control##_Address, \ + gcmSETFIELD(0, \ + GC_DEBUG_CONTROL##control, \ + block, \ + index))); \ + gcmkONERROR(\ + gckOS_ReadRegisterEx(Hardware->os, \ + Hardware->core, \ + GC_DEBUG_SIGNALS_##block##_Address, \ + &profiler_part2->data)) + +#define gcmkREAD_DEBUG_REGISTER_N(control, block, index, data) \ + gcmkONERROR(\ + gckOS_WriteRegisterEx(Hardware->os, \ + Hardware->core, \ + GC_DEBUG_CONTROL##control##_Address, \ + gcmSETFIELD(0, \ + GC_DEBUG_CONTROL##control, \ + block, \ + index))); \ + gcmkONERROR(\ + gckOS_ReadRegisterEx(Hardware->os, \ + Hardware->core, \ + GC_DEBUG_SIGNALS_##block##_Address, \ + &data)) + +#define gcmkRESET_DEBUG_REGISTER(control, block, value) \ + gcmkONERROR(\ + gckOS_WriteRegisterEx(Hardware->os, \ + Hardware->core, \ + GC_DEBUG_CONTROL##control##_Address, \ + gcmSETFIELD(0, \ + GC_DEBUG_CONTROL##control, \ + block, \ + value))); \ + gcmkONERROR(\ + gckOS_WriteRegisterEx(Hardware->os, \ + Hardware->core, \ + GC_DEBUG_CONTROL##control##_Address, \ + gcmSETFIELD(0, \ + GC_DEBUG_CONTROL##control, \ + block, \ + 0))) + +static gctUINT32 +CalcDelta( + IN gctUINT32 new, + IN gctUINT32 old + ) +{ + if (new >= old) + { + return new - old; + } + else + { + return (gctUINT32)((gctUINT64)new + 0x100000000ll - old); + } +} + + +#if USE_SW_RESET +#define gcmkRESET_PROFILE_DATA_PART1(counterName) \ + temp = profiler_part1->counterName; \ + profiler_part1->counterName = CalcDelta(temp, 
Context->preProfiler_part1.counterName); \ + Context->preProfiler_part1.counterName = temp +#endif + +#define gcmkUPDATE_PROFILE_DATA_PART1(data) \ + profilerHistroy_part1->data += profiler_part1->data +#define gcmkUPDATE_PROFILE_DATA_PART2(data) \ + profilerHistroy_part2->data += profiler_part2->data + +gceSTATUS +gckHARDWARE_QueryContextProfile( + IN gckHARDWARE Hardware, + IN gctBOOL Reset, + IN gckCONTEXT Context, + OUT gcsPROFILER_COUNTERS_PART1 * Counters_part1, + OUT gcsPROFILER_COUNTERS_PART2 * Counters_part2 +) +{ + gceSTATUS status; + gckCOMMAND command = Hardware->kernel->command; + gcsPROFILER_COUNTERS_PART1 * profiler_part1 = Counters_part1; + gcsPROFILER_COUNTERS_PART2 * profiler_part2 = Counters_part2; + + gcmkHEADER_ARG("Hardware=0x%x Counters_part1=0x%x, Counters_part2=0x%x", Hardware, Counters_part1, Counters_part2); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + + if (!Context) + { + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + /* Acquire the context sequnence mutex. */ + gcmkONERROR(gckOS_AcquireMutex( + command->os, command->mutexContextSeq, gcvINFINITE + )); + + /* Read the counters. */ + if (Counters_part1) + { + gcmkVERIFY_OK(gckOS_MemCopy( + profiler_part1, &Context->histroyProfiler_part1, gcmSIZEOF(gcsPROFILER_COUNTERS_PART1) + )); + } + else if (Counters_part2) + { + gcmkVERIFY_OK(gckOS_MemCopy( + profiler_part2, &Context->histroyProfiler_part2, gcmSIZEOF(gcsPROFILER_COUNTERS_PART2) + )); + } + + /* Reset counters. */ + if (Reset) + { + if (Counters_part1) + { + gcmkVERIFY_OK(gckOS_ZeroMemory( + &Context->histroyProfiler_part1, gcmSIZEOF(gcsPROFILER_COUNTERS_PART1) + )); + } + else if (Counters_part2) + { + gcmkVERIFY_OK(gckOS_ZeroMemory( + &Context->histroyProfiler_part2, gcmSIZEOF(gcsPROFILER_COUNTERS_PART2) + )); + } + } + + gcmkVERIFY_OK(gckOS_ReleaseMutex( + command->os, command->mutexContextSeq + )); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. 
*/ + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckHARDWARE_UpdateContextProfile( + IN gckHARDWARE Hardware, + IN gckCONTEXT Context +) +{ + gceSTATUS status; + gcsPROFILER_COUNTERS_PART1 * profiler_part1 = &Context->latestProfiler_part1; + gcsPROFILER_COUNTERS_PART1 * profilerHistroy_part1 = &Context->histroyProfiler_part1; + gcsPROFILER_COUNTERS_PART2 * profiler_part2 = &Context->latestProfiler_part2; + gcsPROFILER_COUNTERS_PART2 * profilerHistroy_part2 = &Context->histroyProfiler_part2; + gceCHIPMODEL chipModel; + gctUINT32 chipRevision; + gctUINT32 i; + gctUINT32 resetValue = 0xF; + gctBOOL hasNewCounters = gcvFALSE; + gctUINT32 clock; + gctUINT32 colorKilled = 0, colorDrawn = 0, depthKilled = 0, depthDrawn = 0; + gctUINT32 totalRead, totalWrite; + gctUINT32 mc_axi_max_min_latency; + gctUINT32 temp; + gckCOMMAND command = Hardware->kernel->command; + gctBOOL mutexAcquired = gcvFALSE; + + gcmkHEADER_ARG("Hardware=0x%x Context=0x%x", Hardware, Context); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT); + + /* Acquire the context sequnence mutex. 
*/ + gcmkONERROR(gckOS_AcquireMutex( + command->os, command->mutexContextSeq, gcvINFINITE + )); + mutexAcquired = gcvTRUE; + + chipModel = Hardware->identity.chipModel; + chipRevision = Hardware->identity.chipRevision; + if ((chipModel == gcv5000 && chipRevision == 0x5434) || (chipModel == gcv3000 && chipRevision == 0x5435)) + { + resetValue = 0xFF; + hasNewCounters = gcvTRUE; + } + + if (chipModel == gcv2100 || chipModel == gcv2000 || chipModel == gcv880) + { + gcmkONERROR( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00438, + &profiler_part2->hi_total_cycle_count)); + + gcmkONERROR( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00078, + &profiler_part2->hi_total_idle_cycle_count)); + } + else + { + gcmkONERROR( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00078, + &profiler_part2->hi_total_cycle_count)); + + gcmkONERROR( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x0007C, + &profiler_part2->hi_total_idle_cycle_count)); + } + gcmkUPDATE_PROFILE_DATA_PART2(hi_total_cycle_count); + gcmkUPDATE_PROFILE_DATA_PART2(hi_total_idle_cycle_count); + + /* Read clock control register. */ + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00000, + &clock)); + + profiler_part2->hi_total_read_8B_count = 0; + profiler_part2->hi_total_write_8B_count = 0; + profiler_part1->pe0_pixel_count_drawn_by_color_pipe = 0; + profiler_part1->pe0_pixel_count_drawn_by_depth_pipe = 0; + profiler_part1->pe0_pixel_count_killed_by_color_pipe = 0; + profiler_part1->pe0_pixel_count_killed_by_depth_pipe = 0; + profiler_part1->pe1_pixel_count_drawn_by_color_pipe = 0; + profiler_part1->pe1_pixel_count_drawn_by_depth_pipe = 0; + profiler_part1->pe1_pixel_count_killed_by_color_pipe = 0; + profiler_part1->pe1_pixel_count_killed_by_depth_pipe = 0; + + /* Walk through all avaiable pixel pipes. */ + for (i = 0; i < Hardware->identity.pixelPipes; ++i) + { + /* Select proper pipe. 
*/ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00000, + ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:20) - (0 ? + 23:20) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:20) - (0 ? + 23:20) + 1))))))) << (0 ? + 23:20))) | (((gctUINT32) ((gctUINT32) (i) & ((gctUINT32) ((((1 ? + 23:20) - (0 ? + 23:20) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20))))); + + /* BW */ + gcmkONERROR( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00040, + &totalRead)); + gcmkONERROR( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00044, + &totalWrite)); + + profiler_part2->hi_total_read_8B_count += totalRead; + profiler_part2->hi_total_write_8B_count += totalWrite; + + /* PE */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorKilled)); + + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthKilled)); + + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 
+ 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorDrawn)); + + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthDrawn)); + + + if (i == 0) + { + profiler_part1->pe0_pixel_count_killed_by_color_pipe = colorKilled; + profiler_part1->pe0_pixel_count_killed_by_depth_pipe = depthKilled; + profiler_part1->pe0_pixel_count_drawn_by_color_pipe = colorDrawn; + profiler_part1->pe0_pixel_count_drawn_by_depth_pipe = depthDrawn; + } + else if (i == 1) + { + profiler_part1->pe1_pixel_count_killed_by_color_pipe = colorKilled; + profiler_part1->pe1_pixel_count_killed_by_depth_pipe = depthKilled; + profiler_part1->pe1_pixel_count_drawn_by_color_pipe = colorDrawn; + profiler_part1->pe1_pixel_count_drawn_by_depth_pipe = depthDrawn; + } + } + + gcmkUPDATE_PROFILE_DATA_PART2(hi_total_read_8B_count); + gcmkUPDATE_PROFILE_DATA_PART2(hi_total_write_8B_count); +#if USE_SW_RESET + gcmkRESET_PROFILE_DATA_PART1(pe0_pixel_count_killed_by_color_pipe); + gcmkRESET_PROFILE_DATA_PART1(pe0_pixel_count_killed_by_depth_pipe); + gcmkRESET_PROFILE_DATA_PART1(pe0_pixel_count_drawn_by_color_pipe); + gcmkRESET_PROFILE_DATA_PART1(pe0_pixel_count_drawn_by_depth_pipe); + gcmkRESET_PROFILE_DATA_PART1(pe1_pixel_count_killed_by_color_pipe); + gcmkRESET_PROFILE_DATA_PART1(pe1_pixel_count_killed_by_depth_pipe); + 
gcmkRESET_PROFILE_DATA_PART1(pe1_pixel_count_drawn_by_color_pipe); + gcmkRESET_PROFILE_DATA_PART1(pe1_pixel_count_drawn_by_depth_pipe); +#endif + gcmkUPDATE_PROFILE_DATA_PART1(pe0_pixel_count_killed_by_color_pipe); + gcmkUPDATE_PROFILE_DATA_PART1(pe0_pixel_count_killed_by_depth_pipe); + gcmkUPDATE_PROFILE_DATA_PART1(pe0_pixel_count_drawn_by_color_pipe); + gcmkUPDATE_PROFILE_DATA_PART1(pe0_pixel_count_drawn_by_depth_pipe); + gcmkUPDATE_PROFILE_DATA_PART1(pe1_pixel_count_killed_by_color_pipe); + gcmkUPDATE_PROFILE_DATA_PART1(pe1_pixel_count_killed_by_depth_pipe); + gcmkUPDATE_PROFILE_DATA_PART1(pe1_pixel_count_drawn_by_color_pipe); + gcmkUPDATE_PROFILE_DATA_PART1(pe1_pixel_count_drawn_by_depth_pipe); + + /* Reset clock control register. */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00000, + clock)); + + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00438, 0)); + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00078, 0)); + +#if !USE_SW_RESET + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (resetValue) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 
23:16))) +)); +#endif + + /* FE */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00450, &profiler_part1->fe_draw_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (11) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00450, &profiler_part1->fe_out_vertex_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (12) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00450, &profiler_part1->fe_cache_miss_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (16) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 
7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00450, &profiler_part1->fe_cache_lk_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (17) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00450, &profiler_part1->fe_stall_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (18) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00450, &profiler_part1->fe_process_count)); + + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (resetValue) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 
7:0))) +)); + + gcmkUPDATE_PROFILE_DATA_PART1(fe_draw_count); + gcmkUPDATE_PROFILE_DATA_PART1(fe_out_vertex_count); + gcmkUPDATE_PROFILE_DATA_PART1(fe_cache_miss_count); + gcmkUPDATE_PROFILE_DATA_PART1(fe_cache_lk_count); + gcmkUPDATE_PROFILE_DATA_PART1(fe_process_count); + + /* SH */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler_part1->ps_inst_counter)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler_part1->ps_rendered_pixel_counter)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 
31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler_part1->vs_inst_counter)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler_part1->vs_rendered_vertice_counter)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (11) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler_part1->vs_branch_inst_counter)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (12) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler_part1->vs_texld_inst_counter)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? 
+ 31:24))) | (((gctUINT32) ((gctUINT32) (13) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler_part1->ps_branch_inst_counter)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (14) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler_part1->ps_texld_inst_counter)); + if (hasNewCounters) + { + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (19) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler_part1->vs_non_idle_starve_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 
31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler_part1->vs_starve_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (16) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler_part1->vs_stall_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (21) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler_part1->vs_process_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (20) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler_part1->ps_non_idle_starve_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? 
+ 31:24))) | (((gctUINT32) ((gctUINT32) (17) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler_part1->ps_starve_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (18) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler_part1->ps_stall_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (22) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler_part1->ps_process_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 
31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler_part1->shader_cycle_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (23) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler_part1->tx_non_idle_starve_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (24) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler_part1->tx_starve_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (25) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler_part1->tx_stall_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? 
+ 31:24))) | (((gctUINT32) ((gctUINT32) (26) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler_part1->tx_process_count)); + } +#if USE_SW_RESET + gcmkRESET_PROFILE_DATA_PART1(ps_inst_counter); + gcmkRESET_PROFILE_DATA_PART1(ps_rendered_pixel_counter); + gcmkRESET_PROFILE_DATA_PART1(vs_inst_counter); + gcmkRESET_PROFILE_DATA_PART1(vs_rendered_vertice_counter); + gcmkRESET_PROFILE_DATA_PART1(vs_branch_inst_counter); + gcmkRESET_PROFILE_DATA_PART1(vs_texld_inst_counter); + gcmkRESET_PROFILE_DATA_PART1(ps_branch_inst_counter); + gcmkRESET_PROFILE_DATA_PART1(ps_texld_inst_counter); + if (hasNewCounters) + { + gcmkRESET_PROFILE_DATA_PART1(vs_non_idle_starve_count); + gcmkRESET_PROFILE_DATA_PART1(vs_starve_count); + gcmkRESET_PROFILE_DATA_PART1(vs_stall_count); + gcmkRESET_PROFILE_DATA_PART1(vs_process_count); + gcmkRESET_PROFILE_DATA_PART1(ps_non_idle_starve_count); + gcmkRESET_PROFILE_DATA_PART1(ps_starve_count); + gcmkRESET_PROFILE_DATA_PART1(ps_stall_count); + gcmkRESET_PROFILE_DATA_PART1(ps_process_count); + gcmkRESET_PROFILE_DATA_PART1(shader_cycle_count); + gcmkRESET_PROFILE_DATA_PART1(tx_non_idle_starve_count); + gcmkRESET_PROFILE_DATA_PART1(tx_starve_count); + gcmkRESET_PROFILE_DATA_PART1(tx_stall_count); + gcmkRESET_PROFILE_DATA_PART1(tx_process_count); + } +#endif + gcmkUPDATE_PROFILE_DATA_PART1(ps_inst_counter); + gcmkUPDATE_PROFILE_DATA_PART1(ps_rendered_pixel_counter); + gcmkUPDATE_PROFILE_DATA_PART1(vs_inst_counter); + gcmkUPDATE_PROFILE_DATA_PART1(vs_rendered_vertice_counter); + gcmkUPDATE_PROFILE_DATA_PART1(vs_branch_inst_counter); + gcmkUPDATE_PROFILE_DATA_PART1(vs_texld_inst_counter); + gcmkUPDATE_PROFILE_DATA_PART1(ps_branch_inst_counter); + gcmkUPDATE_PROFILE_DATA_PART1(ps_texld_inst_counter); + if (hasNewCounters) + { + gcmkUPDATE_PROFILE_DATA_PART1(vs_non_idle_starve_count); + 
gcmkUPDATE_PROFILE_DATA_PART1(vs_starve_count); + gcmkUPDATE_PROFILE_DATA_PART1(vs_stall_count); + gcmkUPDATE_PROFILE_DATA_PART1(vs_process_count); + gcmkUPDATE_PROFILE_DATA_PART1(ps_non_idle_starve_count); + gcmkUPDATE_PROFILE_DATA_PART1(ps_starve_count); + gcmkUPDATE_PROFILE_DATA_PART1(ps_stall_count); + gcmkUPDATE_PROFILE_DATA_PART1(ps_process_count); + gcmkUPDATE_PROFILE_DATA_PART1(shader_cycle_count); + gcmkUPDATE_PROFILE_DATA_PART1(tx_non_idle_starve_count); + gcmkUPDATE_PROFILE_DATA_PART1(tx_starve_count); + gcmkUPDATE_PROFILE_DATA_PART1(tx_stall_count); + gcmkUPDATE_PROFILE_DATA_PART1(tx_process_count); + } +#if !USE_SW_RESET + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (resetValue) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) +)); +#endif + + /* PA */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 
7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler_part1->pa_input_vtx_counter)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler_part1->pa_input_prim_counter)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler_part1->pa_output_prim_counter)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler_part1->pa_depth_clipped_counter)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? 
+ 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler_part1->pa_trivial_rejected_counter)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler_part1->pa_culled_prim_counter)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler_part1->pa_droped_prim_counter)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler_part1->pa_frustum_clipped_prim_counter)); + if (hasNewCounters) + { + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (12) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler_part1->pa_non_idle_starve_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (13) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler_part1->pa_starve_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (14) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler_part1->pa_stall_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 
7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler_part1->pa_process_count)); + } +#if USE_SW_RESET + gcmkRESET_PROFILE_DATA_PART1(pa_input_vtx_counter); + gcmkRESET_PROFILE_DATA_PART1(pa_input_prim_counter); + gcmkRESET_PROFILE_DATA_PART1(pa_output_prim_counter); + gcmkRESET_PROFILE_DATA_PART1(pa_depth_clipped_counter); + gcmkRESET_PROFILE_DATA_PART1(pa_trivial_rejected_counter); + gcmkRESET_PROFILE_DATA_PART1(pa_culled_prim_counter); + gcmkRESET_PROFILE_DATA_PART1(pa_droped_prim_counter); + gcmkRESET_PROFILE_DATA_PART1(pa_frustum_clipped_prim_counter); + if (hasNewCounters) + { + gcmkRESET_PROFILE_DATA_PART1(pa_non_idle_starve_count); + gcmkRESET_PROFILE_DATA_PART1(pa_starve_count); + gcmkRESET_PROFILE_DATA_PART1(pa_stall_count); + gcmkRESET_PROFILE_DATA_PART1(pa_process_count); + } +#endif + gcmkUPDATE_PROFILE_DATA_PART1(pa_input_vtx_counter); + gcmkUPDATE_PROFILE_DATA_PART1(pa_input_prim_counter); + gcmkUPDATE_PROFILE_DATA_PART1(pa_output_prim_counter); + gcmkUPDATE_PROFILE_DATA_PART1(pa_depth_clipped_counter); + gcmkUPDATE_PROFILE_DATA_PART1(pa_trivial_rejected_counter); + gcmkUPDATE_PROFILE_DATA_PART1(pa_culled_prim_counter); + gcmkUPDATE_PROFILE_DATA_PART1(pa_droped_prim_counter); + gcmkUPDATE_PROFILE_DATA_PART1(pa_frustum_clipped_prim_counter); + if (hasNewCounters) + { + gcmkUPDATE_PROFILE_DATA_PART1(pa_non_idle_starve_count); + gcmkUPDATE_PROFILE_DATA_PART1(pa_starve_count); + gcmkUPDATE_PROFILE_DATA_PART1(pa_stall_count); + gcmkUPDATE_PROFILE_DATA_PART1(pa_process_count); + } +#if !USE_SW_RESET + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (resetValue) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 
7:0))) )); +gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) +)); +#endif + + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:8) - (0 ? + 15:8) + 1))))))) << (0 ? + 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler_part1->se_clipped_triangle_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:8) - (0 ? + 15:8) + 1))))))) << (0 ? + 15:8))) | (((gctUINT32) ((gctUINT32) (16) & ((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler_part1->se_clipped_line_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:8) - (0 ? + 15:8) + 1))))))) << (0 ? + 15:8))) | (((gctUINT32) ((gctUINT32) (17) & ((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 
15:8))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler_part1->se_culled_triangle_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:8) - (0 ? + 15:8) + 1))))))) << (0 ? + 15:8))) | (((gctUINT32) ((gctUINT32) (18) & ((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler_part1->se_culled_lines_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:8) - (0 ? + 15:8) + 1))))))) << (0 ? + 15:8))) | (((gctUINT32) ((gctUINT32) (19) & ((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler_part1->se_trivial_rejected_line_count)); + if (hasNewCounters) + { + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:8) - (0 ? + 15:8) + 1))))))) << (0 ? + 15:8))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler_part1->se_starve_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:8) - (0 ? + 15:8) + 1))))))) << (0 ? 
+ 15:8))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler_part1->se_stall_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:8) - (0 ? + 15:8) + 1))))))) << (0 ? + 15:8))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler_part1->se_receive_triangle_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:8) - (0 ? + 15:8) + 1))))))) << (0 ? + 15:8))) | (((gctUINT32) ((gctUINT32) (11) & ((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler_part1->se_send_triangle_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:8) - (0 ? + 15:8) + 1))))))) << (0 ? + 15:8))) | (((gctUINT32) ((gctUINT32) (12) & ((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 
15:8))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler_part1->se_receive_lines_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:8) - (0 ? + 15:8) + 1))))))) << (0 ? + 15:8))) | (((gctUINT32) ((gctUINT32) (13) & ((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler_part1->se_send_lines_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:8) - (0 ? + 15:8) + 1))))))) << (0 ? + 15:8))) | (((gctUINT32) ((gctUINT32) (14) & ((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler_part1->se_process_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:8) - (0 ? + 15:8) + 1))))))) << (0 ? + 15:8))) | (((gctUINT32) ((gctUINT32) (20) & ((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler_part1->se_non_idle_starve_count)); + } + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:8) - (0 ? + 15:8) + 1))))))) << (0 ? 
+ 15:8))) | (((gctUINT32) ((gctUINT32) (resetValue) & ((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) )); +gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:8) - (0 ? + 15:8) + 1))))))) << (0 ? + 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) +)); + + gcmkUPDATE_PROFILE_DATA_PART1(se_clipped_triangle_count); + gcmkUPDATE_PROFILE_DATA_PART1(se_clipped_line_count); + gcmkUPDATE_PROFILE_DATA_PART1(se_culled_triangle_count); + gcmkUPDATE_PROFILE_DATA_PART1(se_culled_lines_count); + gcmkUPDATE_PROFILE_DATA_PART1(se_trivial_rejected_line_count); + if (hasNewCounters) + { + gcmkUPDATE_PROFILE_DATA_PART1(se_starve_count); + gcmkUPDATE_PROFILE_DATA_PART1(se_stall_count); + gcmkUPDATE_PROFILE_DATA_PART1(se_receive_triangle_count); + gcmkUPDATE_PROFILE_DATA_PART1(se_send_triangle_count); + gcmkUPDATE_PROFILE_DATA_PART1(se_receive_lines_count); + gcmkUPDATE_PROFILE_DATA_PART1(se_send_lines_count); + gcmkUPDATE_PROFILE_DATA_PART1(se_process_count); + gcmkUPDATE_PROFILE_DATA_PART1(se_non_idle_starve_count); + } + + /* RA */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 
23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler_part1->ra_valid_pixel_count_to_render)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler_part1->ra_total_quad_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler_part1->ra_valid_quad_count_after_early_z)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler_part1->ra_input_prim_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? 
+ 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler_part1->ra_pipe_cache_miss_counter)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler_part1->ra_prefetch_cache_miss_counter)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (11) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler_part1->ra_eez_culled_counter)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (17) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 
23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler_part1->ra_pipe_hz_cache_miss_counter)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (18) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler_part1->ra_prefetch_hz_cache_miss_counter)); + if (hasNewCounters) + { + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (13) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler_part1->ra_non_idle_starve_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (14) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler_part1->ra_starve_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 
+ 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler_part1->ra_stall_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (16) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler_part1->ra_process_count)); + } +#if USE_SW_RESET + gcmkRESET_PROFILE_DATA_PART1(ra_valid_pixel_count_to_render); + gcmkRESET_PROFILE_DATA_PART1(ra_total_quad_count); + gcmkRESET_PROFILE_DATA_PART1(ra_valid_quad_count_after_early_z); + gcmkRESET_PROFILE_DATA_PART1(ra_input_prim_count); + gcmkRESET_PROFILE_DATA_PART1(ra_pipe_cache_miss_counter); + gcmkRESET_PROFILE_DATA_PART1(ra_prefetch_cache_miss_counter); + gcmkRESET_PROFILE_DATA_PART1(ra_eez_culled_counter); + gcmkRESET_PROFILE_DATA_PART1(ra_pipe_hz_cache_miss_counter); + gcmkRESET_PROFILE_DATA_PART1(ra_prefetch_hz_cache_miss_counter); + if (hasNewCounters) + { + gcmkRESET_PROFILE_DATA_PART1(ra_non_idle_starve_count); + gcmkRESET_PROFILE_DATA_PART1(ra_starve_count); + gcmkRESET_PROFILE_DATA_PART1(ra_stall_count); + gcmkRESET_PROFILE_DATA_PART1(ra_process_count); + } +#endif + gcmkUPDATE_PROFILE_DATA_PART1(ra_valid_pixel_count_to_render); + gcmkUPDATE_PROFILE_DATA_PART1(ra_total_quad_count); + gcmkUPDATE_PROFILE_DATA_PART1(ra_valid_quad_count_after_early_z); + gcmkUPDATE_PROFILE_DATA_PART1(ra_input_prim_count); + gcmkUPDATE_PROFILE_DATA_PART1(ra_pipe_cache_miss_counter); + 
gcmkUPDATE_PROFILE_DATA_PART1(ra_prefetch_cache_miss_counter); + gcmkUPDATE_PROFILE_DATA_PART1(ra_eez_culled_counter); + gcmkUPDATE_PROFILE_DATA_PART1(ra_pipe_hz_cache_miss_counter); + gcmkUPDATE_PROFILE_DATA_PART1(ra_prefetch_hz_cache_miss_counter); + if (hasNewCounters) + { + gcmkUPDATE_PROFILE_DATA_PART1(ra_non_idle_starve_count); + gcmkUPDATE_PROFILE_DATA_PART1(ra_starve_count); + gcmkUPDATE_PROFILE_DATA_PART1(ra_stall_count); + gcmkUPDATE_PROFILE_DATA_PART1(ra_process_count); + } +#if !USE_SW_RESET + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (resetValue) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) +)); +#endif + + /* TX */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 
31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler_part1->tx_total_bilinear_requests)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler_part1->tx_total_trilinear_requests)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler_part1->tx_total_discarded_texture_requests)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler_part1->tx_total_texture_requests)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? 
+ 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler_part1->tx_mc0_miss_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler_part1->tx_mc0_request_byte_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler_part1->tx_mc1_miss_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 
31:24))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler_part1->tx_mc1_request_byte_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (resetValue) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) )); +gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) +)); + + gcmkUPDATE_PROFILE_DATA_PART1(tx_total_bilinear_requests); + gcmkUPDATE_PROFILE_DATA_PART1(tx_total_trilinear_requests); + gcmkUPDATE_PROFILE_DATA_PART1(tx_total_discarded_texture_requests); + gcmkUPDATE_PROFILE_DATA_PART1(tx_total_texture_requests); + gcmkUPDATE_PROFILE_DATA_PART1(tx_mc0_miss_count); + gcmkUPDATE_PROFILE_DATA_PART1(tx_mc0_request_byte_count); + gcmkUPDATE_PROFILE_DATA_PART1(tx_mc1_miss_count); + gcmkUPDATE_PROFILE_DATA_PART1(tx_mc1_request_byte_count); + + /* MC */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 
7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler_part2->mcc_total_read_req_8B_from_colorpipe)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler_part2->mcc_total_read_req_8B_sentout_from_colorpipe)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler_part2->mcc_total_write_req_8B_from_colorpipe)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler_part2->mcc_total_read_req_sentout_from_colorpipe)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? 
+ 7:0))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler_part2->mcc_total_write_req_from_colorpipe)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler_part2->mcc_total_read_req_8B_from_depthpipe)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler_part2->mcc_total_read_req_8B_sentout_from_depthpipe)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 
7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler_part2->mcc_total_write_req_8B_from_depthpipe)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler_part2->mcc_total_read_req_sentout_from_depthpipe)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (11) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler_part2->mcc_total_write_req_from_depthpipe)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (12) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler_part2->mcc_total_read_req_8B_from_others)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? 
+ 7:0))) | (((gctUINT32) ((gctUINT32) (13) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler_part2->mcc_total_write_req_8B_from_others)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (14) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler_part2->mcc_total_read_req_from_others)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler_part2->mcc_total_write_req_from_others)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (21) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 
7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler_part2->mc_fe_read_bandwidth)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (22) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler_part2->mc_mmu_read_bandwidth)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (23) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler_part2->mc_blt_read_bandwidth)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (24) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler_part2->mc_sh0_read_bandwidth)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (25) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? 
+ 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler_part2->mc_sh1_read_bandwidth)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (26) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler_part2->mc_pe_write_bandwidth)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (27) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler_part2->mc_blt_write_bandwidth)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (28) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler_part2->mc_sh0_write_bandwidth)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? 
+ 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (29) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler_part2->mc_sh1_write_bandwidth)); + + /* Reset counters. */ + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 1)); + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 0)); + + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (resetValue) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) )); +gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 
7:0))) +)); + + gcmkUPDATE_PROFILE_DATA_PART2(mcc_total_read_req_8B_from_colorpipe); + gcmkUPDATE_PROFILE_DATA_PART2(mcc_total_read_req_8B_sentout_from_colorpipe); + gcmkUPDATE_PROFILE_DATA_PART2(mcc_total_write_req_8B_from_colorpipe); + gcmkUPDATE_PROFILE_DATA_PART2(mcc_total_read_req_sentout_from_colorpipe); + gcmkUPDATE_PROFILE_DATA_PART2(mcc_total_write_req_from_colorpipe); + gcmkUPDATE_PROFILE_DATA_PART2(mcc_total_read_req_8B_from_depthpipe); + gcmkUPDATE_PROFILE_DATA_PART2(mcc_total_read_req_8B_sentout_from_depthpipe); + gcmkUPDATE_PROFILE_DATA_PART2(mcc_total_write_req_8B_from_depthpipe); + gcmkUPDATE_PROFILE_DATA_PART2(mcc_total_read_req_sentout_from_depthpipe); + gcmkUPDATE_PROFILE_DATA_PART2(mcc_total_write_req_from_depthpipe); + gcmkUPDATE_PROFILE_DATA_PART2(mcc_total_read_req_8B_from_others); + gcmkUPDATE_PROFILE_DATA_PART2(mcc_total_write_req_8B_from_others); + gcmkUPDATE_PROFILE_DATA_PART2(mcc_total_read_req_from_others); + gcmkUPDATE_PROFILE_DATA_PART2(mcc_total_write_req_from_others); + gcmkUPDATE_PROFILE_DATA_PART2(mc_fe_read_bandwidth); + gcmkUPDATE_PROFILE_DATA_PART2(mc_mmu_read_bandwidth); + gcmkUPDATE_PROFILE_DATA_PART2(mc_blt_read_bandwidth); + gcmkUPDATE_PROFILE_DATA_PART2(mc_sh0_read_bandwidth); + gcmkUPDATE_PROFILE_DATA_PART2(mc_sh1_read_bandwidth); + gcmkUPDATE_PROFILE_DATA_PART2(mc_pe_write_bandwidth); + gcmkUPDATE_PROFILE_DATA_PART2(mc_blt_write_bandwidth); + gcmkUPDATE_PROFILE_DATA_PART2(mc_sh0_write_bandwidth); + gcmkUPDATE_PROFILE_DATA_PART2(mc_sh1_write_bandwidth); + + /* read latency counters */ + if (hasNewCounters) + { + /* latency */ + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x0056C, + &mc_axi_max_min_latency)); + + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00570, + &profiler_part2->mcc_axi_total_latency)); + + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00574, + &profiler_part2->mcc_axi_sample_count)); + + /* Reset Latency counters */ + 
gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00568, + 0x10a)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00568, + 0xa)); + + profiler_part2->mcc_axi_min_latency = (mc_axi_max_min_latency & 0xffff0000) >> 16; + profiler_part2->mcc_axi_max_latency = (mc_axi_max_min_latency & 0x0000ffff); + if (profiler_part2->mcc_axi_min_latency == 4095) + profiler_part2->mcc_axi_min_latency = 0; + + gcmkUPDATE_PROFILE_DATA_PART2(mcc_axi_min_latency); + gcmkUPDATE_PROFILE_DATA_PART2(mcc_axi_max_latency); + gcmkUPDATE_PROFILE_DATA_PART2(mcc_axi_total_latency); + gcmkUPDATE_PROFILE_DATA_PART2(mcc_axi_sample_count); + } + + /* HI */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:8) - (0 ? + 15:8) + 1))))))) << (0 ? + 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler_part2->hi0_axi_cycles_read_request_stalled)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:8) - (0 ? + 15:8) + 1))))))) << (0 ? + 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler_part2->hi0_axi_cycles_write_request_stalled)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:8) - (0 ? + 15:8) + 1))))))) << (0 ? 
+ 15:8))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler_part2->hi0_axi_cycles_write_data_stalled)); + + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:8) - (0 ? + 15:8) + 1))))))) << (0 ? + 15:8))) | (((gctUINT32) ((gctUINT32) (resetValue) & ((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) )); +gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:8) - (0 ? + 15:8) + 1))))))) << (0 ? + 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) +)); + + gcmkUPDATE_PROFILE_DATA_PART2(hi0_axi_cycles_read_request_stalled); + gcmkUPDATE_PROFILE_DATA_PART2(hi0_axi_cycles_write_request_stalled); + gcmkUPDATE_PROFILE_DATA_PART2(hi0_axi_cycles_write_data_stalled); + + /* L2 */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 
23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00564, &profiler_part2->l2_total_axi0_read_request_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00564, &profiler_part2->l2_total_axi0_write_request_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00564, &profiler_part2->l2_total_axi1_write_request_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00564, &profiler_part2->l2_total_read_transactions_request_by_axi0)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 
+ 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00564, &profiler_part2->l2_total_read_transactions_request_by_axi1)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (12) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00564, &profiler_part2->l2_total_write_transactions_request_by_axi0)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (13) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00564, &profiler_part2->l2_total_write_transactions_request_by_axi1)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (16) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 
23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00564, &profiler_part2->l2_axi0_minmax_latency)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (17) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00564, &profiler_part2->l2_axi0_total_latency)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (18) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00564, &profiler_part2->l2_axi0_total_request_count)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (19) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00564, &profiler_part2->l2_axi1_minmax_latency)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? 
+ 23:16))) | (((gctUINT32) ((gctUINT32) (20) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00564, &profiler_part2->l2_axi1_total_latency)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (21) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00564, &profiler_part2->l2_axi1_total_request_count)); + + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (resetValue) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) )); +gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 
23:16))) +)); + + profiler_part2->l2_axi0_min_latency = (profiler_part2->l2_axi0_minmax_latency & 0xffff0000) >> 16; + profiler_part2->l2_axi0_max_latency = (profiler_part2->l2_axi0_minmax_latency & 0x0000ffff); + profiler_part2->l2_axi1_min_latency = (profiler_part2->l2_axi0_minmax_latency & 0xffff0000) >> 16; + profiler_part2->l2_axi1_max_latency = (profiler_part2->l2_axi0_minmax_latency & 0x0000ffff); + gcmkUPDATE_PROFILE_DATA_PART2(l2_total_axi0_read_request_count); + gcmkUPDATE_PROFILE_DATA_PART2(l2_total_axi1_read_request_count); + gcmkUPDATE_PROFILE_DATA_PART2(l2_total_axi0_write_request_count); + gcmkUPDATE_PROFILE_DATA_PART2(l2_total_axi1_write_request_count); + gcmkUPDATE_PROFILE_DATA_PART2(l2_total_read_transactions_request_by_axi0); + gcmkUPDATE_PROFILE_DATA_PART2(l2_total_read_transactions_request_by_axi1); + gcmkUPDATE_PROFILE_DATA_PART2(l2_total_write_transactions_request_by_axi0); + gcmkUPDATE_PROFILE_DATA_PART2(l2_total_write_transactions_request_by_axi1); + gcmkUPDATE_PROFILE_DATA_PART2(l2_axi0_min_latency); + gcmkUPDATE_PROFILE_DATA_PART2(l2_axi0_max_latency); + gcmkUPDATE_PROFILE_DATA_PART2(l2_axi0_total_latency); + gcmkUPDATE_PROFILE_DATA_PART2(l2_axi0_total_request_count); + gcmkUPDATE_PROFILE_DATA_PART2(l2_axi1_min_latency); + gcmkUPDATE_PROFILE_DATA_PART2(l2_axi1_max_latency); + gcmkUPDATE_PROFILE_DATA_PART2(l2_axi1_total_latency); + gcmkUPDATE_PROFILE_DATA_PART2(l2_axi1_total_request_count); + + gcmkVERIFY_OK(gckOS_ReleaseMutex( + command->os, command->mutexContextSeq + )); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (mutexAcquired) + { + gckOS_ReleaseMutex( + command->os, command->mutexContextSeq + ); + } + + /* Return the status. 
*/ + gcmkFOOTER(); + return status; +} + + +gceSTATUS +gckHARDWARE_InitProfiler( + IN gckHARDWARE Hardware + ) +{ + gceSTATUS status; + gctUINT32 control; + + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00000, + &control)); + /* Enable debug register. */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00000, + ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 11:11) - (0 ? + 11:11) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 11:11) - (0 ? + 11:11) + 1))))))) << (0 ? + 11:11))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 11:11) - (0 ? + 11:11) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11))))); + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +static gceSTATUS +_ResetGPU( + IN gckHARDWARE Hardware, + IN gckOS Os, + IN gceCORE Core + ) +{ + gctUINT32 control, idle; + gceSTATUS status; + + for (;;) + { + /* Disable clock gating. */ + gcmkONERROR(gckOS_WriteRegisterEx(Os, + Core, + Hardware->powerBaseAddress + + 0x00104, + 0x00000000)); + + control = ((((gctUINT32) (0x01590880)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 17:17) - (0 ? + 17:17) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 17:17) - (0 ? + 17:17) + 1))))))) << (0 ? + 17:17))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 17:17) - (0 ? + 17:17) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17))); + + /* Disable pulse-eater. */ + gcmkONERROR(gckOS_WriteRegisterEx(Os, + Core, + 0x0010C, + control)); + + gcmkONERROR(gckOS_WriteRegisterEx(Os, + Core, + 0x0010C, + ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 
0:0))))); + + gcmkONERROR(gckOS_WriteRegisterEx(Os, + Core, + 0x0010C, + control)); + + gcmkONERROR(gckOS_WriteRegisterEx(Os, + Core, + 0x00000, + ((((gctUINT32) (0x00000900)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 9:9) - (0 ? + 9:9) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 9:9) - (0 ? + 9:9) + 1))))))) << (0 ? + 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 9:9) - (0 ? + 9:9) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))))); + + gcmkONERROR(gckOS_WriteRegisterEx(Os, + Core, + 0x00000, + 0x00000900)); + + /* Wait for clock being stable. */ + gcmkONERROR(gckOS_Delay(Os, 1)); + + /* Isolate the GPU. */ + control = ((((gctUINT32) (0x00000900)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 19:19) - (0 ? + 19:19) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 19:19) - (0 ? + 19:19) + 1))))))) << (0 ? + 19:19))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 19:19) - (0 ? + 19:19) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))); + + gcmkONERROR(gckOS_WriteRegisterEx(Os, + Core, + 0x00000, + control)); + + if (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_SECURITY_AHB) && + (Hardware->options.secureMode == gcvSECURE_IN_NORMAL)) + { + gcmkONERROR(gckOS_WriteRegisterEx(Os, + Core, + 0x003A8, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))))); + } + else + { + /* Set soft reset. */ + gcmkONERROR(gckOS_WriteRegisterEx(Os, + Core, + 0x00000, + ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:12) - (0 ? + 12:12) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:12) - (0 ? + 12:12) + 1))))))) << (0 ? + 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 12:12) - (0 ? + 12:12) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))))); + } + + /* Wait for reset. */ + gcmkONERROR(gckOS_Delay(Os, 1)); + + /* Reset soft reset bit. */ + gcmkONERROR(gckOS_WriteRegisterEx(Os, + Core, + 0x00000, + ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:12) - (0 ? + 12:12) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:12) - (0 ? + 12:12) + 1))))))) << (0 ? + 12:12))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 12:12) - (0 ? + 12:12) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))))); + + /* Reset GPU isolation. */ + control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 19:19) - (0 ? + 19:19) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 19:19) - (0 ? + 19:19) + 1))))))) << (0 ? + 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 19:19) - (0 ? + 19:19) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))); + + gcmkONERROR(gckOS_WriteRegisterEx(Os, + Core, + 0x00000, + control)); + + /* Read idle register. */ + gcmkONERROR(gckOS_ReadRegisterEx(Os, + Core, + 0x00004, + &idle)); + + if ((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) == 0) + { + continue; + } + + /* Read reset register. */ + gcmkONERROR(gckOS_ReadRegisterEx(Os, + Core, + 0x00000, + &control)); + + if (((((((gctUINT32) (control)) >> (0 ? 16:16)) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) ) == 0) + || ((((((gctUINT32) (control)) >> (0 ? 17:17)) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 17:17) - (0 ? 17:17) + 1)))))) ) == 0) + ) + { + continue; + } + + /* GPU is idle. */ + break; + } + + /* Success. */ + return gcvSTATUS_OK; + +OnError: + + /* Return the error. 
*/ + return status; +} + +gceSTATUS +gckHARDWARE_Reset( + IN gckHARDWARE Hardware + ) +{ + gceSTATUS status; + + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + gcmkVERIFY_OBJECT(Hardware->kernel, gcvOBJ_KERNEL); + + /* Record context ID in debug register before reset. */ + gcmkONERROR(gckHARDWARE_UpdateContextID(Hardware)); + + /* Hardware reset. */ + status = gckOS_ResetGPU(Hardware->os, Hardware->core); + + if (gcmIS_ERROR(status)) + { + if (Hardware->identity.chipRevision < 0x4600) + { + /* Not supported - we need the isolation bit. */ + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + } + + /* Soft reset. */ + gcmkONERROR(_ResetGPU(Hardware, Hardware->os, Hardware->core)); + } + + /* Force the command queue to reload the next context. */ + Hardware->kernel->command->currContext = gcvNULL; + + /* Initialize hardware. */ + gcmkONERROR(gckHARDWARE_InitializeHardware(Hardware)); + + /* Jump to address into which GPU should run if it doesn't stuck. */ + gcmkONERROR(gckHARDWARE_Execute(Hardware, Hardware->kernel->restoreAddress, 16)); + + gcmkPRINT("[galcore]: recovery done"); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkPRINT("[galcore]: Hardware not reset successfully, give up"); + + /* Return the error. */ + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckHARDWARE_GetBaseAddress( + IN gckHARDWARE Hardware, + OUT gctUINT32_PTR BaseAddress + ) +{ + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + gcmkVERIFY_ARGUMENT(BaseAddress != gcvNULL); + + /* Test if we have a new Memory Controller. */ + if (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_MC20)) + { + /* No base address required. */ + *BaseAddress = 0; + } + else + { + /* Get the base address from the OS. */ + *BaseAddress = Hardware->baseAddress; + } + + /* Success. 
*/ + gcmkFOOTER_ARG("*BaseAddress=0x%08x", *BaseAddress); + return gcvSTATUS_OK; +} + +gceSTATUS +gckHARDWARE_NeedBaseAddress( + IN gckHARDWARE Hardware, + IN gctUINT32 State, + OUT gctBOOL_PTR NeedBase + ) +{ + gctBOOL need = gcvFALSE; + + gcmkHEADER_ARG("Hardware=0x%x State=0x%08x", Hardware, State); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + gcmkVERIFY_ARGUMENT(NeedBase != gcvNULL); + + /* Make sure this is a load state. */ + if (((((gctUINT32) (State)) >> (0 ? + 31:27) & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1)))))) == (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1)))))))) + { +#if gcdENABLE_3D + /* Get the state address. */ + switch ((((((gctUINT32) (State)) >> (0 ? 15:0)) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1)))))) )) + { + case 0x0596: + case 0x0597: + case 0x0599: + case 0x059A: + case 0x05A9: + /* These states need a TRUE physical address. */ + need = gcvTRUE; + break; + } +#else + /* 2D addresses don't need a base address. */ +#endif + } + + /* Return the flag. */ + *NeedBase = need; + + /* Success. */ + gcmkFOOTER_ARG("*NeedBase=%d", *NeedBase); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckHARDWARE_IsFeatureAvailable +** +** Verifies whether the specified feature is available in hardware. +** +** INPUT: +** +** gckHARDWARE Hardware +** Pointer to an gckHARDWARE object. +** +** gceFEATURE Feature +** Feature to be verified. +*/ +gceSTATUS +gckHARDWARE_IsFeatureAvailable( + IN gckHARDWARE Hardware, + IN gceFEATURE Feature + ) +{ + gctBOOL available; + + gcmkHEADER_ARG("Hardware=0x%x Feature=%d", Hardware, Feature); + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + + available = _QueryFeatureDatabase(Hardware, Feature); + + /* Return result. */ + gcmkFOOTER_ARG("%d", available ? gcvSTATUS_TRUE : gcvSTATUS_FALSE); + return available ? gcvSTATUS_TRUE : gcvSTATUS_FALSE; +} + +/******************************************************************************* +** +** gckHARDWARE_DumpMMUException +** +** Dump the MMU debug info on an MMU exception. +** +** INPUT: +** +** gckHARDWARE Harwdare +** Pointer to an gckHARDWARE object. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckHARDWARE_DumpMMUException( + IN gckHARDWARE Hardware + ) +{ + gctUINT32 mmu = 0; + gctUINT32 mmuStatus = 0; + gctUINT32 address = 0; + gctUINT32 i = 0; + gctUINT32 mtlb = 0; + gctUINT32 stlb = 0; + gctUINT32 offset = 0; +#if gcdPROCESS_ADDRESS_SPACE + gcsDATABASE_PTR database; +#endif + gctUINT32 mmuStatusRegAddress; + gctUINT32 mmuExceptionAddress; + + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + +#if gcdENABLE_TRUST_APPLICATION + if (Hardware->options.secureMode == gcvSECURE_IN_TA) + { + gcmkVERIFY_OK(gckKERNEL_SecurityDumpMMUException(Hardware->kernel)); + + gckMMU_DumpRecentFreedAddress(Hardware->kernel->mmu); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + } + else +#endif + if (Hardware->options.secureMode == gcvSECURE_NONE) + { + mmuStatusRegAddress = 0x00188; + mmuExceptionAddress = 0x00190; + } + else + { + mmuStatusRegAddress = 0x00384; + mmuExceptionAddress = 0x00380; + } + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + + gcmkPRINT("GPU[%d](ChipModel=0x%x ChipRevision=0x%x):\n", + Hardware->core, + Hardware->identity.chipModel, + Hardware->identity.chipRevision); + + gcmkPRINT("**************************\n"); + gcmkPRINT("*** MMU ERROR DUMP ***\n"); + gcmkPRINT("**************************\n"); + + + gcmkVERIFY_OK( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + mmuStatusRegAddress, + &mmuStatus)); + + gcmkPRINT(" MMU status = 0x%08X\n", mmuStatus); + + for (i = 0; i < 4; i += 1) + { + mmu = mmuStatus & 0xF; + mmuStatus >>= 4; + + if (mmu == 0) + { + continue; + } + + switch (mmu) + { + case 1: + gcmkPRINT(" MMU%d: slave not present\n", i); + break; + + case 2: + gcmkPRINT(" MMU%d: page not present\n", i); + break; + + case 3: + gcmkPRINT(" MMU%d: write violation\n", i); + break; + + case 4: + gcmkPRINT(" MMU%d: out of bound", i); + break; + + case 5: + gcmkPRINT(" MMU%d: read security violation", i); + break; + + case 6: + gcmkPRINT(" MMU%d: write security violation", i); + break; + + default: + gcmkPRINT(" MMU%d: unknown state\n", i); + } + + gcmkVERIFY_OK( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + mmuExceptionAddress + i * 4, + &address)); + + mtlb = (address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT; + stlb = (address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT; + offset = address & gcdMMU_OFFSET_4K_MASK; + + gcmkPRINT(" MMU%d: exception address = 0x%08X\n", i, address); + + gcmkPRINT(" MTLB entry = %d\n", mtlb); + + gcmkPRINT(" STLB entry = %d\n", stlb); + + gcmkPRINT(" Offset = 0x%08X (%d)\n", offset, offset); + + gckMMU_DumpPageTableEntry(Hardware->kernel->mmu, address); + +#if gcdPROCESS_ADDRESS_SPACE + for (i = 0; i < gcmCOUNTOF(Hardware->kernel->db->db); ++i) + { + for (database = Hardware->kernel->db->db[i]; + database != gcvNULL; + database = database->next) + { + gcmkPRINT(" database [%d] :", database->processID); + gckMMU_DumpPageTableEntry(database->mmu, address); + } + } +#endif + + 
gckMMU_DumpRecentFreedAddress(Hardware->kernel->mmu); + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gckHARDWARE_HandleFault( + IN gckHARDWARE Hardware + ) +{ + gceSTATUS status = gcvSTATUS_NOT_SUPPORTED; + gctUINT32 mmu, mmuStatus, address = gcvINVALID_ADDRESS, i = 0; + gctUINT32 mmuStatusRegAddress; + gctUINT32 mmuExceptionAddress; + + gcuVIDMEM_NODE_PTR node; + gctUINT32 entryValue; + + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + + if (Hardware->options.secureMode == gcvSECURE_NONE) + { + mmuStatusRegAddress = 0x00188; + mmuExceptionAddress = 0x00190; + } + else + { + mmuStatusRegAddress = 0x00384; + mmuExceptionAddress = 0x00380; + } + + /* Get MMU exception address. */ +#if gcdENABLE_TRUST_APPLICATION + if (Hardware->options.secureMode == gcvSECURE_IN_TA) + { + gckKERNEL_ReadMMUException(Hardware->kernel, &mmuStatus, &address); + } + else +#endif + { + gcmkVERIFY_OK(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + mmuStatusRegAddress, + &mmuStatus + )); + + gcmkPRINT(" MMU status = 0x%08X\n", mmuStatus); + + for (i = 0; i < 4; i += 1) + { + mmu = mmuStatus & 0xF; + mmuStatus >>= 4; + + if (mmu == 0) + { + continue; + } + + gcmkVERIFY_OK(gckOS_ReadRegisterEx( + Hardware->os, + Hardware->core, + mmuExceptionAddress + i * 4, + &address + )); + + break; + } + } + + if (address != gcvINVALID_ADDRESS) + { + address &= ~gcdMMU_PAGE_4K_MASK; + + /* Try to allocate memory and setup map for exception address. */ + gcmkONERROR(gckVIDMEM_FindVIDMEM(Hardware->kernel, address, &node, &entryValue)); + +#if gcdENABLE_TRUST_APPLICATION + if (Hardware->options.secureMode == gcvSECURE_IN_TA) + { + gckKERNEL_HandleMMUException( + Hardware->kernel, + mmuStatus, + entryValue, + address + ); + } + else +#endif + { + gctUINT32_PTR entry; + + /* Setup page table. */ + gcmkONERROR(gckMMU_GetPageEntry(Hardware->kernel->mmu, address, &entry)); + + gckMMU_SetPage(Hardware->kernel->mmu, entryValue, gcvTRUE, entry); + + /* Resume hardware execution. 
*/ + gcmkVERIFY_OK(gckOS_WriteRegisterEx( + Hardware->os, + Hardware->core, + mmuExceptionAddress + i * 4, + *entry + )); + } + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckHARDWARE_DumpGPUState +** +** Dump the GPU debug registers. +** +** INPUT: +** +** gckHARDWARE Harwdare +** Pointer to an gckHARDWARE object. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckHARDWARE_DumpGPUState( + IN gckHARDWARE Hardware + ) +{ + static gctCONST_STRING _cmdState[] = + { + "PAR_IDLE_ST", "PAR_DEC_ST", "PAR_ADR0_ST", "PAR_LOAD0_ST", + "PAR_ADR1_ST", "PAR_LOAD1_ST", "PAR_3DADR_ST", "PAR_3DCMD_ST", + "PAR_3DCNTL_ST", "PAR_3DIDXCNTL_ST", "PAR_INITREQDMA_ST", + "PAR_DRAWIDX_ST", "PAR_DRAW_ST", "PAR_2DRECT0_ST", "PAR_2DRECT1_ST", + "PAR_2DDATA0_ST", "PAR_2DDATA1_ST", "PAR_WAITFIFO_ST", "PAR_WAIT_ST", + "PAR_LINK_ST", "PAR_END_ST", "PAR_STALL_ST" + }; + + static gctCONST_STRING _cmdDmaState[] = + { + "CMD_IDLE_ST", "CMD_START_ST", "CMD_REQ_ST", "CMD_END_ST" + }; + + static gctCONST_STRING _cmdFetState[] = + { + "FET_IDLE_ST", "FET_RAMVALID_ST", "FET_VALID_ST" + }; + + static gctCONST_STRING _reqDmaState[] = + { + "REQ_IDLE_ST", "REQ_WAITIDX_ST", "REQ_CAL_ST" + }; + + static gctCONST_STRING _calState[] = + { + "CAL_IDLE_ST", "CAL_LDADR_ST", "CAL_IDXCALC_ST" + }; + + static gctCONST_STRING _veReqState[] = + { + "VER_IDLE_ST", "VER_CKCACHE_ST", "VER_MISS_ST" + }; + + static gcsiDEBUG_REGISTERS _dbgRegs[] = + { + { "RA", 0x474, 16, 0x448, 256, 0x1, 0x00 }, + { "TX", 0x474, 24, 0x44C, 128, 0x1, 0x00 }, + { "FE", 0x470, 0, 0x450, 256, 0x1, 0x00 }, + { "PE", 0x470, 16, 0x454, 256, 0x3, 0x00 }, + { "DE", 0x470, 8, 0x458, 256, 0x1, 0x00 }, + { "SH", 0x470, 24, 0x45C, 256, 0x1, 0x00 }, + { "PA", 0x474, 0, 0x460, 256, 0x1, 0x00 }, + { "SE", 0x474, 8, 0x464, 256, 0x1, 0x00 }, + { "MC", 0x478, 0, 0x468, 256, 0x3, 0x00 }, + { "HI", 0x478, 8, 0x46C, 256, 
0x1, 0x00 }, + { "TPG", 0x474, 24, 0x44C, 32, 0x2, 0x80 }, + { "TFB", 0x474, 24, 0x44C, 32, 0x2, 0xA0 }, + { "USC", 0x474, 24, 0x44C, 64, 0x2, 0xC0 }, + { "L2", 0x478, 0, 0x564, 256, 0x1, 0x00 }, + { "BLT", 0x478, 24, 0x1A4, 256, 0x1, 0x00 } + }; + + static gctUINT32 _otherRegs[] = + { + 0x040, 0x044, 0x04C, 0x050, 0x054, 0x058, 0x05C, 0x060, + 0x43c, 0x440, 0x444, 0x414, 0x100 + }; + + gceSTATUS status; + gckKERNEL kernel = gcvNULL; + gctUINT32 idle = 0, axi = 0; + gctUINT32 dmaAddress1 = 0, dmaAddress2 = 0; + gctUINT32 dmaState1 = 0, dmaState2 = 0; + gctUINT32 dmaLow = 0, dmaHigh = 0; + gctUINT32 cmdState = 0, cmdDmaState = 0, cmdFetState = 0; + gctUINT32 dmaReqState = 0, calState = 0, veReqState = 0; + gctUINT i; + gctUINT pipe = 0, pixelPipes = 0; + gctUINT32 control = 0, oldControl = 0; + gckOS os = Hardware->os; + gceCORE core = Hardware->core; + + gcmkHEADER_ARG("Hardware=0x%X", Hardware); + + kernel = Hardware->kernel; + + gcmkPRINT_N(12, "GPU[%d](ChipModel=0x%x ChipRevision=0x%x):\n", + core, + Hardware->identity.chipModel, + Hardware->identity.chipRevision); + + pixelPipes = Hardware->identity.pixelPipes + ? Hardware->identity.pixelPipes + : 1; + + /* Reset register values. */ + idle = axi = + dmaState1 = dmaState2 = + dmaAddress1 = dmaAddress2 = + dmaLow = dmaHigh = 0; + + /* Verify whether DMA is running. 
*/ + gcmkONERROR(_VerifyDMA( + os, core, &dmaAddress1, &dmaAddress2, &dmaState1, &dmaState2 + )); + + cmdState = dmaState2 & 0x1F; + cmdDmaState = (dmaState2 >> 8) & 0x03; + cmdFetState = (dmaState2 >> 10) & 0x03; + dmaReqState = (dmaState2 >> 12) & 0x03; + calState = (dmaState2 >> 14) & 0x03; + veReqState = (dmaState2 >> 16) & 0x03; + + gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x00004, &idle)); + gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x0000C, &axi)); + gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x00668, &dmaLow)); + gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x00668, &dmaLow)); + gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x0066C, &dmaHigh)); + gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x0066C, &dmaHigh)); + + gcmkPRINT_N(0, "**************************\n"); + gcmkPRINT_N(0, "*** GPU STATE DUMP ***\n"); + gcmkPRINT_N(0, "**************************\n"); + + gcmkPRINT_N(4, " axi = 0x%08X\n", axi); + + gcmkPRINT_N(4, " idle = 0x%08X\n", idle); + if ((idle & 0x00000001) == 0) gcmkPRINT_N(0, " FE not idle\n"); + if ((idle & 0x00000002) == 0) gcmkPRINT_N(0, " DE not idle\n"); + if ((idle & 0x00000004) == 0) gcmkPRINT_N(0, " PE not idle\n"); + if ((idle & 0x00000008) == 0) gcmkPRINT_N(0, " SH not idle\n"); + if ((idle & 0x00000010) == 0) gcmkPRINT_N(0, " PA not idle\n"); + if ((idle & 0x00000020) == 0) gcmkPRINT_N(0, " SE not idle\n"); + if ((idle & 0x00000040) == 0) gcmkPRINT_N(0, " RA not idle\n"); + if ((idle & 0x00000080) == 0) gcmkPRINT_N(0, " TX not idle\n"); + if ((idle & 0x00000100) == 0) gcmkPRINT_N(0, " VG not idle\n"); + if ((idle & 0x00000200) == 0) gcmkPRINT_N(0, " IM not idle\n"); + if ((idle & 0x00000400) == 0) gcmkPRINT_N(0, " FP not idle\n"); + if ((idle & 0x00000800) == 0) gcmkPRINT_N(0, " TS not idle\n"); + if ((idle & 0x00001000) == 0) gcmkPRINT_N(0, " BL not idle\n"); + if ((idle & 0x00002000) == 0) gcmkPRINT_N(0, " BP not idle\n"); + if ((idle & 0x00004000) == 0) gcmkPRINT_N(0, " MC not idle\n"); + if ((idle & 0x80000000) != 0) gcmkPRINT_N(0, 
" AXI low power mode\n"); + + if ( + (dmaAddress1 == dmaAddress2) + && (dmaState1 == dmaState2) + ) + { + gcmkPRINT_N(0, " DMA appears to be stuck at this address:\n"); + gcmkPRINT_N(4, " 0x%08X\n", dmaAddress1); + } + else + { + if (dmaAddress1 == dmaAddress2) + { + gcmkPRINT_N(0, " DMA address is constant, but state is changing:\n"); + gcmkPRINT_N(4, " 0x%08X\n", dmaState1); + gcmkPRINT_N(4, " 0x%08X\n", dmaState2); + } + else + { + gcmkPRINT_N(0, " DMA is running; known addresses are:\n"); + gcmkPRINT_N(4, " 0x%08X\n", dmaAddress1); + gcmkPRINT_N(4, " 0x%08X\n", dmaAddress2); + } + } + + gcmkPRINT_N(4, " dmaLow = 0x%08X\n", dmaLow); + gcmkPRINT_N(4, " dmaHigh = 0x%08X\n", dmaHigh); + gcmkPRINT_N(4, " dmaState = 0x%08X\n", dmaState2); + gcmkPRINT_N(8, " command state = %d (%s)\n", cmdState, _cmdState [cmdState]); + gcmkPRINT_N(8, " command DMA state = %d (%s)\n", cmdDmaState, _cmdDmaState[cmdDmaState]); + gcmkPRINT_N(8, " command fetch state = %d (%s)\n", cmdFetState, _cmdFetState[cmdFetState]); + gcmkPRINT_N(8, " DMA request state = %d (%s)\n", dmaReqState, _reqDmaState[dmaReqState]); + gcmkPRINT_N(8, " cal state = %d (%s)\n", calState, _calState [calState]); + gcmkPRINT_N(8, " VE request state = %d (%s)\n", veReqState, _veReqState [veReqState]); + + gcmkPRINT_N(0, " Debug registers:\n"); + + for (i = 0; i < gcmCOUNTOF(_dbgRegs); i += 1) + { + gcmkONERROR(_DumpDebugRegisters(os, core, &_dbgRegs[i])); + } + + /* Record control. */ + gckOS_ReadRegisterEx(os, core, 0x0, &oldControl); + + for (pipe = 0; pipe < pixelPipes; pipe++) + { + gcmkPRINT_N(4, " Other Registers[%d]:\n", pipe); + + /* Switch pipe. 
*/ + gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x0, &control)); + control &= ~(0xF << 20); + control |= (pipe << 20); + gcmkONERROR(gckOS_WriteRegisterEx(os, core, 0x0, control)); + + for (i = 0; i < gcmCOUNTOF(_otherRegs); i += 1) + { + gctUINT32 read; + gcmkONERROR(gckOS_ReadRegisterEx(os, core, _otherRegs[i], &read)); + gcmkPRINT_N(12, " [0x%04X] 0x%08X\n", _otherRegs[i], read); + } + + if (Hardware->mmuVersion) + { + gcmkPRINT(" MMU status from MC[%d]:", pipe); + + gckHARDWARE_DumpMMUException(Hardware); + } + } + + /* Restore control. */ + gcmkONERROR(gckOS_WriteRegisterEx(os, core, 0x0, oldControl)); + + if (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_HALTI0)) + { + /* FE debug register. */ + gcmkVERIFY_OK(_DumpLinkStack(os, core, &_dbgRegs[2])); + } + + _DumpFEStack(os, core, &_dbgRegs[2]); + + gcmkPRINT_N(0, "**************************\n"); + gcmkPRINT_N(0, "***** SW COUNTERS *****\n"); + gcmkPRINT_N(0, "**************************\n"); + gcmkPRINT_N(4, " Execute Count = 0x%08X\n", Hardware->executeCount); + gcmkPRINT_N(4, " Execute Addr = 0x%08X\n", Hardware->lastExecuteAddress); + gcmkPRINT_N(4, " End Addr = 0x%08X\n", Hardware->lastEnd); + + /* dump stack. */ + gckOS_DumpCallStack(os); + +OnError: + + /* Return the error. */ + gcmkFOOTER(); + return status; +} + +static gceSTATUS +gckHARDWARE_ReadPerformanceRegister( + IN gckHARDWARE Hardware, + IN gctUINT PerformanceAddress, + IN gctUINT IndexAddress, + IN gctUINT IndexShift, + IN gctUINT Index, + OUT gctUINT32_PTR Value + ) +{ + gceSTATUS status; + + gcmkHEADER_ARG("Hardware=0x%x PerformanceAddress=0x%x IndexAddress=0x%x " + "IndexShift=%u Index=%u", + Hardware, PerformanceAddress, IndexAddress, IndexShift, + Index); + + /* Write the index. */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + IndexAddress, + Index << IndexShift)); + + /* Read the register. 
*/ + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + PerformanceAddress, + Value)); + + /* Test for reset. */ + if (Index == 15) + { + /* Index another register to get out of reset. */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, IndexAddress, 0)); + } + + /* Success. */ + gcmkFOOTER_ARG("*Value=0x%x", *Value); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckHARDWARE_GetFrameInfo( + IN gckHARDWARE Hardware, + OUT gcsHAL_FRAME_INFO * FrameInfo + ) +{ + gceSTATUS status; + gctUINT i, clock; + gcsHAL_FRAME_INFO info; +#if gcdFRAME_DB_RESET + gctUINT reset; +#endif + + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + + /* Get profile tick. */ + gcmkONERROR(gckOS_GetProfileTick(&info.ticks)); + + /* Read SH counters and reset them. */ + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x0045C, + 0x00470, + 24, + 4, + &info.shaderCycles)); + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x0045C, + 0x00470, + 24, + 9, + &info.vsInstructionCount)); + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x0045C, + 0x00470, + 24, + 12, + &info.vsTextureCount)); + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x0045C, + 0x00470, + 24, + 7, + &info.psInstructionCount)); + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x0045C, + 0x00470, + 24, + 14, + &info.psTextureCount)); +#if gcdFRAME_DB_RESET + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x0045C, + 0x00470, + 24, + 15, + &reset)); +#endif + + /* Read PA counters and reset them. 
*/ + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x00460, + 0x00474, + 0, + 3, + &info.vertexCount)); + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x00460, + 0x00474, + 0, + 4, + &info.primitiveCount)); + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x00460, + 0x00474, + 0, + 7, + &info.rejectedPrimitives)); + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x00460, + 0x00474, + 0, + 8, + &info.culledPrimitives)); + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x00460, + 0x00474, + 0, + 6, + &info.clippedPrimitives)); + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x00460, + 0x00474, + 0, + 5, + &info.outPrimitives)); +#if gcdFRAME_DB_RESET + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x00460, + 0x00474, + 0, + 15, + &reset)); +#endif + + /* Read RA counters and reset them. */ + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x00448, + 0x00474, + 16, + 3, + &info.inPrimitives)); + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x00448, + 0x00474, + 16, + 11, + &info.culledQuadCount)); + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x00448, + 0x00474, + 16, + 1, + &info.totalQuadCount)); + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x00448, + 0x00474, + 16, + 2, + &info.quadCount)); + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x00448, + 0x00474, + 16, + 0, + &info.totalPixelCount)); +#if gcdFRAME_DB_RESET + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x00448, + 0x00474, + 16, + 15, + &reset)); +#endif + + /* Read TX counters and reset them. 
*/ + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x0044C, + 0x00474, + 24, + 0, + &info.bilinearRequests)); + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x0044C, + 0x00474, + 24, + 1, + &info.trilinearRequests)); + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x0044C, + 0x00474, + 24, + 8, + &info.txHitCount)); + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x0044C, + 0x00474, + 24, + 9, + &info.txMissCount)); + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x0044C, + 0x00474, + 24, + 6, + &info.txBytes8)); +#if gcdFRAME_DB_RESET + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x0044C, + 0x00474, + 24, + 15, + &reset)); +#endif + + /* Read clock control register. */ + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00000, + &clock)); + + /* Walk through all avaiable pixel pipes. */ + for (i = 0; i < Hardware->identity.pixelPipes; ++i) + { + /* Select proper pipe. */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00000, + ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:20) - (0 ? + 23:20) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:20) - (0 ? + 23:20) + 1))))))) << (0 ? + 23:20))) | (((gctUINT32) ((gctUINT32) (i) & ((gctUINT32) ((((1 ? + 23:20) - (0 ? + 23:20) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20))))); + + /* Read cycle registers. */ + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00078, + &info.cycles[i])); + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x0007C, + &info.idleCycles[i])); + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00438, + &info.mcCycles[i])); + + /* Read bandwidth registers. 
*/ + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x0005C, + &info.readRequests[i])); + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00040, + &info.readBytes8[i])); + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00050, + &info.writeRequests[i])); + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00044, + &info.writeBytes8[i])); + + /* Read PE counters. */ + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x00454, + 0x00470, + 16, + 0, + &info.colorKilled[i])); + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x00454, + 0x00470, + 16, + 2, + &info.colorDrawn[i])); + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x00454, + 0x00470, + 16, + 1, + &info.depthKilled[i])); + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x00454, + 0x00470, + 16, + 3, + &info.depthDrawn[i])); + } + + /* Zero out remaning reserved counters. */ + for (; i < 8; ++i) + { + info.readBytes8[i] = 0; + info.writeBytes8[i] = 0; + info.cycles[i] = 0; + info.idleCycles[i] = 0; + info.mcCycles[i] = 0; + info.readRequests[i] = 0; + info.writeRequests[i] = 0; + info.colorKilled[i] = 0; + info.colorDrawn[i] = 0; + info.depthKilled[i] = 0; + info.depthDrawn[i] = 0; + } + + /* Reset clock control register. */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00000, + clock)); + + /* Reset cycle and bandwidth counters. */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x0003C, + 1)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x0003C, + 0)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00078, + 0)); + +#if gcdFRAME_DB_RESET + /* Reset PE counters. */ + gcmkONERROR(gckHARDWARE_ReadPerformanceRegister( + Hardware, + 0x00454, + 0x00470, + 16, + 15, + &reset)); +#endif + + /* Copy to user. 
*/ + gcmkONERROR(gckOS_CopyToUserData(Hardware->os, + &info, + FrameInfo, + gcmSIZEOF(info))); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckHARDWARE_DumpGpuProfile( + IN gckHARDWARE Hardware + ) +{ + gceSTATUS status = gcvSTATUS_OK; + gctUINT clock, i; + gctUINT32 totalRead, totalWrite, read, write; + + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + + /* Read clock control register. */ + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00000, + &clock)); + + totalRead = 0; + totalWrite = 0; + + /* Walk through all avaiable pixel pipes. */ + for (i = 0; i < Hardware->identity.pixelPipes; ++i) + { + /* Select proper pipe. */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00000, + ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:20) - (0 ? + 23:20) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:20) - (0 ? + 23:20) + 1))))))) << (0 ? + 23:20))) | (((gctUINT32) ((gctUINT32) (i) & ((gctUINT32) ((((1 ? + 23:20) - (0 ? + 23:20) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20))))); + + /* BW */ + gcmkONERROR( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00040, + &read)); + totalRead += read; + + gcmkONERROR( + gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00044, + &write)); + totalWrite += write; + } + + gcmkPRINT("==============GPU Profile: read request : %d\n", totalRead); + gcmkPRINT("==============GPU Profile: write request: %d\n", totalWrite); + + /* Reset clock control register. */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00000, + clock)); + /* Reset counters. */ + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 1)); + gcmkONERROR( + gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 0)); + + /* Success. 
*/ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +#if gcdDVFS +#define READ_FROM_EATER1 1 + +gceSTATUS +gckHARDWARE_QueryLoad( + IN gckHARDWARE Hardware, + OUT gctUINT32 * Load + ) +{ + gctUINT32 debug1; + gceSTATUS status; + gcmkHEADER_ARG("Hardware=0x%X", Hardware); + + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + gcmkVERIFY_ARGUMENT(Load != gcvNULL); + + gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE); + + if (Hardware->chipPowerState == gcvPOWER_ON) + { + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00110, + Load)); +#if READ_FROM_EATER1 + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00134, + Load)); +#endif + + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00114, + &debug1)); + + /* Patch result of 0x110 with result of 0x114. */ + if ((debug1 & 0xFF) == 1) + { + *Load &= ~0xFF; + *Load |= 1; + } + + if (((debug1 & 0xFF00) >> 8) == 1) + { + *Load &= ~(0xFF << 8); + *Load |= 1 << 8; + } + + if (((debug1 & 0xFF0000) >> 16) == 1) + { + *Load &= ~(0xFF << 16); + *Load |= 1 << 16; + } + + if (((debug1 & 0xFF000000) >> 24) == 1) + { + *Load &= ~(0xFF << 24); + *Load |= 1 << 24; + } + } + else + { + status = gcvSTATUS_INVALID_REQUEST; + } + +OnError: + + gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex); + + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckHARDWARE_SetDVFSPeroid( + IN gckHARDWARE Hardware, + OUT gctUINT32 Frequency + ) +{ + gceSTATUS status; + gctUINT32 period; + gctUINT32 eater; + +#if READ_FROM_EATER1 + gctUINT32 period1; + gctUINT32 eater1; +#endif + + gcmkHEADER_ARG("Hardware=0x%X Frequency=%d", Hardware, Frequency); + + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + + period = 0; + + while((64 << period) < (gcdDVFS_ANAYLSE_WINDOW * Frequency * 1000) ) + { + period++; + } + +#if READ_FROM_EATER1 + /* + * Peroid = F * 1000 * 1000 / (60 * 16 * 1024); + */ + period1 = Frequency 
* 6250 / 6114; +#endif + + gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE); + + if (Hardware->chipPowerState == gcvPOWER_ON) + { + /* Get current configure. */ + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x0010C, + &eater)); + + /* Change peroid. */ + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x0010C, + ((((gctUINT32) (eater)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:8) - (0 ? + 15:8) + 1))))))) << (0 ? + 15:8))) | (((gctUINT32) ((gctUINT32) (period) & ((gctUINT32) ((((1 ? + 15:8) - (0 ? + 15:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))))); + +#if READ_FROM_EATER1 + /* Config eater1. */ + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x00130, + &eater1)); + + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x00130, + ((((gctUINT32) (eater1)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:16) - (0 ? + 31:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:16) - (0 ? + 31:16) + 1))))))) << (0 ? + 31:16))) | (((gctUINT32) ((gctUINT32) (period1) & ((gctUINT32) ((((1 ? + 31:16) - (0 ? + 31:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:16) - (0 ? 31:16) + 1))))))) << (0 ? 31:16))))); +#endif + } + else + { + status = gcvSTATUS_INVALID_REQUEST; + } + +OnError: + gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex); + + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckHARDWARE_InitDVFS( + IN gckHARDWARE Hardware + ) +{ + gceSTATUS status; + gctUINT32 data; + + gcmkHEADER_ARG("Hardware=0x%X", Hardware); + + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, + Hardware->core, + 0x0010C, + &data)); + + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 16:16) - (0 ? + 16:16) + 1))))))) << (0 ? 
+ 16:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))); + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 18:18) - (0 ? + 18:18) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 18:18) - (0 ? + 18:18) + 1))))))) << (0 ? + 18:18))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 18:18) - (0 ? + 18:18) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 18:18) - (0 ? 18:18) + 1))))))) << (0 ? 18:18))); + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 19:19) - (0 ? + 19:19) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 19:19) - (0 ? + 19:19) + 1))))))) << (0 ? + 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 19:19) - (0 ? + 19:19) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))); + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 20:20) - (0 ? + 20:20) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 20:20) - (0 ? + 20:20) + 1))))))) << (0 ? + 20:20))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 20:20) - (0 ? + 20:20) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20))); + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:23) - (0 ? + 23:23) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:23) - (0 ? + 23:23) + 1))))))) << (0 ? + 23:23))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 23:23) - (0 ? + 23:23) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23))); + data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 22:22) - (0 ? + 22:22) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 22:22) - (0 ? + 22:22) + 1))))))) << (0 ? + 22:22))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 22:22) - (0 ? + 22:22) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 22:22) - (0 ? 22:22) + 1))))))) << (0 ? 
22:22))); + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, + "DVFS Configure=0x%X", + data); + + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, + Hardware->core, + 0x0010C, + data)); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} +#endif + +/******************************************************************************* +** +** gckHARDWARE_PrepareFunctions +** +** Generate command buffer snippets which will be used by gckHARDWARE, by which +** gckHARDWARE can manipulate GPU by FE command without using gckCOMMAND to avoid +** race condition and deadlock. +** +** Notice: +** 1. Each snippet can only be executed when GPU is idle. +** 2. Execution is triggered by AHB (0x658) +** 3. Each snippet followed by END so software can sync with GPU by checking GPU +** idle +** 4. It is transparent to gckCOMMAND command buffer. +** +** Existing Snippets: +** 1. MMU Configure +** For new MMU, after GPU is reset, FE execute this command sequence to enable MMU. 
+*/ +gceSTATUS +gckHARDWARE_PrepareFunctions( + gckHARDWARE Hardware + ) +{ + gckOS os; + gceSTATUS status; + gctUINT32 offset = 0; + gctUINT32 endBytes; + gctUINT32 flushBytes; + gctUINT8_PTR logical; + gctUINT32 address; + gcsHARDWARE_FUNCTION *function; + gceDUMMY_DRAW_TYPE dummyDrawType = gcvDUMMY_DRAW_INVALID; + + gcmkHEADER_ARG("%x", Hardware); + + os = Hardware->os; + + gcmkVERIFY_OK(gckOS_GetPageSize(os, &Hardware->mmuFuncBytes)); + Hardware->auxFuncBytes = Hardware->mmuFuncBytes; + + gcmkONERROR(gckHARDWARE_End( + Hardware, + gcvNULL, + ~0U, + &endBytes + )); + + if ((Hardware->mmuVersion > 0) && + Hardware->options.enableMMU && + (Hardware->options.secureMode != gcvSECURE_IN_TA)) + { + gctUINT32 mmuBytes; + gctPHYS_ADDR_T physical = 0; + gctUINT32 flags = gcvALLOC_FLAG_CONTIGUOUS; + +#if defined(CONFIG_ZONE_DMA32) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) + flags |= gcvALLOC_FLAG_4GB_ADDR; +#endif +#endif +#ifdef CONFIG_MCST + flags |= gcvALLOC_FLAG_4GB_ADDR; +#endif + +#if gcdENABLE_CACHEABLE_COMMAND_BUFFER + flags |= gcvALLOC_FLAG_CACHEABLE; +#endif + + /* Allocate mmu command buffer within 32bit space */ + gcmkONERROR(gckOS_AllocateNonPagedMemory( + os, + gcvFALSE, + flags, + &Hardware->mmuFuncBytes, + &Hardware->mmuFuncPhysical, + &Hardware->mmuFuncLogical + )); + + gcmkONERROR(gckOS_GetPhysicalAddress( + os, + Hardware->mmuFuncLogical, + &physical + )); + + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical( + os, + physical, + &physical + )); + + if (!(flags & gcvALLOC_FLAG_4GB_ADDR) && (physical & 0xFFFFFFFF00000000ULL)) + { + gcmkFATAL("%s(%d): Command buffer physical address (0x%llx) for MMU setup exceeds 32bits, " + "please rebuild kernel with CONFIG_ZONE_DMA32=y.", + __FUNCTION__, __LINE__, physical); + } + + function = &Hardware->functions[gcvHARDWARE_FUNCTION_MMU]; + function->logical = (gctUINT8_PTR)Hardware->mmuFuncLogical; + gcmkSAFECASTPHYSADDRT(function->address, physical); + + gcmkONERROR(gckHARDWARE_SetMMUStates( + Hardware, + 
Hardware->kernel->mmu->mtlbLogical, + gcvMMU_MODE_4K, + Hardware->kernel->mmu->safePageLogical, + function->logical, + &mmuBytes + )); + + function->endAddress = function->address + mmuBytes; + function->endLogical = function->logical + mmuBytes; + + gcmkONERROR(gckHARDWARE_End( + Hardware, + function->endLogical, + function->endAddress, + &endBytes + )); + + function->bytes = mmuBytes + endBytes; + + gcmkONERROR(gckOS_CacheClean( + Hardware->os, + 0, + Hardware->mmuFuncPhysical, + 0, + Hardware->mmuFuncLogical, + function->bytes + )); + } + +#if USE_KERNEL_VIRTUAL_BUFFERS + if (Hardware->kernel->virtualCommandBuffer) + { + gckVIRTUAL_COMMAND_BUFFER_PTR commandBuffer = gcvNULL; + gcmkONERROR(gckKERNEL_AllocateVirtualCommandBuffer( + Hardware->kernel, + gcvFALSE, + &Hardware->auxFuncBytes, + &Hardware->auxFuncPhysical, + &Hardware->auxFuncLogical + )); + + gcmkONERROR(gckKERNEL_GetGPUAddress( + Hardware->kernel, + Hardware->auxFuncLogical, + gcvFALSE, + Hardware->auxFuncPhysical, + &Hardware->auxFuncAddress + )); + + commandBuffer = (gckVIRTUAL_COMMAND_BUFFER_PTR) Hardware->auxFuncPhysical; + + Hardware->auxPhysHandle = commandBuffer->virtualBuffer.physical; + } + else +#endif + { + gctPHYS_ADDR_T physical = 0; + gctUINT32 allocFlag = gcvALLOC_FLAG_CONTIGUOUS; + +#if gcdENABLE_CACHEABLE_COMMAND_BUFFER + allocFlag |= gcvALLOC_FLAG_CACHEABLE; +#endif + + /* Allocate a command buffer. 
*/ + gcmkONERROR(gckOS_AllocateNonPagedMemory( + os, + gcvFALSE, + allocFlag, + &Hardware->auxFuncBytes, + &Hardware->auxFuncPhysical, + &Hardware->auxFuncLogical + )); + + gcmkONERROR(gckOS_GetPhysicalAddress( + os, + Hardware->auxFuncLogical, + &physical + )); + + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical( + os, + physical, + &physical + )); + + gcmkSAFECASTPHYSADDRT(Hardware->auxFuncAddress, physical); + + gcmkONERROR(gckMMU_FillFlatMapping( + Hardware->kernel->mmu, + Hardware->auxFuncAddress, + Hardware->auxFuncBytes + )); + + Hardware->auxPhysHandle = Hardware->auxFuncPhysical; + } + + /* + ** All cache flush command sequence. + */ + function = &Hardware->functions[gcvHARDWARE_FUNCTION_FLUSH]; + + function->logical = logical = (gctUINT8_PTR)Hardware->auxFuncLogical + offset; + + function->address = Hardware->auxFuncAddress + offset; + + /* Get the size of the flush command. */ + gcmkONERROR(gckHARDWARE_Flush(Hardware, gcvFLUSH_ALL, gcvNULL, &flushBytes)); + + /* Append a flush. */ + gcmkONERROR(gckHARDWARE_Flush(Hardware, gcvFLUSH_ALL, logical, &flushBytes)); + + offset += flushBytes; + + logical = (gctUINT8_PTR)Hardware->auxFuncLogical + offset; + address = Hardware->auxFuncAddress + offset; + + gcmkONERROR(gckHARDWARE_End(Hardware, logical, address, &endBytes)); + +#if USE_KERNEL_VIRTUAL_BUFFERS + if (Hardware->kernel->virtualCommandBuffer) + { + gcmkONERROR(gckKERNEL_GetGPUAddress( + Hardware->kernel, + logical, + gcvFALSE, + Hardware->auxFuncPhysical, + &Hardware->lastEnd + )); + } +#endif + + offset += endBytes; + + function->bytes = flushBytes + endBytes; + + function->endAddress = function->address + flushBytes; + function->endLogical = function->logical + flushBytes; + + /* + ** ASYNC-BLT Engine event command + */ + if (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_ASYNC_BLIT)) + { + gctUINT8 i; + gctUINT32 eventBytes; + + function = &Hardware->functions[gcvHARDWARE_FUNCTION_BLT_EVENT]; + + function->logical = logical = 
(gctUINT8_PTR)Hardware->auxFuncLogical + offset; + function->address = Hardware->auxFuncAddress + offset; + + gcmkONERROR(gckHARDWARE_Event(Hardware, gcvNULL, 0, gcvKERNEL_BLT, &eventBytes)); + + for (i = 0; i < 29; i++) + { + gcmkONERROR(gckHARDWARE_Event( + Hardware, + logical + i * eventBytes, + i, + gcvKERNEL_BLT, + &eventBytes + )); + + offset += eventBytes; + } + + function->bytes = eventBytes * 29; + } + + + /************************************************************************************ + * Dummy draw. + */ + if (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_FE_NEED_DUMMYDRAW)) + { + dummyDrawType = gcvDUMMY_DRAW_GC400; + } + + if (!gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_USC_DEFER_FILL_FIX) && + gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_USC)) + { + dummyDrawType = gcvDUMMY_DRAW_V60; + } + + if (dummyDrawType != gcvDUMMY_DRAW_INVALID) + { + gctUINT32 dummyDrawBytes; + + function = &Hardware->functions[gcvHARDWARE_FUNCTION_DUMMY_DRAW]; + + function->logical = logical = (gctUINT8_PTR)Hardware->auxFuncLogical + offset; + function->address = Hardware->auxFuncAddress + offset; + + /* Append a dummy draw. 
*/ + gcmkONERROR(gckHARDWARE_DummyDraw(Hardware, logical, function->address, dummyDrawType, &dummyDrawBytes)); + + offset += dummyDrawBytes; + + logical += dummyDrawBytes; + address = function->address + dummyDrawBytes; + + gcmkONERROR(gckHARDWARE_End(Hardware, logical, address, &endBytes)); + + offset += endBytes; + + function->endAddress = function->address + dummyDrawBytes; + function->endLogical = function->logical + dummyDrawBytes; + + function->bytes = dummyDrawBytes + endBytes; + } + gcmkASSERT(offset < Hardware->auxFuncBytes); + + gcmkONERROR(gckOS_CacheClean( + Hardware->os, + 0, + Hardware->auxPhysHandle, + 0, + Hardware->auxFuncLogical, + Hardware->auxFuncBytes + )); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckHARDWARE_DestroyFunctions( + gckHARDWARE Hardware + ) +{ + gcmkHEADER_ARG("%x", Hardware); + + if (Hardware->auxFuncPhysical) + { +#if USE_KERNEL_VIRTUAL_BUFFERS + if (Hardware->kernel->virtualCommandBuffer) + { + gcmkVERIFY_OK(gckKERNEL_FreeVirtualMemory( + Hardware->auxFuncPhysical, + Hardware->auxFuncLogical, + gcvFALSE + )); + } + else +#endif + { + gcmkVERIFY_OK(gckOS_FreeNonPagedMemory( + Hardware->os, + Hardware->auxFuncBytes, + Hardware->auxFuncPhysical, + Hardware->auxFuncLogical + )); + } + } + + if (Hardware->mmuFuncPhysical) + { + gcmkVERIFY_OK(gckOS_FreeNonPagedMemory( + Hardware->os, + Hardware->mmuFuncBytes, + Hardware->mmuFuncPhysical, + Hardware->mmuFuncLogical + )); + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gckHARDWARE_ExecuteFunctions( + IN gckHARDWARE Hardware, + IN gceHARDWARE_FUNCTION Function + ) +{ + gceSTATUS status; + gctUINT32 idle; + gctUINT32 timer = 0, delay = 1; + gcsHARDWARE_FUNCTION * function = &Hardware->functions[Function]; + gctUINT32 address; + +#if USE_KERNEL_VIRTUAL_BUFFERS + if (Hardware->kernel->virtualCommandBuffer) + { + address = function->address; + } + else +#endif + { + address = function->address - 
Hardware->baseAddress; + } + + /* Execute prepared command sequence. */ + gcmkONERROR(gckHARDWARE_Execute( + Hardware, + address, + function->bytes + )); + +#if gcdLINK_QUEUE_SIZE + { + gcuQUEUEDATA data; + + gcmkVERIFY_OK(gckOS_GetProcessID(&data.linkData.pid)); + + data.linkData.start = address; + data.linkData.end = address + function->bytes; + data.linkData.linkLow = 0; + data.linkData.linkHigh = 0; + + gckQUEUE_Enqueue(&Hardware->linkQueue, &data); + } +#endif + + gcmkDUMPCOMMAND( + Hardware->os, + function->logical, + function->bytes, + gcvDUMP_BUFFER_KERNEL, + gcvTRUE + ); + +#if gcdDUMP_COMMAND + gcmkPRINT("@[kernel.execute]"); +#endif + + /* Wait until GPU idle. */ + do + { + gckOS_Delay(Hardware->os, delay); + + gcmkONERROR(gckOS_ReadRegisterEx( + Hardware->os, + Hardware->core, + 0x00004, + &idle)); + + timer += delay; + delay *= 2; + +#if gcdGPU_TIMEOUT + if (timer >= Hardware->kernel->timeOut) + { + gckHARDWARE_DumpGPUState(Hardware); + gckCOMMAND_DumpExecutingBuffer(Hardware->kernel->command); + + /* Even if hardware is not reset correctly, let software + ** continue to avoid software stuck. Software will timeout again + ** and try to recover GPU in next timeout. 
+ */ + gcmkONERROR(gcvSTATUS_DEVICE); + } +#endif + } + while (!_IsGPUIdle(idle)); + + return gcvSTATUS_OK; + +OnError: + return status; +} + +gceSTATUS +gckHARDWARE_AddressInHardwareFuncions( + IN gckHARDWARE Hardware, + IN gctUINT32 Address, + OUT gctPOINTER *Pointer + ) +{ + if (Address >= Hardware->auxFuncAddress && Address <= Hardware->auxFuncAddress - 1 + Hardware->auxFuncBytes) + { + *Pointer = (gctUINT8_PTR)Hardware->auxFuncLogical + + (Address - Hardware->auxFuncAddress) + ; + + return gcvSTATUS_OK; + } + + return gcvSTATUS_NOT_FOUND; +} + +gceSTATUS +gckHARDWARE_QueryStateTimer( + IN gckHARDWARE Hardware, + OUT gctUINT64_PTR Start, + OUT gctUINT64_PTR End, + OUT gctUINT64_PTR On, + OUT gctUINT64_PTR Off, + OUT gctUINT64_PTR Idle, + OUT gctUINT64_PTR Suspend + ) +{ + gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE); + + gckSTATETIMER_Query( + &Hardware->powerStateTimer, Hardware->chipPowerState, Start, End, On, Off, Idle, Suspend); + + gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex); + + return gcvSTATUS_OK; +} + +gceSTATUS +gckHARDWARE_WaitFence( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gctUINT64 FenceData, + IN gctUINT32 FenceAddress, + OUT gctUINT32 *Bytes + ) +{ + gctUINT32_PTR logical = (gctUINT32_PTR)Logical; + + gctUINT32 dataLow = (gctUINT32)FenceData; + gctUINT32 dataHigh = (gctUINT32)(FenceData >> 32); + + if (logical) + { + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x01FD) & ((gctUINT32) ((((1 ? 
+ 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *logical++ + = dataHigh; + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x01FA) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *logical++ + = dataLow; + + *logical++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x0F & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? 
+ 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (Hardware->waitCount) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 17:16) - (0 ? + 17:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 17:16) - (0 ? + 17:16) + 1))))))) << (0 ? + 17:16))) | (((gctUINT32) (0x2 & ((gctUINT32) ((((1 ? + 17:16) - (0 ? + 17:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 17:16) - (0 ? 17:16) + 1))))))) << (0 ? 17:16))); + + *logical++ + = FenceAddress; + } + else + { + *Bytes = 6 * gcmSIZEOF(gctUINT32); + } + + return gcvSTATUS_OK; +} + +gceSTATUS +gckHARDWARE_UpdateContextID( + IN gckHARDWARE Hardware + ) +{ + static gcsiDEBUG_REGISTERS fe = { "FE", 0x470, 0, 0x450, 256, 0x1, 0x00 }; + gckOS os = Hardware->os; + gceCORE core = Hardware->core; + gctUINT32 contextIDLow, contextIDHigh; + gceSTATUS status; + + gcmkONERROR(gckOS_WriteRegisterEx(os, core, fe.index, 0x53 << fe.shift)); + gcmkONERROR(gckOS_ReadRegisterEx(os, core, fe.data, &contextIDLow)); + + gcmkONERROR(gckOS_WriteRegisterEx(os, core, fe.index, 0x54 << fe.shift)); + gcmkONERROR(gckOS_ReadRegisterEx(os, core, fe.data, &contextIDHigh)); + + Hardware->contextID = ((gctUINT64)contextIDHigh << 32) + contextIDLow; + + return gcvSTATUS_OK; + +OnError: + return status; +} + +gceSTATUS +gckFE_Initialize( + IN gckHARDWARE Hardware, + OUT gckFE FE + ) +{ + gceSTATUS status; + gctUINT32 data; + + gcmkHEADER(); + + gckOS_ZeroMemory(FE, gcmSIZEOF(gcsFE)); + + gcmkVERIFY_OK(gckOS_ReadRegisterEx( + Hardware->os, + Hardware->core, + 0x007E4, + &data + )); + + gcmkONERROR(gckOS_AtomConstruct(Hardware->os, &FE->freeDscriptors)); + + data = (((((gctUINT32) (data)) >> (0 ? 6:0)) & ((gctUINT32) ((((1 ? 6:0) - (0 ? 6:0) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 6:0) - (0 ? 
6:0) + 1)))))) ); + + gcmkTRACE_ZONE(gcvLEVEL_INFO, _GC_OBJ_ZONE, "free descriptor=%d", data); + + gcmkONERROR(gckOS_AtomSet(Hardware->os, FE->freeDscriptors, data)); + + /* Enable interrupts. */ + gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x000D8, ~0U); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + + if (FE->freeDscriptors) + { + gckOS_AtomDestroy(Hardware->os, FE->freeDscriptors); + } + + gcmkFOOTER(); + return status; +} + +void +gckFE_UpdateAvaiable( + IN gckHARDWARE Hardware, + OUT gckFE FE + ) +{ + gceSTATUS status; + gctUINT32 data; + gctINT32 oldValue; + + status = gckOS_ReadRegisterEx( + Hardware->os, + Hardware->core, + 0x007E4, + &data + ); + + if (gcmIS_SUCCESS(status)) + { + data = (((((gctUINT32) (data)) >> (0 ? 6:0)) & ((gctUINT32) ((((1 ? 6:0) - (0 ? 6:0) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 6:0) - (0 ? 6:0) + 1)))))) ); + + while (data--) + { + gckOS_AtomIncrement(Hardware->os, FE->freeDscriptors, &oldValue); + } + } +} + +gceSTATUS +gckFE_ReserveSlot( + IN gckHARDWARE Hardware, + IN gckFE FE, + OUT gctBOOL * Available + ) +{ + gctINT32 oldValue; + + gckOS_AtomDecrement(Hardware->os, FE->freeDscriptors, &oldValue); + + if (oldValue > 0) + { + /* Get one slot. 
*/ + *Available = gcvTRUE; + } + else + { + /* No available slot, restore decreased one.*/ + gckOS_AtomIncrement(Hardware->os, FE->freeDscriptors, &oldValue); + *Available = gcvFALSE; + } + + return gcvSTATUS_OK; +} + +void +gckFE_Execute( + IN gckHARDWARE Hardware, + IN gckFE FE, + IN gcsFEDescriptor * Desc + ) +{ + gckOS_WriteRegisterEx( + Hardware->os, + Hardware->core, + 0x007DC, + Desc->start + ); + + gckOS_MemoryBarrier( + Hardware->os, + gcvNULL + ); + + gckOS_WriteRegisterEx( + Hardware->os, + Hardware->core, + 0x007E0, + Desc->end + ); +} + +gceSTATUS +gckHARDWARE_DummyDraw( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gctUINT32 Address, + IN gceDUMMY_DRAW_TYPE DummyDrawType, + IN OUT gctUINT32 * Bytes + ) +{ + gctUINT32 dummyDraw_gc400[] = { + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0193) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + 0x000000, + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? 
+ 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0194) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + 0, + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0180) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 3:0) - (0 ? + 3:0) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? + 3:0) - (0 ? + 3:0) + 1))))))) << (0 ? + 3:0))) | (((gctUINT32) (0x8 & ((gctUINT32) ((((1 ? + 3:0) - (0 ? + 3:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 3:0) - (0 ? 3:0) + 1))))))) << (0 ? 3:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 13:12) - (0 ? + 13:12) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 13:12) - (0 ? + 13:12) + 1))))))) << (0 ? + 13:12))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? + 13:12) - (0 ? + 13:12) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1))))))) << (0 ? + 31:24))) | (((gctUINT32) ((gctUINT32) (4 * gcmSIZEOF(float)) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:7) - (0 ? + 7:7) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:7) - (0 ? + 7:7) + 1))))))) << (0 ? + 7:7))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 7:7) - (0 ? + 7:7) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 
31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E05) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 1:0) - (0 ? + 1:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 1:0) - (0 ? + 1:0) + 1))))))) << (0 ? + 1:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 1:0) - (0 ? + 1:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0202) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 5:0) - (0 ? + 5:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 5:0) - (0 ? + 5:0) + 1))))))) << (0 ? + 5:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 5:0) - (0 ? + 5:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 5:0) - (0 ? 5:0) + 1))))))) << (0 ? 5:0))), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0208) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 5:0) - (0 ? + 5:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 5:0) - (0 ? + 5:0) + 1))))))) << (0 ? + 5:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 5:0) - (0 ? + 5:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 5:0) - (0 ? 5:0) + 1))))))) << (0 ? 5:0))), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 
+ 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0201) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 5:0) - (0 ? + 5:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 5:0) - (0 ? + 5:0) + 1))))))) << (0 ? + 5:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 5:0) - (0 ? + 5:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 5:0) - (0 ? 5:0) + 1))))))) << (0 ? 5:0))), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0204) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? 
+ 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 5:0) - (0 ? + 5:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 5:0) - (0 ? + 5:0) + 1))))))) << (0 ? + 5:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 5:0) - (0 ? + 5:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 5:0) - (0 ? 5:0) + 1))))))) << (0 ? 5:0))), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x1000) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + 0x0, 0x0, 0x0, 0x0, + 0xDEADDEAD, + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? 
+ 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0203) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 6:0) - (0 ? + 6:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 6:0) - (0 ? + 6:0) + 1))))))) << (0 ? + 6:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 6:0) - (0 ? + 6:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 6:0) - (0 ? 6:0) + 1))))))) << (0 ? 6:0))), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x020E) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 
25:16))), + 0, + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0200) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + 1, + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x020C) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? 
+ 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + 0x000F003F, + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x028C) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 11:8) - (0 ? + 11:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 11:8) - (0 ? + 11:8) + 1))))))) << (0 ? + 11:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 11:8) - (0 ? + 11:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 11:8) - (0 ? 11:8) + 1))))))) << (0 ? 11:8))), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? 
+ 15:0))) | (((gctUINT32) ((gctUINT32) (0x0500) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 1:0) - (0 ? + 1:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 1:0) - (0 ? + 1:0) + 1))))))) << (0 ? + 1:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 1:0) - (0 ? + 1:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x028D) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 13:12) - (0 ? + 13:12) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 
+ 13:12) - (0 ? + 13:12) + 1))))))) << (0 ? + 13:12))) | (((gctUINT32) (0x2 & ((gctUINT32) ((((1 ? + 13:12) - (0 ? + 13:12) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 9:8) - (0 ? + 9:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 9:8) - (0 ? + 9:8) + 1))))))) << (0 ? + 9:8))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 9:8) - (0 ? + 9:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 9:8) - (0 ? 9:8) + 1))))))) << (0 ? 9:8))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 17:16) - (0 ? + 17:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 17:16) - (0 ? + 17:16) + 1))))))) << (0 ? + 17:16))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 17:16) - (0 ? + 17:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 17:16) - (0 ? 17:16) + 1))))))) << (0 ? 17:16))), + + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0300) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + 0, + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? 
+ 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0301) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + 0, + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0302) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 
25:16))), + 0, + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0303) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + 0, + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0289) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? 
+ 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 3:0) - (0 ? + 3:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 3:0) - (0 ? + 3:0) + 1))))))) << (0 ? + 3:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 3:0) - (0 ? + 3:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 3:0) - (0 ? 3:0) + 1))))))) << (0 ? 3:0))), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x05 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))), + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 3:0) - (0 ? + 3:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 3:0) - (0 ? + 3:0) + 1))))))) << (0 ? + 3:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 3:0) - (0 ? + 3:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 3:0) - (0 ? 3:0) + 1))))))) << (0 ? 3:0))), + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:0) - (0 ? + 23:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:0) - (0 ? + 23:0) + 1))))))) << (0 ? + 23:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 23:0) - (0 ? + 23:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:0) - (0 ? 23:0) + 1))))))) << (0 ? 23:0))), + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:0) - (0 ? + 23:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:0) - (0 ? + 23:0) + 1))))))) << (0 ? + 23:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 23:0) - (0 ? + 23:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 23:0) - (0 ? 23:0) + 1))))))) << (0 ? 23:0))), + }; + + gctUINT32 dummyDraw_v60[] = { + + /* Semaphore from FE to PE. */ + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? 
+ 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))), + + /* Stall from FE to PE. */ + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 
+ 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x021A) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) ((gctUINT32) (0x0) & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E06) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 1:0) - (0 ? + 1:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 1:0) - (0 ? + 1:0) + 1))))))) << (0 ? + 1:0))) | (((gctUINT32) ((gctUINT32) (0x0) & ((gctUINT32) ((((1 ? + 1:0) - (0 ? + 1:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 14:12) - (0 ? + 14:12) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 14:12) - (0 ? + 14:12) + 1))))))) << (0 ? + 14:12))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 14:12) - (0 ? + 14:12) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 14:12) - (0 ? 14:12) + 1))))))) << (0 ? 14:12))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 17:16) - (0 ? + 17:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 17:16) - (0 ? + 17:16) + 1))))))) << (0 ? + 17:16))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 17:16) - (0 ? + 17:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 17:16) - (0 ? 17:16) + 1))))))) << (0 ? 17:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 
+ 7:4) - (0 ? + 7:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:4) - (0 ? + 7:4) + 1))))))) << (0 ? + 7:4))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 7:4) - (0 ? + 7:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 7:4) - (0 ? 7:4) + 1))))))) << (0 ? 7:4))), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0401) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + 0x0, + 0x2, + 0x0, + 0x0, + 0x0, + 0x0, + (gctUINT32)~0x0, + + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x020C) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 
15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + 0xffffffff, + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E07) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + 2, + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? 
+ 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E08) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + 2, + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0420) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 2:0) - (0 ? + 2:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 2:0) - (0 ? + 2:0) + 1))))))) << (0 ? + 2:0))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? + 2:0) - (0 ? + 2:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 2:0) - (0 ? 2:0) + 1))))))) << (0 ? 2:0))), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0424) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + 1, + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0403) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 
25:16))), + 3, + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E21) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 2:0) - (0 ? + 2:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 2:0) - (0 ? + 2:0) + 1))))))) << (0 ? + 2:0))) | (((gctUINT32) (0x2 & ((gctUINT32) ((((1 ? + 2:0) - (0 ? + 2:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 2:0) - (0 ? 2:0) + 1))))))) << (0 ? 2:0))), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x040A) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 
15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + 0, + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x2000) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1 << 2) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + 0x07801033,0x3fc00900,0x00000040,0x00390008, + (gctUINT32)~0, + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? 
+ 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x021F) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + 0x0, + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0240) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 1:0) - (0 ? + 1:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 1:0) - (0 ? + 1:0) + 1))))))) << (0 ? + 1:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 1:0) - (0 ? + 1:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 6:4) - (0 ? 
+ 6:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 6:4) - (0 ? + 6:4) + 1))))))) << (0 ? + 6:4))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 6:4) - (0 ? + 6:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 6:4) - (0 ? 6:4) + 1))))))) << (0 ? 6:4))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 8:8) - (0 ? + 8:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 8:8) - (0 ? + 8:8) + 1))))))) << (0 ? + 8:8))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 8:8) - (0 ? + 8:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 26:24) - (0 ? + 26:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 26:24) - (0 ? + 26:24) + 1))))))) << (0 ? + 26:24))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? + 26:24) - (0 ? + 26:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 26:24) - (0 ? 26:24) + 1))))))) << (0 ? 26:24))), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0241) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 
+ 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (31) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:16) - (0 ? + 31:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:16) - (0 ? + 31:16) + 1))))))) << (0 ? + 31:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 31:16) - (0 ? + 31:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:16) - (0 ? 31:16) + 1))))))) << (0 ? 31:16))), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0244) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 9:0) - (0 ? + 9:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 9:0) - (0 ? + 9:0) + 1))))))) << (0 ? + 9:0))) | (((gctUINT32) ((gctUINT32) (31) & ((gctUINT32) ((((1 ? + 9:0) - (0 ? + 9:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 9:0) - (0 ? 9:0) + 1))))))) << (0 ? 
9:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:16) - (0 ? + 31:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:16) - (0 ? + 31:16) + 1))))))) << (0 ? + 31:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 31:16) - (0 ? + 31:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:16) - (0 ? 31:16) + 1))))))) << (0 ? 31:16))), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0247) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + + (32+(4*(((gcsFEATURE_DATABASE *)Hardware->featureDatabase)->NumShaderCores)-1))/(4*(((gcsFEATURE_DATABASE *)Hardware->featureDatabase)->NumShaderCores)), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? 
+ ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0248) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + 1, + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 5:5) - (0 ? + 5:5) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 5:5) - (0 ? + 5:5) + 1))))))) << (0 ? + 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 5:5) - (0 ? + 5:5) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))), + + /* Semaphore from FE to PE. 
*/ + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))), + + /* Stall from FE to PE. */ + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 
31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))), + + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:0) - (0 ? + 4:0) + 1))))))) << (0 ? + 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 4:0) - (0 ? + 4:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 12:8) - (0 ? + 12:8) + 1))))))) << (0 ? + 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? + 12:8) - (0 ? + 12:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))), + + /* Invalidate I cache.*/ + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x022C) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))), + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 0:0) - (0 ? 
+ 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 1:1) - (0 ? + 1:1) + 1))))))) << (0 ? + 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 1:1) - (0 ? + 1:1) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 2:2) - (0 ? + 2:2) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 2:2) - (0 ? + 2:2) + 1))))))) << (0 ? + 2:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 2:2) - (0 ? + 2:2) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 3:3) - (0 ? + 3:3) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 3:3) - (0 ? + 3:3) + 1))))))) << (0 ? + 3:3))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 3:3) - (0 ? + 3:3) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 4:4) - (0 ? + 4:4) + 1))))))) << (0 ? + 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 4:4) - (0 ? + 4:4) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 
4:4))), + }; + + gctUINT32 bytes = 0; + gctUINT32_PTR dummyDraw = gcvNULL; + + + switch(DummyDrawType) + { + case gcvDUMMY_DRAW_GC400: + dummyDraw = dummyDraw_gc400; + bytes = gcmSIZEOF(dummyDraw_gc400); + *(dummyDraw + 1) = Address; + break; + case gcvDUMMY_DRAW_V60: + dummyDraw = dummyDraw_v60; + bytes = gcmSIZEOF(dummyDraw_v60); + break; + default: + /* other chip no need dummy draw.*/ + gcmkASSERT(0); + break; + }; + + if (Logical != gcvNULL) + { + gckOS_MemCopy(Logical, dummyDraw, bytes); + } + + *Bytes = bytes; + + return gcvSTATUS_OK; +} + +gceSTATUS +gckHARDWARE_EnterQueryClock( + IN gckHARDWARE Hardware, + OUT gctUINT64 *McStart, + OUT gctUINT64 *ShStart + ) +{ + gceSTATUS status; + gctUINT64 mcStart, shStart; + + gcmkONERROR(gckOS_GetTime(&mcStart)); + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00438, 0)); + + *McStart = mcStart; + + if (Hardware->core <= gcvCORE_3D_MAX) + { + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, 0xFFU << 24)); + + gcmkONERROR(gckOS_GetTime(&shStart)); + + gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, 0x4U << 24)); + + *ShStart = shStart; + } + +OnError: + return status; +} + +gceSTATUS +gckHARDWARE_ExitQueryClock( + IN gckHARDWARE Hardware, + IN gctUINT64 McStart, + IN gctUINT64 ShStart, + OUT gctUINT32 *McClk, + OUT gctUINT32 *ShClk + ) +{ + gceSTATUS status; + gctUINT64 mcEnd, shEnd; + gctUINT32 mcCycle, shCycle; + gctUINT64 mcFreq, shFreq = 0; + + gcmkONERROR(gckOS_GetTime(&mcEnd)); + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00438, &mcCycle)); + + if (mcCycle == 0) + { + gcmkONERROR(gcvSTATUS_GENERIC_IO); + } + + /* cycle = (gctUINT64)cycle * 1000000 / (end - start); */ + mcFreq = ((gctUINT64)mcCycle * ((1000000U << 12) / (gctUINT32)(mcEnd - McStart))) >> 12; + + *McClk = (gctUINT32)mcFreq; + + if (Hardware->core <= gcvCORE_3D_MAX) + { + gcmkONERROR(gckOS_GetTime(&shEnd)); + gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, 
Hardware->core, 0x0045C, &shCycle)); + + if (!shCycle) + { + /*TODO: [VIV] Query SH cycle not support for old chips */ + *ShClk = *McClk; + return gcvSTATUS_OK; + } + + if (!ShStart) + { + gcmkONERROR(gcvSTATUS_GENERIC_IO); + } + + shFreq = ((gctUINT64)shCycle * ((1000000U << 12) / (gctUINT32)(shEnd - ShStart))) >> 12; + } + + *ShClk = (gctUINT32)shFreq; + +OnError: + return status; +} + + diff --git a/drivers/mcst/gpu-viv/hal/kernel/arch/gc_hal_kernel_hardware.h b/drivers/mcst/gpu-viv/hal/kernel/arch/gc_hal_kernel_hardware.h new file mode 100644 index 000000000000..b5b1cdd00081 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/arch/gc_hal_kernel_hardware.h @@ -0,0 +1,368 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#ifndef __gc_hal_kernel_hardware_h_ +#define __gc_hal_kernel_hardware_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum { + gcvHARDWARE_FUNCTION_MMU, + gcvHARDWARE_FUNCTION_FLUSH, + + /* BLT engine command sequence. */ + gcvHARDWARE_FUNCTION_BLT_EVENT, + gcvHARDWARE_FUNCTION_DUMMY_DRAW, + gcvHARDWARE_FUNCTION_NUM, +} +gceHARDWARE_FUNCTION; + + +typedef struct _gcsHARWARE_FUNCTION +{ + /* Entry of the function. */ + gctUINT32 address; + + /* CPU address of the function. */ + gctUINT8_PTR logical; + + /* Bytes of the function. */ + gctUINT32 bytes; + + /* Hardware address of END in this function. 
*/ + gctUINT32 endAddress; + + /* Logical of END in this function. */ + gctUINT8_PTR endLogical; +} +gcsHARDWARE_FUNCTION; + +typedef struct _gcsSTATETIMER +{ + gctUINT64 start; + gctUINT64 recent; + + /* Elapse of each power state. */ + gctUINT64 elapse[4]; +} +gcsSTATETIMER; + +typedef struct _gcsHARDWARE_SIGNATURE +{ + /* Chip model. */ + gceCHIPMODEL chipModel; + + /* Revision value.*/ + gctUINT32 chipRevision; + + /* Supported feature fields. */ + gctUINT32 chipFeatures; + + /* Supported minor feature fields. */ + gctUINT32 chipMinorFeatures; + + /* Supported minor feature 1 fields. */ + gctUINT32 chipMinorFeatures1; + + /* Supported minor feature 2 fields. */ + gctUINT32 chipMinorFeatures2; +} +gcsHARDWARE_SIGNATURE; + +typedef struct _gcsMMU_TABLE_ARRAY_ENTRY +{ + gctUINT32 low; + gctUINT32 high; +} +gcsMMU_TABLE_ARRAY_ENTRY; + +typedef struct _gcsHARDWARE_PAGETABLE_ARRAY +{ + /* Number of entries in page table array. */ + gctUINT num; + + /* Size in bytes of array. */ + gctSIZE_T size; + + /* Physical address of array. */ + gctPHYS_ADDR_T address; + + /* Memory descriptor. */ + gctPHYS_ADDR physical; + + /* Logical address of array. */ + gctPOINTER logical; +} +gcsHARDWARE_PAGETABLE_ARRAY; + +/* gckHARDWARE object. */ +struct _gckHARDWARE +{ + /* Object. */ + gcsOBJECT object; + + /* Pointer to gctKERNEL object. */ + gckKERNEL kernel; + + /* Pointer to gctOS object. */ + gckOS os; + + /* Core */ + gceCORE core; + + /* Chip characteristics. */ + gcsHAL_QUERY_CHIP_IDENTITY identity; + gcsHAL_QUERY_CHIP_OPTIONS options; + gctUINT32 powerBaseAddress; + gctBOOL extraEventStates; + + /* Big endian */ + gctBOOL bigEndian; + + /* Base address. 
*/ + gctUINT32 baseAddress; + + /* Chip status */ + gctPOINTER powerMutex; + gctUINT32 powerProcess; + gctUINT32 powerThread; + gceCHIPPOWERSTATE chipPowerState; + gctUINT32 lastWaitLink; + gctUINT32 lastEnd; + gctBOOL clockState; + gctBOOL powerState; + gctPOINTER globalSemaphore; + + gctUINT32 mmuVersion; + + /* Type */ + gceHARDWARE_TYPE type; + +#if gcdPOWEROFF_TIMEOUT + gctUINT32 powerOffTime; + gctUINT32 powerOffTimeout; + gctPOINTER powerOffTimer; +#endif + +#if gcdENABLE_FSCALE_VAL_ADJUST + gctUINT32 powerOnFscaleVal; +#endif + gctPOINTER pageTableDirty[gcvENGINE_GPU_ENGINE_COUNT]; + +#if gcdLINK_QUEUE_SIZE + struct _gckQUEUE linkQueue; +#endif + gctBOOL stallFEPrefetch; + + gctUINT32 minFscaleValue; + gctUINT waitCount; + + gctUINT32 mcClk; + gctUINT32 shClk; + + gctPOINTER pendingEvent; + + /* Function used by gckHARDWARE. */ + gctPHYS_ADDR mmuFuncPhysical; + gctPOINTER mmuFuncLogical; + gctSIZE_T mmuFuncBytes; + + gctPHYS_ADDR auxFuncPhysical; + gctPHYS_ADDR auxPhysHandle; + gctPOINTER auxFuncLogical; + gctUINT32 auxFuncAddress; + gctSIZE_T auxFuncBytes; + + gcsHARDWARE_FUNCTION functions[gcvHARDWARE_FUNCTION_NUM]; + + gcsSTATETIMER powerStateTimer; + gctUINT32 executeCount; + gctUINT32 lastExecuteAddress; + + /* Head for hardware list in gckMMU. */ + gcsLISTHEAD mmuHead; + + gctPOINTER featureDatabase; + gctBOOL hasAsyncFe; + gctBOOL hasL2Cache; + + gcsHARDWARE_SIGNATURE signature; + + gctUINT32 maxOutstandingReads; + + gcsHARDWARE_PAGETABLE_ARRAY pagetableArray; + + gctUINT64 contextID; +}; + +typedef struct _gcsFEDescriptor +{ + gctUINT32 start; + gctUINT32 end; +} +gcsFEDescriptor; + +typedef struct _gcsFE * gckFE; +typedef struct _gcsFE +{ + gckOS os; + + /* Number of free descriptors. 
*/ + gctPOINTER freeDscriptors; +} +gcsFE; + +gceSTATUS +gckFE_Initialize( + IN gckHARDWARE Hardware, + OUT gckFE FE + ); + +gceSTATUS +gckFE_ReserveSlot( + IN gckHARDWARE Hardware, + IN gckFE FE, + OUT gctBOOL * Available + ); + +void +gckFE_UpdateAvaiable( + IN gckHARDWARE Hardware, + OUT gckFE FE + ); + +void +gckFE_Execute( + IN gckHARDWARE Hardware, + IN gckFE FE, + IN gcsFEDescriptor * Desc + ); + +gceSTATUS +gckHARDWARE_GetBaseAddress( + IN gckHARDWARE Hardware, + OUT gctUINT32_PTR BaseAddress + ); + +gceSTATUS +gckHARDWARE_NeedBaseAddress( + IN gckHARDWARE Hardware, + IN gctUINT32 State, + OUT gctBOOL_PTR NeedBase + ); + +gceSTATUS +gckHARDWARE_GetFrameInfo( + IN gckHARDWARE Hardware, + OUT gcsHAL_FRAME_INFO * FrameInfo + ); + +gceSTATUS +gckHARDWARE_DumpGpuProfile( + IN gckHARDWARE Hardware + ); + +gceSTATUS +gckHARDWARE_HandleFault( + IN gckHARDWARE Hardware + ); + +gceSTATUS +gckHARDWARE_ExecuteFunctions( + IN gckHARDWARE Hardware, + IN gceHARDWARE_FUNCTION Function + ); + +gceSTATUS +gckHARDWARE_DummyDraw( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gctUINT32 Address, + IN gceDUMMY_DRAW_TYPE DummyDrawType, + IN OUT gctUINT32 * Bytes + ); + +gceSTATUS +gckHARDWARE_EnterQueryClock( + IN gckHARDWARE Hardware, + OUT gctUINT64 *McStart, + OUT gctUINT64 *ShStart + ); + +gceSTATUS +gckHARDWARE_ExitQueryClock( + IN gckHARDWARE Hardware, + IN gctUINT64 McStart, + IN gctUINT64 ShStart, + OUT gctUINT32 *McClk, + OUT gctUINT32 *ShClk + ); + +#define gcmkWRITE_MEMORY(logical, data) \ + do { \ + gcmkVERIFY_OK(gckOS_WriteMemory(os, logical, data)); \ + logical++; \ + }\ + while (0) ; \ + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_kernel_hardware_h_ */ + diff --git a/drivers/mcst/gpu-viv/hal/kernel/arch/gc_hal_kernel_recorder.c b/drivers/mcst/gpu-viv/hal/kernel/arch/gc_hal_kernel_recorder.c new file mode 100644 index 000000000000..b6c2ce05cb15 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/arch/gc_hal_kernel_recorder.c @@ -0,0 +1,728 @@ 
/****************************************************************************
*
*    The MIT License (MIT)
*
*    Copyright (c) 2014 - 2018 Vivante Corporation
*
*    Permission is hereby granted, free of charge, to any person obtaining a
*    copy of this software and associated documentation files (the "Software"),
*    to deal in the Software without restriction, including without limitation
*    the rights to use, copy, modify, merge, publish, distribute, sublicense,
*    and/or sell copies of the Software, and to permit persons to whom the
*    Software is furnished to do so, subject to the following conditions:
*
*    The above copyright notice and this permission notice shall be included in
*    all copies or substantial portions of the Software.
*
*    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
*    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
*    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
*    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
*    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
*    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
*    DEALINGS IN THE SOFTWARE.
*
*****************************************************************************
*
*    The GPL License (GPL)
*
*    Copyright (C) 2014 - 2018 Vivante Corporation
*
*    This program is free software; you can redistribute it and/or
*    modify it under the terms of the GNU General Public License
*    as published by the Free Software Foundation; either version 2
*    of the License, or (at your option) any later version.
*
*    This program is distributed in the hope that it will be useful,
*    but WITHOUT ANY WARRANTY; without even the implied warranty of
*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*    GNU General Public License for more details.
*
*    You should have received a copy of the GNU General Public License
*    along with this program; if not, write to the Free Software Foundation,
*    Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*****************************************************************************
*
*    Note: This software is released under dual MIT and GPL licenses. A
*    recipient may use this file under the terms of either the MIT license or
*    GPL License. If you wish to use only one license not the other, you can
*    indicate your decision by deleting one of the above license notices in your
*    version of this file.
*
*****************************************************************************/


#include "gc_hal.h"
#include "gc_hal_kernel.h"
#include "gc_hal_kernel_context.h"

/*
 * -----------------------
 * HARDWARE STATE RECORDER
 * -----------------------
 *
 * The state mirror buffer is used to 'mirror' hardware states, since hardware
 * states can't be dumped directly. It is a context buffer which stores
 * 'global' context.
 *
 * For each commit, the state recorder
 * 1) Records the context buffer (if there is one) and the command buffers in
 *    this commit.
 * 2) Parses those buffers to estimate which states changed.
 * 3) Stores the result into a mirror buffer.
 *
 * == Commit 0 ====================================================================
 *
 *     Context Buffer 0
 *
 *     Command Buffer 0
 *
 *     Mirror Buffer  0  <- Context Buffer 0 + Command Buffer 0
 *
 * == Commit 1 ====================================================================
 *
 *     Command Buffer 1
 *
 *     Mirror Buffer  1  <- Command buffer 1 + Mirror Buffer 0
 *
 * == Commit 2 ====================================================================
 *
 *     Context Buffer 2 (optional)
 *
 *     Command Buffer 2
 *
 *     Mirror Buffer  2  <- Command buffer 2 + Context Buffer 2 + Mirror Buffer 1
 *
 * == Commit N ====================================================================
 *
 * For Commit N, these buffers are needed to reproduce hardware's behavior in
 * this commit.
 *
 * Mirror Buffer [N - 1] : State mirror accumulated by past commits,
 *                         which is used to restore hardware state.
 * Context Buffer [N]    :
 * Command Buffer [N]    : Command buffer executed by hardware in this commit.
 *
 * If the sequence in which states are programmed matters, hardware's behavior
 * can't be reproduced exactly, but the state values stored in the mirror
 * buffer are still correct.
 */

/* Queue size: number of commit records kept in the ring. */
#define gcdNUM_RECORDS 6

typedef struct _gcsPARSER_HANDLER * gckPARSER_HANDLER;

/* Callback invoked by the parser for every state touched by a command. */
typedef void
(*HandlerFunction)(
    IN gckPARSER_HANDLER Handler,
    IN gctUINT32 Addr,
    IN gctUINT32 Data
    );

typedef struct _gcsPARSER_HANDLER
{
    gctUINT32           type;
    /* Opcode this handler is interested in (0x01 == LOAD_STATE below). */
    gctUINT32           cmd;
    /* Opaque context handed back to 'function' on every call. */
    gctPOINTER          private;
    HandlerFunction     function;
}
gcsPARSER_HANDLER;

typedef struct _gcsPARSER * gckPARSER;
typedef struct _gcsPARSER
{
    /* Read cursor into the command buffer being parsed. */
    gctUINT8_PTR        currentCmdBufferAddr;

    /* Current command (two raw 32-bit words). */
    gctUINT32           lo;
    gctUINT32           hi;

    /* Fields decoded from the current command. */
    gctUINT8            cmdOpcode;
    gctUINT16           cmdAddr;
    gctUINT32           cmdSize;
    gctUINT32           cmdRectCount;
    gctUINT8            skip;
    gctUINT32           skipCount;

    /* gcvFALSE once an unknown opcode is seen (parse aborts with error). */
    gctBOOL             allow;
    /* gcvTRUE once a LINK command is seen (nothing after it executes). */
    gctBOOL             stop;

    /* Callback used by parser to handle a command. */
    gckPARSER_HANDLER   commandHandler;
}
gcsPARSER;

typedef struct _gcsMIRROR
{
    /* One state-mirror copy per ring slot. */
    gctUINT32_PTR       logical[gcdNUM_RECORDS];
    gctUINT32           bytes;
    /* State address -> buffer index map, copied from the gckCONTEXT. */
    gcsSTATE_MAP_PTR    map;
    gctSIZE_T           maxState;
}
gcsMIRROR;

typedef struct _gcsDELTA
{
    gctUINT64           commitStamp;
    gctUINT32_PTR       command;
    gctUINT32           commandBytes;
    gctUINT32_PTR       context;
    gctUINT32           contextBytes;
}
gcsDELTA;

typedef struct _gcsRECORDER
{
    gckOS               os;
    gcsMIRROR           mirror;
    gcsDELTA            deltas[gcdNUM_RECORDS];

    /* Index of current record. */
    gctUINT             index;

    /* Number of records. */
    gctUINT             num;

    /* Plugin used by gckPARSER. */
    gcsPARSER_HANDLER   recorderHandler;
    gckPARSER           parser;
}
gcsRECORDER;


/******************************************************************************\
***************************** Command Buffer Parser ****************************
\******************************************************************************/

/*
** The command buffer parser checks a command buffer from the FE's point of
** view to make sure there is no format error.
**
** The parser provides a callback mechanism, so plug-ins can be added to
** implement other functions.
*/

/* Feed every state of the current LOAD_STATE command to the registered
** handler (only if the handler subscribed to opcode 0x01). */
static void
_HandleLoadState(
    IN OUT gckPARSER Parser
    )
{
    gctUINT i;
    gctUINT32_PTR data = (gctUINT32_PTR)Parser->currentCmdBufferAddr;
    gctUINT32 cmdAddr = Parser->cmdAddr;

    if (Parser->commandHandler == gcvNULL
     || Parser->commandHandler->cmd != 0x01
    )
    {
        /* No handler for this command. */
        return;
    }

    for (i = 0; i < Parser->cmdSize; i++)
    {
        Parser->commandHandler->function(Parser->commandHandler, cmdAddr, *data);

        /* Advance to next state. */
        cmdAddr++;
        data++;
    }
}

/* Decode the command at the cursor: opcode, state address, payload size and
** the number of words to skip to reach the next command. */
static void
_GetCommand(
    IN OUT gckPARSER Parser
    )
{
    gctUINT32 * buffer = (gctUINT32 *)Parser->currentCmdBufferAddr;

    gctUINT16 cmdRectCount;
    gctUINT16 cmdDataCount;

    Parser->hi = buffer[0];
    Parser->lo = buffer[1];

    /* Opcode lives in bits [31:27] of the first word. */
    Parser->cmdOpcode = (((((gctUINT32) (Parser->hi)) >> (0 ? 31:27)) & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1)))))) );
    Parser->cmdRectCount = 1;

    switch (Parser->cmdOpcode)
    {
    case 0x01:
        /* LOAD_STATE: extract count from bits [25:16]. */
        Parser->cmdSize = (((((gctUINT32) (Parser->hi)) >> (0 ? 25:16)) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1)))))) );
        if (Parser->cmdSize == 0)
        {
            /* 0 means 1024. */
            Parser->cmdSize = 1024;
        }
        /* Even-sized payloads carry one padding word for 64-bit alignment. */
        Parser->skip = (Parser->cmdSize & 0x1) ? 0 : 1;

        /* Extract address from bits [15:0]. */
        Parser->cmdAddr = (((((gctUINT32) (Parser->hi)) >> (0 ? 15:0)) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1)))))) );

        /* Payload starts right after the 32-bit header word. */
        Parser->currentCmdBufferAddr = Parser->currentCmdBufferAddr + 4;
        Parser->skipCount = Parser->cmdSize + Parser->skip;
        break;

    case 0x05:
        Parser->cmdSize = 4;
        Parser->skipCount = gcmALIGN(Parser->cmdSize, 2);
        break;

    case 0x06:
        Parser->cmdSize = 5;
        Parser->skipCount = gcmALIGN(Parser->cmdSize, 2);
        break;

    case 0x0C:
        Parser->cmdSize = 3;
        Parser->skipCount = gcmALIGN(Parser->cmdSize, 2);
        break;

    case 0x09:
        Parser->cmdSize = 2;
        Parser->cmdAddr = 0x0F16;
        Parser->skipCount = gcmALIGN(Parser->cmdSize, 2);
        break;

    case 0x04:
        Parser->cmdSize = 1;
        Parser->cmdAddr = 0x0F06;

        /* Rect count in bits [15:8], data count in bits [26:16]. */
        cmdRectCount = (((((gctUINT32) (Parser->hi)) >> (0 ? 15:8)) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 15:8) - (0 ? 15:8) + 1)))))) );
        cmdDataCount = (((((gctUINT32) (Parser->hi)) >> (0 ? 26:16)) & ((gctUINT32) ((((1 ? 26:16) - (0 ? 26:16) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 26:16) - (0 ? 26:16) + 1)))))) );

        Parser->skipCount = gcmALIGN(Parser->cmdSize, 2)
                          + cmdRectCount * 2
                          + gcmALIGN(cmdDataCount, 2);

        Parser->cmdRectCount = cmdRectCount;
        break;

    case 0x03:
        /* Fixed 64-bit command: step straight over it. */
        Parser->currentCmdBufferAddr = Parser->currentCmdBufferAddr + 8;
        Parser->skipCount = 0;
        break;

    case 0x02:
        Parser->currentCmdBufferAddr = Parser->currentCmdBufferAddr + 8;
        Parser->skipCount = 0;
        break;

    case 0x07:
        Parser->currentCmdBufferAddr = Parser->currentCmdBufferAddr + 8;
        Parser->skipCount = 0;
        break;

    case 0x08:
        /* Commands after LINK aren't executed, skip them. */
        Parser->stop = gcvTRUE;
        break;

    default:
        /* Unknown command is a risk. */
        Parser->allow = gcvFALSE;
        break;
    }
}

/* Dispatch the decoded command to its handler, then advance the cursor by
** the skip count computed in _GetCommand. */
static void
_ParseCommand(
    IN OUT gckPARSER Parser
    )
{
    switch(Parser->cmdOpcode)
    {
    case 0x01:
        _HandleLoadState(Parser);
        break;
    case 0x05:
    case 0x06:
    case 0x0C:
        break;
    case 0x04:
        break;
    default:
        break;
    }

    /* Advance to next command (skipCount is in 32-bit words). */
    Parser->currentCmdBufferAddr = Parser->currentCmdBufferAddr
                                 + (Parser->skipCount << 2);
}

/* Walk a command buffer of Bytes bytes, decoding and dispatching every
** command. Returns gcvSTATUS_NOT_SUPPORTED when an unknown opcode is met. */
gceSTATUS
gckPARSER_Parse(
    IN gckPARSER Parser,
    IN gctUINT8_PTR Buffer,
    IN gctUINT32 Bytes
    )
{
    gckPARSER parser = Parser;
    gctUINT8_PTR end = (gctUINT8_PTR)Buffer + Bytes;

    /* Initialize parser. */
    parser->currentCmdBufferAddr = (gctUINT8_PTR)Buffer;
    parser->skip = 0;
    parser->allow = gcvTRUE;
    parser->stop = gcvFALSE;

    /* Go through command buffer until reaching the end
    ** or meeting an error. */
    do
    {
        _GetCommand(parser);

        _ParseCommand(parser);
    }
    while ((parser->currentCmdBufferAddr < end)
        && (parser->allow == gcvTRUE)
        && (parser->stop == gcvFALSE)
        );

    if (parser->allow == gcvFALSE)
    {
        /* Error detected. */
        return gcvSTATUS_NOT_SUPPORTED;
    }

    return gcvSTATUS_OK;
}

/*******************************************************************************
**
**  gckPARSER_RegisterCommandHandler
**
**  Register a command handler which will be called when parser gets a command.
**
*/
gceSTATUS
gckPARSER_RegisterCommandHandler(
    IN gckPARSER Parser,
    IN gckPARSER_HANDLER Handler
    )
{
    Parser->commandHandler = Handler;

    return gcvSTATUS_OK;
}

/* Allocate a parser object and attach the (optional) handler plug-in.
** NOTE(review): only 'commandHandler' is initialized here; the remaining
** fields are set by gckPARSER_Parse before use. */
gceSTATUS
gckPARSER_Construct(
    IN gckOS Os,
    IN gckPARSER_HANDLER Handler,
    OUT gckPARSER * Parser
    )
{
    gceSTATUS status;
    gckPARSER pointer;

    gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcsPARSER), (gctPOINTER *)&pointer));

    /* Put it here temporarily; should have a more general plug-in mechanism. */
    pointer->commandHandler = Handler;

    *Parser = pointer;

    return gcvSTATUS_OK;

OnError:
    return status;
}

/* Free a parser object created by gckPARSER_Construct. */
void
gckPARSER_Destroy(
    IN gckOS Os,
    IN gckPARSER Parser
    )
{
    gcmkOS_SAFE_FREE(Os, Parser);
}

/******************************************************************************\
**************************** Hardware States Recorder **************************
\******************************************************************************/

/* Parser plug-in callback: fold one state write into the current mirror.
** (Name kept as-is; "Recode" is historical spelling for "Record".) */
static void
_RecodeState(
    IN gckPARSER_HANDLER Handler,
    IN gctUINT32 Addr,
    IN gctUINT32 Data
    )
{
    gcmkVERIFY_OK(gckRECORDER_UpdateMirror(Handler->private, Addr, Data));
}

/* Previous slot in the record ring (wraps at 0). */
static gctUINT
_Previous(
    IN gctUINT Index
    )
{
    if (Index == 0)
    {
        return gcdNUM_RECORDS - 1;
    }

    return Index - 1;
}

/* Next slot in the record ring (wraps at gcdNUM_RECORDS). */
static gctUINT
_Next(
    IN gctUINT Index
    )
{
    return (Index + 1) % gcdNUM_RECORDS;
}

/* Build a recorder: clone the context's state map and buffer into every ring
** slot, allocate per-slot delta buffers, and create the parser whose plug-in
** feeds state writes back into the mirror. */
gceSTATUS
gckRECORDER_Construct(
    IN gckOS Os,
    IN gckHARDWARE Hardware,
    OUT gckRECORDER * Recorder
    )
{
    gceSTATUS status;
    gckCONTEXT context = gcvNULL;
    gckRECORDER recorder = gcvNULL;
    gctSIZE_T mapSize;
    gctUINT i;
    gctBOOL virtualCommandBuffer = Hardware->kernel->virtualCommandBuffer;

    /* MMU is not ready now: temporarily force non-virtual command buffers
    ** while constructing the temporary context. */
    Hardware->kernel->virtualCommandBuffer = gcvFALSE;

    gcmkONERROR(gckCONTEXT_Construct(Os, Hardware, 0, &context));

    /* Restore. */
    Hardware->kernel->virtualCommandBuffer = virtualCommandBuffer;

    gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcsRECORDER), (gctPOINTER *)&recorder));

    gckOS_ZeroMemory(recorder, gcmSIZEOF(gcsRECORDER));

    /* Copy state map. */
    recorder->mirror.maxState = context->maxState;

    mapSize = context->maxState * gcmSIZEOF(gcsSTATE_MAP);

    gcmkONERROR(gckOS_Allocate(Os, mapSize, (gctPOINTER *)&recorder->mirror.map));

    gckOS_MemCopy(recorder->mirror.map, context->map, mapSize);

    /* Copy context buffer. */
    recorder->mirror.bytes = context->totalSize;

    for (i = 0; i < gcdNUM_RECORDS; i++)
    {
        gcmkONERROR(gckOS_Allocate(Os, context->totalSize, (gctPOINTER *)&recorder->mirror.logical[i]));
        gckOS_MemCopy(recorder->mirror.logical[i], context->buffer->logical, context->totalSize);
    }

    for (i = 0; i < gcdNUM_RECORDS; i++)
    {
        gcmkONERROR(gckOS_Allocate(Os, gcdCMD_BUFFER_SIZE, (gctPOINTER *)&recorder->deltas[i].command));
        gcmkONERROR(gckOS_Allocate(Os, context->totalSize, (gctPOINTER *)&recorder->deltas[i].context));
    }

    recorder->index = 0;
    recorder->num = 0;

    /* Initialize Parser plugin: subscribe to LOAD_STATE (0x01) so every
    ** parsed state write updates this recorder's mirror. */
    recorder->recorderHandler.cmd = 0x01;
    recorder->recorderHandler.private = recorder;
    recorder->recorderHandler.function = _RecodeState;

    gcmkONERROR(gckPARSER_Construct(Os, &recorder->recorderHandler, &recorder->parser));

    recorder->os = Os;

    *Recorder = recorder;

    /* The temporary context was only needed for its map/buffer copies. */
    gckCONTEXT_Destroy(context);
    return gcvSTATUS_OK;

OnError:
    if (context)
    {
        gckCONTEXT_Destroy(context);
    }

    if (recorder)
    {
        gckRECORDER_Destory(Os, recorder);
    }

    return status;
}

/* Tear down a recorder and all buffers it owns. (Name "Destory" is part of
** the established interface; callers elsewhere use this spelling.) */
gceSTATUS
gckRECORDER_Destory(
    IN gckOS Os,
    IN gckRECORDER Recorder
    )
{
    gctUINT i;

    if (Recorder->mirror.map)
    {
        gcmkOS_SAFE_FREE(Os, Recorder->mirror.map);
    }

    for (i = 0; i < gcdNUM_RECORDS; i++)
    {
        if (Recorder->mirror.logical[i])
        {
            gcmkOS_SAFE_FREE(Os, Recorder->mirror.logical[i]);
        }
    }

    for (i = 0; i < gcdNUM_RECORDS; i++)
    {
        if (Recorder->deltas[i].command)
        {
            gcmkOS_SAFE_FREE(Os, Recorder->deltas[i].command);
        }

        if (Recorder->deltas[i].context)
        {
            gcmkOS_SAFE_FREE(Os, Recorder->deltas[i].context);
        }
    }

    if (Recorder->parser)
    {
        gckPARSER_Destroy(Os, Recorder->parser);
    }

    gcmkOS_SAFE_FREE(Os, Recorder);

    return gcvSTATUS_OK;
}

/* Store one state value into the current mirror slot, using the state map to
** translate the state address into a buffer index. */
gceSTATUS
gckRECORDER_UpdateMirror(
    IN gckRECORDER Recorder,
    IN gctUINT32 State,
    IN gctUINT32 Data
    )
{
    gctUINT32 index;
    gcsSTATE_MAP_PTR map = Recorder->mirror.map;
    gctUINT32_PTR buffer = Recorder->mirror.logical[Recorder->index];

    if (State >= Recorder->mirror.maxState)
    {
        /* Ignore them just like HW does. */
        return gcvSTATUS_OK;
    }

    index = map[State].index;

    if (index)
    {
        buffer[index] = Data;
    }

    return gcvSTATUS_OK;
}

/* Seal the current record with its commit stamp, propagate the mirror into
** the next ring slot, and reset that slot's delta. */
void
gckRECORDER_AdvanceIndex(
    IN gckRECORDER Recorder,
    IN gctUINT64 CommitStamp
    )
{
    /* Get next record. */
    gctUINT next = (Recorder->index + 1) % gcdNUM_RECORDS;

    /* Record stamp of this commit. */
    Recorder->deltas[Recorder->index].commitStamp = CommitStamp;

    /* Mirror of next record is mirror of this record and delta in next record. */
    gckOS_MemCopy(Recorder->mirror.logical[next],
        Recorder->mirror.logical[Recorder->index], Recorder->mirror.bytes);

    /* Advance to next record. */
    Recorder->index = next;

    /* At most gcdNUM_RECORDS - 1 completed records are kept. */
    Recorder->num = gcmMIN(Recorder->num + 1, gcdNUM_RECORDS - 1);


    /* Reset delta. */
    Recorder->deltas[Recorder->index].commandBytes = 0;
    Recorder->deltas[Recorder->index].contextBytes = 0;
}

/* Capture this commit's command/context buffers into the current delta and
** parse them so their state writes update the mirror. A byte count of
** 0xFFFFFFFF means "no buffer of this kind in this commit". */
void
gckRECORDER_Record(
    IN gckRECORDER Recorder,
    IN gctUINT8_PTR CommandBuffer,
    IN gctUINT32 CommandBytes,
    IN gctUINT8_PTR ContextBuffer,
    IN gctUINT32 ContextBytes
    )
{
    gcsDELTA * delta = &Recorder->deltas[Recorder->index];

    if (CommandBytes != 0xFFFFFFFF)
    {
        gckPARSER_Parse(Recorder->parser, CommandBuffer, CommandBytes);
        gckOS_MemCopy(delta->command, CommandBuffer, CommandBytes);
        delta->commandBytes = CommandBytes;
    }

    if (ContextBytes != 0xFFFFFFFF)
    {
        gckPARSER_Parse(Recorder->parser, ContextBuffer, ContextBytes);
        gckOS_MemCopy(delta->context, ContextBuffer, ContextBytes);
        delta->contextBytes = ContextBytes;
    }
}

/* Dump all retained records, oldest first: for each, the mirror that
** preceded it, its context delta (if any), and its command delta. */
void
gckRECORDER_Dump(
    IN gckRECORDER Recorder
    )
{
    gctUINT last = Recorder->index;
    gctUINT previous;
    gctUINT i;
    gcsMIRROR *mirror = &Recorder->mirror;
    gcsDELTA *delta;
    gckOS os = Recorder->os;

    /* Step back to the oldest retained record. */
    for (i = 0; i < Recorder->num; i++)
    {
        last = _Previous(last);
    }

    for (i = 0; i < Recorder->num; i++)
    {
        delta = &Recorder->deltas[last];

        /* Dump record */
        gcmkPRINT("#[commit %llu]", delta->commitStamp);

        if (delta->commitStamp)
        {
            previous = _Previous(last);

            gcmkPRINT("#[mirror]");
            gckOS_DumpBuffer(os, mirror->logical[previous], mirror->bytes, gcvDUMP_BUFFER_CONTEXT, gcvTRUE);
            gcmkPRINT("@[kernel.execute]");
        }

        if (delta->contextBytes)
        {
            gckOS_DumpBuffer(os, delta->context, delta->contextBytes, gcvDUMP_BUFFER_CONTEXT, gcvTRUE);
gcmkPRINT("@[kernel.execute]"); + } + + gckOS_DumpBuffer(os, delta->command, delta->commandBytes, gcvDUMP_BUFFER_USER, gcvTRUE); + gcmkPRINT("@[kernel.execute]"); + + last = _Next(last); + } +} + + diff --git a/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel.c b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel.c new file mode 100644 index 000000000000..ef0c652ae718 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel.c @@ -0,0 +1,6504 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#include "gc_hal_kernel_precomp.h" + +#if gcdDEC_ENABLE_AHB +#include "viv_dec300_main.h" +#endif + +#define _GC_OBJ_ZONE gcvZONE_KERNEL + +/******************************************************************************* +***** Version Signature *******************************************************/ + +#define _gcmTXT2STR(t) #t +#define gcmTXT2STR(t) _gcmTXT2STR(t) +const char * _VERSION = "\n\0$VERSION$" + gcmTXT2STR(gcvVERSION_MAJOR) "." + gcmTXT2STR(gcvVERSION_MINOR) "." 
+ gcmTXT2STR(gcvVERSION_PATCH) ":" + gcmTXT2STR(gcvVERSION_BUILD) "$\n"; + +/******************************************************************************\ +******************************* gckKERNEL API Code ****************************** +\******************************************************************************/ + +#if gcmIS_DEBUG(gcdDEBUG_TRACE) +#define gcmDEFINE2TEXT(d) #d +gctCONST_STRING _DispatchText[] = +{ + gcmDEFINE2TEXT(gcvHAL_QUERY_VIDEO_MEMORY), + gcmDEFINE2TEXT(gcvHAL_QUERY_CHIP_IDENTITY), + gcmDEFINE2TEXT(gcvHAL_QUERY_CHIP_FREQUENCY), + gcmDEFINE2TEXT(gcvHAL_ALLOCATE_NON_PAGED_MEMORY), + gcmDEFINE2TEXT(gcvHAL_FREE_NON_PAGED_MEMORY), + gcmDEFINE2TEXT(gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY), + gcmDEFINE2TEXT(gcvHAL_FREE_CONTIGUOUS_MEMORY), + gcmDEFINE2TEXT(gcvHAL_ALLOCATE_VIDEO_MEMORY), + gcmDEFINE2TEXT(gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY), + gcmDEFINE2TEXT(gcvHAL_RELEASE_VIDEO_MEMORY), + gcmDEFINE2TEXT(gcvHAL_MAP_MEMORY), + gcmDEFINE2TEXT(gcvHAL_UNMAP_MEMORY), + gcmDEFINE2TEXT(gcvHAL_MAP_USER_MEMORY), + gcmDEFINE2TEXT(gcvHAL_UNMAP_USER_MEMORY), + gcmDEFINE2TEXT(gcvHAL_LOCK_VIDEO_MEMORY), + gcmDEFINE2TEXT(gcvHAL_UNLOCK_VIDEO_MEMORY), + gcmDEFINE2TEXT(gcvHAL_EVENT_COMMIT), + gcmDEFINE2TEXT(gcvHAL_USER_SIGNAL), + gcmDEFINE2TEXT(gcvHAL_SIGNAL), + gcmDEFINE2TEXT(gcvHAL_WRITE_DATA), + gcmDEFINE2TEXT(gcvHAL_COMMIT), + gcmDEFINE2TEXT(gcvHAL_STALL), + gcmDEFINE2TEXT(gcvHAL_READ_REGISTER), + gcmDEFINE2TEXT(gcvHAL_WRITE_REGISTER), + gcmDEFINE2TEXT(gcvHAL_GET_PROFILE_SETTING), + gcmDEFINE2TEXT(gcvHAL_SET_PROFILE_SETTING), + gcmDEFINE2TEXT(gcvHAL_PROFILE_REGISTERS_2D), + gcmDEFINE2TEXT(gcvHAL_READ_ALL_PROFILE_REGISTERS_PART1), + gcmDEFINE2TEXT(gcvHAL_READ_ALL_PROFILE_REGISTERS_PART2), + gcmDEFINE2TEXT(gcvHAL_READ_PROFILER_REGISTER_SETTING), + gcmDEFINE2TEXT(gcvHAL_SET_POWER_MANAGEMENT_STATE), + gcmDEFINE2TEXT(gcvHAL_QUERY_POWER_MANAGEMENT_STATE), + gcmDEFINE2TEXT(gcvHAL_GET_BASE_ADDRESS), + gcmDEFINE2TEXT(gcvHAL_SET_IDLE), + 
gcmDEFINE2TEXT(gcvHAL_QUERY_KERNEL_SETTINGS), + gcmDEFINE2TEXT(gcvHAL_RESET), + gcmDEFINE2TEXT(gcvHAL_MAP_PHYSICAL), + gcmDEFINE2TEXT(gcvHAL_DEBUG), + gcmDEFINE2TEXT(gcvHAL_CACHE), + gcmDEFINE2TEXT(gcvHAL_TIMESTAMP), + gcmDEFINE2TEXT(gcvHAL_DATABASE), + gcmDEFINE2TEXT(gcvHAL_VERSION), + gcmDEFINE2TEXT(gcvHAL_CHIP_INFO), + gcmDEFINE2TEXT(gcvHAL_ATTACH), + gcmDEFINE2TEXT(gcvHAL_DETACH), + gcmDEFINE2TEXT(gcvHAL_SET_TIMEOUT), + gcmDEFINE2TEXT(gcvHAL_GET_FRAME_INFO), + gcmDEFINE2TEXT(gcvHAL_DUMP_GPU_PROFILE), + gcmDEFINE2TEXT(gcvHAL_QUERY_COMMAND_BUFFER), + gcmDEFINE2TEXT(gcvHAL_COMMIT_DONE), + gcmDEFINE2TEXT(gcvHAL_DUMP_GPU_STATE), + gcmDEFINE2TEXT(gcvHAL_DUMP_EVENT), + gcmDEFINE2TEXT(gcvHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER), + gcmDEFINE2TEXT(gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER), + gcmDEFINE2TEXT(gcvHAL_SET_FSCALE_VALUE), + gcmDEFINE2TEXT(gcvHAL_GET_FSCALE_VALUE), + gcmDEFINE2TEXT(gcvHAL_EXPORT_VIDEO_MEMORY), + gcmDEFINE2TEXT(gcvHAL_NAME_VIDEO_MEMORY), + gcmDEFINE2TEXT(gcvHAL_IMPORT_VIDEO_MEMORY), + gcmDEFINE2TEXT(gcvHAL_QUERY_RESET_TIME_STAMP), + gcmDEFINE2TEXT(gcvHAL_READ_REGISTER_EX), + gcmDEFINE2TEXT(gcvHAL_WRITE_REGISTER_EX), + gcmDEFINE2TEXT(gcvHAL_CREATE_NATIVE_FENCE), + gcmDEFINE2TEXT(gcvHAL_WAIT_NATIVE_FENCE), + gcmDEFINE2TEXT(gcvHAL_DESTROY_MMU), + gcmDEFINE2TEXT(gcvHAL_SHBUF), + gcmDEFINE2TEXT(gcvHAL_GET_GRAPHIC_BUFFER_FD), + gcmDEFINE2TEXT(gcvHAL_SET_VIDEO_MEMORY_METADATA), + gcmDEFINE2TEXT(gcvHAL_GET_VIDEO_MEMORY_FD), + gcmDEFINE2TEXT(gcvHAL_CONFIG_POWER_MANAGEMENT), + gcmDEFINE2TEXT(gcvHAL_WRAP_USER_MEMORY), + gcmDEFINE2TEXT(gcvHAL_WAIT_FENCE), +#if gcdDEC_ENABLE_AHB + gcmDEFINE2TEXT(gcvHAL_DEC300_READ), + gcmDEFINE2TEXT(gcvHAL_DEC300_WRITE), + gcmDEFINE2TEXT(gcvHAL_DEC300_FLUSH), + gcmDEFINE2TEXT(gcvHAL_DEC300_FLUSH_WAIT), +#endif + gcmDEFINE2TEXT(gcvHAL_BOTTOM_HALF_UNLOCK_VIDEO_MEMORY), + gcmDEFINE2TEXT(gcvHAL_QUERY_CHIP_OPTION), +}; +#endif + +#if gcdGPU_TIMEOUT && gcdINTERRUPT_STATISTIC +void +_MonitorTimerFunction( + gctPOINTER Data + ) +{ + gckKERNEL 
kernel = (gckKERNEL)Data; + gctINT32 pendingInterrupt; + gctBOOL reset = gcvFALSE; + gctINT32 mask; + gctUINT32 advance = kernel->timeOut/2; + + + if (kernel->monitorTimerStop) + { + /* Stop. */ + return; + } + + gckOS_AtomGet(kernel->os, kernel->eventObj->interruptCount, &pendingInterrupt); + + if (pendingInterrupt < 0) + { + gctINT i = 0 - pendingInterrupt; + gctINT pendingMask; + + gcmkVERIFY_OK(gckOS_AtomGet( + kernel->os, + kernel->hardware->pendingEvent, + &pendingMask + )); + + gcmkPRINT("[galcore]: Number of pending interrupt is %d mask is %x", + pendingInterrupt, pendingMask); + + while (i--) + { + /* Ignore counting which should not exist. */ + gckOS_AtomIncrement(kernel->os, kernel->eventObj->interruptCount, &pendingInterrupt); + } + + gckOS_AtomGet(kernel->os, kernel->eventObj->interruptCount, &pendingInterrupt); + } + + if (kernel->monitoring == gcvFALSE) + { + if (pendingInterrupt) + { + /* Begin to mointor GPU state. */ + kernel->monitoring = gcvTRUE; + + /* Record current state. */ + kernel->lastCommitStamp = kernel->eventObj->lastCommitStamp; + kernel->restoreAddress = kernel->hardware->lastWaitLink; + gcmkVERIFY_OK(gckOS_AtomGet( + kernel->os, + kernel->hardware->pendingEvent, + &kernel->restoreMask + )); + + /* Clear timeout. */ + kernel->timer = 0; + } + } + else + { + if (pendingInterrupt) + { + gcmkVERIFY_OK(gckOS_AtomGet( + kernel->os, + kernel->hardware->pendingEvent, + &mask + )); + + if (kernel->eventObj->lastCommitStamp == kernel->lastCommitStamp + && kernel->hardware->lastWaitLink == kernel->restoreAddress + && mask == kernel->restoreMask + ) + { + /* GPU state is not changed, accumlate timeout. */ + kernel->timer += advance; + + if (kernel->timer >= kernel->timeOut) + { + /* GPU stuck, trigger reset. 
*/ + reset = gcvTRUE; + } + } + else + { + /* GPU state changed, cancel current timeout.*/ + kernel->monitoring = gcvFALSE; + } + } + else + { + /* GPU finish all jobs, cancel current timeout*/ + kernel->monitoring = gcvFALSE; + } + } + + if (reset) + { + gckKERNEL_Recovery(kernel); + + /* Work in this timeout is done. */ + kernel->monitoring = gcvFALSE; + } + + gcmkVERIFY_OK(gckOS_StartTimer(kernel->os, kernel->monitorTimer, advance)); +} +#endif + +#if gcdPROCESS_ADDRESS_SPACE +gceSTATUS +_MapCommandBuffer( + IN gckKERNEL Kernel + ) +{ + gceSTATUS status; + gctUINT32 i; + gctPHYS_ADDR_T physical; + gctUINT32 address; + gckMMU mmu; + + gcmkONERROR(gckKERNEL_GetProcessMMU(Kernel, &mmu)); + + for (i = 0; i < gcdCOMMAND_QUEUES; i++) + { + gcmkONERROR(gckOS_GetPhysicalAddress( + Kernel->os, + Kernel->command->queues[i].logical, + &physical + )); + + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical(Kernel->os, physical, &physical)); + + gcmkSAFECASTPHYSADDRT(address, physical); + + gcmkONERROR(gckMMU_FlatMapping(mmu, address, 1)); + } + + return gcvSTATUS_OK; + +OnError: + return status; +} +#endif + +void +_DumpDriverConfigure( + IN gckKERNEL Kernel + ) +{ + gcmkPRINT_N(0, "**************************\n"); + gcmkPRINT_N(0, "*** GPU DRV CONFIG ***\n"); + gcmkPRINT_N(0, "**************************\n"); + + gcmkPRINT("Galcore version %d.%d.%d.%d\n", + gcvVERSION_MAJOR, gcvVERSION_MINOR, gcvVERSION_PATCH, gcvVERSION_BUILD); + + gckOS_DumpParam(); +} + +void +_DumpState( + IN gckKERNEL Kernel + ) +{ + /* Dump GPU Debug registers. */ + gcmkVERIFY_OK(gckHARDWARE_DumpGPUState(Kernel->hardware)); + + gcmkVERIFY_OK(gckCOMMAND_DumpExecutingBuffer(Kernel->command)); + + /* Dump Pending event. */ + gcmkVERIFY_OK(gckEVENT_Dump(Kernel->eventObj)); + + /* Dump Process DB. */ + gcmkVERIFY_OK(gckKERNEL_DumpProcessDB(Kernel)); + +#if gcdRECORD_COMMAND + /* Dump record. 
*/ + gckRECORDER_Dump(Kernel->command->recorder); +#endif +} + +static gceHARDWARE_TYPE +_GetHardwareType( + IN gckKERNEL Kernel + ) +{ + gceHARDWARE_TYPE type; + gcmkHEADER(); + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + + { + type = Kernel->hardware->type; + } + + gcmkFOOTER_ARG("type=%d", type); + return type; +} + +gceSTATUS +_SetRecovery( + IN gckKERNEL Kernel, + IN gctBOOL Recovery, + IN gctUINT32 StuckDump + ) +{ + Kernel->recovery = Recovery; + + if (Recovery == gcvFALSE) + { + /* Dump stuck information if Recovery is disabled. */ + Kernel->stuckDump = gcmMAX(StuckDump, gcvSTUCK_DUMP_USER_COMMAND); + } + + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckKERNEL_Construct +** +** Construct a new gckKERNEL object. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gceCORE Core +** Specified core. +** +** IN gctPOINTER Context +** Pointer to a driver defined context. +** +** IN gckDB SharedDB, +** Pointer to a shared DB. +** +** OUTPUT: +** +** gckKERNEL * Kernel +** Pointer to a variable that will hold the pointer to the gckKERNEL +** object. +*/ + +gceSTATUS +gckKERNEL_Construct( + IN gckOS Os, + IN gceCORE Core, + IN gctUINT ChipID, + IN gctPOINTER Context, + IN gckDEVICE Device, + IN gckDB SharedDB, + OUT gckKERNEL * Kernel + ) +{ + gckKERNEL kernel = gcvNULL; + gceSTATUS status; + gctSIZE_T i; + gctPOINTER pointer = gcvNULL; + gctUINT32 recovery; + gctUINT32 stuckDump; + + gcmkHEADER_ARG("Os=0x%x Context=0x%x", Os, Context); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Kernel != gcvNULL); + + /* Allocate the gckKERNEL object. */ + gcmkONERROR(gckOS_Allocate(Os, + gcmSIZEOF(struct _gckKERNEL), + &pointer)); + + /* Zero the object. */ + gckOS_ZeroMemory(pointer, gcmSIZEOF(struct _gckKERNEL)); + + kernel = pointer; + + /* Initialize the gckKERNEL object. 
*/ + kernel->object.type = gcvOBJ_KERNEL; + kernel->os = Os; + kernel->core = Core; + kernel->device = Device; + kernel->chipID = ChipID; + +#if gcdENABLE_TRUST_APPLICATION + /* Connect to security service for this GPU. */ + gcmkONERROR(gckKERNEL_SecurityOpen(kernel, kernel->core, &kernel->securityChannel)); +#endif + + if (SharedDB == gcvNULL) + { + gcmkONERROR(gckOS_Allocate(Os, + gcmSIZEOF(struct _gckDB), + &pointer)); + + kernel->db = pointer; + kernel->dbCreated = gcvTRUE; + kernel->db->freeDatabase = gcvNULL; + kernel->db->freeRecord = gcvNULL; + kernel->db->dbMutex = gcvNULL; + kernel->db->lastDatabase = gcvNULL; + kernel->db->idleTime = 0; + kernel->db->lastIdle = 0; + kernel->db->lastSlowdown = 0; + + for (i = 0; i < gcmCOUNTOF(kernel->db->db); ++i) + { + kernel->db->db[i] = gcvNULL; + } + + /* Construct a database mutex. */ + gcmkONERROR(gckOS_CreateMutex(Os, &kernel->db->dbMutex)); + + /* Construct a video memory name database. */ + gcmkONERROR(gckKERNEL_CreateIntegerDatabase(kernel, &kernel->db->nameDatabase)); + + /* Construct a video memory name database mutex. */ + gcmkONERROR(gckOS_CreateMutex(Os, &kernel->db->nameDatabaseMutex)); + + /* Construct a pointer name database. */ + gcmkONERROR(gckKERNEL_CreateIntegerDatabase(kernel, &kernel->db->pointerDatabase)); + + /* Construct a pointer name database mutex. */ + gcmkONERROR(gckOS_CreateMutex(Os, &kernel->db->pointerDatabaseMutex)); + + /* Initialize on fault vidmem list. */ + gcsLIST_Init(&kernel->db->onFaultVidmemList); + } + else + { + kernel->db = SharedDB; + kernel->dbCreated = gcvFALSE; + } + + for (i = 0; i < gcmCOUNTOF(kernel->timers); ++i) + { + kernel->timers[i].startTime = 0; + kernel->timers[i].stopTime = 0; + } + + /* Save context. */ + kernel->context = Context; + + /* Construct atom holding number of clients. 
*/ + kernel->atomClients = gcvNULL; + gcmkONERROR(gckOS_AtomConstruct(Os, &kernel->atomClients)); + + kernel->recovery = gcvTRUE; + kernel->stuckDump = gcvSTUCK_DUMP_NONE; + + /* Override default recovery and stuckDump setting. */ + status = gckOS_QueryOption(Os, "recovery", &recovery); + + if (gcmIS_SUCCESS(status)) + { + status = gckOS_QueryOption(Os, "stuckDump", &stuckDump); + + gcmkASSERT(status == gcvSTATUS_OK); + + _SetRecovery(kernel, recovery, stuckDump); + } + + /* Need the kernel reference before gckKERNEL_Construct() completes. + gckOS_MapPagesEx() is called to map kernel virtual command buffers. */ + *Kernel = kernel; + + kernel->virtualBufferHead = + kernel->virtualBufferTail = gcvNULL; + + gcmkONERROR( + gckOS_CreateMutex(Os, (gctPOINTER)&kernel->virtualBufferLock)); + + { + /* Construct the gckHARDWARE object. */ + gcmkONERROR( + gckHARDWARE_Construct(Os, kernel->core, &kernel->hardware)); + + /* Set pointer to gckKERNEL object in gckHARDWARE object. */ + kernel->hardware->kernel = kernel; + + kernel->timeOut = kernel->hardware->type == gcvHARDWARE_2D + ? gcdGPU_2D_TIMEOUT + : gcdGPU_TIMEOUT + ; + + /* Initialize virtual command buffer. */ +#if gcdALLOC_CMD_FROM_RESERVE || gcdSECURITY || gcdDISABLE_GPU_VIRTUAL_ADDRESS || !USE_KERNEL_VIRTUAL_BUFFERS + kernel->virtualCommandBuffer = gcvFALSE; +#else + kernel->virtualCommandBuffer = kernel->hardware->options.enableMMU; +#endif + +#if gcdSHARED_PAGETABLE + /* Construct the gckMMU object. */ + gcmkONERROR( + gckMMU_Construct(kernel, gcdMMU_SIZE, &kernel->mmu)); +#else + if (Device == gcvNULL) + { + /* Construct the gckMMU object. 
*/ + gcmkONERROR( + gckMMU_Construct(kernel, gcdMMU_SIZE, &kernel->mmu)); + } + else + { + gcmkONERROR(gckDEVICE_GetMMU(Device, kernel->hardware->type, &kernel->mmu)); + + if (kernel->mmu == gcvNULL) + { + gcmkONERROR( + gckMMU_Construct(kernel, gcdMMU_SIZE, &kernel->mmu)); + + gcmkONERROR( + gckDEVICE_SetMMU(Device, kernel->hardware->type, kernel->mmu)); + } + } + + gcmkVERIFY_OK(gckMMU_AttachHardware(kernel->mmu, kernel->hardware)); +#endif + + kernel->contiguousBaseAddress = kernel->mmu->contiguousBaseAddress; + kernel->externalBaseAddress = kernel->mmu->externalBaseAddress; + + /* Construct the gckCOMMAND object. */ + gcmkONERROR( + gckCOMMAND_Construct(kernel, &kernel->command)); + + if (gckHARDWARE_IsFeatureAvailable(kernel->hardware, gcvFEATURE_ASYNC_BLIT)) + { + /* Construct the gckASYNC_COMMAND object for BLT engine. */ + gcmkONERROR(gckASYNC_COMMAND_Construct(kernel, &kernel->asyncCommand)); + + /* Construct gckEVENT for BLT. */ + gcmkONERROR(gckEVENT_Construct(kernel, &kernel->asyncEvent)); + + kernel->asyncEvent->asyncCommand = kernel->asyncCommand; + + kernel->command->asyncCommand = kernel->asyncCommand; + } + + /* Construct the gckEVENT object. */ + gcmkONERROR( + gckEVENT_Construct(kernel, &kernel->eventObj)); + + gcmkVERIFY_OK(gckOS_GetTime(&kernel->resetTimeStamp)); + + gcmkONERROR(gckHARDWARE_PrepareFunctions(kernel->hardware)); + + /* Initialize the hardware. */ + gcmkONERROR( + gckHARDWARE_InitializeHardware(kernel->hardware)); + +#if gcdDVFS + if (gckHARDWARE_IsFeatureAvailable(kernel->hardware, + gcvFEATURE_DYNAMIC_FREQUENCY_SCALING)) + { + gcmkONERROR(gckDVFS_Construct(kernel->hardware, &kernel->dvfs)); + gcmkONERROR(gckDVFS_Start(kernel->dvfs)); + } +#endif + +#if COMMAND_PROCESSOR_VERSION == 1 + /* Start the command queue. 
*/ + gcmkONERROR(gckCOMMAND_Start(kernel->command)); +#endif + } + +#if VIVANTE_PROFILER + /* Initialize profile setting */ + kernel->profileEnable = gcvFALSE; + kernel->profileCleanRegister = gcvTRUE; +#endif + +#if gcdLINUX_SYNC_FILE + gcmkONERROR(gckOS_CreateSyncTimeline(Os, Core, &kernel->timeline)); +#endif + +#if gcdSECURITY + /* Connect to security service for this GPU. */ + gcmkONERROR(gckKERNEL_SecurityOpen(kernel, kernel->core, &kernel->securityChannel)); +#endif + +#if gcdGPU_TIMEOUT && gcdINTERRUPT_STATISTIC + if (kernel->timeOut) + { + gcmkVERIFY_OK(gckOS_CreateTimer( + Os, + (gctTIMERFUNCTION)_MonitorTimerFunction, + (gctPOINTER)kernel, + &kernel->monitorTimer + )); + + kernel->monitoring = gcvFALSE; + + kernel->monitorTimerStop = gcvFALSE; + + gcmkVERIFY_OK(gckOS_StartTimer( + Os, + kernel->monitorTimer, + 100 + )); + } +#endif + + /* Return pointer to the gckKERNEL object. */ + *Kernel = kernel; + + /* Success. */ + gcmkFOOTER_ARG("*Kernel=0x%x", *Kernel); + return gcvSTATUS_OK; + +OnError: + *Kernel = gcvNULL; + + if (kernel != gcvNULL) + { + gckKERNEL_Destroy(kernel); + } + + /* Return the error. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckKERNEL_Destroy +** +** Destroy an gckKERNEL object. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object to destroy. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckKERNEL_Destroy( + IN gckKERNEL Kernel + ) +{ + gctSIZE_T i; + gcsDATABASE_PTR database, databaseNext; + gcsDATABASE_RECORD_PTR record, recordNext; + + gcmkHEADER_ARG("Kernel=0x%x", Kernel); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); +#if QNX_SINGLE_THREADED_DEBUGGING + gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->debugMutex)); +#endif + + /* Destroy the database. 
*/ + if (Kernel->dbCreated) + { + for (i = 0; i < gcmCOUNTOF(Kernel->db->db); ++i) + { + if (Kernel->db->db[i] != gcvNULL) + { + gcmkVERIFY_OK( + gckKERNEL_DestroyProcessDB(Kernel, Kernel->db->db[i]->processID)); + } + } + + /* Free all databases. */ + for (database = Kernel->db->freeDatabase; + database != gcvNULL; + database = databaseNext) + { + databaseNext = database->next; + + if (database->counterMutex) + { + gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, database->counterMutex)); + } + + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, database)); + } + + if (Kernel->db->lastDatabase != gcvNULL) + { + if (Kernel->db->lastDatabase->counterMutex) + { + gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->db->lastDatabase->counterMutex)); + } + + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, Kernel->db->lastDatabase)); + } + + /* Free all database records. */ + for (record = Kernel->db->freeRecord; record != gcvNULL; record = recordNext) + { + recordNext = record->next; + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, record)); + } + + if (Kernel->db->dbMutex) + { + /* Destroy the database mutex. */ + gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->db->dbMutex)); + } + + if (Kernel->db->nameDatabase) + { + /* Destroy video memory name database. */ + gcmkVERIFY_OK(gckKERNEL_DestroyIntegerDatabase(Kernel, Kernel->db->nameDatabase)); + } + + if (Kernel->db->nameDatabaseMutex) + { + /* Destroy video memory name database mutex. */ + gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->db->nameDatabaseMutex)); + } + + if (Kernel->db->pointerDatabase) + { + /* Destroy id-pointer database. */ + gcmkVERIFY_OK(gckKERNEL_DestroyIntegerDatabase(Kernel, Kernel->db->pointerDatabase)); + } + + if (Kernel->db->pointerDatabaseMutex) + { + /* Destroy id-pointer database mutex. */ + gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->db->pointerDatabaseMutex)); + } + + if (Kernel->db) + { + /* Destroy the database. 
*/ + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, Kernel->db)); + } + + /* Notify stuck timer to quit. */ + Kernel->monitorTimerStop = gcvTRUE; + } + + if (Kernel->monitorTimer) + { + /* Stop and destroy monitor timer. */ + gcmkVERIFY_OK(gckOS_StopTimer(Kernel->os, Kernel->monitorTimer)); + gcmkVERIFY_OK(gckOS_DestroyTimer(Kernel->os, Kernel->monitorTimer)); + } + + { + if (Kernel->command) + { + /* Destroy the gckCOMMNAND object. */ + gcmkVERIFY_OK(gckCOMMAND_Destroy(Kernel->command)); + } + + if (Kernel->asyncCommand) + { + gcmkVERIFY_OK(gckASYNC_COMMAND_Destroy(Kernel->asyncCommand)); + } + + if (Kernel->asyncEvent) + { + gcmkVERIFY_OK(gckEVENT_Destroy(Kernel->asyncEvent)); + } + + if (Kernel->eventObj) + { + /* Destroy the gckEVENT object. */ + gcmkVERIFY_OK(gckEVENT_Destroy(Kernel->eventObj)); + } + + gcmkVERIFY_OK(gckHARDWARE_DestroyFunctions(Kernel->hardware)); + + if (Kernel->mmu) + { +#if gcdSHARED_PAGETABLE + /* Destroy the gckMMU object. */ + gcmkVERIFY_OK(gckMMU_Destroy(Kernel->mmu)); +#else + if (Kernel->mmu->hardware == Kernel->hardware) + { + /* Destroy the gckMMU object. */ + gcmkVERIFY_OK(gckMMU_Destroy(Kernel->mmu)); + } +#endif + } + + if (Kernel->hardware) + { + /* Destroy the gckHARDWARE object. */ + gcmkVERIFY_OK(gckHARDWARE_Destroy(Kernel->hardware)); + } + } + + if (Kernel->atomClients) + { + /* Detsroy the client atom. 
*/ + gcmkVERIFY_OK(gckOS_AtomDestroy(Kernel->os, Kernel->atomClients)); + } + + if (Kernel->virtualBufferLock) + { + gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->virtualBufferLock)); + } + +#if gcdDVFS + if (Kernel->dvfs) + { + gcmkVERIFY_OK(gckDVFS_Stop(Kernel->dvfs)); + gcmkVERIFY_OK(gckDVFS_Destroy(Kernel->dvfs)); + } +#endif + +#if gcdLINUX_SYNC_FILE + if (Kernel->timeline) + { + gcmkVERIFY_OK(gckOS_DestroySyncTimeline(Kernel->os, Kernel->timeline)); + } +#endif + +#if gcdSECURITY + if (Kernel->securityChannel) + { + gcmkVERIFY_OK(gckKERNEL_SecurityClose(Kernel->securityChannel)); + } +#endif + + /* Mark the gckKERNEL object as unknown. */ + Kernel->object.type = gcvOBJ_UNKNOWN; + + /* Free the gckKERNEL object. */ + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, Kernel)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** _AllocateMemory +** +** Private function to walk all required memory pools to allocate the requested +** amount of video memory. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gcsHAL_INTERFACE * Interface +** Pointer to a gcsHAL_INTERFACE structure that defines the command to +** be dispatched. +** +** OUTPUT: +** +** gcsHAL_INTERFACE * Interface +** Pointer to a gcsHAL_INTERFACE structure that receives any data to be +** returned. 
+*/ +gceSTATUS +gckKERNEL_AllocateLinearMemory( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN OUT gcePOOL * Pool, + IN gctSIZE_T Bytes, + IN gctUINT32 Alignment, + IN gceSURF_TYPE Type, + IN gctUINT32 Flag, + OUT gctUINT32 * Node + ) +{ + gcePOOL pool; + gceSTATUS status; + gckVIDMEM videoMemory; + gctINT loopCount; + gcuVIDMEM_NODE_PTR node = gcvNULL; + gctBOOL tileStatusInVirtual; + gctBOOL contiguous = gcvFALSE; + gctBOOL cacheable = gcvFALSE; + gctBOOL secure = gcvFALSE; + gctBOOL fastPools = gcvFALSE; + gctBOOL hasFastPools = gcvFALSE; + gctSIZE_T bytes = Bytes; + gctUINT32 handle = 0; + gceDATABASE_TYPE type; + + gcmkHEADER_ARG("Kernel=0x%x *Pool=%d Bytes=%lu Alignment=%lu Type=%d", + Kernel, *Pool, Bytes, Alignment, Type); + + gcmkVERIFY_ARGUMENT(Pool != gcvNULL); + gcmkVERIFY_ARGUMENT(Bytes != 0); + + /* Get basic type. */ + Type &= 0xFF; + + /* Check flags. */ + contiguous = Flag & gcvALLOC_FLAG_CONTIGUOUS; + cacheable = Flag & gcvALLOC_FLAG_CACHEABLE; + secure = Flag & gcvALLOC_FLAG_SECURITY; + if (Flag & gcvALLOC_FLAG_FAST_POOLS) + { + fastPools = gcvTRUE; + Flag &= ~gcvALLOC_FLAG_FAST_POOLS; + } + +#if gcdALLOC_ON_FAULT + if (Type == gcvSURF_RENDER_TARGET) + { + Flag |= gcvALLOC_FLAG_ALLOC_ON_FAULT; + } +#endif + + if (Flag & gcvALLOC_FLAG_ALLOC_ON_FAULT) + { + *Pool = gcvPOOL_VIRTUAL; + } + + if (Flag & gcvALLOC_FLAG_DMABUF_EXPORTABLE) + { + gctSIZE_T pageSize = 0; + gckOS_GetPageSize(Kernel->os, &pageSize); + + /* Usually, the exported dmabuf might be later imported to DRM, + ** while DRM requires input size to be page aligned. + */ + Bytes = gcmALIGN(Bytes, pageSize); + } + +AllocateMemory: + + /* Get initial pool. 
*/ + switch (pool = *Pool) + { + case gcvPOOL_DEFAULT: + case gcvPOOL_LOCAL: + pool = gcvPOOL_LOCAL_INTERNAL; + loopCount = (gctINT) gcvPOOL_NUMBER_OF_POOLS; + break; + + case gcvPOOL_UNIFIED: + pool = gcvPOOL_SYSTEM; + loopCount = (gctINT) gcvPOOL_NUMBER_OF_POOLS; + break; + + case gcvPOOL_CONTIGUOUS: + loopCount = (gctINT) gcvPOOL_NUMBER_OF_POOLS; + break; + + default: + loopCount = 1; + break; + } + + while (loopCount-- > 0) + { + if (pool == gcvPOOL_VIRTUAL) + { + /* Create a gcuVIDMEM_NODE for virtual memory. */ + gcmkONERROR( + gckVIDMEM_ConstructVirtual(Kernel, Flag | gcvALLOC_FLAG_NON_CONTIGUOUS, Bytes, &node)); + + bytes = node->Virtual.bytes; + node->Virtual.type = Type; + + /* Success. */ + break; + } + + else + if (pool == gcvPOOL_CONTIGUOUS) + { +#if gcdCONTIGUOUS_SIZE_LIMIT + if (Bytes > gcdCONTIGUOUS_SIZE_LIMIT && contiguous == gcvFALSE) + { + status = gcvSTATUS_OUT_OF_MEMORY; + } + else +#endif + { + /* Create a gcuVIDMEM_NODE from contiguous memory. */ + status = gckVIDMEM_ConstructVirtual( + Kernel, + Flag | gcvALLOC_FLAG_CONTIGUOUS, + Bytes, + &node); + } + + if (gcmIS_SUCCESS(status)) + { + bytes = node->Virtual.bytes; + node->Virtual.type = Type; + + /* Memory allocated. */ + break; + } + } + + else + /* gcvPOOL_SYSTEM can't be cacheable. */ + if (cacheable == gcvFALSE && secure == gcvFALSE) + { + /* Get pointer to gckVIDMEM object for pool. */ + status = gckKERNEL_GetVideoMemoryPool(Kernel, pool, &videoMemory); + + if (gcmIS_SUCCESS(status)) + { + /* Allocate memory. 
*/ + if ((Flag & videoMemory->capability) != Flag) + { + status = gcvSTATUS_NOT_SUPPORTED; + + gcmkFATAL("%s(%d): Reject alloc because VIDMEM (pool=%d) caps=0x%x cannot meet required Flag=0x%x", + __FUNCTION__, __LINE__, pool, videoMemory->capability, Flag); + } +#if defined(gcdLINEAR_SIZE_LIMIT) + /* 512 KB */ + else if (Bytes > gcdLINEAR_SIZE_LIMIT) + { + status = gcvSTATUS_OUT_OF_MEMORY; + } +#endif + else + { + hasFastPools = gcvTRUE; + status = gckVIDMEM_AllocateLinear(Kernel, + videoMemory, + Bytes, + Alignment, + Type, + (*Pool == gcvPOOL_SYSTEM), + &node); + } + + if (gcmIS_SUCCESS(status)) + { + /* Memory allocated. */ + node->VidMem.pool = pool; + bytes = node->VidMem.bytes; + break; + } + } + } + + if (pool == gcvPOOL_LOCAL_INTERNAL) + { + /* Advance to external memory. */ + pool = gcvPOOL_LOCAL_EXTERNAL; + } + + else + if (pool == gcvPOOL_LOCAL_EXTERNAL) + { + /* Advance to contiguous system memory. */ + pool = gcvPOOL_SYSTEM; + } + + else + if (pool == gcvPOOL_SYSTEM) + { + /* Do not go ahead to try relative slow pools */ + if (fastPools && hasFastPools) + { + status = gcvSTATUS_OUT_OF_MEMORY; + break; + } + + /* Advance to contiguous memory. */ + pool = gcvPOOL_CONTIGUOUS; + } + + else + if (pool == gcvPOOL_CONTIGUOUS) + { + { + tileStatusInVirtual = + gckHARDWARE_IsFeatureAvailable(Kernel->hardware, + gcvFEATURE_MC20); + } + + if (Type == gcvSURF_TILE_STATUS && tileStatusInVirtual != gcvTRUE) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + if (contiguous) + { + break; + } + + /* Advance to virtual memory. */ + pool = gcvPOOL_VIRTUAL; + } + + else + { + /* Out of pools. */ + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + } + + if (node == gcvNULL) + { + if (contiguous) + { + /* Broadcast OOM message. */ + status = gckOS_Broadcast(Kernel->os, Kernel->hardware, gcvBROADCAST_OUT_OF_MEMORY); + + if (gcmIS_SUCCESS(status)) + { + /* Get some memory. */ + gckOS_Delay(gcvNULL, 1); + goto AllocateMemory; + } + } + + /* Nothing allocated. 
*/ + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + /* Allocate handle for this video memory. */ + gcmkONERROR( + gckVIDMEM_NODE_Allocate(Kernel, node, Type, pool, &handle)); + + /* Return node and pool used for allocation. */ + *Node = handle; + *Pool = pool; + + /* Encode surface type and pool to database type. */ + type = gcvDB_VIDEO_MEMORY + | (Type << gcdDB_VIDEO_MEMORY_TYPE_SHIFT) + | (pool << gcdDB_VIDEO_MEMORY_POOL_SHIFT); + + /* Record in process db. */ + gcmkONERROR( + gckKERNEL_AddProcessDB(Kernel, + ProcessID, + type, + gcmINT2PTR(handle), + gcvNULL, + bytes)); + + + /* Return status. */ + gcmkFOOTER_ARG("*Pool=%d *Node=0x%x", *Pool, *Node); + return gcvSTATUS_OK; + +OnError: + if (handle) + { + /* Destroy handle allocated. */ + gcmkVERIFY_OK(gckVIDMEM_HANDLE_Dereference(Kernel, ProcessID, handle)); + } + + if (node) + { + /* Free video memory allocated. */ + gcmkVERIFY_OK(gckVIDMEM_Free(Kernel, node)); + } + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckKERNEL_ReleaseVideoMemory +** +** Release handle of a video memory. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gctUINT32 ProcessID +** ProcessID of current process. +** +** gctUINT32 Handle +** Handle of video memory. +** +** OUTPUT: +** +** Nothing. 
+*/ +gceSTATUS +gckKERNEL_ReleaseVideoMemory( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN gctUINT32 Handle + ) +{ + gceSTATUS status; + gckVIDMEM_NODE nodeObject; + gceDATABASE_TYPE type; + + gcmkHEADER_ARG("Kernel=0x%08X ProcessID=%d Handle=%d", + Kernel, ProcessID, Handle); + + gcmkONERROR( + gckVIDMEM_HANDLE_Lookup(Kernel, ProcessID, Handle, &nodeObject)); + + type = gcvDB_VIDEO_MEMORY + | (nodeObject->type << gcdDB_VIDEO_MEMORY_TYPE_SHIFT) + | (nodeObject->pool << gcdDB_VIDEO_MEMORY_POOL_SHIFT); + + gcmkONERROR( + gckKERNEL_RemoveProcessDB(Kernel, + ProcessID, + type, + gcmINT2PTR(Handle))); + + gckVIDMEM_HANDLE_Dereference(Kernel, ProcessID, Handle); + + gckVIDMEM_NODE_Dereference(Kernel, nodeObject); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckKERNEL_LockVideoMemory +** +** Lock a video memory node. It will generate a cpu virtual address used +** by software and a GPU address used by GPU. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gceCORE Core +** GPU to which video memory is locked. +** +** gcsHAL_INTERFACE * Interface +** Pointer to a gcsHAL_INTERFACE structure that defines the command to +** be dispatched. +** +** OUTPUT: +** +** gcsHAL_INTERFACE * Interface +** Pointer to a gcsHAL_INTERFACE structure that receives any data to be +** returned. 
+*/ +gceSTATUS +gckKERNEL_LockVideoMemory( + IN gckKERNEL Kernel, + IN gceCORE Core, + IN gctUINT32 ProcessID, + IN gctBOOL FromUser, + IN OUT gcsHAL_INTERFACE * Interface + ) +{ + gceSTATUS status; + gckVIDMEM_NODE nodeObject = gcvNULL; + gcuVIDMEM_NODE_PTR node = gcvNULL; + gctBOOL locked = gcvFALSE; + gctBOOL asynchronous = gcvFALSE; +#ifndef __QNXNTO__ + gctPOINTER pointer = gcvNULL; +#endif + + gcmkHEADER_ARG("Kernel=0x%08X ProcessID=%d", + Kernel, ProcessID); + + gcmkONERROR( + gckVIDMEM_HANDLE_LookupAndReference(Kernel, + Interface->u.LockVideoMemory.node, + &nodeObject)); + + node = nodeObject->node; + + Interface->u.LockVideoMemory.gid = 0; + + /* Lock video memory. */ + gcmkONERROR( + gckVIDMEM_Lock(Kernel, + nodeObject, + Interface->u.LockVideoMemory.cacheable, + &Interface->u.LockVideoMemory.address, + &Interface->u.LockVideoMemory.gid, + &Interface->u.LockVideoMemory.physicalAddress)); + + locked = gcvTRUE; + + if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM) + { + /* Map video memory address into user space. */ +#ifdef __QNXNTO__ + if (node->VidMem.logical == gcvNULL) + { + gcmkONERROR( + gckKERNEL_MapVideoMemory(Kernel, + FromUser, + Interface->u.LockVideoMemory.address, + ProcessID, + node->VidMem.bytes, + &node->VidMem.logical)); + } + gcmkASSERT(node->VidMem.logical != gcvNULL); + + Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(node->VidMem.logical); +#else + gcmkONERROR( + gckKERNEL_MapVideoMemoryEx(Kernel, + Core, + FromUser, + Interface->u.LockVideoMemory.address, + node->VidMem.pool, + &pointer)); + + Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(pointer); +#endif + } + else + { + Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(node->Virtual.logical); + + /* Success. */ + status = gcvSTATUS_OK; + } + +#if gcdPROCESS_ADDRESS_SPACE + gcmkONERROR(gckVIDMEM_Node_Lock( + Kernel, + nodeObject, + &Interface->u.LockVideoMemory.address + )); +#endif + + +#if gcdSECURE_USER + /* Return logical address as physical address. 
*/ + Interface->u.LockVideoMemory.address = + (gctUINT32)(Interface->u.LockVideoMemory.memory); +#endif + gcmkONERROR( + gckKERNEL_AddProcessDB(Kernel, + ProcessID, gcvDB_VIDEO_MEMORY_LOCKED, + gcmINT2PTR(Interface->u.LockVideoMemory.node), + gcvNULL, + 0)); + + gckVIDMEM_HANDLE_Reference( + Kernel, ProcessID, (gctUINT32)Interface->u.LockVideoMemory.node); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (locked) + { + /* Roll back the lock. */ + gcmkVERIFY_OK(gckVIDMEM_Unlock(Kernel, + nodeObject, + gcvSURF_TYPE_UNKNOWN, + &asynchronous)); + + if (gcvTRUE == asynchronous) + { + /* Bottom Half */ + gcmkVERIFY_OK(gckVIDMEM_Unlock(Kernel, + nodeObject, + gcvSURF_TYPE_UNKNOWN, + gcvNULL)); + } + } + + if (nodeObject != gcvNULL) + { + gckVIDMEM_NODE_Dereference(Kernel, nodeObject); + } + + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckKERNEL_UnlockVideoMemory +** +** Unlock a video memory node. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gctUINT32 ProcessID +** ProcessID of current process. +** +** gcsHAL_INTERFACE * Interface +** Pointer to a gcsHAL_INTERFACE structure that defines the command to +** be dispatched. +** +** OUTPUT: +** +** gcsHAL_INTERFACE * Interface +** Pointer to a gcsHAL_INTERFACE structure that receives any data to be +** returned. 
+*/ +gceSTATUS +gckKERNEL_UnlockVideoMemory( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN OUT gcsHAL_INTERFACE * Interface + ) +{ + gceSTATUS status; + gckVIDMEM_NODE nodeObject; + gcuVIDMEM_NODE_PTR node; + gctSIZE_T bytes; + + gcmkHEADER_ARG("Kernel=0x%08X ProcessID=%d", + Kernel, ProcessID); + + Interface->u.UnlockVideoMemory.pool = gcvPOOL_UNKNOWN; + Interface->u.UnlockVideoMemory.bytes = 0; + + gcmkONERROR(gckVIDMEM_HANDLE_Lookup( + Kernel, + ProcessID, + (gctUINT32)Interface->u.UnlockVideoMemory.node, + &nodeObject)); + + node = nodeObject->node; + bytes = (node->VidMem.memory->object.type == gcvOBJ_VIDMEM) + ? node->VidMem.bytes + : node->Virtual.bytes; + + /* Unlock video memory. */ + gcmkONERROR(gckVIDMEM_Unlock( + Kernel, + nodeObject, + Interface->u.UnlockVideoMemory.type, + &Interface->u.UnlockVideoMemory.asynchroneous)); + +#if gcdSECURE_USER + /* Flush the translation cache for virtual surfaces. */ + if (node->VidMem.memory->object.type != gcvOBJ_VIDMEM) + { + gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(Kernel, + cache, + node->Virtual.logical, + bytes)); + } +#endif + + Interface->u.UnlockVideoMemory.pool = nodeObject->pool; + Interface->u.UnlockVideoMemory.bytes = (gctUINT)bytes; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckKERNEL_BottomHalfUnlockVideoMemory +** +** Unlock video memory from gpu. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gctUINT32 ProcessID +** Process ID owning this memory. +** +** gctPOINTER Pointer +** Video memory to be unlock. +*/ +gceSTATUS +gckKERNEL_BottomHalfUnlockVideoMemory( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN gctUINT32 Node + ) +{ + gceSTATUS status; + gckVIDMEM_NODE nodeObject = gcvNULL; + + /* Remove record from process db. 
*/ + gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB( + Kernel, + ProcessID, + gcvDB_VIDEO_MEMORY_LOCKED, + gcmINT2PTR(Node))); + + gcmkONERROR(gckVIDMEM_HANDLE_Lookup( + Kernel, + ProcessID, + Node, + &nodeObject)); + + gckVIDMEM_HANDLE_Dereference(Kernel, ProcessID, Node); + + /* Unlock video memory. */ + gcmkONERROR(gckVIDMEM_Unlock( + Kernel, + nodeObject, + gcvSURF_TYPE_UNKNOWN, + gcvNULL)); + + gcmkONERROR(gckVIDMEM_NODE_Dereference( + Kernel, + nodeObject)); + + return gcvSTATUS_OK; + +OnError: + return status; +} + +/******************************************************************************* +** +** gckKERNEL_SetVidMemMetadata +** +** Set/Get metadata to/from gckVIDMEM_NODE object. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gctUINT32 ProcessID +** ProcessID of current process. +** +** INOUT: +** +** gcsHAL_INTERFACE * Interface +** Pointer to a interface structure +*/ +#if defined(CONFIG_DMA_SHARED_BUFFER) +#include + +gceSTATUS +gckKERNEL_SetVidMemMetadata( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + INOUT gcsHAL_INTERFACE * Interface + ) +{ + gceSTATUS status = gcvSTATUS_NOT_SUPPORTED; + gckVIDMEM_NODE nodeObj = gcvNULL; + + gcmkHEADER_ARG("Kernel=0x%X ProcessID=%d", Kernel, ProcessID); + + gcmkONERROR(gckVIDMEM_HANDLE_Lookup(Kernel, ProcessID, Interface->u.SetVidMemMetadata.node, &nodeObj)); + + if (Interface->u.SetVidMemMetadata.readback) + { + Interface->u.SetVidMemMetadata.ts_fd = nodeObj->metadata.ts_fd; + Interface->u.SetVidMemMetadata.fc_enabled = nodeObj->metadata.fc_enabled; + Interface->u.SetVidMemMetadata.fc_value = nodeObj->metadata.fc_value; + Interface->u.SetVidMemMetadata.fc_value_upper = nodeObj->metadata.fc_value_upper; + Interface->u.SetVidMemMetadata.compressed = nodeObj->metadata.compressed; + Interface->u.SetVidMemMetadata.compress_format = nodeObj->metadata.compress_format; + } + else + { + nodeObj->metadata.ts_fd = Interface->u.SetVidMemMetadata.ts_fd; + + if (nodeObj->metadata.ts_fd >= 0) 
+ { + nodeObj->metadata.ts_dma_buf = dma_buf_get(nodeObj->metadata.ts_fd); + + if (IS_ERR(nodeObj->metadata.ts_dma_buf)) + { + gcmkONERROR(gcvSTATUS_NOT_FOUND); + } + + dma_buf_put(nodeObj->metadata.ts_dma_buf); + } + else + { + nodeObj->metadata.ts_dma_buf = NULL; + } + + nodeObj->metadata.fc_enabled = Interface->u.SetVidMemMetadata.fc_enabled; + nodeObj->metadata.fc_value = Interface->u.SetVidMemMetadata.fc_value; + nodeObj->metadata.fc_value_upper = Interface->u.SetVidMemMetadata.fc_value_upper; + nodeObj->metadata.compressed = Interface->u.SetVidMemMetadata.compressed; + nodeObj->metadata.compress_format = Interface->u.SetVidMemMetadata.compress_format; + } + +OnError: + gcmkFOOTER(); + return status; +} + +#else + +gceSTATUS +gckKERNEL_SetVidMemMetadata( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + INOUT gcsHAL_INTERFACE * Interface + ) +{ + gcmkFATAL("The kernel did NOT support CONFIG_DMA_SHARED_BUFFER"); + return gcvSTATUS_NOT_SUPPORTED; +} +#endif + +/******************************************************************************* +** +** gckKERNEL_QueryVidMemPoolNodes +** +** Loop all databases to query used memory nodes of a specific pool. +** +** INPUT: +** +** Pool The memory pool for query. + +** TotalSize Total size of the used contiguous memory. +** +** MemoryBlocks Used contiguous memory block info. +** +** NumMaxBlock The count of memory blocks array provided by the caller. +** +** NumBlocks The actual number of memory blocks returned. +** +** OUTPUT: +** +** Error status. Should always be gcvSTATUS_SUCCESS since a query should always succeed. 
+*/ +gceSTATUS +gckKERNEL_QueryVidMemPoolNodes( + gckKERNEL Kernel, + gcePOOL Pool, + gctUINT32 * TotalSize, + gcsContiguousBlock * MemoryBlocks, + gctUINT32 NumMaxBlocks, + gctUINT32 * NumBlocks + ) +{ + gceSTATUS status = gcvSTATUS_OK; + gctINT i; + gctINT bcount; + gctUINT32 num_blocks = 0; + gctSIZE_T total_size = 0; + gckVIDMEM memory; + gcuVIDMEM_NODE_PTR nodes, node; + + do + { + /* Get the heap and nodes. */ + status = gckKERNEL_GetVideoMemoryPool(Kernel, Pool, &memory); + if (status != gcvSTATUS_OK) + break; + + status = gckVIDMEM_QueryNodes(Kernel, Pool, &bcount, &nodes); + if (status != gcvSTATUS_OK) + break; + + /* Iterate all nodes. */ + for (i = 0; i < bcount; i++) + { + node = nodes[i].VidMem.next; + do + { + if (node == gcvNULL) + { + break; + } + + /* Is it in the "free" list? */ + if (node->VidMem.nextFree == gcvNULL) + { + if (num_blocks < NumMaxBlocks) + { + MemoryBlocks[num_blocks].ptr = (gctUINT32)node->VidMem.offset + memory->baseAddress; + MemoryBlocks[num_blocks].size = node->VidMem.bytes; + } + total_size += node->VidMem.bytes; + num_blocks++; + } + + node = node->VidMem.next; + } while (node != &nodes[i]); + } + } + while (gcvFALSE); + + if (TotalSize != gcvNULL) + *TotalSize = (gctUINT32)total_size; + + if (NumBlocks != gcvNULL) + *NumBlocks = num_blocks; + + return status; +} + +gceSTATUS +gckKERNEL_QueryDatabase( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN OUT gcsHAL_INTERFACE * Interface + ) +{ + gceSTATUS status; + gctINT i; + + gceDATABASE_TYPE type[3] = { + gcvDB_VIDEO_MEMORY | (gcvPOOL_SYSTEM << gcdDB_VIDEO_MEMORY_POOL_SHIFT), + gcvDB_VIDEO_MEMORY | (gcvPOOL_CONTIGUOUS << gcdDB_VIDEO_MEMORY_POOL_SHIFT), + gcvDB_VIDEO_MEMORY | (gcvPOOL_VIRTUAL << gcdDB_VIDEO_MEMORY_POOL_SHIFT), + }; + + gcmkHEADER(); + + /* Query video memory. 
*/ + gcmkONERROR( + gckKERNEL_QueryProcessDB(Kernel, + Interface->u.Database.processID, + !Interface->u.Database.validProcessID, + gcvDB_VIDEO_MEMORY, + &Interface->u.Database.vidMem)); + + /* Query non-paged memory. */ + gcmkONERROR( + gckKERNEL_QueryProcessDB(Kernel, + Interface->u.Database.processID, + !Interface->u.Database.validProcessID, + gcvDB_NON_PAGED, + &Interface->u.Database.nonPaged)); + + /* Query contiguous memory. */ + gcmkONERROR( + gckKERNEL_QueryProcessDB(Kernel, + Interface->u.Database.processID, + !Interface->u.Database.validProcessID, + gcvDB_CONTIGUOUS, + &Interface->u.Database.contiguous)); + + /* Query GPU idle time. */ + gcmkONERROR( + gckKERNEL_QueryProcessDB(Kernel, + Interface->u.Database.processID, + !Interface->u.Database.validProcessID, + gcvDB_IDLE, + &Interface->u.Database.gpuIdle)); + for (i = 0; i < 3; i++) + { + /* Query each video memory pool. */ + gcmkONERROR( + gckKERNEL_QueryProcessDB(Kernel, + Interface->u.Database.processID, + !Interface->u.Database.validProcessID, + type[i], + &Interface->u.Database.vidMemPool[i])); + } + +#if gcmIS_DEBUG(gcdDEBUG_TRACE) + gckKERNEL_DumpVidMemUsage(Kernel, Interface->u.Database.processID); +#endif + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckKERNEL_ConfigPowerManagement( + IN gckKERNEL Kernel, + IN OUT gcsHAL_INTERFACE * Interface +) +{ + gceSTATUS status; + gctBOOL enable = Interface->u.ConfigPowerManagement.enable; + + gcmkHEADER(); + + gcmkONERROR(gckHARDWARE_SetPowerManagement(Kernel->hardware, enable)); + + if (enable == gcvFALSE) + { + gcmkONERROR( + gckHARDWARE_SetPowerManagementState(Kernel->hardware, gcvPOWER_ON)); + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +static gceSTATUS +gckKERNEL_CacheOperation( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN gctUINT32 Node, + IN gceCACHEOPERATION Operation, + IN gctPOINTER Logical, + IN gctSIZE_T Bytes + ) +{ + 
gceSTATUS status; + gckVIDMEM_NODE nodeObject = gcvNULL; + gcuVIDMEM_NODE_PTR node = gcvNULL; + void *memHandle; + + gcmkHEADER_ARG("Kernel=%p pid=%u Node=%u op=%d Logical=%p Bytes=0x%lx", + Kernel, ProcessID, Node, Operation, Logical, Bytes); + + gcmkONERROR(gckVIDMEM_HANDLE_Lookup(Kernel, + ProcessID, + Node, + &nodeObject)); + + node = nodeObject->node; + + if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM) + { + static gctBOOL printed; + + if (!printed) + { + printed = gcvTRUE; + gcmkPRINT("[galcore]: %s: Flush Video Memory", __FUNCTION__); + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + } + else + { + memHandle = node->Virtual.physical; + } + + switch (Operation) + { + case gcvCACHE_FLUSH: + /* Clean and invalidate the cache. */ + status = gckOS_CacheFlush(Kernel->os, + ProcessID, + memHandle, + gcvINVALID_PHYSICAL_ADDRESS, + Logical, + Bytes); + break; + case gcvCACHE_CLEAN: + /* Clean the cache. */ + status = gckOS_CacheClean(Kernel->os, + ProcessID, + memHandle, + gcvINVALID_PHYSICAL_ADDRESS, + Logical, + Bytes); + break; + case gcvCACHE_INVALIDATE: + /* Invalidate the cache. */ + status = gckOS_CacheInvalidate(Kernel->os, + ProcessID, + memHandle, + gcvINVALID_PHYSICAL_ADDRESS, + Logical, + Bytes); + break; + + case gcvCACHE_MEMORY_BARRIER: + status = gckOS_MemoryBarrier(Kernel->os, Logical); + break; + + default: + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + break; + } + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckKERNEL_WaitFence( + IN gckKERNEL Kernel, + IN gctUINT32 Handle, + IN gctUINT32 TimeOut + ) +{ + gceSTATUS status; + gckVIDMEM_NODE node; + gctUINT32 processID; + gckCOMMAND command = Kernel->command; + gckASYNC_COMMAND asyncCommand = Kernel->asyncCommand; + gckFENCE fence = gcvNULL; + gctUINT i; + + gckOS_GetProcessID(&processID); + + gcmkONERROR(gckVIDMEM_HANDLE_LookupAndReference(Kernel, Handle, &node)); + + /* Wait for fence of all engines. 
*/ + for (i = 0; i < gcvENGINE_GPU_ENGINE_COUNT; i++) + { + gckFENCE_SYNC sync = &node->sync[i]; + + if (i == gcvENGINE_RENDER) + { + fence = command->fence; + } + else + { + fence = asyncCommand->fence; + } + +#if USE_KERNEL_VIRTUAL_BUFFERS + if (Kernel->virtualCommandBuffer) + { + gckVIRTUAL_COMMAND_BUFFER_PTR commandBuffer = (gckVIRTUAL_COMMAND_BUFFER_PTR) fence->physical; + + fence->physHandle = commandBuffer->virtualBuffer.physical; + } + else +#endif + { + fence->physHandle = fence->physical; + } + + gcmkONERROR(gckOS_CacheInvalidate( + Kernel->os, + 0, + fence->physHandle, + 0, + fence->logical, + 8 + )); + + if (sync->commitStamp <= *(gctUINT64_PTR)fence->logical) + { + continue; + } + else + { + gckOS_Signal(Kernel->os, sync->signal, gcvFALSE); + + gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, &fence->mutex, gcvINFINITE)); + + /* Add to waiting list. */ + gcsLIST_AddTail(&sync->head, &fence->waitingList); + + gcmkASSERT(sync->inList == gcvFALSE); + + sync->inList = gcvTRUE; + + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, &fence->mutex)); + + /* Wait. 
*/ + status = gckOS_WaitSignal(Kernel->os, sync->signal, gcvTRUE, TimeOut); + + gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, &fence->mutex, gcvINFINITE)); + + if (sync->inList) + { + gcsLIST_Del(&sync->head); + sync->inList = gcvFALSE; + } + + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, &fence->mutex)); + } + } + + gckVIDMEM_NODE_Dereference(Kernel, node); + +OnError: + return status; +} + +#ifdef __linux__ + +typedef struct _gcsGRRAPHIC_BUFFER_PARCLE +{ + gcsFDPRIVATE base; + gckKERNEL kernel; + + gckVIDMEM_NODE node[3]; + gctSHBUF shBuf; + gctINT32 signal; +} +gcsGRAPHIC_BUFFER_PARCLE; + +static void +_ReleaseGraphicBuffer( + gckKERNEL Kernel, + gcsGRAPHIC_BUFFER_PARCLE * Parcle + ) +{ + gctUINT i; + + for (i = 0; i < 3; i++) + { + if (Parcle->node[i]) + { + gckVIDMEM_NODE_Dereference(Kernel, Parcle->node[i]); + } + } + + if (Parcle->shBuf) + { + gckKERNEL_DestroyShBuffer(Kernel, Parcle->shBuf); + } + + if (Parcle->signal) + { + gckOS_DestroyUserSignal(Kernel->os, Parcle->signal); + } + + gcmkOS_SAFE_FREE(Kernel->os, Parcle); +} + +static gctINT +_FdReleaseGraphicBuffer( + gcsFDPRIVATE_PTR Private + ) +{ + gcsGRAPHIC_BUFFER_PARCLE * parcle = (gcsGRAPHIC_BUFFER_PARCLE *) Private; + + _ReleaseGraphicBuffer(parcle->kernel, parcle); + return 0; +} + +static gceSTATUS +_GetGraphicBufferFd( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN gctUINT32 Node[3], + IN gctUINT64 ShBuf, + IN gctUINT64 Signal, + OUT gctINT32 * Fd + ) +{ + gceSTATUS status; + gctUINT i; + gcsGRAPHIC_BUFFER_PARCLE * parcle = gcvNULL; + + gcmkONERROR(gckOS_Allocate( + Kernel->os, + gcmSIZEOF(gcsGRAPHIC_BUFFER_PARCLE), + (gctPOINTER *)&parcle + )); + + gckOS_ZeroMemory(parcle, sizeof(gcsGRAPHIC_BUFFER_PARCLE)); + + parcle->base.release = _FdReleaseGraphicBuffer; + parcle->kernel = Kernel; + + for (i = 0; i < 3; i++) + { + if (Node[i] != 0) + { + gcmkONERROR( + gckVIDMEM_HANDLE_LookupAndReference(Kernel, Node[i], &parcle->node[i])); + } + } + + if (ShBuf) + { + gctSHBUF shBuf = 
gcmUINT64_TO_PTR(ShBuf); + + gcmkONERROR(gckKERNEL_MapShBuffer(Kernel, shBuf)); + parcle->shBuf = shBuf; + } + + if (Signal) + { + gctSIGNAL signal = gcmUINT64_TO_PTR(Signal); + + gcmkONERROR( + gckOS_MapSignal(Kernel->os, + signal, + (gctHANDLE)(gctUINTPTR_T)ProcessID, + &signal)); + + parcle->signal= (gctINT32)Signal; + } + + gcmkONERROR(gckOS_GetFd("viv-gr", &parcle->base, Fd)); + + return gcvSTATUS_OK; + +OnError: + if (parcle) + { + _ReleaseGraphicBuffer(Kernel, parcle); + } + return status; +} +#endif + +/******************************************************************************* +** +** gckKERNEL_Dispatch +** +** Dispatch a command received from the user HAL layer. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gctBOOL FromUser +** whether the call is from the user space. +** +** gcsHAL_INTERFACE * Interface +** Pointer to a gcsHAL_INTERFACE structure that defines the command to +** be dispatched. +** +** OUTPUT: +** +** gcsHAL_INTERFACE * Interface +** Pointer to a gcsHAL_INTERFACE structure that receives any data to be +** returned. +*/ +gceSTATUS +gckKERNEL_Dispatch( + IN gckKERNEL Kernel, + IN gckDEVICE Device, + IN gctBOOL FromUser, + IN OUT gcsHAL_INTERFACE * Interface + ) +{ + gceSTATUS status = gcvSTATUS_OK; + gctPHYS_ADDR physical = gcvNULL; + gctSIZE_T bytes; + gctPOINTER logical = gcvNULL; +#if (gcdENABLE_3D) + gckCONTEXT context = gcvNULL; +#endif + gckKERNEL kernel = Kernel; + gctUINT32 processID; +#if gcdSECURE_USER + gcskSECURE_CACHE_PTR cache; + gctPOINTER logical; +#endif +#if !USE_NEW_LINUX_SIGNAL + gctSIGNAL signal; +#endif + gckVIRTUAL_COMMAND_BUFFER_PTR buffer; + + gctBOOL powerMutexAcquired = gcvFALSE; + gctBOOL commitMutexAcquired = gcvFALSE; + + gcmkHEADER_ARG("Kernel=0x%x FromUser=%d Interface=0x%x", + Kernel, FromUser, Interface); + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + gcmkVERIFY_ARGUMENT(Interface != gcvNULL); + +#if gcmIS_DEBUG(gcdDEBUG_TRACE) + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL, + "Dispatching command %d (%s)", + Interface->command, _DispatchText[Interface->command]); +#endif +#if QNX_SINGLE_THREADED_DEBUGGING + gckOS_AcquireMutex(Kernel->os, Kernel->debugMutex, gcvINFINITE); +#endif + + /* Get the current process ID. */ + gcmkONERROR(gckOS_GetProcessID(&processID)); + +#if gcdSECURE_USER + gcmkONERROR(gckKERNEL_GetProcessDBCache(Kernel, processID, &cache)); +#endif + + /* Dispatch on command. */ + switch (Interface->command) + { + case gcvHAL_GET_BASE_ADDRESS: + /* Get base address. */ + Interface->u.GetBaseAddress.baseAddress = Kernel->hardware->baseAddress; + Interface->u.GetBaseAddress.flatMappingRangeCount = Kernel->mmu->flatMappingRangeCount; + if (Kernel->mmu->flatMappingRangeCount) + { + gckOS_MemCopy(Interface->u.GetBaseAddress.flatMappingRanges, Kernel->mmu->flatMappingRanges, + gcmSIZEOF(gcsFLAT_MAPPING_RANGE) * Kernel->mmu->flatMappingRangeCount); + } + break; + + case gcvHAL_QUERY_VIDEO_MEMORY: + /* Query video memory size. */ + gcmkONERROR(gckKERNEL_QueryVideoMemory(Kernel, Interface)); + break; + + case gcvHAL_QUERY_CHIP_IDENTITY: + /* Query chip identity. */ + gcmkONERROR( + gckHARDWARE_QueryChipIdentity( + Kernel->hardware, + &Interface->u.QueryChipIdentity)); + break; + + case gcvHAL_QUERY_CHIP_FREQUENCY: + /* Query chip clock. */ + Interface->u.QueryChipFrequency.mcClk = Kernel->hardware->mcClk; + Interface->u.QueryChipFrequency.shClk = Kernel->hardware->shClk; + break; + + case gcvHAL_MAP_MEMORY: + physical = gcmINT2PTR(Interface->u.MapMemory.physical); + + /* Map memory. 
*/ + gcmkONERROR( + gckKERNEL_MapMemory(Kernel, + physical, + (gctSIZE_T) Interface->u.MapMemory.bytes, + &logical)); + + Interface->u.MapMemory.logical = gcmPTR_TO_UINT64(logical); + + gcmkVERIFY_OK( + gckKERNEL_AddProcessDB(Kernel, + processID, gcvDB_MAP_MEMORY, + logical, + physical, + (gctSIZE_T) Interface->u.MapMemory.bytes)); + break; + + case gcvHAL_UNMAP_MEMORY: + physical = gcmINT2PTR(Interface->u.UnmapMemory.physical); + + gcmkVERIFY_OK( + gckKERNEL_RemoveProcessDB(Kernel, + processID, gcvDB_MAP_MEMORY, + gcmUINT64_TO_PTR(Interface->u.UnmapMemory.logical))); + + /* Unmap memory. */ + gcmkONERROR( + gckKERNEL_UnmapMemory(Kernel, + physical, + (gctSIZE_T) Interface->u.UnmapMemory.bytes, + gcmUINT64_TO_PTR(Interface->u.UnmapMemory.logical), + processID)); + break; + + case gcvHAL_ALLOCATE_NON_PAGED_MEMORY: + bytes = (gctSIZE_T) Interface->u.AllocateNonPagedMemory.bytes; + + /* Allocate non-paged memory. */ + gcmkONERROR( + gckOS_AllocateNonPagedMemory( + Kernel->os, + FromUser, + gcvALLOC_FLAG_CONTIGUOUS, + &bytes, + &physical, + &logical)); + + Interface->u.AllocateNonPagedMemory.bytes = bytes; + Interface->u.AllocateNonPagedMemory.logical = gcmPTR_TO_UINT64(logical); + Interface->u.AllocateNonPagedMemory.physical = gcmPTR_TO_NAME(physical); + + gcmkVERIFY_OK( + gckKERNEL_AddProcessDB(Kernel, + processID, gcvDB_NON_PAGED, + logical, + gcmINT2PTR(Interface->u.AllocateNonPagedMemory.physical), + bytes)); + break; + + case gcvHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER: + bytes = (gctSIZE_T) Interface->u.AllocateVirtualCommandBuffer.bytes; + + gcmkONERROR( + gckKERNEL_AllocateVirtualCommandBuffer( + Kernel, + FromUser, + &bytes, + &physical, + &logical)); + + Interface->u.AllocateVirtualCommandBuffer.bytes = bytes; + Interface->u.AllocateVirtualCommandBuffer.logical = gcmPTR_TO_UINT64(logical); + Interface->u.AllocateVirtualCommandBuffer.physical = gcmPTR_TO_NAME(physical); + + gcmkVERIFY_OK( + gckKERNEL_AddProcessDB(Kernel, + processID, gcvDB_COMMAND_BUFFER, + 
logical, + gcmINT2PTR(Interface->u.AllocateVirtualCommandBuffer.physical), + bytes)); + break; + + case gcvHAL_FREE_NON_PAGED_MEMORY: + physical = gcmNAME_TO_PTR(Interface->u.FreeNonPagedMemory.physical); + + gcmkVERIFY_OK( + gckKERNEL_RemoveProcessDB(Kernel, + processID, gcvDB_NON_PAGED, + gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical))); + + /* Unmap user logical out of physical memory first. */ + gcmkONERROR(gckOS_UnmapUserLogical(Kernel->os, + physical, + (gctSIZE_T) Interface->u.FreeNonPagedMemory.bytes, + gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical))); + + /* Free non-paged memory. */ + gcmkONERROR( + gckOS_FreeNonPagedMemory(Kernel->os, + (gctSIZE_T) Interface->u.FreeNonPagedMemory.bytes, + physical, + gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical))); + +#if gcdSECURE_USER + gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache( + Kernel, + cache, + gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical), + (gctSIZE_T) Interface->u.FreeNonPagedMemory.bytes)); +#endif + + gcmRELEASE_NAME(Interface->u.FreeNonPagedMemory.physical); + break; + + case gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY: + bytes = (gctSIZE_T) Interface->u.AllocateContiguousMemory.bytes; + + /* Allocate contiguous memory. 
*/ + gcmkONERROR(gckOS_AllocateContiguous( + Kernel->os, + FromUser, + &bytes, + &physical, + &logical)); + + Interface->u.AllocateContiguousMemory.bytes = bytes; + Interface->u.AllocateContiguousMemory.logical = gcmPTR_TO_UINT64(logical); + Interface->u.AllocateContiguousMemory.physical = gcmPTR_TO_NAME(physical); + + gcmkONERROR(gckHARDWARE_ConvertLogical( + Kernel->hardware, + logical, + gcvTRUE, + &Interface->u.AllocateContiguousMemory.address)); + + gcmkVERIFY_OK(gckKERNEL_AddProcessDB( + Kernel, + processID, gcvDB_CONTIGUOUS, + logical, + gcmINT2PTR(Interface->u.AllocateContiguousMemory.physical), + bytes)); + break; + + case gcvHAL_FREE_CONTIGUOUS_MEMORY: + physical = gcmNAME_TO_PTR(Interface->u.FreeContiguousMemory.physical); + + gcmkVERIFY_OK( + gckKERNEL_RemoveProcessDB(Kernel, + processID, gcvDB_CONTIGUOUS, + gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical))); + + /* Unmap user logical out of physical memory first. */ + gcmkONERROR(gckOS_UnmapUserLogical(Kernel->os, + physical, + (gctSIZE_T) Interface->u.FreeContiguousMemory.bytes, + gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical))); + + /* Free contiguous memory. */ + gcmkONERROR( + gckOS_FreeContiguous(Kernel->os, + physical, + gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical), + (gctSIZE_T) Interface->u.FreeContiguousMemory.bytes)); + +#if gcdSECURE_USER + gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache( + Kernel, + cache, + gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical), + (gctSIZE_T) Interface->u.FreeContiguousMemory.bytes)); +#endif + + gcmRELEASE_NAME(Interface->u.FreeContiguousMemory.physical); + break; + + case gcvHAL_ALLOCATE_VIDEO_MEMORY: + + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + + break; + + case gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY: + /* Allocate memory. 
*/ + gcmkONERROR( + gckKERNEL_AllocateLinearMemory(Kernel, processID, + &Interface->u.AllocateLinearVideoMemory.pool, + Interface->u.AllocateLinearVideoMemory.bytes, + Interface->u.AllocateLinearVideoMemory.alignment, + Interface->u.AllocateLinearVideoMemory.type, + Interface->u.AllocateLinearVideoMemory.flag, + &Interface->u.AllocateLinearVideoMemory.node)); + break; + + case gcvHAL_RELEASE_VIDEO_MEMORY: + /* Release video memory. */ + gcmkONERROR(gckKERNEL_ReleaseVideoMemory( + Kernel, processID, + (gctUINT32)Interface->u.ReleaseVideoMemory.node + )); + break; + + case gcvHAL_LOCK_VIDEO_MEMORY: + /* Lock video memory. */ + gcmkONERROR(gckKERNEL_LockVideoMemory(Kernel, Kernel->core, processID, FromUser, Interface)); + break; + + case gcvHAL_UNLOCK_VIDEO_MEMORY: + /* Unlock video memory. */ + gcmkONERROR(gckKERNEL_UnlockVideoMemory(Kernel, processID, Interface)); + break; + + case gcvHAL_BOTTOM_HALF_UNLOCK_VIDEO_MEMORY: + gcmkERR_BREAK(gckKERNEL_BottomHalfUnlockVideoMemory(Kernel, processID, + Interface->u.BottomHalfUnlockVideoMemory.node)); + break; + + case gcvHAL_EVENT_COMMIT: + gcmkONERROR(gckOS_AcquireMutex(Kernel->os, + Kernel->device->commitMutex, + gcvINFINITE + )); + + commitMutexAcquired = gcvTRUE; + /* Commit an event queue. */ + if (Interface->engine == gcvENGINE_BLT) + { + if (!gckHARDWARE_IsFeatureAvailable(Kernel->hardware, gcvFEATURE_ASYNC_BLIT)) + { + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + } + + gcmkONERROR(gckEVENT_Commit( + Kernel->asyncEvent, gcmUINT64_TO_PTR(Interface->u.Event.queue), gcvFALSE)); + } + else + { + gcmkONERROR(gckEVENT_Commit( + Kernel->eventObj, gcmUINT64_TO_PTR(Interface->u.Event.queue), gcvFALSE)); + } + + gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->device->commitMutex)); + commitMutexAcquired = gcvFALSE; + break; + + case gcvHAL_COMMIT: + gcmkONERROR(gckOS_AcquireMutex(Kernel->os, + Kernel->device->commitMutex, + gcvINFINITE + )); + commitMutexAcquired = gcvTRUE; + + /* Commit a command and context buffer. 
*/ + if (Interface->engine == gcvENGINE_BLT) + { + gctUINT64 *commandBuffers = gcmUINT64_TO_PTR(Interface->u.Commit.commandBuffer); + + if (!gckHARDWARE_IsFeatureAvailable(Kernel->hardware, gcvFEATURE_ASYNC_BLIT)) + { + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + } + + gcmkONERROR(gckASYNC_COMMAND_Commit( + Kernel->asyncCommand, + gcmUINT64_TO_PTR(commandBuffers[0]), + gcmUINT64_TO_PTR(Interface->u.Commit.queue) + )); + + gcmkONERROR(gckEVENT_Commit( + Kernel->asyncEvent, + gcmUINT64_TO_PTR(Interface->u.Commit.queue), + gcvFALSE + )); + } + else + { + gctUINT32 i; + + status = gckCOMMAND_Commit(Kernel->command, + Interface->u.Commit.contexts[0] ? + gcmNAME_TO_PTR(Interface->u.Commit.contexts[0]) : gcvNULL, + gcmUINT64_TO_PTR(Interface->u.Commit.commandBuffers[0]), + gcmUINT64_TO_PTR(Interface->u.Commit.deltas[0]), + processID, + Interface->u.Commit.shared, + Interface->u.Commit.index, + &Interface->u.Commit.commitStamp, + &Interface->u.Commit.contextSwitched + ); + + if (status != gcvSTATUS_INTERRUPTED) + { + gcmkONERROR(status); + } + + /* Force an event if powerManagement is on. */ + status = gckEVENT_Commit(Kernel->eventObj, + gcmUINT64_TO_PTR(Interface->u.Commit.queue), + Kernel->hardware->options.powerManagement); + + if (status != gcvSTATUS_INTERRUPTED) + { + gcmkONERROR(status); + } + + if (Interface->u.Commit.count > 1 && Interface->engine == gcvENGINE_RENDER) + { + for (i = 1; i < Interface->u.Commit.count; i++) + { + gceHARDWARE_TYPE type = Interface->hardwareType; + gckKERNEL kernel = Device->map[type].kernels[i]; + + status = gckCOMMAND_Commit(kernel->command, + Interface->u.Commit.contexts[i] ? + gcmNAME_TO_PTR(Interface->u.Commit.contexts[i]) : gcvNULL, + Interface->u.Commit.commandBuffers[i] ? + gcmUINT64_TO_PTR(Interface->u.Commit.commandBuffers[i]) : gcmUINT64_TO_PTR(Interface->u.Commit.commandBuffers[0]), + gcmUINT64_TO_PTR(Interface->u.Commit.deltas[i]), + processID, + Interface->u.Commit.shared, + Interface->u.Commit.commandBuffers[i] ? 
+ Interface->u.Commit.index : i, + &Interface->u.Commit.commitStamp, + &Interface->u.Commit.contextSwitched + ); + + if (status != gcvSTATUS_INTERRUPTED) + { + gcmkONERROR(status); + } + + /* Force an event if powerManagement is on. */ + status = gckEVENT_Commit(kernel->eventObj, + gcvNULL, + kernel->hardware->options.powerManagement); + + if (status != gcvSTATUS_INTERRUPTED) + { + gcmkONERROR(status); + } + } + } + + for (i = 0; i < Interface->u.Commit.count; i++) + { + gceHARDWARE_TYPE type = Interface->hardwareType; + gckKERNEL kernel = Device->map[type].kernels[i]; + + if ((kernel->hardware->options.gpuProfiler == gcvTRUE) && + (kernel->profileEnable == gcvTRUE)) + { + gcmkONERROR(gckCOMMAND_Stall(kernel->command, gcvTRUE)); + + if (kernel->command->currContext) + { + gcmkONERROR(gckHARDWARE_UpdateContextProfile( + kernel->hardware, + kernel->command->currContext)); + } + } + } + } + gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->device->commitMutex)); + commitMutexAcquired = gcvFALSE; + + break; + + case gcvHAL_STALL: + /* Stall the command queue. */ + gcmkONERROR(gckCOMMAND_Stall(Kernel->command, gcvFALSE)); + + break; + + case gcvHAL_MAP_USER_MEMORY: + + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + + break; + + case gcvHAL_UNMAP_USER_MEMORY: + + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + + break; + +#if !USE_NEW_LINUX_SIGNAL + case gcvHAL_USER_SIGNAL: + /* Dispatch depends on the user signal subcommands. */ + switch(Interface->u.UserSignal.command) + { + case gcvUSER_SIGNAL_CREATE: + /* Create a signal used in the user space. 
*/ + gcmkONERROR( + gckOS_CreateUserSignal(Kernel->os, + Interface->u.UserSignal.manualReset, + &Interface->u.UserSignal.id)); + + gcmkVERIFY_OK( + gckKERNEL_AddProcessDB(Kernel, + processID, gcvDB_SIGNAL, + gcmINT2PTR(Interface->u.UserSignal.id), + gcvNULL, + 0)); + break; + + case gcvUSER_SIGNAL_DESTROY: + gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB( + Kernel, + processID, gcvDB_SIGNAL, + gcmINT2PTR(Interface->u.UserSignal.id))); + + /* Destroy the signal. */ + gcmkONERROR( + gckOS_DestroyUserSignal(Kernel->os, + Interface->u.UserSignal.id)); + break; + + case gcvUSER_SIGNAL_SIGNAL: + /* Signal the signal. */ + gcmkONERROR( + gckOS_SignalUserSignal(Kernel->os, + Interface->u.UserSignal.id, + Interface->u.UserSignal.state)); + break; + + case gcvUSER_SIGNAL_WAIT: + /* Wait on the signal. */ + status = gckOS_WaitUserSignal(Kernel->os, + Interface->u.UserSignal.id, + Interface->u.UserSignal.wait); + break; + + case gcvUSER_SIGNAL_MAP: + gcmkONERROR( + gckOS_MapSignal(Kernel->os, + (gctSIGNAL)(gctUINTPTR_T)Interface->u.UserSignal.id, + (gctHANDLE)(gctUINTPTR_T)processID, + &signal)); + + gcmkVERIFY_OK( + gckKERNEL_AddProcessDB(Kernel, + processID, gcvDB_SIGNAL, + gcmINT2PTR(Interface->u.UserSignal.id), + gcvNULL, + 0)); + break; + + case gcvUSER_SIGNAL_UNMAP: + gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB( + Kernel, + processID, gcvDB_SIGNAL, + gcmINT2PTR(Interface->u.UserSignal.id))); + + /* Destroy the signal. */ + gcmkONERROR( + gckOS_DestroyUserSignal(Kernel->os, + Interface->u.UserSignal.id)); + break; + + default: + /* Invalid user signal command. */ + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + break; +#endif + + case gcvHAL_SET_POWER_MANAGEMENT_STATE: + /* Set the power management state. */ + gcmkONERROR( + gckHARDWARE_SetPowerManagementState( + Kernel->hardware, + Interface->u.SetPowerManagement.state)); + break; + + case gcvHAL_QUERY_POWER_MANAGEMENT_STATE: + /* Chip is not idle. 
*/ + Interface->u.QueryPowerManagement.isIdle = gcvFALSE; + + /* Query the power management state. */ + gcmkONERROR(gckHARDWARE_QueryPowerManagementState( + Kernel->hardware, + &Interface->u.QueryPowerManagement.state)); + + /* Query the idle state. */ + gcmkONERROR( + gckHARDWARE_QueryIdle(Kernel->hardware, + &Interface->u.QueryPowerManagement.isIdle)); + break; + + case gcvHAL_READ_REGISTER: +#if gcdREGISTER_ACCESS_FROM_USER + { + gceCHIPPOWERSTATE power; + + gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Kernel->hardware->powerMutex, gcvINFINITE)); + powerMutexAcquired = gcvTRUE; + gcmkONERROR(gckHARDWARE_QueryPowerManagementState(Kernel->hardware, + &power)); + if (power == gcvPOWER_ON) + { + /* Read a register. */ + gcmkONERROR(gckOS_ReadRegisterEx( + Kernel->os, + Kernel->core, + Interface->u.ReadRegisterData.address, + &Interface->u.ReadRegisterData.data)); + } + else + { + /* Chip is in power-state. */ + Interface->u.ReadRegisterData.data = 0; + status = gcvSTATUS_CHIP_NOT_READY; + } + gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->hardware->powerMutex)); + powerMutexAcquired = gcvFALSE; + } +#else + /* No access from user land to read registers. */ + Interface->u.ReadRegisterData.data = 0; + status = gcvSTATUS_NOT_SUPPORTED; +#endif + break; + + case gcvHAL_WRITE_REGISTER: +#if gcdREGISTER_ACCESS_FROM_USER + { + gceCHIPPOWERSTATE power; + + gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Kernel->hardware->powerMutex, gcvINFINITE)); + powerMutexAcquired = gcvTRUE; + gcmkONERROR(gckHARDWARE_QueryPowerManagementState(Kernel->hardware, + &power)); + if (power == gcvPOWER_ON) + { + /* Write a register. */ + gcmkONERROR( + gckOS_WriteRegisterEx(Kernel->os, + Kernel->core, + Interface->u.WriteRegisterData.address, + Interface->u.WriteRegisterData.data)); + } + else + { + /* Chip is in power-state. 
*/ + Interface->u.WriteRegisterData.data = 0; + status = gcvSTATUS_CHIP_NOT_READY; + } + gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->hardware->powerMutex)); + powerMutexAcquired = gcvFALSE; + } +#else + /* No access from user land to write registers. */ + status = gcvSTATUS_NOT_SUPPORTED; +#endif + break; + + case gcvHAL_READ_ALL_PROFILE_REGISTERS_PART1: + /* Read profile data according to the context. */ + gcmkONERROR( + gckHARDWARE_QueryContextProfile( + Kernel->hardware, + Kernel->profileCleanRegister, + gcmNAME_TO_PTR(Interface->u.RegisterProfileData_part1.context), + &Interface->u.RegisterProfileData_part1.Counters, + gcvNULL)); + break; + case gcvHAL_READ_ALL_PROFILE_REGISTERS_PART2: + /* Read profile data according to the context. */ + gcmkONERROR( + gckHARDWARE_QueryContextProfile( + Kernel->hardware, + Kernel->profileCleanRegister, + gcmNAME_TO_PTR(Interface->u.RegisterProfileData_part2.context), + gcvNULL, + &Interface->u.RegisterProfileData_part2.Counters)); + break; + + case gcvHAL_GET_PROFILE_SETTING: +#if VIVANTE_PROFILER + /* Get profile setting */ + Interface->u.GetProfileSetting.enable = Kernel->profileEnable; +#endif + + status = gcvSTATUS_OK; + break; + + case gcvHAL_SET_PROFILE_SETTING: +#if VIVANTE_PROFILER + /* Set profile setting */ + if(Kernel->hardware->options.gpuProfiler) + { + Kernel->profileEnable = Interface->u.SetProfileSetting.enable; + + if (Kernel->profileEnable) + { + gcmkONERROR(gckHARDWARE_InitProfiler(Kernel->hardware)); + } + + } + else + { + status = gcvSTATUS_NOT_SUPPORTED; + break; + } +#endif + + status = gcvSTATUS_OK; + break; + + case gcvHAL_READ_PROFILER_REGISTER_SETTING: + Kernel->profileCleanRegister = Interface->u.SetProfilerRegisterClear.bclear; + status = gcvSTATUS_OK; + break; + + case gcvHAL_QUERY_KERNEL_SETTINGS: + /* Get kernel settings. */ + gcmkONERROR( + gckKERNEL_QuerySettings(Kernel, + &Interface->u.QueryKernelSettings.settings)); + break; + + case gcvHAL_RESET: + /* Reset the hardware. 
*/ + gcmkONERROR( + gckHARDWARE_Reset(Kernel->hardware)); + break; + + case gcvHAL_DEBUG: + /* Set debug level and zones. */ + if (Interface->u.Debug.set) + { + gckOS_SetDebugLevel(Interface->u.Debug.level); + gckOS_SetDebugZones(Interface->u.Debug.zones, + Interface->u.Debug.enable); + } + + if (Interface->u.Debug.message[0] != '\0') + { + /* Print a message to the debugger. */ + if (Interface->u.Debug.type == gcvMESSAGE_TEXT) + { + gckOS_DumpBuffer(Kernel->os, + Interface->u.Debug.message, + gcmSIZEOF(Interface->u.Debug.message), + gcvDUMP_BUFFER_FROM_USER, + gcvTRUE); + } + else + { + gckOS_DumpBuffer(Kernel->os, + Interface->u.Debug.message, + Interface->u.Debug.messageSize, + gcvDUMP_BUFFER_FROM_USER, + gcvTRUE); + } + } + status = gcvSTATUS_OK; + break; + + case gcvHAL_DUMP_GPU_STATE: + { + gceCHIPPOWERSTATE power; + + _DumpDriverConfigure(Kernel); + + gcmkONERROR(gckHARDWARE_QueryPowerManagementState( + Kernel->hardware, + &power + )); + + if (power == gcvPOWER_ON) + { + Interface->u.ReadRegisterData.data = 1; + + _DumpState(Kernel); + } + else + { + Interface->u.ReadRegisterData.data = 0; + status = gcvSTATUS_CHIP_NOT_READY; + + gcmkPRINT("[galcore]: Can't dump state if GPU isn't POWER ON."); + } + } + break; + + case gcvHAL_DUMP_EVENT: + break; + + case gcvHAL_CACHE: + logical = gcmUINT64_TO_PTR(Interface->u.Cache.logical); + bytes = (gctSIZE_T) Interface->u.Cache.bytes; + + gcmkONERROR(gckKERNEL_CacheOperation(Kernel, + processID, + Interface->u.Cache.node, + Interface->u.Cache.operation, + logical, + bytes)); + break; + + case gcvHAL_TIMESTAMP: + /* Check for invalid timer. */ + if ((Interface->u.TimeStamp.timer >= gcmCOUNTOF(Kernel->timers)) + || (Interface->u.TimeStamp.request != 2)) + { + Interface->u.TimeStamp.timeDelta = 0; + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + /* Return timer results and reset timer. 
*/ + { + gcsTIMER_PTR timer = &(Kernel->timers[Interface->u.TimeStamp.timer]); + gctUINT64 timeDelta = 0; + + if (timer->stopTime < timer->startTime ) + { + Interface->u.TimeStamp.timeDelta = 0; + gcmkONERROR(gcvSTATUS_TIMER_OVERFLOW); + } + + timeDelta = timer->stopTime - timer->startTime; + + /* Check truncation overflow. */ + Interface->u.TimeStamp.timeDelta = (gctINT32) timeDelta; + /*bit0~bit30 is available*/ + if (timeDelta>>31) + { + Interface->u.TimeStamp.timeDelta = 0; + gcmkONERROR(gcvSTATUS_TIMER_OVERFLOW); + } + + status = gcvSTATUS_OK; + } + break; + + case gcvHAL_DATABASE: + gcmkONERROR(gckKERNEL_QueryDatabase(Kernel, processID, Interface)); + break; + + case gcvHAL_VERSION: + Interface->u.Version.major = gcvVERSION_MAJOR; + Interface->u.Version.minor = gcvVERSION_MINOR; + Interface->u.Version.patch = gcvVERSION_PATCH; + Interface->u.Version.build = gcvVERSION_BUILD; +#if gcmIS_DEBUG(gcdDEBUG_TRACE) + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL, + "KERNEL version %d.%d.%d build %u", + gcvVERSION_MAJOR, gcvVERSION_MINOR, + gcvVERSION_PATCH, gcvVERSION_BUILD); +#endif + break; + + case gcvHAL_CHIP_INFO: + /* Only if not support multi-core */ + Interface->u.ChipInfo.count = 1; + Interface->u.ChipInfo.types[0] = Kernel->hardware->type; + break; + +#if (gcdENABLE_3D) + case gcvHAL_ATTACH: + /* Attach user process. 
*/ + gcmkONERROR( + gckCOMMAND_Attach(Kernel->command, + &context, + &bytes, + &Interface->u.Attach.numStates, + processID)); + + Interface->u.Attach.maxState = bytes; + Interface->u.Attach.context = gcmPTR_TO_NAME(context); + + if (Interface->u.Attach.map == gcvTRUE) + { + gcmkVERIFY_OK( + gckCONTEXT_MapBuffer(context, + Interface->u.Attach.physicals, + Interface->u.Attach.logicals, + &Interface->u.Attach.bytes)); + } + + gcmkVERIFY_OK( + gckKERNEL_AddProcessDB(Kernel, + processID, gcvDB_CONTEXT, + gcmINT2PTR(Interface->u.Attach.context), + gcvNULL, + 0)); + break; +#endif + + case gcvHAL_DETACH: + gcmkVERIFY_OK( + gckKERNEL_RemoveProcessDB(Kernel, + processID, gcvDB_CONTEXT, + gcmINT2PTR(Interface->u.Detach.context))); + + /* Detach user process. */ + gcmkONERROR( + gckCOMMAND_Detach(Kernel->command, + gcmNAME_TO_PTR(Interface->u.Detach.context))); + + gcmRELEASE_NAME(Interface->u.Detach.context); + gcmkONERROR(gckOS_AcquireMutex(Kernel->os, + Kernel->device->commitMutex, + gcvINFINITE + )); + + commitMutexAcquired = gcvTRUE; + + gcmkONERROR(gckEVENT_Submit( + Kernel->eventObj, + gcvTRUE, + gcvFALSE + )); + + gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, + Kernel->device->commitMutex + )); + + commitMutexAcquired = gcvFALSE; + break; + + case gcvHAL_GET_FRAME_INFO: + gcmkONERROR(gckHARDWARE_GetFrameInfo( + Kernel->hardware, + gcmUINT64_TO_PTR(Interface->u.GetFrameInfo.frameInfo))); + break; + + case gcvHAL_DUMP_GPU_PROFILE: + gcmkONERROR(gckHARDWARE_DumpGpuProfile(Kernel->hardware)); + break; + + case gcvHAL_SET_FSCALE_VALUE: +#if gcdENABLE_FSCALE_VAL_ADJUST + status = gckHARDWARE_SetFscaleValue(Kernel->hardware, + Interface->u.SetFscaleValue.value); +#else + status = gcvSTATUS_NOT_SUPPORTED; +#endif + break; + case gcvHAL_GET_FSCALE_VALUE: +#if gcdENABLE_FSCALE_VAL_ADJUST + status = gckHARDWARE_GetFscaleValue(Kernel->hardware, + &Interface->u.GetFscaleValue.value, + &Interface->u.GetFscaleValue.minValue, + &Interface->u.GetFscaleValue.maxValue); +#else + status = 
gcvSTATUS_NOT_SUPPORTED; +#endif + break; + + case gcvHAL_EXPORT_VIDEO_MEMORY: + /* Unlock video memory. */ + gcmkONERROR(gckVIDMEM_NODE_Export(Kernel, + Interface->u.ExportVideoMemory.node, + Interface->u.ExportVideoMemory.flags, + gcvNULL, + &Interface->u.ExportVideoMemory.fd)); + break; + + case gcvHAL_NAME_VIDEO_MEMORY: + gcmkONERROR(gckVIDMEM_NODE_Name(Kernel, + Interface->u.NameVideoMemory.handle, + &Interface->u.NameVideoMemory.name)); + break; + + case gcvHAL_IMPORT_VIDEO_MEMORY: + gcmkONERROR(gckVIDMEM_NODE_Import(Kernel, + Interface->u.ImportVideoMemory.name, + &Interface->u.ImportVideoMemory.handle)); + + gcmkONERROR( + gckKERNEL_AddProcessDB(Kernel, + processID, gcvDB_VIDEO_MEMORY, + gcmINT2PTR(Interface->u.ImportVideoMemory.handle), + gcvNULL, + 0)); + break; + + case gcvHAL_SET_VIDEO_MEMORY_METADATA: + gcmkONERROR(gckKERNEL_SetVidMemMetadata(Kernel, processID, Interface)); + break; + + case gcvHAL_GET_VIDEO_MEMORY_FD: + gcmkONERROR(gckVIDMEM_NODE_GetFd( + Kernel, + Interface->u.GetVideoMemoryFd.handle, + &Interface->u.GetVideoMemoryFd.fd + )); + + /* No need to add it to processDB because OS will release all fds when + ** process quits. 
+ */ + break; + + case gcvHAL_QUERY_RESET_TIME_STAMP: + Interface->u.QueryResetTimeStamp.timeStamp = Kernel->resetTimeStamp; + Interface->u.QueryResetTimeStamp.contextID = Kernel->hardware->contextID; + break; + + case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER: + buffer = (gckVIRTUAL_COMMAND_BUFFER_PTR)gcmNAME_TO_PTR(Interface->u.FreeVirtualCommandBuffer.physical); + + gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB( + Kernel, + processID, + gcvDB_COMMAND_BUFFER, + gcmUINT64_TO_PTR(Interface->u.FreeVirtualCommandBuffer.logical))); + + gcmkONERROR(gckOS_DestroyUserVirtualMapping( + Kernel->os, + buffer->virtualBuffer.physical, + (gctSIZE_T)Interface->u.FreeVirtualCommandBuffer.bytes, + gcmUINT64_TO_PTR(Interface->u.FreeVirtualCommandBuffer.logical))); + + gcmkONERROR(gckKERNEL_DestroyVirtualCommandBuffer( + Kernel, + (gctSIZE_T)Interface->u.FreeVirtualCommandBuffer.bytes, + (gctPHYS_ADDR)buffer, + gcmUINT64_TO_PTR(Interface->u.FreeVirtualCommandBuffer.logical))); + + gcmRELEASE_NAME(Interface->u.FreeVirtualCommandBuffer.physical); + break; + +#if gcdLINUX_SYNC_FILE + case gcvHAL_CREATE_NATIVE_FENCE: + { + gctINT fenceFD; + gctSIGNAL signal = + gcmUINT64_TO_PTR(Interface->u.CreateNativeFence.signal); + + gcmkONERROR( + gckOS_CreateNativeFence(Kernel->os, + Kernel->timeline, + signal, + &fenceFD)); + + Interface->u.CreateNativeFence.fenceFD = fenceFD; + } + break; + + case gcvHAL_WAIT_NATIVE_FENCE: + { + gctINT fenceFD; + gctUINT32 timeout; + + fenceFD = Interface->u.WaitNativeFence.fenceFD; + timeout = Interface->u.WaitNativeFence.timeout; + + gcmkONERROR( + gckOS_WaitNativeFence(Kernel->os, + Kernel->timeline, + fenceFD, + timeout)); + } + break; +#endif + + case gcvHAL_SHBUF: + { + gctSHBUF shBuf; + gctPOINTER uData; + gctUINT32 bytes; + + switch (Interface->u.ShBuf.command) + { + case gcvSHBUF_CREATE: + bytes = Interface->u.ShBuf.bytes; + + /* Create. 
*/ + gcmkONERROR(gckKERNEL_CreateShBuffer(Kernel, bytes, &shBuf)); + + Interface->u.ShBuf.id = gcmPTR_TO_UINT64(shBuf); + + gcmkVERIFY_OK( + gckKERNEL_AddProcessDB(Kernel, + processID, + gcvDB_SHBUF, + shBuf, + gcvNULL, + 0)); + break; + + case gcvSHBUF_DESTROY: + shBuf = gcmUINT64_TO_PTR(Interface->u.ShBuf.id); + + /* Check db first to avoid illegal destroy in the process. */ + gcmkONERROR( + gckKERNEL_RemoveProcessDB(Kernel, + processID, + gcvDB_SHBUF, + shBuf)); + + gcmkONERROR(gckKERNEL_DestroyShBuffer(Kernel, shBuf)); + break; + + case gcvSHBUF_MAP: + shBuf = gcmUINT64_TO_PTR(Interface->u.ShBuf.id); + + /* Map for current process access. */ + gcmkONERROR(gckKERNEL_MapShBuffer(Kernel, shBuf)); + + gcmkVERIFY_OK( + gckKERNEL_AddProcessDB(Kernel, + processID, + gcvDB_SHBUF, + shBuf, + gcvNULL, + 0)); + break; + + case gcvSHBUF_WRITE: + shBuf = gcmUINT64_TO_PTR(Interface->u.ShBuf.id); + uData = gcmUINT64_TO_PTR(Interface->u.ShBuf.data); + bytes = Interface->u.ShBuf.bytes; + + /* Write. */ + gcmkONERROR( + gckKERNEL_WriteShBuffer(Kernel, shBuf, uData, bytes)); + break; + + case gcvSHBUF_READ: + shBuf = gcmUINT64_TO_PTR(Interface->u.ShBuf.id); + uData = gcmUINT64_TO_PTR(Interface->u.ShBuf.data); + bytes = Interface->u.ShBuf.bytes; + + /* Read. */ + gcmkONERROR( + gckKERNEL_ReadShBuffer(Kernel, + shBuf, + uData, + bytes, + &bytes)); + + /* Return copied size. 
*/ + Interface->u.ShBuf.bytes = bytes; + break; + + default: + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + break; + } + } + break; + +#ifdef __linux__ + case gcvHAL_GET_GRAPHIC_BUFFER_FD: + gcmkONERROR(_GetGraphicBufferFd( + Kernel, + processID, + Interface->u.GetGraphicBufferFd.node, + Interface->u.GetGraphicBufferFd.shBuf, + Interface->u.GetGraphicBufferFd.signal, + &Interface->u.GetGraphicBufferFd.fd + )); + break; +#endif + + + case gcvHAL_CONFIG_POWER_MANAGEMENT: + gcmkONERROR(gckKERNEL_ConfigPowerManagement(Kernel, Interface)); + break; + + case gcvHAL_WRAP_USER_MEMORY: + gcmkONERROR(gckVIDMEM_NODE_WrapUserMemory(Kernel, + &Interface->u.WrapUserMemory.desc, + &Interface->u.WrapUserMemory.node, + &Interface->u.WrapUserMemory.bytes)); + + gcmkONERROR( + gckKERNEL_AddProcessDB(Kernel, + processID, + gcvDB_VIDEO_MEMORY, + gcmINT2PTR(Interface->u.WrapUserMemory.node), + gcvNULL, + 0)); + break; + + case gcvHAL_WAIT_FENCE: + gcmkONERROR(gckKERNEL_WaitFence( + Kernel, + Interface->u.WaitFence.handle, + Interface->u.WaitFence.timeOut + )); + break; + +#if gcdDEC_ENABLE_AHB + case gcvHAL_DEC300_READ: + gcmkONERROR(viv_dec300_read( + Interface->u.DEC300Read.enable, + Interface->u.DEC300Read.readId, + Interface->u.DEC300Read.format, + Interface->u.DEC300Read.strides, + Interface->u.DEC300Read.is3D, + Interface->u.DEC300Read.isMSAA, + Interface->u.DEC300Read.clearValue, + Interface->u.DEC300Read.isTPC, + Interface->u.DEC300Read.isTPCCompressed, + Interface->u.DEC300Read.surfAddrs, + Interface->u.DEC300Read.tileAddrs + )); + break; + + case gcvHAL_DEC300_WRITE: + gcmkONERROR(viv_dec300_write( + Interface->u.DEC300Write.enable, + Interface->u.DEC300Write.readId, + Interface->u.DEC300Write.writeId, + Interface->u.DEC300Write.format, + Interface->u.DEC300Write.surfAddr, + Interface->u.DEC300Write.tileAddr + )); + break; + + case gcvHAL_DEC300_FLUSH: + gcmkONERROR(viv_dec300_flush(0)); + break; + + case gcvHAL_DEC300_FLUSH_WAIT: + 
gcmkONERROR(viv_dec300_flush_done(&Interface->u.DEC300FlushWait.done)); + break; +#endif + + + case gcvHAL_QUERY_CHIP_OPTION: + /* Query chip options. */ + gcmkONERROR( + gckHARDWARE_QueryChipOptions( + Kernel->hardware, + &Interface->u.QueryChipOptions)); + break; + + default: + /* Invalid command. */ + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + +OnError: + /* Save status. */ + Interface->status = status; + +#if QNX_SINGLE_THREADED_DEBUGGING + gckOS_ReleaseMutex(Kernel->os, Kernel->debugMutex); +#endif + + if (powerMutexAcquired == gcvTRUE) + { + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->hardware->powerMutex)); + } + + if (commitMutexAcquired == gcvTRUE) + { + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->device->commitMutex)); + } + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** gckKERNEL_AttachProcess +** +** Attach or detach a process. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gctBOOL Attach +** gcvTRUE if a new process gets attached or gcFALSE when a process +** gets detatched. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckKERNEL_AttachProcess( + IN gckKERNEL Kernel, + IN gctBOOL Attach + ) +{ + gceSTATUS status; + gctUINT32 processID; + + gcmkHEADER_ARG("Kernel=0x%x Attach=%d", Kernel, Attach); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + + /* Get current process ID. */ + gcmkONERROR(gckOS_GetProcessID(&processID)); + + gcmkONERROR(gckKERNEL_AttachProcessEx(Kernel, Attach, processID)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** gckKERNEL_AttachProcessEx +** +** Attach or detach a process with the given PID. 
Can be paired with gckKERNEL_AttachProcess +** provided the programmer is aware of the consequences. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gctBOOL Attach +** gcvTRUE if a new process gets attached or gcFALSE when a process +** gets detatched. +** +** gctUINT32 PID +** PID of the process to attach or detach. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckKERNEL_AttachProcessEx( + IN gckKERNEL Kernel, + IN gctBOOL Attach, + IN gctUINT32 PID + ) +{ + gceSTATUS status; + gctINT32 old; + + gcmkHEADER_ARG("Kernel=0x%x Attach=%d PID=%d", Kernel, Attach, PID); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + + if (Attach) + { + /* Increment the number of clients attached. */ + gcmkONERROR( + gckOS_AtomIncrement(Kernel->os, Kernel->atomClients, &old)); + + if (old == 0) + { + { + gcmkONERROR(gckOS_Broadcast(Kernel->os, + Kernel->hardware, + gcvBROADCAST_FIRST_PROCESS)); + } + } + + if (Kernel->dbCreated) + { + /* Create the process database. */ + gcmkONERROR(gckKERNEL_CreateProcessDB(Kernel, PID)); + } + +#if gcdPROCESS_ADDRESS_SPACE + /* Map kernel command buffer in the process's own MMU. */ + gcmkONERROR(_MapCommandBuffer(Kernel)); +#endif + } + else + { + if (Kernel->dbCreated) + { + /* Clean up the process database. */ + gcmkONERROR(gckKERNEL_DestroyProcessDB(Kernel, PID)); + + /* Save the last know process ID. */ + Kernel->db->lastProcessID = PID; + } + + { + status = gckEVENT_Submit(Kernel->eventObj, gcvTRUE, gcvFALSE); + + if (status == gcvSTATUS_INTERRUPTED && Kernel->eventObj->submitTimer) + { + gcmkONERROR(gckOS_StartTimer(Kernel->os, + Kernel->eventObj->submitTimer, + 1)); + } + else + { + gcmkONERROR(status); + } + } + + /* Decrement the number of clients attached. */ + gcmkONERROR( + gckOS_AtomDecrement(Kernel->os, Kernel->atomClients, &old)); + + if (old == 1) + { + { + /* Last client detached, switch to SUSPEND power state. 
*/ + gcmkONERROR(gckOS_Broadcast(Kernel->os, + Kernel->hardware, + gcvBROADCAST_LAST_PROCESS)); + } + + /* Flush the debug cache. */ + gcmkDEBUGFLUSH(~0U); + } + } + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +#if gcdSECURE_USER +gceSTATUS +gckKERNEL_MapLogicalToPhysical( + IN gckKERNEL Kernel, + IN gcskSECURE_CACHE_PTR Cache, + IN OUT gctPOINTER * Data + ) +{ + gceSTATUS status; + static gctBOOL baseAddressValid = gcvFALSE; + static gctUINT32 baseAddress; + gctBOOL needBase; + gcskLOGICAL_CACHE_PTR slot; + + gcmkHEADER_ARG("Kernel=0x%x Cache=0x%x *Data=0x%x", + Kernel, Cache, gcmOPT_POINTER(Data)); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + + if (!baseAddressValid) + { + /* Get base address. */ + gcmkONERROR(gckHARDWARE_GetBaseAddress(Kernel->hardware, &baseAddress)); + + baseAddressValid = gcvTRUE; + } + + /* Does this state load need a base address? */ + gcmkONERROR(gckHARDWARE_NeedBaseAddress(Kernel->hardware, + ((gctUINT32_PTR) Data)[-1], + &needBase)); + +#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LRU + { + gcskLOGICAL_CACHE_PTR next; + gctINT i; + + /* Walk all used cache slots. */ + for (i = 1, slot = Cache->cache[0].next, next = gcvNULL; + (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL); + ++i, slot = slot->next + ) + { + if (slot->logical == *Data) + { + /* Bail out. */ + next = slot; + break; + } + } + + /* See if we had a miss. */ + if (next == gcvNULL) + { + /* Use the tail of the cache. */ + slot = Cache->cache[0].prev; + + /* Initialize the cache line. */ + slot->logical = *Data; + + /* Map the logical address to a DMA address. */ + gcmkONERROR( + gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma)); + + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical(Kernel->os, slot->dma, &slot->dma)); + } + + /* Move slot to head of list. */ + if (slot != Cache->cache[0].next) + { + /* Unlink. 
*/ + slot->prev->next = slot->next; + slot->next->prev = slot->prev; + + /* Move to head of chain. */ + slot->prev = &Cache->cache[0]; + slot->next = Cache->cache[0].next; + slot->prev->next = slot; + slot->next->prev = slot; + } + } +#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LINEAR + { + gctINT i; + gcskLOGICAL_CACHE_PTR next = gcvNULL; + gcskLOGICAL_CACHE_PTR oldestSlot = gcvNULL; + slot = gcvNULL; + + if (Cache->cacheIndex != gcvNULL) + { + /* Walk the cache forwards. */ + for (i = 1, slot = Cache->cacheIndex; + (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL); + ++i, slot = slot->next) + { + if (slot->logical == *Data) + { + /* Bail out. */ + next = slot; + break; + } + + /* Determine age of this slot. */ + if ((oldestSlot == gcvNULL) + || (oldestSlot->stamp > slot->stamp) + ) + { + oldestSlot = slot; + } + } + + if (next == gcvNULL) + { + /* Walk the cache backwards. */ + for (slot = Cache->cacheIndex->prev; + (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL); + ++i, slot = slot->prev) + { + if (slot->logical == *Data) + { + /* Bail out. */ + next = slot; + break; + } + + /* Determine age of this slot. */ + if ((oldestSlot == gcvNULL) + || (oldestSlot->stamp > slot->stamp) + ) + { + oldestSlot = slot; + } + } + } + } + + /* See if we had a miss. */ + if (next == gcvNULL) + { + if (Cache->cacheFree != 0) + { + slot = &Cache->cache[Cache->cacheFree]; + gcmkASSERT(slot->logical == gcvNULL); + + ++ Cache->cacheFree; + if (Cache->cacheFree >= gcmCOUNTOF(Cache->cache)) + { + Cache->cacheFree = 0; + } + } + else + { + /* Use the oldest cache slot. */ + gcmkASSERT(oldestSlot != gcvNULL); + slot = oldestSlot; + + /* Unlink from the chain. */ + slot->prev->next = slot->next; + slot->next->prev = slot->prev; + + /* Append to the end. */ + slot->prev = Cache->cache[0].prev; + slot->next = &Cache->cache[0]; + slot->prev->next = slot; + slot->next->prev = slot; + } + + /* Initialize the cache line. 
*/ + slot->logical = *Data; + + /* Map the logical address to a DMA address. */ + gcmkONERROR( + gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma)); + + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical(Kernel->os, slot->dma, &slot->dma)); + } + + /* Save time stamp. */ + slot->stamp = ++ Cache->cacheStamp; + + /* Save current slot for next lookup. */ + Cache->cacheIndex = slot; + } +#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH + { + gctINT i; + gctUINT32 data = gcmPTR2INT32(*Data); + gctUINT32 key, index; + gcskLOGICAL_CACHE_PTR hash; + + /* Generate a hash key. */ + key = (data >> 24) + (data >> 16) + (data >> 8) + data; + index = key % gcmCOUNTOF(Cache->hash); + + /* Get the hash entry. */ + hash = &Cache->hash[index]; + + for (slot = hash->nextHash, i = 0; + (slot != gcvNULL) && (i < gcdSECURE_CACHE_SLOTS); + slot = slot->nextHash, ++i + ) + { + if (slot->logical == (*Data)) + { + break; + } + } + + if (slot == gcvNULL) + { + /* Grab from the tail of the cache. */ + slot = Cache->cache[0].prev; + + /* Unlink slot from any hash table it is part of. */ + if (slot->prevHash != gcvNULL) + { + slot->prevHash->nextHash = slot->nextHash; + } + if (slot->nextHash != gcvNULL) + { + slot->nextHash->prevHash = slot->prevHash; + } + + /* Initialize the cache line. */ + slot->logical = *Data; + + /* Map the logical address to a DMA address. */ + gcmkONERROR( + gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma)); + + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical(Kernel->os, slot->dma, &slot->dma)); + + if (hash->nextHash != gcvNULL) + { + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL, + "Hash Collision: logical=0x%x key=0x%08x", + *Data, key); + } + + /* Insert the slot at the head of the hash list. */ + slot->nextHash = hash->nextHash; + if (slot->nextHash != gcvNULL) + { + slot->nextHash->prevHash = slot; + } + slot->prevHash = hash; + hash->nextHash = slot; + } + + /* Move slot to head of list. */ + if (slot != Cache->cache[0].next) + { + /* Unlink. 
*/ + slot->prev->next = slot->next; + slot->next->prev = slot->prev; + + /* Move to head of chain. */ + slot->prev = &Cache->cache[0]; + slot->next = Cache->cache[0].next; + slot->prev->next = slot; + slot->next->prev = slot; + } + } +#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_TABLE + { + gctUINT32 index = (gcmPTR2INT32(*Data) % gcdSECURE_CACHE_SLOTS) + 1; + + /* Get cache slot. */ + slot = &Cache->cache[index]; + + /* Check for cache miss. */ + if (slot->logical != *Data) + { + /* Initialize the cache line. */ + slot->logical = *Data; + + /* Map the logical address to a DMA address. */ + gcmkONERROR( + gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma)); + + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical(Kernel->os, slot->dma, &slot->dma)); + } + } +#endif + + /* Return DMA address. */ + *Data = gcmINT2PTR(slot->dma + (needBase ? baseAddress : 0)); + + /* Success. */ + gcmkFOOTER_ARG("*Data=0x%08x", *Data); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckKERNEL_FlushTranslationCache( + IN gckKERNEL Kernel, + IN gcskSECURE_CACHE_PTR Cache, + IN gctPOINTER Logical, + IN gctSIZE_T Bytes + ) +{ + gctINT i; + gcskLOGICAL_CACHE_PTR slot; + gctUINT8_PTR ptr; + + gcmkHEADER_ARG("Kernel=0x%x Cache=0x%x Logical=0x%x Bytes=%lu", + Kernel, Cache, Logical, Bytes); + + /* Do we need to flush the entire cache? */ + if (Logical == gcvNULL) + { + /* Clear all cache slots. */ + for (i = 1; i <= gcdSECURE_CACHE_SLOTS; ++i) + { + Cache->cache[i].logical = gcvNULL; + +#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH + Cache->cache[i].nextHash = gcvNULL; + Cache->cache[i].prevHash = gcvNULL; +#endif +} + +#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH + /* Zero the hash table. */ + for (i = 0; i < gcmCOUNTOF(Cache->hash); ++i) + { + Cache->hash[i].nextHash = gcvNULL; + } +#endif + + /* Reset the cache functionality. 
*/ + Cache->cacheIndex = gcvNULL; + Cache->cacheFree = 1; + Cache->cacheStamp = 0; + } + + else + { + gctUINT8_PTR low = (gctUINT8_PTR) Logical; + gctUINT8_PTR high = low + Bytes; + +#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LRU + gcskLOGICAL_CACHE_PTR next; + + /* Walk all used cache slots. */ + for (i = 1, slot = Cache->cache[0].next; + (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL); + ++i, slot = next + ) + { + /* Save pointer to next slot. */ + next = slot->next; + + /* Test if this slot falls within the range to flush. */ + ptr = (gctUINT8_PTR) slot->logical; + if ((ptr >= low) && (ptr < high)) + { + /* Unlink slot. */ + slot->prev->next = slot->next; + slot->next->prev = slot->prev; + + /* Append slot to tail of cache. */ + slot->prev = Cache->cache[0].prev; + slot->next = &Cache->cache[0]; + slot->prev->next = slot; + slot->next->prev = slot; + + /* Mark slot as empty. */ + slot->logical = gcvNULL; + } + } + +#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LINEAR + gcskLOGICAL_CACHE_PTR next; + + for (i = 1, slot = Cache->cache[0].next; + (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL); + ++i, slot = next) + { + /* Save pointer to next slot. */ + next = slot->next; + + /* Test if this slot falls within the range to flush. */ + ptr = (gctUINT8_PTR) slot->logical; + if ((ptr >= low) && (ptr < high)) + { + /* Test if this slot is the current slot. */ + if (slot == Cache->cacheIndex) + { + /* Move to next or previous slot. */ + Cache->cacheIndex = (slot->next->logical != gcvNULL) + ? slot->next + : (slot->prev->logical != gcvNULL) + ? slot->prev + : gcvNULL; + } + + /* Unlink slot from cache. */ + slot->prev->next = slot->next; + slot->next->prev = slot->prev; + + /* Insert slot to head of cache. */ + slot->prev = &Cache->cache[0]; + slot->next = Cache->cache[0].next; + slot->prev->next = slot; + slot->next->prev = slot; + + /* Mark slot as empty. 
*/ + slot->logical = gcvNULL; + slot->stamp = 0; + } + } + +#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH + gctINT j; + gcskLOGICAL_CACHE_PTR hash, next; + + /* Walk all hash tables. */ + for (i = 0, hash = Cache->hash; + i < gcmCOUNTOF(Cache->hash); + ++i, ++hash) + { + /* Walk all slots in the hash. */ + for (j = 0, slot = hash->nextHash; + (j < gcdSECURE_CACHE_SLOTS) && (slot != gcvNULL); + ++j, slot = next) + { + /* Save pointer to next slot. */ + next = slot->next; + + /* Test if this slot falls within the range to flush. */ + ptr = (gctUINT8_PTR) slot->logical; + if ((ptr >= low) && (ptr < high)) + { + /* Unlink slot from hash table. */ + if (slot->prevHash == hash) + { + hash->nextHash = slot->nextHash; + } + else + { + slot->prevHash->nextHash = slot->nextHash; + } + + if (slot->nextHash != gcvNULL) + { + slot->nextHash->prevHash = slot->prevHash; + } + + /* Unlink slot from cache. */ + slot->prev->next = slot->next; + slot->next->prev = slot->prev; + + /* Append slot to tail of cache. */ + slot->prev = Cache->cache[0].prev; + slot->next = &Cache->cache[0]; + slot->prev->next = slot; + slot->next->prev = slot; + + /* Mark slot as empty. */ + slot->logical = gcvNULL; + slot->prevHash = gcvNULL; + slot->nextHash = gcvNULL; + } + } + } + +#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_TABLE + gctUINT32 index; + + /* Loop while inside the range. */ + for (i = 1; (low < high) && (i <= gcdSECURE_CACHE_SLOTS); ++i) + { + /* Get index into cache for this range. */ + index = (gcmPTR2INT32(low) % gcdSECURE_CACHE_SLOTS) + 1; + slot = &Cache->cache[index]; + + /* Test if this slot falls within the range to flush. */ + ptr = (gctUINT8_PTR) slot->logical; + if ((ptr >= low) && (ptr < high)) + { + /* Remove entry from cache. */ + slot->logical = gcvNULL; + } + + /* Next block. */ + low += gcdSECURE_CACHE_SLOTS; + } +#endif + } + + /* Success. 
*/ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} +#endif + +/******************************************************************************* +** +** gckKERNEL_Recovery +** +** Try to recover the GPU from a fatal error. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckKERNEL_Recovery( + IN gckKERNEL Kernel + ) +{ + gceSTATUS status; + gckEVENT eventObj; + gckHARDWARE hardware; +#if gcdSECURE_USER + gctUINT32 processID; + gcskSECURE_CACHE_PTR cache; +#endif + gctUINT32 mask = 0; + gctUINT32 i = 0, count = 0; +#if gcdINTERRUPT_STATISTIC + gctINT32 oldValue; +#endif + + gcmkHEADER_ARG("Kernel=0x%x", Kernel); + + /* Validate the arguemnts. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + + /* Grab gckEVENT object. */ + eventObj = Kernel->eventObj; + gcmkVERIFY_OBJECT(eventObj, gcvOBJ_EVENT); + + /* Grab gckHARDWARE object. */ + hardware = Kernel->hardware; + gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE); + +#if gcdSECURE_USER + /* Flush the secure mapping cache. */ + gcmkONERROR(gckOS_GetProcessID(&processID)); + gcmkONERROR(gckKERNEL_GetProcessDBCache(Kernel, processID, &cache)); + gcmkONERROR(gckKERNEL_FlushTranslationCache(Kernel, cache, gcvNULL, 0)); +#endif + + if (Kernel->stuckDump == gcvSTUCK_DUMP_NONE) + { + gcmkPRINT("[galcore]: GPU[%d] hang, automatic recovery.", Kernel->core); + } + else + { + gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, Kernel->device->stuckDumpMutex, gcvINFINITE)); + + _DumpDriverConfigure(Kernel); + _DumpState(Kernel); + + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->device->stuckDumpMutex)); + } + + if (Kernel->recovery == gcvFALSE) + { + gcmkPRINT("[galcore]: Stop driver to keep scene."); + + /* Stop monitor timer. */ + Kernel->monitorTimerStop = gcvTRUE; + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + } + + /* Issuing a soft reset for the GPU. 
*/ + gcmkONERROR(gckHARDWARE_Reset(hardware)); + + mask = Kernel->restoreMask; + + for (i = 0; i < 32; i++) + { + if (mask & (1 << i)) + { + count++; + } + } + + /* Handle all outstanding events now. */ + gcmkONERROR(gckOS_AtomSet(Kernel->os, eventObj->pending, mask)); + +#if gcdINTERRUPT_STATISTIC + while (count--) + { + gcmkONERROR(gckOS_AtomDecrement( + Kernel->os, + eventObj->interruptCount, + &oldValue + )); + } + + gckOS_AtomClearMask(Kernel->hardware->pendingEvent, mask); +#endif + + gcmkONERROR(gckEVENT_Notify(eventObj, 1)); + + gcmkVERIFY_OK(gckOS_GetTime(&Kernel->resetTimeStamp)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckKERNEL_OpenUserData +** +** Get access to the user data. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gctBOOL NeedCopy +** The flag indicating whether or not the data should be copied. +** +** gctPOINTER StaticStorage +** Pointer to the kernel storage where the data is to be copied if +** NeedCopy is gcvTRUE. +** +** gctPOINTER UserPointer +** User pointer to the data. +** +** gctSIZE_T Size +** Size of the data. +** +** OUTPUT: +** +** gctPOINTER * KernelPointer +** Pointer to the kernel pointer that will be pointing to the data. +*/ +gceSTATUS +gckKERNEL_OpenUserData( + IN gckKERNEL Kernel, + IN gctBOOL NeedCopy, + IN gctPOINTER StaticStorage, + IN gctPOINTER UserPointer, + IN gctSIZE_T Size, + OUT gctPOINTER * KernelPointer + ) +{ + gceSTATUS status; + + gcmkHEADER_ARG( + "Kernel=0x%08X NeedCopy=%d StaticStorage=0x%08X " + "UserPointer=0x%08X Size=%lu KernelPointer=0x%08X", + Kernel, NeedCopy, StaticStorage, UserPointer, Size, KernelPointer + ); + + /* Validate the arguemnts. 
*/ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + gcmkVERIFY_ARGUMENT(!NeedCopy || (StaticStorage != gcvNULL)); + gcmkVERIFY_ARGUMENT(UserPointer != gcvNULL); + gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL); + gcmkVERIFY_ARGUMENT(Size > 0); + + if (NeedCopy) + { + /* Copy the user data to the static storage. */ + gcmkONERROR(gckOS_CopyFromUserData( + Kernel->os, StaticStorage, UserPointer, Size + )); + + /* Set the kernel pointer. */ + * KernelPointer = StaticStorage; + } + else + { + gctPOINTER pointer = gcvNULL; + + /* Map the user pointer. */ + gcmkONERROR(gckOS_MapUserPointer( + Kernel->os, UserPointer, Size, &pointer + )); + + /* Set the kernel pointer. */ + * KernelPointer = pointer; + } + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckKERNEL_CloseUserData +** +** Release resources associated with the user data connection opened by +** gckKERNEL_OpenUserData. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gctBOOL NeedCopy +** The flag indicating whether or not the data should be copied. +** +** gctBOOL FlushData +** If gcvTRUE, the data is written back to the user. +** +** gctPOINTER UserPointer +** User pointer to the data. +** +** gctSIZE_T Size +** Size of the data. +** +** OUTPUT: +** +** gctPOINTER * KernelPointer +** Kernel pointer to the data. +*/ +gceSTATUS +gckKERNEL_CloseUserData( + IN gckKERNEL Kernel, + IN gctBOOL NeedCopy, + IN gctBOOL FlushData, + IN gctPOINTER UserPointer, + IN gctSIZE_T Size, + OUT gctPOINTER * KernelPointer + ) +{ + gceSTATUS status = gcvSTATUS_OK; + gctPOINTER pointer; + + gcmkHEADER_ARG( + "Kernel=0x%08X NeedCopy=%d FlushData=%d " + "UserPointer=0x%08X Size=%lu KernelPointer=0x%08X", + Kernel, NeedCopy, FlushData, UserPointer, Size, KernelPointer + ); + + /* Validate the arguemnts. 
*/ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + gcmkVERIFY_ARGUMENT(UserPointer != gcvNULL); + gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL); + gcmkVERIFY_ARGUMENT(Size > 0); + + /* Get a shortcut to the kernel pointer. */ + pointer = * KernelPointer; + + if (pointer != gcvNULL) + { + if (NeedCopy) + { + if (FlushData) + { + gcmkONERROR(gckOS_CopyToUserData( + Kernel->os, * KernelPointer, UserPointer, Size + )); + } + } + else + { + /* Unmap record from kernel memory. */ + gcmkONERROR(gckOS_UnmapUserPointer( + Kernel->os, + UserPointer, + Size, + * KernelPointer + )); + } + + /* Reset the kernel pointer. */ + * KernelPointer = gcvNULL; + } + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckKERNEL_AllocateVirtualCommandBuffer( + IN gckKERNEL Kernel, + IN gctBOOL InUserSpace, + IN OUT gctSIZE_T * Bytes, + OUT gctPHYS_ADDR * Physical, + OUT gctPOINTER * Logical + ) +{ + gceSTATUS status; + gckOS os = Kernel->os; + gckVIRTUAL_COMMAND_BUFFER_PTR buffer; + + gcmkHEADER_ARG("Os=0x%X InUserSpace=%d *Bytes=%lu", + os, InUserSpace, gcmOPT_VALUE(Bytes)); + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Bytes != gcvNULL); + gcmkVERIFY_ARGUMENT(*Bytes > 0); + gcmkVERIFY_ARGUMENT(Physical != gcvNULL); + gcmkVERIFY_ARGUMENT(Logical != gcvNULL); + + gcmkONERROR( + gckOS_Allocate( + os, + sizeof(gckVIRTUAL_COMMAND_BUFFER), + (gctPOINTER)&buffer + )); + + gcmkONERROR(gckOS_ZeroMemory(buffer, sizeof(gckVIRTUAL_COMMAND_BUFFER))); + + gcmkONERROR( + gckKERNEL_AllocateVirtualMemory( + Kernel, + gcvFALSE, + InUserSpace, + Bytes, + (gctPHYS_ADDR *)&buffer, + Logical + )); + + gcmkVERIFY_OK(gckOS_AcquireMutex(os, Kernel->virtualBufferLock, gcvINFINITE)); + + if (Kernel->virtualBufferHead == gcvNULL) + { + Kernel->virtualBufferHead = + Kernel->virtualBufferTail = buffer; + } + else + { + buffer->prev = Kernel->virtualBufferTail; + Kernel->virtualBufferTail->next = buffer; + Kernel->virtualBufferTail = buffer; + } + + gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Kernel->virtualBufferLock)); + + *Physical = buffer; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkVERIFY_OK(gckOS_Free(os, buffer)); + + /* Return the status. 
*/ + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckKERNEL_DestroyVirtualCommandBuffer( + IN gckKERNEL Kernel, + IN gctSIZE_T Bytes, + IN gctPHYS_ADDR Physical, + IN gctPOINTER Logical + ) +{ + gckOS os; + gckKERNEL kernel; + gckVIRTUAL_COMMAND_BUFFER_PTR buffer = (gckVIRTUAL_COMMAND_BUFFER_PTR)Physical; + + gcmkHEADER(); + gcmkVERIFY_ARGUMENT(buffer != gcvNULL); + + kernel = buffer->virtualBuffer.kernel; + os = kernel->os; + + gcmkVERIFY_OK(gckOS_AcquireMutex(os, kernel->virtualBufferLock, gcvINFINITE)); + + if (buffer == kernel->virtualBufferHead) + { + if ((kernel->virtualBufferHead = buffer->next) == gcvNULL) + { + kernel->virtualBufferTail = gcvNULL; + } + } + else + { + buffer->prev->next = buffer->next; + + if (buffer == kernel->virtualBufferTail) + { + kernel->virtualBufferTail = buffer->prev; + } + else + { + buffer->next->prev = buffer->prev; + } + } + + gcmkVERIFY_OK(gckOS_ReleaseMutex(os, kernel->virtualBufferLock)); + + gcmkVERIFY_OK( + gckKERNEL_FreeVirtualMemory( + Physical, + Logical, + gcvFALSE + )); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gckKERNEL_AllocateVirtualMemory( + IN gckKERNEL Kernel, + IN gctBOOL NonPaged, + IN gctBOOL InUserSpace, + IN OUT gctSIZE_T * Bytes, + OUT gctPHYS_ADDR * Physical, + OUT gctPOINTER * Logical + ) +{ + gckOS os = Kernel->os; + gceSTATUS status; + gctPOINTER logical = gcvNULL; + gctSIZE_T pageCount; + gctSIZE_T bytes = *Bytes; + gckVIRTUAL_BUFFER_PTR buffer = gcvNULL; + gckMMU mmu = gcvNULL; + gctUINT32 allocFlag = 0; + + gcmkHEADER_ARG("Os=0x%X InUserSpace=%d *Bytes=%lu", + os, InUserSpace, gcmOPT_VALUE(Bytes)); + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Bytes != gcvNULL); + gcmkVERIFY_ARGUMENT(*Bytes > 0); + gcmkVERIFY_ARGUMENT(Physical != gcvNULL); + gcmkVERIFY_ARGUMENT(Logical != gcvNULL); + + if (*Physical == gcvNULL) + { + gcmkONERROR(gckOS_Allocate(os, + sizeof(gckVIRTUAL_BUFFER), + (gctPOINTER)&buffer)); + + gcmkONERROR(gckOS_ZeroMemory(buffer, sizeof(gckVIRTUAL_BUFFER))); + } + else + { + buffer = *Physical; + } + + buffer->bytes = bytes; + +#if gcdENABLE_CACHEABLE_COMMAND_BUFFER + allocFlag = gcvALLOC_FLAG_CACHEABLE; +#endif + + if (NonPaged) + { + gcmkONERROR(gckOS_AllocateNonPagedMemory( + os, + InUserSpace, + allocFlag | gcvALLOC_FLAG_CONTIGUOUS, + &bytes, + &buffer->physical, + &logical + )); + } + else + { + gcmkONERROR(gckOS_AllocatePagedMemoryEx( + os, + allocFlag | gcvALLOC_FLAG_NON_CONTIGUOUS, + bytes, + gcvNULL, + &buffer->physical + )); + } + + if (NonPaged) + { + gctSIZE_T pageSize; + gcmkONERROR(gckOS_GetPageSize(os, &pageSize)); + + pageCount = (bytes + pageSize - 1) / pageSize; + + if (InUserSpace) + { + *Logical = + buffer->userLogical = logical; + } + else + { + *Logical = + buffer->kernelLogical = logical; + } + } + else + { + if (InUserSpace) + { + gcmkONERROR(gckOS_CreateUserVirtualMapping(os, + buffer->physical, + bytes, + &logical, + &pageCount)); + + *Logical = + buffer->userLogical = logical; + } + else + { + gcmkONERROR(gckOS_CreateKernelVirtualMapping(os, + buffer->physical, + bytes, + &logical, + &pageCount)); + + *Logical = + buffer->kernelLogical = logical; + } + + } + + buffer->pageCount = pageCount; + buffer->kernel = Kernel; + + gcmkONERROR(gckOS_GetProcessID(&buffer->pid)); + +#if gcdPROCESS_ADDRESS_SPACE + gcmkONERROR(gckKERNEL_GetProcessMMU(Kernel, &mmu)); + buffer->mmu = mmu; +#else + mmu = Kernel->mmu; +#endif + + gcmkONERROR(gckMMU_AllocatePages(mmu, + pageCount, + &buffer->pageTable, + &buffer->gpuAddress)); + +#if gcdENABLE_TRUST_APPLICATION + if (Kernel->hardware->options.secureMode == gcvSECURE_IN_TA) + { 
+ gcmkONERROR(gckKERNEL_MapInTrustApplicaiton( + Kernel, + logical, + buffer->physical, + buffer->gpuAddress, + pageCount + )); + } + else +#endif + { + gcmkONERROR(gckOS_MapPagesEx(os, + Kernel->core, + buffer->physical, + pageCount, + buffer->gpuAddress, + buffer->pageTable, + gcvFALSE, + gcvSURF_TYPE_UNKNOWN + )); + } + + gcmkONERROR(gckMMU_Flush(mmu, gcvSURF_INDEX)); + + if (*Physical == gcvNULL) + *Physical = buffer; + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL, + "gpuAddress = %x pageCount = %d kernelLogical = %x userLogical=%x", + buffer->gpuAddress, buffer->pageCount, + buffer->kernelLogical, buffer->userLogical); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (buffer && buffer->gpuAddress) + { + gcmkVERIFY_OK( + gckMMU_FreePages(mmu, gcvFALSE, buffer->gpuAddress, buffer->pageTable, buffer->pageCount)); + } + + if (NonPaged && buffer->physical) + { + gcmkVERIFY_OK(gckOS_FreeNonPagedMemory( + os, + bytes, + buffer->physical, + logical + )); + } + else + { + if (buffer && buffer->userLogical) + { + gcmkVERIFY_OK( + gckOS_DestroyUserVirtualMapping(os, + buffer->physical, + bytes, + (NonPaged ? 0 : buffer->userLogical))); + } + + if (buffer && buffer->kernelLogical) + { + gcmkVERIFY_OK( + gckOS_DestroyKernelVirtualMapping(os, + buffer->physical, + bytes, + (NonPaged ? 0 : buffer->kernelLogical))); + } + + if (buffer && buffer->physical) + { + gcmkVERIFY_OK(gckOS_FreePagedMemory(os, buffer->physical, bytes)); + } + } + + if (*Physical == gcvNULL) + gcmkVERIFY_OK(gckOS_Free(os, buffer)); + + /* Return the status. 
*/ + gcmkFOOTER(); + return status; + +} + +gceSTATUS +gckKERNEL_FreeVirtualMemory( + IN gctPHYS_ADDR Physical, + IN gctPOINTER Logical, + IN gctBOOL NonPaged + ) +{ + gckOS os; + gckKERNEL kernel; + gckMMU mmu; + gckVIRTUAL_BUFFER_PTR buffer = (gckVIRTUAL_BUFFER_PTR)Physical; + + gcmkHEADER(); + gcmkVERIFY_ARGUMENT(buffer != gcvNULL); + + kernel = buffer->kernel; + os = kernel->os; + +#if gcdPROCESS_ADDRESS_SPACE + gcmkONERROR(gckKERNEL_GetProcessMMU(Kernel, &mmu)); +#else + mmu = kernel->mmu; +#endif + + if (!buffer->userLogical && !NonPaged) + { + gcmkVERIFY_OK(gckOS_DestroyKernelVirtualMapping(os, + buffer->physical, + buffer->bytes, + Logical)); + } + + gcmkVERIFY_OK( + gckMMU_FreePages(mmu, gcvFALSE, buffer->gpuAddress, buffer->pageTable, buffer->pageCount)); + + gcmkVERIFY_OK(gckOS_UnmapPages(os, buffer->pageCount, buffer->gpuAddress)); + + if (NonPaged) + { + gcmkVERIFY_OK(gckOS_FreeNonPagedMemory( + os, + buffer->bytes, + buffer->physical, + Logical + )); + } + else + { + gcmkVERIFY_OK(gckOS_FreePagedMemory(os, buffer->physical, buffer->bytes)); + } + + gcmkVERIFY_OK(gckOS_Free(os, buffer)); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gckKERNEL_GetGPUAddress( + IN gckKERNEL Kernel, + IN gctPOINTER Logical, + IN gctBOOL InUserSpace, + IN gctPHYS_ADDR Physical, + OUT gctUINT32 * Address + ) +{ + gckVIRTUAL_BUFFER_PTR buffer = Physical; + gctPOINTER start; + + gcmkHEADER_ARG("Logical = %x InUserSpace=%d.", Logical, InUserSpace); + + if (InUserSpace) + { + start = buffer->userLogical; + } + else + { + start = buffer->kernelLogical; + } + + gcmkASSERT(Logical >= start + && (Logical < (gctPOINTER)((gctUINT8_PTR)start + buffer->bytes))); + + * Address = buffer->gpuAddress + (gctUINT32)((gctUINT8_PTR)Logical - (gctUINT8_PTR)start); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gckKERNEL_QueryGPUAddress( + IN gckKERNEL Kernel, + IN gctUINT32 GpuAddress, + OUT gckVIRTUAL_COMMAND_BUFFER_PTR * Buffer + ) +{ + 
gckVIRTUAL_COMMAND_BUFFER_PTR buffer; + gctUINT32 start; + gceSTATUS status = gcvSTATUS_NOT_SUPPORTED; + + gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, Kernel->virtualBufferLock, gcvINFINITE)); + + /* Walk all command buffers. */ + for (buffer = Kernel->virtualBufferHead; buffer != gcvNULL; buffer = buffer->next) + { + start = (gctUINT32)buffer->virtualBuffer.gpuAddress; + + if (GpuAddress >= start && GpuAddress <= (start - 1 + buffer->virtualBuffer.pageCount * 4096)) + { + /* Find a range matched. */ + *Buffer = buffer; + status = gcvSTATUS_OK; + break; + } + } + + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->virtualBufferLock)); + + return status; +} + +static void +gckQUEUE_Dequeue( + IN gckQUEUE LinkQueue + ) +{ + gcmkASSERT(LinkQueue->count == LinkQueue->size); + + LinkQueue->count--; + LinkQueue->front = (LinkQueue->front + 1) % gcdLINK_QUEUE_SIZE; +} + +void +gckQUEUE_Enqueue( + IN gckQUEUE LinkQueue, + IN gcuQUEUEDATA *Data + ) +{ + gcuQUEUEDATA * datas = LinkQueue->datas; + + if (LinkQueue->count == LinkQueue->size) + { + gckQUEUE_Dequeue(LinkQueue); + } + + gcmkASSERT(LinkQueue->count < LinkQueue->size); + + LinkQueue->count++; + + datas[LinkQueue->rear] = *Data; + + LinkQueue->rear = (LinkQueue->rear + 1) % LinkQueue->size; +} + +void +gckQUEUE_GetData( + IN gckQUEUE LinkQueue, + IN gctUINT32 Index, + OUT gcuQUEUEDATA ** Data + ) +{ + gcuQUEUEDATA * datas = LinkQueue->datas; + + gcmkASSERT(Index >= 0 && Index < LinkQueue->size); + + *Data = &datas[(Index + LinkQueue->front) % LinkQueue->size]; +} + +gceSTATUS +gckQUEUE_Allocate( + IN gckOS Os, + IN gckQUEUE Queue, + IN gctUINT32 Size + ) +{ + gceSTATUS status; + + gcmkONERROR(gckOS_Allocate( + Os, + gcmSIZEOF(struct _gckLINKDATA) * Size, + (gctPOINTER *)&Queue->datas + )); + + Queue->size = Size; + + return gcvSTATUS_OK; + +OnError: + return status; +} + +gceSTATUS +gckQUEUE_Free( + IN gckOS Os, + IN gckQUEUE Queue + ) +{ + if (Queue->datas) + { + gcmkVERIFY_OK(gckOS_Free(Os, 
(gctPOINTER)Queue->datas)); + } + + return gcvSTATUS_OK; +} + +/******************************************************************************\ +*************************** Pointer - ID translation *************************** +\******************************************************************************/ +#define gcdID_TABLE_LENGTH 1024 +typedef struct _gcsINTEGERDB * gckINTEGERDB; +typedef struct _gcsINTEGERDB +{ + gckOS os; + gctPOINTER* table; + gctPOINTER mutex; + gctUINT32 tableLen; + gctUINT32 currentID; + gctUINT32 unused; +} +gcsINTEGERDB; + +gceSTATUS +gckKERNEL_CreateIntegerDatabase( + IN gckKERNEL Kernel, + OUT gctPOINTER * Database + ) +{ + gceSTATUS status; + gckINTEGERDB database = gcvNULL; + + gcmkHEADER_ARG("Kernel=0x%08X Datbase=0x%08X", Kernel, Database); + + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + gcmkVERIFY_ARGUMENT(Database != gcvNULL); + + /* Allocate a database. */ + gcmkONERROR(gckOS_Allocate( + Kernel->os, gcmSIZEOF(gcsINTEGERDB), (gctPOINTER *)&database)); + + gcmkONERROR(gckOS_ZeroMemory(database, gcmSIZEOF(gcsINTEGERDB))); + + /* Allocate a pointer table. */ + gcmkONERROR(gckOS_Allocate( + Kernel->os, gcmSIZEOF(gctPOINTER) * gcdID_TABLE_LENGTH, (gctPOINTER *)&database->table)); + + gcmkONERROR(gckOS_ZeroMemory(database->table, gcmSIZEOF(gctPOINTER) * gcdID_TABLE_LENGTH)); + + /* Allocate a database mutex. */ + gcmkONERROR(gckOS_CreateMutex(Kernel->os, &database->mutex)); + + /* Initialize. */ + database->currentID = 0; + database->unused = gcdID_TABLE_LENGTH; + database->os = Kernel->os; + database->tableLen = gcdID_TABLE_LENGTH; + + *Database = database; + + gcmkFOOTER_ARG("*Database=0x%08X", *Database); + return gcvSTATUS_OK; + +OnError: + /* Rollback. 
*/ + if (database) + { + if (database->table) + { + gcmkOS_SAFE_FREE(Kernel->os, database->table); + } + + gcmkOS_SAFE_FREE(Kernel->os, database); + } + + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckKERNEL_DestroyIntegerDatabase( + IN gckKERNEL Kernel, + IN gctPOINTER Database + ) +{ + gckINTEGERDB database = Database; + + gcmkHEADER_ARG("Kernel=0x%08X Datbase=0x%08X", Kernel, Database); + + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + gcmkVERIFY_ARGUMENT(Database != gcvNULL); + + /* Destroy pointer table. */ + gcmkOS_SAFE_FREE(Kernel->os, database->table); + + /* Destroy database mutex. */ + gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, database->mutex)); + + /* Destroy database. */ + gcmkOS_SAFE_FREE(Kernel->os, database); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gckKERNEL_AllocateIntegerId( + IN gctPOINTER Database, + IN gctPOINTER Pointer, + OUT gctUINT32 * Id + ) +{ + gceSTATUS status; + gckINTEGERDB database = Database; + gctUINT32 i, unused, currentID, tableLen; + gctPOINTER * table; + gckOS os = database->os; + gctBOOL acquired = gcvFALSE; + + gcmkHEADER_ARG("Database=0x%08X Pointer=0x%08X", Database, Pointer); + + gcmkVERIFY_ARGUMENT(Id != gcvNULL); + + gcmkVERIFY_OK(gckOS_AcquireMutex(os, database->mutex, gcvINFINITE)); + acquired = gcvTRUE; + + if (database->unused < 1) + { + /* Extend table. */ + gcmkONERROR( + gckOS_Allocate(os, + gcmSIZEOF(gctPOINTER) * (database->tableLen + gcdID_TABLE_LENGTH), + (gctPOINTER *)&table)); + + gcmkONERROR(gckOS_ZeroMemory(table + database->tableLen, + gcmSIZEOF(gctPOINTER) * gcdID_TABLE_LENGTH)); + + /* Copy data from old table. */ + gckOS_MemCopy(table, + database->table, + database->tableLen * gcmSIZEOF(gctPOINTER)); + + gcmkOS_SAFE_FREE(os, database->table); + + /* Update databse with new allocated table. 
*/ + database->table = table; + database->currentID = database->tableLen; + database->tableLen += gcdID_TABLE_LENGTH; + database->unused += gcdID_TABLE_LENGTH; + } + + table = database->table; + currentID = database->currentID; + tableLen = database->tableLen; + unused = database->unused; + + /* Connect id with pointer. */ + table[currentID] = Pointer; + + *Id = currentID + 1; + + /* Update the currentID. */ + if (--unused > 0) + { + for (i = 0; i < tableLen; i++) + { + if (++currentID >= tableLen) + { + /* Wrap to the begin. */ + currentID = 0; + } + + if (table[currentID] == gcvNULL) + { + break; + } + } + } + + database->table = table; + database->currentID = currentID; + database->tableLen = tableLen; + database->unused = unused; + + gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex)); + acquired = gcvFALSE; + + gcmkFOOTER_ARG("*Id=%d", *Id); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex)); + } + + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckKERNEL_FreeIntegerId( + IN gctPOINTER Database, + IN gctUINT32 Id + ) +{ + gceSTATUS status; + gckINTEGERDB database = Database; + gckOS os = database->os; + gctBOOL acquired = gcvFALSE; + + gcmkHEADER_ARG("Database=0x%08X Id=%d", Database, Id); + + gcmkVERIFY_OK(gckOS_AcquireMutex(os, database->mutex, gcvINFINITE)); + acquired = gcvTRUE; + + if (!(Id > 0 && Id <= database->tableLen)) + { + gcmkONERROR(gcvSTATUS_NOT_FOUND); + } + + Id -= 1; + + database->table[Id] = gcvNULL; + + if (database->unused++ == 0) + { + database->currentID = Id; + } + + gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex)); + acquired = gcvFALSE; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex)); + } + + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckKERNEL_QueryIntegerId( + IN gctPOINTER Database, + IN gctUINT32 Id, + OUT gctPOINTER * Pointer + ) +{ + gceSTATUS status; + gckINTEGERDB 
database = Database; + gctPOINTER pointer; + gckOS os = database->os; + gctBOOL acquired = gcvFALSE; + + gcmkHEADER_ARG("Database=0x%08X Id=%d", Database, Id); + gcmkVERIFY_ARGUMENT(Pointer != gcvNULL); + + gcmkVERIFY_OK(gckOS_AcquireMutex(os, database->mutex, gcvINFINITE)); + acquired = gcvTRUE; + + if (!(Id > 0 && Id <= database->tableLen)) + { + gcmkONERROR(gcvSTATUS_NOT_FOUND); + } + + Id -= 1; + + pointer = database->table[Id]; + + gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex)); + acquired = gcvFALSE; + + if (pointer) + { + *Pointer = pointer; + } + else + { + gcmkONERROR(gcvSTATUS_NOT_FOUND); + } + + gcmkFOOTER_ARG("*Pointer=0x%08X", *Pointer); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex)); + } + + gcmkFOOTER(); + return status; +} + + +gctUINT32 +gckKERNEL_AllocateNameFromPointer( + IN gckKERNEL Kernel, + IN gctPOINTER Pointer + ) +{ + gceSTATUS status; + gctUINT32 name; + gctPOINTER database = Kernel->db->pointerDatabase; + + gcmkHEADER_ARG("Kernel=0x%X Pointer=0x%X", Kernel, Pointer); + + gcmkONERROR( + gckKERNEL_AllocateIntegerId(database, Pointer, &name)); + + gcmkFOOTER_ARG("name=%d", name); + return name; + +OnError: + gcmkFOOTER(); + return 0; +} + +gctPOINTER +gckKERNEL_QueryPointerFromName( + IN gckKERNEL Kernel, + IN gctUINT32 Name + ) +{ + gceSTATUS status; + gctPOINTER pointer = gcvNULL; + gctPOINTER database = Kernel->db->pointerDatabase; + + gcmkHEADER_ARG("Kernel=0x%X Name=%d", Kernel, Name); + + /* Lookup in database to get pointer. */ + gcmkONERROR(gckKERNEL_QueryIntegerId(database, Name, &pointer)); + + gcmkFOOTER_ARG("pointer=0x%X", pointer); + return pointer; + +OnError: + gcmkFOOTER(); + return gcvNULL; +} + +gceSTATUS +gckKERNEL_DeleteName( + IN gckKERNEL Kernel, + IN gctUINT32 Name + ) +{ + gctPOINTER database = Kernel->db->pointerDatabase; + + gcmkHEADER_ARG("Kernel=0x%X Name=0x%X", Kernel, Name); + + /* Free name if exists. 
*/ + gcmkVERIFY_OK(gckKERNEL_FreeIntegerId(database, Name)); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +***** Shared Buffer ************************************************************ +*******************************************************************************/ + +/******************************************************************************* +** +** gckKERNEL_CreateShBuffer +** +** Create shared buffer. +** The shared buffer can be used across processes. Other process needs call +** gckKERNEL_MapShBuffer before use it. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gctUINT32 Size +** Specify the shared buffer size. +** +** OUTPUT: +** +** gctSHBUF * ShBuf +** Pointer to hold return shared buffer handle. +*/ +gceSTATUS +gckKERNEL_CreateShBuffer( + IN gckKERNEL Kernel, + IN gctUINT32 Size, + OUT gctSHBUF * ShBuf + ) +{ + gceSTATUS status; + gcsSHBUF_PTR shBuf = gcvNULL; + + gcmkHEADER_ARG("Kernel=0x%X, Size=%u", Kernel, Size); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + + if (Size == 0) + { + /* Invalid size. */ + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + else if (Size > 1024) + { + /* Limite shared buffer size. */ + gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); + } + + /* Create a shared buffer structure. */ + gcmkONERROR( + gckOS_Allocate(Kernel->os, + sizeof (gcsSHBUF), + (gctPOINTER *)&shBuf)); + + /* Initialize shared buffer. */ + shBuf->id = 0; + shBuf->reference = gcvNULL; + shBuf->size = Size; + shBuf->data = gcvNULL; + + /* Allocate integer id for this shared buffer. */ + gcmkONERROR( + gckKERNEL_AllocateIntegerId(Kernel->db->pointerDatabase, + shBuf, + &shBuf->id)); + + /* Allocate atom. */ + gcmkONERROR(gckOS_AtomConstruct(Kernel->os, &shBuf->reference)); + + /* Set default reference count to 1. */ + gcmkVERIFY_OK(gckOS_AtomSet(Kernel->os, shBuf->reference, 1)); + + /* Return integer id. 
*/ + *ShBuf = (gctSHBUF)(gctUINTPTR_T)shBuf->id; + + gcmkFOOTER_ARG("*ShBuf=%u", shBuf->id); + return gcvSTATUS_OK; + +OnError: + /* Error roll back. */ + if (shBuf != gcvNULL) + { + if (shBuf->id != 0) + { + gcmkVERIFY_OK( + gckKERNEL_FreeIntegerId(Kernel->db->pointerDatabase, + shBuf->id)); + } + + gcmkOS_SAFE_FREE(Kernel->os, shBuf); + } + + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckKERNEL_DestroyShBuffer +** +** Destroy shared buffer. +** This will decrease reference of specified shared buffer and do actual +** destroy when no reference on it. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gctSHBUF ShBuf +** Specify the shared buffer to be destroyed. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckKERNEL_DestroyShBuffer( + IN gckKERNEL Kernel, + IN gctSHBUF ShBuf + ) +{ + gceSTATUS status; + gcsSHBUF_PTR shBuf; + gctINT32 oldValue = 0; + gctBOOL acquired = gcvFALSE; + + gcmkHEADER_ARG("Kernel=0x%X ShBuf=%u", + Kernel, (gctUINT32)(gctUINTPTR_T) ShBuf); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + gcmkVERIFY_ARGUMENT(ShBuf != gcvNULL); + + /* Acquire mutex. */ + gcmkONERROR( + gckOS_AcquireMutex(Kernel->os, + Kernel->db->pointerDatabaseMutex, + gcvINFINITE)); + acquired = gcvTRUE; + + /* Find shared buffer structure. */ + gcmkONERROR( + gckKERNEL_QueryIntegerId(Kernel->db->pointerDatabase, + (gctUINT32)(gctUINTPTR_T)ShBuf, + (gctPOINTER)&shBuf)); + + gcmkASSERT(shBuf->id == (gctUINT32)(gctUINTPTR_T)ShBuf); + + /* Decrease the reference count. */ + gckOS_AtomDecrement(Kernel->os, shBuf->reference, &oldValue); + + if (oldValue == 1) + { + /* Free integer id. */ + gcmkVERIFY_OK( + gckKERNEL_FreeIntegerId(Kernel->db->pointerDatabase, + shBuf->id)); + + /* Free atom. 
*/ + gcmkVERIFY_OK(gckOS_AtomDestroy(Kernel->os, shBuf->reference)); + + if (shBuf->data) + { + gcmkOS_SAFE_FREE(Kernel->os, shBuf->data); + shBuf->data = gcvNULL; + } + + /* Free the shared buffer. */ + gcmkOS_SAFE_FREE(Kernel->os, shBuf); + } + + /* Release the mutex. */ + gcmkVERIFY_OK( + gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex)); + acquired = gcvFALSE; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + /* Release the mutex. */ + gcmkVERIFY_OK( + gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex)); + } + + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckKERNEL_MapShBuffer +** +** Map shared buffer into this process so that it can be used in this process. +** This will increase reference count on the specified shared buffer. +** Call gckKERNEL_DestroyShBuffer to dereference. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gctSHBUF ShBuf +** Specify the shared buffer to be mapped. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckKERNEL_MapShBuffer( + IN gckKERNEL Kernel, + IN gctSHBUF ShBuf + ) +{ + gceSTATUS status; + gcsSHBUF_PTR shBuf; + gctINT32 oldValue = 0; + gctBOOL acquired = gcvFALSE; + + gcmkHEADER_ARG("Kernel=0x%X ShBuf=%u", + Kernel, (gctUINT32)(gctUINTPTR_T) ShBuf); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + gcmkVERIFY_ARGUMENT(ShBuf != gcvNULL); + + /* Acquire mutex. */ + gcmkONERROR( + gckOS_AcquireMutex(Kernel->os, + Kernel->db->pointerDatabaseMutex, + gcvINFINITE)); + acquired = gcvTRUE; + + /* Find shared buffer structure. */ + gcmkONERROR( + gckKERNEL_QueryIntegerId(Kernel->db->pointerDatabase, + (gctUINT32)(gctUINTPTR_T)ShBuf, + (gctPOINTER)&shBuf)); + + gcmkASSERT(shBuf->id == (gctUINT32)(gctUINTPTR_T)ShBuf); + + /* Increase the reference count. 
*/ + gckOS_AtomIncrement(Kernel->os, shBuf->reference, &oldValue); + + /* Release the mutex. */ + gcmkVERIFY_OK( + gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex)); + acquired = gcvFALSE; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + /* Release the mutex. */ + gcmkVERIFY_OK( + gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex)); + } + + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckKERNEL_WriteShBuffer +** +** Write user data into shared buffer. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gctSHBUF ShBuf +** Specify the shared buffer to be written to. +** +** gctPOINTER UserData +** User mode pointer to hold the source data. +** +** gctUINT32 ByteCount +** Specify number of bytes to write. If this is larger than +** shared buffer size, gcvSTATUS_INVALID_ARGUMENT is returned. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckKERNEL_WriteShBuffer( + IN gckKERNEL Kernel, + IN gctSHBUF ShBuf, + IN gctPOINTER UserData, + IN gctUINT32 ByteCount + ) +{ + gceSTATUS status; + gcsSHBUF_PTR shBuf = gcvNULL; + gctBOOL acquired = gcvFALSE; + + gcmkHEADER_ARG("Kernel=0x%X ShBuf=%u UserData=0x%X ByteCount=%u", + Kernel, (gctUINT32)(gctUINTPTR_T) ShBuf, UserData, ByteCount); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + gcmkVERIFY_ARGUMENT(ShBuf != gcvNULL); + + /* Acquire mutex. */ + gcmkONERROR( + gckOS_AcquireMutex(Kernel->os, + Kernel->db->pointerDatabaseMutex, + gcvINFINITE)); + acquired = gcvTRUE; + + /* Find shared buffer structure. */ + gcmkONERROR( + gckKERNEL_QueryIntegerId(Kernel->db->pointerDatabase, + (gctUINT32)(gctUINTPTR_T)ShBuf, + (gctPOINTER)&shBuf)); + + gcmkASSERT(shBuf->id == (gctUINT32)(gctUINTPTR_T)ShBuf); + + if ((ByteCount > shBuf->size) || + (ByteCount == 0) || + (UserData == gcvNULL)) + { + /* Exceeds buffer max size or invalid. 
*/ + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + if (shBuf->data == gcvNULL) + { + /* Allocate buffer data when first time write. */ + gcmkONERROR(gckOS_Allocate(Kernel->os, ByteCount, &shBuf->data)); + } + + /* Copy data from user. */ + gcmkONERROR( + gckOS_CopyFromUserData(Kernel->os, + shBuf->data, + UserData, + ByteCount)); + + /* Release the mutex. */ + gcmkVERIFY_OK( + gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex)); + acquired = gcvFALSE; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (shBuf && shBuf->data) + { + gcmkOS_SAFE_FREE(Kernel->os, shBuf->data); + shBuf->data = gcvNULL; + } + + if (acquired) + { + /* Release the mutex. */ + gcmkVERIFY_OK( + gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex)); + } + + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckKERNEL_ReadShBuffer +** +** Read data from shared buffer and copy to user pointer. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gctSHBUF ShBuf +** Specify the shared buffer to be read from. +** +** gctPOINTER UserData +** User mode pointer to save output data. +** +** gctUINT32 ByteCount +** Specify number of bytes to read. +** If this is larger than shared buffer size, only avaiable bytes are +** copied. If smaller, copy requested size. +** +** OUTPUT: +** +** gctUINT32 * BytesRead +** Pointer to hold how many bytes actually read from shared buffer. +*/ +gceSTATUS +gckKERNEL_ReadShBuffer( + IN gckKERNEL Kernel, + IN gctSHBUF ShBuf, + IN gctPOINTER UserData, + IN gctUINT32 ByteCount, + OUT gctUINT32 * BytesRead + ) +{ + gceSTATUS status; + gcsSHBUF_PTR shBuf; + gctUINT32 bytes; + gctBOOL acquired = gcvFALSE; + + gcmkHEADER_ARG("Kernel=0x%X ShBuf=%u UserData=0x%X ByteCount=%u", + Kernel, (gctUINT32)(gctUINTPTR_T) ShBuf, UserData, ByteCount); + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + gcmkVERIFY_ARGUMENT(ShBuf != gcvNULL); + + /* Acquire mutex. */ + gcmkONERROR( + gckOS_AcquireMutex(Kernel->os, + Kernel->db->pointerDatabaseMutex, + gcvINFINITE)); + acquired = gcvTRUE; + + /* Find shared buffer structure. */ + gcmkONERROR( + gckKERNEL_QueryIntegerId(Kernel->db->pointerDatabase, + (gctUINT32)(gctUINTPTR_T)ShBuf, + (gctPOINTER)&shBuf)); + + gcmkASSERT(shBuf->id == (gctUINT32)(gctUINTPTR_T)ShBuf); + + if (shBuf->data == gcvNULL) + { + *BytesRead = 0; + + /* No data in shared buffer, skip copy. */ + status = gcvSTATUS_SKIP; + goto OnError; + } + else if (ByteCount == 0) + { + /* Invalid size to read. */ + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + /* Determine bytes to copy. */ + bytes = (ByteCount < shBuf->size) ? ByteCount : shBuf->size; + + /* Copy data to user. */ + gcmkONERROR( + gckOS_CopyToUserData(Kernel->os, + shBuf->data, + UserData, + bytes)); + + /* Return copied size. */ + *BytesRead = bytes; + + /* Release the mutex. */ + gcmkVERIFY_OK( + gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex)); + acquired = gcvFALSE; + + gcmkFOOTER_ARG("*BytesRead=%u", bytes); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + /* Release the mutex. 
*/ + gcmkVERIFY_OK( + gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex)); + } + + gcmkFOOTER(); + return status; +} + +/*******************************************************************************\ +*************************** List Helper ***************************************** +\*******************************************************************************/ + +static void +_ListAdd( + gcsLISTHEAD_PTR New, + gcsLISTHEAD_PTR Prev, + gcsLISTHEAD_PTR Next + ) +{ + Next->prev = New; + New->next = Next; + New->prev = Prev; + Prev->next = New; +} + +void +_ListDel( + gcsLISTHEAD_PTR Prev, + gcsLISTHEAD_PTR Next + ) +{ + Next->prev = Prev; + Prev->next = Next; +} + +void +gcsLIST_Init( + gcsLISTHEAD_PTR Node + ) +{ + Node->prev = Node; + Node->next = Node; +} + +void +gcsLIST_Add( + gcsLISTHEAD_PTR New, + gcsLISTHEAD_PTR Head + ) +{ + _ListAdd(New, Head, Head->next); +} + +void +gcsLIST_AddTail( + gcsLISTHEAD_PTR New, + gcsLISTHEAD_PTR Head + ) +{ + _ListAdd(New, Head->prev, Head); +} + +void +gcsLIST_Del( + gcsLISTHEAD_PTR Node + ) +{ + _ListDel(Node->prev, Node->next); +} + +gctBOOL +gcsLIST_Empty( + gcsLISTHEAD_PTR Head + ) +{ + return Head->next == Head; +} + +/*******************************************************************************\ +********************************* Fence ***************************************** +\*******************************************************************************/ + +gceSTATUS +gckFENCE_Create( + IN gckOS Os, + IN gckKERNEL Kernel, + OUT gckFENCE * Fence + ) +{ + gceSTATUS status; + gckFENCE fence = gcvNULL; + gctSIZE_T pageSize = 4096; + + gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcsFENCE), (gctPOINTER *)&fence)); + gcmkONERROR(gckOS_ZeroMemory(fence, gcmSIZEOF(gcsFENCE))); + gcmkONERROR(gckOS_CreateMutex(Os, (gctPOINTER *)&fence->mutex)); + + fence->kernel = Kernel; + +#if USE_KERNEL_VIRTUAL_BUFFERS + if (Kernel->virtualCommandBuffer) + { + gcmkONERROR(gckKERNEL_AllocateVirtualMemory( + Kernel, + gcvFALSE, 
+ gcvFALSE, + &pageSize, + &fence->physical, + &fence->logical + )); + + gcmkONERROR(gckKERNEL_GetGPUAddress( + Kernel, + fence->logical, + gcvFALSE, + fence->physical, + &fence->address + )); + } + else +#endif + { + gctUINT32 allocFlag = gcvALLOC_FLAG_CONTIGUOUS; + +#if gcdENABLE_CACHEABLE_COMMAND_BUFFER + allocFlag |= gcvALLOC_FLAG_CACHEABLE; +#endif + + gcmkONERROR(gckOS_AllocateNonPagedMemory( + Os, + gcvFALSE, + allocFlag, + &pageSize, + &fence->physical, + &fence->logical + )); + + gcmkONERROR(gckHARDWARE_ConvertLogical( + Kernel->hardware, + fence->logical, + gcvFALSE, + &fence->address + )); + + gcmkONERROR(gckMMU_FillFlatMapping( + Kernel->mmu, fence->address, pageSize + )); + } + + gcsLIST_Init(&fence->waitingList); + + *Fence = fence; + + return gcvSTATUS_OK; +OnError: + if (fence) + { + gckFENCE_Destory(Os, fence); + } + + return status; +} + +gceSTATUS +gckFENCE_Destory( + IN gckOS Os, + OUT gckFENCE Fence + ) +{ + if (Fence->mutex) + { + gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Fence->mutex)); + } + + if (Fence->logical) + { +#if USE_KERNEL_VIRTUAL_BUFFERS + if (Fence->kernel->virtualCommandBuffer) + { + gcmkVERIFY_OK(gckKERNEL_FreeVirtualMemory( + Fence->physical, + Fence->logical, + gcvFALSE + )); + } + else +#endif + { + gcmkVERIFY_OK(gckOS_FreeNonPagedMemory( + Os, + 4096, + Fence->physical, + Fence->logical + )); + } + } + + gcmkOS_SAFE_FREE(Os, Fence); + + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckFENCE_Signal +** +** Signal all completed nodes. 
+** +** +*/ +gceSTATUS +gckFENCE_Signal( + IN gckOS Os, + IN gckFENCE Fence + ) +{ + gcsLISTHEAD_PTR list = &Fence->waitingList; + gcsLISTHEAD_PTR nodeHead, nodeTemp; + gckFENCE_SYNC sync; + gckOS os = Os; + gctUINT64 stamp = *(gctUINT64 *)Fence->logical; + + gcmkVERIFY_OK(gckOS_AcquireMutex(os, Fence->mutex, gcvINFINITE)); + + gcmkLIST_FOR_EACH_SAFE(nodeHead, nodeTemp, list) + { + sync = gcmCONTAINEROF(nodeHead, _gcsFENCE_SYNC, head); + + /* Signal all nodes which are complete. */ + if (sync->commitStamp <= stamp && sync->inList) + { + /* Signal. */ + gckOS_Signal(os, sync->signal, gcvTRUE); + + /* Remove from wait list. */ + gcsLIST_Del(nodeHead); + + /* Mark node not in waiting list. */ + sync->inList = gcvFALSE; + } + } + + gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Fence->mutex)); + + return gcvSTATUS_OK; +} + +gceSTATUS +gckDEVICE_Construct( + IN gckOS Os, + OUT gckDEVICE * Device + ) +{ + gceSTATUS status; + gckDEVICE device; + gctUINT i; + + gcmkHEADER(); + + gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcsDEVICE), (gctPOINTER *)&device)); + + for (i = 0; i < gcvCORE_COUNT; i++) + { + device->coreInfoArray[i].type = gcvHARDWARE_INVALID; + } + device->defaultHwType = gcvHARDWARE_INVALID; + + gckOS_ZeroMemory(device, gcmSIZEOF(gcsDEVICE)); + + gcmkONERROR(gckOS_CreateMutex(Os, &device->stuckDumpMutex)); + gcmkONERROR(gckOS_CreateMutex(Os, &device->commitMutex)); + + device->os = Os; + + *Device = device; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + + if (device != gcvNULL) + { + gckDEVICE_Destroy(Os, device); + } + + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckDEVICE_AddCore( + IN gckDEVICE Device, + IN gceCORE Core, + IN gctUINT ChipID, + IN gctPOINTER Context, + IN gckKERNEL * Kernel + ) +{ + gceSTATUS status; + gcsCORE_INFO * info = Device->coreInfoArray; + gceHARDWARE_TYPE type = (gceHARDWARE_TYPE)((gctUINT)gcvHARDWARE_INVALID); + gctUINT32 index = Device->coreNum; + gctUINT32 i; + gcsCORE_LIST *coreList; + gceHARDWARE_TYPE kernelType; + 
gceHARDWARE_TYPE defaultHwType; + gckKERNEL kernel; + + gcmkASSERT(Device->coreNum < gcvCORE_COUNT); + + if (Core >= gcvCORE_MAJOR && Core <= gcvCORE_3D_MAX) + { + /* Chip ID is only used for 3D cores. */ + if (ChipID == gcvCHIP_ID_DEFAULT) + { + /* Apply default chipID if it is not set. */ + ChipID = Core; + } + } + + /* Construct gckKERNEL for this core. */ + gcmkONERROR(gckKERNEL_Construct( + Device->os, Core, ChipID, Context, Device, Device->database, Kernel)); + + kernel = *Kernel; + + if (Device->database == gcvNULL) + { + Device->database = kernel->db; + } + + kernelType = _GetHardwareType(kernel); + + if (kernelType >= gcvHARDWARE_NUM_TYPES) + { + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + info[index].type = kernelType; + info[index].core = Core; + info[index].kernel = kernel; + info[index].chipID = ChipID; + + if (index == 0) + { + /* First core, map all type/core to it. */ + for (; type != gcvHARDWARE_NUM_TYPES; type = (gceHARDWARE_TYPE)((gctUINT)type + 1)) + { + Device->map[type].num = 0; + + for (i = 0 ; i < 4; i++) + { + Device->map[type].kernels[i] = kernel; + } + } + } + + /* Get core list of this type. */ + coreList = &Device->map[kernelType]; + + /* Setup gceHARDWARE_TYPE to gceCORE mapping. */ + coreList->kernels[coreList->num++] = kernel; + + defaultHwType = kernelType; + if (kernelType == gcvHARDWARE_3D2D) + { + coreList = &Device->map[gcvHARDWARE_3D]; + coreList->kernels[coreList->num++] = kernel; + defaultHwType = gcvHARDWARE_3D; + } + + /* Advance total core number. 
*/ + Device->coreNum++; + + /* Default HW type was chosen: 3D > 2D > VG */ + if (Device->defaultHwType == gcvHARDWARE_INVALID) + { + Device->defaultHwType = defaultHwType; + } + else if (Device->defaultHwType > defaultHwType) + { + Device->defaultHwType = defaultHwType; + } + + return gcvSTATUS_OK; + +OnError: + return status; +} + +gceSTATUS +gckDEVICE_ChipInfo( + IN gckDEVICE Device, + IN gcsHAL_INTERFACE_PTR Interface + ) +{ + gctUINT i; + gcsCORE_INFO * info = Device->coreInfoArray; + + for (i = 0; i < Device->coreNum; i++) + { + Interface->u.ChipInfo.types[i] = info[i].type; + Interface->u.ChipInfo.ids[i] = info[i].chipID; + } + + Interface->u.ChipInfo.count = Device->coreNum; + + return gcvSTATUS_OK; +} + +gceSTATUS +gckDEVICE_Version( + IN gckDEVICE Device, + IN gcsHAL_INTERFACE_PTR Interface + ) +{ + Interface->u.Version.major = gcvVERSION_MAJOR; + Interface->u.Version.minor = gcvVERSION_MINOR; + Interface->u.Version.patch = gcvVERSION_PATCH; + Interface->u.Version.build = gcvVERSION_BUILD; +#if gcmIS_DEBUG(gcdDEBUG_TRACE) + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL, + "KERNEL version %d.%d.%d build %u", + gcvVERSION_MAJOR, gcvVERSION_MINOR, + gcvVERSION_PATCH, gcvVERSION_BUILD); +#endif + + return gcvSTATUS_OK; +} + +gceSTATUS +gckDEVICE_Destroy( + IN gckOS Os, + IN gckDEVICE Device + ) +{ + gctINT i; + gcsCORE_INFO * info = Device->coreInfoArray; + + for (i = Device->coreNum - 1; i >= 0 ; i--) + { + if (info[i].kernel != gcvNULL) + { + gckKERNEL_Destroy(info[i].kernel); + } + } + + if (Device->commitMutex) + { + gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Device->commitMutex)); + } + if (Device->stuckDumpMutex) + { + gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Device->stuckDumpMutex)); + } + + gcmkOS_SAFE_FREE(Os, Device); + + return gcvSTATUS_OK; +} + +static gceSTATUS +gckDEVICE_SetTimeOut( + IN gckDEVICE Device, + IN gcsHAL_INTERFACE_PTR Interface + ) +{ +#if gcdGPU_TIMEOUT + gckKERNEL kernel; + gctUINT i; + gceHARDWARE_TYPE type = Interface->hardwareType; + 
gcsCORE_LIST *coreList; + + coreList = &Device->map[type]; + + for (i = 0; i < coreList->num; i++) + { + kernel = coreList->kernels[i]; + + kernel->timeOut = Interface->u.SetTimeOut.timeOut; + } +#endif + + return gcvSTATUS_OK; +} + + +gceSTATUS +gckDEVICE_Dispatch( + IN gckDEVICE Device, + IN gcsHAL_INTERFACE_PTR Interface + ) +{ + gceSTATUS status = gcvSTATUS_NOT_SUPPORTED; + gckKERNEL kernel; + gceHARDWARE_TYPE type = Interface->hardwareType; + gctUINT32 coreIndex = Interface->coreIndex; + + switch (Interface->command) + { + case gcvHAL_CHIP_INFO: + status = gckDEVICE_ChipInfo(Device, Interface); + break; + + case gcvHAL_VERSION: + status = gckDEVICE_Version(Device, Interface); + break; + + case gcvHAL_SET_TIMEOUT: + status = gckDEVICE_SetTimeOut(Device, Interface); + break; + + default: + status = gcvSTATUS_NOT_SUPPORTED; + break; + } + + if (gcmIS_SUCCESS(status)) + { + /* Dispatch handled in this layer. */ + Interface->status = status; + } + else + { + /* Need go through gckKERNEL dispatch. */ + kernel = Device->map[type].kernels[coreIndex]; + + + { + status = gckKERNEL_Dispatch(kernel, Device, gcvTRUE, Interface); + } + + /* Interface->status is handled in gckKERNEL_Dispatch(). */ + } + + return status; +} + +gceSTATUS +gckDEVICE_GetMMU( + IN gckDEVICE Device, + IN gceHARDWARE_TYPE Type, + IN gckMMU *Mmu + ) +{ + gcmkHEADER(); + gcmkVERIFY_ARGUMENT(Type < gcvHARDWARE_NUM_TYPES); + + *Mmu = Device->mmus[Type]; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gckDEVICE_SetMMU( + IN gckDEVICE Device, + IN gceHARDWARE_TYPE Type, + IN gckMMU Mmu + ) +{ + gcmkHEADER(); + gcmkVERIFY_ARGUMENT(Type < gcvHARDWARE_NUM_TYPES); + + Device->mmus[Type] = Mmu; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckDEVICE_QueryGPUAddress +** +** Search GPUAddress in other core's address space, whose type is same as current +** core. 
It is used to find correct command buffer which is shared by mulitple +** core. +** +*/ +gceSTATUS +gckDEVICE_QueryGPUAddress( + IN gckDEVICE Device, + IN gckKERNEL Kernel, + IN gctUINT32 GPUAddress, + OUT gckVIRTUAL_COMMAND_BUFFER_PTR * Buffer + ) +{ + gceSTATUS status = gcvSTATUS_NOT_FOUND; + gctUINT i; + gceHARDWARE_TYPE kernelType; + + kernelType = _GetHardwareType(Kernel); + + if (Device != gcvNULL) + { + for (i = 0; i < Device->coreNum; i++) + { + if (Device->coreInfoArray[i].type == kernelType) + { + /* Search other core's command buffer list whose type is same. */ + status = gckKERNEL_QueryGPUAddress( + Device->coreInfoArray[i].kernel, GPUAddress, Buffer); + + if (gcmIS_SUCCESS(status)) + { + break; + } + } + } + } + else + { + status = gckKERNEL_QueryGPUAddress(Kernel, GPUAddress, Buffer); + } + + return status; +} + +#if gcdENABLE_TRUST_APPLICATION +gceSTATUS +gckKERNEL_MapInTrustApplicaiton( + IN gckKERNEL Kernel, + IN gctPOINTER Logical, + IN gctPHYS_ADDR Physical, + IN gctUINT32 GPUAddress, + IN gctSIZE_T PageCount + ) +{ + gceSTATUS status; + gctUINT32 * physicalArrayLogical = gcvNULL; + gctSIZE_T bytes; + gctPOINTER logical = Logical; + gctUINT32 i; + gctSIZE_T pageSize; + gctUINT32 pageMask; + + gcmkHEADER(); + + gcmkVERIFY_OK(gckOS_GetPageSize(Kernel->os, &pageSize)); + + pageMask = (gctUINT32)pageSize - 1; + + bytes = PageCount * gcmSIZEOF(gctUINT32); + + gcmkONERROR(gckOS_Allocate( + Kernel->os, + bytes, + (gctPOINTER *)&physicalArrayLogical + )); + + /* Fill in physical array. 
*/ + for (i = 0; i < PageCount; i++) + { + gctPHYS_ADDR_T phys; + status = gckOS_PhysicalToPhysicalAddress( + Kernel->os, + Physical, + i * 4096, + &phys + ); + + if (status == gcvSTATUS_NOT_SUPPORTED) + { + gcmkONERROR(gckOS_GetPhysicalAddress( + Kernel->os, + logical, + &phys + )); + + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical(Kernel->os, phys, &phys)); + } + + phys &= ~pageMask; + + gcmkSAFECASTPHYSADDRT(physicalArrayLogical[i], phys); + + logical = (gctUINT8_PTR)logical + 4096; + } + + gcmkONERROR(gckKERNEL_SecurityMapMemory( + Kernel, + physicalArrayLogical, + 0, + (gctUINT32)PageCount, + &GPUAddress + )); + + gcmkVERIFY_OK(gckOS_Free( + Kernel->os, + physicalArrayLogical + )) + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if(physicalArrayLogical != gcvNULL) + gcmkVERIFY_OK(gckOS_Free( + Kernel->os, + (gctPOINTER)physicalArrayLogical + )); + gcmkFOOTER(); + return status; +} +#endif + +/******************************************************************************* +***** Test Code **************************************************************** +*******************************************************************************/ + diff --git a/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel.h b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel.h new file mode 100644 index 000000000000..eeb2d488cbc4 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel.h @@ -0,0 +1,2062 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, 
subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#ifndef __gc_hal_kernel_h_ +#define __gc_hal_kernel_h_ + +#include "gc_hal.h" +#include "gc_hal_kernel_hardware.h" +#include "gc_hal_driver.h" +#include "gc_hal_kernel_mutex.h" +#include "gc_hal_metadata.h" + + +#if gcdSECURITY || gcdENABLE_TRUST_APPLICATION +#include "gc_hal_security_interface.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/******************************************************************************* +***** New MMU Defination *******************************************************/ +#define gcdMMU_MTLB_SHIFT 22 +#define gcdMMU_STLB_4K_SHIFT 12 +#define gcdMMU_STLB_64K_SHIFT 16 + +#define gcdMMU_MTLB_BITS (32 - gcdMMU_MTLB_SHIFT) +#define gcdMMU_PAGE_4K_BITS gcdMMU_STLB_4K_SHIFT +#define gcdMMU_STLB_4K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_PAGE_4K_BITS) +#define gcdMMU_PAGE_64K_BITS gcdMMU_STLB_64K_SHIFT +#define gcdMMU_STLB_64K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_PAGE_64K_BITS) + +#define gcdMMU_MTLB_ENTRY_NUM (1 << gcdMMU_MTLB_BITS) +#define gcdMMU_MTLB_SIZE (gcdMMU_MTLB_ENTRY_NUM << 2) +#define gcdMMU_STLB_4K_ENTRY_NUM (1 << gcdMMU_STLB_4K_BITS) +#define gcdMMU_STLB_4K_SIZE (gcdMMU_STLB_4K_ENTRY_NUM << 2) +#define gcdMMU_PAGE_4K_SIZE (1 << gcdMMU_STLB_4K_SHIFT) +#define gcdMMU_STLB_64K_ENTRY_NUM (1 << gcdMMU_STLB_64K_BITS) +#define gcdMMU_STLB_64K_SIZE (gcdMMU_STLB_64K_ENTRY_NUM << 2) +#define gcdMMU_PAGE_64K_SIZE (1 << gcdMMU_STLB_64K_SHIFT) + +#define gcdMMU_MTLB_MASK (~((1U << gcdMMU_MTLB_SHIFT)-1)) +#define gcdMMU_STLB_4K_MASK ((~0U << gcdMMU_STLB_4K_SHIFT) ^ gcdMMU_MTLB_MASK) +#define gcdMMU_PAGE_4K_MASK (gcdMMU_PAGE_4K_SIZE - 1) +#define gcdMMU_STLB_64K_MASK ((~((1U << gcdMMU_STLB_64K_SHIFT)-1)) ^ gcdMMU_MTLB_MASK) +#define gcdMMU_PAGE_64K_MASK (gcdMMU_PAGE_64K_SIZE - 1) + +/* Page offset definitions. 
*/ +#define gcdMMU_OFFSET_4K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_STLB_4K_BITS) +#define gcdMMU_OFFSET_4K_MASK ((1U << gcdMMU_OFFSET_4K_BITS) - 1) +#define gcdMMU_OFFSET_16K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_STLB_16K_BITS) +#define gcdMMU_OFFSET_16K_MASK ((1U << gcdMMU_OFFSET_16K_BITS) - 1) + +#define gcdMMU_MTLB_ENTRY_HINTS_BITS 6 +#define gcdMMU_MTLB_ENTRY_STLB_MASK (~((1U << gcdMMU_MTLB_ENTRY_HINTS_BITS) - 1)) + +#define gcdMMU_MTLB_PRESENT 0x00000001 +#define gcdMMU_MTLB_EXCEPTION 0x00000002 +#define gcdMMU_MTLB_4K_PAGE 0x00000000 + +#define gcdMMU_STLB_PRESENT 0x00000001 +#define gcdMMU_STLB_EXCEPTION 0x00000002 +#define gcdMMU_STLB_4K_PAGE 0x00000000 + +/******************************************************************************* +***** Stuck Dump Level ********************************************************/ + +/* Dump nonthing when stuck happens. */ +#define gcvSTUCK_DUMP_NONE 0 + +/* Dump GPU state and memory near stuck point. */ +#define gcvSTUCK_DUMP_NEARBY_MEMORY 1 + +/* Beside gcvSTUCK_DUMP_NEARBY_MEMORY, dump context buffer and user command buffer. */ +#define gcvSTUCK_DUMP_USER_COMMAND 2 + +/* Beside gcvSTUCK_DUMP_USER_COMMAND, commit will be stall +** to make sure command causing stuck isn't missed. */ +#define gcvSTUCK_DUMP_STALL_COMMAND 3 + +/* Beside gcvSTUCK_DUMP_USER_COMMAND, dump kernel command buffer. */ +#define gcvSTUCK_DUMP_ALL_COMMAND 4 + +/******************************************************************************* +***** Process Secure Cache ****************************************************/ + +#define gcdSECURE_CACHE_LRU 1 +#define gcdSECURE_CACHE_LINEAR 2 +#define gcdSECURE_CACHE_HASH 3 +#define gcdSECURE_CACHE_TABLE 4 + +#define gcvPAGE_TABLE_DIRTY_BIT_OTHER (1 << 0) +#define gcvPAGE_TABLE_DIRTY_BIT_FE (1 << 1) + +typedef struct _gcskLOGICAL_CACHE * gcskLOGICAL_CACHE_PTR; +typedef struct _gcskLOGICAL_CACHE gcskLOGICAL_CACHE; +struct _gcskLOGICAL_CACHE +{ + /* Logical address. 
*/ + gctPOINTER logical; + + /* DMAable address. */ + gctUINT32 dma; + +#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH + /* Pointer to the previous and next hash tables. */ + gcskLOGICAL_CACHE_PTR nextHash; + gcskLOGICAL_CACHE_PTR prevHash; +#endif + +#if gcdSECURE_CACHE_METHOD != gcdSECURE_CACHE_TABLE + /* Pointer to the previous and next slot. */ + gcskLOGICAL_CACHE_PTR next; + gcskLOGICAL_CACHE_PTR prev; +#endif + +#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LINEAR + /* Time stamp. */ + gctUINT64 stamp; +#endif +}; + +typedef struct _gcskSECURE_CACHE * gcskSECURE_CACHE_PTR; +typedef struct _gcskSECURE_CACHE +{ + /* Cache memory. */ + gcskLOGICAL_CACHE cache[1 + gcdSECURE_CACHE_SLOTS]; + + /* Last known index for LINEAR mode. */ + gcskLOGICAL_CACHE_PTR cacheIndex; + + /* Current free slot for LINEAR mode. */ + gctUINT32 cacheFree; + + /* Time stamp for LINEAR mode. */ + gctUINT64 cacheStamp; + +#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH + /* Hash table for HASH mode. */ + gcskLOGICAL_CACHE hash[256]; +#endif +} +gcskSECURE_CACHE; + +/******************************************************************************* +***** Process Database Management *********************************************/ + +typedef enum _gceDATABASE_TYPE +{ + gcvDB_VIDEO_MEMORY = 1, /* Video memory created. */ + gcvDB_COMMAND_BUFFER, /* Command Buffer. */ + gcvDB_NON_PAGED, /* Non paged memory. */ + gcvDB_CONTIGUOUS, /* Contiguous memory. */ + gcvDB_SIGNAL, /* Signal. */ + gcvDB_VIDEO_MEMORY_LOCKED, /* Video memory locked. */ + gcvDB_CONTEXT, /* Context */ + gcvDB_IDLE, /* GPU idle. */ + gcvDB_MAP_MEMORY, /* Map memory */ + gcvDB_MAP_USER_MEMORY, /* Map user memory */ + gcvDB_SHBUF, /* Shared buffer. 
*/ + + gcvDB_NUM_TYPES, +} +gceDATABASE_TYPE; + +#define gcdDATABASE_TYPE_MASK 0x000000FF +#define gcdDB_VIDEO_MEMORY_TYPE_MASK 0x0000FF00 +#define gcdDB_VIDEO_MEMORY_TYPE_SHIFT 8 + +#define gcdDB_VIDEO_MEMORY_POOL_MASK 0x00FF0000 +#define gcdDB_VIDEO_MEMORY_POOL_SHIFT 16 + +typedef struct _gcsDATABASE_RECORD * gcsDATABASE_RECORD_PTR; +typedef struct _gcsDATABASE_RECORD +{ + /* Pointer to kernel. */ + gckKERNEL kernel; + + /* Pointer to next database record. */ + gcsDATABASE_RECORD_PTR next; + + /* Type of record. */ + gceDATABASE_TYPE type; + + /* Data for record. */ + gctPOINTER data; + gctPHYS_ADDR physical; + gctSIZE_T bytes; +} +gcsDATABASE_RECORD; + +typedef struct _gcsDATABASE * gcsDATABASE_PTR; +typedef struct _gcsDATABASE +{ + /* Pointer to next entry is hash list. */ + gcsDATABASE_PTR next; + gctSIZE_T slot; + + /* Process ID. */ + gctUINT32 processID; + + /* Open-Close ref count */ + gctPOINTER refs; + + /* Already mark for delete and cannot reenter */ + gctBOOL deleted; + + /* Sizes to query. */ + gcsDATABASE_COUNTERS vidMem; + gcsDATABASE_COUNTERS nonPaged; + gcsDATABASE_COUNTERS contiguous; + gcsDATABASE_COUNTERS mapUserMemory; + gcsDATABASE_COUNTERS mapMemory; + + gcsDATABASE_COUNTERS vidMemType[gcvSURF_NUM_TYPES]; + /* Counter for each video memory pool. */ + gcsDATABASE_COUNTERS vidMemPool[gcvPOOL_NUMBER_OF_POOLS]; + gctPOINTER counterMutex; + + /* Idle time management. */ + gctUINT64 lastIdle; + gctUINT64 idle; + + /* Pointer to database. */ + gcsDATABASE_RECORD_PTR list[48]; + +#if gcdSECURE_USER + /* Secure cache. 
*/ + gcskSECURE_CACHE cache; +#endif + + gctPOINTER handleDatabase; + gctPOINTER handleDatabaseMutex; + +#if gcdPROCESS_ADDRESS_SPACE + gckMMU mmu; +#endif +} +gcsDATABASE; + +typedef struct _gcsFDPRIVATE * gcsFDPRIVATE_PTR; +typedef struct _gcsFDPRIVATE +{ + gctINT (* release) (gcsFDPRIVATE_PTR Private); +} +gcsFDPRIVATE; + +typedef struct _gcsRECORDER * gckRECORDER; + + +/* Create a process database that will contain all its allocations. */ +gceSTATUS +gckKERNEL_CreateProcessDB( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID + ); + +/* Add a record to the process database. */ +gceSTATUS +gckKERNEL_AddProcessDB( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN gceDATABASE_TYPE Type, + IN gctPOINTER Pointer, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Size + ); + +/* Remove a record to the process database. */ +gceSTATUS +gckKERNEL_RemoveProcessDB( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN gceDATABASE_TYPE Type, + IN gctPOINTER Pointer + ); + +/* Destroy the process database. */ +gceSTATUS +gckKERNEL_DestroyProcessDB( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID + ); + +/* Find a record to the process database. */ +gceSTATUS +gckKERNEL_FindProcessDB( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN gctUINT32 ThreadID, + IN gceDATABASE_TYPE Type, + IN gctPOINTER Pointer, + OUT gcsDATABASE_RECORD_PTR Record + ); + +/* Query the process database. */ +gceSTATUS +gckKERNEL_QueryProcessDB( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN gctBOOL LastProcessID, + IN gceDATABASE_TYPE Type, + OUT gcuDATABASE_INFO * Info + ); + +/* Dump the process database. */ +gceSTATUS +gckKERNEL_DumpProcessDB( + IN gckKERNEL Kernel + ); + +/* Dump the video memory usage for process specified. 
*/ +gceSTATUS +gckKERNEL_DumpVidMemUsage( + IN gckKERNEL Kernel, + IN gctINT32 ProcessID + ); + +gceSTATUS +gckKERNEL_FindDatabase( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN gctBOOL LastProcessID, + OUT gcsDATABASE_PTR * Database + ); + +gceSTATUS +gckKERNEL_FindHandleDatbase( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + OUT gctPOINTER * HandleDatabase, + OUT gctPOINTER * HandleDatabaseMutex + ); + +gceSTATUS +gckKERNEL_GetProcessMMU( + IN gckKERNEL Kernel, + OUT gckMMU * Mmu + ); + +gceSTATUS +gckMMU_FlatMapping( + IN gckMMU Mmu, + IN gctUINT32 Physical, + IN gctUINT32 NumPages + ); + +gceSTATUS +gckMMU_GetPageEntry( + IN gckMMU Mmu, + IN gctUINT32 Address, + IN gctUINT32_PTR *PageTable + ); + +gceSTATUS +gckMMU_FreePagesEx( + IN gckMMU Mmu, + IN gctUINT32 Address, + IN gctSIZE_T PageCount + ); + +gceSTATUS +gckMMU_AttachHardware( + IN gckMMU Mmu, + IN gckHARDWARE Hardware + ); + +void +gckMMU_DumpRecentFreedAddress( + IN gckMMU Mmu + ); + +gceSTATUS +gckKERNEL_CreateIntegerDatabase( + IN gckKERNEL Kernel, + OUT gctPOINTER * Database + ); + +gceSTATUS +gckKERNEL_DestroyIntegerDatabase( + IN gckKERNEL Kernel, + IN gctPOINTER Database + ); + +gceSTATUS +gckKERNEL_AllocateIntegerId( + IN gctPOINTER Database, + IN gctPOINTER Pointer, + OUT gctUINT32 * Id + ); + +gceSTATUS +gckKERNEL_FreeIntegerId( + IN gctPOINTER Database, + IN gctUINT32 Id + ); + +gceSTATUS +gckKERNEL_QueryIntegerId( + IN gctPOINTER Database, + IN gctUINT32 Id, + OUT gctPOINTER * Pointer + ); + +/* Pointer rename */ +gctUINT32 +gckKERNEL_AllocateNameFromPointer( + IN gckKERNEL Kernel, + IN gctPOINTER Pointer + ); + +gctPOINTER +gckKERNEL_QueryPointerFromName( + IN gckKERNEL Kernel, + IN gctUINT32 Name + ); + +gceSTATUS +gckKERNEL_DeleteName( + IN gckKERNEL Kernel, + IN gctUINT32 Name + ); + +#if gcdSECURE_USER +/* Get secure cache from the process database. 
*/ +gceSTATUS +gckKERNEL_GetProcessDBCache( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + OUT gcskSECURE_CACHE_PTR * Cache + ); +#endif + +/******************************************************************************* +********* Timer Management ****************************************************/ +typedef struct _gcsTIMER * gcsTIMER_PTR; +typedef struct _gcsTIMER +{ + /* Start and Stop time holders. */ + gctUINT64 startTime; + gctUINT64 stopTime; +} +gcsTIMER; + +/******************************************************************************\ +********************************** Structures ********************************** +\******************************************************************************/ + +/* gckDB object. */ +struct _gckDB +{ + /* Database management. */ + gcsDATABASE_PTR db[16]; + gctPOINTER dbMutex; + gcsDATABASE_PTR freeDatabase; + gcsDATABASE_RECORD_PTR freeRecord; + gcsDATABASE_PTR lastDatabase; + gctUINT32 lastProcessID; + gctUINT64 lastIdle; + gctUINT64 idleTime; + gctUINT64 lastSlowdown; + gctUINT64 lastSlowdownIdle; + gctPOINTER nameDatabase; + gctPOINTER nameDatabaseMutex; + + gctPOINTER pointerDatabase; + gctPOINTER pointerDatabaseMutex; + + gcsLISTHEAD onFaultVidmemList; + gctPOINTER onFaultVidmemListMutex; +}; + +typedef struct _gckVIRTUAL_BUFFER * gckVIRTUAL_BUFFER_PTR; +typedef struct _gckVIRTUAL_BUFFER +{ + gctPHYS_ADDR physical; + gctPOINTER userLogical; + gctPOINTER kernelLogical; + gctSIZE_T bytes; + gctSIZE_T pageCount; + gctPOINTER pageTable; + gctUINT32 gpuAddress; + gctUINT pid; + gckKERNEL kernel; +#if gcdPROCESS_ADDRESS_SPACE + gckMMU mmu; +#endif +} +gckVIRTUAL_BUFFER; + +typedef struct _gckVIRTUAL_COMMAND_BUFFER * gckVIRTUAL_COMMAND_BUFFER_PTR; +typedef struct _gckVIRTUAL_COMMAND_BUFFER +{ + gckVIRTUAL_BUFFER virtualBuffer; + gckVIRTUAL_COMMAND_BUFFER_PTR next; + gckVIRTUAL_COMMAND_BUFFER_PTR prev; +} +gckVIRTUAL_COMMAND_BUFFER; + +/* gckKERNEL object. */ +struct _gckKERNEL +{ + /* Object. 
*/ + gcsOBJECT object; + + /* Pointer to gckOS object. */ + gckOS os; + + /* Core */ + gceCORE core; + + /* Pointer to gckHARDWARE object. */ + gckHARDWARE hardware; + + /* Pointer to gckCOMMAND object. */ + gckCOMMAND command; + + /* Pointer to gckEVENT object. */ + gckEVENT eventObj; + + /* Pointer to context. */ + gctPOINTER context; + + /* Pointer to gckMMU object. */ + gckMMU mmu; + + /* Arom holding number of clients. */ + gctPOINTER atomClients; + +#if VIVANTE_PROFILER + /* Enable profiling */ + gctBOOL profileEnable; + /* Clear profile register or not*/ + gctBOOL profileCleanRegister; +#endif + +#ifdef QNX_SINGLE_THREADED_DEBUGGING + gctPOINTER debugMutex; +#endif + + /* Database management. */ + gckDB db; + gctBOOL dbCreated; + + gctUINT64 resetTimeStamp; + + /* Pointer to gckEVENT object. */ + gcsTIMER timers[8]; + gctUINT32 timeOut; + + + /* Virtual command buffer list. */ + gckVIRTUAL_COMMAND_BUFFER_PTR virtualBufferHead; + gckVIRTUAL_COMMAND_BUFFER_PTR virtualBufferTail; + gctPOINTER virtualBufferLock; + + /* Enable virtual command buffer. */ + gctBOOL virtualCommandBuffer; + +#if gcdDVFS + gckDVFS dvfs; +#endif + +#if gcdLINUX_SYNC_FILE + gctHANDLE timeline; +#endif + + /* Enable recovery. */ + gctBOOL recovery; + + /* Level of dump information after stuck. */ + gctUINT stuckDump; + +#if gcdSECURITY || gcdENABLE_TRUST_APPLICATION + gctUINT32 securityChannel; +#endif + + /* Timer to monitor GPU stuck. */ + gctPOINTER monitorTimer; + + /* Flag to quit monitor timer. */ + gctBOOL monitorTimerStop; + + /* Monitor states. */ + gctBOOL monitoring; + gctUINT32 lastCommitStamp; + gctUINT32 timer; + gctUINT32 restoreAddress; + gctINT32 restoreMask; + + /* 3DBLIT */ + gckASYNC_COMMAND asyncCommand; + gckEVENT asyncEvent; + + /* Pointer to gckDEVICE object. 
*/ + gckDEVICE device; + + gctUINT chipID; + + gctUINT32 contiguousBaseAddress; + gctUINT32 externalBaseAddress; +}; + +struct _FrequencyHistory +{ + gctUINT32 frequency; + gctUINT32 count; +}; + +/* gckDVFS object. */ +struct _gckDVFS +{ + gckOS os; + gckHARDWARE hardware; + gctPOINTER timer; + gctUINT32 pollingTime; + gctBOOL stop; + gctUINT32 totalConfig; + gctUINT32 loads[8]; + gctUINT8 currentScale; + struct _FrequencyHistory frequencyHistory[16]; +}; + +typedef struct _gcsFENCE * gckFENCE; +typedef struct _gcsFENCE +{ + /* Pointer to required object. */ + gckKERNEL kernel; + + /* Fence location. */ + gctPHYS_ADDR physical; + gctPHYS_ADDR physHandle; + gctPOINTER logical; + gctUINT32 address; + + gcsLISTHEAD waitingList; + gctPOINTER mutex; +} +gcsFENCE; + +/* A sync point attached to fence. */ +typedef struct _gcsFENCE_SYNC * gckFENCE_SYNC; +typedef struct _gcsFENCE_SYNC +{ + /* Stamp of commit access this node. */ + gctUINT64 commitStamp; + + /* Attach to waiting list. */ + gcsLISTHEAD head; + + gctPOINTER signal; + + gctBOOL inList; +} +gcsFENCE_SYNC; + +/* gckCOMMAND object. */ +struct _gckCOMMAND +{ + /* Object. */ + gcsOBJECT object; + + /* Pointer to required object. */ + gckKERNEL kernel; + gckOS os; + + /* Number of bytes per page. */ + gctUINT32 pageSize; + + /* Current pipe select. */ + gcePIPE_SELECT pipeSelect; + + /* Command queue running flag. */ + gctBOOL running; + + /* Idle flag and commit stamp. */ + gctBOOL idle; + gctUINT64 commitStamp; + + /* Command queue mutex. */ + gctPOINTER mutexQueue; + + /* Context switching mutex. */ + gctPOINTER mutexContext; + + /* Context sequence mutex. */ + gctPOINTER mutexContextSeq; + + /* Command queue power semaphore. */ + gctPOINTER powerSemaphore; + + /* Current command queue. 
*/ + struct _gcskCOMMAND_QUEUE + { + gctSIGNAL signal; + gctPHYS_ADDR physical; + gctPOINTER logical; + gctUINT32 address; + } + queues[gcdCOMMAND_QUEUES]; + + gctPHYS_ADDR virtualMemory; + gctPHYS_ADDR physHandle; + gctUINT32 physical; + gctPOINTER logical; + gctUINT32 address; + gctUINT32 offset; + gctINT index; +#if gcmIS_DEBUG(gcdDEBUG_TRACE) + gctUINT wrapCount; +#endif + + /* The command queue is new. */ + gctBOOL newQueue; + + /* Context management. */ + gckCONTEXT currContext; + gctPOINTER stateMap; + + /* Pointer to last WAIT command. */ + gctUINT32 waitPhysical; + gctPOINTER waitLogical; + gctUINT32 waitAddress; + gctUINT32 waitSize; + gctUINT32 waitOffset; + + /* Command buffer alignment. */ + gctUINT32 alignment; + gctUINT32 reservedHead; + + /* Commit counter. */ + gctPOINTER atomCommit; + + /* Kernel process ID. */ + gctUINT32 kernelProcessID; + + /* End Event signal. */ + gctSIGNAL endEventSignal; + +#if gcdSECURE_USER + /* Hint array copy buffer. */ + gctBOOL hintArrayAllocated; + gctUINT hintArraySize; + gctUINT32_PTR hintArray; +#endif + +#if gcdPROCESS_ADDRESS_SPACE + gckMMU currentMmu; +#endif + +#if gcdRECORD_COMMAND + gckRECORDER recorder; +#endif + + gctPOINTER kList; + + gckFENCE fence; + + /* For getting state from async command buffer. */ + gckASYNC_COMMAND asyncCommand; + + gctBOOL dummyDraw; +}; + +typedef struct _gcsEVENT * gcsEVENT_PTR; + +/* Structure holding one event to be processed. */ +typedef struct _gcsEVENT +{ + /* Pointer to next event in queue. */ + gcsEVENT_PTR next; + + /* Event information. */ + gcsHAL_INTERFACE info; + + /* Process ID owning the event. */ + gctUINT32 processID; + +#ifdef __QNXNTO__ + /* Kernel. */ + gckKERNEL kernel; +#endif + + gctBOOL fromKernel; +} +gcsEVENT; + +/* Structure holding a list of events to be processed by an interrupt. */ +typedef struct _gcsEVENT_QUEUE * gcsEVENT_QUEUE_PTR; +typedef struct _gcsEVENT_QUEUE +{ + /* Time stamp. */ + gctUINT64 stamp; + + /* Source of the event. 
*/ + gceKERNEL_WHERE source; + + /* Pointer to head of event queue. */ + gcsEVENT_PTR head; + + /* Pointer to tail of event queue. */ + gcsEVENT_PTR tail; + + /* Next list of events. */ + gcsEVENT_QUEUE_PTR next; + + /* Current commit stamp. */ + gctUINT64 commitStamp; +} +gcsEVENT_QUEUE; + +/* + gcdREPO_LIST_COUNT defines the maximum number of event queues with different + hardware module sources that may coexist at the same time. Only two sources + are supported - gcvKERNEL_COMMAND and gcvKERNEL_PIXEL. gcvKERNEL_COMMAND + source is used only for managing the kernel command queue and is only issued + when the current command queue gets full. Since we commit event queues every + time we commit command buffers, in the worst case we can have up to three + pending event queues: + - gcvKERNEL_PIXEL + - gcvKERNEL_COMMAND (queue overflow) + - gcvKERNEL_PIXEL +*/ +#define gcdREPO_LIST_COUNT 3 + +/* gckEVENT object. */ +struct _gckEVENT +{ + /* The object. */ + gcsOBJECT object; + + /* Pointer to required objects. */ + gckOS os; + gckKERNEL kernel; + + /* Pointer to gckASYNC_COMMAND object. */ + gckASYNC_COMMAND asyncCommand; + + /* Time stamp. */ + gctUINT64 stamp; + gctUINT32 lastCommitStamp; + + /* Queue mutex. */ + gctPOINTER eventQueueMutex; + + /* Array of event queues. */ + gcsEVENT_QUEUE queues[29]; + gctINT32 freeQueueCount; + gctUINT8 lastID; + + /* Pending events. */ + gctPOINTER pending; + + /* List of free event structures and its mutex. */ + gcsEVENT_PTR freeEventList; + gctSIZE_T freeEventCount; + gctPOINTER freeEventMutex; + + /* Event queues. */ + gcsEVENT_QUEUE_PTR queueHead; + gcsEVENT_QUEUE_PTR queueTail; + gcsEVENT_QUEUE_PTR freeList; + gcsEVENT_QUEUE repoList[gcdREPO_LIST_COUNT]; + gctPOINTER eventListMutex; + + gctPOINTER submitTimer; + +#if gcdINTERRUPT_STATISTIC + gctPOINTER interruptCount; +#endif + + gctINT notifyState; +}; + +/* Free all events belonging to a process. 
*/ +gceSTATUS +gckEVENT_FreeProcess( + IN gckEVENT Event, + IN gctUINT32 ProcessID + ); + +gceSTATUS +gckEVENT_Stop( + IN gckEVENT Event, + IN gctUINT32 ProcessID, + IN gctPHYS_ADDR Handle, + IN gctSIZE_T Offset, + IN gctPOINTER Logical, + IN gctUINT32 Address, + IN gctSIGNAL Signal, + IN OUT gctUINT32 * waitSize + ); + +typedef struct _gcsLOCK_INFO * gcsLOCK_INFO_PTR; +typedef struct _gcsLOCK_INFO +{ + gctUINT32 GPUAddresses[gcdMAX_GPU_COUNT]; + gctPOINTER pageTables[gcdMAX_GPU_COUNT]; + gctUINT32 lockeds[gcdMAX_GPU_COUNT]; + gckKERNEL lockKernels[gcdMAX_GPU_COUNT]; + gckMMU lockMmus[gcdMAX_GPU_COUNT]; +} +gcsLOCK_INFO; + +typedef struct _gcsGPU_MAP * gcsGPU_MAP_PTR; +typedef struct _gcsGPU_MAP +{ + gctINT pid; + gcsLOCK_INFO lockInfo; + gcsGPU_MAP_PTR prev; + gcsGPU_MAP_PTR next; +} +gcsGPU_MAP; + +/* gcuVIDMEM_NODE structure. */ +typedef union _gcuVIDMEM_NODE +{ + /* Allocated from gckVIDMEM. */ + struct _gcsVIDMEM_NODE_VIDMEM + { + /* Owner of this node. */ + gckVIDMEM memory; + + /* Dual-linked list of nodes. */ + gcuVIDMEM_NODE_PTR next; + gcuVIDMEM_NODE_PTR prev; + + /* Dual linked list of free nodes. */ + gcuVIDMEM_NODE_PTR nextFree; + gcuVIDMEM_NODE_PTR prevFree; + + /* Information for this node. */ + gctSIZE_T offset; + gctSIZE_T bytes; + gctUINT32 alignment; + +#ifdef __QNXNTO__ + /* Client virtual address. */ + gctPOINTER logical; +#endif + + /* Locked counter. */ + gctINT32 locked; + + /* Memory pool. */ + gcePOOL pool; + gctUINT32 physical; + + /* Process ID owning this memory. */ + gctUINT32 processID; + + } + VidMem; + + /* Allocated from gckOS. */ + struct _gcsVIDMEM_NODE_VIRTUAL + { + /* Pointer to gckKERNEL object. */ + gckKERNEL kernel; + + /* Information for this node. */ + /* Contiguously allocated? */ + gctBOOL contiguous; + /* mdl record pointer... a kmalloc address. Process agnostic. */ + gctPHYS_ADDR physical; + gctSIZE_T bytes; + /* do_mmap_pgoff address... mapped per-process. 
*/ + gctPOINTER logical; + + + /* Customer private handle */ + gctUINT32 gid; + + /* Page table information. */ + /* Used only when node is not contiguous */ + gctSIZE_T pageCount; + + /* Used only when node is not contiguous */ + gctPOINTER pageTables[gcdMAX_GPU_COUNT]; + /* Actual physical address */ + gctUINT32 addresses[gcdMAX_GPU_COUNT]; + + /* Locked counter. */ + gctINT32 lockeds[gcdMAX_GPU_COUNT]; + + /* Surface type. */ + gceSURF_TYPE type; + + /* Secure GPU virtual address. */ + gctBOOL secure; + + gctBOOL onFault; + + gcsLISTHEAD head; + } + Virtual; +} +gcuVIDMEM_NODE; + +/* gckVIDMEM object. */ +struct _gckVIDMEM +{ + /* Object. */ + gcsOBJECT object; + + /* Pointer to gckOS object. */ + gckOS os; + + /* mdl record pointer... a kmalloc address. Process agnostic. */ + gctPHYS_ADDR physical; + + /* Information for this video memory heap. */ + gctUINT32 baseAddress; + gctSIZE_T bytes; + gctSIZE_T freeBytes; + gctSIZE_T minFreeBytes; + + /* caps inherit from its allocator, ~0u if allocator was not applicable. */ + gctUINT32 capability; + + /* Mapping for each type of surface. */ + gctINT mapping[gcvSURF_NUM_TYPES]; + + /* Sentinel nodes for up to 8 banks. */ + gcuVIDMEM_NODE sentinel[8]; + + /* Allocation threshold. */ + gctSIZE_T threshold; + + /* The heap mutex. */ + gctPOINTER mutex; +}; + +typedef struct _gcsVIDMEM_NODE +{ + _VIV_VIDMEM_METADATA metadata; + + /* Pointer to gcuVIDMEM_NODE. */ + gcuVIDMEM_NODE_PTR node; + + /* Pointer to gckKERNEL object. */ + gckKERNEL kernel; + + /* Mutex to protect node. */ + gctPOINTER mutex; + + /* Reference count. */ + gctPOINTER reference; + + /* Name for client to import. */ + gctUINT32 name; + + /* dma_buf */ + gctPOINTER dmabuf; + +#if gcdPROCESS_ADDRESS_SPACE + /* Head of mapping list. */ + gcsGPU_MAP_PTR mapHead; + + /* Tail of mapping list. */ + gcsGPU_MAP_PTR mapTail; + + gctPOINTER mapMutex; +#endif + + /* Surface Type. */ + gceSURF_TYPE type; + + /* Pool from which node is allocated. 
*/ + gcePOOL pool; + + gcsFENCE_SYNC sync[gcvENGINE_GPU_ENGINE_COUNT]; + + /* For DRM usage */ + gctUINT64 timeStamp; + gckVIDMEM_NODE tsNode; + gctUINT32 tilingMode; + gctUINT32 tsMode; + gctUINT64 clearValue; +} +gcsVIDMEM_NODE; + +typedef struct _gcsVIDMEM_HANDLE * gckVIDMEM_HANDLE; +typedef struct _gcsVIDMEM_HANDLE +{ + /* Pointer to gckVIDMEM_NODE. */ + gckVIDMEM_NODE node; + + /* Handle for current process. */ + gctUINT32 handle; + + /* Reference count for this handle. */ + gctPOINTER reference; +} +gcsVIDMEM_HANDLE; + +typedef struct _gcsSHBUF * gcsSHBUF_PTR; +typedef struct _gcsSHBUF +{ + /* ID. */ + gctUINT32 id; + + /* Reference count. */ + gctPOINTER reference; + + /* Data size. */ + gctUINT32 size; + + /* Data. */ + gctPOINTER data; +} +gcsSHBUF; + +typedef struct _gcsCORE_INFO +{ + gceHARDWARE_TYPE type; + gceCORE core; + gckKERNEL kernel; + gctUINT chipID; +} +gcsCORE_INFO; + +typedef struct _gcsCORE_LIST +{ + gckKERNEL kernels[gcvCORE_COUNT]; + gctUINT32 num; +} +gcsCORE_LIST; + +/* A gckDEVICE is a group of cores (gckKERNEL in software). */ +typedef struct _gcsDEVICE +{ + gcsCORE_INFO coreInfoArray[gcvCORE_COUNT]; + gctUINT32 coreNum; + gcsCORE_LIST map[gcvHARDWARE_NUM_TYPES]; + gceHARDWARE_TYPE defaultHwType; + + gckOS os; + + /* Process resource database. */ + gckDB database; + + /* Same hardware type shares one MMU. */ + gckMMU mmus[gcvHARDWARE_NUM_TYPES]; + + /* Mutex to make sure stuck dump for multiple cores doesn't interleave. 
*/ + gctPOINTER stuckDumpMutex; + + /* Mutex for multi-core combine mode command submission */ + gctPOINTER commitMutex; +} +gcsDEVICE; + +gceSTATUS +gckVIDMEM_HANDLE_Allocate( + IN gckKERNEL Kernel, + IN gckVIDMEM_NODE Node, + OUT gctUINT32 * Handle + ); + +gceSTATUS +gckVIDMEM_HANDLE_Reference( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN gctUINT32 Handle + ); + +gceSTATUS +gckVIDMEM_HANDLE_Dereference( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN gctUINT32 Handle + ); + +gceSTATUS +gckVIDMEM_NODE_Allocate( + IN gckKERNEL Kernel, + IN gcuVIDMEM_NODE_PTR VideoNode, + IN gceSURF_TYPE Type, + IN gcePOOL Pool, + IN gctUINT32 * Handle + ); + +gceSTATUS +gckVIDMEM_Node_Lock( + IN gckKERNEL Kernel, + IN gckVIDMEM_NODE Node, + OUT gctUINT32 *Address + ); + +gceSTATUS +gckVIDMEM_NODE_Unlock( + IN gckKERNEL Kernel, + IN gckVIDMEM_NODE Node, + IN gctUINT32 ProcessID + ); + +gceSTATUS +gckVIDMEM_NODE_Reference( + IN gckKERNEL Kernel, + IN gckVIDMEM_NODE Node + ); + +gceSTATUS +gckVIDMEM_NODE_Dereference( + IN gckKERNEL Kernel, + IN gckVIDMEM_NODE Node + ); + +gceSTATUS +gckVIDMEM_NODE_Export( + IN gckKERNEL Kernel, + IN gctUINT32 Handle, + IN gctINT32 Flags, + OUT gctPOINTER *DmaBuf, + OUT gctINT32 *FD + ); + +gceSTATUS +gckVIDMEM_NODE_Name( + IN gckKERNEL Kernel, + IN gctUINT32 Handle, + OUT gctUINT32 * Name + ); + +gceSTATUS +gckVIDMEM_NODE_Import( + IN gckKERNEL Kernel, + IN gctUINT32 Name, + OUT gctUINT32 * Handle + ); + +gceSTATUS +gckVIDMEM_NODE_GetFd( + IN gckKERNEL Kernel, + IN gctUINT32 Handle, + OUT gctINT * Fd + ); + +gceSTATUS +gckVIDMEM_HANDLE_LookupAndReference( + IN gckKERNEL Kernel, + IN gctUINT32 Handle, + OUT gckVIDMEM_NODE * Node + ); + +gceSTATUS +gckVIDMEM_HANDLE_Lookup( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN gctUINT32 Handle, + OUT gckVIDMEM_NODE * Node + ); + +gceSTATUS +gckVIDMEM_NODE_WrapUserMemory( + IN gckKERNEL Kernel, + IN gcsUSER_MEMORY_DESC_PTR Desc, + OUT gctUINT32 * Handle, + OUT gctUINT64 * Bytes + ); + 
+gceSTATUS +gckVIDMEM_FindVIDMEM( + IN gckKERNEL Kernel, + IN gctUINT32 HardwareAddress, + OUT gcuVIDMEM_NODE_PTR * Node, + OUT gctUINT32_PTR PageTableEntryValue + ); + +gceSTATUS +gckVIDMEM_QueryNodes( + IN gckKERNEL Kernel, + IN gcePOOL Pool, + OUT gctINT32 *Count, + OUT gcuVIDMEM_NODE_PTR *Nodes + ); + +#if gcdPROCESS_ADDRESS_SPACE +gceSTATUS +gckEVENT_DestroyMmu( + IN gckEVENT Event, + IN gckMMU Mmu, + IN gceKERNEL_WHERE FromWhere + ); +#endif + +typedef struct _gcsADDRESS_AREA * gcsADDRESS_AREA_PTR; +typedef struct _gcsADDRESS_AREA +{ + /* Page table information. */ + gctSIZE_T pageTableSize; + gctPHYS_ADDR pageTablePhysical; + gctUINT32_PTR pageTableLogical; + gctUINT32 pageTableEntries; + + /* Free entries. */ + gctUINT32 heapList; + gctBOOL freeNodes; + + gctUINT32 dynamicMappingStart; + gctUINT32 dynamicMappingEnd; + + gctUINT32_PTR mapLogical; +} +gcsADDRESS_AREA; + +/* gckMMU object. */ +struct _gckMMU +{ + /* The object. */ + gcsOBJECT object; + + /* Pointer to gckOS object. */ + gckOS os; + + /* Pointer to gckHARDWARE object. */ + gckHARDWARE hardware; + + /* The page table mutex. */ + gctPOINTER pageTableMutex; + + /* Master TLB information. */ + gctSIZE_T mtlbSize; + gctPHYS_ADDR mtlbPhysical; + gctUINT32_PTR mtlbLogical; + gctUINT32 mtlbEntries; + + gctPOINTER staticSTLB; + gctBOOL enabled; + +#if gcdPROCESS_ADDRESS_SPACE + gctPOINTER pageTableDirty[gcdMAX_GPU_COUNT]; + gctPOINTER stlbs; +#endif + + gctPOINTER safePageLogical; + gctPHYS_ADDR safePagePhysical; + gctUINT32 safeAddress; + gctSIZE_T safePageSize; + + /* physBase,physSize flat mapping area. */ + gctUINT32 flatMappingRangeCount; + gcsFLAT_MAPPING_RANGE flatMappingRanges[gcdMAX_FLAT_MAPPING_COUNT]; + + /* List of hardware which uses this MMU. 
*/ + gcsLISTHEAD hardwareList; + + struct _gckQUEUE recentFreedAddresses; + + gcsADDRESS_AREA area[gcvADDRESS_AREA_COUNT]; + + gctUINT32 contiguousBaseAddress; + gctUINT32 externalBaseAddress; +}; + +typedef struct _gcsASYNC_COMMAND +{ + gckOS os; + gckHARDWARE hardware; + gckKERNEL kernel; + + gctPOINTER mutex; + gcsFE fe; + + gctUINT32 reservedTail; + gctUINT64 commitStamp; + + gckFENCE fence; + + gctPOINTER kList; +} +gcsASYNC_COMMAND; + + +gceSTATUS +gckOS_CreateKernelVirtualMapping( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + OUT gctPOINTER * Logical, + OUT gctSIZE_T * PageCount + ); + +gceSTATUS +gckOS_DestroyKernelVirtualMapping( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + IN gctPOINTER Logical + ); + +gceSTATUS +gckOS_CreateUserVirtualMapping( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + OUT gctPOINTER * Logical, + OUT gctSIZE_T * PageCount + ); + +gceSTATUS +gckOS_DestroyUserVirtualMapping( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + IN gctPOINTER Logical + ); + +gceSTATUS +gckOS_GetFd( + IN gctSTRING Name, + IN gcsFDPRIVATE_PTR Private, + OUT gctINT *Fd + ); + +/******************************************************************************* +** +** gckOS_ReadMappedPointer +** +** Read pointer mapped from user pointer which returned by gckOS_MapUserPointer. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctPOINTER Address +** Pointer returned by gckOS_MapUserPointer. +** +** gctUINT32_PTR Data +** Pointer to hold 32 bits data. +** +** OUTPUT: +** +** Nothing. 
+*/ +gceSTATUS +gckOS_ReadMappedPointer( + IN gckOS Os, + IN gctPOINTER Address, + IN gctUINT32_PTR Data + ); + +gceSTATUS +gckKERNEL_AllocateVirtualCommandBuffer( + IN gckKERNEL Kernel, + IN gctBOOL InUserSpace, + IN OUT gctSIZE_T * Bytes, + OUT gctPHYS_ADDR * Physical, + OUT gctPOINTER * Logical + ); + +gceSTATUS +gckKERNEL_DestroyVirtualCommandBuffer( + IN gckKERNEL Kernel, + IN gctSIZE_T Bytes, + IN gctPHYS_ADDR Physical, + IN gctPOINTER Logical + ); + +gceSTATUS +gckKERNEL_AllocateVirtualMemory( + IN gckKERNEL Kernel, + IN gctBOOL NonPaged, + IN gctBOOL InUserSpace, + IN OUT gctSIZE_T * Bytes, + OUT gctPHYS_ADDR * Physical, + OUT gctPOINTER * Logical + ); + +gceSTATUS +gckKERNEL_FreeVirtualMemory( + IN gctPHYS_ADDR Physical, + IN gctPOINTER Logical, + IN gctBOOL NonPaged + ); + +gceSTATUS +gckKERNEL_GetGPUAddress( + IN gckKERNEL Kernel, + IN gctPOINTER Logical, + IN gctBOOL InUserSpace, + IN gctPHYS_ADDR Physical, + OUT gctUINT32 * Address + ); + +gceSTATUS +gckKERNEL_QueryGPUAddress( + IN gckKERNEL Kernel, + IN gctUINT32 GpuAddress, + OUT gckVIRTUAL_COMMAND_BUFFER_PTR * Buffer + ); + +gceSTATUS +gckKERNEL_AttachProcess( + IN gckKERNEL Kernel, + IN gctBOOL Attach + ); + +gceSTATUS +gckKERNEL_AttachProcessEx( + IN gckKERNEL Kernel, + IN gctBOOL Attach, + IN gctUINT32 PID + ); + +#if gcdSECURE_USER +gceSTATUS +gckKERNEL_MapLogicalToPhysical( + IN gckKERNEL Kernel, + IN gcskSECURE_CACHE_PTR Cache, + IN OUT gctPOINTER * Data + ); + +gceSTATUS +gckKERNEL_FlushTranslationCache( + IN gckKERNEL Kernel, + IN gcskSECURE_CACHE_PTR Cache, + IN gctPOINTER Logical, + IN gctSIZE_T Bytes + ); +#endif + +gceSTATUS +gckHARDWARE_QueryIdle( + IN gckHARDWARE Hardware, + OUT gctBOOL_PTR IsIdle + ); + +gceSTATUS +gckHARDWARE_WaitFence( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gctUINT64 FenceData, + IN gctUINT32 FenceAddress, + OUT gctUINT32 *Bytes + ); + +gceSTATUS +gckHARDWARE_AddressInHardwareFuncions( + IN gckHARDWARE Hardware, + IN gctUINT32 Address, + OUT 
gctPOINTER *Pointer + ); + +gceSTATUS +gckHARDWARE_UpdateContextID( + IN gckHARDWARE Hardware + ); + +#if gcdSECURITY +gceSTATUS +gckKERNEL_SecurityOpen( + IN gckKERNEL Kernel, + IN gctUINT32 GPU, + OUT gctUINT32 *Channel + ); + +/* +** Close a security service channel +*/ +gceSTATUS +gckKERNEL_SecurityClose( + IN gctUINT32 Channel + ); + +/* +** Security service interface. +*/ +gceSTATUS +gckKERNEL_SecurityCallService( + IN gctUINT32 Channel, + IN OUT gcsTA_INTERFACE * Interface + ); + +gceSTATUS +gckKERNEL_SecurityStartCommand( + IN gckKERNEL Kernel + ); + +gceSTATUS +gckKERNEL_SecurityAllocateSecurityMemory( + IN gckKERNEL Kernel, + IN gctUINT32 Bytes, + OUT gctUINT32 * Handle + ); + +gceSTATUS +gckKERNEL_SecurityExecute( + IN gckKERNEL Kernel, + IN gctPOINTER Buffer, + IN gctUINT32 Bytes + ); + +gceSTATUS +gckKERNEL_SecurityMapMemory( + IN gckKERNEL Kernel, + IN gctUINT32 *PhysicalArray, + IN gctUINT32 PageCount, + OUT gctUINT32 * GPUAddress + ); + +gceSTATUS +gckKERNEL_SecurityUnmapMemory( + IN gckKERNEL Kernel, + IN gctUINT32 GPUAddress, + IN gctUINT32 PageCount + ); + +#endif + +#if gcdENABLE_TRUST_APPLICATION +gceSTATUS +gckKERNEL_SecurityOpen( + IN gckKERNEL Kernel, + IN gctUINT32 GPU, + OUT gctUINT32 *Channel + ); + +/* +** Close a security service channel +*/ +gceSTATUS +gckKERNEL_SecurityClose( + IN gctUINT32 Channel + ); + +/* +** Security service interface. 
+*/ +gceSTATUS +gckKERNEL_SecurityCallService( + IN gctUINT32 Channel, + IN OUT gcsTA_INTERFACE * Interface + ); + +gceSTATUS +gckKERNEL_SecurityStartCommand( + IN gckKERNEL Kernel, + IN gctUINT32 Address, + IN gctUINT32 Bytes + ); + +gceSTATUS +gckKERNEL_SecurityMapMemory( + IN gckKERNEL Kernel, + IN gctUINT32 *PhysicalArray, + IN gctPHYS_ADDR_T Physical, + IN gctUINT32 PageCount, + OUT gctUINT32 * GPUAddress + ); + +gceSTATUS +gckKERNEL_SecurityUnmapMemory( + IN gckKERNEL Kernel, + IN gctUINT32 GPUAddress, + IN gctUINT32 PageCount + ); + +gceSTATUS +gckKERNEL_SecurityDumpMMUException( + IN gckKERNEL Kernel + ); + +gceSTATUS +gckKERNEL_ReadMMUException( + IN gckKERNEL Kernel, + IN gctUINT32_PTR MMUStatus, + IN gctUINT32_PTR MMUException + ); + +gceSTATUS +gckKERNEL_HandleMMUException( + IN gckKERNEL Kernel, + IN gctUINT32 MMUStatus, + IN gctPHYS_ADDR_T Physical, + IN gctUINT32 GPUAddres + ); +#endif + +gceSTATUS +gckKERNEL_CreateShBuffer( + IN gckKERNEL Kernel, + IN gctUINT32 Size, + OUT gctSHBUF * ShBuf + ); + +gceSTATUS +gckKERNEL_DestroyShBuffer( + IN gckKERNEL Kernel, + IN gctSHBUF ShBuf + ); + +gceSTATUS +gckKERNEL_MapShBuffer( + IN gckKERNEL Kernel, + IN gctSHBUF ShBuf + ); + +gceSTATUS +gckKERNEL_WriteShBuffer( + IN gckKERNEL Kernel, + IN gctSHBUF ShBuf, + IN gctPOINTER UserData, + IN gctUINT32 ByteCount + ); + +gceSTATUS +gckKERNEL_ReadShBuffer( + IN gckKERNEL Kernel, + IN gctSHBUF ShBuf, + IN gctPOINTER UserData, + IN gctUINT32 ByteCount, + OUT gctUINT32 * BytesRead + ); + + +/******************************************************************************\ +******************************* gckCONTEXT Object ******************************* +\******************************************************************************/ + +gceSTATUS +gckCONTEXT_Construct( + IN gckOS Os, + IN gckHARDWARE Hardware, + IN gctUINT32 ProcessID, + OUT gckCONTEXT * Context + ); + +gceSTATUS +gckCONTEXT_Destroy( + IN gckCONTEXT Context + ); + +gceSTATUS +gckCONTEXT_Update( + IN 
gckCONTEXT Context, + IN gctUINT32 ProcessID, + IN gcsSTATE_DELTA_PTR StateDelta + ); + +gceSTATUS +gckCONTEXT_MapBuffer( + IN gckCONTEXT Context, + OUT gctUINT32 *Physicals, + OUT gctUINT64 *Logicals, + OUT gctUINT32 *Bytes + ); + +void +gckQUEUE_Enqueue( + IN gckQUEUE LinkQueue, + IN gcuQUEUEDATA *Data + ); + +void +gckQUEUE_GetData( + IN gckQUEUE LinkQueue, + IN gctUINT32 Index, + OUT gcuQUEUEDATA ** Data + ); + +gceSTATUS +gckQUEUE_Allocate( + IN gckOS Os, + IN gckQUEUE Queue, + IN gctUINT32 Size + ); + +gceSTATUS +gckQUEUE_Free( + IN gckOS Os, + IN gckQUEUE Queue + ); + +/******************************************************************************\ +****************************** gckRECORDER Object ****************************** +\******************************************************************************/ +gceSTATUS +gckRECORDER_Construct( + IN gckOS Os, + IN gckHARDWARE Hardware, + OUT gckRECORDER * Recorder + ); + +gceSTATUS +gckRECORDER_Destory( + IN gckOS Os, + IN gckRECORDER Recorder + ); + +void +gckRECORDER_AdvanceIndex( + gckRECORDER Recorder, + gctUINT64 CommitStamp + ); + +void +gckRECORDER_Record( + gckRECORDER Recorder, + gctUINT8_PTR CommandBuffer, + gctUINT32 CommandBytes, + gctUINT8_PTR ContextBuffer, + gctUINT32 ContextBytes + ); + +void +gckRECORDER_Dump( + gckRECORDER Recorder + ); + +gceSTATUS +gckRECORDER_UpdateMirror( + gckRECORDER Recorder, + gctUINT32 State, + gctUINT32 Data + ); + +/******************************************************************************\ +*************************** gckASYNC_COMMAND Object **************************** +\******************************************************************************/ +gceSTATUS +gckASYNC_COMMAND_Construct( + IN gckKERNEL Kernel, + OUT gckASYNC_COMMAND * Command + ); + +gceSTATUS +gckASYNC_COMMAND_Destroy( + IN gckASYNC_COMMAND Command + ); + +gceSTATUS +gckASYNC_COMMAND_Commit( + IN gckASYNC_COMMAND Command, + IN gcoCMDBUF CommandBuffer, + IN gcsQUEUE_PTR EventQueue + ); + 
+gceSTATUS +gckASYNC_COMMAND_EnterCommit( + IN gckASYNC_COMMAND Command + ); + +gceSTATUS +gckASYNC_COMMAND_ExitCommit( + IN gckASYNC_COMMAND Command + ); + +gceSTATUS +gckASYNC_COMMAND_Execute( + IN gckASYNC_COMMAND Command, + IN gctUINT32 Start, + IN gctUINT32 End + ); + +void +gcsLIST_Init( + gcsLISTHEAD_PTR Node + ); + +void +gcsLIST_Add( + gcsLISTHEAD_PTR New, + gcsLISTHEAD_PTR Head + ); + +void +gcsLIST_AddTail( + gcsLISTHEAD_PTR New, + gcsLISTHEAD_PTR Head + ); + +void +gcsLIST_Del( + gcsLISTHEAD_PTR Node + ); + +gctBOOL +gcsLIST_Empty( + gcsLISTHEAD_PTR Head + ); + +#define gcmkLIST_FOR_EACH(pos, head) \ + for (pos = (head)->next; pos != (head); pos = pos->next) + +#define gcmkLIST_FOR_EACH_SAFE(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, n = pos->next) + +gceSTATUS +gckFENCE_Create( + IN gckOS Os, + IN gckKERNEL Kernel, + OUT gckFENCE * Fence + ); + +gceSTATUS +gckFENCE_Destory( + IN gckOS Os, + OUT gckFENCE Fence + ); + +gceSTATUS +gckFENCE_Signal( + IN gckOS Os, + IN gckFENCE Fence + ); + +gceSTATUS +gckDEVICE_Construct( + IN gckOS Os, + OUT gckDEVICE * Device + ); + +gceSTATUS +gckDEVICE_AddCore( + IN gckDEVICE Device, + IN gceCORE Core, + IN gctUINT chipID, + IN gctPOINTER Context, + IN gckKERNEL * Kernel + ); + +gceSTATUS +gckDEVICE_Destroy( + IN gckOS Os, + IN gckDEVICE Device + ); + +gceSTATUS +gckDEVICE_Dispatch( + IN gckDEVICE Device, + IN gcsHAL_INTERFACE_PTR Interface + ); + +gceSTATUS +gckDEVICE_GetMMU( + IN gckDEVICE Device, + IN gceHARDWARE_TYPE Type, + IN gckMMU *Mmu + ); + +gceSTATUS +gckDEVICE_SetMMU( + IN gckDEVICE Device, + IN gceHARDWARE_TYPE Type, + IN gckMMU Mmu + ); + +gceSTATUS +gckDEVICE_QueryGPUAddress( + IN gckDEVICE Device, + IN gckKERNEL Kernel, + IN gctUINT32 GPUAddress, + OUT gckVIRTUAL_COMMAND_BUFFER_PTR * Buffer + ); + +#if gcdENABLE_TRUST_APPLICATION +gceSTATUS +gckKERNEL_MapInTrustApplicaiton( + IN gckKERNEL Kernel, + IN gctPOINTER Logical, + IN gctPHYS_ADDR Physical, + IN 
gctUINT32 GPUAddress, + IN gctSIZE_T PageCount + ); +#endif + +#if gcdSECURITY || gcdENABLE_TRUST_APPLICATION +gceSTATUS +gckOS_OpenSecurityChannel( + IN gckOS Os, + IN gceCORE Core, + OUT gctUINT32 *Channel + ); + +gceSTATUS +gckOS_CloseSecurityChannel( + IN gctUINT32 Channel + ); + +gceSTATUS +gckOS_CallSecurityService( + IN gctUINT32 Channel, + IN gcsTA_INTERFACE * Interface + ); + +gceSTATUS +gckOS_InitSecurityChannel( + OUT gctUINT32 Channel + ); + +gceSTATUS +gckOS_AllocatePageArray( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T PageCount, + OUT gctPOINTER * PageArrayLogical, + OUT gctPHYS_ADDR * PageArrayPhysical + ); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_kernel_h_ */ diff --git a/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_async_command.c b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_async_command.c new file mode 100644 index 000000000000..2a0c5547215e --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_async_command.c @@ -0,0 +1,477 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#include "gc_hal_kernel_precomp.h" +#include "gc_hal_kernel_context.h" + +#define _GC_OBJ_ZONE gcvZONE_ASYNC_COMMAND + +static gceSTATUS +_HandlePatchList( + IN gckASYNC_COMMAND Command, + IN gcoCMDBUF CommandBuffer, + IN gctBOOL NeedCopy + ) +{ + gceSTATUS status; + gcsPATCH_LIST * uList; + gcsPATCH_LIST * previous; + gcsPATCH_LIST * kList; + + gcmkHEADER_ARG( + "Command=0x%x CommandBuffer=0x%x NeedCopy=%d", + Command, CommandBuffer, NeedCopy + ); + + uList = gcmUINT64_TO_PTR(CommandBuffer->patchHead); + + while (uList) + { + gctUINT i; + + kList = gcvNULL; + previous = uList; + + gcmkONERROR(gckKERNEL_OpenUserData( + Command->kernel, + NeedCopy, + Command->kList, + uList, + gcmSIZEOF(gcsPATCH_LIST), + (gctPOINTER *)&kList + )); + + for (i = 0; i < kList->count; i++) + { + gcsPATCH * patch = &kList->patch[i]; + + /* Touch video memory node. */ + gcmkVERIFY_OK(gckVIDMEM_SetCommitStamp(Command->kernel, gcvENGINE_BLT, patch->handle, Command->commitStamp)); + } + + uList = kList->next; + + gcmkVERIFY_OK(gckKERNEL_CloseUserData( + Command->kernel, + NeedCopy, + gcvFALSE, + previous, + gcmSIZEOF(gcsPATCH_LIST), + (gctPOINTER *)&kList + )); + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (kList) + { + gcmkVERIFY_OK(gckKERNEL_CloseUserData( + Command->kernel, + NeedCopy, + gcvFALSE, + previous, + gcmSIZEOF(gcsPATCH_LIST), + (gctPOINTER *)&kList + )); + } + + gcmkFOOTER(); + return status; +} + + +gceSTATUS +gckASYNC_COMMAND_Construct( + IN gckKERNEL Kernel, + OUT gckASYNC_COMMAND * Command + ) +{ + gceSTATUS status; + gckASYNC_COMMAND command; + gckOS os = Kernel->os; + + gcmkHEADER(); + + /* Allocate gckASYNC_COMMAND object. */ + gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcsASYNC_COMMAND), (gctPOINTER *)&command)); + + gckOS_ZeroMemory(command, gcmSIZEOF(gcsASYNC_COMMAND)); + + /* Mutex to protect gckFE. 
*/ + gcmkONERROR(gckOS_CreateMutex(os, &command->mutex)); + + /* Initialize gckFE. */ + gckFE_Initialize(Kernel->hardware, &command->fe); + + /* Initialize gckASYNC_COMMAND object. */ + command->os = os; + command->kernel = Kernel; + command->hardware = Kernel->hardware; + + gcmkVERIFY_OK(gckHARDWARE_QueryCommandBuffer( + Kernel->hardware, + gcvENGINE_BLT, + gcvNULL, + gcvNULL, + &command->reservedTail + )); + + gcmkONERROR(gckFENCE_Create( + os, Kernel, &command->fence + )); + + gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcsPATCH_LIST), &command->kList)); + + /* Commit stamp start from 1. */ + command->commitStamp = 1; + + *Command = command; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Rollback. */ + gckASYNC_COMMAND_Destroy(command); + + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckASYNC_COMMAND_Destroy( + IN gckASYNC_COMMAND Command + ) +{ + gcmkHEADER(); + + if (Command) + { + if (Command->mutex) + { + gcmkVERIFY_OK(gckOS_DeleteMutex(Command->os, Command->mutex)); + } + + if (Command->fence) + { + gcmkVERIFY_OK(gckFENCE_Destory(Command->os, Command->fence)); + } + + if (Command->kList) + { + gcmkOS_SAFE_FREE(Command->os, Command->kList); + } + + if (Command->fe.freeDscriptors) + { + gcmkOS_SAFE_FREE(Command->os, Command->fe.freeDscriptors); + } + + gcmkOS_SAFE_FREE(Command->os, Command); + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gckASYNC_COMMAND_Commit( + IN gckASYNC_COMMAND Command, + IN gcoCMDBUF CommandBuffer, + IN gcsQUEUE_PTR EventQueue + ) +{ + gceSTATUS status; + gctBOOL available = gcvFALSE; + gctBOOL acquired = gcvFALSE; + gcoCMDBUF commandBufferObject = gcvNULL; + struct _gcoCMDBUF _commandBufferObject; + gctUINT8_PTR commandBufferLogical; + gctUINT8_PTR commandBufferTail; + gctUINT commandBufferSize; + gctUINT32 commandBufferAddress; + gcsFEDescriptor descriptor; + gctUINT32 skipFlushBytes; + gctUINT32 fenceBytes; + gctBOOL needCopy; + gctUINT32 oldValue; + gctUINT32 flushBytes; + + gcmkHEADER(); + + 
gckOS_QueryNeedCopy(Command->os, 0, &needCopy); + + gcmkVERIFY_OK(_HandlePatchList(Command, CommandBuffer, needCopy)); + + /* Open user passed gcoCMDBUF object. */ + gcmkONERROR(gckKERNEL_OpenUserData( + Command->kernel, + needCopy, + &_commandBufferObject, + CommandBuffer, + gcmSIZEOF(struct _gcoCMDBUF), + (gctPOINTER *)&commandBufferObject + )); + + gcmkVERIFY_OBJECT(commandBufferObject, gcvOBJ_COMMANDBUFFER); + + gckHARDWARE_FlushAsyncMMU(Command->hardware, gcvNULL, &flushBytes); + + gcmkONERROR(gckOS_AtomicExchange(Command->os, + Command->hardware->pageTableDirty[gcvENGINE_BLT], + 0, + &oldValue)); + + if (oldValue) + { + commandBufferLogical + = (gctUINT8_PTR) gcmUINT64_TO_PTR(commandBufferObject->logical) + + commandBufferObject->startOffset; + + gckHARDWARE_FlushAsyncMMU(Command->hardware, commandBufferLogical, &flushBytes); + + skipFlushBytes = 0; + } + else + { + skipFlushBytes = flushBytes; + } + + /* Compute the command buffer entry and the size. */ + commandBufferLogical + = (gctUINT8_PTR) gcmUINT64_TO_PTR(commandBufferObject->logical) + + commandBufferObject->startOffset + + skipFlushBytes; + + commandBufferSize + = commandBufferObject->offset + + Command->reservedTail + - commandBufferObject->startOffset + - skipFlushBytes; + + commandBufferTail + = commandBufferLogical + + commandBufferSize + - Command->reservedTail; + + /* Get the hardware address. 
*/ + if (Command->kernel && Command->kernel->virtualCommandBuffer) + { + gckKERNEL kernel = Command->kernel; + gckVIRTUAL_COMMAND_BUFFER_PTR virtualCommandBuffer + = gcmNAME_TO_PTR(commandBufferObject->physical); + + if (virtualCommandBuffer == gcvNULL) + { + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + gcmkONERROR(gckKERNEL_GetGPUAddress( + Command->kernel, + commandBufferLogical, + gcvTRUE, + virtualCommandBuffer, + &commandBufferAddress + )); + } + else + { + gcmkONERROR(gckHARDWARE_ConvertLogical( + Command->hardware, + commandBufferLogical, + gcvTRUE, + &commandBufferAddress + )); + } + + gcmkONERROR(gckHARDWARE_Fence( + Command->hardware, + gcvENGINE_BLT, + commandBufferTail, + Command->fence->address, + Command->commitStamp, + &fenceBytes + )); + + descriptor.start = commandBufferAddress; + descriptor.end = commandBufferAddress + commandBufferSize; + + gcmkDUMPCOMMAND( + Command->os, + commandBufferLogical, + commandBufferSize, + gcvDUMP_BUFFER_USER, + gcvFALSE + ); + + gckOS_AcquireMutex(Command->os, Command->mutex, gcvINFINITE); + acquired = gcvTRUE; + + /* Acquire a slot. */ + for(;;) + { + gcmkONERROR(gckFE_ReserveSlot(Command->hardware, &Command->fe, &available)); + + if (available) + { + break; + } + else + { + gcmkTRACE_ZONE(gcvLEVEL_INFO, _GC_OBJ_ZONE, "No available slot, have to wait"); + + gckOS_Delay(Command->os, 1); + } + } + + /* Send descriptor. 
*/ + gckFE_Execute(Command->hardware, &Command->fe, &descriptor); + + Command->commitStamp++; + + gckOS_ReleaseMutex(Command->os, Command->mutex); + acquired = gcvFALSE; + + gcmkVERIFY_OK(gckKERNEL_CloseUserData( + Command->kernel, + needCopy, + gcvFALSE, + CommandBuffer, + gcmSIZEOF(struct _gcoCMDBUF), + (gctPOINTER *)&commandBufferObject + )); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + gckOS_ReleaseMutex(Command->os, Command->mutex); + } + + if (commandBufferObject) + { + gcmkVERIFY_OK(gckKERNEL_CloseUserData( + Command->kernel, + needCopy, + gcvFALSE, + CommandBuffer, + gcmSIZEOF(struct _gcoCMDBUF), + (gctPOINTER *)&commandBufferObject + )); + } + + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckASYNC_COMMAND_EnterCommit( + IN gckASYNC_COMMAND Command + ) +{ + return gckOS_AcquireMutex(Command->os, Command->mutex, gcvINFINITE); +} + + +gceSTATUS +gckASYNC_COMMAND_ExitCommit( + IN gckASYNC_COMMAND Command + ) +{ + return gckOS_ReleaseMutex(Command->os, Command->mutex); +} + +gceSTATUS +gckASYNC_COMMAND_Execute( + IN gckASYNC_COMMAND Command, + IN gctUINT32 Start, + IN gctUINT32 End + ) +{ + gceSTATUS status; + gcsFEDescriptor descriptor; + gctBOOL available; + + descriptor.start = Start; + descriptor.end = End; + + /* Acquire a slot. */ + for(;;) + { + gcmkONERROR(gckFE_ReserveSlot(Command->hardware, &Command->fe, &available)); + + if (available) + { + break; + } + else + { + gckOS_Delay(Command->os, 1); + } + } + + /* Send descriptor. 
*/ + gckFE_Execute(Command->hardware, &Command->fe, &descriptor); + + return gcvSTATUS_OK; + +OnError: + return status; +} + diff --git a/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_command.c b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_command.c new file mode 100644 index 000000000000..3b875996ac1a --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_command.c @@ -0,0 +1,3475 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#include "gc_hal_kernel_precomp.h" +#include "gc_hal_kernel_context.h" + +#define _GC_OBJ_ZONE gcvZONE_COMMAND + +/******************************************************************************\ +********************************* Support Code ********************************* +\******************************************************************************/ + +/******************************************************************************* +** +** _NewQueue +** +** Allocate a new command queue. +** +** INPUT: +** +** gckCOMMAND Command +** Pointer to an gckCOMMAND object. 
+** +** gctBOOL Stalled +** Indicate if hardware is stalled already. +** +** OUTPUT: +** +** gckCOMMAND Command +** gckCOMMAND object has been updated with a new command queue. +*/ +static gceSTATUS +_NewQueue( + IN OUT gckCOMMAND Command, + IN gctBOOL Stalled + ) +{ + gceSTATUS status; + gctINT currentIndex, newIndex; + gctPHYS_ADDR_T physical; + + gcmkHEADER_ARG("Command=0x%x", Command); + + /* Switch to the next command buffer. */ + currentIndex = Command->index; + newIndex = (currentIndex + 1) % gcdCOMMAND_QUEUES; + + /* Wait for availability. */ +#if gcdDUMP_COMMAND + gcmkPRINT("@[kernel.waitsignal]"); +#endif + + gcmkONERROR(gckOS_WaitSignal( + Command->os, + Command->queues[newIndex].signal, + gcvFALSE, + gcvINFINITE + )); + +#if gcmIS_DEBUG(gcdDEBUG_TRACE) + if (newIndex < currentIndex) + { + Command->wrapCount += 1; + + gcmkTRACE_ZONE_N( + gcvLEVEL_INFO, gcvZONE_COMMAND, + 2 * 4, + "%s(%d): queue array wrapped around.\n", + __FUNCTION__, __LINE__ + ); + } + + gcmkTRACE_ZONE_N( + gcvLEVEL_INFO, gcvZONE_COMMAND, + 3 * 4, + "%s(%d): total queue wrap arounds %d.\n", + __FUNCTION__, __LINE__, Command->wrapCount + ); + + gcmkTRACE_ZONE_N( + gcvLEVEL_INFO, gcvZONE_COMMAND, + 3 * 4, + "%s(%d): switched to queue %d.\n", + __FUNCTION__, __LINE__, newIndex + ); +#endif + + /* Update gckCOMMAND object with new command queue. 
*/ + Command->index = newIndex; + Command->newQueue = gcvTRUE; +#if USE_KERNEL_VIRTUAL_BUFFERS + if (Command->kernel->virtualCommandBuffer) + { + gckVIRTUAL_COMMAND_BUFFER_PTR commandBuffer = gcvNULL; + + Command->virtualMemory = Command->queues[newIndex].physical; + + commandBuffer = (gckVIRTUAL_COMMAND_BUFFER_PTR) Command->virtualMemory; + + Command->physHandle = commandBuffer->virtualBuffer.physical; + } + else +#endif + { + Command->physHandle = Command->queues[newIndex].physical; + } + + Command->logical = Command->queues[newIndex].logical; + Command->address = Command->queues[newIndex].address; + Command->offset = 0; + + gcmkONERROR(gckOS_GetPhysicalAddress( + Command->os, + Command->logical, + &physical + )); + + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical( + Command->os, + physical, + &physical + )); + + gcmkSAFECASTPHYSADDRT(Command->physical, physical); + + if (currentIndex != -1) + { + if (Stalled) + { + gckOS_Signal( + Command->os, + Command->queues[currentIndex].signal, + gcvTRUE + ); + } + else + { + /* Mark the command queue as available. */ + gcmkONERROR(gckEVENT_Signal( + Command->kernel->eventObj, + Command->queues[currentIndex].signal, + gcvKERNEL_COMMAND + )); + } + } + + /* Success. */ + gcmkFOOTER_ARG("Command->index=%d", Command->index); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +static gceSTATUS +_IncrementCommitAtom( + IN gckCOMMAND Command, + IN gctBOOL Increment + ) +{ + gceSTATUS status; + gckHARDWARE hardware; + gctINT32 atomValue; + gctBOOL powerAcquired = gcvFALSE; + + gcmkHEADER_ARG("Command=0x%x", Command); + + /* Extract the gckHARDWARE and gckEVENT objects. */ + hardware = Command->kernel->hardware; + gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE); + + /* Grab the power mutex. */ + gcmkONERROR(gckOS_AcquireMutex( + Command->os, hardware->powerMutex, gcvINFINITE + )); + powerAcquired = gcvTRUE; + + /* Increment the commit atom. 
*/ + if (Increment) + { + gcmkONERROR(gckOS_AtomIncrement( + Command->os, Command->atomCommit, &atomValue + )); + } + else + { + gcmkONERROR(gckOS_AtomDecrement( + Command->os, Command->atomCommit, &atomValue + )); + } + + /* Release the power mutex. */ + gcmkONERROR(gckOS_ReleaseMutex( + Command->os, hardware->powerMutex + )); + powerAcquired = gcvFALSE; + + /* Success. */ + gcmkFOOTER(); + return gcvSTATUS_OK; + +OnError: + if (powerAcquired) + { + /* Release the power mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex( + Command->os, hardware->powerMutex + )); + } + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +#if gcdSECURE_USER +static gceSTATUS +_ProcessHints( + IN gckCOMMAND Command, + IN gctUINT32 ProcessID, + IN gcoCMDBUF CommandBuffer + ) +{ + gceSTATUS status = gcvSTATUS_OK; + gckKERNEL kernel; + gctBOOL needCopy = gcvFALSE; + gcskSECURE_CACHE_PTR cache; + gctUINT8_PTR commandBufferLogical; + gctUINT8_PTR hintedData; + gctUINT32_PTR hintArray; + gctUINT i, hintCount; + + gcmkHEADER_ARG( + "Command=0x%08X ProcessID=%d CommandBuffer=0x%08X", + Command, ProcessID, CommandBuffer + ); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND); + + /* Reset state array pointer. */ + hintArray = gcvNULL; + + /* Get the kernel object. */ + kernel = Command->kernel; + + /* Get the cache form the database. */ + gcmkONERROR(gckKERNEL_GetProcessDBCache(kernel, ProcessID, &cache)); + + /* Determine the start of the command buffer. */ + commandBufferLogical + = (gctUINT8_PTR) CommandBuffer->logical + + CommandBuffer->startOffset; + + /* Determine the number of records in the state array. */ + hintCount = CommandBuffer->hintArrayTail - CommandBuffer->hintArray; + + /* Check wehther we need to copy the structures or not. */ + gcmkONERROR(gckOS_QueryNeedCopy(Command->os, ProcessID, &needCopy)); + + /* Get access to the state array. 
*/ + if (needCopy) + { + gctUINT copySize; + + if (Command->hintArrayAllocated && + (Command->hintArraySize < CommandBuffer->hintArraySize)) + { + gcmkONERROR(gcmkOS_SAFE_FREE(Command->os, gcmUINT64_TO_PTR(Command->hintArray))); + Command->hintArraySize = gcvFALSE; + } + + if (!Command->hintArrayAllocated) + { + gctPOINTER pointer = gcvNULL; + + gcmkONERROR(gckOS_Allocate( + Command->os, + CommandBuffer->hintArraySize, + &pointer + )); + + Command->hintArray = gcmPTR_TO_UINT64(pointer); + Command->hintArrayAllocated = gcvTRUE; + Command->hintArraySize = CommandBuffer->hintArraySize; + } + + hintArray = gcmUINT64_TO_PTR(Command->hintArray); + copySize = hintCount * gcmSIZEOF(gctUINT32); + + gcmkONERROR(gckOS_CopyFromUserData( + Command->os, + hintArray, + gcmUINT64_TO_PTR(CommandBuffer->hintArray), + copySize + )); + } + else + { + gctPOINTER pointer = gcvNULL; + + gcmkONERROR(gckOS_MapUserPointer( + Command->os, + gcmUINT64_TO_PTR(CommandBuffer->hintArray), + CommandBuffer->hintArraySize, + &pointer + )); + + hintArray = pointer; + } + + /* Scan through the buffer. */ + for (i = 0; i < hintCount; i += 1) + { + /* Determine the location of the hinted data. */ + hintedData = commandBufferLogical + hintArray[i]; + + /* Map handle into physical address. */ + gcmkONERROR(gckKERNEL_MapLogicalToPhysical( + kernel, cache, (gctPOINTER) hintedData + )); + } + +OnError: + /* Get access to the state array. */ + if (!needCopy && (hintArray != gcvNULL)) + { + gcmkVERIFY_OK(gckOS_UnmapUserPointer( + Command->os, + gcmUINT64_TO_PTR(CommandBuffer->hintArray), + CommandBuffer->hintArraySize, + hintArray + )); + } + + /* Return the status. 
*/
    gcmkFOOTER();
    return status;
}
#endif

#if !gcdNULL_DRIVER
/* Flush the GPU MMU before a commit when the page table has been marked
** dirty.  If the FE's page table entries changed and the FE does not stall
** its prefetch, the FE is additionally paused by appending an EVENT + END
** sequence so the flush takes effect safely.
*/
static gceSTATUS
_FlushMMU(
    IN gckCOMMAND Command
    )
{
#if gcdSECURITY
    return gcvSTATUS_OK;
#else
    gceSTATUS status;
    gctUINT32 oldValue;
    gckHARDWARE hardware = Command->kernel->hardware;
    gctBOOL pause = gcvFALSE;

    gctUINT8_PTR pointer;
    gctUINT32 address;
    gctUINT32 eventBytes;
    gctUINT32 endBytes;
    gctUINT32 bufferSize;
    gctUINT32 executeBytes;
    gctUINT32 waitLinkBytes;

    /* Atomically read-and-clear the render engine's page table dirty flag. */
    gcmkONERROR(gckOS_AtomicExchange(Command->os,
                                     hardware->pageTableDirty[gcvENGINE_RENDER],
                                     0,
                                     &oldValue));

    if (oldValue)
    {
        /* Page table is updated, flush mmu before commit. */
        gcmkONERROR(gckHARDWARE_FlushMMU(hardware));

        if ((oldValue & gcvPAGE_TABLE_DIRTY_BIT_FE)
         && (!hardware->stallFEPrefetch)
        )
        {
            /* FE entries changed and FE prefetch does not stall:
            ** the FE must be paused with an END. */
            pause = gcvTRUE;
        }
    }

    if (pause)
    {
        /* Query size of the EVENT and END commands. */
        gcmkONERROR(gckHARDWARE_Event(hardware, gcvNULL, 0, gcvKERNEL_PIXEL, &eventBytes));
        gcmkONERROR(gckHARDWARE_End(hardware, gcvNULL, ~0U, &endBytes));

        executeBytes = eventBytes + endBytes;

        gcmkONERROR(gckHARDWARE_WaitLink(
            hardware,
            gcvNULL,
            ~0U,
            Command->offset + executeBytes,
            &waitLinkBytes,
            gcvNULL,
            gcvNULL
            ));

        /* Reserve space. */
        gcmkONERROR(gckCOMMAND_Reserve(
            Command,
            executeBytes,
            (gctPOINTER *)&pointer,
            &bufferSize
            ));

        /* Pointer to reserved address. */
        address = Command->address + Command->offset;

        /* Append EVENT(29). */
        gcmkONERROR(gckHARDWARE_Event(
            hardware,
            pointer,
            29,
            gcvKERNEL_PIXEL,
            &eventBytes
            ));

        /* Append END. */
        pointer += eventBytes;
        address += eventBytes;

        gcmkONERROR(gckHARDWARE_End(hardware, pointer, address, &endBytes));

#if USE_KERNEL_VIRTUAL_BUFFERS
        if (hardware->kernel->virtualCommandBuffer)
        {
            /* Record the GPU address of the END for later restart handling. */
            gcmkONERROR(gckKERNEL_GetGPUAddress(
                hardware->kernel,
                pointer,
                gcvFALSE,
                Command->virtualMemory,
                &hardware->lastEnd
                ));
        }
#endif

        gcmkONERROR(gckCOMMAND_Execute(Command, executeBytes));
    }

    return gcvSTATUS_OK;
OnError:
    return status;
#endif
}

/* Submit a dummy draw on hardware that requires one before real work:
** gcvFEATURE_FE_NEED_DUMMYDRAW parts, or USC parts lacking the
** defer-fill fix.
*/
static gceSTATUS
_DummyDraw(
    IN gckCOMMAND Command
    )
{
#if gcdSECURITY
    return gcvSTATUS_OK;
#else
    gceSTATUS status;
    gckHARDWARE hardware = Command->kernel->hardware;

    gctUINT8_PTR pointer;
    gctUINT32 bufferSize;

    gctUINT32 dummyDrawBytes;
    gceDUMMY_DRAW_TYPE dummyDrawType = gcvDUMMY_DRAW_INVALID;

    if (gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_FE_NEED_DUMMYDRAW))
    {
        dummyDrawType = gcvDUMMY_DRAW_GC400;
    }

    if (!gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_USC_DEFER_FILL_FIX) &&
        gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_USC))
    {
        dummyDrawType = gcvDUMMY_DRAW_V60;
    }

    if (dummyDrawType != gcvDUMMY_DRAW_INVALID)
    {
        /* First call with a NULL buffer only queries the size needed. */
        gckHARDWARE_DummyDraw(hardware, gcvNULL, Command->queues[0].address, dummyDrawType, &dummyDrawBytes);

        /* Reserve space.
*/
        gcmkONERROR(gckCOMMAND_Reserve(
            Command,
            dummyDrawBytes,
            (gctPOINTER *)&pointer,
            &bufferSize
            ));

        /* Second call generates the dummy draw into the reserved space. */
        gckHARDWARE_DummyDraw(hardware, pointer, Command->queues[0].address, dummyDrawType, &dummyDrawBytes);

        gcmkONERROR(gckCOMMAND_Execute(Command, dummyDrawBytes));
    }

    return gcvSTATUS_OK;
OnError:
    return status;
#endif
}

#endif

/* Hex-dump Size bytes of Buffer, eight 32-bit words per line, each line
** prefixed with the corresponding GPU address (debug aid).
*/
static void
_DumpBuffer(
    IN gctPOINTER Buffer,
    IN gctUINT32 GpuAddress,
    IN gctSIZE_T Size
    )
{
    gctSIZE_T i, line, left;
    gctUINT32_PTR data = Buffer;

    /* Full 32-byte (8-word) lines, plus a partial trailing line. */
    line = Size / 32;
    left = Size % 32;

    for (i = 0; i < line; i++)
    {
        gcmkPRINT("%08X : %08X %08X %08X %08X %08X %08X %08X %08X",
            GpuAddress, data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7]);
        data += 8;
        GpuAddress += 8 * 4;
    }

    /* Print the remaining 1..7 words (left is a multiple of 4). */
    switch(left)
    {
        case 28:
            gcmkPRINT("%08X : %08X %08X %08X %08X %08X %08X %08X",
                GpuAddress, data[0], data[1], data[2], data[3], data[4], data[5], data[6]);
            break;
        case 24:
            gcmkPRINT("%08X : %08X %08X %08X %08X %08X %08X",
                GpuAddress, data[0], data[1], data[2], data[3], data[4], data[5]);
            break;
        case 20:
            gcmkPRINT("%08X : %08X %08X %08X %08X %08X",
                GpuAddress, data[0], data[1], data[2], data[3], data[4]);
            break;
        case 16:
            gcmkPRINT("%08X : %08X %08X %08X %08X",
                GpuAddress, data[0], data[1], data[2], data[3]);
            break;
        case 12:
            gcmkPRINT("%08X : %08X %08X %08X",
                GpuAddress, data[0], data[1], data[2]);
            break;
        case 8:
            gcmkPRINT("%08X : %08X %08X",
                GpuAddress, data[0], data[1]);
            break;
        case 4:
            gcmkPRINT("%08X : %08X",
                GpuAddress, data[0]);
            break;
        default:
            break;
    }
}

/* Dump all kernel command queues (debug aid).  Helper statuses are
** ignored: failures here only degrade the debug output.
*/
static void
_DumpKernelCommandBuffer(
    IN gckCOMMAND Command
    )
{
    gctINT i;
    gctUINT64 physical = 0;
    gctUINT32 address;
    gctPOINTER entry = gcvNULL;

    for (i = 0; i < gcdCOMMAND_QUEUES; i++)
    {
        entry = Command->queues[i].logical;

        gckOS_GetPhysicalAddress(Command->os, entry, &physical);

        gckOS_CPUPhysicalToGPUPhysical(Command->os, physical,
                                       &physical);

        gcmkPRINT("Kernel command buffer %d\n", i);

        gcmkSAFECASTPHYSADDRT(address, physical);

        _DumpBuffer(entry, address, Command->pageSize);
    }
}

#if !gcdNULL_DRIVER
/* Walk the user-space patch list of CommandBuffer: stamp every touched
** video memory node with the current render commit stamp, and return (in
** *AsyncCommandStamp) the newest BLT-engine stamp the render queue must
** wait for (0 if the async fence has already passed it).
*/
static gceSTATUS
_HandlePatchList(
    IN gckCOMMAND Command,
    IN gcoCMDBUF CommandBuffer,
    IN gctBOOL NeedCopy,
    OUT gctUINT64 *AsyncCommandStamp
    )
{
    gceSTATUS status;
    gcsPATCH_LIST * uList;
    gcsPATCH_LIST * previous;
    gcsPATCH_LIST * kList;
    gctUINT64 asyncStamp = 0;

    gcmkHEADER_ARG(
        "Command=0x%x CommandBuffer=0x%x NeedCopy=%d",
        Command, CommandBuffer, NeedCopy
        );

    uList = gcmUINT64_TO_PTR(CommandBuffer->patchHead);

    while (uList)
    {
        gctUINT i;

        kList = gcvNULL;
        previous = uList;

        gcmkONERROR(gckKERNEL_OpenUserData(
            Command->kernel,
            NeedCopy,
            Command->kList,
            uList,
            gcmSIZEOF(gcsPATCH_LIST),
            (gctPOINTER *)&kList
            ));

        for (i = 0; i < kList->count; i++)
        {
            gctUINT64 stamp = 0;
            gcsPATCH * patch = &kList->patch[i];

            /* Touch video memory node. */
            gcmkVERIFY_OK(gckVIDMEM_SetCommitStamp(Command->kernel, gcvENGINE_RENDER, patch->handle, Command->commitStamp));

            /* Get stamp touched async command buffer. */
            gcmkVERIFY_OK(gckVIDMEM_GetCommitStamp(Command->kernel, gcvENGINE_BLT, patch->handle, &stamp));

            /* Find latest one. */
            asyncStamp = gcmMAX(asyncStamp, stamp);
        }

        uList = kList->next;

        gcmkVERIFY_OK(gckKERNEL_CloseUserData(
            Command->kernel,
            NeedCopy,
            gcvFALSE,
            previous,
            gcmSIZEOF(gcsPATCH_LIST),
            (gctPOINTER *)&kList
            ));
    }

    if ((Command->asyncCommand != gcvNULL)
     && (*(gctUINT64 *)Command->asyncCommand->fence->logical > asyncStamp)
    )
    {
        /* No need to wait for async command buffer. */
        *AsyncCommandStamp = 0;
    }
    else
    {
        /* Need to add a fence wait.
*/ + *AsyncCommandStamp = asyncStamp; + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (kList) + { + gcmkVERIFY_OK(gckKERNEL_CloseUserData( + Command->kernel, + NeedCopy, + gcvFALSE, + previous, + gcmSIZEOF(gcsPATCH_LIST), + (gctPOINTER *)&kList + )); + } + + gcmkFOOTER(); + return status; +} + +static gceSTATUS +_WaitForAsyncCommandStamp( + IN gckCOMMAND Command, + IN gctUINT64 Stamp + ) +{ + gctUINT32 bytes; + gceSTATUS status; + gctUINT32 fenceAddress; + gctUINT32 bufferSize; + gctPOINTER pointer; + gcmkHEADER_ARG("Stamp = 0x%llx", Stamp); + + fenceAddress = Command->asyncCommand->fence->address; + + gcmkONERROR(gckHARDWARE_WaitFence(Command->kernel->hardware, + gcvNULL, + Stamp, + fenceAddress, + &bytes + )); + + gcmkONERROR(gckCOMMAND_Reserve( + Command, + bytes, + &pointer, + &bufferSize + )); + + gcmkONERROR(gckHARDWARE_WaitFence( + Command->kernel->hardware, + pointer, + Stamp, + fenceAddress, + &bytes + )); + + gcmkONERROR(gckCOMMAND_Execute(Command, bytes)); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +/******************************************************************************\ +**************** Helper functions for parsing gcoCMDBUF ************************ +\******************************************************************************/ +static void +_GetCMDBUFSize( + IN gcoCMDBUF CommandBuffer, + OUT gctUINT_PTR CommandBufferSize + ) +{ + *CommandBufferSize + = CommandBuffer->offset + + CommandBuffer->reservedTail + - CommandBuffer->startOffset; +} + +static void +_GetCMDBUFTail( + IN gcoCMDBUF CommandBuffer, + OUT gctUINT8_PTR * Tail + ) +{ + gctUINT8_PTR commandBufferLogical; + gctUINT commandBufferSize; + + commandBufferLogical + = (gctUINT8_PTR) gcmUINT64_TO_PTR(CommandBuffer->logical) + + CommandBuffer->startOffset; + + _GetCMDBUFSize(CommandBuffer, &commandBufferSize); + + *Tail + = commandBufferLogical + + commandBufferSize + - CommandBuffer->reservedTail; +} + +static void 
+_ParseCMDBUFTail( + IN gckHARDWARE Hardware, + IN gcoCMDBUF CommandBuffer, + OUT gctUINT8_PTR * Fence, + OUT gctUINT8_PTR * Link + ) +{ + gctUINT8_PTR tail; + + _GetCMDBUFTail(CommandBuffer, &tail); + + if (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_FENCE_64BIT)) + { + *Fence = tail; + *Link = tail + gcdRENDER_FENCE_LENGTH; + } + else + { + *Fence = gcvNULL; + *Link = tail; + } +} + +static gceSTATUS +_GetCMDBUFEntry( + IN gckCOMMAND Command, + IN gcoCMDBUF CommandBuffer, + OUT gctUINT32_PTR EntryAddress, + OUT gctUINT32_PTR EntryBytes + ) +{ + gceSTATUS status; + gctUINT8_PTR commandBufferLogical; + gctUINT commandBufferSize; + gckVIRTUAL_COMMAND_BUFFER_PTR virtualCommandBuffer; + gctUINT32 commandBufferAddress; + gctUINT offset; + + commandBufferLogical + = (gctUINT8_PTR) gcmUINT64_TO_PTR(CommandBuffer->logical) + + CommandBuffer->startOffset; + + /* Get the hardware address. */ + if (Command->kernel->virtualCommandBuffer) + { + gckKERNEL kernel = Command->kernel; + + virtualCommandBuffer = gcmNAME_TO_PTR(CommandBuffer->physical); + + if (virtualCommandBuffer == gcvNULL) + { + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + gcmkONERROR(gckKERNEL_GetGPUAddress( + Command->kernel, + commandBufferLogical, + gcvTRUE, + virtualCommandBuffer, + &commandBufferAddress + )); + } + else + { + gcmkONERROR(gckHARDWARE_ConvertLogical( + Command->kernel->hardware, + commandBufferLogical, + gcvTRUE, + &commandBufferAddress + )); + } + + /* Get offset. */ + gcmkONERROR(gckHARDWARE_PipeSelect( + Command->kernel->hardware, gcvNULL, gcvPIPE_3D, &offset + )); + + _GetCMDBUFSize(CommandBuffer, &commandBufferSize); + + *EntryAddress = commandBufferAddress + offset; + *EntryBytes = commandBufferSize - offset; + + return gcvSTATUS_OK; + +OnError: + return status; +} + +/******************************************************************************* +** +** Link a list of command buffer together to make them atomic. +** Fence will be added in the last command buffer. 
+*/ +static gceSTATUS +_ProcessUserCommandBufferList( + IN gckCOMMAND Command, + IN gcoCMDBUF CommandBufferListHead, + OUT gcoCMDBUF * CommandBufferListTail + ) +{ + gceSTATUS status; + gctBOOL needCopy; + + struct _gcoCMDBUF _commandBufferObject; + gcoCMDBUF currentCMDBUF; + struct _gcoCMDBUF _nextCMDBUF; + gcoCMDBUF currentCMDBUFUser = CommandBufferListHead; + + gckOS_QueryNeedCopy(Command->os, 0, &needCopy); + + /* Open first gcoCMDBUF object as currentCMDBUF. */ + gcmkONERROR(gckKERNEL_OpenUserData( + Command->kernel, + needCopy, + &_commandBufferObject, + currentCMDBUFUser, + gcmSIZEOF(struct _gcoCMDBUF), + (gctPOINTER *)¤tCMDBUF + )); + + /* Iterate the list. */ + while (currentCMDBUF->nextCMDBUF != 0) + { + gcoCMDBUF nextCMDBUFUser; + gcoCMDBUF nextCMDBUF; + gctUINT8_PTR fenceLogical = gcvNULL; + gctUINT8_PTR linkLogical; + gctUINT32 linkBytes = 8; + gctUINT32 linkLow; + gctUINT32 linkHigh; + + gctUINT32 entryAddress = 0; + gctUINT32 entryBytes = 0; + + nextCMDBUFUser + = gcmUINT64_TO_PTR(currentCMDBUF->nextCMDBUF); + + /* Open next gcoCMDBUF object as nextCMDBUF. */ + gcmkONERROR(gckKERNEL_OpenUserData( + Command->kernel, + needCopy, + &_nextCMDBUF, + nextCMDBUFUser, + gcmSIZEOF(struct _gcoCMDBUF), + (gctPOINTER *)&nextCMDBUF + )); + + /* Get the start hardware address of nextCMDBUF. */ + gcmkONERROR(_GetCMDBUFEntry(Command, + nextCMDBUF, + &entryAddress, + &entryBytes + )); + + /* Process current gcoCMDBUF object. */ + _ParseCMDBUFTail( + Command->kernel->hardware, + currentCMDBUF, + &fenceLogical, + &linkLogical + ); + + /* Don't send fence in the middle of gcoCMDBUF list. */ + if (fenceLogical != gcvNULL) + { + gctUINT i = gcdRENDER_FENCE_LENGTH / gcmSIZEOF(gctUINT32) / 2; + + /* Fill NOPs in space reserved for fence. 
*/ + while (i--) + { + gctSIZE_T nopBytes = 8; + gcmkONERROR(gckHARDWARE_Nop(Command->kernel->hardware, fenceLogical, &nopBytes)); + fenceLogical += nopBytes; + } + } + + /* Generate a LINK from the end of current command buffer + ** to the start of next command buffer. */ + gcmkONERROR(gckHARDWARE_Link( + Command->kernel->hardware, + linkLogical, + entryAddress, + entryBytes, + &linkBytes, + &linkLow, + &linkHigh + )); + + /* Close current gcoCMDBUF object which is processed. */ + gcmkVERIFY_OK(gckKERNEL_CloseUserData( + Command->kernel, + needCopy, + gcvFALSE, + currentCMDBUFUser, + gcmSIZEOF(struct _gcoCMDBUF), + (gctPOINTER *)¤tCMDBUF + )); + + /* Advance to next gcoCMDBUF object. */ + currentCMDBUFUser = nextCMDBUFUser; + currentCMDBUF = nextCMDBUF; + } + + gcmkVERIFY_OK(gckKERNEL_CloseUserData( + Command->kernel, + needCopy, + gcvFALSE, + currentCMDBUFUser, + gcmSIZEOF(struct _gcoCMDBUF), + (gctPOINTER *)¤tCMDBUF + )); + + /* Return the tail of the list. */ + *CommandBufferListTail = currentCMDBUFUser; + + return gcvSTATUS_OK; + +OnError: + return status; +} +#endif + +/******************************************************************************\ +****************************** gckCOMMAND API Code ****************************** +\******************************************************************************/ + +/******************************************************************************* +** +** gckCOMMAND_Construct +** +** Construct a new gckCOMMAND object. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** OUTPUT: +** +** gckCOMMAND * Command +** Pointer to a variable that will hold the pointer to the gckCOMMAND +** object. +*/ +gceSTATUS +gckCOMMAND_Construct( + IN gckKERNEL Kernel, + OUT gckCOMMAND * Command + ) +{ + gckOS os; + gckCOMMAND command = gcvNULL; + gceSTATUS status; + gctINT i; + gctPOINTER pointer = gcvNULL; + gctSIZE_T pageSize; + + gcmkHEADER_ARG("Kernel=0x%x", Kernel); + + /* Verify the arguments. 
*/
    gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
    gcmkVERIFY_ARGUMENT(Command != gcvNULL);

    /* Extract the gckOS object. */
    os = Kernel->os;

    /* Allocate the gckCOMMAND structure. */
    gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(struct _gckCOMMAND), &pointer));
    command = pointer;

    /* Reset the entire object. */
    gcmkONERROR(gckOS_ZeroMemory(command, gcmSIZEOF(struct _gckCOMMAND)));

    /* Initialize the gckCOMMAND object.*/
    command->object.type = gcvOBJ_COMMAND;
    command->kernel = Kernel;
    command->os = os;

    /* Get the command buffer requirements. */
    gcmkONERROR(gckHARDWARE_QueryCommandBuffer(
        Kernel->hardware,
        gcvENGINE_RENDER,
        &command->alignment,
        &command->reservedHead,
        gcvNULL
        ));

    /* Create the command queue mutex. */
    gcmkONERROR(gckOS_CreateMutex(os, &command->mutexQueue));

    /* Create the context switching mutex. */
    gcmkONERROR(gckOS_CreateMutex(os, &command->mutexContext));

    /* Create the context sequence mutex. */
    gcmkONERROR(gckOS_CreateMutex(os, &command->mutexContextSeq));

    /* Create the power management semaphore. */
    gcmkONERROR(gckOS_CreateSemaphore(os, &command->powerSemaphore));

    /* Create the commit atom. */
    gcmkONERROR(gckOS_AtomConstruct(os, &command->atomCommit));

    /* Get the page size from the OS. */
    gcmkONERROR(gckOS_GetPageSize(os, &pageSize));

    gcmkSAFECASTSIZET(command->pageSize, pageSize);

    /* Get process ID. */
    gcmkONERROR(gckOS_GetProcessID(&command->kernelProcessID));

    /* No pipe selected yet; first commit will select one. */
    command->pipeSelect = gcvPIPE_INVALID;

    /* Pre-allocate the command queues.
*/
    for (i = 0; i < gcdCOMMAND_QUEUES; ++i)
    {
#if USE_KERNEL_VIRTUAL_BUFFERS
        if (Kernel->virtualCommandBuffer)
        {
            /* GPU-virtual command queue: allocate and resolve its GPU address. */
            gcmkONERROR(gckKERNEL_AllocateVirtualCommandBuffer(
                Kernel,
                gcvFALSE,
                &pageSize,
                &command->queues[i].physical,
                &command->queues[i].logical
                ));

            gcmkONERROR(gckKERNEL_GetGPUAddress(
                Kernel,
                command->queues[i].logical,
                gcvFALSE,
                command->queues[i].physical,
                &command->queues[i].address
                ));
        }
        else
#endif
        {
            gctUINT32 allocFlag;

#if gcdENABLE_CACHEABLE_COMMAND_BUFFER
            allocFlag = gcvALLOC_FLAG_CACHEABLE | gcvALLOC_FLAG_CONTIGUOUS;
#else
            allocFlag = gcvALLOC_FLAG_CONTIGUOUS;
#endif

            /* Contiguous command queue: allocate, convert to GPU address,
            ** and map it flat in the MMU. */
            gcmkONERROR(gckOS_AllocateNonPagedMemory(
                os,
                gcvFALSE,
                allocFlag,
                &pageSize,
                &command->queues[i].physical,
                &command->queues[i].logical
                ));

            gcmkONERROR(gckHARDWARE_ConvertLogical(
                Kernel->hardware,
                command->queues[i].logical,
                gcvFALSE,
                &command->queues[i].address
                ));

            gcmkONERROR(gckMMU_FillFlatMapping(
                Kernel->mmu, command->queues[i].address, pageSize
                ));
        }

        /* Per-queue "free" signal, initially signaled. */
        gcmkONERROR(gckOS_CreateSignal(
            os, gcvFALSE, &command->queues[i].signal
            ));

        gcmkONERROR(gckOS_Signal(
            os, command->queues[i].signal, gcvTRUE
            ));
    }

#if gcdRECORD_COMMAND
    gcmkONERROR(gckRECORDER_Construct(os, Kernel->hardware, &command->recorder));
#endif

    /* Scratch buffer for copying user patch list nodes. */
    gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcsPATCH_LIST), &command->kList));

    gcmkONERROR(gckFENCE_Create(
        os, Kernel, &command->fence
        ));

    /* No command queue in use yet. */
    command->index = -1;
    command->logical = gcvNULL;
    command->newQueue = gcvFALSE;

    /* Command is not yet running. */
    command->running = gcvFALSE;

    /* Command queue is idle. */
    command->idle = gcvTRUE;

    /* Commit stamp start from 1. */
    command->commitStamp = 1;

    /* END event signal not created. */
    command->endEventSignal = gcvNULL;

    /* First commit with a context must issue the dummy draw. */
    command->dummyDraw = gcvTRUE;

    /* Return pointer to the gckCOMMAND object. */
    *Command = command;

    /* Success.
*/
    gcmkFOOTER_ARG("*Command=0x%x", *Command);
    return gcvSTATUS_OK;

OnError:
    /* Roll back. */
    if (command != gcvNULL)
    {
        gcmkVERIFY_OK(gckCOMMAND_Destroy(command));
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}

/*******************************************************************************
**
** gckCOMMAND_Destroy
**
** Destroy an gckCOMMAND object.
**
** INPUT:
**
**      gckCOMMAND Command
**          Pointer to an gckCOMMAND object to destroy.
**
** OUTPUT:
**
**      Nothing.
*/
gceSTATUS
gckCOMMAND_Destroy(
    IN gckCOMMAND Command
    )
{
    gctINT i;

    gcmkHEADER_ARG("Command=0x%x", Command);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);

    /* Stop the command queue. */
    gcmkVERIFY_OK(gckCOMMAND_Stop(Command));

    /* Tear down every pre-allocated command queue; each member is checked
    ** so a partially constructed object (OnError rollback path in
    ** gckCOMMAND_Construct) is destroyed safely. */
    for (i = 0; i < gcdCOMMAND_QUEUES; ++i)
    {
        if (Command->queues[i].signal)
        {
            gcmkVERIFY_OK(gckOS_DestroySignal(
                Command->os, Command->queues[i].signal
                ));
        }

        if (Command->queues[i].logical)
        {
#if USE_KERNEL_VIRTUAL_BUFFERS
            if (Command->kernel->virtualCommandBuffer)
            {
                gcmkVERIFY_OK(gckKERNEL_DestroyVirtualCommandBuffer(
                    Command->kernel,
                    Command->pageSize,
                    Command->queues[i].physical,
                    Command->queues[i].logical
                    ));
            }
            else
#endif
            {
                gcmkVERIFY_OK(gckOS_FreeNonPagedMemory(
                    Command->os,
                    Command->pageSize,
                    Command->queues[i].physical,
                    Command->queues[i].logical
                    ));
            }
        }
    }

    /* END event signal. */
    if (Command->endEventSignal != gcvNULL)
    {
        gcmkVERIFY_OK(gckOS_DestroySignal(
            Command->os, Command->endEventSignal
            ));
    }

    if (Command->mutexContext)
    {
        /* Delete the context switching mutex. */
        gcmkVERIFY_OK(gckOS_DeleteMutex(Command->os, Command->mutexContext));
    }

    if (Command->mutexContextSeq != gcvNULL)
        gcmkVERIFY_OK(gckOS_DeleteMutex(Command->os, Command->mutexContextSeq));

    if (Command->mutexQueue)
    {
        /* Delete the command queue mutex. */
        gcmkVERIFY_OK(gckOS_DeleteMutex(Command->os, Command->mutexQueue));
    }

    if (Command->powerSemaphore)
    {
        /* Destroy the power management semaphore. */
        gcmkVERIFY_OK(gckOS_DestroySemaphore(Command->os, Command->powerSemaphore));
    }

    if (Command->atomCommit)
    {
        /* Destroy the commit atom. */
        gcmkVERIFY_OK(gckOS_AtomDestroy(Command->os, Command->atomCommit));
    }

#if gcdSECURE_USER
    /* Free state array. */
    if (Command->hintArrayAllocated)
    {
        gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Command->os, gcmUINT64_TO_PTR(Command->hintArray)));
        Command->hintArrayAllocated = gcvFALSE;
    }
#endif

#if gcdRECORD_COMMAND
    gckRECORDER_Destory(Command->os, Command->recorder);
#endif

    if (Command->stateMap)
    {
        gcmkOS_SAFE_FREE(Command->os, Command->stateMap);
    }

    if (Command->kList)
    {
        gcmkOS_SAFE_FREE(Command->os, Command->kList);
    }

    if (Command->fence)
    {
        gcmkVERIFY_OK(gckFENCE_Destory(Command->os, Command->fence));
    }

    /* Mark object as unknown. */
    Command->object.type = gcvOBJ_UNKNOWN;

    /* Free the gckCOMMAND object. */
    gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Command->os, Command));

    /* Success. */
    gcmkFOOTER_NO();
    return gcvSTATUS_OK;
}

/*******************************************************************************
**
** gckCOMMAND_EnterCommit
**
** Acquire command queue synchronization objects.
**
** INPUT:
**
**      gckCOMMAND Command
**          Pointer to an gckCOMMAND object to destroy.
**
**      gctBOOL FromPower
**          Determines whether the call originates from inside the power
**          management or not.
**
** OUTPUT:
**
**      Nothing.
*/
gceSTATUS
gckCOMMAND_EnterCommit(
    IN gckCOMMAND Command,
    IN gctBOOL FromPower
    )
{
    gceSTATUS status;
    gckHARDWARE hardware;
    gctBOOL atomIncremented = gcvFALSE;
    gctBOOL semaAcquired = gcvFALSE;

    gcmkHEADER_ARG("Command=0x%x", Command);

    /* Extract the gckHARDWARE and gckEVENT objects.
*/
    hardware = Command->kernel->hardware;
    gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);

    if (!FromPower)
    {
        /* Increment COMMIT atom to let power management know that a commit is
        ** in progress. */
        gcmkONERROR(_IncrementCommitAtom(Command, gcvTRUE));
        atomIncremented = gcvTRUE;

        /* Notify the system the GPU has a commit. */
        gcmkONERROR(gckOS_Broadcast(Command->os,
                                    hardware,
                                    gcvBROADCAST_GPU_COMMIT));

        /* Acquire the power management semaphore. */
        gcmkONERROR(gckOS_AcquireSemaphore(Command->os,
                                           Command->powerSemaphore));
        semaAcquired = gcvTRUE;
    }

    /* Grab the command queue mutex. */
    gcmkONERROR(gckOS_AcquireMutex(Command->os,
                                   Command->mutexQueue,
                                   gcvINFINITE));

    /* Success. */
    /* NOTE(review): success path uses gcmkFOOTER() while 'status' is never
    ** assigned here — confirm gcmkFOOTER_NO() was intended. */
    gcmkFOOTER();
    return gcvSTATUS_OK;

OnError:
    if (semaAcquired)
    {
        /* Release the power management semaphore. */
        gcmkVERIFY_OK(gckOS_ReleaseSemaphore(
            Command->os, Command->powerSemaphore
            ));
    }

    if (atomIncremented)
    {
        /* Decrement the commit atom. */
        gcmkVERIFY_OK(_IncrementCommitAtom(
            Command, gcvFALSE
            ));
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}

/*******************************************************************************
**
** gckCOMMAND_ExitCommit
**
** Release command queue synchronization objects.
**
** INPUT:
**
**      gckCOMMAND Command
**          Pointer to an gckCOMMAND object to destroy.
**
**      gctBOOL FromPower
**          Determines whether the call originates from inside the power
**          management or not.
**
** OUTPUT:
**
**      Nothing.
*/
gceSTATUS
gckCOMMAND_ExitCommit(
    IN gckCOMMAND Command,
    IN gctBOOL FromPower
    )
{
    gceSTATUS status;

    gcmkHEADER_ARG("Command=0x%x", Command);

    /* Release the power mutex. */
    gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexQueue));

    if (!FromPower)
    {
        /* Release the power management semaphore. */
        gcmkONERROR(gckOS_ReleaseSemaphore(Command->os,
                                           Command->powerSemaphore));

        /* Decrement the commit atom. */
        gcmkONERROR(_IncrementCommitAtom(Command, gcvFALSE));
    }

    /* Success. */
    gcmkFOOTER();
    return gcvSTATUS_OK;

OnError:
    /* Return the status. */
    gcmkFOOTER();
    return status;
}

/*******************************************************************************
**
** gckCOMMAND_Start
**
** Start up the command queue.
**
** INPUT:
**
**      gckCOMMAND Command
**          Pointer to an gckCOMMAND object to start.
**
** OUTPUT:
**
**      Nothing.
*/
gceSTATUS
gckCOMMAND_Start(
    IN gckCOMMAND Command
    )
{
    gceSTATUS status;
    gckHARDWARE hardware;
    gctUINT32 waitOffset = 0;
    gctUINT32 waitLinkBytes;
    gctPOINTER logical;
    gctUINT32 physical;
    gctUINT32 address;

    gcmkHEADER_ARG("Command=0x%x", Command);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);

    if (Command->running)
    {
        /* Command queue already running. */
        gcmkFOOTER_NO();
        return gcvSTATUS_OK;
    }

    /* Extract the gckHARDWARE object. */
    hardware = Command->kernel->hardware;
    gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);

    /* Query the size of WAIT/LINK command sequence. */
    gcmkONERROR(gckHARDWARE_WaitLink(
        hardware,
        gcvNULL,
        ~0U,
        Command->offset,
        &waitLinkBytes,
        gcvNULL,
        gcvNULL
        ));

    if ((Command->pageSize - Command->offset < waitLinkBytes)
    ||  (Command->logical == gcvNULL)
    )
    {
        /* Start at beginning of a new queue. */
        gcmkONERROR(_NewQueue(Command, gcvTRUE));
    }

    logical = (gctUINT8_PTR) Command->logical + Command->offset;
    physical = Command->physical + Command->offset;
    address = Command->address + Command->offset;

    /* Append WAIT/LINK.
*/
    gcmkONERROR(gckHARDWARE_WaitLink(
        hardware,
        logical,
        address,
        0,
        &waitLinkBytes,
        &waitOffset,
        &Command->waitSize
        ));

    /* Remember where the WAIT lives so it can later be patched into a
    ** LINK (commit) or an END (stop). */
    Command->waitLogical = (gctUINT8_PTR) logical + waitOffset;
    Command->waitPhysical = physical + waitOffset;
    Command->waitAddress = address + waitOffset;
    Command->waitOffset = waitOffset;

    /* Flush the cache for the wait/link. */
    gcmkONERROR(gckOS_CacheClean(
        Command->os,
        0,
        Command->physHandle,
        Command->offset,
        logical,
        waitLinkBytes
        ));

    /* Adjust offset. */
    Command->offset += waitLinkBytes;
    Command->newQueue = gcvFALSE;

#if gcdSECURITY
    /* Start FE by calling security service. */
    gckKERNEL_SecurityStartCommand(
        Command->kernel
        );
#else
    /* Enable command processor. */
    gcmkONERROR(gckHARDWARE_Execute(
        hardware,
        address,
        waitLinkBytes
        ));
#endif

    /* Command queue is running. */
    Command->running = gcvTRUE;

    /* Success. */
    gcmkFOOTER_NO();
    return gcvSTATUS_OK;

OnError:
    /* Return the status. */
    gcmkFOOTER();
    return status;
}

/*******************************************************************************
**
** gckCOMMAND_Stop
**
** Stop the command queue.
**
** INPUT:
**
**      gckCOMMAND Command
**          Pointer to an gckCOMMAND object to stop.
**
** OUTPUT:
**
**      Nothing.
*/
gceSTATUS
gckCOMMAND_Stop(
    IN gckCOMMAND Command
    )
{
    gckHARDWARE hardware;
    gceSTATUS status;
    gctUINT32 idle;

    gcmkHEADER_ARG("Command=0x%x", Command);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);

    if (!Command->running)
    {
        /* Command queue is not running. */
        gcmkFOOTER_NO();
        return gcvSTATUS_OK;
    }

    /* Extract the gckHARDWARE object. */
    hardware = Command->kernel->hardware;
    gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);

    if (gckHARDWARE_IsFeatureAvailable(hardware,
                                       gcvFEATURE_END_EVENT) == gcvSTATUS_TRUE)
    {
        /* Allocate the signal. */
        if (Command->endEventSignal == gcvNULL)
        {
            gcmkONERROR(gckOS_CreateSignal(Command->os,
                                           gcvTRUE,
                                           &Command->endEventSignal));
        }

        /* Append the END EVENT command to trigger the signal. */
        gcmkONERROR(gckEVENT_Stop(Command->kernel->eventObj,
                                  Command->kernelProcessID,
                                  Command->physHandle,
                                  Command->offset + Command->waitOffset,
                                  Command->waitLogical,
                                  Command->waitAddress,
                                  Command->endEventSignal,
                                  &Command->waitSize));
    }
    else
    {
        /* Replace last WAIT with END. */
        gcmkONERROR(gckHARDWARE_End(
            hardware,
            Command->waitLogical,
            Command->waitAddress,
            &Command->waitSize
            ));

#if USE_KERNEL_VIRTUAL_BUFFERS
        if (hardware->kernel->virtualCommandBuffer)
        {
            /* Record the GPU address of the END for restart handling. */
            gcmkONERROR(gckKERNEL_GetGPUAddress(
                hardware->kernel,
                Command->waitLogical,
                gcvFALSE,
                Command->virtualMemory,
                &hardware->lastEnd
                ));
        }
#endif

#if gcdSECURITY
        gcmkONERROR(gckKERNEL_SecurityExecute(
            Command->kernel, Command->waitLogical, 8
            ));
#endif

        /* Update queue tail pointer. */
        gcmkONERROR(gckHARDWARE_UpdateQueueTail(Command->kernel->hardware,
                                                Command->logical,
                                                Command->offset));

        /* Flush the cache for the END. */
        gcmkONERROR(gckOS_CacheClean(
            Command->os,
            0,
            Command->physHandle,
            Command->offset + Command->waitOffset,
            Command->waitLogical,
            Command->waitSize
            ));

        /* Wait for idle. */
        gcmkONERROR(gckHARDWARE_GetIdle(hardware, gcvTRUE, &idle));
    }

    /* Command queue is no longer running. */
    Command->running = gcvFALSE;

    /* Success. */
    gcmkFOOTER_NO();
    return gcvSTATUS_OK;

OnError:
    /* Return the status. */
    gcmkFOOTER();
    return status;
}

/*******************************************************************************
**
** gckCOMMAND_Commit
**
** Commit a command buffer to the command queue.
**
** INPUT:
**
**      gckCOMMAND Command
**          Pointer to a gckCOMMAND object.
**
**      gckCONTEXT Context
**          Pointer to a gckCONTEXT object.
+** +** gcoCMDBUF CommandBuffer +** Pointer to a gcoCMDBUF object. +** +** gcsSTATE_DELTA_PTR StateDelta +** Pointer to the state delta. +** +** gctUINT32 ProcessID +** Current process ID. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckCOMMAND_Commit( + IN gckCOMMAND Command, + IN gckCONTEXT Context, + IN gcoCMDBUF CommandBuffer, + IN gcsSTATE_DELTA_PTR StateDelta, + IN gctUINT32 ProcessID, + IN gctBOOL Shared, + IN gctUINT32 Index, + OUT gctUINT64_PTR CommitStamp, + OUT gctBOOL_PTR ContextSwitched + ) +{ + gceSTATUS status; + gctBOOL commitEntered = gcvFALSE; + gctBOOL contextAcquired = gcvFALSE; + gckHARDWARE hardware; + gctBOOL needCopy = gcvFALSE; + gctBOOL commandBufferMapped = gcvFALSE; + gcoCMDBUF commandBufferObject = gcvNULL; + gctBOOL stall = gcvFALSE; + gctBOOL contextSwitched = gcvFALSE; + +#if !gcdNULL_DRIVER + gcsCONTEXT_PTR contextBuffer; + struct _gcoCMDBUF _commandBufferObject; + gctPHYS_ADDR_T commandBufferPhysical; + gctUINT8_PTR commandBufferLogical = gcvNULL; + gctUINT32 commandBufferAddress = 0; + gctUINT8_PTR commandBufferTail = gcvNULL; + gctUINT commandBufferSize; + gctSIZE_T nopBytes; + gctUINT32 pipeBytes; + gctUINT32 linkBytes; + gctSIZE_T bytes; + gctUINT32 offset; + gctPOINTER entryLogical; + gctUINT32 entryAddress; + gctUINT32 entryBytes; + gctUINT32 exitOffset; + gctPOINTER exitLogical; + gctUINT32 exitAddress; + gctUINT32 exitBytes; + gctUINT32 waitLinkPhysical; + gctPOINTER waitLinkLogical; + gctUINT32 waitLinkAddress; + gctUINT32 waitLinkBytes; + gctUINT32 waitOffset; + gctUINT32 waitSize; + +#ifdef __QNXNTO__ + gctPOINTER userCommandBufferLogical = gcvNULL; + gctBOOL userCommandBufferLogicalMapped = gcvFALSE; +#endif + +#if gcdPROCESS_ADDRESS_SPACE + gckMMU mmu; + gctUINT32 oldValue; +#endif + +#if gcdDUMP_COMMAND + gctPOINTER contextDumpLogical = gcvNULL; + gctSIZE_T contextDumpBytes = 0; + gctPOINTER bufferDumpLogical = gcvNULL; + gctSIZE_T bufferDumpBytes = 0; +# endif + gctUINT32 exitLinkLow = 0, exitLinkHigh = 0; + 
gctUINT32 entryLinkLow = 0, entryLinkHigh = 0; + gctUINT32 commandLinkLow = 0, commandLinkHigh = 0; + + gckVIRTUAL_COMMAND_BUFFER_PTR virtualCommandBuffer = gcvNULL; + gctUINT64 asyncCommandStamp = 0; + gcoCMDBUF lastCommandBuffer = gcvNULL; + gctPOINTER pointer = gcvNULL; + gckKERNEL kernel = Command->kernel; + + gctPHYS_ADDR contextPhysHandle = gcvNULL; + gctPHYS_ADDR physHandle = gcvNULL; +#endif + + gcmkHEADER_ARG( + "Command=0x%x CommandBuffer=0x%x ProcessID=%d", + Command, CommandBuffer, ProcessID + ); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND); + +#if !gcdNULL_DRIVER + gcmkONERROR(_ProcessUserCommandBufferList( + Command, + CommandBuffer, + &lastCommandBuffer + )); +#endif + +#if gcdPROCESS_ADDRESS_SPACE + gcmkONERROR(gckKERNEL_GetProcessMMU(Command->kernel, &mmu)); + + gcmkONERROR(gckOS_AtomicExchange(Command->os, + mmu->pageTableDirty[Command->kernel->core], + 0, + &oldValue)); +#else +#endif + + /* Acquire the command queue. */ + gcmkONERROR(gckCOMMAND_EnterCommit(Command, gcvFALSE)); + commitEntered = gcvTRUE; + + /* Acquire the context switching mutex. */ + gcmkONERROR(gckOS_AcquireMutex( + Command->os, Command->mutexContext, gcvINFINITE + )); + contextAcquired = gcvTRUE; + + /* Extract the gckHARDWARE and gckEVENT objects. */ + hardware = Command->kernel->hardware; + + /* Check wehther we need to copy the structures or not. */ + gcmkONERROR(gckOS_QueryNeedCopy(Command->os, ProcessID, &needCopy)); + +#if gcdNULL_DRIVER + /* Context switch required? */ + if ((Context != gcvNULL) && (Command->currContext != Context)) + { + /* Yes, merge in the deltas. */ + gckCONTEXT_Update(Context, ProcessID, StateDelta); + + /* Update the current context. 
*/ + Command->currContext = Context; + + contextSwitched = gcvTRUE; + } +#else + if (needCopy) + { + commandBufferObject = &_commandBufferObject; + + gcmkONERROR(gckOS_CopyFromUserData( + Command->os, + commandBufferObject, + CommandBuffer, + gcmSIZEOF(struct _gcoCMDBUF) + )); + + gcmkVERIFY_OBJECT(commandBufferObject, gcvOBJ_COMMANDBUFFER); + } + else + { + gcmkONERROR(gckOS_MapUserPointer( + Command->os, + CommandBuffer, + gcmSIZEOF(struct _gcoCMDBUF), + &pointer + )); + + commandBufferObject = pointer; + + gcmkVERIFY_OBJECT(commandBufferObject, gcvOBJ_COMMANDBUFFER); + commandBufferMapped = gcvTRUE; + } + + gcmkONERROR(_HandlePatchList(Command, commandBufferObject, needCopy, &asyncCommandStamp)); + + /* Query the size of NOP command. */ + gcmkONERROR(gckHARDWARE_Nop( + hardware, gcvNULL, &nopBytes + )); + + /* Query the size of pipe select command sequence. */ + gcmkONERROR(gckHARDWARE_PipeSelect( + hardware, gcvNULL, gcvPIPE_3D, &pipeBytes + )); + + /* Query the size of LINK command. */ + gcmkONERROR(gckHARDWARE_Link( + hardware, gcvNULL, 0, 0, &linkBytes, gcvNULL, gcvNULL + )); + + /* Compute the command buffer entry and the size. */ + commandBufferLogical + = (gctUINT8_PTR) gcmUINT64_TO_PTR(commandBufferObject->logical) + + commandBufferObject->startOffset; + + + /* Get the hardware address. 
*/
    if (Command->kernel->virtualCommandBuffer)
    {
        virtualCommandBuffer = gcmNAME_TO_PTR(commandBufferObject->physical);

        /* NOTE(review): virtualCommandBuffer is dereferenced here BEFORE the
        ** NULL check below.  If gcmNAME_TO_PTR() can return gcvNULL (which the
        ** check implies), this dereference faults first; the check should be
        ** moved above this line. */
        physHandle = virtualCommandBuffer->virtualBuffer.physical;

        if (virtualCommandBuffer == gcvNULL)
        {
            gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
        }

        /* Translate the user command buffer logical address to its GPU
        ** address through the virtual command buffer bookkeeping. */
        gcmkONERROR(gckKERNEL_GetGPUAddress(
            Command->kernel,
            commandBufferLogical,
            gcvTRUE,
            virtualCommandBuffer,
            &commandBufferAddress
            ));
    }
    else
    {
        physHandle = gcmNAME_TO_PTR(commandBufferObject->physical);

        /* Contiguous buffer: the GPU address comes from a direct logical
        ** address conversion. */
        gcmkONERROR(gckHARDWARE_ConvertLogical(
            hardware,
            commandBufferLogical,
            gcvTRUE,
            &commandBufferAddress
            ));
    }

#ifdef __QNXNTO__
    /* Under QNX the user command buffer must be explicitly mapped before the
    ** kernel can touch it. */
    userCommandBufferLogical = (gctPOINTER) commandBufferLogical;

    gcmkONERROR(gckOS_MapUserPointer(
        Command->os,
        userCommandBufferLogical,
        0,
        &pointer));

    commandBufferLogical = pointer;

    userCommandBufferLogicalMapped = gcvTRUE;

    gcmkONERROR(gckOS_GetPhysicalAddress(
        Command->os,
        commandBufferLogical,
        &commandBufferPhysical
        ));

#else
    /* Get the physical address. */
    gcmkONERROR(gckOS_UserLogicalToPhysical(
        Command->os,
        commandBufferLogical,
        &commandBufferPhysical
        ));
#endif

    /* Bytes to execute: user payload plus the reserved tail, measured from
    ** the start offset. */
    commandBufferSize
        = commandBufferObject->offset
        + commandBufferObject->reservedTail
        - commandBufferObject->startOffset;

    gcmkONERROR(_FlushMMU(Command));

    /* One-shot dummy draw before the first real context commit — presumably a
    ** hardware workaround; TODO(review): confirm against _DummyDraw(). */
    if (Command->dummyDraw == gcvTRUE &&
        Context != gcvNULL)
    {
        Command->dummyDraw = gcvFALSE;
        gcmkONERROR(_DummyDraw(Command));
    }

    /* If an async (blit) command stamp is pending, serialize against it. */
    if (gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_FENCE_64BIT) && asyncCommandStamp != 0)
    {
        gcmkONERROR(_WaitForAsyncCommandStamp(Command, asyncCommandStamp));
    }

    /* Get the current offset. */
    offset = Command->offset;

    /* Compute number of bytes left in current kernel command queue. */
    bytes = Command->pageSize - offset;

    /* Query the size of the WAIT/LINK command sequence.
*/ + gcmkONERROR(gckHARDWARE_WaitLink( + hardware, + gcvNULL, + ~0U, + offset, + &waitLinkBytes, + gcvNULL, + gcvNULL + )); + + /* Is there enough space in the current command queue? */ + if (bytes < waitLinkBytes) + { + /* No, create a new one. */ + gcmkONERROR(_NewQueue(Command, gcvFALSE)); + + /* Get the new current offset. */ + offset = Command->offset; + + /* Recompute the number of bytes in the new kernel command queue. */ + bytes = Command->pageSize - offset; + gcmkASSERT(bytes >= waitLinkBytes); + } + + /* Compute the location if WAIT/LINK command sequence. */ + waitLinkPhysical = Command->physical + offset; + waitLinkLogical = (gctUINT8_PTR) Command->logical + offset; + waitLinkAddress = Command->address + offset; + + /* Context switch required? */ + if (Context == gcvNULL) + { + /* See if we have to switch pipes for the command buffer. */ + if (commandBufferObject->entryPipe == Command->pipeSelect) + { + /* Skip pipe switching sequence. */ + offset = pipeBytes; + } + else + { + /* The current hardware and the entry command buffer pipes + ** are different, switch to the correct pipe. */ + gcmkONERROR(gckHARDWARE_PipeSelect( + Command->kernel->hardware, + commandBufferLogical, + commandBufferObject->entryPipe, + &pipeBytes + )); + + /* Do not skip pipe switching sequence. */ + offset = 0; + } + + /* Compute the entry. */ + entryLogical = commandBufferLogical + offset; + entryAddress = commandBufferAddress + offset; + entryBytes = commandBufferSize - offset; + + Command->currContext = gcvNULL; + } +#if gcdDEBUG_OPTION && gcdDEBUG_FORCE_CONTEXT_UPDATE + else if (1) +#else + else if (Command->currContext != Context) +#endif + { + /* Get the current context buffer. */ + contextBuffer = Context->buffer; + + /* Yes, merge in the deltas. */ + gcmkONERROR(gckCONTEXT_Update(Context, ProcessID, StateDelta)); + + contextSwitched = gcvTRUE; + + /*************************************************************** + ** SWITCHING CONTEXT. 
+ */ + + /* Determine context buffer entry offset. */ + offset = (Command->pipeSelect == gcvPIPE_3D) + + /* Skip pipe switching sequence. */ + ? Context->entryOffset3D + Context->pipeSelectBytes + + /* Do not skip pipe switching sequence. */ + : Context->entryOffset3D; + + /* Compute the entry. */ + entryLogical = (gctUINT8_PTR) contextBuffer->logical + offset; + entryAddress = contextBuffer->address + offset; + entryBytes = Context->bufferSize - offset; + + /* See if we have to switch pipes between the context + and command buffers. */ + if (commandBufferObject->entryPipe == gcvPIPE_3D) + { + /* Skip pipe switching sequence. */ + offset = pipeBytes; + } + else + { + /* The current hardware and the initial context pipes are + different, switch to the correct pipe. */ + gcmkONERROR(gckHARDWARE_PipeSelect( + Command->kernel->hardware, + commandBufferLogical, + commandBufferObject->entryPipe, + &pipeBytes + )); + + /* Do not skip pipe switching sequence. */ + offset = 0; + } + + /* Generate a LINK from the context buffer to + the command buffer. */ + gcmkONERROR(gckHARDWARE_Link( + hardware, + contextBuffer->link3D, + commandBufferAddress + offset, + commandBufferSize - offset, + &linkBytes, + &commandLinkLow, + &commandLinkHigh + )); + +#if USE_KERNEL_VIRTUAL_BUFFERS + if (Command->kernel->virtualCommandBuffer) + { + gckVIRTUAL_COMMAND_BUFFER_PTR commandBuffer = (gckVIRTUAL_COMMAND_BUFFER_PTR) contextBuffer->physical; + + contextPhysHandle = commandBuffer->virtualBuffer.physical; + } + else +#endif + { + contextPhysHandle = contextBuffer->physical; + } + + /* Flush the context buffer cache. */ + gcmkONERROR(gckOS_CacheClean( + Command->os, + 0, + contextPhysHandle, + offset, + entryLogical, + entryBytes + )); + + /* Update the current context. */ + Command->currContext = Context; + +#if gcdDUMP_COMMAND + contextDumpLogical = entryLogical; + contextDumpBytes = entryBytes; +#endif + +#if gcdSECURITY + /* Commit context buffer to trust zone. 
*/ + gckKERNEL_SecurityExecute( + Command->kernel, + entryLogical, + entryBytes - 8 + ); +#endif + +#if gcdRECORD_COMMAND + gckRECORDER_Record( + Command->recorder, + gcvNULL, + 0xFFFFFFFF, + entryLogical, + entryBytes + ); +#endif + } + + /* Same context. */ + else + { + /* See if we have to switch pipes for the command buffer. */ + if (commandBufferObject->entryPipe == Command->pipeSelect) + { + /* Skip pipe switching sequence. */ + offset = pipeBytes; + } + else + { + /* The current hardware and the entry command buffer pipes + ** are different, switch to the correct pipe. */ + gcmkONERROR(gckHARDWARE_PipeSelect( + Command->kernel->hardware, + commandBufferLogical, + commandBufferObject->entryPipe, + &pipeBytes + )); + + /* Do not skip pipe switching sequence. */ + offset = 0; + } + + /* Compute the entry. */ + entryLogical = commandBufferLogical + offset; + entryAddress = commandBufferAddress + offset; + entryBytes = commandBufferSize - offset; + } + +#if gcdDUMP_COMMAND + bufferDumpLogical = commandBufferLogical + offset; + bufferDumpBytes = commandBufferSize - offset; +#endif + +#if gcdSECURE_USER + /* Process user hints. */ + gcmkONERROR(_ProcessHints(Command, ProcessID, commandBufferObject)); +#endif + + /* Determine the location to jump to for the command buffer being + ** scheduled. */ + if (Command->newQueue) + { + /* New command queue, jump to the beginning of it. */ + exitOffset = 0; + exitLogical = Command->logical; + exitAddress = Command->address; + exitBytes = Command->offset + waitLinkBytes; + } + else + { + /* Still within the preexisting command queue, jump to the new + WAIT/LINK command sequence. */ + exitOffset = offset; + exitLogical = waitLinkLogical; + exitAddress = waitLinkAddress; + exitBytes = waitLinkBytes; + } + + /* Add a new WAIT/LINK command sequence. When the command buffer which is + currently being scheduled is fully executed by the GPU, the FE will + jump to this WAIT/LINK sequence. 
*/ + gcmkONERROR(gckHARDWARE_WaitLink( + hardware, + waitLinkLogical, + waitLinkAddress, + offset, + &waitLinkBytes, + &waitOffset, + &waitSize + )); + + /* Flush the command queue cache. */ + gcmkONERROR(gckOS_CacheClean( + Command->os, + 0, + Command->physHandle, + exitOffset, + exitLogical, + exitBytes + )); + + /* Determine the location of the TAIL in the command buffer. */ + commandBufferTail + = commandBufferLogical + + commandBufferSize + - commandBufferObject->reservedTail; + + /* Generate command which writes out commit stamp. */ + if (gckHARDWARE_IsFeatureAvailable(hardware, gcvFEATURE_FENCE_64BIT)) + { + gctUINT32 bytes; + + gcmkONERROR(gckHARDWARE_Fence( + hardware, + gcvENGINE_RENDER, + commandBufferTail, + Command->fence->address, + Command->commitStamp, + &bytes + )); + + commandBufferTail += gcdRENDER_FENCE_LENGTH; + } + + /* Generate a LINK from the end of the command buffer being scheduled + back to the kernel command queue. */ +#if !gcdSECURITY + if (Shared == gcvFALSE) + { + gcmkONERROR(gckHARDWARE_Link( + hardware, + commandBufferTail, + exitAddress, + exitBytes, + &linkBytes, + &exitLinkLow, + &exitLinkHigh + )); + } + else + { + gctUINT8_PTR link = commandBufferTail + Index * 16; + gctSIZE_T bytes = 8; + + gcmkONERROR(gckHARDWARE_ChipEnable( + hardware, + link, + (gceCORE_3D_MASK)(1 << hardware->kernel->chipID), + &bytes + )); + + link += bytes; + + gcmkONERROR(gckHARDWARE_Link( + hardware, + link, + exitAddress, + exitBytes, + &linkBytes, + &exitLinkLow, + &exitLinkHigh + )); + + link += linkBytes; + } +#endif + + /* Flush the command buffer cache. 
*/ + gcmkONERROR(gckOS_CacheClean( + Command->os, + ProcessID, + physHandle, + commandBufferObject->startOffset, + commandBufferLogical, + commandBufferSize + )); + +#if gcdRECORD_COMMAND + gckRECORDER_Record( + Command->recorder, + commandBufferLogical + offset, + commandBufferSize - offset, + gcvNULL, + 0xFFFFFFFF + ); + + gckRECORDER_AdvanceIndex(Command->recorder, Command->commitStamp); +#endif + +#if gcdSECURITY + /* Submit command buffer to trust zone. */ + gckKERNEL_SecurityExecute( + Command->kernel, + commandBufferLogical + offset, + commandBufferSize - offset - 8 + ); +#else + /* Generate a LINK from the previous WAIT/LINK command sequence to the + entry determined above (either the context or the command buffer). + This LINK replaces the WAIT instruction from the previous WAIT/LINK + pair, therefore we use WAIT metrics for generation of this LINK. + This action will execute the entire sequence. */ + gcmkONERROR(gckHARDWARE_Link( + hardware, + Command->waitLogical, + entryAddress, + entryBytes, + &Command->waitSize, + &entryLinkLow, + &entryLinkHigh + )); +#endif + +#if gcdLINK_QUEUE_SIZE + if (Command->kernel->stuckDump >= gcvSTUCK_DUMP_USER_COMMAND) + { + gcuQUEUEDATA data; + + gcmkVERIFY_OK(gckOS_GetProcessID(&data.linkData.pid)); + + data.linkData.start = entryAddress; + data.linkData.end = entryAddress + entryBytes; + data.linkData.linkLow = entryLinkLow; + data.linkData.linkHigh = entryLinkHigh; + + gckQUEUE_Enqueue(&hardware->linkQueue, &data); + + if (commandBufferAddress + offset != entryAddress) + { + data.linkData.start = commandBufferAddress + offset; + data.linkData.end = commandBufferAddress + commandBufferSize; + data.linkData.linkLow = commandLinkLow; + data.linkData.linkHigh = commandLinkHigh; + + gckQUEUE_Enqueue(&hardware->linkQueue, &data); + } + + if (Command->kernel->stuckDump >= gcvSTUCK_DUMP_ALL_COMMAND) + { + data.linkData.start = exitAddress; + data.linkData.end = exitAddress + exitBytes; + data.linkData.linkLow = exitLinkLow; + 
data.linkData.linkHigh = exitLinkHigh; + + /* Dump kernel command.*/ + gckQUEUE_Enqueue(&hardware->linkQueue, &data); + } + } +#endif + + /* Flush the cache for the link. */ + gcmkONERROR(gckOS_CacheClean( + Command->os, + 0, + Command->physHandle, + Command->offset + waitOffset, + Command->waitLogical, + Command->waitSize + )); + + gcmkDUMPCOMMAND( + Command->os, + Command->waitLogical, + Command->waitSize, + gcvDUMP_BUFFER_LINK, + gcvFALSE + ); + + gcmkDUMPCOMMAND( + Command->os, + contextDumpLogical, + contextDumpBytes, + gcvDUMP_BUFFER_CONTEXT, + gcvFALSE + ); + + gcmkDUMPCOMMAND( + Command->os, + bufferDumpLogical, + bufferDumpBytes, + gcvDUMP_BUFFER_USER, + gcvFALSE + ); + + gcmkDUMPCOMMAND( + Command->os, + waitLinkLogical, + waitLinkBytes, + gcvDUMP_BUFFER_WAITLINK, + gcvFALSE + ); + + /* Update the current pipe. */ + Command->pipeSelect = commandBufferObject->exitPipe; + + /* Update command queue offset. */ + Command->offset += waitLinkBytes; + Command->newQueue = gcvFALSE; + + /* Update address of last WAIT. */ + Command->waitPhysical = waitLinkPhysical + waitOffset; + Command->waitLogical = (gctUINT8_PTR)waitLinkLogical + waitOffset; + Command->waitAddress = waitLinkAddress + waitOffset; + Command->waitSize = waitSize; + + /* Update queue tail pointer. */ + gcmkONERROR(gckHARDWARE_UpdateQueueTail( + hardware, Command->logical, Command->offset + )); + +#if gcdDUMP_COMMAND + gcmkPRINT("@[kernel.commit]"); +#endif +#endif /* gcdNULL_DRIVER */ + + /* Release the context switching mutex. 
*/ + gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContext)); + contextAcquired = gcvFALSE; + + *CommitStamp = Command->commitStamp; + *ContextSwitched = contextSwitched; + + Command->commitStamp++; + + stall = gcvFALSE; + +#if gcdLINK_QUEUE_SIZE + if (Command->kernel->stuckDump == gcvSTUCK_DUMP_STALL_COMMAND) + { + if ((Command->commitStamp % (gcdLINK_QUEUE_SIZE/2)) == 0) + { + /* If only context buffer and command buffer is recorded, + ** each commit costs 2 slot in queue, to make sure command + ** causing stuck is recorded, number of pending command buffer + ** is limited to (gckLINK_QUEUE_SIZE/2) + */ + stall = gcvTRUE; + } + } +#endif + + /* Release the command queue. */ + gcmkONERROR(gckCOMMAND_ExitCommit(Command, gcvFALSE)); + commitEntered = gcvFALSE; + + if (status == gcvSTATUS_INTERRUPTED) + { + gcmkTRACE( + gcvLEVEL_INFO, + "%s(%d): Intterupted in gckEVENT_Submit", + __FUNCTION__, __LINE__ + ); + status = gcvSTATUS_OK; + } + else + { + gcmkONERROR(status); + } + +#ifdef __QNXNTO__ + if (userCommandBufferLogicalMapped) + { + gcmkONERROR(gckOS_UnmapUserPointer( + Command->os, + userCommandBufferLogical, + 0, + commandBufferLogical)); + + userCommandBufferLogicalMapped = gcvFALSE; + } +#endif + + /* Unmap the command buffer pointer. */ + if (commandBufferMapped) + { + gcmkONERROR(gckOS_UnmapUserPointer( + Command->os, + CommandBuffer, + gcmSIZEOF(struct _gcoCMDBUF), + commandBufferObject + )); + + commandBufferMapped = gcvFALSE; + } + + /* Return status. */ + gcmkFOOTER(); + return gcvSTATUS_OK; + +OnError: + if (contextAcquired) + { + /* Release the context switching mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContext)); + } + + if (commitEntered) + { + /* Release the command queue mutex. 
*/
        gcmkVERIFY_OK(gckCOMMAND_ExitCommit(Command, gcvFALSE));
    }

#ifdef __QNXNTO__
    if (userCommandBufferLogicalMapped)
    {
        gcmkVERIFY_OK(gckOS_UnmapUserPointer(
            Command->os,
            userCommandBufferLogical,
            0,
            commandBufferLogical));
    }
#endif

    /* Unmap the command buffer pointer. */
    if (commandBufferMapped)
    {
        gcmkVERIFY_OK(gckOS_UnmapUserPointer(
            Command->os,
            CommandBuffer,
            gcmSIZEOF(struct _gcoCMDBUF),
            commandBufferObject
            ));
    }

    /* Return status. */
    gcmkFOOTER();
    return status;
}

/*******************************************************************************
**
**  gckCOMMAND_Reserve
**
**  Reserve space in the command queue.  The description historically also
**  claimed the command queue mutex is acquired here; NOTE(review): no mutex
**  acquisition is visible in this body — callers appear responsible for
**  gckCOMMAND_EnterCommit.  Verify against call sites.
**
**  INPUT:
**
**      gckCOMMAND Command
**          Pointer to an gckCOMMAND object.
**
**      gctUINT32 RequestedBytes
**          Number of bytes to reserve.
**
**  OUTPUT:
**
**      gctPOINTER * Buffer
**          Pointer to a variable that will receive the address of the reserved
**          space.
**
**      gctUINT32 * BufferSize
**          Pointer to a variable that will receive the number of bytes
**          available in the command queue (includes room the trailing
**          WAIT/LINK will consume).
*/
gceSTATUS
gckCOMMAND_Reserve(
    IN gckCOMMAND Command,
    IN gctUINT32 RequestedBytes,
    OUT gctPOINTER * Buffer,
    OUT gctUINT32 * BufferSize
    )
{
    gceSTATUS status;
    gctUINT32 bytes;
    gctUINT32 requiredBytes;
    gctUINT32 requestedAligned;

    gcmkHEADER_ARG("Command=0x%x RequestedBytes=%lu", Command, RequestedBytes);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);

    /* Compute aligned number of requested bytes. */
    requestedAligned = gcmALIGN(RequestedBytes, Command->alignment);

    /* Another WAIT/LINK command sequence will have to be appended after
       the requested area being reserved.  Compute the number of bytes
       required for WAIT/LINK at the location after the reserved area.
*/
    gcmkONERROR(gckHARDWARE_WaitLink(
        Command->kernel->hardware,
        gcvNULL,
        ~0U,
        Command->offset + requestedAligned,
        &requiredBytes,
        gcvNULL,
        gcvNULL
        ));

    /* Compute total number of bytes required. */
    requiredBytes += requestedAligned;

    /* Compute number of bytes available in command queue. */
    bytes = Command->pageSize - Command->offset;

    /* Is there enough space in the current command queue? */
    if (bytes < requiredBytes)
    {
        /* No, roll over to a fresh command queue page. */
        gcmkONERROR(_NewQueue(Command, gcvFALSE));

        /* Recompute the number of bytes in the new kernel command queue. */
        bytes = Command->pageSize - Command->offset;

        /* Still not enough space? */
        if (bytes < requiredBytes)
        {
            /* Rare case: the request is larger than a whole queue page. */
            gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
        }
    }

    /* Return pointer to empty slot command queue. */
    *Buffer = (gctUINT8 *) Command->logical + Command->offset;

    /* Return number of bytes left in command queue. */
    *BufferSize = bytes;

    /* Success. */
    gcmkFOOTER_ARG("*Buffer=0x%x *BufferSize=%lu", *Buffer, *BufferSize);
    return gcvSTATUS_OK;

OnError:
    /* Return status. */
    gcmkFOOTER();
    return status;
}

/*******************************************************************************
**
**  gckCOMMAND_Execute
**
**  Execute a previously reserved command queue by appending a WAIT/LINK command
**  sequence after it and modifying the last WAIT into a LINK command.  The
**  command FIFO mutex will be released whether this function succeeds or not.
**
**  INPUT:
**
**      gckCOMMAND Command
**          Pointer to an gckCOMMAND object.
**
**      gctUINT32 RequestedBytes
**          Number of bytes previously reserved.
**
**  OUTPUT:
**
**      Nothing.
*/
gceSTATUS
gckCOMMAND_Execute(
    IN gckCOMMAND Command,
    IN gctUINT32 RequestedBytes
    )
{
    gceSTATUS status;

    /* Location of the new WAIT/LINK appended after the reserved area. */
    gctUINT32 waitLinkPhysical;
    gctUINT8_PTR waitLinkLogical;
    gctUINT32 waitLinkAddress;
    gctUINT32 waitLinkOffset;
    gctUINT32 waitLinkBytes;

    /* Location of the WAIT inside that sequence (becomes the next patch point). */
    gctUINT32 waitPhysical;
    gctPOINTER waitLogical;
    gctUINT32 waitAddress;
    gctUINT32 waitOffset;
    gctUINT32 waitBytes;

    gctUINT32 linkLow, linkHigh;

    /* Region the FE will jump to when the previous WAIT is patched. */
    gctUINT32 execOffset;
    gctPOINTER execLogical;
    gctUINT32 execAddress;
    gctUINT32 execBytes;

    gcmkHEADER_ARG("Command=0x%x RequestedBytes=%lu", Command, RequestedBytes);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);

    /* Compute offset for WAIT/LINK. */
    waitLinkOffset = Command->offset + RequestedBytes;

    /* Compute number of bytes left in command queue. */
    waitLinkBytes = Command->pageSize - waitLinkOffset;

    /* Compute the location of the WAIT/LINK command sequence. */
    waitLinkPhysical = Command->physical + waitLinkOffset;
    waitLinkLogical  = (gctUINT8_PTR) Command->logical + waitLinkOffset;
    waitLinkAddress  = Command->address + waitLinkOffset;

    /* Append WAIT/LINK in command queue. */
    gcmkONERROR(gckHARDWARE_WaitLink(
        Command->kernel->hardware,
        waitLinkLogical,
        waitLinkAddress,
        waitLinkOffset,
        &waitLinkBytes,
        &waitOffset,
        &waitBytes
        ));

    /* Compute the location of the WAIT command. */
    waitPhysical = waitLinkPhysical + waitOffset;
    waitLogical  = waitLinkLogical + waitOffset;
    waitAddress  = waitLinkAddress + waitOffset;

    /* Determine the location to jump to for the command buffer being
    ** scheduled. */
    if (Command->newQueue)
    {
        /* New command queue, jump to the beginning of it. */
        execOffset  = 0;
        execLogical = Command->logical;
        execAddress = Command->address;
        execBytes   = waitLinkOffset + waitLinkBytes;
    }
    else
    {
        /* Still within the preexisting command queue, jump directly to the
           reserved area.
*/
        execOffset  = Command->offset;
        execLogical = (gctUINT8 *) Command->logical + Command->offset;
        execAddress = Command->address + Command->offset;
        execBytes   = RequestedBytes + waitLinkBytes;
    }

    /* Flush the CPU cache over the region the GPU is about to fetch. */
    gcmkONERROR(gckOS_CacheClean(
        Command->os,
        0,
        Command->physHandle,
        execOffset,
        execLogical,
        execBytes
        ));

    /* Convert the last WAIT into a LINK so the FE proceeds into the new
       commands. */
    gcmkONERROR(gckHARDWARE_Link(
        Command->kernel->hardware,
        Command->waitLogical,
        execAddress,
        execBytes,
        &Command->waitSize,
        &linkLow,
        &linkHigh
        ));

    /* Flush the cache for the patched LINK itself. */
    gcmkONERROR(gckOS_CacheClean(
        Command->os,
        0,
        Command->physHandle,
        waitLinkOffset + waitOffset,
        Command->waitLogical,
        Command->waitSize
        ));

#if gcdLINK_QUEUE_SIZE
    /* Record this kernel command range for stuck-GPU dumps. */
    if (Command->kernel->stuckDump >= gcvSTUCK_DUMP_ALL_COMMAND)
    {
        gcuQUEUEDATA data;

        gcmkVERIFY_OK(gckOS_GetProcessID(&data.linkData.pid));

        data.linkData.start    = execAddress;
        data.linkData.end      = execAddress + execBytes;
        data.linkData.linkLow  = linkLow;
        data.linkData.linkHigh = linkHigh;

        gckQUEUE_Enqueue(&Command->kernel->hardware->linkQueue, &data);
    }
#endif

    gcmkDUMPCOMMAND(
        Command->os,
        Command->waitLogical,
        Command->waitSize,
        gcvDUMP_BUFFER_LINK,
        gcvFALSE
        );

    gcmkDUMPCOMMAND(
        Command->os,
        execLogical,
        execBytes,
        gcvDUMP_BUFFER_KERNEL,
        gcvFALSE
        );

    /* Update the pointer to the last WAIT. */
    Command->waitPhysical = waitPhysical;
    Command->waitLogical  = waitLogical;
    Command->waitAddress  = waitAddress;
    Command->waitSize     = waitBytes;

    /* Update the command queue. */
    Command->offset  += RequestedBytes + waitLinkBytes;
    Command->newQueue = gcvFALSE;

    /* Update queue tail pointer. */
    gcmkONERROR(gckHARDWARE_UpdateQueueTail(
        Command->kernel->hardware, Command->logical, Command->offset
        ));

#if gcdDUMP_COMMAND
    gcmkPRINT("@[kernel.execute]");
#endif

    /* Success.
*/
    gcmkFOOTER_NO();
    return gcvSTATUS_OK;

OnError:
    /* Return the status. */
    gcmkFOOTER();
    return status;
}

/*******************************************************************************
**
**  gckCOMMAND_Stall
**
**  The calling thread will be suspended until the command queue has been
**  completed.
**
**  INPUT:
**
**      gckCOMMAND Command
**          Pointer to an gckCOMMAND object.
**
**      gctBOOL FromPower
**          Determines whether the call originates from inside the power
**          management or not.
**
**  OUTPUT:
**
**      Nothing.
*/
gceSTATUS
gckCOMMAND_Stall(
    IN gckCOMMAND Command,
    IN gctBOOL FromPower
    )
{
#if gcdNULL_DRIVER
    /* Do nothing with infinite hardware. */
    return gcvSTATUS_OK;
#else
    gckOS os;
    gckHARDWARE hardware;
    gckEVENT eventObject;
    gceSTATUS status;
    gctSIGNAL signal = gcvNULL;
    /* NOTE(review): timer is accumulated below but never read anywhere in
    ** this function — appears to be dead state; verify. */
    gctUINT timer = 0;

    gcmkHEADER_ARG("Command=0x%x", Command);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);

    /* Extract the gckOS object pointer. */
    os = Command->os;
    gcmkVERIFY_OBJECT(os, gcvOBJ_OS);

    /* Extract the gckHARDWARE object pointer. */
    hardware = Command->kernel->hardware;
    gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);

    /* Extract the gckEVENT object pointer. */
    eventObject = Command->kernel->eventObj;
    gcmkVERIFY_OBJECT(eventObject, gcvOBJ_EVENT);

    /* Allocate the signal. */
    gcmkONERROR(gckOS_CreateSignal(os, gcvTRUE, &signal));

    /* Append the EVENT command to trigger the signal. */
    gcmkONERROR(gckEVENT_Signal(eventObject, signal, gcvKERNEL_PIXEL));

    /* Submit the event queue. */
    gcmkONERROR(gckEVENT_Submit(eventObject, gcvTRUE, FromPower));

#if gcdDUMP_COMMAND
    gcmkPRINT("@[kernel.stall]");
#endif

    /* status was set by the gcmkONERROR wrapper around gckEVENT_Submit;
    ** CHIP_NOT_READY is treated as a failure even though it did not jump. */
    if (status == gcvSTATUS_CHIP_NOT_READY)
    {
        /* Error. */
        goto OnError;
    }

    do
    {
        /* Wait for the signal.
*/
        status = gckOS_WaitSignal(os, signal, gcvTRUE, gcdGPU_ADVANCETIMER);

        if (status == gcvSTATUS_TIMEOUT)
        {
#if gcmIS_DEBUG(gcdDEBUG_CODE)
            gctUINT32 idle;

            /* Read idle register. */
            gcmkVERIFY_OK(gckHARDWARE_GetIdle(
                hardware, gcvFALSE, &idle
                ));

            gcmkTRACE(
                gcvLEVEL_ERROR,
                "%s(%d): idle=%08x",
                __FUNCTION__, __LINE__, idle
                );

            gcmkVERIFY_OK(gckOS_MemoryBarrier(os, gcvNULL));
#endif

            /* Advance timer. */
            timer += gcdGPU_ADVANCETIMER;
        }
        else if (status == gcvSTATUS_INTERRUPTED)
        {
            gcmkONERROR(gcvSTATUS_INTERRUPTED);
        }

    }
    while (gcmIS_ERROR(status));

    /* Bail out on timeout. */
    if (gcmIS_ERROR(status))
    {
        /* NOTE(review): the loop above only exits when status is NOT an
        ** error, so this broadcast appears unreachable — the GPU-stuck path
        ** may never fire from here.  Verify gcmIS_ERROR semantics. */
        /* Broadcast the stuck GPU. */
        gcmkONERROR(gckOS_Broadcast(
            os, hardware, gcvBROADCAST_GPU_STUCK
            ));
    }

    /* Delete the signal. */
    gcmkVERIFY_OK(gckOS_DestroySignal(os, signal));

    /* Success. */
    gcmkFOOTER_NO();
    return gcvSTATUS_OK;

OnError:
    if (signal != gcvNULL)
    {
        /* Free the signal. */
        gcmkVERIFY_OK(gckOS_DestroySignal(os, signal));
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
#endif
}

/*******************************************************************************
**
**  gckCOMMAND_Attach
**
**  Attach user process.
**
**  INPUT:
**
**      gckCOMMAND Command
**          Pointer to a gckCOMMAND object.
**
**      gctUINT32 ProcessID
**          Current process ID.
**
**  OUTPUT:
**
**      gckCONTEXT * Context
**          Pointer to a variable that will receive a pointer to a new
**          gckCONTEXT object.
**
**      gctSIZE_T * StateCount
**          Pointer to a variable that will receive the number of states
**          in the context buffer.
*/
#if (gcdENABLE_3D)
gceSTATUS
gckCOMMAND_Attach(
    IN gckCOMMAND Command,
    OUT gckCONTEXT * Context,
    OUT gctSIZE_T * MaxState,
    OUT gctUINT32 * NumStates,
    IN gctUINT32 ProcessID
    )
{
    gceSTATUS status;
    gctBOOL acquired = gcvFALSE;

    gcmkHEADER_ARG("Command=0x%x", Command);

    /* Verify the arguments.
*/
    gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);

    /* Acquire the context switching mutex. */
    gcmkONERROR(gckOS_AcquireMutex(
        Command->os, Command->mutexContext, gcvINFINITE
        ));
    acquired = gcvTRUE;

    /* Construct a gckCONTEXT object. */
    gcmkONERROR(gckCONTEXT_Construct(
        Command->os,
        Command->kernel->hardware,
        ProcessID,
        Context
        ));

    /* Return the number of states in the context. */
    * MaxState  = (* Context)->maxState;
    * NumStates = (* Context)->numStates;

    /* Release the context switching mutex. */
    gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
    acquired = gcvFALSE;

    /* Success. */
    gcmkFOOTER_ARG("*Context=0x%x", *Context);
    return gcvSTATUS_OK;

OnError:
    /* Release mutex. */
    if (acquired)
    {
        /* Release the context switching mutex. */
        gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
        acquired = gcvFALSE;
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
#endif

/*******************************************************************************
**
**  gckCOMMAND_Detach
**
**  Detach user process.
**
**  INPUT:
**
**      gckCOMMAND Command
**          Pointer to a gckCOMMAND object.
**
**      gckCONTEXT Context
**          Pointer to a gckCONTEXT object to be destroyed.
**
**  OUTPUT:
**
**      Nothing.
*/
gceSTATUS
gckCOMMAND_Detach(
    IN gckCOMMAND Command,
    IN gckCONTEXT Context
    )
{
    gceSTATUS status;
    gctBOOL acquired = gcvFALSE;

    gcmkHEADER_ARG("Command=0x%x Context=0x%x", Command, Context);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);

    /* Acquire the context switching mutex. */
    gcmkONERROR(gckOS_AcquireMutex(
        Command->os, Command->mutexContext, gcvINFINITE
        ));
    acquired = gcvTRUE;

    /* Destroy the gckCONTEXT object. */
    gcmkONERROR(gckCONTEXT_Destroy(Context));

    if (Command->currContext == Context)
    {
        /* Detach from gckCOMMAND object if the destroyed context is the
           current context.
*/
        Command->currContext = gcvNULL;
    }

    /* Release the context switching mutex. */
    gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
    acquired = gcvFALSE;

    /* Return the status. */
    gcmkFOOTER();
    return gcvSTATUS_OK;

OnError:
    /* Release mutex. */
    if (acquired)
    {
        /* Release the context switching mutex. */
        gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
        acquired = gcvFALSE;
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}

/*******************************************************************************
**
**  gckCOMMAND_DumpExecutingBuffer
**
**  Dump the command buffer which GPU is executing.
**
**  INPUT:
**
**      gckCOMMAND Command
**          Pointer to a gckCOMMAND object.
**
**  OUTPUT:
**
**      Nothing.
*/
gceSTATUS
gckCOMMAND_DumpExecutingBuffer(
    IN gckCOMMAND Command
    )
{
    gceSTATUS status;
    gckVIRTUAL_COMMAND_BUFFER_PTR buffer = gcvNULL;
    gctUINT32 gpuAddress;
    gctSIZE_T pageCount;
    gctPOINTER entry = gcvNULL;
    gckOS os = Command->os;
    gckKERNEL kernel = Command->kernel;
    gctUINT32 i;
    gctUINT32 dumpRear;
    gckQUEUE queue = &kernel->hardware->linkQueue;
    gctSIZE_T bytes;
    gckLINKDATA linkData;
    gcuQUEUEDATA * queueData;
    gctUINT32 offset;
    gctPOINTER entryDump;
    gctUINT32 pid;
    gctUINT8 processName[24] = {0};
    gctPHYS_ADDR_T cpuPhysical;

    gcmkPRINT("**************************\n");
    gcmkPRINT("**** COMMAND BUF DUMP ****\n");
    gcmkPRINT("**************************\n");

    /* The executed stamp is the 64-bit fence value the GPU last wrote. */
    gcmkPRINT("  Submitted commit stamp = %lld", Command->commitStamp - 1);
    gcmkPRINT("  Executed commit stamp  = %lld", *(gctUINT64_PTR)Command->fence->logical);

    /* Register 0x664 holds the current FE DMA address; it is read twice —
       presumably to let the in-flight value settle; TODO(review) confirm. */
    gcmkVERIFY_OK(gckOS_ReadRegisterEx(os, kernel->core, 0x664, &gpuAddress));
    gcmkVERIFY_OK(gckOS_ReadRegisterEx(os, kernel->core, 0x664, &gpuAddress));

    gcmkPRINT("DMA Address 0x%08X, memory around:", gpuAddress);

    /* Search and dump memory around DMA address.
*/
    if (kernel->virtualCommandBuffer)
    {
        status = gckDEVICE_QueryGPUAddress(kernel->device, kernel, gpuAddress, &buffer);
    }
    else
    {
        status = gcvSTATUS_OK;
    }

    if (gcmIS_SUCCESS(status))
    {
        if (kernel->virtualCommandBuffer)
        {
            gcmkVERIFY_OK(gckOS_CreateKernelVirtualMapping(
                os, buffer->virtualBuffer.physical, buffer->virtualBuffer.bytes, &entry, &pageCount));

            offset = gpuAddress - buffer->virtualBuffer.gpuAddress;

            entryDump = entry;

            /* Dump one page. */
            bytes = 4096;

            /* Align to page. */
            offset &= 0xfffff000;

            /* Kernel address of the page containing the stall point. */
            entryDump = (gctUINT8_PTR)entryDump + offset;

            /* Align to page. */
            gpuAddress &= 0xfffff000;
        }
        else
        {
            gcmkVERIFY_OK(gckOS_GPUPhysicalToCPUPhysical(os, gpuAddress, &cpuPhysical));

            gcmkVERIFY_OK(gckOS_MapPhysical(os, (gctUINT32) cpuPhysical, 4096, &entry));

            /* Align to page start. */
            entryDump = (gctPOINTER)((gctUINTPTR_T)entry & ~0xFFF);
            gpuAddress = gpuAddress & ~0xFFF;
            bytes = 4096;
        }

        gcmkPRINT("User Command Buffer:\n");
        _DumpBuffer(entryDump, gpuAddress, bytes);

        if (kernel->virtualCommandBuffer)
        {
            gcmkVERIFY_OK(gckOS_DestroyKernelVirtualMapping(
                os, buffer->virtualBuffer.physical, buffer->virtualBuffer.bytes, entry));
        }
        else
        {
            gcmkVERIFY_OK(gckOS_UnmapPhysical(os, entry, 4096));
        }
    }
    else
    {
        /* DMA address not backed by a known buffer: fall back to dumping the
           kernel command queues. */
        _DumpKernelCommandBuffer(Command);
    }

    /* Dump link queue. */
    if (queue->count)
    {
        gcmkPRINT("Dump Level is %d, dump %d valid record in link queue:",
                  Command->kernel->stuckDump, queue->count);

        dumpRear = queue->count;

        for (i = 0; i < dumpRear; i++)
        {
            gckQUEUE_GetData(queue, i, &queueData);

            linkData = &queueData->linkData;

            /* Get GPU address range of this recorded command buffer.
*/
            gpuAddress = linkData->start;
            bytes = linkData->end - gpuAddress;

            pid = linkData->pid;

            gckOS_GetProcessNameByPid(pid, 16, processName);

            if (kernel->virtualCommandBuffer)
            {
                buffer = gcvNULL;

                /* Get the whole buffer. */
                status = gckDEVICE_QueryGPUAddress(kernel->device, kernel, gpuAddress, &buffer);

                if (gcmIS_ERROR(status))
                {
                    /* Get kernel address of kernel command buffer. */
                    status = gckCOMMAND_AddressInKernelCommandBuffer(
                        kernel->command, gpuAddress, &entry);

                    if (gcmIS_ERROR(status))
                    {
                        /* Last resort: hardware function region. */
                        status = gckHARDWARE_AddressInHardwareFuncions(
                            kernel->hardware, gpuAddress, &entry);

                        if (gcmIS_ERROR(status))
                        {
                            gcmkPRINT("Buffer [%08X - %08X] not found, may be freed",
                                      linkData->start,
                                      linkData->end);
                            continue;
                        }
                    }

                    offset = 0;
                    gcmkPRINT("Kernel Command Buffer: %08X, %08X", linkData->linkLow, linkData->linkHigh);
                }
                else
                {
                    /* Get kernel logical for dump. */
                    if (buffer->virtualBuffer.kernelLogical)
                    {
                        /* Get kernel logical directly if it is a context buffer. */
                        entry = buffer->virtualBuffer.kernelLogical;
                        gcmkPRINT("Context Buffer: %08X, %08X PID:%d %s",
                                  linkData->linkLow, linkData->linkHigh, linkData->pid, processName);
                    }
                    else
                    {
                        /* Make it accessible by kernel if it is a user command buffer. */
                        gcmkVERIFY_OK(
                            gckOS_CreateKernelVirtualMapping(os,
                                                             buffer->virtualBuffer.physical,
                                                             buffer->virtualBuffer.bytes,
                                                             &entry,
                                                             &pageCount));
                        gcmkPRINT("User Command Buffer: %08X, %08X PID:%d %s",
                                  linkData->linkLow, linkData->linkHigh, linkData->pid, processName);
                    }

                    offset = gpuAddress - buffer->virtualBuffer.gpuAddress;
                }

                /* Dump from the entry. */
                _DumpBuffer((gctUINT8_PTR)entry + offset, gpuAddress, bytes);

                /* Release kernel logical address if necessary (only user
                   command buffers were temporarily mapped above).
*/
                if (buffer && !buffer->virtualBuffer.kernelLogical)
                {
                    gcmkVERIFY_OK(
                        gckOS_DestroyKernelVirtualMapping(os,
                                                          buffer->virtualBuffer.physical,
                                                          buffer->virtualBuffer.bytes,
                                                          entry));
                }
            }
            else
            {
                gcmkVERIFY_OK(gckOS_GPUPhysicalToCPUPhysical(os, gpuAddress, &cpuPhysical));

                gcmkVERIFY_OK(gckOS_MapPhysical(os, (gctUINT32) cpuPhysical, bytes, &entry));

                gcmkPRINT("Command Buffer: %08X, %08X PID:%d %s",
                          linkData->linkLow, linkData->linkHigh, linkData->pid, processName);

                _DumpBuffer((gctUINT8_PTR)entry, gpuAddress, bytes);

                gcmkVERIFY_OK(gckOS_UnmapPhysical(os, entry, bytes));
            }
        }
    }

    return gcvSTATUS_OK;
}

/* Map a GPU address that falls inside one of the kernel command queues to its
** kernel logical pointer.  Returns gcvSTATUS_NOT_FOUND when the address is
** not inside any queue page. */
gceSTATUS
gckCOMMAND_AddressInKernelCommandBuffer(
    IN gckCOMMAND Command,
    IN gctUINT32 Address,
    OUT gctPOINTER * Pointer
    )
{
    gctINT i;

    for (i = 0; i < gcdCOMMAND_QUEUES; i++)
    {
        if ((Address >= Command->queues[i].address)
         && (Address < (Command->queues[i].address + Command->pageSize))
        )
        {
            *Pointer = (gctUINT8_PTR)Command->queues[i].logical
                     + (Address - Command->queues[i].address)
                     ;

            return gcvSTATUS_OK;
        }
    }

    return gcvSTATUS_NOT_FOUND;
}

diff --git a/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_db.c b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_db.c
new file mode 100644
index 000000000000..3f48824fa5e7
--- /dev/null
+++ b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_db.c
@@ -0,0 +1,1894 @@
/****************************************************************************
*
*    The MIT License (MIT)
*
*    Copyright (c) 2014 - 2018 Vivante Corporation
*
*    Permission is hereby granted, free of charge, to any person obtaining a
*    copy of this software and associated documentation files (the "Software"),
*    to deal in the Software without restriction, including without limitation
*    the rights to use, copy, modify, merge, publish, distribute, sublicense,
*    and/or sell copies of the Software, and to permit persons to whom the
*    Software is furnished to do so, subject to the
following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#include "gc_hal_kernel_precomp.h" + +#define _GC_OBJ_ZONE gcvZONE_DATABASE + +/******************************************************************************* +***** Private fuctions ********************************************************/ + +#define _GetSlot(database, x) \ + (gctUINT32)(gcmPTR_TO_UINT64(x) % gcmCOUNTOF(database->list)) + +/******************************************************************************* +** gckKERNEL_FindDatabase +** +** Find a database identified by a process ID and move it to the head of the +** hash list. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to a gckKERNEL object. +** +** gctUINT32 ProcessID +** ProcessID that identifies the database. +** +** gctBOOL LastProcessID +** gcvTRUE if searching for the last known process ID. gcvFALSE if +** we need to search for the process ID specified by the ProcessID +** argument. +** +** OUTPUT: +** +** gcsDATABASE_PTR * Database +** Pointer to a variable receiving the database structure pointer on +** success. +*/ +gceSTATUS +gckKERNEL_FindDatabase( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN gctBOOL LastProcessID, + OUT gcsDATABASE_PTR * Database + ) +{ + gceSTATUS status; + gcsDATABASE_PTR database, previous; + gctSIZE_T slot; + gctBOOL acquired = gcvFALSE; + + gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d LastProcessID=%d", + Kernel, ProcessID, LastProcessID); + + /* Compute the hash for the database. */ + slot = ProcessID % gcmCOUNTOF(Kernel->db->db); + + /* Acquire the database mutex. */ + gcmkONERROR( + gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE)); + acquired = gcvTRUE; + + /* Check whether we are getting the last known database. */ + if (LastProcessID) + { + /* Use last database. */ + database = Kernel->db->lastDatabase; + + if (database == gcvNULL) + { + /* Database not found. */ + gcmkONERROR(gcvSTATUS_INVALID_DATA); + } + } + else + { + /* Walk the hash list. 
*/ + for (previous = gcvNULL, database = Kernel->db->db[slot]; + database != gcvNULL; + database = database->next) + { + if (database->processID == ProcessID) + { + /* Found it! */ + break; + } + + previous = database; + } + + if (database == gcvNULL) + { + /* Database not found. */ + gcmkONERROR(gcvSTATUS_INVALID_DATA); + } + + if (previous != gcvNULL) + { + /* Move database to the head of the hash list. */ + previous->next = database->next; + database->next = Kernel->db->db[slot]; + Kernel->db->db[slot] = database; + } + } + + /* Release the database mutex. */ + gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex)); + + /* Return the database. */ + *Database = database; + + /* Success. */ + gcmkFOOTER_ARG("*Database=0x%x", *Database); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + /* Release the database mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex)); + } + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** gckKERNEL_DeinitDatabase +** +** De-init a database structure. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to a gckKERNEL object. +** +** gcsDATABASE_PTR Database +** Pointer to the database structure to deinit. +** +** OUTPUT: +** +** Nothing. +*/ +static gceSTATUS +gckKERNEL_DeinitDatabase( + IN gckKERNEL Kernel, + IN gcsDATABASE_PTR Database + ) +{ + gcmkHEADER_ARG("Kernel=0x%x Database=0x%x", Kernel, Database); + + if (Database) + { + Database->deleted = gcvFALSE; + + /* Destory handle db. 
*/ + if (Database->refs) + { + gcmkVERIFY_OK(gckOS_AtomDestroy(Kernel->os, Database->refs)); + Database->refs = gcvNULL; + } + + if (Database->handleDatabase) + { + gcmkVERIFY_OK(gckKERNEL_DestroyIntegerDatabase(Kernel, Database->handleDatabase)); + Database->handleDatabase = gcvNULL; + } + + if (Database->handleDatabaseMutex) + { + gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Database->handleDatabaseMutex)); + Database->handleDatabaseMutex = gcvNULL; + } + +#if gcdPROCESS_ADDRESS_SPACE + if (Database->mmu) + { + gcmkONERROR(gckEVENT_DestroyMmu(Kernel->eventObj, Database->mmu, gcvKERNEL_PIXEL)); + Database->mmu = gcvNULL; + } +#endif + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** gckKERNEL_NewRecord +** +** Create a new database record structure and insert it to the head of the +** database. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to a gckKERNEL object. +** +** gcsDATABASE_PTR Database +** Pointer to a database structure. +** +** OUTPUT: +** +** gcsDATABASE_RECORD_PTR * Record +** Pointer to a variable receiving the database record structure +** pointer on success. +*/ +static gceSTATUS +gckKERNEL_NewRecord( + IN gckKERNEL Kernel, + IN gcsDATABASE_PTR Database, + IN gctUINT32 Slot, + OUT gcsDATABASE_RECORD_PTR * Record + ) +{ + gceSTATUS status; + gctBOOL acquired = gcvFALSE; + gcsDATABASE_RECORD_PTR record = gcvNULL; + + gcmkHEADER_ARG("Kernel=0x%x Database=0x%x", Kernel, Database); + + /* Acquire the database mutex. */ + gcmkONERROR( + gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE)); + acquired = gcvTRUE; + + if (Kernel->db->freeRecord != gcvNULL) + { + /* Allocate the record from the free list. */ + record = Kernel->db->freeRecord; + Kernel->db->freeRecord = record->next; + } + else + { + gctPOINTER pointer = gcvNULL; + + /* Allocate the record from the heap. 
*/ + gcmkONERROR(gckOS_Allocate(Kernel->os, + gcmSIZEOF(gcsDATABASE_RECORD), + &pointer)); + + record = pointer; + } + + /* Insert the record in the database. */ + record->next = Database->list[Slot]; + Database->list[Slot] = record; + + /* Release the database mutex. */ + gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex)); + + /* Return the record. */ + *Record = record; + + /* Success. */ + gcmkFOOTER_ARG("*Record=0x%x", *Record); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + /* Release the database mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex)); + } + if (record != gcvNULL) + { + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, record)); + } + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** gckKERNEL_DeleteRecord +** +** Remove a database record from the database and delete its structure. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to a gckKERNEL object. +** +** gcsDATABASE_PTR Database +** Pointer to a database structure. +** +** gceDATABASE_TYPE Type +** Type of the record to remove. +** +** gctPOINTER Data +** Data of the record to remove. +** +** OUTPUT: +** +** gctSIZE_T_PTR Bytes +** Pointer to a variable that receives the size of the record deleted. +** Can be gcvNULL if the size is not required. +*/ +static gceSTATUS +gckKERNEL_DeleteRecord( + IN gckKERNEL Kernel, + IN gcsDATABASE_PTR Database, + IN gceDATABASE_TYPE Type, + IN gctPOINTER Data, + OUT gctSIZE_T_PTR Bytes OPTIONAL + ) +{ + gceSTATUS status; + gctBOOL acquired = gcvFALSE; + gcsDATABASE_RECORD_PTR record, previous; + gctUINT32 slot = _GetSlot(Database, Data); + + gcmkHEADER_ARG("Kernel=0x%x Database=0x%x Type=%d Data=0x%x", + Kernel, Database, Type, Data); + + /* Acquire the database mutex. 
*/ + gcmkONERROR( + gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE)); + acquired = gcvTRUE; + + /* Scan the database for this record. */ + for (record = Database->list[slot], previous = gcvNULL; + record != gcvNULL; + record = record->next + ) + { + if ((record->type == Type) + && (record->data == Data) + ) + { + /* Found it! */ + break; + } + + previous = record; + } + + if (record == gcvNULL) + { + /* Ouch! This record is not found? */ + gcmkONERROR(gcvSTATUS_INVALID_DATA); + } + + if (Bytes != gcvNULL) + { + /* Return size of record. */ + *Bytes = record->bytes; + } + + /* Remove record from database. */ + if (previous == gcvNULL) + { + Database->list[slot] = record->next; + } + else + { + previous->next = record->next; + } + + /* Insert record in free list. */ + record->next = Kernel->db->freeRecord; + Kernel->db->freeRecord = record; + + /* Release the database mutex. */ + gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex)); + + /* Success. */ + gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes)); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + /* Release the database mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex)); + } + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** gckKERNEL_FindRecord +** +** Find a database record from the database. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to a gckKERNEL object. +** +** gcsDATABASE_PTR Database +** Pointer to a database structure. +** +** gceDATABASE_TYPE Type +** Type of the record to remove. +** +** gctPOINTER Data +** Data of the record to remove. +** +** OUTPUT: +** +** gctSIZE_T_PTR Bytes +** Pointer to a variable that receives the size of the record deleted. +** Can be gcvNULL if the size is not required. 
+*/ +static gceSTATUS +gckKERNEL_FindRecord( + IN gckKERNEL Kernel, + IN gcsDATABASE_PTR Database, + IN gceDATABASE_TYPE Type, + IN gctPOINTER Data, + OUT gcsDATABASE_RECORD_PTR Record + ) +{ + gceSTATUS status; + gctBOOL acquired = gcvFALSE; + gcsDATABASE_RECORD_PTR record; + gctUINT32 slot = _GetSlot(Database, Data); + + gcmkHEADER_ARG("Kernel=0x%x Database=0x%x Type=%d Data=0x%x", + Kernel, Database, Type, Data); + + /* Acquire the database mutex. */ + gcmkONERROR( + gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE)); + acquired = gcvTRUE; + + /* Scan the database for this record. */ + for (record = Database->list[slot]; + record != gcvNULL; + record = record->next + ) + { + if ((record->type == Type) + && (record->data == Data) + ) + { + /* Found it! */ + break; + } + } + + if (record == gcvNULL) + { + /* Ouch! This record is not found? */ + gcmkONERROR(gcvSTATUS_INVALID_DATA); + } + + if (Record != gcvNULL) + { + /* Return information of record. */ + gcmkONERROR( + gckOS_MemCopy(Record, record, sizeof(gcsDATABASE_RECORD))); + } + + /* Release the database mutex. */ + gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex)); + + /* Success. */ + gcmkFOOTER_ARG("Record=0x%x", Record); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + /* Release the database mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex)); + } + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +#ifdef CONFIG_MCST +static void gckKERNEL_RemoveFromHash( + IN gckKERNEL Kernel, + IN gctSIZE_T slot, + IN gcsDATABASE_PTR database +) +{ + gcsDATABASE_PTR db = gcvNULL, prev = gcvNULL; + + /* Walk the hash list. */ + for (db = Kernel->db->db[slot]; + db != gcvNULL; + db = db->next) + { + if (db->processID == database->processID) + { + break; + } + prev = db; + } + + /* Remove the database from the hash list. 
*/ + if (prev) + { + prev->next = database->next; + } + else + { + Kernel->db->db[slot] = database->next; + } +} +#endif + +/******************************************************************************* +***** Public API **************************************************************/ + +/******************************************************************************* +** gckKERNEL_CreateProcessDB +** +** Create a new process database. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to a gckKERNEL object. +** +** gctUINT32 ProcessID +** Process ID used to identify the database. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckKERNEL_CreateProcessDB( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID + ) +{ + gceSTATUS status = gcvSTATUS_OK; + gcsDATABASE_PTR database = gcvNULL; + gctPOINTER pointer = gcvNULL; + gctBOOL acquired = gcvFALSE; + gctSIZE_T slot; + gctUINT32 i; + + gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID); + + /* Compute the hash for the database. */ + slot = ProcessID % gcmCOUNTOF(Kernel->db->db); + + /* Acquire the database mutex. */ + gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE)); + acquired = gcvTRUE; + + /* Walk the hash list. */ + for (database = Kernel->db->db[slot]; + database != gcvNULL; + database = database->next) + { + if (database->processID == ProcessID) + { + gctINT32 oldVal = 0; + + if (database->deleted) + { + gcmkFATAL("%s(%d): DB of Process=0x%x cannot be reentered since it was in deletion\n", + __FUNCTION__, __LINE__, ProcessID); + gcmkONERROR(gcvSTATUS_INVALID_REQUEST); + } + + gcmkVERIFY_OK(gckOS_AtomIncrement(Kernel->os, database->refs, &oldVal)); + goto OnExit; + } + } + + if (Kernel->db->freeDatabase) + { + /* Allocate a database from the free list. */ + database = Kernel->db->freeDatabase; + Kernel->db->freeDatabase = database->next; + } + else + { + /* Allocate a new database from the heap. 
*/ + gcmkONERROR(gckOS_Allocate(Kernel->os, + gcmSIZEOF(gcsDATABASE), + &pointer)); + + gckOS_ZeroMemory(pointer, gcmSIZEOF(gcsDATABASE)); + + database = pointer; + + gcmkONERROR(gckOS_CreateMutex(Kernel->os, &database->counterMutex)); + } + + /* Initialize the database. */ + /* Save the hash slot. */ + database->slot = slot; + database->processID = ProcessID; + database->vidMem.bytes = 0; + database->vidMem.maxBytes = 0; + database->vidMem.totalBytes = 0; + database->nonPaged.bytes = 0; + database->nonPaged.maxBytes = 0; + database->nonPaged.totalBytes = 0; + database->contiguous.bytes = 0; + database->contiguous.maxBytes = 0; + database->contiguous.totalBytes = 0; + database->mapMemory.bytes = 0; + database->mapMemory.maxBytes = 0; + database->mapMemory.totalBytes = 0; + database->mapUserMemory.bytes = 0; + database->mapUserMemory.maxBytes = 0; + database->mapUserMemory.totalBytes = 0; + + for (i = 0; i < gcmCOUNTOF(database->list); i++) + { + database->list[i] = gcvNULL; + } + + for (i = 0; i < gcvSURF_NUM_TYPES; i++) + { + database->vidMemType[i].bytes = 0; + database->vidMemType[i].maxBytes = 0; + database->vidMemType[i].totalBytes = 0; + } + + for (i = 0; i < gcvPOOL_NUMBER_OF_POOLS; i++) + { + database->vidMemPool[i].bytes = 0; + database->vidMemPool[i].maxBytes = 0; + database->vidMemPool[i].totalBytes = 0; + } + + gcmkASSERT(database->refs == gcvNULL); + gcmkONERROR(gckOS_AtomConstruct(Kernel->os, &database->refs)); + gcmkONERROR(gckOS_AtomSet(Kernel->os, database->refs, 1)); + + gcmkASSERT(database->handleDatabase == gcvNULL); + gcmkONERROR(gckKERNEL_CreateIntegerDatabase(Kernel, &database->handleDatabase)); + + gcmkASSERT(database->handleDatabaseMutex == gcvNULL); + gcmkONERROR(gckOS_CreateMutex(Kernel->os, &database->handleDatabaseMutex)); + +#if gcdPROCESS_ADDRESS_SPACE + gcmkASSERT(database->mmu == gcvNULL); + gcmkONERROR(gckMMU_Construct(Kernel, gcdMMU_SIZE, &database->mmu)); +#endif + +#if gcdSECURE_USER + { + gctINT idx; + gcskSECURE_CACHE * cache 
= &database->cache; + + /* Setup the linked list of cache nodes. */ + for (idx = 1; idx <= gcdSECURE_CACHE_SLOTS; ++idx) + { + cache->cache[idx].logical = gcvNULL; + +#if gcdSECURE_CACHE_METHOD != gcdSECURE_CACHE_TABLE + cache->cache[idx].prev = &cache->cache[idx - 1]; + cache->cache[idx].next = &cache->cache[idx + 1]; +# endif +#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH + cache->cache[idx].nextHash = gcvNULL; + cache->cache[idx].prevHash = gcvNULL; +# endif + } + +#if gcdSECURE_CACHE_METHOD != gcdSECURE_CACHE_TABLE + /* Setup the head and tail of the cache. */ + cache->cache[0].next = &cache->cache[1]; + cache->cache[0].prev = &cache->cache[gcdSECURE_CACHE_SLOTS]; + cache->cache[0].logical = gcvNULL; + + /* Fix up the head and tail pointers. */ + cache->cache[0].next->prev = &cache->cache[0]; + cache->cache[0].prev->next = &cache->cache[0]; +# endif + +#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH + /* Zero out the hash table. */ + for (idx = 0; idx < gcmCOUNTOF(cache->hash); ++idx) + { + cache->hash[idx].logical = gcvNULL; + cache->hash[idx].nextHash = gcvNULL; + } +# endif + + /* Initialize cache index. */ + cache->cacheIndex = gcvNULL; + cache->cacheFree = 1; + cache->cacheStamp = 0; + } +#endif + + /* Insert the database into the hash. */ + database->next = Kernel->db->db[slot]; + Kernel->db->db[slot] = database; + + /* Reset idle timer. */ + Kernel->db->lastIdle = 0; + +OnError: + if (gcmIS_ERROR(status)) + { + gcmkVERIFY_OK(gckKERNEL_DeinitDatabase(Kernel, database)); + +#ifdef CONFIG_MCST + gckKERNEL_RemoveFromHash(Kernel, slot, database); +#endif + + if (pointer) + { + gcmkOS_SAFE_FREE(Kernel->os, pointer); + } + } + +OnExit: + if (acquired) + { + /* Release the database mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex)); + } + + /* Return the status. 
*/ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** gckKERNEL_AddProcessDB +** +** Add a record to a process database. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to a gckKERNEL object. +** +** gctUINT32 ProcessID +** Process ID used to identify the database. +** +** gceDATABASE_TYPE TYPE +** Type of the record to add. +** +** gctPOINTER Pointer +** Data of the record to add. +** +** gctPHYS_ADDR Physical +** Physical address of the record to add. +** +** gctSIZE_T Size +** Size of the record to add. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckKERNEL_AddProcessDB( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN gceDATABASE_TYPE Type, + IN gctPOINTER Pointer, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Size + ) +{ + gceSTATUS status; + gcsDATABASE_PTR database; + gcsDATABASE_RECORD_PTR record = gcvNULL; + gcsDATABASE_COUNTERS * count; + gctUINT32 vidMemType; + gcePOOL vidMemPool; + + gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d Type=%d Pointer=0x%x " + "Physical=0x%x Size=%lu", + Kernel, ProcessID, Type, Pointer, Physical, Size); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + + /* Decode type. */ + vidMemType = (Type & gcdDB_VIDEO_MEMORY_TYPE_MASK) >> gcdDB_VIDEO_MEMORY_TYPE_SHIFT; + vidMemPool = (Type & gcdDB_VIDEO_MEMORY_POOL_MASK) >> gcdDB_VIDEO_MEMORY_POOL_SHIFT; + + Type &= gcdDATABASE_TYPE_MASK; + + /* Special case the idle record. */ + if (Type == gcvDB_IDLE) + { + gctUINT64 time; + + /* Get the current profile time. */ + gcmkONERROR(gckOS_GetProfileTick(&time)); + + if ((ProcessID == 0) && (Kernel->db->lastIdle != 0)) + { + /* Out of idle, adjust time it was idle. */ + Kernel->db->idleTime += time - Kernel->db->lastIdle; + Kernel->db->lastIdle = 0; + } + else if (ProcessID == 1) + { + /* Save current idle time. */ + Kernel->db->lastIdle = time; + } + +#if gcdDYNAMIC_SPEED + { + /* Test for first call. 
*/ + if (Kernel->db->lastSlowdown == 0) + { + /* Save milliseconds. */ + Kernel->db->lastSlowdown = time; + Kernel->db->lastSlowdownIdle = Kernel->db->idleTime; + } + else + { + /* Compute ellapsed time in milliseconds. */ + gctUINT delta = gckOS_ProfileToMS(time - Kernel->db->lastSlowdown); + + /* Test for end of period. */ + if (delta >= gcdDYNAMIC_SPEED) + { + /* Compute number of idle milliseconds. */ + gctUINT idle = gckOS_ProfileToMS( + Kernel->db->idleTime - Kernel->db->lastSlowdownIdle); + + /* Broadcast to slow down the GPU. */ + gcmkONERROR(gckOS_BroadcastCalibrateSpeed(Kernel->os, + Kernel->hardware, + idle, + delta)); + + /* Save current time. */ + Kernel->db->lastSlowdown = time; + Kernel->db->lastSlowdownIdle = Kernel->db->idleTime; + } + } + } +#endif + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + } + + /* Verify the arguments. */ + gcmkVERIFY_ARGUMENT(Pointer != gcvNULL); + + /* Find the database. */ + gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database)); + + /* Create a new record in the database. */ + gcmkONERROR(gckKERNEL_NewRecord(Kernel, database, _GetSlot(database, Pointer), &record)); + + /* Initialize the record. */ + record->kernel = Kernel; + record->type = Type; + record->data = Pointer; + record->physical = Physical; + record->bytes = Size; + + /* Get pointer to counters. */ + switch (Type) + { + case gcvDB_VIDEO_MEMORY: + count = &database->vidMem; + break; + + case gcvDB_NON_PAGED: + count = &database->nonPaged; + break; + + case gcvDB_CONTIGUOUS: + count = &database->contiguous; + break; + + case gcvDB_MAP_MEMORY: + count = &database->mapMemory; + break; + + case gcvDB_MAP_USER_MEMORY: + count = &database->mapUserMemory; + break; + + default: + count = gcvNULL; + break; + } + + gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, database->counterMutex, gcvINFINITE)); + + if (count != gcvNULL) + { + /* Adjust counters. 
*/ + count->totalBytes += Size; + count->bytes += Size; + count->allocCount++; + + if (count->bytes > count->maxBytes) + { + count->maxBytes = count->bytes; + } + } + + if (Type == gcvDB_VIDEO_MEMORY) + { + count = &database->vidMemType[vidMemType]; + + /* Adjust counters. */ + count->totalBytes += Size; + count->bytes += Size; + count->allocCount++; + + if (count->bytes > count->maxBytes) + { + count->maxBytes = count->bytes; + } + + count = &database->vidMemPool[vidMemPool]; + + /* Adjust counters. */ + count->totalBytes += Size; + count->bytes += Size; + count->allocCount++; + + if (count->bytes > count->maxBytes) + { + count->maxBytes = count->bytes; + } + } + + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, database->counterMutex)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** gckKERNEL_RemoveProcessDB +** +** Remove a record from a process database. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to a gckKERNEL object. +** +** gctUINT32 ProcessID +** Process ID used to identify the database. +** +** gceDATABASE_TYPE TYPE +** Type of the record to remove. +** +** gctPOINTER Pointer +** Data of the record to remove. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckKERNEL_RemoveProcessDB( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN gceDATABASE_TYPE Type, + IN gctPOINTER Pointer + ) +{ + gceSTATUS status; + gcsDATABASE_PTR database; + gctSIZE_T bytes = 0; + gctUINT32 vidMemType; + gcePOOL vidMemPool; + + gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d Type=%d Pointer=0x%x", + Kernel, ProcessID, Type, Pointer); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + gcmkVERIFY_ARGUMENT(Pointer != gcvNULL); + + /* Decode type. 
*/ + vidMemType = (Type & gcdDB_VIDEO_MEMORY_TYPE_MASK) >> gcdDB_VIDEO_MEMORY_TYPE_SHIFT; + vidMemPool = (Type & gcdDB_VIDEO_MEMORY_POOL_MASK) >> gcdDB_VIDEO_MEMORY_POOL_SHIFT; + + Type &= gcdDATABASE_TYPE_MASK; + + /* Find the database. */ + gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database)); + + /* Delete the record. */ + gcmkONERROR( + gckKERNEL_DeleteRecord(Kernel, database, Type, Pointer, &bytes)); + + gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, database->counterMutex, gcvINFINITE)); + + /* Update counters. */ + switch (Type) + { + case gcvDB_VIDEO_MEMORY: + database->vidMem.bytes -= bytes; + database->vidMem.freeCount++; + database->vidMemType[vidMemType].bytes -= bytes; + database->vidMemType[vidMemType].freeCount++; + database->vidMemPool[vidMemPool].bytes -= bytes; + database->vidMemPool[vidMemPool].freeCount++; + break; + + case gcvDB_NON_PAGED: + database->nonPaged.bytes -= bytes; + database->nonPaged.freeCount++; + break; + + case gcvDB_CONTIGUOUS: + database->contiguous.bytes -= bytes; + database->contiguous.freeCount++; + break; + + case gcvDB_MAP_MEMORY: + database->mapMemory.bytes -= bytes; + database->mapMemory.freeCount++; + break; + + case gcvDB_MAP_USER_MEMORY: + database->mapUserMemory.bytes -= bytes; + database->mapUserMemory.freeCount++; + break; + + default: + break; + } + + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, database->counterMutex)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** gckKERNEL_FindProcessDB +** +** Find a record from a process database. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to a gckKERNEL object. +** +** gctUINT32 ProcessID +** Process ID used to identify the database. +** +** gceDATABASE_TYPE TYPE +** Type of the record to remove. +** +** gctPOINTER Pointer +** Data of the record to remove. 
+** +** OUTPUT: +** +** gcsDATABASE_RECORD_PTR Record +** Copy of record. +*/ +gceSTATUS +gckKERNEL_FindProcessDB( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN gctUINT32 ThreadID, + IN gceDATABASE_TYPE Type, + IN gctPOINTER Pointer, + OUT gcsDATABASE_RECORD_PTR Record + ) +{ + gceSTATUS status; + gcsDATABASE_PTR database; + + gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d Type=%d Pointer=0x%x", + Kernel, ProcessID, ThreadID, Type, Pointer); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + gcmkVERIFY_ARGUMENT(Pointer != gcvNULL); + + /* Find the database. */ + gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database)); + + /* Find the record. */ + gcmkONERROR( + gckKERNEL_FindRecord(Kernel, database, Type, Pointer, Record)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** gckKERNEL_DestroyProcessDB +** +** Destroy a process database. If the database contains any records, the data +** inside those records will be deleted as well. This aids in the cleanup if +** a process has died unexpectedly or has memory leaks. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to a gckKERNEL object. +** +** gctUINT32 ProcessID +** Process ID used to identify the database. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckKERNEL_DestroyProcessDB( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID + ) +{ + gceSTATUS status = gcvSTATUS_OK; + gckKERNEL kernel = Kernel; + gcsDATABASE_PTR previous = gcvNULL; + gcsDATABASE_PTR database = gcvNULL; + gcsDATABASE_PTR db = gcvNULL; + gctBOOL acquired = gcvFALSE; + gctSIZE_T slot; + gctUINT32 i; + + gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + + /* Compute the hash for the database. 
*/ + slot = ProcessID % gcmCOUNTOF(Kernel->db->db); + + /* Acquire the database mutex. */ + gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE)); + acquired = gcvTRUE; + + /* Walk the hash list. */ + for (database = Kernel->db->db[slot]; + database != gcvNULL; + database = database->next) + { + if (database->processID == ProcessID) + { + break; + } + } + + if (database) + { + gctINT32 oldVal = 0; + gcmkONERROR(gckOS_AtomDecrement(Kernel->os, database->refs, &oldVal)); + if (oldVal != 1) + { + goto OnExit; + } + + /* Mark it for delete so disallow reenter until really delete it */ + gcmkASSERT(!database->deleted); + database->deleted = gcvTRUE; + } + else + { + gcmkFATAL("%s(%d): DB destroy of Process=0x%x cannot match with creation\n", + __FUNCTION__, __LINE__, ProcessID); + gcmkONERROR(gcvSTATUS_NOT_FOUND); + } + + /* Cannot remove the database from the hash list + ** since later records deinit need to access from the hash + */ + + gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex)); + acquired = gcvFALSE; + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE, + "DB(%d): VidMem: total=%lu max=%lu", + ProcessID, database->vidMem.totalBytes, + database->vidMem.maxBytes); + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE, + "DB(%d): NonPaged: total=%lu max=%lu", + ProcessID, database->nonPaged.totalBytes, + database->nonPaged.maxBytes); + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE, + "DB(%d): Contiguous: total=%lu max=%lu", + ProcessID, database->contiguous.totalBytes, + database->contiguous.maxBytes); + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE, + "DB(%d): Idle time=%llu", + ProcessID, Kernel->db->idleTime); + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE, + "DB(%d): Map: total=%lu max=%lu", + ProcessID, database->mapMemory.totalBytes, + database->mapMemory.maxBytes); + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE, + "DB(%d): Map: total=%lu max=%lu", + ProcessID, database->mapUserMemory.totalBytes, + 
database->mapUserMemory.maxBytes); + + if (database->list != gcvNULL) + { + gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE, + "Process %d has entries in its database:", + ProcessID); + } + + for (i = 0; i < gcmCOUNTOF(database->list); i++) + { + gcsDATABASE_RECORD_PTR record, next; + + /* Walk all records. */ + for (record = database->list[i]; record != gcvNULL; record = next) + { + gctBOOL asynchronous = gcvTRUE; + gckVIDMEM_NODE nodeObject; + gctPHYS_ADDR physical; + gctUINT32 handle; + + /* Next next record. */ + next = record->next; + + /* Dispatch on record type. */ + switch (record->type) + { + case gcvDB_VIDEO_MEMORY: + gcmkERR_BREAK(gckVIDMEM_HANDLE_Lookup(record->kernel, + ProcessID, + gcmPTR2INT32(record->data), + &nodeObject)); + + /* Free the video memory. */ + gcmkVERIFY_OK(gckVIDMEM_HANDLE_Dereference(record->kernel, + ProcessID, + gcmPTR2INT32(record->data))); + + gcmkVERIFY_OK(gckVIDMEM_NODE_Dereference(record->kernel, + nodeObject)); + + gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE, + "DB: VIDEO_MEMORY 0x%x (status=%d)", + record->data, status); + break; + + case gcvDB_NON_PAGED: + physical = gcmNAME_TO_PTR(record->physical); + /* Unmap user logical memory first. */ + status = gckOS_UnmapUserLogical(Kernel->os, + physical, + record->bytes, + record->data); + + /* Free the non paged memory. */ + status = gckEVENT_FreeNonPagedMemory(record->kernel->eventObj, + record->bytes, + physical, + record->data, + gcvKERNEL_PIXEL); + gcmRELEASE_NAME(record->physical); + + gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE, + "DB: NON_PAGED 0x%x, bytes=%lu (status=%d)", + record->data, record->bytes, status); + break; + + case gcvDB_COMMAND_BUFFER: + /* Free the command buffer. 
*/ + status = gckEVENT_DestroyVirtualCommandBuffer(record->kernel->eventObj, + record->bytes, + gcmNAME_TO_PTR(record->physical), + record->data, + gcvKERNEL_PIXEL); + gcmRELEASE_NAME(record->physical); + + gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE, + "DB: COMMAND_BUFFER 0x%x, bytes=%lu (status=%d)", + record->data, record->bytes, status); + break; + + case gcvDB_CONTIGUOUS: + physical = gcmNAME_TO_PTR(record->physical); + /* Unmap user logical memory first. */ + status = gckOS_UnmapUserLogical(Kernel->os, + physical, + record->bytes, + record->data); + + /* Free the contiguous memory. */ + status = gckEVENT_FreeContiguousMemory(record->kernel->eventObj, + record->bytes, + physical, + record->data, + gcvKERNEL_PIXEL); + gcmRELEASE_NAME(record->physical); + + gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE, + "DB: CONTIGUOUS 0x%x bytes=%lu (status=%d)", + record->data, record->bytes, status); + break; + + case gcvDB_SIGNAL: +#if USE_NEW_LINUX_SIGNAL + status = gcvSTATUS_NOT_SUPPORTED; +#else + /* Free the user signal. 
*/ + status = gckOS_DestroyUserSignal(Kernel->os, + gcmPTR2INT32(record->data)); +#endif /* USE_NEW_LINUX_SIGNAL */ + + gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE, + "DB: SIGNAL %d (status=%d)", + (gctINT)(gctUINTPTR_T)record->data, status); + break; + + case gcvDB_VIDEO_MEMORY_LOCKED: + handle = gcmPTR2INT32(record->data); + + gcmkERR_BREAK(gckVIDMEM_HANDLE_Lookup(record->kernel, + ProcessID, + handle, + &nodeObject)); + + /* Unlock what we still locked */ + status = gckVIDMEM_Unlock(record->kernel, + nodeObject, + nodeObject->type, + &asynchronous); + + { + gcmkVERIFY_OK(gckVIDMEM_HANDLE_Dereference(record->kernel, + ProcessID, + handle)); + + if (gcmIS_SUCCESS(status) && (gcvTRUE == asynchronous)) + { + status = gckEVENT_Unlock(record->kernel->eventObj, + gcvKERNEL_PIXEL, + nodeObject, + nodeObject->type); + } + else + { + gcmkVERIFY_OK(gckVIDMEM_NODE_Dereference(record->kernel, + nodeObject)); + } + } + + gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE, + "DB: VIDEO_MEMORY_LOCKED 0x%x (status=%d)", + record->data, status); + break; + + case gcvDB_CONTEXT: + status = gckCOMMAND_Detach(record->kernel->command, gcmNAME_TO_PTR(record->data)); + gcmRELEASE_NAME(record->data); + + gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE, + "DB: CONTEXT 0x%x (status=%d)", + record->data, status); + break; + + case gcvDB_MAP_MEMORY: + /* Unmap memory. 
*/ + status = gckKERNEL_UnmapMemory(record->kernel, + record->physical, + record->bytes, + record->data, + ProcessID); + + gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE, + "DB: MAP MEMORY %d (status=%d)", + gcmPTR2INT32(record->data), status); + break; + + case gcvDB_MAP_USER_MEMORY: + status = gckOS_UnmapUserMemory(Kernel->os, + Kernel->core, + record->physical, + record->bytes, + gcmNAME_TO_PTR(record->data), + 0); + gcmRELEASE_NAME(record->data); + + gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE, + "DB: MAP USER MEMORY %d (status=%d)", + gcmPTR2INT32(record->data), status); + break; + + case gcvDB_SHBUF: + /* Free shared buffer. */ + status = gckKERNEL_DestroyShBuffer(record->kernel, + (gctSHBUF) record->data); + + gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE, + "DB: SHBUF %u (status=%d)", + (gctUINT32)(gctUINTPTR_T) record->data, status); + break; + + default: + gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_DATABASE, + "DB: Correcupted record=0x%08x type=%d", + record, record->type); + break; + } + + /* Delete the record. */ + gcmkONERROR(gckKERNEL_DeleteRecord(Kernel, + database, + record->type, + record->data, + gcvNULL)); + } + } + + /* Acquire the database mutex. */ + gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE)); + acquired = gcvTRUE; + + /* Walk the hash list. */ + for (db = Kernel->db->db[slot]; + db != gcvNULL; + db = db->next) + { + if (db->processID == ProcessID) + { + break; + } + previous = db; + } + + if (db != database || !db->deleted) + { + gcmkFATAL("%s(%d): DB of Process=0x%x corrupted after found in deletion\n", + __FUNCTION__, __LINE__, ProcessID); + gcmkONERROR(gcvSTATUS_NOT_FOUND); + } + + /* Remove the database from the hash list. */ + if (previous) + { + previous->next = database->next; + } + else + { + Kernel->db->db[slot] = database->next; + } + + /* Deinit current database. 
*/ + gcmkVERIFY_OK(gckKERNEL_DeinitDatabase(Kernel, database)); + + if (Kernel->db->lastDatabase) + { + /* Insert last database to the free list. */ + Kernel->db->lastDatabase->next = Kernel->db->freeDatabase; + Kernel->db->freeDatabase = Kernel->db->lastDatabase; + } + + /* Update last database to current one. */ + Kernel->db->lastDatabase = database; + +OnError: +OnExit: + if (acquired) + { + /* Release the database mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex)); + } + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** gckKERNEL_QueryProcessDB +** +** Query a process database for the current usage of a particular record type. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to a gckKERNEL object. +** +** gctUINT32 ProcessID +** Process ID used to identify the database. +** +** gctBOOL LastProcessID +** gcvTRUE if searching for the last known process ID. gcvFALSE if +** we need to search for the process ID specified by the ProcessID +** argument. +** +** gceDATABASE_TYPE Type +** Type of the record to query. +** +** OUTPUT: +** +** gcuDATABASE_INFO * Info +** Pointer to a variable that receives the requested information. +*/ +gceSTATUS +gckKERNEL_QueryProcessDB( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN gctBOOL LastProcessID, + IN gceDATABASE_TYPE Type, + OUT gcuDATABASE_INFO * Info + ) +{ + gceSTATUS status; + gcsDATABASE_PTR database; + gcePOOL vidMemPool; + + gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d Type=%d Info=0x%x", + Kernel, ProcessID, Type, Info); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + gcmkVERIFY_ARGUMENT(Info != gcvNULL); + + /* Deocde pool. */ + vidMemPool = (Type & gcdDB_VIDEO_MEMORY_POOL_MASK) >> gcdDB_VIDEO_MEMORY_POOL_SHIFT; + + Type &= gcdDATABASE_TYPE_MASK; + + /* Find the database. 
*/ + gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, LastProcessID, &database)); + + gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, database->counterMutex, gcvINFINITE)); + + /* Get pointer to counters. */ + switch (Type) + { + case gcvDB_VIDEO_MEMORY: + if (vidMemPool != gcvPOOL_UNKNOWN) + { + gckOS_MemCopy(&Info->counters, + &database->vidMemPool[vidMemPool], + gcmSIZEOF(database->vidMemPool[vidMemPool])); + } + else + { + gckOS_MemCopy(&Info->counters, + &database->vidMem, + gcmSIZEOF(database->vidMem)); + } + break; + + case gcvDB_NON_PAGED: + gckOS_MemCopy(&Info->counters, + &database->nonPaged, + gcmSIZEOF(database->vidMem)); + break; + + case gcvDB_CONTIGUOUS: + gckOS_MemCopy(&Info->counters, + &database->contiguous, + gcmSIZEOF(database->vidMem)); + break; + + case gcvDB_IDLE: + Info->time = Kernel->db->idleTime; + Kernel->db->idleTime = 0; + break; + + case gcvDB_MAP_MEMORY: + gckOS_MemCopy(&Info->counters, + &database->mapMemory, + gcmSIZEOF(database->mapMemory)); + break; + + case gcvDB_MAP_USER_MEMORY: + gckOS_MemCopy(&Info->counters, + &database->mapUserMemory, + gcmSIZEOF(database->mapUserMemory)); + break; + + default: + break; + } + + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, database->counterMutex)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckKERNEL_FindHandleDatbase( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + OUT gctPOINTER * HandleDatabase, + OUT gctPOINTER * HandleDatabaseMutex + ) +{ + gceSTATUS status; + gcsDATABASE_PTR database; + + gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", + Kernel, ProcessID); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + + /* Find the database. */ + gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database)); + + *HandleDatabase = database->handleDatabase; + *HandleDatabaseMutex = database->handleDatabaseMutex; + + /* Success. 
*/ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +#if gcdPROCESS_ADDRESS_SPACE +gceSTATUS +gckKERNEL_GetProcessMMU( + IN gckKERNEL Kernel, + OUT gckMMU * Mmu + ) +{ + gceSTATUS status; + gcsDATABASE_PTR database; + gctUINT32 processID; + + gcmkONERROR(gckOS_GetProcessID(&processID)); + + gcmkONERROR(gckKERNEL_FindDatabase(Kernel, processID, gcvFALSE, &database)); + + *Mmu = database->mmu; + + return gcvSTATUS_OK; + +OnError: + return status; +} +#endif + +#if gcdSECURE_USER +/******************************************************************************* +** gckKERNEL_GetProcessDBCache +** +** Get teh secure cache from a process database. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to a gckKERNEL object. +** +** gctUINT32 ProcessID +** Process ID used to identify the database. +** +** OUTPUT: +** +** gcskSECURE_CACHE_PTR * Cache +** Pointer to a variable that receives the secure cache pointer. +*/ +gceSTATUS +gckKERNEL_GetProcessDBCache( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + OUT gcskSECURE_CACHE_PTR * Cache + ) +{ + gceSTATUS status; + gcsDATABASE_PTR database; + + gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + gcmkVERIFY_ARGUMENT(Cache != gcvNULL); + + /* Find the database. */ + gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database)); + + /* Return the pointer to the cache. */ + *Cache = &database->cache; + + /* Success. */ + gcmkFOOTER_ARG("*Cache=0x%x", *Cache); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} +#endif + +gceSTATUS +gckKERNEL_DumpProcessDB( + IN gckKERNEL Kernel + ) +{ + gcsDATABASE_PTR database; + gctINT i, pid; + gctUINT8 name[24]; + + gcmkHEADER_ARG("Kernel=0x%x", Kernel); + + /* Acquire the database mutex. 
*/ + gcmkVERIFY_OK( + gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE)); + + gcmkPRINT("**************************\n"); + gcmkPRINT("*** PROCESS DB DUMP ***\n"); + gcmkPRINT("**************************\n"); + + gcmkPRINT_N(8, "%-8s%s\n", "PID", "NAME"); + /* Walk the databases. */ + for (i = 0; i < gcmCOUNTOF(Kernel->db->db); ++i) + { + for (database = Kernel->db->db[i]; + database != gcvNULL; + database = database->next) + { + pid = database->processID; + + gcmkVERIFY_OK(gckOS_ZeroMemory(name, gcmSIZEOF(name))); + + gcmkVERIFY_OK(gckOS_GetProcessNameByPid(pid, gcmSIZEOF(name), name)); + + gcmkPRINT_N(8, "%-8d%s\n", pid, name); + } + } + + /* Release the database mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +void +_DumpCounter( + IN gcsDATABASE_COUNTERS * Counter, + IN gctCONST_STRING Name + ) +{ + gcmkPRINT("%s:", Name); + gcmkPRINT(" Currently allocated : %10lld", Counter->bytes); + gcmkPRINT(" Maximum allocated : %10lld", Counter->maxBytes); + gcmkPRINT(" Total allocated : %10lld", Counter->totalBytes); +} + +gceSTATUS +gckKERNEL_DumpVidMemUsage( + IN gckKERNEL Kernel, + IN gctINT32 ProcessID + ) +{ + gceSTATUS status; + gcsDATABASE_PTR database; + gcsDATABASE_COUNTERS * counter; + gctUINT32 i = 0; + + static gctCONST_STRING surfaceTypes[] = { + "UNKNOWN", + "INDEX", + "VERTEX", + "TEXTURE", + "RENDER_TARGET", + "DEPTH", + "BITMAP", + "TILE_STATUS", + "IMAGE", + "MASK", + "SCISSOR", + "HIERARCHICAL_DEPTH", + "ICACHE", + "TXDESC", + "FENCE", + "TFBHEADER", + }; + + gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", + Kernel, ProcessID); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + + /* Find the database. */ + gcmkONERROR( + gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database)); + + gcmkPRINT("VidMem Usage (Process %d):", ProcessID); + + /* Get pointer to counters. 
*/ + counter = &database->vidMem; + + _DumpCounter(counter, "Total Video Memory"); + + for (i = 0; i < gcvSURF_NUM_TYPES; i++) + { + counter = &database->vidMemType[i]; + + _DumpCounter(counter, surfaceTypes[i]); + } + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} diff --git a/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_debug.c b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_debug.c new file mode 100644 index 000000000000..77d275056e3e --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_debug.c @@ -0,0 +1,2866 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#include "gc_hal_kernel_precomp.h" +#include + +/******************************************************************************\ +******************************** Debug Variables ******************************* +\******************************************************************************/ + +static gceSTATUS _lastError = gcvSTATUS_OK; +static gctUINT32 _debugLevel = gcvLEVEL_ERROR; +/* +_debugZones config value +Please Reference define in gc_hal_base.h +*/ +static gctUINT32 _debugZones = gcvZONE_NONE; + +/******************************************************************************\ +********************************* Debug Switches ******************************* +\******************************************************************************/ + +/* + gcdBUFFERED_OUTPUT + + When set to non-zero, all output is collected into a buffer with the + specified size. Once the buffer gets full, the debug buffer will be + printed to the console. gcdBUFFERED_SIZE determines the size of the buffer. +*/ +#define gcdBUFFERED_OUTPUT 0 + +/* + gcdBUFFERED_SIZE + + When set to non-zero, all output is collected into a buffer with the + specified size. Once the buffer gets full, the debug buffer will be + printed to the console. +*/ +#define gcdBUFFERED_SIZE (1024 * 1024 * 2) + +/* + gcdDMA_BUFFER_COUNT + + If greater then zero, the debugger will attempt to find the command buffer + where DMA is currently executing and then print this buffer and + (gcdDMA_BUFFER_COUNT - 1) buffers before the current one. If set to zero + or the current buffer is not found, all buffers are printed. +*/ +#define gcdDMA_BUFFER_COUNT 0 + +/* + gcdTHREAD_BUFFERS + + When greater then one, will accumulate messages from the specified number + of threads in separate output buffers. 
+*/ +#define gcdTHREAD_BUFFERS 1 + +/* + gcdENABLE_OVERFLOW + + When set to non-zero, and the output buffer gets full, instead of being + printed, it will be allowed to overflow removing the oldest messages. +*/ +#define gcdENABLE_OVERFLOW 1 + +/* + gcdSHOW_LINE_NUMBER + + When enabledm each print statement will be preceeded with the current + line number. +*/ +#define gcdSHOW_LINE_NUMBER 0 + +/* + gcdSHOW_PROCESS_ID + + When enabledm each print statement will be preceeded with the current + process ID. +*/ +#define gcdSHOW_PROCESS_ID 0 + +/* + gcdSHOW_THREAD_ID + + When enabledm each print statement will be preceeded with the current + thread ID. +*/ +#define gcdSHOW_THREAD_ID 0 + +/* + gcdSHOW_TIME + + When enabled each print statement will be preceeded with the current + high-resolution time. +*/ +#define gcdSHOW_TIME 0 + + +/******************************************************************************\ +****************************** Miscellaneous Macros **************************** +\******************************************************************************/ + +#if gcmIS_DEBUG(gcdDEBUG_TRACE) +# define gcmDBGASSERT(Expression, Format, Value) \ + if (!(Expression)) \ + { \ + _DirectPrint( \ + "*** gcmDBGASSERT ***************************\n" \ + " function : %s\n" \ + " line : %d\n" \ + " expression : " #Expression "\n" \ + " actual value : " Format "\n", \ + __FUNCTION__, __LINE__, Value \ + ); \ + } +#else +# define gcmDBGASSERT(Expression, Format, Value) +#endif + +#define gcmPTRALIGNMENT(Pointer, Alignemnt) \ +( \ + gcmALIGN(gcmPTR2INT32(Pointer), Alignemnt) - gcmPTR2INT32(Pointer) \ +) + +#if gcdALIGNBYSIZE +# define gcmISALIGNED(Offset, Alignment) \ + (((Offset) & ((Alignment) - 1)) == 0) + +# define gcmkALIGNPTR(Type, Pointer, Alignment) \ + Pointer = (Type) gcmINT2PTR(gcmALIGN(gcmPTR2INT32(Pointer), Alignment)) +#else +# define gcmISALIGNED(Offset, Alignment) \ + gcvTRUE + +# define gcmkALIGNPTR(Type, Pointer, Alignment) +#endif + +#define 
gcmALIGNSIZE(Offset, Size) \ + ((Size - Offset) + Size) + +#define gcdHAVEPREFIX \ +( \ + gcdSHOW_TIME \ + || gcdSHOW_LINE_NUMBER \ + || gcdSHOW_PROCESS_ID \ + || gcdSHOW_THREAD_ID \ +) + +#if gcdHAVEPREFIX + +# define gcdOFFSET 0 + +#if gcdSHOW_TIME +#if gcmISALIGNED(gcdOFFSET, 8) +# define gcdTIMESIZE gcmSIZEOF(gctUINT64) +# elif gcdOFFSET == 4 +# define gcdTIMESIZE gcmALIGNSIZE(4, gcmSIZEOF(gctUINT64)) +# else +# error "Unexpected offset value." +# endif +# undef gcdOFFSET +# define gcdOFFSET 8 +#if !defined(gcdPREFIX_LEADER) +# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT64) +# define gcdTIMEFORMAT "0x%016llX" +# else +# define gcdTIMEFORMAT ", 0x%016llX" +# endif +# else +# define gcdTIMESIZE 0 +# define gcdTIMEFORMAT +# endif + +#if gcdSHOW_LINE_NUMBER +#if gcmISALIGNED(gcdOFFSET, 8) +# define gcdNUMSIZE gcmSIZEOF(gctUINT64) +# elif gcdOFFSET == 4 +# define gcdNUMSIZE gcmALIGNSIZE(4, gcmSIZEOF(gctUINT64)) +# else +# error "Unexpected offset value." +# endif +# undef gcdOFFSET +# define gcdOFFSET 8 +#if !defined(gcdPREFIX_LEADER) +# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT64) +# define gcdNUMFORMAT "%8llu" +# else +# define gcdNUMFORMAT ", %8llu" +# endif +# else +# define gcdNUMSIZE 0 +# define gcdNUMFORMAT +# endif + +#if gcdSHOW_PROCESS_ID +#if gcmISALIGNED(gcdOFFSET, 4) +# define gcdPIDSIZE gcmSIZEOF(gctUINT32) +# else +# error "Unexpected offset value." +# endif +# undef gcdOFFSET +# define gcdOFFSET 4 +#if !defined(gcdPREFIX_LEADER) +# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT32) +# define gcdPIDFORMAT "pid=%5d" +# else +# define gcdPIDFORMAT ", pid=%5d" +# endif +# else +# define gcdPIDSIZE 0 +# define gcdPIDFORMAT +# endif + +#if gcdSHOW_THREAD_ID +#if gcmISALIGNED(gcdOFFSET, 4) +# define gcdTIDSIZE gcmSIZEOF(gctUINT32) +# else +# error "Unexpected offset value." 
+# endif +# undef gcdOFFSET +# define gcdOFFSET 4 +#if !defined(gcdPREFIX_LEADER) +# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT32) +# define gcdTIDFORMAT "tid=%5d" +# else +# define gcdTIDFORMAT ", tid=%5d" +# endif +# else +# define gcdTIDSIZE 0 +# define gcdTIDFORMAT +# endif + +# define gcdPREFIX_SIZE \ + ( \ + gcdTIMESIZE \ + + gcdNUMSIZE \ + + gcdPIDSIZE \ + + gcdTIDSIZE \ + ) + + static const char * _prefixFormat = + "[" + gcdTIMEFORMAT + gcdNUMFORMAT + gcdPIDFORMAT + gcdTIDFORMAT + "] "; + +#else + +# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT32) +# define gcdPREFIX_SIZE 0 + +#endif + +/* Assumed largest variable argument leader size. */ +#define gcdVARARG_LEADER gcmSIZEOF(gctUINT64) + +/* Alignnments. */ +#if gcdALIGNBYSIZE +# define gcdPREFIX_ALIGNMENT gcdPREFIX_LEADER +# define gcdVARARG_ALIGNMENT gcdVARARG_LEADER +#else +# define gcdPREFIX_ALIGNMENT 0 +# define gcdVARARG_ALIGNMENT 0 +#endif + +#if gcdBUFFERED_OUTPUT +# define gcdOUTPUTPREFIX _AppendPrefix +# define gcdOUTPUTSTRING _AppendString +# define gcdOUTPUTCOPY _AppendCopy +# define gcdOUTPUTBUFFER _AppendBuffer +#else +# define gcdOUTPUTPREFIX _PrintPrefix +# define gcdOUTPUTSTRING _PrintString +# define gcdOUTPUTCOPY _PrintString +# define gcdOUTPUTBUFFER _PrintBuffer +#endif + +/******************************************************************************\ +****************************** Private Structures ****************************** +\******************************************************************************/ + +typedef enum _gceBUFITEM +{ + gceBUFITEM_NONE, + gcvBUFITEM_PREFIX, + gcvBUFITEM_STRING, + gcvBUFITEM_COPY, + gcvBUFITEM_BUFFER +} +gceBUFITEM; + +/* Common item head/buffer terminator. */ +typedef struct _gcsBUFITEM_HEAD * gcsBUFITEM_HEAD_PTR; +typedef struct _gcsBUFITEM_HEAD +{ + gceBUFITEM type; +} +gcsBUFITEM_HEAD; + +/* String prefix (for ex. 
[ 1,tid=0x019A]) */ +typedef struct _gcsBUFITEM_PREFIX * gcsBUFITEM_PREFIX_PTR; +typedef struct _gcsBUFITEM_PREFIX +{ + gceBUFITEM type; +#if gcdHAVEPREFIX + gctPOINTER prefixData; +#endif +} +gcsBUFITEM_PREFIX; + +/* Buffered string. */ +typedef struct _gcsBUFITEM_STRING * gcsBUFITEM_STRING_PTR; +typedef struct _gcsBUFITEM_STRING +{ + gceBUFITEM type; + gctINT indent; + gctCONST_STRING message; + gctPOINTER messageData; + gctUINT messageDataSize; +} +gcsBUFITEM_STRING; + +/* Buffered string (copy of the string is included with the record). */ +typedef struct _gcsBUFITEM_COPY * gcsBUFITEM_COPY_PTR; +typedef struct _gcsBUFITEM_COPY +{ + gceBUFITEM type; + gctINT indent; + gctPOINTER messageData; + gctUINT messageDataSize; +} +gcsBUFITEM_COPY; + +/* Memory buffer. */ +typedef struct _gcsBUFITEM_BUFFER * gcsBUFITEM_BUFFER_PTR; +typedef struct _gcsBUFITEM_BUFFER +{ + gceBUFITEM type; + gctINT indent; + gceDUMP_BUFFER bufferType; + +#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1) + gctUINT32 dmaAddress; +#endif + + gctUINT dataSize; + gctUINT32 address; +#if gcdHAVEPREFIX + gctPOINTER prefixData; +#endif +} +gcsBUFITEM_BUFFER; + +typedef struct _gcsBUFFERED_OUTPUT * gcsBUFFERED_OUTPUT_PTR; +typedef struct _gcsBUFFERED_OUTPUT +{ +#if gcdTHREAD_BUFFERS > 1 + gctUINT32 threadID; +#endif + +#if gcdSHOW_LINE_NUMBER + gctUINT64 lineNumber; +#endif + + gctINT indent; + +#if gcdBUFFERED_OUTPUT + gctINT start; + gctINT index; + gctINT count; + gctUINT8 buffer[gcdBUFFERED_SIZE]; +#endif + + gcsBUFFERED_OUTPUT_PTR prev; + gcsBUFFERED_OUTPUT_PTR next; +} +gcsBUFFERED_OUTPUT; + +typedef gctUINT (* gcfPRINTSTRING) ( + IN gcsBUFFERED_OUTPUT_PTR OutputBuffer, + IN gcsBUFITEM_HEAD_PTR Item + ); + +typedef gctINT (* gcfGETITEMSIZE) ( + IN gcsBUFITEM_HEAD_PTR Item + ); + +/******************************************************************************\ +******************************* Private Variables ****************************** 
+\******************************************************************************/ + +static gcsBUFFERED_OUTPUT _outputBuffer[gcdTHREAD_BUFFERS]; +static gcsBUFFERED_OUTPUT_PTR _outputBufferHead = gcvNULL; +static gcsBUFFERED_OUTPUT_PTR _outputBufferTail = gcvNULL; + +/******************************************************************************\ +****************************** Item Size Functions ***************************** +\******************************************************************************/ + +#if gcdBUFFERED_OUTPUT +static gctINT +_GetTerminatorItemSize( + IN gcsBUFITEM_HEAD_PTR Item + ) +{ + return gcmSIZEOF(gcsBUFITEM_HEAD); +} + +static gctINT +_GetPrefixItemSize( + IN gcsBUFITEM_HEAD_PTR Item + ) +{ +#if gcdHAVEPREFIX + gcsBUFITEM_PREFIX_PTR item = (gcsBUFITEM_PREFIX_PTR) Item; + gctUINT vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item); + return vlen + gcdPREFIX_SIZE; +#else + return gcmSIZEOF(gcsBUFITEM_PREFIX); +#endif +} + +static gctINT +_GetStringItemSize( + IN gcsBUFITEM_HEAD_PTR Item + ) +{ + gcsBUFITEM_STRING_PTR item = (gcsBUFITEM_STRING_PTR) Item; + gctUINT vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item); + return vlen + item->messageDataSize; +} + +static gctINT +_GetCopyItemSize( + IN gcsBUFITEM_HEAD_PTR Item + ) +{ + gcsBUFITEM_COPY_PTR item = (gcsBUFITEM_COPY_PTR) Item; + gctUINT vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item); + return vlen + item->messageDataSize; +} + +static gctINT +_GetBufferItemSize( + IN gcsBUFITEM_HEAD_PTR Item + ) +{ +#if gcdHAVEPREFIX + gcsBUFITEM_BUFFER_PTR item = (gcsBUFITEM_BUFFER_PTR) Item; + gctUINT vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item); + return vlen + gcdPREFIX_SIZE + item->dataSize; +#else + gcsBUFITEM_BUFFER_PTR item = (gcsBUFITEM_BUFFER_PTR) Item; + return gcmSIZEOF(gcsBUFITEM_BUFFER) + item->dataSize; +#endif +} + +static gcfGETITEMSIZE _itemSize[] = +{ + _GetTerminatorItemSize, + _GetPrefixItemSize, + 
_GetStringItemSize, + _GetCopyItemSize, + _GetBufferItemSize +}; +#endif + +/******************************************************************************\ +******************************* Printing Functions ***************************** +\******************************************************************************/ + +#if gcmIS_DEBUG(gcdDEBUG_TRACE) || gcdBUFFERED_OUTPUT +static void +_DirectPrint( + gctCONST_STRING Message, + ... + ) +{ + gctINT len; + char buffer[768]; + gctARGUMENTS arguments; + + gcmkARGUMENTS_START(arguments, Message); + len = gcmkVSPRINTF(buffer, gcmSIZEOF(buffer), Message, &arguments); + gcmkARGUMENTS_END(arguments); + + buffer[len] = '\0'; + gcmkOUTPUT_STRING(buffer); +} +#endif + +static int +_AppendIndent( + IN gctINT Indent, + IN char * Buffer, + IN int BufferSize + ) +{ + gctINT i; + + gctINT len = 0; + gctINT indent = Indent % 40; + + for (i = 0; i < indent; i += 1) + { + Buffer[len++] = ' '; + } + + if (indent != Indent) + { + len += gcmkSPRINTF( + Buffer + len, BufferSize - len, " <%d> ", Indent + ); + + Buffer[len] = '\0'; + } + + return len; +} + +#if gcdHAVEPREFIX +static void +_PrintPrefix( + IN gcsBUFFERED_OUTPUT_PTR OutputBuffer, + IN gctPOINTER Data + ) +{ + char buffer[768]; + gctINT len; + + /* Format the string. */ + len = gcmkVSPRINTF(buffer, gcmSIZEOF(buffer), _prefixFormat, Data); + buffer[len] = '\0'; + + /* Print the string. */ + gcmkOUTPUT_STRING(buffer); +} +#endif + +static void +_PrintString( + IN gcsBUFFERED_OUTPUT_PTR OutputBuffer, + IN gctINT Indent, + IN gctCONST_STRING Message, + IN gctUINT ArgumentSize, + IN gctPOINTER Data + ) +{ + char buffer[768]; + gctINT len; + + /* Append the indent string. */ + len = _AppendIndent(Indent, buffer, gcmSIZEOF(buffer)); + + /* Format the string. */ + len += gcmkVSPRINTF(buffer + len, gcmSIZEOF(buffer) - len, Message, Data); + buffer[len] = '\0'; + + /* Add end-of-line if missing. 
*/ + if (buffer[len - 1] != '\n') + { + buffer[len++] = '\n'; + buffer[len] = '\0'; + } + + /* Print the string. */ + gcmkOUTPUT_STRING(buffer); +} + +static void +_PrintBuffer( + IN gcsBUFFERED_OUTPUT_PTR OutputBuffer, + IN gctINT Indent, + IN gctPOINTER PrefixData, + IN gctPOINTER Data, + IN gctUINT Address, + IN gctSIZE_T DataSize, + IN gceDUMP_BUFFER Type, + IN gctUINT32 DmaAddress + ) +{ + static gctCONST_STRING _titleString[] = + { + "CONTEXT BUFFER", + "USER COMMAND BUFFER", + "KERNEL COMMAND BUFFER", + "LINK BUFFER", + "WAIT LINK BUFFER", + "" + }; + + static const gctINT COLUMN_COUNT = 8; + + gctUINT i, column, address; + gctSIZE_T count; + gctUINT32_PTR data; + gctCHAR buffer[768]; + gctUINT indent, len; + gctBOOL command; + + /* Append space for the prefix. */ +#if gcdHAVEPREFIX + indent = gcmkVSPRINTF(buffer, gcmSIZEOF(buffer), _prefixFormat, PrefixData); + buffer[indent] = '\0'; +#else + indent = 0; +#endif + + /* Append the indent string. */ + indent += _AppendIndent( + Indent, buffer + indent, gcmSIZEOF(buffer) - indent + ); + + switch (Type) + { + case gcvDUMP_BUFFER_CONTEXT: + case gcvDUMP_BUFFER_USER: + case gcvDUMP_BUFFER_KERNEL: + case gcvDUMP_BUFFER_LINK: + case gcvDUMP_BUFFER_WAITLINK: + /* Form and print the title string. */ + gcmkSPRINTF2( + buffer + indent, gcmSIZEOF(buffer) - indent, + "%s%s\n", _titleString[Type], + ((DmaAddress >= Address) && (DmaAddress < Address + DataSize)) + ? " (CURRENT)" : "" + ); + + gcmkOUTPUT_STRING(buffer); + + /* Terminate the string. */ + buffer[indent] = '\0'; + + /* This is a command buffer. */ + command = gcvTRUE; + break; + + case gcvDUMP_BUFFER_FROM_USER: + /* This is not a command buffer. */ + command = gcvFALSE; + + /* No title. */ + break; + + default: + gcmDBGASSERT(gcvFALSE, "%s", "invalid buffer type"); + + /* This is not a command buffer. */ + command = gcvFALSE; + } + + /* Overwrite the prefix with spaces. 
*/ + for (i = 0; i < indent; i += 1) + { + buffer[i] = ' '; + } + + /* Form and print the opening string. */ + if (command) + { + gcmkSPRINTF2( + buffer + indent, gcmSIZEOF(buffer) - indent, + "@[kernel.command %08X %08X\n", Address, (gctUINT32)DataSize + ); + + gcmkOUTPUT_STRING(buffer); + + /* Terminate the string. */ + buffer[indent] = '\0'; + } + + /* Get initial address. */ + address = Address; + + /* Cast the data pointer. */ + data = (gctUINT32_PTR) Data; + + /* Compute the number of double words. */ + count = DataSize / gcmSIZEOF(gctUINT32); + + /* Print the buffer. */ + for (i = 0, len = indent, column = 0; i < count; i += 1) + { + /* Append the address. */ + if (column == 0) + { + len += gcmkSPRINTF( + buffer + len, gcmSIZEOF(buffer) - len, "0x%08X:", address + ); + } + + /* Append the data value. */ + len += gcmkSPRINTF2( + buffer + len, gcmSIZEOF(buffer) - len, "%c%08X", + (address == DmaAddress)? '>' : ' ', data[i] + ); + + buffer[len] = '\0'; + + /* Update the address. */ + address += gcmSIZEOF(gctUINT32); + + /* Advance column count. */ + column += 1; + + /* End of line? */ + if ((column % COLUMN_COUNT) == 0) + { + /* Append EOL. */ + gcmkSTRCATSAFE(buffer, gcmSIZEOF(buffer), "\n"); + + /* Print the string. */ + gcmkOUTPUT_STRING(buffer); + + /* Reset. */ + len = indent; + column = 0; + } + } + + /* Print the last partial string. */ + if (column != 0) + { + /* Append EOL. */ + gcmkSTRCATSAFE(buffer, gcmSIZEOF(buffer), "\n"); + + /* Print the string. */ + gcmkOUTPUT_STRING(buffer); + } + + /* Form and print the opening string. */ + if (command) + { + buffer[indent] = '\0'; + gcmkSTRCATSAFE(buffer, gcmSIZEOF(buffer), "] -- command\n"); + gcmkOUTPUT_STRING(buffer); + } +} + +#if gcdBUFFERED_OUTPUT +static gctUINT +_PrintNone( + IN gcsBUFFERED_OUTPUT_PTR OutputBuffer, + IN gcsBUFITEM_HEAD_PTR Item + ) +{ + /* Return the size of the node. 
*/ + return gcmSIZEOF(gcsBUFITEM_HEAD); +} + +static gctUINT +_PrintPrefixWrapper( + IN gcsBUFFERED_OUTPUT_PTR OutputBuffer, + IN gcsBUFITEM_HEAD_PTR Item + ) +{ +#if gcdHAVEPREFIX + gcsBUFITEM_PREFIX_PTR item; + gctUINT vlen; + + /* Get access to the data. */ + item = (gcsBUFITEM_PREFIX_PTR) Item; + + /* Print the message. */ + _PrintPrefix(OutputBuffer, item->prefixData); + + /* Compute the size of the variable portion of the structure. */ + vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item); + + /* Return the size of the node. */ + return vlen + gcdPREFIX_SIZE; +#else + return gcmSIZEOF(gcsBUFITEM_PREFIX); +#endif +} + +static gctUINT +_PrintStringWrapper( + IN gcsBUFFERED_OUTPUT_PTR OutputBuffer, + IN gcsBUFITEM_HEAD_PTR Item + ) +{ + gcsBUFITEM_STRING_PTR item; + gctUINT vlen; + + /* Get access to the data. */ + item = (gcsBUFITEM_STRING_PTR) Item; + + /* Print the message. */ + _PrintString( + OutputBuffer, + item->indent, item->message, item->messageDataSize, item->messageData + ); + + /* Compute the size of the variable portion of the structure. */ + vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item); + + /* Return the size of the node. */ + return vlen + item->messageDataSize; +} + +static gctUINT +_PrintCopyWrapper( + IN gcsBUFFERED_OUTPUT_PTR OutputBuffer, + IN gcsBUFITEM_HEAD_PTR Item + ) +{ + gcsBUFITEM_COPY_PTR item; + gctCONST_STRING message; + gctUINT vlen; + + /* Get access to the data. */ + item = (gcsBUFITEM_COPY_PTR) Item; + + /* Determine the string pointer. */ + message = (gctCONST_STRING) (item + 1); + + /* Print the message. */ + _PrintString( + OutputBuffer, + item->indent, message, item->messageDataSize, item->messageData + ); + + /* Compute the size of the variable portion of the structure. */ + vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item); + + /* Return the size of the node. 
*/ + return vlen + item->messageDataSize; +} + +static gctUINT +_PrintBufferWrapper( + IN gcsBUFFERED_OUTPUT_PTR OutputBuffer, + IN gcsBUFITEM_HEAD_PTR Item + ) +{ +#if gcdHAVEPREFIX + gctUINT32 dmaAddress; + gcsBUFITEM_BUFFER_PTR item; + gctPOINTER data; + gctUINT vlen; + + /* Get access to the data. */ + item = (gcsBUFITEM_BUFFER_PTR) Item; + +#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1) + dmaAddress = item->dmaAddress; +#else + dmaAddress = 0xFFFFFFFF; +#endif + + if (dmaAddress != 0) + { + /* Compute the data address. */ + data = ((gctUINT8_PTR) item->prefixData) + gcdPREFIX_SIZE; + + /* Print buffer. */ + _PrintBuffer( + OutputBuffer, + item->indent, item->prefixData, + data, item->address, item->dataSize, + item->bufferType, dmaAddress + ); + } + + /* Compute the size of the variable portion of the structure. */ + vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item); + + /* Return the size of the node. */ + return vlen + gcdPREFIX_SIZE + item->dataSize; +#else + gctUINT32 dmaAddress; + gcsBUFITEM_BUFFER_PTR item; + + /* Get access to the data. */ + item = (gcsBUFITEM_BUFFER_PTR) Item; + +#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1) + dmaAddress = item->dmaAddress; +#else + dmaAddress = 0xFFFFFFFF; +#endif + + if (dmaAddress != 0) + { + /* Print buffer. */ + _PrintBuffer( + OutputBuffer, + item->indent, gcvNULL, + item + 1, item->address, item->dataSize, + item->bufferType, dmaAddress + ); + } + + /* Return the size of the node. 
*/ + return gcmSIZEOF(gcsBUFITEM_BUFFER) + item->dataSize; +#endif +} + +static gcfPRINTSTRING _printArray[] = +{ + _PrintNone, + _PrintPrefixWrapper, + _PrintStringWrapper, + _PrintCopyWrapper, + _PrintBufferWrapper +}; +#endif + +/******************************************************************************\ +******************************* Private Functions ****************************** +\******************************************************************************/ + +#if gcdBUFFERED_OUTPUT + +#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1) +static gcsBUFITEM_BUFFER_PTR +_FindCurrentDMABuffer( + gctUINT32 DmaAddress + ) +{ + gctINT i, skip; + gcsBUFITEM_HEAD_PTR item; + gcsBUFITEM_BUFFER_PTR dmaCurrent; + + /* Reset the current buffer. */ + dmaCurrent = gcvNULL; + + /* Get the first stored item. */ + item = (gcsBUFITEM_HEAD_PTR) &_outputBufferHead->buffer[_outputBufferHead->start]; + + /* Run through all items. */ + for (i = 0; i < _outputBufferHead->count; i += 1) + { + /* Buffer item? */ + if (item->type == gcvBUFITEM_BUFFER) + { + gcsBUFITEM_BUFFER_PTR buffer = (gcsBUFITEM_BUFFER_PTR) item; + + if ((DmaAddress >= buffer->address) && + (DmaAddress < buffer->address + buffer->dataSize)) + { + dmaCurrent = buffer; + } + } + + /* Get the item size and skip it. */ + skip = (* _itemSize[item->type]) (item); + item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip); + + /* End of the buffer? Wrap around. */ + if (item->type == gceBUFITEM_NONE) + { + item = (gcsBUFITEM_HEAD_PTR) _outputBufferHead->buffer; + } + } + + /* Return result. */ + return dmaCurrent; +} + +static void +_EnableAllDMABuffers( + void + ) +{ + gctINT i, skip; + gcsBUFITEM_HEAD_PTR item; + + /* Get the first stored item. */ + item = (gcsBUFITEM_HEAD_PTR) &_outputBufferHead->buffer[_outputBufferHead->start]; + + /* Run through all items. */ + for (i = 0; i < _outputBufferHead->count; i += 1) + { + /* Buffer item? 
*/ + if (item->type == gcvBUFITEM_BUFFER) + { + gcsBUFITEM_BUFFER_PTR buffer = (gcsBUFITEM_BUFFER_PTR) item; + + /* Enable the buffer. */ + buffer->dmaAddress = ~0U; + } + + /* Get the item size and skip it. */ + skip = (* _itemSize[item->type]) (item); + item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip); + + /* End of the buffer? Wrap around. */ + if (item->type == gceBUFITEM_NONE) + { + item = (gcsBUFITEM_HEAD_PTR) _outputBufferHead->buffer; + } + } +} + +static void +_EnableDMABuffers( + gctUINT32 DmaAddress, + gcsBUFITEM_BUFFER_PTR CurrentDMABuffer + ) +{ + gctINT i, skip, index; + gcsBUFITEM_HEAD_PTR item; + gcsBUFITEM_BUFFER_PTR buffers[gcdDMA_BUFFER_COUNT]; + + /* Reset buffer pointers. */ + gckOS_ZeroMemory(buffers, gcmSIZEOF(buffers)); + + /* Set the current buffer index. */ + index = -1; + + /* Get the first stored item. */ + item = (gcsBUFITEM_HEAD_PTR) &_outputBufferHead->buffer[_outputBufferHead->start]; + + /* Run through all items until the current DMA buffer is found. */ + for (i = 0; i < _outputBufferHead->count; i += 1) + { + /* Buffer item? */ + if (item->type == gcvBUFITEM_BUFFER) + { + /* Advance the index. */ + index = (index + 1) % gcdDMA_BUFFER_COUNT; + + /* Add to the buffer array. */ + buffers[index] = (gcsBUFITEM_BUFFER_PTR) item; + + /* Stop if this is the current DMA buffer. */ + if ((gcsBUFITEM_BUFFER_PTR) item == CurrentDMABuffer) + { + break; + } + } + + /* Get the item size and skip it. */ + skip = (* _itemSize[item->type]) (item); + item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip); + + /* End of the buffer? Wrap around. */ + if (item->type == gceBUFITEM_NONE) + { + item = (gcsBUFITEM_HEAD_PTR) _outputBufferHead->buffer; + } + } + + /* Enable the found buffers. 
*/ + gcmDBGASSERT(index != -1, "%d", index); + + for (i = 0; i < gcdDMA_BUFFER_COUNT; i += 1) + { + if (buffers[index] == gcvNULL) + { + break; + } + + buffers[index]->dmaAddress = DmaAddress; + + index -= 1; + + if (index == -1) + { + index = gcdDMA_BUFFER_COUNT - 1; + } + } +} +#endif + +static void +_Flush( + gctUINT32 DmaAddress + ) +{ + gctINT i, skip; + gcsBUFITEM_HEAD_PTR item; + + gcsBUFFERED_OUTPUT_PTR outputBuffer = _outputBufferHead; + +#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1) + if ((outputBuffer != gcvNULL) && (outputBuffer->count != 0)) + { + /* Find the current DMA buffer. */ + gcsBUFITEM_BUFFER_PTR dmaCurrent = _FindCurrentDMABuffer(DmaAddress); + + /* Was the current buffer found? */ + if (dmaCurrent == gcvNULL) + { + /* No, print all buffers. */ + _EnableAllDMABuffers(); + } + else + { + /* Yes, enable only specified number of buffers. */ + _EnableDMABuffers(DmaAddress, dmaCurrent); + } + } +#endif + + while (outputBuffer != gcvNULL) + { + if (outputBuffer->count != 0) + { + _DirectPrint("********************************************************************************\n"); + _DirectPrint("FLUSHING DEBUG OUTPUT BUFFER (%d elements).\n", outputBuffer->count); + _DirectPrint("********************************************************************************\n"); + + item = (gcsBUFITEM_HEAD_PTR) &outputBuffer->buffer[outputBuffer->start]; + + for (i = 0; i < outputBuffer->count; i += 1) + { + skip = (* _printArray[item->type]) (outputBuffer, item); + + item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip); + + if (item->type == gceBUFITEM_NONE) + { + item = (gcsBUFITEM_HEAD_PTR) outputBuffer->buffer; + } + } + + outputBuffer->start = 0; + outputBuffer->index = 0; + outputBuffer->count = 0; + } + + outputBuffer = outputBuffer->next; + } +} + +static gcsBUFITEM_HEAD_PTR +_AllocateItem( + IN gcsBUFFERED_OUTPUT_PTR OutputBuffer, + IN gctINT Size + ) +{ + gctINT skip; + gcsBUFITEM_HEAD_PTR item, next; + +#if gcdENABLE_OVERFLOW + if ( + 
(OutputBuffer->index + Size >= gcdBUFFERED_SIZE - gcmSIZEOF(gcsBUFITEM_HEAD)) + || + ( + (OutputBuffer->index < OutputBuffer->start) && + (OutputBuffer->index + Size >= OutputBuffer->start) + ) + ) + { + if (OutputBuffer->index + Size >= gcdBUFFERED_SIZE - gcmSIZEOF(gcsBUFITEM_HEAD)) + { + if (OutputBuffer->index < OutputBuffer->start) + { + item = (gcsBUFITEM_HEAD_PTR) &OutputBuffer->buffer[OutputBuffer->start]; + + while (item->type != gceBUFITEM_NONE) + { + skip = (* _itemSize[item->type]) (item); + + OutputBuffer->start += skip; + OutputBuffer->count -= 1; + + item->type = gceBUFITEM_NONE; + item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip); + } + + OutputBuffer->start = 0; + } + + OutputBuffer->index = 0; + } + + item = (gcsBUFITEM_HEAD_PTR) &OutputBuffer->buffer[OutputBuffer->start]; + + while (OutputBuffer->start - OutputBuffer->index <= Size) + { + skip = (* _itemSize[item->type]) (item); + + OutputBuffer->start += skip; + OutputBuffer->count -= 1; + + item->type = gceBUFITEM_NONE; + item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip); + + if (item->type == gceBUFITEM_NONE) + { + OutputBuffer->start = 0; + break; + } + } + } +#else + if (OutputBuffer->index + Size > gcdBUFFERED_SIZE - gcmSIZEOF(gcsBUFITEM_HEAD)) + { + _DirectPrint("\nMessage buffer full; forcing message flush.\n\n"); + _Flush(~0U); + } +#endif + + item = (gcsBUFITEM_HEAD_PTR) &OutputBuffer->buffer[OutputBuffer->index]; + + OutputBuffer->index += Size; + OutputBuffer->count += 1; + + next = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + Size); + next->type = gceBUFITEM_NONE; + + return item; +} + +#if gcdALIGNBYSIZE +static void +_FreeExtraSpace( + IN gcsBUFFERED_OUTPUT_PTR OutputBuffer, + IN gctPOINTER Item, + IN gctINT ItemSize, + IN gctINT FreeSize + ) +{ + gcsBUFITEM_HEAD_PTR next; + + OutputBuffer->index -= FreeSize; + + next = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) Item + ItemSize); + next->type = gceBUFITEM_NONE; +} +#endif + +#if gcdHAVEPREFIX +static void +_AppendPrefix( 
+ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer, + IN gctPOINTER Data + ) +{ + gctUINT8_PTR prefixData; + gcsBUFITEM_PREFIX_PTR item; + gctINT allocSize; + +#if gcdALIGNBYSIZE + gctUINT alignment; + gctINT size, freeSize; +#endif + + gcmDBGASSERT(Data != gcvNULL, "%p", Data); + + /* Determine the maximum item size. */ + allocSize + = gcmSIZEOF(gcsBUFITEM_PREFIX) + + gcdPREFIX_SIZE + + gcdPREFIX_ALIGNMENT; + + /* Allocate prefix item. */ + item = (gcsBUFITEM_PREFIX_PTR) _AllocateItem(OutputBuffer, allocSize); + + /* Compute the initial prefix data pointer. */ + prefixData = (gctUINT8_PTR) (item + 1); + + /* Align the data pointer as necessary. */ +#if gcdALIGNBYSIZE + alignment = gcmPTRALIGNMENT(prefixData, gcdPREFIX_ALIGNMENT); + prefixData += alignment; +#endif + + /* Set item data. */ + item->type = gcvBUFITEM_PREFIX; + item->prefixData = prefixData; + + /* Copy argument value. */ + gcmkMEMCPY(prefixData, Data, gcdPREFIX_SIZE); + +#if gcdALIGNBYSIZE + /* Compute the actual node size. */ + size = gcmSIZEOF(gcsBUFITEM_PREFIX) + gcdPREFIX_SIZE + alignment; + + /* Free extra memory if any. */ + freeSize = allocSize - size; + if (freeSize != 0) + { + _FreeExtraSpace(OutputBuffer, item, size, freeSize); + } +#endif +} +#endif + +static void +_AppendString( + IN gcsBUFFERED_OUTPUT_PTR OutputBuffer, + IN gctINT Indent, + IN gctCONST_STRING Message, + IN gctUINT ArgumentSize, + IN gctPOINTER Data + ) +{ + gctUINT8_PTR messageData; + gcsBUFITEM_STRING_PTR item; + gctINT allocSize; + +#if gcdALIGNBYSIZE + gctUINT alignment; + gctINT size, freeSize; +#endif + + /* Determine the maximum item size. */ + allocSize + = gcmSIZEOF(gcsBUFITEM_STRING) + + ArgumentSize + + gcdVARARG_ALIGNMENT; + + /* Allocate prefix item. */ + item = (gcsBUFITEM_STRING_PTR) _AllocateItem(OutputBuffer, allocSize); + + /* Compute the initial message data pointer. */ + messageData = (gctUINT8_PTR) (item + 1); + + /* Align the data pointer as necessary. 
*/ +#if gcdALIGNBYSIZE + alignment = gcmPTRALIGNMENT(messageData, gcdVARARG_ALIGNMENT); + messageData += alignment; +#endif + + /* Set item data. */ + item->type = gcvBUFITEM_STRING; + item->indent = Indent; + item->message = Message; + item->messageData = messageData; + item->messageDataSize = ArgumentSize; + + /* Copy argument value. */ + if (ArgumentSize != 0) + { + gcmkMEMCPY(messageData, Data, ArgumentSize); + } + +#if gcdALIGNBYSIZE + /* Compute the actual node size. */ + size = gcmSIZEOF(gcsBUFITEM_STRING) + ArgumentSize + alignment; + + /* Free extra memory if any. */ + freeSize = allocSize - size; + if (freeSize != 0) + { + _FreeExtraSpace(OutputBuffer, item, size, freeSize); + } +#endif +} + +static void +_AppendCopy( + IN gcsBUFFERED_OUTPUT_PTR OutputBuffer, + IN gctINT Indent, + IN gctCONST_STRING Message, + IN gctUINT ArgumentSize, + IN gctPOINTER Data + ) +{ + gctUINT8_PTR messageData; + gcsBUFITEM_COPY_PTR item; + gctINT allocSize; + gctINT messageLength; + gctCONST_STRING message; + +#if gcdALIGNBYSIZE + gctUINT alignment; + gctINT size, freeSize; +#endif + + /* Get the length of the string. */ + messageLength = strlen(Message) + 1; + + /* Determine the maximum item size. */ + allocSize + = gcmSIZEOF(gcsBUFITEM_COPY) + + messageLength + + ArgumentSize + + gcdVARARG_ALIGNMENT; + + /* Allocate prefix item. */ + item = (gcsBUFITEM_COPY_PTR) _AllocateItem(OutputBuffer, allocSize); + + /* Determine the message placement. */ + message = (gctCONST_STRING) (item + 1); + + /* Compute the initial message data pointer. */ + messageData = (gctUINT8_PTR) message + messageLength; + + /* Align the data pointer as necessary. */ +#if gcdALIGNBYSIZE + if (ArgumentSize == 0) + { + alignment = 0; + } + else + { + alignment = gcmPTRALIGNMENT(messageData, gcdVARARG_ALIGNMENT); + messageData += alignment; + } +#endif + + /* Set item data. 
*/ + item->type = gcvBUFITEM_COPY; + item->indent = Indent; + item->messageData = messageData; + item->messageDataSize = ArgumentSize; + + /* Copy the message. */ + gcmkMEMCPY((gctPOINTER) message, Message, messageLength); + + /* Copy argument value. */ + if (ArgumentSize != 0) + { + gcmkMEMCPY(messageData, Data, ArgumentSize); + } + +#if gcdALIGNBYSIZE + /* Compute the actual node size. */ + size + = gcmSIZEOF(gcsBUFITEM_COPY) + + messageLength + + ArgumentSize + + alignment; + + /* Free extra memory if any. */ + freeSize = allocSize - size; + if (freeSize != 0) + { + _FreeExtraSpace(OutputBuffer, item, size, freeSize); + } +#endif +} + +static void +_AppendBuffer( + IN gcsBUFFERED_OUTPUT_PTR OutputBuffer, + IN gctINT Indent, + IN gctPOINTER PrefixData, + IN gctPOINTER Data, + IN gctUINT Address, + IN gctUINT DataSize, + IN gceDUMP_BUFFER Type, + IN gctUINT32 DmaAddress + ) +{ +#if gcdHAVEPREFIX + gctUINT8_PTR prefixData; + gcsBUFITEM_BUFFER_PTR item; + gctINT allocSize; + gctPOINTER data; + +#if gcdALIGNBYSIZE + gctUINT alignment; + gctINT size, freeSize; +#endif + + gcmDBGASSERT(DataSize != 0, "%d", DataSize); + gcmDBGASSERT(Data != gcvNULL, "%p", Data); + + /* Determine the maximum item size. */ + allocSize + = gcmSIZEOF(gcsBUFITEM_BUFFER) + + gcdPREFIX_SIZE + + gcdPREFIX_ALIGNMENT + + DataSize; + + /* Allocate prefix item. */ + item = (gcsBUFITEM_BUFFER_PTR) _AllocateItem(OutputBuffer, allocSize); + + /* Compute the initial prefix data pointer. */ + prefixData = (gctUINT8_PTR) (item + 1); + +#if gcdALIGNBYSIZE + /* Align the data pointer as necessary. */ + alignment = gcmPTRALIGNMENT(prefixData, gcdPREFIX_ALIGNMENT); + prefixData += alignment; +#endif + + /* Set item data. 
*/ + item->type = gcvBUFITEM_BUFFER; + item->indent = Indent; + item->bufferType = Type; + item->dataSize = DataSize; + item->address = Address; + item->prefixData = prefixData; + +#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1) + item->dmaAddress = DmaAddress; +#endif + + /* Copy prefix data. */ + gcmkMEMCPY(prefixData, PrefixData, gcdPREFIX_SIZE); + + /* Compute the data pointer. */ + data = prefixData + gcdPREFIX_SIZE; + + /* Copy argument value. */ + gcmkMEMCPY(data, Data, DataSize); + +#if gcdALIGNBYSIZE + /* Compute the actual node size. */ + size + = gcmSIZEOF(gcsBUFITEM_BUFFER) + + gcdPREFIX_SIZE + + alignment + + DataSize; + + /* Free extra memory if any. */ + freeSize = allocSize - size; + if (freeSize != 0) + { + _FreeExtraSpace(OutputBuffer, item, size, freeSize); + } +#endif +#else + gcsBUFITEM_BUFFER_PTR item; + gctINT size; + + gcmDBGASSERT(DataSize != 0, "%d", DataSize); + gcmDBGASSERT(Data != gcvNULL, "%p", Data); + + /* Determine the maximum item size. */ + size = gcmSIZEOF(gcsBUFITEM_BUFFER) + DataSize; + + /* Allocate prefix item. */ + item = (gcsBUFITEM_BUFFER_PTR) _AllocateItem(OutputBuffer, size); + + /* Set item data. */ + item->type = gcvBUFITEM_BUFFER; + item->indent = Indent; + item->dataSize = DataSize; + item->address = Address; + + /* Copy argument value. 
*/ + gcmkMEMCPY(item + 1, Data, DataSize); +#endif +} +#endif + +static gcmINLINE void +_InitBuffers( + void + ) +{ + int i; + + if (_outputBufferHead == gcvNULL) + { + for (i = 0; i < gcdTHREAD_BUFFERS; i += 1) + { + if (_outputBufferTail == gcvNULL) + { + _outputBufferHead = &_outputBuffer[i]; + } + else + { + _outputBufferTail->next = &_outputBuffer[i]; + } + +#if gcdTHREAD_BUFFERS > 1 + _outputBuffer[i].threadID = ~0U; +#endif + + _outputBuffer[i].prev = _outputBufferTail; + _outputBuffer[i].next = gcvNULL; + + _outputBufferTail = &_outputBuffer[i]; + } + } +} + +static gcmINLINE gcsBUFFERED_OUTPUT_PTR +_GetOutputBuffer( + void + ) +{ + gcsBUFFERED_OUTPUT_PTR outputBuffer; + +#if gcdTHREAD_BUFFERS > 1 + /* Get the current thread ID. */ + gctUINT32 ThreadID = gcmkGETTHREADID(); + + /* Locate the output buffer for the thread. */ + outputBuffer = _outputBufferHead; + + while (outputBuffer != gcvNULL) + { + if (outputBuffer->threadID == ThreadID) + { + break; + } + + outputBuffer = outputBuffer->next; + } + + /* No matching buffer found? */ + if (outputBuffer == gcvNULL) + { + /* Get the tail for the buffer. */ + outputBuffer = _outputBufferTail; + + /* Move it to the head. */ + _outputBufferTail = _outputBufferTail->prev; + _outputBufferTail->next = gcvNULL; + + outputBuffer->prev = gcvNULL; + outputBuffer->next = _outputBufferHead; + + _outputBufferHead->prev = outputBuffer; + _outputBufferHead = outputBuffer; + + /* Reset the buffer. 
*/ + outputBuffer->threadID = ThreadID; +#if gcdBUFFERED_OUTPUT + outputBuffer->start = 0; + outputBuffer->index = 0; + outputBuffer->count = 0; +#endif +#if gcdSHOW_LINE_NUMBER + outputBuffer->lineNumber = 0; +#endif + } +#else + outputBuffer = _outputBufferHead; +#endif + + return outputBuffer; +} + +static gcmINLINE int _GetArgumentSize( + IN gctCONST_STRING Message + ) +{ + int i, count; + + gcmDBGASSERT(Message != gcvNULL, "%p", Message); + + for (i = 0, count = 0; Message[i]; i += 1) + { + if (Message[i] == '%') + { + count += 1; + } + } + + return count * gcmSIZEOF(gctUINT32); +} + +#if gcdHAVEPREFIX +static void +_InitPrefixData( + IN gcsBUFFERED_OUTPUT_PTR OutputBuffer, + IN gctPOINTER Data + ) +{ + gctUINT8_PTR data = (gctUINT8_PTR) Data; + +#if gcdSHOW_TIME + { + gctUINT64 time; + gckOS_GetProfileTick(&time); + gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT64)); + * ((gctUINT64_PTR) data) = time; + data += gcmSIZEOF(gctUINT64); + } +#endif + +#if gcdSHOW_LINE_NUMBER + { + gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT64)); + * ((gctUINT64_PTR) data) = OutputBuffer->lineNumber; + data += gcmSIZEOF(gctUINT64); + } +#endif + +#if gcdSHOW_PROCESS_ID + { + gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT32)); + * ((gctUINT32_PTR) data) = gcmkGETPROCESSID(); + data += gcmSIZEOF(gctUINT32); + } +#endif + +#if gcdSHOW_THREAD_ID + { + gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT32)); + * ((gctUINT32_PTR) data) = gcmkGETTHREADID(); + } +#endif +} +#endif + +static void +_Print( + IN gctUINT ArgumentSize, + IN gctBOOL CopyMessage, + IN gctCONST_STRING Message, + IN gctARGUMENTS * Arguments + ) +{ + gcsBUFFERED_OUTPUT_PTR outputBuffer; + static gcmkDECLARE_MUTEX(lockHandle); + + gcmkMUTEX_LOCK(lockHandle); + + /* Initialize output buffer list. */ + _InitBuffers(); + + /* Locate the proper output buffer. */ + outputBuffer = _GetOutputBuffer(); + + /* Update the line number. 
*/ +#if gcdSHOW_LINE_NUMBER + outputBuffer->lineNumber += 1; +#endif + + /* Print prefix. */ +#if gcdHAVEPREFIX + { + gctUINT8_PTR alignedPrefixData; + gctUINT8 prefixData[gcdPREFIX_SIZE + gcdPREFIX_ALIGNMENT]; + + /* Compute aligned pointer. */ + alignedPrefixData = prefixData; + gcmkALIGNPTR(gctUINT8_PTR, alignedPrefixData, gcdPREFIX_ALIGNMENT); + + /* Initialize the prefix data. */ + _InitPrefixData(outputBuffer, alignedPrefixData); + + /* Print the prefix. */ + gcdOUTPUTPREFIX(outputBuffer, alignedPrefixData); + } +#endif + + /* Form the indent string. */ + if (strncmp(Message, "--", 2) == 0) + { + outputBuffer->indent -= 2; + } + + /* Print the message. */ + if (CopyMessage) + { + gcdOUTPUTCOPY( + outputBuffer, outputBuffer->indent, + Message, ArgumentSize, (gctPOINTER) Arguments + ); + } + else + { + gcdOUTPUTSTRING( + outputBuffer, outputBuffer->indent, + Message, ArgumentSize, ((gctPOINTER) Arguments) + ); + } + + /* Check increasing indent. */ + if (strncmp(Message, "++", 2) == 0) + { + outputBuffer->indent += 2; + } + + gcmkMUTEX_UNLOCK(lockHandle); +} + + +/******************************************************************************\ +********************************* Debug Macros ********************************* +\******************************************************************************/ + +#ifdef __QNXNTO__ + +extern volatile unsigned g_nQnxInIsrs; + +#define gcmDEBUGPRINT(ArgumentSize, CopyMessage, Message) \ +{ \ + if (atomic_add_value(&g_nQnxInIsrs, 1) == 0) \ + { \ + gctARGUMENTS __arguments__; \ + gcmkARGUMENTS_START(__arguments__, Message); \ + _Print(ArgumentSize, CopyMessage, Message, &__arguments__); \ + gcmkARGUMENTS_END(__arguments__); \ + } \ + atomic_sub(&g_nQnxInIsrs, 1); \ +} + +#else + +#define gcmDEBUGPRINT(ArgumentSize, CopyMessage, Message) \ +{ \ + gctARGUMENTS __arguments__; \ + gcmkARGUMENTS_START(__arguments__, Message); \ + _Print(ArgumentSize, CopyMessage, Message, &__arguments__); \ + gcmkARGUMENTS_END(__arguments__); 
\ +} + +#endif + +/******************************************************************************\ +********************************** Debug Code ********************************** +\******************************************************************************/ + +/******************************************************************************* +** +** gckOS_Print +** +** Send a message to the debugger. +** +** INPUT: +** +** gctCONST_STRING Message +** Pointer to message. +** +** ... +** Optional arguments. +** +** OUTPUT: +** +** Nothing. +*/ + +void +gckOS_Print( + IN gctCONST_STRING Message, + ... + ) +{ + gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message); +} + +/******************************************************************************* +** +** gckOS_PrintN +** +** Send a message to the debugger. +** +** INPUT: +** +** gctUINT ArgumentSize +** The size of the optional arguments in bytes. +** +** gctCONST_STRING Message +** Pointer to message. +** +** ... +** Optional arguments. +** +** OUTPUT: +** +** Nothing. +*/ + +void +gckOS_PrintN( + IN gctUINT ArgumentSize, + IN gctCONST_STRING Message, + ... + ) +{ + gcmDEBUGPRINT(ArgumentSize, gcvFALSE, Message); +} + +/******************************************************************************* +** +** gckOS_CopyPrint +** +** Send a message to the debugger. If in buffered output mode, the entire +** message will be copied into the buffer instead of using the pointer to +** the string. +** +** INPUT: +** +** gctCONST_STRING Message +** Pointer to message. +** +** ... +** Optional arguments. +** +** OUTPUT: +** +** Nothing. +*/ + +void +gckOS_CopyPrint( + IN gctCONST_STRING Message, + ... + ) +{ + gcmDEBUGPRINT(_GetArgumentSize(Message), gcvTRUE, Message); +} + +/******************************************************************************* +** +** gckOS_DumpBuffer +** +** Print the contents of the specified buffer. +** +** INPUT: +** +** gckOS Os +** Pointer to gckOS object. 
+** +** gctPOINTER Buffer +** Pointer to the buffer to print. +** +** gctUINT Size +** Size of the buffer. +** +** gceDUMP_BUFFER Type +** Buffer type. +** +** OUTPUT: +** +** Nothing. +*/ + +void +gckOS_DumpBuffer( + IN gckOS Os, + IN gctPOINTER Buffer, + IN gctSIZE_T Size, + IN gceDUMP_BUFFER Type, + IN gctBOOL CopyMessage + ) +{ + gctPHYS_ADDR_T physical; + gctUINT32 address = 0; + gcsBUFFERED_OUTPUT_PTR outputBuffer = gcvNULL; + gctCHAR *buffer = (gctCHAR*)Buffer; + gctPOINTER pAllocated = gcvNULL; + gctPOINTER pMapped = gcvNULL; + gceSTATUS status = gcvSTATUS_OK; + + static gcmkDECLARE_MUTEX(lockHandle); + + gcmkMUTEX_LOCK(lockHandle); + + /* Request lock when not coming from user, + or coming from user and not yet locked + and message is starting with @[. */ + if (Type == gcvDUMP_BUFFER_FROM_USER) + { + /* Some format check. */ + if ((Size > 2) + && (buffer[0] == '@' || buffer[0] == '#') + && (buffer[1] != '[') + ) + { + gcmkMUTEX_UNLOCK(lockHandle); + + /* No error tolerence in parser, so we stop on error to make noise. */ + for (;;) + { + gcmkPRINT( + "[galcore]: %s(%d): Illegal dump message %s\n", + __FUNCTION__, __LINE__, + buffer + ); + + gckOS_Delay(Os, 10 * 1000); + } + } + } + + if (Buffer != gcvNULL) + { + /* Initialize output buffer list. */ + _InitBuffers(); + + /* Locate the proper output buffer. */ + outputBuffer = _GetOutputBuffer(); + + /* Update the line number. */ +#if gcdSHOW_LINE_NUMBER + outputBuffer->lineNumber += 1; +#endif + + /* Get the physical address of the buffer. 
*/ + if (Type != gcvDUMP_BUFFER_FROM_USER) + { + gcmkVERIFY_OK(gckOS_GetPhysicalAddress(Os, Buffer, &physical)); + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical(Os, physical, &physical)); + gcmkSAFECASTPHYSADDRT(address, physical); + } + else + { + address = 0; + } + + if (Type == gcvDUMP_BUFFER_USER) + { + gctBOOL needCopy = gcvTRUE; + + gcmkONERROR(gckOS_QueryNeedCopy(Os, 0, &needCopy)); + + if (needCopy) + { + gcmkONERROR(gckOS_Allocate( + Os, + Size, + &pAllocated + )); + + gcmkONERROR(gckOS_CopyFromUserData( + Os, + pAllocated, + Buffer, + Size + )); + + Buffer = pAllocated; + } + else + { + gcmkONERROR(gckOS_MapUserPointer( + Os, + Buffer, + Size, + &pMapped + )); + + Buffer = pMapped; + } + } + +#if gcdHAVEPREFIX + { + gctUINT8_PTR alignedPrefixData; + gctUINT8 prefixData[gcdPREFIX_SIZE + gcdPREFIX_ALIGNMENT]; + + /* Compute aligned pointer. */ + alignedPrefixData = prefixData; + gcmkALIGNPTR(gctUINT8_PTR, alignedPrefixData, gcdPREFIX_ALIGNMENT); + + /* Initialize the prefix data. */ + _InitPrefixData(outputBuffer, alignedPrefixData); + + /* Print/schedule the buffer. */ + gcdOUTPUTBUFFER( + outputBuffer, outputBuffer->indent, + alignedPrefixData, Buffer, address, Size, Type, 0 + ); + } +#else + /* Print/schedule the buffer. */ + if (Type == gcvDUMP_BUFFER_FROM_USER) + { + gckOS_CopyPrint(Buffer); + } + else + { + gcdOUTPUTBUFFER( + outputBuffer, outputBuffer->indent, + gcvNULL, Buffer, address, Size, Type, 0 + ); + } +#endif + } + +OnError: + gcmkMUTEX_UNLOCK(lockHandle); + + if (pAllocated) + { + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, pAllocated)); + } + else if (pMapped) + { + gckOS_UnmapUserPointer(Os, buffer, Size, pMapped); + } +} + +/******************************************************************************* +** +** gckOS_DebugTrace +** +** Send a leveled message to the debugger. +** +** INPUT: +** +** gctUINT32 Level +** Debug level of message. +** +** gctCONST_STRING Message +** Pointer to message. +** +** ... +** Optional arguments. 
+** +** OUTPUT: +** +** Nothing. +*/ + +void +gckOS_DebugTrace( + IN gctUINT32 Level, + IN gctCONST_STRING Message, + ... + ) +{ + if (Level > _debugLevel) + { + return; + } + + gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message); +} + +/******************************************************************************* +** +** gckOS_DebugTraceN +** +** Send a leveled message to the debugger. +** +** INPUT: +** +** gctUINT32 Level +** Debug level of message. +** +** gctUINT ArgumentSize +** The size of the optional arguments in bytes. +** +** gctCONST_STRING Message +** Pointer to message. +** +** ... +** Optional arguments. +** +** OUTPUT: +** +** Nothing. +*/ + +void +gckOS_DebugTraceN( + IN gctUINT32 Level, + IN gctUINT ArgumentSize, + IN gctCONST_STRING Message, + ... + ) +{ + if (Level > _debugLevel) + { + return; + } + + gcmDEBUGPRINT(ArgumentSize, gcvFALSE, Message); +} + +/******************************************************************************* +** +** gckOS_DebugTraceZone +** +** Send a leveled and zoned message to the debugger. +** +** INPUT: +** +** gctUINT32 Level +** Debug level for message. +** +** gctUINT32 Zone +** Debug zone for message. +** +** gctCONST_STRING Message +** Pointer to message. +** +** ... +** Optional arguments. +** +** OUTPUT: +** +** Nothing. +*/ + +void +gckOS_DebugTraceZone( + IN gctUINT32 Level, + IN gctUINT32 Zone, + IN gctCONST_STRING Message, + ... + ) +{ + if ((Level > _debugLevel) || !(Zone & _debugZones)) + { + return; + } + + gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message); +} + +/******************************************************************************* +** +** gckOS_DebugTraceZoneN +** +** Send a leveled and zoned message to the debugger. +** +** INPUT: +** +** gctUINT32 Level +** Debug level for message. +** +** gctUINT32 Zone +** Debug zone for message. +** +** gctUINT ArgumentSize +** The size of the optional arguments in bytes. +** +** gctCONST_STRING Message +** Pointer to message. 
+** +** ... +** Optional arguments. +** +** OUTPUT: +** +** Nothing. +*/ + +void +gckOS_DebugTraceZoneN( + IN gctUINT32 Level, + IN gctUINT32 Zone, + IN gctUINT ArgumentSize, + IN gctCONST_STRING Message, + ... + ) +{ + if ((Level > _debugLevel) || !(Zone & _debugZones)) + { + return; + } + + gcmDEBUGPRINT(ArgumentSize, gcvFALSE, Message); +} + +/******************************************************************************* +** +** gckOS_DebugBreak +** +** Break into the debugger. +** +** INPUT: +** +** Nothing. +** +** OUTPUT: +** +** Nothing. +*/ +void +gckOS_DebugBreak( + void + ) +{ + gckOS_DebugTrace(gcvLEVEL_ERROR, "%s(%d)", __FUNCTION__, __LINE__); +} + +/******************************************************************************* +** +** gckOS_DebugFatal +** +** Send a message to the debugger and break into the debugger. +** +** INPUT: +** +** gctCONST_STRING Message +** Pointer to message. +** +** ... +** Optional arguments. +** +** OUTPUT: +** +** Nothing. +*/ +void +gckOS_DebugFatal( + IN gctCONST_STRING Message, + ... + ) +{ + gcmkPRINT_VERSION(); + gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message); + + /* Break into the debugger. */ + gckOS_DebugBreak(); +} + +/******************************************************************************* +** +** gckOS_SetDebugLevel +** +** Set the debug level. +** +** INPUT: +** +** gctUINT32 Level +** New debug level. +** +** OUTPUT: +** +** Nothing. +*/ + +void +gckOS_SetDebugLevel( + IN gctUINT32 Level + ) +{ + _debugLevel = Level; +} + +/******************************************************************************* +** +** gckOS_SetDebugZone +** +** Set the debug zone. +** +** INPUT: +** +** gctUINT32 Zone +** New debug zone. +** +** OUTPUT: +** +** Nothing. +*/ +void +gckOS_SetDebugZone( + IN gctUINT32 Zone + ) +{ + _debugZones = Zone; +} + +/******************************************************************************* +** +** gckOS_SetDebugLevelZone +** +** Set the debug level and zone. 
+** +** INPUT: +** +** gctUINT32 Level +** New debug level. +** +** gctUINT32 Zone +** New debug zone. +** +** OUTPUT: +** +** Nothing. +*/ + +void +gckOS_SetDebugLevelZone( + IN gctUINT32 Level, + IN gctUINT32 Zone + ) +{ + _debugLevel = Level; + _debugZones = Zone; +} + +/******************************************************************************* +** +** gckOS_SetDebugZones +** +** Enable or disable debug zones. +** +** INPUT: +** +** gctUINT32 Zones +** Debug zones to enable or disable. +** +** gctBOOL Enable +** Set to gcvTRUE to enable the zones (or the Zones with the current +** zones) or gcvFALSE to disable the specified Zones. +** +** OUTPUT: +** +** Nothing. +*/ + +void +gckOS_SetDebugZones( + IN gctUINT32 Zones, + IN gctBOOL Enable + ) +{ + if (Enable) + { + /* Enable the zones. */ + _debugZones |= Zones; + } + else + { + /* Disable the zones. */ + _debugZones &= ~Zones; + } +} + +/******************************************************************************* +** +** gckOS_Verify +** +** Called to verify the result of a function call. +** +** INPUT: +** +** gceSTATUS Status +** Function call result. +** +** OUTPUT: +** +** Nothing. +*/ + +void +gckOS_Verify( + IN gceSTATUS status + ) +{ + _lastError = status; +} + +/******************************************************************************* +** +** gckOS_DebugFlush +** +** Force messages to be flushed out. +** +** INPUT: +** +** gctCONST_STRING CallerName +** Name of the caller function. +** +** gctUINT LineNumber +** Line number of the caller. +** +** gctUINT32 DmaAddress +** The current DMA address or ~0U to ignore. +** +** OUTPUT: +** +** Nothing. 
+*/ + +void +gckOS_DebugFlush( + gctCONST_STRING CallerName, + gctUINT LineNumber, + gctUINT32 DmaAddress + ) +{ +#if gcdBUFFERED_OUTPUT + _DirectPrint("\nFlush requested by %s(%d).\n\n", CallerName, LineNumber); + _Flush(DmaAddress); +#endif +} +gctCONST_STRING +gckOS_DebugStatus2Name( + gceSTATUS status + ) +{ + switch (status) + { + case gcvSTATUS_OK: + return "gcvSTATUS_OK"; + case gcvSTATUS_TRUE: + return "gcvSTATUS_TRUE"; + case gcvSTATUS_NO_MORE_DATA: + return "gcvSTATUS_NO_MORE_DATA"; + case gcvSTATUS_CACHED: + return "gcvSTATUS_CACHED"; + case gcvSTATUS_MIPMAP_TOO_LARGE: + return "gcvSTATUS_MIPMAP_TOO_LARGE"; + case gcvSTATUS_NAME_NOT_FOUND: + return "gcvSTATUS_NAME_NOT_FOUND"; + case gcvSTATUS_NOT_OUR_INTERRUPT: + return "gcvSTATUS_NOT_OUR_INTERRUPT"; + case gcvSTATUS_MISMATCH: + return "gcvSTATUS_MISMATCH"; + case gcvSTATUS_MIPMAP_TOO_SMALL: + return "gcvSTATUS_MIPMAP_TOO_SMALL"; + case gcvSTATUS_LARGER: + return "gcvSTATUS_LARGER"; + case gcvSTATUS_SMALLER: + return "gcvSTATUS_SMALLER"; + case gcvSTATUS_CHIP_NOT_READY: + return "gcvSTATUS_CHIP_NOT_READY"; + case gcvSTATUS_NEED_CONVERSION: + return "gcvSTATUS_NEED_CONVERSION"; + case gcvSTATUS_SKIP: + return "gcvSTATUS_SKIP"; + case gcvSTATUS_DATA_TOO_LARGE: + return "gcvSTATUS_DATA_TOO_LARGE"; + case gcvSTATUS_INVALID_CONFIG: + return "gcvSTATUS_INVALID_CONFIG"; + case gcvSTATUS_CHANGED: + return "gcvSTATUS_CHANGED"; + case gcvSTATUS_NOT_SUPPORT_DITHER: + return "gcvSTATUS_NOT_SUPPORT_DITHER"; + + case gcvSTATUS_INVALID_ARGUMENT: + return "gcvSTATUS_INVALID_ARGUMENT"; + case gcvSTATUS_INVALID_OBJECT: + return "gcvSTATUS_INVALID_OBJECT"; + case gcvSTATUS_OUT_OF_MEMORY: + return "gcvSTATUS_OUT_OF_MEMORY"; + case gcvSTATUS_MEMORY_LOCKED: + return "gcvSTATUS_MEMORY_LOCKED"; + case gcvSTATUS_MEMORY_UNLOCKED: + return "gcvSTATUS_MEMORY_UNLOCKED"; + case gcvSTATUS_HEAP_CORRUPTED: + return "gcvSTATUS_HEAP_CORRUPTED"; + case gcvSTATUS_GENERIC_IO: + return "gcvSTATUS_GENERIC_IO"; + case gcvSTATUS_INVALID_ADDRESS: 
+ return "gcvSTATUS_INVALID_ADDRESS"; + case gcvSTATUS_CONTEXT_LOSSED: + return "gcvSTATUS_CONTEXT_LOSSED"; + case gcvSTATUS_TOO_COMPLEX: + return "gcvSTATUS_TOO_COMPLEX"; + case gcvSTATUS_BUFFER_TOO_SMALL: + return "gcvSTATUS_BUFFER_TOO_SMALL"; + case gcvSTATUS_INTERFACE_ERROR: + return "gcvSTATUS_INTERFACE_ERROR"; + case gcvSTATUS_NOT_SUPPORTED: + return "gcvSTATUS_NOT_SUPPORTED"; + case gcvSTATUS_MORE_DATA: + return "gcvSTATUS_MORE_DATA"; + case gcvSTATUS_TIMEOUT: + return "gcvSTATUS_TIMEOUT"; + case gcvSTATUS_OUT_OF_RESOURCES: + return "gcvSTATUS_OUT_OF_RESOURCES"; + case gcvSTATUS_INVALID_DATA: + return "gcvSTATUS_INVALID_DATA"; + case gcvSTATUS_INVALID_MIPMAP: + return "gcvSTATUS_INVALID_MIPMAP"; + case gcvSTATUS_NOT_FOUND: + return "gcvSTATUS_NOT_FOUND"; + case gcvSTATUS_NOT_ALIGNED: + return "gcvSTATUS_NOT_ALIGNED"; + case gcvSTATUS_INVALID_REQUEST: + return "gcvSTATUS_INVALID_REQUEST"; + case gcvSTATUS_GPU_NOT_RESPONDING: + return "gcvSTATUS_GPU_NOT_RESPONDING"; + case gcvSTATUS_TIMER_OVERFLOW: + return "gcvSTATUS_TIMER_OVERFLOW"; + case gcvSTATUS_VERSION_MISMATCH: + return "gcvSTATUS_VERSION_MISMATCH"; + case gcvSTATUS_LOCKED: + return "gcvSTATUS_LOCKED"; + case gcvSTATUS_INTERRUPTED: + return "gcvSTATUS_INTERRUPTED"; + case gcvSTATUS_DEVICE: + return "gcvSTATUS_DEVICE"; + case gcvSTATUS_NOT_MULTI_PIPE_ALIGNED: + return "gcvSTATUS_NOT_MULTI_PIPE_ALIGNED"; + + /* Linker errors. 
*/ + case gcvSTATUS_GLOBAL_TYPE_MISMATCH: + return "gcvSTATUS_GLOBAL_TYPE_MISMATCH"; + case gcvSTATUS_TOO_MANY_ATTRIBUTES: + return "gcvSTATUS_TOO_MANY_ATTRIBUTES"; + case gcvSTATUS_TOO_MANY_UNIFORMS: + return "gcvSTATUS_TOO_MANY_UNIFORMS"; + case gcvSTATUS_TOO_MANY_VARYINGS: + return "gcvSTATUS_TOO_MANY_VARYINGS"; + case gcvSTATUS_UNDECLARED_VARYING: + return "gcvSTATUS_UNDECLARED_VARYING"; + case gcvSTATUS_VARYING_TYPE_MISMATCH: + return "gcvSTATUS_VARYING_TYPE_MISMATCH"; + case gcvSTATUS_MISSING_MAIN: + return "gcvSTATUS_MISSING_MAIN"; + case gcvSTATUS_NAME_MISMATCH: + return "gcvSTATUS_NAME_MISMATCH"; + case gcvSTATUS_INVALID_INDEX: + return "gcvSTATUS_INVALID_INDEX"; + case gcvSTATUS_UNIFORM_MISMATCH: + return "gcvSTATUS_UNIFORM_MISMATCH"; + case gcvSTATUS_UNSAT_LIB_SYMBOL: + return "gcvSTATUS_UNSAT_LIB_SYMBOL"; + case gcvSTATUS_TOO_MANY_SHADERS: + return "gcvSTATUS_TOO_MANY_SHADERS"; + case gcvSTATUS_LINK_INVALID_SHADERS: + return "gcvSTATUS_LINK_INVALID_SHADERS"; + case gcvSTATUS_CS_NO_WORKGROUP_SIZE: + return "gcvSTATUS_CS_NO_WORKGROUP_SIZE"; + case gcvSTATUS_LINK_LIB_ERROR: + return "gcvSTATUS_LINK_LIB_ERROR"; + case gcvSTATUS_SHADER_VERSION_MISMATCH: + return "gcvSTATUS_SHADER_VERSION_MISMATCH"; + case gcvSTATUS_TOO_MANY_INSTRUCTION: + return "gcvSTATUS_TOO_MANY_INSTRUCTION"; + case gcvSTATUS_SSBO_MISMATCH: + return "gcvSTATUS_SSBO_MISMATCH"; + case gcvSTATUS_TOO_MANY_OUTPUT: + return "gcvSTATUS_TOO_MANY_OUTPUT"; + case gcvSTATUS_TOO_MANY_INPUT: + return "gcvSTATUS_TOO_MANY_INPUT"; + case gcvSTATUS_NOT_SUPPORT_CL: + return "gcvSTATUS_NOT_SUPPORT_CL"; + case gcvSTATUS_NOT_SUPPORT_INTEGER: + return "gcvSTATUS_NOT_SUPPORT_INTEGER"; + case gcvSTATUS_UNIFORM_TYPE_MISMATCH: + return "gcvSTATUS_UNIFORM_TYPE_MISMATCH"; + case gcvSTATUS_MISSING_PRIMITIVE_TYPE: + return "gcvSTATUS_MISSING_PRIMITIVE_TYPE"; + case gcvSTATUS_MISSING_OUTPUT_VERTEX_COUNT: + return "gcvSTATUS_MISSING_OUTPUT_VERTEX_COUNT"; + case gcvSTATUS_NON_INVOCATION_ID_AS_INDEX: + return 
"gcvSTATUS_NON_INVOCATION_ID_AS_INDEX"; + case gcvSTATUS_INPUT_ARRAY_SIZE_MISMATCH: + return "gcvSTATUS_INPUT_ARRAY_SIZE_MISMATCH"; + case gcvSTATUS_OUTPUT_ARRAY_SIZE_MISMATCH: + return "gcvSTATUS_OUTPUT_ARRAY_SIZE_MISMATCH"; + + /* Compiler errors. */ + case gcvSTATUS_COMPILER_FE_PREPROCESSOR_ERROR: + return "gcvSTATUS_COMPILER_FE_PREPROCESSOR_ERROR"; + case gcvSTATUS_COMPILER_FE_PARSER_ERROR: + return "gcvSTATUS_COMPILER_FE_PARSER_ERROR"; + default: + return "nil"; + } +} + +/******************************************************************************* +***** Binary Trace ************************************************************* +*******************************************************************************/ + +/******************************************************************************* +** _VerifyMessage +** +** Verify a binary trace message, decode it to human readable string and print +** it. +** +** ARGUMENTS: +** +** gctCONST_STRING Buffer +** Pointer to buffer to store. +** +** gctSIZE_T Bytes +** Buffer length. +*/ +void +_VerifyMessage( + IN gctCONST_STRING Buffer, + IN gctSIZE_T Bytes + ) +{ + char arguments[150] = {0}; + char format[100] = {0}; + + gctSTRING function; + gctPOINTER args; + gctUINT32 numArguments; + int i = 0; + gctUINT32 functionBytes; + + gcsBINARY_TRACE_MESSAGE_PTR message = (gcsBINARY_TRACE_MESSAGE_PTR)Buffer; + + /* Check signature. */ + if (message->signature != 0x7FFFFFFF) + { + gcmkPRINT("Signature error"); + return; + } + + /* Get function name. */ + function = (gctSTRING)&message->payload; + functionBytes = (gctUINT32)strlen(function) + 1; + + /* Get arguments number. */ + numArguments = message->numArguments; + + /* Get arguments . */ + args = function + functionBytes; + + /* Prepare format string. 
*/ + while (numArguments--) + { + format[i++] = '%'; + format[i++] = 'x'; + format[i++] = ' '; + } + + format[i] = '\0'; + + if (numArguments) + { + gcmkVSPRINTF(arguments, 150, format, (gctARGUMENTS *) &args); + } + + gcmkPRINT("[%d](%d): %s(%d) %s", + message->pid, + message->tid, + function, + message->line, + arguments); +} + + +/******************************************************************************* +** gckOS_WriteToRingBuffer +** +** Store a buffer to ring buffer. +** +** ARGUMENTS: +** +** gctCONST_STRING Buffer +** Pointer to buffer to store. +** +** gctSIZE_T Bytes +** Buffer length. +*/ +void +gckOS_WriteToRingBuffer( + IN gctCONST_STRING Buffer, + IN gctSIZE_T Bytes + ) +{ + +} + +/******************************************************************************* +** gckOS_BinaryTrace +** +** Output a binary trace message. +** +** ARGUMENTS: +** +** gctCONST_STRING Function +** Pointer to function name. +** +** gctINT Line +** Line number. +** +** gctCONST_STRING Text OPTIONAL +** Optional pointer to a descriptive text. +** +** ... +** Optional arguments to the descriptive text. +*/ +void +gckOS_BinaryTrace( + IN gctCONST_STRING Function, + IN gctINT Line, + IN gctCONST_STRING Text OPTIONAL, + ... + ) +{ + static gctUINT32 messageSignature = 0x7FFFFFFF; + char buffer[gcdBINARY_TRACE_MESSAGE_SIZE]; + gctUINT32 numArguments = 0; + gctUINT32 functionBytes; + gctUINT32 i = 0; + gctSTRING payload; + gcsBINARY_TRACE_MESSAGE_PTR message = (gcsBINARY_TRACE_MESSAGE_PTR)buffer; + + /* Calculate arguments number. */ + if (Text) + { + while (Text[i] != '\0') + { + if (Text[i] == '%') + { + numArguments++; + } + i++; + } + } + + message->signature = messageSignature; + message->pid = gcmkGETPROCESSID(); + message->tid = gcmkGETTHREADID(); + message->line = Line; + message->numArguments = numArguments; + + payload = (gctSTRING)&message->payload; + + /* Function name. 
*/ + functionBytes = (gctUINT32)gcmkSTRLEN(Function) + 1; + gcmkMEMCPY(payload, Function, functionBytes); + + /* Advance to next payload. */ + payload += functionBytes; + + /* Arguments value. */ + if (numArguments) + { + gctARGUMENTS p; + gcmkARGUMENTS_START(p, Text); + + for (i = 0; i < numArguments; ++i) + { + gctPOINTER value = gcmkARGUMENTS_ARG(p, gctPOINTER); + gcmkMEMCPY(payload, &value, gcmSIZEOF(gctPOINTER)); + payload += gcmSIZEOF(gctPOINTER); + } + + gcmkARGUMENTS_END(p); + } + + gcmkASSERT(payload - buffer <= gcdBINARY_TRACE_MESSAGE_SIZE); + + + /* Send buffer to ring buffer. */ + gckOS_WriteToRingBuffer(buffer, (gctUINT32)(payload - buffer)); +} + diff --git a/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_event.c b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_event.c new file mode 100644 index 000000000000..67400d0a41a9 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_event.c @@ -0,0 +1,3017 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#include "gc_hal_kernel_precomp.h" +#include "gc_hal_kernel_buffer.h" + +#ifdef __QNXNTO__ +#include "gc_hal_kernel_qnx.h" +#endif + +#define _GC_OBJ_ZONE gcvZONE_EVENT + +#define gcdEVENT_ALLOCATION_COUNT (4096 / gcmSIZEOF(gcsHAL_INTERFACE)) +#define gcdEVENT_MIN_THRESHOLD 4 + +/******************************************************************************\ +********************************* Support Code ********************************* +\******************************************************************************/ + +static gcmINLINE gceSTATUS +gckEVENT_AllocateQueue( + IN gckEVENT Event, + OUT gcsEVENT_QUEUE_PTR * Queue + ) +{ + gceSTATUS status; + + gcmkHEADER_ARG("Event=0x%x", Event); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT); + gcmkVERIFY_ARGUMENT(Queue != gcvNULL); + + /* Do we have free queues? */ + if (Event->freeList == gcvNULL) + { + gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); + } + + /* Move one free queue from the free list. */ + * Queue = Event->freeList; + Event->freeList = Event->freeList->next; + + /* Success. */ + gcmkFOOTER_ARG("*Queue=0x%x", gcmOPT_POINTER(Queue)); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +static gceSTATUS +gckEVENT_FreeQueue( + IN gckEVENT Event, + OUT gcsEVENT_QUEUE_PTR Queue + ) +{ + gceSTATUS status = gcvSTATUS_OK; + + gcmkHEADER_ARG("Event=0x%x", Event); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT); + gcmkVERIFY_ARGUMENT(Queue != gcvNULL); + + /* Move one free queue from the free list. */ + Queue->next = Event->freeList; + Event->freeList = Queue; + + /* Success. 
*/ + gcmkFOOTER(); + return status; +} + +static gceSTATUS +gckEVENT_FreeRecord( + IN gckEVENT Event, + IN gcsEVENT_PTR Record + ) +{ + gceSTATUS status; + gctBOOL acquired = gcvFALSE; + + gcmkHEADER_ARG("Event=0x%x Record=0x%x", Event, Record); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT); + gcmkVERIFY_ARGUMENT(Record != gcvNULL); + + /* Acquire the mutex. */ + gcmkONERROR(gckOS_AcquireMutex(Event->os, + Event->freeEventMutex, + gcvINFINITE)); + acquired = gcvTRUE; + + /* Push the record on the free list. */ + Record->next = Event->freeEventList; + Event->freeEventList = Record; + Event->freeEventCount += 1; + + /* Release the mutex. */ + gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Roll back. */ + if (acquired) + { + gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex)); + } + + /* Return the status. */ + gcmkFOOTER(); + return gcvSTATUS_OK; +} + +static gceSTATUS +gckEVENT_IsEmpty( + IN gckEVENT Event, + OUT gctBOOL_PTR IsEmpty + ) +{ + gceSTATUS status; + gctSIZE_T i; + + gcmkHEADER_ARG("Event=0x%x", Event); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT); + gcmkVERIFY_ARGUMENT(IsEmpty != gcvNULL); + + /* Assume the event queue is empty. */ + *IsEmpty = gcvTRUE; + + /* Walk the event queue. */ + for (i = 0; i < gcmCOUNTOF(Event->queues); ++i) + { + /* Check whether this event is in use. */ + if (Event->queues[i].head != gcvNULL) + { + /* The event is in use, hence the queue is not empty. */ + *IsEmpty = gcvFALSE; + break; + } + } + + /* Try acquiring the mutex. */ + status = gckOS_AcquireMutex(Event->os, Event->eventQueueMutex, 0); + if (status == gcvSTATUS_TIMEOUT) + { + /* Timeout - queue is no longer empty. */ + *IsEmpty = gcvFALSE; + } + else + { + /* Bail out on error. */ + gcmkONERROR(status); + + /* Release the mutex. 
*/ + gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex)); + } + + /* Success. */ + gcmkFOOTER_ARG("*IsEmpty=%d", gcmOPT_VALUE(IsEmpty)); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +static gceSTATUS +_TryToIdleGPU( + IN gckEVENT Event +) +{ + gceSTATUS status; + gctBOOL empty = gcvFALSE, idle = gcvFALSE; + gctBOOL powerLocked = gcvFALSE; + gckHARDWARE hardware; + + gcmkHEADER_ARG("Event=0x%x", Event); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT); + + /* Grab gckHARDWARE object. */ + hardware = Event->kernel->hardware; + gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE); + + /* Check whether the event queue is empty. */ + gcmkONERROR(gckEVENT_IsEmpty(Event, &empty)); + + if (empty) + { + status = gckOS_AcquireMutex(hardware->os, hardware->powerMutex, 0); + if (status == gcvSTATUS_TIMEOUT) + { + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + } + + powerLocked = gcvTRUE; + + /* Query whether the hardware is idle. */ + gcmkONERROR(gckHARDWARE_QueryIdle(Event->kernel->hardware, &idle)); + + gcmkONERROR(gckOS_ReleaseMutex(hardware->os, hardware->powerMutex)); + powerLocked = gcvFALSE; + + if (idle) + { + /* Inform the system of idle GPU. 
*/ + gcmkONERROR(gckOS_Broadcast(Event->os, + Event->kernel->hardware, + gcvBROADCAST_GPU_IDLE)); + } + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (powerLocked) + { + gcmkONERROR(gckOS_ReleaseMutex(hardware->os, hardware->powerMutex)); + } + + gcmkFOOTER(); + return status; +} + +static gceSTATUS +__RemoveRecordFromProcessDB( + IN gckEVENT Event, + IN gcsEVENT_PTR Record + ) +{ + gcmkHEADER_ARG("Event=0x%x Record=0x%x", Event, Record); + gcmkVERIFY_ARGUMENT(Record != gcvNULL); + + switch (Record->info.command) + { + case gcvHAL_FREE_NON_PAGED_MEMORY: + gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB( + Event->kernel, + Record->processID, + gcvDB_NON_PAGED, + gcmUINT64_TO_PTR(Record->info.u.FreeNonPagedMemory.logical))); + break; + + case gcvHAL_FREE_CONTIGUOUS_MEMORY: + gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB( + Event->kernel, + Record->processID, + gcvDB_CONTIGUOUS, + gcmUINT64_TO_PTR(Record->info.u.FreeContiguousMemory.logical))); + break; + + case gcvHAL_UNLOCK_VIDEO_MEMORY: + gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB( + Event->kernel, + Record->processID, + gcvDB_VIDEO_MEMORY_LOCKED, + gcmUINT64_TO_PTR(Record->info.u.UnlockVideoMemory.node))); + break; + + case gcvHAL_UNMAP_USER_MEMORY: + gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB( + Event->kernel, + Record->processID, + gcvDB_MAP_USER_MEMORY, + gcmINT2PTR(Record->info.u.UnmapUserMemory.info))); + break; + + case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER: + gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB( + Event->kernel, + Record->processID, + gcvDB_COMMAND_BUFFER, + gcmUINT64_TO_PTR(Record->info.u.FreeVirtualCommandBuffer.logical))); + break; + + default: + break; + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +static gceSTATUS +_ReleaseVideoMemoryHandle( + IN gckKERNEL Kernel, + IN OUT gcsEVENT_PTR Record, + IN OUT gcsHAL_INTERFACE * Interface + ) +{ + gceSTATUS status; + gckVIDMEM_NODE nodeObject; + gctUINT32 handle; + + switch(Interface->command) + { + case gcvHAL_UNLOCK_VIDEO_MEMORY: + handle = 
(gctUINT32)Interface->u.UnlockVideoMemory.node; + + gcmkONERROR(gckVIDMEM_HANDLE_Lookup( + Kernel, Record->processID, handle, &nodeObject)); + + Record->info.u.UnlockVideoMemory.node = gcmPTR_TO_UINT64(nodeObject); + + gckVIDMEM_HANDLE_Dereference(Kernel, Record->processID, handle); + break; + + default: + break; + } + + return gcvSTATUS_OK; +OnError: + return status; +} + +/******************************************************************************* +** +** _QueryFlush +** +** Check the type of surfaces which will be released by current event and +** determine the cache needed to flush. +** +*/ +static gceSTATUS +_QueryFlush( + IN gckEVENT Event, + IN gcsEVENT_PTR Record, + OUT gceKERNEL_FLUSH *Flush + ) +{ + gceKERNEL_FLUSH flush = 0; + gcmkHEADER_ARG("Event=0x%x Record=0x%x", Event, Record); + gcmkVERIFY_ARGUMENT(Record != gcvNULL); + + while (Record != gcvNULL) + { + switch (Record->info.command) + { + case gcvHAL_UNLOCK_VIDEO_MEMORY: + switch(Record->info.u.UnlockVideoMemory.type) + { + case gcvSURF_TILE_STATUS: + flush |= gcvFLUSH_TILE_STATUS; + break; + case gcvSURF_RENDER_TARGET: + flush |= gcvFLUSH_COLOR; + break; + case gcvSURF_DEPTH: + flush |= gcvFLUSH_DEPTH; + break; + case gcvSURF_TEXTURE: + flush |= gcvFLUSH_TEXTURE; + break; + case gcvSURF_ICACHE: + flush |= gcvFLUSH_ICACHE; + break; + case gcvSURF_TXDESC: + flush |= gcvFLUSH_TXDESC; + break; + case gcvSURF_FENCE: + flush |= gcvFLUSH_FENCE; + break; + case gcvSURF_VERTEX: + flush |= gcvFLUSH_VERTEX; + break; + case gcvSURF_TFBHEADER: + flush |= gcvFLUSH_TFBHEADER; + break; + case gcvSURF_TYPE_UNKNOWN: + *Flush = gcvFLUSH_ALL; + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + default: + break; + } + break; + case gcvHAL_UNMAP_USER_MEMORY: + *Flush = gcvFLUSH_ALL; + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + + default: + break; + } + + Record = Record->next; + } + + *Flush = flush; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +void +_SubmitTimerFunction( + gctPOINTER Data + ) +{ + gckEVENT event = 
(gckEVENT)Data; + gcmkVERIFY_OK(gckEVENT_Submit(event, gcvTRUE, gcvFALSE)); +} + +/******************************************************************************\ +******************************* gckEVENT API Code ******************************* +\******************************************************************************/ + +/******************************************************************************* +** +** gckEVENT_Construct +** +** Construct a new gckEVENT object. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** OUTPUT: +** +** gckEVENT * Event +** Pointer to a variable that receives the gckEVENT object pointer. +*/ +gceSTATUS +gckEVENT_Construct( + IN gckKERNEL Kernel, + OUT gckEVENT * Event + ) +{ + gckOS os; + gceSTATUS status; + gckEVENT eventObj = gcvNULL; + int i; + gcsEVENT_PTR record; + gctPOINTER pointer = gcvNULL; + + gcmkHEADER_ARG("Kernel=0x%x", Kernel); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + gcmkVERIFY_ARGUMENT(Event != gcvNULL); + + /* Extract the pointer to the gckOS object. */ + os = Kernel->os; + gcmkVERIFY_OBJECT(os, gcvOBJ_OS); + + /* Allocate the gckEVENT object. */ + gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(struct _gckEVENT), &pointer)); + + eventObj = pointer; + + /* Reset the object. */ + gcmkVERIFY_OK(gckOS_ZeroMemory(eventObj, gcmSIZEOF(struct _gckEVENT))); + + /* Initialize the gckEVENT object. */ + eventObj->object.type = gcvOBJ_EVENT; + eventObj->kernel = Kernel; + eventObj->os = os; + + /* Create the mutexes. */ + gcmkONERROR(gckOS_CreateMutex(os, &eventObj->eventQueueMutex)); + gcmkONERROR(gckOS_CreateMutex(os, &eventObj->freeEventMutex)); + gcmkONERROR(gckOS_CreateMutex(os, &eventObj->eventListMutex)); + + /* Create a bunch of event reccords. */ + for (i = 0; i < gcdEVENT_ALLOCATION_COUNT; i += 1) + { + /* Allocate an event record. 
*/ + gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcsEVENT), &pointer)); + + record = pointer; + + /* Push it on the free list. */ + record->next = eventObj->freeEventList; + eventObj->freeEventList = record; + eventObj->freeEventCount += 1; + } + + /* Initialize the free list of event queues. */ + for (i = 0; i < gcdREPO_LIST_COUNT; i += 1) + { + eventObj->repoList[i].next = eventObj->freeList; + eventObj->freeList = &eventObj->repoList[i]; + } + + eventObj->freeQueueCount = gcmCOUNTOF(eventObj->queues); + + gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->pending)); + + gcmkVERIFY_OK(gckOS_CreateTimer(os, + _SubmitTimerFunction, + (gctPOINTER)eventObj, + &eventObj->submitTimer)); + +#if gcdINTERRUPT_STATISTIC + gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->interruptCount)); + gcmkONERROR(gckOS_AtomSet(os,eventObj->interruptCount, 0)); +#endif + + eventObj->notifyState = -1; + + /* Return pointer to the gckEVENT object. */ + *Event = eventObj; + + /* Success. */ + gcmkFOOTER_ARG("*Event=0x%x", *Event); + return gcvSTATUS_OK; + +OnError: + /* Roll back. */ + if (eventObj != gcvNULL) + { + if (eventObj->eventQueueMutex != gcvNULL) + { + gcmkVERIFY_OK(gckOS_DeleteMutex(os, eventObj->eventQueueMutex)); + } + + if (eventObj->freeEventMutex != gcvNULL) + { + gcmkVERIFY_OK(gckOS_DeleteMutex(os, eventObj->freeEventMutex)); + } + + if (eventObj->eventListMutex != gcvNULL) + { + gcmkVERIFY_OK(gckOS_DeleteMutex(os, eventObj->eventListMutex)); + } + + while (eventObj->freeEventList != gcvNULL) + { + record = eventObj->freeEventList; + eventObj->freeEventList = record->next; + + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, record)); + } + + if (eventObj->pending != gcvNULL) + { + gcmkVERIFY_OK(gckOS_AtomDestroy(os, eventObj->pending)); + } + +#if gcdINTERRUPT_STATISTIC + if (eventObj->interruptCount) + { + gcmkVERIFY_OK(gckOS_AtomDestroy(os, eventObj->interruptCount)); + } +#endif + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, eventObj)); + } + + /* Return the status. 
*/ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckEVENT_Destroy +** +** Destroy an gckEVENT object. +** +** INPUT: +** +** gckEVENT Event +** Pointer to an gckEVENT object. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckEVENT_Destroy( + IN gckEVENT Event + ) +{ + gcsEVENT_PTR record; + gcsEVENT_QUEUE_PTR queue; + + gcmkHEADER_ARG("Event=0x%x", Event); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT); + + if (Event->submitTimer != gcvNULL) + { + gcmkVERIFY_OK(gckOS_StopTimer(Event->os, Event->submitTimer)); + gcmkVERIFY_OK(gckOS_DestroyTimer(Event->os, Event->submitTimer)); + } + + /* Delete the queue mutex. */ + gcmkVERIFY_OK(gckOS_DeleteMutex(Event->os, Event->eventQueueMutex)); + + /* Free all free events. */ + while (Event->freeEventList != gcvNULL) + { + record = Event->freeEventList; + Event->freeEventList = record->next; + + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Event->os, record)); + } + + /* Delete the free mutex. */ + gcmkVERIFY_OK(gckOS_DeleteMutex(Event->os, Event->freeEventMutex)); + + /* Free all pending queues. */ + while (Event->queueHead != gcvNULL) + { + /* Get the current queue. */ + queue = Event->queueHead; + + /* Free all pending events. */ + while (queue->head != gcvNULL) + { + record = queue->head; + queue->head = record->next; + + gcmkTRACE_ZONE_N( + gcvLEVEL_WARNING, gcvZONE_EVENT, + gcmSIZEOF(record) + gcmSIZEOF(queue->source), + "Event record 0x%x is still pending for %d.", + record, queue->source + ); + + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Event->os, record)); + } + + /* Remove the top queue from the list. */ + if (Event->queueHead == Event->queueTail) + { + Event->queueHead = + Event->queueTail = gcvNULL; + } + else + { + Event->queueHead = Event->queueHead->next; + } + + /* Free the queue. */ + gcmkVERIFY_OK(gckEVENT_FreeQueue(Event, queue)); + } + + /* Delete the list mutex. 
*/ + gcmkVERIFY_OK(gckOS_DeleteMutex(Event->os, Event->eventListMutex)); + + gcmkVERIFY_OK(gckOS_AtomDestroy(Event->os, Event->pending)); + +#if gcdINTERRUPT_STATISTIC + gcmkVERIFY_OK(gckOS_AtomDestroy(Event->os, Event->interruptCount)); +#endif + + /* Mark the gckEVENT object as unknown. */ + Event->object.type = gcvOBJ_UNKNOWN; + + /* Free the gckEVENT object. */ + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Event->os, Event)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckEVENT_GetEvent +** +** Reserve the next available hardware event. +** +** INPUT: +** +** gckEVENT Event +** Pointer to an gckEVENT object. +** +** gctBOOL Wait +** Set to gcvTRUE to force the function to wait if no events are +** immediately available. +** +** gceKERNEL_WHERE Source +** Source of the event. +** +** OUTPUT: +** +** gctUINT8 * EventID +** Reserved event ID. +*/ +#define gcdINVALID_EVENT_PTR ((gcsEVENT_PTR)gcvMAXUINTPTR_T) + +gceSTATUS +gckEVENT_GetEvent( + IN gckEVENT Event, + IN gctBOOL Wait, + OUT gctUINT8 * EventID, + IN gceKERNEL_WHERE Source + ) +{ + gctINT i, id; + gceSTATUS status; + gctBOOL acquired = gcvFALSE; + gctINT32 free; + + gcmkHEADER_ARG("Event=0x%x Source=%d", Event, Source); + + while (gcvTRUE) + { + /* Grab the queue mutex. */ + gcmkONERROR(gckOS_AcquireMutex(Event->os, + Event->eventQueueMutex, + gcvINFINITE)); + acquired = gcvTRUE; + + /* Walk through all events. */ + id = Event->lastID; + for (i = 0; i < gcmCOUNTOF(Event->queues); ++i) + { + gctINT nextID = id + 1; + + if (nextID == gcmCOUNTOF(Event->queues)) + { + nextID = 0; + } + + if (Event->queues[id].head == gcvNULL) + { + *EventID = (gctUINT8) id; + + Event->lastID = (gctUINT8) nextID; + + /* Save time stamp of event. */ + Event->queues[id].head = gcdINVALID_EVENT_PTR; + Event->queues[id].stamp = ++(Event->stamp); + Event->queues[id].source = Source; + + /* Decrease the number of free events. 
*/ + free = --Event->freeQueueCount; + + /* Make compiler happy. */ + free = free; + +#if gcdDYNAMIC_SPEED + if (free <= gcdDYNAMIC_EVENT_THRESHOLD) + { + gcmkONERROR(gckOS_BroadcastHurry( + Event->os, + Event->kernel->hardware, + gcdDYNAMIC_EVENT_THRESHOLD - free)); + } +#endif + + /* Release the queue mutex. */ + gcmkONERROR(gckOS_ReleaseMutex(Event->os, + Event->eventQueueMutex)); + + /* Success. */ + gcmkTRACE_ZONE_N( + gcvLEVEL_INFO, gcvZONE_EVENT, + gcmSIZEOF(id), + "Using id=%d", + id + ); + + gcmkFOOTER_ARG("*EventID=%u", *EventID); + return gcvSTATUS_OK; + } + + id = nextID; + } + +#if gcdDYNAMIC_SPEED + /* No free events, speed up the GPU right now! */ + gcmkONERROR(gckOS_BroadcastHurry(Event->os, + Event->kernel->hardware, + gcdDYNAMIC_EVENT_THRESHOLD)); +#endif + + /* Release the queue mutex. */ + gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex)); + acquired = gcvFALSE; + + /* Fail if wait is not requested. */ + if (!Wait) + { + /* Out of resources. */ + gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); + } + + /* Delay a while. */ + gcmkONERROR(gckOS_Delay(Event->os, 1)); + } + +OnError: + if (acquired) + { + /* Release the queue mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex)); + } + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckEVENT_AllocateRecord +** +** Allocate a record for the new event. +** +** INPUT: +** +** gckEVENT Event +** Pointer to an gckEVENT object. +** +** gctBOOL AllocateAllowed +** State for allocation if out of free events. +** +** OUTPUT: +** +** gcsEVENT_PTR * Record +** Allocated event record. 
+*/
+static gcmINLINE gceSTATUS
+gckEVENT_AllocateRecord(
+    IN gckEVENT Event,
+    IN gctBOOL AllocateAllowed,
+    OUT gcsEVENT_PTR * Record
+    )
+{
+    gceSTATUS status;
+    gctBOOL acquired = gcvFALSE;
+    gctINT i;
+    gcsEVENT_PTR record;
+    gctPOINTER pointer = gcvNULL;
+
+    gcmkHEADER_ARG("Event=0x%x AllocateAllowed=%d", Event, AllocateAllowed);
+
+    /* Verify the arguments. */
+    gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
+    gcmkVERIFY_ARGUMENT(Record != gcvNULL);
+
+    /* Acquire the mutex.  The free list and freeEventCount are only ever
+    ** touched with freeEventMutex held. */
+    gcmkONERROR(gckOS_AcquireMutex(Event->os, Event->freeEventMutex, gcvINFINITE));
+    acquired = gcvTRUE;
+
+    /* Test if we are below the allocation threshold.  Note the second arm:
+    ** even when AllocateAllowed is gcvFALSE we still allocate a batch if
+    ** the free list is completely empty, so the unconditional pop below
+    ** always has at least one record to take. */
+    if ( (AllocateAllowed && (Event->freeEventCount < gcdEVENT_MIN_THRESHOLD)) ||
+         (Event->freeEventCount == 0) )
+    {
+        /* Allocate a bunch of records (gcdEVENT_ALLOCATION_COUNT at a
+        ** time). */
+        for (i = 0; i < gcdEVENT_ALLOCATION_COUNT; i += 1)
+        {
+            /* Allocate an event record. */
+            gcmkONERROR(gckOS_Allocate(Event->os,
+                                       gcmSIZEOF(gcsEVENT),
+                                       &pointer));
+
+            record = pointer;
+
+            /* Push it on the free list. */
+            record->next = Event->freeEventList;
+            Event->freeEventList = record;
+            Event->freeEventCount += 1;
+        }
+    }
+
+    /* Pop one record off the free list for the caller. */
+    *Record = Event->freeEventList;
+    Event->freeEventList = Event->freeEventList->next;
+    Event->freeEventCount -= 1;
+
+    /* Release the mutex. */
+    gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
+
+    /* Success. */
+    gcmkFOOTER_ARG("*Record=0x%x", gcmOPT_POINTER(Record));
+    return gcvSTATUS_OK;
+
+OnError:
+    /* Roll back: drop the mutex if we still own it.  Records already pushed
+    ** onto the free list are kept; gckEVENT_Destroy reclaims the whole
+    ** free list. */
+    if (acquired)
+    {
+        gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
+    }
+
+    /* Return the status. */
+    gcmkFOOTER();
+    return status;
+}
+
+/*******************************************************************************
+**
+**  gckEVENT_AddList
+**
+**  Add a new event to the list of events.
+**
+**  INPUT:
+**
+**      gckEVENT Event
+**          Pointer to an gckEVENT object.
+**
+**      gcsHAL_INTERFACE_PTR Interface
+**          Pointer to the interface for the event to be added.
+** +** gceKERNEL_WHERE FromWhere +** Place in the pipe where the event needs to be generated. +** +** gctBOOL AllocateAllowed +** State for allocation if out of free events. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckEVENT_AddList( + IN gckEVENT Event, + IN gcsHAL_INTERFACE_PTR Interface, + IN gceKERNEL_WHERE FromWhere, + IN gctBOOL AllocateAllowed, + IN gctBOOL FromKernel + ) +{ + gceSTATUS status; + gctBOOL acquired = gcvFALSE; + gcsEVENT_PTR record = gcvNULL; + gcsEVENT_QUEUE_PTR queue; + gckVIRTUAL_COMMAND_BUFFER_PTR buffer; + gckKERNEL kernel = Event->kernel; + + gcmkHEADER_ARG("Event=0x%x Interface=0x%x", + Event, Interface); + + gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, _GC_OBJ_ZONE, + "FromWhere=%d AllocateAllowed=%d", + FromWhere, AllocateAllowed); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT); + gcmkVERIFY_ARGUMENT(Interface != gcvNULL); + + /* Verify the event command. */ + gcmkASSERT + ( (Interface->command == gcvHAL_FREE_NON_PAGED_MEMORY) + || (Interface->command == gcvHAL_FREE_CONTIGUOUS_MEMORY) + || (Interface->command == gcvHAL_WRITE_DATA) + || (Interface->command == gcvHAL_UNLOCK_VIDEO_MEMORY) + || (Interface->command == gcvHAL_SIGNAL) + || (Interface->command == gcvHAL_UNMAP_USER_MEMORY) + || (Interface->command == gcvHAL_TIMESTAMP) + || (Interface->command == gcvHAL_COMMIT_DONE) + || (Interface->command == gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER) + || (Interface->command == gcvHAL_DESTROY_MMU) + ); + + /* Validate the source. */ + if ((FromWhere != gcvKERNEL_COMMAND) && (FromWhere != gcvKERNEL_PIXEL)) + { + /* Invalid argument. */ + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + /* Allocate a free record. */ + gcmkONERROR(gckEVENT_AllocateRecord(Event, AllocateAllowed, &record)); + + /* Termninate the record. */ + record->next = gcvNULL; + + /* Record the committer. */ + record->fromKernel = FromKernel; + + /* Copy the event interface into the record. 
*/ + gckOS_MemCopy(&record->info, Interface, gcmSIZEOF(record->info)); + + /* Get process ID. */ + gcmkONERROR(gckOS_GetProcessID(&record->processID)); + + if (FromKernel == gcvFALSE) + { + gcmkONERROR(__RemoveRecordFromProcessDB(Event, record)); + + /* Handle is belonged to current process, it must be released now. */ + status = _ReleaseVideoMemoryHandle(Event->kernel, record, Interface); + + if (gcmIS_ERROR(status)) + { + /* Ingore error because there are other events in the queue. */ + status = gcvSTATUS_OK; + goto OnError; + } + } + +#ifdef __QNXNTO__ + record->kernel = Event->kernel; +#endif + + /* Unmap user space logical address. + * Linux kernel does not support unmap the memory of other process any more since 3.5. + * Let's unmap memory of self process before submit the event to gpu. + * */ + switch(Interface->command) + { + case gcvHAL_FREE_NON_PAGED_MEMORY: + gcmkONERROR(gckOS_UnmapUserLogical( + Event->os, + gcmNAME_TO_PTR(Interface->u.FreeNonPagedMemory.physical), + (gctSIZE_T) Interface->u.FreeNonPagedMemory.bytes, + gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical))); + break; + case gcvHAL_FREE_CONTIGUOUS_MEMORY: + gcmkONERROR(gckOS_UnmapUserLogical( + Event->os, + gcmNAME_TO_PTR(Interface->u.FreeContiguousMemory.physical), + (gctSIZE_T) Interface->u.FreeContiguousMemory.bytes, + gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical))); + break; + + case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER: + buffer = (gckVIRTUAL_COMMAND_BUFFER_PTR)gcmNAME_TO_PTR(Interface->u.FreeVirtualCommandBuffer.physical); + if (buffer != gcvNULL && buffer->virtualBuffer.userLogical) + { + gcmkONERROR(gckOS_DestroyUserVirtualMapping( + Event->os, + buffer->virtualBuffer.physical, + (gctSIZE_T) Interface->u.FreeVirtualCommandBuffer.bytes, + gcmUINT64_TO_PTR(Interface->u.FreeVirtualCommandBuffer.logical))); + } + break; + + default: + break; + } + + /* Acquire the mutex. 
*/ + gcmkONERROR(gckOS_AcquireMutex(Event->os, Event->eventListMutex, gcvINFINITE)); + acquired = gcvTRUE; + + /* Do we need to allocate a new queue? */ + if ((Event->queueTail == gcvNULL) || (Event->queueTail->source < FromWhere)) + { + /* Allocate a new queue. */ + gcmkONERROR(gckEVENT_AllocateQueue(Event, &queue)); + + /* Initialize the queue. */ + queue->source = FromWhere; + queue->head = gcvNULL; + queue->next = gcvNULL; + + /* Attach it to the list of allocated queues. */ + if (Event->queueTail == gcvNULL) + { + Event->queueHead = + Event->queueTail = queue; + } + else + { + Event->queueTail->next = queue; + Event->queueTail = queue; + } + } + else + { + queue = Event->queueTail; + } + + /* Attach the record to the queue. */ + if (queue->head == gcvNULL) + { + queue->head = record; + queue->tail = record; + } + else + { + queue->tail->next = record; + queue->tail = record; + } + + /* Release the mutex. */ + gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventListMutex)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Roll back. */ + if (acquired) + { + gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventListMutex)); + } + + if (record != gcvNULL) + { + gcmkVERIFY_OK(gckEVENT_FreeRecord(Event, record)); + } + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckEVENT_Unlock +** +** Schedule an event to unlock virtual memory. +** +** INPUT: +** +** gckEVENT Event +** Pointer to an gckEVENT object. +** +** gceKERNEL_WHERE FromWhere +** Place in the pipe where the event needs to be generated. +** +** gcuVIDMEM_NODE_PTR Node +** Pointer to a gcuVIDMEM_NODE union that specifies the virtual memory +** to unlock. +** +** gceSURF_TYPE Type +** Type of surface to unlock. +** +** OUTPUT: +** +** Nothing. 
+*/
+gceSTATUS
+gckEVENT_Unlock(
+    IN gckEVENT Event,
+    IN gceKERNEL_WHERE FromWhere,
+    IN gctPOINTER Node,
+    IN gceSURF_TYPE Type
+    )
+{
+    gceSTATUS status;
+    gcsHAL_INTERFACE iface;
+
+    gcmkHEADER_ARG("Event=0x%x FromWhere=%d Node=0x%x Type=%d",
+                   Event, FromWhere, Node, Type);
+
+    /* Verify the arguments. */
+    gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
+    gcmkVERIFY_ARGUMENT(Node != gcvNULL);
+
+    /* Mark the event as an unlock.
+    ** NOTE(review): asynchroneous = 0 presumably selects the synchronous
+    ** unlock path when the event fires -- confirm against the
+    ** gcvHAL_UNLOCK_VIDEO_MEMORY handler.  The field-name spelling comes
+    ** from the HAL interface and must not be "fixed" here. */
+    iface.command = gcvHAL_UNLOCK_VIDEO_MEMORY;
+    iface.u.UnlockVideoMemory.node = gcmPTR_TO_UINT64(Node);
+    iface.u.UnlockVideoMemory.type = Type;
+    iface.u.UnlockVideoMemory.asynchroneous = 0;
+
+    /* Append it to the queue (FromKernel=gcvTRUE, no new record allocation
+    ** allowed: AllocateAllowed=gcvFALSE). */
+    gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
+
+    /* Success. */
+    gcmkFOOTER_NO();
+    return gcvSTATUS_OK;
+
+OnError:
+    /* Return the status. */
+    gcmkFOOTER();
+    return status;
+}
+
+/*******************************************************************************
+**
+**  gckEVENT_FreeNonPagedMemory
+**
+**  Schedule an event to free non-paged memory.
+**
+**  INPUT:
+**
+**      gckEVENT Event
+**          Pointer to an gckEVENT object.
+**
+**      gctSIZE_T Bytes
+**          Number of bytes of non-paged memory to free.
+**
+**      gctPHYS_ADDR Physical
+**          Physical address of non-paged memory to free.
+**
+**      gctPOINTER Logical
+**          Logical address of non-paged memory to free.
+**
+**      gceKERNEL_WHERE FromWhere
+**          Place in the pipe where the event needs to be generated.
+*/
+gceSTATUS
+gckEVENT_FreeNonPagedMemory(
+    IN gckEVENT Event,
+    IN gctSIZE_T Bytes,
+    IN gctPHYS_ADDR Physical,
+    IN gctPOINTER Logical,
+    IN gceKERNEL_WHERE FromWhere
+    )
+{
+    gceSTATUS status;
+    gcsHAL_INTERFACE iface;
+    gckKERNEL kernel = Event->kernel;
+
+    gcmkHEADER_ARG("Event=0x%x Bytes=%lu Physical=0x%x Logical=0x%x "
+                   "FromWhere=%d",
+                   Event, Bytes, Physical, Logical, FromWhere);
+
+    /* Verify the arguments.
*/
+    gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
+    gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
+    gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
+    gcmkVERIFY_ARGUMENT(Bytes > 0);
+
+    /* Create an event.  Physical is converted to a name (handle) so the
+    ** pointer can be recovered with gcmNAME_TO_PTR when the event is
+    ** processed in gckEVENT_Notify. */
+    iface.command = gcvHAL_FREE_NON_PAGED_MEMORY;
+    iface.u.FreeNonPagedMemory.bytes = Bytes;
+    iface.u.FreeNonPagedMemory.physical = gcmPTR_TO_NAME(Physical);
+    iface.u.FreeNonPagedMemory.logical = gcmPTR_TO_UINT64(Logical);
+
+    /* Append it to the queue (FromKernel=gcvTRUE, no allocation allowed). */
+    gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
+
+    /* Success. */
+    gcmkFOOTER_NO();
+    return gcvSTATUS_OK;
+
+OnError:
+    /* Return the status. */
+    gcmkFOOTER();
+    return status;
+}
+
+/* Schedule an event to free a virtual command buffer.  Mirrors
+** gckEVENT_FreeNonPagedMemory but emits gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER. */
+gceSTATUS
+gckEVENT_DestroyVirtualCommandBuffer(
+    IN gckEVENT Event,
+    IN gctSIZE_T Bytes,
+    IN gctPHYS_ADDR Physical,
+    IN gctPOINTER Logical,
+    IN gceKERNEL_WHERE FromWhere
+    )
+{
+    gceSTATUS status;
+    gcsHAL_INTERFACE iface;
+    /* NOTE(review): 'kernel' appears unused in this function; it may be
+    ** referenced by a trace macro in some configurations -- confirm before
+    ** removing. */
+    gckKERNEL kernel = Event->kernel;
+
+    gcmkHEADER_ARG("Event=0x%x Bytes=%lu Physical=0x%x Logical=0x%x "
+                   "FromWhere=%d",
+                   Event, Bytes, Physical, Logical, FromWhere);
+
+    /* Verify the arguments. */
+    gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
+    gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
+    gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
+    gcmkVERIFY_ARGUMENT(Bytes > 0);
+
+    /* Create an event. */
+    iface.command = gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER;
+    iface.u.FreeVirtualCommandBuffer.bytes = Bytes;
+    iface.u.FreeVirtualCommandBuffer.physical = gcmPTR_TO_NAME(Physical);
+    iface.u.FreeVirtualCommandBuffer.logical = gcmPTR_TO_UINT64(Logical);
+
+    /* Append it to the queue. */
+    gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
+
+    /* Success. */
+    gcmkFOOTER_NO();
+    return gcvSTATUS_OK;
+
+OnError:
+    /* Return the status. */
+    gcmkFOOTER();
+    return status;
+}
+
+/*******************************************************************************
+**
+**  gckEVENT_FreeContiguousMemory
+**
+**  Schedule an event to free contiguous memory.
+**
+**  INPUT:
+**
+**      gckEVENT Event
+**          Pointer to an gckEVENT object.
+**
+**      gctSIZE_T Bytes
+**          Number of bytes of contiguous memory to free.
+**
+**      gctPHYS_ADDR Physical
+**          Physical address of contiguous memory to free.
+**
+**      gctPOINTER Logical
+**          Logical address of contiguous memory to free.
+**
+**      gceKERNEL_WHERE FromWhere
+**          Place in the pipe where the event needs to be generated.
+*/
+gceSTATUS
+gckEVENT_FreeContiguousMemory(
+    IN gckEVENT Event,
+    IN gctSIZE_T Bytes,
+    IN gctPHYS_ADDR Physical,
+    IN gctPOINTER Logical,
+    IN gceKERNEL_WHERE FromWhere
+    )
+{
+    gceSTATUS status;
+    gcsHAL_INTERFACE iface;
+    /* NOTE(review): 'kernel' appears unused in this function; it may be
+    ** referenced by a trace macro in some configurations -- confirm before
+    ** removing. */
+    gckKERNEL kernel = Event->kernel;
+
+    gcmkHEADER_ARG("Event=0x%x Bytes=%lu Physical=0x%x Logical=0x%x "
+                   "FromWhere=%d",
+                   Event, Bytes, Physical, Logical, FromWhere);
+
+    /* Verify the arguments. */
+    gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
+    gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
+    gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
+    gcmkVERIFY_ARGUMENT(Bytes > 0);
+
+    /* Create an event.  Physical is converted to a name (handle) so the
+    ** pointer can be recovered with gcmNAME_TO_PTR when the event is
+    ** processed in gckEVENT_Notify. */
+    iface.command = gcvHAL_FREE_CONTIGUOUS_MEMORY;
+    iface.u.FreeContiguousMemory.bytes = Bytes;
+    iface.u.FreeContiguousMemory.physical = gcmPTR_TO_NAME(Physical);
+    iface.u.FreeContiguousMemory.logical = gcmPTR_TO_UINT64(Logical);
+
+    /* Append it to the queue (FromKernel=gcvTRUE, no allocation allowed). */
+    gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
+
+    /* Success. */
+    gcmkFOOTER_NO();
+    return gcvSTATUS_OK;
+
+OnError:
+    /* Return the status. */
+    gcmkFOOTER();
+    return status;
+}
+
+/*******************************************************************************
+**
+**  gckEVENT_Signal
+**
+**  Schedule an event to trigger a signal.
+**
+**  INPUT:
+**
+**      gckEVENT Event
+**          Pointer to an gckEVENT object.
+**
+**      gctSIGNAL Signal
+**          Pointer to the signal to trigger.
+**
+**      gceKERNEL_WHERE FromWhere
+**          Place in the pipe where the event needs to be generated.
+**
+**  OUTPUT:
+**
+**      Nothing.
+*/
+gceSTATUS
+gckEVENT_Signal(
+    IN gckEVENT Event,
+    IN gctSIGNAL Signal,
+    IN gceKERNEL_WHERE FromWhere
+    )
+{
+    gceSTATUS status;
+    gcsHAL_INTERFACE iface;
+
+    gcmkHEADER_ARG("Event=0x%x Signal=0x%x FromWhere=%d",
+                   Event, Signal, FromWhere);
+
+    /* Verify the arguments. */
+    gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
+    gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
+
+    /* Mark the event as a signal; auxSignal/process are cleared for this
+    ** kernel-originated path. */
+    iface.command = gcvHAL_SIGNAL;
+    iface.u.Signal.signal = gcmPTR_TO_UINT64(Signal);
+    iface.u.Signal.auxSignal = 0;
+    iface.u.Signal.process = 0;
+
+#ifdef __QNXNTO__
+    iface.u.Signal.coid = 0;
+    iface.u.Signal.rcvid = 0;
+
+    gcmkONERROR(gckOS_SignalPending(Event->os, Signal));
+#endif
+
+    /* Append it to the queue (FromKernel=gcvTRUE, no allocation allowed). */
+    gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
+
+    /* Success. */
+    gcmkFOOTER_NO();
+    return gcvSTATUS_OK;
+
+OnError:
+    /* Return the status. */
+    gcmkFOOTER();
+    return status;
+}
+
+#if gcdPROCESS_ADDRESS_SPACE
+gceSTATUS
+gckEVENT_DestroyMmu(
+    IN gckEVENT Event,
+    IN gckMMU Mmu,
+    IN gceKERNEL_WHERE FromWhere
+    )
+{
+    gceSTATUS status;
+    gcsHAL_INTERFACE iface;
+
+    gcmkHEADER_ARG("Event=0x%x FromWhere=%d", Event, FromWhere);
+
+    /* Verify the arguments. */
+    gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
+
+    iface.command = gcvHAL_DESTROY_MMU;
+    iface.u.DestroyMmu.mmu = gcmPTR_TO_UINT64(Mmu);
+
+    /* Append it to the queue. */
+    gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
+
+    /* Success. */
+    gcmkFOOTER_NO();
+    return gcvSTATUS_OK;
+
+OnError:
+    /* Return the status.
*/ + gcmkFOOTER(); + return status; +} +#endif + +gceSTATUS +gckEVENT_SubmitAsync( + IN gckEVENT Event, + IN gctBOOL Wait, + IN gctBOOL FromPower + ) +{ + gceSTATUS status; + gctUINT8 id = 0xFF; + gcsEVENT_QUEUE_PTR queue; + gctBOOL acquired = gcvFALSE; + gctBOOL commitEntered = gcvFALSE; + gctUINT32 start, end; + gctUINT8_PTR startLogical; + gctUINT32 eventBytes; + + gckHARDWARE hardware; + gckASYNC_COMMAND asyncCommand; + + gcmkHEADER_ARG("Event=0x%x Wait=%d", Event, Wait); + + /* Get gckCOMMAND object. */ + hardware = Event->kernel->hardware; + asyncCommand = Event->asyncCommand; + + gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE); + + /* Are there event queues? */ + if (Event->queueHead != gcvNULL) + { + /* Acquire the command queue. */ + gcmkONERROR(gckASYNC_COMMAND_EnterCommit(asyncCommand)); + commitEntered = gcvTRUE; + + /* Process all queues. */ + while (Event->queueHead != gcvNULL) + { + /* Acquire the list mutex. */ + gcmkONERROR(gckOS_AcquireMutex(Event->os, + Event->eventListMutex, + gcvINFINITE)); + acquired = gcvTRUE; + + /* Get the current queue. */ + queue = Event->queueHead; + + /* Allocate an event ID. */ + gcmkONERROR(gckEVENT_GetEvent(Event, Wait, &id, queue->source)); + + /* Copy event list to event ID queue. */ + Event->queues[id].head = queue->head; + + /* Remove the top queue from the list. */ + if (Event->queueHead == Event->queueTail) + { + Event->queueHead = gcvNULL; + Event->queueTail = gcvNULL; + } + else + { + Event->queueHead = Event->queueHead->next; + } + + /* Free the queue. */ + gcmkONERROR(gckEVENT_FreeQueue(Event, queue)); + + /* Release the list mutex. */ + gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventListMutex)); + acquired = gcvFALSE; + + gcmkONERROR(gckHARDWARE_Event(Event->kernel->hardware, gcvNULL, id, gcvKERNEL_BLT, &eventBytes)); + + /* Get command sequence. 
*/ + start = hardware->functions[gcvHARDWARE_FUNCTION_BLT_EVENT].address + id * eventBytes; + end = start + 24; + + startLogical = hardware->functions[gcvHARDWARE_FUNCTION_BLT_EVENT].logical + id * eventBytes; + + gcmkDUMPCOMMAND( + Event->os, + startLogical, + end - start, + gcvDUMP_BUFFER_KERNEL, + gcvFALSE + ); + + gcmkONERROR(gckASYNC_COMMAND_Execute(asyncCommand, start, end)); + } + + /* Release the command queue. */ + gcmkONERROR(gckASYNC_COMMAND_ExitCommit(asyncCommand)); + } + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + /* Need to unroll the mutex acquire. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventListMutex)); + } + + if (commitEntered) + { + /* Release the command queue mutex. */ + gcmkVERIFY_OK(gckASYNC_COMMAND_ExitCommit(asyncCommand)); + } + + if (id != 0xFF) + { + /* Need to unroll the event allocation. */ + Event->queues[id].head = gcvNULL; + } + + if (status == gcvSTATUS_GPU_NOT_RESPONDING) + { + /* Broadcast GPU stuck. */ + status = gckOS_Broadcast(Event->os, + Event->kernel->hardware, + gcvBROADCAST_GPU_STUCK); + } + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckEVENT_Submit +** +** Submit the current event queue to the GPU. +** +** INPUT: +** +** gckEVENT Event +** Pointer to an gckEVENT object. +** +** gctBOOL Wait +** Submit requires one vacant event; if Wait is set to not zero, +** and there are no vacant events at this time, the function will +** wait until an event becomes vacant so that submission of the +** queue is successful. +** +** gctBOOL FromPower +** Determines whether the call originates from inside the power +** management or not. +** +** OUTPUT: +** +** Nothing. 
+*/ +gceSTATUS +gckEVENT_Submit( + IN gckEVENT Event, + IN gctBOOL Wait, + IN gctBOOL FromPower + ) +{ + gceSTATUS status; + gctUINT8 id = 0xFF; + gcsEVENT_QUEUE_PTR queue; + gctBOOL acquired = gcvFALSE; + gckCOMMAND command = gcvNULL; + gctBOOL commitEntered = gcvFALSE; +#if !gcdNULL_DRIVER + gctUINT32 bytes; + gctPOINTER buffer; + gctUINT32 executeBytes; + gctUINT32 flushBytes; +#endif + +#if gcdINTERRUPT_STATISTIC + gctINT32 oldValue; +#endif + +#if gcdSECURITY + gctPOINTER reservedBuffer; +#endif + + gckHARDWARE hardware; + + gceKERNEL_FLUSH flush = gcvFALSE; + gctUINT64 commitStamp; + + gcmkHEADER_ARG("Event=0x%x Wait=%d", Event, Wait); + + /* Get gckCOMMAND object. */ + command = Event->kernel->command; + hardware = Event->kernel->hardware; + + gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE); + + if (Event->asyncCommand) + { + /* Call async submit path. */ + gcmkONERROR(gckEVENT_SubmitAsync(Event, Wait, FromPower)); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + } + + gckOS_GetTicks(&Event->lastCommitStamp); + + /* Are there event queues? */ + if (Event->queueHead != gcvNULL) + { + /* Acquire the command queue. */ + gcmkONERROR(gckCOMMAND_EnterCommit(command, FromPower)); + commitEntered = gcvTRUE; + + /* Get current commit stamp. */ + commitStamp = Event->kernel->command->commitStamp; + + if (commitStamp) + { + commitStamp -= 1; + } + + /* Process all queues. */ + while (Event->queueHead != gcvNULL) + { + /* Acquire the list mutex. */ + gcmkONERROR(gckOS_AcquireMutex(Event->os, + Event->eventListMutex, + gcvINFINITE)); + acquired = gcvTRUE; + + /* Get the current queue. */ + queue = Event->queueHead; + + /* Allocate an event ID. */ + gcmkONERROR(gckEVENT_GetEvent(Event, Wait, &id, queue->source)); + + /* Copy event list to event ID queue. */ + Event->queues[id].head = queue->head; + + /* Update current commit stamp. */ + Event->queues[id].commitStamp = commitStamp; + + /* Remove the top queue from the list. 
*/ + if (Event->queueHead == Event->queueTail) + { + Event->queueHead = gcvNULL; + Event->queueTail = gcvNULL; + } + else + { + Event->queueHead = Event->queueHead->next; + } + + /* Free the queue. */ + gcmkONERROR(gckEVENT_FreeQueue(Event, queue)); + + /* Release the list mutex. */ + gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventListMutex)); + acquired = gcvFALSE; + + /* Determine cache needed to flush. */ + gcmkVERIFY_OK(_QueryFlush(Event, Event->queues[id].head, &flush)); + +#if gcdNULL_DRIVER +#if gcdINTERRUPT_STATISTIC + gcmkVERIFY_OK(gckOS_AtomIncrement( + Event->os, + Event->interruptCount, + &oldValue + )); +#endif + + /* Notify immediately on infinite hardware. */ + gcmkONERROR(gckEVENT_Interrupt(Event, 1 << id)); + + gcmkONERROR(gckEVENT_Notify(Event, 0)); +#else + /* Get the size of the hardware event. */ + gcmkONERROR(gckHARDWARE_Event( + hardware, + gcvNULL, + id, + Event->queues[id].source, + &bytes + )); + + /* Get the size of flush command. */ + gcmkONERROR(gckHARDWARE_Flush( + hardware, + flush, + gcvNULL, + &flushBytes + )); + + bytes += flushBytes; + + /* Total bytes need to execute. */ + executeBytes = bytes; + + /* Reserve space in the command queue. */ + gcmkONERROR(gckCOMMAND_Reserve(command, bytes, &buffer, &bytes)); +#if gcdSECURITY + reservedBuffer = buffer; +#endif + + /* Set the flush in the command queue. */ + gcmkONERROR(gckHARDWARE_Flush( + hardware, + flush, + buffer, + &flushBytes + )); + + /* Advance to next command. */ + buffer = (gctUINT8_PTR)buffer + flushBytes; + + /* Set the hardware event in the command queue. */ + gcmkONERROR(gckHARDWARE_Event( + hardware, + buffer, + id, + Event->queues[id].source, + &bytes + )); + + /* Advance to next command. 
*/ + buffer = (gctUINT8_PTR)buffer + bytes; + +#if gcdINTERRUPT_STATISTIC + gcmkVERIFY_OK(gckOS_AtomIncrement( + Event->os, + Event->interruptCount, + &oldValue + )); +#endif + +#if gcdSECURITY + gckKERNEL_SecurityExecute( + Event->kernel, + reservedBuffer, + executeBytes + ); +#else + /* Execute the hardware event. */ + gcmkONERROR(gckCOMMAND_Execute(command, executeBytes)); +#endif +#endif + } + + /* Release the command queue. */ + gcmkONERROR(gckCOMMAND_ExitCommit(command, FromPower)); + +#if !gcdNULL_DRIVER + if (!FromPower) + { + gcmkVERIFY_OK(_TryToIdleGPU(Event)); + } +#endif + } + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + /* Need to unroll the mutex acquire. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventListMutex)); + } + + if (commitEntered) + { + /* Release the command queue mutex. */ + gcmkVERIFY_OK(gckCOMMAND_ExitCommit(command, FromPower)); + } + + if (id != 0xFF) + { + /* Need to unroll the event allocation. */ + Event->queues[id].head = gcvNULL; + } + + if (status == gcvSTATUS_GPU_NOT_RESPONDING) + { + /* Broadcast GPU stuck. */ + status = gckOS_Broadcast(Event->os, + Event->kernel->hardware, + gcvBROADCAST_GPU_STUCK); + } + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckEVENT_Commit +** +** Commit an event queue from the user. +** +** INPUT: +** +** gckEVENT Event +** Pointer to an gckEVENT object. +** +** gcsQUEUE_PTR Queue +** User event queue. +** +** gctBOOL Forced +** Force fire a event. There won't be interrupt if there's no events + queued. Force a event by append a dummy one if this parameter is on. +** +** OUTPUT: +** +** Nothing. 
+*/ +gceSTATUS +gckEVENT_Commit( + IN gckEVENT Event, + IN gcsQUEUE_PTR Queue, + IN gctBOOL Forced + ) +{ + gceSTATUS status; + gcsQUEUE_PTR record = gcvNULL, next; + gctUINT32 processID; + gctBOOL needCopy = gcvFALSE; + gctPOINTER pointer = gcvNULL; + + gcmkHEADER_ARG("Event=0x%x Queue=0x%x", Event, Queue); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT); + + /* Get the current process ID. */ + gcmkONERROR(gckOS_GetProcessID(&processID)); + + /* Query if we need to copy the client data. */ + gcmkONERROR(gckOS_QueryNeedCopy(Event->os, processID, &needCopy)); + + /* Loop while there are records in the queue. */ + while (Queue != gcvNULL) + { + gcsQUEUE queue; + + if (needCopy) + { + /* Point to stack record. */ + record = &queue; + + /* Copy the data from the client. */ + gcmkONERROR(gckOS_CopyFromUserData(Event->os, + record, + Queue, + gcmSIZEOF(gcsQUEUE))); + } + else + { + + /* Map record into kernel memory. */ + gcmkONERROR(gckOS_MapUserPointer(Event->os, + Queue, + gcmSIZEOF(gcsQUEUE), + &pointer)); + + record = pointer; + } + + /* Append event record to event queue. */ + gcmkONERROR( + gckEVENT_AddList(Event, &record->iface, gcvKERNEL_PIXEL, gcvTRUE, gcvFALSE)); + + /* Next record in the queue. */ + next = gcmUINT64_TO_PTR(record->next); + + if (!needCopy) + { + /* Unmap record from kernel memory. */ + gcmkONERROR( + gckOS_UnmapUserPointer(Event->os, + Queue, + gcmSIZEOF(gcsQUEUE), + (gctPOINTER *) record)); + record = gcvNULL; + } + + Queue = next; + } + + if (Forced && Event->queueHead == gcvNULL) + { + gcsHAL_INTERFACE iface; + iface.command = gcvHAL_COMMIT_DONE; + + gcmkONERROR(gckEVENT_AddList(Event, &iface, gcvKERNEL_PIXEL, gcvFALSE, gcvTRUE)); + } + + /* Submit the event list. */ + gcmkONERROR(gckEVENT_Submit(Event, gcvTRUE, gcvFALSE)); + + /* Success */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (pointer) + { + /* Roll back. 
*/
+        gcmkVERIFY_OK(gckOS_UnmapUserPointer(Event->os,
+                                             Queue,
+                                             gcmSIZEOF(gcsQUEUE),
+                                             (gctPOINTER*)pointer));
+    }
+
+    /* Return the status. */
+    gcmkFOOTER();
+    return status;
+}
+
+/*******************************************************************************
+**
+**  gckEVENT_Interrupt
+**
+**  Called by the interrupt service routine to store the triggered interrupt
+**  mask to be later processed by gckEVENT_Notify.
+**
+**  INPUT:
+**
+**      gckEVENT Event
+**          Pointer to an gckEVENT object.
+**
+**      gctUINT32 Data
+**          Mask for the 32 interrupts.
+**
+**  OUTPUT:
+**
+**      Nothing.
+*/
+gceSTATUS
+gckEVENT_Interrupt(
+    IN gckEVENT Event,
+    IN gctUINT32 Data
+    )
+{
+    /* Combine current interrupt status with pending flags.  Runs in ISR
+    ** context, hence the lock-free atomic mask update. */
+    gckOS_AtomSetMask(Event->pending, Data);
+
+#if gcdINTERRUPT_STATISTIC
+    {
+        gctINT j = 0;
+        gctINT32 oldValue;
+
+        /* One decrement of the outstanding-interrupt counter per event bit
+        ** set in Data; the counter is incremented when events are submitted
+        ** in gckEVENT_Submit. */
+        for (j = 0; j < gcmCOUNTOF(Event->queues); j++)
+        {
+            if ((Data & (1 << j)))
+            {
+                gckOS_AtomDecrement(
+                    Event->os,
+                    Event->interruptCount,
+                    &oldValue
+                    );
+            }
+        }
+    }
+#endif
+
+    /* Success. */
+    return gcvSTATUS_OK;
+}
+
+/*******************************************************************************
+**
+**  gckEVENT_Notify
+**
+**  Process all triggered interrupts.
+**
+**  INPUT:
+**
+**      gckEVENT Event
+**          Pointer to an gckEVENT object.
+**
+**  OUTPUT:
+**
+**      Nothing.
+*/
+gceSTATUS
+gckEVENT_Notify(
+    IN gckEVENT Event,
+    IN gctUINT32 IDs
+    )
+{
+    gceSTATUS status = gcvSTATUS_OK;
+    gctINT i;
+    gcsEVENT_QUEUE * queue;
+    gctUINT mask = 0;
+    gctBOOL acquired = gcvFALSE;
+    gctSIGNAL signal;
+    gctUINT pending = 0;
+    gckKERNEL kernel = Event->kernel;
+
+#if gcmIS_DEBUG(gcdDEBUG_TRACE)
+    gctINT eventNumber = 0;
+#endif
+#if gcdSECURE_USER
+    gcskSECURE_CACHE_PTR cache;
+    gcuVIDMEM_NODE_PTR node;
+#endif
+    gckVIDMEM_NODE nodeObject;
+
+    gcmkHEADER_ARG("Event=0x%x IDs=0x%x", Event, IDs);
+
+    /* Verify the arguments.
*/ + gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT); + + gcmDEBUG_ONLY( + if (IDs != 0) + { + for (i = 0; i < gcmCOUNTOF(Event->queues); ++i) + { + if (Event->queues[i].head != gcvNULL) + { + gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT, + "Queue(%d): stamp=%llu source=%d", + i, + Event->queues[i].stamp, + Event->queues[i].source); + } + } + } + ); + + /* Begin of event handling. */ + Event->notifyState = 0; + + for (;;) + { + gcsEVENT_PTR record; + + /* Grab the mutex queue. */ + gcmkONERROR(gckOS_AcquireMutex(Event->os, + Event->eventQueueMutex, + gcvINFINITE)); + acquired = gcvTRUE; + + gckOS_AtomGet(Event->os, Event->pending, (gctINT32_PTR)&pending); + + if (pending == 0) + { + /* Release the mutex queue. */ + gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex)); + acquired = gcvFALSE; + + /* No more pending interrupts - done. */ + break; + } + + if (pending & 0x80000000) + { + gcmkPRINT("AXI BUS ERROR"); + gckHARDWARE_DumpGPUState(Event->kernel->hardware); + pending &= 0x7FFFFFFF; + } + + if ((pending & 0x40000000) && Event->kernel->hardware->mmuVersion) + { +#if gcdUSE_MMU_EXCEPTION +#if gcdALLOC_ON_FAULT + status = gckHARDWARE_HandleFault(Event->kernel->hardware); +#endif + if (gcmIS_ERROR(status)) + { + /* Dump error is fault can't be handle. */ + gckHARDWARE_DumpMMUException(Event->kernel->hardware); + + gckHARDWARE_DumpGPUState(Event->kernel->hardware); + } +#endif + + pending &= 0xBFFFFFFF; + } + + gcmkTRACE_ZONE_N( + gcvLEVEL_INFO, gcvZONE_EVENT, + gcmSIZEOF(pending), + "Pending interrupts 0x%x", + pending + ); + + queue = gcvNULL; + + gcmDEBUG_ONLY( + if (IDs == 0) + { + for (i = 0; i < gcmCOUNTOF(Event->queues); ++i) + { + if (Event->queues[i].head != gcvNULL) + { + gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT, + "Queue(%d): stamp=%llu source=%d", + i, + Event->queues[i].stamp, + Event->queues[i].source); + } + } + } + ); + + /* Find the oldest pending interrupt. 
*/ + for (i = 0; i < gcmCOUNTOF(Event->queues); ++i) + { + if ((Event->queues[i].head != gcvNULL) + && (pending & (1 << i)) + ) + { + if ((queue == gcvNULL) + || (Event->queues[i].stamp < queue->stamp) + ) + { + queue = &Event->queues[i]; + mask = 1 << i; +#if gcmIS_DEBUG(gcdDEBUG_TRACE) + eventNumber = i; +#endif + } + } + } + + if (queue == gcvNULL) + { + gcmkTRACE_ZONE_N( + gcvLEVEL_ERROR, gcvZONE_EVENT, + gcmSIZEOF(pending), + "Interrupts 0x%x are not pending.", + pending + ); + + gckOS_AtomClearMask(Event->pending, pending); + + /* Release the mutex queue. */ + gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex)); + acquired = gcvFALSE; + break; + } + + /* Check whether there is a missed interrupt. */ + for (i = 0; i < gcmCOUNTOF(Event->queues); ++i) + { + if ((Event->queues[i].head != gcvNULL) + && (Event->queues[i].stamp < queue->stamp) + && (Event->queues[i].source <= queue->source) + ) + { + gcmkTRACE_N( + gcvLEVEL_ERROR, + gcmSIZEOF(i) + gcmSIZEOF(Event->queues[i].stamp), + "Event %d lost (stamp %llu)", + i, Event->queues[i].stamp + ); + + /* Use this event instead. */ + queue = &Event->queues[i]; + mask = 0; + } + } + + if (mask != 0) + { +#if gcmIS_DEBUG(gcdDEBUG_TRACE) + gcmkTRACE_ZONE_N( + gcvLEVEL_INFO, gcvZONE_EVENT, + gcmSIZEOF(eventNumber), + "Processing interrupt %d", + eventNumber + ); +#endif + } + + gckOS_AtomClearMask(Event->pending, mask); + + if (!gckHARDWARE_IsFeatureAvailable(Event->kernel->hardware, gcvFEATURE_FENCE_64BIT)) + { + /* Write out commit stamp.*/ + *(gctUINT64 *)(Event->kernel->command->fence->logical) = queue->commitStamp; + } + + /* Signal clients waiting for fence. */ + gcmkVERIFY_OK(gckFENCE_Signal( + Event->os, + Event->kernel->command->fence + )); + + /* Grab the event head. */ + record = queue->head; + + /* Now quickly clear its event list. */ + queue->head = gcvNULL; + + /* Increase the number of free events. */ + Event->freeQueueCount++; + + /* Release the mutex queue. 
*/ + gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex)); + acquired = gcvFALSE; + + /* Walk all events for this interrupt. */ + while (record != gcvNULL) + { + gcsEVENT_PTR recordNext; +#ifndef __QNXNTO__ + gctPOINTER logical; +#endif +#if gcdSECURE_USER + gctSIZE_T bytes; +#endif + + /* Grab next record. */ + recordNext = record->next; + +#ifdef __QNXNTO__ + /* + * Assign record->processID as the pid for this galcore thread. + * Used in the OS calls which do not take a pid. + */ + drv_thread_specific_key_assign(record->processID, 0); +#endif + +#if gcdSECURE_USER + /* Get the cache that belongs to this process. */ + gcmkONERROR(gckKERNEL_GetProcessDBCache(Event->kernel, + record->processID, + &cache)); +#endif + + gcmkTRACE_ZONE_N( + gcvLEVEL_INFO, gcvZONE_EVENT, + gcmSIZEOF(record->info.command), + "Processing event type: %d", + record->info.command + ); + + switch (record->info.command) + { + case gcvHAL_FREE_NON_PAGED_MEMORY: + gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT, + "gcvHAL_FREE_NON_PAGED_MEMORY: 0x%x", + gcmNAME_TO_PTR(record->info.u.FreeNonPagedMemory.physical)); + + /* Free non-paged memory. */ + status = gckOS_FreeNonPagedMemory( + Event->os, + (gctSIZE_T) record->info.u.FreeNonPagedMemory.bytes, + gcmNAME_TO_PTR(record->info.u.FreeNonPagedMemory.physical), + gcmUINT64_TO_PTR(record->info.u.FreeNonPagedMemory.logical)); + + if (gcmIS_SUCCESS(status)) + { +#if gcdSECURE_USER + gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache( + Event->kernel, + cache, + gcmUINT64_TO_PTR(record->record.u.FreeNonPagedMemory.logical), + (gctSIZE_T) record->record.u.FreeNonPagedMemory.bytes)); +#endif + } + gcmRELEASE_NAME(record->info.u.FreeNonPagedMemory.physical); + break; + + case gcvHAL_FREE_CONTIGUOUS_MEMORY: + gcmkTRACE_ZONE( + gcvLEVEL_VERBOSE, gcvZONE_EVENT, + "gcvHAL_FREE_CONTIGUOUS_MEMORY: 0x%x", + gcmNAME_TO_PTR(record->info.u.FreeContiguousMemory.physical)); + + /* Unmap the user memory. 
*/ + status = gckOS_FreeContiguous( + Event->os, + gcmNAME_TO_PTR(record->info.u.FreeContiguousMemory.physical), + gcmUINT64_TO_PTR(record->info.u.FreeContiguousMemory.logical), + (gctSIZE_T) record->info.u.FreeContiguousMemory.bytes); + + if (gcmIS_SUCCESS(status)) + { +#if gcdSECURE_USER + gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache( + Event->kernel, + cache, + gcmUINT64_TO_PTR(event->event.u.FreeContiguousMemory.logical), + (gctSIZE_T) event->event.u.FreeContiguousMemory.bytes)); +#endif + } + gcmRELEASE_NAME(record->info.u.FreeContiguousMemory.physical); + break; + + case gcvHAL_WRITE_DATA: +#ifndef __QNXNTO__ + /* Convert physical into logical address. */ + gcmkERR_BREAK( + gckOS_MapPhysical(Event->os, + record->info.u.WriteData.address, + gcmSIZEOF(gctUINT32), + &logical)); + + /* Write data. */ + gcmkERR_BREAK( + gckOS_WriteMemory(Event->os, + logical, + record->info.u.WriteData.data)); + + /* Unmap the physical memory. */ + gcmkERR_BREAK( + gckOS_UnmapPhysical(Event->os, + logical, + gcmSIZEOF(gctUINT32))); +#else + /* Write data. */ + gcmkERR_BREAK( + gckOS_WriteMemory(Event->os, + gcmUINT64_TO_PTR(record->info.u.WriteData.address), + record->info.u.WriteData.data)); +#endif + break; + + case gcvHAL_UNLOCK_VIDEO_MEMORY: + gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT, + "gcvHAL_UNLOCK_VIDEO_MEMORY: 0x%x", + record->info.u.UnlockVideoMemory.node); + + nodeObject = gcmUINT64_TO_PTR(record->info.u.UnlockVideoMemory.node); + +#if gcdSECURE_USER + node = nodeObject->node; + + /* Save node information before it disappears. */ + node = event->event.u.UnlockVideoMemory.node; + if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM) + { + logical = gcvNULL; + bytes = 0; + } + else + { + logical = node->Virtual.logical; + bytes = node->Virtual.bytes; + } +#endif + + /* Unlock. 
*/ + status = gckVIDMEM_Unlock( + Event->kernel, + nodeObject, + record->info.u.UnlockVideoMemory.type, + gcvNULL); + +#if gcdSECURE_USER + if (gcmIS_SUCCESS(status) && (logical != gcvNULL)) + { + gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache( + Event->kernel, + cache, + logical, + bytes)); + } +#endif + +#if gcdPROCESS_ADDRESS_SPACE + gcmkVERIFY_OK(gckVIDMEM_NODE_Unlock( + Event->kernel, + nodeObject, + record->processID + )); +#endif + + status = gckVIDMEM_NODE_Dereference(Event->kernel, nodeObject); + break; + + case gcvHAL_SIGNAL: + signal = gcmUINT64_TO_PTR(record->info.u.Signal.signal); + gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT, + "gcvHAL_SIGNAL: 0x%x", + signal); + +#ifdef __QNXNTO__ + if ((record->info.u.Signal.coid == 0) + && (record->info.u.Signal.rcvid == 0) + ) + { + /* Kernel signal. */ + gcmkERR_BREAK( + gckOS_SignalPulse(Event->os, + signal)); + } + else + { + /* User signal. */ + gcmkERR_BREAK( + gckOS_UserSignal(Event->os, + signal, + record->info.u.Signal.rcvid, + record->info.u.Signal.coid)); + } +#else + /* Set signal. */ + if (gcmUINT64_TO_PTR(record->info.u.Signal.process) == gcvNULL) + { + /* Kernel signal. */ + gcmkERR_BREAK( + gckOS_Signal(Event->os, + signal, + gcvTRUE)); + } + else + { + /* User signal. */ + gcmkERR_BREAK( + gckOS_UserSignal(Event->os, + signal, + gcmUINT64_TO_PTR(record->info.u.Signal.process))); + } + + gcmkASSERT(record->info.u.Signal.auxSignal == 0); +#endif + break; + + case gcvHAL_TIMESTAMP: + gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT, + "gcvHAL_TIMESTAMP: %d %d", + record->info.u.TimeStamp.timer, + record->info.u.TimeStamp.request); + + /* Process the timestamp. */ + switch (record->info.u.TimeStamp.request) + { + case 0: + status = gckOS_GetTime(&Event->kernel->timers[ + record->info.u.TimeStamp.timer]. + stopTime); + break; + + case 1: + status = gckOS_GetTime(&Event->kernel->timers[ + record->info.u.TimeStamp.timer]. 
+ startTime); + break; + + default: + gcmkTRACE_ZONE_N( + gcvLEVEL_ERROR, gcvZONE_EVENT, + gcmSIZEOF(record->info.u.TimeStamp.request), + "Invalid timestamp request: %d", + record->info.u.TimeStamp.request + ); + + status = gcvSTATUS_INVALID_ARGUMENT; + break; + } + break; + + case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER: + gcmkVERIFY_OK( + gckKERNEL_DestroyVirtualCommandBuffer(Event->kernel, + (gctSIZE_T) record->info.u.FreeVirtualCommandBuffer.bytes, + gcmNAME_TO_PTR(record->info.u.FreeVirtualCommandBuffer.physical), + gcmUINT64_TO_PTR(record->info.u.FreeVirtualCommandBuffer.logical) + )); + gcmRELEASE_NAME(record->info.u.FreeVirtualCommandBuffer.physical); + break; + +#if gcdPROCESS_ADDRESS_SPACE + case gcvHAL_DESTROY_MMU: + status = gckMMU_Destroy(gcmUINT64_TO_PTR(record->info.u.DestroyMmu.mmu)); + break; +#endif + + case gcvHAL_COMMIT_DONE: + break; + + default: + /* Invalid argument. */ + gcmkTRACE_ZONE_N( + gcvLEVEL_ERROR, gcvZONE_EVENT, + gcmSIZEOF(record->info.command), + "Unknown event type: %d", + record->info.command + ); + + status = gcvSTATUS_INVALID_ARGUMENT; + break; + } + + /* Make sure there are no errors generated. */ + if (gcmIS_ERROR(status)) + { + gcmkTRACE_ZONE_N( + gcvLEVEL_WARNING, gcvZONE_EVENT, + gcmSIZEOF(status), + "Event produced status: %d(%s)", + status, gckOS_DebugStatus2Name(status)); + } + + /* Free the event. */ + gcmkVERIFY_OK(gckEVENT_FreeRecord(Event, record)); + + /* Advance to next record. */ + record = recordNext; + } + + gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT, + "Handled interrupt 0x%x", mask); + } + + if (IDs == 0) + { + gcmkONERROR(_TryToIdleGPU(Event)); + } + + /* End of event handling. */ + Event->notifyState = -1; + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + /* Release mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex)); + } + + /* End of event handling. */ + Event->notifyState = -1; + + /* Return the status. 
*/ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** gckEVENT_FreeProcess +** +** Free all events owned by a particular process ID. +** +** INPUT: +** +** gckEVENT Event +** Pointer to an gckEVENT object. +** +** gctUINT32 ProcessID +** Process ID of the process to be freed up. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckEVENT_FreeProcess( + IN gckEVENT Event, + IN gctUINT32 ProcessID + ) +{ + gctSIZE_T i; + gctBOOL acquired = gcvFALSE; + gcsEVENT_PTR record, next; + gceSTATUS status; + gcsEVENT_PTR deleteHead, deleteTail; + + gcmkHEADER_ARG("Event=0x%x ProcessID=%d", Event, ProcessID); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT); + + /* Walk through all queues. */ + for (i = 0; i < gcmCOUNTOF(Event->queues); ++i) + { + if (Event->queues[i].head != gcvNULL) + { + /* Grab the event queue mutex. */ + gcmkONERROR(gckOS_AcquireMutex(Event->os, + Event->eventQueueMutex, + gcvINFINITE)); + acquired = gcvTRUE; + + /* Grab the mutex head. */ + record = Event->queues[i].head; + Event->queues[i].head = gcvNULL; + Event->queues[i].tail = gcvNULL; + deleteHead = gcvNULL; + deleteTail = gcvNULL; + + while (record != gcvNULL) + { + next = record->next; + if (record->processID == ProcessID) + { + if (deleteHead == gcvNULL) + { + deleteHead = record; + } + else + { + deleteTail->next = record; + } + + deleteTail = record; + } + else + { + if (Event->queues[i].head == gcvNULL) + { + Event->queues[i].head = record; + } + else + { + Event->queues[i].tail->next = record; + } + + Event->queues[i].tail = record; + } + + record->next = gcvNULL; + record = next; + } + + /* Release the mutex queue. */ + gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex)); + acquired = gcvFALSE; + + /* Loop through the entire list of events. */ + for (record = deleteHead; record != gcvNULL; record = next) + { + /* Get the next event record. 
*/ + next = record->next; + + /* Free the event record. */ + gcmkONERROR(gckEVENT_FreeRecord(Event, record)); + } + } + } + + gcmkONERROR(_TryToIdleGPU(Event)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Release the event queue mutex. */ + if (acquired) + { + gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex)); + } + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** gckEVENT_Stop +** +** Stop the hardware using the End event mechanism. +** +** INPUT: +** +** gckEVENT Event +** Pointer to an gckEVENT object. +** +** gctUINT32 ProcessID +** Process ID Logical belongs. +** +** gctPHYS_ADDR Handle +** Physical address handle. If gcvNULL it is video memory. +** +** gctSIZE_T Offset, +** Offset to this memory block. +** +** gctPOINTER Logical +** Logical address to flush. +** +** gctSIGNAL Signal +** Pointer to the signal to trigger. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckEVENT_Stop( + IN gckEVENT Event, + IN gctUINT32 ProcessID, + IN gctPHYS_ADDR Handle, + IN gctSIZE_T Offset, + IN gctPOINTER Logical, + IN gctUINT32 Address, + IN gctSIGNAL Signal, + IN OUT gctUINT32 * waitSize + ) +{ + gceSTATUS status; + /* gctSIZE_T waitSize;*/ + gcsEVENT_PTR record = gcvNULL; + gctUINT8 id = 0xFF; + + gcmkHEADER_ARG("Event=0x%x ProcessID=%u Handle=0x%x Logical=0x%x " + "Address=0x%x Signal=0x%x", + Event, ProcessID, Handle, Logical, Address, Signal); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT); + + /* Submit the current event queue. */ + gcmkONERROR(gckEVENT_Submit(Event, gcvTRUE, gcvFALSE)); + gcmkONERROR(gckEVENT_GetEvent(Event, gcvTRUE, &id, gcvKERNEL_PIXEL)); + + /* Allocate a record. */ + gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &record)); + + /* Initialize the record. 
*/ + record->next = gcvNULL; + record->processID = ProcessID; + record->info.command = gcvHAL_SIGNAL; + record->info.u.Signal.signal = gcmPTR_TO_UINT64(Signal); +#ifdef __QNXNTO__ + record->info.u.Signal.coid = 0; + record->info.u.Signal.rcvid = 0; +#endif + record->info.u.Signal.auxSignal = 0; + record->info.u.Signal.process = 0; + + /* Append the record. */ + Event->queues[id].head = record; + + /* Replace last WAIT with END. */ + gcmkONERROR(gckHARDWARE_End( + Event->kernel->hardware, Logical, Address, waitSize + )); + +#if USE_KERNEL_VIRTUAL_BUFFERS + if (Event->kernel->virtualCommandBuffer) + { + gcmkONERROR(gckKERNEL_GetGPUAddress( + Event->kernel, + Logical, + gcvFALSE, + Event->kernel->command->virtualMemory, + &Event->kernel->hardware->lastEnd + )); + } +#endif + + /* Flush the cache for the END. */ + gcmkONERROR(gckOS_CacheClean( + Event->os, + 0, + Handle, + Offset, + Logical, + *waitSize + )); + + /* Wait for the signal. */ + gcmkONERROR(gckOS_WaitSignal(Event->os, Signal, gcvFALSE, gcvINFINITE)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + + /* Return the status. 
*/ + gcmkFOOTER(); + return status; +} + +static void +_PrintRecord( + gcsEVENT_PTR record + ) +{ + switch (record->info.command) + { + case gcvHAL_FREE_NON_PAGED_MEMORY: + gcmkPRINT(" gcvHAL_FREE_NON_PAGED_MEMORY"); + break; + + case gcvHAL_FREE_CONTIGUOUS_MEMORY: + gcmkPRINT(" gcvHAL_FREE_CONTIGUOUS_MEMORY"); + break; + + case gcvHAL_WRITE_DATA: + gcmkPRINT(" gcvHAL_WRITE_DATA"); + break; + + case gcvHAL_UNLOCK_VIDEO_MEMORY: + gcmkPRINT(" gcvHAL_UNLOCK_VIDEO_MEMORY"); + break; + + case gcvHAL_SIGNAL: + gcmkPRINT(" gcvHAL_SIGNAL process=%d signal=0x%x", + record->info.u.Signal.process, + record->info.u.Signal.signal); + break; + + case gcvHAL_UNMAP_USER_MEMORY: + gcmkPRINT(" gcvHAL_UNMAP_USER_MEMORY"); + break; + + case gcvHAL_TIMESTAMP: + gcmkPRINT(" gcvHAL_TIMESTAMP"); + break; + + case gcvHAL_COMMIT_DONE: + gcmkPRINT(" gcvHAL_COMMIT_DONE"); + break; + + case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER: + gcmkPRINT(" gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER logical=0x%08x", + record->info.u.FreeVirtualCommandBuffer.logical); + break; + + case gcvHAL_DESTROY_MMU: + gcmkPRINT(" gcvHAL_DESTORY_MMU mmu=0x%08x", + gcmUINT64_TO_PTR(record->info.u.DestroyMmu.mmu)); + + break; + default: + gcmkPRINT(" Illegal Event %d", record->info.command); + break; + } +} + +/******************************************************************************* +** gckEVENT_Dump +** +** Dump record in event queue when stuck happens. +** No protection for the event queue. 
+**/ +gceSTATUS +gckEVENT_Dump( + IN gckEVENT Event + ) +{ + gcsEVENT_QUEUE_PTR queueHead = Event->queueHead; + gcsEVENT_QUEUE_PTR queue; + gcsEVENT_PTR record = gcvNULL; + gctINT i; +#if gcdINTERRUPT_STATISTIC + gctINT32 pendingInterrupt; + gctUINT32 intrAcknowledge; +#endif + gctINT32 pending; + + gcmkHEADER_ARG("Event=0x%x", Event); + + gcmkPRINT("**************************\n"); + gcmkPRINT("*** EVENT STATE DUMP ***\n"); + gcmkPRINT("**************************\n"); + + gcmkPRINT(" Unsumbitted Event:"); + while(queueHead) + { + queue = queueHead; + record = queueHead->head; + + gcmkPRINT(" [%x]:", queue); + while(record) + { + _PrintRecord(record); + record = record->next; + } + + if (queueHead == Event->queueTail) + { + queueHead = gcvNULL; + } + else + { + queueHead = queueHead->next; + } + } + + gcmkPRINT(" Untriggered Event:"); + for (i = 0; i < gcmCOUNTOF(Event->queues); i++) + { + queue = &Event->queues[i]; + record = queue->head; + + gcmkPRINT(" [%d]:", i); + while(record) + { + _PrintRecord(record); + record = record->next; + } + } + +#if gcdINTERRUPT_STATISTIC + gckOS_AtomGet(Event->os, Event->interruptCount, &pendingInterrupt); + gcmkPRINT(" Number of Pending Interrupt: %d", pendingInterrupt); + + if (Event->kernel->recovery == 0) + { + gckOS_ReadRegisterEx( + Event->os, + Event->kernel->core, + 0x10, + &intrAcknowledge + ); + + gcmkPRINT(" INTR_ACKNOWLEDGE=0x%x", intrAcknowledge); + } +#endif + + gcmkPRINT(" Notify State=%d", Event->notifyState); + + gckOS_AtomGet(Event->os, Event->pending, &pending); + + gcmkPRINT(" Pending=0x%x", pending); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} diff --git a/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_heap.c b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_heap.c new file mode 100644 index 000000000000..5c4835abb3e0 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_heap.c @@ -0,0 +1,892 @@ +/**************************************************************************** +* +* The MIT License 
(MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. 
+* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +/** +** @file +** gckHEAP object for kernel HAL layer. The heap implemented here is an arena- +** based memory allocation. An arena-based memory heap allocates data quickly +** from specified arenas and reduces memory fragmentation. +** +*/ +#include "gc_hal_kernel_precomp.h" + +#define _GC_OBJ_ZONE gcvZONE_HEAP + +/******************************************************************************* +***** Structures *************************************************************** +*******************************************************************************/ +#define gcdIN_USE ((gcskNODE_PTR)gcvMAXUINTPTR_T) + +typedef struct _gcskNODE * gcskNODE_PTR; +typedef struct _gcskNODE +{ + /* Number of byets in node. */ + gctSIZE_T bytes; + + /* Pointer to next free node, or gcvNULL to mark the node as freed, or + ** gcdIN_USE to mark the node as used. */ + gcskNODE_PTR next; + +#if gcmIS_DEBUG(gcdDEBUG_CODE) + /* Time stamp of allocation. */ + gctUINT64 timeStamp; +#endif +} +gcskNODE; + +typedef struct _gcskHEAP * gcskHEAP_PTR; +typedef struct _gcskHEAP +{ + /* Linked list. */ + gcskHEAP_PTR next; + gcskHEAP_PTR prev; + + /* Heap size. */ + gctSIZE_T size; + + /* Free list. */ + gcskNODE_PTR freeList; +} +gcskHEAP; + +struct _gckHEAP +{ + /* Object. 
*/ + gcsOBJECT object; + + /* Pointer to a gckOS object. */ + gckOS os; + + /* Locking mutex. */ + gctPOINTER mutex; + + /* Allocation parameters. */ + gctSIZE_T allocationSize; + + /* Heap list. */ + gcskHEAP_PTR heap; +#if gcmIS_DEBUG(gcdDEBUG_CODE) + gctUINT64 timeStamp; +#endif + +#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE) + /* Profile information. */ + gctUINT32 allocCount; + gctUINT64 allocBytes; + gctUINT64 allocBytesMax; + gctUINT64 allocBytesTotal; + gctUINT32 heapCount; + gctUINT32 heapCountMax; + gctUINT64 heapMemory; + gctUINT64 heapMemoryMax; +#endif +}; + +/******************************************************************************* +***** Static Support Functions ************************************************* +*******************************************************************************/ + +#if gcmIS_DEBUG(gcdDEBUG_CODE) +static gctSIZE_T +_DumpHeap( + IN gcskHEAP_PTR Heap + ) +{ + gctPOINTER p; + gctSIZE_T leaked = 0; + + /* Start at first node. */ + for (p = Heap + 1;;) + { + /* Convert the pointer. */ + gcskNODE_PTR node = (gcskNODE_PTR) p; + + /* Check if this is a used node. */ + if (node->next == gcdIN_USE) + { + /* Print the leaking node. */ + gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_HEAP, + "Detected leaking: node=0x%x bytes=%lu timeStamp=%llu " + "(%08X %c%c%c%c)", + node, node->bytes, node->timeStamp, + ((gctUINT32_PTR) (node + 1))[0], + gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[0]), + gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[1]), + gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[2]), + gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[3])); + + /* Add leaking byte count. */ + leaked += node->bytes; + } + + /* Test for end of heap. */ + if (node->bytes == 0) + { + break; + } + + else + { + /* Move to next node. */ + p = (gctUINT8_PTR) node + node->bytes; + } + } + + /* Return the number of leaked bytes. 
*/ + return leaked; +} +#endif + +static gceSTATUS +_CompactKernelHeap( + IN gckHEAP Heap + ) +{ + gcskHEAP_PTR heap, next; + gctPOINTER p; + gcskHEAP_PTR freeList = gcvNULL; + + gcmkHEADER_ARG("Heap=0x%x", Heap); + + /* Walk all the heaps. */ + for (heap = Heap->heap; heap != gcvNULL; heap = next) + { + gcskNODE_PTR lastFree = gcvNULL; + + /* Zero out the free list. */ + heap->freeList = gcvNULL; + + /* Start at the first node. */ + for (p = (gctUINT8_PTR) (heap + 1);;) + { + /* Convert the pointer. */ + gcskNODE_PTR node = (gcskNODE_PTR) p; + + gcmkASSERT(p <= (gctPOINTER) ((gctUINT8_PTR) (heap + 1) + heap->size)); + + /* Test if this node not used. */ + if (node->next != gcdIN_USE) + { + /* Test if this is the end of the heap. */ + if (node->bytes == 0) + { + break; + } + + /* Test of this is the first free node. */ + else if (lastFree == gcvNULL) + { + /* Initialzie the free list. */ + heap->freeList = node; + lastFree = node; + } + + else + { + /* Test if this free node is contiguous with the previous + ** free node. */ + if ((gctUINT8_PTR) lastFree + lastFree->bytes == p) + { + /* Just increase the size of the previous free node. */ + lastFree->bytes += node->bytes; + } + else + { + /* Add to linked list. */ + lastFree->next = node; + lastFree = node; + } + } + } + + /* Move to next node. */ + p = (gctUINT8_PTR) node + node->bytes; + } + + /* Mark the end of the chain. */ + if (lastFree != gcvNULL) + { + lastFree->next = gcvNULL; + } + + /* Get next heap. */ + next = heap->next; + + /* Check if the entire heap is free. */ + if ((heap->freeList != gcvNULL) + && (heap->freeList->bytes == heap->size - gcmSIZEOF(gcskNODE)) + ) + { + /* Remove the heap from the linked list. */ + if (heap->prev == gcvNULL) + { + Heap->heap = next; + } + else + { + heap->prev->next = next; + } + + if (heap->next != gcvNULL) + { + heap->next->prev = heap->prev; + } + +#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE) + /* Update profiling. 
*/ + Heap->heapCount -= 1; + Heap->heapMemory -= heap->size + gcmSIZEOF(gcskHEAP); +#endif + + /* Add this heap to the list of heaps that need to be freed. */ + heap->next = freeList; + freeList = heap; + } + } + + if (freeList != gcvNULL) + { + /* Release the mutex, remove any chance for a dead lock. */ + gcmkVERIFY_OK( + gckOS_ReleaseMutex(Heap->os, Heap->mutex)); + + /* Free all heaps in the free list. */ + for (heap = freeList; heap != gcvNULL; heap = next) + { + /* Get pointer to the next heap. */ + next = heap->next; + + /* Free the heap. */ + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HEAP, + "Freeing heap 0x%x (%lu bytes)", + heap, heap->size + gcmSIZEOF(gcskHEAP)); + gcmkVERIFY_OK(gckOS_FreeMemory(Heap->os, heap)); + } + + /* Acquire the mutex again. */ + gcmkVERIFY_OK( + gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE)); + } + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +***** gckHEAP API Code ********************************************************* +*******************************************************************************/ + +/******************************************************************************* +** +** gckHEAP_Construct +** +** Construct a new gckHEAP object. +** +** INPUT: +** +** gckOS Os +** Pointer to a gckOS object. +** +** gctSIZE_T AllocationSize +** Minimum size per arena. +** +** OUTPUT: +** +** gckHEAP * Heap +** Pointer to a variable that will hold the pointer to the gckHEAP +** object. +*/ +gceSTATUS +gckHEAP_Construct( + IN gckOS Os, + IN gctSIZE_T AllocationSize, + OUT gckHEAP * Heap + ) +{ + gceSTATUS status; + gckHEAP heap = gcvNULL; + gctPOINTER pointer = gcvNULL; + + gcmkHEADER_ARG("Os=0x%x AllocationSize=%lu", Os, AllocationSize); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Heap != gcvNULL); + + /* Allocate the gckHEAP object. 
*/ + gcmkONERROR(gckOS_AllocateMemory(Os, + gcmSIZEOF(struct _gckHEAP), + &pointer)); + + heap = pointer; + + /* Initialize the gckHEAP object. */ + heap->object.type = gcvOBJ_HEAP; + heap->os = Os; + heap->allocationSize = AllocationSize; + heap->heap = gcvNULL; +#if gcmIS_DEBUG(gcdDEBUG_CODE) + heap->timeStamp = 0; +#endif + +#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE) + /* Zero the counters. */ + heap->allocCount = 0; + heap->allocBytes = 0; + heap->allocBytesMax = 0; + heap->allocBytesTotal = 0; + heap->heapCount = 0; + heap->heapCountMax = 0; + heap->heapMemory = 0; + heap->heapMemoryMax = 0; +#endif + + /* Create the mutex. */ + gcmkONERROR(gckOS_CreateMutex(Os, &heap->mutex)); + + /* Return the pointer to the gckHEAP object. */ + *Heap = heap; + + /* Success. */ + gcmkFOOTER_ARG("*Heap=0x%x", *Heap); + return gcvSTATUS_OK; + +OnError: + /* Roll back. */ + if (heap != gcvNULL) + { + /* Free the heap structure. */ + gcmkVERIFY_OK(gckOS_FreeMemory(Os, heap)); + } + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckHEAP_Destroy +** +** Destroy a gckHEAP object. +** +** INPUT: +** +** gckHEAP Heap +** Pointer to a gckHEAP object to destroy. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckHEAP_Destroy( + IN gckHEAP Heap + ) +{ + gcskHEAP_PTR heap; +#if gcmIS_DEBUG(gcdDEBUG_CODE) + gctSIZE_T leaked = 0; +#endif + + gcmkHEADER_ARG("Heap=0x%x", Heap); + + for (heap = Heap->heap; heap != gcvNULL; heap = Heap->heap) + { + /* Unlink heap from linked list. */ + Heap->heap = heap->next; + +#if gcmIS_DEBUG(gcdDEBUG_CODE) + /* Check for leaked memory. */ + leaked += _DumpHeap(heap); +#endif + + /* Free the heap. */ + gcmkVERIFY_OK(gckOS_FreeMemory(Heap->os, heap)); + } + + /* Free the mutex. */ + gcmkVERIFY_OK(gckOS_DeleteMutex(Heap->os, Heap->mutex)); + + /* Free the heap structure. */ + gcmkVERIFY_OK(gckOS_FreeMemory(Heap->os, Heap)); + + /* Success. 
/*******************************************************************************
**
**  gckHEAP_Allocate
**
**  Allocate data from the heap.
**
**  Strategy: the requested size is rounded up to include a gcskNODE header
**  and 8-byte alignment.  First the existing arenas are scanned (first-fit
**  over each arena's free list); if nothing fits, the arenas are compacted
**  and scanned once more; if that still fails, a fresh arena of
**  Heap->allocationSize bytes is allocated from the OS.  The winning free
**  node is carved from its BACK so the free node itself stays in place.
**
**  INPUT:
**
**      gckHEAP Heap
**          Pointer to a gckHEAP object.
**
**      IN gctSIZE_T Bytes
**          Number of bytes to allocate.
**
**  OUTPUT:
**
**      gctPOINTER * Memory
**          Pointer to a variable that will hold the address of the allocated
**          memory.
*/
gceSTATUS
gckHEAP_Allocate(
    IN gckHEAP Heap,
    IN gctSIZE_T Bytes,
    OUT gctPOINTER * Memory
    )
{
    gctBOOL acquired = gcvFALSE;
    gcskHEAP_PTR heap;
    gceSTATUS status;
    gctSIZE_T bytes;
    /* prevFree tracks the predecessor of the chosen free node so it can be
    ** unlinked at UseNode; gcvNULL means the node is the list head. */
    gcskNODE_PTR node, used, prevFree = gcvNULL;
    gctPOINTER memory = gcvNULL;

    gcmkHEADER_ARG("Heap=0x%x Bytes=%lu", Heap, Bytes);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
    gcmkVERIFY_ARGUMENT(Bytes > 0);
    gcmkVERIFY_ARGUMENT(Memory != gcvNULL);

    /* Determine number of bytes required for a node (header + payload,
    ** rounded up to 8-byte alignment). */
    bytes = gcmALIGN(Bytes + gcmSIZEOF(gcskNODE), 8);

    /* Acquire the mutex. */
    gcmkONERROR(
        gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));

    acquired = gcvTRUE;

    /* Check if this allocation is bigger than the default allocation size. */
    if (bytes > Heap->allocationSize - gcmSIZEOF(gcskHEAP) - gcmSIZEOF(gcskNODE))
    {
        /* Adjust allocation size.  NOTE(review): this permanently grows the
        ** arena size used for all future allocations, not just this one —
        ** presumably intentional to reduce arena churn; confirm. */
        Heap->allocationSize = bytes * 2;
    }

    else if (Heap->heap != gcvNULL)
    {
        gctINT i;

        /* 2 retries, since we might need to compact. */
        for (i = 0; i < 2; ++i)
        {
            /* Walk all the heaps. */
            for (heap = Heap->heap; heap != gcvNULL; heap = heap->next)
            {
                /* Check if this heap has enough bytes to hold the request. */
                if (bytes <= heap->size - gcmSIZEOF(gcskNODE))
                {
                    prevFree = gcvNULL;

                    /* Walk the chain of free nodes (first fit).  On success
                    ** this jumps to UseNode with the mutex still held. */
                    for (node = heap->freeList;
                         node != gcvNULL;
                         node = node->next
                         )
                    {
                        gcmkASSERT(node->next != gcdIN_USE);

                        /* Check if this free node has enough bytes. */
                        if (node->bytes >= bytes)
                        {
                            /* Use the node. */
                            goto UseNode;
                        }

                        /* Save current free node for linked list management. */
                        prevFree = node;
                    }
                }
            }

            if (i == 0)
            {
                /* Compact the heap.  NOTE: _CompactKernelHeap may drop and
                ** re-acquire the mutex internally while freeing empty
                ** arenas. */
                gcmkVERIFY_OK(_CompactKernelHeap(Heap));

#if gcmIS_DEBUG(gcdDEBUG_CODE)
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "===== KERNEL HEAP =====");
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Number of allocations : %12u",
                               Heap->allocCount);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Number of bytes allocated : %12llu",
                               Heap->allocBytes);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Maximum allocation size : %12llu",
                               Heap->allocBytesMax);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Total number of bytes allocated : %12llu",
                               Heap->allocBytesTotal);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Number of heaps : %12u",
                               Heap->heapCount);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Heap memory in bytes : %12llu",
                               Heap->heapMemory);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Maximum number of heaps : %12u",
                               Heap->heapCountMax);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Maximum heap memory in bytes : %12llu",
                               Heap->heapMemoryMax);
#endif
            }
        }
    }

    /* No existing arena could satisfy the request: release the mutex while
    ** calling into the OS allocator. */
    gcmkONERROR(
        gckOS_ReleaseMutex(Heap->os, Heap->mutex));

    acquired = gcvFALSE;

    /* Allocate a new heap. */
    gcmkONERROR(
        gckOS_AllocateMemory(Heap->os,
                             Heap->allocationSize,
                             &memory));

    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HEAP,
                   "Allocated heap 0x%x (%lu bytes)",
                   memory, Heap->allocationSize);

    /* Acquire the mutex. */
    gcmkONERROR(
        gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));

    acquired = gcvTRUE;

    /* Use the allocated memory as the heap. */
    heap = (gcskHEAP_PTR) memory;

    /* Insert this heap to the head of the chain. */
    heap->next = Heap->heap;
    heap->prev = gcvNULL;
    heap->size = Heap->allocationSize - gcmSIZEOF(gcskHEAP);

    if (heap->next != gcvNULL)
    {
        heap->next->prev = heap;
    }
    Heap->heap = heap;

    /* Mark the end of the heap with a zero-byte sentinel node. */
    node = (gcskNODE_PTR) ( (gctUINT8_PTR) heap
                          + Heap->allocationSize
                          - gcmSIZEOF(gcskNODE)
                          );
    node->bytes = 0;
    node->next = gcvNULL;

    /* Create a free list. */
    node = (gcskNODE_PTR) (heap + 1);
    heap->freeList = node;

    /* Initialize the free list: one node spanning the whole arena. */
    node->bytes = heap->size - gcmSIZEOF(gcskNODE);
    node->next = gcvNULL;

    /* No previous free (the node is the list head). */
    prevFree = gcvNULL;

#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
    /* Update profiling. */
    Heap->heapCount += 1;
    Heap->heapMemory += Heap->allocationSize;

    if (Heap->heapCount > Heap->heapCountMax)
    {
        Heap->heapCountMax = Heap->heapCount;
    }
    if (Heap->heapMemory > Heap->heapMemoryMax)
    {
        Heap->heapMemoryMax = Heap->heapMemory;
    }
#endif

UseNode:
    /* Reached with the mutex held, 'heap' the arena owning 'node', and
    ** 'node' a free node with at least 'bytes' bytes. */
    gcmkASSERT(heap != gcvNULL);
    gcmkASSERT(node != gcvNULL);
    gcmkASSERT(node->bytes >= bytes);

    if (heap->prev != gcvNULL)
    {
        /* Unlink the heap from the linked list. */
        heap->prev->next = heap->next;
        if (heap->next != gcvNULL)
        {
            heap->next->prev = heap->prev;
        }

        /* Move the heap to the front of the list (MRU ordering). */
        heap->next = Heap->heap;
        heap->prev = gcvNULL;
        Heap->heap = heap;
        heap->next->prev = heap;
    }

    /* Check if there is enough free space left after usage for another free
    ** node. */
    if (node->bytes - bytes >= gcmSIZEOF(gcskNODE))
    {
        /* Allocated used space from the back of the free list. */
        used = (gcskNODE_PTR) ((gctUINT8_PTR) node + node->bytes - bytes);

        /* Adjust the number of free bytes. */
        node->bytes -= bytes;
        gcmkASSERT(node->bytes >= gcmSIZEOF(gcskNODE));
    }
    else
    {
        /* Remove this free list from the chain. */
        if (prevFree == gcvNULL)
        {
            heap->freeList = node->next;
        }
        else
        {
            prevFree->next = node->next;
        }

        /* Consume the entire free node. */
        used = (gcskNODE_PTR) node;
        bytes = node->bytes;
    }

    /* Mark node as used. */
    used->bytes = bytes;
    used->next = gcdIN_USE;
#if gcmIS_DEBUG(gcdDEBUG_CODE)
    used->timeStamp = ++Heap->timeStamp;
#endif

#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
    /* Update profile counters. */
    Heap->allocCount += 1;
    Heap->allocBytes += bytes;
    Heap->allocBytesMax = gcmMAX(Heap->allocBytes, Heap->allocBytesMax);
    Heap->allocBytesTotal += bytes;
#endif

    /* Release the mutex. */
    gcmkVERIFY_OK(
        gckOS_ReleaseMutex(Heap->os, Heap->mutex));

    /* Return pointer to memory (payload starts right after the header). */
    *Memory = used + 1;

    /* Success. */
    gcmkFOOTER_ARG("*Memory=0x%x", *Memory);
    return gcvSTATUS_OK;

OnError:
    if (acquired)
    {
        /* Release the mutex. */
        gcmkVERIFY_OK(
            gckOS_ReleaseMutex(Heap->os, Heap->mutex));
    }

    if (memory != gcvNULL)
    {
        /* Free the heap memory. */
        gckOS_FreeMemory(Heap->os, memory);
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
*/ + node->next = gcvNULL; + +#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE) + /* Update profile counters. */ + Heap->allocBytes -= node->bytes; +#endif + + /* Release the mutex. */ + gcmkVERIFY_OK( + gckOS_ReleaseMutex(Heap->os, Heap->mutex)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +#if VIVANTE_PROFILER +gceSTATUS +gckHEAP_ProfileStart( + IN gckHEAP Heap + ) +{ + gcmkHEADER_ARG("Heap=0x%x", Heap); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP); + + /* Zero the counters. */ + Heap->allocCount = 0; + Heap->allocBytes = 0; + Heap->allocBytesMax = 0; + Heap->allocBytesTotal = 0; + Heap->heapCount = 0; + Heap->heapCountMax = 0; + Heap->heapMemory = 0; + Heap->heapMemoryMax = 0; + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gckHEAP_ProfileEnd( + IN gckHEAP Heap, + IN gctCONST_STRING Title + ) +{ + gcmkHEADER_ARG("Heap=0x%x Title=0x%x", Heap, Title); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP); + gcmkVERIFY_ARGUMENT(Title != gcvNULL); + + gcmkPRINT(""); + gcmkPRINT("=====[ HEAP - %s ]=====", Title); + gcmkPRINT("Number of allocations : %12u", Heap->allocCount); + gcmkPRINT("Number of bytes allocated : %12llu", Heap->allocBytes); + gcmkPRINT("Maximum allocation size : %12llu", Heap->allocBytesMax); + gcmkPRINT("Total number of bytes allocated : %12llu", Heap->allocBytesTotal); + gcmkPRINT("Number of heaps : %12u", Heap->heapCount); + gcmkPRINT("Heap memory in bytes : %12llu", Heap->heapMemory); + gcmkPRINT("Maximum number of heaps : %12u", Heap->heapCountMax); + gcmkPRINT("Maximum heap memory in bytes : %12llu", Heap->heapMemoryMax); + gcmkPRINT("=============================================="); + + /* Success. 
*/ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} +#endif /* VIVANTE_PROFILER */ + +/******************************************************************************* +***** Test Code **************************************************************** +*******************************************************************************/ + diff --git a/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_mmu.c b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_mmu.c new file mode 100644 index 000000000000..893fe0433b9a --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_mmu.c @@ -0,0 +1,2989 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#include "gc_hal_kernel_precomp.h" + +#define _GC_OBJ_ZONE gcvZONE_MMU + +typedef enum _gceMMU_TYPE +{ + gcvMMU_USED = (0 << 4), + gcvMMU_SINGLE = (1 << 4), + gcvMMU_FREE = (2 << 4), +} +gceMMU_TYPE; + +#define gcmENTRY_TYPE(x) (x & 0xF0) + +#define gcmENTRY_COUNT(x) ((x & 0xFFFFFF00) >> 8) + +#define gcdMMU_TABLE_DUMP 0 + +#define gcdVERTEX_START (128 << 10) + +typedef struct _gcsMMU_STLB_CHUNK *gcsMMU_STLB_CHUNK_PTR; + +typedef struct _gcsMMU_STLB_CHUNK +{ + gctPHYS_ADDR physical; + gctUINT32_PTR logical; + gctSIZE_T size; + gctPHYS_ADDR_T physBase; + gctSIZE_T pageCount; + gctUINT32 mtlbIndex; + gctUINT32 mtlbEntryNum; + gcsMMU_STLB_CHUNK_PTR next; +} gcsMMU_STLB_CHUNK; + +#if gcdSHARED_PAGETABLE +typedef struct _gcsSharedPageTable * gcsSharedPageTable_PTR; +typedef struct _gcsSharedPageTable +{ + /* Shared gckMMU object. */ + gckMMU mmu; + + /* Hardwares which use this shared pagetable. */ + gckHARDWARE hardwares[gcdMAX_GPU_COUNT]; + + /* Number of cores use this shared pagetable. 
*/ + gctUINT32 reference; +} +gcsSharedPageTable; + +static gcsSharedPageTable_PTR sharedPageTable = gcvNULL; +#endif + +typedef struct _gcsFreeSpaceNode * gcsFreeSpaceNode_PTR; +typedef struct _gcsFreeSpaceNode +{ + gctUINT32 start; + gctINT32 entries; +} +gcsFreeSpaceNode; + +#if gcdENDIAN_BIG + +# define _WritePageEntry(pageEntry, entryValue) \ + *(gctUINT32_PTR)(pageEntry) = gcmBSWAP32((gctUINT32)(entryValue)) + +# define _ReadPageEntry(pageEntry) \ + gcmBSWAP32(*(gctUINT32_PTR)(pageEntry)) + +#else + +# define _WritePageEntry(pageEntry, entryValue) \ + *(gctUINT32_PTR)(pageEntry) = (gctUINT32)(entryValue) + +# define _ReadPageEntry(pageEntry) \ + *(gctUINT32_PTR)(pageEntry) + +#endif + +static gceSTATUS +_FillPageTable( + IN gctUINT32_PTR PageTable, + IN gctUINT32 PageCount, + IN gctUINT32 EntryValue +) +{ + gctUINT i; + + for (i = 0; i < PageCount; i++) + { + _WritePageEntry(PageTable + i, EntryValue); + } + + return gcvSTATUS_OK; +} + +static gceSTATUS +_FillMap( + IN gctUINT32_PTR Map, + IN gctUINT32 PageCount, + IN gctUINT32 EntryValue +) +{ + gctUINT i; + + for (i = 0; i < PageCount; i++) + { + Map[i] = EntryValue; + } + + return gcvSTATUS_OK; +} + +static gceSTATUS +_Link( + IN gcsADDRESS_AREA_PTR Area, + IN gctUINT32 Index, + IN gctUINT32 Next + ) +{ + if (Index >= Area->pageTableEntries) + { + /* Just move heap pointer. */ + Area->heapList = Next; + } + else + { + /* Address page table. */ + gctUINT32_PTR map = Area->mapLogical; + + /* Dispatch on node type. */ + switch (gcmENTRY_TYPE(map[Index])) + { + case gcvMMU_SINGLE: + /* Set single index. */ + map[Index] = (Next << 8) | gcvMMU_SINGLE; + break; + + case gcvMMU_FREE: + /* Set index. */ + map[Index + 1] = Next; + break; + + default: + gcmkFATAL("MMU table correcupted at index %u!", Index); + return gcvSTATUS_HEAP_CORRUPTED; + } + } + + /* Success. 
*/ + return gcvSTATUS_OK; +} + +static gceSTATUS +_AddFree( + IN gcsADDRESS_AREA_PTR Area, + IN gctUINT32 Index, + IN gctUINT32 Node, + IN gctUINT32 Count + ) +{ + gctUINT32_PTR map = Area->mapLogical; + + if (Count == 1) + { + /* Initialize a single page node. */ + map[Node] = (~((1U<<8)-1)) | gcvMMU_SINGLE; + } + else + { + /* Initialize the node. */ + map[Node + 0] = (Count << 8) | gcvMMU_FREE; + map[Node + 1] = ~0U; + } + + /* Append the node. */ + return _Link(Area, Index, Node); +} + +static gceSTATUS +_Collect( + IN gcsADDRESS_AREA_PTR Area + ) +{ + gctUINT32_PTR map = Area->mapLogical; + gceSTATUS status; + gctUINT32 i, previous, start = 0, count = 0; + + previous = Area->heapList = ~0U; + Area->freeNodes = gcvFALSE; + + /* Walk the entire page table. */ + for (i = 0; i < Area->pageTableEntries; ++i) + { + /* Dispatch based on type of page. */ + switch (gcmENTRY_TYPE(map[i])) + { + case gcvMMU_USED: + /* Used page, so close any open node. */ + if (count > 0) + { + /* Add the node. */ + gcmkONERROR(_AddFree(Area, previous, start, count)); + + /* Reset the node. */ + previous = start; + count = 0; + } + break; + + case gcvMMU_SINGLE: + /* Single free node. */ + if (count++ == 0) + { + /* Start a new node. */ + start = i; + } + break; + + case gcvMMU_FREE: + /* A free node. */ + if (count == 0) + { + /* Start a new node. */ + start = i; + } + + /* Advance the count. */ + count += map[i] >> 8; + + /* Advance the index into the page table. */ + i += (map[i] >> 8) - 1; + break; + + default: + gcmkFATAL("MMU page table correcupted at index %u!", i); + return gcvSTATUS_HEAP_CORRUPTED; + } + } + + /* See if we have an open node left. */ + if (count > 0) + { + /* Add the node to the list. */ + gcmkONERROR(_AddFree(Area, previous, start, count)); + } + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_MMU, + "Performed a garbage collection of the MMU heap."); + + /* Success. */ + return gcvSTATUS_OK; + +OnError: + /* Return the staus. 
*/ + return status; +} + +static gctUINT32 +_SetPage(gctUINT32 PageAddress, gctUINT32 PageAddressExt, gctBOOL Writable) +{ + gctUINT32 entry = PageAddress + /* AddressExt */ + | (PageAddressExt << 4) + /* Ignore exception */ + | (0 << 1) + /* Present */ + | (1 << 0); + + if (Writable) + { + /* writable */ + entry |= (1 << 2); + } +#if gcdUSE_MMU_EXCEPTION + else + { + /* If this page is read only, set exception bit to make exception happens + ** when writing to it. */ + entry |= gcdMMU_STLB_EXCEPTION; + } +#endif + + return entry; +} + +static gctUINT32 +_MtlbOffset( + gctUINT32 Address + ) +{ + return (Address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT; +} + +gctUINT32 +_AddressToIndex( + IN gcsADDRESS_AREA_PTR Area, + IN gctUINT32 Address + ) +{ + gctUINT32 mtlbOffset = (Address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT; + gctUINT32 stlbOffset = (Address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT; + + return (mtlbOffset - Area->dynamicMappingStart) * gcdMMU_STLB_4K_ENTRY_NUM + stlbOffset; +} + +static gctUINT32_PTR +_StlbEntry( + gcsADDRESS_AREA_PTR Area, + gctUINT32 Address + ) +{ + gctUINT32 index = _AddressToIndex(Area, Address); + + return &Area->pageTableLogical[index]; +} + +static gceSTATUS +_FillFlatMappingInMap( + gcsADDRESS_AREA_PTR Area, + gctUINT32 Index, + gctUINT32 NumPages + ) +{ + gceSTATUS status; + gctUINT32 i; + gctBOOL gotIt = gcvFALSE; + gctUINT32 index = Index; + gctUINT32_PTR map = Area->mapLogical; + gctUINT32 previous = ~0U; + + /* Find node which contains index. 
*/ + for (i = 0; !gotIt && (i < Area->pageTableEntries);) + { + gctUINT32 numPages; + + switch (gcmENTRY_TYPE(map[i])) + { + case gcvMMU_SINGLE: + if (i == index) + { + gotIt = gcvTRUE; + } + else + { + previous = i; + i = map[i] >> 8; + } + break; + + case gcvMMU_FREE: + numPages = map[i] >> 8; + if (index >= i && index + NumPages - 1 < i + numPages) + { + gotIt = gcvTRUE; + } + else + { + previous = i; + i = map[i + 1]; + } + break; + + case gcvMMU_USED: + i++; + break; + + default: + gcmkFATAL("MMU table correcupted at index %u!", index); + gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); + } + } + + switch (gcmENTRY_TYPE(map[i])) + { + case gcvMMU_SINGLE: + /* Unlink single node from free list. */ + gcmkONERROR( + _Link(Area, previous, map[i] >> 8)); + break; + + case gcvMMU_FREE: + /* Split the node. */ + { + gctUINT32 start; + gctUINT32 next = map[i+1]; + gctUINT32 total = map[i] >> 8; + gctUINT32 countLeft = index - i; + gctUINT32 countRight = total - countLeft - NumPages; + + if (countLeft) + { + start = i; + _AddFree(Area, previous, start, countLeft); + previous = start; + } + + if (countRight) + { + start = index + NumPages; + _AddFree(Area, previous, start, countRight); + previous = start; + } + + _Link(Area, previous, next); + } + break; + } + + _FillMap(&map[index], NumPages, gcvMMU_USED); + + return gcvSTATUS_OK; +OnError: + return status; +} + +static gceSTATUS +_CollectFreeSpace( + IN gckMMU Mmu, + OUT gcsFreeSpaceNode_PTR *Array, + OUT gctINT * Size + ) +{ + gceSTATUS status = gcvSTATUS_OK; + gctPOINTER pointer = gcvNULL; + gcsFreeSpaceNode_PTR array = gcvNULL; + gcsFreeSpaceNode_PTR node = gcvNULL; + gctINT size = 0; + gctINT i = 0; + + for (i = 0; i < gcdMMU_MTLB_ENTRY_NUM; i++) + { + if (!Mmu->mtlbLogical[i]) + { + if (!node) + { + /* This is the first entry of the free space. */ + node += 1; + size++; + + } + } + else if (node) + { + /* Reset the start. */ + node = gcvNULL; + } + } + + /* Allocate memory for the array. 
*/ + gcmkONERROR(gckOS_Allocate(Mmu->os, + gcmSIZEOF(*array) * size, + &pointer)); + + array = (gcsFreeSpaceNode_PTR)pointer; + node = gcvNULL; + + for (i = 0, size = 0; i < gcdMMU_MTLB_ENTRY_NUM; i++) + { + if (!Mmu->mtlbLogical[i]) + { + if (!node) + { + /* This is the first entry of the free space. */ + node = &array[size++]; + + node->start = i; + node->entries = 0; + } + + node->entries++; + } + else if (node) + { + /* Reset the start. */ + node = gcvNULL; + } + } + +#if gcdMMU_TABLE_DUMP + for (i = 0; i < size; i++) + { + gckOS_Print("%s(%d): [%d]: start=%d, entries=%d.\n", + __FUNCTION__, __LINE__, + i, + array[i].start, + array[i].entries); + } +#endif + + *Array = array; + *Size = size; + + return gcvSTATUS_OK; + +OnError: + if (pointer != gcvNULL) + { + gckOS_Free(Mmu->os, pointer); + } + + return status; +} + +gceSTATUS +_GetMtlbFreeSpace( + IN gckMMU Mmu, + IN gctUINT32 NumEntries, + OUT gctUINT32 *MtlbStart, + OUT gctUINT32 *MtlbEnd + ) +{ + gceSTATUS status = gcvSTATUS_OK; + gcsFreeSpaceNode_PTR nodeArray = gcvNULL; + gctINT i, nodeArraySize = 0; + gctINT numEntries = gcdMMU_MTLB_ENTRY_NUM; + gctINT32 mStart = -1; + gctINT32 mEnd = -1; + + gcmkONERROR(_CollectFreeSpace(Mmu, &nodeArray, &nodeArraySize)); + + /* Find the smallest space for NumEntries */ + for (i = 0; i < nodeArraySize; i++) + { + if (nodeArray[i].entries < numEntries && NumEntries <= (gctUINT32)nodeArray[i].entries) + { + numEntries = nodeArray[i].entries; + + mStart = nodeArray[i].start; + mEnd = nodeArray[i].start + NumEntries - 1; + } + } + + if (mStart == -1 && mEnd == -1) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + *MtlbStart = (gctUINT32)mStart; + *MtlbEnd = (gctUINT32)mEnd; + +OnError: + if (nodeArray) + { + gckOS_Free(Mmu->os, (gctPOINTER)nodeArray); + } + + return status; +} + +#if gcdPROCESS_ADDRESS_SPACE +gctUINT32 +_StlbOffset( + gctUINT32 Address + ) +{ + return (Address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT; +} + +static gceSTATUS +_AllocateStlb( + IN 
gckOS Os, + OUT gcsMMU_STLB_PTR *Stlb + ) +{ + gceSTATUS status; + gcsMMU_STLB_PTR stlb; + gctPOINTER pointer; + + /* Allocate slave TLB record. */ + gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcsMMU_STLB), &pointer)); + stlb = pointer; + + stlb->size = gcdMMU_STLB_4K_SIZE; + + /* Allocate slave TLB entries. */ + gcmkONERROR(gckOS_AllocateContiguous( + Os, + gcvFALSE, + &stlb->size, + &stlb->physical, + (gctPOINTER)&stlb->logical + )); + + gcmkONERROR(gckOS_GetPhysicalAddress(Os, stlb->logical, &stlb->physBase)); + + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical(Os, stlb->physBase, &stlb->physBase)); + +#if gcdUSE_MMU_EXCEPTION + _FillPageTable(stlb->logical, stlb->size / 4, gcdMMU_STLB_EXCEPTION); +#else + gckOS_ZeroMemory(stlb->logical, stlb->size); +#endif + + *Stlb = stlb; + + return gcvSTATUS_OK; + +OnError: + return status; +} + +gceSTATUS +_SetupProcessAddressSpace( + IN gckMMU Mmu + ) +{ + gceSTATUS status; + gctINT numEntries = 0; + gctUINT32_PTR map; + + numEntries = gcdPROCESS_ADDRESS_SPACE_SIZE + /* Address space mapped by one MTLB entry. */ + / (1 << gcdMMU_MTLB_SHIFT); + + area->dynamicMappingStart = 0; + + area->pageTableSize = numEntries * 4096; + + area->pageTableEntries = area->pageTableSize / gcmSIZEOF(gctUINT32); + + gcmkONERROR(gckOS_Allocate(Mmu->os, + area->pageTableSize, + (void **)&area->mapLogical)); + + /* Initialization. */ + map = area->mapLogical; + map[0] = (area->pageTableEntries << 8) | gcvMMU_FREE; + map[1] = ~0U; + area->heapList = 0; + area->freeNodes = gcvFALSE; + + return gcvSTATUS_OK; + +OnError: + return status; +} +#else +static gceSTATUS +_FillFlatMapping( + IN gckMMU Mmu, + IN gctUINT64 PhysBase, + OUT gctSIZE_T Size, + OUT gctUINT32 *GpuBaseAddress + ) +{ + gceSTATUS status; + gctUINT32 mtlb; + gctUINT32 physBase; + gcsADDRESS_AREA_PTR area = &Mmu->area[0]; + + /************************ look up existing flat mapping ranges. 
****************/ + gctUINT64 flatBase = PhysBase; + gctUINT32 flatSize = (gctUINT32)Size; + gctUINT64 base = flatBase; + gctUINT32 size = flatSize; + gctUINT64 end = base + size; + gctUINT32 i; + + for (i = 0; i < Mmu->flatMappingRangeCount; i++) + { + if (base < Mmu->flatMappingRanges[i].start) + { + end = gcmMIN(end, Mmu->flatMappingRanges[i].start); + flatSize = (gctUINT32) (end - base); + } + else if (end > Mmu->flatMappingRanges[i].end) + { + base = gcmMAX(base, Mmu->flatMappingRanges[i].end); + + flatBase = base; + flatSize = (gctUINT32) (end - base); + } + else + { + /* it is already inside existing flat mapping ranges. */ + flatSize = 0; + } + + if (flatSize == 0) + { + if (GpuBaseAddress) + { + *GpuBaseAddress = (gctUINT32) PhysBase; + } + + return gcvSTATUS_OK; + } + } + + Mmu->flatMappingRanges[Mmu->flatMappingRangeCount].start = flatBase; + Mmu->flatMappingRanges[Mmu->flatMappingRangeCount].end = flatBase + flatSize; + Mmu->flatMappingRangeCount++; + + gcmkASSERT(Mmu->flatMappingRangeCount <= gcdMAX_FLAT_MAPPING_COUNT); + + /* overwrite the orignal parameters */ + PhysBase = flatBase; + physBase = (gctUINT32)flatBase; + Size = (gctSIZE_T)flatSize; + + mtlb = _MtlbOffset(physBase); + + /************************ Setup flat mapping in dynamic range. ****************/ + + if (area->dynamicMappingStart != gcvINVALID_ADDRESS && mtlb >= area->dynamicMappingStart && mtlb < area->dynamicMappingEnd) + { + gctUINT32_PTR stlbEntry; + gctUINT i; + + stlbEntry = _StlbEntry(area, physBase); + + /* Must be aligned to page. */ + gcmkASSERT((Size & 0xFFF) == 0); + + for (i = 0; i < (Size / 4096); i++) + { + /* Flat mapping in page table. */ + _WritePageEntry(stlbEntry, _SetPage(physBase + i * 4096, 0, gcvTRUE)); + } + + gcmkSAFECASTSIZET(size, Size); + + /* Flat mapping in map. */ + _FillFlatMappingInMap(area, _AddressToIndex(area, physBase), size / 4096); + + return gcvSTATUS_OK; + } + + /************************ Setup flat mapping in non dynamic range. 
**************/ + { + gctBOOL mutex = gcvFALSE; + gctUINT32 physBaseExt = (gctUINT32) (PhysBase >> 32); + gctUINT32 start = physBase & ~gcdMMU_PAGE_64K_MASK; + gctUINT32 end = (gctUINT32) (physBase + Size - 1) & ~gcdMMU_PAGE_64K_MASK; + gctUINT32 mStart = start >> gcdMMU_MTLB_SHIFT; + gctUINT32 mEnd = end >> gcdMMU_MTLB_SHIFT; + gctUINT32 sStart = (start & gcdMMU_STLB_64K_MASK) >> gcdMMU_STLB_64K_SHIFT; + gctUINT32 sEnd = (end & gcdMMU_STLB_64K_MASK) >> gcdMMU_STLB_64K_SHIFT; + gctPHYS_ADDR_T physical; + gcsMMU_STLB_CHUNK_PTR newStlbChunk = gcvNULL; + gctUINT32 stlbIndex = 0; + gctUINT32 totalNewStlbs = 0; + gctINT32 firstMtlbEntry = -1; + gctUINT32 mtlbCurEntry; + gcsMMU_STLB_CHUNK_PTR curStlbChunk = gcvNULL; + gctUINT32 seqs[2] = {0, 0}; + gctUINT32 seqIdx = 0; + + /* Grab the mutex. */ + gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE)); + mutex = gcvTRUE; + + if (PhysBase + Size - 1 > 0xffffffff) + { + gctUINT32 mEntries; + gctUINT32 sEntries; + + mEntries = (gctUINT32)(Size + (1 << gcdMMU_MTLB_SHIFT) - 1) / (1 << gcdMMU_MTLB_SHIFT); + + gcmkONERROR(_GetMtlbFreeSpace(Mmu, mEntries, &mStart, &mEnd)); + + sStart = 0; + sEntries = (gctUINT32)(Size + gcdMMU_PAGE_64K_SIZE - 1) / gcdMMU_PAGE_64K_SIZE; + sEnd = (sEntries - 1) % gcdMMU_STLB_64K_ENTRY_NUM; + } + + if (GpuBaseAddress) + { + *GpuBaseAddress = (mStart << gcdMMU_MTLB_SHIFT) + | (sStart << gcdMMU_STLB_64K_SHIFT) + | (physBase & gcdMMU_PAGE_64K_MASK); + } + + mtlbCurEntry = mStart; + + /* find all new stlbs, part of new flat mapping range may already have stlbs*/ + while (mtlbCurEntry <= mEnd) + { + if (*(Mmu->mtlbLogical + mtlbCurEntry) == 0) + { + if (seqIdx < 2) + { + if (seqs[seqIdx] != 2) + { + seqs[seqIdx] = 1; + } + else if (seqIdx < 1) + { + seqs[++seqIdx] = 1; + } + else + { + gcmkASSERT(gcvFALSE); + } + } + else if (seqs[1] != 1) + { + gcmkPRINT("There is a hole in new flat mapping range, which is not correct"); + } + totalNewStlbs++; + if (-1 == firstMtlbEntry) + { + 
firstMtlbEntry = mtlbCurEntry; + } + } + else + { + if (seqIdx < 2) + { + if (seqs[seqIdx] != 1) + { + seqs[seqIdx] = 2; + } + else if (seqIdx < 1) + { + seqs[++seqIdx] = 2; + } + else + { + gcmkASSERT(gcvFALSE); + } + } + else if (seqs[1] != 2) + { + gcmkPRINT("There is a hole in new flat mapping range, which is not correct"); + } + } + mtlbCurEntry++; + } + + /* Need allocate a new chunk of stlbs */ + if (totalNewStlbs) + { + gctUINT32 allocFlag = gcvALLOC_FLAG_CONTIGUOUS; + + gcmkONERROR( + gckOS_Allocate(Mmu->os, + sizeof(struct _gcsMMU_STLB_CHUNK), + (gctPOINTER *)&newStlbChunk)); + + newStlbChunk->mtlbEntryNum = totalNewStlbs; + newStlbChunk->next = gcvNULL; + newStlbChunk->physical = gcvNULL; + newStlbChunk->logical = gcvNULL; + newStlbChunk->size = gcdMMU_STLB_64K_SIZE * newStlbChunk->mtlbEntryNum; + newStlbChunk->pageCount = 0; + newStlbChunk->mtlbIndex = firstMtlbEntry; + +#if gcdENABLE_CACHEABLE_COMMAND_BUFFER + allocFlag |= gcvALLOC_FLAG_CACHEABLE; +#endif + + gcmkONERROR( + gckOS_AllocateNonPagedMemory(Mmu->os, + gcvFALSE, + allocFlag, + &newStlbChunk->size, + &newStlbChunk->physical, + (gctPOINTER)&newStlbChunk->logical)); + + gcmkONERROR(gckOS_ZeroMemory(newStlbChunk->logical, newStlbChunk->size)); + + gcmkONERROR(gckOS_GetPhysicalAddress( + Mmu->os, + newStlbChunk->logical, + &physical)); + + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical( + Mmu->os, + physical, + &physical)); + + gcmkSAFECASTPHYSADDRT(newStlbChunk->physBase, physical); + + if (newStlbChunk->physBase & (gcdMMU_STLB_64K_SIZE - 1)) + { + gcmkONERROR(gcvSTATUS_NOT_ALIGNED); + } + } + + while (mStart <= mEnd) + { + gctUINT32 last = (mStart == mEnd) ? 
sEnd : (gcdMMU_STLB_64K_ENTRY_NUM - 1); + gctPHYS_ADDR_T stlbPhyBase; + gctUINT32_PTR stlbLogical; + + gcmkASSERT(mStart < gcdMMU_MTLB_ENTRY_NUM); + + if (*(Mmu->mtlbLogical + mStart) == 0) + { + gctUINT32 mtlbEntry; + curStlbChunk = newStlbChunk; + stlbPhyBase = curStlbChunk->physBase + (stlbIndex * gcdMMU_STLB_64K_SIZE); + stlbLogical = (gctUINT32_PTR)((gctUINT8_PTR)curStlbChunk->logical + (stlbIndex * gcdMMU_STLB_64K_SIZE)); + physical = stlbPhyBase + /* 64KB page size */ + | (1 << 2) + /* Ignore exception */ + | (0 << 1) + /* Present */ + | (1 << 0); + + gcmkSAFECASTPHYSADDRT(mtlbEntry, physical); + + _WritePageEntry(Mmu->mtlbLogical + mStart, mtlbEntry); + +#if gcdMMU_TABLE_DUMP + gckOS_Print("%s(%d): insert MTLB[%d]: %08x\n", + __FUNCTION__, __LINE__, + mStart, + _ReadPageEntry(Mmu->mtlbLogical + mStart)); +#endif + +#if gcdMMU_TABLE_DUMP + gckOS_Print("%s(%d): STLB: logical:%08x -> physical:%08x\n", + __FUNCTION__, __LINE__, + stlbLogical, + stlbPhyBase); +#endif + ++stlbIndex; + } + else + { + gctUINT32 mtlbEntry = _ReadPageEntry(Mmu->mtlbLogical + mStart); + gctUINT stlbOffset; + + curStlbChunk = (gcsMMU_STLB_CHUNK_PTR)Mmu->staticSTLB; + + while (curStlbChunk) + { + if ((mStart >= curStlbChunk->mtlbIndex) && + (mStart < (curStlbChunk->mtlbIndex + curStlbChunk->mtlbEntryNum))) + { + break; + } + curStlbChunk = curStlbChunk->next; + } + gcmkASSERT(curStlbChunk); + stlbOffset = mStart - curStlbChunk->mtlbIndex; + + stlbPhyBase = curStlbChunk->physBase + (stlbOffset * gcdMMU_STLB_64K_SIZE); + stlbLogical = (gctUINT32_PTR)((gctUINT8_PTR)curStlbChunk->logical + (stlbOffset * gcdMMU_STLB_64K_SIZE)); + if (stlbPhyBase != (mtlbEntry & gcdMMU_MTLB_ENTRY_STLB_MASK)) + { + gcmkASSERT(0); + } + } + + while (sStart <= last) + { + gcmkASSERT(!(start & gcdMMU_PAGE_64K_MASK)); + + _WritePageEntry(stlbLogical + sStart, _SetPage(start, physBaseExt, gcvTRUE)); + +#if gcdMMU_TABLE_DUMP + gckOS_Print("%s(%d): insert STLB[%d]: %08x\n", + __FUNCTION__, __LINE__, + sStart, + 
_ReadPageEntry(stlbLogical + sStart)); +#endif + /* next page. */ + start += gcdMMU_PAGE_64K_SIZE; + if (start == 0) + { + physBaseExt++; + } + sStart++; + curStlbChunk->pageCount++; + } + + /* Flush STLB table. */ + gcmkONERROR(gckOS_CacheClean( + Mmu->os, + 0, + curStlbChunk->physical, + 0, + curStlbChunk->logical, + curStlbChunk->size + )); + + sStart = 0; + ++mStart; + } + + gcmkASSERT(totalNewStlbs == stlbIndex); + + if (newStlbChunk) + { + /* Insert the stlbChunk into staticSTLB. */ + if (Mmu->staticSTLB == gcvNULL) + { + Mmu->staticSTLB = newStlbChunk; + } + else + { + gcmkASSERT(newStlbChunk != gcvNULL); + gcmkASSERT(newStlbChunk->next == gcvNULL); + newStlbChunk->next = Mmu->staticSTLB; + Mmu->staticSTLB = newStlbChunk; + } + } + + /* Release the mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex)); + +#if gcdENABLE_TRUST_APPLICATION + if (Mmu->hardware->options.secureMode == gcvSECURE_IN_TA) + { + gckKERNEL_SecurityMapMemory(Mmu->hardware->kernel, gcvNULL, physBase, (gctUINT32)Size/4096, &physBase); + } +#endif + + return gcvSTATUS_OK; +OnError: + /* Roll back the allocation. + ** We don't need roll back mtlb programming as gckmONERROR + ** is only used during allocation time. + */ + if (newStlbChunk) + { + if (newStlbChunk->physical) + { + gcmkVERIFY_OK( + gckOS_FreeContiguous(Mmu->os, + newStlbChunk->physical, + newStlbChunk->logical, + newStlbChunk->size)); + } + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, newStlbChunk)); + } + if (mutex) + { + /* Release the mutex. 
*/ + gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex)); + } + return status; + } +} + +static gceSTATUS +_SetupAddressArea( + IN gckOS Os, + IN gcsADDRESS_AREA_PTR Area, + IN gctUINT32 NumMTLBEntries + ) +{ + gceSTATUS status; + gctUINT32_PTR map; + + gcmkHEADER(); + Area->pageTableSize = NumMTLBEntries * 4096; + + gcmkSAFECASTSIZET(Area->pageTableEntries, Area->pageTableSize / gcmSIZEOF(gctUINT32)); + + gcmkONERROR(gckOS_Allocate(Os, Area->pageTableSize, (void **)&Area->mapLogical)); + + /* Initialization. */ + map = Area->mapLogical; + map[0] = (Area->pageTableEntries << 8) | gcvMMU_FREE; + map[1] = ~0U; + Area->heapList = 0; + Area->freeNodes = gcvFALSE; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +static gceSTATUS +_SetupDynamicSpace( + IN gckMMU Mmu + ) +{ + gceSTATUS status; + gcsFreeSpaceNode_PTR nodeArray = gcvNULL; + gctINT i, nodeArraySize = 0; + gctPHYS_ADDR_T physical; + gctUINT32 address; + gctINT numEntries = 0; + gctBOOL acquired = gcvFALSE; + gctUINT32 mtlbEntry; + gcsADDRESS_AREA_PTR area = &Mmu->area[0]; + gcsADDRESS_AREA_PTR areaSecure = &Mmu->area[gcvADDRESS_AREA_SECURE]; + gctUINT32 secureAreaSize = 0; + gctUINT32 allocFlag = gcvALLOC_FLAG_CONTIGUOUS; + + /* Find all the free address space. */ + gcmkONERROR(_CollectFreeSpace(Mmu, &nodeArray, &nodeArraySize)); + + for (i = 0; i < nodeArraySize; i++) + { + if (nodeArray[i].entries > numEntries) + { + area->dynamicMappingStart = nodeArray[i].start; + numEntries = nodeArray[i].entries; + area->dynamicMappingEnd = area->dynamicMappingStart + numEntries; + } + } + + gckOS_Free(Mmu->os, (gctPOINTER)nodeArray); + +#if gcdENABLE_TRUST_APPLICATION + if (gckHARDWARE_IsFeatureAvailable(Mmu->hardware, gcvFEATURE_SECURITY) == gcvSTATUS_TRUE) + { + secureAreaSize = gcdMMU_SECURE_AREA_SIZE; + } +#endif + + /* Setup secure address area if need. 
*/ + if (secureAreaSize > 0) + { + gcmkASSERT(numEntries > (gctINT)secureAreaSize); + + areaSecure->dynamicMappingStart = area->dynamicMappingStart + + (numEntries - secureAreaSize); + + gcmkONERROR(_SetupAddressArea(Mmu->os, areaSecure, secureAreaSize)); + + numEntries -= secureAreaSize; + } + + /* Setup normal address area. */ + gcmkONERROR(_SetupAddressArea(Mmu->os, area, numEntries)); + +#if gcdENABLE_CACHEABLE_COMMAND_BUFFER + allocFlag |= gcvALLOC_FLAG_CACHEABLE; +#endif + + /* Construct Slave TLB. */ + gcmkONERROR(gckOS_AllocateNonPagedMemory(Mmu->os, + gcvFALSE, + allocFlag, + &area->pageTableSize, + &area->pageTablePhysical, + (gctPOINTER)&area->pageTableLogical)); + +#if gcdUSE_MMU_EXCEPTION + gcmkONERROR(_FillPageTable(area->pageTableLogical, + area->pageTableEntries, + /* Enable exception */ + 1 << 1)); +#else + /* Invalidate all entries. */ + gcmkONERROR(gckOS_ZeroMemory(area->pageTableLogical, + area->pageTableSize)); +#endif + + gcmkONERROR(gckOS_GetPhysicalAddress(Mmu->os, + area->pageTableLogical, + &physical)); + + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical(Mmu->os, + physical, + &physical)); + + gcmkSAFECASTPHYSADDRT(address, physical); + + /* Grab the mutex. */ + gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE)); + acquired = gcvTRUE; + + /* Map to Master TLB. */ + for (i = (gctINT)area->dynamicMappingStart; + i < (gctINT)area->dynamicMappingStart + numEntries; + i++) + { + mtlbEntry = address + /* 4KB page size */ + | (0 << 2) + /* Ignore exception */ + | (0 << 1) + /* Present */ + | (1 << 0); + + _WritePageEntry(Mmu->mtlbLogical + i, mtlbEntry); + +#if gcdMMU_TABLE_DUMP + gckOS_Print("%s(%d): insert MTLB[%d]: %08x\n", + __FUNCTION__, __LINE__, + i, + _ReadPageEntry(Mmu->mtlbLogical + i)); +#endif + address += gcdMMU_STLB_4K_SIZE; + } + + /* Release the mutex. 
*/ + gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex)); + + return gcvSTATUS_OK; + +OnError: + if (area->mapLogical) + { + gcmkVERIFY_OK( + gckOS_Free(Mmu->os, (gctPOINTER) area->mapLogical)); + + + gcmkVERIFY_OK( + gckOS_FreeContiguous(Mmu->os, + area->pageTablePhysical, + (gctPOINTER) area->pageTableLogical, + area->pageTableSize)); + } + + if (acquired) + { + /* Release the mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex)); + } + + return status; +} +#endif + +gctUINT32 +_GetPageCountOfUsedNode( + gctUINT32_PTR Node + ) +{ + gctUINT32 count; + + count = gcmENTRY_COUNT(*Node); + + if ((count << 8) == (~((1U<<8)-1))) + { + count = 1; + } + + return count; +} + +static gcsADDRESS_AREA_PTR +_GetProcessArea( + IN gckMMU Mmu, + IN gctBOOL Secure + ) +{ + gceADDRESS_AREA area = gcvADDRESS_AREA_NORMAL; + +#if gcdENABLE_TRUST_APPLICATION + if (Secure == gcvTRUE) + { + area = gcvADDRESS_AREA_SECURE; + } +#endif + + return &Mmu->area[area]; +} + +/******************************************************************************* +** +** _Construct +** +** Construct a new gckMMU object. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gctSIZE_T MmuSize +** Number of bytes for the page table. +** +** OUTPUT: +** +** gckMMU * Mmu +** Pointer to a variable that receives the gckMMU object pointer. 
+*/ +gceSTATUS +_Construct( + IN gckKERNEL Kernel, + IN gctSIZE_T MmuSize, + OUT gckMMU * Mmu + ) +{ + gckOS os; + gckHARDWARE hardware; + gceSTATUS status; + gckMMU mmu = gcvNULL; + gctUINT32_PTR map; + gctPOINTER pointer = gcvNULL; + gctUINT32 physBase; + gctUINT32 physSize; + gctUINT32 contiguousBase; + gctUINT32 contiguousSize = 0; + gctUINT32 externalBase; + gctUINT32 externalSize = 0; + gctUINT32 gpuAddress; + gctPHYS_ADDR_T gpuPhysical; + gcsADDRESS_AREA_PTR area = gcvNULL; + gctUINT32 allocFlag = gcvALLOC_FLAG_CONTIGUOUS; + + gcmkHEADER_ARG("Kernel=0x%x MmuSize=%lu", Kernel, MmuSize); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + gcmkVERIFY_ARGUMENT(MmuSize > 0); + gcmkVERIFY_ARGUMENT(Mmu != gcvNULL); + + /* Extract the gckOS object pointer. */ + os = Kernel->os; + gcmkVERIFY_OBJECT(os, gcvOBJ_OS); + + /* Extract the gckHARDWARE object pointer. */ + hardware = Kernel->hardware; + gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE); + + /* Allocate memory for the gckMMU object. */ + gcmkONERROR(gckOS_Allocate(os, sizeof(struct _gckMMU), &pointer)); + + gckOS_ZeroMemory(pointer, sizeof(struct _gckMMU)); + + mmu = pointer; + + /* Initialize the gckMMU object. */ + mmu->object.type = gcvOBJ_MMU; + mmu->os = os; + mmu->hardware = hardware; + mmu->pageTableMutex = gcvNULL; + mmu->mtlbLogical = gcvNULL; + mmu->staticSTLB = gcvNULL; + mmu->enabled = gcvFALSE; + gcsLIST_Init(&mmu->hardwareList); + + + area = &mmu->area[0]; + area->mapLogical = gcvNULL; + area->pageTableLogical = gcvNULL; + + /* Create the page table mutex. */ + gcmkONERROR(gckOS_CreateMutex(os, &mmu->pageTableMutex)); + + if (hardware->mmuVersion == 0) + { + area->pageTableSize = MmuSize; + + /* Construct address space management table. */ + gcmkONERROR(gckOS_Allocate(mmu->os, + area->pageTableSize, + &pointer)); + + area->mapLogical = pointer; + +#if gcdENABLE_CACHEABLE_COMMAND_BUFFER + allocFlag |= gcvALLOC_FLAG_CACHEABLE; +#endif + + /* Construct page table read by GPU. 
*/ + gcmkONERROR(gckOS_AllocateNonPagedMemory(mmu->os, + gcvFALSE, + allocFlag, + &area->pageTableSize, + &area->pageTablePhysical, + (gctPOINTER)&area->pageTableLogical)); + + + /* Compute number of entries in page table. */ + gcmkSAFECASTSIZET(area->pageTableEntries, area->pageTableSize / sizeof(gctUINT32)); + + /* Mark all pages as free. */ + map = area->mapLogical; + + _FillPageTable(area->pageTableLogical, area->pageTableEntries, mmu->safeAddress); + + map[0] = (area->pageTableEntries << 8) | gcvMMU_FREE; + map[1] = ~0U; + area->heapList = 0; + area->freeNodes = gcvFALSE; + + status = gckOS_QueryOption(mmu->os, "contiguousBase", &contiguousBase); + + if (gcmIS_SUCCESS(status)) + { + status = gckOS_QueryOption(mmu->os, "contiguousSize", &contiguousSize); + } + + if (gcmIS_SUCCESS(status) && contiguousSize) + { + mmu->contiguousBaseAddress = contiguousBase - Kernel->hardware->baseAddress; + } + + } + else + { + /* Allocate the 4K mode MTLB table. */ + mmu->mtlbSize = gcdMMU_MTLB_SIZE; + +#if gcdENABLE_CACHEABLE_COMMAND_BUFFER + allocFlag |= gcvALLOC_FLAG_CACHEABLE; +#endif + + gcmkONERROR( + gckOS_AllocateNonPagedMemory(os, + gcvFALSE, + allocFlag, + &mmu->mtlbSize, + &mmu->mtlbPhysical, + &pointer)); + + mmu->mtlbLogical = pointer; + + area->dynamicMappingStart = gcvINVALID_ADDRESS; + +#if gcdPROCESS_ADDRESS_SPACE + _FillPageTable(pointer, mmu->mtlbSize / 4, gcdMMU_MTLB_EXCEPTION); + + /* Allocate a array to store stlbs. */ + gcmkONERROR(gckOS_Allocate(os, mmu->mtlbSize, &mmu->stlbs)); + + gckOS_ZeroMemory(mmu->stlbs, mmu->mtlbSize); + + for (i = 0; i < gcdMAX_GPU_COUNT; i++) + { + gcmkONERROR(gckOS_AtomConstruct(os, &mmu->pageTableDirty[i])); + } + + _SetupProcessAddressSpace(mmu); + + /* Map kernel command buffer in MMU. 
*/ + for (i = 0; i < gcdCOMMAND_QUEUES; i++) + { + gcmkONERROR(gckOS_GetPhysicalAddress( + mmu->os, + Kernel->command->queues[i].logical, + &gpuPhysical + )); + + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical( + mmu->os, + gpuPhysical, + &gpuPhysical + )); + + gcmkSAFECASTPHYSADDRT(gpuAddress, gpuPhysical); + + gcmkONERROR(gckMMU_FlatMapping(mmu, gpuAddress, 1)); + } +#else + /* Invalid all the entries. */ + gcmkONERROR( + gckOS_ZeroMemory(pointer, mmu->mtlbSize)); + + gcmkONERROR( + gckOS_QueryOption(mmu->os, "physBase", &physBase)); + + gcmkONERROR( + gckOS_QueryOption(mmu->os, "physSize", &physSize)); + + gcmkONERROR( + gckOS_CPUPhysicalToGPUPhysical(mmu->os, physBase, &gpuPhysical)); + + gcmkSAFECASTPHYSADDRT(gpuAddress, gpuPhysical); + + if (physSize) + { + /* Setup user specified flat mapping. */ + gcmkONERROR(_FillFlatMapping(mmu, gpuAddress, physSize, gcvNULL)); + } + +#ifndef EMULATOR + if (!_ReadPageEntry(mmu->mtlbLogical + 0)) + { + gctUINT32 mtlbEntry; + /* + * Reserved 0~4MB space. + * 64KB page size, Ingore exception, Not Present. + */ + mtlbEntry = (1 << 2) + | (0 << 1) + | (0 << 0); + + _WritePageEntry(mmu->mtlbLogical + 0, mtlbEntry); + } +#endif + + status = gckOS_QueryOption(mmu->os, "contiguousBase", &contiguousBase); + + if (gcmIS_SUCCESS(status)) + { + status = gckOS_QueryOption(mmu->os, "contiguousSize", &contiguousSize); + } + + if (gcmIS_SUCCESS(status) && contiguousSize) + { + gctUINT64 gpuContiguousBase; + gctUINT32 contiguousBaseAddress; + + gcmkONERROR(gckOS_CPUPhysicalToGPUPhysical(mmu->os, contiguousBase, &gpuContiguousBase)); + + /* Setup flat mapping for reserved memory (VIDMEM). 
*/ + gcmkONERROR(_FillFlatMapping(mmu, gpuContiguousBase, contiguousSize, &contiguousBaseAddress)); + + mmu->contiguousBaseAddress = contiguousBaseAddress; + } + + status = gckOS_QueryOption(mmu->os, "externalBase", &externalBase); + + if (gcmIS_SUCCESS(status)) + { + status = gckOS_QueryOption(mmu->os, "externalSize", &externalSize); + } + + if (gcmIS_SUCCESS(status) && externalSize) + { + gctUINT64 gpuExternalBase; + gctUINT32 externalBaseAddress; + + gcmkONERROR(gckOS_CPUPhysicalToGPUPhysical(mmu->os, externalBase, &gpuExternalBase)); + + /* Setup flat mapping for external memory. */ + gcmkONERROR(_FillFlatMapping(mmu, gpuExternalBase, externalSize, &externalBaseAddress)); + + mmu->externalBaseAddress = externalBaseAddress; + } + + gcmkONERROR(_SetupDynamicSpace(mmu)); +#endif + + /* Flush MTLB table. */ + gcmkONERROR(gckOS_CacheClean( + os, + 0, + mmu->mtlbPhysical, + 0, + mmu->mtlbLogical, + mmu->mtlbSize + )); + } + + mmu->safePageSize = 4096; + + gcmkONERROR(gckOS_AllocateContiguous( + os, + gcvFALSE, + &mmu->safePageSize, + &mmu->safePagePhysical, + &mmu->safePageLogical + )); + + gcmkONERROR(gckOS_GetPhysicalAddress( + os, + mmu->safePageLogical, + &gpuPhysical + )); + + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical( + os, + gpuPhysical, + &gpuPhysical + )); + + gcmkSAFECASTPHYSADDRT(mmu->safeAddress, gpuPhysical); + + gckOS_ZeroMemory(mmu->safePageLogical, mmu->safePageSize); + + gcmkONERROR(gckQUEUE_Allocate(os, &mmu->recentFreedAddresses, 16)); + + /* Return the gckMMU object pointer. */ + *Mmu = mmu; + + /* Success. */ + gcmkFOOTER_ARG("*Mmu=0x%x", *Mmu); + return gcvSTATUS_OK; + +OnError: + /* Roll back. 
*/ + if (mmu != gcvNULL) + { + if (area != gcvNULL && area->mapLogical != gcvNULL) + { + gcmkVERIFY_OK( + gckOS_Free(os, (gctPOINTER) area->mapLogical)); + + + gcmkVERIFY_OK( + gckOS_FreeContiguous(os, + area->pageTablePhysical, + (gctPOINTER) area->pageTableLogical, + area->pageTableSize)); + } + + if (mmu->mtlbLogical != gcvNULL) + { + gcmkVERIFY_OK( + gckOS_FreeContiguous(os, + mmu->mtlbPhysical, + (gctPOINTER) mmu->mtlbLogical, + mmu->mtlbSize)); + } + + if (mmu->pageTableMutex != gcvNULL) + { + /* Delete the mutex. */ + gcmkVERIFY_OK( + gckOS_DeleteMutex(os, mmu->pageTableMutex)); + } + + gcmkVERIFY_OK(gckQUEUE_Free(os, &mmu->recentFreedAddresses)); + + /* Mark the gckMMU object as unknown. */ + mmu->object.type = gcvOBJ_UNKNOWN; + + /* Free the allocates memory. */ + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, mmu)); + } + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** _Destroy +** +** Destroy a gckMMU object. +** +** INPUT: +** +** gckMMU Mmu +** Pointer to an gckMMU object. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +_Destroy( + IN gckMMU Mmu + ) +{ + gctUINT32 i; + gcmkHEADER_ARG("Mmu=0x%x", Mmu); + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU); + + while (Mmu->staticSTLB != gcvNULL) + { + gcsMMU_STLB_CHUNK_PTR pre = Mmu->staticSTLB; + Mmu->staticSTLB = pre->next; + + if (pre->physical != gcvNULL) + { + gcmkVERIFY_OK( + gckOS_FreeContiguous(Mmu->os, + pre->physical, + pre->logical, + pre->size)); + } + + if (pre->mtlbEntryNum != 0) + { + gctUINT i; + for (i = 0; i < pre->mtlbEntryNum; ++i) + { + _WritePageEntry(Mmu->mtlbLogical + pre->mtlbIndex + i, 0); +#if gcdMMU_TABLE_DUMP + gckOS_Print("%s(%d): clean MTLB[%d]\n", + __FUNCTION__, __LINE__, + pre->mtlbIndex + i); +#endif + } + } + + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, pre)); + } + + if (Mmu->hardware->mmuVersion != 0) + { + gcmkVERIFY_OK( + gckOS_FreeContiguous(Mmu->os, + Mmu->mtlbPhysical, + (gctPOINTER) Mmu->mtlbLogical, + Mmu->mtlbSize)); + } + + for (i = 0; i < gcvADDRESS_AREA_COUNT; i++) + { + gcsADDRESS_AREA_PTR area = &Mmu->area[i]; + + /* Free address space management table. */ + if (area->mapLogical != gcvNULL) + { + gcmkVERIFY_OK( + gckOS_Free(Mmu->os, (gctPOINTER) area->mapLogical)); + } + + if (area->pageTableLogical != gcvNULL) + { + /* Free page table. */ + gcmkVERIFY_OK( + gckOS_FreeContiguous(Mmu->os, + area->pageTablePhysical, + (gctPOINTER) area->pageTableLogical, + area->pageTableSize)); + } + } + + /* Delete the page table mutex. 
*/ + gcmkVERIFY_OK(gckOS_DeleteMutex(Mmu->os, Mmu->pageTableMutex)); + +#if gcdPROCESS_ADDRESS_SPACE + for (i = 0; i < Mmu->mtlbSize / 4; i++) + { + struct _gcsMMU_STLB_CHUNK *stlb = ((struct _gcsMMU_STLB_CHUNK **)Mmu->stlbs)[i]; + + if (stlb) + { + gcmkVERIFY_OK(gckOS_FreeContiguous( + Mmu->os, + stlb->physical, + stlb->logical, + stlb->size)); + + gcmkOS_SAFE_FREE(Mmu->os, stlb); + } + } + + gcmkOS_SAFE_FREE(Mmu->os, Mmu->stlbs); +#endif + + if (Mmu->safePageLogical != gcvNULL) + { + gcmkVERIFY_OK(gckOS_FreeContiguous( + Mmu->os, + Mmu->safePagePhysical, + Mmu->safePageLogical, + Mmu->safePageSize + )); + } + + gcmkVERIFY_OK(gckQUEUE_Free(Mmu->os, &Mmu->recentFreedAddresses)); + + /* Mark the gckMMU object as unknown. */ + Mmu->object.type = gcvOBJ_UNKNOWN; + + /* Free the gckMMU object. */ + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, Mmu)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** _AdjstIndex +** +** Adjust the index from which we search for a usable node to make sure +** index allocated is greater than Start. 
+*/ +gceSTATUS +_AdjustIndex( + IN gckMMU Mmu, + IN gctUINT32 Index, + IN gctUINT32 PageCount, + IN gctUINT32 Start, + OUT gctUINT32 * IndexAdjusted + ) +{ + gceSTATUS status; + gctUINT32 index = Index; + gcsADDRESS_AREA_PTR area = &Mmu->area[0]; + gctUINT32_PTR map = area->mapLogical; + + gcmkHEADER(); + + for (; index < area->pageTableEntries;) + { + gctUINT32 result = 0; + gctUINT32 nodeSize = 0; + + if (index >= Start) + { + break; + } + + switch (gcmENTRY_TYPE(map[index])) + { + case gcvMMU_SINGLE: + nodeSize = 1; + break; + + case gcvMMU_FREE: + nodeSize = map[index] >> 8; + break; + + default: + gcmkFATAL("MMU table correcupted at index %u!", index); + gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); + } + + if (nodeSize > PageCount) + { + result = index + (nodeSize - PageCount); + + if (result >= Start) + { + break; + } + } + + switch (gcmENTRY_TYPE(map[index])) + { + case gcvMMU_SINGLE: + index = map[index] >> 8; + break; + + case gcvMMU_FREE: + index = map[index + 1]; + break; + + default: + gcmkFATAL("MMU table correcupted at index %u!", index); + gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); + } + } + + *IndexAdjusted = index; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckMMU_Construct( + IN gckKERNEL Kernel, + IN gctSIZE_T MmuSize, + OUT gckMMU * Mmu + ) +{ +#if gcdSHARED_PAGETABLE + gceSTATUS status; + gctPOINTER pointer; + + gcmkHEADER_ARG("Kernel=0x%08x", Kernel); + + if (sharedPageTable == gcvNULL) + { + gcmkONERROR( + gckOS_Allocate(Kernel->os, + sizeof(struct _gcsSharedPageTable), + &pointer)); + sharedPageTable = pointer; + + gcmkONERROR( + gckOS_ZeroMemory(sharedPageTable, + sizeof(struct _gcsSharedPageTable))); + + gcmkONERROR(_Construct(Kernel, MmuSize, &sharedPageTable->mmu)); + } + + *Mmu = sharedPageTable->mmu; + + sharedPageTable->hardwares[sharedPageTable->reference] = Kernel->hardware; + + sharedPageTable->reference++; + + gcmkFOOTER_ARG("sharedPageTable->reference=%lu", 
sharedPageTable->reference); + return gcvSTATUS_OK; + +OnError: + if (sharedPageTable) + { + if (sharedPageTable->mmu) + { + gcmkVERIFY_OK(gckMMU_Destroy(sharedPageTable->mmu)); + } + + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, sharedPageTable)); + } + + gcmkFOOTER(); + return status; +#else + return _Construct(Kernel, MmuSize, Mmu); +#endif +} + +gceSTATUS +gckMMU_Destroy( + IN gckMMU Mmu + ) +{ +#if gcdSHARED_PAGETABLE + gckOS os = Mmu->os; + + sharedPageTable->reference--; + + if (sharedPageTable->reference == 0) + { + if (sharedPageTable->mmu) + { + gcmkVERIFY_OK(_Destroy(Mmu)); + } + + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, sharedPageTable)); + } + + return gcvSTATUS_OK; +#else + return _Destroy(Mmu); +#endif +} + +/******************************************************************************* +** +** gckMMU_AllocatePages +** +** Allocate pages inside the page table. +** +** INPUT: +** +** gckMMU Mmu +** Pointer to an gckMMU object. +** +** gctSIZE_T PageCount +** Number of pages to allocate. +** +** OUTPUT: +** +** gctPOINTER * PageTable +** Pointer to a variable that receives the base address of the page +** table. +** +** gctUINT32 * Address +** Pointer to a variable that receives the hardware specific address. +*/ +gceSTATUS +_AllocatePages( + IN gckMMU Mmu, + IN gctSIZE_T PageCount, + IN gceSURF_TYPE Type, + IN gctBOOL Secure, + OUT gctPOINTER * PageTable, + OUT gctUINT32 * Address + ) +{ + gceSTATUS status; + gctBOOL mutex = gcvFALSE; + gctUINT32 index = 0, previous = ~0U, left; + gctUINT32_PTR map; + gctBOOL gotIt; + gctUINT32 address; + gctUINT32 pageCount; + gcsADDRESS_AREA_PTR area = _GetProcessArea(Mmu, Secure); + + gcmkHEADER_ARG("Mmu=0x%x PageCount=%lu", Mmu, PageCount); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU); + gcmkVERIFY_ARGUMENT(PageCount > 0); + gcmkVERIFY_ARGUMENT(PageTable != gcvNULL); + + if (PageCount > area->pageTableEntries) + { + /* Not enough pages avaiable. 
*/ + gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); + } + + gcmkSAFECASTSIZET(pageCount, PageCount); + +#if gcdBOUNDARY_CHECK + /* Extra pages as bounary. */ + pageCount += gcdBOUNDARY_CHECK * 2; +#endif + + /* Grab the mutex. */ + gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE)); + mutex = gcvTRUE; + + /* Cast pointer to page table. */ + for (map = area->mapLogical, gotIt = gcvFALSE; !gotIt;) + { + index = area->heapList; + + if ((Mmu->hardware->mmuVersion == 0) && (Type == gcvSURF_VERTEX)) + { + gcmkONERROR(_AdjustIndex( + Mmu, + index, + pageCount, + gcdVERTEX_START / gcmSIZEOF(gctUINT32), + &index + )); + } + + /* Walk the heap list. */ + for (; !gotIt && (index < area->pageTableEntries);) + { + /* Check the node type. */ + switch (gcmENTRY_TYPE(map[index])) + { + case gcvMMU_SINGLE: + /* Single odes are valid if we only need 1 page. */ + if (pageCount == 1) + { + gotIt = gcvTRUE; + } + else + { + /* Move to next node. */ + previous = index; + index = map[index] >> 8; + } + break; + + case gcvMMU_FREE: + /* Test if the node has enough space. */ + if (pageCount <= (map[index] >> 8)) + { + gotIt = gcvTRUE; + } + else + { + /* Move to next node. */ + previous = index; + index = map[index + 1]; + } + break; + + default: + gcmkFATAL("MMU table correcupted at index %u!", index); + gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); + } + } + + /* Test if we are out of memory. */ + if (index >= area->pageTableEntries) + { + if (area->freeNodes) + { + /* Time to move out the trash! */ + gcmkONERROR(_Collect(area)); + + /* We are going to search from start, so reset previous to start. */ + previous = ~0U; + } + else + { + /* Out of resources. */ + gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); + } + } + } + + switch (gcmENTRY_TYPE(map[index])) + { + case gcvMMU_SINGLE: + /* Unlink single node from free list. */ + gcmkONERROR( + _Link(area, previous, map[index] >> 8)); + break; + + case gcvMMU_FREE: + /* Check how many pages will be left. 
*/ + left = (map[index] >> 8) - pageCount; + switch (left) + { + case 0: + /* The entire node is consumed, just unlink it. */ + gcmkONERROR( + _Link(area, previous, map[index + 1])); + break; + + case 1: + /* One page will remain. Convert the node to a single node and + ** advance the index. */ + map[index] = (map[index + 1] << 8) | gcvMMU_SINGLE; + index ++; + break; + + default: + /* Enough pages remain for a new node. However, we will just adjust + ** the size of the current node and advance the index. */ + map[index] = (left << 8) | gcvMMU_FREE; + index += left; + break; + } + break; + } + + /* Mark node as used. */ + gcmkONERROR(_FillMap(&map[index], pageCount, gcvMMU_USED)); + +#if gcdBOUNDARY_CHECK + index += gcdBOUNDARY_CHECK; +#endif + + /* Record pageCount of allocated node at the beginning of node. */ + if (pageCount == 1) + { + map[index] = (~((1U<<8)-1)) | gcvMMU_USED; + } + else + { + map[index] = (pageCount << 8) | gcvMMU_USED; + } + + if (area->pageTableLogical != gcvNULL) + { + /* Return pointer to page table. */ + *PageTable = &area->pageTableLogical[index]; + } + else + { + /* Page table for secure area is handled in trust application. */ + *PageTable = gcvNULL; + } + + /* Build virtual address. */ + if (Mmu->hardware->mmuVersion == 0) + { + gcmkONERROR( + gckHARDWARE_BuildVirtualAddress(Mmu->hardware, index, 0, &address)); + } + else + { + gctUINT32 masterOffset = index / gcdMMU_STLB_4K_ENTRY_NUM + + area->dynamicMappingStart; + gctUINT32 slaveOffset = index % gcdMMU_STLB_4K_ENTRY_NUM; + + address = (masterOffset << gcdMMU_MTLB_SHIFT) + | (slaveOffset << gcdMMU_STLB_4K_SHIFT); + } + + if (Address != gcvNULL) + { + *Address = address; + } + + /* Release the mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex)); + + /* Success. */ + gcmkFOOTER_ARG("*PageTable=0x%x *Address=%08x", + *PageTable, gcmOPT_VALUE(Address)); + return gcvSTATUS_OK; + +OnError: + + if (mutex) + { + /* Release the mutex. 
*/ + gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex)); + } + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckMMU_FreePages +** +** Free pages inside the page table. +** +** INPUT: +** +** gckMMU Mmu +** Pointer to an gckMMU object. +** +** gctPOINTER PageTable +** Base address of the page table to free. +** +** gctSIZE_T PageCount +** Number of pages to free. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +_FreePages( + IN gckMMU Mmu, + IN gctBOOL Secure, + IN gctUINT32 Address, + IN gctPOINTER PageTable, + IN gctSIZE_T PageCount + ) +{ + gctUINT32_PTR node; + gceSTATUS status; + gctBOOL acquired = gcvFALSE; + gctUINT32 pageCount; + gcuQUEUEDATA data; + gcsADDRESS_AREA_PTR area = _GetProcessArea(Mmu, Secure); + + gcmkHEADER_ARG("Mmu=0x%x PageTable=0x%x PageCount=%lu", + Mmu, PageTable, PageCount); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU); + gcmkVERIFY_ARGUMENT(PageCount > 0); + + gcmkSAFECASTSIZET(pageCount, PageCount); + +#if gcdBOUNDARY_CHECK + pageCount += gcdBOUNDARY_CHECK * 2; +#endif + + /* Get the node by index. */ + node = area->mapLogical + ((gctUINT32_PTR)PageTable - area->pageTableLogical); + + if (pageCount != _GetPageCountOfUsedNode(node)) + { + gcmkONERROR(gcvSTATUS_INVALID_REQUEST); + } + +#if gcdBOUNDARY_CHECK + node -= gcdBOUNDARY_CHECK; +#endif + + gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE)); + acquired = gcvTRUE; + + if (Mmu->hardware->mmuVersion == 0) + { + _FillPageTable(PageTable, pageCount, Mmu->safeAddress); + } + + if (pageCount == 1) + { + /* Single page node. */ + node[0] = (~((1U<<8)-1)) | gcvMMU_SINGLE; + + if (PageTable != gcvNULL) + { +#if gcdUSE_MMU_EXCEPTION + /* Enable exception */ + _WritePageEntry(PageTable, (1 << 1)); +#else + _WritePageEntry(PageTable, 0); +#endif + } + } + else + { + /* Mark the node as free. 
*/ + node[0] = (pageCount << 8) | gcvMMU_FREE; + node[1] = ~0U; + + if (PageTable != gcvNULL) + { +#if gcdUSE_MMU_EXCEPTION + /* Enable exception */ + gcmkVERIFY_OK(_FillPageTable(PageTable, (gctUINT32)PageCount, 1 << 1)); +#else + gcmkVERIFY_OK(_FillPageTable(PageTable, (gctUINT32)PageCount, 0)); +#endif + } + } + + /* We have free nodes. */ + area->freeNodes = gcvTRUE; + + /* Record freed address range. */ + data.addressData.start = Address; + data.addressData.end = Address + (gctUINT32)PageCount * 4096; + gckQUEUE_Enqueue(&Mmu->recentFreedAddresses, &data); + + gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex)); + acquired = gcvFALSE; + +#if gcdENABLE_TRUST_APPLICATION + if (Mmu->hardware->options.secureMode == gcvSECURE_IN_TA) + { + gckKERNEL_SecurityUnmapMemory(Mmu->hardware->kernel, Address, (gctUINT32)PageCount); + } +#endif + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex)); + } + + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckMMU_AllocatePages( + IN gckMMU Mmu, + IN gctSIZE_T PageCount, + OUT gctPOINTER * PageTable, + OUT gctUINT32 * Address + ) +{ + return gckMMU_AllocatePagesEx( + Mmu, PageCount, gcvSURF_TYPE_UNKNOWN, gcvFALSE, PageTable, Address); +} + +gceSTATUS +gckMMU_AllocatePagesEx( + IN gckMMU Mmu, + IN gctSIZE_T PageCount, + IN gceSURF_TYPE Type, + IN gctBOOL Secure, + OUT gctPOINTER * PageTable, + OUT gctUINT32 * Address + ) +{ +#if gcdDISABLE_GPU_VIRTUAL_ADDRESS + gcmkPRINT("GPU virtual address is disabled."); + return gcvSTATUS_NOT_SUPPORTED; +#else + return _AllocatePages(Mmu, PageCount, Type, Secure, PageTable, Address); +#endif +} + +gceSTATUS +gckMMU_FreePages( + IN gckMMU Mmu, + IN gctBOOL Secure, + IN gctUINT32 Address, + IN gctPOINTER PageTable, + IN gctSIZE_T PageCount + ) +{ + return _FreePages(Mmu, Secure, Address, PageTable, PageCount); +} + +gceSTATUS +gckMMU_SetPage( + IN gckMMU Mmu, + IN gctPHYS_ADDR_T 
PageAddress, + IN gctBOOL Writable, + IN gctUINT32 *PageEntry + ) +{ + gctUINT32 addressExt; + gctUINT32 address; + + gcmkHEADER_ARG("Mmu=0x%x", Mmu); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU); + gcmkVERIFY_ARGUMENT(PageEntry != gcvNULL); + gcmkVERIFY_ARGUMENT(!(PageAddress & 0xFFF)); + + /* [31:0]. */ + address = (gctUINT32)(PageAddress & 0xFFFFFFFF); + /* [39:32]. */ + addressExt = (gctUINT32)((PageAddress >> 32) & 0xFF); + + if (Mmu->hardware->mmuVersion == 0) + { + _WritePageEntry(PageEntry, address); + } + else + { + _WritePageEntry(PageEntry, _SetPage(address, addressExt, gcvTRUE)); + } + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +#if gcdPROCESS_ADDRESS_SPACE +gceSTATUS +gckMMU_GetPageEntry( + IN gckMMU Mmu, + IN gctUINT32 Address, + IN gctUINT32_PTR *PageTable + ) +{ + gceSTATUS status; + struct _gcsMMU_STLB_CHUNK *stlb; + struct _gcsMMU_STLB_CHUNK **stlbs = Mmu->stlbs; + gctUINT32 offset = _MtlbOffset(Address); + gctUINT32 mtlbEntry; + gctBOOL ace = gckHARDWARE_IsFeatureAvailable(Mmu->hardware, gcvFEATURE_ACE); + + gcmkHEADER_ARG("Mmu=0x%x", Mmu); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU); + gcmkVERIFY_ARGUMENT((Address & 0xFFF) == 0); + + stlb = stlbs[offset]; + + if (stlb == gcvNULL) + { + gcmkONERROR(_AllocateStlb(Mmu->os, &stlb)); + + mtlbEntry = stlb->physBase + | gcdMMU_MTLB_4K_PAGE + | gcdMMU_MTLB_PRESENT + ; + + /* Insert Slave TLB address to Master TLB entry.*/ + _WritePageEntry(Mmu->mtlbLogical + offset, mtlbEntry); + + /* Record stlb. */ + stlbs[offset] = stlb; + } + + *PageTable = &stlb->logical[_StlbOffset(Address)]; + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS +_CheckMap( + IN gckMMU Mmu + ) +{ + gceSTATUS status; + gctUINT32_PTR map = area->mapLogical; + gctUINT32 index; + + for (index = area->heapList; index < area->pageTableEntries;) + { + /* Check the node type. 
*/ + switch (gcmENTRY_TYPE(map[index])) + { + case gcvMMU_SINGLE: + /* Move to next node. */ + index = map[index] >> 8; + break; + + case gcvMMU_FREE: + /* Move to next node. */ + index = map[index + 1]; + break; + + default: + gcmkFATAL("MMU table correcupted at index [%u] = %x!", index, map[index]); + gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); + } + } + + return gcvSTATUS_OK; + +OnError: + return status; +} + +gceSTATUS +gckMMU_FlatMapping( + IN gckMMU Mmu, + IN gctUINT32 Physical, + IN gctUINT32 NumPages + ) +{ + gceSTATUS status; + gctUINT32 index = _AddressToIndex(Mmu, Physical); + gctUINT32 i; + gctUINT32_PTR pageTable; + + for (i = 0; i < NumPages; i++) + { + gckMMU_GetPageEntry(Mmu, Physical + i * 4096, &pageTable); + + _WritePageEntry(pageTable, _SetPage(Physical + i * 4096, 0)); + } + + gcmkONERROR(_FillFlatMapping(Mmu, PhysBase, Size, gcvNULL)); + + return gcvSTATUS_OK; + +OnError: + + /* Roll back. */ + return status; +} + +gceSTATUS +gckMMU_FreePagesEx( + IN gckMMU Mmu, + IN gctUINT32 Address, + IN gctSIZE_T PageCount + ) +{ + gctUINT32_PTR node; + gceSTATUS status; + +#if gcdUSE_MMU_EXCEPTION + gctUINT32 i; + struct _gcsMMU_STLB_CHUNK *stlb; + struct _gcsMMU_STLB_CHUNK **stlbs = Mmu->stlbs; +#endif + + gcmkHEADER_ARG("Mmu=0x%x Address=0x%x PageCount=%lu", + Mmu, Address, PageCount); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU); + gcmkVERIFY_ARGUMENT(PageCount > 0); + + /* Get the node by index. */ + node = area->mapLogical + _AddressToIndex(Mmu, Address); + + gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE)); + + if (PageCount == 1) + { + /* Single page node. */ + node[0] = (~((1U<<8)-1)) | gcvMMU_SINGLE; + } + else + { + /* Mark the node as free. */ + node[0] = (PageCount << 8) | gcvMMU_FREE; + node[1] = ~0U; + } + + /* We have free nodes. 
*/ + area->freeNodes = gcvTRUE; + +#if gcdUSE_MMU_EXCEPTION + for (i = 0; i < PageCount; i++) + { + /* Get */ + stlb = stlbs[_MtlbOffset(Address)]; + + /* Enable exception */ + stlb->logical[_StlbOffset(Address)] = gcdMMU_STLB_EXCEPTION; + } +#endif + + gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex)); + + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} +#endif + +gceSTATUS +gckMMU_Flush( + IN gckMMU Mmu, + IN gceSURF_TYPE Type + ) +{ +#if !gcdPROCESS_ADDRESS_SPACE + gckHARDWARE hardware; +#endif + gctUINT32 mask; + gctINT i; + gctUINT j; + + if (Type == gcvSURF_VERTEX || Type == gcvSURF_INDEX) + { + mask = gcvPAGE_TABLE_DIRTY_BIT_FE; + } + else + { + mask = gcvPAGE_TABLE_DIRTY_BIT_OTHER; + } + + i = 0; + +#if gcdPROCESS_ADDRESS_SPACE + for (i = 0; i < gcdMAX_GPU_COUNT; i++) + { + gcmkVERIFY_OK( + gckOS_AtomSetMask(Mmu->pageTableDirty[i], mask)); + } +#else +#if gcdSHARED_PAGETABLE + for (i = 0; i < gcdMAX_GPU_COUNT; i++) + { + hardware = sharedPageTable->hardwares[i]; + if (hardware) + { + for (j = 0; j < gcvENGINE_GPU_ENGINE_COUNT; j++) + { + gcmkVERIFY_OK(gckOS_AtomSetMask(hardware->pageTableDirty[j], mask)); + } + } + } +#else + hardware = Mmu->hardware; + + for (j = 0 ; j < gcvENGINE_GPU_ENGINE_COUNT; j++) + { + gcmkVERIFY_OK( + gckOS_AtomSetMask(hardware->pageTableDirty[j], mask)); + } + + { + gcsLISTHEAD_PTR hardwareHead; + gcmkLIST_FOR_EACH(hardwareHead, &Mmu->hardwareList) + { + hardware = gcmCONTAINEROF(hardwareHead, _gckHARDWARE, mmuHead); + + if (hardware != Mmu->hardware) + { + for (j = 0 ; j < gcvENGINE_GPU_ENGINE_COUNT; j++) + { + gcmkVERIFY_OK( + gckOS_AtomSetMask(hardware->pageTableDirty[j], mask)); + } + } + } + } +#endif +#endif + + return gcvSTATUS_OK; +} + +gceSTATUS +gckMMU_DumpPageTableEntry( + IN gckMMU Mmu, + IN gctUINT32 Address + ) +{ +#if gcdPROCESS_ADDRESS_SPACE + gcsMMU_STLB_PTR *stlbs = Mmu->stlbs; + gcsMMU_STLB_PTR stlbDesc = stlbs[_MtlbOffset(Address)]; +#else 
+ gctUINT32_PTR pageTable; + gctUINT32 index; + gctUINT32 mtlb, stlb; +#endif + gcsADDRESS_AREA_PTR area = &Mmu->area[0]; + + gcmkHEADER_ARG("Mmu=0x%08X Address=0x%08X", Mmu, Address); + gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU); + + gcmkASSERT(Mmu->hardware->mmuVersion > 0); + +#if gcdPROCESS_ADDRESS_SPACE + if (stlbDesc) + { + gcmkPRINT(" STLB entry = 0x%08X", + _ReadPageEntry(&stlbDesc->logical[_StlbOffset(Address)])); + } + else + { + gcmkPRINT(" MTLB entry is empty."); + } +#else + mtlb = (Address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT; + + if (mtlb >= area->dynamicMappingStart) + { + stlb = (Address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT; + + pageTable = area->pageTableLogical; + + index = (mtlb - area->dynamicMappingStart) + * gcdMMU_STLB_4K_ENTRY_NUM + + stlb; + + gcmkPRINT(" Page table entry = 0x%08X", _ReadPageEntry(pageTable + index)); + } + else + { + gcsMMU_STLB_CHUNK_PTR stlbChunkObj = Mmu->staticSTLB; + gctUINT32 entry = Mmu->mtlbLogical[mtlb]; + + stlb = (Address & gcdMMU_STLB_64K_MASK) >> gcdMMU_STLB_64K_SHIFT; + + entry &= 0xFFFFFFF0; + + while (stlbChunkObj) + { + gctUINT i; + gctBOOL found = gcvFALSE; + for (i = 0; i < stlbChunkObj->mtlbEntryNum; ++i) + { + gctPHYS_ADDR_T stlbPhysBase = stlbChunkObj->physBase + (i * gcdMMU_STLB_64K_SIZE); + gctUINT32_PTR stlbLogical = + (gctUINT32_PTR)((gctUINT8_PTR)stlbChunkObj->logical + (i * gcdMMU_STLB_64K_SIZE)); + if (entry == stlbPhysBase) + { + gcmkPRINT(" Page table entry = 0x%08X", stlbLogical[stlb]); + found = gcvTRUE; + break; + } + } + if (found) + break; + stlbChunkObj = stlbChunkObj->next; + } + } +#endif + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +void +gckMMU_CheckSaftPage( + IN gckMMU Mmu + ) +{ + gctUINT8_PTR safeLogical = Mmu->safePageLogical; + gctUINT32 offsets[] = { + 0, + 64, + 128, + 256, + 2560, + 4000 + }; + + gctUINT32 i = 0; + + while (i < gcmCOUNTOF(offsets)) + { + if (safeLogical[offsets[i]] != 0) + { + gcmkPRINT("%s(%d) safe page is over written [%d] = %x", + 
__FUNCTION__, __LINE__, i, safeLogical[offsets[i]]); + } + } +} + +void +gckMMU_DumpAddressSpace( + IN gckMMU Mmu + ) +{ + gctUINT i; + gctUINT next; + gcsADDRESS_AREA_PTR area = &Mmu->area[0]; + gctUINT32_PTR map = area->mapLogical; + gctBOOL used = gcvFALSE; + gctUINT32 numPages; + + /* Grab the mutex. */ + gcmkVERIFY_OK(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE)); + + /* Find node which contains index. */ + for (i = 0; i < area->pageTableEntries; i = next) + { + switch (gcmENTRY_TYPE(map[i])) + { + case gcvMMU_SINGLE: + numPages = 1; + next = i + numPages; + used = gcvFALSE; + break; + + case gcvMMU_FREE: + numPages = map[i] >> 8; + next = i + numPages; + used = gcvFALSE; + break; + + case gcvMMU_USED: + numPages = 1; + next = i + numPages; + used = gcvTRUE; + break; + + default: + gcmkFATAL("MMU table correcupted at index %u!", i); + return; + } + + if (!used) + { + gcmkPRINT("Available Range [%d - %d)", i, i + numPages); + } + } + + /* Release the mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex)); + +} + +void +gckMMU_DumpRecentFreedAddress( + IN gckMMU Mmu + ) +{ + gckQUEUE queue = &Mmu->recentFreedAddresses; + gctUINT32 i; + gcuQUEUEDATA *data; + + if (queue->count) + { + gcmkPRINT(" Recent %d freed GPU address ranges:", queue->count); + + for (i = 0; i < queue->count; i++) + { + gckQUEUE_GetData(queue, i, &data); + + gcmkPRINT(" [%08X - %08X]", data->addressData.start, data->addressData.end); + } + } +} + +gceSTATUS +gckMMU_FillFlatMapping( + IN gckMMU Mmu, + IN gctUINT32 PhysBase, + IN gctSIZE_T Size + ) +{ + gceSTATUS status; + gckHARDWARE hardware = Mmu->hardware; + + if (hardware->mmuVersion) + { + gcmkONERROR(_FillFlatMapping(Mmu, PhysBase, Size, gcvNULL)); + } + + return gcvSTATUS_OK; + +OnError: + return status; +} + +gceSTATUS +gckMMU_IsFlatMapped( + IN gckMMU Mmu, + OUT gctUINT32 Physical, + OUT gctBOOL *In + ) +{ + gceSTATUS status; + gctUINT32 i; + gctBOOL inFlatmapping = gcvFALSE; + gcmkHEADER(); + + 
gcmkVERIFY_ARGUMENT(In != gcvNULL); + + if (gckHARDWARE_IsFeatureAvailable(Mmu->hardware, gcvFEATURE_MMU) == gcvFALSE) + { + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + } + + for (i = 0; i < Mmu->flatMappingRangeCount; i++) + { + if ((Physical >= Mmu->flatMappingRanges[i].start) && + (Physical < Mmu->flatMappingRanges[i].end)) + { + inFlatmapping = gcvTRUE; + break; + } + } + + *In = inFlatmapping; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckMMU_AttachHardware( + IN gckMMU Mmu, + IN gckHARDWARE Hardware + ) +{ + gcmkHEADER(); + + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + + gcmkTRACE_ZONE(gcvLEVEL_INFO, _GC_OBJ_ZONE, "Attach core %d", Hardware->core); + + gcsLIST_Add(&Hardware->mmuHead, &Mmu->hardwareList); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + + +#if !gcdPROCESS_ADDRESS_SPACE +gceSTATUS +gckMMU_GetPageEntry( + IN gckMMU Mmu, + IN gctUINT32 Address, + IN gctUINT32_PTR *PageTable + ) +{ + gctUINT32_PTR pageTable; + gctUINT32 index; + gctUINT32 mtlb, stlb; + gcsADDRESS_AREA_PTR area = &Mmu->area[0]; + + gcmkHEADER_ARG("Mmu=0x%08X Address=0x%08X", Mmu, Address); + gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU); + + gcmkASSERT(Mmu->hardware->mmuVersion > 0); + + mtlb = (Address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT; + + if (mtlb >= area->dynamicMappingStart) + { + stlb = (Address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT; + + pageTable = area->pageTableLogical; + + index = (mtlb - area->dynamicMappingStart) + * gcdMMU_STLB_4K_ENTRY_NUM + + stlb; + + *PageTable = pageTable + index; + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} +#endif + +/****************************************************************************** +****************************** T E S T C O D E ****************************** +******************************************************************************/ + diff --git a/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_power.c 
b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_power.c new file mode 100644 index 000000000000..805077d51aca --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_power.c @@ -0,0 +1,393 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. 
+* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#include "gc_hal_kernel_precomp.h" + +#ifdef CONFIG_L_PMC +#include +#endif /* CONFIG_L_PMC */ + +#define _GC_OBJ_ZONE gcvZONE_POWER + +/******************************************************************************\ +************************ Dynamic Voltage Frequency Setting ********************* +\******************************************************************************/ +#if gcdDVFS +#if gcmIS_DEBUG(gcdDEBUG_TRACE) +static gctUINT32 +_GetLoadHistory( + IN gckDVFS Dvfs, + IN gctUINT32 Select, + IN gctUINT32 Index +) +{ + return Dvfs->loads[Index]; +} +#endif + +static void +_IncreaseScale( + IN gckDVFS Dvfs, + IN gctUINT32 Load, + OUT gctUINT8 *Scale + ) +{ + if (Dvfs->currentScale < 32) + { + *Scale = Dvfs->currentScale + 8; + } + else + { + *Scale = Dvfs->currentScale + 8; + *Scale = gcmMIN(64, *Scale); + } +} + +static void +_RecordFrequencyHistory( + gckDVFS Dvfs, + gctUINT32 Frequency + ) +{ + gctUINT32 i = 0; + + struct _FrequencyHistory *history = Dvfs->frequencyHistory; + + for (i = 0; i < 16; i++) + { + 
if (history->frequency == Frequency) + { + break; + } + + if (history->frequency == 0) + { + history->frequency = Frequency; + break; + } + + history++; + } + + if (i < 16) + { + history->count++; + } +} + +#if gcmIS_DEBUG(gcdDEBUG_TRACE) +static gctUINT32 +_GetFrequencyHistory( + gckDVFS Dvfs, + gctUINT32 Frequency + ) +{ + gctUINT32 i = 0; + + struct _FrequencyHistory * history = Dvfs->frequencyHistory; + + for (i = 0; i < 16; i++) + { + if (history->frequency == Frequency) + { + break; + } + + history++; + } + + if (i < 16) + { + return history->count; + } + + return 0; +} +#endif + +static void +_Policy( + IN gckDVFS Dvfs, + IN gctUINT32 Load, + OUT gctUINT8 *Scale + ) +{ + gctUINT8 load[4], nextLoad; + gctUINT8 scale; + + /* Last 4 history. */ + load[0] = (Load & 0xFF); + load[1] = (Load & 0xFF00) >> 8; + load[2] = (Load & 0xFF0000) >> 16; + load[3] = (Load & 0xFF000000) >> 24; + + /* Determine target scale. */ +#ifdef CONFIG_L_PMC + if (load[0] > load_threshold) +#else /* CONFIG_L_PMC */ + if (load[0] > 54) +#endif /* CONFIG_L_PMC */ + { + _IncreaseScale(Dvfs, Load, &scale); + } + else + { + nextLoad = (load[0] + load[1] + load[2] + load[3])/4; + + scale = Dvfs->currentScale * (nextLoad) / 54; + + scale = gcmMAX(1, scale); + scale = gcmMIN(64, scale); + } + + Dvfs->totalConfig++; + + Dvfs->loads[(load[0]-1)/8]++; + + *Scale = scale; + + + if (Dvfs->totalConfig % 100 == 0) + { + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_POWER, "======================================================="); + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_POWER, "GPU Load: %-8d %-8d %-8d %-8d %-8d %-8d %-8d %-8d", + 8, 16, 24, 32, 40, 48, 56, 64); + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_POWER, " %-8d %-8d %-8d %-8d %-8d %-8d %-8d %-8d", + _GetLoadHistory(Dvfs,2, 0), + _GetLoadHistory(Dvfs,2, 1), + _GetLoadHistory(Dvfs,2, 2), + _GetLoadHistory(Dvfs,2, 3), + _GetLoadHistory(Dvfs,2, 4), + _GetLoadHistory(Dvfs,2, 5), + _GetLoadHistory(Dvfs,2, 6), + _GetLoadHistory(Dvfs,2, 7) + ); + + 
gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_POWER, "Frequency(MHz) %-8d %-8d %-8d %-8d %-8d", + 58, 120, 240, 360, 480); + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_POWER, " %-8d %-8d %-8d %-8d %-8d", + _GetFrequencyHistory(Dvfs, 58), + _GetFrequencyHistory(Dvfs,120), + _GetFrequencyHistory(Dvfs,240), + _GetFrequencyHistory(Dvfs,360), + _GetFrequencyHistory(Dvfs,480) + ); + } +} + +static void +_TimerFunction( + gctPOINTER Data + ) +{ + gceSTATUS status; + gckDVFS dvfs = (gckDVFS) Data; + gckHARDWARE hardware = dvfs->hardware; + gctUINT32 value; + gctUINT32 frequency; + gctUINT8 scale; + gctUINT32 t1, t2, consumed; + + gckOS_GetTicks(&t1); + + gcmkONERROR(gckHARDWARE_QueryLoad(hardware, &value)); + + /* determine target sacle. */ + _Policy(dvfs, value, &scale); + + /* Set frequency and voltage. */ + gcmkONERROR(gckOS_SetGPUFrequency(hardware->os, hardware->core, scale)); + + /* Query real frequency. */ + gcmkONERROR( + gckOS_QueryGPUFrequency(hardware->os, + hardware->core, + &frequency, + &dvfs->currentScale)); + + _RecordFrequencyHistory(dvfs, frequency); + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_POWER, + "Current frequency = %d", + frequency); + + /* Set period. */ + gcmkONERROR(gckHARDWARE_SetDVFSPeroid(hardware, frequency)); + +OnError: + /* Determine next querying time. */ + gckOS_GetTicks(&t2); + + consumed = gcmMIN(((long)t2 - (long)t1), 5); + + if (dvfs->stop == gcvFALSE) + { + gcmkVERIFY_OK(gckOS_StartTimer(hardware->os, + dvfs->timer, + dvfs->pollingTime - consumed)); + } + + return; +} + +gceSTATUS +gckDVFS_Construct( + IN gckHARDWARE Hardware, + OUT gckDVFS * Dvfs + ) +{ + gceSTATUS status; + gctPOINTER pointer; + gckDVFS dvfs = gcvNULL; + gckOS os = Hardware->os; + + gcmkHEADER_ARG("Hardware=0x%X", Hardware); + + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL); + + /* Allocate a gckDVFS manager. 
*/ + gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(struct _gckDVFS), &pointer)); + + gckOS_ZeroMemory(pointer, gcmSIZEOF(struct _gckDVFS)); + + dvfs = pointer; + + /* Initialization. */ + dvfs->hardware = Hardware; + dvfs->pollingTime = gcdDVFS_POLLING_TIME; + dvfs->os = Hardware->os; + dvfs->currentScale = 64; + + /* Create a polling timer. */ + gcmkONERROR(gckOS_CreateTimer(os, _TimerFunction, pointer, &dvfs->timer)); + + /* Initialize frequency and voltage adjustment helper. */ + gcmkONERROR(gckOS_PrepareGPUFrequency(os, Hardware->core)); + + /* Return result. */ + *Dvfs = dvfs; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Roll back. */ + if (dvfs) + { + if (dvfs->timer) + { + gcmkVERIFY_OK(gckOS_DestroyTimer(os, dvfs->timer)); + } + + gcmkOS_SAFE_FREE(os, dvfs); + } + + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckDVFS_Destroy( + IN gckDVFS Dvfs + ) +{ + gcmkHEADER_ARG("Dvfs=0x%X", Dvfs); + gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL); + + /* Deinitialize helper fuunction. */ + gcmkVERIFY_OK(gckOS_FinishGPUFrequency(Dvfs->os, Dvfs->hardware->core)); + + /* DestroyTimer. 
*/ + gcmkVERIFY_OK(gckOS_DestroyTimer(Dvfs->os, Dvfs->timer)); + + gcmkOS_SAFE_FREE(Dvfs->os, Dvfs); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gckDVFS_Start( + IN gckDVFS Dvfs + ) +{ + gcmkHEADER_ARG("Dvfs=0x%X", Dvfs); + gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL); + + gckHARDWARE_InitDVFS(Dvfs->hardware); + + Dvfs->stop = gcvFALSE; + + gckOS_StartTimer(Dvfs->os, Dvfs->timer, Dvfs->pollingTime); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gckDVFS_Stop( + IN gckDVFS Dvfs + ) +{ + gcmkHEADER_ARG("Dvfs=0x%X", Dvfs); + gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL); + + Dvfs->stop = gcvTRUE; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} +#endif diff --git a/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_precomp.h b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_precomp.h new file mode 100644 index 000000000000..ee2b12245e8c --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_precomp.h @@ -0,0 +1,63 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#ifndef __gc_hal_kernel_precomp_h_ +#define __gc_hal_kernel_precomp_h_ + +#include "gc_hal.h" +#include "gc_hal_driver.h" +#include "gc_hal_kernel.h" + +#endif /* __gc_hal_kernel_precomp_h_ */ diff --git a/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_security.c b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_security.c new file mode 100644 index 000000000000..abf5485052ae --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_security.c @@ -0,0 +1,286 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#include "gc_hal_kernel_precomp.h" + + + + +#define _GC_OBJ_ZONE gcvZONE_KERNEL + +#if gcdSECURITY + +/* +** Open a security service channel. 
+*/ +gceSTATUS +gckKERNEL_SecurityOpen( + IN gckKERNEL Kernel, + IN gctUINT32 GPU, + OUT gctUINT32 *Channel + ) +{ + gceSTATUS status; + + gcmkONERROR(gckOS_OpenSecurityChannel(Kernel->os, Kernel->core, Channel)); + gcmkONERROR(gckOS_InitSecurityChannel(*Channel)); + + return gcvSTATUS_OK; + +OnError: + return status; +} + +/* +** Close a security service channel +*/ +gceSTATUS +gckKERNEL_SecurityClose( + IN gctUINT32 Channel + ) +{ + return gcvSTATUS_OK; +} + +/* +** Security service interface. +*/ +gceSTATUS +gckKERNEL_SecurityCallService( + IN gctUINT32 Channel, + IN OUT gcsTA_INTERFACE * Interface +) +{ + gceSTATUS status; + gcmkHEADER(); + + gcmkVERIFY_ARGUMENT(Interface != gcvNULL); + + gckOS_CallSecurityService(Channel, Interface); + + status = Interface->result; + + gcmkONERROR(status); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckKERNEL_SecurityStartCommand( + IN gckKERNEL Kernel + ) +{ + gceSTATUS status; + gcsTA_INTERFACE iface; + + gcmkHEADER(); + + iface.command = KERNEL_START_COMMAND; + iface.u.StartCommand.gpu = Kernel->core; + + gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface)); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckKERNEL_SecurityAllocateSecurityMemory( + IN gckKERNEL Kernel, + IN gctUINT32 Bytes, + OUT gctUINT32 * Handle + ) +{ + gceSTATUS status; + gcsTA_INTERFACE iface; + + gcmkHEADER(); + + iface.command = KERNEL_ALLOCATE_SECRUE_MEMORY; + iface.u.AllocateSecurityMemory.bytes = Bytes; + + gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface)); + + *Handle = iface.u.AllocateSecurityMemory.memory_handle; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckKERNEL_SecurityExecute( + IN gckKERNEL Kernel, + IN gctPOINTER Buffer, + IN gctUINT32 Bytes + ) +{ + gceSTATUS status; +#if defined(LINUX) + gctPHYS_ADDR_T 
physical; + gctUINT32 address; +#endif + gcsTA_INTERFACE iface; + + gcmkHEADER(); + + iface.command = KERNEL_EXECUTE; + iface.u.Execute.command_buffer = (gctUINT32 *)Buffer; + iface.u.Execute.gpu = Kernel->core; + iface.u.Execute.command_buffer_length = Bytes; + +#if defined(LINUX) + gcmkONERROR(gckOS_GetPhysicalAddress(Kernel->os, Buffer, &physical)); + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical(Kernel->os, physical, &physical)); + gcmkSAFECASTPHYSADDRT(address, physical); + + iface.u.Execute.command_buffer = (gctUINT32 *)address; +#endif + + gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface)); + + /* Update queue tail pointer. */ + gcmkONERROR(gckHARDWARE_UpdateQueueTail( + Kernel->hardware, 0, 0 + )); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckKERNEL_SecurityMapMemory( + IN gckKERNEL Kernel, + IN gctUINT32 *PhysicalArray, + IN gctUINT32 PageCount, + OUT gctUINT32 * GPUAddress + ) +{ + gceSTATUS status; + gcsTA_INTERFACE iface; +#if defined(LINUX) + gctPHYS_ADDR_T physical; + gctUINT32 address; +#endif + + gcmkHEADER(); + + iface.command = KERNEL_MAP_MEMORY; + +#if defined(LINUX) + gcmkONERROR(gckOS_GetPhysicalAddress(Kernel->os, PhysicalArray, &physical)); + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical(Kernel->os, physical, &physical)); + gcmkSAFECASTPHYSADDRT(address, physical); + iface.u.MapMemory.physicals = (gctUINT32 *)address; +#endif + + iface.u.MapMemory.pageCount = PageCount; + + gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface)); + + *GPUAddress = iface.u.MapMemory.gpuAddress; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckKERNEL_SecurityUnmapMemory( + IN gckKERNEL Kernel, + IN gctUINT32 GPUAddress, + IN gctUINT32 PageCount + ) +{ + gceSTATUS status; + gcsTA_INTERFACE iface; + + gcmkHEADER(); + + iface.command = KERNEL_UNMAP_MEMORY; + + iface.u.UnmapMemory.gpuAddress = 
GPUAddress; + iface.u.UnmapMemory.pageCount = PageCount; + + gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface)); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +#endif diff --git a/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_security_v1.c b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_security_v1.c new file mode 100644 index 000000000000..89a4333f6f2f --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_security_v1.c @@ -0,0 +1,320 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#include "gc_hal_kernel_precomp.h" + + + + +#define _GC_OBJ_ZONE gcvZONE_KERNEL + +#if gcdENABLE_TRUST_APPLICATION + +/* +** Open a security service channel. 
+*/ +gceSTATUS +gckKERNEL_SecurityOpen( + IN gckKERNEL Kernel, + IN gctUINT32 GPU, + OUT gctUINT32 *Channel + ) +{ + gceSTATUS status; + + gcmkONERROR(gckOS_OpenSecurityChannel(Kernel->os, Kernel->core, Channel)); + gcmkONERROR(gckOS_InitSecurityChannel(*Channel)); + + return gcvSTATUS_OK; + +OnError: + return status; +} + +/* +** Close a security service channel +*/ +gceSTATUS +gckKERNEL_SecurityClose( + IN gctUINT32 Channel + ) +{ + return gcvSTATUS_OK; +} + +/* +** Security service interface. +*/ +gceSTATUS +gckKERNEL_SecurityCallService( + IN gctUINT32 Channel, + IN OUT gcsTA_INTERFACE * Interface +) +{ + gceSTATUS status; + gcmkHEADER(); + + gcmkVERIFY_ARGUMENT(Interface != gcvNULL); + + gckOS_CallSecurityService(Channel, Interface); + + status = Interface->result; + + gcmkONERROR(status); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckKERNEL_SecurityStartCommand( + IN gckKERNEL Kernel, + IN gctUINT32 Address, + IN gctUINT32 Bytes + ) +{ + gceSTATUS status; + gcsTA_INTERFACE iface; + + gcmkHEADER(); + + iface.command = KERNEL_START_COMMAND; + iface.u.StartCommand.gpu = Kernel->core; + iface.u.StartCommand.address = Address; + iface.u.StartCommand.bytes = Bytes; + + gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface)); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckKERNEL_SecurityAllocateSecurityMemory( + IN gckKERNEL Kernel, + IN gctUINT32 Bytes, + OUT gctUINT32 * Handle + ) +{ + gceSTATUS status; + gcsTA_INTERFACE iface; + + gcmkHEADER(); + + iface.command = KERNEL_ALLOCATE_SECRUE_MEMORY; + iface.u.AllocateSecurityMemory.bytes = Bytes; + + gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface)); + + *Handle = iface.u.AllocateSecurityMemory.memory_handle; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckKERNEL_SecurityMapMemory( + IN 
gckKERNEL Kernel, + IN gctUINT32 *PhysicalArray, + IN gctPHYS_ADDR_T Physical, + IN gctUINT32 PageCount, + OUT gctUINT32 * GPUAddress + ) +{ + gceSTATUS status; + gcsTA_INTERFACE iface; + + gcmkHEADER(); + + iface.command = KERNEL_MAP_MEMORY; + + iface.u.MapMemory.physicals = PhysicalArray; + iface.u.MapMemory.physical = Physical; + iface.u.MapMemory.pageCount = PageCount; + iface.u.MapMemory.gpuAddress = *GPUAddress; + + gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface)); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckKERNEL_SecurityDumpMMUException( + IN gckKERNEL Kernel + ) +{ + gceSTATUS status; + gcsTA_INTERFACE iface; + + gcmkHEADER(); + + iface.command = KERNEL_DUMP_MMU_EXCEPTION; + + gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface)); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + + + +gceSTATUS +gckKERNEL_SecurityUnmapMemory( + IN gckKERNEL Kernel, + IN gctUINT32 GPUAddress, + IN gctUINT32 PageCount + ) +{ + gceSTATUS status; + gcsTA_INTERFACE iface; + + gcmkHEADER(); + + iface.command = KERNEL_UNMAP_MEMORY; + + iface.u.UnmapMemory.gpuAddress = GPUAddress; + iface.u.UnmapMemory.pageCount = PageCount; + + gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface)); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckKERNEL_ReadMMUException( + IN gckKERNEL Kernel, + IN gctUINT32_PTR MMUStatus, + IN gctUINT32_PTR MMUException + ) +{ + gceSTATUS status; + gcsTA_INTERFACE iface; + + gcmkHEADER(); + + iface.command = KERNEL_READ_MMU_EXCEPTION; + + gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface)); + + *MMUStatus = iface.u.ReadMMUException.mmuStatus; + *MMUException = iface.u.ReadMMUException.mmuException; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS 
+gckKERNEL_HandleMMUException( + IN gckKERNEL Kernel, + IN gctUINT32 MMUStatus, + IN gctPHYS_ADDR_T Physical, + IN gctUINT32 GPUAddress + ) +{ + gceSTATUS status; + gcsTA_INTERFACE iface; + + gcmkHEADER(); + + iface.command = KERNEL_HANDLE_MMU_EXCEPTION; + + iface.u.HandleMMUException.mmuStatus = MMUStatus; + iface.u.HandleMMUException.physical = Physical; + iface.u.HandleMMUException.gpuAddress = GPUAddress; + + gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface)); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + + + + +#endif diff --git a/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_video_memory.c b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_video_memory.c new file mode 100644 index 000000000000..17c6b13af164 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/gc_hal_kernel_video_memory.c @@ -0,0 +1,3331 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#include "gc_hal_kernel_precomp.h" + +#define _GC_OBJ_ZONE gcvZONE_VIDMEM + +/******************************************************************************\ +******************************* Private Functions ****************************** +\******************************************************************************/ + +/******************************************************************************* +** +** _Split +** +** Split a node on the required byte boundary. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gcuVIDMEM_NODE_PTR Node +** Pointer to the node to split. +** +** gctSIZE_T Bytes +** Number of bytes to keep in the node. +** +** OUTPUT: +** +** Nothing. +** +** RETURNS: +** +** gctBOOL +** gcvTRUE if the node was split successfully, or gcvFALSE if there is an +** error. +** +*/ +static gctBOOL +_Split( + IN gckOS Os, + IN gcuVIDMEM_NODE_PTR Node, + IN gctSIZE_T Bytes + ) +{ + gcuVIDMEM_NODE_PTR node; + gctPOINTER pointer = gcvNULL; + + /* Make sure the byte boundary makes sense. */ + if ((Bytes <= 0) || (Bytes > Node->VidMem.bytes)) + { + return gcvFALSE; + } + + /* Allocate a new gcuVIDMEM_NODE object. */ + if (gcmIS_ERROR(gckOS_Allocate(Os, + gcmSIZEOF(gcuVIDMEM_NODE), + &pointer))) + { + /* Error. */ + return gcvFALSE; + } + + node = pointer; + + /* Initialize gcuVIDMEM_NODE structure. */ + node->VidMem.offset = Node->VidMem.offset + Bytes; + node->VidMem.bytes = Node->VidMem.bytes - Bytes; + node->VidMem.alignment = 0; + node->VidMem.locked = 0; + node->VidMem.memory = Node->VidMem.memory; + node->VidMem.pool = Node->VidMem.pool; + node->VidMem.physical = Node->VidMem.physical; +#ifdef __QNXNTO__ + node->VidMem.processID = 0; + node->VidMem.logical = gcvNULL; +#endif + + /* Insert node behind specified node. 
*/ + node->VidMem.next = Node->VidMem.next; + node->VidMem.prev = Node; + Node->VidMem.next = node->VidMem.next->VidMem.prev = node; + + /* Insert free node behind specified node. */ + node->VidMem.nextFree = Node->VidMem.nextFree; + node->VidMem.prevFree = Node; + Node->VidMem.nextFree = node->VidMem.nextFree->VidMem.prevFree = node; + + /* Adjust size of specified node. */ + Node->VidMem.bytes = Bytes; + + /* Success. */ + return gcvTRUE; +} + +/******************************************************************************* +** +** _Merge +** +** Merge two adjacent nodes together. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gcuVIDMEM_NODE_PTR Node +** Pointer to the first of the two nodes to merge. +** +** OUTPUT: +** +** Nothing. +** +*/ +static gceSTATUS +_Merge( + IN gckOS Os, + IN gcuVIDMEM_NODE_PTR Node + ) +{ + gcuVIDMEM_NODE_PTR node; + gceSTATUS status; + + /* Save pointer to next node. */ + node = Node->VidMem.next; + + /* This is a good time to make sure the heap is not corrupted. */ + if (Node->VidMem.offset + Node->VidMem.bytes != node->VidMem.offset) + { + /* Corrupted heap. */ + gcmkASSERT( + Node->VidMem.offset + Node->VidMem.bytes == node->VidMem.offset); + return gcvSTATUS_HEAP_CORRUPTED; + } + + /* Adjust byte count. */ + Node->VidMem.bytes += node->VidMem.bytes; + + /* Unlink next node from linked list. */ + Node->VidMem.next = node->VidMem.next; + Node->VidMem.nextFree = node->VidMem.nextFree; + + Node->VidMem.next->VidMem.prev = + Node->VidMem.nextFree->VidMem.prevFree = Node; + + /* Free next node. 
*/ + status = gcmkOS_SAFE_FREE(Os, node); + return status; +} + +/******************************************************************************\ +******************************* gckVIDMEM API Code ****************************** +\******************************************************************************/ + +/******************************************************************************* +** +** gckVIDMEM_ConstructVirtual +** +** Construct a new gcuVIDMEM_NODE union for virtual memory. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gctSIZE_T Bytes +** Number of byte to allocate. +** +** OUTPUT: +** +** gcuVIDMEM_NODE_PTR * Node +** Pointer to a variable that receives the gcuVIDMEM_NODE union pointer. +*/ +gceSTATUS +gckVIDMEM_ConstructVirtual( + IN gckKERNEL Kernel, + IN gctUINT32 Flag, + IN gctSIZE_T Bytes, + OUT gcuVIDMEM_NODE_PTR * Node + ) +{ + gckOS os; + gceSTATUS status; + gcuVIDMEM_NODE_PTR node = gcvNULL; + gctPOINTER pointer = gcvNULL; + gctINT i; + + gcmkHEADER_ARG("Kernel=0x%x Flag=%x Bytes=%lu", Kernel, Flag, Bytes); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + gcmkVERIFY_ARGUMENT(Bytes > 0); + gcmkVERIFY_ARGUMENT(Node != gcvNULL); + + /* Extract the gckOS object pointer. */ + os = Kernel->os; + gcmkVERIFY_OBJECT(os, gcvOBJ_OS); + + /* Allocate an gcuVIDMEM_NODE union. */ + gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcuVIDMEM_NODE), &pointer)); + + node = pointer; + + /* Initialize gcuVIDMEM_NODE union for virtual memory. */ + node->Virtual.kernel = Kernel; + node->Virtual.contiguous = Flag & gcvALLOC_FLAG_CONTIGUOUS; + node->Virtual.logical = gcvNULL; + node->Virtual.secure = (Flag & gcvALLOC_FLAG_SECURITY) != 0; + node->Virtual.onFault = (Flag & gcvALLOC_FLAG_ALLOC_ON_FAULT) != 0; + + for (i = 0; i < gcdMAX_GPU_COUNT; i++) + { + node->Virtual.lockeds[i] = 0; + node->Virtual.pageTables[i] = gcvNULL; + } + + /* Allocate the virtual memory. 
*/ + gcmkONERROR( + gckOS_AllocatePagedMemoryEx(os, + Flag, + node->Virtual.bytes = Bytes, + &node->Virtual.gid, + &node->Virtual.physical)); + + if (node->Virtual.onFault == gcvTRUE) + { + gcsLIST_Add(&node->Virtual.head, &Kernel->db->onFaultVidmemList); + } + + /* Return pointer to the gcuVIDMEM_NODE union. */ + *Node = node; + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, + "Created virtual node 0x%x for %u bytes @ 0x%x", + node, Bytes, node->Virtual.physical); + + /* Success. */ + gcmkFOOTER_ARG("*Node=0x%x", *Node); + return gcvSTATUS_OK; + +OnError: + /* Roll back. */ + if (node != gcvNULL) + { + /* Free the structure. */ + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, node)); + } + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckVIDMEM_DestroyVirtual +** +** Destroy an gcuVIDMEM_NODE union for virtual memory. +** +** INPUT: +** +** gcuVIDMEM_NODE_PTR Node +** Pointer to a gcuVIDMEM_NODE union. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckVIDMEM_DestroyVirtual( + IN gcuVIDMEM_NODE_PTR Node + ) +{ + gckOS os; + + gcmkHEADER_ARG("Node=0x%x", Node); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Node->Virtual.kernel, gcvOBJ_KERNEL); + + /* Extact the gckOS object pointer. */ + os = Node->Virtual.kernel->os; + gcmkVERIFY_OBJECT(os, gcvOBJ_OS); + + if (Node->Virtual.onFault == gcvTRUE) + { + gcsLIST_Del(&Node->Virtual.head); + } + + /* Delete the gcuVIDMEM_NODE union. */ + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, Node)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckVIDMEM_Construct +** +** Construct a new gckVIDMEM object. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctUINT32 BaseAddress +** Base address for the video memory heap. +** +** gctSIZE_T Bytes +** Number of bytes in the video memory heap. 
+** +** gctSIZE_T Threshold +** Minimum number of bytes beyond am allocation before the node is +** split. Can be used as a minimum alignment requirement. +** +** gctSIZE_T BankSize +** Number of bytes per physical memory bank. Used by bank +** optimization. +** +** OUTPUT: +** +** gckVIDMEM * Memory +** Pointer to a variable that will hold the pointer to the gckVIDMEM +** object. +*/ +gceSTATUS +gckVIDMEM_Construct( + IN gckOS Os, + IN gctUINT32 BaseAddress, + IN gctSIZE_T Bytes, + IN gctSIZE_T Threshold, + IN gctSIZE_T BankSize, + OUT gckVIDMEM * Memory + ) +{ + gckVIDMEM memory = gcvNULL; + gceSTATUS status; + gcuVIDMEM_NODE_PTR node; + gctINT i, banks = 0; + gctPOINTER pointer = gcvNULL; + gctUINT32 heapBytes; + gctUINT32 bankSize; + + gcmkHEADER_ARG("Os=0x%x BaseAddress=%08x Bytes=%lu Threshold=%lu " + "BankSize=%lu", + Os, BaseAddress, Bytes, Threshold, BankSize); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Bytes > 0); + gcmkVERIFY_ARGUMENT(Memory != gcvNULL); + + gcmkSAFECASTSIZET(heapBytes, Bytes); + gcmkSAFECASTSIZET(bankSize, BankSize); + + /* Allocate the gckVIDMEM object. */ + gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(struct _gckVIDMEM), &pointer)); + gckOS_ZeroMemory(pointer, gcmSIZEOF(struct _gckVIDMEM)); + + memory = pointer; + + /* Initialize the gckVIDMEM object. */ + memory->object.type = gcvOBJ_VIDMEM; + memory->os = Os; + + /* Set video memory heap information. */ + memory->baseAddress = BaseAddress; + memory->bytes = heapBytes; + memory->freeBytes = heapBytes; + memory->minFreeBytes = heapBytes; + memory->capability = ~0u; + memory->threshold = Threshold; + memory->mutex = gcvNULL; + + BaseAddress = 0; + + /* Walk all possible banks. */ + for (i = 0; i < gcmCOUNTOF(memory->sentinel); ++i) + { + gctUINT32 bytes; + + if (BankSize == 0) + { + /* Use all bytes for the first bank. */ + bytes = heapBytes; + } + else + { + /* Compute number of bytes for this bank. 
*/ + bytes = gcmALIGN(BaseAddress + 1, bankSize) - BaseAddress; + + if (bytes > heapBytes) + { + /* Make sure we don't exceed the total number of bytes. */ + bytes = heapBytes; + } + } + + if (bytes == 0) + { + /* Mark heap is not used. */ + memory->sentinel[i].VidMem.next = + memory->sentinel[i].VidMem.prev = + memory->sentinel[i].VidMem.nextFree = + memory->sentinel[i].VidMem.prevFree = gcvNULL; + continue; + } + + /* Allocate one gcuVIDMEM_NODE union. */ + gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcuVIDMEM_NODE), &pointer)); + + node = pointer; + + /* Initialize gcuVIDMEM_NODE union. */ + node->VidMem.memory = memory; + + node->VidMem.next = + node->VidMem.prev = + node->VidMem.nextFree = + node->VidMem.prevFree = &memory->sentinel[i]; + + node->VidMem.offset = BaseAddress; + node->VidMem.bytes = bytes; + node->VidMem.alignment = 0; + node->VidMem.physical = 0; + node->VidMem.pool = gcvPOOL_UNKNOWN; + + node->VidMem.locked = 0; + +#ifdef __QNXNTO__ + node->VidMem.processID = 0; + node->VidMem.logical = gcvNULL; +#endif + + + /* Initialize the linked list of nodes. */ + memory->sentinel[i].VidMem.next = + memory->sentinel[i].VidMem.prev = + memory->sentinel[i].VidMem.nextFree = + memory->sentinel[i].VidMem.prevFree = node; + + /* Mark sentinel. */ + memory->sentinel[i].VidMem.bytes = 0; + + /* Adjust address for next bank. */ + BaseAddress += bytes; + heapBytes -= bytes; + banks ++; + } + + /* Assign all the bank mappings. 
*/ + memory->mapping[gcvSURF_RENDER_TARGET] = banks - 1; + memory->mapping[gcvSURF_BITMAP] = banks - 1; + if (banks > 1) --banks; + memory->mapping[gcvSURF_DEPTH] = banks - 1; + memory->mapping[gcvSURF_HIERARCHICAL_DEPTH] = banks - 1; + if (banks > 1) --banks; + memory->mapping[gcvSURF_TEXTURE] = banks - 1; + if (banks > 1) --banks; + memory->mapping[gcvSURF_VERTEX] = banks - 1; + if (banks > 1) --banks; + memory->mapping[gcvSURF_INDEX] = banks - 1; + if (banks > 1) --banks; + memory->mapping[gcvSURF_TILE_STATUS] = banks - 1; + if (banks > 1) --banks; + memory->mapping[gcvSURF_TYPE_UNKNOWN] = 0; + + memory->mapping[gcvSURF_ICACHE] = 0; + memory->mapping[gcvSURF_TXDESC] = 0; + memory->mapping[gcvSURF_FENCE] = 0; + memory->mapping[gcvSURF_TFBHEADER] = 0; + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, + "[GALCORE] INDEX: bank %d", + memory->mapping[gcvSURF_INDEX]); + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, + "[GALCORE] VERTEX: bank %d", + memory->mapping[gcvSURF_VERTEX]); + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, + "[GALCORE] TEXTURE: bank %d", + memory->mapping[gcvSURF_TEXTURE]); + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, + "[GALCORE] RENDER_TARGET: bank %d", + memory->mapping[gcvSURF_RENDER_TARGET]); + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, + "[GALCORE] DEPTH: bank %d", + memory->mapping[gcvSURF_DEPTH]); + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, + "[GALCORE] TILE_STATUS: bank %d", + memory->mapping[gcvSURF_TILE_STATUS]); + + /* Allocate the mutex. */ + gcmkONERROR(gckOS_CreateMutex(Os, &memory->mutex)); + + /* Return pointer to the gckVIDMEM object. */ + *Memory = memory; + + /* Success. */ + gcmkFOOTER_ARG("*Memory=0x%x", *Memory); + return gcvSTATUS_OK; + +OnError: + /* Roll back. */ + if (memory != gcvNULL) + { + if (memory->mutex != gcvNULL) + { + /* Delete the mutex. */ + gcmkVERIFY_OK(gckOS_DeleteMutex(Os, memory->mutex)); + } + + for (i = 0; i < banks; ++i) + { + /* Free the heap. 
*/ + gcmkASSERT(memory->sentinel[i].VidMem.next != gcvNULL); + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, memory->sentinel[i].VidMem.next)); + } + + /* Free the object. */ + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, memory)); + } + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckVIDMEM_Destroy +** +** Destroy an gckVIDMEM object. +** +** INPUT: +** +** gckVIDMEM Memory +** Pointer to an gckVIDMEM object to destroy. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckVIDMEM_Destroy( + IN gckVIDMEM Memory + ) +{ + gcuVIDMEM_NODE_PTR node, next; + gctINT i; + + gcmkHEADER_ARG("Memory=0x%x", Memory); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM); + + /* Walk all sentinels. */ + for (i = 0; i < gcmCOUNTOF(Memory->sentinel); ++i) + { + /* Bail out of the heap is not used. */ + if (Memory->sentinel[i].VidMem.next == gcvNULL) + { + break; + } + + /* Walk all the nodes until we reach the sentinel. */ + for (node = Memory->sentinel[i].VidMem.next; + node->VidMem.bytes != 0; + node = next) + { + /* Save pointer to the next node. */ + next = node->VidMem.next; + + /* Free the node. */ + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Memory->os, node)); + } + } + + /* Free the mutex. */ + gcmkVERIFY_OK(gckOS_DeleteMutex(Memory->os, Memory->mutex)); + + /* Mark the object as unknown. */ + Memory->object.type = gcvOBJ_UNKNOWN; + + /* Free the gckVIDMEM object. */ + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Memory->os, Memory)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +#if gcdENABLE_BANK_ALIGNMENT + +#if !gcdBANK_BIT_START +#error gcdBANK_BIT_START not defined. +#endif + +#if !gcdBANK_BIT_END +#error gcdBANK_BIT_END not defined. +#endif +/******************************************************************************* +** _GetSurfaceBankAlignment +** +** Return the required offset alignment required to the make BaseAddress +** aligned properly. 
+** +** INPUT: +** +** gckOS Os +** Pointer to gcoOS object. +** +** gceSURF_TYPE Type +** Type of allocation. +** +** gctUINT32 BaseAddress +** Base address of current video memory node. +** +** OUTPUT: +** +** gctUINT32_PTR AlignmentOffset +** Pointer to a variable that will hold the number of bytes to skip in +** the current video memory node in order to make the alignment bank +** aligned. +*/ +static gceSTATUS +_GetSurfaceBankAlignment( + IN gckKERNEL Kernel, + IN gceSURF_TYPE Type, + IN gctUINT32 BaseAddress, + OUT gctUINT32_PTR AlignmentOffset + ) +{ + gctUINT32 bank; + /* To retrieve the bank. */ + static const gctUINT32 bankMask = (0xFFFFFFFF << gcdBANK_BIT_START) + ^ (0xFFFFFFFF << (gcdBANK_BIT_END + 1)); + + /* To retrieve the bank and all the lower bytes. */ + static const gctUINT32 byteMask = ~(0xFFFFFFFF << (gcdBANK_BIT_END + 1)); + + gcmkHEADER_ARG("Type=%d BaseAddress=0x%x ", Type, BaseAddress); + + /* Verify the arguments. */ + gcmkVERIFY_ARGUMENT(AlignmentOffset != gcvNULL); + + switch (Type) + { + case gcvSURF_RENDER_TARGET: + bank = (BaseAddress & bankMask) >> (gcdBANK_BIT_START); + + /* Align to the first bank. */ + *AlignmentOffset = (bank == 0) ? + 0 : + ((1 << (gcdBANK_BIT_END + 1)) + 0) - (BaseAddress & byteMask); + break; + + case gcvSURF_DEPTH: + bank = (BaseAddress & bankMask) >> (gcdBANK_BIT_START); + + /* Align to the third bank. */ + *AlignmentOffset = (bank == 2) ? + 0 : + ((1 << (gcdBANK_BIT_END + 1)) + (2 << gcdBANK_BIT_START)) - (BaseAddress & byteMask); + + /* Minimum 256 byte alignment needed for fast_msaa. */ + if ((gcdBANK_CHANNEL_BIT > 7) || + ((gckHARDWARE_IsFeatureAvailable(Kernel->hardware, gcvFEATURE_FAST_MSAA) != gcvSTATUS_TRUE) && + (gckHARDWARE_IsFeatureAvailable(Kernel->hardware, gcvFEATURE_SMALL_MSAA) != gcvSTATUS_TRUE))) + { + /* Add a channel offset at the channel bit. */ + *AlignmentOffset += (1 << gcdBANK_CHANNEL_BIT); + } + break; + + default: + /* no alignment needed. 
*/ + *AlignmentOffset = 0; + } + + /* Return the status. */ + gcmkFOOTER_ARG("*AlignmentOffset=%u", *AlignmentOffset); + return gcvSTATUS_OK; +} +#endif + +static gcuVIDMEM_NODE_PTR +_FindNode( + IN gckKERNEL Kernel, + IN gckVIDMEM Memory, + IN gctINT Bank, + IN gctSIZE_T Bytes, + IN gceSURF_TYPE Type, + IN OUT gctUINT32_PTR Alignment + ) +{ + gcuVIDMEM_NODE_PTR node; + gctUINT32 alignment; + +#if gcdENABLE_BANK_ALIGNMENT + gctUINT32 bankAlignment; + gceSTATUS status; +#endif + + if (Memory->sentinel[Bank].VidMem.nextFree == gcvNULL) + { + /* No free nodes left. */ + return gcvNULL; + } + +#if gcdENABLE_BANK_ALIGNMENT + /* Walk all free nodes until we have one that is big enough or we have + ** reached the sentinel. */ + for (node = Memory->sentinel[Bank].VidMem.nextFree; + node->VidMem.bytes != 0; + node = node->VidMem.nextFree) + { + if (node->VidMem.bytes < Bytes) + { + continue; + } + + gcmkONERROR(_GetSurfaceBankAlignment( + Kernel, + Type, + node->VidMem.memory->baseAddress + node->VidMem.offset, + &bankAlignment)); + + bankAlignment = gcmALIGN(bankAlignment, *Alignment); + + /* Compute number of bytes to skip for alignment. */ + alignment = (*Alignment == 0) + ? 0 + : (*Alignment - (node->VidMem.offset % *Alignment)); + + if (alignment == *Alignment) + { + /* Node is already aligned. */ + alignment = 0; + } + + if (node->VidMem.bytes >= Bytes + alignment + bankAlignment) + { + /* This node is big enough. */ + *Alignment = alignment + bankAlignment; + return node; + } + } +#endif + + /* Walk all free nodes until we have one that is big enough or we have + reached the sentinel. */ + for (node = Memory->sentinel[Bank].VidMem.nextFree; + node->VidMem.bytes != 0; + node = node->VidMem.nextFree) + { + gctUINT offset; + + gctINT modulo; + + gcmkSAFECASTSIZET(offset, node->VidMem.offset); + + modulo = gckMATH_ModuloInt(offset, *Alignment); + + /* Compute number of bytes to skip for alignment. */ + alignment = (*Alignment == 0) ? 
0 : (*Alignment - modulo); + + if (alignment == *Alignment) + { + /* Node is already aligned. */ + alignment = 0; + } + + if (node->VidMem.bytes >= Bytes + alignment) + { + /* This node is big enough. */ + *Alignment = alignment; + return node; + } + } + +#if gcdENABLE_BANK_ALIGNMENT +OnError: +#endif + /* Not enough memory. */ + return gcvNULL; +} + +/******************************************************************************* +** +** gckVIDMEM_AllocateLinear +** +** Allocate linear memory from the gckVIDMEM object. +** +** INPUT: +** +** gckVIDMEM Memory +** Pointer to an gckVIDMEM object. +** +** gctSIZE_T Bytes +** Number of bytes to allocate. +** +** gctUINT32 Alignment +** Byte alignment for allocation. +** +** gceSURF_TYPE Type +** Type of surface to allocate (use by bank optimization). +** +** gctBOOL Specified +** If user must use this pool, it should set Specified to gcvTRUE, +** otherwise allocator may reserve some memory for other usage, such +** as small block size allocation request. +** +** OUTPUT: +** +** gcuVIDMEM_NODE_PTR * Node +** Pointer to a variable that will hold the allocated memory node. +*/ +gceSTATUS +gckVIDMEM_AllocateLinear( + IN gckKERNEL Kernel, + IN gckVIDMEM Memory, + IN gctSIZE_T Bytes, + IN gctUINT32 Alignment, + IN gceSURF_TYPE Type, + IN gctBOOL Specified, + OUT gcuVIDMEM_NODE_PTR * Node + ) +{ + gceSTATUS status; + gcuVIDMEM_NODE_PTR node; + gctUINT32 alignment; + gctINT bank, i; + gctBOOL acquired = gcvFALSE; + + gcmkHEADER_ARG("Memory=0x%x Bytes=%lu Alignment=%u Type=%d", + Memory, Bytes, Alignment, Type); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM); + gcmkVERIFY_ARGUMENT(Bytes > 0); + gcmkVERIFY_ARGUMENT(Node != gcvNULL); + gcmkVERIFY_ARGUMENT(Type < gcvSURF_NUM_TYPES); + + /* Acquire the mutex. */ + gcmkONERROR(gckOS_AcquireMutex(Memory->os, Memory->mutex, gcvINFINITE)); + + acquired = gcvTRUE; + + if (Bytes > Memory->freeBytes) + { + /* Not enough memory. 
*/ + status = gcvSTATUS_OUT_OF_MEMORY; + goto OnError; + } + +#if gcdSMALL_BLOCK_SIZE + if ((Memory->freeBytes < (Memory->bytes/gcdRATIO_FOR_SMALL_MEMORY)) + && (Bytes >= gcdSMALL_BLOCK_SIZE) + && (Specified == gcvFALSE) + ) + { + /* The left memory is for small memory.*/ + status = gcvSTATUS_OUT_OF_MEMORY; + goto OnError; + } +#endif + + /* Find the default bank for this surface type. */ + gcmkASSERT((gctINT) Type < gcmCOUNTOF(Memory->mapping)); + bank = Memory->mapping[Type]; + alignment = Alignment; + + /* Find a free node in the default bank. */ + node = _FindNode(Kernel, Memory, bank, Bytes, Type, &alignment); + + /* Out of memory? */ + if (node == gcvNULL) + { + /* Walk all lower banks. */ + for (i = bank - 1; i >= 0; --i) + { + /* Find a free node inside the current bank. */ + node = _FindNode(Kernel, Memory, i, Bytes, Type, &alignment); + if (node != gcvNULL) + { + break; + } + } + } + + if (node == gcvNULL) + { + /* Walk all upper banks. */ + for (i = bank + 1; i < gcmCOUNTOF(Memory->sentinel); ++i) + { + if (Memory->sentinel[i].VidMem.nextFree == gcvNULL) + { + /* Abort when we reach unused banks. */ + break; + } + + /* Find a free node inside the current bank. */ + node = _FindNode(Kernel, Memory, i, Bytes, Type, &alignment); + if (node != gcvNULL) + { + break; + } + } + } + + if (node == gcvNULL) + { + /* Out of memory. */ + status = gcvSTATUS_OUT_OF_MEMORY; + goto OnError; + } + + /* Do we have an alignment? */ + if (alignment > 0) + { + /* Split the node so it is aligned. */ + if (_Split(Memory->os, node, alignment)) + { + /* Successful split, move to aligned node. */ + node = node->VidMem.next; + + /* Remove alignment. */ + alignment = 0; + } + } + + /* Do we have enough memory after the allocation to split it? */ + if (node->VidMem.bytes - Bytes > Memory->threshold) + { + /* Adjust the node size. */ + _Split(Memory->os, node, Bytes); + } + + /* Remove the node from the free list. 
*/ + node->VidMem.prevFree->VidMem.nextFree = node->VidMem.nextFree; + node->VidMem.nextFree->VidMem.prevFree = node->VidMem.prevFree; + node->VidMem.nextFree = + node->VidMem.prevFree = gcvNULL; + + /* Fill in the information. */ + node->VidMem.alignment = alignment; + node->VidMem.memory = Memory; +#ifdef __QNXNTO__ + node->VidMem.logical = gcvNULL; + gcmkONERROR(gckOS_GetProcessID(&node->VidMem.processID)); +#endif + + /* Adjust the number of free bytes. */ + Memory->freeBytes -= node->VidMem.bytes; + + if (Memory->freeBytes < Memory->minFreeBytes) + { + Memory->minFreeBytes = Memory->freeBytes; + } + + + /* Release the mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex)); + + /* Return the pointer to the node. */ + *Node = node; + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, + "Allocated %u bytes @ 0x%x [0x%08X]", + node->VidMem.bytes, node, node->VidMem.offset); + + /* Success. */ + gcmkFOOTER_ARG("*Node=0x%x", *Node); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + /* Release the mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex)); + } + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckVIDMEM_Free +** +** Free an allocated video memory node. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gcuVIDMEM_NODE_PTR Node +** Pointer to a gcuVIDMEM_NODE object. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckVIDMEM_Free( + IN gckKERNEL Kernel, + IN gcuVIDMEM_NODE_PTR Node + ) +{ + gceSTATUS status; + gckKERNEL kernel = gcvNULL; + gckVIDMEM memory = gcvNULL; + gcuVIDMEM_NODE_PTR node; + gctBOOL mutexAcquired = gcvFALSE; + + gcmkHEADER_ARG("Node=0x%x", Node); + + /* Verify the arguments. */ + if ((Node == gcvNULL) + || (Node->VidMem.memory == gcvNULL) + ) + { + /* Invalid object. 
*/ + gcmkONERROR(gcvSTATUS_INVALID_OBJECT); + } + + /**************************** Video Memory ********************************/ + + if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM) + { + /* Extract pointer to gckVIDMEM object owning the node. */ + memory = Node->VidMem.memory; + + /* Acquire the mutex. */ + gcmkONERROR( + gckOS_AcquireMutex(memory->os, memory->mutex, gcvINFINITE)); + + mutexAcquired = gcvTRUE; + +#ifdef __QNXNTO__ + /* Unmap the video memory. */ + if (Node->VidMem.logical != gcvNULL) + { + gckKERNEL_UnmapVideoMemory( + Kernel, + Node->VidMem.logical, + Node->VidMem.processID, + Node->VidMem.bytes); + Node->VidMem.logical = gcvNULL; + } + + /* Reset. */ + Node->VidMem.processID = 0; + + /* Don't try to re-free an already freed node. */ + if ((Node->VidMem.nextFree == gcvNULL) + && (Node->VidMem.prevFree == gcvNULL) + ) +#endif + { + + /* Check if Node is already freed. */ + if (Node->VidMem.nextFree) + { + /* Node is alread freed. */ + gcmkONERROR(gcvSTATUS_INVALID_DATA); + } + + /* Update the number of free bytes. */ + memory->freeBytes += Node->VidMem.bytes; + + /* Find the next free node. */ + for (node = Node->VidMem.next; + node != gcvNULL && node->VidMem.nextFree == gcvNULL; + node = node->VidMem.next) ; + + /* Insert this node in the free list. */ + Node->VidMem.nextFree = node; + Node->VidMem.prevFree = node->VidMem.prevFree; + + Node->VidMem.prevFree->VidMem.nextFree = + node->VidMem.prevFree = Node; + + /* Is the next node a free node and not the sentinel? */ + if ((Node->VidMem.next == Node->VidMem.nextFree) + && (Node->VidMem.next->VidMem.bytes != 0) + ) + { + /* Merge this node with the next node. */ + gcmkONERROR(_Merge(memory->os, node = Node)); + gcmkASSERT(node->VidMem.nextFree != node); + gcmkASSERT(node->VidMem.prevFree != node); + } + + /* Is the previous node a free node and not the sentinel? 
*/ + if ((Node->VidMem.prev == Node->VidMem.prevFree) + && (Node->VidMem.prev->VidMem.bytes != 0) + ) + { + /* Merge this node with the previous node. */ + gcmkONERROR(_Merge(memory->os, node = Node->VidMem.prev)); + gcmkASSERT(node->VidMem.nextFree != node); + gcmkASSERT(node->VidMem.prevFree != node); + } + } + + /* Release the mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(memory->os, memory->mutex)); + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, + "Node 0x%x is freed.", + Node); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + } + + /*************************** Virtual Memory *******************************/ + + /* Get gckKERNEL object. */ + kernel = Node->Virtual.kernel; + + /* Verify the gckKERNEL object pointer. */ + gcmkVERIFY_OBJECT(kernel, gcvOBJ_KERNEL); + + + /* Free the virtual memory. */ + gcmkVERIFY_OK(gckOS_FreePagedMemory(kernel->os, + Node->Virtual.physical, + Node->Virtual.bytes)); + + /* Destroy the gcuVIDMEM_NODE union. */ + gcmkVERIFY_OK(gckVIDMEM_DestroyVirtual(Node)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (mutexAcquired) + { + /* Release the mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex( + memory->os, memory->mutex + )); + } + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +#if !gcdPROCESS_ADDRESS_SPACE +/******************************************************************************* +** +** _NeedVirtualMapping +** +** Whether setup GPU page table for video node. +** +** INPUT: +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gcuVIDMEM_NODE_PTR Node +** Pointer to a gcuVIDMEM_NODE union. +** +** gceCORE Core +** Id of current GPU. +** +** OUTPUT: +** gctBOOL * NeedMapping +** A pointer hold the result whether Node should be mapping. 
+*/ +static gceSTATUS +_NeedVirtualMapping( + IN gckKERNEL Kernel, + IN gceCORE Core, + IN gcuVIDMEM_NODE_PTR Node, + OUT gctBOOL * NeedMapping +) +{ + gceSTATUS status; + gctPHYS_ADDR_T phys; + gctUINT32 address; + gctUINT32 end; + gcePOOL pool; + gctUINT32 offset; + gctUINT32 bytes; + + gcmkHEADER_ARG("Node=0x%X", Node); + + /* Verify the arguments. */ + gcmkVERIFY_ARGUMENT(Kernel != gcvNULL); + gcmkVERIFY_ARGUMENT(Node != gcvNULL); + gcmkVERIFY_ARGUMENT(NeedMapping != gcvNULL); + gcmkVERIFY_ARGUMENT(Core < gcdMAX_GPU_COUNT); + + if (Node->Virtual.contiguous) + { + if (Node->Virtual.secure) + { + *NeedMapping = gcvTRUE; + } + else + { + /* Convert logical address into a physical address. */ + gcmkONERROR(gckOS_UserLogicalToPhysical( + Kernel->os, Node->Virtual.logical, &phys + )); + + if (phys > gcvMAXUINT32) + { + *NeedMapping = gcvTRUE; + } + else + { + gcmkSAFECASTPHYSADDRT(address, phys); + + if (!gckHARDWARE_IsFeatureAvailable(Kernel->hardware, gcvFEATURE_MMU)) + { + gcmkASSERT(address >= Kernel->hardware->baseAddress); + + /* Subtract baseAddress to get a GPU address used for programming. */ + address -= Kernel->hardware->baseAddress; + + /* If part of region is belong to gcvPOOL_VIRTUAL, + ** whole region has to be mapped. 
*/ + gcmkSAFECASTSIZET(bytes, Node->Virtual.bytes); + end = address + bytes - 1; + + gcmkONERROR(gckHARDWARE_SplitMemory( + Kernel->hardware, end, &pool, &offset + )); + + *NeedMapping = (pool == gcvPOOL_VIRTUAL); + } + else + { + gctBOOL flatMapped; + + gcmkONERROR(gckMMU_IsFlatMapped(Kernel->mmu, address, &flatMapped)); + + *NeedMapping = !flatMapped; + } + } + } + } + else + { + *NeedMapping = gcvTRUE; + } + + gcmkFOOTER_ARG("*NeedMapping=%d", *NeedMapping); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} +#endif + +#if gcdPROCESS_ADDRESS_SPACE +gcsGPU_MAP_PTR +_FindGPUMap( + IN gcsGPU_MAP_PTR Head, + IN gctINT ProcessID + ) +{ + gcsGPU_MAP_PTR map = Head; + + while (map) + { + if (map->pid == ProcessID) + { + return map; + } + + map = map->next; + } + + return gcvNULL; +} + +gcsGPU_MAP_PTR +_CreateGPUMap( + IN gckOS Os, + IN gcsGPU_MAP_PTR *Head, + IN gcsGPU_MAP_PTR *Tail, + IN gctINT ProcessID + ) +{ + gcsGPU_MAP_PTR gpuMap; + gctPOINTER pointer = gcvNULL; + + gckOS_Allocate(Os, sizeof(gcsGPU_MAP), &pointer); + + if (pointer == gcvNULL) + { + return gcvNULL; + } + + gpuMap = pointer; + + gckOS_ZeroMemory(pointer, sizeof(gcsGPU_MAP)); + + gpuMap->pid = ProcessID; + + if (!*Head) + { + *Head = *Tail = gpuMap; + } + else + { + gpuMap->prev = *Tail; + (*Tail)->next = gpuMap; + *Tail = gpuMap; + } + + return gpuMap; +} + +void +_DestroyGPUMap( + IN gckOS Os, + IN gcsGPU_MAP_PTR *Head, + IN gcsGPU_MAP_PTR *Tail, + IN gcsGPU_MAP_PTR gpuMap + ) +{ + + if (gpuMap == *Head) + { + if ((*Head = gpuMap->next) == gcvNULL) + { + *Tail = gcvNULL; + } + } + else + { + gpuMap->prev->next = gpuMap->next; + if (gpuMap == *Tail) + { + *Tail = gpuMap->prev; + } + else + { + gpuMap->next->prev = gpuMap->prev; + } + } + + gcmkOS_SAFE_FREE(Os, gpuMap); +} +#endif + +/******************************************************************************* +** +** gckVIDMEM_Lock +** +** Lock a video memory node and return its hardware specific address. 
+** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gcuVIDMEM_NODE_PTR Node +** Pointer to a gcuVIDMEM_NODE union. +** +** OUTPUT: +** +** gctUINT32 * Address +** Pointer to a variable that will hold the hardware specific address. +** +** gctUINT32 * PhysicalAddress +** Pointer to a variable that will hold the bus address of a contiguous +** video node. +*/ +gceSTATUS +gckVIDMEM_Lock( + IN gckKERNEL Kernel, + IN gckVIDMEM_NODE Node, + IN gctBOOL Cacheable, + OUT gctUINT32 * Address, + OUT gctUINT32 * Gid, + OUT gctUINT64 * PhysicalAddress + ) +{ + gceSTATUS status; + gctBOOL acquired = gcvFALSE; + gctBOOL locked = gcvFALSE; + gckOS os = gcvNULL; +#if !gcdPROCESS_ADDRESS_SPACE + gctBOOL needMapping = gcvFALSE; +#endif + gctUINT64 physicalAddress = ~0ULL; + gcuVIDMEM_NODE_PTR node = Node->node; + gctSIZE_T pageSize; + gctUINT32 pageMask; + + gcmkHEADER_ARG("Node=0x%x", Node); + + /* Verify the arguments. */ + gcmkVERIFY_ARGUMENT(Address != gcvNULL); + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + + /* Extract the gckOS object pointer. */ + os = Kernel->os; + gcmkVERIFY_OBJECT(os, gcvOBJ_OS); + + if ((node == gcvNULL) + || (node->VidMem.memory == gcvNULL) + ) + { + /* Invalid object. */ + gcmkONERROR(gcvSTATUS_INVALID_OBJECT); + } + + /* Grab the mutex. */ + gcmkONERROR(gckOS_AcquireMutex(os, Node->mutex, gcvINFINITE)); + acquired = gcvTRUE; + + /**************************** Video Memory ********************************/ + + if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM) + { + gctUINT32 offset; + + if (Cacheable == gcvTRUE) + { + gcmkONERROR(gcvSTATUS_INVALID_REQUEST); + } + + /* Increment the lock count. 
*/ + node->VidMem.locked ++; + + gcmkSAFECASTSIZET(offset, node->VidMem.offset); + physicalAddress = node->VidMem.memory->baseAddress + + offset + + node->VidMem.alignment; + + if (node->VidMem.pool == gcvPOOL_LOCAL_EXTERNAL) + { + *Address = Kernel->externalBaseAddress + offset; + } + else + { + gcmkASSERT(node->VidMem.pool == gcvPOOL_SYSTEM); + *Address = Kernel->contiguousBaseAddress + offset; + } + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, + "Locked node 0x%x (%d) @ 0x%08X", + node, + node->VidMem.locked, + *Address); + } + + /*************************** Virtual Memory *******************************/ + + else + { + + *Gid = node->Virtual.gid; + +#if gcdPAGED_MEMORY_CACHEABLE + /* Force video memory cacheable. */ + Cacheable = gcvTRUE; +#endif + + gcmkONERROR( + gckOS_LockPages(os, + node->Virtual.physical, + node->Virtual.bytes, + Cacheable, + &node->Virtual.logical, + &node->Virtual.pageCount)); + + gcmkONERROR(gckOS_UserLogicalToPhysical( + os, + node->Virtual.logical, + &physicalAddress + )); + + +#if !gcdPROCESS_ADDRESS_SPACE + /* Increment the lock count. */ + if (node->Virtual.lockeds[Kernel->core] ++ == 0) + { + locked = gcvTRUE; + + gcmkONERROR(_NeedVirtualMapping(Kernel, Kernel->core, node, &needMapping)); + + if (needMapping == gcvFALSE) + { + /* Get hardware specific address. 
*/ + { + gcmkONERROR(gckHARDWARE_ConvertLogical( + Kernel->hardware, + node->Virtual.logical, + gcvTRUE, + &node->Virtual.addresses[Kernel->core])); + } + } + else + { +#if gcdSECURITY + gctPHYS_ADDR physicalArrayPhysical; + gctPOINTER physicalArrayLogical; + + gcmkONERROR(gckOS_AllocatePageArray( + os, + node->Virtual.physical, + node->Virtual.pageCount, + &physicalArrayLogical, + &physicalArrayPhysical + )); + + gcmkONERROR(gckKERNEL_SecurityMapMemory( + Kernel, + physicalArrayLogical, + node->Virtual.pageCount, + &node->Virtual.addresses[Kernel->core] + )); + + gcmkONERROR(gckOS_FreeNonPagedMemory( + os, + 1, + physicalArrayPhysical, + physicalArrayLogical + )); +#else + { + /* Allocate pages inside the MMU. */ + gcmkONERROR( + gckMMU_AllocatePagesEx(Kernel->mmu, + node->Virtual.pageCount, + node->Virtual.type, + node->Virtual.secure, + &node->Virtual.pageTables[Kernel->core], + &node->Virtual.addresses[Kernel->core])); + } + + if (node->Virtual.onFault != gcvTRUE) + { +#if gcdENABLE_TRUST_APPLICATION + if (Kernel->hardware->options.secureMode == gcvSECURE_IN_TA) + { + gcmkONERROR(gckKERNEL_MapInTrustApplicaiton( + Kernel, + node->Virtual.logical, + node->Virtual.physical, + node->Virtual.addresses[Kernel->core], + node->Virtual.pageCount + )); + } + else +#endif + { + /* Map the pages. */ + gcmkONERROR(gckOS_MapPagesEx(os, + Kernel->core, + node->Virtual.physical, + node->Virtual.pageCount, + node->Virtual.addresses[Kernel->core], + node->Virtual.pageTables[Kernel->core], + gcvTRUE, + node->Virtual.type)); + } + } + + { + gcmkONERROR(gckMMU_Flush(Kernel->mmu, node->Virtual.type)); + } +#endif + } + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, + "Mapped virtual node 0x%x to 0x%08X", + node, + node->Virtual.addresses[Kernel->core]); + } + + /* Return hardware address. 
*/ + *Address = node->Virtual.addresses[Kernel->core]; + + if (needMapping == gcvTRUE) + { + + { + pageSize = Kernel->command->pageSize; + } + + pageMask = (gctUINT32)pageSize - 1; + + *Address += (gctUINT32)physicalAddress & pageMask; + } +#endif + } + + /* Release the mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->mutex)); + + *PhysicalAddress = (gctUINT64)physicalAddress; + + /* Success. */ + gcmkFOOTER_ARG("*Address=%08x", *Address); + return gcvSTATUS_OK; + +OnError: + if (locked) + { + if (node->Virtual.pageTables[Kernel->core] != gcvNULL) + { + { + /* Free the pages from the MMU. */ + gcmkVERIFY_OK( + gckMMU_FreePages(Kernel->mmu, + node->Virtual.secure, + node->Virtual.addresses[Kernel->core], + node->Virtual.pageTables[Kernel->core], + node->Virtual.pageCount)); + } + node->Virtual.pageTables[Kernel->core] = gcvNULL; + } + + /* Unlock the pages. */ + gcmkVERIFY_OK( + gckOS_UnlockPages(os, + node->Virtual.physical, + node->Virtual.bytes, + node->Virtual.logical + )); + + node->Virtual.lockeds[Kernel->core]--; + } + + if (acquired) + { + /* Release the mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->mutex)); + } + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckVIDMEM_Unlock +** +** Unlock a video memory node. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gcuVIDMEM_NODE_PTR Node +** Pointer to a locked gcuVIDMEM_NODE union. +** +** gceSURF_TYPE Type +** Type of surface to unlock. +** +** gctBOOL * Asynchroneous +** Pointer to a variable specifying whether the surface should be +** unlocked asynchroneously or not. +** +** OUTPUT: +** +** gctBOOL * Asynchroneous +** Pointer to a variable receiving the number of bytes used in the +** command buffer specified by 'Commands'. If gcvNULL, there is no +** command buffer. 
+*/ +gceSTATUS +gckVIDMEM_Unlock( + IN gckKERNEL Kernel, + IN gckVIDMEM_NODE Node, + IN gceSURF_TYPE Type, + IN OUT gctBOOL * Asynchroneous + ) +{ + gceSTATUS status; + gckOS os = gcvNULL; + gctBOOL acquired = gcvFALSE; + gcuVIDMEM_NODE_PTR node = Node->node; + + gcmkHEADER_ARG("Node=0x%x Type=%d *Asynchroneous=%d", + Node, Type, gcmOPT_VALUE(Asynchroneous)); + + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + + /* Get the gckOS object pointer. */ + os = Kernel->os; + gcmkVERIFY_OBJECT(os, gcvOBJ_OS); + + /* Verify the arguments. */ + if ((node == gcvNULL) + || (node->VidMem.memory == gcvNULL) + ) + { + /* Invalid object. */ + gcmkONERROR(gcvSTATUS_INVALID_OBJECT); + } + + /* Grab the mutex. */ + gcmkONERROR(gckOS_AcquireMutex(os, Node->mutex, gcvINFINITE)); + acquired = gcvTRUE; + + /**************************** Video Memory ********************************/ + + if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM) + { + if (node->VidMem.locked <= 0) + { + /* The surface was not locked. */ + status = gcvSTATUS_MEMORY_UNLOCKED; + goto OnError; + } + + if (Asynchroneous != gcvNULL) + { + /* Schedule an event to sync with GPU. */ + *Asynchroneous = gcvTRUE; + } + else + { + /* Decrement the lock count. */ + node->VidMem.locked --; + } + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, + "Unlocked node 0x%x (%d)", + node, + node->VidMem.locked); + } + + /*************************** Virtual Memory *******************************/ + + else + { + + + if (Asynchroneous == gcvNULL) + { +#if !gcdPROCESS_ADDRESS_SPACE + if (node->Virtual.lockeds[Kernel->core] == 0) + { + status = gcvSTATUS_MEMORY_UNLOCKED; + goto OnError; + } + + /* Decrement lock count. */ + -- node->Virtual.lockeds[Kernel->core]; + + /* See if we can unlock the resources. 
*/ + if (node->Virtual.lockeds[Kernel->core] == 0) + { +#if gcdSECURITY + if (node->Virtual.addresses[Kernel->core] > 0x80000000) + { + gcmkONERROR(gckKERNEL_SecurityUnmapMemory( + Kernel, + node->Virtual.addresses[Kernel->core], + node->Virtual.pageCount + )); + } +#else + /* Free the page table. */ + if (node->Virtual.pageTables[Kernel->core] != gcvNULL) + { + { + gcmkONERROR( + gckMMU_FreePages(Kernel->mmu, + node->Virtual.secure, + node->Virtual.addresses[Kernel->core], + node->Virtual.pageTables[Kernel->core], + node->Virtual.pageCount)); + } + + gcmkONERROR(gckOS_UnmapPages( + Kernel->os, + node->Virtual.pageCount, + node->Virtual.addresses[Kernel->core] + )); + + /* Mark page table as freed. */ + node->Virtual.pageTables[Kernel->core] = gcvNULL; + } +#endif + } + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, + "Unmapped virtual node 0x%x from 0x%08X", + node, node->Virtual.addresses[Kernel->core]); +#endif + + } + + else + { + gcmkONERROR( + gckOS_UnlockPages(os, + node->Virtual.physical, + node->Virtual.bytes, + node->Virtual.logical)); + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, + "Scheduled unlock for virtual node 0x%x", + node); + + /* Schedule the surface to be unlocked. */ + *Asynchroneous = gcvTRUE; + } + } + + /* Release the mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->mutex)); + acquired = gcvFALSE; + + /* Success. */ + gcmkFOOTER_ARG("*Asynchroneous=%d", gcmOPT_VALUE(Asynchroneous)); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + /* Release the mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->mutex)); + } + + /* Return the status. 
*/ + gcmkFOOTER(); + return status; +} + +#if gcdPROCESS_ADDRESS_SPACE +gceSTATUS +gckVIDMEM_Node_Lock( + IN gckKERNEL Kernel, + IN gckVIDMEM_NODE Node, + OUT gctUINT32 *Address + ) +{ + gceSTATUS status; + gckOS os; + gcuVIDMEM_NODE_PTR node = Node->node; + gcsGPU_MAP_PTR gpuMap; + gctPHYS_ADDR physical = gcvNULL; + gctUINT32 phys = gcvINVALID_ADDRESS; + gctUINT32 processID; + gcsLOCK_INFO_PTR lockInfo; + gctUINT32 pageCount; + gckMMU mmu; + gctUINT32 i; + gctUINT32_PTR pageTableEntry; + gctUINT32 offset = 0; + gctBOOL acquired = gcvFALSE; + + gcmkHEADER_ARG("Node = %x", Node); + + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + gcmkVERIFY_ARGUMENT(Node != gcvNULL); + gcmkVERIFY_ARGUMENT(Address != gcvNULL); + + os = Kernel->os; + gcmkVERIFY_OBJECT(os, gcvOBJ_OS); + + gcmkONERROR(gckOS_GetProcessID(&processID)); + + gcmkONERROR(gckKERNEL_GetProcessMMU(Kernel, &mmu)); + + gcmkONERROR(gckOS_AcquireMutex(os, Node->mapMutex, gcvINFINITE)); + acquired = gcvTRUE; + + /* Get map information for current process. */ + gpuMap = _FindGPUMap(Node->mapHead, processID); + + if (gpuMap == gcvNULL) + { + gpuMap = _CreateGPUMap(os, &Node->mapHead, &Node->mapTail, processID); + + if (gpuMap == gcvNULL) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + } + + lockInfo = &gpuMap->lockInfo; + + if (lockInfo->lockeds[Kernel->core] ++ == 0) + { + /* Get necessary information. */ + if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM) + { + phys = node->VidMem.memory->baseAddress + + node->VidMem.offset + + node->VidMem.alignment; + + /* GPU page table use 4K page. */ + pageCount = ((phys + node->VidMem.bytes + 4096 - 1) >> 12) + - (phys >> 12); + + offset = phys & 0xFFF; + } + else + { + pageCount = node->Virtual.pageCount; + physical = node->Virtual.physical; + } + + /* Allocate pages inside the MMU. */ + gcmkONERROR(gckMMU_AllocatePages( + mmu, + pageCount, + &lockInfo->pageTables[Kernel->core], + &lockInfo->GPUAddresses[Kernel->core])); + + /* Record MMU from which pages are allocated. 
*/ + lockInfo->lockMmus[Kernel->core] = mmu; + + pageTableEntry = lockInfo->pageTables[Kernel->core]; + + /* Fill page table entries. */ + if (phys != gcvINVALID_ADDRESS) + { + gctUINT32 address = lockInfo->GPUAddresses[Kernel->core]; + for (i = 0; i < pageCount; i++) + { + gckMMU_GetPageEntry(mmu, address, &pageTableEntry); + gckMMU_SetPage(mmu, phys & 0xFFFFF000, pageTableEntry); + phys += 4096; + address += 4096; + pageTableEntry += 1; + } + } + else + { + gctUINT32 address = lockInfo->GPUAddresses[Kernel->core]; + gcmkASSERT(physical != gcvNULL); + gcmkONERROR(gckOS_MapPagesEx(os, + Kernel->core, + physical, + pageCount, + address, + pageTableEntry)); + } + + gcmkONERROR(gckMMU_Flush(mmu, Node->type)); + } + + *Address = lockInfo->GPUAddresses[Kernel->core] + offset; + + gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->mapMutex)); + acquired = gcvFALSE; + + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->mapMutex)); + } + + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckVIDMEM_NODE_Unlock( + IN gckKERNEL Kernel, + IN gckVIDMEM_NODE Node, + IN gctUINT32 ProcessID + ) +{ + gceSTATUS status; + gcsGPU_MAP_PTR gpuMap; + gcsLOCK_INFO_PTR lockInfo; + gckMMU mmu; + gcuVIDMEM_NODE_PTR node; + gctUINT32 pageCount; + gctBOOL acquired = gcvFALSE; + + gcmkHEADER_ARG("Kernel=0x%08X, Node = %x, ProcessID=%d", + Kernel, Node, ProcessID); + + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + gcmkVERIFY_ARGUMENT(Node != gcvNULL); + + gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Node->mapMutex, gcvINFINITE)); + acquired = gcvTRUE; + + /* Get map information for current process. */ + gpuMap = _FindGPUMap(Node->mapHead, ProcessID); + + if (gpuMap == gcvNULL) + { + /* No mapping for this process. */ + gcmkONERROR(gcvSTATUS_INVALID_DATA); + } + + lockInfo = &gpuMap->lockInfo; + + if (--lockInfo->lockeds[Kernel->core] == 0) + { + node = Node->node; + + /* Get necessary information. 
*/ + if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM) + { + gctUINT32 phys = node->VidMem.memory->baseAddress + + node->VidMem.offset + + node->VidMem.alignment; + + /* GPU page table use 4K page. */ + pageCount = ((phys + node->VidMem.bytes + 4096 - 1) >> 12) + - (phys >> 12); + } + else + { + pageCount = node->Virtual.pageCount; + } + + /* Get MMU which allocates pages. */ + mmu = lockInfo->lockMmus[Kernel->core]; + + /* Free virtual spaces in page table. */ + gcmkVERIFY_OK(gckMMU_FreePagesEx( + mmu, + lockInfo->GPUAddresses[Kernel->core], + pageCount + )); + + _DestroyGPUMap(Kernel->os, &Node->mapHead, &Node->mapTail, gpuMap); + } + + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Node->mapMutex)); + acquired = gcvFALSE; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Node->mapMutex)); + } + + gcmkFOOTER(); + return status; +} +#endif + +/******************************************************************************* +** +** gckVIDMEM_HANDLE_Allocate +** +** Allocate a handle for a gckVIDMEM_NODE object. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gckVIDMEM_NODE Node +** Pointer to a gckVIDMEM_NODE object. +** +** OUTPUT: +** +** gctUINT32 * Handle +** Pointer to a variable receiving a handle represent this +** gckVIDMEM_NODE in userspace. +*/ +gceSTATUS +gckVIDMEM_HANDLE_Allocate( + IN gckKERNEL Kernel, + IN gckVIDMEM_NODE Node, + OUT gctUINT32 * Handle + ) +{ + gceSTATUS status; + gctUINT32 processID = 0; + gctPOINTER pointer = gcvNULL; + gctPOINTER handleDatabase = gcvNULL; + gctPOINTER mutex = gcvNULL; + gctUINT32 handle = 0; + gckVIDMEM_HANDLE handleObject = gcvNULL; + gckOS os = Kernel->os; + + gcmkHEADER_ARG("Kernel=0x%X, Node=0x%X", Kernel, Node); + + gcmkVERIFY_OBJECT(os, gcvOBJ_OS); + + /* Allocate a gckVIDMEM_HANDLE object. 
*/ + gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcsVIDMEM_HANDLE), &pointer)); + + gcmkVERIFY_OK(gckOS_ZeroMemory(pointer, gcmSIZEOF(gcsVIDMEM_HANDLE))); + + handleObject = pointer; + + gcmkONERROR(gckOS_AtomConstruct(os, &handleObject->reference)); + + /* Set default reference count to 1. */ + gckOS_AtomSet(os, handleObject->reference, 1); + + gcmkVERIFY_OK(gckOS_GetProcessID(&processID)); + + gcmkONERROR( + gckKERNEL_FindHandleDatbase(Kernel, + processID, + &handleDatabase, + &mutex)); + + /* Allocate a handle for this object. */ + gcmkONERROR( + gckKERNEL_AllocateIntegerId(handleDatabase, handleObject, &handle)); + + handleObject->node = Node; + handleObject->handle = handle; + + *Handle = handle; + + gcmkFOOTER_ARG("*Handle=%d", *Handle); + return gcvSTATUS_OK; + +OnError: + if (handleObject != gcvNULL) + { + if (handleObject->reference != gcvNULL) + { + gcmkVERIFY_OK(gckOS_AtomDestroy(os, handleObject->reference)); + } + + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, handleObject)); + } + + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckVIDMEM_NODE_Reference( + IN gckKERNEL Kernel, + IN gckVIDMEM_NODE Node + ) +{ + gctINT32 oldValue; + gcmkHEADER_ARG("Kernel=0x%X Node=0x%X", Kernel, Node); + + gckOS_AtomIncrement(Kernel->os, Node->reference, &oldValue); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gckVIDMEM_HANDLE_Reference( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN gctUINT32 Handle + ) +{ + gceSTATUS status; + gckVIDMEM_HANDLE handleObject = gcvNULL; + gctPOINTER database = gcvNULL; + gctPOINTER mutex = gcvNULL; + gctINT32 oldValue = 0; + gctBOOL acquired = gcvFALSE; + + gcmkHEADER_ARG("Handle=%d PrcoessID=%d", Handle, ProcessID); + + gcmkONERROR( + gckKERNEL_FindHandleDatbase(Kernel, ProcessID, &database, &mutex)); + + gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, mutex, gcvINFINITE)); + acquired = gcvTRUE; + + /* Translate handle to gckVIDMEM_HANDLE object. 
*/ + gcmkONERROR( + gckKERNEL_QueryIntegerId(database, Handle, (gctPOINTER *)&handleObject)); + + /* Increase the reference count. */ + gckOS_AtomIncrement(Kernel->os, handleObject->reference, &oldValue); + + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex)); + acquired = gcvFALSE; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex)); + } + + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckVIDMEM_HANDLE_Dereference( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN gctUINT32 Handle + ) +{ + gceSTATUS status; + gctPOINTER handleDatabase = gcvNULL; + gctPOINTER mutex = gcvNULL; + gctINT32 oldValue = 0; + gckVIDMEM_HANDLE handleObject = gcvNULL; + gctBOOL acquired = gcvFALSE; + + gcmkHEADER_ARG("Handle=%d PrcoessID=%d", Handle, ProcessID); + + gcmkONERROR( + gckKERNEL_FindHandleDatbase(Kernel, + ProcessID, + &handleDatabase, + &mutex)); + + gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, mutex, gcvINFINITE)); + acquired = gcvTRUE; + + /* Translate handle to gckVIDMEM_HANDLE. */ + gcmkONERROR( + gckKERNEL_QueryIntegerId(handleDatabase, Handle, (gctPOINTER *)&handleObject)); + + gckOS_AtomDecrement(Kernel->os, handleObject->reference, &oldValue); + + if (oldValue == 1) + { + /* Remove handle from database if this is the last reference. 
*/ + gcmkVERIFY_OK(gckKERNEL_FreeIntegerId(handleDatabase, Handle)); + } + + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex)); + acquired = gcvFALSE; + + if (oldValue == 1) + { + gcmkVERIFY_OK(gckOS_AtomDestroy(Kernel->os, handleObject->reference)); + gcmkOS_SAFE_FREE(Kernel->os, handleObject); + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex)); + } + + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckVIDMEM_HANDLE_LookupAndReference( + IN gckKERNEL Kernel, + IN gctUINT32 Handle, + OUT gckVIDMEM_NODE * Node + ) +{ + gceSTATUS status; + gckVIDMEM_HANDLE handleObject = gcvNULL; + gckVIDMEM_NODE node = gcvNULL; + gctPOINTER database = gcvNULL; + gctPOINTER mutex = gcvNULL; + gctUINT32 processID = 0; + gctBOOL acquired = gcvFALSE; + + gcmkHEADER_ARG("Kernel=0x%X Handle=%d", Kernel, Handle); + + gckOS_GetProcessID(&processID); + + gcmkONERROR( + gckKERNEL_FindHandleDatbase(Kernel, processID, &database, &mutex)); + + gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, mutex, gcvINFINITE)); + acquired = gcvTRUE; + + /* Translate handle to gckVIDMEM_HANDLE object. */ + gcmkONERROR( + gckKERNEL_QueryIntegerId(database, Handle, (gctPOINTER *)&handleObject)); + + /* Get gckVIDMEM_NODE object. */ + node = handleObject->node; + + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex)); + acquired = gcvFALSE; + + /* Reference this gckVIDMEM_NODE object. */ + gcmkVERIFY_OK(gckVIDMEM_NODE_Reference(Kernel, node)); + + /* Return result. 
*/ + *Node = node; + + gcmkFOOTER_ARG("*Node=%d", *Node); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex)); + } + + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckVIDMEM_HANDLE_Lookup( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN gctUINT32 Handle, + OUT gckVIDMEM_NODE * Node + ) +{ + gceSTATUS status; + gckVIDMEM_HANDLE handleObject = gcvNULL; + gckVIDMEM_NODE node = gcvNULL; + gctPOINTER database = gcvNULL; + gctPOINTER mutex = gcvNULL; + gctBOOL acquired = gcvFALSE; + + gcmkHEADER_ARG("Kernel=0x%X ProcessID=%d Handle=%d", + Kernel, ProcessID, Handle); + + gcmkONERROR( + gckKERNEL_FindHandleDatbase(Kernel, ProcessID, &database, &mutex)); + + gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, mutex, gcvINFINITE)); + acquired = gcvTRUE; + + gcmkONERROR( + gckKERNEL_QueryIntegerId(database, Handle, (gctPOINTER *)&handleObject)); + + node = handleObject->node; + + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex)); + acquired = gcvFALSE; + + *Node = node; + + gcmkFOOTER_ARG("*Node=%d", *Node); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex)); + } + + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckVIDMEM_NODE_Allocate +** +** Allocate a gckVIDMEM_NODE object. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gcuVIDMEM_NODE_PTR Node +** Pointer to a gcuVIDMEM_NODE union. +** +** OUTPUT: +** +** gctUINT32 * Handle +** Pointer to a variable receiving a handle represent this +** gckVIDMEM_NODE in userspace. 
+*/ +gceSTATUS +gckVIDMEM_NODE_Allocate( + IN gckKERNEL Kernel, + IN gcuVIDMEM_NODE_PTR VideoNode, + IN gceSURF_TYPE Type, + IN gcePOOL Pool, + IN gctUINT32 * Handle + ) +{ + gceSTATUS status; + gckVIDMEM_NODE node = gcvNULL; + gctPOINTER pointer = gcvNULL; + gctUINT32 handle = 0; + gckOS os = Kernel->os; + gctUINT i; + + gcmkHEADER_ARG("Kernel=0x%X VideoNode=0x%X", Kernel, VideoNode); + + /* Construct a node. */ + gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcsVIDMEM_NODE), &pointer)); + + gcmkVERIFY_OK(gckOS_ZeroMemory(pointer, gcmSIZEOF(gcsVIDMEM_NODE))); + + node = pointer; + + node->metadata.magic = VIV_VIDMEM_METADATA_MAGIC; + node->metadata.ts_fd = -1; + + node->node = VideoNode; + node->kernel = Kernel; + node->type = Type; + node->pool = Pool; + +#if gcdPROCESS_ADDRESS_SPACE + gcmkONERROR(gckOS_CreateMutex(os, &node->mapMutex)); +#endif + + gcmkONERROR(gckOS_AtomConstruct(os, &node->reference)); + + gcmkONERROR(gckOS_CreateMutex(os, &node->mutex)); + + for (i = 0; i < gcvENGINE_GPU_ENGINE_COUNT; i++) + { + gcmkONERROR(gckOS_CreateSignal(os, gcvFALSE, &node->sync[i].signal)); + } + + /* Reference is 1 by default . */ + gckOS_AtomSet(os, node->reference, 1); + + /* Create a handle to represent this node. 
*/ + gcmkONERROR(gckVIDMEM_HANDLE_Allocate(Kernel, node, &handle)); + + *Handle = handle; + + gcmkFOOTER_ARG("*Handle=%d", *Handle); + return gcvSTATUS_OK; + +OnError: + if (node != gcvNULL) + { +#if gcdPROCESS_ADDRESS_SPACE + if (node->mapMutex != gcvNULL) + { + gcmkVERIFY_OK(gckOS_DeleteMutex(os, node->mapMutex)); + } +#endif + + if (node->mutex) + { + gcmkVERIFY_OK(gckOS_DeleteMutex(os, node->mutex)); + } + + if (node->reference != gcvNULL) + { + gcmkVERIFY_OK(gckOS_AtomDestroy(os, node->reference)); + } + + for (i = 0; i < gcvENGINE_GPU_ENGINE_COUNT; i++) + { + if (node->sync[i].signal != gcvNULL) + { + gcmkVERIFY_OK(gckOS_DestroySignal(os, node->sync[i].signal)); + } + } + + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, node)); + } + + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckVIDMEM_NODE_Dereference( + IN gckKERNEL Kernel, + IN gckVIDMEM_NODE Node + ) +{ + gctINT32 oldValue = 0; + gctPOINTER database = Kernel->db->nameDatabase; + gctPOINTER mutex = Kernel->db->nameDatabaseMutex; + gctUINT i; + + gcmkHEADER_ARG("Kernel=0x%X Node=0x%X", Kernel, Node); + + gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, mutex, gcvINFINITE)); + + gcmkVERIFY_OK(gckOS_AtomDecrement(Kernel->os, Node->reference, &oldValue)); + + if (oldValue == 1 && Node->name) + { + /* Free name if exists. */ + gcmkVERIFY_OK(gckKERNEL_FreeIntegerId(database, Node->name)); + } + + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex)); + + if (oldValue == 1) + { + /* Free gcuVIDMEM_NODE. 
*/ + gcmkVERIFY_OK(gckVIDMEM_Free(Kernel, Node->node)); + gcmkVERIFY_OK(gckOS_AtomDestroy(Kernel->os, Node->reference)); +#if gcdPROCESS_ADDRESS_SPACE + gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Node->mapMutex)); +#endif + gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Node->mutex)); + + for (i = 0; i < gcvENGINE_GPU_ENGINE_COUNT; i++) + { + if (Node->sync[i].signal != gcvNULL) + { + gcmkVERIFY_OK(gckOS_DestroySignal(Kernel->os, Node->sync[i].signal)); + } + } + + /* Should not cause recursive call since tsNode->tsNode should be NULL */ + if (Node->tsNode) + { + gcmkASSERT(!Node->tsNode->tsNode); + gckVIDMEM_NODE_Dereference(Kernel, Node->tsNode); + } + + gcmkOS_SAFE_FREE(Kernel->os, Node); + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +#if defined(CONFIG_DMA_SHARED_BUFFER) + +/******************************************************************************* +** +** +** Code for dma_buf ops +** +** +*******************************************************************************/ + +#include +#include +#include + +static struct sg_table *_dmabuf_map(struct dma_buf_attachment *attachment, + enum dma_data_direction direction) +{ + struct sg_table *sgt = gcvNULL; + struct dma_buf *dmabuf = attachment->dmabuf; + gckVIDMEM_NODE nodeObject = dmabuf->priv; + gceSTATUS status = gcvSTATUS_OK; + + do + { + gcuVIDMEM_NODE_PTR node = nodeObject->node; + gctPHYS_ADDR physical = gcvNULL; + gctSIZE_T offset = 0; + gctSIZE_T bytes = 0; + + if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM) + { + physical = node->VidMem.memory->physical; + offset = node->VidMem.offset; + bytes = node->VidMem.bytes; + } + else + { + physical = node->Virtual.physical; + offset = 0; + bytes = node->Virtual.bytes; + } + + gcmkERR_BREAK(gckOS_MemoryGetSGT(nodeObject->kernel->os, physical, offset, bytes, (gctPOINTER*)&sgt)); + + if (dma_map_sg(attachment->dev, sgt->sgl, sgt->nents, direction) == 0) + { + sg_free_table(sgt); + kfree(sgt); + sgt = gcvNULL; + gcmkERR_BREAK(gcvSTATUS_GENERIC_IO); + } 
    }
    while (gcvFALSE);

    /* Tail of _dmabuf_map (head is above this chunk): sgt is gcvNULL on
     * failure, a device-mapped scatter table on success. */
    return sgt;
}

/* dma-buf attachment unmap callback: reverses _dmabuf_map by unmapping the
 * scatterlist from the importer's device and freeing the table. */
static void _dmabuf_unmap(struct dma_buf_attachment *attachment,
                          struct sg_table *sgt,
                          enum dma_data_direction direction)
{
    dma_unmap_sg(attachment->dev, sgt->sgl, sgt->nents, direction);

    sg_free_table(sgt);
    kfree(sgt);
}

/* dma-buf mmap callback: maps the node's backing pages into the caller's
 * vma via gckOS_MemoryMmap.  vm_pgoff selects the starting page; for pool
 * memory the node's offset inside the pool is added to it. */
static int _dmabuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
    gckVIDMEM_NODE nodeObject = dmabuf->priv;
    gcuVIDMEM_NODE_PTR node = nodeObject->node;
    gctPHYS_ADDR physical = gcvNULL;
    gctSIZE_T skipPages = vma->vm_pgoff;
    gctSIZE_T numPages = PAGE_ALIGN(vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
    gceSTATUS status = gcvSTATUS_OK;

    /* NOTE(review): node->VidMem.memory is read before knowing the union
     * arm; relies on gcuVIDMEM_NODE laying 'memory' out identically for
     * both arms — matches the pattern used throughout this file. */
    if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
    {
        physical = node->VidMem.memory->physical;
        skipPages += (node->VidMem.offset >> PAGE_SHIFT);
    }
    else
    {
        physical = node->Virtual.physical;
    }

    gcmkONERROR(gckOS_MemoryMmap(nodeObject->kernel->os, physical, skipPages, numPages, vma));

OnError:
    return gcmIS_ERROR(status) ? -EINVAL : 0;
}

/* dma-buf release callback: drops the reference taken when the buffer was
 * exported (see gckVIDMEM_NODE_Export). */
static void _dmabuf_release(struct dma_buf *dmabuf)
{
    gckVIDMEM_NODE nodeObject = dmabuf->priv;

    gcmkVERIFY_OK(gckVIDMEM_NODE_Dereference(nodeObject->kernel, nodeObject));
}

/* dma-buf kmap callback: returns a kernel virtual address for page 'offset'
 * of the buffer, or NULL if the mapping could not be created.  The whole
 * node is mapped and the byte offset added on success. */
static void *_dmabuf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
    gckVIDMEM_NODE nodeObject = dmabuf->priv;
    gcuVIDMEM_NODE_PTR node = nodeObject->node;
    gctINT8_PTR kvaddr = gcvNULL;
    gctPHYS_ADDR physical = gcvNULL;
    gctSIZE_T bytes = 0;
    gctSIZE_T pageCount = 0;

    /* Convert page index to byte offset. */
    offset = (offset << PAGE_SHIFT);
    if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
    {
        physical = node->VidMem.memory->physical;
        offset += node->VidMem.offset;
        bytes = node->VidMem.bytes;
    }
    else
    {
        physical = node->Virtual.physical;
        bytes = node->Virtual.bytes;
    }

    if (gcmIS_SUCCESS(gckOS_CreateKernelVirtualMapping(
            nodeObject->kernel->os, physical, bytes, (gctPOINTER*)&kvaddr, &pageCount)))
    {
        kvaddr += offset;
    }

    /* kvaddr stays gcvNULL when the mapping failed. */
    return (gctPOINTER)kvaddr;
}

static void
_dmabuf_kunmap(struct dma_buf *dmabuf, unsigned long offset, void *ptr)
{
    /* dma-buf kunmap callback: recover the base kernel address that
     * _dmabuf_kmap produced (undo the page offset, and for pool memory the
     * node offset too), then tear the kernel mapping down. */
    gckVIDMEM_NODE nodeObject = dmabuf->priv;
    gcuVIDMEM_NODE_PTR node = nodeObject->node;
    gctINT8_PTR kvaddr = (gctINT8_PTR)ptr - (offset << PAGE_SHIFT);
    gctPHYS_ADDR physical = gcvNULL;
    gctSIZE_T bytes = 0;

    if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
    {
        physical = node->VidMem.memory->physical;
        kvaddr -= node->VidMem.offset;
        bytes = node->VidMem.bytes;
    }
    else
    {
        physical = node->Virtual.physical;
        bytes = node->Virtual.bytes;
    }

    gcmkVERIFY_OK(gckOS_DestroyKernelVirtualMapping(
        nodeObject->kernel->os, physical, bytes, (gctPOINTER*)&kvaddr));
}

/* Exporter callbacks for buffers we hand out via gckVIDMEM_NODE_Export.
 * The kmap/kunmap members were renamed to map/unmap in kernel v4.12,
 * hence the version gate. */
static struct dma_buf_ops _dmabuf_ops =
{
    .map_dma_buf = _dmabuf_map,
    .unmap_dma_buf = _dmabuf_unmap,
    .mmap = _dmabuf_mmap,
    .release = _dmabuf_release,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
    .map = _dmabuf_kmap,
    .unmap = _dmabuf_kunmap,
# else
    .kmap = _dmabuf_kmap,
    .kunmap = _dmabuf_kunmap,
# endif
};
#endif

gceSTATUS
gckVIDMEM_NODE_Export(
    IN gckKERNEL Kernel,
    IN gctUINT32 Handle,
    IN gctINT32 Flags,
    OUT gctPOINTER *DmaBuf,
    OUT gctINT32 *FD
    )
{
#if defined(CONFIG_DMA_SHARED_BUFFER)
    gceSTATUS status = gcvSTATUS_OK;
    gckVIDMEM_NODE nodeObject = gcvNULL;
    gctUINT32 processID = 0;
    struct dma_buf *dmabuf = gcvNULL;

    gcmkHEADER_ARG("Kernel=%p Handle=0x%x", Kernel, Handle);

    /* Resolve the caller's handle to its node object. */
    gckOS_GetProcessID(&processID);
    gcmkONERROR(gckVIDMEM_HANDLE_Lookup(Kernel, processID, Handle, &nodeObject));

    /* Reuse a previously exported dma-buf if one is cached on the node. */
    dmabuf = nodeObject->dmabuf;
    if (!dmabuf)
    {
        gctSIZE_T bytes = 0;
        gctPHYS_ADDR physical = gcvNULL;
        gcuVIDMEM_NODE_PTR node = nodeObject->node;

        if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
        {
            physical = node->VidMem.memory->physical;
            bytes = node->VidMem.bytes;
        }
        else
        {
            physical = node->Virtual.physical;
            bytes = node->Virtual.bytes;
        }

        /* Do not actually fetch the SGT here; just probe whether the
         * allocator backing this memory supports GetSGT at all.
*/
        /* Probe call: zero offset/size and a NULL table pointer, so the
         * allocator only reports support, no table is built. */
        gcmkONERROR(gckOS_MemoryGetSGT(Kernel->os, physical, 0, 0, NULL));

        {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
            DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
            exp_info.ops = &_dmabuf_ops;
            exp_info.size = bytes;
            exp_info.flags = Flags;
            exp_info.priv = nodeObject;
            dmabuf = dma_buf_export(&exp_info);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)
            dmabuf = dma_buf_export(nodeObject, &_dmabuf_ops, bytes, Flags, NULL);
#else
            dmabuf = dma_buf_export(nodeObject, &_dmabuf_ops, bytes, Flags);
#endif
        }

        if (IS_ERR(dmabuf))
        {
            gcmkONERROR(gcvSTATUS_GENERIC_IO);
        }

        /* Reference this gckVIDMEM_NODE object; the dma-buf release
         * callback (_dmabuf_release) drops it again. */
        gckVIDMEM_NODE_Reference(Kernel, nodeObject);
        nodeObject->dmabuf = dmabuf;
    }

    if (DmaBuf)
    {
        *DmaBuf = nodeObject->dmabuf;
    }

    if (FD)
    {
        /* The returned fd owns its own reference on the dma-buf. */
        gctINT fd = dma_buf_fd(dmabuf, Flags);

        if (fd < 0)
        {
            gcmkONERROR(gcvSTATUS_GENERIC_IO);
        }

        *FD = fd;
    }

OnError:
    /* Both the success and error paths exit here; 'status' tells them apart. */
    gcmkFOOTER_ARG("*DmaBuf=%p *FD=0x%x", gcmOPT_POINTER(DmaBuf), gcmOPT_VALUE(FD));
    return status;
#else
    gcmkFATAL("The kernel did NOT support CONFIG_DMA_SHARED_BUFFER");
    return gcvSTATUS_NOT_SUPPORTED;
#endif
}


/*******************************************************************************
**
**  gckVIDMEM_NODE_Name
**
**  Naming a gckVIDMEM_NODE object.
**
**  INPUT:
**
**      gckKERNEL Kernel
**          Pointer to an gckKERNEL object.
**
**      gctUINT32 Handle
**          Handle to a gckVIDMEM_NODE object.
**
**  OUTPUT:
**
**      gctUINT32 * Name
**          Pointer to a variable receiving a name which can be pass to another
**          process.
+*/ +gceSTATUS +gckVIDMEM_NODE_Name( + IN gckKERNEL Kernel, + IN gctUINT32 Handle, + OUT gctUINT32 * Name + ) +{ + gceSTATUS status; + gckVIDMEM_NODE node = gcvNULL; + gctUINT32 name = 0; + gctUINT32 processID = 0; + gctPOINTER database = Kernel->db->nameDatabase; + gctPOINTER mutex = Kernel->db->nameDatabaseMutex; + gctBOOL acquired = gcvFALSE; + gctBOOL referenced = gcvFALSE; + gcmkHEADER_ARG("Kernel=0x%X Handle=%d", Kernel, Handle); + + gcmkVERIFY_ARGUMENT(Name != gcvNULL); + + gcmkONERROR(gckOS_GetProcessID(&processID)); + + gcmkONERROR(gckOS_AcquireMutex(Kernel->os, mutex, gcvINFINITE)); + acquired = gcvTRUE; + + gcmkONERROR(gckVIDMEM_HANDLE_LookupAndReference(Kernel, Handle, &node)); + referenced = gcvTRUE; + + if (node->name == 0) + { + /* Name this node. */ + gcmkONERROR(gckKERNEL_AllocateIntegerId(database, node, &name)); + node->name = name; + } + else + { + name = node->name; + } + + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex)); + acquired = gcvFALSE; + + gcmkVERIFY_OK(gckVIDMEM_NODE_Dereference(Kernel, node)); + + *Name = name; + + gcmkFOOTER_ARG("*Name=%d", *Name); + return gcvSTATUS_OK; + +OnError: + if (referenced) + { + gcmkVERIFY_OK(gckVIDMEM_NODE_Dereference(Kernel, node)); + } + + if (acquired) + { + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex)); + } + + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckVIDMEM_NODE_Import +** +** Import a gckVIDMEM_NODE object. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gctUINT32 Name +** Name of a gckVIDMEM_NODE object. +** +** OUTPUT: +** +** gctUINT32 * Handle +** Pointer to a variable receiving a handle represent this +** gckVIDMEM_NODE in userspace. 
+*/ +gceSTATUS +gckVIDMEM_NODE_Import( + IN gckKERNEL Kernel, + IN gctUINT32 Name, + OUT gctUINT32 * Handle + ) +{ + gceSTATUS status; + gckVIDMEM_NODE node = gcvNULL; + gctPOINTER database = Kernel->db->nameDatabase; + gctPOINTER mutex = Kernel->db->nameDatabaseMutex; + gctBOOL acquired = gcvFALSE; + gctBOOL referenced = gcvFALSE; + + gcmkHEADER_ARG("Kernel=0x%X Name=%d", Kernel, Name); + + gcmkONERROR(gckOS_AcquireMutex(Kernel->os, mutex, gcvINFINITE)); + acquired = gcvTRUE; + + /* Lookup in database to get the node. */ + gcmkONERROR(gckKERNEL_QueryIntegerId(database, Name, (gctPOINTER *)&node)); + + /* Reference the node. */ + gcmkONERROR(gckVIDMEM_NODE_Reference(Kernel, node)); + referenced = gcvTRUE; + + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex)); + acquired = gcvFALSE; + + /* Allocate a handle for current process. */ + gcmkONERROR(gckVIDMEM_HANDLE_Allocate(Kernel, node, Handle)); + + gcmkFOOTER_ARG("*Handle=%d", *Handle); + return gcvSTATUS_OK; + +OnError: + if (referenced) + { + gcmkVERIFY_OK(gckVIDMEM_NODE_Dereference(Kernel, node)); + } + + if (acquired) + { + gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex)); + } + + gcmkFOOTER(); + return status; +} + +typedef struct _gcsVIDMEM_NODE_FDPRIVATE +{ + gcsFDPRIVATE base; + gckKERNEL kernel; + gckVIDMEM_NODE node; +} +gcsVIDMEM_NODE_FDPRIVATE; + + +static gctINT +_ReleaseFdPrivate( + gcsFDPRIVATE_PTR FdPrivate + ) +{ + /* Cast private info. */ + gcsVIDMEM_NODE_FDPRIVATE * private = (gcsVIDMEM_NODE_FDPRIVATE *) FdPrivate; + + gckVIDMEM_NODE_Dereference(private->kernel, private->node); + gckOS_Free(private->kernel->os, private); + + return 0; +} + + +/******************************************************************************* +** +** gckVIDMEM_NODE_GetFd +** +** Attach a gckVIDMEM_NODE object to a native fd. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gctUINT32 Handle +** Handle to a gckVIDMEM_NODE object. 
+** +** OUTPUT: +** +** gctUINT32 * Fd +** Pointer to a variable receiving a native fd from os. +*/ +gceSTATUS +gckVIDMEM_NODE_GetFd( + IN gckKERNEL Kernel, + IN gctUINT32 Handle, + OUT gctINT * Fd + ) +{ + gceSTATUS status; + gckVIDMEM_NODE node = gcvNULL; + gctBOOL referenced = gcvFALSE; + gcsVIDMEM_NODE_FDPRIVATE * fdPrivate = gcvNULL; + gcmkHEADER_ARG("Kernel=0x%X Handle=%d", Kernel, Handle); + + /* Query and reference handle. */ + gcmkONERROR(gckVIDMEM_HANDLE_LookupAndReference(Kernel, Handle, &node)); + referenced = gcvTRUE; + + /* Allocated fd owns a reference. */ + gcmkONERROR(gckOS_Allocate( + Kernel->os, + gcmSIZEOF(gcsVIDMEM_NODE_FDPRIVATE), + (gctPOINTER *)&fdPrivate + )); + + fdPrivate->base.release = _ReleaseFdPrivate; + fdPrivate->kernel = Kernel; + fdPrivate->node = node; + + /* Allocated fd owns a reference. */ + gcmkONERROR(gckOS_GetFd("vidmem", &fdPrivate->base, Fd)); + + gcmkFOOTER_ARG("*Fd=%d", *Fd); + return gcvSTATUS_OK; + +OnError: + if (referenced) + { + gcmkVERIFY_OK(gckVIDMEM_NODE_Dereference(Kernel, node)); + } + + if (fdPrivate) + { + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, fdPrivate)); + } + + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckVIDMEM_NODE_WrapUserMemory( + IN gckKERNEL Kernel, + IN gcsUSER_MEMORY_DESC_PTR Desc, + OUT gctUINT32 * Handle, + OUT gctUINT64 * Bytes + ) +{ + gceSTATUS status = gcvSTATUS_OK; + gctBOOL found = gcvFALSE; + + gcmkHEADER_ARG("Kernel=0x%x", Kernel); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + gcmkVERIFY_ARGUMENT(Desc); + gcmkVERIFY_ARGUMENT(Handle); + gcmkVERIFY_ARGUMENT(Bytes); + +#if defined(CONFIG_DMA_SHARED_BUFFER) + if (Desc->flag & gcvALLOC_FLAG_DMABUF) + { + struct dma_buf *dmabuf; + int fd = (int)Desc->handle; + + if (fd >= 0) + { + /* Import dma buf handle. 
*/ + dmabuf = dma_buf_get(fd); + + Desc->handle = -1; + Desc->dmabuf = gcmPTR_TO_UINT64(dmabuf); + + dma_buf_put(dmabuf); + } + else + { + dmabuf = gcmUINT64_TO_PTR(Desc->dmabuf); + } + + if (dmabuf->ops == &_dmabuf_ops) + { + gctBOOL referenced = gcvFALSE; + gckVIDMEM_NODE nodeObject = dmabuf->priv; + + do + { + /* Reference the node. */ + gcmkERR_BREAK(gckVIDMEM_NODE_Reference(Kernel, nodeObject)); + referenced = gcvTRUE; + /* Allocate a handle for current process. */ + gcmkERR_BREAK(gckVIDMEM_HANDLE_Allocate(Kernel, nodeObject, Handle)); + found = gcvTRUE; + + *Bytes = (gctUINT64)dmabuf->size; + } + while (gcvFALSE); + + if (gcmIS_ERROR(status) && referenced) + { + gcmkVERIFY_OK(gckVIDMEM_NODE_Dereference(Kernel, nodeObject)); + } + } + } +#endif + + if (!found) + { + gckOS os = Kernel->os; + gcuVIDMEM_NODE_PTR node = gcvNULL; + + gcmkVERIFY_OBJECT(os, gcvOBJ_OS); + + do { + /* Allocate an gcuVIDMEM_NODE union. */ + gcmkERR_BREAK(gckOS_Allocate(os, gcmSIZEOF(gcuVIDMEM_NODE), (gctPOINTER*)&node)); + gckOS_ZeroMemory(node, gcmSIZEOF(gcuVIDMEM_NODE)); + + /* Initialize gcuVIDMEM_NODE union for virtual memory. */ + node->Virtual.kernel = Kernel; + + /* Wrap Memory. */ + gcmkERR_BREAK(gckOS_WrapMemory(os, Desc, &node->Virtual.bytes, + &node->Virtual.physical, &node->Virtual.contiguous)); + + /* Allocate handle for this video memory. */ + gcmkERR_BREAK(gckVIDMEM_NODE_Allocate( + Kernel, + node, + gcvSURF_BITMAP, + gcvPOOL_VIRTUAL, + Handle + )); + + *Bytes = (gctUINT64)node->Virtual.bytes; + } + while (gcvFALSE); + + if (gcmIS_ERROR(status) && node) + { + /* Free the structure. */ + gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, node)); + } + } + + /* Return the status. 
*/ + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckVIDMEM_SetCommitStamp( + IN gckKERNEL Kernel, + IN gceENGINE Engine, + IN gctUINT32 Handle, + IN gctUINT64 CommitStamp + ) +{ + gceSTATUS status; + gckVIDMEM_NODE node; + gctUINT32 processID; + + gckOS_GetProcessID(&processID); + + gcmkONERROR(gckVIDMEM_HANDLE_LookupAndReference(Kernel, Handle, &node)); + + node->sync[Engine].commitStamp = CommitStamp; + + gckVIDMEM_NODE_Dereference(Kernel, node); + + return gcvSTATUS_OK; + +OnError: + return status; +} + +gceSTATUS +gckVIDMEM_GetCommitStamp( + IN gckKERNEL Kernel, + IN gceENGINE Engine, + IN gctUINT32 Handle, + OUT gctUINT64_PTR CommitStamp + ) +{ + gceSTATUS status; + gckVIDMEM_NODE node; + gctUINT32 processID; + + gckOS_GetProcessID(&processID); + + gcmkONERROR(gckVIDMEM_HANDLE_LookupAndReference(Kernel, Handle, &node)); + + *CommitStamp = node->sync[Engine].commitStamp; + + gckVIDMEM_NODE_Dereference(Kernel, node); + + return gcvSTATUS_OK; + +OnError: + return status; +} + +gceSTATUS +gckVIDMEM_FindVIDMEM( + IN gckKERNEL Kernel, + IN gctUINT32 HardwareAddress, + OUT gcuVIDMEM_NODE_PTR * Node, + OUT gctUINT32_PTR PageTableEntryValue + ) +{ + gceSTATUS status = gcvSTATUS_NOT_FOUND; + gcuVIDMEM_NODE_PTR node = gcvNULL; + + gcsLISTHEAD_PTR pos; + + gcmkLIST_FOR_EACH(pos, &Kernel->db->onFaultVidmemList) + { + node = (gcuVIDMEM_NODE_PTR)gcmCONTAINEROF(pos, _gcsVIDMEM_NODE_VIRTUAL, head); + + if (HardwareAddress >= node->Virtual.addresses[Kernel->core] + && (HardwareAddress <= node->Virtual.addresses[Kernel->core] - 1 + node->Virtual.bytes) + ) + { + *Node = node; + status = gcvSTATUS_OK; + break; + } + } + + if (gcmIS_SUCCESS(status)) + { + /* Setup map for fault address. 
*/ + gctUINT32 offset = HardwareAddress - node->Virtual.addresses[Kernel->core]; + gctPHYS_ADDR_T physicalAddress; + + offset &= ~gcdMMU_PAGE_4K_MASK; + + gckOS_PhysicalToPhysicalAddress(Kernel->os, node->Virtual.physical, offset, &physicalAddress); + + gcmkSAFECASTPHYSADDRT(*PageTableEntryValue, physicalAddress); + } + + return status; +} + +/* Get the nodes of all banks. */ +gceSTATUS +gckVIDMEM_QueryNodes( + IN gckKERNEL Kernel, + IN gcePOOL Pool, + OUT gctINT32 *Count, + OUT gcuVIDMEM_NODE_PTR *Nodes + ) +{ + gceSTATUS status = gcvSTATUS_OK; + gckVIDMEM memory = gcvNULL; + + do + { + status = gckKERNEL_GetVideoMemoryPool(Kernel, Pool, &memory); + if (status != gcvSTATUS_OK) + break; + + if (memory != gcvNULL) + { + *Count = gcmCOUNTOF(memory->sentinel); + *Nodes = memory->sentinel; + } + } + while (gcvFALSE); + + return status; +} diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_feature_database.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_feature_database.h new file mode 100644 index 000000000000..cbd375cd1870 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_feature_database.h @@ -0,0 +1,82877 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. 
+* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +/*Auto created on 2018-08-14 02:22*/ +#ifndef _gc_feature_database_h_ +#define _gc_feature_database_h_ + +typedef struct +{ + /* Chip ID. */ + gctUINT32 chipID; + gctUINT32 chipVersion; + gctUINT32 productID; + gctUINT32 ecoID; + gctUINT32 customerID; + gctUINT32 patchVersion; + gctUINT32 formalRelease; + gctUINT32 Streams; + gctUINT32 TempRegisters; + gctUINT32 ThreadCount; + gctUINT32 VertexCacheSize; + gctUINT32 NumShaderCores; + gctUINT32 NumPixelPipes; + gctUINT32 VertexOutputBufferSize; + gctUINT32 BufferSize; + gctUINT32 InstructionCount; + gctUINT32 NumberOfConstants; + gctUINT32 CoreCount; + gctUINT32 VaryingCount; + gctUINT32 LocalStorageSize; + gctUINT32 L1CacheSize; + gctUINT32 InstructionMemorySize; + gctUINT32 ShaderPCLength; + gctUINT32 NumResolvePipes; + gctUINT32 USC_MAX_PAGES; + gctUINT32 RESULT_WINDOW_MAX_SIZE; + gctUINT32 NNMadPerCore; + gctUINT32 NNCoreCount; + gctUINT32 NNCoreCount_INT8; + gctUINT32 NNCoreCount_INT16; + gctUINT32 NNCoreCount_FLOAT16; + gctUINT32 NNInputBufferDepth; + gctUINT32 NNAccumBufferDepth; + gctUINT32 ClusterAliveMask; + gctUINT32 TPEngine_PwlLUTCount; + gctUINT32 TPEngine_PwlLUTSize; + gctUINT32 VIP_SRAM_SIZE; + gctUINT32 TPEngine_CoreCount; + gctUINT32 AXI_SRAM_SIZE; + gctUINT32 NN_INIMAGE_OFFSET_BITS; + gctUINT32 TP_REORDER_INIMAGE_SIZE; + gctUINT32 REG_FastClear:1; + gctUINT32 REG_SpecialAntiAliasing:1; + gctUINT32 REG_Pipe3D:1; + gctUINT32 REG_DXTTextureCompression:1; + gctUINT32 REG_DebugMode:1; + gctUINT32 REG_ZCompression:1; + gctUINT32 REG_YUV420Filter:1; + gctUINT32 REG_MSAA:1; + gctUINT32 REG_DC:1; + gctUINT32 REG_Pipe2D:1; + gctUINT32 REG_ETC1TextureCompression:1; + gctUINT32 REG_FastScaler:1; + gctUINT32 REG_HighDynamicRange:1; + gctUINT32 REG_YUV420Tiler:1; + gctUINT32 REG_ModuleCG:1; + gctUINT32 REG_MinArea:1; + gctUINT32 REG_NoEZ:1; + gctUINT32 REG_No422Texture:1; + gctUINT32 REG_BufferInterleaving:1; + gctUINT32 
REG_ByteWrite2D:1; + gctUINT32 REG_NoScaler:1; + gctUINT32 REG_YUY2Averaging:1; + gctUINT32 REG_HalfPECache:1; + gctUINT32 REG_HalfTXCache:1; + gctUINT32 REG_YUY2RenderTarget:1; + gctUINT32 REG_Mem32BitSupport:1; + gctUINT32 REG_PipeVG:1; + gctUINT32 REG_VGTS:1; + gctUINT32 REG_FE20:1; + gctUINT32 REG_ByteWrite3D:1; + gctUINT32 REG_RsYuvTarget:1; + gctUINT32 REG_FE20BitIndex:1; + gctUINT32 REG_FlipY:1; + gctUINT32 REG_DualReturnBus:1; + gctUINT32 REG_EndiannessConfig:1; + gctUINT32 REG_Texture8K:1; + gctUINT32 REG_CorrectTextureConverter:1; + gctUINT32 REG_SpecialMsaaLod:1; + gctUINT32 REG_FastClearFlush:1; + gctUINT32 REG_2DPE20:1; + gctUINT32 REG_CorrectAutoDisable:1; + gctUINT32 REG_Render8K:1; + gctUINT32 REG_TileStatus2Bits:1; + gctUINT32 REG_SeparateTileStatusWhenInterleaved:1; + gctUINT32 REG_SuperTiled32x32:1; + gctUINT32 REG_VG20:1; + gctUINT32 REG_TSExtendedCommands:1; + gctUINT32 REG_CompressionFifoFixed:1; + gctUINT32 REG_ExtraShaderInstructions0:1; + gctUINT32 REG_VGFilter:1; + gctUINT32 REG_VG21:1; + gctUINT32 REG_ShaderGetsW:1; + gctUINT32 REG_ExtraShaderInstructions1:1; + gctUINT32 REG_DefaultReg0:1; + gctUINT32 REG_MC20:1; + gctUINT32 REG_ShaderMSAASideband:1; + gctUINT32 REG_BugFixes0:1; + gctUINT32 REG_VAA:1; + gctUINT32 REG_BypassInMSAA:1; + gctUINT32 REG_HierarchicalZ:1; + gctUINT32 REG_NewTexture:1; + gctUINT32 REG_A8TargetSupport:1; + gctUINT32 REG_CorrectStencil:1; + gctUINT32 REG_EnhanceVR:1; + gctUINT32 REG_RSUVSwizzle:1; + gctUINT32 REG_V2Compression:1; + gctUINT32 REG_VGDoubleBuffer:1; + gctUINT32 REG_BugFixes1:1; + gctUINT32 REG_BugFixes2:1; + gctUINT32 REG_TextureStride:1; + gctUINT32 REG_BugFixes3:1; + gctUINT32 REG_CorrectAutoDisable1:1; + gctUINT32 REG_AutoRestartTS:1; + gctUINT32 REG_BugFixes4:1; + gctUINT32 REG_L2Windowing:1; + gctUINT32 REG_HalfFloatPipe:1; + gctUINT32 REG_PixelDither:1; + gctUINT32 REG_TwoStencilReference:1; + gctUINT32 REG_ExtendedPixelFormat:1; + gctUINT32 REG_CorrectMinMaxDepth:1; + gctUINT32 
REG_DitherAndFilterPlusAlpha2D:1; + gctUINT32 REG_BugFixes5:1; + gctUINT32 REG_New2D:1; + gctUINT32 REG_NewFloatingPointArithmetic:1; + gctUINT32 REG_TextureHorizontalAlignmentSelect:1; + gctUINT32 REG_NonPowerOfTwo:1; + gctUINT32 REG_LinearTextureSupport:1; + gctUINT32 REG_Halti0:1; + gctUINT32 REG_CorrectOverflowVG:1; + gctUINT32 REG_NegativeLogFix:1; + gctUINT32 REG_ResolveOffset:1; + gctUINT32 REG_OkToGateAxiClock:1; + gctUINT32 REG_MMU:1; + gctUINT32 REG_WideLine:1; + gctUINT32 REG_BugFixes6:1; + gctUINT32 REG_FcFlushStall:1; + gctUINT32 REG_LineLoop:1; + gctUINT32 REG_LogicOp:1; + gctUINT32 REG_SeamlessCubeMap:1; + gctUINT32 REG_SuperTiledTexture:1; + gctUINT32 REG_LinearPE:1; + gctUINT32 REG_RectPrimitive:1; + gctUINT32 REG_Composition:1; + gctUINT32 REG_CorrectAutoDisableCountWidth:1; + gctUINT32 REG_PESwizzle:1; + gctUINT32 REG_EndEvent:1; + gctUINT32 REG_S1S8:1; + gctUINT32 REG_Halti1:1; + gctUINT32 REG_RGB888:1; + gctUINT32 REG_TX_YUVAssembler:1; + gctUINT32 REG_DynamicFrequencyScaling:1; + gctUINT32 REG_TXFilter:1; + gctUINT32 REG_FullDirectFB:1; + gctUINT32 REG_OnePass2DFilter:1; + gctUINT32 REG_ThreadWalkerInPS:1; + gctUINT32 REG_TileFiller:1; + gctUINT32 REG_YUVStandard:1; + gctUINT32 REG_MultiSourceBlt:1; + gctUINT32 REG_YUVConversion:1; + gctUINT32 REG_FlushFixed2D:1; + gctUINT32 REG_Interleaver:1; + gctUINT32 REG_MixedStreams:1; + gctUINT32 REG_L2CacheFor2D420:1; + gctUINT32 REG_BugFixes7:1; + gctUINT32 REG_NoIndexPattern:1; + gctUINT32 REG_TextureTileStatus:1; + gctUINT32 REG_DecompressZ16:1; + gctUINT32 REG_BugFixes8:1; + gctUINT32 REG_DERotationStallFix:1; + gctUINT32 REG_OclOnly:1; + gctUINT32 REG_NewFeatures0:1; + gctUINT32 REG_InstructionCache:1; + gctUINT32 REG_GeometryShader:1; + gctUINT32 REG_TexCompressionSupertiled:1; + gctUINT32 REG_Generics:1; + gctUINT32 REG_BugFixes9:1; + gctUINT32 REG_FastMSAA:1; + gctUINT32 REG_WClip:1; + gctUINT32 REG_BugFixes10:1; + gctUINT32 REG_UnifiedSamplers:1; + gctUINT32 REG_BugFixes11:1; + gctUINT32 
REG_PerformanceCounters:1; + gctUINT32 REG_ExtraShaderInstructions2:1; + gctUINT32 REG_BugFixes12:1; + gctUINT32 REG_BugFixes13:1; + gctUINT32 REG_DEEnhancements1:1; + gctUINT32 REG_ACE:1; + gctUINT32 REG_TXEnhancements1:1; + gctUINT32 REG_SHEnhancements1:1; + gctUINT32 REG_SHEnhancements2:1; + gctUINT32 REG_PEEnhancements1:1; + gctUINT32 REG_DEEnhancements2:1; + gctUINT32 REG_BugFixes14:1; + gctUINT32 REG_PowerOptimizations0:1; + gctUINT32 REG_NewHZ:1; + gctUINT32 REG_BugFixes15:1; + gctUINT32 REG_DEEnhancements3:1; + gctUINT32 REG_SHEnhancements3:1; + gctUINT32 REG_SHEnhancements4:1; + gctUINT32 REG_TXEnhancements2:1; + gctUINT32 REG_FEEnhancements1:1; + gctUINT32 REG_PEEnhancements2:1; + gctUINT32 REG_PAEnhancements1:1; + gctUINT32 REG_DENoGamma:1; + gctUINT32 REG_PAEnhancements2:1; + gctUINT32 REG_DEEnhancements4:1; + gctUINT32 REG_PEEnhancements3:1; + gctUINT32 REG_HIEnhancements1:1; + gctUINT32 REG_TXEnhancements3:1; + gctUINT32 REG_SHEnhancements5:1; + gctUINT32 REG_FEEnhancements2:1; + gctUINT32 REG_BugFixes16:1; + gctUINT32 REG_DEEnhancements5:1; + gctUINT32 REG_TXEnhancements4:1; + gctUINT32 REG_PEEnhancements4:1; + gctUINT32 REG_MCEnhancements1:1; + gctUINT32 REG_Halti2:1; + gctUINT32 REG_DEMirrorRotate:1; + gctUINT32 REG_SmallMSAA:1; + gctUINT32 REG_BugFixes17:1; + gctUINT32 REG_Rasterizer2:1; + gctUINT32 REG_DualPipeOPF:1; + gctUINT32 REG_MultiSrcV2:1; + gctUINT32 REG_CSCV2:1; + gctUINT32 REG_PAEnhancements3:1; + gctUINT32 REG_BugFixes18:1; + gctUINT32 REG_Compression2D:1; + gctUINT32 REG_Probe:1; + gctUINT32 REG_MediumPrecision:1; + gctUINT32 REG_DESupertile:1; + gctUINT32 REG_BugFixes19:1; + gctUINT32 REG_SHEnhancements6:1; + gctUINT32 REG_SHEnhancements7:1; + gctUINT32 REG_BugFixes20:1; + gctUINT32 REG_DEAddress40:1; + gctUINT32 REG_MiniMMUFix:1; + gctUINT32 REG_EEZ:1; + gctUINT32 REG_BugFixes21:1; + gctUINT32 REG_ExtraVgCaps:1; + gctUINT32 REG_MultiSrcV15:1; + gctUINT32 REG_BugFixes22:1; + gctUINT32 REG_Halti3:1; + gctUINT32 
REG_TessellationShaders:1; + gctUINT32 REG_OPF9Tap:1; + gctUINT32 REG_MultiSrcV2StrQuad:1; + gctUINT32 REG_SeperateSRCAndDstCache:1; + gctUINT32 REG_Halti4:1; + gctUINT32 REG_RAWriteDepth:1; + gctUINT32 REG_AndroidOnly:1; + gctUINT32 REG_HasChipProductReg:1; + gctUINT32 REG_TXSupportDEC:1; + gctUINT32 REG_S8MSAACompression:1; + gctUINT32 REG_BugFixesIn544:1; + gctUINT32 REG_L2CacheRemove:1; + gctUINT32 REG_FEAllowRndVtxCnt:1; + gctUINT32 REG_CubeMapFL28:1; + gctUINT32 REG_TX6bitFrac:1; + gctUINT32 REG_FEAllowStallPrefetchEng:1; + gctUINT32 REG_ThirdPartyCompression:1; + gctUINT32 REG_RSS8:1; + gctUINT32 REG_MSAACoherencyCheck:1; + gctUINT32 REG_Halti5:1; + gctUINT32 REG_Evis:1; + gctUINT32 REG_BltEngine:1; + gctUINT32 REG_BugFixes23:1; + gctUINT32 REG_BugFixes24:1; + gctUINT32 REG_DEC:1; + gctUINT32 REG_VSTileNV12:1; + gctUINT32 REG_VSTileNV12_10BIT:1; + gctUINT32 RenderTarget8:1; + gctUINT32 TxLodFlowCorrection:1; + gctUINT32 FaceLod:1; + gctUINT32 MultiCoreSemaphoreStallV2:1; + gctUINT32 VMSAA:1; + gctUINT32 ChipEnableLink:1; + gctUINT32 MULTI_SRC_BLT_1_5_ENHANCEMENT:1; + gctUINT32 MULTI_SRC_BLT_BILINEAR_FILTER:1; + gctUINT32 RA_HZEZ_CLOCK_CONTROL:1; + gctUINT32 CACHE128B256BPERLINE:1; + gctUINT32 V4Compression:1; + gctUINT32 PE2D_MAJOR_SUPER_TILE:1; + gctUINT32 PE_32BPC_COLORMASK_FIX:1; + gctUINT32 ALPHA_BLENDING_OPT:1; + gctUINT32 NEW_GPIPE:1; + gctUINT32 PIPELINE_32_ATTRIBUTES:1; + gctUINT32 MSAA_SHADING:1; + gctUINT32 NO_ANISTRO_FILTER:1; + gctUINT32 NO_ASTC:1; + gctUINT32 NO_DXT:1; + gctUINT32 HWTFB:1; + gctUINT32 RA_DEPTH_WRITE_MSAA1X_FIX:1; + gctUINT32 EZHZ_CLOCKGATE_FIX:1; + gctUINT32 SH_SNAP2PAGE_FIX:1; + gctUINT32 SH_HALFDEPENDENCY_FIX:1; + gctUINT32 USC_MCFILL_FIX:1; + gctUINT32 TPG_TCPERF_FIX:1; + gctUINT32 USC_MDFIFO_OVERFLOW_FIX:1; + gctUINT32 SH_TEXLD_BARRIER_IN_CS_FIX:1; + gctUINT32 RS_NEW_BASEADDR:1; + gctUINT32 PE_8bpp_DUALPIPE_FIX:1; + gctUINT32 SH_ADVANCED_INSTR:1; + gctUINT32 SH_FLAT_INTERPOLATION_DUAL16_FIX:1; + gctUINT32 
USC_CONTINUOUS_FLUS_FIX:1; + gctUINT32 SH_SUPPORT_V4:1; + gctUINT32 SH_SUPPORT_ALPHA_KILL:1; + gctUINT32 PE_NO_ALPHA_TEST:1; + gctUINT32 TX_LOD_NEAREST_SELECT:1; + gctUINT32 SH_FIX_LDEXP:1; + gctUINT32 SUPPORT_MOVAI:1; + gctUINT32 SH_SNAP2PAGE_MAXPAGES_FIX:1; + gctUINT32 PE_RGBA16I_FIX:1; + gctUINT32 BLT_8bpp_256TILE_FC_FIX:1; + gctUINT32 PE_64bit_FENCE_FIX:1; + gctUINT32 USC_FULL_CACHE_FIX:1; + gctUINT32 TX_YUV_ASSEMBLER_10BIT:1; + gctUINT32 FE_32bit_INDEX_FIX:1; + gctUINT32 BLT_64bpp_MASKED_CLEAR_FIX:1; + gctUINT32 SECURITY:1; + gctUINT32 ROBUSTNESS:1; + gctUINT32 USC_ATOMIC_FIX:1; + gctUINT32 SH_PSO_MSAA1x_FIX:1; + gctUINT32 USC_VX_PERF_FIX:1; + gctUINT32 EVIS_NO_ABSDIFF:1; + gctUINT32 EVIS_NO_BITREPLACE:1; + gctUINT32 EVIS_NO_BOXFILTER:1; + gctUINT32 EVIS_NO_CORDIAC:1; + gctUINT32 EVIS_NO_DP32:1; + gctUINT32 EVIS_NO_FILTER:1; + gctUINT32 EVIS_NO_IADD:1; + gctUINT32 EVIS_NO_SELECTADD:1; + gctUINT32 EVIS_LERP_7OUTPUT:1; + gctUINT32 EVIS_ACCSQ_8OUTPUT:1; + gctUINT32 USC_GOS_ADDR_FIX:1; + gctUINT32 TX_8bit_UVFrac:1; + gctUINT32 TX_DESC_CACHE_CLOCKGATE_FIX:1; + gctUINT32 RSBLT_MSAA_DECOMPRESSION:1; + gctUINT32 TX_INTEGER_COORDINATE:1; + gctUINT32 DRAWID:1; + gctUINT32 PSIO_SAMPLEMASK_IN_R0ZW_FIX:1; + gctUINT32 TX_INTEGER_COORDINATE_V2:1; + gctUINT32 MULTI_CORE_BLOCK_SET_CONFIG:1; + gctUINT32 VG_RESOLVE_ENGINE:1; + gctUINT32 VG_PE_COLOR_KEY:1; + gctUINT32 VG_IM_INDEX_FORMAT:1; + gctUINT32 SNAPPAGE_CMD:1; + gctUINT32 SH_NO_INDEX_CONST_ON_A0:1; + gctUINT32 SH_NO_ONECONST_LIMIT:1; + gctUINT32 SH_IMG_LDST_ON_TEMP:1; + gctUINT32 COMPUTE_ONLY:1; + gctUINT32 SH_IMG_LDST_CLAMP:1; + gctUINT32 SH_ICACHE_ALLOC_COUNT_FIX:1; + gctUINT32 SH_ICACHE_PREFETCH:1; + gctUINT32 PE2D_SEPARATE_CACHE:1; + gctUINT32 VG_AYUV_INPUT_OUTPUT:1; + gctUINT32 VG_DOUBLE_IMAGE:1; + gctUINT32 VG_RECTANGLE_STRIPE_MODE:1; + gctUINT32 VG_MMU:1; + gctUINT32 VG_IM_FILTER:1; + gctUINT32 VG_IM_YUV_PACKET:1; + gctUINT32 VG_IM_YUV_PLANAR:1; + gctUINT32 VG_PE_YUV_PACKET:1; + gctUINT32 VG_COLOR_PRECISION_8_BIT:1; 
+ gctUINT32 PE_MSAA_OQ_FIX:1; + gctUINT32 PSIO_MSAA_CL_FIX:1; + gctUINT32 USC_DEFER_FILL_FIX:1; + gctUINT32 SH_CLOCK_GATE_FIX:1; + gctUINT32 FE_NEED_DUMMYDRAW:1; + gctUINT32 PE2D_LINEAR_YUV420_OUTPUT:1; + gctUINT32 PE2D_LINEAR_YUV420_10BIT:1; + gctUINT32 MULTI_CLUSTER:1; + gctUINT32 VG_TS_CULLING:1; + gctUINT32 VG_FP25:1; + gctUINT32 SH_MULTI_WG_PACK:1; + gctUINT32 SH_DUAL16_SAMPLEMASK_ZW:1; + gctUINT32 TPG_TRIVIAL_MODE_FIX:1; + gctUINT32 TX_ASTC_MULTISLICE_FIX:1; + gctUINT32 FE_ROBUST_FIX:1; + gctUINT32 SH_GPIPE_ACCESS_FULLTEMPS:1; + gctUINT32 PSIO_INTERLOCK:1; + gctUINT32 PA_WIDELINE_FIX:1; + gctUINT32 WIDELINE_HELPER_FIX:1; + gctUINT32 G2D_3rd_PARTY_COMPRESSION_1_1:1; + gctUINT32 TX_FLUSH_L1CACHE:1; + gctUINT32 PE_DITHER_FIX2:1; + gctUINT32 G2D_DEC400:1; + gctUINT32 SH_TEXLD_U_FIX:1; + gctUINT32 MC_FCCACHE_BYTEMASK:1; + gctUINT32 SH_MULTI_WG_PACK_FIX:1; + gctUINT32 DC_OVERLAY_SCALING:1; + gctUINT32 DC_SOURCE_ROTATION:1; + gctUINT32 DC_TILED:1; + gctUINT32 DC_YUV_L1:1; + gctUINT32 DC_D30_OUTPUT:1; + gctUINT32 DC_MMU:1; + gctUINT32 DC_COMPRESSION:1; + gctUINT32 DC_QOS:1; + gctUINT32 PE_ADVANCE_BLEND_PART0:1; + gctUINT32 FE_PATCHLIST_FETCH_FIX:1; + gctUINT32 RA_CG_FIX:1; + gctUINT32 EVIS_VX2:1; + gctUINT32 NN_FLOAT:1; + gctUINT32 DEC400:1; + gctUINT32 LS_SUPPORT_PERCOMP_DEPENDENCY:1; + gctUINT32 TP_ENGINE:1; + gctUINT32 MULTI_CORE_BLOCK_SET_CONFIG2:1; + gctUINT32 PE_VMSAA_COVERAGE_CACHE_FIX:1; + gctUINT32 SECURITY_AHB:1; + gctUINT32 MULTICORE_SEMAPHORESTALL_V3:1; + gctUINT32 SMALLBATCH:1; + gctUINT32 SH_CMPLX:1; + gctUINT32 SH_IDIV0_SWZL_EHS:1; + gctUINT32 TX_LERP_LESS_BIT:1; + gctUINT32 SH_GM_ENDIAN:1; + gctUINT32 SH_GM_USC_UNALLOC:1; + gctUINT32 SH_END_OF_BB:1; + gctUINT32 VIP_V7:1; + gctUINT32 TX_BORDER_CLAMP_FIX:1; + gctUINT32 SH_IMG_LD_LASTPIXEL_FIX:1; + gctUINT32 ASYNC_BLT:1; + gctUINT32 ASYNC_FE_FENCE_FIX:1; + gctUINT32 PSCS_THROTTLE:1; + gctUINT32 SEPARATE_LS:1; + gctUINT32 MCFE:1; + gctUINT32 WIDELINE_TRIANGLE_EMU:1; + gctUINT32 VG_RESOLUTION_8K:1; + 
gctUINT32 FENCE_32BIT:1; + gctUINT32 FENCE_64BIT:1; + gctUINT32 NN_INTERLEVE8:1; + gctUINT32 TP_REORDER:1; + gctUINT32 PE_DEPTH_ONLY_OQFIX:1; + gctUINT32 TP_LRN:1; + gctUINT32 TX_SEAMLESS_CUBE:1; + gctUINT32 TX_SNORM_SUPPORT:1; + gctUINT32 TP_MAX_POOLING_STRIDE1:1; + gctUINT32 SH_SCATTER_GATHER:1; + gctUINT32 HWMANAGED_LS:1; + gctUINT32 NN_FP16_ALU:1; + gctUINT32 NN_INT16_ALU:1; + gctUINT32 TP_ROI_POOLING:1; + gctUINT32 NN_ZDP3:1; + gctUINT32 NN_ZDP6:1; + gctUINT32 NN_XYDP9:1; + gctUINT32 NN_INT8_SCALE:1; + gctUINT32 NN_POWER_ISOLATION:1; + gctUINT32 SWTILING_PHASE1:1; + gctUINT32 SH_IMAGE_ENABLE_FIX:1; + gctUINT32 TF_QUANTIZATION:1; + gctUINT32 MSAA_FRAGMENT_OPERATION:1; + gctUINT32 TP_SIMPLE_INT16:1; + gctUINT32 TP_REAL_INT16:1; + gctUINT32 NN_FIRST_PIXEL_POOLING:1; + gctUINT32 SWTILING_PHASE2:1; + gctUINT32 VG_FORMAT_ARGB2222:1; + gctUINT32 PE_TILE_CACHE_FLUSH_FIX:1; + gctUINT32 BLT_YUV_OUTPUT:1; + gctUINT32 NN_STRIDE_SUPPORT:1; + gctUINT32 NN_XYDP6:1; + gctUINT32 TP_REORDER_FIX:1; + gctUINT32 NN_CONV1x1_PERF_FIX:1; + gctUINT32 NN_CACHELINE_MODE_PERF_FIX:1; + gctUINT32 NN_PER3DTILE_BUBBLE_FIX:1; + gctUINT32 SH_IO_CG_FIX:1; + gctUINT32 SWTILING_PHASE3:1; + gctUINT32 USC_STAY_LRU:1; + gctUINT32 NN_NONZERO_MIRROR_BORDER:1; + gctUINT32 NN_COEF_DECOMPRESS_PERF2X:1; + gctUINT32 INPUT_4BIT:1; + gctUINT32 COEF_COMPRESSION_ENHANCEMENT:1; + gctUINT32 NN_ZXDP3_KERNEL_READ_CONFLICT_FIX:1; + gctUINT32 NN_ZDP3_NO_COMPRESS_FIX:1; + gctUINT32 NN_ASYNC_COPY_PERF_FIX:1; + gctUINT32 OCB_COUNTER:1; + gctUINT32 PE_SWIZZLE:1; + gctUINT32 SH_ROBUSTNESS_FIX:1; + gctUINT32 USC_ATOMIC_FIX2:1; + gctUINT32 PE_A8B8G8R8:1; + gctUINT32 MULTIVIEW_RENDER:1; + gctUINT32 FE_DRAW_DIRECT:1; + gctUINT32 TX_VKBORDER_MODE:1; + gctUINT32 TX_UNNORMALIZED_COORD:1; +} gcsFEATURE_DATABASE; + +static gcsFEATURE_DATABASE gChipInfo[] = { + /* dc0000_5550 */ + { + 0x0, /* ChipID */ + 0x5550, /* ChipRevision */ + 0x12000000, /* ProductID */ + 0x0, /* EcoID */ + 0x300, /* CustomerID */ + 0x0, /* PatchVersion */ 
+ 0x0, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x1, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D 
*/ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x1, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 
*/ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x0, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* 
gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 
0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* 
gcFEATURE_BIT_REG_MediumPrecision */ + 0x1, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 
0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, 
/* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* 
gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x1, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x1, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x1, /* gcFEATURE_BIT_DC_MMU */ + 0x1, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x1, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* 
gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* 
gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* dc0000_5560 */ + { + 0x0, /* ChipID */ + 0x5560, /* ChipRevision */ + 0x2000002, /* ProductID */ + 0x0, /* EcoID */ + 0x302, /* CustomerID */ + 0x3, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize 
*/ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x1, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x1, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* 
gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x0, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* 
gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* 
gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x1, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* 
gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* 
gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* 
gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* 
gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x1, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x1, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x1, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU 
*/ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc200_4650 */ + { + 0x200, /* ChipID */ + 0x4650, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* 
gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x1, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x1, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* 
gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* 
gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x0, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* 
gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* 
gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x1, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* 
gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* 
gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* 
gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* 
gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* 
gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* 
gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc200_4621 */ + { + 0x200, /* ChipID */ + 0x4621, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE 
*/ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x1, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x1, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* 
gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x0, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x0, 
/* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x0, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x0, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x0, /* gcFEATURE_BIT_REG_MMU */ + 0x0, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x0, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x0, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x0, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x0, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x0, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x0, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* 
gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* 
gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 
0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* 
gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ 
+ 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 
0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* 
gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc300_4650 */ + { + 0x300, /* ChipID */ + 0x4650, /* ChipRevision */ + 0x5203, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x5, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x4, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize 
*/ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x1, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 
0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x1, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* 
gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* 
gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, 
/* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x1, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* 
gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* 
gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* 
gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* 
gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* 
gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc300_4650_guoke */ + { + 0x300, /* ChipID */ + 0x4650, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x4, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x4, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* 
gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x1, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x1, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 
0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 
0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* 
gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* 
gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x1, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* 
gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* 
gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 
0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* 
gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* 
gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc300_4_6_6_rc0 */ + { + 0x300, /* ChipID */ + 0x4660, /* ChipRevision */ + 0x5203, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x4, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* 
gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x1, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x1, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* 
gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* 
gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* 
gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* 
gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x1, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* 
gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* 
gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x1, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* 
gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* 
gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* 
gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc320_5007 */ + { + 0x320, /* ChipID */ + 0x5007, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x400, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x4, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x8, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0x100, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x1, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* 
gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x1, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* 
gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x0, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x0, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x0, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x0, /* gcFEATURE_BIT_REG_MMU */ + 0x0, /* 
gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x0, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x0, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x0, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x0, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x0, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x0, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* 
gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* 
gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* 
gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 
0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* 
gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* 
gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* 
gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc320_5220 */ + { + 0x320, /* ChipID */ + 0x5220, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* 
gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x1, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* 
gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x1, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* 
gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, 
/* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x0, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x1, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x0, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x0, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* 
gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ 
+ 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* 
gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* 
gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT 
*/ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* 
gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 
0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc320_5303 */ + { + 0x320, /* ChipID */ + 0x5303, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* 
gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x1, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x1, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* 
gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing 
*/ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x1, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 
0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* 
gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* 
gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* 
gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX 
*/ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* 
gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* 
gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc320_5303_1 */ + { + 0x320, /* ChipID */ + 0x5303, /* ChipRevision */ + 0x5202, /* ProductID */ + 0x1, /* EcoID */ + 0x0, /* CustomerID */ + 0x4, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* 
gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x1, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x1, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 
0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* 
gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x1, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* 
gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, 
/* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* 
gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* 
gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 
0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* 
gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* 
gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc320_5340 */ + { + 0x320, /* ChipID */ + 0x5340, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x4, /* PatchVersion */ + 
0x1, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x1, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ 
+ 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x1, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 
0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* 
gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x1, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 
0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x1, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* 
gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 
0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, 
/* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* 
gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* 
gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* 
gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc320c_5341 */ + { + 0x320, /* ChipID */ + 0x5341, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0xc, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 
0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* 
gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x1, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes2 */ + 0x1, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x0, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x0, /* 
gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x0, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x0, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* 
gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* 
gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x1, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x1, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* 
gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* 
gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* 
gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU 
*/ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc320_5341 */ + { + 0x320, /* ChipID */ + 0x5341, /* ChipRevision */ + 0x3202, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* 
gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x1, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x1, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* 
gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* 
gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* 
gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* 
gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* 
gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* 
gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* 
gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* 
gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* 
gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* 
gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc520l_5_3_5_rc0 */ + { + 0x320, /* ChipID */ + 0x5350, /* ChipRevision */ + 0x5202, /* ProductID */ + 0x0, /* EcoID */ + 0x206, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* 
gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x1, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x1, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* 
gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, 
/* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x1, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* 
gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* 
gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis 
*/ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* 
gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 
0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* 
gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* 
gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc355_v121_rc5 */ + { + 0x355, /* ChipID */ + 0x1215, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* 
gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* 
gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x1, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x1, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x1, /* gcFEATURE_BIT_REG_PipeVG */ + 0x1, /* gcFEATURE_BIT_REG_VGTS */ + 0x1, /* gcFEATURE_BIT_REG_FE20 */ + 0x0, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x0, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x0, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x0, /* gcFEATURE_BIT_REG_Render8K */ + 0x0, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x0, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x1, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x0, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x1, /* gcFEATURE_BIT_REG_VG21 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* 
gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x0, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x0, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x0, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x1, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x1, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x0, /* gcFEATURE_BIT_REG_PixelDither */ + 0x0, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x0, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x0, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x0, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x0, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x0, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x0, /* gcFEATURE_BIT_REG_MMU */ + 0x0, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x0, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* 
gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x0, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x0, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x0, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x0, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x0, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 
0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* 
gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* 
gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* 
gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* 
gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* 
gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* 
gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc355_v121x */ + { + 0x355, /* ChipID */ + 0x1217, /* ChipRevision */ + 0x3003550, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, 
/* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x1, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* 
gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x1, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x1, /* gcFEATURE_BIT_REG_PipeVG */ + 0x1, /* gcFEATURE_BIT_REG_VGTS */ + 0x1, /* gcFEATURE_BIT_REG_FE20 */ + 0x0, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x0, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x0, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x0, /* gcFEATURE_BIT_REG_Render8K */ + 0x0, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x0, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x1, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x0, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x1, /* gcFEATURE_BIT_REG_VG21 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x0, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x0, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x0, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x1, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* 
gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x1, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x0, /* gcFEATURE_BIT_REG_PixelDither */ + 0x0, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x0, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x0, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x0, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x0, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x0, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x0, /* gcFEATURE_BIT_REG_MMU */ + 0x0, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x0, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x0, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* 
gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x0, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x0, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x0, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* 
gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* 
gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* 
gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* 
gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x1, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x1, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x1, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x1, /* gcFEATURE_BIT_VG_MMU */ + 0x1, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x1, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x1, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x1, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* 
gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ 
+ 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* 
gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc355_8Kx8K */ + { + 0x355, /* ChipID */ + 0x1217, /* ChipRevision */ + 0x3003550, /* ProductID */ + 0x0, /* EcoID */ + 0x407, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* 
gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x1, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x1, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x1, /* gcFEATURE_BIT_REG_PipeVG */ + 0x1, /* gcFEATURE_BIT_REG_VGTS */ + 0x1, /* gcFEATURE_BIT_REG_FE20 */ + 0x0, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x0, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* 
gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x0, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x0, /* gcFEATURE_BIT_REG_Render8K */ + 0x0, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x0, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x1, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x0, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x1, /* gcFEATURE_BIT_REG_VG21 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x0, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x0, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x0, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x1, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x1, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x0, /* gcFEATURE_BIT_REG_PixelDither */ + 0x0, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x0, /* 
gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x0, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x0, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x0, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x0, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x0, /* gcFEATURE_BIT_REG_MMU */ + 0x0, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x0, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x0, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x0, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x0, /* 
gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x0, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x0, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* 
gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* 
gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* 
gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x1, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x1, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x1, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x1, /* gcFEATURE_BIT_VG_MMU */ + 0x1, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x1, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x1, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x1, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 
*/ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x1, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* 
gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* 
gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc400_4633 */ + { + 0x400, /* ChipID */ + 0x4633, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x4, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x80, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x80, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* 
gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x0, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x1, /* gcFEATURE_BIT_REG_MinArea */ + 0x1, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x1, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* 
gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix 
*/ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x0, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x0, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x0, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x0, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x0, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x0, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x0, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, 
/* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* 
gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 
0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* 
gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* 
gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* 
gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* 
gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc600_4633 */ + { + 0x600, /* ChipID */ + 0x4633, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x4, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x80, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 
0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x80, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* 
gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* 
gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x0, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x0, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* 
gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x0, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x0, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x0, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x0, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x0, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, 
/* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* 
gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* 
gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* 
gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* 
gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* 
gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc400_4645 */ + { + 0x400, /* ChipID */ + 0x4645, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x4, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x4, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x80, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x80, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0x240, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes 
*/ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x1, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x1, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS 
*/ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* 
gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x0, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* 
gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* 
gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* 
gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* 
gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 
0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x1, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* 
gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* 
gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc400L_0x465x */ + { + 0x400, /* ChipID */ + 0x4652, /* ChipRevision */ + 0x70001, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x7, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x4, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x80, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x80, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* 
gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x0, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x0, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x0, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x1, /* gcFEATURE_BIT_REG_MinArea */ + 0x1, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x1, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x1, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* 
gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* 
gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* 
gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* 
gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* 
gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* 
gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* 
gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* 
gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* 
gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000nano_0x4652 */ + { + 0x400, /* ChipID */ + 0x4652, /* ChipRevision */ + 0x70001, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x7, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x4, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x80, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x80, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x0, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* 
gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x0, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x0, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x1, /* gcFEATURE_BIT_REG_MinArea */ + 0x1, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x1, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x1, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* 
gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* 
gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* 
gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* 
gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* 
gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 
0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* 
gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* 
gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* 
gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000nano_0x4652 */ + { + 0x400, /* ChipID */ + 0x4652, /* ChipRevision */ + 0x70001, /* ProductID */ + 0x0, /* EcoID */ + 0x100, /* CustomerID */ + 0x9, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x4, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x80, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x80, /* 
gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x0, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x0, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x0, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x1, /* gcFEATURE_BIT_REG_MinArea */ + 0x1, /* 
gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x1, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x1, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* 
gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, 
/* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* 
gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ 
+ 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* 
gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* 
gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT 
*/ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* 
gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 
0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc420_5325 */ + { + 0x420, /* ChipID */ + 0x5325, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x2, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* 
gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x1, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x1, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* 
gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing 
*/ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x1, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 
0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x1, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* 
gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* 
gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* 
gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX 
*/ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* 
gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* 
gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc420_5336 */ + { + 0x420, /* ChipID */ + 0x5336, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x3, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* 
gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x1, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x1, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 
0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* 
gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x1, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* 
gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x1, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, 
/* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x1, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x1, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x1, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* 
gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* 
gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 
0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* 
gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* 
gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc420cpd_533rc7a */ + { + 0x420, /* ChipID */ + 0x5337, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x1, /* PatchVersion 
*/ + 0x0, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x1, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* 
gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x1, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* 
gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* 
gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x1, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x1, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 
*/ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x1, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x1, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* 
gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x1, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x1, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ 
+ 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* 
gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* 
gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* 
gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* 
gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc428_5421 */ + { + 0x428, /* ChipID */ + 0x5421, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x1, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 
0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x0, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x0, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x0, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x1, /* gcFEATURE_BIT_REG_NoEZ */ + 0x1, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x0, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* 
gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x0, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x0, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x0, /* gcFEATURE_BIT_REG_FlipY */ + 0x0, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x0, /* gcFEATURE_BIT_REG_Texture8K */ + 0x0, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x0, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x0, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x0, /* gcFEATURE_BIT_REG_Render8K */ + 0x0, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x0, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x0, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x0, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x0, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x0, /* 
gcFEATURE_BIT_REG_BugFixes1 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x0, /* gcFEATURE_BIT_REG_PixelDither */ + 0x0, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x0, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x0, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x0, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x0, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x0, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x0, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* 
gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x1, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x0, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x0, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 
0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x1, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* 
gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* 
gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* 
gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* 
gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU 
*/ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc428c_5_4_2_rc3a */ + { + 0x428, /* ChipID */ + 0x5423, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x1, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* 
gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x0, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x0, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x0, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x1, /* gcFEATURE_BIT_REG_NoEZ */ + 0x1, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x0, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x0, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x0, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x0, /* gcFEATURE_BIT_REG_FlipY */ + 0x0, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* 
gcFEATURE_BIT_REG_EndiannessConfig */ + 0x0, /* gcFEATURE_BIT_REG_Texture8K */ + 0x0, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x0, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x0, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x0, /* gcFEATURE_BIT_REG_Render8K */ + 0x0, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x0, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x0, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x0, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x0, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x0, /* gcFEATURE_BIT_REG_PixelDither */ + 0x0, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* 
gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x0, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x0, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x0, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x0, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x0, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x0, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x1, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x0, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* 
gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x0, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* 
gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x1, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x1, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x1, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x1, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x1, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x1, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* 
gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* 
gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* 
gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* 
gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* 
gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* 
gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc520_5341 */ + { + 0x520, /* ChipID */ + 0x5341, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE 
*/ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x1, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x1, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* 
gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, 
/* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* 
gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* 
gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 
0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* 
gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ 
+ 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 
0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* 
gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc520l_5341_rc1b */ + { + 0x520, /* ChipID */ + 0x5341, /* ChipRevision */ + 0x5202, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* 
gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x1, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* 
gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x1, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* 
gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* 
gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* 
gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* 
gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* 
gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ 
+ 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ 
+ 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, 
/* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* 
gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc520_5540_rc0 */ + { + 0x520, /* ChipID */ + 0x5540, /* ChipRevision */ + 0x5200, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength 
*/ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x0, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x0, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x0, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x1, /* gcFEATURE_BIT_REG_NoEZ */ + 0x1, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x0, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* 
gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x0, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x0, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x0, /* gcFEATURE_BIT_REG_FlipY */ + 0x0, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x0, /* gcFEATURE_BIT_REG_Texture8K */ + 0x0, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x0, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x0, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x0, /* gcFEATURE_BIT_REG_Render8K */ + 0x0, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x0, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x0, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x0, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x0, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* 
gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x0, /* gcFEATURE_BIT_REG_PixelDither */ + 0x0, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x0, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x0, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x0, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x0, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x0, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x0, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* 
gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x1, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x0, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x1, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x0, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* 
gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x1, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x1, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x1, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x1, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x1, /* 
gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x1, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x1, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x1, /* gcFEATURE_BIT_REG_DEC */ + 0x1, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* 
gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* 
gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* 
gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* 
gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* 
gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc520l_5_3_4_rc2b */ + { + 0x520, /* ChipID */ + 0x5342, /* ChipRevision */ + 0x5202, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x2, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* 
gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x1, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x1, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x1, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* 
gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* 
gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* 
gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* 
gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* 
gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x1, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x1, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* 
gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* 
gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* 
gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* 
gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc520c_5_5_0 */ + { + 0x520, /* ChipID */ + 0x5501, /* ChipRevision */ + 0x5200, /* ProductID */ + 0x0, /* EcoID */ + 0x3, /* CustomerID */ + 0x2, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x0, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* 
gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x0, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x0, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x1, /* gcFEATURE_BIT_REG_NoEZ */ + 0x1, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x1, /* gcFEATURE_BIT_REG_NoScaler */ + 0x0, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x0, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x0, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x0, /* gcFEATURE_BIT_REG_FlipY */ + 0x0, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x0, /* gcFEATURE_BIT_REG_Texture8K */ + 0x0, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x0, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x0, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x0, /* gcFEATURE_BIT_REG_Render8K */ + 0x0, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x0, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x0, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* 
gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x0, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x0, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x0, /* gcFEATURE_BIT_REG_PixelDither */ + 0x0, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x0, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x0, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x0, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x0, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x0, /* 
gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x0, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x0, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x1, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x0, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* 
gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x1, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x1, /* 
gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x1, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x1, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x1, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x1, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x1, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x1, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* 
gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x1, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 
0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x1, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* 
gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* 
gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* 
gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc520c_5_5_4_rc1 */ + { + 0x520, /* ChipID */ + 0x5541, /* ChipRevision */ + 0x5200, /* ProductID */ + 0x0, /* EcoID */ + 0x202, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* 
gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x0, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x0, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x0, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x1, /* 
gcFEATURE_BIT_REG_NoEZ */ + 0x1, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x0, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x0, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x0, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x0, /* gcFEATURE_BIT_REG_FlipY */ + 0x0, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x0, /* gcFEATURE_BIT_REG_Texture8K */ + 0x0, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x0, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x0, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x0, /* gcFEATURE_BIT_REG_Render8K */ + 0x0, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x0, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x0, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* 
gcFEATURE_BIT_REG_A8TargetSupport */ + 0x0, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x0, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x0, /* gcFEATURE_BIT_REG_PixelDither */ + 0x0, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x0, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x0, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x0, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x0, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x0, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x0, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, 
/* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x1, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x0, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x1, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x0, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* 
gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x1, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x1, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x1, /* gcFEATURE_BIT_REG_DEAddress40 */ 
+ 0x1, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x1, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x1, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x1, /* gcFEATURE_BIT_REG_DEC */ + 0x1, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* 
gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* 
gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT 
*/ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* 
gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 
0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc520sp_5_5_2_rc0a */ + { + 0x520, /* ChipID */ + 0x5520, /* ChipRevision */ + 0x5200, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x1, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* 
gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x0, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x0, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x0, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x1, /* gcFEATURE_BIT_REG_NoEZ */ + 0x1, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x1, /* gcFEATURE_BIT_REG_NoScaler */ + 0x0, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x0, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* 
gcFEATURE_BIT_REG_RsYuvTarget */ + 0x0, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x0, /* gcFEATURE_BIT_REG_FlipY */ + 0x0, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x0, /* gcFEATURE_BIT_REG_Texture8K */ + 0x0, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x0, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x0, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x0, /* gcFEATURE_BIT_REG_Render8K */ + 0x0, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x0, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x0, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x0, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x0, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing 
*/ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x0, /* gcFEATURE_BIT_REG_PixelDither */ + 0x0, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x0, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x0, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x0, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x0, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x0, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x0, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x1, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 
0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x0, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x0, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* 
gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x1, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x1, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x1, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x1, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x1, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x1, /* 
gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x1, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* 
gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX 
*/ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* 
gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* 
gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc520_v552_rc1 */ + { + 0x520, /* ChipID */ + 0x5521, /* ChipRevision */ + 0x5200, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x1, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* 
gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x0, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x0, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x0, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x1, /* gcFEATURE_BIT_REG_NoEZ */ + 0x1, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x1, /* gcFEATURE_BIT_REG_NoScaler */ + 0x0, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x0, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x0, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x0, /* gcFEATURE_BIT_REG_FlipY */ + 0x0, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x0, /* gcFEATURE_BIT_REG_Texture8K */ + 0x0, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x0, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x0, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x0, /* gcFEATURE_BIT_REG_Render8K */ + 0x0, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 
0x0, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x0, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x0, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x0, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x0, /* gcFEATURE_BIT_REG_PixelDither */ + 0x0, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x0, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x0, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* 
gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x0, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x0, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x0, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x0, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x1, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x0, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x0, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* 
gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, 
/* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x1, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x1, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x1, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x1, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x1, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x1, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x1, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* 
gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* 
gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 
0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* 
gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* 
gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc520_5_5_3_rc2a */ + { + 0x520, /* ChipID */ + 0x5532, /* ChipRevision */ + 0x5200, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x1, /* 
PatchVersion */ + 0x0, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x0, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x0, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* 
gcFEATURE_BIT_REG_Pipe2D */ + 0x0, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x1, /* gcFEATURE_BIT_REG_NoEZ */ + 0x1, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x0, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x0, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x0, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x0, /* gcFEATURE_BIT_REG_FlipY */ + 0x0, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x0, /* gcFEATURE_BIT_REG_Texture8K */ + 0x0, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x0, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x0, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x0, /* gcFEATURE_BIT_REG_Render8K */ + 0x0, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x0, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x0, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* 
gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x0, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x0, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x0, /* gcFEATURE_BIT_REG_PixelDither */ + 0x0, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x0, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x0, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x0, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x0, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x0, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x0, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* 
gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x1, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x0, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x1, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x0, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 
*/ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x1, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* 
gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x1, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x1, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x1, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x1, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x1, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x1, /* gcFEATURE_BIT_REG_DEC */ + 0x1, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ 
+ 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* 
gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* 
gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* 
gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* 
gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc600L_0x465x */ + { + 0x600, /* ChipID */ + 0x4652, /* ChipRevision */ + 0x70005, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x7, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x4, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* 
gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x0, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x1, /* gcFEATURE_BIT_REG_MinArea */ + 0x1, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging 
*/ + 0x1, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x1, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer 
*/ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* 
gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 
0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* 
gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* 
gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* 
gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* 
gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU 
*/ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000nanoultra_4_6_5_rc3a */ + { + 0x600, /* ChipID */ + 0x4653, /* ChipRevision */ + 0x70005, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x1, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x4, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, 
/* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x1, /* gcFEATURE_BIT_REG_MinArea */ + 0x1, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x1, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x1, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* 
gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* 
gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* 
gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* 
gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* 
gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* 
gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* 
gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* 
gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x1, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* 
gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* 
gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000nanoultra_4_6_5_rc3b */ + { + 0x600, /* ChipID */ + 0x4653, /* ChipRevision */ + 0x70005, /* ProductID */ + 0x0, /* EcoID */ + 0x101, /* CustomerID */ + 0x2, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x4, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* 
gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x1, /* gcFEATURE_BIT_REG_MinArea */ + 0x1, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x1, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x1, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* 
gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, 
/* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* 
gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* 
gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis 
*/ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* 
gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 
0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* 
gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x1, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* 
gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000nanoultra_4_6_5_rc3e */ + { + 0x600, /* ChipID */ + 0x4653, /* ChipRevision */ + 0x70005, /* ProductID */ + 0x0, /* EcoID */ + 0x102, /* CustomerID */ + 0x5, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x4, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* 
gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* 
gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x1, /* gcFEATURE_BIT_REG_MinArea */ + 0x1, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x1, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x1, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x0, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* 
gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* 
gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 
0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* 
gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* 
gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* 
gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* 
gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* 
gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* 
gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc620_5_5_3_rc0 */ + { + 0x620, /* ChipID */ + 0x5530, /* ChipRevision */ + 0x6200, /* ProductID */ + 0x0, /* EcoID */ + 0x200, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 
0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x0, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x0, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x0, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x1, /* gcFEATURE_BIT_REG_NoEZ */ + 0x1, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x0, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* 
gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x0, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x0, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x0, /* gcFEATURE_BIT_REG_FlipY */ + 0x0, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x0, /* gcFEATURE_BIT_REG_Texture8K */ + 0x0, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x0, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x0, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x0, /* gcFEATURE_BIT_REG_Render8K */ + 0x0, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x0, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x0, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x0, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x0, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* 
gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x0, /* gcFEATURE_BIT_REG_PixelDither */ + 0x0, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x0, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x0, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x0, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x0, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x0, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x0, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* 
gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x1, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x0, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x0, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* 
gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x1, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x1, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x1, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x1, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* 
gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x1, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* 
gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* 
gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x1, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x1, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* 
gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x1, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ 
+ 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* 
gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc620_5_5_5_rc0d */ + { + 0x620, /* ChipID */ + 0x5550, /* ChipRevision */ + 0x6200, /* ProductID */ + 0x0, /* EcoID */ + 0x201, /* CustomerID */ + 0x4, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* 
gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x0, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x0, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x0, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x1, /* gcFEATURE_BIT_REG_NoEZ */ + 0x1, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x1, /* gcFEATURE_BIT_REG_NoScaler */ + 0x0, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x0, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x0, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x0, /* gcFEATURE_BIT_REG_FlipY */ + 0x0, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x0, /* 
gcFEATURE_BIT_REG_Texture8K */ + 0x0, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x0, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x0, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x0, /* gcFEATURE_BIT_REG_Render8K */ + 0x0, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x0, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x0, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x0, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x0, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x0, /* gcFEATURE_BIT_REG_PixelDither */ + 0x0, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x0, /* 
gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x0, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x0, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x0, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x0, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x0, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x1, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x0, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x0, /* 
gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x0, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* 
gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x1, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x1, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x1, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x1, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x1, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x1, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* 
gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* 
gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x1, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x1, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 
*/ + 0x1, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* 
gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* 
gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc620tpc_5_5_6_rc0a */ + { + 0x620, /* ChipID */ + 0x5560, /* ChipRevision */ + 0x6200, /* ProductID */ + 0x0, /* EcoID */ + 0x200, /* CustomerID */ + 0x1, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x1, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x0, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* 
gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x0, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x0, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x0, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x0, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x1, /* gcFEATURE_BIT_REG_NoEZ */ + 0x1, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x1, /* gcFEATURE_BIT_REG_NoScaler */ + 0x0, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x0, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x0, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x0, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x0, /* gcFEATURE_BIT_REG_FlipY */ + 0x0, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x0, /* gcFEATURE_BIT_REG_Texture8K */ + 0x0, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x0, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x0, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x0, /* gcFEATURE_BIT_REG_Render8K */ + 0x0, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x0, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* 
gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x0, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x0, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x0, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x0, /* gcFEATURE_BIT_REG_PixelDither */ + 0x0, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x0, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x1, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x0, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x0, /* gcFEATURE_BIT_REG_NegativeLogFix 
*/ + 0x0, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x0, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x0, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x1, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x0, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x0, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, 
/* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* 
gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x1, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x1, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x1, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x1, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x1, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x1, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x1, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 
0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* 
gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* 
gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x1, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x1, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x1, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* 
gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* 
gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc860L_0x464x */ + { + 0x860, /* ChipID */ + 0x4647, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x4, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x4, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize 
*/ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x0, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 
0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* 
gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* 
gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, 
/* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x1, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* 
gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* 
gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* 
gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* 
gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* 
gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc880_5106 */ + { + 0x880, /* ChipID */ + 0x5106, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x100, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0xb, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* 
gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 
0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 
0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x0, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x0, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* 
gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x0, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x0, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* 
gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* 
gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* 
gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 
0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* 
gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* 
gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc880_5122 */ + { + 0x880, /* ChipID */ + 0x5122, /* ChipRevision */ + 0x70007, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x240, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0xc, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* 
gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x1, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* 
gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* 
gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x0, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* 
gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* 
gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* 
gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* 
gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* 
gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* 
gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* 
gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc880TM_0x512x */ + { + 0x880, /* ChipID */ + 0x5124, /* ChipRevision */ + 0x70007, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x2, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x240, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0xc, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x0, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* 
gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x1, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* 
gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x0, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* 
gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x0, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* 
gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* 
gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* 
gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 
0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* 
gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* 
gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* 
gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc880TM_0x512x */ + { + 0x880, /* ChipID */ + 0x5124, /* ChipRevision */ + 0x70007, /* ProductID */ + 0x0, /* EcoID */ + 0x103, /* CustomerID */ + 0x2, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* 
gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x240, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0xf, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x0, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x1, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* 
gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* 
gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x0, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, 
/* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x0, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* 
gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ 
+ 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* 
gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* 
gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT 
*/ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* 
gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 
0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc900_5250 */ + { + 0x900, /* ChipID */ + 0x5250, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x200, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x1, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x2, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* 
gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* 
gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing 
*/ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x0, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 
0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* 
gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* 
gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* 
gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX 
*/ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* 
gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* 
gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc1000_5036 */ + { + 0x1000, /* ChipID */ + 0x5036, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x4, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x200, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x2, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0x240, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* 
gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 
0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* 
gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* 
gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x1, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, 
/* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* 
gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* 
gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 
0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* 
gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* 
gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc1000_5037 */ + { + 0x1000, /* ChipID */ + 0x5037, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 
0x1, /* FormalRelease */ + 0x4, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x200, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x2, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0x240, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ 
+ 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 
0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* 
gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 
0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x1, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* 
gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 
0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, 
/* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* 
gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* 
gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* 
gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc1000_5037_1 */ + { + 0x1000, /* ChipID */ + 0x5037, /* ChipRevision */ + 0x0, /* ProductID */ + 0x1, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x4, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x200, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x2, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0x240, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize 
*/ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* 
gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x0, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x0, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* 
gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x1, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* 
gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* 
gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* 
gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* 
gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* 
gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU 
*/ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc1000_5039 */ + { + 0x1000, /* ChipID */ + 0x5039, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x11, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x4, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x200, /* gcFEATURE_VALUE_ThreadCount */ + 0x8, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x2, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0x240, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x0, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* 
gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* 
gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x1, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* 
gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x0, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* 
gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x1, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* 
gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* 
gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* 
gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* 
gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* 
gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* 
gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* 
gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc1500_5246 */ + { + 0x1500, /* ChipID */ + 0x5246, /* ChipRevision */ + 0x70003, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x6, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x200, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x2, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x400, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0xf, /* gcFEATURE_VALUE_VaryingCount */ + 0x1, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x2, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* 
gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* 
gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, 
/* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x0, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* 
gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* 
gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis 
*/ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* 
gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 
0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* 
gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* 
gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc2000_5108 */ + { + 0x2000, /* ChipID */ + 0x5108, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x400, /* gcFEATURE_VALUE_ThreadCount 
*/ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x4, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x200, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0xa8, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0xb, /* gcFEATURE_VALUE_VaryingCount */ + 0x1, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x4, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 
0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x1, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* 
gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x0, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x0, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x0, /* gcFEATURE_BIT_REG_LineLoop */ + 0x0, /* gcFEATURE_BIT_REG_LogicOp */ + 0x0, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x0, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x0, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* 
gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x0, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x0, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x0, /* gcFEATURE_BIT_REG_TileFiller */ + 0x0, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x0, /* gcFEATURE_BIT_REG_Interleaver */ + 0x0, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x0, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x0, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* 
gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* 
gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* 
gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ 
+ 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ 
+ 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, 
/* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* 
gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc2000_5140 */ + { + 0x2000, /* ChipID */ + 0x5140, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x5, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x400, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x4, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0x100, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x1, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 
0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x1, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* 
gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* 
gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x0, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x0, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* 
gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x0, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x0, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* 
gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* 
gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* 
gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* 
gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* 
gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* 
gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* 
gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc2000w_5_1_4_rc0e */ + { + 0x2000, /* ChipID */ + 0x5140, /* ChipRevision */ + 0x20000, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x5, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x400, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x4, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x100, /* gcFEATURE_VALUE_InstructionCount */ + 0x100, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x8, /* gcFEATURE_VALUE_VaryingCount */ + 0x1, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* 
gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x0, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x1, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* 
gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x0, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x0, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* 
gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x0, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x0, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x0, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x0, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x0, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x0, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x0, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x0, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x0, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x0, /* 
gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x0, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* 
gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* 
gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* 
gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* 
gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* 
gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* 
gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc2500_5422 */ + { + 0x2500, /* ChipID */ + 0x5422, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x12, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x400, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x4, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0xf, /* gcFEATURE_VALUE_VaryingCount */ + 0x1, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x4, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* 
gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* 
gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* 
gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* 
gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* 
gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 
0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* 
gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* 
gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x1, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* 
gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc6400_5422 */ + { + 0x6400, /* ChipID */ + 0x5422, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x16, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* 
gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0xf, /* gcFEATURE_VALUE_VaryingCount */ + 0x1, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* 
gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* 
gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, 
/* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* 
gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ 
+ 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* 
gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* 
gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT 
*/ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* 
gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x1, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 
0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc3000_5435 */ + { + 0x3000, /* ChipID */ + 0x5435, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x400, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x4, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0xf, /* gcFEATURE_VALUE_VaryingCount */ + 0x1, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x4, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* 
gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* 
gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing 
*/ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 
0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* 
gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* 
gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* 
gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* 
gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX 
*/ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x1, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x1, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* 
gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* 
gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc2000_ffff5450 */ + { + 0x2000, /* ChipID */ + 0xffff5450, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x8, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x400, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x4, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x1, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x4, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 
0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* 
gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* 
gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* 
gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* 
gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* 
gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, 
/* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP 
*/ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* 
gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x1, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* 
gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc3000_5450 */ + { + 0x3000, /* ChipID */ + 0x5450, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x8, /* PatchVersion */ + 
0x1, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x400, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x4, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x1, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x4, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* 
gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* 
gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* 
gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 
*/ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* 
gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ 
+ 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* 
gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* 
gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* 
gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x1, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* 
gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc3000_5451 */ + { + 0x3000, /* ChipID */ + 0x5451, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x4, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x400, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x4, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount 
*/ + 0x1, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x4, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* 
gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* 
gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 
0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* 
gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x1, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* 
gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* 
gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* 
gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x1, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU 
*/ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000L_551x */ + { + 0x3000, /* ChipID */ + 0x5512, /* ChipRevision */ + 0x70002, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x3, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x400, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x4, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x1, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x4, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* 
gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* 
gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* 
gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* 
gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* 
gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* 
gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* 
gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* 
gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* 
gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x1, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x1, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* 
gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* 
gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000L_5512 */ + { + 0x3000, /* ChipID */ + 0x5512, /* ChipRevision */ + 0x70002, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x3, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x400, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x4, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x1, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x4, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* 
gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* 
gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, 
/* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* 
gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* 
gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis 
*/ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* 
gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 
0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* 
gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x1, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x1, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* 
gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000L_5514 */ + { + 0x3000, /* ChipID */ + 0x5514, /* ChipRevision */ + 0x70002, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x400, /* 
gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x4, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x1, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x4, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* 
gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* 
gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* 
gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 
0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* 
gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* 
gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* 
gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* 
gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x1, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* 
gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x1, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* 
gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc4000_5222 */ + { + 0x4000, /* ChipID */ + 0x5222, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x800, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x800, /* gcFEATURE_VALUE_InstructionCount */ + 0x200, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0xb, /* gcFEATURE_VALUE_VaryingCount */ + 0x8, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, 
/* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* 
gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* 
gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x1, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x0, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* 
gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x0, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x0, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x0, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x0, /* gcFEATURE_BIT_REG_NewHZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x0, /* 
gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* 
gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* 
gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* 
gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* 
gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ 
+ 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* 
gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc4000_5245 */ + { + 0x4000, /* ChipID */ + 0x5245, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x400, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0xf, /* gcFEATURE_VALUE_VaryingCount */ + 0x8, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x0, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x0, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* 
gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* 
gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* 
gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x0, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x1, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x0, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x0, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* 
gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x0, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x0, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x0, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* 
gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* 
gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 
*/ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* 
gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* 
gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc5000_5434 */ + { + 0x5000, /* ChipID */ + 0x5434, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0xf, /* gcFEATURE_VALUE_VaryingCount */ + 0x1, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* 
gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x0, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* 
gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix 
*/ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x0, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, 
/* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x0, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x0, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x0, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x0, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x0, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 
0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* 
gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* 
gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x0, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x0, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* 
gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x1, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* 
gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x0, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000_551x */ + { + 0x5000, /* ChipID */ + 0x5513, /* ChipRevision */ + 0x70000, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x1, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* 
gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x1, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* 
gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* 
gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* 
gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* 
gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* 
gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* 
gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ 
+ 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ 
+ 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x1, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, 
/* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x1, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* 
gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000_5513 */ + { + 0x5000, /* ChipID */ + 0x5513, /* ChipRevision */ + 0x70000, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x1, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x1, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* 
gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* 
gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* 
gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* 
gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* 
gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x1, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 
0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* 
gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* 
gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x0, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x1, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* 
gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x1, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* 
gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gcXAQ2_CMODEL */ + { + 0x7000, /* ChipID */ + 0x0, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x1f, /* gcFEATURE_VALUE_VaryingCount */ + 0x40, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x40, /* gcFEATURE_VALUE_L1CacheSize */ + 0x100, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x40, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* 
gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x1, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* 
gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x1, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x1, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x0, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x1, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x1, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x1, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ 
+ 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x0, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x1, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* 
gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x0, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x0, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x1, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x0, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x0, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x1, /* 
gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x0, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x0, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x0, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x1, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x0, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x0, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* 
gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* 
gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x1, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* 
gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* 
gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000XS_600x */ + { + 0x7000, /* ChipID */ + 0x6008, /* ChipRevision */ + 0x70004, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0xb, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x1f, /* gcFEATURE_VALUE_VaryingCount */ + 0x40, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x40, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x40, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 
0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* 
gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* 
gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x1, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* 
gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x1, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* 
gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x1, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x1, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x1, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 
0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* 
gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* 
gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* 
gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x1, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000XS_6008 */ + { + 0x7000, /* ChipID */ + 0x6008, /* ChipRevision */ + 0x70004, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0xb, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* 
gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x1f, /* gcFEATURE_VALUE_VaryingCount */ + 0x40, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x40, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x40, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* 
gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* 
gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, 
/* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x1, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* 
gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ 
+ 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x1, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* 
gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x1, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x1, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x1, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* 
gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT 
*/ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* 
gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x1, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 
0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000XSVX_600x */ + { + 0x7000, /* ChipID */ + 0x6008, /* ChipRevision */ + 0x70008, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x1f, /* gcFEATURE_VALUE_VaryingCount */ + 0x40, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x40, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x40, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* 
gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* 
gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing 
*/ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 
0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x1, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* 
gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x1, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* 
gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x1, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x1, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x1, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* 
gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX 
*/ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* 
gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x1, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* 
gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000XSVX_6008 */ + { + 0x7000, /* ChipID */ + 0x6008, /* ChipRevision */ + 0x70008, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x7, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x1f, /* gcFEATURE_VALUE_VaryingCount */ + 0x40, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x40, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x40, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ 
+ 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* 
gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* 
gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* 
gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x1, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* 
gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x1, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* 
gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x1, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x1, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x1, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, 
/* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP 
*/ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* 
gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* 
gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x1, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000XSVX_6009 */ + { + 0x7000, /* ChipID */ + 0x6009, /* ChipRevision */ + 0x70008, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x9, /* 
PatchVersion */ + 0x1, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x1f, /* gcFEATURE_VALUE_VaryingCount */ + 0x40, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x40, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x40, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, 
/* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, 
/* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* 
gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x1, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 
*/ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* 
gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x1, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ 
+ 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x1, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x1, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x1, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* 
gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* 
gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x1, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* 
gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* 
gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x1, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000_6100 */ + { + 0x7000, /* ChipID */ + 0x6100, /* ChipRevision */ + 0x70000, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* 
gcFEATURE_VALUE_VaryingCount */ + 0x20, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x20, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x20, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x0, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* 
gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* 
gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* 
gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* 
gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* 
gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ 
+ 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* 
gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ 
+ 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x1, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU 
*/ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000L_6100 */ + { + 0x7000, /* ChipID */ + 0x6100, /* ChipRevision */ + 0x70002, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x400, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x4, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* 
gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x0, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* 
gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* 
gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* 
gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* 
gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* 
gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* 
gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* 
gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* 
gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* 
gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x1, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* 
gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000XS_6100 */ + { + 0x7000, /* ChipID */ + 0x6100, /* ChipRevision */ + 0x70004, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x1f, /* gcFEATURE_VALUE_VaryingCount */ + 0x40, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x40, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x40, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* 
gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x0, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* 
gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, 
/* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x1, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* 
gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* 
gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x1, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis 
*/ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x1, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x1, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x1, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* 
gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 
0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* 
gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* 
gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x1, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000_6200 */ + { + 0x7000, /* ChipID */ + 0x6200, /* ChipRevision */ + 0x70000, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* 
gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x28, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x20, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x28, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* 
gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* 
gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* 
gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 
0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* 
gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* 
gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* 
gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* 
gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* 
gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000UL_6200 */ + { + 0x7000, /* ChipID */ + 0x6200, /* ChipRevision */ + 0x70003, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x200, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x2, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ 
+ 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x10, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* 
gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* 
gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* 
gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* 
gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* 
gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x1, /* 
gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* 
gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* 
gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ 
+ 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* 
gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000ULVX_6200 */ + { + 0x7000, /* ChipID */ + 0x6200, /* ChipRevision */ + 0x7000f, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x200, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x2, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x10, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* 
gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* 
gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* 
gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* 
gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x1, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* 
gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x1, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x1, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* 
gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 
*/ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* 
gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* 
gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7400_551x */ + { + 0x7400, /* ChipID */ + 0x5515, /* ChipRevision */ + 0x74000, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x6, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x0, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x2, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x0, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x0, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* 
gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* 
gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x1, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix 
*/ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, 
/* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x0, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x0, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x0, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x0, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 
0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x1, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x1, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x0, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x0, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* 
gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x0, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x1, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* 
gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x0, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x0, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x0, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* 
gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x1, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x1, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* 
gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc8000UL_6200 */ + { + 0x8000, /* ChipID */ + 0x6200, /* ChipRevision */ + 0x80003, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x200, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* 
gcFEATURE_VALUE_VertexCacheSize */ + 0x2, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x8, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x8, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x10, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* 
gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* 
gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* 
gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* 
gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* 
gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* 
gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x1, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ 
+ 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ 
+ 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, 
/* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* 
gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* v630 */ + { + 0x7000, /* ChipID */ + 0x6300, /* ChipRevision */ + 0x0, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x1f, /* gcFEATURE_VALUE_VaryingCount */ + 0x40, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x20, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 
0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x40, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* 
gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* 
gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* 
gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x1, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* 
gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x1, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* 
gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x1, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x1, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x1, /* 
gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* 
gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x1, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* 
gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* 
gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* 
gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000XS_6200 */ + { + 0x7000, /* ChipID */ + 0x6200, /* ChipRevision */ + 0x70004, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x1f, /* gcFEATURE_VALUE_VaryingCount */ + 0x40, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x20, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x40, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* 
gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* 
gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* 
gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* 
gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x1, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* 
gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x1, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* 
gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x1, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x1, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x1, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x1, /* 
gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* 
gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x1, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* 
gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* 
gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* 
gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000L_6200 */ + { + 0x7000, /* ChipID */ + 0x6200, /* ChipRevision */ + 0x70002, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x400, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x4, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, 
/* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* 
gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* 
gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* 
gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* 
gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 
0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* 
gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* 
gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* 
gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000LXS_6200 */ + { + 0x7000, /* ChipID */ + 0x6200, /* ChipRevision */ + 0x7000a, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x400, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x4, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* 
gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x1f, /* gcFEATURE_VALUE_VaryingCount */ + 0x40, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x20, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x40, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* 
gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* 
gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, 
/* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x1, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* 
gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ 
+ 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x1, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* 
gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x1, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x1, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x1, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* 
gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT 
*/ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* 
gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 
0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000ULVX_V11_6200 */ + { + 0x7000, /* ChipID */ + 0x6200, /* ChipRevision */ + 0x7000f, /* ProductID */ + 0x0, /* EcoID */ + 0x1, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x200, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x2, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x10, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* 
gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* 
gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing 
*/ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 
0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* 
gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* 
gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x1, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* 
gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x1, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x1, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* 
gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX 
*/ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* 
gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* 
gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000ULVX_V12_6200 */ + { + 0x7000, /* ChipID */ + 0x6200, /* ChipRevision */ + 0x7000f, /* ProductID */ + 0x0, /* EcoID */ + 0x2, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x200, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x2, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x10, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ 
+ 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* 
gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* 
gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* 
gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* 
gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* 
gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x1, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, 
/* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP 
*/ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* 
gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* 
gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc8000ULVX */ + { + 0x8000, /* ChipID */ + 0x6200, /* ChipRevision */ + 0x8000f, /* ProductID */ + 0x0, /* EcoID */ + 0x3, /* CustomerID */ + 0x0, /* PatchVersion 
*/ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x200, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x2, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x10, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* 
gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* 
gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* 
gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 
*/ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* 
gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ 
+ 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x1, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* 
gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* 
gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x1, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x1, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x1, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x0, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x1, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* 
gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x1, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x1, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* 
gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x1, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x1, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x1, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x1, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x1, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x1, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x1, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000ULVX_6200_pid0x60 */ + { + 0x7000, /* ChipID */ + 0x6203, /* ChipRevision */ + 0x7000f, /* ProductID */ + 0x0, /* EcoID */ + 0x60, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x200, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x2, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* 
gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x10, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* 
gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* 
gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* 
gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* 
gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* 
gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ 
+ 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x1, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* 
gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ 
+ 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x1, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU 
*/ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000ULN_v122 */ + { + 0x7000, /* ChipID */ + 0x6203, /* ChipRevision */ + 0x70003, /* ProductID */ + 0x0, /* EcoID */ + 0x4, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x200, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x2, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x8, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x8, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x10, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* 
gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x0, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* 
gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* 
gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* 
gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* 
gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* 
gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x1, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* 
gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x1, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* 
gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x1, /* 
gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* 
gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x1, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x1, /* 
gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x1, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000XS_6FFF */ + { + 0x7000, /* ChipID */ + 0x6fff, /* ChipRevision */ + 0x70004, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x1f, /* gcFEATURE_VALUE_VaryingCount */ + 0x40, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x20, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x40, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* 
gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* 
gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, 
/* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x1, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* 
gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* 
gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x1, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis 
*/ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x1, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x1, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x1, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* 
gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 
0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* 
gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* 
gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000_6210 */ + { + 0x7000, /* ChipID */ + 0x6210, /* ChipRevision */ + 0x70000, /* ProductID */ + 0x0, /* EcoID */ + 0x6, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* 
gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* 
gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* 
gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* 
gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 
0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* 
gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* 
gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* 
gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x1, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x1, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x1, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x1, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* 
gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* 
gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000_6210 */ + { + 0x7000, /* ChipID */ + 0x6210, /* ChipRevision */ + 0x70000, /* ProductID */ + 0x0, /* EcoID */ + 0xa, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 
0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* 
gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* 
gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* 
gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* 
gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* 
gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* 
gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* 
gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x1, /* 
gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x1, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x1, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x1, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x1, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x1, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x1, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x1, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ 
+ 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x1, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* 
gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000XS_6210 */ + { + 0x7000, /* ChipID */ + 0x6210, /* ChipRevision */ + 0x70004, /* ProductID */ + 0x0, /* EcoID */ + 0x8, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x30, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x30, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* 
gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* 
gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* 
gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x1, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* 
gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x1, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x1, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x1, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* 
gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* 
gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x1, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x1, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x1, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x1, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x1, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x1, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 
*/ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x1, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x1, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x1, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* 
gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* 
gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc8000XS_6210 */ + { + 0x8000, /* ChipID */ + 0x6210, /* ChipRevision */ + 0x80004, /* ProductID */ + 0x0, /* EcoID */ + 0xd, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x30, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x30, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* 
gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* 
gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix 
*/ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x1, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, 
/* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x1, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 
0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x1, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x1, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* 
gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* 
gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x1, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x1, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x1, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x1, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x1, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x1, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x1, /* 
gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x1, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* 
gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc8200LXS */ + { + 0x8200, /* ChipID */ + 0x6212, /* ChipRevision */ + 0x8200a, /* ProductID */ + 0x0, /* EcoID */ + 0xe, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* 
gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x2, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x30, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x30, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 
0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* 
gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* 
gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x1, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* 
gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* 
gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x1, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x1, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* 
gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x1, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x1, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ 
+ 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ 
+ 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x1, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x1, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x1, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x1, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x1, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x1, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x1, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, 
/* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x1, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* 
gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000XS_6210 */ + { + 0x7000, /* ChipID */ + 0x6210, /* ChipRevision */ + 0x70004, /* ProductID */ + 0x0, /* EcoID */ + 0xc, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x30, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* 
gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x30, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* 
gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* 
gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x1, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* 
gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x1, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* 
gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x1, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 
0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x1, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* 
gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x1, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x1, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x1, /* 
gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x1, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x1, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x1, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x1, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x1, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x1, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x1, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* 
gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x1, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* 
gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000L_6210 */ + { + 0x7000, /* ChipID */ + 0x6210, /* ChipRevision */ + 0x70002, /* ProductID */ + 0x0, /* EcoID */ + 0x5, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x400, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x4, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* 
gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* 
gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ 
+ 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* 
gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* 
gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* 
gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x1, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x1, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x1, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x1, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x1, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* 
gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* 
gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* 
gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000L_6210 */ + { + 0x7000, /* ChipID */ + 0x6210, /* ChipRevision */ + 0x70002, /* ProductID */ + 0x0, /* EcoID */ + 0x9, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x400, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x4, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, 
/* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* 
gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* 
gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* 
gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* 
gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 
0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* 
gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x1, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x1, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x1, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x1, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x1, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x1, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x1, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x1, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* 
gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x1, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* 
gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000LXS_6210 */ + { + 0x7000, /* ChipID */ + 0x6210, /* ChipRevision */ + 0x7000a, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x400, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x4, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* 
gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x30, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x30, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* 
gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* 
gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, 
/* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x1, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* 
gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ 
+ 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x1, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* 
gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x1, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x1, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* 
gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT 
*/ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x1, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x1, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x1, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x1, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x1, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x1, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x1, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* 
gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x1, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 
0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000XSVX_6210 */ + { + 0x7000, /* ChipID */ + 0x6210, /* ChipRevision */ + 0x70008, /* ProductID */ + 0x0, /* EcoID */ + 0x7, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x40, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x20, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x40, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* 
gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* 
gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing 
*/ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 
0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x1, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* 
gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x1, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* 
gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x1, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x1, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* 
gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* 
gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x1, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x1, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x1, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x1, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX 
*/ + 0x1, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x1, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x1, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x1, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x1, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* 
gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* 
gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000XSVX_6210 */ + { + 0x7000, /* ChipID */ + 0x6210, /* ChipRevision */ + 0x70008, /* ProductID */ + 0x0, /* EcoID */ + 0xb, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x40, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x20, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x40, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ 
+ 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* 
gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* 
gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* 
gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x1, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* 
gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x1, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* 
gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x1, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x1, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, 
/* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP 
*/ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x1, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x1, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x1, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x1, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x1, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x1, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x1, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* 
gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x1, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x1, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x1, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* 
gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7000L_DEC400 */ + { + 0x7000, /* ChipID */ + 0x6214, /* ChipRevision */ + 0x70002, /* ProductID */ + 0x0, /* EcoID */ + 0x30, /* CustomerID */ + 0x8, /* 
PatchVersion */ + 0x1, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x400, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x4, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* 
gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* 
gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* 
gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 
*/ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* 
gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ 
+ 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* 
gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* 
gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x1, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x1, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x1, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x1, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x1, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x1, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x1, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x1, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x1, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* 
gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x1, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x1, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* 
gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7400_0002 */ + { + 0x8400, /* ChipID */ + 0x6310, /* ChipRevision */ + 0x8400a, /* ProductID */ + 0x0, /* EcoID */ + 0x44, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* 
gcFEATURE_VALUE_VaryingCount */ + 0x30, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x30, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* 
gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* 
gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* 
gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x1, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* 
gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* 
gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x1, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x1, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ 
+ 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x1, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x1, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* 
gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x1, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ 
+ 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x1, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x1, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x1, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x1, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x1, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x1, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x1, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x1, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x1, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x1, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU 
*/ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc7400_0003 */ + { + 0x8400, /* ChipID */ + 0x6310, /* ChipRevision */ + 0x8400a, /* ProductID */ + 0x0, /* EcoID */ + 0x45, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x400, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x4, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x30, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x30, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* 
gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* 
gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* 
gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* 
gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x1, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* 
gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x1, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* 
gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x1, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x1, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x1, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* 
gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* 
gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x1, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x1, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x1, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x1, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x1, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x1, /* 
gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x1, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x1, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x1, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x1, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x1, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* 
gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* 
gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc8400_6300 */ + { + 0x8400, /* ChipID */ + 0x6300, /* ChipRevision */ + 0x84004, /* ProductID */ + 0x0, /* EcoID */ + 0x41, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x10, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x40, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x40, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x40, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0xf, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* 
gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* 
gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, 
/* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x1, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* 
gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* 
gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x1, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis 
*/ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x1, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x1, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* 
gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 
0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x1, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x1, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x1, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x1, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x1, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x1, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x1, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x1, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x1, /* 
gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x1, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x1, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* 
gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x1, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc8100_6300_pid0x43 */ + { + 0x8100, /* ChipID */ + 0x6300, /* ChipRevision */ + 0x81004, /* ProductID */ + 0x0, /* EcoID */ + 0x43, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* 
gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x10, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x40, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x40, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x40, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x1, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* 
gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* 
gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* 
gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x1, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 
0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x1, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* 
gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x1, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x1, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* 
gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* 
gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x1, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x1, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x1, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x1, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x1, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x1, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x1, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x1, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x1, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x1, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* 
gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x1, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* 
gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x1, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gc8200_6300_pid0x46 */ + { + 0x8200, /* ChipID */ + 0x6300, /* ChipRevision */ + 0x82004, /* ProductID */ + 0x0, /* EcoID */ + 0x46, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x10, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x40, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x40, /* 
gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x40, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x3, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* 
gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x0, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x0, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* 
gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x1, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x0, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* 
gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* 
gcFEATURE_BIT_REG_Halti3 */ + 0x1, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x1, /* 
gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x1, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* 
gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x1, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x1, /* 
gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x1, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x1, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x1, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x1, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x1, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x1, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x1, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x1, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x1, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x1, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x1, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x1, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU 
*/ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x1, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* cc8000_6330 */ + { + 0x8000, /* ChipID */ + 0x6330, /* ChipRevision */ + 0x6080000, /* ProductID */ + 0x0, /* EcoID */ + 0x51, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x10, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x1f, /* gcFEATURE_VALUE_VaryingCount */ + 0x20, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x20, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x20, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, 
/* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* 
gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* 
gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* 
gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* 
gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* 
gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x0, /* gcFEATURE_BIT_REG_Evis */ + 0x1, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* 
gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* 
gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* 
gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x1, /* gcFEATURE_BIT_SH_CMPLX */ + 0x1, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x1, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x1, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x1, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x1, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* 
gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x1, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x1, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* 
gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vip7000UL_6100 */ + { + 0x7000, /* ChipID */ + 0x6100, /* ChipRevision */ + 0x5070003, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x200, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x2, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* 
gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* 
gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, 
/* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* 
gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* 
gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x1, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis 
*/ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x0, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x0, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x0, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x0, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x0, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x1, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x1, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x0, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x1, /* gcFEATURE_BIT_HWTFB */ + 0x0, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x0, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x0, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x0, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x0, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x0, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x0, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x0, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x0, /* 
gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x0, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x0, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x0, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x0, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY */ + 0x0, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x0, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x0, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x0, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x0, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x0, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x1, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x0, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x0, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 
0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* 
gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x0, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* 
gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x1, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vip7000L_6200 */ + { + 0x7000, /* ChipID */ + 0x6200, /* ChipRevision */ + 0x5070002, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x400, /* 
gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x4, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x20, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x20, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x20, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* 
gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* 
gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* 
gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 
0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* 
gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* 
gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* 
gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* 
gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* 
gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vip7000UL_6200 */ + { + 0x7000, /* ChipID */ + 0x6201, /* ChipRevision */ + 0x5070003, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x200, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x2, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize 
*/ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* 
gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* 
gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* 
gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* 
gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* 
gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* 
gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* 
gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* 
gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x0, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ 
+ 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x0, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* 
gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vip8000UL_6211 */ + { + 0x8000, /* ChipID */ + 0x6212, /* ChipRevision */ + 0x5080003, /* ProductID */ + 0x0, /* EcoID */ + 0x21, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x200, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x2, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x1, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x1, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x6, /* 
gcFEATURE_VALUE_NNInputBufferDepth */ + 0xe0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x3, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* 
gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* 
gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* 
gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* 
gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* 
gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 
*/ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* 
gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* 
gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vip8000ULFN_6211 */ + { + 0x8000, /* ChipID */ + 0x6211, /* ChipRevision */ + 0x5080003, /* ProductID */ + 0x0, /* EcoID */ + 0x22, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x200, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x2, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x1, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x1, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x6, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0xe0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x3, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, 
/* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* 
gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix 
*/ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, 
/* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 
0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* 
gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* 
gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* 
gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x0, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* 
gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vip8000UL_6211 */ + { + 0x8000, /* ChipID */ + 0x6211, /* ChipRevision */ + 0x5080003, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x200, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* 
gcFEATURE_VALUE_VertexCacheSize */ + 0x2, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x1, /* gcFEATURE_VALUE_NNCoreCount */ + 0x1, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x1, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x6, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0xe0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x3, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 
0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* 
gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* 
gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* 
gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* 
gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* 
gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ 
+ 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ 
+ 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, 
/* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* 
gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano */ + { + 0x8000, /* ChipID */ + 0x7000, /* ChipRevision */ + 0x5080001, /* ProductID */ + 0x0, /* EcoID */ + 0x29, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* 
gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x1, /* gcFEATURE_VALUE_NNCoreCount */ + 0x1, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x1, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x6, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0xe0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x1, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x3, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* 
gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* 
gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* 
gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* 
gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 
0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* 
gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* 
gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* 
gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* 
gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* gcnanovip */ + { + 0x8000, /* ChipID */ + 0x7000, /* ChipRevision */ + 0x424f5343, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x8, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x1, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x0, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x0, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* 
gcFEATURE_VALUE_ClusterAliveMask */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x0, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x0, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x0, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x3, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* 
gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ 
+ 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* 
gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* 
gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* 
gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x0, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* 
gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x0, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x1, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* 
gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* 
gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-q */ + { + 0x8000, /* ChipID */ + 0x7100, /* ChipRevision */ + 0x45080001, /* ProductID */ + 0x0, /* EcoID */ + 0x24, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x10, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x200, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x800, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* 
gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* 
gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 
0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* 
gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* 
gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* 
gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* 
gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE 
*/ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* 
gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* 
gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-q */ + { + 0x8000, /* ChipID */ + 0x7100, /* ChipRevision */ + 0x45080001, /* ProductID */ + 0x0, /* EcoID */ + 0x82, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* 
gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0xa, /* gcFEATURE_VALUE_NNCoreCount */ + 0xa, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x200, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x6, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x800, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x180, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, 
/* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* 
gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, 
/* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* 
gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ 
+ 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* 
gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* 
gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT 
*/ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* 
gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x1, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 
0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vip8000UL-s */ + { + 0x8000, /* ChipID */ + 0x7000, /* ChipRevision */ + 0x15080003, /* ProductID */ + 0x0, /* EcoID */ + 0x25, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x200, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x2, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x18, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x18, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* 
gcFEATURE_VALUE_NNMadPerCore */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x6, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0xe0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x80, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x1, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x3, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* 
gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing 
*/ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 
0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* 
gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* 
gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* 
gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* 
gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX 
*/ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* 
gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* 
gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vip8000UL-s */ + { + 0x8000, /* ChipID */ + 0x7000, /* ChipRevision */ + 0x15080003, /* ProductID */ + 0x0, /* EcoID */ + 0x83, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x200, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x2, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x2, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x18, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x18, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x6, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0xe0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x80, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x1, /* 
gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x3, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 
0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* 
gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* 
gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* 
gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* 
gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x1, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, 
/* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP 
*/ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* 
gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* 
gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vip8000UL-q */ + { + 0x8000, /* ChipID */ + 0x7000, /* ChipRevision */ + 0x45080003, /* ProductID */ + 0x0, /* EcoID */ + 0x26, /* CustomerID */ + 0x0, /* 
PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x200, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x2, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x18, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x18, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x6, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x200, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x2, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x3, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 
0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 
0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* 
gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 
*/ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* 
gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ 
+ 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* 
gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* 
gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* 
gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* 
gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vip8000UL */ + { + 0x8000, /* ChipID */ + 0x7000, /* ChipRevision */ + 0x5080003, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x200, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x2, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* 
gcFEATURE_VALUE_VaryingCount */ + 0x18, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x18, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x1, /* gcFEATURE_VALUE_NNCoreCount */ + 0x1, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x1, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x6, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0xe0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x80, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x1, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x3, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* 
gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* 
gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* 
gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* 
gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* 
gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ 
+ 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* 
gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ 
+ 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x1, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU 
*/ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vip8000-q */ + { + 0x8000, /* ChipID */ + 0x7000, /* ChipRevision */ + 0x45080000, /* ProductID */ + 0x0, /* EcoID */ + 0x72, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x800, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x8, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x40, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x20, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x40, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x8, /* 
gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x6, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x200, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x2, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x3, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* 
gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* 
gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* 
gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* 
gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* 
gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* 
gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* 
gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* 
gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* 
gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x0, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* 
gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-d */ + { + 0x8000, /* ChipID */ + 0x7000, /* ChipRevision */ + 0x25080001, /* ProductID */ + 0x0, /* EcoID */ + 0x2a, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x6, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x100, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x3, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* 
gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* 
gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, 
/* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* 
gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* 
gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis 
*/ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* 
gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 
0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* 
gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* 
gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-d */ + { + 0x8000, /* ChipID */ + 0x7000, /* ChipRevision */ + 0x25080001, /* ProductID */ + 0x0, /* EcoID */ + 0x76, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* 
gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x6, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x100, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x2, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x3, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 
0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, 
/* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* 
gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 
0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* 
gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* 
gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* 
gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* 
gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x1, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* 
gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vip8000L-O */ + { + 0x8000, /* ChipID */ + 0x7000, /* ChipRevision */ + 0x85080002, /* ProductID */ + 0x0, /* EcoID */ + 0x2f, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x400, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x4, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x20, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x20, /* gcFEATURE_VALUE_L1CacheSize */ 
+ 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x20, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x10, /* gcFEATURE_VALUE_NNCoreCount */ + 0x10, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x10, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x10, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x200, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* 
gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* 
gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* 
gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* 
gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* 
gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* 
gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* 
gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* 
gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x1, /* gcFEATURE_BIT_SH_CMPLX */ + 0x1, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x1, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ 
+ 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x1, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x1, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x1, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x1, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* 
gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vip8000L-qi */ + { + 0x8000, /* ChipID */ + 0x7200, /* ChipRevision */ + 0x4508000a, /* ProductID */ + 0x0, /* EcoID */ + 0x85, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x400, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x4, /* gcFEATURE_VALUE_NumShaderCores */ + 0x2, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x20, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x20, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x20, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0xc, /* gcFEATURE_VALUE_NNCoreCount */ + 0xc, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0xc, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* 
gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x400, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* 
gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* 
gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* 
gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* 
gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* 
gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 
*/ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x1, /* gcFEATURE_BIT_SH_CMPLX */ + 0x1, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x1, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x1, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* 
gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x1, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x1, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* 
gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-s */ + { + 0x8000, /* ChipID */ + 0x7000, /* ChipRevision */ + 0x15080001, /* ProductID */ + 0x0, /* EcoID */ + 0x23, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x6, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0xe0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x80, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x1, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x3, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x0, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* 
gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* 
gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix 
*/ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, 
/* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 
0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* 
gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* 
gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* 
gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x0, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x0, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x0, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x0, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x0, /* 
gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x0, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x0, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-qi */ + { + 0x8000, /* ChipID */ + 0x7000, /* ChipRevision */ + 0x45080009, /* ProductID */ + 0x0, /* EcoID */ + 0x7d, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* 
gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x200, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x400, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x3, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x180, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* 
gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* 
gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* 
gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 
0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* 
gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* 
gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* 
gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* 
gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x1, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* 
gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-di */ + { + 0x8000, /* ChipID */ + 0x7000, /* ChipRevision */ + 0x25080009, /* ProductID */ + 0x0, /* EcoID */ + 0x7e, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ 
+ 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x6, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x100, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x800, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x3, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* 
gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* 
gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* 
gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* 
gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* 
gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* 
gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* 
gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* 
gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ 
+ 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x1, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x0, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* 
gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-si */ + { + 0x8000, /* ChipID */ + 0x7120, /* ChipRevision */ + 0x15080009, /* ProductID */ + 0x0, /* EcoID */ + 0x80, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* 
gcFEATURE_VALUE_NNInputBufferDepth */ + 0xe0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x100, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x2, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x400, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x180, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* 
gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* 
gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* 
gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* 
gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* 
gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 
*/ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* 
gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x1, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x1, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* 
gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-qi */ + { + 0x8000, /* ChipID */ + 0x7120, /* ChipRevision */ + 0x45080009, /* ProductID */ + 0x0, /* EcoID */ + 0x88, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x200, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x400, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, 
/* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* 
gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix 
*/ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, 
/* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 
0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* 
gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* 
gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* 
gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* 
gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x1, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x1, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-di */ + { + 0x8000, /* ChipID */ + 0x7110, /* ChipRevision */ + 0x25080009, /* ProductID */ + 0x0, /* EcoID */ + 0x7f, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* 
gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x6, /* gcFEATURE_VALUE_NNCoreCount */ + 0x6, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x6, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x100, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x6, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x200, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* 
gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* 
gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* 
gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 
0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* 
gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* 
gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* 
gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* 
gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x1, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* 
gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-di */ + { + 0x8000, /* ChipID */ + 0x7130, /* ChipRevision */ + 0x25080009, /* ProductID */ + 0x0, /* EcoID */ + 0x84, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x0, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ 
+ 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x0, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x80, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* 
gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* 
gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* 
gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* 
gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* 
gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* 
gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* 
gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* 
gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ 
+ 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x0, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x1, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x1, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* 
gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-s+ */ + { + 0x8000, /* ChipID */ + 0x7300, /* ChipRevision */ + 0x5080001, /* ProductID */ + 0x3000000, /* EcoID */ + 0x89, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x3, /* gcFEATURE_VALUE_NNCoreCount */ + 0x3, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x3, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x3, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* 
gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x80, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x1, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* 
gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* 
gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* 
gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* 
gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* 
gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 
*/ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x1, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x1, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x1, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* 
gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x1, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x1, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x1, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x1, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x1, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x1, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x1, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* 
gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-s */ + { + 0x8000, /* ChipID */ + 0x7010, /* ChipRevision */ + 0x15080001, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x6, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0xe0, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x80, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x1, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x400, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x3, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* 
gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* 
gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix 
*/ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, 
/* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 
0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* 
gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* 
gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* 
gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* 
gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x1, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-d */ + { + 0x8000, /* ChipID */ + 0x7010, /* ChipRevision */ + 0x25080001, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* 
gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x6, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x100, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x2, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x400, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x3, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* 
gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* 
gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* 
gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 
0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* 
gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* 
gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* 
gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* 
gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x1, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* 
gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-q */ + { + 0x8000, /* ChipID */ + 0x7010, /* ChipRevision */ + 0x45080001, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 
0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x6, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x200, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x400, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x3, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* 
gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* 
gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* 
gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* 
gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* 
gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* 
gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* 
gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* 
gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ 
+ 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x1, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* 
gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-o */ + { + 0x8000, /* ChipID */ + 0x7010, /* ChipRevision */ + 0x85080001, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x10, /* gcFEATURE_VALUE_NNCoreCount */ + 0x10, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x10, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x10, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0x6, /* 
gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x400, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x8, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x400, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x3, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* 
gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* 
gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* 
gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* 
gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* 
gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 
*/ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* 
gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x0, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x0, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x1, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x0, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* 
gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-s */ + { + 0x8000, /* ChipID */ + 0x7100, /* ChipRevision */ + 0x15080001, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x80, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x1, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x100, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* 
gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* 
gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix 
*/ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, 
/* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 
0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* 
gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* 
gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* 
gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* 
gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-s+ */ + { + 0x8000, /* ChipID */ + 0x7100, /* ChipRevision */ + 0x5080001, /* ProductID */ + 0x3000000, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* 
gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x3, /* gcFEATURE_VALUE_NNCoreCount */ + 0x3, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x3, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x3, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x80, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x1, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x100, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* 
gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* 
gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* 
gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 
0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* 
gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* 
gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* 
gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* 
gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* 
gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-d */ + { + 0x8000, /* ChipID */ + 0x7100, /* ChipRevision */ + 0x25080001, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 
0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x100, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x2, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x200, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* 
gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* 
gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* 
gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* 
gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* 
gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* 
gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* 
gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* 
gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ 
+ 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* 
gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-d+ */ + { + 0x8000, /* ChipID */ + 0x7100, /* ChipRevision */ + 0x5080001, /* ProductID */ + 0x6000000, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x6, /* gcFEATURE_VALUE_NNCoreCount */ + 0x6, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x6, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x6, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* 
gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x100, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x3, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x200, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* 
gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* 
gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* 
gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* 
gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* 
gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 
*/ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* 
gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* 
gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-q */ + { + 0x8000, /* ChipID */ + 0x7100, /* ChipRevision */ + 0x45080001, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x200, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x400, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, 
/* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* 
gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix 
*/ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, 
/* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 
0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* 
gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* 
gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* 
gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* 
gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-q+ */ + { + 0x8000, /* ChipID */ + 0x7100, /* ChipRevision */ + 0x5080001, /* ProductID */ + 0xc000000, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* 
gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0xc, /* gcFEATURE_VALUE_NNCoreCount */ + 0xc, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0xc, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0xc, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x200, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x6, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x400, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* 
gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* 
gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* 
gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 
0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* 
gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* 
gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* 
gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* 
gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* 
gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-o */ + { + 0x8000, /* ChipID */ + 0x7100, /* ChipRevision */ + 0x85080001, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 
0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x10, /* gcFEATURE_VALUE_NNCoreCount */ + 0x10, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x10, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x10, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x400, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x8, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x800, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* 
gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* 
gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* 
gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* 
gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* 
gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* 
gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* 
gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* 
gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ 
+ 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* 
gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-o+ */ + { + 0x8000, /* ChipID */ + 0x7100, /* ChipRevision */ + 0x5080001, /* ProductID */ + 0x18000000, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x18, /* gcFEATURE_VALUE_NNCoreCount */ + 0x18, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x18, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x18, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* 
gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x400, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0xc, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x800, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* 
gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* 
gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* 
gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* 
gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* 
gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 
*/ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x0, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x0, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* 
gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x0, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x0, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x0, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x0, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x0, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* 
gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-s */ + { + 0x8000, /* ChipID */ + 0x7200, /* ChipRevision */ + 0x15080001, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x100, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x1, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* 
gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* 
gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix 
*/ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, 
/* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 
0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* 
gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* 
gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* 
gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x1, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x1, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* 
gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x1, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x1, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-s+ */ + { + 0x8000, /* ChipID */ + 0x7200, /* ChipRevision */ + 0x5080001, /* ProductID */ + 0x3000000, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* 
gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x3, /* gcFEATURE_VALUE_NNCoreCount */ + 0x3, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x3, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x3, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x100, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x1, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange 
*/ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* 
gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* 
gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* 
gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* 
gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* 
gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ 
+ 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ 
+ 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, 
/* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x1, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x1, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x1, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x1, /* 
gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-d */ + { + 0x8000, /* ChipID */ + 0x7200, /* ChipRevision */ + 0x25080001, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* 
gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x200, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x2, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* 
gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* 
gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* 
gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* 
gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 
0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* 
gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* 
gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x1, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* 
gcFEATURE_BIT_SEPARATE_LS */ + 0x1, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x1, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x1, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* 
gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-d+ */ + { + 0x8000, /* ChipID */ + 0x7200, /* ChipRevision */ + 0x5080001, /* ProductID */ + 0x6000000, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x6, /* gcFEATURE_VALUE_NNCoreCount */ + 0x6, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x6, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x6, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* 
gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x200, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x3, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* 
gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ 
+ 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* 
gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* 
gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* 
gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* 
gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x1, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x1, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* 
gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x1, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x1, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* 
gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-q */ + { + 0x8000, /* ChipID */ + 0x7200, /* ChipRevision */ + 0x45080001, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x400, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression 
*/ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* 
gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* 
gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* 
gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* 
gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 
0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* 
gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* 
gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x1, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x1, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* 
gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x1, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x1, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-q+ */ + { + 0x8000, /* ChipID */ + 0x7200, /* ChipRevision */ + 0x5080001, /* ProductID */ + 0xc000000, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* 
gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0xc, /* gcFEATURE_VALUE_NNCoreCount */ + 0xc, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0xc, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0xc, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x400, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x6, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, 
/* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* 
gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, 
/* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* 
gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ 
+ 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* 
gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* 
gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT 
*/ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* 
gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x1, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x1, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x1, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x1, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 
0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-o */ + { + 0x8000, /* ChipID */ + 0x7200, /* ChipRevision */ + 0x85080001, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* 
gcFEATURE_VALUE_NNMadPerCore */ + 0x10, /* gcFEATURE_VALUE_NNCoreCount */ + 0x10, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x10, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x10, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x800, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x8, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* 
gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing 
*/ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 
0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* 
gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* 
gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* 
gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* 
gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX 
*/ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x1, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x1, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* 
gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x1, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x1, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* 
gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-o+ */ + { + 0x8000, /* ChipID */ + 0x7200, /* ChipRevision */ + 0x5080001, /* ProductID */ + 0x18000000, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x18, /* gcFEATURE_VALUE_NNCoreCount */ + 0x18, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x18, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x18, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x800, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0xc, /* 
gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ 
+ 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* 
gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* 
gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* 
gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* 
gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, 
/* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP 
*/ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* 
gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x1, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x1, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x0, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* 
gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x1, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x1, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x0, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x0, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x0, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x0, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x0, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-s */ + { + 0x8000, /* ChipID */ + 0x7300, /* ChipRevision */ + 0x15080001, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion 
*/ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x2, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x100, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x1, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* 
gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* 
gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* 
gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 
*/ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* 
gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ 
+ 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* 
gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* 
gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* 
gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x1, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x1, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x1, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x1, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* 
gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x1, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x1, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x1, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x1, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x1, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x1, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-s+ */ + { + 0x8000, /* ChipID */ + 0x7300, /* ChipRevision */ + 0x5080001, /* ProductID */ + 0x3000000, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* 
gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x3, /* gcFEATURE_VALUE_NNCoreCount */ + 0x3, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x3, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x3, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x100, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x1, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* 
gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* 
gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* 
gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* 
gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* 
gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ 
+ 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* 
gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ 
+ 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x1, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x1, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x1, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x1, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x1, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x1, /* gcFEATURE_BIT_USC_STAY_LRU 
*/ + 0x1, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x1, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x1, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x1, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-d */ + { + 0x8000, /* ChipID */ + 0x7300, /* ChipRevision */ + 0x25080001, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x4, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x4, /* 
gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x200, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x2, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* 
gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* 
gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* 
gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* 
gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* 
gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* 
gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* 
gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* 
gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x1, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x1, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x1, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* 
gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x1, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x1, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x1, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x1, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x1, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x1, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x1, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* 
gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-d+ */ + { + 0x8000, /* ChipID */ + 0x7300, /* ChipRevision */ + 0x5080001, /* ProductID */ + 0x6000000, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x6, /* gcFEATURE_VALUE_NNCoreCount */ + 0x6, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x6, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x6, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x200, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x3, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* 
gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* 
gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, 
/* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* 
gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* 
gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis 
*/ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* 
gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 
0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* 
gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x1, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x1, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x1, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* 
gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x1, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x1, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x1, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x1, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x1, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x1, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x1, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-q */ + { + 0x8000, /* ChipID */ + 0x7300, /* ChipRevision */ + 0x45080001, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* 
gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x8, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x400, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 
0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, 
/* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* 
gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 
0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* 
gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* 
gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* 
gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* 
gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x1, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x1, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x1, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x1, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* 
gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x1, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x1, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x1, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x1, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x1, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x1, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-q+ */ + { + 0x8000, /* ChipID */ + 0x7300, /* ChipRevision */ + 0x5080001, /* ProductID */ + 0xc000000, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* 
gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0xc, /* gcFEATURE_VALUE_NNCoreCount */ + 0xc, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0xc, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0xc, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x400, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x6, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* 
gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* 
gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* 
gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* 
gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* 
gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* 
gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* 
gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* 
gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x1, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* 
gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x1, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x1, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x1, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x1, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x1, /* gcFEATURE_BIT_USC_STAY_LRU 
*/ + 0x1, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x1, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x1, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x1, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-o */ + { + 0x8000, /* ChipID */ + 0x7300, /* ChipRevision */ + 0x85080001, /* ProductID */ + 0x0, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x10, /* gcFEATURE_VALUE_NNCoreCount */ + 0x10, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x10, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x10, 
/* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x7e2, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0x8, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* 
gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* 
gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* 
gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* 
gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* 
gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis */ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* 
gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* 
gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* 
gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x1, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x1, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x1, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* 
gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x1, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x1, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x1, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x1, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x1, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x1, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x1, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* 
gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, + /* vipnano-o+ */ + { + 0x8000, /* ChipID */ + 0x7300, /* ChipRevision */ + 0x5080001, /* ProductID */ + 0x18000000, /* EcoID */ + 0x0, /* CustomerID */ + 0x0, /* PatchVersion */ + 0x1, /* FormalRelease */ + 0x8, /* gcFEATURE_VALUE_Streams */ + 0x40, /* gcFEATURE_VALUE_TempRegisters */ + 0x100, /* gcFEATURE_VALUE_ThreadCount */ + 0x10, /* gcFEATURE_VALUE_VertexCacheSize */ + 0x1, /* gcFEATURE_VALUE_NumShaderCores */ + 0x1, /* gcFEATURE_VALUE_NumPixelPipes */ + 0x400, /* gcFEATURE_VALUE_VertexOutputBufferSize */ + 0x0, /* gcFEATURE_VALUE_BufferSize */ + 0x200, /* gcFEATURE_VALUE_InstructionCount */ + 0x140, /* gcFEATURE_VALUE_NumberOfConstants */ + 0x1, /* gcFEATURE_VALUE_CoreCount */ + 0x10, /* gcFEATURE_VALUE_VaryingCount */ + 0x10, /* gcFEATURE_VALUE_LocalStorageSize */ + 0x10, /* gcFEATURE_VALUE_L1CacheSize */ + 0x200, /* gcFEATURE_VALUE_InstructionMemorySize */ + 0x14, /* gcFEATURE_VALUE_ShaderPCLength */ + 0x0, /* gcFEATURE_VALUE_NumResolvePipes */ + 0x10, /* gcFEATURE_VALUE_USC_MAX_PAGES */ + 0x100, /* gcFEATURE_VALUE_RESULT_WINDOW_MAX_SIZE */ + 0x40, /* gcFEATURE_VALUE_NNMadPerCore */ + 0x18, /* gcFEATURE_VALUE_NNCoreCount */ + 0x18, /* gcFEATURE_VALUE_NNCoreCount_INT8 */ + 0x18, /* gcFEATURE_VALUE_NNCoreCount_INT16 */ + 0x18, /* gcFEATURE_VALUE_NNCoreCount_FLOAT16 */ + 0xc, /* gcFEATURE_VALUE_NNInputBufferDepth */ + 0x40, /* gcFEATURE_VALUE_NNAccumBufferDepth */ + 0x0, /* gcFEATURE_VALUE_ClusterAliveMask */ + 0x400, /* gcFEATURE_VALUE_TPEngine_PwlLUTCount */ + 0x10, /* gcFEATURE_VALUE_TPEngine_PwlLUTSize */ + 0x800, /* gcFEATURE_VALUE_VIP_SRAM_SIZE */ + 0xc, /* gcFEATURE_VALUE_TPEngine_CoreCount */ + 0x0, /* gcFEATURE_VALUE_AXI_SRAM_SIZE */ + 0x4, /* gcFEATURE_VALUE_NN_INIMAGE_OFFSET_BITS */ + 0x200, /* 
gcFEATURE_VALUE_TP_REORDER_INIMAGE_SIZE */ + 0x1, /* gcFEATURE_BIT_REG_FastClear */ + 0x0, /* gcFEATURE_BIT_REG_SpecialAntiAliasing */ + 0x1, /* gcFEATURE_BIT_REG_Pipe3D */ + 0x1, /* gcFEATURE_BIT_REG_DXTTextureCompression */ + 0x0, /* gcFEATURE_BIT_REG_DebugMode */ + 0x1, /* gcFEATURE_BIT_REG_ZCompression */ + 0x0, /* gcFEATURE_BIT_REG_YUV420Filter */ + 0x1, /* gcFEATURE_BIT_REG_MSAA */ + 0x0, /* gcFEATURE_BIT_REG_DC */ + 0x0, /* gcFEATURE_BIT_REG_Pipe2D */ + 0x1, /* gcFEATURE_BIT_REG_ETC1TextureCompression */ + 0x1, /* gcFEATURE_BIT_REG_FastScaler */ + 0x1, /* gcFEATURE_BIT_REG_HighDynamicRange */ + 0x1, /* gcFEATURE_BIT_REG_YUV420Tiler */ + 0x1, /* gcFEATURE_BIT_REG_ModuleCG */ + 0x0, /* gcFEATURE_BIT_REG_MinArea */ + 0x0, /* gcFEATURE_BIT_REG_NoEZ */ + 0x0, /* gcFEATURE_BIT_REG_No422Texture */ + 0x0, /* gcFEATURE_BIT_REG_BufferInterleaving */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite2D */ + 0x0, /* gcFEATURE_BIT_REG_NoScaler */ + 0x1, /* gcFEATURE_BIT_REG_YUY2Averaging */ + 0x0, /* gcFEATURE_BIT_REG_HalfPECache */ + 0x0, /* gcFEATURE_BIT_REG_HalfTXCache */ + 0x0, /* gcFEATURE_BIT_REG_YUY2RenderTarget */ + 0x0, /* gcFEATURE_BIT_REG_Mem32BitSupport */ + 0x0, /* gcFEATURE_BIT_REG_PipeVG */ + 0x0, /* gcFEATURE_BIT_REG_VGTS */ + 0x0, /* gcFEATURE_BIT_REG_FE20 */ + 0x1, /* gcFEATURE_BIT_REG_ByteWrite3D */ + 0x1, /* gcFEATURE_BIT_REG_RsYuvTarget */ + 0x1, /* gcFEATURE_BIT_REG_FE20BitIndex */ + 0x1, /* gcFEATURE_BIT_REG_FlipY */ + 0x1, /* gcFEATURE_BIT_REG_DualReturnBus */ + 0x1, /* gcFEATURE_BIT_REG_EndiannessConfig */ + 0x1, /* gcFEATURE_BIT_REG_Texture8K */ + 0x1, /* gcFEATURE_BIT_REG_CorrectTextureConverter */ + 0x1, /* gcFEATURE_BIT_REG_SpecialMsaaLod */ + 0x1, /* gcFEATURE_BIT_REG_FastClearFlush */ + 0x1, /* gcFEATURE_BIT_REG_2DPE20 */ + 0x0, /* gcFEATURE_BIT_REG_CorrectAutoDisable */ + 0x1, /* gcFEATURE_BIT_REG_Render8K */ + 0x1, /* gcFEATURE_BIT_REG_TileStatus2Bits */ + 0x1, /* gcFEATURE_BIT_REG_SeparateTileStatusWhenInterleaved */ + 0x1, /* 
gcFEATURE_BIT_REG_SuperTiled32x32 */ + 0x0, /* gcFEATURE_BIT_REG_VG20 */ + 0x0, /* gcFEATURE_BIT_REG_TSExtendedCommands */ + 0x1, /* gcFEATURE_BIT_REG_CompressionFifoFixed */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions0 */ + 0x0, /* gcFEATURE_BIT_REG_VGFilter */ + 0x0, /* gcFEATURE_BIT_REG_VG21 */ + 0x1, /* gcFEATURE_BIT_REG_ShaderGetsW */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions1 */ + 0x1, /* gcFEATURE_BIT_REG_DefaultReg0 */ + 0x1, /* gcFEATURE_BIT_REG_MC20 */ + 0x0, /* gcFEATURE_BIT_REG_ShaderMSAASideband */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes0 */ + 0x0, /* gcFEATURE_BIT_REG_VAA */ + 0x0, /* gcFEATURE_BIT_REG_BypassInMSAA */ + 0x0, /* gcFEATURE_BIT_REG_HierarchicalZ */ + 0x0, /* gcFEATURE_BIT_REG_NewTexture */ + 0x0, /* gcFEATURE_BIT_REG_A8TargetSupport */ + 0x1, /* gcFEATURE_BIT_REG_CorrectStencil */ + 0x1, /* gcFEATURE_BIT_REG_EnhanceVR */ + 0x1, /* gcFEATURE_BIT_REG_RSUVSwizzle */ + 0x1, /* gcFEATURE_BIT_REG_V2Compression */ + 0x0, /* gcFEATURE_BIT_REG_VGDoubleBuffer */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes1 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes2 */ + 0x0, /* gcFEATURE_BIT_REG_TextureStride */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes3 */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisable1 */ + 0x0, /* gcFEATURE_BIT_REG_AutoRestartTS */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes4 */ + 0x0, /* gcFEATURE_BIT_REG_L2Windowing */ + 0x1, /* gcFEATURE_BIT_REG_HalfFloatPipe */ + 0x1, /* gcFEATURE_BIT_REG_PixelDither */ + 0x1, /* gcFEATURE_BIT_REG_TwoStencilReference */ + 0x1, /* gcFEATURE_BIT_REG_ExtendedPixelFormat */ + 0x1, /* gcFEATURE_BIT_REG_CorrectMinMaxDepth */ + 0x1, /* gcFEATURE_BIT_REG_DitherAndFilterPlusAlpha2D */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes5 */ + 0x0, /* gcFEATURE_BIT_REG_New2D */ + 0x1, /* gcFEATURE_BIT_REG_NewFloatingPointArithmetic */ + 0x1, /* gcFEATURE_BIT_REG_TextureHorizontalAlignmentSelect */ + 0x1, /* gcFEATURE_BIT_REG_NonPowerOfTwo */ + 0x1, /* gcFEATURE_BIT_REG_LinearTextureSupport */ + 0x1, /* gcFEATURE_BIT_REG_Halti0 */ + 0x0, 
/* gcFEATURE_BIT_REG_CorrectOverflowVG */ + 0x1, /* gcFEATURE_BIT_REG_NegativeLogFix */ + 0x1, /* gcFEATURE_BIT_REG_ResolveOffset */ + 0x1, /* gcFEATURE_BIT_REG_OkToGateAxiClock */ + 0x1, /* gcFEATURE_BIT_REG_MMU */ + 0x1, /* gcFEATURE_BIT_REG_WideLine */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes6 */ + 0x1, /* gcFEATURE_BIT_REG_FcFlushStall */ + 0x1, /* gcFEATURE_BIT_REG_LineLoop */ + 0x1, /* gcFEATURE_BIT_REG_LogicOp */ + 0x1, /* gcFEATURE_BIT_REG_SeamlessCubeMap */ + 0x1, /* gcFEATURE_BIT_REG_SuperTiledTexture */ + 0x1, /* gcFEATURE_BIT_REG_LinearPE */ + 0x1, /* gcFEATURE_BIT_REG_RectPrimitive */ + 0x0, /* gcFEATURE_BIT_REG_Composition */ + 0x1, /* gcFEATURE_BIT_REG_CorrectAutoDisableCountWidth */ + 0x1, /* gcFEATURE_BIT_REG_PESwizzle */ + 0x1, /* gcFEATURE_BIT_REG_EndEvent */ + 0x1, /* gcFEATURE_BIT_REG_S1S8 */ + 0x1, /* gcFEATURE_BIT_REG_Halti1 */ + 0x0, /* gcFEATURE_BIT_REG_RGB888 */ + 0x1, /* gcFEATURE_BIT_REG_TX_YUVAssembler */ + 0x1, /* gcFEATURE_BIT_REG_DynamicFrequencyScaling */ + 0x0, /* gcFEATURE_BIT_REG_TXFilter */ + 0x1, /* gcFEATURE_BIT_REG_FullDirectFB */ + 0x0, /* gcFEATURE_BIT_REG_OnePass2DFilter */ + 0x1, /* gcFEATURE_BIT_REG_ThreadWalkerInPS */ + 0x1, /* gcFEATURE_BIT_REG_TileFiller */ + 0x1, /* gcFEATURE_BIT_REG_YUVStandard */ + 0x0, /* gcFEATURE_BIT_REG_MultiSourceBlt */ + 0x0, /* gcFEATURE_BIT_REG_YUVConversion */ + 0x1, /* gcFEATURE_BIT_REG_FlushFixed2D */ + 0x1, /* gcFEATURE_BIT_REG_Interleaver */ + 0x1, /* gcFEATURE_BIT_REG_MixedStreams */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheFor2D420 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes7 */ + 0x0, /* gcFEATURE_BIT_REG_NoIndexPattern */ + 0x1, /* gcFEATURE_BIT_REG_TextureTileStatus */ + 0x1, /* gcFEATURE_BIT_REG_DecompressZ16 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes8 */ + 0x1, /* gcFEATURE_BIT_REG_DERotationStallFix */ + 0x0, /* gcFEATURE_BIT_REG_OclOnly */ + 0x1, /* gcFEATURE_BIT_REG_NewFeatures0 */ + 0x1, /* gcFEATURE_BIT_REG_InstructionCache */ + 0x0, /* gcFEATURE_BIT_REG_GeometryShader */ + 0x1, /* 
gcFEATURE_BIT_REG_TexCompressionSupertiled */ + 0x1, /* gcFEATURE_BIT_REG_Generics */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes9 */ + 0x0, /* gcFEATURE_BIT_REG_FastMSAA */ + 0x0, /* gcFEATURE_BIT_REG_WClip */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes10 */ + 0x1, /* gcFEATURE_BIT_REG_UnifiedSamplers */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes11 */ + 0x1, /* gcFEATURE_BIT_REG_PerformanceCounters */ + 0x1, /* gcFEATURE_BIT_REG_ExtraShaderInstructions2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes12 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes13 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_ACE */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_DEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes14 */ + 0x0, /* gcFEATURE_BIT_REG_PowerOptimizations0 */ + 0x1, /* gcFEATURE_BIT_REG_NewHZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes15 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements1 */ + 0x0, /* gcFEATURE_BIT_REG_DENoGamma */ + 0x0, /* gcFEATURE_BIT_REG_PAEnhancements2 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_PEEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_HIEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_FEEnhancements2 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes16 */ + 0x0, /* gcFEATURE_BIT_REG_DEEnhancements5 */ + 0x1, /* gcFEATURE_BIT_REG_TXEnhancements4 */ + 0x0, /* gcFEATURE_BIT_REG_PEEnhancements4 */ + 0x1, /* gcFEATURE_BIT_REG_MCEnhancements1 */ + 0x1, /* gcFEATURE_BIT_REG_Halti2 */ + 0x0, /* 
gcFEATURE_BIT_REG_DEMirrorRotate */ + 0x1, /* gcFEATURE_BIT_REG_SmallMSAA */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes17 */ + 0x0, /* gcFEATURE_BIT_REG_Rasterizer2 */ + 0x0, /* gcFEATURE_BIT_REG_DualPipeOPF */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2 */ + 0x0, /* gcFEATURE_BIT_REG_CSCV2 */ + 0x1, /* gcFEATURE_BIT_REG_PAEnhancements3 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes18 */ + 0x0, /* gcFEATURE_BIT_REG_Compression2D */ + 0x0, /* gcFEATURE_BIT_REG_Probe */ + 0x1, /* gcFEATURE_BIT_REG_MediumPrecision */ + 0x0, /* gcFEATURE_BIT_REG_DESupertile */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes19 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements6 */ + 0x1, /* gcFEATURE_BIT_REG_SHEnhancements7 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes20 */ + 0x0, /* gcFEATURE_BIT_REG_DEAddress40 */ + 0x0, /* gcFEATURE_BIT_REG_MiniMMUFix */ + 0x1, /* gcFEATURE_BIT_REG_EEZ */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes21 */ + 0x0, /* gcFEATURE_BIT_REG_ExtraVgCaps */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV15 */ + 0x1, /* gcFEATURE_BIT_REG_BugFixes22 */ + 0x1, /* gcFEATURE_BIT_REG_Halti3 */ + 0x0, /* gcFEATURE_BIT_REG_TessellationShaders */ + 0x0, /* gcFEATURE_BIT_REG_OPF9Tap */ + 0x0, /* gcFEATURE_BIT_REG_MultiSrcV2StrQuad */ + 0x0, /* gcFEATURE_BIT_REG_SeperateSRCAndDstCache */ + 0x1, /* gcFEATURE_BIT_REG_Halti4 */ + 0x1, /* gcFEATURE_BIT_REG_RAWriteDepth */ + 0x0, /* gcFEATURE_BIT_REG_AndroidOnly */ + 0x1, /* gcFEATURE_BIT_REG_HasChipProductReg */ + 0x0, /* gcFEATURE_BIT_REG_TXSupportDEC */ + 0x1, /* gcFEATURE_BIT_REG_S8MSAACompression */ + 0x1, /* gcFEATURE_BIT_REG_BugFixesIn544 */ + 0x0, /* gcFEATURE_BIT_REG_L2CacheRemove */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowRndVtxCnt */ + 0x0, /* gcFEATURE_BIT_REG_CubeMapFL28 */ + 0x1, /* gcFEATURE_BIT_REG_TX6bitFrac */ + 0x1, /* gcFEATURE_BIT_REG_FEAllowStallPrefetchEng */ + 0x0, /* gcFEATURE_BIT_REG_ThirdPartyCompression */ + 0x1, /* gcFEATURE_BIT_REG_RSS8 */ + 0x1, /* gcFEATURE_BIT_REG_MSAACoherencyCheck */ + 0x1, /* gcFEATURE_BIT_REG_Halti5 */ + 0x1, /* gcFEATURE_BIT_REG_Evis 
*/ + 0x0, /* gcFEATURE_BIT_REG_BltEngine */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes23 */ + 0x0, /* gcFEATURE_BIT_REG_BugFixes24 */ + 0x0, /* gcFEATURE_BIT_REG_DEC */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12 */ + 0x0, /* gcFEATURE_BIT_REG_VSTileNV12_10BIT */ + 0x0, /* gcFEATURE_BIT_RenderTarget8 */ + 0x0, /* gcFEATURE_BIT_TxLodFlowCorrection */ + 0x0, /* gcFEATURE_BIT_FaceLod */ + 0x0, /* gcFEATURE_BIT_MultiCoreSemaphoreStallV2 */ + 0x1, /* gcFEATURE_BIT_VMSAA */ + 0x0, /* gcFEATURE_BIT_ChipEnableLink */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_1_5_ENHANCEMENT */ + 0x0, /* gcFEATURE_BIT_MULTI_SRC_BLT_BILINEAR_FILTER */ + 0x1, /* gcFEATURE_BIT_RA_HZEZ_CLOCK_CONTROL */ + 0x1, /* gcFEATURE_BIT_CACHE128B256BPERLINE */ + 0x1, /* gcFEATURE_BIT_V4Compression */ + 0x0, /* gcFEATURE_BIT_PE2D_MAJOR_SUPER_TILE */ + 0x1, /* gcFEATURE_BIT_PE_32BPC_COLORMASK_FIX */ + 0x1, /* gcFEATURE_BIT_ALPHA_BLENDING_OPT */ + 0x1, /* gcFEATURE_BIT_NEW_GPIPE */ + 0x0, /* gcFEATURE_BIT_PIPELINE_32_ATTRIBUTES */ + 0x0, /* gcFEATURE_BIT_MSAA_SHADING */ + 0x0, /* gcFEATURE_BIT_NO_ANISTRO_FILTER */ + 0x1, /* gcFEATURE_BIT_NO_ASTC */ + 0x0, /* gcFEATURE_BIT_NO_DXT */ + 0x0, /* gcFEATURE_BIT_HWTFB */ + 0x1, /* gcFEATURE_BIT_RA_DEPTH_WRITE_MSAA1X_FIX */ + 0x1, /* gcFEATURE_BIT_EZHZ_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_HALFDEPENDENCY_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MCFILL_FIX */ + 0x1, /* gcFEATURE_BIT_TPG_TCPERF_FIX */ + 0x1, /* gcFEATURE_BIT_USC_MDFIFO_OVERFLOW_FIX */ + 0x1, /* gcFEATURE_BIT_SH_TEXLD_BARRIER_IN_CS_FIX */ + 0x1, /* gcFEATURE_BIT_RS_NEW_BASEADDR */ + 0x1, /* gcFEATURE_BIT_PE_8bpp_DUALPIPE_FIX */ + 0x0, /* gcFEATURE_BIT_SH_ADVANCED_INSTR */ + 0x1, /* gcFEATURE_BIT_SH_FLAT_INTERPOLATION_DUAL16_FIX */ + 0x1, /* gcFEATURE_BIT_USC_CONTINUOUS_FLUS_FIX */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_V4 */ + 0x0, /* gcFEATURE_BIT_SH_SUPPORT_ALPHA_KILL */ + 0x1, /* gcFEATURE_BIT_PE_NO_ALPHA_TEST */ + 0x0, /* gcFEATURE_BIT_TX_LOD_NEAREST_SELECT */ + 0x1, /* 
gcFEATURE_BIT_SH_FIX_LDEXP */ + 0x1, /* gcFEATURE_BIT_SUPPORT_MOVAI */ + 0x1, /* gcFEATURE_BIT_SH_SNAP2PAGE_MAXPAGES_FIX */ + 0x1, /* gcFEATURE_BIT_PE_RGBA16I_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_8bpp_256TILE_FC_FIX */ + 0x1, /* gcFEATURE_BIT_PE_64bit_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_USC_FULL_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_TX_YUV_ASSEMBLER_10BIT */ + 0x1, /* gcFEATURE_BIT_FE_32bit_INDEX_FIX */ + 0x1, /* gcFEATURE_BIT_BLT_64bpp_MASKED_CLEAR_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY */ + 0x1, /* gcFEATURE_BIT_ROBUSTNESS */ + 0x1, /* gcFEATURE_BIT_USC_ATOMIC_FIX */ + 0x1, /* gcFEATURE_BIT_SH_PSO_MSAA1x_FIX */ + 0x1, /* gcFEATURE_BIT_USC_VX_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_ABSDIFF */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BITREPLACE */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_BOXFILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_CORDIAC */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_DP32 */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_FILTER */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_IADD */ + 0x0, /* gcFEATURE_BIT_EVIS_NO_SELECTADD */ + 0x0, /* gcFEATURE_BIT_EVIS_LERP_7OUTPUT */ + 0x0, /* gcFEATURE_BIT_EVIS_ACCSQ_8OUTPUT */ + 0x1, /* gcFEATURE_BIT_USC_GOS_ADDR_FIX */ + 0x1, /* gcFEATURE_BIT_TX_8bit_UVFrac */ + 0x1, /* gcFEATURE_BIT_TX_DESC_CACHE_CLOCKGATE_FIX */ + 0x1, /* gcFEATURE_BIT_RSBLT_MSAA_DECOMPRESSION */ + 0x0, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE */ + 0x1, /* gcFEATURE_BIT_DRAWID */ + 0x1, /* gcFEATURE_BIT_PSIO_SAMPLEMASK_IN_R0ZW_FIX */ + 0x1, /* gcFEATURE_BIT_TX_INTEGER_COORDINATE_V2 */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG */ + 0x0, /* gcFEATURE_BIT_VG_RESOLVE_ENGINE */ + 0x0, /* gcFEATURE_BIT_VG_PE_COLOR_KEY */ + 0x0, /* gcFEATURE_BIT_VG_IM_INDEX_FORMAT */ + 0x0, /* gcFEATURE_BIT_SNAPPAGE_CMD */ + 0x1, /* gcFEATURE_BIT_SH_NO_INDEX_CONST_ON_A0 */ + 0x1, /* gcFEATURE_BIT_SH_NO_ONECONST_LIMIT */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_ON_TEMP */ + 0x1, /* gcFEATURE_BIT_COMPUTE_ONLY */ + 0x1, /* gcFEATURE_BIT_SH_IMG_LDST_CLAMP */ + 0x1, /* gcFEATURE_BIT_SH_ICACHE_ALLOC_COUNT_FIX */ + 
0x1, /* gcFEATURE_BIT_SH_ICACHE_PREFETCH */ + 0x0, /* gcFEATURE_BIT_PE2D_SEPARATE_CACHE */ + 0x0, /* gcFEATURE_BIT_VG_AYUV_INPUT_OUTPUT */ + 0x0, /* gcFEATURE_BIT_VG_DOUBLE_IMAGE */ + 0x0, /* gcFEATURE_BIT_VG_RECTANGLE_STRIPE_MODE */ + 0x0, /* gcFEATURE_BIT_VG_MMU */ + 0x0, /* gcFEATURE_BIT_VG_IM_FILTER */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_IM_YUV_PLANAR */ + 0x0, /* gcFEATURE_BIT_VG_PE_YUV_PACKET */ + 0x0, /* gcFEATURE_BIT_VG_COLOR_PRECISION_8_BIT */ + 0x1, /* gcFEATURE_BIT_PE_MSAA_OQ_FIX */ + 0x1, /* gcFEATURE_BIT_PSIO_MSAA_CL_FIX */ + 0x1, /* gcFEATURE_BIT_USC_DEFER_FILL_FIX */ + 0x1, /* gcFEATURE_BIT_SH_CLOCK_GATE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_NEED_DUMMYDRAW */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_OUTPUT */ + 0x0, /* gcFEATURE_BIT_PE2D_LINEAR_YUV420_10BIT */ + 0x0, /* gcFEATURE_BIT_MULTI_CLUSTER */ + 0x0, /* gcFEATURE_BIT_VG_TS_CULLING */ + 0x0, /* gcFEATURE_BIT_VG_FP25 */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK */ + 0x0, /* gcFEATURE_BIT_SH_DUAL16_SAMPLEMASK_ZW */ + 0x0, /* gcFEATURE_BIT_TPG_TRIVIAL_MODE_FIX */ + 0x0, /* gcFEATURE_BIT_TX_ASTC_MULTISLICE_FIX */ + 0x0, /* gcFEATURE_BIT_FE_ROBUST_FIX */ + 0x0, /* gcFEATURE_BIT_SH_GPIPE_ACCESS_FULLTEMPS */ + 0x0, /* gcFEATURE_BIT_PSIO_INTERLOCK */ + 0x1, /* gcFEATURE_BIT_PA_WIDELINE_FIX */ + 0x0, /* gcFEATURE_BIT_WIDELINE_HELPER_FIX */ + 0x0, /* gcFEATURE_BIT_G2D_3rd_PARTY_COMPRESSION_1_1 */ + 0x0, /* gcFEATURE_BIT_TX_FLUSH_L1CACHE */ + 0x1, /* gcFEATURE_BIT_PE_DITHER_FIX2 */ + 0x0, /* gcFEATURE_BIT_G2D_DEC400 */ + 0x0, /* gcFEATURE_BIT_SH_TEXLD_U_FIX */ + 0x0, /* gcFEATURE_BIT_MC_FCCACHE_BYTEMASK */ + 0x0, /* gcFEATURE_BIT_SH_MULTI_WG_PACK_FIX */ + 0x0, /* gcFEATURE_BIT_DC_OVERLAY_SCALING */ + 0x0, /* gcFEATURE_BIT_DC_SOURCE_ROTATION */ + 0x0, /* gcFEATURE_BIT_DC_TILED */ + 0x0, /* gcFEATURE_BIT_DC_YUV_L1 */ + 0x0, /* gcFEATURE_BIT_DC_D30_OUTPUT */ + 0x0, /* gcFEATURE_BIT_DC_MMU */ + 0x0, /* gcFEATURE_BIT_DC_COMPRESSION */ + 0x0, /* gcFEATURE_BIT_DC_QOS */ + 0x0, /* 
gcFEATURE_BIT_PE_ADVANCE_BLEND_PART0 */ + 0x0, /* gcFEATURE_BIT_FE_PATCHLIST_FETCH_FIX */ + 0x1, /* gcFEATURE_BIT_RA_CG_FIX */ + 0x1, /* gcFEATURE_BIT_EVIS_VX2 */ + 0x1, /* gcFEATURE_BIT_NN_FLOAT */ + 0x0, /* gcFEATURE_BIT_DEC400 */ + 0x0, /* gcFEATURE_BIT_LS_SUPPORT_PERCOMP_DEPENDENCY */ + 0x1, /* gcFEATURE_BIT_TP_ENGINE */ + 0x0, /* gcFEATURE_BIT_MULTI_CORE_BLOCK_SET_CONFIG2 */ + 0x0, /* gcFEATURE_BIT_PE_VMSAA_COVERAGE_CACHE_FIX */ + 0x1, /* gcFEATURE_BIT_SECURITY_AHB */ + 0x0, /* gcFEATURE_BIT_MULTICORE_SEMAPHORESTALL_V3 */ + 0x0, /* gcFEATURE_BIT_SMALLBATCH */ + 0x0, /* gcFEATURE_BIT_SH_CMPLX */ + 0x0, /* gcFEATURE_BIT_SH_IDIV0_SWZL_EHS */ + 0x0, /* gcFEATURE_BIT_TX_LERP_LESS_BIT */ + 0x0, /* gcFEATURE_BIT_SH_GM_ENDIAN */ + 0x0, /* gcFEATURE_BIT_SH_GM_USC_UNALLOC */ + 0x1, /* gcFEATURE_BIT_SH_END_OF_BB */ + 0x1, /* gcFEATURE_BIT_VIP_V7 */ + 0x0, /* gcFEATURE_BIT_TX_BORDER_CLAMP_FIX */ + 0x0, /* gcFEATURE_BIT_SH_IMG_LD_LASTPIXEL_FIX */ + 0x0, /* gcFEATURE_BIT_ASYNC_BLT */ + 0x0, /* gcFEATURE_BIT_ASYNC_FE_FENCE_FIX */ + 0x1, /* gcFEATURE_BIT_PSCS_THROTTLE */ + 0x0, /* gcFEATURE_BIT_SEPARATE_LS */ + 0x1, /* gcFEATURE_BIT_MCFE */ + 0x0, /* gcFEATURE_BIT_WIDELINE_TRIANGLE_EMU */ + 0x0, /* gcFEATURE_BIT_VG_RESOLUTION_8K */ + 0x0, /* gcFEATURE_BIT_FENCE_32BIT */ + 0x0, /* gcFEATURE_BIT_FENCE_64BIT */ + 0x1, /* gcFEATURE_BIT_NN_INTERLEVE8 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER */ + 0x0, /* gcFEATURE_BIT_PE_DEPTH_ONLY_OQFIX */ + 0x1, /* gcFEATURE_BIT_TP_LRN */ + 0x0, /* gcFEATURE_BIT_TX_SEAMLESS_CUBE */ + 0x0, /* gcFEATURE_BIT_TX_SNORM_SUPPORT */ + 0x1, /* gcFEATURE_BIT_TP_MAX_POOLING_STRIDE1 */ + 0x0, /* gcFEATURE_BIT_SH_SCATTER_GATHER */ + 0x0, /* gcFEATURE_BIT_HWMANAGED_LS */ + 0x1, /* gcFEATURE_BIT_NN_FP16_ALU */ + 0x1, /* gcFEATURE_BIT_NN_INT16_ALU */ + 0x1, /* gcFEATURE_BIT_TP_ROI_POOLING */ + 0x0, /* gcFEATURE_BIT_NN_ZDP3 */ + 0x1, /* gcFEATURE_BIT_NN_ZDP6 */ + 0x1, /* gcFEATURE_BIT_NN_XYDP9 */ + 0x1, /* gcFEATURE_BIT_NN_INT8_SCALE */ + 0x1, /* 
gcFEATURE_BIT_NN_POWER_ISOLATION */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE1 */ + 0x0, /* gcFEATURE_BIT_SH_IMAGE_ENABLE_FIX */ + 0x1, /* gcFEATURE_BIT_TF_QUANTIZATION */ + 0x1, /* gcFEATURE_BIT_MSAA_FRAGMENT_OPERATION */ + 0x0, /* gcFEATURE_BIT_TP_SIMPLE_INT16 */ + 0x1, /* gcFEATURE_BIT_TP_REAL_INT16 */ + 0x1, /* gcFEATURE_BIT_NN_FIRST_PIXEL_POOLING */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE2 */ + 0x0, /* gcFEATURE_BIT_VG_FORMAT_ARGB2222 */ + 0x0, /* gcFEATURE_BIT_PE_TILE_CACHE_FLUSH_FIX */ + 0x0, /* gcFEATURE_BIT_BLT_YUV_OUTPUT */ + 0x1, /* gcFEATURE_BIT_NN_STRIDE_SUPPORT */ + 0x1, /* gcFEATURE_BIT_NN_XYDP6 */ + 0x1, /* gcFEATURE_BIT_TP_REORDER_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CONV1x1_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_NN_CACHELINE_MODE_PERF_FIX */ + 0x0, /* gcFEATURE_BIT_NN_PER3DTILE_BUBBLE_FIX */ + 0x1, /* gcFEATURE_BIT_SH_IO_CG_FIX */ + 0x1, /* gcFEATURE_BIT_SWTILING_PHASE3 */ + 0x1, /* gcFEATURE_BIT_USC_STAY_LRU */ + 0x1, /* gcFEATURE_BIT_NN_NONZERO_MIRROR_BORDER */ + 0x1, /* gcFEATURE_BIT_NN_COEF_DECOMPRESS_PERF2X */ + 0x1, /* gcFEATURE_BIT_INPUT_4BIT */ + 0x1, /* gcFEATURE_BIT_COEF_COMPRESSION_ENHANCEMENT */ + 0x1, /* gcFEATURE_BIT_NN_ZXDP3_KERNEL_READ_CONFLICT_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ZDP3_NO_COMPRESS_FIX */ + 0x1, /* gcFEATURE_BIT_NN_ASYNC_COPY_PERF_FIX */ + 0x1, /* gcFEATURE_BIT_OCB_COUNTER */ + 0x0, /* gcFEATURE_BIT_PE_SWIZZLE */ + 0x0, /* gcFEATURE_BIT_SH_ROBUSTNESS_FIX */ + 0x0, /* gcFEATURE_BIT_USC_ATOMIC_FIX2 */ + 0x0, /* gcFEATURE_BIT_PE_A8B8G8R8 */ + 0x0, /* gcFEATURE_BIT_MULTIVIEW_RENDER */ + 0x0, /* gcFEATURE_BIT_FE_DRAW_DIRECT */ + 0x0, /* gcFEATURE_BIT_TX_VKBORDER_MODE */ + 0x0, /* gcFEATURE_BIT_TX_UNNORMALIZED_COORD */ + }, +}; + +static gcsFEATURE_DATABASE* +gcQueryFeatureDB( + gctUINT32 ChipID, + gctUINT32 ChipVersion, + gctUINT32 ProductID, + gctUINT32 EcoID, + gctUINT32 CustomerID + ) +{ + gctINT entryNum = sizeof(gChipInfo) / sizeof(gChipInfo[0]); + gctINT i; + + /* check formal release entries first */ + for (i = 0; i < 
entryNum; ++i) + { + + if ((gChipInfo[i].chipID == ChipID) + && (gChipInfo[i].chipVersion == ChipVersion) + && (gChipInfo[i].productID == ProductID) + && (gChipInfo[i].ecoID == EcoID) + && (gChipInfo[i].customerID == CustomerID) + && (gChipInfo[i].formalRelease) + ) + { + return &gChipInfo[i]; + } + } + + /* check informal release entries if we dont find in formal entries */ + for (i = 0; i < entryNum; ++i) + { + + if ((gChipInfo[i].chipID == ChipID) + && ((gChipInfo[i].chipVersion & 0xFFF0) == (ChipVersion & 0xFFF0)) + && (gChipInfo[i].productID == ProductID) + && (gChipInfo[i].ecoID == EcoID) + && (gChipInfo[i].customerID == CustomerID) + && (!gChipInfo[i].formalRelease) + ) + { + return &gChipInfo[i]; + } + } + + return gcvNULL; +} +#endif /* _gc_feature_database_h_ */ + + diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal.h new file mode 100644 index 000000000000..a9cb9aba0ea4 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal.h @@ -0,0 +1,2833 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#ifndef __gc_hal_h_ +#define __gc_hal_h_ + +#include "gc_hal_rename.h" +#include "gc_hal_types.h" +#include "gc_hal_enum.h" +#include "gc_hal_base.h" +#include "gc_hal_profiler.h" +#include "gc_hal_driver.h" +#if gcdENABLE_3D +#include "gc_hal_statistics.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/******************************************************************************\ +******************************* Alignment Macros ******************************* +\******************************************************************************/ + +/* Alignment with a non-power of two value. */ +#define gcmALIGN_NP2(n, align) \ +(\ + ((n) + (align) - 1) - (((n) + (align) - 1) % (align)) \ +) + +/* Alignment with a power of two value. */ +#define gcmALIGN(n, align) \ +(\ + ((n) + ((align) - 1)) & ~((align) - 1) \ +) + +#define gcmALIGN_BASE(n, align) \ +(\ + ((n) & ~((align) - 1)) \ +) + +/******************************************************************************\ +***************************** Element Count Macro ***************************** +\******************************************************************************/ + +#define gcmSIZEOF(a) \ +(\ + (gctSIZE_T) (sizeof(a)) \ +) + +#define gcmCOUNTOF(a) \ +(\ + sizeof(a) / sizeof(a[0]) \ +) + +/******************************************************************************\ +********************************* Cast Macro ********************************** +\******************************************************************************/ +#define gcmNAME_TO_PTR(na) \ + gckKERNEL_QueryPointerFromName(kernel, gcmALL_TO_UINT32(na)) + +#define gcmPTR_TO_NAME(ptr) \ + gckKERNEL_AllocateNameFromPointer(kernel, ptr) + +#define gcmRELEASE_NAME(na) \ + gckKERNEL_DeleteName(kernel, gcmALL_TO_UINT32(na)) + +#define gcmALL_TO_UINT32(t) \ +(\ + (gctUINT32) (gctUINTPTR_T) (t)\ +) + +#define gcmPTR_TO_UINT64(p) \ +(\ + (gctUINT64) 
(gctUINTPTR_T) (p)\ +) + +#define gcmUINT64_TO_PTR(u) \ +(\ + (gctPOINTER) (gctUINTPTR_T) (u)\ +) + +#define gcmUINT64_TO_TYPE(u, t) \ +(\ + (t) (gctUINTPTR_T) (u)\ +) + +/******************************************************************************\ +******************************** Useful Macro ********************************* +\******************************************************************************/ + +#define gcvINVALID_ADDRESS ~0U +#define gcvINVALID_VALUE 0xCCCCCCCC + +#define gcvINVALID_PHYSICAL_ADDRESS ~0U + +#define gcmGET_PRE_ROTATION(rotate) \ + ((rotate) & (~(gcvSURF_POST_FLIP_X | gcvSURF_POST_FLIP_Y))) + +#define gcmGET_POST_ROTATION(rotate) \ + ((rotate) & (gcvSURF_POST_FLIP_X | gcvSURF_POST_FLIP_Y)) + +/******************************************************************************\ +******************************** gcsOBJECT Object ******************************* +\******************************************************************************/ + +/* Type of objects. 
*/ +typedef enum _gceOBJECT_TYPE +{ + gcvOBJ_UNKNOWN = 0, + gcvOBJ_2D = gcmCC('2','D',' ',' '), + gcvOBJ_3D = gcmCC('3','D',' ',' '), + gcvOBJ_ATTRIBUTE = gcmCC('A','T','T','R'), + gcvOBJ_BRUSHCACHE = gcmCC('B','R','U','$'), + gcvOBJ_BRUSHNODE = gcmCC('B','R','U','n'), + gcvOBJ_BRUSH = gcmCC('B','R','U','o'), + gcvOBJ_BUFFER = gcmCC('B','U','F','R'), + gcvOBJ_COMMAND = gcmCC('C','M','D',' '), + gcvOBJ_COMMANDBUFFER = gcmCC('C','M','D','B'), + gcvOBJ_CONTEXT = gcmCC('C','T','X','T'), + gcvOBJ_DEVICE = gcmCC('D','E','V',' '), + gcvOBJ_DUMP = gcmCC('D','U','M','P'), + gcvOBJ_EVENT = gcmCC('E','V','N','T'), + gcvOBJ_FUNCTION = gcmCC('F','U','N','C'), + gcvOBJ_HAL = gcmCC('H','A','L',' '), + gcvOBJ_HARDWARE = gcmCC('H','A','R','D'), + gcvOBJ_HEAP = gcmCC('H','E','A','P'), + gcvOBJ_INDEX = gcmCC('I','N','D','X'), + gcvOBJ_INTERRUPT = gcmCC('I','N','T','R'), + gcvOBJ_KERNEL = gcmCC('K','E','R','N'), + gcvOBJ_KERNEL_FUNCTION = gcmCC('K','F','C','N'), + gcvOBJ_MEMORYBUFFER = gcmCC('M','E','M','B'), + gcvOBJ_MMU = gcmCC('M','M','U',' '), + gcvOBJ_OS = gcmCC('O','S',' ',' '), + gcvOBJ_OUTPUT = gcmCC('O','U','T','P'), + gcvOBJ_PAINT = gcmCC('P','N','T',' '), + gcvOBJ_PATH = gcmCC('P','A','T','H'), + gcvOBJ_QUEUE = gcmCC('Q','U','E',' '), + gcvOBJ_SAMPLER = gcmCC('S','A','M','P'), + gcvOBJ_SHADER = gcmCC('S','H','D','R'), + gcvOBJ_VIR_SHADER = gcmCC('V','S','D','R'), + gcvOBJ_STREAM = gcmCC('S','T','R','M'), + gcvOBJ_SURF = gcmCC('S','U','R','F'), + gcvOBJ_TEXTURE = gcmCC('T','X','T','R'), + gcvOBJ_UNIFORM = gcmCC('U','N','I','F'), + gcvOBJ_VARIABLE = gcmCC('V','A','R','I'), + gcvOBJ_VERTEX = gcmCC('V','R','T','X'), + gcvOBJ_VIDMEM = gcmCC('V','M','E','M'), + gcvOBJ_VG = gcmCC('V','G',' ',' '), + gcvOBJ_BUFOBJ = gcmCC('B','U','F','O'), + gcvOBJ_UNIFORM_BLOCK = gcmCC('U','B','L','K'), + gcvOBJ_CL = gcmCC('C','L',' ',' '), + gcvOBJ_STORAGE_BLOCK = gcmCC('S','B','L','K'), + gcvOBJ_IO_BLOCK = gcmCC('I','O','B','K'), +} +gceOBJECT_TYPE; + +/* gcsOBJECT object defintinon. 
*/ +typedef struct _gcsOBJECT +{ + /* Type of an object. */ + gceOBJECT_TYPE type; +} +gcsOBJECT; + +typedef struct _gckHARDWARE * gckHARDWARE; + + +#define gcdMAX_GPU_COUNT gcvCORE_COUNT + +#define gcdMAX_SURF_LAYERS 4 + +#define gcdMAX_DRAW_BUFFERS 8 + +/******************************************************************************* +** +** gcmVERIFY_OBJECT +** +** Assert if an object is invalid or is not of the specified type. If the +** object is invalid or not of the specified type, gcvSTATUS_INVALID_OBJECT +** will be returned from the current function. In retail mode this macro +** does nothing. +** +** ARGUMENTS: +** +** obj Object to test. +** t Expected type of the object. +*/ +#if gcmIS_DEBUG(gcdDEBUG_TRACE) +#define _gcmVERIFY_OBJECT(prefix, obj, t) \ + if ((obj) == gcvNULL) \ + { \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "VERIFY_OBJECT failed: NULL"); \ + prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \ + gcmCC_PRINT(t)); \ + prefix##ASSERT((obj) != gcvNULL); \ + prefix##FOOTER_ARG("status=%d", gcvSTATUS_INVALID_OBJECT); \ + return gcvSTATUS_INVALID_OBJECT; \ + } \ + else if (((gcsOBJECT*) (obj))->type != t) \ + { \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "VERIFY_OBJECT failed: %c%c%c%c", \ + gcmCC_PRINT(((gcsOBJECT*) (obj))->type)); \ + prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \ + gcmCC_PRINT(t)); \ + prefix##ASSERT(((gcsOBJECT*)(obj))->type == t); \ + prefix##FOOTER_ARG("status=%d", gcvSTATUS_INVALID_OBJECT); \ + return gcvSTATUS_INVALID_OBJECT; \ + } + +# define gcmVERIFY_OBJECT(obj, t) _gcmVERIFY_OBJECT(gcm, obj, t) +# define gcmkVERIFY_OBJECT(obj, t) _gcmVERIFY_OBJECT(gcmk, obj, t) +#else +# define gcmVERIFY_OBJECT(obj, t) do {} while (gcvFALSE) +# define gcmkVERIFY_OBJECT(obj, t) do {} while (gcvFALSE) +#endif + +/******************************************************************************/ +/*VERIFY_OBJECT if special return expected*/ +/******************************************************************************/ 
+#ifndef EGL_API_ANDROID +# define _gcmVERIFY_OBJECT_RETURN(prefix, obj, t, retVal) \ + do \ + { \ + if ((obj) == gcvNULL) \ + { \ + prefix##PRINT_VERSION(); \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "VERIFY_OBJECT_RETURN failed: NULL"); \ + prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \ + gcmCC_PRINT(t)); \ + prefix##ASSERT((obj) != gcvNULL); \ + prefix##FOOTER_ARG("retVal=%d", retVal); \ + return retVal; \ + } \ + else if (((gcsOBJECT*) (obj))->type != t) \ + { \ + prefix##PRINT_VERSION(); \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "VERIFY_OBJECT_RETURN failed: %c%c%c%c", \ + gcmCC_PRINT(((gcsOBJECT*) (obj))->type)); \ + prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \ + gcmCC_PRINT(t)); \ + prefix##ASSERT(((gcsOBJECT*)(obj))->type == t); \ + prefix##FOOTER_ARG("retVal=%d", retVal); \ + return retVal; \ + } \ + } \ + while (gcvFALSE) +# define gcmVERIFY_OBJECT_RETURN(obj, t, retVal) \ + _gcmVERIFY_OBJECT_RETURN(gcm, obj, t, retVal) +# define gcmkVERIFY_OBJECT_RETURN(obj, t, retVal) \ + _gcmVERIFY_OBJECT_RETURN(gcmk, obj, t, retVal) +#else +# define gcmVERIFY_OBJECT_RETURN(obj, t) do {} while (gcvFALSE) +# define gcmVERIFY_OBJECT_RETURN(obj, t) do {} while (gcvFALSE) +#endif + +typedef struct _gcsContiguousBlock +{ + gctUINT32 ptr; + gctSIZE_T size; +} +gcsContiguousBlock; + + +/******************************************************************************\ +********************************** gckOS Object ********************************* +\******************************************************************************/ + +/* Construct a new gckOS object. */ +gceSTATUS +gckOS_Construct( + IN gctPOINTER Context, + OUT gckOS * Os + ); + +/* Destroy an gckOS object. */ +gceSTATUS +gckOS_Destroy( + IN gckOS Os + ); + +/* Query the video memory. 
*/ +gceSTATUS +gckOS_QueryVideoMemory( + IN gckOS Os, + OUT gctPHYS_ADDR * InternalAddress, + OUT gctSIZE_T * InternalSize, + OUT gctPHYS_ADDR * ExternalAddress, + OUT gctSIZE_T * ExternalSize, + OUT gctPHYS_ADDR * ContiguousAddress, + OUT gctSIZE_T * ContiguousSize + ); + +/* Allocate memory from the heap. */ +gceSTATUS +gckOS_Allocate( + IN gckOS Os, + IN gctSIZE_T Bytes, + OUT gctPOINTER * Memory + ); + +/* Free allocated memory. */ +gceSTATUS +gckOS_Free( + IN gckOS Os, + IN gctPOINTER Memory + ); + +/* Wrapper for allocation memory.. */ +gceSTATUS +gckOS_AllocateMemory( + IN gckOS Os, + IN gctSIZE_T Bytes, + OUT gctPOINTER * Memory + ); + +/* Wrapper for freeing memory. */ +gceSTATUS +gckOS_FreeMemory( + IN gckOS Os, + IN gctPOINTER Memory + ); + +/* Allocate paged memory. */ +gceSTATUS +gckOS_AllocatePagedMemory( + IN gckOS Os, + IN gctSIZE_T Bytes, + OUT gctPHYS_ADDR * Physical + ); + +/* Allocate paged memory. */ +gceSTATUS +gckOS_AllocatePagedMemoryEx( + IN gckOS Os, + IN gctUINT32 Flag, + IN gctSIZE_T Bytes, + OUT gctUINT32 * Gid, + OUT gctPHYS_ADDR * Physical + ); + +/* Lock pages. */ +gceSTATUS +gckOS_LockPages( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + IN gctBOOL Cacheable, + OUT gctPOINTER * Logical, + OUT gctSIZE_T * PageCount + ); + +/* Map pages. */ +gceSTATUS +gckOS_MapPages( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T PageCount, + IN gctPOINTER PageTable + ); + +/* Map pages. */ +gceSTATUS +gckOS_MapPagesEx( + IN gckOS Os, + IN gceCORE Core, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T PageCount, + IN gctUINT32 Address, + IN gctPOINTER PageTable, + IN gctBOOL Writable, + IN gceSURF_TYPE Type + ); + +gceSTATUS +gckOS_UnmapPages( + IN gckOS Os, + IN gctSIZE_T PageCount, + IN gctUINT32 Address + ); + +/* Unlock pages. */ +gceSTATUS +gckOS_UnlockPages( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + IN gctPOINTER Logical + ); + +/* Free paged memory. 
*/ +gceSTATUS +gckOS_FreePagedMemory( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes + ); + +/* Allocate non-paged memory. */ +gceSTATUS +gckOS_AllocateNonPagedMemory( + IN gckOS Os, + IN gctBOOL InUserSpace, + IN gctUINT32 Flag, + IN OUT gctSIZE_T * Bytes, + OUT gctPHYS_ADDR * Physical, + OUT gctPOINTER * Logical + ); + +/* Free non-paged memory. */ +gceSTATUS +gckOS_FreeNonPagedMemory( + IN gckOS Os, + IN gctSIZE_T Bytes, + IN gctPHYS_ADDR Physical, + IN gctPOINTER Logical + ); + +/* Allocate contiguous memory. */ +gceSTATUS +gckOS_AllocateContiguous( + IN gckOS Os, + IN gctBOOL InUserSpace, + IN OUT gctSIZE_T * Bytes, + OUT gctPHYS_ADDR * Physical, + OUT gctPOINTER * Logical + ); + +/* Free contiguous memory. */ +gceSTATUS +gckOS_FreeContiguous( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctPOINTER Logical, + IN gctSIZE_T Bytes + ); + +/* Get the number fo bytes per page. */ +gceSTATUS +gckOS_GetPageSize( + IN gckOS Os, + OUT gctSIZE_T * PageSize + ); + +/* Get the physical address of a corresponding logical address. */ +gceSTATUS +gckOS_GetPhysicalAddress( + IN gckOS Os, + IN gctPOINTER Logical, + OUT gctPHYS_ADDR_T * Address + ); + +/* Get the physical address of a corresponding user logical address. */ +gceSTATUS +gckOS_UserLogicalToPhysical( + IN gckOS Os, + IN gctPOINTER Logical, + OUT gctPHYS_ADDR_T * Address + ); + +/* Map physical memory. */ +gceSTATUS +gckOS_MapPhysical( + IN gckOS Os, + IN gctUINT32 Physical, + IN gctSIZE_T Bytes, + OUT gctPOINTER * Logical + ); + +/* Unmap previously mapped physical memory. */ +gceSTATUS +gckOS_UnmapPhysical( + IN gckOS Os, + IN gctPOINTER Logical, + IN gctSIZE_T Bytes + ); + +/* Get real physical address from descriptor. */ +gceSTATUS +gckOS_PhysicalToPhysicalAddress( + IN gckOS Os, + IN gctPOINTER Physical, + IN gctUINT32 Offset, + OUT gctPHYS_ADDR_T * PhysicalAddress + ); + +/* Read data from a hardware register. 
*/ +gceSTATUS +gckOS_ReadRegister( + IN gckOS Os, + IN gctUINT32 Address, + OUT gctUINT32 * Data + ); + +/* Read data from a hardware register. */ +gceSTATUS +gckOS_ReadRegisterEx( + IN gckOS Os, + IN gceCORE Core, + IN gctUINT32 Address, + OUT gctUINT32 * Data + ); + +/* Write data to a hardware register. */ +gceSTATUS +gckOS_WriteRegister( + IN gckOS Os, + IN gctUINT32 Address, + IN gctUINT32 Data + ); + +/* Write data to a hardware register. */ +gceSTATUS +gckOS_WriteRegisterEx( + IN gckOS Os, + IN gceCORE Core, + IN gctUINT32 Address, + IN gctUINT32 Data + ); + +#ifdef __QNXNTO__ +static gcmINLINE gceSTATUS +gckOS_WriteMemory( + IN gckOS Os, + IN gctPOINTER Address, + IN gctUINT32 Data + ) +{ + /* Write memory. */ + *(gctUINT32 *)Address = Data; + return gcvSTATUS_OK; +} + +#else +/* Write data to a 32-bit memory location. */ +gceSTATUS +gckOS_WriteMemory( + IN gckOS Os, + IN gctPOINTER Address, + IN gctUINT32 Data + ); +#endif + +/* Map physical memory into the process space. */ +gceSTATUS +gckOS_MapMemory( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + OUT gctPOINTER * Logical + ); + +/* Unmap physical memory from the specified process space. */ +gceSTATUS +gckOS_UnmapMemoryEx( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + IN gctPOINTER Logical, + IN gctUINT32 PID + ); + +/* Unmap physical memory from the process space. */ +gceSTATUS +gckOS_UnmapMemory( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + IN gctPOINTER Logical + ); + +/* Unmap user logical memory out of physical memory. + * This function is only supported in Linux currently. + */ +gceSTATUS +gckOS_UnmapUserLogical( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + IN gctPOINTER Logical + ); + +/* Delete a mutex. */ +gceSTATUS +gckOS_DeleteMutex( + IN gckOS Os, + IN gctPOINTER Mutex + ); + +/* Acquire a mutex. 
*/ +gceSTATUS +gckOS_AcquireMutex( + IN gckOS Os, + IN gctPOINTER Mutex, + IN gctUINT32 Timeout + ); + +/* Release a mutex. */ +gceSTATUS +gckOS_ReleaseMutex( + IN gckOS Os, + IN gctPOINTER Mutex + ); + +/* Atomically exchange a pair of 32-bit values. */ +gceSTATUS +gckOS_AtomicExchange( + IN gckOS Os, + IN OUT gctUINT32_PTR Target, + IN gctUINT32 NewValue, + OUT gctUINT32_PTR OldValue + ); + +/* Atomically exchange a pair of pointers. */ +gceSTATUS +gckOS_AtomicExchangePtr( + IN gckOS Os, + IN OUT gctPOINTER * Target, + IN gctPOINTER NewValue, + OUT gctPOINTER * OldValue + ); + +gceSTATUS +gckOS_AtomSetMask( + IN gctPOINTER Atom, + IN gctUINT32 Mask + ); + +gceSTATUS +gckOS_AtomClearMask( + IN gctPOINTER Atom, + IN gctUINT32 Mask + ); + +gceSTATUS +gckOS_DumpCallStack( + IN gckOS Os + ); + +gceSTATUS +gckOS_GetProcessNameByPid( + IN gctINT Pid, + IN gctSIZE_T Length, + OUT gctUINT8_PTR String + ); + +/******************************************************************************* +** +** gckOS_AtomConstruct +** +** Create an atom. +** +** INPUT: +** +** gckOS Os +** Pointer to a gckOS object. +** +** OUTPUT: +** +** gctPOINTER * Atom +** Pointer to a variable receiving the constructed atom. +*/ +gceSTATUS +gckOS_AtomConstruct( + IN gckOS Os, + OUT gctPOINTER * Atom + ); + +/******************************************************************************* +** +** gckOS_AtomDestroy +** +** Destroy an atom. +** +** INPUT: +** +** gckOS Os +** Pointer to a gckOS object. +** +** gctPOINTER Atom +** Pointer to the atom to destroy. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_AtomDestroy( + IN gckOS Os, + OUT gctPOINTER Atom + ); + +/******************************************************************************* +** +** gckOS_AtomGet +** +** Get the 32-bit value protected by an atom. +** +** INPUT: +** +** gckOS Os +** Pointer to a gckOS object. +** +** gctPOINTER Atom +** Pointer to the atom. 
+** +** OUTPUT: +** +** gctINT32_PTR Value +** Pointer to a variable the receives the value of the atom. +*/ +gceSTATUS +gckOS_AtomGet( + IN gckOS Os, + IN gctPOINTER Atom, + OUT gctINT32_PTR Value + ); + +/******************************************************************************* +** +** gckOS_AtomSet +** +** Set the 32-bit value protected by an atom. +** +** INPUT: +** +** gckOS Os +** Pointer to a gckOS object. +** +** gctPOINTER Atom +** Pointer to the atom. +** +** gctINT32 Value +** The value of the atom. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_AtomSet( + IN gckOS Os, + IN gctPOINTER Atom, + IN gctINT32 Value + ); + +/******************************************************************************* +** +** gckOS_AtomIncrement +** +** Atomically increment the 32-bit integer value inside an atom. +** +** INPUT: +** +** gckOS Os +** Pointer to a gckOS object. +** +** gctPOINTER Atom +** Pointer to the atom. +** +** OUTPUT: +** +** gctINT32_PTR Value +** Pointer to a variable the receives the original value of the atom. +*/ +gceSTATUS +gckOS_AtomIncrement( + IN gckOS Os, + IN gctPOINTER Atom, + OUT gctINT32_PTR Value + ); + +/******************************************************************************* +** +** gckOS_AtomDecrement +** +** Atomically decrement the 32-bit integer value inside an atom. +** +** INPUT: +** +** gckOS Os +** Pointer to a gckOS object. +** +** gctPOINTER Atom +** Pointer to the atom. +** +** OUTPUT: +** +** gctINT32_PTR Value +** Pointer to a variable the receives the original value of the atom. +*/ +gceSTATUS +gckOS_AtomDecrement( + IN gckOS Os, + IN gctPOINTER Atom, + OUT gctINT32_PTR Value + ); + +/* Delay a number of milliseconds. */ +gceSTATUS +gckOS_Delay( + IN gckOS Os, + IN gctUINT32 Delay + ); + +/* Get time in milliseconds. */ +gceSTATUS +gckOS_GetTicks( + OUT gctUINT32_PTR Time + ); + +/* Compare time value. 
*/ +gceSTATUS +gckOS_TicksAfter( + IN gctUINT32 Time1, + IN gctUINT32 Time2, + OUT gctBOOL_PTR IsAfter + ); + +/* Get time in microseconds. */ +gceSTATUS +gckOS_GetTime( + OUT gctUINT64_PTR Time + ); + +/* Memory barrier. */ +gceSTATUS +gckOS_MemoryBarrier( + IN gckOS Os, + IN gctPOINTER Address + ); + +/* Map user pointer. */ +gceSTATUS +gckOS_MapUserPointer( + IN gckOS Os, + IN gctPOINTER Pointer, + IN gctSIZE_T Size, + OUT gctPOINTER * KernelPointer + ); + +/* Unmap user pointer. */ +gceSTATUS +gckOS_UnmapUserPointer( + IN gckOS Os, + IN gctPOINTER Pointer, + IN gctSIZE_T Size, + IN gctPOINTER KernelPointer + ); + +/******************************************************************************* +** +** gckOS_QueryNeedCopy +** +** Query whether the memory can be accessed or mapped directly or it has to be +** copied. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctUINT32 ProcessID +** Process ID of the current process. +** +** OUTPUT: +** +** gctBOOL_PTR NeedCopy +** Pointer to a boolean receiving gcvTRUE if the memory needs a copy or +** gcvFALSE if the memory can be accessed or mapped dircetly. +*/ +gceSTATUS +gckOS_QueryNeedCopy( + IN gckOS Os, + IN gctUINT32 ProcessID, + OUT gctBOOL_PTR NeedCopy + ); + +/******************************************************************************* +** +** gckOS_CopyFromUserData +** +** Copy data from user to kernel memory. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctPOINTER KernelPointer +** Pointer to kernel memory. +** +** gctPOINTER Pointer +** Pointer to user memory. +** +** gctSIZE_T Size +** Number of bytes to copy. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_CopyFromUserData( + IN gckOS Os, + IN gctPOINTER KernelPointer, + IN gctPOINTER Pointer, + IN gctSIZE_T Size + ); + +/******************************************************************************* +** +** gckOS_CopyToUserData +** +** Copy data from kernel to user memory. 
+** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctPOINTER KernelPointer +** Pointer to kernel memory. +** +** gctPOINTER Pointer +** Pointer to user memory. +** +** gctSIZE_T Size +** Number of bytes to copy. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_CopyToUserData( + IN gckOS Os, + IN gctPOINTER KernelPointer, + IN gctPOINTER Pointer, + IN gctSIZE_T Size + ); + +gceSTATUS +gckOS_SuspendInterrupt( + IN gckOS Os + ); + +gceSTATUS +gckOS_SuspendInterruptEx( + IN gckOS Os, + IN gceCORE Core + ); + +gceSTATUS +gckOS_ResumeInterrupt( + IN gckOS Os + ); + +gceSTATUS +gckOS_ResumeInterruptEx( + IN gckOS Os, + IN gceCORE Core + ); + +/* Get the base address for the physical memory. */ +gceSTATUS +gckOS_GetBaseAddress( + IN gckOS Os, + OUT gctUINT32_PTR BaseAddress + ); + +/* Perform a memory copy. */ +gceSTATUS +gckOS_MemCopy( + IN gctPOINTER Destination, + IN gctCONST_POINTER Source, + IN gctSIZE_T Bytes + ); + +/* Zero memory. */ +gceSTATUS +gckOS_ZeroMemory( + IN gctPOINTER Memory, + IN gctSIZE_T Bytes + ); + +/* Device I/O control to the kernel HAL layer. */ +gceSTATUS +gckOS_DeviceControl( + IN gckOS Os, + IN gctBOOL FromUser, + IN gctUINT32 IoControlCode, + IN gctPOINTER InputBuffer, + IN gctSIZE_T InputBufferSize, + OUT gctPOINTER OutputBuffer, + IN gctSIZE_T OutputBufferSize + ); + +/******************************************************************************* +** +** gckOS_GetProcessID +** +** Get current process ID. +** +** INPUT: +** +** Nothing. +** +** OUTPUT: +** +** gctUINT32_PTR ProcessID +** Pointer to the variable that receives the process ID. +*/ +gceSTATUS +gckOS_GetProcessID( + OUT gctUINT32_PTR ProcessID + ); + +gceSTATUS +gckOS_GetCurrentProcessID( + OUT gctUINT32_PTR ProcessID + ); + +/******************************************************************************* +** +** gckOS_GetThreadID +** +** Get current thread ID. +** +** INPUT: +** +** Nothing. 
+** +** OUTPUT: +** +** gctUINT32_PTR ThreadID +** Pointer to the variable that receives the thread ID. +*/ +gceSTATUS +gckOS_GetThreadID( + OUT gctUINT32_PTR ThreadID + ); + +/******************************************************************************\ +********************************** Signal Object ********************************* +\******************************************************************************/ + +/* Create a signal. */ +gceSTATUS +gckOS_CreateSignal( + IN gckOS Os, + IN gctBOOL ManualReset, + OUT gctSIGNAL * Signal + ); + +/* Destroy a signal. */ +gceSTATUS +gckOS_DestroySignal( + IN gckOS Os, + IN gctSIGNAL Signal + ); + +/* Signal a signal. */ +gceSTATUS +gckOS_Signal( + IN gckOS Os, + IN gctSIGNAL Signal, + IN gctBOOL State + ); + +/* Wait for a signal. */ +gceSTATUS +gckOS_WaitSignal( + IN gckOS Os, + IN gctSIGNAL Signal, + IN gctBOOL Interruptable, + IN gctUINT32 Wait + ); + +#ifdef __QNXNTO__ +gceSTATUS +gckOS_SignalPulse( + IN gckOS Os, + IN gctSIGNAL Signal + ); + +gceSTATUS +gckOS_SignalPending( + IN gckOS Os, + IN gctSIGNAL Signal + ); +#endif + +/* Map a user signal to the kernel space. */ +gceSTATUS +gckOS_MapSignal( + IN gckOS Os, + IN gctSIGNAL Signal, + IN gctHANDLE Process, + OUT gctSIGNAL * MappedSignal + ); + +/* Unmap a user signal */ +gceSTATUS +gckOS_UnmapSignal( + IN gckOS Os, + IN gctSIGNAL Signal + ); + +/* Map user memory. */ +gceSTATUS +gckOS_MapUserMemory( + IN gckOS Os, + IN gceCORE Core, + IN gctPOINTER Memory, + IN gctUINT32 Physical, + IN gctSIZE_T Size, + OUT gctPOINTER * Info, + OUT gctUINT32_PTR Address + ); + +/* Unmap user memory. */ +gceSTATUS +gckOS_UnmapUserMemory( + IN gckOS Os, + IN gceCORE Core, + IN gctPOINTER Memory, + IN gctSIZE_T Size, + IN gctPOINTER Info, + IN gctUINT32 Address + ); + +/* Get scatter-gather table from memory. 
*/ +gceSTATUS +gckOS_MemoryGetSGT( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Offset, + IN gctSIZE_T Bytes, + OUT gctPOINTER *SGT + ); + +/* Map a page range of memory to user space. */ +gceSTATUS +gckOS_MemoryMmap( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T skipPages, + IN gctSIZE_T numPages, + INOUT gctPOINTER Vma + ); + +/* Wrap a user memory to gctPHYS_ADDR. */ +gceSTATUS +gckOS_WrapMemory( + IN gckOS Os, + IN gcsUSER_MEMORY_DESC_PTR Desc, + OUT gctSIZE_T *Bytes, + OUT gctPHYS_ADDR * Physical, + OUT gctBOOL *Contiguous + ); + +gceSTATUS +gckOS_GetPolicyID( + IN gckOS Os, + IN gceSURF_TYPE Type, + OUT gctUINT32_PTR PolicyID, + OUT gctUINT32_PTR AXIConfig + ); + +/******************************************************************************\ +************************** Android Native Fence Sync *************************** +\******************************************************************************/ +gceSTATUS +gckOS_CreateSyncTimeline( + IN gckOS Os, + IN gceCORE Core, + OUT gctHANDLE * Timeline + ); + +gceSTATUS +gckOS_DestroySyncTimeline( + IN gckOS Os, + IN gctHANDLE Timeline + ); + +gceSTATUS +gckOS_CreateNativeFence( + IN gckOS Os, + IN gctHANDLE Timeline, + IN gctSIGNAL Signal, + OUT gctINT * FenceFD + ); + +gceSTATUS +gckOS_WaitNativeFence( + IN gckOS Os, + IN gctHANDLE Timeline, + IN gctINT FenceFD, + IN gctUINT32 Timeout + ); + +#if !USE_NEW_LINUX_SIGNAL +/* Create signal to be used in the user space. */ +gceSTATUS +gckOS_CreateUserSignal( + IN gckOS Os, + IN gctBOOL ManualReset, + OUT gctINT * SignalID + ); + +/* Destroy signal used in the user space. */ +gceSTATUS +gckOS_DestroyUserSignal( + IN gckOS Os, + IN gctINT SignalID + ); + +/* Wait for signal used in the user space. */ +gceSTATUS +gckOS_WaitUserSignal( + IN gckOS Os, + IN gctINT SignalID, + IN gctUINT32 Wait + ); + +/* Signal a signal used in the user space. 
*/ +gceSTATUS +gckOS_SignalUserSignal( + IN gckOS Os, + IN gctINT SignalID, + IN gctBOOL State + ); +#endif /* USE_NEW_LINUX_SIGNAL */ + +/* Set a signal owned by a process. */ +#if defined(__QNXNTO__) +gceSTATUS +gckOS_UserSignal( + IN gckOS Os, + IN gctSIGNAL Signal, + IN gctINT Recvid, + IN gctINT Coid + ); +#else +gceSTATUS +gckOS_UserSignal( + IN gckOS Os, + IN gctSIGNAL Signal, + IN gctHANDLE Process + ); +#endif + +/******************************************************************************\ +** Cache Support +*/ + +gceSTATUS +gckOS_CacheClean( + gckOS Os, + gctUINT32 ProcessID, + gctPHYS_ADDR Handle, + gctSIZE_T Offset, + gctPOINTER Logical, + gctSIZE_T Bytes + ); + +gceSTATUS +gckOS_CacheFlush( + gckOS Os, + gctUINT32 ProcessID, + gctPHYS_ADDR Handle, + gctSIZE_T Offset, + gctPOINTER Logical, + gctSIZE_T Bytes + ); + +gceSTATUS +gckOS_CacheInvalidate( + gckOS Os, + gctUINT32 ProcessID, + gctPHYS_ADDR Handle, + gctSIZE_T Offset, + gctPOINTER Logical, + gctSIZE_T Bytes + ); + +gceSTATUS +gckOS_CPUPhysicalToGPUPhysical( + IN gckOS Os, + IN gctPHYS_ADDR_T CPUPhysical, + IN gctPHYS_ADDR_T * GPUPhysical + ); + +gceSTATUS +gckOS_GPUPhysicalToCPUPhysical( + IN gckOS Os, + IN gctUINT32 GPUPhysical, + IN gctPHYS_ADDR_T * CPUPhysical + ); + +gceSTATUS +gckOS_QueryOption( + IN gckOS Os, + IN gctCONST_STRING Option, + OUT gctUINT32 * Value + ); + +/******************************************************************************\ +** Debug Support +*/ + +void +gckOS_SetDebugLevel( + IN gctUINT32 Level + ); + +void +gckOS_SetDebugZone( + IN gctUINT32 Zone + ); + +void +gckOS_SetDebugLevelZone( + IN gctUINT32 Level, + IN gctUINT32 Zone + ); + +void +gckOS_SetDebugZones( + IN gctUINT32 Zones, + IN gctBOOL Enable + ); + +void +gckOS_SetDebugFile( + IN gctCONST_STRING FileName + ); + +/******************************************************************************* +** Broadcast interface. +*/ + +typedef enum _gceBROADCAST +{ + /* GPU might be idle. 
*/ + gcvBROADCAST_GPU_IDLE, + + /* A commit is going to happen. */ + gcvBROADCAST_GPU_COMMIT, + + /* GPU seems to be stuck. */ + gcvBROADCAST_GPU_STUCK, + + /* First process gets attached. */ + gcvBROADCAST_FIRST_PROCESS, + + /* Last process gets detached. */ + gcvBROADCAST_LAST_PROCESS, + + /* AXI bus error. */ + gcvBROADCAST_AXI_BUS_ERROR, + + /* Out of memory. */ + gcvBROADCAST_OUT_OF_MEMORY, +} +gceBROADCAST; + +gceSTATUS +gckOS_Broadcast( + IN gckOS Os, + IN gckHARDWARE Hardware, + IN gceBROADCAST Reason + ); + +gceSTATUS +gckOS_BroadcastHurry( + IN gckOS Os, + IN gckHARDWARE Hardware, + IN gctUINT Urgency + ); + +gceSTATUS +gckOS_BroadcastCalibrateSpeed( + IN gckOS Os, + IN gckHARDWARE Hardware, + IN gctUINT Idle, + IN gctUINT Time + ); + +/******************************************************************************* +** +** gckOS_SetGPUPower +** +** Set the power of the GPU on or off. +** +** INPUT: +** +** gckOS Os +** Pointer to a gckOS object. +** +** gceCORE Core +** GPU whose power is set. +** +** gctBOOL Clock +** gcvTRUE to turn on the clock, or gcvFALSE to turn off the clock. +** +** gctBOOL Power +** gcvTRUE to turn on the power, or gcvFALSE to turn off the power. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_SetGPUPower( + IN gckOS Os, + IN gceCORE Core, + IN gctBOOL Clock, + IN gctBOOL Power + ); + +gceSTATUS +gckOS_ResetGPU( + IN gckOS Os, + IN gceCORE Core + ); + +gceSTATUS +gckOS_PrepareGPUFrequency( + IN gckOS Os, + IN gceCORE Core + ); + +gceSTATUS +gckOS_FinishGPUFrequency( + IN gckOS Os, + IN gceCORE Core + ); + +gceSTATUS +gckOS_QueryGPUFrequency( + IN gckOS Os, + IN gceCORE Core, + OUT gctUINT32 * Frequency, + OUT gctUINT8 * Scale + ); + +gceSTATUS +gckOS_SetGPUFrequency( + IN gckOS Os, + IN gceCORE Core, + IN gctUINT8 Scale + ); + +/******************************************************************************* +** Semaphores. +*/ + +/* Create a new semaphore. 
*/ +gceSTATUS +gckOS_CreateSemaphore( + IN gckOS Os, + OUT gctPOINTER * Semaphore + ); + + +/* Delete a semahore. */ +gceSTATUS +gckOS_DestroySemaphore( + IN gckOS Os, + IN gctPOINTER Semaphore + ); + +/* Acquire a semahore. */ +gceSTATUS +gckOS_AcquireSemaphore( + IN gckOS Os, + IN gctPOINTER Semaphore + ); + +/* Try to acquire a semahore. */ +gceSTATUS +gckOS_TryAcquireSemaphore( + IN gckOS Os, + IN gctPOINTER Semaphore + ); + +/* Release a semahore. */ +gceSTATUS +gckOS_ReleaseSemaphore( + IN gckOS Os, + IN gctPOINTER Semaphore + ); + +/******************************************************************************* +** Timer API. +*/ + +typedef void (*gctTIMERFUNCTION)(gctPOINTER); + +/* Create a timer. */ +gceSTATUS +gckOS_CreateTimer( + IN gckOS Os, + IN gctTIMERFUNCTION Function, + IN gctPOINTER Data, + OUT gctPOINTER * Timer + ); + +/* Destory a timer. */ +gceSTATUS +gckOS_DestroyTimer( + IN gckOS Os, + IN gctPOINTER Timer + ); + +/* Start a timer. */ +gceSTATUS +gckOS_StartTimer( + IN gckOS Os, + IN gctPOINTER Timer, + IN gctUINT32 Delay + ); + +/* Stop a timer. */ +gceSTATUS +gckOS_StopTimer( + IN gckOS Os, + IN gctPOINTER Timer + ); + +/******************************************************************************\ +********************************* gckHEAP Object ******************************** +\******************************************************************************/ + +typedef struct _gckHEAP * gckHEAP; + +/* Construct a new gckHEAP object. */ +gceSTATUS +gckHEAP_Construct( + IN gckOS Os, + IN gctSIZE_T AllocationSize, + OUT gckHEAP * Heap + ); + +/* Destroy an gckHEAP object. */ +gceSTATUS +gckHEAP_Destroy( + IN gckHEAP Heap + ); + +/* Allocate memory. */ +gceSTATUS +gckHEAP_Allocate( + IN gckHEAP Heap, + IN gctSIZE_T Bytes, + OUT gctPOINTER * Node + ); + +/* Free memory. */ +gceSTATUS +gckHEAP_Free( + IN gckHEAP Heap, + IN gctPOINTER Node + ); + +/* Profile the heap. 
*/ +gceSTATUS +gckHEAP_ProfileStart( + IN gckHEAP Heap + ); + +gceSTATUS +gckHEAP_ProfileEnd( + IN gckHEAP Heap, + IN gctCONST_STRING Title + ); + + +/******************************************************************************\ +******************************** gckVIDMEM Object ****************************** +\******************************************************************************/ + +typedef struct _gckVIDMEM * gckVIDMEM; +typedef struct _gckKERNEL * gckKERNEL; +typedef struct _gckDB * gckDB; +typedef struct _gckDVFS * gckDVFS; +typedef struct _gcsASYNC_COMMAND * gckASYNC_COMMAND; +typedef struct _gckMMU * gckMMU; +typedef struct _gcsDEVICE * gckDEVICE; + +/* Construct a new gckVIDMEM object. */ +gceSTATUS +gckVIDMEM_Construct( + IN gckOS Os, + IN gctUINT32 BaseAddress, + IN gctSIZE_T Bytes, + IN gctSIZE_T Threshold, + IN gctSIZE_T Banking, + OUT gckVIDMEM * Memory + ); + +/* Destroy an gckVDIMEM object. */ +gceSTATUS +gckVIDMEM_Destroy( + IN gckVIDMEM Memory + ); + +/* Allocate linear memory. */ +gceSTATUS +gckVIDMEM_AllocateLinear( + IN gckKERNEL Kernel, + IN gckVIDMEM Memory, + IN gctSIZE_T Bytes, + IN gctUINT32 Alignment, + IN gceSURF_TYPE Type, + IN gctBOOL Specified, + OUT gcuVIDMEM_NODE_PTR * Node + ); + +/* Free memory. */ +gceSTATUS +gckVIDMEM_Free( + IN gckKERNEL Kernel, + IN gcuVIDMEM_NODE_PTR Node + ); + +/* Lock memory. */ +gceSTATUS +gckVIDMEM_Lock( + IN gckKERNEL Kernel, + IN gckVIDMEM_NODE Node, + IN gctBOOL Cacheable, + OUT gctUINT32 * Address, + OUT gctUINT32 * Gid, + OUT gctUINT64 * PhysicalAddress + ); + +/* Unlock memory. */ +gceSTATUS +gckVIDMEM_Unlock( + IN gckKERNEL Kernel, + IN gckVIDMEM_NODE Node, + IN gceSURF_TYPE Type, + IN OUT gctBOOL * Asynchroneous + ); + +/* Construct a gcuVIDMEM_NODE union for virtual memory. */ +gceSTATUS +gckVIDMEM_ConstructVirtual( + IN gckKERNEL Kernel, + IN gctUINT32 Flag, + IN gctSIZE_T Bytes, + OUT gcuVIDMEM_NODE_PTR * Node + ); + +/* Destroy a gcuVIDMEM_NODE union for virtual memory. 
*/ +gceSTATUS +gckVIDMEM_DestroyVirtual( + IN gcuVIDMEM_NODE_PTR Node + ); + +gceSTATUS +gckVIDMEM_SetCommitStamp( + IN gckKERNEL Kernel, + IN gceENGINE Engine, + IN gctUINT32 Handle, + IN gctUINT64 CommitStamp + ); + +gceSTATUS +gckVIDMEM_GetCommitStamp( + IN gckKERNEL Kernel, + IN gceENGINE Engine, + IN gctUINT32 Handle, + OUT gctUINT64_PTR CommitStamp + ); + +/******************************************************************************\ +******************************** gckKERNEL Object ****************************** +\******************************************************************************/ + +struct _gcsHAL_INTERFACE; + +/* Notifications. */ +typedef enum _gceNOTIFY +{ + gcvNOTIFY_INTERRUPT, + gcvNOTIFY_COMMAND_QUEUE, +} +gceNOTIFY; + +/* Flush flags. */ +typedef enum _gceKERNEL_FLUSH +{ + gcvFLUSH_COLOR = 0x01, + gcvFLUSH_DEPTH = 0x02, + gcvFLUSH_TEXTURE = 0x04, + gcvFLUSH_2D = 0x08, + gcvFLUSH_L2 = 0x10, + gcvFLUSH_TILE_STATUS = 0x20, + gcvFLUSH_ICACHE = 0x40, + gcvFLUSH_TXDESC = 0x80, + gcvFLUSH_FENCE = 0x100, + gcvFLUSH_VERTEX = 0x200, + gcvFLUSH_TFBHEADER = 0x400, + gcvFLUSH_ALL = gcvFLUSH_COLOR + | gcvFLUSH_DEPTH + | gcvFLUSH_TEXTURE + | gcvFLUSH_2D + | gcvFLUSH_L2 + | gcvFLUSH_TILE_STATUS + | gcvFLUSH_ICACHE + | gcvFLUSH_TXDESC + | gcvFLUSH_FENCE + | gcvFLUSH_VERTEX + | gcvFLUSH_TFBHEADER +} +gceKERNEL_FLUSH; + +/* Construct a new gckKERNEL object. */ +gceSTATUS +gckKERNEL_Construct( + IN gckOS Os, + IN gceCORE Core, + IN gctUINT ChipID, + IN gctPOINTER Context, + IN gckDEVICE Device, + IN gckDB SharedDB, + OUT gckKERNEL * Kernel + ); + +/* Destroy an gckKERNEL object. */ +gceSTATUS +gckKERNEL_Destroy( + IN gckKERNEL Kernel + ); + +/* Dispatch a user-level command. */ +gceSTATUS +gckKERNEL_Dispatch( + IN gckKERNEL Kernel, + IN gckDEVICE Device, + IN gctBOOL FromUser, + IN OUT struct _gcsHAL_INTERFACE * Interface + ); + +/* Query Database requirements. 
*/ +gceSTATUS + gckKERNEL_QueryDatabase( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN OUT gcsHAL_INTERFACE * Interface + ); + +/* Query the video memory. */ +gceSTATUS +gckKERNEL_QueryVideoMemory( + IN gckKERNEL Kernel, + OUT struct _gcsHAL_INTERFACE * Interface + ); + +/* Query used memory nodes of a specific pool. */ +gceSTATUS +gckKERNEL_QueryVidMemPoolNodes( + gckKERNEL Kernel, + gcePOOL Pool, + gctUINT32 * TotalSize, /* sum of the sizes of the contiguous blocks (i.e. total memory used at current time) : to be filled by the called function */ + gcsContiguousBlock * MemoryBlocks, /* previously allocated by the calling function : to be filled by the called function */ + gctUINT32 NumMaxBlocks, /* provided by the calling function */ + gctUINT32 * NumBlocks /* actual number of contiguous blocks : to be filled by the called function */ + ); + +/* Lookup the gckVIDMEM object for a pool. */ +gceSTATUS +gckKERNEL_GetVideoMemoryPool( + IN gckKERNEL Kernel, + IN gcePOOL Pool, + OUT gckVIDMEM * VideoMemory + ); + +gceSTATUS +gckKERNEL_AllocateLinearMemory( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN OUT gcePOOL * Pool, + IN gctSIZE_T Bytes, + IN gctUINT32 Alignment, + IN gceSURF_TYPE Type, + IN gctUINT32 Flag, + OUT gctUINT32 * Node + ); + +gceSTATUS +gckKERNEL_ReleaseVideoMemory( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN gctUINT32 Handle + ); + +gceSTATUS +gckKERNEL_LockVideoMemory( + IN gckKERNEL Kernel, + IN gceCORE Core, + IN gctUINT32 ProcessID, + IN gctBOOL FromUser, + IN OUT gcsHAL_INTERFACE * Interface + ); + +gceSTATUS +gckKERNEL_UnlockVideoMemory( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN OUT gcsHAL_INTERFACE * Interface + ); + +/* Unlock video memory from gpu immediately w/o considering gpu cache flush. */ +gceSTATUS +gckKERNEL_BottomHalfUnlockVideoMemory( + IN gckKERNEL Kernel, + IN gctUINT32 ProcessID, + IN gctUINT32 Node + ); + +/* Map video memory. 
*/ +gceSTATUS +gckKERNEL_MapVideoMemory( + IN gckKERNEL Kernel, + IN gctBOOL InUserSpace, + IN gctUINT32 Address, +#ifdef __QNXNTO__ + IN gctUINT32 Pid, + IN gctUINT32 Bytes, +#endif + OUT gctPOINTER * Logical + ); + +/* Map video memory. */ +gceSTATUS +gckKERNEL_MapVideoMemoryEx( + IN gckKERNEL Kernel, + IN gceCORE Core, + IN gctBOOL InUserSpace, + IN gctUINT32 Address, +#ifdef __QNXNTO__ + IN gctUINT32 Pid, + IN gctUINT32 Bytes, +#endif + IN gcePOOL Pool, + OUT gctPOINTER * Logical + ); + +#ifdef __QNXNTO__ +/* Unmap video memory. */ +gceSTATUS +gckKERNEL_UnmapVideoMemory( + IN gckKERNEL Kernel, + IN gctPOINTER Logical, + IN gctUINT32 Pid, + IN gctUINT32 Bytes + ); +#endif + +/* Map memory. */ +gceSTATUS +gckKERNEL_MapMemory( + IN gckKERNEL Kernel, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + OUT gctPOINTER * Logical + ); + +/* Unmap memory. */ +gceSTATUS +gckKERNEL_UnmapMemory( + IN gckKERNEL Kernel, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + IN gctPOINTER Logical, + IN gctUINT32 ProcessID + ); + +/* Notification of events. */ +gceSTATUS +gckKERNEL_Notify( + IN gckKERNEL Kernel, + IN gceNOTIFY Notifcation, + IN gctBOOL Data + ); + +gceSTATUS +gckKERNEL_QuerySettings( + IN gckKERNEL Kernel, + OUT gcsKERNEL_SETTINGS * Settings + ); + +/******************************************************************************* +** +** gckKERNEL_Recovery +** +** Try to recover the GPU from a fatal error. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckKERNEL_Recovery( + IN gckKERNEL Kernel + ); + +/* Get access to the user data. */ +gceSTATUS +gckKERNEL_OpenUserData( + IN gckKERNEL Kernel, + IN gctBOOL NeedCopy, + IN gctPOINTER StaticStorage, + IN gctPOINTER UserPointer, + IN gctSIZE_T Size, + OUT gctPOINTER * KernelPointer + ); + +/* Release resources associated with the user data connection. 
*/ +gceSTATUS +gckKERNEL_CloseUserData( + IN gckKERNEL Kernel, + IN gctBOOL NeedCopy, + IN gctBOOL FlushData, + IN gctPOINTER UserPointer, + IN gctSIZE_T Size, + OUT gctPOINTER * KernelPointer + ); + +gceSTATUS +gckDVFS_Construct( + IN gckHARDWARE Hardware, + OUT gckDVFS * Frequency + ); + +gceSTATUS +gckDVFS_Destroy( + IN gckDVFS Dvfs + ); + +gceSTATUS +gckDVFS_Start( + IN gckDVFS Dvfs + ); + +gceSTATUS +gckDVFS_Stop( + IN gckDVFS Dvfs + ); + +/******************************************************************************\ +******************************* gckHARDWARE Object ***************************** +\******************************************************************************/ + +/* Construct a new gckHARDWARE object. */ +gceSTATUS +gckHARDWARE_Construct( + IN gckOS Os, + IN gceCORE Core, + OUT gckHARDWARE * Hardware + ); + +/* Destroy an gckHARDWARE object. */ +gceSTATUS +gckHARDWARE_Destroy( + IN gckHARDWARE Hardware + ); + +/* Get hardware type. */ +gceSTATUS +gckHARDWARE_GetType( + IN gckHARDWARE Hardware, + OUT gceHARDWARE_TYPE * Type + ); + +/* Query system memory requirements. */ +gceSTATUS +gckHARDWARE_QuerySystemMemory( + IN gckHARDWARE Hardware, + OUT gctSIZE_T * SystemSize, + OUT gctUINT32 * SystemBaseAddress + ); + +/* Build virtual address. */ +gceSTATUS +gckHARDWARE_BuildVirtualAddress( + IN gckHARDWARE Hardware, + IN gctUINT32 Index, + IN gctUINT32 Offset, + OUT gctUINT32 * Address + ); + +/* Query command buffer requirements. */ +gceSTATUS +gckHARDWARE_QueryCommandBuffer( + IN gckHARDWARE Hardware, + IN gceENGINE Engine, + OUT gctUINT32 * Alignment, + OUT gctUINT32 * ReservedHead, + OUT gctUINT32 * ReservedTail + ); + +/* Add a WAIT/LINK pair in the command queue. */ +gceSTATUS +gckHARDWARE_WaitLink( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gctUINT32 Address, + IN gctUINT32 Offset, + IN OUT gctUINT32 * Bytes, + OUT gctUINT32 * WaitOffset, + OUT gctUINT32 * WaitBytes + ); + +/* Kickstart the command processor. 
*/ +gceSTATUS +gckHARDWARE_Execute( + IN gckHARDWARE Hardware, + IN gctUINT32 Address, + IN gctSIZE_T Bytes + ); + +/* Add an END command in the command queue. */ +gceSTATUS +gckHARDWARE_End( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gctUINT32 Address, + IN OUT gctUINT32 * Bytes + ); + +gceSTATUS +gckHARDWARE_ChipEnable( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gceCORE_3D_MASK ChipEnable, + IN OUT gctSIZE_T * Bytes + ); + +/* Add a NOP command in the command queue. */ +gceSTATUS +gckHARDWARE_Nop( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN OUT gctSIZE_T * Bytes + ); + +/* Add a PIPESELECT command in the command queue. */ +gceSTATUS +gckHARDWARE_PipeSelect( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gcePIPE_SELECT Pipe, + IN OUT gctUINT32 * Bytes + ); + +/* Add a LINK command in the command queue. */ +gceSTATUS +gckHARDWARE_Link( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gctUINT32 FetchAddress, + IN gctUINT32 FetchSize, + IN OUT gctUINT32 * Bytes, + OUT gctUINT32 * Low, + OUT gctUINT32 * High + ); + +/* Add an EVENT command in the command queue. */ +gceSTATUS +gckHARDWARE_Event( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gctUINT8 Event, + IN gceKERNEL_WHERE FromWhere, + IN OUT gctUINT32 * Bytes + ); + +/* Query the available memory. */ +gceSTATUS +gckHARDWARE_QueryMemory( + IN gckHARDWARE Hardware, + OUT gctSIZE_T * InternalSize, + OUT gctUINT32 * InternalBaseAddress, + OUT gctUINT32 * InternalAlignment, + OUT gctSIZE_T * ExternalSize, + OUT gctUINT32 * ExternalBaseAddress, + OUT gctUINT32 * ExternalAlignment, + OUT gctUINT32 * HorizontalTileSize, + OUT gctUINT32 * VerticalTileSize + ); + +/* Query the identity of the hardware. 
*/ +gceSTATUS +gckHARDWARE_QueryChipIdentity( + IN gckHARDWARE Hardware, + OUT gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity + ); + +gceSTATUS +gckHARDWARE_QueryChipOptions( + IN gckHARDWARE Hardware, + OUT gcsHAL_QUERY_CHIP_OPTIONS_PTR Options + ); + +/* Query the shader uniforms support. */ +gceSTATUS +gckHARDWARE_QueryShaderCaps( + IN gckHARDWARE Hardware, + OUT gctUINT * VertexUniforms, + OUT gctUINT * FragmentUniforms, + OUT gctBOOL * UnifiedUnforms + ); + +/* Split a harwdare specific address into API stuff. */ +gceSTATUS +gckHARDWARE_SplitMemory( + IN gckHARDWARE Hardware, + IN gctUINT32 Address, + OUT gcePOOL * Pool, + OUT gctUINT32 * Offset + ); + +/* Update command queue tail pointer. */ +gceSTATUS +gckHARDWARE_UpdateQueueTail( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gctUINT32 Offset + ); + +/* Convert logical address to hardware specific address. */ +gceSTATUS +gckHARDWARE_ConvertLogical( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gctBOOL InUserSpace, + OUT gctUINT32 * Address + ); + +/* Interrupt manager. */ +gceSTATUS +gckHARDWARE_Interrupt( + IN gckHARDWARE Hardware, + IN gctBOOL InterruptValid + ); + +/* Program MMU. */ +gceSTATUS +gckHARDWARE_SetMMU( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical + ); + +/* Flush the MMU. */ +gceSTATUS +gckHARDWARE_FlushMMU( + IN gckHARDWARE Hardware + ); + +gceSTATUS +gckHARDWARE_FlushAsyncMMU( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN OUT gctUINT32 * Bytes + ); + +/* Set the page table base address. */ +gceSTATUS +gckHARDWARE_SetMMUv2( + IN gckHARDWARE Hardware, + IN gctBOOL Enable, + IN gctPOINTER MtlbAddress, + IN gceMMU_MODE Mode, + IN gctPOINTER SafeAddress, + IN gctBOOL FromPower + ); + +#if gcdPROCESS_ADDRESS_SPACE +/* Configure mmu configuration. 
*/ +gceSTATUS +gckHARDWARE_ConfigMMU( + IN gckHARDWARE Hardware, + IN gctPOINTER Logical, + IN gctPOINTER MtlbLogical, + IN gctUINT32 Offset, + IN OUT gctSIZE_T * Bytes, + OUT gctSIZE_T * WaitLinkOffset, + OUT gctSIZE_T * WaitLinkBytes + ); +#endif + +/* Get idle register. */ +gceSTATUS +gckHARDWARE_GetIdle( + IN gckHARDWARE Hardware, + IN gctBOOL Wait, + OUT gctUINT32 * Data + ); + +/* Flush the caches. */ +gceSTATUS +gckHARDWARE_Flush( + IN gckHARDWARE Hardware, + IN gceKERNEL_FLUSH Flush, + IN gctPOINTER Logical, + IN OUT gctUINT32 * Bytes + ); + +/* Enable/disable fast clear. */ +gceSTATUS +gckHARDWARE_SetFastClear( + IN gckHARDWARE Hardware, + IN gctINT Enable, + IN gctINT Compression + ); + +gceSTATUS +gckHARDWARE_ReadInterrupt( + IN gckHARDWARE Hardware, + OUT gctUINT32_PTR IDs + ); + +/* Power management. */ +gceSTATUS +gckHARDWARE_SetPowerManagementState( + IN gckHARDWARE Hardware, + IN gceCHIPPOWERSTATE State + ); + +gceSTATUS +gckHARDWARE_QueryPowerManagementState( + IN gckHARDWARE Hardware, + OUT gceCHIPPOWERSTATE* State + ); + +gceSTATUS +gckHARDWARE_SetPowerManagement( + IN gckHARDWARE Hardware, + IN gctBOOL PowerManagement + ); + +gceSTATUS +gckHARDWARE_SetGpuProfiler( + IN gckHARDWARE Hardware, + IN gctBOOL GpuProfiler + ); + +#if gcdENABLE_FSCALE_VAL_ADJUST +gceSTATUS +gckHARDWARE_SetFscaleValue( + IN gckHARDWARE Hardware, + IN gctUINT32 FscaleValue + ); + +gceSTATUS +gckHARDWARE_GetFscaleValue( + IN gckHARDWARE Hardware, + IN gctUINT * FscaleValue, + IN gctUINT * MinFscaleValue, + IN gctUINT * MaxFscaleValue + ); + +gceSTATUS +gckHARDWARE_SetMinFscaleValue( + IN gckHARDWARE Hardware, + IN gctUINT MinFscaleValue + ); +#endif + +#if gcdPOWEROFF_TIMEOUT +gceSTATUS +gckHARDWARE_SetPowerOffTimeout( + IN gckHARDWARE Hardware, + IN gctUINT32 Timeout +); + +gceSTATUS +gckHARDWARE_QueryPowerOffTimeout( + IN gckHARDWARE Hardware, + OUT gctUINT32* Timeout +); +#endif + +/* Profile 2D Engine. 
*/ +gceSTATUS +gckHARDWARE_ProfileEngine2D( + IN gckHARDWARE Hardware, + OUT gcs2D_PROFILE_PTR Profile + ); + +gceSTATUS +gckHARDWARE_InitializeHardware( + IN gckHARDWARE Hardware + ); + +gceSTATUS +gckHARDWARE_Reset( + IN gckHARDWARE Hardware + ); + +/* Check for Hardware features. */ +gceSTATUS +gckHARDWARE_IsFeatureAvailable( + IN gckHARDWARE Hardware, + IN gceFEATURE Feature + ); + +gceSTATUS +gckHARDWARE_DumpMMUException( + IN gckHARDWARE Hardware + ); + +gceSTATUS +gckHARDWARE_DumpGPUState( + IN gckHARDWARE Hardware + ); + +gceSTATUS +gckHARDWARE_InitDVFS( + IN gckHARDWARE Hardware + ); + +gceSTATUS +gckHARDWARE_QueryLoad( + IN gckHARDWARE Hardware, + OUT gctUINT32 * Load + ); + +gceSTATUS +gckHARDWARE_SetDVFSPeroid( + IN gckHARDWARE Hardware, + IN gctUINT32 Frequency + ); + +gceSTATUS +gckHARDWARE_PrepareFunctions( + gckHARDWARE Hardware + ); + +gceSTATUS +gckHARDWARE_DestroyFunctions( + gckHARDWARE Hardware + ); + +gceSTATUS +gckHARDWARE_SetMMUStates( + IN gckHARDWARE Hardware, + IN gctPOINTER MtlbAddress, + IN gceMMU_MODE Mode, + IN gctPOINTER SafeAddress, + IN gctPOINTER Logical, + IN OUT gctUINT32 * Bytes + ); + +gceSTATUS +gckHARDWARE_QueryStateTimer( + IN gckHARDWARE Hardware, + OUT gctUINT64_PTR Start, + OUT gctUINT64_PTR End, + OUT gctUINT64_PTR On, + OUT gctUINT64_PTR Off, + OUT gctUINT64_PTR Idle, + OUT gctUINT64_PTR Suspend + ); + +gceSTATUS +gckHARDWARE_Fence( + IN gckHARDWARE Hardware, + IN gceENGINE Engine, + IN gctPOINTER Logical, + IN gctUINT32 FenceAddress, + IN gctUINT64 FenceData, + IN OUT gctUINT32 * Bytes + ); + +/******************************************************************************\ +***************************** gckINTERRUPT Object ****************************** +\******************************************************************************/ + +typedef struct _gckINTERRUPT * gckINTERRUPT; + +typedef gceSTATUS (* gctINTERRUPT_HANDLER)( + IN gckKERNEL Kernel + ); + +gceSTATUS +gckINTERRUPT_Construct( + IN gckKERNEL Kernel, + 
OUT gckINTERRUPT * Interrupt + ); + +gceSTATUS +gckINTERRUPT_Destroy( + IN gckINTERRUPT Interrupt + ); + +gceSTATUS +gckINTERRUPT_SetHandler( + IN gckINTERRUPT Interrupt, + IN OUT gctINT32_PTR Id, + IN gctINTERRUPT_HANDLER Handler + ); + +gceSTATUS +gckINTERRUPT_Notify( + IN gckINTERRUPT Interrupt, + IN gctBOOL Valid + ); +/******************************************************************************\ +******************************** gckEVENT Object ******************************* +\******************************************************************************/ + +typedef struct _gckEVENT * gckEVENT; + +/* Construct a new gckEVENT object. */ +gceSTATUS +gckEVENT_Construct( + IN gckKERNEL Kernel, + OUT gckEVENT * Event + ); + +/* Destroy an gckEVENT object. */ +gceSTATUS +gckEVENT_Destroy( + IN gckEVENT Event + ); + +/* Reserve the next available hardware event. */ +gceSTATUS +gckEVENT_GetEvent( + IN gckEVENT Event, + IN gctBOOL Wait, + OUT gctUINT8 * EventID, + IN gceKERNEL_WHERE Source + ); + +/* Add a new event to the list of events. */ +gceSTATUS +gckEVENT_AddList( + IN gckEVENT Event, + IN gcsHAL_INTERFACE_PTR Interface, + IN gceKERNEL_WHERE FromWhere, + IN gctBOOL AllocateAllowed, + IN gctBOOL FromKernel + ); + +/* Schedule a FreeNonPagedMemory event. */ +gceSTATUS +gckEVENT_FreeNonPagedMemory( + IN gckEVENT Event, + IN gctSIZE_T Bytes, + IN gctPHYS_ADDR Physical, + IN gctPOINTER Logical, + IN gceKERNEL_WHERE FromWhere + ); + +/* Schedule a FreeContiguousMemory event. */ +gceSTATUS +gckEVENT_FreeContiguousMemory( + IN gckEVENT Event, + IN gctSIZE_T Bytes, + IN gctPHYS_ADDR Physical, + IN gctPOINTER Logical, + IN gceKERNEL_WHERE FromWhere + ); + +/* Schedule a FreeVideoMemory event. */ +gceSTATUS +gckEVENT_FreeVideoMemory( + IN gckEVENT Event, + IN gcuVIDMEM_NODE_PTR VideoMemory, + IN gceKERNEL_WHERE FromWhere + ); + +/* Schedule a signal event. 
*/ +gceSTATUS +gckEVENT_Signal( + IN gckEVENT Event, + IN gctSIGNAL Signal, + IN gceKERNEL_WHERE FromWhere + ); + +/* Schedule an Unlock event. */ +gceSTATUS +gckEVENT_Unlock( + IN gckEVENT Event, + IN gceKERNEL_WHERE FromWhere, + IN gctPOINTER Node, + IN gceSURF_TYPE Type + ); + +/* Schedule a FreeVirtualCommandBuffer event. */ +gceSTATUS +gckEVENT_DestroyVirtualCommandBuffer( + IN gckEVENT Event, + IN gctSIZE_T Bytes, + IN gctPHYS_ADDR Physical, + IN gctPOINTER Logical, + IN gceKERNEL_WHERE FromWhere + ); + +gceSTATUS +gckEVENT_Submit( + IN gckEVENT Event, + IN gctBOOL Wait, + IN gctBOOL FromPower + ); + +gceSTATUS +gckEVENT_Commit( + IN gckEVENT Event, + IN gcsQUEUE_PTR Queue, + IN gctBOOL Forced + ); + +/* Event callback routine. */ +gceSTATUS +gckEVENT_Notify( + IN gckEVENT Event, + IN gctUINT32 IDs + ); + +/* Event callback routine. */ +gceSTATUS +gckEVENT_Interrupt( + IN gckEVENT Event, + IN gctUINT32 IDs + ); + +gceSTATUS +gckEVENT_Dump( + IN gckEVENT Event + ); +/******************************************************************************\ +******************************* gckCOMMAND Object ****************************** +\******************************************************************************/ + +typedef struct _gckCOMMAND * gckCOMMAND; + +/* Construct a new gckCOMMAND object. */ +gceSTATUS +gckCOMMAND_Construct( + IN gckKERNEL Kernel, + OUT gckCOMMAND * Command + ); + +/* Destroy an gckCOMMAND object. */ +gceSTATUS +gckCOMMAND_Destroy( + IN gckCOMMAND Command + ); + +/* Acquire command queue synchronization objects. */ +gceSTATUS +gckCOMMAND_EnterCommit( + IN gckCOMMAND Command, + IN gctBOOL FromPower + ); + +/* Release command queue synchronization objects. */ +gceSTATUS +gckCOMMAND_ExitCommit( + IN gckCOMMAND Command, + IN gctBOOL FromPower + ); + +/* Start the command queue. */ +gceSTATUS +gckCOMMAND_Start( + IN gckCOMMAND Command + ); + +/* Stop the command queue. 
*/ +gceSTATUS +gckCOMMAND_Stop( + IN gckCOMMAND Command + ); + +gceSTATUS +gckCOMMAND_Commit( + IN gckCOMMAND Command, + IN gckCONTEXT Context, + IN gcoCMDBUF CommandBuffer, + IN gcsSTATE_DELTA_PTR StateDelta, + IN gctUINT32 ProcessID, + IN gctBOOL Shared, + IN gctUINT32 Index, + OUT gctUINT64_PTR CommitStamp, + OUT gctBOOL_PTR ContextSwitched + ); + +/* Reserve space in the command buffer. */ +gceSTATUS +gckCOMMAND_Reserve( + IN gckCOMMAND Command, + IN gctUINT32 RequestedBytes, + OUT gctPOINTER * Buffer, + OUT gctUINT32 * BufferSize + ); + +/* Execute reserved space in the command buffer. */ +gceSTATUS +gckCOMMAND_Execute( + IN gckCOMMAND Command, + IN gctUINT32 RequstedBytes + ); + +/* Stall the command queue. */ +gceSTATUS +gckCOMMAND_Stall( + IN gckCOMMAND Command, + IN gctBOOL FromPower + ); + +/* Attach user process. */ +gceSTATUS +gckCOMMAND_Attach( + IN gckCOMMAND Command, + OUT gckCONTEXT * Context, + OUT gctSIZE_T * MaxState, + OUT gctUINT32 * NumStates, + IN gctUINT32 ProcessID + ); + +/* Detach user process. */ +gceSTATUS +gckCOMMAND_Detach( + IN gckCOMMAND Command, + IN gckCONTEXT Context + ); + +/* Dump command buffer being executed by GPU. */ +gceSTATUS +gckCOMMAND_DumpExecutingBuffer( + IN gckCOMMAND Command + ); + +/* Whether a kernel command buffer address. */ +gceSTATUS +gckCOMMAND_AddressInKernelCommandBuffer( + IN gckCOMMAND Command, + IN gctUINT32 Address, + OUT gctPOINTER * Pointer + ); + +/******************************************************************************\ +********************************* gckMMU Object ******************************** +\******************************************************************************/ + +/* Construct a new gckMMU object. */ +gceSTATUS +gckMMU_Construct( + IN gckKERNEL Kernel, + IN gctSIZE_T MmuSize, + OUT gckMMU * Mmu + ); + +/* Destroy an gckMMU object. */ +gceSTATUS +gckMMU_Destroy( + IN gckMMU Mmu + ); + +/* Allocate pages inside the MMU. 
*/ +gceSTATUS +gckMMU_AllocatePages( + IN gckMMU Mmu, + IN gctSIZE_T PageCount, + OUT gctPOINTER * PageTable, + OUT gctUINT32 * Address + ); + +gceSTATUS +gckMMU_AllocatePagesEx( + IN gckMMU Mmu, + IN gctSIZE_T PageCount, + IN gceSURF_TYPE Type, + IN gctBOOL Secure, + OUT gctPOINTER * PageTable, + OUT gctUINT32 * Address + ); + +/* Remove a page table from the MMU. */ +gceSTATUS +gckMMU_FreePages( + IN gckMMU Mmu, + IN gctBOOL Secure, + IN gctUINT32 Address, + IN gctPOINTER PageTable, + IN gctSIZE_T PageCount + ); + +/* Set the MMU page with info. */ +gceSTATUS +gckMMU_SetPage( + IN gckMMU Mmu, + IN gctPHYS_ADDR_T PageAddress, + IN gctBOOL Writable, + IN gctUINT32 *PageEntry + ); + +gceSTATUS +gckMMU_Flush( + IN gckMMU Mmu, + IN gceSURF_TYPE Type + ); + +gceSTATUS +gckMMU_DumpPageTableEntry( + IN gckMMU Mmu, + IN gctUINT32 Address + ); + +gceSTATUS +gckMMU_FillFlatMapping( + IN gckMMU Mmu, + IN gctUINT32 PhysBase, + IN gctSIZE_T Size + ); + +gceSTATUS +gckMMU_IsFlatMapped( + IN gckMMU Mmu, + OUT gctUINT32 Physical, + OUT gctBOOL *In + ); + + +gceSTATUS +gckHARDWARE_QueryContextProfile( + IN gckHARDWARE Hardware, + IN gctBOOL Reset, + IN gckCONTEXT Context, + OUT gcsPROFILER_COUNTERS_PART1 * Counters_part1, + OUT gcsPROFILER_COUNTERS_PART2 * Counters_part2 + ); + +gceSTATUS +gckHARDWARE_UpdateContextProfile( + IN gckHARDWARE Hardware, + IN gckCONTEXT Context + ); + +gceSTATUS +gckHARDWARE_InitProfiler( + IN gckHARDWARE Hardware + ); + +gceSTATUS +gckOS_DetectProcessByName( + IN gctCONST_POINTER Name + ); + +void +gckOS_DumpParam( + void + ); + +#ifdef __cplusplus +} +#endif + + +#endif /* __gc_hal_h_ */ + + diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_base.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_base.h new file mode 100644 index 000000000000..b0dec88828cb --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_base.h @@ -0,0 +1,5989 @@ +/**************************************************************************** +* +* The MIT License (MIT) 
+* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. 
+* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#ifndef __gc_hal_base_h_ +#define __gc_hal_base_h_ + +#include "gc_hal_enum.h" +#include "gc_hal_types.h" +#include "gc_hal_dump.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/******************************************************************************\ +****************************** Object Declarations ***************************** +\******************************************************************************/ + +typedef struct _gckOS * gckOS; +typedef struct _gcoHAL * gcoHAL; +typedef struct _gcoOS * gcoOS; +typedef struct _gco2D * gco2D; +typedef struct gcsATOM * gcsATOM_PTR; + +typedef struct _gco3D * gco3D; +typedef struct _gcoCL * gcoCL; +typedef struct _gcsFAST_FLUSH * gcsFAST_FLUSH_PTR; + +typedef struct _gcoSURF * gcoSURF; +typedef struct _gcsSURF_NODE * gcsSURF_NODE_PTR; +typedef struct _gcsSURF_FORMAT_INFO * gcsSURF_FORMAT_INFO_PTR; +typedef struct _gcsPOINT * gcsPOINT_PTR; +typedef struct _gcsSIZE * gcsSIZE_PTR; +typedef struct _gcsRECT * gcsRECT_PTR; +typedef struct _gcsBOUNDARY * gcsBOUNDARY_PTR; +typedef struct _gcoDUMP * gcoDUMP; +typedef struct _gcoHARDWARE * gcoHARDWARE; +typedef union _gcuVIDMEM_NODE * gcuVIDMEM_NODE_PTR; +typedef struct _gcsVIDMEM_NODE * gckVIDMEM_NODE; + +typedef void * gcoVG; + +typedef struct _gcoFENCE * gcoFENCE; 
+typedef struct _gcsSYNC_CONTEXT * gcsSYNC_CONTEXT_PTR; + +typedef enum { + gcvFENCE_TYPE_READ = 0x1, + gcvFENCE_TYPE_WRITE = 0x2, + gcvFENCE_TYPE_ALL = gcvFENCE_TYPE_READ | gcvFENCE_TYPE_WRITE, + gcvFNECE_TYPE_INVALID = 0x10000, +} +gceFENCE_TYPE; + +typedef struct _gcsUSER_MEMORY_DESC * gcsUSER_MEMORY_DESC_PTR; + + +/******************************************************************************\ +********************* Share obj lock/unlock macros. **************************** +\******************************************************************************/ +#define gcmLOCK_SHARE_OBJ(Obj) \ +{ \ + if(Obj->sharedLock != gcvNULL)\ + {\ + (gcoOS_AcquireMutex(\ + gcvNULL, Obj->sharedLock, gcvINFINITE));\ + }\ +} + + +#define gcmUNLOCK_SHARE_OBJ(Obj)\ +{\ + if(Obj->sharedLock != gcvNULL)\ + {\ + (gcoOS_ReleaseMutex(gcvNULL, Obj->sharedLock));\ + }\ +} + +typedef struct _gcsSystemInfo +{ + /* memory latency number for SH data fetch, in SH cycle*/ + gctUINT32 memoryLatencySH; +} +gcsSystemInfo; + + +#if gcdENABLE_3D +#if gcdSYNC +#define gcPLS_INITIALIZER \ +{ \ + gcvNULL, /* gcoOS object. */ \ + gcvNULL, /* gcoHAL object. */ \ + 0, /* internalSize */ \ + gcvNULL, /* internalPhysical */ \ + gcvNULL, /* internalLogical */ \ + 0, /* externalSize */ \ + gcvNULL, /* externalPhysical */ \ + gcvNULL, /* externalLogical */ \ + 0, /* contiguousSize */ \ + gcvNULL, /* contiguousPhysical */ \ + gcvNULL, /* contiguousLogical */ \ + gcvNULL, /* eglDisplayInfo */ \ + gcvNULL, /* eglSurfaceInfo */ \ + gcvSURF_A8R8G8B8,/* eglConfigFormat */ \ + gcvNULL, /* reference */ \ + 0, /* processID */ \ + 0, /* threadID */ \ + gcvFALSE, /* exiting */ \ + gcvFALSE, /* Special flag for NP2 texture. */ \ + gcvFALSE, /* device open. 
*/ \ + gcvNULL, /* destructor */ \ + gcvNULL, /* accessLock */ \ + gcvNULL, /* GL FE compiler lock*/ \ + gcvNULL, /* CL FE compiler lock*/ \ + gcvPATCH_NOTINIT,/* global patchID */ \ + gcvNULL, /* global fenceID*/ \ +} +#else +#define gcPLS_INITIALIZER \ +{ \ + gcvNULL, /* gcoOS object. */ \ + gcvNULL, /* gcoHAL object. */ \ + 0, /* internalSize */ \ + gcvNULL, /* internalPhysical */ \ + gcvNULL, /* internalLogical */ \ + 0, /* externalSize */ \ + gcvNULL, /* externalPhysical */ \ + gcvNULL, /* externalLogical */ \ + 0, /* contiguousSize */ \ + gcvNULL, /* contiguousPhysical */ \ + gcvNULL, /* contiguousLogical */ \ + gcvNULL, /* eglDisplayInfo */ \ + gcvNULL, /* eglSurfaceInfo */ \ + gcvSURF_A8R8G8B8,/* eglConfigFormat */ \ + gcvNULL, /* reference */ \ + 0, /* processID */ \ + 0, /* threadID */ \ + gcvFALSE, /* exiting */ \ + gcvFALSE, /* Special flag for NP2 texture. */ \ + gcvFALSE, /* device open. */ \ + gcvNULL, /* destructor */ \ + gcvNULL, /* accessLock */ \ + gcvNULL, /* GL FE compiler lock*/ \ + gcvNULL, /* CL FE compiler lock*/ \ + gcvPATCH_NOTINIT,/* global patchID */ \ +} +#endif +#else +#define gcPLS_INITIALIZER \ +{ \ + gcvNULL, /* gcoOS object. */ \ + gcvNULL, /* gcoHAL object. */ \ + 0, /* internalSize */ \ + gcvNULL, /* internalPhysical */ \ + gcvNULL, /* internalLogical */ \ + 0, /* externalSize */ \ + gcvNULL, /* externalPhysical */ \ + gcvNULL, /* externalLogical */ \ + 0, /* contiguousSize */ \ + gcvNULL, /* contiguousPhysical */ \ + gcvNULL, /* contiguousLogical */ \ + gcvNULL, /* eglDisplayInfo */ \ + gcvNULL, /* eglSurfaceInfo */ \ + gcvSURF_A8R8G8B8,/* eglConfigFormat */ \ + gcvNULL, /* reference */ \ + 0, /* processID */ \ + 0, /* threadID */ \ + gcvFALSE, /* exiting */ \ + gcvFALSE, /* Special flag for NP2 texture. */ \ + gcvFALSE, /* device open. 
*/ \ + gcvNULL, /* destructor */ \ + gcvNULL, /* accessLock */ \ +} +#endif + +/******************************************************************************\ +******************************* Thread local storage ************************* +\******************************************************************************/ + +typedef struct _gcsDRIVER_TLS * gcsDRIVER_TLS_PTR; + +typedef struct _gcsDRIVER_TLS +{ + void (* destructor)(gcsDRIVER_TLS_PTR Tls); +} +gcsDRIVER_TLS; + +typedef enum _gceTLS_KEY +{ + gcvTLS_KEY_EGL, + gcvTLS_KEY_OPENGL_ES, + gcvTLS_KEY_OPENVG, + gcvTLS_KEY_OPENGL, + gcvTLS_KEY_OPENCL, + gcvTLS_KEY_OPENVX, + + gcvTLS_KEY_COUNT +} +gceTLS_KEY; + +typedef struct _gcsTLS * gcsTLS_PTR; + +typedef struct _gcsTLS +{ + gceHARDWARE_TYPE currentType; + + /* To which core device control is called, + * it is index in a hardware type. + */ + gctUINT32 currentCoreIndex; + + /* Current 3D hardwre of this thread */ + gcoHARDWARE currentHardware; + + /* Default 3D hardware of this thread */ + gcoHARDWARE defaultHardware; + + /* Only for separated 3D and 2D */ + gcoHARDWARE hardware2D; +#if gcdENABLE_3D + gco3D engine3D; +#endif + + gctBOOL copied; + + /* libGAL.so handle */ + gctHANDLE handle; + + /* If true, do not releas 2d engine and hardware in hal layer */ + gctBOOL release2DUpper; + + /* Driver tls. */ + gcsDRIVER_TLS_PTR driverTLS[gcvTLS_KEY_COUNT]; +} +gcsTLS; + +/******************************************************************************\ +********************************* Enumerations ********************************* +\******************************************************************************/ + +typedef enum _gcePLS_VALUE +{ + gcePLS_VALUE_EGL_DISPLAY_INFO, + gcePLS_VALUE_EGL_CONFIG_FORMAT_INFO, + gcePLS_VALUE_EGL_DESTRUCTOR_INFO, +} +gcePLS_VALUE; + +/* Video memory pool type. 
*/ +typedef enum _gcePOOL +{ + gcvPOOL_UNKNOWN = 0, + gcvPOOL_DEFAULT, + gcvPOOL_LOCAL, + gcvPOOL_LOCAL_INTERNAL, + gcvPOOL_LOCAL_EXTERNAL, + gcvPOOL_UNIFIED, + gcvPOOL_SYSTEM, + gcvPOOL_VIRTUAL, + gcvPOOL_USER, + gcvPOOL_CONTIGUOUS, + + gcvPOOL_NUMBER_OF_POOLS +} +gcePOOL; + +#if gcdENABLE_3D +/* Blending functions. */ +typedef enum _gceBLEND_FUNCTION +{ + gcvBLEND_ZERO, + gcvBLEND_ONE, + gcvBLEND_SOURCE_COLOR, + gcvBLEND_INV_SOURCE_COLOR, + gcvBLEND_SOURCE_ALPHA, + gcvBLEND_INV_SOURCE_ALPHA, + gcvBLEND_TARGET_COLOR, + gcvBLEND_INV_TARGET_COLOR, + gcvBLEND_TARGET_ALPHA, + gcvBLEND_INV_TARGET_ALPHA, + gcvBLEND_SOURCE_ALPHA_SATURATE, + gcvBLEND_CONST_COLOR, + gcvBLEND_INV_CONST_COLOR, + gcvBLEND_CONST_ALPHA, + gcvBLEND_INV_CONST_ALPHA, +} +gceBLEND_FUNCTION; + +/* Blending modes. */ +typedef enum _gceBLEND_MODE +{ + gcvBLEND_ADD = 0, + gcvBLEND_SUBTRACT, + gcvBLEND_REVERSE_SUBTRACT, + gcvBLEND_MIN, + gcvBLEND_MAX, + gcvBLEND_MULTIPLY, + gcvBLEND_SCREEN, + gcvBLEND_OVERLAY, + gcvBLEND_DARKEN, + gcvBLEND_LIGHTEN, + gcvBLEND_COLORDODGE, + gcvBLEND_COLORBURN, + gcvBLEND_HARDLIGHT, + gcvBLEND_SOFTLIGHT, + gcvBLEND_DIFFERENCE, + gcvBLEND_EXCLUSION, + gcvBLEND_HSL_HUE, + gcvBLEND_HSL_SATURATION, + gcvBLEND_HSL_COLOR, + gcvBLEND_HSL_LUMINOSITY, + + gcvBLEND_TOTAL +} +gceBLEND_MODE; + +/* Depth modes. */ +typedef enum _gceDEPTH_MODE +{ + gcvDEPTH_NONE, + gcvDEPTH_Z, + gcvDEPTH_W, +} +gceDEPTH_MODE; +#endif /* gcdENABLE_3D */ + + +/* API flags. 
*/ +typedef enum _gceAPI +{ + gcvAPI_D3D = 1, + gcvAPI_OPENGL_ES11, + gcvAPI_OPENGL_ES20, + gcvAPI_OPENGL_ES30, + gcvAPI_OPENGL_ES31, + gcvAPI_OPENGL_ES32, + gcvAPI_OPENGL, + gcvAPI_OPENVG, + gcvAPI_OPENCL, + gcvAPI_OPENVK, +} +gceAPI; + +typedef enum _gceWHERE +{ + gcvWHERE_COMMAND_PREFETCH = 0, + gcvWHERE_COMMAND, + gcvWHERE_RASTER, + gcvWHERE_PIXEL, + gcvWHERE_BLT, +} +gceWHERE; + +typedef enum _gceHOW +{ + gcvHOW_SEMAPHORE = 0x1, + gcvHOW_STALL = 0x2, + gcvHOW_SEMAPHORE_STALL = 0x3, +} +gceHOW; + +typedef enum _gceSignalHandlerType +{ + gcvHANDLE_SIGFPE_WHEN_SIGNAL_CODE_IS_0 = 0x1, +} +gceSignalHandlerType; + +typedef struct _gcsSURF_VIEW +{ + gcoSURF surf; + gctUINT firstSlice; + gctUINT numSlices; +}gcsSURF_VIEW; + +/* gcsHAL_Limits*/ +typedef struct _gcsHAL_LIMITS +{ + /* chip info */ + gceCHIPMODEL chipModel; + gctUINT32 chipRevision; + gctUINT32 featureCount; + gctUINT32 *chipFeatures; + + /* target caps */ + gctUINT32 maxWidth; + gctUINT32 maxHeight; + gctUINT32 multiTargetCount; + gctUINT32 maxSamples; + +}gcsHAL_LIMITS; + +#define gcdEXTERNAL_MEMORY_NAME_MAX 32 +#define gcdEXTERNAL_MEMORY_DATA_MAX 8 + +typedef struct _gcsEXTERNAL_MEMORY_INFO +{ + /* Name of allocator used to attach this memory. */ + gctCHAR allocatorName[gcdEXTERNAL_MEMORY_NAME_MAX]; + + /* User defined data which will be passed to allocator. */ + gctUINT32 userData[gcdEXTERNAL_MEMORY_DATA_MAX]; +} +gcsEXTERNAL_MEMORY_INFO; + +/******************************************************************************\ +********************************* gcoHAL Object ********************************* +\******************************************************************************/ + +/* Construct a new gcoHAL object. */ +gceSTATUS +gcoHAL_ConstructEx( + IN gctPOINTER Context, + IN gcoOS Os, + OUT gcoHAL * Hal + ); + +/* Destroy an gcoHAL object. */ +gceSTATUS +gcoHAL_DestroyEx( + IN gcoHAL Hal + ); + +/* Empty function for compatibility. 
*/ +gceSTATUS +gcoHAL_Construct( + IN gctPOINTER Context, + IN gcoOS Os, + OUT gcoHAL * Hal + ); + +/* Empty function for compatibility. */ +gceSTATUS +gcoHAL_Destroy( + IN gcoHAL Hal + ); + +/* Get HAL options */ +gceSTATUS +gcoHAL_GetOption( + IN gcoHAL Hal, + IN gceOPTION Option + ); + +gceSTATUS +gcoHAL_FrameInfoOps( + IN gcoHAL Hal, + IN gceFRAMEINFO FrameInfo, + IN gceFRAMEINFO_OP Op, + IN OUT gctUINT * Val + ); + +/* Set HAL options */ +gceSTATUS +gcoHAL_SetOption( + IN gcoHAL Hal, + IN gceOPTION Option, + IN gctBOOL Value + ); + +gceSTATUS +gcoHAL_GetHardware( + IN gcoHAL Hal, + OUT gcoHARDWARE* Hw + ); + + +#if gcdENABLE_3D +gceSTATUS +gcoHAL_GetSpecialHintData( + IN gcoHAL Hal, + OUT gctINT * Hint + ); +/* +** Deprecated(Don't use it), keep it here for external library(libgcu.so) +*/ +gceSTATUS +gcoHAL_Get3DEngine( + IN gcoHAL Hal, + OUT gco3D * Engine + ); +#endif /* gcdENABLE_3D */ + + +gceSTATUS +gcoHAL_GetProductName( + IN gcoHAL Hal, + OUT gctSTRING *ProductName + ); + +gceSTATUS +gcoHAL_SetFscaleValue( + IN gctUINT FscaleValue + ); + +gceSTATUS +gcoHAL_GetFscaleValue( + OUT gctUINT * FscaleValue, + OUT gctUINT * MinFscaleValue, + OUT gctUINT * MaxFscaleValue + ); + +gceSTATUS +gcoHAL_SetBltNP2Texture( + gctBOOL enable + ); + +gceSTATUS +gcoHAL_ExportVideoMemory( + IN gctUINT32 Handle, + IN gctUINT32 Flags, + OUT gctINT32 * FD + ); + +gceSTATUS +gcoHAL_NameVideoMemory( + IN gctUINT32 Handle, + OUT gctUINT32 * Name + ); + +gceSTATUS +gcoHAL_ImportVideoMemory( + IN gctUINT32 Name, + OUT gctUINT32 * Handle + ); + +gceSTATUS +gcoHAL_GetVideoMemoryFd( + IN gctUINT32 Handle, + OUT gctINT * Fd + ); + +/* Verify whether the specified feature is available in hardware. */ +gceSTATUS +gcoHAL_IsFeatureAvailable( + IN gcoHAL Hal, + IN gceFEATURE Feature + ); + +gceSTATUS +gcoHAL_IsSwwaNeeded( + IN gcoHAL Hal, + IN gceSWWA Swwa + ); + +gceSTATUS +gcoHAL_IsFeatureAvailable1( + IN gcoHAL Hal, + IN gceFEATURE Feature + ); + +/* Query the identity of the hardware. 
*/ +gceSTATUS +gcoHAL_QueryChipIdentity( + IN gcoHAL Hal, + OUT gceCHIPMODEL* ChipModel, + OUT gctUINT32* ChipRevision, + OUT gctUINT32* ChipFeatures, + OUT gctUINT32* ChipMinorFeatures + ); + + +gceSTATUS +gcoHAL_QuerySuperTileMode( + OUT gctUINT32_PTR SuperTileMode + ); + +gceSTATUS +gcoHAL_QueryChipAxiBusWidth( + OUT gctBOOL * AXI128Bits + ); + +gceSTATUS +gcoHAL_QueryMultiGPUAffinityConfig( + IN gceHARDWARE_TYPE Type, + OUT gceMULTI_GPU_MODE *Mode, + OUT gctUINT32_PTR CoreIndex + ); + +#ifdef LINUX +gctINT32 +gcoOS_EndRecordAllocation(void); +void +gcoOS_RecordAllocation(void); +void +gcoOS_AddRecordAllocation(gctSIZE_T Size); +#endif + +/* Query the amount of video memory. */ +gceSTATUS +gcoHAL_QueryVideoMemory( + IN gcoHAL Hal, + OUT gctPHYS_ADDR * InternalAddress, + OUT gctSIZE_T * InternalSize, + OUT gctPHYS_ADDR * ExternalAddress, + OUT gctSIZE_T * ExternalSize, + OUT gctPHYS_ADDR * ContiguousAddress, + OUT gctSIZE_T * ContiguousSize + ); + +/* Map video memory. */ +gceSTATUS +gcoHAL_MapMemory( + IN gcoHAL Hal, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T NumberOfBytes, + OUT gctPOINTER * Logical + ); + +/* Unmap video memory. */ +gceSTATUS +gcoHAL_UnmapMemory( + IN gcoHAL Hal, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T NumberOfBytes, + IN gctPOINTER Logical + ); + +/* Schedule an unmap of a buffer mapped through its physical address. */ +gceSTATUS +gcoHAL_ScheduleUnmapMemory( + IN gcoHAL Hal, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T NumberOfBytes, + IN gctPOINTER Logical + ); + +/* Allocate video memory. */ +gceSTATUS +gcoOS_AllocateVideoMemory( + IN gcoOS Os, + IN gctBOOL InUserSpace, + IN gctBOOL InCacheable, + IN OUT gctSIZE_T * Bytes, + OUT gctUINT32 * Physical, + OUT gctPOINTER * Logical, + OUT gctPOINTER * Handle + ); + +/* Free video memory. */ +gceSTATUS +gcoOS_FreeVideoMemory( + IN gcoOS Os, + IN gctPOINTER Handle + ); + +/* Lock video memory. 
*/ +gceSTATUS +gcoOS_LockVideoMemory( + IN gcoOS Os, + IN gctPOINTER Handle, + IN gctBOOL InUserSpace, + IN gctBOOL InCacheable, + OUT gctUINT32 * Physical, + OUT gctPOINTER * Logical + ); + +/* Map user memory. */ +gceSTATUS +gcoHAL_MapUserMemory( + IN gctPOINTER Logical, + IN gctUINT32 Physical, + IN gctSIZE_T Size, + OUT gctPOINTER * Info, + OUT gctUINT32_PTR GPUAddress + ); + +/* Unmap user memory. */ +gceSTATUS +gcoHAL_UnmapUserMemory( + IN gctPOINTER Logical, + IN gctSIZE_T Size, + IN gctPOINTER Info, + IN gctUINT32 GPUAddress + ); + +/* Schedule an unmap of a user buffer using event mechanism. */ +gceSTATUS +gcoHAL_ScheduleUnmapUserMemory( + IN gcoHAL Hal, + IN gctPOINTER Info, + IN gctSIZE_T Size, + IN gctUINT32 Address, + IN gctPOINTER Memory + ); + +/* Commit the current command buffer. */ +gceSTATUS +gcoHAL_Commit( + IN gcoHAL Hal, + IN gctBOOL Stall + ); + +#if gcdENABLE_3D +/* Sencd fence command. */ +gceSTATUS +gcoHAL_SendFence( + IN gcoHAL Hal + ); +#endif /* gcdENABLE_3D */ + +/* Query the tile capabilities. */ +gceSTATUS +gcoHAL_QueryTiled( + IN gcoHAL Hal, + OUT gctINT32 * TileWidth2D, + OUT gctINT32 * TileHeight2D, + OUT gctINT32 * TileWidth3D, + OUT gctINT32 * TileHeight3D + ); + +gceSTATUS +gcoHAL_Compact( + IN gcoHAL Hal + ); + +#if VIVANTE_PROFILER +gceSTATUS +gcoHAL_ProfileStart( + IN gcoHAL Hal + ); + +gceSTATUS +gcoHAL_ProfileEnd( + IN gcoHAL Hal, + IN gctCONST_STRING Title + ); +#endif + +/* Power Management */ +gceSTATUS +gcoHAL_SetPowerManagementState( + IN gcoHAL Hal, + IN gceCHIPPOWERSTATE State + ); + +gceSTATUS +gcoHAL_QueryPowerManagementState( + IN gcoHAL Hal, + OUT gceCHIPPOWERSTATE *State + ); + +/* Set the filter type for filter blit. */ +gceSTATUS +gcoHAL_SetFilterType( + IN gcoHAL Hal, + IN gceFILTER_TYPE FilterType + ); + +gceSTATUS +gcoHAL_GetDump( + IN gcoHAL Hal, + OUT gcoDUMP * Dump + ); + +/* Call the kernel HAL layer. 
*/ +gceSTATUS +gcoHAL_Call( + IN gcoHAL Hal, + IN OUT gcsHAL_INTERFACE_PTR Interface + ); + +/* Schedule an event. */ +gceSTATUS +gcoHAL_ScheduleEvent( + IN gcoHAL Hal, + IN OUT gcsHAL_INTERFACE_PTR Interface + ); + +/* Request a start/stop timestamp. */ +gceSTATUS +gcoHAL_SetTimer( + IN gcoHAL Hal, + IN gctUINT32 Index, + IN gctBOOL Start + ); + +/* Get Time delta from a Timer in microseconds. */ +gceSTATUS +gcoHAL_GetTimerTime( + IN gcoHAL Hal, + IN gctUINT32 Timer, + OUT gctINT32_PTR TimeDelta + ); + +/* set timeout value. */ +gceSTATUS +gcoHAL_SetTimeOut( + IN gcoHAL Hal, + IN gctUINT32 timeOut + ); + +gceSTATUS +gcoHAL_SetHardwareType( + IN gcoHAL Hal, + IN gceHARDWARE_TYPE HardwardType + ); + +gceSTATUS +gcoHAL_GetHardwareType( + IN gcoHAL Hal, + OUT gceHARDWARE_TYPE * HardwardType + ); + +gceSTATUS +gcoHAL_QueryChipCount( + IN gcoHAL Hal, + OUT gctINT32 * Count + ); + +gceSTATUS +gcoHAL_Query3DCoreCount( + IN gcoHAL Hal, + OUT gctUINT32 *Count + ); + +gceSTATUS +gcoHAL_QueryCoreCount( + IN gcoHAL Hal, + IN gceHARDWARE_TYPE Type, + OUT gctUINT *Count, + OUT gctUINT_PTR ChipIDs + ); + +gceSTATUS +gcoHAL_QuerySeparated2D( + IN gcoHAL Hal + ); + +gceSTATUS +gcoHAL_QueryHybrid2D( + IN gcoHAL Hal + ); + +gceSTATUS +gcoHAL_Is3DAvailable( + IN gcoHAL Hal + ); + +/* Get pointer to gcoVG object. */ +gceSTATUS +gcoHAL_GetVGEngine( + IN gcoHAL Hal, + OUT gcoVG * Engine + ); + +gceSTATUS +gcoHAL_QueryChipLimits( + IN gcoHAL Hal, + IN gctINT32 Chip, + OUT gcsHAL_LIMITS *Limits); + +gceSTATUS +gcoHAL_QueryChipFeature( + IN gcoHAL Hal, + IN gctINT32 Chip, + IN gceFEATURE Feature); + +gceSTATUS +gcoHAL_SetCoreIndex( + IN gcoHAL Hal, + IN gctUINT32 Core + ); + +gceSTATUS +gcoHAL_GetCurrentCoreIndex( + IN gcoHAL Hal, + OUT gctUINT32 *Core + ); + +/*----------------------------------------------------------------------------*/ +/*----- Shared Buffer --------------------------------------------------------*/ + +/* Create shared buffer. 
*/ +gceSTATUS +gcoHAL_CreateShBuffer( + IN gctUINT32 Size, + OUT gctSHBUF * ShBuf + ); + +/* Destroy shared buffer. */ +gceSTATUS +gcoHAL_DestroyShBuffer( + IN gctSHBUF ShBuf + ); + +/* Map shared buffer to current process. */ +gceSTATUS +gcoHAL_MapShBuffer( + IN gctSHBUF ShBuf + ); + +/* Write user data to shared buffer. */ +gceSTATUS +gcoHAL_WriteShBuffer( + IN gctSHBUF ShBuf, + IN gctCONST_POINTER Data, + IN gctUINT32 ByteCount + ); + +/* Read user data from shared buffer. */ +gceSTATUS +gcoHAL_ReadShBuffer( + IN gctSHBUF ShBuf, + IN gctPOINTER Data, + IN gctUINT32 BytesCount, + OUT gctUINT32 * BytesRead + ); + +/* Config power management to be enabled or disabled. */ +gceSTATUS +gcoHAL_ConfigPowerManagement( + IN gctBOOL Enable + ); + +gceSTATUS +gcoHAL_AllocateVideoMemory( + IN gctUINT Alignment, + IN gceSURF_TYPE Type, + IN gctUINT32 Flag, + IN gcePOOL Pool, + IN OUT gctSIZE_T * Bytes, + OUT gctUINT32_PTR Node + ); + +gceSTATUS +gcoHAL_LockVideoMemory( + IN gctUINT32 Node, + IN gctBOOL Cacheable, + IN gceENGINE engine, + OUT gctUINT32 * Physical, + OUT gctPOINTER * Logical + ); + +gceSTATUS +gcoHAL_UnlockVideoMemory( + IN gctUINT32 Node, + IN gceSURF_TYPE Type, + IN gceENGINE engine + ); + +gceSTATUS +gcoHAL_ReleaseVideoMemory( + IN gctUINT32 Node + ); + +gceSTATUS +gcoHAL_AllocateContiguous( + IN gcoOS Os, + IN OUT gctSIZE_T * Bytes, + OUT gctPHYS_ADDR * Physical, + OUT gctPOINTER * Logical + ); + +#if gcdENABLE_3D +/* Query the target capabilities. 
*/ +gceSTATUS +gcoHAL_QueryTargetCaps( + IN gcoHAL Hal, + OUT gctUINT * MaxWidth, + OUT gctUINT * MaxHeight, + OUT gctUINT * MultiTargetCount, + OUT gctUINT * MaxSamples + ); +#endif + +gceSTATUS +gcoHAL_WrapUserMemory( + IN gcsUSER_MEMORY_DESC_PTR UserMemoryDesc, + OUT gctUINT32_PTR Node + ); + +gceSTATUS +gcoHAL_QueryResetTimeStamp( + OUT gctUINT64_PTR ResetTimeStamp, + OUT gctUINT64_PTR ContextID + ); + +gceSTATUS +gcoHAL_WaitFence( + IN gctUINT32 Handle, + IN gctUINT32 TimeOut + ); + + +gceSTATUS +gcoHAL_ScheduleSignal( + IN gctSIGNAL Signal, + IN gctSIGNAL AuxSignal, + IN gctINT ProcessID, + IN gceKERNEL_WHERE FromWhere + ); + +gceSTATUS +gcoHAL_GetGraphicBufferFd( + IN gctUINT32 Node[3], + IN gctSHBUF ShBuf, + IN gctSIGNAL Signal, + OUT gctINT32 * Fd + ); + +gceSTATUS +gcoHAL_AlignToTile( + IN OUT gctUINT32 * Width, + IN OUT gctUINT32 * Height, + IN gceSURF_TYPE Type, + IN gceSURF_FORMAT Format + ); + +/******************************************************************************\ +********************************** gcoOS Object ********************************* +\******************************************************************************/ +/* Lock PLS access */ +gceSTATUS +gcoOS_LockPLS( + void + ); + +/* Unlock PLS access */ +gceSTATUS +gcoOS_UnLockPLS( + void + ); + +/* Get PLS value for given key */ +gctPOINTER +gcoOS_GetPLSValue( + IN gcePLS_VALUE key + ); + +/* Set PLS value of a given key */ +void +gcoOS_SetPLSValue( + IN gcePLS_VALUE key, + OUT gctPOINTER value + ); + +/* Lock GL FE compiler access */ +gceSTATUS +gcoOS_LockGLFECompiler( + void + ); + +/* Unlock GL FE compiler access */ +gceSTATUS +gcoOS_UnLockGLFECompiler( + void + ); + +/* Lock CL FE compiler access */ +gceSTATUS +gcoOS_LockCLFECompiler( + void + ); + +/* Unlock CL FE compiler access */ +gceSTATUS +gcoOS_UnLockCLFECompiler( + void + ); + +gceSTATUS +gcoOS_GetTLS( + OUT gcsTLS_PTR * TLS + ); + +/* Copy the TLS from a source thread. 
*/ +gceSTATUS +gcoOS_CopyTLS( + IN gcsTLS_PTR Source + ); + +/* Query the thread local storage. */ +gceSTATUS +gcoOS_QueryTLS( + OUT gcsTLS_PTR * TLS + ); + +/* Get access to driver tls. */ +gceSTATUS +gcoOS_GetDriverTLS( + IN gceTLS_KEY Key, + OUT gcsDRIVER_TLS_PTR * TLS + ); + +/* + * Set driver tls. + * May cause memory leak if 'destructor' not set. + */ +gceSTATUS +gcoOS_SetDriverTLS( + IN gceTLS_KEY Key, + IN gcsDRIVER_TLS * TLS + ); + +/* Destroy the objects associated with the current thread. */ +void +gcoOS_FreeThreadData( + void + ); + +/* Empty function for compatibility. */ +gceSTATUS +gcoOS_Construct( + IN gctPOINTER Context, + OUT gcoOS * Os + ); + +/* Empty function for compatibility. */ +gceSTATUS +gcoOS_Destroy( + IN gcoOS Os + ); + +/* Get the base address for the physical memory. */ +gceSTATUS +gcoOS_GetBaseAddress( + IN gcoOS Os, + OUT gctUINT32_PTR BaseAddress + ); + +/* Allocate memory from the heap. */ +gceSTATUS +gcoOS_Allocate( + IN gcoOS Os, + IN gctSIZE_T Bytes, + OUT gctPOINTER * Memory + ); + +/* Get allocated memory size. */ +gceSTATUS +gcoOS_GetMemorySize( + IN gcoOS Os, + IN gctPOINTER Memory, + OUT gctSIZE_T_PTR MemorySize + ); + +/* Free allocated memory. */ +gceSTATUS +gcoOS_Free( + IN gcoOS Os, + IN gctPOINTER Memory + ); + +/* Allocate memory. */ +gceSTATUS +gcoOS_AllocateSharedMemory( + IN gcoOS Os, + IN gctSIZE_T Bytes, + OUT gctPOINTER * Memory + ); + +/* Free memory. */ +gceSTATUS +gcoOS_FreeSharedMemory( + IN gcoOS Os, + IN gctPOINTER Memory + ); + +/* Allocate memory. */ +gceSTATUS +gcoOS_AllocateMemory( + IN gcoOS Os, + IN gctSIZE_T Bytes, + OUT gctPOINTER * Memory + ); + +/* Free memory. */ +gceSTATUS +gcoOS_FreeMemory( + IN gcoOS Os, + IN gctPOINTER Memory + ); + +/* Free contiguous memory. */ +gceSTATUS +gcoOS_FreeContiguous( + IN gcoOS Os, + IN gctPHYS_ADDR Physical, + IN gctPOINTER Logical, + IN gctSIZE_T Bytes + ); + +/* Map user memory. 
*/ +gceSTATUS +gcoOS_MapUserMemory( + IN gcoOS Os, + IN gctPOINTER Memory, + IN gctSIZE_T Size, + OUT gctPOINTER * Info, + OUT gctUINT32_PTR Address + ); + +/* Map user memory. */ +gceSTATUS +gcoOS_MapUserMemoryEx( + IN gcoOS Os, + IN gctPOINTER Memory, + IN gctUINT32 Physical, + IN gctSIZE_T Size, + OUT gctPOINTER * Info, + OUT gctUINT32_PTR Address + ); + +/* Unmap user memory. */ +gceSTATUS +gcoOS_UnmapUserMemory( + IN gcoOS Os, + IN gctPOINTER Memory, + IN gctSIZE_T Size, + IN gctPOINTER Info, + IN gctUINT32 Address + ); + +gceSTATUS +gcoOS_CPUPhysicalToGPUPhysical( + IN gctUINT32 CPUPhysical, + OUT gctUINT32_PTR GPUPhysical + ); + +/* Device I/O Control call to the kernel HAL layer. */ +gceSTATUS +gcoOS_DeviceControl( + IN gcoOS Os, + IN gctUINT32 IoControlCode, + IN gctPOINTER InputBuffer, + IN gctSIZE_T InputBufferSize, + IN gctPOINTER OutputBuffer, + IN gctSIZE_T OutputBufferSize + ); + +/* Allocate non paged memory. */ +gceSTATUS +gcoOS_AllocateNonPagedMemory( + IN gcoOS Os, + IN gctBOOL InUserSpace, + IN OUT gctSIZE_T * Bytes, + OUT gctPHYS_ADDR * Physical, + OUT gctPOINTER * Logical + ); + +/* Free non paged memory. */ +gceSTATUS +gcoOS_FreeNonPagedMemory( + IN gcoOS Os, + IN gctSIZE_T Bytes, + IN gctPHYS_ADDR Physical, + IN gctPOINTER Logical + ); + +#define gcmOS_SAFE_FREE(os, mem) \ + gcoOS_Free(os, mem); \ + mem = gcvNULL + +#define gcmOS_SAFE_FREE_SHARED_MEMORY(os, mem) \ + gcoOS_FreeSharedMemory(os, mem); \ + mem = gcvNULL + +#define gcmkOS_SAFE_FREE(os, mem) \ + gckOS_Free(os, mem); \ + mem = gcvNULL + +#define gcdMAX_PATH 512 + +typedef enum _gceFILE_MODE +{ + gcvFILE_CREATE = 0, + gcvFILE_APPEND, + gcvFILE_READ, + gcvFILE_CREATETEXT, + gcvFILE_APPENDTEXT, + gcvFILE_READTEXT, +} +gceFILE_MODE; + +/* Open a file. */ +gceSTATUS +gcoOS_Open( + IN gcoOS Os, + IN gctCONST_STRING FileName, + IN gceFILE_MODE Mode, + OUT gctFILE * File + ); + +/* Close a file. */ +gceSTATUS +gcoOS_Close( + IN gcoOS Os, + IN gctFILE File + ); + +/* Read data from a file. 
*/ +gceSTATUS +gcoOS_Read( + IN gcoOS Os, + IN gctFILE File, + IN gctSIZE_T ByteCount, + IN gctPOINTER Data, + OUT gctSIZE_T * ByteRead + ); + +/* Write data to a file. */ +gceSTATUS +gcoOS_Write( + IN gcoOS Os, + IN gctFILE File, + IN gctSIZE_T ByteCount, + IN gctCONST_POINTER Data + ); + +/* Flush data to a file. */ +gceSTATUS +gcoOS_Flush( + IN gcoOS Os, + IN gctFILE File + ); + +/* Close a file descriptor. */ +gceSTATUS +gcoOS_CloseFD( + IN gcoOS Os, + IN gctINT FD + ); + +/* Scan a file. */ +gceSTATUS +gcoOS_FscanfI( + IN gcoOS Os, + IN gctFILE File, + IN gctCONST_STRING Format, + OUT gctUINT *result + ); + +/* Dup file descriptor to another. */ +gceSTATUS +gcoOS_DupFD( + IN gcoOS Os, + IN gctINT FD, + OUT gctINT * FD2 + ); + +/* Create an endpoint for communication. */ +gceSTATUS +gcoOS_Socket( + IN gcoOS Os, + IN gctINT Domain, + IN gctINT Type, + IN gctINT Protocol, + OUT gctINT *SockFd + ); + +/* Close a socket. */ +gceSTATUS +gcoOS_CloseSocket( + IN gcoOS Os, + IN gctINT SockFd + ); + +/* Initiate a connection on a socket. */ +gceSTATUS +gcoOS_Connect( + IN gcoOS Os, + IN gctINT SockFd, + IN gctCONST_POINTER HostName, + IN gctUINT Port); + +/* Shut down part of connection on a socket. */ +gceSTATUS +gcoOS_Shutdown( + IN gcoOS Os, + IN gctINT SockFd, + IN gctINT How + ); + +/* Send a message on a socket. */ +gceSTATUS +gcoOS_Send( + IN gcoOS Os, + IN gctINT SockFd, + IN gctSIZE_T ByteCount, + IN gctCONST_POINTER Data, + IN gctINT Flags + ); + +/* Initiate a connection on a socket. */ +gceSTATUS +gcoOS_WaitForSend( + IN gcoOS Os, + IN gctINT SockFd, + IN gctINT Seconds, + IN gctINT MicroSeconds); + +/* Get environment variable value. */ +gceSTATUS +gcoOS_GetEnv( + IN gcoOS Os, + IN gctCONST_STRING VarName, + OUT gctSTRING * Value + ); + +/* Set environment variable value. */ +gceSTATUS +gcoOS_SetEnv( + IN gcoOS Os, + IN gctCONST_STRING VarName, + IN gctSTRING Value + ); + +/* Get current working directory. 
*/ +gceSTATUS +gcoOS_GetCwd( + IN gcoOS Os, + IN gctINT SizeInBytes, + OUT gctSTRING Buffer + ); + +/* Get file status info. */ +gceSTATUS +gcoOS_Stat( + IN gcoOS Os, + IN gctCONST_STRING FileName, + OUT gctPOINTER Buffer + ); + +typedef enum _gceFILE_WHENCE +{ + gcvFILE_SEEK_SET, + gcvFILE_SEEK_CUR, + gcvFILE_SEEK_END +} +gceFILE_WHENCE; + +/* Set the current position of a file. */ +gceSTATUS +gcoOS_Seek( + IN gcoOS Os, + IN gctFILE File, + IN gctUINT32 Offset, + IN gceFILE_WHENCE Whence + ); + +/* Set the current position of a file. */ +gceSTATUS +gcoOS_SetPos( + IN gcoOS Os, + IN gctFILE File, + IN gctUINT32 Position + ); + +/* Get the current position of a file. */ +gceSTATUS +gcoOS_GetPos( + IN gcoOS Os, + IN gctFILE File, + OUT gctUINT32 * Position + ); + +/* Same as strstr. */ +gceSTATUS +gcoOS_StrStr( + IN gctCONST_STRING String, + IN gctCONST_STRING SubString, + OUT gctSTRING * Output + ); + +/* Find the last occurance of a character inside a string. */ +gceSTATUS +gcoOS_StrFindReverse( + IN gctCONST_STRING String, + IN gctINT8 Character, + OUT gctSTRING * Output + ); + +gceSTATUS +gcoOS_StrDup( + IN gcoOS Os, + IN gctCONST_STRING String, + OUT gctSTRING * Target + ); + +/* Copy a string. */ +gceSTATUS +gcoOS_StrCopySafe( + IN gctSTRING Destination, + IN gctSIZE_T DestinationSize, + IN gctCONST_STRING Source + ); + +/* Append a string. */ +gceSTATUS +gcoOS_StrCatSafe( + IN gctSTRING Destination, + IN gctSIZE_T DestinationSize, + IN gctCONST_STRING Source + ); + +/* Compare two strings. */ +gceSTATUS +gcoOS_StrCmp( + IN gctCONST_STRING String1, + IN gctCONST_STRING String2 + ); + +/* Compare characters of two strings. */ +gceSTATUS +gcoOS_StrNCmp( + IN gctCONST_STRING String1, + IN gctCONST_STRING String2, + IN gctSIZE_T Count + ); + +/* Convert string to float. */ +gceSTATUS +gcoOS_StrToFloat( + IN gctCONST_STRING String, + OUT gctFLOAT * Float + ); + +/* Convert hex string to integer. 
*/ +gceSTATUS gcoOS_HexStrToInt( + IN gctCONST_STRING String, + OUT gctINT * Int + ); + +/* Convert hex string to float. */ +gceSTATUS +gcoOS_HexStrToFloat( + IN gctCONST_STRING String, + OUT gctFLOAT * Float + ); + +/* Convert string to integer. */ +gceSTATUS +gcoOS_StrToInt( + IN gctCONST_STRING String, + OUT gctINT * Int + ); + +gceSTATUS +gcoOS_MemCmp( + IN gctCONST_POINTER Memory1, + IN gctCONST_POINTER Memory2, + IN gctSIZE_T Bytes + ); + +gceSTATUS +gcoOS_PrintStrSafe( + OUT gctSTRING String, + IN gctSIZE_T StringSize, + IN OUT gctUINT * Offset, + IN gctCONST_STRING Format, + ... + ); + +gceSTATUS +gcoOS_LoadLibrary( + IN gcoOS Os, + IN gctCONST_STRING Library, + OUT gctHANDLE * Handle + ); + +gceSTATUS +gcoOS_FreeLibrary( + IN gcoOS Os, + IN gctHANDLE Handle + ); + +gceSTATUS +gcoOS_GetProcAddress( + IN gcoOS Os, + IN gctHANDLE Handle, + IN gctCONST_STRING Name, + OUT gctPOINTER * Function + ); + +gceSTATUS +gcoOS_Compact( + IN gcoOS Os + ); + +gceSTATUS +gcoOS_AddSignalHandler ( + IN gceSignalHandlerType SignalHandlerType + ); + +#if VIVANTE_PROFILER +gceSTATUS +gcoOS_ProfileStart( + IN gcoOS Os + ); + +gceSTATUS +gcoOS_ProfileEnd( + IN gcoOS Os, + IN gctCONST_STRING Title + ); + +gceSTATUS +gcoOS_SetProfileSetting( + IN gcoOS Os, + IN gctBOOL Enable, + IN gctCONST_STRING FileName + ); +#endif + +/* Get the amount of physical system memory */ +gceSTATUS +gcoOS_GetPhysicalSystemMemorySize( + OUT gctSIZE_T * PhysicalSystemMemorySize + ); + +/* Query the video memory. 
*/ +gceSTATUS +gcoOS_QueryVideoMemory( + IN gcoOS Os, + OUT gctPHYS_ADDR * InternalAddress, + OUT gctSIZE_T * InternalSize, + OUT gctPHYS_ADDR * ExternalAddress, + OUT gctSIZE_T * ExternalSize, + OUT gctPHYS_ADDR * ContiguousAddress, + OUT gctSIZE_T * ContiguousSize + ); + +gceSTATUS +gcoOS_QueryCurrentProcessName( + OUT gctSTRING Name, + IN gctSIZE_T Size + ); + + +/*----------------------------------------------------------------------------*/ +/*----- Atoms ----------------------------------------------------------------*/ + +/* Construct an atom. */ +gceSTATUS +gcoOS_AtomConstruct( + IN gcoOS Os, + OUT gcsATOM_PTR * Atom + ); + +/* Destroy an atom. */ +gceSTATUS +gcoOS_AtomDestroy( + IN gcoOS Os, + IN gcsATOM_PTR Atom + ); + +/* Get the 32-bit value protected by an atom. */ +gceSTATUS +gcoOS_AtomGet( + IN gcoOS Os, + IN gcsATOM_PTR Atom, + OUT gctINT32_PTR Value + ); + +/* Set the 32-bit value protected by an atom. */ +gceSTATUS +gcoOS_AtomSet( + IN gcoOS Os, + IN gcsATOM_PTR Atom, + IN gctINT32 Value + ); + +/* Increment an atom. */ +gceSTATUS +gcoOS_AtomIncrement( + IN gcoOS Os, + IN gcsATOM_PTR Atom, + OUT gctINT32_PTR OldValue + ); + +/* Decrement an atom. */ +gceSTATUS +gcoOS_AtomDecrement( + IN gcoOS Os, + IN gcsATOM_PTR Atom, + OUT gctINT32_PTR OldValue + ); + +gctHANDLE +gcoOS_GetCurrentProcessID( + void + ); + +gctHANDLE +gcoOS_GetCurrentThreadID( + void + ); + +/*----------------------------------------------------------------------------*/ +/*----- Time -----------------------------------------------------------------*/ + +/* Get the number of milliseconds since the system started. */ +gctUINT32 +gcoOS_GetTicks( + void + ); + +/* Get time in microseconds. */ +gceSTATUS +gcoOS_GetTime( + gctUINT64_PTR Time + ); + +/* Get CPU usage in microseconds. */ +gceSTATUS +gcoOS_GetCPUTime( + gctUINT64_PTR CPUTime + ); + +/* Get memory usage. 
*/ +gceSTATUS +gcoOS_GetMemoryUsage( + gctUINT32_PTR MaxRSS, + gctUINT32_PTR IxRSS, + gctUINT32_PTR IdRSS, + gctUINT32_PTR IsRSS + ); + +/* Delay a number of microseconds. */ +gceSTATUS +gcoOS_Delay( + IN gcoOS Os, + IN gctUINT32 Delay + ); + +/*----------------------------------------------------------------------------*/ +/*----- Threads --------------------------------------------------------------*/ + +#ifdef _WIN32 +/* Cannot include windows.h here becuase "near" and "far" + * which are used in gcsDEPTH_INFO, are defined to nothing in WinDef.h. + * So, use the real value of DWORD and WINAPI, instead. + * DWORD is unsigned long, and WINAPI is __stdcall. + * If these two are change in WinDef.h, the following two typdefs + * need to be changed, too. + */ +typedef unsigned long gctTHREAD_RETURN; +typedef unsigned long (__stdcall * gcTHREAD_ROUTINE)(void * Argument); +#else +typedef void * gctTHREAD_RETURN; +typedef void * (* gcTHREAD_ROUTINE)(void *); +#endif + +/* Create a new thread. */ +gceSTATUS +gcoOS_CreateThread( + IN gcoOS Os, + IN gcTHREAD_ROUTINE Worker, + IN gctPOINTER Argument, + OUT gctPOINTER * Thread + ); + +/* Close a thread. */ +gceSTATUS +gcoOS_CloseThread( + IN gcoOS Os, + IN gctPOINTER Thread + ); + +/*----------------------------------------------------------------------------*/ +/*----- Mutexes --------------------------------------------------------------*/ + +/* Create a new mutex. */ +gceSTATUS +gcoOS_CreateMutex( + IN gcoOS Os, + OUT gctPOINTER * Mutex + ); + +/* Delete a mutex. */ +gceSTATUS +gcoOS_DeleteMutex( + IN gcoOS Os, + IN gctPOINTER Mutex + ); + +/* Acquire a mutex. */ +gceSTATUS +gcoOS_AcquireMutex( + IN gcoOS Os, + IN gctPOINTER Mutex, + IN gctUINT32 Timeout + ); + +/* Release a mutex. 
*/ +gceSTATUS +gcoOS_ReleaseMutex( + IN gcoOS Os, + IN gctPOINTER Mutex + ); + +/*----------------------------------------------------------------------------*/ +/*----- Signals --------------------------------------------------------------*/ + +/* Create a signal. */ +gceSTATUS +gcoOS_CreateSignal( + IN gcoOS Os, + IN gctBOOL ManualReset, + OUT gctSIGNAL * Signal + ); + +/* Destroy a signal. */ +gceSTATUS +gcoOS_DestroySignal( + IN gcoOS Os, + IN gctSIGNAL Signal + ); + +/* Signal a signal. */ +gceSTATUS +gcoOS_Signal( + IN gcoOS Os, + IN gctSIGNAL Signal, + IN gctBOOL State + ); + +/* Wait for a signal. */ +gceSTATUS +gcoOS_WaitSignal( + IN gcoOS Os, + IN gctSIGNAL Signal, + IN gctUINT32 Wait + ); + +/* Map a signal from another process */ +gceSTATUS +gcoOS_MapSignal( + IN gctSIGNAL RemoteSignal, + OUT gctSIGNAL * LocalSignal + ); + +/* Unmap a signal mapped from another process */ +gceSTATUS +gcoOS_UnmapSignal( + IN gctSIGNAL Signal + ); + +/*----------------------------------------------------------------------------*/ +/*----- Android Native Fence -------------------------------------------------*/ + +/* Create native fence. */ +gceSTATUS +gcoOS_CreateNativeFence( + IN gcoOS Os, + IN gctSIGNAL Signal, + OUT gctINT * FenceFD + ); + +/* (CPU) Wait on native fence. */ +gceSTATUS +gcoOS_ClientWaitNativeFence( + IN gcoOS Os, + IN gctINT FenceFD, + IN gctUINT32 Timeout + ); + +/* (GPU) Wait on native fence. */ +gceSTATUS +gcoOS_WaitNativeFence( + IN gcoOS Os, + IN gctINT FenceFD, + IN gctUINT32 Timeout + ); + +/*----------------------------------------------------------------------------*/ +/*----- Memory Access and Cache ----------------------------------------------*/ + +/* Write a register. */ +gceSTATUS +gcoOS_WriteRegister( + IN gcoOS Os, + IN gctUINT32 Address, + IN gctUINT32 Data + ); + +/* Read a register. 
*/ +gceSTATUS +gcoOS_ReadRegister( + IN gcoOS Os, + IN gctUINT32 Address, + OUT gctUINT32 * Data + ); + +gceSTATUS +gcoOS_CacheClean( + IN gcoOS Os, + IN gctUINT32 Node, + IN gctPOINTER Logical, + IN gctSIZE_T Bytes + ); + +gceSTATUS +gcoOS_CacheFlush( + IN gcoOS Os, + IN gctUINT32 Node, + IN gctPOINTER Logical, + IN gctSIZE_T Bytes + ); + +gceSTATUS +gcoOS_CacheInvalidate( + IN gcoOS Os, + IN gctUINT32 Node, + IN gctPOINTER Logical, + IN gctSIZE_T Bytes + ); + +gceSTATUS +gcoOS_MemoryBarrier( + IN gcoOS Os, + IN gctPOINTER Logical + ); + +gceSTATUS +gcoOS_CPUPhysicalToGPUPhysical( + IN gctUINT32 CPUPhysical, + OUT gctUINT32_PTR GPUPhysical + ); + +gceSTATUS +gcoOS_QuerySystemInfo( + IN gcoOS Os, + OUT gcsSystemInfo *Info + ); + +/*----------------------------------------------------------------------------*/ +/*----- Profile --------------------------------------------------------------*/ + +gceSTATUS +gckOS_GetProfileTick( + OUT gctUINT64_PTR Tick + ); + +gceSTATUS +gckOS_QueryProfileTickRate( + OUT gctUINT64_PTR TickRate + ); + +gctUINT32 +gckOS_ProfileToMS( + IN gctUINT64 Ticks + ); + +gceSTATUS +gcoOS_GetProfileTick( + OUT gctUINT64_PTR Tick + ); + +gceSTATUS +gcoOS_QueryProfileTickRate( + OUT gctUINT64_PTR TickRate + ); + +#define _gcmPROFILE_INIT(prefix, freq, start) \ + do { \ + prefix ## OS_QueryProfileTickRate(&(freq)); \ + prefix ## OS_GetProfileTick(&(start)); \ + } while (gcvFALSE) + +#define _gcmPROFILE_QUERY(prefix, start, ticks) \ + do { \ + prefix ## OS_GetProfileTick(&(ticks)); \ + (ticks) = ((ticks) > (start)) ? 
((ticks) - (start)) \ + : (~0ull - (start) + (ticks) + 1); \ + } while (gcvFALSE) + +#if gcdENABLE_PROFILING +# define gcmkPROFILE_INIT(freq, start) _gcmPROFILE_INIT(gck, freq, start) +# define gcmkPROFILE_QUERY(start, ticks) _gcmPROFILE_QUERY(gck, start, ticks) +# define gcmPROFILE_INIT(freq, start) _gcmPROFILE_INIT(gco, freq, start) +# define gcmPROFILE_QUERY(start, ticks) _gcmPROFILE_QUERY(gco, start, ticks) +# define gcmPROFILE_ONLY(x) x +# define gcmPROFILE_ELSE(x) do { } while (gcvFALSE) +# define gcmPROFILE_DECLARE_ONLY(x) x +# define gcmPROFILE_DECLARE_ELSE(x) typedef x +#else +# define gcmkPROFILE_INIT(start, freq) do { } while (gcvFALSE) +# define gcmkPROFILE_QUERY(start, ticks) do { } while (gcvFALSE) +# define gcmPROFILE_INIT(start, freq) do { } while (gcvFALSE) +# define gcmPROFILE_QUERY(start, ticks) do { } while (gcvFALSE) +# define gcmPROFILE_ONLY(x) do { } while (gcvFALSE) +# define gcmPROFILE_ELSE(x) x +# define gcmPROFILE_DECLARE_ONLY(x) do { } while (gcvFALSE) +# define gcmPROFILE_DECLARE_ELSE(x) x +#endif + +/******************************************************************************* +** gcoMATH object +*/ + +#define gcdPI 3.14159265358979323846f + +/* Kernel. */ +gctINT +gckMATH_ModuloInt( + IN gctINT X, + IN gctINT Y + ); + +/* User. 
*/ +gctUINT32 +gcoMATH_Log2in5dot5( + IN gctINT X + ); + + +gctFLOAT +gcoMATH_UIntAsFloat( + IN gctUINT32 X + ); + +gctUINT32 +gcoMATH_FloatAsUInt( + IN gctFLOAT X + ); + +gctBOOL +gcoMATH_CompareEqualF( + IN gctFLOAT X, + IN gctFLOAT Y + ); + +gctUINT16 +gcoMATH_UInt8AsFloat16( + IN gctUINT8 X + ); + +gctUINT32 +gcoMATH_Float16ToFloat( + IN gctUINT16 In + ); + +gctUINT16 +gcoMATH_FloatToFloat16( + IN gctUINT32 In + ); + +gctUINT32 +gcoMATH_Float11ToFloat( + IN gctUINT32 In + ); + +gctUINT16 +gcoMATH_FloatToFloat11( + IN gctUINT32 In + ); + +gctUINT32 +gcoMATH_Float10ToFloat( + IN gctUINT32 In + ); + +gctUINT16 +gcoMATH_FloatToFloat10( + IN gctUINT32 In + ); + +gctUINT32 +gcoMATH_Float14ToFloat( + IN gctUINT16 In + ); + +/******************************************************************************\ +**************************** Coordinate Structures ***************************** +\******************************************************************************/ + +typedef struct _gcsPOINT +{ + gctINT32 x; + gctINT32 y; +} +gcsPOINT; + +typedef struct _gcsSIZE +{ + gctINT32 width; + gctINT32 height; +} +gcsSIZE; + +typedef struct _gcsRECT +{ + gctINT32 left; + gctINT32 top; + gctINT32 right; + gctINT32 bottom; +} +gcsRECT; + +typedef struct _gcsPIXEL +{ + union + { + struct + { + gctFLOAT r, g, b, a; + } f; + struct + { + gctINT32 r, g, b, a; + } i; + struct + { + gctUINT32 r, g, b, a; + } ui; + } color; + + gctFLOAT d; + gctUINT32 s; + +} gcsPIXEL; + +/******************************************************************************\ +********************************* gcoSURF Object ******************************** +\******************************************************************************/ + +/*----------------------------------------------------------------------------*/ +/*------------------------------- gcoSURF Common ------------------------------*/ + +/* Color format classes. 
*/ +typedef enum _gceFORMAT_CLASS +{ + gcvFORMAT_CLASS_RGBA = 4500, + gcvFORMAT_CLASS_YUV, + gcvFORMAT_CLASS_INDEX, + gcvFORMAT_CLASS_LUMINANCE, + gcvFORMAT_CLASS_BUMP, + gcvFORMAT_CLASS_DEPTH, + gcvFORMAT_CLASS_ASTC, + gcvFORMAT_CLASS_COMPRESSED, + gcvFORMAT_CLASS_OTHER +} +gceFORMAT_CLASS; + +/* Color format data type */ +typedef enum _gceFORMAT_DATATYPE +{ + gcvFORMAT_DATATYPE_UNSIGNED_NORMALIZED, + gcvFORMAT_DATATYPE_SIGNED_NORMALIZED, + gcvFORMAT_DATATYPE_UNSIGNED_INTEGER, + gcvFORMAT_DATATYPE_SIGNED_INTEGER, + gcvFORMAT_DATATYPE_FLOAT16, + gcvFORMAT_DATATYPE_FLOAT32, + gcvFORMAT_DATATYPE_FLOAT_E5B9G9R9, + gcvFORMAT_DATATYPE_FLOAT_B10G11R11F, + gcvFORMAT_DATATYPE_INDEX, + gcvFORMAT_DATATYPE_SRGB, + gcvFORMAT_DATATYPE_FLOAT32_UINT, +} +gceFORMAT_DATATYPE; + +/* Special enums for width field in gcsFORMAT_COMPONENT. */ +typedef enum _gceCOMPONENT_CONTROL +{ + gcvCOMPONENT_NOTPRESENT = 0x00, + gcvCOMPONENT_DONTCARE = 0x80, + gcvCOMPONENT_WIDTHMASK = 0x7F, + gcvCOMPONENT_ODD = 0x80 +} +gceCOMPONENT_CONTROL; + +/* Color format component parameters. */ +typedef struct _gcsFORMAT_COMPONENT +{ + gctUINT8 start; + gctUINT8 width; +} +gcsFORMAT_COMPONENT; + +/* RGBA color format class. */ +typedef struct _gcsFORMAT_CLASS_TYPE_RGBA +{ + gcsFORMAT_COMPONENT alpha; + gcsFORMAT_COMPONENT red; + gcsFORMAT_COMPONENT green; + gcsFORMAT_COMPONENT blue; +} +gcsFORMAT_CLASS_TYPE_RGBA; + +/* YUV color format class. */ +typedef struct _gcsFORMAT_CLASS_TYPE_YUV +{ + gcsFORMAT_COMPONENT y; + gcsFORMAT_COMPONENT u; + gcsFORMAT_COMPONENT v; +} +gcsFORMAT_CLASS_TYPE_YUV; + +/* Index color format class. */ +typedef struct _gcsFORMAT_CLASS_TYPE_INDEX +{ + gcsFORMAT_COMPONENT value; +} +gcsFORMAT_CLASS_TYPE_INDEX; + +/* Luminance color format class. */ +typedef struct _gcsFORMAT_CLASS_TYPE_LUMINANCE +{ + gcsFORMAT_COMPONENT alpha; + gcsFORMAT_COMPONENT value; +} +gcsFORMAT_CLASS_TYPE_LUMINANCE; + +/* Bump map color format class. 
*/ +typedef struct _gcsFORMAT_CLASS_TYPE_BUMP +{ + gcsFORMAT_COMPONENT alpha; + gcsFORMAT_COMPONENT l; + gcsFORMAT_COMPONENT v; + gcsFORMAT_COMPONENT u; + gcsFORMAT_COMPONENT q; + gcsFORMAT_COMPONENT w; +} +gcsFORMAT_CLASS_TYPE_BUMP; + +/* Depth and stencil format class. */ +typedef struct _gcsFORMAT_CLASS_TYPE_DEPTH +{ + gcsFORMAT_COMPONENT depth; + gcsFORMAT_COMPONENT stencil; +} +gcsFORMAT_CLASS_TYPE_DEPTH; + +typedef union _gcuPIXEL_FORMAT_CLASS +{ + gcsFORMAT_CLASS_TYPE_BUMP bump; + gcsFORMAT_CLASS_TYPE_RGBA rgba; + gcsFORMAT_CLASS_TYPE_YUV yuv; + gcsFORMAT_CLASS_TYPE_LUMINANCE lum; + gcsFORMAT_CLASS_TYPE_INDEX index; + gcsFORMAT_CLASS_TYPE_DEPTH depth; +} +gcuPIXEL_FORMAT_CLASS; + +/* Format parameters. */ +typedef struct _gcsSURF_FORMAT_INFO +{ + /* Name of the format */ + gctCONST_STRING formatName; + + /* Format code and class. */ + gceSURF_FORMAT format; + gceFORMAT_CLASS fmtClass; + + /* Format data type */ + gceFORMAT_DATATYPE fmtDataType; + + /* The size of one pixel in bits. */ + gctUINT8 bitsPerPixel; + + /* Pixel block dimensions. */ + gctUINT blockWidth; + gctUINT blockHeight; + + /* Pixel block size in bits. */ + gctUINT blockSize; + + /* Some formats are larger than what the GPU can support. */ + /* These formats are read in the number of layers specified. */ + gctUINT8 layers; + + /* The format is faked and software will interpret it differently + ** with HW. Most of them can't be blendable(PE) or filterable(TX). + */ + gctBOOL fakedFormat; + + /* Some formats have two neighbour pixels interleaved together. */ + /* To describe such format, set the flag to 1 and add another */ + /* like this one describing the odd pixel format. */ + gctBOOL interleaved; + + /* sRGB format. */ + gctBOOL sRGB; + + /* How GPU read from big-endian host memory */ + gceENDIAN_HINT endian; + + /* Format components. */ + gcuPIXEL_FORMAT_CLASS u; + + /* Format components. */ + gcuPIXEL_FORMAT_CLASS uOdd; + + /* Render format. 
*/ + gceSURF_FORMAT closestRenderFormat; + /*gctCLOSEST_FORMAT dynamicClosestRenderFormat;*/ + gctUINT renderFormat; + const gceTEXTURE_SWIZZLE * pixelSwizzle; + + /* Texture format. */ + gceSURF_FORMAT closestTXFormat; + gctUINT txFormat; + const gceTEXTURE_SWIZZLE * txSwizzle; + gctBOOL txIntFilter; +} +gcsSURF_FORMAT_INFO; + +/* Frame buffer information. */ +typedef struct _gcsSURF_FRAMEBUFFER +{ + gctPOINTER logical; + gctUINT width, height; + gctINT stride; + gceSURF_FORMAT format; +} +gcsSURF_FRAMEBUFFER; + +/* Generic pixel component descriptors. */ +extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_XXX8; +extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_XX8X; +extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_X8XX; +extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_8XXX; + +typedef enum _gceORIENTATION +{ + gcvORIENTATION_TOP_BOTTOM, + gcvORIENTATION_BOTTOM_TOP, +} +gceORIENTATION; + + +/* Construct a new gcoSURF object. */ +gceSTATUS +gcoSURF_Construct( + IN gcoHAL Hal, + IN gctUINT Width, + IN gctUINT Height, + IN gctUINT Depth, + IN gceSURF_TYPE Type, + IN gceSURF_FORMAT Format, + IN gcePOOL Pool, + OUT gcoSURF * Surface + ); + +/* Destroy an gcoSURF object. */ +gceSTATUS +gcoSURF_Destroy( + IN gcoSURF Surface + ); + +/* Map user-allocated surface. */ +gceSTATUS +gcoSURF_MapUserSurface( + IN gcoSURF Surface, + IN gctUINT Alignment, + IN gctPOINTER Logical, + IN gctUINT32 Physical + ); + +/* Wrapp surface with known logical/GPU address */ +gceSTATUS +gcoSURF_WrapSurface( + IN gcoSURF Surface, + IN gctUINT Alignment, + IN gctPOINTER Logical, + IN gctUINT32 Physical + ); + + +/* Query vid mem node info. */ +gceSTATUS +gcoSURF_QueryVidMemNode( + IN gcoSURF Surface, + OUT gctUINT32 * Node, + OUT gcePOOL * Pool, + OUT gctSIZE_T_PTR Bytes, + OUT gctUINT32 * TsNode, + OUT gcePOOL * TsPool, + OUT gctSIZE_T_PTR TsBytes + ); + +/* Set the color type of the surface. 
*/ +gceSTATUS +gcoSURF_SetColorType( + IN gcoSURF Surface, + IN gceSURF_COLOR_TYPE ColorType + ); + +/* Get the color type of the surface. */ +gceSTATUS +gcoSURF_GetColorType( + IN gcoSURF Surface, + OUT gceSURF_COLOR_TYPE *ColorType + ); + +/* Set the color space of the surface. */ +gceSTATUS +gcoSURF_SetColorSpace( + IN gcoSURF Surface, + IN gceSURF_COLOR_SPACE ColorSpace + ); + +/* Get the color space of the surface. */ +gceSTATUS +gcoSURF_GetColorSpace( + IN gcoSURF Surface, + OUT gceSURF_COLOR_SPACE *ColorSpace + ); + + +/* Set the surface ration angle. */ +gceSTATUS +gcoSURF_SetRotation( + IN gcoSURF Surface, + IN gceSURF_ROTATION Rotation + ); + +gceSTATUS +gcoSURF_IsValid( + IN gcoSURF Surface + ); + +#if gcdENABLE_3D +/* Verify and return the state of the tile status mechanism. */ +gceSTATUS +gcoSURF_IsTileStatusSupported( + IN gcoSURF Surface + ); + +/* Verify if surface has tile status enabled. */ +gceSTATUS +gcoSURF_IsTileStatusEnabled( + IN gcsSURF_VIEW *SurfView + ); + +/* Verify if surface is compressed. */ +gceSTATUS +gcoSURF_IsCompressed( + IN gcsSURF_VIEW *SurfView + ); + +/* Enable tile status for the specified surface on zero slot. */ +gceSTATUS +gcoSURF_EnableTileStatus( + IN gcsSURF_VIEW *Surface + ); + +/* Enable tile status for the specified surface on specified slot. */ +gceSTATUS +gcoSURF_EnableTileStatusEx( + IN gcsSURF_VIEW *surfView, + IN gctUINT RtIndex + ); + +/* Disable tile status for the specified surface. */ +gceSTATUS +gcoSURF_DisableTileStatus( + IN gcsSURF_VIEW *SurfView, + IN gctBOOL Decompress + ); + +/* Flush tile status cache for the specified surface. */ +gceSTATUS +gcoSURF_FlushTileStatus( + IN gcsSURF_VIEW *SurfView, + IN gctBOOL Decompress + ); +#endif /* gcdENABLE_3D */ + +/* Get surface size. 
*/ +gceSTATUS +gcoSURF_GetSize( + IN gcoSURF Surface, + OUT gctUINT * Width, + OUT gctUINT * Height, + OUT gctUINT * Depth + ); + +/* Get surface information */ +gceSTATUS +gcoSURF_GetInfo( + IN gcoSURF Surface, + IN gceSURF_INFO_TYPE InfoType, + IN OUT gctINT32 *Value + ); + +/* Get surface aligned sizes. */ +gceSTATUS +gcoSURF_GetAlignedSize( + IN gcoSURF Surface, + OUT gctUINT * Width, + OUT gctUINT * Height, + OUT gctINT * Stride + ); + +/* Get alignments. */ +gceSTATUS +gcoSURF_GetAlignment( + IN gceSURF_TYPE Type, + IN gceSURF_FORMAT Format, + OUT gctUINT * AddressAlignment, + OUT gctUINT * XAlignment, + OUT gctUINT * YAlignment + ); + +gceSTATUS +gcoSURF_AlignResolveRect( + IN gcoSURF Surf, + IN gcsPOINT_PTR RectOrigin, + IN gcsPOINT_PTR RectSize, + OUT gcsPOINT_PTR AlignedOrigin, + OUT gcsPOINT_PTR AlignedSize + ); + +/* Get surface type and format. */ +gceSTATUS +gcoSURF_GetFormat( + IN gcoSURF Surface, + OUT OPTIONAL gceSURF_TYPE * Type, + OUT OPTIONAL gceSURF_FORMAT * Format + ); + +/* Get surface information */ +gceSTATUS +gcoSURF_GetFormatInfo( + IN gcoSURF Surface, + OUT gcsSURF_FORMAT_INFO_PTR * formatInfo + ); + +/* Get Surface pack format */ +gceSTATUS +gcoSURF_GetPackedFormat( + IN gcoSURF Surface, + OUT gceSURF_FORMAT * Format + ); + +/* Get surface tiling. */ +gceSTATUS +gcoSURF_GetTiling( + IN gcoSURF Surface, + OUT gceTILING * Tiling + ); + +/* Get bottom buffer offset bytes. */ +gceSTATUS +gcoSURF_GetBottomBufferOffset( + IN gcoSURF Surface, + OUT gctUINT_PTR BottomBufferOffset + ); + +/* Lock the surface. */ +gceSTATUS +gcoSURF_Lock( + IN gcoSURF Surface, + IN OUT gctUINT32 * Address, + IN OUT gctPOINTER * Memory + ); + +/* Unlock the surface. */ +gceSTATUS +gcoSURF_Unlock( + IN gcoSURF Surface, + IN gctPOINTER Memory + ); + +/*. 
Query surface flags.*/ +gceSTATUS +gcoSURF_QueryFlags( + IN gcoSURF Surface, + IN gceSURF_FLAG Flag + ); + +gceSTATUS +gcoSURF_QueryHints( + IN gcoSURF Surface, + IN gceSURF_TYPE Hints + ); + +/* Return pixel format parameters; Info is required to be a pointer to an + * array of at least two items because some formats have up to two records + * of description. */ +gceSTATUS +gcoSURF_QueryFormat( + IN gceSURF_FORMAT Format, + OUT gcsSURF_FORMAT_INFO_PTR * Info + ); + +/* Compute the color pixel mask. */ +gceSTATUS +gcoSURF_ComputeColorMask( + IN gcsSURF_FORMAT_INFO_PTR Format, + OUT gctUINT32_PTR ColorMask + ); + +/* Flush the surface. */ +gceSTATUS +gcoSURF_Flush( + IN gcoSURF Surface + ); + +/* Fill surface from it's tile status buffer. */ +gceSTATUS +gcoSURF_FillFromTile( + IN gcsSURF_VIEW *SurView + ); + +/* Fill surface with a value. */ +gceSTATUS +gcoSURF_Fill( + IN gcoSURF Surface, + IN gcsPOINT_PTR Origin, + IN gcsSIZE_PTR Size, + IN gctUINT32 Value, + IN gctUINT32 Mask + ); + +/* Alpha blend two surfaces together. */ +gceSTATUS +gcoSURF_Blend( + IN gcoSURF SrcSurf, + IN gcoSURF DstSurf, + IN gcsPOINT_PTR SrcOrigin, + IN gcsPOINT_PTR DstOrigin, + IN gcsSIZE_PTR Size, + IN gceSURF_BLEND_MODE Mode + ); + +/* Create a new gcoSURF wrapper object. */ +gceSTATUS +gcoSURF_ConstructWrapper( + IN gcoHAL Hal, + OUT gcoSURF * Surface + ); + +/* Set surface flags.*/ +gceSTATUS +gcoSURF_SetFlags( + IN gcoSURF Surface, + IN gceSURF_FLAG Flag, + IN gctBOOL Value + ); + +/* Set the underlying buffer for the surface wrapper. */ +gceSTATUS +gcoSURF_SetBuffer( + IN gcoSURF Surface, + IN gceSURF_TYPE Type, + IN gceSURF_FORMAT Format, + IN gctUINT Stride, + IN gctPOINTER Logical, + IN gctUINT32 Physical + ); + +/* Set the size of the surface in pixels and map the underlying buffer. 
*/ +gceSTATUS +gcoSURF_SetWindow( + IN gcoSURF Surface, + IN gctUINT X, + IN gctUINT Y, + IN gctUINT Width, + IN gctUINT Height + ); + +/* Set the size of the surface in pixels and map the underlying buffer. */ +gceSTATUS +gcoSURF_SetImage( + IN gcoSURF Surface, + IN gctUINT X, + IN gctUINT Y, + IN gctUINT Width, + IN gctUINT Height, + IN gctUINT Depth + ); + +/* Set width/height alignment of the surface directly and calculate stride/size. This is only for dri backend now. Please be careful before use. */ +gceSTATUS +gcoSURF_SetAlignment( + IN gcoSURF Surface, + IN gctUINT Width, + IN gctUINT Height + ); + +/* Increase reference count of the surface. */ +gceSTATUS +gcoSURF_ReferenceSurface( + IN gcoSURF Surface + ); + +/* Get surface reference count. */ +gceSTATUS +gcoSURF_QueryReferenceCount( + IN gcoSURF Surface, + OUT gctINT32 * ReferenceCount + ); + +/* Set surface orientation. */ +gceSTATUS +gcoSURF_SetOrientation( + IN gcoSURF Surface, + IN gceORIENTATION Orientation + ); + +/* Query surface orientation. 
*/ +gceSTATUS +gcoSURF_QueryOrientation( + IN gcoSURF Surface, + OUT gceORIENTATION * Orientation + ); + +gceSTATUS +gcoSURF_NODE_Cache( + IN gcsSURF_NODE_PTR Node, + IN gctPOINTER Logical, + IN gctSIZE_T Bytes, + IN gceCACHEOPERATION Operation + ); + +gceSTATUS +gcsSURF_NODE_SetHardwareAddress( + IN gcsSURF_NODE_PTR Node, + IN gctUINT32 Address + ); + +gceSTATUS +gcsSURF_NODE_GetHardwareAddress( + IN gcsSURF_NODE_PTR Node, + OUT gctUINT32_PTR Physical, + OUT gctUINT32_PTR Physical2, + OUT gctUINT32_PTR Physical3, + OUT gctUINT32_PTR PhysicalBottom + ); + +gctUINT32 +gcsSURF_NODE_GetHWAddress( + IN gcsSURF_NODE_PTR Node + ); + +/* Lock and unlock surface node */ +gceSTATUS +gcoSURF_LockNode( + IN gcsSURF_NODE_PTR Node, + OUT gctUINT32 * Address, + OUT gctPOINTER * Memory + ); + +gceSTATUS +gcoSURF_UnLockNode( + IN gcsSURF_NODE_PTR Node, + IN gceSURF_TYPE Type + ); + +/* Perform CPU cache operation on surface node */ +gceSTATUS +gcoSURF_NODE_CPUCacheOperation( + IN gcsSURF_NODE_PTR Node, + IN gceSURF_TYPE Type, + IN gctSIZE_T Offset, + IN gctSIZE_T Length, + IN gceCACHEOPERATION Operation + ); + +/* Perform CPU cache operation on surface */ +gceSTATUS +gcoSURF_CPUCacheOperation( + IN gcoSURF Surface, + IN gceCACHEOPERATION Operation + ); + + +gceSTATUS +gcoSURF_Swap( + IN gcoSURF Surface1, + IN gcoSURF Surface2 + ); + +gceSTATUS +gcoSURF_ResetSurWH( + IN gcoSURF Surface, + IN gctUINT oriw, + IN gctUINT orih, + IN gctUINT alignw, + IN gctUINT alignh, + IN gceSURF_FORMAT fmt +); + +/* Update surface timestamp. */ +gceSTATUS +gcoSURF_UpdateTimeStamp( + IN gcoSURF Surface + ); + +/* Query surface current timestamp. */ +gceSTATUS +gcoSURF_QueryTimeStamp( + IN gcoSURF Surface, + OUT gctUINT64 * TimeStamp + ); + +/* + * Allocate shared buffer for this surface, so that + * surface states can be shared across processes. 
+ */ +gceSTATUS +gcoSURF_AllocShBuffer( + IN gcoSURF Surface, + OUT gctSHBUF * ShBuf + ); + +/* Bind shared buffer to this surface */ +gceSTATUS +gcoSURF_BindShBuffer( + IN gcoSURF Surface, + IN gctSHBUF ShBuf + ); + +/* Push surface shared states to shared buffer. */ +gceSTATUS +gcoSURF_PushSharedInfo( + IN gcoSURF Surface + ); + +/* Pop shared states from shared buffer. */ +gceSTATUS +gcoSURF_PopSharedInfo( + IN gcoSURF Surface + ); + +#if (gcdENABLE_3D) +/* Copy surface. */ +gceSTATUS +gcoSURF_Copy( + IN gcoSURF Surface, + IN gcoSURF Source + ); + +/* Set number of samples for a gcoSURF object. */ +gceSTATUS +gcoSURF_SetSamples( + IN gcoSURF Surface, + IN gctUINT Samples + ); + +/* Get the number of samples per pixel. */ +gceSTATUS +gcoSURF_GetSamples( + IN gcoSURF Surface, + OUT gctUINT_PTR Samples + ); + +/* Append tile status buffer to user pool surface. */ +gceSTATUS +gcoSURF_AppendTileStatus( + IN gcoSURF Surface + ); +#endif + +gceSTATUS +gcoSURF_WrapUserMemory( + IN gcoHAL Hal, + IN gctUINT Width, + IN gctUINT Height, + IN gctUINT Stride, + IN gctUINT Depth, + IN gceSURF_TYPE Type, + IN gceSURF_FORMAT Format, + IN gctUINT32 Handle, + IN gctUINT32 Flag, + OUT gcoSURF * Surface + ); + +gceSTATUS +gcoSURF_WrapUserMultiBuffer( + IN gcoHAL Hal, + IN gctUINT Width, + IN gctUINT Height, + IN gceSURF_TYPE Type, + IN gceSURF_FORMAT Format, + IN gctUINT Stride[3], + IN gctUINT32 Handle[3], + IN gctUINT BufferOffset[3], + IN gctUINT32 Flag, + OUT gcoSURF * Surface + ); + +gceSTATUS +gcoSURF_UpdateMetadata( + IN gcoSURF Surface, + IN gctINT TsFD + ); + +#define MAX_SURF_MIX_SRC_NUM 64 +gceSTATUS +gcoSURF_MixSurfacesCPU( + IN gcoSURF TargetSurface, + IN gctUINT TargetSliceIndex, + IN gcoSURF *SourceSurface, + IN gctUINT *SourceSliceIndices, + IN gctFLOAT *Weights, + IN gctINT Count + ); + + +/******************************************************************************\ +********************************* gcoDUMP Object ******************************** 
+\******************************************************************************/ + +/* Construct a new gcoDUMP object. */ +gceSTATUS +gcoDUMP_Construct( + IN gcoOS Os, + IN gcoHAL Hal, + OUT gcoDUMP * Dump + ); + +/* Destroy a gcoDUMP object. */ +gceSTATUS +gcoDUMP_Destroy( + IN gcoDUMP Dump + ); + +/* Enable/disable dumping. */ +gceSTATUS +gcoDUMP_Control( + IN gcoDUMP Dump, + IN gctSTRING FileName + ); + +gceSTATUS +gcoDUMP_IsEnabled( + IN gcoDUMP Dump, + OUT gctBOOL * Enabled + ); + +/* Add surface. */ +gceSTATUS +gcoDUMP_AddSurface( + IN gcoDUMP Dump, + IN gctINT32 Width, + IN gctINT32 Height, + IN gceSURF_FORMAT PixelFormat, + IN gctUINT32 Address, + IN gctSIZE_T ByteCount + ); + +/* Mark the beginning of a frame. */ +gceSTATUS +gcoDUMP_FrameBegin( + IN gcoDUMP Dump + ); + +/* Mark the end of a frame. */ +gceSTATUS +gcoDUMP_FrameEnd( + IN gcoDUMP Dump + ); + +/* Dump data. */ +gceSTATUS +gcoDUMP_DumpData( + IN gcoDUMP Dump, + IN gceDUMP_TAG Type, + IN gctUINT32 Address, + IN gctSIZE_T ByteCount, + IN gctCONST_POINTER Data + ); + +/* Delete an address. */ +gceSTATUS +gcoDUMP_Delete( + IN gcoDUMP Dump, + IN gctUINT32 Address + ); + +/* Enable dump or not. */ +gceSTATUS +gcoDUMP_SetDumpFlag( + IN gctBOOL DumpState + ); + +/******************************************************************************\ +******************************* gcsRECT Structure ****************************** +\******************************************************************************/ + +/* Initialize rectangle structure. */ +gceSTATUS +gcsRECT_Set( + OUT gcsRECT_PTR Rect, + IN gctINT32 Left, + IN gctINT32 Top, + IN gctINT32 Right, + IN gctINT32 Bottom + ); + +/* Return the width of the rectangle. */ +gceSTATUS +gcsRECT_Width( + IN gcsRECT_PTR Rect, + OUT gctINT32 * Width + ); + +/* Return the height of the rectangle. */ +gceSTATUS +gcsRECT_Height( + IN gcsRECT_PTR Rect, + OUT gctINT32 * Height + ); + +/* Ensure that top left corner is to the left and above the right bottom. 
*/ +gceSTATUS +gcsRECT_Normalize( + IN OUT gcsRECT_PTR Rect + ); + +/* Compare two rectangles. */ +gceSTATUS +gcsRECT_IsEqual( + IN gcsRECT_PTR Rect1, + IN gcsRECT_PTR Rect2, + OUT gctBOOL * Equal + ); + +/* Compare the sizes of two rectangles. */ +gceSTATUS +gcsRECT_IsOfEqualSize( + IN gcsRECT_PTR Rect1, + IN gcsRECT_PTR Rect2, + OUT gctBOOL * EqualSize + ); + +gceSTATUS +gcsRECT_RelativeRotation( + IN gceSURF_ROTATION Orientation, + IN OUT gceSURF_ROTATION *Relation); + +gceSTATUS + +gcsRECT_Rotate( + + IN OUT gcsRECT_PTR Rect, + + IN gceSURF_ROTATION Rotation, + + IN gceSURF_ROTATION toRotation, + + IN gctINT32 SurfaceWidth, + + IN gctINT32 SurfaceHeight + + ); + +/******************************************************************************\ +**************************** gcsBOUNDARY Structure ***************************** +\******************************************************************************/ + +typedef struct _gcsBOUNDARY +{ + gctINT x; + gctINT y; + gctINT width; + gctINT height; +} +gcsBOUNDARY; + +/******************************************************************************\ +********************************* gcoHEAP Object ******************************** +\******************************************************************************/ + +typedef struct _gcoHEAP * gcoHEAP; + +/* Construct a new gcoHEAP object. */ +gceSTATUS +gcoHEAP_Construct( + IN gcoOS Os, + IN gctSIZE_T AllocationSize, + OUT gcoHEAP * Heap + ); + +/* Destroy an gcoHEAP object. */ +gceSTATUS +gcoHEAP_Destroy( + IN gcoHEAP Heap + ); + +/* Allocate memory. */ +gceSTATUS +gcoHEAP_Allocate( + IN gcoHEAP Heap, + IN gctSIZE_T Bytes, + OUT gctPOINTER * Node + ); + +gceSTATUS +gcoHEAP_GetMemorySize( + IN gcoHEAP Heap, + IN gctPOINTER Memory, + OUT gctSIZE_T_PTR MemorySize + ); + +/* Free memory. */ +gceSTATUS +gcoHEAP_Free( + IN gcoHEAP Heap, + IN gctPOINTER Node + ); + +#if (VIVANTE_PROFILER || gcdDEBUG) +/* Profile the heap. 
*/ +gceSTATUS +gcoHEAP_ProfileStart( + IN gcoHEAP Heap + ); + +gceSTATUS +gcoHEAP_ProfileEnd( + IN gcoHEAP Heap, + IN gctCONST_STRING Title + ); +#endif + + +/******************************************************************************\ +******************************* Debugging Macros ******************************* +\******************************************************************************/ + +void +gcoOS_SetDebugLevel( + IN gctUINT32 Level + ); + +void +gcoOS_GetDebugLevel( + OUT gctUINT32_PTR DebugLevel + ); + +void +gcoOS_SetDebugZone( + IN gctUINT32 Zone + ); + +void +gcoOS_GetDebugZone( + IN gctUINT32 Zone, + OUT gctUINT32_PTR DebugZone + ); + +void +gcoOS_SetDebugLevelZone( + IN gctUINT32 Level, + IN gctUINT32 Zone + ); + +void +gcoOS_SetDebugZones( + IN gctUINT32 Zones, + IN gctBOOL Enable + ); + +void +gcoOS_SetDebugFile( + IN gctCONST_STRING FileName + ); + +gctFILE +gcoOS_ReplaceDebugFile( + IN gctFILE fp + ); + +/******************************************************************************* +** +** gcmFATAL +** +** Print a message to the debugger and execute a break point. +** +** ARGUMENTS: +** +** message Message. +** ... Optional arguments. +*/ + +void +gckOS_DebugFatal( + IN gctCONST_STRING Message, + ... + ); + +void +gcoOS_DebugFatal( + IN gctCONST_STRING Message, + ... + ); + +#if gcmIS_DEBUG(gcdDEBUG_FATAL) +# define gcmFATAL gcoOS_DebugFatal +# define gcmkFATAL gckOS_DebugFatal +#elif gcdHAS_ELLIPSIS +# define gcmFATAL(...) +# define gcmkFATAL(...) +#else + gcmINLINE static void + __dummy_fatal( + IN gctCONST_STRING Message, + ... + ) + { + } +# define gcmFATAL __dummy_fatal +# define gcmkFATAL __dummy_fatal +#endif + +#define gcmENUM2TEXT(e) case e: return #e + +/******************************************************************************* +** +** gcmTRACE +** +** Print a message to the debugfer if the correct level has been set. In +** retail mode this macro does nothing. +** +** ARGUMENTS: +** +** level Level of message. 
+** message Message. +** ... Optional arguments. +*/ +#define gcvLEVEL_NONE -1 +#define gcvLEVEL_ERROR 0 +#define gcvLEVEL_WARNING 1 +#define gcvLEVEL_INFO 2 +#define gcvLEVEL_VERBOSE 3 + +void +gckOS_DebugTrace( + IN gctUINT32 Level, + IN gctCONST_STRING Message, + ... + ); + +void +gckOS_DebugTraceN( + IN gctUINT32 Level, + IN gctUINT ArgumentSize, + IN gctCONST_STRING Message, + ... + ); + +void +gcoOS_DebugTrace( + IN gctUINT32 Level, + IN gctCONST_STRING Message, + ... + ); + +#if gcmIS_DEBUG(gcdDEBUG_TRACE) +# define gcmTRACE gcoOS_DebugTrace +# define gcmkTRACE gckOS_DebugTrace +# define gcmkTRACE_N gckOS_DebugTraceN +#elif gcdHAS_ELLIPSIS +# define gcmTRACE(...) +# define gcmkTRACE(...) +# define gcmkTRACE_N(...) +#else + gcmINLINE static void + __dummy_trace( + IN gctUINT32 Level, + IN gctCONST_STRING Message, + ... + ) + { + } + + gcmINLINE static void + __dummy_trace_n( + IN gctUINT32 Level, + IN gctUINT ArgumentSize, + IN gctCONST_STRING Message, + ... + ) + { + } + +# define gcmTRACE __dummy_trace +# define gcmkTRACE __dummy_trace +# define gcmkTRACE_N __dummy_trace_n +#endif + +/* Zones common for kernel and user. */ +#define gcvZONE_OS (1 << 0) +#define gcvZONE_HARDWARE (1 << 1) +#define gcvZONE_HEAP (1 << 2) +#define gcvZONE_SIGNAL (1 << 3) + +/* Kernel zones. */ +#define gcvZONE_KERNEL (1 << 4) +#define gcvZONE_VIDMEM (1 << 5) +#define gcvZONE_COMMAND (1 << 6) +#define gcvZONE_DRIVER (1 << 7) +#define gcvZONE_CMODEL (1 << 8) +#define gcvZONE_MMU (1 << 9) +#define gcvZONE_EVENT (1 << 10) +#define gcvZONE_DEVICE (1 << 11) +#define gcvZONE_DATABASE (1 << 12) +#define gcvZONE_INTERRUPT (1 << 13) +#define gcvZONE_POWER (1 << 14) +#define gcvZONE_ASYNC_COMMAND (1 << 15) +#define gcvZONE_ALLOCATOR (1 << 16) + +/* User zones. 
*/ +#define gcvZONE_HAL (1 << 4) +#define gcvZONE_BUFFER (1 << 5) +#define gcvZONE_CONTEXT (1 << 6) +#define gcvZONE_SURFACE (1 << 7) +#define gcvZONE_INDEX (1 << 8) +#define gcvZONE_STREAM (1 << 9) +#define gcvZONE_TEXTURE (1 << 10) +#define gcvZONE_2D (1 << 11) +#define gcvZONE_3D (1 << 12) +#define gcvZONE_COMPILER (1 << 13) +#define gcvZONE_MEMORY (1 << 14) +#define gcvZONE_STATE (1 << 15) +#define gcvZONE_AUX (1 << 16) +#define gcvZONE_VERTEX (1 << 17) +#define gcvZONE_CL (1 << 18) +#define gcvZONE_VG (1 << 19) +#define gcvZONE_VX (1 << 20) +#define gcvZONE_IMAGE (1 << 21) +#define gcvZONE_UTILITY (1 << 22) +#define gcvZONE_PARAMETERS (1 << 23) +#define gcvZONE_BUFOBJ (1 << 24) +#define gcvZONE_SHADER (1 << 25) +#define gcvZONE_STREAM_OUT (1 << 26) + +/* API definitions. */ +#define gcvZONE_API_HAL ((gctUINT32) 1 << 28) +#define gcvZONE_API_EGL ((gctUINT32) 2 << 28) +#define gcvZONE_API_ES11 ((gctUINT32) 3 << 28) +#define gcvZONE_API_ES20 ((gctUINT32) 4 << 28) +#define gcvZONE_API_ES30 ((gctUINT32) 4 << 28) +#define gcvZONE_API_VG11 ((gctUINT32) 5 << 28) +#define gcvZONE_API_GL ((gctUINT32) 6 << 28) +#define gcvZONE_API_DFB ((gctUINT32) 7 << 28) +#define gcvZONE_API_GDI ((gctUINT32) 8 << 28) +#define gcvZONE_API_D3D ((gctUINT32) 9 << 28) +#define gcvZONE_API_CL ((gctUINT32) 10 << 28) +#define gcvZONE_API_VX ((gctUINT32) 11 << 28) + + +#define gcmZONE_GET_API(zone) ((zone) >> 28) +/*Set gcdZONE_MASE like 0x0 | gcvZONE_API_EGL +will enable print EGL module debug info*/ +#define gcdZONE_MASK 0x0FFFFFFF + +/* Handy zones. */ +#define gcvZONE_NONE 0 +#define gcvZONE_ALL 0x0FFFFFFF + +/*Dump API depth set 1 for API, 2 for API and API behavior*/ +#define gcvDUMP_API_DEPTH 1 + + +/******************************************************************************* +** +** gcmTRACE_ZONE +** +** Print a message to the debugger if the correct level and zone has been +** set. In retail mode this macro does nothing. +** +** ARGUMENTS: +** +** Level Level of message. 
+** Zone Zone of message. +** Message Message. +** ... Optional arguments. +*/ + +void +gckOS_DebugTraceZone( + IN gctUINT32 Level, + IN gctUINT32 Zone, + IN gctCONST_STRING Message, + ... + ); + +void +gckOS_DebugTraceZoneN( + IN gctUINT32 Level, + IN gctUINT32 Zone, + IN gctUINT ArgumentSize, + IN gctCONST_STRING Message, + ... + ); + +void +gcoOS_DebugTraceZone( + IN gctUINT32 Level, + IN gctUINT32 Zone, + IN gctCONST_STRING Message, + ... + ); + +#if gcmIS_DEBUG(gcdDEBUG_TRACE) +# define gcmTRACE_ZONE gcoOS_DebugTraceZone +# define gcmkTRACE_ZONE gckOS_DebugTraceZone +# define gcmkTRACE_ZONE_N gckOS_DebugTraceZoneN +#elif gcdHAS_ELLIPSIS +# define gcmTRACE_ZONE(...) +# define gcmkTRACE_ZONE(...) +# define gcmkTRACE_ZONE_N(...) +#else + gcmINLINE static void + __dummy_trace_zone( + IN gctUINT32 Level, + IN gctUINT32 Zone, + IN gctCONST_STRING Message, + ... + ) + { + } + + gcmINLINE static void + __dummy_trace_zone_n( + IN gctUINT32 Level, + IN gctUINT32 Zone, + IN gctUINT ArgumentSize, + IN gctCONST_STRING Message, + ... + ) + { + } + +# define gcmTRACE_ZONE __dummy_trace_zone +# define gcmkTRACE_ZONE __dummy_trace_zone +# define gcmkTRACE_ZONE_N __dummy_trace_zone_n +#endif + + +/******************************************************************************* +** +** gcmDEBUG_ONLY +** +** Execute a statement or function only in DEBUG mode. +** +** ARGUMENTS: +** +** f Statement or function to execute. +*/ +#if gcmIS_DEBUG(gcdDEBUG_CODE) +# define gcmDEBUG_ONLY(f) f +#else +# define gcmDEBUG_ONLY(f) +#endif + + +/******************************************************************************* +** +** gcmSTACK_PUSH +** gcmSTACK_POP +** gcmSTACK_DUMP +** gcmSTACK_REMOVE +** +** Push or pop a function with entry arguments on the trace stack. +** +** ARGUMENTS: +** +** Function Name of function. +** Line Line number. +** Text Optional text. +** ... Optional arguments for text. +** +** Thread Thread id. 
+*/ +void +gcoOS_StackPush( + IN gctINT8_PTR Identity, + IN gctCONST_STRING Function, + IN gctINT Line, + IN gctCONST_STRING Text, + ... + ); + +void +gcoOS_StackPop( + IN gctINT8_PTR Identity, + IN gctCONST_STRING Function + ); + +void +gcoOS_StackDump( + void); + +void +gcoOS_StackRemove( + IN gctHANDLE Thread + ); + +#if gcmIS_DEBUG(gcdDEBUG_STACK) +# define gcmSTACK_PUSH gcoOS_StackPush +# define gcmSTACK_POP gcoOS_StackPop +# define gcmSTACK_DUMP gcoOS_StackDump +# define gcmSTACK_REMOVE gcoOS_StackRemove +#elif gcdHAS_ELLIPSIS +# define gcmSTACK_PUSH(...) +# define gcmSTACK_POP(...) +# define gcmSTACK_DUMP() +# define gcmSTACK_REMOVE(...) +#else + gcmINLINE static void + __dummy_stack_push( + IN gctCONST_STRING Function, + IN gctINT Line, + IN gctCONST_STRING Text, + ... + ) + { + } + + gcmINLINE static void + __dummy_stack_pop( + IN gctINT8_PTR Identity, + IN gctCONST_STRING Function + ); + + gcmINLINE static void + __dummy_stack_remove( + IN gctHANDLE Thread + ); + +# define gcmSTACK_PUSH __dummy_stack_push +# define gcmSTACK_POP(a,b) __dummy_stack_pop +# define gcmSTACK_DUMP() +# define gcmSTACK_REMOVE(a) __dummy_stack_remove +#endif + + +/******************************************************************************* +** +** gcmBINARY_TRACE +** +** Push or pop a function with entry arguments on the trace stack. +** +** ARGUMENTS: +** +** Function Name of function +** Line Line number +** Text Optional text +** ... Optional arguments for text. +*/ +typedef struct _gcsBINARY_TRACE_MESSAGE * gcsBINARY_TRACE_MESSAGE_PTR; +typedef struct _gcsBINARY_TRACE_MESSAGE +{ + gctUINT32 signature; + gctUINT32 pid; + gctUINT32 tid; + gctUINT32 line; + gctUINT32 numArguments; + gctUINT8 payload; +} +gcsBINARY_TRACE_MESSAGE; + +void +gcoOS_BinaryTrace( + IN gctCONST_STRING Function, + IN gctINT Line, + IN gctCONST_STRING Text OPTIONAL, + ... + ); + +void +gckOS_BinaryTrace( + IN gctCONST_STRING Function, + IN gctINT Line, + IN gctCONST_STRING Text OPTIONAL, + ... 
+    );
+
+#define gcdBINARY_TRACE_MESSAGE_SIZE 240
+
+#if gcdBINARY_TRACE
+#   define gcmBINARY_TRACE              gcoOS_BinaryTrace
+#   define gcmkBINARY_TRACE             gckOS_BinaryTrace
+#elif gcdHAS_ELLIPSIS
+#   define gcmBINARY_TRACE(Function, Line, Text, ...)
+#   define gcmkBINARY_TRACE(Function, Line, Text, ...)
+#else
+    /* No-op variadic stub for compilers without variadic-macro support.
+       NOTE(review): the parameter list previously ended with a dangling
+       comma ("Text,") and no ellipsis -- a syntax error in this fallback
+       branch; restored the "..." to match the gcoOS_BinaryTrace prototype
+       and the sibling __dummy_* stubs. */
+    gcmINLINE static void
+    __dummy_binary_trace(
+        IN gctCONST_STRING Function,
+        IN gctINT Line,
+        IN gctCONST_STRING Text,
+        ...
+        )
+    {
+    }
+
+#   define gcmBINARY_TRACE              __dummy_binary_trace
+#   define gcmkBINARY_TRACE             __dummy_binary_trace
+#endif
+
+
+/*******************************************************************************
+**
+**  gcmSYSTRACE_BEGIN
+**  gcmSYSTRACE_END
+**
+**      Systrace is a performance tuning tool on Linux.
+**
+**  ARGUMENTS:
+**
+**      FuncName    Function name
+**      Zone        Systrace zone. Only specified zones are traced.
+*/
+
+void
+gcoOS_SysTraceBegin(
+    IN gctUINT32 Zone,
+    IN gctCONST_STRING FuncName
+    );
+
+void
+gcoOS_SysTraceEnd(
+    IN gctUINT32 Zone
+    );
+
+#if defined(LINUX) && gcdSYSTRACE
+#   define gcmSYSTRACE_BEGIN            gcoOS_SysTraceBegin
+#   define gcmSYSTRACE_END              gcoOS_SysTraceEnd
+#elif gcdHAS_ELLIPSIS
+#   define gcmSYSTRACE_BEGIN(...)
+#   define gcmSYSTRACE_END(...)
+#else + gcmINLINE static void + __dummy_systrace_begin( + IN gctUINT32 Zone, + IN gctCONST_STRING FuncName + ) + { + } + + gcmINLINE static void + __dummy_systrace_end( + IN gctUINT32 Zone + ) + { + } + +# define gcmSYSTRACE_BEGIN __dummy_systrace_begin +# define gcmSYSTRACE_END __dummy_systrace_end +#endif + + +/******************************************************************************\ +******************************** Logging Macros ******************************** +\******************************************************************************/ + +#define gcdHEADER_LEVEL gcvLEVEL_VERBOSE + +/* Always enable header/footer when systrace build is on */ +#if defined(LINUX) && gcdSYSTRACE +#undef gcdEMPTY_HEADER_FOOTER +#endif + +#ifndef gcdEMPTY_HEADER_FOOTER +#define gcdEMPTY_HEADER_FOOTER 0 +#endif + +#if gcdENABLE_PROFILING +void +gcoOS_ProfileDB( + IN gctCONST_STRING Function, + IN OUT gctBOOL_PTR Initialized + ); + +#define gcmHEADER() \ + gctINT8 __user__ = 1; \ + static gctBOOL __profile__initialized__ = gcvFALSE; \ + gcmSTACK_PUSH(&__user__, __FUNCTION__, __LINE__, gcvNULL, gcvNULL); \ + gcoOS_ProfileDB(__FUNCTION__, &__profile__initialized__) + +#define gcmHEADER_ARG(...) \ + gctINT8 __user__ = 1; \ + static gctBOOL __profile__initialized__ = gcvFALSE; \ + gcmSTACK_PUSH(&__user__, __FUNCTION__, __LINE__, Text, __VA_ARGS__); \ + gcoOS_ProfileDB(__FUNCTION__, &__profile__initialized__) + +#define gcmFOOTER() \ + gcmSTACK_POP(&__user__, __FUNCTION__); \ + gcoOS_ProfileDB(__FUNCTION__, gcvNULL) + +#define gcmFOOTER_NO() \ + gcmSTACK_POP(&__user__, __FUNCTION__); \ + gcoOS_ProfileDB(__FUNCTION__, gcvNULL) + +#define gcmFOOTER_ARG(...) 
\ + gcmSTACK_POP(&__user__, __FUNCTION__); \ + gcoOS_ProfileDB(__FUNCTION__, gcvNULL) + +#define gcmFOOTER_KILL() \ + gcmSTACK_POP(&__user__, __FUNCTION__); \ + gcoOS_ProfileDB(gcvNULL, gcvNULL) + +#else /* !gcdENABLE_PROFILING */ + +#if gcdEMPTY_HEADER_FOOTER +# define gcmHEADER() +#elif gcdHAS_ELLIPSIS +#define gcmHEADER() \ + gctINT8 __user__ = 1; \ + gctINT8_PTR __user_ptr__ = &__user__; \ + gcmSTACK_PUSH(__user_ptr__, __FUNCTION__, __LINE__, gcvNULL, gcvNULL); \ + gcmSYSTRACE_BEGIN(_GC_OBJ_ZONE, __FUNCTION__); \ + gcmBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \ + gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \ + "++%s(%d)", __FUNCTION__, __LINE__) +#else + gcmINLINE static void + __dummy_header(void) + { + } +# define gcmHEADER __dummy_header +#endif + +#if gcdHAS_ELLIPSIS +#if gcdEMPTY_HEADER_FOOTER +# define gcmHEADER_ARG(Text, ...) +#else +# define gcmHEADER_ARG(Text, ...) \ + gctINT8 __user__ = 1; \ + gctINT8_PTR __user_ptr__ = &__user__; \ + gcmSTACK_PUSH(__user_ptr__, __FUNCTION__, __LINE__, Text, __VA_ARGS__); \ + gcmSYSTRACE_BEGIN(_GC_OBJ_ZONE, __FUNCTION__); \ + gcmBINARY_TRACE(__FUNCTION__, __LINE__, Text, __VA_ARGS__); \ + gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \ + "++%s(%d): " Text, __FUNCTION__, __LINE__, __VA_ARGS__) +#endif +#else + gcmINLINE static void + __dummy_header_arg( + IN gctCONST_STRING Text, + ... 
+ ) + { + } +# define gcmHEADER_ARG __dummy_header_arg +#endif + +#if gcdEMPTY_HEADER_FOOTER +# define gcmFOOTER() +#elif gcdHAS_ELLIPSIS +# define gcmFOOTER() \ + gcmSTACK_POP(__user_ptr__, __FUNCTION__); \ + gcmSYSTRACE_END(_GC_OBJ_ZONE); \ + gcmBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \ + gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \ + "--%s(%d): status=%d(%s)", \ + __FUNCTION__, __LINE__, \ + status, gcmSTATUS2NAME(status)); \ + *__user_ptr__ -= 1 +#else + gcmINLINE static void + __dummy_footer(void) + { + } +# define gcmFOOTER __dummy_footer +#endif + +#if gcdEMPTY_HEADER_FOOTER +# define gcmFOOTER_NO() +#elif gcdHAS_ELLIPSIS +#define gcmFOOTER_NO() \ + gcmSTACK_POP(__user_ptr__, __FUNCTION__); \ + gcmSYSTRACE_END(_GC_OBJ_ZONE); \ + gcmBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \ + gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \ + "--%s(%d)", __FUNCTION__, __LINE__); \ + *__user_ptr__ -= 1 +#else + gcmINLINE static void + __dummy_footer_no(void) + { + } +# define gcmFOOTER_NO __dummy_footer_no +#endif + +#if gcdEMPTY_HEADER_FOOTER +# define gcmFOOTER_KILL() +#elif gcdHAS_ELLIPSIS +#define gcmFOOTER_KILL() \ + gcmSTACK_POP(__user_ptr__, __FUNCTION__); \ + gcmSYSTRACE_END(_GC_OBJ_ZONE); \ + gcmBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \ + gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \ + "--%s(%d)", __FUNCTION__, __LINE__); \ + *__user_ptr__ -= 1 +#else + gcmINLINE static void + __dummy_footer_kill(void) + { + } +# define gcmFOOTER_KILL __dummy_footer_kill +#endif + +#if gcdHAS_ELLIPSIS +#if gcdEMPTY_HEADER_FOOTER +# define gcmFOOTER_ARG(Text, ...) +#else +# define gcmFOOTER_ARG(Text, ...) 
\ + gcmSTACK_POP(__user_ptr__, __FUNCTION__); \ + gcmSYSTRACE_END(_GC_OBJ_ZONE); \ + gcmBINARY_TRACE(__FUNCTION__, __LINE__, Text, __VA_ARGS__); \ + gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \ + "--%s(%d): " Text, __FUNCTION__, __LINE__, __VA_ARGS__); \ + *__user_ptr__ -= 1 +#endif +#else + gcmINLINE static void + __dummy_footer_arg( + IN gctCONST_STRING Text, + ... + ) + { + } +# define gcmFOOTER_ARG __dummy_footer_arg +#endif + +#endif /* gcdENABLE_PROFILING */ + +#if gcdHAS_ELLIPSIS +#define gcmkHEADER() \ + gctINT8 __kernel__ = 1; \ + gctINT8_PTR __kernel_ptr__ = &__kernel__; \ + gcmkBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \ + gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \ + "++%s(%d)", __FUNCTION__, __LINE__) +#else + gcmINLINE static void + __dummy_kheader(void) + { + } +# define gcmkHEADER __dummy_kheader +#endif + +#if gcdHAS_ELLIPSIS +# define gcmkHEADER_ARG(Text, ...) \ + gctINT8 __kernel__ = 1; \ + gctINT8_PTR __kernel_ptr__ = &__kernel__; \ + gcmkBINARY_TRACE(__FUNCTION__, __LINE__, Text, __VA_ARGS__); \ + gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \ + "++%s(%d): " Text, __FUNCTION__, __LINE__, __VA_ARGS__) +#else + gcmINLINE static void + __dummy_kheader_arg( + IN gctCONST_STRING Text, + ... 
+ ) + { + } +# define gcmkHEADER_ARG __dummy_kheader_arg +#endif + +#if gcdHAS_ELLIPSIS +#define gcmkFOOTER() \ + gcmkBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, status); \ + gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \ + "--%s(%d): status=%d(%s)", \ + __FUNCTION__, __LINE__, status, gcmkSTATUS2NAME(status)); \ + *__kernel_ptr__ -= 1 +#else + gcmINLINE static void + __dummy_kfooter(void) + { + } +# define gcmkFOOTER __dummy_kfooter +#endif + +#if gcdHAS_ELLIPSIS +#define gcmkFOOTER_NO() \ + gcmkBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \ + gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \ + "--%s(%d)", __FUNCTION__, __LINE__); \ + *__kernel_ptr__ -= 1 +#else + gcmINLINE static void + __dummy_kfooter_no(void) + { + } +# define gcmkFOOTER_NO __dummy_kfooter_no +#endif + +#if gcdHAS_ELLIPSIS +# define gcmkFOOTER_ARG(Text, ...) \ + gcmkBINARY_TRACE(__FUNCTION__, __LINE__, Text, __VA_ARGS__); \ + gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \ + "--%s(%d): " Text, \ + __FUNCTION__, __LINE__, __VA_ARGS__); \ + *__kernel_ptr__ -= 1 +#else + gcmINLINE static void + __dummy_kfooter_arg( + IN gctCONST_STRING Text, + ... + ) + { + } +# define gcmkFOOTER_ARG __dummy_kfooter_arg +#endif + +#define gcmOPT_VALUE(ptr) (((ptr) == gcvNULL) ? 0 : *(ptr)) +#define gcmOPT_VALUE_INDEX(ptr, index) (((ptr) == gcvNULL) ? 0 : ptr[index]) +#define gcmOPT_POINTER(ptr) (((ptr) == gcvNULL) ? gcvNULL : *(ptr)) +#define gcmOPT_STRING(ptr) (((ptr) == gcvNULL) ? "(nil)" : (ptr)) + +void +gckOS_Print( + IN gctCONST_STRING Message, + ... + ); + +void +gckOS_PrintN( + IN gctUINT ArgumentSize, + IN gctCONST_STRING Message, + ... + ); + +void +gckOS_CopyPrint( + IN gctCONST_STRING Message, + ... + ); + +void +gcoOS_Print( + IN gctCONST_STRING Message, + ... 
+ ); + +#define gcmPRINT gcoOS_Print +#define gcmkPRINT gckOS_Print +#define gcmkPRINT_N gckOS_PrintN + +#if gcdPRINT_VERSION +# define gcmPRINT_VERSION() do { \ + _gcmPRINT_VERSION(gcm); \ + gcmSTACK_DUMP(); \ + } while (0) +# define gcmkPRINT_VERSION() _gcmPRINT_VERSION(gcmk) +# define _gcmPRINT_VERSION(prefix) \ + prefix##TRACE(gcvLEVEL_ERROR, \ + "Vivante HAL version %d.%d.%d build %d", \ + gcvVERSION_MAJOR, gcvVERSION_MINOR, \ + gcvVERSION_PATCH, gcvVERSION_BUILD) +#else +# define gcmPRINT_VERSION() do { gcmSTACK_DUMP(); } while (gcvFALSE) +# define gcmkPRINT_VERSION() do { } while (gcvFALSE) +#endif + +typedef enum _gceDUMP_BUFFER +{ + gcvDUMP_BUFFER_CONTEXT, + gcvDUMP_BUFFER_USER, + gcvDUMP_BUFFER_KERNEL, + gcvDUMP_BUFFER_LINK, + gcvDUMP_BUFFER_WAITLINK, + gcvDUMP_BUFFER_FROM_USER, +} +gceDUMP_BUFFER; + +void +gckOS_DumpBuffer( + IN gckOS Os, + IN gctPOINTER Buffer, + IN gctSIZE_T Size, + IN gceDUMP_BUFFER Type, + IN gctBOOL CopyMessage + ); + +#define gcmkDUMPBUFFER gckOS_DumpBuffer + +#if gcdDUMP_COMMAND +# define gcmkDUMPCOMMAND(Os, Buffer, Size, Type, CopyMessage) \ + gcmkDUMPBUFFER(Os, Buffer, Size, Type, CopyMessage) +#else +# define gcmkDUMPCOMMAND(Os, Buffer, Size, Type, CopyMessage) +#endif + +#if gcmIS_DEBUG(gcdDEBUG_CODE) + +void +gckOS_DebugFlush( + gctCONST_STRING CallerName, + gctUINT LineNumber, + gctUINT32 DmaAddress + ); + +# define gcmkDEBUGFLUSH(DmaAddress) \ + gckOS_DebugFlush(__FUNCTION__, __LINE__, DmaAddress) +#else +# define gcmkDEBUGFLUSH(DmaAddress) +#endif + +/******************************************************************************* +** +** gcmDUMP_FRAMERATE +** +** Print average frame rate +** +*/ +#if gcdDUMP_FRAMERATE + gceSTATUS + gcfDumpFrameRate( + void + ); +# define gcmDUMP_FRAMERATE gcfDumpFrameRate +#elif gcdHAS_ELLIPSIS +# define gcmDUMP_FRAMERATE(...) 
+#else + gcmINLINE static void + __dummy_dump_frame_rate( + void + ) + { + } +# define gcmDUMP_FRAMERATE __dummy_dump_frame_rate +#endif + + +/******************************************************************************* +** +** gcmDUMP +** +** Print a dump message. +** +** ARGUMENTS: +** +** gctSTRING Message. +** +** ... Optional arguments. +*/ + +#if gcdDUMP || gcdDUMP_2DVG + gceSTATUS + gcfDump( + IN gcoOS Os, + IN gctCONST_STRING String, + ... + ); +# define gcmDUMP gcfDump +#elif gcdHAS_ELLIPSIS +# define gcmDUMP(...) +#else + gcmINLINE static void + __dummy_dump( + IN gcoOS Os, + IN gctCONST_STRING Message, + ... + ) + { + } +# define gcmDUMP __dummy_dump +#endif + +/******************************************************************************* +** +** gcmDUMP_DATA +** +** Add data to the dump. +** +** ARGUMENTS: +** +** gctSTRING Tag +** Tag for dump. +** +** gctPOINTER Logical +** Logical address of buffer. +** +** gctSIZE_T Bytes +** Number of bytes. +*/ + +#if gcdDUMP || gcdDUMP_COMMAND + gceSTATUS + gcfDumpData( + IN gcoOS Os, + IN gctSTRING Tag, + IN gctPOINTER Logical, + IN gctSIZE_T Bytes + ); +# define gcmDUMP_DATA gcfDumpData +#elif gcdHAS_ELLIPSIS +# define gcmDUMP_DATA(...) +#else + gcmINLINE static void + __dummy_dump_data( + IN gcoOS Os, + IN gctSTRING Tag, + IN gctPOINTER Logical, + IN gctSIZE_T Bytes + ) + { + } +# define gcmDUMP_DATA __dummy_dump_data +#endif + +/******************************************************************************* +** +** gcmDUMP_BUFFER +** +** Print a buffer to the dump. +** +** ARGUMENTS: +** +** gctSTRING Tag +** Tag for dump. +** +** gctUINT32 Physical +** Physical address of buffer. +** +** gctPOINTER Logical +** Logical address of buffer. +** +** gctUINT32 Offset +** Offset into buffer. +** +** gctSIZE_T Bytes +** Number of bytes. 
+*/ + +#if gcdDUMP || gcdDUMP_COMMAND || gcdDUMP_2DVG +gceSTATUS +gcfDumpBuffer( + IN gcoOS Os, + IN gctSTRING Tag, + IN gctUINT32 Physical, + IN gctPOINTER Logical, + IN gctSIZE_T Offset, + IN gctSIZE_T Bytes + ); +# define gcmDUMP_BUFFER gcfDumpBuffer +#elif gcdHAS_ELLIPSIS +# define gcmDUMP_BUFFER(...) +#else + gcmINLINE static void + __dummy_dump_buffer( + IN gcoOS Os, + IN gctSTRING Tag, + IN gctUINT32 Physical, + IN gctPOINTER Logical, + IN gctUINT32 Offset, + IN gctSIZE_T Bytes + ) + { + } +# define gcmDUMP_BUFFER __dummy_dump_buffer +#endif + +#if gcdDUMP +void +gcfDumpLock( + void + ); +# define gcmDUMP_LOCK gcfDumpLock +#elif gcdHAS_ELLIPSIS +# define gcmDUMP_LOCK(...) +#else + gcmINLINE static void + __dummy_dump_lock( + void + ) + { + } +# define gcmDUMP_LOCK __dummy_dump_lock +#endif + +#if gcdDUMP +void +gcfDumpUnlock( + void + ); +# define gcmDUMP_UNLOCK gcfDumpUnlock +#elif gcdHAS_ELLIPSIS +# define gcmDUMP_UNLOCK(...) +#else + gcmINLINE static void + __dummy_dump_unlock( + void + ) + { + } +# define gcmDUMP_UNLOCK __dummy_dump_unlock +#endif + +/******************************************************************************* +** +** gcmDUMP_API +** +** Print a dump message for a high level API prefixed by the function name. +** +** ARGUMENTS: +** +** gctSTRING Message. +** +** ... Optional arguments. +*/ +gceSTATUS gcfDumpApi(IN gctCONST_STRING String, ...); +#if gcdDUMP_API +# define gcmDUMP_API gcfDumpApi +#elif gcdHAS_ELLIPSIS +# define gcmDUMP_API(...) +#else + gcmINLINE static void + __dummy_dump_api( + IN gctCONST_STRING Message, + ... + ) + { + } +# define gcmDUMP_API __dummy_dump_api +#endif + +/******************************************************************************* +** +** gcmDUMP_API_ARRAY +** +** Print an array of data. +** +** ARGUMENTS: +** +** gctUINT32_PTR Pointer to array. +** gctUINT32 Size. 
+*/ +gceSTATUS gcfDumpArray(IN gctCONST_POINTER Data, IN gctUINT32 Size); +#if gcdDUMP_API +# define gcmDUMP_API_ARRAY gcfDumpArray +#elif gcdHAS_ELLIPSIS +# define gcmDUMP_API_ARRAY(...) +#else + gcmINLINE static void + __dummy_dump_api_array( + IN gctCONST_POINTER Data, + IN gctUINT32 Size + ) + { + } +# define gcmDUMP_API_ARRAY __dummy_dump_api_array +#endif + +/******************************************************************************* +** +** gcmDUMP_API_ARRAY_TOKEN +** +** Print an array of data terminated by a token. +** +** ARGUMENTS: +** +** gctUINT32_PTR Pointer to array. +** gctUINT32 Termination. +*/ +gceSTATUS gcfDumpArrayToken(IN gctCONST_POINTER Data, IN gctUINT32 Termination); +#if gcdDUMP_API +# define gcmDUMP_API_ARRAY_TOKEN gcfDumpArrayToken +#elif gcdHAS_ELLIPSIS +# define gcmDUMP_API_ARRAY_TOKEN(...) +#else + gcmINLINE static void + __dummy_dump_api_array_token( + IN gctCONST_POINTER Data, + IN gctUINT32 Termination + ) + { + } +# define gcmDUMP_API_ARRAY_TOKEN __dummy_dump_api_array_token +#endif + +/******************************************************************************* +** +** gcmDUMP_API_DATA +** +** Print an array of bytes. +** +** ARGUMENTS: +** +** gctCONST_POINTER Pointer to array. +** gctSIZE_T Size. +*/ +gceSTATUS gcfDumpApiData(IN gctCONST_POINTER Data, IN gctSIZE_T Size); +#if gcdDUMP_API +# define gcmDUMP_API_DATA gcfDumpApiData +#elif gcdHAS_ELLIPSIS +# define gcmDUMP_API_DATA(...) +#else + gcmINLINE static void + __dummy_dump_api_data( + IN gctCONST_POINTER Data, + IN gctSIZE_T Size + ) + { + } +# define gcmDUMP_API_DATA __dummy_dump_api_data +#endif + +/******************************************************************************* +** gcmDUMP_2D_COMMAND +** +** Print the 2D command buffer. +** +** ARGUMENTS: +** +** gctUINT32_PTR Pointer to the command buffer. +** gctUINT32 Command buffer size. 
+*/ +gceSTATUS gcfDump2DCommand(IN gctUINT32_PTR Command, IN gctUINT32 Size); +#if gcdDUMP_2D +# define gcmDUMP_2D_COMMAND(cmd, size) \ + if (Hardware->newDump2DLevel > 1) \ + gcfDump2DCommand(cmd, size) +#elif gcdHAS_ELLIPSIS +# define gcmDUMP_2D_COMMAND(...) +#else + gcmINLINE static void + __dummy_dump_2d_command( + IN gctUINT32_PTR Command, + IN gctUINT32 Size + ) + { + } +# define gcmDUMP_2D_COMMAND __dummy_dump_2d_command +#endif + +/******************************************************************************* +** gcmDUMP_2D_SURFACE +** +** Print the 2D surface memory. +** +** ARGUMENTS: +** +** gctBOOL Src. +** gctUINT32 Address. +*/ +gceSTATUS gcfDump2DSurface(IN gctBOOL Src, IN gctUINT32 Address); +#if gcdDUMP_2D +# define gcmDUMP_2D_SURFACE(src, addr) \ + if (Hardware->newDump2DLevel > 2) \ + gcfDump2DSurface(src, addr) +#elif gcdHAS_ELLIPSIS +# define gcmDUMP_2D_SURFACE(...) +#else + gcmINLINE static void + __dummy_dump_2d_surface( + IN gctBOOL Src, + IN gctUINT32 Address + ) + { + } +# define gcmDUMP_2D_SURFACE __dummy_dump_2d_surface +#endif + +/******************************************************************************* +** gcmDUMP_ADD_MEMORY_INFO +** +** Record the memory info. +** +** ARGUMENTS: +** +** gctUINT32 Address. +** gctSIZE_T Size. +*/ +gceSTATUS gcfAddMemoryInfo(IN gctUINT32 GPUAddress, IN gctPOINTER Logical, IN gctUINT32 Physical, IN gctUINT32 Size); +#if gcdDUMP_2D +# define gcmDUMP_ADD_MEMORY_INFO gcfAddMemoryInfo +#elif gcdHAS_ELLIPSIS +# define gcmDUMP_ADD_MEMORY_INFO(...) +#else + gcmINLINE static void + __dummy_dump_add_memory_info( + IN gctUINT32 GPUAddress, + IN gctPOINTER Logical, + IN gctUINT32 Physical, + IN gctUINT32 Size + ) + { + } +# define gcmDUMP_ADD_MEMORY_INFO __dummy_dump_add_memory_info +#endif + +/******************************************************************************* +** gcmDUMP_DEL_MEMORY_INFO +** +** Record the memory info. +** +** ARGUMENTS: +** +** gctUINT32 Address. 
+*/ +gceSTATUS gcfDelMemoryInfo(IN gctUINT32 Address); +#if gcdDUMP_2D +# define gcmDUMP_DEL_MEMORY_INFO gcfDelMemoryInfo +#elif gcdHAS_ELLIPSIS +# define gcmDUMP_DEL_MEMORY_INFO(...) +#else + gcmINLINE static void + __dummy_dump_del_memory_info( + IN gctUINT32 Address + ) + { + } +# define gcmDUMP_DEL_MEMORY_INFO __dummy_dump_del_memory_info +#endif + +/******************************************************************************* +** +** gcmTRACE_RELEASE +** +** Print a message to the shader debugger. +** +** ARGUMENTS: +** +** message Message. +** ... Optional arguments. +*/ + +#define gcmTRACE_RELEASE gcoOS_DebugShaderTrace + +void +gcoOS_DebugShaderTrace( + IN gctCONST_STRING Message, + ... + ); + +void +gcoOS_SetDebugShaderFiles( + IN gctCONST_STRING VSFileName, + IN gctCONST_STRING FSFileName + ); + +void +gcoOS_SetDebugShaderFileType( + IN gctUINT32 ShaderType + ); + +void +gcoOS_EnableDebugBuffer( + IN gctBOOL Enable + ); + +/******************************************************************************* +** +** gcmBREAK +** +** Break into the debugger. In retail mode this macro does nothing. +** +** ARGUMENTS: +** +** None. +*/ + +void +gcoOS_DebugBreak( + void + ); + +void +gckOS_DebugBreak( + void + ); + +#if gcmIS_DEBUG(gcdDEBUG_BREAK) +# define gcmBREAK gcoOS_DebugBreak +# define gcmkBREAK gckOS_DebugBreak +#else +# define gcmBREAK() +# define gcmkBREAK() +#endif + +/******************************************************************************* +** +** gcmASSERT +** +** Evaluate an expression and break into the debugger if the expression +** evaluates to false. In retail mode this macro does nothing. +** +** ARGUMENTS: +** +** exp Expression to evaluate. 
+*/
+#if gcmIS_DEBUG(gcdDEBUG_ASSERT)
+/* Shared implementation: 'prefix' selects the user-mode (gcm) or kernel-mode
+   (gcmk) TRACE/BREAK handlers.  Logs the location and the failing expression
+   text, then breaks into the debugger. */
+#   define _gcmASSERT(prefix, exp) \
+        do \
+        { \
+            if (!(exp)) \
+            { \
+                prefix##TRACE(gcvLEVEL_ERROR, \
+                              #prefix "ASSERT at %s(%d)", \
+                              __FUNCTION__, __LINE__); \
+                prefix##TRACE(gcvLEVEL_ERROR, \
+                              "(%s)", #exp); \
+                prefix##BREAK(); \
+            } \
+        } \
+        while (gcvFALSE)
+#   define gcmASSERT(exp)           _gcmASSERT(gcm, exp)
+#   define gcmkASSERT(exp)          _gcmASSERT(gcmk, exp)
+#else
+/* Retail build: the assertion expands to nothing, so 'exp' is NOT evaluated
+   (do not put side effects inside gcmASSERT). */
+#   define gcmASSERT(exp)
+#   define gcmkASSERT(exp)
+#endif
+
+/*******************************************************************************
+**
+**  gcmVERIFY
+**
+**      Verify if an expression returns true. If the expression does not
+**      evaluate to true, an assertion will happen in debug mode.
+**
+**  ARGUMENTS:
+**
+**      exp     Expression to evaluate.
+*/
+#if gcmIS_DEBUG(gcdDEBUG_ASSERT)
+#   define gcmVERIFY(exp)           gcmASSERT(exp)
+#   define gcmkVERIFY(exp)          gcmkASSERT(exp)
+#else
+/* Unlike gcmASSERT, gcmVERIFY still evaluates 'exp' in retail builds, so any
+   side effects of the expression are preserved. */
+#   define gcmVERIFY(exp)           exp
+#   define gcmkVERIFY(exp)          exp
+#endif
+
+/*******************************************************************************
+**
+**  gcmVERIFY_OK
+**
+**      Verify a function returns gcvSTATUS_OK. If the function does not return
+**      gcvSTATUS_OK, an assertion will happen in debug mode.
+**
+**  ARGUMENTS:
+**
+**      func    Function to evaluate.
+*/ + +void +gcoOS_Verify( + IN gceSTATUS status + ); + +void +gckOS_Verify( + IN gceSTATUS status + ); + +#if gcmIS_DEBUG(gcdDEBUG_ASSERT) +# define gcmVERIFY_OK(func) \ + do \ + { \ + gceSTATUS verifyStatus = func; \ + gcoOS_Verify(verifyStatus); \ + if (verifyStatus != gcvSTATUS_OK) \ + { \ + gcmTRACE(\ + gcvLEVEL_ERROR, \ + "gcmVERIFY_OK(%d): function returned %d", \ + __LINE__, verifyStatus \ + ); \ + } \ + gcmASSERT(verifyStatus == gcvSTATUS_OK); \ + } \ + while (gcvFALSE) +# define gcmkVERIFY_OK(func) \ + do \ + { \ + gceSTATUS verifyStatus = func; \ + if (verifyStatus != gcvSTATUS_OK) \ + { \ + gcmkTRACE(\ + gcvLEVEL_ERROR, \ + "gcmkVERIFY_OK(%d): function returned %d", \ + __LINE__, verifyStatus \ + ); \ + } \ + gckOS_Verify(verifyStatus); \ + gcmkASSERT(verifyStatus == gcvSTATUS_OK); \ + } \ + while (gcvFALSE) +#else +# define gcmVERIFY_OK(func) func +# define gcmkVERIFY_OK(func) func +#endif + +gctCONST_STRING +gcoOS_DebugStatus2Name( + gceSTATUS status + ); + +gctCONST_STRING +gckOS_DebugStatus2Name( + gceSTATUS status + ); + +#if gcmIS_DEBUG(gcdDEBUG) +# define gcmSTATUS2NAME gcoOS_DebugStatus2Name +# define gcmkSTATUS2NAME gckOS_DebugStatus2Name +#else +# define gcmSTATUS2NAME(status) status +# define gcmkSTATUS2NAME(status) status +#endif + +/******************************************************************************* +** +** gcmERR_BREAK +** +** Executes a break statement on error. +** +** ASSUMPTIONS: +** +** 'status' variable of gceSTATUS type must be defined. +** +** ARGUMENTS: +** +** func Function to evaluate. 
+*/ +#define _gcmERR_BREAK(prefix, func) \ + status = func; \ + if (gcmIS_ERROR(status)) \ + { \ + prefix##PRINT_VERSION(); \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "ERR_BREAK: status=%d(%s) @ %s(%d)", \ + status, gcmSTATUS2NAME(status), __FUNCTION__, __LINE__); \ + break; \ + } \ + do { } while (gcvFALSE) +#define _gcmkERR_BREAK(prefix, func) \ + status = func; \ + if (gcmIS_ERROR(status)) \ + { \ + prefix##PRINT_VERSION(); \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "ERR_BREAK: status=%d(%s) @ %s(%d)", \ + status, gckOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \ + break; \ + } \ + do { } while (gcvFALSE) +#define gcmERR_BREAK(func) _gcmERR_BREAK(gcm, func) +#define gcmkERR_BREAK(func) _gcmkERR_BREAK(gcmk, func) + +/******************************************************************************* +** +** gcmERR_RETURN +** +** Executes a return on error. +** +** ASSUMPTIONS: +** +** 'status' variable of gceSTATUS type must be defined. +** +** ARGUMENTS: +** +** func Function to evaluate. +*/ +#define _gcmERR_RETURN(prefix, func) \ + status = func; \ + if (gcmIS_ERROR(status)) \ + { \ + prefix##PRINT_VERSION(); \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "ERR_RETURN: status=%d(%s) @ %s(%d)", \ + status, gcmSTATUS2NAME(status), __FUNCTION__, __LINE__); \ + prefix##FOOTER(); \ + return status; \ + } \ + do { } while (gcvFALSE) +#define _gcmkERR_RETURN(prefix, func) \ + status = func; \ + if (gcmIS_ERROR(status)) \ + { \ + prefix##PRINT_VERSION(); \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "ERR_RETURN: status=%d(%s) @ %s(%d)", \ + status, gcmkSTATUS2NAME(status), __FUNCTION__, __LINE__); \ + prefix##FOOTER(); \ + return status; \ + } \ + do { } while (gcvFALSE) +#define gcmERR_RETURN(func) _gcmERR_RETURN(gcm, func) +#define gcmkERR_RETURN(func) _gcmkERR_RETURN(gcmk, func) + + +/******************************************************************************* +** +** gcmONERROR +** +** Jump to the error handler in case there is an error. 
+** +** ASSUMPTIONS: +** +** 'status' variable of gceSTATUS type must be defined. +** +** ARGUMENTS: +** +** func Function to evaluate. +*/ +#define _gcmONERROR(prefix, func) \ + do \ + { \ + status = func; \ + if (gcmIS_ERROR(status)) \ + { \ + prefix##PRINT_VERSION(); \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "ONERROR: status=%d(%s) @ %s(%d)", \ + status, gcmSTATUS2NAME(status), __FUNCTION__, __LINE__); \ + goto OnError; \ + } \ + } \ + while (gcvFALSE) +#define _gcmkONERROR(prefix, func) \ + do \ + { \ + status = func; \ + if (gcmIS_ERROR(status)) \ + { \ + prefix##PRINT_VERSION(); \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "ONERROR: status=%d(%s) @ %s(%d)", \ + status, gcmkSTATUS2NAME(status), __FUNCTION__, __LINE__); \ + goto OnError; \ + } \ + } \ + while (gcvFALSE) +#define gcmONERROR(func) _gcmONERROR(gcm, func) +#define gcmkONERROR(func) _gcmkONERROR(gcmk, func) + +#define gcmGET_INDEX_SIZE(type, size) \ + switch (type) \ + { \ + case gcvINDEX_8: \ + size = 1; \ + break; \ + case gcvINDEX_16: \ + size = 2; \ + break; \ + case gcvINDEX_32: \ + size = 4; \ + break; \ + default: \ + gcmONERROR(gcvSTATUS_INVALID_ARGUMENT); \ + } \ + +/******************************************************************************* +** +** gcmkSAFECASTSIZET +** +** Check wether value of a gctSIZE_T varible beyond the capability +** of 32bits GPU hardware. 
+** +** ASSUMPTIONS: +** +** +** +** ARGUMENTS: +** +** x A gctUINT32 variable +** y A gctSIZE_T variable +*/ +#define gcmkSAFECASTSIZET(x, y) \ + do \ + { \ + gctUINT32 tmp = (gctUINT32)(y); \ + if (gcmSIZEOF(gctSIZE_T) > gcmSIZEOF(gctUINT32)) \ + { \ + gcmkASSERT(tmp <= gcvMAXUINT32); \ + } \ + (x) = tmp; \ + } \ + while (gcvFALSE) + +#define gcmSAFECASTSIZET(x, y) \ + do \ + { \ + gctUINT32 tmp = (gctUINT32)(y); \ + if (gcmSIZEOF(gctSIZE_T) > gcmSIZEOF(gctUINT32)) \ + { \ + gcmASSERT(tmp <= gcvMAXUINT32); \ + } \ + (x) = tmp; \ + } \ + while (gcvFALSE) + +/******************************************************************************* +** +** gcmkSAFECASTPHYSADDRT +** +** Check whether value of a gctPHYS_ADDR_T variable beyond the capability +** of 32bits GPU hardware. +** +** ASSUMPTIONS: +** +** +** +** ARGUMENTS: +** +** x A gctUINT32 variable +** y A gctPHYS_ADDR_T variable +*/ +#define gcmkSAFECASTPHYSADDRT(x, y) \ + do \ + { \ + gctUINT32 tmp = (gctUINT32)(y); \ + if (gcmSIZEOF(gctPHYS_ADDR_T) > gcmSIZEOF(gctUINT32)) \ + { \ + gcmkASSERT(tmp <= gcvMAXUINT32); \ + } \ + (x) = tmp; \ + } \ + while (gcvFALSE) + +/******************************************************************************* +** +** gcmSAFECASTPHYSADDRT +** +** Check whether value of a gctPHYS_ADDR_T variable beyond the capability +** of 32bits GPU hardware. +** +** ASSUMPTIONS: +** +** +** +** ARGUMENTS: +** +** x A gctUINT32 variable +** y A gctPHYS_ADDR_T variable +*/ +#define gcmSAFECASTPHYSADDRT(x, y) \ + do \ + { \ + gctUINT32 tmp = (gctUINT32)(y); \ + if (gcmSIZEOF(gctPHYS_ADDR_T) > gcmSIZEOF(gctUINT32)) \ + { \ + gcmASSERT(tmp <= gcvMAXUINT32); \ + } \ + (x) = tmp; \ + } \ + while (gcvFALSE) + +/******************************************************************************* +** +** gcmVERIFY_LOCK +** +** Verifies whether the surface is locked. +** +** ARGUMENTS: +** +** surfaceInfo Pointer to the surface iniformational structure. 
+*/ +#define gcmVERIFY_LOCK(surfaceInfo) \ + if (!surfaceInfo->node.valid) \ + { \ + gcmONERROR(gcvSTATUS_MEMORY_UNLOCKED); \ + } \ + +/******************************************************************************* +** +** gcmVERIFY_NODE_LOCK +** +** Verifies whether the surface node is locked. +** +** ARGUMENTS: +** +** surfaceInfo Pointer to the surface iniformational structure. +*/ +#define gcmVERIFY_NODE_LOCK(surfaceNode) \ + if (!(surfaceNode)->valid) \ + { \ + status = gcvSTATUS_MEMORY_UNLOCKED; \ + break; \ + } \ + do { } while (gcvFALSE) + +/******************************************************************************* +** +** gcmBADOBJECT_BREAK +** +** Executes a break statement on bad object. +** +** ARGUMENTS: +** +** obj Object to test. +** t Expected type of the object. +*/ +#define gcmBADOBJECT_BREAK(obj, t) \ + if ((obj == gcvNULL) \ + || (((gcsOBJECT *)(obj))->type != t) \ + ) \ + { \ + status = gcvSTATUS_INVALID_OBJECT; \ + break; \ + } \ + do { } while (gcvFALSE) + +/******************************************************************************* +** +** gcmCHECK_STATUS +** +** Executes a break statement on error. +** +** ASSUMPTIONS: +** +** 'status' variable of gceSTATUS type must be defined. +** +** ARGUMENTS: +** +** func Function to evaluate. 
+*/ +#define _gcmCHECK_STATUS(prefix, func) \ + do \ + { \ + last = func; \ + if (gcmIS_ERROR(last)) \ + { \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "CHECK_STATUS: status=%d(%s) @ %s(%d)", \ + last, gcmSTATUS2NAME(last), __FUNCTION__, __LINE__); \ + status = last; \ + } \ + } \ + while (gcvFALSE) +#define _gcmkCHECK_STATUS(prefix, func) \ + do \ + { \ + last = func; \ + if (gcmIS_ERROR(last)) \ + { \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "CHECK_STATUS: status=%d(%s) @ %s(%d)", \ + last, gcmkSTATUS2NAME(last), __FUNCTION__, __LINE__); \ + status = last; \ + } \ + } \ + while (gcvFALSE) +#define gcmCHECK_STATUS(func) _gcmCHECK_STATUS(gcm, func) +#define gcmkCHECK_STATUS(func) _gcmkCHECK_STATUS(gcmk, func) + +/******************************************************************************* +** +** gcmVERIFY_ARGUMENT +** +** Assert if an argument does not apply to the specified expression. If +** the argument evaluates to false, gcvSTATUS_INVALID_ARGUMENT will be +** returned from the current function. In retail mode this macro does +** nothing. +** +** ARGUMENTS: +** +** arg Argument to evaluate. +*/ +# define _gcmVERIFY_ARGUMENT(prefix, arg) \ + do \ + { \ + if (!(arg)) \ + { \ + prefix##TRACE(gcvLEVEL_ERROR, #prefix "VERIFY_ARGUMENT failed:"); \ + prefix##ASSERT(arg); \ + prefix##FOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT); \ + return gcvSTATUS_INVALID_ARGUMENT; \ + } \ + } \ + while (gcvFALSE) +# define gcmVERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcm, arg) +# define gcmkVERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcmk, arg) + +/******************************************************************************* +** +** gcmDEBUG_VERIFY_ARGUMENT +** +** Works just like gcmVERIFY_ARGUMENT, but is only valid in debug mode. +** Use this to verify arguments inside non-public API functions. 
+*/ +#if gcdDEBUG +# define gcmDEBUG_VERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcm, arg) +# define gcmkDEBUG_VERIFY_ARGUMENT(arg) _gcmkVERIFY_ARGUMENT(gcm, arg) +#else +# define gcmDEBUG_VERIFY_ARGUMENT(arg) +# define gcmkDEBUG_VERIFY_ARGUMENT(arg) +#endif + +/******************************************************************************* +** +** gcmVERIFY_ARGUMENT_RETURN +** +** Assert if an argument does not apply to the specified expression. If +** the argument evaluates to false, gcvSTATUS_INVALID_ARGUMENT will be +** returned from the current function. In retail mode this macro does +** nothing. +** +** ARGUMENTS: +** +** arg Argument to evaluate. +*/ +# define _gcmVERIFY_ARGUMENT_RETURN(prefix, arg, value) \ + do \ + { \ + if (!(arg)) \ + { \ + prefix##TRACE(gcvLEVEL_ERROR, \ + #prefix "gcmVERIFY_ARGUMENT_RETURN failed:"); \ + prefix##ASSERT(arg); \ + prefix##FOOTER_ARG("value=%d", value); \ + return value; \ + } \ + } \ + while (gcvFALSE) +# define gcmVERIFY_ARGUMENT_RETURN(arg, value) \ + _gcmVERIFY_ARGUMENT_RETURN(gcm, arg, value) +# define gcmkVERIFY_ARGUMENT_RETURN(arg, value) \ + _gcmVERIFY_ARGUMENT_RETURN(gcmk, arg, value) + +#define MAX_LOOP_COUNT 0x7FFFFFFF + +/******************************************************************************\ +****************************** User Debug Option ****************************** +\******************************************************************************/ + +/* User option. */ +typedef enum _gceDEBUG_MSG +{ + gcvDEBUG_MSG_NONE, + gcvDEBUG_MSG_ERROR, + gcvDEBUG_MSG_WARNING +} +gceDEBUG_MSG; + +typedef struct _gcsUSER_DEBUG_OPTION +{ + gceDEBUG_MSG debugMsg; +} +gcsUSER_DEBUG_OPTION; + +gcsUSER_DEBUG_OPTION * +gcoHAL_GetUserDebugOption( + void + ); + +#if gcdHAS_ELLIPSIS +#define gcmUSER_DEBUG_MSG(level, ...) \ + do \ + { \ + if (level <= gcoHAL_GetUserDebugOption()->debugMsg) \ + { \ + gcoOS_Print(__VA_ARGS__); \ + } \ + } while (gcvFALSE) + +#define gcmUSER_DEBUG_ERROR_MSG(...) 
gcmUSER_DEBUG_MSG(gcvDEBUG_MSG_ERROR, "Error: " __VA_ARGS__) +#define gcmUSER_DEBUG_WARNING_MSG(...) gcmUSER_DEBUG_MSG(gcvDEBUG_MSG_WARNING, "Warring: " __VA_ARGS__) +#else +#define gcmUSER_DEBUG_MSG +#define gcmUSER_DEBUG_ERROR_MSG +#define gcmUSER_DEBUG_WARNING_MSG +#endif + +/******************************************************************************* +** +** A set of macros to aid state loading. +** +** ARGUMENTS: +** +** CommandBuffer Pointer to a gcoCMDBUF object. +** StateDelta Pointer to a gcsSTATE_DELTA state delta structure. +** Memory Destination memory pointer of gctUINT32_PTR type. +** PartOfContext Whether or not the state is a part of the context. +** FixedPoint Whether or not the state is of the fixed point format. +** Count Number of consecutive states to be loaded. +** Address State address. +** Data Data to be set to the state. +*/ + +/*----------------------------------------------------------------------------*/ + +#if gcmIS_DEBUG(gcdDEBUG_CODE) + +# define gcmSTORELOADSTATE(CommandBuffer, Memory, Address, Count) \ + CommandBuffer->lastLoadStatePtr = gcmPTR_TO_UINT64(Memory); \ + CommandBuffer->lastLoadStateAddress = Address; \ + CommandBuffer->lastLoadStateCount = Count + +# define gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address) \ + gcmASSERT(\ + (gctUINT) (Memory - gcmUINT64_TO_TYPE(CommandBuffer->lastLoadStatePtr, gctUINT32_PTR) - 1) \ + == \ + (gctUINT) (Address - CommandBuffer->lastLoadStateAddress) \ + ); \ + \ + gcmASSERT(CommandBuffer->lastLoadStateCount > 0); \ + \ + CommandBuffer->lastLoadStateCount -= 1 + +# define gcmVERIFYLOADSTATEDONE(CommandBuffer) \ + gcmASSERT(CommandBuffer->lastLoadStateCount == 0); + +# define gcmDEFINELOADSTATEBASE() \ + gctUINT32_PTR LoadStateBase; + +# define gcmSETLOADSTATEBASE(CommandBuffer, OutSide) \ + if (OutSide) \ + {\ + LoadStateBase = (gctUINT32_PTR)*OutSide; \ + }\ + else\ + {\ + LoadStateBase = (gctUINT_PTR)CommandBuffer->buffer;\ + } + + +# define gcmVERIFYLOADSTATEALIGNED(CommandBuffer, 
Memory) \ + gcmASSERT(((Memory - LoadStateBase) & 1) == 0); + +# define gcmUNSETLOADSTATEBASE() \ + LoadStateBase = LoadStateBase; + +#else + +# define gcmSTORELOADSTATE(CommandBuffer, Memory, Address, Count) +# define gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address) +# define gcmVERIFYLOADSTATEDONE(CommandBuffer) + +# define gcmDEFINELOADSTATEBASE() +# define gcmSETLOADSTATEBASE(CommandBuffer, OutSide) +# define gcmVERIFYLOADSTATEALIGNED(CommandBuffer, Memory) +# define gcmUNSETLOADSTATEBASE() + +#endif + +#if gcdSECURE_USER + +# define gcmDEFINESECUREUSER() \ + gctUINT __secure_user_offset__; \ + gctUINT32_PTR __secure_user_hintArray__; + +# define gcmBEGINSECUREUSER() \ + __secure_user_offset__ = reserve->lastOffset; \ + \ + __secure_user_hintArray__ = gcmUINT64_TO_PTR(reserve->hintArrayTail) + +# define gcmENDSECUREUSER() \ + reserve->hintArrayTail = gcmPTR_TO_UINT64(__secure_user_hintArray__) + +# define gcmSKIPSECUREUSER() \ + __secure_user_offset__ += gcmSIZEOF(gctUINT32) + +# define gcmUPDATESECUREUSER() \ + *__secure_user_hintArray__ = __secure_user_offset__; \ + \ + __secure_user_offset__ += gcmSIZEOF(gctUINT32); \ + __secure_user_hintArray__ += 1 + +#else + +# define gcmDEFINESECUREUSER() +# define gcmBEGINSECUREUSER() +# define gcmENDSECUREUSER() +# define gcmSKIPSECUREUSER() +# define gcmUPDATESECUREUSER() + +#endif + +/*----------------------------------------------------------------------------*/ + +#if gcdDUMP +# define gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, Data) \ + if (FixedPoint) \ + { \ + gcmDUMP(gcvNULL, "#[state.x 0x%04X 0x%08X]", \ + Address, Data \ + ); \ + } \ + else \ + { \ + gcmDUMP(gcvNULL, "#[state 0x%04X 0x%08X]", \ + Address, Data \ + ); \ + } +#else +# define gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, Data) +#endif + +#define gcmDEFINESTATEBUFFER(CommandBuffer, StateDelta, Memory, ReserveSize) \ + gcmDEFINESECUREUSER() \ + gctSIZE_T ReserveSize; \ + gcoCMDBUF CommandBuffer; \ + gctUINT32_PTR Memory; \ + 
gcsSTATE_DELTA_PTR StateDelta; \ + gceENGINE CurrentEngine = gcvENGINE_RENDER + +#define gcmBEGINSTATEBUFFER(Hardware, CommandBuffer, StateDelta, Memory, ReserveSize) \ +{ \ + gcmONERROR(gcoBUFFER_Reserve(\ + Hardware->engine[CurrentEngine].buffer, ReserveSize, gcvTRUE, gcvCOMMAND_3D, &CommandBuffer \ + )); \ + \ + Memory = (gctUINT32_PTR) gcmUINT64_TO_PTR(CommandBuffer->lastReserve); \ + \ + StateDelta = Hardware->delta; \ + \ + gcmBEGINSECUREUSER(); \ +} + +#define gcmENDSTATEBUFFER(Hardware, CommandBuffer, Memory, ReserveSize) \ +{ \ + gcmENDSECUREUSER(); \ + \ + gcmASSERT(\ + gcmUINT64_TO_TYPE(CommandBuffer->lastReserve, gctUINT8_PTR) + ReserveSize \ + == \ + (gctUINT8_PTR) Memory \ + ); \ +} + +/*----------------------------------------------------------------------------*/ + +#define gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, Count) \ +{ \ + gcmASSERT(((Memory - gcmUINT64_TO_TYPE(CommandBuffer->lastReserve, gctUINT32_PTR)) & 1) == 0); \ + gcmASSERT((gctUINT32)Count <= 1024); \ + \ + gcmVERIFYLOADSTATEDONE(CommandBuffer); \ + \ + gcmSTORELOADSTATE(CommandBuffer, Memory, Address, Count); \ + \ + *Memory++ \ + = gcmSETFIELDVALUE(0, AQ_COMMAND_LOAD_STATE_COMMAND, OPCODE, LOAD_STATE) \ + | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, FLOAT, FixedPoint) \ + | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, COUNT, Count) \ + | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, ADDRESS, Address); \ + \ + gcmSKIPSECUREUSER(); \ +} + +#define gcmENDSTATEBATCH(CommandBuffer, Memory) \ +{ \ + gcmVERIFYLOADSTATEDONE(CommandBuffer); \ + \ + gcmASSERT(((Memory - gcmUINT64_TO_TYPE(CommandBuffer->lastReserve, gctUINT32_PTR)) & 1) == 0); \ +} + +/*----------------------------------------------------------------------------*/ + +#define gcmSETSTATEDATA(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Data) \ +{ \ + gctUINT32 __temp_data32__; \ + \ + gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \ + \ + gcmSAFECASTSIZET(__temp_data32__, Data); \ 
+ \ + *Memory++ = __temp_data32__; \ + \ + gcoHARDWARE_UpdateDelta(\ + StateDelta, Address, 0, __temp_data32__ \ + ); \ + \ + gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \ + \ + gcmUPDATESECUREUSER(); \ +} + +#define gcmSETSTATEDATAWITHMASK(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Mask, Data) \ +{ \ + gctUINT32 __temp_data32__; \ + \ + gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \ + \ + __temp_data32__ = Data; \ + \ + *Memory++ = __temp_data32__; \ + \ + gcoHARDWARE_UpdateDelta(\ + StateDelta, Address, Mask, __temp_data32__ \ + ); \ + \ + gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \ + \ + gcmUPDATESECUREUSER(); \ +} + + +#define gcmSETCTRLSTATE(StateDelta, CommandBuffer, Memory, Address, Data) \ +{ \ + gctUINT32 __temp_data32__; \ + \ + gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \ + \ + __temp_data32__ = Data; \ + \ + *Memory++ = __temp_data32__; \ + \ + gcmDUMPSTATEDATA(StateDelta, gcvFALSE, Address, __temp_data32__); \ + \ + gcmSKIPSECUREUSER(); \ +} + +#define gcmSETFILLER(CommandBuffer, Memory) \ +{ \ + gcmVERIFYLOADSTATEDONE(CommandBuffer); \ + \ + Memory += 1; \ + \ + gcmSKIPSECUREUSER(); \ +} + +/*----------------------------------------------------------------------------*/ + +#define gcmSETSINGLESTATE(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Data) \ +{ \ + gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, 1); \ + gcmSETSTATEDATA(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Data); \ + gcmENDSTATEBATCH(CommandBuffer, Memory); \ +} + +#define gcmSETSINGLESTATEWITHMASK(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Mask, Data) \ +{ \ + gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, 1); \ + gcmSETSTATEDATAWITHMASK(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Mask, Data); \ + gcmENDSTATEBATCH(CommandBuffer, Memory); \ +} + + +#define gcmSETSINGLECTRLSTATE(StateDelta, CommandBuffer, Memory, 
FixedPoint, \ + Address, Data) \ +{ \ + gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, 1); \ + gcmSETCTRLSTATE(StateDelta, CommandBuffer, Memory, Address, Data); \ + gcmENDSTATEBATCH(CommandBuffer, Memory); \ +} + + + +#define gcmSETSEMASTALLPIPE(StateDelta, CommandBuffer, Memory, Data) \ +{ \ + gcmSETSINGLECTRLSTATE(StateDelta, CommandBuffer, Memory, gcvFALSE, 0x0E02, Data); \ + \ + *Memory++ = gcmSETFIELDVALUE(0, STALL_COMMAND, OPCODE, STALL); \ + \ + *Memory++ = Data; \ + \ + gcmDUMP(gcvNULL, "#[stall 0x%08X 0x%08X]", \ + gcmSETFIELDVALUE(0, AQ_SEMAPHORE, SOURCE, FRONT_END), \ + gcmSETFIELDVALUE(0, AQ_SEMAPHORE, DESTINATION, PIXEL_ENGINE)); \ + \ + gcmSKIPSECUREUSER(); \ +} + +/******************************************************************************* +** +** gcmSETSTARTDECOMMAND +** +** Form a START_DE command. +** +** ARGUMENTS: +** +** Memory Destination memory pointer of gctUINT32_PTR type. +** Count Number of the rectangles. +*/ + +#define gcmSETSTARTDECOMMAND(Memory, Count) \ +{ \ + *Memory++ \ + = gcmSETFIELDVALUE(0, AQ_COMMAND_START_DE_COMMAND, OPCODE, START_DE) \ + | gcmSETFIELD (0, AQ_COMMAND_START_DE_COMMAND, COUNT, Count) \ + | gcmSETFIELD (0, AQ_COMMAND_START_DE_COMMAND, DATA_COUNT, 0); \ + \ + *Memory++ = 0xDEADDEED; \ +} + +/***************************************** +** Temp command buffer macro +*/ +#define gcmDEFINESTATEBUFFER_NEW(CommandBuffer, StateDelta, Memory) \ + gcmDEFINESECUREUSER() \ + gcmDEFINELOADSTATEBASE() \ + gcsTEMPCMDBUF CommandBuffer = gcvNULL; \ + gctUINT32_PTR Memory; \ + gcsSTATE_DELTA_PTR StateDelta; \ + gceENGINE CurrentEngine = gcvENGINE_RENDER + + +#define gcmBEGINSTATEBUFFER_NEW(Hardware, CommandBuffer, StateDelta, Memory, OutSide) \ +{ \ + if (OutSide) \ + {\ + Memory = (gctUINT32_PTR)*OutSide; \ + }\ + else \ + {\ + gcmONERROR(gcoBUFFER_StartTEMPCMDBUF(\ + Hardware->engine[CurrentEngine].buffer, &CommandBuffer \ + ));\ + \ + Memory = (gctUINT32_PTR)(CommandBuffer->buffer); \ + \ + }\ + StateDelta = 
Hardware->delta; \ + \ + gcmBEGINSECUREUSER(); \ + gcmSETLOADSTATEBASE(CommandBuffer,OutSide);\ +} + +#define gcmENDSTATEBUFFER_NEW(Hardware, CommandBuffer, Memory, OutSide) \ +{ \ + gcmENDSECUREUSER(); \ + \ + if (OutSide) \ + {\ + *OutSide = Memory; \ + }\ + else \ + {\ + CommandBuffer->currentByteSize = (gctUINT32)((gctUINT8_PTR)Memory - \ + (gctUINT8_PTR)CommandBuffer->buffer); \ + \ + gcmONERROR(gcoBUFFER_EndTEMPCMDBUF(Hardware->engine[CurrentEngine].buffer, gcvFALSE));\ + }\ + gcmUNSETLOADSTATEBASE()\ +} + +#define gcmDEFINECTRLSTATEBUFFER(CommandBuffer, Memory) \ + gcmDEFINESECUREUSER() \ + gcmDEFINELOADSTATEBASE() \ + gcsTEMPCMDBUF CommandBuffer = gcvNULL; \ + gctUINT32_PTR Memory; \ + gceENGINE CurrentEngine = gcvENGINE_RENDER + +#define gcmBEGINCTRLSTATEBUFFER(Hardware, CommandBuffer, Memory, OutSide) \ +{ \ + if (OutSide) \ + { \ + Memory = (gctUINT32_PTR)*OutSide; \ + } \ + else \ + { \ + gcmONERROR(gcoBUFFER_StartTEMPCMDBUF(\ + Hardware->engine[CurrentEngine].buffer, &CommandBuffer \ + )); \ + \ + Memory = (gctUINT32_PTR)(CommandBuffer->buffer); \ + } \ + gcmBEGINSECUREUSER(); \ + gcmSETLOADSTATEBASE(CommandBuffer,OutSide); \ +} + +/*----------------------------------------------------------------------------*/ + +#define gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, FixedPoint, Address, Count) \ +{ \ + gcmVERIFYLOADSTATEALIGNED(CommandBuffer,Memory);\ + gcmASSERT((gctUINT32)Count <= 1024); \ + \ + *Memory++ \ + = gcmSETFIELDVALUE(0, AQ_COMMAND_LOAD_STATE_COMMAND, OPCODE, LOAD_STATE) \ + | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, FLOAT, FixedPoint) \ + | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, COUNT, Count) \ + | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, ADDRESS, Address); \ + \ + gcmSKIPSECUREUSER(); \ +} + +#define gcmENDSTATEBATCH_NEW(CommandBuffer, Memory) \ + gcmVERIFYLOADSTATEALIGNED(CommandBuffer,Memory); + +/*----------------------------------------------------------------------------*/ + +#define 
gcmSETSTATEDATA_NEW(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Data) \ +{ \ + gctUINT32 __temp_data32__; \ + \ + gcmSAFECASTSIZET(__temp_data32__, Data); \ + \ + *Memory++ = __temp_data32__; \ + \ + gcoHARDWARE_UpdateDelta(\ + StateDelta, Address, 0, __temp_data32__ \ + ); \ + \ + gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \ + \ + gcmUPDATESECUREUSER(); \ +} + +#define gcmSETSTATEDATAWITHMASK_NEW(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Mask, Data) \ +{ \ + gctUINT32 __temp_data32__; \ + \ + __temp_data32__ = Data; \ + \ + *Memory++ = __temp_data32__; \ + \ + gcoHARDWARE_UpdateDelta(\ + StateDelta, Address, Mask, __temp_data32__ \ + ); \ + \ + gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \ + \ + gcmUPDATESECUREUSER(); \ +} + + +#define gcmSETCTRLSTATE_NEW(StateDelta, CommandBuffer, Memory, Address, Data) \ +{ \ + gctUINT32 __temp_data32__; \ + \ + __temp_data32__ = Data; \ + \ + *Memory++ = __temp_data32__; \ + \ + gcmDUMPSTATEDATA(StateDelta, gcvFALSE, Address, __temp_data32__); \ + \ + gcmSKIPSECUREUSER(); \ +} + +#define gcmSETFILLER_NEW(CommandBuffer, Memory) \ +{ \ + Memory += 1; \ + \ + gcmSKIPSECUREUSER(); \ +} + +/*----------------------------------------------------------------------------*/ + +#define gcmSETSINGLESTATE_NEW(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Data) \ +{ \ + gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, FixedPoint, Address, 1); \ + gcmSETSTATEDATA_NEW(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Data); \ + gcmENDSTATEBATCH_NEW(CommandBuffer, Memory); \ +} + +#define gcmSETSINGLESTATEWITHMASK_NEW(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Mask, Data) \ +{ \ + gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, FixedPoint, Address, 1); \ + gcmSETSTATEDATAWITHMASK_NEW(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Mask, Data); \ + gcmENDSTATEBATCH_NEW(CommandBuffer, Memory); \ +} + + +#define 
gcmSETSINGLECTRLSTATE_NEW(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Data) \ +{ \ + gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, FixedPoint, Address, 1); \ + gcmSETCTRLSTATE_NEW(StateDelta, CommandBuffer, Memory, Address, Data); \ + gcmENDSTATEBATCH_NEW(CommandBuffer, Memory); \ +} + + + +#define gcmSETSEMASTALLPIPE_NEW(StateDelta, CommandBuffer, Memory, Data) \ +{ \ + gcmSETSINGLECTRLSTATE_NEW(StateDelta, CommandBuffer, Memory, gcvFALSE, 0x0E02, Data); \ + \ + *Memory++ = gcmSETFIELDVALUE(0, STALL_COMMAND, OPCODE, STALL); \ + \ + *Memory++ = Data; \ + \ + gcmDUMP(gcvNULL, "#[stall 0x%08X 0x%08X]", \ + gcmSETFIELDVALUE(0, AQ_SEMAPHORE, SOURCE, FRONT_END), \ + gcmSETFIELDVALUE(0, AQ_SEMAPHORE, DESTINATION, PIXEL_ENGINE)); \ + \ + gcmSKIPSECUREUSER(); \ +} + +#define gcmSETSTARTDECOMMAND_NEW(CommandBuffer, Memory, Count) \ +{ \ + *Memory++ \ + = gcmSETFIELDVALUE(0, AQ_COMMAND_START_DE_COMMAND, OPCODE, START_DE) \ + | gcmSETFIELD (0, AQ_COMMAND_START_DE_COMMAND, COUNT, Count) \ + | gcmSETFIELD (0, AQ_COMMAND_START_DE_COMMAND, DATA_COUNT, 0); \ + \ + *Memory++ = 0xDEADDEED; \ + \ +} + +#define gcmSETSTATEDATA_NEW_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Data) \ +{ \ + gctUINT32 __temp_data32__; \ + \ + __temp_data32__ = Data; \ + \ + *Memory++ = __temp_data32__; \ + \ + gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \ + \ + gcmUPDATESECUREUSER(); \ +} + +#define gcmSETSTATEDATAWITHMASK_NEW_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Mask, Data) \ +{ \ + gctUINT32 __temp_data32__; \ + \ + __temp_data32__ = Data; \ + \ + *Memory++ = __temp_data32__; \ + \ + gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \ + \ + gcmUPDATESECUREUSER(); \ +} + +#define gcmSETSINGLESTATE_NEW_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Data) \ +{ \ + gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, FixedPoint, Address, 1); \ + gcmSETSTATEDATA_NEW_FAST(StateDelta, 
CommandBuffer, Memory, FixedPoint, \ + Address, Data); \ + gcmENDSTATEBATCH_NEW(CommandBuffer, Memory); \ +} + +#define gcmSETSINGLESTATEWITHMASK_NEW_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Mask, Data) \ +{ \ + gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, FixedPoint, Address, 1); \ + gcmSETSTATEDATAWITHMASK_NEW_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Mask, Data); \ + gcmENDSTATEBATCH_NEW(CommandBuffer, Memory); \ +} + +#define gcmSETSTATEDATA_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Data) \ +{ \ + gctUINT32 __temp_data32__; \ + \ + gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \ + \ + gcmSAFECASTSIZET(__temp_data32__, Data); \ + \ + *Memory++ = __temp_data32__; \ + \ + gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \ + \ + gcmUPDATESECUREUSER(); \ +} + +#define gcmSETSTATEDATAWITHMASK_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Mask, Data) \ +{ \ + gctUINT32 __temp_data32__; \ + \ + gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \ + \ + __temp_data32__ = Data; \ + \ + *Memory++ = __temp_data32__; \ + \ + gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \ + \ + gcmUPDATESECUREUSER(); \ +} + +#define gcmSETSINGLESTATE_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Data) \ +{ \ + gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, 1); \ + gcmSETSTATEDATA_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Data); \ + gcmENDSTATEBATCH(CommandBuffer, Memory); \ +} + +#define gcmSETSINGLESTATEWITHMASK_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Mask, Data) \ +{ \ + gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, 1); \ + gcmSETSTATEDATAWITHMASK_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \ + Address, Mask, Data); \ + gcmENDSTATEBATCH(CommandBuffer, Memory); \ +} + +#define gcmDEFINESTATEBUFFER_NEW_FAST(CommandBuffer, Memory) \ + 
gcmDEFINESECUREUSER() \ + gcmDEFINELOADSTATEBASE() \ + gcsTEMPCMDBUF CommandBuffer = gcvNULL; \ + gctUINT32_PTR Memory; + +#define gcmDEFINESTATEBUFFER_FAST(CommandBuffer, Memory, ReserveSize) \ + gcmDEFINESECUREUSER() \ + gctSIZE_T ReserveSize; \ + gcoCMDBUF CommandBuffer; \ + gctUINT32_PTR Memory; + +#define gcmBEGINSTATEBUFFER_FAST(Hardware, CommandBuffer, Memory, ReserveSize) \ +{ \ + gcmONERROR(gcoBUFFER_Reserve(\ + Hardware->engine[gcvENGINE_RENDER].buffer, ReserveSize, gcvTRUE, &CommandBuffer \ + )); \ + \ + Memory = (gctUINT32_PTR) gcmUINT64_TO_PTR(CommandBuffer->lastReserve); \ + \ + gcmBEGINSECUREUSER(); \ +} + +#define gcmBEGINSTATEBUFFER_NEW_FAST(Hardware, CommandBuffer, Memory, OutSide) \ +{ \ + if (OutSide) \ + {\ + Memory = (gctUINT32_PTR)*OutSide; \ + }\ + else \ + {\ + gcmONERROR(gcoBUFFER_StartTEMPCMDBUF(\ + Hardware->engine[gcvENGINE_RENDER].buffer, &CommandBuffer \ + ));\ + \ + Memory = (gctUINT32_PTR)(CommandBuffer->buffer); \ + \ + }\ + \ + gcmBEGINSECUREUSER(); \ + gcmSETLOADSTATEBASE(CommandBuffer,OutSide);\ +} + +#define gcmENDSTATEBUFFER_NEW_FAST(Hardware, CommandBuffer, Memory, OutSide) \ +{ \ + gcmENDSECUREUSER(); \ + \ + if (OutSide) \ + {\ + *OutSide = Memory; \ + }\ + else \ + {\ + CommandBuffer->currentByteSize = (gctUINT32)((gctUINT8_PTR)Memory - \ + (gctUINT8_PTR)CommandBuffer->buffer); \ + \ + gcmONERROR(gcoBUFFER_EndTEMPCMDBUF(Hardware->engine[gcvENGINE_RENDER].buffer, gcvFALSE));\ + }\ + gcmUNSETLOADSTATEBASE()\ +} + +/******************************************************************************* +** +** gcmCONFIGUREUNIFORMS +** +** Configure uniforms according to chip and numConstants. 
+*/ +#if !gcdENABLE_UNIFIED_CONSTANT +#define gcmCONFIGUREUNIFORMS(ChipModel, ChipRevision, NumConstants, \ + UnifiedConst, VsConstBase, PsConstBase, VsConstMax, PsConstMax, ConstMax) \ +{ \ + if (ChipModel == gcv2000 && (ChipRevision == 0x5118 || ChipRevision == 0x5140)) \ + { \ + UnifiedConst = gcvFALSE; \ + VsConstBase = 0x1400; \ + PsConstBase = 0x1C00; \ + VsConstMax = 256; \ + PsConstMax = 64; \ + ConstMax = 320; \ + } \ + else if (NumConstants == 320) \ + { \ + UnifiedConst = gcvFALSE; \ + VsConstBase = 0x1400; \ + PsConstBase = 0x1C00; \ + VsConstMax = 256; \ + PsConstMax = 64; \ + ConstMax = 320; \ + } \ + /* All GC1000 series chips can only support 64 uniforms for ps on non-unified const mode. */ \ + else if (NumConstants > 256 && ChipModel == gcv1000) \ + { \ + UnifiedConst = gcvFALSE; \ + VsConstBase = 0x1400; \ + PsConstBase = 0x1C00; \ + VsConstMax = 256; \ + PsConstMax = 64; \ + ConstMax = 320; \ + } \ + else if (NumConstants > 256) \ + { \ + UnifiedConst = gcvFALSE; \ + VsConstBase = 0x1400; \ + PsConstBase = 0x1C00; \ + VsConstMax = 256; \ + PsConstMax = 256; \ + ConstMax = 512; \ + } \ + else if (NumConstants == 256) \ + { \ + UnifiedConst = gcvFALSE; \ + VsConstBase = 0x1400; \ + PsConstBase = 0x1C00; \ + VsConstMax = 256; \ + PsConstMax = 256; \ + ConstMax = 512; \ + } \ + else \ + { \ + UnifiedConst = gcvFALSE; \ + VsConstBase = 0x1400; \ + PsConstBase = 0x1C00; \ + VsConstMax = 168; \ + PsConstMax = 64; \ + ConstMax = 232; \ + } \ +} +#else +#define gcmCONFIGUREUNIFORMS(ChipModel, ChipRevision, Halti5Avail, NumConstants, \ + UnifiedConst, VsConstBase, PsConstBase, VsConstMax, PsConstMax, ConstMax) \ +{ \ + if (NumConstants > 256) \ + { \ + UnifiedConst = gcvTRUE; \ + if (Halti5Avail) \ + { \ + VsConstBase = 0xD000; \ + PsConstBase = 0xD800; \ + } \ + else \ + {\ + VsConstBase = 0xC000; \ + PsConstBase = 0xC000; \ + }\ + if ((ChipModel == gcv880) && ((ChipRevision & 0xfff0) == 0x5120)) \ + { \ + VsConstMax = 512; \ + PsConstMax = 64; \ + 
ConstMax = 576; \ + } \ + else \ + { \ + VsConstMax = gcmMIN(512, NumConstants - 64); \ + PsConstMax = gcmMIN(512, NumConstants - 64); \ + ConstMax = NumConstants; \ + } \ + } \ + else if (NumConstants == 256) \ + { \ + if (ChipModel == gcv2000 && (ChipRevision == 0x5118 || ChipRevision == 0x5140)) \ + { \ + UnifiedConst = gcvFALSE; \ + VsConstBase = 0x1400; \ + PsConstBase = 0x1C00; \ + VsConstMax = 256; \ + PsConstMax = 64; \ + ConstMax = 320; \ + } \ + else \ + { \ + UnifiedConst = gcvFALSE; \ + VsConstBase = 0x1400; \ + PsConstBase = 0x1C00; \ + VsConstMax = 256; \ + PsConstMax = 256; \ + ConstMax = 512; \ + } \ + } \ + else \ + { \ + UnifiedConst = gcvFALSE; \ + VsConstBase = 0x1400; \ + PsConstBase = 0x1C00; \ + VsConstMax = 168; \ + PsConstMax = 64; \ + ConstMax = 232; \ + } \ +} +#endif + +#define gcmAnyTileStatusEnableForFullMultiSlice(SurfView, anyTsEnableForMultiSlice)\ +{\ + gctUINT i = 0; \ + for (; i < (SurfView->surf->requestD); i++)\ + {\ + if ((SurfView->surf->tileStatusNode.pool != gcvPOOL_UNKNOWN) && \ + (SurfView->surf->tileStatusDisabled[i] == gcvFALSE))\ + {\ + *anyTsEnableForMultiSlice = gcvTRUE;\ + break;\ + }\ + }\ +}\ + +#define gcmAnyTileStatusEnableForMultiSlice(SurfView, anyTsEnableForMultiSlice)\ +{\ + gctUINT i = SurfView->firstSlice; \ + for (; i < (SurfView->firstSlice + SurfView->numSlices); i++)\ + {\ + if ((SurfView->surf->tileStatusNode.pool != gcvPOOL_UNKNOWN) && \ + (SurfView->surf->tileStatusDisabled[i] == gcvFALSE))\ + {\ + *anyTsEnableForMultiSlice = gcvTRUE;\ + break;\ + }\ + }\ +}\ + +#define gcmCanTileStatusEnabledForMultiSlice(SurfView, canTsEnabled)\ +{\ + if (SurfView->numSlices > 1)\ + {\ + if (SurfView->surf->tileStatusNode.pool != gcvPOOL_UNKNOWN) \ + {\ + gctUINT i = 0;\ + for (; i < SurfView->numSlices; i++)\ + {\ + if (SurfView->surf->tileStatusDisabled[i] == gcvTRUE)\ + {\ + *canTsEnabled = gcvFALSE;\ + break;\ + }\ + if (SurfView->surf->fcValue[i] != SurfView->surf->fcValue[0])\ + {\ + *canTsEnabled = 
gcvFALSE;\ + break;\ + }\ + \ + if (SurfView->surf->fcValueUpper[i] != SurfView->surf->fcValueUpper[0])\ + {\ + *canTsEnabled = gcvFALSE;\ + break;\ + }\ + }\ + }\ + else\ + {\ + *canTsEnabled = gcvFALSE;\ + }\ + }\ + else\ + {\ + if ((SurfView->surf->tileStatusNode.pool == gcvPOOL_UNKNOWN) || (SurfView->surf->tileStatusDisabled[SurfView->firstSlice] == gcvTRUE))\ + {\ + *canTsEnabled = gcvFALSE;\ + }\ + }\ +}\ + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_base_h_ */ + + diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_driver.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_driver.h new file mode 100644 index 000000000000..573efc673f3d --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_driver.h @@ -0,0 +1,1321 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#ifndef __gc_hal_driver_h_ +#define __gc_hal_driver_h_ + +#include "gc_hal_enum.h" +#include "gc_hal_types.h" + + +#ifdef __cplusplus +extern "C" { +#endif + +/******************************************************************************\ +******************************* I/O Control Codes ****************************** +\******************************************************************************/ + +#define gcvHAL_CLASS "galcore" +#define IOCTL_GCHAL_INTERFACE 30000 +#define IOCTL_GCHAL_KERNEL_INTERFACE 30001 +#define IOCTL_GCHAL_TERMINATE 30002 + +/******************************************************************************\ +********************************* Command Codes ******************************** +\******************************************************************************/ + +typedef enum _gceHAL_COMMAND_CODES +{ + /* Generic query. */ + gcvHAL_QUERY_VIDEO_MEMORY, + gcvHAL_QUERY_CHIP_IDENTITY, + gcvHAL_QUERY_CHIP_FREQUENCY, + + /* Contiguous memory. */ + gcvHAL_ALLOCATE_NON_PAGED_MEMORY, + gcvHAL_FREE_NON_PAGED_MEMORY, + gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY, + gcvHAL_FREE_CONTIGUOUS_MEMORY, + + /* Video memory allocation. */ + gcvHAL_ALLOCATE_VIDEO_MEMORY, /* Enforced alignment. */ + gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY, /* No alignment. */ + gcvHAL_RELEASE_VIDEO_MEMORY, + + /* Physical-to-logical mapping. */ + gcvHAL_MAP_MEMORY, + gcvHAL_UNMAP_MEMORY, + + /* Logical-to-physical mapping. */ + gcvHAL_MAP_USER_MEMORY, + gcvHAL_UNMAP_USER_MEMORY, + + /* Surface lock/unlock. */ + gcvHAL_LOCK_VIDEO_MEMORY, + gcvHAL_UNLOCK_VIDEO_MEMORY, + + /* Event queue. 
*/ + gcvHAL_EVENT_COMMIT, + + gcvHAL_USER_SIGNAL, + gcvHAL_SIGNAL, + gcvHAL_WRITE_DATA, + + gcvHAL_COMMIT, + gcvHAL_STALL, + + gcvHAL_READ_REGISTER, + gcvHAL_WRITE_REGISTER, + + gcvHAL_GET_PROFILE_SETTING, + gcvHAL_SET_PROFILE_SETTING, + + gcvHAL_PROFILE_REGISTERS_2D, + gcvHAL_READ_ALL_PROFILE_REGISTERS_PART1, + gcvHAL_READ_ALL_PROFILE_REGISTERS_PART2, + gcvHAL_READ_PROFILER_REGISTER_SETTING, + + /* Power management. */ + gcvHAL_SET_POWER_MANAGEMENT_STATE, + gcvHAL_QUERY_POWER_MANAGEMENT_STATE, + + gcvHAL_GET_BASE_ADDRESS, + + gcvHAL_SET_IDLE, /* reserved */ + + /* Queries. */ + gcvHAL_QUERY_KERNEL_SETTINGS, + + /* Reset. */ + gcvHAL_RESET, + + /* Map physical address into handle. */ + gcvHAL_MAP_PHYSICAL, + + /* Debugger stuff. */ + gcvHAL_DEBUG, + + /* Cache stuff. */ + gcvHAL_CACHE, + + /* TimeStamp */ + gcvHAL_TIMESTAMP, + + /* Database. */ + gcvHAL_DATABASE, + + /* Version. */ + gcvHAL_VERSION, + + /* Chip info */ + gcvHAL_CHIP_INFO, + + /* Process attaching/detaching. */ + gcvHAL_ATTACH, + gcvHAL_DETACH, + + /* Set timeOut value */ + gcvHAL_SET_TIMEOUT, + + /* Frame database. */ + gcvHAL_GET_FRAME_INFO, + + /* GPU profile dump */ + gcvHAL_DUMP_GPU_PROFILE, + + gcvHAL_QUERY_COMMAND_BUFFER, + + gcvHAL_COMMIT_DONE, + + /* GPU and event dump */ + gcvHAL_DUMP_GPU_STATE, + gcvHAL_DUMP_EVENT, + + /* Virtual command buffer. */ + gcvHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER, + gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER, + + /* FSCALE_VAL. */ + gcvHAL_SET_FSCALE_VALUE, + gcvHAL_GET_FSCALE_VALUE, + + /* Export video memory as dma_buf fd */ + gcvHAL_EXPORT_VIDEO_MEMORY, + gcvHAL_NAME_VIDEO_MEMORY, + gcvHAL_IMPORT_VIDEO_MEMORY, + + /* Reset time stamp. */ + gcvHAL_QUERY_RESET_TIME_STAMP, + + /* Multi-GPU read/write. */ + gcvHAL_READ_REGISTER_EX, + gcvHAL_WRITE_REGISTER_EX, + + /* Create native fence and return its fd. */ + gcvHAL_CREATE_NATIVE_FENCE, + + /* Let GPU wait on native fence. */ + gcvHAL_WAIT_NATIVE_FENCE, + + /* Destory MMU. */ + gcvHAL_DESTROY_MMU, + + /* Shared buffer. 
*/ + gcvHAL_SHBUF, + + /* + * Fd representation of android graphic buffer contents. + * Currently, it is only to reference video nodes, signal, etc to avoid being + * destroyed when trasfering across processes. + */ + gcvHAL_GET_GRAPHIC_BUFFER_FD, + + + gcvHAL_SET_VIDEO_MEMORY_METADATA, + + /* Connect a video node to an OS native fd. */ + gcvHAL_GET_VIDEO_MEMORY_FD, + + /* Config power management. */ + gcvHAL_CONFIG_POWER_MANAGEMENT, + + /* Wrap a user memory into a video memory node. */ + gcvHAL_WRAP_USER_MEMORY, + + /* Wait until GPU finishes access to a resource. */ + gcvHAL_WAIT_FENCE, + +#if gcdDEC_ENABLE_AHB + gcvHAL_DEC300_READ, + gcvHAL_DEC300_WRITE, + gcvHAL_DEC300_FLUSH, + gcvHAL_DEC300_FLUSH_WAIT, +#endif + + gcvHAL_BOTTOM_HALF_UNLOCK_VIDEO_MEMORY, + gcvHAL_QUERY_CHIP_OPTION + +} +gceHAL_COMMAND_CODES; + +/******************************************************************************\ +****************************** Interface Structure ***************************** +\******************************************************************************/ + +#define gcdMAX_PROFILE_FILE_NAME 128 + +/* Kernel settings. */ +typedef struct _gcsKERNEL_SETTINGS +{ + /* Used RealTime signal between kernel and user. */ + gctINT signal; +} +gcsKERNEL_SETTINGS; + +typedef struct _gcsUSER_MEMORY_DESC +{ + /* Import flag. */ + gctUINT32 flag; + + /* gcvALLOC_FLAG_DMABUF */ + gctUINT32 handle; + gctUINT64 dmabuf; + + /* gcvALLOC_FLAG_USERMEMORY */ + gctUINT64 logical; + gctUINT32 physical; + gctUINT32 size; + + /* gcvALLOC_FLAG_EXTERNAL_MEMORY */ + gcsEXTERNAL_MEMORY_INFO externalMemoryInfo; +} +gcsUSER_MEMORY_DESC; + + +#define gcdMAX_FLAT_MAPPING_COUNT 16 + +typedef struct _gcsFLAT_MAPPING_RANGE +{ + gctUINT64 start; + gctUINT64 end; +} +gcsFLAT_MAPPING_RANGE; + +/* gcvHAL_QUERY_CHIP_IDENTITY */ +typedef struct _gcsHAL_QUERY_CHIP_IDENTITY * gcsHAL_QUERY_CHIP_IDENTITY_PTR; +typedef struct _gcsHAL_QUERY_CHIP_IDENTITY +{ + + /* Chip model. 
*/ + gceCHIPMODEL chipModel; + + /* Revision value.*/ + gctUINT32 chipRevision; + + /* Chip date. */ + gctUINT32 chipDate; + + + /* Number of streams supported. */ + gctUINT32 streamCount; + + /* Number of pixel pipes. */ + gctUINT32 pixelPipes; + + /* Number of resolve pipes. */ + gctUINT32 resolvePipes; + + /* Number of instructions. */ + gctUINT32 instructionCount; + + /* Number of constants. */ + gctUINT32 numConstants; + + /* Number of varyings */ + gctUINT32 varyingsCount; + + /* Number of 3D GPUs */ + gctUINT32 gpuCoreCount; + + /* Product ID */ + gctUINT32 productID; + + /* Special chip flag bits */ + gceCHIP_FLAG chipFlags; + + /* ECO ID. */ + gctUINT32 ecoID; + + /* Customer ID. */ + gctUINT32 customerID; +} +gcsHAL_QUERY_CHIP_IDENTITY; + +typedef struct _gcsHAL_QUERY_CHIP_OPTIONS * gcsHAL_QUERY_CHIP_OPTIONS_PTR; +typedef struct _gcsHAL_QUERY_CHIP_OPTIONS +{ + gctBOOL gpuProfiler; + gctBOOL allowFastClear; + gctBOOL powerManagement; + /* Whether use new MMU. It is meaningless + ** for old MMU since old MMU is always enabled. + */ + gctBOOL enableMMU; + gceCOMPRESSION_OPTION allowCompression; + gctUINT uscL1CacheRatio; + gceSECURE_MODE secureMode; + +} +gcsHAL_QUERY_CHIP_OPTIONS; + +typedef struct _gcsHAL_INTERFACE +{ + /* Command code. */ + gceHAL_COMMAND_CODES command; + + /* Hardware type. */ + gceHARDWARE_TYPE hardwareType; + + /* Core index for current hardware type. */ + gctUINT32 coreIndex; + + /* Status value. */ + gceSTATUS status; + + /* Handle to this interface channel. */ + gctUINT64 handle; + + /* Pid of the client. */ + gctUINT32 pid; + + /* Engine */ + gceENGINE engine; + + /* Ignore information from TSL when doing IO control */ + gctBOOL ignoreTLS; + + /* Union of command structures. */ + union _u + { + /* gcvHAL_GET_BASE_ADDRESS */ + struct _gcsHAL_GET_BASE_ADDRESS + { + /* Physical memory address of internal memory. 
*/ + OUT gctUINT32 baseAddress; + + OUT gctUINT32 flatMappingRangeCount; + + OUT gcsFLAT_MAPPING_RANGE flatMappingRanges[gcdMAX_FLAT_MAPPING_COUNT]; + } + GetBaseAddress; + + /* gcvHAL_QUERY_VIDEO_MEMORY */ + struct _gcsHAL_QUERY_VIDEO_MEMORY + { + /* Physical memory address of internal memory. Just a name. */ + OUT gctUINT32 internalPhysical; + + /* Size in bytes of internal memory. */ + OUT gctUINT64 internalSize; + + /* Physical memory address of external memory. Just a name. */ + OUT gctUINT32 externalPhysical; + + /* Size in bytes of external memory.*/ + OUT gctUINT64 externalSize; + + /* Physical memory address of contiguous memory. Just a name. */ + OUT gctUINT32 contiguousPhysical; + + /* Size in bytes of contiguous memory.*/ + OUT gctUINT64 contiguousSize; + } + QueryVideoMemory; + + /* gcvHAL_QUERY_CHIP_IDENTITY */ + gcsHAL_QUERY_CHIP_IDENTITY QueryChipIdentity; + + struct _gcsHAL_QUERY_CHIP_FREQUENCY + { + OUT gctUINT32 mcClk; + OUT gctUINT32 shClk; + } + QueryChipFrequency; + + /* gcvHAL_MAP_MEMORY */ + struct _gcsHAL_MAP_MEMORY + { + /* Physical memory address to map. Just a name on Linux/Qnx. */ + IN gctUINT32 physical; + + /* Number of bytes in physical memory to map. */ + IN gctUINT64 bytes; + + /* Address of mapped memory. */ + OUT gctUINT64 logical; + } + MapMemory; + + /* gcvHAL_UNMAP_MEMORY */ + struct _gcsHAL_UNMAP_MEMORY + { + /* Physical memory address to unmap. Just a name on Linux/Qnx. */ + IN gctUINT32 physical; + + /* Number of bytes in physical memory to unmap. */ + IN gctUINT64 bytes; + + /* Address of mapped memory to unmap. */ + IN gctUINT64 logical; + } + UnmapMemory; + + /* gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY */ + struct _gcsHAL_ALLOCATE_LINEAR_VIDEO_MEMORY + { + /* Number of bytes to allocate. */ + IN OUT gctUINT bytes; + + /* Buffer alignment. */ + IN gctUINT alignment; + + /* Type of allocation. */ + IN gceSURF_TYPE type; + + /* Flag of allocation. */ + IN gctUINT32 flag; + + /* Memory pool to allocate from. 
*/ + IN OUT gcePOOL pool; + + /* Allocated video memory. */ + OUT gctUINT32 node; + } + AllocateLinearVideoMemory; + + /* gcvHAL_ALLOCATE_VIDEO_MEMORY */ + struct _gcsHAL_ALLOCATE_VIDEO_MEMORY + { + /* Width of rectangle to allocate. */ + IN OUT gctUINT width; + + /* Height of rectangle to allocate. */ + IN OUT gctUINT height; + + /* Depth of rectangle to allocate. */ + IN gctUINT depth; + + /* Format rectangle to allocate in gceSURF_FORMAT. */ + IN gceSURF_FORMAT format; + + /* Type of allocation. */ + IN gceSURF_TYPE type; + + /* Memory pool to allocate from. */ + IN OUT gcePOOL pool; + + /* Allocated video memory. */ + OUT gctUINT32 node; + } + AllocateVideoMemory; + + /* gcvHAL_RELEASE_VIDEO_MEMORY */ + struct _gcsHAL_RELEASE_VIDEO_MEMORY + { + /* Allocated video memory. */ + IN gctUINT32 node; + +#ifdef __QNXNTO__ + /* Mapped logical address to unmap in user space. */ + OUT gctUINT64 memory; + + /* Number of bytes to allocated. */ + OUT gctUINT64 bytes; +#endif + } + ReleaseVideoMemory; + + /* gcvHAL_LOCK_VIDEO_MEMORY */ + struct _gcsHAL_LOCK_VIDEO_MEMORY + { + /* Allocated video memory. */ + IN gctUINT32 node; + + /* Cache configuration. */ + /* Only gcvPOOL_CONTIGUOUS and gcvPOOL_VIRUTAL + ** can be configured */ + IN gctBOOL cacheable; + + /* Hardware specific address. */ + OUT gctUINT32 address; + + /* Mapped logical address. */ + OUT gctUINT64 memory; + + /* Customer priviate handle*/ + OUT gctUINT32 gid; + + /* Bus address of a contiguous video node. */ + OUT gctUINT64 physicalAddress; + } + LockVideoMemory; + + /* gcvHAL_UNLOCK_VIDEO_MEMORY */ + struct _gcsHAL_UNLOCK_VIDEO_MEMORY + { + /* Allocated video memory. */ + IN gctUINT64 node; + + /* Type of surface. */ + IN gceSURF_TYPE type; + + /* Pool of the unlock node */ + OUT gcePOOL pool; + + /* Bytes of the unlock node */ + OUT gctUINT bytes; + + /* Flag to unlock surface asynchroneously. 
*/ + IN OUT gctBOOL asynchroneous; + } + UnlockVideoMemory; + + /* gcvHAL_ALLOCATE_NON_PAGED_MEMORY */ + struct _gcsHAL_ALLOCATE_NON_PAGED_MEMORY + { + /* Number of bytes to allocate. */ + IN OUT gctUINT64 bytes; + + /* Physical address of allocation. Just a name. */ + OUT gctUINT32 physical; + + /* Logical address of allocation. */ + OUT gctUINT64 logical; + } + AllocateNonPagedMemory; + + /* gcvHAL_FREE_NON_PAGED_MEMORY */ + struct _gcsHAL_FREE_NON_PAGED_MEMORY + { + /* Number of bytes allocated. */ + IN gctUINT64 bytes; + + /* Physical address of allocation. Just a name. */ + IN gctUINT32 physical; + + /* Logical address of allocation. */ + IN gctUINT64 logical; + } + FreeNonPagedMemory; + + /* gcvHAL_ALLOCATE_NON_PAGED_MEMORY */ + struct _gcsHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER + { + /* Number of bytes to allocate. */ + IN OUT gctUINT64 bytes; + + /* Physical address of allocation. Just a name. */ + OUT gctUINT32 physical; + + /* Logical address of allocation. */ + OUT gctUINT64 logical; + } + AllocateVirtualCommandBuffer; + + /* gcvHAL_FREE_NON_PAGED_MEMORY */ + struct _gcsHAL_FREE_VIRTUAL_COMMAND_BUFFER + { + /* Number of bytes allocated. */ + IN gctUINT64 bytes; + + /* Physical address of allocation. Just a name. */ + IN gctUINT32 physical; + + /* Logical address of allocation. */ + IN gctUINT64 logical; + } + FreeVirtualCommandBuffer; + + /* gcvHAL_EVENT_COMMIT. */ + struct _gcsHAL_EVENT_COMMIT + { + /* Event queue in gcsQUEUE. */ + IN gctUINT64 queue; + } + Event; + + /* gcvHAL_COMMIT */ + struct _gcsHAL_COMMIT + { + /* Context buffer object gckCONTEXT. */ + IN gctUINT64 context; + + /* Command buffer gcoCMDBUF. */ + IN gctUINT64 commandBuffer; + + /* State delta buffer in gcsSTATE_DELTA. */ + gctUINT64 delta; + + gctUINT64 deltas[gcvCORE_COUNT]; + + gctUINT64 contexts[gcvCORE_COUNT]; + + gctUINT64 commandBuffers[gcvCORE_COUNT]; + + + /* Event queue in gcsQUEUE. */ + IN gctUINT64 queue; + + /* Used to distinguish different FE. 
*/ + IN gceENGINE engine1; + + /* The command buffer is linked to multiple command queue. */ + IN gctBOOL shared; + + /* Index of command queue. */ + IN gctUINT32 index; + + /* Count of gpu core. */ + IN gctUINT32 count; + + /* Commit stamp of this commit. */ + OUT gctUINT64 commitStamp; + + /* If context switch for this commit */ + OUT gctBOOL contextSwitched; + } + Commit; + + /* gcvHAL_MAP_USER_MEMORY */ + struct _gcsHAL_MAP_USER_MEMORY + { + /* Base address of user memory to map. */ + IN gctUINT64 memory; + + /* Physical address of user memory to map. */ + IN gctUINT32 physical; + + /* Size of user memory in bytes to map. */ + IN gctUINT64 size; + + /* Info record required by gcvHAL_UNMAP_USER_MEMORY. Just a name. */ + OUT gctUINT32 info; + + /* Physical address of mapped memory. */ + OUT gctUINT32 address; + } + MapUserMemory; + + /* gcvHAL_UNMAP_USER_MEMORY */ + struct _gcsHAL_UNMAP_USER_MEMORY + { + /* Base address of user memory to unmap. */ + IN gctUINT64 memory; + + /* Size of user memory in bytes to unmap. */ + IN gctUINT64 size; + + /* Info record returned by gcvHAL_MAP_USER_MEMORY. Just a name. */ + IN gctUINT32 info; + + /* Physical address of mapped memory as returned by + gcvHAL_MAP_USER_MEMORY. */ + IN gctUINT32 address; + } + UnmapUserMemory; +#if !USE_NEW_LINUX_SIGNAL + /* gcsHAL_USER_SIGNAL */ + struct _gcsHAL_USER_SIGNAL + { + /* Command. */ + gceUSER_SIGNAL_COMMAND_CODES command; + + /* Signal ID. */ + IN OUT gctINT id; + + /* Reset mode. */ + IN gctBOOL manualReset; + + /* Wait timedout. */ + IN gctUINT32 wait; + + /* State. */ + IN gctBOOL state; + } + UserSignal; +#endif + + /* gcvHAL_SIGNAL. */ + struct _gcsHAL_SIGNAL + { + /* Signal handle to signal gctSIGNAL. */ + IN gctUINT64 signal; + + /* Reserved gctSIGNAL. */ + IN gctUINT64 auxSignal; + + /* Process owning the signal gctHANDLE. */ + IN gctUINT64 process; + +#if defined(__QNXNTO__) + /* Client pulse side-channel connection ID. Set by client in gcoOS_CreateSignal. 
*/ + IN gctINT32 coid; + + /* Set by server. */ + IN gctINT32 rcvid; +#endif + /* Event generated from where of pipeline */ + IN gceKERNEL_WHERE fromWhere; + } + Signal; + + /* gcvHAL_WRITE_DATA. */ + struct _gcsHAL_WRITE_DATA + { + /* Address to write data to. */ + IN gctUINT32 address; + + /* Data to write. */ + IN gctUINT32 data; + } + WriteData; + + /* gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY */ + struct _gcsHAL_ALLOCATE_CONTIGUOUS_MEMORY + { + /* Number of bytes to allocate. */ + IN OUT gctUINT64 bytes; + + /* Hardware address of allocation. */ + OUT gctUINT32 address; + + /* Physical address of allocation. Just a name. */ + OUT gctUINT32 physical; + + /* Logical address of allocation. */ + OUT gctUINT64 logical; + } + AllocateContiguousMemory; + + /* gcvHAL_FREE_CONTIGUOUS_MEMORY */ + struct _gcsHAL_FREE_CONTIGUOUS_MEMORY + { + /* Number of bytes allocated. */ + IN gctUINT64 bytes; + + /* Physical address of allocation. Just a name. */ + IN gctUINT32 physical; + + /* Logical address of allocation. */ + IN gctUINT64 logical; + } + FreeContiguousMemory; + + /* gcvHAL_READ_REGISTER */ + struct _gcsHAL_READ_REGISTER + { + /* Logical address of memory to write data to. */ + IN gctUINT32 address; + + /* Data read. */ + OUT gctUINT32 data; + } + ReadRegisterData; + + /* gcvHAL_WRITE_REGISTER */ + struct _gcsHAL_WRITE_REGISTER + { + /* Logical address of memory to write data to. */ + IN gctUINT32 address; + + /* Data read. */ + IN gctUINT32 data; + } + WriteRegisterData; + + /* gcvHAL_READ_REGISTER_EX */ + struct _gcsHAL_READ_REGISTER_EX + { + /* Logical address of memory to write data to. */ + IN gctUINT32 address; + + IN gctUINT32 coreSelect; + + /* Data read. */ + OUT gctUINT32 data[4]; + } + ReadRegisterDataEx; + + /* gcvHAL_WRITE_REGISTER_EX */ + struct _gcsHAL_WRITE_REGISTER_EX + { + /* Logical address of memory to write data to. */ + IN gctUINT32 address; + + IN gctUINT32 coreSelect; + + /* Data read. 
*/ + IN gctUINT32 data[4]; + } + WriteRegisterDataEx; + +#if VIVANTE_PROFILER + /* gcvHAL_GET_PROFILE_SETTING */ + struct _gcsHAL_GET_PROFILE_SETTING + { + /* Enable profiling */ + OUT gctBOOL enable; + } + GetProfileSetting; + + /* gcvHAL_SET_PROFILE_SETTING */ + struct _gcsHAL_SET_PROFILE_SETTING + { + /* Enable profiling */ + IN gctBOOL enable; + } + SetProfileSetting; + + /* gcvHAL_READ_PROFILER_REGISTER_SETTING */ + struct _gcsHAL_READ_PROFILER_REGISTER_SETTING + { + /*Should Clear Register*/ + IN gctBOOL bclear; + } + SetProfilerRegisterClear; + + struct _gcsHAL_READ_ALL_PROFILE_REGISTERS_PART1 + { + /* Context buffer object gckCONTEXT. Just a name. */ + IN gctUINT32 context; + + /* Data read. */ + OUT gcsPROFILER_COUNTERS_PART1 Counters; + } + RegisterProfileData_part1; + + struct _gcsHAL_READ_ALL_PROFILE_REGISTERS_PART2 + { + /* Context buffer object gckCONTEXT. Just a name. */ + IN gctUINT32 context; + + /* Data read. */ + OUT gcsPROFILER_COUNTERS_PART2 Counters; + } + RegisterProfileData_part2; + + /* gcvHAL_PROFILE_REGISTERS_2D */ + struct _gcsHAL_PROFILE_REGISTERS_2D + { + /* Data read in gcs2D_PROFILE. */ + OUT gctUINT64 hwProfile2D; + } + RegisterProfileData2D; +#endif + + /* Power management. */ + /* gcvHAL_SET_POWER_MANAGEMENT_STATE */ + struct _gcsHAL_SET_POWER_MANAGEMENT + { + /* Data read. */ + IN gceCHIPPOWERSTATE state; + } + SetPowerManagement; + + /* gcvHAL_QUERY_POWER_MANAGEMENT_STATE */ + struct _gcsHAL_QUERY_POWER_MANAGEMENT + { + /* Data read. */ + OUT gceCHIPPOWERSTATE state; + + /* Idle query. */ + OUT gctBOOL isIdle; + } + QueryPowerManagement; + + /* gcvHAL_QUERY_KERNEL_SETTINGS */ + struct _gcsHAL_QUERY_KERNEL_SETTINGS + { + /* Settings.*/ + OUT gcsKERNEL_SETTINGS settings; + } + QueryKernelSettings; + + /* gcvHAL_MAP_PHYSICAL */ + struct _gcsHAL_MAP_PHYSICAL + { + /* gcvTRUE to map, gcvFALSE to unmap. */ + IN gctBOOL map; + + /* Physical address. 
*/ + IN OUT gctUINT64 physical; + } + MapPhysical; + + /* gcvHAL_DEBUG */ + struct _gcsHAL_DEBUG + { + /* If gcvTRUE, set the debug information. */ + IN gctBOOL set; + IN gctUINT32 level; + IN gctUINT32 zones; + IN gctBOOL enable; + + IN gceDEBUG_MESSAGE_TYPE type; + IN gctUINT32 messageSize; + + /* Message to print if not empty. */ + IN gctCHAR message[80]; + + } + Debug; + + /* gcvHAL_CACHE */ + struct _gcsHAL_CACHE + { + IN gceCACHEOPERATION operation; + IN gctUINT64 process; + IN gctUINT64 logical; + IN gctUINT64 bytes; + IN gctUINT32 node; + } + Cache; + + /* gcvHAL_TIMESTAMP */ + struct _gcsHAL_TIMESTAMP + { + /* Timer select. */ + IN gctUINT32 timer; + + /* Timer request type (0-stop, 1-start, 2-send delta). */ + IN gctUINT32 request; + + /* Result of delta time in microseconds. */ + OUT gctINT32 timeDelta; + } + TimeStamp; + + /* gcvHAL_DATABASE */ + struct _gcsHAL_DATABASE + { + /* Set to gcvTRUE if you want to query a particular process ID. + ** Set to gcvFALSE to query the last detached process. */ + IN gctBOOL validProcessID; + + /* Process ID to query. */ + IN gctUINT32 processID; + + /* Information. */ + OUT gcuDATABASE_INFO vidMem; + OUT gcuDATABASE_INFO nonPaged; + OUT gcuDATABASE_INFO contiguous; + OUT gcuDATABASE_INFO gpuIdle; + + /* Detail information about video memory. */ + OUT gcuDATABASE_INFO vidMemPool[3]; + } + Database; + + /* gcvHAL_VERSION */ + struct _gcsHAL_VERSION + { + /* Major version: N.n.n. */ + OUT gctINT32 major; + + /* Minor version: n.N.n. */ + OUT gctINT32 minor; + + /* Patch version: n.n.N. */ + OUT gctINT32 patch; + + /* Build version. */ + OUT gctUINT32 build; + } + Version; + + /* gcvHAL_CHIP_INFO */ + struct _gcsHAL_CHIP_INFO + { + /* Chip count. */ + OUT gctINT32 count; + + /* Chip types. */ + OUT gceHARDWARE_TYPE types[gcdCHIP_COUNT]; + + /* Chip IDs. */ + OUT gctUINT32 ids[gcvCORE_COUNT]; + } + ChipInfo; + + /* gcvHAL_ATTACH */ + struct _gcsHAL_ATTACH + { + /* Handle of context buffer object. 
*/ + OUT gctUINT32 context; + + /* Maximum state in the buffer. */ + OUT gctUINT64 maxState; + + /* Number of states in the buffer. */ + OUT gctUINT32 numStates; + + /* Map context buffer to user or not. */ + IN gctBOOL map; + + /* Physical of context buffer. */ + OUT gctUINT32 physicals[2]; + + /* Physical of context buffer. */ + OUT gctUINT64 logicals[2]; + + /* Bytes of context buffer. */ + OUT gctUINT32 bytes; + } + Attach; + + /* gcvHAL_DETACH */ + struct _gcsHAL_DETACH + { + /* Context buffer object gckCONTEXT. Just a name. */ + IN gctUINT32 context; + } + Detach; + + /* gcvHAL_GET_FRAME_INFO. */ + struct _gcsHAL_GET_FRAME_INFO + { + /* gcsHAL_FRAME_INFO* */ + OUT gctUINT64 frameInfo; + } + GetFrameInfo; + + /* gcvHAL_SET_TIME_OUT. */ + struct _gcsHAL_SET_TIMEOUT + { + gctUINT32 timeOut; + } + SetTimeOut; + + + struct _gcsHAL_SET_FSCALE_VALUE + { + IN gctUINT value; + } + SetFscaleValue; + + struct _gcsHAL_GET_FSCALE_VALUE + { + OUT gctUINT value; + OUT gctUINT minValue; + OUT gctUINT maxValue; + } + GetFscaleValue; + + /* gcvHAL_EXPORT_VIDEO_MEMORY */ + struct _gcsHAL_EXPORT_VIDEO_MEMORY + { + /* Allocated video memory. */ + IN gctUINT32 node; + + /* Export flags */ + IN gctUINT32 flags; + + /* Exported dma_buf fd */ + OUT gctINT32 fd; + } + ExportVideoMemory; + + struct _gcsHAL_NAME_VIDEO_MEMORY + { + IN gctUINT32 handle; + OUT gctUINT32 name; + } + NameVideoMemory; + + struct _gcsHAL_IMPORT_VIDEO_MEMORY + { + IN gctUINT32 name; + OUT gctUINT32 handle; + } + ImportVideoMemory; + + struct _gcsHAL_QUERY_RESET_TIME_STAMP + { + OUT gctUINT64 timeStamp; + OUT gctUINT64 contextID; + } + QueryResetTimeStamp; + + struct _gcsHAL_CREATE_NATIVE_FENCE + { + /* Signal id. */ + IN gctUINT64 signal; + + /* Native fence file descriptor. */ + OUT gctINT fenceFD; + + } + CreateNativeFence; + + struct _gcsHAL_WAIT_NATIVE_FENCE + { + /* Native fence file descriptor. */ + IN gctINT fenceFD; + + /* Wait timeout. 
*/ + IN gctUINT32 timeout; + } + WaitNativeFence; + + struct _gcsHAL_DESTROY_MMU + { + /* Mmu object. */ + IN gctUINT64 mmu; + } + DestroyMmu; + + struct _gcsHAL_SHBUF + { + gceSHBUF_COMMAND_CODES command; + + /* Shared buffer. */ + IN OUT gctUINT64 id; + + /* User data to be shared. */ + IN gctUINT64 data; + + /* Data size. */ + IN OUT gctUINT32 bytes; + } + ShBuf; + + struct _gcsHAL_GET_GRAPHIC_BUFFER_FD + { + /* Max 3 video nodes, node handle here. */ + IN gctUINT32 node[3]; + + /* A shBuf. */ + IN gctUINT64 shBuf; + + /* A signal. */ + IN gctUINT32 signal; + + OUT gctINT32 fd; + } + GetGraphicBufferFd; + + + struct _gcsHAL_VIDEO_MEMORY_METADATA + { + /* Allocated video memory. */ + IN gctUINT32 node; + + IN gctUINT32 readback; + + INOUT gctINT32 ts_fd; + INOUT gctUINT32 fc_enabled; + INOUT gctUINT32 fc_value; + INOUT gctUINT32 fc_value_upper; + + INOUT gctUINT32 compressed; + INOUT gctUINT32 compress_format; + } + SetVidMemMetadata; + + struct _gcsHAL_GET_VIDEO_MEMORY_FD + { + IN gctUINT32 handle; + OUT gctINT fd; + } + GetVideoMemoryFd; + + struct _gcsHAL_CONFIG_POWER_MANAGEMENT + { + IN gctBOOL enable; + } + ConfigPowerManagement; + + struct _gcsHAL_WRAP_USER_MEMORY + { + /* Description of user memory. */ + IN gcsUSER_MEMORY_DESC desc; + + /* Output video mmory node. 
*/ + OUT gctUINT32 node; + + /* size of the node in bytes */ + OUT gctUINT64 bytes; + } + WrapUserMemory; + + struct _gcsHAL_WAIT_FENCE + { + IN gctUINT32 handle; + IN gctUINT32 timeOut; + } + WaitFence; + + struct _gcsHAL_COMMIT_DONE + { + IN gctUINT64 context; + } + CommitDone; + +#if gcdDEC_ENABLE_AHB + struct _gcsHAL_DEC300_READ + { + gctUINT32 enable; + gctUINT32 readId; + gctUINT32 format; + gctUINT32 strides[3]; + gctUINT32 is3D; + gctUINT32 isMSAA; + gctUINT32 clearValue; + gctUINT32 isTPC; + gctUINT32 isTPCCompressed; + gctUINT32 surfAddrs[3]; + gctUINT32 tileAddrs[3]; + } + DEC300Read; + + struct _gcsHAL_DEC300_WRITE + { + gctUINT32 enable; + gctUINT32 readId; + gctUINT32 writeId; + gctUINT32 format; + gctUINT32 surfAddr; + gctUINT32 tileAddr; + } + DEC300Write; + + struct _gcsHAL_DEC300_FLUSH + { + IN gctUINT8 useless; + } + DEC300Flush; + + struct _gcsHAL_DEC300_FLUSH_WAIT + { + IN gctUINT32 done; + } + DEC300FlushWait; +#endif + /* gcvHAL_BOTTOM_HALF_UNLOCK_VIDEO_MEMORY: */ + struct _gcsHAL_BOTTOM_HALF_UNLOCK_VIDEO_MEMORY + { + /* Allocated video memory. */ + IN gctUINT32 node; + + /* Type of surface. 
*/ + IN gceSURF_TYPE type; + } + BottomHalfUnlockVideoMemory; + + gcsHAL_QUERY_CHIP_OPTIONS QueryChipOptions; + } + u; +} +gcsHAL_INTERFACE; + + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_driver_h_ */ + + diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_driver_vg.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_driver_vg.h new file mode 100644 index 000000000000..4fd5d0121947 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_driver_vg.h @@ -0,0 +1,302 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#ifndef __gc_hal_driver_vg_h_ +#define __gc_hal_driver_vg_h_ + + + +#include "gc_hal_types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/******************************************************************************\ +******************************* I/O Control Codes ****************************** +\******************************************************************************/ + +#define gcvHAL_CLASS "galcore" +#define IOCTL_GCHAL_INTERFACE 30000 + +/******************************************************************************\ +********************* Command buffer information structure. 
******************** +\******************************************************************************/ + +typedef struct _gcsCOMMAND_BUFFER_INFO * gcsCOMMAND_BUFFER_INFO_PTR; +typedef struct _gcsCOMMAND_BUFFER_INFO +{ + /* FE command buffer interrupt ID. */ + gctINT32 feBufferInt; + + /* TS overflow interrupt ID. */ + gctINT32 tsOverflowInt; + + /* Alignment and mask for the buffer address. */ + gctUINT addressMask; + gctUINT32 addressAlignment; + + /* Alignment for each command. */ + gctUINT32 commandAlignment; + + /* Number of bytes required by the STATE command. */ + gctUINT32 stateCommandSize; + + /* Number of bytes required by the RESTART command. */ + gctUINT32 restartCommandSize; + + /* Number of bytes required by the FETCH command. */ + gctUINT32 fetchCommandSize; + + /* Number of bytes required by the CALL command. */ + gctUINT32 callCommandSize; + + /* Number of bytes required by the RETURN command. */ + gctUINT32 returnCommandSize; + + /* Number of bytes required by the EVENT command. */ + gctUINT32 eventCommandSize; + + /* Number of bytes required by the END command. */ + gctUINT32 endCommandSize; + + /* Number of bytes reserved at the tail of a static command buffer. */ + gctUINT32 staticTailSize; + + /* Number of bytes reserved at the tail of a dynamic command buffer. */ + gctUINT32 dynamicTailSize; +} +gcsCOMMAND_BUFFER_INFO; + +/******************************************************************************\ +******************************** Task Structures ******************************* +\******************************************************************************/ + +typedef enum _gceTASK +{ + gcvTASK_LINK, + gcvTASK_CLUSTER, + gcvTASK_INCREMENT, + gcvTASK_DECREMENT, + gcvTASK_SIGNAL, + gcvTASK_LOCKDOWN, + gcvTASK_UNLOCK_VIDEO_MEMORY, + gcvTASK_FREE_VIDEO_MEMORY, + gcvTASK_FREE_CONTIGUOUS_MEMORY, + gcvTASK_UNMAP_USER_MEMORY +} +gceTASK; + +typedef struct _gcsTASK_HEADER * gcsTASK_HEADER_PTR; +typedef struct _gcsTASK_HEADER +{ + /* Task ID. 
*/ + IN gceTASK id; +} +gcsTASK_HEADER; + +typedef struct _gcsTASK_LINK * gcsTASK_LINK_PTR; +typedef struct _gcsTASK_LINK +{ + /* Task ID (gcvTASK_LINK). */ + IN gceTASK id; + + /* Pointer to the next task container. */ + IN gctPOINTER cotainer; + + /* Pointer to the next task from the next task container. */ + IN gcsTASK_HEADER_PTR task; +} +gcsTASK_LINK; + +typedef struct _gcsTASK_CLUSTER * gcsTASK_CLUSTER_PTR; +typedef struct _gcsTASK_CLUSTER +{ + /* Task ID (gcvTASK_CLUSTER). */ + IN gceTASK id; + + /* Number of tasks in the cluster. */ + IN gctUINT taskCount; +} +gcsTASK_CLUSTER; + +typedef struct _gcsTASK_INCREMENT * gcsTASK_INCREMENT_PTR; +typedef struct _gcsTASK_INCREMENT +{ + /* Task ID (gcvTASK_INCREMENT). */ + IN gceTASK id; + + /* Address of the variable to increment. */ + IN gctUINT32 address; +} +gcsTASK_INCREMENT; + +typedef struct _gcsTASK_DECREMENT * gcsTASK_DECREMENT_PTR; +typedef struct _gcsTASK_DECREMENT +{ + /* Task ID (gcvTASK_DECREMENT). */ + IN gceTASK id; + + /* Address of the variable to decrement. */ + IN gctUINT32 address; +} +gcsTASK_DECREMENT; + +typedef struct _gcsTASK_SIGNAL * gcsTASK_SIGNAL_PTR; +typedef struct _gcsTASK_SIGNAL +{ + /* Task ID (gcvTASK_SIGNAL). */ + IN gceTASK id; + + /* Process owning the signal. */ + IN gctHANDLE process; + + /* Signal handle to signal. */ + IN gctSIGNAL signal; + +#if defined(__QNXNTO__) + IN gctINT32 coid; + IN gctINT32 rcvid; +#endif +} +gcsTASK_SIGNAL; + +typedef struct _gcsTASK_LOCKDOWN * gcsTASK_LOCKDOWN_PTR; +typedef struct _gcsTASK_LOCKDOWN +{ + /* Task ID (gcvTASK_LOCKDOWN). */ + IN gceTASK id; + + /* Address of the user space counter. */ + IN gctUINT32 userCounter; + + /* Address of the kernel space counter. */ + IN gctUINT32 kernelCounter; + + /* Process owning the signal. */ + IN gctHANDLE process; + + /* Signal handle to signal. 
*/ + IN gctSIGNAL signal; +} +gcsTASK_LOCKDOWN; + +typedef struct _gcsTASK_UNLOCK_VIDEO_MEMORY * gcsTASK_UNLOCK_VIDEO_MEMORY_PTR; +typedef struct _gcsTASK_UNLOCK_VIDEO_MEMORY +{ + /* Task ID (gcvTASK_UNLOCK_VIDEO_MEMORY). */ + IN gceTASK id; + + /* Allocated video memory. */ + IN gctUINT64 node; +} +gcsTASK_UNLOCK_VIDEO_MEMORY; + +typedef struct _gcsTASK_FREE_VIDEO_MEMORY * gcsTASK_FREE_VIDEO_MEMORY_PTR; +typedef struct _gcsTASK_FREE_VIDEO_MEMORY +{ + /* Task ID (gcvTASK_FREE_VIDEO_MEMORY). */ + IN gceTASK id; + + /* Allocated video memory. */ + IN gctUINT32 node; +} +gcsTASK_FREE_VIDEO_MEMORY; + +typedef struct _gcsTASK_FREE_CONTIGUOUS_MEMORY * gcsTASK_FREE_CONTIGUOUS_MEMORY_PTR; +typedef struct _gcsTASK_FREE_CONTIGUOUS_MEMORY +{ + /* Task ID (gcvTASK_FREE_CONTIGUOUS_MEMORY). */ + IN gceTASK id; + + /* Number of bytes allocated. */ + IN gctSIZE_T bytes; + + /* Physical address of allocation. */ + IN gctPHYS_ADDR physical; + + /* Logical address of allocation. */ + IN gctPOINTER logical; +} +gcsTASK_FREE_CONTIGUOUS_MEMORY; + +typedef struct _gcsTASK_UNMAP_USER_MEMORY * gcsTASK_UNMAP_USER_MEMORY_PTR; +typedef struct _gcsTASK_UNMAP_USER_MEMORY +{ + /* Task ID (gcvTASK_UNMAP_USER_MEMORY). */ + IN gceTASK id; + + /* Base address of user memory to unmap. */ + IN gctPOINTER memory; + + /* Size of user memory in bytes to unmap. */ + IN gctSIZE_T size; + + /* Info record returned by gcvHAL_MAP_USER_MEMORY. */ + IN gctPOINTER info; + + /* Physical address of mapped memory as returned by + gcvHAL_MAP_USER_MEMORY. 
*/ + IN gctUINT32 address; +} +gcsTASK_UNMAP_USER_MEMORY; + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_driver_h_ */ + + diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_drm.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_drm.h new file mode 100644 index 000000000000..cb59f8194c7f --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_drm.h @@ -0,0 +1,199 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#ifndef __VIVNATE_DRM_H__ +#define __VIVNATE_DRM_H__ + +#if !defined(__KERNEL__) +#include +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/* creation flag bits. 
*/ +#define DRM_VIV_GEM_CONTIGUOUS (1u << 0) +#define DRM_VIV_GEM_CACHED (1u << 1) +#define DRM_VIV_GEM_SECURE (1u << 2) +#define DRM_VIV_GEM_CMA_LIMIT (1u << 3) + +struct drm_viv_gem_create { + __u64 size; + __u32 flags; + __u32 handle; +}; + +struct drm_viv_gem_lock { + __u32 handle; + __u32 cacheable; + __u64 logical; +}; + +struct drm_viv_gem_unlock { + __u32 handle; +}; + + +#define DRM_VIV_GEM_CLEAN_CACHE 0x01 +#define DRM_VIV_GEM_INVALIDATE_CACHE 0x02 +#define DRM_VIV_GEM_FLUSH_CACHE 0x03 +#define DRM_VIV_GEM_MEMORY_BARRIER 0x04 + +struct drm_viv_gem_cache { + __u32 handle; + __u32 op; + __u64 logical; + __u64 bytes; +}; + + +#define DRM_VIV_GEM_PARAM_POOL 0x00 +#define DRM_VIV_GEM_PARAM_SIZE 0x01 + +struct drm_viv_gem_query { + __u32 handle; + __u32 param; + __u64 value; +}; + + +struct drm_viv_gem_timestamp { + __u32 handle; + /* inc count, 0 for query current. */ + __u32 inc; + /* output inc'ed timestamp. */ + __u64 timestamp; +}; + + +/* basic tiling mode. */ +#define DRM_VIV_GEM_TILING_LINEAR 0x01 +#define DRM_VIV_GEM_TILING_TILED 0x02 +#define DRM_VIV_GEM_TILING_SUPERTILED 0x04 +#define DRM_VIV_GEM_TILING_MINORTILED 0x08 + +/* tiling mode modifiers. */ +#define DRM_VIV_GEM_TILING_SPLIT 0x10 +#define DRM_VIV_GEM_TILING_X_MAJOR 0x20 +#define DRM_VIV_GEM_TILING_Y_MAJOR 0x40 +#define DRM_VIV_GEM_TILING_SWAP 0x80 + +/* ts mode. */ +#define DRM_VIV_GEM_TS_NONE 0x00 +#define DRM_VIV_GEM_TS_DISABLED 0x01 +#define DRM_VIV_GEM_TS_NORMAL 0x02 +#define DRM_VIV_GEM_TS_COMPRESSED 0x03 + +struct drm_viv_gem_set_tiling { + __u32 handle; + __u32 tiling_mode; + + __u32 ts_mode; + __u64 clear_value; +}; + +struct drm_viv_gem_get_tiling { + __u32 handle; + __u32 tiling_mode; + + __u32 ts_mode; + __u64 clear_value; +}; + + +struct drm_viv_gem_attach_aux { + __u32 handle; + __u32 ts_handle; +}; + + +struct drm_viv_gem_ref_node { + __u32 handle; + + /* output. 
*/ + __u32 node; + __u32 ts_node; +}; + + +#define DRM_VIV_GEM_CREATE 0x00 +#define DRM_VIV_GEM_LOCK 0x01 +#define DRM_VIV_GEM_UNLOCK 0x02 +#define DRM_VIV_GEM_CACHE 0x03 +#define DRM_VIV_GEM_QUERY 0x04 +#define DRM_VIV_GEM_TIMESTAMP 0x05 +#define DRM_VIV_GEM_SET_TILING 0x06 +#define DRM_VIV_GEM_GET_TILING 0x07 +#define DRM_VIV_GEM_ATTACH_AUX 0x08 +#define DRM_VIV_GEM_REF_NODE 0x09 +#define DRM_VIV_NUM_IOCTLS 0x0A + +#define DRM_IOCTL_VIV_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VIV_GEM_CREATE, struct drm_viv_gem_create) +#define DRM_IOCTL_VIV_GEM_LOCK DRM_IOWR(DRM_COMMAND_BASE + DRM_VIV_GEM_LOCK, struct drm_viv_gem_lock) +#define DRM_IOCTL_VIV_GEM_UNLOCK DRM_IOWR(DRM_COMMAND_BASE + DRM_VIV_GEM_UNLOCK, struct drm_viv_gem_unlock) +#define DRM_IOCTL_VIV_GEM_CACHE DRM_IOWR(DRM_COMMAND_BASE + DRM_VIV_GEM_CACHE, struct drm_viv_gem_cache) +#define DRM_IOCTL_VIV_GEM_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_VIV_GEM_QUERY, struct drm_viv_gem_query) +#define DRM_IOCTL_VIV_GEM_TIMESTAMP DRM_IOWR(DRM_COMMAND_BASE + DRM_VIV_GEM_TIMESTAMP, struct drm_viv_gem_timestamp) +#define DRM_IOCTL_VIV_GEM_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VIV_GEM_SET_TILING, struct drm_viv_gem_set_tiling) +#define DRM_IOCTL_VIV_GEM_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VIV_GEM_GET_TILING, struct drm_viv_gem_get_tiling) +#define DRM_IOCTL_VIV_GEM_ATTACH_AUX DRM_IOWR(DRM_COMMAND_BASE + DRM_VIV_GEM_ATTACH_AUX, struct drm_viv_gem_attach_aux) +#define DRM_IOCTL_VIV_GEM_REF_NODE DRM_IOWR(DRM_COMMAND_BASE + DRM_VIV_GEM_REF_NODE, struct drm_viv_gem_ref_node) + +#if defined(__cplusplus) +} +#endif + +#endif /* __VIVNATE_DRM_H__ */ diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_dump.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_dump.h new file mode 100644 index 000000000000..b8c9ffab9ad3 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_dump.h @@ -0,0 +1,125 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* 
Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. 
+* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#ifndef __gc_hal_dump_h_ +#define __gc_hal_dump_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** FILE LAYOUT: +** +** gcsDUMP_FILE structure +** +** gcsDUMP_DATA frame +** gcsDUMP_DATA or gcDUMP_DATA_SIZE records rendingring the frame +** gctUINT8 data[length] +*/ + +#define gcvDUMP_FILE_SIGNATURE gcmCC('g','c','D','B') + +typedef struct _gcsDUMP_FILE +{ + gctUINT32 signature; /* File signature */ + gctSIZE_T length; /* Length of file */ + gctUINT32 frames; /* Number of frames in file */ +} +gcsDUMP_FILE; + +typedef enum _gceDUMP_TAG +{ + gcvTAG_SURFACE = gcmCC('s','u','r','f'), + gcvTAG_FRAME = gcmCC('f','r','m',' '), + gcvTAG_COMMAND = gcmCC('c','m','d',' '), + gcvTAG_INDEX = gcmCC('i','n','d','x'), + gcvTAG_STREAM = gcmCC('s','t','r','m'), + gcvTAG_TEXTURE = gcmCC('t','e','x','t'), + gcvTAG_RENDER_TARGET = gcmCC('r','n','d','r'), + gcvTAG_DEPTH = gcmCC('z','b','u','f'), + gcvTAG_RESOLVE = gcmCC('r','s','l','v'), + gcvTAG_DELETE = gcmCC('d','e','l',' '), + gcvTAG_BUFOBJ = gcmCC('b','u','f','o'), +} +gceDUMP_TAG; + +typedef struct _gcsDUMP_SURFACE +{ + gceDUMP_TAG type; /* Type of record. */ + gctUINT32 address; /* Address of the surface. */ + gctINT16 width; /* Width of surface. */ + gctINT16 height; /* Height of surface. 
*/ + gceSURF_FORMAT format; /* Surface pixel format. */ + gctSIZE_T length; /* Number of bytes inside the surface. */ +} +gcsDUMP_SURFACE; + +typedef struct _gcsDUMP_DATA +{ + gceDUMP_TAG type; /* Type of record. */ + gctSIZE_T length; /* Number of bytes of data. */ + gctUINT32 address; /* Address for the data. */ +} +gcsDUMP_DATA; + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_dump_h_ */ + + + diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_eglplatform.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_eglplatform.h new file mode 100644 index 000000000000..7d2a6fa35e25 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_eglplatform.h @@ -0,0 +1,589 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#ifndef __gc_hal_eglplatform_h_ +#define __gc_hal_eglplatform_h_ + +#include "gc_hal_types.h" +#include "gc_hal_base.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +#if defined(_WIN32) || defined(__VC32__) && !defined(__CYGWIN__) && !defined(__SCITECH_SNAP__) +#ifndef WIN32_LEAN_AND_MEAN +/* #define WIN32_LEAN_AND_MEAN 1 */ +#endif +#include + +typedef HDC HALNativeDisplayType; +typedef HWND HALNativeWindowType; +typedef HBITMAP HALNativePixmapType; + +typedef struct __BITFIELDINFO +{ + BITMAPINFO bmi; + RGBQUAD bmiColors[2]; +} +BITFIELDINFO; + +#elif /* defined(__APPLE__) || */ defined(__WINSCW__) || defined(__SYMBIAN32__) /* Symbian */ + +#elif defined(WL_EGL_PLATFORM) || defined(EGL_API_WL) /* Wayland */ + +#elif defined(__GBM__) /* GBM */ + +#elif defined(__ANDROID__) || defined(ANDROID) + +#elif defined(MIR_EGL_PLATFORM) /* Mir */ + +#elif defined(__QNXNTO__) + +#elif defined(__unix__) || defined(__APPLE__) + +#if defined(EGL_API_DFB) + +#elif defined(EGL_API_FB) + +#elif defined(EGL_API_NULLWS) + + +#else + +/* X11 (tetative). */ +#endif + +#else +#error "Platform not recognized" +#endif + +#if defined(_WIN32) || defined(__VC32__) && !defined(__CYGWIN__) && !defined(__SCITECH_SNAP__) + +#include "gc_hal_eglplatform_type.h" + +/******************************************************************************* +** Display. 
******************************************************************** +*/ + +gceSTATUS +gcoOS_GetDisplay( + OUT HALNativeDisplayType * Display, + IN gctPOINTER Context + ); + +gceSTATUS +gcoOS_GetDisplayByIndex( + IN gctINT DisplayIndex, + OUT HALNativeDisplayType * Display, + IN gctPOINTER Context + ); + +gceSTATUS +gcoOS_GetDisplayInfo( + IN HALNativeDisplayType Display, + OUT gctINT * Width, + OUT gctINT * Height, + OUT gctSIZE_T * Physical, + OUT gctINT * Stride, + OUT gctINT * BitsPerPixel + ); + + + +gceSTATUS +gcoOS_GetDisplayInfoEx( + IN HALNativeDisplayType Display, + IN HALNativeWindowType Window, + IN gctUINT DisplayInfoSize, + OUT halDISPLAY_INFO * DisplayInfo + ); + +gceSTATUS +gcoOS_GetDisplayVirtual( + IN HALNativeDisplayType Display, + OUT gctINT * Width, + OUT gctINT * Height + ); + +gceSTATUS +gcoOS_GetDisplayBackbuffer( + IN HALNativeDisplayType Display, + IN HALNativeWindowType Window, + OUT gctPOINTER * context, + OUT gcoSURF * surface, + OUT gctUINT * Offset, + OUT gctINT * X, + OUT gctINT * Y + ); + +gceSTATUS +gcoOS_SetDisplayVirtual( + IN HALNativeDisplayType Display, + IN HALNativeWindowType Window, + IN gctUINT Offset, + IN gctINT X, + IN gctINT Y + ); + +gceSTATUS +gcoOS_SetDisplayVirtualEx( + IN HALNativeDisplayType Display, + IN HALNativeWindowType Window, + IN gctPOINTER Context, + IN gcoSURF Surface, + IN gctUINT Offset, + IN gctINT X, + IN gctINT Y + ); + +gceSTATUS +gcoOS_CancelDisplayBackbuffer( + IN HALNativeDisplayType Display, + IN HALNativeWindowType Window, + IN gctPOINTER Context, + IN gcoSURF Surface, + IN gctUINT Offset, + IN gctINT X, + IN gctINT Y + ); + +gceSTATUS +gcoOS_SetSwapInterval( + IN HALNativeDisplayType Display, + IN gctINT Interval +); + +gceSTATUS +gcoOS_SetSwapIntervalEx( + IN HALNativeDisplayType Display, + IN gctINT Interval, + IN gctPOINTER localDisplay); + +gceSTATUS +gcoOS_GetSwapInterval( + IN HALNativeDisplayType Display, + IN gctINT_PTR Min, + IN gctINT_PTR Max +); + +gceSTATUS 
+gcoOS_DisplayBufferRegions( + IN HALNativeDisplayType Display, + IN HALNativeWindowType Window, + IN gctINT NumRects, + IN gctINT_PTR Rects + ); + +gceSTATUS +gcoOS_DestroyDisplay( + IN HALNativeDisplayType Display + ); + +gceSTATUS +gcoOS_InitLocalDisplayInfo( + IN HALNativeDisplayType Display, + IN OUT gctPOINTER * localDisplay + ); + +gceSTATUS +gcoOS_DeinitLocalDisplayInfo( + IN HALNativeDisplayType Display, + IN OUT gctPOINTER * localDisplay + ); + +gceSTATUS +gcoOS_GetDisplayInfoEx2( + IN HALNativeDisplayType Display, + IN HALNativeWindowType Window, + IN gctPOINTER localDisplay, + IN gctUINT DisplayInfoSize, + OUT halDISPLAY_INFO * DisplayInfo + ); + +gceSTATUS +gcoOS_GetDisplayBackbufferEx( + IN HALNativeDisplayType Display, + IN HALNativeWindowType Window, + IN gctPOINTER localDisplay, + OUT gctPOINTER * context, + OUT gcoSURF * surface, + OUT gctUINT * Offset, + OUT gctINT * X, + OUT gctINT * Y + ); + +gceSTATUS +gcoOS_IsValidDisplay( + IN HALNativeDisplayType Display + ); + +gceSTATUS +gcoOS_GetNativeVisualId( + IN HALNativeDisplayType Display, + OUT gctINT* nativeVisualId + ); + +gctBOOL +gcoOS_SynchronousFlip( + IN HALNativeDisplayType Display + ); + +/******************************************************************************* +** Windows. 
******************************************************************** +*/ + +gceSTATUS +gcoOS_CreateWindow( + IN HALNativeDisplayType Display, + IN gctINT X, + IN gctINT Y, + IN gctINT Width, + IN gctINT Height, + OUT HALNativeWindowType * Window + ); + +gceSTATUS +gcoOS_GetWindowInfo( + IN HALNativeDisplayType Display, + IN HALNativeWindowType Window, + OUT gctINT * X, + OUT gctINT * Y, + OUT gctINT * Width, + OUT gctINT * Height, + OUT gctINT * BitsPerPixel, + OUT gctUINT * Offset + ); + +gceSTATUS +gcoOS_DestroyWindow( + IN HALNativeDisplayType Display, + IN HALNativeWindowType Window + ); + +gceSTATUS +gcoOS_DrawImage( + IN HALNativeDisplayType Display, + IN HALNativeWindowType Window, + IN gctINT Left, + IN gctINT Top, + IN gctINT Right, + IN gctINT Bottom, + IN gctINT Width, + IN gctINT Height, + IN gctINT BitsPerPixel, + IN gctPOINTER Bits + ); + +gceSTATUS +gcoOS_GetImage( + IN HALNativeWindowType Window, + IN gctINT Left, + IN gctINT Top, + IN gctINT Right, + IN gctINT Bottom, + OUT gctINT * BitsPerPixel, + OUT gctPOINTER * Bits + ); + +gceSTATUS +gcoOS_GetWindowInfoEx( + IN HALNativeDisplayType Display, + IN HALNativeWindowType Window, + OUT gctINT * X, + OUT gctINT * Y, + OUT gctINT * Width, + OUT gctINT * Height, + OUT gctINT * BitsPerPixel, + OUT gctUINT * Offset, + OUT gceSURF_FORMAT * Format, + OUT gceSURF_TYPE * Type + ); + +gceSTATUS +gcoOS_DrawImageEx( + IN HALNativeDisplayType Display, + IN HALNativeWindowType Window, + IN gctINT Left, + IN gctINT Top, + IN gctINT Right, + IN gctINT Bottom, + IN gctINT Width, + IN gctINT Height, + IN gctINT BitsPerPixel, + IN gctPOINTER Bits, + IN gceSURF_FORMAT Format + ); + +/* + * Possiable types: + * gcvSURF_BITMAP + * gcvSURF_RENDER_TARGET + * gcvSURF_RENDER_TARGET_NO_COMPRESSION + * gcvSURF_RENDER_TARGET_NO_TILE_STATUS + */ +gceSTATUS +gcoOS_SetWindowFormat( + IN HALNativeDisplayType Display, + IN HALNativeWindowType Window, + IN gceSURF_TYPE Type, + IN gceSURF_FORMAT Format + ); + + 
+/******************************************************************************* +** Pixmaps. ******************************************************************** +*/ + +gceSTATUS +gcoOS_CreatePixmap( + IN HALNativeDisplayType Display, + IN gctINT Width, + IN gctINT Height, + IN gctINT BitsPerPixel, + OUT HALNativePixmapType * Pixmap + ); + +gceSTATUS +gcoOS_GetPixmapInfo( + IN HALNativeDisplayType Display, + IN HALNativePixmapType Pixmap, + OUT gctINT * Width, + OUT gctINT * Height, + OUT gctINT * BitsPerPixel, + OUT gctINT * Stride, + OUT gctPOINTER * Bits + ); + +gceSTATUS +gcoOS_DrawPixmap( + IN HALNativeDisplayType Display, + IN HALNativePixmapType Pixmap, + IN gctINT Left, + IN gctINT Top, + IN gctINT Right, + IN gctINT Bottom, + IN gctINT Width, + IN gctINT Height, + IN gctINT BitsPerPixel, + IN gctPOINTER Bits + ); + +gceSTATUS +gcoOS_DestroyPixmap( + IN HALNativeDisplayType Display, + IN HALNativePixmapType Pixmap + ); + +gceSTATUS +gcoOS_GetPixmapInfoEx( + IN HALNativeDisplayType Display, + IN HALNativePixmapType Pixmap, + OUT gctINT * Width, + OUT gctINT * Height, + OUT gctINT * BitsPerPixel, + OUT gctINT * Stride, + OUT gctPOINTER * Bits, + OUT gceSURF_FORMAT * Format + ); + +gceSTATUS +gcoOS_CopyPixmapBits( + IN HALNativeDisplayType Display, + IN HALNativePixmapType Pixmap, + IN gctUINT DstWidth, + IN gctUINT DstHeight, + IN gctINT DstStride, + IN gceSURF_FORMAT DstFormat, + OUT gctPOINTER DstBits + ); + +/******************************************************************************* +** OS relative. 
**************************************************************** +*/ +gceSTATUS +gcoOS_LoadEGLLibrary( + OUT gctHANDLE * Handle + ); + +gceSTATUS +gcoOS_FreeEGLLibrary( + IN gctHANDLE Handle + ); + +gceSTATUS +gcoOS_ShowWindow( + IN HALNativeDisplayType Display, + IN HALNativeWindowType Window + ); + +gceSTATUS +gcoOS_HideWindow( + IN HALNativeDisplayType Display, + IN HALNativeWindowType Window + ); + +gceSTATUS +gcoOS_SetWindowTitle( + IN HALNativeDisplayType Display, + IN HALNativeWindowType Window, + IN gctCONST_STRING Title + ); + +gceSTATUS +gcoOS_CapturePointer( + IN HALNativeDisplayType Display, + IN HALNativeWindowType Window + ); + +gceSTATUS +gcoOS_GetEvent( + IN HALNativeDisplayType Display, + IN HALNativeWindowType Window, + OUT halEvent * Event + ); + +gceSTATUS +gcoOS_CreateClientBuffer( + IN gctINT Width, + IN gctINT Height, + IN gctINT Format, + IN gctINT Type, + OUT gctPOINTER * ClientBuffer + ); + +gceSTATUS +gcoOS_GetClientBufferInfo( + IN gctPOINTER ClientBuffer, + OUT gctINT * Width, + OUT gctINT * Height, + OUT gctINT * Stride, + OUT gctPOINTER * Bits + ); + +gceSTATUS +gcoOS_DestroyClientBuffer( + IN gctPOINTER ClientBuffer + ); + +gceSTATUS +gcoOS_DestroyContext( + IN gctPOINTER Display, + IN gctPOINTER Context + ); + +gceSTATUS +gcoOS_CreateContext( + IN gctPOINTER LocalDisplay, + IN gctPOINTER Context + ); + +gceSTATUS +gcoOS_MakeCurrent( + IN gctPOINTER LocalDisplay, + IN HALNativeWindowType DrawDrawable, + IN HALNativeWindowType ReadDrawable, + IN gctPOINTER Context, + IN gcoSURF ResolveTarget + ); + +gceSTATUS +gcoOS_CreateDrawable( + IN gctPOINTER LocalDisplay, + IN HALNativeWindowType Drawable + ); + +gceSTATUS +gcoOS_DestroyDrawable( + IN gctPOINTER LocalDisplay, + IN HALNativeWindowType Drawable + ); +gceSTATUS +gcoOS_SwapBuffers( + IN gctPOINTER LocalDisplay, + IN HALNativeWindowType Drawable, + IN gcoSURF RenderTarget, + IN gcoSURF ResolveTarget, + IN gctPOINTER ResolveBits, + OUT gctUINT *Width, + OUT gctUINT *Height + ); + 
+gceSTATUS +gcoOS_ResizeWindow( + IN gctPOINTER localDisplay, + IN HALNativeWindowType Drawable, + IN gctUINT Width, + IN gctUINT Height + ); + +gceSTATUS +gcoOS_RSForSwap( + IN gctPOINTER localDisplay, + IN HALNativeWindowType Drawable, + IN gctPOINTER resolve + ); + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_eglplatform_h_ */ + + + diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_eglplatform_type.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_eglplatform_type.h new file mode 100644 index 000000000000..c35cab5a613b --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_eglplatform_type.h @@ -0,0 +1,326 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#ifndef __gc_hal_eglplatform_type_h_ +#define __gc_hal_eglplatform_type_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/******************************************************************************* +** Events. ********************************************************************* +*/ + +typedef enum _halEventType +{ + /* Keyboard event. */ + HAL_KEYBOARD, + + /* Mouse move event. */ + HAL_POINTER, + + /* Mouse button event. */ + HAL_BUTTON, + + /* Application close event. */ + HAL_CLOSE, + + /* Application window has been updated. */ + HAL_WINDOW_UPDATE +} +halEventType; + +/* Scancodes for keyboard. 
*/ +typedef enum _halKeys +{ + HAL_UNKNOWN = -1, + + HAL_BACKSPACE = 0x08, + HAL_TAB, + HAL_ENTER = 0x0D, + HAL_ESCAPE = 0x1B, + + HAL_SPACE = 0x20, + HAL_SINGLEQUOTE = 0x27, + HAL_PAD_ASTERISK = 0x2A, + HAL_COMMA = 0x2C, + HAL_HYPHEN, + HAL_PERIOD, + HAL_SLASH, + HAL_0, + HAL_1, + HAL_2, + HAL_3, + HAL_4, + HAL_5, + HAL_6, + HAL_7, + HAL_8, + HAL_9, + HAL_SEMICOLON = 0x3B, + HAL_EQUAL = 0x3D, + HAL_A = 0x41, + HAL_B, + HAL_C, + HAL_D, + HAL_E, + HAL_F, + HAL_G, + HAL_H, + HAL_I, + HAL_J, + HAL_K, + HAL_L, + HAL_M, + HAL_N, + HAL_O, + HAL_P, + HAL_Q, + HAL_R, + HAL_S, + HAL_T, + HAL_U, + HAL_V, + HAL_W, + HAL_X, + HAL_Y, + HAL_Z, + HAL_LBRACKET, + HAL_BACKSLASH, + HAL_RBRACKET, + HAL_BACKQUOTE = 0x60, + + HAL_F1 = 0x80, + HAL_F2, + HAL_F3, + HAL_F4, + HAL_F5, + HAL_F6, + HAL_F7, + HAL_F8, + HAL_F9, + HAL_F10, + HAL_F11, + HAL_F12, + + HAL_LCTRL, + HAL_RCTRL, + HAL_LSHIFT, + HAL_RSHIFT, + HAL_LALT, + HAL_RALT, + HAL_CAPSLOCK, + HAL_NUMLOCK, + HAL_SCROLLLOCK, + HAL_PAD_0, + HAL_PAD_1, + HAL_PAD_2, + HAL_PAD_3, + HAL_PAD_4, + HAL_PAD_5, + HAL_PAD_6, + HAL_PAD_7, + HAL_PAD_8, + HAL_PAD_9, + HAL_PAD_HYPHEN, + HAL_PAD_PLUS, + HAL_PAD_SLASH, + HAL_PAD_PERIOD, + HAL_PAD_ENTER, + HAL_SYSRQ, + HAL_PRNTSCRN, + HAL_BREAK, + HAL_UP, + HAL_LEFT, + HAL_RIGHT, + HAL_DOWN, + HAL_HOME, + HAL_END, + HAL_PGUP, + HAL_PGDN, + HAL_INSERT, + HAL_DELETE, + HAL_LWINDOW, + HAL_RWINDOW, + HAL_MENU, + HAL_POWER, + HAL_SLEEP, + HAL_WAKE +} +halKeys; + +/* Structure that defined keyboard mapping. */ +typedef struct _halKeyMap +{ + /* Normal key. */ + halKeys normal; + + /* Extended key. */ + halKeys extended; +} +halKeyMap; + +/* Event structure. */ +typedef struct _halEvent +{ + /* Event type. */ + halEventType type; + + /* Event data union. */ + union _halEventData + { + /* Event data for keyboard. */ + struct _halKeyboard + { + /* Scancode. */ + halKeys scancode; + + /* ASCII characte of the key pressed. */ + char key; + + /* Flag whether the key was pressed (1) or released (0). 
*/ + char pressed; + } + keyboard; + + /* Event data for pointer. */ + struct _halPointer + { + /* Current pointer coordinate. */ + int x; + int y; + } + pointer; + + /* Event data for mouse buttons. */ + struct _halButton + { + /* Left button state. */ + int left; + + /* Middle button state. */ + int middle; + + /* Right button state. */ + int right; + + /* Current pointer coordinate. */ + int x; + int y; + } + button; + } + data; +} +halEvent; + +/* VFK_DISPLAY_INFO structure defining information returned by + vdkGetDisplayInfoEx. */ +typedef struct _halDISPLAY_INFO +{ + /* The size of the display in pixels. */ + int width; + int height; + + /* The stride of the dispay. -1 is returned if the stride is not known + ** for the specified display.*/ + int stride; + + /* The color depth of the display in bits per pixel. */ + int bitsPerPixel; + + /* The logical pointer to the display memory buffer. NULL is returned + ** if the pointer is not known for the specified display. */ + void * logical; + + /* The physical address of the display memory buffer. ~0 is returned + ** if the address is not known for the specified display. */ + unsigned long physical; + + /* Can be wraped as surface. */ + int wrapFB; + + /* FB_MULTI_BUFFER support */ + int multiBuffer; + int backBufferY; + + /* Tiled buffer / tile status support. */ + int tiledBuffer; + int tileStatus; + int compression; + + /* The color info of the display. */ + unsigned int alphaLength; + unsigned int alphaOffset; + unsigned int redLength; + unsigned int redOffset; + unsigned int greenLength; + unsigned int greenOffset; + unsigned int blueLength; + unsigned int blueOffset; + + /* Display flip support. 
*/ + int flip; +} +halDISPLAY_INFO; + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_eglplatform_type_h_ */ + + diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_engine.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_engine.h new file mode 100644 index 000000000000..c920fbc8d423 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_engine.h @@ -0,0 +1,2969 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#ifndef __gc_hal_engine_h_ +#define __gc_hal_engine_h_ + +#include "gc_hal_types.h" +#include "gc_hal_enum.h" + + + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct _gcsSURF_RESOLVE_ARGS +{ + gceHAL_ARG_VERSION version; + + union _gcsSURF_RESOLVE_ARGS_UNION + { + + struct _gcsSURF_RESOLVE_ARG_v2 + { + gctBOOL yInverted; + gctBOOL directCopy; + gctBOOL resample; + gctBOOL bUploadTex; /* used for upload tex.*/ + gctBOOL visualizeDepth; /* convert depth to visible color */ + gcsPOINT srcOrigin; + gcsPOINT dstOrigin; + gcsPOINT rectSize; + gctUINT numSlices; + gceENGINE engine; /* 3DBlit engine */ + gctBOOL gpuOnly; /* need only try HW path.*/ + + gctBOOL dump; /* need dump for verify */ + gctBOOL srcSwizzle; /* src surface format swizzle infomation */ + gctBOOL dstSwizzle; /* dst surface format swizzle infomation */ + gctBOOL srcCompressed; /* src compressed format*/ + gctBOOL dstCompressed; /* dst compressed format*/ + } v2; + } uArgs; +} +gcsSURF_RESOLVE_ARGS; + +typedef struct _gscBUFFER_VIEW +{ + gctUINT32 cmd; +}gcsBUFFER_VIEW, *gcsBUFFER_VIEW_PTR; + +typedef struct _gcsIMAGE_VIEW +{ + gctUINT32 cmd; +}gcsIMAGE_VIEW, *gcsIMAGE_VIEW_PTR; + +#if gcdENABLE_3D +/******************************************************************************\ +****************************** Object Declarations ***************************** +\******************************************************************************/ + +typedef struct _gcoSTREAM * gcoSTREAM; +typedef struct _gcoVERTEX * gcoVERTEX; +typedef struct _gcoTEXTURE * gcoTEXTURE; +typedef struct _gcoINDEX * gcoINDEX; +typedef struct _gcsVERTEX_ATTRIBUTES * gcsVERTEX_ATTRIBUTES_PTR; +typedef struct _gcoVERTEXARRAY * gcoVERTEXARRAY; +typedef struct _gcoBUFOBJ * gcoBUFOBJ; + +#define gcdATTRIBUTE_COUNT 32 +#define gcdVERTEXARRAY_POOL_CAPACITY 32 + +typedef enum _gcePROGRAM_STAGE +{ + gcvPROGRAM_STAGE_VERTEX = 0x0, + 
gcvPROGRAM_STAGE_TCS = 0x1, + gcvPROGRAM_STAGE_TES = 0x2, + gcvPROGRAM_STAGE_GEOMETRY = 0x3, + gcvPROGRAM_STAGE_FRAGMENT = 0x4, + gcvPROGRAM_STAGE_COMPUTE = 0x5, + gcvPROGRAM_STAGE_OPENCL = 0x6, + gcvPROGRAM_STAGE_LAST +} +gcePROGRAM_STAGE; + +typedef enum _gcePROGRAM_STAGE_BIT +{ + gcvPROGRAM_STAGE_VERTEX_BIT = 1 << gcvPROGRAM_STAGE_VERTEX, + gcvPROGRAM_STAGE_TCS_BIT = 1 << gcvPROGRAM_STAGE_TCS, + gcvPROGRAM_STAGE_TES_BIT = 1 << gcvPROGRAM_STAGE_TES, + gcvPROGRAM_STAGE_GEOMETRY_BIT = 1 << gcvPROGRAM_STAGE_GEOMETRY, + gcvPROGRAM_STAGE_FRAGMENT_BIT = 1 << gcvPROGRAM_STAGE_FRAGMENT, + gcvPROGRAM_STAGE_COMPUTE_BIT = 1 << gcvPROGRAM_STAGE_COMPUTE, + gcvPROGRAM_STAGE_OPENCL_BIT = 1 << gcvPROGRAM_STAGE_OPENCL, +} +gcePROGRAM_STAGE_BIT; + + +#define gcvPORGRAM_STAGE_GPIPE (gcvPROGRAM_STAGE_VERTEX_BIT | \ + gcvPROGRAM_STAGE_TCS_BIT | \ + gcvPROGRAM_STAGE_TES_BIT | \ + gcvPROGRAM_STAGE_GEOMETRY_BIT) + +/******************************************************************************\ +********************************* gcoHAL Object ********************************* +\******************************************************************************/ + +gceSTATUS +gcoHAL_QueryShaderCaps( + IN gcoHAL Hal, + OUT gctUINT * UnifiedUniforms, + OUT gctUINT * VertUniforms, + OUT gctUINT * FragUniforms, + OUT gctUINT * Varyings, + OUT gctUINT * ShaderCoreCount, + OUT gctUINT * ThreadCount, + OUT gctUINT * VertInstructionCount, + OUT gctUINT * FragInstructionCount + ); + +gceSTATUS +gcoHAL_QuerySamplerBase( + IN gcoHAL Hal, + OUT gctUINT32 * VertexCount, + OUT gctINT_PTR VertexBase, + OUT gctUINT32 * FragmentCount, + OUT gctINT_PTR FragmentBase + ); + +gceSTATUS +gcoHAL_QueryUniformBase( + IN gcoHAL Hal, + OUT gctUINT32 * VertexBase, + OUT gctUINT32 * FragmentBase + ); + +gceSTATUS +gcoHAL_QueryTextureCaps( + IN gcoHAL Hal, + OUT gctUINT * MaxWidth, + OUT gctUINT * MaxHeight, + OUT gctUINT * MaxDepth, + OUT gctBOOL * Cubic, + OUT gctBOOL * NonPowerOfTwo, + OUT gctUINT * VertexSamplers, + 
OUT gctUINT * PixelSamplers + ); + +gceSTATUS +gcoHAL_QueryTextureMaxAniso( + IN gcoHAL Hal, + OUT gctUINT * MaxAnisoValue + ); + +gceSTATUS +gcoHAL_QueryStreamCaps( + IN gcoHAL Hal, + OUT gctUINT32 * MaxAttributes, + OUT gctUINT32 * MaxStreamStride, + OUT gctUINT32 * NumberOfStreams, + OUT gctUINT32 * Alignment, + OUT gctUINT32 * MaxAttribOffset + ); + +/******************************************************************************\ +********************************* gcoSURF Object ******************************** +\******************************************************************************/ + +/*----------------------------------------------------------------------------*/ +/*--------------------------------- gcoSURF 3D --------------------------------*/ +typedef enum _gceBLIT_FLAG +{ + gcvBLIT_FLAG_SKIP_DEPTH_WRITE = 1 << 0, + gcvBLIT_FLAG_SKIP_STENCIL_WRITE = 1 << 1, +} gceBLIT_FLAG; + +typedef struct _gcsSURF_BLIT_ARGS +{ + gcoSURF srcSurface; + gctINT srcX, srcY, srcZ; + gctINT srcWidth, srcHeight, srcDepth; + gcoSURF dstSurface; + gctINT dstX, dstY, dstZ; + gctINT dstWidth, dstHeight, dstDepth; + gctBOOL xReverse; + gctBOOL yReverse; + gctBOOL scissorTest; + gcsRECT scissor; + gctUINT flags; + gctUINT srcNumSlice, dstNumSlice; +} +gcsSURF_BLIT_ARGS; + + + + +/* Clear flags. */ +typedef enum _gceCLEAR +{ + gcvCLEAR_COLOR = 0x1, + gcvCLEAR_DEPTH = 0x2, + gcvCLEAR_STENCIL = 0x4, + gcvCLEAR_HZ = 0x8, + gcvCLEAR_WITH_GPU_ONLY = 0x100, + gcvCLEAR_WITH_CPU_ONLY = 0x200, + gcvCLEAR_MULTI_SLICES = 0x400, +} +gceCLEAR; + +typedef struct _gcsSURF_CLEAR_ARGS +{ + /* + ** Color to fill the color portion of the framebuffer when clear + ** is called. + */ + struct { + gcuVALUE r; + gcuVALUE g; + gcuVALUE b; + gcuVALUE a; + /* Color has multiple value type so we must specify it. 
*/ + gceVALUE_TYPE valueType; + } color; + + gcuVALUE depth; + gctUINT stencil; + + gctUINT8 stencilMask; /* stencil bit-wise mask */ + gctBOOL depthMask; /* Depth Write Mask */ + gctUINT8 colorMask; /* 4-bit channel Mask: ABGR:MSB->LSB */ + gcsRECT_PTR clearRect; /* NULL means full clear */ + gceCLEAR flags; /* clear flags */ + + gctUINT32 offset; /* Offset in surface to cube/array/3D, obsolete in v2 version */ + +} gcsSURF_CLEAR_ARGS, *gcsSURF_CLEAR_ARGS_PTR; + + +typedef struct _gscSURF_BLITDRAW_BLIT +{ + gcoSURF srcSurface; + gcoSURF dstSurface; + gcsRECT srcRect; + gcsRECT dstRect; + gceTEXTURE_FILTER filterMode; + gctBOOL xReverse; + gctBOOL yReverse; + gctBOOL scissorEnabled; + gcsRECT scissor; +}gscSURF_BLITDRAW_BLIT; + + +typedef enum _gceBLITDRAW_TYPE +{ + gcvBLITDRAW_CLEAR = 0, + gcvBLITDRAW_BLIT = 1, + + /* last number, not a real type */ + gcvBLITDRAW_NUM_TYPE + } +gceBLITDRAW_TYPE; + +typedef enum _gceSPLIT_DRAW_TYPE +{ + gcvSPLIT_DRAW_UNKNOWN = 0x0, + gcvSPLIT_DRAW_1, + gcvSPLIT_DRAW_2, + gcvSPLIT_DRAW_3, + gcvSPLIT_DRAW_XFB, + gcvSPLIT_DRAW_INDEX_FETCH, + gcvSPLIT_DRAW_TCS, + gcvSPLIT_DRAW_WIDE_LINE, + gcvSPLIT_DRAW_STIPPLE, + gcvSPLIT_DRAW_LAST +} +gceSPLIT_DRAW_TYPE; + +typedef gceSTATUS (* gctSPLIT_DRAW_FUNC_PTR)( + IN gctPOINTER gc, + IN gctPOINTER instantDraw, + IN gctPOINTER splitDrawInfo + ); + +typedef struct _gcsSPLIT_DRAW_INFO +{ + gceSPLIT_DRAW_TYPE splitDrawType; + gctSPLIT_DRAW_FUNC_PTR splitDrawFunc; + + union _gcsSPLIT_DRAW_UNION + { + /* This path will split many draw.*/ + struct __gcsSPLIT_DRAW_INFO_TCS + { + gctPOINTER indexPtr; + gctUINT indexPerPatch; + }info_tcs; + + /* This path split into two draw at most. 
+ ** es11 path follow the old code, es30 path + ** add more info parameter to record + */ + struct __gcsSPLIT_DRAW_INFO_INDEX_FETCH + { + gctSIZE_T instanceCount; + gctSIZE_T splitCount; + gcePRIMITIVE splitPrimMode; + gctSIZE_T splitPrimCount; + }info_index_fetch; + }u; +} gcsSPLIT_DRAW_INFO, +*gcsSPLIT_DRAW_INFO_PTR; + +typedef struct _gscSURF_BLITDRAW_ARGS +{ + /* always the fist member */ + gceHAL_ARG_VERSION version; + + union _gcsSURF_BLITDRAW_ARGS_UNION + { + struct _gscSURF_BLITDRAW_ARG_v1 + { + /* Whether it's clear or blit operation, can be extended. */ + gceBLITDRAW_TYPE type; + + union _gscSURF_BLITDRAW_UNION + { + gscSURF_BLITDRAW_BLIT blit; + + struct _gscSURF_BLITDRAW_CLEAR + { + gcsSURF_CLEAR_ARGS clearArgs; + gcoSURF rtSurface; + gcoSURF dsSurface; + } clear; + } u; + } v1; + } uArgs; +} +gcsSURF_BLITDRAW_ARGS; + +typedef struct _gcsSURF_BLITBLT_ARGS +{ + gctCONST_POINTER buf; + gceSURF_FORMAT format; + gctUINT32 stride; + gcoSURF dstSurf; + gcsPOINT dstOrigin; + gcsPOINT rectSize; + gctUINT32 dstOffset; +} +gcsSURF_BLITBLT_ARGS; + + +/* CPU Blit with format (including linear <-> tile) conversion*/ +gceSTATUS +gcoSURF_BlitCPU( + gcsSURF_BLIT_ARGS* args + ); + +/* Copy a rectangular area with format conversion. */ +gceSTATUS +gcoSURF_CopyPixels( + IN gcsSURF_VIEW *SrcView, + IN gcsSURF_VIEW *DstView, + IN gcsSURF_RESOLVE_ARGS *Args + ); + +/* Clear surface function. */ +gceSTATUS +gcoSURF_Clear( + IN gcsSURF_VIEW *SurfView, + IN gcsSURF_CLEAR_ARGS_PTR ClearArgs + ); + +/* Preserve pixels from source. 
*/ +gceSTATUS +gcoSURF_Preserve( + IN gcoSURF SrcSurf, + IN gcoSURF DstSurf, + IN gcsRECT_PTR MaskRect + ); + +/* TO BE REMOVED */ +gceSTATUS +depr_gcoSURF_Resolve( + IN gcoSURF SrcSurface, + IN gcoSURF DestSurface, + IN gctUINT32 DestAddress, + IN gctPOINTER DestBits, + IN gctINT DestStride, + IN gceSURF_TYPE DestType, + IN gceSURF_FORMAT DestFormat, + IN gctUINT DestWidth, + IN gctUINT DestHeight + ); + +gceSTATUS +depr_gcoSURF_ResolveRect( + IN gcoSURF SrcSurface, + IN gcoSURF DstSurface, + IN gctUINT32 DstAddress, + IN gctPOINTER DstBits, + IN gctINT DstStride, + IN gceSURF_TYPE DstType, + IN gceSURF_FORMAT DstFormat, + IN gctUINT DstWidth, + IN gctUINT DstHeight, + IN gcsPOINT_PTR SrcOrigin, + IN gcsPOINT_PTR gcoSURF, + IN gcsPOINT_PTR RectSize + ); + +/* Resample surface. */ +gceSTATUS +gcoSURF_Resample( + IN gcoSURF SrcSurf, + IN gcoSURF DstSurf + ); + +/* Resolve rectangular area of a surface. */ +gceSTATUS +gcoSURF_ResolveRect( + IN gcsSURF_VIEW *SrcView, + IN gcsSURF_VIEW *DstView, + IN gcsSURF_RESOLVE_ARGS *Args + ); + +gceSTATUS +gcoSURF_GetResolveAlignment( + IN gcoSURF Surface, + OUT gctUINT *originX, + OUT gctUINT *originY, + OUT gctUINT *sizeX, + OUT gctUINT *sizeY + ); + +gceSTATUS +gcoSURF_IsHWResolveable( + IN gcoSURF SrcSurf, + IN gcoSURF DstSurf, + IN gcsPOINT_PTR SrcOrigin, + IN gcsPOINT_PTR DstOrigin, + IN gcsPOINT_PTR RectSize + ); + +/* Set surface resolvability. 
*/ +gceSTATUS +gcoSURF_SetResolvability( + IN gcoSURF Surface, + IN gctBOOL Resolvable + ); + +gceSTATUS +gcoSURF_IsRenderable( + IN gcoSURF Surface + ); + +gceSTATUS +gcoSURF_IsFormatRenderableAsRT( + IN gcoSURF Surface + ); + +gceSTATUS +gcoBUFOBJ_GetFence( + IN gcoBUFOBJ BufObj, + IN gceFENCE_TYPE Type + ); + +gceSTATUS +gcoBUFOBJ_WaitFence( + IN gcoBUFOBJ BufObj, + IN gceFENCE_TYPE Type + ); + +gceSTATUS +gcoBUFOBJ_IsFenceEnabled( + IN gcoBUFOBJ BufObj + ); + +gceSTATUS +gcoSURF_GetFence( + IN gcoSURF Surface, + IN gceFENCE_TYPE Type + ); + +gceSTATUS +gcoSURF_WaitFence( + IN gcoSURF Surface + ); + +gceSTATUS +gcoSTREAM_GetFence( + IN gcoSTREAM stream + ); + +gceSTATUS +gcoSTREAM_WaitFence( + IN gcoSTREAM stream + ); + +gceSTATUS +gcoINDEX_GetFence( + IN gcoINDEX Index + ); + +gceSTATUS +gcoINDEX_WaitFence( + IN gcoINDEX Index, + IN gceFENCE_TYPE Type + ); + +gceSTATUS +gcoSURF_DrawBlit( + gcsSURF_VIEW *SrcView, + gcsSURF_VIEW *DstView, + gscSURF_BLITDRAW_BLIT *Args + ); + + +/******************************************************************************\ +******************************** gcoINDEX Object ******************************* +\******************************************************************************/ + +/* Construct a new gcoINDEX object. */ +gceSTATUS +gcoINDEX_Construct( + IN gcoHAL Hal, + OUT gcoINDEX * Index + ); + +/* Destroy a gcoINDEX object. */ +gceSTATUS +gcoINDEX_Destroy( + IN gcoINDEX Index + ); + +/* Lock index in memory. */ +gceSTATUS +gcoINDEX_Lock( + IN gcoINDEX Index, + OUT gctUINT32 * Address, + OUT gctPOINTER * Memory + ); + +/* Unlock index that was previously locked with gcoINDEX_Lock. */ +gceSTATUS +gcoINDEX_Unlock( + IN gcoINDEX Index + ); + +/* Upload index data into the memory. */ +gceSTATUS +gcoINDEX_Load( + IN gcoINDEX Index, + IN gceINDEX_TYPE IndexType, + IN gctUINT32 IndexCount, + IN gctPOINTER IndexBuffer + ); + +/* Bind an index object to the hardware. 
*/ +gceSTATUS +gcoINDEX_Bind( + IN gcoINDEX Index, + IN gceINDEX_TYPE Type + ); + +/* Bind an index object to the hardware. */ +gceSTATUS +gcoINDEX_BindOffset( + IN gcoINDEX Index, + IN gceINDEX_TYPE Type, + IN gctUINT32 Offset + ); + +/* Free existing index buffer. */ +gceSTATUS +gcoINDEX_Free( + IN gcoINDEX Index + ); + +/* Upload data into an index buffer. */ +gceSTATUS +gcoINDEX_Upload( + IN gcoINDEX Index, + IN gctCONST_POINTER Buffer, + IN gctSIZE_T Bytes + ); + +/* Upload data into an index buffer starting at an offset. */ +gceSTATUS +gcoINDEX_UploadOffset( + IN gcoINDEX Index, + IN gctSIZE_T Offset, + IN gctCONST_POINTER Buffer, + IN gctSIZE_T Bytes + ); + +/*Merge index2 to index1 from 0, index2 must subset of inex1*/ +gceSTATUS +gcoINDEX_Merge( + IN gcoINDEX Index1, + IN gcoINDEX Index2 + ); + +/*check if index buffer is enough for this draw*/ +gctBOOL +gcoINDEX_CheckRange( + IN gcoINDEX Index, + IN gceINDEX_TYPE Type, + IN gctINT Count, + IN gctUINT32 Indices + ); + +/* Query the index capabilities. */ +gceSTATUS +gcoINDEX_QueryCaps( + OUT gctBOOL * Index8, + OUT gctBOOL * Index16, + OUT gctBOOL * Index32, + OUT gctUINT * MaxIndex + ); + +/* Determine the index range in the current index buffer. */ +gceSTATUS +gcoINDEX_GetIndexRange( + IN gcoINDEX Index, + IN gceINDEX_TYPE Type, + IN gctUINT32 Offset, + IN gctUINT32 Count, + OUT gctUINT32 * MinimumIndex, + OUT gctUINT32 * MaximumIndex + ); + +/* Dynamic buffer management. */ +gceSTATUS +gcoINDEX_SetDynamic( + IN gcoINDEX Index, + IN gctSIZE_T Bytes, + IN gctUINT Buffers + ); + +gceSTATUS +gcoCLHardware_Construct(void); +/******************************************************************************\ +********************************** gco3D Object ********************************* +\******************************************************************************/ + +/* Blending targets. 
*/ +typedef enum _gceBLEND_UNIT +{ + gcvBLEND_SOURCE, + gcvBLEND_TARGET, +} +gceBLEND_UNIT; + +/* Construct a new gco3D object. */ +gceSTATUS +gco3D_Construct( + IN gcoHAL Hal, + IN gctBOOL Robust, + OUT gco3D * Engine + ); + +/* Destroy an gco3D object. */ +gceSTATUS +gco3D_Destroy( + IN gco3D Engine + ); + +/* Set 3D API type. */ +gceSTATUS +gco3D_SetAPI( + IN gco3D Engine, + IN gceAPI ApiType + ); + +/* Get 3D API type. */ +gceSTATUS +gco3D_GetAPI( + IN gco3D Engine, + OUT gceAPI * ApiType + ); + +gceSTATUS +gco3D_SetTarget( + IN gco3D Engine, + IN gctUINT32 TargetIndex, + IN gcsSURF_VIEW *SurfView, + IN gctUINT32 LayerIndex + ); + +gceSTATUS +gco3D_UnsetTarget( + IN gco3D Engine, + IN gctUINT32 TargetIndex, + IN gcoSURF Surface + ); + +gceSTATUS +gco3D_SetPSOutputMapping( + IN gco3D Engine, + IN gctINT32 * psOutputMapping + ); + +gceSTATUS +gco3D_SetRenderLayered( + IN gco3D Engine, + IN gctBOOL Enable, + IN gctUINT MaxLayers + ); + +gceSTATUS +gco3D_SetShaderLayered( + IN gco3D Engine, + IN gctBOOL Enable + ); + +gceSTATUS +gco3D_IsProgramSwitched( + IN gco3D Engine + ); + +/* Set depth buffer. */ +gceSTATUS +gco3D_SetDepth( + IN gco3D Engine, + IN gcsSURF_VIEW *SurfView + ); + +/* Unset depth buffer. */ +gceSTATUS +gco3D_UnsetDepth( + IN gco3D Engine, + IN gcoSURF Surface + ); + +/* Set viewport. */ +gceSTATUS +gco3D_SetViewport( + IN gco3D Engine, + IN gctINT32 Left, + IN gctINT32 Top, + IN gctINT32 Right, + IN gctINT32 Bottom + ); + +/* Set scissors. */ +gceSTATUS +gco3D_SetScissors( + IN gco3D Engine, + IN gctINT32 Left, + IN gctINT32 Top, + IN gctINT32 Right, + IN gctINT32 Bottom + ); + +/* Set clear color. */ +gceSTATUS +gco3D_SetClearColor( + IN gco3D Engine, + IN gctUINT8 Red, + IN gctUINT8 Green, + IN gctUINT8 Blue, + IN gctUINT8 Alpha + ); + +/* Set fixed point clear color. 
*/ +gceSTATUS +gco3D_SetClearColorX( + IN gco3D Engine, + IN gctFIXED_POINT Red, + IN gctFIXED_POINT Green, + IN gctFIXED_POINT Blue, + IN gctFIXED_POINT Alpha + ); + +/* Set floating point clear color. */ +gceSTATUS +gco3D_SetClearColorF( + IN gco3D Engine, + IN gctFLOAT Red, + IN gctFLOAT Green, + IN gctFLOAT Blue, + IN gctFLOAT Alpha + ); + +/* Set fixed point clear depth. */ +gceSTATUS +gco3D_SetClearDepthX( + IN gco3D Engine, + IN gctFIXED_POINT Depth + ); + +/* Set floating point clear depth. */ +gceSTATUS +gco3D_SetClearDepthF( + IN gco3D Engine, + IN gctFLOAT Depth + ); + +/* Set clear stencil. */ +gceSTATUS +gco3D_SetClearStencil( + IN gco3D Engine, + IN gctUINT32 Stencil + ); + +/* Set shading mode. */ +gceSTATUS +gco3D_SetShading( + IN gco3D Engine, + IN gceSHADING Shading + ); + +/* Set blending mode. */ +gceSTATUS +gco3D_EnableBlending( + IN gco3D Engine, + IN gctBOOL Enable + ); + +/* Set blending function. */ +gceSTATUS +gco3D_SetBlendFunction( + IN gco3D Engine, + IN gceBLEND_UNIT Unit, + IN gceBLEND_FUNCTION FunctionRGB, + IN gceBLEND_FUNCTION FunctionAlpha + ); + +/* Set blending mode. */ +gceSTATUS +gco3D_SetBlendMode( + IN gco3D Engine, + IN gceBLEND_MODE ModeRGB, + IN gceBLEND_MODE ModeAlpha + ); + +/* Set blending mode for separate rt control */ +gceSTATUS +gco3D_EnableBlendingIndexed( + IN gco3D Engine, + IN gctUINT Index, + IN gctBOOL Enable + ); + +/* Set blending function for separate rt control */ +gceSTATUS +gco3D_SetBlendFunctionIndexed( + IN gco3D Engine, + IN gctUINT Index, + IN gceBLEND_UNIT Unit, + IN gceBLEND_FUNCTION FunctionRGB, + IN gceBLEND_FUNCTION FunctionAlpha + ); + +/* Set blending mode for separate rt control*/ +gceSTATUS +gco3D_SetBlendModeIndexed( + IN gco3D Engine, + IN gctUINT Index, + IN gceBLEND_MODE ModeRGB, + IN gceBLEND_MODE ModeAlpha + ); + +/* Set blending color. 
*/ +gceSTATUS +gco3D_SetBlendColor( + IN gco3D Engine, + IN gctUINT Red, + IN gctUINT Green, + IN gctUINT Blue, + IN gctUINT Alpha + ); + +/* Set fixed point blending color. */ +gceSTATUS +gco3D_SetBlendColorX( + IN gco3D Engine, + IN gctFIXED_POINT Red, + IN gctFIXED_POINT Green, + IN gctFIXED_POINT Blue, + IN gctFIXED_POINT Alpha + ); + +/* Set floating point blending color. */ +gceSTATUS +gco3D_SetBlendColorF( + IN gco3D Engine, + IN gctFLOAT Red, + IN gctFLOAT Green, + IN gctFLOAT Blue, + IN gctFLOAT Alpha + ); + +/* Set culling mode. */ +gceSTATUS +gco3D_SetCulling( + IN gco3D Engine, + IN gceCULL Mode + ); + +/* Enable point size */ +gceSTATUS +gco3D_SetPointSizeEnable( + IN gco3D Engine, + IN gctBOOL Enable + ); + +/* Set point sprite */ +gceSTATUS +gco3D_SetPointSprite( + IN gco3D Engine, + IN gctBOOL Enable + ); + + +/* Enable/Disable primitive-id. */ +gceSTATUS +gco3D_SetPrimitiveIdEnable( + IN gco3D Engine, + IN gctBOOL Enable + ); + +/* Set fill mode. */ +gceSTATUS +gco3D_SetFill( + IN gco3D Engine, + IN gceFILL Mode + ); + +/* Set depth compare mode. */ +gceSTATUS +gco3D_SetDepthCompare( + IN gco3D Engine, + IN gceCOMPARE Compare + ); + +/* Enable depth writing. */ +gceSTATUS +gco3D_EnableDepthWrite( + IN gco3D Engine, + IN gctBOOL Enable + ); + +/* Set depth mode. */ +gceSTATUS +gco3D_SetDepthMode( + IN gco3D Engine, + IN gceDEPTH_MODE Mode + ); + +/* Set depth range. */ +gceSTATUS +gco3D_SetDepthRangeX( + IN gco3D Engine, + IN gceDEPTH_MODE Mode, + IN gctFIXED_POINT Near, + IN gctFIXED_POINT Far + ); + +/* Set depth range. 
*/ +gceSTATUS +gco3D_SetDepthRangeF( + IN gco3D Engine, + IN gceDEPTH_MODE Mode, + IN gctFLOAT Near, + IN gctFLOAT Far + ); + +/* Set last pixel enable */ +gceSTATUS +gco3D_SetLastPixelEnable( + IN gco3D Engine, + IN gctBOOL Enable + ); + +/* Set depth Bias and Scale */ +gceSTATUS +gco3D_SetDepthScaleBiasX( + IN gco3D Engine, + IN gctFIXED_POINT DepthScale, + IN gctFIXED_POINT DepthBias + ); + +gceSTATUS +gco3D_SetDepthScaleBiasF( + IN gco3D Engine, + IN gctFLOAT DepthScale, + IN gctFLOAT DepthBias + ); + +/* Set depth near and far clipping plane. */ +gceSTATUS +gco3D_SetDepthPlaneF( + IN gco3D Engine, + IN gctFLOAT Near, + IN gctFLOAT Far + ); + +/* Enable or disable dithering. */ +gceSTATUS +gco3D_EnableDither( + IN gco3D Engine, + IN gctBOOL Enable + ); + +/* Set color write enable bits. */ +gceSTATUS +gco3D_SetColorWrite( + IN gco3D Engine, + IN gctUINT8 Enable + ); + +/* Set color write enable bits for separate rt control */ +gceSTATUS +gco3D_SetColorWriteIndexed( + IN gco3D Engine, + IN gctUINT Index, + IN gctUINT8 Enable + ); + +/* Enable or disable early depth. */ +gceSTATUS +gco3D_SetEarlyDepth( + IN gco3D Engine, + IN gctBOOL Enable + ); + +/* Deprecated: Enable or disable all early depth operations. */ +gceSTATUS +gco3D_SetAllEarlyDepthModes( + IN gco3D Engine, + IN gctBOOL Disable + ); + + +gceSTATUS +gco3D_SetEarlyDepthFromAPP( + IN gco3D Engine, + IN gctBOOL EarlyDepthFromAPP + ); + +gceSTATUS +gco3D_SetRADepthWrite( + IN gco3D Engine, + IN gctBOOL Disable, + IN gctBOOL psReadZ, + IN gctBOOL psReadW + ); + +gceSTATUS +gco3D_SetPatchVertices( + IN gco3D Engine, + IN gctINT PatchVertices + ); + + +/* Switch dynamic early mode */ +gceSTATUS +gco3D_SwitchDynamicEarlyDepthMode( + IN gco3D Engine + ); + +/* Set dynamic early mode */ +gceSTATUS +gco3D_DisableDynamicEarlyDepthMode( + IN gco3D Engine, + IN gctBOOL Disable + ); + +/* Enable or disable depth-only mode. 
*/ +gceSTATUS +gco3D_SetDepthOnly( + IN gco3D Engine, + IN gctBOOL Enable + ); + +typedef struct _gcsSTENCIL_INFO * gcsSTENCIL_INFO_PTR; +typedef struct _gcsSTENCIL_INFO +{ + gceSTENCIL_MODE mode; + + gctUINT8 maskFront; + gctUINT8 maskBack; + gctUINT8 writeMaskFront; + gctUINT8 writeMaskBack; + + gctUINT8 referenceFront; + + gceCOMPARE compareFront; + gceSTENCIL_OPERATION passFront; + gceSTENCIL_OPERATION failFront; + gceSTENCIL_OPERATION depthFailFront; + + gctUINT8 referenceBack; + gceCOMPARE compareBack; + gceSTENCIL_OPERATION passBack; + gceSTENCIL_OPERATION failBack; + gceSTENCIL_OPERATION depthFailBack; +} +gcsSTENCIL_INFO; + +/* Set stencil mode. */ +gceSTATUS +gco3D_SetStencilMode( + IN gco3D Engine, + IN gceSTENCIL_MODE Mode + ); + +/* Set stencil mask. */ +gceSTATUS +gco3D_SetStencilMask( + IN gco3D Engine, + IN gctUINT8 Mask + ); + +/* Set stencil back mask. */ +gceSTATUS +gco3D_SetStencilMaskBack( + IN gco3D Engine, + IN gctUINT8 Mask + ); + +/* Set stencil write mask. */ +gceSTATUS +gco3D_SetStencilWriteMask( + IN gco3D Engine, + IN gctUINT8 Mask + ); + +/* Set stencil back write mask. */ +gceSTATUS +gco3D_SetStencilWriteMaskBack( + IN gco3D Engine, + IN gctUINT8 Mask + ); + +/* Set stencil reference. */ +gceSTATUS +gco3D_SetStencilReference( + IN gco3D Engine, + IN gctUINT8 Reference, + IN gctBOOL Front + ); + +/* Set stencil compare. */ +gceSTATUS +gco3D_SetStencilCompare( + IN gco3D Engine, + IN gceSTENCIL_WHERE Where, + IN gceCOMPARE Compare + ); + +/* Set stencil operation on pass. */ +gceSTATUS +gco3D_SetStencilPass( + IN gco3D Engine, + IN gceSTENCIL_WHERE Where, + IN gceSTENCIL_OPERATION Operation + ); + +/* Set stencil operation on fail. */ +gceSTATUS +gco3D_SetStencilFail( + IN gco3D Engine, + IN gceSTENCIL_WHERE Where, + IN gceSTENCIL_OPERATION Operation + ); + +/* Set stencil operation on depth fail. 
*/ +gceSTATUS +gco3D_SetStencilDepthFail( + IN gco3D Engine, + IN gceSTENCIL_WHERE Where, + IN gceSTENCIL_OPERATION Operation + ); + +/* Set all stencil states in one blow. */ +gceSTATUS +gco3D_SetStencilAll( + IN gco3D Engine, + IN gcsSTENCIL_INFO_PTR Info + ); + +typedef struct _gcsALPHA_INFO * gcsALPHA_INFO_PTR; +typedef struct _gcsALPHA_INFO +{ + /* Alpha test states. */ + gctBOOL test; + gceCOMPARE compare; + gctUINT8 reference; + gctFLOAT floatReference; + + /* Alpha blending states. */ + gctBOOL blend[gcdMAX_DRAW_BUFFERS]; + + gceBLEND_FUNCTION srcFuncColor[gcdMAX_DRAW_BUFFERS]; + gceBLEND_FUNCTION srcFuncAlpha[gcdMAX_DRAW_BUFFERS]; + gceBLEND_FUNCTION trgFuncColor[gcdMAX_DRAW_BUFFERS]; + gceBLEND_FUNCTION trgFuncAlpha[gcdMAX_DRAW_BUFFERS]; + + gceBLEND_MODE modeColor[gcdMAX_DRAW_BUFFERS]; + gceBLEND_MODE modeAlpha[gcdMAX_DRAW_BUFFERS]; + + gctUINT32 color; + + gctBOOL anyBlendEnabled; +} +gcsALPHA_INFO; + + +/* Enable or disable alpha test. */ +gceSTATUS +gco3D_SetAlphaTest( + IN gco3D Engine, + IN gctBOOL Enable + ); + +/* Set alpha test compare. */ +gceSTATUS +gco3D_SetAlphaCompare( + IN gco3D Engine, + IN gceCOMPARE Compare + ); + +/* Set alpha test reference in unsigned integer. */ +gceSTATUS +gco3D_SetAlphaReference( + IN gco3D Engine, + IN gctUINT8 Reference, + IN gctFLOAT FloatReference + ); + +/* Set alpha test reference in fixed point. */ +gceSTATUS +gco3D_SetAlphaReferenceX( + IN gco3D Engine, + IN gctFIXED_POINT Reference + ); + +/* Set alpha test reference in floating point. */ +gceSTATUS +gco3D_SetAlphaReferenceF( + IN gco3D Engine, + IN gctFLOAT Reference + ); + +#if gcdALPHA_KILL_IN_SHADER +gceSTATUS +gco3D_SetAlphaKill( + IN gco3D Engine, + IN gctBOOL AlphaKill, + IN gctBOOL ColorKill + ); +#endif + +/* Enable/Disable anti-alias line. */ +gceSTATUS +gco3D_SetAntiAliasLine( + IN gco3D Engine, + IN gctBOOL Enable + ); + +/* Set texture slot for anti-alias line. 
*/ +gceSTATUS +gco3D_SetAALineTexSlot( + IN gco3D Engine, + IN gctUINT TexSlot + ); + +/* Set anti-alias line width scale. */ +gceSTATUS +gco3D_SetAALineWidth( + IN gco3D Engine, + IN gctFLOAT Width + ); + +/* Draw a number of primitives. */ +gceSTATUS +gco3D_DrawPrimitives( + IN gco3D Engine, + IN gcePRIMITIVE Type, + IN gctSIZE_T StartVertex, + IN gctSIZE_T PrimitiveCount + ); + +gceSTATUS +gco3D_DrawIndirectPrimitives( + IN gco3D Engine, + IN gcePRIMITIVE Type, + IN gctBOOL DrawIndex, + IN gctINT BaseOffset, + IN gcoBUFOBJ BufObj + ); + +gceSTATUS +gco3D_MultiDrawIndirectPrimitives( + IN gco3D Engine, + IN gcePRIMITIVE Type, + IN gctBOOL DrawIndex, + IN gctINT BaseOffset, + IN gctINT DrawCount, + IN gctINT Stride, + IN gcoBUFOBJ BufObj + ); + +gceSTATUS +gco3D_DrawInstancedPrimitives( + IN gco3D Engine, + IN gcePRIMITIVE Type, + IN gctBOOL DrawIndex, + IN gctINT StartVertex, + IN gctSIZE_T StartIndex, + IN gctSIZE_T PrimitiveCount, + IN gctSIZE_T VertexCount, + IN gctSIZE_T InstanceCount + ); + +gceSTATUS +gco3D_DrawNullPrimitives( + IN gco3D Engine + ); + +gceSTATUS +gco3D_DrawPrimitivesCount( + IN gco3D Engine, + IN gcePRIMITIVE Type, + IN gctINT* StartVertex, + IN gctSIZE_T* VertexCount, + IN gctSIZE_T PrimitiveCount + ); + + +/* Draw a number of primitives using offsets. */ +gceSTATUS +gco3D_DrawPrimitivesOffset( + IN gco3D Engine, + IN gcePRIMITIVE Type, + IN gctINT32 StartOffset, + IN gctSIZE_T PrimitiveCount + ); + +/* Draw a number of indexed primitives. */ +gceSTATUS +gco3D_DrawIndexedPrimitives( + IN gco3D Engine, + IN gcePRIMITIVE Type, + IN gctSIZE_T BaseVertex, + IN gctSIZE_T StartIndex, + IN gctSIZE_T PrimitiveCount + ); + +/* Draw a number of indexed primitives using offsets. 
*/ +gceSTATUS +gco3D_DrawIndexedPrimitivesOffset( + IN gco3D Engine, + IN gcePRIMITIVE Type, + IN gctINT32 BaseOffset, + IN gctINT32 StartOffset, + IN gctSIZE_T PrimitiveCount + ); + +/* Draw a element from pattern */ +gceSTATUS +gco3D_DrawPattern( + IN gco3D Engine, + IN gcsFAST_FLUSH_PTR FastFlushInfo + ); + +/* Enable or disable anti-aliasing. */ +gceSTATUS +gco3D_SetAntiAlias( + IN gco3D Engine, + IN gctBOOL Enable + ); + +/* Set msaa samples */ +gceSTATUS +gco3D_SetSamples( + IN gco3D Engine, + IN gctUINT32 Samples + ); + + +/* Write data into the command buffer. */ +gceSTATUS +gco3D_WriteBuffer( + IN gco3D Engine, + IN gctCONST_POINTER Data, + IN gctSIZE_T Bytes, + IN gctBOOL Aligned + ); + +/* Send sempahore and stall until sempahore is signalled. */ +gceSTATUS +gco3D_Semaphore( + IN gco3D Engine, + IN gceWHERE From, + IN gceWHERE To, + IN gceHOW How); + + +/* Explicitly flush shader L1 cache */ +gceSTATUS +gco3D_FlushSHL1Cache( + IN gco3D Engine + ); + +/* Set the subpixels center. 
*/ +gceSTATUS +gco3D_SetCentroids( + IN gco3D Engine, + IN gctUINT32 Index, + IN gctPOINTER Centroids + ); + +/* query msaa sample coordinates */ +gceSTATUS +gco3D_GetSampleCoords( + IN gco3D Engine, + IN gctUINT32 SampleIndex, + IN gctBOOL yInverted, + OUT gctFLOAT_PTR Coords + ); + +gceSTATUS +gco3D_SetLogicOp( + IN gco3D Engine, + IN gctUINT8 Rop + ); + +typedef enum _gceXfbCmd +{ + gcvXFBCMD_BEGIN = 0, + gcvXFBCMD_PAUSE = 1, + gcvXFBCMD_RESUME = 2, + gcvXFBCMD_END = 3, + gcvXFBCMD_PAUSE_INCOMMIT = 4, + gcvXFBCMD_RESUME_INCOMMIT = 5, + gcvXFBCMD_INVALID = 6, +} +gceXfbCmd; + +typedef enum _gceXfbStatus +{ + gcvXFB_Disabled = 0, + gcvXFB_Paused, + gcvXFB_Enabled, +} +gceXfbStatus; + +typedef enum _gceQueryStatus +{ + gcvQUERY_Disabled = 0, + gcvQUERY_Paused = 1, + gcvQUERY_Enabled = 2, +} +gceQueryStatus; + +typedef enum _gceQueryCmd +{ + gcvQUERYCMD_BEGIN = 0, + gcvQUERYCMD_PAUSE = 1, + gcvQUERYCMD_RESUME = 2, + gcvQUERYCMD_END = 3, + gcvQUERYCMD_INVALID = 4, +} +gceQueryCmd; + +typedef enum _gceQueryType +{ + gcvQUERY_OCCLUSION = 0, + gcvQUERY_XFB_WRITTEN = 1, + gcvQUERY_PRIM_GENERATED = 2, + gcvQUERY_MAX_NUM = 3, +} +gceQueryType; + +gceSTATUS +gco3D_SetQuery( + IN gco3D Engine, + IN gctUINT32 QueryHeader, + IN gceQueryType Type, + IN gctBOOL Enable + ); + +gceSTATUS +gco3D_GetQuery( + IN gco3D Engine, + IN gceQueryType Type, + IN gcsSURF_NODE_PTR Node, + IN gctUINT32 Size, + IN gctPOINTER Locked, + OUT gctINT32 * Index + ); + +gceSTATUS +gco3D_SetXfbHeader( + IN gco3D Engine, + IN gctUINT32 Physical + ); + +gceSTATUS +gco3D_SetXfbBuffer( + IN gco3D Engine, + IN gctUINT32 Index, + IN gctUINT32 BufferAddr, + IN gctUINT32 BufferStride, + IN gctUINT32 BufferSize + ); + +gceSTATUS +gco3D_SetXfbCmd( + IN gco3D Engine, + IN gceXfbCmd Cmd + ); + +gceSTATUS +gco3D_SetRasterDiscard( + IN gco3D Engine, + IN gctBOOL Enable + ); + +gceSTATUS +gco3D_SetColorOutCount( + IN gco3D Engine, + IN gctUINT32 ColorOutCount + ); + +gceSTATUS +gco3D_SetColorCacheMode( + IN gco3D 
Engine + ); + +gceSTATUS +gco3D_Set3DEngine( + IN gco3D Engine + ); + +gceSTATUS +gco3D_UnSet3DEngine( + IN gco3D Engine + ); + +gceSTATUS +gco3D_Get3DEngine( + OUT gco3D * Engine + ); + +gceSTATUS +gco3D_QueryReset( + IN gco3D Engine, + OUT gctBOOL_PTR Innocent + ); + +/* OCL thread walker information. */ +typedef struct _gcsTHREAD_WALKER_INFO * gcsTHREAD_WALKER_INFO_PTR; +typedef struct _gcsTHREAD_WALKER_INFO +{ + gctUINT32 dimensions; + gctUINT32 traverseOrder; + gctUINT32 enableSwathX; + gctUINT32 enableSwathY; + gctUINT32 enableSwathZ; + gctUINT32 swathSizeX; + gctUINT32 swathSizeY; + gctUINT32 swathSizeZ; + gctUINT32 valueOrder; + + gctUINT32 globalSizeX; + gctUINT32 globalOffsetX; + gctUINT32 globalSizeY; + gctUINT32 globalOffsetY; + gctUINT32 globalSizeZ; + gctUINT32 globalOffsetZ; + + gctUINT32 globalScaleX; + gctUINT32 globalScaleY; + gctUINT32 globalScaleZ; + + gctUINT32 workGroupSizeX; + gctUINT32 workGroupCountX; + gctUINT32 workGroupSizeY; + gctUINT32 workGroupCountY; + gctUINT32 workGroupSizeZ; + gctUINT32 workGroupCountZ; + + gctUINT32 threadAllocation; + gctBOOL barrierUsed; + + gctBOOL indirect; + gctUINT32 groupNumberUniformIdx; + gctUINT32 baseAddress; +} +gcsTHREAD_WALKER_INFO; + +#if gcdENABLE_3D && gcdUSE_VX +/* VX thread walker parameters. 
*/ +typedef struct _gcsVX_THREAD_WALKER_PARAMETERS * gcsVX_THREAD_WALKER_PARAMETERS_PTR; + +typedef struct _gcsVX_THREAD_WALKER_PARAMETERS +{ + gctUINT32 valueOrder; + gctUINT32 workDim; + + gctUINT32 workGroupSizeX; + gctUINT32 workGroupCountX; + + gctUINT32 workGroupSizeY; + gctUINT32 workGroupCountY; + + gctUINT32 globalOffsetX; + gctUINT32 globalScaleX; + + gctUINT32 globalOffsetY; + gctUINT32 globalScaleY; + +#if gcdVX_OPTIMIZER > 1 + gctBOOL tileMode; +#endif +} +gcsVX_THREAD_WALKER_PARAMETERS; + +typedef struct _gcsVX_IMAGE_INFO * gcsVX_IMAGE_INFO_PTR; + +typedef struct _gcsVX_IMAGE_INFO +{ + gctUINT32 format; + gctUINT32 rect[4]; + gctUINT32 width; + gctUINT32 height; + + /*arraySize, sliceSize is for imageArray / image3D */ + gctUINT32 arraySize; + gctUINT32 sliceSize; + + gctUINT32 bpp; + gctUINT32 planes; + gctUINT32 componentCount; + gctBOOL isFloat; + + gctUINT32 uPixels; + gctUINT32 vPixels; + gceSURF_FORMAT internalFormat; + gctUINT32 border; + + /*vx_imagepatch_addressing_t == (gctUINT32 * 8) */ + gctUINT32 imagepatch[8 * 3]; + void *base_addr[3]; + + gctUINT32 stride[3]; + + gctPOINTER logicals[3]; + gctUINT32 physicals[3]; + gctUINT32 bytes; + + gcsSURF_NODE_PTR nodes[3]; + + gctBOOL isVXC; +#if gcdVX_OPTIMIZER + gctUINT32 uniformData[3][4]; +#endif +} +gcsVX_IMAGE_INFO; +typedef struct _gcsVX_DISTRIBUTION_INFO * gcsVX_DISTRIBUTION_INFO_PTR; + +typedef struct _gcsVX_DISTRIBUTION_INFO +{ + + gctUINT32 logical; + gctUINT32 physical; + gctUINT32 bytes; + + gcsSURF_NODE_PTR node; +} +gcsVX_DISTRIBUTION_INFO; +#endif + +/* Start OCL thread walker. */ +gceSTATUS +gco3D_InvokeThreadWalker( + IN gco3D Engine, + IN gcsTHREAD_WALKER_INFO_PTR Info + ); + +gceSTATUS +gco3D_GetClosestRenderFormat( + IN gco3D Engine, + IN gceSURF_FORMAT InFormat, + OUT gceSURF_FORMAT* OutFormat + ); + +/* Set w clip and w plane limit value. 
*/ +gceSTATUS +gco3D_SetWClipEnable( + IN gco3D Engine, + IN gctBOOL Enable + ); + +gceSTATUS +gco3D_GetWClipEnable( + IN gco3D Engine, + OUT gctBOOL * Enable + ); + +gceSTATUS +gco3D_SetWPlaneLimitF( + IN gco3D Engine, + IN gctFLOAT Value + ); + +gceSTATUS +gco3D_SetWPlaneLimitX( + IN gco3D Engine, + IN gctFIXED_POINT Value + ); + +gceSTATUS +gco3D_SetWPlaneLimit( + IN gco3D Engine, + IN gctFLOAT Value + ); + +gceSTATUS +gco3D_PrimitiveRestart( + IN gco3D Engine, + IN gctBOOL PrimitiveRestart + ); + +gceSTATUS +gco3D_LoadProgram( + IN gco3D Engine, + IN gcePROGRAM_STAGE_BIT StageBits, + IN gctPOINTER ProgramState + ); + +gceSTATUS +gco3D_EnableAlphaToCoverage( + IN gco3D Engine, + IN gctBOOL Enable + ); + +gceSTATUS +gco3D_EnableSampleCoverage( + IN gco3D Engine, + IN gctBOOL Enable + ); + +gceSTATUS +gco3D_SetSampleCoverageValue( + IN gco3D Engine, + IN gctFLOAT CoverageValue, + IN gctBOOL Invert + ); + +gceSTATUS +gco3D_EnableSampleMask( + IN gco3D Engine, + IN gctBOOL Enable + ); + +gceSTATUS +gco3D_SetSampleMask( + IN gco3D Engine, + IN gctUINT32 SampleMask + ); + +gceSTATUS +gco3D_EnableSampleShading( + IN gco3D Engine, + IN gctBOOL Enable + ); + +gceSTATUS +gco3D_SetMinSampleShadingValue( + IN gco3D Engine, + IN gctFLOAT MinSampleShadingValue + ); + +gceSTATUS +gco3D_SetSampleShading( + IN gco3D Engine, + IN gctBOOL Enable, + IN gctBOOL IsSampleIn, + IN gctFLOAT SampleShadingValue + ); + +gceSTATUS +gco3D_EnableSampleMaskOut( + IN gco3D Engine, + IN gctBOOL Enable, + IN gctINT SampleMaskLoc + ); + +/*----------------------------------------------------------------------------*/ +/*-------------------------- gco3D Fragment Processor ------------------------*/ + +/* Set the fragment processor configuration. */ +gceSTATUS +gco3D_SetFragmentConfiguration( + IN gco3D Engine, + IN gctBOOL ColorFromStream, + IN gctBOOL EnableFog, + IN gctBOOL EnableSmoothPoint, + IN gctUINT32 ClipPlanes + ); + +/* Enable/disable texture stage operation. 
*/ +gceSTATUS +gco3D_EnableTextureStage( + IN gco3D Engine, + IN gctINT Stage, + IN gctBOOL Enable + ); + +/* Program the channel enable masks for the color texture function. */ +gceSTATUS +gco3D_SetTextureColorMask( + IN gco3D Engine, + IN gctINT Stage, + IN gctBOOL ColorEnabled, + IN gctBOOL AlphaEnabled + ); + +/* Program the channel enable masks for the alpha texture function. */ +gceSTATUS +gco3D_SetTextureAlphaMask( + IN gco3D Engine, + IN gctINT Stage, + IN gctBOOL ColorEnabled, + IN gctBOOL AlphaEnabled + ); + +/* Program the constant fragment color. */ +gceSTATUS +gco3D_SetFragmentColorX( + IN gco3D Engine, + IN gctFIXED_POINT Red, + IN gctFIXED_POINT Green, + IN gctFIXED_POINT Blue, + IN gctFIXED_POINT Alpha + ); + +gceSTATUS +gco3D_SetFragmentColorF( + IN gco3D Engine, + IN gctFLOAT Red, + IN gctFLOAT Green, + IN gctFLOAT Blue, + IN gctFLOAT Alpha + ); + +/* Program the constant fog color. */ +gceSTATUS +gco3D_SetFogColorX( + IN gco3D Engine, + IN gctFIXED_POINT Red, + IN gctFIXED_POINT Green, + IN gctFIXED_POINT Blue, + IN gctFIXED_POINT Alpha + ); + +gceSTATUS +gco3D_SetFogColorF( + IN gco3D Engine, + IN gctFLOAT Red, + IN gctFLOAT Green, + IN gctFLOAT Blue, + IN gctFLOAT Alpha + ); + +/* Program the constant texture color. */ +gceSTATUS +gco3D_SetTetxureColorX( + IN gco3D Engine, + IN gctINT Stage, + IN gctFIXED_POINT Red, + IN gctFIXED_POINT Green, + IN gctFIXED_POINT Blue, + IN gctFIXED_POINT Alpha + ); + +gceSTATUS +gco3D_SetTetxureColorF( + IN gco3D Engine, + IN gctINT Stage, + IN gctFLOAT Red, + IN gctFLOAT Green, + IN gctFLOAT Blue, + IN gctFLOAT Alpha + ); + +/* Configure color texture function. 
*/ +gceSTATUS +gco3D_SetColorTextureFunction( + IN gco3D Engine, + IN gctINT Stage, + IN gceTEXTURE_FUNCTION Function, + IN gceTEXTURE_SOURCE Source0, + IN gceTEXTURE_CHANNEL Channel0, + IN gceTEXTURE_SOURCE Source1, + IN gceTEXTURE_CHANNEL Channel1, + IN gceTEXTURE_SOURCE Source2, + IN gceTEXTURE_CHANNEL Channel2, + IN gctINT Scale + ); + +/* Configure alpha texture function. */ +gceSTATUS +gco3D_SetAlphaTextureFunction( + IN gco3D Engine, + IN gctINT Stage, + IN gceTEXTURE_FUNCTION Function, + IN gceTEXTURE_SOURCE Source0, + IN gceTEXTURE_CHANNEL Channel0, + IN gceTEXTURE_SOURCE Source1, + IN gceTEXTURE_CHANNEL Channel1, + IN gceTEXTURE_SOURCE Source2, + IN gceTEXTURE_CHANNEL Channel2, + IN gctINT Scale + ); + +/******************************************************************************\ +******************************* gcoTEXTURE Object ******************************* +\******************************************************************************/ + +/* Cube faces. */ +typedef enum _gceTEXTURE_FACE +{ + gcvFACE_NONE = 0, + gcvFACE_POSITIVE_X, + gcvFACE_NEGATIVE_X, + gcvFACE_POSITIVE_Y, + gcvFACE_NEGATIVE_Y, + gcvFACE_POSITIVE_Z, + gcvFACE_NEGATIVE_Z, +} +gceTEXTURE_FACE; + +typedef struct _gcsTEXTURE +{ + /* Addressing modes. */ + gceTEXTURE_ADDRESSING s; + gceTEXTURE_ADDRESSING t; + gceTEXTURE_ADDRESSING r; + + gceTEXTURE_SWIZZLE swizzle[gcvTEXTURE_COMPONENT_NUM]; + + /* Border color. */ + gctUINT8 border[gcvTEXTURE_COMPONENT_NUM]; + + /* Filters. */ + gceTEXTURE_FILTER minFilter; + gceTEXTURE_FILTER magFilter; + gceTEXTURE_FILTER mipFilter; + gctUINT anisoFilter; + + /* Level of detail. 
*/ + gctFLOAT lodBias; + gctFLOAT lodMin; + gctFLOAT lodMax; + + /* base/max level */ + gctINT32 baseLevel; + gctINT32 maxLevel; + + /* depth texture comparison */ + gceTEXTURE_COMPARE_MODE compareMode; + gceCOMPARE compareFunc; + + gceTEXTURE_DS_MODE dsMode; + + /* sRGB decode */ + gceTEXTURE_SRGBDECODE sRGB; + + gcuVALUE borderColor[4]; +} +gcsTEXTURE, * gcsTEXTURE_PTR; + +typedef struct _gcsTEXTURE_BINDTEXTS_ARGS +{ + /* must be the first member */ + gceHAL_ARG_VERSION version; + +} +gcsTEXTURE_BINDTEXTS_ARGS; + +/* Construct a new gcoTEXTURE object. */ +gceSTATUS +gcoTEXTURE_Construct( + IN gcoHAL Hal, + OUT gcoTEXTURE * Texture + ); + +/* Construct a new gcoTEXTURE object with type information. */ +gceSTATUS +gcoTEXTURE_ConstructEx( + IN gcoHAL Hal, + IN gceTEXTURE_TYPE Type, + OUT gcoTEXTURE * Texture + ); + + +/* Construct a new sized gcoTEXTURE object. */ +gceSTATUS +gcoTEXTURE_ConstructSized( + IN gcoHAL Hal, + IN gceSURF_FORMAT Format, + IN gctUINT Width, + IN gctUINT Height, + IN gctUINT Depth, + IN gctUINT Faces, + IN gctUINT MipMapCount, + IN gcePOOL Pool, + OUT gcoTEXTURE * Texture + ); + +/* Destroy an gcoTEXTURE object. */ +gceSTATUS +gcoTEXTURE_Destroy( + IN gcoTEXTURE Texture + ); + +/* Upload data to an gcoTEXTURE object. */ +gceSTATUS +gcoTEXTURE_Upload( + IN gcoTEXTURE Texture, + IN gctINT MipMap, + IN gceTEXTURE_FACE Face, + IN gctSIZE_T Width, + IN gctSIZE_T Height, + IN gctUINT Slice, + IN gctCONST_POINTER Memory, + IN gctSIZE_T Stride, + IN gceSURF_FORMAT Format, + IN gceSURF_COLOR_SPACE SrcColorSpace + ); + +/* Upload data to an gcoTEXTURE object. 
*/ +gceSTATUS +gcoTEXTURE_UploadSub( + IN gcoTEXTURE Texture, + IN gctINT MipMap, + IN gceTEXTURE_FACE Face, + IN gctSIZE_T X, + IN gctSIZE_T Y, + IN gctSIZE_T Width, + IN gctSIZE_T Height, + IN gctUINT Slice, + IN gctCONST_POINTER Memory, + IN gctSIZE_T Stride, + IN gceSURF_FORMAT Format, + IN gceSURF_COLOR_SPACE SrcColorSpace, + IN gctUINT32 PhysicalAddress + ); + + +/* Upload YUV data to an gcoTEXTURE object. */ +gceSTATUS +gcoTEXTURE_UploadYUV( + IN gcoTEXTURE Texture, + IN gceTEXTURE_FACE Face, + IN gctUINT Width, + IN gctUINT Height, + IN gctUINT Slice, + IN gctPOINTER Memory[3], + IN gctINT Stride[3], + IN gceSURF_FORMAT Format + ); + +/* Upload compressed data to an gcoTEXTURE object. */ +gceSTATUS +gcoTEXTURE_UploadCompressed( + IN gcoTEXTURE Texture, + IN gctINT MipMap, + IN gceTEXTURE_FACE Face, + IN gctSIZE_T Width, + IN gctSIZE_T Height, + IN gctUINT Slice, + IN gctCONST_POINTER Memory, + IN gctSIZE_T Bytes + ); + +/* Upload compressed sub data to an gcoTEXTURE object. */ +gceSTATUS +gcoTEXTURE_UploadCompressedSub( + IN gcoTEXTURE Texture, + IN gctINT MipMap, + IN gceTEXTURE_FACE Face, + IN gctSIZE_T XOffset, + IN gctSIZE_T YOffset, + IN gctSIZE_T Width, + IN gctSIZE_T Height, + IN gctUINT Slice, + IN gctCONST_POINTER Memory, + IN gctSIZE_T Size + ); + +/* Get gcoSURF object for a mipmap level. */ +gceSTATUS +gcoTEXTURE_GetMipMap( + IN gcoTEXTURE Texture, + IN gctUINT MipMap, + OUT gcoSURF * Surface + ); + +/* Get gcoSURF object for a mipmap level and face offset. 
*/ +gceSTATUS +gcoTEXTURE_GetMipMapFace( + IN gcoTEXTURE Texture, + IN gctUINT MipMap, + IN gceTEXTURE_FACE Face, + OUT gcoSURF * Surface, + OUT gctSIZE_T_PTR Offset + ); + +gceSTATUS +gcoTEXTURE_GetMipMapSlice( + IN gcoTEXTURE Texture, + IN gctUINT MipMap, + IN gctUINT Slice, + OUT gcoSURF * Surface, + OUT gctSIZE_T_PTR Offset + ); + +gceSTATUS +gcoTEXTURE_AddMipMap( + IN gcoTEXTURE Texture, + IN gctINT Level, + IN gctINT InternalFormat, + IN gceSURF_FORMAT Format, + IN gctSIZE_T Width, + IN gctSIZE_T Height, + IN gctSIZE_T Depth, + IN gctUINT Faces, + IN gcePOOL Pool, + OUT gcoSURF * Surface + ); + +gceSTATUS +gcoTEXTURE_AddMipMapEx( + IN gcoTEXTURE Texture, + IN gctINT Level, + IN gctINT InternalFormat, + IN gceSURF_FORMAT Format, + IN gctSIZE_T Width, + IN gctSIZE_T Height, + IN gctSIZE_T Depth, + IN gctUINT Faces, + IN gcePOOL Pool, + IN gctUINT32 Samples, + IN gctBOOL Protected, + OUT gcoSURF * Surface + ); + +gceSTATUS +gcoTEXTURE_AddMipMapFromClient( + IN gcoTEXTURE Texture, + IN gctINT Level, + IN gcoSURF Surface + ); + +gceSTATUS +gcoTEXTURE_AddMipMapFromSurface( + IN gcoTEXTURE Texture, + IN gctINT Level, + IN gcoSURF Surface + ); + +gceSTATUS +gcoTEXTURE_LockMipMap( + IN gcoTEXTURE Texture, + IN gctUINT MipMap, + OPTIONAL OUT gctUINT32 * Address, + OPTIONAL OUT gctPOINTER * Memory + ); + +gceSTATUS +gcoTEXTURE_SetEndianHint( + IN gcoTEXTURE Texture, + IN gceENDIAN_HINT EndianHint + ); + +gceSTATUS +gcoTEXTURE_Disable( + IN gcoHAL Hal, + IN gctINT Sampler, + IN gctBOOL DefaultInteger + ); + +gceSTATUS +gcoTEXTURE_Flush( + IN gcoTEXTURE Texture + ); + +gceSTATUS +gcoTEXTURE_FlushVS( + IN gcoTEXTURE Texture + ); + +gceSTATUS +gcoTEXTURE_QueryCaps( + IN gcoHAL Hal, + OUT gctUINT * MaxWidth, + OUT gctUINT * MaxHeight, + OUT gctUINT * MaxDepth, + OUT gctBOOL * Cubic, + OUT gctBOOL * NonPowerOfTwo, + OUT gctUINT * VertexSamplers, + OUT gctUINT * PixelSamplers + ); + +gceSTATUS +gcoTEXTURE_GetClosestFormat( + IN gcoHAL Hal, + IN gceSURF_FORMAT InFormat, + OUT 
gceSURF_FORMAT* OutFormat + ); + +gceSTATUS +gcoTEXTURE_GetClosestFormatEx( + IN gcoHAL Hal, + IN gceSURF_FORMAT InFormat, + IN gceTEXTURE_TYPE TextureType, + OUT gceSURF_FORMAT* OutFormat + ); + +gceSTATUS +gcoTEXTURE_GetFormatInfo( + IN gcoTEXTURE Texture, + IN gctINT preferLevel, + OUT gcsSURF_FORMAT_INFO_PTR * TxFormatInfo + ); + +gceSTATUS +gcoTEXTURE_GetTextureFormatName( + IN gcsSURF_FORMAT_INFO_PTR TxFormatInfo, + OUT gctCONST_STRING * TxName + ); + +gceSTATUS +gcoTEXTURE_RenderIntoMipMap( + IN gcoTEXTURE Texture, + IN gctINT Level + ); + +gceSTATUS +gcoTEXTURE_RenderIntoMipMap2( + IN gcoTEXTURE Texture, + IN gctINT Level, + IN gctBOOL Sync + ); + +gceSTATUS +gcoTEXTURE_IsRenderable( + IN gcoTEXTURE Texture, + IN gctUINT Level + ); + +gceSTATUS +gcoTEXTURE_IsComplete( + IN gcoTEXTURE Texture, + IN gcsTEXTURE_PTR Info, + IN gctINT BaseLevel, + IN gctINT MaxLevel + ); + +gceSTATUS +gcoTEXTURE_CheckTexLevel0Attrib( + IN gcoTEXTURE Texture, + IN gctINT MaxLevel, + IN gctINT usedLevel + ); + +gceSTATUS +gcoTEXTURE_BindTexture( + IN gcoTEXTURE Texture, + IN gctINT Target, + IN gctINT Sampler, + IN gcsTEXTURE_PTR Info + ); + +gceSTATUS +gcoTEXTURE_BindTextureEx( + IN gcoTEXTURE Texture, + IN gctINT Target, + IN gctINT Sampler, + IN gcsTEXTURE_PTR Info, + IN gctINT textureLayer + ); + +gceSTATUS +gcoTEXTURE_BindTextureDesc( + IN gcoTEXTURE Texture, + IN gctINT Sampler, + IN gcsTEXTURE_PTR Info, + IN gctINT TextureLayer + ); + +gceSTATUS +gcoTEXTURE_SetDescDirty( + IN gcoTEXTURE Texture + ); + +gceSTATUS +gcoTEXTURE_InitParams( + IN gcoHAL Hal, + IN gcsTEXTURE_PTR TexParams + ); + +gceSTATUS +gcoTEXTURE_SetDepthTextureFlag( + IN gcoTEXTURE Texture, + IN gctBOOL unsized + ); + +gceSTATUS +gcoTEXTURE_BindTextureTS( + IN gcsTEXTURE_BINDTEXTS_ARGS * args + ); + +gceSTATUS +gcoTEXTURE_GenerateMipMap( + IN gcoTEXTURE Texture, + IN gctINT BaseLevel, + IN gctINT MaxLevel + ); + +/******************************************************************************\ 
+******************************* gcoSTREAM Object ****************************** +\******************************************************************************/ + +typedef enum _gceVERTEX_FORMAT +{ + gcvVERTEX_BYTE, + gcvVERTEX_UNSIGNED_BYTE, + gcvVERTEX_SHORT, + gcvVERTEX_UNSIGNED_SHORT, + gcvVERTEX_INT, + gcvVERTEX_UNSIGNED_INT, + gcvVERTEX_FIXED, + gcvVERTEX_HALF, + gcvVERTEX_FLOAT, + gcvVERTEX_UNSIGNED_INT_10_10_10_2, + gcvVERTEX_INT_10_10_10_2, + gcvVERTEX_UNSIGNED_INT_2_10_10_10_REV, + gcvVERTEX_INT_2_10_10_10_REV, + /* integer format */ + gcvVERTEX_INT8, + gcvVERTEX_INT16, + gcvVERTEX_INT32, +} +gceVERTEX_FORMAT; + +/* What the SW converting scheme to create temp attrib */ +typedef enum _gceATTRIB_SCHEME +{ + gcvATTRIB_SCHEME_KEEP = 0, + gcvATTRIB_SCHEME_2_10_10_10_REV_TO_FLOAT, + gcvATTRIB_SCHEME_BYTE_TO_IVEC4, + gcvATTRIB_SCHEME_SHORT_TO_IVEC4, + gcvATTRIB_SCHEME_INT_TO_IVEC4, + gcvATTRIB_SCHEME_UBYTE_TO_UVEC4, + gcvATTRIB_SCHEME_USHORT_TO_UVEC4, + gcvATTRIB_SCHEME_UINT_TO_UVEC4, +} gceATTRIB_SCHEME; + +gceSTATUS +gcoSTREAM_Construct( + IN gcoHAL Hal, + OUT gcoSTREAM * Stream + ); + +gceSTATUS +gcoSTREAM_Destroy( + IN gcoSTREAM Stream + ); + +gceSTATUS +gcoSTREAM_Upload( + IN gcoSTREAM Stream, + IN gctCONST_POINTER Buffer, + IN gctSIZE_T Offset, + IN gctSIZE_T Bytes, + IN gctBOOL Dynamic + ); + +gceSTATUS +gcoSTREAM_ReAllocBufNode( + IN gcoSTREAM Stream + ); + +gceSTATUS +gcoSTREAM_SetStride( + IN gcoSTREAM Stream, + IN gctUINT32 Stride + ); + +gceSTATUS +gcoSTREAM_Node( + IN gcoSTREAM Stream, + OUT gcsSURF_NODE_PTR * Node + ); + +gceSTATUS +gcoSTREAM_Lock( + IN gcoSTREAM Stream, + OUT gctPOINTER * Logical, + OUT gctUINT32 * Physical + ); + +gceSTATUS +gcoSTREAM_Unlock( + IN gcoSTREAM Stream + ); + +gceSTATUS +gcoSTREAM_Reserve( + IN gcoSTREAM Stream, + IN gctSIZE_T Bytes + ); + +gceSTATUS +gcoSTREAM_Flush( + IN gcoSTREAM Stream + ); + +typedef struct _gcsSTREAM_INFO +{ + gctUINT index; + gceVERTEX_FORMAT format; + gctBOOL normalized; + gctUINT 
components; + gctSIZE_T size; + gctCONST_POINTER data; + gctUINT stride; +} +gcsSTREAM_INFO, * gcsSTREAM_INFO_PTR; + +gceSTATUS +gcoSTREAM_CPUCacheOperation( + IN gcoSTREAM Stream, + IN gceCACHEOPERATION Operation + ); + +gceSTATUS +gcoSTREAM_CPUCacheOperation_Range( + IN gcoSTREAM Stream, + IN gctSIZE_T Offset, + IN gctSIZE_T Length, + IN gceCACHEOPERATION Operation + ); + +/******************************************************************************\ +******************************** gcoVERTEX Object ****************************** +\******************************************************************************/ + +typedef struct _gcsVERTEX_ATTRIBUTES +{ + gceVERTEX_FORMAT format; + gctBOOL normalized; + gctUINT32 components; + gctSIZE_T size; + gctUINT32 stream; + gctUINT32 offset; + gctUINT32 stride; +} +gcsVERTEX_ATTRIBUTES; + +gceSTATUS +gcoVERTEX_Construct( + IN gcoHAL Hal, + OUT gcoVERTEX * Vertex + ); + +gceSTATUS +gcoVERTEX_Destroy( + IN gcoVERTEX Vertex + ); + +gceSTATUS +gcoVERTEX_Reset( + IN gcoVERTEX Vertex + ); + +gceSTATUS +gcoVERTEX_EnableAttribute( + IN gcoVERTEX Vertex, + IN gctUINT32 Index, + IN gceVERTEX_FORMAT Format, + IN gctBOOL Normalized, + IN gctUINT32 Components, + IN gcoSTREAM Stream, + IN gctUINT32 Offset, + IN gctUINT32 Stride + ); + +gceSTATUS +gcoVERTEX_DisableAttribute( + IN gcoVERTEX Vertex, + IN gctUINT32 Index + ); + +gceSTATUS +gcoVERTEX_Bind( + IN gcoVERTEX Vertex + ); + +/******************************************************************************* +***** gcoVERTEXARRAY Object ***************************************************/ + +typedef struct _gcsATTRIBUTE +{ + /* Enabled. */ + gctBOOL enable; + + /* Number of components. */ + gctINT size; + + /* Attribute format. */ + gceVERTEX_FORMAT format; + + /* Flag whether the attribute is normalized or not. */ + gctBOOL normalized; + + /* Stride of the component. */ + gctSIZE_T stride; + + /* Divisor of the attribute */ + gctUINT divisor; + + /* Pointer to the attribute data. 
*/ + gctCONST_POINTER pointer; + + /* Stream object owning the attribute data. */ + gcoBUFOBJ stream; + + /* Generic values for attribute. */ + gctFLOAT genericValue[4]; + + /* Generic size for attribute. */ + gctINT genericSize; + + /* Vertex shader linkage. */ + gctUINT linkage; + +#if gcdUSE_WCLIP_PATCH + /* Does it hold positions? */ + gctBOOL isPosition; +#endif + + /* Index to vertex array */ + gctINT arrayIdx; + + gceATTRIB_SCHEME convertScheme; + + /* Pointer to the temporary buffer to be freed */ + gcoBUFOBJ tempStream; + + /* Pointer to the temporary memory to be freed */ + gctCONST_POINTER tempMemory; +} +gcsATTRIBUTE, +* gcsATTRIBUTE_PTR; + +typedef struct _gcsVERTEXARRAY +{ + /* Enabled. */ + gctBOOL enable; + + /* Number of components. */ + gctINT size; + + /* Attribute format. */ + gceVERTEX_FORMAT format; + + /* Flag whether the attribute is normalized or not. */ + gctBOOL normalized; + + /* Stride of the component. */ + gctUINT stride; + + /* Divisor of the attribute */ + gctUINT divisor; + + /* Pointer to the attribute data. */ + gctCONST_POINTER pointer; + + /* Stream object owning the attribute data. */ + gcoSTREAM stream; + + /* Generic values for attribute. */ + gctFLOAT genericValue[4]; + + /* Generic size for attribute. */ + gctINT genericSize; + + /* Vertex shader linkage. 
*/ + gctUINT linkage; + + gctBOOL isPosition; +} +gcsVERTEXARRAY, +* gcsVERTEXARRAY_PTR; + +gceSTATUS +gcoVERTEXARRAY_Construct( + IN gcoHAL Hal, + OUT gcoVERTEXARRAY * Vertex + ); + +gceSTATUS +gcoVERTEXARRAY_Destroy( + IN gcoVERTEXARRAY Vertex + ); + +/* If don't consider isolation, STREAM_INFO / INDEX_INFO could be +** include in the struct of instantDraw in chip level.*/ +typedef struct _gcsVERTEXARRAY_STREAM_INFO +{ + gctUINT attribMask; + gctSIZE_T first; + gctSIZE_T count; + gcePRIMITIVE primMode; + gctSIZE_T primCount; + gctINT vertexInstIndex; + gctBOOL instanced; + gctSIZE_T instanceCount; + + union _gcsVERTEXARRAY_STREAM_INFO_UNION + { + struct _gcsVERTEXARRAY_STREAM_ES11_INFO + { + gcsVERTEXARRAY_PTR attributes; + }es11; + + struct _gcsVERTEXARRAY_STREAM_ES30_INFO + { + gcsATTRIBUTE_PTR attributes; + }es30; + }u; +}gcsVERTEXARRAY_STREAM_INFO, +*gcsVERTEXARRAY_STREAM_INFO_PTR; + +typedef const struct _gcsVERTEXARRAY_STREAM_INFO* gcsVERTEXARRAY_STREAM_INFO_CONST_PTR; + +typedef struct _gcsVERTEXARRAY_INDEX_INFO +{ + gctSIZE_T count; + gceINDEX_TYPE indexType; + gctPOINTER indexMemory; + + union _gcsVERTEXARRAY_INDEX_INFO_UNION + { + struct _gcsVERTEXARRAY_INDEX_ES11_INFO + { + gcoINDEX indexBuffer; + }es11; + + struct _gcsVERTEXARRAY_INDEX_ES30_INFO + { + gcoBUFOBJ indexBuffer; + }es30; + }u; +}gcsVERTEXARRAY_INDEX_INFO, +*gcsVERTEXARRAY_INDEX_INFO_PTR; + +typedef const struct _gcsVERTEXARRAY_INDEX_INFO* gcsVERTEXARRAY_INDEX_INFO_CONST_PTR; + +gceSTATUS +gcoVERTEXARRAY_IndexBind( + IN gcoVERTEXARRAY Vertex, + IN gcsVERTEXARRAY_INDEX_INFO_PTR IndexInfo + ); + +gceSTATUS +gcoVERTEXARRAY_StreamBind( + IN gcoVERTEXARRAY Vertex, +#if gcdUSE_WCLIP_PATCH + IN OUT gctFLOAT * WLimitRms, + IN OUT gctBOOL * WLimitRmsDirty, +#endif + IN gcsVERTEXARRAY_STREAM_INFO_CONST_PTR StreamInfo, + IN gcsVERTEXARRAY_INDEX_INFO_CONST_PTR IndexInfo + ); + +gceSTATUS +gcoVERTEXARRAY_IndexBind_Ex( + IN gcoVERTEXARRAY Vertex, + IN OUT gcsVERTEXARRAY_STREAM_INFO_PTR StreamInfo, + IN 
gcsVERTEXARRAY_INDEX_INFO_PTR IndexInfo + ); + +gceSTATUS +gcoVERTEXARRAY_StreamBind_Ex( + IN gcoVERTEXARRAY Vertex, +#if gcdUSE_WCLIP_PATCH + IN OUT gctFLOAT * WLimitRms, + IN OUT gctBOOL * WLimitRmsDirty, +#endif + IN OUT gcsVERTEXARRAY_STREAM_INFO_PTR StreamInfo, + IN gcsVERTEXARRAY_INDEX_INFO_PTR IndexInfo + ); + +gceSTATUS +gcoVERTEXARRAY_Bind( + IN gcoVERTEXARRAY Vertex, + IN gctUINT32 EnableBits, + IN gcsVERTEXARRAY_PTR VertexArray, + IN gctUINT First, + IN gctSIZE_T * Count, + IN gceINDEX_TYPE IndexType, + IN gcoINDEX IndexObject, + IN gctPOINTER IndexMemory, + IN OUT gcePRIMITIVE * PrimitiveType, +#if gcdUSE_WCLIP_PATCH + IN OUT gctUINT * PrimitiveCount, + IN OUT gctFLOAT * wLimitRms, + IN OUT gctBOOL * wLimitDirty +#else + IN OUT gctUINT * PrimitiveCount +#endif + ); + +/* Frame Database */ +gceSTATUS +gcoHAL_AddFrameDB( + void + ); + +gceSTATUS +gcoHAL_DumpFrameDB( + gctCONST_STRING Filename OPTIONAL + ); + +gceSTATUS +gcoHAL_InitGPUProfile( + void + ); + +gceSTATUS +gcoHAL_DumpGPUProfile( + void + ); + +/****************************************************************************** +**********************gcoBUFOBJ object***************************************** +*******************************************************************************/ +typedef enum _gceBUFOBJ_TYPE +{ + gcvBUFOBJ_TYPE_ARRAY_BUFFER = 1, + gcvBUFOBJ_TYPE_ELEMENT_ARRAY_BUFFER = 2, + gcvBUFOBJ_TYPE_GENERIC_BUFFER = 100 + +} gceBUFOBJ_TYPE; + +typedef enum _gceBUFOBJ_USAGE +{ + gcvBUFOBJ_USAGE_STREAM_DRAW = 1, + gcvBUFOBJ_USAGE_STREAM_READ, + gcvBUFOBJ_USAGE_STREAM_COPY, + gcvBUFOBJ_USAGE_STATIC_DRAW, + gcvBUFOBJ_USAGE_STATIC_READ, + gcvBUFOBJ_USAGE_STATIC_COPY, + gcvBUFOBJ_USAGE_DYNAMIC_DRAW, + gcvBUFOBJ_USAGE_DYNAMIC_READ, + gcvBUFOBJ_USAGE_DYNAMIC_COPY, + + /* special patch for optimaize performance, + ** no fence and duplicate stream to ensure data correct + */ + gcvBUFOBJ_USAGE_DISABLE_FENCE_DYNAMIC_STREAM = 256 +} gceBUFOBJ_USAGE; + +/* Construct a new gcoBUFOBJ object. 
*/ +gceSTATUS +gcoBUFOBJ_Construct( + IN gcoHAL Hal, + IN gceBUFOBJ_TYPE Type, + OUT gcoBUFOBJ * BufObj + ); + +/* Destroy a gcoBUFOBJ object. */ +gceSTATUS +gcoBUFOBJ_Destroy( + IN gcoBUFOBJ BufObj + ); + +/* Lock pbo in memory. */ +gceSTATUS +gcoBUFOBJ_Lock( + IN gcoBUFOBJ BufObj, + OUT gctUINT32 * Address, + OUT gctPOINTER * Memory + ); + +/* Lock pbo in memory. */ +gceSTATUS +gcoBUFOBJ_FastLock( + IN gcoBUFOBJ BufObj, + OUT gctUINT32 * Address, + OUT gctPOINTER * Memory + ); + +/* Unlock pbo that was previously locked with gcoBUFOBJ_Lock. */ +gceSTATUS +gcoBUFOBJ_Unlock( + IN gcoBUFOBJ BufObj + ); + +/* Free existing pbo buffer. */ +gceSTATUS +gcoBUFOBJ_Free( + IN gcoBUFOBJ BufObj + ); + +/* Upload data into an pbo buffer. */ +gceSTATUS +gcoBUFOBJ_Upload( + IN gcoBUFOBJ BufObj, + IN gctCONST_POINTER Buffer, + IN gctSIZE_T Offset, + IN gctSIZE_T Bytes, + IN gceBUFOBJ_USAGE Usage + ); + +/* Bind an index object to the hardware. */ +gceSTATUS +gcoBUFOBJ_IndexBind ( + IN gcoBUFOBJ Index, + IN gceINDEX_TYPE Type, + IN gctUINT32 Offset, + IN gctSIZE_T Count + ); + +/* Find min and max index for the index buffer */ +gceSTATUS +gcoBUFOBJ_IndexGetRange( + IN gcoBUFOBJ Index, + IN gceINDEX_TYPE Type, + IN gctUINT32 Offset, + IN gctUINT32 Count, + OUT gctUINT32 * MinimumIndex, + OUT gctUINT32 * MaximumIndex + ); + +/* Sets a buffer object as dirty */ +gceSTATUS +gcoBUFOBJ_SetDirty( + IN gcoBUFOBJ BufObj + ); + +/* Creates a new buffer if needed */ +gceSTATUS +gcoBUFOBJ_AlignIndexBufferWhenNeeded( + IN gcoBUFOBJ BufObj, + IN gctSIZE_T Offset, + OUT gcoBUFOBJ * AlignedBufObj + ); + +/* Cache operations on whole range */ +gceSTATUS +gcoBUFOBJ_CPUCacheOperation( + IN gcoBUFOBJ BufObj, + IN gceCACHEOPERATION Operation + ); + +/* Cache operations on a specified range */ +gceSTATUS +gcoBUFOBJ_CPUCacheOperation_Range( + IN gcoBUFOBJ BufObj, + IN gctSIZE_T Offset, + IN gctSIZE_T Length, + IN gceCACHEOPERATION Operation + ); + +/* Return size of the bufobj */ +gceSTATUS 
+gcoBUFOBJ_GetSize( + IN gcoBUFOBJ BufObj, + OUT gctSIZE_T_PTR Size + ); + +/* Return memory node of the bufobj */ +gceSTATUS +gcoBUFOBJ_GetNode( + IN gcoBUFOBJ BufObj, + OUT gcsSURF_NODE_PTR * Node + ); + +gceSTATUS +gcoBUFOBJ_ReAllocBufNode( + IN gcoBUFOBJ BufObj + ); + +/* Handle GPU cache operations */ +gceSTATUS +gcoBUFOBJ_GPUCacheOperation( + gcoBUFOBJ BufObj + ); + +/* Dump buffer. */ +void +gcoBUFOBJ_Dump( + IN gcoBUFOBJ BufObj + ); + +#endif /* gcdENABLE_3D */ + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_engine_h_ */ + + diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_engine_vg.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_engine_vg.h new file mode 100644 index 000000000000..9bbbe05f0101 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_engine_vg.h @@ -0,0 +1,1320 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+** @brief Tiling mode for painting and imaging.
+*/ +typedef enum _gceVGCMD +{ + gcvVGCMD_END, /* 0: 0x00 */ + gcvVGCMD_CLOSE, /* 1: 0x01 */ + gcvVGCMD_MOVE, /* 2: 0x02 */ + gcvVGCMD_MOVE_REL, /* 3: 0x03 */ + gcvVGCMD_LINE, /* 4: 0x04 */ + gcvVGCMD_LINE_REL, /* 5: 0x05 */ + gcvVGCMD_QUAD, /* 6: 0x06 */ + gcvVGCMD_QUAD_REL, /* 7: 0x07 */ + gcvVGCMD_CUBIC, /* 8: 0x08 */ + gcvVGCMD_CUBIC_REL, /* 9: 0x09 */ + gcvVGCMD_BREAK, /* 10: 0x0A */ + gcvVGCMD_HLINE, /* 11: ******* R E S E R V E D *******/ + gcvVGCMD_HLINE_REL, /* 12: ******* R E S E R V E D *******/ + gcvVGCMD_VLINE, /* 13: ******* R E S E R V E D *******/ + gcvVGCMD_VLINE_REL, /* 14: ******* R E S E R V E D *******/ + gcvVGCMD_SQUAD, /* 15: ******* R E S E R V E D *******/ + gcvVGCMD_SQUAD_REL, /* 16: ******* R E S E R V E D *******/ + gcvVGCMD_SCUBIC, /* 17: ******* R E S E R V E D *******/ + gcvVGCMD_SCUBIC_REL, /* 18: ******* R E S E R V E D *******/ + gcvVGCMD_SCCWARC, /* 19: ******* R E S E R V E D *******/ + gcvVGCMD_SCCWARC_REL, /* 20: ******* R E S E R V E D *******/ + gcvVGCMD_SCWARC, /* 21: ******* R E S E R V E D *******/ + gcvVGCMD_SCWARC_REL, /* 22: ******* R E S E R V E D *******/ + gcvVGCMD_LCCWARC, /* 23: ******* R E S E R V E D *******/ + gcvVGCMD_LCCWARC_REL, /* 24: ******* R E S E R V E D *******/ + gcvVGCMD_LCWARC, /* 25: ******* R E S E R V E D *******/ + gcvVGCMD_LCWARC_REL, /* 26: ******* R E S E R V E D *******/ + + /* The width of the command recognized by the hardware on bits. */ + gcvVGCMD_WIDTH = 5, + + /* Hardware command mask. */ + gcvVGCMD_MASK = (1 << gcvVGCMD_WIDTH) - 1, + + /* Command modifiers. */ + gcvVGCMD_H_MOD = 1 << gcvVGCMD_WIDTH, /* = 32 */ + gcvVGCMD_V_MOD = 2 << gcvVGCMD_WIDTH, /* = 64 */ + gcvVGCMD_S_MOD = 3 << gcvVGCMD_WIDTH, /* = 96 */ + gcvVGCMD_ARC_MOD = 4 << gcvVGCMD_WIDTH, /* = 128 */ + + /* Emulated LINE commands. 
*/ + gcvVGCMD_HLINE_EMUL = gcvVGCMD_H_MOD | gcvVGCMD_LINE, /* = 36 */ + gcvVGCMD_HLINE_EMUL_REL = gcvVGCMD_H_MOD | gcvVGCMD_LINE_REL, /* = 37 */ + gcvVGCMD_VLINE_EMUL = gcvVGCMD_V_MOD | gcvVGCMD_LINE, /* = 68 */ + gcvVGCMD_VLINE_EMUL_REL = gcvVGCMD_V_MOD | gcvVGCMD_LINE_REL, /* = 69 */ + + /* Emulated SMOOTH commands. */ + gcvVGCMD_SQUAD_EMUL = gcvVGCMD_S_MOD | gcvVGCMD_QUAD, /* = 102 */ + gcvVGCMD_SQUAD_EMUL_REL = gcvVGCMD_S_MOD | gcvVGCMD_QUAD_REL, /* = 103 */ + gcvVGCMD_SCUBIC_EMUL = gcvVGCMD_S_MOD | gcvVGCMD_CUBIC, /* = 104 */ + gcvVGCMD_SCUBIC_EMUL_REL = gcvVGCMD_S_MOD | gcvVGCMD_CUBIC_REL, /* = 105 */ + + /* Emulation ARC commands. */ + gcvVGCMD_ARC_LINE = gcvVGCMD_ARC_MOD | gcvVGCMD_LINE, /* = 132 */ + gcvVGCMD_ARC_LINE_REL = gcvVGCMD_ARC_MOD | gcvVGCMD_LINE_REL, /* = 133 */ + gcvVGCMD_ARC_QUAD = gcvVGCMD_ARC_MOD | gcvVGCMD_QUAD, /* = 134 */ + gcvVGCMD_ARC_QUAD_REL = gcvVGCMD_ARC_MOD | gcvVGCMD_QUAD_REL /* = 135 */ +} +gceVGCMD; +typedef enum _gceVGCMD * gceVGCMD_PTR; + +/** +** @ingroup gcoVG +** +** @brief Blending modes supported by the HAL. +** +** This enumeration defines the blending modes supported by the HAL. This is +** in fact a one-to-one mapping of the OpenVG 1.1 blending modes. +*/ +typedef enum _gceVG_BLEND +{ + gcvVG_BLEND_SRC, + gcvVG_BLEND_SRC_OVER, + gcvVG_BLEND_DST_OVER, + gcvVG_BLEND_SRC_IN, + gcvVG_BLEND_DST_IN, + gcvVG_BLEND_MULTIPLY, + gcvVG_BLEND_SCREEN, + gcvVG_BLEND_DARKEN, + gcvVG_BLEND_LIGHTEN, + gcvVG_BLEND_ADDITIVE, + gcvVG_BLEND_SUBTRACT, + gcvVG_BLEND_FILTER +} +gceVG_BLEND; + +/** +** @ingroup gcoVG +** +** @brief Image modes supported by the HAL. +** +** This enumeration defines the image modes supported by the HAL. This is +** in fact a one-to-one mapping of the OpenVG 1.1 image modes with the addition +** of NO IMAGE. 
+** @brief Filter modes for patterns and imaging.
*/ +#define gcvCHANNEL_X (0) +#define gcvCHANNEL_R (1 << 0) +#define gcvCHANNEL_G (1 << 1) +#define gcvCHANNEL_B (1 << 2) +#define gcvCHANNEL_A (1 << 3) + +typedef enum _gceCHANNEL +{ + gcvCHANNEL_XXXX = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X), + gcvCHANNEL_XXXA = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_A), + gcvCHANNEL_XXBX = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_X), + gcvCHANNEL_XXBA = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_A), + + gcvCHANNEL_XGXX = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_X), + gcvCHANNEL_XGXA = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_A), + gcvCHANNEL_XGBX = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_X), + gcvCHANNEL_XGBA = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_A), + + gcvCHANNEL_RXXX = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X), + gcvCHANNEL_RXXA = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_A), + gcvCHANNEL_RXBX = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_X), + gcvCHANNEL_RXBA = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_A), + + gcvCHANNEL_RGXX = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_X), + gcvCHANNEL_RGXA = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_A), + gcvCHANNEL_RGBX = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_X), + gcvCHANNEL_RGBA = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_A), +} +gceCHANNEL; + +/******************************************************************************\ +******************************** VG Structures ******************************* +\******************************************************************************/ + +/** +** @ingroup gcoVG +** +** @brief Definition of the color ramp used by the gradient paints. +** +** The gcsCOLOR_RAMP structure defines the layout of one single color inside +** a color ramp which is used by gradient paints. 
+*/ +typedef struct _gcsCOLOR_RAMP +{ + /** Value for the color stop. */ + gctFLOAT stop; + + /** Red color channel value for the color stop. */ + gctFLOAT red; + + /** Green color channel value for the color stop. */ + gctFLOAT green; + + /** Blue color channel value for the color stop. */ + gctFLOAT blue; + + /** Alpha color channel value for the color stop. */ + gctFLOAT alpha; +} +gcsCOLOR_RAMP, * gcsCOLOR_RAMP_PTR; + +/** +** @ingroup gcoVG +** +** @brief Definition of the color ramp used by the gradient paints in fixed form. +** +** The gcsCOLOR_RAMP structure defines the layout of one single color inside +** a color ramp which is used by gradient paints. +*/ +typedef struct _gcsFIXED_COLOR_RAMP +{ + /** Value for the color stop. */ + gctFIXED_POINT stop; + + /** Red color channel value for the color stop. */ + gctFIXED_POINT red; + + /** Green color channel value for the color stop. */ + gctFIXED_POINT green; + + /** Blue color channel value for the color stop. */ + gctFIXED_POINT blue; + + /** Alpha color channel value for the color stop. */ + gctFIXED_POINT alpha; +} +gcsFIXED_COLOR_RAMP, * gcsFIXED_COLOR_RAMP_PTR; + + +/** +** @ingroup gcoVG +** +** @brief Rectangle structure used by the gcoVG object. +** +** This structure defines the layout of a rectangle. Make sure width and +** height are larger than 0. +*/ +typedef struct _gcsVG_RECT * gcsVG_RECT_PTR; +typedef struct _gcsVG_RECT +{ + /** Left location of the rectangle. */ + gctINT x; + + /** Top location of the rectangle. */ + gctINT y; + + /** Width of the rectangle. */ + gctINT width; + + /** Height of the rectangle. */ + gctINT height; +} +gcsVG_RECT; + +/** +** @ingroup gcoVG +** +** @brief Path command buffer attribute structure. +** +** The gcsPATH_BUFFER_INFO structure contains the specifics about +** the layout of the path data command buffer. 
+*/ +typedef struct _gcsPATH_BUFFER_INFO * gcsPATH_BUFFER_INFO_PTR; +typedef struct _gcsPATH_BUFFER_INFO +{ + gctUINT reservedForHead; + gctUINT reservedForTail; +} +gcsPATH_BUFFER_INFO; + +/** +** @ingroup gcoVG +** +** @brief Definition of the path data container structure. +** +** The gcsPATH structure defines the layout of the path data container. +*/ +typedef struct _gcsPATH_DATA * gcsPATH_DATA_PTR; +typedef struct _gcsPATH_DATA +{ + /* Data container in command buffer format. */ + gcsCMDBUFFER data; + + /* Path data type. */ + gcePATHTYPE dataType; +} +gcsPATH_DATA; + + +/******************************************************************************\ +********************************* gcoHAL Object ******************************** +\******************************************************************************/ + +/* Query path data storage attributes. */ +gceSTATUS +gcoHAL_QueryPathStorage( + IN gcoHAL Hal, +#if gcdGC355_PROFILER + IN gcoVG Vg, + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + OUT gcsPATH_BUFFER_INFO_PTR Information + ); + +/* Associate a completion signal with the command buffer. */ +gceSTATUS +gcoHAL_AssociateCompletion( + IN gcoHAL Hal, +#if gcdGC355_PROFILER + IN gcoVG Vg, + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gcsPATH_DATA_PTR PathData + ); + +/* Release the current command buffer completion signal. */ +gceSTATUS +gcoHAL_DeassociateCompletion( + IN gcoHAL Hal, +#if gcdGC355_PROFILER + IN gcoVG Vg, + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gcsPATH_DATA_PTR PathData + ); + +/* Verify whether the command buffer is still in use. 
+/* Split a hardware address into pool and offset. */ +gceSTATUS +gcoHAL_SplitAddress( + IN gcoHAL Hal, +#if gcdGC355_PROFILER + IN gcoVG Vg, + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gctUINT32 Address, + OUT gcePOOL * Pool, + OUT gctUINT32 * Offset + ); + +/* Combine pool and offset into a hardware address. */
*/ +gceSTATUS +gcoHAL_QueryCommandBuffer( + IN gcoHAL Hal, +#if gcdGC355_PROFILER + IN gcoVG Vg, + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + OUT gcsCOMMAND_BUFFER_INFO_PTR Information + ); +/* Allocate and lock linear video memory. */ +gceSTATUS +gcoHAL_AllocateLinearVideoMemory( + IN gcoHAL Hal, +#if gcdGC355_PROFILER + IN gcoVG Vg, + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gctUINT Size, + IN gctUINT Alignment, + IN gcePOOL Pool, + OUT gctUINT32 * Node, + OUT gctUINT32 * Address, + OUT gctPOINTER * Memory + ); + +/* Align the specified size accordingly to the hardware requirements. */ +gceSTATUS +gcoHAL_GetAlignedSurfaceSize( + IN gcoHAL Hal, +#if gcdGC355_PROFILER + IN gcoVG Vg, + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gceSURF_TYPE Type, + IN OUT gctUINT32_PTR Width, + IN OUT gctUINT32_PTR Height + ); + +gceSTATUS +gcoHAL_ReserveTask( + IN gcoHAL Hal, +#if gcdGC355_PROFILER + IN gcoVG Vg, + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gceBLOCK Block, + IN gctUINT TaskCount, + IN gctUINT32 Bytes, + OUT gctPOINTER * Memory + ); +/******************************************************************************\ +********************************** gcoVG Object ******************************** +\******************************************************************************/ + +/** @defgroup gcoVG gcoVG +** +** The gcoVG object abstracts the VG hardware pipe. 
+*/ +#if gcdGC355_PROFILER +void +gcoVG_ProfilerEnableDisable( + IN gcoVG Vg, + IN gctUINT enableGetAPITimes, + IN gctFILE apiTimeFile + ); + +void +gcoVG_ProfilerTreeDepth( + IN gcoVG Vg, + IN gctUINT TreeDepth + ); + +void +gcoVG_ProfilerSetStates( + IN gcoVG Vg, + IN gctUINT treeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth + ); +#endif + +gctBOOL +gcoVG_IsMaskSupported( +#if gcdGC355_PROFILER + IN gcoVG Vg, + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gceSURF_FORMAT Format + ); + +gctBOOL +gcoVG_IsTargetSupported( +#if gcdGC355_PROFILER + IN gcoVG Vg, + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gceSURF_FORMAT Format + ); + +gctBOOL +gcoVG_IsImageSupported( +#if gcdGC355_PROFILER + IN gcoVG Vg, + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gceSURF_FORMAT Format + ); + +gctUINT8 gcoVG_PackColorComponent( +#if gcdGC355_PROFILER + gcoVG Vg, + gctUINT TreeDepth, + gctUINT saveLayerTreeDepth, + gctUINT varTreeDepth, +#endif + gctFLOAT Value + ); + +gceSTATUS +gcoVG_Construct( + IN gcoHAL Hal, + OUT gcoVG * Vg + ); + +gceSTATUS +gcoVG_Destroy( + IN gcoVG Vg +#if gcdGC355_PROFILER +, + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth +#endif + ); + +gceSTATUS +gcoVG_SetTarget( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gcoSURF Target, + IN gceORIENTATION orientation + ); + +gceSTATUS +gcoVG_UnsetTarget( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gcoSURF Surface + ); + +gceSTATUS +gcoVG_SetUserToSurface( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gctFLOAT UserToSurface[9] + ); + +gceSTATUS 
+gcoVG_SetSurfaceToImage( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gctFLOAT SurfaceToImage[9] + ); + +gceSTATUS +gcoVG_EnableMask( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gctBOOL Enable + ); + +gceSTATUS +gcoVG_SetMask( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gcoSURF Mask + ); + +gceSTATUS +gcoVG_UnsetMask( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gcoSURF Surface + ); + +gceSTATUS +gcoVG_FlushMask( + IN gcoVG Vg +#if gcdGC355_PROFILER +, + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth +#endif + ); + +gceSTATUS +gcoVG_EnableScissor( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gctBOOL Enable + ); + +gceSTATUS +gcoVG_SetScissor( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gctSIZE_T RectangleCount, + IN gcsVG_RECT_PTR Rectangles + ); + +gceSTATUS +gcoVG_EnableColorTransform( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gctBOOL Enable + ); + +gceSTATUS +gcoVG_SetColorTransform( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gctFLOAT ColorTransform[8] + ); + +gceSTATUS +gcoVG_SetTileFillColor( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gctFLOAT Red, + IN gctFLOAT Green, + IN gctFLOAT Blue, + IN gctFLOAT Alpha + ); + 
+gceSTATUS +gcoVG_SetSolidPaint( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gctUINT8 Red, + IN gctUINT8 Green, + IN gctUINT8 Blue, + IN gctUINT8 Alpha + ); + +gceSTATUS +gcoVG_SetLinearPaint( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gctFLOAT Constant, + IN gctFLOAT StepX, + IN gctFLOAT StepY + ); + +gceSTATUS +gcoVG_SetRadialPaint( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gctFLOAT LinConstant, + IN gctFLOAT LinStepX, + IN gctFLOAT LinStepY, + IN gctFLOAT RadConstant, + IN gctFLOAT RadStepX, + IN gctFLOAT RadStepY, + IN gctFLOAT RadStepXX, + IN gctFLOAT RadStepYY, + IN gctFLOAT RadStepXY + ); + +gceSTATUS +gcoVG_SetPatternPaint( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gctFLOAT UConstant, + IN gctFLOAT UStepX, + IN gctFLOAT UStepY, + IN gctFLOAT VConstant, + IN gctFLOAT VStepX, + IN gctFLOAT VStepY, + IN gctBOOL Linear + ); + +gceSTATUS +gcoVG_SetColorRamp( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gcoSURF ColorRamp, + IN gceTILE_MODE ColorRampSpreadMode + ); + +gceSTATUS +gcoVG_SetPattern( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gctINT32 width, + IN gctINT32 height, + IN gcoSURF Pattern, + IN gceTILE_MODE TileMode, + IN gceIMAGE_FILTER Filter + ); + +gceSTATUS +gcoVG_SetImageMode( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gceVG_IMAGE Mode + ); + +gceSTATUS +gcoVG_SetBlendMode( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN 
gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gceVG_BLEND Mode + ); + +gceSTATUS +gcoVG_SetRenderingQuality( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gceRENDER_QUALITY Quality + ); + +gceSTATUS +gcoVG_SetFillRule( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gceFILL_RULE FillRule + ); + +gceSTATUS +gcoVG_FinalizePath( + IN gcoVG Vg, + IN gcsPATH_DATA_PTR PathData + ); + +gceSTATUS +gcoVG_Clear( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gctINT X, + IN gctINT Y, + IN gctINT Width, + IN gctINT Height + ); + +gceSTATUS +gcoVG_DrawPath( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gcsPATH_DATA_PTR PathData, + IN gctFLOAT Scale, + IN gctFLOAT Bias, +#if gcdMOVG + IN gctUINT32 Width, + IN gctUINT32 Height, + IN gctFLOAT *Bounds, +#endif + IN gctBOOL SoftwareTesselation + ); + +gceSTATUS +gcoVG_DrawImage( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gceORIENTATION orientation, + IN gcoSURF Source, + IN gcsPOINT_PTR SourceOrigin, + IN gcsPOINT_PTR TargetOrigin, + IN gcsSIZE_PTR SourceSize, + IN gctINT SourceX, + IN gctINT SourceY, + IN gctINT TargetX, + IN gctINT TargetY, + IN gctINT Width, + IN gctINT Height, + IN gctBOOL Mask, + IN gctBOOL isDrawImage + ); + +gceSTATUS +gcoVG_TesselateImage( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gcoSURF Image, + IN gcsVG_RECT_PTR Rectangle, + IN gceIMAGE_FILTER Filter, + IN gctBOOL Mask, +#if gcdMOVG + IN gctBOOL SoftwareTesselation, + IN gceVG_BLEND 
BlendMode, + IN gctINT Width, + IN gctINT Height +#else + IN gctBOOL SoftwareTesselation +#endif + ); + +gceSTATUS +gcoVG_DrawSurfaceToImage( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gcoSURF Image, + IN const gcsVG_RECT_PTR SrcRectangle, + IN const gcsVG_RECT_PTR DstRectangle, + IN const gctFLOAT Matrix[9], + IN gceIMAGE_FILTER Filter, + IN gctBOOL Mask, + IN gctBOOL FirstTime + ); + +gceSTATUS +gcoVG_Blit( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gcoSURF Source, + IN gcoSURF Target, + IN gcsVG_RECT_PTR SrcRect, + IN gcsVG_RECT_PTR TrgRect, + IN gceIMAGE_FILTER Filter, + IN gceVG_BLEND Mode + ); + +gceSTATUS +gcoVG_ColorMatrix( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gcoSURF Source, + IN gcoSURF Target, + IN const gctFLOAT * Matrix, + IN gceCHANNEL ColorChannels, + IN gctBOOL FilterLinear, + IN gctBOOL FilterPremultiplied, + IN gcsPOINT_PTR SourceOrigin, + IN gcsPOINT_PTR TargetOrigin, + IN gctINT Width, + IN gctINT Height + ); + +gceSTATUS +gcoVG_SeparableConvolve( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gcoSURF Source, + IN gcoSURF Target, + IN gctINT KernelWidth, + IN gctINT KernelHeight, + IN gctINT ShiftX, + IN gctINT ShiftY, + IN const gctINT16 * KernelX, + IN const gctINT16 * KernelY, + IN gctFLOAT Scale, + IN gctFLOAT Bias, + IN gceTILE_MODE TilingMode, + IN gctFLOAT_PTR FillColor, + IN gceCHANNEL ColorChannels, + IN gctBOOL FilterLinear, + IN gctBOOL FilterPremultiplied, + IN gcsPOINT_PTR SourceOrigin, + IN gcsPOINT_PTR TargetOrigin, + IN gcsSIZE_PTR SourceSize, + IN gctINT Width, + IN gctINT Height + ); + +gceSTATUS +gcoVG_GaussianBlur( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN 
gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gcoSURF Source, + IN gcoSURF Target, + IN gctFLOAT StdDeviationX, + IN gctFLOAT StdDeviationY, + IN gceTILE_MODE TilingMode, + IN gctFLOAT_PTR FillColor, + IN gceCHANNEL ColorChannels, + IN gctBOOL FilterLinear, + IN gctBOOL FilterPremultiplied, + IN gcsPOINT_PTR SourceOrigin, + IN gcsPOINT_PTR TargetOrigin, + IN gcsSIZE_PTR SourceSize, + IN gctINT Width, + IN gctINT Height + ); + +gceSTATUS +gcoVG_EnableDither( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gctBOOL Enable + ); + +/* Color Key States. */ +gceSTATUS +gcoVG_SetColorKey( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gcsPROFILERFUNCNODE *DList, + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gctUINT32* Values, + IN gctBOOL * Enables +); + +/* Index Color States. */ +gceSTATUS +gcoVG_SetColorIndexTable( + IN gcoVG Vg, +#if gcdGC355_PROFILER + IN gcsPROFILERFUNCNODE *DList, + IN gctUINT TreeDepth, + IN gctUINT saveLayerTreeDepth, + IN gctUINT varTreeDepth, +#endif + IN gctUINT32* Values, + IN gctINT32 Count +); + +/* VG RS feature support: YUV format conversion. 
+#endif /* __gc_hal_engine_vg_h_ */
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#ifndef __gc_hal_enum_h_ +#define __gc_hal_enum_h_ + +#include "gc_hal_options.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Chip models. 
*/
+/* GPU core model IDs; numeric values match the hardware identity register, do not renumber. */
+typedef enum _gceCHIPMODEL
+{
+    gcv200 = 0x0200,
+    gcv300 = 0x0300,
+    gcv320 = 0x0320,
+    gcv328 = 0x0328,
+    gcv350 = 0x0350,
+    gcv355 = 0x0355,
+    gcv400 = 0x0400,
+    gcv410 = 0x0410,
+    gcv420 = 0x0420,
+    gcv428 = 0x0428,
+    gcv450 = 0x0450,
+    gcv500 = 0x0500,
+    gcv520 = 0x0520,
+    gcv530 = 0x0530,
+    gcv600 = 0x0600,
+    gcv620 = 0x0620,
+    gcv700 = 0x0700,
+    gcv800 = 0x0800,
+    gcv860 = 0x0860,
+    gcv880 = 0x0880,
+    gcv900 = 0x0900,
+    gcv1000 = 0x1000,
+    gcv1500 = 0x1500,
+    gcv2000 = 0x2000,
+    gcv2100 = 0x2100,
+    gcv2200 = 0x2200,
+    gcv2500 = 0x2500,
+    gcv3000 = 0x3000,
+    gcv4000 = 0x4000,
+    gcv5000 = 0x5000,
+    gcv5200 = 0x5200,
+    gcv6400 = 0x6400,
+    gcv7000 = 0x7000,
+    gcv7400 = 0x7400,
+}
+gceCHIPMODEL;
+
+/* Chip features. Ordinal feature bits; order is ABI between kernel and user
+** space — insert new entries only where the original comments say to. */
+typedef enum _gceFEATURE
+{
+    gcvFEATURE_PIPE_2D = 0,
+    gcvFEATURE_PIPE_3D,
+    gcvFEATURE_PIPE_VG,
+    gcvFEATURE_DC,
+    gcvFEATURE_HIGH_DYNAMIC_RANGE,
+    gcvFEATURE_MODULE_CG,
+    gcvFEATURE_MIN_AREA,
+    gcvFEATURE_BUFFER_INTERLEAVING,
+    gcvFEATURE_BYTE_WRITE_2D,
+    gcvFEATURE_ENDIANNESS_CONFIG,
+    gcvFEATURE_DUAL_RETURN_BUS,
+    gcvFEATURE_DEBUG_MODE,
+    gcvFEATURE_YUY2_RENDER_TARGET,
+    gcvFEATURE_FRAGMENT_PROCESSOR,
+    gcvFEATURE_2DPE20,
+    gcvFEATURE_FAST_CLEAR,
+    gcvFEATURE_YUV420_TILER,
+    gcvFEATURE_YUY2_AVERAGING,
+    gcvFEATURE_FLIP_Y,
+    gcvFEATURE_EARLY_Z,
+    gcvFEATURE_COMPRESSION,
+    gcvFEATURE_MSAA,
+    gcvFEATURE_SPECIAL_ANTI_ALIASING,
+    gcvFEATURE_SPECIAL_MSAA_LOD,
+    gcvFEATURE_422_TEXTURE_COMPRESSION,
+    gcvFEATURE_DXT_TEXTURE_COMPRESSION,
+    gcvFEATURE_ETC1_TEXTURE_COMPRESSION,
+    gcvFEATURE_CORRECT_TEXTURE_CONVERTER,
+    gcvFEATURE_TEXTURE_8K,
+    gcvFEATURE_SCALER,
+    gcvFEATURE_YUV420_SCALER,
+    gcvFEATURE_SHADER_HAS_W,
+    gcvFEATURE_SHADER_HAS_SIGN,
+    gcvFEATURE_SHADER_HAS_FLOOR,
+    gcvFEATURE_SHADER_HAS_CEIL,
+    gcvFEATURE_SHADER_HAS_SQRT,
+    gcvFEATURE_SHADER_HAS_TRIG,
+    gcvFEATURE_HZ,
+    gcvFEATURE_CORRECT_STENCIL,
+    gcvFEATURE_VG20,
+    gcvFEATURE_VG_FILTER,
+    gcvFEATURE_VG21,
+    gcvFEATURE_VG_DOUBLE_BUFFER,
+    gcvFEATURE_MC20,
+    gcvFEATURE_SUPER_TILED,
+    gcvFEATURE_FAST_CLEAR_FLUSH,
+    gcvFEATURE_2D_FILTERBLIT_PLUS_ALPHABLEND,
+    gcvFEATURE_2D_DITHER,
+    gcvFEATURE_2D_A8_TARGET,
+    gcvFEATURE_2D_A8_NO_ALPHA,
+    gcvFEATURE_2D_FILTERBLIT_FULLROTATION,
+    gcvFEATURE_2D_BITBLIT_FULLROTATION,
+    gcvFEATURE_WIDE_LINE,
+    gcvFEATURE_FC_FLUSH_STALL,
+    gcvFEATURE_FULL_DIRECTFB,
+    gcvFEATURE_HALF_FLOAT_PIPE,
+    gcvFEATURE_LINE_LOOP,
+    gcvFEATURE_2D_YUV_BLIT,
+    gcvFEATURE_2D_TILING,
+    gcvFEATURE_NON_POWER_OF_TWO,
+    gcvFEATURE_3D_TEXTURE,
+    gcvFEATURE_TEXTURE_ARRAY,
+    gcvFEATURE_TILE_FILLER,
+    gcvFEATURE_LOGIC_OP,
+    gcvFEATURE_MIXED_STREAMS,
+    gcvFEATURE_2D_MULTI_SOURCE_BLT,
+    gcvFEATURE_END_EVENT,
+    gcvFEATURE_VERTEX_10_10_10_2,
+    gcvFEATURE_TEXTURE_10_10_10_2,
+    gcvFEATURE_TEXTURE_ANISOTROPIC_FILTERING,
+    gcvFEATURE_TEXTURE_FLOAT_HALF_FLOAT,
+    gcvFEATURE_2D_ROTATION_STALL_FIX,
+    gcvFEATURE_2D_MULTI_SOURCE_BLT_EX,
+    gcvFEATURE_BUG_FIXES10,
+    gcvFEATURE_2D_MINOR_TILING,
+    /* sic: "COMPRRESSION" typo is in the vendor header; identifier is ABI, do not rename. */
+    gcvFEATURE_TEX_COMPRRESSION_SUPERTILED, /* Supertiled compressed textures are supported. */
+    gcvFEATURE_FAST_MSAA,
+    gcvFEATURE_BUG_FIXED_INDEXED_TRIANGLE_STRIP,
+    gcvFEATURE_TEXTURE_TILE_STATUS_READ,
+    gcvFEATURE_DEPTH_BIAS_FIX,
+    gcvFEATURE_RECT_PRIMITIVE,
+    gcvFEATURE_BUG_FIXES11,
+    gcvFEATURE_SUPERTILED_TEXTURE,
+    gcvFEATURE_2D_NO_COLORBRUSH_INDEX8,
+    gcvFEATURE_RS_YUV_TARGET,
+    gcvFEATURE_2D_FC_SOURCE,
+    gcvFEATURE_2D_CC_NOAA_SOURCE,
+    gcvFEATURE_PE_DITHER_FIX,
+    gcvFEATURE_2D_YUV_SEPARATE_STRIDE,
+    gcvFEATURE_FRUSTUM_CLIP_FIX,
+    gcvFEATURE_TEXTURE_SWIZZLE,
+    gcvFEATURE_PRIMITIVE_RESTART,
+    gcvFEATURE_TEXTURE_LINEAR,
+    gcvFEATURE_TEXTURE_YUV_ASSEMBLER,
+    gcvFEATURE_LINEAR_RENDER_TARGET,
+    gcvFEATURE_SHADER_HAS_ATOMIC,
+    gcvFEATURE_SHADER_HAS_INSTRUCTION_CACHE,
+    gcvFEATURE_SHADER_ENHANCEMENTS2,
+    gcvFEATURE_BUG_FIXES7,
+    gcvFEATURE_SHADER_HAS_RTNE,
+    gcvFEATURE_SHADER_HAS_EXTRA_INSTRUCTIONS2,
+    gcvFEATURE_SHADER_ENHANCEMENTS3,
+    gcvFEATURE_DYNAMIC_FREQUENCY_SCALING,
+    gcvFEATURE_SINGLE_BUFFER,
+    gcvFEATURE_OCCLUSION_QUERY,
+    gcvFEATURE_2D_GAMMA,
+    gcvFEATURE_2D_COLOR_SPACE_CONVERSION,
+    gcvFEATURE_2D_SUPER_TILE_VERSION,
+    gcvFEATURE_HALTI0,
+    gcvFEATURE_HALTI1,
+    gcvFEATURE_HALTI2,
+    gcvFEATURE_SUPPORT_GCREGTX,
+    gcvFEATURE_2D_MIRROR_EXTENSION,
+    gcvFEATURE_TEXTURE_ASTC,
+    gcvFEATURE_TEXTURE_ASTC_DECODE_FIX,
+    gcvFEATURE_TEXTURE_ASTC_BASE_LOD_FIX,
+    gcvFEATURE_2D_SUPER_TILE_V1,
+    gcvFEATURE_2D_SUPER_TILE_V2,
+    gcvFEATURE_2D_SUPER_TILE_V3,
+    gcvFEATURE_2D_MULTI_SOURCE_BLT_EX2,
+    gcvFEATURE_NEW_RA,
+    gcvFEATURE_BUG_FIXED_IMPLICIT_PRIMITIVE_RESTART,
+    gcvFEATURE_PE_MULTI_RT_BLEND_ENABLE_CONTROL,
+    gcvFEATURE_SMALL_MSAA, /* An upgraded version of Fast MSAA */
+    gcvFEATURE_VERTEX_INST_ID_AS_ATTRIBUTE,
+    gcvFEATURE_DUAL_16,
+    gcvFEATURE_BRANCH_ON_IMMEDIATE_REG,
+    gcvFEATURE_2D_COMPRESSION,
+    gcvFEATURE_TPC_COMPRESSION,
+    gcvFEATURE_TPCV11_COMPRESSION,
+    gcvFEATURE_DEC_COMPRESSION,
+    gcvFEATURE_DEC300_COMPRESSION,
+    gcvFEATURE_DEC400_COMPRESSION,
+    gcvFEATURE_DEC_TPC_COMPRESSION,
+    gcvFEATURE_DEC_COMPRESSION_TILE_NV12_8BIT,
+    gcvFEATURE_DEC_COMPRESSION_TILE_NV12_10BIT,
+    gcvFEATURE_2D_OPF_YUV_OUTPUT,
+    gcvFEATURE_2D_FILTERBLIT_A8_ALPHA,
+    gcvFEATURE_2D_MULTI_SRC_BLT_TO_UNIFIED_DST_RECT,
+    gcvFEATURE_2D_MULTI_SRC_BLT_BILINEAR_FILTER,
+    gcvFEATURE_2D_MULTI_SRC_BLT_1_5_ENHANCEMENT,
+    gcvFEATURE_V2_COMPRESSION_Z16_FIX,
+    gcvFEATURE_VERTEX_INST_ID_AS_INTEGER,
+    gcvFEATURE_2D_YUV_MODE,
+    gcvFEATURE_2D_CACHE_128B256BPERLINE,
+    gcvFEATURE_2D_SEPARATE_CACHE,
+    gcvFEATURE_2D_MAJOR_SUPER_TILE,
+    gcvFEATURE_2D_V4COMPRESSION,
+    gcvFEATURE_2D_VMSAA,
+    gcvFEATURE_2D_10BIT_OUTPUT_LINEAR,
+    gcvFEATURE_2D_YUV420_OUTPUT_LINEAR,
+    gcvFEATURE_ACE,
+    gcvFEATURE_COLOR_COMPRESSION,
+    gcvFEATURE_32BPP_COMPONENT_TEXTURE_CHANNEL_SWIZZLE,
+    gcvFEATURE_64BPP_HW_CLEAR_SUPPORT,
+    gcvFEATURE_TX_LERP_PRECISION_FIX,
+    gcvFEATURE_COMPRESSION_V2,
+    gcvFEATURE_MMU,
+    gcvFEATURE_COMPRESSION_V3,
+    gcvFEATURE_TX_DECOMPRESSOR,
+    gcvFEATURE_MRT_TILE_STATUS_BUFFER,
+    gcvFEATURE_COMPRESSION_V1,
+    gcvFEATURE_V1_COMPRESSION_Z16_DECOMPRESS_FIX,
+    gcvFEATURE_RTT,
+    gcvFEATURE_GENERIC_ATTRIB,
+    gcvFEATURE_2D_ONE_PASS_FILTER,
+    gcvFEATURE_2D_ONE_PASS_FILTER_TAP,
+    gcvFEATURE_2D_POST_FLIP,
+    gcvFEATURE_2D_PIXEL_ALIGNMENT,
+    gcvFEATURE_CORRECT_AUTO_DISABLE_COUNT,
+    gcvFEATURE_CORRECT_AUTO_DISABLE_COUNT_WIDTH,
+    gcvFEATURE_8K_RT,
+    gcvFEATURE_HALTI3,
+    gcvFEATURE_EEZ,
+    gcvFEATURE_INTEGER_SIGNEXT_FIX,
+    gcvFEATURE_PSOUTPUT_MAPPING,
+    gcvFEATURE_8K_RT_FIX,
+    gcvFEATURE_TX_TILE_STATUS_MAPPING,
+    gcvFEATURE_SRGB_RT_SUPPORT,
+    gcvFEATURE_TEXTURE_16K,
+    gcvFEATURE_PA_FARZCLIPPING_FIX,
+    gcvFEATURE_PE_DITHER_COLORMASK_FIX,
+    gcvFEATURE_ZSCALE_FIX,
+    gcvFEATURE_MULTI_PIXELPIPES,
+    gcvFEATURE_PIPE_CL,
+    gcvFEATURE_BUG_FIXES18,
+    gcvFEATURE_UNIFIED_SAMPLERS,
+    gcvFEATURE_CL_PS_WALKER,
+    gcvFEATURE_NEW_HZ,
+    gcvFEATURE_TX_FRAC_PRECISION_6BIT,
+    gcvFEATURE_SH_INSTRUCTION_PREFETCH,
+    gcvFEATURE_PROBE,
+    gcvFEATURE_SINGLE_PIPE_HALTI1,
+    gcvFEATURE_BUG_FIXES8, /* This HW feature is wrong, we can't use this to check integer branch!!! */
+    gcvFEATURE_2D_ALL_QUAD,
+    gcvFEATURE_SEPARATE_SRC_DST,
+    gcvFEATURE_TX_HOR_ALIGN_SEL,
+    gcvFEATURE_HALTI4,
+    gcvFEATURE_MRT_FC_FIX,
+    gcvFEATURE_TESSELLATION,
+    gcvFEATURE_DRAW_INDIRECT,
+    gcvFEATURE_COMPUTE_INDIRECT,
+    gcvFEATURE_MSAA_TEXTURE,
+    gcvFEATURE_STENCIL_TEXTURE,
+    gcvFEATURE_S8_ONLY_RENDERING,
+    gcvFEATURE_D24S8_SAMPLE_STENCIL,
+    gcvFEATURE_ADVANCED_BLEND_MODE_PART0,
+    gcvFEATURE_RA_DEPTH_WRITE,
+    gcvFEATURE_RS_DS_DOWNSAMPLE_NATIVE_SUPPORT,
+    gcvFEATURE_S8_MSAA_COMPRESSION,
+    gcvFEATURE_MSAA_FRAGMENT_OPERATION,
+    gcvFEATURE_FE_START_VERTEX_SUPPORT,
+    gcvFEATURE_DIVISOR_STREAM_ADDR_FIX,
+    gcvFEATURE_ZERO_ATTRIB_SUPPORT,
+    gcvFEATURE_DANGLING_VERTEX_FIX,
+    gcvFEATURE_PE_DISABLE_COLOR_PIPE,
+    gcvFEATURE_FE_12bit_stride,
+    gcvFEATURE_TX_LOD_GUARDBAND,
+    gcvFEATURE_HAS_PRODUCTID,
+    gcvFEATURE_INTEGER32_FIX,
+    gcvFEATURE_TEXTURE_GATHER,
+    gcvFEATURE_IMG_INSTRUCTION,
+    gcvFEATURE_HELPER_INVOCATION,
+    gcvFEATURE_NO_USER_CSC,
+    gcvFEATURE_ANDROID_ONLY,
+    gcvFEATURE_V2_MSAA_COHERENCY_FIX,
+    gcvFEATURE_BLOCK_SIZE_16x16,
+    gcvFEATURE_TX_SUPPORT_DEC,
+    gcvFEATURE_RSBLT_MSAA_DECOMPRESSION,
+    gcvFEATURE_TILEFILLER_32TILE_ALIGNED,
+    gcvFEATURE_GEOMETRY_SHADER,
+    gcvFEATURE_HALTI5,
+    gcvFEATURE_PIPELINE_32_ATTRIBUTES,
+    gcvFEATURE_USC,
+    gcvFEATURE_CUBEMAP_ARRAY,
+    gcvFEATURE_TX_DESCRIPTOR,
+    gcvFEATURE_SEPARATE_RT_CTRL,
+    gcvFEATURE_RENDER_ARRAY,
+    gcvFEATURE_BLT_ENGINE,
+    gcvFEATURE_SMALLDRAW_BATCH,
+    gcvFEATURE_TEXTURE_BUFFER,
+    gcvFEATURE_GS_SUPPORT_EMIT,
+    gcvFEATURE_SAMPLER_BASE_OFFSET,
+    gcvFEATURE_IMAGE_OUT_BOUNDARY_FIX,
+    gcvFEATURE_TX_BORDER_CLAMP,
+    gcvFEATURE_MSAA_SHADING,
+    gcvFEATURE_ADVANCED_SH_INST,
+    gcvFEATURE_LOD_FIX_FOR_BASELEVEL,
+    gcvFEATURE_MULTIDRAW_INDIRECT,
+    gcvFEATURE_DRAW_ELEMENTS_BASE_VERTEX,
+    gcvFEATURE_NEW_STEERING_AND_ICACHE_FLUSH, /* Steering base on register base. Trigger-style Icache flush state. */
+    gcvFEATURE_PE_DITHER_FIX2,
+    gcvFEATURE_INDEX_FETCH_FIX,
+    gcvFEATURE_TEX_BASELOD,
+    gcvFEATURE_TEX_SEAMLESS_CUBE,
+    gcvFEATURE_TEX_ETC2,
+    gcvFEATURE_TEX_CUBE_BORDER_LOD,
+    gcvFEATURE_FE_ALLOW_STALL_PREFETCH_ENG,
+    gcvFEATURE_TX_8BPP_TS_FIX,
+    gcvFEATURE_HW_TFB,
+    gcvFEATURE_COMPRESSION_V4,
+    gcvFEATURE_FENCE_32BIT,
+    gcvFEATURE_FENCE_64BIT,
+    gcvFEATURE_R8_UNORM,
+    gcvFEATURE_TX_DEFAULT_VALUE_FIX,
+    gcvFEATURE_TX_8bit_UVFrac,
+    gcvFEATURE_TX_MIPFILTER_NONE_FIX,
+    gcvFEATURE_MC_STENCIL_CTRL,
+    gcvFEATURE_DEPTH_MATH_FIX,
+    gcvFEATURE_PE_B2B_PIXEL_FIX,
+    gcvFEATURE_TEXTURE_GATHER_OFFSETS,
+    gcvFEATURE_TEX_CACHE_FLUSH_FIX,
+    gcvFEATURE_WIDELINE_HELPER_FIX,
+    gcvFEATURE_LINE_DIAMOND_RULE_FIX,
+    gcvFEATURE_MULTIGPU_SYNC_V2,
+    gcvFEATURE_DRAW_ID,
+    gcvFEATURE_SNAPPAGE_CMD,
+    gcvFEATURE_COMMAND_PREFETCH,
+    gcvFEATURE_SAMPLEPOS_SWIZZLE_FIX,
+    gcvFEATURE_SELECTMAP_SRC0_SWIZZLE_FIX,
+    gcvFEATURE_LOADATTR_OOB_FIX,
+    gcvFEATURE_RA_DEPTH_WRITE_MSAA1X_FIX,
+    gcvFEATURE_MRT_8BIT_DUAL_PIPE_FIX,
+    gcvFEATURE_BUG_FIXES1,
+    gcvFEATURE_MULTI_SOURCE_BLT,
+    gcvFEATURE_ZCOMPRESSION,
+    gcvFEATURE_DITHER_AND_FILTER_PLUS_ALPHA_2D,
+    gcvFEATURE_ONE_PASS_2D_FILTER,
+    gcvFEATURE_TX_FILTER,
+    gcvFEATURE_CHIPENABLE_LINK,
+    gcvFEATURE_TEXTURE_BIAS_LOD_FIX,
+    gcvFEATURE_USE_GL_Z,
+    gcvFEATURE_SUPPORT_INTEGER,
+    /* PARTLY_SUPPORT_INTEGER_BRANCH:
+    **     chips can support all integer types for compare instructions, e.g., CMP, SELECT.
+    ** FULLLY_SUPPORT_INTEGER_BRANCH:
+    **     chips can support all integer types for JMP instruction.
+    ** If PARTLY_SUPPORT_INTEGER_BRANCH is TRUE but FULLLY_SUPPORT_INTEGER_BRANCH is FALSE,
+    ** then this chip can only support INT32/UINT32 JMP instruction.
+    ** (sic: "FULLLY" typo is in the vendor header; identifier is ABI, do not rename.)
+    */
+    gcvFEATURE_PARTLY_SUPPORT_INTEGER_BRANCH,
+    gcvFEATURE_FULLLY_SUPPORT_INTEGER_BRANCH,
+    gcvFEATURE_SUPPORT_INTEGER_ATTRIBUTE,
+    gcvFEATURE_SUPPORT_MOVAI,
+    gcvFEATURE_NEED_FIX_FOR_CL_X,
+    gcvFEATURE_NEED_FIX_FOR_CL_XE,
+    gcvFEATURE_HAS_OUTPUT_COUNT_FIX,
+    gcvFEATURE_VARYING_PACKING_LIMITATION,
+    gcvFEATURE_HIGHP_VARYING_SHIFT,
+    gcvFEATURE_BUG_FIXES2,
+    gcvFEATURE_64K_L2_CACHE,
+    gcvFEATURE_128BTILE,
+    gcvFEATURE_ADVANCED_BLEND_OPT,
+    gcvFEATURE_SNAPPAGE_CMD_FIX,
+    gcvFEATURE_L2_CACHE_FOR_2D_420,
+    gcvFEATURE_TILE_STATUS_2BITS,
+    gcvFEATURE_EXTRA_SHADER_INSTRUCTIONS0,
+    gcvFEATURE_EXTRA_SHADER_INSTRUCTIONS1,
+    gcvFEATURE_EXTRA_SHADER_INSTRUCTIONS2,
+    gcvFEATURE_MEDIUM_PRECISION,
+    gcvFEATURE_FE20_BIT_INDEX,
+    gcvFEATURE_BUG_FIXES4,
+    gcvFEATURE_BUG_FIXES12,
+    gcvFEATURE_VMSAA,
+    gcvFEATURE_ROBUST_ATOMIC,
+    gcvFEATURE_32F_COLORMASK_FIX,
+    gcvFEATURE_NEW_GPIPE,
+    gcvFEATURE_RS_NEW_BASEADDR,
+    gcvFEATURE_TX_DXT,
+    gcvFEATURE_SH_FLAT_INTERPOLATION_DUAL16_FIX,
+    gcvFEATURE_EVIS,
+    gcvFEATURE_SH_SUPPORT_V4,
+    gcvFEATURE_SH_SUPPORT_ALPHA_KILL,
+    gcvFEATURE_PE_NO_ALPHA_TEST,
+    gcvFEATURE_SH_SNAP2PAGE_MAXPAGES_FIX,
+    gcvFEATURE_USC_FULLCACHE_FIX,
+    gcvFEATURE_PE_64bit_FENCE_FIX,
+    gcvFEATURE_BLT_8bit_256TILE_FC_FIX,
+    gcvFEATURE_PE_RGBA16I_FIX,
+    gcvFEATURE_BLT_64bpp_MASKED_CLEAR_FIX,
+    gcvFEATURE_SH_PSO_MSAA1x_FIX,
+    gcvFEATURE_USC_ATOMIC_FIX,
+    gcvFEATURE_INDEX_CONST_ON_B0,
+    gcvFEATURE_SH_NO_ONECONST_LIMIT,
+    gcvFEATURE_EVIS_NO_ABSDIFF,
+    gcvFEATURE_EVIS_NO_BITREPLACE,
+    gcvFEATURE_EVIS_NO_BOXFILTER,
+    gcvFEATURE_EVIS_NO_CORDIAC,
+    gcvFEATURE_EVIS_NO_DP32,
+    gcvFEATURE_EVIS_NO_FILTER,
+    gcvFEATURE_EVIS_NO_IADD,
+    gcvFEATURE_EVIS_NO_SELECTADD,
+    gcvFEATURE_EVIS_LERP_7OUTPUT,
+    gcvFEATURE_EVIS_ACCSQ_8OUTPUT,
+    gcvFEATURE_ROBUSTNESS,
+    gcvFEATURE_SECURITY,
+    gcvFEATURE_TX_YUV_ASSEMBLER_10BIT,
+    gcvFEATURE_USC_GOS_ADDR_FIX,
+    gcvFEATURE_SUPPORT_MSAA2X,
+    gcvFEATURE_TX_DESC_CACHE_CLOCKGATE_FIX,
+    gcvFEATURE_TX_INTEGER_COORDINATE,
+    gcvFEATURE_PSIO_SAMPLEMASK_IN_R0ZW_FIX,
+    gcvFEATURE_MULTI_CORE_BLOCK_SET_CONFIG,
+    gcvFEATURE_SH_IMG_LDST_ON_TEMP,
+    gcvFEATURE_TX_INTEGER_COORDINATE_V2,
+    gcvFEATURE_COMPUTE_ONLY,
+    gcvFEATURE_SH_IMG_LDST_CLAMP,
+    gcvFEATURE_SH_ICACHE_ALLOC_COUNT_FIX,
+    gcvFEATURE_MSAA_OQ_FIX,
+    gcvFEATURE_PE_ENHANCEMENTS2,
+    gcvFEATURE_PSIO_MSAA_CL_FIX,
+    gcvFEATURE_FE_NEED_DUMMYDRAW,
+    gcvFEATURE_MULTI_CLUSTER,
+    gcvFEATURE_PSIO_INTERLOCK,
+    gcvFEATURE_BLIT_COMPRESS_DEST,
+    gcvFEATURE_SH_MULTI_WG_PACK,
+    gcvFEATURE_FE_ROBUST_FIX,
+    gcvFEATURE_TX_ASTC_MULTISLICE_FIX,
+    gcvFEATURE_PSIO_DUAL16_32bpc_FIX,
+    gcvFEATURE_LS_SUPPORT_PER_COMP_DEPENDENCY,
+    gcvFEATURE_COMPRESSION_DEC400,
+    gcvFEATURE_SH_TEXLD_U_FIX,
+    gcvFEATURE_TX_FLUSH_L1CACHE,
+    gcvFEATURE_USC_DEFER_FILL_FIX,
+    gcvFEATURE_MC_FCCACHE_BYTEMASK,
+    gcvFEATURE_SH_MULTI_WG_PACK_FIX,
+    gcvFEATURE_FE_PATCHLIST_FETCH_FIX,
+    gcvFEATURE_RA_CG_FIX,
+    gcvFEATURE_EVIS_VX2,
+    gcvFEATURE_SH_HALF_DEPENDENCY_FIX,
+    gcvFEATURE_SH_CLOCK_GATE_FIX,
+    gcvFEATURE_GPIPE_CLOCK_GATE_FIX,
+    gcvFEATURE_TP_ENGINE,
+    gcvFEATURE_TX_BORDER_CLAMP_FIX,
+    gcvFEATURE_SH_IMAGE_LD_LAST_PIXEL_FIX,
+    gcvFEATURE_MULTI_CORE_BLOCK_SET_CONFIG2,
+    gcvFEATURE_MULTIGPU_SYNC_V3,
+    gcvFEATURE_PE_VMSAA_COVERAGE_CACHE_FIX,
+    gcvFEATURE_SECURITY_AHB,
+    gcvFEATURE_TX_LERP_LESS_BIT,
+    gcvFEATURE_VIP_V7,
+    gcvFEATURE_ASYNC_BLIT,
+    gcvFEATURE_ASYNC_FE_FENCE_FIX,
+    gcvFEATURE_PSCS_THROTTLE,
+    gcvFEATURE_WIDELINE_TRIANGLE_EMU,
+    gcvFEATURE_FENCE,
+    gcvFEATURE_PE_DEPTH_ONLY_OQFIX,
+    gcvFEATURE_VG_RESOLUTION_8K,
+    gcvFEATURE_IMAGE_LS_NO_FULLMASK_FIX,
+    gcvFEATURE_PE_TILE_CACHE_FLUSH_FIX,
+    gcvFEATURE_USC_ATOMIC_FIX2,
+
+    /* Insert features above this comment only. */
+    gcvFEATURE_COUNT /* Not a feature. */
+}
+gceFEATURE;
+
+/* Dummy draw type. */
+typedef enum _gceDUMMY_DRAW_TYPE
+{
+    gcvDUMMY_DRAW_INVALID = 0,
+    gcvDUMMY_DRAW_GC400,
+    gcvDUMMY_DRAW_V60,
+}
+gceDUMMY_DRAW_TYPE;
+
+/* Chip SWWA.
*/
+/* Software workaround (SWWA) identifiers, named after tracking-bug numbers. */
+typedef enum _gceSWWA
+{
+    gcvSWWA_601 = 0,
+    gcvSWWA_706,
+    gcvSWWA_1163,
+    gcvSWWA_1165,
+    /* Insert SWWA above this comment only. */
+    gcvSWWA_COUNT /* Not a SWWA. */
+}
+gceSWWA;
+
+
+/* Option set. */
+typedef enum _gceOPTION
+{
+    /* HW setting. */
+    gcvOPTION_PREFER_ZCONVERT_BYPASS = 0,
+    gcvOPTION_PREFER_TILED_DISPLAY_BUFFER = 1,
+    gcvOPTION_PREFER_GUARDBAND = 2,
+    gcvOPTION_PREFER_TPG_TRIVIALMODEL = 3,
+    gcvOPTION_PREFER_RA_DEPTH_WRITE = 4,
+    gcvOPTION_PREFER_USC_RECONFIG = 5,
+    /* sic: "DISALBE" typo is in the vendor header; identifier is ABI, do not rename. */
+    gcvOPTION_PREFER_DISALBE_HZ = 6,
+
+    /* SW options */
+    gcvOPTION_HW_NULL = 50,
+    gcvOPTION_PRINT_OPTION = 51,
+    gcvOPTION_KERNEL_FENCE = 52,
+    gcvOPTION_ASYNC_PIPE = 53,
+    gcvOPTION_FBO_PREFER_MEM = 54,
+    gcvOPTION_GPU_TEX_UPLOAD = 55,
+    gcvOPTION_GPU_BUFOBJ_UPLOAD = 56,
+    gcvOPTION_OCL_ASYNC_BLT = 57,
+    gcvOPTION_OCL_IN_THREAD = 58,
+    gcvOPTION_COMPRESSION_DEC400 = 59,
+    gcvOPTION_NO_Y_INVERT = 60,
+    /* Insert option above this comment only */
+    gcvOPTION_COUNT /* Not an OPTION. */
+}
+gceOPTION;
+
+/* Per-frame statistics counters. */
+typedef enum _gceFRAMEINFO
+{
+    /* Total frame count in one run */
+    gcvFRAMEINFO_FRAME_NUM = 0,
+    /* Total draw count in current frame, including draw/compute */
+    gcvFRAMEINFO_DRAW_NUM = 1,
+    /* Total compute count in current frame, subset of drawNum */
+    gcvFRAMEINFO_COMPUTE_NUM = 2,
+    /* Total dual16 draw/compute count in current frame, subset of drawNum */
+    gcvFRAMEINFO_DUAL16_NUM = 3,
+    /* Current programID is being set. Only valid for ES20 driver right now */
+    gcvFRAMEINFO_PROGRAM_ID = 4,
+
+    gcvFRAMEINFO_COUNT,
+}
+gceFRAMEINFO;
+
+/* Operations applied to a gceFRAMEINFO counter. */
+typedef enum _gceFRAMEINFO_OP
+{
+    gcvFRAMEINFO_OP_INC = 0,
+    gcvFRAMEINFO_OP_DEC = 1,
+    gcvFRAMEINFO_OP_ZERO = 2,
+    gcvFRAMEINFO_OP_GET = 3,
+    gcvFRAMEINFO_OP_SET = 4,
+    gcvFRAMEINFO_OP_COUNT,
+}
+gceFRAMEINFO_OP;
+
+
+/* Chip Power Status. */
+typedef enum _gceCHIPPOWERSTATE
+{
+    gcvPOWER_ON = 0,
+    gcvPOWER_OFF,
+    gcvPOWER_IDLE,
+    gcvPOWER_SUSPEND,
+    gcvPOWER_IDLE_BROADCAST,
+    gcvPOWER_SUSPEND_BROADCAST,
+    gcvPOWER_OFF_BROADCAST,
+    gcvPOWER_OFF_TIMEOUT,
+    gcvPOWER_ON_AUTO
+}
+gceCHIPPOWERSTATE;
+
+/* CPU cache operations */
+typedef enum _gceCACHEOPERATION
+{
+    gcvCACHE_CLEAN = 0x01, /* Flush CPU cache to mem */
+    gcvCACHE_INVALIDATE = 0x02, /* Invalidate CPU cache */
+    gcvCACHE_FLUSH = gcvCACHE_CLEAN | gcvCACHE_INVALIDATE, /* Both flush & invalidate */
+    gcvCACHE_MEMORY_BARRIER = 0x04
+}
+gceCACHEOPERATION;
+
+/* Surface types. Low values are the base type; high bits (>= 0x100) are
+** modifier flags that may be OR'ed with a base type. */
+typedef enum _gceSURF_TYPE
+{
+    gcvSURF_TYPE_UNKNOWN = 0,
+    gcvSURF_INDEX,
+    gcvSURF_VERTEX,
+    gcvSURF_TEXTURE,
+    gcvSURF_RENDER_TARGET,
+    gcvSURF_DEPTH,
+    gcvSURF_BITMAP,
+    gcvSURF_TILE_STATUS,
+    gcvSURF_IMAGE,
+    gcvSURF_MASK,
+    gcvSURF_SCISSOR,
+    gcvSURF_HIERARCHICAL_DEPTH,
+    gcvSURF_ICACHE,
+    gcvSURF_TXDESC,
+    gcvSURF_FENCE,
+    gcvSURF_TFBHEADER,
+    gcvSURF_NUM_TYPES, /* Make sure this is the last one! */
+
+    /* Combinations. */
+    gcvSURF_NO_TILE_STATUS = 0x100,
+    gcvSURF_NO_VIDMEM = 0x200, /* Used to allocate surfaces with no underlying vidmem node.
+                                  In Android, vidmem node is allocated by another process. */
+    gcvSURF_CACHEABLE = 0x400, /* Used to allocate a cacheable surface */
+    gcvSURF_TILE_RLV_FENCE = 0x800, /* create texture fence as tile */
+    gcvSURF_TILE_STATUS_DIRTY = 0x1000, /* Init tile status to all dirty */
+    gcvSURF_LINEAR = 0x2000,
+    gcvSURF_CREATE_AS_TEXTURE = 0x4000, /* create it as a texture */
+    gcvSURF_PROTECTED_CONTENT = 0x8000, /* create it as content protected */
+    gcvSURF_CREATE_AS_DISPLAYBUFFER = 0x10000, /* create it as a display buffer surface */
+    gcvSURF_CONTIGUOUS = 0x20000, /* create it as contiguous */
+    gcvSURF_NO_COMPRESSION = 0x40000, /* Create it as no compression, valid only when it has tile status. */
+    gcvSURF_DEC = 0x80000, /* Surface is DEC compressed */
+    gcvSURF_NO_HZ = 0x100000,
+    gcvSURF_3D = 0x200000, /* It's 3d surface */
+    gcvSURF_DMABUF_EXPORTABLE = 0x400000, /* master node can be exported as dma-buf fd */
+    gcvSURF_CACHE_MODE_128 = 0x800000,
+
+    gcvSURF_TEXTURE_LINEAR = gcvSURF_TEXTURE
+                           | gcvSURF_LINEAR,
+
+    gcvSURF_RENDER_TARGET_LINEAR = gcvSURF_RENDER_TARGET
+                                 | gcvSURF_LINEAR,
+
+    gcvSURF_RENDER_TARGET_NO_TILE_STATUS = gcvSURF_RENDER_TARGET
+                                         | gcvSURF_NO_TILE_STATUS,
+
+    gcvSURF_RENDER_TARGET_NO_COMPRESSION = gcvSURF_RENDER_TARGET
+                                         | gcvSURF_NO_COMPRESSION,
+
+    gcvSURF_RENDER_TARGET_TS_DIRTY = gcvSURF_RENDER_TARGET
+                                   | gcvSURF_TILE_STATUS_DIRTY,
+
+    gcvSURF_DEPTH_NO_TILE_STATUS = gcvSURF_DEPTH
+                                 | gcvSURF_NO_TILE_STATUS,
+
+    gcvSURF_DEPTH_TS_DIRTY = gcvSURF_DEPTH
+                           | gcvSURF_TILE_STATUS_DIRTY,
+
+    /* Supported surface types with no vidmem node. */
+    gcvSURF_BITMAP_NO_VIDMEM = gcvSURF_BITMAP
+                             | gcvSURF_NO_VIDMEM,
+
+    gcvSURF_TEXTURE_NO_VIDMEM = gcvSURF_TEXTURE
+                              | gcvSURF_NO_VIDMEM,
+
+    /* Cacheable surface types with no vidmem node. */
+    gcvSURF_CACHEABLE_BITMAP_NO_VIDMEM = gcvSURF_BITMAP_NO_VIDMEM
+                                       | gcvSURF_CACHEABLE,
+
+    gcvSURF_CACHEABLE_BITMAP = gcvSURF_BITMAP
+                             | gcvSURF_CACHEABLE,
+
+    gcvSURF_TEXTURE_3D = gcvSURF_TEXTURE
+                       | gcvSURF_3D
+}
+gceSURF_TYPE;
+
+typedef enum _gceSURF_USAGE
+{
+    gcvSURF_USAGE_UNKNOWN,
+    gcvSURF_USAGE_RESOLVE_AFTER_CPU,
+    gcvSURF_USAGE_RESOLVE_AFTER_3D
+}
+gceSURF_USAGE;
+
+typedef enum _gceSURF_COLOR_SPACE
+{
+    gcvSURF_COLOR_SPACE_UNKNOWN,
+    gcvSURF_COLOR_SPACE_LINEAR,
+    gcvSURF_COLOR_SPACE_NONLINEAR,
+}
+gceSURF_COLOR_SPACE;
+
+typedef enum _gceSURF_COLOR_TYPE
+{
+    gcvSURF_COLOR_UNKNOWN = 0,
+    gcvSURF_COLOR_LINEAR = 0x01,
+    gcvSURF_COLOR_ALPHA_PRE = 0x02,
+}
+gceSURF_COLOR_TYPE;
+
+/* Rotation.
*/
+typedef enum _gceSURF_ROTATION
+{
+    gcvSURF_0_DEGREE = 0,
+    gcvSURF_90_DEGREE,
+    gcvSURF_180_DEGREE,
+    gcvSURF_270_DEGREE,
+    gcvSURF_FLIP_X,
+    gcvSURF_FLIP_Y,
+
+    gcvSURF_POST_FLIP_X = 0x40000000,
+    gcvSURF_POST_FLIP_Y = 0x80000000,
+}
+gceSURF_ROTATION;
+
+/* Surface flag */
+typedef enum _gceSURF_FLAG
+{
+    /* None flag */
+    gcvSURF_FLAG_NONE = 0x0,
+    /* content is preserved after swap */
+    gcvSURF_FLAG_CONTENT_PRESERVED = 0x1,
+    /* content is updated after swap */
+    gcvSURF_FLAG_CONTENT_UPDATED = 0x2,
+    /* content is y inverted */
+    gcvSURF_FLAG_CONTENT_YINVERTED = 0x4,
+    /* surface has multiple nodes */
+    gcvSURF_FLAG_MULTI_NODE = 0x8,
+}
+gceSURF_FLAG;
+
+typedef enum _gceMIPMAP_IMAGE_FORMAT
+{
+    gcvUNKNOWN_MIPMAP_IMAGE_FORMAT = -2
+}
+gceMIPMAP_IMAGE_FORMAT;
+
+/* Surface formats.
+** Naming rule is from MSB to LSB.
+** Values are grouped by family (100 = palettized, 200 = RGB, 300 = BGR, ...);
+** the numeric values are ABI, do not renumber.
+*/
+typedef enum _gceSURF_FORMAT
+{
+    /* Unknown format. */
+    gcvSURF_UNKNOWN = 0,
+
+    /* Palettized formats. */
+    gcvSURF_INDEX1 = 100,
+    gcvSURF_INDEX4,
+    gcvSURF_INDEX8,
+#if gcdVG_ONLY
+    gcvSURF_INDEX2,
+#endif
+
+    /* RGB formats. */
+    gcvSURF_A2R2G2B2 = 200,
+    gcvSURF_R3G3B2,
+    gcvSURF_A8R3G3B2,
+    gcvSURF_X4R4G4B4,
+    gcvSURF_A4R4G4B4,
+    gcvSURF_R4G4B4A4,
+    gcvSURF_X1R5G5B5,
+    gcvSURF_A1R5G5B5,
+    gcvSURF_R5G5B5A1,
+    gcvSURF_R5G6B5,
+    gcvSURF_R8G8B8,
+    gcvSURF_X8R8G8B8,
+    gcvSURF_A8R8G8B8,
+    gcvSURF_R8G8B8A8,
+    gcvSURF_G8R8G8B8,
+    gcvSURF_R8G8B8G8,
+    gcvSURF_X2R10G10B10,
+    gcvSURF_A2R10G10B10,
+    gcvSURF_R10G10B10A2,
+    gcvSURF_X12R12G12B12,
+    gcvSURF_A12R12G12B12,
+    gcvSURF_X16R16G16B16,
+    gcvSURF_A16R16G16B16,
+    gcvSURF_A32R32G32B32,
+    gcvSURF_R8G8B8X8,
+    gcvSURF_R5G5B5X1,
+    gcvSURF_R4G4B4X4,
+    gcvSURF_X16R16G16B16_2_A8R8G8B8,
+    gcvSURF_A16R16G16B16_2_A8R8G8B8,
+    gcvSURF_A32R32G32B32_2_G32R32F,
+    gcvSURF_A32R32G32B32_4_A8R8G8B8,
+
+    /* BGR formats. */
+    gcvSURF_A4B4G4R4 = 300,
+    gcvSURF_A1B5G5R5,
+    gcvSURF_B5G6R5,
+    gcvSURF_B8G8R8,
+    gcvSURF_B16G16R16,
+    gcvSURF_X8B8G8R8,
+    gcvSURF_A8B8G8R8,
+    gcvSURF_A2B10G10R10,
+    gcvSURF_X16B16G16R16,
+    gcvSURF_A16B16G16R16,
+    gcvSURF_B32G32R32,
+    gcvSURF_X32B32G32R32,
+    gcvSURF_A32B32G32R32,
+    gcvSURF_B4G4R4A4,
+    gcvSURF_B5G5R5A1,
+    gcvSURF_B8G8R8X8,
+    gcvSURF_B8G8R8A8,
+    gcvSURF_B10G10R10A2,
+    gcvSURF_X4B4G4R4,
+    gcvSURF_X1B5G5R5,
+    gcvSURF_B4G4R4X4,
+    gcvSURF_B5G5R5X1,
+    gcvSURF_X2B10G10R10,
+    gcvSURF_B8G8R8_SNORM,
+    gcvSURF_X8B8G8R8_SNORM,
+    gcvSURF_A8B8G8R8_SNORM,
+    gcvSURF_A8B12G12R12_2_A8R8G8B8,
+
+    /* Compressed formats. */
+    gcvSURF_DXT1 = 400,
+    gcvSURF_DXT2,
+    gcvSURF_DXT3,
+    gcvSURF_DXT4,
+    gcvSURF_DXT5,
+    gcvSURF_CXV8U8,
+    gcvSURF_ETC1,
+    gcvSURF_R11_EAC,
+    gcvSURF_SIGNED_R11_EAC,
+    gcvSURF_RG11_EAC,
+    gcvSURF_SIGNED_RG11_EAC,
+    gcvSURF_RGB8_ETC2,
+    gcvSURF_SRGB8_ETC2,
+    gcvSURF_RGB8_PUNCHTHROUGH_ALPHA1_ETC2,
+    gcvSURF_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2,
+    gcvSURF_RGBA8_ETC2_EAC,
+    gcvSURF_SRGB8_ALPHA8_ETC2_EAC,
+
+    /* YUV formats. */
+    gcvSURF_YUY2 = 500,
+    gcvSURF_UYVY,
+    gcvSURF_YV12,
+    gcvSURF_I420,
+    gcvSURF_NV12,
+    gcvSURF_NV21,
+    gcvSURF_NV16,
+    gcvSURF_NV61,
+    gcvSURF_YVYU,
+    gcvSURF_VYUY,
+    gcvSURF_AYUV,
+    gcvSURF_YUV420_10_ST,
+    gcvSURF_YUV420_TILE_ST,
+    gcvSURF_YUV420_TILE_10_ST,
+    gcvSURF_NV12_10BIT,
+    gcvSURF_NV21_10BIT,
+    gcvSURF_NV16_10BIT,
+    gcvSURF_NV61_10BIT,
+    gcvSURF_P010,
+#if gcdVG_ONLY
+    gcvSURF_AYUY2,
+    gcvSURF_ANV12,
+    gcvSURF_ANV16,
+#endif
+
+    /* Depth formats. */
+    gcvSURF_D16 = 600,
+    gcvSURF_D24S8,
+    gcvSURF_D32,
+    gcvSURF_D24X8,
+    gcvSURF_D32F,
+    gcvSURF_S8D32F,
+    gcvSURF_S8D32F_1_G32R32F,
+    gcvSURF_S8D32F_2_A8R8G8B8,
+    gcvSURF_D24S8_1_A8R8G8B8,
+    gcvSURF_S8,
+    gcvSURF_X24S8,
+    gcvSURF_X24S8_1_A8R8G8B8,
+
+    /* Alpha formats. */
+    gcvSURF_A4 = 700,
+    gcvSURF_A8,
+    gcvSURF_A12,
+    gcvSURF_A16,
+    gcvSURF_A32,
+    gcvSURF_A1,
+
+    gcvSURF_A8_1_A8R8G8B8,
+
+    /* Luminance formats. */
+    gcvSURF_L4 = 800,
+    gcvSURF_L8,
+    gcvSURF_L12,
+    gcvSURF_L16,
+    gcvSURF_L32,
+    gcvSURF_L1,
+
+    /* Alpha/Luminance formats. */
+    gcvSURF_A4L4 = 900,
+    gcvSURF_A2L6,
+    gcvSURF_A8L8,
+    gcvSURF_A4L12,
+    gcvSURF_A12L12,
+    gcvSURF_A16L16,
+
+    gcvSURF_A8L8_1_A8R8G8B8,
+
+    /* Bump formats. */
+    gcvSURF_L6V5U5 = 1000,
+    gcvSURF_V8U8,
+    gcvSURF_X8L8V8U8,
+    gcvSURF_Q8W8V8U8,
+    gcvSURF_A2W10V10U10,
+    gcvSURF_V16U16,
+    gcvSURF_Q16W16V16U16,
+
+    /* R/RG/RA formats. */
+    gcvSURF_R8 = 1100,
+    gcvSURF_X8R8,
+    gcvSURF_G8R8,
+    gcvSURF_X8G8R8,
+    gcvSURF_A8R8,
+    gcvSURF_R16,
+    gcvSURF_X16R16,
+    gcvSURF_G16R16,
+    gcvSURF_X16G16R16,
+    gcvSURF_A16R16,
+    gcvSURF_R32,
+    gcvSURF_X32R32,
+    gcvSURF_G32R32,
+    gcvSURF_X32G32R32,
+    gcvSURF_A32R32,
+    gcvSURF_RG16,
+    gcvSURF_R8_SNORM,
+    gcvSURF_G8R8_SNORM,
+
+    gcvSURF_R8_1_X8R8G8B8,
+    gcvSURF_G8R8_1_X8R8G8B8,
+
+    /* Floating point formats. */
+    gcvSURF_R16F = 1200,
+    gcvSURF_X16R16F,
+    gcvSURF_G16R16F,
+    gcvSURF_X16G16R16F,
+    gcvSURF_B16G16R16F,
+    gcvSURF_X16B16G16R16F,
+    gcvSURF_A16B16G16R16F,
+    gcvSURF_R32F,
+    gcvSURF_X32R32F,
+    gcvSURF_G32R32F,
+    gcvSURF_X32G32R32F,
+    gcvSURF_B32G32R32F,
+    gcvSURF_X32B32G32R32F,
+    gcvSURF_A32B32G32R32F,
+    gcvSURF_A16F,
+    gcvSURF_L16F,
+    gcvSURF_A16L16F,
+    gcvSURF_A16R16F,
+    gcvSURF_A32F,
+    gcvSURF_L32F,
+    gcvSURF_A32L32F,
+    gcvSURF_A32R32F,
+    gcvSURF_E5B9G9R9,
+    gcvSURF_B10G11R11F,
+
+    gcvSURF_X16B16G16R16F_2_A8R8G8B8,
+    gcvSURF_A16B16G16R16F_2_A8R8G8B8,
+    gcvSURF_A16B16G16R16F_2_G16R16F,
+    gcvSURF_G32R32F_2_A8R8G8B8,
+    gcvSURF_X32B32G32R32F_2_G32R32F,
+    gcvSURF_A32B32G32R32F_2_G32R32F,
+    gcvSURF_X32B32G32R32F_4_A8R8G8B8,
+    gcvSURF_A32B32G32R32F_4_A8R8G8B8,
+
+    gcvSURF_R16F_1_A4R4G4B4,
+    gcvSURF_G16R16F_1_A8R8G8B8,
+    gcvSURF_B16G16R16F_2_A8R8G8B8,
+
+    gcvSURF_R32F_1_A8R8G8B8,
+    gcvSURF_B32G32R32F_3_A8R8G8B8,
+    gcvSURF_B10G11R11F_1_A8R8G8B8,
+
+    gcvSURF_A32F_1_R32F,
+    gcvSURF_L32F_1_R32F,
+    gcvSURF_A32L32F_1_G32R32F,
+
+
+
+    /* sRGB format. */
+    gcvSURF_SBGR8 = 1400,
+    gcvSURF_A8_SBGR8,
+    gcvSURF_X8_SBGR8,
+    gcvSURF_A8_SRGB8,
+    gcvSURF_X8_SRGB8,
+
+    /* Integer formats. */
+    gcvSURF_R8I = 1500,
+    gcvSURF_R8UI,
+    gcvSURF_R16I,
+    gcvSURF_R16UI,
+    gcvSURF_R32I,
+    gcvSURF_R32UI,
+    gcvSURF_X8R8I,
+    gcvSURF_G8R8I,
+    gcvSURF_X8R8UI,
+    gcvSURF_G8R8UI,
+    gcvSURF_X16R16I,
+    gcvSURF_G16R16I,
+    gcvSURF_X16R16UI,
+    gcvSURF_G16R16UI,
+    gcvSURF_X32R32I,
+    gcvSURF_G32R32I,
+    gcvSURF_X32R32UI,
+    gcvSURF_G32R32UI,
+    gcvSURF_X8G8R8I,
+    gcvSURF_B8G8R8I,
+    gcvSURF_X8G8R8UI,
+    gcvSURF_B8G8R8UI,
+    gcvSURF_X16G16R16I,
+    gcvSURF_B16G16R16I,
+    gcvSURF_X16G16R16UI,
+    gcvSURF_B16G16R16UI,
+    gcvSURF_X32G32R32I,
+    gcvSURF_B32G32R32I,
+    gcvSURF_X32G32R32UI,
+    gcvSURF_B32G32R32UI,
+    gcvSURF_X8B8G8R8I,
+    gcvSURF_A8B8G8R8I,
+    gcvSURF_X8B8G8R8UI,
+    gcvSURF_A8B8G8R8UI,
+    gcvSURF_X16B16G16R16I,
+    gcvSURF_A16B16G16R16I,
+    gcvSURF_X16B16G16R16UI,
+    gcvSURF_A16B16G16R16UI,
+    gcvSURF_X32B32G32R32I,
+    gcvSURF_A32B32G32R32I,
+    gcvSURF_X32B32G32R32UI,
+    gcvSURF_A32B32G32R32UI,
+    gcvSURF_A2B10G10R10UI,
+    gcvSURF_G32R32I_2_A8R8G8B8,
+    gcvSURF_G32R32I_1_G32R32F,
+    gcvSURF_G32R32UI_2_A8R8G8B8,
+    gcvSURF_G32R32UI_1_G32R32F,
+    gcvSURF_X16B16G16R16I_2_A8R8G8B8,
+    gcvSURF_X16B16G16R16I_1_G32R32F,
+    gcvSURF_A16B16G16R16I_2_A8R8G8B8,
+    gcvSURF_A16B16G16R16I_1_G32R32F,
+    gcvSURF_X16B16G16R16UI_2_A8R8G8B8,
+    gcvSURF_X16B16G16R16UI_1_G32R32F,
+    gcvSURF_A16B16G16R16UI_2_A8R8G8B8,
+    gcvSURF_A16B16G16R16UI_1_G32R32F,
+    gcvSURF_X32B32G32R32I_2_G32R32I,
+    gcvSURF_A32B32G32R32I_2_G32R32I,
+    gcvSURF_A32B32G32R32I_2_G32R32F,
+    gcvSURF_X32B32G32R32I_3_A8R8G8B8,
+    gcvSURF_A32B32G32R32I_4_A8R8G8B8,
+    gcvSURF_X32B32G32R32UI_2_G32R32UI,
+    gcvSURF_A32B32G32R32UI_2_G32R32UI,
+    gcvSURF_A32B32G32R32UI_2_G32R32F,
+    gcvSURF_X32B32G32R32UI_3_A8R8G8B8,
+    gcvSURF_A32B32G32R32UI_4_A8R8G8B8,
+    gcvSURF_A2B10G10R10UI_1_A8R8G8B8,
+    gcvSURF_A8B8G8R8I_1_A8R8G8B8,
+    gcvSURF_A8B8G8R8UI_1_A8R8G8B8,
+    gcvSURF_R8I_1_A4R4G4B4,
+    gcvSURF_R8UI_1_A4R4G4B4,
+    gcvSURF_R16I_1_A4R4G4B4,
+    gcvSURF_R16UI_1_A4R4G4B4,
+    gcvSURF_R32I_1_A8R8G8B8,
+    gcvSURF_R32UI_1_A8R8G8B8,
+    gcvSURF_X8R8I_1_A4R4G4B4,
+    gcvSURF_X8R8UI_1_A4R4G4B4,
+    gcvSURF_G8R8I_1_A4R4G4B4,
+    gcvSURF_G8R8UI_1_A4R4G4B4,
+    gcvSURF_X16R16I_1_A4R4G4B4,
+    gcvSURF_X16R16UI_1_A4R4G4B4,
+    gcvSURF_G16R16I_1_A8R8G8B8,
+    gcvSURF_G16R16UI_1_A8R8G8B8,
+    gcvSURF_X32R32I_1_A8R8G8B8,
+    gcvSURF_X32R32UI_1_A8R8G8B8,
+    gcvSURF_X8G8R8I_1_A4R4G4B4,
+    gcvSURF_X8G8R8UI_1_A4R4G4B4,
+    gcvSURF_B8G8R8I_1_A8R8G8B8,
+    gcvSURF_B8G8R8UI_1_A8R8G8B8,
+    gcvSURF_B16G16R16I_2_A8R8G8B8,
+    gcvSURF_B16G16R16I_1_G32R32F,
+    gcvSURF_B16G16R16UI_2_A8R8G8B8,
+    gcvSURF_B16G16R16UI_1_G32R32F,
+    gcvSURF_B32G32R32I_3_A8R8G8B8,
+    gcvSURF_B32G32R32UI_3_A8R8G8B8,
+    gcvSURF_A16B16G16R16_2_A8R8G8B8,
+    gcvSURF_R8G8B8_1_A8R8G8B8,
+    gcvSURF_G16R16_1_A8R8G8B8,
+    gcvSURF_A2B10G10R10_1_A8R8G8B8,
+    gcvSURF_A2R10G10B10_1_A8R8G8B8,
+    gcvSURF_A2W10V10U10_1_A8R8G8B8,
+
+    /* ASTC formats. */
+    gcvSURF_ASTC4x4 = 1600,
+    gcvSURF_ASTC5x4,
+    gcvSURF_ASTC5x5,
+    gcvSURF_ASTC6x5,
+    gcvSURF_ASTC6x6,
+    gcvSURF_ASTC8x5,
+    gcvSURF_ASTC8x6,
+    gcvSURF_ASTC8x8,
+    gcvSURF_ASTC10x5,
+    gcvSURF_ASTC10x6,
+    gcvSURF_ASTC10x8,
+    gcvSURF_ASTC10x10,
+    gcvSURF_ASTC12x10,
+    gcvSURF_ASTC12x12,
+    gcvSURF_ASTC4x4_SRGB,
+    gcvSURF_ASTC5x4_SRGB,
+    gcvSURF_ASTC5x5_SRGB,
+    gcvSURF_ASTC6x5_SRGB,
+    gcvSURF_ASTC6x6_SRGB,
+    gcvSURF_ASTC8x5_SRGB,
+    gcvSURF_ASTC8x6_SRGB,
+    gcvSURF_ASTC8x8_SRGB,
+    gcvSURF_ASTC10x5_SRGB,
+    gcvSURF_ASTC10x6_SRGB,
+    gcvSURF_ASTC10x8_SRGB,
+    gcvSURF_ASTC10x10_SRGB,
+    gcvSURF_ASTC12x10_SRGB,
+    gcvSURF_ASTC12x12_SRGB,
+
+    /* Recompile format. */
+    gcvSURF_L16_1_A4R4G4B4 = 1700,
+    gcvSURF_V16U16_1_A8R8G8B8,
+    gcvSURF_Q8W8V8U8_1_A8R8G8B8,
+    gcvSURF_X8L8V8U8_1_A8R8G8B8,
+    gcvSURF_R3G3B2_1_A8R8G8B8,
+    gcvSURF_A8R3G3B2_1_A8R8G8B8,
+    gcvSURF_W11V11U10_1_A8R8G8B8,
+    gcvSURF_Q16W16V16U16_2_A8R8G8B8,
+    gcvSURF_W11V11U10,
+    gcvSURF_V8U8_1_A4R4G4B4,
+    gcvSURF_A8B8G8R8_1_A8R8G8B8,
+    gcvSURF_A32R32G32B32_1_A8R8G8B8,
+    gcvSURF_X16B16G16R16F_1_A8R8G8B8,
+    gcvSURF_A16B16G16R16F_1_A8R8G8B8,
+    gcvSURF_G32R32F_1_A8R8G8B8,
+    gcvSURF_X32B32G32R32F_1_A8R8G8B8,
+    gcvSURF_A32B32G32R32F_1_A8R8G8B8,
+    gcvSURF_G32R32I_1_A8R8G8B8,
+    gcvSURF_G32R32UI_1_A8R8G8B8,
+    gcvSURF_A32B32G32R32I_1_A8R8G8B8,
+    gcvSURF_A32B32G32R32UI_1_A8R8G8B8,
+    gcvSURF_Q16W16V16U16_1_A8R8G8B8,
+    gcvSURF_A16B16G16R16_1_A8R8G8B8,
+    gcvSURF_FORMAT_COUNT
+}
+gceSURF_FORMAT;
+
+typedef enum _gceIMAGE_MEM_TYPE
+{
+    gcvIMAGE_MEM_DEFAULT,
+    gcvIMAGE_MEM_HOST_PTR,
+    gcvIMAGE_MEM_HOST_PTR_UNCACHED,
+}
+gceIMAGE_MEM_TYPE;
+
+typedef enum _gceSURF_YUV_COLOR_SPACE
+{
+    gcvSURF_ITU_REC601,
+    gcvSURF_ITU_REC709,
+    gcvSURF_ITU_REC2020,
+}
+gceSURF_YUV_COLOR_SPACE;
+
+typedef enum _gceSURF_YUV_SAMPLE_RANGE
+{
+    gcvSURF_YUV_FULL_RANGE,
+    gcvSURF_YUV_NARROW_RANGE,
+}
+gceSURF_YUV_SAMPLE_RANGE;
+
+typedef enum _gceSURF_YUV_CHROMA_SITING
+{
+    gcvSURF_YUV_CHROMA_SITING_0,
+    gcvSURF_YUV_CHROMA_SITING_0_5,
+}
+gceSURF_YUV_CHROMA_SITING;
+
+typedef enum _gceSURF_INFO_TYPE
+{
+    gcvSURF_INFO_UNKNOWN = 0,
+    gcvSURF_INFO_LAYERSIZE = 1,
+    gcvSURF_INFO_SLICESIZE = 2,
+}
+gceSURF_INFO_TYPE;
+
+/* Format modifiers. */
+typedef enum _gceSURF_FORMAT_MODE
+{
+    gcvSURF_FORMAT_OCL = 0x80000000,
+    gcvSURF_FORMAT_PATCH_BORDER = 0x40000000,
+}
+gceSURF_FORMAT_MODE;
+
+/* Pixel swizzle modes. */
+typedef enum _gceSURF_SWIZZLE
+{
+    gcvSURF_NOSWIZZLE = 0,
+    gcvSURF_ARGB,
+    gcvSURF_ABGR,
+    gcvSURF_RGBA,
+    gcvSURF_BGRA
+}
+gceSURF_SWIZZLE;
+
+/* Transparency modes. */
+typedef enum _gceSURF_TRANSPARENCY
+{
+    /* Valid only for PE 1.0 */
+    gcvSURF_OPAQUE = 0,
+    gcvSURF_SOURCE_MATCH,
+    gcvSURF_SOURCE_MASK,
+    gcvSURF_PATTERN_MASK,
+}
+gceSURF_TRANSPARENCY;
+
+/* Surface Alignment. */
+typedef enum _gceSURF_ALIGNMENT
+{
+    gcvSURF_FOUR = 0,
+    gcvSURF_SIXTEEN,
+    gcvSURF_SUPER_TILED,
+    gcvSURF_SPLIT_TILED,
+    gcvSURF_SPLIT_SUPER_TILED
+}
+gceSURF_ALIGNMENT;
+
+/* Surface Addressing.
*/
+typedef enum _gceSURF_ADDRESSING
+{
+    gcvSURF_NO_STRIDE_TILED = 0,
+    gcvSURF_NO_STRIDE_LINEAR,
+    gcvSURF_STRIDE_TILED,
+    gcvSURF_STRIDE_LINEAR
+}
+gceSURF_ADDRESSING;
+
+/* Transparency modes. */
+typedef enum _gce2D_TRANSPARENCY
+{
+    /* Valid only for PE 2.0 */
+    gcv2D_OPAQUE = 0,
+    gcv2D_KEYED,
+    gcv2D_MASKED
+}
+gce2D_TRANSPARENCY;
+
+/* Mono packing modes. */
+typedef enum _gceSURF_MONOPACK
+{
+    gcvSURF_PACKED8 = 0,
+    gcvSURF_PACKED16,
+    gcvSURF_PACKED32,
+    gcvSURF_UNPACKED,
+}
+gceSURF_MONOPACK;
+
+/* Blending modes. */
+typedef enum _gceSURF_BLEND_MODE
+{
+    /* Porter-Duff blending modes.        Fsrc      Fdst      */
+    gcvBLEND_CLEAR = 0,               /*  0         0         */
+    gcvBLEND_SRC,                     /*  1         0         */
+    gcvBLEND_DST,                     /*  0         1         */
+    gcvBLEND_SRC_OVER_DST,            /*  1         1 - Asrc  */
+    gcvBLEND_DST_OVER_SRC,            /*  1 - Adst  1         */
+    gcvBLEND_SRC_IN_DST,              /*  Adst      0         */
+    gcvBLEND_DST_IN_SRC,              /*  0         Asrc      */
+    gcvBLEND_SRC_OUT_DST,             /*  1 - Adst  0         */
+    gcvBLEND_DST_OUT_SRC,             /*  0         1 - Asrc  */
+    gcvBLEND_SRC_ATOP_DST,            /*  Adst      1 - Asrc  */
+    gcvBLEND_DST_ATOP_SRC,            /*  1 - Adst  Asrc      */
+    gcvBLEND_SRC_XOR_DST,             /*  1 - Adst  1 - Asrc  */
+
+    /* Special blending modes. */
+    gcvBLEND_SET,                     /*  DST = 1             */
+    gcvBLEND_SUB                      /*  DST = DST * (1 - SRC) */
+}
+gceSURF_BLEND_MODE;
+
+/* Per-pixel alpha modes. */
+typedef enum _gceSURF_PIXEL_ALPHA_MODE
+{
+    gcvSURF_PIXEL_ALPHA_STRAIGHT = 0,
+    gcvSURF_PIXEL_ALPHA_INVERSED
+}
+gceSURF_PIXEL_ALPHA_MODE;
+
+/* Global alpha modes. */
+typedef enum _gceSURF_GLOBAL_ALPHA_MODE
+{
+    gcvSURF_GLOBAL_ALPHA_OFF = 0,
+    gcvSURF_GLOBAL_ALPHA_ON,
+    gcvSURF_GLOBAL_ALPHA_SCALE
+}
+gceSURF_GLOBAL_ALPHA_MODE;
+
+/* Color component modes for alpha blending. */
+typedef enum _gceSURF_PIXEL_COLOR_MODE
+{
+    gcvSURF_COLOR_STRAIGHT = 0,
+    gcvSURF_COLOR_MULTIPLY
+}
+gceSURF_PIXEL_COLOR_MODE;
+
+/* Color component modes for alpha blending. */
+typedef enum _gce2D_PIXEL_COLOR_MULTIPLY_MODE
+{
+    gcv2D_COLOR_MULTIPLY_DISABLE = 0,
+    gcv2D_COLOR_MULTIPLY_ENABLE
+}
+gce2D_PIXEL_COLOR_MULTIPLY_MODE;
+
+/* Color component modes for alpha blending. */
+typedef enum _gce2D_GLOBAL_COLOR_MULTIPLY_MODE
+{
+    gcv2D_GLOBAL_COLOR_MULTIPLY_DISABLE = 0,
+    gcv2D_GLOBAL_COLOR_MULTIPLY_ALPHA,
+    gcv2D_GLOBAL_COLOR_MULTIPLY_COLOR
+}
+gce2D_GLOBAL_COLOR_MULTIPLY_MODE;
+
+/* Alpha blending factor modes. */
+typedef enum _gceSURF_BLEND_FACTOR_MODE
+{
+    gcvSURF_BLEND_ZERO = 0,
+    gcvSURF_BLEND_ONE,
+    gcvSURF_BLEND_STRAIGHT,
+    gcvSURF_BLEND_INVERSED,
+    gcvSURF_BLEND_COLOR,
+    gcvSURF_BLEND_COLOR_INVERSED,
+    gcvSURF_BLEND_SRC_ALPHA_SATURATED,
+    gcvSURF_BLEND_STRAIGHT_NO_CROSS,
+    gcvSURF_BLEND_INVERSED_NO_CROSS,
+    gcvSURF_BLEND_COLOR_NO_CROSS,
+    gcvSURF_BLEND_COLOR_INVERSED_NO_CROSS,
+    gcvSURF_BLEND_SRC_ALPHA_SATURATED_CROSS
+}
+gceSURF_BLEND_FACTOR_MODE;
+
+/* Alpha blending Porter-Duff rules. */
+typedef enum _gce2D_PORTER_DUFF_RULE
+{
+    gcvPD_CLEAR = 0,
+    gcvPD_SRC,
+    gcvPD_SRC_OVER,
+    gcvPD_DST_OVER,
+    gcvPD_SRC_IN,
+    gcvPD_DST_IN,
+    gcvPD_SRC_OUT,
+    gcvPD_DST_OUT,
+    gcvPD_SRC_ATOP,
+    gcvPD_DST_ATOP,
+    gcvPD_ADD,
+    gcvPD_XOR,
+    gcvPD_DST
+}
+gce2D_PORTER_DUFF_RULE;
+
+/* YUV color conversion modes. */
+typedef enum _gce2D_YUV_COLOR_MODE
+{
+    gcv2D_YUV_601 = 0,
+    gcv2D_YUV_709,
+    gcv2D_YUV_USER_DEFINED,
+    gcv2D_YUV_USER_DEFINED_CLAMP,
+
+    /* Default setting is for src. gcv2D_YUV_DST
+       can be ORed to set dst.
+    */
+    gcv2D_YUV_DST = 0x80000000,
+}
+gce2D_YUV_COLOR_MODE;
+
+/* Nature rotation rules. */
+typedef enum _gce2D_NATURE_ROTATION
+{
+    gcvNR_0_DEGREE = 0,
+    gcvNR_LEFT_90_DEGREE,
+    gcvNR_RIGHT_90_DEGREE,
+    gcvNR_180_DEGREE,
+    gcvNR_FLIP_X,
+    gcvNR_FLIP_Y,
+    gcvNR_TOTAL_RULE,
+}
+gce2D_NATURE_ROTATION;
+
+typedef enum _gce2D_COMMAND
+{
+    gcv2D_CLEAR = 0,
+    gcv2D_LINE,
+    gcv2D_BLT,
+    gcv2D_STRETCH,
+    gcv2D_HOR_FILTER,
+    gcv2D_VER_FILTER,
+    gcv2D_MULTI_SOURCE_BLT,
+    gcv2D_FILTER_BLT,
+}
+gce2D_COMMAND;
+
+/* 2D tile-status configuration bit flags (OR-able). */
+typedef enum _gce2D_TILE_STATUS_CONFIG
+{
+    gcv2D_TSC_DISABLE = 0,
+    gcv2D_TSC_ENABLE = 0x00000001,
+    gcv2D_TSC_COMPRESSED = 0x00000002,
+    gcv2D_TSC_DOWN_SAMPLER = 0x00000004,
+    gcv2D_TSC_2D_COMPRESSED = 0x00000008,
+
+    gcv2D_TSC_DEC_COMPRESSED = 0x00000020,
+    gcv2D_TSC_DEC_TPC = 0x00000040,
+    gcv2D_TSC_DEC_TPC_COMPRESSED = 0x00000080,
+
+    gcv2D_TSC_V4_COMPRESSED = 0x00000100,
+    gcv2D_TSC_V4_COMPRESSED_256B = 0x00000200 | gcv2D_TSC_V4_COMPRESSED,
+
+    gcv2D_TSC_DEC_TPC_TILED = gcv2D_TSC_DEC_COMPRESSED | gcv2D_TSC_DEC_TPC,
+    gcv2D_TSC_DEC_TPC_TILED_COMPRESSED = gcv2D_TSC_DEC_TPC_TILED | gcv2D_TSC_DEC_TPC_COMPRESSED,
+
+    gcv2D_TSC_TPC_COMPRESSED = 0x00001000,
+    gcv2D_TSC_TPC_COMPRESSED_V10 = gcv2D_TSC_TPC_COMPRESSED | 0x00000400,
+    gcv2D_TSC_TPC_COMPRESSED_V11 = gcv2D_TSC_TPC_COMPRESSED | 0x00000800,
+}
+gce2D_TILE_STATUS_CONFIG;
+
+typedef enum _gce2D_QUERY
+{
+    gcv2D_QUERY_RGB_ADDRESS_MIN_ALIGN = 0,
+    gcv2D_QUERY_RGB_STRIDE_MIN_ALIGN,
+    gcv2D_QUERY_YUV_ADDRESS_MIN_ALIGN,
+    gcv2D_QUERY_YUV_STRIDE_MIN_ALIGN,
+}
+gce2D_QUERY;
+
+typedef enum _gce2D_SUPER_TILE_VERSION
+{
+    gcv2D_SUPER_TILE_VERSION_V1 = 1,
+    gcv2D_SUPER_TILE_VERSION_V2 = 2,
+    gcv2D_SUPER_TILE_VERSION_V3 = 3,
+}
+gce2D_SUPER_TILE_VERSION;
+
+/* 2D driver state keys; scalar states start at 1, array states at 0x10001,
+** DEC/TPC states at 0x20001. */
+typedef enum _gce2D_STATE
+{
+    gcv2D_STATE_SPECIAL_FILTER_MIRROR_MODE = 1,
+    gcv2D_STATE_SUPER_TILE_VERSION,
+    gcv2D_STATE_EN_GAMMA,
+    gcv2D_STATE_DE_GAMMA,
+    gcv2D_STATE_MULTI_SRC_BLIT_UNIFIED_DST_RECT,
+    gcv2D_STATE_MULTI_SRC_BLIT_BILINEAR_FILTER,
+    gcv2D_STATE_PROFILE_ENABLE,
+    gcv2D_STATE_XRGB_ENABLE,
+
+    gcv2D_STATE_ARRAY_EN_GAMMA = 0x10001,
+    gcv2D_STATE_ARRAY_DE_GAMMA,
+    gcv2D_STATE_ARRAY_CSC_YUV_TO_RGB,
+    gcv2D_STATE_ARRAY_CSC_RGB_TO_YUV,
+
+    gcv2D_STATE_DEC_TPC_NV12_10BIT = 0x20001,
+    gcv2D_STATE_ARRAY_YUV_SRC_TILE_STATUS_ADDR,
+    gcv2D_STATE_ARRAY_YUV_DST_TILE_STATUS_ADDR,
+}
+gce2D_STATE;
+
+typedef enum _gce2D_STATE_PROFILE
+{
+    gcv2D_STATE_PROFILE_NONE = 0x0,
+    gcv2D_STATE_PROFILE_COMMAND = 0x1,
+    gcv2D_STATE_PROFILE_SURFACE = 0x2,
+    gcv2D_STATE_PROFILE_ALL = 0xFFFF,
+}
+gce2D_STATE_PROFILE;
+
+/* Texture object types */
+typedef enum _gceTEXTURE_TYPE
+{
+    gcvTEXTURE_UNKNOWN = 0,
+    gcvTEXTURE_1D,
+    gcvTEXTURE_2D,
+    gcvTEXTURE_3D,
+    gcvTEXTURE_CUBEMAP,
+    gcvTEXTURE_1D_ARRAY,
+    gcvTEXTURE_2D_ARRAY,
+    gcvTEXTURE_2D_MS,
+    gcvTEXTURE_2D_MS_ARRAY,
+    gcvTEXTURE_CUBEMAP_ARRAY,
+    gcvTEXTURE_EXTERNAL
+}
+gceTEXTURE_TYPE;
+
+#if gcdENABLE_3D
+/* Texture functions. Note gcvTEXTURE_DUMMY and gcvTEXTURE_REPLACE
+** intentionally share value 0. */
+typedef enum _gceTEXTURE_FUNCTION
+{
+    gcvTEXTURE_DUMMY = 0,
+    gcvTEXTURE_REPLACE = 0,
+    gcvTEXTURE_MODULATE,
+    gcvTEXTURE_ADD,
+    gcvTEXTURE_ADD_SIGNED,
+    gcvTEXTURE_INTERPOLATE,
+    gcvTEXTURE_SUBTRACT,
+    gcvTEXTURE_DOT3
+}
+gceTEXTURE_FUNCTION;
+
+/* Texture sources. */
+typedef enum _gceTEXTURE_SOURCE
+{
+    gcvCOLOR_FROM_TEXTURE = 0,
+    gcvCOLOR_FROM_CONSTANT_COLOR,
+    gcvCOLOR_FROM_PRIMARY_COLOR,
+    gcvCOLOR_FROM_PREVIOUS_COLOR
+}
+gceTEXTURE_SOURCE;
+
+/* Texture source channels. */
+typedef enum _gceTEXTURE_CHANNEL
+{
+    gcvFROM_COLOR = 0,
+    gcvFROM_ONE_MINUS_COLOR,
+    gcvFROM_ALPHA,
+    gcvFROM_ONE_MINUS_ALPHA
+}
+gceTEXTURE_CHANNEL;
+#endif /* gcdENABLE_3D */
+
+/* Filter types. */
+typedef enum _gceFILTER_TYPE
+{
+    gcvFILTER_SYNC = 0,
+    gcvFILTER_BLUR,
+    gcvFILTER_USER
+}
+gceFILTER_TYPE;
+
+/* Filter pass types. */
+typedef enum _gceFILTER_PASS_TYPE
+{
+    gcvFILTER_HOR_PASS = 0,
+    gcvFILTER_VER_PASS
+}
+gceFILTER_PASS_TYPE;
+
+/* Endian hints. */
+typedef enum _gceENDIAN_HINT
+{
+    gcvENDIAN_NO_SWAP = 0,
+    gcvENDIAN_SWAP_WORD = 1,
+    gcvENDIAN_SWAP_DWORD = 2,
+    gcvENDIAN_SWAP_QWORD = 3,
+}
+gceENDIAN_HINT;
+
+/* Tiling modes.
*/ +typedef enum _gceTILING +{ + gcvINVALIDTILED = 0x0, /* Invalid tiling */ + /* Tiling basic modes enum'ed in power of 2. */ + gcvLINEAR = 0x1, /* No tiling. */ + gcvTILED = 0x2, /* 4x4 tiling. */ + gcvSUPERTILED = 0x4, /* 64x64 tiling. */ + gcvMINORTILED = 0x8, /* 2x2 tiling. */ + + /* Tiling special layouts. */ + gcvTILING_SPLIT_BUFFER = 0x10, + gcvTILING_X_MAJOR = 0x20, + gcvTILING_Y_MAJOR = 0x40, + gcvTILING_SWAP = 0x80, + + /* Tiling combination layouts. */ + gcvMULTI_TILED = gcvTILED + | gcvTILING_SPLIT_BUFFER, + + gcvMULTI_SUPERTILED = gcvSUPERTILED + | gcvTILING_SPLIT_BUFFER, + + gcvYMAJOR_SUPERTILED = gcvSUPERTILED + | gcvTILING_Y_MAJOR, + + gcvTILED_8X4 = 0x0100, + gcvTILED_4X8 = 0x0100 | gcvTILING_SWAP, + gcvTILED_8X8 = 0x0200, + gcvTILED_16X4 = 0x0400, + gcvTILED_32X4 = 0x0800, + gcvTILED_64X4 = 0x1000, + + gcvTILED_8X8_XMAJOR = gcvTILED_8X8 | gcvTILING_X_MAJOR, + gcvTILED_8X8_YMAJOR = gcvTILED_8X8 | gcvTILING_Y_MAJOR, + + gcvSUPERTILED_128B = 0x10000 | gcvSUPERTILED, + gcvSUPERTILED_256B = 0x20000 | gcvSUPERTILED, +} +gceTILING; + +typedef enum _gceCACHE_MODE +{ + gcvCACHE_NONE, + gcvCACHE_128, + gcvCACHE_256, +} +gceCACHE_MODE; + +#define DEFAULT_CACHE_MODE gcvCACHE_256 + +/* 2D pattern type. */ +typedef enum _gce2D_PATTERN +{ + gcv2D_PATTERN_SOLID = 0, + gcv2D_PATTERN_MONO, + gcv2D_PATTERN_COLOR, + gcv2D_PATTERN_INVALID +} +gce2D_PATTERN; + +/* 2D source type. */ +typedef enum _gce2D_SOURCE +{ + gcv2D_SOURCE_MASKED = 0, + gcv2D_SOURCE_MONO, + gcv2D_SOURCE_COLOR, + gcv2D_SOURCE_INVALID +} +gce2D_SOURCE; + +/* Pipes. */ +typedef enum _gcePIPE_SELECT +{ + gcvPIPE_INVALID = ~0, + gcvPIPE_3D = 0, + gcvPIPE_2D +} +gcePIPE_SELECT; + +/* Hardware type. 
*/ +typedef enum _gceHARDWARE_TYPE +{ + gcvHARDWARE_INVALID, + gcvHARDWARE_3D, + gcvHARDWARE_2D, + gcvHARDWARE_VG, + gcvHARDWARE_3D2D, + gcvHARDWARE_NUM_TYPES, +} +gceHARDWARE_TYPE; + +#define gcdCHIP_COUNT gcvCORE_COUNT + +typedef enum _gceMMU_MODE +{ + gcvMMU_MODE_1K, + gcvMMU_MODE_4K, +} gceMMU_MODE; + +/* User signal command codes. */ +typedef enum _gceUSER_SIGNAL_COMMAND_CODES +{ + gcvUSER_SIGNAL_CREATE, + gcvUSER_SIGNAL_DESTROY, + gcvUSER_SIGNAL_SIGNAL, + gcvUSER_SIGNAL_WAIT, + gcvUSER_SIGNAL_MAP, + gcvUSER_SIGNAL_UNMAP, +} +gceUSER_SIGNAL_COMMAND_CODES; + +/* Shared buffer command codes. */ +typedef enum _gceSHBUF_COMMAND_CODES +{ + gcvSHBUF_CREATE, + gcvSHBUF_DESTROY, + gcvSHBUF_MAP, + gcvSHBUF_WRITE, + gcvSHBUF_READ, +} +gceSHBUF_COMMAND_CODES; + +/* Event locations. */ +typedef enum _gceKERNEL_WHERE +{ + gcvKERNEL_COMMAND, + gcvKERNEL_VERTEX, + gcvKERNEL_TRIANGLE, + gcvKERNEL_TEXTURE, + gcvKERNEL_PIXEL, + gcvKERNEL_BLT, +} +gceKERNEL_WHERE; + + +/* gcdDUMP message type. */ +typedef enum _gceDEBUG_MESSAGE_TYPE +{ + gcvMESSAGE_TEXT, + gcvMESSAGE_DUMP +} +gceDEBUG_MESSAGE_TYPE; + +/* Shading format. */ +typedef enum _gceSHADING +{ + gcvSHADING_SMOOTH, + gcvSHADING_FLAT_D3D, + gcvSHADING_FLAT_OPENGL, +} +gceSHADING; + +/* Culling modes. */ +typedef enum _gceCULL +{ + gcvCULL_NONE, + gcvCULL_CCW, + gcvCULL_CW, +} +gceCULL; + +/* Fill modes. */ +typedef enum _gceFILL +{ + gcvFILL_POINT, + gcvFILL_WIRE_FRAME, + gcvFILL_SOLID, +} +gceFILL; + +/* Compare modes. */ +typedef enum _gceCOMPARE +{ + gcvCOMPARE_INVALID = 0, + gcvCOMPARE_NEVER, + gcvCOMPARE_NOT_EQUAL, + gcvCOMPARE_LESS, + gcvCOMPARE_LESS_OR_EQUAL, + gcvCOMPARE_EQUAL, + gcvCOMPARE_GREATER, + gcvCOMPARE_GREATER_OR_EQUAL, + gcvCOMPARE_ALWAYS, +} +gceCOMPARE; + +/* Stencil modes. */ +typedef enum _gceSTENCIL_MODE +{ + gcvSTENCIL_NONE, + gcvSTENCIL_SINGLE_SIDED, + gcvSTENCIL_DOUBLE_SIDED, +} +gceSTENCIL_MODE; + +/* Stencil operations. 
*/ +typedef enum _gceSTENCIL_OPERATION +{ + gcvSTENCIL_KEEP, + gcvSTENCIL_REPLACE, + gcvSTENCIL_ZERO, + gcvSTENCIL_INVERT, + gcvSTENCIL_INCREMENT, + gcvSTENCIL_DECREMENT, + gcvSTENCIL_INCREMENT_SATURATE, + gcvSTENCIL_DECREMENT_SATURATE, + gcvSTENCIL_OPERATION_INVALID = -1 +} +gceSTENCIL_OPERATION; + +/* Stencil selection. */ +typedef enum _gceSTENCIL_WHERE +{ + gcvSTENCIL_FRONT, + gcvSTENCIL_BACK, +} +gceSTENCIL_WHERE; + +/* Texture addressing selection. */ +typedef enum _gceTEXTURE_WHICH +{ + gcvTEXTURE_S, + gcvTEXTURE_T, + gcvTEXTURE_R, +} +gceTEXTURE_WHICH; + +/* Texture addressing modes. */ +typedef enum _gceTEXTURE_ADDRESSING +{ + gcvTEXTURE_INVALID = 0, + gcvTEXTURE_CLAMP, + gcvTEXTURE_WRAP, + gcvTEXTURE_MIRROR, + gcvTEXTURE_BORDER, + gcvTEXTURE_MIRROR_ONCE, +} +gceTEXTURE_ADDRESSING; + +/* Texture filters. */ +typedef enum _gceTEXTURE_FILTER +{ + gcvTEXTURE_NONE, + gcvTEXTURE_POINT, + gcvTEXTURE_LINEAR, + gcvTEXTURE_ANISOTROPIC, +} +gceTEXTURE_FILTER; + +typedef enum _gceTEXTURE_COMPONENT +{ + gcvTEXTURE_COMPONENT_R, + gcvTEXTURE_COMPONENT_G, + gcvTEXTURE_COMPONENT_B, + gcvTEXTURE_COMPONENT_A, + + gcvTEXTURE_COMPONENT_NUM, +} gceTEXTURE_COMPONENT; + +/* Texture swizzle modes. */ +typedef enum _gceTEXTURE_SWIZZLE +{ + gcvTEXTURE_SWIZZLE_R = 0, + gcvTEXTURE_SWIZZLE_G, + gcvTEXTURE_SWIZZLE_B, + gcvTEXTURE_SWIZZLE_A, + gcvTEXTURE_SWIZZLE_0, + gcvTEXTURE_SWIZZLE_1, + + gcvTEXTURE_SWIZZLE_INVALID, +} gceTEXTURE_SWIZZLE; + +typedef enum _gceTEXTURE_SRGBDECODE +{ + gcvTEXTURE_SRGB_INVALID = 0, + gcvTEXTURE_DECODE, + gcvTEXTURE_SKIP_DECODE, +}gceTEXTURE_SRGBDECODE; + +typedef enum _gceTEXTURE_COMPARE_MODE +{ + gcvTEXTURE_COMPARE_MODE_INVALID = 0, + gcvTEXTURE_COMPARE_MODE_NONE, + gcvTEXTURE_COMPARE_MODE_REF, +} gceTEXTURE_COMPARE_MODE; + +typedef enum _gceTEXTURE_DS_MODE +{ + gcvTEXTURE_DS_MODE_INVALID = 0, + gcvTEXTURE_DS_MODE_DEPTH = 1, + gcvTEXTURE_DS_MODE_STENCIL = 2, +}gceTEXTURE_DS_MODE; + + +/* Pixel output swizzle modes. 
*/ +typedef enum _gcePIXEL_SWIZZLE +{ + gcvPIXEL_SWIZZLE_R = gcvTEXTURE_SWIZZLE_R, + gcvPIXEL_SWIZZLE_G = gcvTEXTURE_SWIZZLE_G, + gcvPIXEL_SWIZZLE_B = gcvTEXTURE_SWIZZLE_B, + gcvPIXEL_SWIZZLE_A = gcvTEXTURE_SWIZZLE_A, + + gcvPIXEL_SWIZZLE_INVALID, +} gcePIXEL_SWIZZLE; + +/* Primitive types. */ +typedef enum _gcePRIMITIVE +{ + gcvPRIMITIVE_POINT_LIST, + gcvPRIMITIVE_LINE_LIST, + gcvPRIMITIVE_LINE_STRIP, + gcvPRIMITIVE_LINE_LOOP, + gcvPRIMITIVE_TRIANGLE_LIST, + gcvPRIMITIVE_TRIANGLE_STRIP, + gcvPRIMITIVE_TRIANGLE_FAN, + gcvPRIMITIVE_RECTANGLE, + gcvPRIMITIVE_LINES_ADJACENCY, + gcvPRIMITIVE_LINE_STRIP_ADJACENCY, + gcvPRIMITIVE_TRIANGLES_ADJACENCY, + gcvPRIMITIVE_TRIANGLE_STRIP_ADJACENCY, + gcvPRIMITIVE_PATCH_LIST, +} +gcePRIMITIVE; + +/* Index types. */ +typedef enum _gceINDEX_TYPE +{ + gcvINDEX_8, + gcvINDEX_16, + gcvINDEX_32, +} +gceINDEX_TYPE; + +/* Multi GPU rendering modes. */ +typedef enum _gceMULTI_GPU_RENDERING_MODE +{ + gcvMULTI_GPU_RENDERING_MODE_OFF, + gcvMULTI_GPU_RENDERING_MODE_SPLIT_WIDTH, + gcvMULTI_GPU_RENDERING_MODE_SPLIT_HEIGHT, + gcvMULTI_GPU_RENDERING_MODE_INTERLEAVED_64x64, + gcvMULTI_GPU_RENDERING_MODE_INTERLEAVED_128x64, + gcvMULTI_GPU_RENDERING_MODE_INTERLEAVED_128x128, + gcvMULTI_GPU_RENDERING_MODE_INVALID +} +gceMULTI_GPU_RENDERING_MODE; + +typedef enum _gceCORE_3D_MASK +{ + gcvCORE_3D_0_MASK = (1 << 0), + gcvCORE_3D_1_MASK = (1 << 1), + + gcvCORE_3D_ALL_MASK = (0xFFFF) +} +gceCORE_3D_MASK; + +typedef enum _gceCORE_3D_ID +{ + gcvCORE_3D_0_ID = 0, + gcvCORE_3D_1_ID = 1, + + gcvCORE_3D_ID_INVALID = ~0UL +} +gceCORE_3D_ID; + +typedef enum _gceMULTI_GPU_MODE +{ + gcvMULTI_GPU_MODE_COMBINED = 0, + gcvMULTI_GPU_MODE_INDEPENDENT = 1 +} +gceMULTI_GPU_MODE; + +typedef enum _gceMACHINECODE +{ + gcvMACHINECODE_ANTUTU0 = 0x0, + + gcvMACHINECODE_GLB27_RELEASE_0, + + gcvMACHINECODE_GLB25_RELEASE_0, + gcvMACHINECODE_GLB25_RELEASE_1, + + /* keep it as the last enum */ + gcvMACHINECODE_COUNT +} +gceMACHINECODE; + +typedef enum _gceUNIFORMCVT +{ + 
gcvUNIFORMCVT_NONE = 0, + gcvUNIFORMCVT_TO_BOOL, + gcvUNIFORMCVT_TO_FLOAT, +} gceUNIFORMCVT; + +typedef enum _gceHAL_ARG_VERSION +{ + gcvHAL_ARG_VERSION_V1 = 0x0, + gcvHAL_ARG_VERSION_V2, +} +gceHAL_ARG_VERSION; + + +typedef enum _gceCMDBUF_TYPE +{ + /* Contiguous command buffer. */ + gcvCMDBUF_CONTIGUOUS, + /* Virtual command buffer. */ + gcvCMDBUF_VIRTUAL, + /* Command buffer allocated from reserved memory. */ + gcvCMDBUF_RESERVED, +} +gceCMDBUF_SOURCE; + +typedef enum _gceCHIP_FLAG +{ + gcvCHIP_FLAG_MSAA_COHERENCEY_ECO_FIX = 1 << 0, + gcvCHIP_FLAG_GC2000_R2 = 1 << 1, + gcvCHIP_AXI_BUS128_BITS = 1 << 2, +} +gceCHIP_FLAG; + +/* If different, choose render engine */ +#define PRIORITY_ENGINE(a, b) gcmMIN(a,b) + +typedef enum +{ + gcvENGINE_RENDER = 0, + gcvENGINE_BLT = 1, + gcvENGINE_GPU_ENGINE_COUNT = 2, + gcvENGINE_CPU = gcvENGINE_GPU_ENGINE_COUNT, + gcvENGINE_ALL_COUNT = gcvENGINE_CPU + 1, + gcvENGINE_INVALID = gcvENGINE_ALL_COUNT + 0x100 +} +gceENGINE; + +/* CORE enum. */ +typedef enum _gceCORE +{ + gcvCORE_MAJOR, + gcvCORE_3D1, + gcvCORE_3D2, + gcvCORE_3D3, + gcvCORE_3D4, + gcvCORE_3D5, + gcvCORE_3D6, + gcvCORE_3D7, + gcvCORE_3D_MAX = gcvCORE_3D7, + gcvCORE_2D, + gcvCORE_VG, +#if gcdDEC_ENABLE_AHB + gcvCORE_DEC, +#endif + gcvCORE_COUNT +} +gceCORE; + + +typedef enum _gceADDRESS_AREA +{ + gcvADDRESS_AREA_NORMAL, + gcvADDRESS_AREA_SECURE, + + gcvADDRESS_AREA_COUNT +} +gceADDRESS_AREA; + +typedef enum _gceSECURE_MODE +{ + /* For cores without gcvFEATURE_SECURITY. */ + gcvSECURE_NONE, + + /* Use registers added in gcvFEATURE_SECURITY in normal driver, + ** In this mode, GPU always works under non secure mode and + ** should not touch secure buffer. It is used to test basic function. + */ + gcvSECURE_IN_NORMAL, + + /* Make use of gcvFEATURE_SECURITY in trust application. */ + gcvSECURE_IN_TA +} +gceSECURE_MODE; + +/* kernel driver compression option, as it's a system global option, +** it means kernel driver allows the options, NOT necessarily means it must be on. 
+*/ +typedef enum _gceCOMPRESSION_OPTION +{ + gcvCOMPRESSION_OPTION_NONE = 0x0, /* No any compression */ + gcvCOMPRESSION_OPTION_COLOR = 0x1, /* Compression for non-msaa color format */ + gcvCOMPRESSION_OPTION_DEPTH = 0x2, /* Compression for non-msaa depth format */ + gcvCOMPRESSION_OPTION_MSAA_COLOR = 0x4, /* Compression for msaa color */ + gcvCOMPRESSION_OPTION_MSAA_DEPTH = 0x8, /* Compression for msaa depth */ + + /* default compressio option */ + gcvCOMPRESSION_OPTION_DEFAULT = gcvCOMPRESSION_OPTION_DEPTH | + gcvCOMPRESSION_OPTION_COLOR | + gcvCOMPRESSION_OPTION_MSAA_COLOR | + gcvCOMPRESSION_OPTION_MSAA_DEPTH, +} +gceCOMPRESSION_OPTION; + +/* No special needs. */ +#define gcvALLOC_FLAG_NONE 0x00000000 + +/* Physical contiguous. */ +#define gcvALLOC_FLAG_CONTIGUOUS 0x00000001 +/* Can be remapped as cacheable. */ +#define gcvALLOC_FLAG_CACHEABLE 0x00000002 +/* Secure buffer. */ +#define gcvALLOC_FLAG_SECURITY 0x00000004 +/* Physical non contiguous. */ +#define gcvALLOC_FLAG_NON_CONTIGUOUS 0x00000008 +/* Can be exported as dmabuf-fd */ +#define gcvALLOC_FLAG_DMABUF_EXPORTABLE 0x00000010 + +#define gcvALLOC_FLAG_4GB_ADDR 0x00000020 + +/* Do not try slow pools (gcvPOOL_VIRTUAL/gcvPOOL_CONTIGUOUS) */ +#define gcvALLOC_FLAG_FAST_POOLS 0x00000100 + +/* Import DMABUF. */ +#define gcvALLOC_FLAG_DMABUF 0x00001000 +/* Import USERMEMORY. */ +#define gcvALLOC_FLAG_USERMEMORY 0x00002000 +/* Import an External Buffer. */ +#define gcvALLOC_FLAG_EXTERNAL_MEMORY 0x00004000 +/* Import linux reserved memory. */ +#define gcvALLOC_FLAG_LINUX_RESERVED_MEM 0x00008000 + +/* Real allocation happens when GPU page fault. */ +#define gcvALLOC_FLAG_ALLOC_ON_FAULT 0x01000000 +/* Alloc with memory limit. */ +#define gcvALLOC_FLAG_MEMLIMIT 0x02000000 + + +/* GL_VIV internal usage */ +#ifndef GL_MAP_BUFFER_OBJ_VIV +#define GL_MAP_BUFFER_OBJ_VIV 0x10000 +#endif + +/* Command buffer usage. 
*/ +#define gcvCOMMAND_2D (1 << 0) +#define gcvCOMMAND_3D (1 << 1) + +/* Default chip ID means chip ID same as core index. */ +#define gcvCHIP_ID_DEFAULT (~0U) + +/******************************************************************************\ +****************************** Object Declarations ***************************** +\******************************************************************************/ + +typedef struct _gckCONTEXT * gckCONTEXT; +typedef struct _gcoCMDBUF * gcoCMDBUF; + +typedef struct _gcsSTATE_DELTA * gcsSTATE_DELTA_PTR; +typedef struct _gcsQUEUE * gcsQUEUE_PTR; +typedef struct _gcoQUEUE * gcoQUEUE; +typedef struct _gcsHAL_INTERFACE * gcsHAL_INTERFACE_PTR; +typedef struct _gcs2D_PROFILE * gcs2D_PROFILE_PTR; + + + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_enum_h_ */ + + diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_kernel_buffer.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_kernel_buffer.h new file mode 100644 index 000000000000..2e640fdf50ed --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_kernel_buffer.h @@ -0,0 +1,320 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. 
+* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#ifndef __gc_hal_kernel_buffer_h_ +#define __gc_hal_kernel_buffer_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/******************************************************************************\ +************************ Command Buffer and Event Objects ********************** +\******************************************************************************/ + +/* The number of context buffers per user. */ +#define gcdCONTEXT_BUFFER_COUNT 2 + +#define gcdRENDER_FENCE_LENGTH (6 * gcmSIZEOF(gctUINT32)) +#define gcdBLT_FENCE_LENGTH (10 * gcmSIZEOF(gctUINT32)) +#define gcdRESERVED_FLUSHCACHE_LENGTH (2 * gcmSIZEOF(gctUINT32)) +#define gcdRESERVED_PAUSE_OQ_LENGTH (2 * gcmSIZEOF(gctUINT32)) +#define gcdRESERVED_PAUSE_XFBWRITTEN_QUERY_LENGTH (4 * gcmSIZEOF(gctUINT32)) +#define gcdRESERVED_PAUSE_PRIMGEN_QUERY_LENGTH (4 * gcmSIZEOF(gctUINT32)) +#define gcdRESERVED_PAUSE_XFB_LENGTH (2 * gcmSIZEOF(gctUINT32)) +#define gcdRESERVED_HW_FENCE_32BIT (4 * gcmSIZEOF(gctUINT32)) +#define gcdRESERVED_HW_FENCE_64BIT (6 * gcmSIZEOF(gctUINT32)) +#define gcdRESERVED_PAUSE_PROBE_LENGTH (TOTAL_PROBE_NUMBER * 2 * gcmSIZEOF(gctUINT32)) + +#define gcdRESUME_OQ_LENGTH (2 * gcmSIZEOF(gctUINT32)) +#define gcdRESUME_XFBWRITTEN_QUERY_LENGTH (4 * gcmSIZEOF(gctUINT32)) +#define gcdRESUME_PRIMGEN_QUERY_LENGTH (4 * gcmSIZEOF(gctUINT32)) +#define gcdRESUME_XFB_LENGH (2 * gcmSIZEOF(gctUINT32)) +#define gcdRESUME_PROBE_LENGH (TOTAL_PROBE_NUMBER * 2 * gcmSIZEOF(gctUINT32)) + + +/* State delta record. */ +typedef struct _gcsSTATE_DELTA_RECORD * gcsSTATE_DELTA_RECORD_PTR; +typedef struct _gcsSTATE_DELTA_RECORD +{ + /* State address. */ + gctUINT address; + + /* State mask. */ + gctUINT32 mask; + + /* State data. */ + gctUINT32 data; +} +gcsSTATE_DELTA_RECORD; + +/* State delta. */ +typedef struct _gcsSTATE_DELTA +{ + /* For debugging: the number of delta in the order of creation. 
*/ + gctUINT num; + + /* Main state delta ID. Every time state delta structure gets reinitialized, + main ID is incremented. If main state ID overflows, all map entry IDs get + reinitialized to make sure there is no potential erroneous match after + the overflow.*/ + gctUINT id; + + /* The number of contexts pending modification by the delta. */ + gctINT refCount; + + /* Vertex element count for the delta buffer. */ + gctUINT elementCount; + + /* Number of states currently stored in the record array. */ + gctUINT recordCount; + + /* Record array; holds all modified states in gcsSTATE_DELTA_RECORD. */ + gctUINT64 recordArray; + + /* Map entry ID is used for map entry validation. If map entry ID does not + match the main state delta ID, the entry and the corresponding state are + considered not in use. */ + gctUINT64 mapEntryID; + gctUINT mapEntryIDSize; + + /* If the map entry ID matches the main state delta ID, index points to + the state record in the record array. */ + gctUINT64 mapEntryIndex; + + /* Previous and next state deltas in gcsSTATE_DELTA. */ + gctUINT64 prev; + gctUINT64 next; +} +gcsSTATE_DELTA; + +#define gcdPATCH_LIST_SIZE 1024 + +/* Command buffer patch record. */ +typedef struct _gcsPATCH +{ + /* Handle of a video memory node. */ + gctUINT32 handle; + + /* Flag */ + gctUINT32 flag; +} +gcsPATCH; + +/* List of patches for the command buffer. */ +typedef struct _gcsPATCH_LIST +{ + /* Array of patch records. */ + struct _gcsPATCH patch[gcdPATCH_LIST_SIZE]; + + /* Number of patches in the array. */ + gctUINT count; + + /* Next item in the list. 
*/ + struct _gcsPATCH_LIST *next; +} +gcsPATCH_LIST; + +#define FENCE_NODE_LIST_INIT_COUNT 100 + +typedef struct _gcsFENCE_APPEND_NODE +{ + gcsSURF_NODE_PTR node; + gceFENCE_TYPE type; + +}gcsFENCE_APPEND_NODE; + +typedef gcsFENCE_APPEND_NODE * gcsFENCE_APPEND_NODE_PTR; + +typedef struct _gcsFENCE_LIST * gcsFENCE_LIST_PTR; + +typedef struct _gcsFENCE_LIST +{ + /* Resource that need get fence, but command used this resource not generated */ + gcsFENCE_APPEND_NODE_PTR pendingList; + gctUINT pendingCount; + gctUINT pendingAllocCount; + + /* Resoure that already generated command in this command buffer but not get fence */ + gcsFENCE_APPEND_NODE_PTR onIssueList; + gctUINT onIssueCount; + gctUINT onIssueAllocCount; +} +gcsFENCE_LIST; + +/* Command buffer object. */ +struct _gcoCMDBUF +{ + /* The object. */ + gcsOBJECT object; + + /* Commit count. */ + gctUINT64 commitCount; + + /* Command buffer entry and exit pipes. */ + gcePIPE_SELECT entryPipe; + gcePIPE_SELECT exitPipe; + + /* Feature usage flags. */ + gctBOOL using2D; + gctBOOL using3D; + + /* Size of reserved tail for each commit. */ + gctUINT32 reservedTail; + + /* Physical address of command buffer. Just a name. */ + gctUINT32 physical; + + /* Logical address of command buffer. */ + gctUINT64 logical; + + /* Number of bytes in command buffer. */ + gctUINT32 bytes; + + /* Start offset into the command buffer. */ + gctUINT32 startOffset; + + /* Current offset into the command buffer. */ + gctUINT32 offset; + + /* Number of free bytes in command buffer. */ + gctUINT32 free; + + /* Location of the last reserved area. */ + gctUINT64 lastReserve; + gctUINT32 lastOffset; + +#if gcdSECURE_USER + /* Hint array for the current command buffer. */ + gctUINT hintArraySize; + gctUINT64 hintArray; + gctUINT64 hintArrayTail; +#endif + + /* Last load state command location and hardware address. */ + gctUINT64 lastLoadStatePtr; + gctUINT32 lastLoadStateAddress; + gctUINT32 lastLoadStateCount; + + /* List of patches. 
*/ + gctUINT64 patchHead; + + /* Link to next gcoCMDBUF object in one commit. */ + gctUINT64 nextCMDBUF; + + /* + * Put pointer type member after this line. + */ + + /* Completion signal. */ + gctSIGNAL signal; + + /* Link to the siblings. */ + gcoCMDBUF prev; + gcoCMDBUF next; + + /* Mirror command buffer(s). */ + gcoCMDBUF *mirrors; + gctUINT32 mirrorCount; +}; + +typedef struct _gcsQUEUE +{ + /* Pointer to next gcsQUEUE structure in gcsQUEUE. */ + gctUINT64 next; + + /* Event information. */ + gcsHAL_INTERFACE iface; +} +gcsQUEUE; + +/* Event queue. */ +struct _gcoQUEUE +{ + /* The object. */ + gcsOBJECT object; + + /* Pointer to current event queue. */ + gcsQUEUE_PTR head; + gcsQUEUE_PTR tail; + + /* chunks of the records. */ + gctPOINTER chunks; + + /* List of free records. */ + gcsQUEUE_PTR freeList; + + #define gcdIN_QUEUE_RECORD_LIMIT 16 + /* Number of records currently in queue */ + gctUINT32 recordCount; + + /* Max size of pending unlock node in vidmem pool not committed */ + gctUINT maxUnlockBytes; + + gceENGINE engine; +}; + +struct _gcsTEMPCMDBUF +{ + gctUINT32 currentByteSize; + gctPOINTER buffer; + gctBOOL inUse; +}; + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_kernel_buffer_h_ */ diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_mem.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_mem.h new file mode 100644 index 000000000000..57b20ba6a4a0 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_mem.h @@ -0,0 +1,566 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit 
persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. 
If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +/* +** Include file for the local memory management. +*/ + +#ifndef __gc_hal_mem_h_ +#define __gc_hal_mem_h_ +#if (gcdENABLE_3D) + +#ifdef __cplusplus +extern "C" { +#endif + +/******************************************************************************* +** Usage: + + The macros to declare MemPool type and functions are + gcmMEM_DeclareFSMemPool (Type, TypeName, Prefix) + gcmMEM_DeclareVSMemPool (Type, TypeName, Prefix) + gcmMEM_DeclareAFSMemPool(Type, TypeName, Prefix) + + The data structures for MemPool are + typedef struct _gcsMEM_FS_MEM_POOL * gcsMEM_FS_MEM_POOL; + typedef struct _gcsMEM_VS_MEM_POOL * gcsMEM_VS_MEM_POOL; + typedef struct _gcsMEM_AFS_MEM_POOL * gcsMEM_AFS_MEM_POOL; + + The MemPool constructor and destructor functions are + gcfMEM_InitFSMemPool(gcsMEM_FS_MEM_POOL *, gcoOS, gctUINT, gctUINT); + gcfMEM_FreeFSMemPool(gcsMEM_FS_MEM_POOL *); + gcfMEM_InitVSMemPool(gcsMEM_VS_MEM_POOL *, gcoOS, gctUINT, gctBOOL); + gcfMEM_FreeVSMemPool(gcsMEM_VS_MEM_POOL *); + gcfMEM_InitAFSMemPool(gcsMEM_AFS_MEM_POOL *, gcoOS, gctUINT); + gcfMEM_FreeAFSMemPool(gcsMEM_AFS_MEM_POOL *); + + FS: for Fixed-Size data structures + VS: for Variable-size data structures + AFS: for Array of Fixed-Size data structures + + + // Example 1: For a fixed-size data structure, struct gcsNode. + // It is used locally in a file, so the functions are static without prefix. + // At top level, declear allocate and free functions. + // The first argument is the data type. + // The second armument is the short name used in the fuctions. + gcmMEM_DeclareFSMemPool(struct gcsNode, Node, ); + + // The previous macro creates two inline functions, + // _AllocateNode and _FreeNode. 
+ + // In function or struct + gcsMEM_FS_MEM_POOL nodeMemPool; + + // In function, + struct gcsNode * node; + gceSTATUS status; + + // Before using the memory pool, initialize it. + // The second argument is the gcoOS object. + // The third argument is the number of data structures to allocate for each chunk. + status = gcfMEM_InitFSMemPool(&nodeMemPool, os, 100, sizeof(struct gcsNode)); + ... + + // Allocate a node. + status = _AllocateNode(nodeMemPool, &node); + ... + // Free a node. + _FreeNode(nodeMemPool, node); + + // After using the memory pool, free it. + gcfMEM_FreeFSMemPool(&nodeMemPool); + + + // Example 2: For array of fixed-size data structures, struct gcsNode. + // It is used in several files, so the functions are extern with prefix. + // At top level, declear allocate and free functions. + // The first argument is the data type, and the second one is the short name + // used in the fuctions. + gcmMEM_DeclareAFSMemPool(struct gcsNode, NodeArray, gcfOpt); + + // The previous macro creates two inline functions, + // gcfOpt_AllocateNodeArray and gcfOpt_FreeNodeArray. + + // In function or struct + gcsMEM_AFS_MEM_POOL nodeArrayMemPool; + + // In function, + struct gcsNode * nodeArray; + gceSTATUS status; + + // Before using the array memory pool, initialize it. + // The second argument is the gcoOS object, the third is the number of data + // structures to allocate for each chunk. + status = gcfMEM_InitAFSMemPool(&nodeArrayMemPool, os, sizeof(struct gcsNode)); + ... + + // Allocate a node array of size 100. + status = gcfOpt_AllocateNodeArray(nodeArrayMemPool, &nodeArray, 100); + ... + // Free a node array. + gcfOpt_FreeNodeArray(&nodeArrayMemPool, nodeArray); + + // After using the array memory pool, free it. 
+ gcfMEM_FreeAFSMemPool(&nodeArrayMemPool); + +*******************************************************************************/ + +/******************************************************************************* +** To switch back to use gcoOS_Allocate and gcoOS_Free, add +** #define USE_LOCAL_MEMORY_POOL 0 +** before including this file. +*******************************************************************************/ +#ifndef USE_LOCAL_MEMORY_POOL +/* + USE_LOCAL_MEMORY_POOL + + This define enables the local memory management to improve performance. +*/ +#define USE_LOCAL_MEMORY_POOL 1 +#endif + +/******************************************************************************* +** Memory Pool Data Structures +*******************************************************************************/ +#if USE_LOCAL_MEMORY_POOL + typedef struct _gcsMEM_FS_MEM_POOL * gcsMEM_FS_MEM_POOL; + typedef struct _gcsMEM_VS_MEM_POOL * gcsMEM_VS_MEM_POOL; + typedef struct _gcsMEM_AFS_MEM_POOL * gcsMEM_AFS_MEM_POOL; +#else + typedef gcoOS gcsMEM_FS_MEM_POOL; + typedef gcoOS gcsMEM_VS_MEM_POOL; + typedef gcoOS gcsMEM_AFS_MEM_POOL; +#endif + +/******************************************************************************* +** Memory Pool Macros +*******************************************************************************/ +#if USE_LOCAL_MEMORY_POOL +#define gcmMEM_DeclareFSMemPool(Type, TypeName, Prefix) \ +gceSTATUS \ +Prefix##_Allocate##TypeName(\ + gcsMEM_FS_MEM_POOL MemPool, \ + Type ** Pointer \ + ) \ +{ \ + return(gcfMEM_FSMemPoolGetANode(MemPool, (gctPOINTER *) Pointer)); \ +} \ + \ +gceSTATUS \ +Prefix##_CAllocate##TypeName(\ + gcsMEM_FS_MEM_POOL MemPool, \ + Type ** Pointer \ + ) \ +{ \ + gceSTATUS status; \ + gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \ + gcmERR_RETURN(gcfMEM_FSMemPoolGetANode(MemPool, (gctPOINTER *) Pointer)); \ + gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, gcmSIZEOF(Type)); \ + gcmFOOTER(); \ + return gcvSTATUS_OK; \ +} \ + \ +gceSTATUS \ 
+Prefix##_Free##TypeName(\ + gcsMEM_FS_MEM_POOL MemPool, \ + Type * Pointer \ + ) \ +{ \ + gceSTATUS status; \ + gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \ + status = gcfMEM_FSMemPoolFreeANode(MemPool, (gctPOINTER) Pointer); \ + gcmFOOTER(); \ + return status; \ +} \ + \ +gceSTATUS \ +Prefix##_Free##TypeName##List(\ + gcsMEM_FS_MEM_POOL MemPool, \ + Type * FirstPointer, \ + Type * LastPointer \ + ) \ +{ \ + gceSTATUS status; \ + gcmHEADER_ARG("MemPool=0x%x FirstPointer=0x%x LastPointer=0x%x", MemPool, FirstPointer, LastPointer); \ + status = gcfMEM_FSMemPoolFreeAList(MemPool, (gctPOINTER) FirstPointer, (gctPOINTER) LastPointer); \ + gcmFOOTER(); \ + return status; \ +} + +#define gcmMEM_DeclareVSMemPool(Type, TypeName, Prefix) \ +gceSTATUS \ +Prefix##_Allocate##TypeName(\ + gcsMEM_FS_MEM_POOL MemPool, \ + Type ** Pointer, \ + gctUINT Size \ + ) \ +{ \ + gceSTATUS status;\ + gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \ + status = gcfMEM_VSMemPoolGetANode(MemPool, Size, (gctPOINTER *) Pointer); \ + gcmFOOTER(); \ + return status; \ +} \ + \ +gceSTATUS \ + Prefix##_CAllocate##TypeName(\ + gcsMEM_FS_MEM_POOL MemPool, \ + Type ** Pointer, \ + gctUINT Size \ + ) \ +{ \ + gceSTATUS status; \ + gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \ + gcmERR_RETURN(gcfMEM_VSMemPoolGetANode(MemPool, Size, (gctPOINTER *) Pointer)); \ + gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, size); \ + gcmFOOTER(); \ + return gcvSTATUS_OK; \ +} \ + \ +gceSTATUS \ +Prefix##_Free##TypeName(\ + gcsMEM_FS_MEM_POOL MemPool, \ + Type * Pointer \ + ) \ +{ \ + gceSTATUS status; \ + gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pinter); \ + status = gcfMEM_VSMemPoolFreeANode(MemPool, (gctPOINTER) Pointer); \ + gcmFOOTER(); \ + return status; \ +} + +#define gcmMEM_DeclareAFSMemPool(Type, TypeName, Prefix) \ +gceSTATUS \ +Prefix##_Allocate##TypeName(\ + gcsMEM_AFS_MEM_POOL MemPool, \ + Type ** Pointer, \ + gctUINT 
Count \ + ) \ +{ \ + gceSTATUS status; \ + gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \ + status = gcfMEM_AFSMemPoolGetANode(MemPool, Count, (gctPOINTER *) Pointer); \ + gcmFOOTER(); \ + return status; \ +} \ + \ +gceSTATUS \ +Prefix##_CAllocate##TypeName(\ + gcsMEM_AFS_MEM_POOL MemPool, \ + Type ** Pointer, \ + gctUINT Count \ + ) \ +{ \ + gceSTATUS status; \ + gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \ + gcmERR_RETURN(gcfMEM_AFSMemPoolGetANode(MemPool, Count, (gctPOINTER *) Pointer)); \ + gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, Count * gcmSIZEOF(Type)); \ + gcmFOOTER(); \ + return gcvSTATUS_OK; \ +} \ + \ +gceSTATUS \ +Prefix##_Free##TypeName(\ + gcsMEM_AFS_MEM_POOL MemPool, \ + Type * Pointer \ + ) \ +{ \ + gceSTATUS status; \ + gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \ + status = gcfMEM_AFSMemPoolFreeANode(MemPool, (gctPOINTER) Pointer); \ + gcmFOOTER(); \ + return status; \ +} + +#else + +#define gcmMEM_DeclareFSMemPool(Type, TypeName, Prefix) \ +gceSTATUS \ +Prefix##_Allocate##TypeName(\ + gcsMEM_FS_MEM_POOL MemPool, \ + Type ** Pointer \ + ) \ +{ \ + gceSTATUS status; \ + gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \ + status = gcoOS_Allocate(MemPool, \ + gcmSIZEOF(Type), \ + (gctPOINTER *) Pointer); \ + gcmFOOTER(); \ + return status; \ +} \ + \ +gceSTATUS \ +Prefix##_CAllocate##TypeName(\ + gcsMEM_FS_MEM_POOL MemPool, \ + Type ** Pointer \ + ) \ +{ \ + gceSTATUS status; \ + gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \ + gcmERR_RETURN(gcoOS_Allocate(MemPool, \ + gcmSIZEOF(Type), \ + (gctPOINTER *) Pointer)); \ + gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, gcmSIZEOF(Type)); \ + gcmFOOTER(); \ + return gcvSTATUS_OK; \ +} \ + \ +gceSTATUS \ +Prefix##_Free##TypeName(\ + gcsMEM_FS_MEM_POOL MemPool, \ + Type * Pointer \ + ) \ +{ \ + gceSTATUS status; \ + gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \ + status = 
gcmOS_SAFE_FREE(MemPool, Pointer); \ + gcmFOOTER(); \ + return status; \ +} + +#define gcmMEM_DeclareVSMemPool(Type, TypeName, Prefix) \ +gceSTATUS \ +Prefix##_Allocate##TypeName(\ + gcsMEM_VS_MEM_POOL MemPool, \ + Type ** Pointer, \ + gctUINT Size \ + ) \ +{ \ + gceSTATUS status; \ + gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \ + status = gcoOS_Allocate(MemPool, \ + Size, \ + (gctPOINTER *) Pointer); \ + gcmFOOTER(); \ + return status; \ +} \ + \ +gceSTATUS \ +Prefix##_CAllocate##TypeName(\ + gcsMEM_VS_MEM_POOL MemPool, \ + Type ** Pointer, \ + gctUINT Size \ + ) \ +{ \ + gceSTATUS status; \ + gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \ + gcmERR_RETURN(gcoOS_Allocate(MemPool, \ + Size, \ + (gctPOINTER *) Pointer)); \ + gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, Size); \ + gcmFOOTER(); \ + return gcvSTATUS_OK; \ +} \ + \ +gceSTATUS \ +Prefix##_Free##TypeName(\ + gcsMEM_VS_MEM_POOL MemPool, \ + Type * Pointer \ + ) \ +{ \ + gceSTATUS status; \ + gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \ + status = gcmOS_SAFE_FREE(MemPool, Pointer); \ + gcmFOOTER(); \ + return status; \ +} + +#define gcmMEM_DeclareAFSMemPool(Type, TypeName, Prefix) \ +gceSTATUS \ +Prefix##_Allocate##TypeName(\ + gcsMEM_AFS_MEM_POOL MemPool, \ + Type ** Pointer, \ + gctUINT Count \ + ) \ +{ \ + gceSTATUS status; \ + gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \ + status = gcoOS_Allocate(MemPool, \ + Count * gcmSIZEOF(Type), \ + (gctPOINTER *) Pointer); \ + gcmFOOTER(); \ + return status; \ +} \ + \ +gceSTATUS \ +Prefix##_CAllocate##TypeName(\ + gcsMEM_AFS_MEM_POOL MemPool, \ + Type ** Pointer, \ + gctUINT Count \ + ) \ +{ \ + gceSTATUS status; \ + gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \ + gcmERR_RETURN(gcoOS_Allocate(MemPool, \ + Count * gcmSIZEOF(Type), \ + (gctPOINTER *) Pointer)); \ + gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, Count * 
gcmSIZEOF(Type)); \ + gcmFOOTER(); \ + return gcvSTATUS_OK; \ +} \ + \ +gceSTATUS \ +Prefix##_Free##TypeName(\ + gcsMEM_AFS_MEM_POOL MemPool, \ + Type * Pointer \ + ) \ +{ \ + gceSTATUS status; \ + gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \ + status = gcmOS_SAFE_FREE(MemPool, Pointer); \ + gcmFOOTER(); \ + return status; \ +} +#endif + +/******************************************************************************* +** Memory Pool Data Functions +*******************************************************************************/ +gceSTATUS +gcfMEM_InitFSMemPool( + IN gcsMEM_FS_MEM_POOL * MemPool, + IN gcoOS OS, + IN gctUINT NodeCount, + IN gctUINT NodeSize + ); + +gceSTATUS +gcfMEM_FreeFSMemPool( + IN gcsMEM_FS_MEM_POOL * MemPool + ); + +gceSTATUS +gcfMEM_FSMemPoolGetANode( + IN gcsMEM_FS_MEM_POOL MemPool, + OUT gctPOINTER * Node + ); + +gceSTATUS +gcfMEM_FSMemPoolFreeANode( + IN gcsMEM_FS_MEM_POOL MemPool, + IN gctPOINTER Node + ); + +gceSTATUS +gcfMEM_FSMemPoolFreeAList( + IN gcsMEM_FS_MEM_POOL MemPool, + IN gctPOINTER FirstNode, + IN gctPOINTER LastNode + ); + +gceSTATUS +gcfMEM_InitVSMemPool( + IN gcsMEM_VS_MEM_POOL * MemPool, + IN gcoOS OS, + IN gctUINT BlockSize, + IN gctBOOL RecycleFreeNode + ); + +gceSTATUS +gcfMEM_FreeVSMemPool( + IN gcsMEM_VS_MEM_POOL * MemPool + ); + +gceSTATUS +gcfMEM_VSMemPoolGetANode( + IN gcsMEM_VS_MEM_POOL MemPool, + IN gctUINT Size, + IN gctUINT Alignment, + OUT gctPOINTER * Node + ); + +gceSTATUS +gcfMEM_VSMemPoolFreeANode( + IN gcsMEM_VS_MEM_POOL MemPool, + IN gctPOINTER Node + ); + +gceSTATUS +gcfMEM_InitAFSMemPool( + IN gcsMEM_AFS_MEM_POOL *MemPool, + IN gcoOS OS, + IN gctUINT NodeCount, + IN gctUINT NodeSize + ); + +gceSTATUS +gcfMEM_FreeAFSMemPool( + IN gcsMEM_AFS_MEM_POOL *MemPool + ); + +gceSTATUS +gcfMEM_AFSMemPoolGetANode( + IN gcsMEM_AFS_MEM_POOL MemPool, + IN gctUINT Count, + OUT gctPOINTER * Node + ); + +gceSTATUS +gcfMEM_AFSMemPoolFreeANode( + IN gcsMEM_AFS_MEM_POOL MemPool, + IN gctPOINTER Node + 
); + +#ifdef __cplusplus +} +#endif + +#endif /* (gcdENABLE_3D || gcdENABLE_VG) */ +#endif /* __gc_hal_mem_h_ */ + + diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_metadata.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_metadata.h new file mode 100644 index 000000000000..c617e4ff8af7 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_metadata.h @@ -0,0 +1,118 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#ifndef __gc_hal_kernel_metadata_h_ +#define __gc_hal_kernel_metadata_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Macro to combine four characters into a Character Code. */ +#define __FOURCC(a, b, c, d) \ + ((uint32_t)(a) | ((uint32_t)(b) << 8) | ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24)) + +#define VIV_VIDMEM_METADATA_MAGIC __FOURCC('v', 'i', 'v', 'm') + +/* Compressed format now was defined same as dec400d, should be general. 
*/ +typedef enum _VIV_COMPRESS_FMT +{ + _VIV_CFMT_ARGB8 = 0, + _VIV_CFMT_XRGB8, + _VIV_CFMT_AYUV, + _VIV_CFMT_UYVY, + _VIV_CFMT_YUY2, + _VIV_CFMT_YUV_ONLY, + _VIV_CFMT_UV_MIX, + _VIV_CFMT_ARGB4, + _VIV_CFMT_XRGB4, + _VIV_CFMT_A1R5G5B5, + _VIV_CFMT_X1R5G5B5, + _VIV_CFMT_R5G6B5, + _VIV_CFMT_Z24S8, + _VIV_CFMT_Z24, + _VIV_CFMT_Z16, + _VIV_CFMT_A2R10G10B10, + _VIV_CFMT_BAYER, + _VIV_CFMT_SIGNED_BAYER, + _VIV_CFMT_VAA16, + _VIV_CFMT_S8, + + _VIV_CFMT_MAX, +} _VIV_COMPRESS_FMT; + +/* Metadata for cross-device fd share with additional (ts) info. */ +typedef struct _VIV_VIDMEM_METADATA +{ + uint32_t magic; + + int32_t ts_fd; + void * ts_dma_buf; + + uint32_t fc_enabled; + uint32_t fc_value; + uint32_t fc_value_upper; + + uint32_t compressed; + uint32_t compress_format; +} _VIV_VIDMEM_METADATA; + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_kernel_metadata_h_ */ + + diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_options.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_options.h new file mode 100644 index 000000000000..83e044fa41a5 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_options.h @@ -0,0 +1,1408 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. 
+* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#ifndef __gc_hal_options_h_ +#define __gc_hal_options_h_ + +/* + gcdSECURITY + +*/ +#ifndef gcdSECURITY +# define gcdSECURITY 0 +#endif + +/* + gcdPRINT_VERSION + + Print HAL version. +*/ +#ifndef gcdPRINT_VERSION +# define gcdPRINT_VERSION 0 +#endif + +/* +USE_KERNEL_VIRTUAL_BUFFERS + +This define enables the use of VM for gckCommand and fence buffers. +*/ +#ifndef USE_KERNEL_VIRTUAL_BUFFERS +#if defined(UNDER_CE) +# define USE_KERNEL_VIRTUAL_BUFFERS 1 +#else +# define USE_KERNEL_VIRTUAL_BUFFERS 1 +#endif +#endif + +/* + USE_NEW_LINUX_SIGNAL + + This define enables the Linux kernel signaling between kernel and user. +*/ +#ifndef USE_NEW_LINUX_SIGNAL +# define USE_NEW_LINUX_SIGNAL 0 +#endif + +/* + USE_LINUX_PCIE + + This define enables galcore as a Linux PCIE driver. +*/ +#ifndef USE_LINUX_PCIE +# define USE_LINUX_PCIE 0 +#endif + +/* + VIVANTE_PROFILER + + This define enables the profiler. +*/ +#ifndef VIVANTE_PROFILER +# define VIVANTE_PROFILER 1 +#endif + +/* + gcdUSE_VG + + Enable VG HAL layer (only for GC350). +*/ +#ifndef gcdUSE_VG +# define gcdUSE_VG 0 +#endif + +/* + gcdUSE_VX + + Enable VX HAL layer. +*/ +#ifndef gcdUSE_VX +# define gcdUSE_VX 1 +#endif + +/* + PROFILE_HAL_COUNTERS + + This define enables HAL counter profiling support. HW and SHADER + counter profiling depends on this. +*/ +#ifndef PROFILE_HAL_COUNTERS +# define PROFILE_HAL_COUNTERS 1 +#endif + +/* + PROFILE_HW_COUNTERS + + This define enables HW counter profiling support. +*/ +#ifndef PROFILE_HW_COUNTERS +# define PROFILE_HW_COUNTERS 1 +#endif + +/* + PROFILE_SHADER_COUNTERS + + This define enables SHADER counter profiling support. +*/ +#ifndef PROFILE_SHADER_COUNTERS +# define PROFILE_SHADER_COUNTERS 1 +#endif + +/* + COMMAND_PROCESSOR_VERSION + + The version of the command buffer and task manager. 
+*/ +#define COMMAND_PROCESSOR_VERSION 1 + +/* + gcdDUMP_KEY + + Set this to a string that appears in 'cat /proc//cmdline'. E.g. 'camera'. + HAL will create dumps for the processes matching this key. +*/ +#ifndef gcdDUMP_KEY +# define gcdDUMP_KEY "process" +#endif + +/* + gcdDUMP_PATH + + The dump file location. Some processes cannot write to the sdcard. + Try apps' data dir, e.g. /data/data/com.android.launcher +*/ +#ifndef gcdDUMP_PATH +#if defined(ANDROID) +# define gcdDUMP_PATH "/mnt/sdcard/" +#else +# define gcdDUMP_PATH "./" +#endif +#endif + +/* + gcdDUMP + + When set to 1, a dump of all states and memory uploads, as well as other + hardware related execution will be printed to the debug console. This + data can be used for playing back applications. + + When set to 2, for vxc, all output memory will be dump. + +*/ +#ifndef gcdDUMP +# define gcdDUMP 0 +#endif + +/* + gcdDUMP_API + + When set to 1, a high level dump of the EGL and GL/VG APs's are + captured. +*/ +#ifndef gcdDUMP_API +# define gcdDUMP_API 0 +#endif + +#ifndef gcdDUMP_2DVG +# define gcdDUMP_2DVG 0 +#endif + +/* + gcdDUMP_AHB_ACCESS + + When set to 1, a dump of all AHB register access will be printed to kernel + message. +*/ +#ifndef gcdDUMP_AHB_ACCESS +# define gcdDUMP_AHB_ACCESS 0 +#endif + +/* + gcdDEBUG_OPTION + When set to 1, the debug options are enabled. We must set other MACRO to enable + sub case. +*/ +#ifndef gcdDEBUG_OPTION +# define gcdDEBUG_OPTION 0 + +#if gcdDEBUG_OPTION +/* + gcdDEBUG_OPTION_KEY + The process name of debug application. +*/ +#ifndef gcdDEBUG_OPTION_KEY +# define gcdDEBUG_OPTION_KEY "process" +# endif +/* + gcdDEBUG_OPTION_NO_GL_DRAWS + When set to 1, all glDrawArrays and glDrawElements will be skip. +*/ +#ifndef gcdDEBUG_OPTION_NO_GL_DRAWS +# define gcdDEBUG_OPTION_NO_GL_DRAWS 0 +# endif +/* + gcdDEBUG_OPTION_NO_DRAW_PRIMITIVES + When set to 1, all DrawPrimitives will be skip. 
+*/ +#ifndef gcdDEBUG_OPTION_NO_DRAW_PRIMITIVES +# define gcdDEBUG_OPTION_NO_DRAW_PRIMITIVES 0 +# endif +/* + gcdDEBUG_OPTION_SKIP_SWAP + When set to 1, just one out of gcdDEBUG_OPTION_SKIP_FRAMES(such as 1/10) eglSwapBuffers will be resolve, + others skip. +*/ +#ifndef gcdDEBUG_OPTION_SKIP_SWAP +# define gcdDEBUG_OPTION_SKIP_SWAP 0 +# define gcdDEBUG_OPTION_SKIP_FRAMES 10 +# endif +/* + gcdDEBUG_OPTION_FORCE_16BIT_RENDER_TARGET + When set to 1, the format of render target will force to RGB565. +*/ +#ifndef gcdDEBUG_OPTION_FORCE_16BIT_RENDER_TARGET +# define gcdDEBUG_OPTION_FORCE_16BIT_RENDER_TARGET 0 +# endif +/* + gcdDEBUG_OPTION_NONE_TEXTURE + When set to 1, the type of texture will be set to 0x0. +*/ +#ifndef gcdDEBUG_OPTION_NONE_TEXTURE +# define gcdDEBUG_OPTION_NONE_TEXTURE 0 +# endif +/* + gcdDEBUG_OPTION_NONE_DEPTH + When set to 1, the depth format of surface will be set to gcvSURF_UNKNOWN. +*/ +#ifndef gcdDEBUG_OPTION_NONE_DEPTH +# define gcdDEBUG_OPTION_NONE_DEPTH 0 +# endif + +/* + gcdDEBUG_FORCE_CONTEXT_UPDATE + When set to 1, context will be updated before every commit. +*/ +#ifndef gcdDEBUG_FORCE_CONTEXT_UPDATE +# define gcdDEBUG_FORCE_CONTEXT_UPDATE 0 +# endif + + +/* + gcdDEBUG_FORCE_CONTEXT_UPDATE + When set to 1, pool of each type surface can be specified by + changing poolPerType[] in gcsSURF_NODE_Construct. +*/ +#ifndef gcdDEBUG_OPTION_SPECIFY_POOL +# define gcdDEBUG_OPTION_SPECIFY_POOL 0 +# endif + +# endif +#endif + +/* + gcdDUMP_VERIFY_PER_DRAW + + When set to 1, verify RT and images(if used) for every single draw to ease simulation debug. + Only valid for ES3 driver for now. +*/ +#ifndef gcdDUMP_VERIFY_PER_DRAW +# define gcdDUMP_VERIFY_PER_DRAW 0 +#endif + + + +/* + gcdDUMP_FRAMERATE + When set to a value other than zero, averaqe frame rate will be dumped. + The value set is the starting frame that the average will be calculated. + This is needed because sometimes first few frames are too slow to be included + in the average. 
Frame count starts from 1. +*/ +#ifndef gcdDUMP_FRAMERATE +# define gcdDUMP_FRAMERATE 0 +#endif + +/* + gcdENABLE_FSCALE_VAL_ADJUST + When non-zero, FSCALE_VAL when gcvPOWER_ON can be adjusted externally. + */ +#ifndef gcdENABLE_FSCALE_VAL_ADJUST +# define gcdENABLE_FSCALE_VAL_ADJUST 1 +#endif + +/* + gcdDUMP_IN_KERNEL + + When set to 1, all dumps will happen in the kernel. This is handy if + you want the kernel to dump its command buffers as well and the data + needs to be in sync. +*/ +#ifndef gcdDUMP_IN_KERNEL +# define gcdDUMP_IN_KERNEL 0 +#endif + +/* + gcdDUMP_COMMAND + + When set to non-zero, the command queue will dump all incoming command + and context buffers as well as all other modifications to the command + queue. +*/ +#ifndef gcdDUMP_COMMAND +# define gcdDUMP_COMMAND 0 +#endif + +/* + gcdDUMP_2D + + When set to non-zero, it will dump the 2D command and surface. +*/ +#ifndef gcdDUMP_2D +# define gcdDUMP_2D 0 +#endif + +/* + gcdDUMP_FRAME_TGA + + When set to a value other than 0, a dump of the frame specified by the value, + will be done into frame.tga. Frame count starts from 1. + */ +#ifndef gcdDUMP_FRAME_TGA +# define gcdDUMP_FRAME_TGA 0 +#endif +/* + gcdNULL_DRIVER + + Set to 1 for infinite speed hardware. + Set to 2 for bypassing the HAL. +*/ +#ifndef gcdNULL_DRIVER +# define gcdNULL_DRIVER 0 +#endif + +/* + gcdENABLE_TIMEOUT_DETECTION + + Enable timeout detection. +*/ +#ifndef gcdENABLE_TIMEOUT_DETECTION +# define gcdENABLE_TIMEOUT_DETECTION 0 +#endif + +/* + gcdCMD_BUFFER_SIZE + + Number of bytes in a command buffer. +*/ +#ifndef gcdCMD_BUFFER_SIZE +# define gcdCMD_BUFFER_SIZE (128 << 10) +#endif + +/* + gcdCMD_BLT_BUFFER_SIZE + + Number of bytes in a command buffer. +*/ +#ifndef gcdCMD_BLT_BUFFER_SIZE +# define gcdCMD_BLT_BUFFER_SIZE (1 << 10) +#endif + +/* + gcdCMD_BUFFERS + + Number of command buffers to use per client. 
+*/ +#ifndef gcdCMD_BUFFERS +# define gcdCMD_BUFFERS 2 +#endif + +/* + gcdMAX_CMD_BUFFERS + + Maximum number of command buffers to use per client. +*/ +#ifndef gcdMAX_CMD_BUFFERS +# define gcdMAX_CMD_BUFFERS 8 +#endif + +/* + gcdCOMMAND_QUEUES + + Number of command queues in the kernel. +*/ +#ifndef gcdCOMMAND_QUEUES +# define gcdCOMMAND_QUEUES 2 +#endif + +/* + gcdPOWER_CONTROL_DELAY + + The delay in milliseconds required to wait until the GPU has woke up + from a suspend or power-down state. This is system dependent because + the bus clock also needs to stabalize. +*/ +#ifndef gcdPOWER_CONTROL_DELAY +# define gcdPOWER_CONTROL_DELAY 0 +#endif + +/* + gcdMMU_SIZE + + Size of the MMU page table in bytes. Each 4 bytes can hold 4kB worth of + virtual data. +*/ +#ifndef gcdMMU_SIZE +# define gcdMMU_SIZE (256 << 10) +#endif + +#ifndef gcdGC355_VGMMU_MEMORY_SIZE_KB +# define gcdGC355_VGMMU_MEMORY_SIZE_KB 32 +#endif +/* + gcdSECURE_USER + + Use logical addresses instead of physical addresses in user land. In + this case a hint table is created for both command buffers and context + buffers, and that hint table will be used to patch up those buffers in + the kernel when they are ready to submit. +*/ +#ifndef gcdSECURE_USER +# define gcdSECURE_USER 0 +#endif + +/* + gcdSECURE_CACHE_SLOTS + + Number of slots in the logical to DMA address cache table. Each time a + logical address needs to be translated into a DMA address for the GPU, + this cache will be walked. The replacement scheme is LRU. +*/ +#ifndef gcdSECURE_CACHE_SLOTS +# define gcdSECURE_CACHE_SLOTS 1024 +#endif + +/* + gcdSECURE_CACHE_METHOD + + Replacement scheme used for Secure Cache. The following options are + available: + + gcdSECURE_CACHE_LRU + A standard LRU cache. + + gcdSECURE_CACHE_LINEAR + A linear walker with the idea that an application will always + render the scene in a similar way, so the next entry in the + cache should be a hit most of the time. + + gcdSECURE_CACHE_HASH + A 256-entry hash table. 
+ + gcdSECURE_CACHE_TABLE + A simple cache but with potential of a lot of cache replacement. +*/ +#ifndef gcdSECURE_CACHE_METHOD +# define gcdSECURE_CACHE_METHOD gcdSECURE_CACHE_HASH +#endif + +/* + gcdREGISTER_ACCESS_FROM_USER + + Set to 1 to allow IOCTL calls to get through from user land. This + should only be in debug or development drops. +*/ +#ifndef gcdREGISTER_ACCESS_FROM_USER +# define gcdREGISTER_ACCESS_FROM_USER 1 +#endif + +/* + gcdHEAP_SIZE + + Set the allocation size for the internal heaps. Each time a heap is + full, a new heap will be allocated with this minmimum amount of bytes. + The bigger this size, the fewer heaps there are to allocate, the better + the performance. However, heaps won't be freed until they are + completely free, so there might be some more memory waste if the size is + too big. +*/ +#ifndef gcdHEAP_SIZE +# define gcdHEAP_SIZE (64 << 10) +#endif + +/* + gcdPOWER_SUSPEND_WHEN_IDLE + + Set to 1 to make GPU enter gcvPOWER_SUSPEND when idle detected, + otherwise GPU will enter gcvPOWER_IDLE. +*/ +#ifndef gcdPOWER_SUSPEND_WHEN_IDLE +# define gcdPOWER_SUSPEND_WHEN_IDLE 1 +#endif + +#ifndef gcdFPGA_BUILD +# define gcdFPGA_BUILD 0 +#endif + +/* + gcdGPU_TIMEOUT + + This define specified the number of milliseconds the system will wait + before it broadcasts the GPU is stuck. In other words, it will define + the timeout of any operation that needs to wait for the GPU. + + If the value is 0, no timeout will be checked for. +*/ +#ifndef gcdGPU_TIMEOUT +# define gcdGPU_TIMEOUT 20000 +#endif + +/* + gcdGPU_2D_TIMEOUT + + This define specified the number of milliseconds the system will wait + before it broadcasts the 2D GPU is stuck. In other words, it will define + the timeout of any operation that needs to wait for the GPU. + + If the value is 0, no timeout will be checked for. +*/ +#ifndef gcdGPU_2D_TIMEOUT +# define gcdGPU_2D_TIMEOUT 4000 +#endif + + +/* + gcdGPU_ADVANCETIMER + + it is advance timer. 
+*/ +#ifndef gcdGPU_ADVANCETIMER +# define gcdGPU_ADVANCETIMER 250 +#endif + +/* + gcdSTATIC_LINK + + This define disalbes static linking; +*/ +#ifndef gcdSTATIC_LINK +# define gcdSTATIC_LINK 0 +#endif + +/* + gcdUSE_NEW_HEAP + + Setting this define to 1 enables new heap. +*/ +#ifndef gcdUSE_NEW_HEAP +# define gcdUSE_NEW_HEAP 0 +#endif + +/* + gcdCMD_NO_2D_CONTEXT + + This define enables no-context 2D command buffer. +*/ +#ifndef gcdCMD_NO_2D_CONTEXT +# define gcdCMD_NO_2D_CONTEXT 1 +#endif + +/* + gcdENABLE_BUFFER_ALIGNMENT + + When enabled, video memory is allocated with atleast 16KB aligment + between multiple sub-buffers. +*/ +#ifndef gcdENABLE_BUFFER_ALIGNMENT +# define gcdENABLE_BUFFER_ALIGNMENT 1 +#endif + +/* + gcdENABLE_BANK_ALIGNMENT + + When enabled, video memory is allocated bank aligned. The vendor can modify + _GetSurfaceBankAlignment() and _GetBankOffsetBytes() to define how + different types of allocations are bank and channel aligned. + When disabled (default), no bank alignment is done. +*/ +#ifndef gcdENABLE_BANK_ALIGNMENT +# define gcdENABLE_BANK_ALIGNMENT 0 +#endif + +/* + gcdBANK_BIT_START + + Specifies the start bit of the bank (inclusive). +*/ +#ifndef gcdBANK_BIT_START +# define gcdBANK_BIT_START 12 +#endif + +/* + gcdBANK_BIT_END + + Specifies the end bit of the bank (inclusive). +*/ +#ifndef gcdBANK_BIT_END +# define gcdBANK_BIT_END 14 +#endif + +/* + gcdBANK_CHANNEL_BIT + + When set, video memory when allocated bank aligned is allocated such that + render and depth buffer addresses alternate on the channel bit specified. + This option has an effect only when gcdENABLE_BANK_ALIGNMENT is enabled. + When disabled (default), no alteration is done. +*/ +#ifndef gcdBANK_CHANNEL_BIT +# define gcdBANK_CHANNEL_BIT 7 +#endif + +/* + gcdDYNAMIC_SPEED + + When non-zero, it informs the kernel driver to use the speed throttling + broadcasting functions to inform the system the GPU should be spet up or + slowed down. 
It will send a broadcast for slowdown each "interval" + specified by this define in milliseconds + (gckOS_BroadcastCalibrateSpeed). +*/ +#ifndef gcdDYNAMIC_SPEED +# define gcdDYNAMIC_SPEED 2000 +#endif + +/* + gcdDYNAMIC_EVENT_THRESHOLD + + When non-zero, it specifies the maximum number of available events at + which the kernel driver will issue a broadcast to speed up the GPU + (gckOS_BroadcastHurry). +*/ +#ifndef gcdDYNAMIC_EVENT_THRESHOLD +# define gcdDYNAMIC_EVENT_THRESHOLD 5 +#endif + +/* + gcdENABLE_PROFILING + + Enable profiling macros. +*/ +#ifndef gcdENABLE_PROFILING +# define gcdENABLE_PROFILING 0 +#endif + +/* + gcdENABLE_128B_MERGE + + Enable 128B merge for the BUS control. +*/ +#ifndef gcdENABLE_128B_MERGE +# define gcdENABLE_128B_MERGE 0 +#endif + +/* + gcdFRAME_DB + + When non-zero, it specified the number of frames inside the frame + database. The frame DB will collect per-frame timestamps and hardware + counters. +*/ +#ifndef gcdFRAME_DB +# define gcdFRAME_DB 0 +# define gcdFRAME_DB_RESET 0 +# define gcdFRAME_DB_NAME "/var/log/frameDB.log" +#endif + +/* + gcdPAGED_MEMORY_CACHEABLE + + When non-zero, paged memory will be cacheable. + + Normally, driver will detemines whether a video memory + is cacheable or not. When cacheable is not neccessary, + it will be writecombine. + + This option is only for those SOC which can't enable + writecombine without enabling cacheable. +*/ +#ifndef gcdPAGED_MEMORY_CACHEABLE +# define gcdPAGED_MEMORY_CACHEABLE 0 +#endif + +/* + gcdENABLE_CACHEABLE_COMMAND_BUFFER + + When non-zero, command buffer will be cacheable. +*/ +#ifndef gcdENABLE_CACHEABLE_COMMAND_BUFFER +# define gcdENABLE_CACHEABLE_COMMAND_BUFFER 0 +#endif + +/* + gcdENABLE_BUFFERABLE_VIDEO_MEMORY + + When non-zero, all video memory will be bufferable by default. 
+*/
+#ifndef gcdENABLE_BUFFERABLE_VIDEO_MEMORY
+# define gcdENABLE_BUFFERABLE_VIDEO_MEMORY 1
+#endif
+
+/*
+ gcdENABLE_INFINITE_SPEED_HW
+ enable the Infinite HW, this is for 2D openVG
+*/
+#ifndef gcdENABLE_INFINITE_SPEED_HW
+# define gcdENABLE_INFINITE_SPEED_HW 0
+#endif
+
+/*
+ gcdPOWEROFF_TIMEOUT
+
+ When non-zero, GPU will power off automatically from
+ idle state, and gcdPOWEROFF_TIMEOUT is also the default
+ timeout in milliseconds.
+ */
+#ifndef gcdPOWEROFF_TIMEOUT
+# define gcdPOWEROFF_TIMEOUT 300
+#endif
+
+/*
+ QNX_SINGLE_THREADED_DEBUGGING
+*/
+#ifndef QNX_SINGLE_THREADED_DEBUGGING
+# define QNX_SINGLE_THREADED_DEBUGGING 0
+#endif
+
+/*
+ gcdSHARED_RESOLVE_BUFFER_ENABLED
+
+ Use shared resolve buffer for all app buffers.
+*/
+#ifndef gcdSHARED_RESOLVE_BUFFER_ENABLED
+# define gcdSHARED_RESOLVE_BUFFER_ENABLED 0
+#endif
+
+/*
+ gcdUSE_TRIANGLE_STRIP_PATCH
+ */
+#ifndef gcdUSE_TRIANGLE_STRIP_PATCH
+# define gcdUSE_TRIANGLE_STRIP_PATCH 1
+#endif
+
+/*
+ gcdPROCESS_ADDRESS_SPACE
+
+ When non-zero, every process which attaches to galcore has its own GPU
+ address space, size of which is gcdPROCESS_ADDRESS_SPACE_SIZE.
+*/
+#ifndef gcdPROCESS_ADDRESS_SPACE
+# define gcdPROCESS_ADDRESS_SPACE 0
+# define gcdPROCESS_ADDRESS_SPACE_SIZE 0x80000000
+#endif
+
+/*
+ gcdSHARED_PAGETABLE
+
+ When non-zero, multiple GPUs in one chip with same MMU use
+ one shared pagetable. So that when accessing same surface,
+ they can use same GPU virtual address.
+*/
+#ifndef gcdSHARED_PAGETABLE
+# define gcdSHARED_PAGETABLE 0
+#endif
+
+#ifndef gcdUSE_PVR
+# define gcdUSE_PVR 1
+#endif
+
+/*
+ gcdSMALL_BLOCK_SIZE
+
+ When non-zero, a part of VIDMEM will be reserved for requests
+ whose requesting size is less than gcdSMALL_BLOCK_SIZE.
+
+ For Linux, it's the size of a page. If this request falls back
+ to gcvPOOL_CONTIGUOUS or gcvPOOL_VIRTUAL, memory will be wasted
+ because they allocate a page at least.
+*/ +#ifndef gcdSMALL_BLOCK_SIZE +# define gcdSMALL_BLOCK_SIZE 4096 +# define gcdRATIO_FOR_SMALL_MEMORY 32 +#endif + +/* + gcdCONTIGUOUS_SIZE_LIMIT + When non-zero, size of video node from gcvPOOL_CONTIGUOUS is + limited by gcdCONTIGUOUS_SIZE_LIMIT. +*/ +#ifndef gcdCONTIGUOUS_SIZE_LIMIT +# define gcdCONTIGUOUS_SIZE_LIMIT 0 +#endif + +/* + gcdLINK_QUEUE_SIZE + + When non-zero, driver maintains a queue to record information of + latest lined context buffer and command buffer. Data in this queue + is be used to debug. +*/ +#ifndef gcdLINK_QUEUE_SIZE +# define gcdLINK_QUEUE_SIZE 64 +#endif + +/* gcdALPHA_KILL_IN_SHADER + + Enable alpha kill inside the shader. This will be set automatically by the + HAL if certain states match a criteria. +*/ +#ifndef gcdALPHA_KILL_IN_SHADER +# define gcdALPHA_KILL_IN_SHADER 1 +#endif + + +#ifndef gcdPRINT_SWAP_TIME +# define gcdPRINT_SWAP_TIME 0 +#endif + +/* + gcdDVFS + + When non-zero, software will make use of dynamic voltage and + frequency feature. + */ +#ifndef gcdDVFS +# define gcdDVFS 1 +# define gcdDVFS_ANAYLSE_WINDOW 4 +# define gcdDVFS_POLLING_TIME (gcdDVFS_ANAYLSE_WINDOW * 4) +#endif + +#ifndef gcdSYNC +# define gcdSYNC 1 +#endif + +#ifndef gcdSHADER_SRC_BY_MACHINECODE +# define gcdSHADER_SRC_BY_MACHINECODE 1 +#endif + +#ifndef gcdGLB27_SHADER_REPLACE_OPTIMIZATION +# define gcdGLB27_SHADER_REPLACE_OPTIMIZATION 1 +#endif + + +/* + gcdSUPPORT_SWAP_RECTANGLE + + Support swap with a specific rectangle. + + Set the rectangle with eglSetSwapRectangleVIV api. + Android only. +*/ +#ifndef gcdSUPPORT_SWAP_RECTANGLE +# define gcdSUPPORT_SWAP_RECTANGLE 0 +#endif + +/* + gcdGPU_LINEAR_BUFFER_ENABLED + + Use linear buffer for GPU apps so HWC can do 2D composition. + Android only. +*/ +#ifndef gcdGPU_LINEAR_BUFFER_ENABLED +# define gcdGPU_LINEAR_BUFFER_ENABLED 1 +#endif + +/* + gcdENABLE_RENDER_INTO_WINDOW + + Enable Render-Into-Window (ie, No-Resolve) feature on android. 
+ NOTE that even if enabled, it still depends on hardware feature and + android application behavior. When hardware feature or application + behavior can not support render into window mode, it will fail back + to normal mode. + When Render-Into-Window is finally used, window back buffer of android + applications will be allocated matching render target tiling format. + Otherwise buffer tiling is decided by the above option + 'gcdGPU_LINEAR_BUFFER_ENABLED'. + Android only for now. +*/ +#ifndef gcdENABLE_RENDER_INTO_WINDOW +# define gcdENABLE_RENDER_INTO_WINDOW 1 +#endif + +/* + gcdENABLE_RENDER_INTO_WINDOW_WITH_FC + + Enable Direct-rendering (ie, No-Resolve) with tile status. + This is expremental and in development stage. + This will dynamically check if color compression is available. +*/ +#ifndef gcdENABLE_RENDER_INTO_WINDOW_WITH_FC +# define gcdENABLE_RENDER_INTO_WINDOW_WITH_FC 0 +#endif + +/* + gcdENABLE_BLIT_BUFFER_PRESERVE + + Render-Into-Window (ie, No-Resolve) does not include preserved swap + behavior. This feature can enable buffer preserve in No-Resolve mode. + When enabled, previous buffer (may be part of ) will be resolve-blitted + to current buffer. +*/ +#ifndef gcdENABLE_BLIT_BUFFER_PRESERVE +# define gcdENABLE_BLIT_BUFFER_PRESERVE 1 +#endif + +/* + gcdANDROID_NATIVE_FENCE_SYNC + + Enable android native fence sync. It is introduced since jellybean-4.2. + Depends on linux kernel option: CONFIG_SYNC. + + 0: Disabled + 1: Build framework for native fence sync feature, and EGL extension + 2: Enable async swap buffers for client + * Native fence sync for client 'queueBuffer' in EGL, which is + 'acquireFenceFd' for layer in compositor side. + 3. Enable async hwcomposer composition. 
+ * 'releaseFenceFd' for layer in compositor side, which is native + fence sync when client 'dequeueBuffer' + * Native fence sync for compositor 'queueBuffer' in EGL, which is + 'acquireFenceFd' for framebuffer target for DC + */ +#ifndef gcdANDROID_NATIVE_FENCE_SYNC +# define gcdANDROID_NATIVE_FENCE_SYNC 0 +#endif + +#ifndef gcdLINUX_SYNC_FILE +# define gcdLINUX_SYNC_FILE 0 +#endif + +/* + gcdANDROID_IMPLICIT_NATIVE_BUFFER_SYNC + + Enable implicit android native buffer sync. + + For non-HW_RENDER buffer, CPU (or other hardware) and GPU can access + the buffer at the same time. This is to add implicit synchronization + between CPU (or the hardware) and GPU. + + Eventually, please do not use implicit native buffer sync, but use + "fence sync" or "android native fence sync" instead in libgui, which + can be enabled in frameworks/native/libs/gui/Android.mk. This kind + of synchronization should be done by app but not driver itself. + + Please disable this option when either "fence sync" or + "android native fence sync" is enabled. + */ +#ifndef gcdANDROID_IMPLICIT_NATIVE_BUFFER_SYNC +# define gcdANDROID_IMPLICIT_NATIVE_BUFFER_SYNC 1 +#endif + +/* + * Implicit native buffer sync is not needed when ANDROID_native_fence_sync + * is available. + */ +#if gcdANDROID_NATIVE_FENCE_SYNC +# undef gcdANDROID_IMPLICIT_NATIVE_BUFFER_SYNC +# define gcdANDROID_IMPLICIT_NATIVE_BUFFER_SYNC 0 +#endif + +/* + gcdUSE_WCLIP_PATCH + + Enable wclipping patch. +*/ +#ifndef gcdUSE_WCLIP_PATCH +# define gcdUSE_WCLIP_PATCH 1 +#endif + +#ifndef gcdUSE_NPOT_PATCH +# define gcdUSE_NPOT_PATCH 1 +#endif + +/* + gcdINTERNAL_COMMENT + + Wrap internal comment, content wrapped by it and the macor itself + will be removed in release driver. +*/ +#ifndef gcdINTERNAL_COMMENT +# define gcdINTERNAL_COMMENT 1 +#endif + +/* + gcdRTT_DISABLE_FC + + Disable RTT FC support. For test only. 
+*/ +#ifndef gcdRTT_DISABLE_FC +# define gcdRTT_DISABLE_FC 0 +#endif + +/* + gcdFORCE_MIPMAP + + Force generate mipmap for texture. +*/ +#ifndef gcdFORCE_MIPMAP +# define gcdFORCE_MIPMAP 0 +#endif + +/* + gcdFORCE_BILINEAR + + Force bilinear for mipfilter. +*/ +#ifndef gcdFORCE_BILINEAR +# define gcdFORCE_BILINEAR 1 +#endif + +/* + gcdBINARY_TRACE + + When non-zero, binary trace will be generated. + + When gcdBINARY_TRACE_FILE_SIZE is non-zero, binary trace buffer will + be written to a file which size is limited to + gcdBINARY_TRACE_FILE_SIZE. +*/ +#ifndef gcdBINARY_TRACE +# define gcdBINARY_TRACE 0 +# define gcdBINARY_TRACE_FILE_SIZE 0 +#endif + +#ifndef gcdMOVG +# define gcdMOVG 0 +# define gcdENABLE_TS_DOUBLE_BUFFER 1 +#else +#if gcdMOVG +# define gcdENABLE_TS_DOUBLE_BUFFER 0 +#else +# define gcdENABLE_TS_DOUBLE_BUFFER 1 +#endif +#endif + +/* gcdINTERRUPT_STATISTIC + * + * Monitor the event send to GPU and interrupt issued by GPU. + */ + +#ifndef gcdINTERRUPT_STATISTIC +#if defined(LINUX) || defined(__QNXNTO__) || defined(UNDER_CE) +# define gcdINTERRUPT_STATISTIC 1 +#else +# define gcdINTERRUPT_STATISTIC 0 +#endif +#endif + +/* + gcdFENCE_WAIT_LOOP_COUNT + Wait fence, loop count. +*/ +#ifndef gcdFENCE_WAIT_LOOP_COUNT +# define gcdFENCE_WAIT_LOOP_COUNT 10000 +#endif + +/* + gcdPARTIAL_FAST_CLEAR + When it's not zero, partial fast clear is enabled. + Depends on gcdHAL_3D_DRAWBLIT, if gcdHAL_3D_DRAWBLIT is not enabled, + only available when scissor box is completely aligned. + Expremental, under test. +*/ +#ifndef gcdPARTIAL_FAST_CLEAR +# define gcdPARTIAL_FAST_CLEAR 1 +#endif + +/* + gcdREMOVE_SURF_ORIENTATION + When it's not zero, we will remove surface orientation function. + It wil become to a parameter of resolve function. +*/ +#ifndef gcdREMOVE_SURF_ORIENTATION +# define gcdREMOVE_SURF_ORIENTATION 1 +#endif + + + +/* + gcdTEST_DEC200 + Test part for DEC200. Remove when release. 
+*/ +#ifndef gcdTEST_DEC200 +# define gcdTEST_DEC200 0 +#endif + +/* + gcdPATTERN_FAST_PATH + For pattern match +*/ +#ifndef gcdPATTERN_FAST_PATH +# define gcdPATTERN_FAST_PATH 1 +#endif + +/* + gcdUSE_INPUT_DEVICE + disable input devices usage under fb mode to support fb+vdk multi-process +*/ +#ifndef gcdUSE_INPUT_DEVICE +# define gcdUSE_INPUT_DEVICE 1 +#endif + +/* + gcdPERFORMANCE_ANALYSIS + + When set to 1, driver will pass information through loadstate + to HW. This loadstate does not impact HW execution. +*/ +#ifndef gcdPERFORMANCE_ANALYSIS +# define gcdPERFORMANCE_ANALYSIS 0 +#endif + +/* + gcdFRAMEINFO_STATISTIC + When enable, collect frame information. +*/ +#ifndef gcdFRAMEINFO_STATISTIC + +#if (defined(DBG) && DBG) || defined(DEBUG) || \ + defined(_DEBUG) || gcdDUMP || gcdPERFORMANCE_ANALYSIS || \ + (defined(WIN32) && !defined(UNDER_CE)) || \ + gcdFPGA_BUILD + +# define gcdFRAMEINFO_STATISTIC 1 +#else +# define gcdFRAMEINFO_STATISTIC 0 +#endif + +#endif + +/* + gcdDEC_ENABLE_AHB + Enable DEC300 compression AHB mode or not. +*/ +#ifndef gcdDEC_ENABLE_AHB +# define gcdDEC_ENABLE_AHB 0 +#endif + +/* + gcdENABLE_UNIFIED_CONSTANT + Enable unified constant or not. +*/ +#ifndef gcdENABLE_UNIFIED_CONSTANT +# define gcdENABLE_UNIFIED_CONSTANT 1 +#endif + +/* + Core configurations. By default enable all cores. 
+*/ +#ifndef gcdENABLE_3D +# define gcdENABLE_3D 1 +#endif + +#ifndef gcdENABLE_2D +# define gcdENABLE_2D 1 +#endif + +#ifndef gcdENABLE_VG +# define gcdENABLE_VG 0 +#endif + +#ifndef gcdVG_ONLY +# define gcdVG_ONLY (!gcdENABLE_3D && !gcdENABLE_2D && gcdENABLE_VG) +#endif + +#if defined(WIN32) && !defined(UNDER_CE) && (gcdENABLE_VG == 1) + +#ifdef gcdUSE_VX +#undef gcdUSE_VX +#endif + +#ifdef COMMAND_PROCESSOR_VERSION +#undef COMMAND_PROCESSOR_VERSION +#endif + +#ifdef gcdENABLE_TRUST_APPLICATION +#undef gcdENABLE_TRUST_APPLICATION +#endif + +#ifdef gcdENABLE_3D +#undef gcdENABLE_3D +#endif + +#ifdef gcdENABLE_2D +#undef gcdENABLE_2D +#endif + +#define gcdENABLE_3D 0 +#define gcdENABLE_2D 0 +#define gcdUSE_VX 0 +#define COMMAND_PROCESSOR_VERSION 2 +#define gcdENABLE_TRUST_APPLICATION 0 + +#endif /* Only for GC355 Cmodel build. */ + +#ifndef gcdGC355_PROFILER +# define gcdGC355_PROFILER 0 +#endif + +#ifndef gcdGC355_MEM_PRINT +# define gcdGC355_MEM_PRINT 0 +#else +#if (!((gcdENABLE_3D == 0) && (gcdENABLE_2D == 0) && (gcdENABLE_VG == 1))) +# undef gcdGC355_MEM_PRINT +# define gcdGC355_MEM_PRINT 0 +# endif +#endif + + +/* + gcdRECORD_COMMAND +*/ +#ifndef gcdRECORD_COMMAND +# define gcdRECORD_COMMAND 0 +#endif + +/* + gcdALLOC_CMD_FROM_RESERVE + + Provide a way by which location of command buffer can be + specified. This is a DEBUG option to limit command buffer + to some memory range. +*/ +#ifndef gcdALLOC_CMD_FROM_RESERVE +# define gcdALLOC_CMD_FROM_RESERVE 0 +#endif + +/* + gcdBOUNDARY_CHECK + + When enabled, add bounary before and after a range of + GPU address. So overflow can be trapped by MMU exception. + This is a debug option for new MMU and gcdUSE_MMU_EXCEPTION + is enabled. +*/ +#ifndef gcdBOUNDARY_CHECK +# define gcdBOUNDARY_CHECK 0 +#endif + +/* + gcdRENDER_QUALITY_CHECK + + When enabled, we disable performance opt patch + to get know rendering quality comparing with other vendor. 
+*/ +#ifndef gcdRENDER_QUALITY_CHECK +# define gcdRENDER_QUALITY_CHECK 0 +#endif + +/* + gcdSYSTRACE + + When enabled, we embed systrace in function header/footer + to gather time information on linux platforms include android. + '1' to trace API (EGL, ES11, ES2x, ES3x, etc) + '2' to trace HAL (except compiler) + '4' to trace HAL compiler + See gc_hal_user_debug.c for more detailed trace zones. +*/ +#ifndef gcdSYSTRACE +# define gcdSYSTRACE 0 +#endif + +#ifndef gcdENABLE_APPCTXT_BLITDRAW +# define gcdENABLE_APPCTXT_BLITDRAW 0 +#endif + +/* + gcdENABLE_TRUST_APPLICATION + + When enabled, trust application is used to handle 'security' registers. + + 1) If HW doesn't have robust and security feature, this option is meaningless. + 2) If HW have robust and security and this option is not enable, + security registers are handled by non secure driver. It is for + platform doesn't want/need to use trust zone. +*/ +#ifndef gcdENABLE_TRUST_APPLICATION +#if (defined(_WIN32) && !defined(UNDER_CE)) || (defined (LINUX) && !defined(EMULATOR)) +# define gcdENABLE_TRUST_APPLICATION 1 +#else +# define gcdENABLE_TRUST_APPLICATION 0 +#endif +#endif + +/* Disable gcdENABLE_TRUST_APPLICATION when oboslete gcdSECURITY enabled. */ +#if gcdSECURITY +#undef gcdENABLE_TRUST_APPLICATION +#define gcdENABLE_TRUST_APPLICATION 0 +#endif + +#ifndef gcdMMU_SECURE_AREA_SIZE +# define gcdMMU_SECURE_AREA_SIZE 128 +#endif + +/* +VIV:gcdUSE_MMU_EXCEPTION + + When enabled, enable and check exception interrupt raised by MMU. +*/ +#ifndef gcdUSE_MMU_EXCEPTION +# define gcdUSE_MMU_EXCEPTION 1 +#endif + +#ifndef gcdVX_OPTIMIZER +# define gcdVX_OPTIMIZER 0 +#endif + +#ifndef gcdOCL_READ_IMAGE_OPTIMIZATION +# define gcdOCL_READ_IMAGE_OPTIMIZATION 0 +#endif + +#ifndef gcdALLOC_ON_FAULT +# define gcdALLOC_ON_FAULT 0 +#endif + +/* + gcdDISABLE_GPU_VIRTUAL_ADDRESS + + When enabled, disable MMU and all virtual allocated from MMU. 
+*/ +#ifndef gcdDISABLE_GPU_VIRTUAL_ADDRESS +# define gcdDISABLE_GPU_VIRTUAL_ADDRESS 0 +#endif + +/* + gcd2D_COMPRESSION_DEC400_ALIGN_MODE + + Only for DEC400 compression. + Set 0 as 16bytes aligned. 1 as 32bytes aligned. 2 as 64bytes aligned. + Default is 0 which means 32bytes aligned. +*/ +#ifndef gcd2D_COMPRESSION_DEC400_ALIGN_MODE +# define gcd2D_COMPRESSION_DEC400_ALIGN_MODE 1 +#endif + +/* + gcdENABLE_KERNEL_FENCE + When enabled, use kernel fence to do resource tracking. +*/ +#ifndef gcdENABLE_KENREL_FENCE +# define gcdENABLE_KERNEL_FENCE 0 +#endif + + +#endif /* __gc_hal_options_h_ */ + + diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_profiler.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_profiler.h new file mode 100644 index 000000000000..38f42709fc3e --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_profiler.h @@ -0,0 +1,1175 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#ifndef __gc_hal_profiler_h_ +#define __gc_hal_profiler_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define GLVERTEX_OBJECT 10 +#define GLVERTEX_OBJECT_BYTES 11 + +#define GLINDEX_OBJECT 20 +#define GLINDEX_OBJECT_BYTES 21 + +#define GLTEXTURE_OBJECT 30 +#define GLTEXTURE_OBJECT_BYTES 31 + +#define GLBUFOBJ_OBJECT 40 +#define GLBUFOBJ_OBJECT_BYTES 41 + +#define ES11_CALLS 151 +#define ES11_DRAWCALLS (ES11_CALLS + 1) +#define ES11_STATECHANGECALLS (ES11_DRAWCALLS + 1) +#define ES11_POINTCOUNT (ES11_STATECHANGECALLS + 1) +#define ES11_LINECOUNT (ES11_POINTCOUNT + 1) +#define ES11_TRIANGLECOUNT (ES11_LINECOUNT + 1) + +#define ES30_CALLS 159 +#define ES30_DRAWCALLS (ES30_CALLS + 1) +#define ES30_STATECHANGECALLS (ES30_DRAWCALLS + 1) +#define ES30_POINTCOUNT (ES30_STATECHANGECALLS + 1) +#define ES30_LINECOUNT (ES30_POINTCOUNT + 1) +#define ES30_TRIANGLECOUNT (ES30_LINECOUNT + 1) + +#define VG11_CALLS 88 +#define VG11_DRAWCALLS (VG11_CALLS + 1) +#define VG11_STATECHANGECALLS (VG11_DRAWCALLS + 1) +#define VG11_FILLCOUNT (VG11_STATECHANGECALLS + 1) +#define VG11_STROKECOUNT (VG11_FILLCOUNT + 1) +/* End of Driver API ID Definitions. */ + +/* HAL & MISC IDs. 
*/ +#define HAL_VERTBUFNEWBYTEALLOC 1 +#define HAL_VERTBUFTOTALBYTEALLOC (HAL_VERTBUFNEWBYTEALLOC + 1) +#define HAL_VERTBUFNEWOBJALLOC (HAL_VERTBUFTOTALBYTEALLOC + 1) +#define HAL_VERTBUFTOTALOBJALLOC (HAL_VERTBUFNEWOBJALLOC + 1) +#define HAL_INDBUFNEWBYTEALLOC (HAL_VERTBUFTOTALOBJALLOC + 1) +#define HAL_INDBUFTOTALBYTEALLOC (HAL_INDBUFNEWBYTEALLOC + 1) +#define HAL_INDBUFNEWOBJALLOC (HAL_INDBUFTOTALBYTEALLOC + 1) +#define HAL_INDBUFTOTALOBJALLOC (HAL_INDBUFNEWOBJALLOC + 1) +#define HAL_TEXBUFNEWBYTEALLOC (HAL_INDBUFTOTALOBJALLOC + 1) +#define HAL_TEXBUFTOTALBYTEALLOC (HAL_TEXBUFNEWBYTEALLOC + 1) +#define HAL_TEXBUFNEWOBJALLOC (HAL_TEXBUFTOTALBYTEALLOC + 1) +#define HAL_TEXBUFTOTALOBJALLOC (HAL_TEXBUFNEWOBJALLOC + 1) + +#define GPU_CYCLES 1 +#define GPU_READ64BYTE (GPU_CYCLES + 1) +#define GPU_WRITE64BYTE (GPU_READ64BYTE + 1) +#define GPU_TOTALCYCLES (GPU_WRITE64BYTE + 1) +#define GPU_IDLECYCLES (GPU_TOTALCYCLES + 1) + +#define VS_INSTCOUNT 1 +#define VS_BRANCHINSTCOUNT (VS_INSTCOUNT + 1) +#define VS_TEXLDINSTCOUNT (VS_BRANCHINSTCOUNT + 1) +#define VS_RENDEREDVERTCOUNT (VS_TEXLDINSTCOUNT + 1) +#define VS_SOURCE (VS_RENDEREDVERTCOUNT + 1) +#define VS_NONIDLESTARVECOUNT (VS_SOURCE + 1) +#define VS_STARVELCOUNT (VS_NONIDLESTARVECOUNT + 1) +#define VS_STALLCOUNT (VS_STARVELCOUNT + 1) +#define VS_PROCESSCOUNT (VS_STALLCOUNT + 1) + +#define PS_INSTCOUNT 1 +#define PS_BRANCHINSTCOUNT (PS_INSTCOUNT + 1) +#define PS_TEXLDINSTCOUNT (PS_BRANCHINSTCOUNT + 1) +#define PS_RENDEREDPIXCOUNT (PS_TEXLDINSTCOUNT + 1) +#define PS_SOURCE (PS_RENDEREDPIXCOUNT + 1) +#define PS_NONIDLESTARVECOUNT (PS_SOURCE + 1) +#define PS_STARVELCOUNT (PS_NONIDLESTARVECOUNT + 1) +#define PS_STALLCOUNT (PS_STARVELCOUNT + 1) +#define PS_PROCESSCOUNT (PS_STALLCOUNT + 1) +#define PS_SHADERCYCLECOUNT (PS_PROCESSCOUNT + 1) + +#define PA_INVERTCOUNT 1 +#define PA_INPRIMCOUNT (PA_INVERTCOUNT + 1) +#define PA_OUTPRIMCOUNT (PA_INPRIMCOUNT + 1) +#define PA_DEPTHCLIPCOUNT (PA_OUTPRIMCOUNT + 1) +#define 
PA_TRIVIALREJCOUNT (PA_DEPTHCLIPCOUNT + 1) +#define PA_CULLCOUNT (PA_TRIVIALREJCOUNT + 1) +#define PA_NONIDLESTARVECOUNT (PA_CULLCOUNT + 1) +#define PA_STARVELCOUNT (PA_NONIDLESTARVECOUNT + 1) +#define PA_STALLCOUNT (PA_STARVELCOUNT + 1) +#define PA_PROCESSCOUNT (PA_STALLCOUNT + 1) + +#define SE_TRIANGLECOUNT 1 +#define SE_LINECOUNT (SE_TRIANGLECOUNT + 1) +#define SE_STARVECOUNT (SE_LINECOUNT + 1) +#define SE_STALLCOUNT (SE_STARVECOUNT + 1) +#define SE_RECEIVETRIANGLECOUNT (SE_STALLCOUNT + 1) +#define SE_SENDTRIANGLECOUNT (SE_RECEIVETRIANGLECOUNT + 1) +#define SE_RECEIVELINESCOUNT (SE_SENDTRIANGLECOUNT + 1) +#define SE_SENDLINESCOUNT (SE_RECEIVELINESCOUNT + 1) +#define SE_NONIDLESTARVECOUNT (SE_SENDLINESCOUNT + 1) +#define SE_PROCESSCOUNT (SE_NONIDLESTARVECOUNT + 1) + +#define RA_VALIDPIXCOUNT 1 +#define RA_TOTALQUADCOUNT (RA_VALIDPIXCOUNT + 1) +#define RA_VALIDQUADCOUNTEZ (RA_TOTALQUADCOUNT + 1) +#define RA_TOTALPRIMCOUNT (RA_VALIDQUADCOUNTEZ + 1) +#define RA_PIPECACHEMISSCOUNT (RA_TOTALPRIMCOUNT + 1) +#define RA_PREFCACHEMISSCOUNT (RA_PIPECACHEMISSCOUNT + 1) +#define RA_EEZCULLCOUNT (RA_PREFCACHEMISSCOUNT + 1) +#define RA_NONIDLESTARVECOUNT (RA_EEZCULLCOUNT + 1) +#define RA_STARVELCOUNT (RA_NONIDLESTARVECOUNT + 1) +#define RA_STALLCOUNT (RA_STARVELCOUNT + 1) +#define RA_PROCESSCOUNT (RA_STALLCOUNT + 1) + +#define TX_TOTBILINEARREQ 1 +#define TX_TOTTRILINEARREQ (TX_TOTBILINEARREQ + 1) +#define TX_TOTDISCARDTEXREQ (TX_TOTTRILINEARREQ + 1) +#define TX_TOTTEXREQ (TX_TOTDISCARDTEXREQ + 1) +#define TX_MEMREADCOUNT (TX_TOTTEXREQ + 1) +#define TX_MEMREADIN8BCOUNT (TX_MEMREADCOUNT + 1) +#define TX_CACHEMISSCOUNT (TX_MEMREADIN8BCOUNT + 1) +#define TX_CACHEHITTEXELCOUNT (TX_CACHEMISSCOUNT + 1) +#define TX_CACHEMISSTEXELCOUNT (TX_CACHEHITTEXELCOUNT + 1) +#define TX_NONIDLESTARVECOUNT (TX_CACHEMISSTEXELCOUNT+ 1) +#define TX_STARVELCOUNT (TX_NONIDLESTARVECOUNT + 1) +#define TX_STALLCOUNT (TX_STARVELCOUNT + 1) +#define TX_PROCESSCOUNT (TX_STALLCOUNT + 1) + +#define 
PE_KILLEDBYCOLOR 1 +#define PE_KILLEDBYDEPTH (PE_KILLEDBYCOLOR + 1) +#define PE_DRAWNBYCOLOR (PE_KILLEDBYDEPTH + 1) +#define PE_DRAWNBYDEPTH (PE_DRAWNBYCOLOR + 1) + +#define MC_READREQ8BPIPE 1 +#define MC_READREQ8BIP (MC_READREQ8BPIPE + 1) +#define MC_WRITEREQ8BPIPE (MC_READREQ8BIP + 1) +#define MC_AXIMINLATENCY (MC_WRITEREQ8BPIPE + 1) +#define MC_AXIMAXLATENCY (MC_AXIMINLATENCY + 1) +#define MC_AXITOTALLATENCY (MC_AXIMAXLATENCY + 1) +#define MC_AXISAMPLECOUNT (MC_AXITOTALLATENCY + 1) + +#define AXI_READREQSTALLED 1 +#define AXI_WRITEREQSTALLED (AXI_READREQSTALLED + 1) +#define AXI_WRITEDATASTALLED (AXI_WRITEREQSTALLED + 1) + +#define FE_DRAWCOUNT 1 +#define FE_OUTVERTEXCOUNT (FE_DRAWCOUNT + 1) +#define FE_STALLCOUNT (FE_OUTVERTEXCOUNT + 1) +#define FE_STARVECOUNT (FE_STALLCOUNT + 1) + +#define PVS_INSTRCOUNT 1 +#define PVS_ALUINSTRCOUNT (PVS_INSTRCOUNT + 1) +#define PVS_TEXINSTRCOUNT (PVS_ALUINSTRCOUNT + 1) +#define PVS_ATTRIBCOUNT (PVS_TEXINSTRCOUNT + 1) +#define PVS_UNIFORMCOUNT (PVS_ATTRIBCOUNT + 1) +#define PVS_FUNCTIONCOUNT (PVS_UNIFORMCOUNT + 1) +#define PVS_SOURCE (PVS_FUNCTIONCOUNT + 1) + +#define PPS_INSTRCOUNT 1 +#define PPS_ALUINSTRCOUNT (PPS_INSTRCOUNT + 1) +#define PPS_TEXINSTRCOUNT (PPS_ALUINSTRCOUNT + 1) +#define PPS_ATTRIBCOUNT (PPS_TEXINSTRCOUNT + 1) +#define PPS_UNIFORMCOUNT (PPS_ATTRIBCOUNT + 1) +#define PPS_FUNCTIONCOUNT (PPS_UNIFORMCOUNT + 1) +#define PPS_SOURCE (PPS_FUNCTIONCOUNT + 1) +/* End of MISC Counter IDs. */ + + +/* Category Constants. 
*/ +#define VPHEADER 0x010000 +#define VPG_INFO 0x020000 +#define VPG_TIME 0x030000 +#define VPG_MEM 0x040000 +#define VPG_ES11 0x050000 +#define VPG_ES30 0x060000 +#define VPG_VG11 0x070000 +#define VPG_HAL 0x080000 +#define VPG_HW 0x090000 +#define VPG_GPU 0x0a0000 +#define VPG_VS 0x0b0000 +#define VPG_PS 0x0c0000 +#define VPG_PA 0x0d0000 +#define VPG_SETUP 0x0e0000 +#define VPG_RA 0x0f0000 +#define VPG_TX 0x100000 +#define VPG_PE 0x110000 +#define VPG_MC 0x120000 +#define VPG_AXI 0x130000 +#define VPG_PROG 0x140000 +#define VPG_PVS 0x150000 +#define VPG_PPS 0x160000 +#define VPG_ES11_TIME 0x170000 +#define VPG_ES30_TIME 0x180000 +#define VPG_FRAME 0x190000 +#define VPG_ES11_DRAW 0x200000 +#define VPG_ES30_DRAW 0x210000 +#define VPG_VG11_TIME 0x220000 +#define VPG_FE 0x230000 +#define VPG_MULTI_GPU 0x240000 +#define VPNG_FE 0x250000 +#define VPNG_VS 0x260000 +#define VPNG_PS 0x270000 +#define VPNG_PA 0x280000 +#define VPNG_SETUP 0x290000 +#define VPNG_RA 0x2a0000 +#define VPNG_TX 0x2b0000 +#define VPNG_PE 0x2c0000 +#define VPNG_MCC 0x2d0000 +#define VPNG_MCZ 0x2e0000 +#define VPNG_HI 0x2f0000 +#define VPNG_L2 0x300000 +#define VPG_FINISH 0x310000 +#define VPG_END 0xff0000 + +/* Info. */ +#define VPC_INFOCOMPANY (VPG_INFO + 1) +#define VPC_INFOVERSION (VPC_INFOCOMPANY + 1) +#define VPC_INFORENDERER (VPC_INFOVERSION + 1) +#define VPC_INFOREVISION (VPC_INFORENDERER + 1) +#define VPC_INFODRIVER (VPC_INFOREVISION + 1) +#define VPC_INFODRIVERMODE (VPC_INFODRIVER + 1) +#define VPC_INFOSCREENSIZE (VPC_INFODRIVERMODE + 1) + +/* Counter Constants. */ +#define VPC_ELAPSETIME (VPG_TIME + 1) +#define VPC_CPUTIME (VPC_ELAPSETIME + 1) + +#define VPC_MEMMAXRES (VPG_MEM + 1) +#define VPC_MEMSHARED (VPC_MEMMAXRES + 1) +#define VPC_MEMUNSHAREDDATA (VPC_MEMSHARED + 1) +#define VPC_MEMUNSHAREDSTACK (VPC_MEMUNSHAREDDATA + 1) + +/* OpenGL ES11 Statics Counter IDs. 
*/ +#define VPC_ES11CALLS (VPG_ES11 + ES11_CALLS) +#define VPC_ES11DRAWCALLS (VPG_ES11 + ES11_DRAWCALLS) +#define VPC_ES11STATECHANGECALLS (VPG_ES11 + ES11_STATECHANGECALLS) +#define VPC_ES11POINTCOUNT (VPG_ES11 + ES11_POINTCOUNT) +#define VPC_ES11LINECOUNT (VPG_ES11 + ES11_LINECOUNT) +#define VPC_ES11TRIANGLECOUNT (VPG_ES11 + ES11_TRIANGLECOUNT) + +/* OpenGL ES30 Statistics Counter IDs. */ +#define VPC_ES30CALLS (VPG_ES30 + ES30_CALLS) +#define VPC_ES30DRAWCALLS (VPG_ES30 + ES30_DRAWCALLS) +#define VPC_ES30STATECHANGECALLS (VPG_ES30 + ES30_STATECHANGECALLS) +#define VPC_ES30POINTCOUNT (VPG_ES30 + ES30_POINTCOUNT) +#define VPC_ES30LINECOUNT (VPG_ES30 + ES30_LINECOUNT) +#define VPC_ES30TRIANGLECOUNT (VPG_ES30 + ES30_TRIANGLECOUNT) + +/* OpenVG Statistics Counter IDs. */ +#define VPC_VG11CALLS (VPG_VG11 + VG11_CALLS) +#define VPC_VG11DRAWCALLS (VPG_VG11 + VG11_DRAWCALLS) +#define VPC_VG11STATECHANGECALLS (VPG_VG11 + VG11_STATECHANGECALLS) +#define VPC_VG11FILLCOUNT (VPG_VG11 + VG11_FILLCOUNT) +#define VPC_VG11STROKECOUNT (VPG_VG11 + VG11_STROKECOUNT) + +/* HAL Counters. */ +#define VPC_HALVERTBUFNEWBYTEALLOC (VPG_HAL + HAL_VERTBUFNEWBYTEALLOC) +#define VPC_HALVERTBUFTOTALBYTEALLOC (VPG_HAL + HAL_VERTBUFTOTALBYTEALLOC) +#define VPC_HALVERTBUFNEWOBJALLOC (VPG_HAL + HAL_VERTBUFNEWOBJALLOC) +#define VPC_HALVERTBUFTOTALOBJALLOC (VPG_HAL + HAL_VERTBUFTOTALOBJALLOC) +#define VPC_HALINDBUFNEWBYTEALLOC (VPG_HAL + HAL_INDBUFNEWBYTEALLOC) +#define VPC_HALINDBUFTOTALBYTEALLOC (VPG_HAL + HAL_INDBUFTOTALBYTEALLOC) +#define VPC_HALINDBUFNEWOBJALLOC (VPG_HAL + HAL_INDBUFNEWOBJALLOC) +#define VPC_HALINDBUFTOTALOBJALLOC (VPG_HAL + HAL_INDBUFTOTALOBJALLOC) +#define VPC_HALTEXBUFNEWBYTEALLOC (VPG_HAL + HAL_TEXBUFNEWBYTEALLOC) +#define VPC_HALTEXBUFTOTALBYTEALLOC (VPG_HAL + HAL_TEXBUFTOTALBYTEALLOC) +#define VPC_HALTEXBUFNEWOBJALLOC (VPG_HAL + HAL_TEXBUFNEWOBJALLOC) +#define VPC_HALTEXBUFTOTALOBJALLOC (VPG_HAL + HAL_TEXBUFTOTALOBJALLOC) + +/* HW: GPU Counters. 
*/ +#define VPC_GPUCYCLES (VPG_GPU + GPU_CYCLES) +#define VPC_GPUREAD64BYTE (VPG_GPU + GPU_READ64BYTE) +#define VPC_GPUWRITE64BYTE (VPG_GPU + GPU_WRITE64BYTE) +#define VPC_GPUTOTALCYCLES (VPG_GPU + GPU_TOTALCYCLES) +#define VPC_GPUIDLECYCLES (VPG_GPU + GPU_IDLECYCLES) + +/* HW: Shader Counters. */ +#define VPC_VSINSTCOUNT (VPG_VS + VS_INSTCOUNT) +#define VPC_VSBRANCHINSTCOUNT (VPG_VS + VS_BRANCHINSTCOUNT) +#define VPC_VSTEXLDINSTCOUNT (VPG_VS + VS_TEXLDINSTCOUNT) +#define VPC_VSRENDEREDVERTCOUNT (VPG_VS + VS_RENDEREDVERTCOUNT) +#define VPC_VSNONIDLESTARVECOUNT (VPG_VS + VS_NONIDLESTARVECOUNT) +#define VPC_VSSTARVELCOUNT (VPG_VS + VS_STARVELCOUNT) +#define VPC_VSSTALLCOUNT (VPG_VS + VS_STALLCOUNT) +#define VPC_VSPROCESSCOUNT (VPG_VS + VS_PROCESSCOUNT) +/* HW: PS Count. */ +#define VPC_PSINSTCOUNT (VPG_PS + PS_INSTCOUNT) +#define VPC_PSBRANCHINSTCOUNT (VPG_PS + PS_BRANCHINSTCOUNT) +#define VPC_PSTEXLDINSTCOUNT (VPG_PS + PS_TEXLDINSTCOUNT) +#define VPC_PSRENDEREDPIXCOUNT (VPG_PS + PS_RENDEREDPIXCOUNT) +#define VPC_PSNONIDLESTARVECOUNT (VPG_PS + PS_NONIDLESTARVECOUNT) +#define VPC_PSSTARVELCOUNT (VPG_PS + PS_STARVELCOUNT) +#define VPC_PSSTALLCOUNT (VPG_PS + PS_STALLCOUNT) +#define VPC_PSPROCESSCOUNT (VPG_PS + PS_PROCESSCOUNT) +#define VPC_PSSHADERCYCLECOUNT (VPG_PS + PS_SHADERCYCLECOUNT) + +/* HW: PA Counters. */ +#define VPC_PAINVERTCOUNT (VPG_PA + PA_INVERTCOUNT) +#define VPC_PAINPRIMCOUNT (VPG_PA + PA_INPRIMCOUNT) +#define VPC_PAOUTPRIMCOUNT (VPG_PA + PA_OUTPRIMCOUNT) +#define VPC_PADEPTHCLIPCOUNT (VPG_PA + PA_DEPTHCLIPCOUNT) +#define VPC_PATRIVIALREJCOUNT (VPG_PA + PA_TRIVIALREJCOUNT) +#define VPC_PACULLCOUNT (VPG_PA + PA_CULLCOUNT) +#define VPC_PANONIDLESTARVECOUNT (VPG_PA + PA_NONIDLESTARVECOUNT) +#define VPC_PASTARVELCOUNT (VPG_PA + PA_STARVELCOUNT) +#define VPC_PASTALLCOUNT (VPG_PA + PA_STALLCOUNT) +#define VPC_PAPROCESSCOUNT (VPG_PA + PA_PROCESSCOUNT) + +/* HW: Setup Counters. 
*/ +#define VPC_SETRIANGLECOUNT (VPG_SETUP + SE_TRIANGLECOUNT) +#define VPC_SELINECOUNT (VPG_SETUP + SE_LINECOUNT) +#define VPC_SESTARVECOUNT (VPG_SETUP + SE_STARVECOUNT) +#define VPC_SESTALLCOUNT (VPG_SETUP + SE_STALLCOUNT) +#define VPC_SERECEIVETRIANGLECOUNT (VPG_SETUP + SE_RECEIVETRIANGLECOUNT) +#define VPC_SESENDTRIANGLECOUNT (VPG_SETUP + SE_SENDTRIANGLECOUNT) +#define VPC_SERECEIVELINESCOUNT (VPG_SETUP + SE_RECEIVELINESCOUNT) +#define VPC_SESENDLINESCOUNT (VPG_SETUP + SE_SENDLINESCOUNT) +#define VPC_SENONIDLESTARVECOUNT (VPG_SETUP + SE_NONIDLESTARVECOUNT) +#define VPC_SEPROCESSCOUNT (VPG_SETUP + SE_PROCESSCOUNT) + +/* HW: RA Counters. */ +#define VPC_RAVALIDPIXCOUNT (VPG_RA + RA_VALIDPIXCOUNT) +#define VPC_RATOTALQUADCOUNT (VPG_RA + RA_TOTALQUADCOUNT) +#define VPC_RAVALIDQUADCOUNTEZ (VPG_RA + RA_VALIDQUADCOUNTEZ) +#define VPC_RATOTALPRIMCOUNT (VPG_RA + RA_TOTALPRIMCOUNT) +#define VPC_RAPIPECACHEMISSCOUNT (VPG_RA + RA_PIPECACHEMISSCOUNT) +#define VPC_RAPREFCACHEMISSCOUNT (VPG_RA + RA_PREFCACHEMISSCOUNT) +#define VPC_RAEEZCULLCOUNT (VPG_RA + RA_EEZCULLCOUNT) +#define VPC_RANONIDLESTARVECOUNT (VPG_RA + RA_NONIDLESTARVECOUNT) +#define VPC_RASTARVELCOUNT (VPG_RA + RA_STARVELCOUNT) +#define VPC_RASTALLCOUNT (VPG_RA + RA_STALLCOUNT) +#define VPC_RAPROCESSCOUNT (VPG_RA + RA_PROCESSCOUNT) + +/* HW: TEX Counters. 
*/ +#define VPC_TXTOTBILINEARREQ (VPG_TX + TX_TOTBILINEARREQ) +#define VPC_TXTOTTRILINEARREQ (VPG_TX + TX_TOTTRILINEARREQ) +#define VPC_TXTOTDISCARDTEXREQ (VPG_TX + TX_TOTDISCARDTEXREQ) +#define VPC_TXTOTTEXREQ (VPG_TX + TX_TOTTEXREQ) +#define VPC_TXMEMREADCOUNT (VPG_TX + TX_MEMREADCOUNT) +#define VPC_TXMEMREADIN8BCOUNT (VPG_TX + TX_MEMREADIN8BCOUNT) +#define VPC_TXCACHEMISSCOUNT (VPG_TX + TX_CACHEMISSCOUNT) +#define VPC_TXCACHEHITTEXELCOUNT (VPG_TX + TX_CACHEHITTEXELCOUNT) +#define VPC_TXCACHEMISSTEXELCOUNT (VPG_TX + TX_CACHEMISSTEXELCOUNT) +#define VPC_TXNONIDLESTARVECOUNT (VPG_TX + TX_NONIDLESTARVECOUNT) +#define VPC_TXSTARVELCOUNT (VPG_TX + TX_STARVELCOUNT) +#define VPC_TXSTALLCOUNT (VPG_TX + TX_STALLCOUNT) +#define VPC_TXPROCESSCOUNT (VPG_TX + TX_PROCESSCOUNT) + +/* HW: PE Counters. */ +#define VPC_PEKILLEDBYCOLOR (VPG_PE + PE_KILLEDBYCOLOR) +#define VPC_PEKILLEDBYDEPTH (VPG_PE + PE_KILLEDBYDEPTH) +#define VPC_PEDRAWNBYCOLOR (VPG_PE + PE_DRAWNBYCOLOR) +#define VPC_PEDRAWNBYDEPTH (VPG_PE + PE_DRAWNBYDEPTH) + +/* HW: MC Counters. */ +#define VPC_MCREADREQ8BPIPE (VPG_MC + MC_READREQ8BPIPE) +#define VPC_MCREADREQ8BIP (VPG_MC + MC_READREQ8BIP) +#define VPC_MCWRITEREQ8BPIPE (VPG_MC + MC_WRITEREQ8BPIPE) +#define VPC_MCAXIMINLATENCY (VPG_MC + MC_AXIMINLATENCY) +#define VPC_MCAXIMAXLATENCY (VPG_MC + MC_AXIMAXLATENCY) +#define VPC_MCAXITOTALLATENCY (VPG_MC + MC_AXITOTALLATENCY) +#define VPC_MCAXISAMPLECOUNT (VPG_MC + MC_AXISAMPLECOUNT) + +/* HW: AXI Counters. */ +#define VPC_AXIREADREQSTALLED (VPG_AXI + AXI_READREQSTALLED) +#define VPC_AXIWRITEREQSTALLED (VPG_AXI + AXI_WRITEREQSTALLED) +#define VPC_AXIWRITEDATASTALLED (VPG_AXI + AXI_WRITEDATASTALLED) + +/* HW: FE Counters. */ +#define VPC_FEDRAWCOUNT (VPG_FE + FE_DRAWCOUNT) +#define VPC_FEOUTVERTEXCOUNT (VPG_FE + FE_OUTVERTEXCOUNT) +#define VPC_FESTALLCOUNT (VPG_FE + FE_STALLCOUNT) +#define VPC_FESTARVECOUNT (VPG_FE + FE_STARVECOUNT) + +/* HW: Shader Counters. 
*/ +#define VPNC_VSINSTCOUNT (VPNG_VS + 1) +#define VPNC_VSBRANCHINSTCOUNT (VPNG_VS + 2) +#define VPNC_VSTEXLDINSTCOUNT (VPNG_VS + 3) +#define VPNC_VSRENDEREDVERTCOUNT (VPNG_VS + 4) +#define VPNC_VSNONIDLESTARVECOUNT (VPNG_VS + 5) +#define VPNC_VSSTARVELCOUNT (VPNG_VS + 6) +#define VPNC_VSSTALLCOUNT (VPNG_VS + 7) +#define VPNC_VSPROCESSCOUNT (VPNG_VS + 8) +#define VPNC_VSSHADERCYCLECOUNT (VPNG_VS + 9) +#define VPNC_VS_COUNT VPNC_VSSHADERCYCLECOUNT - VPNG_VS + +/* HW: PS Count. */ +#define VPNC_PSINSTCOUNT (VPNG_PS + 1) +#define VPNC_PSBRANCHINSTCOUNT (VPNG_PS + 2) +#define VPNC_PSTEXLDINSTCOUNT (VPNG_PS + 3) +#define VPNC_PSRENDEREDPIXCOUNT (VPNG_PS + 4) +#define VPNC_PSNONIDLESTARVECOUNT (VPNG_PS + 5) +#define VPNC_PSSTARVELCOUNT (VPNG_PS + 6) +#define VPNC_PSSTALLCOUNT (VPNG_PS + 7) +#define VPNC_PSPROCESSCOUNT (VPNG_PS + 8) +#define VPNC_PSSHADERCYCLECOUNT (VPNG_PS + 9) +#define VPNC_PS_COUNT VPNC_PSSHADERCYCLECOUNT - VPNG_PS + +/* HW: PA Counters. */ +#define VPNC_PAINVERTCOUNT (VPNG_PA + 1) +#define VPNC_PAINPRIMCOUNT (VPNG_PA + 2) +#define VPNC_PAOUTPRIMCOUNT (VPNG_PA + 3) +#define VPNC_PADEPTHCLIPCOUNT (VPNG_PA + 4) +#define VPNC_PATRIVIALREJCOUNT (VPNG_PA + 5) +#define VPNC_PACULLPRIMCOUNT (VPNG_PA + 6) +#define VPNC_PADROPPRIMCOUNT (VPNG_PA + 7) +#define VPNC_PAFRCLIPPRIMCOUNT (VPNG_PA + 8) +#define VPNC_PAFRCLIPDROPPRIMCOUNT (VPNG_PA + 9) +#define VPNC_PANONIDLESTARVECOUNT (VPNG_PA + 10) +#define VPNC_PASTARVELCOUNT (VPNG_PA + 11) +#define VPNC_PASTALLCOUNT (VPNG_PA + 12) +#define VPNC_PAPROCESSCOUNT (VPNG_PA + 13) +#define VPNC_PA_COUNT VPNC_PAPROCESSCOUNT - VPNG_PA + +/* HW: Setup Counters. 
*/ +#define VPNC_SECULLTRIANGLECOUNT (VPNG_SETUP + 1) +#define VPNC_SECULLLINECOUNT (VPNG_SETUP + 2) +#define VPNC_SECLIPTRIANGLECOUNT (VPNG_SETUP + 3) +#define VPNC_SECLIPLINECOUNT (VPNG_SETUP + 4) +#define VPNC_SESTARVECOUNT (VPNG_SETUP + 5) +#define VPNC_SESTALLCOUNT (VPNG_SETUP + 6) +#define VPNC_SERECEIVETRIANGLECOUNT (VPNG_SETUP + 7) +#define VPNC_SESENDTRIANGLECOUNT (VPNG_SETUP + 8) +#define VPNC_SERECEIVELINESCOUNT (VPNG_SETUP + 9) +#define VPNC_SESENDLINESCOUNT (VPNG_SETUP + 10) +#define VPNC_SENONIDLESTARVECOUNT (VPNG_SETUP + 11) +#define VPNC_SETRIVIALREJLINECOUNT (VPNG_SETUP + 12) +#define VPNC_SEPROCESSCOUNT (VPNG_SETUP + 13) +#define VPNC_SE_COUNT VPNC_SEPROCESSCOUNT - VPNG_SETUP + +/* HW: RA Counters. */ +#define VPNC_RAVALIDPIXCOUNT (VPNG_RA + 1) +#define VPNC_RATOTALQUADCOUNT (VPNG_RA + 2) +#define VPNC_RAVALIDQUADCOUNTEZ (VPNG_RA + 3) +#define VPNC_RAINPUTPRIMCOUNT (VPNG_RA + 4) +#define VPNC_RAPIPECACHEMISSCOUNT (VPNG_RA + 5) +#define VPNC_RAPREFCACHEMISSCOUNT (VPNG_RA + 6) +#define VPNC_RAPIPEHZCACHEMISSCOUNT (VPNG_RA + 7) +#define VPNC_RAPREFHZCACHEMISSCOUNT (VPNG_RA + 8) +#define VPNC_RAOUTPUTQUADCOUNT (VPNG_RA + 9) +#define VPNC_RAOUTPUTPIXELCOUNT (VPNG_RA + 10) +#define VPNC_RAEEZCULLCOUNT (VPNG_RA + 11) +#define VPNC_RANONIDLESTARVECOUNT (VPNG_RA + 12) +#define VPNC_RASTARVELCOUNT (VPNG_RA + 13) +#define VPNC_RASTALLCOUNT (VPNG_RA + 14) +#define VPNC_RAPROCESSCOUNT (VPNG_RA + 15) +#define VPNC_RA_COUNT VPNC_RAPROCESSCOUNT - VPNG_RA + +/* HW: TEX Counters. */ +#define VPNC_TXTOTBILINEARREQ (VPNG_TX + 1) +#define VPNC_TXTOTTRILINEARREQ (VPNG_TX + 2) +#define VPNC_TXTOTDISCARDTEXREQ (VPNG_TX + 3) +#define VPNC_TXTOTTEXREQ (VPNG_TX + 4) +#define VPNC_TXMC0MISSCOUNT (VPNG_TX + 5) +#define VPNC_TXMC0REQCOUNT (VPNG_TX + 6) +#define VPNC_TXMC1MISSCOUNT (VPNG_TX + 7) +#define VPNC_TXMC1REQCOUNT (VPNG_TX + 8) +#define VPNC_TX_COUNT VPNC_TXMC1REQCOUNT - VPNG_TX + +/* HW: PE Counters. 
*/ +#define VPNC_PE0KILLEDBYCOLOR (VPNG_PE + 1) +#define VPNC_PE0KILLEDBYDEPTH (VPNG_PE + 2) +#define VPNC_PE0DRAWNBYCOLOR (VPNG_PE + 3) +#define VPNC_PE0DRAWNBYDEPTH (VPNG_PE + 4) +#define VPNC_PE1KILLEDBYCOLOR (VPNG_PE + 5) +#define VPNC_PE1KILLEDBYDEPTH (VPNG_PE + 6) +#define VPNC_PE1DRAWNBYCOLOR (VPNG_PE + 7) +#define VPNC_PE1DRAWNBYDEPTH (VPNG_PE + 8) +#define VPNC_PE_COUNT VPNC_PE1DRAWNBYDEPTH - VPNG_PE + +/* HW: MCC Counters. */ +#define VPNC_MCCREADREQ8BCOLORPIPE (VPNG_MCC + 1) +#define VPNC_MCCREADREQ8BSOCOLORPIPE (VPNG_MCC + 2) +#define VPNC_MCCWRITEREQ8BCOLORPIPE (VPNG_MCC + 3) +#define VPNC_MCCREADREQSOCOLORPIPE (VPNG_MCC + 4) +#define VPNC_MCCWRITEREQCOLORPIPE (VPNG_MCC + 5) +#define VPNC_MCCREADREQ8BDEPTHPIPE (VPNG_MCC + 6) +#define VPNC_MCCREADREQ8BSFDEPTHPIPE (VPNG_MCC + 7) +#define VPNC_MCCWRITEREQ8BDEPTHPIPE (VPNG_MCC + 8) +#define VPNC_MCCREADREQSFDEPTHPIPE (VPNG_MCC + 9) +#define VPNC_MCCWRITEREQDEPTHPIPE (VPNG_MCC + 10) +#define VPNC_MCCREADREQ8BOTHERPIPE (VPNG_MCC + 11) +#define VPNC_MCCWRITEREQ8BOTHERPIPE (VPNG_MCC + 12) +#define VPNC_MCCREADREQOTHERPIPE (VPNG_MCC + 13) +#define VPNC_MCCWRITEREQOTHERPIPE (VPNG_MCC + 14) +#define VPNC_MCCAXIMINLATENCY (VPNG_MCC + 15) +#define VPNC_MCCAXIMAXLATENCY (VPNG_MCC + 16) +#define VPNC_MCCAXITOTALLATENCY (VPNG_MCC + 17) +#define VPNC_MCCAXISAMPLECOUNT (VPNG_MCC + 18) +#define VPNC_MCCFEREADBANDWIDTH (VPNG_MCC + 19) +#define VPNC_MCCMMUREADBANDWIDTH (VPNG_MCC + 20) +#define VPNC_MCCBLTREADBANDWIDTH (VPNG_MCC + 21) +#define VPNC_MCCSH0READBANDWIDTH (VPNG_MCC + 22) +#define VPNC_MCCSH1READBANDWIDTH (VPNG_MCC + 23) +#define VPNC_MCCPEWRITEBANDWIDTH (VPNG_MCC + 24) +#define VPNC_MCCBLTWRITEBANDWIDTH (VPNG_MCC + 25) +#define VPNC_MCCSH0WRITEBANDWIDTH (VPNG_MCC + 26) +#define VPNC_MCCSH1WRITEBANDWIDTH (VPNG_MCC + 27) +#define VPNC_MCC_COUNT VPNC_MCCSH1WRITEBANDWIDTH - VPNG_MCC + +/* HW: MCZ Counters. 
*/ +#define VPNC_MCZREADREQ8BCOLORPIPE (VPNG_MCZ + 1) +#define VPNC_MCZREADREQ8BSOCOLORPIPE (VPNG_MCZ + 2) +#define VPNC_MCZWRITEREQ8BCOLORPIPE (VPNG_MCZ + 3) +#define VPNC_MCZREADREQSOCOLORPIPE (VPNG_MCZ + 4) +#define VPNC_MCZWRITEREQCOLORPIPE (VPNG_MCZ + 5) +#define VPNC_MCZREADREQ8BDEPTHPIPE (VPNG_MCZ + 6) +#define VPNC_MCZREADREQ8BSFDEPTHPIPE (VPNG_MCZ + 7) +#define VPNC_MCZWRITEREQ8BDEPTHPIPE (VPNG_MCZ + 8) +#define VPNC_MCZREADREQSFDEPTHPIPE (VPNG_MCZ + 9) +#define VPNC_MCZWRITEREQDEPTHPIPE (VPNG_MCZ + 10) +#define VPNC_MCZREADREQ8BOTHERPIPE (VPNG_MCZ + 11) +#define VPNC_MCZWRITEREQ8BOTHERPIPE (VPNG_MCZ + 12) +#define VPNC_MCZREADREQOTHERPIPE (VPNG_MCZ + 13) +#define VPNC_MCZWRITEREQOTHERPIPE (VPNG_MCZ + 14) +#define VPNC_MCZAXIMINLATENCY (VPNG_MCZ + 15) +#define VPNC_MCZAXIMAXLATENCY (VPNG_MCZ + 16) +#define VPNC_MCZAXITOTALLATENCY (VPNG_MCZ + 17) +#define VPNC_MCZAXISAMPLECOUNT (VPNG_MCZ + 18) +#define VPNC_MCZ_COUNT VPNC_MCZAXISAMPLECOUNT - VPNG_MCZ + +/* HW: HI Counters. */ +#define VPNC_HI0READ8BYTE (VPNG_HI + 1) +#define VPNC_HI0WRITE8BYTE (VPNG_HI + 2) +#define VPNC_HI0READREQ (VPNG_HI + 3) +#define VPNC_HI0WRITEREQ (VPNG_HI + 4) +#define VPNC_HI0AXIREADREQSTALL (VPNG_HI + 5) +#define VPNC_HI0AXIWRITEREQSTALL (VPNG_HI + 6) +#define VPNC_HI0AXIWRITEDATASTALL (VPNG_HI + 7) +#define VPNC_HI1READ8BYTE (VPNG_HI + 8) +#define VPNC_HI1WRITE8BYTE (VPNG_HI + 9) +#define VPNC_HI1READREQ (VPNG_HI + 10) +#define VPNC_HI1WRITEREQ (VPNG_HI + 11) +#define VPNC_HI1AXIREADREQSTALL (VPNG_HI + 12) +#define VPNC_HI1AXIWRITEREQSTALL (VPNG_HI + 13) +#define VPNC_HI1AXIWRITEDATASTALL (VPNG_HI + 14) +#define VPNC_HITOTALCYCLES (VPNG_HI + 15) +#define VPNC_HIIDLECYCLES (VPNG_HI + 16) +#define VPNC_HIREAD8BYTE (VPNG_HI + 17) +#define VPNC_HIWRITE8BYTE (VPNG_HI + 18) +#define VPNC_HI_COUNT VPNC_HIWRITE8BYTE - VPNG_HI + +/* HW: L2 Counters. 
*/ +#define VPNC_L2AXI0READREQCOUNT (VPNG_L2 + 1) +#define VPNC_L2AXI1READREQCOUNT (VPNG_L2 + 2) +#define VPNC_L2AXI0WRITEREQCOUNT (VPNG_L2 + 3) +#define VPNC_L2AXI1WRITEREQCOUNT (VPNG_L2 + 4) +#define VPNC_L2READTRANSREQBYAXI0 (VPNG_L2 + 5) +#define VPNC_L2READTRANSREQBYAXI1 (VPNG_L2 + 6) +#define VPNC_L2WRITETRANSREQBYAXI0 (VPNG_L2 + 7) +#define VPNC_L2WRITETRANSREQBYAXI1 (VPNG_L2 + 8) +#define VPNC_L2AXI0MINLATENCY (VPNG_L2 + 9) +#define VPNC_L2AXI0MAXLATENCY (VPNG_L2 + 10) +#define VPNC_L2AXI0TOTLATENCY (VPNG_L2 + 11) +#define VPNC_L2AXI0TOTREQCOUNT (VPNG_L2 + 12) +#define VPNC_L2AXI1MINLATENCY (VPNG_L2 + 13) +#define VPNC_L2AXI1MAXLATENCY (VPNG_L2 + 14) +#define VPNC_L2AXI1TOTLATENCY (VPNG_L2 + 15) +#define VPNC_L2AXI1TOTREQCOUNT (VPNG_L2 + 16) +#define VPNC_L2_COUNT VPNC_L2AXI1TOTREQCOUNT - VPNG_L2 + +/* HW: FE Counters. */ +#define VPNC_FEDRAWCOUNT (VPNG_FE + 1) +#define VPNC_FEOUTVERTEXCOUNT (VPNG_FE + 2) +#define VPNC_FECACHEMISSCOUNT (VPNG_FE + 3) +#define VPNC_FECACHELKCOUNT (VPNG_FE + 4) +#define VPNC_FESTALLCOUNT (VPNG_FE + 5) +#define VPNC_FESTARVECOUNT (VPNG_FE + 6) +#define VPNC_FEPROCESSCOUNT (VPNG_FE + 7) +#define VPNC_FE_COUNT VPNC_FEPROCESSCOUNT - VPNG_FE + +#define TOTAL_COUNTER_NUMBER (VPNC_FE_COUNT + VPNC_VS_COUNT + VPNC_PA_COUNT + VPNC_SE_COUNT + VPNC_RA_COUNT \ + + VPNC_PS_COUNT + VPNC_TX_COUNT + VPNC_PE_COUNT + VPNC_MCC_COUNT + VPNC_MCZ_COUNT \ + + VPNC_HI_COUNT + VPNC_L2_COUNT) + +#define TOTAL_MODULE_NUMBER 12 + +/* PROGRAM: Shader program counters. 
*/ +#define VPC_PVSINSTRCOUNT (VPG_PVS + PVS_INSTRCOUNT) +#define VPC_PVSALUINSTRCOUNT (VPG_PVS + PVS_ALUINSTRCOUNT) +#define VPC_PVSTEXINSTRCOUNT (VPG_PVS + PVS_TEXINSTRCOUNT) +#define VPC_PVSATTRIBCOUNT (VPG_PVS + PVS_ATTRIBCOUNT) +#define VPC_PVSUNIFORMCOUNT (VPG_PVS + PVS_UNIFORMCOUNT) +#define VPC_PVSFUNCTIONCOUNT (VPG_PVS + PVS_FUNCTIONCOUNT) +#define VPC_PVSSOURCE (VPG_PVS + PVS_SOURCE) + +#define VPC_PPSINSTRCOUNT (VPG_PPS + PPS_INSTRCOUNT) +#define VPC_PPSALUINSTRCOUNT (VPG_PPS + PPS_ALUINSTRCOUNT) +#define VPC_PPSTEXINSTRCOUNT (VPG_PPS + PPS_TEXINSTRCOUNT) +#define VPC_PPSATTRIBCOUNT (VPG_PPS + PPS_ATTRIBCOUNT) +#define VPC_PPSUNIFORMCOUNT (VPG_PPS + PPS_UNIFORMCOUNT) +#define VPC_PPSFUNCTIONCOUNT (VPG_PPS + PPS_FUNCTIONCOUNT) +#define VPC_PPSSOURCE (VPG_PPS + PPS_SOURCE) + +#define VPC_PROGRAMHANDLE (VPG_PROG + 1) + +#define VPC_ES30_DRAW_NO (VPG_ES30_DRAW + 1) +#define VPC_ES11_DRAW_NO (VPG_ES11_DRAW + 1) +#define VPC_ES30_GPU_NO (VPG_MULTI_GPU + 1) + + +#define MODULE_FRONT_END_COUNTER_NUM 0x5 +#define MODULE_VERTEX_SHADER_COUNTER_NUM 0x9 +#define MODULE_PRIMITIVE_ASSEMBLY_COUNTER_NUM 0xC +#define MODULE_SETUP_COUNTER_NUM 0xD +#define MODULE_RASTERIZER_COUNTER_NUM 0xE +#define MODULE_PIXEL_SHADER_COUNTER_NUM 0x9 +#define MODULE_TEXTURE_COUNTER_NUM 0x8 +#define MODULE_PIXEL_ENGINE_COUNTER_NUM 0x8 +#define MODULE_MEMORY_CONTROLLER_COLOR_COUNTER_NUM 0xC +#define MODULE_MEMORY_CONTROLLER_DEPTH_COUNTER_NUM 0xC +#define MODULE_HOST_INTERFACE0_COUNTER_NUM 0x9 +#define MODULE_HOST_INTERFACE1_COUNTER_NUM 0x7 +#define MODULE_GPUL2_CACHE_COUNTER_NUM 0xE +#define TOTAL_PROBE_NUMBER (MODULE_FRONT_END_COUNTER_NUM + MODULE_VERTEX_SHADER_COUNTER_NUM + MODULE_PRIMITIVE_ASSEMBLY_COUNTER_NUM \ + + MODULE_SETUP_COUNTER_NUM + MODULE_RASTERIZER_COUNTER_NUM + MODULE_PIXEL_SHADER_COUNTER_NUM \ + + MODULE_TEXTURE_COUNTER_NUM + MODULE_PIXEL_ENGINE_COUNTER_NUM + MODULE_MEMORY_CONTROLLER_COLOR_COUNTER_NUM \ + + MODULE_MEMORY_CONTROLLER_DEPTH_COUNTER_NUM + 
MODULE_HOST_INTERFACE0_COUNTER_NUM + MODULE_HOST_INTERFACE1_COUNTER_NUM \ + + MODULE_GPUL2_CACHE_COUNTER_NUM) + + +#ifdef ANDROID +#define DEFAULT_PROFILE_FILE_NAME "/sdcard/vprofiler.vpd" +#else +#define DEFAULT_PROFILE_FILE_NAME "vprofiler.vpd" +#endif + +#if gcdENDIAN_BIG +#define BIG_ENDIAN_TRANS_INT(x) ((gctUINT32)(\ + (((gctUINT32)(x) & (gctUINT32)0x000000FFUL) << 24) | \ + (((gctUINT32)(x) & (gctUINT32)0x0000FF00UL) << 8) | \ + (((gctUINT32)(x) & (gctUINT32)0x00FF0000UL) >> 8) | \ + (((gctUINT32)(x) & (gctUINT32)0xFF000000UL) >> 24))) +#else +#define BIG_ENDIAN_TRANS_INT(x) x +#endif + +/* Write a data value. */ +#define gcmWRITE_VALUE(IntData) \ + do \ + { \ + gceSTATUS status; \ + gctINT32 value = IntData; \ + value = BIG_ENDIAN_TRANS_INT(value); \ + gcmERR_BREAK(gcoPROFILER_Write(Profiler, gcmSIZEOF(value), &value)); \ + } \ + while (gcvFALSE) + +#define gcmWRITE_CONST(Const) \ + do \ + { \ + gceSTATUS status; \ + gctINT32 data = Const; \ + data = BIG_ENDIAN_TRANS_INT(data); \ + gcmERR_BREAK(gcoPROFILER_Write(Profiler, gcmSIZEOF(data), &data)); \ + } \ + while (gcvFALSE) + +#define gcmWRITE_COUNTER(Counter, Value) \ + gcmWRITE_CONST(Counter); \ + gcmWRITE_VALUE(Value) + +/* Write a data value. */ +#define gcmRECORD_VALUE(IntData) \ + do \ + { \ + gctINT32 value = IntData; \ + value = BIG_ENDIAN_TRANS_INT(value); \ + counterData[counterIndex++] = value; \ + } \ + while (gcvFALSE) + +#define gcmRECORD_CONST(Const) \ + do \ + { \ + gctINT32 data = Const; \ + data = BIG_ENDIAN_TRANS_INT(data); \ + counterData[counterIndex++] = data; \ + } \ + while (gcvFALSE) + +#define gcmRECORD_COUNTER(Counter, Value) \ + gcmRECORD_CONST(Counter); \ + gcmRECORD_VALUE(Value) + +/* Write a string value (char*). 
*/
+#define gcmWRITE_STRING(String) \
+    do \
+    { \
+        gceSTATUS status; \
+        gctINT32 length; \
+        length = (gctINT32) gcoOS_StrLen((gctSTRING)String, gcvNULL); \
+        length = BIG_ENDIAN_TRANS_INT(length); \
+        gcmERR_BREAK(gcoPROFILER_Write(Profiler, gcmSIZEOF(length), &length)); \
+        gcmERR_BREAK(gcoPROFILER_Write(Profiler, length, String)); \
+    } \
+    while (gcvFALSE)
+
+#define gcmWRITE_BUFFER(Size, Buffer) \
+    do \
+    { \
+        gceSTATUS status; \
+        gcmERR_BREAK(gcoPROFILER_Write(Profiler, Size, Buffer)); \
+    } \
+    while (gcvFALSE)
+
+/* Read one probe counter for core `CoreId` from the mapped counter buffer.
+ * Expects `memory`, `offset` and `CoreId` in the caller's scope.
+ * NOTE(review): the 0xdeaddead "invalid" sentinel is tested at core 0's
+ * slot (`memory + counterId + offset`) but the value is read from core
+ * CoreId's slot — confirm this asymmetry is intentional. */
+#define gcmGET_COUNTER(counter, counterId) \
+    do \
+    { \
+        if ((gctUINT32)*(memory + counterId + offset) == 0xdeaddead) \
+        { \
+            counter = 0xdeaddead; \
+        } \
+        else \
+        { \
+            gctUINT64_PTR Memory = memory; \
+            Memory += TOTAL_PROBE_NUMBER * CoreId; \
+            counter = (gctUINT32)*(Memory + counterId + offset); \
+        } \
+    } \
+    while (gcvFALSE)
+
+/* Unpack a latency probe: bits [23:12] hold the max latency and bits
+ * [11:0] the min; a min of 4095 is reported as 0 (presumably "no sample"
+ * — confirm against hardware documentation).  Same core-0 sentinel check
+ * as gcmGET_COUNTER. */
+#define gcmGET_LATENCY_COUNTER(minLatency, maxLatency, counterId) \
+    do \
+    { \
+        if ((gctUINT32)*(memory + counterId + offset) == 0xdeaddead) \
+        { \
+            minLatency = maxLatency = 0xdeaddead; \
+        } \
+        else \
+        { \
+            gctUINT64_PTR Memory = memory; \
+            Memory += TOTAL_PROBE_NUMBER * CoreId; \
+            maxLatency = (((gctUINT32)*(Memory + counterId + offset) & 0xfff000) >> 12); \
+            minLatency = ((gctUINT32)*(Memory + counterId + offset) & 0x000fff); \
+            if (minLatency == 4095) \
+                minLatency = 0; \
+        } \
+    } \
+    while (gcvFALSE)
+
+/* Hardware module identifiers for per-module counter groups. */
+typedef enum _gceCOUNTER
+{
+    gcvCOUNTER_FRONT_END,
+    gcvCOUNTER_VERTEX_SHADER,
+    gcvCOUNTER_PRIMITIVE_ASSEMBLY,
+    gcvCOUNTER_SETUP,
+    gcvCOUNTER_RASTERIZER,
+    gcvCOUNTER_PIXEL_SHADER,
+    gcvCOUNTER_TEXTURE,
+    gcvCOUNTER_PIXEL_ENGINE,
+    gcvCOUNTER_MEMORY_CONTROLLER_COLOR,
+    gcvCOUNTER_MEMORY_CONTROLLER_DEPTH,
+    gcvCOUNTER_HOST_INTERFACE0,
+    gcvCOUNTER_HOST_INTERFACE1,
+    gcvCOUNTER_GPUL2_CACHE,
+    gcvCOUNTER_COUNT
+}
+gceCOUNTER;
+
+/* API that produced the profiling session (first value is explicitly 1). */
+typedef enum _gceProfilerClient
+{
+    gcvCLIENT_OPENGLES11 = 1,
+    gcvCLIENT_OPENGLES,
+    gcvCLIENT_OPENGL,
+    gcvCLIENT_OPENVG,
+    gcvCLIENT_OPENCL,
+
gcvCLIENT_OPENVX,
+    gcvCLIENT_OPENVK,
+}
+gceProfilerClient;
+
+/* HW profile information: first half of the raw counter snapshot
+ * (FE/PE/SH/PA/SE/RA/TX modules). */
+typedef struct _gcsPROFILER_COUNTERS_PART1
+{
+    gctUINT32 gpuTotalRead64BytesPerFrame;
+    gctUINT32 gpuTotalWrite64BytesPerFrame;
+
+    /* FE */
+    gctUINT32 fe_draw_count;
+    gctUINT32 fe_out_vertex_count;
+    gctUINT32 fe_cache_miss_count;
+    gctUINT32 fe_cache_lk_count;
+    gctUINT32 fe_stall_count;
+    gctUINT32 fe_starve_count;
+    gctUINT32 fe_process_count;
+
+    /* PE */
+    gctUINT32 pe0_pixel_count_killed_by_color_pipe;
+    gctUINT32 pe0_pixel_count_killed_by_depth_pipe;
+    gctUINT32 pe0_pixel_count_drawn_by_color_pipe;
+    gctUINT32 pe0_pixel_count_drawn_by_depth_pipe;
+    gctUINT32 pe1_pixel_count_killed_by_color_pipe;
+    gctUINT32 pe1_pixel_count_killed_by_depth_pipe;
+    gctUINT32 pe1_pixel_count_drawn_by_color_pipe;
+    gctUINT32 pe1_pixel_count_drawn_by_depth_pipe;
+
+    /* SH */
+    gctUINT32 shader_cycle_count;
+    gctUINT32 vs_shader_cycle_count;
+    gctUINT32 ps_shader_cycle_count;
+    gctUINT32 ps_inst_counter;
+    gctUINT32 ps_rendered_pixel_counter;
+    gctUINT32 vs_inst_counter;
+    gctUINT32 vs_rendered_vertice_counter;
+    gctUINT32 vs_branch_inst_counter;
+    gctUINT32 vs_texld_inst_counter;
+    gctUINT32 ps_branch_inst_counter;
+    gctUINT32 ps_texld_inst_counter;
+    gctUINT32 vs_non_idle_starve_count;
+    gctUINT32 vs_starve_count;
+    gctUINT32 vs_stall_count;
+    gctUINT32 vs_process_count;
+    gctUINT32 ps_non_idle_starve_count;
+    gctUINT32 ps_starve_count;
+    gctUINT32 ps_stall_count;
+    gctUINT32 ps_process_count;
+
+    /* PA */
+    gctUINT32 pa_input_vtx_counter;
+    gctUINT32 pa_input_prim_counter;
+    gctUINT32 pa_output_prim_counter;
+    gctUINT32 pa_depth_clipped_counter;
+    gctUINT32 pa_trivial_rejected_counter;
+    gctUINT32 pa_culled_prim_counter;
+    gctUINT32 pa_droped_prim_counter;
+    gctUINT32 pa_frustum_clipped_prim_counter;
+    gctUINT32 pa_frustum_clipdroped_prim_counter;
+    gctUINT32 pa_non_idle_starve_count;
+    gctUINT32 pa_starve_count;
+    gctUINT32 pa_stall_count;
+    gctUINT32 pa_process_count;
+
+    /* SE
+    */
+    gctUINT32 se_culled_triangle_count;
+    gctUINT32 se_culled_lines_count;
+    gctUINT32 se_clipped_triangle_count;
+    gctUINT32 se_clipped_line_count;
+    gctUINT32 se_starve_count;
+    gctUINT32 se_stall_count;
+    gctUINT32 se_receive_triangle_count;
+    gctUINT32 se_send_triangle_count;
+    gctUINT32 se_receive_lines_count;
+    gctUINT32 se_send_lines_count;
+    gctUINT32 se_process_count;
+    gctUINT32 se_trivial_rejected_line_count;
+    gctUINT32 se_non_idle_starve_count;
+
+    /* RA */
+    gctUINT32 ra_input_prim_count;
+    gctUINT32 ra_total_quad_count;
+    gctUINT32 ra_valid_quad_count_after_early_z;
+    gctUINT32 ra_valid_pixel_count_to_render;
+    gctUINT32 ra_output_valid_quad_count;
+    gctUINT32 ra_output_valid_pixel_count;
+    gctUINT32 ra_pipe_cache_miss_counter;
+    gctUINT32 ra_pipe_hz_cache_miss_counter;
+    gctUINT32 ra_prefetch_cache_miss_counter;
+    gctUINT32 ra_prefetch_hz_cache_miss_counter;
+    gctUINT32 ra_eez_culled_counter;
+    gctUINT32 ra_non_idle_starve_count;
+    gctUINT32 ra_starve_count;
+    gctUINT32 ra_stall_count;
+    gctUINT32 ra_process_count;
+
+    /* TX */
+    gctUINT32 tx_total_bilinear_requests;
+    gctUINT32 tx_total_trilinear_requests;
+    gctUINT32 tx_total_discarded_texture_requests;
+    gctUINT32 tx_total_texture_requests;
+    gctUINT32 tx_mc0_miss_count;
+    gctUINT32 tx_mc0_request_byte_count;
+    gctUINT32 tx_mc1_miss_count;
+    gctUINT32 tx_mc1_request_byte_count;
+    gctUINT32 tx_non_idle_starve_count;
+    gctUINT32 tx_starve_count;
+    gctUINT32 tx_stall_count;
+    gctUINT32 tx_process_count;
+}
+gcsPROFILER_COUNTERS_PART1;
+
+/* Second half of the raw counter snapshot (MCC/MCZ/HI/L2 modules). */
+typedef struct _gcsPROFILER_COUNTERS_PART2
+{
+    /* MCC */
+    gctUINT32 mcc_total_read_req_8B_from_colorpipe;
+    gctUINT32 mcc_total_read_req_8B_sentout_from_colorpipe;
+    gctUINT32 mcc_total_write_req_8B_from_colorpipe;
+    gctUINT32 mcc_total_read_req_sentout_from_colorpipe;
+    gctUINT32 mcc_total_write_req_from_colorpipe;
+    gctUINT32 mcc_total_read_req_8B_from_depthpipe;
+    gctUINT32 mcc_total_read_req_8B_sentout_from_depthpipe;
+    gctUINT32 mcc_total_write_req_8B_from_depthpipe;
+    gctUINT32 mcc_total_read_req_sentout_from_depthpipe;
+    gctUINT32 mcc_total_write_req_from_depthpipe;
+    gctUINT32 mcc_total_read_req_8B_from_others;
+    gctUINT32 mcc_total_write_req_8B_from_others;
+    gctUINT32 mcc_total_read_req_from_others;
+    gctUINT32 mcc_total_write_req_from_others;
+    gctUINT32 mcc_axi_total_latency;
+    gctUINT32 mcc_axi_sample_count;
+    gctUINT32 mcc_axi_max_latency;
+    gctUINT32 mcc_axi_min_latency;
+    gctUINT32 mc_fe_read_bandwidth;
+    gctUINT32 mc_mmu_read_bandwidth;
+    gctUINT32 mc_blt_read_bandwidth;
+    gctUINT32 mc_sh0_read_bandwidth;
+    gctUINT32 mc_sh1_read_bandwidth;
+    gctUINT32 mc_pe_write_bandwidth;
+    gctUINT32 mc_blt_write_bandwidth;
+    gctUINT32 mc_sh0_write_bandwidth;
+    gctUINT32 mc_sh1_write_bandwidth;
+
+    /* MCZ */
+    gctUINT32 mcz_total_read_req_8B_from_colorpipe;
+    gctUINT32 mcz_total_read_req_8B_sentout_from_colorpipe;
+    gctUINT32 mcz_total_write_req_8B_from_colorpipe;
+    gctUINT32 mcz_total_read_req_sentout_from_colorpipe;
+    gctUINT32 mcz_total_write_req_from_colorpipe;
+    gctUINT32 mcz_total_read_req_8B_from_depthpipe;
+    gctUINT32 mcz_total_read_req_8B_sentout_from_depthpipe;
+    gctUINT32 mcz_total_write_req_8B_from_depthpipe;
+    gctUINT32 mcz_total_read_req_sentout_from_depthpipe;
+    gctUINT32 mcz_total_write_req_from_depthpipe;
+    gctUINT32 mcz_total_read_req_8B_from_others;
+    gctUINT32 mcz_total_write_req_8B_from_others;
+    gctUINT32 mcz_total_read_req_from_others;
+    gctUINT32 mcz_total_write_req_from_others;
+    gctUINT32 mcz_axi_total_latency;
+    gctUINT32 mcz_axi_sample_count;
+    gctUINT32 mcz_axi_max_latency;
+    gctUINT32 mcz_axi_min_latency;
+
+    /* HI */
+    gctUINT32 hi0_total_read_8B_count;
+    gctUINT32 hi0_total_write_8B_count;
+    gctUINT32 hi0_total_read_request_count;
+    gctUINT32 hi0_total_write_request_count;
+    gctUINT32 hi0_axi_cycles_read_request_stalled;
+    gctUINT32 hi0_axi_cycles_write_request_stalled;
+    gctUINT32 hi0_axi_cycles_write_data_stalled;
+    gctUINT32 hi1_total_read_8B_count;
+    gctUINT32 hi1_total_write_8B_count;
+    gctUINT32 hi1_total_read_request_count;
+    gctUINT32 hi1_total_write_request_count;
+    gctUINT32 hi1_axi_cycles_read_request_stalled;
+    gctUINT32 hi1_axi_cycles_write_request_stalled;
+    gctUINT32 hi1_axi_cycles_write_data_stalled;
+    gctUINT32 hi_total_cycle_count;
+    gctUINT32 hi_total_idle_cycle_count;
+    gctUINT32 hi_total_read_8B_count;
+    gctUINT32 hi_total_write_8B_count;
+
+    /* L2 */
+    gctUINT32 l2_total_axi0_read_request_count;
+    gctUINT32 l2_total_axi1_read_request_count;
+    gctUINT32 l2_total_axi0_write_request_count;
+    gctUINT32 l2_total_axi1_write_request_count;
+    gctUINT32 l2_total_read_transactions_request_by_axi0;
+    gctUINT32 l2_total_read_transactions_request_by_axi1;
+    gctUINT32 l2_total_write_transactions_request_by_axi0;
+    gctUINT32 l2_total_write_transactions_request_by_axi1;
+    gctUINT32 l2_axi0_minmax_latency;
+    gctUINT32 l2_axi0_min_latency;
+    gctUINT32 l2_axi0_max_latency;
+    gctUINT32 l2_axi0_total_latency;
+    gctUINT32 l2_axi0_total_request_count;
+    gctUINT32 l2_axi1_minmax_latency;
+    gctUINT32 l2_axi1_min_latency;
+    gctUINT32 l2_axi1_max_latency;
+    gctUINT32 l2_axi1_total_latency;
+    gctUINT32 l2_axi1_total_request_count;
+}
+gcsPROFILER_COUNTERS_PART2;
+
+/* Full counter snapshot: both halves together. */
+typedef struct _gcsPROFILER_COUNTERS
+{
+    gcsPROFILER_COUNTERS_PART1 counters_part1;
+    gcsPROFILER_COUNTERS_PART2 counters_part2;
+}
+gcsPROFILER_COUNTERS;
+
+#define NumOfPerFrameBuf 16
+#define NumOfPerDrawBuf 128
+
+/* Operation that a counter sample is attributed to. */
+typedef enum _gceCOUNTER_OPTYPE
+{
+    gcvCOUNTER_OP_DRAW = 0,
+    gcvCOUNTER_OP_BLT = 1,
+    gcvCOUNTER_OP_COMPUTE = 2,
+    gcvCOUNTER_OP_RS = 3,
+    gcvCOUNTER_OP_FINISH = 4,
+    gcvCOUNTER_OP_FRAME = 5,
+    gcvCOUNTER_OP_NONE = 6
+}
+gceCOUNTER_OPTYPE;
+
+typedef struct gcsCounterBuffer * gcsCounterBuffer_PTR;
+
+/* One node of the doubly linked list of counter capture buffers. */
+struct gcsCounterBuffer
+{
+    gcsPROFILER_COUNTERS *counters;
+    /* NOTE(review): "couterBufobj" looks like a typo for "counterBufobj";
+     * renaming would break external users of this public header. */
+    gctHANDLE couterBufobj;
+    gctUINT32 probeAddress;
+    gctPOINTER logicalAddress;
+    gceCOUNTER_OPTYPE opType;
+    gctUINT32 opID;
+    gctUINT32 startPos;
+    gctUINT32 endPos;
+    gctUINT32 dataSize;
+    gctBOOL available;
+    gctBOOL needDump;
+    gcsCounterBuffer_PTR next;
+    gcsCounterBuffer_PTR prev;
+};
+
+typedef struct _gcoPROFILER * gcoPROFILER;
+
+/* Per-context profiler state. */
+struct _gcoPROFILER
+{
+    gctBOOL enable;
+    gctBOOL enablePrint;
+    gctBOOL disableProbe;
+    gctBOOL probeMode;
+
+    gctFILE file;
+    gctCHAR* fileName;
+
+    gcsCounterBuffer_PTR counterBuf;
+    gctUINT32 bufferCount;
+
+    gctBOOL perDrawMode;
+    gctBOOL needDump;
+    gctBOOL counterEnable;
+
+    gceProfilerClient profilerClient;
+
+    /*query some features from hw*/
+    gctUINT32 coreCount;
+    gctUINT32 shaderCoreCount;
+    gctBOOL bHalti4;
+    gctBOOL psRenderPixelFix;
+    gctBOOL axiBus128bits;
+};
+
+typedef enum _gceProbeStatus
+{
+    gcvPROBE_Disabled = 0,
+    gcvPROBE_Paused = 1,
+    gcvPROBE_Enabled = 2,
+}
+gceProbeStatus;
+
+typedef enum _gceProbeCmd
+{
+    gcvPROBECMD_BEGIN = 0,
+    gcvPROBECMD_PAUSE = 1,
+    gcvPROBECMD_RESUME = 2,
+    gcvPROBECMD_END = 3,
+}
+gceProbeCmd;
+
+typedef struct _gcsPROBESTATES
+{
+    gceProbeStatus status;
+    gctUINT32 probeAddress;
+}gcsPROBESTATES;
+
+/* Construct a Profiler object per context.
*/
+gceSTATUS
+gcoPROFILER_Construct(
+    OUT gcoPROFILER * Profiler
+    );
+
+/* Destroy a profiler object created by gcoPROFILER_Construct. */
+gceSTATUS
+gcoPROFILER_Destroy(
+    IN gcoPROFILER Profiler
+    );
+
+/* Turn profiling on for this object. */
+gceSTATUS
+gcoPROFILER_Enable(
+    IN gcoPROFILER Profiler
+    );
+
+/* Turn profiling off (global — takes no profiler object). */
+gceSTATUS
+gcoPROFILER_Disable(
+    void
+    );
+
+/* Mark the start of a profiled operation of the given type. */
+gceSTATUS
+gcoPROFILER_Begin(
+    IN gcoPROFILER Profiler,
+    IN gceCOUNTER_OPTYPE operationType
+    );
+
+/* Mark the end of a profiled operation; OpID tags the operation instance. */
+gceSTATUS
+gcoPROFILER_End(
+    IN gcoPROFILER Profiler,
+    IN gceCOUNTER_OPTYPE operationType,
+    IN gctUINT32 OpID
+    );
+
+/* Append ByteCount bytes of Data to the profiler output stream. */
+gceSTATUS
+gcoPROFILER_Write(
+    IN gcoPROFILER Profiler,
+    IN gctSIZE_T ByteCount,
+    IN gctCONST_POINTER Data
+    );
+
+/* Flush buffered profiler output. */
+gceSTATUS
+gcoPROFILER_Flush(
+    IN gcoPROFILER Profiler
+    );
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __gc_hal_profiler_h_ */
+
+
diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_raster.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_raster.h
new file mode 100644
index 000000000000..988577ff9285
--- /dev/null
+++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_raster.h
@@ -0,0 +1,1109 @@
+/****************************************************************************
+*
+* The MIT License (MIT)
+*
+* Copyright (c) 2014 - 2018 Vivante Corporation
+*
+* Permission is hereby granted, free of charge, to any person obtaining a
+* copy of this software and associated documentation files (the "Software"),
+* to deal in the Software without restriction, including without limitation
+* the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the
+* Software is furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+*
+*****************************************************************************/
+
+
+#ifndef __gc_hal_raster_h_
+#define __gc_hal_raster_h_
+
+#include "gc_hal_enum.h"
+#include "gc_hal_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/******************************************************************************\
+****************************** Object Declarations *****************************
+\******************************************************************************/
+
+typedef struct _gcoBRUSH * gcoBRUSH;
+typedef struct _gcoBRUSH_CACHE * gcoBRUSH_CACHE;
+
+/******************************************************************************\
+******************************** gcoBRUSH Object *******************************
+\******************************************************************************/
+
+/* Create a new solid color gcoBRUSH object.
+ * NOTE(review): the Brush out-parameter below carries no IN/OUT annotation,
+ * unlike the rest of this header — presumably OUT; confirm. */
+gceSTATUS
+gcoBRUSH_ConstructSingleColor(
+    IN gcoHAL Hal,
+    IN gctUINT32 ColorConvert,
+    IN gctUINT32 Color,
+    IN gctUINT64 Mask,
+    gcoBRUSH * Brush
+    );
+
+/* Create a new monochrome gcoBRUSH object. */
+gceSTATUS
+gcoBRUSH_ConstructMonochrome(
+    IN gcoHAL Hal,
+    IN gctUINT32 OriginX,
+    IN gctUINT32 OriginY,
+    IN gctUINT32 ColorConvert,
+    IN gctUINT32 FgColor,
+    IN gctUINT32 BgColor,
+    IN gctUINT64 Bits,
+    IN gctUINT64 Mask,
+    gcoBRUSH * Brush
+    );
+
+/* Create a color gcoBRUSH object. */
+gceSTATUS
+gcoBRUSH_ConstructColor(
+    IN gcoHAL Hal,
+    IN gctUINT32 OriginX,
+    IN gctUINT32 OriginY,
+    IN gctPOINTER Address,
+    IN gceSURF_FORMAT Format,
+    IN gctUINT64 Mask,
+    gcoBRUSH * Brush
+    );
+
+/* Destroy an gcoBRUSH object. */
+gceSTATUS
+gcoBRUSH_Destroy(
+    IN gcoBRUSH Brush
+    );
+
+/******************************************************************************\
+******************************** gcoSURF Object *******************************
+\******************************************************************************/
+
+/* Set clipping rectangle.
*/
+gceSTATUS
+gcoSURF_SetClipping(
+    IN gcoSURF Surface
+    );
+
+/* Clear one or more rectangular areas. */
+gceSTATUS
+gcoSURF_Clear2D(
+    IN gcoSURF DestSurface,
+    IN gctUINT32 RectCount,
+    IN gcsRECT_PTR DestRect,
+    IN gctUINT32 LoColor,
+    IN gctUINT32 HiColor
+    );
+
+/* Draw one or more Bresenham lines. */
+gceSTATUS
+gcoSURF_Line(
+    IN gcoSURF Surface,
+    IN gctUINT32 LineCount,
+    IN gcsRECT_PTR Position,
+    IN gcoBRUSH Brush,
+    IN gctUINT8 FgRop,
+    IN gctUINT8 BgRop
+    );
+
+/* Generic rectangular blit. */
+gceSTATUS
+gcoSURF_Blit(
+    IN OPTIONAL gcoSURF SrcSurface,
+    IN gcoSURF DestSurface,
+    IN gctUINT32 RectCount,
+    IN OPTIONAL gcsRECT_PTR SrcRect,
+    IN gcsRECT_PTR DestRect,
+    IN OPTIONAL gcoBRUSH Brush,
+    IN gctUINT8 FgRop,
+    IN gctUINT8 BgRop,
+    IN OPTIONAL gceSURF_TRANSPARENCY Transparency,
+    IN OPTIONAL gctUINT32 TransparencyColor,
+    IN OPTIONAL gctPOINTER Mask,
+    IN OPTIONAL gceSURF_MONOPACK MaskPack
+    );
+
+/* Monochrome blit. */
+gceSTATUS
+gcoSURF_MonoBlit(
+    IN gcoSURF DestSurface,
+    IN gctPOINTER Source,
+    IN gceSURF_MONOPACK SourcePack,
+    IN gcsPOINT_PTR SourceSize,
+    IN gcsPOINT_PTR SourceOrigin,
+    IN gcsRECT_PTR DestRect,
+    IN OPTIONAL gcoBRUSH Brush,
+    IN gctUINT8 FgRop,
+    IN gctUINT8 BgRop,
+    IN gctBOOL ColorConvert,
+    IN gctUINT8 MonoTransparency,
+    IN gceSURF_TRANSPARENCY Transparency,
+    IN gctUINT32 FgColor,
+    IN gctUINT32 BgColor
+    );
+
+/* Filter blit. */
+gceSTATUS
+gcoSURF_FilterBlit(
+    IN gcoSURF SrcSurface,
+    IN gcoSURF DestSurface,
+    IN gcsRECT_PTR SrcRect,
+    IN gcsRECT_PTR DestRect,
+    IN gcsRECT_PTR DestSubRect
+    );
+
+/* Enable alpha blending engine in the hardware and disengage the ROP engine.
+*/
+gceSTATUS
+gcoSURF_EnableAlphaBlend(
+    IN gcoSURF Surface,
+    IN gctUINT8 SrcGlobalAlphaValue,
+    IN gctUINT8 DstGlobalAlphaValue,
+    IN gceSURF_PIXEL_ALPHA_MODE SrcAlphaMode,
+    IN gceSURF_PIXEL_ALPHA_MODE DstAlphaMode,
+    IN gceSURF_GLOBAL_ALPHA_MODE SrcGlobalAlphaMode,
+    IN gceSURF_GLOBAL_ALPHA_MODE DstGlobalAlphaMode,
+    IN gceSURF_BLEND_FACTOR_MODE SrcFactorMode,
+    IN gceSURF_BLEND_FACTOR_MODE DstFactorMode,
+    IN gceSURF_PIXEL_COLOR_MODE SrcColorMode,
+    IN gceSURF_PIXEL_COLOR_MODE DstColorMode
+    );
+
+/* Disable alpha blending engine in the hardware and engage the ROP engine. */
+gceSTATUS
+gcoSURF_DisableAlphaBlend(
+    IN gcoSURF Surface
+    );
+
+/* Set the dithering state for the surface. */
+gceSTATUS
+gcoSURF_SetDither(
+    IN gcoSURF Surface,
+    IN gctBOOL Dither
+    );
+
+/* Select Surface (with Rotation) as the 2D engine source. */
+gceSTATUS
+gcoSURF_Set2DSource(
+    gcoSURF Surface,
+    gceSURF_ROTATION Rotation
+    );
+
+/* Select Surface (with Rotation) as the 2D engine target. */
+gceSTATUS
+gcoSURF_Set2DTarget(
+    gcoSURF Surface,
+    gceSURF_ROTATION Rotation
+    );
+
+/******************************************************************************\
+********************************** gco2D Object *********************************
+\******************************************************************************/
+
+/* Construct a new gco2D object. */
+gceSTATUS
+gco2D_Construct(
+    IN gcoHAL Hal,
+    OUT gco2D * Hardware
+    );
+
+/* Destroy an gco2D object. */
+gceSTATUS
+gco2D_Destroy(
+    IN gco2D Hardware
+    );
+
+/* Sets the maximum number of brushes in the brush cache. */
+gceSTATUS
+gco2D_SetBrushLimit(
+    IN gco2D Hardware,
+    IN gctUINT MaxCount
+    );
+
+/* Flush the brush. */
+gceSTATUS
+gco2D_FlushBrush(
+    IN gco2D Engine,
+    IN gcoBRUSH Brush,
+    IN gceSURF_FORMAT Format
+    );
+
+/* Program the specified solid color brush.
+*/
+gceSTATUS
+gco2D_LoadSolidBrush(
+    IN gco2D Engine,
+    IN gceSURF_FORMAT Format,
+    IN gctUINT32 ColorConvert,
+    IN gctUINT32 Color,
+    IN gctUINT64 Mask
+    );
+
+gceSTATUS
+gco2D_LoadMonochromeBrush(
+    IN gco2D Engine,
+    IN gctUINT32 OriginX,
+    IN gctUINT32 OriginY,
+    IN gctUINT32 ColorConvert,
+    IN gctUINT32 FgColor,
+    IN gctUINT32 BgColor,
+    IN gctUINT64 Bits,
+    IN gctUINT64 Mask
+    );
+
+gceSTATUS
+gco2D_LoadColorBrush(
+    IN gco2D Engine,
+    IN gctUINT32 OriginX,
+    IN gctUINT32 OriginY,
+    IN gctUINT32 Address,
+    IN gceSURF_FORMAT Format,
+    IN gctUINT64 Mask
+    );
+
+/* Configure monochrome source. */
+gceSTATUS
+gco2D_SetMonochromeSource(
+    IN gco2D Engine,
+    IN gctBOOL ColorConvert,
+    IN gctUINT8 MonoTransparency,
+    IN gceSURF_MONOPACK DataPack,
+    IN gctBOOL CoordRelative,
+    IN gceSURF_TRANSPARENCY Transparency,
+    IN gctUINT32 FgColor,
+    IN gctUINT32 BgColor
+    );
+
+/* Configure color source. */
+gceSTATUS
+gco2D_SetColorSource(
+    IN gco2D Engine,
+    IN gctUINT32 Address,
+    IN gctUINT32 Stride,
+    IN gceSURF_FORMAT Format,
+    IN gceSURF_ROTATION Rotation,
+    IN gctUINT32 SurfaceWidth,
+    IN gctBOOL CoordRelative,
+    IN gceSURF_TRANSPARENCY Transparency,
+    IN gctUINT32 TransparencyColor
+    );
+
+/* Configure color source extension for full rotation. */
+gceSTATUS
+gco2D_SetColorSourceEx(
+    IN gco2D Engine,
+    IN gctUINT32 Address,
+    IN gctUINT32 Stride,
+    IN gceSURF_FORMAT Format,
+    IN gceSURF_ROTATION Rotation,
+    IN gctUINT32 SurfaceWidth,
+    IN gctUINT32 SurfaceHeight,
+    IN gctBOOL CoordRelative,
+    IN gceSURF_TRANSPARENCY Transparency,
+    IN gctUINT32 TransparencyColor
+    );
+
+/* Same as gco2D_SetColorSourceEx, but with better 64bit SW-path support.
+** Please do NOT export the API now.
+*/
+gceSTATUS
+gco2D_SetColorSource64(
+    IN gco2D Engine,
+    IN gctUINT32 Address,
+    IN gctPOINTER Logical,
+    IN gctUINT32 Stride,
+    IN gceSURF_FORMAT Format,
+    IN gceSURF_ROTATION Rotation,
+    IN gctUINT32 SurfaceWidth,
+    IN gctUINT32 SurfaceHeight,
+    IN gctBOOL CoordRelative,
+    IN gceSURF_TRANSPARENCY Transparency,
+    IN gctUINT32 TransparencyColor
+    );
+
+/* Configure color source. */
+gceSTATUS
+gco2D_SetColorSourceAdvanced(
+    IN gco2D Engine,
+    IN gctUINT32 Address,
+    IN gctUINT32 Stride,
+    IN gceSURF_FORMAT Format,
+    IN gceSURF_ROTATION Rotation,
+    IN gctUINT32 SurfaceWidth,
+    IN gctUINT32 SurfaceHeight,
+    IN gctBOOL CoordRelative
+    );
+
+gceSTATUS
+gco2D_SetColorSourceN(
+    IN gco2D Engine,
+    IN gctUINT32 Address,
+    IN gctUINT32 Stride,
+    IN gceSURF_FORMAT Format,
+    IN gceSURF_ROTATION Rotation,
+    IN gctUINT32 SurfaceWidth,
+    IN gctUINT32 SurfaceHeight,
+    IN gctUINT32 SurfaceNumber
+    );
+
+/* Configure masked color source. */
+gceSTATUS
+gco2D_SetMaskedSource(
+    IN gco2D Engine,
+    IN gctUINT32 Address,
+    IN gctUINT32 Stride,
+    IN gceSURF_FORMAT Format,
+    IN gctBOOL CoordRelative,
+    IN gceSURF_MONOPACK MaskPack
+    );
+
+/* Configure masked color source extension for full rotation. */
+gceSTATUS
+gco2D_SetMaskedSourceEx(
+    IN gco2D Engine,
+    IN gctUINT32 Address,
+    IN gctUINT32 Stride,
+    IN gceSURF_FORMAT Format,
+    IN gctBOOL CoordRelative,
+    IN gceSURF_MONOPACK MaskPack,
+    IN gceSURF_ROTATION Rotation,
+    IN gctUINT32 SurfaceWidth,
+    IN gctUINT32 SurfaceHeight
+    );
+
+/* Same as gco2D_SetMaskedSourceEx, but with better 64bit SW-path support.
+** Please do NOT export the API now.
+*/
+gceSTATUS
+gco2D_SetMaskedSource64(
+    IN gco2D Engine,
+    IN gctUINT32 Address,
+    IN gctPOINTER Logical,
+    IN gctUINT32 Stride,
+    IN gceSURF_FORMAT Format,
+    IN gctBOOL CoordRelative,
+    IN gceSURF_MONOPACK MaskPack,
+    IN gceSURF_ROTATION Rotation,
+    IN gctUINT32 SurfaceWidth,
+    IN gctUINT32 SurfaceHeight
+    );
+
+/* Setup the source rectangle.
*/ +gceSTATUS +gco2D_SetSource( + IN gco2D Engine, + IN gcsRECT_PTR SrcRect + ); + +/* Set clipping rectangle. */ +gceSTATUS +gco2D_SetClipping( + IN gco2D Engine, + IN gcsRECT_PTR Rect + ); + +/* Configure destination. */ +gceSTATUS +gco2D_SetTarget( + IN gco2D Engine, + IN gctUINT32 Address, + IN gctUINT32 Stride, + IN gceSURF_ROTATION Rotation, + IN gctUINT32 SurfaceWidth + ); + +/* Configure destination extension for full rotation. */ +gceSTATUS +gco2D_SetTargetEx( + IN gco2D Engine, + IN gctUINT32 Address, + IN gctUINT32 Stride, + IN gceSURF_ROTATION Rotation, + IN gctUINT32 SurfaceWidth, + IN gctUINT32 SurfaceHeight + ); + +/* Same as gco2D_SetTargetEx, but with better 64bit SW-path support. +** Please do NOT export the API now. +*/ +gceSTATUS +gco2D_SetTarget64( + IN gco2D Engine, + IN gctUINT32 Address, + IN gctPOINTER Logical, + IN gctUINT32 Stride, + IN gceSURF_ROTATION Rotation, + IN gctUINT32 SurfaceWidth, + IN gctUINT32 SurfaceHeight + ); + + +/* Calculate and program the stretch factors. */ +gceSTATUS +gco2D_CalcStretchFactor( + IN gco2D Engine, + IN gctINT32 SrcSize, + IN gctINT32 DestSize, + OUT gctUINT32_PTR Factor + ); + +gceSTATUS +gco2D_SetStretchFactors( + IN gco2D Engine, + IN gctUINT32 HorFactor, + IN gctUINT32 VerFactor + ); + +/* Calculate and program the stretch factors based on the rectangles. */ +gceSTATUS +gco2D_SetStretchRectFactors( + IN gco2D Engine, + IN gcsRECT_PTR SrcRect, + IN gcsRECT_PTR DestRect + ); + +/* Create a new solid color gcoBRUSH object. */ +gceSTATUS +gco2D_ConstructSingleColorBrush( + IN gco2D Engine, + IN gctUINT32 ColorConvert, + IN gctUINT32 Color, + IN gctUINT64 Mask, + gcoBRUSH * Brush + ); + +/* Create a new monochrome gcoBRUSH object. 
*/ +gceSTATUS +gco2D_ConstructMonochromeBrush( + IN gco2D Engine, + IN gctUINT32 OriginX, + IN gctUINT32 OriginY, + IN gctUINT32 ColorConvert, + IN gctUINT32 FgColor, + IN gctUINT32 BgColor, + IN gctUINT64 Bits, + IN gctUINT64 Mask, + gcoBRUSH * Brush + ); + +/* Create a color gcoBRUSH object. */ +gceSTATUS +gco2D_ConstructColorBrush( + IN gco2D Engine, + IN gctUINT32 OriginX, + IN gctUINT32 OriginY, + IN gctPOINTER Address, + IN gceSURF_FORMAT Format, + IN gctUINT64 Mask, + gcoBRUSH * Brush + ); + +/* Clear one or more rectangular areas. */ +gceSTATUS +gco2D_Clear( + IN gco2D Engine, + IN gctUINT32 RectCount, + IN gcsRECT_PTR Rect, + IN gctUINT32 Color32, + IN gctUINT8 FgRop, + IN gctUINT8 BgRop, + IN gceSURF_FORMAT DestFormat + ); + +/* Draw one or more Bresenham lines. */ +gceSTATUS +gco2D_Line( + IN gco2D Engine, + IN gctUINT32 LineCount, + IN gcsRECT_PTR Position, + IN gcoBRUSH Brush, + IN gctUINT8 FgRop, + IN gctUINT8 BgRop, + IN gceSURF_FORMAT DestFormat + ); + +/* Draw one or more Bresenham lines based on the 32-bit color. */ +gceSTATUS +gco2D_ColorLine( + IN gco2D Engine, + IN gctUINT32 LineCount, + IN gcsRECT_PTR Position, + IN gctUINT32 Color32, + IN gctUINT8 FgRop, + IN gctUINT8 BgRop, + IN gceSURF_FORMAT DestFormat + ); + +/* Generic blit. */ +gceSTATUS +gco2D_Blit( + IN gco2D Engine, + IN gctUINT32 RectCount, + IN gcsRECT_PTR Rect, + IN gctUINT8 FgRop, + IN gctUINT8 BgRop, + IN gceSURF_FORMAT DestFormat + ); + +gceSTATUS +gco2D_Blend( + IN gco2D Engine, + IN gctUINT32 SrcCount, + IN gctUINT32 RectCount, + IN gcsRECT_PTR Rect, + IN gctUINT8 FgRop, + IN gctUINT8 BgRop, + IN gceSURF_FORMAT DestFormat + ); + +/* Batch blit. */ +gceSTATUS +gco2D_BatchBlit( + IN gco2D Engine, + IN gctUINT32 RectCount, + IN gcsRECT_PTR SrcRect, + IN gcsRECT_PTR DestRect, + IN gctUINT8 FgRop, + IN gctUINT8 BgRop, + IN gceSURF_FORMAT DestFormat + ); + +/* Stretch blit. 
*/ +gceSTATUS +gco2D_StretchBlit( + IN gco2D Engine, + IN gctUINT32 RectCount, + IN gcsRECT_PTR Rect, + IN gctUINT8 FgRop, + IN gctUINT8 BgRop, + IN gceSURF_FORMAT DestFormat + ); + +/* Monochrome blit. */ +gceSTATUS +gco2D_MonoBlit( + IN gco2D Engine, + IN gctPOINTER StreamBits, + IN gcsPOINT_PTR StreamSize, + IN gcsRECT_PTR StreamRect, + IN gceSURF_MONOPACK SrcStreamPack, + IN gceSURF_MONOPACK DestStreamPack, + IN gcsRECT_PTR DestRect, + IN gctUINT32 FgRop, + IN gctUINT32 BgRop, + IN gceSURF_FORMAT DestFormat + ); + +gceSTATUS +gco2D_MonoBlitEx( + IN gco2D Engine, + IN gctPOINTER StreamBits, + IN gctINT32 StreamStride, + IN gctINT32 StreamWidth, + IN gctINT32 StreamHeight, + IN gctINT32 StreamX, + IN gctINT32 StreamY, + IN gctUINT32 FgColor, + IN gctUINT32 BgColor, + IN gcsRECT_PTR SrcRect, + IN gcsRECT_PTR DstRect, + IN gctUINT8 FgRop, + IN gctUINT8 BgRop + ); + +/* Set kernel size. */ +gceSTATUS +gco2D_SetKernelSize( + IN gco2D Engine, + IN gctUINT8 HorKernelSize, + IN gctUINT8 VerKernelSize + ); + +/* Set filter type. */ +gceSTATUS +gco2D_SetFilterType( + IN gco2D Engine, + IN gceFILTER_TYPE FilterType + ); + +/* Set the filter kernel by user. */ +gceSTATUS +gco2D_SetUserFilterKernel( + IN gco2D Engine, + IN gceFILTER_PASS_TYPE PassType, + IN gctUINT16_PTR KernelArray + ); + +/* Select the pass(es) to be done for user defined filter. */ +gceSTATUS +gco2D_EnableUserFilterPasses( + IN gco2D Engine, + IN gctBOOL HorPass, + IN gctBOOL VerPass + ); + +/* Frees the temporary buffer allocated by filter blit operation. */ +gceSTATUS +gco2D_FreeFilterBuffer( + IN gco2D Engine + ); + +/* Filter blit. 
*/ +gceSTATUS +gco2D_FilterBlit( + IN gco2D Engine, + IN gctUINT32 SrcAddress, + IN gctUINT SrcStride, + IN gctUINT32 SrcUAddress, + IN gctUINT SrcUStride, + IN gctUINT32 SrcVAddress, + IN gctUINT SrcVStride, + IN gceSURF_FORMAT SrcFormat, + IN gceSURF_ROTATION SrcRotation, + IN gctUINT32 SrcSurfaceWidth, + IN gcsRECT_PTR SrcRect, + IN gctUINT32 DestAddress, + IN gctUINT DestStride, + IN gceSURF_FORMAT DestFormat, + IN gceSURF_ROTATION DestRotation, + IN gctUINT32 DestSurfaceWidth, + IN gcsRECT_PTR DestRect, + IN gcsRECT_PTR DestSubRect + ); + +/* Filter blit extension for full rotation. */ +gceSTATUS +gco2D_FilterBlitEx( + IN gco2D Engine, + IN gctUINT32 SrcAddress, + IN gctUINT SrcStride, + IN gctUINT32 SrcUAddress, + IN gctUINT SrcUStride, + IN gctUINT32 SrcVAddress, + IN gctUINT SrcVStride, + IN gceSURF_FORMAT SrcFormat, + IN gceSURF_ROTATION SrcRotation, + IN gctUINT32 SrcSurfaceWidth, + IN gctUINT32 SrcSurfaceHeight, + IN gcsRECT_PTR SrcRect, + IN gctUINT32 DestAddress, + IN gctUINT DestStride, + IN gceSURF_FORMAT DestFormat, + IN gceSURF_ROTATION DestRotation, + IN gctUINT32 DestSurfaceWidth, + IN gctUINT32 DestSurfaceHeight, + IN gcsRECT_PTR DestRect, + IN gcsRECT_PTR DestSubRect + ); + +gceSTATUS +gco2D_FilterBlitEx2( + IN gco2D Engine, + IN gctUINT32_PTR SrcAddresses, + IN gctUINT32 SrcAddressNum, + IN gctUINT32_PTR SrcStrides, + IN gctUINT32 SrcStrideNum, + IN gceTILING SrcTiling, + IN gceSURF_FORMAT SrcFormat, + IN gceSURF_ROTATION SrcRotation, + IN gctUINT32 SrcSurfaceWidth, + IN gctUINT32 SrcSurfaceHeight, + IN gcsRECT_PTR SrcRect, + IN gctUINT32_PTR DestAddresses, + IN gctUINT32 DestAddressNum, + IN gctUINT32_PTR DestStrides, + IN gctUINT32 DestStrideNum, + IN gceTILING DestTiling, + IN gceSURF_FORMAT DestFormat, + IN gceSURF_ROTATION DestRotation, + IN gctUINT32 DestSurfaceWidth, + IN gctUINT32 DestSurfaceHeight, + IN gcsRECT_PTR DestRect, + IN gcsRECT_PTR DestSubRect + ); + +/* Enable alpha blending engine in the hardware and disengage the ROP 
engine. */ +gceSTATUS +gco2D_EnableAlphaBlend( + IN gco2D Engine, + IN gctUINT8 SrcGlobalAlphaValue, + IN gctUINT8 DstGlobalAlphaValue, + IN gceSURF_PIXEL_ALPHA_MODE SrcAlphaMode, + IN gceSURF_PIXEL_ALPHA_MODE DstAlphaMode, + IN gceSURF_GLOBAL_ALPHA_MODE SrcGlobalAlphaMode, + IN gceSURF_GLOBAL_ALPHA_MODE DstGlobalAlphaMode, + IN gceSURF_BLEND_FACTOR_MODE SrcFactorMode, + IN gceSURF_BLEND_FACTOR_MODE DstFactorMode, + IN gceSURF_PIXEL_COLOR_MODE SrcColorMode, + IN gceSURF_PIXEL_COLOR_MODE DstColorMode + ); + +/* Enable alpha blending engine in the hardware. */ +gceSTATUS +gco2D_EnableAlphaBlendAdvanced( + IN gco2D Engine, + IN gceSURF_PIXEL_ALPHA_MODE SrcAlphaMode, + IN gceSURF_PIXEL_ALPHA_MODE DstAlphaMode, + IN gceSURF_GLOBAL_ALPHA_MODE SrcGlobalAlphaMode, + IN gceSURF_GLOBAL_ALPHA_MODE DstGlobalAlphaMode, + IN gceSURF_BLEND_FACTOR_MODE SrcFactorMode, + IN gceSURF_BLEND_FACTOR_MODE DstFactorMode + ); + +/* Enable alpha blending engine with Porter Duff rule. */ +gceSTATUS +gco2D_SetPorterDuffBlending( + IN gco2D Engine, + IN gce2D_PORTER_DUFF_RULE Rule + ); + +/* Disable alpha blending engine in the hardware and engage the ROP engine. */ +gceSTATUS +gco2D_DisableAlphaBlend( + IN gco2D Engine + ); + +/* Retrieve the maximum number of 32-bit data chunks for a single DE command. */ +gctUINT32 +gco2D_GetMaximumDataCount( + void + ); + +/* Retrieve the maximum number of rectangles, that can be passed in a single DE command. */ +gctUINT32 +gco2D_GetMaximumRectCount( + void + ); + +/* Returns the pixel alignment of the surface. */ +gceSTATUS +gco2D_GetPixelAlignment( + gceSURF_FORMAT Format, + gcsPOINT_PTR Alignment + ); + +/* Retrieve monochrome stream pack size. */ +gceSTATUS +gco2D_GetPackSize( + IN gceSURF_MONOPACK StreamPack, + OUT gctUINT32 * PackWidth, + OUT gctUINT32 * PackHeight + ); + +/* Flush the 2D pipeline. */ +gceSTATUS +gco2D_Flush( + IN gco2D Engine + ); + +/* Load 256-entry color table for INDEX8 source surfaces. 
*/ +gceSTATUS +gco2D_LoadPalette( + IN gco2D Engine, + IN gctUINT FirstIndex, + IN gctUINT IndexCount, + IN gctPOINTER ColorTable, + IN gctBOOL ColorConvert + ); + +/* Enable/disable 2D BitBlt mirroring. */ +gceSTATUS +gco2D_SetBitBlitMirror( + IN gco2D Engine, + IN gctBOOL HorizontalMirror, + IN gctBOOL VerticalMirror + ); + +/* + * Set the transparency for source, destination and pattern. + * It also enables or disables the DFB color key mode. + */ +gceSTATUS +gco2D_SetTransparencyAdvancedEx( + IN gco2D Engine, + IN gce2D_TRANSPARENCY SrcTransparency, + IN gce2D_TRANSPARENCY DstTransparency, + IN gce2D_TRANSPARENCY PatTransparency, + IN gctBOOL EnableDFBColorKeyMode + ); + +/* Set the transparency for source, destination and pattern. */ +gceSTATUS +gco2D_SetTransparencyAdvanced( + IN gco2D Engine, + IN gce2D_TRANSPARENCY SrcTransparency, + IN gce2D_TRANSPARENCY DstTransparency, + IN gce2D_TRANSPARENCY PatTransparency + ); + +/* Set the source color key. */ +gceSTATUS +gco2D_SetSourceColorKeyAdvanced( + IN gco2D Engine, + IN gctUINT32 ColorKey + ); + +/* Set the source color key range. */ +gceSTATUS +gco2D_SetSourceColorKeyRangeAdvanced( + IN gco2D Engine, + IN gctUINT32 ColorKeyLow, + IN gctUINT32 ColorKeyHigh + ); + +/* Set the target color key. */ +gceSTATUS +gco2D_SetTargetColorKeyAdvanced( + IN gco2D Engine, + IN gctUINT32 ColorKey + ); + +/* Set the target color key range. */ +gceSTATUS +gco2D_SetTargetColorKeyRangeAdvanced( + IN gco2D Engine, + IN gctUINT32 ColorKeyLow, + IN gctUINT32 ColorKeyHigh + ); + +/* Set the YUV color space mode. */ +gceSTATUS +gco2D_SetYUVColorMode( + IN gco2D Engine, + IN gce2D_YUV_COLOR_MODE Mode + ); + +/* Setup the source global color value in ARGB8 format. */ +gceSTATUS gco2D_SetSourceGlobalColorAdvanced( + IN gco2D Engine, + IN gctUINT32 Color32 + ); + +/* Setup the target global color value in ARGB8 format. 
*/ +gceSTATUS gco2D_SetTargetGlobalColorAdvanced( + IN gco2D Engine, + IN gctUINT32 Color32 + ); + +/* Setup the source and target pixel multiply modes. */ +gceSTATUS +gco2D_SetPixelMultiplyModeAdvanced( + IN gco2D Engine, + IN gce2D_PIXEL_COLOR_MULTIPLY_MODE SrcPremultiplySrcAlpha, + IN gce2D_PIXEL_COLOR_MULTIPLY_MODE DstPremultiplyDstAlpha, + IN gce2D_GLOBAL_COLOR_MULTIPLY_MODE SrcPremultiplyGlobalMode, + IN gce2D_PIXEL_COLOR_MULTIPLY_MODE DstDemultiplyDstAlpha + ); + +/* Set the GPU clock cycles after which the idle engine will keep auto-flushing. */ +gceSTATUS +gco2D_SetAutoFlushCycles( + IN gco2D Engine, + IN gctUINT32 Cycles + ); + +#if VIVANTE_PROFILER +/* Read the profile registers available in the 2D engine and sets them in the profile. + The function will also reset the pixelsRendered counter every time. +*/ +gceSTATUS +gco2D_ProfileEngine( + IN gco2D Engine, + OPTIONAL gcs2D_PROFILE_PTR Profile + ); +#endif + +/* Enable or disable 2D dithering. */ +gceSTATUS +gco2D_EnableDither( + IN gco2D Engine, + IN gctBOOL Enable + ); + +gceSTATUS +gco2D_SetGenericSource( + IN gco2D Engine, + IN gctUINT32_PTR Addresses, + IN gctUINT32 AddressNum, + IN gctUINT32_PTR Strides, + IN gctUINT32 StrideNum, + IN gceTILING Tiling, + IN gceSURF_FORMAT Format, + IN gceSURF_ROTATION Rotation, + IN gctUINT32 SurfaceWidth, + IN gctUINT32 SurfaceHeight +); + +gceSTATUS +gco2D_SetGenericTarget( + IN gco2D Engine, + IN gctUINT32_PTR Addresses, + IN gctUINT32 AddressNum, + IN gctUINT32_PTR Strides, + IN gctUINT32 StrideNum, + IN gceTILING Tiling, + IN gceSURF_FORMAT Format, + IN gceSURF_ROTATION Rotation, + IN gctUINT32 SurfaceWidth, + IN gctUINT32 SurfaceHeight +); + +gceSTATUS +gco2D_SetCurrentSourceIndex( + IN gco2D Engine, + IN gctUINT32 SrcIndex + ); + +gceSTATUS +gco2D_MultiSourceBlit( + IN gco2D Engine, + IN gctUINT32 SourceMask, + IN gcsRECT_PTR DestRect, + IN gctUINT32 RectCount + ); + +gceSTATUS +gco2D_SetROP( + IN gco2D Engine, + IN gctUINT8 FgRop, + IN gctUINT8 BgRop + ); 
+ +gceSTATUS +gco2D_SetGdiStretchMode( + IN gco2D Engine, + IN gctBOOL Enable + ); + +gceSTATUS +gco2D_SetSourceTileStatus( + IN gco2D Engine, + IN gce2D_TILE_STATUS_CONFIG TSControl, + IN gceSURF_FORMAT CompressedFormat, + IN gctUINT32 ClearValue, + IN gctUINT32 GpuAddress + ); + +gceSTATUS +gco2D_SetTargetTileStatus( + IN gco2D Engine, + IN gce2D_TILE_STATUS_CONFIG TileStatusConfig, + IN gceSURF_FORMAT CompressedFormat, + IN gctUINT32 ClearValue, + IN gctUINT32 GpuAddress + ); + +gceSTATUS +gco2D_QueryU32( + IN gco2D Engine, + IN gce2D_QUERY Item, + OUT gctUINT32_PTR Value + ); + +gceSTATUS +gco2D_SetStateU32( + IN gco2D Engine, + IN gce2D_STATE State, + IN gctUINT32 Value + ); + +gceSTATUS +gco2D_SetStateArrayI32( + IN gco2D Engine, + IN gce2D_STATE State, + IN gctINT32_PTR Array, + IN gctINT32 ArraySize + ); + +gceSTATUS +gco2D_SetStateArrayU32( + IN gco2D Engine, + IN gce2D_STATE State, + IN gctUINT32_PTR Array, + IN gctINT32 ArraySize + ); + +gceSTATUS +gco2D_SetTargetRect( + IN gco2D Engine, + IN gcsRECT_PTR Rect + ); + +gceSTATUS +gco2D_Set2DEngine( + IN gco2D Engine + ); + +gceSTATUS +gco2D_UnSet2DEngine( + IN gco2D Engine + ); + +gceSTATUS +gco2D_Get2DEngine( + OUT gco2D * Engine + ); + +gceSTATUS +gco2D_Commit( + IN gco2D Engine, + IN gctBOOL Stall + ); + +gceSTATUS +gco2D_NatureRotateTranslation( + IN gctBOOL IsSrcRot, + IN gce2D_NATURE_ROTATION NatureRotation, + IN gctINT32 SrcSurfaceWidth, + IN gctINT32 SrcSurfaceHeight, + IN gctINT32 DstSurfaceWidth, + IN gctINT32 DstSurfaceHeight, + IN OUT gcsRECT_PTR SrcRect, + IN OUT gcsRECT_PTR DstRect, + OUT gceSURF_ROTATION * SrcRotation, + OUT gceSURF_ROTATION * DstRotation + ); + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_raster_h_ */ + + diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_rename.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_rename.h new file mode 100644 index 000000000000..df842afcbe61 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_rename.h @@ -0,0 +1,279 @@ 
+/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. 
+* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#ifndef __gc_hal_rename_h_ +#define __gc_hal_rename_h_ + + +#if defined(_HAL2D_APPENDIX) + +#define _HAL2D_RENAME_2(api, appendix) api ## appendix +#define _HAL2D_RENAME_1(api, appendix) _HAL2D_RENAME_2(api, appendix) +#define gcmHAL2D(api) _HAL2D_RENAME_1(api, _HAL2D_APPENDIX) + + +#define gckOS_Construct gcmHAL2D(gckOS_Construct) +#define gckOS_Destroy gcmHAL2D(gckOS_Destroy) +#define gckOS_QueryVideoMemory gcmHAL2D(gckOS_QueryVideoMemory) +#define gckOS_Allocate gcmHAL2D(gckOS_Allocate) +#define gckOS_Free gcmHAL2D(gckOS_Free) +#define gckOS_AllocateMemory gcmHAL2D(gckOS_AllocateMemory) +#define gckOS_FreeMemory gcmHAL2D(gckOS_FreeMemory) +#define gckOS_AllocatePagedMemory gcmHAL2D(gckOS_AllocatePagedMemory) +#define gckOS_AllocatePagedMemoryEx gcmHAL2D(gckOS_AllocatePagedMemoryEx) +#define gckOS_LockPages gcmHAL2D(gckOS_LockPages) +#define gckOS_MapPages gcmHAL2D(gckOS_MapPages) +#define gckOS_UnlockPages gcmHAL2D(gckOS_UnlockPages) +#define gckOS_FreePagedMemory gcmHAL2D(gckOS_FreePagedMemory) +#define gckOS_AllocateNonPagedMemory gcmHAL2D(gckOS_AllocateNonPagedMemory) +#define gckOS_FreeNonPagedMemory gcmHAL2D(gckOS_FreeNonPagedMemory) +#define gckOS_AllocateContiguous gcmHAL2D(gckOS_AllocateContiguous) +#define gckOS_FreeContiguous 
gcmHAL2D(gckOS_FreeContiguous) +#define gckOS_GetPageSize gcmHAL2D(gckOS_GetPageSize) +#define gckOS_GetPhysicalAddress gcmHAL2D(gckOS_GetPhysicalAddress) +#define gckOS_UserLogicalToPhysical gcmHAL2D(gckOS_UserLogicalToPhysical) +#define gckOS_GetPhysicalAddressProcess gcmHAL2D(gckOS_GetPhysicalAddressProcess) +#define gckOS_MapPhysical gcmHAL2D(gckOS_MapPhysical) +#define gckOS_UnmapPhysical gcmHAL2D(gckOS_UnmapPhysical) +#define gckOS_ReadRegister gcmHAL2D(gckOS_ReadRegister) +#define gckOS_WriteRegister gcmHAL2D(gckOS_WriteRegister) +#define gckOS_WriteMemory gcmHAL2D(gckOS_WriteMemory) +#define gckOS_MapMemory gcmHAL2D(gckOS_MapMemory) +#define gckOS_UnmapMemory gcmHAL2D(gckOS_UnmapMemory) +#define gckOS_UnmapMemoryEx gcmHAL2D(gckOS_UnmapMemoryEx) +#define gckOS_CreateMutex gcmHAL2D(gckOS_CreateMutex) +#define gckOS_DeleteMutex gcmHAL2D(gckOS_DeleteMutex) +#define gckOS_AcquireMutex gcmHAL2D(gckOS_AcquireMutex) +#define gckOS_ReleaseMutex gcmHAL2D(gckOS_ReleaseMutex) +#define gckOS_AtomicExchange gcmHAL2D(gckOS_AtomicExchange) +#define gckOS_AtomicExchangePtr gcmHAL2D(gckOS_AtomicExchangePtr) +#define gckOS_AtomConstruct gcmHAL2D(gckOS_AtomConstruct) +#define gckOS_AtomDestroy gcmHAL2D(gckOS_AtomDestroy) +#define gckOS_AtomGet gcmHAL2D(gckOS_AtomGet) +#define gckOS_AtomIncrement gcmHAL2D(gckOS_AtomIncrement) +#define gckOS_AtomDecrement gcmHAL2D(gckOS_AtomDecrement) +#define gckOS_Delay gcmHAL2D(gckOS_Delay) +#define gckOS_GetTime gcmHAL2D(gckOS_GetTime) +#define gckOS_MemoryBarrier gcmHAL2D(gckOS_MemoryBarrier) +#define gckOS_MapUserPointer gcmHAL2D(gckOS_MapUserPointer) +#define gckOS_UnmapUserPointer gcmHAL2D(gckOS_UnmapUserPointer) +#define gckOS_QueryNeedCopy gcmHAL2D(gckOS_QueryNeedCopy) +#define gckOS_CopyFromUserData gcmHAL2D(gckOS_CopyFromUserData) +#define gckOS_CopyToUserData gcmHAL2D(gckOS_CopyToUserData) +#define gckOS_SuspendInterrupt gcmHAL2D(gckOS_SuspendInterrupt) +#define gckOS_ResumeInterrupt gcmHAL2D(gckOS_ResumeInterrupt) +#define 
gckOS_GetBaseAddress gcmHAL2D(gckOS_GetBaseAddress) +#define gckOS_MemCopy gcmHAL2D(gckOS_MemCopy) +#define gckOS_ZeroMemory gcmHAL2D(gckOS_ZeroMemory) +#define gckOS_DeviceControl gcmHAL2D(gckOS_DeviceControl) +#define gckOS_GetProcessID gcmHAL2D(gckOS_GetProcessID) +#define gckOS_GetThreadID gcmHAL2D(gckOS_GetThreadID) +#define gckOS_CreateSignal gcmHAL2D(gckOS_CreateSignal) +#define gckOS_DestroySignal gcmHAL2D(gckOS_DestroySignal) +#define gckOS_Signal gcmHAL2D(gckOS_Signal) +#define gckOS_WaitSignal gcmHAL2D(gckOS_WaitSignal) +#define gckOS_MapSignal gcmHAL2D(gckOS_MapSignal) +#define gckOS_MapUserMemory gcmHAL2D(gckOS_MapUserMemory) +#define gckOS_UnmapUserMemory gcmHAL2D(gckOS_UnmapUserMemory) +#define gckOS_CreateUserSignal gcmHAL2D(gckOS_CreateUserSignal) +#define gckOS_DestroyUserSignal gcmHAL2D(gckOS_DestroyUserSignal) +#define gckOS_WaitUserSignal gcmHAL2D(gckOS_WaitUserSignal) +#define gckOS_SignalUserSignal gcmHAL2D(gckOS_SignalUserSignal) +#define gckOS_UserSignal gcmHAL2D(gckOS_UserSignal) +#define gckOS_UserSignal gcmHAL2D(gckOS_UserSignal) +#define gckOS_CacheClean gcmHAL2D(gckOS_CacheClean) +#define gckOS_CacheFlush gcmHAL2D(gckOS_CacheFlush) +#define gckOS_SetDebugLevel gcmHAL2D(gckOS_SetDebugLevel) +#define gckOS_SetDebugZone gcmHAL2D(gckOS_SetDebugZone) +#define gckOS_SetDebugLevelZone gcmHAL2D(gckOS_SetDebugLevelZone) +#define gckOS_SetDebugZones gcmHAL2D(gckOS_SetDebugZones) +#define gckOS_SetDebugFile gcmHAL2D(gckOS_SetDebugFile) +#define gckOS_Broadcast gcmHAL2D(gckOS_Broadcast) +#define gckOS_SetGPUPower gcmHAL2D(gckOS_SetGPUPower) +#define gckOS_CreateSemaphore gcmHAL2D(gckOS_CreateSemaphore) +#define gckOS_DestroySemaphore gcmHAL2D(gckOS_DestroySemaphore) +#define gckOS_AcquireSemaphore gcmHAL2D(gckOS_AcquireSemaphore) +#define gckOS_ReleaseSemaphore gcmHAL2D(gckOS_ReleaseSemaphore) +#define gckHEAP_Construct gcmHAL2D(gckHEAP_Construct) +#define gckHEAP_Destroy gcmHAL2D(gckHEAP_Destroy) +#define gckHEAP_Allocate 
gcmHAL2D(gckHEAP_Allocate) +#define gckHEAP_Free gcmHAL2D(gckHEAP_Free) +#define gckHEAP_ProfileStart gcmHAL2D(gckHEAP_ProfileStart) +#define gckHEAP_ProfileEnd gcmHAL2D(gckHEAP_ProfileEnd) +#define gckHEAP_Test gcmHAL2D(gckHEAP_Test) +#define gckVIDMEM_Construct gcmHAL2D(gckVIDMEM_Construct) +#define gckVIDMEM_Destroy gcmHAL2D(gckVIDMEM_Destroy) +#define gckVIDMEM_Allocate gcmHAL2D(gckVIDMEM_Allocate) +#define gckVIDMEM_AllocateLinear gcmHAL2D(gckVIDMEM_AllocateLinear) +#define gckVIDMEM_Free gcmHAL2D(gckVIDMEM_Free) +#define gckVIDMEM_Lock gcmHAL2D(gckVIDMEM_Lock) +#define gckVIDMEM_Unlock gcmHAL2D(gckVIDMEM_Unlock) +#define gckVIDMEM_ConstructVirtual gcmHAL2D(gckVIDMEM_ConstructVirtual) +#define gckVIDMEM_DestroyVirtual gcmHAL2D(gckVIDMEM_DestroyVirtual) +#define gckKERNEL_Construct gcmHAL2D(gckKERNEL_Construct) +#define gckKERNEL_Destroy gcmHAL2D(gckKERNEL_Destroy) +#define gckKERNEL_Dispatch gcmHAL2D(gckKERNEL_Dispatch) +#define gckKERNEL_QueryVideoMemory gcmHAL2D(gckKERNEL_QueryVideoMemory) +#define gckKERNEL_GetVideoMemoryPool gcmHAL2D(gckKERNEL_GetVideoMemoryPool) +#define gckKERNEL_MapVideoMemory gcmHAL2D(gckKERNEL_MapVideoMemory) +#define gckKERNEL_UnmapVideoMemory gcmHAL2D(gckKERNEL_UnmapVideoMemory) +#define gckKERNEL_MapMemory gcmHAL2D(gckKERNEL_MapMemory) +#define gckKERNEL_UnmapMemory gcmHAL2D(gckKERNEL_UnmapMemory) +#define gckKERNEL_Notify gcmHAL2D(gckKERNEL_Notify) +#define gckKERNEL_QuerySettings gcmHAL2D(gckKERNEL_QuerySettings) +#define gckKERNEL_Recovery gcmHAL2D(gckKERNEL_Recovery) +#define gckKERNEL_OpenUserData gcmHAL2D(gckKERNEL_OpenUserData) +#define gckKERNEL_CloseUserData gcmHAL2D(gckKERNEL_CloseUserData) +#define gckHARDWARE_Construct gcmHAL2D(gckHARDWARE_Construct) +#define gckHARDWARE_Destroy gcmHAL2D(gckHARDWARE_Destroy) +#define gckHARDWARE_QuerySystemMemory gcmHAL2D(gckHARDWARE_QuerySystemMemory) +#define gckHARDWARE_BuildVirtualAddress gcmHAL2D(gckHARDWARE_BuildVirtualAddress) +#define gckHARDWARE_QueryCommandBuffer 
gcmHAL2D(gckHARDWARE_QueryCommandBuffer) +#define gckHARDWARE_WaitLink gcmHAL2D(gckHARDWARE_WaitLink) +#define gckHARDWARE_Execute gcmHAL2D(gckHARDWARE_Execute) +#define gckHARDWARE_End gcmHAL2D(gckHARDWARE_End) +#define gckHARDWARE_Nop gcmHAL2D(gckHARDWARE_Nop) +#define gckHARDWARE_PipeSelect gcmHAL2D(gckHARDWARE_PipeSelect) +#define gckHARDWARE_Link gcmHAL2D(gckHARDWARE_Link) +#define gckHARDWARE_Event gcmHAL2D(gckHARDWARE_Event) +#define gckHARDWARE_QueryMemory gcmHAL2D(gckHARDWARE_QueryMemory) +#define gckHARDWARE_QueryChipIdentity gcmHAL2D(gckHARDWARE_QueryChipIdentity) +#define gckHARDWARE_QueryChipSpecs gcmHAL2D(gckHARDWARE_QueryChipSpecs) +#define gckHARDWARE_QueryShaderCaps gcmHAL2D(gckHARDWARE_QueryShaderCaps) +#define gckHARDWARE_ConvertFormat gcmHAL2D(gckHARDWARE_ConvertFormat) +#define gckHARDWARE_SplitMemory gcmHAL2D(gckHARDWARE_SplitMemory) +#define gckHARDWARE_AlignToTile gcmHAL2D(gckHARDWARE_AlignToTile) +#define gckHARDWARE_UpdateQueueTail gcmHAL2D(gckHARDWARE_UpdateQueueTail) +#define gckHARDWARE_ConvertLogical gcmHAL2D(gckHARDWARE_ConvertLogical) +#define gckHARDWARE_Interrupt gcmHAL2D(gckHARDWARE_Interrupt) +#define gckHARDWARE_SetMMU gcmHAL2D(gckHARDWARE_SetMMU) +#define gckHARDWARE_FlushMMU gcmHAL2D(gckHARDWARE_FlushMMU) +#define gckHARDWARE_GetIdle gcmHAL2D(gckHARDWARE_GetIdle) +#define gckHARDWARE_Flush gcmHAL2D(gckHARDWARE_Flush) +#define gckHARDWARE_SetFastClear gcmHAL2D(gckHARDWARE_SetFastClear) +#define gckHARDWARE_ReadInterrupt gcmHAL2D(gckHARDWARE_ReadInterrupt) +#define gckHARDWARE_SetPowerManagementState gcmHAL2D(gckHARDWARE_SetPowerManagementState) +#define gckHARDWARE_QueryPowerManagementState gcmHAL2D(gckHARDWARE_QueryPowerManagementState) +#define gckHARDWARE_ProfileEngine2D gcmHAL2D(gckHARDWARE_ProfileEngine2D) +#define gckHARDWARE_InitializeHardware gcmHAL2D(gckHARDWARE_InitializeHardware) +#define gckHARDWARE_Reset gcmHAL2D(gckHARDWARE_Reset) +#define gckINTERRUPT_Construct gcmHAL2D(gckINTERRUPT_Construct) +#define 
gckINTERRUPT_Destroy gcmHAL2D(gckINTERRUPT_Destroy) +#define gckINTERRUPT_SetHandler gcmHAL2D(gckINTERRUPT_SetHandler) +#define gckINTERRUPT_Notify gcmHAL2D(gckINTERRUPT_Notify) +#define gckEVENT_Construct gcmHAL2D(gckEVENT_Construct) +#define gckEVENT_Destroy gcmHAL2D(gckEVENT_Destroy) +#define gckEVENT_AddList gcmHAL2D(gckEVENT_AddList) +#define gckEVENT_FreeNonPagedMemory gcmHAL2D(gckEVENT_FreeNonPagedMemory) +#define gckEVENT_FreeContiguousMemory gcmHAL2D(gckEVENT_FreeContiguousMemory) +#define gckEVENT_FreeVideoMemory gcmHAL2D(gckEVENT_FreeVideoMemory) +#define gckEVENT_Signal gcmHAL2D(gckEVENT_Signal) +#define gckEVENT_Unlock gcmHAL2D(gckEVENT_Unlock) +#define gckEVENT_Submit gcmHAL2D(gckEVENT_Submit) +#define gckEVENT_Commit gcmHAL2D(gckEVENT_Commit) +#define gckEVENT_Notify gcmHAL2D(gckEVENT_Notify) +#define gckEVENT_Interrupt gcmHAL2D(gckEVENT_Interrupt) +#define gckCOMMAND_Construct gcmHAL2D(gckCOMMAND_Construct) +#define gckCOMMAND_Destroy gcmHAL2D(gckCOMMAND_Destroy) +#define gckCOMMAND_EnterCommit gcmHAL2D(gckCOMMAND_EnterCommit) +#define gckCOMMAND_ExitCommit gcmHAL2D(gckCOMMAND_ExitCommit) +#define gckCOMMAND_Start gcmHAL2D(gckCOMMAND_Start) +#define gckCOMMAND_Stop gcmHAL2D(gckCOMMAND_Stop) +#define gckCOMMAND_Commit gcmHAL2D(gckCOMMAND_Commit) +#define gckCOMMAND_Reserve gcmHAL2D(gckCOMMAND_Reserve) +#define gckCOMMAND_Execute gcmHAL2D(gckCOMMAND_Execute) +#define gckCOMMAND_Stall gcmHAL2D(gckCOMMAND_Stall) +#define gckCOMMAND_Attach gcmHAL2D(gckCOMMAND_Attach) +#define gckCOMMAND_Detach gcmHAL2D(gckCOMMAND_Detach) +#define gckMMU_Construct gcmHAL2D(gckMMU_Construct) +#define gckMMU_Destroy gcmHAL2D(gckMMU_Destroy) +#define gckMMU_AllocatePages gcmHAL2D(gckMMU_AllocatePages) +#define gckMMU_FreePages gcmHAL2D(gckMMU_FreePages) +#define gckMMU_Test gcmHAL2D(gckMMU_Test) +#define gckHARDWARE_QueryProfileRegisters gcmHAL2D(gckHARDWARE_QueryProfileRegisters) + + +#define FindMdlMap gcmHAL2D(FindMdlMap) +#define OnProcessExit gcmHAL2D(OnProcessExit) + 
+#define gckGALDEVICE_Destroy gcmHAL2D(gckGALDEVICE_Destroy) +#define gckOS_Print gcmHAL2D(gckOS_Print) +#define gckGALDEVICE_FreeMemory gcmHAL2D(gckGALDEVICE_FreeMemory) +#define gckGALDEVICE_AllocateMemory gcmHAL2D(gckGALDEVICE_AllocateMemory) +#define gckOS_DebugBreak gcmHAL2D(gckOS_DebugBreak) +#define gckGALDEVICE_Release_ISR gcmHAL2D(gckGALDEVICE_Release_ISR) +#define gckOS_Verify gcmHAL2D(gckOS_Verify) +#define gckCOMMAND_Release gcmHAL2D(gckCOMMAND_Release) +#define gckGALDEVICE_Stop gcmHAL2D(gckGALDEVICE_Stop) +#define gckGALDEVICE_Construct gcmHAL2D(gckGALDEVICE_Construct) +#define gckOS_DebugFatal gcmHAL2D(gckOS_DebugFatal) +#define gckOS_DebugTrace gcmHAL2D(gckOS_DebugTrace) +#define gckHARDWARE_GetBaseAddress gcmHAL2D(gckHARDWARE_GetBaseAddress) +#define gckGALDEVICE_Setup_ISR gcmHAL2D(gckGALDEVICE_Setup_ISR) +#define gckKERNEL_AttachProcess gcmHAL2D(gckKERNEL_AttachProcess) +#define gckKERNEL_AttachProcessEx gcmHAL2D(gckKERNEL_AttachProcessEx) +#define gckGALDEVICE_Start_Thread gcmHAL2D(gckGALDEVICE_Start_Thread) +#define gckHARDWARE_QueryIdle gcmHAL2D(gckHARDWARE_QueryIdle) +#define gckGALDEVICE_Start gcmHAL2D(gckGALDEVICE_Start) +#define gckOS_GetKernelLogical gcmHAL2D(gckOS_GetKernelLogical) +#define gckOS_DebugTraceZone gcmHAL2D(gckOS_DebugTraceZone) +#define gckGALDEVICE_Stop_Thread gcmHAL2D(gckGALDEVICE_Stop_Thread) +#define gckHARDWARE_NeedBaseAddress gcmHAL2D(gckHARDWARE_NeedBaseAddress) + +#endif + +#endif /* __gc_hal_rename_h_ */ + + diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_resource.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_resource.h new file mode 100644 index 000000000000..01642229d869 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_resource.h @@ -0,0 +1,69 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of 
this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#ifndef __gc_hal_resource_h_ +#define __gc_hal_resource_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_resource_h_ */ + + diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_security_interface.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_security_interface.h new file mode 100644 index 000000000000..346dac955c9e --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_security_interface.h @@ -0,0 +1,186 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#ifndef _GC_HAL_SECURITY_INTERFACE_H_ +#define _GC_HAL_SECURITY_INTERFACE_H_ +/*! + @brief Command codes between kernel module and TrustZone + @discussion + Critical services must be done in TrustZone to avoid sensitive content leak. Most of kernel module is kept in non-Secure os to minimize + code in TrustZone. 
+ */ +typedef enum kernel_packet_command { + KERNEL_START_COMMAND, + KERNEL_SUBMIT, + KERNEL_MAP_MEMORY, /* */ + KERNEL_UNMAP_MEMORY, + KERNEL_ALLOCATE_SECRUE_MEMORY, /*! Security memory management. */ + KERNEL_FREE_SECURE_MEMORY, + KERNEL_EXECUTE, /* Execute a command buffer. */ + KERNEL_DUMP_MMU_EXCEPTION, + KERNEL_HANDLE_MMU_EXCEPTION, + KERNEL_READ_MMU_EXCEPTION, +} kernel_packet_command_t; + +struct kernel_start_command { + kernel_packet_command_t command; /*! The command (always needs to be the first entry in a structure). */ + gctUINT8 gpu; /*! Which GPU. */ + gctUINT32 address; + gctUINT32 bytes; +}; + +/*! + @brief gckCOMMAND Object requests TrustZone to submit command buffer. + @discussion + Code in trustzone will check content of command buffer after copying command buffer to TrustZone. + */ +struct kernel_submit { + kernel_packet_command_t command; /*! The command (always needs to be the first entry in a structure). */ + gctUINT8 gpu; /*! Which GPU. */ + gctUINT8 kernel_command; /*! Whether it is a kernel command. */ + gctUINT32 command_buffer_handle; /*! Handle to command buffer. */ + gctUINT32 offset; /* Offset in command buffer. */ + gctUINT32 * command_buffer; /*! Content of command buffer need to be submit. */ + gctUINT32 command_buffer_length; /*! Length of command buffer. */ +}; + + +/*! + @brief gckVIDMEM Object requests TrustZone to allocate security memory. + @discussion + Allocate a buffer from security GPU memory. + */ +struct kernel_allocate_security_memory { + kernel_packet_command_t command; /*! The command (always needs to be the first entry in a structure). */ + gctUINT32 bytes; /*! Requested bytes. */ + gctUINT32 memory_handle; /*! Handle of allocated memory. */ +}; + +/*! + @brief gckVIDMEM Object requests TrustZone to allocate security memory. + @discussion + Free a video memory buffer from security GPU memory. + */ +struct kernel_free_security_memory { + kernel_packet_command_t command; /*! 
The command (always needs to be the first entry in a structure). */ + gctUINT32 memory_handle; /*! Handle of allocated memory. */ +}; + +struct kernel_execute { + kernel_packet_command_t command; /*! The command (always needs to be the first entry in a structure). */ + gctUINT8 gpu; /*! Which GPU. */ + gctUINT8 kernel_command; /*! Whether it is a kernel command. */ + gctUINT32 * command_buffer; /*! Content of command buffer need to be submit. */ + gctUINT32 command_buffer_length; /*! Length of command buffer. */ +}; + +typedef struct kernel_map_scatter_gather { + gctUINT32 bytes; + gctUINT32 physical; + struct kernel_map_scatter_gather *next; +} +kernel_map_scatter_gather_t; + +struct kernel_map_memory { + kernel_packet_command_t command; + kernel_map_scatter_gather_t *scatter; + gctUINT32 *physicals; + gctPHYS_ADDR_T physical; /*! Contiguous physical address range. */ + gctUINT32 pageCount; + gctUINT32 gpuAddress; +}; + +struct kernel_unmap_memory { + gctUINT32 gpuAddress; + gctUINT32 pageCount; +}; + +struct kernel_read_mmu_exception { + gctUINT32 mmuStatus; + gctUINT32 mmuException; +}; + +struct kernel_handle_mmu_exception { + gctUINT32 mmuStatus; + gctPHYS_ADDR_T physical; + gctUINT32 gpuAddress; +}; + +typedef struct _gcsTA_INTERFACE { + kernel_packet_command_t command; + union { + struct kernel_submit Submit; + struct kernel_start_command StartCommand; + struct kernel_allocate_security_memory AllocateSecurityMemory; + struct kernel_execute Execute; + struct kernel_map_memory MapMemory; + struct kernel_unmap_memory UnmapMemory; + struct kernel_read_mmu_exception ReadMMUException; + struct kernel_handle_mmu_exception HandleMMUException; + } u; + gceSTATUS result; +} gcsTA_INTERFACE; + +enum { + gcvTA_COMMAND_INIT, + gcvTA_COMMAND_DISPATCH, + + gcvTA_CALLBACK_ALLOC_SECURE_MEM, + gcvTA_CALLBACK_FREE_SECURE_MEM, +}; + +#endif + + diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_statistics.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_statistics.h new file 
mode 100644 index 000000000000..94a1d3e674b3 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_statistics.h @@ -0,0 +1,135 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#ifndef __gc_hal_statistics_h_ +#define __gc_hal_statistics_h_ + + +#define VIV_STAT_ENABLE_STATISTICS 0 + +/* Toal number of frames for which the frame time is accounted. We have storage + to keep frame times for last this many frames. +*/ +#define VIV_STAT_FRAME_BUFFER_SIZE 30 + + +/* + Total number of frames sampled for a mode. This means + + # of frames for HZ Current : VIV_STAT_EARLY_Z_SAMPLE_FRAMES + # of frames for HZ Switched : VIV_STAT_EARLY_Z_SAMPLE_FRAMES + + + -------------------------------------------------------- + : (2 * VIV_STAT_EARLY_Z_SAMPLE_FRAMES) frames needed + + IMPORTANT: This total must be smaller than VIV_STAT_FRAME_BUFFER_SIZE +*/ +#define VIV_STAT_EARLY_Z_SAMPLE_FRAMES 7 +#define VIV_STAT_EARLY_Z_LATENCY_FRAMES 2 + +/* Multiplication factor for previous Hz off mode. Make it more than 1.0 to advertise HZ on.*/ +#define VIV_STAT_EARLY_Z_FACTOR (1.05f) + +/* Defines the statistical data keys monitored by the statistics module */ +typedef enum _gceSTATISTICS +{ + gcvFRAME_FPS = 1, +} +gceSTATISTICS; + +/* HAL statistics information. 
*/ +typedef struct _gcsSTATISTICS_EARLYZ +{ + gctUINT switchBackCount; + gctUINT nextCheckPoint; + gctBOOL disabled; +} +gcsSTATISTICS_EARLYZ; + + +/* HAL statistics information. */ +typedef struct _gcsSTATISTICS +{ + gctUINT64 frameTime[VIV_STAT_FRAME_BUFFER_SIZE]; + gctUINT64 previousFrameTime; + gctUINT frame; + gcsSTATISTICS_EARLYZ earlyZ; +} +gcsSTATISTICS; + + +/* Add a frame based data into current statistics. */ +void +gcfSTATISTICS_AddData( + IN gceSTATISTICS Key, + IN gctUINT Value + ); + +/* Marks the frame end and triggers statistical calculations and decisions.*/ +void +gcfSTATISTICS_MarkFrameEnd ( + void + ); + +/* Sets whether the dynmaic HZ is disabled or not .*/ +void +gcfSTATISTICS_DisableDynamicEarlyZ ( + IN gctBOOL Disabled + ); + +#endif /*__gc_hal_statistics_h_ */ + + + diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_types.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_types.h new file mode 100644 index 000000000000..0027400c95ec --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_types.h @@ -0,0 +1,1035 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. 
+* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#ifndef __gc_hal_types_h_ +#define __gc_hal_types_h_ + +#include "gc_hal_version.h" +#include "gc_hal_options.h" + +#if !defined(VIV_KMD) +#if defined(__KERNEL__) +#include "linux/version.h" +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) + typedef unsigned long uintptr_t; +# endif +# include "linux/types.h" +#elif defined(UNDER_CE) +#include +#elif defined(_MSC_VER) && (_MSC_VER <= 1500) +#include +#include "vadefs.h" +#elif defined(__QNXNTO__) +#define _QNX_SOURCE +#include +#include +#include +#else +#include +#include +#include +#endif +#endif + +#ifdef _WIN32 +#pragma warning(disable:4127) /* Conditional expression is constant (do { } while(0)). */ +#pragma warning(disable:4100) /* Unreferenced formal parameter. */ +#pragma warning(disable:4204) /* Non-constant aggregate initializer (C99). */ +#pragma warning(disable:4131) /* Uses old-style declarator. */ +#pragma warning(disable:4206) /* Translation unit is empty. */ +#pragma warning(disable:4214) /* Nonstandard extension used : + ** bit field types other than int. */ +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/******************************************************************************\ +** Platform macros. +*/ + +#if defined(__GNUC__) +# define gcdHAS_ELLIPSIS 1 /* GCC always has it. */ +#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) +# define gcdHAS_ELLIPSIS 1 /* C99 has it. */ +#elif defined(_MSC_VER) && (_MSC_VER >= 1500) +# define gcdHAS_ELLIPSIS 1 /* MSVC 2007+ has it. 
*/ +#elif defined(UNDER_CE) +#if UNDER_CE >= 600 +# define gcdHAS_ELLIPSIS 1 +# else +# define gcdHAS_ELLIPSIS 0 +# endif +#else +# error "gcdHAS_ELLIPSIS: Platform could not be determined" +#endif + +/******************************************************************************\ +************************************ Keyword *********************************** +\******************************************************************************/ + +#if defined(ANDROID) && defined(__BIONIC_FORTIFY) +#if defined(__clang__) +# define gcmINLINE __inline__ __attribute__ ((always_inline)) __attribute__ ((gnu_inline)) +# else +# define gcmINLINE __inline__ __attribute__ ((always_inline)) __attribute__ ((gnu_inline)) __attribute__ ((artificial)) +# endif +#elif ((defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || defined(__APPLE__)) +# define gcmINLINE inline /* C99 keyword. */ +#elif defined(__GNUC__) +# define gcmINLINE __inline__ /* GNU keyword. */ +#elif defined(_MSC_VER) || defined(UNDER_CE) +# define gcmINLINE __inline /* Internal keyword. */ +#else +# error "gcmINLINE: Platform could not be determined" +#endif + +/* Possible debug flags. 
*/ +#define gcdDEBUG_NONE 0 +#define gcdDEBUG_ALL (1 << 0) +#define gcdDEBUG_FATAL (1 << 1) +#define gcdDEBUG_TRACE (1 << 2) +#define gcdDEBUG_BREAK (1 << 3) +#define gcdDEBUG_ASSERT (1 << 4) +#define gcdDEBUG_CODE (1 << 5) +#define gcdDEBUG_STACK (1 << 6) + +#define gcmIS_DEBUG(flag) (gcdDEBUG & (flag | gcdDEBUG_ALL) ) + +#ifndef gcdDEBUG +#if (defined(DBG) && DBG) || defined(DEBUG) || defined(_DEBUG) +# define gcdDEBUG gcdDEBUG_ALL +# else +# define gcdDEBUG gcdDEBUG_NONE +# endif +#endif + +#ifdef _USRDLL +#ifdef _MSC_VER +#ifdef HAL_EXPORTS +# define HALAPI __declspec(dllexport) +# else +# define HALAPI __declspec(dllimport) +# endif +# define HALDECL __cdecl +# else +#ifdef HAL_EXPORTS +# define HALAPI +# else +# define HALAPI extern +# endif +# endif +#else +# define HALAPI +# define HALDECL +#endif + +/******************************************************************************\ +********************************** Common Types ******************************** +\******************************************************************************/ + +#define gcvFALSE 0 +#define gcvTRUE 1 + +#define gcvINFINITE ((gctUINT32) ~0U) + +#define gcvINVALID_HANDLE ((gctHANDLE) ~0U) + +typedef int gctBOOL; +typedef gctBOOL * gctBOOL_PTR; + +typedef int gctINT; +typedef signed char gctINT8; +typedef signed short gctINT16; +typedef signed int gctINT32; +typedef signed long long gctINT64; + +typedef gctINT * gctINT_PTR; +typedef gctINT8 * gctINT8_PTR; +typedef gctINT16 * gctINT16_PTR; +typedef gctINT32 * gctINT32_PTR; +typedef gctINT64 * gctINT64_PTR; + +typedef unsigned int gctUINT; +typedef unsigned char gctUINT8; +typedef unsigned short gctUINT16; +typedef unsigned int gctUINT32; +typedef unsigned long long gctUINT64; +typedef uintptr_t gctUINTPTR_T; +typedef ptrdiff_t gctPTRDIFF_T; + +typedef gctUINT * gctUINT_PTR; +typedef gctUINT8 * gctUINT8_PTR; +typedef gctUINT16 * gctUINT16_PTR; +typedef gctUINT32 * gctUINT32_PTR; +typedef gctUINT64 * gctUINT64_PTR; + +typedef size_t 
gctSIZE_T; +typedef gctSIZE_T * gctSIZE_T_PTR; +typedef gctUINT32 gctTRACE; + +#ifdef __cplusplus +# define gcvNULL 0 +#else +# define gcvNULL ((void *) 0) +#endif + +#define gcvMAXINT8 0x7f +#define gcvMININT8 0x80 +#define gcvMAXINT16 0x7fff +#define gcvMININT16 0x8000 +#define gcvMAXINT32 0x7fffffff +#define gcvMININT32 0x80000000 +#define gcvMAXINT64 0x7fffffffffffffff +#define gcvMININT64 0x8000000000000000 +#define gcvMAXUINT8 0xff +#define gcvMINUINT8 0x0 +#define gcvMAXUINT16 0xffff +#define gcvMINUINT16 0x0 +#define gcvMAXUINT32 0xffffffff +#define gcvMINUINT32 0x0 +#define gcvMAXUINT64 0xffffffffffffffff +#define gcvMINUINT64 0x0 +#define gcvMAXUINTPTR_T (~(gctUINTPTR_T)0) + +typedef float gctFLOAT; +typedef signed int gctFIXED_POINT; +typedef float * gctFLOAT_PTR; + +typedef void * gctPHYS_ADDR; +typedef void * gctHANDLE; +typedef void * gctFILE; +typedef void * gctSIGNAL; +typedef void * gctWINDOW; +typedef void * gctIMAGE; +typedef void * gctSHBUF; + +typedef void * gctSEMAPHORE; + +typedef void * gctPOINTER; +typedef const void * gctCONST_POINTER; + +typedef char gctCHAR; +typedef char * gctSTRING; +typedef const char * gctCONST_STRING; + +typedef gctUINT64 gctPHYS_ADDR_T; + +typedef struct _gcsCOUNT_STRING +{ + gctSIZE_T Length; + gctCONST_STRING String; +} +gcsCOUNT_STRING; + +typedef union _gcuFLOAT_UINT32 +{ + gctFLOAT f; + gctUINT32 u; +} +gcuFLOAT_UINT32; + +/* Fixed point constants. */ +#define gcvZERO_X ((gctFIXED_POINT) 0x00000000) +#define gcvHALF_X ((gctFIXED_POINT) 0x00008000) +#define gcvONE_X ((gctFIXED_POINT) 0x00010000) +#define gcvNEGONE_X ((gctFIXED_POINT) 0xFFFF0000) +#define gcvTWO_X ((gctFIXED_POINT) 0x00020000) + + + +#define gcmFIXEDCLAMP_NEG1_TO_1(_x) \ + (((_x) < gcvNEGONE_X) \ + ? gcvNEGONE_X \ + : (((_x) > gcvONE_X) \ + ? gcvONE_X \ + : (_x))) + +#define gcmFLOATCLAMP_NEG1_TO_1(_f) \ + (((_f) < -1.0f) \ + ? -1.0f \ + : (((_f) > 1.0f) \ + ? 1.0f \ + : (_f))) + + +#define gcmFIXEDCLAMP_0_TO_1(_x) \ + (((_x) < 0) \ + ? 
0 \ + : (((_x) > gcvONE_X) \ + ? gcvONE_X \ + : (_x))) + +#define gcmFLOATCLAMP_0_TO_1(_f) \ + (((_f) < 0.0f) \ + ? 0.0f \ + : (((_f) > 1.0f) \ + ? 1.0f \ + : (_f))) + + +/******************************************************************************\ +******************************* Multicast Values ******************************* +\******************************************************************************/ + +/* Value types. */ +typedef enum _gceVALUE_TYPE +{ + gcvVALUE_UINT = 0x0, + gcvVALUE_FIXED, + gcvVALUE_FLOAT, + gcvVALUE_INT, + + /* + ** The value need be unsigned denormalized. clamp (0.0-1.0) should be done first. + */ + gcvVALUE_FLAG_UNSIGNED_DENORM = 0x00010000, + + /* + ** The value need be signed denormalized. clamp (-1.0-1.0) should be done first. + */ + gcvVALUE_FLAG_SIGNED_DENORM = 0x00020000, + + /* + ** The value need to gammar + */ + gcvVALUE_FLAG_GAMMAR = 0x00040000, + + /* + ** The value need to convert from float to float16 + */ + gcvVALUE_FLAG_FLOAT_TO_FLOAT16 = 0x0080000, + + /* + ** Mask for flag field. + */ + gcvVALUE_FLAG_MASK = 0xFFFF0000, +} +gceVALUE_TYPE; + +/* Value unions. */ +typedef union _gcuVALUE +{ + gctUINT uintValue; + gctFIXED_POINT fixedValue; + gctFLOAT floatValue; + gctINT intValue; +} +gcuVALUE; + + + + +/* Stringizing macro. */ +#define gcmSTRING(Value) #Value + +/******************************************************************************\ +******************************* Fixed Point Math ******************************* +\******************************************************************************/ + +#define gcmXMultiply(x1, x2) gcoMATH_MultiplyFixed(x1, x2) +#define gcmXDivide(x1, x2) gcoMATH_DivideFixed(x1, x2) +#define gcmXMultiplyDivide(x1, x2, x3) gcoMATH_MultiplyDivideFixed(x1, x2, x3) + +/* 2D Engine profile. */ +typedef struct _gcs2D_PROFILE +{ + /* Cycle count. + 32bit counter incremented every 2D clock cycle. + Wraps back to 0 when the counter overflows. 
+ */ + gctUINT32 cycleCount; + + /* Pixels rendered by the 2D engine. + Resets to 0 every time it is read. */ + gctUINT32 pixelsRendered; +} +gcs2D_PROFILE; + +/* Macro to combine four characters into a Charcater Code. */ +#define gcmCC(c1, c2, c3, c4) \ +(\ + (char) (c1) \ + | \ + ((char) (c2) << 8) \ + | \ + ((char) (c3) << 16) \ + | \ + ((char) (c4) << 24) \ +) + +#define gcmPRINTABLE(c) ((((c) >= ' ') && ((c) <= '}')) ? ((c) != '%' ? (c) : ' ') : ' ') + +#define gcmCC_PRINT(cc) \ + gcmPRINTABLE((char) ((cc) & 0xFF)), \ + gcmPRINTABLE((char) (((cc) >> 8) & 0xFF)), \ + gcmPRINTABLE((char) (((cc) >> 16) & 0xFF)), \ + gcmPRINTABLE((char) (((cc) >> 24) & 0xFF)) + +/******************************************************************************\ +****************************** Function Parameters ***************************** +\******************************************************************************/ + +#define IN +#define OUT +#define INOUT +#define OPTIONAL + +/******************************************************************************\ +********************************* Status Codes ********************************* +\******************************************************************************/ + +typedef enum _gceSTATUS +{ + gcvSTATUS_OK = 0, + gcvSTATUS_FALSE = 0, + gcvSTATUS_TRUE = 1, + gcvSTATUS_NO_MORE_DATA = 2, + gcvSTATUS_CACHED = 3, + gcvSTATUS_MIPMAP_TOO_LARGE = 4, + gcvSTATUS_NAME_NOT_FOUND = 5, + gcvSTATUS_NOT_OUR_INTERRUPT = 6, + gcvSTATUS_MISMATCH = 7, + gcvSTATUS_MIPMAP_TOO_SMALL = 8, + gcvSTATUS_LARGER = 9, + gcvSTATUS_SMALLER = 10, + gcvSTATUS_CHIP_NOT_READY = 11, + gcvSTATUS_NEED_CONVERSION = 12, + gcvSTATUS_SKIP = 13, + gcvSTATUS_DATA_TOO_LARGE = 14, + gcvSTATUS_INVALID_CONFIG = 15, + gcvSTATUS_CHANGED = 16, + gcvSTATUS_NOT_SUPPORT_DITHER = 17, + gcvSTATUS_EXECUTED = 18, + gcvSTATUS_TERMINATE = 19, + + gcvSTATUS_INVALID_ARGUMENT = -1, + gcvSTATUS_INVALID_OBJECT = -2, + gcvSTATUS_OUT_OF_MEMORY = -3, + gcvSTATUS_MEMORY_LOCKED = -4, + 
gcvSTATUS_MEMORY_UNLOCKED = -5, + gcvSTATUS_HEAP_CORRUPTED = -6, + gcvSTATUS_GENERIC_IO = -7, + gcvSTATUS_INVALID_ADDRESS = -8, + gcvSTATUS_CONTEXT_LOSSED = -9, + gcvSTATUS_TOO_COMPLEX = -10, + gcvSTATUS_BUFFER_TOO_SMALL = -11, + gcvSTATUS_INTERFACE_ERROR = -12, + gcvSTATUS_NOT_SUPPORTED = -13, + gcvSTATUS_MORE_DATA = -14, + gcvSTATUS_TIMEOUT = -15, + gcvSTATUS_OUT_OF_RESOURCES = -16, + gcvSTATUS_INVALID_DATA = -17, + gcvSTATUS_INVALID_MIPMAP = -18, + gcvSTATUS_NOT_FOUND = -19, + gcvSTATUS_NOT_ALIGNED = -20, + gcvSTATUS_INVALID_REQUEST = -21, + gcvSTATUS_GPU_NOT_RESPONDING = -22, + gcvSTATUS_TIMER_OVERFLOW = -23, + gcvSTATUS_VERSION_MISMATCH = -24, + gcvSTATUS_LOCKED = -25, + gcvSTATUS_INTERRUPTED = -26, + gcvSTATUS_DEVICE = -27, + gcvSTATUS_NOT_MULTI_PIPE_ALIGNED = -28, + gcvSTATUS_OUT_OF_SAMPLER = -29, + + /* Linker errors. */ + gcvSTATUS_GLOBAL_TYPE_MISMATCH = -1000, + gcvSTATUS_TOO_MANY_ATTRIBUTES = -1001, + gcvSTATUS_TOO_MANY_UNIFORMS = -1002, + gcvSTATUS_TOO_MANY_VARYINGS = -1003, + gcvSTATUS_UNDECLARED_VARYING = -1004, + gcvSTATUS_VARYING_TYPE_MISMATCH = -1005, + gcvSTATUS_MISSING_MAIN = -1006, + gcvSTATUS_NAME_MISMATCH = -1007, + gcvSTATUS_INVALID_INDEX = -1008, + gcvSTATUS_UNIFORM_MISMATCH = -1009, + gcvSTATUS_UNSAT_LIB_SYMBOL = -1010, + gcvSTATUS_TOO_MANY_SHADERS = -1011, + gcvSTATUS_LINK_INVALID_SHADERS = -1012, + gcvSTATUS_CS_NO_WORKGROUP_SIZE = -1013, + gcvSTATUS_LINK_LIB_ERROR = -1014, + + gcvSTATUS_SHADER_VERSION_MISMATCH = -1015, + gcvSTATUS_TOO_MANY_INSTRUCTION = -1016, + gcvSTATUS_SSBO_MISMATCH = -1017, + gcvSTATUS_TOO_MANY_OUTPUT = -1018, + gcvSTATUS_TOO_MANY_INPUT = -1019, + gcvSTATUS_NOT_SUPPORT_CL = -1020, + gcvSTATUS_NOT_SUPPORT_INTEGER = -1021, + gcvSTATUS_UNIFORM_TYPE_MISMATCH = -1022, + + gcvSTATUS_MISSING_PRIMITIVE_TYPE = -1023, + gcvSTATUS_MISSING_OUTPUT_VERTEX_COUNT = -1024, + gcvSTATUS_NON_INVOCATION_ID_AS_INDEX = -1025, + gcvSTATUS_INPUT_ARRAY_SIZE_MISMATCH = -1026, + gcvSTATUS_OUTPUT_ARRAY_SIZE_MISMATCH = -1027, + 
gcvSTATUS_LOCATION_ALIASED = -1028, + + /* Compiler errors. */ + gcvSTATUS_COMPILER_FE_PREPROCESSOR_ERROR = -2000, + gcvSTATUS_COMPILER_FE_PARSER_ERROR = -2001, + + /* Recompilation Errors */ + gcvSTATUS_RECOMPILER_CONVERT_UNIMPLEMENTED = -3000, +} +gceSTATUS; + +/******************************************************************************\ +********************************* Status Macros ******************************** +\******************************************************************************/ + +#define gcmIS_ERROR(status) (status < 0) +#define gcmNO_ERROR(status) (status >= 0) +#define gcmIS_SUCCESS(status) (status == gcvSTATUS_OK) + +/******************************************************************************\ +********************************* Field Macros ********************************* +\******************************************************************************/ + +#define __gcmSTART(reg_field) \ + (0 ? reg_field) + +#define __gcmEND(reg_field) \ + (1 ? reg_field) + +#define __gcmGETSIZE(reg_field) \ + (__gcmEND(reg_field) - __gcmSTART(reg_field) + 1) + +#define __gcmALIGN(data, reg_field) \ + (((gctUINT32) (data)) << __gcmSTART(reg_field)) + +#define __gcmMASK(reg_field) \ + ((gctUINT32) ((__gcmGETSIZE(reg_field) == 32) \ + ? ~0U \ + : (~(~0U << __gcmGETSIZE(reg_field))))) + +/******************************************************************************* +** +** gcmFIELDMASK +** +** Get aligned field mask. +** +** ARGUMENTS: +** +** reg Name of register. +** field Name of field within register. +*/ +#define gcmFIELDMASK(reg, field) \ +(\ + __gcmALIGN(__gcmMASK(reg##_##field), reg##_##field) \ +) + +/******************************************************************************* +** +** gcmGETFIELD +** +** Extract the value of a field from specified data. +** +** ARGUMENTS: +** +** data Data value. +** reg Name of register. +** field Name of field within register. 
+*/ +#define gcmGETFIELD(data, reg, field) \ +(\ + ((((gctUINT32) (data)) >> __gcmSTART(reg##_##field)) \ + & __gcmMASK(reg##_##field)) \ +) + +/******************************************************************************* +** +** gcmSETFIELD +** +** Set the value of a field within specified data. +** +** ARGUMENTS: +** +** data Data value. +** reg Name of register. +** field Name of field within register. +** value Value for field. +*/ +#define gcmSETFIELD(data, reg, field, value) \ +(\ + (((gctUINT32) (data)) \ + & ~__gcmALIGN(__gcmMASK(reg##_##field), reg##_##field)) \ + | __gcmALIGN((gctUINT32) (value) \ + & __gcmMASK(reg##_##field), reg##_##field) \ +) + +/******************************************************************************* +** +** gcmSETFIELDVALUE +** +** Set the value of a field within specified data with a +** predefined value. +** +** ARGUMENTS: +** +** data Data value. +** reg Name of register. +** field Name of field within register. +** value Name of the value within the field. +*/ +#define gcmSETFIELDVALUE(data, reg, field, value) \ +(\ + (((gctUINT32) (data)) \ + & ~__gcmALIGN(__gcmMASK(reg##_##field), reg##_##field)) \ + | __gcmALIGN(reg##_##field##_##value \ + & __gcmMASK(reg##_##field), reg##_##field) \ +) + +/******************************************************************************* +** +** gcmGETMASKEDFIELDMASK +** +** Determine field mask of a masked field. +** +** ARGUMENTS: +** +** reg Name of register. +** field Name of field within register. +*/ +#define gcmGETMASKEDFIELDMASK(reg, field) \ +(\ + gcmSETFIELD(0, reg, field, ~0U) | \ + gcmSETFIELD(0, reg, MASK_ ## field, ~0U) \ +) + +/******************************************************************************* +** +** gcmSETMASKEDFIELD +** +** Set the value of a masked field with specified data. +** +** ARGUMENTS: +** +** reg Name of register. +** field Name of field within register. +** value Value for field. 
+*/ +#define gcmSETMASKEDFIELD(reg, field, value) \ +(\ + gcmSETFIELD (~0U, reg, field, value) & \ + gcmSETFIELDVALUE(~0U, reg, MASK_ ## field, ENABLED) \ +) + +/******************************************************************************* +** +** gcmSETMASKEDFIELDVALUE +** +** Set the value of a masked field with specified data. +** +** ARGUMENTS: +** +** reg Name of register. +** field Name of field within register. +** value Value for field. +*/ +#define gcmSETMASKEDFIELDVALUE(reg, field, value) \ +(\ + gcmSETFIELDVALUE(~0U, reg, field, value) & \ + gcmSETFIELDVALUE(~0U, reg, MASK_ ## field, ENABLED) \ +) + +/******************************************************************************* +** +** gcmVERIFYFIELDVALUE +** +** Verify if the value of a field within specified data equals a +** predefined value. +** +** ARGUMENTS: +** +** data Data value. +** reg Name of register. +** field Name of field within register. +** value Name of the value within the field. +*/ +#define gcmVERIFYFIELDVALUE(data, reg, field, value) \ +(\ + (((gctUINT32) (data)) >> __gcmSTART(reg##_##field) & \ + __gcmMASK(reg##_##field)) \ + == \ + (reg##_##field##_##value & __gcmMASK(reg##_##field)) \ +) + +/******************************************************************************* +** Bit field macros. +*/ + +#define __gcmSTARTBIT(Field) \ + (1 ? Field ) + +#define __gcmBITSIZE(Field) \ + (0 ? 
Field ) + +#define __gcmBITMASK(Field) \ +(\ + (1 << __gcmBITSIZE(Field)) - 1 \ +) + +#define gcmGETBITS(Value, Type, Field) \ +(\ + (((Type) (Value)) >> __gcmSTARTBIT(Field) ) \ + & \ + __gcmBITMASK(Field) \ +) + +#define gcmSETBITS(Value, Type, Field, NewValue) \ +(\ + (((Type) (Value)) \ + & ~(__gcmBITMASK(Field) << __gcmSTARTBIT(Field)) \ + ) \ + | \ + ((((Type) (NewValue)) \ + & __gcmBITMASK(Field) \ + ) << __gcmSTARTBIT(Field) \ + ) \ +) + +/******************************************************************************* +** +** gcmISINREGRANGE +** +** Verify whether the specified address is in the register range. +** +** ARGUMENTS: +** +** Address Address to be verified. +** Name Name of a register. +*/ + +#define gcmISINREGRANGE(Address, Name) \ +(\ + ((Address & (~0U << Name ## _LSB)) == (Name ## _Address >> 2)) \ +) + +/******************************************************************************\ +******************************** Ceiling Macro ******************************** +\******************************************************************************/ +#define gcmCEIL(x) (((x) - (gctUINT32)(x)) == 0 ? (gctUINT32)(x) : (gctUINT32)(x) + 1) + +/******************************************************************************\ +******************************** Min/Max Macros ******************************** +\******************************************************************************/ + +#define gcmMIN(x, y) (((x) <= (y)) ? (x) : (y)) +#define gcmMAX(x, y) (((x) >= (y)) ? (x) : (y)) +#define gcmCLAMP(x, min, max) (((x) < (min)) ? (min) : \ + ((x) > (max)) ? (max) : (x)) +#define gcmABS(x) (((x) < 0) ? -(x) : (x)) +#define gcmNEG(x) (((x) < 0) ? 
(x) : -(x)) + +/******************************************************************************\ +******************************** Bit Macro ******************************** +\******************************************************************************/ +#define gcmBITSET(x, y) ((x) & (y)) +/******************************************************************************* +** +** gcmPTR2INT +** +** Convert a pointer to an integer value. +** +** ARGUMENTS: +** +** p Pointer value. +*/ +#define gcmPTR2INT(p) \ +(\ + (gctUINTPTR_T) (p) \ +) + +#define gcmPTR2INT32(p) \ +(\ + (gctUINT32)(gctUINTPTR_T) (p) \ +) + +/******************************************************************************* +** +** gcmINT2PTR +** +** Convert an integer value into a pointer. +** +** ARGUMENTS: +** +** v Integer value. +*/ + +#define gcmINT2PTR(i) \ +(\ + (gctPOINTER) (gctUINTPTR_T)(i) \ +) + +/******************************************************************************* +** +** gcmOFFSETOF +** +** Compute the byte offset of a field inside a structure. +** +** ARGUMENTS: +** +** s Structure name. +** field Field name. +*/ +#define gcmOFFSETOF(s, field) \ +(\ + gcmPTR2INT32(& (((struct s *) 0)->field)) \ +) + +/******************************************************************************* +** +** gcmCONTAINEROF +** +** Get containing structure of a member. +** +** ARGUMENTS: +** +** Pointer Pointer of member. +** Type Structure name. +** Name Field name. +*/ +#define gcmCONTAINEROF(Pointer, Type, Member) \ +(\ + (struct Type *)((gctUINTPTR_T)Pointer - gcmOFFSETOF(Type, Member)) \ +) + +/******************************************************************************* +** +** gcmBSWAP32 +** +** Return a value with all bytes in the 32 bit argument swapped. 
+*/ +#if !defined(__KERNEL__) && defined(__GNUC__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__ >= 40300) +# define gcmBSWAP32(x) __builtin_bswap32(x) +#else +# define gcmBSWAP32(x) ((gctUINT32)(\ + (((gctUINT32)(x) & (gctUINT32)0x000000FFUL) << 24) | \ + (((gctUINT32)(x) & (gctUINT32)0x0000FF00UL) << 8) | \ + (((gctUINT32)(x) & (gctUINT32)0x00FF0000UL) >> 8) | \ + (((gctUINT32)(x) & (gctUINT32)0xFF000000UL) >> 24))) +#endif + +/******************************************************************************* +***** Database ****************************************************************/ + +typedef struct _gcsDATABASE_COUNTERS +{ + /* Number of currently allocated bytes. */ + gctUINT64 bytes; + + /* Maximum number of bytes allocated (memory footprint). */ + gctUINT64 maxBytes; + + /* Total number of bytes allocated. */ + gctUINT64 totalBytes; + + /* The numbers of times video memory was allocated. */ + gctUINT32 allocCount; + + /* The numbers of times video memory was freed. */ + gctUINT32 freeCount; +} +gcsDATABASE_COUNTERS; + +typedef struct _gcuDATABASE_INFO +{ + /* Counters. */ + gcsDATABASE_COUNTERS counters; + + /* Time value. */ + gctUINT64 time; +} +gcuDATABASE_INFO; + +/******************************************************************************* +***** Frame database **********************************************************/ + +/* gcsHAL_FRAME_INFO */ +typedef struct _gcsHAL_FRAME_INFO +{ + /* Current timer tick. */ + OUT gctUINT64 ticks; + + /* Bandwidth counters. */ + OUT gctUINT readBytes8[8]; + OUT gctUINT writeBytes8[8]; + + /* Counters. */ + OUT gctUINT cycles[8]; + OUT gctUINT idleCycles[8]; + OUT gctUINT mcCycles[8]; + OUT gctUINT readRequests[8]; + OUT gctUINT writeRequests[8]; + + /* 3D counters. 
*/ + OUT gctUINT vertexCount; + OUT gctUINT primitiveCount; + OUT gctUINT rejectedPrimitives; + OUT gctUINT culledPrimitives; + OUT gctUINT clippedPrimitives; + OUT gctUINT outPrimitives; + OUT gctUINT inPrimitives; + OUT gctUINT culledQuadCount; + OUT gctUINT totalQuadCount; + OUT gctUINT quadCount; + OUT gctUINT totalPixelCount; + + /* PE counters. */ + OUT gctUINT colorKilled[8]; + OUT gctUINT colorDrawn[8]; + OUT gctUINT depthKilled[8]; + OUT gctUINT depthDrawn[8]; + + /* Shader counters. */ + OUT gctUINT shaderCycles; + OUT gctUINT vsInstructionCount; + OUT gctUINT vsTextureCount; + OUT gctUINT psInstructionCount; + OUT gctUINT psTextureCount; + + /* Texture counters. */ + OUT gctUINT bilinearRequests; + OUT gctUINT trilinearRequests; + OUT gctUINT txBytes8; + OUT gctUINT txHitCount; + OUT gctUINT txMissCount; +} +gcsHAL_FRAME_INFO; + +typedef struct _gckLINKDATA * gckLINKDATA; +struct _gckLINKDATA +{ + gctUINT32 start; + gctUINT32 end; + gctUINT32 pid; + gctUINT32 linkLow; + gctUINT32 linkHigh; +}; + +typedef struct _gckADDRESSDATA * gckADDRESSDATA; +struct _gckADDRESSDATA +{ + gctUINT32 start; + gctUINT32 end; +}; + +typedef union _gcuQUEUEDATA +{ + struct _gckLINKDATA linkData; + + struct _gckADDRESSDATA addressData; +} +gcuQUEUEDATA; + +typedef struct _gckQUEUE * gckQUEUE; +struct _gckQUEUE +{ + gcuQUEUEDATA * datas; + gctUINT32 rear; + gctUINT32 front; + gctUINT32 count; + gctUINT32 size; +}; + +typedef enum _gceTRACEMODE +{ + gcvTRACEMODE_NONE = 0, + gcvTRACEMODE_FULL = 1, + gcvTRACEMODE_LOGGER = 2, + gcvTRACEMODE_PRE = 3, + gcvTRACEMODE_POST = 4, +} gceTRACEMODE; + +typedef struct _gcsLISTHEAD * gcsLISTHEAD_PTR; +typedef struct _gcsLISTHEAD +{ + gcsLISTHEAD_PTR prev; + gcsLISTHEAD_PTR next; +} +gcsLISTHEAD; + +/* + gcvFEATURE_DATABASE_DATE_MASK + + Mask used to control which bits of chip date will be used to + query feature database, ignore release date for fpga and emulator. 
+*/ +#if (gcdFPGA_BUILD || defined(EMULATOR)) +# define gcvFEATURE_DATABASE_DATE_MASK (0U) +#else +# define gcvFEATURE_DATABASE_DATE_MASK (~0U) +#endif + +#if defined(__GNUC__) +#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) +#define gcdENDIAN_BIG 1 +#else +#define gcdENDIAN_BIG 0 +#endif +#else +#define gcdENDIAN_BIG 0 +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_types_h_ */ + + diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_version.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_version.h new file mode 100644 index 000000000000..bea70f3db4d8 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_version.h @@ -0,0 +1,71 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#ifndef __gc_hal_version_h_ +#define __gc_hal_version_h_ + +#define gcvVERSION_MAJOR 6 + +#define gcvVERSION_MINOR 2 + +#define gcvVERSION_PATCH 4 + +#define gcvVERSION_BUILD 174315 + +#define gcvVERSION_STRING "6.2.4.p3.174315" + +#endif /* __gc_hal_version_h_ */ + + diff --git a/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_vg.h b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_vg.h new file mode 100644 index 000000000000..fe6daf94c552 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/kernel/inc/gc_hal_vg.h @@ -0,0 +1,76 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#ifndef __gc_hal_vg_h_ +#define __gc_hal_vg_h_ + +#ifdef __cplusplus +extern "C" { +#endif + + +#include "gc_hal_rename.h" +#include "gc_hal_types.h" +#include "gc_hal_enum.h" +#include "gc_hal_base.h" + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* __gc_hal_vg_h_ */ + + diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_array.h b/drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_array.h new file mode 100644 index 000000000000..8abb7e52f5ce --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_array.h @@ -0,0 +1,119 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#ifndef __gc_hal_kernel_allocator_array_h_ +#define __gc_hal_kernel_allocator_array_h_ + +extern gceSTATUS +_GFPAlloctorInit( + IN gckOS Os, + IN gcsDEBUGFS_DIR *Parent, + OUT gckALLOCATOR * Allocator + ); + +extern gceSTATUS +_UserMemoryAlloctorInit( + IN gckOS Os, + IN gcsDEBUGFS_DIR *Parent, + OUT gckALLOCATOR * Allocator + ); + +extern gceSTATUS +_ReservedMemoryAllocatorInit( + IN gckOS Os, + IN gcsDEBUGFS_DIR *Parent, + OUT gckALLOCATOR * Allocator + ); + +#ifdef CONFIG_DMA_SHARED_BUFFER +extern gceSTATUS +_DmabufAlloctorInit( + IN gckOS Os, + IN gcsDEBUGFS_DIR *Parent, + OUT gckALLOCATOR * Allocator + ); +#endif + +#ifndef NO_DMA_COHERENT +extern gceSTATUS +_DmaAlloctorInit( + IN gckOS Os, + IN gcsDEBUGFS_DIR *Parent, + OUT gckALLOCATOR * Allocator + ); +#endif + +/* Default allocator entry. */ +gcsALLOCATOR_DESC allocatorArray[] = +{ + /* GFP allocator. */ + gcmkDEFINE_ALLOCATOR_DESC("gfp", _GFPAlloctorInit), + + /* User memory importer. */ + gcmkDEFINE_ALLOCATOR_DESC("user", _UserMemoryAlloctorInit), + +#ifdef CONFIG_DMA_SHARED_BUFFER + /* Dmabuf allocator. 
*/ + gcmkDEFINE_ALLOCATOR_DESC("dmabuf", _DmabufAlloctorInit), +#endif + +#ifndef NO_DMA_COHERENT + gcmkDEFINE_ALLOCATOR_DESC("dma", _DmaAlloctorInit), +#endif + + gcmkDEFINE_ALLOCATOR_DESC("reserved-mem", _ReservedMemoryAllocatorInit), +}; + +#endif diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_dma.c b/drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_dma.c new file mode 100644 index 000000000000..fe55f451d14c --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_dma.c @@ -0,0 +1,610 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#include "gc_hal_kernel_linux.h" +#include "gc_hal_kernel_allocator.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#define _GC_OBJ_ZONE gcvZONE_OS + +typedef struct _gcsDMA_PRIV * gcsDMA_PRIV_PTR; +typedef struct _gcsDMA_PRIV { + atomic_t usage; +} +gcsDMA_PRIV; + +struct mdl_dma_priv { + gctPOINTER kvaddr; + dma_addr_t dmaHandle; +}; + +/* +* Debugfs support. 
+*/ +static int gc_dma_usage_show(struct seq_file* m, void* data) +{ + gcsINFO_NODE *node = m->private; + gckALLOCATOR Allocator = node->device; + gcsDMA_PRIV_PTR priv = Allocator->privateData; + long long usage = (long long)atomic_read(&priv->usage); + + seq_printf(m, "type n pages bytes\n"); + seq_printf(m, "normal %10llu %12llu\n", usage, usage * PAGE_SIZE); + + return 0; +} + +static gcsINFO InfoList[] = +{ + {"dmausage", gc_dma_usage_show}, +}; + +static void +_DebugfsInit( + IN gckALLOCATOR Allocator, + IN gckDEBUGFS_DIR Root + ) +{ + gcmkVERIFY_OK( + gckDEBUGFS_DIR_Init(&Allocator->debugfsDir, Root->root, "dma")); + + gcmkVERIFY_OK(gckDEBUGFS_DIR_CreateFiles( + &Allocator->debugfsDir, + InfoList, + gcmCOUNTOF(InfoList), + Allocator + )); +} + +static void +_DebugfsCleanup( + IN gckALLOCATOR Allocator + ) +{ + gcmkVERIFY_OK(gckDEBUGFS_DIR_RemoveFiles( + &Allocator->debugfsDir, + InfoList, + gcmCOUNTOF(InfoList) + )); + + gckDEBUGFS_DIR_Deinit(&Allocator->debugfsDir); +} + +static gceSTATUS +_DmaAlloc( + IN gckALLOCATOR Allocator, + INOUT PLINUX_MDL Mdl, + IN gctSIZE_T NumPages, + IN gctUINT32 Flags + ) +{ + gceSTATUS status; + u32 gfp = GFP_KERNEL | gcdNOWARN; + gcsDMA_PRIV_PTR allocatorPriv = (gcsDMA_PRIV_PTR)Allocator->privateData; + + struct mdl_dma_priv *mdlPriv=gcvNULL; + gckOS os = Allocator->os; + + gcmkHEADER_ARG("Mdl=%p NumPages=0x%zx Flags=0x%x", Mdl, NumPages, Flags); + + gcmkONERROR(gckOS_Allocate(os, sizeof(struct mdl_dma_priv), (gctPOINTER *)&mdlPriv)); + mdlPriv->kvaddr = gcvNULL; + +#if defined(CONFIG_ZONE_DMA32) && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) + if (Flags & gcvALLOC_FLAG_4GB_ADDR) + { + gfp |= __GFP_DMA32; + } +#endif +#ifdef CONFIG_MCST + if (Flags & gcvALLOC_FLAG_4GB_ADDR) + { + gfp |= __GFP_DMA; + } +#endif + + mdlPriv->kvaddr +#if defined CONFIG_MIPS || defined CONFIG_CPU_CSKYV2 || defined CONFIG_PPC || defined CONFIG_ARM64 + = dma_alloc_coherent(galcore_device, NumPages * PAGE_SIZE, &mdlPriv->dmaHandle, gfp); +#else + = 
dma_alloc_wc(galcore_device, NumPages * PAGE_SIZE, &mdlPriv->dmaHandle, gfp); +#endif + +#ifdef CONFLICT_BETWEEN_BASE_AND_PHYS + if ((os->device->baseAddress & 0x80000000) != (mdlPriv->dmaHandle & 0x80000000)) + { + mdlPriv->dmaHandle = (mdlPriv->dmaHandle & ~0x80000000) + | (os->device->baseAddress & 0x80000000); + } +#endif + + if (mdlPriv->kvaddr == gcvNULL) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + Mdl->priv = mdlPriv; + + Mdl->dmaHandle = mdlPriv->dmaHandle; + + /* Statistic. */ + atomic_add(NumPages, &allocatorPriv->usage); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (mdlPriv) + { + gckOS_Free(os, mdlPriv); + } + + gcmkFOOTER(); + return status; +} + +static gceSTATUS +_DmaGetSGT( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctSIZE_T Offset, + IN gctSIZE_T Bytes, + OUT gctPOINTER *SGT + ) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) + struct page ** pages = gcvNULL; + struct page * page = gcvNULL; + struct sg_table *sgt = NULL; + struct mdl_dma_priv *mdlPriv = (struct mdl_dma_priv*)Mdl->priv; + + gceSTATUS status = gcvSTATUS_OK; + gctSIZE_T offset = Offset & ~PAGE_MASK; /* Offset to the first page */ + gctINT skipPages = Offset >> PAGE_SHIFT; /* skipped pages */ + gctINT numPages = (PAGE_ALIGN(Offset + Bytes) >> PAGE_SHIFT) - skipPages; + gctINT i; + + gcmkASSERT(Offset + Bytes <= Mdl->numPages << PAGE_SHIFT); + + sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL | gcdNOWARN); + if (!sgt) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + pages = kmalloc(sizeof(struct page*) * numPages, GFP_KERNEL | gcdNOWARN); + if (!pages) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + +#if !defined(phys_to_page) || defined(CONFIG_E90S) + page = virt_to_page(mdlPriv->kvaddr); +#elif LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) + page = phys_to_page(mdlPriv->dmaHandle); +#else + page = phys_to_page(dma_to_phys(&Allocator->os->device->platform->device->dev, mdlPriv->dmaHandle)); +#endif + + for (i = 0; i < numPages; ++i) + { + 
pages[i] = nth_page(page, i + skipPages); + } + + if (sg_alloc_table_from_pages(sgt, pages, numPages, offset, Bytes, GFP_KERNEL) < 0) + { + gcmkONERROR(gcvSTATUS_GENERIC_IO); + } + + *SGT = (gctPOINTER)sgt; + +OnError: + if (pages) + { + kfree(pages); + } + + if (gcmIS_ERROR(status) && sgt) + { + kfree(sgt); + } + + return status; +#else + return gcvSTATUS_NOT_SUPPORTED; +#endif +} + +static void +_DmaFree( + IN gckALLOCATOR Allocator, + IN OUT PLINUX_MDL Mdl + ) +{ + gckOS os = Allocator->os; + struct mdl_dma_priv *mdlPriv=(struct mdl_dma_priv *)Mdl->priv; + gcsDMA_PRIV_PTR allocatorPriv = (gcsDMA_PRIV_PTR)Allocator->privateData; + +#if defined CONFIG_MIPS || defined CONFIG_CPU_CSKYV2 || defined CONFIG_PPC || defined CONFIG_ARM64 + dma_free_coherent(galcore_device, Mdl->numPages * PAGE_SIZE, mdlPriv->kvaddr, mdlPriv->dmaHandle); +#else + dma_free_wc(galcore_device, Mdl->numPages * PAGE_SIZE, mdlPriv->kvaddr, mdlPriv->dmaHandle); +#endif + + gckOS_Free(os, mdlPriv); + + /* Statistic. */ + atomic_sub(Mdl->numPages, &allocatorPriv->usage); +} + +static gceSTATUS +_DmaMmap( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctBOOL Cacheable, + IN gctSIZE_T skipPages, + IN gctSIZE_T numPages, + IN struct vm_area_struct *vma + ) +{ + struct mdl_dma_priv *mdlPriv = (struct mdl_dma_priv*)Mdl->priv; + gceSTATUS status = gcvSTATUS_OK; + + gcmkHEADER_ARG("Allocator=%p Mdl=%p vma=%p", Allocator, Mdl, vma); + + gcmkASSERT(skipPages + numPages <= Mdl->numPages); + + /* map kernel memory to user space.. */ +#if defined CONFIG_MIPS || defined CONFIG_CPU_CSKYV2 || defined CONFIG_PPC + if (remap_pfn_range( + vma, + vma->vm_start, + (mdlPriv->dmaHandle >> PAGE_SHIFT) + skipPages, + numPages << PAGE_SHIFT, + pgprot_writecombine(vma->vm_page_prot)) < 0) +#else + /* map kernel memory to user space.. 
*/ + if (dma_mmap_wc(gcvNULL, + vma, + (gctINT8_PTR)mdlPriv->kvaddr + (skipPages << PAGE_SHIFT), + mdlPriv->dmaHandle + (skipPages << PAGE_SHIFT), + numPages << PAGE_SHIFT) < 0) +#endif + { + gcmkTRACE_ZONE( + gcvLEVEL_WARNING, gcvZONE_OS, + "%s(%d): dma_mmap_attrs error", + __FUNCTION__, __LINE__ + ); + + status = gcvSTATUS_OUT_OF_MEMORY; + } + + gcmkFOOTER(); + return status; +} + +static void +_DmaUnmapUser( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN PLINUX_MDL_MAP MdlMap, + IN gctUINT32 Size + ) +{ + if (unlikely(current->mm == gcvNULL)) + { + /* Do nothing if process is exiting. */ + return; + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) + if (vm_munmap((unsigned long)MdlMap->vmaAddr, Size) < 0) + { + gcmkTRACE_ZONE( + gcvLEVEL_WARNING, gcvZONE_OS, + "%s(%d): vm_munmap failed", + __FUNCTION__, __LINE__ + ); + } +#else + down_write(¤t->mm->mmap_sem); + if (do_munmap(current->mm, (unsigned long)MdlMap->vmaAddr, Size) < 0) + { + gcmkTRACE_ZONE( + gcvLEVEL_WARNING, gcvZONE_OS, + "%s(%d): do_munmap failed", + __FUNCTION__, __LINE__ + ); + } + up_write(¤t->mm->mmap_sem); +#endif +} + +static gceSTATUS +_DmaMapUser( + gckALLOCATOR Allocator, + PLINUX_MDL Mdl, + PLINUX_MDL_MAP MdlMap, + gctBOOL Cacheable + ) +{ + gctPOINTER userLogical = gcvNULL; + gceSTATUS status = gcvSTATUS_OK; + + gcmkHEADER_ARG("Allocator=%p Mdl=%p Cacheable=%d", Allocator, Mdl, Cacheable); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) + userLogical = (gctPOINTER)vm_mmap(gcvNULL, + 0L, + Mdl->numPages * PAGE_SIZE, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_NORESERVE, + 0); +#else + down_write(¤t->mm->mmap_sem); + userLogical = (gctPOINTER)do_mmap_pgoff(gcvNULL, + 0L, + Mdl->numPages * PAGE_SIZE, + PROT_READ | PROT_WRITE, + MAP_SHARED, + 0); + up_write(¤t->mm->mmap_sem); +#endif + + gcmkTRACE_ZONE( + gcvLEVEL_INFO, gcvZONE_OS, + "%s(%d): vmaAddr->%p for phys_addr->%p", + __FUNCTION__, __LINE__, userLogical, Mdl + ); + + if (IS_ERR(userLogical)) + { + gcmkTRACE_ZONE( + 
gcvLEVEL_INFO, gcvZONE_OS, + "%s(%d): do_mmap_pgoff error", + __FUNCTION__, __LINE__ + ); + + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + down_write(¤t->mm->mmap_sem); + do + { + struct vm_area_struct *vma = find_vma(current->mm, (unsigned long)userLogical); + if (vma == gcvNULL) + { + gcmkTRACE_ZONE( + gcvLEVEL_INFO, gcvZONE_OS, + "%s(%d): find_vma error", + __FUNCTION__, __LINE__ + ); + + gcmkERR_BREAK(gcvSTATUS_OUT_OF_RESOURCES); + } + + gcmkERR_BREAK(_DmaMmap(Allocator, Mdl, Cacheable, 0, Mdl->numPages, vma)); + + MdlMap->vmaAddr = userLogical; + MdlMap->cacheable = gcvFALSE; + MdlMap->vma = vma; + } + while (gcvFALSE); + up_write(¤t->mm->mmap_sem); + +OnError: + if (gcmIS_ERROR(status) && userLogical) + { + _DmaUnmapUser(Allocator, Mdl, userLogical, Mdl->numPages * PAGE_SIZE); + } + gcmkFOOTER(); + return status; +} + +static gceSTATUS +_DmaMapKernel( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + OUT gctPOINTER *Logical + ) +{ + struct mdl_dma_priv *mdlPriv=(struct mdl_dma_priv *)Mdl->priv; + *Logical =mdlPriv->kvaddr; + return gcvSTATUS_OK; +} + +static gceSTATUS +_DmaUnmapKernel( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctPOINTER Logical + ) +{ + return gcvSTATUS_OK; +} + +static gceSTATUS +_DmaCache( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctSIZE_T Offset, + IN gctPOINTER Logical, + IN gctUINT32 Bytes, + IN gceCACHEOPERATION Operation + ) +{ + switch (Operation) + { + case gcvCACHE_CLEAN: + case gcvCACHE_FLUSH: + _MemoryBarrier(); + break; + case gcvCACHE_INVALIDATE: + break; + default: + return gcvSTATUS_INVALID_ARGUMENT; + } + + return gcvSTATUS_OK; +} + +static gceSTATUS +_DmaPhysical( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctUINT32 Offset, + OUT gctPHYS_ADDR_T * Physical + ) +{ + struct mdl_dma_priv *mdlPriv=(struct mdl_dma_priv *)Mdl->priv; + + *Physical = mdlPriv->dmaHandle + Offset; + + return gcvSTATUS_OK; +} + +static void +_DmaAllocatorDestructor( + gcsALLOCATOR *Allocator + ) +{ + 
_DebugfsCleanup(Allocator); + + if (Allocator->privateData) + { + kfree(Allocator->privateData); + } + + kfree(Allocator); +} + +/* Default allocator operations. */ +gcsALLOCATOR_OPERATIONS DmaAllocatorOperations = { + .Alloc = _DmaAlloc, + .Free = _DmaFree, + .Mmap = _DmaMmap, + .MapUser = _DmaMapUser, + .UnmapUser = _DmaUnmapUser, + .MapKernel = _DmaMapKernel, + .UnmapKernel = _DmaUnmapKernel, + .Cache = _DmaCache, + .Physical = _DmaPhysical, + .GetSGT = _DmaGetSGT, +}; + +/* Default allocator entry. */ +gceSTATUS +_DmaAlloctorInit( + IN gckOS Os, + IN gcsDEBUGFS_DIR *Parent, + OUT gckALLOCATOR * Allocator + ) +{ + gceSTATUS status; + gckALLOCATOR allocator = gcvNULL; + gcsDMA_PRIV_PTR priv = gcvNULL; + + gcmkONERROR(gckALLOCATOR_Construct(Os, &DmaAllocatorOperations, &allocator)); + + priv = kzalloc(gcmSIZEOF(gcsDMA_PRIV), GFP_KERNEL | gcdNOWARN); + + if (!priv) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + atomic_set(&priv->usage, 0); + + /* Register private data. */ + allocator->privateData = priv; + allocator->destructor = _DmaAllocatorDestructor; + + _DebugfsInit(allocator, Parent); + + /* + * DMA allocator is only used for NonPaged memory + * when NO_DMA_COHERENT is not defined. 
+ */ + allocator->capability = gcvALLOC_FLAG_CONTIGUOUS + | gcvALLOC_FLAG_DMABUF_EXPORTABLE +#if defined(CONFIG_ZONE_DMA32) && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) + | gcvALLOC_FLAG_4GB_ADDR +#endif +#ifdef CONFIG_MCST + | gcvALLOC_FLAG_4GB_ADDR +#endif + ; + + *Allocator = allocator; + + return gcvSTATUS_OK; + +OnError: + if (allocator) + { + kfree(allocator); + } + return status; +} + diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_dmabuf.c b/drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_dmabuf.c new file mode 100644 index 000000000000..c666ab0891e1 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_dmabuf.c @@ -0,0 +1,544 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#include "gc_hal_kernel_linux.h" +#include "gc_hal_kernel_allocator.h" + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define _GC_OBJ_ZONE gcvZONE_OS + +/* Descriptor of a dma_buf imported. 
*/ +typedef struct _gcsDMABUF +{ + struct dma_buf * dmabuf; + struct dma_buf_attachment * attachment; + struct sg_table * sgt; + unsigned long * pagearray; + + int npages; + int pid; + struct list_head list; +} +gcsDMABUF; + +struct allocator_priv +{ + struct mutex lock; + struct list_head buf_list; +}; + +/* +* Debugfs support. +*/ +static int dma_buf_info_show(struct seq_file* m, void* data) +{ + int ret; + gcsDMABUF *buf_desc; + struct dma_buf_attachment *attach_obj; + int count = 0; + size_t size = 0; + int npages = 0; + const char *exp_name; + + gcsINFO_NODE *node = m->private; + gckALLOCATOR allocator = node->device; + struct allocator_priv *priv = allocator->privateData; + + ret = mutex_lock_interruptible(&priv->lock); + + if (ret) + return ret; + + seq_puts(m, "Attached dma-buf objects:\n"); + seq_puts(m, " pid fd pages size exporter attached-devices\n"); + + list_for_each_entry(buf_desc, &priv->buf_list, list) { + struct dma_buf *buf_obj = buf_desc->dmabuf; + + ret = mutex_lock_interruptible(&buf_obj->lock); + + if (ret) { + seq_puts(m, + "ERROR locking buffer object: skipping\n"); + continue; + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0) + exp_name = buf_obj->exp_name; +#else + exp_name = "unknown"; +#endif + + seq_printf(m, "%6d %p %8d %8zu %10s", + buf_desc->pid, + buf_desc->dmabuf, + buf_desc->npages, + buf_obj->size, + exp_name); + + list_for_each_entry(attach_obj, &buf_obj->attachments, node) { + seq_printf(m, " %s", dev_name(attach_obj->dev)); + } + seq_puts(m, "\n"); + + count++; + size += buf_obj->size; + npages += buf_desc->npages; + + mutex_unlock(&buf_obj->lock); + } + + seq_printf(m, "\nTotal %d objects, %d pages, %zu bytes\n", count, npages, size); + + mutex_unlock(&priv->lock); + return 0; +} + +static gcsINFO _InfoList[] = +{ + {"bufinfo", dma_buf_info_show}, +}; + +static void +_DebugfsInit( + IN gckALLOCATOR Allocator, + IN gckDEBUGFS_DIR Root + ) +{ + gcmkVERIFY_OK( + gckDEBUGFS_DIR_Init(&Allocator->debugfsDir, Root->root, 
"dma_buf")); + + gcmkVERIFY_OK(gckDEBUGFS_DIR_CreateFiles( + &Allocator->debugfsDir, + _InfoList, + gcmCOUNTOF(_InfoList), + Allocator + )); +} + +static void +_DebugfsCleanup( + IN gckALLOCATOR Allocator + ) +{ + gcmkVERIFY_OK(gckDEBUGFS_DIR_RemoveFiles( + &Allocator->debugfsDir, + _InfoList, + gcmCOUNTOF(_InfoList) + )); + + gckDEBUGFS_DIR_Deinit(&Allocator->debugfsDir); +} + +static gceSTATUS +_DmabufAttach( + IN gckALLOCATOR Allocator, + IN gcsATTACH_DESC_PTR Desc, + IN PLINUX_MDL Mdl + ) +{ + gceSTATUS status; + + gckOS os = Allocator->os; + + struct dma_buf *dmabuf = Desc->dmaBuf.dmabuf; + struct sg_table *sgt = NULL; + struct dma_buf_attachment *attachment = NULL; + int npages = 0; + unsigned long *pagearray = NULL; + int i, j, k = 0; + struct scatterlist *s; + struct allocator_priv *priv = Allocator->privateData; + gcsDMABUF *buf_desc = NULL; + + gcmkHEADER(); + + gcmkVERIFY_OBJECT(os, gcvOBJ_OS); + + if (!dmabuf) + { + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + } + + get_dma_buf(dmabuf); + attachment = dma_buf_attach(dmabuf, &os->device->platform->device->dev); + + if (!attachment) + { + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + } + + sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL); + + if (!sgt) + { + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + } + + /* Prepare page array. */ + /* Get number of pages. */ + for_each_sg(sgt->sgl, s, sgt->orig_nents, i) + { + npages += (sg_dma_len(s) + PAGE_SIZE - 1) / PAGE_SIZE; + } + + /* Allocate page array. */ + gcmkONERROR(gckOS_Allocate(os, npages * gcmSIZEOF(*pagearray), (gctPOINTER *)&pagearray)); + + /* Fill page array. */ + for_each_sg(sgt->sgl, s, sgt->orig_nents, i) + { + for (j = 0; j < (sg_dma_len(s) + PAGE_SIZE - 1) / PAGE_SIZE; j++) + { + pagearray[k++] = sg_dma_address(s) + j * PAGE_SIZE; + } + } + + /* Prepare descriptor. 
*/ + gcmkONERROR(gckOS_Allocate(os, sizeof(gcsDMABUF), (gctPOINTER *)&buf_desc)); + + buf_desc->dmabuf = dmabuf; + buf_desc->pagearray = pagearray; + buf_desc->attachment = attachment; + buf_desc->sgt = sgt; + + /* Record in buffer list to support debugfs. */ + buf_desc->npages = npages; + buf_desc->pid = _GetProcessID(); + + mutex_lock(&priv->lock); + list_add(&buf_desc->list, &priv->buf_list); + mutex_unlock(&priv->lock); + + /* Record page number. */ + Mdl->numPages = npages; + + Mdl->priv = buf_desc; + + Mdl->contiguous = (sgt->nents == 1) ? gcvTRUE : gcvFALSE; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (pagearray) + { + gcmkOS_SAFE_FREE(os, pagearray); + } + + if (sgt) + { + dma_buf_unmap_attachment(attachment, sgt, DMA_BIDIRECTIONAL); + } + + gcmkFOOTER(); + return status; +} + + +static void +_DmabufFree( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl + ) +{ + gcsDMABUF *buf_desc = Mdl->priv; + gckOS os = Allocator->os; + struct allocator_priv *priv = Allocator->privateData; + + mutex_lock(&priv->lock); + list_del(&buf_desc->list); + mutex_unlock(&priv->lock); + + dma_buf_unmap_attachment(buf_desc->attachment, buf_desc->sgt, DMA_BIDIRECTIONAL); + + dma_buf_detach(buf_desc->dmabuf, buf_desc->attachment); + + dma_buf_put(buf_desc->dmabuf); + + gckOS_Free(os, buf_desc->pagearray); + + gckOS_Free(os, buf_desc); +} + +static void +_DmabufUnmapUser( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN PLINUX_MDL_MAP MdlMap, + IN gctUINT32 Size + ) +{ + gcsDMABUF *buf_desc = Mdl->priv; + gctINT8_PTR userLogical = MdlMap->vmaAddr; + + if (unlikely(current->mm == gcvNULL)) + { + /* Do nothing if process is exiting. 
*/ + return; + } + + userLogical -= buf_desc->sgt->sgl->offset; + vm_munmap((unsigned long)userLogical, Mdl->numPages << PAGE_SHIFT); +} + +static gceSTATUS +_DmabufMapUser( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN PLINUX_MDL_MAP MdlMap, + IN gctBOOL Cacheable + ) +{ + gcsDMABUF *buf_desc = Mdl->priv; + gctINT8_PTR userLogical = gcvNULL; + gceSTATUS status = gcvSTATUS_OK; + + userLogical = (gctINT8_PTR)vm_mmap(buf_desc->dmabuf->file, + 0L, + Mdl->numPages << PAGE_SHIFT, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_NORESERVE, + 0); + + if (IS_ERR(userLogical)) + { + gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); + } + userLogical += buf_desc->sgt->sgl->offset; + + /* To make sure the mapping is created. */ + if (access_ok(userLogical, 4)) + { + uint32_t mem; + get_user(mem, (uint32_t *)userLogical); + + (void)mem; + } + + MdlMap->vmaAddr = (gctPOINTER)userLogical; + MdlMap->cacheable = Cacheable; + +OnError: + if (gcmIS_ERROR(status) && userLogical) + { + _DmabufUnmapUser(Allocator, Mdl, MdlMap, Mdl->numPages << PAGE_SHIFT); + } + return status; +} + +static gceSTATUS +_DmabufMapKernel( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + OUT gctPOINTER *Logical + ) +{ + /* Kernel doesn't acess video memory. */ + return gcvSTATUS_NOT_SUPPORTED; + +} + +static gceSTATUS +_DmabufUnmapKernel( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctPOINTER Logical + ) +{ + /* Kernel doesn't acess video memory. 
*/ + return gcvSTATUS_NOT_SUPPORTED; +} + +static gceSTATUS +_DmabufCache( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctSIZE_T Offset, + IN gctPOINTER Logical, + IN gctUINT32 Bytes, + IN gceCACHEOPERATION Operation + ) +{ + gcsDMABUF *buf_desc = Mdl->priv; + struct sg_table *sgt = buf_desc->sgt; + enum dma_data_direction dir; + + switch (Operation) + { + case gcvCACHE_CLEAN: + dir = DMA_TO_DEVICE; + dma_sync_sg_for_device(galcore_device, sgt->sgl, sgt->nents, dir); + break; + case gcvCACHE_FLUSH: + dir = DMA_BIDIRECTIONAL; + dma_sync_sg_for_device(galcore_device, sgt->sgl, sgt->nents, dir); + break; + case gcvCACHE_INVALIDATE: + dir = DMA_FROM_DEVICE; + dma_sync_sg_for_cpu(galcore_device, sgt->sgl, sgt->nents, dir); + break; + default: + return gcvSTATUS_INVALID_ARGUMENT; + } + + return gcvSTATUS_OK; +} + + +static gceSTATUS +_DmabufPhysical( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctUINT32 Offset, + OUT gctPHYS_ADDR_T * Physical + ) +{ + gcsDMABUF *buf_desc = Mdl->priv; + gctUINT32 offsetInPage = Offset & ~PAGE_MASK; + gctUINT32 index = Offset / PAGE_SIZE; + + *Physical = buf_desc->pagearray[index] + offsetInPage; + + + return gcvSTATUS_OK; +} + +/* Default allocator operations. */ +static gcsALLOCATOR_OPERATIONS DmabufAllocatorOperations = +{ + .Attach = _DmabufAttach, + .Free = _DmabufFree, + .MapUser = _DmabufMapUser, + .UnmapUser = _DmabufUnmapUser, + .MapKernel = _DmabufMapKernel, + .UnmapKernel = _DmabufUnmapKernel, + .Cache = _DmabufCache, + .Physical = _DmabufPhysical, +}; + +static void +_DmabufAllocatorDestructor( + gcsALLOCATOR *Allocator + ) +{ + _DebugfsCleanup(Allocator); + + if (Allocator->privateData) + { + kfree(Allocator->privateData); + } + + kfree(Allocator); +} + +/* Default allocator entry. 
*/ +gceSTATUS +_DmabufAlloctorInit( + IN gckOS Os, + IN gcsDEBUGFS_DIR *Parent, + OUT gckALLOCATOR * Allocator + ) +{ + gceSTATUS status; + gckALLOCATOR allocator; + struct allocator_priv *priv = NULL; + + priv = kmalloc(sizeof (struct allocator_priv), GFP_KERNEL | gcdNOWARN); + + if (!priv) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + mutex_init(&priv->lock); + INIT_LIST_HEAD(&priv->buf_list); + + gcmkONERROR( + gckALLOCATOR_Construct(Os, &DmabufAllocatorOperations, &allocator)); + + allocator->capability = gcvALLOC_FLAG_DMABUF + | gcvALLOC_FLAG_DMABUF_EXPORTABLE + ; + + /* Register private data. */ + allocator->privateData = priv; + allocator->destructor = _DmabufAllocatorDestructor; + + _DebugfsInit(allocator, Parent); + + *Allocator = allocator; + + return gcvSTATUS_OK; + +OnError: + if (priv) + { + kfree(priv); + } + + return status; +} + diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_gfp.c b/drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_gfp.c new file mode 100644 index 000000000000..b9b34d884ab5 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_gfp.c @@ -0,0 +1,1149 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. 
+* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#include "gc_hal_kernel_linux.h" +#include "gc_hal_kernel_allocator.h" +#include +#include +#include +#include +#include +#include + +#include "gc_hal_kernel_platform.h" + +#define _GC_OBJ_ZONE gcvZONE_OS + +#define gcdDISCRETE_PAGES 0 + +struct gfp_alloc +{ + atomic_t low; + atomic_t high; +}; + +#if LINUX_VERSION_CODE < KERNEL_VERSION (2,6,24) +struct sg_table +{ + struct scatterlist *sgl; + unsigned int nents; + unsigned int orig_nents; +}; +#endif + +struct gfp_mdl_priv +{ + int contiguous; + + union + { + /* Pointer to a array of pages. */ + struct + { + struct page *contiguousPages; + dma_addr_t dma_addr; + int exact; + }; + + struct + { + /* Pointer to a array of pointers to page. */ + struct page **nonContiguousPages; + struct sg_table sgt; + }; + }; + + gcsPLATFORM * platform; +}; + +/******************************************************************************\ +************************** GFP Allocator Debugfs *************************** +\******************************************************************************/ + +static int gc_usage_show(struct seq_file* m, void* data) +{ + gcsINFO_NODE *node = m->private; + gckALLOCATOR Allocator = node->device; + struct gfp_alloc *priv = Allocator->privateData; + long long low = (long long)atomic_read(&priv->low); + long long high = (long long)atomic_read(&priv->high); + + seq_printf(m, "type n pages bytes\n"); + seq_printf(m, "normal %10llu %12llu\n", low, low * PAGE_SIZE); + seq_printf(m, "HighMem %10llu %12llu\n", high, high * PAGE_SIZE); + + return 0; +} + +static gcsINFO InfoList[] = +{ + {"usage", gc_usage_show}, +}; + +static void +_GFPAllocatorDebugfsInit( + IN gckALLOCATOR Allocator, + IN gckDEBUGFS_DIR Root + ) +{ + gcmkVERIFY_OK( + gckDEBUGFS_DIR_Init(&Allocator->debugfsDir, Root->root, "gfp")); + + gcmkVERIFY_OK(gckDEBUGFS_DIR_CreateFiles( + &Allocator->debugfsDir, + InfoList, + gcmCOUNTOF(InfoList), + Allocator 
+ )); +} + +static void +_GFPAllocatorDebugfsCleanup( + IN gckALLOCATOR Allocator + ) +{ + gcmkVERIFY_OK(gckDEBUGFS_DIR_RemoveFiles( + &Allocator->debugfsDir, + InfoList, + gcmCOUNTOF(InfoList) + )); + + gckDEBUGFS_DIR_Deinit(&Allocator->debugfsDir); +} + +static void +_NonContiguousFree( + IN struct page ** Pages, + IN gctUINT32 NumPages + ) +{ + gctINT i; + + gcmkHEADER_ARG("Pages=%p, NumPages=%u", Pages, NumPages); + + gcmkASSERT(Pages != gcvNULL); + + for (i = 0; i < NumPages; i++) + { + __free_page(Pages[i]); + } + + if (is_vmalloc_addr(Pages)) + { + vfree(Pages); + } + else + { + kfree(Pages); + } + + gcmkFOOTER_NO(); +} + +static struct page ** +_NonContiguousAlloc( + IN gctUINT32 NumPages, + IN gctUINT32 Gfp + ) +{ + struct page ** pages; + struct page *p; + gctINT i, size; + + gcmkHEADER_ARG("NumPages=%u", NumPages); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) + if (NumPages > totalram_pages()) +#else + if (NumPages > num_physpages) +#endif + { + gcmkFOOTER_NO(); + return gcvNULL; + } + + size = NumPages * sizeof(struct page *); + + pages = kmalloc(size, GFP_KERNEL | gcdNOWARN); + + if (!pages) + { + pages = vmalloc(size); + + if (!pages) + { + gcmkFOOTER_NO(); + return gcvNULL; + } + } + + for (i = 0; i < NumPages; i++) + { + p = alloc_page(Gfp); + + if (!p) + { + _NonContiguousFree(pages, i); + gcmkFOOTER_NO(); + return gcvNULL; + } + +#if gcdDISCRETE_PAGES + if (i != 0) + { + if (page_to_pfn(pages[i-1]) == page_to_pfn(p)-1) + { + /* Replaced page. */ + struct page *l = p; + + /* Allocate a page which is not contiguous to previous one. */ + p = alloc_page(Gfp); + + /* Give replaced page back. 
*/ + __free_page(l); + + if (!p) + { + _NonContiguousFree(pages, i); + gcmkFOOTER_NO(); + return gcvNULL; + } + } + } +#endif + + pages[i] = p; + } + + gcmkFOOTER_ARG("pages=0x%X", pages); + return pages; +} + +/***************************************************************************\ +************************ GFP Allocator ********************************** +\***************************************************************************/ +static gceSTATUS +_GFPAlloc( + IN gckALLOCATOR Allocator, + INOUT PLINUX_MDL Mdl, + IN gctSIZE_T NumPages, + IN gctUINT32 Flags + ) +{ + gceSTATUS status; + gctUINT i; + u32 gfp = GFP_KERNEL | __GFP_HIGHMEM | gcdNOWARN; + gctBOOL contiguous = Flags & gcvALLOC_FLAG_CONTIGUOUS; + + struct gfp_alloc *priv = (struct gfp_alloc *)Allocator->privateData; + struct gfp_mdl_priv *mdlPriv = gcvNULL; + int result; + int low = 0; + int high = 0; + + gcmkHEADER_ARG("Allocator=%p Mdl=%p NumPages=%zu Flags=0x%x", Allocator, Mdl, NumPages, Flags); + +#ifdef gcdSYS_FREE_MEMORY_LIMIT + if (Flags & gcvALLOC_FLAG_MEMLIMIT) + { + struct sysinfo temsysinfo; + si_meminfo(&temsysinfo); + + if ((temsysinfo.freeram < NumPages) || ((temsysinfo.freeram-NumPages) < gcdSYS_FREE_MEMORY_LIMIT)) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + } +#endif + + mdlPriv = kzalloc(sizeof(struct gfp_mdl_priv), GFP_KERNEL | __GFP_NORETRY); + + if (!mdlPriv) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + +#if defined(CONFIG_ZONE_DMA32) && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) + if ((Flags & gcvALLOC_FLAG_4GB_ADDR) || (Allocator->os->device->platform->flagBits & gcvPLATFORM_FLAG_LIMIT_4G_ADDRESS)) + { + /* remove __GFP_HIGHMEM bit, add __GFP_DMA32 bit */ + gfp &= ~__GFP_HIGHMEM; + gfp |= __GFP_DMA32; + } +#else + if (Flags & gcvALLOC_FLAG_4GB_ADDR || (Allocator->os->device->platform->flagBits & gcvPLATFORM_FLAG_LIMIT_4G_ADDRESS)) + { + /* remove __GFP_HIGHMEM bit, add __GFP_DMA bit */ + gfp &= ~__GFP_HIGHMEM; + gfp |= __GFP_DMA; + } + +#endif + + if (contiguous) 
+ { + size_t bytes = NumPages << PAGE_SHIFT; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + void *addr = NULL; + + addr = alloc_pages_exact(bytes, (gfp & ~__GFP_HIGHMEM) | __GFP_NORETRY); + + mdlPriv->contiguousPages = addr ? virt_to_page(addr) : gcvNULL; + + if (mdlPriv->contiguousPages) + { + mdlPriv->exact = gcvTRUE; + } +#endif + + if (mdlPriv->contiguousPages == gcvNULL) + { + int order = get_order(bytes); + + if (order >= MAX_ORDER) + { + status = gcvSTATUS_OUT_OF_MEMORY; + goto OnError; + } + + mdlPriv->contiguousPages = alloc_pages(gfp, order); + + } + + if (mdlPriv->contiguousPages == gcvNULL) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + mdlPriv->dma_addr = dma_map_page(galcore_device, + mdlPriv->contiguousPages, 0, NumPages * PAGE_SIZE, + DMA_TO_DEVICE); + + if (!mdlPriv->dma_addr) + { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + if (mdlPriv->exact) + { + free_pages_exact(page_address(mdlPriv->contiguousPages), bytes); + } + else +#endif + { + __free_pages(mdlPriv->contiguousPages, get_order(bytes)); + } + + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + +#if defined(CONFIG_X86) + if (!PageHighMem(mdlPriv->contiguousPages)) + { + if (set_memory_wc((unsigned long)page_address(mdlPriv->contiguousPages), NumPages) != 0) + { + printk("%s(%d): failed to set_memory_wc\n", __func__, __LINE__); + } + } +#endif + } + else + { + mdlPriv->nonContiguousPages = _NonContiguousAlloc(NumPages, gfp); + + if (mdlPriv->nonContiguousPages == gcvNULL) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION (3,6,0)) && \ + (defined(ARCH_HAS_SG_CHAIN) || defined(CONFIG_ARCH_HAS_SG_CHAIN))) || \ + ((LINUX_VERSION_CODE >= KERNEL_VERSION (5,4,0)) && \ + !defined(CONFIG_ARCH_NO_SG_CHAIN)) + result = sg_alloc_table_from_pages(&mdlPriv->sgt, + mdlPriv->nonContiguousPages, NumPages, 0, + NumPages << PAGE_SHIFT, GFP_KERNEL); + +#else + result = alloc_sg_list_from_pages(&mdlPriv->sgt.sgl, + mdlPriv->nonContiguousPages, 
NumPages, 0, + NumPages << PAGE_SHIFT, &mdlPriv->sgt.nents); + + mdlPriv->sgt.orig_nents = mdlPriv->sgt.nents; +#endif + if (result < 0) + { + _NonContiguousFree(mdlPriv->nonContiguousPages, NumPages); + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + result = dma_map_sg(galcore_device, + mdlPriv->sgt.sgl, mdlPriv->sgt.nents, DMA_TO_DEVICE); + + if (result != mdlPriv->sgt.nents) + { + _NonContiguousFree(mdlPriv->nonContiguousPages, NumPages); + +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION (3,6,0)) && \ + (defined(ARCH_HAS_SG_CHAIN) || defined(CONFIG_ARCH_HAS_SG_CHAIN))) || \ + ((LINUX_VERSION_CODE >= KERNEL_VERSION (5,4,0)) && \ + !defined(CONFIG_ARCH_NO_SG_CHAIN)) + sg_free_table(&mdlPriv->sgt); +#else + kfree(mdlPriv->sgt.sgl); +#endif + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + +#if defined(CONFIG_X86) + if (set_pages_array_wc(mdlPriv->nonContiguousPages, NumPages)) + { + printk("%s(%d): failed to set_pages_array_wc\n", __func__, __LINE__); + } +#endif + } + + for (i = 0; i < NumPages; i++) + { + struct page *page; + gctPHYS_ADDR_T phys = 0U; + + if (contiguous) + { + page = nth_page(mdlPriv->contiguousPages, i); + } + else + { + page = mdlPriv->nonContiguousPages[i]; + } + + SetPageReserved(page); + + phys = page_to_phys(page); + + BUG_ON(!phys); + + if (PageHighMem(page)) + { + high++; + } + else + { + low++; + } + } + + mdlPriv->platform = Allocator->os->device->platform; + mdlPriv->contiguous = contiguous; + atomic_add(low, &priv->low); + atomic_add(high, &priv->high); + + Mdl->priv = mdlPriv; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (mdlPriv) + { + kfree(mdlPriv); + } + + gcmkFOOTER(); + return status; +} + +static gceSTATUS +_GFPGetSGT( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctSIZE_T Offset, + IN gctSIZE_T Bytes, + OUT gctPOINTER *SGT + ) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) + struct page ** pages = gcvNULL; + struct page ** tmpPages = gcvNULL; + struct sg_table *sgt = NULL; + struct gfp_mdl_priv *mdlPriv 
= (struct gfp_mdl_priv*)Mdl->priv; + + gceSTATUS status = gcvSTATUS_OK; + gctSIZE_T offset = Offset & ~PAGE_MASK; /* Offset to the first page */ + gctINT skipPages = Offset >> PAGE_SHIFT; /* skipped pages */ + gctINT numPages = (PAGE_ALIGN(Offset + Bytes) >> PAGE_SHIFT) - skipPages; + gctINT i; + + gcmkASSERT(Offset + Bytes <= Mdl->numPages << PAGE_SHIFT); + + if (Mdl->contiguous) + { + pages = tmpPages = kmalloc(sizeof(struct page*) * numPages, GFP_KERNEL | gcdNOWARN); + if (!pages) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + for (i = 0; i < numPages; ++i) + { + pages[i] = nth_page(mdlPriv->contiguousPages, i + skipPages); + } + } + else + { + pages = &mdlPriv->nonContiguousPages[skipPages]; + } + + sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL | gcdNOWARN); + if (!sgt) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + if (sg_alloc_table_from_pages(sgt, pages, numPages, offset, Bytes, GFP_KERNEL) < 0) + { + gcmkONERROR(gcvSTATUS_GENERIC_IO); + } + + *SGT = (gctPOINTER)sgt; + +OnError: + if (tmpPages) + { + kfree(tmpPages); + } + + if (gcmIS_ERROR(status) && sgt) + { + kfree(sgt); + } + + return status; +#else + return gcvSTATUS_NOT_SUPPORTED; +#endif +} + +static void +_GFPFree( + IN gckALLOCATOR Allocator, + IN OUT PLINUX_MDL Mdl + ) +{ + gctINT i; + struct page * page; + struct gfp_alloc *priv = (struct gfp_alloc *)Allocator->privateData; + struct gfp_mdl_priv *mdlPriv = Mdl->priv; + int low = 0; + int high = 0; + + if (Mdl->contiguous) + { + dma_unmap_page(galcore_device, mdlPriv->dma_addr, + Mdl->numPages << PAGE_SHIFT, DMA_TO_DEVICE); + } + else + { + dma_unmap_sg(galcore_device, mdlPriv->sgt.sgl, mdlPriv->sgt.nents, + DMA_TO_DEVICE); + +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION (3,6,0)) && \ + (defined(ARCH_HAS_SG_CHAIN) || defined(CONFIG_ARCH_HAS_SG_CHAIN))) || \ + ((LINUX_VERSION_CODE >= KERNEL_VERSION (5,4,0)) && \ + !defined(CONFIG_ARCH_NO_SG_CHAIN)) + sg_free_table(&mdlPriv->sgt); +#else + kfree(mdlPriv->sgt.sgl); +#endif + } + + for (i = 
0; i < Mdl->numPages; i++) + { + if (Mdl->contiguous) + { + page = nth_page(mdlPriv->contiguousPages, i); + } + else + { + page = mdlPriv->nonContiguousPages[i]; + } + + ClearPageReserved(page); + + if (PageHighMem(page)) + { + high++; + } + else + { + low++; + } + } + + atomic_sub(low, &priv->low); + atomic_sub(high, &priv->high); + + if (Mdl->contiguous) + { +#if defined(CONFIG_X86) + if (!PageHighMem(mdlPriv->contiguousPages)) + { + set_memory_wb((unsigned long)page_address(mdlPriv->contiguousPages), Mdl->numPages); + } +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + if (mdlPriv->exact == gcvTRUE) + { + free_pages_exact(page_address(mdlPriv->contiguousPages), Mdl->numPages * PAGE_SIZE); + } + else +#endif + { + __free_pages(mdlPriv->contiguousPages, get_order(Mdl->numPages * PAGE_SIZE)); + } + } + else + { +#if defined(CONFIG_X86) + set_pages_array_wb(mdlPriv->nonContiguousPages, Mdl->numPages); +#endif + + _NonContiguousFree(mdlPriv->nonContiguousPages, Mdl->numPages); + } + + kfree(Mdl->priv); +} + +static gceSTATUS +_GFPMmap( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctBOOL Cacheable, + IN gctSIZE_T skipPages, + IN gctSIZE_T numPages, + IN struct vm_area_struct *vma + ) +{ + struct gfp_mdl_priv *mdlPriv = (struct gfp_mdl_priv*)Mdl->priv; + gcsPLATFORM *platform = mdlPriv->platform; + gceSTATUS status = gcvSTATUS_OK; + + gcmkHEADER_ARG("Allocator=%p Mdl=%p vma=%p", Allocator, Mdl, vma); + + vma->vm_flags |= gcdVM_FLAGS; + + if (Cacheable == gcvFALSE) + { + /* Make this mapping non-cached. */ +#if gcdENABLE_BUFFERABLE_VIDEO_MEMORY + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); +#else + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); +#endif + } + + if (platform && platform->ops->adjustProt) + { + platform->ops->adjustProt(vma); + } + + gcmkASSERT(skipPages + numPages <= Mdl->numPages); + + /* Now map all the vmalloc pages to this user address. 
*/ + if (mdlPriv->contiguous) + { + /* map kernel memory to user space.. */ + if (remap_pfn_range(vma, + vma->vm_start, + page_to_pfn(mdlPriv->contiguousPages) + skipPages, + numPages << PAGE_SHIFT, + vma->vm_page_prot) < 0) + { + gcmkTRACE_ZONE( + gcvLEVEL_INFO, gcvZONE_OS, + "%s(%d): remap_pfn_range error.", + __FUNCTION__, __LINE__ + ); + + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + } + else + { + gctUINT i; + unsigned long start = vma->vm_start; + + for (i = 0; i < numPages; ++i) + { + unsigned long pfn = page_to_pfn(mdlPriv->nonContiguousPages[i + skipPages]); + + if (remap_pfn_range(vma, + start, + pfn, + PAGE_SIZE, + vma->vm_page_prot) < 0) + { + gcmkTRACE( + gcvLEVEL_ERROR, + "%s(%d): remap_pfn_range error.", + __FUNCTION__, __LINE__ + ); + + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + start += PAGE_SIZE; + } + } + +OnError: + gcmkFOOTER(); + return status; +} + +static void +_GFPUnmapUser( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN PLINUX_MDL_MAP MdlMap, + IN gctUINT32 Size + ) +{ + MdlMap->cacheable = gcvFALSE; + + if (unlikely(current->mm == gcvNULL)) + { + /* Do nothing if process is exiting. 
*/ + return; + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) + if (vm_munmap((unsigned long)MdlMap->vmaAddr, Size) < 0) + { + gcmkTRACE_ZONE( + gcvLEVEL_WARNING, gcvZONE_OS, + "%s(%d): vm_munmap failed", + __FUNCTION__, __LINE__ + ); + } +#else + down_write(¤t->mm->mmap_sem); + if (do_munmap(current->mm, (unsigned long)MdlMap->vmaAddr, Size) < 0) + { + gcmkTRACE_ZONE( + gcvLEVEL_WARNING, gcvZONE_OS, + "%s(%d): do_munmap failed", + __FUNCTION__, __LINE__ + ); + } + up_write(¤t->mm->mmap_sem); +#endif + + MdlMap->vma = NULL; +} + +static gceSTATUS +_GFPMapUser( + gckALLOCATOR Allocator, + PLINUX_MDL Mdl, + PLINUX_MDL_MAP MdlMap, + gctBOOL Cacheable + ) +{ + gctPOINTER userLogical = gcvNULL; + gceSTATUS status = gcvSTATUS_OK; + + gcmkHEADER_ARG("Allocator=%p Mdl=%p Cacheable=%d", Allocator, Mdl, Cacheable); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) + userLogical = (gctPOINTER)vm_mmap(NULL, + 0L, + Mdl->numPages * PAGE_SIZE, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_NORESERVE, + 0); +#else + down_write(¤t->mm->mmap_sem); + userLogical = (gctPOINTER)do_mmap_pgoff(NULL, + 0L, + Mdl->numPages * PAGE_SIZE, + PROT_READ | PROT_WRITE, + MAP_SHARED, + 0); + up_write(¤t->mm->mmap_sem); +#endif + + gcmkTRACE_ZONE( + gcvLEVEL_INFO, gcvZONE_OS, + "%s(%d): vmaAddr->%p for phys_addr->%p", + __FUNCTION__, __LINE__, + userLogical, + Mdl + ); + + if (IS_ERR(userLogical)) + { + gcmkTRACE_ZONE( + gcvLEVEL_INFO, gcvZONE_OS, + "%s(%d): do_mmap_pgoff error", + __FUNCTION__, __LINE__ + ); + + userLogical = gcvNULL; + + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + down_write(¤t->mm->mmap_sem); + do + { + struct vm_area_struct *vma = find_vma(current->mm, (unsigned long)userLogical); + + if (vma == gcvNULL) + { + gcmkTRACE_ZONE( + gcvLEVEL_INFO, gcvZONE_OS, + "%s(%d): find_vma error", + __FUNCTION__, __LINE__ + ); + + gcmkERR_BREAK(gcvSTATUS_OUT_OF_RESOURCES); + } + + gcmkERR_BREAK(_GFPMmap(Allocator, Mdl, Cacheable, 0, Mdl->numPages, vma)); + MdlMap->vma = vma; + } + 
while (gcvFALSE); + up_write(¤t->mm->mmap_sem); + + if (gcmIS_SUCCESS(status)) + { + MdlMap->vmaAddr = userLogical; + MdlMap->cacheable = Cacheable; + } + +OnError: + if (gcmIS_ERROR(status) && userLogical) + { + _GFPUnmapUser(Allocator, Mdl, userLogical, Mdl->numPages * PAGE_SIZE); + } + gcmkFOOTER(); + return status; +} + +static gceSTATUS +_GFPMapKernel( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + OUT gctPOINTER *Logical + ) +{ + void *addr = 0; + gctINT numPages = Mdl->numPages; + struct gfp_mdl_priv *mdlPriv = Mdl->priv; + + struct page ** pages; + gctBOOL free = gcvFALSE; + pgprot_t pgprot; + gctINT i; + + if (Mdl->contiguous) + { + pages = kmalloc(sizeof(struct page *) * numPages, GFP_KERNEL | gcdNOWARN); + + if (!pages) + { + return gcvSTATUS_OUT_OF_MEMORY; + } + + for (i = 0; i < numPages; i++) + { + pages[i] = nth_page(mdlPriv->contiguousPages, i); + } + + free = gcvTRUE; + } + else + { + pages = mdlPriv->nonContiguousPages; + } + + /* ioremap() can't work on system memory since 2.6.38. 
*/ + if (Mdl->cacheable) + { + pgprot = PAGE_KERNEL; + } + else + { +#if gcdENABLE_BUFFERABLE_VIDEO_MEMORY + pgprot = pgprot_writecombine(PAGE_KERNEL); +#else + pgprot = pgprot_noncached(PAGE_KERNEL); +#endif + } + + addr = vmap(pages, numPages, 0, pgprot); + + if (free) + { + kfree(pages); + } + + if (addr) + { + *Logical = addr; + return gcvSTATUS_OK; + } + else + { + return gcvSTATUS_OUT_OF_MEMORY; + } +} + +static gceSTATUS +_GFPUnmapKernel( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctPOINTER Logical + ) +{ + vunmap(Logical); + + return gcvSTATUS_OK; +} + +static gceSTATUS +_GFPCache( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctSIZE_T Offset, + IN gctPOINTER Logical, + IN gctUINT32 Bytes, + IN gceCACHEOPERATION Operation + ) +{ + struct gfp_mdl_priv *mdlPriv = Mdl->priv; + enum dma_data_direction dir; + + switch (Operation) + { + case gcvCACHE_CLEAN: + dir = DMA_TO_DEVICE; + + if (mdlPriv->contiguous) + { + dma_sync_single_for_device(galcore_device, + mdlPriv->dma_addr, Mdl->numPages << PAGE_SHIFT, dir); + } + else + { + dma_sync_sg_for_device(galcore_device, + mdlPriv->sgt.sgl, mdlPriv->sgt.nents, dir); + } + + break; + case gcvCACHE_FLUSH: + dir = DMA_BIDIRECTIONAL; + + if (mdlPriv->contiguous) + { + dma_sync_single_for_device(galcore_device, + mdlPriv->dma_addr, Mdl->numPages << PAGE_SHIFT, dir); + } + else + { + dma_sync_sg_for_device(galcore_device, + mdlPriv->sgt.sgl, mdlPriv->sgt.nents, dir); + } + + break; + case gcvCACHE_INVALIDATE: + dir = DMA_FROM_DEVICE; + + if (mdlPriv->contiguous) + { + dma_sync_single_for_cpu(galcore_device, + mdlPriv->dma_addr, Mdl->numPages << PAGE_SHIFT, dir); + } + else + { + dma_sync_sg_for_cpu(galcore_device, + mdlPriv->sgt.sgl, mdlPriv->sgt.nents, dir); + } + + break; + default: + return gcvSTATUS_INVALID_ARGUMENT; + } + + return gcvSTATUS_OK; +} + +static gceSTATUS +_GFPPhysical( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctUINT32 Offset, + OUT gctPHYS_ADDR_T * Physical + ) +{ + 
struct gfp_mdl_priv *mdlPriv = Mdl->priv; + gctUINT32 offsetInPage = Offset & ~PAGE_MASK; + gctUINT32 index = Offset / PAGE_SIZE; + + if (Mdl->contiguous) + { + *Physical = page_to_phys(nth_page(mdlPriv->contiguousPages, index)); + } + else + { + *Physical = page_to_phys(mdlPriv->nonContiguousPages[index]); + } + + *Physical += offsetInPage; + + return gcvSTATUS_OK; +} + +static void +_GFPAllocatorDestructor( + gcsALLOCATOR *Allocator + ) +{ + _GFPAllocatorDebugfsCleanup(Allocator); + + if (Allocator->privateData) + { + kfree(Allocator->privateData); + } + + kfree(Allocator); +} + +/* GFP allocator operations. */ +static gcsALLOCATOR_OPERATIONS GFPAllocatorOperations = { + .Alloc = _GFPAlloc, + .Free = _GFPFree, + .Mmap = _GFPMmap, + .MapUser = _GFPMapUser, + .UnmapUser = _GFPUnmapUser, + .MapKernel = _GFPMapKernel, + .UnmapKernel = _GFPUnmapKernel, + .Cache = _GFPCache, + .Physical = _GFPPhysical, + .GetSGT = _GFPGetSGT, +}; + +/* GFP allocator entry. */ +gceSTATUS +_GFPAlloctorInit( + IN gckOS Os, + IN gcsDEBUGFS_DIR *Parent, + OUT gckALLOCATOR * Allocator + ) +{ + gceSTATUS status; + gckALLOCATOR allocator = gcvNULL; + struct gfp_alloc *priv = gcvNULL; + + gcmkONERROR( + gckALLOCATOR_Construct(Os, &GFPAllocatorOperations, &allocator)); + + priv = kzalloc(sizeof(struct gfp_alloc), GFP_KERNEL | gcdNOWARN); + + if (!priv) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + atomic_set(&priv->low, 0); + atomic_set(&priv->high, 0); + + /* Register private data. 
*/ + allocator->privateData = priv; + allocator->destructor = _GFPAllocatorDestructor; + + _GFPAllocatorDebugfsInit(allocator, Parent); + + allocator->capability = gcvALLOC_FLAG_CONTIGUOUS + | gcvALLOC_FLAG_NON_CONTIGUOUS + | gcvALLOC_FLAG_CACHEABLE + | gcvALLOC_FLAG_MEMLIMIT + | gcvALLOC_FLAG_ALLOC_ON_FAULT + | gcvALLOC_FLAG_DMABUF_EXPORTABLE +#if defined(CONFIG_ZONE_DMA32) && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) + | gcvALLOC_FLAG_4GB_ADDR +#endif +#ifdef CONFIG_MCST + | gcvALLOC_FLAG_4GB_ADDR +#endif + ; + +#if defined(gcdEMULATE_SECURE_ALLOCATOR) + allocator->capability |= gcvALLOC_FLAG_SECURITY; +#endif + + *Allocator = allocator; + + return gcvSTATUS_OK; + +OnError: + if (allocator) + { + kfree(allocator); + } + return status; +} + diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_reserved_mem.c b/drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_reserved_mem.c new file mode 100644 index 000000000000..0b35e3dbac6d --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_reserved_mem.c @@ -0,0 +1,511 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. 
+* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#include "gc_hal_kernel_linux.h" +#include "gc_hal_kernel_allocator.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#define _GC_OBJ_ZONE gcvZONE_OS + +/* + * reserved_mem is for contiguous pool, internal pool and external pool, etc. + */ + +/* mdl private. */ +struct reserved_mem +{ + unsigned long start; + unsigned long size; + char name[32]; + int release; + + /* Link together. */ + struct list_head link; +}; + +/* allocator info. */ +struct reserved_mem_alloc +{ + /* Record allocated reserved memory regions. */ + struct list_head region; + struct mutex lock; +}; + +static int reserved_mem_show(struct seq_file* m, void* data) +{ + struct list_head *pos; + gcsINFO_NODE *node = m->private; + gckALLOCATOR Allocator = node->device; + struct reserved_mem_alloc *alloc = Allocator->privateData; + + list_for_each(pos, &alloc->region) + { + struct reserved_mem * res= list_entry(pos, struct reserved_mem, link); + + seq_printf(m, "0x%08lx-0x%08lx : %s\n", + res->start, res->start + res->size -1, res->name); + } + + return 0; +} + +static gcsINFO info_list[] = +{ + {"reserved-mem", reserved_mem_show}, +}; + +static void +reserved_mem_debugfs_init( + IN gckALLOCATOR Allocator, + IN gckDEBUGFS_DIR Root + ) +{ + gcmkVERIFY_OK( + gckDEBUGFS_DIR_Init(&Allocator->debugfsDir, Root->root, "reserved-mem")); + + gcmkVERIFY_OK(gckDEBUGFS_DIR_CreateFiles( + &Allocator->debugfsDir, + info_list, + gcmCOUNTOF(info_list), + Allocator + )); +} + +static void +reserved_mem_debugfs_cleanup( + IN gckALLOCATOR Allocator + ) +{ + gcmkVERIFY_OK(gckDEBUGFS_DIR_RemoveFiles( + &Allocator->debugfsDir, + info_list, + gcmCOUNTOF(info_list) + )); + + gckDEBUGFS_DIR_Deinit(&Allocator->debugfsDir); +} + +static gceSTATUS +reserved_mem_attach( + IN gckALLOCATOR Allocator, + IN gcsATTACH_DESC_PTR Desc, + IN PLINUX_MDL Mdl + ) +{ + struct reserved_mem_alloc *alloc = 
Allocator->privateData; + struct reserved_mem *res; + struct resource *region = NULL; + + res = kzalloc(sizeof(struct reserved_mem), GFP_KERNEL | gcdNOWARN); + + if (!res) + return gcvSTATUS_OUT_OF_MEMORY; + + res->start = Desc->reservedMem.start; + res->size = Desc->reservedMem.size; + strncpy(res->name, Desc->reservedMem.name, sizeof(res->name)-1); + res->release = 1; + + if (!Desc->reservedMem.requested) + { + region = request_mem_region(res->start, res->size, res->name); + + if (!region) + { + printk("request mem %s(0x%lx - 0x%lx) failed\n", + res->name, res->start, res->start + res->size - 1); + + kfree(res); + return gcvSTATUS_OUT_OF_RESOURCES; + } + + res->release = 1; + } + + mutex_lock(&alloc->lock); + list_add(&res->link, &alloc->region); + mutex_unlock(&alloc->lock); + + Mdl->priv = res; + + return gcvSTATUS_OK; +} + +static void +reserved_mem_detach( + IN gckALLOCATOR Allocator, + IN OUT PLINUX_MDL Mdl + ) +{ + struct reserved_mem_alloc *alloc = Allocator->privateData; + struct reserved_mem *res = Mdl->priv; + + /* unlink from region list. */ + mutex_lock(&alloc->lock); + list_del_init(&res->link); + mutex_unlock(&alloc->lock); + + if (res->release) + { + release_mem_region(res->start, res->size); + } + + kfree(res); +} + +static gceSTATUS +reserved_mem_mmap( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctBOOL Cacheable, + IN gctSIZE_T skipPages, + IN gctSIZE_T numPages, + IN struct vm_area_struct *vma + ) +{ + struct reserved_mem *res = (struct reserved_mem*)Mdl->priv; + unsigned long pfn; + gceSTATUS status = gcvSTATUS_OK; + + gcmkHEADER_ARG("Allocator=%p Mdl=%p vma=%p", Allocator, Mdl, vma); + + gcmkASSERT(skipPages + numPages <= Mdl->numPages); + + pfn = (res->start >> PAGE_SHIFT) + skipPages; + + /* Make this mapping non-cached. 
*/ + vma->vm_flags |= gcdVM_FLAGS; + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + + if (remap_pfn_range(vma, vma->vm_start, + pfn, numPages << PAGE_SHIFT, vma->vm_page_prot) < 0) + { + gcmkTRACE( + gcvLEVEL_ERROR, + "%s(%d): remap_pfn_range error.", + __FUNCTION__, __LINE__ + ); + + status = gcvSTATUS_OUT_OF_MEMORY; + } + + gcmkFOOTER(); + return status; +} + +static void +reserved_mem_unmap_user( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN PLINUX_MDL_MAP MdlMap, + IN gctUINT32 Size + ) +{ + if (unlikely(!current->mm)) + return; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) + if (vm_munmap((unsigned long)MdlMap->vmaAddr, (unsigned long)Size) < 0) + { + printk("%s: vm_munmap failed\n", __func__); + } +#else + down_write(¤t->mm->mmap_sem); + if (do_munmap(current->mm, (unsigned long)MdlMap->vmaAddr, (unsigned long)Size) < 0) + { + printk("%s: do_munmap failed\n", __func__); + } + up_write(¤t->mm->mmap_sem); +#endif +} + +static gceSTATUS +reserved_mem_map_user( + gckALLOCATOR Allocator, + PLINUX_MDL Mdl, + PLINUX_MDL_MAP MdlMap, + gctBOOL Cacheable + ) +{ + struct reserved_mem *res = (struct reserved_mem*)Mdl->priv; + gctPOINTER userLogical = gcvNULL; + gceSTATUS status = gcvSTATUS_OK; + + gcmkHEADER_ARG("Allocator=%p Mdl=%p Cacheable=%d", Allocator, Mdl, Cacheable); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) + userLogical = (gctPOINTER)vm_mmap(NULL, 0L, res->size, + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE, 0); +#else + down_write(¤t->mm->mmap_sem); + userLogical = (gctPOINTER)do_mmap_pgoff(NULL, 0L, res->size, + PROT_READ | PROT_WRITE, MAP_SHARED, 0); + up_write(¤t->mm->mmap_sem); +#endif + + gcmkTRACE_ZONE( + gcvLEVEL_INFO, gcvZONE_OS, + "%s(%d): vmaAddr->%p for phys_addr->%p", + __FUNCTION__, __LINE__, userLogical, Mdl + ); + + if (IS_ERR(userLogical)) + { + gcmkTRACE_ZONE( + gcvLEVEL_INFO, gcvZONE_OS, + "%s(%d): do_mmap_pgoff error", + __FUNCTION__, __LINE__ + ); + + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + 
+ down_write(¤t->mm->mmap_sem); + do + { + struct vm_area_struct *vma = find_vma(current->mm, (unsigned long)userLogical); + if (vma == gcvNULL) + { + gcmkTRACE_ZONE( + gcvLEVEL_INFO, gcvZONE_OS, + "%s(%d): find_vma error", + __FUNCTION__, __LINE__ + ); + + gcmkERR_BREAK(gcvSTATUS_OUT_OF_RESOURCES); + } + + gcmkERR_BREAK(reserved_mem_mmap(Allocator, Mdl, gcvFALSE, 0, Mdl->numPages, vma)); + + MdlMap->vmaAddr = userLogical; + MdlMap->cacheable = gcvFALSE; + MdlMap->vma = vma; + } + while (gcvFALSE); + up_write(¤t->mm->mmap_sem); + +OnError: + if (gcmIS_ERROR(status) && userLogical) + { + reserved_mem_unmap_user(Allocator, Mdl, userLogical, res->size); + } + gcmkFOOTER(); + return status; +} + +static gceSTATUS +reserved_mem_map_kernel( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + OUT gctPOINTER *Logical + ) +{ + struct reserved_mem *res = Mdl->priv; + void *vaddr; + + /* Should never run here now. */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) + vaddr = memremap(res->start, res->size, MEMREMAP_WC); +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) + vaddr = memremap(res->start, res->size, MEMREMAP_WT); +#else + vaddr = ioremap_nocache(res->start, res->size); +#endif + + if (!vaddr) + { + return gcvSTATUS_OUT_OF_MEMORY; + } + + *Logical = vaddr; + return gcvSTATUS_OK;; +} + +static gceSTATUS +reserved_mem_unmap_kernel( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctPOINTER Logical + ) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) + memunmap((void *)Logical); +#else + iounmap((void *)Logical); +#endif + return gcvSTATUS_OK; +} + +static gceSTATUS +reserved_mem_cache_op( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctSIZE_T Offset, + IN gctPOINTER Logical, + IN gctUINT32 Bytes, + IN gceCACHEOPERATION Operation + ) +{ + /* Always WC or UC, safe to use mb. 
*/ + switch (Operation) + { + case gcvCACHE_CLEAN: + case gcvCACHE_FLUSH: + _MemoryBarrier(); + break; + case gcvCACHE_INVALIDATE: + break; + default: + return gcvSTATUS_INVALID_ARGUMENT; + } + + return gcvSTATUS_OK; +} + +static gceSTATUS +reserved_mem_get_physical( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctUINT32 Offset, + OUT gctPHYS_ADDR_T * Physical + ) +{ + struct reserved_mem *res = Mdl->priv; + *Physical = res->start + Offset; + + return gcvSTATUS_OK; +} + +static void +reserved_mem_dtor( + gcsALLOCATOR *Allocator + ) +{ + reserved_mem_debugfs_cleanup(Allocator); + + if (Allocator->privateData) + { + kfree(Allocator->privateData); + } + + kfree(Allocator); +} + +/* GFP allocator operations. */ +static gcsALLOCATOR_OPERATIONS reserved_mem_ops = { + .Alloc = NULL, + .Attach = reserved_mem_attach, + .Free = reserved_mem_detach, + .Mmap = reserved_mem_mmap, + .MapUser = reserved_mem_map_user, + .UnmapUser = reserved_mem_unmap_user, + .MapKernel = reserved_mem_map_kernel, + .UnmapKernel = reserved_mem_unmap_kernel, + .Cache = reserved_mem_cache_op, + .Physical = reserved_mem_get_physical, +}; + +/* GFP allocator entry. */ +gceSTATUS +_ReservedMemoryAllocatorInit( + IN gckOS Os, + IN gcsDEBUGFS_DIR *Parent, + OUT gckALLOCATOR * Allocator + ) +{ + gceSTATUS status; + gckALLOCATOR allocator = gcvNULL; + struct reserved_mem_alloc *alloc = NULL; + + gcmkONERROR( + gckALLOCATOR_Construct(Os, &reserved_mem_ops, &allocator)); + + alloc = kzalloc(sizeof(*alloc), GFP_KERNEL | gcdNOWARN); + + if (!alloc) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + INIT_LIST_HEAD(&alloc->region); + mutex_init(&alloc->lock); + + /* Register private data. 
*/ + allocator->privateData = alloc; + allocator->destructor = reserved_mem_dtor; + + reserved_mem_debugfs_init(allocator, Parent); + + allocator->capability = gcvALLOC_FLAG_LINUX_RESERVED_MEM; + + *Allocator = allocator; + + return gcvSTATUS_OK; + +OnError: + if (allocator) + { + kfree(allocator); + } + return status; +} + diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_user_memory.c b/drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_user_memory.c new file mode 100644 index 000000000000..ea818dd557e5 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_user_memory.c @@ -0,0 +1,842 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#include "gc_hal_kernel_linux.h" +#include "gc_hal_kernel_allocator.h" +#include + +#include +#include + +#define _GC_OBJ_ZONE gcvZONE_ALLOCATOR + +enum um_desc_type +{ + UM_PHYSICAL_MAP, + UM_PAGE_MAP, + UM_PFN_MAP, +}; + +#if LINUX_VERSION_CODE < KERNEL_VERSION (2,6,24) +struct sg_table +{ + struct scatterlist *sgl; + unsigned int nents; + unsigned int orig_nents; +}; +#endif + +/* Descriptor of a user memory imported. */ +struct um_desc +{ + int type; + + union + { + /* UM_PHYSICAL_MAP. */ + unsigned long physical; + + /* UM_PAGE_MAP. 
*/ + struct + { + struct page **pages; + struct sg_table sgt; + }; + + /* UM_PFN_MAP. */ + struct + { + unsigned long *pfns; + int *refs; + }; + }; + + /* contiguous chunks, does not include padding pages. */ + int chunk_count; + + unsigned long vm_flags; + unsigned long user_vaddr; + size_t size; + unsigned long offset; + + size_t pageCount; + size_t extraPage; +}; + +static int import_physical_map(struct um_desc *um, unsigned long phys) +{ + um->type = UM_PHYSICAL_MAP; + um->physical = phys & PAGE_MASK; + um->chunk_count = 1; + return 0; +} + +static int import_page_map(struct um_desc *um, + unsigned long addr, size_t page_count, size_t size) +{ + int i; + int result; + struct page **pages; + + pages = kzalloc(page_count * sizeof(void *), GFP_KERNEL | gcdNOWARN); + if (!pages) + return -ENOMEM; + + down_read(¤t->mm->mmap_sem); + + result = get_user_pages( +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) + current, + current->mm, +#endif + addr & PAGE_MASK, + page_count, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0) + FOLL_WRITE, +#else + 1, + 0, +#endif + pages, + NULL); + + up_read(¤t->mm->mmap_sem); + + if (result < page_count) + { + for (i = 0; i < result; i++) + { + if (pages[i]) + { + put_page(pages[i]); + } + } + + kfree(pages); + return -ENODEV; + } + + um->chunk_count = 1; + for (i = 1; i < page_count; i++) + { + if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) + { + ++um->chunk_count; + } + } + +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION (3,6,0)) && \ + (defined(ARCH_HAS_SG_CHAIN) || defined(CONFIG_ARCH_HAS_SG_CHAIN))) || \ + ((LINUX_VERSION_CODE >= KERNEL_VERSION (5,4,0)) && \ + !defined(CONFIG_ARCH_NO_SG_CHAIN)) + result = sg_alloc_table_from_pages(&um->sgt, pages, page_count, + addr & ~PAGE_MASK, size, GFP_KERNEL | gcdNOWARN); + +#else + result = alloc_sg_list_from_pages(&um->sgt.sgl, pages, page_count, + addr & ~PAGE_MASK, size, &um->sgt.nents); + + um->sgt.orig_nents = um->sgt.nents; +#endif + if (unlikely(result < 0)) + { + 
printk("[galcore]: %s: sg_alloc_table_from_pages failed\n", __FUNCTION__); + goto error; + } + + result = dma_map_sg(galcore_device, um->sgt.sgl, um->sgt.nents, DMA_TO_DEVICE); + if (unlikely(result != um->sgt.nents)) + { + printk("[galcore]: %s: dma_map_sg failed\n", __FUNCTION__); + goto error; + } + + um->type = UM_PAGE_MAP; + um->pages = pages; + + return 0; + +error: +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION (3,6,0)) && \ + (defined(ARCH_HAS_SG_CHAIN) || defined(CONFIG_ARCH_HAS_SG_CHAIN))) || \ + ((LINUX_VERSION_CODE >= KERNEL_VERSION (5,4,0)) && \ + !defined(CONFIG_ARCH_NO_SG_CHAIN)) + sg_free_table(&um->sgt); +#else + kfree(um->sgt.sgl); +#endif + + if (um->pages) + { + kfree(um->pages); + } + return result; +} + + +static int import_pfn_map(struct um_desc *um, + unsigned long addr, size_t pfn_count) +{ + int i; + struct vm_area_struct *vma; + unsigned long *pfns; + int *refs; + + if (!current->mm) + return -ENOTTY; + + down_read(¤t->mm->mmap_sem); + vma = find_vma(current->mm, addr); + up_read(¤t->mm->mmap_sem); + + if (!vma) + return -ENOTTY; + + pfns = kzalloc(pfn_count * sizeof(unsigned long), GFP_KERNEL | gcdNOWARN); + + if (!pfns) + return -ENOMEM; + + refs = kzalloc(pfn_count * sizeof(int), GFP_KERNEL | gcdNOWARN); + + if (!refs) + { + kfree(pfns); + return -ENOMEM; + } + + for (i = 0; i < pfn_count; i++) + { + spinlock_t *ptl; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + pgd = pgd_offset(current->mm, addr); + if (pgd_none(*pgd) || pgd_bad(*pgd)) + goto err; + + pud = pud_offset(pgd, addr); + if (pud_none(*pud) || pud_bad(*pud)) + goto err; + + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd) || pmd_bad(*pmd)) + goto err; + + pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl); + if (!pte) + { + spin_unlock(ptl); + goto err; + } + + if (!pte_present(*pte)) + { + pte_unmap_unlock(pte, ptl); + goto err; + } + + pfns[i] = pte_pfn(*pte); + pte_unmap_unlock(pte, ptl); + + /* Advance to next. 
*/ + addr += PAGE_SIZE; + } + + for (i = 0; i < pfn_count; i++) + { + if (pfn_valid(pfns[i])) + { + struct page *page = pfn_to_page(pfns[i]); + refs[i] = get_page_unless_zero(page); + } + } + + um->chunk_count = 1; + for (i = 1; i < pfn_count; i++) + { + if (pfns[i] != pfns[i - 1] + 1) + { + ++um->chunk_count; + } + } + + um->type = UM_PFN_MAP; + um->pfns = pfns; + um->refs = refs; + return 0; + +err: + if (pfns) + kfree(pfns); + + if (refs) + kfree(refs); + + return -ENOTTY; +} + +static gceSTATUS +_Import( + IN gckOS Os, + IN gctPOINTER Memory, + IN gctUINT32 Physical, + IN gctSIZE_T Size, + IN struct um_desc * UserMemory + ) +{ + gceSTATUS status = gcvSTATUS_OK; + unsigned long vm_flags = 0; + struct vm_area_struct *vma = NULL; + unsigned long start, end, memory; + int result = 0; + + gctSIZE_T extraPage; + gctSIZE_T pageCount, i; + + gcmkHEADER_ARG("Os=0x%p Memory=%p Physical=0x%x Size=%lu", Os, Memory, Physical, Size); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Memory != gcvNULL || Physical != ~0U); + gcmkVERIFY_ARGUMENT(Size > 0); + + memory = (unsigned long)Memory; + + /* Get the number of required pages. */ + end = (memory + Size + PAGE_SIZE - 1) >> PAGE_SHIFT; + start = memory >> PAGE_SHIFT; + pageCount = end - start; + + /* Allocate extra page to avoid cache overflow */ + extraPage = (((memory + gcmALIGN(Size + 64, 64) + PAGE_SIZE - 1) >> PAGE_SHIFT) > end) ? 1 : 0; + + gcmkTRACE_ZONE( + gcvLEVEL_INFO, _GC_OBJ_ZONE, + "%s(%d): pageCount: %d. extraPage: %d", + __FUNCTION__, __LINE__, + pageCount, extraPage + ); + + /* Overflow. 
*/ + if ((memory + Size) < memory) + { + gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT); + return gcvSTATUS_INVALID_ARGUMENT; + } + + if (memory) + { + unsigned long vaddr = memory; + + for (i = 0; i < pageCount; i++) + { + u32 data; + + get_user(data, (u32 *)vaddr); + put_user(data, (u32 *)vaddr); + vaddr += PAGE_SIZE; + } + + vma = find_vma(current->mm, memory); + + if (!vma) + { + /* No such memory, or across vmas. */ + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + +#ifdef CONFIG_ARM + /* coherent cache in case vivt or vipt-aliasing cache. */ + __cpuc_flush_user_range(memory, memory + Size, vma->vm_flags); +#endif + + vm_flags = vma->vm_flags; + vaddr = vma->vm_end; + + while (vaddr < memory + Size) + { + vma = find_vma(current->mm, vaddr); + + if (!vma) + { + /* No such memory. */ + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + if ((vma->vm_flags & VM_PFNMAP) != (vm_flags & VM_PFNMAP)) + { + /* Can not support different map type: both PFN and PAGE detected. */ + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + } + + vaddr = vma->vm_end; + } + } + + if (Physical != gcvINVALID_PHYSICAL_ADDRESS) + { + result = import_physical_map(UserMemory, Physical); + } + else + { + if (vm_flags & VM_PFNMAP) + { + result = import_pfn_map(UserMemory, memory, pageCount); + } + else + { + result = import_page_map(UserMemory, memory, pageCount, Size); + } + } + + if (result == -EINVAL) + { + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + else if (result == -ENOMEM) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + else if (result < 0) + { + gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); + } + + if(Os->device->platform->flagBits & gcvPLATFORM_FLAG_LIMIT_4G_ADDRESS ) + { + gctPHYS_ADDR_T addr; + + if (Physical != gcvINVALID_PHYSICAL_ADDRESS) + { + if(Physical >0xFFFFFFFFu || Physical + Size > 0xFFFFFFFFu ) + { + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + } + else if (vm_flags & VM_PFNMAP) + { + for(i = 0; i < pageCount; i++) + { + addr = UserMemory->pfns[i] << PAGE_SHIFT; + if( 
addr > 0xFFFFFFFFu) + { + kfree(UserMemory->pfns); + UserMemory->pfns = gcvNULL; + kfree(UserMemory->refs) ; + UserMemory->refs = gcvNULL; + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + } + } + else + { + for (i = 0; i< pageCount; i++) + { + addr = page_to_phys(UserMemory->pages[i]); + if(addr > 0xFFFFFFFFu ) + { + kfree(UserMemory->pages); + UserMemory->pages = gcvNULL; + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + } + } + } + + UserMemory->vm_flags = vm_flags; + UserMemory->user_vaddr = (unsigned long)Memory; + UserMemory->size = Size; + UserMemory->offset = (Physical != gcvINVALID_PHYSICAL_ADDRESS) + ? (Physical & ~PAGE_MASK) + : (memory & ~PAGE_MASK); + + UserMemory->pageCount = pageCount; + UserMemory->extraPage = extraPage; + + /* Success. */ + gcmkFOOTER(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +static gceSTATUS +_UserMemoryAttach( + IN gckALLOCATOR Allocator, + IN gcsATTACH_DESC_PTR Desc, + IN PLINUX_MDL Mdl + ) +{ + gceSTATUS status; + struct um_desc * userMemory = gcvNULL; + + gckOS os = Allocator->os; + + gcmkHEADER(); + + /* Handle is meangless for this importer. 
*/ + gcmkVERIFY_ARGUMENT(Desc != gcvNULL); + + gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(struct um_desc), (gctPOINTER *)&userMemory)); + + gckOS_ZeroMemory(userMemory, gcmSIZEOF(struct um_desc)); + + gcmkONERROR(_Import(os, Desc->userMem.memory, Desc->userMem.physical, Desc->userMem.size, userMemory)); + + Mdl->priv = userMemory; + Mdl->numPages = userMemory->pageCount + userMemory->extraPage; + Mdl->contiguous = (userMemory->chunk_count == 1); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (userMemory != gcvNULL) + { + gckOS_Free(os,(gctPOINTER)userMemory); + } + gcmkFOOTER(); + return status; +} + +static void release_physical_map(struct um_desc *um) +{ +} + +static void release_page_map(struct um_desc *um) +{ + int i; + + dma_unmap_sg(galcore_device, um->sgt.sgl, um->sgt.nents, DMA_TO_DEVICE); + +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION (3,6,0)) && \ + (defined(ARCH_HAS_SG_CHAIN) || defined(CONFIG_ARCH_HAS_SG_CHAIN))) || \ + ((LINUX_VERSION_CODE >= KERNEL_VERSION (5,4,0)) && \ + !defined(CONFIG_ARCH_NO_SG_CHAIN)) + sg_free_table(&um->sgt); +#else + kfree(um->sgt.sgl); +#endif + + for (i = 0; i < um->pageCount; i++) + { + if (!PageReserved(um->pages[i])) + { + SetPageDirty(um->pages[i]); + } + + put_page(um->pages[i]); + } + + kfree(um->pages); +} + +static void release_pfn_map(struct um_desc *um) +{ + + int i; + + for (i = 0; i < um->pageCount; i++) + { + if (pfn_valid(um->pfns[i])) + { + struct page *page = pfn_to_page(um->pfns[i]); + if (!PageReserved(page)) + { + SetPageDirty(page); + } + + if (um->refs[i]) + { + put_page(page); + } + } + } + + kfree(um->pfns); + kfree(um->refs); +} + +static void +_UserMemoryFree( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl + ) +{ + gckOS os = Allocator->os; + struct um_desc *userMemory = Mdl->priv; + + gcmkHEADER(); + + if (userMemory) + { + switch (userMemory->type) + { + case UM_PHYSICAL_MAP: + release_physical_map(userMemory); + break; + case UM_PAGE_MAP: + release_page_map(userMemory); + break; + 
case UM_PFN_MAP: + release_pfn_map(userMemory); + break; + } + + gcmkOS_SAFE_FREE(os, userMemory); + } + + gcmkFOOTER_NO(); +} + +static gceSTATUS +_UserMemoryMapUser( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN PLINUX_MDL_MAP MdlMap, + IN gctBOOL Cacheable + ) +{ + struct um_desc *userMemory = Mdl->priv; + + MdlMap->vmaAddr = (gctPOINTER)userMemory->user_vaddr; + MdlMap->cacheable = gcvTRUE; + + return gcvSTATUS_OK; +} + +static void +_UserMemoryUnmapUser( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN PLINUX_MDL_MAP MdlMap, + IN gctUINT32 Size + ) +{ + return; +} + +static gceSTATUS +_UserMemoryMapKernel( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + OUT gctPOINTER *Logical + ) +{ + /* Kernel doesn't acess video memory. */ + return gcvSTATUS_NOT_SUPPORTED; +} + +static gceSTATUS +_UserMemoryUnmapKernel( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctPOINTER Logical + ) +{ + /* Kernel doesn't acess video memory. */ + return gcvSTATUS_NOT_SUPPORTED; +} + +static gceSTATUS +_UserMemoryCache( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctSIZE_T Offset, + IN gctPOINTER Logical, + IN gctUINT32 Bytes, + IN gceCACHEOPERATION Operation + ) +{ + struct um_desc *um = Mdl->priv; + enum dma_data_direction dir; + + if (um->type != UM_PAGE_MAP) + { + _MemoryBarrier(); + return gcvSTATUS_OK; + } + +#ifdef CONFIG_ARM + /* coherent cache in case vivt or vipt-aliasing cache. 
*/ + __cpuc_flush_user_range(um->user_vaddr, + um->user_vaddr + um->size, um->vm_flags); +#endif + + switch (Operation) + { + case gcvCACHE_CLEAN: + dir = DMA_TO_DEVICE; + dma_sync_sg_for_device(galcore_device, um->sgt.sgl, um->sgt.nents, dir); + break; + case gcvCACHE_FLUSH: + dir = DMA_BIDIRECTIONAL; + dma_sync_sg_for_device(galcore_device, um->sgt.sgl, um->sgt.nents, dir); + break; + case gcvCACHE_INVALIDATE: + dir = DMA_FROM_DEVICE; + dma_sync_sg_for_cpu(galcore_device, um->sgt.sgl, um->sgt.nents, dir); + break; + default: + return gcvSTATUS_INVALID_ARGUMENT; + } + + + return gcvSTATUS_OK; +} + +static gceSTATUS +_UserMemoryPhysical( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctUINT32 Offset, + OUT gctPHYS_ADDR_T * Physical + ) +{ + gckOS os = Allocator->os; + struct um_desc *userMemory = Mdl->priv; + unsigned long offset = Offset + userMemory->offset; + gctUINT32 offsetInPage = offset & ~PAGE_MASK; + gctUINT32 index = offset / PAGE_SIZE; + + if (index >= userMemory->pageCount) + { + if (index < userMemory->pageCount + userMemory->extraPage) + { + *Physical = page_to_phys(os->paddingPage); + } + else + { + return gcvSTATUS_INVALID_ARGUMENT; + } + } + else + { + switch (userMemory->type) + { + case UM_PHYSICAL_MAP: + *Physical = userMemory->physical + index * PAGE_SIZE; + break; + case UM_PAGE_MAP: + *Physical = page_to_phys(userMemory->pages[index]); + break; + case UM_PFN_MAP: + *Physical = userMemory->pfns[index] << PAGE_SHIFT; + break; + } + } + + *Physical += offsetInPage; + + return gcvSTATUS_OK; +} + +static void +_UserMemoryAllocatorDestructor( + gcsALLOCATOR *Allocator + ) +{ + if (Allocator->privateData) + { + kfree(Allocator->privateData); + } + + kfree(Allocator); +} + +/* User memory allocator (importer) operations. 
*/ +static gcsALLOCATOR_OPERATIONS UserMemoryAllocatorOperations = +{ + .Attach = _UserMemoryAttach, + .Free = _UserMemoryFree, + .MapUser = _UserMemoryMapUser, + .UnmapUser = _UserMemoryUnmapUser, + .MapKernel = _UserMemoryMapKernel, + .UnmapKernel = _UserMemoryUnmapKernel, + .Cache = _UserMemoryCache, + .Physical = _UserMemoryPhysical, +}; + +/* Default allocator entry. */ +gceSTATUS +_UserMemoryAlloctorInit( + IN gckOS Os, + IN gcsDEBUGFS_DIR *Parent, + OUT gckALLOCATOR * Allocator + ) +{ + gceSTATUS status; + gckALLOCATOR allocator; + + gcmkONERROR( + gckALLOCATOR_Construct(Os, &UserMemoryAllocatorOperations, &allocator)); + + allocator->destructor = _UserMemoryAllocatorDestructor; + + allocator->capability = gcvALLOC_FLAG_USERMEMORY; + + *Allocator = allocator; + + return gcvSTATUS_OK; + +OnError: + return status; +} + diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_allocator.c b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_allocator.c new file mode 100644 index 000000000000..61441f5f8352 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_allocator.c @@ -0,0 +1,264 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. 
+* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#include "gc_hal_kernel_linux.h" +#include "gc_hal_kernel_allocator.h" +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) +#include +#endif +#include + +#include "gc_hal_kernel_allocator_array.h" +#include "gc_hal_kernel_platform.h" + +#define _GC_OBJ_ZONE gcvZONE_OS + + +/******************************************************************************\ +******************************** Debugfs Support ******************************* +\******************************************************************************/ + +static gceSTATUS +_AllocatorDebugfsInit( + IN gckOS Os + ) +{ + gceSTATUS status; + gckGALDEVICE device = Os->device; + + gckDEBUGFS_DIR dir = &Os->allocatorDebugfsDir; + + gcmkONERROR(gckDEBUGFS_DIR_Init(dir, device->debugfsDir.root, "allocators")); + + return gcvSTATUS_OK; + +OnError: + return status; +} + +static void +_AllocatorDebugfsCleanup( + IN gckOS Os + ) +{ + gckDEBUGFS_DIR dir = &Os->allocatorDebugfsDir; + + gckDEBUGFS_DIR_Deinit(dir); +} + +/***************************************************************************\ +************************ Allocator management ******************************* +\***************************************************************************/ + +gceSTATUS +gckOS_ImportAllocators( + gckOS Os + ) +{ + gceSTATUS status; + gctUINT i; + gckALLOCATOR allocator; + + _AllocatorDebugfsInit(Os); + + INIT_LIST_HEAD(&Os->allocatorList); + + for (i = 0; i < gcmCOUNTOF(allocatorArray); i++) + { + if (allocatorArray[i].construct) + { + /* Construct allocator. 
*/ + status = allocatorArray[i].construct(Os, &Os->allocatorDebugfsDir, &allocator); + + if (gcmIS_ERROR(status)) + { + gcmkPRINT("["DEVICE_NAME"]: Can't construct allocator(%s)", + allocatorArray[i].name); + + continue; + } + + allocator->name = allocatorArray[i].name; + + list_add_tail(&allocator->link, &Os->allocatorList); + } + } + +#if gcdDEBUG + list_for_each_entry(allocator, &Os->allocatorList, link) + { + gcmkTRACE_ZONE( + gcvLEVEL_WARNING, gcvZONE_OS, + "%s(%d) Allocator: %s", + __FUNCTION__, __LINE__, + allocator->name + ); + } +#endif + + return gcvSTATUS_OK; +} + +gceSTATUS +gckOS_FreeAllocators( + gckOS Os + ) +{ + gckALLOCATOR allocator; + gckALLOCATOR temp; + + list_for_each_entry_safe(allocator, temp, &Os->allocatorList, link) + { + list_del(&allocator->link); + + /* Destroy allocator. */ + allocator->destructor(allocator); + } + + _AllocatorDebugfsCleanup(Os); + + return gcvSTATUS_OK; +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION (3,6,0)) || \ + (LINUX_VERSION_CODE < KERNEL_VERSION (5,4,0) && \ + !defined (ARCH_HAS_SG_CHAIN) && \ + !defined (CONFIG_ARCH_HAS_SG_CHAIN)) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION (5,4,0) && \ + defined (CONFIG_ARCH_NO_SG_CHAIN)) + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) +static inline void sg_set_page(struct scatterlist *sg, struct page *page, + unsigned int len, unsigned int offset) +{ + sg->page = page; + sg->offset = offset; + sg->length = len; +} + +static inline void sg_mark_end(struct scatterlist *sg) +{ + (void)sg; +} +# endif + +int +alloc_sg_list_from_pages( + struct scatterlist **sgl, + struct page **pages, + unsigned int n_pages, + unsigned long offset, + unsigned long size, + unsigned int *nents + ) +{ + unsigned int chunks; + unsigned int i; + unsigned int cur_page; + struct scatterlist *s; + + chunks = 1; + + for (i = 1; i < n_pages; ++i) + { + if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) + { + ++chunks; + } + } + + s = kzalloc(sizeof(struct scatterlist) * chunks, GFP_KERNEL); 
+ if (unlikely(!s)) + { + return -ENOMEM; + } + + *sgl = s; + *nents = chunks; + + cur_page = 0; + + for (i = 0; i < chunks; i++, s++) + { + unsigned long chunk_size; + unsigned int j; + + for (j = cur_page + 1; j < n_pages; j++) + { + if (page_to_pfn(pages[j]) != page_to_pfn(pages[j - 1]) + 1) + { + break; + } + } + + chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset; + sg_set_page(s, pages[cur_page], min(size, chunk_size), offset); + size -= chunk_size; + offset = 0; + cur_page = j; + } + + sg_mark_end(s - 1); + + return 0; +} +#endif + diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_allocator.h b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_allocator.h new file mode 100644 index 000000000000..3ce38edd2e25 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_allocator.h @@ -0,0 +1,576 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#ifndef __gc_hal_kernel_allocator_h_ +#define __gc_hal_kernel_allocator_h_ + +#include "gc_hal_kernel_linux.h" +#include +#include + +typedef struct _gcsALLOCATOR * gckALLOCATOR; +typedef union _gcsATTACH_DESC * gcsATTACH_DESC_PTR; + +typedef struct _gcsALLOCATOR_OPERATIONS +{ + /************************************************************************** + ** + ** Alloc + ** + ** Allocte memory, request size is page aligned. + ** + ** INPUT: + ** + ** gckALLOCATOR Allocator + ** Pointer to an gckALLOCATOER object. + ** + ** PLINUX_Mdl + ** Pointer to Mdl whichs stores information + ** about allocated memory. + ** + ** gctSIZE_T NumPages + ** Number of pages need to allocate. + ** + ** gctUINT32 Flag + ** Allocation option. + ** + ** OUTPUT: + ** + ** Nothing. + ** + */ + gceSTATUS + (*Alloc)( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctSIZE_T NumPages, + IN gctUINT32 Flag + ); + + /************************************************************************** + ** + ** Free + ** + ** Free memory. + ** + ** INPUT: + ** + ** gckALLOCATOR Allocator + ** Pointer to an gckALLOCATOER object. + ** + ** PLINUX_MDL Mdl + ** Mdl which stores information. + ** + ** OUTPUT: + ** + ** Nothing. + ** + */ + void + (*Free)( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl + ); + + /************************************************************************** + ** + ** Mmap + ** + ** Map a page range of the memory to user space. + ** + ** INPUT: + ** gckALLOCATOR Allocator + ** Pointer to an gckALLOCATOER object. + ** + ** PLINUX_MDL Mdl + ** Pointer to a Mdl. + ** + ** gctSIZE_T skipPages + ** Number of page to be skipped from beginning of this memory. + ** + ** gctSIZE_T numPages + ** Number of pages to be mapped from skipPages. + ** + ** INOUT: + ** + ** struct vm_area_struct *vma + ** Pointer to VMM memory area. 
+ ** + */ + gceSTATUS + (*Mmap)( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctBOOL Cacheable, + IN gctSIZE_T skipPages, + IN gctSIZE_T numPages, + IN struct vm_area_struct *vma + ); + + /************************************************************************** + ** + ** MapUser + ** + ** Map memory to user space. + ** + ** INPUT: + ** gckALLOCATOR Allocator + ** Pointer to an gckALLOCATOER object. + ** + ** PLINUX_MDL Mdl + ** Pointer to a Mdl. + ** + ** gctBOOL Cacheable + ** Whether this mapping is cacheable. + ** + ** OUTPUT: + ** + ** gctPOINTER * UserLogical + ** Pointer to user logical address. + ** + ** Nothing. + ** + */ + gceSTATUS + (*MapUser)( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN PLINUX_MDL_MAP MdlMap, + IN gctBOOL Cacheable + ); + + /************************************************************************** + ** + ** UnmapUser + ** + ** Unmap address from user address space. + ** + ** INPUT: + ** gckALLOCATOR Allocator + ** Pointer to an gckALLOCATOER object. + ** + ** gctPOINTER Logical + ** Address to be unmap + ** + ** gctUINT32 Size + ** Size of address space + ** + ** OUTPUT: + ** + ** Nothing. + ** + */ + void + (*UnmapUser)( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN PLINUX_MDL_MAP MdlMap, + IN gctUINT32 Size + ); + + /************************************************************************** + ** + ** MapKernel + ** + ** Map memory to kernel space. + ** + ** INPUT: + ** gckALLOCATOR Allocator + ** Pointer to an gckALLOCATOER object. + ** + ** PLINUX_MDL Mdl + ** Pointer to a Mdl object. + ** + ** OUTPUT: + ** gctPOINTER * Logical + ** Mapped kernel address. + */ + gceSTATUS + (*MapKernel)( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + OUT gctPOINTER *Logical + ); + + /************************************************************************** + ** + ** UnmapKernel + ** + ** Unmap memory from kernel space. + ** + ** INPUT: + ** gckALLOCATOR Allocator + ** Pointer to an gckALLOCATOER object. 
+ ** + ** PLINUX_MDL Mdl + ** Pointer to a Mdl object. + ** + ** gctPOINTER Logical + ** Mapped kernel address. + ** + ** OUTPUT: + ** + ** Nothing. + ** + */ + gceSTATUS + (*UnmapKernel)( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctPOINTER Logical + ); + + /************************************************************************** + ** + ** Cache + ** + ** Maintain cache coherency. + ** + ** INPUT: + ** gckALLOCATOR Allocator + ** Pointer to an gckALLOCATOER object. + ** + ** PLINUX_MDL Mdl + ** Pointer to a Mdl object. + ** + ** gctSIZE_T Offset + ** Offset to this memory block + ** + ** gctPOINTER Logical + ** Logical address, could be user address or kernel address + ** + ** gctUINT32 Bytes + ** Size of memory region. + ** + ** gceCACHEOPERATION Opertaion + ** Cache operation. + ** + ** OUTPUT: + ** + ** Nothing. + ** + */ + gceSTATUS (*Cache)( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctSIZE_T Offset, + IN gctPOINTER Logical, + IN gctUINT32 Bytes, + IN gceCACHEOPERATION Operation + ); + + /************************************************************************** + ** + ** Physical + ** + ** Get physical address from a offset in memory region. + ** + ** INPUT: + ** gckALLOCATOR Allocator + ** Pointer to an gckALLOCATOER object. + ** + ** PLINUX_MDL Mdl + ** Pointer to a Mdl object. + ** + ** gctUINT32 Offset + ** Offset in this memory region. + ** + ** OUTPUT: + ** gctUINT32_PTR Physical + ** Physical address. + ** + */ + gceSTATUS (*Physical)( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctUINT32 Offset, + OUT gctPHYS_ADDR_T * Physical + ); + + /************************************************************************** + ** + ** Attach + ** + ** Import memory allocated by an external allocator. + ** + ** INPUT: + ** gckALLOCATOR Allocator + ** Pointer to an gckALLOCATOER object. + ** + ** gctUINT32 Handle + ** Handle of the memory. + ** + ** OUTPUT: + ** None. 
+ ** + */ + gceSTATUS (*Attach)( + IN gckALLOCATOR Allocator, + IN gcsATTACH_DESC_PTR Desc, + OUT PLINUX_MDL Mdl + ); + + /************************************************************************** + ** + ** GetSGT + ** + ** Get scatter-gather table from a range of the memory. + ** + ** INPUT: + ** gckALLOCATOR Allocator + ** Pointer to an gckALLOCATOER object. + ** + ** gctUINT32 Handle + ** Handle of the memory. + ** + ** gctSIZE_T Offset + ** Offset to the beginning of this mdl. + ** + ** gctSIZE_T Bytes + ** Total bytes form Offset. + ** + ** OUTPUT: + ** gctPOINTER *SGT + ** scatter-gather table + ** + */ + gceSTATUS (*GetSGT)( + IN gckALLOCATOR Allocator, + IN PLINUX_MDL Mdl, + IN gctSIZE_T Offset, + IN gctSIZE_T Bytes, + OUT gctPOINTER *SGT + ); +} +gcsALLOCATOR_OPERATIONS; + +typedef struct _gcsALLOCATOR +{ + /* Pointer to gckOS Object. */ + gckOS os; + + /* Name. */ + gctSTRING name; + + /* Operations. */ + gcsALLOCATOR_OPERATIONS * ops; + + /* Capability of this allocator. */ + gctUINT32 capability; + + /* Debugfs entry of this allocator. */ + gcsDEBUGFS_DIR debugfsDir; + + /* Private data used by customer allocator. */ + void * privateData; + + /* Allocator destructor. */ + void (*destructor)(struct _gcsALLOCATOR *); + + struct list_head link; +} +gcsALLOCATOR; + +typedef struct _gcsALLOCATOR_DESC +{ + /* Name of a allocator. */ + char * name; + + /* Entry function to construct a allocator. */ + gceSTATUS (*construct)(gckOS, gcsDEBUGFS_DIR *, gckALLOCATOR *); +} +gcsALLOCATOR_DESC; + +typedef union _gcsATTACH_DESC +{ + /* gcvALLOC_FLAG_DMABUF */ + struct + { + gctPOINTER dmabuf; + } + dmaBuf; + + /* gcvALLOC_FLAG_USERMEMORY */ + struct + { + gctPOINTER memory; + gctPHYS_ADDR_T physical; + gctSIZE_T size; + } + userMem; + + /* gcvALLOC_FLAG_EXTERNAL_MEMORY */ + struct + { + gcsEXTERNAL_MEMORY_INFO info; + } + externalMem; + + /* Reserved memory. 
*/ + struct + { + unsigned long start; + unsigned long size; + const char * name; + int requested; + } + reservedMem; +} +gcsATTACH_DESC; + +/* +* Helpers +*/ + +/* Fill a gcsALLOCATOR_DESC structure. */ +#define gcmkDEFINE_ALLOCATOR_DESC(Name, Construct) \ + { \ + .name = Name, \ + .construct = Construct, \ + } + +/* Construct a allocator. */ +static inline gceSTATUS +gckALLOCATOR_Construct( + IN gckOS Os, + IN gcsALLOCATOR_OPERATIONS * Operations, + OUT gckALLOCATOR * Allocator + ) +{ + gceSTATUS status; + gckALLOCATOR allocator; + + gcmkASSERT(Allocator != gcvNULL); + gcmkASSERT + ( Operations + && (Operations->Alloc || Operations->Attach) + && (Operations->Free) + && Operations->MapUser + && Operations->UnmapUser + && Operations->MapKernel + && Operations->UnmapKernel + && Operations->Cache + && Operations->Physical + ); + + allocator = kzalloc(sizeof(gcsALLOCATOR), GFP_KERNEL | gcdNOWARN); + if (unlikely(!allocator)) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + /* Record os. */ + allocator->os = Os; + + /* Set operations. */ + allocator->ops = Operations; + + *Allocator = allocator; + + return gcvSTATUS_OK; + +OnError: + return status; +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION (3,6,0)) || \ + (LINUX_VERSION_CODE < KERNEL_VERSION (5,4,0) && \ + !defined (ARCH_HAS_SG_CHAIN) && \ + !defined (CONFIG_ARCH_HAS_SG_CHAIN)) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION (5,4,0) && \ + defined (CONFIG_ARCH_NO_SG_CHAIN)) +int +alloc_sg_list_from_pages( + struct scatterlist **sgl, + struct page **pages, + unsigned int n_pages, + unsigned long offset, + unsigned long size, + unsigned int *nents + ); +#endif + +/* + How to implement customer allocator + + Build in customer alloctor + + It is recommanded that customer allocator is implmented in independent + source file(s) which is specified by CUSOMTER_ALLOCATOR_OBJS in Kbuld. 
+ + Register gcsALLOCATOR + + For each customer specified allocator, a desciption entry must be added + to allocatorArray defined in gc_hal_kernel_allocator_array.h. + + An entry in allocatorArray is a gcsALLOCATOR_DESC structure which describes + name and constructor of a gckALLOCATOR object. + + + Implement gcsALLOCATOR_DESC.init() + + In gcsALLOCATOR_DESC.init(), gckALLOCATOR_Construct should be called + to create a gckALLOCATOR object, customer specified private data can + be put in gcsALLOCATOR.privateData. + + + Implement gcsALLOCATOR_OPERATIONS + + When call gckALLOCATOR_Construct to create a gckALLOCATOR object, a + gcsALLOCATOR_OPERATIONS structure must be provided whose all members + implemented. + +*/ +#endif diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debug.h b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debug.h new file mode 100644 index 000000000000..a3428a1667b8 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debug.h @@ -0,0 +1,147 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#ifndef __gc_hal_kernel_debug_h_ +#define __gc_hal_kernel_debug_h_ + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/******************************************************************************\ +****************************** OS-dependent Macros ***************************** +\******************************************************************************/ + +typedef va_list gctARGUMENTS; + +#define gcmkARGUMENTS_START(Arguments, Pointer) \ + va_start(Arguments, Pointer) + +#define gcmkARGUMENTS_END(Arguments) \ + va_end(Arguments) + +#define gcmkARGUMENTS_ARG(Arguments, Type) \ + va_arg(Arguments, Type) + +#define gcmkDECLARE_MUTEX(__mutex__) \ + DEFINE_MUTEX(__mutex__); \ + +#define gcmkMUTEX_LOCK(__mutex__) \ + mutex_lock(&__mutex__); + +#define gcmkMUTEX_UNLOCK(__mutex__) \ + mutex_unlock(&__mutex__); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) +# define gcmkGETPROCESSID() \ + task_tgid_vnr(current) +#else +# define gcmkGETPROCESSID() \ + current->tgid +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) +# define gcmkGETTHREADID() \ + task_pid_vnr(current) +#else +# define gcmkGETTHREADID() \ + current->pid +#endif + +#define gcmkOUTPUT_STRING(String) \ + if (gckDEBUGFS_IsEnabled()) \ + { \ + while (-ERESTARTSYS == gckDEBUGFS_Print(String)); \ + } \ + else \ + { \ + printk(String); \ + } + +#define gcmkSPRINTF(Destination, Size, Message, Value) \ + snprintf(Destination, Size, Message, Value) + +#define gcmkSPRINTF2(Destination, Size, Message, Value1, Value2) \ + snprintf(Destination, Size, Message, Value1, Value2) + +#define gcmkSPRINTF3(Destination, Size, Message, Value1, Value2, Value3) \ + snprintf(Destination, Size, Message, Value1, Value2, Value3) + +#define gcmkVSPRINTF(Destination, Size, Message, Arguments) \ + vsnprintf(Destination, Size, Message, *((va_list*)Arguments)) + +#define gcmkSTRCATSAFE(Destination, 
Size, String) \ + strncat(Destination, String, (Size) - 1) + +#define gcmkMEMCPY(Destination, Source, Size) \ + memcpy(Destination, Source, Size) + +#define gcmkSTRLEN(String) \ + strlen(String) + +/* If not zero, forces data alignment in the variable argument list + by its individual size. */ +#define gcdALIGNBYSIZE 1 + +#ifdef __cplusplus +} +#endif + +#endif /* __gc_hal_kernel_debug_h_ */ diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debugfs.c b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debugfs.c new file mode 100644 index 000000000000..885eec644bb0 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debugfs.c @@ -0,0 +1,965 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#ifdef MODULE +#include +#endif +#include +#include +#include +#ifdef MODVERSIONS +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "gc_hal_kernel_linux.h" +#include "gc_hal_kernel.h" +#include "gc_hal_kernel_debug.h" + +/* + Prequsite: + + 1) Debugfs feature must be enabled in the kernel. + 1.a) You can enable this, in the compilation of the uImage, all you have to do is, In the "make menuconfig" part, + you have to enable the debugfs in the kernel hacking part of the menu. 
+ + HOW TO USE: + 1) insert the driver with the following option logFileSize, Ex: insmod galcore.ko ...... logFileSize=10240 + This gives a circular buffer of 10 MB + + 2)Usually after inserting the driver, the debug file system is mounted under /sys/kernel/debug/ + + 2.a)If the debugfs is not mounted, you must do "mount -t debugfs none /sys/kernel/debug" + + 3) To read what is being printed in the debugfs file system: + Ex : cat /sys/kernel/debug/gc/galcore_trace + + 4)To write into the debug file system from user side : + Ex: echo "hello" > cat /sys/kernel/debug/gc/galcore_trace + + 5)To write into debugfs from kernel side, Use the function called gckDEBUGFS_Print + + How to Get Video Memory Usage: + 1) Select a process whose video memory usage can be dump, no need to reset it until is needed to be change. + echo > /sys/kernel/debug/gc/vidmem + + 2) Get video memory usage. + cat /sys/kernel/debug/gc/vidmem + + USECASE Kernel Dump: + + 1) Go to /hal/inc/gc_hal_options.h, and enable the following flags: + - # define gcdDUMP 1 + - # define gcdDUMP_IN_KERNEL 1 + - # define gcdDUMP_COMMAND 1 + + 2) Go to /hal/kernel/gc_hal_kernel_command.c and disable the following flag + -#define gcdSIMPLE_COMMAND_DUMP 0 + + 3) Compile the driver + 4) insmod it with the logFileSize option + 5) Run an application + 6) You can get the dump by cat /sys/kernel/debug/gpu/galcore_trace + + */ + +/**/ +typedef va_list gctDBGARGS ; +#define gcmkARGS_START(argument, pointer) va_start(argument, pointer) +#define gcmkARGS_END(argument) va_end(argument) + +#define gcmkDEBUGFS_PRINT(ArgumentSize, Message) \ + { \ + gctDBGARGS __arguments__; \ + gcmkARGS_START(__arguments__, Message); \ + _debugfs_res = _DebugFSPrint(ArgumentSize, Message, &__arguments__);\ + gcmkARGS_END(__arguments__); \ + } + + +static DEFINE_SPINLOCK(traceLock); + +/* Debug File System Node Struct. 
*/ +struct _gcsDEBUGFS_Node +{ + /*wait queues for read and write operations*/ +#if defined(DECLARE_WAIT_QUEUE_HEAD) + wait_queue_head_t read_q , write_q ; +#else + struct wait_queue *read_q , *write_q ; +#endif + struct dentry *parent ; /*parent directory*/ + struct dentry *filen ; /*filename*/ + struct semaphore sem ; /* mutual exclusion semaphore */ + char *data ; /* The circular buffer data */ + int size ; /* Size of the buffer pointed to by 'data' */ + int refcount ; /* Files that have this buffer open */ + int read_point ; /* Offset in circ. buffer of oldest data */ + int write_point ; /* Offset in circ. buffer of newest data */ + int offset ; /* Byte number of read_point in the stream */ + struct _gcsDEBUGFS_Node *next ; + + caddr_t temp; + int tempSize; +}; + +/* amount of data in the queue */ +#define gcmkNODE_QLEN(node) ( (node)->write_point >= (node)->read_point ? \ + (node)->write_point - (node)->read_point : \ + (node)->size - (node)->read_point + (node)->write_point) + +/* byte number of the last byte in the queue */ +#define gcmkNODE_FIRST_EMPTY_BYTE(node) ((node)->offset + gcmkNODE_QLEN(node)) + +/*Synchronization primitives*/ +#define gcmkNODE_READQ(node) (&((node)->read_q)) +#define gcmkNODE_WRITEQ(node) (&((node)->write_q)) +#define gcmkNODE_SEM(node) (&((node)->sem)) + +/*Utilities*/ +#define gcmkMIN(x, y) ((x) < (y) ? 
(x) : y) + +/*Debug File System Struct*/ +typedef struct _gcsDEBUGFS_ +{ + gcsDEBUGFS_Node* linkedlist ; + gcsDEBUGFS_Node* currentNode ; + int isInited ; +} gcsDEBUGFS_ ; + +/*debug file system*/ +static gcsDEBUGFS_ gc_dbgfs ; + +static int gc_debugfs_open(struct inode *inode, struct file *file) +{ + gcsINFO_NODE *node = inode->i_private; + + return single_open(file, node->info->show, node); +} + +static ssize_t +gc_debugfs_write( + struct file *file, + const char __user *buf, + size_t count, + loff_t *pos + ) +{ + struct seq_file *s = file->private_data; + gcsINFO_NODE *node = s->private; + gcsINFO *info = node->info; + + if (info->write) + { + info->write(buf, count, node); + } + + return count; +} + +static const struct file_operations gc_debugfs_operations = { + .owner = THIS_MODULE, + .open = gc_debugfs_open, + .write = gc_debugfs_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +gceSTATUS +gckDEBUGFS_DIR_Init( + IN gckDEBUGFS_DIR Dir, + IN struct dentry *root, + IN gctCONST_STRING Name + ) +{ + Dir->root = debugfs_create_dir(Name, root); + + if (!Dir->root) + { + return gcvSTATUS_NOT_SUPPORTED; + } + + INIT_LIST_HEAD(&Dir->nodeList); + + return gcvSTATUS_OK; +} + +gceSTATUS +gckDEBUGFS_DIR_CreateFiles( + IN gckDEBUGFS_DIR Dir, + IN gcsINFO * List, + IN int count, + IN gctPOINTER Data + ) +{ + int i; + gcsINFO_NODE * node; + gceSTATUS status; + + for (i = 0; i < count; i++) + { + umode_t mode = 0; + + /* Create a node. */ + node = (gcsINFO_NODE *)kzalloc(sizeof(gcsINFO_NODE), GFP_KERNEL); + + node->info = &List[i]; + node->device = Data; + + mode |= List[i].show ? S_IRUGO : 0; + mode |= List[i].write ? 
S_IWUSR : 0; + + node->entry = debugfs_create_file( + List[i].name, mode, Dir->root, node, &gc_debugfs_operations); + + if (!node->entry) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + list_add(&(node->head), &(Dir->nodeList)); + } + + return gcvSTATUS_OK; + +OnError: + gcmkVERIFY_OK(gckDEBUGFS_DIR_RemoveFiles(Dir, List, count)); + return status; +} + +gceSTATUS +gckDEBUGFS_DIR_RemoveFiles( + IN gckDEBUGFS_DIR Dir, + IN gcsINFO * List, + IN int count + ) +{ + int i; + gcsINFO_NODE * node; + gcsINFO_NODE * temp; + + for (i = 0; i < count; i++) + { + list_for_each_entry_safe(node, temp, &Dir->nodeList, head) + { + if (node->info == &List[i]) + { + debugfs_remove(node->entry); + list_del(&node->head); + kfree(node); + } + } + } + + return gcvSTATUS_OK; +} + +void +gckDEBUGFS_DIR_Deinit( + IN gckDEBUGFS_DIR Dir + ) +{ + if (Dir->root != NULL) + { + debugfs_remove(Dir->root); + Dir->root = NULL; + } +} + +/******************************************************************************* + ** + ** READ & WRITE FUNCTIONS (START) + ** + *******************************************************************************/ + +/******************************************************************************* + ** + ** _ReadFromNode + ** + ** 1) reading bytes out of a circular buffer with wraparound. + ** 2)returns caddr_t, pointer to data read, which the caller must free. + ** 3) length is (a pointer to) the number of bytes to be read, which will be set by this function to + ** be the number of bytes actually returned + ** + *******************************************************************************/ +static caddr_t +_ReadFromNode ( + gcsDEBUGFS_Node* Node , + size_t *Length , + loff_t *Offset + ) +{ + caddr_t retval ; + int bytes_copied = 0 , n , start_point , remaining ; + + /* find the smaller of the total bytes we have available and what + * the user is asking for */ + *Length = gcmkMIN ( *Length , gcmkNODE_QLEN(Node) ) ; + + remaining = * Length ; + + /* Get start point. 
*/ + start_point = Node->read_point; + + /* allocate memory to return */ + if (remaining > Node->tempSize) + { + kfree(Node->temp); + + if ( ( retval = kmalloc ( sizeof (char ) * remaining , GFP_ATOMIC ) ) == NULL ) + return NULL; + + Node->temp = retval; + Node->tempSize = remaining; + } + else + { + retval = Node->temp; + } + + /* copy the (possibly noncontiguous) data to our buffer */ + while ( remaining ) + { + n = gcmkMIN ( remaining , Node->size - start_point ) ; + memcpy ( retval + bytes_copied , Node->data + start_point , n ) ; + bytes_copied += n ; + remaining -= n ; + start_point = ( start_point + n ) % Node->size ; + } + + /* advance user's file pointer */ + Node->read_point = (Node->read_point + * Length) % Node->size ; + + return retval ; +} + +/******************************************************************************* + ** + ** _WriteToNode + ** + ** 1) writes to a circular buffer with wraparound. + ** 2)in case of an overflow, it overwrites the oldest unread data. + ** + *********************************************************************************/ +static void +_WriteToNode ( + gcsDEBUGFS_Node* Node , + caddr_t Buf , + int Length + ) +{ + int bytes_copied = 0 ; + int overflow = 0 ; + int n ; + + if ( Length + gcmkNODE_QLEN ( Node ) >= ( Node->size - 1 ) ) + { + overflow = 1 ; + } + + while ( Length ) + { + /* how many contiguous bytes are available from the write point to + * the end of the circular buffer? */ + n = gcmkMIN ( Length , Node->size - Node->write_point ) ; + memcpy ( Node->data + Node->write_point , Buf + bytes_copied , n ) ; + bytes_copied += n ; + Length -= n ; + Node->write_point = ( Node->write_point + n ) % Node->size ; + } + + /* if there is an overflow, reset the read point to read whatever is + * the oldest data that we have, that has not yet been + * overwritten. 
*/ + if ( overflow ) + { + Node->read_point = ( Node->write_point + 1 ) % Node->size ; + } +} + +/******************************************************************************* + ** + ** PRINTING UTILITY (START) + ** + *******************************************************************************/ + +/******************************************************************************* + ** + ** _GetArgumentSize + ** + ** + *******************************************************************************/ +static gctINT +_GetArgumentSize ( + IN gctCONST_STRING Message + ) +{ + gctINT i , count ; + + for ( i = 0 , count = 0 ; Message[i] ; i += 1 ) + { + if ( Message[i] == '%' ) + { + count += 1 ; + } + } + return count * sizeof (unsigned int ) ; +} + +/******************************************************************************* + ** + ** _AppendString + ** + ** + *******************************************************************************/ +static ssize_t +_AppendString ( + IN gcsDEBUGFS_Node* Node, + IN gctCONST_STRING String, + IN int Length + ) +{ + int n; + unsigned long flags; + + /* if the message is longer than the buffer, just take the beginning + * of it, in hopes that the reader (if any) will have time to read + * before we wrap around and obliterate it */ + n = gcmkMIN ( Length , Node->size - 1 ); + + spin_lock_irqsave(&traceLock, flags); + + /* now copy it into the circular buffer and free our temp copy */ + _WriteToNode ( Node , (caddr_t)String , n ) ; + + spin_unlock_irqrestore(&traceLock, flags); + + return n ; +} + +/******************************************************************************* + ** + ** _DebugFSPrint + ** + ** + *******************************************************************************/ +static ssize_t +_DebugFSPrint ( + IN unsigned int ArgumentSize , + IN const char* Message , + IN gctDBGARGS * Arguments + + ) +{ + char buffer[MAX_LINE_SIZE] ; + int len ; + ssize_t res = 0; + + if ( gc_dbgfs.currentNode ) + { + len = 
vsnprintf ( buffer , sizeof (buffer ) , Message , *( va_list * ) Arguments ) ; + + buffer[len] = '\0' ; + /* Add end-of-line if missing. */ + if ( buffer[len - 1] != '\n' ) + { + buffer[len ++] = '\n' ; + buffer[len] = '\0' ; + } + + res = _AppendString ( gc_dbgfs.currentNode , buffer , len ) ; + wake_up_interruptible ( gcmkNODE_READQ ( gc_dbgfs.currentNode ) ) ; /* blocked in read*/ + } + + return res; +} + +/******************************************************************************* + ** + ** LINUX SYSTEM FUNCTIONS (START) + ** + *******************************************************************************/ +static int +_DebugFSOpen ( + struct inode* inode, + struct file* filp + ) +{ + filp->private_data = inode->i_private; + + return 0; +} + +/******************************************************************************* + ** + ** _DebugFSRead + ** + *******************************************************************************/ +static ssize_t +_DebugFSRead ( + struct file *file, + char __user * buffer, + size_t length, + loff_t * offset + ) +{ + int retval; + caddr_t data_to_return; + unsigned long flags; + gcsDEBUGFS_Node* node = file->private_data; + + if (node == NULL) + { + printk ( "debugfs_read: record not found\n" ); + return - EIO ; + } + + spin_lock_irqsave(&traceLock, flags); + + /* wait until there's data available (unless we do nonblocking reads) */ + while (!gcmkNODE_QLEN(node)) + { + spin_unlock_irqrestore(&traceLock, flags); + + if (file->f_flags & O_NONBLOCK) + { + return - EAGAIN ; + } + + if (wait_event_interruptible((*(gcmkNODE_READQ(node))) , (*offset < gcmkNODE_FIRST_EMPTY_BYTE(node)))) + { + return - ERESTARTSYS ; /* signal: tell the fs layer to handle it */ + } + + spin_lock_irqsave(&traceLock, flags); + } + + data_to_return = _ReadFromNode(node , &length , offset); + + spin_unlock_irqrestore(&traceLock, flags); + + if (data_to_return == NULL) + { + retval = 0; + goto unlock; + } + + if (copy_to_user(buffer, data_to_return, 
length) > 0) + { + retval = - EFAULT; + } + else + { + retval = length; + } +unlock: + + wake_up_interruptible(gcmkNODE_WRITEQ(node)); + return retval ; +} + +/******************************************************************************* + ** + **_DebugFSWrite + ** + *******************************************************************************/ +static ssize_t +_DebugFSWrite ( + struct file *file , + const char __user * buffer , + size_t length , + loff_t * offset + ) +{ + caddr_t message = NULL ; + int n ; + gcsDEBUGFS_Node* node = file->private_data; + + /* get the metadata about this log */ + if (node == NULL) + { + return - EIO ; + } + + if ( down_interruptible ( gcmkNODE_SEM ( node ) ) ) + { + return - ERESTARTSYS ; + } + + /* if the message is longer than the buffer, just take the beginning + * of it, in hopes that the reader (if any) will have time to read + * before we wrap around and obliterate it */ + n = gcmkMIN ( length , node->size - 1 ) ; + + /* make sure we have the memory for it */ + if ( ( message = kmalloc ( n , GFP_KERNEL ) ) == NULL ) + { + up ( gcmkNODE_SEM ( node ) ) ; + return - ENOMEM ; + } + + + /* copy into our temp buffer */ + if ( copy_from_user ( message , buffer , n ) > 0 ) + { + up ( gcmkNODE_SEM ( node ) ) ; + kfree ( message ) ; + return - EFAULT ; + } + + /* now copy it into the circular buffer and free our temp copy */ + _WriteToNode ( node , message , n ) ; + + kfree ( message ) ; + up ( gcmkNODE_SEM ( node ) ) ; + + /* wake up any readers that might be waiting for the data. we call + * schedule in the vague hope that a reader will run before the + * writer's next write, to avoid losing data. 
*/ + wake_up_interruptible ( gcmkNODE_READQ ( node ) ) ; + + return n ; +} + +/******************************************************************************* + ** + ** File Operations Table + ** + *******************************************************************************/ +static const struct file_operations debugfs_operations = { + .owner = THIS_MODULE , + .open = _DebugFSOpen , + .read = _DebugFSRead , + .write = _DebugFSWrite , +} ; + +/******************************************************************************* + ** + ** INTERFACE FUNCTIONS (START) + ** + *******************************************************************************/ + +/******************************************************************************* + ** + ** gckDEBUGFS_IsEnabled + ** + ** + ** INPUT: + ** + ** OUTPUT: + ** + *******************************************************************************/ + + +gctINT +gckDEBUGFS_IsEnabled ( void ) +{ + return gc_dbgfs.isInited ; +} +/******************************************************************************* + ** + ** gckDEBUGFS_Initialize + ** + ** + ** INPUT: + ** + ** OUTPUT: + ** + *******************************************************************************/ + +gctINT +gckDEBUGFS_Initialize ( void ) +{ + if ( ! 
gc_dbgfs.isInited ) + { + gc_dbgfs.linkedlist = gcvNULL ; + gc_dbgfs.currentNode = gcvNULL ; + gc_dbgfs.isInited = 1 ; + } + return gc_dbgfs.isInited ; +} +/******************************************************************************* + ** + ** gckDEBUGFS_Terminate + ** + ** + ** INPUT: + ** + ** OUTPUT: + ** + *******************************************************************************/ + +gctINT +gckDEBUGFS_Terminate ( void ) +{ + gcsDEBUGFS_Node * next = gcvNULL ; + gcsDEBUGFS_Node * temp = gcvNULL ; + if ( gc_dbgfs.isInited ) + { + temp = gc_dbgfs.linkedlist ; + while ( temp != gcvNULL ) + { + next = temp->next ; + gckDEBUGFS_FreeNode ( temp ) ; + kfree ( temp ) ; + temp = next ; + } + gc_dbgfs.isInited = 0 ; + } + return 0 ; +} + + +/******************************************************************************* + ** + ** gckDEBUGFS_CreateNode + ** + ** + ** INPUT: + ** + ** OUTPUT: + ** + ** gckDEBUGFS_FreeNode * Device + ** Pointer to a variable receiving the gcsDEBUGFS_Node object pointer on + ** success. 
+ *********************************************************************************/ + +gctINT +gckDEBUGFS_CreateNode ( + IN gctPOINTER Device, + IN gctINT SizeInKB , + IN struct dentry * Root , + IN gctCONST_STRING NodeName , + OUT gcsDEBUGFS_Node **Node + ) +{ + gcsDEBUGFS_Node*node ; + /* allocate space for our metadata and initialize it */ + if ( ( node = kmalloc ( sizeof (gcsDEBUGFS_Node ) , GFP_KERNEL ) ) == NULL ) + goto struct_malloc_failed ; + + /*Zero it out*/ + memset ( node , 0 , sizeof (gcsDEBUGFS_Node ) ) ; + + /*Init the sync primitives*/ +#if defined(DECLARE_WAIT_QUEUE_HEAD) + init_waitqueue_head ( gcmkNODE_READQ ( node ) ) ; +#else + init_waitqueue ( gcmkNODE_READQ ( node ) ) ; +#endif + +#if defined(DECLARE_WAIT_QUEUE_HEAD) + init_waitqueue_head ( gcmkNODE_WRITEQ ( node ) ) ; +#else + init_waitqueue ( gcmkNODE_WRITEQ ( node ) ) ; +#endif + sema_init ( gcmkNODE_SEM ( node ) , 1 ) ; + /*End the sync primitives*/ + + /*creating the debug file system*/ + node->parent = Root; + + if (SizeInKB) + { + /* figure out how much of a buffer this should be and allocate the buffer */ + node->size = 1024 * SizeInKB ; + if ( ( node->data = ( char * ) vmalloc ( sizeof (char ) * node->size ) ) == NULL ) + goto data_malloc_failed ; + + node->tempSize = 0; + node->temp = NULL; + + /*creating the file*/ + node->filen = debugfs_create_file(NodeName, S_IRUGO|S_IWUSR, node->parent, node, + &debugfs_operations); + } + + /* add it to our linked list */ + node->next = gc_dbgfs.linkedlist ; + gc_dbgfs.linkedlist = node ; + + + /* pass the struct back */ + *Node = node ; + return 0 ; + + +data_malloc_failed: + kfree ( node ) ; +struct_malloc_failed: + return - ENOMEM ; +} + +/******************************************************************************* + ** + ** gckDEBUGFS_FreeNode + ** + ** + ** INPUT: + ** + ** OUTPUT: + ** + *******************************************************************************/ +void +gckDEBUGFS_FreeNode ( + IN gcsDEBUGFS_Node * Node + ) +{ + + 
gcsDEBUGFS_Node **ptr ; + + if ( Node == NULL ) + { + printk ( "null passed to free_vinfo\n" ) ; + return ; + } + + down ( gcmkNODE_SEM ( Node ) ) ; + /*free data*/ + vfree ( Node->data ) ; + + kfree(Node->temp); + + /*Close Debug fs*/ + if ( Node->filen ) + { + debugfs_remove ( Node->filen ) ; + } + + /* now delete the node from the linked list */ + ptr = & ( gc_dbgfs.linkedlist ) ; + while ( *ptr != Node ) + { + if ( ! *ptr ) + { + printk ( "corrupt info list!\n" ) ; + break ; + } + else + ptr = & ( ( **ptr ).next ) ; + } + *ptr = Node->next ; + up ( gcmkNODE_SEM ( Node ) ) ; +} + +/******************************************************************************* + ** + ** gckDEBUGFS_SetCurrentNode + ** + ** + ** INPUT: + ** + ** OUTPUT: + ** + *******************************************************************************/ +void +gckDEBUGFS_SetCurrentNode ( + IN gcsDEBUGFS_Node * Node + ) +{ + gc_dbgfs.currentNode = Node ; +} + +/******************************************************************************* + ** + ** gckDEBUGFS_GetCurrentNode + ** + ** + ** INPUT: + ** + ** OUTPUT: + ** + *******************************************************************************/ +void +gckDEBUGFS_GetCurrentNode ( + OUT gcsDEBUGFS_Node ** Node + ) +{ + *Node = gc_dbgfs.currentNode ; +} + +/******************************************************************************* + ** + ** gckDEBUGFS_Print + ** + ** + ** INPUT: + ** + ** OUTPUT: + ** + *******************************************************************************/ +ssize_t +gckDEBUGFS_Print ( + IN gctCONST_STRING Message , + ... 
+ ) +{ + ssize_t _debugfs_res = 0; + gcmkDEBUGFS_PRINT ( _GetArgumentSize ( Message ) , Message ) ; + return _debugfs_res; +} diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debugfs.h b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debugfs.h new file mode 100644 index 000000000000..14a3324442a9 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debugfs.h @@ -0,0 +1,170 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#include + +#ifndef __gc_hal_kernel_debugfs_h_ +#define __gc_hal_kernel_debugfs_h_ + + #define MAX_LINE_SIZE 768 /* Max bytes for a line of debug info */ + + + typedef struct _gcsDEBUGFS_Node gcsDEBUGFS_Node; + +typedef struct _gcsDEBUGFS_DIR *gckDEBUGFS_DIR; +typedef struct _gcsDEBUGFS_DIR +{ + struct dentry * root; + struct list_head nodeList; +} +gcsDEBUGFS_DIR; + +typedef struct _gcsINFO +{ + const char * name; + int (*show)(struct seq_file*, void*); + int (*write)(const char __user *buf, size_t count, void*); +} +gcsINFO; + +typedef struct _gcsINFO_NODE +{ + gcsINFO * info; + gctPOINTER device; + struct dentry * entry; + struct list_head head; +} +gcsINFO_NODE; + +gceSTATUS +gckDEBUGFS_DIR_Init( + IN gckDEBUGFS_DIR Dir, + IN struct dentry *root, + IN gctCONST_STRING Name + ); + +gceSTATUS +gckDEBUGFS_DIR_CreateFiles( + IN gckDEBUGFS_DIR Dir, + IN gcsINFO * List, + IN int count, + IN gctPOINTER Data + ); + +gceSTATUS +gckDEBUGFS_DIR_RemoveFiles( + IN gckDEBUGFS_DIR Dir, + IN gcsINFO * List, + IN int count + ); + +void +gckDEBUGFS_DIR_Deinit( + IN gckDEBUGFS_DIR Dir + ); + +/******************************************************************************* + ** + ** System Related + ** + *******************************************************************************/ + +gctINT gckDEBUGFS_IsEnabled(void); + +gctINT gckDEBUGFS_Initialize(void); + +gctINT gckDEBUGFS_Terminate(void); + + +/******************************************************************************* + ** + ** Node Related + ** + *******************************************************************************/ + +gctINT +gckDEBUGFS_CreateNode( + IN gctPOINTER Device, + IN gctINT SizeInKB, + IN struct dentry * Root, + IN gctCONST_STRING NodeName, + OUT gcsDEBUGFS_Node **Node + ); + +void gckDEBUGFS_FreeNode( + IN gcsDEBUGFS_Node * Node + ); + + + +void gckDEBUGFS_SetCurrentNode( + IN gcsDEBUGFS_Node * Node + ); + + + 
+void gckDEBUGFS_GetCurrentNode( + OUT gcsDEBUGFS_Node ** Node + ); + + +ssize_t gckDEBUGFS_Print( + IN gctCONST_STRING Message, + ... + ); + +#endif + + diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_device.c b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_device.c new file mode 100644 index 000000000000..834f136c1042 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_device.c @@ -0,0 +1,2340 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#include "gc_hal_kernel_linux.h" +#include "gc_hal_kernel_allocator.h" +#include +#include +#include +#include + +#define _GC_OBJ_ZONE gcvZONE_DEVICE + +#define DEBUG_FILE "galcore_trace" +#define PARENT_FILE "gpu" + +#define gcdDEBUG_FS_WARN "Experimental debug entry, may be removed in future release, do NOT rely on it!\n" + +static gckGALDEVICE galDevice; + +extern gcTA globalTA[16]; + +/******************************************************************************\ +******************************** Debugfs Support ******************************* +\******************************************************************************/ + +/******************************************************************************\ +***************************** DEBUG SHOW FUNCTIONS ***************************** +\******************************************************************************/ + +int gc_info_show(struct seq_file* m, void* data) +{ + gcsINFO_NODE *node = m->private; + gckGALDEVICE device = node->device; + int i = 0; + gceCHIPMODEL chipModel; + gctUINT32 chipRevision; + gctUINT32 productID = 0; + gctUINT32 ecoID = 0; + + for (i = 0; i < gcdMAX_GPU_COUNT; i++) + { + if (device->irqLines[i] != -1) + { + { + chipModel = device->kernels[i]->hardware->identity.chipModel; + chipRevision = device->kernels[i]->hardware->identity.chipRevision; + productID = device->kernels[i]->hardware->identity.productID; + ecoID = device->kernels[i]->hardware->identity.ecoID; + } + + seq_printf(m, "gpu : %d\n", i); + seq_printf(m, "model : %4x\n", chipModel); + seq_printf(m, "revision : %4x\n", chipRevision); + seq_printf(m, "product : %4x\n", productID); + seq_printf(m, "eco : %4x\n", ecoID); + seq_printf(m, "\n"); + } + } + + return 0; +} + +int gc_clients_show(struct seq_file* m, void* data) +{ + gcsINFO_NODE *node = m->private; + gckGALDEVICE device = node->device; + + gckKERNEL kernel = 
_GetValidKernel(device); + + gcsDATABASE_PTR database; + gctINT i, pid; + char name[24]; + + seq_printf(m, "%-8s%s\n", "PID", "NAME"); + seq_printf(m, "------------------------\n"); + + /* Acquire the database mutex. */ + gcmkVERIFY_OK( + gckOS_AcquireMutex(kernel->os, kernel->db->dbMutex, gcvINFINITE)); + + /* Walk the databases. */ + for (i = 0; i < gcmCOUNTOF(kernel->db->db); ++i) + { + for (database = kernel->db->db[i]; + database != gcvNULL; + database = database->next) + { + pid = database->processID; + + gcmkVERIFY_OK(gckOS_GetProcessNameByPid(pid, gcmSIZEOF(name), name)); + + seq_printf(m, "%-8d%s\n", pid, name); + } + } + + /* Release the database mutex. */ + gcmkVERIFY_OK(gckOS_ReleaseMutex(kernel->os, kernel->db->dbMutex)); + + /* Success. */ + return 0; +} + +static void +_CounterAdd( + gcsDATABASE_COUNTERS * Dest, + gcsDATABASE_COUNTERS * Src + ) +{ + Dest->bytes += Src->bytes; + Dest->maxBytes += Src->maxBytes; + Dest->totalBytes += Src->totalBytes; +} + +static void +_CounterPrint( + gcsDATABASE_COUNTERS * Counter, + gctCONST_STRING Name, + struct seq_file* m + ) +{ + seq_printf(m, " %s:\n", Name); + seq_printf(m, " Used : %10llu B\n", Counter->bytes); +} + +int gc_meminfo_show(struct seq_file* m, void* data) +{ + gcsINFO_NODE *node = m->private; + gckGALDEVICE device = node->device; + gckKERNEL kernel = _GetValidKernel(device); + gckVIDMEM memory; + gceSTATUS status; + gcsDATABASE_PTR database; + gctUINT32 i; + + gctUINT32 free = 0, used = 0, total = 0, minFree = 0, maxUsed = 0; + + gcsDATABASE_COUNTERS contiguousCounter = {0, 0, 0}; + gcsDATABASE_COUNTERS virtualCounter = {0, 0, 0}; + gcsDATABASE_COUNTERS nonPagedCounter = {0, 0, 0}; + + status = gckKERNEL_GetVideoMemoryPool(kernel, gcvPOOL_SYSTEM, &memory); + + if (gcmIS_SUCCESS(status)) + { + gcmkVERIFY_OK( + gckOS_AcquireMutex(memory->os, memory->mutex, gcvINFINITE)); + + free = memory->freeBytes; + minFree = memory->minFreeBytes; + used = memory->bytes - memory->freeBytes; + maxUsed = 
memory->bytes - memory->minFreeBytes; + total = memory->bytes; + + gcmkVERIFY_OK(gckOS_ReleaseMutex(memory->os, memory->mutex)); + } + + seq_printf(m, "VIDEO MEMORY:\n"); + seq_printf(m, " gcvPOOL_SYSTEM:\n"); + seq_printf(m, " Free : %10u B\n", free); + seq_printf(m, " Used : %10u B\n", used); + seq_printf(m, " MinFree : %10u B\n", minFree); + seq_printf(m, " MaxUsed : %10u B\n", maxUsed); + seq_printf(m, " Total : %10u B\n", total); + + /* Acquire the database mutex. */ + gcmkVERIFY_OK( + gckOS_AcquireMutex(kernel->os, kernel->db->dbMutex, gcvINFINITE)); + + /* Walk the databases. */ + for (i = 0; i < gcmCOUNTOF(kernel->db->db); ++i) + { + for (database = kernel->db->db[i]; + database != gcvNULL; + database = database->next) + { + gcsDATABASE_COUNTERS * counter = &database->vidMemPool[gcvPOOL_CONTIGUOUS]; + _CounterAdd(&contiguousCounter, counter); + + counter = &database->vidMemPool[gcvPOOL_VIRTUAL]; + _CounterAdd(&virtualCounter, counter); + + + counter = &database->nonPaged; + _CounterAdd(&nonPagedCounter, counter); + } + } + + /* Release the database mutex. 
*/ + gcmkVERIFY_OK(gckOS_ReleaseMutex(kernel->os, kernel->db->dbMutex)); + + _CounterPrint(&contiguousCounter, "gcvPOOL_CONTIGUOUS", m); + _CounterPrint(&virtualCounter, "gcvPOOL_VIRTUAL", m); + + seq_printf(m, "\n"); + + seq_printf(m, "NON PAGED MEMORY:\n"); + seq_printf(m, " Used : %10llu B\n", nonPagedCounter.bytes); + + return 0; +} + +static int +_ShowRecord( + IN struct seq_file *File, + IN gcsDATABASE_RECORD_PTR Record + ) +{ + static const char * recordTypes[gcvDB_NUM_TYPES] = { + "Unknown", + "VideoMemory", + "CommandBuffer", + "NonPaged", + "Contiguous", + "Signal", + "VidMemLock", + "Context", + "Idel", + "MapMemory", + "MapUserMemory", + "ShBuf", + }; + + seq_printf(File, "%-14s %3d %16p %16zu %16zu\n", + recordTypes[Record->type], + Record->kernel->core, + Record->data, + (size_t) Record->physical, + Record->bytes + ); + + return 0; +} + +static int +_ShowRecords( + IN struct seq_file *File, + IN gcsDATABASE_PTR Database + ) +{ + gctUINT i; + + seq_printf(File, "Records:\n"); + + seq_printf(File, "%14s %3s %16s %16s %16s\n", + "Type", "GPU", "Data/Node", "Physical/Node", "Bytes"); + + for (i = 0; i < gcmCOUNTOF(Database->list); i++) + { + gcsDATABASE_RECORD_PTR record = Database->list[i]; + + while (record != NULL) + { + _ShowRecord(File, record); + record = record->next; + } + } + + return 0; +} + +static void +_ShowCounters( + struct seq_file *File, + gcsDATABASE_PTR Database + ) +{ + gctUINT i = 0; + + static const char * surfaceTypes[gcvSURF_NUM_TYPES] = { + "Unknown", + "Index", + "Vertex", + "Texture", + "RenderTarget", + "Depth", + "Bitmap", + "TileStatus", + "Image", + "Mask", + "Scissor", + "HZ", + "ICache", + "TxDesc", + "Fence", + "TFBHeader", + }; + + static const char * poolTypes[gcvPOOL_NUMBER_OF_POOLS] = { + "Unknown", + "Default", + "Local", + "Internal", + "External", + "Unified", + "System", + "Virtual", + "User", + "Contiguous", + }; + + static const char * otherCounterNames[] = { + "AllocNonPaged", + "AllocContiguous", + 
"MapUserMemory", + "MapMemory", + }; + + gcsDATABASE_COUNTERS * otherCounters[] = { + &Database->nonPaged, + &Database->contiguous, + &Database->mapUserMemory, + &Database->mapMemory, + }; + + seq_printf(File, "%-16s %16s %16s %16s\n", "", "Current", "Maximum", "Total"); + + /* Print surface type counters. */ + seq_printf(File, "%-16s %16lld %16lld %16lld\n", + "All-Types", + Database->vidMem.bytes, + Database->vidMem.maxBytes, + Database->vidMem.totalBytes); + + for (i = 1; i < gcvSURF_NUM_TYPES; i++) + { + seq_printf(File, "%-16s %16lld %16lld %16lld\n", + surfaceTypes[i], + Database->vidMemType[i].bytes, + Database->vidMemType[i].maxBytes, + Database->vidMemType[i].totalBytes); + } + seq_puts(File, "\n"); + + /* Print surface pool counters. */ + seq_printf(File, "%-16s %16lld %16lld %16lld\n", + "All-Pools", + Database->vidMem.bytes, + Database->vidMem.maxBytes, + Database->vidMem.totalBytes); + + for (i = 1; i < gcvPOOL_NUMBER_OF_POOLS; i++) + { + seq_printf(File, "%-16s %16lld %16lld %16lld\n", + poolTypes[i], + Database->vidMemPool[i].bytes, + Database->vidMemPool[i].maxBytes, + Database->vidMemPool[i].totalBytes); + } + seq_puts(File, "\n"); + + /* Print other counters. 
 */
    for (i = 0; i < gcmCOUNTOF(otherCounterNames); i++)
    {
        seq_printf(File, "%-16s %16lld %16lld %16lld\n",
            otherCounterNames[i],
            otherCounters[i]->bytes,
            otherCounters[i]->maxBytes,
            otherCounters[i]->totalBytes);
    }
    seq_puts(File, "\n");
}

/* Print one process database: separator, PID and process name, then the
 * detailed allocation records and the per-type/per-pool counters. */
static void
_ShowProcess(
    IN struct seq_file *File,
    IN gcsDATABASE_PTR Database
    )
{
    gctINT pid;
    char name[24];

    /* Process ID and name */
    pid = Database->processID;
    gcmkVERIFY_OK(gckOS_GetProcessNameByPid(pid, gcmSIZEOF(name), name));

    seq_printf(File, "--------------------------------------------------------------------------------\n");
    seq_printf(File, "Process: %-8d %s\n", pid, name);

    /* Detailed records */
    _ShowRecords(File, Database);

    seq_printf(File, "Counters:\n");

    _ShowCounters(File, Database);
}

/* Walk every process database of Kernel (under the database mutex) and
 * print it via _ShowProcess, preceded by the accumulated GPU idle time. */
static void
_ShowProcesses(
    IN struct seq_file * File,
    IN gckKERNEL Kernel
    )
{
    gcsDATABASE_PTR database;
    gctINT i;
    /* Persists across calls: keeps the last non-zero idle reading so a
     * repeated show does not report zero after the DB value was consumed. */
    static gctUINT64 idleTime = 0;

    /* Acquire the database mutex. */
    gcmkVERIFY_OK(
        gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));

    if (Kernel->db->idleTime)
    {
        /* Record idle time if DB updated; consume the DB value. */
        idleTime = Kernel->db->idleTime;
        Kernel->db->idleTime = 0;
    }

    /* Idle time since last call */
    seq_printf(File, "GPU Idle: %llu ns\n", idleTime);

    /* Walk the databases. */
    for (i = 0; i < gcmCOUNTOF(Kernel->db->db); ++i)
    {
        for (database = Kernel->db->db[i];
            database != gcvNULL;
            database = database->next)
        {
            _ShowProcess(File, database);
        }
    }

    /* Release the database mutex.
 */
    gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
}

/* debugfs 'database' entry: dump all process databases of the device's
 * first valid kernel. */
static int
gc_db_show(struct seq_file *m, void *data)
{
    gcsINFO_NODE *node = m->private;
    gckGALDEVICE device = node->device;
    gckKERNEL kernel = _GetValidKernel(device);
    _ShowProcesses(m, kernel);
    return 0 ;
}

/* debugfs 'version' entry: driver version string, build host and the
 * platform (or source file) path. */
static int
gc_version_show(struct seq_file *m, void *data)
{
    gcsINFO_NODE *node = m->private;
    gckGALDEVICE device = node->device;
    gcsPLATFORM * platform = device->platform;

    seq_printf(m, "%s built at %s\n", gcvVERSION_STRING, HOST);

    if (platform->name)
    {
        seq_printf(m, "Platform path: %s\n", platform->name);
    }
    else
    {
        seq_printf(m, "Code path: %s\n", __FILE__);
    }

    return 0 ;
}

/*******************************************************************************
**
** Show PM state timer.
**
** Entry is called as 'idle' for compatible reason, it shows more information
** than idle actually.
**
** Start: Start time of this counting period.
** End: End time of this counting period.
** On: Time GPU stays in gcvPOWER_ON.
** Off: Time GPU stays in gcvPOWER_OFF.
** Idle: Time GPU stays in gcvPOWER_IDLE.
** Suspend: Time GPU stays in gcvPOWER_SUSPEND.
*/
static int
gc_idle_show(struct seq_file *m, void *data)
{
    gcsINFO_NODE *node = m->private;
    gckGALDEVICE device = node->device;
    gckKERNEL kernel = _GetValidKernel(device);

    gctUINT64 start;
    gctUINT64 end;
    gctUINT64 on;
    gctUINT64 off;
    gctUINT64 idle;
    gctUINT64 suspend;

    gckHARDWARE_QueryStateTimer(kernel->hardware, &start, &end, &on, &off, &idle, &suspend);

    /* Idle time since last call */
    seq_printf(m, "Start: %llu ns\n", start);
    seq_printf(m, "End: %llu ns\n", end);
    seq_printf(m, "On: %llu ns\n", on);
    seq_printf(m, "Off: %llu ns\n", off);
    seq_printf(m, "Idle: %llu ns\n", idle);
    seq_printf(m, "Suspend: %llu ns\n", suspend);

    return 0 ;
}

/* Implemented elsewhere in the driver; dumps the kernel/hardware state to
 * the kernel log. */
extern void
_DumpState(
    IN gckKERNEL Kernel
    );

/*******************************************************************************
**
** debugfs 'dump_trigger' entry (see gc_dump_trigger_show below).
**
** NOTE(review): the comment originally found here was a verbatim copy of
** the 'idle' entry description above and did not describe this entry; it
** has been replaced. Reading the entry dumps the state of the core selected
** by writing its index to the same entry.
 */

/* Core index (gcvCORE_*) whose state the 'dump_trigger' entry dumps;
 * set by writing an integer to the entry (gc_dump_trigger_write). */
static int dumpCore = 0;

/* debugfs 'dump_trigger' entry: dump the state of the core selected by
 * dumpCore. 3D support only; otherwise just prints the warning banner. */
static int
gc_dump_trigger_show(struct seq_file *m, void *data)
{
#if gcdENABLE_3D
    gcsINFO_NODE *node = m->private;
    gckGALDEVICE device = node->device;
    gckKERNEL kernel = gcvNULL;

    /* Validate the selected core index before dereferencing. */
    if (dumpCore >= gcvCORE_MAJOR && dumpCore < gcvCORE_COUNT)
    {
        kernel = device->kernels[dumpCore];
    }
#endif

    seq_printf(m, gcdDEBUG_FS_WARN);

#if gcdENABLE_3D
    seq_printf(m, "Get dump from /proc/kmsg or /sys/kernel/debug/gc/galcore_trace\n");

    /* Only dump while power management is disabled, so the GPU cannot be
     * powered down while its registers are being read. */
    if (kernel && kernel->hardware->options.powerManagement == gcvFALSE)
    {
        _DumpState(kernel);
    }
#endif

    return 0;
}

/* PID filter for the 'vidmem' entry: 0 = show every process; otherwise
 * only the database of this PID (set via gc_vidmem_write). */
static int dumpProcess = 0;


/* debugfs 'vidmem' entry: per-process video memory usage counters, either
 * for all processes or for the single PID selected by dumpProcess. */
static int gc_vidmem_show(struct seq_file *m, void *unused)
{
    gceSTATUS status;
    gcsDATABASE_PTR database;
    gcsINFO_NODE *node = m->private;
    gckGALDEVICE device = node->device;
    char name[64];
    int i;

    gckKERNEL kernel = _GetValidKernel(device);

    if (dumpProcess == 0)
    {
        /* Acquire the database mutex. */
        gcmkVERIFY_OK(
            gckOS_AcquireMutex(kernel->os, kernel->db->dbMutex, gcvINFINITE));

        for (i = 0; i < gcmCOUNTOF(kernel->db->db); i++)
        {
            for (database = kernel->db->db[i];
                database != gcvNULL;
                database = database->next)
            {
                gckOS_GetProcessNameByPid(database->processID, gcmSIZEOF(name), name);
                seq_printf(m, "VidMem Usage (Process %d: %s):\n", database->processID, name);
                _ShowCounters(m, database);
                seq_puts(m, "\n");
            }
        }

        /* Release the database mutex. */
        gcmkVERIFY_OK(gckOS_ReleaseMutex(kernel->os, kernel->db->dbMutex));
    }
    else
    {
        /* Find the database.
*/ + status = gckKERNEL_FindDatabase(kernel, dumpProcess, gcvFALSE, &database); + + if (gcmIS_ERROR(status)) + { + seq_printf(m, "ERROR: process %d not found\n", dumpProcess); + return 0; + } + + gckOS_GetProcessNameByPid(dumpProcess, gcmSIZEOF(name), name); + seq_printf(m, "VidMem Usage (Process %d: %s):\n", dumpProcess, name); + _ShowCounters(m, database); + } + + return 0; +} + +static inline int strtoint_from_user(const char __user *s, + size_t count, int *res) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) + int ret = kstrtoint_from_user(s, count, 10, res); + + return ret < 0 ? ret : count; +#else + /* sign, base 2 representation, newline, terminator */ + char buf[1 + sizeof(long) * 8 + 1 + 1]; + + size_t len = min(count, sizeof(buf) - 1); + + if (copy_from_user(buf, s, len)) + return -EFAULT; + buf[len] = '\0'; + + *res = (int) simple_strtol(buf, NULL, 0); + + return count; +#endif +} + +static int gc_vidmem_write(const char __user *buf, size_t count, void* data) +{ + return strtoint_from_user(buf, count, &dumpProcess); +} + +static int gc_dump_trigger_write(const char __user *buf, size_t count, void* data) +{ + return strtoint_from_user(buf, count, &dumpCore); +} + +static int gc_clk_show(struct seq_file* m, void* data) +{ + gcsINFO_NODE *node = m->private; + gckGALDEVICE device = node->device; + gctUINT i; + + for (i = gcvCORE_MAJOR; i < gcvCORE_COUNT; i++) + { + if (device->kernels[i]) + { + gckHARDWARE hardware = device->kernels[i]->hardware; + + + if (hardware->mcClk) + { + seq_printf(m, "gpu%d mc clock: %d HZ.\n", i, hardware->mcClk); + } + + if (hardware->shClk) + { + seq_printf(m, "gpu%d sh clock: %d HZ.\n", i, hardware->shClk); + } + } + } + + return 0; +} + +static gcsINFO InfoList[] = +{ + {"info", gc_info_show}, + {"clients", gc_clients_show}, + {"meminfo", gc_meminfo_show}, + {"idle", gc_idle_show}, + {"database", gc_db_show}, + {"version", gc_version_show}, + {"vidmem", gc_vidmem_show, gc_vidmem_write}, + {"dump_trigger", 
gc_dump_trigger_show, gc_dump_trigger_write}, + {"clk", gc_clk_show}, +}; + +static gceSTATUS +_DebugfsInit( + IN gckGALDEVICE Device + ) +{ + gceSTATUS status; + + gckDEBUGFS_DIR dir = &Device->debugfsDir; + + gcmkONERROR(gckDEBUGFS_DIR_Init(dir, gcvNULL, "gc")); + + gcmkONERROR(gckDEBUGFS_DIR_CreateFiles(dir, InfoList, gcmCOUNTOF(InfoList), Device)); + + return gcvSTATUS_OK; + +OnError: + return status; +} + +static void +_DebugfsCleanup( + IN gckGALDEVICE Device + ) +{ + gckDEBUGFS_DIR dir = &Device->debugfsDir; + + if (Device->debugfsDir.root) + { + gcmkVERIFY_OK(gckDEBUGFS_DIR_RemoveFiles(dir, InfoList, gcmCOUNTOF(InfoList))); + + gckDEBUGFS_DIR_Deinit(dir); + } +} + + +/******************************************************************************\ +*************************** Memory Allocation Wrappers ************************* +\******************************************************************************/ + +static gceSTATUS +_AllocateMemory( + IN gckGALDEVICE Device, + IN gctSIZE_T Bytes, + OUT gctPOINTER *Logical, + OUT gctPHYS_ADDR *Physical, + OUT gctUINT32 *PhysAddr + ) +{ + gceSTATUS status; + gctPHYS_ADDR_T physAddr; + + gcmkHEADER_ARG("Device=0x%x Bytes=%lu", Device, Bytes); + + gcmkVERIFY_ARGUMENT(Device != NULL); + gcmkVERIFY_ARGUMENT(Logical != NULL); + gcmkVERIFY_ARGUMENT(Physical != NULL); + gcmkVERIFY_ARGUMENT(PhysAddr != NULL); + + gcmkONERROR(gckOS_AllocateContiguous( + Device->os, gcvFALSE, &Bytes, Physical, Logical + )); + + gcmkONERROR(gckOS_GetPhysicalAddress( + Device->os, *Logical, &physAddr + )); + + gcmkSAFECASTPHYSADDRT(*PhysAddr, physAddr); + + /* Success. 
*/ + gcmkFOOTER_ARG( + "*Logical=0x%x *Physical=0x%x *PhysAddr=0x%08x", + *Logical, *Physical, *PhysAddr + ); + + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +static gceSTATUS +_FreeMemory( + IN gckGALDEVICE Device, + IN gctPOINTER Logical, + IN gctPHYS_ADDR Physical) +{ + gceSTATUS status; + + gcmkHEADER_ARG("Device=0x%x Logical=0x%x Physical=0x%x", + Device, Logical, Physical); + + gcmkVERIFY_ARGUMENT(Device != NULL); + + status = gckOS_FreeContiguous( + Device->os, Physical, Logical, + ((PLINUX_MDL) Physical)->numPages * PAGE_SIZE + ); + + gcmkFOOTER(); + return status; +} + +static gceSTATUS +_SetupVidMem( + IN gckGALDEVICE Device, + IN gctUINT32 ContiguousBase, + IN gctSIZE_T ContiguousSize, + IN gctSIZE_T BankSize, + IN gcsDEVICE_CONSTRUCT_ARGS * Args + ) +{ + gceSTATUS status; + gctUINT32 physAddr = ~0U; + gckGALDEVICE device = Device; + + /* set up the contiguous memory */ + device->contiguousBase = ContiguousBase; + device->contiguousSize = ContiguousSize; + + if (ContiguousSize > 0) + { + if (ContiguousBase == 0) + { + while (device->contiguousSize > 0) + { + /* Allocate contiguous memory. 
*/ + status = _AllocateMemory( + device, + device->contiguousSize, + &device->contiguousLogical, + &device->contiguousPhysical, + &physAddr + ); + + if (gcmIS_SUCCESS(status)) + { + status = gckVIDMEM_Construct( + device->os, + physAddr | device->systemMemoryBaseAddress, + device->contiguousSize, + 64, + BankSize, + &device->contiguousVidMem + ); + + if (gcmIS_SUCCESS(status)) + { + gckALLOCATOR allocator = ((PLINUX_MDL)device->contiguousPhysical)->allocator; + device->contiguousVidMem->capability = allocator->capability | gcvALLOC_FLAG_MEMLIMIT; + device->contiguousVidMem->physical = device->contiguousPhysical; + device->contiguousBase = physAddr; + break; + } + + gcmkONERROR(_FreeMemory( + device, + device->contiguousLogical, + device->contiguousPhysical + )); + + device->contiguousLogical = gcvNULL; + device->contiguousPhysical = gcvNULL; + } + + if (device->contiguousSize <= (4 << 20)) + { + device->contiguousSize = 0; + } + else + { + device->contiguousSize -= (4 << 20); + } + } + } + else + { + /* Create the contiguous memory heap. */ + status = gckVIDMEM_Construct( + device->os, + ContiguousBase | device->systemMemoryBaseAddress, + ContiguousSize, + 64, BankSize, + &device->contiguousVidMem + ); + + if (gcmIS_ERROR(status)) + { + /* Error, disable contiguous memory pool. 
*/ + device->contiguousVidMem = gcvNULL; + device->contiguousSize = 0; + } + else + { + gckALLOCATOR allocator; + + gcmkONERROR(gckOS_RequestReservedMemory( + device->os, ContiguousBase, ContiguousSize, + "galcore contiguous memory", + Args->contiguousRequested, + &device->contiguousPhysical + )); + + allocator = ((PLINUX_MDL)device->contiguousPhysical)->allocator; + device->contiguousVidMem->capability = allocator->capability | gcvALLOC_FLAG_MEMLIMIT; + device->contiguousVidMem->physical = device->contiguousPhysical; + device->requestedContiguousBase = ContiguousBase; + device->requestedContiguousSize = ContiguousSize; + + device->contiguousPhysicalName = 0; + device->contiguousSize = ContiguousSize; + } + } + } + + return gcvSTATUS_OK; +OnError: + return status; +} + +void +_SetupRegisterPhysical( + IN gckGALDEVICE Device, + IN gcsDEVICE_CONSTRUCT_ARGS * Args + ) +{ + gctINT *irqs = Args->irqs; + gctUINT *registerBases = Args->registerBases; + gctUINT *registerSizes = Args->registerSizes; + + gctINT i = 0; + + for (i = 0; i < gcvCORE_COUNT; i++) + { + if (irqs[i] != -1) + { + Device->requestedRegisterMemBases[i] = registerBases[i]; + Device->requestedRegisterMemSizes[i] = registerSizes[i]; + + gcmkTRACE_ZONE(gcvLEVEL_INFO, _GC_OBJ_ZONE, + "Get register base %llx of core %d", + registerBases[i], i); + } + } +} + +/******************************************************************************\ +******************************* Interrupt Handler ****************************** +\******************************************************************************/ +static irqreturn_t isrRoutine(int irq, void *ctxt) +{ + gceSTATUS status; + gckGALDEVICE device; + gceCORE core = (gceCORE)gcmPTR2INT32(ctxt) - 1; + + device = galDevice; + + /* Call kernel interrupt notification. 
*/ + status = gckKERNEL_Notify(device->kernels[core], gcvNOTIFY_INTERRUPT, gcvTRUE); + + if (gcmIS_SUCCESS(status)) + { + up(&device->semas[core]); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static int threadRoutine(void *ctxt) +{ + gckGALDEVICE device = galDevice; + gceCORE core = (gceCORE) gcmPTR2INT32(ctxt); + gctUINT i; + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER, + "Starting isr Thread with extension=%p", + device); + + if (core != gcvCORE_VG) + { + /* Make kernel update page table of this thread to include entry related to command buffer.*/ + for (i = 0; i < gcdCOMMAND_QUEUES; i++) + { + gctUINT32 data = *(gctUINT32_PTR)device->kernels[core]->command->queues[i].logical; + + data = 0; + } + } + + for (;;) + { + static int down; + + down = down_interruptible(&device->semas[core]); + if (down); /*To make gcc 4.6 happye*/ + + if (device->killThread == gcvTRUE) + { + /* The daemon exits. */ + while (!kthread_should_stop()) + { + gckOS_Delay(device->os, 1); + } + + return 0; + } + + gckKERNEL_Notify(device->kernels[core], + gcvNOTIFY_INTERRUPT, + gcvFALSE); + } +} + +static irqreturn_t isrRoutineVG(int irq, void *ctxt) +{ + return IRQ_NONE; +} + +/******************************************************************************\ +******************************* gckGALDEVICE Code ****************************** +\******************************************************************************/ + +static gceSTATUS +_StartThread( + IN int (*ThreadRoutine)(void *data), + IN gceCORE Core + ) +{ + gceSTATUS status; + gckGALDEVICE device = galDevice; + struct task_struct * task; + + if (device->kernels[Core] != gcvNULL) + { + /* Start the kernel thread. 
*/ + task = kthread_run(ThreadRoutine, (void *)Core, "galcore deamon thread for core[%d]", Core); + + if (IS_ERR(task)) + { + gcmkTRACE_ZONE( + gcvLEVEL_ERROR, gcvZONE_DRIVER, + "%s(%d): Could not start the kernel thread.\n", + __FUNCTION__, __LINE__ + ); + + gcmkONERROR(gcvSTATUS_GENERIC_IO); + } + + device->threadCtxts[Core] = task; + device->threadInitializeds[Core] = gcvTRUE; + } + else + { + device->threadInitializeds[Core] = gcvFALSE; + } + + return gcvSTATUS_OK; + +OnError: + return status; +} + +/******************************************************************************* +** +** gckGALDEVICE_Construct +** +** Constructor. +** +** INPUT: +** +** OUTPUT: +** +** gckGALDEVICE * Device +** Pointer to a variable receiving the gckGALDEVICE object pointer on +** success. +*/ +gceSTATUS +gckGALDEVICE_Construct( + IN gctINT IrqLine, + IN gctUINT32 RegisterMemBase, + IN gctSIZE_T RegisterMemSize, + IN gctINT IrqLine2D, + IN gctUINT32 RegisterMemBase2D, + IN gctSIZE_T RegisterMemSize2D, + IN gctINT IrqLineVG, + IN gctUINT32 RegisterMemBaseVG, + IN gctSIZE_T RegisterMemSizeVG, + IN gctUINT32 ContiguousBase, + IN gctSIZE_T ContiguousSize, + IN gctUINT32 ExternalBase, + IN gctSIZE_T ExternalSize, + IN gctSIZE_T BankSize, + IN gctINT FastClear, + IN gctINT Compression, + IN gctUINT32 PhysBaseAddr, + IN gctUINT32 PhysSize, + IN gctINT Signal, + IN gctUINT LogFileSize, + IN gctINT PowerManagement, + IN gctINT GpuProfiler, + IN gcsDEVICE_CONSTRUCT_ARGS * Args, + OUT gckGALDEVICE *Device + ) +{ + gctUINT32 internalBaseAddress = 0, internalAlignment = 0; + gctUINT32 externalAlignment = 0; + gctUINT32 physical; + gckGALDEVICE device; + gceSTATUS status; + gctINT32 i; + gceHARDWARE_TYPE type; + gckKERNEL kernel = gcvNULL; + + gcmkHEADER_ARG("IrqLine=%d RegisterMemBase=0x%08x RegisterMemSize=%u " + "IrqLine2D=%d RegisterMemBase2D=0x%08x RegisterMemSize2D=%u " + "IrqLineVG=%d RegisterMemBaseVG=0x%08x RegisterMemSizeVG=%u " + "ContiguousBase=0x%08x ContiguousSize=%lu 
BankSize=%lu " + "FastClear=%d Compression=%d PhysBaseAddr=0x%x PhysSize=%d Signal=%d", + IrqLine, RegisterMemBase, RegisterMemSize, + IrqLine2D, RegisterMemBase2D, RegisterMemSize2D, + IrqLineVG, RegisterMemBaseVG, RegisterMemSizeVG, + ContiguousBase, ContiguousSize, BankSize, FastClear, Compression, + PhysBaseAddr, PhysSize, Signal); + +#if !gcdENABLE_3D + IrqLine = -1; +#endif + + IrqLine2D = -1; + /* Allocate device structure. */ + device = kmalloc(sizeof(struct _gckGALDEVICE), GFP_KERNEL | __GFP_NOWARN); + if (!device) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + memset(device, 0, sizeof(struct _gckGALDEVICE)); + + device->dbgNode = gcvNULL; + + device->platform = Args->platform; + + device->args = *Args; + + /* set up the contiguous memory */ + device->contiguousSize = ContiguousSize; + + /* Clear irq lines. */ + for (i = 0; i < gcdMAX_GPU_COUNT; i++) + { + device->irqLines[i] = -1; + } + + gcmkONERROR(_DebugfsInit(device)); + + if (gckDEBUGFS_CreateNode( + device, LogFileSize, device->debugfsDir.root ,DEBUG_FILE, &(device->dbgNode))) + { + gcmkTRACE_ZONE( + gcvLEVEL_ERROR, gcvZONE_DRIVER, + "%s(%d): Failed to create the debug file system %s/%s \n", + __FUNCTION__, __LINE__, + PARENT_FILE, DEBUG_FILE + ); + } + else if (LogFileSize) + { + gckDEBUGFS_SetCurrentNode(device->dbgNode); + } + + _SetupRegisterPhysical(device, Args); + + if (IrqLine != -1) + { + device->requestedRegisterMemBases[gcvCORE_MAJOR] = RegisterMemBase; + device->requestedRegisterMemSizes[gcvCORE_MAJOR] = RegisterMemSize; + } + + if (IrqLine2D != -1) + { + device->requestedRegisterMemBases[gcvCORE_2D] = RegisterMemBase2D; + device->requestedRegisterMemSizes[gcvCORE_2D] = RegisterMemSize2D; + } + + if (IrqLineVG != -1) + { + device->requestedRegisterMemBases[gcvCORE_VG] = RegisterMemBaseVG; + device->requestedRegisterMemSizes[gcvCORE_VG] = RegisterMemSizeVG; + } +#if gcdDEC_ENABLE_AHB + { + device->requestedRegisterMemBases[gcvCORE_DEC] = Args->registerMemBaseDEC300; + 
device->requestedRegisterMemSizes[gcvCORE_DEC] = Args->registerMemSizeDEC300; + } +#endif + + + for (i = gcvCORE_MAJOR; i < gcvCORE_COUNT; i++) + { + if (Args->irqs[i] != -1) + { + device->requestedRegisterMemBases[i] = Args->registerBases[i]; + device->requestedRegisterMemSizes[i] = Args->registerSizes[i]; + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DEVICE, + "%s(%d): Core = %d, RegiseterBase = %x", + __FUNCTION__, __LINE__, + i, Args->registerBases[i] + ); + } + } + + /* Initialize the ISR. */ + device->irqLines[gcvCORE_MAJOR] = IrqLine; + device->irqLines[gcvCORE_2D] = IrqLine2D; + device->irqLines[gcvCORE_VG] = IrqLineVG; + + for (i = gcvCORE_MAJOR; i < gcvCORE_COUNT; i++) + { + if (Args->irqs[i] != -1) + { + device->irqLines[i] = Args->irqs[i]; + } + } + + device->requestedContiguousBase = 0; + device->requestedContiguousSize = 0; + + for (i = 0; i < gcdMAX_GPU_COUNT; i++) + { + physical = device->requestedRegisterMemBases[i]; + + /* Set up register memory region. */ + if (physical != 0) + { + if (Args->registerMemMapped) + { + device->registerBases[i] = Args->registerMemAddress; + device->requestedRegisterMemBases[i] = 0; + + } + else + { +#if USE_LINUX_PCIE + device->registerBases[i] = (gctPOINTER) pci_iomap(device->platform->device, 1, + device->requestedRegisterMemSizes[i]); +#else + if (!request_mem_region(physical, device->requestedRegisterMemSizes[i], "galcore register region")) + { + gcmkTRACE_ZONE( + gcvLEVEL_ERROR, gcvZONE_DRIVER, + "%s(%d): Failed to claim %lu bytes @ 0x%zx\n", + __FUNCTION__, __LINE__, + physical, device->requestedRegisterMemSizes[i] + ); + + gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); + } + device->registerBases[i] = (gctPOINTER)ioremap_nocache( + physical, device->requestedRegisterMemSizes[i]); +#endif + + if (device->registerBases[i] == gcvNULL) + { + gcmkTRACE_ZONE( + gcvLEVEL_ERROR, gcvZONE_DRIVER, + "%s(%d): Unable to map %ld bytes @ 0x%08X\n", + __FUNCTION__, __LINE__, + physical, device->requestedRegisterMemSizes[i] + ); + + 
gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); + } + } + + physical += device->requestedRegisterMemSizes[i]; + } + } + + /* Set the base address */ + device->baseAddress = device->physBase = PhysBaseAddr; + device->physSize = PhysSize; + + /* Construct the gckOS object. */ + gcmkONERROR(gckOS_Construct(device, &device->os)); + + /* Construct the gckDEVICE object for os independent core management. */ + gcmkONERROR(gckDEVICE_Construct(device->os, &device->device)); + + if (device->irqLines[gcvCORE_MAJOR] != -1) + { + gcmkONERROR(gctaOS_ConstructOS(device->os, &device->taos)); + } + + gcmkONERROR(_SetupVidMem(device, ContiguousBase, ContiguousSize, BankSize, Args)); + + /* Set external base and size */ + device->externalBase = ExternalBase; + device->externalSize = ExternalSize; + + if (device->irqLines[gcvCORE_MAJOR] != -1) + { + gcmkONERROR(gcTA_Construct(device->taos, gcvCORE_MAJOR, &globalTA[gcvCORE_MAJOR])); + + gcmkONERROR(gckDEVICE_AddCore(device->device, gcvCORE_MAJOR, Args->chipIDs[gcvCORE_MAJOR], device, &device->kernels[gcvCORE_MAJOR])); + + gcmkONERROR(gckHARDWARE_SetFastClear( + device->kernels[gcvCORE_MAJOR]->hardware, FastClear, Compression + )); + + gcmkONERROR(gckHARDWARE_SetPowerManagement( + device->kernels[gcvCORE_MAJOR]->hardware, PowerManagement + )); + +#if gcdENABLE_FSCALE_VAL_ADJUST + gcmkONERROR(gckHARDWARE_SetMinFscaleValue( + device->kernels[gcvCORE_MAJOR]->hardware, Args->gpu3DMinClock + )); +#endif + + gcmkONERROR(gckHARDWARE_SetGpuProfiler( + device->kernels[gcvCORE_MAJOR]->hardware, GpuProfiler + )); + } + else + { + device->kernels[gcvCORE_MAJOR] = gcvNULL; + } + + if (device->irqLines[gcvCORE_2D] != -1) + { + gcmkONERROR(gckDEVICE_AddCore(device->device, gcvCORE_2D, gcvCHIP_ID_DEFAULT, device, &device->kernels[gcvCORE_2D])); + + /* Verify the hardware type */ + gcmkONERROR(gckHARDWARE_GetType(device->kernels[gcvCORE_2D]->hardware, &type)); + + if (type != gcvHARDWARE_2D) + { + gcmkTRACE_ZONE( + gcvLEVEL_ERROR, gcvZONE_DRIVER, + "%s(%d): 
Unexpected hardware type: %d\n", + __FUNCTION__, __LINE__, + type + ); + + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + gcmkONERROR(gckHARDWARE_SetPowerManagement( + device->kernels[gcvCORE_2D]->hardware, PowerManagement + )); + +#if gcdENABLE_FSCALE_VAL_ADJUST + gcmkONERROR(gckHARDWARE_SetMinFscaleValue( + device->kernels[gcvCORE_2D]->hardware, 1 + )); +#endif + } + else + { + device->kernels[gcvCORE_2D] = gcvNULL; + } + + if (device->irqLines[gcvCORE_VG] != -1) + { + } + else + { + device->kernels[gcvCORE_VG] = gcvNULL; + } + + /* Add core for multiple core. */ + for (i = gcvCORE_3D1; i <= gcvCORE_3D_MAX; i++) + { + if (Args->irqs[i] != -1) + { + gcmkONERROR(gcTA_Construct(device->taos, (gceCORE)i, &globalTA[i])); + gckDEVICE_AddCore(device->device, i, Args->chipIDs[i], device, &device->kernels[i]); + + gcmkONERROR( + gckHARDWARE_SetFastClear(device->kernels[i]->hardware, + FastClear, + Compression)); + + gcmkONERROR(gckHARDWARE_SetPowerManagement( + device->kernels[i]->hardware, PowerManagement + )); + + gcmkONERROR(gckHARDWARE_SetGpuProfiler( + device->kernels[i]->hardware, GpuProfiler + )); + } + } + + /* Initialize the kernel thread semaphores. */ + for (i = 0; i < gcdMAX_GPU_COUNT; i++) + { + if (device->irqLines[i] != -1) sema_init(&device->semas[i], 0); + } + + device->signal = Signal; + + for (i = 0; i < gcdMAX_GPU_COUNT; i++) + { + if (device->kernels[i] != gcvNULL) break; + } + + if (i == gcdMAX_GPU_COUNT) + { + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + { + /* Query the ceiling of the system memory. */ + gcmkONERROR(gckHARDWARE_QuerySystemMemory( + device->kernels[i]->hardware, + &device->systemMemorySize, + &device->systemMemoryBaseAddress + )); + } + + /* Grab the first availiable kernel */ + for (i = 0; i < gcdMAX_GPU_COUNT; i++) + { + if (device->irqLines[i] != -1) + { + kernel = device->kernels[i]; + break; + } + } + + /* Set up the internal memory region. 
*/ + if (device->internalSize > 0) + { + status = gckVIDMEM_Construct( + device->os, + internalBaseAddress, device->internalSize, internalAlignment, + 0, &device->internalVidMem + ); + + if (gcmIS_ERROR(status)) + { + /* Error, disable internal heap. */ + device->internalSize = 0; + } + else + { + /* Map internal memory. */ + device->internalLogical + = (gctPOINTER) ioremap_nocache(physical, device->internalSize); + + if (device->internalLogical == gcvNULL) + { + gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); + } + + device->internalPhysical = (gctPHYS_ADDR)(gctUINTPTR_T) physical; + physical += device->internalSize; + } + } + + if (device->externalSize > 0) + { + /* create the external memory heap */ + status = gckVIDMEM_Construct( + device->os, + device->externalBase, device->externalSize, externalAlignment, + 0, &device->externalVidMem + ); + + if (gcmIS_ERROR(status)) + { + /* Error, disable external heap. */ + device->externalSize = 0; + } + else + { + /* Map external memory. */ + gcmkONERROR(gckOS_RequestReservedMemory( + device->os, + device->externalBase, device->externalSize, + "galcore external memory", + gcvTRUE, + &device->externalPhysical + )); + device->externalVidMem->physical = device->externalPhysical; + } + } + + if (device->internalPhysical) + { + device->internalPhysicalName = gcmPTR_TO_NAME(device->internalPhysical); + } + + if (device->externalPhysical) + { + device->externalPhysicalName = gcmPTR_TO_NAME(device->externalPhysical); + } + + if (device->contiguousPhysical) + { + device->contiguousPhysicalName = gcmPTR_TO_NAME(device->contiguousPhysical); + } + + /* Return pointer to the device. */ + *Device = galDevice = device; + + gcmkFOOTER_ARG("*Device=0x%x", * Device); + return gcvSTATUS_OK; + +OnError: + /* Roll back. */ + gcmkVERIFY_OK(gckGALDEVICE_Destroy(device)); + + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckGALDEVICE_Destroy +** +** Class destructor. 
+** +** INPUT: +** +** Nothing. +** +** OUTPUT: +** +** Nothing. +** +** RETURNS: +** +** Nothing. +*/ +gceSTATUS +gckGALDEVICE_Destroy( + gckGALDEVICE Device) +{ + gctINT i; + gckKERNEL kernel = gcvNULL; + + gcmkHEADER_ARG("Device=0x%x", Device); + + if (Device != gcvNULL) + { + /* Grab the first availiable kernel */ + for (i = 0; i < gcdMAX_GPU_COUNT; i++) + { + if (Device->irqLines[i] != -1) + { + kernel = Device->kernels[i]; + break; + } + } + + if (Device->internalPhysicalName != 0) + { + gcmRELEASE_NAME(Device->internalPhysicalName); + Device->internalPhysicalName = 0; + } + if (Device->externalPhysicalName != 0) + { + gcmRELEASE_NAME(Device->externalPhysicalName); + Device->externalPhysicalName = 0; + } + if (Device->contiguousPhysicalName != 0) + { + gcmRELEASE_NAME(Device->contiguousPhysicalName); + Device->contiguousPhysicalName = 0; + } + + for (i = 0; i < gcdMAX_GPU_COUNT; i++) + { + if (Device->kernels[i] != gcvNULL) + { + Device->kernels[i] = gcvNULL; + } + } + + if (Device->internalLogical != gcvNULL) + { + /* Unmap the internal memory. */ + iounmap(Device->internalLogical); + Device->internalLogical = gcvNULL; + } + + if (Device->internalVidMem != gcvNULL) + { + /* Destroy the internal heap. 
*/ + gcmkVERIFY_OK(gckVIDMEM_Destroy(Device->internalVidMem)); + Device->internalVidMem = gcvNULL; + } + + if (Device->externalPhysical != gcvNULL) + { + gckOS_ReleaseReservedMemory( + Device->os, + Device->externalPhysical + ); + } + + if (Device->externalLogical != gcvNULL) + { + Device->externalLogical = gcvNULL; + } + + if (Device->externalVidMem != gcvNULL) + { + /* destroy the external heap */ + gcmkVERIFY_OK(gckVIDMEM_Destroy(Device->externalVidMem)); + Device->externalVidMem = gcvNULL; + } + + if (Device->contiguousPhysical != gcvNULL) + { + if (Device->requestedContiguousBase == 0) + { + gcmkVERIFY_OK(_FreeMemory( + Device, + Device->contiguousLogical, + Device->contiguousPhysical + )); + } + else + { + gckOS_ReleaseReservedMemory( + Device->os, + Device->contiguousPhysical + ); + + Device->requestedContiguousBase = 0; + Device->requestedContiguousSize = 0; + } + + Device->contiguousLogical = gcvNULL; + Device->contiguousPhysical = gcvNULL; + } + + if (Device->contiguousVidMem != gcvNULL) + { + /* Destroy the contiguous heap. */ + gcmkVERIFY_OK(gckVIDMEM_Destroy(Device->contiguousVidMem)); + Device->contiguousVidMem = gcvNULL; + } + + for (i = 0; i < gcdMAX_GPU_COUNT; i++) + { + if (Device->registerBases[i]) + { + /* Unmap register memory. 
*/ + if (Device->requestedRegisterMemBases[i] != 0) + { +#if USE_LINUX_PCIE + pci_iounmap(Device->platform->device, Device->registerBases[i]); +#else + + iounmap(Device->registerBases[i]); + release_mem_region(Device->requestedRegisterMemBases[i], + Device->requestedRegisterMemSizes[i]); +#endif + } + + Device->registerBases[i] = gcvNULL; + Device->requestedRegisterMemBases[i] = 0; + Device->requestedRegisterMemSizes[i] = 0; + } + } + + if (Device->device) + { + gcmkVERIFY_OK(gckDEVICE_Destroy(Device->os, Device->device)); + + for (i = 0; i < gcdMAX_GPU_COUNT; i++) + { + if (globalTA[i]) + { + gcTA_Destroy(globalTA[i]); + globalTA[i] = gcvNULL; + } + } + + Device->device = gcvNULL; + } + + if (Device->taos) + { + gcmkVERIFY_OK(gctaOS_DestroyOS(Device->taos)); + Device->taos = gcvNULL; + } + + /* Destroy the gckOS object. */ + if (Device->os != gcvNULL) + { + gcmkVERIFY_OK(gckOS_Destroy(Device->os)); + Device->os = gcvNULL; + } + + if (Device->dbgNode) + { + gckDEBUGFS_FreeNode(Device->dbgNode); + + if(Device->dbgNode != gcvNULL) + { + kfree(Device->dbgNode); + Device->dbgNode = gcvNULL; + } + } + + _DebugfsCleanup(Device); + + /* Free the device. */ + kfree(Device); + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +static const char *isrNames[] = +{ + "galcore:0", + "galcore:3d-1", + "galcore:3d-2", + "galcore:3d-3", + "galcore:3d-4", + "galcore:3d-5", + "galcore:3d-6", + "galcore:3d-7", + "galcore:2d", + "galcore:vg", +#if gcdDEC_ENABLE_AHB + "galcore:dec" +#endif +}; + +/******************************************************************************* +** +** gckGALDEVICE_Setup_ISR +** +** Start the ISR routine. +** +** INPUT: +** +** gckGALDEVICE Device +** Pointer to an gckGALDEVICE object. +** +** OUTPUT: +** +** Nothing. +** +** RETURNS: +** +** gcvSTATUS_OK +** Setup successfully. +** gcvSTATUS_GENERIC_IO +** Setup failed. 
+*/ +gceSTATUS +gckGALDEVICE_Setup_ISR( + IN gceCORE Core + ) +{ + gceSTATUS status; + gctINT ret = 0; + gckGALDEVICE Device = galDevice; + + gcmkHEADER_ARG("Device=0x%x Core=%d", Device, Core); + + gcmkVERIFY_ARGUMENT(Device != NULL); + + if (Device->irqLines[Core] < 0) + { + gcmkONERROR(gcvSTATUS_GENERIC_IO); + } + +#if defined(__GNUC__) && ((__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4)) + { + _Static_assert(gcvCORE_COUNT == gcmCOUNTOF(isrNames), + "Core count is lager than isrNames size"); + } +#endif + + /* Hook up the isr based on the irq line. */ + ret = request_irq( + Device->irqLines[Core], isrRoutine, gcdIRQF_FLAG, + isrNames[Core], (void *)(uintptr_t)(Core + 1) + ); + + if (ret != 0) + { + gcmkTRACE_ZONE( + gcvLEVEL_ERROR, gcvZONE_DRIVER, + "%s(%d): Could not register irq line %d (error=%d)\n", + __FUNCTION__, __LINE__, + Device->irqLines[Core], ret + ); + + gcmkONERROR(gcvSTATUS_GENERIC_IO); + } + + /* Mark ISR as initialized. */ + Device->isrInitializeds[Core] = gcvTRUE; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckGALDEVICE_Setup_ISR_VG( + IN gckGALDEVICE Device + ) +{ + gceSTATUS status; + gctINT ret; + + gcmkHEADER_ARG("Device=0x%x", Device); + + gcmkVERIFY_ARGUMENT(Device != NULL); + + if (Device->irqLines[gcvCORE_VG] < 0) + { + gcmkONERROR(gcvSTATUS_GENERIC_IO); + } + + /* Hook up the isr based on the irq line. */ + ret = request_irq( + Device->irqLines[gcvCORE_VG], isrRoutineVG, gcdIRQF_FLAG, + isrNames[gcvCORE_VG], Device + ); + + if (ret != 0) + { + gcmkTRACE_ZONE( + gcvLEVEL_ERROR, gcvZONE_DRIVER, + "%s(%d): Could not register irq line %d (error=%d)\n", + __FUNCTION__, __LINE__, + Device->irqLines[gcvCORE_VG], ret + ); + + gcmkONERROR(gcvSTATUS_GENERIC_IO); + } + + /* Mark ISR as initialized. 
*/ + Device->isrInitializeds[gcvCORE_VG] = gcvTRUE; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckGALDEVICE_Release_ISR +** +** Release the irq line. +** +** INPUT: +** +** gckGALDEVICE Device +** Pointer to an gckGALDEVICE object. +** +** OUTPUT: +** +** Nothing. +** +** RETURNS: +** +** Nothing. +*/ +gceSTATUS +gckGALDEVICE_Release_ISR( + IN gceCORE Core + ) +{ + gckGALDEVICE Device = galDevice; + gcmkHEADER_ARG("Device=0x%x", Device); + + gcmkVERIFY_ARGUMENT(Device != NULL); + + /* release the irq */ + if (Device->isrInitializeds[Core]) + { + free_irq(Device->irqLines[Core], (void *)(uintptr_t)(Core + 1)); + Device->isrInitializeds[Core] = gcvFALSE; + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gckGALDEVICE_Release_ISR_VG( + IN gckGALDEVICE Device + ) +{ + gcmkHEADER_ARG("Device=0x%x", Device); + + gcmkVERIFY_ARGUMENT(Device != NULL); + + /* release the irq */ + if (Device->isrInitializeds[gcvCORE_VG]) + { + free_irq(Device->irqLines[gcvCORE_VG], Device); + Device->isrInitializeds[gcvCORE_VG] = gcvFALSE; + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckGALDEVICE_Start_Threads +** +** Start the daemon threads. +** +** INPUT: +** +** gckGALDEVICE Device +** Pointer to an gckGALDEVICE object. +** +** OUTPUT: +** +** Nothing. +** +** RETURNS: +** +** gcvSTATUS_OK +** Start successfully. +** gcvSTATUS_GENERIC_IO +** Start failed. 
+*/ +gceSTATUS +gckGALDEVICE_Start_Threads( + IN gckGALDEVICE Device + ) +{ + gceSTATUS status; + gctUINT i; + + gcmkHEADER_ARG("Device=0x%x", Device); + + gcmkVERIFY_ARGUMENT(Device != NULL); + + gcmkONERROR(_StartThread(threadRoutine, gcvCORE_MAJOR)); + gcmkONERROR(_StartThread(threadRoutine, gcvCORE_2D)); + + gcmkONERROR(_StartThread(threadRoutine, gcvCORE_VG)); + + for (i = gcvCORE_3D1; i <= gcvCORE_3D_MAX; i++) + { + gcmkONERROR(_StartThread(threadRoutine, i)); + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckGALDEVICE_Stop_Threads +** +** Stop the gal device, including the following actions: stop the daemon +** thread, release the irq. +** +** INPUT: +** +** gckGALDEVICE Device +** Pointer to an gckGALDEVICE object. +** +** OUTPUT: +** +** Nothing. +** +** RETURNS: +** +** Nothing. +*/ +gceSTATUS +gckGALDEVICE_Stop_Threads( + gckGALDEVICE Device + ) +{ + gctINT i; + + gcmkHEADER_ARG("Device=0x%x", Device); + + gcmkVERIFY_ARGUMENT(Device != NULL); + + for (i = 0; i < gcdMAX_GPU_COUNT; i++) + { + /* Stop the kernel threads. 
*/ + if (Device->threadInitializeds[i]) + { + Device->killThread = gcvTRUE; + up(&Device->semas[i]); + + kthread_stop(Device->threadCtxts[i]); + Device->threadCtxts[i] = gcvNULL; + Device->threadInitializeds[i] = gcvFALSE; + } + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gckGALDEVICE_QueryFrequency( + IN gckGALDEVICE Device + ) +{ + gctUINT64 mcStart[gcvCORE_COUNT], shStart[gcvCORE_COUNT]; + gctUINT32 mcClk[gcvCORE_COUNT], shClk[gcvCORE_COUNT]; + gckHARDWARE hardware = gcvNULL; + gceSTATUS status; + gctUINT i; + + gcmkHEADER_ARG("Device=0x%p", Device); + + for (i = gcvCORE_MAJOR; i < gcvCORE_COUNT; i++) + { + + if (Device->kernels[i]) + { + hardware = Device->kernels[i]->hardware; + + mcStart[i] = shStart[i] = 0; + + if (Device->args.powerManagement) + { + gcmkONERROR(gckHARDWARE_SetPowerManagement( + hardware, gcvFALSE + )); + } + + gcmkONERROR(gckHARDWARE_SetPowerManagementState( + hardware, gcvPOWER_ON_AUTO + )); + + gckHARDWARE_EnterQueryClock(hardware, + &mcStart[i], &shStart[i]); + } + } + + gcmkONERROR(gckOS_Delay(Device->os, 50)); + + for (i = gcvCORE_MAJOR; i < gcvCORE_COUNT; i++) + { + mcClk[i] = shClk[i] = 0; + + + if (Device->kernels[i] && mcStart[i]) + { + hardware = Device->kernels[i]->hardware; + + if (Device->args.powerManagement) + { + gcmkONERROR(gckHARDWARE_SetPowerManagement( + hardware, gcvTRUE + )); + } + + gckHARDWARE_ExitQueryClock(hardware, + mcStart[i], shStart[i], + &mcClk[i], &shClk[i]); + + hardware->mcClk = mcClk[i]; + hardware->shClk = shClk[i]; + } + } + +OnError: + gcmkFOOTER_NO(); + + return status; +} + +/******************************************************************************* +** +** gckGALDEVICE_Start +** +** Start the gal device, including the following actions: setup the isr routine +** and start the daemoni thread. +** +** INPUT: +** +** gckGALDEVICE Device +** Pointer to an gckGALDEVICE object. +** +** OUTPUT: +** +** Nothing. +** +** RETURNS: +** +** gcvSTATUS_OK +** Start successfully. 
+*/ +gceSTATUS +gckGALDEVICE_Start( + IN gckGALDEVICE Device + ) +{ + gceSTATUS status; + gctUINT i; + + gcmkHEADER_ARG("Device=0x%x", Device); + + /* Start the kernel thread. */ + gcmkONERROR(gckGALDEVICE_Start_Threads(Device)); + + gcmkONERROR(gckGALDEVICE_QueryFrequency(Device)); + + for (i = 0; i < gcvCORE_COUNT; i++) + { + if (i == gcvCORE_VG) + { + continue; + } + + if (Device->kernels[i] != gcvNULL) + { + /* Setup the ISR routine. */ + gcmkONERROR(gckGALDEVICE_Setup_ISR(i)); + + /* Switch to SUSPEND power state. */ + gcmkONERROR(gckHARDWARE_SetPowerManagementState( + Device->kernels[i]->hardware, gcvPOWER_OFF_BROADCAST + )); + } + } + + if (Device->kernels[gcvCORE_VG] != gcvNULL) + { + /* Setup the ISR routine. */ + gcmkONERROR(gckGALDEVICE_Setup_ISR_VG(Device)); + + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckGALDEVICE_Stop +** +** Stop the gal device, including the following actions: stop the daemon +** thread, release the irq. +** +** INPUT: +** +** gckGALDEVICE Device +** Pointer to an gckGALDEVICE object. +** +** OUTPUT: +** +** Nothing. +** +** RETURNS: +** +** Nothing. +*/ +gceSTATUS +gckGALDEVICE_Stop( + gckGALDEVICE Device + ) +{ + gceSTATUS status; + gctUINT i; + + gcmkHEADER_ARG("Device=0x%x", Device); + + gcmkVERIFY_ARGUMENT(Device != NULL); + + for (i = 0; i < gcvCORE_COUNT; i++) + { + if (i == gcvCORE_VG) + { + continue; + } + + if (Device->kernels[i] != gcvNULL) + { + gcmkONERROR(gckHARDWARE_SetPowerManagement( + Device->kernels[i]->hardware, gcvTRUE + )); + + /* Switch to OFF power state. */ + gcmkONERROR(gckHARDWARE_SetPowerManagementState( + Device->kernels[i]->hardware, gcvPOWER_OFF + )); + + /* Remove the ISR routine. */ + gcmkONERROR(gckGALDEVICE_Release_ISR(i)); + } + } + + if (Device->kernels[gcvCORE_VG] != gcvNULL) + { + /* Setup the ISR routine. 
*/ + gcmkONERROR(gckGALDEVICE_Release_ISR_VG(Device)); + + } + + /* Stop the kernel thread. */ + gcmkONERROR(gckGALDEVICE_Stop_Threads(Device)); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckGALDEVICE_AddCore +** +** Add a core after gckGALDevice is constructed. +** +** INPUT: +** +** OUTPUT: +** +*/ +gceSTATUS +gckGALDEVICE_AddCore( + IN gckGALDEVICE Device, + IN gcsDEVICE_CONSTRUCT_ARGS * Args + ) +{ + gceSTATUS status; + gceCORE core = gcvCORE_COUNT; + gctUINT i = 0; + + gcmkHEADER(); + gcmkVERIFY_ARGUMENT(Device != gcvNULL); + + /* Find which core is added. */ + for (i = 0; i < gcvCORE_COUNT; i++) + { + if (Args->irqs[i] != -1) + { + core = i; + break; + } + } + + if (i == gcvCORE_COUNT) + { + gcmkPRINT("[galcore]: No valid core information found"); + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + + gcmkPRINT("[galcore]: add core[%d]", core); + + /* Record irq, registerBase, registerSize. */ + Device->irqLines[core] = Args->irqs[core]; + _SetupRegisterPhysical(Device, Args); + + /* Map register memory.*/ + + /* Add a platform indepedent framework. */ + gcmkONERROR(gckDEVICE_AddCore( + Device->device, + core, + Args->chipIDs[core], + Device, + &Device->kernels[core] + )); + + /* Start thread routine. */ + _StartThread(threadRoutine, core); + + /* Register ISR. */ + gckGALDEVICE_Setup_ISR(core); + + /* Set default power management state. 
*/ + gcmkONERROR(gckHARDWARE_SetPowerManagementState( + Device->kernels[core]->hardware, gcvPOWER_OFF_BROADCAST + )); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return gcvSTATUS_OK; +} + diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_device.h b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_device.h new file mode 100644 index 000000000000..185485cf310b --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_device.h @@ -0,0 +1,272 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#ifndef __gc_hal_kernel_device_h_ +#define __gc_hal_kernel_device_h_ + +#include "gc_hal_kernel_debugfs.h" +#include "gc_hal_ta.h" + +typedef struct _gcsDEVICE_CONSTRUCT_ARGS +{ + gctBOOL recovery; + gctUINT stuckDump; + gctUINT gpu3DMinClock; + + gctBOOL contiguousRequested; + gcsPLATFORM* platform; + gctBOOL mmu; + gctBOOL registerMemMapped; + gctPOINTER registerMemAddress; +#if gcdDEC_ENABLE_AHB + gctUINT32 registerMemBaseDEC300; + gctSIZE_T registerMemSizeDEC300; +#endif + gctINT irqs[gcvCORE_COUNT]; + gctUINT registerBases[gcvCORE_COUNT]; + gctUINT registerSizes[gcvCORE_COUNT]; + gctBOOL powerManagement; + gctBOOL gpuProfiler; + gctUINT chipIDs[gcvCORE_COUNT]; +} +gcsDEVICE_CONSTRUCT_ARGS; + +/******************************************************************************\ +************************** gckGALDEVICE Structure ****************************** +\******************************************************************************/ + +typedef struct _gckGALDEVICE +{ + /* Objects. */ + gckOS os; + gckKERNEL kernels[gcdMAX_GPU_COUNT]; + + gcsPLATFORM* platform; + + /* Attributes. */ + gctSIZE_T internalSize; + gctPHYS_ADDR internalPhysical; + gctUINT32 internalPhysicalName; + gctPOINTER internalLogical; + gckVIDMEM internalVidMem; + + gctUINT32 externalBase; + gctSIZE_T externalSize; + gctPHYS_ADDR externalPhysical; + gctUINT32 externalPhysicalName; + gctPOINTER externalLogical; + gckVIDMEM externalVidMem; + + gctPHYS_ADDR_T contiguousBase; + gctSIZE_T contiguousSize; + + gckVIDMEM contiguousVidMem; + gctPOINTER contiguousLogical; + gctPHYS_ADDR contiguousPhysical; + gctUINT32 contiguousPhysicalName; + + gctSIZE_T systemMemorySize; + gctUINT32 systemMemoryBaseAddress; + gctPOINTER registerBases[gcdMAX_GPU_COUNT]; + gctSIZE_T registerSizes[gcdMAX_GPU_COUNT]; + + gctUINT32 baseAddress; + gctUINT32 physBase; + gctUINT32 physSize; + + /* By request_mem_region. 
*/ + gctUINT32 requestedRegisterMemBases[gcdMAX_GPU_COUNT]; + gctSIZE_T requestedRegisterMemSizes[gcdMAX_GPU_COUNT]; + + /* By request_mem_region. */ + gctUINT32 requestedContiguousBase; + gctSIZE_T requestedContiguousSize; + + /* IRQ management. */ + gctINT irqLines[gcdMAX_GPU_COUNT]; + gctBOOL isrInitializeds[gcdMAX_GPU_COUNT]; + + /* Thread management. */ + struct task_struct *threadCtxts[gcdMAX_GPU_COUNT]; + struct semaphore semas[gcdMAX_GPU_COUNT]; + gctBOOL threadInitializeds[gcdMAX_GPU_COUNT]; + gctBOOL killThread; + + /* Signal management. */ + gctINT signal; + + /* States before suspend. */ + gceCHIPPOWERSTATE statesStored[gcdMAX_GPU_COUNT]; + + /* Device Debug File System Entry in kernel. */ + struct _gcsDEBUGFS_Node * dbgNode; + + gcsDEBUGFS_DIR debugfsDir; + + gckDEVICE device; + + gcsDEVICE_CONSTRUCT_ARGS args; + + /* gctsOs object for trust application. */ + gctaOS taos; + +#if gcdENABLE_DRM + void* drm; +#endif +} +* gckGALDEVICE; + +typedef struct _gcsHAL_PRIVATE_DATA +{ + gckGALDEVICE device; + /* + * 'fput' schedules actual work in '__fput' in a different thread. + * So the process opens the device may not be the same as the one that + * closes it. 
+ */ + gctUINT32 pidOpen; +} +gcsHAL_PRIVATE_DATA, * gcsHAL_PRIVATE_DATA_PTR; + +gceSTATUS gckGALDEVICE_Setup_ISR( + IN gceCORE Core + ); + +gceSTATUS gckGALDEVICE_Setup_ISR_VG( + IN gckGALDEVICE Device + ); + +gceSTATUS gckGALDEVICE_Release_ISR( + IN gceCORE Core + ); + +gceSTATUS gckGALDEVICE_Release_ISR_VG( + IN gckGALDEVICE Device + ); + +gceSTATUS gckGALDEVICE_Start_Threads( + IN gckGALDEVICE Device + ); + +gceSTATUS gckGALDEVICE_Stop_Threads( + gckGALDEVICE Device + ); + +gceSTATUS gckGALDEVICE_Start( + IN gckGALDEVICE Device + ); + +gceSTATUS gckGALDEVICE_Stop( + gckGALDEVICE Device + ); + +gceSTATUS gckGALDEVICE_Construct( + IN gctINT IrqLine, + IN gctUINT32 RegisterMemBase, + IN gctSIZE_T RegisterMemSize, + IN gctINT IrqLine2D, + IN gctUINT32 RegisterMemBase2D, + IN gctSIZE_T RegisterMemSize2D, + IN gctINT IrqLineVG, + IN gctUINT32 RegisterMemBaseVG, + IN gctSIZE_T RegisterMemSizeVG, + IN gctUINT32 ContiguousBase, + IN gctSIZE_T ContiguousSize, + IN gctUINT32 ExternalBase, + IN gctSIZE_T ExternalSize, + IN gctSIZE_T BankSize, + IN gctINT FastClear, + IN gctINT Compression, + IN gctUINT32 PhysBaseAddr, + IN gctUINT32 PhysSize, + IN gctINT Signal, + IN gctUINT LogFileSize, + IN gctINT PowerManagement, + IN gctINT GpuProfiler, + IN gcsDEVICE_CONSTRUCT_ARGS * Args, + OUT gckGALDEVICE *Device + ); + +gceSTATUS gckGALDEVICE_Destroy( + IN gckGALDEVICE Device + ); + +static gcmINLINE gckKERNEL +_GetValidKernel( + gckGALDEVICE Device + ) +{ + if (Device->kernels[gcvCORE_MAJOR]) + { + return Device->kernels[gcvCORE_MAJOR]; + } + else + if (Device->kernels[gcvCORE_2D]) + { + return Device->kernels[gcvCORE_2D]; + } + else + if (Device->kernels[gcvCORE_VG]) + { + return Device->kernels[gcvCORE_VG]; + } + else + { + gcmkASSERT(gcvFALSE); + return gcvNULL; + } +} + +#endif /* __gc_hal_kernel_device_h_ */ diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_driver.c b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_driver.c new file mode 100644 index 
000000000000..5f85b1274a10 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_driver.c @@ -0,0 +1,1255 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#include +#include +#include +#include + +#include "gc_hal_kernel_linux.h" +#include "gc_hal_driver.h" + +#include + +/* Zone used for header/footer. */ +#define _GC_OBJ_ZONE gcvZONE_DRIVER + +MODULE_DESCRIPTION("Vivante Graphics Driver"); +MODULE_LICENSE("Dual MIT/GPL"); + +/* Disable MSI for internal FPGA build except PPC */ +#if gcdFPGA_BUILD && !defined(CONFIG_PPC) +#define USE_MSI 0 +#else +#define USE_MSI 1 +#endif + +static struct class* gpuClass; + +static gcsPLATFORM *platform; + +static gckGALDEVICE galDevice; + +static uint major = 199; +module_param(major, uint, 0644); +MODULE_PARM_DESC(major, "major device number for GC device"); + +static int irqLine = -1; +module_param(irqLine, int, 0644); +MODULE_PARM_DESC(irqLine, "IRQ number of GC core"); + +static ulong registerMemBase = 0x80000000; +module_param(registerMemBase, ulong, 0644); +MODULE_PARM_DESC(registerMemBase, "Base of bus address of GC core AHB register"); + +static ulong registerMemSize = 2 << 10; +module_param(registerMemSize, ulong, 0644); +MODULE_PARM_DESC(registerMemSize, "Size of bus address range of GC core AHB register"); + +static int irqLine2D = -1; +module_param(irqLine2D, int, 0644); +MODULE_PARM_DESC(irqLine2D, "IRQ number of 
G2D core if irqLine is used for a G3D core"); + +static ulong registerMemBase2D = 0x00000000; +module_param(registerMemBase2D, ulong, 0644); +MODULE_PARM_DESC(registerMemBase2D, "Base of bus address of G2D core if registerMemBase2D is used for a G3D core"); + +static ulong registerMemSize2D = 2 << 10; +module_param(registerMemSize2D, ulong, 0644); +MODULE_PARM_DESC(registerMemSize2D, "Size of bus address range of G2D core if registerMemSize is used for a G3D core"); + +static int irqLineVG = -1; +module_param(irqLineVG, int, 0644); +MODULE_PARM_DESC(irqLineVG, "IRQ number of VG core"); + +static ulong registerMemBaseVG = 0x00000000; +module_param(registerMemBaseVG, ulong, 0644); +MODULE_PARM_DESC(registerMemBaseVG, "Base of bus address of VG core"); + +static ulong registerMemSizeVG = 2 << 10; +module_param(registerMemSizeVG, ulong, 0644); +MODULE_PARM_DESC(registerMemSizeVG, "Size of bus address range of VG core"); + +#if gcdDEC_ENABLE_AHB +static ulong registerMemBaseDEC300 = 0x00000000; +module_param(registerMemBaseDEC300, ulong, 0644); + +static ulong registerMemSizeDEC300 = 2 << 10; +module_param(registerMemSizeDEC300, ulong, 0644); +#endif + +#ifndef gcdDEFAULT_CONTIGUOUS_SIZE +#define gcdDEFAULT_CONTIGUOUS_SIZE (4 << 20) +#endif +static ulong contiguousSize = gcdDEFAULT_CONTIGUOUS_SIZE; +module_param(contiguousSize, ulong, 0644); +MODULE_PARM_DESC(contiguousSize, "Size of memory reserved for GC"); + +static ulong contiguousBase = 0; +module_param(contiguousBase, ulong, 0644); +MODULE_PARM_DESC(contiguousBase, "Base address of memory reserved for GC, if it is 0, GC driver will try to allocate a buffer whose size defined by contiguousSize"); + +static ulong externalSize = 0; +module_param(externalSize, ulong, 0644); +MODULE_PARM_DESC(externalSize, "Size of external memory, if it is 0, means there is no external pool"); + +static ulong externalBase = 0; +module_param(externalBase, ulong, 0644); +MODULE_PARM_DESC(externalBase, "Base address of external memory"); 
+ +static int fastClear = -1; +module_param(fastClear, int, 0644); +MODULE_PARM_DESC(fastClear, "Disable fast clear if set it to 0, enabled by default"); + +static int compression = -1; +module_param(compression, int, 0644); +MODULE_PARM_DESC(compression, "Disable compression if set it to 0, enabled by default"); + +static int powerManagement = 1; +module_param(powerManagement, int, 0644); +MODULE_PARM_DESC(powerManagement, "Disable auto power saving if set it to 1, enabled by default"); + +static int gpuProfiler = 0; +module_param(gpuProfiler, int, 0644); +MODULE_PARM_DESC(gpuProfiler, "Enable profiling support, disabled by default"); + +static ulong baseAddress = 0; +module_param(baseAddress, ulong, 0644); +MODULE_PARM_DESC(baseAddress, "Only used for old MMU, set it to 0 if memory which can be accessed by GPU falls into 0 - 2G, otherwise set it to 0x80000000"); + +static ulong physSize = 0; +module_param(physSize, ulong, 0644); +MODULE_PARM_DESC(physSize, "Obsolete"); + +static uint logFileSize = 0; +module_param(logFileSize,uint, 0644); +MODULE_PARM_DESC(logFileSize, "Size of buffer to store GC driver output messsage, if it is not 0, message is read from /sys/kernel/debug/gc/galcore_trace, default value is 0"); + +static uint recovery = 1; +module_param(recovery, uint, 0644); +MODULE_PARM_DESC(recovery, "Recover GPU from stuck (1: Enable, 0: Disable)"); + +/* Middle needs about 40KB buffer, Maximal may need more than 200KB buffer. */ +static uint stuckDump = 0; +module_param(stuckDump, uint, 0644); +MODULE_PARM_DESC(stuckDump, "Level of stuck dump content (1: Minimal, 2: Middle, 3: Maximal)"); + +static int showArgs = 0; +module_param(showArgs, int, 0644); +MODULE_PARM_DESC(showArgs, "Display parameters value when driver loaded"); + +static int mmu = 1; +module_param(mmu, int, 0644); +MODULE_PARM_DESC(mmu, "Disable MMU if set it to 0, enabled by default"); + +static int irqs[gcvCORE_COUNT] = {[0 ... 
gcvCORE_COUNT - 1] = -1};
module_param_array(irqs, int, NULL, 0644);
MODULE_PARM_DESC(irqs, "Array of IRQ numbers of multi-GPU");

static uint registerBases[gcvCORE_COUNT];
module_param_array(registerBases, uint, NULL, 0644);
MODULE_PARM_DESC(registerBases, "Array of bases of bus address of register of multi-GPU");

static uint registerSizes[gcvCORE_COUNT] = {[0 ... gcvCORE_COUNT - 1] = 2 << 10};
module_param_array(registerSizes, uint, NULL, 0644);
MODULE_PARM_DESC(registerSizes, "Array of sizes of bus address range of register of multi-GPU");

static uint chipIDs[gcvCORE_COUNT] = {[0 ... gcvCORE_COUNT - 1] = gcvCHIP_ID_DEFAULT};
module_param_array(chipIDs, uint, NULL, 0644);
MODULE_PARM_DESC(chipIDs, "Array of chipIDs of multi-GPU");

/* NOTE(review): 0664 (group-writable) differs from the 0644 used by every
 * other parameter in this file -- confirm this is intentional. */
static uint type = 0;
module_param(type, uint, 0664);
MODULE_PARM_DESC(type, "0 - Char Driver (Default), 1 - Misc Driver");

/* The knobs below have no module_param(): they are only overridden by the
 * platform layer through _UpdateModuleParam(). */
static int gpu3DMinClock = 1;

static int contiguousRequested = 0;

static gctBOOL registerMemMapped = gcvFALSE;
static gctPOINTER registerMemAddress = gcvNULL;
static ulong bankSize = 0;
static int signal = 48;

/*
 * Copy a platform-adjusted parameter block back into the module-level
 * globals above.  drv_init() reads these globals directly (see the comment
 * at the call site in gpu_probe()), so this must run after
 * platform->ops->adjustParam() and before drv_init().
 */
void
_UpdateModuleParam(
    gcsMODULE_PARAMETERS *Param
    )
{
    irqLine           = Param->irqLine ;
    registerMemBase   = Param->registerMemBase;
    registerMemSize   = Param->registerMemSize;
    irqLine2D         = Param->irqLine2D ;
    registerMemBase2D = Param->registerMemBase2D;
    registerMemSize2D = Param->registerMemSize2D;
    contiguousSize    = Param->contiguousSize;
    contiguousBase    = Param->contiguousBase;
    externalSize      = Param->externalSize;
    externalBase      = Param->externalBase;
    bankSize          = Param->bankSize;
    fastClear         = Param->fastClear;
    compression       = (gctINT)Param->compression;
    powerManagement   = Param->powerManagement;
    gpuProfiler       = Param->gpuProfiler;
    signal            = Param->signal;
    baseAddress       = Param->baseAddress;
    physSize          = Param->physSize;
    logFileSize       = Param->logFileSize;
    recovery          = Param->recovery;
    stuckDump         = Param->stuckDump;
    showArgs          = Param->showArgs;
contiguousRequested = Param->contiguousRequested;
    gpu3DMinClock       = Param->gpu3DMinClock;
    registerMemMapped   = Param->registerMemMapped;
    registerMemAddress  = Param->registerMemAddress;

    /* Per-core arrays are copied wholesale. */
    memcpy(irqs, Param->irqs, gcmSIZEOF(gctINT) * gcvCORE_COUNT);
    memcpy(registerBases, Param->registerBases, gcmSIZEOF(gctUINT) * gcvCORE_COUNT);
    memcpy(registerSizes, Param->registerSizes, gcmSIZEOF(gctUINT) * gcvCORE_COUNT);
    memcpy(chipIDs, Param->chipIDs, gcmSIZEOF(gctUINT) * gcvCORE_COUNT);
}

/*
 * Dump the effective module parameters and relevant build options to the
 * kernel log.  Called from drv_init() when the showArgs parameter is set.
 * Per-core sections are printed only when the corresponding IRQ was
 * configured (irqLine* != -1).
 */
void
gckOS_DumpParam(
    void
    )
{
    gctINT i;

    printk("Galcore options:\n");
    if (irqLine != -1)
    {
        printk(" irqLine = %d\n", irqLine);
        printk(" registerMemBase = 0x%08lX\n", registerMemBase);
        printk(" registerMemSize = 0x%08lX\n", registerMemSize);
    }

    if (irqLine2D != -1)
    {
        printk(" irqLine2D = %d\n", irqLine2D);
        printk(" registerMemBase2D = 0x%08lX\n", registerMemBase2D);
        printk(" registerMemSize2D = 0x%08lX\n", registerMemSize2D);
    }

    if (irqLineVG != -1)
    {
        printk(" irqLineVG = %d\n", irqLineVG);
        printk(" registerMemBaseVG = 0x%08lX\n", registerMemBaseVG);
        printk(" registerMemSizeVG = 0x%08lX\n", registerMemSizeVG);
    }

#if gcdDEC_ENABLE_AHB
    printk(" registerMemBaseDEC300 = 0x%08lX\n", registerMemBaseDEC300);
    printk(" registerMemSizeDEC300 = 0x%08lX\n", registerMemSizeDEC300);
#endif

    printk(" contiguousSize = 0x%08lX\n", contiguousSize);
    printk(" contiguousBase = 0x%08lX\n", contiguousBase);
    printk(" externalSize = 0x%08lX\n", externalSize);
    printk(" externalBase = 0x%08lX\n", externalBase);
    printk(" bankSize = 0x%08lX\n", bankSize);
    printk(" fastClear = %d\n", fastClear);
    printk(" compression = %d\n", compression);
    printk(" signal = %d\n", signal);
    printk(" powerManagement = %d\n", powerManagement);
    printk(" baseAddress = 0x%08lX\n", baseAddress);
    printk(" physSize = 0x%08lX\n", physSize);
    printk(" logFileSize = %d KB \n", logFileSize);
    printk(" recovery = %d\n", recovery);
    printk(" stuckDump = 
%d\n", stuckDump);
    printk(" gpuProfiler = %d\n", gpuProfiler);

    printk(" irqs = ");
    for (i = 0; i < gcvCORE_COUNT; i++)
    {
        printk("%d, ", irqs[i]);
    }
    printk("\n");

    printk(" registerBases = ");
    for (i = 0; i < gcvCORE_COUNT; i++)
    {
        printk("0x%08X, ", registerBases[i]);
    }
    printk("\n");

    printk(" registerSizes = ");
    for (i = 0; i < gcvCORE_COUNT; i++)
    {
        printk("0x%08X, ", registerSizes[i]);
    }
    printk("\n");

    printk(" chipIDs = ");
    for (i = 0; i < gcvCORE_COUNT; i++)
    {
        printk("0x%08X, ", chipIDs[i]);
    }
    printk("\n");

    /* Compile-time configuration, for bug reports. */
    printk("Build options:\n");
    printk(" gcdGPU_TIMEOUT = %d\n", gcdGPU_TIMEOUT);
    printk(" gcdGPU_2D_TIMEOUT = %d\n", gcdGPU_2D_TIMEOUT);
    printk(" gcdINTERRUPT_STATISTIC = %d\n", gcdINTERRUPT_STATISTIC);
}

/*
 * open() handler for the galcore device node.
 * Allocates the per-file private data (device pointer + opener's PID) and
 * attaches the calling process to every constructed kernel so per-process
 * resources are tracked.  On any failure the partial attach is rolled back
 * and -ENOTTY is returned.
 */
static int drv_open(
    struct inode* inode,
    struct file* filp
    )
{
    gceSTATUS status;
    gctBOOL attached = gcvFALSE;
    gcsHAL_PRIVATE_DATA_PTR data = gcvNULL;
    gctINT i;

    gcmkHEADER_ARG("inode=0x%08X filp=0x%08X", inode, filp);

    if (filp == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): filp is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    /* __GFP_NOWARN: allocation failure is reported through the driver's
     * own trace path below, not a kernel OOM splat. */
    data = kmalloc(sizeof(gcsHAL_PRIVATE_DATA), GFP_KERNEL | __GFP_NOWARN);

    if (data == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): private_data is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
    }

    data->device = galDevice;
    /* Remember the opener's PID: drv_release() detaches by this PID even if
     * the fd is closed by a different process. */
    data->pidOpen = _GetProcessID();

    /* Attached the process. */
    for (i = 0; i < gcdMAX_GPU_COUNT; i++)
    {
        if (galDevice->kernels[i] != gcvNULL)
        {
            gcmkONERROR(gckKERNEL_AttachProcess(galDevice->kernels[i], gcvTRUE));
        }
    }
    attached = gcvTRUE;

    filp->private_data = data;

    /* Success. */
    gcmkFOOTER_NO();
    return 0;

OnError:
    if (data != gcvNULL)
    {
        kfree(data);
    }

    /* NOTE(review): 'attached' is only set after the whole loop succeeded,
     * so a mid-loop AttachProcess failure is not rolled back here -- confirm
     * gckKERNEL_AttachProcess tolerates the resulting imbalance. */
    if (attached)
    {
        for (i = 0; i < gcdMAX_GPU_COUNT; i++)
        {
            if (galDevice->kernels[i] != gcvNULL)
            {
                gcmkVERIFY_OK(gckKERNEL_AttachProcess(galDevice->kernels[i], gcvFALSE));
            }
        }
    }

    gcmkFOOTER();
    return -ENOTTY;
}

/*
 * release() handler: detaches the opener's process (by the PID recorded in
 * drv_open()) from every kernel, then frees the per-file data.
 */
static int drv_release(
    struct inode* inode,
    struct file* filp
    )
{
    gceSTATUS status;
    gcsHAL_PRIVATE_DATA_PTR data;
    gckGALDEVICE device;
    gctINT i;

    gcmkHEADER_ARG("inode=0x%08X filp=0x%08X", inode, filp);

    if (filp == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): filp is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    data = filp->private_data;

    if (data == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): private_data is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    device = data->device;

    if (device == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): device is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    /* A process gets detached. */
    for (i = 0; i < gcdMAX_GPU_COUNT; i++)
    {
        if (galDevice->kernels[i] != gcvNULL)
        {
            gcmkONERROR(gckKERNEL_AttachProcessEx(galDevice->kernels[i], gcvFALSE, data->pidOpen));
        }
    }

    kfree(data);
    filp->private_data = NULL;

    /* Success.
*/
    gcmkFOOTER_NO();
    return 0;

OnError:
    gcmkFOOTER();
    return -ENOTTY;
}

/*
 * Main ioctl entry point for the galcore device.
 * Copies a DRIVER_ARGS descriptor from user space, validates the embedded
 * buffer sizes, copies in the gcsHAL_INTERFACE command, dispatches it via
 * gckDEVICE_Dispatch(), and copies the (possibly updated) interface back.
 * Returns -ERESTARTSYS when dispatch was interrupted by a pending signal so
 * the syscall is transparently restarted; any other failure yields -ENOTTY.
 */
static long drv_ioctl(
    struct file* filp,
    unsigned int ioctlCode,
    unsigned long arg
    )
{
    gceSTATUS status;
    gcsHAL_INTERFACE iface;
    gctUINT32 copyLen;
    DRIVER_ARGS drvArgs;
    gckGALDEVICE device;
    gcsHAL_PRIVATE_DATA_PTR data;

    gcmkHEADER_ARG(
        "filp=0x%08X ioctlCode=0x%08X arg=0x%08X",
        filp, ioctlCode, arg
        );

    if (filp == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): filp is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    data = filp->private_data;

    if (data == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): private_data is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    device = data->device;

    if (device == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): device is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    /* Only the two HAL interface commands are supported. */
    if ((ioctlCode != IOCTL_GCHAL_INTERFACE)
    &&  (ioctlCode != IOCTL_GCHAL_KERNEL_INTERFACE)
    )
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): unknown command %d\n",
            __FUNCTION__, __LINE__,
            ioctlCode
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    /* Get the drvArgs. */
    copyLen = copy_from_user(
        &drvArgs, (void *) arg, sizeof(DRIVER_ARGS)
        );

    if (copyLen != 0)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): error copying of the input arguments.\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    /* Now bring in the gcsHAL_INTERFACE structure. */
    if ((drvArgs.InputBufferSize  != sizeof(gcsHAL_INTERFACE))
    ||  (drvArgs.OutputBufferSize != sizeof(gcsHAL_INTERFACE))
    )
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): input or/and output structures are invalid.\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    copyLen = copy_from_user(
        &iface, gcmUINT64_TO_PTR(drvArgs.InputBuffer), sizeof(gcsHAL_INTERFACE)
        );

    if (copyLen != 0)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): error copying of input HAL interface.\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    status = gckDEVICE_Dispatch(device->device, &iface);

    /* Redo system call after pending signal is handled. */
    if (status == gcvSTATUS_INTERRUPTED)
    {
        gcmkFOOTER();
        return -ERESTARTSYS;
    }

    /* Copy data back to the user. */
    copyLen = copy_to_user(
        gcmUINT64_TO_PTR(drvArgs.OutputBuffer), &iface, sizeof(gcsHAL_INTERFACE)
        );

    if (copyLen != 0)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): error copying of output HAL interface.\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    /* Success.
*/
    gcmkFOOTER_NO();
    return 0;

OnError:
    gcmkFOOTER();
    return -ENOTTY;
}

static struct file_operations driver_fops =
{
    .owner          = THIS_MODULE,
    .open           = drv_open,
    .release        = drv_release,
    .unlocked_ioctl = drv_ioctl,
#ifdef HAVE_COMPAT_IOCTL
    .compat_ioctl   = drv_ioctl,
#endif
};

/* Used only when the 'type' parameter selects the misc-driver flavor. */
static struct miscdevice gal_device = {
    .minor = MISC_DYNAMIC_MINOR,
    .name  = DEVICE_NAME,
    .fops  = &driver_fops,
};

/*
 * Create, start and register the GAL device.
 * Builds gcsDEVICE_CONSTRUCT_ARGS from the module-parameter globals (which
 * is why _UpdateModuleParam() must have run first), constructs and starts
 * the device, then exposes it either as a misc device (type == 1) or as a
 * character device plus sysfs class (type == 0, the default).
 * Returns 0 on success, a negative errno on failure.
 */
static int drv_init(void)
{
    int ret;
    int result = -EINVAL;
    gceSTATUS status;
    gckGALDEVICE device = gcvNULL;
    struct class* device_class = gcvNULL;

    gcsDEVICE_CONSTRUCT_ARGS args = {
        .recovery           = recovery,
        .stuckDump          = stuckDump,
        .gpu3DMinClock      = gpu3DMinClock,
        .contiguousRequested = contiguousRequested,
        .platform           = platform,
        .mmu                = mmu,
        .registerMemMapped    = registerMemMapped,
        .registerMemAddress    = registerMemAddress,
#if gcdDEC_ENABLE_AHB
        .registerMemBaseDEC300 = registerMemBaseDEC300,
        .registerMemSizeDEC300 = registerMemSizeDEC300,
#endif
    };

    gcmkHEADER();

    memcpy(args.irqs, irqs, gcmSIZEOF(gctINT) * gcvCORE_COUNT);
    memcpy(args.registerBases, registerBases, gcmSIZEOF(gctUINT) * gcvCORE_COUNT);
    memcpy(args.registerSizes, registerSizes, gcmSIZEOF(gctUINT) * gcvCORE_COUNT);
    memcpy(args.chipIDs, chipIDs, gcmSIZEOF(gctUINT) * gcvCORE_COUNT);

    printk(KERN_INFO "Galcore version %d.%d.%d.%d\n",
        gcvVERSION_MAJOR, gcvVERSION_MINOR, gcvVERSION_PATCH, gcvVERSION_BUILD);

    args.powerManagement = powerManagement;
    args.gpuProfiler = gpuProfiler;

    if (showArgs)
    {
        gckOS_DumpParam();
    }

    /* A non-zero logFileSize routes driver messages to debugfs. */
    if (logFileSize != 0)
    {
        gckDEBUGFS_Initialize();
    }

    /* Create the GAL device. */
    status = gckGALDEVICE_Construct(
        irqLine,
        registerMemBase, registerMemSize,
        irqLine2D,
        registerMemBase2D, registerMemSize2D,
        irqLineVG,
        registerMemBaseVG, registerMemSizeVG,
        contiguousBase, contiguousSize,
        externalBase, externalSize,
        bankSize, fastClear, compression, baseAddress, physSize, signal,
        logFileSize,
        powerManagement,
        gpuProfiler,
        &args,
        &device
        );

    if (gcmIS_ERROR(status))
    {
        gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_DRIVER,
                       "%s(%d): Failed to create the GAL device: status=%d\n",
                       __FUNCTION__, __LINE__, status);

        goto OnError;
    }

    /* Start the GAL device. */
    gcmkONERROR(gckGALDEVICE_Start(device));

    if ((physSize != 0)
       && (device->kernels[gcvCORE_MAJOR] != gcvNULL)
       && (device->kernels[gcvCORE_MAJOR]->hardware->mmuVersion != 0))
    {
        /* Reset the base address */
        device->baseAddress = 0;
    }

    /* Set global galDevice pointer. */
    galDevice = device;

    if (type == 1)
    {
        /* Register as misc driver. */
        ret = misc_register(&gal_device);

        if (ret < 0)
        {
            gcmkTRACE_ZONE(
                gcvLEVEL_ERROR, gcvZONE_DRIVER,
                "%s(%d): misc_register fails.\n",
                __FUNCTION__, __LINE__
                );

            gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
        }
    }
    else
    {
        /* Register the character device. */
        ret = register_chrdev(major, DEVICE_NAME, &driver_fops);

        if (ret < 0)
        {
            gcmkTRACE_ZONE(
                gcvLEVEL_ERROR, gcvZONE_DRIVER,
                "%s(%d): Could not allocate major number for mmap.\n",
                __FUNCTION__, __LINE__
                );

            gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
        }

        /* major == 0 requested dynamic allocation; keep the assigned one. */
        if (major == 0)
        {
            major = ret;
        }

        /* Create the device class. */
        device_class = class_create(THIS_MODULE, CLASS_NAME);

        if (IS_ERR(device_class))
        {
            gcmkTRACE_ZONE(
                gcvLEVEL_ERROR, gcvZONE_DRIVER,
                "%s(%d): Failed to create the class.\n",
                __FUNCTION__, __LINE__
                );

            gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
        }

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
        device_create(device_class, NULL, MKDEV(major, 0), NULL, DEVICE_NAME);
#else
        device_create(device_class, NULL, MKDEV(major, 0), DEVICE_NAME);
#endif

        gpuClass  = device_class;
    }

    gcmkTRACE_ZONE(
        gcvLEVEL_INFO, gcvZONE_DRIVER,
        "%s(%d): irqLine=%d, contiguousSize=%lu, memBase=0x%lX\n",
        __FUNCTION__, __LINE__,
        irqLine, contiguousSize, registerMemBase
        );

    /* Success. */
    gcmkFOOTER_NO();
    return 0;

OnError:
    /* Roll back.
     * NOTE(review): if class_create() fails after register_chrdev()
     * succeeded, neither unregister_chrdev() nor misc_deregister() is
     * called here, leaking the registration -- confirm and fix upstream. */
    if (device_class != gcvNULL)
    {
        device_destroy(device_class, MKDEV(major, 0));
        class_destroy(device_class);
    }

    if (device != gcvNULL)
    {
        gcmkVERIFY_OK(gckGALDEVICE_Stop(device));
        gcmkVERIFY_OK(gckGALDEVICE_Destroy(device));
    }

    gcmkFOOTER();
    return result;
}

/* Tear down everything drv_init() set up, in reverse order. */
static void drv_exit(void)
{
    gcmkHEADER();

    if (type == 1)
    {
        misc_deregister(&gal_device);
    }
    else
    {
        gcmkASSERT(gpuClass != gcvNULL);
        device_destroy(gpuClass, MKDEV(major, 0));
        class_destroy(gpuClass);

        unregister_chrdev(major, DEVICE_NAME);
    }

    gcmkVERIFY_OK(gckGALDEVICE_Stop(galDevice));
    gcmkVERIFY_OK(gckGALDEVICE_Destroy(galDevice));

    if(gckDEBUGFS_IsEnabled())
    {
        gckDEBUGFS_Terminate();
    }

    gcmkFOOTER_NO();
}

#if gcdENABLE_DRM
int viv_drm_probe(struct device *dev);
int viv_drm_remove(struct device *dev);
#endif

struct device *galcore_device;

/* Probe entry: PCI or platform flavor, selected at build time. */
#if USE_LINUX_PCIE
static int gpu_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#else /* USE_LINUX_PCIE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
    static int gpu_probe(struct platform_device *pdev)
#else
    static int __devinit gpu_probe(struct platform_device *pdev)
#endif
#endif /* USE_LINUX_PCIE
*/
{
    int ret = -ENODEV;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
    /* GPU can address 40 bits of bus address on this platform. */
    static u64 dma_mask = DMA_BIT_MASK(40);
#else
    static u64 dma_mask = DMA_40BIT_MASK;
#endif

    /* Seed the parameter block from the module-parameter globals; the
     * platform layer may adjust it below. */
    gcsMODULE_PARAMETERS moduleParam = {
        .irqLine            = irqLine,
        .registerMemBase    = registerMemBase,
        .registerMemSize    = registerMemSize,
        .irqLine2D          = irqLine2D,
        .registerMemBase2D  = registerMemBase2D,
        .registerMemSize2D  = registerMemSize2D,
        .irqLineVG          = irqLineVG,
        .registerMemBaseVG  = registerMemBaseVG,
        .registerMemSizeVG  = registerMemSizeVG,
        .contiguousSize     = contiguousSize,
        .contiguousBase     = contiguousBase,
        .externalSize       = externalSize,
        .externalBase       = externalBase,
        .bankSize           = bankSize,
        .fastClear          = fastClear,
        .powerManagement    = powerManagement,
        .gpuProfiler        = gpuProfiler,
        .signal             = signal,
        .baseAddress        = baseAddress,
        .physSize           = physSize,
        .logFileSize        = logFileSize,
        .recovery           = recovery,
        .stuckDump          = stuckDump,
        .showArgs           = showArgs,
        .gpu3DMinClock      = gpu3DMinClock,
        .registerMemMapped    = registerMemMapped,
    };

    gcmkHEADER();

    memcpy(moduleParam.irqs, irqs, gcmSIZEOF(gctINT) * gcvCORE_COUNT);
    memcpy(moduleParam.registerBases, registerBases, gcmSIZEOF(gctUINT) * gcvCORE_COUNT);
    memcpy(moduleParam.registerSizes, registerSizes, gcmSIZEOF(gctUINT) * gcvCORE_COUNT);
    memcpy(moduleParam.chipIDs, chipIDs, gcmSIZEOF(gctUINT) * gcvCORE_COUNT);
    /* -1 means "not set on the command line": fall back to the build-time
     * default compression option. */
    moduleParam.compression = (compression == -1) ? gcvCOMPRESSION_OPTION_DEFAULT : (gceCOMPRESSION_OPTION)compression;
    platform->device = pdev;
    galcore_device = &pdev->dev;

#if USE_LINUX_PCIE
    /* NOTE(review): each of these PCI setup failures is only logged and the
     * probe continues -- confirm this best-effort behavior is intended. */
    if (pci_enable_device(pdev)) {
        printk(KERN_ERR "galcore: pci_enable_device() failed.\n");
    }

    if (pci_set_dma_mask(pdev, dma_mask)) {
        printk(KERN_ERR "galcore: Failed to set DMA mask.\n");
    }

    pci_set_master(pdev);

    if (pci_request_regions(pdev, "galcore")) {
        printk(KERN_ERR "galcore: Failed to get ownership of BAR region.\n");
    }

#if USE_MSI
    if (pci_enable_msi(pdev)) {
        printk(KERN_ERR "galcore: Failed to enable MSI.\n");
    }
# endif
#else
    galcore_device->dma_mask = &dma_mask;
#endif

    if (platform->ops->getPower)
    {
        if (gcmIS_ERROR(platform->ops->getPower(platform)))
        {
            gcmkFOOTER_NO();
            return ret;
        }
    }

    if (platform->ops->adjustParam)
    {
        /* Override default module param. */
        platform->ops->adjustParam(platform, &moduleParam);

        /* Update module param because drv_init() uses them directly. */
        _UpdateModuleParam(&moduleParam);
    }

    ret = drv_init();

    if (!ret)
    {
#if USE_LINUX_PCIE
        pci_set_drvdata(pdev, galDevice);
#else
        platform_set_drvdata(pdev, galDevice);
#endif

#if gcdENABLE_DRM
        ret = viv_drm_probe(&pdev->dev);
#endif
    }

    if (ret < 0)
    {
        gcmkFOOTER_ARG(KERN_INFO "Failed to register gpu driver: %d\n", ret);
    }
    else
    {
        gcmkFOOTER_NO();
    }
    return ret;
}

/* Remove entry: undoes gpu_probe() (DRM, GAL device, power, PCI state). */
#if USE_LINUX_PCIE
static void gpu_remove(struct pci_dev *pdev)
#else /* USE_LINUX_PCIE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
    static int gpu_remove(struct platform_device *pdev)
#else
    static int __devexit gpu_remove(struct platform_device *pdev)
#endif
#endif /* USE_LINUX_PCIE */
{
    gcmkHEADER();

#if gcdENABLE_DRM
    viv_drm_remove(&pdev->dev);
#endif

    drv_exit();

    if (platform->ops->putPower)
    {
        platform->ops->putPower(platform);
    }

#if USE_LINUX_PCIE
    pci_set_drvdata(pdev, NULL);
#if USE_MSI
    pci_disable_msi(pdev);
#endif
    pci_clear_master(pdev);
    pci_release_regions(pdev);
    pci_disable_device(pdev);
    gcmkFOOTER_NO();
    return;
#else
    gcmkFOOTER_NO();
    return 0;
#endif
}

/*
 * Suspend: for every kernel, remember the current power state and force the
 * hardware to gcvPOWER_OFF.  Returns -1 on any hardware error.
 */
static int gpu_suspend(struct platform_device *dev, pm_message_t state)
{
    gceSTATUS status;
    gckGALDEVICE device;
    gctINT i;

    device = platform_get_drvdata(dev);

    if (!device)
    {
        return -1;
    }

    for (i = 0; i < gcdMAX_GPU_COUNT; i++)
    {
        if (device->kernels[i] != gcvNULL)
        {
            /* Store states.
*/
            {
                status = gckHARDWARE_QueryPowerManagementState(device->kernels[i]->hardware, &device->statesStored[i]);
            }

            if (gcmIS_ERROR(status))
            {
                return -1;
            }

            {
                status = gckHARDWARE_SetPowerManagementState(device->kernels[i]->hardware, gcvPOWER_OFF);
            }

            if (gcmIS_ERROR(status))
            {
                return -1;
            }

        }
    }

    return 0;
}

/*
 * Resume: power every kernel back on, then re-broadcast the state that was
 * captured by gpu_suspend() (mapped to its *_BROADCAST variant so the rest
 * of the stack is notified).  Returns -1 on any hardware error.
 */
static int gpu_resume(struct platform_device *dev)
{
    gceSTATUS status;
    gckGALDEVICE device;
    gctINT i;
    gceCHIPPOWERSTATE statesStored;

    device = platform_get_drvdata(dev);

    if (!device)
    {
        return -1;
    }

    for (i = 0; i < gcdMAX_GPU_COUNT; i++)
    {
        if (device->kernels[i] != gcvNULL)
        {
            {
                status = gckHARDWARE_SetPowerManagementState(device->kernels[i]->hardware, gcvPOWER_ON);
            }

            if (gcmIS_ERROR(status))
            {
                return -1;
            }

            /* Convert global state to crossponding internal state. */
            switch(device->statesStored[i])
            {
            case gcvPOWER_OFF:
                statesStored = gcvPOWER_OFF_BROADCAST;
                break;
            case gcvPOWER_IDLE:
                statesStored = gcvPOWER_IDLE_BROADCAST;
                break;
            case gcvPOWER_SUSPEND:
                statesStored = gcvPOWER_SUSPEND_BROADCAST;
                break;
            case gcvPOWER_ON:
                statesStored = gcvPOWER_ON_AUTO;
                break;
            default:
                statesStored = device->statesStored[i];
                break;
            }

            /* Restore states. */
            {
                status = gckHARDWARE_SetPowerManagementState(device->kernels[i]->hardware, statesStored);
            }

            if (gcmIS_ERROR(status))
            {
                return -1;
            }
        }
    }

    return 0;
}

#if defined(CONFIG_PM) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
#ifdef CONFIG_PM_SLEEP
/* dev_pm_ops shims around the legacy suspend/resume callbacks. */
static int gpu_system_suspend(struct device *dev)
{
    pm_message_t state={0};
    return gpu_suspend(to_platform_device(dev), state);
}

static int gpu_system_resume(struct device *dev)
{
    return gpu_resume(to_platform_device(dev));
}
#endif

static const struct dev_pm_ops gpu_pm_ops = {
    SET_SYSTEM_SLEEP_PM_OPS(gpu_system_suspend, gpu_system_resume)
};
#endif

#if USE_LINUX_PCIE
/* Xilinx eval board (10ee:7012) and MCST (8039) GPU PCI IDs. */
static const struct pci_device_id vivpci_ids[] = {
    {
        .class = 0x000000,
        .class_mask = 0x000000,
        .vendor = 0x10ee,
        .device = 0x7012,
        .subvendor = PCI_ANY_ID,
        .subdevice = PCI_ANY_ID,
        .driver_data = 0
    },
    {
        .class = 0x000000,
        .class_mask = 0x000000,
        .vendor = PCI_VENDOR_ID_MCST_TMP,
        .device = 0x8039,
        .subvendor = PCI_ANY_ID,
        .subdevice = PCI_ANY_ID,
        .driver_data = 0
    },
    { /* End: all zeroes */ }
};

MODULE_DEVICE_TABLE(pci, vivpci_ids);

static struct pci_driver gpu_driver = {
    .name = DEVICE_NAME,
    .id_table = vivpci_ids,
    .probe = gpu_probe,
    .remove = gpu_remove
};

#else /* USE_LINUX_PCIE */

static struct platform_driver gpu_driver = {
    .probe      = gpu_probe,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
    .remove     = gpu_remove,
#else
    .remove     = __devexit_p(gpu_remove),
#endif

    .suspend    = gpu_suspend,
    .resume     = gpu_resume,

    .driver     = {
        .owner = THIS_MODULE,
        .name   = DEVICE_NAME,
#if defined(CONFIG_PM) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
        .pm     = &gpu_pm_ops,
#endif
    }
};
#endif /* USE_LINUX_PCIE */

/* Module entry: initialize the SoC platform glue, then register the
 * PCI or platform driver. */
static int __init gpu_init(void)
{
    int ret = 0;

    ret = soc_platform_init(&gpu_driver, &platform);

    if (ret || !platform)
    {
        printk(KERN_ERR "galcore: Soc platform init failed.\n");
        return -ENODEV;
    }

+#if USE_LINUX_PCIE + ret = pci_register_driver(&gpu_driver); +#else /* USE_LINUX_PCIE */ + ret = platform_driver_register(&gpu_driver); +#endif /* USE_LINUX_PCIE */ + + if (ret) + { + printk(KERN_ERR "galcore: gpu_init() failed to register driver!\n"); + soc_platform_terminate(platform); + platform = NULL; + return ret; + } + + platform->driver = &gpu_driver; + + return 0; +} + +static void __exit gpu_exit(void) +{ +#if USE_LINUX_PCIE + pci_unregister_driver(&gpu_driver); +#else + platform_driver_unregister(&gpu_driver); +#endif /* USE_LINUX_PCIE */ + + soc_platform_terminate(platform); + platform = NULL; +} + +module_init(gpu_init); +module_exit(gpu_exit); diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_drm.c b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_drm.c new file mode 100644 index 000000000000..aaf92fb22feb --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_drm.c @@ -0,0 +1,809 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#if gcdENABLE_DRM + +#include +#include +#include +#include "gc_hal_kernel_linux.h" +#include "gc_hal_drm.h" + +#define _GC_OBJ_ZONE gcvZONE_KERNEL + +/******************************************************************************\ +******************************* gckKERNEL DRM Code ****************************** +\******************************************************************************/ + +struct viv_gem_object { + struct drm_gem_object base; + + uint32_t node_handle; + gckVIDMEM_NODE node_object; +}; + +struct dma_buf *viv_gem_prime_export(struct drm_device *drm, + struct drm_gem_object *gem_obj, + int flags) +{ + struct viv_gem_object *viv_obj = container_of(gem_obj, struct viv_gem_object, base); + struct dma_buf *dmabuf = gcvNULL; + gckGALDEVICE gal_dev = (gckGALDEVICE)drm->dev_private; + + if (gal_dev) + { + gckKERNEL kernel = gal_dev->device->map[gal_dev->device->defaultHwType].kernels[0]; + gcmkVERIFY_OK(gckVIDMEM_NODE_Export(kernel, viv_obj->node_handle, flags, + (gctPOINTER*)&dmabuf, gcvNULL)); + } + + return dmabuf; +} + +struct drm_gem_object *viv_gem_prime_import(struct drm_device *drm, + struct dma_buf *dmabuf) +{ + struct drm_gem_object *gem_obj = gcvNULL; + struct viv_gem_object *viv_obj; + + gcsHAL_INTERFACE iface; + gckGALDEVICE gal_dev; + gckKERNEL kernel; + gctUINT32 processID; + gckVIDMEM_NODE nodeObject; + gceSTATUS status = gcvSTATUS_OK; + + gal_dev = (gckGALDEVICE)drm->dev_private; + if (!gal_dev) + { + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + gckOS_ZeroMemory(&iface, sizeof(iface)); + iface.command = gcvHAL_WRAP_USER_MEMORY; + iface.hardwareType = gal_dev->device->defaultHwType; + iface.u.WrapUserMemory.desc.flag = gcvALLOC_FLAG_DMABUF; + iface.u.WrapUserMemory.desc.handle = -1; + iface.u.WrapUserMemory.desc.dmabuf = gcmPTR_TO_UINT64(dmabuf); + gcmkONERROR(gckDEVICE_Dispatch(gal_dev->device, &iface)); + + kernel = 
gal_dev->device->map[gal_dev->device->defaultHwType].kernels[0]; + gcmkONERROR(gckOS_GetProcessID(&processID)); + gcmkONERROR(gckVIDMEM_HANDLE_Lookup(kernel, processID, iface.u.WrapUserMemory.node, &nodeObject)); + + /* ioctl output */ + gem_obj = kzalloc(sizeof(struct viv_gem_object), GFP_KERNEL); + drm_gem_private_object_init(drm, gem_obj, dmabuf->size); + viv_obj = container_of(gem_obj, struct viv_gem_object, base); + viv_obj->node_handle = iface.u.WrapUserMemory.node; + viv_obj->node_object = nodeObject; + +OnError: + return gem_obj; +} + +void viv_gem_free_object(struct drm_gem_object *gem_obj) +{ + struct viv_gem_object *viv_obj = container_of(gem_obj, struct viv_gem_object, base); + struct drm_device *drm = gem_obj->dev; + + gcsHAL_INTERFACE iface; + gckGALDEVICE gal_dev = (gckGALDEVICE)drm->dev_private; + + gckOS_ZeroMemory(&iface, sizeof(iface)); + iface.command = gcvHAL_RELEASE_VIDEO_MEMORY; + iface.hardwareType = gal_dev->device->defaultHwType; + iface.u.ReleaseVideoMemory.node = viv_obj->node_handle; + gcmkVERIFY_OK(gckDEVICE_Dispatch(gal_dev->device, &iface)); + + drm_gem_object_release(gem_obj); + kfree(gem_obj); +} + +static int viv_ioctl_gem_create(struct drm_device *drm, void *data, + struct drm_file *file) +{ + int ret = 0; + struct drm_viv_gem_create *args = (struct drm_viv_gem_create*)data; + struct drm_gem_object *gem_obj = gcvNULL; + struct viv_gem_object *viv_obj = gcvNULL; + + gcsHAL_INTERFACE iface; + gckGALDEVICE gal_dev; + gckKERNEL kernel; + gctUINT32 processID; + gckVIDMEM_NODE nodeObject; + gctUINT32 flags = gcvALLOC_FLAG_DMABUF_EXPORTABLE; + gceSTATUS status = gcvSTATUS_OK; + + gal_dev = (gckGALDEVICE)drm->dev_private; + if (!gal_dev) + { + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + if (args->flags & DRM_VIV_GEM_CONTIGUOUS) + { + flags |= gcvALLOC_FLAG_CONTIGUOUS; + } + if (args->flags & DRM_VIV_GEM_CACHED) + { + flags |= gcvALLOC_FLAG_CACHEABLE; + } + if (args->flags & DRM_VIV_GEM_SECURE) + { + flags |= gcvALLOC_FLAG_SECURITY; 
+ } + + gckOS_ZeroMemory(&iface, sizeof(iface)); + iface.command = gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY; + iface.hardwareType = gal_dev->device->defaultHwType; + iface.u.AllocateLinearVideoMemory.bytes = PAGE_ALIGN(args->size); + iface.u.AllocateLinearVideoMemory.alignment = 256; + iface.u.AllocateLinearVideoMemory.type = gcvSURF_RENDER_TARGET; /* should be general */ + iface.u.AllocateLinearVideoMemory.flag = flags; + iface.u.AllocateLinearVideoMemory.pool = gcvPOOL_DEFAULT; + gcmkONERROR(gckDEVICE_Dispatch(gal_dev->device, &iface)); + + kernel = gal_dev->device->map[gal_dev->device->defaultHwType].kernels[0]; + gcmkONERROR(gckOS_GetProcessID(&processID)); + gcmkONERROR(gckVIDMEM_HANDLE_Lookup(kernel, processID, iface.u.AllocateLinearVideoMemory.node, &nodeObject)); + + /* ioctl output */ + gem_obj = kzalloc(sizeof(struct viv_gem_object), GFP_KERNEL); + drm_gem_private_object_init(drm, gem_obj, iface.u.AllocateLinearVideoMemory.bytes); + ret = drm_gem_handle_create(file, gem_obj, &args->handle); + + viv_obj = container_of(gem_obj, struct viv_gem_object, base); + viv_obj->node_handle = iface.u.AllocateLinearVideoMemory.node; + viv_obj->node_object = nodeObject; + + /* drop reference from allocate - handle holds it now */ + drm_gem_object_unreference_unlocked(gem_obj); + +OnError: + return gcmIS_ERROR(status) ? 
-ENOTTY : 0;
}

/*
 * DRM_IOCTL_VIV_GEM_LOCK: lock the video memory behind a GEM handle and
 * return its user-space address in args->logical.
 */
static int viv_ioctl_gem_lock(struct drm_device *drm, void *data,
                              struct drm_file *file)
{
    struct drm_viv_gem_lock *args = (struct drm_viv_gem_lock*)data;
    struct drm_gem_object *gem_obj = gcvNULL;
    struct viv_gem_object *viv_obj = gcvNULL;

    gcsHAL_INTERFACE iface;
    gceSTATUS status = gcvSTATUS_OK;
    gckGALDEVICE gal_dev = gcvNULL;

    gal_dev = (gckGALDEVICE)drm->dev_private;
    if (!gal_dev)
    {
        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    gem_obj = drm_gem_object_lookup(file, args->handle);
    if (!gem_obj)
    {
        gcmkONERROR(gcvSTATUS_NOT_FOUND);
    }
    viv_obj = container_of(gem_obj, struct viv_gem_object, base);

    gckOS_ZeroMemory(&iface, sizeof(iface));
    iface.command = gcvHAL_LOCK_VIDEO_MEMORY;
    iface.hardwareType = gal_dev->device->defaultHwType;
    iface.u.LockVideoMemory.node = viv_obj->node_handle;
    iface.u.LockVideoMemory.cacheable = args->cacheable;
    gcmkONERROR(gckDEVICE_Dispatch(gal_dev->device, &iface));

    args->logical = iface.u.LockVideoMemory.memory;

OnError:
    if (gem_obj)
    {
        /* Drop the lookup reference in every path. */
        drm_gem_object_unreference_unlocked(gem_obj);
    }
    return gcmIS_ERROR(status) ?
-ENOTTY : 0;
}

/*
 * DRM_IOCTL_VIV_GEM_UNLOCK: unlock the video memory behind a GEM handle.
 * Dispatched in two steps (unlock, then the bottom-half unlock) mirroring
 * the HAL's two-phase unlock protocol.
 */
static int viv_ioctl_gem_unlock(struct drm_device *drm, void *data,
                                struct drm_file *file)
{
    struct drm_viv_gem_unlock *args = (struct drm_viv_gem_unlock*)data;
    struct drm_gem_object *gem_obj = gcvNULL;
    struct viv_gem_object *viv_obj = gcvNULL;

    gcsHAL_INTERFACE iface;
    gceSTATUS status = gcvSTATUS_OK;
    gckGALDEVICE gal_dev = gcvNULL;

    gal_dev = (gckGALDEVICE)drm->dev_private;
    if (!gal_dev)
    {
        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    gem_obj = drm_gem_object_lookup(file, args->handle);
    if (!gem_obj)
    {
        gcmkONERROR(gcvSTATUS_NOT_FOUND);
    }
    viv_obj = container_of(gem_obj, struct viv_gem_object, base);

    memset(&iface, 0, sizeof(iface));
    iface.command = gcvHAL_UNLOCK_VIDEO_MEMORY;
    iface.hardwareType = gal_dev->device->defaultHwType;
    iface.u.UnlockVideoMemory.node = (gctUINT64)viv_obj->node_handle;
    iface.u.UnlockVideoMemory.type = gcvSURF_TYPE_UNKNOWN;
    gcmkONERROR(gckDEVICE_Dispatch(gal_dev->device, &iface));

    memset(&iface, 0, sizeof(iface));
    iface.command = gcvHAL_BOTTOM_HALF_UNLOCK_VIDEO_MEMORY;
    iface.hardwareType = gal_dev->device->defaultHwType;
    iface.u.BottomHalfUnlockVideoMemory.node = (gctUINT64)viv_obj->node_handle;
    iface.u.BottomHalfUnlockVideoMemory.type = gcvSURF_TYPE_UNKNOWN;
    gcmkONERROR(gckDEVICE_Dispatch(gal_dev->device, &iface));

OnError:
    if (gem_obj)
    {
        drm_gem_object_unreference_unlocked(gem_obj);
    }
    return gcmIS_ERROR(status) ?
-ENOTTY : 0;
}

/*
 * DRM_IOCTL_VIV_GEM_CACHE: perform a cache maintenance operation
 * (clean / invalidate / flush / barrier) on a locked GEM buffer range.
 */
static int viv_ioctl_gem_cache(struct drm_device *drm, void *data,
                               struct drm_file *file)
{
    struct drm_viv_gem_cache *args = (struct drm_viv_gem_cache*)data;
    struct drm_gem_object *gem_obj = gcvNULL;
    struct viv_gem_object *viv_obj = gcvNULL;

    gcsHAL_INTERFACE iface;
    gceSTATUS status = gcvSTATUS_OK;
    gckGALDEVICE gal_dev = gcvNULL;
    gceCACHEOPERATION cache_op = 0;

    gal_dev = (gckGALDEVICE)drm->dev_private;
    if (!gal_dev)
    {
        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    gem_obj = drm_gem_object_lookup(file, args->handle);
    if (!gem_obj)
    {
        gcmkONERROR(gcvSTATUS_NOT_FOUND);
    }
    viv_obj = container_of(gem_obj, struct viv_gem_object, base);

    /* NOTE(review): an unrecognized args->op falls through with cache_op
     * still 0 and the dispatch below runs anyway -- confirm rejecting
     * unknown ops would not break existing user space. */
    switch (args->op)
    {
    case DRM_VIV_GEM_CLEAN_CACHE:
        cache_op = gcvCACHE_CLEAN;
        break;
    case DRM_VIV_GEM_INVALIDATE_CACHE:
        cache_op = gcvCACHE_INVALIDATE;
        break;
    case DRM_VIV_GEM_FLUSH_CACHE:
        cache_op = gcvCACHE_FLUSH;
        break;
    case DRM_VIV_GEM_MEMORY_BARRIER:
        cache_op = gcvCACHE_MEMORY_BARRIER;
        break;
    default:
        break;
    }

    gckOS_ZeroMemory(&iface, sizeof(iface));
    iface.command = gcvHAL_CACHE;
    iface.hardwareType = gal_dev->device->defaultHwType;
    iface.u.Cache.node = viv_obj->node_handle;
    iface.u.Cache.operation = cache_op;
    iface.u.Cache.logical = args->logical;
    iface.u.Cache.bytes = args->bytes;
    gcmkONERROR(gckDEVICE_Dispatch(gal_dev->device, &iface));

OnError:
    if (gem_obj)
    {
        drm_gem_object_unreference_unlocked(gem_obj);
    }
    return gcmIS_ERROR(status) ?
-ENOTTY : 0; +} + +static int viv_ioctl_gem_query(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct drm_viv_gem_query *args = (struct drm_viv_gem_query*)data; + struct drm_gem_object *gem_obj = gcvNULL; + struct viv_gem_object *viv_obj = gcvNULL; + + gceSTATUS status = gcvSTATUS_OK; + gckGALDEVICE gal_dev = gcvNULL; + + gal_dev = (gckGALDEVICE)drm->dev_private; + if (!gal_dev) + { + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + gem_obj = drm_gem_object_lookup(file, args->handle); + if (!gem_obj) + { + gcmkONERROR(gcvSTATUS_NOT_FOUND); + } + viv_obj = container_of(gem_obj, struct viv_gem_object, base); + + switch (args->param) + { + case DRM_VIV_GEM_PARAM_POOL: + args->value = (__u64)viv_obj->node_object->pool; + break; + case DRM_VIV_GEM_PARAM_SIZE: + args->value = (__u64)gem_obj->size; + break; + default: + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + } + +OnError: + if (gem_obj) + { + drm_gem_object_unreference_unlocked(gem_obj); + } + return gcmIS_ERROR(status) ? -ENOTTY : 0; +} + +static int viv_ioctl_gem_timestamp(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct drm_viv_gem_timestamp *args = (struct drm_viv_gem_timestamp *)data; + struct drm_gem_object *gem_obj = gcvNULL; + struct viv_gem_object *viv_obj = gcvNULL; + + gceSTATUS status = gcvSTATUS_OK; + gckGALDEVICE gal_dev = gcvNULL; + + gal_dev = (gckGALDEVICE)drm->dev_private; + if (!gal_dev) + { + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + gem_obj = drm_gem_object_lookup(file, args->handle); + if (!gem_obj) + { + gcmkONERROR(gcvSTATUS_NOT_FOUND); + } + viv_obj = container_of(gem_obj, struct viv_gem_object, base); + + viv_obj->node_object->timeStamp += args->inc; + args->timestamp = viv_obj->node_object->timeStamp; + +OnError: + if (gem_obj) + { + drm_gem_object_unreference_unlocked(gem_obj); + } + return gcmIS_ERROR(status) ? 
-ENOTTY : 0; +} + +static int viv_ioctl_gem_set_tiling(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct drm_viv_gem_set_tiling *args = (struct drm_viv_gem_set_tiling*)data; + struct drm_gem_object *gem_obj = gcvNULL; + struct viv_gem_object *viv_obj = gcvNULL; + + gceSTATUS status = gcvSTATUS_OK; + gckGALDEVICE gal_dev = gcvNULL; + + gal_dev = (gckGALDEVICE)drm->dev_private; + if (!gal_dev) + { + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + gem_obj = drm_gem_object_lookup(file, args->handle); + if (!gem_obj) + { + gcmkONERROR(gcvSTATUS_NOT_FOUND); + } + viv_obj = container_of(gem_obj, struct viv_gem_object, base); + + viv_obj->node_object->tilingMode = args->tiling_mode; + viv_obj->node_object->tsMode = args->ts_mode; + viv_obj->node_object->clearValue = args->clear_value; + +OnError: + if (gem_obj) + { + drm_gem_object_unreference_unlocked(gem_obj); + } + return gcmIS_ERROR(status) ? -ENOTTY : 0; +} + +static int viv_ioctl_gem_get_tiling(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct drm_viv_gem_get_tiling *args = (struct drm_viv_gem_get_tiling*)data; + struct drm_gem_object *gem_obj = gcvNULL; + struct viv_gem_object *viv_obj = gcvNULL; + + gceSTATUS status = gcvSTATUS_OK; + gckGALDEVICE gal_dev = gcvNULL; + + gal_dev = (gckGALDEVICE)drm->dev_private; + if (!gal_dev) + { + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + gem_obj = drm_gem_object_lookup(file, args->handle); + if (!gem_obj) + { + gcmkONERROR(gcvSTATUS_NOT_FOUND); + } + viv_obj = container_of(gem_obj, struct viv_gem_object, base); + + args->tiling_mode = viv_obj->node_object->tilingMode; + args->ts_mode = viv_obj->node_object->tsMode; + args->clear_value = viv_obj->node_object->clearValue; + +OnError: + if (gem_obj) + { + drm_gem_object_unreference_unlocked(gem_obj); + } + return gcmIS_ERROR(status) ? 
-ENOTTY : 0; +} + +static int viv_ioctl_gem_attach_aux(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct drm_viv_gem_attach_aux *args = (struct drm_viv_gem_attach_aux*)data; + struct drm_gem_object *gem_obj = gcvNULL; + struct viv_gem_object *viv_obj = gcvNULL; + struct drm_gem_object *gem_ts_obj = gcvNULL; + + gceSTATUS status = gcvSTATUS_OK; + gckGALDEVICE gal_dev = gcvNULL; + gckVIDMEM_NODE nodeObj = gcvNULL; + + gal_dev = (gckGALDEVICE)drm->dev_private; + if (!gal_dev) + { + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + gem_obj = drm_gem_object_lookup(file, args->handle); + if (!gem_obj) + { + gcmkONERROR(gcvSTATUS_NOT_FOUND); + } + viv_obj = container_of(gem_obj, struct viv_gem_object, base); + nodeObj = viv_obj->node_object; + + /* do not support re-attach */ + if (nodeObj->tsNode) + { + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + } + + if (args->ts_handle) + { + struct viv_gem_object *viv_ts_obj; + gckKERNEL kernel = gal_dev->device->map[gal_dev->device->defaultHwType].kernels[0]; + + gem_ts_obj = drm_gem_object_lookup(file, args->ts_handle); + if (!gem_ts_obj) + { + gcmkONERROR(gcvSTATUS_NOT_FOUND); + } + viv_ts_obj = container_of(gem_ts_obj, struct viv_gem_object, base); + + gcmkONERROR(gckVIDMEM_NODE_Reference(kernel, viv_ts_obj->node_object)); + nodeObj->tsNode = viv_ts_obj->node_object; + } + +OnError: + if (gem_obj) + { + drm_gem_object_unreference_unlocked(gem_obj); + + if (gem_ts_obj) + { + drm_gem_object_unreference_unlocked(gem_ts_obj); + } + } + return gcmIS_ERROR(status) ? 
-ENOTTY : 0; +} + +static int viv_ioctl_gem_ref_node(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct drm_viv_gem_ref_node *args = (struct drm_viv_gem_ref_node*)data; + struct drm_gem_object *gem_obj = gcvNULL; + struct viv_gem_object *viv_obj = gcvNULL; + + gceSTATUS status = gcvSTATUS_OK; + gckGALDEVICE gal_dev = gcvNULL; + gckKERNEL kernel = gcvNULL; + gctUINT32 processID; + gckVIDMEM_NODE nodeObj; + gctUINT32 nodeHandle = 0, tsNodeHandle = 0; + gctBOOL refered = gcvFALSE; + int ret = 0; + + gal_dev = (gckGALDEVICE)drm->dev_private; + if (!gal_dev) + { + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + kernel = gal_dev->device->map[gal_dev->device->defaultHwType].kernels[0]; + + gem_obj = drm_gem_object_lookup(file, args->handle); + if (!gem_obj) + { + gcmkONERROR(gcvSTATUS_NOT_FOUND); + } + viv_obj = container_of(gem_obj, struct viv_gem_object, base); + nodeObj = viv_obj->node_object; + + gcmkONERROR(gckOS_GetProcessID(&processID)); + gcmkONERROR(gckVIDMEM_HANDLE_Allocate(kernel, nodeObj, &nodeHandle)); + gcmkONERROR( + gckKERNEL_AddProcessDB(kernel, + processID, gcvDB_VIDEO_MEMORY, + gcmINT2PTR(nodeHandle), + gcvNULL, + 0)); + gcmkONERROR(gckVIDMEM_NODE_Reference(kernel, nodeObj)); + refered = gcvTRUE; + + if (nodeObj->tsNode) + { + gcmkONERROR(gckVIDMEM_HANDLE_Allocate(kernel, nodeObj->tsNode, &tsNodeHandle)); + gcmkONERROR( + gckKERNEL_AddProcessDB(kernel, + processID, gcvDB_VIDEO_MEMORY, + gcmINT2PTR(tsNodeHandle), + gcvNULL, + 0)); + gcmkONERROR(gckVIDMEM_NODE_Reference(kernel, nodeObj->tsNode)); + } + args->node = nodeHandle; + args->ts_node = tsNodeHandle; + +OnError: + if (gcmIS_ERROR(status) && kernel) + { + gctUINT32 processID; + + gcmkVERIFY_OK(gckOS_GetProcessID(&processID)); + + if (tsNodeHandle) + { + gckVIDMEM_HANDLE_Dereference(kernel, processID, tsNodeHandle); + } + + if (nodeHandle) + { + gckVIDMEM_HANDLE_Dereference(kernel, processID, nodeHandle); + } + + if (refered) + { + gcmkONERROR(gckVIDMEM_NODE_Dereference(kernel, 
nodeObj)); + } + + args->node = 0; + args->ts_node = 0; + + ret = -ENOTTY; + } + + if (gem_obj) + { + drm_gem_object_unreference_unlocked(gem_obj); + } + + return ret; +} + +static const struct drm_ioctl_desc viv_ioctls[] = +{ + DRM_IOCTL_DEF_DRV(VIV_GEM_CREATE, viv_ioctl_gem_create, DRM_AUTH | DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VIV_GEM_LOCK, viv_ioctl_gem_lock, DRM_AUTH | DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VIV_GEM_UNLOCK, viv_ioctl_gem_unlock, DRM_AUTH | DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VIV_GEM_CACHE, viv_ioctl_gem_cache, DRM_AUTH | DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VIV_GEM_QUERY, viv_ioctl_gem_query, DRM_AUTH | DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VIV_GEM_TIMESTAMP, viv_ioctl_gem_timestamp, DRM_AUTH | DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VIV_GEM_SET_TILING, viv_ioctl_gem_set_tiling, DRM_AUTH | DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VIV_GEM_GET_TILING, viv_ioctl_gem_get_tiling, DRM_AUTH | DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VIV_GEM_ATTACH_AUX, viv_ioctl_gem_attach_aux, DRM_AUTH | DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VIV_GEM_REF_NODE, viv_ioctl_gem_ref_node, DRM_AUTH | DRM_RENDER_ALLOW), +}; + +int viv_drm_open(struct drm_device *drm, struct drm_file *file) +{ + gctINT i; + gctUINT32 pid = _GetProcessID(); + gckGALDEVICE gal_dev = (gckGALDEVICE)drm->dev_private; + gceSTATUS status = gcvSTATUS_OK; + + for (i = 0; i < gcdMAX_GPU_COUNT; ++i) + { + if (gal_dev->kernels[i]) + { + gcmkONERROR(gckKERNEL_AttachProcessEx(gal_dev->kernels[i], gcvTRUE, pid)); + } + } + file->driver_priv = gcmINT2PTR(pid); + +OnError: + return gcmIS_ERROR(status) ? 
-ENODEV : 0; +} + +void viv_drm_postclose(struct drm_device *drm, struct drm_file *file) +{ + gctINT i; + gctUINT32 pid = gcmPTR2INT(file->driver_priv); + gckGALDEVICE gal_dev = (gckGALDEVICE)drm->dev_private; + + for (i = 0; i < gcdMAX_GPU_COUNT; ++i) + { + if (gal_dev->kernels[i]) + { + gcmkVERIFY_OK(gckKERNEL_AttachProcessEx(gal_dev->kernels[i], gcvFALSE, pid)); + } + } +} + +static const struct file_operations viv_drm_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif + .poll = drm_poll, + .read = drm_read, + .llseek = no_llseek, +}; + +static struct drm_driver viv_drm_driver = { + .driver_features = DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER, + .open = viv_drm_open, + .postclose = viv_drm_postclose, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0) + .gem_free_object_unlocked = viv_gem_free_object, +#else + .gem_free_object = viv_gem_free_object, +#endif + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_export = viv_gem_prime_export, + .gem_prime_import = viv_gem_prime_import, + .ioctls = viv_ioctls, + .num_ioctls = DRM_VIV_NUM_IOCTLS, + .fops = &viv_drm_fops, + .name = "vivante", + .desc = "vivante DRM", + .date = "20170808", + .major = 1, + .minor = 0, +}; + +int viv_drm_probe(struct device *dev) +{ + int ret = 0; + gceSTATUS status = gcvSTATUS_OK; + gckGALDEVICE gal_dev = gcvNULL; + struct drm_device *drm = gcvNULL; + + gal_dev = (gckGALDEVICE)dev_get_drvdata(dev); + if (!gal_dev) + { + ret = -ENODEV; + gcmkONERROR(gcvSTATUS_INVALID_OBJECT); + } + + drm = drm_dev_alloc(&viv_drm_driver, dev); + if (IS_ERR(drm)) + { + ret = PTR_ERR(drm); + gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); + } + drm->dev_private = (void*)gal_dev; + + ret = drm_dev_register(drm, 0); + if (ret) + { + gcmkONERROR(gcvSTATUS_GENERIC_IO); + } + + gal_dev->drm = (void*)drm; + +OnError: + if 
(gcmIS_ERROR(status)) + { + if (drm) + { + drm_dev_unref(drm); + } + printk(KERN_ERR "galcore: Failed to setup drm device.\n"); + } + return ret; +} + +int viv_drm_remove(struct device *dev) +{ + gckGALDEVICE gal_dev = (gckGALDEVICE)dev_get_drvdata(dev); + + if (gal_dev) + { + struct drm_device *drm = (struct drm_device*)gal_dev->drm; + + drm_dev_unregister(drm); + drm_dev_unref(drm); + } + + return 0; +} + +#endif diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_iommu.c b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_iommu.c new file mode 100644 index 000000000000..f36e11682f91 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_iommu.c @@ -0,0 +1,250 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#include "gc_hal_kernel_linux.h" +#include "gc_hal_kernel_device.h" + +#include +#include + +#define _GC_OBJ_ZONE gcvZONE_OS + +typedef struct _gcsIOMMU +{ + struct iommu_domain * domain; + struct device * device; +} +gcsIOMMU; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) +static int +_IOMMU_Fault_Handler( + struct iommu_domain * Domain, + struct device * Dev, + unsigned long DomainAddress, + int flags, + void * args + ) +#else +static int +_IOMMU_Fault_Handler( + struct iommu_domain * Domain, + struct device * Dev, + unsigned long DomainAddress, + int flags + ) +#endif +{ + return 0; +} + +static int +_FlatMapping( + IN gckIOMMU Iommu + ) +{ + gceSTATUS status; + gctUINT32 physical; + + for (physical = 0; physical < 0x80000000; physical += PAGE_SIZE) + { + gcmkTRACE_ZONE( + gcvLEVEL_INFO, gcvZONE_OS, + "Map %x => %x bytes = %d", + physical, physical, PAGE_SIZE + ); + + gcmkONERROR(gckIOMMU_Map(Iommu, physical, physical, PAGE_SIZE)); + } + + return gcvSTATUS_OK; + +OnError: + return status; +} + +void +gckIOMMU_Destory( + IN gckOS Os, + IN gckIOMMU Iommu + ) +{ + gcmkHEADER(); + + if (Iommu->domain && Iommu->device) + { + iommu_attach_device(Iommu->domain, Iommu->device); + } + + if (Iommu->domain) + { + iommu_domain_free(Iommu->domain); + } + + if (Iommu) + { + gcmkOS_SAFE_FREE(Os, Iommu); + } + + gcmkFOOTER_NO(); +} + +gceSTATUS +gckIOMMU_Construct( + IN gckOS Os, + OUT gckIOMMU * Iommu + ) +{ + gceSTATUS status; + gckIOMMU iommu = gcvNULL; + struct device *dev; + int ret; + + gcmkHEADER(); + + dev = &Os->device->platform->device->dev; + + gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcsIOMMU), (gctPOINTER *)&iommu)); + + gckOS_ZeroMemory(iommu, gcmSIZEOF(gcsIOMMU)); + + iommu->domain = iommu_domain_alloc(&platform_bus_type); + + if (!iommu->domain) + { + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "iommu_domain_alloc() fail"); + + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + } + 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) + iommu_set_fault_handler(iommu->domain, _IOMMU_Fault_Handler, dev); +#else + iommu_set_fault_handler(iommu->domain, _IOMMU_Fault_Handler); +#endif + + ret = iommu_attach_device(iommu->domain, dev); + + if (ret) + { + gcmkTRACE_ZONE( + gcvLEVEL_INFO, gcvZONE_OS, "iommu_attach_device() fail %d", ret); + + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + } + + iommu->device = dev; + + _FlatMapping(iommu); + + *Iommu = iommu; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + + gckIOMMU_Destory(Os, iommu); + + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckIOMMU_Map( + IN gckIOMMU Iommu, + IN gctUINT32 DomainAddress, + IN gctUINT32 Physical, + IN gctUINT32 Bytes + ) +{ + gceSTATUS status; + int ret; + + gcmkHEADER_ARG("DomainAddress=%#X, Physical=%#X, Bytes=%d", + DomainAddress, Physical, Bytes); + + ret = iommu_map(Iommu->domain, DomainAddress, Physical, Bytes, 0); + + if (ret) + { + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + + gcmkFOOTER(); + return status; + +} + +gceSTATUS +gckIOMMU_Unmap( + IN gckIOMMU Iommu, + IN gctUINT32 DomainAddress, + IN gctUINT32 Bytes + ) +{ + gcmkHEADER(); + + iommu_unmap(Iommu->domain, DomainAddress, Bytes); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_linux.c b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_linux.c new file mode 100644 index 000000000000..9e258a959143 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_linux.c @@ -0,0 +1,476 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation 
+* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. 
If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#include "gc_hal_kernel_linux.h" + +#define _GC_OBJ_ZONE gcvZONE_KERNEL + +/******************************************************************************\ +******************************* gckKERNEL API Code ****************************** +\******************************************************************************/ + +/******************************************************************************* +** +** gckKERNEL_QueryVideoMemory +** +** Query the amount of video memory. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** OUTPUT: +** +** gcsHAL_INTERFACE * Interface +** Pointer to an gcsHAL_INTERFACE structure that will be filled in with +** the memory information. +*/ +gceSTATUS +gckKERNEL_QueryVideoMemory( + IN gckKERNEL Kernel, + OUT gcsHAL_INTERFACE * Interface + ) +{ + gckGALDEVICE device; + + gcmkHEADER_ARG("Kernel=%p", Kernel); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + gcmkVERIFY_ARGUMENT(Interface != NULL); + + /* Extract the pointer to the gckGALDEVICE class. */ + device = (gckGALDEVICE) Kernel->context; + + /* Get internal memory size and physical address. */ + Interface->u.QueryVideoMemory.internalSize = device->internalSize; + Interface->u.QueryVideoMemory.internalPhysical = device->internalPhysicalName; + + /* Get external memory size and physical address. */ + Interface->u.QueryVideoMemory.externalSize = device->externalSize; + Interface->u.QueryVideoMemory.externalPhysical = device->externalPhysicalName; + + /* Get contiguous memory size and physical address. 
*/ + Interface->u.QueryVideoMemory.contiguousSize = device->contiguousSize; + Interface->u.QueryVideoMemory.contiguousPhysical = device->contiguousPhysicalName; + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckKERNEL_GetVideoMemoryPool +** +** Get the gckVIDMEM object belonging to the specified pool. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gcePOOL Pool +** Pool to query gckVIDMEM object for. +** +** OUTPUT: +** +** gckVIDMEM * VideoMemory +** Pointer to a variable that will hold the pointer to the gckVIDMEM +** object belonging to the requested pool. +*/ +gceSTATUS +gckKERNEL_GetVideoMemoryPool( + IN gckKERNEL Kernel, + IN gcePOOL Pool, + OUT gckVIDMEM * VideoMemory + ) +{ + gckGALDEVICE device; + gckVIDMEM videoMemory; + + gcmkHEADER_ARG("Kernel=%p Pool=%d", Kernel, Pool); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + gcmkVERIFY_ARGUMENT(VideoMemory != NULL); + + /* Extract the pointer to the gckGALDEVICE class. */ + device = (gckGALDEVICE) Kernel->context; + + /* Dispatch on pool. */ + switch (Pool) + { + case gcvPOOL_LOCAL_INTERNAL: + /* Internal memory. */ + videoMemory = device->internalVidMem; + break; + + case gcvPOOL_LOCAL_EXTERNAL: + /* External memory. */ + videoMemory = device->externalVidMem; + break; + + case gcvPOOL_SYSTEM: + /* System memory. */ + videoMemory = device->contiguousVidMem; + break; + + default: + /* Unknown pool. */ + videoMemory = NULL; + } + + /* Return pointer to the gckVIDMEM object. */ + *VideoMemory = videoMemory; + + /* Return status. */ + gcmkFOOTER_ARG("*VideoMemory=%p", *VideoMemory); + return (videoMemory == NULL) ? gcvSTATUS_OUT_OF_MEMORY : gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckKERNEL_MapMemory +** +** Map video memory into the current process space. 
+** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gctPHYS_ADDR Physical +** Physical address of video memory to map. +** +** gctSIZE_T Bytes +** Number of bytes to map. +** +** OUTPUT: +** +** gctPOINTER * Logical +** Pointer to a variable that will hold the base address of the mapped +** memory region. +*/ +gceSTATUS +gckKERNEL_MapMemory( + IN gckKERNEL Kernel, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + OUT gctPOINTER * Logical + ) +{ + gckKERNEL kernel = Kernel; + gctPHYS_ADDR physical = gcmNAME_TO_PTR(Physical); + + return gckOS_MapMemory(Kernel->os, physical, Bytes, Logical); +} + +/******************************************************************************* +** +** gckKERNEL_UnmapMemory +** +** Unmap video memory from the current process space. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gctPHYS_ADDR Physical +** Physical address of video memory to map. +** +** gctSIZE_T Bytes +** Number of bytes to map. +** +** gctPOINTER Logical +** Base address of the mapped memory region. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckKERNEL_UnmapMemory( + IN gckKERNEL Kernel, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + IN gctPOINTER Logical, + IN gctUINT32 ProcessID + ) +{ + gckKERNEL kernel = Kernel; + gctPHYS_ADDR physical = gcmNAME_TO_PTR(Physical); + + return gckOS_UnmapMemoryEx(Kernel->os, physical, Bytes, Logical, ProcessID); +} + +/******************************************************************************* +** +** gckKERNEL_MapVideoMemory +** +** Get the logical address for a hardware specific memory address for the +** current process. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gctBOOL InUserSpace +** gcvTRUE to map the memory into the user space. +** +** gctUINT32 Address +** Hardware specific memory address. 
+** +** OUTPUT: +** +** gctPOINTER * Logical +** Pointer to a variable that will hold the logical address of the +** specified memory address. +*/ +gceSTATUS +gckKERNEL_MapVideoMemoryEx( + IN gckKERNEL Kernel, + IN gceCORE Core, + IN gctBOOL InUserSpace, + IN gctUINT32 Address, + IN gcePOOL Pool, + OUT gctPOINTER * Logical + ) +{ + gckGALDEVICE device = gcvNULL; + gctUINT32 offset = 0; + gctUINT32 base = 0; + gctSIZE_T bytes = 0; + gctPHYS_ADDR physical = gcvNULL; + gceSTATUS status; + gctPOINTER logical = gcvNULL; + + gcmkHEADER_ARG("Kernel=%p InUserSpace=%d Address=%08x", + Kernel, InUserSpace, Address); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + gcmkVERIFY_ARGUMENT(Logical != NULL); + + /* Extract the pointer to the gckGALDEVICE class. */ + device = (gckGALDEVICE) Kernel->context; + + { + offset = Address; + } + + /* Dispatch on pool. */ + switch (Pool) + { + case gcvPOOL_LOCAL_INTERNAL: + /* Internal memory. */ + logical = device->internalLogical; + /* Impossible to use per device logical for all user processes. */ + BUG_ON("Incorrect path"); + break; + + case gcvPOOL_LOCAL_EXTERNAL: + physical = device->externalPhysical; + bytes = device->externalSize; + + { + base = Kernel->externalBaseAddress; + } + + break; + + case gcvPOOL_SYSTEM: + /* System memory. */ + physical = device->contiguousPhysical; + bytes = device->contiguousSize; + + { + base = Kernel->contiguousBaseAddress; + } + + break; + + default: + /* Invalid memory pool. */ + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + gcmkONERROR(gckOS_MapMemory(Kernel->os, physical, bytes, &logical)); + + /* GPU address offset */ + offset -= base; + + /* Build logical address of specified address. */ + *Logical = (gctPOINTER) ((gctUINT8_PTR) logical + offset); + + /* Success. */ + gcmkFOOTER_ARG("*Logical=%p", *Logical); + return gcvSTATUS_OK; + +OnError: + /* Retunn the status. 
*/ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckKERNEL_MapVideoMemory +** +** Get the logical address for a hardware specific memory address for the +** current process. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gctBOOL InUserSpace +** gcvTRUE to map the memory into the user space. +** +** gctUINT32 Address +** Hardware specific memory address. +** +** OUTPUT: +** +** gctPOINTER * Logical +** Pointer to a variable that will hold the logical address of the +** specified memory address. +*/ +gceSTATUS +gckKERNEL_MapVideoMemory( + IN gckKERNEL Kernel, + IN gctBOOL InUserSpace, + IN gctUINT32 Address, + OUT gctPOINTER * Logical + ) +{ + return gckKERNEL_MapVideoMemoryEx(Kernel, gcvCORE_MAJOR, InUserSpace, Address, gcvPOOL_SYSTEM, Logical); +} +/******************************************************************************* +** +** gckKERNEL_Notify +** +** This function iscalled by clients to notify the gckKERNRL object of an event. +** +** INPUT: +** +** gckKERNEL Kernel +** Pointer to an gckKERNEL object. +** +** gceNOTIFY Notification +** Notification event. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckKERNEL_Notify( + IN gckKERNEL Kernel, + IN gceNOTIFY Notification, + IN gctBOOL Data + ) +{ + gceSTATUS status; + + /* Dispatch on notifcation. */ + switch (Notification) + { + case gcvNOTIFY_INTERRUPT: + /* Process the interrupt. */ +#if COMMAND_PROCESSOR_VERSION > 1 + status = gckINTERRUPT_Notify(Kernel->interrupt, Data); +#else + status = gckHARDWARE_Interrupt(Kernel->hardware, Data); +#endif + break; + + default: + status = gcvSTATUS_OK; + break; + } + + /* Success. */ + return status; +} + +gceSTATUS +gckKERNEL_QuerySettings( + IN gckKERNEL Kernel, + OUT gcsKERNEL_SETTINGS * Settings + ) +{ + gckGALDEVICE device; + + gcmkHEADER_ARG("Kernel=%p", Kernel); + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); + gcmkVERIFY_ARGUMENT(Settings != gcvNULL); + + /* Extract the pointer to the gckGALDEVICE class. */ + device = (gckGALDEVICE) Kernel->context; + + /* Fill in signal. */ + Settings->signal = device->signal; + + /* Success. */ + gcmkFOOTER_ARG("Settings->signal=%d", Settings->signal); + return gcvSTATUS_OK; +} diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_linux.h b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_linux.h new file mode 100644 index 000000000000..cddb76f50c64 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_linux.h @@ -0,0 +1,409 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#ifndef __gc_hal_kernel_linux_h_ +#define __gc_hal_kernel_linux_h_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifdef MODVERSIONS +# include +#endif +#include +#include + +#if ENABLE_GPU_CLOCK_BY_DRIVER && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) +#include +#endif + +#define NTSTRSAFE_NO_CCH_FUNCTIONS +#include "gc_hal.h" +#include "gc_hal_driver.h" +#include "gc_hal_kernel.h" +#include "gc_hal_kernel_platform.h" +#include "gc_hal_kernel_device.h" +#include "gc_hal_kernel_os.h" +#include "gc_hal_kernel_debugfs.h" +#include "gc_hal_ta.h" + + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31) +#define FIND_TASK_BY_PID(x) pid_task(find_vpid(x), PIDTYPE_PID) +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27) +#define FIND_TASK_BY_PID(x) find_task_by_vpid(x) +#else +#define FIND_TASK_BY_PID(x) find_task_by_pid(x) +#endif + +#ifndef DEVICE_NAME +# define DEVICE_NAME "galcore" +#endif + +#ifndef CLASS_NAME +# define CLASS_NAME "graphics_class" +#endif + +#define GetPageCount(size, offset) ((((size) + ((offset) & ~PAGE_MASK)) + PAGE_SIZE - 1) >> PAGE_SHIFT) + +#if LINUX_VERSION_CODE >= KERNEL_VERSION (3,7,0) +#define gcdVM_FLAGS (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP) +#else +#define gcdVM_FLAGS (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED) +#endif + +/* Protection bit when mapping memroy to user sapce */ +#define gcmkPAGED_MEMROY_PROT(x) pgprot_writecombine(x) + +#define gcdSUPPRESS_OOM_MESSAGE 1 + +#if gcdSUPPRESS_OOM_MESSAGE +#define gcdNOWARN __GFP_NOWARN +#else +#define gcdNOWARN 0 +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION (4, 1, 0) +#ifdef gcdIRQ_SHARED +# define gcdIRQF_FLAG (IRQF_SHARED) +# else +# define gcdIRQF_FLAG (0) +# endif +#else +#ifdef gcdIRQ_SHARED +# define gcdIRQF_FLAG (IRQF_DISABLED | IRQF_SHARED) +# else +# define gcdIRQF_FLAG (IRQF_DISABLED) +# endif 
+#endif + +/* gcdLINUX_SYNC_FILE and CONFIG_SYNC_FILE. */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) +# define dma_fence fence +# define dma_fence_array fence_array +# define dma_fence_ops fence_ops + +# define dma_fence_default_wait fence_default_wait + +# define dma_fence_signal(f) fence_signal(f) +# define dma_fence_signal_locked(f) fence_signal_locked(f) +# define dma_fence_get(f) fence_get(f) +# define dma_fence_put(f) fence_put(f) +# define dma_fence_is_array(f) fence_is_array(f) +# define dma_fence_is_signaled(f) fence_is_signaled(f) +# define to_dma_fence_array(f) to_fence_array(f) +# define dma_fence_wait_timeout(f, n, t) fence_wait_timeout((f), (n), (t)) +# define dma_fence_init(f, o, l, t, s) fence_init((f), (o), (l), (t), (s)) +# define dma_fence_context_alloc(s) fence_context_alloc(s) + +#endif + +extern struct device *galcore_device; + +/******************************************************************************\ +********************************** Structures ********************************** +\******************************************************************************/ +typedef struct _gcsIOMMU * gckIOMMU; + +typedef struct _gcsUSER_MAPPING * gcsUSER_MAPPING_PTR; +typedef struct _gcsUSER_MAPPING +{ + /* Pointer to next mapping structure. */ + gcsUSER_MAPPING_PTR next; + + /* Physical address of this mapping. */ + gctUINT32 physical; + + /* Logical address of this mapping. */ + gctPOINTER logical; + + /* Number of bytes of this mapping. */ + gctSIZE_T bytes; + + /* Starting address of this mapping. */ + gctINT8_PTR start; + + /* Ending address of this mapping. */ + gctINT8_PTR end; +} +gcsUSER_MAPPING; + +typedef struct _gcsINTEGER_DB * gcsINTEGER_DB_PTR; +typedef struct _gcsINTEGER_DB +{ + struct idr idr; + spinlock_t lock; + gctINT curr; +} +gcsINTEGER_DB; + +struct _gckOS +{ + /* Object. 
*/ + gcsOBJECT object; + + /* Pointer to device */ + gckGALDEVICE device; + + /* Memory management */ + struct mutex mdlMutex; + struct list_head mdlHead; + + /* Kernel process ID. */ + gctUINT32 kernelProcessID; + + /* Signal management. */ + + /* Lock. */ + struct mutex signalMutex; + + /* signal id database. */ + gcsINTEGER_DB signalDB; + + gcsUSER_MAPPING_PTR userMap; + + /* workqueue for os timer. */ + struct workqueue_struct * workqueue; + + /* Allocate extra page to avoid cache overflow */ + struct page* paddingPage; + + /* Detect unfreed allocation. */ + atomic_t allocateCount; + + struct list_head allocatorList; + + gcsDEBUGFS_DIR allocatorDebugfsDir; + + /* Lock for register access check. */ + spinlock_t registerAccessLock; + + /* External power states. */ + gctBOOL powerStates[gcdMAX_GPU_COUNT]; + + /* External clock states. */ + gctBOOL clockStates[gcdMAX_GPU_COUNT]; + + /* IOMMU. */ + gckIOMMU iommu; +}; + +typedef struct _gcsSIGNAL * gcsSIGNAL_PTR; +typedef struct _gcsSIGNAL +{ + /* Kernel sync primitive. */ + volatile unsigned int done; + spinlock_t lock; + + wait_queue_head_t wait; + + /* Manual reset flag. */ + gctBOOL manualReset; + + /* The reference counter. */ + atomic_t ref; + + /* The owner of the signal. */ + gctHANDLE process; + + /* ID. */ + gctUINT32 id; + +#if gcdLINUX_SYNC_FILE +#ifndef CONFIG_SYNC_FILE + /* Parent timeline. */ + struct sync_timeline * timeline; +# else + struct dma_fence *fence; +# endif +#endif +} +gcsSIGNAL; + +typedef struct _gcsOSTIMER * gcsOSTIMER_PTR; +typedef struct _gcsOSTIMER +{ + struct delayed_work work; + gctTIMERFUNCTION function; + gctPOINTER data; +} gcsOSTIMER; + +gceSTATUS +gckOS_ImportAllocators( + gckOS Os + ); + +gceSTATUS +gckOS_FreeAllocators( + gckOS Os + ); + +/* Reserved memory. 
*/ +gceSTATUS +gckOS_RequestReservedMemory( + gckOS Os, + unsigned long Start, + unsigned long Size, + const char * Name, + gctBOOL Requested, + void ** MemoryHandle + ); + +void +gckOS_ReleaseReservedMemory( + gckOS Os, + void * MemoryHandle + ); + +gceSTATUS +_ConvertLogical2Physical( + IN gckOS Os, + IN gctPOINTER Logical, + IN gctUINT32 ProcessID, + IN PLINUX_MDL Mdl, + OUT gctPHYS_ADDR_T * Physical + ); + +gctBOOL +_QuerySignal( + IN gckOS Os, + IN gctSIGNAL Signal + ); + +static inline gctINT +_GetProcessID( + void + ) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) + return task_tgid_vnr(current); +#else + return current->tgid; +#endif +} + +static inline void +_MemoryBarrier( + void + ) +{ +#if defined(CONFIG_ARM) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)) + dsb(); +#else + mb(); +#endif +} + +static inline void +_Barrier( + void + ) +{ + barrier(); +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) +static inline int +is_vmalloc_addr( + void *Addr + ) +{ + unsigned long addr = (unsigned long)Addr; + + return addr >= VMALLOC_START && addr < VMALLOC_END; +} +#endif + +#ifdef CONFIG_IOMMU_SUPPORT +void +gckIOMMU_Destory( + IN gckOS Os, + IN gckIOMMU Iommu + ); + +gceSTATUS +gckIOMMU_Construct( + IN gckOS Os, + OUT gckIOMMU * Iommu + ); + +gceSTATUS +gckIOMMU_Map( + IN gckIOMMU Iommu, + IN gctUINT32 DomainAddress, + IN gctUINT32 Physical, + IN gctUINT32 Bytes + ); + +gceSTATUS +gckIOMMU_Unmap( + IN gckIOMMU Iommu, + IN gctUINT32 DomainAddress, + IN gctUINT32 Bytes + ); +#endif + +#endif /* __gc_hal_kernel_linux_h_ */ diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_math.c b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_math.c new file mode 100644 index 000000000000..f373f47a41e0 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_math.c @@ -0,0 +1,66 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 
Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#include "gc_hal_kernel_linux.h" + +gctINT +gckMATH_ModuloInt( + IN gctINT X, + IN gctINT Y + ) +{ + if(Y ==0) {return 0;} + else {return X % Y;} +} diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_mutex.h b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_mutex.h new file mode 100644 index 000000000000..d2c94e2254ad --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_mutex.h @@ -0,0 +1,89 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#ifndef _gc_hal_kernel_mutex_h_ +#define _gc_hal_kernel_mutex_h_ + +#include "gc_hal.h" +#include + +/* Create a new mutex. */ +#define gckOS_CreateMutex(Os, Mutex) \ +({ \ + gceSTATUS _status; \ + gcmkHEADER_ARG("Os=0x%X", Os); \ + \ + /* Validate the arguments. 
*/ \ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); \ + gcmkVERIFY_ARGUMENT(Mutex != gcvNULL); \ + \ + /* Allocate the mutex structure. */ \ + _status = gckOS_Allocate(Os, gcmSIZEOF(struct mutex), Mutex); \ + \ + if (gcmIS_SUCCESS(_status)) \ + { \ + /* Initialize the mutex. */ \ + mutex_init(*(struct mutex **)Mutex); \ + } \ + \ + /* Return status. */ \ + gcmkFOOTER_ARG("*Mutex=0x%X", *(struct mutex **)Mutex); \ + _status; \ +}) + +#endif + + + diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_os.c b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_os.c new file mode 100644 index 000000000000..573cc80cc83c --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_os.c @@ -0,0 +1,7419 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can + your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#include "gc_hal_kernel_linux.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) +#include +#endif +#include +#include + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27) +#include +#endif + +#if gcdLINUX_SYNC_FILE +# include +# include "gc_hal_kernel_sync.h" +#endif + +#if defined(CONFIG_DMA_SHARED_BUFFER) +#include +#endif + +#if defined(CONFIG_ARM) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0) +#include +#endif + +#define _GC_OBJ_ZONE gcvZONE_OS + +#include "gc_hal_kernel_allocator.h" + +#define gcmkBUG_ON(x) \ + do { \ + if (unlikely(!!(x))) \ + { \ + printk("[galcore]: BUG ON @ %s(%d)\n", __func__, __LINE__); \ + dump_stack(); \ + } \ + } while (0) + +/******************************************************************************\ +******************************* Private Functions ****************************** +\******************************************************************************/ +static gctINT +_GetThreadID( + void + ) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) + return task_pid_vnr(current); +#else + return current->pid; +#endif +} + +/* Must hold Mdl->mpasMutex before call this function. */ +static inline PLINUX_MDL_MAP +_CreateMdlMap( + IN PLINUX_MDL Mdl, + IN gctINT ProcessID + ) +{ + PLINUX_MDL_MAP mdlMap; + + gcmkHEADER_ARG("Mdl=0x%X ProcessID=%d", Mdl, ProcessID); + + mdlMap = (PLINUX_MDL_MAP)kmalloc(sizeof(struct _LINUX_MDL_MAP), GFP_KERNEL | gcdNOWARN); + + if (mdlMap == gcvNULL) + { + gcmkFOOTER_NO(); + return gcvNULL; + } + + mdlMap->pid = ProcessID; + mdlMap->vmaAddr = gcvNULL; + mdlMap->count = 0; + + list_add(&mdlMap->link, &Mdl->mapsHead); + + gcmkFOOTER_ARG("0x%X", mdlMap); + return mdlMap; +} + +/* Must hold Mdl->mpasMutex before call this function. 
*/ +static inline gceSTATUS +_DestroyMdlMap( + IN PLINUX_MDL Mdl, + IN PLINUX_MDL_MAP MdlMap + ) +{ + gcmkHEADER_ARG("Mdl=0x%X MdlMap=0x%X", Mdl, MdlMap); + + /* Verify the arguments. */ + gcmkVERIFY_ARGUMENT(MdlMap != gcvNULL); + + list_del(&MdlMap->link); + kfree(MdlMap); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/* Must hold Mdl->mpasMutex before call this function. */ +extern PLINUX_MDL_MAP +FindMdlMap( + IN PLINUX_MDL Mdl, + IN gctINT ProcessID + ) +{ + PLINUX_MDL_MAP mdlMap; + + gcmkHEADER_ARG("Mdl=0x%X ProcessID=%d", Mdl, ProcessID); + + if (Mdl == gcvNULL) + { + gcmkFOOTER_NO(); + return gcvNULL; + } + + list_for_each_entry(mdlMap, &Mdl->mapsHead, link) + { + if (mdlMap->pid == ProcessID) + { + gcmkFOOTER_ARG("0x%X", mdlMap); + return mdlMap; + } + } + + gcmkFOOTER_NO(); + return gcvNULL; +} + + +static PLINUX_MDL +_CreateMdl( + IN gckOS Os + ) +{ + PLINUX_MDL mdl; + + gcmkHEADER(); + + mdl = (PLINUX_MDL)kzalloc(sizeof(struct _LINUX_MDL), GFP_KERNEL | gcdNOWARN); + + if (mdl) + { + mdl->os = Os; + atomic_set(&mdl->refs, 1); + mutex_init(&mdl->mapsMutex); + INIT_LIST_HEAD(&mdl->mapsHead); + } + + gcmkFOOTER_ARG("0x%X", mdl); + return mdl; +} + +static gceSTATUS +_DestroyMdl( + IN PLINUX_MDL Mdl + ) +{ + gcmkHEADER_ARG("Mdl=0x%X", Mdl); + + /* Verify the arguments. */ + gcmkVERIFY_ARGUMENT(Mdl != gcvNULL); + + if (atomic_dec_and_test(&Mdl->refs)) + { + gckOS os = Mdl->os; + gckALLOCATOR allocator = Mdl->allocator; + PLINUX_MDL_MAP mdlMap, next; + + /* Valid private means alloc/attach successfully */ + if (Mdl->priv) + { + if (Mdl->addr) + { + allocator->ops->UnmapKernel(allocator, Mdl, Mdl->addr); + } + allocator->ops->Free(allocator, Mdl); + } + + mutex_lock(&Mdl->mapsMutex); + list_for_each_entry_safe(mdlMap, next, &Mdl->mapsHead, link) + { + gcmkVERIFY_OK(_DestroyMdlMap(Mdl, mdlMap)); + } + mutex_unlock(&Mdl->mapsMutex); + + if (Mdl->link.next) + { + /* Remove the node from global list.. 
*/ + mutex_lock(&os->mdlMutex); + list_del(&Mdl->link); + mutex_unlock(&os->mdlMutex); + } + + kfree(Mdl); + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** Integer Id Management. +*/ +gceSTATUS +_AllocateIntegerId( + IN gcsINTEGER_DB_PTR Database, + IN gctPOINTER KernelPointer, + OUT gctUINT32 *Id + ) +{ + int result; + gctINT next; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) + idr_preload(GFP_KERNEL | gcdNOWARN); + + spin_lock(&Database->lock); + + next = (Database->curr + 1 <= 0) ? 1 : Database->curr + 1; + + result = idr_alloc(&Database->idr, KernelPointer, next, 0, GFP_ATOMIC); + + /* ID allocated should not be 0. */ + gcmkASSERT(result != 0); + + if (result > 0) + { + Database->curr = *Id = result; + } + + spin_unlock(&Database->lock); + + idr_preload_end(); + + if (result < 0) + { + return gcvSTATUS_OUT_OF_RESOURCES; + } +#else +again: + if (idr_pre_get(&Database->idr, GFP_KERNEL | gcdNOWARN) == 0) + { + return gcvSTATUS_OUT_OF_MEMORY; + } + + spin_lock(&Database->lock); + + next = (Database->curr + 1 <= 0) ? 1 : Database->curr + 1; + + /* Try to get a id greater than 0. 
*/ + result = idr_get_new_above(&Database->idr, KernelPointer, next, Id); + + if (!result) + { + Database->curr = *Id; + } + + spin_unlock(&Database->lock); + + if (result == -EAGAIN) + { + goto again; + } + + if (result != 0) + { + return gcvSTATUS_OUT_OF_RESOURCES; + } +#endif + + return gcvSTATUS_OK; +} + +gceSTATUS +_QueryIntegerId( + IN gcsINTEGER_DB_PTR Database, + IN gctUINT32 Id, + OUT gctPOINTER * KernelPointer + ) +{ + gctPOINTER pointer; + + spin_lock(&Database->lock); + + pointer = idr_find(&Database->idr, Id); + + spin_unlock(&Database->lock); + + if (pointer) + { + *KernelPointer = pointer; + return gcvSTATUS_OK; + } + else + { + gcmkTRACE_ZONE( + gcvLEVEL_ERROR, gcvZONE_OS, + "%s(%d) Id = %d is not found", + __FUNCTION__, __LINE__, Id); + + return gcvSTATUS_NOT_FOUND; + } +} + +gceSTATUS +_DestroyIntegerId( + IN gcsINTEGER_DB_PTR Database, + IN gctUINT32 Id + ) +{ + spin_lock(&Database->lock); + + idr_remove(&Database->idr, Id); + + spin_unlock(&Database->lock); + + return gcvSTATUS_OK; +} + +static inline gceSTATUS +_QueryProcessPageTable( + IN gctPOINTER Logical, + OUT gctPHYS_ADDR_T * Address + ) +{ + unsigned long logical = (unsigned long)Logical; + unsigned long offset = logical & ~PAGE_MASK; + + if (is_vmalloc_addr(Logical)) + { + /* vmalloc area. */ + *Address = page_to_phys(vmalloc_to_page(Logical)) | offset; + return gcvSTATUS_OK; + } + else if (virt_addr_valid(logical)) + { + /* Kernel logical address. */ + *Address = virt_to_phys(Logical); + return gcvSTATUS_OK; + } + else + { + /* Try user VM area. */ + struct vm_area_struct *vma; + spinlock_t *ptl; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + if (!current->mm) + return gcvSTATUS_NOT_FOUND; + + down_read(¤t->mm->mmap_sem); + vma = find_vma(current->mm, logical); + up_read(¤t->mm->mmap_sem); + + /* To check if mapped to user. 
*/ + if (!vma) + return gcvSTATUS_NOT_FOUND; + + pgd = pgd_offset(current->mm, logical); + if (pgd_none(*pgd) || pgd_bad(*pgd)) + return gcvSTATUS_NOT_FOUND; + + pud = pud_offset(pgd, logical); + if (pud_none(*pud) || pud_bad(*pud)) + return gcvSTATUS_NOT_FOUND; + + pmd = pmd_offset(pud, logical); + if (pmd_none(*pmd) || pmd_bad(*pmd)) + return gcvSTATUS_NOT_FOUND; + + pte = pte_offset_map_lock(current->mm, pmd, logical, &ptl); + if (!pte) + { + spin_unlock(ptl); + return gcvSTATUS_NOT_FOUND; + } + + if (!pte_present(*pte)) + { + pte_unmap_unlock(pte, ptl); + return gcvSTATUS_NOT_FOUND; + } + + *Address = (pte_pfn(*pte) << PAGE_SHIFT) | offset; + pte_unmap_unlock(pte, ptl); + + return gcvSTATUS_OK; + } +} + + +static gceSTATUS +_ShrinkMemory( + IN gckOS Os + ) +{ + gcsPLATFORM * platform; + gceSTATUS status = gcvSTATUS_OK; + + gcmkHEADER_ARG("Os=0x%X", Os); + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + + platform = Os->device->platform; + + if (platform && platform->ops->shrinkMemory) + { + status = platform->ops->shrinkMemory(platform); + } + else + { + gcmkFOOTER_NO(); + return gcvSTATUS_NOT_SUPPORTED; + } + + gcmkFOOTER_NO(); + return status; +} + +/******************************************************************************* +** +** gckOS_Construct +** +** Construct a new gckOS object. +** +** INPUT: +** +** gctPOINTER Context +** Pointer to the gckGALDEVICE class. +** +** OUTPUT: +** +** gckOS * Os +** Pointer to a variable that will hold the pointer to the gckOS object. +*/ +gceSTATUS +gckOS_Construct( + IN gctPOINTER Context, + OUT gckOS * Os + ) +{ + gckOS os; + gceSTATUS status; + + gcmkHEADER_ARG("Context=0x%X", Context); + + /* Verify the arguments. */ + gcmkVERIFY_ARGUMENT(Os != gcvNULL); + + /* Allocate the gckOS object. */ + os = (gckOS) kmalloc(gcmSIZEOF(struct _gckOS), GFP_KERNEL | gcdNOWARN); + + if (os == gcvNULL) + { + /* Out of memory. 
*/ + gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_MEMORY); + return gcvSTATUS_OUT_OF_MEMORY; + } + + /* Zero the memory. */ + gckOS_ZeroMemory(os, gcmSIZEOF(struct _gckOS)); + + /* Initialize the gckOS object. */ + os->object.type = gcvOBJ_OS; + + /* Set device device. */ + os->device = Context; + + /* Set allocateCount to 0, gckOS_Allocate has not been used yet. */ + atomic_set(&os->allocateCount, 0); + + /* Initialize the memory lock. */ + mutex_init(&os->mdlMutex); + + INIT_LIST_HEAD(&os->mdlHead); + + /* Get the kernel process ID. */ + os->kernelProcessID = _GetProcessID(); + + /* + * Initialize the signal manager. + */ + + /* Initialize mutex. */ + mutex_init(&os->signalMutex); + + /* Initialize signal id database lock. */ + spin_lock_init(&os->signalDB.lock); + + /* Initialize signal id database. */ + idr_init(&os->signalDB.idr); + + /* Create a workqueue for os timer. */ + os->workqueue = create_singlethread_workqueue("galcore workqueue"); + + if (os->workqueue == gcvNULL) + { + /* Out of memory. */ + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + os->paddingPage = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | gcdNOWARN); + if (os->paddingPage == gcvNULL) + { + /* Out of memory. */ + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + else + { + SetPageReserved(os->paddingPage); + } + + spin_lock_init(&os->registerAccessLock); + + gckOS_ImportAllocators(os); + +#ifdef CONFIG_IOMMU_SUPPORT + if (((gckGALDEVICE)(os->device))->args.mmu == gcvFALSE) + { + /* Only use IOMMU when internal MMU is not enabled. */ + status = gckIOMMU_Construct(os, &os->iommu); + + if (gcmIS_ERROR(status)) + { + gcmkTRACE_ZONE( + gcvLEVEL_INFO, gcvZONE_OS, + "%s(%d): Fail to setup IOMMU", + __FUNCTION__, __LINE__ + ); + } + } +#endif + + /* Return pointer to the gckOS object. */ + *Os = os; + + /* Success. */ + gcmkFOOTER_ARG("*Os=0x%X", *Os); + return gcvSTATUS_OK; + +OnError: + if (os->workqueue != gcvNULL) + { + destroy_workqueue(os->workqueue); + } + + kfree(os); + + /* Return the error. 
*/ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckOS_Destroy +** +** Destroy an gckOS object. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object that needs to be destroyed. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_Destroy( + IN gckOS Os + ) +{ + gcmkHEADER_ARG("Os=0x%X", Os); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + + if (Os->paddingPage != gcvNULL) + { + ClearPageReserved(Os->paddingPage); + __free_page(Os->paddingPage); + Os->paddingPage = gcvNULL; + } + + /* + * Destroy the signal manager. + */ + + /* Wait for all works done. */ + flush_workqueue(Os->workqueue); + + /* Destory work queue. */ + destroy_workqueue(Os->workqueue); + + gckOS_FreeAllocators(Os); + +#ifdef CONFIG_IOMMU_SUPPORT + if (Os->iommu) + { + gckIOMMU_Destory(Os, Os->iommu); + } +#endif + + /* Flush the debug cache. */ + gcmkDEBUGFLUSH(~0U); + + /* Mark the gckOS object as unknown. */ + Os->object.type = gcvOBJ_UNKNOWN; + + + /* Free the gckOS object. */ + kfree(Os); + + /* Success. 
*/ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gckOS_CreateKernelVirtualMapping( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + OUT gctPOINTER * Logical, + OUT gctSIZE_T * PageCount + ) +{ + gceSTATUS status; + PLINUX_MDL mdl = (PLINUX_MDL)Physical; + gckALLOCATOR allocator = mdl->allocator; + + gcmkHEADER(); + + *PageCount = mdl->numPages; + + gcmkONERROR(allocator->ops->MapKernel(allocator, mdl, Logical)); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckOS_DestroyKernelVirtualMapping( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + IN gctPOINTER Logical + ) +{ + PLINUX_MDL mdl = (PLINUX_MDL)Physical; + gckALLOCATOR allocator = mdl->allocator; + + gcmkHEADER(); + + allocator->ops->UnmapKernel(allocator, mdl, Logical); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gckOS_CreateUserVirtualMapping( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + OUT gctPOINTER * Logical, + OUT gctSIZE_T * PageCount + ) +{ + return gckOS_LockPages(Os, Physical, Bytes, gcvFALSE, Logical, PageCount); +} + +gceSTATUS +gckOS_DestroyUserVirtualMapping( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + IN gctPOINTER Logical + ) +{ + return gckOS_UnlockPages(Os, Physical, Bytes, Logical); +} + +/******************************************************************************* +** +** gckOS_Allocate +** +** Allocate memory. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctSIZE_T Bytes +** Number of bytes to allocate. +** +** OUTPUT: +** +** gctPOINTER * Memory +** Pointer to a variable that will hold the allocated memory location. +*/ +gceSTATUS +gckOS_Allocate( + IN gckOS Os, + IN gctSIZE_T Bytes, + OUT gctPOINTER * Memory + ) +{ + gceSTATUS status; + + gcmkHEADER_ARG("Os=0x%X Bytes=%lu", Os, Bytes); + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Bytes > 0); + gcmkVERIFY_ARGUMENT(Memory != gcvNULL); + + gcmkONERROR(gckOS_AllocateMemory(Os, Bytes, Memory)); + + /* Success. */ + gcmkFOOTER_ARG("*Memory=0x%X", *Memory); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckOS_Free +** +** Free allocated memory. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctPOINTER Memory +** Pointer to memory allocation to free. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_Free( + IN gckOS Os, + IN gctPOINTER Memory + ) +{ + gceSTATUS status; + + gcmkHEADER_ARG("Os=0x%X Memory=0x%X", Os, Memory); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Memory != gcvNULL); + + gcmkONERROR(gckOS_FreeMemory(Os, Memory)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckOS_AllocateMemory +** +** Allocate memory wrapper. +** +** INPUT: +** +** gctSIZE_T Bytes +** Number of bytes to allocate. +** +** OUTPUT: +** +** gctPOINTER * Memory +** Pointer to a variable that will hold the allocated memory location. +*/ +gceSTATUS +gckOS_AllocateMemory( + IN gckOS Os, + IN gctSIZE_T Bytes, + OUT gctPOINTER * Memory + ) +{ + gctPOINTER memory; + gceSTATUS status; + + gcmkHEADER_ARG("Os=0x%X Bytes=%lu", Os, Bytes); + + /* Verify the arguments. */ + gcmkVERIFY_ARGUMENT(Bytes > 0); + gcmkVERIFY_ARGUMENT(Memory != gcvNULL); + + if (Bytes > PAGE_SIZE) + { + memory = (gctPOINTER) vmalloc(Bytes); + } + else + { + memory = (gctPOINTER) kmalloc(Bytes, GFP_KERNEL | gcdNOWARN); + } + + if (memory == gcvNULL) + { + /* Out of memory. */ + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + /* Increase count. 
*/ + atomic_inc(&Os->allocateCount); + + /* Return pointer to the memory allocation. */ + *Memory = memory; + + /* Success. */ + gcmkFOOTER_ARG("*Memory=0x%X", *Memory); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckOS_FreeMemory +** +** Free allocated memory wrapper. +** +** INPUT: +** +** gctPOINTER Memory +** Pointer to memory allocation to free. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_FreeMemory( + IN gckOS Os, + IN gctPOINTER Memory + ) +{ + gcmkHEADER_ARG("Memory=0x%X", Memory); + + /* Verify the arguments. */ + gcmkVERIFY_ARGUMENT(Memory != gcvNULL); + + /* Free the memory from the OS pool. */ + if (is_vmalloc_addr(Memory)) + { + vfree(Memory); + } + else + { + kfree(Memory); + } + + /* Decrease count. */ + atomic_dec(&Os->allocateCount); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_MapMemory +** +** Map physical memory into the current process. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctPHYS_ADDR Physical +** Start of physical address memory. +** +** gctSIZE_T Bytes +** Number of bytes to map. +** +** OUTPUT: +** +** gctPOINTER * Memory +** Pointer to a variable that will hold the logical address of the +** mapped memory. +*/ +gceSTATUS +gckOS_MapMemory( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + OUT gctPOINTER * Logical + ) +{ + gceSTATUS status; + PLINUX_MDL_MAP mdlMap; + PLINUX_MDL mdl = (PLINUX_MDL) Physical; + gckALLOCATOR allocator; + gctINT pid = _GetProcessID(); + + gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Bytes); + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Physical != 0); + gcmkVERIFY_ARGUMENT(Bytes > 0); + gcmkVERIFY_ARGUMENT(Logical != gcvNULL); + + mutex_lock(&mdl->mapsMutex); + + mdlMap = FindMdlMap(mdl, pid); + + if (mdlMap == gcvNULL) + { + mdlMap = _CreateMdlMap(mdl, pid); + + if (mdlMap == gcvNULL) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + } + + if (mdlMap->vmaAddr == gcvNULL) + { + allocator = mdl->allocator; + + gcmkONERROR( + allocator->ops->MapUser(allocator, + mdl, mdlMap, + gcvFALSE)); + } + + mutex_unlock(&mdl->mapsMutex); + + *Logical = mdlMap->vmaAddr; + + gcmkFOOTER_ARG("*Logical=0x%X", *Logical); + return gcvSTATUS_OK; + +OnError: + mutex_unlock(&mdl->mapsMutex); + + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckOS_UnmapMemory +** +** Unmap physical memory out of the current process. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctPHYS_ADDR Physical +** Start of physical address memory. +** +** gctSIZE_T Bytes +** Number of bytes to unmap. +** +** gctPOINTER Memory +** Pointer to a previously mapped memory region. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_UnmapMemory( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + IN gctPOINTER Logical + ) +{ + gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu Logical=0x%X", + Os, Physical, Bytes, Logical); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Physical != 0); + gcmkVERIFY_ARGUMENT(Bytes > 0); + gcmkVERIFY_ARGUMENT(Logical != gcvNULL); + + gckOS_UnmapMemoryEx(Os, Physical, Bytes, Logical, _GetProcessID()); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + + +/******************************************************************************* +** +** gckOS_UnmapMemoryEx +** +** Unmap physical memory in the specified process. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. 
+** +** gctPHYS_ADDR Physical +** Start of physical address memory. +** +** gctSIZE_T Bytes +** Number of bytes to unmap. +** +** gctPOINTER Memory +** Pointer to a previously mapped memory region. +** +** gctUINT32 PID +** Pid of the process that opened the device and mapped this memory. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_UnmapMemoryEx( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + IN gctPOINTER Logical, + IN gctUINT32 PID + ) +{ + PLINUX_MDL_MAP mdlMap; + PLINUX_MDL mdl = (PLINUX_MDL)Physical; + + gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu Logical=0x%X PID=%d", + Os, Physical, Bytes, Logical, PID); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Physical != 0); + gcmkVERIFY_ARGUMENT(Bytes > 0); + gcmkVERIFY_ARGUMENT(Logical != gcvNULL); + gcmkVERIFY_ARGUMENT(PID != 0); + + if (Logical) + { + gckALLOCATOR allocator = mdl->allocator; + + mutex_lock(&mdl->mapsMutex); + + mdlMap = FindMdlMap(mdl, PID); + + if (mdlMap == gcvNULL || mdlMap->vmaAddr == gcvNULL) + { + mutex_unlock(&mdl->mapsMutex); + + gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT); + return gcvSTATUS_INVALID_ARGUMENT; + } + + BUG_ON(!allocator || !allocator->ops->UnmapUser); + + allocator->ops->UnmapUser(allocator, mdl, mdlMap, mdl->bytes); + + gcmkVERIFY_OK(_DestroyMdlMap(mdl, mdlMap)); + + mutex_unlock(&mdl->mapsMutex); + } + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_UnmapUserLogical +** +** Unmap user logical memory out of physical memory. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctPHYS_ADDR Physical +** Start of physical address memory. +** +** gctSIZE_T Bytes +** Number of bytes to unmap. +** +** gctPOINTER Memory +** Pointer to a previously mapped memory region. +** +** OUTPUT: +** +** Nothing. 
+*/ +gceSTATUS +gckOS_UnmapUserLogical( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + IN gctPOINTER Logical + ) +{ + gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu Logical=0x%X", + Os, Physical, Bytes, Logical); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Physical != 0); + gcmkVERIFY_ARGUMENT(Bytes > 0); + gcmkVERIFY_ARGUMENT(Logical != gcvNULL); + + gckOS_UnmapMemory(Os, Physical, Bytes, Logical); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +} + +/******************************************************************************* +** +** gckOS_AllocateNonPagedMemory +** +** Allocate a number of pages from non-paged memory. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctBOOL InUserSpace +** gcvTRUE if the pages need to be mapped into user space. +** +** gctUINT32 Flag +** Allocation attribute. +** +** gctSIZE_T * Bytes +** Pointer to a variable that holds the number of bytes to allocate. +** +** OUTPUT: +** +** gctSIZE_T * Bytes +** Pointer to a variable that hold the number of bytes allocated. +** +** gctPHYS_ADDR * Physical +** Pointer to a variable that will hold the physical address of the +** allocation. +** +** gctPOINTER * Logical +** Pointer to a variable that will hold the logical address of the +** allocation. +*/ +gceSTATUS +gckOS_AllocateNonPagedMemory( + IN gckOS Os, + IN gctBOOL InUserSpace, + IN gctUINT32 Flag, + IN OUT gctSIZE_T * Bytes, + OUT gctPHYS_ADDR * Physical, + OUT gctPOINTER * Logical + ) +{ + gctSIZE_T bytes; + gctINT numPages; + PLINUX_MDL mdl = gcvNULL; + PLINUX_MDL_MAP mdlMap = gcvNULL; + gctPOINTER addr; + gceSTATUS status = gcvSTATUS_NOT_SUPPORTED; + gckALLOCATOR allocator; + + gcmkHEADER_ARG("Os=0x%X InUserSpace=%d *Bytes=%lu", + Os, InUserSpace, gcmOPT_VALUE(Bytes)); + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Bytes != gcvNULL); + gcmkVERIFY_ARGUMENT(*Bytes > 0); + gcmkVERIFY_ARGUMENT(Physical != gcvNULL); + gcmkVERIFY_ARGUMENT(Logical != gcvNULL); + + /* Align number of bytes to page size. */ + bytes = gcmALIGN(*Bytes, PAGE_SIZE); + + /* Get total number of pages.. */ + numPages = GetPageCount(bytes, 0); + + /* Allocate mdl structure */ + mdl = _CreateMdl(Os); + if (mdl == gcvNULL) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + gcmkASSERT(Flag & gcvALLOC_FLAG_CONTIGUOUS); + + /* Walk all allocators. */ + list_for_each_entry(allocator, &Os->allocatorList, link) + { + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, + "%s(%d) flag = %x allocator->capability = %x", + __FUNCTION__, __LINE__, Flag, allocator->capability); + +#ifndef NO_DMA_COHERENT + /* Point to dma coherent allocator. */ + if (strcmp(allocator->name, "dma")) + { + /*!VIV: + * For historical issue, we force allocate all non-paged memory from + * dma coherent pool when it is not disabled. + * + * The code below changes the scheme a little: force allocate + * non-paged memory whose size is larger than 1 pages, can try other + * allocators otherwise. This is to save memory usage of dma + * coherent pool. + */ + if (((Flag & allocator->capability) != Flag) || + (numPages > 1)) + { + continue; + } + } +#else + if ((Flag & allocator->capability) != Flag) + { + continue; + } +#endif + status = allocator->ops->Alloc(allocator, mdl, numPages, Flag); + + if (gcmIS_SUCCESS(status)) + { + mdl->allocator = allocator; + break; + } + } + + /* Check status. */ + gcmkONERROR(status); + + mdl->cacheable = Flag & gcvALLOC_FLAG_CACHEABLE; + + mdl->bytes = bytes; + mdl->numPages = numPages; + + mdl->contiguous = gcvTRUE; + + gcmkONERROR(allocator->ops->MapKernel(allocator, mdl, &addr)); + + /* Trigger a page fault. 
*/ + memset(addr, 0, numPages * PAGE_SIZE); + + mdl->addr = addr; + + if (InUserSpace) + { + mdlMap = _CreateMdlMap(mdl, _GetProcessID()); + + if (mdlMap == gcvNULL) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + gcmkONERROR(allocator->ops->MapUser(allocator, mdl, mdlMap, gcvFALSE)); + + *Logical = mdlMap->vmaAddr; + } + else + { + *Logical = addr; + } + + /* + * Add this to a global list. + * Will be used by get physical address + * and mapuser pointer functions. + */ + mutex_lock(&Os->mdlMutex); + list_add_tail(&mdl->link, &Os->mdlHead); + mutex_unlock(&Os->mdlMutex); + + /* Return allocated memory. */ + *Bytes = bytes; + *Physical = (gctPHYS_ADDR) mdl; + + /* Success. */ + gcmkFOOTER_ARG("*Bytes=%lu *Physical=0x%X *Logical=0x%X", + *Bytes, *Physical, *Logical); + return gcvSTATUS_OK; + +OnError: + if (mdl != gcvNULL) + { + /* Free LINUX_MDL. */ + gcmkVERIFY_OK(_DestroyMdl(mdl)); + } + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + + +/******************************************************************************* +** +** gckOS_FreeNonPagedMemory +** +** Free previously allocated and mapped pages from non-paged memory. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctSIZE_T Bytes +** Number of bytes allocated. +** +** gctPHYS_ADDR Physical +** Physical address of the allocated memory. +** +** gctPOINTER Logical +** Logical address of the allocated memory. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS gckOS_FreeNonPagedMemory( + IN gckOS Os, + IN gctSIZE_T Bytes, + IN gctPHYS_ADDR Physical, + IN gctPOINTER Logical + ) +{ + PLINUX_MDL mdl = (PLINUX_MDL)Physical; + + gcmkHEADER_ARG("Os=0x%X Bytes=%lu Physical=0x%X Logical=0x%X", + Os, Bytes, Physical, Logical); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Bytes > 0); + gcmkVERIFY_ARGUMENT(Physical != 0); + gcmkVERIFY_ARGUMENT(Logical != gcvNULL); + + gcmkVERIFY_OK(_DestroyMdl(mdl)); + + /* Success. 
*/ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +static inline gckALLOCATOR +_FindAllocator( + gckOS Os, + gctUINT Flag + ) +{ + gckALLOCATOR allocator; + + list_for_each_entry(allocator, &Os->allocatorList, link) + { + if ((allocator->capability & Flag) == Flag) + { + return allocator; + } + } + + return gcvNULL; +} + +gceSTATUS +gckOS_RequestReservedMemory( + gckOS Os, + unsigned long Start, + unsigned long Size, + const char * Name, + gctBOOL Requested, + void ** MemoryHandle + ) +{ + PLINUX_MDL mdl = gcvNULL; + gceSTATUS status; + gckALLOCATOR allocator; + gcsATTACH_DESC desc; + + gcmkHEADER_ARG("start=0x%lx size=0x%lx name=%s", Start, Size, Name); + + /* Round up to page size. */ + Size = (Size + ~PAGE_MASK) & PAGE_MASK; + + mdl = _CreateMdl(Os); + if (!mdl) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + desc.reservedMem.start = Start; + desc.reservedMem.size = Size; + desc.reservedMem.name = Name; + desc.reservedMem.requested = Requested; + + allocator = _FindAllocator(Os, gcvALLOC_FLAG_LINUX_RESERVED_MEM); + if (!allocator) + { + gcmkPRINT("reserved-mem allocator not integrated!"); + gcmkONERROR(gcvSTATUS_GENERIC_IO); + } + + /* Call attach. */ + gcmkONERROR(allocator->ops->Attach(allocator, &desc, mdl)); + + /* Assign alloator. */ + mdl->allocator = allocator; + mdl->bytes = Size; + mdl->numPages = Size >> PAGE_SHIFT; + mdl->contiguous = gcvTRUE; + mdl->addr = gcvNULL; + mdl->dmaHandle = Start; + mdl->gid = 0; + + /* + * Add this to a global list. + * Will be used by get physical address + * and mapuser pointer functions. 
+ */ + mutex_lock(&Os->mdlMutex); + list_add_tail(&mdl->link, &Os->mdlHead); + mutex_unlock(&Os->mdlMutex); + + *MemoryHandle = (void *)mdl; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (mdl) + { + gcmkVERIFY_OK(_DestroyMdl(mdl)); + } + + gcmkFOOTER(); + return status; +} + +void +gckOS_ReleaseReservedMemory( + gckOS Os, + void * MemoryHandle + ) +{ + gckALLOCATOR allocator; + PLINUX_MDL mdl = (PLINUX_MDL)MemoryHandle; + + allocator = _FindAllocator(Os, gcvALLOC_FLAG_LINUX_RESERVED_MEM); + + /* If no allocator, how comes the memory? */ + BUG_ON(!allocator); + + allocator->ops->Free(allocator, mdl); +} + +/******************************************************************************* +** +** gckOS_ReadRegister +** +** Read data from a register. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctUINT32 Address +** Address of register. +** +** OUTPUT: +** +** gctUINT32 * Data +** Pointer to a variable that receives the data read from the register. +*/ +gceSTATUS +gckOS_ReadRegister( + IN gckOS Os, + IN gctUINT32 Address, + OUT gctUINT32 * Data + ) +{ + return gckOS_ReadRegisterEx(Os, gcvCORE_MAJOR, Address, Data); +} + +gceSTATUS +gckOS_ReadRegisterEx( + IN gckOS Os, + IN gceCORE Core, + IN gctUINT32 Address, + OUT gctUINT32 * Data + ) +{ + if (in_irq()) + { + uint32_t data; + + spin_lock(&Os->registerAccessLock); + + if (unlikely(Os->clockStates[Core] == gcvFALSE)) + { + spin_unlock(&Os->registerAccessLock); + + /* + * Read register when external clock off: + * 1. In shared IRQ, read register may be called and that's not our irq. + */ + return gcvSTATUS_GENERIC_IO; + } + + data = readl(Os->device->registerBases[Core]); + + if (unlikely((data & 0x3) == 0x3)) + { + spin_unlock(&Os->registerAccessLock); + + /* + * Read register when internal clock off: + * a. In shared IRQ, read register may be called and that's not our irq. + * b. 
In some condition, when ISR handled normal FE/PE, PM thread could + * trun off internal clock before ISR read register of async FE. And + * then IRQ handler will call read register with internal clock off. + * So here we just skip for such case. + */ + return gcvSTATUS_GENERIC_IO; + } + + *Data = readl((gctUINT8 *)Os->device->registerBases[Core] + Address); + spin_unlock(&Os->registerAccessLock); + } + else + { + unsigned long flags; + + spin_lock_irqsave(&Os->registerAccessLock, flags); + + if (unlikely(Os->clockStates[Core] == gcvFALSE)) + { + spin_unlock_irqrestore(&Os->registerAccessLock, flags); + + /* + * Read register when external clock off: + * 2. In non-irq context, register access should not be called, + * otherwise it's driver bug. + */ + printk(KERN_ERR "[galcore]: %s(%d) GPU[%d] external clock off", + __func__, __LINE__, Core); + gcmkBUG_ON(1); + return gcvSTATUS_GENERIC_IO; + } + + *Data = readl((gctUINT8 *)Os->device->registerBases[Core] + Address); + spin_unlock_irqrestore(&Os->registerAccessLock, flags); + +#if gcdDUMP_AHB_ACCESS + /* Dangerous to print in interrupt context, skip. */ + gcmkPRINT("@[RD %d] %08x %08x", Core, Address, *Data); +#endif + } + + /* Success. */ + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_WriteRegister +** +** Write data to a register. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctUINT32 Address +** Address of register. +** +** gctUINT32 Data +** Data for register. +** +** OUTPUT: +** +** Nothing. 
+*/ +gceSTATUS +gckOS_WriteRegister( + IN gckOS Os, + IN gctUINT32 Address, + IN gctUINT32 Data + ) +{ + return gckOS_WriteRegisterEx(Os, gcvCORE_MAJOR, Address, Data); +} + +gceSTATUS +gckOS_WriteRegisterEx( + IN gckOS Os, + IN gceCORE Core, + IN gctUINT32 Address, + IN gctUINT32 Data + ) +{ + if (in_irq()) + { + spin_lock(&Os->registerAccessLock); + + if (unlikely(Os->clockStates[Core] == gcvFALSE)) + { + spin_unlock(&Os->registerAccessLock); + + printk(KERN_ERR "[galcore]: %s(%d) GPU[%d] external clock off", + __func__, __LINE__, Core); + + /* Driver bug: register write when clock off. */ + gcmkBUG_ON(1); + return gcvSTATUS_GENERIC_IO; + } + + writel(Data, (gctUINT8 *)Os->device->registerBases[Core] + Address); + spin_unlock(&Os->registerAccessLock); + } + else + { + unsigned long flags; + + spin_lock_irqsave(&Os->registerAccessLock, flags); + + if (unlikely(Os->clockStates[Core] == gcvFALSE)) + { + spin_unlock_irqrestore(&Os->registerAccessLock, flags); + + printk(KERN_ERR "[galcore]: %s(%d) GPU[%d] external clock off", + __func__, __LINE__, Core); + + /* Driver bug: register write when clock off. */ + gcmkBUG_ON(1); + return gcvSTATUS_GENERIC_IO; + } + + writel(Data, (gctUINT8 *)Os->device->registerBases[Core] + Address); + spin_unlock_irqrestore(&Os->registerAccessLock, flags); + +#if gcdDUMP_AHB_ACCESS + /* Dangerous to print in interrupt context, skip. */ + gcmkPRINT("@[WR %d] %08x %08x", Core, Address, Data); +#endif + } + + /* Success. */ + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_GetPageSize +** +** Get the system's page size. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** OUTPUT: +** +** gctSIZE_T * PageSize +** Pointer to a variable that will receive the system's page size. +*/ +gceSTATUS gckOS_GetPageSize( + IN gckOS Os, + OUT gctSIZE_T * PageSize + ) +{ + gcmkHEADER_ARG("Os=0x%X", Os); + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(PageSize != gcvNULL); + + /* Return the page size. */ + *PageSize = (gctSIZE_T) PAGE_SIZE; + + /* Success. */ + gcmkFOOTER_ARG("*PageSize=%d", *PageSize); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_GetPhysicalAddressProcess +** +** Get the physical system address of a corresponding virtual address for a +** given process. +** +** INPUT: +** +** gckOS Os +** Pointer to gckOS object. +** +** gctPOINTER Logical +** Logical address. +** +** gctUINT32 ProcessID +** Process ID. +** +** OUTPUT: +** +** gctUINT32 * Address +** Poinetr to a variable that receives the 32-bit physical adress. +*/ +static gceSTATUS +_GetPhysicalAddressProcess( + IN gckOS Os, + IN gctPOINTER Logical, + IN gctUINT32 ProcessID, + OUT gctPHYS_ADDR_T * Address + ) +{ + PLINUX_MDL mdl; + gceSTATUS status = gcvSTATUS_INVALID_ADDRESS; + + gcmkHEADER_ARG("Os=0x%X Logical=0x%X ProcessID=%d", Os, Logical, ProcessID); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Address != gcvNULL); + + mutex_lock(&Os->mdlMutex); + + if (Os->device->contiguousPhysical) + { + /* Try the contiguous memory pool. */ + mdl = (PLINUX_MDL) Os->device->contiguousPhysical; + + mutex_lock(&mdl->mapsMutex); + + status = _ConvertLogical2Physical(Os, Logical, ProcessID, mdl, Address); + + mutex_unlock(&mdl->mapsMutex); + } + + if (gcmIS_ERROR(status)) + { + /* Walk all MDLs. */ + list_for_each_entry(mdl, &Os->mdlHead, link) + { + mutex_lock(&mdl->mapsMutex); + + status = _ConvertLogical2Physical(Os, Logical, ProcessID, mdl, Address); + + mutex_unlock(&mdl->mapsMutex); + + if (gcmIS_SUCCESS(status)) + { + break; + } + } + } + + mutex_unlock(&Os->mdlMutex); + + gcmkONERROR(status); + + /* Success. */ + gcmkFOOTER_ARG("*Address=%p", *Address); + return gcvSTATUS_OK; + +OnError: + /* Return the status. 
*/ + gcmkFOOTER(); + return status; +} + + + +/******************************************************************************* +** +** gckOS_GetPhysicalAddress +** +** Get the physical system address of a corresponding virtual address. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctPOINTER Logical +** Logical address. +** +** OUTPUT: +** +** gctUINT32 * Address +** Poinetr to a variable that receives the 32-bit physical adress. +*/ +gceSTATUS +gckOS_GetPhysicalAddress( + IN gckOS Os, + IN gctPOINTER Logical, + OUT gctPHYS_ADDR_T * Address + ) +{ + gceSTATUS status; + gctUINT32 processID; + + gcmkHEADER_ARG("Os=0x%X Logical=0x%X", Os, Logical); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Address != gcvNULL); + + /* Query page table of current process first. */ + status = _QueryProcessPageTable(Logical, Address); + + if (gcmIS_ERROR(status)) + { + /* Get current process ID. */ + processID = _GetProcessID(); + + /* Route through other function. */ + gcmkONERROR( + _GetPhysicalAddressProcess(Os, Logical, processID, Address)); + } + + /* Success. */ + gcmkFOOTER_ARG("*Address=%p", *Address); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckOS_UserLogicalToPhysical +** +** Get the physical system address of a corresponding user virtual address. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctPOINTER Logical +** Logical address. +** +** OUTPUT: +** +** gctUINT32 * Address +** Pointer to a variable that receives the 32-bit physical address. 
+*/ +gceSTATUS gckOS_UserLogicalToPhysical( + IN gckOS Os, + IN gctPOINTER Logical, + OUT gctPHYS_ADDR_T * Address + ) +{ + return gckOS_GetPhysicalAddress(Os, Logical, Address); +} + +#if gcdSECURE_USER +static gceSTATUS +gckOS_AddMapping( + IN gckOS Os, + IN gctUINT32 Physical, + IN gctPOINTER Logical, + IN gctSIZE_T Bytes + ) +{ + gceSTATUS status; + gcsUSER_MAPPING_PTR map; + + gcmkHEADER_ARG("Os=0x%X Physical=0x%X Logical=0x%X Bytes=%lu", + Os, Physical, Logical, Bytes); + + gcmkONERROR(gckOS_Allocate(Os, + gcmSIZEOF(gcsUSER_MAPPING), + (gctPOINTER *) &map)); + + map->next = Os->userMap; + map->physical = Physical - Os->device->baseAddress; + map->logical = Logical; + map->bytes = Bytes; + map->start = (gctINT8_PTR) Logical; + map->end = map->start + Bytes; + + Os->userMap = map; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +static gceSTATUS +gckOS_RemoveMapping( + IN gckOS Os, + IN gctPOINTER Logical, + IN gctSIZE_T Bytes + ) +{ + gceSTATUS status; + gcsUSER_MAPPING_PTR map, prev; + + gcmkHEADER_ARG("Os=0x%X Logical=0x%X Bytes=%lu", Os, Logical, Bytes); + + for (map = Os->userMap, prev = gcvNULL; map != gcvNULL; map = map->next) + { + if ((map->logical == Logical) && (map->bytes == Bytes)) + { + break; + } + + prev = map; + } + + if (map == gcvNULL) + { + gcmkONERROR(gcvSTATUS_INVALID_ADDRESS); + } + + if (prev == gcvNULL) + { + Os->userMap = map->next; + } + else + { + prev->next = map->next; + } + + gcmkONERROR(gcmkOS_SAFE_FREE(Os, map)); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} +#endif + +gceSTATUS +_ConvertLogical2Physical( + IN gckOS Os, + IN gctPOINTER Logical, + IN gctUINT32 ProcessID, + IN PLINUX_MDL Mdl, + OUT gctPHYS_ADDR_T * Physical + ) +{ + gckALLOCATOR allocator = Mdl->allocator; + gctUINT32 offset; + gceSTATUS status = gcvSTATUS_NOT_FOUND; + gctINT8_PTR vBase; + + /* TASK_SIZE is userspace - kernelspace virtual memory split. 
*/ + if ((gctUINTPTR_T)Logical >= TASK_SIZE) + { + /* Kernel virtual address. */ + vBase = Mdl->addr; + } + else + { + /* User virtual address. */ + PLINUX_MDL_MAP map; + + map = FindMdlMap(Mdl, (gctINT) ProcessID); + vBase = (map == gcvNULL) ? gcvNULL : (gctINT8_PTR) map->vmaAddr; + } + + /* Is the given address within that range. */ + if ((vBase != gcvNULL) + && ((gctINT8_PTR) Logical >= vBase) + && ((gctINT8_PTR) Logical < vBase + Mdl->bytes) + ) + { + offset = (gctINT8_PTR) Logical - vBase; + + allocator->ops->Physical(allocator, Mdl, offset, Physical); + + status = gcvSTATUS_OK; + } + + return status; +} + +/******************************************************************************* +** +** gckOS_MapPhysical +** +** Map a physical address into kernel space. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctUINT32 Physical +** Physical address of the memory to map. +** +** gctSIZE_T Bytes +** Number of bytes to map. +** +** OUTPUT: +** +** gctPOINTER * Logical +** Pointer to a variable that receives the base address of the mapped +** memory. +*/ +gceSTATUS +gckOS_MapPhysical( + IN gckOS Os, + IN gctUINT32 Physical, + IN gctSIZE_T Bytes, + OUT gctPOINTER * Logical + ) +{ + gctPOINTER logical; + PLINUX_MDL mdl; + gctBOOL found = gcvFALSE; + gctUINT32 physical = Physical; + + gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Bytes); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Bytes > 0); + gcmkVERIFY_ARGUMENT(Logical != gcvNULL); + + mutex_lock(&Os->mdlMutex); + + /* Go through our mapping to see if we know this physical address already. 
*/ + list_for_each_entry(mdl, &Os->mdlHead, link) + { + if (mdl->dmaHandle != 0) + { + if ((physical >= mdl->dmaHandle) + && (physical < mdl->dmaHandle + mdl->bytes) + ) + { + *Logical = mdl->addr + (physical - mdl->dmaHandle); + found = gcvTRUE; + break; + } + } + } + + mutex_unlock(&Os->mdlMutex); + + if (!found) + { + unsigned long pfn = physical >> PAGE_SHIFT; + + if (pfn_valid(pfn)) + { + gctUINT32 offset = physical & ~PAGE_MASK; + struct page ** pages; + struct page * page; + gctUINT numPages; + gctINT i; + pgprot_t pgprot; + + numPages = GetPageCount(PAGE_ALIGN(offset + Bytes), 0); + + pages = kmalloc(sizeof(struct page *) * numPages, GFP_KERNEL | gcdNOWARN); + + if (!pages) + { + gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_MEMORY); + return gcvSTATUS_OUT_OF_MEMORY; + } + + page = pfn_to_page(pfn); + + for (i = 0; i < numPages; i++) + { + pages[i] = nth_page(page, i); + } + +#if gcdENABLE_BUFFERABLE_VIDEO_MEMORY + pgprot = pgprot_writecombine(PAGE_KERNEL); +#else + pgprot = pgprot_noncached(PAGE_KERNEL); +#endif + + logical = vmap(pages, numPages, 0, pgprot); + + kfree(pages); + + if (logical == gcvNULL) + { + gcmkTRACE_ZONE( + gcvLEVEL_INFO, gcvZONE_OS, + "%s(%d): Failed to vmap", + __FUNCTION__, __LINE__ + ); + + /* Out of resources. */ + gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES); + return gcvSTATUS_OUT_OF_RESOURCES; + } + + logical += offset; + } + else + { + /* Map memory as cached memory. */ + request_mem_region(physical, Bytes, "MapRegion"); + logical = (gctPOINTER) ioremap_nocache(physical, Bytes); + + if (logical == gcvNULL) + { + gcmkTRACE_ZONE( + gcvLEVEL_INFO, gcvZONE_OS, + "%s(%d): Failed to ioremap", + __FUNCTION__, __LINE__ + ); + + /* Out of resources. */ + gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES); + return gcvSTATUS_OUT_OF_RESOURCES; + } + } + + /* Return pointer to mapped memory. */ + *Logical = logical; + } + + /* Success. 
*/ + gcmkFOOTER_ARG("*Logical=0x%X", *Logical); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_UnmapPhysical +** +** Unmap a previously mapped memory region from kernel memory. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctPOINTER Logical +** Pointer to the base address of the memory to unmap. +** +** gctSIZE_T Bytes +** Number of bytes to unmap. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_UnmapPhysical( + IN gckOS Os, + IN gctPOINTER Logical, + IN gctSIZE_T Bytes + ) +{ + PLINUX_MDL mdl; + gctBOOL found = gcvFALSE; + + gcmkHEADER_ARG("Os=0x%X Logical=0x%X Bytes=%lu", Os, Logical, Bytes); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Logical != gcvNULL); + gcmkVERIFY_ARGUMENT(Bytes > 0); + + mutex_lock(&Os->mdlMutex); + + list_for_each_entry(mdl, &Os->mdlHead, link) + { + if (mdl->addr != gcvNULL) + { + if ((Logical >= (gctPOINTER)mdl->addr) && + (Logical < (gctPOINTER)((gctSTRING)mdl->addr + mdl->bytes))) + { + found = gcvTRUE; + break; + } + } + } + + mutex_unlock(&Os->mdlMutex); + + if (!found) + { + /* Unmap the memory. */ + vunmap((void *)((unsigned long)Logical & PAGE_MASK)); + } + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_DeleteMutex +** +** Delete a mutex. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctPOINTER Mutex +** Pointer to the mute to be deleted. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_DeleteMutex( + IN gckOS Os, + IN gctPOINTER Mutex + ) +{ + gceSTATUS status; + + gcmkHEADER_ARG("Os=0x%X Mutex=0x%X", Os, Mutex); + + /* Validate the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Mutex != gcvNULL); + + /* Destroy the mutex. */ + mutex_destroy((struct mutex *)Mutex); + + /* Free the mutex structure. 
*/ + gcmkONERROR(gckOS_Free(Os, Mutex)); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckOS_AcquireMutex +** +** Acquire a mutex. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctPOINTER Mutex +** Pointer to the mutex to be acquired. +** +** gctUINT32 Timeout +** Timeout value specified in milliseconds. +** Specify the value of gcvINFINITE to keep the thread suspended +** until the mutex has been acquired. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_AcquireMutex( + IN gckOS Os, + IN gctPOINTER Mutex, + IN gctUINT32 Timeout + ) +{ + gcmkHEADER_ARG("Os=0x%X Mutex=0x%0x Timeout=%u", Os, Mutex, Timeout); + + /* Validate the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Mutex != gcvNULL); + + if (Timeout == gcvINFINITE) + { + /* Lock the mutex. */ + mutex_lock(Mutex); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + } + + for (;;) + { + /* Try to acquire the mutex. */ + if (mutex_trylock(Mutex)) + { + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + } + + if (Timeout-- == 0) + { + break; + } + + /* Wait for 1 millisecond. */ + gcmkVERIFY_OK(gckOS_Delay(Os, 1)); + } + + /* Timeout. */ + gcmkFOOTER_ARG("status=%d", gcvSTATUS_TIMEOUT); + return gcvSTATUS_TIMEOUT; +} + +/******************************************************************************* +** +** gckOS_ReleaseMutex +** +** Release an acquired mutex. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctPOINTER Mutex +** Pointer to the mutex to be released. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_ReleaseMutex( + IN gckOS Os, + IN gctPOINTER Mutex + ) +{ + gcmkHEADER_ARG("Os=0x%X Mutex=0x%0x", Os, Mutex); + + /* Validate the arguments. 
*/ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Mutex != gcvNULL); + + /* Release the mutex. */ + mutex_unlock(Mutex); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_AtomicExchange +** +** Atomically exchange a pair of 32-bit values. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** IN OUT gctINT32_PTR Target +** Pointer to the 32-bit value to exchange. +** +** IN gctINT32 NewValue +** Specifies a new value for the 32-bit value pointed to by Target. +** +** OUT gctINT32_PTR OldValue +** The old value of the 32-bit value pointed to by Target. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_AtomicExchange( + IN gckOS Os, + IN OUT gctUINT32_PTR Target, + IN gctUINT32 NewValue, + OUT gctUINT32_PTR OldValue + ) +{ + /* Exchange the pair of 32-bit values. */ + *OldValue = (gctUINT32) atomic_xchg((atomic_t *) Target, (int) NewValue); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_AtomicExchangePtr +** +** Atomically exchange a pair of pointers. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** IN OUT gctPOINTER * Target +** Pointer to the 32-bit value to exchange. +** +** IN gctPOINTER NewValue +** Specifies a new value for the pointer pointed to by Target. +** +** OUT gctPOINTER * OldValue +** The old value of the pointer pointed to by Target. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_AtomicExchangePtr( + IN gckOS Os, + IN OUT gctPOINTER * Target, + IN gctPOINTER NewValue, + OUT gctPOINTER * OldValue + ) +{ + /* Exchange the pair of pointers. 
*/ + *OldValue = (gctPOINTER)(gctUINTPTR_T) atomic_xchg((atomic_t *) Target, (int)(gctUINTPTR_T) NewValue); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_AtomicSetMask +** +** Atomically set mask to Atom +** +** INPUT: +** IN OUT gctPOINTER Atom +** Pointer to the atom to set. +** +** IN gctUINT32 Mask +** Mask to set. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_AtomSetMask( + IN gctPOINTER Atom, + IN gctUINT32 Mask + ) +{ + gctUINT32 oval, nval; + do + { + oval = atomic_read((atomic_t *) Atom); + nval = oval | Mask; + } + while (atomic_cmpxchg((atomic_t *) Atom, oval, nval) != oval); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_AtomClearMask +** +** Atomically clear mask from Atom +** +** INPUT: +** IN OUT gctPOINTER Atom +** Pointer to the atom to clear. +** +** IN gctUINT32 Mask +** Mask to clear. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_AtomClearMask( + IN gctPOINTER Atom, + IN gctUINT32 Mask + ) +{ + gctUINT32 oval, nval; + + do + { + oval = atomic_read((atomic_t *) Atom); + nval = oval & ~Mask; + } + while (atomic_cmpxchg((atomic_t *) Atom, oval, nval) != oval); + + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_AtomConstruct +** +** Create an atom. +** +** INPUT: +** +** gckOS Os +** Pointer to a gckOS object. +** +** OUTPUT: +** +** gctPOINTER * Atom +** Pointer to a variable receiving the constructed atom. +*/ +gceSTATUS +gckOS_AtomConstruct( + IN gckOS Os, + OUT gctPOINTER * Atom + ) +{ + gceSTATUS status; + + gcmkHEADER_ARG("Os=0x%X", Os); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Atom != gcvNULL); + + /* Allocate the atom. */ + gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(atomic_t), Atom)); + + /* Initialize the atom. 
*/ + atomic_set((atomic_t *) *Atom, 0); + + /* Success. */ + gcmkFOOTER_ARG("*Atom=0x%X", *Atom); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckOS_AtomDestroy +** +** Destroy an atom. +** +** INPUT: +** +** gckOS Os +** Pointer to a gckOS object. +** +** gctPOINTER Atom +** Pointer to the atom to destroy. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_AtomDestroy( + IN gckOS Os, + OUT gctPOINTER Atom + ) +{ + gceSTATUS status; + + gcmkHEADER_ARG("Os=0x%X Atom=0x%0x", Os, Atom); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Atom != gcvNULL); + + /* Free the atom. */ + gcmkONERROR(gcmkOS_SAFE_FREE(Os, Atom)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckOS_AtomGet +** +** Get the 32-bit value protected by an atom. +** +** INPUT: +** +** gckOS Os +** Pointer to a gckOS object. +** +** gctPOINTER Atom +** Pointer to the atom. +** +** OUTPUT: +** +** gctINT32_PTR Value +** Pointer to a variable the receives the value of the atom. +*/ +gceSTATUS +gckOS_AtomGet( + IN gckOS Os, + IN gctPOINTER Atom, + OUT gctINT32_PTR Value + ) +{ + /* Return the current value of atom. */ + *Value = atomic_read((atomic_t *) Atom); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_AtomSet +** +** Set the 32-bit value protected by an atom. +** +** INPUT: +** +** gckOS Os +** Pointer to a gckOS object. +** +** gctPOINTER Atom +** Pointer to the atom. +** +** gctINT32 Value +** The value of the atom. +** +** OUTPUT: +** +** Nothing. 
+*/ +gceSTATUS +gckOS_AtomSet( + IN gckOS Os, + IN gctPOINTER Atom, + IN gctINT32 Value + ) +{ + /* Set the current value of atom. */ + atomic_set((atomic_t *) Atom, Value); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_AtomIncrement +** +** Atomically increment the 32-bit integer value inside an atom. +** +** INPUT: +** +** gckOS Os +** Pointer to a gckOS object. +** +** gctPOINTER Atom +** Pointer to the atom. +** +** OUTPUT: +** +** gctINT32_PTR Value +** Pointer to a variable that receives the original value of the atom. +*/ +gceSTATUS +gckOS_AtomIncrement( + IN gckOS Os, + IN gctPOINTER Atom, + OUT gctINT32_PTR Value + ) +{ + /* Increment the atom. */ + *Value = atomic_inc_return((atomic_t *) Atom) - 1; + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_AtomDecrement +** +** Atomically decrement the 32-bit integer value inside an atom. +** +** INPUT: +** +** gckOS Os +** Pointer to a gckOS object. +** +** gctPOINTER Atom +** Pointer to the atom. +** +** OUTPUT: +** +** gctINT32_PTR Value +** Pointer to a variable that receives the original value of the atom. +*/ +gceSTATUS +gckOS_AtomDecrement( + IN gckOS Os, + IN gctPOINTER Atom, + OUT gctINT32_PTR Value + ) +{ + /* Decrement the atom. */ + *Value = atomic_dec_return((atomic_t *) Atom) + 1; + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_Delay +** +** Delay execution of the current thread for a number of milliseconds. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctUINT32 Delay +** Delay to sleep, specified in milliseconds. +** +** OUTPUT: +** +** Nothing. 
+*/ +gceSTATUS +gckOS_Delay( + IN gckOS Os, + IN gctUINT32 Delay + ) +{ + gcmkHEADER_ARG("Os=0x%X Delay=%u", Os, Delay); + + if (Delay > 0) + { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) + ktime_t delay = ktime_set((Delay / MSEC_PER_SEC), (Delay % MSEC_PER_SEC) * NSEC_PER_MSEC); + __set_current_state(TASK_UNINTERRUPTIBLE); + schedule_hrtimeout(&delay, HRTIMER_MODE_REL); +#else + msleep(Delay); +#endif + } + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_GetTicks +** +** Get the number of milliseconds since the system started. +** +** INPUT: +** +** OUTPUT: +** +** gctUINT32_PTR Time +** Pointer to a variable to get time. +** +*/ +gceSTATUS +gckOS_GetTicks( + OUT gctUINT32_PTR Time + ) +{ + gcmkHEADER(); + + *Time = jiffies_to_msecs(jiffies); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_TicksAfter +** +** Compare time values got from gckOS_GetTicks. +** +** INPUT: +** gctUINT32 Time1 +** First time value to be compared. +** +** gctUINT32 Time2 +** Second time value to be compared. +** +** OUTPUT: +** +** gctBOOL_PTR IsAfter +** Pointer to a variable to result. +** +*/ +gceSTATUS +gckOS_TicksAfter( + IN gctUINT32 Time1, + IN gctUINT32 Time2, + OUT gctBOOL_PTR IsAfter + ) +{ + gcmkHEADER(); + + *IsAfter = time_after((unsigned long)Time1, (unsigned long)Time2); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_GetTime +** +** Get the number of microseconds since the system started. +** +** INPUT: +** +** OUTPUT: +** +** gctUINT64_PTR Time +** Pointer to a variable to get time. +** +*/ +gceSTATUS +gckOS_GetTime( + OUT gctUINT64_PTR Time + ) +{ + struct timespec64 ts; + gcmkHEADER(); + + /* Return the time of day in microseconds. 
*/ + ktime_get_real_ts64(&ts); + *Time = (ts.tv_sec * 1000000ULL) + ts.tv_nsec / NSEC_PER_USEC; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_MemoryBarrier +** +** Make sure the CPU has executed everything up to this point and the data got +** written to the specified pointer. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctPOINTER Address +** Address of memory that needs to be barriered. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_MemoryBarrier( + IN gckOS Os, + IN gctPOINTER Address + ) +{ + _MemoryBarrier(); + + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_AllocatePagedMemory +** +** Allocate memory from the paged pool. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctSIZE_T Bytes +** Number of bytes to allocate. +** +** OUTPUT: +** +** gctPHYS_ADDR * Physical +** Pointer to a variable that receives the physical address of the +** memory allocation. +*/ +gceSTATUS +gckOS_AllocatePagedMemory( + IN gckOS Os, + IN gctSIZE_T Bytes, + OUT gctPHYS_ADDR * Physical + ) +{ + gceSTATUS status; + + gcmkHEADER_ARG("Os=0x%X Bytes=%lu", Os, Bytes); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Bytes > 0); + gcmkVERIFY_ARGUMENT(Physical != gcvNULL); + + /* Allocate the memory. */ + gcmkONERROR(gckOS_AllocatePagedMemoryEx(Os, gcvALLOC_FLAG_NONE, Bytes, gcvNULL, Physical)); + + /* Success. */ + gcmkFOOTER_ARG("*Physical=0x%X", *Physical); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckOS_AllocatePagedMemoryEx +** +** Allocate memory from the paged pool. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. 
+** +** gctUINT32 Flag +** Allocation attribute. +** +** gctSIZE_T Bytes +** Number of bytes to allocate. +** +** OUTPUT: +** +** gctUINT32 * Gid +** Save the global ID for the piece of allocated memory. +** +** gctPHYS_ADDR * Physical +** Pointer to a variable that receives the physical address of the +** memory allocation. +*/ +gceSTATUS +gckOS_AllocatePagedMemoryEx( + IN gckOS Os, + IN gctUINT32 Flag, + IN gctSIZE_T Bytes, + OUT gctUINT32 * Gid, + OUT gctPHYS_ADDR * Physical + ) +{ + gctINT numPages; + PLINUX_MDL mdl = gcvNULL; + gctSIZE_T bytes; + gceSTATUS status = gcvSTATUS_NOT_SUPPORTED; + gckALLOCATOR allocator; + + gcmkHEADER_ARG("Os=0x%X Flag=%x Bytes=%lu", Os, Flag, Bytes); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Bytes > 0); + gcmkVERIFY_ARGUMENT(Physical != gcvNULL); + + bytes = gcmALIGN(Bytes, PAGE_SIZE); + + numPages = GetPageCount(bytes, 0); + + mdl = _CreateMdl(Os); + if (mdl == gcvNULL) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + /* Walk all allocators. */ + list_for_each_entry(allocator, &Os->allocatorList, link) + { + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, + "%s(%d) flag = %x allocator->capability = %x", + __FUNCTION__, __LINE__, Flag, allocator->capability); + + if ((Flag & allocator->capability) != Flag) + { + continue; + } + + status = allocator->ops->Alloc(allocator, mdl, numPages, Flag); + + if (gcmIS_SUCCESS(status)) + { + mdl->allocator = allocator; + break; + } + } + + /* Check status. */ + gcmkONERROR(status); + + mdl->dmaHandle = 0; + mdl->addr = 0; + mdl->bytes = bytes; + mdl->numPages = numPages; + mdl->contiguous = Flag & gcvALLOC_FLAG_CONTIGUOUS; + mdl->cacheable = Flag & gcvALLOC_FLAG_CACHEABLE; + + if (Gid != gcvNULL) + { + *Gid = mdl->gid; + } + + /* + * Add this to a global list. + * Will be used by get physical address + * and mapuser pointer functions. 
+ */ + mutex_lock(&Os->mdlMutex); + list_add_tail(&mdl->link, &Os->mdlHead); + mutex_unlock(&Os->mdlMutex); + + /* Return physical address. */ + *Physical = (gctPHYS_ADDR) mdl; + + /* Success. */ + gcmkFOOTER_ARG("*Physical=0x%X", *Physical); + return gcvSTATUS_OK; + +OnError: + if (mdl != gcvNULL) + { + /* Free the memory. */ + _DestroyMdl(mdl); + } + + /* Return the status. */ + gcmkFOOTER_ARG("Os=0x%X Flag=%x Bytes=%lu", Os, Flag, Bytes); + return status; +} + +/******************************************************************************* +** +** gckOS_FreePagedMemory +** +** Free memory allocated from the paged pool. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctPHYS_ADDR Physical +** Physical address of the allocation. +** +** gctSIZE_T Bytes +** Number of bytes of the allocation. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_FreePagedMemory( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes + ) +{ + PLINUX_MDL mdl = (PLINUX_MDL)Physical; + + gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Bytes); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Physical != gcvNULL); + gcmkVERIFY_ARGUMENT(Bytes > 0); + + /* Free the structure... */ + gcmkVERIFY_OK(_DestroyMdl(mdl)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_LockPages +** +** Lock memory allocated from the paged pool. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctPHYS_ADDR Physical +** Physical address of the allocation. +** +** gctSIZE_T Bytes +** Number of bytes of the allocation. +** +** gctBOOL Cacheable +** Cache mode of mapping. +** +** OUTPUT: +** +** gctPOINTER * Logical +** Pointer to a variable that receives the address of the mapped +** memory. 
+** +** gctSIZE_T * PageCount +** Pointer to a variable that receives the number of pages required for +** the page table according to the GPU page size. +*/ +gceSTATUS +gckOS_LockPages( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + IN gctBOOL Cacheable, + OUT gctPOINTER * Logical, + OUT gctSIZE_T * PageCount + ) +{ + gceSTATUS status; + PLINUX_MDL mdl; + PLINUX_MDL_MAP mdlMap; + gckALLOCATOR allocator; + + gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Logical); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Physical != gcvNULL); + gcmkVERIFY_ARGUMENT(Logical != gcvNULL); + gcmkVERIFY_ARGUMENT(PageCount != gcvNULL); + + mdl = (PLINUX_MDL) Physical; + allocator = mdl->allocator; + + mutex_lock(&mdl->mapsMutex); + + mdlMap = FindMdlMap(mdl, _GetProcessID()); + + if (mdlMap == gcvNULL) + { + mdlMap = _CreateMdlMap(mdl, _GetProcessID()); + + if (mdlMap == gcvNULL) + { + mutex_unlock(&mdl->mapsMutex); + + gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY); + return gcvSTATUS_OUT_OF_MEMORY; + } + } + + if (mdlMap->vmaAddr == gcvNULL) + { + status = allocator->ops->MapUser(allocator, mdl, mdlMap, Cacheable); + + if (gcmIS_ERROR(status)) + { + mutex_unlock(&mdl->mapsMutex); + + gcmkFOOTER_ARG("*status=%d", status); + return status; + } + } + + mdlMap->count++; + + /* Convert pointer to MDL. */ + *Logical = mdlMap->vmaAddr; + + /* Return the page number according to the GPU page size. */ + gcmkASSERT((PAGE_SIZE % 4096) == 0); + gcmkASSERT((PAGE_SIZE / 4096) >= 1); + + *PageCount = mdl->numPages * (PAGE_SIZE / 4096); + + mutex_unlock(&mdl->mapsMutex); + + /* Success. */ + gcmkFOOTER_ARG("*Logical=0x%X *PageCount=%lu", *Logical, *PageCount); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_MapPages +** +** Map paged memory into a page table. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. 
+** +** gctPHYS_ADDR Physical +** Physical address of the allocation. +** +** gctSIZE_T PageCount +** Number of pages required for the physical address. +** +** gctPOINTER PageTable +** Pointer to the page table to fill in. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_MapPages( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T PageCount, + IN gctPOINTER PageTable + ) +{ + return gcvSTATUS_NOT_SUPPORTED; +} + +gceSTATUS +gckOS_MapPagesEx( + IN gckOS Os, + IN gceCORE Core, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T PageCount, + IN gctUINT32 Address, + IN gctPOINTER PageTable, + IN gctBOOL Writable, + IN gceSURF_TYPE Type + ) +{ + gceSTATUS status = gcvSTATUS_OK; + PLINUX_MDL mdl; + gctUINT32* table; + gctUINT32 offset = 0; + +#if gcdPROCESS_ADDRESS_SPACE + gckKERNEL kernel = Os->device->kernels[Core]; + gckMMU mmu; +#endif + + gctUINT32 bytes = PageCount * 4; + + gckALLOCATOR allocator; + + gctUINT32 policyID = 0; + gctUINT32 axiConfig = 0; + + gcsPLATFORM * platform = Os->device->platform; + + gcmkHEADER_ARG("Os=0x%X Core=%d Physical=0x%X PageCount=%u PageTable=0x%X", + Os, Core, Physical, PageCount, PageTable); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Physical != gcvNULL); + gcmkVERIFY_ARGUMENT(PageCount > 0); + gcmkVERIFY_ARGUMENT(PageTable != gcvNULL); + + /* Convert pointer to MDL. */ + mdl = (PLINUX_MDL)Physical; + + allocator = mdl->allocator; + + gcmkASSERT(allocator != gcvNULL); + + gcmkTRACE_ZONE( + gcvLEVEL_INFO, gcvZONE_OS, + "%s(%d): Physical->0x%X PageCount->0x%X", + __FUNCTION__, __LINE__, + (gctUINT32)(gctUINTPTR_T)Physical, + (gctUINT32)(gctUINTPTR_T)PageCount + ); + +#if gcdPROCESS_ADDRESS_SPACE + gcmkONERROR(gckKERNEL_GetProcessMMU(kernel, &mmu)); +#endif + + table = (gctUINT32 *)PageTable; + + if (platform && platform->ops->getPolicyID) + { + platform->ops->getPolicyID(platform, Type, &policyID, &axiConfig); + + gcmkBUG_ON(policyID > 0x1F); + + /* ID[3:0] is used in STLB. 
*/ + policyID &= 0xF; + } + + /* Get all the physical addresses and store them in the page table. */ + + PageCount = PageCount / (PAGE_SIZE / 4096); + + /* Try to get the user pages so DMA can happen. */ + while (PageCount-- > 0) + { + gctUINT i; + gctPHYS_ADDR_T phys = ~0U; + + allocator->ops->Physical(allocator, mdl, offset, &phys); + + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical(Os, phys, &phys)); + + if (policyID) + { + /* AxUSER must not used for address currently. */ + gcmkBUG_ON((phys >> 32) & 0xF); + + /* Merge policyID to AxUSER[7:4].*/ + phys |= ((gctPHYS_ADDR_T)policyID << 36); + } + +#ifdef CONFIG_IOMMU_SUPPORT + if (Os->iommu) + { + /* remove LSB. */ + phys &= PAGE_MASK; + + gcmkTRACE_ZONE( + gcvLEVEL_INFO, gcvZONE_OS, + "%s(%d): Setup mapping in IOMMU %x => %x", + __FUNCTION__, __LINE__, + Address + offset, phys + ); + + /* When use IOMMU, GPU use system PAGE_SIZE. */ + gcmkONERROR(gckIOMMU_Map( + Os->iommu, Address + offset, phys, PAGE_SIZE)); + } + else +#endif + { + /* remove LSB. */ + phys &= ~(4096ull - 1); + + { + for (i = 0; i < (PAGE_SIZE / 4096); i++) + { +#if gcdPROCESS_ADDRESS_SPACE + gctUINT32_PTR pageTableEntry; + gckMMU_GetPageEntry(mmu, Address + offset + (i * 4096), &pageTableEntry); + gcmkONERROR( + gckMMU_SetPage(mmu, + phys + (i * 4096), + Writable, + pageTableEntry)); +#else + gcmkONERROR( + gckMMU_SetPage(Os->device->kernels[Core]->mmu, + phys + (i * 4096), + Writable, + table++)); +#endif + } + } + } + + offset += PAGE_SIZE; + } + + { + gckMMU mmu = Os->device->kernels[Core]->mmu; + gcsADDRESS_AREA * area = &mmu->area[0]; + + offset = (gctUINT8_PTR)PageTable - (gctUINT8_PTR)area->pageTableLogical; + + /* must be in dynamic area. */ + gcmkASSERT(offset < area->pageTableSize); + + gcmkVERIFY_OK(gckOS_CacheClean( + Os, + 0, + area->pageTablePhysical, + offset, + PageTable, + bytes + )); + + if (mmu->mtlbPhysical) + { + /* Flush MTLB table. 
*/ + gcmkVERIFY_OK(gckOS_CacheClean( + Os, + 0, + mmu->mtlbPhysical, + 0, + mmu->mtlbLogical, + mmu->mtlbSize + )); + } + } + +OnError: + + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckOS_UnmapPages( + IN gckOS Os, + IN gctSIZE_T PageCount, + IN gctUINT32 Address + ) +{ +#ifdef CONFIG_IOMMU_SUPPORT + if (Os->iommu) + { + gcmkVERIFY_OK(gckIOMMU_Unmap( + Os->iommu, Address, PageCount * PAGE_SIZE)); + } +#endif + + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_UnlockPages +** +** Unlock memory allocated from the paged pool. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctPHYS_ADDR Physical +** Physical address of the allocation. +** +** gctSIZE_T Bytes +** Number of bytes of the allocation. +** +** gctPOINTER Logical +** Address of the mapped memory. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_UnlockPages( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Bytes, + IN gctPOINTER Logical + ) +{ + PLINUX_MDL_MAP mdlMap; + PLINUX_MDL mdl = (PLINUX_MDL)Physical; + gckALLOCATOR allocator = mdl->allocator; + gctINT pid = _GetProcessID(); + + gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%u Logical=0x%X", + Os, Physical, Bytes, Logical); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Physical != gcvNULL); + gcmkVERIFY_ARGUMENT(Logical != gcvNULL); + + mutex_lock(&mdl->mapsMutex); + + list_for_each_entry(mdlMap, &mdl->mapsHead, link) + { + if ((mdlMap->vmaAddr != gcvNULL) && (mdlMap->pid == pid)) + { + if (--mdlMap->count == 0) + { + allocator->ops->UnmapUser( + allocator, + mdl, + mdlMap, + mdl->bytes); + + mdlMap->vmaAddr = gcvNULL; + } + } + } + + mutex_unlock(&mdl->mapsMutex); + + /* Success. 
*/ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + + +/******************************************************************************* +** +** gckOS_AllocateContiguous +** +** Allocate memory from the contiguous pool. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctBOOL InUserSpace +** gcvTRUE if the pages need to be mapped into user space. +** +** gctSIZE_T * Bytes +** Pointer to the number of bytes to allocate. +** +** OUTPUT: +** +** gctSIZE_T * Bytes +** Pointer to a variable that receives the number of bytes allocated. +** +** gctPHYS_ADDR * Physical +** Pointer to a variable that receives the physical address of the +** memory allocation. +** +** gctPOINTER * Logical +** Pointer to a variable that receives the logical address of the +** memory allocation. +*/ +gceSTATUS +gckOS_AllocateContiguous( + IN gckOS Os, + IN gctBOOL InUserSpace, + IN OUT gctSIZE_T * Bytes, + OUT gctPHYS_ADDR * Physical, + OUT gctPOINTER * Logical + ) +{ + gceSTATUS status; + + gcmkHEADER_ARG("Os=0x%X InUserSpace=%d *Bytes=%lu", + Os, InUserSpace, gcmOPT_VALUE(Bytes)); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Bytes != gcvNULL); + gcmkVERIFY_ARGUMENT(*Bytes > 0); + gcmkVERIFY_ARGUMENT(Physical != gcvNULL); + gcmkVERIFY_ARGUMENT(Logical != gcvNULL); + + /* Same as non-paged memory for now. */ + gcmkONERROR(gckOS_AllocateNonPagedMemory(Os, + InUserSpace, + gcvALLOC_FLAG_CONTIGUOUS, + Bytes, + Physical, + Logical)); + + /* Success. */ + gcmkFOOTER_ARG("*Bytes=%lu *Physical=0x%X *Logical=0x%X", + *Bytes, *Physical, *Logical); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckOS_FreeContiguous +** +** Free memory allocated from the contiguous pool. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. 
+** +** gctPHYS_ADDR Physical +** Physical address of the allocation. +** +** gctPOINTER Logical +** Logicval address of the allocation. +** +** gctSIZE_T Bytes +** Number of bytes of the allocation. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_FreeContiguous( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctPOINTER Logical, + IN gctSIZE_T Bytes + ) +{ + gceSTATUS status; + + gcmkHEADER_ARG("Os=0x%X Physical=0x%X Logical=0x%X Bytes=%lu", + Os, Physical, Logical, Bytes); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Physical != gcvNULL); + gcmkVERIFY_ARGUMENT(Logical != gcvNULL); + gcmkVERIFY_ARGUMENT(Bytes > 0); + + /* Same of non-paged memory for now. */ + gcmkONERROR(gckOS_FreeNonPagedMemory(Os, Bytes, Physical, Logical)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + + +/******************************************************************************* +** +** gckOS_MapUserPointer +** +** Map a pointer from the user process into the kernel address space. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctPOINTER Pointer +** Pointer in user process space that needs to be mapped. +** +** gctSIZE_T Size +** Number of bytes that need to be mapped. +** +** OUTPUT: +** +** gctPOINTER * KernelPointer +** Pointer to a variable receiving the mapped pointer in kernel address +** space. +*/ +gceSTATUS +gckOS_MapUserPointer( + IN gckOS Os, + IN gctPOINTER Pointer, + IN gctSIZE_T Size, + OUT gctPOINTER * KernelPointer + ) +{ + gcmkHEADER_ARG("Os=0x%X Pointer=0x%X Size=%lu", Os, Pointer, Size); + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Pointer != gcvNULL); + gcmkVERIFY_ARGUMENT(Size > 0); + gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL); + + *KernelPointer = Pointer; + + gcmkFOOTER_ARG("*KernelPointer=0x%X", *KernelPointer); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_UnmapUserPointer +** +** Unmap a user process pointer from the kernel address space. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctPOINTER Pointer +** Pointer in user process space that needs to be unmapped. +** +** gctSIZE_T Size +** Number of bytes that need to be unmapped. +** +** gctPOINTER KernelPointer +** Pointer in kernel address space that needs to be unmapped. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_UnmapUserPointer( + IN gckOS Os, + IN gctPOINTER Pointer, + IN gctSIZE_T Size, + IN gctPOINTER KernelPointer + ) +{ + gcmkHEADER_ARG("Os=0x%X Pointer=0x%X Size=%lu KernelPointer=0x%X", + Os, Pointer, Size, KernelPointer); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_QueryNeedCopy +** +** Query whether the memory can be accessed or mapped directly or it has to be +** copied. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctUINT32 ProcessID +** Process ID of the current process. +** +** OUTPUT: +** +** gctBOOL_PTR NeedCopy +** Pointer to a boolean receiving gcvTRUE if the memory needs a copy or +** gcvFALSE if the memory can be accessed or mapped dircetly. +*/ +gceSTATUS +gckOS_QueryNeedCopy( + IN gckOS Os, + IN gctUINT32 ProcessID, + OUT gctBOOL_PTR NeedCopy + ) +{ + gcmkHEADER_ARG("Os=0x%X ProcessID=%d", Os, ProcessID); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(NeedCopy != gcvNULL); + + /* We need to copy data. */ + *NeedCopy = gcvTRUE; + + /* Success. 
*/ + gcmkFOOTER_ARG("*NeedCopy=%d", *NeedCopy); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_CopyFromUserData +** +** Copy data from user to kernel memory. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctPOINTER KernelPointer +** Pointer to kernel memory. +** +** gctPOINTER Pointer +** Pointer to user memory. +** +** gctSIZE_T Size +** Number of bytes to copy. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_CopyFromUserData( + IN gckOS Os, + IN gctPOINTER KernelPointer, + IN gctPOINTER Pointer, + IN gctSIZE_T Size + ) +{ + gceSTATUS status; + + gcmkHEADER_ARG("Os=0x%X KernelPointer=0x%X Pointer=0x%X Size=%lu", + Os, KernelPointer, Pointer, Size); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL); + gcmkVERIFY_ARGUMENT(Pointer != gcvNULL); + gcmkVERIFY_ARGUMENT(Size > 0); + + /* Copy data from user. */ + if (copy_from_user(KernelPointer, Pointer, Size) != 0) + { + /* Could not copy all the bytes. */ + gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); + } + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckOS_CopyToUserData +** +** Copy data from kernel to user memory. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctPOINTER KernelPointer +** Pointer to kernel memory. +** +** gctPOINTER Pointer +** Pointer to user memory. +** +** gctSIZE_T Size +** Number of bytes to copy. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_CopyToUserData( + IN gckOS Os, + IN gctPOINTER KernelPointer, + IN gctPOINTER Pointer, + IN gctSIZE_T Size + ) +{ + gceSTATUS status; + + gcmkHEADER_ARG("Os=0x%X KernelPointer=0x%X Pointer=0x%X Size=%lu", + Os, KernelPointer, Pointer, Size); + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL); + gcmkVERIFY_ARGUMENT(Pointer != gcvNULL); + gcmkVERIFY_ARGUMENT(Size > 0); + + /* Copy data to user. */ + if (copy_to_user(Pointer, KernelPointer, Size) != 0) + { + /* Could not copy all the bytes. */ + gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); + } + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckOS_WriteMemory +** +** Write data to a memory. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctPOINTER Address +** Address of the memory to write to. +** +** gctUINT32 Data +** Data for register. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_WriteMemory( + IN gckOS Os, + IN gctPOINTER Address, + IN gctUINT32 Data + ) +{ + gceSTATUS status; + gcmkHEADER_ARG("Os=0x%X Address=0x%X Data=%u", Os, Address, Data); + + /* Verify the arguments. */ + gcmkVERIFY_ARGUMENT(Address != gcvNULL); + + /* Write memory. */ + if (access_ok(Address, 4)) + { + /* User address. */ + if (put_user(Data, (gctUINT32*)Address)) + { + gcmkONERROR(gcvSTATUS_INVALID_ADDRESS); + } + } + else + { + /* Kernel address. */ + *(gctUINT32 *)Address = Data; + } + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckOS_ReadMappedPointer( + IN gckOS Os, + IN gctPOINTER Address, + IN gctUINT32_PTR Data + ) +{ + gceSTATUS status; + gcmkHEADER_ARG("Os=0x%X Address=0x%X Data=%u", Os, Address, Data); + + /* Verify the arguments. */ + gcmkVERIFY_ARGUMENT(Address != gcvNULL); + + /* Write memory. */ + if (access_ok(Address, 4)) + { + /* User address. */ + if (get_user(*Data, (gctUINT32*)Address)) + { + gcmkONERROR(gcvSTATUS_INVALID_ADDRESS); + } + } + else + { + /* Kernel address. */ + *Data = *(gctUINT32_PTR)Address; + } + + /* Success. 
*/ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckOS_MapUserMemory +** +** Lock down a user buffer and return an DMA'able address to be used by the +** hardware to access it. +** +** INPUT: +** +** gctPOINTER Memory +** Pointer to memory to lock down. +** +** gctSIZE_T Size +** Size in bytes of the memory to lock down. +** +** OUTPUT: +** +** gctPOINTER * Info +** Pointer to variable receiving the information record required by +** gckOS_UnmapUserMemory. +** +** gctUINT32_PTR Address +** Pointer to a variable that will receive the address DMA'able by the +** hardware. +*/ +gceSTATUS +gckOS_MapUserMemory( + IN gckOS Os, + IN gceCORE Core, + IN gctPOINTER Memory, + IN gctUINT32 Physical, + IN gctSIZE_T Size, + OUT gctPOINTER * Info, + OUT gctUINT32_PTR Address + ) +{ + return gcvSTATUS_NOT_SUPPORTED; +} + +/******************************************************************************* +** +** gckOS_UnmapUserMemory +** +** Unlock a user buffer and that was previously locked down by +** gckOS_MapUserMemory. +** +** INPUT: +** +** gctPOINTER Memory +** Pointer to memory to unlock. +** +** gctSIZE_T Size +** Size in bytes of the memory to unlock. +** +** gctPOINTER Info +** Information record returned by gckOS_MapUserMemory. +** +** gctUINT32_PTR Address +** The address returned by gckOS_MapUserMemory. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_UnmapUserMemory( + IN gckOS Os, + IN gceCORE Core, + IN gctPOINTER Memory, + IN gctSIZE_T Size, + IN gctPOINTER Info, + IN gctUINT32 Address + ) +{ + return gcvSTATUS_NOT_SUPPORTED; +} + +/******************************************************************************* +** +** gckOS_GetBaseAddress +** +** Get the base address for the physical memory. +** +** INPUT: +** +** gckOS Os +** Pointer to the gckOS object. 
+** +** OUTPUT: +** +** gctUINT32_PTR BaseAddress +** Pointer to a variable that will receive the base address. +*/ +gceSTATUS +gckOS_GetBaseAddress( + IN gckOS Os, + OUT gctUINT32_PTR BaseAddress + ) +{ + gcmkHEADER_ARG("Os=0x%X", Os); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(BaseAddress != gcvNULL); + + /* Return base address. */ + *BaseAddress = Os->device->baseAddress; + + /* Success. */ + gcmkFOOTER_ARG("*BaseAddress=0x%08x", *BaseAddress); + return gcvSTATUS_OK; +} + +gceSTATUS +gckOS_SuspendInterrupt( + IN gckOS Os + ) +{ + return gckOS_SuspendInterruptEx(Os, gcvCORE_MAJOR); +} + +gceSTATUS +gckOS_SuspendInterruptEx( + IN gckOS Os, + IN gceCORE Core + ) +{ + gcmkHEADER_ARG("Os=0x%X Core=%d", Os, Core); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + + disable_irq(Os->device->irqLines[Core]); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gckOS_ResumeInterrupt( + IN gckOS Os + ) +{ + return gckOS_ResumeInterruptEx(Os, gcvCORE_MAJOR); +} + +gceSTATUS +gckOS_ResumeInterruptEx( + IN gckOS Os, + IN gceCORE Core + ) +{ + gcmkHEADER_ARG("Os=0x%X Core=%d", Os, Core); + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + + enable_irq(Os->device->irqLines[Core]); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gckOS_MemCopy( + IN gctPOINTER Destination, + IN gctCONST_POINTER Source, + IN gctSIZE_T Bytes + ) +{ + gcmkHEADER_ARG("Destination=0x%X Source=0x%X Bytes=%lu", + Destination, Source, Bytes); + + gcmkVERIFY_ARGUMENT(Destination != gcvNULL); + gcmkVERIFY_ARGUMENT(Source != gcvNULL); + gcmkVERIFY_ARGUMENT(Bytes > 0); + + memcpy(Destination, Source, Bytes); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gckOS_ZeroMemory( + IN gctPOINTER Memory, + IN gctSIZE_T Bytes + ) +{ + gcmkHEADER_ARG("Memory=0x%X Bytes=%lu", Memory, Bytes); + + gcmkVERIFY_ARGUMENT(Memory != gcvNULL); + gcmkVERIFY_ARGUMENT(Bytes > 0); + + memset(Memory, 0, Bytes); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +********************************* Cache Control ******************************** +*******************************************************************************/ +static gceSTATUS +_CacheOperation( + IN gckOS Os, + IN gctUINT32 ProcessID, + IN gctPHYS_ADDR Handle, + IN gctSIZE_T Offset, + IN gctPOINTER Logical, + IN gctSIZE_T Bytes, + IN gceCACHEOPERATION Operation + ) +{ + PLINUX_MDL mdl = (PLINUX_MDL)Handle; + PLINUX_MDL_MAP mdlMap; + gckALLOCATOR allocator; + + if (!mdl || !mdl->allocator) + { + gcmkPRINT("[galcore]: %s: Logical=%p no mdl", __FUNCTION__, Logical); + return gcvSTATUS_INVALID_ARGUMENT; + } + + allocator = mdl->allocator; + + if (allocator->ops->Cache) + { + mutex_lock(&mdl->mapsMutex); + + mdlMap = FindMdlMap(mdl, ProcessID); + + mutex_unlock(&mdl->mapsMutex); + + if (ProcessID && mdlMap == gcvNULL) + { + return gcvSTATUS_INVALID_ARGUMENT; + } + + if ((!ProcessID && mdl->cacheable) || + (mdlMap && mdlMap->cacheable)) + { + allocator->ops->Cache(allocator, + mdl, Offset, Logical, Bytes, Operation); + + return gcvSTATUS_OK; + } + } + + 
_MemoryBarrier(); + + return gcvSTATUS_OK; +} + +/******************************************************************************* +** gckOS_CacheClean +** +** Clean the cache for the specified addresses. The GPU is going to need the +** data. If the system is allocating memory as non-cachable, this function can +** be ignored. +** +** ARGUMENTS: +** +** gckOS Os +** Pointer to gckOS object. +** +** gctUINT32 ProcessID +** Process ID Logical belongs. +** +** gctPHYS_ADDR Handle +** Physical address handle. If gcvNULL it is video memory. +** +** gctSIZE_T Offset +** Offset to this memory block. +** +** gctPOINTER Logical +** Logical address to flush. +** +** gctSIZE_T Bytes +** Size of the address range in bytes to flush. +*/ + +/* + +Following patch can be applied to kernel in case cache API is not exported. + +diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c +index 054b491..e9e74ec 100644 +--- a/arch/arm/mm/proc-syms.c ++++ b/arch/arm/mm/proc-syms.c +@@ -30,6 +30,9 @@ EXPORT_SYMBOL(__cpuc_flush_user_all); + EXPORT_SYMBOL(__cpuc_flush_user_range); + EXPORT_SYMBOL(__cpuc_coherent_kern_range); + EXPORT_SYMBOL(__cpuc_flush_dcache_area); ++EXPORT_SYMBOL(__glue(_CACHE,_dma_map_area)); ++EXPORT_SYMBOL(__glue(_CACHE,_dma_unmap_area)); ++EXPORT_SYMBOL(__glue(_CACHE,_dma_flush_range)); + #else + EXPORT_SYMBOL(cpu_cache); + #endif + +*/ +gceSTATUS +gckOS_CacheClean( + IN gckOS Os, + IN gctUINT32 ProcessID, + IN gctPHYS_ADDR Handle, + IN gctSIZE_T Offset, + IN gctPOINTER Logical, + IN gctSIZE_T Bytes + ) +{ + gceSTATUS status; + + gcmkHEADER_ARG("Os=0x%X ProcessID=%d Handle=0x%X Logical=%p Bytes=%lu", + Os, ProcessID, Handle, Logical, Bytes); + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Logical != gcvNULL); + gcmkVERIFY_ARGUMENT(Bytes > 0); + + gcmkONERROR(_CacheOperation(Os, ProcessID, + Handle, Offset, Logical, Bytes, + gcvCACHE_CLEAN)); + +OnError: + gcmkFOOTER(); + return status; + +} + +/******************************************************************************* +** gckOS_CacheInvalidate +** +** Invalidate the cache for the specified addresses. The GPU is going to need +** data. If the system is allocating memory as non-cachable, this function can +** be ignored. +** +** ARGUMENTS: +** +** gckOS Os +** Pointer to gckOS object. +** +** gctUINT32 ProcessID +** Process ID Logical belongs. +** +** gctPHYS_ADDR Handle +** Physical address handle. If gcvNULL it is video memory. +** +** gctSIZE_T Offset +** Offset to this memory block. +** +** gctPOINTER Logical +** Logical address to flush. +** +** gctSIZE_T Bytes +** Size of the address range in bytes to flush. +*/ +gceSTATUS +gckOS_CacheInvalidate( + IN gckOS Os, + IN gctUINT32 ProcessID, + IN gctPHYS_ADDR Handle, + IN gctSIZE_T Offset, + IN gctPOINTER Logical, + IN gctSIZE_T Bytes + ) +{ + gceSTATUS status; + + gcmkHEADER_ARG("Os=%p ProcessID=%d Handle=%p Offset=0x%llx Logical=%p Bytes=0x%zx", + Os, ProcessID, Handle, Offset, Logical, Bytes); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Logical != gcvNULL); + gcmkVERIFY_ARGUMENT(Bytes > 0); + + gcmkONERROR(_CacheOperation(Os, ProcessID, + Handle, Offset, Logical, Bytes, + gcvCACHE_INVALIDATE)); + +OnError: + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** gckOS_CacheFlush +** +** Clean the cache for the specified addresses and invalidate the lines as +** well. The GPU is going to need and modify the data. If the system is +** allocating memory as non-cachable, this function can be ignored. +** +** ARGUMENTS: +** +** gckOS Os +** Pointer to gckOS object. 
+** +** gctUINT32 ProcessID +** Process ID Logical belongs. +** +** gctPHYS_ADDR Handle +** Physical address handle. If gcvNULL it is video memory. +** +** gctSIZE_T Offset +** Offset to this memory block. +** +** gctPOINTER Logical +** Logical address to flush. +** +** gctSIZE_T Bytes +** Size of the address range in bytes to flush. +*/ +gceSTATUS +gckOS_CacheFlush( + IN gckOS Os, + IN gctUINT32 ProcessID, + IN gctPHYS_ADDR Handle, + IN gctSIZE_T Offset, + IN gctPOINTER Logical, + IN gctSIZE_T Bytes + ) +{ + gceSTATUS status; + + gcmkHEADER_ARG("Os=%p ProcessID=%d Handle=%p Offset=0x%llx Logical=%p Bytes=0x%zx", + Os, ProcessID, Handle, Offset, Logical, Bytes); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Logical != gcvNULL); + gcmkVERIFY_ARGUMENT(Bytes > 0); + + gcmkONERROR(_CacheOperation(Os, ProcessID, + Handle, Offset, Logical, Bytes, + gcvCACHE_FLUSH)); + +OnError: + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +********************************* Broadcasting ********************************* +*******************************************************************************/ + +/******************************************************************************* +** +** gckOS_Broadcast +** +** System hook for broadcast events from the kernel driver. +** +** INPUT: +** +** gckOS Os +** Pointer to the gckOS object. +** +** gckHARDWARE Hardware +** Pointer to the gckHARDWARE object. +** +** gceBROADCAST Reason +** Reason for the broadcast. Can be one of the following values: +** +** gcvBROADCAST_GPU_IDLE +** Broadcasted when the kernel driver thinks the GPU might be +** idle. This can be used to handle power management. +** +** gcvBROADCAST_GPU_COMMIT +** Broadcasted when any client process commits a command +** buffer. This can be used to handle power management. 
+** +** gcvBROADCAST_GPU_STUCK +** Broadcasted when the kernel driver hits the timeout waiting +** for the GPU. +** +** gcvBROADCAST_FIRST_PROCESS +** First process is trying to connect to the kernel. +** +** gcvBROADCAST_LAST_PROCESS +** Last process has detached from the kernel. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_Broadcast( + IN gckOS Os, + IN gckHARDWARE Hardware, + IN gceBROADCAST Reason + ) +{ + gceSTATUS status; +#if gcdPOWER_SUSPEND_WHEN_IDLE + gceCHIPPOWERSTATE state = gcvPOWER_SUSPEND_BROADCAST; +#else + gceCHIPPOWERSTATE state = gcvPOWER_IDLE_BROADCAST; +#endif + + gcmkHEADER_ARG("Os=0x%X Hardware=0x%X Reason=%d", Os, Hardware, Reason); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + + switch (Reason) + { + case gcvBROADCAST_FIRST_PROCESS: + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "First process has attached"); + break; + + case gcvBROADCAST_LAST_PROCESS: + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "Last process has detached"); + + /* Put GPU OFF. */ + gcmkONERROR( + gckHARDWARE_SetPowerManagementState(Hardware, + gcvPOWER_OFF_BROADCAST)); + break; + + case gcvBROADCAST_GPU_IDLE: + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "GPU idle."); + + /* Put GPU IDLE. */ + gcmkONERROR( + gckHARDWARE_SetPowerManagementState(Hardware, state)); + + /* Add idle process DB. */ + gcmkONERROR(gckKERNEL_AddProcessDB(Hardware->kernel, + 1, + gcvDB_IDLE, + gcvNULL, gcvNULL, 0)); + break; + + case gcvBROADCAST_GPU_COMMIT: + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "COMMIT has arrived."); + + /* Add busy process DB. */ + gcmkONERROR(gckKERNEL_AddProcessDB(Hardware->kernel, + 0, + gcvDB_IDLE, + gcvNULL, gcvNULL, 0)); + + /* Put GPU ON. 
*/ + gcmkONERROR( + gckHARDWARE_SetPowerManagementState(Hardware, gcvPOWER_ON_AUTO)); + break; + + case gcvBROADCAST_GPU_STUCK: + gcmkTRACE_N(gcvLEVEL_ERROR, 0, "gcvBROADCAST_GPU_STUCK\n"); + gcmkONERROR(gckKERNEL_Recovery(Hardware->kernel)); + break; + + case gcvBROADCAST_AXI_BUS_ERROR: + gcmkTRACE_N(gcvLEVEL_ERROR, 0, "gcvBROADCAST_AXI_BUS_ERROR\n"); + gcmkONERROR(gckHARDWARE_DumpGPUState(Hardware)); + gcmkONERROR(gckKERNEL_Recovery(Hardware->kernel)); + break; + + case gcvBROADCAST_OUT_OF_MEMORY: + gcmkTRACE_N(gcvLEVEL_INFO, 0, "gcvBROADCAST_OUT_OF_MEMORY\n"); + + status = _ShrinkMemory(Os); + + if (status == gcvSTATUS_NOT_SUPPORTED) + { + goto OnError; + } + + gcmkONERROR(status); + + break; + + default: + /* Skip unimplemented broadcast. */ + break; + } + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckOS_BroadcastHurry +** +** The GPU is running too slow. +** +** INPUT: +** +** gckOS Os +** Pointer to the gckOS object. +** +** gckHARDWARE Hardware +** Pointer to the gckHARDWARE object. +** +** gctUINT Urgency +** The higher the number, the higher the urgency to speed up the GPU. +** The maximum value is defined by the gcdDYNAMIC_EVENT_THRESHOLD. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_BroadcastHurry( + IN gckOS Os, + IN gckHARDWARE Hardware, + IN gctUINT Urgency + ) +{ + gcmkHEADER_ARG("Os=0x%x Hardware=0x%x Urgency=%u", Os, Hardware, Urgency); + + /* Do whatever you need to do to speed up the GPU now. */ + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_BroadcastCalibrateSpeed +** +** Calibrate the speed of the GPU. +** +** INPUT: +** +** gckOS Os +** Pointer to the gckOS object. +** +** gckHARDWARE Hardware +** Pointer to the gckHARDWARE object. 
+** +** gctUINT Idle, Time +** Idle/Time will give the percentage the GPU is idle, so you can use +** this to calibrate the working point of the GPU. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_BroadcastCalibrateSpeed( + IN gckOS Os, + IN gckHARDWARE Hardware, + IN gctUINT Idle, + IN gctUINT Time + ) +{ + gcmkHEADER_ARG("Os=0x%x Hardware=0x%x Idle=%u Time=%u", + Os, Hardware, Idle, Time); + + /* Do whatever you need to do to callibrate the GPU speed. */ + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +********************************** Semaphores ********************************** +*******************************************************************************/ + +/******************************************************************************* +** +** gckOS_CreateSemaphore +** +** Create a semaphore. +** +** INPUT: +** +** gckOS Os +** Pointer to the gckOS object. +** +** OUTPUT: +** +** gctPOINTER * Semaphore +** Pointer to the variable that will receive the created semaphore. +*/ +gceSTATUS +gckOS_CreateSemaphore( + IN gckOS Os, + OUT gctPOINTER * Semaphore + ) +{ + gceSTATUS status; + struct semaphore *sem = gcvNULL; + + gcmkHEADER_ARG("Os=0x%X", Os); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL); + + /* Allocate the semaphore structure. */ + sem = (struct semaphore *)kmalloc(gcmSIZEOF(struct semaphore), GFP_KERNEL | gcdNOWARN); + if (sem == gcvNULL) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + /* Initialize the semaphore. */ + sema_init(sem, 1); + + /* Return to caller. */ + *Semaphore = (gctPOINTER) sem; + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckOS_AcquireSemaphore +** +** Acquire a semaphore. 
+** +** INPUT: +** +** gckOS Os +** Pointer to the gckOS object. +** +** gctPOINTER Semaphore +** Pointer to the semaphore thet needs to be acquired. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_AcquireSemaphore( + IN gckOS Os, + IN gctPOINTER Semaphore + ) +{ + gcmkHEADER_ARG("Os=0x%08X Semaphore=0x%08X", Os, Semaphore); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL); + + /* Acquire the semaphore. */ + down((struct semaphore *) Semaphore); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_TryAcquireSemaphore +** +** Try to acquire a semaphore. +** +** INPUT: +** +** gckOS Os +** Pointer to the gckOS object. +** +** gctPOINTER Semaphore +** Pointer to the semaphore thet needs to be acquired. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_TryAcquireSemaphore( + IN gckOS Os, + IN gctPOINTER Semaphore + ) +{ + gceSTATUS status; + + gcmkHEADER_ARG("Os=0x%x", Os); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL); + + /* Acquire the semaphore. */ + if (down_trylock((struct semaphore *) Semaphore)) + { + /* Timeout. */ + status = gcvSTATUS_TIMEOUT; + gcmkFOOTER(); + return status; + } + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_ReleaseSemaphore +** +** Release a previously acquired semaphore. +** +** INPUT: +** +** gckOS Os +** Pointer to the gckOS object. +** +** gctPOINTER Semaphore +** Pointer to the semaphore thet needs to be released. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_ReleaseSemaphore( + IN gckOS Os, + IN gctPOINTER Semaphore + ) +{ + gcmkHEADER_ARG("Os=0x%X Semaphore=0x%X", Os, Semaphore); + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL); + + /* Release the semaphore. */ + up((struct semaphore *) Semaphore); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_DestroySemaphore +** +** Destroy a semaphore. +** +** INPUT: +** +** gckOS Os +** Pointer to the gckOS object. +** +** gctPOINTER Semaphore +** Pointer to the semaphore thet needs to be destroyed. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_DestroySemaphore( + IN gckOS Os, + IN gctPOINTER Semaphore + ) +{ + gcmkHEADER_ARG("Os=0x%X Semaphore=0x%X", Os, Semaphore); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL); + + /* Free the sempahore structure. */ + kfree(Semaphore); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_GetProcessID +** +** Get current process ID. +** +** INPUT: +** +** Nothing. +** +** OUTPUT: +** +** gctUINT32_PTR ProcessID +** Pointer to the variable that receives the process ID. +*/ +gceSTATUS +gckOS_GetProcessID( + OUT gctUINT32_PTR ProcessID + ) +{ + /* Get process ID. */ + *ProcessID = _GetProcessID(); + + /* Success. */ + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_GetThreadID +** +** Get current thread ID. +** +** INPUT: +** +** Nothing. +** +** OUTPUT: +** +** gctUINT32_PTR ThreadID +** Pointer to the variable that receives the thread ID. +*/ +gceSTATUS +gckOS_GetThreadID( + OUT gctUINT32_PTR ThreadID + ) +{ + /* Get thread ID. */ + if (ThreadID != gcvNULL) + { + *ThreadID = _GetThreadID(); + } + + /* Success. 
*/ + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_SetGPUPower +** +** Set the power of the GPU on or off. +** +** INPUT: +** +** gckOS Os +** Pointer to a gckOS object. +** +** gceCORE Core +** GPU whose power is set. +** +** gctBOOL Clock +** gcvTRUE to turn on the clock, or gcvFALSE to turn off the clock. +** +** gctBOOL Power +** gcvTRUE to turn on the power, or gcvFALSE to turn off the power. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_SetGPUPower( + IN gckOS Os, + IN gceCORE Core, + IN gctBOOL Clock, + IN gctBOOL Power + ) +{ + gcsPLATFORM * platform; + + gctBOOL powerChange = gcvFALSE; + gctBOOL clockChange = gcvFALSE; + + gcmkHEADER_ARG("Os=0x%X Core=%d Clock=%d Power=%d", Os, Core, Clock, Power); + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + + platform = Os->device->platform; + + powerChange = (Power != Os->powerStates[Core]); + + clockChange = (Clock != Os->clockStates[Core]); + + if (powerChange && (Power == gcvTRUE)) + { + if (platform && platform->ops->setPower) + { + gcmkVERIFY_OK(platform->ops->setPower(platform, Core, Power)); + } + + Os->powerStates[Core] = Power; + } + + if (clockChange) + { + unsigned long flags; + + if (!Clock) + { + spin_lock_irqsave(&Os->registerAccessLock, flags); + + /* Record clock off, ahead. */ + Os->clockStates[Core] = gcvFALSE; + + spin_unlock_irqrestore(&Os->registerAccessLock, flags); + } + + if (platform && platform->ops->setClock) + { + gcmkVERIFY_OK(platform->ops->setClock(platform, Core, Clock)); + } + + if (Clock) + { + spin_lock_irqsave(&Os->registerAccessLock, flags); + + /* Record clock on, behind. 
*/ + Os->clockStates[Core] = gcvTRUE; + + spin_unlock_irqrestore(&Os->registerAccessLock, flags); + } + } + + if (powerChange && (Power == gcvFALSE)) + { + if (platform && platform->ops->setPower) + { + gcmkVERIFY_OK(platform->ops->setPower(platform, Core, Power)); + } + + Os->powerStates[Core] = Power; + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_ResetGPU +** +** Reset the GPU. +** +** INPUT: +** +** gckOS Os +** Pointer to a gckOS object. +** +** gckCORE Core +** GPU whose power is set. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_ResetGPU( + IN gckOS Os, + IN gceCORE Core + ) +{ + gceSTATUS status = gcvSTATUS_NOT_SUPPORTED; + gcsPLATFORM * platform; + + gcmkHEADER_ARG("Os=0x%X Core=%d", Os, Core); + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + + platform = Os->device->platform; + + if (platform && platform->ops->reset) + { + status = platform->ops->reset(platform, Core); + } + + gcmkFOOTER_NO(); + return status; +} + +/******************************************************************************* +** +** gckOS_PrepareGPUFrequency +** +** Prepare to set GPU frequency and voltage. +** +** INPUT: +** +** gckOS Os +** Pointer to a gckOS object. +** +** gckCORE Core +** GPU whose frequency and voltage will be set. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_PrepareGPUFrequency( + IN gckOS Os, + IN gceCORE Core + ) +{ + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_FinishGPUFrequency +** +** Finish GPU frequency setting. +** +** INPUT: +** +** gckOS Os +** Pointer to a gckOS object. +** +** gckCORE Core +** GPU whose frequency and voltage is set. +** +** OUTPUT: +** +** Nothing. 
+*/ +gceSTATUS +gckOS_FinishGPUFrequency( + IN gckOS Os, + IN gceCORE Core + ) +{ + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_QueryGPUFrequency +** +** Query the current frequency of the GPU. +** +** INPUT: +** +** gckOS Os +** Pointer to a gckOS object. +** +** gckCORE Core +** GPU whose power is set. +** +** gctUINT32 * Frequency +** Pointer to a gctUINT32 to obtain current frequency, in MHz. +** +** gctUINT8 * Scale +** Pointer to a gctUINT8 to obtain current scale(1 - 64). +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_QueryGPUFrequency( + IN gckOS Os, + IN gceCORE Core, + OUT gctUINT32 * Frequency, + OUT gctUINT8 * Scale + ) +{ + /* In case of RT kernel pmc is not active, + * so we define Frequency and Scale fro GPU as constants. + */ +#ifdef CONFIG_MCST_RT + *Frequency = 533000; + *Scale = 64; +#else + *Frequency = pmc_l_gpufreq_get_frequency(); + *Scale = pmc_l_gpufreq_get_scale(); +#endif + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_SetGPUFrequency +** +** Set frequency and voltage of the GPU. +** +** 1. DVFS manager gives the target scale of full frequency, BSP must find +** a real frequency according to this scale and board's configure. +** +** 2. BSP should find a suitable voltage for this frequency. +** +** 3. BSP must make sure setting take effect before this function returns. +** +** INPUT: +** +** gckOS Os +** Pointer to a gckOS object. +** +** gckCORE Core +** GPU whose power is set. +** +** gctUINT8 Scale +** Target scale of full frequency, range is [1, 64]. 1 means 1/64 of +** full frequency and 64 means 64/64 of full frequency. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_SetGPUFrequency( + IN gckOS Os, + IN gceCORE Core, + IN gctUINT8 Scale + ) +{ + /* In case of RT kernel pmc is not active + * and we really don't set any scale. 
+ */ +#ifdef CONFIG_MCST_RT + return gcvSTATUS_OK; +#else + if (pmc_l_gpufreq_set_scale(Scale)) + return gcvSTATUS_INVALID_ARGUMENT; +#endif + return gcvSTATUS_OK; +} + +/*----------------------------------------------------------------------------*/ +/*----- Profile --------------------------------------------------------------*/ + +gceSTATUS +gckOS_GetProfileTick( + OUT gctUINT64_PTR Tick + ) +{ + struct timespec time; + + ktime_get_ts(&time); + + *Tick = time.tv_nsec + time.tv_sec * 1000000000ULL; + + return gcvSTATUS_OK; +} + +gceSTATUS +gckOS_QueryProfileTickRate( + OUT gctUINT64_PTR TickRate + ) +{ + struct timespec res; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) + res.tv_sec = 0; + res.tv_nsec = hrtimer_resolution; +#else + hrtimer_get_res(CLOCK_MONOTONIC, &res); +#endif + + *TickRate = res.tv_nsec + res.tv_sec * 1000000000ULL; + + return gcvSTATUS_OK; +} + +gctUINT32 +gckOS_ProfileToMS( + IN gctUINT64 Ticks + ) +{ +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) + return div_u64(Ticks, 1000000); +#else + gctUINT64 rem = Ticks; + gctUINT64 b = 1000000; + gctUINT64 res, d = 1; + gctUINT32 high = rem >> 32; + + /* Reduce the thing a bit first */ + res = 0; + if (high >= 1000000) + { + high /= 1000000; + res = (gctUINT64) high << 32; + rem -= (gctUINT64) (high * 1000000) << 32; + } + + while (((gctINT64) b > 0) && (b < rem)) + { + b <<= 1; + d <<= 1; + } + + do + { + if (rem >= b) + { + rem -= b; + res += d; + } + + b >>= 1; + d >>= 1; + } + while (d); + + return (gctUINT32) res; +#endif +} + +/******************************************************************************\ +******************************* Signal Management ****************************** +\******************************************************************************/ + +#undef _GC_OBJ_ZONE +#define _GC_OBJ_ZONE gcvZONE_SIGNAL + +/******************************************************************************* +** +** gckOS_CreateSignal +** +** Create a new signal. 
+** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctBOOL ManualReset +** If set to gcvTRUE, gckOS_Signal with gcvFALSE must be called in +** order to set the signal to nonsignaled state. +** If set to gcvFALSE, the signal will automatically be set to +** nonsignaled state by gckOS_WaitSignal function. +** +** OUTPUT: +** +** gctSIGNAL * Signal +** Pointer to a variable receiving the created gctSIGNAL. +*/ +gceSTATUS +gckOS_CreateSignal( + IN gckOS Os, + IN gctBOOL ManualReset, + OUT gctSIGNAL * Signal + ) +{ + gceSTATUS status; + gcsSIGNAL_PTR signal; + + gcmkHEADER_ARG("Os=0x%X ManualReset=%d", Os, ManualReset); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Signal != gcvNULL); + + /* Create an event structure. */ + signal = (gcsSIGNAL_PTR) kmalloc(sizeof(gcsSIGNAL), GFP_KERNEL | gcdNOWARN); + + if (signal == gcvNULL) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + /* Save the process ID. */ + signal->process = (gctHANDLE)(gctUINTPTR_T) _GetProcessID(); + + signal->done = 0; + init_waitqueue_head(&signal->wait); + spin_lock_init(&signal->lock); + signal->manualReset = ManualReset; + + atomic_set(&signal->ref, 1); + +#if gcdLINUX_SYNC_FILE +#ifndef CONFIG_SYNC_FILE + signal->timeline = gcvNULL; +# else + signal->fence = gcvNULL; +# endif +#endif + + gcmkONERROR(_AllocateIntegerId(&Os->signalDB, signal, &signal->id)); + + *Signal = (gctSIGNAL)(gctUINTPTR_T)signal->id; + + gcmkFOOTER_ARG("*Signal=0x%X", *Signal); + return gcvSTATUS_OK; + +OnError: + if (signal != gcvNULL) + { + kfree(signal); + } + + gcmkFOOTER_NO(); + return status; +} + +/******************************************************************************* +** +** gckOS_DestroySignal +** +** Destroy a signal. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctSIGNAL Signal +** Pointer to the gctSIGNAL. +** +** OUTPUT: +** +** Nothing. 
+*/ +gceSTATUS +gckOS_DestroySignal( + IN gckOS Os, + IN gctSIGNAL Signal + ) +{ + gceSTATUS status; + gcsSIGNAL_PTR signal; + gctBOOL acquired = gcvFALSE; + + gcmkHEADER_ARG("Os=0x%X Signal=0x%X", Os, Signal); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Signal != gcvNULL); + + mutex_lock(&Os->signalMutex); + acquired = gcvTRUE; + + gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal)); + + gcmkASSERT(signal->id == (gctUINT32)(gctUINTPTR_T)Signal); + + if (atomic_dec_and_test(&signal->ref)) + { + gcmkVERIFY_OK(_DestroyIntegerId(&Os->signalDB, signal->id)); + + /* Free the sgianl. */ + kfree(signal); + } + + mutex_unlock(&Os->signalMutex); + acquired = gcvFALSE; + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + if (acquired) + { + /* Release the mutex. */ + mutex_unlock(&Os->signalMutex); + } + + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckOS_Signal +** +** Set a state of the specified signal. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctSIGNAL Signal +** Pointer to the gctSIGNAL. +** +** gctBOOL State +** If gcvTRUE, the signal will be set to signaled state. +** If gcvFALSE, the signal will be set to nonsignaled state. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_Signal( + IN gckOS Os, + IN gctSIGNAL Signal, + IN gctBOOL State + ) +{ + gceSTATUS status; + gcsSIGNAL_PTR signal; +#if gcdLINUX_SYNC_FILE +#ifndef CONFIG_SYNC_FILE + struct sync_timeline * timeline = gcvNULL; +# else + struct dma_fence * fence = gcvNULL; +# endif +#endif + + gcmkHEADER_ARG("Os=0x%X Signal=0x%X State=%d", Os, Signal, State); + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Signal != gcvNULL); + + mutex_lock(&Os->signalMutex); + + status = _QueryIntegerId(&Os->signalDB, + (gctUINT32)(gctUINTPTR_T)Signal, + (gctPOINTER)&signal); + + if (gcmIS_ERROR(status)) + { + mutex_unlock(&Os->signalMutex); + gcmkONERROR(status); + } + + /* + * Signal saved in event is not referenced. Inc reference here to avoid + * concurrent issue: signaling the signal while another thread is destroying + * it. + */ + atomic_inc(&signal->ref); + + mutex_unlock(&Os->signalMutex); + + gcmkONERROR(status); + + gcmkASSERT(signal->id == (gctUINT32)(gctUINTPTR_T)Signal); + + spin_lock(&signal->lock); + + if (State) + { + signal->done = 1; + + wake_up(&signal->wait); + +#if gcdLINUX_SYNC_FILE +#ifndef CONFIG_SYNC_FILE + timeline = signal->timeline; +# else + fence = signal->fence; + signal->fence = NULL; +# endif +#endif + } + else + { + signal->done = 0; + } + + spin_unlock(&signal->lock); + +#if gcdLINUX_SYNC_FILE +#ifndef CONFIG_SYNC_FILE + /* Signal timeline. */ + if (timeline) + { + sync_timeline_signal(timeline); + } +# else + if (fence) + { + dma_fence_signal(fence); + dma_fence_put(fence); + } +# endif +#endif + + mutex_lock(&Os->signalMutex); + + if (atomic_dec_and_test(&signal->ref)) + { + gcmkVERIFY_OK(_DestroyIntegerId(&Os->signalDB, signal->id)); + + /* Free the sgianl. */ + kfree(signal); + } + + mutex_unlock(&Os->signalMutex); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckOS_UserSignal +** +** Set the specified signal which is owned by a process to signaled state. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctSIGNAL Signal +** Pointer to the gctSIGNAL. +** +** gctHANDLE Process +** Handle of process owning the signal. +** +** OUTPUT: +** +** Nothing. 
+*/
+gceSTATUS
+gckOS_UserSignal(
+    IN gckOS Os,
+    IN gctSIGNAL Signal,
+    IN gctHANDLE Process
+    )
+{
+    gceSTATUS status;
+
+    gcmkHEADER_ARG("Os=0x%X Signal=0x%X Process=%d",
+                   Os, Signal, (gctINT32)(gctUINTPTR_T)Process);
+
+    /* Set the signal to signaled state; Process is unused here. */
+    status = gckOS_Signal(Os, Signal, gcvTRUE);
+
+    gcmkFOOTER();
+    return status;
+}
+
+/*******************************************************************************
+**
+**  gckOS_WaitSignal
+**
+**  Wait for a signal to become signaled.
+**
+**  INPUT:
+**
+**      gckOS Os
+**          Pointer to an gckOS object.
+**
+**      gctSIGNAL Signal
+**          Pointer to the gctSIGNAL.
+**
+**      gctUINT32 Wait
+**          Number of milliseconds to wait.
+**          Pass the value of gcvINFINITE for an infinite wait.
+**
+**  OUTPUT:
+**
+**      Nothing.
+*/
+gceSTATUS
+gckOS_WaitSignal(
+    IN gckOS Os,
+    IN gctSIGNAL Signal,
+    IN gctBOOL Interruptable,
+    IN gctUINT32 Wait
+    )
+{
+    gceSTATUS status;
+    gcsSIGNAL_PTR signal;
+    int done;
+
+    gcmkHEADER_ARG("Os=0x%X Signal=0x%X Wait=0x%08X", Os, Signal, Wait);
+
+    /* Verify the arguments. */
+    gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
+    gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
+
+    gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
+
+    gcmkASSERT(signal->id == (gctUINT32)(gctUINTPTR_T)Signal);
+
+    /* Snapshot the done flag under the lock. */
+    spin_lock(&signal->lock);
+    done = signal->done;
+    spin_unlock(&signal->lock);
+
+    /*
+     * Do not need to lock below:
+     * 1. If signal already done, return immediately.
+     * 2. If signal not done, wait_event_xxx will handle correctly even read of
+     *    signal->done is not atomic.
+     *
+     * Resetting signal->done does not require the lock either:
+     * No other thread can query/wait an auto-reset signal, because that is a
+     * logic error.
+     */
+    if (done)
+    {
+        status = gcvSTATUS_OK;
+
+        if (!signal->manualReset)
+        {
+            /* Auto-reset signal: consume the signaled state. */
+            signal->done = 0;
+        }
+    }
+    else if (Wait == 0)
+    {
+        /* Polling mode: not signaled and no wait requested. */
+        status = gcvSTATUS_TIMEOUT;
+    }
+    else
+    {
+        /* Convert the wait from milliseconds to jiffies. */
+        long timeout = (Wait == gcvINFINITE)
+                     ? MAX_SCHEDULE_TIMEOUT
+                     : msecs_to_jiffies(Wait);
+
+        long ret;
+
+        if (Interruptable)
+        {
+            ret = wait_event_interruptible_timeout(signal->wait, signal->done, timeout);
+        }
+        else
+        {
+            ret = wait_event_timeout(signal->wait, signal->done, timeout);
+        }
+
+        if (likely(ret > 0))
+        {
+            status = gcvSTATUS_OK;
+
+            if (!signal->manualReset)
+            {
+                /* Auto reset. */
+                signal->done = 0;
+            }
+        }
+        else
+        {
+            /* ret == 0: timed out; -ERESTARTSYS: interrupted by a signal. */
+            status = (ret == -ERESTARTSYS) ? gcvSTATUS_INTERRUPTED
+                   : gcvSTATUS_TIMEOUT;
+        }
+    }
+
+OnError:
+    /* Return status. */
+    gcmkFOOTER_ARG("Signal=0x%lX status=%d", Signal, status);
+    return status;
+}
+
+gceSTATUS
+_QuerySignal(
+    IN gckOS Os,
+    IN gctSIGNAL Signal
+    )
+{
+    /*
+     * This function is called by 'has_signaled' callback of sync_timeline.
+     * By design, 'has_signaled' could be called in interrupt context, but
+     * in current driver, it can be called only when 'gckOS_Signal' and
+     * 'gckOS_CreateNativeFence'. Thus it's safe to use normal version of
+     * spinlock for 'Os->signalDB.lock' and 'signal->obj.wait.lock'.
+     */
+    gceSTATUS status;
+    gcsSIGNAL_PTR signal = gcvNULL;
+
+    status = _QueryIntegerId(&Os->signalDB,
+                             (gctUINT32)(gctUINTPTR_T)Signal,
+                             (gctPOINTER)&signal);
+
+    if (gcmIS_SUCCESS(status))
+    {
+        /* Report the signaled state as a status code. */
+        spin_lock(&signal->lock);
+        status = signal->done ? gcvSTATUS_TRUE : gcvSTATUS_FALSE;
+        spin_unlock(&signal->lock);
+    }
+
+    return status;
+}
+
+/*******************************************************************************
+**
+**  gckOS_MapSignal
+**
+**  Map a signal in to the current process space.
+**
+**  INPUT:
+**
+**      gckOS Os
+**          Pointer to an gckOS object.
+**
+**      gctSIGNAL Signal
+**          Pointer to the gctSIGNAL to map.
+**
+**      gctHANDLE Process
+**          Handle of process owning the signal.
+**
+**  OUTPUT:
+**
+**      gctSIGNAL * MappedSignal
+**          Pointer to a variable receiving the mapped gctSIGNAL.
+*/ +gceSTATUS +gckOS_MapSignal( + IN gckOS Os, + IN gctSIGNAL Signal, + IN gctHANDLE Process, + OUT gctSIGNAL * MappedSignal + ) +{ + gceSTATUS status; + gcsSIGNAL_PTR signal = gcvNULL; + gcmkHEADER_ARG("Os=0x%X Signal=0x%X Process=0x%X", Os, Signal, Process); + + gcmkVERIFY_ARGUMENT(Signal != gcvNULL); + gcmkVERIFY_ARGUMENT(MappedSignal != gcvNULL); + + mutex_lock(&Os->signalMutex); + + gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal)); + + if (atomic_inc_return(&signal->ref) <= 1) + { + /* The previous value is 0, it has been deleted. */ + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + *MappedSignal = (gctSIGNAL) Signal; + + mutex_unlock(&Os->signalMutex); + + /* Success. */ + gcmkFOOTER_ARG("*MappedSignal=0x%X", *MappedSignal); + return gcvSTATUS_OK; + +OnError: + mutex_unlock(&Os->signalMutex); + + gcmkFOOTER_NO(); + return status; +} + +/******************************************************************************* +** +** gckOS_UnmapSignal +** +** Unmap a signal . +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctSIGNAL Signal +** Pointer to that gctSIGNAL mapped. +*/ +gceSTATUS +gckOS_UnmapSignal( + IN gckOS Os, + IN gctSIGNAL Signal + ) +{ + return gckOS_DestroySignal(Os, Signal); +} + +/******************************************************************************* +** +** gckOS_CreateUserSignal +** +** Create a new signal to be used in the user space. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctBOOL ManualReset +** If set to gcvTRUE, gckOS_Signal with gcvFALSE must be called in +** order to set the signal to nonsignaled state. +** If set to gcvFALSE, the signal will automatically be set to +** nonsignaled state by gckOS_WaitSignal function. +** +** OUTPUT: +** +** gctINT * SignalID +** Pointer to a variable receiving the created signal's ID. 
+*/
+gceSTATUS
+gckOS_CreateUserSignal(
+    IN gckOS Os,
+    IN gctBOOL ManualReset,
+    OUT gctINT * SignalID
+    )
+{
+    gceSTATUS status;
+    gctSIZE_T signal;
+
+    /* Create a new signal; the opaque handle doubles as the integer ID. */
+    gcmkONERROR(gckOS_CreateSignal(Os, ManualReset, (gctSIGNAL *) &signal));
+    *SignalID = (gctINT) signal;
+
+OnError:
+    /* On success, status was set to gcvSTATUS_OK by gcmkONERROR. */
+    return status;
+}
+
+/*******************************************************************************
+**
+**  gckOS_DestroyUserSignal
+**
+**  Destroy a signal to be used in the user space.
+**
+**  INPUT:
+**
+**      gckOS Os
+**          Pointer to an gckOS object.
+**
+**      gctINT SignalID
+**          The signal's ID.
+**
+**  OUTPUT:
+**
+**      Nothing.
+*/
+gceSTATUS
+gckOS_DestroyUserSignal(
+    IN gckOS Os,
+    IN gctINT SignalID
+    )
+{
+    /* The user-space ID is the signal handle; forward to the kernel API. */
+    return gckOS_DestroySignal(Os, (gctSIGNAL)(gctUINTPTR_T)SignalID);
+}
+
+/*******************************************************************************
+**
+**  gckOS_WaitUserSignal
+**
+**  Wait for a signal used in the user mode to become signaled.
+**
+**  INPUT:
+**
+**      gckOS Os
+**          Pointer to an gckOS object.
+**
+**      gctINT SignalID
+**          Signal ID.
+**
+**      gctUINT32 Wait
+**          Number of milliseconds to wait.
+**          Pass the value of gcvINFINITE for an infinite wait.
+**
+**  OUTPUT:
+**
+**      Nothing.
+*/
+gceSTATUS
+gckOS_WaitUserSignal(
+    IN gckOS Os,
+    IN gctINT SignalID,
+    IN gctUINT32 Wait
+    )
+{
+    /* User-mode waits are always interruptible. */
+    return gckOS_WaitSignal(Os, (gctSIGNAL)(gctUINTPTR_T)SignalID, gcvTRUE, Wait);
+}
+
+/*******************************************************************************
+**
+**  gckOS_SignalUserSignal
+**
+**  Set a state of the specified signal to be used in the user space.
+**
+**  INPUT:
+**
+**      gckOS Os
+**          Pointer to an gckOS object.
+**
+**      gctINT SignalID
+**          SignalID.
+**
+**      gctBOOL State
+**          If gcvTRUE, the signal will be set to signaled state.
+**          If gcvFALSE, the signal will be set to nonsignaled state.
+**
+**  OUTPUT:
+**
+**      Nothing.
+*/ +gceSTATUS +gckOS_SignalUserSignal( + IN gckOS Os, + IN gctINT SignalID, + IN gctBOOL State + ) +{ + return gckOS_Signal(Os, (gctSIGNAL)(gctUINTPTR_T)SignalID, State); +} + + +/******************************************************************************\ +******************************** Software Timer ******************************** +\******************************************************************************/ + +void +_TimerFunction( + struct work_struct * work + ) +{ + gcsOSTIMER_PTR timer = (gcsOSTIMER_PTR)work; + + gctTIMERFUNCTION function = timer->function; + + function(timer->data); +} + +/******************************************************************************* +** +** gckOS_CreateTimer +** +** Create a software timer. +** +** INPUT: +** +** gckOS Os +** Pointer to the gckOS object. +** +** gctTIMERFUNCTION Function. +** Pointer to a call back function which will be called when timer is +** expired. +** +** gctPOINTER Data. +** Private data which will be passed to call back function. +** +** OUTPUT: +** +** gctPOINTER * Timer +** Pointer to a variable receiving the created timer. +*/ +gceSTATUS +gckOS_CreateTimer( + IN gckOS Os, + IN gctTIMERFUNCTION Function, + IN gctPOINTER Data, + OUT gctPOINTER * Timer + ) +{ + gceSTATUS status; + gcsOSTIMER_PTR pointer; + gcmkHEADER_ARG("Os=0x%X Function=0x%X Data=0x%X", Os, Function, Data); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Timer != gcvNULL); + + gcmkONERROR(gckOS_Allocate(Os, sizeof(gcsOSTIMER), (gctPOINTER)&pointer)); + + pointer->function = Function; + pointer->data = Data; + + INIT_DELAYED_WORK(&pointer->work, _TimerFunction); + + *Timer = pointer; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gckOS_DestroyTimer +** +** Destory a software timer. 
+**
+**  INPUT:
+**
+**      gckOS Os
+**          Pointer to the gckOS object.
+**
+**      gctPOINTER Timer
+**          Pointer to the timer to be destroyed.
+**
+**  OUTPUT:
+**
+**      Nothing.
+*/
+gceSTATUS
+gckOS_DestroyTimer(
+    IN gckOS Os,
+    IN gctPOINTER Timer
+    )
+{
+    gcsOSTIMER_PTR timer;
+    gcmkHEADER_ARG("Os=0x%X Timer=0x%X", Os, Timer);
+
+    gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
+    gcmkVERIFY_ARGUMENT(Timer != gcvNULL);
+
+    timer = (gcsOSTIMER_PTR)Timer;
+
+    /* Make sure the work is not running (or queued) before freeing it. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
+    cancel_delayed_work_sync(&timer->work);
+#else
+    /* Older kernels: cancel, then flush the queue to wait for completion. */
+    cancel_delayed_work(&timer->work);
+    flush_workqueue(Os->workqueue);
+#endif
+
+    gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, Timer));
+
+    gcmkFOOTER_NO();
+    return gcvSTATUS_OK;
+}
+
+/*******************************************************************************
+**
+**  gckOS_StartTimer
+**
+**  Schedule a software timer.
+**
+**  INPUT:
+**
+**      gckOS Os
+**          Pointer to the gckOS object.
+**
+**      gctPOINTER Timer
+**          Pointer to the timer to be scheduled.
+**
+**      gctUINT32 Delay
+**          Delay in milliseconds.
+**
+**  OUTPUT:
+**
+**      Nothing.
+*/ +gceSTATUS +gckOS_StartTimer( + IN gckOS Os, + IN gctPOINTER Timer, + IN gctUINT32 Delay + ) +{ + gcsOSTIMER_PTR timer; + + gcmkHEADER_ARG("Os=0x%X Timer=0x%X Delay=%u", Os, Timer, Delay); + + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Timer != gcvNULL); + gcmkVERIFY_ARGUMENT(Delay != 0); + + timer = (gcsOSTIMER_PTR)Timer; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) + mod_delayed_work(Os->workqueue, &timer->work, msecs_to_jiffies(Delay)); +#else + if (unlikely(delayed_work_pending(&timer->work))) + { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23) + cancel_delayed_work_sync(&timer->work); +#else + cancel_delayed_work(&timer->work); + flush_workqueue(Os->workqueue); +#endif + } + + queue_delayed_work(Os->workqueue, &timer->work, msecs_to_jiffies(Delay)); +#endif + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_StopTimer +** +** Cancel a unscheduled timer. +** +** INPUT: +** +** gckOS Os +** Pointer to the gckOS object. +** +** gctPOINTER Timer +** Pointer to the timer to be cancel. +** +** OUTPUT: +** +** Nothing. +*/ +gceSTATUS +gckOS_StopTimer( + IN gckOS Os, + IN gctPOINTER Timer + ) +{ + gcsOSTIMER_PTR timer; + gcmkHEADER_ARG("Os=0x%X Timer=0x%X", Os, Timer); + + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Timer != gcvNULL); + + timer = (gcsOSTIMER_PTR)Timer; + + cancel_delayed_work(&timer->work); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gckOS_GetProcessNameByPid( + IN gctINT Pid, + IN gctSIZE_T Length, + OUT gctUINT8_PTR String + ) +{ + struct task_struct *task; + + /* Get the task_struct of the task with pid. */ + rcu_read_lock(); + + task = FIND_TASK_BY_PID(Pid); + + if (task == gcvNULL) + { + rcu_read_unlock(); + return gcvSTATUS_NOT_FOUND; + } + + /* Get name of process. 
*/ + strncpy(String, task->comm, Length); + + rcu_read_unlock(); + + return gcvSTATUS_OK; +} + +gceSTATUS +gckOS_DumpCallStack( + IN gckOS Os + ) +{ + gcmkHEADER_ARG("Os=0x%X", Os); + + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + + dump_stack(); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +/******************************************************************************* +** +** gckOS_DetectProcessByName +** +** task->comm maybe part of process name, so this function +** can only be used for debugging. +** +** INPUT: +** +** gctCONST_POINTER Name +** Pointer to a string to hold name to be check. If the length +** of name is longer than TASK_COMM_LEN (16), use part of name +** to detect. +** +** OUTPUT: +** +** gcvSTATUS_TRUE if name of current process matches Name. +** +*/ +gceSTATUS +gckOS_DetectProcessByName( + IN gctCONST_POINTER Name + ) +{ + char comm[sizeof(current->comm)]; + + memset(comm, 0, sizeof(comm)); + + gcmkVERIFY_OK( + gckOS_GetProcessNameByPid(_GetProcessID(), sizeof(current->comm), comm)); + + return strstr(comm, Name) ? gcvSTATUS_TRUE + : gcvSTATUS_FALSE; +} + +#if gcdLINUX_SYNC_FILE +#ifndef CONFIG_SYNC_FILE +gceSTATUS +gckOS_CreateSyncTimeline( + IN gckOS Os, + IN gceCORE Core, + OUT gctHANDLE * Timeline + ) +{ + struct viv_sync_timeline * timeline; + char name[32]; + + snprintf(name, 32, "gccore-%u", (unsigned int) Core); + + /* Create viv sync timeline. */ + timeline = viv_sync_timeline_create(name, Os); + + if (timeline == gcvNULL) + { + /* Out of memory. */ + return gcvSTATUS_OUT_OF_MEMORY; + } + + *Timeline = (gctHANDLE) timeline; + return gcvSTATUS_OK; +} + +gceSTATUS +gckOS_DestroySyncTimeline( + IN gckOS Os, + IN gctHANDLE Timeline + ) +{ + struct viv_sync_timeline * timeline; + gcmkASSERT(Timeline != gcvNULL); + + /* Destroy timeline. 
*/ + timeline = (struct viv_sync_timeline *) Timeline; + sync_timeline_destroy(&timeline->obj); + + return gcvSTATUS_OK; +} + +gceSTATUS +gckOS_CreateNativeFence( + IN gckOS Os, + IN gctHANDLE Timeline, + IN gctSIGNAL Signal, + OUT gctINT * FenceFD + ) +{ + int fd = -1; + struct viv_sync_timeline *timeline; + struct sync_pt * pt = gcvNULL; + struct sync_fence * fence; + char name[32]; + gcsSIGNAL_PTR signal; + gceSTATUS status; + + gcmkHEADER_ARG("Os=0x%X Timeline=0x%X Signal=%d", + Os, Timeline, (gctUINT)(gctUINTPTR_T)Signal); + + gcmkONERROR( + _QueryIntegerId(&Os->signalDB, + (gctUINT32)(gctUINTPTR_T)Signal, + (gctPOINTER)&signal)); + + /* Cast timeline. */ + timeline = (struct viv_sync_timeline *) Timeline; + + fd = get_unused_fd_flags(O_CLOEXEC); + + if (fd < 0) + { + /* Out of resources. */ + gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); + } + + /* Create viv_sync_pt. */ + pt = viv_sync_pt_create(timeline, Signal); + + if (pt == gcvNULL) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + /* Reference sync_timeline. */ + signal->timeline = &timeline->obj; + + /* Build fence name. */ + snprintf(name, 32, "%.16s-signal_%lu", + current->comm, + (unsigned long)Signal); + + /* Create sync_fence. */ + fence = sync_fence_create(name, pt); + + if (fence == NULL) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + /* Install fence to fd. */ + sync_fence_install(fence, fd); + + *FenceFD = fd; + gcmkFOOTER_ARG("*FenceFD=%d", fd); + return gcvSTATUS_OK; + +OnError: + /* Error roll back. 
*/ + if (pt) + { + sync_pt_free(pt); + } + + if (fd > 0) + { + put_unused_fd(fd); + } + + gcmkFOOTER(); + return status; +} + +static void +_NativeFenceSignaled( + struct sync_fence *fence, + struct sync_fence_waiter *waiter + ) +{ + kfree(waiter); + sync_fence_put(fence); +} + +gceSTATUS +gckOS_WaitNativeFence( + IN gckOS Os, + IN gctHANDLE Timeline, + IN gctINT FenceFD, + IN gctUINT32 Timeout + ) +{ + struct sync_timeline * timeline; + struct sync_fence * fence; + gctBOOL wait; + gceSTATUS status = gcvSTATUS_OK; + + gcmkHEADER_ARG("Os=0x%X Timeline=0x%X FenceFD=%d Timeout=%u", + Os, Timeline, FenceFD, Timeout); + + /* Get shortcut. */ + timeline = (struct sync_timeline *) Timeline; + + /* Get sync fence. */ + fence = sync_fence_fdget(FenceFD); + + if (!fence) + { + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + if (sync_fence_wait(fence, 0) == 0) + { + /* Already signaled. */ + sync_fence_put(fence); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + } + + wait = gcvFALSE; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) + { + int i; + + for (i = 0; i < fence->num_fences; i++) + { + struct fence *f = fence->cbs[i].sync_pt; + struct sync_pt *pt = container_of(f, struct sync_pt, base); + + /* Do not need to wait on same timeline. */ + if ((sync_pt_parent(pt) != timeline) && !fence_is_signaled(f)) + { + wait = gcvTRUE; + break; + } + } + } +#else + { + struct list_head *pos; + list_for_each(pos, &fence->pt_list_head) + { + struct sync_pt * pt = + container_of(pos, struct sync_pt, pt_list); + + /* Do not need to wait on same timeline. */ + if (pt->parent != timeline) + { + wait = gcvTRUE; + break; + } + } + } +#endif + + if (wait) + { + int err; + long timeout = (Timeout == gcvINFINITE) ? - 1 : (long) Timeout; + err = sync_fence_wait(fence, timeout); + + /* Put the fence. 
*/ + sync_fence_put(fence); + + switch (err) + { + case 0: + break; + case -ETIME: + status = gcvSTATUS_TIMEOUT; + break; + default: + gcmkONERROR(gcvSTATUS_GENERIC_IO); + break; + } + } + else + { + int err; + struct sync_fence_waiter *waiter; + waiter = (struct sync_fence_waiter *)kmalloc( + sizeof (struct sync_fence_waiter), gcdNOWARN | GFP_KERNEL); + + /* + * schedule a callback to put the sync_fence. Otherwise after this function + * is returned, the caller may free it since it's signaled. Then there's + * be a real signal on a free'ed sync fence. + */ + if (!waiter) + { + sync_fence_put(fence); + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + /* Schedule a waiter callback. */ + sync_fence_waiter_init(waiter, _NativeFenceSignaled); + err = sync_fence_wait_async(fence, waiter); + + switch (err) + { + case 0: + /* Put fence in callback function. */ + break; + case 1: + /* already signaled. */ + sync_fence_put(fence); + break; + default: + sync_fence_put(fence); + gcmkONERROR(gcvSTATUS_GENERIC_IO); + break; + } + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +# else /* !CONFIG_SYNC_FILE */ + +gceSTATUS +gckOS_CreateSyncTimeline( + IN gckOS Os, + IN gceCORE Core, + OUT gctHANDLE * Timeline + ) +{ + struct viv_sync_timeline *timeline; + + char name[32]; + + snprintf(name, 32, "gccore-%u", (unsigned int) Core); + timeline = viv_sync_timeline_create(name, Os); + + if (timeline == gcvNULL) + { + /* Out of memory. */ + return gcvSTATUS_OUT_OF_MEMORY; + } + + *Timeline = (gctHANDLE) timeline; + return gcvSTATUS_OK; +} + +gceSTATUS +gckOS_DestroySyncTimeline( + IN gckOS Os, + IN gctHANDLE Timeline + ) +{ + struct viv_sync_timeline * timeline; + + /* Destroy timeline. 
*/ + timeline = (struct viv_sync_timeline *) Timeline; + viv_sync_timeline_destroy(timeline); + + return gcvSTATUS_OK; +} + +gceSTATUS +gckOS_CreateNativeFence( + IN gckOS Os, + IN gctHANDLE Timeline, + IN gctSIGNAL Signal, + OUT gctINT * FenceFD + ) +{ + struct dma_fence *fence = NULL; + struct sync_file *sync = NULL; + int fd = 0; + struct viv_sync_timeline *timeline; + gcsSIGNAL_PTR signal = gcvNULL; + gceSTATUS status = gcvSTATUS_OK; + + /* Create fence. */ + timeline = (struct viv_sync_timeline *) Timeline; + + gcmkONERROR( + _QueryIntegerId(&Os->signalDB, + (gctUINT32)(gctUINTPTR_T)Signal, + (gctPOINTER)&signal)); + + fence = viv_fence_create(timeline, signal); + + if (!fence) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + /* Create sync_file. */ + sync = sync_file_create(fence); + + if (!sync) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + /* Get a unused fd. */ + fd = get_unused_fd_flags(O_CLOEXEC); + + if (fd < 0) + { + gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); + } + + fd_install(fd, sync->file); + + *FenceFD = fd; + return gcvSTATUS_OK; + +OnError: + if (sync) + { + fput(sync->file); + } + + if (fence) + { + dma_fence_put(fence); + } + + if (fd > 0) + { + put_unused_fd(fd); + } + + *FenceFD = -1; + return status; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0) +/** + * sync_file_fdget() - get a sync_file from an fd + * @fd: fd referencing a fence + * + * Ensures @fd references a valid sync_file, increments the refcount of the + * backing file. Returns the sync_file or NULL in case of error. 
+ */ +static struct sync_file *sync_file_fdget(int fd) +{ + struct file *file = fget(fd); + + if (!file) + return NULL; + + return file->private_data; +} + +gceSTATUS +gckOS_WaitNativeFence( + IN gckOS Os, + IN gctHANDLE Timeline, + IN gctINT FenceFD, + IN gctUINT32 Timeout + ) +{ + struct viv_sync_timeline *timeline; + gceSTATUS status = gcvSTATUS_OK; + unsigned int i; + unsigned long timeout; + unsigned int numFences; + struct sync_file *sync_file; + + timeline = (struct viv_sync_timeline *) Timeline; + + sync_file = sync_file_fdget(FenceFD); + + if (!sync_file) + { + gcmkONERROR(gcvSTATUS_GENERIC_IO); + } + + numFences = sync_file->num_fences; + + timeout = msecs_to_jiffies(Timeout); + + for (i = 0; i < numFences; i++) + { + struct fence *f = sync_file->cbs[i].fence; + fence_get(f); + + if (f->context != timeline->context && + !fence_is_signaled(f)) + { + signed long ret; + ret = fence_wait_timeout(f, 1, timeout); + + if (ret == -ERESTARTSYS) + { + status = gcvSTATUS_INTERRUPTED; + fence_put(f); + break; + } + else if (ret <= 0) + { + status = gcvSTATUS_TIMEOUT; + fence_put(f); + break; + } + else + { + /* wait success. 
*/ + timeout -= ret; + } + } + + fence_put(f); + } + + return gcvSTATUS_OK; + +OnError: + return status; +} + +# else + +gceSTATUS +gckOS_WaitNativeFence( + IN gckOS Os, + IN gctHANDLE Timeline, + IN gctINT FenceFD, + IN gctUINT32 Timeout + ) +{ + struct viv_sync_timeline *timeline; + gceSTATUS status = gcvSTATUS_OK; + unsigned int i; + unsigned long timeout; + unsigned int numFences; + struct dma_fence *fence; + struct dma_fence **fences; + + timeline = (struct viv_sync_timeline *) Timeline; + + fence = sync_file_get_fence(FenceFD); + + if (!fence) + { + gcmkONERROR(gcvSTATUS_GENERIC_IO); + } + + if (dma_fence_is_array(fence)) + { + struct dma_fence_array *array = to_dma_fence_array(fence); + fences = array->fences; + numFences = array->num_fences; + } + else + { + fences = &fence; + numFences = 1; + } + + timeout = msecs_to_jiffies(Timeout); + + for (i = 0; i < numFences; i++) + { + struct dma_fence *f = fences[i]; + + if (f->context != timeline->context && + !dma_fence_is_signaled(f)) + { + signed long ret; + ret = dma_fence_wait_timeout(f, 1, timeout); + + if (ret == -ERESTARTSYS) + { + status = gcvSTATUS_INTERRUPTED; + break; + } + else if (ret <= 0) + { + status = gcvSTATUS_TIMEOUT; + break; + } + else + { + /* wait success. */ + timeout -= ret; + } + } + } + + dma_fence_put(fence); + + return gcvSTATUS_OK; + +OnError: + return status; +} + +# endif +# endif +#endif + +#if gcdSECURITY +gceSTATUS +gckOS_AllocatePageArray( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T PageCount, + OUT gctPOINTER * PageArrayLogical, + OUT gctPHYS_ADDR * PageArrayPhysical + ) +{ + gceSTATUS status = gcvSTATUS_OK; + PLINUX_MDL mdl; + gctUINT32* table; + gctUINT32 offset; + gctSIZE_T bytes; + gckALLOCATOR allocator; + + gcmkHEADER_ARG("Os=0x%X Physical=0x%X PageCount=%u", + Os, Physical, PageCount); + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Physical != gcvNULL); + gcmkVERIFY_ARGUMENT(PageCount > 0); + + bytes = PageCount * gcmSIZEOF(gctUINT32); + gcmkONERROR(gckOS_AllocateNonPagedMemory( + Os, + gcvFALSE, + gcvALLOC_FLAG_CONTIGUOUS, + &bytes, + PageArrayPhysical, + PageArrayLogical + )); + + table = *PageArrayLogical; + + /* Convert pointer to MDL. */ + mdl = (PLINUX_MDL)Physical; + + allocator = mdl->allocator; + + /* Get all the physical addresses and store them in the page table. */ + + offset = 0; + PageCount = PageCount / (PAGE_SIZE / 4096); + + /* Try to get the user pages so DMA can happen. */ + while (PageCount-- > 0) + { + unsigned long phys = ~0; + + gctPHYS_ADDR_T phys_addr; + + allocator->ops->Physical(allocator, mdl, offset * PAGE_SIZE, &phys_addr); + + phys = (unsigned long)phys_addr; + + table[offset] = phys & PAGE_MASK; + + offset += 1; + } + +OnError: + + /* Return the status. */ + gcmkFOOTER(); + return status; +} +#endif + +gceSTATUS +gckOS_CPUPhysicalToGPUPhysical( + IN gckOS Os, + IN gctPHYS_ADDR_T CPUPhysical, + IN gctPHYS_ADDR_T * GPUPhysical + ) +{ + gcsPLATFORM * platform; + gcmkHEADER_ARG("CPUPhysical=%p", CPUPhysical); + + platform = Os->device->platform; + + if (platform && platform->ops->getGPUPhysical) + { + gcmkVERIFY_OK( + platform->ops->getGPUPhysical(platform, CPUPhysical, GPUPhysical)); + } + else + { + *GPUPhysical = CPUPhysical; + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gckOS_GPUPhysicalToCPUPhysical( + IN gckOS Os, + IN gctUINT32 GPUPhysical, + IN gctPHYS_ADDR_T * CPUPhysical + ) +{ + gcsPLATFORM * platform; + gcmkHEADER_ARG("GPUPhysical=0x%X", GPUPhysical); + + platform = Os->device->platform; + + if (platform && platform->ops->getCPUPhysical) + { + gcmkVERIFY_OK( + platform->ops->getCPUPhysical(platform, GPUPhysical, CPUPhysical)); + } + else + { + *CPUPhysical = GPUPhysical; + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gckOS_PhysicalToPhysicalAddress( + IN 
gckOS Os, + IN gctPOINTER Physical, + IN gctUINT32 Offset, + OUT gctPHYS_ADDR_T * PhysicalAddress + ) +{ + PLINUX_MDL mdl = (PLINUX_MDL)Physical; + gckALLOCATOR allocator = mdl->allocator; + + if (allocator) + { + return allocator->ops->Physical(allocator, mdl, Offset, PhysicalAddress); + } + + return gcvSTATUS_NOT_SUPPORTED; +} + +static int fd_release(struct inode *inode, struct file *file) +{ + gcsFDPRIVATE_PTR private = (gcsFDPRIVATE_PTR)file->private_data; + + if (private && private->release) + { + return private->release(private); + } + + return 0; +} + +static const struct file_operations fd_fops = +{ + .release = fd_release, +}; + +gceSTATUS +gckOS_GetFd( + IN gctSTRING Name, + IN gcsFDPRIVATE_PTR Private, + OUT gctINT * Fd + ) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27) + *Fd = anon_inode_getfd(Name, &fd_fops, Private, O_RDWR); + + if (*Fd < 0) + { + return gcvSTATUS_OUT_OF_RESOURCES; + } + + return gcvSTATUS_OK; +#else + return gcvSTATUS_NOT_SUPPORTED; +#endif +} + +gceSTATUS +gckOS_QueryOption( + IN gckOS Os, + IN gctCONST_STRING Option, + OUT gctUINT32 * Value + ) +{ + gckGALDEVICE device = Os->device; + + if (!strcmp(Option, "physBase")) + { + *Value = device->physBase; + return gcvSTATUS_OK; + } + else if (!strcmp(Option, "physSize")) + { + *Value = device->physSize; + return gcvSTATUS_OK; + } + else if (!strcmp(Option, "mmu")) + { +#if gcdSECURITY + *Value = 0; +#else + *Value = device->args.mmu; +#endif + return gcvSTATUS_OK; + } + else if (!strcmp(Option, "contiguousSize")) + { + *Value = device->contiguousSize; + return gcvSTATUS_OK; + } + else if (!strcmp(Option, "contiguousBase")) + { + *Value = (gctUINT32)device->contiguousBase; + return gcvSTATUS_OK; + } + else if (!strcmp(Option, "externalSize")) + { + *Value = device->externalSize; + return gcvSTATUS_OK; + } + else if (!strcmp(Option, "externalBase")) + { + *Value = device->externalBase; + return gcvSTATUS_OK; + } + else if (!strcmp(Option, "recovery")) + { + *Value = 
device->args.recovery; + return gcvSTATUS_OK; + } + else if (!strcmp(Option, "stuckDump")) + { + *Value = device->args.stuckDump; + return gcvSTATUS_OK; + } + else if (!strcmp(Option, "powerManagement")) + { + *Value = device->args.powerManagement; + return gcvSTATUS_OK; + } + else if (!strcmp(Option, "TA")) + { + *Value = 0; + return gcvSTATUS_OK; + } + else if (!strcmp(Option, "gpuProfiler")) + { + *Value = device->args.gpuProfiler; + return gcvSTATUS_OK; + } + + return gcvSTATUS_NOT_SUPPORTED; +} + +gceSTATUS +gckOS_MemoryGetSGT( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T Offset, + IN gctSIZE_T Bytes, + OUT gctPOINTER *SGT + ) +{ + PLINUX_MDL mdl; + gckALLOCATOR allocator; + gceSTATUS status = gcvSTATUS_OK; + + if (!Physical) + { + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + mdl = (PLINUX_MDL)Physical; + allocator = mdl->allocator; + + if (!allocator->ops->GetSGT) + { + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + } + + if (Bytes > 0) + { + gcmkONERROR(allocator->ops->GetSGT(allocator, mdl, Offset, Bytes, SGT)); + } + +OnError: + return status; +} + +gceSTATUS +gckOS_MemoryMmap( + IN gckOS Os, + IN gctPHYS_ADDR Physical, + IN gctSIZE_T skipPages, + IN gctSIZE_T numPages, + INOUT gctPOINTER Vma + ) +{ + PLINUX_MDL mdl; + PLINUX_MDL_MAP mdlMap; + gckALLOCATOR allocator; + gceSTATUS status = gcvSTATUS_OK; + gctBOOL cacheable = gcvFALSE; + + if (!Physical) + { + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + mdl = (PLINUX_MDL)Physical; + allocator = mdl->allocator; + + if (!allocator->ops->Mmap) + { + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + } + + mutex_lock(&mdl->mapsMutex); + + mdlMap = FindMdlMap(mdl, _GetProcessID()); + if (mdlMap) + { + cacheable = mdlMap->cacheable; + } + + mutex_unlock(&mdl->mapsMutex); + + gcmkONERROR(allocator->ops->Mmap(allocator, mdl, cacheable, skipPages, numPages, Vma)); + +OnError: + return status; +} + +/******************************************************************************* +** +** gckOS_WrapMemory +** +** 
Import a number of pages allocated by other allocator. +** +** INPUT: +** +** gckOS Os +** Pointer to an gckOS object. +** +** gctUINT32 Flag +** Memory type. +** +** OUTPUT: +** +** gctSIZE_T * Bytes +** Pointer to a variable that hold the number of bytes allocated. +** +** gctPHYS_ADDR * Physical +** Pointer to a variable that will hold the physical address of the +** allocation. +*/ +gceSTATUS +gckOS_WrapMemory( + IN gckOS Os, + IN gcsUSER_MEMORY_DESC_PTR Desc, + OUT gctSIZE_T *Bytes, + OUT gctPHYS_ADDR * Physical, + OUT gctBOOL *Contiguous + ) +{ + PLINUX_MDL mdl = gcvNULL; + gceSTATUS status = gcvSTATUS_OUT_OF_MEMORY; + gckALLOCATOR allocator; + gcsATTACH_DESC desc; + gctSIZE_T bytes = 0; + + gcmkHEADER_ARG("Os=0x%X ", Os); + + /* Verify the arguments. */ + gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); + gcmkVERIFY_ARGUMENT(Physical != gcvNULL); + + mdl = _CreateMdl(Os); + if (mdl == gcvNULL) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + if (Desc->flag & gcvALLOC_FLAG_DMABUF) + { + desc.dmaBuf.dmabuf = gcmUINT64_TO_PTR(Desc->dmabuf); + +#if defined(CONFIG_DMA_SHARED_BUFFER) + { + struct dma_buf *dmabuf = (struct dma_buf*)desc.dmaBuf.dmabuf; + bytes = dmabuf->size; + } +#endif + } + else if (Desc->flag & gcvALLOC_FLAG_USERMEMORY) + { + desc.userMem.memory = gcmUINT64_TO_PTR(Desc->logical); + desc.userMem.physical = Desc->physical; + desc.userMem.size = Desc->size; + bytes = Desc->size; + } + else if (Desc->flag & gcvALLOC_FLAG_EXTERNAL_MEMORY) + { + desc.externalMem.info = Desc->externalMemoryInfo; + } + else + { + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + } + + /* Walk all allocators. 
*/ + list_for_each_entry(allocator, &Os->allocatorList, link) + { + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, + "%s(%d) Flag = %x allocator->capability = %x", + __FUNCTION__, __LINE__, Desc->flag, allocator->capability); + + if ((Desc->flag & allocator->capability) != Desc->flag) + { + status = gcvSTATUS_NOT_SUPPORTED; + continue; + } + + if (Desc->flag == gcvALLOC_FLAG_EXTERNAL_MEMORY) + { + /* Use name to match suitable allocator for external memory. */ + if (!strncmp(Desc->externalMemoryInfo.allocatorName, + allocator->name, gcdEXTERNAL_MEMORY_NAME_MAX)) + { + status = gcvSTATUS_NOT_SUPPORTED; + continue; + } + } + + status = allocator->ops->Attach(allocator, &desc, mdl); + + if (gcmIS_SUCCESS(status)) + { + mdl->allocator = allocator; + break; + } + } + + /* Check status. */ + gcmkONERROR(status); + + mdl->dmaHandle = 0; + mdl->addr = 0; + + mdl->bytes = bytes ? bytes : mdl->numPages * PAGE_SIZE; + *Bytes = mdl->bytes; + + /* Return physical address. */ + *Physical = (gctPHYS_ADDR) mdl; + + *Contiguous = mdl->contiguous; + + /* + * Add this to a global list. + * Will be used by get physical address + * and mapuser pointer functions. + */ + mutex_lock(&Os->mdlMutex); + list_add_tail(&mdl->link, &Os->mdlHead); + mutex_unlock(&Os->mdlMutex); + + /* Success. */ + gcmkFOOTER_ARG("*Physical=0x%X", *Physical); + return gcvSTATUS_OK; + +OnError: + if (mdl != gcvNULL) + { + /* Free the memory. */ + _DestroyMdl(mdl); + } + + /* Return the status. 
*/ + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckOS_GetPolicyID( + IN gckOS Os, + IN gceSURF_TYPE Type, + OUT gctUINT32_PTR PolicyID, + OUT gctUINT32_PTR AXIConfig + ) +{ + gcsPLATFORM * platform = Os->device->platform; + + if (platform && platform->ops->getPolicyID) + { + return platform->ops->getPolicyID(platform, Type, PolicyID, AXIConfig); + } + + return gcvSTATUS_NOT_SUPPORTED; +} + diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_os.h b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_os.h new file mode 100644 index 000000000000..6f63109903e1 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_os.h @@ -0,0 +1,123 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#ifndef __gc_hal_kernel_os_h_ +#define __gc_hal_kernel_os_h_ + +typedef struct _LINUX_MDL LINUX_MDL, *PLINUX_MDL; +typedef struct _LINUX_MDL_MAP LINUX_MDL_MAP, *PLINUX_MDL_MAP; + +struct _LINUX_MDL_MAP +{ + gctINT pid; + + /* map references. */ + gctUINT32 count; + + struct vm_area_struct * vma; + gctPOINTER vmaAddr; + gctBOOL cacheable; + + struct list_head link; +}; + +struct _LINUX_MDL +{ + gckOS os; + + atomic_t refs; + + /* Kernel address. */ + char * addr; + + /* Size and covered page count. 
*/ + size_t bytes; + gctINT numPages; + + gctBOOL contiguous; + dma_addr_t dmaHandle; + + gctBOOL cacheable; + + struct mutex mapsMutex; + struct list_head mapsHead; + + /* Pointer to allocator which allocates memory for this mdl. */ + void * allocator; + + /* Private data used by allocator. */ + void * priv; + + uint gid; + + struct list_head link; +}; + +extern PLINUX_MDL_MAP +FindMdlMap( + IN PLINUX_MDL Mdl, + IN gctINT PID + ); + +typedef struct _DRIVER_ARGS +{ + gctUINT64 InputBuffer; + gctUINT64 InputBufferSize; + gctUINT64 OutputBuffer; + gctUINT64 OutputBufferSize; +} +DRIVER_ARGS; + +#endif /* __gc_hal_kernel_os_h_ */ diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_platform.h b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_platform.h new file mode 100644 index 000000000000..f83354d47735 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_platform.h @@ -0,0 +1,313 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#ifndef _gc_hal_kernel_platform_h_ +#define _gc_hal_kernel_platform_h_ +#include +#include +#if USE_LINUX_PCIE +#include +#endif + +typedef struct _gcsMODULE_PARAMETERS +{ + gctINT irqLine; + gctUINT registerMemBase; + gctUINT registerMemSize; + gctINT irqLine2D; + gctUINT registerMemBase2D; + gctUINT registerMemSize2D; + gctINT irqLineVG; + gctUINT registerMemBaseVG; + gctUINT registerMemSizeVG; + gctUINT contiguousSize; + gctUINT contiguousBase; + gctUINT contiguousRequested; + gctUINT externalSize; + gctUINT externalBase; + gctUINT bankSize; + gctINT fastClear; + gceCOMPRESSION_OPTION compression; + gctINT powerManagement; + gctINT gpuProfiler; + gctINT signal; + gctUINT baseAddress; + gctUINT physSize; + gctUINT logFileSize; + gctUINT recovery; + gctUINT stuckDump; + gctUINT showArgs; + gctUINT gpu3DMinClock; + gctBOOL registerMemMapped; + gctPOINTER registerMemAddress; + gctINT irqs[gcvCORE_COUNT]; + gctUINT registerBases[gcvCORE_COUNT]; + gctUINT registerSizes[gcvCORE_COUNT]; + gctUINT chipIDs[gcvCORE_COUNT]; +} +gcsMODULE_PARAMETERS; + +typedef struct soc_platform gcsPLATFORM; + +typedef struct soc_platform_ops +{ + + /******************************************************************************* + ** + ** adjustParam + ** + ** Override content of arguments, if a argument is not changed here, it will + ** keep as default value or value set by insmod command line. + */ + gceSTATUS + (*adjustParam)( + IN gcsPLATFORM * Platform, + OUT gcsMODULE_PARAMETERS *Args + ); + + /******************************************************************************* + ** + ** getPower + ** + ** Prepare power and clock operation. + */ + gceSTATUS + (*getPower)( + IN gcsPLATFORM * Platform + ); + + /******************************************************************************* + ** + ** putPower + ** + ** Finish power and clock operation. 
+ */ + gceSTATUS + (*putPower)( + IN gcsPLATFORM * Platform + ); + + /******************************************************************************* + ** + ** setPower + ** + ** Set power state of specified GPU. + ** + ** INPUT: + ** + ** gceCORE GPU + ** GPU neeed to config. + ** + ** gceBOOL Enable + ** Enable or disable power. + */ + gceSTATUS + (*setPower)( + IN gcsPLATFORM * Platform, + IN gceCORE GPU, + IN gctBOOL Enable + ); + + /******************************************************************************* + ** + ** setClock + ** + ** Set clock state of specified GPU. + ** + ** INPUT: + ** + ** gceCORE GPU + ** GPU neeed to config. + ** + ** gceBOOL Enable + ** Enable or disable clock. + */ + gceSTATUS + (*setClock)( + IN gcsPLATFORM * Platform, + IN gceCORE GPU, + IN gctBOOL Enable + ); + + /******************************************************************************* + ** + ** reset + ** + ** Reset GPU outside. + ** + ** INPUT: + ** + ** gceCORE GPU + ** GPU neeed to reset. + */ + gceSTATUS + (*reset)( + IN gcsPLATFORM * Platform, + IN gceCORE GPU + ); + + /******************************************************************************* + ** + ** getGPUPhysical + ** + ** Convert CPU physical address to GPU physical address if they are + ** different. + */ + gceSTATUS + (*getGPUPhysical)( + IN gcsPLATFORM * Platform, + IN gctPHYS_ADDR_T CPUPhysical, + OUT gctPHYS_ADDR_T * GPUPhysical + ); + + /******************************************************************************* + ** + ** getCPUPhysical + ** + ** Convert GPU physical address to CPU physical address if they are + ** different. + */ + gceSTATUS + (*getCPUPhysical)( + IN gcsPLATFORM * Platform, + IN gctUINT32 GPUPhysical, + OUT gctPHYS_ADDR_T * CPUPhysical + ); + + /******************************************************************************* + ** + ** adjustProt + ** + ** Override Prot flag when mapping paged memory to userspace. 
+ */ + gceSTATUS + (*adjustProt)( + IN struct vm_area_struct * vma + ); + + /******************************************************************************* + ** + ** shrinkMemory + ** + ** Do something to collect memory, eg, act as oom killer. + */ + gceSTATUS + (*shrinkMemory)( + IN gcsPLATFORM * Platform + ); + + /******************************************************************************* + ** + ** cache + ** + ** Cache operation. + */ + gceSTATUS + (*cache)( + IN gcsPLATFORM * Platform, + IN gctUINT32 ProcessID, + IN gctPHYS_ADDR Handle, + IN gctUINT32 Physical, + IN gctPOINTER Logical, + IN gctSIZE_T Bytes, + IN gceCACHEOPERATION Operation + ); + + /******************************************************************************* + ** + ** getPolicyID + ** + ** Get policyID for a specified surface type. + */ + gceSTATUS + (*getPolicyID)( + IN gcsPLATFORM * Platform, + IN gceSURF_TYPE Type, + OUT gctUINT32_PTR PolicyID, + OUT gctUINT32_PTR AXIConfig + ); +} +gcsPLATFORM_OPERATIONS; + +enum +{ + /* GPU can't issue more that 32bit physical address */ + gcvPLATFORM_FLAG_LIMIT_4G_ADDRESS = 1 << 0, +}; + +struct soc_platform +{ +#if USE_LINUX_PCIE + struct pci_dev* device; + struct pci_driver* driver; +#else + struct platform_device* device; + struct platform_driver* driver; +#endif + + const char *name; + gcsPLATFORM_OPERATIONS* ops; + /* PLATFORM specific flags */ + gctUINT32 flagBits; +}; + +#if USE_LINUX_PCIE +int soc_platform_init(struct pci_driver *pdrv, gcsPLATFORM **platform); +#else +int soc_platform_init(struct platform_driver *pdrv, gcsPLATFORM **platform); +#endif +int soc_platform_terminate(gcsPLATFORM *platform); + +#endif diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_security_channel.c b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_security_channel.c new file mode 100644 index 000000000000..d0969ff24661 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_security_channel.c @@ -0,0 +1,426 @@ 
+/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. 
+* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#include "gc_hal_kernel_linux.h" +#include + +#include "tee_client_api.h" + +#define _GC_OBJ_ZONE gcvZONE_OS + +#define GPU3D_UUID { 0xcc9f80ea, 0xa836, 0x11e3, { 0x9b, 0x07, 0x78, 0x2b, 0xcb, 0x5c, 0xf3, 0xe3 } } + +static const TEEC_UUID gpu3d_uuid = GPU3D_UUID; +TEEC_Context teecContext; + +typedef struct _gcsSecurityChannel +{ + gckOS os; + TEEC_Session session; + int * virtual; + TEEC_SharedMemory inputBuffer; + gctUINT32 bytes; + gctPOINTER mutex; +} +gcsSecurityChannel; + +TEEC_SharedMemory * +gpu3d_allocate_secure_mem( + gckOS Os, + unsigned int size + ) +{ + TEEC_Result result; + TEEC_Context *context = &teecContext; + TEEC_SharedMemory *shm = NULL; + void *handle = NULL; + gctPHYS_ADDR_T phyAddr; + gceSTATUS status; + gctSIZE_T bytes = size; + + shm = kmalloc(sizeof(TEEC_SharedMemory), GFP_KERNEL); + + if (NULL == shm) + { + return NULL; + } + + memset(shm, 0, sizeof(TEEC_SharedMemory)); + + status = gckOS_AllocatePagedMemoryEx( + Os, + gcvALLOC_FLAG_SECURITY, + bytes, + gcvNULL, + (gctPHYS_ADDR *)&handle); + + if (gcmIS_ERROR(status)) + { + kfree(shm); + return NULL; + } + + status = gckOS_PhysicalToPhysicalAddress( + Os, + handle, + 0, + &phyAddr); + + if (gcmIS_ERROR(status)) + { + kfree(shm); + return NULL; + } + + /* record the handle into 
shm->user_data */ + shm->userdata = handle; + + /* [b] Bulk input buffer. */ + shm->size = size; + shm->flags = TEEC_MEM_INPUT; + + /* Use TEE Client API to register the underlying memory buffer. */ + shm->phyAddr = (void *)(gctUINT32)phyAddr; + + result = TEEC_RegisterSharedMemory( + context, + shm); + + if (result != TEEC_SUCCESS) + { + gckOS_FreePagedMemory(Os, (gctPHYS_ADDR)handle, shm->size); + kfree(shm); + return NULL; + } + + return shm; +} + +void gpu3d_release_secure_mem( + gckOS Os, + void *shm_handle + ) +{ + TEEC_SharedMemory *shm = shm_handle; + void * handle; + + if (!shm) + { + return; + } + + handle = shm->userdata; + + TEEC_ReleaseSharedMemory(shm); + gckOS_FreePagedMemory(Os, (gctPHYS_ADDR)handle, shm->size); + + kfree(shm); + + return; +} + +static TEEC_Result gpu3d_session_callback( + TEEC_Session* session, + uint32_t commandID, + TEEC_Operation* operation, + void* userdata + ) +{ + gcsSecurityChannel *channel = userdata; + + if (channel == gcvNULL) + { + return TEEC_ERROR_BAD_PARAMETERS; + } + + switch (commandID) + { + case gcvTA_CALLBACK_ALLOC_SECURE_MEM: + { + uint32_t size = operation->params[0].value.a; + TEEC_SharedMemory *shm = NULL; + + shm = gpu3d_allocate_secure_mem(channel->os, size); + if (shm == NULL) + { + return TEEC_ERROR_OUT_OF_MEMORY; + } + + /* use the value to save the pointer in client side */ + operation->params[0].value.a = (uint32_t)shm; + operation->params[0].value.b = (uint32_t)shm->phyAddr; + + break; + } + case gcvTA_CALLBACK_FREE_SECURE_MEM: + { + TEEC_SharedMemory *shm = (TEEC_SharedMemory *)operation->params[0].value.a; + + gpu3d_release_secure_mem(channel->os, shm); + break; + } + default: + break; + } + + return TEEC_SUCCESS; +} + +gceSTATUS +gckOS_OpenSecurityChannel( + IN gckOS Os, + IN gceCORE GPU, + OUT gctUINT32 *Channel + ) +{ + gceSTATUS status; + TEEC_Result result; + static bool initialized = gcvFALSE; + gcsSecurityChannel *channel = gcvNULL; + + TEEC_Operation operation = {0}; + + /* Connect to TEE. 
*/ + if (initialized == gcvFALSE) + { + result = TEEC_InitializeContext(NULL, &teecContext); + + if (result != TEEC_SUCCESS) + { + gcmkONERROR(gcvSTATUS_CHIP_NOT_READY); + } + + initialized = gcvTRUE; + } + + /* Construct channel. */ + gcmkONERROR( + gckOS_Allocate(Os, gcmSIZEOF(*channel), (gctPOINTER *)&channel)); + + gckOS_ZeroMemory(channel, gcmSIZEOF(gcsSecurityChannel)); + + channel->os = Os; + + gcmkONERROR(gckOS_CreateMutex(Os, &channel->mutex)); + + /* Allocate shared memory for passing gcTA_INTERFACE. */ + channel->bytes = gcmSIZEOF(gcsTA_INTERFACE); + channel->virtual = kmalloc(channel->bytes, GFP_KERNEL | __GFP_NOWARN); + + if (!channel->virtual) + { + gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); + } + + channel->inputBuffer.size = channel->bytes; + channel->inputBuffer.flags = TEEC_MEM_INPUT | TEEC_MEM_OUTPUT; + channel->inputBuffer.phyAddr = (void *)virt_to_phys(channel->virtual); + + result = TEEC_RegisterSharedMemory(&teecContext, &channel->inputBuffer); + + if (result != TEEC_SUCCESS) + { + gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); + } + + operation.paramTypes = TEEC_PARAM_TYPES( + TEEC_VALUE_INPUT, + TEEC_NONE, + TEEC_NONE, + TEEC_NONE); + + operation.params[0].value.a = GPU; + + /* Open session with TEE application. */ + result = TEEC_OpenSession( + &teecContext, + &channel->session, + &gpu3d_uuid, + TEEC_LOGIN_USER, + NULL, + &operation, + NULL); + + /* Prepare callback. 
*/ + TEEC_RegisterCallback(&channel->session, gpu3d_session_callback, channel); + + *Channel = (gctUINT32)channel; + + return gcvSTATUS_OK; + +OnError: + if (channel) + { + if (channel->virtual) + { + } + + if (channel->mutex) + { + gcmkVERIFY_OK(gckOS_DeleteMutex(Os, channel->mutex)); + } + + gcmkVERIFY_OK(gckOS_Free(Os, channel)); + } + + return status; +} + +gceSTATUS +gckOS_CloseSecurityChannel( + IN gctUINT32 Channel + ) +{ + return gcvSTATUS_OK; +} + +gceSTATUS +gckOS_CallSecurityService( + IN gctUINT32 Channel, + IN gcsTA_INTERFACE *Interface + ) +{ + gceSTATUS status; + TEEC_Result result; + gcsSecurityChannel *channel = (gcsSecurityChannel *)Channel; + TEEC_Operation operation = {0}; + + gcmkHEADER(); + gcmkVERIFY_ARGUMENT(Channel != 0); + + gckOS_AcquireMutex(channel->os, channel->mutex, gcvINFINITE); + + gckOS_MemCopy(channel->virtual, Interface, channel->bytes); + + operation.paramTypes = TEEC_PARAM_TYPES( + TEEC_MEMREF_PARTIAL_INPUT, + TEEC_NONE, + TEEC_NONE, + TEEC_NONE); + + /* Note: we use the updated size in the MemRef output by the encryption. */ + operation.params[0].memref.parent = &channel->inputBuffer; + operation.params[0].memref.offset = 0; + operation.params[0].memref.size = sizeof(gcsTA_INTERFACE); + operation.started = true; + + /* Start the commit command within the TEE application. 
*/ + result = TEEC_InvokeCommand( + &channel->session, + gcvTA_COMMAND_DISPATCH, + &operation, + NULL); + + gckOS_MemCopy(Interface, channel->virtual, channel->bytes); + + gckOS_ReleaseMutex(channel->os, channel->mutex); + + if (result != TEEC_SUCCESS) + { + gcmkONERROR(gcvSTATUS_GENERIC_IO); + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS +gckOS_InitSecurityChannel( + IN gctUINT32 Channel + ) +{ + gceSTATUS status; + TEEC_Result result; + gcsSecurityChannel *channel = (gcsSecurityChannel *)Channel; + TEEC_Operation operation = {0}; + + gcmkHEADER(); + gcmkVERIFY_ARGUMENT(Channel != 0); + + operation.paramTypes = TEEC_PARAM_TYPES( + TEEC_MEMREF_PARTIAL_INPUT, + TEEC_NONE, + TEEC_NONE, + TEEC_NONE); + + /* Note: we use the updated size in the MemRef output by the encryption. */ + operation.params[0].memref.parent = &channel->inputBuffer; + operation.params[0].memref.offset = 0; + operation.params[0].memref.size = gcmSIZEOF(gcsTA_INTERFACE); + operation.started = true; + + /* Start the commit command within the TEE application. 
*/ + result = TEEC_InvokeCommand( + &channel->session, + gcvTA_COMMAND_INIT, + &operation, + NULL); + + if (result != TEEC_SUCCESS) + { + gcmkONERROR(gcvSTATUS_GENERIC_IO); + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_security_channel_emulator.c b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_security_channel_emulator.c new file mode 100644 index 000000000000..c0b17ceead52 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_security_channel_emulator.c @@ -0,0 +1,116 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#include "gc_hal_kernel_linux.h" + +#define _GC_OBJ_ZONE gcvZONE_OS + +#if gcdENABLE_TRUST_APPLICATION + +gceSTATUS +gckOS_OpenSecurityChannel( + IN gckOS Os, + IN gceCORE Core, + OUT gctUINT32 *Channel + ) +{ + *Channel = Core + 1; + return gcvSTATUS_OK; +} + +gceSTATUS +gckOS_InitSecurityChannel( + OUT gctUINT32 Channel + ) +{ + return gcvSTATUS_OK; +} + +gceSTATUS +gckOS_CloseSecurityChannel( + IN gctUINT32 Channel + ) +{ + return gcvSTATUS_OK; +} + +extern gceSTATUS +TAEmulator ( + gceCORE, + void * + ); + +gceSTATUS +gckOS_CallSecurityService( + IN gctUINT32 Channel, + IN gcsTA_INTERFACE *Interface + ) +{ + gceCORE core; + gceSTATUS status; + gcmkHEADER(); + gcmkVERIFY_ARGUMENT(Channel != 0); + + core = (gceCORE)(Channel - 1); + + TAEmulator(core, Interface); + + status = Interface->result; + + gcmkFOOTER(); + return status; +} + +#endif diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_sync.c b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_sync.c new file mode 100644 index 000000000000..2154c785279f --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_sync.c @@ -0,0 +1,373 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. 
+* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#include +#include + +#if gcdLINUX_SYNC_FILE + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gc_hal_kernel_sync.h" +#include "gc_hal_kernel_linux.h" + +#ifndef CONFIG_SYNC_FILE + +static struct sync_pt * viv_sync_pt_dup(struct sync_pt *sync_pt) +{ + gceSTATUS status; + struct viv_sync_pt *pt; + struct viv_sync_pt *src; + struct viv_sync_timeline *obj; + + src = (struct viv_sync_pt *)sync_pt; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) + obj = (struct viv_sync_timeline *)sync_pt_parent(sync_pt); +#else + obj = (struct viv_sync_timeline *)sync_pt->parent; +#endif + + /* Create the new sync_pt. */ + pt = (struct viv_sync_pt *) + sync_pt_create(&obj->obj, sizeof(struct viv_sync_pt)); + + pt->stamp = src->stamp; + + /* Reference signal. */ + status = gckOS_MapSignal(obj->os, + src->signal, + gcvNULL /* (gctHANDLE) _GetProcessID() */, + &pt->signal); + + if (gcmIS_ERROR(status)) { + sync_pt_free((struct sync_pt *)pt); + return NULL; + } + + return (struct sync_pt *)pt; +} + +static int viv_sync_pt_has_signaled(struct sync_pt *sync_pt) +{ + gceSTATUS status; + struct viv_sync_pt *pt; + struct viv_sync_timeline *obj; + + pt = (struct viv_sync_pt *)sync_pt; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) + obj = (struct viv_sync_timeline *)sync_pt_parent(sync_pt); +#else + obj = (struct viv_sync_timeline *)sync_pt->parent; +#endif + + status = _QuerySignal(obj->os, pt->signal); + + if (gcmIS_ERROR(status)) { + /* Error. */ + return -1; + } + + return (int) status; +} + +static int viv_sync_pt_compare(struct sync_pt *a, struct sync_pt *b) +{ + int ret; + struct viv_sync_pt *pt1 = (struct viv_sync_pt *)a; + struct viv_sync_pt *pt2 = (struct viv_sync_pt *)b; + + ret = (pt1->stamp < pt2->stamp) ? -1 + : (pt1->stamp == pt2->stamp) ? 
0 + : 1; + + return ret; +} + +static void viv_sync_pt_free(struct sync_pt *sync_pt) +{ + struct viv_sync_pt *pt; + struct viv_sync_timeline *obj; + + pt = (struct viv_sync_pt *)sync_pt; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) + obj = (struct viv_sync_timeline *)sync_pt_parent(sync_pt); +#else + obj = (struct viv_sync_timeline *)sync_pt->parent; +#endif + + gckOS_DestroySignal(obj->os, pt->signal); +} + +static void viv_timeline_value_str(struct sync_timeline *timeline, + char *str, int size) +{ + struct viv_sync_timeline *obj; + + obj = (struct viv_sync_timeline *)timeline; + snprintf(str, size, "stamp_%llu", obj->stamp); +} + +static void viv_pt_value_str(struct sync_pt *sync_pt, char *str, int size) +{ + struct viv_sync_pt *pt; + + pt = (struct viv_sync_pt *)sync_pt; + snprintf(str, size, "signal_%lu@stamp_%llu", + (unsigned long)pt->signal, pt->stamp); +} + +static struct sync_timeline_ops viv_timeline_ops = +{ + .driver_name = "viv_gpu_sync", + .dup = viv_sync_pt_dup, + .has_signaled = viv_sync_pt_has_signaled, + .compare = viv_sync_pt_compare, + .free_pt = viv_sync_pt_free, + .timeline_value_str = viv_timeline_value_str, + .pt_value_str = viv_pt_value_str, +}; + +struct viv_sync_timeline * viv_sync_timeline_create(const char *name, gckOS os) +{ + struct viv_sync_timeline * obj; + + obj = (struct viv_sync_timeline *) + sync_timeline_create(&viv_timeline_ops, sizeof(struct viv_sync_timeline), name); + + obj->os = os; + obj->stamp = 0; + + return obj; +} + +struct sync_pt * viv_sync_pt_create(struct viv_sync_timeline *obj, + gctSIGNAL Signal) +{ + gceSTATUS status; + struct viv_sync_pt *pt; + + pt = (struct viv_sync_pt *) + sync_pt_create(&obj->obj, sizeof(struct viv_sync_pt)); + + pt->stamp = obj->stamp++; + + /* Dup signal. 
*/ + status = gckOS_MapSignal(obj->os, + Signal, + gcvNULL /* (gctHANDLE) _GetProcessID() */, + &pt->signal); + + if (gcmIS_ERROR(status)) { + sync_pt_free((struct sync_pt *)pt); + return NULL; + } + + return (struct sync_pt *)pt; +} + +#else + +struct viv_sync_timeline * viv_sync_timeline_create(const char *name, gckOS Os) +{ + struct viv_sync_timeline *timeline; + + timeline = kmalloc(sizeof(struct viv_sync_timeline), + gcdNOWARN | GFP_KERNEL); + + if (!timeline) + return NULL; + + strncpy(timeline->name, name, sizeof(timeline->name)); + timeline->context = dma_fence_context_alloc(1); + atomic64_set(&timeline->seqno, 0); + timeline->os = Os; + + return timeline; +} + +void viv_sync_timeline_destroy(struct viv_sync_timeline *timeline) +{ + kfree(timeline); +} + +static const char * viv_fence_get_driver_name(struct dma_fence *fence) +{ + return "viv_gpu_sync"; +} + +static const char * viv_fence_get_timeline_name(struct dma_fence *fence) +{ + struct viv_fence *f = (struct viv_fence *)fence; + return f->parent->name; +} + +/* Same as fence_signaled. */ +static inline bool __viv_fence_signaled(struct dma_fence *fence) +{ + struct viv_fence *f = (struct viv_fence *)fence; + struct viv_sync_timeline *timeline = f->parent; + gceSTATUS status; + + status = _QuerySignal(timeline->os, f->signal); + + return (status == gcvSTATUS_TRUE) ? true : false; +} + +static bool viv_fence_enable_signaling(struct dma_fence *fence) +{ + /* fence is locked already. */ + return !__viv_fence_signaled(fence); +} + +static bool viv_fence_signaled(struct dma_fence *fence) +{ + /* fence could be locked, could be not. 
*/ + return __viv_fence_signaled(fence); +} + +static void viv_fence_release(struct dma_fence *fence) +{ + struct viv_fence *f = (struct viv_fence *)fence; + struct viv_sync_timeline *timeline = f->parent; + + if (f->signal) + gckOS_DestroySignal(timeline->os, f->signal); + + kfree(fence); +} + +static struct dma_fence_ops viv_fence_ops = +{ + .get_driver_name = viv_fence_get_driver_name, + .get_timeline_name = viv_fence_get_timeline_name, + .enable_signaling = viv_fence_enable_signaling, + .signaled = viv_fence_signaled, + .wait = dma_fence_default_wait, + .release = viv_fence_release, +}; + +struct dma_fence * viv_fence_create(struct viv_sync_timeline *timeline, + gcsSIGNAL *signal) +{ + gceSTATUS status; + struct viv_fence *fence; + struct dma_fence *old_fence = NULL; + unsigned seqno; + + fence = kzalloc(sizeof(struct viv_fence), gcdNOWARN | GFP_KERNEL); + + if (!fence) + return NULL; + + /* Reference signal in fence. */ + status = gckOS_MapSignal(timeline->os, (gctSIGNAL)(uintptr_t)signal->id, + NULL, &fence->signal); + + if (gcmIS_ERROR(status)) { + kfree(fence); + return NULL; + } + + spin_lock_init(&fence->lock); + + fence->parent = timeline; + + seqno = (unsigned)atomic64_inc_return(&timeline->seqno); + + dma_fence_init((struct dma_fence *)fence, &viv_fence_ops, + &fence->lock, timeline->context, seqno); + + /* + * Reference fence in signal. + * Be aware of recursive reference!! + */ + spin_lock(&signal->lock); + + if (signal->fence) { + old_fence = signal->fence; + signal->fence = NULL; + } + + if (!signal->done) { + signal->fence = (struct dma_fence*)fence; + dma_fence_get((struct dma_fence*)fence); + } + + spin_unlock(&signal->lock); + + if (old_fence) + dma_fence_put(old_fence); + + if (!signal->fence) { + /* Fence already signaled. 
*/ + gckOS_DestroySignal(timeline->os, fence->signal); + fence->signal = NULL; + + dma_fence_signal_locked((struct dma_fence*)fence); + } + + return (struct dma_fence*)fence; +} + +#endif + +#endif diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_sync.h b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_sync.h new file mode 100644 index 000000000000..d723e469a470 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_sync.h @@ -0,0 +1,151 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#ifndef __gc_hal_kernel_sync_h_ +#define __gc_hal_kernel_sync_h_ + +#include + +#ifndef CONFIG_SYNC_FILE + +/* sync.h is in drivers/staging/android/ for now. */ +#include + +#include +#include + +struct viv_sync_timeline +{ + /* Parent object. */ + struct sync_timeline obj; + + /* Timestamp when sync_pt is created. */ + gctUINT64 stamp; + + /* Pointer to os struct. */ + gckOS os; +}; + + +struct viv_sync_pt +{ + /* Parent object. */ + struct sync_pt pt; + + /* Reference signal. */ + gctSIGNAL signal; + + /* Timestamp when sync_pt is created. */ + gctUINT64 stamp; +}; + +/* Create viv_sync_timeline object. 
*/ +struct viv_sync_timeline * viv_sync_timeline_create(const char *name, gckOS Os); + +/* Create viv_sync_pt object. */ +struct sync_pt * viv_sync_pt_create(struct viv_sync_timeline *obj, + gctSIGNAL signal); + +#else + +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) +# include +# include +#else +# include +# include +#endif + +#include +#include +#include "gc_hal_kernel_linux.h" + +struct viv_sync_timeline +{ + char name[64]; + + /* Parent object. */ + u64 context; + + /* Timestamp when sync_pt is created. */ + atomic64_t seqno; + + /* Pointer to os struct. */ + gckOS os; +}; + +struct viv_fence +{ + /* must be the first. */ + struct dma_fence base; + spinlock_t lock; + + struct viv_sync_timeline *parent; + + /* link with signal. */ + gctSIGNAL signal; +}; + +struct viv_sync_timeline * viv_sync_timeline_create(const char *name, gckOS Os); + +void viv_sync_timeline_destroy(struct viv_sync_timeline *timeline); + +struct dma_fence * viv_fence_create(struct viv_sync_timeline *timeline, + gcsSIGNAL *signal); + +#endif + +#endif /* __gc_hal_kernel_sync_h_ */ diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/platform/default/gc_hal_kernel_platform_default.c b/drivers/mcst/gpu-viv/hal/os/linux/kernel/platform/default/gc_hal_kernel_platform_default.c new file mode 100644 index 000000000000..1f522b6ced68 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/platform/default/gc_hal_kernel_platform_default.c @@ -0,0 +1,146 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom 
the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#include "gc_hal_kernel_linux.h" +#include "gc_hal_kernel_platform.h" + + +gceSTATUS +_AdjustParam( + IN gcsPLATFORM *Platform, + OUT gcsMODULE_PARAMETERS *Args + ) +{ +#if USE_LINUX_PCIE + struct pci_dev *pdev = Platform->device; + unsigned char irqline = pdev->irq; + + if ((Args->irqLine2D != -1) && (Args->irqLine2D != irqline)) + { + Args->irqLine2D = irqline; + } + if ((Args->irqLine != -1) && (Args->irqLine != irqline)) + { + Args->irqLine = irqline; + } +#endif + return gcvSTATUS_OK; +} + +static struct soc_platform_ops default_ops = +{ + .adjustParam = _AdjustParam, +}; + +static struct soc_platform default_platform = +{ + .name = __FILE__, + .ops = &default_ops, +}; + +#if USE_LINUX_PCIE + +int soc_platform_init(struct pci_driver *pdrv, + struct soc_platform **platform) +{ + *platform = &default_platform; + return 0; +} + +int soc_platform_terminate(struct soc_platform *platform) +{ + return 0; +} + +#else +static struct platform_device *default_dev; + +int soc_platform_init(struct platform_driver *pdrv, + struct soc_platform **platform) +{ + int ret; + default_dev = platform_device_alloc(pdrv->driver.name, -1); + + if (!default_dev) { + printk(KERN_ERR "galcore: platform_device_alloc failed.\n"); + return -ENOMEM; + } + + /* Add device */ + ret = platform_device_add(default_dev); + if (ret) { + printk(KERN_ERR "galcore: platform_device_add failed.\n"); + goto put_dev; + } + + *platform = &default_platform; + return 0; + +put_dev: + platform_device_put(default_dev); + + return ret; +} + +int soc_platform_terminate(struct soc_platform *platform) +{ + if (default_dev) { + platform_device_unregister(default_dev); + default_dev = NULL; + } + + return 0; +} +#endif diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/platform/mcst/gc_hal_kernel_platform_mcst.c b/drivers/mcst/gpu-viv/hal/os/linux/kernel/platform/mcst/gc_hal_kernel_platform_mcst.c new file mode 100644 index 
000000000000..bb3b9732d08f --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/platform/mcst/gc_hal_kernel_platform_mcst.c @@ -0,0 +1,155 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2016 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2016 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + +#include + +#include "gc_hal_kernel_linux.h" +#include "gc_hal_kernel_platform.h" + + +#define VRAM_BAR 0 +#define GC2500_BAR 3 + + +static struct platform_device *mcst_dev; + +/******************************************************************************* +** +** adjustParam +** +** Override content of arguments, if a argument is not changed here, it will +** keep as default value or value set by insmod command line. +*/ +static gceSTATUS +_AdjustParam ( + IN gcsPLATFORM * Platform, + OUT gcsMODULE_PARAMETERS *Args + ) +{ + struct pci_dev *pdev; + if (!mcst_dev || !mcst_dev->dev.parent) + return gcvSTATUS_NOT_FOUND; + + pdev = to_pci_dev(mcst_dev->dev.parent); + Args->irqLine = pdev->irq; + + Args->registerMemBase = pci_resource_start(pdev, GC2500_BAR); + Args->registerMemSize = pci_resource_len(pdev, GC2500_BAR); +#if 0 + Args->contiguousBase = pci_resource_start(pdev, VRAM_BAR); + Args->contiguousSize = pci_resource_len(pdev, VRAM_BAR); */ +#else + Args->contiguousSize = (128 << 20); /* Do not forget set CONFIG_FORCE_MAX_ZONEORDER=16 ! 
*/ +#endif + Args->bankSize = 65536; + + return gcvSTATUS_OK; +} + +static struct soc_platform_ops mcst_ops = +{ + .adjustParam = _AdjustParam, +}; + +static struct soc_platform mcst_platform = +{ + .name = __FILE__, + .ops = &mcst_ops, + .flagBits = gcvPLATFORM_FLAG_LIMIT_4G_ADDRESS, +}; + +int soc_platform_init(struct platform_driver *pdrv, + struct soc_platform **platform) +{ + int ret; + struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_MGA2, NULL); + if (!pdev) + return -ENODEV; + + mcst_dev = platform_device_alloc(pdrv->driver.name, -1); + + if (!mcst_dev) { + printk(KERN_ERR "galcore: platform_device_alloc failed.\n"); + return -ENOMEM; + } + mcst_dev->dev.parent = &pdev->dev; + /* Add device */ + ret = platform_device_add(mcst_dev); + if (ret) { + printk(KERN_ERR "galcore: platform_device_add failed.\n"); + goto put_dev; + } + + set_dma_ops(&mcst_dev->dev, get_dma_ops(&pdev->dev)); + *platform = &mcst_platform; + return 0; + +put_dev: + platform_device_put(mcst_dev); + + return ret; +} + +int soc_platform_terminate(struct soc_platform *platform) +{ + if (mcst_dev) { + pci_dev_put(to_pci_dev(mcst_dev->dev.parent)); + platform_device_unregister(mcst_dev); + mcst_dev = NULL; + } + + return 0; +} + diff --git a/drivers/mcst/gpu-viv/hal/os/linux/kernel/platform/mcst/gc_hal_kernel_platform_mcst.config b/drivers/mcst/gpu-viv/hal/os/linux/kernel/platform/mcst/gc_hal_kernel_platform_mcst.config new file mode 100644 index 000000000000..88df6124fdea --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/os/linux/kernel/platform/mcst/gc_hal_kernel_platform_mcst.config @@ -0,0 +1 @@ +EXTRA_CFLAGS += diff --git a/drivers/mcst/gpu-viv/hal/security_v1/gc_hal_ta.c b/drivers/mcst/gpu-viv/hal/security_v1/gc_hal_ta.c new file mode 100644 index 000000000000..7b677e43a543 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/security_v1/gc_hal_ta.c @@ -0,0 +1,348 @@ +/**************************************************************************** +* +* The MIT License 
(MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. 
+* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#include "gc_hal_types.h" +#include "gc_hal_base.h" +#include "gc_hal_security_interface.h" +#include "gc_hal_ta.h" +#include "gc_hal.h" + +#define _GC_OBJ_ZONE gcvZONE_KERNEL + +/* +* Responsibility of TA (trust application). +* 1) Start FE. +* When non secure driver asks for start FE. TA enable MMU and start FE. +* TA always execute MMU enable processes because it has no idea whether +* GPU has been power off. +* +* 2) Setup page table +* When non secure driver asks for set up GPU address to physical address +* mapping, TA check the attribute of physical address and attribute of +* GPU address to make sure they are match. Then it change page table. +* +*/ + +gcTA_MMU SharedMmu = gcvNULL; + +/******************************************************************************* +** +** gcTA_Construct +** +** Construct a new gcTA object. +*/ +int +gcTA_Construct( + IN gctaOS Os, + IN gceCORE Core, + OUT gcTA *TA + ) +{ + gceSTATUS status; + gctPOINTER pointer; + gcTA ta = gcvNULL; + + gcmkHEADER(); + gcmkVERIFY_ARGUMENT(TA != gcvNULL); + + /* Construct a gcTA object. 
*/ + gcmkONERROR(gctaOS_Allocate(sizeof(struct _gcTA), &pointer)); + + gctaOS_ZeroMemory(pointer, sizeof(struct _gcTA)); + + ta = (gcTA)pointer; + + ta->os = Os; + ta->core = Core; + + gcmkONERROR(gctaHARDWARE_Construct(ta, &ta->hardware)); + + if (gctaHARDWARE_IsFeatureAvailable(ta->hardware, gcvFEATURE_SECURITY)) + { + if (SharedMmu == gcvNULL) + { + gcmkONERROR(gctaMMU_Construct(ta, &ta->mmu)); + + /* Record shared MMU. */ + SharedMmu = ta->mmu; + ta->destoryMmu = gcvTRUE; + } + else + { + ta->mmu = SharedMmu; + ta->destoryMmu = gcvFALSE; + } + + gcmkONERROR(gctaHARDWARE_PrepareFunctions(ta->hardware)); + } + + *TA = ta; + + gcmkFOOTER_NO(); + return 0; + +OnError: + if (ta) + { + if (ta->mmu && ta->destoryMmu) + { + gcmkVERIFY_OK(gctaMMU_Destory(ta->mmu)); + } + + if (ta->hardware) + { + gcmkVERIFY_OK(gctaHARDWARE_Destroy(ta->hardware)); + } + + gcmkVERIFY_OK(gctaOS_Free(ta)); + } + gcmkFOOTER(); + return status; +} + +/******************************************************************************* +** +** gcTA_Construct +** +** Destroy a gcTA object. +*/ +int +gcTA_Destroy( + IN gcTA TA + ) +{ + if (TA->mmu && TA->destoryMmu) + { + gcmkVERIFY_OK(gctaMMU_Destory(TA->mmu)); + } + + if (TA->hardware) + { + gcmkVERIFY_OK(gctaHARDWARE_Destroy(TA->hardware)); + } + + gcmkVERIFY_OK(gctaOS_Free(TA)); + + /* Destroy. */ + return 0; +} + + +/* +* Map a scatter gather list into gpu address space. +* +*/ +gceSTATUS +gcTA_MapMemory( + IN gcTA TA, + IN gctUINT32 *PhysicalArray, + IN gctPHYS_ADDR_T Physical, + IN gctUINT32 PageCount, + OUT gctUINT32 *GPUAddress + ) +{ + gceSTATUS status; + gcTA_MMU mmu; + gctUINT32 pageCount = PageCount; + gctUINT32 i; + gctUINT32 gpuAddress = *GPUAddress; + gctBOOL mtlbSecure = gcvFALSE; + gctBOOL physicalSecure = gcvFALSE; + + mmu = TA->mmu; + + /* Fill in page table. 
*/ + for (i = 0; i < pageCount; i++) + { + gctUINT32 physical; + gctUINT32_PTR entry; + + if (PhysicalArray) + { + physical = PhysicalArray[i]; + } + else + { + physical = (gctUINT32)Physical + 4096 * i; + } + + gcmkONERROR(gctaMMU_GetPageEntry(mmu, gpuAddress, gcvNULL, &entry, &mtlbSecure)); + + status = gctaOS_IsPhysicalSecure(TA->os, physical, &physicalSecure); + + if (gcmIS_SUCCESS(status) && physicalSecure != mtlbSecure) + { + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + } + + gctaMMU_SetPage(mmu, physical, entry); + + gpuAddress += 4096; + } + + return gcvSTATUS_OK; + +OnError: + return status; +} + +gceSTATUS +gcTA_UnmapMemory( + IN gcTA TA, + IN gctUINT32 GPUAddress, + IN gctUINT32 PageCount + ) +{ + gceSTATUS status; + + gcmkONERROR(gctaMMU_FreePages(TA->mmu, GPUAddress, PageCount)); + + return gcvSTATUS_OK; + +OnError: + return status; +} + +gceSTATUS +gcTA_StartCommand( + IN gcTA TA, + IN gctUINT32 Address, + IN gctUINT32 Bytes + ) +{ + gctaHARDWARE_Execute(TA, Address, Bytes); + return gcvSTATUS_OK; +} + +int +gcTA_Dispatch( + IN gcTA TA, + IN gcsTA_INTERFACE * Interface + ) +{ + int command = Interface->command; + + gceSTATUS status = gcvSTATUS_OK; + + switch (command) + { + case KERNEL_START_COMMAND: + /* Enable MMU every time FE starts. + ** Because if normal world stop GPU and power off GPU, MMU states is reset. 
+ */ + gcmkONERROR(gctaHARDWARE_SetMMU(TA->hardware, TA->mmu->mtlbLogical)); + + gcmkONERROR(gcTA_StartCommand( + TA, + Interface->u.StartCommand.address, + Interface->u.StartCommand.bytes + )); + break; + + case KERNEL_MAP_MEMORY: + gcmkONERROR(gcTA_MapMemory( + TA, + Interface->u.MapMemory.physicals, + Interface->u.MapMemory.physical, + Interface->u.MapMemory.pageCount, + &Interface->u.MapMemory.gpuAddress + )); + + break; + + case KERNEL_UNMAP_MEMORY: + status = gcTA_UnmapMemory( + TA, + Interface->u.UnmapMemory.gpuAddress, + Interface->u.UnmapMemory.pageCount + ); + break; + + case KERNEL_DUMP_MMU_EXCEPTION: + status = gctaHARDWARE_DumpMMUException(TA->hardware); + break; + + case KERNEL_HANDLE_MMU_EXCEPTION: + status = gctaHARDWARE_HandleMMUException( + TA->hardware, + Interface->u.HandleMMUException.mmuStatus, + Interface->u.HandleMMUException.physical, + Interface->u.HandleMMUException.gpuAddress + ); + break; + + case KERNEL_READ_MMU_EXCEPTION: + status = gctaHARDWARE_ReadMMUException( + TA->hardware, + &Interface->u.ReadMMUException.mmuStatus, + &Interface->u.ReadMMUException.mmuException + ); + break; + + default: + gcmkASSERT(0); + + status = gcvSTATUS_INVALID_ARGUMENT; + break; + } + +OnError: + Interface->result = status; + + return 0; +} + + + diff --git a/drivers/mcst/gpu-viv/hal/security_v1/gc_hal_ta.h b/drivers/mcst/gpu-viv/hal/security_v1/gc_hal_ta.h new file mode 100644 index 000000000000..a29513bd2877 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/security_v1/gc_hal_ta.h @@ -0,0 +1,373 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, 
+* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. 
If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#ifndef _GC_HAL_TA_H_ +#define _GC_HAL_TA_H_ +#include "gc_hal_types.h" +#include "gc_hal_security_interface.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +typedef struct _gctaOS * gctaOS; +typedef struct _gcTA * gcTA; + +typedef struct _gcTA_HARDWARE * gcTA_HARDWARE; +typedef struct _gcTA_MMU * gcTA_MMU; + +/* + Trust Application is a object needed to be created as a context in trust zone. + One client for a core. +*/ +typedef struct _gcTA { + /* gctaOS object */ + gctaOS os; + + gceCORE core; + + gcTA_MMU mmu; + + gcTA_HARDWARE hardware; + + gctBOOL destoryMmu; +} gcsTA; + +typedef struct _gcTA_MMU +{ + gctaOS os; + + gctSIZE_T mtlbBytes; + gctPOINTER mtlbLogical; + gctPHYS_ADDR mtlbPhysical; + + gctPOINTER stlbs; + + gctPOINTER safePageLogical; + gctPHYS_ADDR safePagePhysical; + + gctPOINTER nonSecureSafePageLogical; + gctPHYS_ADDR nonSecureSafePagePhysical; + + gctPOINTER mutex; +} +gcsTA_MMU; + +gceSTATUS HALDECL +TAEmulator( + gceCORE Core, + void * Interface + ); + +int +gcTA_Construct( + IN gctaOS Os, + IN gceCORE Core, + OUT gcTA *TA +); + +int +gcTA_Destroy( + IN gcTA TA +); + +int +gcTA_Dispatch( + IN gcTA TA, + IN OUT gcsTA_INTERFACE * Interface +); + +/************************************* +* Porting layer +*/ + +gceSTATUS +gctaOS_ConstructOS( + IN gckOS Os, + OUT gctaOS *TAos + ); + +gceSTATUS +gctaOS_DestroyOS( + IN gctaOS Os + ); + +gceSTATUS +gctaOS_Allocate( + IN gctUINT32 Bytes, + OUT gctPOINTER *Pointer + ); + +gceSTATUS +gctaOS_Free( + IN gctPOINTER Pointer + ); + +gceSTATUS +gctaOS_AllocateSecurityMemory( + IN gctaOS Os, + IN gctSIZE_T *Bytes, + OUT gctPOINTER *Logical, + OUT gctPOINTER *Physical + ); + +gceSTATUS +gctaOS_FreeSecurityMemory( + IN gctaOS Os, + IN gctSIZE_T Bytes, + IN 
gctPOINTER Logical, + OUT gctPOINTER Physical + ); + +gceSTATUS +gctaOS_AllocateNonSecurityMemory( + IN gctaOS Os, + IN gctSIZE_T *Bytes, + OUT gctPOINTER *Logical, + OUT gctPOINTER *Physical + ); + +gceSTATUS +gctaOS_FreeNonSecurityMemory( + IN gctaOS Os, + IN gctSIZE_T Bytes, + IN gctPOINTER Logical, + OUT gctPOINTER Physical + ); + + + +gceSTATUS +gctaOS_GetPhysicalAddress( + IN gctaOS Os, + IN gctPOINTER Logical, + OUT gctPHYS_ADDR_T * Physical + ); + +gceSTATUS gctaOS_WriteRegister( + IN gctaOS Os, IN gceCORE Core, + IN gctUINT32 Address, + IN gctUINT32 Data + ); + +gceSTATUS gctaOS_ReadRegister( + IN gctaOS Os, IN gceCORE Core, + IN gctUINT32 Address, + IN gctUINT32 *Data + ); + +gceSTATUS +gctaOS_MemCopy( + IN gctUINT8_PTR Dest, + IN gctUINT8_PTR Src, + IN gctUINT32 Bytes + ); + +gceSTATUS +gctaOS_ZeroMemory( + IN gctUINT8_PTR Dest, + IN gctUINT32 Bytes + ); + +void +gctaOS_CacheFlush( + IN gctUINT8_PTR Dest, + IN gctUINT32 Bytes + ); + +void +gctaOS_CacheClean( + IN gctUINT8_PTR Dest, + IN gctUINT32 Bytes + ); + +void +gctaOS_CacheInvalidate( + IN gctUINT8_PTR Dest, + IN gctUINT32 Bytes + ); + +gceSTATUS +gctaOS_IsPhysicalSecure( + IN gctaOS Os, + IN gctUINT32 Physical, + OUT gctBOOL *Secure + ); + +gceSTATUS +gctaOS_Delay( + IN gctaOS Os, + IN gctUINT32 Delay + ); + +gceSTATUS +gctaOS_SetGPUPower( + IN gctaOS Os, + IN gctUINT32 Core, + IN gctBOOL Clock, + IN gctBOOL Power + ); + +/* +** gctaHARDWARE +*/ +gceSTATUS +gctaHARDWARE_Construct( + IN gcTA TA, + OUT gcTA_HARDWARE * Hardware + ); + +gceSTATUS +gctaHARDWARE_Destroy( + IN gcTA_HARDWARE Hardware + ); + +gceSTATUS +gctaHARDWARE_Execute( + IN gcTA TA, + IN gctUINT32 Address, + IN gctUINT32 Bytes + ); + +gceSTATUS +gctaHARDWARE_End( + IN gcTA_HARDWARE Hardware, + IN gctPOINTER Logical, + IN OUT gctUINT32 * Bytes + ); + +gceSTATUS +gctaHARDWARE_SetMMU( + IN gcTA_HARDWARE Hardware, + IN gctPOINTER Logical + ); + +gceSTATUS +gctaHARDWARE_IsFeatureAvailable( + IN gcTA_HARDWARE Hardware, + IN gceFEATURE 
Feature + ); + +gceSTATUS +gctaHARDWARE_PrepareFunctions( + IN gcTA_HARDWARE Hardware + ); + +gceSTATUS +gctaHARDWARE_DumpMMUException( + IN gcTA_HARDWARE Hardware + ); + +gceSTATUS +gctaHARDWARE_HandleMMUException( + IN gcTA_HARDWARE Hardware, + IN gctUINT32 MMUStatus, + IN gctPHYS_ADDR_T Physical, + IN gctUINT32 GPUAddress + ); + +gceSTATUS +gctaHARDWARE_ReadMMUException( + IN gcTA_HARDWARE Hardware, + OUT gctUINT32_PTR MMUStatus, + OUT gctUINT32_PTR MMUException + ); + +gceSTATUS +gctaMMU_Construct( + IN gcTA TA, + OUT gcTA_MMU *Mmu + ); + +gceSTATUS +gctaMMU_Destory( + IN gcTA_MMU Mmu + ); + +gceSTATUS +gctaMMU_SetPage( + IN gcTA_MMU Mmu, + IN gctUINT32 PageAddress, + IN gctUINT32 *PageEntry + ); + +gceSTATUS +gctaMMU_GetPageEntry( + IN gcTA_MMU Mmu, + IN gctUINT32 Address, + OUT gctUINT32_PTR MtlbEntry, + OUT gctUINT32_PTR *PageTable, + OUT gctBOOL * Secure + ); + +void +gctaMMU_DumpPagetableEntry( + IN gcTA_MMU Mmu, + IN gctUINT32 Address + ); + +gceSTATUS +gctaMMU_FreePages( + IN gcTA_MMU Mmu, + IN gctUINT32 Address, + IN gctUINT32 PageCount + ); + +#ifdef __cplusplus +} +#endif +#endif + diff --git a/drivers/mcst/gpu-viv/hal/security_v1/gc_hal_ta_hardware.c b/drivers/mcst/gpu-viv/hal/security_v1/gc_hal_ta_hardware.c new file mode 100644 index 000000000000..1ab52dfcac50 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/security_v1/gc_hal_ta_hardware.c @@ -0,0 +1,1070 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following 
conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#include "gc_hal_types.h" +#include "gc_hal_base.h" +#include "gc_hal_security_interface.h" +#include "gc_hal_ta.h" +#include "gc_hal_ta_hardware.h" +#include "gc_hal.h" +#include "gc_feature_database.h" + + +#define _GC_OBJ_ZONE 1 +#define SRC_MAX 8 +#define RECT_ADDR_OFFSET 3 + +#define INVALID_ADDRESS ~0U + +/******************************************************************************\ +********************************* Support Code ********************************* +\******************************************************************************/ +static gceSTATUS +_IdentifyHardwareByDatabase( + IN gcTA_HARDWARE Hardware + ) +{ + gceSTATUS status; + gctUINT32 chipIdentity; + gcsFEATURE_DATABASE *database; + gctaOS os = Hardware->os; + + gcmkHEADER(); + + /*************************************************************************** + ** Get chip ID and revision. + */ + + /* Read chip identity register. */ + gcmkONERROR(gctaOS_ReadRegister(os, Hardware->ta->core, 0x00018, &chipIdentity)); + + /* Special case for older graphic cores. */ + if (((((gctUINT32) (chipIdentity)) >> (0 ? + 31:24) & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:24) - (0 ? + 31:24) + 1)))))) == (0x01 & ((gctUINT32) ((((1 ? + 31:24) - (0 ? + 31:24) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 31:24) - (0 ? 31:24) + 1)))))))) + { + Hardware->chipModel = gcv500; + Hardware->chipRevision = (((((gctUINT32) (chipIdentity)) >> (0 ? 15:12)) & ((gctUINT32) ((((1 ? 15:12) - (0 ? 15:12) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 15:12) - (0 ? 15:12) + 1)))))) ); + } + + else + { + /* Read chip identity register. 
*/ + gcmkONERROR( + gctaOS_ReadRegister(os, Hardware->ta->core, + 0x00020, + (gctUINT32_PTR) &Hardware->chipModel)); + + if (((Hardware->chipModel & 0xFF00) == 0x0400) + && (Hardware->chipModel != 0x0420) + && (Hardware->chipModel != 0x0428)) + { + Hardware->chipModel = (gceCHIPMODEL) (Hardware->chipModel & 0x0400); + } + + /* Read CHIP_REV register. */ + gcmkONERROR( + gctaOS_ReadRegister(os, Hardware->ta->core, + 0x00024, + &Hardware->chipRevision)); + + if ((Hardware->chipModel == gcv300) + && (Hardware->chipRevision == 0x2201) + ) + { + gctUINT32 chipDate; + gctUINT32 chipTime; + + /* Read date and time registers. */ + gcmkONERROR( + gctaOS_ReadRegister(os, Hardware->ta->core, + 0x00028, + &chipDate)); + + gcmkONERROR( + gctaOS_ReadRegister(os, Hardware->ta->core, + 0x0002C, + &chipTime)); + + if ((chipDate == 0x20080814) && (chipTime == 0x12051100)) + { + /* This IP has an ECO; put the correct revision in it. */ + Hardware->chipRevision = 0x1051; + } + } + + gcmkONERROR( + gctaOS_ReadRegister(os, Hardware->ta->core, + 0x000A8, + &Hardware->productID)); + } + + gcmkVERIFY_OK(gctaOS_ReadRegister( + os, Hardware->ta->core, + 0x000E8 +, + &Hardware->ecoID + )); + + gcmkVERIFY_OK(gctaOS_ReadRegister( + os, Hardware->ta->core, + 0x00030 +, + &Hardware->customerID + )); + + /*************************************************************************** + ** Get chip features. + */ + + database = + Hardware->featureDatabase = + gcQueryFeatureDB( + Hardware->chipModel, + Hardware->chipRevision, + Hardware->productID, + Hardware->ecoID, + Hardware->customerID + ); + + if (database == gcvNULL) + { + gcmkPRINT("[galcore]: Feature database is not found," + "chipModel=0x%0x, chipRevision=0x%x, productID=0x%x, ecoID=0x%x, customerID=0x%x", + Hardware->chipModel, + Hardware->chipRevision, + Hardware->productID, + Hardware->ecoID, + Hardware->customerID); + gcmkONERROR(gcvSTATUS_NOT_FOUND); + } + + /* Success. 
*/ + gcmkFOOTER(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + + +gceSTATUS +gctaHARDWARE_SetMMUStates( + IN gcTA_HARDWARE Hardware, + IN gctPOINTER MtlbAddress, + IN gceMMU_MODE Mode, + IN gctPOINTER SafeAddress, + IN gctPOINTER Logical, + IN OUT gctUINT32 * Bytes + ) +{ + gceSTATUS status; + gctUINT32 config; + gctUINT32 extMtlb; + gctPHYS_ADDR_T physical; + gctUINT32_PTR buffer; + gctUINT32 reserveBytes = 2 * 4; + gcsMMU_TABLE_ARRAY_ENTRY * entry; + + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + + entry = (gcsMMU_TABLE_ARRAY_ENTRY *) Hardware->pagetableArray.logical; + + /* Convert logical address into physical address. */ + gcmkONERROR( + gctaOS_GetPhysicalAddress(Hardware->os, MtlbAddress, &physical)); + + config = (gctUINT32)(physical & 0xFFFFFFFF); + extMtlb = (gctUINT32)(physical >> 32); + /* more than 40bit physical address */ + if (extMtlb & 0xFFFFFF00) + { + gcmkONERROR(gcvSTATUS_NOT_SUPPORTED); + } + + switch (Mode) + { + case gcvMMU_MODE_1K: + if (config & 0x3FF) + { + gcmkONERROR(gcvSTATUS_NOT_ALIGNED); + } + + config |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))); + + break; + + case gcvMMU_MODE_4K: + if (config & 0xFFF) + { + gcmkONERROR(gcvSTATUS_NOT_ALIGNED); + } + + config |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 
0:0))); + + break; + + default: + gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); + } + + if (Logical != gcvNULL) + { + buffer = Logical; + + /* Setup page table array entry. */ + entry->low = config; + entry->high = extMtlb; + + /* Setup command buffer to load index 0 of page table array. */ + *buffer++ + = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0x006B) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 25:16) - (0 ? + 25:16) + 1))))))) << (0 ? + 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 25:16) - (0 ? + 25:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))); + + *buffer++ + = (((((gctUINT32) (~0U)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) &((((gctUINT32) (~0U)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 16:16) - (0 ? + 16:16) + 1))))))) << (0 ? + 16:16))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 16:16) - (0 ? 
16:16) + 1))))))) << (0 ? 16:16)))); + } + + if (Bytes != gcvNULL) + { + *Bytes = reserveBytes; + } + + /* Return the status. */ + gcmkFOOTER_NO(); + return status; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +gceSTATUS +gctaHARDWARE_End( + IN gcTA_HARDWARE Hardware, + IN gctPOINTER Logical, + IN OUT gctUINT32 * Bytes + ) +{ + gctUINT32_PTR logical = (gctUINT32_PTR) Logical; + gceSTATUS status; + + gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x *Bytes=%lu", + Hardware, Logical, gcmOPT_VALUE(Bytes)); + + /* Verify the arguments. */ + gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL)); + + if (Logical != gcvNULL) + { + if (*Bytes < 8) + { + /* Command queue too small. */ + gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL); + } + + /* Append END. */ + logical[0] = + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:27) - (0 ? + 31:27) + 1))))))) << (0 ? + 31:27))) | (((gctUINT32) (0x02 & ((gctUINT32) ((((1 ? + 31:27) - (0 ? + 31:27) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); + + /* Record the count of execution which is finised by this END. */ + logical[1] = + 0; + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "0x%x: END", Logical); + } + + if (Bytes != gcvNULL) + { + /* Return number of bytes required by the END command. */ + *Bytes = 8; + } + + /* Success. */ + gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes)); + return gcvSTATUS_OK; + +OnError: + /* Return the status. 
*/ + gcmkFOOTER(); + return status; +} + + +gceSTATUS +gctaHARDWARE_Construct( + IN gcTA TA, + OUT gcTA_HARDWARE * Hardware + ) +{ + gceSTATUS status; + gcTA_HARDWARE hardware = gcvNULL; + + gctaOS os = TA->os; + + gcmkONERROR(gctaOS_Allocate( + gcmSIZEOF(gcsTA_HARDWARE), + (gctPOINTER *)&hardware + )); + + gctaOS_ZeroMemory((gctUINT8_PTR)hardware, gcmSIZEOF(gcsTA_HARDWARE)); + + hardware->ta = TA; + hardware->os = os; + + hardware->pagetableArray.size = 4096; + + hardware->functionBytes = 4096; + + /* Power on GPU. */ + gctaOS_SetGPUPower(os, TA->core, gcvTRUE, gcvTRUE); + + /*************************************/ + /******** Get chip information ******/ + /*************************************/ + gctaOS_WriteRegister( + hardware->ta->os, hardware->ta->core, + 0x00000, + 0x00000900 + ); + + gcmkONERROR(_IdentifyHardwareByDatabase(hardware)); + + *Hardware = hardware; + + return gcvSTATUS_OK; + +OnError: + if (hardware) + { + gctaOS_Free(hardware); + } + + return status; +} + +gceSTATUS +gctaHARDWARE_Destroy( + IN gcTA_HARDWARE Hardware + ) +{ + if (Hardware->pagetableArray.logical) + { + gctaOS_FreeSecurityMemory( + Hardware->ta->os, + Hardware->pagetableArray.size, + Hardware->pagetableArray.logical, + (gctUINT32_PTR)Hardware->pagetableArray.physical + ); + } + + if (Hardware->functionLogical) + { + gctaOS_FreeSecurityMemory( + Hardware->ta->os, + Hardware->functionBytes, + Hardware->functionLogical, + (gctUINT32_PTR)Hardware->functionPhysical + ); + } + + gctaOS_Free(Hardware); + + return gcvSTATUS_OK; +} + +gceSTATUS +gctaHARDWARE_Execute( + IN gcTA TA, + IN gctUINT32 Address, + IN gctUINT32 Bytes + ) +{ + gceSTATUS status; + gctUINT32 address = Address, control; + + gcmkHEADER_ARG("Address=0x%x Bytes=%lu", + Address, Bytes); + + /* Enable all events. */ + gcmkONERROR( + gctaOS_WriteRegister(TA->os, TA->core, 0x00014, ~0U)); + + /* Write address register. 
*/ + gcmkONERROR( + gctaOS_WriteRegister(TA->os, TA->core, 0x00654, address)); + + /* Build control register. */ + control = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 16:16) - (0 ? + 16:16) + 1))))))) << (0 ? + 16:16))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 16:16) - (0 ? + 16:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:0) - (0 ? + 15:0) + 1))))))) << (0 ? + 15:0))) | (((gctUINT32) ((gctUINT32) ((Bytes + 7) >> 3) & ((gctUINT32) ((((1 ? + 15:0) - (0 ? + 15:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); + + /* Write control register. */ + gcmkONERROR( + gctaOS_WriteRegister(TA->os, TA->core, 0x003A4, control)); + + gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, + "Started command buffer @ 0x%08x", + address); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + /* Return the status. */ + gcmkFOOTER(); + return status; +} + +gceSTATUS +gctaHARDWARE_MmuEnable( + IN gcTA_HARDWARE Hardware + ) +{ + gctaOS_WriteRegister( + Hardware->ta->os, Hardware->ta->core, + 0x0018C, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) ((gctUINT32) (1 ) & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))); + + return gcvSTATUS_OK; +} + +/* +* In trust zone, we prepare page table array table and configure base address of +* it to hardware. 
+*/ +gceSTATUS +gctaHARDWARE_SetMMU( + IN gcTA_HARDWARE Hardware, + IN gctPOINTER Logical + ) +{ + gcsMMU_TABLE_ARRAY_ENTRY *entry; + gcsHARDWARE_FUNCTION *function = &Hardware->functions[0]; + gctUINT32 delay = 1; + gctUINT32 timer = 0; + gctUINT32 idle; + gctPHYS_ADDR_T mtlbPhysical; + gctPHYS_ADDR_T secureSafeAddress; + gctPHYS_ADDR_T nonSecureSafeAddress; + + gctaOS_GetPhysicalAddress(Hardware->ta->os, Logical, &mtlbPhysical); + + gctaOS_GetPhysicalAddress(Hardware->ta->os, Hardware->ta->mmu->safePageLogical, &secureSafeAddress); + + gctaOS_GetPhysicalAddress(Hardware->ta->os, Hardware->ta->mmu->nonSecureSafePageLogical, &nonSecureSafeAddress); + + /* not support more than 40bit physical address */ + if ((secureSafeAddress & 0xFFFFFF0000000000ULL) || + (nonSecureSafeAddress & 0xFFFFFF0000000000ULL)) + { + return (gcvSTATUS_NOT_SUPPORTED); + } + + /* Fill entry 0 of page table array. */ + entry = (gcsMMU_TABLE_ARRAY_ENTRY *)Hardware->pagetableArray.logical; + + entry->low = (gctUINT32)(mtlbPhysical & 0xFFFFFFFF); + + entry->high = (gctUINT32)(mtlbPhysical >> 32) + | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 8:8) - (0 ? + 8:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 8:8) - (0 ? + 8:8) + 1))))))) << (0 ? + 8:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? + 8:8) - (0 ? + 8:8) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8))) + ; + + /* Set page table base. 
*/ + gctaOS_WriteRegister( + Hardware->ta->os, Hardware->ta->core, + 0x0038C, + (gctUINT32)(Hardware->pagetableArray.address & 0xFFFFFFFF) + ); + + gctaOS_WriteRegister( + Hardware->ta->os, Hardware->ta->core, + 0x00390, + (gctUINT32)((Hardware->pagetableArray.address >> 32) & 0xFFFFFFFF) + ); + + gctaOS_WriteRegister( + Hardware->ta->os, Hardware->ta->core, + 0x00394 +, + 1 + ); + + gctaOS_WriteRegister( + Hardware->ta->os, Hardware->ta->core, + 0x0039C, + (gctUINT32)(secureSafeAddress & 0xFFFFFFFF) + ); + + gctaOS_WriteRegister( + Hardware->ta->os, Hardware->ta->core, + 0x00398, + (gctUINT32)(nonSecureSafeAddress & 0xFFFFFFFF) + ); + + gctaOS_WriteRegister( + Hardware->ta->os, Hardware->ta->core, + 0x003A0, + (((((gctUINT32) (~0U)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) | (((gctUINT32) ((gctUINT32) ((gctUINT32)((secureSafeAddress >> 32) & 0xFFFFFFFF)) & ((gctUINT32) ((((1 ? + 23:16) - (0 ? + 23:16) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 23:16) - (0 ? + 23:16) + 1))))))) << (0 ? + 23:16))) &((((gctUINT32) (~0U)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 31:31) - (0 ? + 31:31) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 31:31) - (0 ? + 31:31) + 1))))))) << (0 ? + 31:31))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 31:31) - (0 ? + 31:31) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)))) + | (((((gctUINT32) (~0U)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) | (((gctUINT32) ((gctUINT32) ((gctUINT32)((nonSecureSafeAddress >> 32) & 0xFFFFFFFF)) & ((gctUINT32) ((((1 ? + 7:0) - (0 ? + 7:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 7:0) - (0 ? + 7:0) + 1))))))) << (0 ? + 7:0))) &((((gctUINT32) (~0U)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 15:15) - (0 ? + 15:15) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 15:15) - (0 ? 
+ 15:15) + 1))))))) << (0 ? + 15:15))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? + 15:15) - (0 ? + 15:15) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 15:15) - (0 ? 15:15) + 1))))))) << (0 ? 15:15)))) + ); + + /* Execute prepared command sequence. */ + gctaHARDWARE_Execute( + Hardware->ta, + function->address, + function->bytes + ); + + /* Wait until MMU configure finishes. */ + do + { + gctaOS_Delay(Hardware->os, delay); + + gctaOS_ReadRegister( + Hardware->ta->os, Hardware->ta->core, + 0x00004, + &idle); + + timer += delay; + delay *= 2; + } + while (!(((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) )); + + /* Enable MMU. */ + gctaOS_WriteRegister( + Hardware->os, Hardware->ta->core, + 0x00388, + ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? + 0:0) - (0 ? + 0:0) + 1))))))) << (0 ? + 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? + 0:0) - (0 ? + 0:0) + 1) == 32) ? + ~0U : (~(~0U << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) + ); + + return gcvSTATUS_OK; +} + +gceSTATUS +gctaHARDWARE_PrepareFunctions( + IN gcTA_HARDWARE Hardware + ) +{ + gceSTATUS status; + gcsHARDWARE_FUNCTION * function; + gctUINT32 mmuBytes; + gctUINT32 endBytes = 8; + gctUINT8_PTR logical; + + gcmkHEADER(); + + /* Allocate page table array. */ + gcmkONERROR(gctaOS_AllocateSecurityMemory( + Hardware->ta->os, + &Hardware->pagetableArray.size, + &Hardware->pagetableArray.logical, + &Hardware->pagetableArray.physical + )); + + gcmkONERROR(gctaOS_GetPhysicalAddress( + Hardware->ta->os, + Hardware->pagetableArray.logical, + &Hardware->pagetableArray.address + )); + + /* Allocate GPU functions. 
*/ + gcmkONERROR(gctaOS_AllocateSecurityMemory( + Hardware->ta->os, + &Hardware->functionBytes, + &Hardware->functionLogical, + &Hardware->functionPhysical + )); + + gcmkONERROR(gctaOS_GetPhysicalAddress( + Hardware->ta->os, + Hardware->functionLogical, + (gctPHYS_ADDR_T *)&Hardware->functionAddress + )); + + function = &Hardware->functions[0]; + + function->logical = Hardware->functionLogical; + + function->address = Hardware->functionAddress; + + logical = function->logical; + + gcmkONERROR(gctaHARDWARE_SetMMUStates( + Hardware, + Hardware->ta->mmu->mtlbLogical, + gcvMMU_MODE_4K, + Hardware->ta->mmu->safePageLogical, + logical, + &mmuBytes + )); + + logical += 8; + + gcmkONERROR(gctaHARDWARE_End( + Hardware, + logical, + &endBytes + )); + + function->bytes = mmuBytes + endBytes; + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS +gctaHARDWARE_IsFeatureAvailable( + IN gcTA_HARDWARE Hardware, + IN gceFEATURE Feature + ) +{ + gctBOOL available; + gcsFEATURE_DATABASE *database = Hardware->featureDatabase; + + switch (Feature) + { + case gcvFEATURE_SECURITY: + available = database->SECURITY; + break; + default: + gcmkFATAL("Invalid feature has been requested."); + available = gcvFALSE; + } + + return available; +} + +gceSTATUS +gctaHARDWARE_DumpMMUException( + IN gcTA_HARDWARE Hardware + ) +{ + gctUINT32 mmu = 0; + gctUINT32 mmuStatus = 0; + gctUINT32 address = 0; + gctUINT32 i = 0; + + gctUINT32 mmuStatusRegAddress; + gctUINT32 mmuExceptionAddress; + + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + + mmuStatusRegAddress = 0x00384; + mmuExceptionAddress = 0x00380; + + /* Verify the arguments. 
*/ + gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE); + + gcmkPRINT("ChipModel=0x%x ChipRevision=0x%x:\n", + Hardware->chipModel, + Hardware->chipRevision); + + gcmkPRINT("**************************\n"); + gcmkPRINT("*** MMU ERROR DUMP ***\n"); + gcmkPRINT("**************************\n"); + + gcmkVERIFY_OK(gctaOS_ReadRegister( + Hardware->os, Hardware->ta->core, + mmuStatusRegAddress +, + &mmuStatus + )); + + gcmkPRINT(" MMU status = 0x%08X\n", mmuStatus); + + for (i = 0; i < 4; i += 1) + { + mmu = mmuStatus & 0xF; + mmuStatus >>= 4; + + if (mmu == 0) + { + continue; + } + + switch (mmu) + { + case 1: + gcmkPRINT(" MMU%d: slave not present\n", i); + break; + + case 2: + gcmkPRINT(" MMU%d: page not present\n", i); + break; + + case 3: + gcmkPRINT(" MMU%d: write violation\n", i); + break; + + case 4: + gcmkPRINT(" MMU%d: out of bound", i); + break; + + case 5: + gcmkPRINT(" MMU%d: read security violation", i); + break; + + case 6: + gcmkPRINT(" MMU%d: write security violation", i); + break; + + default: + gcmkPRINT(" MMU%d: unknown state\n", i); + } + + gcmkVERIFY_OK(gctaOS_ReadRegister( + Hardware->os, Hardware->ta->core, + mmuExceptionAddress + i * 4 +, + &address + )); + + gcmkPRINT(" MMU%d: exception address = 0x%08X\n", i, address); + + gctaMMU_DumpPagetableEntry(Hardware->ta->mmu, address); + } + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gctaHARDWARE_ReadMMUException( + IN gcTA_HARDWARE Hardware, + OUT gctUINT32_PTR MMUStatus, + OUT gctUINT32_PTR MMUException + ) +{ + gctUINT32 mmuStatusRegAddress; + gctUINT32 mmuExceptionAddress; + + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + + mmuStatusRegAddress = 0x00384; + mmuExceptionAddress = 0x00380; + + gcmkVERIFY_OK(gctaOS_ReadRegister( + Hardware->os, Hardware->ta->core, + mmuStatusRegAddress +, + MMUStatus + )); + + gcmkVERIFY_OK(gctaOS_ReadRegister( + Hardware->os, Hardware->ta->core, + mmuExceptionAddress +, + MMUException + )); + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS 
+gctaHARDWARE_HandleMMUException( + IN gcTA_HARDWARE Hardware, + IN gctUINT32 MMUStatus, + IN gctPHYS_ADDR_T Physical, + IN gctUINT32 GPUAddress + ) +{ + gctUINT32 mmu = 0; + gctUINT32 mmuStatus = 0; + gctUINT32 mtlbEntry = 0; + gctUINT32_PTR stlbEntry; + gctBOOL secure; + + gctUINT32 mmuStatusRegAddress; + gctUINT32 mmuExceptionAddress; + + gcmkHEADER_ARG("Hardware=0x%x", Hardware); + + mmuStatusRegAddress = 0x00384; + mmuExceptionAddress = 0x00380; + + gcmkVERIFY_OK(gctaOS_ReadRegister( + Hardware->os, Hardware->ta->core, + mmuStatusRegAddress +, + &mmuStatus + )); + + mmu = mmuStatus & 0xF; + + /* Setup page table. */ + + gctaMMU_GetPageEntry( + Hardware->ta->mmu, + GPUAddress, + &mtlbEntry, + &stlbEntry, + &secure + ); + + gctaMMU_SetPage( + Hardware->ta->mmu, + (gctUINT32)Physical, + stlbEntry + ); + + switch (mmu) + { + case 1: + gcmkASSERT(mtlbEntry != 0); + gctaOS_WriteRegister( + Hardware->os, Hardware->ta->core, + mmuExceptionAddress +, + mtlbEntry + ); + + break; + + case 2: + gctaOS_WriteRegister( + Hardware->os, Hardware->ta->core, + mmuExceptionAddress +, + *stlbEntry + ); + break; + + default: + gcmkASSERT(0); + } + + + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + diff --git a/drivers/mcst/gpu-viv/hal/security_v1/gc_hal_ta_hardware.h b/drivers/mcst/gpu-viv/hal/security_v1/gc_hal_ta_hardware.h new file mode 100644 index 000000000000..2ae926aa0f80 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/security_v1/gc_hal_ta_hardware.h @@ -0,0 +1,139 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit 
persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. 
If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + + +#ifndef _GC_HAL_TA_HARDWARE_H_ +#define _GC_HAL_TA_HARDWARE_H_ +#include "gc_hal_types.h" +#include "gc_hal_security_interface.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct _gcsMMU_TABLE_ARRAY_ENTRY +{ + gctUINT32 low; + gctUINT32 high; +} +gcsMMU_TABLE_ARRAY_ENTRY; + +typedef struct _gcsHARDWARE_PAGETABLE_ARRAY +{ + /* Number of entries in page table array. */ + gctUINT num; + + /* Size in bytes of array. */ + gctSIZE_T size; + + /* Physical address of array. */ + gctPHYS_ADDR_T address; + + /* Memory descriptor. */ + gctPOINTER physical; + + /* Logical address of array. */ + gctPOINTER logical; +} +gcsHARDWARE_PAGETABLE_ARRAY; + +typedef struct _gcsHARWARE_FUNCTION +{ + /* Entry of the function. */ + gctUINT32 address; + + /* CPU address of the function. */ + gctUINT8_PTR logical; + + /* Bytes of the function. */ + gctUINT32 bytes; + + /* Hardware address of END in this function. */ + gctUINT32 endAddress; + + /* Logical of END in this function. */ + gctUINT8_PTR endLogical; +} +gcsHARDWARE_FUNCTION; + +typedef struct _gcTA_HARDWARE +{ + gctaOS os; + gcTA ta; + + gctUINT32 chipModel; + gctUINT32 chipRevision; + gctUINT32 productID; + gctUINT32 ecoID; + gctUINT32 customerID; + + gctPOINTER featureDatabase; + + gcsHARDWARE_PAGETABLE_ARRAY pagetableArray; + + /* Function used by gctaHARDWARE. 
*/ + gctPHYS_ADDR functionPhysical; + gctPOINTER functionLogical; + gctUINT32 functionAddress; + gctSIZE_T functionBytes; + + gcsHARDWARE_FUNCTION functions[1]; +} +gcsTA_HARDWARE; + +#ifdef __cplusplus +} +#endif +#endif + diff --git a/drivers/mcst/gpu-viv/hal/security_v1/gc_hal_ta_mmu.c b/drivers/mcst/gpu-viv/hal/security_v1/gc_hal_ta_mmu.c new file mode 100644 index 000000000000..396712e81aed --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/security_v1/gc_hal_ta_mmu.c @@ -0,0 +1,586 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#include "gc_hal_types.h" +#include "gc_hal_base.h" +#include "gc_hal_security_interface.h" +#include "gc_hal_ta.h" +#include "gc_hal.h" + +#define _GC_OBJ_ZONE 2 +/******************************************************************************* +************************************ Define ************************************ +********************************************************************************/ + +#define gcdMMU_MTLB_SHIFT 22 +#define gcdMMU_STLB_4K_SHIFT 12 +#define gcdMMU_STLB_64K_SHIFT 16 + +#define gcdMMU_MTLB_BITS (32 - gcdMMU_MTLB_SHIFT) +#define gcdMMU_PAGE_4K_BITS gcdMMU_STLB_4K_SHIFT +#define gcdMMU_STLB_4K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_PAGE_4K_BITS) +#define gcdMMU_PAGE_64K_BITS gcdMMU_STLB_64K_SHIFT +#define gcdMMU_STLB_64K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_PAGE_64K_BITS) + +#define gcdMMU_MTLB_ENTRY_NUM (1 << gcdMMU_MTLB_BITS) +#define gcdMMU_MTLB_SIZE (gcdMMU_MTLB_ENTRY_NUM << 2) +#define gcdMMU_STLB_4K_ENTRY_NUM (1 << gcdMMU_STLB_4K_BITS) +#define gcdMMU_STLB_4K_SIZE (gcdMMU_STLB_4K_ENTRY_NUM << 2) +#define gcdMMU_PAGE_4K_SIZE (1 << gcdMMU_STLB_4K_SHIFT) +#define gcdMMU_STLB_64K_ENTRY_NUM (1 << gcdMMU_STLB_64K_BITS) +#define gcdMMU_STLB_64K_SIZE (gcdMMU_STLB_64K_ENTRY_NUM << 2) +#define gcdMMU_PAGE_64K_SIZE (1 << gcdMMU_STLB_64K_SHIFT) + +#define gcdMMU_MTLB_MASK (~((1U << gcdMMU_MTLB_SHIFT)-1)) +#define gcdMMU_STLB_4K_MASK ((~0U << gcdMMU_STLB_4K_SHIFT) ^ gcdMMU_MTLB_MASK) +#define gcdMMU_PAGE_4K_MASK (gcdMMU_PAGE_4K_SIZE - 1) +#define gcdMMU_STLB_64K_MASK ((~((1U << gcdMMU_STLB_64K_SHIFT)-1)) ^ gcdMMU_MTLB_MASK) +#define gcdMMU_PAGE_64K_MASK (gcdMMU_PAGE_64K_SIZE - 1) + +/* Page offset definitions. 
*/ +#define gcdMMU_OFFSET_4K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_STLB_4K_BITS) +#define gcdMMU_OFFSET_4K_MASK ((1U << gcdMMU_OFFSET_4K_BITS) - 1) +#define gcdMMU_OFFSET_16K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_STLB_16K_BITS) +#define gcdMMU_OFFSET_16K_MASK ((1U << gcdMMU_OFFSET_16K_BITS) - 1) + +#define gcdMMU_MTLB_PRESENT 0x00000001 +#define gcdMMU_MTLB_EXCEPTION 0x00000002 +#define gcdMMU_MTLB_4K_PAGE 0x00000000 + +#define gcdMMU_STLB_PRESENT 0x00000001 +#define gcdMMU_STLB_EXCEPTION 0x00000002 +#define gcdMMU_STLB_SECURITY (1 << 4) +#define gcdMMU_STLB_4K_PAGE 0x00000000 + +#define gcdUSE_MMU_EXCEPTION 1 + +#define gcdMMU_SECURE_AREA_START ((gcdMMU_MTLB_ENTRY_NUM - gcdMMU_SECURE_AREA_SIZE) << gcdMMU_MTLB_SHIFT) + +typedef enum _gceMMU_TYPE +{ + gcvMMU_USED = (0 << 4), + gcvMMU_SINGLE = (1 << 4), + gcvMMU_FREE = (2 << 4), +} +gceMMU_TYPE; + +typedef struct _gcsMMU_STLB *gcsMMU_STLB_PTR; +typedef struct _gcsMMU_STLB +{ + gctPHYS_ADDR physical; + gctUINT32_PTR logical; + gctSIZE_T size; + gctPHYS_ADDR_T physBase; + gctSIZE_T pageCount; + gctUINT32 mtlbIndex; + gctUINT32 mtlbEntryNum; + gcsMMU_STLB_PTR next; +} gcsMMU_STLB; + + +#define gcmENTRY_TYPE(x) (x & 0xF0) +/* +* We need flat mapping ta command buffer. 
+ +*/ + +/* +* Helper +*/ +gctUINT32 +_MtlbOffset( + gctUINT32 Address + ) +{ + return (Address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT; +} + +gctUINT32 +_StlbOffset( + gctUINT32 Address + ) +{ + return (Address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT; +} + +static gctUINT32 +_SetPage(gctUINT32 PageAddress) +{ + return PageAddress + /* writable */ + | (1 << 2) + /* Ignore exception */ + | (0 << 1) + /* Present */ + | (1 << 0); +} + +static void +_WritePageEntry( + IN gctUINT32_PTR PageEntry, + IN gctUINT32 EntryValue + ) +{ + *PageEntry = EntryValue; + + gctaOS_CacheClean((gctUINT8_PTR)PageEntry, gcmSIZEOF(gctUINT32)); +} + +static gceSTATUS +_FillPageTable( + IN gctUINT32_PTR PageTable, + IN gctUINT32 PageCount, + IN gctUINT32 EntryValue +) +{ + gctUINT i; + + for (i = 0; i < PageCount; i++) + { + _WritePageEntry(PageTable + i, EntryValue); + } + + return gcvSTATUS_OK; +} + + +static gceSTATUS +_AllocateStlb( + IN gctaOS Os, + OUT gcsMMU_STLB_PTR *Stlb + ) +{ + gceSTATUS status; + gcsMMU_STLB_PTR stlb; + gctPOINTER pointer = gcvNULL; + + /* Allocate slave TLB record. */ + gcmkONERROR(gctaOS_Allocate(gcmSIZEOF(gcsMMU_STLB), &pointer)); + stlb = pointer; + + stlb->size = gcdMMU_STLB_4K_SIZE; + + /* Allocate slave TLB entries. 
*/ + gcmkONERROR(gctaOS_AllocateSecurityMemory( + Os, + &stlb->size, + (gctPOINTER *)&stlb->logical, + &stlb->physical + )); + + gcmkONERROR(gctaOS_GetPhysicalAddress(Os, stlb->logical, &stlb->physBase)); + +#if gcdUSE_MMU_EXCEPTION + _FillPageTable(stlb->logical, (gctUINT32)stlb->size / 4, gcdMMU_STLB_EXCEPTION); +#else + gctaOS_ZeroMemory(stlb->logical, (gctUINT32)stlb->size); +#endif + + *Stlb = stlb; + + return gcvSTATUS_OK; + +OnError: + if(pointer != gcvNULL) + gcmkVERIFY_OK(gctaOS_Free(pointer)); + return status; +} + +gceSTATUS +gctaMMU_Construct( + IN gcTA TA, + OUT gcTA_MMU *Mmu + ) +{ + gceSTATUS status; + gctSIZE_T bytes = 4096; + + gcTA_MMU mmu = gcvNULL; + + gcmkONERROR(gctaOS_Allocate( + gcmSIZEOF(gcsTA_MMU), + (gctPOINTER *)&mmu + )); + + mmu->mtlbLogical = gcvNULL; + mmu->stlbs = gcvNULL; + mmu->safePageLogical = gcvNULL; + mmu->nonSecureSafePageLogical = gcvNULL; + + mmu->os = TA->os; + + /* MTLB bytes. */ + mmu->mtlbBytes = gcdMMU_MTLB_SIZE; + + /* Allocate MTLB. */ + gcmkONERROR(gctaOS_AllocateSecurityMemory( + TA->os, + &mmu->mtlbBytes, + &mmu->mtlbLogical, + &mmu->mtlbPhysical + )); + +#if gcdUSE_MMU_EXCEPTION + _FillPageTable(mmu->mtlbLogical, (gctUINT32)mmu->mtlbBytes / 4, gcdMMU_STLB_EXCEPTION); +#else + gctaOS_ZeroMemory(mmu->mtlbLogical, (gctUINT32)mmu->mtlbBytes); +#endif + + /* Allocate a array to store stlbs. */ + gcmkONERROR(gctaOS_Allocate((gctUINT32)mmu->mtlbBytes, &mmu->stlbs)); + + gctaOS_ZeroMemory((gctUINT8_PTR)mmu->stlbs, (gctUINT32)mmu->mtlbBytes); + + /* Allocate security safe page. */ + gcmkONERROR(gctaOS_AllocateSecurityMemory( + TA->os, + &bytes, + &mmu->safePageLogical, + &mmu->safePagePhysical + )); + + gctaOS_ZeroMemory((gctUINT8_PTR)mmu->safePageLogical, (gctUINT32)bytes); + + /* Allocate non security safe page. 
*/ + gcmkONERROR(gctaOS_AllocateSecurityMemory( + TA->os, + &bytes, + &mmu->nonSecureSafePageLogical, + &mmu->nonSecureSafePagePhysical + )); + + gctaOS_ZeroMemory((gctUINT8_PTR)mmu->nonSecureSafePageLogical, (gctUINT32)bytes); + + /* gcmkONERROR(gctaOS_CreateMutex(TA->os, &mmu->mutex)); */ + + *Mmu = mmu; + + return gcvSTATUS_OK; + +OnError: + if (mmu) + { + if (mmu->safePageLogical) + { + gcmkVERIFY_OK(gctaOS_FreeSecurityMemory( + TA->os, + 4096, + mmu->safePageLogical, + mmu->safePagePhysical + )); + } + + if (mmu->nonSecureSafePageLogical) + { + gcmkVERIFY_OK(gctaOS_FreeSecurityMemory( + TA->os, + 4096, + mmu->nonSecureSafePageLogical, + mmu->nonSecureSafePagePhysical + )); + } + + if (mmu->mtlbLogical) + { + gcmkVERIFY_OK(gctaOS_FreeSecurityMemory( + TA->os, + 4096, + mmu->mtlbLogical, + mmu->mtlbPhysical + )); + } + + if (mmu->stlbs) + { + gcmkVERIFY_OK(gctaOS_Free((gctPOINTER)mmu->stlbs)); + } + + gcmkVERIFY_OK(gctaOS_Free((gctPOINTER)mmu)); + } + return status; +} + +gceSTATUS +gctaMMU_Destory( + IN gcTA_MMU Mmu + ) +{ + gctaOS os = Mmu->os; + + if (Mmu->safePageLogical) + { + gcmkVERIFY_OK(gctaOS_FreeSecurityMemory( + os, + 4096, + Mmu->safePageLogical, + Mmu->safePagePhysical + )); + } + + if (Mmu->nonSecureSafePageLogical) + { + gcmkVERIFY_OK(gctaOS_FreeSecurityMemory( + os, + 4096, + Mmu->nonSecureSafePageLogical, + Mmu->nonSecureSafePagePhysical + )); + } + + if (Mmu->mtlbLogical) + { + gcmkVERIFY_OK(gctaOS_FreeSecurityMemory( + os, + 4096, + Mmu->mtlbLogical, + Mmu->mtlbPhysical + )); + } + + if (Mmu->stlbs) + { + gcmkVERIFY_OK(gctaOS_Free((gctPOINTER)Mmu->stlbs)); + } + + gcmkVERIFY_OK(gctaOS_Free(Mmu)); + + return gcvSTATUS_OK; +} + +gceSTATUS +gctaMMU_GetPageEntry( + IN gcTA_MMU Mmu, + IN gctUINT32 Address, + OUT gctUINT32_PTR MtlbEntry, + OUT gctUINT32_PTR *PageTable, + OUT gctBOOL * Secure + ) +{ + gceSTATUS status; + struct _gcsMMU_STLB *stlb; + struct _gcsMMU_STLB **stlbs = (struct _gcsMMU_STLB **)Mmu->stlbs; + gctUINT32 offset = 
_MtlbOffset(Address); + gctUINT32 mtlbEntry; + gctBOOL secure = Address > gcdMMU_SECURE_AREA_START; + + gcmkHEADER_ARG("Mmu=0x%x", Mmu); + + /* Verify the arguments. */ + gcmkVERIFY_ARGUMENT((Address & 0xFFF) == 0); + + stlb = stlbs[offset]; + + if (stlb == gcvNULL) + { + gcmkONERROR(_AllocateStlb(Mmu->os, &stlb)); + + mtlbEntry = (gctUINT32)(stlb->physBase & 0xFFFFFFFF) + | gcdMMU_MTLB_4K_PAGE + | gcdMMU_MTLB_PRESENT + ; + + if (secure) + { + /* Secure MTLB. */ + mtlbEntry |= (1 << 4); + } + + /* Insert Slave TLB address to Master TLB entry.*/ + _WritePageEntry((gctUINT32_PTR)Mmu->mtlbLogical + offset, mtlbEntry); + + /* Record stlb. */ + stlbs[offset] = stlb; + + if (MtlbEntry) + { + /* Return entry value of new mtlb entry. */ + *MtlbEntry = mtlbEntry; + } + } + + *PageTable = &stlb->logical[_StlbOffset(Address)]; + + if (Secure) + { + *Secure = secure; + } + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS +gctaMMU_SetPage( + IN gcTA_MMU Mmu, + IN gctUINT32 PageAddress, + IN gctUINT32 *PageEntry + ) +{ + /* gctBOOL secure; */ + + gcmkHEADER_ARG("Mmu=0x%x", Mmu); + + /* Verify the arguments. */ + gcmkVERIFY_ARGUMENT(PageEntry != gcvNULL); + gcmkVERIFY_ARGUMENT(!(PageAddress & 0xFFF)); + + _WritePageEntry(PageEntry, _SetPage(PageAddress)); + + /* Success. */ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; +} + +gceSTATUS +gctaMMU_FreePages( + IN gcTA_MMU Mmu, + IN gctUINT32 Address, + IN gctUINT32 PageCount + ) +{ + gceSTATUS status; + gctUINT32 i; + gctUINT32_PTR entry; + gcmkHEADER_ARG("Mmu=0x%x", Mmu); + + /* Fill in page table. */ + for (i = 0; i < PageCount; i++) + { + gcmkONERROR(gctaMMU_GetPageEntry(Mmu, Address, gcvNULL, &entry, gcvNULL)); + +#if gcdUSE_MMU_EXCEPTION + *entry = gcdMMU_STLB_EXCEPTION; +#else + *entry = 0; +#endif + + Address += 4096; + } + + /* Success. 
*/ + gcmkFOOTER_NO(); + return gcvSTATUS_OK; + +OnError: + gcmkFOOTER(); + return status; +} + +gceSTATUS +gctaMMU_Enable( + IN gcTA_MMU Mmu, + IN gcTA TA + ) +{ + gceSTATUS status; + gctPHYS_ADDR_T address; + gctPHYS_ADDR_T safeAddress; + + gcmkONERROR(gctaOS_GetPhysicalAddress(Mmu->os, Mmu->mtlbLogical, &address)); + + gctaOS_GetPhysicalAddress(Mmu->os, Mmu->safePageLogical, &safeAddress); + + return gcvSTATUS_OK; + +OnError: + return status; +} + +void +gctaMMU_DumpPagetableEntry( + IN gcTA_MMU Mmu, + IN gctUINT32 Address + ) +{ + gctUINT32 entry; + gctUINT32 mtlb = _MtlbOffset(Address); + gctUINT32_PTR mtlbLogical = Mmu->mtlbLogical; + gctUINT32_PTR stlbLogical; + gcsMMU_STLB_PTR stlb; + struct _gcsMMU_STLB **stlbs = (struct _gcsMMU_STLB **)Mmu->stlbs; + + gctUINT32 stlbOffset = (Address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT; + gctUINT32 offsetInPage = Address & gcdMMU_OFFSET_4K_MASK; + + stlb = stlbs[mtlb]; + + gcmkPRINT(" MTLB entry = %d\n", mtlb); + + gcmkPRINT(" STLB entry = %d\n", stlbOffset); + + gcmkPRINT(" Offset = 0x%08X (%d)\n", offsetInPage, offsetInPage); + + + if (stlb == gcvNULL) + { + /* Dmp mtlb entry. 
*/ + entry = mtlbLogical[mtlb]; + + gcmkPRINT(" mtlb entry [%d] = %x", mtlb, entry); + } + else + { + stlbLogical = stlb->logical; + + gcmkPRINT(" stlb entry = 0x%08X", stlbLogical[stlbOffset]); + } +} + + diff --git a/drivers/mcst/gpu-viv/hal/security_v1/os/emulator/gc_hal_ta_emulator.c b/drivers/mcst/gpu-viv/hal/security_v1/os/emulator/gc_hal_ta_emulator.c new file mode 100644 index 000000000000..bd2fcbae2581 --- /dev/null +++ b/drivers/mcst/gpu-viv/hal/security_v1/os/emulator/gc_hal_ta_emulator.c @@ -0,0 +1,324 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2018 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2018 Vivante Corporation +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + + +#include "gc_hal_base.h" +#include "gc_hal.h" +#include "gc_hal_ta.h" +#include "gc_hal_kernel_mutex.h" + +#define _GC_OBJ_ZONE gcvZONE_OS + +gcTA globalTA[16] = { gcvNULL, gcvNULL, gcvNULL, gcvNULL,gcvNULL, gcvNULL, gcvNULL, gcvNULL }; +gctaOS globalTAos; + +struct _gctaOS { + void *os; + + gctPOINTER dispatchMutex; +}; + +gceSTATUS HALDECL +TAEmulator( + gceCORE Core, + void * Interface + ) +{ + gckOS_AcquireMutex(globalTAos->os, globalTAos->dispatchMutex, gcvINFINITE); + + gcTA_Dispatch(globalTA[Core], Interface); + + gckOS_ReleaseMutex(globalTAos->os, globalTAos->dispatchMutex); + return gcvSTATUS_OK; +} + + +gceSTATUS +gctaOS_ConstructOS( + IN gckOS Os, + OUT gctaOS *TAos + ) +{ + gctaOS os; + gctPOINTER pointer = gcvNULL; + gceSTATUS status; + + gcmkONERROR(gckOS_AllocateMemory(Os, gcmSIZEOF(struct _gctaOS), &pointer)); + + os = (gctaOS)pointer; + os->os = Os; + + gcmkONERROR(gckOS_CreateMutex(Os, &os->dispatchMutex)); + + *TAos = globalTAos = os; + + return gcvSTATUS_OK; + +OnError: + if (pointer != gcvNULL) + { + gcmkVERIFY_OK(gckOS_FreeMemory(Os, pointer)); + } + return status; +} + +gceSTATUS +gctaOS_DestroyOS( + IN gctaOS Os + ) +{ + gckOS os = Os->os; + + gcmkVERIFY_OK(gckOS_DeleteMutex(os, Os->dispatchMutex)); + gcmkVERIFY_OK(gckOS_FreeMemory(os, Os)); + + return gcvSTATUS_OK; +} + +gceSTATUS +gctaOS_AllocateSecurityMemory( + IN gctaOS Os, + IN gctSIZE_T *Bytes, + OUT gctPOINTER *Logical, + OUT gctPOINTER *Physical + ) +{ + gceSTATUS status; + + gcmkONERROR(gckOS_AllocateNonPagedMemory(Os->os, gcvFALSE, gcvALLOC_FLAG_CONTIGUOUS, Bytes, (gctPHYS_ADDR *)Physical, Logical)); + + return gcvSTATUS_OK; + +OnError: + return status; +} + +gceSTATUS +gctaOS_FreeSecurityMemory( + IN gctaOS Os, + IN gctSIZE_T Bytes, + IN gctPOINTER Logical, + OUT gctPOINTER Physical + ) +{ + gckOS_FreeNonPagedMemory(Os->os, Bytes, (gctPHYS_ADDR)Physical, Logical); + return gcvSTATUS_OK; +} + 
+gceSTATUS +gctaOS_AllocateNonSecurityMemory( + IN gctaOS Os, + IN gctSIZE_T *Bytes, + OUT gctPOINTER *Logical, + OUT gctPOINTER *Physical + ) +{ + gceSTATUS status; + + gcmkONERROR(gckOS_AllocateNonPagedMemory(Os->os, gcvFALSE, gcvALLOC_FLAG_CONTIGUOUS, Bytes, (gctPHYS_ADDR *)Physical, Logical)); + + return gcvSTATUS_OK; + +OnError: + return status; +} + +gceSTATUS +gctaOS_FreeNonSecurityMemory( + IN gctaOS Os, + IN gctSIZE_T Bytes, + IN gctPOINTER Logical, + OUT gctPOINTER Physical + ) +{ + gckOS_FreeNonPagedMemory(Os->os, Bytes, (gctPHYS_ADDR)Physical, Logical); + return gcvSTATUS_OK; +} + +gceSTATUS +gctaOS_Allocate( + IN gctUINT32 Bytes, + OUT gctPOINTER *Pointer + ) +{ + return gckOS_AllocateMemory(globalTAos->os, Bytes, Pointer); +} + +gceSTATUS +gctaOS_Free( + IN gctPOINTER Pointer + ) +{ + return gckOS_FreeMemory(globalTAos->os, Pointer); +} + +gceSTATUS +gctaOS_GetPhysicalAddress( + IN gctaOS Os, + IN gctPOINTER Logical, + OUT gctPHYS_ADDR_T * Physical + ) +{ + gctPHYS_ADDR_T physical; + gceSTATUS status; + + gcmkONERROR(gckOS_GetPhysicalAddress(Os->os, Logical, &physical)); + + gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical(Os->os, physical, &physical)); + + *Physical = (gctUINT32)physical; + + return gcvSTATUS_OK; + +OnError: + return status; +} + +gceSTATUS gctaOS_WriteRegister( + IN gctaOS Os, IN gceCORE Core, + IN gctUINT32 Address, + IN gctUINT32 Data + ) +{ + return gckOS_WriteRegisterEx(Os->os, Core, Address, Data); +} + +gceSTATUS gctaOS_ReadRegister( + IN gctaOS Os, IN gceCORE Core, + IN gctUINT32 Address, + IN gctUINT32 *Data + ) +{ + return gckOS_ReadRegisterEx(Os->os, Core, Address, Data); +} + +gceSTATUS +gctaOS_MemCopy( + IN gctUINT8_PTR Dest, + IN gctUINT8_PTR Src, + IN gctUINT32 Bytes + ) +{ + gckOS_MemCopy(Dest, Src, Bytes); + return gcvSTATUS_OK; +} + +gceSTATUS +gctaOS_ZeroMemory( + IN gctUINT8_PTR Dest, + IN gctUINT32 Bytes + ) +{ + gckOS_ZeroMemory(Dest, Bytes); + return gcvSTATUS_OK; +} + +void +gctaOS_CacheFlush( + IN gctUINT8_PTR 
Dest, + IN gctUINT32 Bytes + ) +{ + +} + +void +gctaOS_CacheClean( + IN gctUINT8_PTR Dest, + IN gctUINT32 Bytes + ) +{ + +} + +void +gctaOS_CacheInvalidate( + IN gctUINT8_PTR Dest, + IN gctUINT32 Bytes + ) +{ + +} + +gceSTATUS +gctaOS_IsPhysicalSecure( + IN gctaOS Os, + IN gctUINT32 Physical, + OUT gctBOOL *Secure + ) +{ + return gcvSTATUS_NOT_SUPPORTED; +} + +gceSTATUS +gctaOS_Delay( + IN gctaOS Os, + IN gctUINT32 Delay + ) +{ + return gckOS_Delay(Os->os, Delay); +} + +gceSTATUS +gctaOS_SetGPUPower( + IN gctaOS Os, + IN gctUINT32 Core, + IN gctBOOL Clock, + IN gctBOOL Power + ) +{ + return gckOS_SetGPUPower(Os->os, Core, Power, Clock); +} + + diff --git a/drivers/mcst/hantrodec/Makefile b/drivers/mcst/hantrodec/Makefile new file mode 100644 index 000000000000..2e71ab1ce725 --- /dev/null +++ b/drivers/mcst/hantrodec/Makefile @@ -0,0 +1,3 @@ +# Copyright 2013 Google Inc. All Rights Reserved. + +obj-$(CONFIG_HANTRODEC) += hantrodec.o \ No newline at end of file diff --git a/drivers/mcst/hantrodec/README b/drivers/mcst/hantrodec/README new file mode 100644 index 000000000000..94a11ef7ed79 --- /dev/null +++ b/drivers/mcst/hantrodec/README @@ -0,0 +1,41 @@ +/* Copyright 2013 Google Inc. All Rights Reserved. */ + +-- BUILD -- + +You need a fully configured kernel source tree in order to build the +driver. Please set the location of the kernel tree in the Makefile (KDIR). +If you want some extra debug information in the kernel logs, you could +define the HANTRODEC_DEBUG but please be aware that allot of things are traced +with this option. +Also you could set a particular device MAJOR in the 'hantrodec.c' if you don't want +dynamic allocation. + +Just run in this dir: + +%make + +-- USAGE -- + +The parameters that can be set when loading the driver are the HW IO base +address and the assigned IRQ number. 
+ +First of all the module has to be inserted into the kernel with: +(you need a Linux shell cmd line) + +%insmod hantrodec.o base_port= irq= + +Set the correct values for the HW IO base address and the IRQ number if +the default values compiled into the module are not valid. + +Second of all a char device file has to be created: + +%mknod /dev/hantrodec c 0 + +Replace MAJOR with the correct value (i.e. read /proc/devices to found out +the exact values). + +Make sure that you have RW rights for the newly created dev file (use 'chmod'). + +The 'driver_load' script is provided for preparing all the things necessary for +the driver to be usable. The script is using 'awk' to retrieve the device's +major from /proc/devices. Remember to set the driver parameters. diff --git a/drivers/mcst/hantrodec/driver_load.sh b/drivers/mcst/hantrodec/driver_load.sh new file mode 100644 index 000000000000..329f720c9fc0 --- /dev/null +++ b/drivers/mcst/hantrodec/driver_load.sh @@ -0,0 +1,43 @@ +# Copyright 2013 Google Inc. All Rights Reserved. + +module="hantrodec" +device="/tmp/dev/hantrodec" +mode="666" + +echo + +if [ ! -e /tmp/dev ] +then + mkdir -p /tmp/dev/ +fi + +#insert module +rm_module=`lsmod |grep $module` +if [ ! -z "$rm_module" ] +then + rmmod $module || exit 1 +fi +insmod $module.ko $* || exit 1 + +echo "module $module inserted" + +#remove old nod +rm -f $device + +#read the major asigned at loading time +major=`cat /proc/devices | grep $module | cut -c1-3` + +echo "$module major = $major" + +#create dev node +mknod $device c $major 0 + +echo "node $device created" + +#give all 'rw' access +chmod $mode $device + +echo "set node access to $mode" + +#the end +echo diff --git a/drivers/mcst/hantrodec/dwl_defs.h b/drivers/mcst/hantrodec/dwl_defs.h new file mode 100644 index 000000000000..b11e167c14db --- /dev/null +++ b/drivers/mcst/hantrodec/dwl_defs.h @@ -0,0 +1,44 @@ +/* Copyright 2013 Google Inc. All Rights Reserved. 
*/ +/* Author: attilanagy@google.com (Atti Nagy) */ + +#ifndef SOFTWARE_LINUX_DWL_DWL_DEFS_H_ +#define SOFTWARE_LINUX_DWL_DWL_DEFS_H_ + +#define DWL_MPEG2_E 31 /* 1 bit */ +#define DWL_VC1_E 29 /* 2 bits */ +#define DWL_JPEG_E 28 /* 1 bit */ +#define DWL_MPEG4_E 26 /* 2 bits */ +#define DWL_H264_E 24 /* 2 bits */ +#define DWL_VP6_E 23 /* 1 bit */ +#define DWL_RV_E 26 /* 2 bits */ +#define DWL_VP8_E 23 /* 1 bit */ +#define DWL_VP7_E 24 /* 1 bit */ +#define DWL_WEBP_E 19 /* 1 bit */ +#define DWL_AVS_E 22 /* 1 bit */ +#define DWL_PP_E 31 /* 1 bit */ +#define DWL_HEVC_E 5 /* 2 bits */ +#define DWL_VP9_E 3 /* 2 bits */ + +#define HANTRODEC_IRQ_STAT_DEC 1 +#define HANTRODEC_IRQ_STAT_DEC_OFF (HANTRODEC_IRQ_STAT_DEC * 4) +#define HANTRODEC_IRQ_STAT_PP 60 +#define HANTRODEC_IRQ_STAT_PP_OFF (HANTRODEC_IRQ_STAT_PP * 4) + +#define HANTRODECPP_SYNTH_CFG 60 +#define HANTRODECPP_SYNTH_CFG_OFF (HANTRODECPP_SYNTH_CFG * 4) +#define HANTRODEC_SYNTH_CFG 50 +#define HANTRODEC_SYNTH_CFG_OFF (HANTRODEC_SYNTH_CFG * 4) +#define HANTRODEC_SYNTH_CFG_2 54 +#define HANTRODEC_SYNTH_CFG_2_OFF (HANTRODEC_SYNTH_CFG_2 * 4) +#define HANTRODEC_SYNTH_CFG_3 56 +#define HANTRODEC_SYNTH_CFG_3_OFF (HANTRODEC_SYNTH_CFG_3 * 4) + +#define HANTRODEC_DEC_E 0x01 +#define HANTRODEC_PP_E 0x01 +#define HANTRODEC_DEC_ABORT 0x20 +#define HANTRODEC_DEC_IRQ_DISABLE 0x10 +#define HANTRODEC_PP_IRQ_DISABLE 0x10 +#define HANTRODEC_DEC_IRQ 0x100 +#define HANTRODEC_PP_IRQ 0x100 + +#endif /* SOFTWARE_LINUX_DWL_DWL_DEFS_H_ */ diff --git a/drivers/mcst/hantrodec/hantrodec.c b/drivers/mcst/hantrodec/hantrodec.c new file mode 100644 index 000000000000..008d1be958e1 --- /dev/null +++ b/drivers/mcst/hantrodec/hantrodec.c @@ -0,0 +1,1332 @@ +/* Copyright 2013 Google Inc. All Rights Reserved. 
*/ + +#include "hantrodec.h" +#include "dwl_defs.h" + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define HXDEC_MAX_CORES 1 + +#define HANTRO_DEC_REGS 184 +#define HANTRO_PP_REGS 41 /* No separate PP regs. */ + +#define HANTRO_DEC_FIRST_REG 0 +#define HANTRO_DEC_LAST_REG HANTRO_DEC_REGS-1 +#define HANTRO_PP_FIRST_REG 60 +#define HANTRO_PP_LAST_REG 100 + +/* Logic module IRQs */ +#define HXDEC_NO_IRQ -1 + +/* module defaults */ +#define DEC_IO_SIZE (HANTRO_DEC_REGS * 4) /* bytes, PP regs included + within dec regs. */ +#define DEC_IRQ HXDEC_NO_IRQ + +static const int DecHwId[] = +{ + 0x6732 +}; + +static unsigned long base_port = -1; + +static u32 multicorebase[HXDEC_MAX_CORES] = +{ + -1 +}; + +static int irq = DEC_IRQ; +static int elements = 0; + +/* module_param(name, type, perm) */ +module_param(base_port, ulong, 0); +module_param(irq, int, 0); +module_param_array(multicorebase, uint, &elements, 0); + +static int hantrodec_major = 0; /* dynamic allocation */ + +/* here's all the must remember stuff */ +typedef struct +{ + char *buffer; + unsigned int iosize; + void __iomem *hwregs[HXDEC_MAX_CORES]; + int irq; + int cores; + struct fasync_struct *async_queue_dec; + struct fasync_struct *async_queue_pp; +} hantrodec_t; + +static hantrodec_t hantrodec_data; /* dynamic allocation? 
*/ + +static int ReserveIO(void); +static void ReleaseIO(void); + +static void ResetAsic(hantrodec_t * dev); + +#ifdef HANTRODEC_DEBUG +static void dump_regs(hantrodec_t *dev); +#endif + +/* IRQ handler */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) +static irqreturn_t hantrodec_isr(int irq, void *dev_id, struct pt_regs *regs); +#else +static irqreturn_t hantrodec_isr(int irq, void *dev_id); +#endif + + +static u32 dec_regs[HXDEC_MAX_CORES][DEC_IO_SIZE/4]; +static struct semaphore dec_core_sem; +static struct semaphore pp_core_sem; + +static int dec_irq = 0; +static int pp_irq = 0; + +static atomic_t irq_rx = ATOMIC_INIT(0); +static atomic_t irq_tx = ATOMIC_INIT(0); + +static struct file* dec_owner[HXDEC_MAX_CORES]; +static struct file* pp_owner[HXDEC_MAX_CORES]; + +static DEFINE_SPINLOCK(owner_lock); + +static DECLARE_WAIT_QUEUE_HEAD(dec_wait_queue); +static DECLARE_WAIT_QUEUE_HEAD(pp_wait_queue); + +static DECLARE_WAIT_QUEUE_HEAD(hw_queue); + +#define DWL_CLIENT_TYPE_PP 4U +#define DWL_CLIENT_TYPE_VP9_DEC 11U +#define DWL_CLIENT_TYPE_HEVC_DEC 12U + +static u32 cfg[HXDEC_MAX_CORES]; + +static struct pci_dev *gDev = NULL; /* PCI device structure. */ + +static void ReadCoreConfig(hantrodec_t *dev) +{ + int c; + u32 reg, tmp; + + memset(cfg, 0, sizeof(cfg)); + + for(c = 0; c < dev->cores; c++) + { + /* Decoder configuration */ + reg = ioread32(dev->hwregs[c] + HANTRODEC_SYNTH_CFG_2 * 4); + + tmp = (reg >> DWL_HEVC_E) & 0x3U; + if(tmp) printk(KERN_INFO "hantrodec: core[%d] has HEVC\n", c); + cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_HEVC_DEC : 0; + + tmp = (reg >> DWL_VP9_E) & 0x03U; + if(tmp) printk(KERN_INFO "hantrodec: core[%d] has VP9\n", c); + cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_VP9_DEC : 0; + + /* Post-processor configuration */ + reg = ioread32(dev->hwregs[c] + HANTRODECPP_SYNTH_CFG * 4); + + tmp = (reg >> DWL_PP_E) & 0x01U; + if(tmp) printk(KERN_INFO "hantrodec: core[%d] has PP\n", c); + cfg[c] |= tmp ? 
1 << DWL_CLIENT_TYPE_PP : 0; + } +} + +static int CoreHasFormat(const u32 *cfg, int core, u32 format) +{ + return (cfg[core] & (1 << format)) ? 1 : 0; +} + +int GetDecCore(long core, hantrodec_t *dev, struct file* filp) +{ + int success = 0; + unsigned long flags; + + spin_lock_irqsave(&owner_lock, flags); + if(dec_owner[core] == NULL ) + { + dec_owner[core] = filp; + success = 1; + } + + spin_unlock_irqrestore(&owner_lock, flags); + + return success; +} + +int GetDecCoreAny(long *core, hantrodec_t *dev, struct file* filp, + unsigned long format) +{ + int success = 0; + long c; + + *core = -1; + + for(c = 0; c < dev->cores; c++) + { + /* a free core that has format */ + if(CoreHasFormat(cfg, c, format) && GetDecCore(c, dev, filp)) + { + success = 1; + *core = c; + break; + } + } + + return success; +} + +long ReserveDecoder(hantrodec_t *dev, struct file* filp, unsigned long format) +{ + long core = -1; + + /* reserve a core */ + if (down_interruptible(&dec_core_sem)) + return -ERESTARTSYS; + + /* lock a core that has specific format*/ + if(wait_event_interruptible(hw_queue, + GetDecCoreAny(&core, dev, filp, format) != 0 )) + return -ERESTARTSYS; + + return core; +} + +void ReleaseDecoder(hantrodec_t *dev, long core) +{ + u32 status; + unsigned long flags; + + status = ioread32(dev->hwregs[core] + HANTRODEC_IRQ_STAT_DEC_OFF); + + /* make sure HW is disabled */ + if(status & HANTRODEC_DEC_E) + { + printk(KERN_INFO "hantrodec: DEC[%li] still enabled -> reset\n", core); + + /* abort decoder */ + status |= HANTRODEC_DEC_ABORT | HANTRODEC_DEC_IRQ_DISABLE; + iowrite32(status, dev->hwregs[core] + HANTRODEC_IRQ_STAT_DEC_OFF); + } + + spin_lock_irqsave(&owner_lock, flags); + + dec_owner[core] = NULL; + + spin_unlock_irqrestore(&owner_lock, flags); + + up(&dec_core_sem); + + wake_up_interruptible_all(&hw_queue); +} + +long ReservePostProcessor(hantrodec_t *dev, struct file* filp) +{ + unsigned long flags; + + long core = 0; + + /* single core PP only */ + if 
(down_interruptible(&pp_core_sem)) + return -ERESTARTSYS; + + spin_lock_irqsave(&owner_lock, flags); + + pp_owner[core] = filp; + + spin_unlock_irqrestore(&owner_lock, flags); + + return core; +} + +void ReleasePostProcessor(hantrodec_t *dev, long core) +{ + unsigned long flags; + + u32 status = ioread32(dev->hwregs[core] + HANTRODEC_IRQ_STAT_PP_OFF); + + /* make sure HW is disabled */ + if(status & HANTRODEC_PP_E) + { + printk(KERN_INFO "hantrodec: PP[%li] still enabled -> reset\n", core); + + /* disable IRQ */ + status |= HANTRODEC_PP_IRQ_DISABLE; + + /* disable postprocessor */ + status &= (~HANTRODEC_PP_E); + iowrite32(0x10, dev->hwregs[core] + HANTRODEC_IRQ_STAT_PP_OFF); + } + + spin_lock_irqsave(&owner_lock, flags); + + pp_owner[core] = NULL; + + spin_unlock_irqrestore(&owner_lock, flags); + + up(&pp_core_sem); +} + +long ReserveDecPp(hantrodec_t *dev, struct file* filp, unsigned long format) +{ + /* reserve core 0, DEC+PP for pipeline */ + unsigned long flags; + + long core = 0; + + /* check that core has the requested dec format */ + if(!CoreHasFormat(cfg, core, format)) + return -EFAULT; + + /* check that core has PP */ + if(!CoreHasFormat(cfg, core, DWL_CLIENT_TYPE_PP)) + return -EFAULT; + + /* reserve a core */ + if (down_interruptible(&dec_core_sem)) + return -ERESTARTSYS; + + /* wait until the core is available */ + if(wait_event_interruptible(hw_queue, + GetDecCore(core, dev, filp) != 0)) + { + up(&dec_core_sem); + return -ERESTARTSYS; + } + + + if (down_interruptible(&pp_core_sem)) + { + ReleaseDecoder(dev, core); + return -ERESTARTSYS; + } + + spin_lock_irqsave(&owner_lock, flags); + pp_owner[core] = filp; + spin_unlock_irqrestore(&owner_lock, flags); + + return core; +} + +long DecFlushRegs(hantrodec_t *dev, struct core_desc *core) +{ + long ret = 0, i; + + u32 id = core->id; + + ret = copy_from_user(dec_regs[id], core->regs, HANTRO_DEC_REGS*4); + if (ret) + { + PDEBUG("copy_from_user failed, returned %li\n", ret); + return -EFAULT; + } + + /* 
write all regs but the status reg[1] to hardware */ + for(i = 2; i <= HANTRO_DEC_LAST_REG; i++) + iowrite32(dec_regs[id][i], dev->hwregs[id] + i*4); + + /* write the status register, which may start the decoder */ + iowrite32(dec_regs[id][1], dev->hwregs[id] + 4); + + PDEBUG("flushed registers on core %d\n", id); + + return 0; +} + +long DecRefreshRegs(hantrodec_t *dev, struct core_desc *core) +{ + long ret, i; + u32 id = core->id; + + /* user has to know exactly what they are asking for */ + if(core->size != (HANTRO_DEC_REGS * 4)) + return -EFAULT; + + /* read all registers from hardware */ + for(i = 0; i <= HANTRO_DEC_LAST_REG; i++) + dec_regs[id][i] = ioread32(dev->hwregs[id] + i*4); + + /* put registers to user space*/ + ret = copy_to_user(core->regs, dec_regs[id], HANTRO_DEC_REGS*4); + if (ret) + { + PDEBUG("copy_to_user failed, returned %li\n", ret); + return -EFAULT; + } + + return 0; +} + +static int CheckDecIrq(hantrodec_t *dev, int id) +{ + unsigned long flags; + int rdy = 0; + + const u32 irq_mask = (1 << id); + + spin_lock_irqsave(&owner_lock, flags); + + if(dec_irq & irq_mask) + { + /* reset the wait condition(s) */ + dec_irq &= ~irq_mask; + rdy = 1; + } + + spin_unlock_irqrestore(&owner_lock, flags); + + return rdy; +} + +long WaitDecReadyAndRefreshRegs(hantrodec_t *dev, struct core_desc *core) +{ + u32 id = core->id; + + PDEBUG("wait_event_interruptible DEC[%d]\n", id); + + if(wait_event_interruptible(dec_wait_queue, CheckDecIrq(dev, id))) + { + PDEBUG("DEC[%d] wait_event_interruptible interrupted\n", id); + return -ERESTARTSYS; + } + + atomic_inc(&irq_tx); + + /* refresh registers */ + return DecRefreshRegs(dev, core); +} + +long PPFlushRegs(hantrodec_t *dev, struct core_desc *core) +{ + long ret = 0; + u32 id = core->id; + u32 i; + + ret = copy_from_user(dec_regs[id] + HANTRO_DEC_REGS, core->regs, + HANTRO_PP_REGS*4); + if (ret) + { + PDEBUG("copy_from_user failed, returned %li\n", ret); + return -EFAULT; + } + + /* write all regs but the status 
reg[1] to hardware */ + for(i = HANTRO_PP_FIRST_REG + 1; i <= HANTRO_PP_LAST_REG; i++) + iowrite32(dec_regs[id][i], dev->hwregs[id] + i*4); + + /* write the stat reg, which may start the PP */ + iowrite32(dec_regs[id][HANTRO_PP_FIRST_REG], + dev->hwregs[id] + HANTRO_PP_FIRST_REG * 4); + + return 0; +} + +long PPRefreshRegs(hantrodec_t *dev, struct core_desc *core) +{ + long i, ret; + u32 id = core->id; + + /* user has to know exactly what they are asking for */ + if(core->size != (HANTRO_PP_REGS * 4)) + return -EFAULT; + + /* read all registers from hardware */ + for(i = HANTRO_PP_FIRST_REG; i <= HANTRO_PP_LAST_REG; i++) + dec_regs[id][i] = ioread32(dev->hwregs[id] + i*4); + + /* put registers to user space*/ + ret = copy_to_user(core->regs, dec_regs[id] + HANTRO_PP_FIRST_REG, + HANTRO_PP_REGS * 4); + if (ret) + { + PDEBUG("copy_to_user failed, returned %li\n", ret); + return -EFAULT; + } + + return 0; +} + +static int CheckPPIrq(hantrodec_t *dev, int id) +{ + unsigned long flags; + int rdy = 0; + + const u32 irq_mask = (1 << id); + + spin_lock_irqsave(&owner_lock, flags); + + if(pp_irq & irq_mask) + { + /* reset the wait condition(s) */ + pp_irq &= ~irq_mask; + rdy = 1; + } + + spin_unlock_irqrestore(&owner_lock, flags); + + return rdy; +} + +long WaitPPReadyAndRefreshRegs(hantrodec_t *dev, struct core_desc *core) +{ + u32 id = core->id; + + PDEBUG("wait_event_interruptible PP[%d]\n", id); + + if(wait_event_interruptible(pp_wait_queue, CheckPPIrq(dev, id))) + { + PDEBUG("PP[%d] wait_event_interruptible interrupted\n", id); + return -ERESTARTSYS; + } + + atomic_inc(&irq_tx); + + /* refresh registers */ + return PPRefreshRegs(dev, core); +} + +static int CheckCoreIrq(hantrodec_t *dev, const struct file *filp, int *id) +{ + unsigned long flags; + int rdy = 0, n = 0; + + do + { + u32 irq_mask = (1 << n); + + spin_lock_irqsave(&owner_lock, flags); + + if(dec_irq & irq_mask) + { + if (dec_owner[n] == filp) + { + /* we have an IRQ for our client */ + + /* reset the wait 
condition(s) */ + dec_irq &= ~irq_mask; + + /* signal ready core no. for our client */ + *id = n; + + rdy = 1; + + break; + } + else if(dec_owner[n] == NULL) + { + /* zombie IRQ */ + printk(KERN_INFO "IRQ on core[%d], but no owner!!!\n", n); + + /* reset the wait condition(s) */ + dec_irq &= ~irq_mask; + } + } + + spin_unlock_irqrestore(&owner_lock, flags); + + n++; /* next core */ + } + while(n < dev->cores); + + return rdy; +} + +long WaitCoreReady(hantrodec_t *dev, const struct file *filp, int *id) +{ + PDEBUG("wait_event_interruptible CORE\n"); + + if(wait_event_interruptible(dec_wait_queue, CheckCoreIrq(dev, filp, id))) + { + PDEBUG("CORE wait_event_interruptible interrupted\n"); + return -ERESTARTSYS; + } + + atomic_inc(&irq_tx); + + return 0; +} + +/*------------------------------------------------------------------------------ + Function name : hantrodec_ioctl + Description : communication method to/from the user space + + Return type : long +------------------------------------------------------------------------------*/ + +static long hantrodec_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int err = 0; + long tmp; + +#ifdef HW_PERFORMANCE + struct timeval *end_time_arg; +#endif + + PDEBUG("ioctl cmd 0x%08x\n", cmd); + /* + * extract the type and number bitfields, and don't decode + * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok() + */ + if (_IOC_TYPE(cmd) != HANTRODEC_IOC_MAGIC) + return -ENOTTY; + if (_IOC_NR(cmd) > HANTRODEC_IOC_MAXNR) + return -ENOTTY; + + /* + * the direction is a bitmask, and VERIFY_WRITE catches R/W + * transfers. 
`Type' is user-oriented, while + * access_ok is kernel-oriented, so the concept of "read" and + * "write" is reversed + */ + if (_IOC_DIR(cmd) & _IOC_READ) + err = !access_ok((void *) arg, _IOC_SIZE(cmd)); + else if (_IOC_DIR(cmd) & _IOC_WRITE) + err = !access_ok((void *) arg, _IOC_SIZE(cmd)); + + if (err) + return -EFAULT; + + switch (cmd) + { + case HANTRODEC_IOC_CLI: + disable_irq(hantrodec_data.irq); + break; + case HANTRODEC_IOC_STI: + enable_irq(hantrodec_data.irq); + break; + case HANTRODEC_IOCGHWOFFSET: + __put_user(multicorebase[0], (unsigned long *) arg); + break; + case HANTRODEC_IOCGHWIOSIZE: + __put_user(hantrodec_data.iosize, (unsigned int *) arg); + break; + case HANTRODEC_IOC_MC_OFFSETS: + { + tmp = copy_to_user((u32 *) arg, multicorebase, sizeof(multicorebase)); + if (err) + { + PDEBUG("copy_to_user failed, returned %li\n", tmp); + return -EFAULT; + } + break; + } + case HANTRODEC_IOC_MC_CORES: + __put_user(hantrodec_data.cores, (unsigned int *) arg); + break; + case HANTRODEC_IOCS_DEC_PUSH_REG: + { + struct core_desc core; + + /* get registers from user space*/ + tmp = copy_from_user(&core, (void*)arg, sizeof(struct core_desc)); + if (tmp) + { + PDEBUG("copy_from_user failed, returned %li\n", tmp); + return -EFAULT; + } + + DecFlushRegs(&hantrodec_data, &core); + break; + } + case HANTRODEC_IOCS_PP_PUSH_REG: + { + struct core_desc core; + + /* get registers from user space*/ + tmp = copy_from_user(&core, (void*)arg, sizeof(struct core_desc)); + if (tmp) + { + PDEBUG("copy_from_user failed, returned %li\n", tmp); + return -EFAULT; + } + + PPFlushRegs(&hantrodec_data, &core); + break; + } + case HANTRODEC_IOCS_DEC_PULL_REG: + { + struct core_desc core; + + /* get registers from user space*/ + tmp = copy_from_user(&core, (void*)arg, sizeof(struct core_desc)); + if (tmp) + { + PDEBUG("copy_from_user failed, returned %li\n", tmp); + return -EFAULT; + } + + return DecRefreshRegs(&hantrodec_data, &core); + } + case HANTRODEC_IOCS_PP_PULL_REG: + { + 
struct core_desc core; + + /* get registers from user space*/ + tmp = copy_from_user(&core, (void*)arg, sizeof(struct core_desc)); + if (tmp) + { + PDEBUG("copy_from_user failed, returned %li\n", tmp); + return -EFAULT; + } + + return PPRefreshRegs(&hantrodec_data, &core); + } + case HANTRODEC_IOCH_DEC_RESERVE: + { + PDEBUG("Reserve DEC core, format = %li\n", arg); + return ReserveDecoder(&hantrodec_data, filp, arg); + } + case HANTRODEC_IOCT_DEC_RELEASE: + { + if(arg >= hantrodec_data.cores || dec_owner[arg] != filp) + { + PDEBUG("bogus DEC release, core = %li\n", arg); + return -EFAULT; + } + + PDEBUG("Release DEC, core = %li\n", arg); + + ReleaseDecoder(&hantrodec_data, arg); + + break; + } + case HANTRODEC_IOCQ_PP_RESERVE: + return ReservePostProcessor(&hantrodec_data, filp); + case HANTRODEC_IOCT_PP_RELEASE: + { + if(arg != 0 || pp_owner[arg] != filp) + { + PDEBUG("bogus PP release %li\n", arg); + return -EFAULT; + } + + ReleasePostProcessor(&hantrodec_data, arg); + + break; + } + case HANTRODEC_IOCX_DEC_WAIT: + { + struct core_desc core; + + /* get registers from user space */ + tmp = copy_from_user(&core, (void*)arg, sizeof(struct core_desc)); + if (tmp) + { + PDEBUG("copy_from_user failed, returned %li\n", tmp); + return -EFAULT; + } + + return WaitDecReadyAndRefreshRegs(&hantrodec_data, &core); + } + case HANTRODEC_IOCX_PP_WAIT: + { + struct core_desc core; + + /* get registers from user space */ + tmp = copy_from_user(&core, (void*)arg, sizeof(struct core_desc)); + if (tmp) + { + PDEBUG("copy_from_user failed, returned %li\n", tmp); + return -EFAULT; + } + + return WaitPPReadyAndRefreshRegs(&hantrodec_data, &core); + } + case HANTRODEC_IOCG_CORE_WAIT: + { + int id; + tmp = WaitCoreReady(&hantrodec_data, filp, &id); + __put_user(id, (int *) arg); + return tmp; + } + case HANTRODEC_IOX_ASIC_ID: + { + u32 id; + __get_user(id, (u32*)arg); + + if(id >= hantrodec_data.cores) + { + return -EFAULT; + } + id = ioread32(hantrodec_data.hwregs[id]); + __put_user(id, 
(u32 *) arg); + break; + } + case HANTRODEC_IOX_GHW_PCI_POS: + { + u32 id; + u8 pci_position[4]; + + __get_user(id, (size_t *)arg); + if (id >= hantrodec_data.cores) + { + return -EFAULT; + } + + pci_position[0] = pci_domain_nr(gDev->bus); + pci_position[1] = gDev->bus->number; + pci_position[2] = PCI_SLOT(gDev->devfn); + pci_position[3] = PCI_FUNC(gDev->devfn); + + err = copy_to_user((u8 *) arg, pci_position, sizeof(pci_position)); + if (err) + { + PDEBUG("copy_to_user failed, returned %li\n", tmp); + return -EFAULT; + } + break; + } + + case HANTRODEC_DEBUG_STATUS: + { + printk(KERN_INFO "hantrodec: dec_irq = 0x%08x \n", dec_irq); + printk(KERN_INFO "hantrodec: pp_irq = 0x%08x \n", pp_irq); + + printk(KERN_INFO "hantrodec: IRQs received/sent2user = %d / %d \n", + atomic_read(&irq_rx), atomic_read(&irq_tx)); + + for (tmp = 0; tmp < hantrodec_data.cores; tmp++) + { + printk(KERN_INFO "hantrodec: dec_core[%li] %s\n", + tmp, dec_owner[tmp] == NULL ? "FREE" : "RESERVED"); + printk(KERN_INFO "hantrodec: pp_core[%li] %s\n", + tmp, pp_owner[tmp] == NULL ? 
"FREE" : "RESERVED"); + } + } + default: + return -ENOTTY; + } + + return 0; +} + +/*------------------------------------------------------------------------------ + Function name : hantrodec_open + Description : open method + + Return type : int +------------------------------------------------------------------------------*/ + +static int hantrodec_open(struct inode *inode, struct file *filp) +{ + PDEBUG("dev opened\n"); + return 0; +} + +/*------------------------------------------------------------------------------ + Function name : hantrodec_release + Description : Release driver + + Return type : int +------------------------------------------------------------------------------*/ + +static int hantrodec_release(struct inode *inode, struct file *filp) +{ + int n; + hantrodec_t *dev = &hantrodec_data; + + PDEBUG("closing ...\n"); + + for(n = 0; n < dev->cores; n++) + { + if(dec_owner[n] == filp) + { + PDEBUG("releasing dec core %i lock\n", n); + ReleaseDecoder(dev, n); + } + } + + for(n = 0; n < 1; n++) + { + if(pp_owner[n] == filp) + { + PDEBUG("releasing pp core %i lock\n", n); + ReleasePostProcessor(dev, n); + } + } + + PDEBUG("closed\n"); + return 0; +} + +/* VFS methods */ +static struct file_operations hantrodec_fops = +{ + .owner = THIS_MODULE, + .open = hantrodec_open, + .release = hantrodec_release, + .unlocked_ioctl = hantrodec_ioctl, + .fasync = NULL +}; + +/*------------------------------------------------------------------------------ + Function name : hantrodec_init + Description : Initialize the driver + + Return type : int +------------------------------------------------------------------------------*/ + +static int __hantrodec_init(void) +{ + int result, i; + + PDEBUG("module init\n"); + + printk(KERN_INFO "hantrodec: dec/pp kernel module. 
\n"); + + multicorebase[0] = base_port; + elements = 1; + printk(KERN_INFO "hantrodec: Init single core at 0x%08x IRQ=%i\n", + multicorebase[0], irq); + + hantrodec_data.iosize = DEC_IO_SIZE; + hantrodec_data.irq = irq; + + for(i=0; i< HXDEC_MAX_CORES; i++) + { + hantrodec_data.hwregs[i] = 0; + /* If user gave less core bases that we have by default, + * invalidate default bases + */ + if(elements && i>=elements) + { + multicorebase[i] = -1; + } + } + + hantrodec_data.async_queue_dec = NULL; + hantrodec_data.async_queue_pp = NULL; + + result = register_chrdev(hantrodec_major, "hantrodec", &hantrodec_fops); + if(result < 0) + { + printk(KERN_INFO "hantrodec: unable to get major %d\n", hantrodec_major); + goto err; + } + else if(result != 0) /* this is for dynamic major */ + { + hantrodec_major = result; + } + + result = ReserveIO(); + if(result < 0) + { + goto err; + } + + memset(dec_owner, 0, sizeof(dec_owner)); + memset(pp_owner, 0, sizeof(pp_owner)); + + sema_init(&dec_core_sem, hantrodec_data.cores); + sema_init(&pp_core_sem, 1); + + /* read configuration fo all cores */ + ReadCoreConfig(&hantrodec_data); + + /* reset hardware */ + ResetAsic(&hantrodec_data); + + /* get the IRQ line */ + if(irq > 0) + { + result = request_irq(irq, hantrodec_isr, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)) + SA_INTERRUPT | SA_SHIRQ, +#else + IRQF_SHARED, +#endif + "hantrodec", (void *) &hantrodec_data); + if(result != 0) + { + if(result == -EINVAL) + { + printk(KERN_ERR "hantrodec: Bad irq number or handler\n"); + } + else if(result == -EBUSY) + { + printk(KERN_ERR "hantrodec: IRQ <%d> busy, change your config\n", + hantrodec_data.irq); + } + + ReleaseIO(); + goto err; + } + } + else + { + printk(KERN_INFO "hantrodec: IRQ not in use!\n"); + } + + printk(KERN_INFO "hantrodec: module inserted. 
Major = %d\n", hantrodec_major); + + return 0; + + err: + printk(KERN_INFO "hantrodec: module not inserted\n"); + unregister_chrdev(hantrodec_major, "hantrodec"); + return result; +} +/*------------------------------------------------------------------------------ + Function name : hantrodec_cleanup + Description : clean up + + Return type : int +------------------------------------------------------------------------------*/ + +static void __hantrodec_cleanup(void) +{ + hantrodec_t *dev = &hantrodec_data; + + /* reset hardware */ + ResetAsic(dev); + + /* free the IRQ */ + if(dev->irq != -1) + { + free_irq(dev->irq, (void *) dev); + } + + ReleaseIO(); + + unregister_chrdev(hantrodec_major, "hantrodec"); + + printk(KERN_INFO "hantrodec: module removed\n"); + return; +} + +/*------------------------------------------------------------------------------ + Function name : CheckHwId + Return type : int +------------------------------------------------------------------------------*/ +static int CheckHwId(hantrodec_t * dev) +{ + u32 hwid; + int i; + size_t numHw = sizeof(DecHwId) / sizeof(*DecHwId); + + int found = 0; + + for (i = 0; i < dev->cores; i++) + { + if (dev->hwregs[i] != NULL ) + { + hwid = readl(dev->hwregs[i]); + printk(KERN_INFO "hantrodec: Core %d HW ID=0x%08x\n", i, hwid); + hwid = (hwid >> 16) & 0xFFFF; /* product version only */ + + while (numHw--) + { + if (hwid == DecHwId[numHw]) + { + printk(KERN_INFO "hantrodec: Supported HW found at 0x%08x\n", + multicorebase[i]); + found++; + break; + } + } + if (!found) + { + printk(KERN_INFO "hantrodec: Unknown HW (%x) found at 0x%08x\n", + hwid, multicorebase[i]); + return 0; + } + found = 0; + numHw = sizeof(DecHwId) / sizeof(*DecHwId); + } + } + + return 1; +} + +/*------------------------------------------------------------------------------ + Function name : ReserveIO + Description : IO reserve + + Return type : int +------------------------------------------------------------------------------*/ +static 
int ReserveIO(void) +{ + int i; + + for (i = 0; i < HXDEC_MAX_CORES; i++) + { + if (multicorebase[i] != -1) + { + if (!request_mem_region(multicorebase[i], hantrodec_data.iosize, + "hantrodec0")) + { + printk(KERN_INFO "hantrodec: failed to reserve HW regs\n"); + return -EBUSY; + } + + hantrodec_data.hwregs[i] = ioremap_nocache(multicorebase[i], + hantrodec_data.iosize); + + if (hantrodec_data.hwregs[i] == NULL ) + { + printk(KERN_INFO "hantrodec: failed to ioremap HW regs\n"); + ReleaseIO(); + return -EBUSY; + } + hantrodec_data.cores++; + } + } + + /* check for correct HW */ + if (!CheckHwId(&hantrodec_data)) + { + ReleaseIO(); + return -EBUSY; + } + + return 0; +} + +/*------------------------------------------------------------------------------ + Function name : releaseIO + Description : release + + Return type : void +------------------------------------------------------------------------------*/ + +static void ReleaseIO(void) +{ + int i; + for (i = 0; i < hantrodec_data.cores; i++) + { + if (hantrodec_data.hwregs[i]) + iounmap((void *) hantrodec_data.hwregs[i]); + release_mem_region(multicorebase[i], hantrodec_data.iosize); + } +} + +/*------------------------------------------------------------------------------ + Function name : hantrodec_isr + Description : interrupt handler + + Return type : irqreturn_t +------------------------------------------------------------------------------*/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) +irqreturn_t hantrodec_isr(int irq, void *dev_id, struct pt_regs *regs) +#else +irqreturn_t hantrodec_isr(int irq, void *dev_id) +#endif +{ + unsigned long flags; + unsigned int handled = 0; + int i; + void __iomem *hwregs; + + hantrodec_t *dev = (hantrodec_t *) dev_id; + u32 irq_status_dec; + u32 irq_status_pp; + + spin_lock_irqsave(&owner_lock, flags); + + for(i=0; icores; i++) + { + void __iomem *hwregs = dev->hwregs[i]; + + /* interrupt status register read */ + irq_status_dec = ioread32(hwregs + 
HANTRODEC_IRQ_STAT_DEC_OFF); + + if(irq_status_dec & HANTRODEC_DEC_IRQ) + { + /* clear dec IRQ */ + irq_status_dec &= (~HANTRODEC_DEC_IRQ); + iowrite32(irq_status_dec, hwregs + HANTRODEC_IRQ_STAT_DEC_OFF); + + PDEBUG("decoder IRQ received! core %d\n", i); + + atomic_inc(&irq_rx); + + dec_irq |= (1 << i); + + wake_up_interruptible_all(&dec_wait_queue); + handled++; + } + } + + /* check PP also */ + hwregs = dev->hwregs[0]; + irq_status_pp = ioread32(hwregs + HANTRODEC_IRQ_STAT_PP_OFF); + if(irq_status_pp & HANTRODEC_PP_IRQ) + { + /* clear pp IRQ */ + irq_status_pp &= (~HANTRODEC_PP_IRQ); + iowrite32(irq_status_pp, hwregs + HANTRODEC_IRQ_STAT_PP_OFF); + + PDEBUG("post-processor IRQ received!\n"); + + atomic_inc(&irq_rx); + + pp_irq |= 1; + + wake_up_interruptible_all(&pp_wait_queue); + handled++; + } + + spin_unlock_irqrestore(&owner_lock, flags); + + if(!handled) + { + PDEBUG("IRQ received, but not hantrodec's!\n"); + } + + return IRQ_RETVAL(handled); +} + +/*------------------------------------------------------------------------------ + Function name : ResetAsic + Description : reset asic + + Return type : +------------------------------------------------------------------------------*/ +void ResetAsic(hantrodec_t * dev) +{ + int i, j; + u32 status; + + for (j = 0; j < dev->cores; j++) + { + status = ioread32(dev->hwregs[j] + HANTRODEC_IRQ_STAT_DEC_OFF); + + if( status & HANTRODEC_DEC_E) + { + /* abort with IRQ disabled */ + status = HANTRODEC_DEC_ABORT | HANTRODEC_DEC_IRQ_DISABLE; + iowrite32(status, dev->hwregs[j] + HANTRODEC_IRQ_STAT_DEC_OFF); + } + + /* reset PP */ + iowrite32(0, dev->hwregs[j] + HANTRODEC_IRQ_STAT_PP_OFF); + + for (i = 4; i < dev->iosize; i += 4) + { + iowrite32(0, dev->hwregs[j] + i); + } + } +} + +/*------------------------------------------------------------------------------ + Function name : dump_regs + Description : Dump registers + + Return type : +------------------------------------------------------------------------------*/ +#ifdef 
HANTRODEC_DEBUG +void dump_regs(hantrodec_t *dev) +{ + int i,c; + + PDEBUG("Reg Dump Start\n"); + for(c = 0; c < dev->cores; c++) + { + for(i = 0; i < dev->iosize; i += 4*4) + { + PDEBUG("\toffset %04X: %08X %08X %08X %08X\n", i, + ioread32(dev->hwregs[c] + i), + ioread32(dev->hwregs[c] + i + 4), + ioread32(dev->hwregs[c] + i + 16), + ioread32(dev->hwregs[c] + i + 24)); + } + } + PDEBUG("Reg Dump End\n"); +} +#endif + +static int __init hantrodec_probe(struct pci_dev *pdev, + const struct pci_device_id *pciid) +{ + /* Enable the device*/ + int rc = pci_enable_device(pdev); + if (rc) { + printk(KERN_ERR "hantrodec: pci_enable_device() failed.\n"); + return rc; + } + pci_set_master(pdev); + gDev = pdev; + + if (pdev->device == PCI_DEVICE_ID_MCST_VP9_G2_R2000P) { + base_port = pci_resource_start(pdev, 2); + rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX); + if (rc < 0) { + printk(KERN_ERR "hantrodec: unable to allocate MSIX irq vector.\n"); + return rc; + } + irq = pci_irq_vector(pdev, 0); + } else { + base_port = pci_resource_start(pdev, 0); + irq = pdev->irq; + } + return __hantrodec_init(); +} + +static void __exit hantrodec_remove(struct pci_dev *pdev) +{ + __hantrodec_cleanup(); +} + +static struct pci_device_id hantrodec_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_VP9_G2)}, + {PCI_DEVICE(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_VP9_G2_R2000P)}, + {}, /* terminate list */ +}; + +MODULE_DEVICE_TABLE(pci, hantrodec_pci_tbl); + +static +#ifdef CONFIG_MCST +__refdata +#endif + struct pci_driver hantrodec_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = hantrodec_pci_tbl, + .probe = hantrodec_probe, + .remove = hantrodec_remove, +}; + +static int __init hantrodec_init(void) +{ + return pci_register_driver(&hantrodec_pci_driver); +} + +static void __exit hantrodec_cleanup(void) +{ + pci_unregister_driver(&hantrodec_pci_driver); +} + +module_init( hantrodec_init); +module_exit( hantrodec_cleanup); + +/* module description */ 
+#ifndef CONFIG_MCST +MODULE_LICENSE("Proprietary"); +#else +MODULE_LICENSE("GPL"); +#endif +MODULE_AUTHOR("Google Finland Oy"); +MODULE_DESCRIPTION("Driver module for Hantro Decoder/Post-Processor"); + diff --git a/drivers/mcst/hantrodec/hantrodec.h b/drivers/mcst/hantrodec/hantrodec.h new file mode 100644 index 000000000000..9663f803e972 --- /dev/null +++ b/drivers/mcst/hantrodec/hantrodec.h @@ -0,0 +1,73 @@ +/* Copyright 2013 Google Inc. All Rights Reserved. */ + +#ifndef _HANTRODEC_H_ +#define _HANTRODEC_H_ +#include +#include + +#undef PDEBUG +#ifdef HANTRODEC_DEBUG +# ifdef __KERNEL__ +# define PDEBUG(fmt, args...) printk( KERN_INFO "hantrodec: " fmt, ## args) +# else +# define PDEBUG(fmt, args...) fprintf(stderr, fmt, ## args) +# endif +#else +# define PDEBUG(fmt, args...) +#endif + +struct core_desc +{ + __u32 id; /* id of the core */ + __u32 *regs; /* pointer to user registers */ + __u32 size; /* size of register space */ +}; + +/* Use 'k' as magic number */ +#define HANTRODEC_IOC_MAGIC 'k' + +/* + * S means "Set" through a ptr, + * T means "Tell" directly with the argument value + * G means "Get": reply by setting through a pointer + * Q means "Query": response is on the return value + * X means "eXchange": G and S atomically + * H means "sHift": T and Q atomically + */ + +#define HANTRODEC_PP_INSTANCE _IO(HANTRODEC_IOC_MAGIC, 1) +#define HANTRODEC_HW_PERFORMANCE _IO(HANTRODEC_IOC_MAGIC, 2) +#define HANTRODEC_IOCGHWOFFSET _IOR(HANTRODEC_IOC_MAGIC, 3, unsigned long *) +#define HANTRODEC_IOCGHWIOSIZE _IOR(HANTRODEC_IOC_MAGIC, 4, unsigned int *) + +#define HANTRODEC_IOC_CLI _IO(HANTRODEC_IOC_MAGIC, 5) +#define HANTRODEC_IOC_STI _IO(HANTRODEC_IOC_MAGIC, 6) +#define HANTRODEC_IOC_MC_OFFSETS _IOR(HANTRODEC_IOC_MAGIC, 7, unsigned long *) +#define HANTRODEC_IOC_MC_CORES _IOR(HANTRODEC_IOC_MAGIC, 8, unsigned int *) + + +#define HANTRODEC_IOCS_DEC_PUSH_REG _IOW(HANTRODEC_IOC_MAGIC, 9, struct core_desc *) +#define HANTRODEC_IOCS_PP_PUSH_REG _IOW(HANTRODEC_IOC_MAGIC, 
10, struct core_desc *) + +#define HANTRODEC_IOCH_DEC_RESERVE _IO(HANTRODEC_IOC_MAGIC, 11) +#define HANTRODEC_IOCT_DEC_RELEASE _IO(HANTRODEC_IOC_MAGIC, 12) +#define HANTRODEC_IOCQ_PP_RESERVE _IO(HANTRODEC_IOC_MAGIC, 13) +#define HANTRODEC_IOCT_PP_RELEASE _IO(HANTRODEC_IOC_MAGIC, 14) + +#define HANTRODEC_IOCX_DEC_WAIT _IOWR(HANTRODEC_IOC_MAGIC, 15, struct core_desc *) +#define HANTRODEC_IOCX_PP_WAIT _IOWR(HANTRODEC_IOC_MAGIC, 16, struct core_desc *) + +#define HANTRODEC_IOCS_DEC_PULL_REG _IOWR(HANTRODEC_IOC_MAGIC, 17, struct core_desc *) +#define HANTRODEC_IOCS_PP_PULL_REG _IOWR(HANTRODEC_IOC_MAGIC, 18, struct core_desc *) + +#define HANTRODEC_IOCG_CORE_WAIT _IOR(HANTRODEC_IOC_MAGIC, 19, int *) + +#define HANTRODEC_IOX_ASIC_ID _IOWR(HANTRODEC_IOC_MAGIC, 20, __u32 *) + +#define HANTRODEC_IOX_GHW_PCI_POS _IOWR(HANTRODEC_IOC_MAGIC, 25, __u32 *) + +#define HANTRODEC_DEBUG_STATUS _IO(HANTRODEC_IOC_MAGIC, 29) + +#define HANTRODEC_IOC_MAXNR 29 + +#endif /* !_HANTRODEC_H_ */ diff --git a/drivers/mcst/i2c_spd/Makefile b/drivers/mcst/i2c_spd/Makefile new file mode 100644 index 000000000000..6f4dc74fff82 --- /dev/null +++ b/drivers/mcst/i2c_spd/Makefile @@ -0,0 +1,2 @@ + +obj-$(CONFIG_I2C_SPD) += i2c_spd.o diff --git a/drivers/mcst/i2c_spd/i2c_spd.c b/drivers/mcst/i2c_spd/i2c_spd.c new file mode 100644 index 000000000000..c9bf5b8698a0 --- /dev/null +++ b/drivers/mcst/i2c_spd/i2c_spd.c @@ -0,0 +1,358 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +//#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#define DEV_NAME "i2c_spd" + +#define I2C_CTRL 0x14 +#define I2C_STATUS 0x18 +#define I2C_MODE 0x1c + +struct i2c_dev { + int bus; + int _addr; +}; + +struct i2c_spd_info_t { + + struct pci_dev *pcidev; + void __iomem *dev; + struct i2c_dev adrdev[256]; + void __iomem *base_io; + struct proc_dir_entry *proc_dev; + int count; + 
spinlock_t spd_lock; +}; + +static int count = 0; + +static int i2c_spd_probe(struct pci_dev *pcidev, + const struct pci_device_id *pciid); +static void i2c_spd_remove(struct pci_dev *pci_dev); +inline int i2c_read_reg(struct pci_dev *dev, char *base, unsigned char reg, + int bus, int adr); +inline int i2c_write_reg(struct pci_dev *dev, char *base, unsigned char reg, + int bus, int adr); + +#define SPD_SZ 128 +static int proc_read_i2c_spd(char *page, char **start, + off_t off, int count, int *eof, void *data) +{ + int len = 0; + struct i2c_spd_info_t *device = (struct i2c_spd_info_t *)data; + struct pci_dev *dev = device->pcidev; + int i, j; + unsigned char dt; + int bus, adr; + + spin_lock(&device->spd_lock); + for (i = 0; i < device->count; i++) { + bus = device->adrdev[i].bus; + adr = device->adrdev[i]._addr; + len += sprintf(&page[len], "Module %d", i); + + for (j = 0; j < SPD_SZ; j++) { + if(!(j % 16)) + len += sprintf(&page[len], "\n\t"); + if (i2c_read_reg(dev, &dt, j, bus, adr) <= 0) { // read status reg + len = sprintf(page, "Error read spd unit\n"); + goto l_1; + } + len += sprintf(&page[len], "%02x ", dt); + } + len += sprintf(&page[len], "\n\n"); + } + l_1: + spin_unlock(&device->spd_lock); + return len; +} + +int i2c_base_write(struct pci_dev *dev, char *base, int sz, int bus, int adr, + int reg) +{ + struct i2c_spd_info_t *device = pci_get_drvdata(dev); + // sz < 63 + volatile u32 i2c_ctrl = + 0x1 | (sz << 1) | (adr << 15) | (bus << 24) | (0 << 26) | (1 << 28) + | (reg << 7) | (0 << 22) | (1 << 23); + // write | size | 7bit addr | 0 -phase data | bus number | start byte on | start + volatile u32 i2c_mode = 0; + int k = 0; + int i; + int ret; + + for (i = 1000000; i; i--) { + if (readl(device->dev + I2C_STATUS) & 0x1) { + udelay(1); + continue; + } + break; + } + + if (!i) + return 0; + + for (k = 0; k < sz; k++) { + writeb(*(base + k), device->base_io + k); + } + + writel(i2c_mode, device->dev + I2C_MODE); + writel(0x1e, device->dev + I2C_STATUS); + 
writel(i2c_ctrl, device->dev + I2C_CTRL); + + // data transfer + + for (i = 1000000; i; i--) { + ret = readl(device->dev + I2C_STATUS); + if (ret & 0x1) { + udelay(1); + continue; + } + if (ret & 0x2 && (!(ret & 0x1c))) { + return sz; + } + if (ret & 0x1c) { + break; + } + } + + if (!i) { + writel((1 << 27), device->dev + I2C_CTRL); //kill task + writel(0x1e, device->dev + I2C_STATUS); //clean error bits + } + + return -1; +} + +int i2c_base_read(struct pci_dev *dev, char *base, int sz, int bus, int adr, + int reg) +{ + + struct i2c_spd_info_t *device = pci_get_drvdata(dev); + // sz < 63 //0x1 | (sz << 1) | (adr << 15) | (bus << 24) | (0 << 26) | (1 << 28) | (reg << 7) | (0 << 22) | (1 << 23); + volatile u32 i2c_ctrl = + 0x0 | (sz << 1) | (adr << 15) | (bus << 24) | (0 << 26) | (1 << 28) + | (reg << 7) | (1 << 23) | (0 << 22); + volatile u32 i2c_mode = 0; + + int i; + int ret; + + // start transfer data + + for (i = 1000000; i; i--) { + if (readl(device->dev + I2C_STATUS) & 0x1) { + udelay(1); + continue; + } + + break; + } + if (!i) + return -1; + + writel(i2c_mode, device->dev + I2C_MODE); + writel(0x1e, device->dev + I2C_STATUS); + + writel(i2c_ctrl, device->dev + I2C_CTRL); + + // check transfer + + for (i = 1000000; i; i--) { + ret = readl(device->dev + I2C_STATUS); + if (ret & 0x1) { + udelay(1); + continue; + } + + if (ret & 0x2 && (!(ret & 0x1c))) { + int j = 0; + + for (j = 0; j < sz; j++) { + char b = readb(device->base_io + j); + *(base + j) = b; + } + return sz; + } + + if (ret & 0x1c) + break; + } + + if (!i) { + writel((i2c_ctrl = (1 << 27)), device->dev + I2C_CTRL); + writel(0x1e, device->dev + I2C_STATUS); //clean error bits + } + + return -1; + +} + +inline int i2c_read_reg(struct pci_dev *dev, char *base, unsigned char reg, + int bus, int adr) +{ + int ret = i2c_base_write(dev, ®, 1, bus, adr, 00); + + if (ret == -1) + return ret; + + return i2c_base_read(dev, base, 1, bus, adr, 00); +} + +inline int i2c_write_reg(struct pci_dev *dev, char *base, 
unsigned char reg, + int bus, int adr) +{ + char ctm[2]; + ctm[0] = reg; + ctm[1] = *base; + return i2c_base_write(dev, &ctm[0], 2, bus, adr, 00); +} + +static int i2c_find_spd_dev(struct i2c_spd_info_t *device) +{ + char adr = 0x50; + struct pci_dev *pcidev = device->pcidev; + int j, f, k = 0; + char ctm[64]; + + for (j = 0; j < 1; j++) { + for (f = 0; f < 8; f++) { + if (i2c_read_reg(pcidev, &ctm[0], 0, j, adr + f) > 0) { + printk("spd chip found at %d:%x\n",j, adr + f); + device->adrdev[k].bus = j; + device->adrdev[k]._addr = adr + f; + k++; + } + } + } + return k; + +} + +static int i2c_spd_probe(struct pci_dev *pcidev, + const struct pci_device_id *pciid) +{ + struct i2c_spd_info_t *device; + static char fflag = 0; + + device = + (struct i2c_spd_info_t *)kmalloc(sizeof(struct i2c_spd_info_t), + GFP_KERNEL); + if (device == NULL) { + printk(KERN_ALERT "I2C - Error while trying alloc memory.\n"); + return -ENOMEM; + } + memset(device, 0, sizeof(struct i2c_spd_info_t)); + + memset(device->adrdev, -1, sizeof(struct i2c_dev) * 256); + + device->pcidev = pcidev; + spin_lock_init(&device->spd_lock); + + + device->dev = pci_iomap(device->pcidev, 0, 0); + device->base_io = pci_iomap(device->pcidev, 1, 0); + + pci_set_drvdata(pcidev, device); + + //check i2c + device->count = i2c_find_spd_dev(device); + + if (device->count == 0) + goto release_device; + + printk("%d spd chips was found\n", device->count); + + device->proc_dev = create_proc_entry("i2c_spd", 0444, NULL); + if (device->proc_dev == NULL) { + goto release_device; + } + strcpy((char *)device->proc_dev->name, "i2c_spd"); + device->proc_dev->data = device; + device->proc_dev->read_proc = proc_read_i2c_spd; + device->proc_dev->write_proc = NULL; + fflag++; + count = fflag; + + return 0; + + release_device: + pci_set_drvdata(pcidev, NULL); + kfree(device); + return -1; + +} + +static int __init i2c_spd_init_module(void) +{ + int ret = 0; + struct pci_dev *dev = NULL; + + do { + dev = pci_get_device(0x8086, 
0x0002, dev); + if (dev) { + if (i2c_spd_probe(dev, NULL) == 0) + ret++; + } + } while (dev != NULL); + + if (ret == 0) { + printk ("i2c_spd: Unable to locate any i2c_spd" + " device with valid IDs\n"); + return -ENODEV; + } + + return 0; +} + +static void i2c_spd_remove(struct pci_dev *pci_dev1) +{ + struct i2c_spd_info_t *device; // = pci_get_drvdata(pci_dev); + struct pci_dev *pcidev = 0; // = device->pcidev; + + do { + pcidev = pci_get_device(0x8086, 0x0002, pcidev); + if (pcidev) { + device = pci_get_drvdata(pcidev); + pci_set_drvdata(pcidev, NULL); + remove_proc_entry("i2c_spd", device->proc_dev); + pci_iounmap(pcidev, device->dev); + pci_iounmap(pcidev, device->base_io); + kfree(device); + } + } while (pcidev != NULL); +} + +static void i2c_spd_exit_module(void) +{ + i2c_spd_remove(NULL); +} + +module_init(i2c_spd_init_module); +module_exit(i2c_spd_exit_module); + +MODULE_DESCRIPTION("I2C Driver for dumping spd"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mcst/lptouts/Makefile b/drivers/mcst/lptouts/Makefile new file mode 100644 index 000000000000..0118b73c08c1 --- /dev/null +++ b/drivers/mcst/lptouts/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_LPTOUTS) += lptouts.o diff --git a/drivers/mcst/lptouts/lptouts.c b/drivers/mcst/lptouts/lptouts.c new file mode 100644 index 000000000000..19f5849e3d20 --- /dev/null +++ b/drivers/mcst/lptouts/lptouts.c @@ -0,0 +1,391 @@ +/* + * Copyright (c) 2014 by INEUM + * Output discrete signals via LPT-port + * parport and parport_povozka modules running required before starting this module + * Developed for INEUM-BCVM module + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +//#include +//#include +#include +//#include + +#define LPTOUTS_INIT_VALUE 0xFF + +#define PCI_DEVICE_ID_PAR_SER 0x8000 +#define E3M_MULTIFUNC_VENDOR 0x8086 + +#define PCI_DEVICE_ID_PARPORTMCST 0x0121 //0x8007 +#define PCI_VENDOR_ID_MCST 0x14F2 //0x1fff + +struct pci_parport_data { 
+ int num; + int driver_data; + struct parport *ports[2]; +}; + +struct pci_dev *pcidev = NULL; +void *pIO = NULL; +unsigned long lpt_io_lo, lpt_io_hi; + +#define MAX_LPTOUT_DEVICES 9 +static struct device *lpt_devices[MAX_LPTOUT_DEVICES]; + +/////////////////////// LINUX KERNEL MODULE ///////////////////////////////////// + +#define VERSION "1.1" +#define LAST_UPDATE "22.06.2021" + +#define SUCCESS 0 +#define DEVICE_NAME "lptouts" +#define BUF_LEN 80 + +static int Major; +static int Device_Open = 0; + +static dev_t first_dev; // Global variable for the first device number +static struct class *dev_class; + +static int lpt_probe(struct pci_dev *dev, const struct pci_device_id *ids) +{ + int err; + int lo = 0; + int irq; + + err = pci_enable_device(dev); + if (err) + return err; + + //accessing lpt port device + pIO = + ioremap_nocache(pci_resource_start(dev, lo), + pci_resource_len(dev, lo)); + if (pIO == NULL) { + printk(KERN_INFO "%s: ERROR! Can`t ioremap. \n", DEVICE_NAME); + pci_disable_device(pcidev); + pcidev = NULL; + return -1; + } + lpt_io_lo = pci_resource_start(dev, lo); + lpt_io_hi = 0; + + irq = dev->irq; + + //reset device + outb(0x01, lpt_io_lo + 0x0A); + udelay(1000); + outb(0x20, lpt_io_lo + 0x0A); + + udelay(1000); + outb(0xa3, lpt_io_lo + 0x17); + + udelay(1000); + + //out startup data + outb(LPTOUTS_INIT_VALUE, lpt_io_lo); + + //data can be read + //printk("0x%X\n", inb(lpt_io_lo)); + + return 0; +} + +static int init_lpt(void) +{ + struct pci_device_id id; + + id.vendor = E3M_MULTIFUNC_VENDOR; + id.device = PCI_DEVICE_ID_PAR_SER; + id.subvendor = 0x8086; + id.subdevice = 0x8001; + id.class = 0; + id.class_mask = 0; + id.driver_data = 0; //mcst_pp_iee1284; + + pcidev = + pci_get_device(E3M_MULTIFUNC_VENDOR, PCI_DEVICE_ID_PAR_SER, NULL); + + if (!pcidev) { + printk + ("lptouts: %s: Unable to locate any shared_mem device with valid IDs 0x%x-0x%x\n", + __func__, PCI_VENDOR_ID_MCST, + PCI_DEVICE_ID_PARPORTMCST); + return -1; + } + + return 
lpt_probe(pcidev, &id); +} + +static int device_open(struct inode *inode, struct file *file) +{ + int fd = 0; + + if (MINOR(inode->i_rdev) > 0) { + fd = MINOR(inode->i_rdev); + } else { + Device_Open++; + } + try_module_get(THIS_MODULE); + + return SUCCESS; +} + +static int device_release(struct inode *inode, struct file *file) +{ + int fd = 0; + if (MINOR(inode->i_rdev) > 0) { + fd = MINOR(inode->i_rdev); + } else { + Device_Open--; + } + + module_put(THIS_MODULE); + + return 0; +} + +static long device_ioctl(struct file *file, unsigned int ioctl_num, + unsigned long ioctl_param) +{ + int fd = 0, ret = 0; + + fd = MINOR(file_inode(file)->i_rdev); + + if (fd > 0) { + fd--; + switch (ioctl_num) { + default: + break; + } + } else { + switch (ioctl_num) { + default: + break; + } + } + + return ret; +} + +static ssize_t device_read(struct file *filp, char *buffer, size_t length, + loff_t * offset) +{ + struct inode *inode; + int bytes_read = 0; + int fd; + + inode = file_inode(filp); + + fd = MINOR(inode->i_rdev); + + if (fd > 0) { + fd--; + } else { + } + + return bytes_read; +} + +static ssize_t device_write(struct file *filp, const char *buff, size_t len, + loff_t * off) +{ + struct inode *inode; + int i = 0, fd = -1, len_to_read = len; + char szbuf[255]; + + inode = file_inode(filp); + + fd = MINOR(inode->i_rdev); + + if (len > 255) + len_to_read = 255; + + if (fd > 0) { + fd--; + } else { + for (i = 0; i < len_to_read; i++) { + get_user(szbuf[i], buff + i); + } + } + + return i; +} + +static ssize_t sys_write_value(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + int fd, ret = 0; + unsigned long state = 0; + unsigned char reg = 0; + + //reading out number + fd = MINOR(dev->devt); + + if (fd > 0 && fd < MAX_LPTOUT_DEVICES) { + ret = kstrtoul(buf, 0, &state); + if (ret) + return ret; + + if (pcidev != NULL && pIO != NULL) { + //out state value to fd output + //reading current value + reg = inb(lpt_io_lo); + if (state == 1) + 
reg |= ((unsigned char)0x01) << (fd - 1); + else if (state == 0) + reg &= ~(((unsigned char)0x01) << (fd - 1)); + + //out value to lpt port + outb(reg, lpt_io_lo); + //printk("out 0x%X to 0x%X\n", reg, lpt_io_lo); + } + } else if (fd == 0) //working with root device + { + ret = kstrtoul(buf, 0, &state); + if (ret) + return ret; + if (pcidev != NULL && pIO != NULL) { + outb((unsigned char)state, lpt_io_lo); + } + } + + return count; +} + +static ssize_t sys_read_value(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int fd; + + //reading out number + fd = MINOR(dev->devt); + + if (fd > 0 && fd < MAX_LPTOUT_DEVICES) { + if (pcidev != NULL && pIO != NULL) { + return sprintf(buf, "%d\n", + inb(lpt_io_lo) & (0x00000001 << + (fd - 1))); + } + } else if (fd == 0) //working with root device + { + if (pcidev != NULL && pIO != NULL) { + return sprintf(buf, "0x%02X\n", inb(lpt_io_lo)); + } + } + + return sprintf(buf, "unknown\n"); +} + +static struct file_operations fops = { + .read = device_read, + .write = device_write, + .open = device_open, + .release = device_release, + .unlocked_ioctl = device_ioctl +}; + +static DEVICE_ATTR(value, 0644 /*S_IWUSR | S_IRUSR */ , sys_read_value, + sys_write_value); + +static int __init lptouts_init(void) +{ + int i = 0, retval = 0; + + printk(KERN_INFO "%s: driver started (version: %s, date: %s)\n", + DEVICE_NAME, VERSION, LAST_UPDATE); + + //registering device... 
+ Major = register_chrdev(0, DEVICE_NAME, &fops); + if (Major < 0) { + printk("Registering the character device failed with %dn\n", + Major); + return -EINVAL; + } + + dev_class = class_create(THIS_MODULE, DEVICE_NAME); + if (dev_class == NULL) { + printk("udev is unavailable\n"); + } else { + //creating devices + first_dev = MKDEV(Major, i); + lpt_devices[0] = + device_create(dev_class, NULL, first_dev, NULL, + DEVICE_NAME); + retval = device_create_file(lpt_devices[0], &dev_attr_value); + if (retval < 0) { + printk(KERN_INFO + "%s: failed to create write /sys endpoint - continuing without (0)\n", + DEVICE_NAME); + } + + //creating out devices + for (i = 1; i < MAX_LPTOUT_DEVICES; i++) { + lpt_devices[i] = + device_create(dev_class, NULL, MKDEV(Major, i), + NULL, "lptout%d", i); + retval = + device_create_file(lpt_devices[i], &dev_attr_value); + if (retval < 0) { + printk(KERN_INFO + "%s: failed to create write /sys endpoint - continuing without (%d)\n", + DEVICE_NAME, i); + } + } + } + + printk(KERN_INFO "%s: driver was assigned major number %d.\n", + DEVICE_NAME, Major); + + if (init_lpt() < 0) { + return -EINVAL; + } + + return 0; +} + +static void __exit lptouts_exit(void) +{ + int i = 0; + + for (i = 0; i < MAX_LPTOUT_DEVICES; i++) { + device_remove_file(lpt_devices[i], &dev_attr_value); + device_destroy(dev_class, MKDEV(Major, i)); + } + device_destroy(dev_class, first_dev); + + class_destroy(dev_class); + + unregister_chrdev(Major, DEVICE_NAME); + + //disabling pci device + if (pcidev != NULL) { + //freeing resources + if (pIO != NULL) + iounmap(pIO); + //disabling device + pci_disable_device(pcidev); + + pcidev = NULL; + } + + printk(KERN_INFO "%s: module exit\n", DEVICE_NAME); +} + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Anton V. 
Glukhov"); +module_init(lptouts_init) + module_exit(lptouts_exit) diff --git a/drivers/mcst/lptouts/readme.txt b/drivers/mcst/lptouts/readme.txt new file mode 100644 index 000000000000..3ff8ac373e02 --- /dev/null +++ b/drivers/mcst/lptouts/readme.txt @@ -0,0 +1,18 @@ +Модуль lptouts +Предназначен для непосредственного индивидуального воздействия на сигналы данных параллельного порта повозки. +Разработан для модуля БЦВМ в рамках темы БЦВМ-ИНЭУМ. +Для корректной работы модуля необходимо наличие модулей: parport и parport_povozka. +Модуль не должен быть подключен одновременно с модулями: lp. +Значения вывода после запуска модуля: 0xFF. + +Описание работы + +Модуль БЦВМ-ИНЭУМ имеет на борту 8 сигналов дискретного вывода, подключенных к линиям данных LPT-порта. +Каналы GPIO не могли быть задействованы для этих целей, т.к. были заняты для обеспечения функций дискретного ввода. +Для пользователя модуль организует интерфейс доступа через sysfs. В /sys/class появляется класс lptouts в котором организуется 8 узлов lptout1..8 для непосредственного доступа к каждой линии вывода и отдельно узел lptouts для группового управления. +Таким образом, для установки линии данных 2 в значение 1 необходимо выполнить команду: +echo 1 > /sys/class/lptouts/lptout2/value +или (запись в групповой узел задаёт состояние всех восьми линий сразу): +echo 2 > /sys/class/lptouts/lptouts/value + +Все узлы поддерживают функцию чтения, что позволяет прочитать ранее установленное значение. При этом чтение производится непосредственно из регистров LPT порта. 
diff --git a/drivers/mcst/m2mlc/Makefile b/drivers/mcst/m2mlc/Makefile new file mode 100644 index 000000000000..d6afeecb7463 --- /dev/null +++ b/drivers/mcst/m2mlc/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for Node Interconnect Controller +# + +m2mlc-objs := m2mlc_hw.o m2mlc_dev.o m2mlc_ksvv.o m2mlc_net.o m2mlc_pci.o m2mlc_main.o + +obj-$(CONFIG_M2MLC) := m2mlc.o diff --git a/drivers/mcst/m2mlc/m2mlc.h b/drivers/mcst/m2mlc/m2mlc.h new file mode 100644 index 000000000000..a2df6f0cd7a3 --- /dev/null +++ b/drivers/mcst/m2mlc/m2mlc.h @@ -0,0 +1,238 @@ +#ifndef M2MLC_H__ +#define M2MLC_H__ + + +/* Global parameters */ +#define ENABLE_NET_DEV /* define: enable Network device */ +#undef USE_ALLOCPOOL /* define: use dma_pool_create */ +#define USE_MUL2ALIGN /* define: allocate mem * 2 (RTL BUG) */ +#undef TESTWOIRQ /* define: disable interrupt */ +#undef USE_DUALIOLINK /* define: enable second iolink */ +#undef TESTWOHARDWARE /* define: disable access real hardware */ + +#ifdef TESTWOHARDWARE + #define TESTWOIRQ +#endif /* TESTWOHARDWARE */ + +#ifdef __sparc__ /* ARCH: e90, e90s */ +#undef USE_DUALIOLINK +#define USE_RDMA2_MODE /* define: RDMA2 */ +#else /* ARCH: e2k, x86 */ +#undef USE_RDMA2_MODE /* undef: KSVV */ +#endif /* __sparc__ */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "m2mlc_dbg.h" +#include "m2mlc_regs.h" +#ifdef DEBUG +#include "m2mlc_io.h" +#else +#include +#endif +#ifdef ENABLE_NET_DEV +#include "m2mlc_ksvv.h" +#endif /* ENABLE_NET_DEV */ + + +/** + ****************************************************************************** + * Driver + ****************************************************************************** + **/ + +#define DRIVER_NAME "m2mlc" +#define DRIVER_VERSION "1.0.2" + + +/** + ****************************************************************************** + * Interface & Settings + 
****************************************************************************** + **/ + +/* PCI */ +#define DEVICE_ID (0x8021) +#define VENDOR_ID (0x1FFF) + +/* Endpoint number: + * 0 - root: maintenance + * 1..16 - for user + * 17 - root: network + * 18..19 - root + */ +#define CDEV_ENDPOINT_UMIN 1 +#define CDEV_ENDPOINT_UMAX 16 +#define CDEV_ENDPOINT_NET 17 +#define CDEV_ENDPOINT_NONE (-1) + + +/** + ****************************************************************************** + * Module parameters + ****************************************************************************** + **/ + +extern u16 rtl_version; +extern u32 debug_mask; +extern u32 softreset_enable; +extern u32 timeout_retry; +extern u32 timeout_counter; +extern unsigned int dma_max_seg_size; +extern unsigned long dma_seg_boundary; + + +/** + ****************************************************************************** + * Buffers in RAM + ****************************************************************************** + **/ + +#define PIO_DONE_QUE_RAM PAGE_SIZE /* xN */ +#define PIO_DATA_QUE_RAM (16 * 256) /* xN */ +#define MDD_RET_RAM PAGE_SIZE /* for N */ +#define MB_STRUCT_RAM (1024 * 4096) /* for N */ +#define MB_DONE_QUE_RAM (1024 * 8) /* for N */ +#define DB_START_RAM PAGE_SIZE /* for N (256 * 8) */ +#define DMA_START_RAM (4096 * 32) /* for N */ +#define DMA_DONE_QUE_RAM (4096 * 4) /* for N */ + + +/** + ****************************************************************************** + * Private structs + ****************************************************************************** + **/ + +extern struct pci_driver m2mlc_pci_driver; + + +#ifdef ENABLE_NET_DEV + +struct m2mlc_priv; + +typedef struct m2mlc_npriv { + struct m2mlc_priv *p_priv; /* parent */ + + struct net_device_stats stats; + struct napi_struct napi; + + /* + struct sk_buff *tx_dma_skb; + struct sk_buff *rx_skb; + struct sk_buff *rx_dma_skb; + */ + + dma_addr_t tx_dma_map_adr; + dma_addr_t rx_dma_map_adr; + + /* Lock long rx 
packet (>MTU): 0 - unlock, 1 - lock */ + int rx_lock_long_rx_pack; + + /* Network endpoint */ + struct ksvv_endpoint ksvvendpoint; +} m2mlc_npriv_t; + +#endif /* ENABLE_NET_DEV */ + + +typedef struct m2mlc_priv { + /* PCI */ + struct pci_dev *pdev; /* PCI device information struct */ +#ifdef USE_DUALIOLINK + struct platform_device *fakedev; /* second CPU link */ +#endif /* USE_DUALIOLINK */ + void __iomem *ecs_base; /* ioremap'ed address to BAR0 */ + void __iomem *reg_base; /* ioremap'ed address to BAR1 */ + void __iomem *buf_base; /* ioremap'ed address to BAR2 */ +#ifndef USE_RDMA2_MODE + void __iomem *iom_base; /* ioremap'ed address to BAR3 */ +#endif /* USE_RDMA2_MODE */ + phys_addr_t reg_base_bus; /* BAR1 phys address for mmap */ + phys_addr_t buf_base_bus; /* BAR2 phys address for mmap */ + u8 niccpb_procval; /* PCI conf space - NIC Capability */ + + /* CDEV */ + #ifdef CONFIG_MCST_RT_NO + raw_spinlock_t cdev_open_lock; + #else + spinlock_t cdev_open_lock; + #endif + int device_open; + struct cdev cdev; + struct device *dev; + unsigned int minor; + /* Endpoint */ + int pid[NICCPB_PROCVAL]; + int signal[NICCPB_PROCVAL]; /* signal to user, or 0 */ + struct task_struct *tsk[NICCPB_PROCVAL]; + /* <<< cdev_open_lock */ + + #ifdef ENABLE_NET_DEV + struct net_device *ndev; + #endif + + /* = buffs in main memory = */ + /* PIO Done Queue */ + size_t pio_done_que_size; /* Size */ + void *pio_done_que_buff; /* CPU-viewed address */ + dma_addr_t pio_done_que_handle; /* device-viewed address */ + /* PIO Data Queue */ + size_t pio_data_que_size; + void *pio_data_que_buff; + dma_addr_t pio_data_que_handle; + /* Mailbox/Doorbell/DMA Return */ + size_t mdd_ret_size[NICCPB_PROCVAL]; + void *mdd_ret_buff[NICCPB_PROCVAL]; + dma_addr_t mdd_ret_handle[NICCPB_PROCVAL]; + /* Mailbox Structure */ + size_t mb_struct_size[NICCPB_PROCVAL]; + void *mb_struct_buff[NICCPB_PROCVAL]; + dma_addr_t mb_struct_handle[NICCPB_PROCVAL]; + /* Mailbox Done Queue */ + size_t 
mb_done_que_size[NICCPB_PROCVAL]; + void *mb_done_que_buff[NICCPB_PROCVAL]; + dma_addr_t mb_done_que_handle[NICCPB_PROCVAL]; + /* Doorbell Start */ + size_t db_start_size[NICCPB_PROCVAL]; + void *db_start_buff[NICCPB_PROCVAL]; + dma_addr_t db_start_handle[NICCPB_PROCVAL]; + /* DMA Start */ + size_t dma_start_size[NICCPB_PROCVAL]; + void *dma_start_buff[NICCPB_PROCVAL]; + dma_addr_t dma_start_handle[NICCPB_PROCVAL]; + /* DMA Done Queue */ + size_t dma_done_que_size[NICCPB_PROCVAL]; + void *dma_done_que_buff[NICCPB_PROCVAL]; + dma_addr_t dma_done_que_handle[NICCPB_PROCVAL]; +#ifdef USE_ALLOCPOOL + /* Mailbox Structure, Done Queue; DMA Start, Done Queue */ + struct dma_pool *mb_struct_dma_pool; + struct dma_pool *mb_done_dma_pool; + struct dma_pool *dma_start_dma_pool; + struct dma_pool *dma_done_dma_pool; +#endif /* USE_ALLOCPOOL */ +#ifdef USE_MUL2ALIGN + unsigned int pio_done_que_offset; + unsigned int pio_data_que_offset; + unsigned int mb_struct_offset[NICCPB_PROCVAL]; + unsigned int mb_done_offset[NICCPB_PROCVAL]; + unsigned int dma_start_offset[NICCPB_PROCVAL]; + unsigned int dma_done_offset[NICCPB_PROCVAL]; +#endif /* USE_MUL2ALIGN */ +} m2mlc_priv_t; + + +#endif /* M2MLC_H__ */ diff --git a/drivers/mcst/m2mlc/m2mlc_dbg.h b/drivers/mcst/m2mlc/m2mlc_dbg.h new file mode 100644 index 000000000000..2c75f5329917 --- /dev/null +++ b/drivers/mcst/m2mlc/m2mlc_dbg.h @@ -0,0 +1,60 @@ +#ifndef M2MLC_DBG_H__ +#define M2MLC_DBG_H__ + + +/** + * Debug + * + * DEBUG - defined in makefile + */ +#undef PDEBUG +#ifdef DEBUG +#define PDEBUG(msk, fmt, args...) \ +do { \ + if (debug_mask & msk) { \ + printk(KERN_DEBUG KBUILD_MODNAME ": " fmt, ## args); \ + } \ +} while (0) +#else +#define PDEBUG(msk, fmt, args...) do {} while (0) +#endif + +#undef nPDEBUG +#define nPDEBUG(msk, fmt, args...) do {} while (0) + +#ifdef DEBUG +#define DEV_DBG(msk, dev, fmt, args...) 
\ +do { \ + if (debug_mask & msk) { \ + dev_dbg(dev, fmt, ## args); \ + } \ +} while (0) +#else +#define DEV_DBG(msk, dev, fmt, args...) do {} while (0) +#endif + +#undef nDEV_DBG +#define nDEV_DBG(msk, dev, fmt, args...) do {} while (0) + +#define ERR_MSG(fmt, args...) \ + printk(KERN_ERR KBUILD_MODNAME ": " fmt, ## args) +#define WRN_MSG(fmt, args...) \ + printk(KERN_WARNING KBUILD_MODNAME ": " fmt, ## args) +#define LOG_MSG(fmt, args...) \ + printk(KERN_INFO KBUILD_MODNAME ": " fmt, ## args) + +#ifdef DEBUG +#define assert(expr) \ +do { \ + if (!(expr)) { \ + printk(KERN_CRIT KBUILD_MODNAME \ + ": Assertion failed! %s,%s,%s,line=%d\n", \ + #expr, __FILE__, __func__, __LINE__); \ + } \ +} while (0) +#else +#define assert(expr) do {} while (0) +#endif + + +#endif /* M2MLC_DBG_H__ */ diff --git a/drivers/mcst/m2mlc/m2mlc_dev.c b/drivers/mcst/m2mlc/m2mlc_dev.c new file mode 100644 index 000000000000..f8d85ea19f89 --- /dev/null +++ b/drivers/mcst/m2mlc/m2mlc_dev.c @@ -0,0 +1,1647 @@ +/** + * m2mlc_dev.c - M2MLC module device driver + * + * Char Device part + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "m2mlc.h" + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) + #define __kuid_val(uid) (uid) +#endif + +#ifndef VM_RESERVED + #define VM_RESERVED 0 +#endif + + +/* extern */ +u32 m2mlc_read_reg32(void __iomem *base_addr, u32 port); +void m2mlc_write_reg32(void __iomem *base_addr, u32 port, u32 val); +void m2mlc_hw_print_all_regs(m2mlc_priv_t *priv, uint32_t regmsk); +void m2mlc_hw_pio_start(m2mlc_priv_t *priv, int ep, m2mlc_pio_cmd_t *pio_cmd); +void m2mlc_hw_pio_getstat(m2mlc_priv_t *priv, int ep, uint8_t *piostat/*, + int complete*/); +void m2mlc_hw_mb_getptrs_mail(m2mlc_priv_t *priv, int ep, + m2mlc_mb_ptrs_t *mbptrs); +void m2mlc_hw_mb_getptrs_done(m2mlc_priv_t *priv, int ep, + m2mlc_mb_ptrs_t *mbptrs); +void m2mlc_hw_mb_settailptr_mail(m2mlc_priv_t *priv, int ep, + uint16_t tail_ptr); +void 
m2mlc_hw_mb_settailptr_done(m2mlc_priv_t *priv, int ep, + uint16_t tail_ptr); +void m2mlc_hw_db_getptrs(m2mlc_priv_t *priv, int ep, m2mlc_db_ptrs_t *dbptrs); +void m2mlc_hw_db_settailptr(m2mlc_priv_t *priv, int ep, uint16_t tail_ptr); +void m2mlc_hw_int_setmask(m2mlc_priv_t *priv, int ep, + m2mlc_interrupt_t intmask); +void m2mlc_hw_int_clear(m2mlc_priv_t *priv, int ep, + m2mlc_interrupt_t intclr); +void m2mlc_hw_int_getstat(m2mlc_priv_t *priv, int ep, + m2mlc_int_stat_t *intstat); +void m2mlc_hw_dma_getptrs_str(m2mlc_priv_t *priv, int ep, + m2mlc_dma_str_ptrs_t *dmaptrs); +void m2mlc_hw_dma_getptrs_done(m2mlc_priv_t *priv, int ep, + m2mlc_dma_done_ptrs_t *dmaptrs); +void m2mlc_hw_dma_setheadptr_str(m2mlc_priv_t *priv, int ep, + uint16_t head_ptr); +void m2mlc_hw_dma_settailptr_done(m2mlc_priv_t *priv, int ep, + uint16_t tail_ptr); + + +/** + ****************************************************************************** + * Mem alloc for DMA + ****************************************************************************** + */ + +static long dma_mem_alloc(m2mlc_priv_t *priv, m2mlc_mem_ptrs_t *mem_ptrs) +{ + long ret = 0; + int i; + + struct scatterlist *sglist; + int nents; +#ifdef USE_DUALIOLINK + int nentsf; +#endif /* USE_DUALIOLINK */ + + uint64_t first_page, last_page, fp_offset; + unsigned int npages; + struct page **pages; + + struct pci_dev *pdev = priv->pdev; + + uint64_t uaddr = mem_ptrs->useraddr; + size_t bytecount = mem_ptrs->bytes; + + /* clean */ + mem_ptrs->dmaaddr = 0; + mem_ptrs->len = 0; + /* TODO: move to internal list */ + mem_ptrs->pages = 0; + mem_ptrs->npages = 0; + mem_ptrs->nents = 0; + mem_ptrs->sg = 0; + + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, + "useraddr=0x%llX, bytecount=0x%zX(%zu)\n", + uaddr, bytecount, bytecount); + if (0 == uaddr || 0 == bytecount) { + return -EINVAL; + } + + /* get_user_pages */ + + first_page = (uaddr & PAGE_MASK) >> PAGE_SHIFT; + last_page = ((uaddr+bytecount-1) & PAGE_MASK) >> PAGE_SHIFT; + fp_offset = uaddr & 
~PAGE_MASK; + npages = last_page - first_page + 1; + + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, + "first_p=%llu, last_p=%llu, fp_offset=%llu(%llX), npages=%u\n", + first_page, last_page, fp_offset, fp_offset, npages); + + pages = kmalloc_node(sizeof(struct page *) * npages, GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!pages) { + dev_err(priv->dev, + "kmalloc for pages failure\n"); + return -ENOMEM; + } + + down_read(¤t->mm->mmap_sem); + ret = get_user_pages(uaddr & PAGE_MASK, npages, FOLL_WRITE, pages, + NULL); + up_read(¤t->mm->mmap_sem); + if (ret != npages) { + dev_err(priv->dev, + "get_user_pages failure\n"); + npages = ret; /* for SetPageDirty & page_cache_release */ + ret = -EINVAL; + goto out_unpage; + } + ret = 0; + /* save for dma_mem_free */ + mem_ptrs->pages = pages; + mem_ptrs->npages = npages; + + /* map pages */ + + sglist = kcalloc(npages, sizeof(*sglist), GFP_KERNEL); + if (NULL == sglist) { + dev_err(priv->dev, + "kcalloc for sglist failure\n"); + ret = -ENOMEM; + goto out_unpage; + } + + /* per-page */ + for (i = 0; i < npages; i++) { + if (i == 0) { /* first */ + sg_set_page(&sglist[i], pages[i], + PAGE_SIZE - fp_offset, fp_offset); + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, + "first page[%d]=%p, offset=0x%X, len=%u\n", + i, pages[i], sglist[i].offset, + sglist[i].length); + } else if (i == npages-1) { /* last */ + sg_set_page(&sglist[i], pages[i], + bytecount-(PAGE_SIZE-fp_offset)- + ((npages-2)*PAGE_SIZE), 0); + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, + "last page[%d]=%p, offset=0x%X, len=%u\n", + i, pages[i], sglist[i].offset, + sglist[i].length); + } else { /* middle */ + sg_set_page(&sglist[i], pages[i], + PAGE_SIZE, 0); + /*DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, + "middle page[%d]=%p, offset=0x%X, len=%u\n", + i, pages[i], sglist[i].offset, + sglist[i].length);*/ + } + } /* for (i) */ + + nents = dma_map_sg(&priv->pdev->dev, sglist, npages, DMA_BIDIRECTIONAL); + if (0 == nents) { + dev_err(priv->dev, "map1 sglist error - npages=%d, 
nents=%d\n", + npages, nents); + ret = -ENOMEM; + goto out_unalloc; + } +#if 0 /* move chk if(1!=nents) to user */ + if (1 != nents) { + dev_err(priv->dev, "map1 sglist error nents%d != 1\n", nents); + ret = -ENOMEM; + goto out_unmap; + } +#endif /* 0 */ + +#ifdef USE_DUALIOLINK + nentsf = dma_map_sg(&priv->fakedev->dev, sglist, npages, + DMA_BIDIRECTIONAL); + if (0 == nentsf) { + dev_err(priv->dev, "map2 sglist error - npages=%d, nents=%d\n", + npages, nentsf); + ret = -ENOMEM; + goto out_unalloc; /* FIXME: */ + } + if (nentsf != nents) { + dev_err(priv->dev, "map1 nents(%d) != map2 nents(%d)\n", + nents, nentsf); + ret = -EINVAL; + goto out_unalloc; /* FIXME: */ + } +#endif /* USE_DUALIOLINK */ + + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, + "map sglist - npages=%d, nents=%d\n", npages, nents); + /* save for dma_mem_free */ + mem_ptrs->nents = nents; + mem_ptrs->sg = sglist; + + /* To User */ + mem_ptrs->dmaaddr = sg_dma_address(sglist); + mem_ptrs->len = sg_dma_len(sglist); + + return 0; + + +#if 0 /* move chk if(1!=nents) to user */ +out_unmap: + if (nents) { + dma_unmap_sg(&priv->pdev->dev, sglist, npages, + DMA_BIDIRECTIONAL); +#ifdef USE_DUALIOLINK + dma_unmap_sg(&priv->fakedev->dev, sglist, npages, + DMA_BIDIRECTIONAL); +#endif + } +#endif /* 0 */ +out_unalloc: + kfree(sglist); +out_unpage: + for (i = 0; i < npages; i++) { + if (!PageReserved(pages[i])) + SetPageDirty(pages[i]); + put_page(pages[i]); + } + + kfree(pages); + return ret; +} /* dma_mem_alloc */ + +static void dma_mem_free(m2mlc_priv_t *priv, m2mlc_mem_ptrs_t *mem_ptrs) +{ + int i; + unsigned int npages = mem_ptrs->npages; + struct page **pages = mem_ptrs->pages; + + struct scatterlist *sglist = mem_ptrs->sg; + + if (npages) { + dma_unmap_sg(&priv->pdev->dev, sglist, npages, + DMA_BIDIRECTIONAL); +#ifdef USE_DUALIOLINK + dma_unmap_sg(&priv->fakedev->dev, sglist, npages, + DMA_BIDIRECTIONAL); +#endif /* USE_DUALIOLINK */ + } + kfree(sglist); + + for (i = 0; i < npages; i++) { + if 
(!PageReserved(pages[i])) + SetPageDirty(pages[i]); + put_page(pages[i]); + } + + kfree(pages); +} /* dma_mem_free */ + + +/** + ****************************************************************************** + * file operation part (Char device methods) + ****************************************************************************** + */ + +/* .mmap_id */ +#define MMAP_ENDPOINT_REGS_ID 0x001 +#define MMAP_PIO_PAYLOAD_ID 0x002 +#define MMAP_PIO_DONE_QUEUE_ID 0x004 +#define MMAP_PIO_DATA_QUEUE_ID 0x008 +#define MMAP_DONE_REGS_COPY_ID 0x020 +#define MMAP_DB_QUEUE_ID 0x040 +#define MMAP_DMA_DESCR_QUEUE_ID 0x080 +#define MMAP_DMA_DONE_QUEUE_ID 0x100 +#define MMAP_MB_DONE_QUEUE_ID 0x200 +#define MMAP_MB_MAIL_ID 0x400 + + +typedef struct { + m2mlc_priv_t *priv; + int endpoint; + unsigned int mmap_id; +} cdev_priv_t; + + +#define FIOCTL_CHECK_ENDPOINT_NONE \ +do { \ + if (cpriv->endpoint == CDEV_ENDPOINT_NONE) { \ + dev_err(priv->dev, "IOCTL ERROR: endpoint not opened\n"); \ + ret = -EFAULT; \ + break; \ + } \ +} while (0) + +#define FIOCTL_CHECK_MMAP_REGS \ +do { \ + if (cpriv->mmap_id & MMAP_ENDPOINT_REGS_ID) { \ + dev_err(priv->dev, "IOCTL ERROR: registers mmaped\n"); \ + ret = -EFAULT; \ + break; \ + } \ +} while (0) + +#define FIOCTL_COPY_FROM_USER(val) \ +do { \ + if (copy_from_user((caddr_t)&(val), uarg, _IOC_SIZE(cmd))) { \ + dev_err(priv->dev, "IOCTL: copy_from_user failure\n"); \ + ret = -EFAULT; \ + break; \ + } \ +} while (0) + +#define FIOCTL_COPY_TO_USER(val) \ +do { \ + if (copy_to_user(uarg, (caddr_t)&(val), _IOC_SIZE(cmd))) { \ + dev_err(priv->dev, "IOCTL: copy_to_user failure\n"); \ + ret = -EFAULT; \ + } \ +} while (0) + + +/** + * ioctl file operation + */ +static long cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + long ret = 0; + cdev_priv_t *cpriv; + m2mlc_priv_t *priv; + struct pci_dev *pdev; + void __user *uarg = (void __user *) arg; + + + cpriv = (cdev_priv_t *)filp->private_data; + assert(cpriv); + if (!cpriv) + return 
-ENODEV; + + priv = cpriv->priv; + assert(priv); + if (!priv) + return -ENODEV; + + pdev = priv->pdev; + assert(pdev); + if (!pdev) + return -ENODEV; + + if ((_IOC_TYPE(cmd) != M2MLC_IOC_MAGIC)) { + dev_err(priv->dev, "IOCTL ERROR: invalid command 0x%X(%d)\n", + cmd, cmd); + return -ENOTTY; + } + + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, "CDEV_IOCTL: 0x%X(%d)\n", + cmd, cmd); + + + switch (cmd) { + + /* Open/Close Endpoint */ + + case M2MLC_IOCTL_OPEN_ENDPOINT: + { + m2mlc_resource_t res; + + if (cpriv->endpoint != CDEV_ENDPOINT_NONE) { + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, + "IOCTL_OPEN_ENDPOINT ERROR: " \ + "used endpoint %d\n", cpriv->endpoint); + ret = -EAGAIN; + break; + } + + FIOCTL_COPY_FROM_USER(res); + + if (res.num > priv->niccpb_procval) { + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, + "IOCTL_OPEN_ENDPOINT ERROR: " \ + "wrong endpoint number %d\n", res.num); + ret = -EFAULT; + break; + } + + if (res.num == CDEV_ENDPOINT_NET) { + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, + "IOCTL_OPEN_ENDPOINT ERROR: " \ + "endpoint number %d reserved\n", res.num); + ret = -EFAULT; + break; + } + + if ((res.num > CDEV_ENDPOINT_UMAX) || + (res.num < CDEV_ENDPOINT_UMIN)) { + if (__kuid_val(current_euid()) != 0) { + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, + "IOCTL_OPEN_ENDPOINT ERROR: " \ + "endpoint number %d, for root only\n", + res.num); + ret = -EACCES; + break; + } + } + + spin_lock(&priv->cdev_open_lock); + if (priv->pid[res.num] != 0) { + spin_unlock(&priv->cdev_open_lock); + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, + "IOCTL_OPEN_ENDPOINT ERROR: " \ + "endpoint busy\n"); + ret = -EBUSY; + break; + } + cpriv->endpoint = res.num; + priv->signal[res.num] = res.signal; + priv->pid[res.num] = (int)current->pid; + priv->tsk[res.num] = current; + spin_unlock(&priv->cdev_open_lock); + break; + } + + case M2MLC_IOCTL_CLOSE_ENDPOINT: + { + if (cpriv->endpoint != CDEV_ENDPOINT_NONE) { + spin_lock(&priv->cdev_open_lock); + priv->signal[cpriv->endpoint] = 0; + priv->pid[cpriv->endpoint] = 
0; + priv->tsk[cpriv->endpoint] = NULL; + cpriv->endpoint = CDEV_ENDPOINT_NONE; + spin_unlock(&priv->cdev_open_lock); + } + break; + } + + /* PIO */ + + case M2MLC_IOCTL_PIO_START: + { + uint8_t piostat; + m2mlc_pio_cmd_t pio_cmd; + + FIOCTL_CHECK_ENDPOINT_NONE; + FIOCTL_CHECK_MMAP_REGS; + FIOCTL_COPY_FROM_USER(pio_cmd); + m2mlc_hw_pio_getstat(priv, cpriv->endpoint, &piostat/*, 0*/); + if (M2MLC_PIO_BLOCK_BUSY & piostat) { + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, + "IOCTL ERROR: resource busy\n"); + ret = -EBUSY; + break; + } + m2mlc_hw_pio_start(priv, cpriv->endpoint, &pio_cmd); + break; + } + + case M2MLC_IOCTL_PIO_GETSTAT: + { + uint8_t piostat; + + FIOCTL_CHECK_ENDPOINT_NONE; + m2mlc_hw_pio_getstat(priv, cpriv->endpoint, &piostat/*, 1*/); + FIOCTL_COPY_TO_USER(piostat); + break; + } + + /* Mailbox */ + + case M2MLC_IOCTL_MB_GETPTRS_MAIL: + { + m2mlc_mb_ptrs_t mbptrs; + + FIOCTL_CHECK_ENDPOINT_NONE; + m2mlc_hw_mb_getptrs_mail(priv, cpriv->endpoint, &mbptrs); + FIOCTL_COPY_TO_USER(mbptrs); + break; + } + + case M2MLC_IOCTL_MB_SETTAILPTR_MAIL: + { + uint16_t tail_ptr; + + FIOCTL_CHECK_ENDPOINT_NONE; + FIOCTL_CHECK_MMAP_REGS; + FIOCTL_COPY_FROM_USER(tail_ptr); + m2mlc_hw_mb_settailptr_mail(priv, cpriv->endpoint, tail_ptr); + break; + } + + case M2MLC_IOCTL_MB_GETPTRS_DONE: + { + m2mlc_mb_ptrs_t mbptrs; + + FIOCTL_CHECK_ENDPOINT_NONE; + m2mlc_hw_mb_getptrs_done(priv, cpriv->endpoint, &mbptrs); + FIOCTL_COPY_TO_USER(mbptrs); + break; + } + + case M2MLC_IOCTL_MB_SETTAILPTR_DONE: + { + uint16_t tail_ptr; + + FIOCTL_CHECK_ENDPOINT_NONE; + FIOCTL_CHECK_MMAP_REGS; + FIOCTL_COPY_FROM_USER(tail_ptr); + m2mlc_hw_mb_settailptr_done(priv, cpriv->endpoint, tail_ptr); + break; + } + + /* DoorBell */ + + case M2MLC_IOCTL_DB_GETPTRS: + { + m2mlc_db_ptrs_t dbptrs; + + FIOCTL_CHECK_ENDPOINT_NONE; + m2mlc_hw_db_getptrs(priv, cpriv->endpoint, &dbptrs); + FIOCTL_COPY_TO_USER(dbptrs); + break; + } + + case M2MLC_IOCTL_DB_SETTAILPTR: + { + uint16_t tail_ptr; + + 
FIOCTL_CHECK_ENDPOINT_NONE; + FIOCTL_CHECK_MMAP_REGS; + FIOCTL_COPY_FROM_USER(tail_ptr); + m2mlc_hw_db_settailptr(priv, cpriv->endpoint, tail_ptr); + break; + } + + /* Interrupt */ + + case M2MLC_IOCTL_INT_SETMASK: + { + m2mlc_interrupt_t intmask; + + FIOCTL_CHECK_ENDPOINT_NONE; + FIOCTL_CHECK_MMAP_REGS; + FIOCTL_COPY_FROM_USER(intmask); + m2mlc_hw_int_setmask(priv, cpriv->endpoint, intmask); + break; + } + +#if 0 + case M2MLC_IOCTL_INT_CLEAR: + { + m2mlc_interrupt_t intclr; + + FIOCTL_CHECK_ENDPOINT_NONE; + FIOCTL_CHECK_MMAP_REGS; + FIOCTL_COPY_FROM_USER(intclr); + m2mlc_hw_int_clear(priv, cpriv->endpoint, intclr); + break; + } +#endif /* 0 */ + + case M2MLC_IOCTL_INT_GETSTATUS: + { + m2mlc_int_stat_t intstat; + + FIOCTL_CHECK_ENDPOINT_NONE; + m2mlc_hw_int_getstat(priv, cpriv->endpoint, &intstat); +#if 0 + /* TODO: lock & clean fromirq */ + /* intstat->fromirq.r = 0; */ /* TODO: new RTL */ +#endif /* 0 */ + FIOCTL_COPY_TO_USER(intstat); + break; + } + + /* DMA */ + + case M2MLC_IOCTL_DMA_GETPTRS_STR: + { + m2mlc_dma_str_ptrs_t dmaptrs; + + FIOCTL_CHECK_ENDPOINT_NONE; + m2mlc_hw_dma_getptrs_str(priv, cpriv->endpoint, &dmaptrs); + FIOCTL_COPY_TO_USER(dmaptrs); + break; + } + + case M2MLC_IOCTL_DMA_SETHEADPTR_STR: + { + uint16_t head_ptr; + + FIOCTL_CHECK_ENDPOINT_NONE; + FIOCTL_CHECK_MMAP_REGS; + FIOCTL_COPY_FROM_USER(head_ptr); + m2mlc_hw_dma_setheadptr_str(priv, cpriv->endpoint, head_ptr); + break; + } + + case M2MLC_IOCTL_DMA_GETPTRS_DONE: + { + m2mlc_dma_done_ptrs_t dmaptrs; + + FIOCTL_CHECK_ENDPOINT_NONE; + m2mlc_hw_dma_getptrs_done(priv, cpriv->endpoint, &dmaptrs); + FIOCTL_COPY_TO_USER(dmaptrs); + break; + } + + case M2MLC_IOCTL_DMA_SETTAILPTR_DONE: + { + uint16_t tail_ptr; + + FIOCTL_CHECK_ENDPOINT_NONE; + FIOCTL_CHECK_MMAP_REGS; + FIOCTL_COPY_FROM_USER(tail_ptr); + m2mlc_hw_dma_settailptr_done(priv, cpriv->endpoint, tail_ptr); + break; + } + + /* MEM for DMA */ + + case M2MLC_IOCTL_MEM_LOC: + { + m2mlc_mem_ptrs_t mem_ptrs; + + FIOCTL_CHECK_ENDPOINT_NONE; + 
FIOCTL_COPY_FROM_USER(mem_ptrs); + + ret = dma_mem_alloc(priv, &mem_ptrs); + if (ret) + break; + + FIOCTL_COPY_TO_USER(mem_ptrs); + break; + } + + case M2MLC_IOCTL_MEM_REL: + { + m2mlc_mem_ptrs_t mem_ptrs; + + FIOCTL_CHECK_ENDPOINT_NONE; + FIOCTL_COPY_FROM_USER(mem_ptrs); + + dma_mem_free(priv, &mem_ptrs); + break; + } + + /* ECS */ + + case M2MLC_IOCTL_ECS_READ_REG: + { + m2mlc_ecs_reg_t reg; + + if (copy_from_user((caddr_t)®, uarg, _IOC_SIZE(cmd))) { + dev_err(priv->dev, + "IOCTL_ECS_READ_REG: " \ + "copy_from_user failure\n"); + ret = -EFAULT; + break; + } + + switch (reg.id) { + case ECS_DEVID_CAR: + case ECS_DEVINF_CAR: + case ECS_ASMBLID_CAR: + case ECS_ASMBLINF_CAR: + case ECS_PEF_CAR: + case ECS_PELLCTRL_CSR: + case ECS_GPSTAT_CSR: + case ECS_BASEDEVID_CSR: + case ECS_HBASEDEVIDLOCK_CSR: + case ECS_ROUTE_RESP: + case ECS_PHYSTAT_CTRL: + case ECS_RTACCSTAT_0: + case ECS_RTACCSTAT_1: + case ECS_RTACCSTAT_2: + case ECS_RTACCSTAT_3: + case ECS_RTACCSTAT_4: + case ECS_RTACCSTAT_5: + case ECS_RTACCSTAT_6: + case ECS_RTACCSTAT_7: +#ifdef USE_RDMA2_MODE + case 0x0104: + case 0x0108: +#endif /* USE_RDMA2_MODE */ + reg.val = m2mlc_read_reg32(priv->ecs_base, reg.id); + break; + default: + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, + "IOCTL_ECS_READ_REG: " \ + "wrong register ID\n"); + ret = -EINVAL; + break; + } + + if (copy_to_user(uarg, (caddr_t)®, _IOC_SIZE(cmd))) { + dev_err(priv->dev, + "IOCTL_ECS_READ_REG: " \ + "copy_to_user failure\n"); + ret = -EFAULT; + } + break; + } + + case M2MLC_IOCTL_ECS_WRITE_REG: + { + m2mlc_ecs_reg_t reg; + + if (__kuid_val(current_euid()) != 0) { + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, + "IOCTL_ECS_WRITE_REG ERROR: " \ + "for root only\n"); + ret = -EACCES; + break; + } + + if (copy_from_user((caddr_t)®, uarg, _IOC_SIZE(cmd))) { + dev_err(priv->dev, + "IOCTL_ECS_WRITE_REG: " \ + "copy_from_user failure\n"); + ret = -EFAULT; + break; + } + + switch (reg.id) { + case ECS_PELLCTRL_CSR: + case ECS_GPSTAT_CSR: + case ECS_BASEDEVID_CSR: + /* 
TODO: + * if !0 -> enable DMA + * else if FF dis DMA + */ + case ECS_HBASEDEVIDLOCK_CSR: + case ECS_ROUTE_RESP: +#ifdef USE_RDMA2_MODE + case 0x0104: + case 0x0108: +#endif /* USE_RDMA2_MODE */ + m2mlc_write_reg32(priv->ecs_base, reg.id, reg.val); + break; + default: + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, + "IOCTL_ECS_WRITE_REG: " \ + "wrong register ID\n"); + ret = -EINVAL; + break; + } + break; + } + + /* debug */ + + case M2MLC_IOCTL_PRINT_REGS: + { + uint32_t regmsk; + + if (copy_from_user((caddr_t)®msk, uarg, _IOC_SIZE(cmd))) { + dev_err(priv->dev, + "IOCTL_PRINT_REGS: " \ + "copy_from_user failure\n"); + ret = -EFAULT; + break; + } + + m2mlc_hw_print_all_regs(priv, regmsk); + break; + } + + /* FUTURE: MCST_SELFTEST_MAGIC + case MCST_SELFTEST_MAGIC: + { + if (copy_to_user(uarg, (caddr_t)&?, _IOC_SIZE(cmd))) { + dev_err(priv->dev, + "%s MCST_SELFTEST: copy_to_user failure\n", + __func__); + ret = -EFAULT; + } + break; + } + */ + + default: + { + dev_err(priv->dev, "IOCTL ERROR: invalid command 0x%X(%d)\n", + cmd, cmd); + return -ENOTTY; + } + } /* switch( cmd ) */ + + return ret; +} /* cdev_ioctl */ + + +#ifdef CONFIG_COMPAT + +static int do_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + return cdev_ioctl(filp, cmd, arg); +} + +static long compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + return do_ioctl(filp, cmd, arg); +} + +#endif /* CONFIG_COMPAT */ + + +/** + * mmap file operation + * Remap DMA memory to user + */ +static int cdev_mmap(struct file *filp, struct vm_area_struct *vma) +{ + cdev_priv_t *cpriv; + m2mlc_priv_t *priv; + + phys_addr_t base_bus; + void *ram_buff; + unsigned long base_size; + + unsigned long off = vma->vm_pgoff << PAGE_SHIFT; + unsigned long long pfn; + unsigned long vsize; + unsigned long psize; + + + cpriv = (cdev_priv_t *)filp->private_data; + assert(cpriv); + if (!cpriv) + return -ENODEV; + + priv = cpriv->priv; + assert(priv); + if (!priv) + return -ENODEV; + + if (cpriv->endpoint 
== CDEV_ENDPOINT_NONE) { + dev_err(priv->dev, "MMAP ERROR: endpoint not opened\n"); + return -ENODEV; + } + + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, "off=%ld\n", off); + if (off < M2MLC_MMAP_PIO_DONE_QUEUE_BASE) { + /* 0..8k: mmap HW bufs */ + if (off == M2MLC_MMAP_ENDPOINT_REGS_BASE) { + base_bus = priv->reg_base_bus + + (cpriv->endpoint * PAGE_SIZE); + base_size = M2MLC_MMAP_ENDPOINT_REGS_SIZE; + cpriv->mmap_id |= MMAP_ENDPOINT_REGS_ID; + } else if (off == M2MLC_MMAP_PIO_PAYLOAD_BASE) { + base_bus = priv->buf_base_bus + + (cpriv->endpoint * PAGE_SIZE); + base_size = M2MLC_MMAP_PIO_PAYLOAD_SIZE; + cpriv->mmap_id |= MMAP_PIO_PAYLOAD_ID; + } else { + dev_err(priv->dev, + "MMAP ERROR: Wrong offset: 0x%lX\n", off); + return -EINVAL; + } + + /* FIXME: x86 + pfn = page_to_pfn(virt_to_page(bus_to_virt(base_bus))); + */ + #ifdef CONFIG_E90 + pfn = MK_IOSPACE_PFN(0xa, (base_bus >> PAGE_SHIFT)); + #else + pfn = base_bus >> PAGE_SHIFT; + #endif + + vsize = vma->vm_end - vma->vm_start; + psize = (PAGE_SIZE > base_size) ? 
PAGE_SIZE : base_size; + + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, + "mmap HW bufs: pfn=%#llx (off=%ld, bus=%#llx), " + "vsize=%#lx, psize=%#lx\n", + pfn, off, (u64)base_bus, vsize, psize); + + if (vsize > psize) { + dev_err(priv->dev, + "MMAP ERROR: vsize > psize\n"); + return -EINVAL; + } + + vma->vm_pgoff = 0; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_flags |= (VM_READ | VM_WRITE | VM_IO | + VM_DONTCOPY | VM_RESERVED); + + if (io_remap_pfn_range(vma, vma->vm_start, pfn, vsize, + vma->vm_page_prot)) { + dev_err(priv->dev, + "MMAP ERROR: Error remap memory to user\n"); + return -EAGAIN; + } + } else { + /* mmap RAM bufs */ + if (off == M2MLC_MMAP_PIO_DONE_QUEUE_BASE) { + ram_buff = priv->pio_done_que_buff + +#ifdef USE_MUL2ALIGN + priv->pio_done_que_offset + +#endif /* USE_MUL2ALIGN */ + (cpriv->endpoint * PAGE_SIZE); + base_size = M2MLC_MMAP_PIO_DONE_QUEUE_SIZE; + cpriv->mmap_id |= MMAP_PIO_DONE_QUEUE_ID; + } else if (off == M2MLC_MMAP_PIO_DATA_QUEUE_BASE) { + ram_buff = priv->pio_data_que_buff + +#ifdef USE_MUL2ALIGN + priv->pio_data_que_offset + +#endif /* USE_MUL2ALIGN */ + (cpriv->endpoint * PAGE_SIZE); + base_size = M2MLC_MMAP_PIO_DATA_QUEUE_SIZE; + cpriv->mmap_id |= MMAP_PIO_DATA_QUEUE_ID; + } else if (off == M2MLC_MMAP_DONE_REGS_COPY_BASE) { + ram_buff = priv->mdd_ret_buff[cpriv->endpoint]; + base_size = M2MLC_MMAP_DONE_REGS_COPY_SIZE; + cpriv->mmap_id |= MMAP_DONE_REGS_COPY_ID; + } else if (off == M2MLC_MMAP_DB_QUEUE_BASE) { + ram_buff = priv->db_start_buff[cpriv->endpoint]; + base_size = M2MLC_MMAP_DB_QUEUE_SIZE; + cpriv->mmap_id |= MMAP_DB_QUEUE_ID; + } else if (off == M2MLC_MMAP_DMA_DESCR_QUEUE_BASE) { +#ifdef USE_MUL2ALIGN + ram_buff = priv->dma_start_buff[cpriv->endpoint] + + priv->dma_start_offset[cpriv->endpoint]; +#else + ram_buff = priv->dma_start_buff[cpriv->endpoint]; +#endif /* USE_MUL2ALIGN */ + base_size = M2MLC_MMAP_DMA_DESCR_QUEUE_SIZE; + cpriv->mmap_id |= MMAP_DMA_DESCR_QUEUE_ID; + } else if (off == 
M2MLC_MMAP_DMA_DONE_QUEUE_BASE) { +#ifdef USE_MUL2ALIGN + ram_buff = priv->dma_done_que_buff[cpriv->endpoint] + + priv->dma_done_offset[cpriv->endpoint]; +#else + ram_buff = priv->dma_done_que_buff[cpriv->endpoint]; +#endif /* USE_MUL2ALIGN */ + base_size = M2MLC_MMAP_DMA_DONE_QUEUE_SIZE; + cpriv->mmap_id |= MMAP_DMA_DONE_QUEUE_ID; + } else if (off == M2MLC_MMAP_MB_DONE_QUEUE_BASE) { +#ifdef USE_MUL2ALIGN + ram_buff = priv->mb_done_que_buff[cpriv->endpoint] + + priv->mb_done_offset[cpriv->endpoint]; +#else + ram_buff = priv->mb_done_que_buff[cpriv->endpoint]; +#endif /* USE_MUL2ALIGN */ + base_size = M2MLC_MMAP_MB_DONE_QUEUE_SIZE; + cpriv->mmap_id |= MMAP_MB_DONE_QUEUE_ID; + } else if (off == M2MLC_MMAP_MB_MAIL_BASE) { +#ifdef USE_MUL2ALIGN + ram_buff = priv->mb_struct_buff[cpriv->endpoint] + + priv->mb_struct_offset[cpriv->endpoint]; +#else + ram_buff = priv->mb_struct_buff[cpriv->endpoint]; +#endif /* USE_MUL2ALIGN */ + base_size = M2MLC_MMAP_MB_MAIL_SIZE; + cpriv->mmap_id |= MMAP_MB_MAIL_ID; + } else { + dev_err(priv->dev, + "MMAP ERROR: Wrong offset: 0x%lX\n", off); + return -EINVAL; + } + + pfn = ((u64)virt_to_phys(ram_buff)) >> PAGE_SHIFT; + vsize = vma->vm_end - vma->vm_start; + psize = base_size; + + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, + "mmap RAM bufs: pfn=%#llx (off=%ld, phys_mem=%#llx, " + "vsize=%#lx, psize=%#lx\n", + pfn, off, (u64)virt_to_phys(ram_buff), + vsize, psize); + + if (vsize > psize) { + dev_err(priv->dev, + "MMAP ERROR: vsize > psize\n"); + return -EINVAL; + } + + vma->vm_pgoff = 0; + vma->vm_flags |= (VM_READ | VM_WRITE | + VM_DONTCOPY | VM_RESERVED); + + if (remap_pfn_range(vma, vma->vm_start, pfn, vsize, + vma->vm_page_prot)) { + dev_err(priv->dev, + "MMAP ERROR: Error remap memory to user\n"); + return -EAGAIN; + } + } + + return 0; +} /* cdev_mmap */ + + +#define RPRINT(fmt, args...) 
\ +do { \ + len += sprintf(l_buf + len, fmt, ## args); \ +} while (0) + +/** + * read file operation + * Read device info and current status + * + * Returns: + * -ENODEV + * -EFAULT - copy_from/to_user failure + * >0 - bytes readed + */ +static ssize_t cdev_read(struct file *filp, char *buf, size_t count, + loff_t *ppos) +{ + size_t len; + char *l_buf; + cdev_priv_t *cpriv; + m2mlc_priv_t *priv; + int i, j; + u_int32_t reg_id[] = { + /* + ECS_DEVID_CAR, + ECS_DEVINF_CAR, + ECS_ASMBLID_CAR, + ECS_ASMBLINF_CAR, + ECS_PEF_CAR, + ECS_PELLCTRL_CSR, + */ + ECS_GPSTAT_CSR, + ECS_BASEDEVID_CSR, + ECS_HBASEDEVIDLOCK_CSR, + ECS_ROUTE_RESP, + ECS_PHYSTAT_CTRL + }; + char *reg_name[] = { + /* + "Device_Identity_CAR ", + "Device_Information_CAR ", + "Assembly_Identity_CAR ", + "Assembly_Information_CAR ", + "Processing_Elem_Features_CAR ", + "Processing_Elem_LogLayCtrl_CSR", + */ + "General_Port_Status_CSR ", + "Base_Device_ID_CSR ", + "Host_Base_Device_ID_Lock_CSR ", + "Responce Route Field ", + "PHY_Port_Pn_Status_Control ", + " " + }; + u_int32_t reg_id_rtacc[] = { + ECS_RTACCSTAT_0, + ECS_RTACCSTAT_1, + ECS_RTACCSTAT_2, + ECS_RTACCSTAT_3, + ECS_RTACCSTAT_4, + ECS_RTACCSTAT_5, + ECS_RTACCSTAT_6, + ECS_RTACCSTAT_7 + }; + u_int32_t reg_val; + ecs_devid_car_reg_t ecs_devid_car; + ecs_devinf_car_reg_t ecs_devinf_car; + ecs_asmblid_car_reg_t ecs_asmblid_car; + ecs_asmblinf_car_reg_t ecs_asmblinf_car; + ecs_pef_car_reg_t ecs_pef_car; + ecs_pellctrl_csr_reg_t ecs_pellctrl_csr; + ecs_gpstat_csr_reg_t ecs_gpstat_csr; + ecs_basedevid_csr_reg_t ecs_basedevid_csr; + ecs_hbasedevidlock_csr_reg_t ecs_hbasedevidlock_csr; + ecs_route_resp_reg_t ecs_route_resp; + ecs_phystat_ctrl_reg_t ecs_phystat_ctrl; + u_int32_t u32i; + + + cpriv = (cdev_priv_t *)filp->private_data; + assert(cpriv); + if (!cpriv) + return -ENODEV; + + priv = cpriv->priv; + assert(priv); + if (!priv) + return -ENODEV; + + if (*ppos != 0) + return 0; /* EOF */ + + l_buf = kzalloc(M2MLC_READ_BUF_SIZE, GFP_KERNEL); + if 
(!priv) { + dev_err(priv->dev, + "ERROR: Cannot allocate memory, aborting\n"); + return -ENOMEM; + } + + /* -= read =- */ + + len = 0; + /*01*/ + RPRINT(" -= M2MLC device %s%d =-\n", M2MLC_DEVNAME, priv->minor); + /*02*/ + RPRINT("EP PID\t SIG\t\tEP PID\t SIG\n"); + for (i = 0; i < priv->niccpb_procval; i += 2) { + /*03..12*/ + RPRINT("%2d: %s %8d\t %d\t\t%2d: %s %8d\t %d\n", + i, ((i > 16) | (i == 0)) ? "S" : " ", + priv->pid[i], priv->signal[i], + i + 1, (((i + 1) > 16) | ((i + 1) == 0)) ? "S" : " ", + priv->pid[i + 1], priv->signal[i + 1]); + } + + /*13*/ + RPRINT("\n"); + /*14*/ + RPRINT(" Element Config Space:\n"); + for (i = 0; i < ARRAY_SIZE(reg_id); i++) { + reg_val = m2mlc_read_reg32(priv->ecs_base, reg_id[i]); + /*15..25*/ + RPRINT("%s: ", reg_name[i]); + switch (reg_id[i]) { + case ECS_DEVID_CAR: + ecs_devid_car.r = reg_val; + RPRINT("Device_ID=0x%04X ", + ecs_devid_car.p.Device_Identity); + RPRINT("Device_Vendor_ID=0x%04X ", + ecs_devid_car.p.Device_Vendor_Identity); + break; + case ECS_DEVINF_CAR: + ecs_devinf_car.r = reg_val; + RPRINT("Device_Revision=0x%04X ", + ecs_devinf_car.p.Device_Revision); + break; + case ECS_ASMBLID_CAR: + ecs_asmblid_car.r = reg_val; + RPRINT("Assembly_ID=0x%04X ", + ecs_asmblid_car.p.Assy_Identity); + RPRINT("Assembly_Vendor_ID=0x%04X ", + ecs_asmblid_car.p.Assy_Vendor_Identity); + break; + case ECS_ASMBLINF_CAR: + ecs_asmblinf_car.r = reg_val; + RPRINT("Assembly_Revision=0x%04X ", + ecs_asmblinf_car.p.Assy_Revision); + RPRINT("Ext_Feat_Ptr=0x%04X ", + ecs_asmblinf_car.p.Extended_Features_Ptr); + break; + case ECS_PEF_CAR: + ecs_pef_car.r = reg_val; + RPRINT("Bridge%s ", + (ecs_pef_car.p.Bridge) ? "+" : "-"); + RPRINT("Memory%s ", + (ecs_pef_car.p.Memory) ? "+" : "-"); + RPRINT("Processor%s ", + (ecs_pef_car.p.Processor) ? "+" : "-"); + RPRINT("Switch%s ", + (ecs_pef_car.p.Switch) ? "+" : "-"); + /*26*/ + RPRINT("\n%s: ", reg_name[ARRAY_SIZE(reg_id)]); + RPRINT("StdRouteTblCFg%s ", + (ecs_pef_car.p.Std_Route_Tbl_CFg_Sup) ? 
\ + "+" : "-"); + RPRINT("ExtFeat%s ", + (ecs_pef_car.p.Extended_Features) ? "+" : "-"); + u32i = ecs_pef_car.p.Extended_Addr_Suport; + RPRINT("ExtAddrSup=%s/%s/%s ", + (u32i & ECS_PEF_CAR_EXTADDRSUP_66) ? "66" : "-", + (u32i & ECS_PEF_CAR_EXTADDRSUP_50) ? "50" : "-", + (u32i & ECS_PEF_CAR_EXTADDRSUP_34) ? "34" : "-"); + break; + case ECS_PELLCTRL_CSR: + ecs_pellctrl_csr.r = reg_val; + u32i = ecs_pellctrl_csr.p.Extended_Addr_Control; + RPRINT("Extended_Addr_Control=%s/%s ", + (u32i & ECS_PELLCTRL_CSR_EXTADDRCTRL_64) ? \ + "64" : "-", + (u32i & ECS_PELLCTRL_CSR_EXTADDRCTRL_32) ? \ + "32" : "-"); + break; + case ECS_GPSTAT_CSR: + ecs_gpstat_csr.r = reg_val; + RPRINT("Auto_Enable%s ", + (ecs_gpstat_csr.p.Auto_Enable) ? "+" : "-"); + RPRINT("Discovered%s ", + (ecs_gpstat_csr.p.Discovered) ? "+" : "-"); + RPRINT("Host%s ", + (ecs_gpstat_csr.p.Host) ? "+" : "-"); + break; + case ECS_BASEDEVID_CSR: + ecs_basedevid_csr.r = reg_val; + RPRINT("Base_DeviceID=0x%02X ", + ecs_basedevid_csr.p.Base_DeviceID); + break; + case ECS_HBASEDEVIDLOCK_CSR: + ecs_hbasedevidlock_csr.r = reg_val; + RPRINT("Host_Base_DeviceID=0x%04X ", + ecs_hbasedevidlock_csr.p.Host_Base_DeviceID); + break; + case ECS_ROUTE_RESP: + ecs_route_resp.r = reg_val; + RPRINT("Msg_Route=0x%01X ", + ecs_route_resp.p.Msg_Route); + RPRINT("RDMA_Route=0x%01X ", + ecs_route_resp.p.RDMA_Route); + break; + case ECS_PHYSTAT_CTRL: + ecs_phystat_ctrl.r = reg_val; + RPRINT("Port_OK%s ", + (ecs_phystat_ctrl.p.Port_OK) ? 
"+" : "-"); + break; + default: + RPRINT("=0x%08X ", reg_val); + break; + } + RPRINT("\n"); + } + + /*27*/ + RPRINT("\n"); + /*28*/ + RPRINT(" RT Access Status:\n"); + RPRINT(" 0123456789ABCDEF "); + /*29*/ + RPRINT(" 0123456789ABCDEF\n"); + for (i = 0; i < ARRAY_SIZE(reg_id_rtacc); i++) { + reg_val = m2mlc_read_reg32(priv->ecs_base, reg_id_rtacc[i]); + for (j = 0; j < 32; j++) { + if (0 == j) + RPRINT("0x%02X: ", i * 32); + if (16 == j) + RPRINT(" 0x%02X: ", (i * 32) + 16); + RPRINT("%s", ((reg_val >> j) & 1) ? "*" : "."); + if (31 == j) + RPRINT(" - 0x%08X", reg_val); + } + /*30..37*/ + RPRINT("\n"); + } + /*_38_*/ + RPRINT("\n"); + + /* set _38_ to M2MLC_READ_BUF_SIZE in m2mlc_io.h */ + /* -= read =- */ + + if (count < len) { + dev_err(priv->dev, + "READ ERROR: needed %zu bytes for read\n", len); + kfree(l_buf); + return -EINVAL; + } + + if (copy_to_user(buf, l_buf, len)) { + dev_err(priv->dev, + "READ ERROR: copy_to_user failure\n"); + kfree(l_buf); + return -EFAULT; + } + *ppos = len; + + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, + "CDEV_READ: readed %zu bytes\n", len); + + kfree(l_buf); + return len; +} /* cdev_read */ + + +/** + * write file operation + * Write mask for print regs dump to syslog + * + * Returns: + * -ENODEV + * -EFAULT - copy_from/to_user failure + * >0 - bytes written + */ +static ssize_t cdev_write(struct file *filp, const char *buf, size_t count, + loff_t *ppos) +{ + size_t len; + cdev_priv_t *cpriv; + m2mlc_priv_t *priv; + char l_buf[M2MLC_WRITE_BUF_SIZE+1]; + uint32_t regmsk; + + cpriv = (cdev_priv_t *)filp->private_data; + assert(cpriv); + if (!cpriv) + return -ENODEV; + + priv = cpriv->priv; + assert(priv); + if (!priv) + return -ENODEV; + + /* max to write */ + len = M2MLC_WRITE_BUF_SIZE; + len = (count < len) ? 
count : len; + + if (copy_from_user((void *)l_buf, (void *)buf, len)) { + dev_err(priv->dev, + "WRITE ERROR: copy_from_user failure\n"); + return -EFAULT; + } + l_buf[len] = 0; + + sscanf(l_buf, "%x", ®msk); + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, + "CDEV_WRITE: regmsk = 0x%08X\n", regmsk); + + if (regmsk) { + m2mlc_hw_print_all_regs(priv, regmsk); + } + + return count; /*len;*/ +} /* cdev_write */ + + +/** + * open file operation + */ +static int cdev_open(struct inode *inode, struct file *filp) +{ + cdev_priv_t *cpriv; + m2mlc_priv_t *priv; + + priv = container_of(inode->i_cdev, m2mlc_priv_t, cdev); + assert(priv); + if (!priv) + return -ENODEV; + + spin_lock(&priv->cdev_open_lock); + if (1 == priv->device_open) { + spin_unlock(&priv->cdev_open_lock); + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, + "CDEV_OPEN WARNING: device busy!\n"); + return -EBUSY; + } + spin_unlock(&priv->cdev_open_lock); + + filp->private_data = kzalloc(sizeof(cdev_priv_t), GFP_KERNEL); + cpriv = (cdev_priv_t *)filp->private_data; + assert(cpriv); + if (!cpriv) + return -ENOMEM; + cpriv->priv = priv; + cpriv->endpoint = CDEV_ENDPOINT_NONE; + + kobject_get(&priv->dev->kobj); + + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, "CDEV_OPEN\n"); + + return 0; +} /* cdev_open */ + +/** + * close file operation + */ +static int cdev_release(struct inode *inode, struct file *filp) +{ + cdev_priv_t *cpriv; + m2mlc_priv_t *priv; + + cpriv = (cdev_priv_t *)filp->private_data; + assert(cpriv); + if (!cpriv) + return -ENODEV; + + priv = cpriv->priv; + assert(priv); + if (!priv) + return -ENODEV; + + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, "CDEV_CLOSE\n"); + + kobject_put(&priv->dev->kobj); + + kfree(filp->private_data); + filp->private_data = NULL; + + return 0; +} /* cdev_release */ + + +/** + * file operation + */ +static const struct file_operations dev_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .read = cdev_read, + .write = cdev_write, + .mmap = cdev_mmap, + .unlocked_ioctl = cdev_ioctl, + #ifdef 
CONFIG_COMPAT + .compat_ioctl = compat_ioctl, + #endif + .open = cdev_open, + .release = cdev_release, +}; + + +/** + ****************************************************************************** + * GLOBAL + ****************************************************************************** + */ + +#define DEVICE_FIRST 0 +#define ALL_DEVICE_COUNT (1U << MINORBITS) /* max minor num */ +#define MAX_DEVICE_COUNT (ALL_DEVICE_COUNT >> 2) /* ==max/4 - ... */ +#define DEVICE_COUNT ((MAX_DEVICE_COUNT > 32) ? 32 : MAX_DEVICE_COUNT) + + +static struct class *DevClass; + +static int Major = 0; + +static DEFINE_MUTEX(minor_lock); +static int last_minor = 0; +static int minors[DEVICE_COUNT] = {-1}; +static char bus_name[DEVICE_COUNT][20] = { {0} }; + + +/** + ****************************************************************************** + * Minor part of cdev + ****************************************************************************** + */ + +/* + * Allocate minor for current PCI device + */ +static int get_minor(m2mlc_priv_t *priv, unsigned int *minor) +{ + int ret = -EINVAL; + int i; + struct pci_dev *pdev = priv->pdev; + + mutex_lock(&minor_lock); + /* find prev minor for busname */ + for (i = 0; i < last_minor; i++) { + if (0 == strcmp(dev_name(&pdev->dev), bus_name[i])) { + *minor = minors[i]; + DEV_DBG(M2MLC_DBG_MSK_CDEV, &pdev->dev, + "Found saved minor: %d (%s)\n", + *minor, dev_name(&pdev->dev)); + ret = 0; + break; + } + } + if (ret != 0) { + if (DEVICE_COUNT == last_minor) { + dev_err(&pdev->dev, "ERROR: too many char devices\n"); + } else { + /* new busname */ + minors[last_minor] = last_minor; + strcpy(bus_name[last_minor], dev_name(&pdev->dev)); + *minor = last_minor; + last_minor += 1; + ret = 0; + DEV_DBG(M2MLC_DBG_MSK_CDEV, &pdev->dev, + "Save minor %d for bus %s\n", + *minor, bus_name[*minor]); + } + } + mutex_unlock(&minor_lock); + + return ret; +} /* get_minor */ + +static void free_minor(unsigned int minor) +{ + /* + mutex_lock(&minor_lock); + 
mutex_unlock(&minor_lock); + */ +} /* free_minor */ + + +/** + * Create cdev for IRQ or DMA + */ +int m2mlc_cdev_register(m2mlc_priv_t *priv) +{ + int ret = 0; + dev_t devt; + unsigned int minor; + char name[20]; + struct pci_dev *pdev; + + assert(priv); + if (!priv) + return -ENODEV; + + pdev = priv->pdev; + assert(pdev); + if (!pdev) + return -ENODEV; + + ret = get_minor(priv, &minor); + if (ret) { + dev_err(&pdev->dev, "ERROR: get_minor failed\n"); + goto err_exit; + } + + sprintf(name, "%s%d", M2MLC_DEVNAME, minor); + devt = MKDEV(Major, minor); + DEV_DBG(M2MLC_DBG_MSK_CDEV, &pdev->dev, + "try to register char device (%d:%d)\n", Major, minor); + + cdev_init(&priv->cdev, &dev_fops); + priv->cdev.owner = THIS_MODULE; + priv->minor = minor; + + ret = cdev_add(&priv->cdev, devt, 1); + if (ret) { + dev_err(&pdev->dev, + "ERROR: failed to add char device %d:%d\n", + Major, minor); + goto err_free_idr; + } + + priv->dev = device_create(DevClass, &pdev->dev, devt, + NULL, name); + if (IS_ERR(priv->dev)) { + dev_err(&pdev->dev, + "ERROR: char device register failed\n"); + ret = PTR_ERR(priv->dev); + goto err_del_cdev; + } + dev_info(&pdev->dev, "char device %s (%d:%d) installed\n", + name, Major, minor); + + minor += DEVICE_COUNT; + return 0; + +err_del_cdev: + cdev_del(&priv->cdev); +err_free_idr: + free_minor(minor); +err_exit: + return ret; +} /* m2mlc_cdev_register */ + +/** + * Remove cdev + */ +void m2mlc_cdev_remove(m2mlc_priv_t *priv) +{ + assert(priv); + if (!priv) + return; + + DEV_DBG(M2MLC_DBG_MSK_CDEV, priv->dev, + "char device (%d:%d) removed\n", Major, priv->minor); + + device_destroy(DevClass, MKDEV(Major, priv->minor)); + cdev_del(&priv->cdev); + free_minor(priv->minor); +} /* m2mlc_cdev_remove */ + + +/** + ****************************************************************************** + * Major part of cdev + ****************************************************************************** + */ + +static int major_init(void) +{ + int ret = 0; + dev_t devt 
= 0; + + ret = alloc_chrdev_region(&devt, DEVICE_FIRST, ALL_DEVICE_COUNT, + DRIVER_NAME); + if (ret) { + ERR_MSG("ERROR: Could not register char device region\n"); + goto err_exit; + } + + Major = MAJOR(devt); + + PDEBUG(M2MLC_DBG_MSK_CDEV, + "chrdev_region registered: major %d\n", + Major); + + return 0; + +err_exit: + return ret; +} /* major_init */ + +static void major_delete(void) +{ + unregister_chrdev_region(MKDEV(Major, 0), ALL_DEVICE_COUNT); + + PDEBUG(M2MLC_DBG_MSK_CDEV, + "chrdev_region unregistered (major %d)\n", + Major); +} /* major_delete */ + + +/** + * Get Major and register class for cdev + */ +int __init m2mlc_dev_init(void) +{ + int ret; + + ret = major_init(); + if (ret) + goto err_exit; + + /* class register */ + DevClass = class_create(THIS_MODULE, DRIVER_NAME); + if (IS_ERR(DevClass)) { + ERR_MSG("ERROR: couldn't create class %s\n", DRIVER_NAME); + ret = PTR_ERR(DevClass); + goto err_class_register; + } + PDEBUG(M2MLC_DBG_MSK_CDEV, "class %s created\n", DRIVER_NAME); + + return 0; + +err_class_register: + major_delete(); +err_exit: + return ret; +} /* m2mlc_dev_init */ + +/** + * Deregister class for cdev and free major + */ +void m2mlc_dev_exit(void) +{ + class_destroy(DevClass); + major_delete(); + + PDEBUG(M2MLC_DBG_MSK_CDEV, "class %s destroyed\n", DRIVER_NAME); +} /* m2mlc_dev_exit */ diff --git a/drivers/mcst/m2mlc/m2mlc_hw.c b/drivers/mcst/m2mlc/m2mlc_hw.c new file mode 100644 index 000000000000..10bb9dd78c5e --- /dev/null +++ b/drivers/mcst/m2mlc/m2mlc_hw.c @@ -0,0 +1,824 @@ +/** + * m2mlc_hw.c - M2MLC module device driver + * + * Hardware part + */ + +#include + +#include "m2mlc.h" + + +void m2mlc_hw_mb_settailptr_done(m2mlc_priv_t *priv, int ep, + uint16_t tail_ptr); +void m2mlc_hw_mb_settailptr_mail(m2mlc_priv_t *priv, int ep, + uint16_t tail_ptr); +void m2mlc_hw_dma_settailptr_done(m2mlc_priv_t *priv, int ep, + uint16_t tail_ptr); +void m2mlc_hw_dma_setheadptr_str(m2mlc_priv_t *priv, int ep, + uint16_t head_ptr); + + +/** + 
****************************************************************************** + * COMMON + ****************************************************************************** + **/ + +/** + * Read register + * + * @base_addr: registers base address + * @port: register offset + * + * Returns - readed value + */ +u32 m2mlc_read_reg32(void __iomem *base_addr, u32 port) +{ +#ifdef TESTWOHARDWARE + return 0; +#else + return ioread32(base_addr + port); +#endif /* TESTWOHARDWARE */ +} /* m2mlc_read_reg32 */ + +/** + * Write register + * + * @base_addr: registers base address + * @port: register offset + * @val: value + */ +void m2mlc_write_reg32(void __iomem *base_addr, u32 port, u32 val) +{ +#ifndef TESTWOHARDWARE + iowrite32(val, base_addr + port); +#endif /* TESTWOHARDWARE */ +} /* m2mlc_write_reg32 */ + +/** + * Write register + * + * @base_addr: registers base address + * @port: register offset + * @val: value + */ +void m2mlc_write_reg64(void __iomem *base_addr, u32 port, u64 val) +{ +#ifndef TESTWOHARDWARE + /* TODO: iowrite64 */ + iowrite32((u32)(val>>32), base_addr + port + 4); /* Hi */ + iowrite32((u32)val, base_addr + port); /* Lo */ +#endif /* TESTWOHARDWARE */ +} /* m2mlc_write_reg64 */ + +void m2mlc_write_reg64_endian(void __iomem *base_addr, u32 port, u64 val) +{ +#ifndef TESTWOHARDWARE +#ifdef __sparc__ /* ARCH: e90, e90s */ + /* writeq(cpu_to_le64(val), base_addr + port); */ + writeq(val, base_addr + port); +#else /* __e2k__ */ + iowrite32((u32)(val>>32), base_addr + port + 4); /* Hi */ + iowrite32((u32)val, base_addr + port); /* Lo */ +#endif /* __sparc__ */ +#endif /* TESTWOHARDWARE */ +} /* m2mlc_write_reg64 */ + + +/** + ****************************************************************************** + * Init + ****************************************************************************** + */ + +/** + * Get NIC Capability + * + * Returns RTL Version/Revision & Arbiter Config + */ +u16 m2mlc_hw_get_niccapability(struct pci_dev *pdev) +{ + u16 val = 0; + 
/**
 * Get NIC Capability
 *
 * Reads the NIC capability word from PCI config space
 * (canned NICCPB_PROCVAL when built with TESTWOHARDWARE).
 *
 * Returns RTL Version/Revision & Arbiter Config
 */
u16 m2mlc_hw_get_niccapability(struct pci_dev *pdev)
{
	u16 val = 0;

#ifndef TESTWOHARDWARE
	pci_read_config_word(pdev, NICCPB_REG, &val);
#else
	val = NICCPB_PROCVAL;
#endif /* TESTWOHARDWARE */
	return val;
} /* m2mlc_hw_get_niccapability */

/**
 * Full Reset on probe and remove
 *
 * @priv: driver private data
 * @first: nonzero on the first (probe-time) call; the softreset bit is
 *         requested only on later calls, and only if softreset_enable
 *
 * Waits up to ~20ms (two 10ms polls) for the hardware to clear the
 * softreset bit, then zeroes all base-address registers and head/tail
 * pointers of every resource block.
 *
 * Returns 0 on success,
 *         1 if a softreset was already pending on entry,
 *         2 if the softreset bit did not clear within ~20ms.
 */
int m2mlc_hw_reset(m2mlc_priv_t *priv, int first)
{
	int i;
	u32 val;


	/* Request for full softreset */
	pci_read_config_dword(priv->pdev, NICCPB_REG, &val);
	if (NICCPB_GET_SOFTRES(val) != 0) {
		DEV_DBG(M2MLC_DBG_MSK_HW, &priv->pdev->dev,
			"SOFTRESET is not null: 0x%X\n", val);
		return 1; /* ERROR */
	}

	if (softreset_enable && !first) {
		val = val | NICCPB_SET_SOFTRES;
	}
	DEV_DBG(M2MLC_DBG_MSK_HW, &priv->pdev->dev,
		"SET NICCPB to: 0x%X\n", val);

	pci_write_config_dword(priv->pdev, NICCPB_REG, val);
	mdelay(10);
	pci_read_config_dword(priv->pdev, NICCPB_REG, &val);
	if (NICCPB_GET_SOFTRES(val) != 0) {
		DEV_DBG(M2MLC_DBG_MSK_HW, &priv->pdev->dev,
			"SOFTRESET is not null after 10ms: 0x%X\n", val);
		/* give the RTL one more 10ms window before giving up */
		mdelay(10);
		pci_read_config_dword(priv->pdev, NICCPB_REG, &val);
		if (NICCPB_GET_SOFTRES(val) != 0) {
			DEV_DBG(M2MLC_DBG_MSK_HW, &priv->pdev->dev,
				"SOFTRESET is not null after 20ms: 0x%X\n",
				val);
			return 2; /* ERROR */
		}
	}
	DEV_DBG(M2MLC_DBG_MSK_HW, &priv->pdev->dev,
		"NICCPB is now: 0x%X (after 10ms)\n", val);

	/* enable access to all resource blocks' address registers */
	for (i = 0; i < priv->niccpb_procval; i++) {
		m2mlc_write_reg32(priv->reg_base, RB_COM + CB_ADDR_ACC_CTRL(i),
				CB_ADDR_ACC_CTRL_ADDR_MASK);
	}

	/* enable timeout (retry/counter come from module parameters) */
	if ((timeout_retry > 0) || (timeout_counter > 0)) {
		val = (timeout_retry & CB_TO_CONTROL_RETRY_MASK) <<
			CB_TO_CONTROL_RETRY_SHIFT |
			(timeout_counter & CB_TO_CONTROL_COUNTER_MASK) <<
			CB_TO_CONTROL_COUNTER_SHIFT;
		DEV_DBG(M2MLC_DBG_MSK_HW, &priv->pdev->dev,
			"Setting NIC_TO to %d %d: 0x%08X\n",
			timeout_retry, timeout_counter, val);
		m2mlc_write_reg32(priv->reg_base, RB_COM + CB_TO_CONTROL, val);
	}


	/* init base address registers: zero them all out */
	m2mlc_write_reg64(priv->reg_base, RB_COM + CB_PIO_DONE_QUE_ADDR_L, 0);
	m2mlc_write_reg64(priv->reg_base, RB_COM + CB_PIO_DATA_QUE_ADDR_L, 0);
	for (i = 0; i < priv->niccpb_procval; i++) {
		m2mlc_write_reg64(priv->reg_base,
				RB_N(i) + M2MLC_RB_MB_STR_ADDR_L, 0);
		m2mlc_write_reg64(priv->reg_base,
				RB_N(i) + M2MLC_RB_MB_RET_ADDR_L, 0);
		m2mlc_write_reg64(priv->reg_base,
				RB_N(i) + M2MLC_RB_MB_DONE_QUE_ADDR_L, 0);
		m2mlc_write_reg64(priv->reg_base,
				RB_N(i) + M2MLC_RB_DB_START_ADDR_L, 0);
		m2mlc_write_reg64(priv->reg_base,
				RB_N(i) + M2MLC_RB_DB_RET_ADDR_L, 0);
		m2mlc_write_reg64(priv->reg_base,
				RB_N(i) + M2MLC_RB_DMA_START_ADDR_L, 0);
		m2mlc_write_reg64(priv->reg_base,
				RB_N(i) + M2MLC_RB_DMA_RET_ADDR_L, 0);
		m2mlc_write_reg64(priv->reg_base,
				RB_N(i) + M2MLC_RB_DMA_DONE_QUE_ADDR_L, 0);
	}

	/* dofix: reset all queue head/tail pointers to zero */
	for (i = 0; i < priv->niccpb_procval; i++) {
		m2mlc_hw_mb_settailptr_done(priv, i, 0);
		m2mlc_hw_mb_settailptr_mail(priv, i, 0);
		m2mlc_hw_dma_settailptr_done(priv, i, 0);
		m2mlc_hw_dma_setheadptr_str(priv, i, 0);
	}

	/* disable access */
	for (i = 0; i < priv->niccpb_procval; i++) {
		m2mlc_write_reg32(priv->reg_base, RB_COM + CB_ADDR_ACC_CTRL(i),
				/* 0 */ 1); /* FIXME: chk in new RTL - ok? */
	}

	return 0;
} /* m2mlc_hw_reset */
/**
 * First Init at end of probe
 *
 * Programs the physical (DMA handle) addresses of all per-endpoint
 * queues/buffers into the hardware, then disables address access on
 * every resource block except Maintenance on PIO 0.
 * With USE_MUL2ALIGN each handle gets its alignment offset added.
 */
void m2mlc_hw_init(m2mlc_priv_t *priv)
{
	int i;
#if 0
	u32 irq_mask;
#endif

	/* enable access to all resource blocks' address registers */
	for (i = 0; i < priv->niccpb_procval; i++) {
		m2mlc_write_reg32(priv->reg_base, RB_COM + CB_ADDR_ACC_CTRL(i),
				CB_ADDR_ACC_CTRL_ADDR_MASK);
	}

	/* init base address registers */
	m2mlc_write_reg64_endian(priv->reg_base,
				RB_COM + CB_PIO_DONE_QUE_ADDR_L,
#ifdef USE_MUL2ALIGN
				priv->pio_done_que_handle +
				priv->pio_done_que_offset);
#else
				priv->pio_done_que_handle);
#endif /* USE_MUL2ALIGN */
	m2mlc_write_reg64_endian(priv->reg_base,
				RB_COM + CB_PIO_DATA_QUE_ADDR_L,
#ifdef USE_MUL2ALIGN
				priv->pio_data_que_handle +
				priv->pio_data_que_offset);
#else
				priv->pio_data_que_handle);
#endif /* USE_MUL2ALIGN */
	for (i = 0; i < priv->niccpb_procval; i++) {
		m2mlc_write_reg64_endian(priv->reg_base,
					RB_N(i) + M2MLC_RB_MB_STR_ADDR_L,
#ifdef USE_MUL2ALIGN
					priv->mb_struct_handle[i] +
					priv->mb_struct_offset[i]);
#else
					priv->mb_struct_handle[i]);
#endif /* USE_MUL2ALIGN */
		/* mdd_ret buffer holds 3 u32 return slots: MB, DB, DMA */
		m2mlc_write_reg64_endian(priv->reg_base,
					RB_N(i) + M2MLC_RB_MB_RET_ADDR_L,
					priv->mdd_ret_handle[i] + 0);
		m2mlc_write_reg64_endian(priv->reg_base,
					RB_N(i) + M2MLC_RB_MB_DONE_QUE_ADDR_L,
#ifdef USE_MUL2ALIGN
					priv->mb_done_que_handle[i] +
					priv->mb_done_offset[i]);
#else
					priv->mb_done_que_handle[i]);
#endif /* USE_MUL2ALIGN */
		m2mlc_write_reg64_endian(priv->reg_base,
					RB_N(i) + M2MLC_RB_DB_START_ADDR_L,
					priv->db_start_handle[i]);
		m2mlc_write_reg64_endian(priv->reg_base,
					RB_N(i) + M2MLC_RB_DB_RET_ADDR_L,
					priv->mdd_ret_handle[i] + sizeof(u32));
		m2mlc_write_reg64_endian(priv->reg_base,
					RB_N(i) + M2MLC_RB_DMA_START_ADDR_L,
#ifdef USE_MUL2ALIGN
					priv->dma_start_handle[i] +
					priv->dma_start_offset[i]);
#else
					priv->dma_start_handle[i]);
#endif /* USE_MUL2ALIGN */
		m2mlc_write_reg64_endian(priv->reg_base,
					RB_N(i) + M2MLC_RB_DMA_RET_ADDR_L,
					priv->mdd_ret_handle[i] + (sizeof(u32)*2));
		m2mlc_write_reg64_endian(priv->reg_base,
					RB_N(i) + M2MLC_RB_DMA_DONE_QUE_ADDR_L,
#ifdef USE_MUL2ALIGN
					priv->dma_done_que_handle[i] +
					priv->dma_done_offset[i]);
#else
					priv->dma_done_que_handle[i]);
#endif /* USE_MUL2ALIGN */
	}

	/* disable access */
	for (i = 0; i < priv->niccpb_procval; i++) {
		m2mlc_write_reg32(priv->reg_base, RB_COM + CB_ADDR_ACC_CTRL(i),
				0);
	}

	/* Enable Maintenance on PIO 0 */
	m2mlc_write_reg32(priv->reg_base, RB_COM + CB_ADDR_ACC_CTRL(0),
			CB_ADDR_ACC_CTRL_MAINT_EN | 1);

#if 0
	/* DEBUG: */
	for (i = 0; i < priv->niccpb_procval; i++) {
		irq_mask = M2MLC_RB_INT_ENABLE_MSK;
		m2mlc_write_reg32(priv->reg_base,
				RB_N(i) + M2MLC_RB_INT_ENABLE, irq_mask);
	}
#endif

} /* m2mlc_hw_init */

/**
 * Set ENDIANES
 *
 * Currently a stub; byte-order setup for sparc is planned.
 */
void m2mlc_hw_set_endianes(m2mlc_priv_t *priv)
{
	/* FUTURE: for sparc */
} /* m2mlc_hw_set_endianes */


/**
 ******************************************************************************
 * PIO
 ******************************************************************************
 */

/*
 * Start PIO Transaction
 *
 * Writing Format & Status (TRANS_FS) last is what kicks off the
 * transaction, so the order of these register writes matters.
 */
void m2mlc_hw_pio_start(m2mlc_priv_t *priv, int ep, m2mlc_pio_cmd_t *pio_cmd)
{
	m2mlc_write_reg64(priv->reg_base, RB_N(ep) + M2MLC_RB_PIO_TRGT_PTR_L,
			pio_cmd->Target_ptr);
	m2mlc_write_reg32(priv->reg_base, RB_N(ep) + M2MLC_RB_PIO_TRANS_PRM,
			pio_cmd->Parameter.r);
	m2mlc_write_reg32(priv->reg_base, RB_N(ep) + M2MLC_RB_PIO_DRBL,
			pio_cmd->Remote_Doorbell);
	m2mlc_write_reg32(priv->reg_base, RB_N(ep) + M2MLC_RB_PIO_TRANS_FS,
			pio_cmd->Format.r);
} /* m2mlc_hw_pio_start */

/*
 * Get PIO Transaction status
 *
 * Only the low byte of the Format & Status register is reported.
 */
void m2mlc_hw_pio_getstat(m2mlc_priv_t *priv, int ep, uint8_t *piostat/*,
			  int complete*/)
{
	u32 val = m2mlc_read_reg32(priv->reg_base,
				RB_N(ep) + M2MLC_RB_PIO_TRANS_FS);
	*piostat = (u8)val;

	/* FIXME: ??? */
	/*if (complete) {
		m2mlc_write_reg32(priv->reg_base,
				RB_N(ep) + M2MLC_RB_PIO_TRANS_FS,
				M2MLC_PIO_STAT_COMPLETE);
	}*/
} /* m2mlc_hw_pio_getstat */
*/ + /*if (complete) { + m2mlc_write_reg32(priv->reg_base, + RB_N(ep) + M2MLC_RB_PIO_TRANS_FS, + M2MLC_PIO_STAT_COMPLETE); + }*/ +} /* m2mlc_hw_pio_getstat */ + + +/** + ****************************************************************************** + * Mailbox + ****************************************************************************** + */ + +/* + * Get Mailbox Structure Head & Tail Pointers + * + */ +void m2mlc_hw_mb_getptrs_mail(m2mlc_priv_t *priv, int ep, + m2mlc_mb_ptrs_t *mbptrs) +{ + mbptrs->r = m2mlc_read_reg32(priv->reg_base, + RB_N(ep) + M2MLC_RB_MB_STR_PTRS); +} /* m2mlc_hw_mb_getptrs_mail */ + +/* + * Set Mailbox Structure Tail Pointer + * + */ +void m2mlc_hw_mb_settailptr_mail(m2mlc_priv_t *priv, int ep, + uint16_t tail_ptr) +{ + m2mlc_mb_ptrs_t mbptrs; + + mbptrs.r = 0; + mbptrs.p.TailPtr = tail_ptr; + m2mlc_write_reg32(priv->reg_base, + RB_N(ep) + M2MLC_RB_MB_STR_PTRS, mbptrs.r); +} /* m2mlc_hw_mb_settailptr_mail */ + + +/* + * Get Mailbox Done Head & Tail Pointers + * + */ +void m2mlc_hw_mb_getptrs_done(m2mlc_priv_t *priv, int ep, + m2mlc_mb_ptrs_t *mbptrs) +{ + mbptrs->r = m2mlc_read_reg32(priv->reg_base, + RB_N(ep) + M2MLC_RB_MB_DONE_PTRS); +} /* m2mlc_hw_mb_getptrs_done */ + +/* + * Set Mailbox Done Tail Pointer + * + */ +void m2mlc_hw_mb_settailptr_done(m2mlc_priv_t *priv, int ep, + uint16_t tail_ptr) +{ + m2mlc_mb_ptrs_t mbptrs; + + mbptrs.r = 0; + mbptrs.p.TailPtr = tail_ptr; /* + bit 31: Shadow Copy Enable */ + m2mlc_write_reg32(priv->reg_base, + RB_N(ep) + M2MLC_RB_MB_DONE_PTRS, mbptrs.r); +} /* m2mlc_hw_mb_settailptr_done */ + + +/** + ****************************************************************************** + * DoorBell + ****************************************************************************** + */ + +/* + * Get DoorBell Head & Tail Pointer + * + */ +void m2mlc_hw_db_getptrs(m2mlc_priv_t *priv, int ep, m2mlc_db_ptrs_t *dbptrs) +{ + dbptrs->r = m2mlc_read_reg32(priv->reg_base, + RB_N(ep) + M2MLC_RB_DB_PTRS); +} /* 
m2mlc_hw_db_getptrs */ + +/* + * Set DoorBell Head Pointer + * + */ +void m2mlc_hw_db_settailptr(m2mlc_priv_t *priv, int ep, uint16_t tail_ptr) +{ + m2mlc_db_ptrs_t dbptrs; + + dbptrs.p.HeadPtr = tail_ptr; /* + bit 31: Shadow Copy Enable */ + m2mlc_write_reg32(priv->reg_base, + RB_N(ep) + M2MLC_RB_DB_PTRS, dbptrs.r); +} /* m2mlc_hw_db_settailptr */ + + +/** + ****************************************************************************** + * Interrupt + ****************************************************************************** + */ + +/* + * Interrupt Enable/Disable + * + */ +void m2mlc_hw_int_setmask(m2mlc_priv_t *priv, int ep, + m2mlc_interrupt_t intmask) +{ + /* TODO: lock */ + m2mlc_write_reg32(priv->reg_base, + RB_N(ep) + M2MLC_RB_INT_ENABLE, intmask.r); +} /* m2mlc_hw_int_setmask */ + +#if 0 +/* + * Confirm Interrupt + * + */ +void m2mlc_hw_int_clear(m2mlc_priv_t *priv, int ep, + m2mlc_interrupt_t intclr) +{ + m2mlc_write_reg32(priv->reg_base, + RB_N(ep) + M2MLC_RB_INT_STATUS, intclr.r); +} /* m2mlc_hw_int_clear */ +#endif /* 0 */ + +/* + * Get Interrupt Status + * + */ +void m2mlc_hw_int_getstat(m2mlc_priv_t *priv, int ep, + m2mlc_int_stat_t *intstat) +{ + intstat->fromreg.r = m2mlc_read_reg32(priv->reg_base, + RB_N(ep) + M2MLC_RB_INT_STATUS); + intstat->intmask.r = m2mlc_read_reg32(priv->reg_base, + RB_N(ep) + M2MLC_RB_INT_ENABLE); + /* TODO: lock & clean fromreg & ~intmask */ +} /* m2mlc_hw_int_getstat */ + + +/** + ****************************************************************************** + * DMA + ****************************************************************************** + */ + +/* + * Get DMA Structure Head & Tail Pointers + * + */ +void m2mlc_hw_dma_getptrs_str(m2mlc_priv_t *priv, int ep, + m2mlc_dma_str_ptrs_t *dmaptrs) +{ + dmaptrs->r = m2mlc_read_reg32(priv->reg_base, + RB_N(ep) + M2MLC_RB_DMA_STR_PTRS); +} /* m2mlc_hw_dma_getptrs_str */ + +/* + * Set DMA Structure Head Pointer + * + */ +void m2mlc_hw_dma_setheadptr_str(m2mlc_priv_t 
*priv, int ep, + uint16_t head_ptr) +{ + m2mlc_dma_str_ptrs_t dmaptrs; + + dmaptrs.r = 0; + dmaptrs.p.HeadPtr = head_ptr; + m2mlc_write_reg32(priv->reg_base, RB_N(ep) + M2MLC_RB_DMA_STR_PTRS, + dmaptrs.r); +} /* m2mlc_hw_dma_setheadptr_str */ + + +/* + * Get DMA Done Head & Tail Pointers + * + */ +void m2mlc_hw_dma_getptrs_done(m2mlc_priv_t *priv, int ep, + m2mlc_dma_done_ptrs_t *dmaptrs) +{ + dmaptrs->r = m2mlc_read_reg32(priv->reg_base, + RB_N(ep) + M2MLC_RB_DMA_DONE_PTRS); +} /* m2mlc_hw_dma_getptrs_done */ + +/* + * Set DMA Done Tail Pointer + * + */ +void m2mlc_hw_dma_settailptr_done(m2mlc_priv_t *priv, int ep, + uint16_t tail_ptr) +{ + m2mlc_dma_done_ptrs_t dmaptrs; + + dmaptrs.r = 0; + dmaptrs.p.TailPtr = tail_ptr; + m2mlc_write_reg32(priv->reg_base, RB_N(ep) + M2MLC_RB_DMA_DONE_PTRS, + dmaptrs.r); +} /* m2mlc_hw_dma_settailptr_done */ + + +/** + ****************************************************************************** + * IRQ handler + ****************************************************************************** + */ + +void m2mlc_net_irq_handler(m2mlc_priv_t *m2mlc_priv, uint32_t irq_stat); + +/** + * Interrupt handler + * + * @irq: not used / TODO: save base IRQ num in priv + * @dev_id: PCI device information struct + */ +irqreturn_t m2mlc_irq_handler(int irq, void *dev_id) +{ + struct pci_dev *pdev; + m2mlc_priv_t *priv; + uint32_t irq_stat; + void __iomem *base_addr; + int nirq; + struct kernel_siginfo info; + + + if (!dev_id) + return IRQ_NONE; + pdev = (struct pci_dev *)dev_id; + + priv = pci_get_drvdata(dev_id); + if (!priv) + return IRQ_NONE; + + base_addr = priv->reg_base; + if (!base_addr) + return IRQ_NONE; + + DEV_DBG(M2MLC_DBG_MSK_IRQ, &pdev->dev, "IRQ #%d\n", irq); + + /* Read IRQ status */ + /*irq_stat = COMMON_STATUS_GET_INTSRC( + ioread32(P_COMMON_STATUS_REG(base_addr))); + if (!irq_stat) return IRQ_NONE; */ + + /* TODO: read CB_COM_INT_STATUS + * + ioctl --> 0-EP + */ + + nirq = irq - pdev->irq; + if ((nirq < 0) || (nirq >= 
priv->niccpb_procval)) { + DEV_DBG(M2MLC_DBG_MSK_IRQ, &pdev->dev, "IRQ_NONE #%d\n", irq); + return IRQ_NONE; + } + + irq_stat = m2mlc_read_reg32(priv->reg_base, + RB_N(nirq) + M2MLC_RB_INT_STATUS); + m2mlc_write_reg32(priv->reg_base, + RB_N(nirq) + M2MLC_RB_INT_STATUS, irq_stat); + DEV_DBG(M2MLC_DBG_MSK_IRQ, &pdev->dev, + "IRQ: stat = 0x%X\n", irq_stat); + +#ifdef ENABLE_NET_DEV + if (CDEV_ENDPOINT_NET == nirq) { + m2mlc_net_irq_handler(priv, irq_stat); + } else { +#else + { +#endif /* ENABLE_NET_DEV */ + if (priv->signal[nirq]) { + info.si_signo = priv->signal[nirq]; + info.si_code = (int)irq_stat; + info.si_errno = 0; /* no recovery */ + send_sig_info(priv->signal[nirq], &info, + priv->tsk[nirq]); + /*send_sig(priv->signal[nirq], priv->tsk[nirq], 0);*/ + } + } + + /* TODO: lock & save fromirq */ + + return IRQ_HANDLED; +} /* m2mlc_irq_handler */ + + +/** + ****************************************************************************** + * Debug + ****************************************************************************** + */ + +#define PREG_N(R, C, N) LOG_MSG("\t0x%02X: 0x%08X - %s\n", R, \ + m2mlc_read_reg32(priv->reg_base, RB_N(N) + R), \ + C) +#define PREG_C(R, C) LOG_MSG("\t0x%02X: 0x%08X - %s\n", R, \ + m2mlc_read_reg32(priv->reg_base, RB_COM + R), \ + C) + +#define PREG_E(R, C) LOG_MSG("\t0x%03X: 0x%08X - %s\n", R, \ + m2mlc_read_reg32(priv->ecs_base, R), \ + C) + +/** + * Regs Dump + */ +void m2mlc_hw_print_all_regs(m2mlc_priv_t *priv, uint32_t regmsk) +{ + int i; + u_int32_t reg_id[] = { + ECS_DEVID_CAR, + ECS_DEVINF_CAR, + ECS_ASMBLID_CAR, + ECS_ASMBLINF_CAR, + ECS_PEF_CAR, + ECS_PELLCTRL_CSR, + ECS_GPSTAT_CSR, + ECS_BASEDEVID_CSR, + ECS_HBASEDEVIDLOCK_CSR, + ECS_ROUTE_RESP, + ECS_PHYSTAT_CTRL + }; + char *reg_name[] = { + "Device_Identity_CAR ", + "Device_Information_CAR ", + "Assembly_Identity_CAR ", + "Assembly_Information_CAR ", + "Processing_Elem_Features_CAR ", + "Processing_Elem_LogLayCtrl_CSR", + "General_Port_Status_CSR ", + 
"Base_Device_ID_CSR ", + "Host_Base_Device_ID_Lock_CSR ", + "Responce Route Field ", + "PHY_Port_Pn_Status_Control ", + " " + }; + u_int32_t reg_id_rtacc[] = { + ECS_RTACCSTAT_0, + ECS_RTACCSTAT_1, + ECS_RTACCSTAT_2, + ECS_RTACCSTAT_3, + ECS_RTACCSTAT_4, + ECS_RTACCSTAT_5, + ECS_RTACCSTAT_6, + ECS_RTACCSTAT_7 + }; + char *reg_name_rtacc[] = { + "Status_0 [1F..00]", + "Status_1 [3F..20]", + "Status_2 [5F..40]", + "Status_3 [7F..60]", + "Status_4 [9F..80]", + "Status_5 [BF..A0]", + "Status_6 [DF..C0]", + "Status_7 [FF..E0]", + " " + }; + + + LOG_MSG("\n"); + LOG_MSG(" -= register dump (hex) =-\n"); + + if (!(M2MLC_PRINTREG_BAR0 & regmsk)) + goto skip_1; + LOG_MSG("BAR0: Element_Config_Space\n"); + for (i = 0; i < ARRAY_SIZE(reg_id); i++) { + PREG_E(reg_id[i], reg_name[i]); + } + +skip_1: + if (!(M2MLC_PRINTREG_RTACCESS & regmsk)) + goto skip_2; + LOG_MSG(" RT Access Status:\n"); + for (i = 0; i < ARRAY_SIZE(reg_id_rtacc); i++) { + PREG_E(reg_id_rtacc[i], reg_name_rtacc[i]); + } + +skip_2: + if (!(M2MLC_PRINTREG_BAR1 & regmsk)) + goto skip_3; + LOG_MSG("BAR1: Control Regs: PIO,Mailbox," \ + "DoorBell,DMA,Interrupt,Status\n"); + for (i = 0; i < priv->niccpb_procval; i++) { + if (!((1UL << i) & regmsk)) + continue; + LOG_MSG(" Resource Block %d\n", i); + /* = PIO box = */ + PREG_N(M2MLC_RB_PIO_TRGT_PTR_L, "PIO Target Pointer lower", i); + PREG_N(M2MLC_RB_PIO_TRGT_PTR_H, "PIO Target Pointer upper", i); + PREG_N(M2MLC_RB_PIO_TRANS_PRM, + "PIO Transaction parameters", i); + PREG_N(M2MLC_RB_PIO_TRANS_FS, + "PIO Transaction Format & Status", i); + PREG_N(M2MLC_RB_PIO_DRBL, "PIO Doorbell", i); + /* = Mailbox Register Block = */ + PREG_N(M2MLC_RB_MB_STR_ADDR_L, + "Mailbox Structure Address Lower", i); + PREG_N(M2MLC_RB_MB_STR_ADDR_H, + "Mailbox Structure Address Upper", i); + PREG_N(M2MLC_RB_MB_STR_PTRS, + "Mailbox Struct Head & Tail Pointer", i); + PREG_N(M2MLC_RB_MB_DONE_PTRS, + "Mailbox Done Head & Tail Pointer", i); + PREG_N(M2MLC_RB_MB_RET_ADDR_L, + "Mailbox Return 
Address Lower", i); + PREG_N(M2MLC_RB_MB_RET_ADDR_H, + "Mailbox Return Address Upper", i); + PREG_N(M2MLC_RB_MB_DONE_QUE_ADDR_L, + "Mailbox Done Queue Address Lower", i); + PREG_N(M2MLC_RB_MB_DONE_QUE_ADDR_H, + "Mailbox Done Queue Address Upper", i); + /* = Doorbell's Register Block = */ + PREG_N(M2MLC_RB_DB_START_ADDR_L, + "Doorbell Start Address Lower", i); + PREG_N(M2MLC_RB_DB_START_ADDR_H, + "Doorbell Start Address Upper", i); + PREG_N(M2MLC_RB_DB_RET_ADDR_L, + "Doorbell Return Address Lower", i); + PREG_N(M2MLC_RB_DB_RET_ADDR_H, + "Doorbell Return Address Upper", i); + PREG_N(M2MLC_RB_DB_PTRS, "Doorbell Head & Tail Pointer", i); + /* = DMA Mode Block = */ + PREG_N(M2MLC_RB_DMA_START_ADDR_L, + "DMA Start Address Lower", i); + PREG_N(M2MLC_RB_DMA_START_ADDR_H, + "DMA Start Address Upper", i); + PREG_N(M2MLC_RB_DMA_STR_PTRS, + "DMA Structure Head & Tail Pointer", i); + PREG_N(M2MLC_RB_DMA_QUE_SIZE, "DMA Queue Size Register", i); + PREG_N(M2MLC_RB_DMA_RET_ADDR_L, "DMA Return Address Lower", i); + PREG_N(M2MLC_RB_DMA_RET_ADDR_H, "DMA Return Address Upper", i); + PREG_N(M2MLC_RB_DMA_DONE_QUE_ADDR_L, + "DMA Done Queue Address Lower", i); + PREG_N(M2MLC_RB_DMA_DONE_QUE_ADDR_H, + "DMA Done Queue Address Upper", i); + PREG_N(M2MLC_RB_DMA_DONE_PTRS, + "DMA Done Head & Tail Pointer", i); + /* = Interrupts = */ + PREG_N(M2MLC_RB_INT_STATUS, "Interrupt Status", i); + PREG_N(M2MLC_RB_INT_ENABLE, "Interrupt Enable", i); + /* = Error Reporting = */ + PREG_N(M2MLC_RB_ERR_STATUS, "Error Status", i); + } + +skip_3: + if (!(M2MLC_PRINTREG_COMMON & regmsk)) + return; + LOG_MSG(" Common Block\n"); +#if 0 + LOG_MSG(" IOMMU Control Block\n"); + PREG_C(CB_IOMMU_CONTROL, "IOMMU Control Register"); +#endif /* 0 */ + + LOG_MSG(" Addresses Access Control Structure\n"); + for (i = 0; i < priv->niccpb_procval; i++) { + PREG_C(CB_ADDR_ACC_CTRL(i), "Addresses Access Register N"); + } + LOG_MSG(" PIO Common Block\n"); + PREG_C(CB_PIO_DONE_QUE_ADDR_L, "PIO Done Queue Table Address Lower"); + 
PREG_C(CB_PIO_DONE_QUE_ADDR_H, "PIO Done Queue Table Address Upper"); + PREG_C(CB_PIO_DATA_QUE_ADDR_L, "PIO Data Queue Table Address Lower"); + PREG_C(CB_PIO_DATA_QUE_ADDR_H, "PIO Data Queue Table Address Upper"); + PREG_C(CB_PIO_BOXES_AVAIL, "PIO boxes availability"); + LOG_MSG(" Timeout Control\n"); + PREG_C(CB_TO_CONTROL, "Timeout Control Register"); + LOG_MSG(" Common Interrupt Status & Mask\n"); + PREG_C(CB_COM_INT_STATUS, "Common Interrupt Status"); + PREG_C(CB_COM_INT_MASK, "Common Interrupt Mask"); +} /* hw_print_all_regs */ diff --git a/drivers/mcst/m2mlc/m2mlc_ksvv.c b/drivers/mcst/m2mlc/m2mlc_ksvv.c new file mode 100644 index 000000000000..23c9bb79439d --- /dev/null +++ b/drivers/mcst/m2mlc/m2mlc_ksvv.c @@ -0,0 +1,887 @@ +/** + * m2mlc_ksvv.c - M2MLC module device driver + * + * Network part + */ + +#include +#include + +#include "m2mlc.h" +#include "m2mlc_ksvv.h" + + +/* 1024 msgs; 10 bit address; 11 bit ptrs */ +#define MSGS_10BIT_MSK ((1 << 10) - 1) + +/* 4096 dones; 13 bit address; 12 bit ptrs */ +#define MSGS_12BIT_MSK ((1 << 12) - 1) + +#define KSVV_GET_TYPE(u) ((((u).type_size) & 0xf000) >> 12) +#define KSVV_GET_SIZE(u) ((((u).type_size) & 0xfff) + 1) +#define KSVV_TYPE_SIZE(type, size) \ + ((uint16_t)((((type) & 0xf) << 12) + (((size) - 1) & 0xfff))) + + +/** + ****************************************************************************** + * INIT + ****************************************************************************** + **/ + +/* + * size parameter in 4K pages; not more 4 MB at once + * Todo: add check of NUMA memory location (nume_movepages 0?) 
+ */ +static void *ksvv_alloc_mem(m2mlc_npriv_t *npriv, uint32_t size, void **virt, + void **phys) +{ + void *useraddr = NULL; + m2mlc_mem_ptrs_t mem_ptrs; + ksvv_endpoint_t *endpoint = &npriv->ksvvendpoint; + struct net_device *ndev = npriv->p_priv->ndev; + + + if (endpoint->cur_mem >= KSVV_MEM_SEGMENTS) { + dev_err(&ndev->dev, "ERROR: max mem count reached\n"); + return NULL; + } + /* not more 256 MB */ + if ((size <= 0) || (size > 1024)) { + dev_err(&ndev->dev, + "ERROR: size %d too high (min 1 page; " \ + "max 1024 pages; page=4K)\n", size); + return NULL; + } + useraddr = dma_alloc_coherent(&ndev->dev, size * 4 * 1024, + (dma_addr_t *)(&(mem_ptrs.dmaaddr)), + GFP_KERNEL); + if (!useraddr) { + dev_err(&ndev->dev, "ERROR: Can't allocate memory\n"); + return NULL; + } + mem_ptrs.useraddr = (uint64_t)useraddr; + mem_ptrs.bytes = size * 4 * 1024; + + DEV_DBG(M2MLC_DBG_MSK_NET, &ndev->dev, + "Alloc Mem: dmaaddr=0x%llX virtaddr=0x%llX " \ + "size=0x%llX(%lld), %d\n", + mem_ptrs.dmaaddr, mem_ptrs.useraddr, + mem_ptrs.len, mem_ptrs.len, endpoint->cur_mem); + + /* use data on buffer */ + endpoint->mems_ptrs[endpoint->cur_mem] = mem_ptrs; + endpoint->cur_mem++; + if (virt) + *virt = (void *)(uintptr_t)mem_ptrs.useraddr; + if (phys) + *phys = (void *)(uintptr_t)mem_ptrs.dmaaddr; + + return (void *)(uintptr_t)mem_ptrs.useraddr; +} /* ksvv_alloc_mem */ + +static void ksvv_free_all_mem(m2mlc_npriv_t *npriv) +{ + ksvv_endpoint_t *endpoint = &npriv->ksvvendpoint; + struct net_device *ndev = npriv->p_priv->ndev; + m2mlc_mem_ptrs_t mem_ptrs; + int i; + + + for (i = 0; i < endpoint->cur_mem; i++) { + mem_ptrs = endpoint->mems_ptrs[i]; + + DEV_DBG(M2MLC_DBG_MSK_NET, &ndev->dev, + "Free Mem: dmaaddr=0x%llX virtaddr=0x%llX " \ + "size=0x%llX(%lld) nents=%ld, [%d]\n", + mem_ptrs.dmaaddr, mem_ptrs.useraddr, + mem_ptrs.len, mem_ptrs.len, mem_ptrs.nents, i); + + if (mem_ptrs.useraddr) + dma_free_coherent(&ndev->dev, mem_ptrs.bytes, + (void *)mem_ptrs.useraddr, + mem_ptrs.dmaaddr); + } 
+} /* ksvv_free_all_mem */ + +/* + * Read initial values of ptrs + */ +static int ksvv_reinit_queue_ptr(m2mlc_npriv_t *npriv) +{ + int status = 0; + ksvv_endpoint_t *endpoint = &npriv->ksvvendpoint; + struct net_device *ndev = npriv->p_priv->ndev; + volatile uint32_t *endpoint_regs = endpoint->endpoint_regs; + + + endpoint->dma_0.r = endpoint_regs[M2MLC_RB_DMA_STR_PTRS >> 2]; + endpoint->dmadone_0.r = endpoint_regs[M2MLC_RB_DMA_DONE_PTRS >> 2]; + endpoint->mb_0.r = endpoint_regs[M2MLC_RB_MB_STR_PTRS >> 2]; + endpoint->mbdone_0.r = endpoint_regs[M2MLC_RB_MB_DONE_PTRS >> 2]; + endpoint->db_0.r = endpoint_regs[M2MLC_RB_DB_PTRS >> 2]; + + if (endpoint->dma_0.p.r_tail != endpoint->dma_0.p.w_head) { + dev_err(&ndev->dev, + "ERROR: DMA Queue is not empty; tail=%d head=%d\n", + endpoint->dma_0.p.r_tail, + endpoint->dma_0.p.w_head); + status = 4; + goto exit_reg_init; + } + if (endpoint->dmadone_0.p.w_tail != endpoint->dmadone_0.p.r_head) { + dev_err(&ndev->dev, + "ERROR: DMA Done Queue is not empty; tail=%d head=%d\n", + endpoint->dmadone_0.p.w_tail, + endpoint->dmadone_0.p.r_head); + status = 5; + goto exit_reg_init; + } +#if 0 + if (endpoint->mb_0.p.w_tail != endpoint->mb_0.p.r_head) { + dev_err(&ndev->dev, + "ERROR: Mailbox Queue is not empty; tail=%d head=%d\n", + endpoint->mb_0.p.w_tail, + endpoint->mb_0.p.r_head); + status = 6; + goto exit_reg_init; + } + if (endpoint->mbdone_0.p.w_tail != endpoint->mbdone_0.p.r_head) { + dev_err(&ndev->dev, + "ERROR: Mailbox done Queue is not empty; " \ + "tail=%d head=%d\n", + endpoint->mbdone_0.p.w_tail, + endpoint->mbdone_0.p.r_head); + status = 7; + goto exit_reg_init; + } +#else + endpoint->mb_0.p.w_tail = endpoint->mb_0.p.r_head; + endpoint->mbdone_0.p.w_tail = endpoint->mbdone_0.p.r_head; + + endpoint_regs[M2MLC_RB_MB_STR_PTRS >> 2] = endpoint->mb_0.r; + endpoint_regs[M2MLC_RB_MB_DONE_PTRS >> 2] = endpoint->mbdone_0.r; +#endif /* 0 */ + + if (endpoint->db_0.p.w_tail != endpoint->db_0.p.r_head) { + dev_err(&ndev->dev, + 
"ERROR: Doorbell Queue is not empty; " \ + "tail=%d head=%d\n", + endpoint->db_0.p.w_tail, + endpoint->db_0.p.r_head); + status = 8; + goto exit_reg_init; + } + + /* Enable Shadow copy */ + endpoint->dmadone_0.p.sce = 1; + endpoint->mbdone_0.p.sce = 1; + endpoint->db_0.p.sce = 1; + endpoint_regs[M2MLC_RB_DMA_DONE_PTRS >> 2] = endpoint->dmadone_0.r; + endpoint_regs[M2MLC_RB_MB_DONE_PTRS >> 2] = endpoint->mbdone_0.r; + endpoint_regs[M2MLC_RB_DB_PTRS >> 2] = endpoint->db_0.r; + /* Repeat write to get actual values in the Shadow Copy in mem */ + endpoint_regs[M2MLC_RB_DMA_DONE_PTRS >> 2] = endpoint->dmadone_0.r; + endpoint_regs[M2MLC_RB_MB_DONE_PTRS >> 2] = endpoint->mbdone_0.r; + endpoint_regs[M2MLC_RB_DB_PTRS >> 2] = endpoint->db_0.r; + + DEV_DBG(M2MLC_DBG_MSK_NET, &ndev->dev, + "PTRS[%d] SCE done; *_0: dma=%08x dmadone=%08x (SC:%08x) " \ + "mb=%08x mbdone=%08x (SC:%08x) db=%08x (SC:%08x)\n", + CDEV_ENDPOINT_NET, + endpoint->dma_0.r, endpoint->dmadone_0.r, + endpoint->done_regs->dma_head_done_ptr, + endpoint->mb_0.r, endpoint->mbdone_0.r, + endpoint->done_regs->mb_write_done_ptr, + endpoint->db_0.r, endpoint->done_regs->db_write_done_ptr); + DEV_DBG(M2MLC_DBG_MSK_NET, &ndev->dev, + "PTRS[%d] SCE done; endp dma=%08x dmadone=%08x (SC:%08x) " \ + "mb=%08x mbdone=%08x (SC:%08x) db=%08x (SC:%08x)\n", + CDEV_ENDPOINT_NET, + endpoint_regs[M2MLC_RB_DMA_STR_PTRS>>2], + endpoint_regs[M2MLC_RB_DMA_DONE_PTRS>>2], + endpoint->done_regs->dma_head_done_ptr, + endpoint_regs[M2MLC_RB_MB_STR_PTRS>>2], + endpoint_regs[M2MLC_RB_MB_DONE_PTRS>>2], + endpoint->done_regs->mb_write_done_ptr, + endpoint_regs[M2MLC_RB_DB_PTRS>>2], + endpoint->done_regs->db_write_done_ptr); + +exit_reg_init: + return status; +} /* ksvv_reinit_queue_ptr */ + +/* + * Open endpoint + * allocate endpoint and return in case of success + */ +int ksvv_open_endpoint(m2mlc_npriv_t *npriv) +{ + int status = 0; + ksvv_endpoint_t *endpoint = &npriv->ksvvendpoint; + struct net_device *ndev = npriv->p_priv->ndev; + volatile 
uint32_t *endpoint_regs; + int i; + + + /* ENDPOINT Registers (BAR1) */ + endpoint_regs = npriv->p_priv->reg_base + RB_N(CDEV_ENDPOINT_NET); + endpoint->endpoint_regs = endpoint_regs; + + /* FIXME: 20150713 */ + /* READ DMA Queue size [06:00] */ + endpoint_regs[M2MLC_RB_DMA_QUE_SIZE >> 2] = 64; /* 4096 */ + switch (endpoint_regs[M2MLC_RB_DMA_QUE_SIZE >> 2] & 0x7f) { + case 0: + endpoint->dma_queue_size = 32; + break; + case 1: + endpoint->dma_queue_size = 64; + break; + case 2: + endpoint->dma_queue_size = 128; + break; + case 4: + endpoint->dma_queue_size = 256; + break; + case 8: + endpoint->dma_queue_size = 512; + break; + case 16: + endpoint->dma_queue_size = 1024; + break; + case 32: + endpoint->dma_queue_size = 2048; + break; + case 64: + endpoint->dma_queue_size = 4096; + break; + } + endpoint->dma_queue_mask = endpoint->dma_queue_size - 1; + endpoint->dma_queue_mask1 = (endpoint->dma_queue_size * 2) - 1; + +#if 0 + /* PIO Payload Base Address (BAR2, 256b) */ + endpoint->pio_payload = \ + npriv->p_priv->buf_base + RB_N(CDEV_ENDPOINT_NET); + + endpoint->pio_done_queue = npriv->p_priv->pio_done_que_buff + +#ifdef USE_MUL2ALIGN + npriv->p_priv->pio_done_que_offset + +#endif /* USE_MUL2ALIGN */ + RB_N(CDEV_ENDPOINT_NET); + + endpoint->pio_data_queue = npriv->p_priv->pio_data_que_buff + +#ifdef USE_MUL2ALIGN + npriv->p_priv->pio_data_que_offset + +#endif /* USE_MUL2ALIGN */ + RB_N(CDEV_ENDPOINT_NET); +#endif /* 0 */ + + /* Status flags & Done pointers (RAM, 3 * 4b = 12b) */ + endpoint->done_regs = npriv->p_priv->mdd_ret_buff[CDEV_ENDPOINT_NET]; + +#if 0 + /* Doorbell Queue Base Address (RAM, 256*8b) */ + endpoint->db_queue = npriv->p_priv->db_start_buff[CDEV_ENDPOINT_NET]; +#endif /* 0 */ + + /* DMA Descrs Queue Base Address (RAM) */ +#ifdef USE_MUL2ALIGN + endpoint->dma_desc_queue = \ + npriv->p_priv->dma_start_buff[CDEV_ENDPOINT_NET] + + npriv->p_priv->dma_start_offset[CDEV_ENDPOINT_NET]; +#else + endpoint->dma_desc_queue = \ + 
npriv->p_priv->dma_start_buff[CDEV_ENDPOINT_NET]; +#endif /* USE_MUL2ALIGN */ + + /* DMA DONE Queue Base Address (RAM) */ +#ifdef USE_MUL2ALIGN + endpoint->dma_done_queue = \ + npriv->p_priv->dma_done_que_buff[CDEV_ENDPOINT_NET] + + npriv->p_priv->dma_done_offset[CDEV_ENDPOINT_NET]; +#else + endpoint->dma_done_queue = \ + npriv->p_priv->dma_done_que_buff[CDEV_ENDPOINT_NET]; +#endif /* USE_MUL2ALIGN */ + + /* Mailbox DONE Queue Base Address (RAM) */ +#ifdef USE_MUL2ALIGN + endpoint->mb_done_queue = \ + npriv->p_priv->mb_done_que_buff[CDEV_ENDPOINT_NET] + + npriv->p_priv->mb_done_offset[CDEV_ENDPOINT_NET]; +#else + endpoint->mb_done_queue = \ + npriv->p_priv->mb_done_que_buff[CDEV_ENDPOINT_NET]; +#endif /* USE_MUL2ALIGN */ + + /* Mailbox Base Address (RAM) */ +#ifdef USE_MUL2ALIGN + endpoint->mbox = npriv->p_priv->mb_struct_buff[CDEV_ENDPOINT_NET] + + npriv->p_priv->mb_struct_offset[CDEV_ENDPOINT_NET]; +#else + endpoint->mbox = npriv->p_priv->mb_struct_buff[CDEV_ENDPOINT_NET]; +#endif /* USE_MUL2ALIGN */ + + + /* Init memory allocation structures */ + endpoint->cur_mem = 0; + memset(&(endpoint->mems_ptrs), 0, + KSVV_MEM_SEGMENTS * sizeof(m2mlc_mem_ptrs_t)); + if (NULL == ksvv_alloc_mem(npriv, KSVV_MEM_SIZE, + &(endpoint->dma1_virt), &(endpoint->dma1_phys))) { + dev_err(&ndev->dev, + "ERROR: Can't allocate DMA memory of %d pages, " \ + "region 1\n", + KSVV_MEM_SIZE); + status = 1; + goto exit_oe_err; + } + if (NULL == ksvv_alloc_mem(npriv, KSVV_MEM_SIZE, + &(endpoint->dma2_virt), &(endpoint->dma2_phys))) { + dev_err(&ndev->dev, + "ERROR: Can't allocate DMA memory of %d pages, " \ + "region 2\n", + KSVV_MEM_SIZE); + status = 2; + goto exit_oe_err; + } + if (NULL == ksvv_alloc_mem(npriv, KSVV_MEM_SIZE, + &(endpoint->dma3_virt), &(endpoint->dma3_phys))) { + dev_err(&ndev->dev, + "ERROR: Can't allocate DMA memory of %d pages, " \ + "region 3\n", + KSVV_MEM_SIZE); + status = 3; + goto exit_oe_err; + } + + + /* Init local copies of queue registers */ + if ((status = 
ksvv_reinit_queue_ptr(npriv)) != 0) { + goto exit_oe_err; + } + + /* Zero out reordering window masks */ + for (i = 0; i < KSVV_MBOX_WIN_SIZE; i++) + endpoint->mbox_window[i] = 0; + + for (i = 0; i < KSVV_DMA_WIN_SIZE; i++) + endpoint->dma_window[i] = 0; + + endpoint->mbox_window_pending = 0; + + DEV_DBG(M2MLC_DBG_MSK_NET, &ndev->dev, + "ksvv_open_endpoint SUCCESS\n"); + + return status; + +exit_oe_err: + ksvv_free_all_mem(npriv); + return status; +} /* ksvv_open_endpoint */ + +int ksvv_poll(m2mlc_npriv_t *npriv); + +int ksvv_close_endpoint(m2mlc_npriv_t *npriv) +{ + int status = 0; + struct net_device *ndev = npriv->p_priv->ndev; + unsigned long timestart; + + + timestart = jiffies; + while (ksvv_poll(npriv) != 0) { + dev_err(&ndev->dev, + "ERROR: ksvv_close_endpoint; nonzero poll\n"); + if (time_after(jiffies, timestart + HZ)) + break; + } + + /* TODO: check queues, delete pending dones? */ + if ((status = ksvv_reinit_queue_ptr(npriv)) != 0) { + /* FIXME: do some cleaning? */ + dev_err(&ndev->dev, + "ERROR: KSVV_REINIT_QUEUE_PTR failed with %d code\n", + status); + if ((status = ksvv_reinit_queue_ptr(npriv)) != 0) { + dev_err(&ndev->dev, + "ERROR: KSVV_REINIT_QUEUE_PTR failed " \ + "for second time with %d code\n", + status); + } + } + + /* free DMA memory (TODO: in case of crash delegate free to ksvvd?) 
/**
 ******************************************************************************
 * RECEIVE
 ******************************************************************************
 **/

void m2mlc_hw_rx(struct net_device *ndev, char *data, ssize_t size);

/*
 * Pop one entry from the mailbox-done queue.
 *
 * The "live" flag of the done word is re-read several times (and once
 * after re-reading the hardware pointer register) to work around the
 * hardware racing the shadow-copy update; if it never becomes live an
 * error is logged and the stale entry is returned as-is.
 * On success the entry is cleared, the local tail is advanced and
 * written back to the hardware.
 */
static ksvv_mb_done_t ksvv_consume_mb_done(m2mlc_npriv_t *npriv,
					   ksvv_mb_done_regs_t mbdone)
{
	const uint32_t mb_mask = MSGS_10BIT_MSK;
	ksvv_mb_done_t done;
	ksvv_endpoint_t *endpoint = &npriv->ksvvendpoint;
	volatile uint32_t *endpoint_regs = endpoint->endpoint_regs;
	struct net_device *ndev = npriv->p_priv->ndev;


	done.r = endpoint->mb_done_queue[endpoint->mbdone_0.p.w_tail & mb_mask];
	if (done.p.live != 1) {
		/* retry: the entry may not have landed in memory yet */
		done.r = endpoint->mb_done_queue[endpoint->mbdone_0.p.w_tail \
			& mb_mask];
	}

	/* TODO: add many read !!! */
	if (done.p.live != 1) {
		done.r = endpoint->mb_done_queue[endpoint->mbdone_0.p.w_tail \
			& mb_mask];
	}
	if (done.p.live != 1) {
		/* last resort: refresh the tail from the hardware register */
		mbdone.r = endpoint_regs[M2MLC_RB_MB_DONE_PTRS >> 2];
		done.r = endpoint->mb_done_queue[mbdone.p.w_tail & mb_mask];
	}
	if (done.p.live != 1) {
		dev_err(&ndev->dev,
			"ERROR: ksvv_poll: GOT MB_DONE without live flag\n");
		return done;
	}

	/* Should reset live flag */
	endpoint->mb_done_queue[mbdone.p.w_tail & mb_mask] = 0;

	endpoint->mbdone_0.p.w_tail++;
	mbdone.r = endpoint->mbdone_0.r;

	DEV_DBG(M2MLC_DBG_MSK_NET, &ndev->dev,
		"SET MB_DONE (head=%d, tail=%d) %08x\n",
		mbdone.p.r_head, mbdone.p.w_tail, mbdone.r);

	/* TODO Amortization */
	endpoint_regs[M2MLC_RB_MB_DONE_PTRS >> 2] = mbdone.r;
	return done;
} /* ksvv_consume_mb_done */
/*
 * Consume mailbox messages made deliverable by @done.
 *
 * The done entry is stored into the mbox_window reordering array at its
 * mailbox slot index; the loop then drains every in-order entry starting
 * from the local mailbox tail: CRC-checks the packet, validates its
 * 64-byte chunk count against the done word, hands live packets to
 * m2mlc_hw_rx() and skips "dead" ones.  Returns the number of messages
 * consumed.
 */
static int ksvv_consume_mb(m2mlc_npriv_t *npriv, ksvv_mb_done_t done)
{
	const uint32_t mb_mask = MSGS_10BIT_MSK;
	ksvv_mb_regs_t mb;
	int consumed = 0;
	ksvv_packet_t *pkt;
	char *pkt_data;
	uint16_t gotcrc;
	int size;
	ksvv_endpoint_t *endpoint = &npriv->ksvvendpoint;
	volatile uint32_t *endpoint_regs = endpoint->endpoint_regs;
	struct net_device *ndev = npriv->p_priv->ndev;
#ifdef DEBUG
	int j;
#endif /* DEBUG */


	if (done.p.live != 1) {
		dev_err(&ndev->dev,
			"ERROR: ksvv_consume_mb - done without live");
		return 0;
	}
	mb.r = endpoint->mb_0.r; /* endpoint_regs[M2MLC_RB_MB_STR_PTRS>>2]; */
	endpoint->mbox_window[done.p.mb_ptr & mb_mask] = done.r;
	endpoint->mbox_window_pending++;


	/*
	 * loop over not consumed packets in window;
	 * try to consume this type = if success - mark as consumed
	 * connected - in order? datagram out of order?
	 * loop from tail
	 */
	while (endpoint->mbox_window[mb.p.w_tail & mb_mask] != 0) {
		DEV_DBG(M2MLC_DBG_MSK_NET, &ndev->dev, "ksvv_consume_mb");

		done.r = endpoint->mbox_window[mb.p.w_tail & mb_mask];
		endpoint->mbox_window[mb.p.w_tail & mb_mask] = 0;

		/* each mailbox slot is a fixed 4K buffer */
		pkt = (ksvv_packet_t *)\
			&((uint8_t *)(endpoint->mbox))[4096 * done.p.mb_ptr];
		pkt_data = ((char *)pkt) + sizeof(ksvv_packet_t);

		/* TODO: crc32 */
		/* note: recomputation overwrites pkt->crc16 in place */
		gotcrc = pkt->crc16;
		pkt->crc16 = (uint16_t)crc32(0, (unsigned char *)pkt_data,
					KSVV_GET_SIZE(*pkt));
		if (pkt->crc16 != gotcrc) {
			dev_err(&ndev->dev,
				"ERROR: MSG CRC mismatch, " \
				"got %04x in msg, " \
				"computed %04x for size %d\n",
				gotcrc, pkt->crc16,
				KSVV_GET_SIZE(*pkt));
		}
		/* cross-check packet length against 64-byte chunk count */
		size = KSVV_GET_SIZE(*pkt) + sizeof(ksvv_packet_t);
		size = (size + 63) / 64;
		if (((size == 64) && (done.p.packet_num != 0)) ||
				((size != 64) && (size != done.p.packet_num))) {
			dev_err(&ndev->dev,
				"ERROR: PKT size / done size mismatch, "\
				"got 0x%04x bytes in msg, " \
				"computed %d pkts; got %d x 64 byte " \
				"pkts in done\n",
				KSVV_GET_SIZE(*pkt), size,
				done.p.packet_num);
		}
		/*
		 * add accounting (recv_bytes/recv_packets)
		 * post msg to stream with size GET_SIZE
		 * Don't work on dead message
		 */
		if (done.p.dead == 0) {
			DEV_DBG(M2MLC_DBG_MSK_NET, &ndev->dev,
				"ksvv_consume_mb - pack received");
			/* pack received !!! */
#ifdef DEBUG
			printk(KERN_DEBUG "------------------------------\n");
			for (j = 0;
				j < (KSVV_GET_SIZE(*pkt) + sizeof(ksvv_packet_t));
				j++) {
				printk("%02X ", *(((unsigned char *)pkt) + j));
			}
			printk(KERN_DEBUG "\n------------------------------\n");
#endif /* DEBUG */
			m2mlc_hw_rx(ndev, pkt_data, KSVV_GET_SIZE(*pkt));
		} else {
			dev_err(&ndev->dev,
				"ERROR: DEAD!=0 incoming packet " \
				"in mb_ptr %d sz %d of typesize %04x " \
				"pkt= %016llx %016llx " \
				"%016llx\n done is %016llx: " \
				"live=%d dead=%d\n",
				done.p.mb_ptr, done.p.packet_num,
				pkt->type_size,
				((uint64_t *)pkt)[0],
				((uint64_t *)pkt)[1],
				((uint64_t *)pkt)[2], done.r,
				done.p.live, done.p.dead);
		}

		mb.p.w_tail++;
		consumed++;
		endpoint->mbox_window_pending--;

		/* TODO: correct exit */
		/*
		unsigned long timestart;
		timestart = jiffies;
		if (time_after(jiffies, timestart + HZ)) return -1;
		*/
	} /* while */

	/* Save new value of mb tail */
	endpoint->mb_0.r = mb.r;
	/* TODO: Amortization */
	endpoint_regs[M2MLC_RB_MB_STR_PTRS >> 2] = mb.r;

	return consumed;
} /* ksvv_consume_mb */
+ * + * return 1 for some work done + */ +int ksvv_poll(m2mlc_npriv_t *npriv) +{ + int consumed; + ksvv_mb_done_regs_t mbdone; + ksvv_mb_done_t done; + ksvv_endpoint_t *endpoint = &npriv->ksvvendpoint; + volatile uint32_t *endpoint_regs = endpoint->endpoint_regs; + + + /* mbdone.r = endpoint->done_regs->mb_write_done_ptr; */ + mbdone.r = endpoint_regs[M2MLC_RB_MB_DONE_PTRS>>2]; + /* ^ HW BUG ^ */ + if (mbdone.p.r_head == endpoint->mbdone_0.p.w_tail) + return 0; + + done = ksvv_consume_mb_done(npriv, mbdone); + consumed = ksvv_consume_mb(npriv, done); + + return consumed; +} /* ksvv_poll */ + + +/** + ****************************************************************************** + * SEND + ****************************************************************************** + **/ + +/** + * Post DMA descriptor to send queue + * + * return descriptor id + */ +static int ksvv_post_dma_desc(m2mlc_npriv_t *npriv, ksvv_dma_desc_t *desc) +{ + int saved_head; + ksvv_dma_regs_t dma; + ksvv_endpoint_t *endpoint = &npriv->ksvvendpoint; + volatile uint32_t *endpoint_regs = endpoint->endpoint_regs; + + + /* use local copy */ + dma.r = endpoint->dma_0.r; + + saved_head = dma.p.w_head & endpoint->dma_queue_mask; + /* new descriptor is added */ + dma.p.w_head = (dma.p.w_head + 1) & endpoint->dma_queue_mask1; + /* copydesc */ + memcpy((void *)&(endpoint->dma_desc_queue[(saved_head) * 8]), + (void *)desc, sizeof(ksvv_dma_desc_t)); + + __sync_synchronize(); + /* Post PIO write to the H/W to read out descriptor */ + endpoint_regs[M2MLC_RB_DMA_STR_PTRS >> 2] = dma.r; + /* update local copy */ + endpoint->dma_0.r = dma.r; + + return saved_head; +} /* ksvv_post_dma_desc */ + +/** + * Wait for current DMA Descriptor (out-of-order is not implemented) + * + * return 1 if sent; <=0 if error + */ +static int ksvv_wait_dma_done(m2mlc_npriv_t *npriv, int desc_id) +{ + const uint32_t dma_done_mask = MSGS_12BIT_MSK; + ksvv_dma_done_regs_t dmadone; + ksvv_dma_done_t done; + ksvv_endpoint_t *endpoint = 
&npriv->ksvvendpoint; + volatile uint32_t *endpoint_regs = endpoint->endpoint_regs; + struct net_device *ndev = npriv->p_priv->ndev; + unsigned long timestart; + + + done.r = 0; + /* dmadone.r = endpoint->done_regs->dma_head_done_ptr; */ + dmadone.r = endpoint_regs[M2MLC_RB_DMA_DONE_PTRS >> 2]; + /* ^ HW bug ^ */ + timestart = jiffies; + while (dmadone.p.r_head == endpoint->dmadone_0.p.w_tail) { + /* dmadone.r = endpoint->done_regs->dma_head_done_ptr; */ + dmadone.r = endpoint_regs[M2MLC_RB_DMA_DONE_PTRS >> 2]; + /* ^ HW bug ^ */ + if (time_after(jiffies, timestart + HZ)) + return -1; + } + + done.r = endpoint->dma_done_queue[endpoint->dmadone_0.p.w_tail \ + & dma_done_mask]; + if (!done.p.live) { + done.r = endpoint->dma_done_queue[endpoint->dmadone_0.p.w_tail \ + & dma_done_mask]; + } + if (!done.p.live) { + done.r = endpoint->dma_done_queue[endpoint->dmadone_0.p.w_tail \ + & dma_done_mask]; + } + if (!done.p.live) { + dmadone.r = endpoint_regs[M2MLC_RB_DMA_DONE_PTRS >> 2]; + done.r = endpoint->dma_done_queue[endpoint->dmadone_0.p.w_tail \ + & dma_done_mask]; + } + if (!done.p.live) { + dev_err(&ndev->dev, + "ERROR: ksvv_wait_dma_done: GOT DMA_DONE " \ + "without live flag (3 retries)\n"); + return -(256 + 1); + } + endpoint->dma_done_queue[endpoint->dmadone_0.p.w_tail \ + & dma_done_mask] = 0; + + endpoint->dmadone_0.p.w_tail = (endpoint->dmadone_0.p.w_tail + 1); + dmadone.r = endpoint->dmadone_0.r; + + /* Amortization */ + endpoint_regs[M2MLC_RB_DMA_DONE_PTRS >> 2] = dmadone.r; + + /* parse dma done */ + if (done.p.cplstatus != 0) { + dmadone.r = endpoint_regs[M2MLC_RB_DMA_DONE_PTRS >> 2]; + dev_err(&ndev->dev, + "ERROR: DMA_DONE with nonzero CplStatus " \ + "(head=%d, tail=%d): %08x : live: %d, " \ + "CplStat: 0x%02X, DescID:%d\n", + dmadone.p.r_head, dmadone.p.w_tail, done.r, + done.p.live, done.p.cplstatus, done.p.desc_id); + return -done.p.cplstatus; + } + if (done.p.desc_id != desc_id) { + dev_err(&ndev->dev, + "ERROR: DMA_DONE for different dma desc_id %d 
" \ + "(expected %d)\n", done.p.desc_id, desc_id); + return -(256 + 2); + } + + return 1; +} /* ksvv_wait_dma_done */ + +/** + * Send one packet + * @pkt_sz: in bytes, unaligned + * + * return 1 if sent; <=0 if error; + */ +static int ksvv_send_pkt(m2mlc_npriv_t *npriv, int rem_node_id, int rem_endp_id, + size_t pkt_offset_dma1, uint32_t pkt_sz) +{ + int status; + ksvv_dma_desc_t desc; + ksvv_target_ptr_msg_t ptr; + uint64_t req_ptr; + int desc_id; + ksvv_endpoint_t *endpoint = &npriv->ksvvendpoint; + + + ptr.r = 0; + ptr.p.Mbox = rem_endp_id; + desc.Target_ptr = ptr.r; + + req_ptr = (uint64_t)((uintptr_t)endpoint->dma1_phys + pkt_offset_dma1); + + desc.Request_ptr = req_ptr; + desc.Format.r = 0; + desc.Format.p.InOrder = 1; /* Wait for end of early descriptors */ + desc.Format.p.RemIntReq = 0; + desc.Format.p.LocIntReq = 0; + desc.Format.p.Format_Type = M2MLC_FMT_TYPE_MSGL; + desc.Transfer_size = (pkt_sz + 3) / 4; /* Convert bytes to words */ + desc.Parameter.r = 0; + desc.Parameter.p.BEmaskLBE = 0xF; + desc.Parameter.p.BEmaskFBE = 0xF; + desc.Parameter.p.DestId = rem_node_id; + desc.Parameter.p.Route = 0; + desc.Remote_Doorbell = 0; + + desc_id = ksvv_post_dma_desc(npriv, &desc); + status = ksvv_wait_dma_done(npriv, desc_id); + + return status; +} /* ksvv_send_pkt */ + +/** + * Send data using stream (and dma?) 
+ * + * return amount of sent data + */ +uint32_t ksvv_send(m2mlc_npriv_t *npriv, int rem_node_id, int rem_endp_id, + void *data, uint32_t send_size) +{ + int i; + int status = 0; + int retry = 0; + char *pkt_data; + ksvv_packet_t *packet; + ksvv_endpoint_t *endpoint = &npriv->ksvvendpoint; + struct net_device *ndev = npriv->p_priv->ndev; + unsigned long timestart; +#ifdef DEBUG + int j; +#endif /* DEBUG */ + + + for (i = 0; i < send_size;) { + int pkt_size; + packet = (ksvv_packet_t *)(endpoint->dma1_virt + \ + endpoint->dma1_pos * 4096); + endpoint->dma1_pos = (endpoint->dma1_pos + 1) % 1024; + pkt_data = ((char *)packet) + sizeof(ksvv_packet_t); + + if (KSVV_MSG_PAYLOAD_SIZE < send_size) { + dev_err(&ndev->dev, + "ERROR: Can't send packet to destination, " \ + "%u bytes > PAYLOAD_SIZE %lu\n", + send_size, KSVV_MSG_PAYLOAD_SIZE); + return 0; + } else { + pkt_size = send_size + sizeof(ksvv_packet_t); + } + packet->type_size = KSVV_TYPE_SIZE(KSVV_PKT_NET, send_size); + packet->crc16 = 0; + + DEV_DBG(M2MLC_DBG_MSK_NET, &ndev->dev, + "pkt data copy memcpy(%p, %p, %d)\n", + pkt_data, data, send_size); + + memcpy(pkt_data, data, send_size); + packet->crc16 = (uint16_t)crc32(0, pkt_data, send_size); + +#ifdef DEBUG + printk(KERN_DEBUG "------------------------------\n"); + for (j = 0; j < pkt_size; j++) { + printk("%02X ", *(((unsigned char *)packet) + j)); + } + printk(KERN_DEBUG "\n------------------------------\n"); +#endif /* DEBUG */ + + DEV_DBG(M2MLC_DBG_MSK_NET, &ndev->dev, + "send => send_pkt(%p, %d, %d, %08lx, %d)\n", + endpoint, rem_node_id, rem_endp_id, + ((char *)packet) - ((char *)endpoint->dma1_virt), + pkt_size); + + status = ksvv_send_pkt(npriv, rem_node_id, rem_endp_id, + ((char *)packet) - ((char *)endpoint->dma1_virt), + pkt_size); + if (status == 1) { + /* success dma_done as ack */ + i += send_size; + retry = 0; + } else if ((status == -0x40) || (status == -0x48)) { + /* Recv Mailbox full retry */ + dev_err(&ndev->dev, "ERROR: RETRY %d\n", retry); + 
retry++; + + /* usleep(10000 * retry); */ + timestart = jiffies; + do {} while (!time_after(jiffies, + timestart + (HZ * retry))); + + if (retry > 5) { + dev_err(&ndev->dev, + "ERROR: Can't send packet" \ + " to destination, 5 x %d (%02x)" \ + " status from ksvv_send_pkt at" \ + " byte %d of %d bytes\n", + -status, -status, i, send_size); + return i; + } + } else { + /* TODO RETRY? */ + dev_err(&ndev->dev, + "ERROR: Can't send packet to destination, " \ + "%d (%02x) status from ksvv_send_pkt " \ + "at byte %d of %d bytes\n", + -status, -status, i, send_size); + return i; + } + } + + return i; +} /* ksvv_send */ + +/* EOF */ diff --git a/drivers/mcst/m2mlc/m2mlc_ksvv.h b/drivers/mcst/m2mlc/m2mlc_ksvv.h new file mode 100644 index 000000000000..5173d54fb7b0 --- /dev/null +++ b/drivers/mcst/m2mlc/m2mlc_ksvv.h @@ -0,0 +1,340 @@ +#ifndef M2MLC_KSVV_H__ +#define M2MLC_KSVV_H__ + + +/* ====================== KSVV/M2MLC structures ========================= */ +/* Mailbox queue pointers */ +typedef union { + struct { +#ifdef __sparc__ /* ARCH: e90, e90s */ + uint32_t sce : 1; /* [31:31] */ + uint32_t res1 : 4; /* [30:27] */ + uint32_t w_tail :11; /* [26:16] */ + uint32_t res2 : 5; /* [15:11] */ + uint32_t r_head :11; /* [10:00] */ +#else /* __e2k__ */ + uint32_t r_head :11; /* [10:00] */ + uint32_t res2 : 5; /* [15:11] */ + uint32_t w_tail :11; /* [26:16] */ + uint32_t res1 : 4; /* [30:27] */ + uint32_t sce : 1; /* [31:31] */ +#endif /* __sparc__ */ + } __packed p; + uint32_t r; +} __packed ksvv_mb_regs_t; + +/* Mailbox Done queue pointers */ +typedef union { + struct { +#ifdef __sparc__ /* ARCH: e90, e90s */ + uint32_t sce : 1; /* [31:31] */ + uint32_t res1 : 4; /* [30:27] */ + uint32_t w_tail :11; /* [26:16] */ + uint32_t res2 : 5; /* [15:11] */ + uint32_t r_head :11; /* [10:00] */ +#else /* __e2k__ */ + uint32_t r_head :11; /* [10:00] */ + uint32_t res2 : 5; /* [15:11] */ + uint32_t w_tail :11; /* [26:16] */ + uint32_t res1 : 4; /* [30:27] */ + uint32_t sce : 1; /* 
[31:31] */ +#endif /* __sparc__ */ + } __packed p; + uint32_t r; +} __packed ksvv_mb_done_regs_t; + +/* Mailbox done format */ +typedef union { + struct { +#ifdef __sparc__ /* ARCH: e90, e90s */ + uint32_t live : 1; /* [31:31] */ + uint32_t dead : 1; /* [30:30] */ + uint32_t rsv3 :22; /* [29:08] */ + uint32_t src_mb : 8; /* [07:00] */ + + uint32_t rsv1 : 4; /* [31:28] */ + uint32_t packet_num : 8; /* [27:20] in 64bytes */ + uint32_t rsv2 : 1; /* [19:19] */ + uint32_t mb_ptr :11; /* [18:08] */ /* FIXME? */ + uint32_t src_id : 8; /* [07:00] */ +#else /* __e2k__ */ + uint32_t src_id : 8; /* [07:00] */ + uint32_t mb_ptr :11; /* [18:08] */ /* FIXME? */ + uint32_t rsv2 : 1; /* [19:19] */ + uint32_t packet_num : 8; /* [27:20] in 64bytes */ + uint32_t rsv1 : 4; /* [31:28] */ + + uint32_t src_mb : 8; /* [07:00] */ + uint32_t rsv3 :22; /* [29:08] */ + uint32_t dead : 1; /* [30:30] */ + uint32_t live : 1; /* [31:31] */ +#endif /* __sparc__ */ + } __packed p; + uint64_t r; +} __packed ksvv_mb_done_t; + +/* Doorbell queue (IN) - 256 elements */ +typedef union { + struct { +#ifdef __sparc__ /* ARCH: e90, e90s */ + uint32_t sce : 1; /* [31:31] */ + uint32_t res1 : 6; /* [30:25] */ + uint32_t w_tail : 9; /* [24:16] RW */ + uint32_t res2 : 7; /* [15:09] */ + uint32_t r_head : 9; /* [08:00] RO */ +#else /* __e2k__ */ + uint32_t r_head : 9; /* [08:00] RO */ + uint32_t res2 : 7; /* [15:09] */ + uint32_t w_tail : 9; /* [24:16] RW */ + uint32_t res1 : 6; /* [30:25] */ + uint32_t sce : 1; /* [31:31] */ +#endif /* __sparc__ */ + } __packed p; + uint32_t r; +} __packed ksvv_db_regs_t; + +/* DB format in queue */ +typedef union { + struct { +#ifdef __sparc__ /* ARCH: e90, e90s */ + uint32_t db_dst : 8; /* [31:24] */ + uint32_t db :24; /* [23:00] */ + uint32_t live : 1; /* [31:31] */ + uint32_t rsv1 :15; /* [30:16] */ + uint32_t src_mb : 8; /* [15:08] */ + uint32_t src_id : 8; /* [07:00] */ +#else /* __e2k__ */ + uint32_t src_id : 8; /* [07:00] */ + uint32_t src_mb : 8; /* [15:08] */ + 
uint32_t rsv1 :15; /* [30:16] */ + uint32_t live : 1; /* [31:31] */ + uint32_t db :24; /* [23:00] */ + uint32_t db_dst : 8; /* [31:24] */ +#endif /* __sparc__ */ + } __packed p; + uint64_t r; +} __packed ksvv_db_entry_t; + +/* DB format in descriptors */ +typedef union { + struct { +#ifdef __sparc__ /* ARCH: e90, e90s */ + uint32_t db_dst : 8; /* [31:24] */ + uint32_t db :24; /* [23:00] */ +#else /* __e2k__ */ + uint32_t db :24; /* [23:00] */ + uint32_t db_dst : 8; /* [31:24] */ +#endif /* __sparc__ */ + } __packed p; + uint32_t r; +} __packed ksvv_db_desc_t; + +/* DMA descriptor queue (OUT) */ +typedef union { + struct { +#ifdef __sparc__ /* ARCH: e90, e90s */ + uint32_t sce : 1; /* [31:31] */ + uint32_t res1 : 2; /* [30:29] */ + uint32_t w_head :13; /* [28:16] RW */ + uint32_t res2 : 3; /* [15:13] */ + uint32_t r_tail :13; /* [12:00] RO */ +#else /* __e2k__ */ + uint32_t r_tail :13; /* [12:00] RO */ + uint32_t res2 : 3; /* [15:13] */ + uint32_t w_head :13; /* [28:16] RW */ + uint32_t res1 : 2; /* [30:29] */ + uint32_t sce : 1; /* [31:31] */ +#endif /* __sparc__ */ + } __packed p; + uint32_t r; +} __packed ksvv_dma_regs_t; + +/* DMA Descriptor format - 11.2 */ +typedef struct { + uint64_t Request_ptr; /* bits [1:0] are zero */ + uint64_t Target_ptr; /* or target mbx-ksvv_target_ptr_msg_t; + bits [1:0] are zero */ + union { + struct { +#ifdef __sparc__ /* ARCH: e90, e90s */ + uint32_t Format_Type : 8; /* [31:24] */ + uint32_t LocIntReq : 1; /* [23] */ + uint32_t RemIntReq : 1; /* [22] */ + uint32_t InOrder : 1; /* [21] */ + uint32_t _reserved1 :21; /* [20:00] */ +#else /* __e2k__ */ + uint32_t _reserved1 :21; /* [20:00] */ + uint32_t InOrder : 1; /* [21] */ + uint32_t RemIntReq : 1; /* [22] */ + uint32_t LocIntReq : 1; /* [23] */ + uint32_t Format_Type : 8; /* [31:24] */ +#endif /* __sparc__ */ + } __packed p; + uint32_t r; + } __packed Format; + uint32_t Transfer_size; /* in units of dwords (4bytes) */ + union { + struct { +#ifdef __sparc__ /* ARCH: e90, e90s */ + 
uint32_t BEmaskLBE : 4; /* [31:28] */ + uint32_t BEmaskFBE : 4; /* [27:24] */ + uint32_t _reserved1 : 8; /* [23:16] */ + uint32_t DestId : 8; /* [15:08] */ + uint32_t Route : 4; /* [07:04] */ + uint32_t _reserved0 : 4; /* [03:00] */ +#else /* __e2k__ */ + uint32_t _reserved0 : 4; /* [03:00] */ + uint32_t Route : 4; /* [07:04] */ + uint32_t DestId : 8; /* [15:08] */ + uint32_t _reserved1 : 8; /* [23:16] */ + uint32_t BEmaskFBE : 4; /* [27:24] */ + uint32_t BEmaskLBE : 4; /* [31:28] */ +#endif /* __sparc__ */ + } __packed p; + uint32_t r; + } __packed Parameter; + uint32_t Remote_Doorbell; +} __packed ksvv_dma_desc_t; + +/* DMA descriptor done queue */ +typedef union { + struct { +#ifdef __sparc__ /* ARCH: e90, e90s */ + uint32_t sce : 1; /* [31:31] */ + uint32_t res1 : 2; /* [30:29] */ + uint32_t w_tail :13; /* [28:16] RW */ + uint32_t res2 : 3; /* [15:13] */ + uint32_t r_head :13; /* [12:00] RO */ +#else /* __e2k__ */ + uint32_t r_head :13; /* [12:00] RO */ + uint32_t res2 : 3; /* [15:13] */ + uint32_t w_tail :13; /* [28:16] RW */ + uint32_t res1 : 2; /* [30:29] */ + uint32_t sce : 1; /* [31:31] */ +#endif /* __sparc__ */ + } __packed p; + uint32_t r; +} __packed ksvv_dma_done_regs_t; + +/* DMA desc done format */ +typedef union { + struct { +#ifdef __sparc__ /* ARCH: e90, e90s */ + uint32_t live : 1; /* [31:31] */ + uint32_t rsv1 : 7; /* [30:24] */ + uint32_t cplstatus : 8; /* [23:16] */ + uint32_t desc_id :16; /* [15:00] */ /* FIXME? */ +#else /* __e2k__ */ + uint32_t desc_id :16; /* [15:00] */ /* FIXME? 
*/ + uint32_t cplstatus : 8; /* [23:16] */ + uint32_t rsv1 : 7; /* [30:24] */ + uint32_t live : 1; /* [31:31] */ +#endif /* __sparc__ */ + } __packed p; + uint32_t r; +} __packed ksvv_dma_done_t; + +/* KSVV Target pointer encoding for MsgL */ +typedef union { + uint64_t r; + struct { +#ifdef __sparc__ /* ARCH: e90, e90s */ + uint64_t _res2 : 32; /* [63:32] */ + uint64_t _res1 : 12; /* [31:20] */ + uint64_t Mbox : 8; /* [19:12] */ + uint64_t _res0 : 10; /* [11:02] */ + uint64_t Zero : 2; /* [01:00] */ +#else /* __e2k__ */ + uint64_t Zero : 2; /* [01:00] */ + uint64_t _res0 : 10; /* [11:02] */ + uint64_t Mbox : 8; /* [19:12] */ + uint64_t _res1 : 12; /* [31:20] */ + uint64_t _res2 : 32; /* [63:32] */ +#endif /* __sparc__ */ + } __packed p; +} ksvv_target_ptr_msg_t; +/* ====================== KSVV/M2MLC structures ========================= */ + +/* ===== Packet header ===== */ + +/* 0xX000 - 0..15; size=0..0xfff (size+1)= 1..4096 bytes of full packet+hdr */ +/* 8byte header of ksvv packet with 0xf type 0xfff size-1, crc16, + * and 32-bit field + */ +typedef struct { + uint16_t type_size; + uint16_t crc16; +} ksvv_packet_t; + +#define KSVV_PKT_NET (14) /* type_size */ + +/** frame size */ +/* 4k-ksvv-14 - DMA */ +#define KSVV_MSG_PAYLOAD_SIZE (4096 - sizeof(ksvv_packet_t)) +#define M2MLC_ETH_HEAD_LEN (14) +#define M2MLC_MTU (KSVV_MSG_PAYLOAD_SIZE - M2MLC_ETH_HEAD_LEN - 4) + +/* ===== Packet header ===== */ + + +/* this struct included in m2mlc_npriv_t */ +typedef struct ksvv_endpoint { + /* PIO registers of endpoint; pio and low-level data structures */ + volatile uint32_t *endpoint_regs; /* 0x100 */ +#if 0 + volatile uint32_t *pio_payload; /* 1x256 bytes */ + volatile uint32_t *pio_done_queue; /* 16*4 = 64 bytes */ + volatile uint32_t *pio_data_queue; /* 4K = 16*256 */ +#endif /* 0 */ + volatile m2mlc_done_regs_t *done_regs; /* mb_done w/d, + db_queue w/d, dma_done h/d */ + /* Endpoint queues */ +#if 0 + volatile uint64_t *db_queue; /* 256*8bytes */ +#endif /* 0 */ + 
volatile uint32_t *dma_desc_queue; /* 4096*32b */ + volatile uint32_t *dma_done_queue; /* 4096*4b = 16k */ + volatile uint64_t *mb_done_queue; /* 1024*8b = 8k */ + volatile void *mbox; /* 1024 * 4k = 4M */ + + /* Original values of queue pointers */ + ksvv_mb_done_regs_t mbdone_0; + ksvv_mb_regs_t mb_0; + ksvv_dma_done_regs_t dmadone_0; + ksvv_dma_regs_t dma_0; + ksvv_db_regs_t db_0; + + /* Flags for consumed mb/dma entries for out-of-order dma_done; + * flag=1 not consumed + * flag=0 free + * mbox_window is copy of corresponding done + */ +#define KSVV_MBOX_WIN_SIZE 1024 + uint64_t mbox_window[KSVV_MBOX_WIN_SIZE]; +#define KSVV_DMA_WIN_SIZE 4096 + uint8_t dma_window[KSVV_DMA_WIN_SIZE]; /* need if !one tx */ + uint32_t mbox_window_pending; + + /* DMA queue size management */ + uint32_t dma_queue_size; /* =0 */ + uint32_t dma_queue_mask; /* =0 */ + uint32_t dma_queue_mask1; /* =0 */ + + /* Memory management */ +#define KSVV_MEM_SEGMENTS 100 + m2mlc_mem_ptrs_t mems_ptrs[KSVV_MEM_SEGMENTS]; + int cur_mem; + +#define KSVV_MEM_SIZE 1024 + /* Special DMA regions of 1024 4KB pages = 4MB */ + void *dma1_virt; /* msg sender buffer */ + void *dma1_phys; + int dma1_pos; /* Position of current 4KB segment */ + void *dma2_virt; /* receiver buffer */ + void *dma2_phys; + void *dma3_virt; /* dma sender buffer */ + void *dma3_phys; +} ksvv_endpoint_t; + + +#endif /* M2MLC_KSVV_H__ */ diff --git a/drivers/mcst/m2mlc/m2mlc_main.c b/drivers/mcst/m2mlc/m2mlc_main.c new file mode 100644 index 000000000000..d0beb9597c0f --- /dev/null +++ b/drivers/mcst/m2mlc/m2mlc_main.c @@ -0,0 +1,963 @@ +/** + * m2mlc.c - M2MLC module device driver + */ + +#include + +#include "m2mlc.h" + + +#ifdef USE_MUL2ALIGN +#define MUL2ALIGN 2 +#else +#define MUL2ALIGN 1 +#endif /* USE_MUL2ALIGN */ + + +u32 m2mlc_read_reg32(void __iomem *base_addr, u32 port); +void m2mlc_write_reg32(void __iomem *base_addr, u32 port, u32 val); +irqreturn_t m2mlc_irq_handler(int irq, void *dev_id); +u16 
m2mlc_hw_get_niccapability(struct pci_dev *pdev); +int m2mlc_hw_reset(void __iomem *base_addr, int first); +void m2mlc_hw_init(m2mlc_priv_t *priv); +void m2mlc_hw_set_endianes(void __iomem *base_addr); + +int m2mlc_cdev_register(m2mlc_priv_t *priv); +void m2mlc_cdev_remove(m2mlc_priv_t *priv); + +#ifdef ENABLE_NET_DEV +int m2mlc_net_register(m2mlc_priv_t *priv); +void m2mlc_net_remove(m2mlc_priv_t *priv); +#endif /* ENABLE_NET_DEV */ + +int __init m2mlc_dev_init(void); +void m2mlc_dev_exit(void); + + +/** + ****************************************************************************** + * Module parameters + ****************************************************************************** + **/ + +u16 rtl_version = NICCPB_PROCVAL; +module_param(rtl_version, ushort, S_IRUGO); +MODULE_PARM_DESC(rtl_version, + "RTL Version (default: 20, don't check: 0)"); + +#ifdef DEBUG +u32 debug_mask = + M2MLC_DBG_MSK_UNK + | M2MLC_DBG_MSK_MODULE + | M2MLC_DBG_MSK_PCI + | M2MLC_DBG_MSK_CDEV + | M2MLC_DBG_MSK_NET +#ifndef TESTWOHARDWARE + | M2MLC_DBG_MSK_MEM + | M2MLC_DBG_MSK_HW + | M2MLC_DBG_MSK_IRQ + | M2MLC_DBG_MSK_REGS +#endif /* TESTWOHARDWARE */ + ; +#else +u32 debug_mask = 0; +#endif + +module_param(debug_mask, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(debug_mask, "Mask for debug level (default: 0)"); + +u32 softreset_enable = 0; + +module_param(softreset_enable, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(softreset_enable, + "Set to 1 to enable softreset on reload (default: 0)"); + +u32 timeout_retry = 0; +u32 timeout_counter = 0; + +module_param(timeout_retry, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(timeout_retry, + "Set retry count for DMA descriptors to 0..3 (default: 0)"); + +module_param(timeout_counter, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(timeout_counter, + "Set timeout to n*5 mks (default: 0; typical: 100000)"); + +unsigned int dma_max_seg_size = 65536; +unsigned long dma_seg_boundary = 0xFFFFFFFF; + +module_param(dma_max_seg_size, uint, S_IRUGO|S_IWUSR); 
+MODULE_PARM_DESC(dma_max_seg_size, + "Set max_seg_size for DMA memory (default: 65536)"); + +module_param(dma_seg_boundary, ulong, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(dma_seg_boundary, + "Set seg_boundary for DMA memory (default: 0xFFFFFFFF)"); + +int numa_node = 2; + +module_param(numa_node, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(numa_node, "Set numa_node for memalloc (default: 2)"); + + +/** + * Module parameters checker + * + * Returns 0 on success, negative on failure + **/ +static int check_parameters(void) +{ + if (rtl_version > NICCPB_PROCVAL) { + ERR_MSG("ERROR: Invalid parameter rtl_version: %u" \ + " (max valid: %u)\n", + rtl_version, NICCPB_PROCVAL); + return -1; + } + + if (softreset_enable > 1) { + ERR_MSG("ERROR: Invalid parameter softreset_enable: %u" \ + " (max valid: %u)\n", + softreset_enable, 1); + return -1; + } + + if (timeout_retry > 3) { + ERR_MSG("ERROR: Invalid parameter timeout_retry: %u" \ + " (max valid: %u)\n", + timeout_retry, 3); + return -1; + } + + if ((timeout_counter != 0) && (timeout_counter < 1000)) { + ERR_MSG("ERROR: Invalid parameter timeout_counter: %u" \ + " (min valid: %u = 5ms)\n", + timeout_counter, 1000); + return -1; + } + + if (timeout_counter > 1024*1024*1024) { + ERR_MSG("ERROR: Invalid parameter timeout_counter: %u" \ + " (max valid: %u)\n", + timeout_counter, 1024*1024*1024); + return -1; + } + + return 0; +} /* check_parameters */ + + +/** + ****************************************************************************** + * Fake dev Part + ****************************************************************************** + **/ + +#ifdef USE_DUALIOLINK + +static int fakedev_probe(struct platform_device *device) +{ + /* create iommu mapping */ + device->dev.dma_parms = devm_kzalloc(&device->dev, + sizeof(*(device->dev.dma_parms)), + GFP_KERNEL); + if (!(device->dev.dma_parms)) + return -ENOMEM; + + dev_info(&device->dev, "fakedev registered\n"); + return 0; +} /* fakedev_probe */ + +static int fakedev_remove(struct 
platform_device *device) +{ + return 0; +} /* fakedev_remove */ + +static struct platform_driver fakedev_driver = { + .driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + }, + .probe = fakedev_probe, + .remove = fakedev_remove, +}; + +static int fakedev_init(m2mlc_priv_t *priv) +{ + int err; + + err = platform_driver_register(&fakedev_driver); + if (err) + return err; + + priv->fakedev = platform_device_register_simple(KBUILD_MODNAME, + -1, NULL, 0); + if (IS_ERR(priv->fakedev)) { + err = PTR_ERR(priv->fakedev); + goto fail; + } + + return 0; + +fail: + platform_driver_unregister(&fakedev_driver); + return err; +} /* fakedev_init */ + +static void fakedev_exit(m2mlc_priv_t *priv) +{ + platform_device_unregister(priv->fakedev); + platform_driver_unregister(&fakedev_driver); +} /* fakedev_exit */ + +#endif /* USE_DUALIOLINK */ + + +/** + ****************************************************************************** + * Board Init Part + ****************************************************************************** + **/ + +#define DMA_ALLOC_RAM(NM_size, NM_buff, NM_handle, SIZ, ELB, S) \ +do { \ + NM_size = SIZ; \ + NM_buff = dma_alloc_coherent(&pdev->dev, NM_size, \ + &(NM_handle), GFP_KERNEL); \ + if (!NM_buff) { \ + dev_err(&pdev->dev, \ + "ERROR: Can't allocate %zu(0x%zX) memory, aborting\n", \ + NM_size, NM_size); \ + err = -ENOMEM; \ + goto ELB; \ + } \ + assert(!(NM_size & (PAGE_SIZE-1))); \ + assert(!(NM_handle & (PAGE_SIZE-1))); \ + DEV_DBG(M2MLC_DBG_MSK_MEM, &pdev->dev, \ + "Alloc %zu(0x%zX) bytes at 0x%p (hw:0x%llX) for %s\n", \ + NM_size, NM_size, NM_buff, (unsigned long long)NM_handle, S); \ +} while (0) + +#define DMA_FREE_RAM(NM_size, NM_buff, NM_handle) \ +do { \ + if (NM_buff) \ + dma_free_coherent(&pdev->dev, NM_size, \ + NM_buff, NM_handle); \ +} while (0) + +#ifdef USE_ALLOCPOOL + +#define DMA_ALLOC_POOL(NM_pool, NM_size, NM_buff, NM_handle, SIZ, ELB, S) \ +do { \ + NM_size = SIZ; \ + NM_buff = dma_pool_alloc(NM_pool, GFP_KERNEL, 
&(NM_handle)); \ + if (!NM_buff) { \ + dev_err(&pdev->dev, \ + "ERROR: Can't allocate %zu(0x%zX) pool, aborting\n", \ + NM_size, NM_size); \ + err = -ENOMEM; \ + goto ELB; \ + } \ + assert(!(NM_size & (NM_size-1))); \ + assert(!(NM_handle & (NM_size-1))); \ + DEV_DBG(M2MLC_DBG_MSK_MEM, &pdev->dev, \ + "Alloc %zu(0x%zX) bytes at 0x%p (hw:0x%llX) for %s\n", \ + NM_size, NM_size, NM_buff, (unsigned long long)NM_handle, S); \ +} while (0) + +#define DMA_FREE_POOL(NM_pool, NM_buff, NM_handle) \ +do { \ + if (NM_buff) \ + dma_pool_free(NM_pool, NM_buff, NM_handle); \ +} while (0) + +#endif /* USE_ALLOCPOOL */ + + +/** + * Driver Initialization Routine + */ +int m2mlc_init_board(struct pci_dev *pdev, void __iomem *bar_addr[], + phys_addr_t bar_addr_bus[]) +{ + int err; + int i; + m2mlc_priv_t *priv; + u16 nic_capab, rtl_ver; + + + assert(pdev); + if (!pdev) + return -ENODEV; + + assert(bar_addr[0]); + assert(bar_addr[1]); + assert(bar_addr[2]); +#ifndef USE_RDMA2_MODE + assert(bar_addr[3]); +#endif /* USE_RDMA2_MODE */ + + /* Check RTL Version */ + nic_capab = m2mlc_hw_get_niccapability(pdev); + rtl_ver = NICCPB_GET_PROCVAL(nic_capab); + if (rtl_version != 0) { + if (rtl_ver != rtl_version) { + dev_err(&pdev->dev, + "ERROR: wrong RTL version (%d), aborting\n", + rtl_ver); + err = -EFAULT; + goto err_out; + } + } + dev_info(&pdev->dev, "rtl version %d\n", rtl_ver); + dev_info(&pdev->dev, "Arbiter Config: " \ + "DMA0->IOLink%d, DMA1->IOLink%d, DMA2->IOLink%d\n", + NICCPB_GET_AACFG_DMA0(nic_capab), + NICCPB_GET_AACFG_DMA1(nic_capab), + NICCPB_GET_AACFG_DMA2(nic_capab)); + + /* allocate memory for priv* */ + priv = kzalloc(sizeof(m2mlc_priv_t), GFP_KERNEL); + if (!priv) { + dev_err(&pdev->dev, + "ERROR: Cannot allocate memory for priv*, aborting\n"); + err = -ENOMEM; + goto err_out; + } + pci_set_drvdata(pdev, priv); + + /* init priv-> */ + priv->pdev = pdev; + priv->ecs_base = bar_addr[0]; + priv->reg_base = bar_addr[1]; + priv->buf_base = bar_addr[2]; +#ifndef 
USE_RDMA2_MODE + priv->iom_base = bar_addr[3]; +#endif /* USE_RDMA2_MODE */ + priv->reg_base_bus = bar_addr_bus[1]; + priv->buf_base_bus = bar_addr_bus[2]; + priv->niccpb_procval = NICCPB_GET_PROCVAL(rtl_ver); + + spin_lock_init(&priv->cdev_open_lock); + priv->device_open = 1; /* disable open */ + + /* NUMA node */ +#if defined(__e2k__) /* Oops on sparc */ + set_dev_node(&pdev->dev, numa_node); +#endif + + /* create iommu mapping */ + pdev->dev.dma_parms = devm_kzalloc(&pdev->dev, + sizeof(*(pdev->dev.dma_parms)), + GFP_KERNEL); + if (!(pdev->dev.dma_parms)) { + err = -ENOMEM; + goto err_free_mem; + } + + /* Set DMA seg_size & boundary */ + if (dma_set_max_seg_size(&pdev->dev, dma_max_seg_size)) { + dev_warn(&pdev->dev, + "WARNING: wrong dma_max_seg_size\n"); + } + if (dma_set_seg_boundary(&pdev->dev, dma_seg_boundary)) { + dev_warn(&pdev->dev, + "WARNING: wrong dma_seg_boundary\n"); + } + + DEV_DBG(M2MLC_DBG_MSK_MEM, &pdev->dev, + "pdev->dma_parms-> max_segment_size: %u(0x%X), " + "segment_boundary_mask: %lu(0x%lX)", + dma_get_max_seg_size(&pdev->dev), + dma_get_max_seg_size(&pdev->dev), + dma_get_seg_boundary(&pdev->dev), + dma_get_seg_boundary(&pdev->dev)); + + + /* Full Reset */ + err = m2mlc_hw_reset(priv, 1); + if (err) { + dev_err(&pdev->dev, + "ERROR: Cannot reset hw, aborting\n"); + goto err_free_mem; + } + m2mlc_hw_set_endianes(priv); + + + /* Create cdev */ + err = m2mlc_cdev_register(priv); + if (err) { + dev_err(&pdev->dev, + "ERROR: Cannot create cdev, aborting\n"); + goto err_free_mem; + } + + #ifdef ENABLE_NET_DEV + /* Create ndev */ + err = m2mlc_net_register(priv); + if (err) { + dev_err(&pdev->dev, + "ERROR: Cannot create ndev, aborting\n"); + goto err_cdev_remove; + } + #endif /* ENABLE_NET_DEV */ + + +#ifdef USE_DUALIOLINK + /* create fake dev */ + err = fakedev_init(priv); + if (err) { + dev_err(&pdev->dev, + "ERROR: Cannot create fakedev, aborting\n"); + goto err_dev_remove; + } + set_dev_node(&priv->fakedev->dev, /*node*/ 3); +#if 
defined(__e2k__) + set_dev_link(&priv->fakedev->dev, /*link*/ 0); +#endif + /* Set DMA seg_size & boundary */ + if (dma_set_max_seg_size(&priv->fakedev->dev, dma_max_seg_size)) { + dev_warn(&priv->fakedev->dev, + "WARNING: wrong dma_max_seg_size\n"); + } + if (dma_set_seg_boundary(&priv->fakedev->dev, dma_seg_boundary)) { + dev_warn(&priv->fakedev->dev, + "WARNING: wrong dma_seg_boundary\n"); + } +#endif /* USE_DUALIOLINK */ + + + /* = Alloc pages for buffers = */ + + /* PIO Done Queue */ + /* FIXME: Align to 32 pages; 1 page per endpoint used; 24 bytes? */ + /* FIXME: compute nearest power of two greater than niccpb_procval */ + DMA_ALLOC_RAM(priv->pio_done_que_size, + priv->pio_done_que_buff, + priv->pio_done_que_handle, + /*priv->niccpb_procval*/32 * PIO_DONE_QUE_RAM * MUL2ALIGN, + err_fakedev_remove, + "PIO Done Queue"); +#ifdef USE_MUL2ALIGN + priv->pio_done_que_offset = ((priv->pio_done_que_handle + + (32 * PIO_DONE_QUE_RAM - 1)) & + ~(32 * PIO_DONE_QUE_RAM - 1)) - + priv->pio_done_que_handle; + DEV_DBG(M2MLC_DBG_MSK_MEM, &pdev->dev, + "offset for PIO Done queue is %u(0x%X) result at buf+off=" \ + " 0x%p (handle+off= hw:0x%llX)\n", + priv->pio_done_que_offset, + priv->pio_done_que_offset, + (void *)(priv->pio_done_que_buff + priv->pio_done_que_offset), + (long long unsigned int)(priv->pio_done_que_handle + + priv->pio_done_que_offset)); +#endif /* USE_MUL2ALIGN */ + + /* PIO Data Queue */ + /* FIXME: Align to 32 pages; 1 page per endpoint used */ + DMA_ALLOC_RAM(priv->pio_data_que_size, + priv->pio_data_que_buff, + priv->pio_data_que_handle, + /*priv->niccpb_procval*/32 * PIO_DATA_QUE_RAM * MUL2ALIGN, + err_free_pio_done_que, + "PIO Data Queue"); +#ifdef USE_MUL2ALIGN + priv->pio_data_que_offset = ((priv->pio_data_que_handle + + (32 * PIO_DATA_QUE_RAM - 1)) & + ~(32 * PIO_DATA_QUE_RAM - 1)) - + priv->pio_data_que_handle; + DEV_DBG(M2MLC_DBG_MSK_MEM, &pdev->dev, + "offset for PIO Data queue is %u(0x%X) result at buf+off=" \ + " 0x%p (handle+off= hw:0x%llX)\n", 
+ priv->pio_data_que_offset, + priv->pio_data_que_offset, + (void *)(priv->pio_data_que_buff + priv->pio_data_que_offset), + (long long unsigned int)(priv->pio_data_que_handle + + priv->pio_data_que_offset)); +#endif /* USE_MUL2ALIGN */ + + + /* Mailbox/Doorbell/DMA Return */ + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_ALLOC_RAM(priv->mdd_ret_size[i], + priv->mdd_ret_buff[i], + priv->mdd_ret_handle[i], + MDD_RET_RAM, + err_free_pio_data_que, + "Mailbox/Doorbell/DMA Return"); + } + + /* Mailbox Structure */ +#ifdef USE_ALLOCPOOL + priv->mb_struct_dma_pool = dma_pool_create("mb_struct", &pdev->dev, + MB_STRUCT_RAM, + MB_STRUCT_RAM, 0); + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_ALLOC_POOL(priv->mb_struct_dma_pool, + priv->mb_struct_size[i], + priv->mb_struct_buff[i], + priv->mb_struct_handle[i], + MB_STRUCT_RAM, + err_free_mdd_ret, + "Mailbox Structure"); + } +#else + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_ALLOC_RAM(priv->mb_struct_size[i], + priv->mb_struct_buff[i], + priv->mb_struct_handle[i], + MB_STRUCT_RAM * MUL2ALIGN, + err_free_mdd_ret, + "Mailbox Structure"); +#ifdef USE_MUL2ALIGN + priv->mb_struct_offset[i] = ((priv->mb_struct_handle[i] + + (MB_STRUCT_RAM - 1)) & + ~(MB_STRUCT_RAM - 1)) - + priv->mb_struct_handle[i]; +#endif /* USE_MUL2ALIGN */ + } +#endif /* USE_ALLOCPOOL */ + + /* Mailbox Done Queue */ +#ifdef USE_ALLOCPOOL + priv->mb_done_dma_pool = dma_pool_create("mb_done", &pdev->dev, + MB_DONE_QUE_RAM, + MB_DONE_QUE_RAM, 0); + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_ALLOC_POOL(priv->mb_done_dma_pool, + priv->mb_done_que_size[i], + priv->mb_done_que_buff[i], + priv->mb_done_que_handle[i], + MB_DONE_QUE_RAM, + err_free_mb_struct, + "Mailbox Done Queue"); + } +#else + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_ALLOC_RAM(priv->mb_done_que_size[i], + priv->mb_done_que_buff[i], + priv->mb_done_que_handle[i], + MB_DONE_QUE_RAM * MUL2ALIGN, + err_free_mb_struct, + "Mailbox Done Queue"); +#ifdef USE_MUL2ALIGN + 
priv->mb_done_offset[i] = ((priv->mb_done_que_handle[i] + + (MB_DONE_QUE_RAM - 1)) & + ~(MB_DONE_QUE_RAM - 1)) - + priv->mb_done_que_handle[i]; +#endif /* USE_MUL2ALIGN */ + } +#endif /* USE_ALLOCPOOL */ + + /* Doorbell Start */ + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_ALLOC_RAM(priv->db_start_size[i], + priv->db_start_buff[i], + priv->db_start_handle[i], + DB_START_RAM, + err_free_mb_done_que, + "Doorbell Start"); + } + + /* DMA Start */ +#ifdef USE_ALLOCPOOL + priv->dma_start_dma_pool = dma_pool_create("dma_start", &pdev->dev, + DMA_START_RAM, + DMA_START_RAM, 0); + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_ALLOC_POOL(priv->dma_start_dma_pool, + priv->dma_start_size[i], + priv->dma_start_buff[i], + priv->dma_start_handle[i], + DMA_START_RAM, + err_free_db_start, + "DMA Start"); + } +#else + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_ALLOC_RAM(priv->dma_start_size[i], + priv->dma_start_buff[i], + priv->dma_start_handle[i], + DMA_START_RAM * MUL2ALIGN, + err_free_db_start, + "DMA Start"); +#ifdef USE_MUL2ALIGN + priv->dma_start_offset[i] = ((priv->dma_start_handle[i] + + (DMA_START_RAM - 1)) & + ~(DMA_START_RAM - 1)) - + priv->dma_start_handle[i]; +#endif /* USE_MUL2ALIGN */ + } +#endif /* USE_ALLOCPOOL */ + + /* DMA Done Queue */ +#ifdef USE_ALLOCPOOL + priv->dma_done_dma_pool = dma_pool_create("dma_done", &pdev->dev, + DMA_DONE_QUE_RAM, + DMA_DONE_QUE_RAM, 0); + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_ALLOC_POOL(priv->dma_done_dma_pool, + priv->dma_done_que_size[i], + priv->dma_done_que_buff[i], + priv->dma_done_que_handle[i], + DMA_DONE_QUE_RAM, + err_free_dma_start, + "DMA Done Queue"); + } +#else + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_ALLOC_RAM(priv->dma_done_que_size[i], + priv->dma_done_que_buff[i], + priv->dma_done_que_handle[i], + DMA_DONE_QUE_RAM * MUL2ALIGN, + err_free_dma_start, + "DMA Done Queue"); +#ifdef USE_MUL2ALIGN + priv->dma_done_offset[i] = ((priv->dma_done_que_handle[i] + + 
(DMA_DONE_QUE_RAM - 1)) & + ~(DMA_DONE_QUE_RAM - 1)) - + priv->dma_done_que_handle[i]; +#endif /* USE_MUL2ALIGN */ + } +#endif /* USE_ALLOCPOOL */ + + +#ifndef TESTWOIRQ + /* Register IRQ */ + if (-1 == pdev->irq) { + dev_warn(&pdev->dev, "WARNING: no interrupt (%d) for %s\n", + pdev->irq, dev_name(priv->dev)); + } else { + for (i = 0; i < priv->niccpb_procval; i++) { + dev_info(&pdev->dev, "request interrupt: %d - %s\n", + pdev->irq + i, dev_name(priv->dev)); + err = request_irq(pdev->irq + i, m2mlc_irq_handler, + IRQF_SHARED, dev_name(priv->dev), + (void *)pdev); + if (err) { + dev_err(&pdev->dev, + "ERROR: Cannot request PCI irq %d," \ + " aborting\n", + pdev->irq + i); + goto err_unregister_irq; + } + } + } +#endif /* TESTWOIRQ */ + + m2mlc_hw_init(priv); + + /* enable open */ + spin_lock(&priv->cdev_open_lock); + priv->device_open = 0; + spin_unlock(&priv->cdev_open_lock); + + return 0; + + +#ifndef TESTWOIRQ +err_unregister_irq: + if (pdev->irq != -1) { + for (i = 0; i < priv->niccpb_procval; i++) { + free_irq(pdev->irq + i, (void *)pdev); + } + } +#endif /* TESTWOIRQ */ +/*err_free_dma_done_que:*/ +#ifdef USE_ALLOCPOOL + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_FREE_POOL(priv->dma_done_dma_pool, + priv->dma_done_que_buff[i], + priv->dma_done_que_handle[i]); + } + dma_pool_destroy(priv->dma_done_dma_pool); +#else + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_FREE_RAM(priv->dma_done_que_size[i], + priv->dma_done_que_buff[i], + priv->dma_done_que_handle[i]); + } +#endif /* USE_ALLOCPOOL */ +err_free_dma_start: +#ifdef USE_ALLOCPOOL + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_FREE_POOL(priv->dma_start_dma_pool, + priv->dma_start_buff[i], + priv->dma_start_handle[i]); + } + dma_pool_destroy(priv->dma_start_dma_pool); +#else + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_FREE_RAM(priv->dma_start_size[i], + priv->dma_start_buff[i], + priv->dma_start_handle[i]); + } +#endif /* USE_ALLOCPOOL */ +err_free_db_start: + for (i = 0; i < 
priv->niccpb_procval; i++) { + DMA_FREE_RAM(priv->db_start_size[i], + priv->db_start_buff[i], + priv->db_start_handle[i]); + } +err_free_mb_done_que: +#ifdef USE_ALLOCPOOL + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_FREE_POOL(priv->mb_done_dma_pool, + priv->mb_done_que_buff[i], + priv->mb_done_que_handle[i]); + } + dma_pool_destroy(priv->mb_done_dma_pool); +#else + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_FREE_RAM(priv->mb_done_que_size[i], + priv->mb_done_que_buff[i], + priv->mb_done_que_handle[i]); + } +#endif /* USE_ALLOCPOOL */ +err_free_mb_struct: +#ifdef USE_ALLOCPOOL + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_FREE_POOL(priv->mb_struct_dma_pool, + priv->mb_struct_buff[i], + priv->mb_struct_handle[i]); + } + dma_pool_destroy(priv->mb_struct_dma_pool); +#else + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_FREE_RAM(priv->mb_struct_size[i], + priv->mb_struct_buff[i], + priv->mb_struct_handle[i]); + } +#endif /* USE_ALLOCPOOL */ +err_free_mdd_ret: + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_FREE_RAM(priv->mdd_ret_size[i], + priv->mdd_ret_buff[i], + priv->mdd_ret_handle[i]); + } +err_free_pio_data_que: + DMA_FREE_RAM(priv->pio_data_que_size, + priv->pio_data_que_buff, + priv->pio_data_que_handle); +err_free_pio_done_que: + DMA_FREE_RAM(priv->pio_done_que_size, + priv->pio_done_que_buff, + priv->pio_done_que_handle); +err_fakedev_remove: +#ifdef USE_DUALIOLINK + fakedev_exit(priv); +err_dev_remove: +#endif /* USE_DUALIOLINK */ +#ifdef ENABLE_NET_DEV + m2mlc_net_remove(priv); +err_cdev_remove: +#endif /* ENABLE_NET_DEV */ + m2mlc_cdev_remove(priv); +err_free_mem: + kfree(priv); +err_out: + return err; +} /* m2mlc_init_board */ + +/** + * Cleanup Routine + */ +void m2mlc_release_board(struct pci_dev *pdev) +{ + int i; + int err; + m2mlc_priv_t *priv; +#if 0 + /* TODO: move to daemon */ + ecs_gpstat_csr_reg_t ecs_gpstat_csr; +#endif + + assert(pdev); + if (!pdev) + return; + + priv = pci_get_drvdata(pdev); + assert(priv); + 
if (!priv) + return; + + /* disable open */ + spin_lock(&priv->cdev_open_lock); + priv->device_open = 1; + spin_unlock(&priv->cdev_open_lock); + +#if 0 + /* TODO: move to daemon */ + /* Clean Discovered bit */ + ecs_gpstat_csr.r = m2mlc_read_reg32(priv->ecs_base, ECS_GPSTAT_CSR); + ecs_gpstat_csr.p.Discovered = 0; + m2mlc_write_reg32(priv->ecs_base, ECS_GPSTAT_CSR, ecs_gpstat_csr.r); + mdelay(1); +#endif + + /* Full Reset */ + err = m2mlc_hw_reset(priv, 0); + if (err) { + dev_err(&pdev->dev, + "ERROR: Cannot reset hw, continue\n"); + } + +#ifndef TESTWOIRQ + if (pdev->irq != -1) { + for (i = 0; i < priv->niccpb_procval; i++) { + free_irq(pdev->irq + i, (void *)pdev); + } + } +#endif /* TESTWOIRQ */ + + /* free pages for DMA buffers */ + pdev = priv->pdev; + +#ifdef USE_ALLOCPOOL + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_FREE_POOL(priv->dma_done_dma_pool, + priv->dma_done_que_buff[i], + priv->dma_done_que_handle[i]); + } + dma_pool_destroy(priv->dma_done_dma_pool); +#else + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_FREE_RAM(priv->dma_done_que_size[i], + priv->dma_done_que_buff[i], + priv->dma_done_que_handle[i]); + } +#endif /* USE_ALLOCPOOL */ +#ifdef USE_ALLOCPOOL + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_FREE_POOL(priv->dma_start_dma_pool, + priv->dma_start_buff[i], + priv->dma_start_handle[i]); + } + dma_pool_destroy(priv->dma_start_dma_pool); +#else + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_FREE_RAM(priv->dma_start_size[i], + priv->dma_start_buff[i], + priv->dma_start_handle[i]); + } +#endif /* USE_ALLOCPOOL */ + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_FREE_RAM(priv->db_start_size[i], + priv->db_start_buff[i], + priv->db_start_handle[i]); + } +#ifdef USE_ALLOCPOOL + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_FREE_POOL(priv->mb_done_dma_pool, + priv->mb_done_que_buff[i], + priv->mb_done_que_handle[i]); + } + dma_pool_destroy(priv->mb_done_dma_pool); +#else + for (i = 0; i < priv->niccpb_procval; i++) { 
+ DMA_FREE_RAM(priv->mb_done_que_size[i], + priv->mb_done_que_buff[i], + priv->mb_done_que_handle[i]); + } +#endif /* USE_ALLOCPOOL */ +#ifdef USE_ALLOCPOOL + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_FREE_POOL(priv->mb_struct_dma_pool, + priv->mb_struct_buff[i], + priv->mb_struct_handle[i]); + } + dma_pool_destroy(priv->mb_struct_dma_pool); +#else + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_FREE_RAM(priv->mb_struct_size[i], + priv->mb_struct_buff[i], + priv->mb_struct_handle[i]); + } +#endif /* USE_ALLOCPOOL */ + for (i = 0; i < priv->niccpb_procval; i++) { + DMA_FREE_RAM(priv->mdd_ret_size[i], + priv->mdd_ret_buff[i], + priv->mdd_ret_handle[i]); + } + DMA_FREE_RAM(priv->pio_data_que_size, + priv->pio_data_que_buff, + priv->pio_data_que_handle); + DMA_FREE_RAM(priv->pio_done_que_size, + priv->pio_done_que_buff, + priv->pio_done_que_handle); + +#ifdef USE_DUALIOLINK + fakedev_exit(priv); +#endif /* USE_DUALIOLINK */ +#ifdef ENABLE_NET_DEV + m2mlc_net_remove(priv); +#endif /* ENABLE_NET_DEV */ + m2mlc_cdev_remove(priv); + + kfree(priv); +} /* m2mlc_release_board */ + + +/** + ****************************************************************************** + * Module Part + ****************************************************************************** + **/ + +/** + * Driver Registration Routine + * + * m2mlc_init is the first routine called when the driver is loaded. + * All it does is register with the PCI subsystem. 
+ */ +static int __init m2mlc_init(void) +{ + int status; + + PDEBUG(M2MLC_DBG_MSK_MODULE, + "------------------------------------------\n"); + LOG_MSG("Init M2MLC module device driver\n"); + + if (0 != check_parameters()) { + ERR_MSG("ERROR: Invalid module parameters, aborting\n"); + return -EINVAL; + } + + m2mlc_dev_init(); + + status = pci_register_driver(&m2mlc_pci_driver); + if (status != 0) { + ERR_MSG("ERROR: Could not register driver\n"); + goto cdevexit; + } + + PDEBUG(M2MLC_DBG_MSK_MODULE, "Init done\n"); + return 0; + +cdevexit: + m2mlc_dev_exit(); + return status; +} /* m2mlc_init */ + +/** + * Driver Exit Cleanup Routine + * + * m2mlc_exit is called just before the driver is removed from memory. + */ +static void __exit m2mlc_exit(void) +{ + pci_unregister_driver(&m2mlc_pci_driver); + + m2mlc_dev_exit(); + + PDEBUG(M2MLC_DBG_MSK_MODULE, "Exit\n"); +} /* m2mlc_exit */ + + +module_init(m2mlc_init); +module_exit(m2mlc_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Andrey Kalita "); +MODULE_DESCRIPTION("M2MLC module device driver"); +MODULE_SUPPORTED_DEVICE("M2MLC, DeviceID:" DEVICE_ID ", VendorID:" VENDOR_ID); +MODULE_VERSION(DRIVER_VERSION); diff --git a/drivers/mcst/m2mlc/m2mlc_net.c b/drivers/mcst/m2mlc/m2mlc_net.c new file mode 100644 index 000000000000..46c053068d33 --- /dev/null +++ b/drivers/mcst/m2mlc/m2mlc_net.c @@ -0,0 +1,605 @@ +/** + * m2mlc_net.c - M2MLC module device driver + * + * Network part + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "m2mlc.h" + + +#ifdef ENABLE_NET_DEV + +#undef ENABLE_NAPI + +/** + * Network interface Consts + */ +#define M2MLC_WATCHDOG_PERIOD (1 * HZ) +#define M2MLC_NAPI_WEIGHT (16) +#define M2MLC_TX_QUE_LEN (100) + +/** get device_id from MAC */ +#define M2MLC_GET_DESTID(x) (*((u8 *)x + 5)) + + +u32 m2mlc_read_reg32(void __iomem *base_addr, u32 port); +void m2mlc_hw_int_setmask(m2mlc_priv_t *priv, int ep, + m2mlc_interrupt_t intmask); +int 
ksvv_open_endpoint(m2mlc_npriv_t *npriv); +int ksvv_close_endpoint(m2mlc_npriv_t *npriv); +uint32_t ksvv_send(m2mlc_npriv_t *npriv, int rem_node_id, int rem_endp_id, + void *data, uint32_t send_size); +int ksvv_poll(m2mlc_npriv_t *npriv); + + +/** + ****************************************************************************** + * Rx Part + ****************************************************************************** + **/ + +/** + * The rx poll function + */ +void m2mlc_hw_rx(struct net_device *ndev, char *data, ssize_t size) +{ + m2mlc_npriv_t *npriv; + struct sk_buff *rx_skb; + u8 *prdbuf; + + DEV_DBG(M2MLC_DBG_MSK_NET, &ndev->dev, "m2mlc_hw_rx(%d)\n", (int)size); + + assert(ndev); + if (!ndev) + return; + + npriv = netdev_priv(ndev); + assert(npriv); + if (!npriv) + return; + + /* nothing receive */ + if (size < (M2MLC_ETH_HEAD_LEN)) { + dev_err(&ndev->dev, + "ERROR: Very small packet's size (< header's size)\n"); + return; + } + + /* allocate new skb */ + rx_skb = netdev_alloc_skb(ndev, size); + if (!rx_skb) { + dev_err(&ndev->dev, + "ERROR: Cannot allocate rx buffer (skb)\n"); + npriv->stats.rx_dropped++; + return; + } + + /* receive err packet */ + /*if () { + rx_skb->len = 0; + npriv->stats.rx_errors++; + dev_kfree_skb(rx_skb); + return; + }*/ + + /* getting all data */ + prdbuf = skb_put(rx_skb, size); + memcpy(prdbuf, data, size); + + /* Pass to upper layer */ + rx_skb->protocol = eth_type_trans(rx_skb, ndev); + npriv->stats.rx_packets++; + npriv->stats.rx_bytes += size; + + DEV_DBG(M2MLC_DBG_MSK_NET, &ndev->dev, "netif_rx(%d)\n", rx_skb->len); +#ifdef ENABLE_NAPI + netif_receive_skb(rx_skb); +#else /* ENABLE_NAPI */ + netif_rx(rx_skb); +#endif /* ENABLE_NAPI */ + /* rx_skb = NULL; */ + + return; +} /* m2mlc_hw_rx */ + +/** + * m2mlc_irq_handler + */ +void m2mlc_net_irq_handler(m2mlc_priv_t *m2mlc_priv, uint32_t irq_stat) +{ + struct net_device *ndev; + m2mlc_npriv_t *npriv; +#ifdef ENABLE_NAPI + m2mlc_interrupt_t intmask; +#endif /* ENABLE_NAPI */ + + 
ndev = m2mlc_priv->ndev; + assert(ndev); + if (!ndev) + return; + + npriv = netdev_priv(ndev); + assert(npriv); + if (!npriv) + return; + + DEV_DBG(M2MLC_DBG_MSK_NET, &ndev->dev, + "net_irq_handler(0x%X)\n", irq_stat); + +#ifdef ENABLE_NAPI + intmask.r = 0; + m2mlc_hw_int_setmask(npriv->p_priv, CDEV_ENDPOINT_NET, intmask); + napi_schedule(&(npriv->napi)); +#else /* ENABLE_NAPI */ + while (ksvv_poll(npriv)) + ; +#endif /* ENABLE_NAPI */ + +#if 0 + if (link) { + /* Link Up if transmit enable */ + if (1) + netif_carrier_on(ndev); + else + netif_carrier_off(ndev); + } +#endif /* 0 */ +} /* m2mlc_net_irq_handler */ + +/** + * The main poll function + */ +#ifdef ENABLE_NAPI +static int m2mlc_poll(struct napi_struct *napi, int budget) +{ + m2mlc_npriv_t *npriv; + struct net_device *ndev; + int work_done; + int work_done_; + m2mlc_interrupt_t intmask; + + assert(napi); + if (!napi) + return -1; + + npriv = container_of(napi, m2mlc_npriv_t, napi); + assert(npriv); + if (!npriv) + return -1; + + ndev = npriv->p_priv->ndev; + assert(ndev); + if (!ndev) + return -1; + + DEV_DBG(M2MLC_DBG_MSK_NET, &ndev->dev, "m2mlc_poll\n"); + + work_done = 0; + do { + /* Reset interrupt controller */ + /* + intmask.r = 0; + m2mlc_hw_int_setmask(npriv->p_priv, CDEV_ENDPOINT_NET, intmask); + */ + + work_done_ = work_done; + if (ksvv_poll(npriv)) + work_done++; + } while ((work_done != budget) && (work_done != work_done_)); + + if (work_done < budget) { + /* Restore irq mask */ + intmask.r = 0; + intmask.p.mb = M2MLC_INT_MB_RXMSG; + m2mlc_hw_int_setmask(npriv->p_priv, CDEV_ENDPOINT_NET, intmask); + + napi_complete(napi); + } + + return work_done; +} /* m2mlc_poll */ +#endif /* ENABLE_NAPI */ + + +/** + ****************************************************************************** + * Network Driver Part + ****************************************************************************** + **/ + +/** + * The network interface transmission function + * @skb: socket buffer for tx + * @ndev: network 
interface device structure + * + * m2mlc_start_xmit is called by socket send function + */ +static netdev_tx_t m2mlc_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + int tx_ret = -1; + uint32_t DestId; + m2mlc_npriv_t *npriv; + struct ethhdr *eth; + + assert(skb); + if (!skb) + return -1; + assert(ndev); + if (!ndev) + return -1; + + npriv = netdev_priv(ndev); + assert(npriv); + if (!npriv) + return -1; + + /* Check packet's size */ + if (skb->len < (M2MLC_ETH_HEAD_LEN)) { + dev_err(&ndev->dev, + "ERROR: Very small packet's size (< header's size)\n"); + goto tx_free_skb; + } + + /* Save the timestamp */ + netif_trans_update(ndev); + + /* HW transmit data */ + eth = (struct ethhdr *)skb->data; + DestId = M2MLC_GET_DESTID(eth->h_dest); + if (DestId == 0) { + dev_dbg(&ndev->dev, + "net_tx: DROP: skblen=%d DestId=%u\n", + skb->len, DestId); + goto tx_free_skb; + } + DEV_DBG(M2MLC_DBG_MSK_NET, &ndev->dev, + "TX: [%ld] skblen=%d DestId=%u\n", + ndev->trans_start, skb->len, DestId); + tx_ret = ksvv_send(npriv, DestId, CDEV_ENDPOINT_NET, + (void *)(skb->data), skb->len); + +tx_free_skb: + /* Free skb */ + dev_kfree_skb(skb); + + /* Stats */ + if (tx_ret < 0) { + npriv->stats.tx_dropped++; + } else { + npriv->stats.tx_packets++; + npriv->stats.tx_bytes += skb->len; + } + + if (tx_ret) + return NETDEV_TX_OK; + else + return -1; +} /* m2mlc_start_xmit */ + + +/** + * The network interface open function + * @ndev: network interface device structure + * + * m2mlc_open is called by register_netdev + */ +static int m2mlc_open(struct net_device *ndev) +{ + m2mlc_npriv_t *npriv; + m2mlc_interrupt_t intmask; + + assert(ndev); + if (!ndev) + return -1; + + npriv = netdev_priv(ndev); + assert(npriv); + if (!npriv) + return -1; + + if (!ndev->dev_addr[5]) { + dev_err(&ndev->dev, "please assign DeviceID before open\n"); + return -1; + } + + dev_info(&ndev->dev, "interface OPEN\n"); + + if (ksvv_open_endpoint(npriv)) + return -1; + + /* Enable interrupt */ + intmask.r = 0; 
+ intmask.p.mb = M2MLC_INT_MB_RXMSG; /* Mailbox Rx irq */ + m2mlc_hw_int_setmask(npriv->p_priv, CDEV_ENDPOINT_NET, intmask); + + /* start tx/rx */ + netif_start_queue(ndev); +#ifdef ENABLE_NAPI + napi_enable(&(npriv->napi)); +#endif /* ENABLE_NAPI */ + + return 0; +} /* m2mlc_open */ + +/** + * The network interface close function + * @ndev: network interface device structure + * + * m2mlc_stop is called by free_netdev + */ +static int m2mlc_stop(struct net_device *ndev) +{ + m2mlc_npriv_t *npriv; + m2mlc_interrupt_t intmask; + + assert(ndev); + if (!ndev) + return -1; + + npriv = netdev_priv(ndev); + assert(npriv); + if (!npriv) + return -1; + + /*if (netif_msg_ifup(npriv))*/ + dev_info(&ndev->dev, "interface STOP\n"); + + /* Disable interrupt */ + intmask.r = 0; + m2mlc_hw_int_setmask(npriv->p_priv, CDEV_ENDPOINT_NET, intmask); + + /* stop tx/rx */ +#ifdef ENABLE_NAPI + napi_disable(&(npriv->napi)); +#endif /* ENABLE_NAPI */ + netif_stop_queue(ndev); + + /* link off */ + netif_carrier_off(ndev); + + ksvv_close_endpoint(npriv); + + return 0; +} /* m2mlc_stop */ + + +/** + * The network interface status function + * @ndev: network interface device structure + */ +static struct net_device_stats *m2mlc_get_stats(struct net_device *ndev) +{ + m2mlc_npriv_t *npriv; + + assert(ndev); + if (!ndev) + return NULL; + + npriv = netdev_priv(ndev); + assert(npriv); + if (!npriv) + return NULL; + + return &npriv->stats; +} /* m2mlc_get_stats */ + +#if 0 +static int m2mlc_change_mtu(struct net_device *ndev, int new_mtu) +{ + dev_info(&ndev->dev, "change MTU: old=%u, new=%d\n", + ndev->mtu, new_mtu); + + if (new_mtu == ndev->mtu) { + return 0; + } + if (new_mtu > M2MLC_MTU) { + return 0; + } + + ndev->mtu = new_mtu; + return 0; +} /* m2mlc_change_mtu */ +#endif /* 0 */ + +static int m2mlc_set_mac_addr(struct net_device *ndev, void *p) +{ + m2mlc_npriv_t *npriv; + ecs_basedevid_csr_reg_t basedevid; + struct sockaddr *addr = p; + + npriv = netdev_priv(ndev); + assert(npriv); + if 
(!npriv) + return -EBUSY; + + if (netif_running(ndev)) + return -EBUSY; + memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + + basedevid.r = m2mlc_read_reg32(npriv->p_priv->ecs_base, + ECS_BASEDEVID_CSR); + if (basedevid.r) { + ndev->dev_addr[5] = basedevid.p.Base_DeviceID; + } + + dev_info(&ndev->dev, "set new mac address (%d)\n", ndev->dev_addr[5]); + + return 0; +} /* m2mlc_set_mac_addr */ + +#if 0 +/** + * The network interface transmission timeout function + * @ndev: network interface device structure + */ +static void m2mlc_tx_timeout(struct net_device *ndev) +{ + assert(ndev); + if (!ndev) + return; + + /* Save the timestamp */ + ndev->trans_start = jiffies; + + dev_err(&ndev->dev, + "ERROR: Tx Timeout\n"); +} /* m2mlc_tx_timeout */ +#endif /* 0 */ + + +/** + * net_device_ops + */ +static const struct net_device_ops m2mlc_netdev_ops = { + .ndo_open = m2mlc_open, + .ndo_stop = m2mlc_stop, + .ndo_start_xmit = m2mlc_start_xmit, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = m2mlc_set_mac_addr, + .ndo_get_stats = m2mlc_get_stats, +#if 0 + .ndo_tx_timeout = m2mlc_tx_timeout, +#endif /* 0 */ +}; + + +/** + ****************************************************************************** + * Init Network Driver Part + ****************************************************************************** + **/ + +/** + * The network interface init function + * @ndev: network interface device structure + * + * m2mlc_net_init is called by alloc_netdev + */ +static void m2mlc_net_init(struct net_device *ndev) +{ + m2mlc_npriv_t *npriv; + + assert(ndev); + + /* net device ops */ +#if 0 /*def CONFIG_E2K*/ + ndev->dev_addr[0] = l_base_mac_addr[0]; + ndev->dev_addr[1] = l_base_mac_addr[1]; + ndev->dev_addr[2] = l_base_mac_addr[2]; + ndev->dev_addr[3] = l_base_mac_addr[3]; + ndev->dev_addr[4] = 0xF0; + ndev->dev_addr[5] = 0; /* << device_id */ +#else + ndev->dev_addr[0] = 0x00; + ndev->dev_addr[1] = 0x01; + 
ndev->dev_addr[2] = 0x00; + ndev->dev_addr[3] = 0x01; + ndev->dev_addr[4] = 0xF0; + ndev->dev_addr[5] = 0; /* << device_id */ + /* ifconfig m2m0 hw ether 00:01:00:01:f0: */ +#endif + ether_setup(ndev); + ndev->mtu = M2MLC_MTU; + ndev->netdev_ops = &m2mlc_netdev_ops; + ndev->flags = IFF_NOARP; + ndev->tx_queue_len = M2MLC_TX_QUE_LEN; + /* FIXME: disable ipv6 */ +#if 0 + ndev->features = NETIF_F_LLTX; + ndev->type = ARPHRD_NONE; ARPHRD_ETHER + ndev->flags = IFF_NOARP | IFF_POINTOPOINT | IFF_PROMISC; + ndev->watchdog_timeo = M2MLC_WATCHDOG_PERIOD; + ndev->hard_header_len = M2MLC_ETH_HEAD_LEN; +#endif /* 0 */ + memset(ndev->broadcast, 0, sizeof(ndev->broadcast)); + + /* initialize the npriv field. */ + npriv = netdev_priv(ndev); + memset(npriv, 0, sizeof(m2mlc_npriv_t)); +} /* m2mlc_net_init */ + +int m2mlc_net_register(m2mlc_priv_t *priv) +{ + int ret = 0; + struct pci_dev *pdev; + m2mlc_npriv_t *npriv; + + assert(priv); + if (!priv) + return -ENODEV; + + pdev = priv->pdev; + assert(pdev); + if (!pdev) + return -ENODEV; + + priv->ndev = alloc_netdev(sizeof(m2mlc_npriv_t), M2MLC_DEVNAME "%d", + NET_NAME_UNKNOWN, m2mlc_net_init); + if (!priv->ndev) { + dev_err(&pdev->dev, + "ERROR: Cannot allocate memory" \ + " for net_dev, aborting\n"); + ret = -ENOMEM; + goto err_out; + } + SET_NETDEV_DEV(priv->ndev, &pdev->dev); /* parent := pci */ + npriv = netdev_priv(priv->ndev); + npriv->p_priv = priv; + +#ifdef ENABLE_NAPI + netif_napi_add(priv->ndev, &(npriv->napi), + m2mlc_poll, M2MLC_NAPI_WEIGHT); +#endif /* ENABLE_NAPI */ + + /* link off */ + netif_carrier_off(priv->ndev); + + if ((ret = register_netdev(priv->ndev))) { + dev_err(&pdev->dev, + "ERROR: Cannot register net device, aborting\n"); + goto err_out_free_netdev; + } + + dev_info(&pdev->dev, "network interface %s init\n", + dev_name(&priv->ndev->dev)); + + DEV_DBG(M2MLC_DBG_MSK_NET, &priv->ndev->dev, + "network interface hard_header_len=%d\n", + priv->ndev->hard_header_len); + + netif_carrier_on(priv->ndev); + return 
0; + + +err_out_free_netdev: + free_netdev(priv->ndev); +err_out: + return ret; +} /* m2mlc_net_register */ + +void m2mlc_net_remove(m2mlc_priv_t *priv) +{ + assert(priv); + if (!priv) + return; + + /* link off */ + netif_carrier_off(priv->ndev); + + if (priv->ndev) { + unregister_netdev(priv->ndev); + free_netdev(priv->ndev); + } +} /* m2mlc_net_remove */ + +#endif /* ENABLE_NET_DEV */ diff --git a/drivers/mcst/m2mlc/m2mlc_pci.c b/drivers/mcst/m2mlc/m2mlc_pci.c new file mode 100644 index 000000000000..a3dc89c944a3 --- /dev/null +++ b/drivers/mcst/m2mlc/m2mlc_pci.c @@ -0,0 +1,186 @@ +/** + * m2mlc_pci.c - M2MLC module device driver + * + * PCI Device Driver Part + */ + +#include "m2mlc.h" + +#ifndef USE_RDMA2_MODE +#define PCI_BAR_NUMS 4 +#else /* USE_RDMA2_MODE */ +#define PCI_BAR_NUMS 3 +#endif /* USE_RDMA2_MODE */ + + +/* extern */ +int m2mlc_init_board(struct pci_dev *pdev, void __iomem *bar_addr[], + phys_addr_t bar_addr_bus[]); +void m2mlc_release_board(struct pci_dev *pdev); + + +/** + * Device Initialization Routine + * + * @pdev: PCI device information struct + * @pid: entry in ids + * + * Returns 0 on success, negative on failure + * + * probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
+ */ +static int probe(struct pci_dev *pdev, const struct pci_device_id *pid) +{ + int err; + int i; + size_t size; + void __iomem *bar_addr[PCI_BAR_NUMS] = {NULL}; + phys_addr_t bar_addr_bus[PCI_BAR_NUMS] = {0}; + /*char irq;*/ + + + assert(pdev); + if (!pdev) + return -ENODEV; + + DEV_DBG(M2MLC_DBG_MSK_PCI, &pdev->dev, "PCI Probe: device %s\n", + pci_name(pdev)); + + dev_info(&pdev->dev, "initializing PCI device %04x:%04x\n", + pdev->vendor, pdev->device); + + /* PCI */ + if ((err = pci_enable_device(pdev))) { + dev_err(&pdev->dev, + "ERROR: Cannot enable PCI device, aborting\n"); + goto failure; + } + + if ((err = pci_request_regions(pdev, DRIVER_NAME))) { + dev_err(&pdev->dev, + "ERROR: Cannot obtain PCI resources, aborting\n"); + goto failure_release_pci; + } + + for (i = 0; i < PCI_BAR_NUMS; i++) { + size = pci_resource_end(pdev, i) - + pci_resource_start(pdev, i) + 1; + if ((bar_addr[i] = pci_iomap(pdev, i, size)) == NULL) { + err = -ENODEV; + dev_err(&pdev->dev, + "ERROR: Cannot map BAR%d, aborting\n", i); + goto failure_iounmap; + } + bar_addr_bus[i] = pci_resource_start(pdev, i); + } + + /*pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &irq);*/ + + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + dev_warn(&pdev->dev, + "WARNING: No usable 64bit DMA configuration\n"); + } else { + pci_set_master(pdev); + /*pci_set_cacheline_size(pdev);*/ + } + + if ((err = m2mlc_init_board(pdev, bar_addr, bar_addr_bus))) + goto failure_iounmap; + + DEV_DBG(M2MLC_DBG_MSK_PCI, &pdev->dev, "PCI Probe: done\n"); + return 0; + + +failure_iounmap: + for (i = 0; i < PCI_BAR_NUMS; i++) { + if (bar_addr[i] != NULL) + pci_iounmap(pdev, bar_addr[i]); + } + pci_release_regions(pdev); +failure_release_pci: + pci_disable_device(pdev); +failure: + return err; +} /* probe */ + +/** + * Device Removal Routine + * @pdev: PCI device information struct + * + * remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. 
The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + */ +static void remove(struct pci_dev *pdev) +{ + int i; + m2mlc_priv_t *priv; + void __iomem *bar_addr[PCI_BAR_NUMS] = {NULL}; + + + assert(pdev); + if (!pdev) + return; + + DEV_DBG(M2MLC_DBG_MSK_PCI, &pdev->dev, "PCI Remove device\n"); + + priv = pci_get_drvdata(pdev); + assert(priv); + if (!priv) + return; + + bar_addr[0] = priv->ecs_base; + bar_addr[1] = priv->reg_base; + bar_addr[2] = priv->buf_base; +#ifndef USE_RDMA2_MODE + bar_addr[3] = priv->iom_base; +#endif /* USE_RDMA2_MODE */ + + m2mlc_release_board(pdev); + pci_set_drvdata(pdev, NULL); + + for (i = 0; i < PCI_BAR_NUMS; i++) { + if (bar_addr[i] != NULL) + pci_iounmap(pdev, bar_addr[i]); + } + pci_release_regions(pdev); + pci_disable_device(pdev); + + DEV_DBG(M2MLC_DBG_MSK_PCI, &pdev->dev, "PCI Remove: done\n"); +} /* remove */ + +static void shutdown(struct pci_dev *pdev) +{ + if (!pdev) + return; + + DEV_DBG(M2MLC_DBG_MSK_PCI, &pdev->dev, "PCI Shutdown\n"); + + m2mlc_release_board(pdev); + + pci_save_state(pdev); + pci_clear_master(pdev); +} /* shutdown */ + + +static const struct pci_device_id ids[] = { + { + PCI_DEVICE(VENDOR_ID, DEVICE_ID) + }, + { 0, }, +}; + +MODULE_DEVICE_TABLE(pci, ids); + + +/* PCI Device API Driver */ +struct pci_driver m2mlc_pci_driver = { + .name = DRIVER_NAME, + .id_table = ids, + .probe = probe, + .remove = remove, + .shutdown = shutdown, +}; diff --git a/drivers/mcst/m2mlc/m2mlc_regs.h b/drivers/mcst/m2mlc/m2mlc_regs.h new file mode 100644 index 000000000000..331af187ea5f --- /dev/null +++ b/drivers/mcst/m2mlc/m2mlc_regs.h @@ -0,0 +1,149 @@ +#ifndef M2MLC_REGS_H__ +#define M2MLC_REGS_H__ + +/* iconnect.pdf - 2015.06.16 */ + +/** + * Bitfield tool + */ +#define GET_FIELD(r, p, m) (((r) >> (p)) & (m)) +#define GET_BIT(r, p) GET_FIELD(r, p, 1) + +#define SET_FIELD(d, p, m) (((m) & (d)) << (p)) +#define SET_BIT(p) (1UL << (p)) + + +/** + * ENDIANES + */ +/* TODO: 
sparc */ +#ifdef __sparc__ /* ARCH: e90, e90s */ + #if defined(VER_2614) + /* convert regs + convert DMA to ram */ + #define M2MLC_ENDIAN (0x00030300) + #else + /* iowrite32 for regs; convert DMA to ram */ + #define M2MLC_ENDIAN (0x00020200) + #endif +#else /* ARCH: e2k, x86 */ + /* iowrite32 for regs; normal DMA */ + #define M2MLC_ENDIAN (0x00000000) +#endif /* __sparc__ */ + + +/** + ****************************************************************************** + * PCI Config Space + ****************************************************************************** + */ + +/* NIC Capability Register */ +#define NICCPB_REG 0x40 + #define NICCPB_GET_PROCVAL(r) GET_FIELD(r, 0, 0xFF) /* RO [07:00] */ + #define NICCPB_PROCVAL 0x14 + #define NICCPB_GET_AACFG(r) GET_FIELD(r, 8, 0x3F) /* RW [13:08] */ + #define NICCPB_GET_AACFG_DMA2(r) GET_FIELD(r, 12, 0x03) /* RW [13:12] */ + #define NICCPB_GET_AACFG_DMA1(r) GET_FIELD(r, 10, 0x03) /* RW [11:10] */ + #define NICCPB_GET_AACFG_DMA0(r) GET_FIELD(r, 8, 0x03) /* RW [09:08] */ + #define NICCPB_SET_AACFG(d) SET_FIELD(d, 8, 0x3F) /* RW [13:08] */ + #define NICCPB_SET_AACFG_DMA2(d) SET_FIELD(d, 12, 0x03) /* RW [13:12] */ + #define NICCPB_SET_AACFG_DMA1(d) SET_FIELD(d, 10, 0x03) /* RW [11:10] */ + #define NICCPB_SET_AACFG_DMA0(d) SET_FIELD(d, 8, 0x03) /* RW [09:08] */ + #define NICCPB_AACFG_IOLINK0 0x00 + #define NICCPB_AACFG_IOLINK1 0x01 + #define NICCPB_AACFG_IOLINK2 0x02 + #define NICCPB_GET_SOFTRES(r) GET_BIT(r, 31) /* RW [31:31] */ + #define NICCPB_SET_SOFTRES SET_BIT(31) /* RW [31:31] */ + + +/** + ****************************************************************************** + * BAR0: Element_Config_Space (512) + ****************************************************************************** + */ + +/* "Element_Config_Space" in include/uapi/linux/mcst/m2mlc_io.h */ + + +/** + ****************************************************************************** + * BAR1: Control Regs: PIO, Mailbox, DoorBell, DMA, Interrupt, Status 
(128k) + ****************************************************************************** + */ + +#define RB_N(n) ((n) * PAGE_SIZE) /* Resource Block N offset */ +#define RB_COM ((NICCPB_PROCVAL) * PAGE_SIZE) /* Common Block offset */ + +/** + * Resource Block N + * defined in include/uapi/linux/mcst/m2mlc_io.h + */ + +/** + * Common Block + */ + +#if 0 +/* = IOMMU Control Block = */ +#define CB_IOMMU_CONTROL 0x000 /* IOMMU Control Register */ +#endif /* 0 */ + +/* = Addresses Access Control Structure = */ +/* Addresses Access Register N, n=0..19 */ +#define CB_ADDR_ACC_CTRL(n) (0x010 + ((n) * 4)) + #define CB_ADDR_ACC_CTRL_ADDR_MASK 0x1FF + #define CB_ADDR_ACC_CTRL_MAINT_EN 0x200 + + +/* = PIO Common Block = */ +#define CB_PIO_DONE_QUE_ADDR_L 0x100 /* PIO Done Queue Table Address Lower */ +#define CB_PIO_DONE_QUE_ADDR_H 0x104 /* PIO Done Queue Table Address Upper */ +#define CB_PIO_DATA_QUE_ADDR_L 0x108 /* PIO Data Queue Table Address Lower */ +#define CB_PIO_DATA_QUE_ADDR_H 0x10C /* PIO Data Queue Table Address Upper */ +#define CB_PIO_BOXES_AVAIL 0x110 /* PIO boxes availability | RO */ + +/* = Timeout Control = */ +#define CB_TO_CONTROL 0x120 /* Timeout Control Register */ + #define CB_TO_CONTROL_RETRY_MASK 0x3 + #define CB_TO_CONTROL_RETRY_SHIFT 30 + #define CB_TO_CONTROL_COUNTER_MASK 0x3FFFFFFF + #define CB_TO_CONTROL_COUNTER_SHIFT 0 + +/* = Common Interrupt Status & Mask = */ +#define CB_COM_INT_STATUS 0x124 /* Common Interrupt Status */ +#define CB_COM_INT_MASK 0x128 /* Common Interrupt Mask */ + + +/** + ****************************************************************************** + * BAR2: Data buffer in PIO Mode (128k) + ****************************************************************************** + */ + +/* Data for PIO Box N, n=0..19 */ +#define PIO_BOX_DATA(n) ((n) * PAGE_SIZE) + + +/** + ****************************************************************************** + * I/O + 
****************************************************************************** + */ +#ifdef __sparc__ +#if defined(VER_2614) + +static inline void iowrite32(u32 b, void __iomem *addr) +{ + *(u32 __force *)addr = b; +} + +static inline u32 ioread32(const void __iomem *addr) +{ + return *(const u32 __force *)addr; +} + +#endif /* VER_2614 */ +#endif /* __sparc__ */ + + +#endif /* M2MLC_REGS_H__ */ diff --git a/drivers/mcst/mem2alloc/Makefile b/drivers/mcst/mem2alloc/Makefile new file mode 100644 index 000000000000..2bd8e43261e0 --- /dev/null +++ b/drivers/mcst/mem2alloc/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_MEM2ALLOC) += mem2alloc.o diff --git a/drivers/mcst/mem2alloc/mem2alloc.c b/drivers/mcst/mem2alloc/mem2alloc.c new file mode 100644 index 000000000000..730e7068435f --- /dev/null +++ b/drivers/mcst/mem2alloc/mem2alloc.c @@ -0,0 +1,215 @@ +/* Copyright 2012 Google Inc. All Rights Reserved. */ + +#include +#include +#include +#include +#include +#include +#include + +#include "mem2alloc.h" + +static int mem2alloc_major = 0; /* dynamic */ +static DEFINE_SPINLOCK(mem_lock); + +struct ma_chunk { + struct ma_chunk *next; + struct page *page; + MemallocParams params; +}; + +static int AllocMemory(MemallocParams *p, struct file *filp); +static int FreeMemory(u64 busaddr, struct file *filp); + +static long mem2alloc_ioctl(struct file *filp, unsigned int cmd, + unsigned long _arg) +{ + int ret = 0; + void __user *arg = (void __user *) _arg; + MemallocParams memparams; + u64 busaddr; + + if (_IOC_DIR(cmd) & _IOC_READ) + ret = !access_ok(arg, _IOC_SIZE(cmd)); + else if (_IOC_DIR(cmd) & _IOC_WRITE) + ret = !access_ok(arg, _IOC_SIZE(cmd)); + if (ret) + return -EFAULT; + + switch (cmd) { + case MEMALLOC_IOCXGETBUFFER: + ret = copy_from_user(&memparams, (MemallocParams *) arg, + sizeof(MemallocParams)); + if (ret) + break; + + ret = AllocMemory(&memparams, filp); + + ret |= copy_to_user((MemallocParams *) arg, &memparams, + sizeof(MemallocParams)); + break; + case 
MEMALLOC_IOCSFREEBUFFER: + __get_user(busaddr, (u64 *) arg); + + ret = FreeMemory(busaddr, filp); + break; + } + return ret; +} + +static int mem2alloc_open(struct inode *inode, struct file *filp) +{ + filp->private_data = NULL; + return 0; +} + +static int mem2alloc_release(struct inode *inode, struct file *filp) +{ + struct ma_chunk *c; + for (c = filp->private_data; c;) { + struct pci_dev *pdev; + struct device *dev = NULL; + MemallocParams *p = &c->params; + struct ma_chunk *c2 = c; + pdev = pci_get_domain_bus_and_slot(p->pci_domain, p->bus, + PCI_DEVFN(p->slot, p->function)); + if (pdev) + dev = &pdev->dev; + dma_unmap_page(dev, p->dma_address, p->size, + DMA_BIDIRECTIONAL); + __free_pages(c->page, get_order(p->size)); + c = c->next; + kfree(c2); + } + return 0; +} + +void __exit mem2alloc_cleanup(void) +{ + unregister_chrdev(mem2alloc_major, "mem2alloc"); +} + +/* VFS methods */ +static struct file_operations mem2alloc_fops = { + .owner = THIS_MODULE, + .open = mem2alloc_open, + .release = mem2alloc_release, + .compat_ioctl = mem2alloc_ioctl, + .unlocked_ioctl = mem2alloc_ioctl +}; + +int __init mem2alloc_init(void) +{ + int result = + register_chrdev(mem2alloc_major, "mem2alloc", &mem2alloc_fops); + if (result < 0) + goto err; + else if (result != 0) /* this is for dynamic major */ + mem2alloc_major = result; + + return 0; + err: + return result; +} + +static int AllocMemory(MemallocParams *p, struct file *filp) +{ + int ret = 0; + struct pci_dev *pdev; + struct device *dev = NULL; + struct ma_chunk *n, *c = kzalloc(sizeof(*c), GFP_KERNEL); + gfp_t gfp_mask = __GFP_ZERO | GFP_KERNEL; + pdev = pci_get_domain_bus_and_slot(p->pci_domain, p->bus, + PCI_DEVFN(p->slot, p->function)); + if (!c) + return -ENOMEM; + if (p->size == 0) { + ret = -EINVAL; + goto err; + } + if (pdev) + dev = &pdev->dev; + if (swiotlb_max_segment() && /* swiotlb is running */ + dma_get_mask(dev) <= DMA_BIT_MASK(32)) { + gfp_mask |= __GFP_DMA; + } + c->page = alloc_pages(gfp_mask | 
__GFP_RETRY_MAYFAIL | __GFP_NOWARN, + get_order(p->size)); + if (!c->page) + c->page = alloc_pages(gfp_mask | __GFP_NOFAIL, + get_order(p->size)); + if (!c->page) { + ret = -ENOMEM; + goto err; + } + + p->dma_address = dma_map_page(dev, c->page, 0, p->size, + DMA_BIDIRECTIONAL); + ret = dma_mapping_error(dev, p->dma_address); + pci_dev_put(pdev); + if (ret) { + ret = -EFAULT; + goto err; + } + spin_lock(&mem_lock); + n = filp->private_data; + c->next = n; + filp->private_data = c; + spin_unlock(&mem_lock); + + p->phys_address = page_to_phys(c->page); + memcpy(&c->params, p, sizeof(*p)); + return 0; + err: + if (c->page) + __free_pages(c->page, get_order(p->size)); + kfree(c); + return ret; +} + +static int FreeMemory(u64 busaddr, struct file *filp) +{ + int r = -ENOENT; + struct ma_chunk *c, *prev = NULL; + struct pci_dev *pdev; + struct device *dev = NULL; + MemallocParams *p; + + spin_lock(&mem_lock); + for (c = filp->private_data; c && c->params.dma_address != busaddr; + c = c->next) + prev = c; + + if (c) { + if (prev) + prev->next = c->next; + else + filp->private_data = c->next; + } + spin_unlock(&mem_lock); + + if (!c) + return r; + + p = &c->params; + pdev = pci_get_domain_bus_and_slot(p->pci_domain, p->bus, + PCI_DEVFN(p->slot, p->function)); + if (pdev) + dev = &pdev->dev; + dma_unmap_page(dev, p->dma_address, p->size, + DMA_BIDIRECTIONAL); + __free_pages(c->page, get_order(p->size)); + kfree(c); + r = 0; + + return r; +} + +module_init(mem2alloc_init); +module_exit(mem2alloc_cleanup); + +/* module description */ +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Google"); +MODULE_DESCRIPTION("DMA RAM allocation"); diff --git a/drivers/mcst/mem2alloc/mem2alloc.h b/drivers/mcst/mem2alloc/mem2alloc.h new file mode 100644 index 000000000000..4fdf3029c3ca --- /dev/null +++ b/drivers/mcst/mem2alloc/mem2alloc.h @@ -0,0 +1,36 @@ +/* Copyright 2012 Google Inc. All Rights Reserved. 
*/ + +#ifndef MEMALLOC_H +#define MEMALLOC_H + +#include + +#undef PDEBUG +#ifdef MEMALLOC_DEBUG +#ifdef __KERNEL__ +#define PDEBUG(fmt, args...) printk(KERN_INFO "memalloc: " fmt, ##args) +#else +#define PDEBUG(fmt, args...) fprintf(stderr, fmt, ##args) +#endif +#else +#define PDEBUG(fmt, args...) +#endif + +typedef struct { + uint64_t phys_address; + uint64_t dma_address; + uint32_t size; + uint8_t pci_domain; + uint8_t bus; + uint8_t slot; + uint8_t function; +} MemallocParams; + +#define MEMALLOC_IOC_MAGIC 0xc9 + +#define MEMALLOC_IOCXGETBUFFER _IOWR(MEMALLOC_IOC_MAGIC, 1, MemallocParams) +#define MEMALLOC_IOCSFREEBUFFER _IOW(MEMALLOC_IOC_MAGIC, 2, uint64_t) + +#define MEMALLOC_IOC_MAXNR 15 + +#endif /* MEMALLOC_H */ diff --git a/drivers/mcst/mem2alloc/mem2alloc_load.sh b/drivers/mcst/mem2alloc/mem2alloc_load.sh new file mode 100644 index 000000000000..bc56db930027 --- /dev/null +++ b/drivers/mcst/mem2alloc/mem2alloc_load.sh @@ -0,0 +1,42 @@ +#!/bin/sh + +# Copyright 2012 Google Inc. All Rights Reserved. +# +# Load mem2alloc + +module="mem2alloc" +device="/dev/mem2alloc" +mode="666" + +echo + +#insert module +rm_module=`lsmod |grep $module` +if [ ! 
-z "$rm_module" ] +then + rmmod $module || exit 1 +fi +modprobe $module $* || exit 1 + +echo "module $module inserted" + +#remove old nod +rm -f $device + +#read the major asigned at loading time +major=`cat /proc/devices | grep $module | cut -c1-3` + +echo "$module major = $major" + +#create dev node +mknod $device c $major 0 + +echo "node $device created" + +#give all 'rw' access +chmod $mode $device + +echo "set node access to $mode" + +#the end +echo diff --git a/drivers/mcst/mga2-gpio/Makefile b/drivers/mcst/mga2-gpio/Makefile new file mode 100644 index 000000000000..f1e044321085 --- /dev/null +++ b/drivers/mcst/mga2-gpio/Makefile @@ -0,0 +1,2 @@ +ccflags-y := -I$(srctree)/drivers/mcst/mga2 +obj-$(CONFIG_MGA2_GPIO) := mga2-gpio.o diff --git a/drivers/mcst/mga2-gpio/mga2-gpio.c b/drivers/mcst/mga2-gpio/mga2-gpio.c new file mode 100644 index 000000000000..75e747d10afc --- /dev/null +++ b/drivers/mcst/mga2-gpio/mga2-gpio.c @@ -0,0 +1,164 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include "mga2_regs.h" + +struct mga2_gpio { + void __iomem *regs; + struct pci_dev *pci_dev; + struct gpio_chip gpio_chip; + struct irq_chip irq_chip; + raw_spinlock_t lock; + struct platform_device *pdev; +}; + +#define mga2_r(__offset) \ +({ \ + u32 __val = readl(p->regs + (__offset)); \ + DRM_DEBUG("r: %x: %s\n", __val, # __offset); \ + __val; \ +}) + +#define mga2_w(__val, __offset) do { \ + u32 __val2 = __val; \ + DRM_DEBUG("w: %x %s: %s\n", \ + __val2, #__val, #__offset); \ + writel(__val2, p->regs + (__offset)); \ +} while (0) + +#define mga2_mod_bit(__reg, __bit, __value) do { \ + u32 __tmp = mga2_r(__reg); \ + if (__value) \ + __tmp |= BIT(__bit); \ + else \ + __tmp &= ~BIT(__bit); \ + mga2_w(__tmp, __reg); \ +} while (0) + +static int mga2_gpio_get_direction(struct gpio_chip *chip, unsigned offset) +{ + struct mga2_gpio *p = gpiochip_get_data(chip); + + return !(mga2_r(MGA2_VID3_GPIO_DIR) & BIT(offset)); +} + +static int 
mga2_gpio_direction_input(struct gpio_chip *chip, + unsigned offset) +{ + struct mga2_gpio *p = gpiochip_get_data(chip); + unsigned long flags; + raw_spin_lock_irqsave(&p->lock, flags); + mga2_mod_bit(MGA2_VID3_GPIO_DIR, offset, false); + raw_spin_unlock_irqrestore(&p->lock, flags); + return 0; +} + +static int mga2_gpio_get(struct gpio_chip *chip, unsigned offset) +{ + struct mga2_gpio *p = gpiochip_get_data(chip); + return !!(mga2_r(MGA2_VID3_GPIO_IN) & BIT(offset)); +} + +static void mga2_gpio_set(struct gpio_chip *chip, unsigned offset, + int value) +{ + struct mga2_gpio *p = gpiochip_get_data(chip); + unsigned long flags; + + raw_spin_lock_irqsave(&p->lock, flags); + mga2_mod_bit(MGA2_VID3_GPIO_OUT, offset, value); + raw_spin_unlock_irqrestore(&p->lock, flags); +} + +static int mga2_gpio_direction_output(struct gpio_chip *chip, + unsigned offset, int value) +{ + struct mga2_gpio *p = gpiochip_get_data(chip); + unsigned long flags; + raw_spin_lock_irqsave(&p->lock, flags); + mga2_mod_bit(MGA2_VID3_GPIO_DIR, offset, true); + raw_spin_unlock_irqrestore(&p->lock, flags); + return 0; +} + +static int mga2_gpio_probe(struct platform_device *pdev) +{ + int ret; + struct mga2_gpio *p; + struct gpio_chip *gpio_chip; + struct device *dev = &pdev->dev; + const char *name = dev_name(dev); + struct pci_dev *pci_dev = pci_get_device(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_MGA2, + NULL); + if (!pci_dev) + return -ENODEV; + p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL); + if (!p) + return -ENOMEM; + + p->regs = devm_ioremap(dev, pci_resource_start(pci_dev, 2), + pci_resource_len(pci_dev, 2)); + if (IS_ERR(p->regs)) + return PTR_ERR(p->regs); + p->pci_dev = pci_dev; + p->pdev = pdev; + raw_spin_lock_init(&p->lock); + + platform_set_drvdata(pdev, p); + + gpio_chip = &p->gpio_chip; + gpio_chip->get_direction = mga2_gpio_get_direction; + gpio_chip->direction_input = mga2_gpio_direction_input; + gpio_chip->get = mga2_gpio_get; + gpio_chip->direction_output = 
mga2_gpio_direction_output; + gpio_chip->set = mga2_gpio_set; + gpio_chip->label = name; + gpio_chip->parent = dev; + gpio_chip->owner = THIS_MODULE; + gpio_chip->base = -1; + gpio_chip->ngpio = 8; + + ret = gpiochip_add_data(gpio_chip, p); + if (ret) { + dev_err(dev, "failed to add GPIO controller\n"); + goto err0; + } + return 0; +err0: + return ret; +} + +static int mga2_gpio_remove(struct platform_device *pdev) +{ + struct mga2_gpio *p = platform_get_drvdata(pdev); + gpiochip_remove(&p->gpio_chip); + pci_dev_put(p->pci_dev); + return 0; +} + +static const struct of_device_id __maybe_unused mga2_gpio_dt_ids[] = { + {.compatible = "mcst,mga2-gpio", }, + { } +}; + +MODULE_DEVICE_TABLE(of, mga2_gpio_dt_ids); + +static struct platform_driver mga2_gpio_driver = { + .driver = { + .name = "mga2-gpio", + .of_match_table = of_match_ptr(mga2_gpio_dt_ids), + }, + .probe = mga2_gpio_probe, + .remove = mga2_gpio_remove, +}; + +module_platform_driver(mga2_gpio_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Dmitry.E.Cherednichenko "); diff --git a/drivers/mcst/mga2-pwm/Makefile b/drivers/mcst/mga2-pwm/Makefile new file mode 100644 index 000000000000..6adaed9be15f --- /dev/null +++ b/drivers/mcst/mga2-pwm/Makefile @@ -0,0 +1,2 @@ +ccflags-y := -I$(srctree)/drivers/mcst/mga2 +obj-$(CONFIG_MGA2_PWM) := mga2-pwm.o diff --git a/drivers/mcst/mga2-pwm/mga2-pwm.c b/drivers/mcst/mga2-pwm/mga2-pwm.c new file mode 100644 index 000000000000..91b71e9f5fec --- /dev/null +++ b/drivers/mcst/mga2-pwm/mga2-pwm.c @@ -0,0 +1,188 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include "mga2_regs.h" + +struct mga2_pwm_chip { + struct pwm_chip chip; + void __iomem *regs; + struct pci_dev *pci_dev; +}; + +static inline struct mga2_pwm_chip *to_mga2_chip(struct pwm_chip *chip) +{ + return container_of(chip, struct mga2_pwm_chip, chip); +} + +#define mga2_r(__offset) \ +({ \ + u32 __val = readl(mga2->regs + (__offset) + \ + pwm->hwpwm * 
MGA2_VID3_PWM_REGS_SZ); \ + DRM_DEBUG("r: %x: %s\n", __val, # __offset); \ + __val; \ +}) + +#define mga2_w(__val, __offset) do { \ + u32 __val2 = __val; \ + DRM_DEBUG("w: %x %s: %s\n", \ + __val2, #__val, #__offset); \ + writel(__val2, mga2->regs + (__offset) + \ + pwm->hwpwm * MGA2_VID3_PWM_REGS_SZ); \ +} while (0) + +static int mga2_pwm_get_clock_division(int period_ns) +{ + unsigned long long max; + unsigned int div; + + for (div = 0; div < MGA2_PWM_MAX_DIVISION; div++) { + max = NSEC_PER_SEC * MGA2_PWM_MAX_CYCLE * + (1 << div) / MGA2_CLK_RATE; + if (period_ns <= max) + break; + } + + return (div <= MGA2_PWM_MAX_DIVISION) ? div : -ERANGE; +} + +static int mga2_pwm_set_counter(struct mga2_pwm_chip *mga2, + struct pwm_device *pwm, int div, + int duty_ns, int period_ns) +{ + u64 one_cycle, prd, high; /* 0.01 nanoseconds */ + one_cycle = NSEC_PER_SEC * 100ULL * (1 << div) / MGA2_CLK_RATE; + + high = duty_ns * 100ULL / one_cycle; + prd = period_ns * 100ULL / one_cycle; + DRM_DEBUG("duty: %d period: %d (%lld %lld %lld)\n", + duty_ns, period_ns, one_cycle, high, prd); + + /* Avoid prohibited setting */ + if (prd == 0) + return -EINVAL; + + /*0 -> 1/Fpwm, 1 -> 2/Fpwm ... 
1 -> 65536/Fpwm */ + mga2_w(prd - 1, MGA2_VID3_PWM0_PERIOD); + mga2_w((div << MGA2_VID3_B_PWMPRESCL_OFFSET) | + high | MGA2_VID3_B_PWMENABLE, + MGA2_VID3_PWM0_CTRL); + return 0; +} + +static int mga2_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + struct mga2_pwm_chip *mga2 = to_mga2_chip(chip); + int div, ret; + mga2_w(0, MGA2_VID3_PWM0_CTRL); + if (!state->enabled) + return 0; + div = mga2_pwm_get_clock_division(state->period); + if (div < 0) + return div; + + /* TODO: handle state->polarity */ + ret = mga2_pwm_set_counter(mga2, pwm, div, + state->duty_cycle, state->period); + if (ret < 0) + return div; + return ret; +} + +static void mga2_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) +{ + struct mga2_pwm_chip *mga2 = to_mga2_chip(chip); + u64 tmp, multi, rate = MGA2_CLK_RATE * 100ULL; + u32 value, prescale; + + value = mga2_r(MGA2_VID3_PWM0_CTRL); + if (value & MGA2_VID3_B_PWMENABLE) + state->enabled = true; + else + state->enabled = false; + + if (value & MGA2_VID3_B_PWMINVERT) + state->polarity = PWM_POLARITY_INVERSED; + else + state->polarity = PWM_POLARITY_NORMAL; + + value = mga2_r(MGA2_VID3_PWM0_CTRL); + prescale = value >> MGA2_VID3_B_PWMPRESCL_OFFSET; + multi = NSEC_PER_SEC * (1 << prescale) * 100ULL; + + tmp = (value & MGA2_VID3_PWM_PERIOD_MASK) * multi; + state->duty_cycle = div64_u64(tmp, rate); + + value = mga2_r(MGA2_VID3_PWM0_PERIOD); + tmp = (value & MGA2_VID3_PWM_PERIOD_MASK) * multi; + state->period = div64_u64(tmp, rate); +} + +static const struct pwm_ops mga2_pwm_ops = { + .get_state = mga2_pwm_get_state, + .apply = mga2_pwm_apply, + .owner = THIS_MODULE, +}; + +static int mga2_pwm_probe(struct platform_device *pdev) +{ + struct mga2_pwm_chip *mga2; + struct pci_dev *pci_dev = pci_get_device(PCI_VENDOR_ID_MCST_TMP, + PCI_DEVICE_ID_MCST_MGA2, + NULL); + if (!pci_dev) + return -ENODEV; + mga2 = devm_kzalloc(&pdev->dev, sizeof(*mga2), GFP_KERNEL); + if 
(!mga2) + return -ENOMEM; + + mga2->regs = devm_ioremap(&pdev->dev, pci_resource_start(pci_dev, 2), + pci_resource_len(pci_dev, 2)); + if (IS_ERR(mga2->regs)) + return PTR_ERR(mga2->regs); + mga2->pci_dev = pci_dev; + mga2->chip.ops = &mga2_pwm_ops; + mga2->chip.dev = &pdev->dev; + mga2->chip.base = -1; + mga2->chip.npwm = 2; + mga2->chip.of_xlate = of_pwm_xlate_with_flags; + mga2->chip.of_pwm_n_cells = 3; + + platform_set_drvdata(pdev, mga2); + + return pwmchip_add(&mga2->chip); +} + +static int mga2_pwm_remove(struct platform_device *pdev) +{ + struct mga2_pwm_chip *mga2 = platform_get_drvdata(pdev); + pci_dev_put(mga2->pci_dev); + return pwmchip_remove(&mga2->chip); +} + +static const struct of_device_id __maybe_unused mga2_pwm_dt_ids[] = { + {.compatible = "mcst,mga2-pwm", }, + { } +}; + +MODULE_DEVICE_TABLE(of, mga2_pwm_dt_ids); + +static struct platform_driver mga2_pwm_driver = { + .driver = { + .name = "mga2-pwm", + .of_match_table = of_match_ptr(mga2_pwm_dt_ids), + }, + .probe = mga2_pwm_probe, + .remove = mga2_pwm_remove, +}; + +module_platform_driver(mga2_pwm_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Dmitry.E.Cherednichenko "); diff --git a/drivers/mcst/mga2/Makefile b/drivers/mcst/mga2/Makefile new file mode 100644 index 000000000000..7d38c1d730e7 --- /dev/null +++ b/drivers/mcst/mga2/Makefile @@ -0,0 +1,14 @@ +# +# Makefile for the drm device driver. This driver provides support for the +# Direct Rendering Infrmga2ructure (DRI) in XFree86 4.1.0 and higher. 
+ +EXTRA_CFLAGS += -Werror -D_MCU_ +ccflags-y := -I$(srctree)/drivers/mcst/mga2/it6613 + +mga2-y := mga2_drv.o mga2_main.o mga2_mode.o mga2_fb.o mga2_irq.o \ + mga2_i2c.o mga2_pll.o mga2_dvi.o mga2_lvds.o \ + mga2_layer.o mga2_hdmi_it6613.o \ + it6613/HDMI_TX/HDMI_TX.o it6613/HDMI_TX/it6613_drv.o \ + it6613/HDMI_TX/it6613_sys.o it6613/HDMI_TX/EDID.o + +obj-$(CONFIG_DRM_MGA2) := mga2.o diff --git a/drivers/mcst/mga2/it6613/HDMI_COMMON.h b/drivers/mcst/mga2/it6613/HDMI_COMMON.h new file mode 100644 index 000000000000..60b572e4b5da --- /dev/null +++ b/drivers/mcst/mga2/it6613/HDMI_COMMON.h @@ -0,0 +1,11 @@ +#ifndef HDMI_COMMON_H_ +#define HDMI_COMMON_H_ + +typedef enum{ + COLOR_RGB444 = 0, + COLOR_YUV422, + COLOR_YUV444, + COLOR_MODE_NUM +}COLOR_TYPE; + +#endif /*HDMI_COMMON_H_*/ diff --git a/drivers/mcst/mga2/it6613/HDMI_TX/EDID.c b/drivers/mcst/mga2/it6613/HDMI_TX/EDID.c new file mode 100644 index 000000000000..afd19af28431 --- /dev/null +++ b/drivers/mcst/mga2/it6613/HDMI_TX/EDID.c @@ -0,0 +1,195 @@ +#include "hdmitx.h" + +#ifdef SUPPORT_EDID +static SYS_STATUS EDIDCheckSum(BYTE *pEDID) ; + +static SYS_STATUS +EDIDCheckSum(BYTE *pEDID) +{ + BYTE CheckSum ; + int i ; + + if( !pEDID ) + { + return ER_FAIL ; + } + for( i = 0, CheckSum = 0 ; i < 128 ; i++ ) + { + CheckSum += pEDID[i] ; CheckSum &= 0xFF ; + } + + return (CheckSum == 0)?ER_SUCCESS:ER_FAIL ; +} + +SYS_STATUS +ParseVESAEDID(BYTE *pEDID) +{ + if( ER_SUCCESS != EDIDCheckSum(pEDID) ) return ER_FAIL ; + + if( pEDID[0] != 0 || + pEDID[7] != 0 || + pEDID[1] != 0xFF || + pEDID[2] != 0xFF || + pEDID[3] != 0xFF || + pEDID[4] != 0xFF || + pEDID[5] != 0xFF || + pEDID[6] != 0xFF ) + { + return ER_FAIL ; // not a EDID 1.3 standard block. + } + + ///////////////////////////////////////////////////////// + // if need to parse EDID property , put here. 
+ ///////////////////////////////////////////////////////// + + return ER_SUCCESS ; + +} + +SYS_STATUS +ParseCEAEDID(BYTE *pCEAEDID, RX_CAP *pRxCap) +{ + BYTE offset,End ; + BYTE count ; + BYTE tag ; + int i ; + + if( !pCEAEDID || !pRxCap ) return ER_FAIL ; + + pRxCap->ValidCEA = FALSE ; + + if( ER_SUCCESS != EDIDCheckSum(pCEAEDID) ) return ER_FAIL ; + + if( pCEAEDID[0] != 0x02 || pCEAEDID[1] != 0x03 ) return ER_SUCCESS ; // not a CEA BLOCK. + End = pCEAEDID[2] ; // CEA description. + pRxCap->VideoMode = pCEAEDID[3] ; + + if (pRxCap->VideoMode & CEA_SUPPORT_YUV444) + OS_PRINTF("Support Color: YUV444\n"); + if (pRxCap->VideoMode & CEA_SUPPORT_YUV422) + OS_PRINTF("Support Color: YUV422\n"); + + for( offset = 0 ; offset < 0x80 ; offset ++ ) + { + if( (offset % 0x10) == 0 ) + { + OS_PRINTF("[%02X]", offset ) ; + } + else if((offset%0x10)==0x08) + { + OS_PRINTF( " -" ) ; + } + OS_PRINTF(" %02X",pCEAEDID[offset]) ; + if((offset%0x10)==0x0f) + { + OS_PRINTF("\n") ; + } + } + + pRxCap->VDOModeCount = 0 ; + pRxCap->idxNativeVDOMode = 0xff ; + for( offset = 4 ; offset < End ; ) + { + tag = pCEAEDID[offset] >> 5 ; + count = pCEAEDID[offset] & 0x1f ; + switch( tag ) + { + case 0x01: // Audio Data Block ; + pRxCap->AUDDesCount = count/3 ; + offset++ ; + for( i = 0 ; i < pRxCap->AUDDesCount ; i++ ) + { + pRxCap->AUDDes[i].uc[0] = pCEAEDID[offset++] ; + pRxCap->AUDDes[i].uc[1] = pCEAEDID[offset++] ; + pRxCap->AUDDes[i].uc[2] = pCEAEDID[offset++] ; + + } + + break ; + + case 0x02: // Video Data Block ; + //pRxCap->VDOModeCount = 0 ; + offset ++ ; + for( i = 0,pRxCap->idxNativeVDOMode = 0xff ; i < count ; i++, offset++ ) + { + BYTE VIC ; + VIC = pCEAEDID[offset] & (~0x80) ; + OS_PRINTF("HDMI Sink VIC(Video Identify Code)=%d\n", VIC); + // if( FindModeTableEntryByVIC(VIC) != -1 ) + { + pRxCap->VDOMode[pRxCap->VDOModeCount] = VIC ; + if( pCEAEDID[offset] & 0x80 ) + { + pRxCap->idxNativeVDOMode = (BYTE)pRxCap->VDOModeCount ; + // iVideoModeSelect = pRxCap->VDOModeCount ; + } + + 
pRxCap->VDOModeCount++ ; + } + } + break ; + case 0x03: // Vendor Specific Data Block ; + offset ++ ; + pRxCap->IEEEOUI = (ULONG)pCEAEDID[offset+2] ; + pRxCap->IEEEOUI <<= 8 ; + pRxCap->IEEEOUI += (ULONG)pCEAEDID[offset+1] ; + pRxCap->IEEEOUI <<= 8 ; + pRxCap->IEEEOUI += (ULONG)pCEAEDID[offset] ; + + /////////////////////////////////////////////////////////// + // For HDMI 1.3 extension handling. + /////////////////////////////////////////////////////////// + + pRxCap->dc.uc = 0 ; + pRxCap->MaxTMDSClock = 0 ; + pRxCap->lsupport.uc = 0 ; + pRxCap->ValidHDMI = (pRxCap->IEEEOUI==HDMI_IEEEOUI)? TRUE:FALSE ; + if( (pRxCap->ValidHDMI) && (count > 5 )) + { + // HDMI 1.3 extension + pRxCap->dc.uc = pCEAEDID[offset+5] ; + pRxCap->MaxTMDSClock = pCEAEDID[offset+6] ; + pRxCap->lsupport.uc = pCEAEDID[offset+7] ; + + if(pRxCap->lsupport.info.Latency_Present) + { + pRxCap->V_Latency = pCEAEDID[offset+9] ; + pRxCap->A_Latency = pCEAEDID[offset+10] ; + } + + if(pRxCap->lsupport.info.I_Latency_Present) + { + pRxCap->V_I_Latency = pCEAEDID[offset+11] ; + pRxCap->A_I_Latency = pCEAEDID[offset+12] ; + } + + } + + offset += count ; // ignore the remaind. 
+ break ; + + case 0x04: // Speaker Data Block ; + offset ++ ; + pRxCap->SpeakerAllocBlk.uc[0] = pCEAEDID[offset] ; + pRxCap->SpeakerAllocBlk.uc[1] = pCEAEDID[offset+1] ; + pRxCap->SpeakerAllocBlk.uc[2] = pCEAEDID[offset+2] ; + offset += 3 ; + break ; + case 0x05: // VESA Data Block ; + offset += count+1 ; + break ; + case 0x07: // Extended Data Block ; + offset += count+1 ; //ignore + break ; + default: + offset += count+1 ; // ignore + } + } + + pRxCap->ValidCEA = TRUE ; + return ER_SUCCESS ; +} + + + +#endif // SUPPORT_EDID diff --git a/drivers/mcst/mga2/it6613/HDMI_TX/HDMI_TX.c b/drivers/mcst/mga2/it6613/HDMI_TX/HDMI_TX.c new file mode 100644 index 000000000000..7b66cc476481 --- /dev/null +++ b/drivers/mcst/mga2/it6613/HDMI_TX/HDMI_TX.c @@ -0,0 +1,311 @@ +#include "mcu.h" +#include "it6613_sys.h" +#include "hdmitx.h" +#include "HDMI_TX.h" + + +int TX_HDP = FALSE; +extern BYTE bOutputColorMode; +extern BYTE bInputColorMode; +extern BOOL bChangeMode; +extern RX_CAP RxCapability; +extern BOOL bHDMIMode; +extern BOOL bAudioEnable; +BOOL ParseEDID(void); + + +INSTANCE InitInstanceData = +{ + 0,0, //I2C_DEV, I2C_ADDR + 0, //bIntType (TxCLK active, Push-Pull Mode, INT active low) + 0,/* | T_MODE_CCIR656 | T_MODE_SYNCEMB | T_MODE_INDDR */ // bInputVideoSignalType + + B_AUDFMT_STD_I2S, // bOutputAudioMode, 0x00, standard i2s, rising edge to sample ws/i2s, not full packet mode REG[0xE1] + + 0,// bAudioChannelSwap + B_AUD_EN_I2S0 | B_AUD_I2S | M_AUD_16BIT, // bAudioChannelEnable, 0x01, REG[0xE0], defined in it6613_drv.h + AUDFS_48KHz, //0, //bAudFs, + 0, // TMDSClock + TRUE,//bAuthenticated + TRUE,// bHDMIMode + FALSE,// bIntPOL + FALSE // bHPD +} ; + +bool HDMITX_ChipVerify(void){ + bool bPass = FALSE; + alt_u8 szID[4]; + int i; + + + for(i=0;i<4;i++) + szID[i] = HDMITX_ReadI2C_Byte(i); + +// if (szID[0] == 0x00 && szID[1] == 0xCA && szID[1] == 0x13 && szID[1] == 0x06) szID[0] ??? 
+ if ((szID[1] == 0xCA && szID[2] == 0x13 && szID[3] == 0x06) || (szID[1] == 0xCA && szID[2] == 0x13 && szID[3] == 0x16)){ + bPass = TRUE; + printf("TX Chip Revision ID: %d\n", szID[0]); + }else{ + printf("NG, Read TX Chip ID:%02X%02X%02X%02Xh (expected:00CA1306h)\n", szID[0], szID[1], szID[2], szID[3]); + } + + return bPass; +} + +bool HDMITX_Init(void){ + bool bSuccess = TRUE; +// HDMITX_Reset(); + usleep(500*1000); + if (!HDMITX_ChipVerify()){ + OS_PRINTF("Failed to find IT6613 HDMI-TX Chip.\n"); + bSuccess = FALSE; + //return 0; + } + + HDMITX_InitInstance(&InitInstanceData) ; + InitIT6613() ; + + + return bSuccess; +} + +bool HDMITX_HPD(void){ + if (TX_HDP) + return TRUE; + return FALSE; +} + + +void HDMITX_SetAVIInfoFrame(alt_u8 VIC, alt_u8 OutputColorMode, bool b16x9, bool ITU709) +{ + AVI_InfoFrame AviInfo; + alt_u8 pixelrep = 0; + + OS_PRINTF("HDMITX_SetAVIInfoFrame, VIC=%d, ColorMode=%d, Aspect-Ratio=%s, ITU709=%s\n", + VIC, OutputColorMode, b16x9?"16:9":"4:3", ITU709?"Yes":"No"); + + AviInfo.pktbyte.AVI_HB[0] = AVI_INFOFRAME_TYPE|0x80 ; + AviInfo.pktbyte.AVI_HB[1] = AVI_INFOFRAME_VER ; + AviInfo.pktbyte.AVI_HB[2] = AVI_INFOFRAME_LEN ; + + switch(OutputColorMode) + { + case F_MODE_YUV444: + // AviInfo.info.ColorMode = 2 ; + AviInfo.pktbyte.AVI_DB[0] = (2<<5)|(1<<4) ; + break ; + case F_MODE_YUV422: + // AviInfo.info.ColorMode = 1 ; + AviInfo.pktbyte.AVI_DB[0] = (1<<5)|(1<<4) ; + break ; + case F_MODE_RGB444: + default: + // AviInfo.info.ColorMode = 0 ; + AviInfo.pktbyte.AVI_DB[0] = (0<<5)|(1<<4) ; + break ; + } + AviInfo.pktbyte.AVI_DB[1] = 8 ; + AviInfo.pktbyte.AVI_DB[1] |= (!b16x9)?(1<<4):(2<<4) ; // 4:3 or 16:9 + AviInfo.pktbyte.AVI_DB[1] |= (!ITU709)?(1<<6):(2<<6) ; // 4:3 or 16:9 + AviInfo.pktbyte.AVI_DB[2] = 0 ; + AviInfo.pktbyte.AVI_DB[3] = VIC ; + AviInfo.pktbyte.AVI_DB[4] = pixelrep & 3 ; + AviInfo.pktbyte.AVI_DB[5] = 0 ; + AviInfo.pktbyte.AVI_DB[6] = 0 ; + AviInfo.pktbyte.AVI_DB[7] = 0 ; + AviInfo.pktbyte.AVI_DB[8] = 0 ; + 
AviInfo.pktbyte.AVI_DB[9] = 0 ; + AviInfo.pktbyte.AVI_DB[10] = 0 ; + AviInfo.pktbyte.AVI_DB[11] = 0 ; + AviInfo.pktbyte.AVI_DB[12] = 0 ; + + EnableAVIInfoFrame(TRUE, (unsigned char *)&AviInfo) ; +} + +void HDMITX_ChangeVideoTiming(int VIC){ + int OutputVideoTiming = VIC; + int HdmiColorMode; + + switch(bOutputColorMode) + { + case F_MODE_YUV444: + HdmiColorMode = HDMI_YUV444; + break ; + case F_MODE_YUV422: + HdmiColorMode = HDMI_YUV422; + break ; + case F_MODE_RGB444: + default: + HdmiColorMode = HDMI_RGB444; + break ; + } + + HDMITX_ChangeDisplayOption(OutputVideoTiming, HdmiColorMode); // just modify variable. Take effect when HDMITX_SetOutput is called in HDMITX_DevLoopProc +} + +void HDMITX_ChangeVideoTimingAndColor(int VIC, COLOR_TYPE Color){ + int OutputVideoTiming = VIC; + int HdmiColorMode; + + switch(Color) + { + case COLOR_YUV444: + HdmiColorMode = HDMI_YUV444; + break ; + case COLOR_YUV422: + HdmiColorMode = HDMI_YUV422; + break ; + case COLOR_RGB444: + default: + HdmiColorMode = HDMI_RGB444; + break ; + } + HDMITX_ChangeDisplayOption(OutputVideoTiming, HdmiColorMode); +} + +void HDMITX_DisableVideoOutput(void){ + DisableVideoOutput(); +} + +void HDMITX_EnableVideoOutput(void){ + HDMITX_SetOutput(); +} + +void HDMITX_SetColorSpace(COLOR_TYPE InputColor, COLOR_TYPE OutputColor){ + // DisableVideoOutput(); + bInputColorMode = InputColor; + bOutputColorMode = OutputColor; + // HDMITX_SetOutput(); +} + +bool HDMITX_IsSinkSupportYUV444(void){ + bool bSupport = FALSE; + if (RxCapability.Valid && RxCapability.ValidHDMI && RxCapability.ValidCEA && + (RxCapability.VideoMode & CEA_SUPPORT_YUV444)) + bSupport = TRUE; + return bSupport; +} + +bool HDMITX_IsSinkSupportYUV422(void){ + bool bSupport = FALSE; + if (RxCapability.Valid && RxCapability.ValidHDMI && RxCapability.ValidCEA && + (RxCapability.VideoMode & CEA_SUPPORT_YUV422)) + bSupport = TRUE; + return bSupport; +} + +bool HDMITX_IsSinkSupportColorDepth36(void){ + bool bSupport = FALSE; + if 
(RxCapability.Valid && RxCapability.ValidHDMI && RxCapability.ValidCEA && + RxCapability.dc.info.DC_36Bit) + bSupport = TRUE; + return bSupport; +} + + +bool HDMITX_IsSinkSupportColorDepth30(void){ + bool bSupport = FALSE; + if (RxCapability.Valid && RxCapability.ValidHDMI && RxCapability.ValidCEA && + RxCapability.dc.info.DC_30Bit) + bSupport = TRUE; + return bSupport; +} + +void HDMITX_SetOutputColorDepth(int ColorDepth){ + SetOutputColorDepthPhase(ColorDepth, 0); +} + + + +bool HDMITX_DevLoopProc() +{ + static BYTE PreHPDChange = 0; + static BYTE PreHPD = 0; + BYTE HPD, HPDChange ; + + // Richard CheckHDMI(&HPD,&HPDChange) ; + CheckHDMITX(&HPD,&HPDChange) ; + + if (HPD == PreHPD && HPDChange) // richard add + return FALSE; + + TX_HDP = HPD; + PreHPD = HPD; + PreHPDChange = HPDChange; + + if( HPDChange ) + { + + + OS_PRINTF("HPDChange\n"); + if( HPD ) + { + OS_PRINTF("HPD=ON\n"); + RxCapability.Valid = ParseEDID() ; + //bOutputColorMode = F_MODE_YUV444; //F_MODE_RGB444; // richard node. users can change color space here according to HDMI sink + + if( RxCapability.Valid && RxCapability.ValidHDMI ) + { + OS_PRINTF("HDMI Display found\n"); + bHDMIMode = TRUE ; + + if(RxCapability.VideoMode & (1<<6)) + { + bAudioEnable = TRUE ; + } + +#if 0 // richard, don't care edid, the output always RGB444 + if( RxCapability.VideoMode & (1<<5)) + { + bOutputColorMode &= ~F_MODE_CLRMOD_MASK ; + bOutputColorMode |= F_MODE_YUV444; + } + else if (RxCapability.VideoMode & (1<<4)) + { + bOutputColorMode &= ~F_MODE_CLRMOD_MASK ; + bOutputColorMode |= F_MODE_YUV422 ; + } +#endif + } + else if (!RxCapability.Valid) + { + OS_PRINTF("Failed to read EDID\n"); + + // enable it when edid fail + bHDMIMode = TRUE ; + bAudioEnable = TRUE ; + } + else + { + OS_PRINTF("Invalid HDMI Display\n"); + bHDMIMode = FALSE ; + bAudioEnable = FALSE ; + } + + OS_PRINTF("HDMITX_SetOutput\n"); + //HDMITX_SetOutput() ; + + } + else + { + OS_PRINTF("HPD=OFF\n"); + // unplug mode, ... 
+ OS_PRINTF("DisableVideoOutput\n"); + //DisableVideoOutput() ; + RxCapability.Valid = FALSE; // richard add + RxCapability.ValidHDMI = FALSE; // richard add + RxCapability.ValidCEA = FALSE; // richard add + } + } + else // no stable but need to process mode change procedure + { + if(bChangeMode && HPD) + { + OS_PRINTF("HDMITX_SetOutput\n"); + HDMITX_SetOutput() ; + } + } + + return HPDChange; +} + diff --git a/drivers/mcst/mga2/it6613/HDMI_TX/HDMI_TX.h b/drivers/mcst/mga2/it6613/HDMI_TX/HDMI_TX.h new file mode 100644 index 000000000000..487b530f5763 --- /dev/null +++ b/drivers/mcst/mga2/it6613/HDMI_TX/HDMI_TX.h @@ -0,0 +1,27 @@ +#ifndef HDMI_TX_H_ +#define HDMI_TX_H_ + +#include "HDMI_COMMON.h" + + + +bool HDMITX_Init(void); +bool HDMITX_ChipVerify(void); +bool HDMITX_HPD(void); +void HDMITX_ChangeVideoTiming(int VIC); +void HDMITX_ChangeVideoTimingAndColor(int VIC, COLOR_TYPE Color); +void HDMITX_SetAVIInfoFrame(alt_u8 VIC, alt_u8 OutputColorMode, bool b16x9, bool ITU709); + +void HDMITX_DisableVideoOutput(void); +void HDMITX_EnableVideoOutput(void); +void HDMITX_SetColorSpace(COLOR_TYPE InputColor, COLOR_TYPE OutputColor); +bool HDMITX_DevLoopProc(void); + +bool HDMITX_IsSinkSupportYUV444(void); +bool HDMITX_IsSinkSupportYUV422(void); + +bool HDMITX_IsSinkSupportColorDepth36(void); +bool HDMITX_IsSinkSupportColorDepth30(void); +void HDMITX_SetOutputColorDepth(int ColorDepth); + +#endif /*HDMI_TX_H_*/ diff --git a/drivers/mcst/mga2/it6613/HDMI_TX/edid.h b/drivers/mcst/mga2/it6613/HDMI_TX/edid.h new file mode 100644 index 000000000000..b5699226a3cc --- /dev/null +++ b/drivers/mcst/mga2/it6613/HDMI_TX/edid.h @@ -0,0 +1,126 @@ +#ifndef _EDID_H_ +#define _EDID_H_ +#ifdef SUPPORT_EDID + +///////////////////////////////////////// +// RX Capability. 
+///////////////////////////////////////// +typedef struct { + BYTE b16bit:1 ; + BYTE b20bit:1 ; + BYTE b24bit:1 ; + BYTE Rsrv:5 ; +} LPCM_BitWidth ; + +typedef enum { + AUD_RESERVED_0 = 0 , + AUD_LPCM, + AUD_AC3, + AUD_MPEG1, + AUD_MP3, + AUD_MPEG2, + AUD_AAC, + AUD_DTS, + AUD_ATRAC, + AUD_ONE_BIT_AUDIO, + AUD_DOLBY_DIGITAL_PLUS, + AUD_DTS_HD, + AUD_MAT_MLP, + AUD_DST, + AUD_WMA_PRO, + AUD_RESERVED_15 +} AUDIO_FORMAT_CODE ; + +typedef union { + struct { + BYTE channel:3 ; + BYTE AudioFormatCode:4 ; + BYTE Rsrv1:1 ; + + BYTE b32KHz:1 ; + BYTE b44_1KHz:1 ; + BYTE b48KHz:1 ; + BYTE b88_2KHz:1 ; + BYTE b96KHz:1 ; + BYTE b176_4KHz:1 ; + BYTE b192KHz:1 ; + BYTE Rsrv2:1 ; + BYTE ucCode ; + } s ; + BYTE uc[3] ; + +} AUDDESCRIPTOR ; + +typedef union { + struct { + BYTE FL_FR:1 ; + BYTE LFE:1 ; + BYTE FC:1 ; + BYTE RL_RR:1 ; + BYTE RC:1 ; + BYTE FLC_FRC:1 ; + BYTE RLC_RRC:1 ; + BYTE Reserve:1 ; + BYTE Unuse[2] ; + } s ; + BYTE uc[3] ; +} SPK_ALLOC ; + +#define CEA_SUPPORT_UNDERSCAN (1<<7) +#define CEA_SUPPORT_AUDIO (1<<6) +#define CEA_SUPPORT_YUV444 (1<<5) +#define CEA_SUPPORT_YUV422 (1<<4) +#define CEA_NATIVE_MASK 0xF + +typedef union _tag_DCSUPPORT { + struct { + BYTE DVI_Dual:1 ; + BYTE Rsvd:2 ; + BYTE DC_Y444:1 ; + BYTE DC_30Bit:1 ; + BYTE DC_36Bit:1 ; + BYTE DC_48Bit:1 ; + BYTE SUPPORT_AI:1 ; + } info ; + BYTE uc ; +} DCSUPPORT ; // Richard Note: Color Depth + +typedef union _LATENCY_SUPPORT{ + struct { + BYTE Rsvd:6 ; + BYTE I_Latency_Present:1 ; + BYTE Latency_Present:1 ; + } info ; + BYTE uc ; +} LATENCY_SUPPORT ; + +#define HDMI_IEEEOUI 0x0c03 + +typedef struct _RX_CAP{ + BYTE Valid; // richard add + BYTE VideoMode ; + BYTE VDOModeCount ; + BYTE idxNativeVDOMode ; + BYTE VDOMode[128] ; + BYTE AUDDesCount ; + AUDDESCRIPTOR AUDDes[32] ; + ULONG IEEEOUI ; + DCSUPPORT dc ; + BYTE MaxTMDSClock ; + LATENCY_SUPPORT lsupport ; + BYTE V_Latency ; + BYTE A_Latency ; + BYTE V_I_Latency ; + BYTE A_I_Latency ; + SPK_ALLOC SpeakerAllocBlk ; + BYTE ValidCEA:1 ; + BYTE ValidHDMI:1 
; +} RX_CAP ; + +SYS_STATUS ParseVESAEDID(BYTE *pEDID) ; +SYS_STATUS ParseCEAEDID(BYTE *pCEAEDID, RX_CAP *pRxCap) ; + + + +#endif // SUPPORT_EDID +#endif // _EDID_H_ diff --git a/drivers/mcst/mga2/it6613/HDMI_TX/hdmitx.h b/drivers/mcst/mga2/it6613/HDMI_TX/hdmitx.h new file mode 100644 index 000000000000..a848e5a4b940 --- /dev/null +++ b/drivers/mcst/mga2/it6613/HDMI_TX/hdmitx.h @@ -0,0 +1,75 @@ +#ifndef _HDMITX_H_ +#define _HDMITX_H_ + +#define SUPPORT_EDID +//#define SUPPORT_HDCP +#define SUPPORT_INPUTRGB +#define SUPPORT_INPUTYUV444 +#define SUPPORT_INPUTYUV422 +#define SUPPORT_SYNCEMBEDDED +#define SUPPORT_DEGEN +#define SUPPORT_INPUTYUV // richard add + + +#ifndef _MCU_ // DSSSHA need large computation data rather than 8051 supported. +#define SUPPORT_DSSSHA +#endif + +#if defined(SUPPORT_INPUTYUV444) || defined(SUPPORT_INPUTYUV422) +#define SUPPORT_INPUTYUV +#endif + +#ifdef _MCU_ + #include "mcu.h" +#else // not MCU + #include + #include + #include + #include + #include + #include + #include "ioaccess.h" + #include "install.h" + #include "pc.h" +#endif // MCU + +#include "typedef.h" +#include "edid.h" +// #include "dss_sha.h" +#include "it6613_drv.h" + +#define HDMITX_INSTANCE_MAX 1 + +#define SIZEOF_CSCMTX 18 +#define SIZEOF_CSCGAIN 6 +#define SIZEOF_CSCOFFSET 3 + +/////////////////////////////////////////////////////////////////////// +// Output Mode Type +/////////////////////////////////////////////////////////////////////// + +#define RES_ASPEC_4x3 0 +#define RES_ASPEC_16x9 1 +#define F_MODE_REPT_NO 0 +#define F_MODE_REPT_TWICE 1 +#define F_MODE_REPT_QUATRO 3 +#define F_MODE_CSC_ITU601 0 +#define F_MODE_CSC_ITU709 1 + +/////////////////////////////////////////////////////////////////////// +// ROM OFFSET +/////////////////////////////////////////////////////////////////////// +#define ROMOFF_INT_TYPE 0 +#define ROMOFF_INPUT_VIDEO_TYPE 1 +#define ROMOFF_OUTPUT_AUDIO_MODE 8 +#define ROMOFF_AUDIO_CH_SWAP 9 + + + +#define TIMER_LOOP_LEN 10 +#define MS(x) 
(((x)+(TIMER_LOOP_LEN-1))/TIMER_LOOP_LEN) ; // for timer loop + + + +#endif // _HDMITX_H_ + diff --git a/drivers/mcst/mga2/it6613/HDMI_TX/it6613_drv.c b/drivers/mcst/mga2/it6613/HDMI_TX/it6613_drv.c new file mode 100644 index 000000000000..504df9037ea1 --- /dev/null +++ b/drivers/mcst/mga2/it6613/HDMI_TX/it6613_drv.c @@ -0,0 +1,3465 @@ +///////////////////////////////////////////////////////////////////// +// IT6613.C +// Driver code for platform independent +///////////////////////////////////////////////////////////////////// +#include "hdmitx.h" +//#include "dss_sha.h" + +#define MSCOUNT 1000 +#define LOADING_UPDATE_TIMEOUT (3000/32) // 3sec +// USHORT u8msTimer = 0 ; +// USHORT TimerServF = TRUE ; + + + +////////////////////////////////////////////////////////////////////// +// Authentication status +////////////////////////////////////////////////////////////////////// + +// #define TIMEOUT_WAIT_AUTH MS(2000) + +#define Switch_HDMITX_Bank(x) HDMITX_WriteI2C_Byte(0x0f,(x)&1) + +#define HDMITX_OrREG_Byte(reg,ormask) HDMITX_WriteI2C_Byte(reg,(HDMITX_ReadI2C_Byte(reg) | (ormask))) +#define HDMITX_AndREG_Byte(reg,andmask) HDMITX_WriteI2C_Byte(reg,(HDMITX_ReadI2C_Byte(reg) & (andmask))) +#define HDMITX_SetREG_Byte(reg,andmask,ormask) HDMITX_WriteI2C_Byte(reg,((HDMITX_ReadI2C_Byte(reg) & (andmask))|(ormask))) + +////////////////////////////////////////////////////////////////////// +// General global variables +////////////////////////////////////////////////////////////////////// +// static _IDATA TXVideo_State_Type VState ; +// static _IDATA TXAudio_State_Type AState ; +// static _XDATA MODE_DESCRIPTION ModeID = MODE_InvalidMode; + +// BYTE I2C_DEV ; +// BYTE I2C_ADDR ; +////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////// +// // Interrupt Type +// ///////////////////////////////////////////////// +// BYTE bIntType = 0 ; +// ///////////////////////////////////////////////// +// // Video 
Property +// ///////////////////////////////////////////////// +// BYTE bInputVideoMode ; +// BYTE bOutputVideoMode ; +// BYTE Instance[0].bInputVideoSignalType = 0 /* | T_MODE_CCIR656 | T_MODE_SYNCEMB | T_MODE_INDDR */ ; // for Sync Embedded,CCIR656,InputDDR +// BOOL Instance[0].bAuthenticated = FALSE ; +// ///////////////////////////////////////////////// +// // Video Property +// ///////////////////////////////////////////////// +// BYTE bOutputAudioMode = 0 ; +// BYTE bAudioChannelSwap = 0 ; +////////////////////////////////////////////////////////////////////// +// BOOL Instance[0].bHDMIMode = FALSE; +// BOOL Instance[0].bIntPOL = FALSE ; // 0 = Low Active +// BOOL bHPD ; + +INSTANCE Instance[HDMITX_INSTANCE_MAX] ; + +////////////////////////////////////////////////////////////////////// +// Function Prototype +////////////////////////////////////////////////////////////////////// + +// static BOOL IsRxSense() ; + +static void SetInputMode(BYTE InputMode,BYTE bInputSignalType) ; +static void SetCSCScale(BYTE bInputMode,BYTE bOutputMode) ; +// static void SetupAFE(BYTE ucFreqInMHz) ; +static void SetupAFE(VIDEOPCLKLEVEL PCLKLevel) ; +static void FireAFE(void) ; + + +static SYS_STATUS SetAudioFormat(BYTE NumChannel,BYTE AudioEnable,BYTE bSampleFreq,BYTE AudSWL,BYTE AudioCatCode) ; +static SYS_STATUS SetNCTS(ULONG PCLK,ULONG Fs) ; + +static void AutoAdjustAudio(void) ; +static void SetupAudioChannel(void) ; + +static SYS_STATUS SetAVIInfoFrame(AVI_InfoFrame *pAVIInfoFrame) ; +static SYS_STATUS SetAudioInfoFrame(Audio_InfoFrame *pAudioInfoFrame) ; +//static SYS_STATUS SetSPDInfoFrame(SPD_InfoFrame *pSPDInfoFrame) ; +//static SYS_STATUS SetMPEGInfoFrame(MPEG_InfoFrame *pMPGInfoFrame) ; +static SYS_STATUS ReadEDID(BYTE *pData,BYTE bSegment,BYTE offset,SHORT Count) ; +static void AbortDDC(void) ; +static void ClearDDCFIFO(void) ; +static void ClearDDCFIFO(void) ; +#if 0 +static void GenerateDDCSCLK() ; +static SYS_STATUS HDCP_EnableEncryption() ; +static void 
HDCP_ResetAuth() ; +static void HDCP_Auth_Fire() ; +static void HDCP_StartAnCipher() ; +static void HDCP_StopAnCipher() ; +static void HDCP_GenerateAn() ; +// RICHARD static SYS_STATUS HDCP_GetVr(ULONG *pVr) ; +static SYS_STATUS HDCP_GetBCaps(PBYTE pBCaps ,PUSHORT pBStatus) ; +static SYS_STATUS HDCP_GetBKSV(BYTE *pBKSV) ; +static SYS_STATUS HDCP_Authenticate() ; +static SYS_STATUS HDCP_Authenticate_Repeater() ; +static SYS_STATUS HDCP_VerifyIntegration() ; +static SYS_STATUS HDCP_GetKSVList(BYTE *pKSVList,BYTE cDownStream) ; +static SYS_STATUS HDCP_CheckSHA(BYTE M0[],USHORT BStatus,BYTE KSVList[],int devno,BYTE Vr[]) ; +static void HDCP_ResumeAuthentication() ; +static void HDCP_Reset() ; + +static void ENABLE_NULL_PKT() ; +static void ENABLE_ACP_PKT() ; +static void ENABLE_ISRC1_PKT() ; +static void ENABLE_ISRC2_PKT() ; +static void ENABLE_AVI_INFOFRM_PKT() ; +static void ENABLE_AUD_INFOFRM_PKT() ; +static void ENABLE_SPD_INFOFRM_PKT() ; +static void ENABLE_MPG_INFOFRM_PKT() ; +#endif +static void ENABLE_AVI_INFOFRM_PKT(void) ; + +static void DISABLE_NULL_PKT(void) ; +static void DISABLE_ACP_PKT(void) ; +static void DISABLE_ISRC1_PKT(void) ; +static void DISABLE_ISRC2_PKT(void) ; +static void DISABLE_AVI_INFOFRM_PKT(void) ; +static void DISABLE_AUD_INFOFRM_PKT(void) ; +static void DISABLE_SPD_INFOFRM_PKT(void) ; +static void DISABLE_MPG_INFOFRM_PKT(void) ; +//static BYTE countbit(BYTE b) ; +#ifdef HDMITX_REG_DEBUG +static void DumpCatHDMITXReg(void) ; +#endif // DEBUG + + + + + +////////////////////////////////////////////////////////////////////// +// Function Body. +////////////////////////////////////////////////////////////////////// + + +////////////////////////////////////////////////////////////////////// +// utility function for main.. 
+////////////////////////////////////////////////////////////////////// + + + +// int aiNonCEAVIC[] = { 2 } ; + +// Y,C,RGB offset +// for register 73~75 +static _CODE BYTE bCSCOffset_16_235[] = +{ + 0x00,0x80,0x00 +}; + +static _CODE BYTE bCSCOffset_0_255[] = +{ + 0x10,0x80,0x10 +}; + +#ifdef SUPPORT_INPUTRGB + static _CODE BYTE bCSCMtx_RGB2YUV_ITU601_16_235[] = + { + 0xB2,0x04,0x64,0x02,0xE9,0x00, + 0x93,0x3C,0x16,0x04,0x56,0x3F, + 0x49,0x3D,0x9F,0x3E,0x16,0x04 + } ; + + static _CODE BYTE bCSCMtx_RGB2YUV_ITU601_0_255[] = + { + 0x09,0x04,0x0E,0x02,0xC8,0x00, + 0x0E,0x3D,0x83,0x03,0x6E,0x3F, + 0xAC,0x3D,0xD0,0x3E,0x83,0x03 + } ; + + static _CODE BYTE bCSCMtx_RGB2YUV_ITU709_16_235[] = + { + 0xB8,0x05,0xB4,0x01,0x93,0x00, + 0x49,0x3C,0x16,0x04,0x9F,0x3F, + 0xD9,0x3C,0x10,0x3F,0x16,0x04 + } ; + + static _CODE BYTE bCSCMtx_RGB2YUV_ITU709_0_255[] = + { + 0xE5,0x04,0x78,0x01,0x81,0x00, + 0xCE,0x3C,0x83,0x03,0xAE,0x3F, + 0x49,0x3D,0x33,0x3F,0x83,0x03 + } ; +#endif +/* +#ifdef SUPPORT_INPUTYUV + + static _CODE BYTE bCSCMtx_YUV2RGB_ITU601_16_235[] = + { + 0x00,0x08,0x6A,0x3A,0x4F,0x3D, + 0x00,0x08,0xF7,0x0A,0x00,0x00, + 0x00,0x08,0x00,0x00,0xDB,0x0D + } ; + + static _CODE BYTE bCSCMtx_YUV2RGB_ITU601_0_255[] = + { + 0x4F,0x09,0x81,0x39,0xDF,0x3C, + 0x4F,0x09,0xC2,0x0C,0x00,0x00, + 0x4F,0x09,0x00,0x00,0x1E,0x10 + } ; + + static _CODE BYTE bCSCMtx_YUV2RGB_ITU709_16_235[] = + { + 0x00,0x08,0x53,0x3C,0x89,0x3E, + 0x00,0x08,0x51,0x0C,0x00,0x00, + 0x00,0x08,0x00,0x00,0x87,0x0E + } ; + + static _CODE BYTE bCSCMtx_YUV2RGB_ITU709_0_255[] = + { + 0x4F,0x09,0xBA,0x3B,0x4B,0x3E, + 0x4F,0x09,0x56,0x0E,0x00,0x00, + 0x4F,0x09,0x00,0x00,0xE7,0x10 + } ; +#endif*/ + +#ifdef SUPPORT_INPUTYUV + + BYTE bCSCMtx_YUV2RGB_ITU601_16_235[] = + { + 0x00,0x08,0x6A,0x3A,0x4F,0x3D, + 0x00,0x08,0xF7,0x0A,0x00,0x00, + 0x00,0x08,0x00,0x00,0xDB,0x0D + } ; + + BYTE bCSCMtx_YUV2RGB_ITU601_0_255[] = + { + 0x4F,0x09,0x81,0x39,0xDF,0x3C, + 0x4F,0x09,0xC2,0x0C,0x00,0x00, + 0x4F,0x09,0x00,0x00,0x1E,0x10 + } ; + + 
BYTE bCSCMtx_YUV2RGB_ITU709_16_235[] = + { + 0x00,0x08,0x53,0x3C,0x89,0x3E, + 0x00,0x08,0x51,0x0C,0x00,0x00, + 0x00,0x08,0x00,0x00,0x87,0x0E + } ; + + BYTE bCSCMtx_YUV2RGB_ITU709_0_255[] = + { + 0x4F,0x09,0xBA,0x3B,0x4B,0x3E, + 0x4F,0x09,0x56,0x0E,0x00,0x00, + 0x4F,0x09,0x00,0x00,0xE7,0x10 + } ; +#endif + + + +////////////////////////////////////////////////////////////////////// +// external Interface // +////////////////////////////////////////////////////////////////////// + +void +HDMITX_InitInstance(INSTANCE *pInstance) +{ + if(pInstance && 0 < HDMITX_INSTANCE_MAX) + { + Instance[0] = *pInstance ; + } +} +#if 0 +static BYTE InitIT6613_HDCPROM() +{ + BYTE uc[5] ; + Switch_HDMITX_Bank(0) ; + HDMITX_WriteI2C_Byte(0xF8,0xC3) ; //password + HDMITX_WriteI2C_Byte(0xF8,0xA5) ; // password + HDMITX_WriteI2C_Byte(REG_TX_LISTCTRL,0x60) ; // Richard, ???? + I2C_Read_ByteN(0xE0,0x00,uc,5) ; // richard note. internal rom is used + + if(uc[0] == 1 && + uc[1] == 1 && + uc[2] == 1 && + uc[3] == 1 && + uc[4] == 1) + { + // with internal eMem + HDMITX_WriteI2C_Byte(REG_TX_ROM_HEADER,0xE0) ; + HDMITX_WriteI2C_Byte(REG_TX_LISTCTRL,0x48) ; + } + else + { + // with external ROM + HDMITX_WriteI2C_Byte(REG_TX_ROM_HEADER,0xA0) ; // ROMHeader + HDMITX_WriteI2C_Byte(REG_TX_LISTCTRL,0x00) ; // Richard, ???? 
+ } + HDMITX_WriteI2C_Byte(0xF8,0xFF) ; // password + + // richard add + return ER_SUCCESS; +} +#endif + +void InitIT6613() +{ + // config interrupt + HDMITX_WriteI2C_Byte(REG_TX_INT_CTRL,Instance[0].bIntType) ; + Instance[0].bIntPOL = (Instance[0].bIntType&B_INTPOL_ACTH)?TRUE:FALSE ; + + // Reset + HDMITX_WriteI2C_Byte(REG_TX_SW_RST,B_REF_RST|B_VID_RST|B_AUD_RST|B_AREF_RST|B_HDCP_RST) ; + DelayMS(1) ; + HDMITX_WriteI2C_Byte(REG_TX_SW_RST,B_VID_RST|B_AUD_RST|B_AREF_RST|B_HDCP_RST) ; + +#if 0 + // Enable clock ring (richard add according toe programming guide) + HDMITX_WriteI2C_Byte(REG_TX_AFE_DRV_CTRL, 0x10); + + // Set default DVI mode (richard add according toe programming guide) +// HDMITX_WriteI2C_Byte(REG_TX_HDMI_MODE, 0x01); // set HDMI mode + HDMITX_WriteI2C_Byte(REG_TX_HDMI_MODE, 0x00); // set DVI mode +#endif + + // Avoid power loading in un play status. + HDMITX_WriteI2C_Byte(REG_TX_AFE_DRV_CTRL,B_AFE_DRV_RST|B_AFE_DRV_PWD) ; + + // set interrupt mask,mask value 0 is interrupt available. +// richard HDMITX_WriteI2C_Byte(REG_TX_INT_MASK1,0xB2) ; // enable interrupt: HPD, DDCBusHangMask, + HDMITX_WriteI2C_Byte(REG_TX_INT_MASK1,0xB2) ; // enable interrupt: HPD, DDCBusHangMask, + HDMITX_WriteI2C_Byte(REG_TX_INT_MASK2,0xF8) ; // enable interrupt: AuthFailMask, AUthDoneMask, KSVListChkMask + HDMITX_WriteI2C_Byte(REG_TX_INT_MASK3,0x37) ; // enable interrupt: PktAudMask, PktDBDMask, PkMpgMask, AUdCTSMask, HDCPSynDetMask + + Switch_HDMITX_Bank(0) ; + DISABLE_NULL_PKT() ; + DISABLE_ACP_PKT() ; + DISABLE_ISRC1_PKT() ; + DISABLE_ISRC2_PKT() ; + DISABLE_AVI_INFOFRM_PKT() ; + DISABLE_AUD_INFOFRM_PKT() ; + DISABLE_SPD_INFOFRM_PKT() ; + DISABLE_MPG_INFOFRM_PKT(); + + + ////////////////////////////////////////////////////////////////// + // Setup Output Audio format. + ////////////////////////////////////////////////////////////////// + HDMITX_WriteI2C_Byte(REG_TX_AUDIO_CTRL1,Instance[0].bOutputAudioMode) ; // regE1 bOutputAudioMode should be loaded from ROM image. 
+ + ////////////////////////////////////////////////////////////////// + // Setup HDCP ROM + ////////////////////////////////////////////////////////////////// +// InitIT6613_HDCPROM() ; +// #ifdef EXTERN_HDCPROM +// #pragma message("EXTERN ROM CODED") ; +// HDMITX_WriteI2C_Byte(REG_TX_ROM_HEADER,0xA0) ; +// #endif + + +} + +////////////////////////////////////////////////////////////////////// +// export this for dynamic change input signal +////////////////////////////////////////////////////////////////////// +BOOL SetupVideoInputSignal(BYTE inputSignalType) +{ + Instance[0].bInputVideoSignalType = inputSignalType ; + // SetInputMode(inputColorMode,Instance[0].bInputVideoSignalType) ; + return TRUE ; +} + +BOOL EnableVideoOutput(VIDEOPCLKLEVEL level,BYTE inputColorMode,BYTE outputColorMode,BYTE bHDMI) +{ + // bInputVideoMode,bOutputVideoMode,Instance[0].bInputVideoSignalType,bAudioInputType,should be configured by upper F/W or loaded from EEPROM. + // should be configured by initsys.c + WORD i ; +#ifdef INVERT_VID_LATCHEDGE + BYTE uc ; +#endif + // VIDEOPCLKLEVEL level ; + + HDMITX_WriteI2C_Byte(REG_TX_SW_RST,B_VID_RST|B_AUD_RST|B_AREF_RST|B_HDCP_RST) ; + + Instance[0].bHDMIMode = (BYTE)bHDMI ; + + if(Instance[0].bHDMIMode) + { + SetAVMute(TRUE) ; + } + + SetInputMode(inputColorMode,Instance[0].bInputVideoSignalType) ; + + SetCSCScale(inputColorMode,outputColorMode) ; + + if(Instance[0].bHDMIMode) + { + HDMITX_WriteI2C_Byte(REG_TX_HDMI_MODE,B_TX_HDMI_MODE) ; + } + else + { + HDMITX_WriteI2C_Byte(REG_TX_HDMI_MODE,B_TX_DVI_MODE) ; + } + +#ifdef INVERT_VID_LATCHEDGE + uc = HDMITX_ReadI2C_Byte(REG_TX_CLK_CTRL1) ; + uc |= B_VDO_LATCH_EDGE ; + HDMITX_WriteI2C_Byte(REG_TX_CLK_CTRL1, uc) ; +#endif + + HDMITX_WriteI2C_Byte(REG_TX_SW_RST, B_AUD_RST|B_AREF_RST|B_HDCP_RST) ; + + // if (pVTiming->VideoPixelClock>80000000) + // { + // level = PCLK_HIGH ; + // } + // else if (pVTiming->VideoPixelClock>20000000) + // { + // level = PCLK_MEDIUM ; + // } + // else + // { + // 
level = PCLK_LOW ; + // } + + SetupAFE(level) ; // pass if High Freq request + + for(i = 0 ; i < 100 ; i++) + { + if(HDMITX_ReadI2C_Byte(REG_TX_SYS_STATUS) & B_TXVIDSTABLE) + { + break ; + + } + DelayMS(1) ; + } + // Clive suggestion. + // clear int3 video stable interrupt. + HDMITX_WriteI2C_Byte(REG_TX_INT_CLR0,0) ; + HDMITX_WriteI2C_Byte(REG_TX_INT_CLR1,B_CLR_VIDSTABLE) ; + HDMITX_WriteI2C_Byte(REG_TX_SYS_STATUS,B_INTACTDONE) ; + HDMITX_WriteI2C_Byte(REG_TX_SYS_STATUS,0) ; + + FireAFE() ; + return TRUE ; +} + +BOOL EnableAudioOutput(ULONG VideoPixelClock,BYTE bAudioSampleFreq,BYTE ChannelNumber,BYTE bAudSWL,BYTE bSPDIF) +{ + BYTE bAudioChannelEnable ; + // richard unsigned long N ; + + Instance[0].TMDSClock = VideoPixelClock ; + Instance[0].bAudFs = bAudioSampleFreq ; + + OS_PRINTF("EnableAudioOutput(%d,%ld,%x,%d,%d,%d);\n",0,VideoPixelClock,bAudioSampleFreq,ChannelNumber,bAudSWL,bSPDIF) ; + + switch(ChannelNumber) + { + case 7: + case 8: + bAudioChannelEnable = 0xF ; + break ; + case 6: + case 5: + bAudioChannelEnable = 0x7 ; + break ; + case 4: + case 3: + bAudioChannelEnable = 0x3 ; + break ; + case 2: + case 1: + default: + bAudioChannelEnable = 0x1 ; + break ; + } + + if(bSPDIF) bAudioChannelEnable |= B_AUD_SPDIF ; + + if( bSPDIF ) + { + Switch_HDMITX_Bank(1) ; + HDMITX_WriteI2C_Byte(REGPktAudCTS0,0x50) ; + HDMITX_WriteI2C_Byte(REGPktAudCTS1,0x73) ; + HDMITX_WriteI2C_Byte(REGPktAudCTS2,0x00) ; + + HDMITX_WriteI2C_Byte(REGPktAudN0,0) ; + HDMITX_WriteI2C_Byte(REGPktAudN1,0x18) ; + HDMITX_WriteI2C_Byte(REGPktAudN2,0) ; + Switch_HDMITX_Bank(0) ; + + HDMITX_WriteI2C_Byte(0xC5, 2) ; // D[1] = 0, HW auto count CTS + } + else + { + SetNCTS(VideoPixelClock,bAudioSampleFreq) ; + } + + /* + if(VideoPixelClock != 0) + { + SetNCTS(VideoPixelClock,bAudioSampleFreq) ; + } + else + { + switch(bAudioSampleFreq) + { + case AUDFS_32KHz: N = 4096; break; + case AUDFS_44p1KHz: N = 6272; break; + case AUDFS_48KHz: N = 6144; break; + case AUDFS_88p2KHz: N = 12544; break; + case 
AUDFS_96KHz: N = 12288; break; + case AUDFS_176p4KHz: N = 25088; break; + case AUDFS_192KHz: N = 24576; break; + default: N = 6144; + } + Switch_HDMITX_Bank(1) ; + HDMITX_WriteI2C_Byte(REGPktAudN0,(BYTE)((N)&0xFF)) ; + HDMITX_WriteI2C_Byte(REGPktAudN1,(BYTE)((N>>8)&0xFF)) ; + HDMITX_WriteI2C_Byte(REGPktAudN2,(BYTE)((N>>16)&0xF)) ; + Switch_HDMITX_Bank(0) ; + HDMITX_WriteI2C_Byte(REG_TX_PKT_SINGLE_CTRL,0) ; // D[1] = 0,HW auto count CTS + } + */ + + //HDMITX_AndREG_Byte(REG_TX_SW_RST,~(B_AUD_RST|B_AREF_RST)) ; + SetAudioFormat(ChannelNumber,bAudioChannelEnable,bAudioSampleFreq,bAudSWL,bSPDIF) ; + + #ifdef HDMITX_REG_DEBUG + DumpCatHDMITXReg() ; + #endif // HDMITX_REG_DEBUG + return TRUE ; +} + + +BOOL +GetEDIDData(int EDIDBlockID,BYTE *pEDIDData) +{ + if(!pEDIDData) + { + return FALSE ; + } + + if(ReadEDID(pEDIDData,EDIDBlockID/2,(EDIDBlockID%2)*128,128) == ER_FAIL) + { + return FALSE ; + } + + return TRUE ; +} +#if 0 +BOOL +EnableHDCP(BYTE bEnable) +{ + if(bEnable) + { + if(ER_FAIL == HDCP_Authenticate()) + { + + HDCP_ResetAuth() ; + return FALSE ; + } + + } + else + { + HDCP_ResetAuth() ; + } + return TRUE ; +} +#endif + +BOOL +CheckHDMITX(BYTE *pHPD,BYTE *pHPDChange) +{ + BYTE intdata1,intdata2,intdata3,sysstat; + BYTE intclr3 = 0 ; + BOOL PrevHPD = Instance[0].bHPD ; + BOOL HPD ; + + sysstat = HDMITX_ReadI2C_Byte(REG_TX_SYS_STATUS) ; // read system status register + + // OS_PRINTF("sysstat(REG[0x0E])=%02Xh\r\n", sysstat); + + HPD = ((sysstat & (B_HPDETECT|B_RXSENDETECT)) == (B_HPDETECT|B_RXSENDETECT))?TRUE:FALSE ; + + // 2007/06/20 added by jj_tseng@chipadvanced.com + if(pHPDChange) + { + *pHPDChange = FALSE ; + + } + //~jj_tseng@chipadvanced.com 2007/06/20 + + if(!HPD) + { + Instance[0].bAuthenticated = FALSE ; + } + + if(sysstat & B_INT_ACTIVE) // interrupt is activce + { + + intdata1 = HDMITX_ReadI2C_Byte(REG_TX_INT_STAT1) ; // reg 0x06 + //ErrorF("INT_Handler: reg%02x = %02x\n",REG_TX_INT_STAT1,intdata1) ; + + if(intdata1 & B_INT_DDCFIFO_ERR) + { + 
//ErrorF("DDC FIFO Error.\n") ; + ClearDDCFIFO() ; + } + + + if(intdata1 & B_INT_DDC_BUS_HANG) + { + ErrorF("DDC BUS HANG.\n") ; + AbortDDC() ; + + if(Instance[0].bAuthenticated) + { + ErrorF("when DDC hang,and aborted DDC,the HDCP authentication need to restart.\n") ; +// HDCP_ResumeAuthentication() ; + } + } + + + if(intdata1 & (B_INT_HPD_PLUG|B_INT_RX_SENSE)) + { + + if(pHPDChange) *pHPDChange = TRUE ; + + if(!HPD) + { + // reset + HDMITX_WriteI2C_Byte(REG_TX_SW_RST,B_AREF_RST|B_VID_RST|B_AUD_RST|B_HDCP_RST) ; + DelayMS(1) ; + HDMITX_WriteI2C_Byte(REG_TX_AFE_DRV_CTRL,B_AFE_DRV_RST|B_AFE_DRV_PWD) ; + //ErrorF("Unplug,%x %x\n",HDMITX_ReadI2C_Byte(REG_TX_SW_RST),HDMITX_ReadI2C_Byte(REG_TX_AFE_DRV_CTRL)) ; + // VState = TXVSTATE_Unplug ; + } + } + + + intdata2 = HDMITX_ReadI2C_Byte(REG_TX_INT_STAT2) ; // reg 0x07 + //ErrorF("INT_Handler: reg%02x = %02x\n",REG_TX_INT_STAT2,intdata2) ; + + + + #ifdef SUPPORT_HDCP + if(intdata2 & B_INT_AUTH_DONE) + { + ErrorF("interrupt Authenticate Done.\n") ; + HDMITX_OrREG_Byte(REG_TX_INT_MASK2,B_T_AUTH_DONE_MASK) ; + Instance[0].bAuthenticated = TRUE ; + SetAVMute(FALSE) ; + } + + if(intdata2 & B_INT_AUTH_FAIL) + { + ErrorF("interrupt Authenticate Fail.\n") ; + AbortDDC(); // @emily add + HDCP_ResumeAuthentication() ; + } + #endif // SUPPORT_HDCP + + intdata3 = HDMITX_ReadI2C_Byte(REG_TX_INT_STAT3) ; // reg 0x08 + if(intdata3 & B_INT_VIDSTABLE) + { + sysstat = HDMITX_ReadI2C_Byte(REG_TX_SYS_STATUS) ; + if(sysstat & B_TXVIDSTABLE) + { + FireAFE() ; + } + } + HDMITX_WriteI2C_Byte(REG_TX_INT_CLR0,0xFF) ; + HDMITX_WriteI2C_Byte(REG_TX_INT_CLR1,0xFF) ; + intclr3 = (HDMITX_ReadI2C_Byte(REG_TX_SYS_STATUS))|B_CLR_AUD_CTS | B_INTACTDONE ; + HDMITX_WriteI2C_Byte(REG_TX_SYS_STATUS,intclr3) ; // clear interrupt. + intclr3 &= ~(B_INTACTDONE) ; + HDMITX_WriteI2C_Byte(REG_TX_SYS_STATUS,intclr3) ; // INTACTDONE reset to zero. 
+ } + else + { + if(pHPDChange) + { + *pHPDChange = (HPD != PrevHPD)?TRUE:FALSE ; + + if(*pHPDChange &&(!HPD)) + { + HDMITX_WriteI2C_Byte(REG_TX_AFE_DRV_CTRL,B_AFE_DRV_RST|B_AFE_DRV_PWD) ; + } + } + } + + SetupAudioChannel() ; // 2007/12/12 added by jj_tseng + + if(pHPD) + { + *pHPD = HPD ? TRUE:FALSE ; + } + + Instance[0].bHPD = (BYTE)HPD ; + return HPD ; +} + +void +DisableIT6613() +{ + HDMITX_WriteI2C_Byte(REG_TX_SW_RST,B_AREF_RST|B_VID_RST|B_AUD_RST|B_HDCP_RST) ; + DelayMS(1) ; + HDMITX_WriteI2C_Byte(REG_TX_AFE_DRV_CTRL,B_AFE_DRV_RST|B_AFE_DRV_PWD) ; +} + +void +DisableVideoOutput() +{ + BYTE uc = HDMITX_ReadI2C_Byte(REG_TX_SW_RST) | B_VID_RST ; + HDMITX_WriteI2C_Byte(REG_TX_SW_RST,uc) ; + HDMITX_WriteI2C_Byte(REG_TX_AFE_DRV_CTRL,B_AFE_DRV_RST|B_AFE_DRV_PWD) ; +} + + +void +DisableAudioOutput() +{ + BYTE uc = HDMITX_ReadI2C_Byte(REG_TX_SW_RST) | B_AUD_RST ; + HDMITX_WriteI2C_Byte(REG_TX_SW_RST,uc) ; +} + + + +BOOL +EnableAVIInfoFrame(BYTE bEnable,BYTE *pAVIInfoFrame) +{ + if(!bEnable) + { + DISABLE_AVI_INFOFRM_PKT() ; + return TRUE ; + } + + if(SetAVIInfoFrame((AVI_InfoFrame *)pAVIInfoFrame) == ER_SUCCESS) + { + return TRUE ; + } + + return FALSE ; +} + +BOOL +EnableAudioInfoFrame(BYTE bEnable,BYTE *pAudioInfoFrame) +{ + if(!bEnable) + { + // richard modify, DISABLE_AVI_INFOFRM_PKT() ; + DISABLE_AUD_INFOFRM_PKT(); + return TRUE ; + } + + + if(SetAudioInfoFrame((Audio_InfoFrame *)pAudioInfoFrame) == ER_SUCCESS) + { + return TRUE ; + } + + return FALSE ; +} + +void +SetAVMute(BYTE bEnable) +{ + BYTE uc ; + + Switch_HDMITX_Bank(0) ; + uc = HDMITX_ReadI2C_Byte(REG_TX_GCP) ; + uc &= ~B_TX_SETAVMUTE ; + uc |= bEnable?B_TX_SETAVMUTE:0 ; + HDMITX_WriteI2C_Byte(REG_TX_GCP,uc) ; + HDMITX_WriteI2C_Byte(REG_TX_PKT_GENERAL_CTRL,B_ENABLE_PKT|B_REPEAT_PKT) ; +} + +void +SetOutputColorDepthPhase(BYTE ColorDepth,BYTE bPhase) +{ + BYTE uc ; + BYTE bColorDepth ; + + if(ColorDepth == 30) + { + bColorDepth = B_CD_30 ; + } + else if (ColorDepth == 36) + { + bColorDepth = B_CD_36 ; + 
} + else if (ColorDepth == 24) + { + bColorDepth = B_CD_24 ; + } + else + { + bColorDepth = 0 ; // not indicated + } + + Switch_HDMITX_Bank(0) ; + uc = HDMITX_ReadI2C_Byte(REG_TX_GCP) ; + uc &= ~B_COLOR_DEPTH_MASK ; + uc |= bColorDepth&B_COLOR_DEPTH_MASK; + HDMITX_WriteI2C_Byte(REG_TX_GCP,uc) ; +} + +void +Get6613Reg(BYTE *pReg) +{ + int i ; + BYTE reg ; + Switch_HDMITX_Bank(0) ; + for(i = 0 ; i < 0x100 ; i++) + { + reg = i & 0xFF ; + pReg[i] = HDMITX_ReadI2C_Byte(reg) ; + } + Switch_HDMITX_Bank(1) ; + for(reg = 0x30 ; reg < 0xB0 ; i++,reg++) + { + pReg[i] = HDMITX_ReadI2C_Byte(reg) ; + } + Switch_HDMITX_Bank(0) ; + +} +////////////////////////////////////////////////////////////////////// +// SubProcedure process // +////////////////////////////////////////////////////////////////////// +#ifdef SUPPORT_DEGEN + +typedef struct { + MODE_ID id ; + BYTE Reg90; + BYTE Reg92; + BYTE Reg93; + BYTE Reg94; + BYTE Reg9A; + BYTE Reg9B; + BYTE Reg9C; + BYTE Reg9D; + BYTE Reg9E; + BYTE Reg9F; +} DEGEN_Setting ; + + +static _CODE DEGEN_Setting DeGen_Table[] = { + {CEA_640x480p60 ,0x01,0x8E,0x0E,0x30,0x22,0x02,0x20,0xFF,0xFF,0xFF}, + // HDES = 142, HDEE = 782, VDES = 34, VDEE = 514 + {CEA_720x480p60 ,0x01,0x78,0x48,0x30,0x23,0x03,0x20,0xFF,0xFF,0xFF}, + // HDES = 120, HDEE = 840, VDES = 35, VDEE = 515 + {CEA_1280x720p60 ,0x07,0x02,0x02,0x61,0x18,0xE8,0x20,0xFF,0xFF,0xFF}, + // HDES = 258, HDEE = 1538, VDES = 24, VDEE = 744 +// {CEA_1920x1080i60 ,0x07,0xBE,0x3E,0x80,0x13,0x2F,0x20,0x45,0x61,0x42}, +// // HDES = 190, HDEE = 2110, VDES = 19, VDEE = 559, VDS2 = 581, VDE2 = 1121 + {CEA_1920x1080i60 ,0x07,0xBE,0x3E,0x80,0x13,0x2F,0x20,0x46,0x62,0x42}, + // HDES = 190, HDEE = 2110, VDES = 19, VDEE = 559, VDS2 = 582, VDE2 = 1122 + + {CEA_720x480i60 ,0x01,0x75,0x45,0x30,0x11,0x01,0x10,0x17,0x07,0x21}, + // HDES = 117, HDEE = 837, VDES = 17, VDEE = 257, VDS2 = 279, VDE2 = 519 + {CEA_720x240p60 ,0x01,0x75,0x45,0x30,0x11,0x01,0x10,0xFF,0xFF,0xFF}, + // HDES = 117, HDEE = 837, VDES = 17, 
VDEE = 257 + {CEA_1440x480i60 ,0x01,0xEC,0x8C,0x60,0x11,0x01,0x10,0x17,0x07,0x21}, + // HDES = 236, HDEE = 1676, VDES = 17, VDEE = 257, VDS2 = 279, VDE2 = 519 + {CEA_1440x240p60 ,0x01,0xEC,0x8C,0x60,0x11,0x01,0x10,0xFF,0xFF,0xFF}, + // HDES = 236, HDEE = 1676, VDES = 17, VDEE = 257 + {CEA_2880x480i60 ,0x01,0x16,0x56,0xD2,0x11,0x01,0x10,0x17,0x07,0x21}, + // HDES = 534, HDEE = 3414, VDES = 17, VDEE = 257, VDS2 = 279, VDE2 = 519 + {CEA_2880x240p60 ,0x01,0x16,0x56,0xD2,0x11,0x01,0x10,0xFF,0xFF,0xFF}, + // HDES = 534, HDEE = 3414, VDES = 17, VDEE = 257 + {CEA_1440x480p60 ,0x01,0xF2,0x92,0x60,0x23,0x03,0x20,0xFF,0xFF,0xFF}, + // HDES = 242, HDEE = 1682, VDES = 35, VDEE = 515 + {CEA_1920x1080p60 ,0x07,0xBE,0x3E,0x80,0x28,0x60,0x40,0xFF,0xFF,0xFF}, + // HDES = 190, HDEE = 2110, VDES = 40, VDEE = 1120 + {CEA_720x576p50 ,0x01,0x82,0x52,0x30,0x2b,0x6b,0x20,0xFF,0xFF,0xFF}, + // HDES = 130, HDEE = 850, VDES = 43, VDEE = 619 + {CEA_1280x720p50 ,0x07,0x02,0x02,0x61,0x18,0xE8,0x20,0xFF,0xFF,0xFF}, + // HDES = 258, HDEE = 1538, VDES = 24, VDEE = 744 + {CEA_1920x1080i50 ,0x07,0xBE,0x3E,0x80,0x13,0x2F,0x20,0x46,0x62,0x42}, + // HDES = 190, HDEE = 2110, VDES = 19, VDEE = 559, VDS2 = 582, VDE2 = 1122 + {CEA_720x576i50 ,0x01,0x82,0x52,0x30,0x15,0x35,0x10,0x4D,0x6D,0x21}, + // HDES = 130, HDEE = 850, VDES = 21, VDEE = 309, VDS2 = 333, VDE2 = 621 + {CEA_1440x576i50 ,0x01,0x06,0xA6,0x61,0x15,0x35,0x10,0x4D,0x6D,0x21}, + // HDES = 262, HDEE = 1702, VDES = 21, VDEE = 309, VDS2 = 333, VDE2 = 621 + {CEA_720x288p50 ,0x01,0x82,0x52,0x30,0x15,0x35,0x10,0xFF,0xFF,0xFF}, + // HDES = 130, HDEE = 850, VDES = 21, VDEE = 309 + {CEA_1440x288p50 ,0x01,0x06,0xA6,0x61,0x15,0x35,0x10,0xFF,0xFF,0xFF}, + // HDES = 262, HDEE = 1702, VDES = 21, VDEE = 309 + {CEA_2880x576i50 ,0x01,0x0E,0x4E,0xD2,0x15,0x35,0x10,0x4D,0x6D,0x21}, + // HDES = 526, HDEE = 3406, VDES = 21, VDEE = 309, VDS2 = 333, VDE2 = 621 + {CEA_2880x288p50 ,0x01,0x0E,0x4E,0xD2,0x15,0x35,0x10,0xFF,0xFF,0xFF}, + // HDES = 526, HDEE = 3406, VDES = 
21, VDEE = 309 + {CEA_1440x576p50 ,0x05,0x06,0xA6,0x61,0x2B,0x6B,0x20,0xFF,0xFF,0xFF}, + // HDES = 262, HDEE = 1702, VDES = 43, VDEE = 619 + {CEA_1920x1080p50 ,0x07,0xBE,0x3E,0x80,0x28,0x60,0x40,0xFF,0xFF,0xFF}, + // HDES = 190, HDEE = 2110, VDES = 40, VDEE = 1120 + {CEA_1920x1080p24 ,0x07,0xBE,0x3E,0x80,0x28,0x60,0x40,0xFF,0xFF,0xFF}, + // HDES = 190, HDEE = 2110, VDES = 40, VDEE = 1120 + {CEA_1920x1080p25 ,0x07,0xBE,0x3E,0x80,0x28,0x60,0x40,0xFF,0xFF,0xFF}, + // HDES = 190, HDEE = 2110, VDES = 40, VDEE = 1120 + {CEA_1920x1080p30 ,0x07,0xBE,0x3E,0x80,0x28,0x60,0x40,0xFF,0xFF,0xFF}, + // HDES = 190, HDEE = 2110, VDES = 40, VDEE = 1120 + {VESA_640x350p85 ,0x03,0x9E,0x1E,0x30,0x3E,0x9C,0x10,0xFF,0xFF,0xFF}, + // HDES = 158, HDEE = 798, VDES = 62, VDEE = 412 + {VESA_640x400p85 ,0x05,0x9E,0x1E,0x30,0x2B,0xBB,0x10,0xFF,0xFF,0xFF}, + // HDES = 158, HDEE = 798, VDES = 43, VDEE = 443 + {VESA_720x400p85 ,0x05,0xB2,0x82,0x30,0x2C,0xBC,0x10,0xFF,0xFF,0xFF}, + // HDES = 178, HDEE = 898, VDES = 44, VDEE = 444 + {VESA_640x480p60 ,0x01,0x8E,0x0E,0x30,0x22,0x02,0x20,0xFF,0xFF,0xFF}, + // HDES = 142, HDEE = 782, VDES = 34, VDEE = 514 + {VESA_640x480p72 ,0x01,0xA6,0x26,0x30,0x1E,0xFE,0x10,0xFF,0xFF,0xFF}, + // HDES = 166, HDEE = 806, VDES = 30, VDEE = 510 + {VESA_640x480p75 ,0x01,0xB6,0x36,0x30,0x12,0xF2,0x10,0xFF,0xFF,0xFF}, + // HDES = 182, HDEE = 822, VDES = 18, VDEE = 498 + {VESA_640x480p85 ,0x01,0x86,0x06,0x30,0x1B,0xFB,0x10,0xFF,0xFF,0xFF}, + // HDES = 134, HDEE = 774, VDES = 27, VDEE = 507 + {VESA_800x600p56 ,0x07,0xC6,0xE6,0x30,0x17,0x6F,0x20,0xFF,0xFF,0xFF}, + // HDES = 198, HDEE = 998, VDES = 23, VDEE = 623 + {VESA_800x600p60 ,0x07,0xD6,0xF6,0x30,0x1A,0x72,0x20,0xFF,0xFF,0xFF}, + // HDES = 214, HDEE = 1014, VDES = 26, VDEE = 626 + {VESA_800x600p72 ,0x07,0xB6,0xD6,0x30,0x1C,0x74,0x20,0xFF,0xFF,0xFF}, + // HDES = 182, HDEE = 982, VDES = 28, VDEE = 628 + {VESA_800x600p75 ,0x07,0xEE,0x0E,0x40,0x17,0x6F,0x20,0xFF,0xFF,0xFF}, + // HDES = 238, HDEE = 1038, VDES = 23, VDEE = 623 + 
{VESA_800X600p85 ,0x07,0xD6,0xF6,0x30,0x1D,0x75,0x20,0xFF,0xFF,0xFF}, + // HDES = 214, HDEE = 1014, VDES = 29, VDEE = 629 + {VESA_840X480p60 ,0x07,0xDE,0x2E,0x40,0x1E,0xFE,0x10,0xFF,0xFF,0xFF}, + // HDES = 222, HDEE = 1070, VDES = 30, VDEE = 510 + {VESA_1024x768p60 ,0x01,0x26,0x26,0x51,0x22,0x22,0x30,0xFF,0xFF,0xFF}, + // HDES = 294, HDEE = 1318, VDES = 34, VDEE = 802 + {VESA_1024x768p70 ,0x01,0x16,0x16,0x51,0x22,0x22,0x30,0xFF,0xFF,0xFF}, + // HDES = 278, HDEE = 1302, VDES = 34, VDEE = 802 + {VESA_1024x768p75 ,0x07,0x0E,0x0E,0x51,0x1E,0x1E,0x30,0xFF,0xFF,0xFF}, + // HDES = 270, HDEE = 1294, VDES = 30, VDEE = 798 + {VESA_1024x768p85 ,0x07,0x2E,0x2E,0x51,0x26,0x26,0x30,0xFF,0xFF,0xFF}, + // HDES = 302, HDEE = 1326, VDES = 38, VDEE = 806 + {VESA_1152x864p75 ,0x07,0x7E,0xFE,0x51,0x22,0x82,0x30,0xFF,0xFF,0xFF}, + // HDES = 382, HDEE = 1534, VDES = 34, VDEE = 898 + {VESA_1280x768p60R ,0x03,0x6E,0x6E,0x50,0x12,0x12,0x30,0xFF,0xFF,0xFF}, + // HDES = 110, HDEE = 1390, VDES = 18, VDEE = 786 + {VESA_1280x768p60 ,0x05,0x3E,0x3E,0x61,0x1A,0x1A,0x30,0xFF,0xFF,0xFF}, + // HDES = 318, HDEE = 1598, VDES = 26, VDEE = 794 + {VESA_1280x768p75 ,0x05,0x4E,0x4E,0x61,0x21,0x21,0x30,0xFF,0xFF,0xFF}, + // HDES = 334, HDEE = 1614, VDES = 33, VDEE = 801 + {VESA_1280x768p85 ,0x05,0x5E,0x5E,0x61,0x25,0x25,0x30,0xFF,0xFF,0xFF}, + // HDES = 350, HDEE = 1630, VDES = 37, VDEE = 805 + {VESA_1280x960p60 ,0x07,0xA6,0xA6,0x61,0x26,0xE6,0x30,0xFF,0xFF,0xFF}, + // HDES = 422, HDEE = 1702, VDES = 38, VDEE = 998 + {VESA_1280x960p85 ,0x07,0x7E,0x7E,0x61,0x31,0xF1,0x30,0xFF,0xFF,0xFF}, + // HDES = 382, HDEE = 1662, VDES = 49, VDEE = 1009 + {VESA_1280x1024p60 ,0x07,0x66,0x66,0x61,0x28,0x28,0x40,0xFF,0xFF,0xFF}, + // HDES = 358, HDEE = 1638, VDES = 40, VDEE = 1064 + {VESA_1280x1024p75 ,0x07,0x86,0x86,0x61,0x28,0x28,0x40,0xFF,0xFF,0xFF}, + // HDES = 390, HDEE = 1670, VDES = 40, VDEE = 1064 + {VESA_1280X1024p85 ,0x07,0x7E,0x7E,0x61,0x2E,0x2E,0x40,0xFF,0xFF,0xFF}, + // HDES = 382, HDEE = 1662, VDES = 46, VDEE = 
1070 + {VESA_1360X768p60 ,0x07,0x6E,0xBE,0x61,0x17,0x17,0x30,0xFF,0xFF,0xFF}, + // HDES = 366, HDEE = 1726, VDES = 23, VDEE = 791 + {VESA_1400x768p60R ,0x03,0x6E,0xE6,0x50,0x1A,0x34,0x40,0xFF,0xFF,0xFF}, + // HDES = 110, HDEE = 1510, VDES = 26, VDEE = 1076 + {VESA_1400x768p60 ,0x05,0x76,0xEE,0x61,0x23,0x3D,0x40,0xFF,0xFF,0xFF}, + // HDES = 374, HDEE = 1774, VDES = 35, VDEE = 1085 + {VESA_1400x1050p75 ,0x05,0x86,0xFE,0x61,0x2D,0x47,0x40,0xFF,0xFF,0xFF}, + // HDES = 390, HDEE = 1790, VDES = 45, VDEE = 1095 + {VESA_1400x1050p85 ,0x05,0x96,0x0E,0x71,0x33,0x4D,0x40,0xFF,0xFF,0xFF}, + // HDES = 406, HDEE = 1806, VDES = 51, VDEE = 1101 + {VESA_1440x900p60R ,0x03,0x6E,0x0E,0x60,0x16,0x9A,0x30,0xFF,0xFF,0xFF}, + // HDES = 110, HDEE = 1550, VDES = 22, VDEE = 922 + {VESA_1440x900p60 ,0x05,0x7E,0x1E,0x71,0x1E,0xA2,0x30,0xFF,0xFF,0xFF}, + // HDES = 382, HDEE = 1822, VDES = 30, VDEE = 930 + {VESA_1440x900p75 ,0x05,0x8E,0x2E,0x71,0x26,0xAA,0x30,0xFF,0xFF,0xFF}, + // HDES = 398, HDEE = 1838, VDES = 38, VDEE = 938 + {VESA_1440x900p85 ,0x05,0x96,0x36,0x71,0x2C,0xB0,0x30,0xFF,0xFF,0xFF}, + // HDES = 406, HDEE = 1846, VDES = 44, VDEE = 944 + {VESA_1600x1200p60 ,0x07,0xEE,0x2E,0x81,0x30,0xE0,0x40,0xFF,0xFF,0xFF}, + // HDES = 494, HDEE = 2094, VDES = 48, VDEE = 1248 + {VESA_1600x1200p65 ,0x07,0xEE,0x2E,0x81,0x30,0xE0,0x40,0xFF,0xFF,0xFF}, + // HDES = 494, HDEE = 2094, VDES = 48, VDEE = 1248 + {VESA_1600x1200p70 ,0x07,0xEE,0x2E,0x81,0x30,0xE0,0x40,0xFF,0xFF,0xFF}, + // HDES = 494, HDEE = 2094, VDES = 48, VDEE = 1248 + {VESA_1600x1200p75 ,0x07,0xEE,0x2E,0x81,0x30,0xE0,0x40,0xFF,0xFF,0xFF}, + // HDES = 494, HDEE = 2094, VDES = 48, VDEE = 1248 + {VESA_1600x1200p85 ,0x07,0xEE,0x2E,0x81,0x30,0xE0,0x40,0xFF,0xFF,0xFF}, + // HDES = 494, HDEE = 2094, VDES = 48, VDEE = 1248 + {VESA_1680x1050p60R ,0x03,0x6E,0xFE,0x60,0x1A,0x34,0x40,0xFF,0xFF,0xFF}, + // HDES = 110, HDEE = 1790, VDES = 26, VDEE = 1076 + {VESA_1680x1050p60 ,0x05,0xC6,0x56,0x81,0x23,0x3D,0x40,0xFF,0xFF,0xFF}, + // HDES = 454, HDEE = 
2134, VDES = 35, VDEE = 1085 + {VESA_1680x1050p75 ,0x05,0xD6,0x66,0x81,0x2D,0x47,0x40,0xFF,0xFF,0xFF}, + // HDES = 470, HDEE = 2150, VDES = 45, VDEE = 1095 + {VESA_1680x1050p85 ,0x05,0xDE,0x6E,0x81,0x33,0x4D,0x40,0xFF,0xFF,0xFF}, + // HDES = 478, HDEE = 2158, VDES = 51, VDEE = 1101 + {VESA_1792x1344p60 ,0x05,0x0E,0x0E,0x92,0x30,0x70,0x50,0xFF,0xFF,0xFF}, + // HDES = 526, HDEE = 2318, VDES = 48, VDEE = 1392 + {VESA_1792x1344p75 ,0x05,0x36,0x36,0x92,0x47,0x87,0x50,0xFF,0xFF,0xFF}, + // HDES = 566, HDEE = 2358, VDES = 71, VDEE = 1415 + {VESA_1856x1392p60 ,0x05,0x3E,0x7E,0x92,0x2D,0x9D,0x50,0xFF,0xFF,0xFF}, + // HDES = 574, HDEE = 2430, VDES = 45, VDEE = 1437 + {VESA_1856x1392p75 ,0x05,0x3E,0x7E,0x92,0x6A,0xDA,0x50,0xFF,0xFF,0xFF}, + // HDES = 574, HDEE = 2430, VDES = 106, VDEE = 1498 + {VESA_1920x1200p60R ,0x03,0x6E,0xEE,0x70,0x1F,0xCF,0x40,0xFF,0xFF,0xFF}, + // HDES = 110, HDEE = 2030, VDES = 31, VDEE = 1231 + {VESA_1920x1200p60 ,0x05,0x16,0x96,0x92,0x29,0xD9,0x40,0xFF,0xFF,0xFF}, + // HDES = 534, HDEE = 2454, VDES = 41, VDEE = 1241 + {VESA_1920x1200p75 ,0x05,0x26,0xA6,0x92,0x33,0xE3,0x40,0xFF,0xFF,0xFF}, + // HDES = 550, HDEE = 2470, VDES = 51, VDEE = 1251 + {VESA_1920x1200p85 ,0x05,0x2E,0xAE,0x92,0x3A,0xEA,0x40,0xFF,0xFF,0xFF}, + // HDES = 558, HDEE = 2478, VDES = 58, VDEE = 1258 + {VESA_1920x1440p60 ,0x05,0x26,0xA6,0x92,0x3A,0xDA,0x50,0xFF,0xFF,0xFF}, + // HDES = 550, HDEE = 2470, VDES = 58, VDEE = 1498 + {VESA_1920x1440p75 ,0x05,0x3E,0xBE,0x92,0x3A,0xDA,0x50,0xFF,0xFF,0xFF}, + // HDES = 574, HDEE = 2494, VDES = 58, VDEE = 1498 + {UNKNOWN_MODE,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF} +} ; + +BOOL ProgramDEGenModeByID(MODE_ID id,BYTE bInputSignalType) +{ + int i ; + if( (bInputSignalType & (T_MODE_DEGEN|T_MODE_SYNCGEN|T_MODE_SYNCEMB) )==(T_MODE_DEGEN)) + { + for( i = 0 ; DeGen_Table[i].id != UNKNOWN_MODE ; i++ ) + { + if( id == DeGen_Table[i].id ) break ; + } + if( DeGen_Table[i].id == UNKNOWN_MODE ) + { + return FALSE ; + } + + Switch_HDMITX_Bank(0) ; + 
HDMITX_WriteI2C_Byte(0x90,DeGen_Table[i].Reg90) ; + HDMITX_WriteI2C_Byte(0x92,DeGen_Table[i].Reg92) ; + HDMITX_WriteI2C_Byte(0x93,DeGen_Table[i].Reg93) ; + HDMITX_WriteI2C_Byte(0x94,DeGen_Table[i].Reg94) ; + HDMITX_WriteI2C_Byte(0x9A,DeGen_Table[i].Reg9A) ; + HDMITX_WriteI2C_Byte(0x9B,DeGen_Table[i].Reg9B) ; + HDMITX_WriteI2C_Byte(0x9C,DeGen_Table[i].Reg9C) ; + HDMITX_WriteI2C_Byte(0x9D,DeGen_Table[i].Reg9D) ; + HDMITX_WriteI2C_Byte(0x9E,DeGen_Table[i].Reg9E) ; + HDMITX_WriteI2C_Byte(0x9F,DeGen_Table[i].Reg9F) ; + return TRUE ; + + } + return FALSE ; +} + +#endif + +#ifdef SUPPORT_SYNCEMBEDDED +/* ****************************************************** */ +// sync embedded table setting,defined as comment. +/* ****************************************************** */ +struct SyncEmbeddedSetting { + BYTE fmt ; + BYTE RegHVPol ; // Reg90 + BYTE RegHfPixel ; // Reg91 + BYTE RegHSSL ; // Reg95 + BYTE RegHSEL ; // Reg96 + BYTE RegHSH ; // Reg97 + BYTE RegVSS1 ; // RegA0 + BYTE RegVSE1 ; // RegA1 + BYTE RegVSS2 ; // RegA2 + BYTE RegVSE2 ; // RegA3 + + ULONG PCLK ; + BYTE VFreq ; +} ; + +static _CODE struct SyncEmbeddedSetting SyncEmbTable[] = { + // {FMT,0x90,0x91, + // 0x95,0x96,0x97,0xA0,0xA1,0xA2,0xA3,PCLK,VFREQ}, + { 1,0xF0,0x31,0x0E,0x6E,0x00,0x0A,0xC0,0xFF,0xFF,25175000,60}, + { 2,0xF0,0x31,0x0E,0x4c,0x00,0x09,0xF0,0xFF,0xFF,27000000,60}, + { 3,0xF0,0x31,0x0E,0x4c,0x00,0x09,0xF0,0xFF,0xFF,27000000,60}, + { 4,0x76,0x33,0x6c,0x94,0x00,0x05,0xA0,0xFF,0xFF,74175000,60}, + { 5,0x26,0x4A,0x56,0x82,0x00,0x02,0x70,0x34,0x92,74175000,60}, + { 6,0xE0,0x1B,0x11,0x4F,0x00,0x04,0x70,0x0A,0xD1,27000000,60}, + { 7,0xE0,0x1B,0x11,0x4F,0x00,0x04,0x70,0x0A,0xD1,27000000,60}, + { 8,0x00,0xff,0x11,0x4F,0x00,0x04,0x70,0xFF,0xFF,27000000,60}, + { 9,0x00,0xff,0x11,0x4F,0x00,0x04,0x70,0xFF,0xFF,27000000,60}, + { 10,0xe0,0x1b,0x11,0x4F,0x00,0x04,0x70,0x0A,0xD1,54000000,60}, + { 11,0xe0,0x1b,0x11,0x4F,0x00,0x04,0x70,0x0A,0xD1,54000000,60}, + { 
12,0x00,0xff,0x11,0x4F,0x00,0x04,0x70,0xFF,0xFF,54000000,60}, + { 13,0x00,0xff,0x11,0x4F,0x00,0x04,0x70,0xFF,0xFF,54000000,60}, + { 14,0x00,0xff,0x1e,0x9A,0x00,0x09,0xF0,0xFF,0xFF,54000000,60}, + { 15,0x00,0xff,0x1e,0x9A,0x00,0x09,0xF0,0xFF,0xFF,54000000,60}, + { 16,0x06,0xff,0x56,0x82,0x00,0x04,0x90,0xFF,0xFF,148350000,60}, + { 17,0x00,0xff,0x0a,0x4A,0x00,0x05,0xA0,0xFF,0xFF,27000000,50}, + { 18,0x00,0xff,0x0a,0x4A,0x00,0x05,0xA0,0xFF,0xFF,27000000,50}, + { 19,0x06,0xff,0xB6,0xDE,0x11,0x05,0xA0,0xFF,0xFF,74250000,50}, + { 20,0x66,0x73,0x0e,0x3A,0x22,0x02,0x70,0x34,0x92,74250000,50}, + { 21,0xA0,0x1B,0x0a,0x49,0x00,0x02,0x50,0x3A,0xD1,27000000,50}, + { 22,0xA0,0x1B,0x0a,0x49,0x00,0x02,0x50,0x3A,0xD1,27000000,50}, + { 23,0x00,0xff,0x0a,0x49,0x00,0x02,0x50,0xFF,0xFF,27000000,50}, + { 24,0x00,0xff,0x0a,0x49,0x00,0x02,0x50,0xFF,0xFF,27000000,50}, + { 25,0xA0,0x1B,0x0a,0x49,0x00,0x02,0x50,0x3A,0xD1,54000000,50}, + { 26,0xA0,0x1B,0x0a,0x49,0x00,0x02,0x50,0x3A,0xD1,54000000,50}, + { 27,0x00,0xff,0x0a,0x49,0x00,0x02,0x50,0xFF,0xFF,54000000,50}, + { 28,0x00,0xff,0x0a,0x49,0x00,0x02,0x50,0xFF,0xFF,54000000,50}, + { 29,0x04,0xff,0x16,0x96,0x00,0x05,0xA0,0xFF,0xFF,54000000,50}, + { 30,0x04,0xff,0x16,0x96,0x00,0x05,0xA0,0xFF,0xFF,54000000,50}, + { 31,0x06,0xff,0x0e,0x3a,0x22,0x04,0x90,0xFF,0xFF,148500000,50}, + {0xFF,0xFF,0xff,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0,0} +} ; + +BOOL +ProgramSyncEmbeddedVideoMode(BYTE VIC,BYTE bInputSignalType) +{ + int i ; + // if Embedded Video,need to generate timing with pattern register + + ErrorF("ProgramSyncEmbeddedVideoMode(%d,%x)\n",VIC,bInputSignalType) ; + + if( bInputSignalType & T_MODE_SYNCEMB ) + { + for(i = 0 ; SyncEmbTable[i].fmt != 0xFF ; i++) + { + if(VIC == SyncEmbTable[i].fmt) + { + break ; + } + } + + if(SyncEmbTable[i].fmt == 0xFF) + { + return FALSE ; + } + + HDMITX_WriteI2C_Byte(REG_TX_HVPol,SyncEmbTable[i].RegHVPol) ; // Reg90 + HDMITX_WriteI2C_Byte(REG_TX_HfPixel,SyncEmbTable[i].RegHfPixel) ; // Reg91 + + 
HDMITX_WriteI2C_Byte(REG_TX_HSSL,SyncEmbTable[i].RegHSSL) ; // Reg95 + HDMITX_WriteI2C_Byte(REG_TX_HSEL,SyncEmbTable[i].RegHSEL) ; // Reg96 + HDMITX_WriteI2C_Byte(REG_TX_HSH,SyncEmbTable[i].RegHSH) ; // Reg97 + HDMITX_WriteI2C_Byte(REG_TX_VSS1,SyncEmbTable[i].RegVSS1) ; // RegA0 + HDMITX_WriteI2C_Byte(REG_TX_VSE1,SyncEmbTable[i].RegVSE1) ; // RegA1 + + HDMITX_WriteI2C_Byte(REG_TX_VSS2,SyncEmbTable[i].RegVSS2) ; // RegA2 + HDMITX_WriteI2C_Byte(REG_TX_VSE2,SyncEmbTable[i].RegVSE2) ; // RegA3 + } + + return TRUE ; +} +#endif // SUPPORT_SYNCEMBEDDED + +//~jj_tseng@chipadvanced.com 2007/01/02 + + +////////////////////////////////////////////////////////////////////// +// Function: SetInputMode +// Parameter: InputMode,bInputSignalType +// InputMode - use [1:0] to identify the color space for reg70[7:6], +// definition: +// #define F_MODE_RGB444 0 +// #define F_MODE_YUV422 1 +// #define F_MODE_YUV444 2 +// #define F_MODE_CLRMOD_MASK 3 +// bInputSignalType - defined the CCIR656 D[0],SYNC Embedded D[1],and +// DDR input in D[2]. +// Return: N/A +// Remark: program Reg70 with the input value. +// Side-Effect: Reg70. 
+////////////////////////////////////////////////////////////////////// + +static void +SetInputMode(BYTE InputMode,BYTE bInputSignalType) +{ + BYTE ucData ; + + OS_PRINTF("SetInputMode(%02X,%02X)\n",InputMode,bInputSignalType) ; + + ucData = HDMITX_ReadI2C_Byte(REG_TX_INPUT_MODE) ; + + ucData &= ~(M_INCOLMOD|B_2X656CLK|B_SYNCEMB|B_INDDR|B_PCLKDIV2) ; + + switch(InputMode & F_MODE_CLRMOD_MASK) + { + case F_MODE_YUV422: + ucData |= B_IN_YUV422 ; + break ; + case F_MODE_YUV444: + ucData |= B_IN_YUV444 ; + break ; + case F_MODE_RGB444: + default: + ucData |= B_IN_RGB ; + break ; + } + + if(bInputSignalType & T_MODE_PCLKDIV2) + { + ucData |= B_PCLKDIV2 ; OS_PRINTF("PCLK Divided by 2 mode\n") ; + } + if(bInputSignalType & T_MODE_CCIR656) + { + ucData |= B_2X656CLK ; OS_PRINTF("CCIR656 mode\n") ; + } + + if(bInputSignalType & T_MODE_SYNCEMB) + { + ucData |= B_SYNCEMB ; OS_PRINTF("Sync Embedded mode\n") ; + } + + if(bInputSignalType & T_MODE_INDDR) + { + ucData |= B_INDDR ; OS_PRINTF("Input DDR mode\n") ; + } + + HDMITX_WriteI2C_Byte(REG_TX_INPUT_MODE,ucData) ; +} + +////////////////////////////////////////////////////////////////////// +// Function: SetCSCScale +// Parameter: bInputMode - +// D[1:0] - Color Mode +// D[4] - Colorimetry 0: ITU_BT601 1: ITU_BT709 +// D[5] - Quantization 0: 0_255 1: 16_235 +// D[6] - Up/Dn Filter 'Required' +// 0: no up/down filter +// 1: enable up/down filter when csc need. +// D[7] - Dither Filter 'Required' +// 0: no dither enabled. +// 1: enable dither and dither free go "when required". +// bOutputMode - +// D[1:0] - Color mode. +// Return: N/A +// Remark: reg72~reg8D will be programmed depended the input with table. 
+// Side-Effect: +////////////////////////////////////////////////////////////////////// + +static void +SetCSCScale(BYTE bInputMode,BYTE bOutputMode) +{ + BYTE ucData,csc = -1; + BYTE filter = 0 ; // filter is for Video CTRL DN_FREE_GO,EN_DITHER,and ENUDFILT + + + // (1) YUV422 in,RGB/YUV444 output (Output is 8-bit,input is 12-bit) + // (2) YUV444/422 in,RGB output (CSC enable,and output is not YUV422) + // (3) RGB in,YUV444 output (CSC enable,and output is not YUV422) + // + // YUV444/RGB24 <-> YUV422 need set up/down filter. + + switch(bInputMode&F_MODE_CLRMOD_MASK) + { + #ifdef SUPPORT_INPUTYUV444 + case F_MODE_YUV444: + OS_PRINTF("Input mode is YUV444 ") ; + switch(bOutputMode&F_MODE_CLRMOD_MASK) + { + case F_MODE_YUV444: + OS_PRINTF("Output mode is YUV444\n") ; + csc = B_CSC_BYPASS ; + break ; + + case F_MODE_YUV422: + OS_PRINTF("Output mode is YUV422\n") ; + if(bInputMode & F_MODE_EN_UDFILT) // YUV444 to YUV422 need up/down filter for processing. + { + filter |= B_TX_EN_UDFILTER ; + } + csc = B_CSC_BYPASS ; + break ; + case F_MODE_RGB444: + OS_PRINTF("Output mode is RGB24\n") ; + csc = B_CSC_YUV2RGB ; + if(bInputMode & F_MODE_EN_DITHER) // YUV444 to RGB24 need dither + { + filter |= B_TX_EN_DITHER | B_TX_DNFREE_GO ; + } + + break ; + } + break ; + #endif + + #ifdef SUPPORT_INPUTYUV422 + case F_MODE_YUV422: + OS_PRINTF("Input mode is YUV422\n") ; + switch(bOutputMode&F_MODE_CLRMOD_MASK) + { + case F_MODE_YUV444: + OS_PRINTF("Output mode is YUV444\n") ; + csc = B_CSC_BYPASS ; + if(bInputMode & F_MODE_EN_UDFILT) // YUV422 to YUV444 need up filter + { + filter |= B_TX_EN_UDFILTER ; + } + + if(bInputMode & F_MODE_EN_DITHER) // YUV422 to YUV444 need dither + { + filter |= B_TX_EN_DITHER | B_TX_DNFREE_GO ; + } + + break ; + case F_MODE_YUV422: + OS_PRINTF("Output mode is YUV422\n") ; + csc = B_CSC_BYPASS ; + + break ; + + case F_MODE_RGB444: + OS_PRINTF("Output mode is RGB24\n") ; + csc = B_CSC_YUV2RGB ; + if(bInputMode & F_MODE_EN_UDFILT) // YUV422 to RGB24 need 
up/dn filter. + { + filter |= B_TX_EN_UDFILTER ; + } + + if(bInputMode & F_MODE_EN_DITHER) // YUV422 to RGB24 need dither + { + filter |= B_TX_EN_DITHER | B_TX_DNFREE_GO ; + } + + break ; + } + break ; + #endif + + #ifdef SUPPORT_INPUTRGB + case F_MODE_RGB444: + OS_PRINTF("Input mode is RGB24\n") ; + switch(bOutputMode&F_MODE_CLRMOD_MASK) + { + case F_MODE_YUV444: + OS_PRINTF("Output mode is YUV444\n") ; + csc = B_CSC_RGB2YUV ; + + if(bInputMode & F_MODE_EN_DITHER) // RGB24 to YUV444 need dither + { + filter |= B_TX_EN_DITHER | B_TX_DNFREE_GO ; + } + break ; + + case F_MODE_YUV422: + OS_PRINTF("Output mode is YUV422\n") ; + if(bInputMode & F_MODE_EN_UDFILT) // RGB24 to YUV422 need down filter. + { + filter |= B_TX_EN_UDFILTER ; + } + + if(bInputMode & F_MODE_EN_DITHER) // RGB24 to YUV422 need dither + { + filter |= B_TX_EN_DITHER | B_TX_DNFREE_GO ; + } + csc = B_CSC_RGB2YUV ; + break ; + + case F_MODE_RGB444: + OS_PRINTF("Output mode is RGB24\n") ; + csc = B_CSC_BYPASS ; + break ; + } + break ; + #endif + } + + #ifdef SUPPORT_INPUTRGB + // set the CSC metrix registers by colorimetry and quantization + if(csc == B_CSC_RGB2YUV) + { + OS_PRINTF("CSC = RGB2YUV %x ",csc) ; + switch(bInputMode&(F_MODE_ITU709|F_MODE_16_235)) + { + case F_MODE_ITU709|F_MODE_16_235: + OS_PRINTF("ITU709 16-235 ") ; + HDMITX_WriteI2C_ByteN(REG_TX_CSC_YOFF,bCSCOffset_16_235,SIZEOF_CSCOFFSET) ; + HDMITX_WriteI2C_ByteN(REG_TX_CSC_MTX11_L,bCSCMtx_RGB2YUV_ITU709_16_235,SIZEOF_CSCMTX) ; + break ; + case F_MODE_ITU709|F_MODE_0_255: + OS_PRINTF("ITU709 0-255 ") ; + HDMITX_WriteI2C_ByteN(REG_TX_CSC_YOFF,bCSCOffset_0_255,SIZEOF_CSCOFFSET) ; + HDMITX_WriteI2C_ByteN(REG_TX_CSC_MTX11_L,bCSCMtx_RGB2YUV_ITU709_0_255,SIZEOF_CSCMTX) ; + break ; + case F_MODE_ITU601|F_MODE_16_235: + OS_PRINTF("ITU601 16-235 ") ; + HDMITX_WriteI2C_ByteN(REG_TX_CSC_YOFF,bCSCOffset_16_235,SIZEOF_CSCOFFSET) ; + HDMITX_WriteI2C_ByteN(REG_TX_CSC_MTX11_L,bCSCMtx_RGB2YUV_ITU601_16_235,SIZEOF_CSCMTX) ; + break ; + case 
F_MODE_ITU601|F_MODE_0_255: + default: + OS_PRINTF("ITU601 0-255 ") ; + + HDMITX_WriteI2C_ByteN(REG_TX_CSC_YOFF,bCSCOffset_0_255,SIZEOF_CSCOFFSET) ; + HDMITX_WriteI2C_ByteN(REG_TX_CSC_MTX11_L,bCSCMtx_RGB2YUV_ITU601_0_255,SIZEOF_CSCMTX) ; + break ; + } + + } + #endif + + #ifdef SUPPORT_INPUTYUV + if (csc == B_CSC_YUV2RGB) + { +// int i; + OS_PRINTF("CSC = YUV2RGB %x ",csc) ; + + switch(bInputMode&(F_MODE_ITU709|F_MODE_16_235)) + { + case F_MODE_ITU709|F_MODE_16_235: + OS_PRINTF("ITU709 16-235 ") ; + HDMITX_WriteI2C_ByteN(REG_TX_CSC_YOFF,bCSCOffset_16_235,SIZEOF_CSCOFFSET) ; + HDMITX_WriteI2C_ByteN(REG_TX_CSC_MTX11_L,bCSCMtx_YUV2RGB_ITU709_16_235,SIZEOF_CSCMTX) ; + break ; + case F_MODE_ITU709|F_MODE_0_255: + OS_PRINTF("ITU709 0-255 ") ; + HDMITX_WriteI2C_ByteN(REG_TX_CSC_YOFF,bCSCOffset_0_255,SIZEOF_CSCOFFSET) ; + HDMITX_WriteI2C_ByteN(REG_TX_CSC_MTX11_L,bCSCMtx_YUV2RGB_ITU709_0_255,SIZEOF_CSCMTX) ; + break ; + case F_MODE_ITU601|F_MODE_16_235: + OS_PRINTF("ITU601 16-235 ") ; + HDMITX_WriteI2C_ByteN(REG_TX_CSC_YOFF,bCSCOffset_16_235,SIZEOF_CSCOFFSET) ; + HDMITX_WriteI2C_ByteN(REG_TX_CSC_MTX11_L,bCSCMtx_YUV2RGB_ITU601_16_235,SIZEOF_CSCMTX) ; + break ; + case F_MODE_ITU601|F_MODE_0_255: + default: + //????? 
debug + OS_PRINTF("ITU601 0-255 ") ; + HDMITX_WriteI2C_ByteN(REG_TX_CSC_YOFF,bCSCOffset_0_255,SIZEOF_CSCOFFSET) ; + HDMITX_WriteI2C_ByteN(REG_TX_CSC_MTX11_L,bCSCMtx_YUV2RGB_ITU601_0_255,SIZEOF_CSCMTX) ; + break ; + } + } + #endif + + ucData = HDMITX_ReadI2C_Byte(REG_TX_CSC_CTRL) & ~(M_CSC_SEL|B_TX_DNFREE_GO|B_TX_EN_DITHER|B_TX_EN_UDFILTER) ; + ucData |= filter|csc ; + HDMITX_WriteI2C_Byte(REG_TX_CSC_CTRL,ucData) ; + // set output Up/Down Filter,Dither control + +} + + +////////////////////////////////////////////////////////////////////// +// Function: SetupAFE +// Parameter: VIDEOPCLKLEVEL level +// PCLK_LOW - for 13.5MHz (for mode less than 1080p) +// PCLK MEDIUM - for 25MHz~74MHz +// PCLK HIGH - PCLK > 80Hz (for 1080p mode or above) +// Return: N/A +// Remark: set reg62~reg65 depended on HighFreqMode +// reg61 have to be programmed at last and after video stable input. +// Side-Effect: +////////////////////////////////////////////////////////////////////// + +static void +// SetupAFE(BYTE ucFreqInMHz) +SetupAFE(VIDEOPCLKLEVEL level) +{ + // @emily turn off reg61 before SetupAFE parameters. 
+ HDMITX_WriteI2C_Byte(REG_TX_AFE_DRV_CTRL,B_AFE_DRV_RST) ;/* 0x10 */ + // HDMITX_WriteI2C_Byte(REG_TX_AFE_DRV_CTRL,0x3) ; + OS_PRINTF("SetupAFE()\n") ; + + //TMDS Clock < 80MHz TMDS Clock > 80MHz + //Reg61 0x03 0x03 + + //Reg62 0x18 0x88 + //Reg63 Default Default + //Reg64 0x08 0x80 + //Reg65 Default Default + //Reg66 Default Default + //Reg67 Default Default + + switch(level) + { + case PCLK_HIGH: + HDMITX_WriteI2C_Byte(REG_TX_AFE_XP_CTRL,0x88) ; // reg62 + HDMITX_WriteI2C_Byte(REG_TX_AFE_ISW_CTRL, 0x10) ; // reg63 + HDMITX_WriteI2C_Byte(REG_TX_AFE_IP_CTRL,0x84) ; // reg64 + break ; + default: + HDMITX_WriteI2C_Byte(REG_TX_AFE_XP_CTRL,0x18) ; // reg62 + HDMITX_WriteI2C_Byte(REG_TX_AFE_ISW_CTRL, 0x10) ; // reg63 + HDMITX_WriteI2C_Byte(REG_TX_AFE_IP_CTRL,0x0C) ; // reg64 + break ; + } + //HDMITX_AndREG_Byte(REG_TX_SW_RST,~(B_REF_RST|B_VID_RST|B_AREF_RST|B_HDMI_RST)) ; + DelayMS(1) ; + HDMITX_AndREG_Byte(REG_TX_SW_RST,B_VID_RST|B_AREF_RST|B_AUD_RST|B_HDCP_RST) ; + DelayMS(100) ; + HDMITX_AndREG_Byte(REG_TX_SW_RST, B_AREF_RST|B_AUD_RST|B_HDCP_RST) ; + // REG_TX_AFE_DRV_CTRL have to be set at the last step of setup . +} + + +////////////////////////////////////////////////////////////////////// +// Function: FireAFE +// Parameter: N/A +// Return: N/A +// Remark: write reg61 with 0x04 +// When program reg61 with 0x04,then audio and video circuit work. 
+// Side-Effect: N/A +////////////////////////////////////////////////////////////////////// +static void +FireAFE() +{ + BYTE reg; + Switch_HDMITX_Bank(0) ; + + HDMITX_WriteI2C_Byte(REG_TX_AFE_DRV_CTRL,0) ; + + for(reg = 0x61 ; reg <= 0x67 ; reg++) + { + OS_PRINTF("Reg[%02X] = %02X\n",reg,HDMITX_ReadI2C_Byte(reg)) ; + } +} + +////////////////////////////////////////////////////////////////////// +// Audio Output +////////////////////////////////////////////////////////////////////// + +////////////////////////////////////////////////////////////////////// +// Function: SetAudioFormat +// Parameter: +// NumChannel - number of channel,from 1 to 8 +// AudioEnable - Audio source and type bit field,value of bit field are +// ENABLE_SPDIF (1<<4) +// ENABLE_I2S_SRC3 (1<<3) +// ENABLE_I2S_SRC2 (1<<2) +// ENABLE_I2S_SRC1 (1<<1) +// ENABLE_I2S_SRC0 (1<<0) +// SampleFreq - the audio sample frequence in Hz +// AudSWL - Audio sample width,only support 16,18,20,or 24. +// AudioCatCode - The audio channel catalogy code defined in IEC 60958-3 +// Return: ER_SUCCESS if done,ER_FAIL for otherwise. +// Remark: program audio channel control register and audio channel registers +// to enable audio by input. +// Side-Effect: register bank will keep in bank zero. 
+////////////////////////////////////////////////////////////////////// + + +static SYS_STATUS +SetAudioFormat(BYTE NumChannel,BYTE AudioEnable,BYTE bSampleFreq,BYTE AudSWL,BYTE AudioCatCode) +{ + BYTE fs = bSampleFreq ; + BYTE SWL ; + + BYTE SourceValid ; + BYTE SoruceNum ; + + + OS_PRINTF("SetAudioFormat(%d channel,%02X,SampleFreq %d,AudSWL %d,%02X)\n",NumChannel,AudioEnable,bSampleFreq,AudSWL,AudioCatCode) ; + + +//richard remove Instance[0].bOutputAudioMode |= 0x41 ; + if(NumChannel > 6) + { + SourceValid = B_AUD_ERR2FLAT | B_AUD_S3VALID | B_AUD_S2VALID | B_AUD_S1VALID ; + SoruceNum = 4 ; + } + else if (NumChannel > 4) + { + SourceValid = B_AUD_ERR2FLAT | B_AUD_S2VALID | B_AUD_S1VALID ; + SoruceNum = 3 ; + } + else if (NumChannel > 2) + { + SourceValid = B_AUD_ERR2FLAT | B_AUD_S1VALID ; + SoruceNum = 2 ; + } + else + { + SourceValid = B_AUD_ERR2FLAT ; // only two channel. + SoruceNum = 1 ; + Instance[0].bOutputAudioMode &= ~0x40 ; + } + + AudioEnable &= ~ (M_AUD_SWL|B_SPDIFTC) ; + + switch(AudSWL) + { + case 16: + SWL = AUD_SWL_16 ; + AudioEnable |= M_AUD_16BIT ; + break ; + case 18: + SWL = AUD_SWL_18 ; + AudioEnable |= M_AUD_18BIT ; + break ; + case 20: + SWL = AUD_SWL_20 ; + AudioEnable |= M_AUD_20BIT ; + break ; + case 24: + SWL = AUD_SWL_24 ; + AudioEnable |= M_AUD_24BIT ; + break ; + default: + return ER_FAIL ; + } + + + Switch_HDMITX_Bank(0) ; + HDMITX_WriteI2C_Byte(REG_TX_AUDIO_CTRL0,AudioEnable&0xF0) ; + + HDMITX_AndREG_Byte(REG_TX_SW_RST,~(B_AUD_RST|B_AREF_RST)) ; + HDMITX_WriteI2C_Byte(REG_TX_AUDIO_CTRL1,Instance[0].bOutputAudioMode) ; // regE1 bOutputAudioMode should be loaded from ROM image. + HDMITX_WriteI2C_Byte(REG_TX_AUDIO_FIFOMAP,0xE4) ; // default mapping. 
+ HDMITX_WriteI2C_Byte(REG_TX_AUDIO_CTRL3,(Instance[0].bAudioChannelSwap&0xF)|(AudioEnable&B_AUD_SPDIF)) ; + HDMITX_WriteI2C_Byte(REG_TX_AUD_SRCVALID_FLAT,SourceValid) ; + + // suggested to be 0x41 + +// Switch_HDMITX_Bank(1) ; +// HDMITX_WriteI2C_Byte(REG_TX_AUDCHST_MODE,0 |((NumChannel == 1)?1:0)) ; // 2 audio channel without pre-emphasis,if NumChannel set it as 1. +// HDMITX_WriteI2C_Byte(REG_TX_AUDCHST_CAT,AudioCatCode) ; +// HDMITX_WriteI2C_Byte(REG_TX_AUDCHST_SRCNUM,SoruceNum) ; +// HDMITX_WriteI2C_Byte(REG_TX_AUD0CHST_CHTNUM,0x21) ; +// HDMITX_WriteI2C_Byte(REG_TX_AUD1CHST_CHTNUM,0x43) ; +// HDMITX_WriteI2C_Byte(REG_TX_AUD2CHST_CHTNUM,0x65) ; +// HDMITX_WriteI2C_Byte(REG_TX_AUD3CHST_CHTNUM,0x87) ; +// HDMITX_WriteI2C_Byte(REG_TX_AUDCHST_CA_FS,0x00|fs) ; // choose clock +// fs = ~fs ; // OFS is the one's complement of FS +// HDMITX_WriteI2C_Byte(REG_TX_AUDCHST_OFS_WL,(fs<<4)|SWL) ; +// Switch_HDMITX_Bank(0) ; + + Switch_HDMITX_Bank(1) ; + HDMITX_WriteI2C_Byte(REG_TX_AUDCHST_MODE,0 |((NumChannel == 1)?1:0)) ; // 2 audio channel without pre-emphasis,if NumChannel set it as 1. 
+ HDMITX_WriteI2C_Byte(REG_TX_AUDCHST_CAT,AudioCatCode) ; + HDMITX_WriteI2C_Byte(REG_TX_AUDCHST_SRCNUM,SoruceNum) ; + HDMITX_WriteI2C_Byte(REG_TX_AUD0CHST_CHTNUM,0) ; + HDMITX_WriteI2C_Byte(REG_TX_AUDCHST_CA_FS,0x00|fs) ; // choose clock + fs = ~fs ; // OFS is the one's complement of FS + HDMITX_WriteI2C_Byte(REG_TX_AUDCHST_OFS_WL,(fs<<4)|SWL) ; + Switch_HDMITX_Bank(0) ; + + // richard modify (could be bug), if(!(AudioEnable | B_AUD_SPDIF)) + if(!(AudioEnable & B_AUD_SPDIF)) + { + HDMITX_WriteI2C_Byte(REG_TX_AUDIO_CTRL0,AudioEnable) ; + } + + Instance[0].bAudioChannelEnable = AudioEnable ; + + // HDMITX_AndREG_Byte(REG_TX_SW_RST,B_AUD_RST) ; // enable Audio + return ER_SUCCESS; +} + + + +static void +AutoAdjustAudio() +{ + unsigned long SampleFreq ; + unsigned long N ; + unsigned long CTS ; + BYTE fs, uc ; + +// bPendingAdjustAudioFreq = TRUE ; + +// if( CAT6611_AudioChannelEnable & B_AUD_SPDIF ) +// { +// if(!(HDMITX_ReadI2C_Byte(REG_TX_CLK_STATUS2) & B_OSF_LOCK)) +// { +// return ; +// } +// } + + Switch_HDMITX_Bank(1) ; + + N = ((unsigned long)HDMITX_ReadI2C_Byte(REGPktAudN2)&0xF) << 16 ; + N |= ((unsigned long)HDMITX_ReadI2C_Byte(REGPktAudN1)) <<8 ; + N |= ((unsigned long)HDMITX_ReadI2C_Byte(REGPktAudN0)) ; + + CTS = ((unsigned long)HDMITX_ReadI2C_Byte(REGPktAudCTSCnt2)&0xF) << 16 ; + CTS |= ((unsigned long)HDMITX_ReadI2C_Byte(REGPktAudCTSCnt1)) <<8 ; + CTS |= ((unsigned long)HDMITX_ReadI2C_Byte(REGPktAudCTSCnt0)) ; + Switch_HDMITX_Bank(0) ; + + // CTS = TMDSCLK * N / ( 128 * SampleFreq ) + // SampleFreq = TMDSCLK * N / (128*CTS) + + if( CTS == 0 ) + { + return ; + } + + SampleFreq = Instance[0].TMDSClock/CTS ; + SampleFreq *= N ; + SampleFreq /= 128 ; + + if( SampleFreq>31000 && SampleFreq<=38050 ) + { + Instance[0].bAudFs = AUDFS_32KHz ; + fs = AUDFS_32KHz ;; + } + else if (SampleFreq < 46050 ) // 44.1KHz + { + Instance[0].bAudFs = AUDFS_44p1KHz ; + fs = AUDFS_44p1KHz ;; + } + else if (SampleFreq < 68100 ) // 48KHz + { + Instance[0].bAudFs = AUDFS_48KHz ; + 
fs = AUDFS_48KHz ;; + } + else if (SampleFreq < 92100 ) // 88.2 KHz + { + Instance[0].bAudFs = AUDFS_88p2KHz ; + fs = AUDFS_88p2KHz ;; + } + else if (SampleFreq < 136200 ) // 96KHz + { + Instance[0].bAudFs = AUDFS_96KHz ; + fs = AUDFS_96KHz ;; + } + else if (SampleFreq < 184200 ) // 176.4KHz + { + Instance[0].bAudFs = AUDFS_176p4KHz ; + fs = AUDFS_176p4KHz ;; + } + else if (SampleFreq < 240200 ) // 192KHz + { + Instance[0].bAudFs = AUDFS_192KHz ; + fs = AUDFS_192KHz ;; + } + else + { + Instance[0].bAudFs = AUDFS_OTHER; + fs = AUDFS_OTHER;; + } + +// bPendingAdjustAudioFreq = FALSE ; + + SetNCTS(Instance[0].TMDSClock, Instance[0].bAudFs) ; // set N, CTS by new generated clock. + + Switch_HDMITX_Bank(1) ; // adjust the new fs in channel status registers + HDMITX_WriteI2C_Byte(REG_TX_AUDCHST_CA_FS,0x00|fs) ; // choose clock + fs = ~fs ; // OFS is the one's complement of FS + uc = HDMITX_ReadI2C_Byte(REG_TX_AUDCHST_OFS_WL) ; + uc &= 0xF ; + uc |= fs << 4 ; + HDMITX_WriteI2C_Byte(REG_TX_AUDCHST_OFS_WL,uc) ; + + Switch_HDMITX_Bank(0) ; + +} + +static void +SetupAudioChannel() +{ + static BYTE bEnableAudioChannel=FALSE ; + if( (HDMITX_ReadI2C_Byte(REG_TX_SW_RST) & (B_AUD_RST|B_AREF_RST)) == 0) // audio enabled + { + Switch_HDMITX_Bank(0) ; + + if((HDMITX_ReadI2C_Byte(REG_TX_AUDIO_CTRL0) & 0xf) == 0) + { + if(HDMITX_ReadI2C_Byte(REG_TX_CLK_STATUS2) & B_OSF_LOCK) + { + SetNCTS(Instance[0].TMDSClock, Instance[0].bAudFs) ; // to enable automatic progress setting for N/CTS + DelayMS(5); + AutoAdjustAudio() ; + Switch_HDMITX_Bank(0) ; + HDMITX_WriteI2C_Byte(REG_TX_AUDIO_CTRL0, Instance[0].bAudioChannelEnable) ; + bEnableAudioChannel=TRUE ; + } + } + else + { + if((HDMITX_ReadI2C_Byte(REG_TX_CLK_STATUS2) & B_OSF_LOCK)==0) + { + // AutoAdjustAudio() ; + // ForceSetNCTS(CurrentPCLK, CurrentSampleFreq) ; + if( bEnableAudioChannel == TRUE ) + { + Switch_HDMITX_Bank(0) ; + HDMITX_WriteI2C_Byte(REG_TX_AUDIO_CTRL0, Instance[0].bAudioChannelEnable&0xF0) ; + } + bEnableAudioChannel=FALSE 
; + } + } + } +} +////////////////////////////////////////////////////////////////////// +// Function: SetNCTS +// Parameter: PCLK - video clock in Hz. +// Fs - audio sample frequency in Hz +// Return: ER_SUCCESS if success +// Remark: set N value,the CTS will be auto generated by HW. +// Side-Effect: register bank will reset to bank 0. +////////////////////////////////////////////////////////////////////// + +static SYS_STATUS +SetNCTS(ULONG PCLK,ULONG Fs) +{ + ULONG n,MCLK ; + + MCLK = Fs * 256 ; // MCLK = fs * 256 ; + + OS_PRINTF("SetNCTS(%ld,%ld): MCLK = %ld\n",PCLK,Fs,MCLK) ; + + if( PCLK ) + { + switch (Fs) { + case AUDFS_32KHz: + switch (PCLK) { + case 74175000: n = 11648; break; + case 14835000: n = 11648; break; + default: n = 4096; + } + break; + case AUDFS_44p1KHz: + switch (PCLK) { + case 74175000: n = 17836; break; + case 14835000: n = 8918; break; + default: n = 6272; + } + break; + case AUDFS_48KHz: + switch (PCLK) { + case 74175000: n = 11648; break; + case 14835000: n = 5824; break; + default: n = 6144; + } + break; + case AUDFS_88p2KHz: + switch (PCLK) { + case 74175000: n = 35672; break; + case 14835000: n = 17836; break; + default: n = 12544; + } + break; + case AUDFS_96KHz: + switch (PCLK) { + case 74175000: n = 23296; break; + case 14835000: n = 11648; break; + default: n = 12288; + } + break; + case AUDFS_176p4KHz: + switch (PCLK) { + case 74175000: n = 71344; break; + case 14835000: n = 35672; break; + default: n = 25088; + } + break; + case AUDFS_192KHz: + switch (PCLK) { + case 74175000: n = 46592; break; + case 14835000: n = 23296; break; + default: n = 24576; + } + break; + default: n = MCLK / 2000; + } + } + else + { + switch(Fs) + { + case AUDFS_32KHz: n = 4096; break; + case AUDFS_44p1KHz: n = 6272; break; + case AUDFS_48KHz: n = 6144; break; + case AUDFS_88p2KHz: n = 12544; break; + case AUDFS_96KHz: n = 12288; break; + case AUDFS_176p4KHz: n = 25088; break; + case AUDFS_192KHz: n = 24576; break; + default: n = 6144; + } + + } + + + 
OS_PRINTF("N = %ld\n",n) ; + Switch_HDMITX_Bank(1) ; + HDMITX_WriteI2C_Byte(REGPktAudN0,(BYTE)((n)&0xFF)) ; + HDMITX_WriteI2C_Byte(REGPktAudN1,(BYTE)((n>>8)&0xFF)) ; + HDMITX_WriteI2C_Byte(REGPktAudN2,(BYTE)((n>>16)&0xF)) ; + Switch_HDMITX_Bank(0) ; + + HDMITX_WriteI2C_Byte(REG_TX_PKT_SINGLE_CTRL,0) ; // D[1] = 0,HW auto count CTS + + HDMITX_SetREG_Byte(REG_TX_CLK_CTRL0,~M_EXT_MCLK_SEL,B_EXT_256FS) ; + return ER_SUCCESS ; +} + +////////////////////////////////////////////////////////////////////// +// DDC Function. +////////////////////////////////////////////////////////////////////// + + +////////////////////////////////////////////////////////////////////// +// Function: ClearDDCFIFO +// Parameter: N/A +// Return: N/A +// Remark: clear the DDC FIFO. +// Side-Effect: DDC master will set to be HOST. +////////////////////////////////////////////////////////////////////// + +static void +ClearDDCFIFO() +{ + HDMITX_WriteI2C_Byte(REG_TX_DDC_MASTER_CTRL,B_MASTERDDC|B_MASTERHOST) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_CMD,CMD_FIFO_CLR) ; +} +#if 0 +static void +GenerateDDCSCLK() +{ + HDMITX_WriteI2C_Byte(REG_TX_DDC_MASTER_CTRL,B_MASTERDDC|B_MASTERHOST) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_CMD,CMD_GEN_SCLCLK) ; +} +#endif +////////////////////////////////////////////////////////////////////// +// Function: AbortDDC +// Parameter: N/A +// Return: N/A +// Remark: Force abort DDC and reset DDC bus. +// Side-Effect: +////////////////////////////////////////////////////////////////////// + +static void +AbortDDC() +{ + BYTE CPDesire,SWReset,DDCMaster ; + BYTE uc, timeout ; + // save the SW reset,DDC master,and CP Desire setting. 
+ SWReset = HDMITX_ReadI2C_Byte(REG_TX_SW_RST) ; + CPDesire = HDMITX_ReadI2C_Byte(REG_TX_HDCP_DESIRE) ; + DDCMaster = HDMITX_ReadI2C_Byte(REG_TX_DDC_MASTER_CTRL) ; + + + HDMITX_WriteI2C_Byte(REG_TX_HDCP_DESIRE,CPDesire&(~B_CPDESIRE)) ; // @emily change order + HDMITX_WriteI2C_Byte(REG_TX_SW_RST,SWReset|B_HDCP_RST) ; // @emily change order + HDMITX_WriteI2C_Byte(REG_TX_DDC_MASTER_CTRL,B_MASTERDDC|B_MASTERHOST) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_CMD,CMD_DDC_ABORT) ; + + for( timeout = 0 ; timeout < 200 ; timeout++ ) + { + uc = HDMITX_ReadI2C_Byte(REG_TX_DDC_STATUS) ; + if (uc&B_DDC_DONE) + { + break ; // success + } + + if( uc & (B_DDC_NOACK|B_DDC_WAITBUS|B_DDC_ARBILOSE) ) + { + ErrorF("AbortDDC Fail by reg16=%02X\n",uc) ; + break ; + } + DelayMS(1) ; // delay 1 ms to stable. + } + + // restore the SW reset,DDC master,and CP Desire setting. + HDMITX_WriteI2C_Byte(REG_TX_SW_RST,SWReset) ; + HDMITX_WriteI2C_Byte(REG_TX_HDCP_DESIRE,CPDesire) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_MASTER_CTRL,DDCMaster) ; +} + +////////////////////////////////////////////////////////////////////// +// Packet and InfoFrame +////////////////////////////////////////////////////////////////////// + +// //////////////////////////////////////////////////////////////////////////////// +// // Function: SetAVMute() +// // Parameter: N/A +// // Return: N/A +// // Remark: set AVMute as TRUE and enable GCP sending. +// // Side-Effect: N/A +// //////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////// +// void +// SetAVMute() +// { +// Switch_HDMITX_Bank(0) ; +// HDMITX_WriteI2C_Byte(REG_TX_GCP,B_SET_AVMUTE) ; +// HDMITX_WriteI2C_Byte(REG_TX_PKT_GENERAL_CTRL,B_ENABLE_PKT|B_REPEAT_PKT) ; +// } + +// //////////////////////////////////////////////////////////////////////////////// +// // Function: SetAVMute(FALSE) +// // Parameter: N/A +// // Return: N/A +// // Remark: clear AVMute as TRUE and enable GCP sending. 
+// // Side-Effect: N/A +// //////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////// +// void +// SetAVMute(FALSE) +// { +// Switch_HDMITX_Bank(0) ; +// HDMITX_WriteI2C_Byte(REG_TX_GCP,B_CLR_AVMUTE) ; +// HDMITX_WriteI2C_Byte(REG_TX_PKT_GENERAL_CTRL,B_ENABLE_PKT|B_REPEAT_PKT) ; +// } + + + +////////////////////////////////////////////////////////////////////// +// Function: ReadEDID +// Parameter: pData - the pointer of buffer to receive EDID ucdata. +// bSegment - the segment of EDID readback. +// offset - the offset of EDID ucdata in the segment. in byte. +// count - the read back bytes count,cannot exceed 32 +// Return: ER_SUCCESS if successfully getting EDID. ER_FAIL otherwise. +// Remark: function for read EDID ucdata from reciever. +// Side-Effect: DDC master will set to be HOST. DDC FIFO will be used and dirty. +////////////////////////////////////////////////////////////////////// + +static SYS_STATUS +ReadEDID(BYTE *pData,BYTE bSegment,BYTE offset,SHORT Count) +{ + SHORT RemainedCount,ReqCount ; + BYTE bCurrOffset ; + SHORT TimeOut ; + BYTE *pBuff = pData ; + BYTE ucdata ; + + // ErrorF("ReadEDID(%08lX,%d,%d,%d)\n",(ULONG)pData,bSegment,offset,Count) ; + if(!pData) + { + ErrorF("ReadEDID(): Invallid pData pointer %08lX\n",(ULONG)pData) ; + return ER_FAIL ; + } + + if(HDMITX_ReadI2C_Byte(REG_TX_INT_STAT1) & B_INT_DDC_BUS_HANG) + { + ErrorF("Called AboutDDC()\n") ; + AbortDDC() ; + + } + + ClearDDCFIFO() ; + + RemainedCount = Count ; + bCurrOffset = offset ; + + Switch_HDMITX_Bank(0) ; + + while(RemainedCount > 0) + { + + ReqCount = (RemainedCount > DDC_FIFO_MAXREQ)?DDC_FIFO_MAXREQ:RemainedCount ; + OS_PRINTF("ReadEDID(): ReqCount = %d,bCurrOffset = %d\n",ReqCount,bCurrOffset) ; + + HDMITX_WriteI2C_Byte(REG_TX_DDC_MASTER_CTRL,B_MASTERDDC|B_MASTERHOST) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_CMD,CMD_FIFO_CLR) ; + + for(TimeOut = 0 ; TimeOut < 200 ; TimeOut++) + { + 
ucdata = HDMITX_ReadI2C_Byte(REG_TX_DDC_STATUS) ; + + if(ucdata&B_DDC_DONE) + { + break ; + } + + if((ucdata & B_DDC_ERROR)||(HDMITX_ReadI2C_Byte(REG_TX_INT_STAT1) & B_INT_DDC_BUS_HANG)) + { + ErrorF("Called AboutDDC()\n") ; + AbortDDC() ; + return ER_FAIL ; + } + } + + HDMITX_WriteI2C_Byte(REG_TX_DDC_MASTER_CTRL,B_MASTERDDC|B_MASTERHOST) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_HEADER,DDC_EDID_ADDRESS) ; // for EDID ucdata get + HDMITX_WriteI2C_Byte(REG_TX_DDC_REQOFF,bCurrOffset) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_REQCOUNT,(BYTE)ReqCount) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_EDIDSEG,bSegment) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_CMD,CMD_EDID_READ) ; + + bCurrOffset += ReqCount ; + RemainedCount -= ReqCount ; + + for(TimeOut = 250 ; TimeOut > 0 ; TimeOut --) + { + DelayMS(1) ; + ucdata = HDMITX_ReadI2C_Byte(REG_TX_DDC_STATUS) ; + if(ucdata & B_DDC_DONE) + { + break ; + } + + if(ucdata & B_DDC_ERROR) + { + ErrorF("ReadEDID(): DDC_STATUS = %02X,fail.\n",ucdata) ; + return ER_FAIL ; + } + } + + if(TimeOut == 0) + { + ErrorF("ReadEDID(): DDC TimeOut. 
\n") ; + return ER_FAIL ; + } + + do + { + *(pBuff++) = HDMITX_ReadI2C_Byte(REG_TX_DDC_READFIFO) ; + ReqCount -- ; + }while(ReqCount > 0) ; + + } + + return ER_SUCCESS ; +} + + + +#ifdef SUPPORT_HDCP +////////////////////////////////////////////////////////////////////// +// Authentication +////////////////////////////////////////////////////////////////////// +static void +HDCP_ClearAuthInterrupt() +{ + BYTE uc ; + uc = HDMITX_ReadI2C_Byte(REG_TX_INT_MASK2) & (~(B_KSVLISTCHK_MASK|B_T_AUTH_DONE_MASK|B_AUTH_FAIL_MASK)); + HDMITX_WriteI2C_Byte(REG_TX_INT_CLR0,B_CLR_AUTH_FAIL|B_CLR_AUTH_DONE|B_CLR_KSVLISTCHK) ; + HDMITX_WriteI2C_Byte(REG_TX_INT_CLR1,0) ; + HDMITX_WriteI2C_Byte(REG_TX_SYS_STATUS,B_INTACTDONE) ; +} + +static void +HDCP_ResetAuth() +{ + HDMITX_WriteI2C_Byte(REG_TX_LISTCTRL,0) ; + HDMITX_WriteI2C_Byte(REG_TX_HDCP_DESIRE,0) ; + HDMITX_OrREG_Byte(REG_TX_SW_RST,B_HDCP_RST) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_MASTER_CTRL,B_MASTERDDC|B_MASTERHOST) ; + HDCP_ClearAuthInterrupt() ; + AbortDDC() ; +} +////////////////////////////////////////////////////////////////////// +// Function: HDCP_EnableEncryption +// Parameter: N/A +// Return: ER_SUCCESS if done. +// Remark: Set regC1 as zero to enable continue authentication. +// Side-Effect: register bank will reset to zero. 
+////////////////////////////////////////////////////////////////////// + +static SYS_STATUS +HDCP_EnableEncryption() +{ + Switch_HDMITX_Bank(0) ; + return HDMITX_WriteI2C_Byte(REG_TX_ENCRYPTION,B_ENABLE_ENCRYPTION); +} + + +////////////////////////////////////////////////////////////////////// +// Function: HDCP_Auth_Fire() +// Parameter: N/A +// Return: N/A +// Remark: write anything to reg21 to enable HDCP authentication by HW +// Side-Effect: N/A +////////////////////////////////////////////////////////////////////// + +static void +HDCP_Auth_Fire() +{ + // ErrorF("HDCP_Auth_Fire():\n") ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_MASTER_CTRL,B_MASTERDDC|B_MASTERHDCP) ; // MASTERHDCP,no need command but fire. + HDMITX_WriteI2C_Byte(REG_TX_AUTHFIRE,1); +} + +////////////////////////////////////////////////////////////////////// +// Function: HDCP_StartAnCipher +// Parameter: N/A +// Return: N/A +// Remark: Start the Cipher to free run for random number. When stop,An is +// ready in Reg30. +// Side-Effect: N/A +////////////////////////////////////////////////////////////////////// + +static void +HDCP_StartAnCipher() +{ + HDMITX_WriteI2C_Byte(REG_TX_AN_GENERATE,B_START_CIPHER_GEN) ; + DelayMS(1) ; // delay 1 ms +} + +////////////////////////////////////////////////////////////////////// +// Function: HDCP_StopAnCipher +// Parameter: N/A +// Return: N/A +// Remark: Stop the Cipher,and An is ready in Reg30. +// Side-Effect: N/A +////////////////////////////////////////////////////////////////////// + +static void +HDCP_StopAnCipher() +{ + HDMITX_WriteI2C_Byte(REG_TX_AN_GENERATE,B_STOP_CIPHER_GEN) ; +} + +////////////////////////////////////////////////////////////////////// +// Function: HDCP_GenerateAn +// Parameter: N/A +// Return: N/A +// Remark: start An ciper random run at first,then stop it. 
Software can get +// an in reg30~reg38,the write to reg28~2F +// Side-Effect: +////////////////////////////////////////////////////////////////////// + +static void +HDCP_GenerateAn() +{ + BYTE Data[8] ; + + HDCP_StartAnCipher() ; + // HDMITX_WriteI2C_Byte(REG_TX_AN_GENERATE,B_START_CIPHER_GEN) ; + // DelayMS(1) ; // delay 1 ms + // HDMITX_WriteI2C_Byte(REG_TX_AN_GENERATE,B_STOP_CIPHER_GEN) ; + + HDCP_StopAnCipher() ; + + Switch_HDMITX_Bank(0) ; + // new An is ready in reg30 + HDMITX_ReadI2C_ByteN(REG_TX_AN_GEN,Data,8) ; + HDMITX_WriteI2C_ByteN(REG_TX_AN,Data,8) ; + +} + + +////////////////////////////////////////////////////////////////////// +// Function: HDCP_GetBCaps +// Parameter: pBCaps - pointer of byte to get BCaps. +// pBStatus - pointer of two bytes to get BStatus +// Return: ER_SUCCESS if successfully got BCaps and BStatus. +// Remark: get B status and capability from HDCP reciever via DDC bus. +// Side-Effect: +////////////////////////////////////////////////////////////////////// + +static SYS_STATUS +HDCP_GetBCaps(PBYTE pBCaps ,PUSHORT pBStatus) +{ + BYTE ucdata ; + BYTE TimeOut ; + + Switch_HDMITX_Bank(0) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_MASTER_CTRL,B_MASTERDDC|B_MASTERHOST) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_HEADER,DDC_HDCP_ADDRESS) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_REQOFF,0x40) ; // BCaps offset + HDMITX_WriteI2C_Byte(REG_TX_DDC_REQCOUNT,3) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_CMD,CMD_DDC_SEQ_BURSTREAD) ; + + for(TimeOut = 200 ; TimeOut > 0 ; TimeOut --) + { + DelayMS(1) ; + + ucdata = HDMITX_ReadI2C_Byte(REG_TX_DDC_STATUS) ; + if(ucdata & B_DDC_DONE) + { + //ErrorF("HDCP_GetBCaps(): DDC Done.\n") ; + break ; + } + + if(ucdata & B_DDC_ERROR) + { + ErrorF("HDCP_GetBCaps(): DDC fail by reg16=%02X.\n",ucdata) ; + return ER_FAIL ; + } + } + + if(TimeOut == 0) + { + return ER_FAIL ; + } + + HDMITX_ReadI2C_ByteN(REG_TX_BSTAT,(PBYTE)pBStatus,2) ; + *pBCaps = HDMITX_ReadI2C_Byte(REG_TX_BCAP) ; + return ER_SUCCESS ; + +} + + 
+////////////////////////////////////////////////////////////////////// +// Function: HDCP_GetBKSV +// Parameter: pBKSV - pointer of 5 bytes buffer for getting BKSV +// Return: ER_SUCCESS if successfuly got BKSV from Rx. +// Remark: Get BKSV from HDCP reciever. +// Side-Effect: N/A +////////////////////////////////////////////////////////////////////// + +static SYS_STATUS +HDCP_GetBKSV(BYTE *pBKSV) +{ + BYTE ucdata ; + BYTE TimeOut ; + + Switch_HDMITX_Bank(0) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_MASTER_CTRL,B_MASTERDDC|B_MASTERHOST) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_HEADER,DDC_HDCP_ADDRESS) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_REQOFF,0x00) ; // BKSV offset + HDMITX_WriteI2C_Byte(REG_TX_DDC_REQCOUNT,5) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_CMD,CMD_DDC_SEQ_BURSTREAD) ; + + for(TimeOut = 200 ; TimeOut > 0 ; TimeOut --) + { + DelayMS(1) ; + + ucdata = HDMITX_ReadI2C_Byte(REG_TX_DDC_STATUS) ; + if(ucdata & B_DDC_DONE) + { + ErrorF("HDCP_GetBCaps(): DDC Done.\n") ; + break ; + } + + if(ucdata & B_DDC_ERROR) + { + ErrorF("HDCP_GetBCaps(): DDC No ack or arbilose,%x,maybe cable did not connected. Fail.\n",ucdata) ; + return ER_FAIL ; + } + } + + if(TimeOut == 0) + { + return ER_FAIL ; + } + + HDMITX_ReadI2C_ByteN(REG_TX_BKSV,(PBYTE)pBKSV,5) ; + + return ER_SUCCESS ; +} + +////////////////////////////////////////////////////////////////////// +// Function:HDCP_Authenticate +// Parameter: N/A +// Return: ER_SUCCESS if Authenticated without error. +// Remark: do Authentication with Rx +// Side-Effect: +// 1. Instance[0].bAuthenticated global variable will be TRUE when authenticated. +// 2. Auth_done interrupt and AUTH_FAIL interrupt will be enabled. 
+////////////////////////////////////////////////////////////////////// +static BYTE +countbit(BYTE b) +{ + BYTE i,count ; + for( i = 0, count = 0 ; i < 8 ; i++ ) + { + if( b & (1< 6) + { + ErrorF("Down Stream Count %d is over maximum supported number 6,fail.\n",(BStatus & M_DOWNSTREAM_COUNT)) ; + return ER_FAIL ; + } + */ + + HDCP_GetBKSV(BKSV) ; + ErrorF("BKSV %02X %02X %02X %02X %02X\n",BKSV[0],BKSV[1],BKSV[2],BKSV[3],BKSV[4]) ; + + for(TimeOut = 0, ucdata = 0 ; TimeOut < 5 ; TimeOut ++) + { + ucdata += countbit(BKSV[TimeOut]) ; + } + if( ucdata != 20 ) return ER_FAIL ; + + + #ifdef SUPPORT_REVOKE_KSV + HDCP_VerifyRevocationList(SRM1,BKSV,&revoked) ; + if(revoked) + { + ErrorF("BKSV is revoked\n") ; return ER_FAIL ; + } + ErrorF("BKSV %02X %02X %02X %02X %02X is NOT %srevoked\n",BKSV[0],BKSV[1],BKSV[2],BKSV[3],BKSV[4],revoked?"not ":"") ; + #endif // SUPPORT_DSSSHA + + Switch_HDMITX_Bank(0) ; // switch bank action should start on direct register writting of each function. + + // 2006/08/11 added by jjtseng + // enable HDCP on CPDired enabled. + HDMITX_AndREG_Byte(REG_TX_SW_RST,~(B_HDCP_RST)) ; + //~jjtseng 2006/08/11 + +// if(BCaps & B_CAP_HDCP_1p1) +// { +// ErrorF("RX support HDCP 1.1\n") ; +// HDMITX_WriteI2C_Byte(REG_TX_HDCP_DESIRE,B_ENABLE_HDPC11|B_CPDESIRE) ; +// } +// else +// { +// ErrorF("RX not support HDCP 1.1\n") ; + HDMITX_WriteI2C_Byte(REG_TX_HDCP_DESIRE,B_CPDESIRE) ; +// } + + + // HDMITX_WriteI2C_Byte(REG_TX_INT_CLR0,B_CLR_AUTH_DONE|B_CLR_AUTH_FAIL|B_CLR_KSVLISTCHK) ; + // HDMITX_WriteI2C_Byte(REG_TX_INT_CLR1,0) ; // don't clear other settings. + // ucdata = HDMITX_ReadI2C_Byte(REG_TX_SYS_STATUS) ; + // ucdata = (ucdata & M_CTSINTSTEP) | B_INTACTDONE ; + // HDMITX_WriteI2C_Byte(REG_TX_SYS_STATUS,ucdata) ; // clear action. 
+ + // HDMITX_AndREG_Byte(REG_TX_INT_MASK2,~(B_AUTH_FAIL_MASK|B_T_AUTH_DONE_MASK)) ; // enable GetBCaps Interrupt + HDCP_ClearAuthInterrupt() ; + ErrorF("int2 = %02X DDC_Status = %02X\n",HDMITX_ReadI2C_Byte(REG_TX_INT_STAT2),HDMITX_ReadI2C_Byte(REG_TX_DDC_STATUS)) ; + + + HDCP_GenerateAn() ; + HDMITX_WriteI2C_Byte(REG_TX_LISTCTRL,0) ; + Instance[0].bAuthenticated = FALSE ; + + if((BCaps & B_CAP_HDMI_REPEATER) == 0) + { + HDCP_Auth_Fire(); + // wait for status ; + + for(TimeOut = 250 ; TimeOut > 0 ; TimeOut --) + { + DelayMS(5) ; // delay 1ms + ucdata = HDMITX_ReadI2C_Byte(REG_TX_AUTH_STAT) ; + ErrorF("reg46 = %02x reg16 = %02x\n",ucdata,HDMITX_ReadI2C_Byte(0x16)) ; + + if(ucdata & B_T_AUTH_DONE) + { + Instance[0].bAuthenticated = TRUE ; + break ; + } + + ucdata = HDMITX_ReadI2C_Byte(REG_TX_INT_STAT2) ; + if(ucdata & B_INT_AUTH_FAIL) + { + /* + HDMITX_WriteI2C_Byte(REG_TX_INT_CLR0,B_CLR_AUTH_FAIL) ; + HDMITX_WriteI2C_Byte(REG_TX_INT_CLR1,0) ; + HDMITX_WriteI2C_Byte(REG_TX_SYS_STATUS,B_INTACTDONE) ; + HDMITX_WriteI2C_Byte(REG_TX_SYS_STATUS,0) ; + */ + ErrorF("HDCP_Authenticate(): Authenticate fail\n") ; + Instance[0].bAuthenticated = FALSE ; + return ER_FAIL ; + } + } + + if(TimeOut == 0) + { + ErrorF("HDCP_Authenticate(): Time out. return fail\n") ; + Instance[0].bAuthenticated = FALSE ; + return ER_FAIL ; + } + return ER_SUCCESS ; + } + + return HDCP_Authenticate_Repeater() ; +} + +////////////////////////////////////////////////////////////////////// +// Function: HDCP_VerifyIntegration +// Parameter: N/A +// Return: ER_SUCCESS if success,if AUTH_FAIL interrupt status,return fail. +// Remark: no used now. +// Side-Effect: +////////////////////////////////////////////////////////////////////// + +static SYS_STATUS +HDCP_VerifyIntegration() +{ + // richard BYTE ucdata ; + // if any interrupt issued a Auth fail,returned the Verify Integration fail. 
+ + if(HDMITX_ReadI2C_Byte(REG_TX_INT_STAT1) & B_INT_AUTH_FAIL) + { + HDCP_ClearAuthInterrupt() ; + Instance[0].bAuthenticated = FALSE ; + return ER_FAIL ; + } + + if(Instance[0].bAuthenticated == TRUE) + { + return ER_SUCCESS ; + } + + return ER_FAIL ; +} + +////////////////////////////////////////////////////////////////////// +// Function: HDCP_Authenticate_Repeater +// Parameter: BCaps and BStatus +// Return: ER_SUCCESS if success,if AUTH_FAIL interrupt status,return fail. +// Remark: +// Side-Effect: as Authentication +////////////////////////////////////////////////////////////////////// +static _XDATA BYTE KSVList[32] ; +static _XDATA BYTE Vr[20] ; +static _XDATA BYTE M0[8] ; + +static void +HDCP_CancelRepeaterAuthenticate() +{ + ErrorF("HDCP_CancelRepeaterAuthenticate") ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_MASTER_CTRL,B_MASTERDDC|B_MASTERHOST) ; + AbortDDC() ; + HDMITX_WriteI2C_Byte(REG_TX_LISTCTRL,B_LISTFAIL|B_LISTDONE) ; + HDCP_ClearAuthInterrupt() ; +} + +static void +HDCP_ResumeRepeaterAuthenticate() +{ + HDMITX_WriteI2C_Byte(REG_TX_LISTCTRL,B_LISTDONE) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_MASTER_CTRL,B_MASTERHDCP) ; +} + + +static SYS_STATUS +HDCP_GetKSVList(BYTE *pKSVList,BYTE cDownStream) +{ + BYTE TimeOut = 100 ; + BYTE ucdata ; + + if(cDownStream == 0 || pKSVList == NULL) + { + return ER_FAIL ; + } + + HDMITX_WriteI2C_Byte(REG_TX_DDC_MASTER_CTRL,B_MASTERHOST) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_HEADER,0x74) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_REQOFF,0x43) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_REQCOUNT,cDownStream * 5) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_CMD,CMD_DDC_SEQ_BURSTREAD) ; + + + for(TimeOut = 200 ; TimeOut > 0 ; TimeOut --) + { + + ucdata = HDMITX_ReadI2C_Byte(REG_TX_DDC_STATUS) ; + if(ucdata & B_DDC_DONE) + { + ErrorF("HDCP_GetKSVList(): DDC Done.\n") ; + break ; + } + + if(ucdata & B_DDC_ERROR) + { + ErrorF("HDCP_GetKSVList(): DDC Fail by REG_TX_DDC_STATUS = %x.\n",ucdata) ; + return ER_FAIL ; + } + DelayMS(5) ; + } + + if(TimeOut == 0) + { 
+ return ER_FAIL ; + } + + ErrorF("HDCP_GetKSVList(): KSV") ; + for(TimeOut = 0 ; TimeOut < cDownStream * 5 ; TimeOut++) + { + pKSVList[TimeOut] = HDMITX_ReadI2C_Byte(REG_TX_DDC_READFIFO) ; + ErrorF(" %02X",pKSVList[TimeOut]) ; + } + ErrorF("\n") ; + return ER_SUCCESS ; +} + +static SYS_STATUS +HDCP_GetVr(BYTE *pVr) +{ + BYTE TimeOut ; + BYTE ucdata ; + + if(pVr == NULL) + { + // richard return NULL ; + return ER_FAIL; + } + + HDMITX_WriteI2C_Byte(REG_TX_DDC_MASTER_CTRL,B_MASTERHOST) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_HEADER,0x74) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_REQOFF,0x20) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_REQCOUNT,20) ; + HDMITX_WriteI2C_Byte(REG_TX_DDC_CMD,CMD_DDC_SEQ_BURSTREAD) ; + + + for(TimeOut = 200 ; TimeOut > 0 ; TimeOut --) + { + ucdata = HDMITX_ReadI2C_Byte(REG_TX_DDC_STATUS) ; + if(ucdata & B_DDC_DONE) + { + ErrorF("HDCP_GetVr(): DDC Done.\n") ; + break ; + } + + if(ucdata & B_DDC_ERROR) + { + ErrorF("HDCP_GetVr(): DDC fail by REG_TX_DDC_STATUS = %x.\n",ucdata) ; + return ER_FAIL ; + } + DelayMS(5) ; + } + + if(TimeOut == 0) + { + ErrorF("HDCP_GetVr(): DDC fail by timeout.\n",ucdata) ; + return ER_FAIL ; + } + + Switch_HDMITX_Bank(0) ; + + for(TimeOut = 0 ; TimeOut < 5 ; TimeOut++) + { + HDMITX_WriteI2C_Byte(REG_TX_SHA_SEL ,TimeOut) ; + pVr[TimeOut*4+3] = (ULONG)HDMITX_ReadI2C_Byte(REG_TX_SHA_RD_BYTE1) ; + pVr[TimeOut*4+2] = (ULONG)HDMITX_ReadI2C_Byte(REG_TX_SHA_RD_BYTE2) ; + pVr[TimeOut*4+1] = (ULONG)HDMITX_ReadI2C_Byte(REG_TX_SHA_RD_BYTE3) ; + pVr[TimeOut*4] = (ULONG)HDMITX_ReadI2C_Byte(REG_TX_SHA_RD_BYTE4) ; + ErrorF("V' = %02X %02X %02X %02X\n",pVr[TimeOut*4],pVr[TimeOut*4+1],pVr[TimeOut*4+2],pVr[TimeOut*4+3]) ; + } + + return ER_SUCCESS ; +} + +static SYS_STATUS +HDCP_GetM0(BYTE *pM0) +{ + int i ; + + if(!pM0) + { + return ER_FAIL ; + } + + HDMITX_WriteI2C_Byte(REG_TX_SHA_SEL,5) ; // read m0[31:0] from reg51~reg54 + pM0[0] = HDMITX_ReadI2C_Byte(REG_TX_SHA_RD_BYTE1) ; + pM0[1] = HDMITX_ReadI2C_Byte(REG_TX_SHA_RD_BYTE2) ; + pM0[2] = 
HDMITX_ReadI2C_Byte(REG_TX_SHA_RD_BYTE3) ; + pM0[3] = HDMITX_ReadI2C_Byte(REG_TX_SHA_RD_BYTE4) ; + HDMITX_WriteI2C_Byte(REG_TX_SHA_SEL,0) ; // read m0[39:32] from reg55 + pM0[4] = HDMITX_ReadI2C_Byte(REG_TX_AKSV_RD_BYTE5) ; + HDMITX_WriteI2C_Byte(REG_TX_SHA_SEL,1) ; // read m0[47:40] from reg55 + pM0[5] = HDMITX_ReadI2C_Byte(REG_TX_AKSV_RD_BYTE5) ; + HDMITX_WriteI2C_Byte(REG_TX_SHA_SEL,2) ; // read m0[55:48] from reg55 + pM0[6] = HDMITX_ReadI2C_Byte(REG_TX_AKSV_RD_BYTE5) ; + HDMITX_WriteI2C_Byte(REG_TX_SHA_SEL,3) ; // read m0[63:56] from reg55 + pM0[7] = HDMITX_ReadI2C_Byte(REG_TX_AKSV_RD_BYTE5) ; + + ErrorF("M[] =") ; + for(i = 0 ; i < 8 ; i++){ + ErrorF("0x%02x,",pM0[i]) ; + } + ErrorF("\n") ; + return ER_SUCCESS ; +} + +static _XDATA BYTE SHABuff[64] ; +static _XDATA BYTE V[20] ; + +static _XDATA ULONG w[80]; +static _XDATA ULONG sha[5] ; + +#define rol(x,y) (((x) << (y)) | (((ULONG)x) >> (32-y))) + +static void SHATransform(ULONG * h); // richard add +void SHATransform(ULONG * h) +{ + LONG t; + + + for (t = 16; t < 80; t++) { + ULONG tmp = w[t - 3] ^ w[t - 8] ^ w[t - 14] ^ w[t - 16]; + w[t] = rol(tmp,1); + printf("w[%2d] = %08lX\n",t,w[t]) ; + } + + h[0] = 0x67452301 ; + h[1] = 0xefcdab89; + h[2] = 0x98badcfe; + h[3] = 0x10325476; + h[4] = 0xc3d2e1f0; + + for (t = 0; t < 20; t++) { + ULONG tmp = + rol(h[0],5) + ((h[1] & h[2]) | (h[3] & ~h[1])) + h[4] + w[t] + 0x5a827999; + printf("%08lX %08lX %08lX %08lX %08lX\n",h[0],h[1],h[2],h[3],h[4]) ; + + h[4] = h[3]; + h[3] = h[2]; + h[2] = rol(h[1],30); + h[1] = h[0]; + h[0] = tmp; + + } + for (t = 20; t < 40; t++) { + ULONG tmp = rol(h[0],5) + (h[1] ^ h[2] ^ h[3]) + h[4] + w[t] + 0x6ed9eba1; + printf("%08lX %08lX %08lX %08lX %08lX\n",h[0],h[1],h[2],h[3],h[4]) ; + h[4] = h[3]; + h[3] = h[2]; + h[2] = rol(h[1],30); + h[1] = h[0]; + h[0] = tmp; + } + for (t = 40; t < 60; t++) { + ULONG tmp = rol(h[0], + 5) + ((h[1] & h[2]) | (h[1] & h[3]) | (h[2] & h[3])) + h[4] + w[t] + + 0x8f1bbcdc; + printf("%08lX %08lX %08lX %08lX 
%08lX\n",h[0],h[1],h[2],h[3],h[4]) ; + h[4] = h[3]; + h[3] = h[2]; + h[2] = rol(h[1],30); + h[1] = h[0]; + h[0] = tmp; + } + for (t = 60; t < 80; t++) { + ULONG tmp = rol(h[0],5) + (h[1] ^ h[2] ^ h[3]) + h[4] + w[t] + 0xca62c1d6; + printf("%08lX %08lX %08lX %08lX %08lX\n",h[0],h[1],h[2],h[3],h[4]) ; + h[4] = h[3]; + h[3] = h[2]; + h[2] = rol(h[1],30); + h[1] = h[0]; + h[0] = tmp; + } + printf("%08lX %08lX %08lX %08lX %08lX\n",h[0],h[1],h[2],h[3],h[4]) ; + + h[0] += 0x67452301 ; + h[1] += 0xefcdab89; + h[2] += 0x98badcfe; + h[3] += 0x10325476; + h[4] += 0xc3d2e1f0; + printf("%08lX %08lX %08lX %08lX %08lX\n",h[0],h[1],h[2],h[3],h[4]) ; +} + +/* ---------------------------------------------------------------------- + * Outer SHA algorithm: take an arbitrary length byte string, + * convert it into 16-word blocks with the prescribed padding at + * the end,and pass those blocks to the core SHA algorithm. + */ + + +void SHA_Simple(void *p,LONG len,BYTE *output) +{ + // SHA_State s; + int i, t ; + ULONG c ; + char *pBuff = p ; + + + for( i = 0 ; i < len ; i++ ) + { + t = i/4 ; + if( i%4 == 0 ) + { + w[t] = 0 ; + } + c = pBuff[i] ; + c <<= (3-(i%4))*8 ; + w[t] |= c ; + printf("pBuff[%d] = %02x, c = %08lX, w[%d] = %08lX\n",i,pBuff[i],c,t,w[t]) ; + } + t = i/4 ; + if( i%4 == 0 ) + { + w[t] = 0 ; + } + c = 0x80 << ((3-i%4)*24) ; + w[t]|= c ; t++ ; + for( ; t < 15 ; t++ ) + { + w[t] = 0 ; + } + w[15] = len*8 ; + + for( t = 0 ; t< 16 ; t++ ) + { + printf("w[%2d] = %08lX\n",t,w[t]) ; + } + + SHATransform(sha) ; + + for( i = 0 ; i < 5 ; i++ ) + { + output[i*4] = (BYTE)((sha[i]>>24)&0xFF) ; + output[i*4+1] = (BYTE)((sha[i]>>16)&0xFF) ; + output[i*4+2] = (BYTE)((sha[i]>>8)&0xFF) ; + output[i*4+3] = (BYTE)(sha[i]&0xFF) ; + } +} + +static SYS_STATUS +HDCP_CheckSHA(BYTE pM0[],USHORT BStatus,BYTE pKSVList[],int cDownStream,BYTE Vr[]) +{ + int i,n ; + + for(i = 0 ; i < cDownStream*5 ; i++) + { + SHABuff[i] = pKSVList[i] ; + } + SHABuff[i++] = BStatus & 0xFF ; + SHABuff[i++] = 
(BStatus>>8) & 0xFF ; + for(n = 0 ; n < 8 ; n++,i++) + { + SHABuff[i] = pM0[n] ; + } + n = i ; + // SHABuff[i++] = 0x80 ; // end mask + for(; i < 64 ; i++) + { + SHABuff[i] = 0 ; + } + // n = cDownStream * 5 + 2 /* for BStatus */ + 8 /* for M0 */ ; + // n *= 8 ; + // SHABuff[62] = (n>>8) & 0xff ; + // SHABuff[63] = (n>>8) & 0xff ; + for(i = 0 ; i < 64 ; i++) + { + if(i % 16 == 0) printf("SHA[]: ") ; + printf(" %02X",SHABuff[i]) ; + if((i%16)==15) printf("\n") ; + } + SHA_Simple(SHABuff,n,V) ; + printf("V[] =") ; + for(i = 0 ; i < 20 ; i++) + { + printf(" %02X",V[i]) ; + } + printf("\nVr[] =") ; + for(i = 0 ; i < 20 ; i++) + { + printf(" %02X",Vr[i]) ; + } + + for(i = 0 ; i < 20 ; i++) + { + if(V[i] != Vr[i]) + { + return ER_FAIL ; + } + } + return ER_SUCCESS ; +} + +static SYS_STATUS +HDCP_Authenticate_Repeater() +{ + BYTE uc ; + #ifdef SUPPORT_DSSSHA + BYTE revoked ; + int i ; + #else + int i; // richard add + BYTE revoked; // richard add + #endif // _DSS_SHA_ + // BYTE test; + // BYTE test06; + // BYTE test07; + // BYTE test08; + BYTE cDownStream ; + + BYTE BCaps; + USHORT BStatus ; + USHORT TimeOut ; + + ErrorF("Authentication for repeater\n") ; + // emily add for test,abort HDCP + // 2007/10/01 marked by jj_tseng@chipadvanced.com + // HDMITX_WriteI2C_Byte(0x20,0x00) ; + // HDMITX_WriteI2C_Byte(0x04,0x01) ; + // HDMITX_WriteI2C_Byte(0x10,0x01) ; + // HDMITX_WriteI2C_Byte(0x15,0x0F) ; + // DelayMS(100); + // HDMITX_WriteI2C_Byte(0x04,0x00) ; + // HDMITX_WriteI2C_Byte(0x10,0x00) ; + // HDMITX_WriteI2C_Byte(0x20,0x01) ; + // DelayMS(100); + // test07 = HDMITX_ReadI2C_Byte(0x7) ; + // test06 = HDMITX_ReadI2C_Byte(0x6); + // test08 = HDMITX_ReadI2C_Byte(0x8); + //~jj_tseng@chipadvanced.com + // end emily add for test + ////////////////////////////////////// + // Authenticate Fired + ////////////////////////////////////// + + HDCP_GetBCaps(&BCaps,&BStatus) ; + DelayMS(2); + HDCP_Auth_Fire(); + DelayMS(550); // emily add for test + + for(TimeOut = 250*6 ; TimeOut > 0 ; 
TimeOut --) + { + + uc = HDMITX_ReadI2C_Byte(REG_TX_INT_STAT1) ; + if(uc & B_INT_DDC_BUS_HANG) + { + ErrorF("DDC Bus hang\n") ; + goto HDCP_Repeater_Fail ; + } + + uc = HDMITX_ReadI2C_Byte(REG_TX_INT_STAT2) ; + + if(uc & B_INT_AUTH_FAIL) + { + /* + HDMITX_WriteI2C_Byte(REG_TX_INT_CLR0,B_CLR_AUTH_FAIL) ; + HDMITX_WriteI2C_Byte(REG_TX_INT_CLR1,0) ; + HDMITX_WriteI2C_Byte(REG_TX_SYS_STATUS,B_INTACTDONE) ; + HDMITX_WriteI2C_Byte(REG_TX_SYS_STATUS,0) ; + */ + ErrorF("HDCP_Authenticate_Repeater(): B_INT_AUTH_FAIL.\n") ; + goto HDCP_Repeater_Fail ; + } + // emily add for test + // test =(HDMITX_ReadI2C_Byte(0x7)&0x4)>>2 ; + if(uc & B_INT_KSVLIST_CHK) + { + HDMITX_WriteI2C_Byte(REG_TX_INT_CLR0,B_CLR_KSVLISTCHK) ; + HDMITX_WriteI2C_Byte(REG_TX_INT_CLR1,0) ; + HDMITX_WriteI2C_Byte(REG_TX_SYS_STATUS,B_INTACTDONE) ; + HDMITX_WriteI2C_Byte(REG_TX_SYS_STATUS,0) ; + ErrorF("B_INT_KSVLIST_CHK\n") ; + break ; + } + + DelayMS(5) ; + } + + if(TimeOut == 0) + { + ErrorF("Time out for wait KSV List checking interrupt\n") ; + goto HDCP_Repeater_Fail ; + } + + /////////////////////////////////////// + // clear KSVList check interrupt. 
+ /////////////////////////////////////// + + for(TimeOut = 500 ; TimeOut > 0 ; TimeOut --) + { + if((TimeOut % 100) == 0) + { + ErrorF("Wait KSV FIFO Ready %d\n",TimeOut) ; + } + + if(HDCP_GetBCaps(&BCaps,&BStatus) == ER_FAIL) + { + ErrorF("Get BCaps fail\n") ; + goto HDCP_Repeater_Fail ; + } + + if(BCaps & B_CAP_KSV_FIFO_RDY) + { + ErrorF("FIFO Ready\n") ; + break ; + } + DelayMS(5) ; + + } + + if(TimeOut == 0) + { + ErrorF("Get KSV FIFO ready TimeOut\n") ; + goto HDCP_Repeater_Fail ; + } + + ErrorF("Wait timeout = %d\n",TimeOut) ; + + ClearDDCFIFO() ; + GenerateDDCSCLK() ; + cDownStream = (BStatus & M_DOWNSTREAM_COUNT) ; + + if(cDownStream == 0 || cDownStream > 6 || BStatus & (B_MAX_CASCADE_EXCEEDED|B_DOWNSTREAM_OVER)) + { + ErrorF("Invalid Down stream count,fail\n") ; + goto HDCP_Repeater_Fail ; + } + + + if(HDCP_GetKSVList(KSVList,cDownStream) == ER_FAIL) + { + goto HDCP_Repeater_Fail ; + } + + for(i = 0 ; i < cDownStream ; i++) + { + revoked=FALSE ; uc = 0 ; + for( TimeOut = 0 ; TimeOut < 5 ; TimeOut++ ) + { + // check bit count + uc += countbit(KSVList[i*5+TimeOut]) ; + } + if( uc != 20 ) revoked = TRUE ; + #ifdef SUPPORT_REVOKE_KSV + HDCP_VerifyRevocationList(SRM1,&KSVList[i*5],&revoked) ; + #endif + if(revoked) + { + ErrorF("KSVFIFO[%d] = %02X %02X %02X %02X %02X is revoked\n",i,KSVList[i*5],KSVList[i*5+1],KSVList[i*5+2],KSVList[i*5+3],KSVList[i*5+4]) ; + goto HDCP_Repeater_Fail ; + } + } + + + if(HDCP_GetVr(Vr) == ER_FAIL) + { + goto HDCP_Repeater_Fail ; + } + + if(HDCP_GetM0(M0) == ER_FAIL) + { + goto HDCP_Repeater_Fail ; + } + + // do check SHA + if(HDCP_CheckSHA(M0,BStatus,KSVList,cDownStream,Vr) == ER_FAIL) + { + goto HDCP_Repeater_Fail ; + } + + + HDCP_ResumeRepeaterAuthenticate() ; + Instance[0].bAuthenticated = TRUE ; + return ER_SUCCESS ; + +HDCP_Repeater_Fail: + HDCP_CancelRepeaterAuthenticate() ; + return ER_FAIL ; +} + +////////////////////////////////////////////////////////////////////// +// Function: HDCP_ResumeAuthentication +// Parameter: 
N/A +// Return: N/A +// Remark: called by interrupt handler to restart Authentication and Encryption. +// Side-Effect: as Authentication and Encryption. +////////////////////////////////////////////////////////////////////// + +static void +HDCP_ResumeAuthentication() +{ + SetAVMute(TRUE) ; + if(HDCP_Authenticate() == ER_SUCCESS) + { + HDCP_EnableEncryption() ; + } + SetAVMute(FALSE) ; +} + + + +#endif // SUPPORT_HDCP + +#if 0 +static void +ENABLE_NULL_PKT() +{ + + HDMITX_WriteI2C_Byte(REG_TX_NULL_CTRL,B_ENABLE_PKT|B_REPEAT_PKT); +} + + +static void +ENABLE_ACP_PKT() +{ + + HDMITX_WriteI2C_Byte(REG_TX_ACP_CTRL,B_ENABLE_PKT|B_REPEAT_PKT); +} + + +static void +ENABLE_ISRC1_PKT() +{ + + HDMITX_WriteI2C_Byte(REG_TX_ISRC1_CTRL,B_ENABLE_PKT|B_REPEAT_PKT); +} + + +static void +ENABLE_ISRC2_PKT() +{ + + HDMITX_WriteI2C_Byte(REG_TX_ISRC2_CTRL,B_ENABLE_PKT|B_REPEAT_PKT); +} +#endif + + +static void +ENABLE_AVI_INFOFRM_PKT(void) +{ + + HDMITX_WriteI2C_Byte(REG_TX_AVI_INFOFRM_CTRL,B_ENABLE_PKT|B_REPEAT_PKT); +} + + +static void +ENABLE_AUD_INFOFRM_PKT(void) +{ + + HDMITX_WriteI2C_Byte(REG_TX_AUD_INFOFRM_CTRL,B_ENABLE_PKT|B_REPEAT_PKT); +} + + +#if 0 +static void +ENABLE_SPD_INFOFRM_PKT(void) +{ + + HDMITX_WriteI2C_Byte(REG_TX_SPD_INFOFRM_CTRL,B_ENABLE_PKT|B_REPEAT_PKT); +} + + +static void +ENABLE_MPG_INFOFRM_PKT(void) +{ + + HDMITX_WriteI2C_Byte(REG_TX_MPG_INFOFRM_CTRL,B_ENABLE_PKT|B_REPEAT_PKT); +} +#endif + +static void +DISABLE_NULL_PKT(void) +{ + + HDMITX_WriteI2C_Byte(REG_TX_NULL_CTRL,0); +} + + +static void +DISABLE_ACP_PKT(void) +{ + + HDMITX_WriteI2C_Byte(REG_TX_ACP_CTRL,0); +} + + +static void +DISABLE_ISRC1_PKT(void) +{ + + HDMITX_WriteI2C_Byte(REG_TX_ISRC1_CTRL,0); +} + + +static void +DISABLE_ISRC2_PKT(void) +{ + + HDMITX_WriteI2C_Byte(REG_TX_ISRC2_CTRL,0); +} + + +static void +DISABLE_AVI_INFOFRM_PKT(void) +{ + + HDMITX_WriteI2C_Byte(REG_TX_AVI_INFOFRM_CTRL,0); +} + + +static void +DISABLE_AUD_INFOFRM_PKT(void) +{ + + 
HDMITX_WriteI2C_Byte(REG_TX_AUD_INFOFRM_CTRL,0); +} + + +static void +DISABLE_SPD_INFOFRM_PKT(void) +{ + + HDMITX_WriteI2C_Byte(REG_TX_SPD_INFOFRM_CTRL,0); +} + + +static void +DISABLE_MPG_INFOFRM_PKT(void) +{ + + HDMITX_WriteI2C_Byte(REG_TX_MPG_INFOFRM_CTRL,0); +} + + +////////////////////////////////////////////////////////////////////// +// Function: SetAVIInfoFrame() +// Parameter: pAVIInfoFrame - the pointer to HDMI AVI Infoframe ucData +// Return: N/A +// Remark: Fill the AVI InfoFrame ucData,and count checksum,then fill into +// AVI InfoFrame registers. +// Side-Effect: N/A +////////////////////////////////////////////////////////////////////// + +static SYS_STATUS +SetAVIInfoFrame(AVI_InfoFrame *pAVIInfoFrame) +{ + int i ; + byte ucData ; + + if(!pAVIInfoFrame) + { + return ER_FAIL ; + } + + Switch_HDMITX_Bank(1) ; + HDMITX_WriteI2C_Byte(REG_TX_AVIINFO_DB1,pAVIInfoFrame->pktbyte.AVI_DB[0]); + HDMITX_WriteI2C_Byte(REG_TX_AVIINFO_DB2,pAVIInfoFrame->pktbyte.AVI_DB[1]); + HDMITX_WriteI2C_Byte(REG_TX_AVIINFO_DB3,pAVIInfoFrame->pktbyte.AVI_DB[2]); + HDMITX_WriteI2C_Byte(REG_TX_AVIINFO_DB4,pAVIInfoFrame->pktbyte.AVI_DB[3]); + HDMITX_WriteI2C_Byte(REG_TX_AVIINFO_DB5,pAVIInfoFrame->pktbyte.AVI_DB[4]); + HDMITX_WriteI2C_Byte(REG_TX_AVIINFO_DB6,pAVIInfoFrame->pktbyte.AVI_DB[5]); + HDMITX_WriteI2C_Byte(REG_TX_AVIINFO_DB7,pAVIInfoFrame->pktbyte.AVI_DB[6]); + HDMITX_WriteI2C_Byte(REG_TX_AVIINFO_DB8,pAVIInfoFrame->pktbyte.AVI_DB[7]); + HDMITX_WriteI2C_Byte(REG_TX_AVIINFO_DB9,pAVIInfoFrame->pktbyte.AVI_DB[8]); + HDMITX_WriteI2C_Byte(REG_TX_AVIINFO_DB10,pAVIInfoFrame->pktbyte.AVI_DB[9]); + HDMITX_WriteI2C_Byte(REG_TX_AVIINFO_DB11,pAVIInfoFrame->pktbyte.AVI_DB[10]); + HDMITX_WriteI2C_Byte(REG_TX_AVIINFO_DB12,pAVIInfoFrame->pktbyte.AVI_DB[11]); + HDMITX_WriteI2C_Byte(REG_TX_AVIINFO_DB13,pAVIInfoFrame->pktbyte.AVI_DB[12]); + for(i = 0,ucData = 0; i < 13 ; i++) + { + ucData -= pAVIInfoFrame->pktbyte.AVI_DB[i] ; + } + OS_PRINTF("SetAVIInfo(): ") ; + //ErrorF("%02X 
",HDMITX_ReadI2C_Byte(REG_TX_AVIINFO_DB1)) ; + //ErrorF("%02X ",HDMITX_ReadI2C_Byte(REG_TX_AVIINFO_DB2)) ; + //ErrorF("%02X ",HDMITX_ReadI2C_Byte(REG_TX_AVIINFO_DB3)) ; + //ErrorF("%02X ",HDMITX_ReadI2C_Byte(REG_TX_AVIINFO_DB4)) ; + //ErrorF("%02X ",HDMITX_ReadI2C_Byte(REG_TX_AVIINFO_DB5)) ; + //ErrorF("%02X ",HDMITX_ReadI2C_Byte(REG_TX_AVIINFO_DB6)) ; + //ErrorF("%02X ",HDMITX_ReadI2C_Byte(REG_TX_AVIINFO_DB7)) ; + //ErrorF("%02X ",HDMITX_ReadI2C_Byte(REG_TX_AVIINFO_DB8)) ; + //ErrorF("%02X ",HDMITX_ReadI2C_Byte(REG_TX_AVIINFO_DB9)) ; + //ErrorF("%02X ",HDMITX_ReadI2C_Byte(REG_TX_AVIINFO_DB10)) ; + //ErrorF("%02X ",HDMITX_ReadI2C_Byte(REG_TX_AVIINFO_DB11)) ; + //ErrorF("%02X ",HDMITX_ReadI2C_Byte(REG_TX_AVIINFO_DB12)) ; + //ErrorF("%02X ",HDMITX_ReadI2C_Byte(REG_TX_AVIINFO_DB13)) ; + OS_PRINTF("\n") ; + ucData -= 0x80+AVI_INFOFRAME_VER+AVI_INFOFRAME_TYPE+AVI_INFOFRAME_LEN ; + HDMITX_WriteI2C_Byte(REG_TX_AVIINFO_SUM,ucData); + + + Switch_HDMITX_Bank(0) ; + ENABLE_AVI_INFOFRM_PKT(); + return ER_SUCCESS ; +} + +////////////////////////////////////////////////////////////////////// +// Function: SetAudioInfoFrame() +// Parameter: pAudioInfoFrame - the pointer to HDMI Audio Infoframe ucData +// Return: N/A +// Remark: Fill the Audio InfoFrame ucData,and count checksum,then fill into +// Audio InfoFrame registers. 
+// Side-Effect: N/A +////////////////////////////////////////////////////////////////////// + +static SYS_STATUS +SetAudioInfoFrame(Audio_InfoFrame *pAudioInfoFrame) +{ + int i ; + BYTE ucData ; + + if(!pAudioInfoFrame) + { + return ER_FAIL ; + } + + Switch_HDMITX_Bank(1) ; + HDMITX_WriteI2C_Byte(REG_TX_PKT_AUDINFO_CC,pAudioInfoFrame->pktbyte.AUD_DB[0]); + HDMITX_WriteI2C_Byte(REG_TX_PKT_AUDINFO_SF,pAudioInfoFrame->pktbyte.AUD_DB[1]); + HDMITX_WriteI2C_Byte(REG_TX_PKT_AUDINFO_CA,pAudioInfoFrame->pktbyte.AUD_DB[3]); + HDMITX_WriteI2C_Byte(REG_TX_PKT_AUDINFO_DM_LSV,pAudioInfoFrame->pktbyte.AUD_DB[4]) ; + + for(i = 0,ucData = 0 ; i< 5 ; i++) + { + ucData -= pAudioInfoFrame->pktbyte.AUD_DB[i] ; + } + ucData -= 0x80+AUDIO_INFOFRAME_VER+AUDIO_INFOFRAME_TYPE+AUDIO_INFOFRAME_LEN ; + + HDMITX_WriteI2C_Byte(REG_TX_PKT_AUDINFO_SUM,ucData) ; + + + Switch_HDMITX_Bank(0) ; + ENABLE_AUD_INFOFRM_PKT(); + return ER_SUCCESS ; +} +#if 0 +////////////////////////////////////////////////////////////////////// +// Function: SetSPDInfoFrame() +// Parameter: pSPDInfoFrame - the pointer to HDMI SPD Infoframe ucData +// Return: N/A +// Remark: Fill the SPD InfoFrame ucData,and count checksum,then fill into +// SPD InfoFrame registers. 
+// Side-Effect: N/A
+//////////////////////////////////////////////////////////////////////
+
+// Program the 25-byte Source Product Description InfoFrame into bank-1
+// registers, accumulating the checksum in the same pass, then enable
+// periodic SPD packet transmission.  (Currently compiled out: '#if 0'.)
+static SYS_STATUS
+SetSPDInfoFrame(SPD_InfoFrame *pSPDInfoFrame)
+{
+    int i ;
+    BYTE ucData ;
+
+    if(!pSPDInfoFrame)
+    {
+        return ER_FAIL ;
+    }
+
+    Switch_HDMITX_Bank(1) ;
+    for(i = 0,ucData = 0 ; i < 25 ; i++)
+    {
+        ucData -= pSPDInfoFrame->pktbyte.SPD_DB[i] ;
+        HDMITX_WriteI2C_Byte(REG_TX_PKT_SPDINFO_PB1+i,pSPDInfoFrame->pktbyte.SPD_DB[i]) ;
+    }
+    // Finish the checksum with the negated header bytes (0x80|type, ver, len).
+    ucData -= 0x80+SPD_INFOFRAME_VER+SPD_INFOFRAME_TYPE+SPD_INFOFRAME_LEN ;
+    HDMITX_WriteI2C_Byte(REG_TX_PKT_SPDINFO_SUM,ucData) ; // checksum
+    Switch_HDMITX_Bank(0) ;
+    ENABLE_SPD_INFOFRM_PKT();
+    return ER_SUCCESS ;
+}
+
+//////////////////////////////////////////////////////////////////////
+// Function: SetMPEGInfoFrame()
+// Parameter: pMPEGInfoFrame - the pointer to HDMI MPEG Infoframe ucData
+// Return: N/A
+// Remark: Fill the MPEG InfoFrame ucData,and count checksum,then fill into
+// MPEG InfoFrame registers.
+// Side-Effect: N/A
+//////////////////////////////////////////////////////////////////////
+
+// Program the MPEG InfoFrame registers and checksum.
+// (Currently compiled out: '#if 0'.)
+static SYS_STATUS
+SetMPEGInfoFrame(MPEG_InfoFrame *pMPGInfoFrame)
+{
+    int i ;
+    BYTE ucData ;
+
+    if(!pMPGInfoFrame)
+    {
+        return ER_FAIL ;
+    }
+
+    Switch_HDMITX_Bank(1) ;
+
+    HDMITX_WriteI2C_Byte(REG_TX_PKT_MPGINFO_FMT,pMPGInfoFrame->info.FieldRepeat|(pMPGInfoFrame->info.MpegFrame<<1)) ;
+    HDMITX_WriteI2C_Byte(REG_TX_PKG_MPGINFO_DB0,pMPGInfoFrame->pktbyte.MPG_DB[0]) ;
+    HDMITX_WriteI2C_Byte(REG_TX_PKG_MPGINFO_DB1,pMPGInfoFrame->pktbyte.MPG_DB[1]) ;
+    HDMITX_WriteI2C_Byte(REG_TX_PKG_MPGINFO_DB2,pMPGInfoFrame->pktbyte.MPG_DB[2]) ;
+    HDMITX_WriteI2C_Byte(REG_TX_PKG_MPGINFO_DB3,pMPGInfoFrame->pktbyte.MPG_DB[3]) ;
+
+    // NOTE(review): the checksum covers MPG_DB[0..4] although only DB[0..3]
+    // are written above - verify DB[4] handling against the datasheet.
+    for(ucData = 0,i = 0 ; i < 5 ; i++)
+    {
+        ucData -= pMPGInfoFrame->pktbyte.MPG_DB[i] ;
+    }
+    ucData -= 0x80+MPEG_INFOFRAME_VER+MPEG_INFOFRAME_TYPE+MPEG_INFOFRAME_LEN ;
+
+    HDMITX_WriteI2C_Byte(REG_TX_PKG_MPGINFO_SUM,ucData) ;
+
+    Switch_HDMITX_Bank(0) ;
+    // NOTE(review): this enables the SPD packet, not an MPEG packet - looks
+    // like a copy/paste slip.  Harmless today since this code is under
+    // '#if 0', but fix before re-enabling.
+    ENABLE_SPD_INFOFRM_PKT() ;
+
+    return ER_SUCCESS ;
+}
+#endif
+
+//////////////////////////////////////////////////////////////////////
+// Function: DumpCatHDMITXReg()
+// Parameter: N/A
+// Return: N/A
+// Remark: Debug function,dumps the registers of CAT6611.
+// Side-Effect: N/A
+//////////////////////////////////////////////////////////////////////
+
+#ifdef HDMITX_REG_DEBUG
+// Debug aid: print bank-0 registers 0x00-0xFF and bank-1 registers
+// 0x130-0x1AF as 16-column hex tables, then leave the chip on bank 0.
+static void
+DumpCatHDMITXReg()
+{
+    int i,j ;
+    BYTE reg ;   // NOTE(review): unused in this function
+    BYTE bank ;  // NOTE(review): unused in this function
+    BYTE ucData ;
+
+    // Column header row: 00..0F with visual separators after cols 3/7/11.
+    ErrorF(" ") ;
+    for(j = 0 ; j < 16 ; j++)
+    {
+        ErrorF(" %02X",j) ;
+        if((j == 3)||(j==7)||(j==11))
+        {
+            ErrorF(" ") ;
+        }
+    }
+    ErrorF("\n -----------------------------------------------------\n") ;
+
+    Switch_HDMITX_Bank(0) ;
+
+    // Bank 0: registers 0x00-0xFF, 16 per row, ruler every 0x40 bytes.
+    for(i = 0 ; i < 0x100 ; i+=16)
+    {
+        ErrorF("[%3X] ",i) ;
+        for(j = 0 ; j < 16 ; j++)
+        {
+            ucData = HDMITX_ReadI2C_Byte((BYTE)((i+j)&0xFF)) ;
+            ErrorF(" %02X",ucData) ;
+            if((j == 3)||(j==7)||(j==11))
+            {
+                ErrorF(" -") ;
+            }
+        }
+        ErrorF("\n") ;
+        if((i % 0x40) == 0x30)
+        {
+            ErrorF(" -----------------------------------------------------\n") ;
+        }
+    }
+
+    // Bank 1: addresses 0x130-0x1AF; the I2C register address is still
+    // 8 bits wide, hence the &0xFF truncation on the read address.
+    Switch_HDMITX_Bank(1) ;
+    for(i = 0x130; i < 0x1B0 ; i+=16)
+    {
+        ErrorF("[%3X] ",i) ;
+        for(j = 0 ; j < 16 ; j++)
+        {
+            ucData = HDMITX_ReadI2C_Byte((BYTE)((i+j)&0xFF)) ;
+            ErrorF(" %02X",ucData) ;
+            if((j == 3)||(j==7)||(j==11))
+            {
+                ErrorF(" -") ;
+            }
+        }
+        ErrorF("\n") ;
+        if(i == 0x160)
+        {
+            ErrorF(" -----------------------------------------------------\n") ;
+        }
+
+    }
+    Switch_HDMITX_Bank(0) ;
+}
+#endif
diff --git a/drivers/mcst/mga2/it6613/HDMI_TX/it6613_drv.h b/drivers/mcst/mga2/it6613/HDMI_TX/it6613_drv.h
new file mode 100644
index 000000000000..cbd4f9cc8b4a
--- /dev/null
+++ b/drivers/mcst/mga2/it6613/HDMI_TX/it6613_drv.h
@@ -0,0 +1,883 @@
+#ifndef _IT6613_H_
+#define _IT6613_H_
+
+//#define EXTERN_HDCPROM
+/////////////////////////////////////////
+// DDC Address
+/////////////////////////////////////////
+#define DDC_HDCP_ADDRESS 0x74
+#define DDC_EDID_ADDRESS 0xA0
+#define DDC_FIFO_MAXREQ 0x20
+
+// I2C address
+
+#define _80MHz 80000000
+#define
HDMI_TX_I2C_SLAVE_ADDR 0x98 // PCADR is ground, if PCADR=1, address=0x9A + +/////////////////////////////////////////////////////////////////////// +// Register offset +/////////////////////////////////////////////////////////////////////// + + +#define REG_TX_VENDOR_ID0 0x00 +#define REG_TX_VENDOR_ID1 0x01 +#define REG_TX_DEVICE_ID0 0x02 +#define REG_TX_DEVICE_ID1 0x03 + + #define O_DEVID 0 + #define M_DEVID 0xF + #define O_REVID 4 + #define M_REVID 0xF + +#define REG_TX_SW_RST 0x04 + #define B_ENTEST (1<<7) + #define B_REF_RST (1<<5) + #define B_AREF_RST (1<<4) + #define B_VID_RST (1<<3) + #define B_AUD_RST (1<<2) + #define B_HDMI_RST (1<<1) + #define B_HDCP_RST (1<<0) + +#define REG_TX_INT_CTRL 0x05 + #define B_INTPOL_ACTL 0 + #define B_INTPOL_ACTH (1<<7) + #define B_INT_PUSHPULL 0 + #define B_INT_OPENDRAIN (1<<6) + +#define REG_TX_INT_STAT1 0x06 + #define B_INT_AUD_OVERFLOW (1<<7) + #define B_INT_ROMACQ_NOACK (1<<6) + #define B_INT_RDDC_NOACK (1<<5) + #define B_INT_DDCFIFO_ERR (1<<4) + #define B_INT_ROMACQ_BUS_HANG (1<<3) + #define B_INT_DDC_BUS_HANG (1<<2) + #define B_INT_RX_SENSE (1<<1) + #define B_INT_HPD_PLUG (1<<0) + +#define REG_TX_INT_STAT2 0x07 + #define B_INT_HDCP_SYNC_DET_FAIL (1<<7) + #define B_INT_VID_UNSTABLE (1<<6) + #define B_INT_PKTACP (1<<5) + #define B_INT_PKTNULL (1<<4) + #define B_INT_PKTGENERAL (1<<3) + #define B_INT_KSVLIST_CHK (1<<2) + #define B_INT_AUTH_DONE (1<<1) + #define B_INT_AUTH_FAIL (1<<0) + +#define REG_TX_INT_STAT3 0x08 + #define B_INT_AUD_CTS (1<<6) + #define B_INT_VSYNC (1<<5) + #define B_INT_VIDSTABLE (1<<4) + #define B_INT_PKTMPG (1<<3) + #define B_INT_PKTSPD (1<<2) + #define B_INT_PKTAUD (1<<1) + #define B_INT_PKTAVI (1<<0) + +#define REG_TX_INT_MASK1 0x09 + #define B_AUDIO_OVFLW_MASK (1<<7) + #define B_DDC_NOACK_MASK (1<<5) + #define B_DDC_FIFO_ERR_MASK (1<<4) + #define B_DDC_BUS_HANG_MASK (1<<2) + #define B_RXSEN_MASK (1<<1) + #define B_HPD_MASK (1<<0) + +#define REG_TX_INT_MASK2 0x0A + #define B_PKT_AVI_MASK (1<<7) + 
#define B_PKT_VID_UNSTABLE_MASK (1<<6) + #define B_PKT_ACP_MASK (1<<5) + #define B_PKT_NULL_MASK (1<<4) + #define B_PKT_GEN_MASK (1<<3) + #define B_KSVLISTCHK_MASK (1<<2) + #define B_T_AUTH_DONE_MASK (1<<1) + #define B_AUTH_FAIL_MASK (1<<0) + +#define REG_TX_INT_MASK3 0x0B + #define B_HDCP_SYNC_DET_FAIL_MASK (1<<6) + #define B_AUDCTS_MASK (1<<5) + #define B_VSYNC_MASK (1<<4) + #define B_VIDSTABLE_MASK (1<<3) + #define B_PKT_MPG_MASK (1<<2) + #define B_PKT_SPD_MASK (1<<1) + #define B_PKT_AUD_MASK (1<<0) + + +#define REG_TX_INT_CLR0 0x0C + #define B_CLR_PKTACP (1<<7) + #define B_CLR_PKTNULL (1<<6) + #define B_CLR_PKTGENERAL (1<<5) + #define B_CLR_KSVLISTCHK (1<<4) + #define B_CLR_AUTH_DONE (1<<3) + #define B_CLR_AUTH_FAIL (1<<2) + #define B_CLR_RXSENSE (1<<1) + #define B_CLR_HPD (1<<0) + +#define REG_TX_INT_CLR1 0x0D + #define B_CLR_VSYNC (1<<7) + #define B_CLR_VIDSTABLE (1<<6) + #define B_CLR_PKTMPG (1<<5) + #define B_CLR_PKTSPD (1<<4) + #define B_CLR_PKTAUD (1<<3) + #define B_CLR_PKTAVI (1<<2) + #define B_CLR_HDCP_SYNC_DET_FAIL (1<<1) + #define B_CLR_VID_UNSTABLE (1<<0) + +#define REG_TX_SYS_STATUS 0x0E + // readonly + #define B_INT_ACTIVE (1<<7) + #define B_HPDETECT (1<<6) + #define B_RXSENDETECT (1<<5) + #define B_TXVIDSTABLE (1<<4) + // read/write + #define O_CTSINTSTEP 2 + #define M_CTSINTSTEP (3<<2) + #define B_CLR_AUD_CTS (1<<1) + #define B_INTACTDONE (1<<0) + +#define REG_TX_BANK_CTRL 0x0F + #define B_BANK0 0 + #define B_BANK1 1 + +// DDC + +#define REG_TX_DDC_MASTER_CTRL 0x10 + #define B_MASTERROM (1<<1) + #define B_MASTERDDC (0<<1) + #define B_MASTERHOST (1<<0) + #define B_MASTERHDCP (0<<0) + +#define REG_TX_DDC_HEADER 0x11 +#define REG_TX_DDC_REQOFF 0x12 +#define REG_TX_DDC_REQCOUNT 0x13 +#define REG_TX_DDC_EDIDSEG 0x14 +#define REG_TX_DDC_CMD 0x15 + #define CMD_DDC_SEQ_BURSTREAD 0 + #define CMD_LINK_CHKREAD 2 + #define CMD_EDID_READ 3 + #define CMD_FIFO_CLR 9 + #define CMD_GEN_SCLCLK 0xA + #define CMD_DDC_ABORT 0xF + +#define REG_TX_DDC_STATUS 0x16 + 
#define B_DDC_DONE (1<<7) + #define B_DDC_ACT (1<<6) + #define B_DDC_NOACK (1<<5) + #define B_DDC_WAITBUS (1<<4) + #define B_DDC_ARBILOSE (1<<3) + #define B_DDC_ERROR (B_DDC_NOACK|B_DDC_WAITBUS|B_DDC_ARBILOSE) + #define B_DDC_FIFOFULL (1<<2) + #define B_DDC_FIFOEMPTY (1<<1) + +#define REG_TX_DDC_READFIFO 0x17 +#define REG_TX_ROM_STARTADDR 0x18 +#define REG_TX_HDCP_HEADER 0x19 +#define REG_TX_ROM_HEADER 0x1A +#define REG_TX_BUSHOLD_T 0x1B +#define REG_TX_ROM_STAT 0x1C + #define B_ROM_DONE (1<<7) + #define B_ROM_ACTIVE (1<<6) + #define B_ROM_NOACK (1<<5) + #define B_ROM_WAITBUS (1<<4) + #define B_ROM_ARBILOSE (1<<3) + #define B_ROM_BUSHANG (1<<2) + +// HDCP +#define REG_TX_AN_GENERATE 0x1F + #define B_START_CIPHER_GEN 1 + #define B_STOP_CIPHER_GEN 0 + +#define REG_TX_HDCP_DESIRE 0x20 + #define B_ENABLE_HDPC11 (1<<1) + #define B_CPDESIRE (1<<0) + +#define REG_TX_AUTHFIRE 0x21 +#define REG_TX_LISTCTRL 0x22 + #define B_LISTFAIL (1<<1) + #define B_LISTDONE (1<<0) + +#define REG_TX_AKSV 0x23 +#define REG_TX_AKSV0 0x23 +#define REG_TX_AKSV1 0x24 +#define REG_TX_AKSV2 0x25 +#define REG_TX_AKSV3 0x26 +#define REG_TX_AKSV4 0x27 + +#define REG_TX_AN 0x28 +#define REG_TX_AN_GEN 0x30 +#define REG_TX_ARI 0x38 +#define REG_TX_ARI0 0x38 +#define REG_TX_ARI1 0x39 +#define REG_TX_APJ 0x3A + +#define REG_TX_BKSV 0x3B +#define REG_TX_BRI 0x40 +#define REG_TX_BRI0 0x40 +#define REG_TX_BRI1 0x41 +#define REG_TX_BPJ 0x42 +#define REG_TX_BCAP 0x43 + #define B_CAP_HDMI_REPEATER (1<<6) + #define B_CAP_KSV_FIFO_RDY (1<<5) + #define B_CAP_HDMI_FAST_MODE (1<<4) + #define B_CAP_HDCP_1p1 (1<<1) + #define B_CAP_FAST_REAUTH (1<<0) +#define REG_TX_BSTAT 0x44 +#define REG_TX_BSTAT0 0x44 +#define REG_TX_BSTAT1 0x45 + #define B_CAP_HDMI_MODE (1<<12) + #define B_CAP_DVI_MODE (0<<12) + #define B_MAX_CASCADE_EXCEEDED (1<<11) + #define M_REPEATER_DEPTH (0x7<<8) + #define O_REPEATER_DEPTH 8 + #define B_DOWNSTREAM_OVER (1<<7) + #define M_DOWNSTREAM_COUNT 0x7F + +#define REG_TX_AUTH_STAT 0x46 +#define 
B_T_AUTH_DONE (1<<7) +#define REG_TX_CLK_CTRL0 0x58 + #define O_OSCLK_SEL 5 + #define M_OSCLK_SEL 3 + #define B_AUTO_OVER_SAMPLING_CLOCK (1<<4) + #define O_EXT_MCLK_SEL 2 + #define M_EXT_MCLK_SEL (3<80000000 ) + { + level = PCLK_HIGH ; + } + else if(TMDSClock>20000000) + { + level = PCLK_MEDIUM ; + } + else + { + level = PCLK_LOW ; + } + + + + //BOOL EnableVideoOutput(VIDEOPCLKLEVEL level,BYTE inputColorMode,BYTE outputColorMode,BYTE bHDMI) ; + //EnableVideoOutput(level,bInputColorMode, bInputSignalType, bOutputColorMode,bHDMIMode) ; + EnableVideoOutput(level,bInputColorMode, bOutputColorMode,bHDMIMode) ; // richard modify + + if( bHDMIMode ) + { + OS_PRINTF("ConfigAVIInfoFrame, VIC=%d\n", VIC); + ConfigAVIInfoFrame(VIC, pixelrep) ; + + // EnableHDCP(TRUE) ; + if( bAudioEnable ) + { + //BOOL EnableAudioOutput(ULONG VideoPixelClock,BYTE bAudioSampleFreq,BYTE ChannelNumber,BYTE bAudSWL,BYTE bSPDIF) + //EnableAudioOutput(TMDSClock,48000, 2, FALSE); + bool bSPDIF = FALSE; + EnableAudioOutput(TMDSClock,AUDFS_48KHz, 2, 16, bSPDIF); // richard modify + ConfigAudioInfoFrm() ; + } + } + SetAVMute(FALSE) ; + bChangeMode = FALSE ; +} + + + +void +HDMITX_ChangeDisplayOption(HDMI_Video_Type OutputVideoTiming, HDMI_OutputColorMode OutputColorMode) +{ + //HDMI_Video_Type t=HDMI_480i60_16x9; + switch(OutputVideoTiming) + { + case HDMI_640x480p60: + VIC = 1 ; + VideoPixelClock = 25000000 ; + pixelrep = 0 ; + aspec = HDMI_4x3 ; + Colorimetry = HDMI_ITU601 ; + break ; + case HDMI_480p60: + VIC = 2 ; + VideoPixelClock = 27000000 ; + pixelrep = 0 ; + aspec = HDMI_4x3 ; + Colorimetry = HDMI_ITU601 ; + break ; + case HDMI_480p60_16x9: + VIC = 3 ; + VideoPixelClock = 27000000 ; + pixelrep = 0 ; + aspec = HDMI_16x9 ; + Colorimetry = HDMI_ITU601 ; + break ; + case HDMI_720p60: + VIC = 4 ; + VideoPixelClock = 74250000 ; + pixelrep = 0 ; + aspec = HDMI_16x9 ; + Colorimetry = HDMI_ITU709 ; + break ; + case HDMI_1080i60: + VIC = 5 ; + VideoPixelClock = 74250000 ; + pixelrep = 0 ; + aspec = 
HDMI_16x9 ; + Colorimetry = HDMI_ITU709 ; + break ; + case HDMI_480i60: + VIC = 6 ; + VideoPixelClock = 13500000 ; + pixelrep = 1 ; + aspec = HDMI_4x3 ; + Colorimetry = HDMI_ITU601 ; + break ; + case HDMI_480i60_16x9: + VIC = 7 ; + VideoPixelClock = 13500000 ; + pixelrep = 1 ; + aspec = HDMI_16x9 ; + Colorimetry = HDMI_ITU601 ; + break ; + case HDMI_1080p60: + VIC = 16 ; + VideoPixelClock = 148500000 ; + pixelrep = 0 ; + aspec = HDMI_16x9 ; + Colorimetry = HDMI_ITU709 ; + break ; + case HDMI_576p50: + VIC = 17 ; + VideoPixelClock = 27000000 ; + pixelrep = 0 ; + aspec = HDMI_4x3 ; + Colorimetry = HDMI_ITU601 ; + break ; + case HDMI_576p50_16x9: + VIC = 18 ; + VideoPixelClock = 27000000 ; + pixelrep = 0 ; + aspec = HDMI_16x9 ; + Colorimetry = HDMI_ITU601 ; + break ; + case HDMI_720p50: + VIC = 19 ; + VideoPixelClock = 74250000 ; + pixelrep = 0 ; + aspec = HDMI_16x9 ; + Colorimetry = HDMI_ITU709 ; + break ; + case HDMI_1080i50: + VIC = 20 ; + VideoPixelClock = 74250000 ; + pixelrep = 0 ; + aspec = HDMI_16x9 ; + Colorimetry = HDMI_ITU709 ; + break ; + case HDMI_576i50: + VIC = 21 ; + VideoPixelClock = 13500000 ; + pixelrep = 1 ; + aspec = HDMI_4x3 ; + Colorimetry = HDMI_ITU601 ; + break ; + case HDMI_576i50_16x9: + VIC = 22 ; + VideoPixelClock = 13500000 ; + pixelrep = 1 ; + aspec = HDMI_16x9 ; + Colorimetry = HDMI_ITU601 ; + break ; + case HDMI_1080p50: + VIC = 31 ; + VideoPixelClock = 148500000 ; + pixelrep = 0 ; + aspec = HDMI_16x9 ; + Colorimetry = HDMI_ITU709 ; + break ; + case HDMI_1080p24: + VIC = 32 ; + VideoPixelClock = 74250000 ; + pixelrep = 0 ; + aspec = HDMI_16x9 ; + Colorimetry = HDMI_ITU709 ; + break ; + case HDMI_1080p25: + VIC = 33 ; + VideoPixelClock = 74250000 ; + pixelrep = 0 ; + aspec = HDMI_16x9 ; + Colorimetry = HDMI_ITU709 ; + break ; + case HDMI_1080p30: + VIC = 34 ; + VideoPixelClock = 74250000 ; + pixelrep = 0 ; + aspec = HDMI_16x9 ; + Colorimetry = HDMI_ITU709 ; + break ; + case HDMI_1080i120:// richard add + VIC = 46 ; + VideoPixelClock = 
148500000 ; + pixelrep = 0 ; + aspec = HDMI_16x9 ; + Colorimetry = HDMI_ITU601 ; + break ; + + + default: + VIC = 0; // richard add + bChangeMode = FALSE ; + return ; + } + + switch(OutputColorMode) + { + case HDMI_YUV444: + bOutputColorMode = F_MODE_YUV444 ; + break ; + case HDMI_YUV422: + bOutputColorMode = F_MODE_YUV422 ; + break ; + case HDMI_RGB444: + default: + bOutputColorMode = F_MODE_RGB444 ; + break ; + } + + if( Colorimetry == HDMI_ITU709 ) + { + bInputColorMode |= F_VIDMODE_ITU709 ; + } + else + { + bInputColorMode &= ~F_VIDMODE_ITU709 ; + } + + if( (HDMI_Video_Type)Colorimetry != HDMI_640x480p60) + { + bInputColorMode |= F_VIDMODE_16_235 ; + } + else + { + bInputColorMode &= ~F_VIDMODE_16_235 ; + } + + bChangeMode = TRUE ; +} + + +void +ConfigAVIInfoFrame(BYTE VIC, BYTE pixelrep) +{ +// AVI_InfoFrame AviInfo; + + AviInfo.pktbyte.AVI_HB[0] = AVI_INFOFRAME_TYPE|0x80 ; + AviInfo.pktbyte.AVI_HB[1] = AVI_INFOFRAME_VER ; + AviInfo.pktbyte.AVI_HB[2] = AVI_INFOFRAME_LEN ; + + switch(bOutputColorMode) + { + case F_MODE_YUV444: + // AviInfo.info.ColorMode = 2 ; + AviInfo.pktbyte.AVI_DB[0] = (2<<5)|(1<<4) ; + break ; + case F_MODE_YUV422: + // AviInfo.info.ColorMode = 1 ; + AviInfo.pktbyte.AVI_DB[0] = (1<<5)|(1<<4) ; + break ; + case F_MODE_RGB444: + default: + // AviInfo.info.ColorMode = 0 ; + AviInfo.pktbyte.AVI_DB[0] = (0<<5)|(1<<4) ; + break ; + } + AviInfo.pktbyte.AVI_DB[1] = 8 ; + AviInfo.pktbyte.AVI_DB[1] |= (aspec != HDMI_16x9)?(1<<4):(2<<4) ; // 4:3 or 16:9 + AviInfo.pktbyte.AVI_DB[1] |= (Colorimetry != HDMI_ITU709)?(1<<6):(2<<6) ; // 4:3 or 16:9 + AviInfo.pktbyte.AVI_DB[2] = 0 ; + AviInfo.pktbyte.AVI_DB[3] = VIC ; + AviInfo.pktbyte.AVI_DB[4] = pixelrep & 3 ; + AviInfo.pktbyte.AVI_DB[5] = 0 ; + AviInfo.pktbyte.AVI_DB[6] = 0 ; + AviInfo.pktbyte.AVI_DB[7] = 0 ; + AviInfo.pktbyte.AVI_DB[8] = 0 ; + AviInfo.pktbyte.AVI_DB[9] = 0 ; + AviInfo.pktbyte.AVI_DB[10] = 0 ; + AviInfo.pktbyte.AVI_DB[11] = 0 ; + AviInfo.pktbyte.AVI_DB[12] = 0 ; + + 
EnableAVIInfoFrame(TRUE, (unsigned char *)&AviInfo) ;
+}
+
+
+
+////////////////////////////////////////////////////////////////////////////////
+// Function: ConfigAudioInfoFrm
+// Parameter: NumChannel, number from 1 to 8
+// Return: ER_SUCCESS for successfull.
+// Remark: Evaluate. The speakerplacement is only for reference.
+// For production, the caller of SetAudioInfoFrame should program
+// Speaker placement by actual status.
+// Side-Effect:
+////////////////////////////////////////////////////////////////////////////////
+
+// Fill the file-scope AudioInfo frame (header + data bytes) with a fixed
+// layout - AUD_DB[0] = 1, all other data bytes zeroed - and hand it to the
+// TX layer.  NOTE(review): the header comment above still documents a
+// NumChannel parameter that this version no longer takes; DB[0]=1 is
+// presumably the channel-count code - confirm against CEA-861.
+void
+ConfigAudioInfoFrm(void)
+{
+    int i ;
+    OS_PRINTF("ConfigAudioInfoFrm(%d)\n",2) ;
+
+    AudioInfo.pktbyte.AUD_HB[0] = AUDIO_INFOFRAME_TYPE ;
+    AudioInfo.pktbyte.AUD_HB[1] = 1 ;
+    AudioInfo.pktbyte.AUD_HB[2] = AUDIO_INFOFRAME_LEN ;
+    AudioInfo.pktbyte.AUD_DB[0] = 1 ;
+    for( i = 1 ;i < AUDIO_INFOFRAME_LEN ; i++ )
+    {
+        AudioInfo.pktbyte.AUD_DB[i] = 0 ;
+    }
+    EnableAudioInfoFrame(TRUE, (unsigned char *)&AudioInfo) ;
+}
+
+
+/////////////////////////////////////////////////////////////////////
+// ParseEDID()
+// Check EDID check sum and EDID 1.3 extended segment.
+/////////////////////////////////////////////////////////////////////
+
+// Parse the sink's EDID: read base block 0 via GetEDIDData(), validate the
+// 128-byte checksum and the fixed 00 FF FF FF FF FF FF 00 header, then read
+// up to four extension blocks and scan CEA blocks (tag bytes 0x02 0x03) for
+// the HDMI IEEE OUI (0x000C03).  Side effects: updates RxCapability
+// (ValidCEA, ValidHDMI, and whatever ParseCEAEDID fills in).
+// Returns TRUE/FALSE; see the NOTE on the final return below.
+BOOL
+ParseEDID(void)
+{
+    // collect the EDID ucdata of segment 0
+    BYTE CheckSum ;
+    BYTE BlockCount ;
+    BOOL err ;
+    BOOL bValidCEA = FALSE ;
+    int i ;
+
+    RxCapability.ValidCEA = FALSE ;
+
+    // richard GetEDIDData(0, EDID_Buf);
+    if (!GetEDIDData(0, EDID_Buf))
+        return FALSE;
+
+
+    for( i = 0, CheckSum = 0 ; i < 128 ; i++ )
+    {
+        CheckSum += EDID_Buf[i] ; CheckSum &= 0xFF ;
+    }
+
+    //Eep_Write(0x80, 0x80, EDID_Buf) ;
+    if( CheckSum != 0 ) // 128-byte EDID sum shall equal zero
+    {
+        return FALSE ;
+    }
+
+    // check EDID Header
+    if( EDID_Buf[0] != 0x00 ||
+        EDID_Buf[1] != 0xFF ||
+        EDID_Buf[2] != 0xFF ||
+        EDID_Buf[3] != 0xFF ||
+        EDID_Buf[4] != 0xFF ||
+        EDID_Buf[5] != 0xFF ||
+        EDID_Buf[6] != 0xFF ||
+        EDID_Buf[7] != 0x00)
+    {
+        return FALSE ;
+    }
+
+
+    BlockCount = EDID_Buf[0x7E] ; // Extention Flash: Number of 128-byte EDID extesion blocks to follow
+
+    if( BlockCount == 0 )
+    {
+        return TRUE ; // do nothing.
+    }
+    else if ( BlockCount > 4 )
+    {
+        // Clamp to the size of EDID_Buf (base block + 4 extensions).
+        BlockCount = 4 ;
+    }
+
+    // read all segment for test
+    for( i = 1 ; i <= BlockCount ; i++ )
+    {
+        BYTE *b = EDID_Buf + 128 * i;
+        err = GetEDIDData(i, b) ;
+
+        if( err )
+        {
+            // Only the first CEA extension found is parsed (bValidCEA latch).
+            if( !bValidCEA && b[0] == 0x2 && b[1] == 0x3 ) //EDID_Buf[0] == 0x2 ==> Additional timing data type 2
+            {
+                // richard change
+                //err = ParseCEAEDID(EDID_Buf) ;
+                err = ParseCEAEDID(b, &RxCapability);
+                if( err )
+                {
+
+                    if(RxCapability.IEEEOUI==0x0c03)
+                    {
+                        RxCapability.ValidHDMI = TRUE ;
+                        bValidCEA = TRUE ;
+                    }
+                    else
+                    {
+                        RxCapability.ValidHDMI = FALSE ;
+                    }
+
+                }
+            }
+        }
+    }
+
+    // NOTE(review): 'err' is treated as a success flag above (truthy gates
+    // parsing), yet this returns FALSE when err is truthy.  Either 'err'
+    // switches meaning to an error code from ParseCEAEDID in edid.c, or the
+    // sense here is inverted - verify against ParseCEAEDID's return
+    // convention before touching this.
+    return err?FALSE:TRUE ; // richard modify
+
+}
+
+/* richard: use the one defined edid.c
+static BOOL
+ParseCEAEDID(BYTE *pCEAEDID)
+{
+    BYTE offset,End ;
+    BYTE count ;
+    BYTE tag ;
+    int i ;
+
+// richard if( pCEAEDID[0] != 0x02 || pCEAEDID[1] != 0x03 ) return ER_SUCCESS ; // not a CEA BLOCK.
+    if( pCEAEDID[0] != 0x02 || pCEAEDID[1] != 0x03 ) // not a CEA BLOCK.
+ return FALSE; + End = pCEAEDID[2] ; // CEA description. + RxCapability.VideoMode = pCEAEDID[3] ; + + RxCapability.VDOModeCount = 0 ; + RxCapability.idxNativeVDOMode = 0xff ; + + for( offset = 4 ; offset < End ; ) + { + tag = pCEAEDID[offset] >> 5 ; + count = pCEAEDID[offset] & 0x1f ; + switch( tag ) + { + case 0x01: // Audio Data Block ; + RxCapability.AUDDesCount = count/3 ; + offset++ ; + for( i = 0 ; i < RxCapability.AUDDesCount ; i++ ) + { + RxCapability.AUDDes[i].uc[0] = pCEAEDID[offset++] ; + RxCapability.AUDDes[i].uc[1] = pCEAEDID[offset++] ; + RxCapability.AUDDes[i].uc[2] = pCEAEDID[offset++] ; + } + + break ; + + case 0x02: // Video Data Block ; + //RxCapability.VDOModeCount = 0 ; + offset ++ ; + for( i = 0,RxCapability.idxNativeVDOMode = 0xff ; i < count ; i++, offset++ ) + { + BYTE VIC ; + VIC = pCEAEDID[offset] & (~0x80) ; + // if( FindModeTableEntryByVIC(VIC) != -1 ) + { + RxCapability.VDOMode[RxCapability.VDOModeCount] = VIC ; + if( pCEAEDID[offset] & 0x80 ) + { + RxCapability.idxNativeVDOMode = (BYTE)RxCapability.VDOModeCount ; + iVideoModeSelect = RxCapability.VDOModeCount ; + } + + RxCapability.VDOModeCount++ ; + } + } + break ; + + case 0x03: // Vendor Specific Data Block ; + offset ++ ; + RxCapability.IEEEOUI = (ULONG)pCEAEDID[offset+2] ; + RxCapability.IEEEOUI <<= 8 ; + RxCapability.IEEEOUI += (ULONG)pCEAEDID[offset+1] ; + RxCapability.IEEEOUI <<= 8 ; + RxCapability.IEEEOUI += (ULONG)pCEAEDID[offset] ; + offset += count ; // ignore the remaind. 
+ + break ; + + case 0x04: // Speaker Data Block ; + offset ++ ; + RxCapability.SpeakerAllocBlk.uc[0] = pCEAEDID[offset] ; + RxCapability.SpeakerAllocBlk.uc[1] = pCEAEDID[offset+1] ; + RxCapability.SpeakerAllocBlk.uc[2] = pCEAEDID[offset+2] ; + offset += 3 ; + break ; + case 0x05: // VESA Data Block ; + offset += count+1 ; + break ; + case 0x07: // Extended Data Block ; + offset += count+1 ; //ignore + break ; + default: + offset += count+1 ; // ignore + } + } + RxCapability.ValidCEA = TRUE ; + return TRUE ; +} +*/ + diff --git a/drivers/mcst/mga2/it6613/HDMI_TX/it6613_sys.h b/drivers/mcst/mga2/it6613/HDMI_TX/it6613_sys.h new file mode 100644 index 000000000000..c1600cd1c84e --- /dev/null +++ b/drivers/mcst/mga2/it6613/HDMI_TX/it6613_sys.h @@ -0,0 +1,63 @@ +#ifndef _CAT6611_SYS_H_ +#define _CAT6611_SYS_H_ +//////////////////////////////////////////////////////////////////////////////// +// Internal Data Type +//////////////////////////////////////////////////////////////////////////////// + +typedef enum tagHDMI_Video_Type { + HDMI_Unkown = 0 , + HDMI_640x480p60 = 1 , + HDMI_480p60, + HDMI_480p60_16x9, + HDMI_720p60, + HDMI_1080i60, + HDMI_480i60, + HDMI_480i60_16x9, + HDMI_1080p60 = 16, + HDMI_576p50, + HDMI_576p50_16x9, + HDMI_720p50 = 19, + HDMI_1080i50, + HDMI_576i50, + HDMI_576i50_16x9, + HDMI_1080p50 = 31, + HDMI_1080p24, + HDMI_1080p25, + HDMI_1080p30, + HDMI_1080i120 = 46, // richard add +} HDMI_Video_Type ; + +typedef enum tagHDMI_Aspec { + HDMI_4x3 , + HDMI_16x9 +} HDMI_Aspec; + +typedef enum tagHDMI_OutputColorMode { + HDMI_RGB444, + HDMI_YUV444, + HDMI_YUV422 +} HDMI_OutputColorMode ; + +typedef enum tagHDMI_Colorimetry { + HDMI_ITU601, + HDMI_ITU709 +} HDMI_Colorimetry ; + +/////////////////////////////////////////////////////////////////////// +// Output Mode Type +/////////////////////////////////////////////////////////////////////// + +#define RES_ASPEC_4x3 0 +#define RES_ASPEC_16x9 1 +#define F_MODE_REPT_NO 0 +#define F_MODE_REPT_TWICE 1 +#define 
F_MODE_REPT_QUATRO 3 +#define F_MODE_CSC_ITU601 0 +#define F_MODE_CSC_ITU709 1 + +void HDMITX_ChangeDisplayOption(HDMI_Video_Type VideoMode, HDMI_OutputColorMode OutputColorMode); +void HDMITX_SetOutput(void); + + + +#endif // _CAT6611_SYS_H_ diff --git a/drivers/mcst/mga2/it6613/mcu.h b/drivers/mcst/mga2/it6613/mcu.h new file mode 100644 index 000000000000..f68783fe0dd5 --- /dev/null +++ b/drivers/mcst/mga2/it6613/mcu.h @@ -0,0 +1,57 @@ +#ifndef _MCU_H_ +#define _MCU_H_ + +#include +#include +#include +#include + +#define printf DRM_INFO +#define usleep udelay + +typedef u8 alt_u8; + +typedef int bit; + +#ifndef NULL + #define NULL 0 +#endif + +//void ErrorF(char *fmt,...); +#define ErrorF DRM_ERROR +//void EnableDebugMessage(BOOL bEnable); +//void DelayMS(unsigned short ms); +#define DelayMS msleep +//void OS_PRINTF(char *fmt,...); +#define OS_PRINTF(fmt, ...) \ + _DRM_PRINTK(, DEBUG, fmt, ##__VA_ARGS__) +void OS_DelayMS(unsigned short ms); + +void HDMITX_Reset(void); +void HDMIRX_Reset(void); +void HDMIRX_DumpAllReg(void); +void HDMIRX_DumpReg(int RegIndex); +void HDMITX_DumpAllReg(void); +void HDMITX_DumpReg(int RegIndex); + + + +bool ReadRXIntPin(void); + +#define HDMI_TX_I2C_CLOCK HDMI_TX_I2C_SCL_BASE +#define HDMI_TX_I2C_DATA HDMI_TX_I2C_SDA_BASE +#define HDMI_RX_I2C_CLOCK HDMI_RX_I2C_SCL_BASE +#define HDMI_RX_I2C_DATA HDMI_RX_I2C_SDA_BASE + +bool HDMIRX_EEPROM0_WriteI2C_Byte(alt_u8 RegAddr,alt_u8 Data); +bool HDMIRX_EEPROM1_WriteI2C_Byte(alt_u8 RegAddr,alt_u8 Data); +bool HDMIRX_EEPROM0_ReadI2C_Byte(alt_u8 RegAddr, alt_u8 *pData); +bool HDMIRX_EEPROM1_ReadI2C_Byte(alt_u8 RegAddr, alt_u8 *pData); + +// OS Tick API +typedef unsigned int OS_TICK; +OS_TICK OS_GetTicks(void); +OS_TICK OS_TicksPerSecond(void); + + +#endif /*_MCU_H_*/ diff --git a/drivers/mcst/mga2/it6613/typedef.h b/drivers/mcst/mga2/it6613/typedef.h new file mode 100644 index 000000000000..c4ba3c732294 --- /dev/null +++ b/drivers/mcst/mga2/it6613/typedef.h @@ -0,0 +1,335 @@ +#ifndef _TYPEDEF_H_ 
+#define _TYPEDEF_H_ + +////////////////////////////////////////////////// +// data type +////////////////////////////////////////////////// +#ifdef _MCU_ +typedef bit BOOL ; +#define _CODE //richard code +#define _IDATA //richard idata +#define _XDATA //richard xdata +#else +typedef int BOOL ; +#define _CODE +#define _IDATA +#define _XDATA +#endif // _MCU_ + + + +typedef char CHAR,*PCHAR ; +typedef unsigned char uchar,*puchar ; +typedef unsigned char UCHAR,*PUCHAR ; +typedef unsigned char byte,*pbyte ; +typedef unsigned char BYTE,*PBYTE ; + +typedef short SHORT,*PSHORT ; +//typedef unsigned short ushort,*pushort ; +typedef unsigned short USHORT,*PUSHORT ; +typedef unsigned short word,*pword ; +typedef unsigned short WORD,*PWORD ; + +typedef long LONG,*PLONG ; +//typedef unsigned long ulong,*pulong ; +typedef unsigned long ULONG,*PULONG ; +typedef unsigned long dword,*pdword ; +typedef unsigned long DWORD,*PDWORD ; + +#undef FALSE +#undef TRUE +#define FALSE 0 +#define TRUE 1 + +#undef SUCCESS +#undef FAIL +#define SUCCESS 0 +#define FAIL -1 + +#undef ON +#undef ON +#define ON 1 +#define OFF 0 + +typedef enum _SYS_STATUS { + ER_SUCCESS = 0, + ER_FAIL, + ER_RESERVED +} SYS_STATUS ; + +//#define abs(x) (((x)>=0)?(x):(-(x))) + + +typedef enum _Video_State_Type { + VSTATE_PwrOff = 0, + VSTATE_SyncWait , + VSTATE_SWReset, + VSTATE_SyncChecking, + VSTATE_HDCPSet, + VSTATE_HDCP_Reset, + VSTATE_ModeDetecting, + VSTATE_VideoOn, + VSTATE_Reserved +} Video_State_Type ; + + +typedef enum _Audio_State_Type { + ASTATE_AudioOff = 0, + ASTATE_RequestAudio , + ASTATE_ResetAudio, + ASTATE_WaitForReady, + ASTATE_AudioOn , + ASTATE_Reserved +} Audio_State_Type ; + +typedef enum _TXVideo_State_Type { + TXVSTATE_Unplug = 0, + TXVSTATE_HPD, + TXVSTATE_WaitForMode, + TXVSTATE_WaitForVStable, + TXVSTATE_VideoInit, + TXVSTATE_VideoSetup, + TXVSTATE_VideoOn, + TXVSTATE_Reserved +} TXVideo_State_Type ; + + +typedef enum _TXAudio_State_Type { + TXASTATE_AudioOff = 0, + TXASTATE_AudioPrepare, + 
TXASTATE_AudioOn, + TXASTATE_AudioFIFOFail, + TXASTATE_Reserved +} TXAudio_State_Type ; + + + + +typedef enum { + PCLK_LOW = 0 , + PCLK_MEDIUM, + PCLK_HIGH +} VIDEOPCLKLEVEL ; + +/////////////////////////////////////////////////////////////////////// +// Video Data Type +/////////////////////////////////////////////////////////////////////// +#define F_MODE_RGB24 0 +#define F_MODE_RGB444 0 +#define F_MODE_YUV422 1 +#define F_MODE_YUV444 2 +#define F_MODE_CLRMOD_MASK 3 + + +#define F_MODE_INTERLACE 1 + +#define F_MODE_ITU709 (1<<4) +#define F_MODE_ITU601 0 + +#define F_MODE_0_255 0 +#define F_MODE_16_235 (1<<5) + +#define F_MODE_EN_UDFILT (1<<6) // output mode only,and loaded from EEPROM +#define F_MODE_EN_DITHER (1<<7) // output mode only,and loaded from EEPROM + +#define F_VIDMODE_ITU709 F_MODE_ITU709 // richard add +#define F_VIDMODE_16_235 F_MODE_16_235 // richard add + + +typedef union _VideoFormatCode +{ + struct _VFC + { + BYTE colorfmt:2 ; + BYTE interlace:1 ; + BYTE Colorimetry:1 ; + BYTE Quantization:1 ; + BYTE UpDownFilter:1 ; + BYTE Dither:1 ; + } VFCCode ; + unsigned char VFCByte ; +} VideoFormatCode ; + +#define T_MODE_CCIR656 (1<<0) +#define T_MODE_SYNCEMB (1<<1) +#define T_MODE_INDDR (1<<2) +#define T_MODE_PCLKDIV2 (1<<3) +#define T_MODE_DEGEN (1<<4) +#define T_MODE_SYNCGEN (1<<5) +////////////////////////////////////////////////////////////////// +// Audio relate definition and macro. 
+////////////////////////////////////////////////////////////////// + +// for sample clock +#define AUDFS_22p05KHz 4 +#define AUDFS_44p1KHz 0 +#define AUDFS_88p2KHz 8 +#define AUDFS_176p4KHz 12 + +#define AUDFS_24KHz 6 +#define AUDFS_48KHz 2 +#define AUDFS_96KHz 10 +#define AUDFS_192KHz 14 + +#define AUDFS_32KHz 3 +#define AUDFS_OTHER 1 + +// Audio Enable +#define ENABLE_SPDIF (1<<4) +#define ENABLE_I2S_SRC3 (1<<3) +#define ENABLE_I2S_SRC2 (1<<2) +#define ENABLE_I2S_SRC1 (1<<1) +#define ENABLE_I2S_SRC0 (1<<0) + +#define AUD_SWL_NOINDICATE 0x0 +#define AUD_SWL_16 0x2 +#define AUD_SWL_17 0xC +#define AUD_SWL_18 0x4 +#define AUD_SWL_20 0xA // for maximum 20 bit +#define AUD_SWL_21 0xD +#define AUD_SWL_22 0x5 +#define AUD_SWL_23 0x9 +#define AUD_SWL_24 0xB + + +///////////////////////////////////////////////////////////////////// +// Packet and Info Frame definition and datastructure. +///////////////////////////////////////////////////////////////////// + +#define VENDORSPEC_INFOFRAME_TYPE 0x01 +#define AVI_INFOFRAME_TYPE 0x02 +#define SPD_INFOFRAME_TYPE 0x03 +#define AUDIO_INFOFRAME_TYPE 0x04 +#define MPEG_INFOFRAME_TYPE 0x05 + +#define VENDORSPEC_INFOFRAME_VER 0x01 +#define AVI_INFOFRAME_VER 0x02 +#define SPD_INFOFRAME_VER 0x01 +#define AUDIO_INFOFRAME_VER 0x01 +#define MPEG_INFOFRAME_VER 0x01 + +#define VENDORSPEC_INFOFRAME_LEN 8 +#define AVI_INFOFRAME_LEN 13 +#define SPD_INFOFRAME_LEN 25 +#define AUDIO_INFOFRAME_LEN 10 +#define MPEG_INFOFRAME_LEN 10 + +#define ACP_PKT_LEN 9 +#define ISRC1_PKT_LEN 16 +#define ISRC2_PKT_LEN 16 + +typedef union _AVI_InfoFrame +{ + struct { + BYTE Type ; + BYTE Ver ; + BYTE Len ; + + BYTE Scan:2 ; + BYTE BarInfo:2 ; + BYTE ActiveFmtInfoPresent:1 ; + BYTE ColorMode:2 ; + BYTE FU1:1 ; + + BYTE ActiveFormatAspectRatio:4 ; + BYTE PictureAspectRatio:2 ; + BYTE Colorimetry:2 ; + + BYTE Scaling:2 ; + BYTE FU2:6 ; + + BYTE VIC:7 ; + BYTE FU3:1 ; + + BYTE PixelRepetition:4 ; + BYTE FU4:4 ; + + SHORT Ln_End_Top ; + SHORT Ln_Start_Bottom ; + 
SHORT Pix_End_Left ; + SHORT Pix_Start_Right ; + } info ; + struct { + BYTE AVI_HB[3] ; + BYTE AVI_DB[AVI_INFOFRAME_LEN] ; + } pktbyte ; +} AVI_InfoFrame ; + +typedef union _Audio_InfoFrame { + + struct { + BYTE Type ; + BYTE Ver ; + BYTE Len ; + + BYTE AudioChannelCount:3 ; + BYTE RSVD1:1 ; + BYTE AudioCodingType:4 ; + + BYTE SampleSize:2 ; + BYTE SampleFreq:3 ; + BYTE Rsvd2:3 ; + + BYTE FmtCoding ; + + BYTE SpeakerPlacement ; + + BYTE Rsvd3:3 ; + BYTE LevelShiftValue:4 ; + BYTE DM_INH:1 ; + } info ; + + struct { + BYTE AUD_HB[3] ; + BYTE AUD_DB[AUDIO_INFOFRAME_LEN] ; + } pktbyte ; + +} Audio_InfoFrame ; + +typedef union _MPEG_InfoFrame { + struct { + BYTE Type ; + BYTE Ver ; + BYTE Len ; + + ULONG MpegBitRate ; + + BYTE MpegFrame:2 ; + BYTE Rvsd1:2 ; + BYTE FieldRepeat:1 ; + BYTE Rvsd2:3 ; + } info ; + struct { + BYTE MPG_HB[3] ; + BYTE MPG_DB[MPEG_INFOFRAME_LEN] ; + } pktbyte ; +} MPEG_InfoFrame ; + +// Source Product Description +typedef union _SPD_InfoFrame { + struct { + BYTE Type ; + BYTE Ver ; + BYTE Len ; + + char VN[8] ; // vendor name character in 7bit ascii characters + char PD[16] ; // product description character in 7bit ascii characters + BYTE SourceDeviceInfomation ; + } info ; + struct { + BYTE SPD_HB[3] ; + BYTE SPD_DB[SPD_INFOFRAME_LEN] ; + } pktbyte ; +} SPD_InfoFrame ; + +/////////////////////////////////////////////////////////////////////////// +// Using for interface. 
+/////////////////////////////////////////////////////////////////////////// +struct VideoTiming { + ULONG VideoPixelClock ; + BYTE VIC ; + BYTE pixelrep ; + BYTE outputVideoMode ; +} ; + +#endif // _TYPEDEF_H_ diff --git a/drivers/mcst/mga2/mga2_auc2.c b/drivers/mcst/mga2/mga2_auc2.c new file mode 100644 index 000000000000..9d8fe5145f74 --- /dev/null +++ b/drivers/mcst/mga2/mga2_auc2.c @@ -0,0 +1,310 @@ +/* AUC2 registers */ +#define MGA2_AUC2_QUEUEPTRL (0x04000 + 0x00) +#define MGA2_AUC2_QUEUEPTRH (0x04000 + 0x04) +#define MGA2_AUC2_STATUSPTRL (0x04000 + 0x08) +#define MGA2_AUC2_STATUSPTRH (0x04000 + 0x0c) +#define MGA2_AUC2_CTRLSTAT (0x04000 + 0x10) +# define MGA2_AUC2_B_BUSY (1 << 30) +# define MGA2_AUC2_B_ABORT (1 << 31) +#define MGA2_AUC2_HEADTAIL (0x04000 + 0x14) +#define MGA2_AUC2_DUMMY (0x04000 + 0x18) + + +#define MGA2_AUC2_B_TAIL_SHIFT 16 +#define MGA2_AUC2_B_HEAD_MASK 0xffff + +#define DESC0_NOT_LAST (1ULL << 63) +#define DESC0_TYPE (0ULL << 62) +#define DESC0_WAIT_VBLANK2 (1ULL << 54) +#define DESC0_WAIT_VBLANK1 (1ULL << 53) +#define DESC0_WAIT_VBLANK0 (1ULL << 52) +#define DESC0_WAIT_BLITER_STOP1 (1ULL << 51) +#define DESC0_WAIT_BLITER_READY1 (1ULL << 50) +#define DESC0_WAIT_BLITER_STOP0 (1ULL << 49) +#define DESC0_WAIT_BLITER_READY0 (1ULL << 48) +#define DESC0_REG_OFFSET 32 + +struct auc2_st { + u64 status; +} __packed; + +struct desc0 { + u64 next; + u64 val; + u64 val2; +} __packed; + +#define DESC1_NOT_LAST (1UL << 63) +#define DESC1_TYPE (1UL << 62) +#define DESC1_BLITTER_OFFSET 54 +#define DESC1_WAIT_ENABLE (1UL << 52) +#define DESC1_WAIT_OFFSET 48 +#define DESC1_REG_MASK_OFFSET 32 + +struct desc1 { + u64 next; + union { + struct { + u32 mask; + u32 regs[15]; + }; + u32 val32[16]; + u64 val64[8]; + }; +} __packed; + +static u64 auc2_get_current_desc(struct mga2 *mga2) +{ + return le64_to_cpu(READ_ONCE(mga2->status->status)) >> 32; +} + +static int blitter_reg_nr(u32 reg) +{ + return (reg / 4) % 16; +} + +static void mga25_wdesc(struct mga2 
*mga2, u32 data, u32 reg) +{ + struct desc1 *c = &mga2->desc1[mga2->head]; + + reg = blitter_reg_nr(reg); + + c->mask |= 1 << reg; + c->regs[reg] = data; +} + +static void auc2_update_ptr(struct mga2 *mga2) +{ + u16 tail = le64_to_cpu(mga2->status->status); + mga2->tail = tail % MGA2_RING_SIZE; +} + +static void mga25_desc1_serialize(struct desc1 *c) +{ + int i, k = 0, l = 0; + u64 v[2]; + long mask = (c->mask << 1) | 1; + c->mask |= (((1UL << DESC1_BLITTER_OFFSET) | DESC1_TYPE) >> 32); + + for_each_set_bit(i, &mask, ARRAY_SIZE(c->val32)) { + v[l++] = c->val32[i]; + if (l == 2) + c->val64[k++] = cpu_to_le64((v[0] << 32) | v[1]); + l %= 2; + } + if (l) + c->val64[k] = cpu_to_le64(v[0] << 32); +} + +static int mga25_append_desc(struct mga2 *mga2, struct mga2_gem_object *mo) +{ + int ret = 0; + int h = mga2->head; + struct desc1 *c = mo ? (struct desc1 *)mo->vaddr : &mga2->desc1[h]; + dma_addr_t dma_addr = mo ? mo->dma_addr : + cpu_to_le64(mga2->desc1_dma + h * sizeof(*c)); + struct desc0 *d = &mga2->desc0[h]; + u64 v; + + if (!mo) + mga25_desc1_serialize(c); + + c->next = cpu_to_le64(mga2->desc0_dma + h * sizeof(*d)); + + memset(d, 0, sizeof(*d)); + v = DESC0_WAIT_BLITER_STOP0 | DESC0_NOT_LAST | + ((((u64)MGA2_AUC2_DUMMY) / 4) << DESC0_REG_OFFSET); + d->val = cpu_to_le64(v); + v = DESC0_WAIT_BLITER_STOP1 | + ((((u64)mga2->info->int_regs_base + MGA2_INTREQ) / 4) + << DESC0_REG_OFFSET) | + MGA2_INT_B_SETRST | MGA25_INT_B_SOFTINT; + d->val2 = cpu_to_le64(v); + + mga2->ring[h] = dma_addr; + mga2->head = circ_inc(h); + mga2->fence_seqno++; + wfb(mga2->head, MGA2_AUC2_HEADTAIL); + + return ret; +} + +static struct mga2_gem_object *mga2_auc_ioctl(struct drm_device *drm, + void *data, struct drm_file *file) +{ + int ret = 0; + void __user *p; + int head; + u32 *desc, nr, handle, reltype; + struct mga2_gem_object *mo; + struct dma_resv *resv; + struct dma_fence *fence; + struct drm_mga2_bctrl *udesc = data; + struct mga2 *mga2 = drm->dev_private; + struct drm_mga2_buffers 
__user *b = (void *)((long)udesc->buffers_ptr); + struct drm_gem_object *gobj = NULL; + + head = get_free_desc(mga2); + if (head < 0) { + ret = -ENOSPC; + goto out; + } + if (!(gobj = drm_gem_object_lookup(file, udesc->desc_handle))) { + ret = -ENOENT; + goto out; + } + /* drop reference from lookup - + fence is used for reference control */ + drm_gem_object_put_unlocked(gobj); + + mo = to_mga2_obj(gobj); + if (mo->write_domain != MGA2_GEM_DOMAIN_CPU) { + ret = -EINVAL; + goto out; + } + + desc = mo->vaddr; + fence = &mga2->mga2_fence[head]; + + for (p = b; !ret;) { + struct drm_gem_object *o; + unsigned long a = -1; + int i; + if (get_user(nr, &b->nr) || + get_user(reltype, &b->reltype) || + get_user(handle, &b->handle)) { + ret = -EFAULT; + goto out; + } + if (nr == 0) + break; + + if (!(o = drm_gem_object_lookup(file, handle))) { + ret = -ENOENT; + goto out; + } + /* drop reference from lookup - + fence is used for reference control */ + drm_gem_object_put_unlocked(o); + a = to_mga2_obj(o)->dma_addr; + for (i = 0; i < nr; i++) { + u32 offset; + if (get_user(offset, &b->offset[i])) { + ret = -EFAULT; + goto out; + } + offset /= sizeof(*desc); + if (desc[offset] >= o->size) { + ret = -EINVAL; + goto out; + } + desc[offset] += reltype ? a >> 32 : a; + } + resv = &to_mga2_obj(o)->resv; + dma_resv_lock(resv, NULL); + if ((ret = dma_resv_reserve_shared(resv, 1)) == 0) + dma_resv_add_shared_fence(resv, fence); + dma_resv_unlock(resv); + + if (0) + DRM_DEBUG("add fence %lld to %lx\n", fence->seqno, a); + + p += sizeof(*b) + nr * sizeof(u32); + b = (struct drm_mga2_buffers __user *)p; + } + + resv = &mo->resv; + dma_resv_lock(resv, NULL); + if ((ret = dma_resv_reserve_shared(resv, 1)) == 0) + dma_resv_add_shared_fence(resv, fence); + dma_resv_unlock(resv); +out: + return ret ? 
ERR_PTR(ret) : mo; +} + +int mga2_auc2_ioctl(struct drm_device *drm, void *data, struct drm_file *file) +{ + struct mga2 *mga2 = drm->dev_private; + struct mga2_gem_object *mo; + int ret = 0; + if (mga2_p2(mga2)) + return -ENODEV; + if (mga2->flags & MGA2_BCTRL_OFF) + return -ENODEV; + + mutex_lock(&mga2->bctrl_mu); + + mo = mga2_auc_ioctl(drm, data, file); + if (IS_ERR(mo)) { + ret = PTR_ERR(mo); + goto out; + } + append_desc(mga2, mo); +out: + mutex_unlock(&mga2->bctrl_mu); + return ret; +} + +static int mga2fb_auc2_fini(struct mga2 *mga2) +{ + int sz; + struct device *dev = mga2->drm->dev; + + sz = MGA2_RING_SIZE * sizeof(*mga2->ring); + dma_free_coherent(dev, sz, mga2->ring, mga2->ring_dma); + + sz = MGA2_RING_SIZE * sizeof(*mga2->desc1); + dma_free_coherent(dev, sz, mga2->desc1, mga2->desc1_dma); + + sz = MGA2_RING_SIZE * sizeof(*mga2->desc0); + dma_free_coherent(dev, sz, mga2->desc0, mga2->desc0_dma); + + sz = sizeof(*mga2->status); + dma_free_coherent(dev, sz, mga2->status, mga2->status_dma); + return 0; +} + +static int mga2fb_auc2_hw_init(struct mga2 *mga2) +{ + wfb(mga2->ring_dma, MGA2_AUC2_QUEUEPTRL); + wfb((u64)mga2->ring_dma >> 32, MGA2_AUC2_QUEUEPTRH); + wfb(mga2->status_dma, MGA2_AUC2_STATUSPTRL); + wfb((u64)mga2->status_dma >> 32, MGA2_AUC2_STATUSPTRH); + wfb(MGA2_RING_SIZE, MGA2_AUC2_CTRLSTAT); + return 0; +} + +static int mga2fb_auc2_init(struct mga2 *mga2) +{ + dma_addr_t addr; + int sz = MGA2_RING_SIZE * sizeof(*mga2->ring); + void *b; + if (!(b = dma_alloc_coherent(mga2->drm->dev, sz, &addr, GFP_KERNEL))) + goto err; + + mga2->ring = b; + mga2->ring_dma = addr; + + sz = MGA2_RING_SIZE * sizeof(*mga2->desc1); + if (!(b = dma_alloc_coherent(mga2->drm->dev, sz, &addr, GFP_KERNEL))) + goto err; + mga2->desc1 = b; + mga2->desc1_dma = addr; + + sz = MGA2_RING_SIZE * sizeof(*mga2->desc0); + if (!(b = dma_alloc_coherent(mga2->drm->dev, sz, &addr, GFP_KERNEL))) + goto err; + mga2->desc0 = b; + mga2->desc0_dma = addr; + + sz = sizeof(*mga2->status); + if 
(!(b = dma_alloc_coherent(mga2->drm->dev, sz, &addr, GFP_KERNEL))) + goto err; + mga2->status = b; + mga2->status_dma = addr; + + return 0; +err: + mga2fb_auc2_fini(mga2); + return -ENOMEM; +} diff --git a/drivers/mcst/mga2/mga2_bctrl.c b/drivers/mcst/mga2/mga2_bctrl.c new file mode 100644 index 000000000000..161974fe43f4 --- /dev/null +++ b/drivers/mcst/mga2/mga2_bctrl.c @@ -0,0 +1,268 @@ +#define MGA2_BCTRL_LBASEPTR 0x01800 /* ( 32- ) . */ +#define MGA2_BCTRL_HBASEPTR 0x01804 /* (R/W) */ +#define MGA2_BCTRL_START 0x01808 /* */ +# define MGA2_BCTRL_B_START (1 << 0) +#define MGA2_BCTRL_CURLPTR 0x0180C /* (R/O) . */ +#define MGA2_BCTRL_STATUS 0x01810 /* (R/O) */ +# define MGA2_BCTRL_B_BUSY (1 << 0) + +#define MGA2_BCTRL_DUMMY 0x01814 /* - */ + +#define MGA2_BCTRL_TAIL (0x800 + MGA2_DC0_VGAWINOFFS) /* use it as tail pointer */ + +#define MGA2_VIDMUX_BITS 0x03404 +# define MGA2_VIDMUX_BLT_WR_BUSY (1 << 5) +# define MGA2_SYSMUX_BLT_WR_BUSY (1 << 1) + +#define MGA2_SYSMUX_BITS 0x03804 +#define MGA2_SYSMUX_BLT_WR_BUSY (1 << 1) + + +#define MGA25_SYSMUX_BITS 0x03004 +#define MGA25_FBMUX_BITS 0x03404 +#define MGA25_VMMUX_BITS 0x03804 + +#define CIRC_SIZE MGA2_RING_SIZE +#define CIRC_MASK (CIRC_SIZE - 1) +#define circ_idle(circ) ((circ)->head == (circ)->tail) +#define __circ_space(head, tail) CIRC_SPACE(head, tail, CIRC_SIZE) +#define circ_space(circ) __circ_space((circ)->head, (circ)->tail) +#define circ_cnt(circ) CIRC_CNT((circ)->head, (circ)->tail, CIRC_SIZE) +#define circ_clear(circ) ((circ)->tail = (circ)->head) +#define circ_add(__v, __i) (((__v) + (__i)) & CIRC_MASK) +#define circ_inc(__v) circ_add(__v, 1) +#define circ_dec(__v) circ_add(__v, -1) + + +struct bctrl_base { + u64 current_desc; + u32 status; + u32 reserved; +} __packed; + +#define BCTRL_CMD_NR 16 + +struct bctrl_desc { + u32 next_lo; + u32 next_hi; + struct bctrl_cmd { + u16 ctrl; + u16 reg; + u32 data; + } cmd[BCTRL_CMD_NR] __packed; +} __packed; + +/* ctrl bitmasks */ +#define MGA2_BCTRL_LST_CMD (1 << 
15) +#define MGA2_BCTRL_WAIT_ROP3 (1 << 3) +#define MGA2_BCTRL_WAIT_ROP2 (1 << 1) + + +struct bctrl { + struct bctrl_base base; + struct bctrl_desc desc[MGA2_RING_SIZE]; + struct bctrl_desc fence[MGA2_RING_SIZE]; +} __packed; + +static u64 bctrl_get_current_desc(struct mga2 *mga2) +{ + struct bctrl_base *base = &mga2->bctrl->base; + return le64_to_cpu(READ_ONCE(base->current_desc)); +} + +static dma_addr_t idx_to_addr(struct mga2 *mga2, int i) +{ + dma_addr_t base = mga2->bctrl_dma + offsetof(struct bctrl, desc); + return base + i * sizeof(struct bctrl_desc); +} + +static dma_addr_t idx_to_fence_addr(struct mga2 *mga2, int i) +{ + dma_addr_t base = mga2->bctrl_dma + offsetof(struct bctrl, fence); + return base + i * sizeof(struct bctrl_desc); +} + +static inline void write_desc(void *p, dma_addr_t addr) +{ + volatile dma_addr_t *d = p; + if (sizeof(dma_addr_t) == 32) + *d = cpu_to_le32(addr); + else + *d = cpu_to_le64(addr); +} + +static inline dma_addr_t read_desc(void *p) +{ + dma_addr_t ret; + volatile dma_addr_t *d = p; + if (sizeof(dma_addr_t) == 32) + ret = cpu_to_le32(*d); + else + ret = le64_to_cpu(*d); + return ret; +} + +static void bctrl_update_ptr(struct mga2 *mga2) +{ + mga2->tail = rfb(MGA2_BCTRL_TAIL) >> 16; +} + +static void __mga2_wdesc(struct bctrl_cmd *c, u32 data, u32 reg, + bool first, bool last) +{ + u16 v = 0; + if (last) + v |= MGA2_BCTRL_LST_CMD; + if (first) + v |= MGA2_BCTRL_WAIT_ROP2; + c->ctrl = cpu_to_le16(v); + c->data = cpu_to_le32(data); + c->reg = cpu_to_le16(reg / 4); +} + +static void mga2_wdesc(struct mga2 *mga2, int n, u32 data, u32 reg, + bool first, bool last) +{ + struct bctrl_cmd *c = &mga2->bctrl->desc[mga2->head].cmd[n]; + __mga2_wdesc(c, data, reg, first, last); +} + +static void __fence_wdesc(struct mga2 *mga2, int n, u32 data, u32 reg, + bool first, bool last) +{ + struct bctrl_cmd *c = &mga2->bctrl->fence[mga2->head].cmd[n]; + __mga2_wdesc(c, data, reg, first, last); +} + +#define fence_wdesc(__cmd, __data, __reg, 
__first, __last) \ + __fence_wdesc(mga2, __cmd, __data, __reg, __first, __last); + +static int mga2_append_desc(struct mga2 *mga2, struct mga2_gem_object *mo) +{ + long ret = 0, timeout = msecs_to_jiffies(mga2_timeout(mga2)); + int h = mga2->head, l = circ_dec(h); + struct bctrl_base *base = &mga2->bctrl->base; + struct bctrl_desc *desc = mo ? mo->vaddr : &mga2->bctrl->desc[h]; + dma_addr_t addr = mo ? mo->dma_addr : idx_to_addr(mga2, h); + struct bctrl_desc *fence = &mga2->bctrl->fence[h]; + dma_addr_t fence_addr = idx_to_fence_addr(mga2, h); + dma_addr_t last_addr = idx_to_fence_addr(mga2, l), current_desc; + struct bctrl_desc *last_fence = &mga2->bctrl->fence[l]; + struct dma_fence *dfence = &mga2->mga2_fence[l]; + u32 status; + u32 v = mga25(mga2) ? MGA25_INT_B_SOFTINT : MGA2_INT_B_SOFTINT; + + fence_wdesc(0, circ_inc(h) << 16, MGA2_BCTRL_TAIL, 1, 0); + fence_wdesc(1, MGA2_INT_B_SETRST | v, + mga2->info->int_regs_base + MGA2_INTREQ, 0, 1); + + write_desc(fence, 0); + write_desc(desc, fence_addr); + wmb(); /* descriptor is written */ + write_desc(last_fence, addr); /* link the descriptor */ + + mga2->head = circ_inc(h); + mga2->fence_seqno++; + + /* mga2 writes desc first, then the status */ + status = le32_to_cpu(READ_ONCE(base->status)); + current_desc = le64_to_cpu(READ_ONCE(base->current_desc)); + + if (!(status & MGA2_BCTRL_B_BUSY)) + goto uptdate_ptr; + else if (current_desc && current_desc != last_addr) + goto out; + /* Now we don't know if mga2 has read the desc. 
+ Let's wait for previous one and see */ + ret = dma_fence_wait_timeout(dfence, true, timeout); + if (ret == 0) { + ret = -ETIMEDOUT; + mga2->flags |= MGA2_BCTRL_OFF; + dma_fence_signal(dfence); + DRM_ERROR("fence %d wait timed out.\n", l); + } else if (ret < 0) { + DRM_DEBUG("fence %d wait failed (%ld).\n", l, ret); + } else { + ret = 0; + } +uptdate_ptr: + mga2_update_ptr(mga2); + if (circ_idle(mga2)) + goto out; + write_desc(base, addr); + wfb(MGA2_BCTRL_B_START, MGA2_BCTRL_START); +out: + return ret; +} + +#ifdef CONFIG_DEBUG_FS +int mga2_debugfs_bctrl(struct seq_file *s, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *)s->private; + struct drm_device *drm = node->minor->dev; + struct mga2 *mga2 = drm->dev_private; + seq_printf(s, "head: %x, tail: %x (%x)\n", + mga2->tail, mga2->head, rfb(MGA2_BCTRL_TAIL)); + + seq_hex_dump(s, "", DUMP_PREFIX_OFFSET, 32, 4, + mga2->bctrl, sizeof(*mga2->bctrl), false); + + return 0; +} +#endif + +int __mga2fb_bctrl_hw_init(struct mga2 *mga2) +{ + u64 addr = mga2->bctrl_dma; + wfb(mga2->tail << 16, MGA2_BCTRL_TAIL); + wfb(addr, MGA2_BCTRL_LBASEPTR); + wfb(addr >> 32, MGA2_BCTRL_HBASEPTR); + return 0; +} + +static int __mga2fb_bctrl_init(struct mga2 *mga2) +{ + mga2->bctrl = dma_alloc_coherent(mga2->drm->dev, sizeof(*mga2->bctrl), + &mga2->bctrl_dma, GFP_KERNEL); + if (!mga2->bctrl) + return -ENOMEM; + return 0; +} + +static int __mga2fb_bctrl_fini(struct mga2 *mga2) +{ + BUILD_BUG_ON(BCTRL_CMD_NR >= (1 << 16)); + dma_free_coherent(mga2->drm->dev, sizeof(*mga2->bctrl), + mga2->bctrl, mga2->bctrl_dma); + return 0; +} + +int mga2_bctrl_ioctl(struct drm_device *drm, void *data, struct drm_file *file) +{ + struct mga2 *mga2 = drm->dev_private; + struct mga2_gem_object *mo; + int ret = 0; + if (mga2->flags & MGA2_BCTRL_OFF) + return -ENODEV; + + mutex_lock(&mga2->bctrl_mu); + if ((ret = __mga2_sync(mga2))) + goto out; + + if (mga25(mga2) && !mga2->bctrl_active) { + mga2_update_ptr(mga2); + wfb(mga2->tail << 16, 
MGA2_BCTRL_TAIL); + mga2->bctrl_active = true; + } + + mo = mga2_auc_ioctl(drm, data, file); + if (IS_ERR(mo)) { + ret = PTR_ERR(mo); + goto out; + } + + mga2_append_desc(mga2, mo); +out: + mutex_unlock(&mga2->bctrl_mu); + return ret; +} \ No newline at end of file diff --git a/drivers/mcst/mga2/mga2_drv.c b/drivers/mcst/mga2/mga2_drv.c new file mode 100644 index 000000000000..67ec508af105 --- /dev/null +++ b/drivers/mcst/mga2/mga2_drv.c @@ -0,0 +1,267 @@ +#include +#include + +#include + +#include "mga2_drv.h" + +#define DRIVER_AUTHOR "MCST" + +#define DRIVER_NAME "mga2" +#define DRIVER_DESC "DRM driver for MCST MGA2" +#define DRIVER_DATE "20210728" + +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 2 +#define DRIVER_PATCHLEVEL 0 + +static struct drm_driver driver; + +static struct mga2_info mga26_info = { + .regs_bar = 2, + .dc_regs_base = 0x400, + .int_regs_base = 0x1c00, + .vid_regs_base = 0x02000, + .mga2_crts_nr = 3, +}; + +static struct mga2_info mga25_info = { + .regs_bar = 0, + .vram_bar = 2, + .dc_regs_base = 0x400, + .int_regs_base = 0x1c00, + .vid_regs_base = 0x02000, + .mga2_crts_nr = 3, +}; + +static struct mga2_info mga2_info = { + .regs_bar = 2, + .vram_bar = 0, + .dc_regs_base = 0x800, + .int_regs_base = 0x02000, + .vid_regs_base = 0x02400, + .mga2_crts_nr = 2, +}; + +static const struct pci_device_id pciidlist[] = { + { PCI_VDEVICE(MCST_TMP, PCI_DEVICE_ID_MCST_MGA26), + (kernel_ulong_t)&mga26_info }, + { PCI_VDEVICE(MCST_TMP, PCI_DEVICE_ID_MCST_MGA25), + (kernel_ulong_t)&mga25_info }, + { PCI_VDEVICE(MCST_TMP, PCI_DEVICE_ID_MCST_MGA2), + (kernel_ulong_t)&mga2_info }, + {}, +}; + +MODULE_DEVICE_TABLE(pci, pciidlist); + +static int +mga2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_pci_dev(pdev, ent, &driver); +} + +static void mga2_pci_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + drm_put_dev(dev); +} + +/* + * Userspace get information ioctl + */ +/** + * mga2_info_ioctl 
- answer a device specific request. + * + * @mga2: amdgpu device pointer + * @data: request object + * @filp: drm filp + * + * This function is used to pass device specific parameters to the userspace + * drivers. Examples include: pci device id, pipeline parms, tiling params, + * etc. (all asics). + * Returns 0 on success, -EINVAL on failure. + */ +static int mga2_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) +{ + struct mga2 *mga2 = dev->dev_private; + struct drm_mga2_info *info = data; + void __user *out = (void __user *)(uintptr_t)info->return_pointer; + uint32_t size = info->return_size; + + if (!info->return_size || !info->return_pointer) + return -EINVAL; + + switch (info->query) { + case MGA2_INFO_MEMORY: { + const struct drm_mm *mm = &mga2->vram_mm; + struct drm_mga2_memory_info mem = {}; + const struct drm_mm_node *entry = NULL; + u64 total_used = 0, total_free = 0, total = 0; + + total_free += mm->head_node.hole_size; + + drm_mm_for_each_node(entry, mm) { + total_used += entry->size; + total_free += entry->hole_size; + } + total = total_free + total_used; + mem.vram.total_heap_size = total; + mem.vram.usable_heap_size = total; + mem.vram.heap_usage = total_used; + mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4; + + memcpy(&mem.cpu_accessible_vram, &mem.vram, sizeof(mem.vram)); + return copy_to_user(out, &mem, + min((size_t)size, sizeof(mem))) + ? 
-EFAULT : 0; + } + default: + DRM_DEBUG_KMS("Invalid request %d\n", info->query); + return -EINVAL; + } + return 0; +} + + +struct drm_ioctl_desc mga2_ioctls[] = { + DRM_IOCTL_DEF_DRV(MGA2_BCTRL, mga2_bctrl_ioctl, DRM_AUTH | DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(MGA2_GEM_CREATE, mga2_gem_create_ioctl, DRM_AUTH | DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(MGA2_GEM_MMAP, mga2_gem_mmap_ioctl, DRM_AUTH | DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(MGA2_SYNC, mga2_gem_sync_ioctl, DRM_AUTH | DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(MGA2_INFO, mga2_info_ioctl, DRM_AUTH | DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(MGA2_AUC2, mga2_auc2_ioctl, DRM_AUTH | DRM_UNLOCKED), +}; + +#ifdef CONFIG_PM_SLEEP +static int mga2_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm = pci_get_drvdata(pdev); + int ret = drm_mode_config_helper_suspend(drm); + if (ret) + return ret; + mga2_reset(drm); + return 0; +} + +static int mga2_resume(struct device *dev) +{ + int ret; + struct drm_crtc *crtc; + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm = pci_get_drvdata(pdev); + struct mga2 *mga2 = drm->dev_private; + + if (pci_enable_device(pdev)) + return -EIO; + + mga2_reset(drm); + pci_set_master(pdev); + + if ((ret = mga2fb_bctrl_init(mga2))) + goto out; + + drm_for_each_crtc(crtc, drm) + mga2_crtc_hw_init(crtc); + + if ((ret = mga2_mode_init_hw(drm))) + goto out; + /*enable irqs*/ + mga2_driver_irq_postinstall(drm); + + ret = drm_mode_config_helper_resume(drm); +out: + return ret; +} +#endif + +static SIMPLE_DEV_PM_OPS(mga2_pm_ops, mga2_suspend, mga2_resume); + +static void mga2_pci_shutdown(struct pci_dev *pdev) +{ + struct drm_device *drm = pci_get_drvdata(pdev); + /* prevent dma during reboot & kexec */ + mga2_reset(drm); +} + +static struct pci_driver mga2_pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = mga2_pci_probe, + .remove = mga2_pci_remove, + .shutdown = mga2_pci_shutdown, + .driver.pm = &mga2_pm_ops, +}; + +static const struct 
file_operations mga2_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif + .mmap = mga2_mmap, + .poll = drm_poll, + .read = drm_read, +}; + +const struct vm_operations_struct mga2_gem_vm_ops = { + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static struct drm_driver driver = { + .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, + + .dev_priv_size = 0, + + .load = mga2_driver_load, + .unload = mga2_driver_unload, + .lastclose = mga2_lastclose, + + .fops = &mga2_fops, + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, + + .gem_free_object_unlocked = mga2_gem_free_object, + .gem_vm_ops = &mga2_gem_vm_ops, + + .dumb_create = mga2_dumb_create, + + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_get_sg_table = mga2_prime_get_sg_table, + .gem_prime_import_sg_table = mga2_prime_import_sg_table, + .gem_prime_vmap = mga2_prime_vmap, + .gem_prime_vunmap = mga2_prime_vunmap, + .gem_prime_mmap = mga2_prime_mmap, + + .get_vblank_counter = mga2_vblank_count, + .enable_vblank = mga2_enable_vblank, + .disable_vblank = mga2_disable_vblank, + .irq_preinstall = mga2_driver_irq_preinstall, + .irq_uninstall = mga2_driver_irq_uninstall, + .irq_handler = mga2_driver_irq_handler, + + .ioctls = mga2_ioctls, + .num_ioctls = DRM_ARRAY_SIZE(mga2_ioctls), + +#if defined(CONFIG_DEBUG_FS) + .debugfs_init = mga2_debugfs_init, +#endif +}; + +module_pci_driver(mga2_pci_driver); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/mcst/mga2/mga2_drv.h b/drivers/mcst/mga2/mga2_drv.h new file mode 100644 index 
000000000000..e24c6d7ecbf5 --- /dev/null +++ b/drivers/mcst/mga2/mga2_drv.h @@ -0,0 +1,386 @@ +#ifndef __MGA2_DRV_H__ +#define __MGA2_DRV_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "mga2_regs.h" + +#define MGA2_MAX_CRTS_NR 3 +#define MGA2_ENCODER_NR 1 +#define MGA2_HDMI_NR 2 + +#define MGA2_RING_SIZE 256 + +struct mga2_fbdev; +struct auc2_st; +struct bctrl; + +enum { + MGA2_DVI, + MGA2_HDMI1, + MGA2_HDMI2, + MGA2_LVDS, + + MGA2_CONNECTOR_NR +}; + +struct mga2_info { + int regs_bar; + int vram_bar; + int dc_regs_base; + int int_regs_base; + int vid_regs_base; + int mga2_crts_nr; +}; + +struct mga2 { + struct drm_device *drm; + void __iomem *regs; + resource_size_t regs_phys; + struct mga2_fbdev *fbdev; +#define MGA2_BCTRL_OFF (1 << 31) + int flags; + int used_lvds_channels; + u16 subdevice; + struct mga2_info *info; + + unsigned long base_freq; + unsigned long vram_paddr; + struct drm_mm vram_mm; + struct mutex vram_mu; + + struct mutex bctrl_mu; + struct bctrl *bctrl; + bool bctrl_active; + dma_addr_t bctrl_dma; + int head, tail; + + u64 *ring; + dma_addr_t ring_dma; + struct auc2_st *status; + dma_addr_t status_dma; + u64 *desc; + dma_addr_t desc_dma; + struct desc1 *desc1; + dma_addr_t desc1_dma; + struct desc0 *desc0; + dma_addr_t desc0_dma; + struct dma_fence mga2_fence[MGA2_RING_SIZE]; + spinlock_t fence_lock; + unsigned fence_seqno; + + atomic_t ring_int; + + struct i2c_adapter *dvi_i2c; + struct platform_device *mga2_hdmi_device[MGA2_HDMI_NR]; + struct i2c_adapter *hdmi_ddc[MGA2_HDMI_NR]; + + /* page-flip handling */ + struct drm_pending_vblank_event *event[MGA2_MAX_CRTS_NR]; + + struct msix_entry msix_entries[1]; +}; + +struct mga2_fbdev { + struct drm_fb_helper helper; + struct list_head fbdev_list; + dma_addr_t pixmap_dma; +}; + +int mga2_driver_load(struct drm_device *dev, unsigned long flags); +void mga2_driver_unload(struct drm_device *dev); +void 
mga2_lastclose(struct drm_device *dev); +void mga2_reset(struct drm_device *dev); + +struct mga2_connector { + struct drm_connector base; + struct i2c_adapter *ddci2c; + void __iomem *regs; +}; + +struct mga2_crtc { + struct drm_crtc base; + int index; + struct drm_gem_object *cursor_bo; + uint64_t cursor_offset; + void __iomem *cursor_addr; + void __iomem *regs; + int pll; + + struct i2c_adapter *i2c; + struct drm_pending_vblank_event *event; + struct drm_flip_work fb_unref_work; + unsigned long pending; +#define MGA2_PENDING_FB_UNREF 1 +#define MGA2_PENDING_FB_UNREF_DISABLE 2 + +}; + +struct mga2_framebuffer { + struct drm_framebuffer base; + struct drm_gem_object *gobj; +}; + +struct mga2_gem_object { + struct drm_gem_object base; + struct drm_mm_node node; + + void *vaddr; + dma_addr_t dma_addr; + struct sg_table *sgt; + struct page **pages; + + /** + * @read_domains: Read memory domains. + * + * These monitor which caches contain read/write data related to the + * object. When transitioning from one set of domains to another, + * the driver is called to ensure that caches are suitably flushed and + * invalidated. + */ + u16 read_domains; + + /** + * @write_domain: Corresponding unique write memory domain. 
+ */ + u16 write_domain; + + struct dma_resv resv; + +}; + +#define to_mga2_obj(x) container_of(x, struct mga2_gem_object, base) +#define to_mga2_crtc(x) container_of(x, struct mga2_crtc, base) +#define to_mga2_connector(x) container_of(x, struct mga2_connector, base) +#define to_mga2_framebuffer(x) container_of(x, struct mga2_framebuffer, base) +#define to_mga2_fbdev(x) container_of(x, struct mga2_fbdev, helper) +#define to_mga2_gem(x) container_of(x, struct mga2_gem_object, base) + +int mga2_mode_init_hw(struct drm_device *dev); +int mga2_mode_init(struct drm_device *dev); +void mga2_mode_fini(struct drm_device *dev); +int mga2fb_bctrl_init(struct mga2 *mga2); +int mga2fb_bctrl_fini(struct mga2 *mga2); +int mga2fb_bctrl_hw_init(struct mga2 *mga2); +void mga2_crtc_hw_init(struct drm_crtc *crtc); + +void mga2_mode_fini(struct drm_device *dev); + +int mga2_framebuffer_init(struct drm_device *dev, + struct mga2_framebuffer *mga2_fb, + struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object *obj); + +int mga2_fbdev_init(struct drm_device *dev); +void mga2_fbdev_fini(struct drm_device *dev); +void mga2_fbdev_set_suspend(struct drm_device *dev, int state); + +#define MGA2_MAX_HWC_WIDTH 64 +#define MGA2_MAX_HWC_HEIGHT 64 + +#define MGA2_HWC_SIZE (MGA2_MAX_HWC_WIDTH*MGA2_MAX_HWC_HEIGHT*4) + +int mga2_cursor_move(struct drm_crtc *crtc, int x, int y); + +void mga2_cursor_show(struct drm_crtc *crtc, u32 addr); +void mga2_cursor_hide(struct drm_crtc *crtc); +extern void mga2_gem_free_object(struct drm_gem_object *obj); +extern int mga2_dumb_create(struct drm_file *file, + struct drm_device *dev, + struct drm_mode_create_dumb *args); + +#define DRM_FILE_PAGE_OFFSET ((0xFFFFFFFUL >> PAGE_SHIFT) + 1) + +struct drm_gem_object *mga2_gem_create(struct drm_device *dev, size_t size, + u32 domain); +struct drm_gem_object *mga2_gem_create_with_handle(struct drm_file *file, + struct drm_device *dev, + size_t size, u32 domain, + u32 *handle); + +int mga2_mmap(struct file *filp, struct 
vm_area_struct *vma); + +u32 mga2_vblank_count(struct drm_device *dev, unsigned int crtc); +int mga2_enable_vblank(struct drm_device *dev, unsigned int crtc); +void mga2_disable_vblank(struct drm_device *dev, unsigned int crtc); +irqreturn_t mga2_driver_irq_handler(int irq, void *arg); +void mga2_driver_irq_preinstall(struct drm_device *dev); +int mga2_driver_irq_postinstall(struct drm_device *dev); +void mga2_driver_irq_uninstall(struct drm_device *dev); +void mga2_irq_sw_irq_get(struct mga2 *mga2); +void mga2_irq_sw_irq_put(struct mga2 *mga2); + +void mga2_update_ptr(struct mga2 *mga2); + +int mga2_auc2_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); +int mga2_bctrl_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); + +int mga2_gem_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); +int mga2_gem_mmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); +int mga2_gem_sync_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); + +#define MGA2_PCI_PROTO 0 +#define MGA2_P2_PROTO 1 +#define MGA2_P2 2 +#define MGA25_PCI_PROTO 3 +#define MGA25_PROTO 4 +#define MGA25 5 +#define MGA26_PCI_PROTO 6 +#define MGA26_PROTO 7 +#define MGA26 8 + +static inline bool mga2_p2(struct mga2 *mga2) +{ + if (mga2->subdevice == MGA2_P2 || mga2->subdevice == MGA2_P2_PROTO) + return true; + return false; +} + +static inline bool mga25(struct mga2 *mga2) +{ + if (mga2->subdevice >= MGA25_PCI_PROTO) + return true; + return false; +} + +static inline bool mga2_has_vram(struct mga2 *mga2) +{ + switch (mga2->subdevice) { + case MGA25_PROTO: + case MGA26_PROTO: + case MGA25: + case MGA26: + return false; + } + return true; +} + +static inline bool mga2_use_uncached(struct mga2 *mga2) +{ + switch (mga2->subdevice) { + case MGA26_PROTO: + case MGA26: + return true; + } + return false; +} + +static inline bool mga2_proto(struct mga2 *mga2) +{ + switch (mga2->subdevice) { + case MGA25_PCI_PROTO: + case MGA25_PROTO: + 
case MGA26_PROTO: + case MGA26_PCI_PROTO: + return true; + } + return false; +} + +extern int mga2_timeout_ms; +static inline int mga2_timeout(struct mga2 *mga2) +{ + return mga2_timeout_ms; +} + + +struct mga2_clk { + int nr, od, nb; + long long nf, nf_i, nf_f; +}; + +struct mga2_div { + int pix, aux; +}; + +void mga2_pll_init_pixclock(struct i2c_adapter *adapter); +int _mga2_ext_pll_set_pixclock(int pll, struct i2c_adapter *adapter, + unsigned long clock_khz); + +int mga2_calc_int_pll(struct mga2_clk *res, const unsigned long long fout, + unsigned long long *rfvco, unsigned long long *rerr); + +int mga25_calc_int_pll(struct mga2_clk *res, const unsigned long long fout, + unsigned long long *rfvco, unsigned long long *rerr); + +struct i2c_adapter *mga2_i2c_create(struct device *parent, resource_size_t regs, + char *name, unsigned base_freq_hz, + unsigned desired_freq_hz); +void mga2_i2c_destroy(struct i2c_adapter *i2c); + +int mga2_dvi_init(struct drm_device *dev, void __iomem *regs, + resource_size_t regs_phys); +int mga2_debugfs_bctrl(struct seq_file *s, void *data); +int mga2_common_connector_init(struct drm_device *dev, + resource_size_t regs_phys, + int connector_type, bool i2c, + uint32_t possible_crtcs); +int mga2_hdmi_it6613_connector_init(struct drm_device *dev, + resource_size_t regs_phys); + +#if defined(CONFIG_DEBUG_FS) +int mga2_debugfs_init(struct drm_minor *minor); +#endif + +#define __rvidc(__addr) readl(vid_regs + \ + (MGA2_VID0_ ## __addr)) +#define __wvidc(__v, __addr) writel(__v, vid_regs + \ + (MGA2_VID0_ ## __addr)) + +#ifdef DEBUG +#define rvidc(__offset) \ +({ \ + unsigned __val = __rvidc(__offset); \ + DRM_DEBUG_KMS("R: %x: %s\n", __val, # __offset); \ + __val; \ +}) + +#define wvidc(__val, __offset) \ +({ \ + unsigned __val2 = __val; \ + DRM_DEBUG_KMS("W: %x: %s\n", __val2, # __offset); \ + __wvidc(__val2, __offset); \ +}) + +#else +#define rvidc __rvidc +#define wvidc __wvidc +#endif + + +int drm_vblank_get(struct drm_device *dev, 
unsigned int pipe); +void drm_vblank_put(struct drm_device *dev, unsigned int pipe); + +extern bool mga2_use_external_pll; +extern int mga2_lvds_channels; + +/* low-level interface prime helpers */ + +struct sg_table *mga2_prime_get_sg_table(struct drm_gem_object *obj); +struct drm_gem_object * +mga2_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt); +int mga2_prime_mmap(struct drm_gem_object *obj, + struct vm_area_struct *vma); +void *mga2_prime_vmap(struct drm_gem_object *obj); +void mga2_prime_vunmap(struct drm_gem_object *obj, void *vaddr); + +struct drm_plane **mga2_layers_init(struct drm_device *drm); +void mga2_handle_vblank(struct drm_device *drm, int crtc); + +#endif /*__MGA2_DRV_H__*/ diff --git a/drivers/mcst/mga2/mga2_dvi.c b/drivers/mcst/mga2/mga2_dvi.c new file mode 100644 index 000000000000..e2ad58143216 --- /dev/null +++ b/drivers/mcst/mga2/mga2_dvi.c @@ -0,0 +1,219 @@ +#define DEBUG + +#include "mga2_drv.h" +#include +#include +#include + + +static int mga2_get_modes(struct drm_connector *connector) +{ + struct mga2_connector *mga2_connector = to_mga2_connector(connector); + struct edid *edid = NULL; + int ret; + if (!mga2_connector->ddci2c) { + /* Just add a static list of modes */ + drm_add_modes_noedid(connector, 640, 480); + drm_add_modes_noedid(connector, 800, 600); + drm_add_modes_noedid(connector, 1024, 768); + drm_add_modes_noedid(connector, 1280, 1024); + return 1; + } + edid = drm_get_edid(connector, mga2_connector->ddci2c); + if (edid) { + drm_connector_update_edid_property(&mga2_connector->base, edid); + ret = drm_add_edid_modes(connector, edid); + kfree(edid); + return ret; + } else + drm_connector_update_edid_property(&mga2_connector->base, NULL); + return 0; +} + +static int mga2_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ +#if 0 + if (mode->hdisplay > 1280) + return MODE_VIRTUAL_X; + if (mode->vdisplay > 1200) + return MODE_VIRTUAL_Y; + + if 
(mode->hdisplay > 1920) + return MODE_VIRTUAL_X; + if (mode->vdisplay > 1200) + return MODE_VIRTUAL_Y; +#endif + return MODE_OK; +} + +static void mga2_connector_destroy(struct drm_connector *connector) +{ + struct mga2_connector *mga2_connector = to_mga2_connector(connector); + mga2_i2c_destroy(mga2_connector->ddci2c); + drm_connector_unregister(connector); + drm_connector_cleanup(connector); + kfree(connector); +} + +static enum drm_connector_status +mga2_connector_detect(struct drm_connector *connector, bool force) +{ + struct mga2_connector *mga2_connector = to_mga2_connector(connector); + void __iomem *vid_regs = mga2_connector->regs; + + return rvidc(GPIO_IN) & MGA2_VID0_GPIO_MSEN ? + connector_status_connected : + connector_status_disconnected; +} + +struct drm_encoder *mga2_drm_connector_encoder(struct drm_connector *conn) +{ + struct drm_encoder *enc = conn->encoder; + + return enc ? enc : drm_encoder_find(conn->dev, NULL, + conn->encoder_ids[0]); +} + +static const struct drm_connector_helper_funcs mga2_connector_helper_funcs = { + .mode_valid = mga2_mode_valid, + .get_modes = mga2_get_modes, + .best_encoder = mga2_drm_connector_encoder, +}; + +static const struct drm_connector_funcs mga2_connector_funcs = { + .detect = mga2_connector_detect, + /* + * Must be mga2_connector_destroy: it also destroys the DDC i2c + * adapter, unregisters the connector and frees it. A second + * ".destroy = drm_connector_cleanup" initializer would silently + * override this one (the last designated initializer wins) and + * leak the adapter and the connector allocation. + */ + .destroy = mga2_connector_destroy, + + .fill_modes = drm_helper_probe_single_connector_modes, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static void mga2_drm_slave_destroy(struct drm_encoder *enc) +{ + struct drm_encoder_slave *slave = to_encoder_slave(enc); + struct i2c_client *client = drm_i2c_encoder_get_client(enc); + + if (slave->slave_funcs) + slave->slave_funcs->destroy(enc); + if (client) + i2c_put_adapter(client->adapter); + + drm_encoder_cleanup(&slave->base); + kfree(slave); +} + +static const struct drm_encoder_funcs 
mga2_drm_slave_encoder_funcs = { + .destroy = mga2_drm_slave_destroy, +}; + +static const struct drm_encoder_helper_funcs drm_slave_encoder_helpers = { + .dpms = drm_i2c_encoder_dpms, + .mode_fixup = drm_i2c_encoder_mode_fixup, + .prepare = drm_i2c_encoder_prepare, + .commit = drm_i2c_encoder_commit, + .mode_set = drm_i2c_encoder_mode_set, + .detect = drm_i2c_encoder_detect, +}; + +#define DVO_SIL1178_MASTER_ADDR (0x70 >> 1) /* 7 bit addressing */ +#define DVO_SIL1178_SLAVE_ADDR (0x72 >> 1) /* 7 bit addressing */ + +static struct i2c_board_info mga2_dvi_sil1178_info = { + .type = "sil1178", + .addr = DVO_SIL1178_MASTER_ADDR, + .platform_data = &(struct sil164_encoder_params) { + .input_edge = SIL164_INPUT_EDGE_RISING + } +}; + +int mga2_dvi_init(struct drm_device *dev, void __iomem *regs, + resource_size_t regs_phys) +{ + struct mga2_connector *mga2_connector; + struct drm_connector *conn; + struct mga2 *mga2 = dev->dev_private; + struct drm_encoder_slave *slave; + struct i2c_adapter *adap; + struct drm_crtc *crtc; + uint32_t crtc_mask = 0; + int ret = 0; + + slave = kzalloc(sizeof(*slave), GFP_KERNEL); + if (!slave) + return -ENOMEM; + + drm_for_each_crtc(crtc, dev) + crtc_mask |= drm_crtc_mask(crtc); + + slave->base.possible_crtcs = crtc_mask; + + adap = mga2->dvi_i2c; + + ret = drm_encoder_init(dev, &slave->base, + &mga2_drm_slave_encoder_funcs, + DRM_MODE_ENCODER_TMDS, NULL); + if (ret) { + DRM_ERROR("unable to init encoder\n"); + i2c_put_adapter(adap); + kfree(slave); + return ret; + } + + ret = drm_i2c_encoder_init(dev, slave, + adap, &mga2_dvi_sil1178_info); + if (ret) { + if (ret != -ENODEV) + DRM_ERROR("unable to init encoder slave\n"); + mga2_drm_slave_destroy(&slave->base); + return ret; + } + + drm_encoder_helper_add(&slave->base, &drm_slave_encoder_helpers); + + mga2_connector = kzalloc(sizeof(struct mga2_connector), GFP_KERNEL); + if (!mga2_connector) { + mga2_drm_slave_destroy(&slave->base); + return -ENOMEM; + }; + + mga2_connector->regs = regs; + 
mga2_connector->ddci2c = mga2_i2c_create(dev->dev, regs_phys + + MGA2_VID0_DDCI2C, "dvi ddc", + mga2->base_freq, 100 * 1000); + + if (!mga2_connector->ddci2c) { + mga2_drm_slave_destroy(&slave->base); + return -1; + } + + conn = &mga2_connector->base; + drm_connector_init(dev, conn, &mga2_connector_funcs, + DRM_MODE_CONNECTOR_DVID); + + conn->interlace_allowed = 0; + conn->doublescan_allowed = 0; + conn->polled = DRM_CONNECTOR_POLL_CONNECT | + DRM_CONNECTOR_POLL_DISCONNECT; + + drm_connector_helper_add(conn, &mga2_connector_helper_funcs); + + drm_connector_register(conn); + + ret = slave->slave_funcs->create_resources(&slave->base, conn); + if (ret) { + mga2_drm_slave_destroy(&slave->base); + return ret; + } + + ret = drm_connector_attach_encoder(conn, &slave->base); + if (ret) { + mga2_drm_slave_destroy(&slave->base); + return ret; + } + + return ret; +} diff --git a/drivers/mcst/mga2/mga2_fb.c b/drivers/mcst/mga2/mga2_fb.c new file mode 100644 index 000000000000..4ec5ded73c56 --- /dev/null +++ b/drivers/mcst/mga2/mga2_fb.c @@ -0,0 +1,316 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mga2_drv.h" + +MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); +static int mga2_nofbaccel = 0; +module_param_named(nofbaccel, mga2_nofbaccel, int, 0400); +MODULE_PARM_DESC(nohwcursor, "Disable hardware cursor"); +static int mga2_nohwcursor = 0; +module_param_named(nohwcursor, mga2_nohwcursor, int, 0400); + +#define __rfb(__addr) readl(mga2->regs + __addr) +#define __wfb(__v, __addr) writel(__v, mga2->regs + __addr) + +#ifdef DEBUG +#define rfb(__offset) \ +({ \ + unsigned __val = __rfb(__offset); \ + /*DRM_DEBUG_KMS("R: %x: %s\n", __val, # __offset);*/ \ + __val; \ +}) + +#define wfb(__val, __offset) \ +({ \ + unsigned __val2 = __val; \ + DRM_DEBUG_KMS("W: %x: %s\n", __val2, # __offset); \ + /*printk(KERN_DEBUG"%x %x\n", MGA2_DC0_ ## __offset, __val2);*/ \ + __wfb(__val2, __offset); \ 
+}) + +#else +#define rfb __rfb +#define wfb __wfb +#endif + +static int __mga2_sync(struct mga2 *mga2); +static int get_free_desc(struct mga2 *mga2); +static int append_desc(struct mga2 *mga2, struct mga2_gem_object *mo); +static struct mga2_gem_object *mga2_auc_ioctl(struct drm_device *dev, + void *data, struct drm_file *filp); +static void __mga2_update_ptr(struct mga2 *mga2); + +#include "mga2_bctrl.c" +#include "mga2_auc2.c" +#include "mga2_fbdev.c" + +static int mga2fb_create_object(struct mga2_fbdev *fbdev, + struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object **gobj_p) +{ + struct drm_device *drm = fbdev->helper.dev; + u32 size; + struct drm_gem_object *gobj; + + size = mode_cmd->pitches[0] * mode_cmd->height; + gobj = mga2_gem_create(drm, size, MGA2_GEM_DOMAIN_VRAM); + + if (IS_ERR(gobj)) + return PTR_ERR(gobj); + + *gobj_p = gobj; + return 0; +} + +static int mga2fb_create(struct mga2_fbdev *fbdev, + struct drm_fb_helper_surface_size *sizes) +{ + struct drm_device *drm = fbdev->helper.dev; + struct mga2 *mga2 = drm->dev_private; + struct drm_mode_fb_cmd2 mode_cmd; + struct drm_framebuffer *dfb; + struct fb_info *info; + int size, ret; + struct mga2_framebuffer *mga2_fb; + struct drm_gem_object *gobj = NULL; + mode_cmd.width = sizes->surface_width; + mode_cmd.height = sizes->surface_height; + mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8); + + mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, + sizes->surface_depth); + + size = mode_cmd.pitches[0] * mode_cmd.height; + + info = drm_fb_helper_alloc_fbi(&fbdev->helper); + if (IS_ERR(info)) { + dev_err(drm->dev, "failed to allocate framebuffer info\n"); + return PTR_ERR(info); + } + + ret = mga2fb_create_object(fbdev, &mode_cmd, &gobj); + if (ret) { + DRM_ERROR("failed to create fbcon backing object %d\n", ret); + return ret; + } + + mga2_fb = kzalloc(sizeof(*mga2_fb), GFP_KERNEL); + if (!mga2_fb) { + return -ENOMEM; + } + ret = mga2_framebuffer_init(drm, 
mga2_fb, &mode_cmd, gobj); + if (ret) + goto out; + + dfb = &mga2_fb->base; + fbdev->helper.fb = dfb; + + info->flags = FBINFO_DEFAULT; + /*TODO: + FBINFO_READS_FAST | + FBINFO_HWACCEL_XPAN | + FBINFO_HWACCEL_YPAN; + */ + + switch (mga2_nofbaccel) { + case 1: + info->flags |= FBINFO_HWACCEL_DISABLED; + break; + case 2: + mga2->flags |= MGA2_BCTRL_OFF; + info->flags |= FBINFO_HWACCEL_COPYAREA | + FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; + break; + case 3: + mga2->flags |= MGA2_BCTRL_OFF; + info->flags |= FBINFO_HWACCEL_COPYAREA; + break; + case 4: + mga2->flags |= MGA2_BCTRL_OFF; + info->flags |= FBINFO_HWACCEL_FILLRECT; + break; + case 5: + mga2->flags |= MGA2_BCTRL_OFF; + info->flags |= FBINFO_HWACCEL_IMAGEBLIT; + break; + default: + info->flags |= FBINFO_HWACCEL_COPYAREA | + FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; + } + + if (mga2_nohwcursor) + mga2fb_ops.fb_cursor = NULL; + + info->fbops = &mga2fb_ops; + + drm_fb_helper_fill_info(info, &fbdev->helper, sizes); + + info->apertures->ranges[0].base = pci_resource_start(drm->pdev, 0); + info->apertures->ranges[0].size = pci_resource_len(drm->pdev, 0); + + info->screen_base = to_mga2_obj(mga2_fb->gobj)->vaddr; + info->screen_size = size; + + info->fix.smem_len = size; + info->pixmap.flags = FB_PIXMAP_SYSTEM; + + DRM_DEBUG_KMS("allocated %dx%d\n", dfb->width, dfb->height); + DRM_INFO("fb is %dx%d-%d\n", sizes->fb_width, + sizes->fb_height, dfb->format->depth); + DRM_INFO(" pitch is %d\n", dfb->pitches[0]); + + return 0; + out: + return ret; +} + +static int mga2_find_or_create_single(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct mga2_fbdev *fb = to_mga2_fbdev(helper); + int new_fb = 0; + int ret; + + if (!helper->fb) { + ret = mga2fb_create(fb, sizes); + if (ret) + return ret; + new_fb = 1; + } + return new_fb; +} + +static struct drm_fb_helper_funcs mga2_fb_helper_funcs = { + .fb_probe = mga2_find_or_create_single, +}; + +static void 
mga2_fbdev_destroy(struct drm_device *drm, struct mga2_fbdev *fb) +{ + if (!fb) + return; + + drm_fb_helper_unregister_fbi(&fb->helper); + if (fb->helper.fbdev && fb->pixmap_dma) { + struct mga2 *mga2 = drm->dev_private; + struct fb_info *info = fb->helper.fbdev; + dma_unmap_single(mga2->drm->dev, fb->pixmap_dma, + info->pixmap.size, DMA_TO_DEVICE); + } + /* release drm framebuffer and real buffer */ + if (fb->helper.fb) + drm_framebuffer_remove(fb->helper.fb); + + drm_fb_helper_fini(&fb->helper); +} + +int mga2fb_bctrl_hw_init(struct mga2 *mga2) +{ + int ret = 0; + wfb(0, MGA2_BB_SRC64); + wfb(0, MGA2_BB_DST64); + if (mga25(mga2)) + ret = mga2fb_auc2_hw_init(mga2); + if (ret) + return ret; + return __mga2fb_bctrl_hw_init(mga2); +} + +int mga2fb_bctrl_init(struct mga2 *mga2) +{ + int ret = 0; + + if (mga25(mga2)) + ret = mga2fb_auc2_init(mga2); + if (ret) + return ret; + ret = __mga2fb_bctrl_init(mga2); + if (ret) + return ret; + return mga2fb_bctrl_hw_init(mga2); +} + +int mga2fb_bctrl_fini(struct mga2 *mga2) +{ + int ret = 0; + if (mga25(mga2)) + ret = mga2fb_auc2_fini(mga2); + if (ret) + return ret; + return __mga2fb_bctrl_fini(mga2); +} + +int mga2_fbdev_init(struct drm_device *drm) +{ + struct mga2 *mga2 = drm->dev_private; + struct mga2_fbdev *fb; + int ret; + + if (WARN_ON(!drm->mode_config.num_crtc || !drm->mode_config.num_connector)) + return 0; + + drm_mode_config_reset(drm); + + fb = kzalloc(sizeof(struct mga2_fbdev), GFP_KERNEL); + if (!fb) + return -ENOMEM; + + mga2->fbdev = fb; + + drm_fb_helper_prepare(drm, &fb->helper, &mga2_fb_helper_funcs); + + ret = drm_fb_helper_init(drm, &fb->helper, MGA2_CONNECTOR_NR); + if (ret) + goto fail; + + ret = drm_fb_helper_single_add_all_connectors(&fb->helper); + if (ret) + goto fail; + + ret = drm_fb_helper_initial_config(&fb->helper, 32); + if (ret) + goto fail; + + return 0; + fail: + kfree(fb); + return ret; +} + +void mga2_fbdev_fini(struct drm_device *drm) +{ + struct mga2 *mga2 = drm->dev_private; + + if 
(!mga2->fbdev) + return; + mga2_fbdev_destroy(drm, mga2->fbdev); + kfree(mga2->fbdev); + mga2->fbdev = NULL; +} + +void mga2_fbdev_set_suspend(struct drm_device *drm, int state) +{ + struct mga2 *mga2 = drm->dev_private; + + if (!mga2->fbdev) + return; + + fb_set_suspend(mga2->fbdev->helper.fbdev, state); +} + +int mga2_gem_sync_ioctl(struct drm_device *drm, void *data, + struct drm_file *filp) +{ + struct mga2 *mga2 = drm->dev_private; + return __mga2_sync(mga2); +} diff --git a/drivers/mcst/mga2/mga2_fbdev.c b/drivers/mcst/mga2/mga2_fbdev.c new file mode 100644 index 000000000000..6d88d7659564 --- /dev/null +++ b/drivers/mcst/mga2/mga2_fbdev.c @@ -0,0 +1,843 @@ + +#define MGA2_BB_SZ 0x400 +/* + ******************************************************************************* + * MMIO BitBlt Module Registers + ******************************************************************************* + */ +#define REG_BB_CTRL 0x1000 /* BitBlt module control register (write only) */ +#define REG_BB_STAT 0x1000 /* BitBlt module status register (read only) */ + +#define REG_BB_WINDOW 0x1004 /* Operation geometry */ +#define REG_BB_SADDR 0x1008 /* Source start address */ +#define REG_BB_DADDR 0x100c /* Destination start address */ +#define REG_BB_PITCH 0x1010 /* */ +#define REG_BB_BG 0x1014 /* Background color */ +#define REG_BB_FG 0x1018 /* Foreground color */ + +/* BitBlt status register bits */ +#define BB_STAT_PROCESS (0x1<<31) /* 1 - processing operation, 0 - idle */ +#define BB_STAT_FULL (0x1<<30) /* 1 - pipeline full */ +#define BB_STAT_DMA (0x1<<26) /* DMA support */ + +#define BB_CTRL_CMD_MASK 0xC0000000 +#define BB_CTRL_CMD_START (0x1<<31) +#define BB_CTRL_CMD_ABORT (0x1<<30) + + +#define BB_CTRL_BITS_IN_BYTE_TWISTER (0x1<<22) + +#define BB_CTRL_DDMA_EN (0x1<<21) +#define BB_CTRL_SDMA_EN (0x1<<20) +#define BB_CTRL_SOFFS_MASK (0x7<<16) + +/* Binary raster operations */ +#define BB_CTRL_ROP_MASK 0x0000F000 + +#define BB_CTRL_ROP_0 (0x0<<12) /* clear */ +#define 
BB_CTRL_ROP_AND (0x1<<12) /* and */ +#define BB_CTRL_ROP_NOT_SRC_AND_DST (0x2<<12) /* andReverse */ +#define BB_CTRL_ROP_DST (0x3<<12) /* copy */ +#define BB_CTRL_ROP_SRC_AND_NOT_DST (0x4<<12) /* andInverted */ +#define BB_CTRL_ROP_SRC (0x5<<12) /* noop */ +#define BB_CTRL_ROP_XOR (0x6<<12) /* xor */ +#define BB_CTRL_ROP_OR (0x7<<12) /* or */ +#define BB_CTRL_ROP_NOR (0x8<<12) /* nor */ +#define BB_CTRL_ROP_NXOR (0x9<<12) /* equiv */ +#define BB_CTRL_ROP_NOT_SRC (0xa<<12) /* invert */ +#define BB_CTRL_ROP_NOT_SRC_OR_DST (0xb<<12) /* orReverse */ +#define BB_CTRL_ROP_NOT_DST (0xc<<12) /* copyInverted */ +#define BB_CTRL_ROP_SRC_OR_NOT_DST (0xd<<12) /* orInverted */ +#define BB_CTRL_ROP_NAND (0xe<<12) /* nand */ +#define BB_CTRL_ROP_1 (0xf<<12) /* set */ + +#define BB_CTRL_HDIR (0x1<<5) +#define BB_CTRL_VDIR (0x1<<6) + +#define BB_CTRL_CE_EN (0x1<<0) +#define BB_CTRL_PAT_EN (0x1<<1) +#define BB_CTRL_SFILL_EN (0x1<<2) +#define BB_CTRL_TR_EN (0x1<<4) + +#define BB_CTRL_SRC_MODE (0x1<<7) + +#define BB_CTRL_TERM_00 (0x0<<8) +#define BB_CTRL_TERM_01 (0x1<<8) +#define BB_CTRL_TERM_10 (0x2<<8) + +#define BB_CTRL_BPP_8 (0x0<<10) +#define BB_CTRL_BPP_16 (0x1<<10) +#define BB_CTRL_BPP_24 (0x2<<10) +#define BB_CTRL_BPP_32 (0x3<<10) +#ifdef __BIG_ENDIAN +#define BB_CTRL_BPP_CD_8 (BB_CTRL_BPP_8) +#define BB_CTRL_BPP_CD_16 (BB_CTRL_BPP_16 | 0x0800000) +#define BB_CTRL_BPP_CD_24 (BB_CTRL_BPP_24 | 0x1800000) +#define BB_CTRL_BPP_CD_32 (BB_CTRL_BPP_32 | 0x1800000) +#elif defined(__LITTLE_ENDIAN) +#define BB_CTRL_BPP_CD_8 BB_CTRL_BPP_8 +#define BB_CTRL_BPP_CD_16 BB_CTRL_BPP_16 +#define BB_CTRL_BPP_CD_24 BB_CTRL_BPP_24 +#define BB_CTRL_BPP_CD_32 BB_CTRL_BPP_32 +#else +#error byte order not defined +#endif + + +#define MGA2_BB_R0 0x01000 +#define MGA2_BB_R7 0x0101C /* base registers of MGA-compatible blitter */ +#define MGA2_BB_FMTCFG 0x01020 /* pixel format control for alpha-op */ +#define MGA2_BB_ASRC 0x01024 /* Fs calculation */ +#define MGA2_BB_ADST 0x01028 /* Fd calculation */ 
+#define MGA2_BB_PALADDR 0x0102C /* set LUT address (palette) for 1bpp/4bpp/8bpp formats */ +#define MGA2_BB_PALDATA 0x01030 /* write data to LUT element (palette) for 1bpp/4bpp/8bpp formats */ +#define MGA2_BB_SRC64 0x01034 /* high part of 64-bit DMA address */ + /* in system memory for source channel */ +#define MGA2_BB_DST64 0x01038 /* low part of 64-bit DMA address */ + /* in system memory for destination channel */ + + +static bool mga2_drm_is_open(struct drm_device *dev) +{ + /* + * FIXME: open_count is protected by drm_global_mutex but that would lead + * to locking inversion with the driver load path. And the access here is + * completely racy anyway. So don't bother with locking for now. + */ + return dev->open_count != 0; +} + +static u32 mga2_get_busy(struct mga2 *mga2) +{ + u32 busy = rfb(REG_BB_CTRL) & BB_STAT_PROCESS; + if (busy) + return busy; + if (mga2_p2(mga2)) { + busy = (rfb(MGA2_BCTRL_STATUS) & MGA2_BCTRL_B_BUSY) || + (rfb(MGA2_SYSMUX_BITS) & MGA2_SYSMUX_BLT_WR_BUSY) || + (rfb(MGA2_VIDMUX_BITS) & MGA2_VIDMUX_BLT_WR_BUSY); + } else if (mga25(mga2)) { + busy = (rfb(MGA2_AUC2_CTRLSTAT) & MGA2_AUC2_B_BUSY) || + (rfb(REG_BB_CTRL + MGA2_BB_SZ) & BB_STAT_PROCESS) || + rfb(MGA25_SYSMUX_BITS) || + rfb(MGA25_VMMUX_BITS) || + (rfb(MGA2_BCTRL_STATUS) & MGA2_BCTRL_B_BUSY); + } + return busy; +} + +static int ___mga2_sync(struct mga2 *mga2) +{ + int ret = 0, i; + int timeout_usec = mga2_timeout(mga2) * 1000; + + for (i = 0; i < timeout_usec; i++) { + u32 busy = mga2_get_busy(mga2); + + if (!busy) + break; + udelay(1); + } + + if (i == timeout_usec) { + mga2->flags |= MGA2_BCTRL_OFF; + DRM_ERROR("sync timeout\n"); + ret = -ETIME; + } + return ret; +} + +static u64 mga2_get_current_desc(struct mga2 *mga2) +{ + if (mga25(mga2)) + return auc2_get_current_desc(mga2); + else + return bctrl_get_current_desc(mga2); +} + +static int __mga2_sync(struct mga2 *mga2) +{ + long ret = 0, timeout = msecs_to_jiffies(mga2_timeout(mga2)); + int n = circ_dec(mga2->head); + 
struct dma_fence *fence = &mga2->mga2_fence[n]; + u64 current_desc = mga2_get_current_desc(mga2); + if (mga2->flags & MGA2_BCTRL_OFF) + goto cant_sleep; + + if (circ_idle(mga2)) + return 0; + + if (in_atomic() || in_dbg_master() || irqs_disabled()) + goto cant_sleep; + + while (0 == (ret = dma_fence_wait_timeout(fence, true, timeout))) { + /* Timeout */ + u64 d = mga2_get_current_desc(mga2); + if (d == current_desc) /* AUC's stuck */ + break; + /* AUC is still working, let's wait. */ + current_desc = d; + } + if (ret == 0) { + ret = -ETIMEDOUT; + mga2->flags |= MGA2_BCTRL_OFF; + dma_fence_signal(fence); + DRM_ERROR("fence %d wait timed out.\n", n); + } else if (ret < 0) { + DRM_DEBUG("fence %d wait failed (%ld).\n", n, ret); + } else { + ret = 0; + } + return ret; +cant_sleep: + return ___mga2_sync(mga2); +} + +/* + * fb_sync - NOT a required function. Normally the accel engine + * for a graphics card take a specific amount of time. + * Often we have to wait for the accelerator to finish + * its operation before we can write to the framebuffer + * so we can have consistent display output. + * + * @info: frame buffer structure that represents a single frame buffer + * + * If the driver has implemented its own hardware-based drawing function, + * implementing this function is highly recommended. 
+ */ + +static int mga2_sync(struct fb_info *info) +{ + struct mga2_fbdev *fb = info->par; + struct mga2 *mga2 = fb->helper.dev->dev_private; + if (!mga2_drm_is_open(mga2->drm)) + return __mga2_sync(mga2); + return 0; +} + +static inline struct mga2 *fence_to_mga2(struct dma_fence *f) +{ + unsigned n = f->seqno % MGA2_RING_SIZE; + f -= n; + return container_of(f, struct mga2, mga2_fence[0]); +} + +/* + * Common fence implementation + */ + +static const char *mga2_fence_get_driver_name(struct dma_fence *fence) +{ + return "mga2"; +} + +static const char *mga2_fence_get_timeline_name(struct dma_fence *f) +{ + return "mga2-auc"; +} + +/** + * mga2_fence_enable_signaling - enable signalling on fence + * @fence: fence + * + * This function is called with fence_queue lock held, and adds a callback + * to fence_queue that checks if this fence is signaled, and if so it + * signals the fence and removes itself. + */ +static bool mga2_fence_enable_signaling(struct dma_fence *f) +{ + struct mga2 *mga2 = fence_to_mga2(f); + + mga2_irq_sw_irq_get(mga2); + DMA_FENCE_TRACE(f, "armed on ring!\n"); + + return true; +} + +/** + * mga2_fence_release - callback that fence can be freed + * + * @fence: fence + * + * This function is called when the reference count becomes zero. 
+ */ +static void mga2_fence_release(struct dma_fence *f) +{ +} + +static const struct dma_fence_ops mga2_fence_ops = { +//FIXME: .use_64bit_seqno = true, + .get_driver_name = mga2_fence_get_driver_name, + .get_timeline_name = mga2_fence_get_timeline_name, + .enable_signaling = mga2_fence_enable_signaling, + .release = mga2_fence_release, +}; + + +static void __mga2_update_ptr(struct mga2 *mga2) +{ + if (mga25(mga2) && !mga2->bctrl_active) + auc2_update_ptr(mga2); + else + bctrl_update_ptr(mga2); +} + +void mga2_update_ptr(struct mga2 *mga2) +{ + int h, t; + unsigned long flags; + spin_lock_irqsave(&mga2->fence_lock, flags); + h = mga2->tail; + __mga2_update_ptr(mga2); + t = circ_inc(mga2->tail); + + for (; __circ_space(h, t); h = circ_inc(h)) { + struct dma_fence *f = &mga2->mga2_fence[h]; + if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &f->flags)) + mga2_irq_sw_irq_put(mga2); + dma_fence_signal_locked(f); + } + spin_unlock_irqrestore(&mga2->fence_lock, flags); +} + +static int wait_for_ring(struct mga2 *mga2) +{ + struct dma_fence *fence; + long ret = 0; + int n, timeout = msecs_to_jiffies(mga2_timeout(mga2)); + + mga2_update_ptr(mga2); + if (circ_space(mga2)) + return 0; + + if (in_atomic() || in_dbg_master() || irqs_disabled()) { + ret = ___mga2_sync(mga2); + if (ret) + return ret; + mga2_update_ptr(mga2); + if (!circ_space(mga2)) + return -ENOSPC; + } + n = mga2->tail; + fence = &mga2->mga2_fence[n]; + ret = dma_fence_wait_timeout(fence, true, timeout); + if (ret == 0) { + ret = -ETIMEDOUT; + mga2->flags |= MGA2_BCTRL_OFF; + dma_fence_signal(fence); + DRM_ERROR("fence %d wait timed out.\n", n); + } else if (ret < 0) { + DRM_DEBUG("fence %d wait failed (%ld).\n", n, ret); + } else { + ret = 0; + } + + return ret; +} + +static int __get_free_desc(struct mga2 *mga2) +{ + int ret, h; + unsigned seqno; + struct dma_fence *fence; + if (!circ_space(mga2)) { + if ((ret = wait_for_ring(mga2))) + return ret; + } + h = mga2->head; + fence = &mga2->mga2_fence[h]; + seqno 
= mga2->fence_seqno; + BUG_ON(h != seqno % MGA2_RING_SIZE); + dma_fence_init(fence, &mga2_fence_ops, &mga2->fence_lock, 0, seqno); + + if (mga25(mga2)) { + struct desc1 *c = &mga2->desc1[h]; + memset(c, 0, sizeof(*c)); + } + return h; +} + +static int get_free_desc(struct mga2 *mga2) +{ + if (mga2->flags & MGA2_BCTRL_OFF) { + __mga2_sync(mga2); + return 0; + } + return __get_free_desc(mga2); +} + +static int append_desc(struct mga2 *mga2, struct mga2_gem_object *mo) +{ + if (mga2->flags & MGA2_BCTRL_OFF) + return 0; + if (mga25(mga2) && mga2->bctrl_active) { + mga2_update_ptr(mga2); + mga2->bctrl_active = false; + } + + if (mga25(mga2)) + return mga25_append_desc(mga2, mo); + + mga2_append_desc(mga2, mo); + + return 0; +} + +#define wdesc(__cmd, __data, __reg, __first, __last) do { \ + if (mga2->flags & MGA2_BCTRL_OFF) { \ + wfb(__data, __reg); \ + break; \ + } \ + mga25(mga2) ? mga25_wdesc(mga2, __data, __reg) : \ + mga2_wdesc(mga2, __cmd, __data, __reg, __first, __last); \ +} while(0) + +static void mga2_color_blit(int width, int height, int pitch, int dest, + int rop, int color, int Bpp, struct mga2 *mga2) +{ + int head = get_free_desc(mga2); + u32 ctrl = rop | BB_CTRL_CE_EN | BB_CTRL_SFILL_EN | BB_CTRL_CMD_START + | (((Bpp - 1) & 0x3) << 10); + if (head < 0) + return; + + wdesc(0, color, REG_BB_FG, 1, 0); + wdesc(1, (height << 16) | (width * Bpp), REG_BB_WINDOW, 0, 0); + wdesc(2, dest, REG_BB_DADDR, 0, 0); + wdesc(3, pitch << 16 | 0, REG_BB_PITCH, 0, 0); + wdesc(4, 0, MGA2_BB_FMTCFG, 0, 0); + wdesc(5, ctrl, REG_BB_CTRL, 0, 1); + + append_desc(mga2, NULL); +} + +void mga2_fillrect(struct fb_info *info, const struct fb_fillrect *rect) +{ + struct mga2_fbdev *fbdev = info->par; + struct mga2 *mga2 = fbdev->helper.dev->dev_private; + struct mga2_framebuffer *fb = to_mga2_framebuffer(fbdev->helper.fb); + struct mga2_gem_object *mo = to_mga2_obj(fb->gobj); + u32 dx, dy, width, height, dest, rop = 0, color = 0; + u32 Bpp = info->var.bits_per_pixel >> 3; + + if 
(mga2_drm_is_open(mga2->drm) || mga2_nofbaccel == 1 || + info->flags & FBINFO_HWACCEL_DISABLED || + !(info->flags & FBINFO_HWACCEL_FILLRECT)) { + /* + * No acceleration available: fill exactly once via the CPU + * helper matching the framebuffer memory type (cfb_* for + * I/O memory, sys_* for system RAM). The old extra + * unconditional cfb_fillrect() call drew the rectangle a + * second time and used the I/O-memory path even for + * system-RAM framebuffers. + */ + if (mga2_has_vram(mga2) || mga2_use_uncached(mga2)) + cfb_fillrect(info, rect); + else + sys_fillrect(info, rect); + return; + } + + if (Bpp == 1) + color = rect->color; + else + color = ((u32 *) (info->pseudo_palette))[rect->color]; + + rop = (rect->rop != ROP_COPY) ? BB_CTRL_ROP_XOR : BB_CTRL_ROP_SRC; + + dx = rect->dx * Bpp; + width = rect->width; + dy = rect->dy; + height = rect->height; + + dest = mo->dma_addr + (dy * info->fix.line_length) + dx; + mga2_color_blit(width, height, info->fix.line_length, dest, rop, color, + Bpp, mga2); +} + +static void mga2_hw_copyarea(struct mga2 *mga2, unsigned sBase, /* Address of source: offset in frame buffer */ + unsigned sPitch, /* Pitch value of source surface in BYTE */ + unsigned sx, unsigned sy, /* Starting coordinate of source surface */ + unsigned dBase, /* Address of destination: offset in frame buffer */ + unsigned dPitch, /* Pitch value of destination surface in BYTE */ + unsigned Bpp, /* Color depth of destination surface */ + unsigned dx, unsigned dy, /* Starting coordinate of destination surface */ + unsigned width, unsigned height /* width and height of rectangle in pixel value */ + ) +{ + int head = get_free_desc(mga2); + unsigned saddr, daddr; + unsigned ctrl = BB_CTRL_ROP_SRC | BB_CTRL_CMD_START; + + if (head < 0) + return; + + /* Source address must use the source pitch (was dPitch; the two + only coincide because the in-driver caller passes equal pitches). */ + saddr = sBase + sy * sPitch + sx * Bpp; + daddr = dBase + dy * dPitch + dx * Bpp; + + wdesc(0, (height << 16) | (width * Bpp), REG_BB_WINDOW, 1, 0); + wdesc(1, saddr, REG_BB_SADDR, 0, 0); + wdesc(2, daddr, REG_BB_DADDR, 0, 0); + wdesc(3, sPitch << 16 | sPitch, REG_BB_PITCH, 0, 0); + wdesc(4, 0, MGA2_BB_FMTCFG, 0, 0); + wdesc(5, ctrl, REG_BB_CTRL, 0, 1); + + append_desc(mga2, NULL); +} + +static void mga2_copyarea(struct fb_info *info, const struct fb_copyarea *area) +{ + struct mga2_fbdev *fbdev = info->par; + 
struct mga2 *mga2 = fbdev->helper.dev->dev_private; + struct mga2_framebuffer *fb = to_mga2_framebuffer(fbdev->helper.fb); + struct mga2_gem_object *mo = to_mga2_obj(fb->gobj); + unsigned base, pitch, Bpp; + + if (mga2_drm_is_open(mga2->drm) || mga2_nofbaccel == 1 || + info->flags & FBINFO_HWACCEL_DISABLED || + !(info->flags & FBINFO_HWACCEL_COPYAREA)) { + if (mga2_has_vram(mga2) || mga2_use_uncached(mga2)) + cfb_copyarea(info, area); + else + sys_copyarea(info, area); + return; + } + + base = mo->dma_addr; + pitch = info->fix.line_length; + Bpp = info->var.bits_per_pixel >> 3; + + mga2_hw_copyarea(mga2, base, pitch, area->sx, area->sy, + base, pitch, Bpp, area->dx, area->dy, + area->width, area->height); +} + +static void mga2_hw_imageblit(struct mga2 *mga2, dma_addr_t pSrcbuf, /* pointer to start of source buffer in system memory */ + unsigned dBase, /* Address of destination: offset in frame buffer */ + unsigned dPitch, /* Pitch value of destination surface in BYTE */ + unsigned Bpp, /* Color depth of destination surface */ + unsigned dx, unsigned dy, /* Starting coordinate of destination surface */ + unsigned width, unsigned height, /* width and height of rectange in pixel value */ + unsigned fColor, /* Foreground color (corresponding to a 1 in the monochrome data */ + unsigned bColor /* Background color (corresponding to a 0 in the monochrome data */ + ) { + unsigned cbpp; + unsigned ctrl = BB_CTRL_CE_EN | BB_CTRL_SDMA_EN | + BB_CTRL_CMD_START | BB_CTRL_ROP_SRC | + BB_CTRL_SRC_MODE | BB_CTRL_BITS_IN_BYTE_TWISTER; + unsigned daddr; + int head = get_free_desc(mga2); + if (head < 0) + return; + + switch (Bpp) { + case 4: + cbpp = BB_CTRL_BPP_32; + fColor = cpu_to_le32(fColor); + bColor = cpu_to_le32(bColor); + break; + case 3: + cbpp = BB_CTRL_BPP_24; + break; + case 2: + cbpp = BB_CTRL_BPP_16; + break; + case 1: + cbpp = BB_CTRL_BPP_8; + break; + default: + return; + } + + daddr = dBase + dy * dPitch + dx * Bpp; + + ctrl |= cbpp; + + wdesc(0, (height << 16) | 
(width * Bpp), REG_BB_WINDOW, 1, 0); + wdesc(1, pSrcbuf & 0xffffFFFF, REG_BB_SADDR, 0, 0); + wdesc(2, (u64)pSrcbuf >> 32, MGA2_BB_SRC64, 0, 0); + wdesc(3, daddr, REG_BB_DADDR, 0, 0); + wdesc(4, dPitch << 16 | 0, REG_BB_PITCH, 0, 0); + wdesc(5, bColor, REG_BB_BG, 0, 0); + wdesc(6, fColor, REG_BB_FG, 0, 0); + wdesc(7, 0, MGA2_BB_FMTCFG, 0, 0); + wdesc(8, ctrl, REG_BB_CTRL, 0, 1); + + append_desc(mga2, NULL); +} + +static void mga2_imageblit(struct fb_info *info, const struct fb_image *image) +{ + struct mga2_fbdev *fbdev = info->par; + struct mga2 *mga2 = fbdev->helper.dev->dev_private; + struct mga2_framebuffer *fb = to_mga2_framebuffer(fbdev->helper.fb); + struct mga2_gem_object *mo = to_mga2_obj(fb->gobj); + unsigned base, pitch, Bpp; + unsigned fgcol, bgcol; + int offset = (void *)image->data - (void *)info->pixmap.addr; + + if (mga2_drm_is_open(mga2->drm) || mga2_nofbaccel == 1 || + info->flags & FBINFO_HWACCEL_DISABLED || + !(info->flags & FBINFO_HWACCEL_IMAGEBLIT) || + mga2->subdevice == MGA2_PCI_PROTO || !fbdev->pixmap_dma || + offset > info->pixmap.size || offset < 0) { + if (mga2_has_vram(mga2) || mga2_use_uncached(mga2)) + cfb_imageblit(info, image); + else + sys_imageblit(info, image); + return; + } + + base = mo->dma_addr; + pitch = info->fix.line_length; + Bpp = info->var.bits_per_pixel >> 3; + + if (info->fix.visual == FB_VISUAL_TRUECOLOR || + info->fix.visual == FB_VISUAL_DIRECTCOLOR) { + fgcol = ((u32 *) info->pseudo_palette)[image->fg_color]; + bgcol = ((u32 *) info->pseudo_palette)[image->bg_color]; + } else { + fgcol = image->fg_color; + bgcol = image->bg_color; + } + + mga2_hw_imageblit(mga2, fbdev->pixmap_dma + offset, base, pitch, Bpp, + image->dx, image->dy, + image->width, image->height, fgcol, bgcol); + /* wait DMA to complete: pixmap can be modified immediately after + imageblit operation */ + __mga2_sync(mga2); +} + +static void mga2_fb_writel(struct mga2 *mga2, + u32 value, void *addr) +{ + if (mga2_has_vram(mga2) || 
mga2_use_uncached(mga2)) + writel(value, addr); + else + *(u32 *)addr = value; +} + +static void mga2_load_mono_to_argb_cursor(struct mga2 *mga2, u32 *dst1, + const void *data8, u32 bg, u32 fg, u32 w, u32 h) +{ + int i, j; + const int spitch = DIV_ROUND_UP(w, 8), dpitch = MGA2_MAX_HWC_WIDTH; + const u8 *s = data8, *src; + + for (i = 0; i < h; i++, dst1 += dpitch, s += spitch) { + u32 *dst = dst1; + int shift; + + for (j = 0, shift = 7, src = s; j < w; j++, dst++, shift--) { + u32 p = *src & (1 << shift) ? fg : bg; + mga2_fb_writel(mga2, p, dst); + if (!shift) { + shift = 7; + src++; + } + } + } +} + +static int mga2_cursor_load(struct drm_crtc *crtc, struct fb_info *info, + struct fb_cursor *c) +{ + struct mga2_crtc *mga2_crtc = to_mga2_crtc(crtc); + struct mga2_fbdev *fb = info->par; + struct mga2 *mga2 = fb->helper.dev->dev_private; + struct fb_image *image = &c->image; + u32 s_pitch = (c->image.width + 7) >> 3; + unsigned i, dsize; + u8 *src; + u32 bg_idx = image->bg_color; + u32 fg_idx = image->fg_color; + u32 fg, bg; + void *dst = mga2_crtc->cursor_addr; + s_pitch = (c->image.width + 7) >> 3; + dsize = s_pitch * image->height; + + src = kmalloc(dsize, GFP_ATOMIC); + if (!src) { + return -ENOMEM; + } + + switch (c->rop) { + case ROP_XOR: + for (i = 0; i < dsize; i++) { + src[i] = image->data[i] ^ c->mask[i]; + } + break; + case ROP_COPY: + default: + for (i = 0; i < dsize; i++) { + src[i] = image->data[i] & c->mask[i]; + } + break; + } + + fg = ((info->cmap.red[fg_idx] & 0xff) << 0) | + ((info->cmap.green[fg_idx] & 0xff) << 8) | + ((info->cmap.blue[fg_idx] & 0xff) << 16) | (0xff << 24); + + bg = ((info->cmap.red[bg_idx] & 0xff) << 0) | + ((info->cmap.green[bg_idx] & 0xff) << 8) | + ((info->cmap.blue[bg_idx] & 0xff) << 16); + + mga2_load_mono_to_argb_cursor(mga2, dst, c->mask, bg, fg, image->width, + image->height); + kfree(src); + return 0; +} + +static int __mga2_fb_cursor(struct drm_crtc *crtc, struct fb_info *info, + struct fb_cursor *cursor) +{ + struct 
mga2_crtc *mga2_crtc = to_mga2_crtc(crtc); + struct mga2 *mga2 = mga2_crtc->base.dev->dev_private; + mga2_cursor_hide(crtc); + + if (cursor->set & FB_CUR_SETPOS) { + mga2_cursor_move(crtc, cursor->image.dx - info->var.xoffset, + cursor->image.dy - info->var.yoffset); + } + + if (cursor->set & FB_CUR_SETSIZE) { + if (mga2_has_vram(mga2) || mga2_use_uncached(mga2)) + memset_io(mga2_crtc->cursor_addr, 0, MGA2_HWC_SIZE); + else + memset(mga2_crtc->cursor_addr, 0, MGA2_HWC_SIZE); + + } + if (cursor->set & FB_CUR_SETCMAP) /* Nothing to do */ + ; + + if (cursor->set & (FB_CUR_SETSHAPE)) + mga2_cursor_load(crtc, info, cursor); + + if (cursor->enable) + mga2_cursor_show(crtc, mga2_crtc->cursor_offset); + + return 0; +} + +static int mga2_fb_cursor(struct fb_info *info, struct fb_cursor *cursor) +{ + struct mga2_fbdev *fb = info->par; + struct drm_client_dev *client = &fb->helper.client; + struct drm_mode_set *mode_set; + + if (cursor->image.width > MGA2_MAX_HWC_WIDTH + || cursor->image.height > MGA2_MAX_HWC_HEIGHT) + return -EINVAL; + + mutex_lock(&client->modeset_mutex); + + drm_client_for_each_modeset(mode_set, client) { + struct drm_crtc *crtc = mode_set->crtc; + __mga2_fb_cursor(crtc, info, cursor); + } + + mutex_unlock(&client->modeset_mutex); + + return 0; +} + +static int mga2_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) +{ + struct mga2_fbdev *fbdev = info->par; + struct mga2 *mga2 = fbdev->helper.dev->dev_private; + struct mga2_framebuffer *fb = to_mga2_framebuffer(fbdev->helper.fb); + struct mga2_gem_object *mo = to_mga2_obj(fb->gobj); + unsigned long vm_size; + int ret; + + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; + + vm_size = vma->vm_end - vma->vm_start; + + if (vm_size > mo->base.size) + return -EINVAL; + + if (mga2_has_vram(mga2) || mga2_use_uncached(mga2)) { + phys_addr_t start = mo->node.start; + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + if (mga2_use_uncached(mga2)) { + start = (phys_addr_t)mo->vaddr; + 
WARN(!IS_ENABLED(CONFIG_E90S), "FIXME:start\n"); + } + ret = vm_iomap_memory(vma, start, vm_size); + } else { + ret = dma_mmap_wc(mga2->drm->dev, vma, mo->vaddr, + mo->dma_addr, vm_size); + } + + return ret; +} + +static int mga2_fb_set_par(struct fb_info *info) +{ + struct mga2_fbdev *fbdev = info->par; + struct mga2 *mga2 = fbdev->helper.dev->dev_private; + if (fbdev->pixmap_dma == 0 && /* if swiotlb is running */ + !(swiotlb_max_segment() && /* don't try to map 64-bit address*/ + (virt_to_phys(info->pixmap.addr) & (-1LL << 32)))) { + dma_addr_t dma; + dma = dma_map_single(mga2->drm->dev, info->pixmap.addr, + info->pixmap.size, DMA_TO_DEVICE); + if (!dma_mapping_error(mga2->drm->dev, dma)) + fbdev->pixmap_dma = dma; + } + + return drm_fb_helper_set_par(info); +} + +#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16) + +static int mga2_setcolreg(unsigned regno, unsigned red, unsigned green, + unsigned blue, unsigned transp, + struct fb_info *info) +{ + uint32_t v; + + if (regno >= 16) + return -EINVAL; + + if (info->var.grayscale) { + /* grayscale = 0.30*R + 0.59*G + 0.11*B */ + red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8; + } + + if (info->fix.visual != FB_VISUAL_TRUECOLOR) + return -EINVAL; + + red = CMAP_TOHW(red, info->var.red.length); + blue = CMAP_TOHW(blue, info->var.blue.length); + green = CMAP_TOHW(green, info->var.green.length); + transp = CMAP_TOHW(transp, info->var.transp.length); + + v = (red << info->var.red.offset) | + (green << info->var.green.offset) | + (blue << info->var.blue.offset) | + (transp << info->var.transp.offset); + + switch (info->var.bits_per_pixel) { + case 16: + ((uint32_t *) info->pseudo_palette)[regno] = v; + break; + case 24: + case 32: + ((uint32_t *) info->pseudo_palette)[regno] = v; + break; + } + + return 0; +} + +static struct fb_ops mga2fb_ops = { + .owner = THIS_MODULE, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = mga2_fb_set_par, + .fb_fillrect = 
mga2_fillrect, + .fb_copyarea = mga2_copyarea, + .fb_imageblit = mga2_imageblit, + .fb_cursor = mga2_fb_cursor, + .fb_sync = mga2_sync, + .fb_pan_display = drm_fb_helper_pan_display, + .fb_blank = drm_fb_helper_blank, + .fb_setcolreg = mga2_setcolreg, + .fb_mmap = mga2_fb_mmap, +}; diff --git a/drivers/mcst/mga2/mga2_hdmi_it6613.c b/drivers/mcst/mga2/mga2_hdmi_it6613.c new file mode 100644 index 000000000000..2715e456609b --- /dev/null +++ b/drivers/mcst/mga2/mga2_hdmi_it6613.c @@ -0,0 +1,493 @@ + +#include "drm/drm_probe_helper.h" + +#include "mga2_drv.h" + +#include "mcu.h" +#include "typedef.h" +#include "HDMI_TX/it6613_drv.h" +#include "HDMI_TX/HDMI_TX.h" + +static u8 mga2_i2c_rd(struct i2c_adapter *adapter, u8 slave_addr, u8 addr) +{ + u8 val = 0; + u8 out_buf[2]; + u8 in_buf[2]; + struct i2c_msg msgs[] = { + { + .addr = slave_addr, + .flags = 0, + .len = 1, + .buf = out_buf, + }, + { + .addr = slave_addr, + .flags = I2C_M_RD, + .len = 1, + .buf = in_buf, + } + }; + + out_buf[0] = addr; + out_buf[1] = 0; + + if (i2c_transfer(adapter, msgs, 2) == 2) { + val = in_buf[0]; + if (0) DRM_DEBUG("%s: rd: 0x%02x: 0x%02x\n", + adapter->name, addr, val); + } else { + DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n", addr, val); + } + return val; +} + +static void +mga2_i2c_wr(struct i2c_adapter *adapter, u8 slave_addr, u8 addr, u8 val) +{ + uint8_t out_buf[2]; + struct i2c_msg msg = { + .addr = slave_addr, + .flags = 0, + .len = 2, + .buf = out_buf, + }; + + out_buf[0] = addr; + out_buf[1] = val; + + if (0) DRM_DEBUG("%s: wr: 0x%02x: 0x%02x\n", adapter->name, addr, val); + if (i2c_transfer(adapter, &msg, 1) != 1) + DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n", addr, val); +} + +static struct i2c_adapter *mga2_it6613_i2c_adapter; + + +BYTE _HDMITX_ReadI2C_Byte(BYTE RegAddr) +{ + BYTE Value; + _HDMITX_ReadI2C_ByteN(RegAddr, &Value, 1); + return Value; +} + +SYS_STATUS _HDMITX_WriteI2C_Byte(BYTE RegAddr, BYTE Data) +{ + return _HDMITX_WriteI2C_ByteN(RegAddr, &Data, 1); +} + 
+SYS_STATUS _HDMITX_ReadI2C_ByteN(BYTE RegAddr, BYTE * pData, int N) +{ + bool bSuccess = TRUE; + int i; + for (i = 0; i < N && bSuccess; i++) { + pData[i] = + mga2_i2c_rd(mga2_it6613_i2c_adapter, + HDMI_TX_I2C_SLAVE_ADDR >> 1, RegAddr + i); + } + return bSuccess ? ER_SUCCESS : ER_FAIL; +} + +SYS_STATUS _HDMITX_WriteI2C_ByteN(BYTE RegAddr, BYTE * pData, int N) +{ + BOOL bSuccess = TRUE; + int i; + for (i = 0; i < N && bSuccess; i++) { + mga2_i2c_wr(mga2_it6613_i2c_adapter, + HDMI_TX_I2C_SLAVE_ADDR >> 1, RegAddr + i, + *(pData + i)); + } + return bSuccess ? ER_SUCCESS : ER_FAIL; +} + +struct mga2_it6613 { + struct drm_connector connector; + struct drm_encoder encoder; + struct i2c_adapter *ddci2c; +}; + +static inline struct mga2_it6613 * +drm_connector_to_mga2_it6613(struct drm_connector *connector) +{ + return container_of(connector, struct mga2_it6613, + connector); +} + +static inline struct mga2_it6613 * +drm_encoder_to_mga2_it6613(struct drm_encoder *encoder) +{ + return container_of(encoder, struct mga2_it6613, + encoder); +} + +static int read_edid_block(void *data, u8 *buf, unsigned int blk, size_t length) +{ + extern unsigned char EDID_Buf[128 * 5]; + memcpy(buf, EDID_Buf + 128 * blk, + length > sizeof(EDID_Buf) ? 
sizeof(EDID_Buf) : length); + return 0; +} + +static int mga2_it6613_get_modes(struct drm_connector *connector) +{ + struct mga2_connector *mga2_connector = to_mga2_connector(connector); + struct edid *edid = NULL; + int ret; + + edid = drm_do_get_edid(connector, read_edid_block, NULL); + if (edid) { + drm_connector_update_edid_property + (&mga2_connector->base, edid); + ret = drm_add_edid_modes(connector, edid); + kfree(edid); + return ret; + } else { + drm_connector_update_edid_property + (&mga2_connector->base, NULL); + } + return 0; +} + + +static struct drm_connector_helper_funcs mga2_it6613_con_helper_funcs = { + .get_modes = mga2_it6613_get_modes, +}; + +static void +mga2_it6613_connector_destroy(struct drm_connector *connector) +{ + struct mga2_it6613 *vp = drm_connector_to_mga2_it6613(connector); + mga2_i2c_destroy(vp->ddci2c); + drm_connector_cleanup(connector); +} + +static const struct drm_connector_funcs mga2_it6613_con_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = mga2_it6613_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static const struct drm_encoder_helper_funcs mga2_it6613_enc_helper_funcs = { +}; + +static const struct drm_encoder_funcs mga2_it6613_enc_funcs = { + .destroy = drm_encoder_cleanup, +}; + + +#define RX_DISABLED + +typedef enum { + DEMO_READY = 0, + DEMO_TX_ONLY, + DEMO_LOOPBACK +} DEMO_MODE; +DEMO_MODE gDemoMode = DEMO_READY; + + +//========================================================================= +// VPG data definition +// VPG: Video Pattern Generation, implement in vpg.v +//========================================================================= + +char gszVicText[][64] = { + "720x480p60 VIC=3", + "1024x768p60", + "1280x720p50 VIC=19", + "1280x720p60 VIC=4", + "1280x1024", + "1920x1080i60 VIC=5", + "1920x1080i50 VIC=20", + 
"1920x1080p60 VIC=16", + "1920x1080p50 VIC=31", + "1600x1200", + "1920x1080i120 VIC=46", +}; + +typedef enum { + MODE_720x480 = 0, // 480p, 27 MHZ VIC=3 + MODE_1024x768 = 1, // XGA, 65 MHZ + MODE_1280x720p50 = 2, // 720p50 74.25 MHZ VIC=19 + MODE_1280x720 = 3, // 720p, 74.25 MHZ VIC=4 + MODE_1280x1024 = 4, // SXGA, 108 MHZ + MODE_1920x1080i = 5, // 1080i, 74.25 MHZ VIC=5 + MODE_1920x1080i50 = 6, // 1080i, 74.25 MHZ VIC=20 + MODE_1920x1080 = 7, // 1080p, 148.5 MHZ VIC=16 + MODE_1920x1080p50 = 8, // 1080p50, 148.5 MHZ VIC=31 + MODE_1600x1200 = 9, // UXGA, 162 MHZ + MODE_1920x1080i120 = 10 // 1080i120, 148.5 MHZ VIC=46 +} VPG_MODE; + +typedef enum { + VPG_RGB444 = 0, + VPG_YUV422 = 1, + VPG_YUV444 = 2 +} VPG_COLOR; + + +VPG_MODE gVpgMode = MODE_1920x1080; //MODE_1920x1080;MODE_720x480 +COLOR_TYPE gVpgColor = COLOR_RGB444;// video pattern generator - output color (defined ind vpg.v) + + +extern int gEnableColorDepth; +//========================================================================= +// TX video formation control +//========================================================================= + +void FindVIC(VPG_MODE Mode, alt_u8 * vic, bool * pb16x9) +{ + switch (Mode) { + case MODE_720x480: + *vic = 3; + break; + case MODE_1280x720p50: + *vic = 19; + break; + case MODE_1280x720: + *vic = 4; + break; + case MODE_1920x1080i: + *vic = 5; + break; + case MODE_1920x1080i50: + *vic = 20; + break; + case MODE_1920x1080: + *vic = 16; + break; + case MODE_1920x1080p50: + *vic = 31; + break; + case MODE_1920x1080i120: + *vic = 46; + break; + default: + *vic = 0; + } + if (*vic != 0) + *pb16x9 = TRUE; + else + *pb16x9 = FALSE; +} + +void SetupTxVIC(VPG_MODE Mode) +{ + alt_u8 tx_vic; + bool b16x9; + FindVIC(Mode, &tx_vic, &b16x9); + HDMITX_ChangeVideoTiming(tx_vic); +} + +void VPG_Config(VPG_MODE Mode, COLOR_TYPE Color) +{ +#ifndef TX_DISABLED + //===== check whether vpg function is active + if (!HDMITX_HPD()) + return; +#ifndef RX_DISABLED + if (HDMIRX_IsVideoOn()) + 
return; +#endif //RX_DISABLED + + + OS_PRINTF("===> Pattern Generator Mode: %d (%s)\n", gVpgMode, + gszVicText[gVpgMode]); + +#if 0 + //===== updagte vpg mode & color + IOWR(HDMI_TX_MODE_CHANGE_BASE, 0, 0); + // change color mode of VPG + if (gVpgColor == COLOR_RGB444) + IOWR(HDMI_TX_VPG_COLOR_BASE, 0, VPG_RGB444); // RGB444 + else if (gVpgColor == COLOR_YUV422) + IOWR(HDMI_TX_VPG_COLOR_BASE, 0, VPG_YUV422); // YUV422 + else if (gVpgColor == COLOR_YUV444) + IOWR(HDMI_TX_VPG_COLOR_BASE, 0, VPG_YUV444); // YUV444 + + IOWR(HDMI_TX_DISP_MODE_BASE, 0, gVpgMode); + IOWR(HDMI_TX_MODE_CHANGE_BASE, 0, 1); + IOWR(HDMI_TX_MODE_CHANGE_BASE, 0, 0); + // + //HDMITX_EnableVideoOutput(); + +#endif //#ifndef TX_DISABLED +#endif +} + + +bool SetupColorSpace(void) +{ + int ColorDepth = 24; /* defualt */ + char szColor[][32] = { "RGB444", "YUV422", "YUV444" }; + bool bSuccess = TRUE; +// bool bRxVideoOn = FALSE; + COLOR_TYPE TxInputColor; + COLOR_TYPE TxOutputColor; +#ifndef RX_DISABLED + bRxVideoOn = HDMIRX_IsVideoOn(); +#endif // RX_DISABLED + + +#ifndef TX_DISABLED +#if 0 + if (gDemoMode == DEMO_LOOPBACK) { + // rx-tx loopback + int RxSourceColor, RxSinkColor; + bSuccess = HDMIRX_GetSourceColor(&RxSourceColor); + if (bSuccess) { + // RX-TX loopback (bypass) + if (RxSourceColor == COLOR_RGB444 || + (RxSourceColor == COLOR_YUV422 + && HDMITX_IsSinkSupportYUV422()) + || (RxSourceColor == COLOR_YUV444 + && HDMITX_IsSinkSupportYUV444())) { + + // Source color --> RX --> TX ---> Display + // bypass color space + TxInputColor = RxSourceColor; + TxOutputColor = RxSourceColor; + RxSinkColor = RxSourceColor; + } else { + // Source color --> RX --(RGB color)--> TX --(RBG Color)--> Display + TxInputColor = COLOR_RGB444; + TxOutputColor = COLOR_RGB444; + RxSinkColor = COLOR_RGB444; + } + HDMIRX_SetOutputColor(RxSinkColor); + OS_PRINTF("Set Rx Color Convert:%s->%s\n", + szColor[RxSourceColor], + szColor[RxSinkColor]); + } + } else +#endif + if (gDemoMode == DEMO_TX_ONLY) { + // tx-only +#ifdef 
TX_CSC_DISABLED + // Transmittor: output color == input color + TxInputColor = gVpgColor; + TxOutputColor = gVpgColor; +#else + // Trasmitter: output color is fixed as RGB + TxInputColor = gVpgColor; + TxOutputColor = COLOR_RGB444; +#endif + + + } else { + return TRUE; + } + + HDMITX_SetColorSpace(TxInputColor, TxOutputColor); + + + // set TX color depth + if (gEnableColorDepth) { + if (HDMITX_IsSinkSupportColorDepth36()) + ColorDepth = 36; + else if (HDMITX_IsSinkSupportColorDepth30()) + ColorDepth = 30; + } + HDMITX_SetOutputColorDepth(ColorDepth); + + OS_PRINTF("Set Tx Color Depth: %d bits %s\n", ColorDepth, + gEnableColorDepth ? "" : "(default)"); + OS_PRINTF("Set Tx Color Convert:%s->%s\n", szColor[TxInputColor], + szColor[TxOutputColor]); + +#if 0 // dump debug message + int i; + HDMITX_DumpReg(0xC0); + HDMITX_DumpReg(0x72); + for (i = 0x73; i <= 0x8d; i++) + HDMITX_DumpReg(i); + HDMITX_DumpReg(0x158); +#endif + +#endif //TX_DISABLED + return bSuccess; +} + +static void mga2_it6613_iteration(void) +{ + + bool bRxVideoOn = FALSE, bTxSinkOn = FALSE, bRxModeChanged = FALSE; + //========== TX + if (HDMITX_DevLoopProc() || bRxModeChanged) { + bTxSinkOn = HDMITX_HPD(); + if (bTxSinkOn) { + // update state + gDemoMode = + bRxVideoOn ? 
DEMO_LOOPBACK : DEMO_TX_ONLY; + // + HDMITX_DisableVideoOutput(); + if (gDemoMode == DEMO_TX_ONLY) { + // tx-only + VPG_Config(gVpgMode, gVpgColor); + SetupTxVIC(gVpgMode); + } + SetupColorSpace(); + HDMITX_EnableVideoOutput(); + } else { + HDMITX_DisableVideoOutput(); + } + } +} +int mga2_hdmi_it6613_connector_init(struct drm_device *drm, + resource_size_t regs_phys) +{ + struct mga2_it6613 *vp; + int ret; + struct mga2 *mga2 = drm->dev_private; + + vp = devm_kzalloc(drm->dev, sizeof(*vp), GFP_KERNEL); + if (!vp) + return -ENOMEM; + + drm_encoder_helper_add(&vp->encoder, + &mga2_it6613_enc_helper_funcs); + ret = drm_encoder_init(drm, + &vp->encoder, + &mga2_it6613_enc_funcs, + DRM_MODE_ENCODER_TMDS, + NULL); + if (ret) { + DRM_ERROR("Couldn't initialise the vp encoder\n"); + goto err_out; + } + + vp->encoder.possible_crtcs = 1; + + drm_connector_helper_add(&vp->connector, + &mga2_it6613_con_helper_funcs); + ret = drm_connector_init(drm, &vp->connector, + &mga2_it6613_con_funcs, + DRM_MODE_CONNECTOR_HDMIA); + if (ret) { + DRM_ERROR("Couldn't initialise the vp connector\n"); + goto err_cleanup_connector; + } + + drm_connector_attach_encoder(&vp->connector, &vp->encoder); + + vp->ddci2c = + mga2_i2c_create(drm->dev, regs_phys + MGA2_VID0_DDCI2C, + "mga2 ddc", mga2->base_freq, 100 * 1000); + if (!vp->ddci2c) { + ret = -ENOSYS; + DRM_ERROR("failed to add ddc bus for conn\n"); + goto err_cleanup_connector; + } + mga2_it6613_i2c_adapter = vp->ddci2c; + + HDMITX_Init(); + + msleep(200); + mga2_it6613_iteration(); + + return 0; +err_cleanup_connector: + drm_encoder_cleanup(&vp->encoder); +err_out: + return ret; +} diff --git a/drivers/mcst/mga2/mga2_i2c.c b/drivers/mcst/mga2/mga2_i2c.c new file mode 100644 index 000000000000..b5f0b96df233 --- /dev/null +++ b/drivers/mcst/mga2/mga2_i2c.c @@ -0,0 +1,73 @@ +#include "mga2_drv.h" +#include + + +static int dev_is_type(struct device *dev, void *type) +{ + if (dev->type == type) + return 1; + return 0; +} + +static struct device 
*dev_find_type(struct device *parent, void *type) +{ + if (dev_is_type(parent, type)) { + get_device(parent); + return parent; + } + return device_find_child(parent, type, dev_is_type); +} + +static struct resource res_parent; + +struct i2c_adapter *mga2_i2c_create(struct device *parent, + resource_size_t regs_phys, + char *name, unsigned base_freq_hz, + unsigned desired_freq_hz) +{ + struct device *d; + struct platform_device *p; + + struct resource r[] = { + { + .parent = &res_parent, + .flags = IORESOURCE_MEM, + .start = regs_phys, + .end = regs_phys + 0x20 - 1 + }, + }; + struct l_i2c2_platform_data mga2_i2c = { + .bus_nr = -1, + .base_freq_hz = base_freq_hz, + .desired_freq_hz = desired_freq_hz, + .two_stage_register_access = true, + }; + /* + * HACK: void insert_resource() call failure in platform_device_add(). + */ + memset(&res_parent, 0, sizeof(res_parent)); + res_parent.end = ULONG_MAX; + res_parent.flags = IORESOURCE_MEM; + p = platform_device_register_resndata(parent, + "mga2-i2c", PLATFORM_DEVID_AUTO, r, + ARRAY_SIZE(r), + &mga2_i2c, sizeof(mga2_i2c)); + if (IS_ERR(p)) { + DRM_ERROR("failed to register mga2-i2c (%ld)\n", PTR_ERR(p)); + return NULL; + } + + d = dev_find_type(&p->dev, &i2c_adapter_type); + if (!d) + return NULL; + return to_i2c_adapter(d); +} + +void mga2_i2c_destroy(struct i2c_adapter *adapter) +{ + struct device *d = &adapter->dev; + if (!adapter) + return; + put_device(d); /* for dev_find_type() above */ + platform_device_unregister(to_platform_device(d->parent)); +} diff --git a/drivers/mcst/mga2/mga2_irq.c b/drivers/mcst/mga2/mga2_irq.c new file mode 100644 index 000000000000..001fc74216b7 --- /dev/null +++ b/drivers/mcst/mga2/mga2_irq.c @@ -0,0 +1,201 @@ +#include "mga2_drv.h" + +#define __rint(__addr) readl(mga2->regs + \ + mga2->info->int_regs_base + MGA2_ ## __addr) +#define __wint(__v, __addr) writel(__v, mga2->regs + \ + mga2->info->int_regs_base + MGA2_ ## __addr) + +#ifdef DEBUG +#define rint(__offset) \ +({ \ + unsigned 
__val = __rint(__offset); \ + DRM_DEBUG_KMS("R: %x: %s\n", __val, # __offset); \ + __val; \ +}) + +#define wwint(__val, __offset) \ +({ \ + unsigned __val2 = __val; \ + DRM_DEBUG_KMS("W: %x: %s\n", __val2, # __offset); \ + __wint(__val2, __offset); \ +}) + +#else +#define rint __rint +#define wwint __wint +#endif + +irqreturn_t mga2_driver_irq_handler(int irq, void *arg) +{ + struct drm_device *drm = (struct drm_device *)arg; + struct mga2 *mga2 = drm->dev_private; + u32 status = rint(INTREQ); + u32 ena = rint(INTENA); + int ret = IRQ_HANDLED; + if (!(status & ena)) + return IRQ_NONE; + + wwint(status & ena, INTREQ); + + if (mga25(mga2)) { + /* VBLANK interrupt */ + if (status & MGA25_INT_B_DC0_V) + mga2_handle_vblank(drm, 0); + if (status & MGA25_INT_B_DC1_V) + mga2_handle_vblank(drm, 1); + if (status & MGA25_INT_B_DC2_V) + mga2_handle_vblank(drm, 2); + + if (status & MGA25_INT_B_SOFTINT) + mga2_update_ptr(mga2); + + if (status & MGA25_INT_B_V1HDMI) /* hdmi will handle it */ + ret = IRQ_NONE; + if (status & MGA25_INT_B_V2HDMI) /* hdmi will handle it */ + ret = IRQ_NONE; + } else { + /* VBLANK interrupt */ + if (status & MGA2_INT_B_DC0_V) + mga2_handle_vblank(drm, 0); + if (status & MGA2_INT_B_DC1_V) + mga2_handle_vblank(drm, 1); + + if (status & MGA2_INT_B_SOFTINT) + mga2_update_ptr(mga2); + + if (status & MGA2_INT_B_V1HDMI) /* hdmi will handle it */ + ret = IRQ_NONE; + if (status & MGA2_INT_B_V2HDMI) /* hdmi will handle it */ + ret = IRQ_NONE; + } + + return ret; +} + +static void mga2_disable_irq(struct mga2 *mga2, u32 mask) +{ + wwint(mask, INTENA); +} + +static void mga2_enable_irq(struct mga2 *mga2, u32 mask) +{ + wwint(mask | MGA2_INT_B_SETRST, INTENA); +} + +/** + * mga2_irq_sw_irq_get - enable software interrupt + * + * @mga2: mga2 device pointer + * + * Enables the software interrupt for the ring. + * The software interrupt is used to signal a fence on + * the ring. + */ +void mga2_irq_sw_irq_get(struct mga2 *mga2) +{ + unsigned mask = mga25(mga2) ? 
MGA25_INT_B_SOFTINT : MGA2_INT_B_SOFTINT; + if (atomic_inc_return(&mga2->ring_int) == 1) { + mga2_enable_irq(mga2, mask); + } +} + +/** + * mga2_irq_sw_irq_put - disable software interrupt + * + * @mga2: mga2 device pointer + * + * Disables the software interrupt for the ring. + * The software interrupt is used to signal a fence on + * the ring. + */ +void mga2_irq_sw_irq_put(struct mga2 *mga2) +{ + unsigned mask = mga25(mga2) ? MGA25_INT_B_SOFTINT : MGA2_INT_B_SOFTINT; + if (atomic_dec_and_test(&mga2->ring_int)) + mga2_disable_irq(mga2, mask); +} + +int mga2_enable_vblank(struct drm_device *drm, unsigned int base) +{ + struct mga2 *mga2 = drm->dev_private; + u32 v = base == 0 ? MGA2_INT_B_DC0_V : base == 1 ? MGA2_INT_B_DC1_V : 0; + if (mga25(mga2)) switch(base) { + case 0: + v = MGA25_INT_B_DC0_V; + break; + case 1: + v = MGA25_INT_B_DC1_V; + break; + case 2: + v = MGA25_INT_B_DC2_V; + break; + default: + WARN_ON(1); + } + mga2_enable_irq(mga2, v); + + return 0; +} + +void mga2_disable_vblank(struct drm_device *drm, unsigned int base) +{ + struct mga2 *mga2 = drm->dev_private; + u32 v = base == 0 ? MGA2_INT_B_DC0_V : base == 1 ? 
MGA2_INT_B_DC1_V : 0; + if (mga25(mga2)) switch(base) { + case 0: + v = MGA25_INT_B_DC0_V; + break; + case 1: + v = MGA25_INT_B_DC1_V; + break; + case 2: + v = MGA25_INT_B_DC2_V; + break; + default: + WARN_ON(1); + } + mga2_disable_irq(mga2, v); +} + +void mga2_driver_irq_preinstall(struct drm_device *drm) +{ + struct mga2 *mga2 = drm->dev_private; + + atomic_set(&mga2->ring_int, 0); + /* Disable *all* interrupts */ + wwint(0x7FFFffff, INTENA); + /* Set *all* edge-low*/ + wwint(0x7FFFffff, INTLEVEL); + wwint(0x7FFFffff, INTMODE); + if (mga25(mga2)) { + u32 v = MGA2_INT_B_SETRST | + MGA25_INT_B_V1HDMI | MGA25_INT_B_V2HDMI | + MGA25_INT_B_V1HDMI_WAKEUP | MGA25_INT_B_V2HDMI_WAKEUP | + MGA25_INT_B_HDA1 | MGA25_INT_B_HDA2; + wwint(v, INTLEVEL); + wwint(v, INTMODE); + } + /* Clear *all* interrupts */ + wwint(0x7FFFffff, INTREQ); +} + +int mga2_driver_irq_postinstall(struct drm_device *drm) +{ + struct mga2 *mga2 = drm->dev_private; + u32 v = MGA2_INT_B_V1HDMI | MGA2_INT_B_V2HDMI; + if (mga25(mga2)) { + v = MGA2_INT_B_SETRST | + MGA25_INT_B_V1HDMI | MGA25_INT_B_V2HDMI; + } + mga2_enable_irq(mga2, v); + return 0; +} + +void mga2_driver_irq_uninstall(struct drm_device *drm) +{ + struct mga2 *mga2 = drm->dev_private; + atomic_set(&mga2->ring_int, 0); + + /* Disable *all* interrupts */ + wwint(0x7FFFffff, INTENA); +} diff --git a/drivers/mcst/mga2/mga2_layer.c b/drivers/mcst/mga2/mga2_layer.c new file mode 100644 index 000000000000..30f0c1c86fc6 --- /dev/null +++ b/drivers/mcst/mga2/mga2_layer.c @@ -0,0 +1,703 @@ +#define DEBUG + +#include "mga2_drv.h" + +#define __rlayer(__addr) readl(mcrtc->regs + MGA2_DC0_ ## __addr) +#define __wlayer(__v, __addr) writel(__v, mcrtc->regs + MGA2_DC0_ ## __addr) + +#ifdef DEBUG +#define rlayer(__offset) \ +({ \ + unsigned __val = __rlayer(__offset); \ + DRM_DEBUG_KMS("%x:R: %x:%s\n", mcrtc->index, \ + __val, # __offset); \ + __val; \ +}) + +#define wlayer(__val, __offset) do { \ + unsigned __val2 = __val; \ + DRM_DEBUG_KMS("%x:W:%x: %s\n", 
mcrtc->index, \ + __val2, # __offset); \ + __wlayer(__val2, __offset); \ +} while (0) + +#else +#define rlayer __rlayer +#define wlayer __wlayer +#endif + +struct mga2_layer { + struct drm_plane plane; +}; + +static inline struct mga2_layer * +plane_to_mga2_layer(struct drm_plane *plane) +{ + return container_of(plane, struct mga2_layer, plane); +} + +struct mga2_plane_desc { + enum drm_plane_type type; + u8 pipe; + const uint32_t *formats; + uint32_t nformats; + const struct drm_plane_helper_funcs *func; +}; + +static void mga2_cursor_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + if (!old_state->crtc) + return; + mga2_cursor_hide(old_state->crtc); +} + +static void mga2_cursor_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_plane_state *state = plane->state; + struct drm_framebuffer *fb = state->fb; + struct mga2_framebuffer *mga2_fb = to_mga2_framebuffer(fb); + struct mga2_crtc *mcrtc = to_mga2_crtc(state->crtc); + unsigned offset = to_mga2_obj(mga2_fb->gobj)->dma_addr; + wlayer((state->crtc_x << 16) | + (state->crtc_y & 0xffff), NCRSCOORD); + wlayer(offset | MGA2_DC_B_CRS_ENA, NCRSADDR); + wlayer(MGA2_DC_B_STROB, DISPCTRL); +} + +static const struct drm_plane_helper_funcs mga2_cursor_helper_funcs = { + .atomic_disable = mga2_cursor_atomic_disable, + .atomic_update = mga2_cursor_atomic_update, + /*.atomic_check = TODO:*/ +}; + +#define MGA2_MAX_SCALE (2 << 16) /*16.16 fixed point*/ +#define MGA2_MIN_SCALE 1 /*16.16 fixed point*/ + +static int mga2_overlay_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *pstate) +{ + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc = pstate->crtc; + + if (!crtc) + return 0; + + crtc_state = drm_atomic_get_existing_crtc_state(pstate->state, crtc); + if (WARN_ON(!crtc_state)) + return -EINVAL; + + return drm_atomic_helper_check_plane_state(pstate, crtc_state, + MGA2_MIN_SCALE, + MGA2_MAX_SCALE, + true, true); +} + +static 
void mga2_overlay_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct mga2_crtc *mcrtc = to_mga2_crtc(old_state->crtc); + if (!old_state->crtc) + return; + wlayer(MGA2_DC0_OVL_UPD_BUSY, OVL_CTRL); +} + +static u64 mga2_plane_to_offset(struct drm_plane_state *state, int plane) +{ + struct drm_framebuffer *fb = state->fb; + unsigned x = state->src.x1 >> 16; + unsigned y = state->src.y1 >> 16; + + if (plane) { + x /= fb->format->hsub; + y /= fb->format->vsub; + } + return fb->offsets[plane] + fb->pitches[plane] * y + + fb->format->cpp[plane] * x; +} + +static u32 mga2_format_to_overlay(u32 format, int *plane) +{ + u32 mode; + plane[0] = 0; + plane[1] = 1; + plane[2] = 2; + switch (format) { + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_XRGB8888: + mode = MGA2_DC_B_MODE_ARGB | MGA_MODE_ENDIAN(3, 2, 1, 0); + break; + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_RGBX8888: + mode = MGA2_DC_B_MODE_ARGB | MGA_MODE_ENDIAN(0, 3, 2, 1); + break; + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_XBGR8888: + mode = MGA2_DC_B_MODE_ARGB | MGA_MODE_ENDIAN(3, 0, 1, 2); + break; + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_BGRX8888: + mode = MGA2_DC_B_MODE_ARGB | MGA_MODE_ENDIAN(0, 1, 2, 3); + break; + case DRM_FORMAT_AYUV: + mode = MGA2_DC_B_MODE_AYUV | MGA_MODE_ENDIAN(0, 1, 2, 3); + break; + case DRM_FORMAT_RGB888: + mode = MGA2_DC_B_MODE_RGB | MGA_MODE_ENDIAN(3, 2, 1, 0); + break; + case DRM_FORMAT_BGR888: + mode = MGA2_DC_B_MODE_RGB | MGA_MODE_ENDIAN(3, 0, 1, 2); + break; + case DRM_FORMAT_YUYV: + mode = MGA2_DC_B_MODE_YUYV | MGA_MODE_ENDIAN(0, 1, 2, 3); + break; + case DRM_FORMAT_YVYU: + mode = MGA2_DC_B_MODE_YUYV | MGA_MODE_ENDIAN(0, 3, 2, 1); + break; + case DRM_FORMAT_UYVY: + mode = MGA2_DC_B_MODE_YUYV | MGA_MODE_ENDIAN(1, 0, 3, 2); + break; + case DRM_FORMAT_VYUY: + mode = MGA2_DC_B_MODE_YUYV | MGA_MODE_ENDIAN(1, 2, 3, 0); + break; + case DRM_FORMAT_NV12: + mode = MGA2_DC_B_MODE_NV12; + break; + case DRM_FORMAT_NV21: + mode = 
MGA2_DC_B_MODE_NV21; + break; + case DRM_FORMAT_NV16: + mode = MGA2_DC_B_MODE_NV16; + break; + case DRM_FORMAT_NV61: + mode = MGA2_DC_B_MODE_NV61; + break; + case DRM_FORMAT_NV24: + mode = MGA2_DC_B_MODE_NV24; + break; + case DRM_FORMAT_NV42: + mode = MGA2_DC_B_MODE_NV42; + break; + case DRM_FORMAT_YUV420: + mode = MGA2_DC_B_MODE_YUV420; + break; + case DRM_FORMAT_YVU420: + mode = MGA2_DC_B_MODE_YUV420; + plane[0] = 0; + plane[1] = 2; + plane[2] = 1; + break; + case DRM_FORMAT_YUV422: + mode = MGA2_DC_B_MODE_YUV422; + break; + case DRM_FORMAT_YVU422: + mode = MGA2_DC_B_MODE_YUV422; + plane[0] = 0; + plane[1] = 2; + plane[2] = 1; + break; + case DRM_FORMAT_YUV444: + mode = MGA2_DC_B_MODE_YUV444; + break; + case DRM_FORMAT_YVU444: + mode = MGA2_DC_B_MODE_YUV444; + plane[0] = 0; + plane[1] = 2; + plane[2] = 1; + break; + default: + mode = MGA2_DC_B_MODE_ARGB | MGA_MODE_ENDIAN(3, 2, 1, 0); + WARN_ON(1); + } + return mode; +} + +/* + Lanczos2-windowed sinc function + 5 taps + 16 phases per tap + Quantized to 2.8-bit signed numbers +*/ +static int mga2_fir_coeff[16][5] = { + { 0, 0, 256, 0, 0, }, + { 0, -9, 254, 11, 0, }, + { -1, -15, 248, 25, -1, }, + { -2, -20, 238, 42, -2, }, + { -3, -22, 225, 60, -4, }, + { -4, -22, 208, 81, -7, }, + { -5, -21, 190, 102, -10, }, + { -5, -19, 169, 124, -13, }, + { -6, -16, 147, 147, -16, }, + { -6, -13, 125, 169, -19, }, + { -5, -10, 102, 190, -21, }, + { -5, -7, 81, 209, -22, }, + { -4, -4, 60, 225, -21, }, + { -3, -2, 42, 238, -19, }, + { -2, -1, 26, 248, -15, }, + { -1, 0, 12, 254, -9, }, +}; + +static u32 mga2_get_fir_coeff(int ratio, int phase, int tap) +{ + u32 no_zoom_coeff[] = {0, 0, 256, 0, 0}; + if (ratio >> 16 == 1) + return no_zoom_coeff[tap]; + + return mga2_fir_coeff[phase][tap]; +} + +static u32 mga2_rect_wh_fp(struct drm_rect *r) +{ + /* round to nearest */ + return ((drm_rect_width(r) + 0x8000) & 0xffff0000) | + (drm_rect_height(r) + 0x8000) >> 16; +} + +static u32 mga2_rect_wh(struct drm_rect *r) +{ + return 
drm_rect_width(r) << 16 | (drm_rect_height(r) & 0x0000ffff); +} + +static u32 mga2_rect_xy(struct drm_rect *r) +{ + return (r)->x1 << 16 | ((r)->y1 & 0x0000ffff); +} + +static void mga2_set_zoom(struct mga2_crtc *mcrtc, int hscale, int vscale) +{ + int coord, phase, tap; + int ratio[2] = {vscale, hscale}; + + for (coord = 0; coord < 2; coord++) { + for (phase = 0; phase < 16; phase++) { + for (tap = 0; tap < 5; tap++) { + u32 v = mga2_get_fir_coeff( + ratio[coord], phase, tap); + wlayer(v, ZOOM_FTAP0 + tap * 4); + } + wlayer(coord << MGA2_DC0_ZOOM_COORD_SHIFT | phase, + ZOOM_FWRITE); + } + } + wlayer(vscale, ZOOM_VPITCH); + wlayer(hscale, ZOOM_HPITCH); +} + +static int mga2_rect_calc_vscale(const struct drm_rect *src, + const struct drm_rect *dst, + int min_hscale, int max_hscale) +{ + int src_h = drm_rect_height(src) - (1 << 16); + int dst_h = drm_rect_height(dst) - 1; + return dst_h ? src_h / dst_h : 1 << 16; +} + +static int mga2_rect_calc_hscale(const struct drm_rect *src, + const struct drm_rect *dst, + int min_hscale, int max_hscale) +{ + int src_w = drm_rect_width(src) - (1 << 16); + int dst_w = drm_rect_width(dst) - 1; + return dst_w ? 
src_w / dst_w : 1 << 16; +} + +static void mga2_overlay_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_plane_state *state = plane->state; + struct mga2_crtc *mcrtc = to_mga2_crtc(state->crtc); + struct drm_framebuffer *fb = state->fb; + struct mga2_framebuffer *mga2_fb = to_mga2_framebuffer(fb); + u64 ba = to_mga2_obj(mga2_fb->gobj)->dma_addr; + int pl[3] = {}; + u32 alpha, v; + int hscale, vscale, scale = 0; + struct drm_rect s = drm_plane_state_src(state); + struct drm_rect d = drm_plane_state_dest(state); + + BUILD_BUG_ON(-1 >> 1 != -1); + if (!state->fb || WARN_ON(!state->crtc) || + (!old_state->visible && !state->visible)) + return; + if (!state->visible) { + wlayer(MGA2_DC0_OVL_UPD_BUSY, OVL_CTRL); + return; + } + WARN_ON(DRM_MODE_ROTATE_0 != (state->rotation & DRM_MODE_ROTATE_MASK)); + + drm_rect_debug_print("src: ", &state->src, true); + drm_rect_debug_print("dst: ", &state->dst, false); + + hscale = mga2_rect_calc_hscale(&s, &d, MGA2_MIN_SCALE, MGA2_MAX_SCALE); + vscale = mga2_rect_calc_vscale(&s, &d, MGA2_MIN_SCALE, MGA2_MAX_SCALE); + + s = drm_plane_state_src(old_state); + d = drm_plane_state_dest(old_state); + + scale = hscale != mga2_rect_calc_hscale(&s, &d, + MGA2_MIN_SCALE, MGA2_MAX_SCALE) || + vscale != mga2_rect_calc_vscale(&s, &d, MGA2_MIN_SCALE, + MGA2_MAX_SCALE); + scale = scale || !old_state->fb; + if (scale) + mga2_set_zoom(mcrtc, hscale, vscale); + + v = mga2_rect_xy(&state->dst); + if (mga2_rect_xy(&old_state->dst) != v || scale) + wlayer(v, OVL_XY); + v = mga2_rect_wh_fp(&state->src); + if (mga2_rect_wh_fp(&old_state->src) != v || scale) { + wlayer(v, OVL_GEOMETRY); + wlayer(v, ZOOM_SRCGEOM); + } + v = mga2_rect_wh(&state->dst); + if (mga2_rect_wh(&old_state->dst) != v || scale) + wlayer(v, ZOOM_DSTGEOM); + + v = mga2_format_to_overlay(fb->format->format, pl); + if (!old_state->fb || fb->format->format != + old_state->fb->format->format) { + wlayer(v, OVL_MODE); + wlayer(fb->pitches[pl[0]], 
OVL_STRIDE0); + wlayer(fb->pitches[pl[1]], OVL_STRIDE1); + wlayer(fb->pitches[pl[2]], OVL_STRIDE2); + } + if (old_state->src.x1 != state->src.x1 || + old_state->src.y1 != state->src.y1 || + old_state->fb != state->fb || + state->crtc->state->mode_changed) { + wlayer(ba + mga2_plane_to_offset(state, pl[0]), OVL_BASE0); + wlayer(ba + mga2_plane_to_offset(state, pl[1]), OVL_BASE1); + wlayer(ba + mga2_plane_to_offset(state, pl[2]), OVL_BASE2); + } + + alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE ? + state->alpha >> 8 : + fb->format->has_alpha ? 0 : 0xff; + alpha <<= MGA2_DC0_OVL_ALPHA_SHIFT; + wlayer(MGA2_DC0_OVL_UPD_BUSY | MGA2_DC0_OVL_ENABLE | alpha, OVL_CTRL); +} + +static const struct drm_plane_helper_funcs mga2_overlay_helper_funcs = { + .atomic_disable = mga2_overlay_atomic_disable, + .atomic_update = mga2_overlay_atomic_update, + .atomic_check = mga2_overlay_plane_atomic_check, +}; + + +static int mga2_primary_atomic_check(struct drm_plane *plane, + struct drm_plane_state *pstate) +{ + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc = pstate->crtc; + if (!crtc) + return 0; + + crtc_state = drm_atomic_get_existing_crtc_state(pstate->state, crtc); + if (WARN_ON(!crtc_state)) + return -EINVAL; + + return drm_atomic_helper_check_plane_state(pstate, crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + false, true); +} + +static void mga2_finish_page_flip(struct drm_device *drm, int ctrc) +{ + unsigned long flags; + struct mga2_crtc *mcrtc = + to_mga2_crtc(drm_crtc_from_index(drm, ctrc)); + + spin_lock_irqsave(&drm->event_lock, flags); + if (mcrtc->event) { + drm_crtc_send_vblank_event(&mcrtc->base, mcrtc->event); + drm_crtc_vblank_put(&mcrtc->base); + mcrtc->event = NULL; + } + spin_unlock_irqrestore(&drm->event_lock, flags); +} + +void mga2_handle_vblank(struct drm_device *drm, int crtc) +{ + struct mga2_crtc *mcrtc = to_mga2_crtc(drm_crtc_from_index(drm, crtc)); + mga2_finish_page_flip(drm, crtc); + drm_handle_vblank(drm, crtc); 
+ if (!test_and_clear_bit(MGA2_PENDING_FB_UNREF, &mcrtc->pending)) + return; + if (rlayer(WSTART) != rlayer(NSTART)) { + /* Race occured. We have to wait for another IRQ. */ + set_bit(MGA2_PENDING_FB_UNREF, &mcrtc->pending); + return; + } + drm_crtc_vblank_put(&mcrtc->base); + drm_flip_work_commit(&mcrtc->fb_unref_work, system_unbound_wq); + +} + +static void mga2_primary_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct mga2_crtc *mcrtc = to_mga2_crtc(old_state->crtc); + wlayer(MGA2_DC_CTRL_NOSCRRFRSH | rlayer(CTRL), CTRL); + drm_framebuffer_get(old_state->fb); + drm_flip_work_queue(&mcrtc->fb_unref_work, old_state->fb); + set_bit(MGA2_PENDING_FB_UNREF_DISABLE, &mcrtc->pending); +} + +static int mga2_format_to_primary(u32 format) +{ + int pixfmt; + + switch (format) { + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + pixfmt = MGA2_DC_B_32BPP; +#ifdef __BIG_ENDIAN + pixfmt |= MGA2_DC_B_BGR | MGA2_DC_B_RGBX_FMT; +#else + pixfmt |= MGA2_DC_B_RGB; +#endif + break; + case DRM_FORMAT_RGB888: + case DRM_FORMAT_BGR888: + pixfmt = MGA2_DC_B_24BPP; +#ifdef __BIG_ENDIAN + pixfmt |= MGA2_DC_B_BGR | MGA2_DC_B_RGBX_FMT; +#else + pixfmt |= MGA2_DC_B_RGB; +#endif + break; + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + pixfmt = MGA2_DC_B_16BPP; + pixfmt |= MGA2_DC_B_565_FMT; +#ifdef __BIG_ENDIAN + pixfmt |= MGA2_DC_B_RGB_16SWAP; +#endif + break; + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_BGRX5551: + pixfmt = MGA2_DC_B_16BPP; + pixfmt |= MGA2_DC_B_565_FMT; +#ifdef __BIG_ENDIAN + pixfmt |= MGA2_DC_B_RGB_16SWAP; +#endif + break; + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_BGRX4444: + pixfmt = MGA2_DC_B_16BPP; + pixfmt |= MGA2_DC_B_4444_FMT; +#ifdef __BIG_ENDIAN + pixfmt |= MGA2_DC_B_RGB_16SWAP; +#endif + break; + case DRM_FORMAT_C8: + pixfmt = MGA2_DC_B_8BPP; + break; + default: + return -EINVAL; + } + + return pixfmt; +} + +static void mga2_primary_atomic_update(struct 
drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_plane_state *state = plane->state; + struct drm_framebuffer *fb = state->fb; + struct mga2_framebuffer *mga2_fb = to_mga2_framebuffer(fb); + struct drm_crtc *crtc = state->crtc; + struct mga2_crtc *mcrtc = to_mga2_crtc(state->crtc); + unsigned offset = to_mga2_obj(mga2_fb->gobj)->dma_addr; + int x = state->src_x >> 16; + int y = state->src_y >> 16; + int pix = mga2_format_to_primary(fb->format->format); + bool fb_changed = old_state->fb && old_state->fb != state->fb; + + WARN_ON(to_mga2_obj(mga2_fb->gobj)->dma_addr & (-1LL << 32)); + if (WARN_ON(pix < 0)) + return; + + drm_rect_debug_print("src: ", &state->src, true); + drm_rect_debug_print("dst: ", &state->dst, false); + + offset += /*fb->offsets[0] +*/ y * fb->pitches[0] + + x * fb->format->cpp[0]; + + wlayer(offset, NSTART); + wlayer(fb->pitches[0], NOFFS); + wlayer(pix, PIXFMT); + wlayer(MGA2_DC_B_STROB, DISPCTRL); + wlayer(MGA2_DC_CTRL_DEFAULT, CTRL); + + DRM_DEBUG("%s:%s (%d, %d) pitch: %d, offset: %d\n", + plane->name, (char *)&fb->format->format, x, y, + fb->pitches[0], fb->offsets[0]); + /* + * A scanout can still be occurring, so we can't drop the + * reference to the old framebuffer. To solve this we get a + * reference to old_fb and set a worker to release it later. 
+ */ + if (fb_changed) { + drm_framebuffer_get(old_state->fb); + drm_flip_work_queue(&mcrtc->fb_unref_work, old_state->fb); + } + + if (fb_changed || test_and_clear_bit(MGA2_PENDING_FB_UNREF_DISABLE, + &mcrtc->pending)) { + WARN_ON(drm_crtc_vblank_get(crtc) != 0); + set_bit(MGA2_PENDING_FB_UNREF, &mcrtc->pending); + } +} + +static const struct drm_plane_helper_funcs mga2_primary_helper_funcs = { + .atomic_disable = mga2_primary_atomic_disable, + .atomic_update = mga2_primary_atomic_update, + .atomic_check = mga2_primary_atomic_check, +}; + +static const struct drm_plane_funcs mga2_layer_funcs = { + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .destroy = drm_plane_cleanup, + .disable_plane = drm_atomic_helper_disable_plane, + .reset = drm_atomic_helper_plane_reset, + .update_plane = drm_atomic_helper_update_plane, +}; + +static const uint32_t mga2_primary_formats[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_BGRX5551, +#ifdef __BIG_ENDIAN + DRM_FORMAT_XRGB4444, + DRM_FORMAT_BGRX4444, +#endif + DRM_FORMAT_C8, +}; + +static const uint32_t mga2_cursor_formats[] = { + DRM_FORMAT_ARGB8888, +}; + +static const uint32_t mga2_overlay_formats[] = { + DRM_FORMAT_ARGB8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_AYUV, + /*DRM_FORMAT_YUVA, linux don't know such */ + DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, + DRM_FORMAT_NV12, + DRM_FORMAT_NV21, + DRM_FORMAT_NV16, + DRM_FORMAT_NV61, + DRM_FORMAT_NV24, + DRM_FORMAT_NV42, + + DRM_FORMAT_YUV420, + DRM_FORMAT_YVU420, + DRM_FORMAT_YUV422, + DRM_FORMAT_YVU422, + 
DRM_FORMAT_YUV444, + DRM_FORMAT_YVU444, +}; + +static const struct mga2_plane_desc mga2_planes[] = { + { + .type = DRM_PLANE_TYPE_PRIMARY, + .formats = mga2_primary_formats, + .nformats = ARRAY_SIZE(mga2_primary_formats), + .func = &mga2_primary_helper_funcs, + }, + { + .type = DRM_PLANE_TYPE_CURSOR, + .formats = mga2_cursor_formats, + .nformats = ARRAY_SIZE(mga2_cursor_formats), + .func = &mga2_cursor_helper_funcs, + }, + + { /* must be the last */ + .type = DRM_PLANE_TYPE_OVERLAY, + .formats = mga2_overlay_formats, + .nformats = ARRAY_SIZE(mga2_overlay_formats), + .func = &mga2_overlay_helper_funcs, + }, +}; + +static struct mga2_layer *mga2_layer_init_one(struct drm_device *drm, + const struct mga2_plane_desc *plane) +{ + struct mga2_layer *layer; + int ret; + + layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL); + if (!layer) + return ERR_PTR(-ENOMEM); + + /* possible crtcs are set later */ + ret = drm_universal_plane_init(drm, &layer->plane, 0, + &mga2_layer_funcs, + plane->formats, plane->nformats, + NULL, plane->type, NULL); + if (ret) { + dev_err(drm->dev, "Couldn't initialize layer\n"); + return ERR_PTR(ret); + } + + if (plane->type == DRM_PLANE_TYPE_OVERLAY) { + ret = drm_plane_create_alpha_property(&layer->plane); + if (ret) + return ERR_PTR(ret); + } + drm_plane_helper_add(&layer->plane, plane->func); + + return layer; +} + +struct drm_plane **mga2_layers_init(struct drm_device *drm) +{ + struct mga2 *mga2 = drm->dev_private; + struct drm_plane **planes; + int i; + int n = mga25(mga2) ? 
ARRAY_SIZE(mga2_planes) : + ARRAY_SIZE(mga2_planes) - 1; + planes = devm_kcalloc(drm->dev, ARRAY_SIZE(mga2_planes) + 1, + sizeof(*planes), GFP_KERNEL); + if (!planes) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < n; i++) { + const struct mga2_plane_desc *plane = &mga2_planes[i]; + struct mga2_layer *layer; + + layer = mga2_layer_init_one(drm, plane); + if (IS_ERR(layer)) { + return ERR_CAST(layer); + }; + planes[i] = &layer->plane; + } + + return planes; +} diff --git a/drivers/mcst/mga2/mga2_lvds.c b/drivers/mcst/mga2/mga2_lvds.c new file mode 100644 index 000000000000..2abf388b07a6 --- /dev/null +++ b/drivers/mcst/mga2/mga2_lvds.c @@ -0,0 +1,244 @@ +#include "mga2_drv.h" +#include +#include